path: root/arch/x86/Kconfig.debug
Commit message | Author | Date | Files | Lines
* x86, nfit_test: Add unit test for memcpy_mcsafe() | Dan Williams | 2018-05-23 | 1 | -0/+3
* Merge branch 'x86-cleanups-for-linus' of git://git.kernel.org/pub/scm/linux/k... | Linus Torvalds | 2018-01-30 | 1 | -8/+0
|\
| * x86: Remove unused IOMMU_STRESS Kconfig | Corentin Labbe | 2018-01-25 | 1 | -8/+0
* | x86/unwinder/guess: Prevent using CONFIG_UNWINDER_GUESS=y with CONFIG_STACKDE... | Andrey Ryabinin | 2017-12-11 | 1 | -0/+1
|/
* Merge branch 'linus' into x86/asm, to pick up fixes and resolve conflicts | Ingo Molnar | 2017-11-07 | 1 | -0/+1
|\
| * License cleanup: add SPDX GPL-2.0 license identifier to files with no license | Greg Kroah-Hartman | 2017-11-02 | 1 | -0/+1
* | x86/unwind: Make CONFIG_UNWINDER_ORC=y the default in kconfig for 64-bit | Josh Poimboeuf | 2017-10-14 | 1 | -16/+17
* | x86/unwind: Rename unwinder config options to 'CONFIG_UNWINDER_*' | Josh Poimboeuf | 2017-10-14 | 1 | -5/+5
|/
* Merge branch 'x86/urgent' into x86/asm, to pick up fixes | Ingo Molnar | 2017-08-10 | 1 | -0/+1
|\
| * x86/platform: Add PCI dependency for PUNIT_ATOM_DEBUG | Arnd Bergmann | 2017-07-20 | 1 | -0/+1
* | x86/kconfig: Consolidate unwinders into multiple choice selection | Josh Poimboeuf | 2017-07-26 | 1 | -7/+40
* | x86/kconfig: Make it easier to switch to the new ORC unwinder | Josh Poimboeuf | 2017-07-26 | 1 | -4/+3
* | x86/unwind: Add the ORC unwinder | Josh Poimboeuf | 2017-07-26 | 1 | -0/+25
* | x86/entry/64: Refactor IRQ stacks and make them NMI-safe | Andy Lutomirski | 2017-07-18 | 1 | -2/+0
|/
* usb/early: Add driver for xhci debug capability | Lu Baolu | 2017-03-21 | 1 | -2/+25
* mm: add arch-independent testcases for RODATA | Jinbum Park | 2017-02-28 | 1 | -8/+0
* Merge tag 'rodata-v4.11-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git... | Linus Torvalds | 2017-02-22 | 1 | -11/+0
|\
| * arch: Move CONFIG_DEBUG_RODATA and CONFIG_SET_MODULE_RONX to be common | Laura Abbott | 2017-02-07 | 1 | -11/+0
* | x86/mm: Remove CONFIG_DEBUG_NX_TEST | Kees Cook | 2017-01-31 | 1 | -8/+0
|/
* Merge branch 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel... | Linus Torvalds | 2016-03-15 | 1 | -10/+0
|\
| * x86/cpufeature: Replace the old static_cpu_has() with safe variant | Borislav Petkov | 2016-01-30 | 1 | -10/+0
* | x86/mm: Always enable CONFIG_DEBUG_RODATA and remove the Kconfig option | Kees Cook | 2016-02-22 | 1 | -15/+3
|/
* Merge tag 'libnvdimm-for-4.5' of git://git.kernel.org/pub/scm/linux/kernel/gi... | Linus Torvalds | 2016-01-14 | 1 | -17/+0
|\
| * arch: consolidate CONFIG_STRICT_DEVM in lib/Kconfig.debug | Dan Williams | 2016-01-09 | 1 | -17/+0
* | x86/mm: Turn CONFIG_X86_PTDUMP into a module | Kees Cook | 2015-11-23 | 1 | -1/+1
|/
* x86: don't make DEBUG_WX default to 'y' even with DEBUG_RODATA | Linus Torvalds | 2015-11-06 | 1 | -1/+0
* x86/mm: Warn on W^X mappings | Stephen Smalley | 2015-10-06 | 1 | -1/+35
* x86/entry/64, x86/nmi/64: Add CONFIG_DEBUG_ENTRY NMI testing code | Andy Lutomirski | 2015-07-17 | 1 | -0/+12
* Merge branch 'x86-core-for-linus' of git://git.kernel.org/pub/scm/linux/kerne... | Linus Torvalds | 2015-06-23 | 1 | -0/+11
|\
| * x86/platform/atom/punit: Add Punit device state debug driver | Srinivas Pandruvada | 2015-05-07 | 1 | -0/+11
* | x86/fpu: Add CONFIG_X86_DEBUG_FPU=y FPU debugging code | Ingo Molnar | 2015-05-19 | 1 | -0/+12
|/
* x86, intel-mid: remove Intel MID specific serial support | Andy Shevchenko | 2015-03-07 | 1 | -4/+0
* x86/intel/quark: Add Isolated Memory Regions for Quark X1000 | Bryan O'Donoghue | 2015-02-18 | 1 | -0/+13
* x86/efi: Dump the EFI page table | Borislav Petkov | 2014-03-04 | 1 | -0/+9
* x86: Disable CONFIG_X86_DECODER_SELFTEST in allmod/allyesconfigs | Ingo Molnar | 2014-02-05 | 1 | -0/+1
* x86/efi: Add EFI framebuffer earlyprintk support | Matt Fleming | 2013-10-28 | 1 | -0/+10
* Merge branch 'kconfig-diet' from Dave Hansen | Linus Torvalds | 2013-07-04 | 1 | -10/+0
|\
| * consolidate per-arch stack overflow debugging options | Dave Hansen | 2013-07-04 | 1 | -10/+0
* | Merge branch 'x86-fpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel... | Linus Torvalds | 2013-07-03 | 1 | -0/+10
|\ \
| * | x86: Sanity-check static_cpu_has usage | Borislav Petkov | 2013-06-21 | 1 | -0/+10
| |/
* / x86: Extend #DF debugging aid to 64-bit | Borislav Petkov | 2013-05-13 | 1 | -1/+0
|/
* Kconfig: consolidate CONFIG_DEBUG_STRICT_USER_COPY_CHECKS | Stephen Boyd | 2013-05-01 | 1 | -14/+0
* x86/mm: Re-enable DEBUG_TLBFLUSH for X86_32 | Paul Bolle | 2013-04-10 | 1 | -1/+1
* x86/tlb: add tlb_flushall_shift knob into debugfs | Alex Shi | 2012-06-28 | 1 | -0/+19
* Merge branch 'x86-platform-for-linus' of git://git.kernel.org/pub/scm/linux/k... | Linus Torvalds | 2012-01-12 | 1 | -3/+3
|\
| * x86/config: Revamp configuration for MID devices | Alan Cox | 2011-12-05 | 1 | -3/+3
* | x86, NMI: NMI selftest depends on the local apic | Don Zickus | 2011-12-07 | 1 | -1/+1
* | x86, NMI: Add NMI IPI selftest | Don Zickus | 2011-12-05 | 1 | -0/+12
* | x86: Check stack overflow in detail | Mitsuo Hayasaka | 2011-12-05 | 1 | -2/+5
|/
* doc: fix broken references | Paul Bolle | 2011-09-27 | 1 | -1/+1
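
The listing above can be regenerated from any kernel checkout with plain git; a minimal sketch, where the format string is only one way to approximate the columns shown here and --stat supplies the per-commit line counts:

    # Commit graph for a single file: subject, author, date, plus a diffstat
    git log --graph --date=short --pretty=format:'%s | %an | %ad' --stat -- arch/x86/Kconfig.debug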
-rw-r--r--arch/m68k/mvme16x/rtc.c19
-rw-r--r--arch/m68k/q40/config.c2
-rw-r--r--arch/m68knommu/Kconfig2
-rw-r--r--arch/m68knommu/mm/fault.c10
-rw-r--r--arch/m68knommu/platform/520x/config.c149
-rw-r--r--arch/m68knommu/platform/523x/config.c170
-rw-r--r--arch/m68knommu/platform/5249/config.c215
-rw-r--r--arch/m68knommu/platform/527x/config.c182
-rw-r--r--arch/m68knommu/platform/528x/config.c137
-rw-r--r--arch/m68knommu/platform/5307/Makefile4
-rw-r--r--arch/m68knommu/platform/5307/nettel.c153
-rw-r--r--arch/m68knommu/platform/532x/config.c124
-rw-r--r--arch/m68knommu/platform/68360/commproc.c4
-rw-r--r--arch/microblaze/include/asm/device.h16
-rw-r--r--arch/microblaze/include/asm/of_device.h3
-rw-r--r--arch/microblaze/include/asm/scatterlist.h2
-rw-r--r--arch/microblaze/include/asm/system.h11
-rw-r--r--arch/microblaze/include/asm/thread_info.h2
-rw-r--r--arch/microblaze/include/asm/uaccess.h87
-rw-r--r--arch/microblaze/kernel/cpu/cache.c3
-rw-r--r--arch/microblaze/kernel/entry-nommu.S2
-rw-r--r--arch/microblaze/kernel/microblaze_ksyms.c11
-rw-r--r--arch/microblaze/kernel/module.c2
-rw-r--r--arch/microblaze/kernel/of_device.c13
-rw-r--r--arch/microblaze/kernel/of_platform.c6
-rw-r--r--arch/microblaze/mm/init.c1
-rw-r--r--arch/microblaze/mm/pgtable.c1
-rw-r--r--arch/microblaze/pci/pci-common.c2
-rw-r--r--arch/mips/Kconfig2
-rw-r--r--arch/mips/alchemy/common/dbdma.c101
-rw-r--r--arch/mips/alchemy/common/irq.c174
-rw-r--r--arch/mips/alchemy/common/power.c16
-rw-r--r--arch/mips/alchemy/devboards/pb1000/board_setup.c16
-rw-r--r--arch/mips/alchemy/devboards/pb1100/board_setup.c5
-rw-r--r--arch/mips/alchemy/devboards/pb1200/board_setup.c6
-rw-r--r--arch/mips/alchemy/devboards/pb1500/board_setup.c5
-rw-r--r--arch/mips/alchemy/devboards/pb1550/board_setup.c5
-rw-r--r--arch/mips/ar7/platform.c17
-rw-r--r--arch/mips/bcm63xx/gpio.c6
-rw-r--r--arch/mips/cavium-octeon/serial.c6
-rw-r--r--arch/mips/cavium-octeon/setup.c27
-rw-r--r--arch/mips/configs/ar7_defconfig196
-rw-r--r--arch/mips/configs/bcm47xx_defconfig973
-rw-r--r--arch/mips/configs/mtx1_defconfig2404
-rw-r--r--arch/mips/configs/rb532_defconfig521
-rw-r--r--arch/mips/include/asm/atomic.h4
-rw-r--r--arch/mips/include/asm/i8253.h2
-rw-r--r--arch/mips/include/asm/kgdb.h2
-rw-r--r--arch/mips/include/asm/mach-au1x00/au1000.h34
-rw-r--r--arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h4
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h4
-rw-r--r--arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h2
-rw-r--r--arch/mips/include/asm/mach-loongson/gpio.h35
-rw-r--r--arch/mips/include/asm/mipsregs.h9
-rw-r--r--arch/mips/include/asm/processor.h12
-rw-r--r--arch/mips/include/asm/scatterlist.h22
-rw-r--r--arch/mips/include/asm/thread_info.h2
-rw-r--r--arch/mips/kernel/cpu-probe.c30
-rw-r--r--arch/mips/kernel/cpufreq/loongson2_cpufreq.c4
-rw-r--r--arch/mips/kernel/i8253.c14
-rw-r--r--arch/mips/kernel/kgdb.c27
-rw-r--r--arch/mips/kernel/mips-mt-fpaff.c4
-rw-r--r--arch/mips/kernel/scall64-n32.S2
-rw-r--r--arch/mips/kernel/setup.c21
-rw-r--r--arch/mips/kernel/traps.c15
-rw-r--r--arch/mips/loongson/common/Makefile1
-rw-r--r--arch/mips/loongson/common/gpio.c139
-rw-r--r--arch/mips/math-emu/cp1emu.c26
-rw-r--r--arch/mips/oprofile/op_model_loongson2.c60
-rw-r--r--arch/mips/powertv/asic/prealloc-calliope.c673
-rw-r--r--arch/mips/powertv/asic/prealloc-cronus.c668
-rw-r--r--arch/mips/powertv/asic/prealloc-cronuslite.c302
-rw-r--r--arch/mips/powertv/asic/prealloc-zeus.c505
-rw-r--r--arch/mips/powertv/asic/prealloc.h70
-rw-r--r--arch/mips/sibyte/bcm1480/irq.c2
-rw-r--r--arch/mips/sibyte/sb1250/irq.c2
-rw-r--r--arch/mips/sibyte/swarm/platform.c54
-rw-r--r--arch/mips/txx9/generic/setup.c4
-rw-r--r--arch/mn10300/Kconfig3
-rw-r--r--arch/mn10300/include/asm/atomic.h158
-rw-r--r--arch/mn10300/include/asm/cache.h2
-rw-r--r--arch/mn10300/include/asm/scatterlist.h39
-rw-r--r--arch/mn10300/include/asm/thread_info.h2
-rw-r--r--arch/mn10300/kernel/rtc.c27
-rw-r--r--arch/mn10300/kernel/time.c4
-rw-r--r--arch/parisc/Kconfig3
-rw-r--r--arch/parisc/include/asm/atomic.h4
-rw-r--r--arch/parisc/include/asm/bug.h8
-rw-r--r--arch/parisc/include/asm/cacheflush.h16
-rw-r--r--arch/parisc/include/asm/scatterlist.h20
-rw-r--r--arch/parisc/include/asm/thread_info.h2
-rw-r--r--arch/parisc/kernel/asm-offsets.c15
-rw-r--r--arch/parisc/kernel/entry.S52
-rw-r--r--arch/parisc/kernel/syscall.S32
-rw-r--r--arch/parisc/math-emu/decode_exc.c1
-rw-r--r--arch/parisc/mm/fault.c7
-rw-r--r--arch/powerpc/Kconfig4
-rw-r--r--arch/powerpc/Kconfig.debug12
-rw-r--r--arch/powerpc/boot/Makefile9
-rw-r--r--arch/powerpc/boot/dts/iss4xx-mpic.dts155
-rw-r--r--arch/powerpc/boot/dts/iss4xx.dts116
-rw-r--r--arch/powerpc/boot/dts/mpc8315erdb.dts16
-rw-r--r--arch/powerpc/boot/dts/mpc8377_rdb.dts14
-rw-r--r--arch/powerpc/boot/dts/mpc8378_rdb.dts14
-rw-r--r--arch/powerpc/boot/dts/mpc8379_rdb.dts14
-rw-r--r--arch/powerpc/boot/dts/p1020rdb.dts125
-rw-r--r--arch/powerpc/boot/treeboot-iss4xx.c56
-rwxr-xr-xarch/powerpc/boot/wrapper9
-rw-r--r--arch/powerpc/configs/44x/iss476-smp_defconfig1026
-rw-r--r--arch/powerpc/configs/ppc64_defconfig2
-rw-r--r--arch/powerpc/configs/pseries_defconfig2
-rw-r--r--arch/powerpc/include/asm/asm-compat.h2
-rw-r--r--arch/powerpc/include/asm/bug.h6
-rw-r--r--arch/powerpc/include/asm/cache.h6
-rw-r--r--arch/powerpc/include/asm/cputable.h5
-rw-r--r--arch/powerpc/include/asm/device.h16
-rw-r--r--arch/powerpc/include/asm/hvcall.h2
-rw-r--r--arch/powerpc/include/asm/hw_irq.h38
-rw-r--r--arch/powerpc/include/asm/kexec.h4
-rw-r--r--arch/powerpc/include/asm/kmap_types.h1
-rw-r--r--arch/powerpc/include/asm/kvm.h10
-rw-r--r--arch/powerpc/include/asm/kvm_asm.h2
-rw-r--r--arch/powerpc/include/asm/kvm_book3s.h157
-rw-r--r--arch/powerpc/include/asm/kvm_book3s_32.h42
-rw-r--r--arch/powerpc/include/asm/kvm_book3s_64.h28
-rw-r--r--arch/powerpc/include/asm/kvm_book3s_asm.h (renamed from arch/powerpc/include/asm/kvm_book3s_64_asm.h)25
-rw-r--r--arch/powerpc/include/asm/kvm_booke.h96
-rw-r--r--arch/powerpc/include/asm/kvm_fpu.h85
-rw-r--r--arch/powerpc/include/asm/kvm_host.h38
-rw-r--r--arch/powerpc/include/asm/kvm_ppc.h97
-rw-r--r--arch/powerpc/include/asm/macio.h2
-rw-r--r--arch/powerpc/include/asm/mmu-44x.h51
-rw-r--r--arch/powerpc/include/asm/mmu.h1
-rw-r--r--arch/powerpc/include/asm/mmu_context.h2
-rw-r--r--arch/powerpc/include/asm/mmzone.h2
-rw-r--r--arch/powerpc/include/asm/mpc52xx_psc.h1
-rw-r--r--arch/powerpc/include/asm/mpic.h3
-rw-r--r--arch/powerpc/include/asm/of_device.h3
-rw-r--r--arch/powerpc/include/asm/paca.h11
-rw-r--r--arch/powerpc/include/asm/parport.h11
-rw-r--r--arch/powerpc/include/asm/pgalloc-64.h6
-rw-r--r--arch/powerpc/include/asm/pgtable-ppc32.h2
-rw-r--r--arch/powerpc/include/asm/processor.h3
-rw-r--r--arch/powerpc/include/asm/ptrace.h64
-rw-r--r--arch/powerpc/include/asm/reg.h14
-rw-r--r--arch/powerpc/include/asm/reg_booke.h24
-rw-r--r--arch/powerpc/include/asm/scatterlist.h28
-rw-r--r--arch/powerpc/include/asm/smp.h18
-rw-r--r--arch/powerpc/include/asm/thread_info.h2
-rw-r--r--arch/powerpc/include/asm/topology.h26
-rw-r--r--arch/powerpc/kernel/asm-offsets.c112
-rw-r--r--arch/powerpc/kernel/cputable.c29
-rw-r--r--arch/powerpc/kernel/crash.c38
-rw-r--r--arch/powerpc/kernel/dma-swiotlb.c9
-rw-r--r--arch/powerpc/kernel/dma.c12
-rw-r--r--arch/powerpc/kernel/entry_32.S5
-rw-r--r--arch/powerpc/kernel/entry_64.S9
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S13
-rw-r--r--arch/powerpc/kernel/head_32.S14
-rw-r--r--arch/powerpc/kernel/head_44x.S828
-rw-r--r--arch/powerpc/kernel/head_64.S4
-rw-r--r--arch/powerpc/kernel/head_8xx.S70
-rw-r--r--arch/powerpc/kernel/head_booke.h4
-rw-r--r--arch/powerpc/kernel/head_fsl_booke.S13
-rw-r--r--arch/powerpc/kernel/ibmebus.c6
-rw-r--r--arch/powerpc/kernel/iommu.c12
-rw-r--r--arch/powerpc/kernel/irq.c23
-rw-r--r--arch/powerpc/kernel/kgdb.c11
-rw-r--r--arch/powerpc/kernel/kprobes.c3
-rw-r--r--arch/powerpc/kernel/lparcfg.c12
-rw-r--r--arch/powerpc/kernel/machine_kexec_64.c48
-rw-r--r--arch/powerpc/kernel/misc_32.S9
-rw-r--r--arch/powerpc/kernel/misc_64.S8
-rw-r--r--arch/powerpc/kernel/of_device.c13
-rw-r--r--arch/powerpc/kernel/of_platform.c21
-rw-r--r--arch/powerpc/kernel/paca.c2
-rw-r--r--arch/powerpc/kernel/pci-common.c4
-rw-r--r--arch/powerpc/kernel/pci_of_scan.c2
-rw-r--r--arch/powerpc/kernel/perf_event.c129
-rw-r--r--arch/powerpc/kernel/ppc_ksyms.c4
-rw-r--r--arch/powerpc/kernel/process.c3
-rw-r--r--arch/powerpc/kernel/ptrace.c103
-rw-r--r--arch/powerpc/kernel/rtas.c15
-rw-r--r--arch/powerpc/kernel/rtasd.c16
-rw-r--r--arch/powerpc/kernel/setup-common.c86
-rw-r--r--arch/powerpc/kernel/setup_64.c23
-rw-r--r--arch/powerpc/kernel/smp.c73
-rw-r--r--arch/powerpc/kernel/sysfs.c46
-rw-r--r--arch/powerpc/kernel/time.c60
-rw-r--r--arch/powerpc/kernel/traps.c47
-rw-r--r--arch/powerpc/kernel/vio.c44
-rw-r--r--arch/powerpc/kvm/44x.c2
-rw-r--r--arch/powerpc/kvm/44x_tlb.c2
-rw-r--r--arch/powerpc/kvm/Kconfig24
-rw-r--r--arch/powerpc/kvm/Makefile20
-rw-r--r--arch/powerpc/kvm/book3s.c503
-rw-r--r--arch/powerpc/kvm/book3s_32_mmu.c54
-rw-r--r--arch/powerpc/kvm/book3s_32_mmu_host.c483
-rw-r--r--arch/powerpc/kvm/book3s_32_sr.S143
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu.c36
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_host.c102
-rw-r--r--arch/powerpc/kvm/book3s_64_slb.S183
-rw-r--r--arch/powerpc/kvm/book3s_emulate.c (renamed from arch/powerpc/kvm/book3s_64_emulate.c)245
-rw-r--r--arch/powerpc/kvm/book3s_exports.c (renamed from arch/powerpc/kvm/book3s_64_exports.c)0
-rw-r--r--arch/powerpc/kvm/book3s_interrupts.S (renamed from arch/powerpc/kvm/book3s_64_interrupts.S)204
-rw-r--r--arch/powerpc/kvm/book3s_paired_singles.c1289
-rw-r--r--arch/powerpc/kvm/book3s_rmhandlers.S (renamed from arch/powerpc/kvm/book3s_64_rmhandlers.S)135
-rw-r--r--arch/powerpc/kvm/book3s_segment.S259
-rw-r--r--arch/powerpc/kvm/booke.c21
-rw-r--r--arch/powerpc/kvm/e500.c2
-rw-r--r--arch/powerpc/kvm/emulate.c55
-rw-r--r--arch/powerpc/kvm/fpu.S273
-rw-r--r--arch/powerpc/kvm/powerpc.c110
-rw-r--r--arch/powerpc/lib/string.S18
-rw-r--r--arch/powerpc/mm/44x_mmu.c144
-rw-r--r--arch/powerpc/mm/fault.c19
-rw-r--r--arch/powerpc/mm/fsl_booke_mmu.c25
-rw-r--r--arch/powerpc/mm/init_64.c43
-rw-r--r--arch/powerpc/mm/mmu_context_hash32.c29
-rw-r--r--arch/powerpc/mm/mmu_context_nohash.c8
-rw-r--r--arch/powerpc/mm/mmu_decl.h17
-rw-r--r--arch/powerpc/mm/numa.c78
-rw-r--r--arch/powerpc/mm/pgtable_32.c12
-rw-r--r--arch/powerpc/mm/pgtable_64.c8
-rw-r--r--arch/powerpc/mm/tlb_nohash_low.S146
-rw-r--r--arch/powerpc/platforms/44x/Kconfig20
-rw-r--r--arch/powerpc/platforms/44x/Makefile1
-rw-r--r--arch/powerpc/platforms/44x/iss4xx.c167
-rw-r--r--arch/powerpc/platforms/512x/mpc512x_shared.c78
-rw-r--r--arch/powerpc/platforms/52xx/mpc52xx_gpio.c18
-rw-r--r--arch/powerpc/platforms/52xx/mpc52xx_gpt.c23
-rw-r--r--arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c14
-rw-r--r--arch/powerpc/platforms/82xx/ep8248e.c9
-rw-r--r--arch/powerpc/platforms/83xx/mpc831x_rdb.c1
-rw-r--r--arch/powerpc/platforms/83xx/mpc837x_rdb.c1
-rw-r--r--arch/powerpc/platforms/83xx/suspend.c9
-rw-r--r--arch/powerpc/platforms/86xx/mpc8610_hpcd.c3
-rw-r--r--arch/powerpc/platforms/Kconfig.cputype5
-rw-r--r--arch/powerpc/platforms/cell/axon_msi.c9
-rw-r--r--arch/powerpc/platforms/cell/cbe_cpufreq.c2
-rw-r--r--arch/powerpc/platforms/cell/iommu.c5
-rw-r--r--arch/powerpc/platforms/cell/spufs/file.c3
-rw-r--r--arch/powerpc/platforms/cell/spufs/inode.c2
-rw-r--r--arch/powerpc/platforms/embedded6xx/wii.c4
-rw-r--r--arch/powerpc/platforms/iseries/exception.S4
-rw-r--r--arch/powerpc/platforms/iseries/pci.c10
-rw-r--r--arch/powerpc/platforms/iseries/smp.c2
-rw-r--r--arch/powerpc/platforms/pasemi/cpufreq.c2
-rw-r--r--arch/powerpc/platforms/pasemi/gpio_mdio.c9
-rw-r--r--arch/powerpc/platforms/pasemi/setup.c4
-rw-r--r--arch/powerpc/platforms/powermac/cpufreq_64.c2
-rw-r--r--arch/powerpc/platforms/powermac/low_i2c.c7
-rw-r--r--arch/powerpc/platforms/powermac/pmac.h2
-rw-r--r--arch/powerpc/platforms/powermac/setup.c13
-rw-r--r--arch/powerpc/platforms/powermac/smp.c9
-rw-r--r--arch/powerpc/platforms/ps3/system-bus.c2
-rw-r--r--arch/powerpc/platforms/pseries/Makefile2
-rw-r--r--arch/powerpc/platforms/pseries/dlpar.c8
-rw-r--r--arch/powerpc/platforms/pseries/eeh.c2
-rw-r--r--arch/powerpc/platforms/pseries/event_sources.c79
-rw-r--r--arch/powerpc/platforms/pseries/hotplug-cpu.c67
-rw-r--r--arch/powerpc/platforms/pseries/hvCall.S38
-rw-r--r--arch/powerpc/platforms/pseries/hvCall_inst.c10
-rw-r--r--arch/powerpc/platforms/pseries/iommu.c2
-rw-r--r--arch/powerpc/platforms/pseries/lpar.c33
-rw-r--r--arch/powerpc/platforms/pseries/plpar_wrappers.h26
-rw-r--r--arch/powerpc/platforms/pseries/pseries.h7
-rw-r--r--arch/powerpc/platforms/pseries/ras.c62
-rw-r--r--arch/powerpc/platforms/pseries/setup.c10
-rw-r--r--arch/powerpc/platforms/pseries/smp.c42
-rw-r--r--arch/powerpc/platforms/pseries/xics.c38
-rw-r--r--arch/powerpc/sysdev/axonram.c14
-rw-r--r--arch/powerpc/sysdev/bestcomm/bestcomm.c18
-rw-r--r--arch/powerpc/sysdev/fsl_msi.c21
-rw-r--r--arch/powerpc/sysdev/fsl_pmc.c9
-rw-r--r--arch/powerpc/sysdev/fsl_rio.c413
-rw-r--r--arch/powerpc/sysdev/mpc8xxx_gpio.c147
-rw-r--r--arch/powerpc/sysdev/mpic.c72
-rw-r--r--arch/powerpc/sysdev/mv64x60_pci.c4
-rw-r--r--arch/powerpc/sysdev/pmi.c9
-rw-r--r--arch/powerpc/sysdev/ppc4xx_soc.c24
-rw-r--r--arch/powerpc/sysdev/qe_lib/qe.c7
-rw-r--r--arch/s390/Kconfig15
-rw-r--r--arch/s390/Makefile6
-rw-r--r--arch/s390/boot/compressed/Makefile5
-rw-r--r--arch/s390/boot/compressed/misc.c4
-rw-r--r--arch/s390/hypfs/hypfs.h4
-rw-r--r--arch/s390/hypfs/hypfs_diag.c123
-rw-r--r--arch/s390/hypfs/hypfs_vm.c87
-rw-r--r--arch/s390/hypfs/inode.c42
-rw-r--r--arch/s390/include/asm/atomic.h19
-rw-r--r--arch/s390/include/asm/bug.h8
-rw-r--r--arch/s390/include/asm/ccwdev.h10
-rw-r--r--arch/s390/include/asm/cputime.h9
-rw-r--r--arch/s390/include/asm/lowcore.h89
-rw-r--r--arch/s390/include/asm/ptrace.h3
-rw-r--r--arch/s390/include/asm/qdio.h2
-rw-r--r--arch/s390/include/asm/scatterlist.h2
-rw-r--r--arch/s390/include/asm/setup.h5
-rw-r--r--arch/s390/include/asm/system.h5
-rw-r--r--arch/s390/include/asm/thread_info.h3
-rw-r--r--arch/s390/include/asm/timex.h8
-rw-r--r--arch/s390/include/asm/topology.h2
-rw-r--r--arch/s390/kernel/asm-offsets.c6
-rw-r--r--arch/s390/kernel/debug.c1
-rw-r--r--arch/s390/kernel/early.c4
-rw-r--r--arch/s390/kernel/entry.S324
-rw-r--r--arch/s390/kernel/entry64.S617
-rw-r--r--arch/s390/kernel/head.S4
-rw-r--r--arch/s390/kernel/head31.S2
-rw-r--r--arch/s390/kernel/head64.S2
-rw-r--r--arch/s390/kernel/ipl.c14
-rw-r--r--arch/s390/kernel/kprobes.c3
-rw-r--r--arch/s390/kernel/nmi.c3
-rw-r--r--arch/s390/kernel/processor.c37
-rw-r--r--arch/s390/kernel/ptrace.c73
-rw-r--r--arch/s390/kernel/s390_ext.c3
-rw-r--r--arch/s390/kernel/setup.c27
-rw-r--r--arch/s390/kernel/signal.c2
-rw-r--r--arch/s390/kernel/smp.c6
-rw-r--r--arch/s390/kernel/time.c1
-rw-r--r--arch/s390/kernel/topology.c7
-rw-r--r--arch/s390/kernel/traps.c31
-rw-r--r--arch/s390/kernel/vdso.c4
-rw-r--r--arch/s390/kernel/vtime.c15
-rw-r--r--arch/s390/kvm/kvm-s390.c6
-rw-r--r--arch/s390/kvm/kvm-s390.h2
-rw-r--r--arch/s390/kvm/sie64a.S75
-rw-r--r--arch/s390/mm/cmm.c109
-rw-r--r--arch/s390/mm/fault.c32
-rw-r--r--arch/score/include/asm/scatterlist.h2
-rw-r--r--arch/score/include/asm/thread_info.h2
-rw-r--r--arch/sh/Kconfig40
-rw-r--r--arch/sh/Makefile1
-rw-r--r--arch/sh/boards/board-urquell.c3
-rw-r--r--arch/sh/boards/mach-ecovec24/setup.c113
-rw-r--r--arch/sh/boards/mach-highlander/setup.c12
-rw-r--r--arch/sh/boards/mach-kfr2r09/setup.c11
-rw-r--r--arch/sh/boards/mach-migor/setup.c9
-rw-r--r--arch/sh/boards/mach-sdk7786/setup.c17
-rw-r--r--arch/sh/boards/mach-se/7724/setup.c127
-rw-r--r--arch/sh/boards/mach-x3proto/setup.c7
-rw-r--r--arch/sh/boot/compressed/Makefile6
-rw-r--r--arch/sh/boot/compressed/head_32.S4
-rw-r--r--arch/sh/configs/ap325rxa_defconfig56
-rw-r--r--arch/sh/configs/cayman_defconfig46
-rw-r--r--arch/sh/configs/dreamcast_defconfig29
-rw-r--r--arch/sh/configs/ecovec24-romimage_defconfig54
-rw-r--r--arch/sh/configs/ecovec24_defconfig7
-rw-r--r--arch/sh/configs/edosk7705_defconfig21
-rw-r--r--arch/sh/configs/edosk7760_defconfig38
-rw-r--r--arch/sh/configs/espt_defconfig42
-rw-r--r--arch/sh/configs/hp6xx_defconfig24
-rw-r--r--arch/sh/configs/kfr2r09-romimage_defconfig50
-rw-r--r--arch/sh/configs/kfr2r09_defconfig57
-rw-r--r--arch/sh/configs/landisk_defconfig45
-rw-r--r--arch/sh/configs/lboxre2_defconfig35
-rw-r--r--arch/sh/configs/magicpanelr2_defconfig28
-rw-r--r--arch/sh/configs/microdev_defconfig23
-rw-r--r--arch/sh/configs/migor_defconfig60
-rw-r--r--arch/sh/configs/polaris_defconfig31
-rw-r--r--arch/sh/configs/r7780mp_defconfig72
-rw-r--r--arch/sh/configs/r7785rp_defconfig76
-rw-r--r--arch/sh/configs/rsk7201_defconfig31
-rw-r--r--arch/sh/configs/rsk7203_defconfig49
-rw-r--r--arch/sh/configs/rts7751r2d1_defconfig54
-rw-r--r--arch/sh/configs/rts7751r2dplus_defconfig54
-rw-r--r--arch/sh/configs/sdk7780_defconfig53
-rw-r--r--arch/sh/configs/sdk7786_defconfig644
-rw-r--r--arch/sh/configs/se7206_defconfig38
-rw-r--r--arch/sh/configs/se7343_defconfig53
-rw-r--r--arch/sh/configs/se7619_defconfig22
-rw-r--r--arch/sh/configs/se7705_defconfig25
-rw-r--r--arch/sh/configs/se7712_defconfig28
-rw-r--r--arch/sh/configs/se7721_defconfig40
-rw-r--r--arch/sh/configs/se7722_defconfig29
-rw-r--r--arch/sh/configs/se7724_defconfig78
-rw-r--r--arch/sh/configs/se7750_defconfig24
-rw-r--r--arch/sh/configs/se7751_defconfig25
-rw-r--r--arch/sh/configs/se7780_defconfig39
-rw-r--r--arch/sh/configs/sh03_defconfig43
-rw-r--r--arch/sh/configs/sh7710voipgw_defconfig29
-rw-r--r--arch/sh/configs/sh7724_generic_defconfig58
-rw-r--r--arch/sh/configs/sh7763rdp_defconfig45
-rw-r--r--arch/sh/configs/sh7770_generic_defconfig54
-rw-r--r--arch/sh/configs/sh7785lcr_32bit_defconfig326
-rw-r--r--arch/sh/configs/sh7785lcr_defconfig62
-rw-r--r--arch/sh/configs/shmin_defconfig23
-rw-r--r--arch/sh/configs/shx3_defconfig67
-rw-r--r--arch/sh/configs/snapgear_defconfig31
-rw-r--r--arch/sh/configs/systemh_defconfig23
-rw-r--r--arch/sh/configs/titan_defconfig49
-rw-r--r--arch/sh/configs/ul2_defconfig38
-rw-r--r--arch/sh/configs/urquell_defconfig82
-rw-r--r--arch/sh/include/asm/atomic.h2
-rw-r--r--arch/sh/include/asm/bug.h4
-rw-r--r--arch/sh/include/asm/cache.h4
-rw-r--r--arch/sh/include/asm/clkdev.h35
-rw-r--r--arch/sh/include/asm/clock.h161
-rw-r--r--arch/sh/include/asm/hw_breakpoint.h11
-rw-r--r--arch/sh/include/asm/hwblk.h12
-rw-r--r--arch/sh/include/asm/io_generic.h1
-rw-r--r--arch/sh/include/asm/irq.h19
-rw-r--r--arch/sh/include/asm/kexec.h8
-rw-r--r--arch/sh/include/asm/machvec.h2
-rw-r--r--arch/sh/include/asm/mmzone.h3
-rw-r--r--arch/sh/include/asm/page.h15
-rw-r--r--arch/sh/include/asm/processor.h7
-rw-r--r--arch/sh/include/asm/processor_32.h2
-rw-r--r--arch/sh/include/asm/setup.h1
-rw-r--r--arch/sh/include/asm/siu.h10
-rw-r--r--arch/sh/include/asm/smp-ops.h51
-rw-r--r--arch/sh/include/asm/smp.h40
-rw-r--r--arch/sh/include/asm/thread_info.h2
-rw-r--r--arch/sh/include/cpu-sh4/cpu/dma-register.h3
-rw-r--r--arch/sh/include/cpu-sh4/cpu/mmu_context.h18
-rw-r--r--arch/sh/include/cpu-sh4/cpu/sh7722.h15
-rw-r--r--arch/sh/include/cpu-sh4/cpu/sh7724.h19
-rw-r--r--arch/sh/include/mach-sdk7786/mach/fpga.h9
-rw-r--r--arch/sh/kernel/Makefile2
-rw-r--r--arch/sh/kernel/clkdev.c169
-rw-r--r--arch/sh/kernel/cpu/Makefile2
-rw-r--r--arch/sh/kernel/cpu/clock-cpg.c312
-rw-r--r--arch/sh/kernel/cpu/clock.c630
-rw-r--r--arch/sh/kernel/cpu/hwblk.c5
-rw-r--r--arch/sh/kernel/cpu/init.c24
-rw-r--r--arch/sh/kernel/cpu/sh2/probe.c5
-rw-r--r--arch/sh/kernel/cpu/sh2/setup-sh7619.c6
-rw-r--r--arch/sh/kernel/cpu/sh2a/probe.c4
-rw-r--r--arch/sh/kernel/cpu/sh2a/setup-mxg.c9
-rw-r--r--arch/sh/kernel/cpu/sh2a/setup-sh7201.c9
-rw-r--r--arch/sh/kernel/cpu/sh2a/setup-sh7203.c12
-rw-r--r--arch/sh/kernel/cpu/sh2a/setup-sh7206.c15
-rw-r--r--arch/sh/kernel/cpu/sh3/probe.c4
-rw-r--r--arch/sh/kernel/cpu/sh3/setup-sh7705.c9
-rw-r--r--arch/sh/kernel/cpu/sh3/setup-sh770x.c9
-rw-r--r--arch/sh/kernel/cpu/sh3/setup-sh7710.c9
-rw-r--r--arch/sh/kernel/cpu/sh3/setup-sh7720.c24
-rw-r--r--arch/sh/kernel/cpu/sh4/clock-sh4-202.c17
-rw-r--r--arch/sh/kernel/cpu/sh4/probe.c4
-rw-r--r--arch/sh/kernel/cpu/sh4/setup-sh4-202.c9
-rw-r--r--arch/sh/kernel/cpu/sh4/setup-sh7750.c15
-rw-r--r--arch/sh/kernel/cpu/sh4/setup-sh7760.c9
-rw-r--r--arch/sh/kernel/cpu/sh4a/clock-sh7343.c231
-rw-r--r--arch/sh/kernel/cpu/sh4a/clock-sh7366.c210
-rw-r--r--arch/sh/kernel/cpu/sh4a/clock-sh7722.c177
-rw-r--r--arch/sh/kernel/cpu/sh4a/clock-sh7723.c279
-rw-r--r--arch/sh/kernel/cpu/sh4a/clock-sh7724.c295
-rw-r--r--arch/sh/kernel/cpu/sh4a/clock-sh7757.c11
-rw-r--r--arch/sh/kernel/cpu/sh4a/clock-sh7763.c12
-rw-r--r--arch/sh/kernel/cpu/sh4a/clock-sh7780.c12
-rw-r--r--arch/sh/kernel/cpu/sh4a/clock-sh7785.c176
-rw-r--r--arch/sh/kernel/cpu/sh4a/clock-sh7786.c222
-rw-r--r--arch/sh/kernel/cpu/sh4a/clock-shx3.c13
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7343.c16
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7366.c13
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7722.c32
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7723.c27
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7724.c153
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7757.c6
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7763.c18
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7770.c27
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7780.c27
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7785.c32
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7786.c157
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-shx3.c18
-rw-r--r--arch/sh/kernel/cpu/sh4a/smp-shx3.c74
-rw-r--r--arch/sh/kernel/cpu/sh5/probe.c4
-rw-r--r--arch/sh/kernel/cpu/sh5/setup-sh5.c9
-rw-r--r--arch/sh/kernel/crash_dump.c20
-rw-r--r--arch/sh/kernel/dwarf.c5
-rw-r--r--arch/sh/kernel/head_32.S7
-rw-r--r--arch/sh/kernel/hw_breakpoint.c39
-rw-r--r--arch/sh/kernel/idle.c8
-rw-r--r--arch/sh/kernel/irq.c91
-rw-r--r--arch/sh/kernel/kgdb.c14
-rw-r--r--arch/sh/kernel/localtimer.c6
-rw-r--r--arch/sh/kernel/machine_kexec.c62
-rw-r--r--arch/sh/kernel/machvec.c1
-rw-r--r--arch/sh/kernel/process.c2
-rw-r--r--arch/sh/kernel/ptrace_32.c25
-rw-r--r--arch/sh/kernel/setup.c298
-rw-r--r--arch/sh/kernel/smp.c160
-rw-r--r--arch/sh/kernel/topology.c6
-rw-r--r--arch/sh/kernel/traps_64.c5
-rw-r--r--arch/sh/lib/strlen.S2
-rw-r--r--arch/sh/mm/Makefile9
-rw-r--r--arch/sh/mm/cache-shx3.c44
-rw-r--r--arch/sh/mm/cache.c7
-rw-r--r--arch/sh/mm/fault_32.c14
-rw-r--r--arch/sh/mm/init.c173
-rw-r--r--arch/sh/mm/numa.c38
-rw-r--r--arch/sh/mm/pmb.c4
-rw-r--r--arch/sh/mm/tlb-debugfs.c179
-rw-r--r--arch/sh/mm/tlbflush_64.c20
-rw-r--r--arch/sparc/Kconfig5
-rw-r--r--arch/sparc/include/asm/atomic_32.h2
-rw-r--r--arch/sparc/include/asm/atomic_64.h4
-rw-r--r--arch/sparc/include/asm/bitops_64.h11
-rw-r--r--arch/sparc/include/asm/cache.h2
-rw-r--r--arch/sparc/include/asm/device.h15
-rw-r--r--arch/sparc/include/asm/fb.h2
-rw-r--r--arch/sparc/include/asm/floppy_64.h4
-rw-r--r--arch/sparc/include/asm/of_device.h1
-rw-r--r--arch/sparc/include/asm/parport.h9
-rw-r--r--arch/sparc/include/asm/scatterlist.h5
-rw-r--r--arch/sparc/include/asm/thread_info_32.h2
-rw-r--r--arch/sparc/include/asm/thread_info_64.h2
-rw-r--r--arch/sparc/kernel/apc.c7
-rw-r--r--arch/sparc/kernel/auxio_64.c9
-rw-r--r--arch/sparc/kernel/central.c18
-rw-r--r--arch/sparc/kernel/chmc.c17
-rw-r--r--arch/sparc/kernel/ioport.c2
-rw-r--r--arch/sparc/kernel/kgdb_32.c6
-rw-r--r--arch/sparc/kernel/kgdb_64.c6
-rw-r--r--arch/sparc/kernel/of_device_32.c17
-rw-r--r--arch/sparc/kernel/of_device_64.c29
-rw-r--r--arch/sparc/kernel/of_device_common.c4
-rw-r--r--arch/sparc/kernel/pci.c14
-rw-r--r--arch/sparc/kernel/pci_common.c9
-rw-r--r--arch/sparc/kernel/pci_fire.c11
-rw-r--r--arch/sparc/kernel/pci_msi.c18
-rw-r--r--arch/sparc/kernel/pci_psycho.c11
-rw-r--r--arch/sparc/kernel/pci_sabre.c11
-rw-r--r--arch/sparc/kernel/pci_schizo.c21
-rw-r--r--arch/sparc/kernel/pci_sun4v.c15
-rw-r--r--arch/sparc/kernel/perf_event.c122
-rw-r--r--arch/sparc/kernel/pmc.c7
-rw-r--r--arch/sparc/kernel/power.c11
-rw-r--r--arch/sparc/kernel/process_64.c1
-rw-r--r--arch/sparc/kernel/psycho_common.c2
-rw-r--r--arch/sparc/kernel/sbus.c16
-rw-r--r--arch/sparc/kernel/stacktrace.c23
-rw-r--r--arch/sparc/kernel/time_32.c27
-rw-r--r--arch/sparc/kernel/time_64.c27
-rw-r--r--arch/sparc/kernel/traps_64.c14
-rw-r--r--arch/um/drivers/harddog_kern.c18
-rw-r--r--arch/um/drivers/hostaudio_kern.c8
-rw-r--r--arch/um/drivers/line.c1
-rw-r--r--arch/um/drivers/mmapper_kern.c5
-rw-r--r--arch/um/include/asm/system.h3
-rw-r--r--arch/um/include/asm/thread_info.h7
-rw-r--r--arch/um/kernel/skas/syscall.c4
-rw-r--r--arch/um/sys-i386/asm/elf.h2
-rw-r--r--arch/um/sys-x86_64/asm/elf.h2
-rw-r--r--arch/um/sys-x86_64/signal.c3
-rw-r--r--arch/x86/Kconfig73
-rw-r--r--arch/x86/Kconfig.cpu24
-rw-r--r--arch/x86/Kconfig.debug11
-rw-r--r--arch/x86/Makefile5
-rw-r--r--arch/x86/crypto/aesni-intel_asm.S115
-rw-r--r--arch/x86/crypto/aesni-intel_glue.c130
-rw-r--r--arch/x86/include/asm/acpi.h2
-rw-r--r--arch/x86/include/asm/alternative-asm.h4
-rw-r--r--arch/x86/include/asm/alternative.h20
-rw-r--r--arch/x86/include/asm/apic.h13
-rw-r--r--arch/x86/include/asm/arch_hweight.h61
-rw-r--r--arch/x86/include/asm/atomic.h25
-rw-r--r--arch/x86/include/asm/atomic64_32.h278
-rw-r--r--arch/x86/include/asm/atomic64_64.h25
-rw-r--r--arch/x86/include/asm/bitops.h4
-rw-r--r--arch/x86/include/asm/boot.h2
-rw-r--r--arch/x86/include/asm/cacheflush.h46
-rw-r--r--arch/x86/include/asm/cmpxchg_32.h3
-rw-r--r--arch/x86/include/asm/cpufeature.h65
-rw-r--r--arch/x86/include/asm/ds.h302
-rw-r--r--arch/x86/include/asm/dwarf2.h12
-rw-r--r--arch/x86/include/asm/e820.h7
-rw-r--r--arch/x86/include/asm/hardirq.h2
-rw-r--r--arch/x86/include/asm/hpet.h1
-rw-r--r--arch/x86/include/asm/hw_breakpoint.h10
-rw-r--r--arch/x86/include/asm/hyperv.h11
-rw-r--r--arch/x86/include/asm/hypervisor.h27
-rw-r--r--arch/x86/include/asm/i387.h129
-rw-r--r--arch/x86/include/asm/i8253.h2
-rw-r--r--arch/x86/include/asm/insn.h2
-rw-r--r--arch/x86/include/asm/inst.h96
-rw-r--r--arch/x86/include/asm/intel_scu_ipc.h55
-rw-r--r--arch/x86/include/asm/io_apic.h13
-rw-r--r--arch/x86/include/asm/k8.h5
-rw-r--r--arch/x86/include/asm/kgdb.h3
-rw-r--r--arch/x86/include/asm/kprobes.h2
-rw-r--r--arch/x86/include/asm/kvm.h17
-rw-r--r--arch/x86/include/asm/kvm_emulate.h46
-rw-r--r--arch/x86/include/asm/kvm_host.h80
-rw-r--r--arch/x86/include/asm/kvm_para.h13
-rw-r--r--arch/x86/include/asm/mce.h8
-rw-r--r--arch/x86/include/asm/mpspec.h10
-rw-r--r--arch/x86/include/asm/mshyperv.h14
-rw-r--r--arch/x86/include/asm/msr-index.h22
-rw-r--r--arch/x86/include/asm/pci_x86.h2
-rw-r--r--arch/x86/include/asm/percpu.h26
-rw-r--r--arch/x86/include/asm/perf_event.h76
-rw-r--r--arch/x86/include/asm/perf_event_p4.h795
-rw-r--r--arch/x86/include/asm/processor.h47
-rw-r--r--arch/x86/include/asm/ptrace-abi.h57
-rw-r--r--arch/x86/include/asm/ptrace.h6
-rw-r--r--arch/x86/include/asm/pvclock-abi.h4
-rw-r--r--arch/x86/include/asm/pvclock.h1
-rw-r--r--arch/x86/include/asm/rdc321x_defs.h12
-rw-r--r--arch/x86/include/asm/scatterlist.h5
-rw-r--r--arch/x86/include/asm/svm.h9
-rw-r--r--arch/x86/include/asm/thread_info.h15
-rw-r--r--arch/x86/include/asm/topology.h26
-rw-r--r--arch/x86/include/asm/traps.h2
-rw-r--r--arch/x86/include/asm/uv/uv_bau.h247
-rw-r--r--arch/x86/include/asm/uv/uv_hub.h2
-rw-r--r--arch/x86/include/asm/uv/uv_mmrs.h528
-rw-r--r--arch/x86/include/asm/vmware.h27
-rw-r--r--arch/x86/include/asm/vmx.h12
-rw-r--r--arch/x86/include/asm/xsave.h7
-rw-r--r--arch/x86/kernel/Makefile2
-rw-r--r--arch/x86/kernel/acpi/boot.c156
-rw-r--r--arch/x86/kernel/acpi/sleep.c2
-rw-r--r--arch/x86/kernel/alternative.c47
-rw-r--r--arch/x86/kernel/apic/apic.c41
-rw-r--r--arch/x86/kernel/apic/es7000_32.c19
-rw-r--r--arch/x86/kernel/apic/io_apic.c99
-rw-r--r--arch/x86/kernel/apic/x2apic_uv_x.c3
-rw-r--r--arch/x86/kernel/apm_32.c4
-rw-r--r--arch/x86/kernel/cpu/Makefile2
-rw-r--r--arch/x86/kernel/cpu/addon_cpuid_features.c14
-rw-r--r--arch/x86/kernel/cpu/bugs.c2
-rw-r--r--arch/x86/kernel/cpu/common.c40
-rw-r--r--arch/x86/kernel/cpu/cpufreq/Makefile4
-rw-r--r--arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c44
-rw-r--r--arch/x86/kernel/cpu/cpufreq/mperf.c51
-rw-r--r--arch/x86/kernel/cpu/cpufreq/mperf.h9
-rw-r--r--arch/x86/kernel/cpu/cpufreq/powernow-k8.c169
-rw-r--r--arch/x86/kernel/cpu/cpufreq/powernow-k8.h2
-rw-r--r--arch/x86/kernel/cpu/hypervisor.c52
-rw-r--r--arch/x86/kernel/cpu/intel.c8
-rw-r--r--arch/x86/kernel/cpu/intel_cacheinfo.c181
-rw-r--r--arch/x86/kernel/cpu/mcheck/Makefile2
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce-apei.c138
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce-internal.h23
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce.c83
-rw-r--r--arch/x86/kernel/cpu/mcheck/therm_throt.c2
-rw-r--r--arch/x86/kernel/cpu/mshyperv.c55
-rw-r--r--arch/x86/kernel/cpu/perf_event.c821
-rw-r--r--arch/x86/kernel/cpu/perf_event_amd.c46
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel.c357
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_ds.c641
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_lbr.c218
-rw-r--r--arch/x86/kernel/cpu/perf_event_p4.c858
-rw-r--r--arch/x86/kernel/cpu/perf_event_p6.c31
-rw-r--r--arch/x86/kernel/cpu/vmware.c38
-rw-r--r--arch/x86/kernel/cpuid.c2
-rw-r--r--arch/x86/kernel/ds.c1437
-rw-r--r--arch/x86/kernel/ds_selftest.c408
-rw-r--r--arch/x86/kernel/ds_selftest.h15
-rw-r--r--arch/x86/kernel/dumpstack.c5
-rw-r--r--arch/x86/kernel/early_printk.c8
-rw-r--r--arch/x86/kernel/entry_32.S19
-rw-r--r--arch/x86/kernel/hpet.c29
-rw-r--r--arch/x86/kernel/hw_breakpoint.c41
-rw-r--r--arch/x86/kernel/i387.c107
-rw-r--r--arch/x86/kernel/i8253.c14
-rw-r--r--arch/x86/kernel/irqinit.c2
-rw-r--r--arch/x86/kernel/kgdb.c103
-rw-r--r--arch/x86/kernel/kprobes.c43
-rw-r--r--arch/x86/kernel/kvmclock.c56
-rw-r--r--arch/x86/kernel/microcode_core.c5
-rw-r--r--arch/x86/kernel/microcode_intel.c22
-rw-r--r--arch/x86/kernel/mpparse.c25
-rw-r--r--arch/x86/kernel/mrst.c5
-rw-r--r--arch/x86/kernel/msr.c2
-rw-r--r--arch/x86/kernel/pci-swiotlb.c2
-rw-r--r--arch/x86/kernel/process.c50
-rw-r--r--arch/x86/kernel/process_32.c10
-rw-r--r--arch/x86/kernel/process_64.c10
-rw-r--r--arch/x86/kernel/ptrace.c384
-rw-r--r--arch/x86/kernel/pvclock.c37
-rw-r--r--arch/x86/kernel/quirks.c8
-rw-r--r--arch/x86/kernel/setup.c12
-rw-r--r--arch/x86/kernel/setup_percpu.c4
-rw-r--r--arch/x86/kernel/sfi.c4
-rw-r--r--arch/x86/kernel/smpboot.c26
-rw-r--r--arch/x86/kernel/step.c46
-rw-r--r--arch/x86/kernel/tboot.c21
-rw-r--r--arch/x86/kernel/tlb_uv.c1280
-rw-r--r--arch/x86/kernel/traps.c197
-rw-r--r--arch/x86/kernel/uv_irq.c12
-rw-r--r--arch/x86/kernel/x8664_ksyms_64.c1
-rw-r--r--arch/x86/kernel/xsave.c8
-rw-r--r--arch/x86/kvm/emulate.c1247
-rw-r--r--arch/x86/kvm/i8259.c53
-rw-r--r--arch/x86/kvm/irq.h1
-rw-r--r--arch/x86/kvm/kvm_timer.h4
-rw-r--r--arch/x86/kvm/mmu.c225
-rw-r--r--arch/x86/kvm/mmutrace.h84
-rw-r--r--arch/x86/kvm/paging_tmpl.h46
-rw-r--r--arch/x86/kvm/svm.c952
-rw-r--r--arch/x86/kvm/timer.c3
-rw-r--r--arch/x86/kvm/trace.h165
-rw-r--r--arch/x86/kvm/vmx.c386
-rw-r--r--arch/x86/kvm/x86.c1653
-rw-r--r--arch/x86/kvm/x86.h10
-rw-r--r--arch/x86/lguest/boot.c1
-rw-r--r--arch/x86/lib/Makefile5
-rw-r--r--arch/x86/lib/atomic64_32.c273
-rw-r--r--arch/x86/lib/atomic64_386_32.S174
-rw-r--r--arch/x86/lib/atomic64_cx8_32.S224
-rw-r--r--arch/x86/math-emu/fpu_aux.c6
-rw-r--r--arch/x86/math-emu/fpu_entry.c4
-rw-r--r--arch/x86/math-emu/fpu_system.h2
-rw-r--r--arch/x86/mm/Makefile1
-rw-r--r--arch/x86/mm/numa_64.c9
-rw-r--r--arch/x86/mm/pageattr.c53
-rw-r--r--arch/x86/mm/pat.c247
-rw-r--r--arch/x86/mm/pat_internal.h46
-rw-r--r--arch/x86/mm/pat_rbtree.c274
-rw-r--r--arch/x86/mm/pf_in.c2
-rw-r--r--arch/x86/mm/pgtable_32.c1
-rw-r--r--arch/x86/mm/srat_64.c51
-rw-r--r--arch/x86/oprofile/nmi_int.c199
-rw-r--r--arch/x86/oprofile/op_model_amd.c280
-rw-r--r--arch/x86/oprofile/op_model_p4.c52
-rw-r--r--arch/x86/oprofile/op_model_ppro.c81
-rw-r--r--arch/x86/oprofile/op_x86_model.h4
-rw-r--r--arch/x86/pci/Makefile2
-rw-r--r--arch/x86/pci/acpi.c8
-rw-r--r--arch/x86/pci/broadcom_bus.c101
-rw-r--r--arch/x86/pci/common.c2
-rw-r--r--arch/x86/pci/direct.c16
-rw-r--r--arch/x86/pci/irq.c9
-rw-r--r--arch/x86/pci/mmconfig-shared.c17
-rw-r--r--arch/x86/pci/mmconfig_32.c8
-rw-r--r--arch/x86/pci/mrst.c6
-rw-r--r--arch/x86/pci/numaq_32.c8
-rw-r--r--arch/x86/pci/pcbios.c8
-rw-r--r--arch/x86/xen/time.c6
-rw-r--r--arch/xtensa/include/asm/atomic.h2
-rw-r--r--arch/xtensa/include/asm/cache.h1
-rw-r--r--arch/xtensa/include/asm/hardirq.h15
-rw-r--r--arch/xtensa/include/asm/scatterlist.h23
-rw-r--r--arch/xtensa/include/asm/thread_info.h2
-rw-r--r--arch/xtensa/kernel/irq.c9
-rw-r--r--arch/xtensa/kernel/time.c5
-rw-r--r--arch/xtensa/kernel/vectors.S2
-rw-r--r--block/Kconfig23
-rw-r--r--block/Kconfig.iosched16
-rw-r--r--block/Makefile2
-rw-r--r--block/blk-barrier.c147
-rw-r--r--block/blk-cgroup.c791
-rw-r--r--block/blk-cgroup.h178
-rw-r--r--block/blk-core.c31
-rw-r--r--block/blk-lib.c233
-rw-r--r--block/cfq-iosched.c81
-rw-r--r--block/elevator.c11
-rw-r--r--block/genhd.c2
-rw-r--r--block/ioctl.c2
-rw-r--r--crypto/Kconfig2
-rw-r--r--crypto/ablkcipher.c277
-rw-r--r--crypto/algapi.c2
-rw-r--r--crypto/async_tx/async_tx.c46
-rw-r--r--crypto/authenc.c8
-rw-r--r--crypto/internal.h2
-rw-r--r--crypto/pcrypt.c11
-rw-r--r--crypto/scatterwalk.c2
-rw-r--r--crypto/shash.c2
-rw-r--r--crypto/tcrypt.c343
-rw-r--r--crypto/tcrypt.h29
-rw-r--r--crypto/testmgr.c66
-rw-r--r--crypto/testmgr.h64
-rw-r--r--crypto/vmac.c75
-rw-r--r--drivers/Makefile2
-rw-r--r--drivers/acpi/Kconfig9
-rw-r--r--drivers/acpi/Makefile5
-rw-r--r--drivers/acpi/acpi_pad.c22
-rw-r--r--drivers/acpi/acpica/Makefile4
-rw-r--r--drivers/acpi/acpica/acevents.h51
-rw-r--r--drivers/acpi/acpica/acglobal.h25
-rw-r--r--drivers/acpi/acpica/acinterp.h9
-rw-r--r--drivers/acpi/acpica/aclocal.h19
-rw-r--r--drivers/acpi/acpica/actables.h4
-rw-r--r--drivers/acpi/acpica/dsfield.c2
-rw-r--r--drivers/acpi/acpica/dsmethod.c2
-rw-r--r--drivers/acpi/acpica/dsmthdat.c10
-rw-r--r--drivers/acpi/acpica/dsobject.c14
-rw-r--r--drivers/acpi/acpica/dsopcode.c13
-rw-r--r--drivers/acpi/acpica/dswexec.c6
-rw-r--r--drivers/acpi/acpica/dswstate.c10
-rw-r--r--drivers/acpi/acpica/evevent.c2
-rw-r--r--drivers/acpi/acpica/evgpe.c167
-rw-r--r--drivers/acpi/acpica/evgpeblk.c766
-rw-r--r--drivers/acpi/acpica/evgpeinit.c653
-rw-r--r--drivers/acpi/acpica/evgpeutil.c337
-rw-r--r--drivers/acpi/acpica/evmisc.c2
-rw-r--r--drivers/acpi/acpica/evxface.c24
-rw-r--r--drivers/acpi/acpica/evxfevnt.c224
-rw-r--r--drivers/acpi/acpica/exconfig.c21
-rw-r--r--drivers/acpi/acpica/exconvrt.c4
-rw-r--r--drivers/acpi/acpica/excreate.c4
-rw-r--r--drivers/acpi/acpica/exdebug.c261
-rw-r--r--drivers/acpi/acpica/exfield.c2
-rw-r--r--drivers/acpi/acpica/exfldio.c16
-rw-r--r--drivers/acpi/acpica/exmisc.c8
-rw-r--r--drivers/acpi/acpica/exmutex.c46
-rw-r--r--drivers/acpi/acpica/exnames.c4
-rw-r--r--drivers/acpi/acpica/exoparg1.c18
-rw-r--r--drivers/acpi/acpica/exoparg2.c37
-rw-r--r--drivers/acpi/acpica/exoparg3.c4
-rw-r--r--drivers/acpi/acpica/exoparg6.c4
-rw-r--r--drivers/acpi/acpica/exprep.c4
-rw-r--r--drivers/acpi/acpica/exregion.c17
-rw-r--r--drivers/acpi/acpica/exresnte.c4
-rw-r--r--drivers/acpi/acpica/exresolv.c11
-rw-r--r--drivers/acpi/acpica/exresop.c8
-rw-r--r--drivers/acpi/acpica/exstore.c218
-rw-r--r--drivers/acpi/acpica/exsystem.c10
-rw-r--r--drivers/acpi/acpica/hwacpi.c20
-rw-r--r--drivers/acpi/acpica/hwregs.c6
-rw-r--r--drivers/acpi/acpica/hwsleep.c2
-rw-r--r--drivers/acpi/acpica/hwvalid.c2
-rw-r--r--drivers/acpi/acpica/nsaccess.c2
-rw-r--r--drivers/acpi/acpica/nsdump.c4
-rw-r--r--drivers/acpi/acpica/nsnames.c2
-rw-r--r--drivers/acpi/acpica/nssearch.c2
-rw-r--r--drivers/acpi/acpica/nsutils.c4
-rw-r--r--drivers/acpi/acpica/psargs.c4
-rw-r--r--drivers/acpi/acpica/psloop.c3
-rw-r--r--drivers/acpi/acpica/psxface.c5
-rw-r--r--drivers/acpi/acpica/rscreate.c14
-rw-r--r--drivers/acpi/acpica/rslist.c6
-rw-r--r--drivers/acpi/acpica/rsmisc.c4
-rw-r--r--drivers/acpi/acpica/tbfadt.c16
-rw-r--r--drivers/acpi/acpica/tbfind.c2
-rw-r--r--drivers/acpi/acpica/tbinstal.c69
-rw-r--r--drivers/acpi/acpica/tbutils.c101
-rw-r--r--drivers/acpi/acpica/tbxface.c80
-rw-r--r--drivers/acpi/acpica/tbxfroot.c6
-rw-r--r--drivers/acpi/acpica/utalloc.c2
-rw-r--r--drivers/acpi/acpica/utcopy.c14
-rw-r--r--drivers/acpi/acpica/utdelete.c6
-rw-r--r--drivers/acpi/acpica/uteval.c2
-rw-r--r--drivers/acpi/acpica/utglobal.c1
-rw-r--r--drivers/acpi/acpica/utmisc.c6
-rw-r--r--drivers/acpi/acpica/utmutex.c4
-rw-r--r--drivers/acpi/acpica/utobject.c8
-rw-r--r--drivers/acpi/apei/Kconfig30
-rw-r--r--drivers/acpi/apei/Makefile5
-rw-r--r--drivers/acpi/apei/apei-base.c593
-rw-r--r--drivers/acpi/apei/apei-internal.h114
-rw-r--r--drivers/acpi/apei/cper.c84
-rw-r--r--drivers/acpi/apei/einj.c548
-rw-r--r--drivers/acpi/apei/erst.c855
-rw-r--r--drivers/acpi/apei/ghes.c427
-rw-r--r--drivers/acpi/apei/hest.c173
-rw-r--r--drivers/acpi/atomicio.c360
-rw-r--r--drivers/acpi/bus.c53
-rw-r--r--drivers/acpi/ec.c3
-rw-r--r--drivers/acpi/hed.c112
-rw-r--r--drivers/acpi/hest.c139
-rw-r--r--drivers/acpi/osl.c13
-rw-r--r--drivers/acpi/pci_irq.c8
-rw-r--r--drivers/acpi/pci_root.c67
-rw-r--r--drivers/acpi/power.c1
-rw-r--r--drivers/acpi/processor_driver.c15
-rw-r--r--drivers/acpi/processor_idle.c58
-rw-r--r--drivers/acpi/scan.c2
-rw-r--r--drivers/acpi/sleep.c245
-rw-r--r--drivers/acpi/sleep.h2
-rw-r--r--drivers/acpi/system.c7
-rw-r--r--drivers/acpi/tables.c4
-rw-r--r--drivers/acpi/video.c118
-rw-r--r--drivers/acpi/video_detect.c2
-rw-r--r--drivers/ata/Kconfig521
-rw-r--r--drivers/ata/Makefile86
-rw-r--r--drivers/ata/ahci.c2544
-rw-r--r--drivers/ata/ahci.h343
-rw-r--r--drivers/ata/ahci_platform.c192
-rw-r--r--drivers/ata/ata_generic.c2
-rw-r--r--drivers/ata/ata_piix.c6
-rw-r--r--drivers/ata/libahci.c2216
-rw-r--r--drivers/ata/libata-core.c232
-rw-r--r--drivers/ata/libata-eh.c6
-rw-r--r--drivers/ata/libata-pmp.c32
-rw-r--r--drivers/ata/libata-scsi.c3
-rw-r--r--drivers/ata/libata-sff.c1799
-rw-r--r--drivers/ata/libata.h29
-rw-r--r--drivers/ata/pata_acpi.c10
-rw-r--r--drivers/ata/pata_ali.c7
-rw-r--r--drivers/ata/pata_amd.c2
-rw-r--r--drivers/ata/pata_artop.c2
-rw-r--r--drivers/ata/pata_at91.c1
-rw-r--r--drivers/ata/pata_atiixp.c6
-rw-r--r--drivers/ata/pata_atp867x.c2
-rw-r--r--drivers/ata/pata_bf54x.c89
-rw-r--r--drivers/ata/pata_cmd640.c13
-rw-r--r--drivers/ata/pata_cmd64x.c2
-rw-r--r--drivers/ata/pata_cs5520.c4
-rw-r--r--drivers/ata/pata_cs5530.c6
-rw-r--r--drivers/ata/pata_cs5535.c2
-rw-r--r--drivers/ata/pata_cs5536.c2
-rw-r--r--drivers/ata/pata_cypress.c2
-rw-r--r--drivers/ata/pata_efar.c4
-rw-r--r--drivers/ata/pata_hpt366.c4
-rw-r--r--drivers/ata/pata_hpt37x.c6
-rw-r--r--drivers/ata/pata_hpt3x2n.c4
-rw-r--r--drivers/ata/pata_hpt3x3.c2
-rw-r--r--drivers/ata/pata_icside.c7
-rw-r--r--drivers/ata/pata_it8213.c2
-rw-r--r--drivers/ata/pata_it821x.c8
-rw-r--r--drivers/ata/pata_jmicron.c2
-rw-r--r--drivers/ata/pata_macio.c9
-rw-r--r--drivers/ata/pata_marvell.c2
-rw-r--r--drivers/ata/pata_mpc52xx.c94
-rw-r--r--drivers/ata/pata_netcell.c2
-rw-r--r--drivers/ata/pata_ninja32.c2
-rw-r--r--drivers/ata/pata_ns87415.c4
-rw-r--r--drivers/ata/pata_octeon_cf.c30
-rw-r--r--drivers/ata/pata_of_platform.c9
-rw-r--r--drivers/ata/pata_oldpiix.c4
-rw-r--r--drivers/ata/pata_optidma.c2
-rw-r--r--drivers/ata/pata_pcmcia.c49
-rw-r--r--drivers/ata/pata_pdc2027x.c6
-rw-r--r--drivers/ata/pata_pdc202xx_old.c4
-rw-r--r--drivers/ata/pata_piccolo.c2
-rw-r--r--drivers/ata/pata_platform.c1
-rw-r--r--drivers/ata/pata_radisys.c4
-rw-r--r--drivers/ata/pata_rdc.c4
-rw-r--r--drivers/ata/pata_sc1200.c6
-rw-r--r--drivers/ata/pata_scc.c84
-rw-r--r--drivers/ata/pata_sch.c12
-rw-r--r--drivers/ata/pata_serverworks.c8
-rw-r--r--drivers/ata/pata_sil680.c34
-rw-r--r--drivers/ata/pata_sis.c2
-rw-r--r--drivers/ata/pata_sl82c105.c2
-rw-r--r--drivers/ata/pata_triflex.c2
-rw-r--r--drivers/ata/pata_via.c8
-rw-r--r--drivers/ata/pdc_adma.c74
-rw-r--r--drivers/ata/sata_fsl.c11
-rw-r--r--drivers/ata/sata_inic162x.c25
-rw-r--r--drivers/ata/sata_mv.c51
-rw-r--r--drivers/ata/sata_nv.c269
-rw-r--r--drivers/ata/sata_promise.c32
-rw-r--r--drivers/ata/sata_qstor.c104
-rw-r--r--drivers/ata/sata_sil.c13
-rw-r--r--drivers/ata/sata_sil24.c9
-rw-r--r--drivers/ata/sata_sis.c4
-rw-r--r--drivers/ata/sata_svw.c4
-rw-r--r--drivers/ata/sata_sx4.c10
-rw-r--r--drivers/ata/sata_uli.c6
-rw-r--r--drivers/ata/sata_via.c8
-rw-r--r--drivers/ata/sata_vsc.c12
-rw-r--r--drivers/atm/Kconfig1
-rw-r--r--drivers/atm/atmtcp.c6
-rw-r--r--drivers/atm/eni.c2
-rw-r--r--drivers/atm/fore200e.c23
-rw-r--r--drivers/atm/he.c4
-rw-r--r--drivers/auxdisplay/cfag12864bfb.c8
-rw-r--r--drivers/base/Kconfig7
-rw-r--r--drivers/base/class.c9
-rw-r--r--drivers/base/core.c130
-rw-r--r--drivers/base/cpu.c2
-rw-r--r--drivers/base/dd.c4
-rw-r--r--drivers/base/devtmpfs.c5
-rw-r--r--drivers/base/firmware_class.c206
-rw-r--r--drivers/base/module.c4
-rw-r--r--drivers/base/node.c3
-rw-r--r--drivers/base/platform.c38
-rw-r--r--drivers/base/power/runtime.c10
-rw-r--r--drivers/base/power/sysfs.c65
-rw-r--r--drivers/base/topology.c2
-rw-r--r--drivers/block/Kconfig22
-rw-r--r--drivers/block/amiflop.c47
-rw-r--r--drivers/block/cciss.c3
-rw-r--r--drivers/block/drbd/drbd_bitmap.c21
-rw-r--r--drivers/block/drbd/drbd_int.h151
-rw-r--r--drivers/block/drbd/drbd_main.c158
-rw-r--r--drivers/block/drbd/drbd_nl.c52
-rw-r--r--drivers/block/drbd/drbd_proc.c19
-rw-r--r--drivers/block/drbd/drbd_receiver.c666
-rw-r--r--drivers/block/drbd/drbd_req.c40
-rw-r--r--drivers/block/drbd/drbd_strings.c2
-rw-r--r--drivers/block/drbd/drbd_worker.c206
-rw-r--r--drivers/block/drbd/drbd_wrappers.h16
-rw-r--r--drivers/block/hd.c4
-rw-r--r--drivers/block/loop.c14
-rw-r--r--drivers/block/swim3.c2
-rw-r--r--drivers/block/virtio_blk.c46
-rw-r--r--drivers/block/xsysace.c13
-rw-r--r--drivers/bluetooth/bluecard_cs.c11
-rw-r--r--drivers/bluetooth/bt3c_cs.c11
-rw-r--r--drivers/bluetooth/btmrvl_drv.h8
-rw-r--r--drivers/bluetooth/btmrvl_main.c92
-rw-r--r--drivers/bluetooth/btmrvl_sdio.c7
-rw-r--r--drivers/bluetooth/btuart_cs.c11
-rw-r--r--drivers/bluetooth/dtl1_cs.c11
-rw-r--r--drivers/bluetooth/hci_h4.c2
-rw-r--r--drivers/bluetooth/hci_ll.c8
-rw-r--r--drivers/bluetooth/hci_vhci.c2
-rw-r--r--drivers/cdrom/viocd.c2
-rw-r--r--drivers/char/Kconfig17
-rw-r--r--drivers/char/Makefile2
-rw-r--r--drivers/char/agp/agp.h80
-rw-r--r--drivers/char/agp/ali-agp.c1
-rw-r--r--drivers/char/agp/amd-k7-agp.c9
-rw-r--r--drivers/char/agp/amd64-agp.c56
-rw-r--r--drivers/char/agp/ati-agp.c8
-rw-r--r--drivers/char/agp/efficeon-agp.c1
-rw-r--r--drivers/char/agp/generic.c2
-rw-r--r--drivers/char/agp/intel-agp.c1883
-rw-r--r--drivers/char/agp/intel-agp.h239
-rw-r--r--drivers/char/agp/intel-gtt.c1516
-rw-r--r--drivers/char/agp/nvidia-agp.c1
-rw-r--r--drivers/char/agp/sis-agp.c9
-rw-r--r--drivers/char/agp/uninorth-agp.c16
-rw-r--r--drivers/char/agp/via-agp.c2
-rw-r--r--drivers/char/amiserial.c61
-rw-r--r--drivers/char/apm-emulation.c8
-rw-r--r--drivers/char/applicom.c22
-rw-r--r--drivers/char/bsr.c2
-rw-r--r--drivers/char/ds1620.c16
-rw-r--r--drivers/char/dtlk.c15
-rw-r--r--drivers/char/generic_nvram.c17
-rw-r--r--drivers/char/genrtc.c16
-rw-r--r--drivers/char/hangcheck-timer.c20
-rw-r--r--drivers/char/hpet.c14
-rw-r--r--drivers/char/hvsi.c6
-rw-r--r--drivers/char/hw_random/n2-drv.c9
-rw-r--r--drivers/char/hw_random/nomadik-rng.c17
-rw-r--r--drivers/char/hw_random/pasemi-rng.c9
-rw-r--r--drivers/char/hw_random/virtio-rng.c6
-rw-r--r--drivers/char/i8k.c21
-rw-r--r--drivers/char/ipmi/ipmi_devintf.c26
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c15
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c479
-rw-r--r--drivers/char/ipmi/ipmi_watchdog.c17
-rw-r--r--drivers/char/isicom.c8
-rw-r--r--drivers/char/keyboard.c325
-rw-r--r--drivers/char/misc.c1
-rw-r--r--drivers/char/n_gsm.c2763
-rw-r--r--drivers/char/nvram.c10
-rw-r--r--drivers/char/nwflash.c7
-rw-r--r--drivers/char/pcmcia/cm4000_cs.c9
-rw-r--r--drivers/char/pcmcia/cm4040_cs.c5
-rw-r--r--drivers/char/pcmcia/ipwireless/main.c19
-rw-r--r--drivers/char/pcmcia/ipwireless/main.h1
-rw-r--r--drivers/char/pcmcia/ipwireless/tty.c19
-rw-r--r--drivers/char/pcmcia/ipwireless/tty.h3
-rw-r--r--drivers/char/pcmcia/synclink_cs.c22
-rw-r--r--drivers/char/ppdev.c4
-rw-r--r--drivers/char/ps3flash.c3
-rw-r--r--drivers/char/ramoops.c162
-rw-r--r--drivers/char/random.c10
-rw-r--r--drivers/char/raw.c42
-rw-r--r--drivers/char/serial167.c225
-rw-r--r--drivers/char/sysrq.c245
-rw-r--r--drivers/char/tpm/Kconfig6
-rw-r--r--drivers/char/tpm/tpm.c47
-rw-r--r--drivers/char/tpm/tpm_tis.c40
-rw-r--r--drivers/char/tty_buffer.c2
-rw-r--r--drivers/char/tty_io.c1
-rw-r--r--drivers/char/viotape.c2
-rw-r--r--drivers/char/virtio_console.c700
-rw-r--r--drivers/char/vt.c10
-rw-r--r--drivers/char/xilinx_hwicap/xilinx_hwicap.c11
-rw-r--r--drivers/clocksource/cs5535-clockevt.c8
-rw-r--r--drivers/clocksource/sh_cmt.c45
-rw-r--r--drivers/clocksource/sh_mtu2.c37
-rw-r--r--drivers/clocksource/sh_tmu.c41
-rw-r--r--drivers/cpufreq/cpufreq.c40
-rw-r--r--drivers/cpufreq/cpufreq_conservative.c48
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.c115
-rw-r--r--drivers/cpuidle/cpuidle.c12
-rw-r--r--drivers/cpuidle/cpuidle.h1
-rw-r--r--drivers/cpuidle/driver.c16
-rw-r--r--drivers/cpuidle/governors/ladder.c2
-rw-r--r--drivers/cpuidle/governors/menu.c62
-rw-r--r--drivers/cpuidle/sysfs.c5
-rw-r--r--drivers/crypto/Kconfig21
-rw-r--r--drivers/crypto/Makefile4
-rw-r--r--drivers/crypto/amcc/crypto4xx_core.c7
-rw-r--r--drivers/crypto/geode-aes.c36
-rw-r--r--drivers/crypto/hifn_795x.c18
-rw-r--r--drivers/crypto/mv_cesa.c692
-rw-r--r--drivers/crypto/mv_cesa.h40
-rw-r--r--drivers/crypto/n2_asm.S95
-rw-r--r--drivers/crypto/n2_core.c2083
-rw-r--r--drivers/crypto/n2_core.h231
-rw-r--r--drivers/crypto/omap-sham.c1259
-rw-r--r--drivers/crypto/talitos.c708
-rw-r--r--drivers/crypto/talitos.h12
-rw-r--r--drivers/dma/Kconfig23
-rw-r--r--drivers/dma/Makefile3
-rw-r--r--drivers/dma/at_hdmac.c35
-rw-r--r--drivers/dma/coh901318.c263
-rw-r--r--drivers/dma/dmaengine.c22
-rw-r--r--drivers/dma/dw_dmac.c24
-rw-r--r--drivers/dma/fsldma.c45
-rw-r--r--drivers/dma/ioat/dma.c12
-rw-r--r--drivers/dma/ioat/dma.h19
-rw-r--r--drivers/dma/ioat/dma_v2.c186
-rw-r--r--drivers/dma/ioat/dma_v2.h33
-rw-r--r--drivers/dma/ioat/dma_v3.c143
-rw-r--r--drivers/dma/ioat/pci.c7
-rw-r--r--drivers/dma/iop-adma.c39
-rw-r--r--drivers/dma/ipu/ipu_idmac.c34
-rw-r--r--drivers/dma/mpc512x_dma.c15
-rw-r--r--drivers/dma/mv_xor.c25
-rw-r--r--drivers/dma/pl330.c866
-rw-r--r--drivers/dma/ppc4xx/adma.c21
-rw-r--r--drivers/dma/shdma.c57
-rw-r--r--drivers/dma/shdma.h4
-rw-r--r--drivers/dma/ste_dma40.c2657
-rw-r--r--drivers/dma/ste_dma40_ll.c454
-rw-r--r--drivers/dma/ste_dma40_ll.h354
-rw-r--r--drivers/dma/timb_dma.c860
-rw-r--r--drivers/dma/txx9dmac.c23
-rw-r--r--drivers/edac/amd76x_edac.c2
-rw-r--r--drivers/edac/i5000_edac.c20
-rw-r--r--drivers/edac/i5400_edac.c20
-rw-r--r--drivers/edac/i82443bxgx_edac.c24
-rw-r--r--drivers/edac/mpc85xx_edac.c30
-rw-r--r--drivers/edac/ppc4xx_edac.c10
-rw-r--r--drivers/edac/r82600_edac.c2
-rw-r--r--drivers/firewire/core-card.c22
-rw-r--r--drivers/firewire/core-cdev.c8
-rw-r--r--drivers/firewire/core-transaction.c96
-rw-r--r--drivers/firewire/core.h6
-rw-r--r--drivers/firewire/ohci.c190
-rw-r--r--drivers/firewire/ohci.h10
-rw-r--r--drivers/firmware/dcdbas.c4
-rw-r--r--drivers/firmware/dell_rbu.c10
-rw-r--r--drivers/firmware/efivars.c4
-rw-r--r--drivers/gpio/Kconfig37
-rw-r--r--drivers/gpio/Makefile5
-rw-r--r--drivers/gpio/cs5535-gpio.c2
-rw-r--r--drivers/gpio/gpiolib.c51
-rw-r--r--drivers/gpio/it8761e_gpio.c23
-rw-r--r--drivers/gpio/janz-ttl.c258
-rw-r--r--drivers/gpio/langwell_gpio.c83
-rw-r--r--drivers/gpio/max732x.c368
-rw-r--r--drivers/gpio/pca953x.c4
-rw-r--r--drivers/gpio/pl061.c2
-rw-r--r--drivers/gpio/rdc321x-gpio.c246
-rw-r--r--drivers/gpio/tc35892-gpio.c381
-rw-r--r--drivers/gpio/wm8994-gpio.c12
-rw-r--r--drivers/gpu/drm/Kconfig2
-rw-r--r--drivers/gpu/drm/drm_auth.c3
-rw-r--r--drivers/gpu/drm/drm_bufs.c2
-rw-r--r--drivers/gpu/drm/drm_crtc.c9
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c490
-rw-r--r--drivers/gpu/drm/drm_dma.c4
-rw-r--r--drivers/gpu/drm/drm_edid.c807
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c908
-rw-r--r--drivers/gpu/drm/drm_fops.c3
-rw-r--r--drivers/gpu/drm/drm_gem.c49
-rw-r--r--drivers/gpu/drm/drm_modes.c105
-rw-r--r--drivers/gpu/drm/drm_sysfs.c7
-rw-r--r--drivers/gpu/drm/i915/Makefile2
-rw-r--r--drivers/gpu/drm/i915/dvo.h10
-rw-r--r--drivers/gpu/drm/i915/dvo_ch7017.c46
-rw-r--r--drivers/gpu/drm/i915/dvo_ch7xxx.c44
-rw-r--r--drivers/gpu/drm/i915/dvo_ivch.c21
-rw-r--r--drivers/gpu/drm/i915/dvo_sil164.c38
-rw-r--r--drivers/gpu/drm/i915/dvo_tfp410.c32
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c28
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c15
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c29
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h38
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c154
-rw-r--r--drivers/gpu/drm/i915/i915_gem_debug.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c5
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c31
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h143
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c41
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h88
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c1
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c93
-rw-r--r--drivers/gpu/drm/i915/intel_display.c1068
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c256
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h31
-rw-r--r--drivers/gpu/drm/i915/intel_dvo.c103
-rw-r--r--drivers/gpu/drm/i915/intel_fb.c217
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c71
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c111
-rw-r--r--drivers/gpu/drm/i915/intel_modes.c21
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c8
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c1009
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c185
-rw-r--r--drivers/gpu/drm/nouveau/Makefile3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c500
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c116
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c15
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_debugfs.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c42
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.c19
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_encoder.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fb.h6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c259
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.h19
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_grctx.c6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_i2c.c21
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_irq.c10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_reg.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_state.c58
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fbcon.c18
-rw-r--r--drivers/gpu/drm/nouveau/nv04_graph.c566
-rw-r--r--drivers/gpu/drm/nouveau/nv40_graph.c8
-rw-r--r--drivers/gpu/drm/nouveau/nv40_grctx.c5
-rw-r--r--drivers/gpu/drm/nouveau/nv50_calc.c87
-rw-r--r--drivers/gpu/drm/nouveau/nv50_crtc.c46
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c36
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fbcon.c16
-rw-r--r--drivers/gpu/drm/nouveau/nv50_sor.c15
-rw-r--r--drivers/gpu/drm/radeon/atombios.h80
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c23
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c2
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c1549
-rw-r--r--drivers/gpu/drm/radeon/evergreen_reg.h4
-rw-r--r--drivers/gpu/drm/radeon/evergreend.h556
-rw-r--r--drivers/gpu/drm/radeon/r100.c729
-rw-r--r--drivers/gpu/drm/radeon/r100d.h164
-rw-r--r--drivers/gpu/drm/radeon/r300.c151
-rw-r--r--drivers/gpu/drm/radeon/r300d.h47
-rw-r--r--drivers/gpu/drm/radeon/r420.c36
-rw-r--r--drivers/gpu/drm/radeon/r500_reg.h3
-rw-r--r--drivers/gpu/drm/radeon/r520.c7
-rw-r--r--drivers/gpu/drm/radeon/r600.c600
-rw-r--r--drivers/gpu/drm/radeon/r600_audio.c58
-rw-r--r--drivers/gpu/drm/radeon/r600_blit_kms.c3
-rw-r--r--drivers/gpu/drm/radeon/r600_hdmi.c65
-rw-r--r--drivers/gpu/drm/radeon/r600_reg.h57
-rw-r--r--drivers/gpu/drm/radeon/radeon.h259
-rw-r--r--drivers/gpu/drm/radeon/radeon_agp.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c140
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h44
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c255
-rw-r--r--drivers/gpu/drm/radeon/radeon_bios.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c64
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c63
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c63
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c129
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c7
-rw-r--r--drivers/gpu/drm/radeon/radeon_encoders.c44
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c358
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c107
-rw-r--r--drivers/gpu/drm/radeon/radeon_gart.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq_kms.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c18
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_crtc.c14
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_encoders.c15
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h49
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c44
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c814
-rw-r--r--drivers/gpu/drm/radeon/radeon_reg.h4
-rw-r--r--drivers/gpu/drm/radeon/radeon_ring.c66
-rw-r--r--drivers/gpu/drm/radeon/radeon_state.c24
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c122
-rw-r--r--drivers/gpu/drm/radeon/rs400.c9
-rw-r--r--drivers/gpu/drm/radeon/rs600.c231
-rw-r--r--drivers/gpu/drm/radeon/rs600d.h80
-rw-r--r--drivers/gpu/drm/radeon/rs690.c289
-rw-r--r--drivers/gpu/drm/radeon/rv515.c287
-rw-r--r--drivers/gpu/drm/radeon/rv515d.h46
-rw-r--r--drivers/gpu/drm/radeon/rv770.c28
-rw-r--r--drivers/gpu/drm/savage/savage_bci.c3
-rw-r--r--drivers/gpu/drm/ttm/Makefile2
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c98
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c122
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c41
-rw-r--r--drivers/gpu/drm/ttm/ttm_memory.c7
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc.c845
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c44
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c50
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fb.c14
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c2
-rw-r--r--drivers/gpu/vga/Kconfig6
-rw-r--r--drivers/hid/Kconfig159
-rw-r--r--drivers/hid/Makefile7
-rw-r--r--drivers/hid/hid-3m-pct.c31
-rw-r--r--drivers/hid/hid-cando.c272
-rw-r--r--drivers/hid/hid-cherry.c1
-rw-r--r--drivers/hid/hid-core.c64
-rw-r--r--drivers/hid/hid-debug.c2
-rw-r--r--drivers/hid/hid-egalax.c281
-rw-r--r--drivers/hid/hid-gyration.c1
-rw-r--r--drivers/hid/hid-ids.h27
-rw-r--r--drivers/hid/hid-lg.c9
-rw-r--r--drivers/hid/hid-magicmouse.c5
-rw-r--r--drivers/hid/hid-ntrig.c590
-rw-r--r--drivers/hid/hid-picolcd.c2631
-rw-r--r--drivers/hid/hid-prodikeys.c910
-rw-r--r--drivers/hid/hid-roccat-kone.c1043
-rw-r--r--drivers/hid/hid-roccat-kone.h233
-rw-r--r--drivers/hid/hid-roccat.c428
-rw-r--r--drivers/hid/hid-roccat.h31
-rw-r--r--drivers/hid/hid-samsung.c95
-rw-r--r--drivers/hid/hid-sony.c2
-rw-r--r--drivers/hid/hid-topseed.c38
-rw-r--r--drivers/hid/hid-wacom.c230
-rw-r--r--drivers/hid/hid-zydacron.c237
-rw-r--r--drivers/hid/hidraw.c50
-rw-r--r--drivers/hid/usbhid/hid-core.c106
-rw-r--r--drivers/hid/usbhid/hid-quirks.c1
-rw-r--r--drivers/hid/usbhid/hiddev.c19
-rw-r--r--drivers/hid/usbhid/usbhid.h1
-rw-r--r--drivers/hid/usbhid/usbkbd.c17
-rw-r--r--drivers/hid/usbhid/usbmouse.c6
-rw-r--r--drivers/hwmon/Kconfig43
-rw-r--r--drivers/hwmon/Makefile3
-rw-r--r--drivers/hwmon/adm1031.c68
-rw-r--r--drivers/hwmon/ads7871.c253
-rw-r--r--drivers/hwmon/applesmc.c247
-rw-r--r--drivers/hwmon/asc7621.c63
-rw-r--r--drivers/hwmon/asus_atk0110.c7
-rw-r--r--drivers/hwmon/coretemp.c93
-rw-r--r--drivers/hwmon/dme1737.c328
-rw-r--r--drivers/hwmon/emc1403.c344
-rw-r--r--drivers/hwmon/f71882fg.c170
-rw-r--r--drivers/hwmon/fschmd.c9
-rw-r--r--drivers/hwmon/hp_accel.c2
-rw-r--r--drivers/hwmon/lis3lv02d.c245
-rw-r--r--drivers/hwmon/lis3lv02d.h11
-rw-r--r--drivers/hwmon/lm63.c16
-rw-r--r--drivers/hwmon/lm75.c2
-rw-r--r--drivers/hwmon/lm90.c3
-rw-r--r--drivers/hwmon/ltc4245.c18
-rw-r--r--drivers/hwmon/tmp102.c321
-rw-r--r--drivers/hwmon/tmp401.c255
-rw-r--r--drivers/hwmon/ultra45_env.c7
-rw-r--r--drivers/hwmon/w83793.c10
-rw-r--r--drivers/i2c/algos/i2c-algo-pca.c36
-rw-r--r--drivers/i2c/busses/Kconfig2
-rw-r--r--drivers/i2c/busses/i2c-ali1535.c2
-rw-r--r--drivers/i2c/busses/i2c-ali15x3.c2
-rw-r--r--drivers/i2c/busses/i2c-amd756.c2
-rw-r--r--drivers/i2c/busses/i2c-amd8111.c2
-rw-r--r--drivers/i2c/busses/i2c-at91.c3
-rw-r--r--drivers/i2c/busses/i2c-bfin-twi.c168
-rw-r--r--drivers/i2c/busses/i2c-cpm.c32
-rw-r--r--drivers/i2c/busses/i2c-elektor.c2
-rw-r--r--drivers/i2c/busses/i2c-gpio.c2
-rw-r--r--drivers/i2c/busses/i2c-highlander.c5
-rw-r--r--drivers/i2c/busses/i2c-hydra.c2
-rw-r--r--drivers/i2c/busses/i2c-i801.c59
-rw-r--r--drivers/i2c/busses/i2c-ibm_iic.c27
-rw-r--r--drivers/i2c/busses/i2c-iop3xx.c3
-rw-r--r--drivers/i2c/busses/i2c-mpc.c31
-rw-r--r--drivers/i2c/busses/i2c-mv64xxx.c3
-rw-r--r--drivers/i2c/busses/i2c-nforce2.c12
-rw-r--r--drivers/i2c/busses/i2c-nomadik.c6
-rw-r--r--drivers/i2c/busses/i2c-ocores.c2
-rw-r--r--drivers/i2c/busses/i2c-omap.c265
-rw-r--r--drivers/i2c/busses/i2c-parport-light.c2
-rw-r--r--drivers/i2c/busses/i2c-parport.c2
-rw-r--r--drivers/i2c/busses/i2c-pasemi.c2
-rw-r--r--drivers/i2c/busses/i2c-pca-isa.c2
-rw-r--r--drivers/i2c/busses/i2c-pca-platform.c2
-rw-r--r--drivers/i2c/busses/i2c-piix4.c2
-rw-r--r--drivers/i2c/busses/i2c-pmcmsp.c2
-rw-r--r--drivers/i2c/busses/i2c-pxa.c28
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c21
-rw-r--r--drivers/i2c/busses/i2c-s6000.c2
-rw-r--r--drivers/i2c/busses/i2c-sh7760.c2
-rw-r--r--drivers/i2c/busses/i2c-sibyte.c2
-rw-r--r--drivers/i2c/busses/i2c-simtec.c3
-rw-r--r--drivers/i2c/busses/i2c-sis5595.c2
-rw-r--r--drivers/i2c/busses/i2c-sis630.c2
-rw-r--r--drivers/i2c/busses/i2c-sis96x.c2
-rw-r--r--drivers/i2c/busses/i2c-stub.c9
-rw-r--r--drivers/i2c/busses/i2c-versatile.c3
-rw-r--r--drivers/i2c/busses/i2c-via.c2
-rw-r--r--drivers/i2c/busses/i2c-viapro.c2
-rw-r--r--drivers/i2c/busses/scx200_acb.c4
-rw-r--r--drivers/i2c/busses/scx200_i2c.c2
-rw-r--r--drivers/i2c/i2c-core.c259
-rw-r--r--drivers/i2c/i2c-dev.c36
-rw-r--r--drivers/ide/cmd640.c6
-rw-r--r--drivers/ide/gayle.c147
-rw-r--r--drivers/ide/ide-cs.c20
-rw-r--r--drivers/ide/ide-disk.c40
-rw-r--r--drivers/ide/ide-gd.c11
-rw-r--r--drivers/ide/ide_platform.c1
-rw-r--r--drivers/ide/pdc202xx_old.c5
-rw-r--r--drivers/ide/pmac.c10
-rw-r--r--drivers/idle/Kconfig11
-rw-r--r--drivers/idle/Makefile1
-rwxr-xr-xdrivers/idle/intel_idle.c461
-rw-r--r--drivers/ieee1394/dv1394.c11
-rw-r--r--drivers/ieee1394/raw1394.c3
-rw-r--r--drivers/ieee1394/video1394.c5
-rw-r--r--drivers/infiniband/Kconfig2
-rw-r--r--drivers/infiniband/Makefile2
-rw-r--r--drivers/infiniband/core/cma.c74
-rw-r--r--drivers/infiniband/core/core_priv.h4
-rw-r--r--drivers/infiniband/core/device.c6
-rw-r--r--drivers/infiniband/core/mad.c8
-rw-r--r--drivers/infiniband/core/sysfs.c21
-rw-r--r--drivers/infiniband/core/ucm.c14
-rw-r--r--drivers/infiniband/core/ucma.c4
-rw-r--r--drivers/infiniband/core/user_mad.c12
-rw-r--r--drivers/infiniband/core/uverbs_main.c11
-rw-r--r--drivers/infiniband/hw/amso1100/c2.h2
-rw-r--r--drivers/infiniband/hw/amso1100/c2_alloc.c4
-rw-r--r--drivers/infiniband/hw/amso1100/c2_cq.c4
-rw-r--r--drivers/infiniband/hw/amso1100/c2_mq.h2
-rw-r--r--drivers/infiniband/hw/amso1100/c2_provider.c2
-rw-r--r--drivers/infiniband/hw/amso1100/c2_provider.h2
-rw-r--r--drivers/infiniband/hw/amso1100/c2_rnic.c12
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_hal.c12
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_hal.h2
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_wr.h4
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch.c2
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.c133
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.c2
-rw-r--r--drivers/infiniband/hw/cxgb4/Kconfig18
-rw-r--r--drivers/infiniband/hw/cxgb4/Makefile5
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c2374
-rw-r--r--drivers/infiniband/hw/cxgb4/cq.c886
-rw-r--r--drivers/infiniband/hw/cxgb4/device.c544
-rw-r--r--drivers/infiniband/hw/cxgb4/ev.c193
-rw-r--r--drivers/infiniband/hw/cxgb4/iw_cxgb4.h746
-rw-r--r--drivers/infiniband/hw/cxgb4/mem.c812
-rw-r--r--drivers/infiniband/hw/cxgb4/provider.c520
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c1576
-rw-r--r--drivers/infiniband/hw/cxgb4/resource.c417
-rw-r--r--drivers/infiniband/hw/cxgb4/t4.h548
-rw-r--r--drivers/infiniband/hw/cxgb4/t4fw_ri_api.h829
-rw-r--r--drivers/infiniband/hw/cxgb4/user.h66
-rw-r--r--drivers/infiniband/hw/ehca/ehca_irq.c2
-rw-r--r--drivers/infiniband/hw/ehca/ehca_main.c20
-rw-r--r--drivers/infiniband/hw/ipath/Kconfig8
-rw-r--r--drivers/infiniband/hw/ipath/Makefile6
-rw-r--r--drivers/infiniband/hw/ipath/ipath_driver.c28
-rw-r--r--drivers/infiniband/hw/ipath/ipath_iba6110.c2
-rw-r--r--drivers/infiniband/hw/ipath/ipath_iba6120.c1862
-rw-r--r--drivers/infiniband/hw/ipath/ipath_iba7220.c2631
-rw-r--r--drivers/infiniband/hw/ipath/ipath_kernel.h2
-rw-r--r--drivers/infiniband/hw/ipath/ipath_verbs.c2
-rw-r--r--drivers/infiniband/hw/mlx4/cq.c8
-rw-r--r--drivers/infiniband/hw/mlx4/main.c3
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c50
-rw-r--r--drivers/infiniband/hw/mthca/mthca_allocator.c8
-rw-r--r--drivers/infiniband/hw/mthca/mthca_eq.c6
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.c2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.h2
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.c22
-rw-r--r--drivers/infiniband/hw/nes/nes_nic.c95
-rw-r--r--drivers/infiniband/hw/nes/nes_utils.c10
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.c4
-rw-r--r--drivers/infiniband/hw/qib/Kconfig7
-rw-r--r--drivers/infiniband/hw/qib/Makefile15
-rw-r--r--drivers/infiniband/hw/qib/qib.h1439
-rw-r--r--drivers/infiniband/hw/qib/qib_6120_regs.h977
-rw-r--r--drivers/infiniband/hw/qib/qib_7220.h156
-rw-r--r--drivers/infiniband/hw/qib/qib_7220_regs.h1496
-rw-r--r--drivers/infiniband/hw/qib/qib_7322_regs.h3163
-rw-r--r--drivers/infiniband/hw/qib/qib_common.h758
-rw-r--r--drivers/infiniband/hw/qib/qib_cq.c484
-rw-r--r--drivers/infiniband/hw/qib/qib_diag.c894
-rw-r--r--drivers/infiniband/hw/qib/qib_dma.c182
-rw-r--r--drivers/infiniband/hw/qib/qib_driver.c665
-rw-r--r--drivers/infiniband/hw/qib/qib_eeprom.c451
-rw-r--r--drivers/infiniband/hw/qib/qib_file_ops.c2317
-rw-r--r--drivers/infiniband/hw/qib/qib_fs.c618
-rw-r--r--drivers/infiniband/hw/qib/qib_iba6120.c3576
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7220.c4618
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c7645
-rw-r--r--drivers/infiniband/hw/qib/qib_init.c1586
-rw-r--r--drivers/infiniband/hw/qib/qib_intr.c236
-rw-r--r--drivers/infiniband/hw/qib/qib_keys.c328
-rw-r--r--drivers/infiniband/hw/qib/qib_mad.c2173
-rw-r--r--drivers/infiniband/hw/qib/qib_mad.h373
-rw-r--r--drivers/infiniband/hw/qib/qib_mmap.c174
-rw-r--r--drivers/infiniband/hw/qib/qib_mr.c503
-rw-r--r--drivers/infiniband/hw/qib/qib_pcie.c738
-rw-r--r--drivers/infiniband/hw/qib/qib_pio_copy.c64
-rw-r--r--drivers/infiniband/hw/qib/qib_qp.c1255
-rw-r--r--drivers/infiniband/hw/qib/qib_qsfp.c564
-rw-r--r--drivers/infiniband/hw/qib/qib_qsfp.h184
-rw-r--r--drivers/infiniband/hw/qib/qib_rc.c2288
-rw-r--r--drivers/infiniband/hw/qib/qib_ruc.c817
-rw-r--r--drivers/infiniband/hw/qib/qib_sd7220.c (renamed from drivers/infiniband/hw/ipath/ipath_sd7220.c)859
-rw-r--r--drivers/infiniband/hw/qib/qib_sd7220_img.c (renamed from drivers/infiniband/hw/ipath/ipath_sd7220_img.c)19
-rw-r--r--drivers/infiniband/hw/qib/qib_sdma.c973
-rw-r--r--drivers/infiniband/hw/qib/qib_srq.c375
-rw-r--r--drivers/infiniband/hw/qib/qib_sysfs.c691
-rw-r--r--drivers/infiniband/hw/qib/qib_twsi.c498
-rw-r--r--drivers/infiniband/hw/qib/qib_tx.c557
-rw-r--r--drivers/infiniband/hw/qib/qib_uc.c555
-rw-r--r--drivers/infiniband/hw/qib/qib_ud.c607
-rw-r--r--drivers/infiniband/hw/qib/qib_user_pages.c157
-rw-r--r--drivers/infiniband/hw/qib/qib_user_sdma.c897
-rw-r--r--drivers/infiniband/hw/qib/qib_user_sdma.h52
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.c2248
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.h1100
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs_mcast.c368
-rw-r--r--drivers/infiniband/hw/qib/qib_wc_ppc64.c (renamed from drivers/infiniband/hw/ipath/ipath_7220.h)49
-rw-r--r--drivers/infiniband/hw/qib/qib_wc_x86_64.c171
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ethtool.c20
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c15
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c9
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.h4
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c115
-rw-r--r--drivers/input/gameport/gameport.c4
-rw-r--r--drivers/input/joydev.c10
-rw-r--r--drivers/input/joystick/analog.c4
-rw-r--r--drivers/input/joystick/iforce/iforce-main.c6
-rw-r--r--drivers/input/joystick/iforce/iforce-usb.c1
-rw-r--r--drivers/input/joystick/xpad.c16
-rw-r--r--drivers/input/keyboard/Kconfig66
-rw-r--r--drivers/input/keyboard/Makefile4
-rw-r--r--drivers/input/keyboard/amikbd.c97
-rw-r--r--drivers/input/keyboard/corgikbd.c414
-rw-r--r--drivers/input/keyboard/lm8323.c6
-rw-r--r--drivers/input/keyboard/spitzkbd.c496
-rw-r--r--drivers/input/keyboard/tca6416-keypad.c349
-rw-r--r--drivers/input/keyboard/tosakbd.c431
-rw-r--r--drivers/input/misc/88pm860x_onkey.c1
-rw-r--r--drivers/input/misc/Kconfig50
-rw-r--r--drivers/input/misc/Makefile5
-rw-r--r--drivers/input/misc/ad714x-i2c.c140
-rw-r--r--drivers/input/misc/ad714x-spi.c103
-rw-r--r--drivers/input/misc/ad714x.c1347
-rw-r--r--drivers/input/misc/ad714x.h26
-rw-r--r--drivers/input/misc/ati_remote.c12
-rw-r--r--drivers/input/misc/ati_remote2.c4
-rw-r--r--drivers/input/misc/cm109.c28
-rw-r--r--drivers/input/misc/hp_sdc_rtc.c34
-rw-r--r--drivers/input/misc/keyspan_remote.c6
-rw-r--r--drivers/input/misc/max8925_onkey.c148
-rw-r--r--drivers/input/misc/pcf8574_keypad.c227
-rw-r--r--drivers/input/misc/pcspkr.c6
-rw-r--r--drivers/input/misc/powermate.c17
-rw-r--r--drivers/input/misc/sparcspkr.c14
-rw-r--r--drivers/input/misc/twl4030-vibra.c2
-rw-r--r--drivers/input/misc/uinput.c4
-rw-r--r--drivers/input/misc/wistron_btns.c4
-rw-r--r--drivers/input/misc/yealink.c25
-rw-r--r--drivers/input/mouse/Kconfig2
-rw-r--r--drivers/input/mouse/amimouse.c98
-rw-r--r--drivers/input/mouse/appletouch.c12
-rw-r--r--drivers/input/mouse/bcm5974.c24
-rw-r--r--drivers/input/mouse/elantech.c107
-rw-r--r--drivers/input/mouse/elantech.h5
-rw-r--r--drivers/input/mouse/hgpk.c4
-rw-r--r--drivers/input/mouse/logips2pp.c98
-rw-r--r--drivers/input/mouse/psmouse-base.c86
-rw-r--r--drivers/input/mouse/synaptics.c53
-rw-r--r--drivers/input/mouse/synaptics.h6
-rw-r--r--drivers/input/serio/Kconfig16
-rw-r--r--drivers/input/serio/Makefile1
-rw-r--r--drivers/input/serio/ams_delta_serio.c177
-rw-r--r--drivers/input/serio/i8042-sparcio.h9
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h14
-rw-r--r--drivers/input/serio/xilinx_ps2.c15
-rw-r--r--drivers/input/tablet/acecad.c90
-rw-r--r--drivers/input/tablet/aiptek.c14
-rw-r--r--drivers/input/tablet/gtco.c12
-rw-r--r--drivers/input/tablet/kbtab.c55
-rw-r--r--drivers/input/tablet/wacom.h36
-rw-r--r--drivers/input/tablet/wacom_sys.c324
-rw-r--r--drivers/input/tablet/wacom_wac.c1168
-rw-r--r--drivers/input/tablet/wacom_wac.h13
-rw-r--r--drivers/input/touchscreen/Kconfig45
-rw-r--r--drivers/input/touchscreen/Makefile3
-rw-r--r--drivers/input/touchscreen/ad7877.c15
-rw-r--r--drivers/input/touchscreen/ads7846.c4
-rw-r--r--drivers/input/touchscreen/corgi_ts.c385
-rw-r--r--drivers/input/touchscreen/hampshire.c205
-rw-r--r--drivers/input/touchscreen/s3c2410_ts.c40
-rw-r--r--drivers/input/touchscreen/tps6507x-ts.c400
-rw-r--r--drivers/input/touchscreen/tsc2007.c2
-rw-r--r--drivers/input/touchscreen/usbtouchscreen.c20
-rw-r--r--drivers/input/touchscreen/wm97xx-core.c2
-rw-r--r--drivers/isdn/capi/capi.c17
-rw-r--r--drivers/isdn/capi/kcapi.c6
-rw-r--r--drivers/isdn/divert/divert_procfs.c18
-rw-r--r--drivers/isdn/gigaset/capi.c41
-rw-r--r--drivers/isdn/hardware/avm/avm_cs.c76
-rw-r--r--drivers/isdn/hardware/mISDN/hfcmulti.c2
-rw-r--r--drivers/isdn/hisax/avma1_cs.c63
-rw-r--r--drivers/isdn/hisax/elsa_cs.c40
-rw-r--r--drivers/isdn/hisax/hfc4s8s_l1.c2
-rw-r--r--drivers/isdn/hisax/hisax_fcpcipnp.c3
-rw-r--r--drivers/isdn/hisax/sedlbauer_cs.c60
-rw-r--r--drivers/isdn/hisax/teles_cs.c50
-rw-r--r--drivers/isdn/i4l/isdn_common.c18
-rw-r--r--drivers/isdn/i4l/isdn_x25iface.c17
-rw-r--r--drivers/isdn/mISDN/timerdev.c12
-rw-r--r--drivers/leds/Kconfig19
-rw-r--r--drivers/leds/Makefile2
-rw-r--r--drivers/leds/led-class.c2
-rw-r--r--drivers/leds/leds-88pm860x.c11
-rw-r--r--drivers/leds/leds-gpio.c34
-rw-r--r--drivers/leds/leds-lp3944.c9
-rw-r--r--drivers/leds/leds-mc13783.c403
-rw-r--r--drivers/leds/leds-net5501.c94
-rw-r--r--drivers/leds/leds-ss4200.c2
-rw-r--r--drivers/macintosh/macio-adb.c1
-rw-r--r--drivers/macintosh/macio_asic.c28
-rw-r--r--drivers/macintosh/macio_sysfs.c6
-rw-r--r--drivers/macintosh/mediabay.c2
-rw-r--r--drivers/macintosh/nvram.c2
-rw-r--r--drivers/macintosh/rack-meter.c4
-rw-r--r--drivers/macintosh/smu.c11
-rw-r--r--drivers/macintosh/therm_adt746x.c2
-rw-r--r--drivers/macintosh/therm_pm72.c9
-rw-r--r--drivers/macintosh/therm_windtunnel.c7
-rw-r--r--drivers/macintosh/via-pmu.c17
-rw-r--r--drivers/macintosh/windfarm_pm81.c8
-rw-r--r--drivers/macintosh/windfarm_pm91.c9
-rw-r--r--drivers/md/Kconfig4
-rw-r--r--drivers/md/bitmap.c47
-rw-r--r--drivers/md/bitmap.h2
-rw-r--r--drivers/md/faulty.c9
-rw-r--r--drivers/md/linear.c36
-rw-r--r--drivers/md/md.c543
-rw-r--r--drivers/md/md.h16
-rw-r--r--drivers/md/multipath.c13
-rw-r--r--drivers/md/raid0.c251
-rw-r--r--drivers/md/raid0.h3
-rw-r--r--drivers/md/raid1.c114
-rw-r--r--drivers/md/raid10.c300
-rw-r--r--drivers/md/raid10.h12
-rw-r--r--drivers/md/raid5.c233
-rw-r--r--drivers/media/IR/Kconfig59
-rw-r--r--drivers/media/IR/Makefile14
-rw-r--r--drivers/media/IR/imon.c2396
-rw-r--r--drivers/media/IR/ir-core-priv.h126
-rw-r--r--drivers/media/IR/ir-functions.c1
-rw-r--r--drivers/media/IR/ir-jvc-decoder.c320
-rw-r--r--drivers/media/IR/ir-keymaps.c3494
-rw-r--r--drivers/media/IR/ir-keytable.c687
-rw-r--r--drivers/media/IR/ir-nec-decoder.c328
-rw-r--r--drivers/media/IR/ir-raw-event.c251
-rw-r--r--drivers/media/IR/ir-rc5-decoder.c324
-rw-r--r--drivers/media/IR/ir-rc6-decoder.c419
-rw-r--r--drivers/media/IR/ir-sony-decoder.c312
-rw-r--r--drivers/media/IR/ir-sysfs.c202
-rw-r--r--drivers/media/IR/keymaps/Kconfig15
-rw-r--r--drivers/media/IR/keymaps/Makefile67
-rw-r--r--drivers/media/IR/keymaps/rc-adstech-dvb-t-pci.c89
-rw-r--r--drivers/media/IR/keymaps/rc-apac-viewcomp.c80
-rw-r--r--drivers/media/IR/keymaps/rc-asus-pc39.c91
-rw-r--r--drivers/media/IR/keymaps/rc-ati-tv-wonder-hd-600.c69
-rw-r--r--drivers/media/IR/keymaps/rc-avermedia-a16d.c75
-rw-r--r--drivers/media/IR/keymaps/rc-avermedia-cardbus.c97
-rw-r--r--drivers/media/IR/keymaps/rc-avermedia-dvbt.c78
-rw-r--r--drivers/media/IR/keymaps/rc-avermedia-m135a-rm-jx.c90
-rw-r--r--drivers/media/IR/keymaps/rc-avermedia.c86
-rw-r--r--drivers/media/IR/keymaps/rc-avertv-303.c85
-rw-r--r--drivers/media/IR/keymaps/rc-behold-columbus.c108
-rw-r--r--drivers/media/IR/keymaps/rc-behold.c141
-rw-r--r--drivers/media/IR/keymaps/rc-budget-ci-old.c92
-rw-r--r--drivers/media/IR/keymaps/rc-cinergy-1400.c84
-rw-r--r--drivers/media/IR/keymaps/rc-cinergy.c78
-rw-r--r--drivers/media/IR/keymaps/rc-dm1105-nec.c76
-rw-r--r--drivers/media/IR/keymaps/rc-dntv-live-dvb-t.c78
-rw-r--r--drivers/media/IR/keymaps/rc-dntv-live-dvbt-pro.c97
-rw-r--r--drivers/media/IR/keymaps/rc-em-terratec.c69
-rw-r--r--drivers/media/IR/keymaps/rc-empty.c44
-rw-r--r--drivers/media/IR/keymaps/rc-encore-enltv-fm53.c81
-rw-r--r--drivers/media/IR/keymaps/rc-encore-enltv.c112
-rw-r--r--drivers/media/IR/keymaps/rc-encore-enltv2.c90
-rw-r--r--drivers/media/IR/keymaps/rc-evga-indtube.c61
-rw-r--r--drivers/media/IR/keymaps/rc-eztv.c96
-rw-r--r--drivers/media/IR/keymaps/rc-flydvb.c77
-rw-r--r--drivers/media/IR/keymaps/rc-flyvideo.c70
-rw-r--r--drivers/media/IR/keymaps/rc-fusionhdtv-mce.c98
-rw-r--r--drivers/media/IR/keymaps/rc-gadmei-rm008z.c81
-rw-r--r--drivers/media/IR/keymaps/rc-genius-tvgo-a11mce.c84
-rw-r--r--drivers/media/IR/keymaps/rc-gotview7135.c79
-rw-r--r--drivers/media/IR/keymaps/rc-hauppauge-new.c100
-rw-r--r--drivers/media/IR/keymaps/rc-imon-mce.c142
-rw-r--r--drivers/media/IR/keymaps/rc-imon-pad.c156
-rw-r--r--drivers/media/IR/keymaps/rc-iodata-bctv7e.c88
-rw-r--r--drivers/media/IR/keymaps/rc-kaiomy.c87
-rw-r--r--drivers/media/IR/keymaps/rc-kworld-315u.c83
-rw-r--r--drivers/media/IR/keymaps/rc-kworld-plus-tv-analog.c99
-rw-r--r--drivers/media/IR/keymaps/rc-manli.c135
-rw-r--r--drivers/media/IR/keymaps/rc-msi-tvanywhere-plus.c123
-rw-r--r--drivers/media/IR/keymaps/rc-msi-tvanywhere.c69
-rw-r--r--drivers/media/IR/keymaps/rc-nebula.c96
-rw-r--r--drivers/media/IR/keymaps/rc-nec-terratec-cinergy-xs.c105
-rw-r--r--drivers/media/IR/keymaps/rc-norwood.c85
-rw-r--r--drivers/media/IR/keymaps/rc-npgtech.c80
-rw-r--r--drivers/media/IR/keymaps/rc-pctv-sedna.c80
-rw-r--r--drivers/media/IR/keymaps/rc-pinnacle-color.c94
-rw-r--r--drivers/media/IR/keymaps/rc-pinnacle-grey.c89
-rw-r--r--drivers/media/IR/keymaps/rc-pinnacle-pctv-hd.c73
-rw-r--r--drivers/media/IR/keymaps/rc-pixelview-mk12.c83
-rw-r--r--drivers/media/IR/keymaps/rc-pixelview-new.c83
-rw-r--r--drivers/media/IR/keymaps/rc-pixelview.c82
-rw-r--r--drivers/media/IR/keymaps/rc-powercolor-real-angel.c81
-rw-r--r--drivers/media/IR/keymaps/rc-proteus-2309.c69
-rw-r--r--drivers/media/IR/keymaps/rc-purpletv.c81
-rw-r--r--drivers/media/IR/keymaps/rc-pv951.c78
-rw-r--r--drivers/media/IR/keymaps/rc-rc5-hauppauge-new.c103
-rw-r--r--drivers/media/IR/keymaps/rc-rc5-tv.c81
-rw-r--r--drivers/media/IR/keymaps/rc-real-audio-220-32-keys.c78
-rw-r--r--drivers/media/IR/keymaps/rc-tbs-nec.c73
-rw-r--r--drivers/media/IR/keymaps/rc-terratec-cinergy-xs.c92
-rw-r--r--drivers/media/IR/keymaps/rc-tevii-nec.c88
-rw-r--r--drivers/media/IR/keymaps/rc-tt-1500.c82
-rw-r--r--drivers/media/IR/keymaps/rc-videomate-s350.c85
-rw-r--r--drivers/media/IR/keymaps/rc-videomate-tv-pvr.c87
-rw-r--r--drivers/media/IR/keymaps/rc-winfast-usbii-deluxe.c82
-rw-r--r--drivers/media/IR/keymaps/rc-winfast.c102
-rw-r--r--drivers/media/IR/rc-map.c84
-rw-r--r--drivers/media/common/tuners/tuner-xc2028.c25
-rw-r--r--drivers/media/common/tuners/tuner-xc2028.h2
-rw-r--r--drivers/media/dvb/bt8xx/dst.c2
-rw-r--r--drivers/media/dvb/dm1105/dm1105.c25
-rw-r--r--drivers/media/dvb/dvb-core/dmxdev.c31
-rw-r--r--drivers/media/dvb/dvb-core/dvb_ca_en50221.c17
-rw-r--r--drivers/media/dvb/dvb-core/dvb_demux.c11
-rw-r--r--drivers/media/dvb/dvb-core/dvb_frontend.c55
-rw-r--r--drivers/media/dvb/dvb-core/dvb_net.c29
-rw-r--r--drivers/media/dvb/dvb-core/dvbdev.c17
-rw-r--r--drivers/media/dvb/dvb-core/dvbdev.h11
-rw-r--r--drivers/media/dvb/dvb-usb/a800.c6
-rw-r--r--drivers/media/dvb/dvb-usb/af9005-remote.c16
-rw-r--r--drivers/media/dvb/dvb-usb/af9005.c8
-rw-r--r--drivers/media/dvb/dvb-usb/af9005.h4
-rw-r--r--drivers/media/dvb/dvb-usb/af9015.c43
-rw-r--r--drivers/media/dvb/dvb-usb/af9015.h18
-rw-r--r--drivers/media/dvb/dvb-usb/anysee.c6
-rw-r--r--drivers/media/dvb/dvb-usb/az6027.c114
-rw-r--r--drivers/media/dvb/dvb-usb/cinergyT2-core.c6
-rw-r--r--drivers/media/dvb/dvb-usb/cxusb.c51
-rw-r--r--drivers/media/dvb/dvb-usb/dib0700_devices.c100
-rw-r--r--drivers/media/dvb/dvb-usb/dibusb-common.c4
-rw-r--r--drivers/media/dvb/dvb-usb/dibusb-mb.c8
-rw-r--r--drivers/media/dvb/dvb-usb/dibusb-mc.c2
-rw-r--r--drivers/media/dvb/dvb-usb/dibusb.h2
-rw-r--r--drivers/media/dvb/dvb-usb/digitv.c6
-rw-r--r--drivers/media/dvb/dvb-usb/dtt200u.c18
-rw-r--r--drivers/media/dvb/dvb-usb/dvb-usb-ids.h9
-rw-r--r--drivers/media/dvb/dvb-usb/dvb-usb-urb.c2
-rw-r--r--drivers/media/dvb/dvb-usb/dvb-usb.h7
-rw-r--r--drivers/media/dvb/dvb-usb/dw2102.c44
-rw-r--r--drivers/media/dvb/dvb-usb/friio-fe.c2
-rw-r--r--drivers/media/dvb/dvb-usb/gp8psk.c4
-rw-r--r--drivers/media/dvb/dvb-usb/m920x.c18
-rw-r--r--drivers/media/dvb/dvb-usb/nova-t-usb2.c18
-rw-r--r--drivers/media/dvb/dvb-usb/opera1.c16
-rw-r--r--drivers/media/dvb/dvb-usb/usb-urb.c7
-rw-r--r--drivers/media/dvb/dvb-usb/vp702x.c12
-rw-r--r--drivers/media/dvb/dvb-usb/vp7045.c12
-rw-r--r--drivers/media/dvb/firewire/firedtv-avc.c2
-rw-r--r--drivers/media/dvb/firewire/firedtv-ci.c5
-rw-r--r--drivers/media/dvb/frontends/atbm8830_priv.h2
-rw-r--r--drivers/media/dvb/frontends/au8522_decoder.c7
-rw-r--r--drivers/media/dvb/frontends/au8522_dig.c26
-rw-r--r--drivers/media/dvb/frontends/au8522_priv.h5
-rw-r--r--drivers/media/dvb/frontends/dib3000mc.c35
-rw-r--r--drivers/media/dvb/frontends/dib7000p.c36
-rw-r--r--drivers/media/dvb/frontends/dib8000.h1
-rw-r--r--drivers/media/dvb/frontends/ds3000.c2
-rw-r--r--drivers/media/dvb/frontends/stv0900_core.c16
-rw-r--r--drivers/media/dvb/frontends/stv090x.c94
-rw-r--r--drivers/media/dvb/frontends/stv090x.h1
-rw-r--r--drivers/media/dvb/frontends/stv6110x.c34
-rw-r--r--drivers/media/dvb/frontends/stv6110x.h1
-rw-r--r--drivers/media/dvb/mantis/mantis_input.c4
-rw-r--r--drivers/media/dvb/mantis/mantis_vp1041.c10
-rw-r--r--drivers/media/dvb/ngene/Kconfig2
-rw-r--r--drivers/media/dvb/ngene/Makefile4
-rw-r--r--drivers/media/dvb/ngene/ngene-cards.c328
-rw-r--r--drivers/media/dvb/ngene/ngene-core.c518
-rw-r--r--drivers/media/dvb/ngene/ngene-dvb.c172
-rw-r--r--drivers/media/dvb/ngene/ngene-i2c.c179
-rw-r--r--drivers/media/dvb/ngene/ngene.h30
-rw-r--r--drivers/media/dvb/pt1/pt1.c271
-rw-r--r--drivers/media/dvb/pt1/va1j5jf8007s.c32
-rw-r--r--drivers/media/dvb/pt1/va1j5jf8007s.h8
-rw-r--r--drivers/media/dvb/pt1/va1j5jf8007t.c31
-rw-r--r--drivers/media/dvb/pt1/va1j5jf8007t.h8
-rw-r--r--drivers/media/dvb/ttpci/av7110.c4
-rw-r--r--drivers/media/dvb/ttpci/av7110_av.c8
-rw-r--r--drivers/media/dvb/ttpci/av7110_ca.c5
-rw-r--r--drivers/media/dvb/ttpci/budget-ci.c52
-rw-r--r--drivers/media/dvb/ttpci/budget.c47
-rw-r--r--drivers/media/dvb/ttusb-dec/ttusb_dec.c6
-rw-r--r--drivers/media/radio/radio-mr800.c19
-rw-r--r--drivers/media/video/Kconfig52
-rw-r--r--drivers/media/video/Makefile14
-rw-r--r--drivers/media/video/ak881x.c368
-rw-r--r--drivers/media/video/arv.c524
-rw-r--r--drivers/media/video/au0828/au0828-video.c6
-rw-r--r--drivers/media/video/bt8xx/bttv-cards.c4
-rw-r--r--drivers/media/video/bt8xx/bttv-driver.c27
-rw-r--r--drivers/media/video/bt8xx/bttv-input.c30
-rw-r--r--drivers/media/video/bw-qcam.c452
-rw-r--r--drivers/media/video/c-qcam.c483
-rw-r--r--drivers/media/video/cpia2/cpia2_v4l.c4
-rw-r--r--drivers/media/video/cx18/cx18-av-core.c54
-rw-r--r--drivers/media/video/cx18/cx18-av-core.h24
-rw-r--r--drivers/media/video/cx18/cx18-av-vbi.c42
-rw-r--r--drivers/media/video/cx18/cx18-cards.c4
-rw-r--r--drivers/media/video/cx18/cx18-cards.h4
-rw-r--r--drivers/media/video/cx18/cx18-controls.c2
-rw-r--r--drivers/media/video/cx18/cx18-fileops.c2
-rw-r--r--drivers/media/video/cx18/cx18-i2c.c2
-rw-r--r--drivers/media/video/cx18/cx18-ioctl.c22
-rw-r--r--drivers/media/video/cx18/cx18-streams.c5
-rw-r--r--drivers/media/video/cx18/cx18-vbi.c2
-rw-r--r--drivers/media/video/cx231xx/cx231xx-audio.c2
-rw-r--r--drivers/media/video/cx231xx/cx231xx-core.c27
-rw-r--r--drivers/media/video/cx231xx/cx231xx-input.c52
-rw-r--r--drivers/media/video/cx231xx/cx231xx-video.c9
-rw-r--r--drivers/media/video/cx231xx/cx231xx.h2
-rw-r--r--drivers/media/video/cx2341x.c14
-rw-r--r--drivers/media/video/cx23885/cimax2.c12
-rw-r--r--drivers/media/video/cx23885/cx23885-417.c2
-rw-r--r--drivers/media/video/cx23885/cx23885-dvb.c16
-rw-r--r--drivers/media/video/cx23885/cx23885-input.c8
-rw-r--r--drivers/media/video/cx23885/cx23885-video.c4
-rw-r--r--drivers/media/video/cx25840/cx25840-core.c17
-rw-r--r--drivers/media/video/cx25840/cx25840-core.h5
-rw-r--r--drivers/media/video/cx25840/cx25840-vbi.c40
-rw-r--r--drivers/media/video/cx88/cx88-core.c3
-rw-r--r--drivers/media/video/cx88/cx88-dvb.c2
-rw-r--r--drivers/media/video/cx88/cx88-input.c149
-rw-r--r--drivers/media/video/cx88/cx88-mpeg.c11
-rw-r--r--drivers/media/video/cx88/cx88-video.c20
-rw-r--r--drivers/media/video/cx88/cx88.h8
-rw-r--r--drivers/media/video/davinci/dm644x_ccdc.c131
-rw-r--r--drivers/media/video/davinci/dm644x_ccdc_regs.h10
-rw-r--r--drivers/media/video/davinci/isif_regs.h2
-rw-r--r--drivers/media/video/davinci/vpfe_capture.c83
-rw-r--r--drivers/media/video/davinci/vpif_capture.c8
-rw-r--r--drivers/media/video/davinci/vpif_display.c6
-rw-r--r--drivers/media/video/em28xx/em28xx-audio.c2
-rw-r--r--drivers/media/video/em28xx/em28xx-cards.c56
-rw-r--r--drivers/media/video/em28xx/em28xx-core.c29
-rw-r--r--drivers/media/video/em28xx/em28xx-dvb.c11
-rw-r--r--drivers/media/video/em28xx/em28xx-input.c29
-rw-r--r--drivers/media/video/em28xx/em28xx-video.c66
-rw-r--r--drivers/media/video/em28xx/em28xx.h7
-rw-r--r--drivers/media/video/et61x251/et61x251_core.c6
-rw-r--r--drivers/media/video/font.h407
-rw-r--r--drivers/media/video/gspca/Kconfig6
-rw-r--r--drivers/media/video/gspca/benq.c4
-rw-r--r--drivers/media/video/gspca/cpia1.c36
-rw-r--r--drivers/media/video/gspca/gspca.c152
-rw-r--r--drivers/media/video/gspca/gspca.h6
-rw-r--r--drivers/media/video/gspca/ov534.c563
-rw-r--r--drivers/media/video/gspca/pac207.c4
-rw-r--r--drivers/media/video/gspca/sn9c2028.c2
-rw-r--r--drivers/media/video/gspca/sn9c20x.c359
-rw-r--r--drivers/media/video/gspca/sonixj.c585
-rw-r--r--drivers/media/video/gspca/spca561.c58
-rw-r--r--drivers/media/video/gspca/t613.c172
-rw-r--r--drivers/media/video/gspca/vc032x.c747
-rw-r--r--drivers/media/video/gspca/zc3xx.c95
-rw-r--r--drivers/media/video/hdpvr/hdpvr-core.c33
-rw-r--r--drivers/media/video/hdpvr/hdpvr-video.c26
-rw-r--r--drivers/media/video/hdpvr/hdpvr.h5
-rw-r--r--drivers/media/video/ir-kbd-i2c.c28
-rw-r--r--drivers/media/video/ivtv/ivtv-driver.c5
-rw-r--r--drivers/media/video/ivtv/ivtv-driver.h10
-rw-r--r--drivers/media/video/ivtv/ivtv-fileops.c55
-rw-r--r--drivers/media/video/ivtv/ivtv-i2c.c6
-rw-r--r--drivers/media/video/ivtv/ivtv-ioctl.c119
-rw-r--r--drivers/media/video/ivtv/ivtv-irq.c23
-rw-r--r--drivers/media/video/ivtv/ivtv-streams.c17
-rw-r--r--drivers/media/video/ivtv/ivtv-vbi.c18
-rw-r--r--drivers/media/video/ivtv/ivtvfb.c2
-rw-r--r--drivers/media/video/mem2mem_testdev.c1052
-rw-r--r--drivers/media/video/meye.c78
-rw-r--r--drivers/media/video/meye.h10
-rw-r--r--drivers/media/video/mt9t031.c66
-rw-r--r--drivers/media/video/mt9v022.c2
-rw-r--r--drivers/media/video/mx1_camera.c4
-rw-r--r--drivers/media/video/mx3_camera.c4
-rw-r--r--drivers/media/video/omap/Kconfig11
-rw-r--r--drivers/media/video/omap/Makefile7
-rw-r--r--drivers/media/video/omap/omap_vout.c2643
-rw-r--r--drivers/media/video/omap/omap_voutdef.h147
-rw-r--r--drivers/media/video/omap/omap_voutlib.c293
-rw-r--r--drivers/media/video/omap/omap_voutlib.h34
-rw-r--r--drivers/media/video/omap24xxcam.c4
-rw-r--r--drivers/media/video/ov511.c11
-rw-r--r--drivers/media/video/ov7670.c286
-rw-r--r--drivers/media/video/ov9640.c6
-rw-r--r--drivers/media/video/pms.c18
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-hdw.c2
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-v4l2.c28
-rw-r--r--drivers/media/video/pwc/Kconfig2
-rw-r--r--drivers/media/video/pxa_camera.c4
-rw-r--r--drivers/media/video/rj54n1cb0c.c18
-rw-r--r--drivers/media/video/s2255drv.c1161
-rw-r--r--drivers/media/video/saa7115.c74
-rw-r--r--drivers/media/video/saa7127.c31
-rw-r--r--drivers/media/video/saa7134/saa7134-alsa.c2
-rw-r--r--drivers/media/video/saa7134/saa7134-cards.c111
-rw-r--r--drivers/media/video/saa7134/saa7134-core.c22
-rw-r--r--drivers/media/video/saa7134/saa7134-dvb.c9
-rw-r--r--drivers/media/video/saa7134/saa7134-input.c283
-rw-r--r--drivers/media/video/saa7134/saa7134-reg.h24
-rw-r--r--drivers/media/video/saa7134/saa7134-video.c25
-rw-r--r--drivers/media/video/saa7134/saa7134.h7
-rw-r--r--drivers/media/video/sh_mobile_ceu_camera.c613
-rw-r--r--drivers/media/video/sh_vou.c1476
-rw-r--r--drivers/media/video/sn9c102/sn9c102_core.c6
-rw-r--r--drivers/media/video/sn9c102/sn9c102_devtable.h18
-rw-r--r--drivers/media/video/sn9c102/sn9c102_hv7131d.c2
-rw-r--r--drivers/media/video/soc_camera.c21
-rw-r--r--drivers/media/video/tlg2300/pd-dvb.c6
-rw-r--r--drivers/media/video/tlg2300/pd-main.c9
-rw-r--r--drivers/media/video/tlg2300/pd-radio.c21
-rw-r--r--drivers/media/video/tlg2300/pd-video.c32
-rw-r--r--drivers/media/video/tvp514x.c13
-rw-r--r--drivers/media/video/tvp5150.c67
-rw-r--r--drivers/media/video/tvp7002.c53
-rw-r--r--drivers/media/video/usbvideo/quickcam_messenger.c3
-rw-r--r--drivers/media/video/usbvision/usbvision-core.c16
-rw-r--r--drivers/media/video/usbvision/usbvision-i2c.c3
-rw-r--r--drivers/media/video/usbvision/usbvision-video.c82
-rw-r--r--drivers/media/video/usbvision/usbvision.h6
-rw-r--r--drivers/media/video/uvc/uvc_ctrl.c50
-rw-r--r--drivers/media/video/uvc/uvc_driver.c25
-rw-r--r--drivers/media/video/uvc/uvc_queue.c8
-rw-r--r--drivers/media/video/uvc/uvc_video.c4
-rw-r--r--drivers/media/video/uvc/uvcvideo.h4
-rw-r--r--drivers/media/video/v4l2-common.c101
-rw-r--r--drivers/media/video/v4l2-compat-ioctl32.c3
-rw-r--r--drivers/media/video/v4l2-dev.c6
-rw-r--r--drivers/media/video/v4l2-device.c11
-rw-r--r--drivers/media/video/v4l2-event.c292
-rw-r--r--drivers/media/video/v4l2-fh.c79
-rw-r--r--drivers/media/video/v4l2-ioctl.c103
-rw-r--r--drivers/media/video/v4l2-mem2mem.c633
-rw-r--r--drivers/media/video/videobuf-core.c244
-rw-r--r--drivers/media/video/videobuf-dma-contig.c115
-rw-r--r--drivers/media/video/videobuf-dma-sg.c383
-rw-r--r--drivers/media/video/videobuf-dvb.c2
-rw-r--r--drivers/media/video/videobuf-vmalloc.c183
-rw-r--r--drivers/media/video/vivi.c806
-rw-r--r--drivers/media/video/w9966.c1149
-rw-r--r--drivers/media/video/zc0301/zc0301_core.c6
-rw-r--r--drivers/media/video/zc0301/zc0301_sensor.h2
-rw-r--r--drivers/media/video/zoran/zoran.h24
-rw-r--r--drivers/media/video/zoran/zoran_driver.c16
-rw-r--r--drivers/media/video/zr364xx.c4
-rw-r--r--drivers/message/fusion/mptbase.c177
-rw-r--r--drivers/message/fusion/mptbase.h5
-rw-r--r--drivers/message/fusion/mptctl.c181
-rw-r--r--drivers/message/fusion/mptfc.c22
-rw-r--r--drivers/message/fusion/mptsas.c55
-rw-r--r--drivers/message/fusion/mptsas.h2
-rw-r--r--drivers/message/fusion/mptscsih.c33
-rw-r--r--drivers/message/fusion/mptspi.c10
-rw-r--r--drivers/message/i2o/i2o_config.c27
-rw-r--r--drivers/mfd/88pm860x-core.c42
-rw-r--r--drivers/mfd/88pm860x-i2c.c2
-rw-r--r--drivers/mfd/Kconfig101
-rw-r--r--drivers/mfd/Makefile16
-rw-r--r--drivers/mfd/ab3100-core.c99
-rw-r--r--drivers/mfd/ab3100-otp.c13
-rw-r--r--drivers/mfd/ab3550-core.c1401
-rw-r--r--drivers/mfd/ab4500-core.c209
-rw-r--r--drivers/mfd/ab8500-core.c444
-rw-r--r--drivers/mfd/ab8500-spi.c133
-rw-r--r--drivers/mfd/abx500-core.c157
-rw-r--r--drivers/mfd/da903x.c1
-rw-r--r--drivers/mfd/davinci_voicecodec.c190
-rw-r--r--drivers/mfd/janz-cmodio.c304
-rw-r--r--drivers/mfd/max8925-core.c7
-rw-r--r--drivers/mfd/max8925-i2c.c2
-rw-r--r--drivers/mfd/mc13783-core.c4
-rw-r--r--drivers/mfd/menelaus.c3
-rw-r--r--drivers/mfd/mfd-core.c2
-rw-r--r--drivers/mfd/pcf50633-adc.c39
-rw-r--r--drivers/mfd/pcf50633-core.c348
-rw-r--r--drivers/mfd/pcf50633-irq.c318
-rw-r--r--drivers/mfd/rdc321x-southbridge.c123
-rw-r--r--drivers/mfd/sh_mobile_sdhi.c28
-rw-r--r--drivers/mfd/t7l66xb.c3
-rw-r--r--drivers/mfd/tc35892.c347
-rw-r--r--drivers/mfd/timberdale.c156
-rw-r--r--drivers/mfd/timberdale.h16
-rw-r--r--drivers/mfd/tps65010.c2
-rw-r--r--drivers/mfd/tps6507x.c159
-rw-r--r--drivers/mfd/twl-core.c4
-rw-r--r--drivers/mfd/twl4030-irq.c11
-rw-r--r--drivers/mfd/wm831x-core.c109
-rw-r--r--drivers/mfd/wm831x-irq.c48
-rw-r--r--drivers/mfd/wm8350-core.c4
-rw-r--r--drivers/mfd/wm8350-i2c.c6
-rw-r--r--drivers/mfd/wm8400-core.c4
-rw-r--r--drivers/mfd/wm8994-core.c43
-rw-r--r--drivers/mfd/wm8994-irq.c310
-rw-r--r--drivers/misc/Kconfig32
-rw-r--r--drivers/misc/Makefile3
-rw-r--r--drivers/misc/ad525x_dpot-i2c.c134
-rw-r--r--drivers/misc/ad525x_dpot-spi.c172
-rw-r--r--drivers/misc/ad525x_dpot.c1016
-rw-r--r--drivers/misc/ad525x_dpot.h202
-rw-r--r--drivers/misc/c2port/core.c4
-rw-r--r--drivers/misc/ds1682.c6
-rw-r--r--drivers/misc/eeprom/at24.c65
-rw-r--r--drivers/misc/eeprom/at25.c6
-rw-r--r--drivers/misc/eeprom/eeprom.c39
-rw-r--r--drivers/misc/eeprom/eeprom_93cx6.c39
-rw-r--r--drivers/misc/eeprom/max6875.c54
-rw-r--r--drivers/misc/hdpuftrs/Makefile1
-rw-r--r--drivers/misc/hdpuftrs/hdpu_cpustate.c256
-rw-r--r--drivers/misc/hdpuftrs/hdpu_nexus.c149
-rw-r--r--drivers/misc/lkdtm.c20
-rw-r--r--drivers/misc/vmware_balloon.c4
-rw-r--r--drivers/mmc/core/core.c3
-rw-r--r--drivers/mmc/core/sd_ops.c2
-rw-r--r--drivers/mmc/core/sdio.c6
-rw-r--r--drivers/mmc/core/sdio_io.c30
-rw-r--r--drivers/mmc/host/Kconfig20
-rw-r--r--drivers/mmc/host/Makefile2
-rw-r--r--drivers/mmc/host/at91_mci.c4
-rw-r--r--drivers/mmc/host/atmel-mci.c80
-rw-r--r--drivers/mmc/host/au1xmmc.c2
-rw-r--r--drivers/mmc/host/bfin_sdh.c2
-rw-r--r--drivers/mmc/host/cb710-mmc.c2
-rw-r--r--drivers/mmc/host/davinci_mmc.c111
-rw-r--r--drivers/mmc/host/imxmmc.c2
-rw-r--r--drivers/mmc/host/mmci.c21
-rw-r--r--drivers/mmc/host/mmci.h6
-rw-r--r--drivers/mmc/host/msm_sdcc.c474
-rw-r--r--drivers/mmc/host/msm_sdcc.h15
-rw-r--r--drivers/mmc/host/mvsdio.c2
-rw-r--r--drivers/mmc/host/mxcmmc.c116
-rw-r--r--drivers/mmc/host/of_mmc_spi.c4
-rw-r--r--drivers/mmc/host/omap.c64
-rw-r--r--drivers/mmc/host/omap_hsmmc.c279
-rw-r--r--drivers/mmc/host/pxamci.c4
-rw-r--r--drivers/mmc/host/s3cmci.c3
-rw-r--r--drivers/mmc/host/sdhci-of-core.c11
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c12
-rw-r--r--drivers/mmc/host/sdhci-of-hlwd.c12
-rw-r--r--drivers/mmc/host/sdhci-pci.c2
-rw-r--r--drivers/mmc/host/sdhci-pltfm.c26
-rw-r--r--drivers/mmc/host/sdhci-s3c.c10
-rw-r--r--drivers/mmc/host/sdhci-spear.c298
-rw-r--r--drivers/mmc/host/sdhci.c25
-rw-r--r--drivers/mmc/host/sdhci.h42
-rw-r--r--drivers/mmc/host/sdricoh_cs.c2
-rw-r--r--drivers/mmc/host/sh_mmcif.c965
-rw-r--r--drivers/mmc/host/tifm_sd.c2
-rw-r--r--drivers/mmc/host/tmio_mmc.c369
-rw-r--r--drivers/mmc/host/tmio_mmc.h13
-rw-r--r--drivers/mmc/host/via-sdmmc.c2
-rw-r--r--drivers/mmc/host/wbsd.c2
-rw-r--r--drivers/mtd/Kconfig13
-rw-r--r--drivers/mtd/Makefile1
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0001.c137
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c344
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0020.c136
-rw-r--r--drivers/mtd/chips/cfi_probe.c56
-rw-r--r--drivers/mtd/chips/cfi_util.c3
-rw-r--r--drivers/mtd/chips/fwh_lock.h6
-rw-r--r--drivers/mtd/chips/gen_probe.c15
-rw-r--r--drivers/mtd/chips/jedec_probe.c288
-rw-r--r--drivers/mtd/devices/Makefile2
-rw-r--r--drivers/mtd/devices/block2mtd.c4
-rw-r--r--drivers/mtd/devices/pmc551.c4
-rw-r--r--drivers/mtd/devices/sst25l.c68
-rw-r--r--drivers/mtd/ftl.c1
-rw-r--r--drivers/mtd/inftlcore.c1
-rw-r--r--drivers/mtd/inftlmount.c7
-rw-r--r--drivers/mtd/lpddr/lpddr_cmds.c79
-rw-r--r--drivers/mtd/lpddr/qinfo_probe.c7
-rw-r--r--drivers/mtd/maps/Kconfig2
-rw-r--r--drivers/mtd/maps/bfin-async-flash.c16
-rw-r--r--drivers/mtd/maps/ceiva.c2
-rw-r--r--drivers/mtd/maps/ixp2000.c3
-rw-r--r--drivers/mtd/maps/ixp4xx.c7
-rw-r--r--drivers/mtd/maps/pcmciamtd.c91
-rw-r--r--drivers/mtd/maps/physmap.c7
-rw-r--r--drivers/mtd/maps/physmap_of.c68
-rw-r--r--drivers/mtd/maps/pismo.c8
-rw-r--r--drivers/mtd/maps/pxa2xx-flash.c3
-rw-r--r--drivers/mtd/maps/sun_uflash.c9
-rw-r--r--drivers/mtd/mtd_blkdevs.c335
-rw-r--r--drivers/mtd/mtdblock.c72
-rw-r--r--drivers/mtd/mtdblock_ro.c4
-rw-r--r--drivers/mtd/mtdchar.c124
-rw-r--r--drivers/mtd/mtdconcat.c3
-rw-r--r--drivers/mtd/mtdcore.c285
-rw-r--r--drivers/mtd/mtdcore.h7
-rw-r--r--drivers/mtd/mtdoops.c5
-rw-r--r--drivers/mtd/mtdsuper.c18
-rw-r--r--drivers/mtd/nand/Kconfig69
-rw-r--r--drivers/mtd/nand/Makefile10
-rw-r--r--drivers/mtd/nand/alauda.c2
-rw-r--r--drivers/mtd/nand/atmel_nand.c2
-rw-r--r--drivers/mtd/nand/au1550nd.c12
-rw-r--r--drivers/mtd/nand/bcm_umi_nand.c3
-rw-r--r--drivers/mtd/nand/bf5xx_nand.c29
-rw-r--r--drivers/mtd/nand/cafe_nand.c4
-rw-r--r--drivers/mtd/nand/davinci_nand.c6
-rw-r--r--drivers/mtd/nand/denali.c2134
-rw-r--r--drivers/mtd/nand/denali.h816
-rw-r--r--drivers/mtd/nand/fsl_elbc_nand.c15
-rw-r--r--drivers/mtd/nand/fsl_upm.c16
-rw-r--r--drivers/mtd/nand/gpio.c12
-rw-r--r--drivers/mtd/nand/mpc5121_nfc.c917
-rw-r--r--drivers/mtd/nand/mxc_nand.c146
-rw-r--r--drivers/mtd/nand/nand_base.c387
-rw-r--r--drivers/mtd/nand/nand_bbt.c29
-rw-r--r--drivers/mtd/nand/nand_bcm_umi.h71
-rw-r--r--drivers/mtd/nand/nand_ids.c1
-rw-r--r--drivers/mtd/nand/nandsim.c17
-rw-r--r--drivers/mtd/nand/ndfc.c15
-rw-r--r--drivers/mtd/nand/nomadik_nand.c6
-rw-r--r--drivers/mtd/nand/nuc900_nand.c (renamed from drivers/mtd/nand/w90p910_nand.c)144
-rw-r--r--drivers/mtd/nand/omap2.c16
-rw-r--r--drivers/mtd/nand/orion_nand.c13
-rw-r--r--drivers/mtd/nand/pasemi_nand.c11
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c11
-rw-r--r--drivers/mtd/nand/r852.c1140
-rw-r--r--drivers/mtd/nand/r852.h163
-rw-r--r--drivers/mtd/nand/s3c2410.c12
-rw-r--r--drivers/mtd/nand/sh_flctl.c2
-rw-r--r--drivers/mtd/nand/sm_common.c148
-rw-r--r--drivers/mtd/nand/sm_common.h61
-rw-r--r--drivers/mtd/nand/socrates_nand.c11
-rw-r--r--drivers/mtd/nand/tmio_nand.c14
-rw-r--r--drivers/mtd/nand/ts7250.c207
-rw-r--r--drivers/mtd/nand/txx9ndfmc.c2
-rw-r--r--drivers/mtd/nftlcore.c1
-rw-r--r--drivers/mtd/onenand/Kconfig7
-rw-r--r--drivers/mtd/onenand/Makefile1
-rw-r--r--drivers/mtd/onenand/omap2.c12
-rw-r--r--drivers/mtd/onenand/onenand_base.c63
-rw-r--r--drivers/mtd/onenand/samsung.c1071
-rw-r--r--drivers/mtd/rfd_ftl.c1
-rw-r--r--drivers/mtd/sm_ftl.c1284
-rw-r--r--drivers/mtd/sm_ftl.h94
-rw-r--r--drivers/mtd/ssfdc.c1
-rw-r--r--drivers/mtd/tests/mtd_pagetest.c3
-rw-r--r--drivers/mtd/tests/mtd_readtest.c3
-rw-r--r--drivers/mtd/tests/mtd_speedtest.c3
-rw-r--r--drivers/mtd/tests/mtd_stresstest.c3
-rw-r--r--drivers/mtd/tests/mtd_subpagetest.c3
-rw-r--r--drivers/mtd/ubi/Kconfig2
-rw-r--r--drivers/mtd/ubi/build.c60
-rw-r--r--drivers/mtd/ubi/cdev.c3
-rw-r--r--drivers/mtd/ubi/io.c6
-rw-r--r--drivers/mtd/ubi/kapi.c6
-rw-r--r--drivers/mtd/ubi/scan.c4
-rw-r--r--drivers/mtd/ubi/ubi.h2
-rw-r--r--drivers/mtd/ubi/vtbl.c4
-rw-r--r--drivers/mtd/ubi/wl.c2
-rw-r--r--drivers/net/3c501.c2
-rw-r--r--drivers/net/3c503.c44
-rw-r--r--drivers/net/3c505.c14
-rw-r--r--drivers/net/3c507.c8
-rw-r--r--drivers/net/3c509.c4
-rw-r--r--drivers/net/3c515.c6
-rw-r--r--drivers/net/3c523.c11
-rw-r--r--drivers/net/3c527.c6
-rw-r--r--drivers/net/3c59x.c11
-rw-r--r--drivers/net/7990.c12
-rw-r--r--drivers/net/8139cp.c9
-rw-r--r--drivers/net/8139too.c8
-rw-r--r--drivers/net/82596.c9
-rw-r--r--drivers/net/Kconfig46
-rw-r--r--drivers/net/Makefile3
-rw-r--r--drivers/net/a2065.c11
-rw-r--r--drivers/net/ac3200.c2
-rw-r--r--drivers/net/acenic.c44
-rw-r--r--drivers/net/acenic.h6
-rw-r--r--drivers/net/amd8111e.c8
-rw-r--r--drivers/net/apne.c1
-rw-r--r--drivers/net/appletalk/cops.c9
-rw-r--r--drivers/net/appletalk/ltpc.c1
-rw-r--r--drivers/net/arcnet/arcnet.c1
-rw-r--r--drivers/net/arcnet/com20020-pci.c4
-rw-r--r--drivers/net/ariadne.c3
-rw-r--r--drivers/net/arm/am79c961a.c7
-rw-r--r--drivers/net/arm/at91_ether.c7
-rw-r--r--drivers/net/arm/ep93xx_eth.c2
-rw-r--r--drivers/net/arm/ether1.c1
-rw-r--r--drivers/net/arm/ether3.c1
-rw-r--r--drivers/net/arm/ixp4xx_eth.c9
-rw-r--r--drivers/net/arm/ks8695net.c13
-rw-r--r--drivers/net/arm/w90p910_ether.c7
-rw-r--r--drivers/net/at1700.c11
-rw-r--r--drivers/net/atarilance.c5
-rw-r--r--drivers/net/atl1c/atl1c_ethtool.c2
-rw-r--r--drivers/net/atl1c/atl1c_main.c9
-rw-r--r--drivers/net/atl1e/atl1e_ethtool.c2
-rw-r--r--drivers/net/atl1e/atl1e_main.c16
-rw-r--r--drivers/net/atlx/atl1.c7
-rw-r--r--drivers/net/atlx/atl2.c8
-rw-r--r--drivers/net/atlx/atlx.c6
-rw-r--r--drivers/net/atp.c10
-rw-r--r--drivers/net/au1000_eth.c262
-rw-r--r--drivers/net/au1000_eth.h4
-rw-r--r--drivers/net/ax88796.c1
-rw-r--r--drivers/net/b44.c8
-rw-r--r--drivers/net/bcm63xx_enet.c14
-rw-r--r--drivers/net/benet/be.h13
-rw-r--r--drivers/net/benet/be_cmds.c35
-rw-r--r--drivers/net/benet/be_cmds.h2
-rw-r--r--drivers/net/benet/be_ethtool.c5
-rw-r--r--drivers/net/benet/be_hw.h3
-rw-r--r--drivers/net/benet/be_main.c316
-rw-r--r--drivers/net/bfin_mac.c561
-rw-r--r--drivers/net/bfin_mac.h18
-rw-r--r--drivers/net/bmac.c15
-rw-r--r--drivers/net/bnx2.c102
-rw-r--r--drivers/net/bnx2.h9
-rw-r--r--drivers/net/bnx2x.h66
-rw-r--r--drivers/net/bnx2x_hsi.h2
-rw-r--r--drivers/net/bnx2x_link.c12
-rw-r--r--drivers/net/bnx2x_main.c1878
-rw-r--r--drivers/net/bnx2x_reg.h27
-rw-r--r--drivers/net/bonding/bond_ipv6.c9
-rw-r--r--drivers/net/bonding/bond_main.c275
-rw-r--r--drivers/net/bonding/bonding.h2
-rw-r--r--drivers/net/caif/Kconfig17
-rw-r--r--drivers/net/caif/Makefile12
-rw-r--r--drivers/net/caif/caif_serial.c449
-rw-r--r--drivers/net/can/Kconfig10
-rw-r--r--drivers/net/can/Makefile1
-rw-r--r--drivers/net/can/at91_can.c4
-rw-r--r--drivers/net/can/bfin_can.c3
-rw-r--r--drivers/net/can/janz-ican3.c1830
-rw-r--r--drivers/net/can/mcp251x.c16
-rw-r--r--drivers/net/can/mscan/mpc5xxx_can.c9
-rw-r--r--drivers/net/can/mscan/mscan.c1
-rw-r--r--drivers/net/can/sja1000/Kconfig4
-rw-r--r--drivers/net/can/sja1000/ems_pci.c1
-rw-r--r--drivers/net/can/sja1000/kvaser_pci.c1
-rw-r--r--drivers/net/can/sja1000/plx_pci.c154
-rw-r--r--drivers/net/can/sja1000/sja1000.c25
-rw-r--r--drivers/net/can/sja1000/sja1000.h1
-rw-r--r--drivers/net/can/sja1000/sja1000_isa.c1
-rw-r--r--drivers/net/can/sja1000/sja1000_of_platform.c13
-rw-r--r--drivers/net/can/sja1000/sja1000_platform.c48
-rw-r--r--drivers/net/can/ti_hecc.c1
-rw-r--r--drivers/net/can/usb/ems_usb.c22
-rw-r--r--drivers/net/cassini.c15
-rw-r--r--drivers/net/chelsio/pm3393.c7
-rw-r--r--drivers/net/chelsio/sge.c58
-rw-r--r--drivers/net/cnic.c90
-rw-r--r--drivers/net/cnic.h10
-rw-r--r--drivers/net/cnic_if.h4
-rw-r--r--drivers/net/cpmac.c17
-rw-r--r--drivers/net/cris/eth_v10.c8
-rw-r--r--drivers/net/cs89x0.c3
-rw-r--r--drivers/net/cxgb3/l2t.c1
-rw-r--r--drivers/net/cxgb3/sge.c20
-rw-r--r--drivers/net/cxgb3/xgmac.c8
-rw-r--r--drivers/net/cxgb4/cxgb4.h9
-rw-r--r--drivers/net/cxgb4/cxgb4_main.c102
-rw-r--r--drivers/net/cxgb4/sge.c11
-rw-r--r--drivers/net/cxgb4/t4_hw.c117
-rw-r--r--drivers/net/cxgb4/t4_msg.h1
-rw-r--r--drivers/net/cxgb4/t4fw_api.h4
-rw-r--r--drivers/net/davinci_emac.c8
-rw-r--r--drivers/net/de600.c4
-rw-r--r--drivers/net/de620.c1
-rw-r--r--drivers/net/declance.c10
-rw-r--r--drivers/net/defxx.c6
-rw-r--r--drivers/net/depca.c15
-rw-r--r--drivers/net/dl2k.c8
-rw-r--r--drivers/net/dm9000.c50
-rw-r--r--drivers/net/dnet.c4
-rw-r--r--drivers/net/e100.c190
-rw-r--r--drivers/net/e1000/e1000.h37
-rw-r--r--drivers/net/e1000/e1000_ethtool.c89
-rw-r--r--drivers/net/e1000/e1000_hw.c356
-rw-r--r--drivers/net/e1000/e1000_hw.h1
-rw-r--r--drivers/net/e1000/e1000_main.c429
-rw-r--r--drivers/net/e1000/e1000_osdep.h14
-rw-r--r--drivers/net/e1000/e1000_param.c112
-rw-r--r--drivers/net/e1000e/82571.c29
-rw-r--r--drivers/net/e1000e/defines.h9
-rw-r--r--drivers/net/e1000e/e1000.h26
-rw-r--r--drivers/net/e1000e/es2lan.c11
-rw-r--r--drivers/net/e1000e/ethtool.c48
-rw-r--r--drivers/net/e1000e/hw.h5
-rw-r--r--drivers/net/e1000e/ich8lan.c391
-rw-r--r--drivers/net/e1000e/lib.c60
-rw-r--r--drivers/net/e1000e/netdev.c866
-rw-r--r--drivers/net/e1000e/param.c25
-rw-r--r--drivers/net/e1000e/phy.c21
-rw-r--r--drivers/net/e2100.c1
-rw-r--r--drivers/net/eepro.c13
-rw-r--r--drivers/net/eexpress.c11
-rw-r--r--drivers/net/ehea/ehea.h2
-rw-r--r--drivers/net/ehea/ehea_main.c99
-rw-r--r--drivers/net/ehea/ehea_qmr.c43
-rw-r--r--drivers/net/ehea/ehea_qmr.h14
-rw-r--r--drivers/net/enc28j60.c2
-rw-r--r--drivers/net/enic/Makefile2
-rw-r--r--drivers/net/enic/cq_enet_desc.h12
-rw-r--r--drivers/net/enic/enic.h12
-rw-r--r--drivers/net/enic/enic_main.c354
-rw-r--r--drivers/net/enic/enic_res.c5
-rw-r--r--drivers/net/enic/enic_res.h1
-rw-r--r--drivers/net/enic/vnic_dev.c110
-rw-r--r--drivers/net/enic/vnic_dev.h10
-rw-r--r--drivers/net/enic/vnic_rq.c4
-rw-r--r--drivers/net/enic/vnic_vic.c73
-rw-r--r--drivers/net/enic/vnic_vic.h59
-rw-r--r--drivers/net/enic/vnic_wq.c4
-rw-r--r--drivers/net/epic100.c13
-rw-r--r--drivers/net/eql.c2
-rw-r--r--drivers/net/es3210.c2
-rw-r--r--drivers/net/eth16i.c5
-rw-r--r--drivers/net/ethoc.c42
-rw-r--r--drivers/net/ewrk3.c14
-rw-r--r--drivers/net/fealnx.c9
-rw-r--r--drivers/net/fec.c1162
-rw-r--r--drivers/net/fec.h2
-rw-r--r--drivers/net/fec_mpc52xx.c28
-rw-r--r--drivers/net/fec_mpc52xx_phy.c11
-rw-r--r--drivers/net/forcedeth.c251
-rw-r--r--drivers/net/fs_enet/fs_enet-main.c17
-rw-r--r--drivers/net/fs_enet/mac-fcc.c14
-rw-r--r--drivers/net/fs_enet/mac-fec.c10
-rw-r--r--drivers/net/fs_enet/mac-scc.c12
-rw-r--r--drivers/net/fs_enet/mii-bitbang.c7
-rw-r--r--drivers/net/fs_enet/mii-fec.c13
-rw-r--r--drivers/net/fsl_pq_mdio.c15
-rw-r--r--drivers/net/gianfar.c231
-rw-r--r--drivers/net/gianfar.h8
-rw-r--r--drivers/net/greth.c10
-rw-r--r--drivers/net/hamachi.c11
-rw-r--r--drivers/net/hamradio/baycom_ser_fdx.c2
-rw-r--r--drivers/net/hamradio/scc.c1
-rw-r--r--drivers/net/hamradio/yam.c3
-rw-r--r--drivers/net/hp-plus.c4
-rw-r--r--drivers/net/hp.c3
-rw-r--r--drivers/net/hp100.c16
-rw-r--r--drivers/net/hydra.c1
-rw-r--r--drivers/net/ibm_newemac/core.c33
-rw-r--r--drivers/net/ibm_newemac/debug.c9
-rw-r--r--drivers/net/ibm_newemac/debug.h4
-rw-r--r--drivers/net/ibm_newemac/mal.c36
-rw-r--r--drivers/net/ibm_newemac/rgmii.c20
-rw-r--r--drivers/net/ibm_newemac/tah.c15
-rw-r--r--drivers/net/ibm_newemac/zmii.c17
-rw-r--r--drivers/net/ibmlana.c8
-rw-r--r--drivers/net/ibmveth.c24
-rw-r--r--drivers/net/ifb.c1
-rw-r--r--drivers/net/igb/e1000_82575.c35
-rw-r--r--drivers/net/igb/e1000_82575.h9
-rw-r--r--drivers/net/igb/e1000_defines.h5
-rw-r--r--drivers/net/igb/e1000_hw.h17
-rw-r--r--drivers/net/igb/e1000_mac.c27
-rw-r--r--drivers/net/igb/igb.h9
-rw-r--r--drivers/net/igb/igb_ethtool.c58
-rw-r--r--drivers/net/igb/igb_main.c613
-rw-r--r--drivers/net/igbvf/ethtool.c2
-rw-r--r--drivers/net/igbvf/netdev.c88
-rw-r--r--drivers/net/ioc3-eth.c7
-rw-r--r--drivers/net/ipg.c11
-rw-r--r--drivers/net/ipg.h109
-rw-r--r--drivers/net/irda/Kconfig6
-rw-r--r--drivers/net/irda/Makefile1
-rw-r--r--drivers/net/irda/ali-ircc.c32
-rw-r--r--drivers/net/irda/au1k_ir.c1
-rw-r--r--drivers/net/irda/bfin_sir.c8
-rw-r--r--drivers/net/irda/donauboe.c2
-rw-r--r--drivers/net/irda/irda-usb.c2
-rw-r--r--drivers/net/irda/mcs7780.c4
-rw-r--r--drivers/net/irda/pxaficp_ir.c1
-rw-r--r--drivers/net/irda/sa1100_ir.c2
-rw-r--r--drivers/net/irda/sh_irda.c865
-rw-r--r--drivers/net/irda/sh_sir.c12
-rw-r--r--drivers/net/irda/sir_dev.c1
-rw-r--r--drivers/net/irda/smsc-ircc2.c3
-rw-r--r--drivers/net/irda/via-ircc.h2
-rw-r--r--drivers/net/irda/vlsi_ir.c5
-rw-r--r--drivers/net/irda/w83977af_ir.c2
-rw-r--r--drivers/net/iseries_veth.c6
-rw-r--r--drivers/net/ixgb/ixgb.h8
-rw-r--r--drivers/net/ixgb/ixgb_ee.c24
-rw-r--r--drivers/net/ixgb/ixgb_hw.c164
-rw-r--r--drivers/net/ixgb/ixgb_hw.h12
-rw-r--r--drivers/net/ixgb/ixgb_main.c159
-rw-r--r--drivers/net/ixgb/ixgb_osdep.h16
-rw-r--r--drivers/net/ixgb/ixgb_param.c31
-rw-r--r--drivers/net/ixgbe/ixgbe.h6
-rw-r--r--drivers/net/ixgbe/ixgbe_82598.c9
-rw-r--r--drivers/net/ixgbe/ixgbe_82599.c419
-rw-r--r--drivers/net/ixgbe/ixgbe_common.c539
-rw-r--r--drivers/net/ixgbe/ixgbe_common.h22
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_nl.c1
-rw-r--r--drivers/net/ixgbe/ixgbe_ethtool.c146
-rw-r--r--drivers/net/ixgbe/ixgbe_fcoe.c17
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c723
-rw-r--r--drivers/net/ixgbe/ixgbe_phy.c72
-rw-r--r--drivers/net/ixgbe/ixgbe_phy.h6
-rw-r--r--drivers/net/ixgbe/ixgbe_sriov.c137
-rw-r--r--drivers/net/ixgbe/ixgbe_sriov.h8
-rw-r--r--drivers/net/ixgbe/ixgbe_type.h54
-rw-r--r--drivers/net/ixgbevf/defines.h12
-rw-r--r--drivers/net/ixgbevf/ixgbevf_main.c125
-rw-r--r--drivers/net/ixgbevf/vf.c27
-rw-r--r--drivers/net/ixgbevf/vf.h4
-rw-r--r--drivers/net/ixp2000/ixpdev.c2
-rw-r--r--drivers/net/jme.c12
-rw-r--r--drivers/net/korina.c12
-rw-r--r--drivers/net/ks8842.c61
-rw-r--r--drivers/net/ks8851.c448
-rw-r--r--drivers/net/ks8851.h14
-rw-r--r--drivers/net/ks8851_mll.c63
-rw-r--r--drivers/net/ksz884x.c86
-rw-r--r--drivers/net/lance.c4
-rw-r--r--drivers/net/lib82596.c11
-rw-r--r--drivers/net/lib8390.c22
-rw-r--r--drivers/net/ll_temac.h19
-rw-r--r--drivers/net/ll_temac_main.c253
-rw-r--r--drivers/net/lne390.c2
-rw-r--r--drivers/net/lp486e.c8
-rw-r--r--drivers/net/mac8390.c2
-rw-r--r--drivers/net/mac89x0.c1
-rw-r--r--drivers/net/macb.c9
-rw-r--r--drivers/net/mace.c6
-rw-r--r--drivers/net/macmace.c7
-rw-r--r--drivers/net/macvlan.c32
-rw-r--r--drivers/net/macvtap.c46
-rw-r--r--drivers/net/meth.c4
-rw-r--r--drivers/net/mlx4/en_ethtool.c2
-rw-r--r--drivers/net/mlx4/en_netdev.c53
-rw-r--r--drivers/net/mlx4/eq.c2
-rw-r--r--drivers/net/mlx4/icm.c36
-rw-r--r--drivers/net/mlx4/mlx4.h1
-rw-r--r--drivers/net/mlx4/mlx4_en.h3
-rw-r--r--drivers/net/mv643xx_eth.c10
-rw-r--r--drivers/net/myri10ge/myri10ge.c54
-rw-r--r--drivers/net/myri_sbus.c11
-rw-r--r--drivers/net/natsemi.c10
-rw-r--r--drivers/net/ne-h8300.c1
-rw-r--r--drivers/net/ne.c1
-rw-r--r--drivers/net/ne2.c1
-rw-r--r--drivers/net/ne2k-pci.c1
-rw-r--r--drivers/net/ne3210.c2
-rw-r--r--drivers/net/netconsole.c15
-rw-r--r--drivers/net/netx-eth.c1
-rw-r--r--drivers/net/netxen/netxen_nic.h6
-rw-r--r--drivers/net/netxen/netxen_nic_ethtool.c9
-rw-r--r--drivers/net/netxen/netxen_nic_hdr.h8
-rw-r--r--drivers/net/netxen/netxen_nic_hw.c131
-rw-r--r--drivers/net/netxen/netxen_nic_init.c169
-rw-r--r--drivers/net/netxen/netxen_nic_main.c85
-rw-r--r--drivers/net/ni5010.c5
-rw-r--r--drivers/net/ni52.c13
-rw-r--r--drivers/net/ni65.c5
-rw-r--r--drivers/net/niu.c74
-rw-r--r--drivers/net/niu.h7
-rw-r--r--drivers/net/octeon/octeon_mgmt.c66
-rw-r--r--drivers/net/pasemi_mac.c2
-rw-r--r--drivers/net/pci-skeleton.c7
-rw-r--r--drivers/net/pcmcia/3c574_cs.c21
-rw-r--r--drivers/net/pcmcia/3c589_cs.c303
-rw-r--r--drivers/net/pcmcia/axnet_cs.c31
-rw-r--r--drivers/net/pcmcia/com20020_cs.c29
-rw-r--r--drivers/net/pcmcia/fmvj18x_cs.c27
-rw-r--r--drivers/net/pcmcia/ibmtr_cs.c18
-rw-r--r--drivers/net/pcmcia/nmclan_cs.c26
-rw-r--r--drivers/net/pcmcia/pcnet_cs.c16
-rw-r--r--drivers/net/pcmcia/smc91c92_cs.c31
-rw-r--r--drivers/net/pcmcia/xirc2ps_cs.c45
-rw-r--r--drivers/net/pcnet32.c15
-rw-r--r--drivers/net/phy/bcm63xx.c8
-rw-r--r--drivers/net/phy/broadcom.c16
-rw-r--r--drivers/net/phy/cicada.c8
-rw-r--r--drivers/net/phy/davicom.c9
-rw-r--r--drivers/net/phy/et1011c.c7
-rw-r--r--drivers/net/phy/icplus.c7
-rw-r--r--drivers/net/phy/lxt.c8
-rw-r--r--drivers/net/phy/marvell.c13
-rw-r--r--drivers/net/phy/mdio-bitbang.c60
-rw-r--r--drivers/net/phy/mdio-gpio.c13
-rw-r--r--drivers/net/phy/mdio_bus.c4
-rw-r--r--drivers/net/phy/micrel.c10
-rw-r--r--drivers/net/phy/national.c10
-rw-r--r--drivers/net/phy/phy_device.c12
-rw-r--r--drivers/net/phy/qsemi.c7
-rw-r--r--drivers/net/phy/realtek.c7
-rw-r--r--drivers/net/phy/smsc.c11
-rw-r--r--drivers/net/phy/ste10Xp.c8
-rw-r--r--drivers/net/phy/vitesse.c8
-rw-r--r--drivers/net/plip.c4
-rw-r--r--drivers/net/ppp_generic.c23
-rw-r--r--drivers/net/pppoe.c10
-rw-r--r--drivers/net/pppol2tp.c2680
-rw-r--r--drivers/net/ps3_gelic_net.c13
-rw-r--r--drivers/net/ps3_gelic_wireless.c76
-rw-r--r--drivers/net/qla3xxx.c72
-rw-r--r--drivers/net/qla3xxx.h8
-rw-r--r--drivers/net/qlcnic/qlcnic.h40
-rw-r--r--drivers/net/qlcnic/qlcnic_ctx.c3
-rw-r--r--drivers/net/qlcnic/qlcnic_ethtool.c34
-rw-r--r--drivers/net/qlcnic/qlcnic_hdr.h60
-rw-r--r--drivers/net/qlcnic/qlcnic_hw.c136
-rw-r--r--drivers/net/qlcnic/qlcnic_init.c101
-rw-r--r--drivers/net/qlcnic/qlcnic_main.c447
-rw-r--r--drivers/net/qlge/qlge.h8
-rw-r--r--drivers/net/qlge/qlge_dbg.c4
-rw-r--r--drivers/net/qlge/qlge_ethtool.c2
-rw-r--r--drivers/net/qlge/qlge_main.c64
-rw-r--r--drivers/net/r6040.c46
-rw-r--r--drivers/net/r8169.c158
-rw-r--r--drivers/net/rrunner.c1
-rw-r--r--drivers/net/s2io.c15
-rw-r--r--drivers/net/s6gmac.c3
-rw-r--r--drivers/net/sb1000.c5
-rw-r--r--drivers/net/sb1250-mac.c275
-rw-r--r--drivers/net/sc92031.c8
-rw-r--r--drivers/net/seeq8005.c4
-rw-r--r--drivers/net/sfc/efx.c137
-rw-r--r--drivers/net/sfc/efx.h4
-rw-r--r--drivers/net/sfc/ethtool.c6
-rw-r--r--drivers/net/sfc/falcon.c16
-rw-r--r--drivers/net/sfc/falcon_xmac.c22
-rw-r--r--drivers/net/sfc/mcdi.c32
-rw-r--r--drivers/net/sfc/mcdi_mac.c25
-rw-r--r--drivers/net/sfc/mcdi_pcol.h71
-rw-r--r--drivers/net/sfc/mcdi_phy.c152
-rw-r--r--drivers/net/sfc/net_driver.h76
-rw-r--r--drivers/net/sfc/nic.c114
-rw-r--r--drivers/net/sfc/nic.h3
-rw-r--r--drivers/net/sfc/selftest.c8
-rw-r--r--drivers/net/sfc/selftest.h4
-rw-r--r--drivers/net/sfc/siena.c19
-rw-r--r--drivers/net/sfc/tx.c61
-rw-r--r--drivers/net/sfc/workarounds.h2
-rw-r--r--drivers/net/sgiseeq.c6
-rw-r--r--drivers/net/sh_eth.c5
-rw-r--r--drivers/net/sis190.c6
-rw-r--r--drivers/net/sis900.c25
-rw-r--r--drivers/net/skfp/fplustm.c2
-rw-r--r--drivers/net/skfp/pcmplc.c4
-rw-r--r--drivers/net/skfp/skfddi.c15
-rw-r--r--drivers/net/skfp/smt.c2
-rw-r--r--drivers/net/skfp/srf.c2
-rw-r--r--drivers/net/skge.c46
-rw-r--r--drivers/net/skge.h4
-rw-r--r--drivers/net/sky2.c215
-rw-r--r--drivers/net/sky2.h41
-rw-r--r--drivers/net/slhc.c1
-rw-r--r--drivers/net/slip.c4
-rw-r--r--drivers/net/smc-mca.c1
-rw-r--r--drivers/net/smc-ultra.c1
-rw-r--r--drivers/net/smc-ultra32.c1
-rw-r--r--drivers/net/smc911x.c21
-rw-r--r--drivers/net/smc9194.c61
-rw-r--r--drivers/net/smc91x.c12
-rw-r--r--drivers/net/smsc911x.c11
-rw-r--r--drivers/net/smsc9420.c8
-rw-r--r--drivers/net/sonic.c10
-rw-r--r--drivers/net/spider_net.c8
-rw-r--r--drivers/net/starfire.c16
-rw-r--r--drivers/net/stmmac/Makefile2
-rw-r--r--drivers/net/stmmac/common.h21
-rw-r--r--drivers/net/stmmac/dwmac100.c538
-rw-r--r--drivers/net/stmmac/dwmac100.h5
-rw-r--r--drivers/net/stmmac/dwmac1000.h12
-rw-r--r--drivers/net/stmmac/dwmac1000_core.c41
-rw-r--r--drivers/net/stmmac/dwmac1000_dma.c338
-rw-r--r--drivers/net/stmmac/dwmac100_core.c196
-rw-r--r--drivers/net/stmmac/dwmac100_dma.c134
-rw-r--r--drivers/net/stmmac/dwmac_dma.h1
-rw-r--r--drivers/net/stmmac/dwmac_lib.c19
-rw-r--r--drivers/net/stmmac/enh_desc.c337
-rw-r--r--drivers/net/stmmac/norm_desc.c236
-rw-r--r--drivers/net/stmmac/stmmac.h10
-rw-r--r--drivers/net/stmmac/stmmac_ethtool.c7
-rw-r--r--drivers/net/stmmac/stmmac_main.c32
-rw-r--r--drivers/net/stmmac/stmmac_timer.c6
-rw-r--r--drivers/net/stnic.c1
-rw-r--r--drivers/net/sun3_82586.c13
-rw-r--r--drivers/net/sun3lance.c8
-rw-r--r--drivers/net/sunbmac.c25
-rw-r--r--drivers/net/sundance.c14
-rw-r--r--drivers/net/sungem.c9
-rw-r--r--drivers/net/sunhme.c37
-rw-r--r--drivers/net/sunlance.c24
-rw-r--r--drivers/net/sunqe.c20
-rw-r--r--drivers/net/sunvnet.c9
-rw-r--r--drivers/net/tc35815.c8
-rw-r--r--drivers/net/tehuti.c10
-rw-r--r--drivers/net/tg3.c839
-rw-r--r--drivers/net/tg3.h17
-rw-r--r--drivers/net/tlan.c13
-rw-r--r--drivers/net/tokenring/3c359.c112
-rw-r--r--drivers/net/tokenring/ibmtr.c13
-rw-r--r--drivers/net/tokenring/lanstreamer.c58
-rw-r--r--drivers/net/tokenring/madgemc.c12
-rw-r--r--drivers/net/tokenring/olympic.c74
-rw-r--r--drivers/net/tokenring/smctr.c4
-rw-r--r--drivers/net/tokenring/tms380tr.c65
-rw-r--r--drivers/net/tsi108_eth.c16
-rw-r--r--drivers/net/tulip/de2104x.c13
-rw-r--r--drivers/net/tulip/de4x5.c87
-rw-r--r--drivers/net/tulip/dmfe.c17
-rw-r--r--drivers/net/tulip/media.c2
-rw-r--r--drivers/net/tulip/pnic.c2
-rw-r--r--drivers/net/tulip/tulip_core.c31
-rw-r--r--drivers/net/tulip/uli526x.c10
-rw-r--r--drivers/net/tulip/winbond-840.c18
-rw-r--r--drivers/net/tulip/xircom_cb.c6
-rw-r--r--drivers/net/tun.c58
-rw-r--r--drivers/net/typhoon.c8
-rw-r--r--drivers/net/ucc_geth.c21
-rw-r--r--drivers/net/usb/asix.c53
-rw-r--r--drivers/net/usb/catc.c6
-rw-r--r--drivers/net/usb/cdc_ether.c113
-rw-r--r--drivers/net/usb/dm9601.c9
-rw-r--r--drivers/net/usb/hso.c7
-rw-r--r--drivers/net/usb/ipheth.c24
-rw-r--r--drivers/net/usb/kaweth.c13
-rw-r--r--drivers/net/usb/mcs7830.c10
-rw-r--r--drivers/net/usb/pegasus.c9
-rw-r--r--drivers/net/usb/pegasus.h2
-rw-r--r--drivers/net/usb/rndis_host.c18
-rw-r--r--drivers/net/usb/smsc75xx.c6
-rw-r--r--drivers/net/usb/smsc95xx.c6
-rw-r--r--drivers/net/usb/usbnet.c15
-rw-r--r--drivers/net/veth.c1
-rw-r--r--drivers/net/via-rhine.c10
-rw-r--r--drivers/net/via-velocity.c121
-rw-r--r--drivers/net/via-velocity.h77
-rw-r--r--drivers/net/virtio_net.c95
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c11
-rw-r--r--drivers/net/vxge/vxge-config.c41
-rw-r--r--drivers/net/vxge/vxge-config.h34
-rw-r--r--drivers/net/vxge/vxge-ethtool.c5
-rw-r--r--drivers/net/vxge/vxge-main.c245
-rw-r--r--drivers/net/vxge/vxge-main.h6
-rw-r--r--drivers/net/vxge/vxge-traffic.c79
-rw-r--r--drivers/net/vxge/vxge-traffic.h50
-rw-r--r--drivers/net/vxge/vxge-version.h4
-rw-r--r--drivers/net/wan/cycx_x25.c13
-rw-r--r--drivers/net/wan/dscc4.c2
-rw-r--r--drivers/net/wan/hd64570.c1
-rw-r--r--drivers/net/wan/hd64572.c1
-rw-r--r--drivers/net/wan/hdlc_x25.c12
-rw-r--r--drivers/net/wan/ixp4xx_hss.c1
-rw-r--r--drivers/net/wan/lapbether.c12
-rw-r--r--drivers/net/wan/lmc/lmc_main.c6
-rw-r--r--drivers/net/wan/pc300_drv.c5
-rw-r--r--drivers/net/wan/pc300_tty.c2
-rw-r--r--drivers/net/wan/sdla.c2
-rw-r--r--drivers/net/wan/wanxl.c1
-rw-r--r--drivers/net/wan/x25_asy.c12
-rw-r--r--drivers/net/wd.c1
-rw-r--r--drivers/net/wimax/i2400m/control.c27
-rw-r--r--drivers/net/wimax/i2400m/driver.c167
-rw-r--r--drivers/net/wimax/i2400m/i2400m-sdio.h5
-rw-r--r--drivers/net/wimax/i2400m/i2400m.h82
-rw-r--r--drivers/net/wimax/i2400m/netdev.c14
-rw-r--r--drivers/net/wimax/i2400m/rx.c116
-rw-r--r--drivers/net/wimax/i2400m/sdio-rx.c2
-rw-r--r--drivers/net/wimax/i2400m/sdio-tx.c35
-rw-r--r--drivers/net/wimax/i2400m/sdio.c7
-rw-r--r--drivers/net/wimax/i2400m/tx.c155
-rw-r--r--drivers/net/wimax/i2400m/usb-notif.c1
-rw-r--r--drivers/net/wimax/i2400m/usb.c14
-rw-r--r--drivers/net/wireless/Kconfig92
-rw-r--r--drivers/net/wireless/adm8211.c12
-rw-r--r--drivers/net/wireless/airo.c52
-rw-r--r--drivers/net/wireless/airo_cs.c72
-rw-r--r--drivers/net/wireless/at76c50x-usb.c2
-rw-r--r--drivers/net/wireless/ath/Kconfig2
-rw-r--r--drivers/net/wireless/ath/ar9170/ar9170.h52
-rw-r--r--drivers/net/wireless/ath/ar9170/cmd.h2
-rw-r--r--drivers/net/wireless/ath/ar9170/eeprom.h4
-rw-r--r--drivers/net/wireless/ath/ar9170/hw.h1
-rw-r--r--drivers/net/wireless/ath/ar9170/main.c587
-rw-r--r--drivers/net/wireless/ath/ar9170/usb.c36
-rw-r--r--drivers/net/wireless/ath/ar9170/usb.h1
-rw-r--r--drivers/net/wireless/ath/ath.h27
-rw-r--r--drivers/net/wireless/ath/ath5k/Makefile1
-rw-r--r--drivers/net/wireless/ath/ath5k/ani.c744
-rw-r--r--drivers/net/wireless/ath/ath5k/ani.h104
-rw-r--r--drivers/net/wireless/ath/ath5k/ath5k.h313
-rw-r--r--drivers/net/wireless/ath/ath5k/attach.c7
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c336
-rw-r--r--drivers/net/wireless/ath/ath5k/base.h39
-rw-r--r--drivers/net/wireless/ath/ath5k/caps.c9
-rw-r--r--drivers/net/wireless/ath/ath5k/debug.c382
-rw-r--r--drivers/net/wireless/ath/ath5k/debug.h4
-rw-r--r--drivers/net/wireless/ath/ath5k/desc.c19
-rw-r--r--drivers/net/wireless/ath/ath5k/desc.h35
-rw-r--r--drivers/net/wireless/ath/ath5k/eeprom.c4
-rw-r--r--drivers/net/wireless/ath/ath5k/eeprom.h88
-rw-r--r--drivers/net/wireless/ath/ath5k/pcu.c379
-rw-r--r--drivers/net/wireless/ath/ath5k/phy.c77
-rw-r--r--drivers/net/wireless/ath/ath5k/qcu.c17
-rw-r--r--drivers/net/wireless/ath/ath5k/reg.h42
-rw-r--r--drivers/net/wireless/ath/ath5k/reset.c41
-rw-r--r--drivers/net/wireless/ath/ath9k/Kconfig21
-rw-r--r--drivers/net/wireless/ath/ath9k/Makefile26
-rw-r--r--drivers/net/wireless/ath/ath9k/ahb.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.c217
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_initvals.h742
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_phy.c1374
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9001_initvals.h1254
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_calib.c1000
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_hw.c598
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_initvals.h (renamed from drivers/net/wireless/ath/ath9k/initvals.h)2292
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_mac.c480
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_phy.c535
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_phy.h572
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_calib.c802
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c1838
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.h323
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_hw.c205
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_initvals.h1784
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mac.c614
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mac.h120
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c1134
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.h847
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h28
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c109
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.c1024
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.h19
-rw-r--r--drivers/net/wireless/ath/ath9k/common.c392
-rw-r--r--drivers/net/wireless/ath/ath9k/common.h17
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c297
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h21
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.c11
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.h25
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_4k.c19
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_9287.c11
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_def.c18
-rw-r--r--drivers/net/wireless/ath/ath9k/gpio.c17
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c1014
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.h104
-rw-r--r--drivers/net/wireless/ath/ath9k/htc.h465
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_beacon.c255
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c834
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c1775
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c707
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_hst.c480
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_hst.h245
-rw-r--r--drivers/net/wireless/ath/ath9k/hw-ops.h280
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c1913
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h275
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c88
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c571
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.h93
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c130
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/phy.c978
-rw-r--r--drivers/net/wireless/ath/ath9k/phy.h596
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.c21
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.h11
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c550
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h183
-rw-r--r--drivers/net/wireless/ath/ath9k/virtual.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/wmi.c336
-rw-r--r--drivers/net/wireless/ath/ath9k/wmi.h139
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c591
-rw-r--r--drivers/net/wireless/ath/debug.h1
-rw-r--r--drivers/net/wireless/ath/hw.c4
-rw-r--r--drivers/net/wireless/ath/regd.c4
-rw-r--r--drivers/net/wireless/atmel.c1
-rw-r--r--drivers/net/wireless/atmel_cs.c70
-rw-r--r--drivers/net/wireless/b43/b43.h1
-rw-r--r--drivers/net/wireless/b43/main.c26
-rw-r--r--drivers/net/wireless/b43/pcmcia.c5
-rw-r--r--drivers/net/wireless/b43/phy_n.c479
-rw-r--r--drivers/net/wireless/b43/phy_n.h21
-rw-r--r--drivers/net/wireless/b43/tables_nphy.c22
-rw-r--r--drivers/net/wireless/b43/tables_nphy.h37
-rw-r--r--drivers/net/wireless/b43/xmit.c1
-rw-r--r--drivers/net/wireless/b43legacy/main.c21
-rw-r--r--drivers/net/wireless/b43legacy/xmit.c1
-rw-r--r--drivers/net/wireless/hostap/hostap_80211_rx.c3
-rw-r--r--drivers/net/wireless/hostap/hostap_ap.c2
-rw-r--r--drivers/net/wireless/hostap/hostap_cs.c38
-rw-r--r--drivers/net/wireless/hostap/hostap_download.c2
-rw-r--r--drivers/net/wireless/hostap/hostap_ioctl.c3
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.c60
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.c190
-rw-r--r--drivers/net/wireless/ipw2x00/libipw.h14
-rw-r--r--drivers/net/wireless/ipw2x00/libipw_module.c13
-rw-r--r--drivers/net/wireless/ipw2x00/libipw_rx.c1
-rw-r--r--drivers/net/wireless/iwlwifi/Makefile6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-1000.c102
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-debugfs.c500
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-debugfs.h60
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-hw.h7
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-rs.c93
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.c361
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.h26
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965-hw.h24
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965.c198
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000-hw.h33
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c1493
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c331
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c850
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-debugfs.h56
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c276
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-hw.h118
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-ict.c308
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-lib.c1530
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.c208
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.h8
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-tx.c1340
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-ucode.c425
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c1021
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.h181
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-calib.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-commands.h120
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.c1021
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.h136
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-csr.h1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debugfs.c912
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-dev.h288
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.c1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.c7
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.h53
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-hcmd.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-helpers.h3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-led.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-power.c15
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-prph.h94
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-rx.c826
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-scan.c544
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.c901
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.h76
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-tx.c1074
-rw-r--r--drivers/net/wireless/iwlwifi/iwl3945-base.c409
-rw-r--r--drivers/net/wireless/iwmc3200wifi/Kconfig9
-rw-r--r--drivers/net/wireless/iwmc3200wifi/Makefile5
-rw-r--r--drivers/net/wireless/iwmc3200wifi/bus.h2
-rw-r--r--drivers/net/wireless/iwmc3200wifi/cfg80211.c17
-rw-r--r--drivers/net/wireless/iwmc3200wifi/commands.c14
-rw-r--r--drivers/net/wireless/iwmc3200wifi/commands.h1
-rw-r--r--drivers/net/wireless/iwmc3200wifi/debug.h7
-rw-r--r--drivers/net/wireless/iwmc3200wifi/debugfs.c123
-rw-r--r--drivers/net/wireless/iwmc3200wifi/hal.c15
-rw-r--r--drivers/net/wireless/iwmc3200wifi/hal.h5
-rw-r--r--drivers/net/wireless/iwmc3200wifi/iwm.h3
-rw-r--r--drivers/net/wireless/iwmc3200wifi/main.c9
-rw-r--r--drivers/net/wireless/iwmc3200wifi/rx.c79
-rw-r--r--drivers/net/wireless/iwmc3200wifi/sdio.c19
-rw-r--r--drivers/net/wireless/iwmc3200wifi/trace.c3
-rw-r--r--drivers/net/wireless/iwmc3200wifi/trace.h283
-rw-r--r--drivers/net/wireless/iwmc3200wifi/tx.c12
-rw-r--r--drivers/net/wireless/iwmc3200wifi/umac.h2
-rw-r--r--drivers/net/wireless/libertas/assoc.c22
-rw-r--r--drivers/net/wireless/libertas/cfg.c1
-rw-r--r--drivers/net/wireless/libertas/debugfs.c5
-rw-r--r--drivers/net/wireless/libertas/dev.h1
-rw-r--r--drivers/net/wireless/libertas/if_cs.c21
-rw-r--r--drivers/net/wireless/libertas/if_sdio.c127
-rw-r--r--drivers/net/wireless/libertas/if_usb.c4
-rw-r--r--drivers/net/wireless/libertas/main.c15
-rw-r--r--drivers/net/wireless/libertas/rx.c51
-rw-r--r--drivers/net/wireless/libertas/tx.c2
-rw-r--r--drivers/net/wireless/libertas/wext.c4
-rw-r--r--drivers/net/wireless/libertas_tf/cmd.c203
-rw-r--r--drivers/net/wireless/libertas_tf/deb_defs.h104
-rw-r--r--drivers/net/wireless/libertas_tf/if_usb.c252
-rw-r--r--drivers/net/wireless/libertas_tf/libertas_tf.h2
-rw-r--r--drivers/net/wireless/libertas_tf/main.c106
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c96
-rw-r--r--drivers/net/wireless/mwl8k.c30
-rw-r--r--drivers/net/wireless/orinoco/Kconfig20
-rw-r--r--drivers/net/wireless/orinoco/Makefile4
-rw-r--r--drivers/net/wireless/orinoco/airport.c8
-rw-r--r--drivers/net/wireless/orinoco/cfg.c91
-rw-r--r--drivers/net/wireless/orinoco/fw.c10
-rw-r--r--drivers/net/wireless/orinoco/hermes.c286
-rw-r--r--drivers/net/wireless/orinoco/hermes.h62
-rw-r--r--drivers/net/wireless/orinoco/hermes_dld.c243
-rw-r--r--drivers/net/wireless/orinoco/hw.c102
-rw-r--r--drivers/net/wireless/orinoco/hw.h1
-rw-r--r--drivers/net/wireless/orinoco/main.c307
-rw-r--r--drivers/net/wireless/orinoco/main.h12
-rw-r--r--drivers/net/wireless/orinoco/orinoco.h38
-rw-r--r--drivers/net/wireless/orinoco/orinoco_cs.c110
-rw-r--r--drivers/net/wireless/orinoco/orinoco_nortel.c2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_pci.c2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_plx.c2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_tmd.c2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_usb.c1795
-rw-r--r--drivers/net/wireless/orinoco/scan.c4
-rw-r--r--drivers/net/wireless/orinoco/spectrum_cs.c32
-rw-r--r--drivers/net/wireless/orinoco/wext.c273
-rw-r--r--drivers/net/wireless/p54/main.c2
-rw-r--r--drivers/net/wireless/p54/p54pci.c10
-rw-r--r--drivers/net/wireless/p54/p54usb.c1
-rw-r--r--drivers/net/wireless/p54/txrx.c3
-rw-r--r--drivers/net/wireless/prism54/isl_ioctl.c2
-rw-r--r--drivers/net/wireless/prism54/islpci_dev.c18
-rw-r--r--drivers/net/wireless/prism54/islpci_eth.c10
-rw-r--r--drivers/net/wireless/prism54/islpci_mgt.c8
-rw-r--r--drivers/net/wireless/prism54/oid_mgt.c2
-rw-r--r--drivers/net/wireless/ray_cs.c257
-rw-r--r--drivers/net/wireless/ray_cs.h1
-rw-r--r--drivers/net/wireless/rndis_wlan.c390
-rw-r--r--drivers/net/wireless/rt2x00/Kconfig4
-rw-r--r--drivers/net/wireless/rt2x00/rt2400pci.c65
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.c63
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.c130
-rw-r--r--drivers/net/wireless/rt2x00/rt2800.h119
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c676
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.h3
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.c318
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c299
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.h40
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h35
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00crypto.c1
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00debug.c23
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c1
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dump.h3
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00firmware.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00ht.c17
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00pci.c14
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00pci.h3
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.c47
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.h11
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00reg.h10
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.c11
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.h3
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.c102
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c135
-rw-r--r--drivers/net/wireless/rtl818x/Kconfig88
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180.h11
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180_dev.c115
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187_dev.c14
-rw-r--r--drivers/net/wireless/wl12xx/Kconfig24
-rw-r--r--drivers/net/wireless/wl12xx/Makefile6
-rw-r--r--drivers/net/wireless/wl12xx/wl1251.h2
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_boot.c3
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_io.h20
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_main.c73
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_ps.c8
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_reg.h7
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_rx.c8
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_sdio.c144
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_spi.c2
-rw-r--r--drivers/net/wireless/wl12xx/wl1271.h63
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_acx.c179
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_acx.h157
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_boot.c46
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_boot.h10
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_cmd.c337
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_cmd.h27
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_conf.h488
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_debugfs.c12
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_event.c69
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_event.h8
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_init.c57
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_io.c87
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_io.h139
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_main.c1272
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_ps.c7
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_rx.c96
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_rx.h2
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_sdio.c291
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_spi.c315
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_spi.h96
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_testmode.c1
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_tx.c133
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_tx.h9
-rw-r--r--drivers/net/wireless/wl3501.h1
-rw-r--r--drivers/net/wireless/wl3501_cs.c80
-rw-r--r--drivers/net/wireless/zd1201.c12
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.c13
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.c10
-rw-r--r--drivers/net/xilinx_emaclite.c27
-rw-r--r--drivers/net/yellowfin.c13
-rw-r--r--drivers/net/znet.c2
-rw-r--r--drivers/net/zorro8390.c2
-rw-r--r--drivers/of/device.c25
-rw-r--r--drivers/of/fdt.c15
-rw-r--r--drivers/of/of_i2c.c4
-rw-r--r--drivers/of/of_mdio.c6
-rw-r--r--drivers/of/of_spi.c2
-rw-r--r--drivers/of/platform.c13
-rw-r--r--drivers/oprofile/cpu_buffer.c75
-rw-r--r--drivers/oprofile/oprof.c12
-rw-r--r--drivers/oprofile/oprof.h3
-rw-r--r--drivers/oprofile/timer_int.c78
-rw-r--r--drivers/parport/parport_amiga.c64
-rw-r--r--drivers/parport/parport_cs.c13
-rw-r--r--drivers/parport/parport_sunbpp.c7
-rw-r--r--drivers/pci/Kconfig2
-rw-r--r--drivers/pci/access.c41
-rw-r--r--drivers/pci/dmar.c82
-rw-r--r--drivers/pci/hotplug/acpiphp_ibm.c5
-rw-r--r--drivers/pci/hotplug/cpqphp_core.c5
-rw-r--r--drivers/pci/hotplug/pciehp_pci.c17
-rw-r--r--drivers/pci/intel-iommu.c129
-rw-r--r--drivers/pci/intr_remapping.c6
-rw-r--r--drivers/pci/pci-sysfs.c89
-rw-r--r--drivers/pci/pci.c6
-rw-r--r--drivers/pci/pci.h2
-rw-r--r--drivers/pci/pcie/aer/aer_inject.c2
-rw-r--r--drivers/pci/pcie/aer/aerdrv.c179
-rw-r--r--drivers/pci/pcie/aer/aerdrv.h23
-rw-r--r--drivers/pci/pcie/aer/aerdrv_acpi.c77
-rw-r--r--drivers/pci/pcie/aer/aerdrv_core.c566
-rw-r--r--drivers/pci/probe.c8
-rw-r--r--drivers/pci/quirks.c21
-rw-r--r--drivers/pci/setup-bus.c114
-rw-r--r--drivers/pci/slot.c48
-rw-r--r--drivers/pcmcia/Kconfig25
-rw-r--r--drivers/pcmcia/Makefile10
-rw-r--r--drivers/pcmcia/bfin_cf_pcmcia.c2
-rw-r--r--drivers/pcmcia/cardbus.c1
-rw-r--r--drivers/pcmcia/cistpl.c125
-rw-r--r--drivers/pcmcia/cs.c1
-rw-r--r--drivers/pcmcia/cs_internal.h22
-rw-r--r--drivers/pcmcia/ds.c34
-rw-r--r--drivers/pcmcia/electra_cf.c9
-rw-r--r--drivers/pcmcia/m8xx_pcmcia.c7
-rw-r--r--drivers/pcmcia/omap_cf.c2
-rw-r--r--drivers/pcmcia/pcmcia_cis.c356
-rw-r--r--drivers/pcmcia/pcmcia_ioctl.c40
-rw-r--r--drivers/pcmcia/pcmcia_resource.c634
-rw-r--r--drivers/pcmcia/pxa2xx_vpac270.c229
-rw-r--r--drivers/pcmcia/rsrc_iodyn.c172
-rw-r--r--drivers/pcmcia/rsrc_mgr.c112
-rw-r--r--drivers/pcmcia/rsrc_nonstatic.c164
-rw-r--r--drivers/pcmcia/yenta_socket.c7
-rw-r--r--drivers/platform/x86/Kconfig10
-rw-r--r--drivers/platform/x86/Makefile1
-rw-r--r--drivers/platform/x86/classmate-laptop.c170
-rw-r--r--drivers/platform/x86/eeepc-wmi.c2
-rw-r--r--drivers/platform/x86/fujitsu-laptop.c6
-rw-r--r--drivers/platform/x86/intel_scu_ipc.c829
-rw-r--r--drivers/platform/x86/msi-laptop.c163
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c600
-rw-r--r--drivers/platform/x86/wmi.c103
-rw-r--r--drivers/power/Kconfig15
-rw-r--r--drivers/power/Makefile2
-rw-r--r--drivers/power/ds2760_battery.c64
-rw-r--r--drivers/power/ds2782_battery.c194
-rw-r--r--drivers/power/olpc_battery.c2
-rw-r--r--drivers/power/pda_power.c10
-rw-r--r--drivers/power/power_supply.h7
-rw-r--r--drivers/power/power_supply_core.c48
-rw-r--r--drivers/power/power_supply_sysfs.c147
-rw-r--r--drivers/power/test_power.c163
-rw-r--r--drivers/power/tosa_battery.c4
-rw-r--r--drivers/power/wm831x_power.c33
-rw-r--r--drivers/power/wm97xx_battery.c3
-rw-r--r--drivers/power/z2_battery.c328
-rw-r--r--drivers/ps3/ps3-sys-manager.c2
-rw-r--r--drivers/rapidio/Kconfig24
-rw-r--r--drivers/rapidio/Makefile4
-rw-r--r--drivers/rapidio/rio-scan.c424
-rw-r--r--drivers/rapidio/rio-sysfs.c6
-rw-r--r--drivers/rapidio/rio.c433
-rw-r--r--drivers/rapidio/rio.h44
-rw-r--r--drivers/rapidio/switches/Kconfig28
-rw-r--r--drivers/rapidio/switches/Makefile9
-rw-r--r--drivers/rapidio/switches/idtcps.c137
-rw-r--r--drivers/rapidio/switches/tsi500.c20
-rw-r--r--drivers/rapidio/switches/tsi568.c146
-rw-r--r--drivers/rapidio/switches/tsi57x.c315
-rw-r--r--drivers/regulator/88pm8607.c533
-rw-r--r--drivers/regulator/ab3100.c45
-rw-r--r--drivers/regulator/bq24022.c1
-rw-r--r--drivers/regulator/core.c85
-rw-r--r--drivers/regulator/mc13783-regulator.c6
-rw-r--r--drivers/regulator/tps6507x-regulator.c373
-rw-r--r--drivers/regulator/twl-regulator.c138
-rw-r--r--drivers/rtc/Kconfig19
-rw-r--r--drivers/rtc/Makefile2
-rw-r--r--drivers/rtc/rtc-ab3100.c41
-rw-r--r--drivers/rtc/rtc-ab8500.c363
-rw-r--r--drivers/rtc/rtc-cmos.c94
-rw-r--r--drivers/rtc/rtc-davinci.c673
-rw-r--r--drivers/rtc/rtc-ds1302.c85
-rw-r--r--drivers/rtc/rtc-ds1305.c6
-rw-r--r--drivers/rtc/rtc-ds1307.c6
-rw-r--r--drivers/rtc/rtc-ds1511.c10
-rw-r--r--drivers/rtc/rtc-ds1553.c4
-rw-r--r--drivers/rtc/rtc-ds1742.c4
-rw-r--r--drivers/rtc/rtc-isl1208.c45
-rw-r--r--drivers/rtc/rtc-m41t80.c22
-rw-r--r--drivers/rtc/rtc-m48t59.c4
-rw-r--r--drivers/rtc/rtc-mxc.c25
-rw-r--r--drivers/rtc/rtc-rx8581.c6
-rw-r--r--drivers/rtc/rtc-s3c.c107
-rw-r--r--drivers/rtc/rtc-stk17ta8.c8
-rw-r--r--drivers/rtc/rtc-tx4939.c4
-rw-r--r--drivers/rtc/rtc-wm831x.c16
-rw-r--r--drivers/s390/block/dasd.c62
-rw-r--r--drivers/s390/block/dasd_3990_erp.c20
-rw-r--r--drivers/s390/block/dasd_alias.c125
-rw-r--r--drivers/s390/block/dasd_devmap.c174
-rw-r--r--drivers/s390/block/dasd_eckd.c117
-rw-r--r--drivers/s390/block/dasd_eckd.h2
-rw-r--r--drivers/s390/block/dasd_int.h50
-rw-r--r--drivers/s390/char/Kconfig3
-rw-r--r--drivers/s390/char/fs3270.c1
-rw-r--r--drivers/s390/char/keyboard.c21
-rw-r--r--drivers/s390/char/sclp_cpi_sys.c2
-rw-r--r--drivers/s390/char/vmcp.c38
-rw-r--r--drivers/s390/char/zcore.c4
-rw-r--r--drivers/s390/cio/ccwgroup.c7
-rw-r--r--drivers/s390/cio/ccwreq.c15
-rw-r--r--drivers/s390/cio/chp.c5
-rw-r--r--drivers/s390/cio/chsc_sch.c1
-rw-r--r--drivers/s390/cio/cio.c3
-rw-r--r--drivers/s390/cio/css.c8
-rw-r--r--drivers/s390/cio/ioasm.h15
-rw-r--r--drivers/s390/cio/qdio.h15
-rw-r--r--drivers/s390/cio/qdio_main.c67
-rw-r--r--drivers/s390/cio/qdio_setup.c8
-rw-r--r--drivers/s390/cio/qdio_thinint.c4
-rw-r--r--drivers/s390/crypto/zcrypt_api.c2
-rw-r--r--drivers/s390/net/ctcm_main.c6
-rw-r--r--drivers/s390/net/ctcm_mpc.c6
-rw-r--r--drivers/s390/net/lcs.c3
-rw-r--r--drivers/s390/net/qeth_core.h32
-rw-r--r--drivers/s390/net/qeth_core_main.c254
-rw-r--r--drivers/s390/net/qeth_core_mpc.h10
-rw-r--r--drivers/s390/net/qeth_core_sys.c151
-rw-r--r--drivers/s390/net/qeth_l2_main.c47
-rw-r--r--drivers/s390/net/qeth_l3_main.c108
-rw-r--r--drivers/s390/net/qeth_l3_sys.c244
-rw-r--r--drivers/s390/scsi/zfcp_aux.c7
-rw-r--r--drivers/s390/scsi/zfcp_cfdc.c1
-rw-r--r--drivers/s390/scsi/zfcp_def.h19
-rw-r--r--drivers/s390/scsi/zfcp_erp.c2
-rw-r--r--drivers/s390/scsi/zfcp_ext.h6
-rw-r--r--drivers/s390/scsi/zfcp_fc.c4
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c246
-rw-r--r--drivers/s390/scsi/zfcp_fsf.h11
-rw-r--r--drivers/s390/scsi/zfcp_qdio.c108
-rw-r--r--drivers/s390/scsi/zfcp_qdio.h104
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c23
-rw-r--r--drivers/sbus/char/bbc_envctrl.c4
-rw-r--r--drivers/sbus/char/bbc_i2c.c11
-rw-r--r--drivers/sbus/char/display7seg.c9
-rw-r--r--drivers/sbus/char/envctrl.c9
-rw-r--r--drivers/sbus/char/flash.c17
-rw-r--r--drivers/sbus/char/openprom.c44
-rw-r--r--drivers/sbus/char/uctrl.c9
-rw-r--r--drivers/scsi/3w-9xxx.c34
-rw-r--r--drivers/scsi/3w-9xxx.h9
-rw-r--r--drivers/scsi/3w-sas.c12
-rw-r--r--drivers/scsi/3w-xxxx.c34
-rw-r--r--drivers/scsi/3w-xxxx.h8
-rw-r--r--drivers/scsi/Makefile1
-rw-r--r--drivers/scsi/a2091.c398
-rw-r--r--drivers/scsi/a2091.h46
-rw-r--r--drivers/scsi/a3000.c391
-rw-r--r--drivers/scsi/a3000.h50
-rw-r--r--drivers/scsi/a4000t.c101
-rw-r--r--drivers/scsi/aacraid/aachba.c67
-rw-r--r--drivers/scsi/aacraid/aacraid.h4
-rw-r--r--drivers/scsi/aacraid/commctrl.c4
-rw-r--r--drivers/scsi/aacraid/commsup.c18
-rw-r--r--drivers/scsi/aacraid/linit.c11
-rw-r--r--drivers/scsi/arcmsr/arcmsr.h29
-rw-r--r--drivers/scsi/arcmsr/arcmsr_attr.c12
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c684
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.c5
-rw-r--r--drivers/scsi/bfa/bfa_cb_ioim_macros.h29
-rw-r--r--drivers/scsi/bfa/bfa_core.c22
-rw-r--r--drivers/scsi/bfa/bfa_ioim.c22
-rw-r--r--drivers/scsi/bfa/bfa_os_inc.h23
-rw-r--r--drivers/scsi/bfa/bfad.c21
-rw-r--r--drivers/scsi/bfa/bfad_attr.c201
-rw-r--r--drivers/scsi/bfa/bfad_drv.h4
-rw-r--r--drivers/scsi/bfa/bfad_im.c67
-rw-r--r--drivers/scsi/bfa/bfad_im.h6
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_cee.h2
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_status.h4
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c2
-rw-r--r--drivers/scsi/bnx2i/bnx2i_init.c11
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_init.c4
-rw-r--r--drivers/scsi/device_handler/scsi_dh_emc.c2
-rw-r--r--drivers/scsi/dpt_i2o.c20
-rw-r--r--drivers/scsi/fcoe/fcoe.c214
-rw-r--r--drivers/scsi/fcoe/libfcoe.c111
-rw-r--r--drivers/scsi/fnic/fnic.h4
-rw-r--r--drivers/scsi/fnic/fnic_fcs.c2
-rw-r--r--drivers/scsi/fnic/fnic_main.c2
-rw-r--r--drivers/scsi/gdth.c22
-rw-r--r--drivers/scsi/gvp11.c582
-rw-r--r--drivers/scsi/gvp11.h49
-rw-r--r--drivers/scsi/hpsa.c8
-rw-r--r--drivers/scsi/hpsa_cmd.h15
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c13
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.h1
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.c2
-rw-r--r--drivers/scsi/ipr.c236
-rw-r--r--drivers/scsi/ipr.h31
-rw-r--r--drivers/scsi/iscsi_tcp.c12
-rw-r--r--drivers/scsi/iscsi_tcp.h1
-rw-r--r--drivers/scsi/libfc/fc_disc.c8
-rw-r--r--drivers/scsi/libfc/fc_elsct.c2
-rw-r--r--drivers/scsi/libfc/fc_exch.c51
-rw-r--r--drivers/scsi/libfc/fc_fcp.c103
-rw-r--r--drivers/scsi/libfc/fc_libfc.h8
-rw-r--r--drivers/scsi/libfc/fc_lport.c58
-rw-r--r--drivers/scsi/libfc/fc_npiv.c7
-rw-r--r--drivers/scsi/libfc/fc_rport.c195
-rw-r--r--drivers/scsi/libiscsi_tcp.c2
-rw-r--r--drivers/scsi/libsas/sas_ata.c5
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c18
-rw-r--r--drivers/scsi/lpfc/lpfc.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c40
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c498
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.h3
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_disc.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c24
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c79
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h190
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h60
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c213
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c69
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c99
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c149
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c269
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h5
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c4
-rw-r--r--drivers/scsi/megaraid.c20
-rw-r--r--drivers/scsi/megaraid.h3
-rw-r--r--drivers/scsi/megaraid/megaraid_mm.c22
-rw-r--r--drivers/scsi/mpt2sas/Kconfig2
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2.h2
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h2
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_history.txt2
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_init.h2
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_ioc.h2
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_tool.h2
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.c92
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.h36
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_config.c10
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_ctl.c46
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_ctl.h2
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_debug.h2
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_scsih.c1055
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_transport.c89
-rw-r--r--drivers/scsi/mvme147.c178
-rw-r--r--drivers/scsi/mvme147.h4
-rw-r--r--drivers/scsi/mvsas/mv_64xx.c25
-rw-r--r--drivers/scsi/mvsas/mv_94xx.c10
-rw-r--r--drivers/scsi/mvsas/mv_init.c19
-rw-r--r--drivers/scsi/mvsas/mv_sas.c201
-rw-r--r--drivers/scsi/mvsas/mv_sas.h11
-rw-r--r--drivers/scsi/osst.c23
-rw-r--r--drivers/scsi/pcmcia/aha152x_stub.c9
-rw-r--r--drivers/scsi/pcmcia/fdomain_stub.c9
-rw-r--r--drivers/scsi/pcmcia/nsp_cs.c23
-rw-r--r--drivers/scsi/pcmcia/nsp_cs.h1
-rw-r--r--drivers/scsi/pcmcia/qlogic_stub.c13
-rw-r--r--drivers/scsi/pcmcia/sym53c500_cs.c9
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.c1
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.c4
-rw-r--r--drivers/scsi/pmcraid.c6
-rw-r--r--drivers/scsi/qla2xxx/Makefile3
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c804
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c1212
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.h135
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c61
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.h10
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h343
-rw-r--r--drivers/scsi/qla2xxx/qla_fw.h106
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h126
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c724
-rw-r--r--drivers/scsi/qla2xxx/qla_inline.h21
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c879
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c540
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c266
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c3636
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.h889
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c543
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c149
-rw-r--r--drivers/scsi/qla4xxx/ql4_def.h48
-rw-r--r--drivers/scsi/qla4xxx/ql4_fw.h46
-rw-r--r--drivers/scsi/qla4xxx/ql4_glbl.h8
-rw-r--r--drivers/scsi/qla4xxx/ql4_init.c374
-rw-r--r--drivers/scsi/qla4xxx/ql4_iocb.c2
-rw-r--r--drivers/scsi/qla4xxx/ql4_isr.c23
-rw-r--r--drivers/scsi/qla4xxx/ql4_mbx.c337
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c135
-rw-r--r--drivers/scsi/qla4xxx/ql4_version.h3
-rw-r--r--drivers/scsi/qlogicpti.c17
-rw-r--r--drivers/scsi/scsi.c6
-rw-r--r--drivers/scsi/scsi_debug.c89
-rw-r--r--drivers/scsi/scsi_error.c19
-rw-r--r--drivers/scsi/scsi_scan.c38
-rw-r--r--drivers/scsi/scsi_sysfs.c7
-rw-r--r--drivers/scsi/scsi_trace.c284
-rw-r--r--drivers/scsi/scsi_transport_fc.c30
-rw-r--r--drivers/scsi/sd.c25
-rw-r--r--drivers/scsi/sg.c17
-rw-r--r--drivers/scsi/st.c1
-rw-r--r--drivers/scsi/sun_esp.c23
-rw-r--r--drivers/scsi/wd33c93.c6
-rw-r--r--drivers/scsi/wd33c93.h1
-rw-r--r--drivers/scsi/zorro7xx.c1
-rw-r--r--drivers/serial/68328serial.c2
-rw-r--r--drivers/serial/8250.c4
-rw-r--r--drivers/serial/Kconfig105
-rw-r--r--drivers/serial/Makefile3
-rw-r--r--drivers/serial/altera_jtaguart.c504
-rw-r--r--drivers/serial/altera_uart.c570
-rw-r--r--drivers/serial/amba-pl011.c6
-rw-r--r--drivers/serial/apbuart.c10
-rw-r--r--drivers/serial/atmel_serial.c207
-rw-r--r--drivers/serial/bfin_sport_uart.c209
-rw-r--r--drivers/serial/bfin_sport_uart.h27
-rw-r--r--drivers/serial/cpm_uart/cpm_uart_core.c9
-rw-r--r--drivers/serial/imx.c10
-rw-r--r--drivers/serial/kgdboc.c94
-rw-r--r--drivers/serial/mpc52xx_uart.c86
-rw-r--r--drivers/serial/mpsc.c1
-rw-r--r--drivers/serial/nwpserial.c2
-rw-r--r--drivers/serial/of_serial.c12
-rw-r--r--drivers/serial/pmac_zilog.c2
-rw-r--r--drivers/serial/s5pv210.c8
-rw-r--r--drivers/serial/serial_cs.c36
-rw-r--r--drivers/serial/sh-sci.c202
-rw-r--r--drivers/serial/sunhv.c9
-rw-r--r--drivers/serial/sunsab.c13
-rw-r--r--drivers/serial/sunsu.c13
-rw-r--r--drivers/serial/sunzilog.c65
-rw-r--r--drivers/serial/timbuart.c25
-rw-r--r--drivers/serial/uartlite.c43
-rw-r--r--drivers/serial/ucc_uart.c10
-rw-r--r--drivers/sfi/sfi_acpi.c41
-rw-r--r--drivers/sfi/sfi_core.c105
-rw-r--r--drivers/sfi/sfi_core.h8
-rw-r--r--drivers/sh/Kconfig24
-rw-r--r--drivers/sh/Makefile2
-rw-r--r--drivers/sh/clk-cpg.c298
-rw-r--r--drivers/sh/clk.c545
-rw-r--r--drivers/sh/intc.c333
-rw-r--r--drivers/spi/Kconfig23
-rw-r--r--drivers/spi/Makefile2
-rw-r--r--drivers/spi/amba-pl022.c250
-rw-r--r--drivers/spi/davinci_spi.c12
-rw-r--r--drivers/spi/ep93xx_spi.c938
-rw-r--r--drivers/spi/mpc512x_psc_spi.c576
-rw-r--r--drivers/spi/mpc52xx_psc_spi.c15
-rw-r--r--drivers/spi/mpc52xx_spi.c22
-rw-r--r--drivers/spi/omap2_mcspi.c153
-rw-r--r--drivers/spi/pxa2xx_spi.c25
-rw-r--r--drivers/spi/spi_bitbang_txrx.h93
-rw-r--r--drivers/spi/spi_butterfly.c3
-rw-r--r--drivers/spi/spi_gpio.c3
-rw-r--r--drivers/spi/spi_lm70llp.c3
-rw-r--r--drivers/spi/spi_mpc8xxx.c127
-rw-r--r--drivers/spi/spi_ppc4xx.c2
-rw-r--r--drivers/spi/spi_s3c24xx_gpio.c3
-rw-r--r--drivers/spi/spi_sh_sci.c3
-rw-r--r--drivers/spi/xilinx_spi_of.c10
-rw-r--r--drivers/ssb/driver_chipcommon.c3
-rw-r--r--drivers/ssb/main.c5
-rw-r--r--drivers/ssb/pci.c14
-rw-r--r--drivers/ssb/sprom.c14
-rw-r--r--drivers/staging/Kconfig20
-rw-r--r--drivers/staging/Makefile12
-rw-r--r--drivers/staging/adis16255/Kconfig11
-rw-r--r--drivers/staging/adis16255/Makefile1
-rw-r--r--drivers/staging/adis16255/adis16255.c466
-rw-r--r--drivers/staging/adis16255/adis16255.h12
-rw-r--r--drivers/staging/arlan/Kconfig15
-rw-r--r--drivers/staging/arlan/Makefile3
-rw-r--r--drivers/staging/arlan/TODO7
-rw-r--r--drivers/staging/arlan/arlan-main.c1882
-rw-r--r--drivers/staging/arlan/arlan-proc.c1210
-rw-r--r--drivers/staging/arlan/arlan.h535
-rw-r--r--drivers/staging/asus_oled/asus_oled.c2
-rw-r--r--drivers/staging/batman-adv/CHANGELOG14
-rw-r--r--drivers/staging/batman-adv/Makefile4
-rw-r--r--drivers/staging/batman-adv/README255
-rw-r--r--drivers/staging/batman-adv/TODO23
-rw-r--r--drivers/staging/batman-adv/aggregation.c45
-rw-r--r--drivers/staging/batman-adv/aggregation.h7
-rw-r--r--drivers/staging/batman-adv/bat_sysfs.c484
-rw-r--r--drivers/staging/batman-adv/bat_sysfs.h29
-rw-r--r--drivers/staging/batman-adv/bitarray.c76
-rw-r--r--drivers/staging/batman-adv/bitarray.h2
-rw-r--r--drivers/staging/batman-adv/device.c37
-rw-r--r--drivers/staging/batman-adv/device.h2
-rw-r--r--drivers/staging/batman-adv/hard-interface.c530
-rw-r--r--drivers/staging/batman-adv/hard-interface.h20
-rw-r--r--drivers/staging/batman-adv/hash.c2
-rw-r--r--drivers/staging/batman-adv/hash.h2
-rw-r--r--drivers/staging/batman-adv/main.c60
-rw-r--r--drivers/staging/batman-adv/main.h23
-rw-r--r--drivers/staging/batman-adv/originator.c295
-rw-r--r--drivers/staging/batman-adv/originator.h7
-rw-r--r--drivers/staging/batman-adv/packet.h2
-rw-r--r--drivers/staging/batman-adv/proc.c670
-rw-r--r--drivers/staging/batman-adv/proc.h40
-rw-r--r--drivers/staging/batman-adv/ring_buffer.c2
-rw-r--r--drivers/staging/batman-adv/ring_buffer.h2
-rw-r--r--drivers/staging/batman-adv/routing.c221
-rw-r--r--drivers/staging/batman-adv/routing.h2
-rw-r--r--drivers/staging/batman-adv/send.c149
-rw-r--r--drivers/staging/batman-adv/send.h6
-rw-r--r--drivers/staging/batman-adv/soft-interface.c30
-rw-r--r--drivers/staging/batman-adv/soft-interface.h2
-rw-r--r--drivers/staging/batman-adv/translation-table.c92
-rw-r--r--drivers/staging/batman-adv/translation-table.h8
-rw-r--r--drivers/staging/batman-adv/types.h54
-rw-r--r--drivers/staging/batman-adv/vis.c353
-rw-r--r--drivers/staging/batman-adv/vis.h19
-rw-r--r--drivers/staging/comedi/Kconfig1284
-rw-r--r--drivers/staging/comedi/Makefile1
-rw-r--r--drivers/staging/comedi/comedi.h202
-rw-r--r--drivers/staging/comedi/comedi_compat32.c3
-rw-r--r--drivers/staging/comedi/comedi_fops.c242
-rw-r--r--drivers/staging/comedi/comedi_fops.h1
-rw-r--r--drivers/staging/comedi/comedi_ksyms.c69
-rw-r--r--drivers/staging/comedi/comedidev.h34
-rw-r--r--drivers/staging/comedi/comedilib.h170
-rw-r--r--drivers/staging/comedi/drivers.c112
-rw-r--r--drivers/staging/comedi/drivers/8253.h3
-rw-r--r--drivers/staging/comedi/drivers/8255.c16
-rw-r--r--drivers/staging/comedi/drivers/8255.h22
-rw-r--r--drivers/staging/comedi/drivers/Makefile245
-rw-r--r--drivers/staging/comedi/drivers/addi-data/addi_amcc_s5933.h6
-rw-r--r--drivers/staging/comedi/drivers/addi-data/addi_common.c13
-rw-r--r--drivers/staging/comedi/drivers/addi-data/addi_common.h2
-rw-r--r--drivers/staging/comedi/drivers/addi-data/amcc_s5933_58.h4
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci035.c6
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci035.h16
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci1032.c4
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci1500.c94
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci1564.c4
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci2032.c2
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci3501.h2
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci3xxx.c191
-rw-r--r--drivers/staging/comedi/drivers/adl_pci6208.c10
-rw-r--r--drivers/staging/comedi/drivers/adl_pci7230.c206
-rw-r--r--drivers/staging/comedi/drivers/adl_pci9111.c41
-rw-r--r--drivers/staging/comedi/drivers/adl_pci9118.c857
-rw-r--r--drivers/staging/comedi/drivers/adv_pci1710.c27
-rw-r--r--drivers/staging/comedi/drivers/adv_pci1723.c153
-rw-r--r--drivers/staging/comedi/drivers/adv_pci_dio.c36
-rw-r--r--drivers/staging/comedi/drivers/aio_aio12_8.c4
-rw-r--r--drivers/staging/comedi/drivers/amplc_dio200.c6
-rw-r--r--drivers/staging/comedi/drivers/amplc_pci224.c116
-rw-r--r--drivers/staging/comedi/drivers/amplc_pci230.c132
-rw-r--r--drivers/staging/comedi/drivers/cb_das16_cs.c61
-rw-r--r--drivers/staging/comedi/drivers/cb_pcidas64.c94
-rw-r--r--drivers/staging/comedi/drivers/cb_pcimdas.c2
-rw-r--r--drivers/staging/comedi/drivers/cb_pcimdda.c3
-rw-r--r--drivers/staging/comedi/drivers/comedi_bond.c13
-rw-r--r--drivers/staging/comedi/drivers/comedi_parport.c10
-rw-r--r--drivers/staging/comedi/drivers/das08.c156
-rw-r--r--drivers/staging/comedi/drivers/das08.h2
-rw-r--r--drivers/staging/comedi/drivers/das08_cs.c40
-rw-r--r--drivers/staging/comedi/drivers/das16.c159
-rw-r--r--drivers/staging/comedi/drivers/das1800.c10
-rw-r--r--drivers/staging/comedi/drivers/dt2801.c2
-rw-r--r--drivers/staging/comedi/drivers/dt2811.c191
-rw-r--r--drivers/staging/comedi/drivers/dt2814.c38
-rw-r--r--drivers/staging/comedi/drivers/dt282x.c225
-rw-r--r--drivers/staging/comedi/drivers/dt3000.c21
-rw-r--r--drivers/staging/comedi/drivers/icp_multi.h3
-rw-r--r--drivers/staging/comedi/drivers/me_daq.c1
-rw-r--r--drivers/staging/comedi/drivers/mite.c2
-rw-r--r--drivers/staging/comedi/drivers/mite.h2
-rw-r--r--drivers/staging/comedi/drivers/mpc624.c185
-rw-r--r--drivers/staging/comedi/drivers/ni_6527.c7
-rw-r--r--drivers/staging/comedi/drivers/ni_65xx.c47
-rw-r--r--drivers/staging/comedi/drivers/ni_660x.c11
-rw-r--r--drivers/staging/comedi/drivers/ni_670x.c11
-rw-r--r--drivers/staging/comedi/drivers/ni_at_ao.c6
-rw-r--r--drivers/staging/comedi/drivers/ni_daq_700.c55
-rw-r--r--drivers/staging/comedi/drivers/ni_daq_dio24.c46
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc.c7
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc_cs.c46
-rw-r--r--drivers/staging/comedi/drivers/ni_mio_cs.c24
-rw-r--r--drivers/staging/comedi/drivers/ni_pcidio.c25
-rw-r--r--drivers/staging/comedi/drivers/ni_pcimio.c107
-rw-r--r--drivers/staging/comedi/drivers/plx9080.h4
-rw-r--r--drivers/staging/comedi/drivers/quatech_daqp_cs.c88
-rw-r--r--drivers/staging/comedi/drivers/skel.c21
-rw-r--r--drivers/staging/comedi/drivers/ssv_dnp.c86
-rw-r--r--drivers/staging/comedi/drivers/unioxx5.c2
-rw-r--r--drivers/staging/comedi/drivers/usbdux.c13
-rw-r--r--drivers/staging/comedi/drivers/usbduxfast.c49
-rw-r--r--drivers/staging/comedi/internal.h12
-rw-r--r--drivers/staging/comedi/kcomedilib/Makefile9
-rw-r--r--drivers/staging/comedi/kcomedilib/data.c92
-rw-r--r--drivers/staging/comedi/kcomedilib/dio.c95
-rw-r--r--drivers/staging/comedi/kcomedilib/get.c293
-rw-r--r--drivers/staging/comedi/kcomedilib/kcomedilib_main.c523
-rw-r--r--drivers/staging/comedi/kcomedilib/ksyms.c146
-rw-r--r--drivers/staging/comedi/pci_ids.h31
-rw-r--r--drivers/staging/comedi/proc.c23
-rw-r--r--drivers/staging/comedi/range.c36
-rw-r--r--drivers/staging/comedi/wrapper.h25
-rw-r--r--drivers/staging/crystalhd/TODO1
-rw-r--r--drivers/staging/crystalhd/bc_dts_defs.h72
-rw-r--r--drivers/staging/crystalhd/bc_dts_glob_lnx.h119
-rw-r--r--drivers/staging/crystalhd/bc_dts_types.h25
-rw-r--r--drivers/staging/crystalhd/bcm_70012_regs.h24
-rw-r--r--drivers/staging/crystalhd/crystalhd_cmds.c178
-rw-r--r--drivers/staging/crystalhd/crystalhd_cmds.h21
-rw-r--r--drivers/staging/crystalhd/crystalhd_fw_if.h60
-rw-r--r--drivers/staging/crystalhd/crystalhd_hw.c212
-rw-r--r--drivers/staging/crystalhd/crystalhd_hw.h121
-rw-r--r--drivers/staging/crystalhd/crystalhd_lnx.c76
-rw-r--r--drivers/staging/crystalhd/crystalhd_lnx.h10
-rw-r--r--drivers/staging/crystalhd/crystalhd_misc.c100
-rw-r--r--drivers/staging/crystalhd/crystalhd_misc.h55
-rw-r--r--drivers/staging/cx25821/cx25821-alsa.c103
-rw-r--r--drivers/staging/cx25821/cx25821-audio-upstream.c152
-rw-r--r--drivers/staging/cx25821/cx25821-audups11.c84
-rw-r--r--drivers/staging/cx25821/cx25821-cards.c4
-rw-r--r--drivers/staging/cx25821/cx25821-core.c160
-rw-r--r--drivers/staging/cx25821/cx25821-gpio.c25
-rw-r--r--drivers/staging/cx25821/cx25821-i2c.c14
-rw-r--r--drivers/staging/cx25821/cx25821-medusa-video.c260
-rw-r--r--drivers/staging/cx25821/cx25821-video-upstream-ch2.c6
-rw-r--r--drivers/staging/cx25821/cx25821-video-upstream.c149
-rw-r--r--drivers/staging/cx25821/cx25821-video.c145
-rw-r--r--drivers/staging/cx25821/cx25821-video.h78
-rw-r--r--drivers/staging/cx25821/cx25821-video0.c88
-rw-r--r--drivers/staging/cx25821/cx25821-video1.c88
-rw-r--r--drivers/staging/cx25821/cx25821-video2.c88
-rw-r--r--drivers/staging/cx25821/cx25821-video3.c88
-rw-r--r--drivers/staging/cx25821/cx25821-video4.c88
-rw-r--r--drivers/staging/cx25821/cx25821-video5.c88
-rw-r--r--drivers/staging/cx25821/cx25821-video6.c88
-rw-r--r--drivers/staging/cx25821/cx25821-video7.c88
-rw-r--r--drivers/staging/cx25821/cx25821-videoioctl.c84
-rw-r--r--drivers/staging/cx25821/cx25821-vidups10.c84
-rw-r--r--drivers/staging/cx25821/cx25821-vidups9.c84
-rw-r--r--drivers/staging/cxt1e1/Kconfig22
-rw-r--r--drivers/staging/cxt1e1/Makefile19
-rw-r--r--drivers/staging/cxt1e1/comet.c568
-rw-r--r--drivers/staging/cxt1e1/comet.h366
-rw-r--r--drivers/staging/cxt1e1/comet_tables.c561
-rw-r--r--drivers/staging/cxt1e1/comet_tables.h85
-rw-r--r--drivers/staging/cxt1e1/functions.c368
-rw-r--r--drivers/staging/cxt1e1/hwprobe.c402
-rw-r--r--drivers/staging/cxt1e1/libsbew.h581
-rw-r--r--drivers/staging/cxt1e1/linux.c1356
-rw-r--r--drivers/staging/cxt1e1/musycc.c2185
-rw-r--r--drivers/staging/cxt1e1/musycc.h460
-rw-r--r--drivers/staging/cxt1e1/ossiRelease.c39
-rw-r--r--drivers/staging/cxt1e1/pmc93x6_eeprom.c561
-rw-r--r--drivers/staging/cxt1e1/pmc93x6_eeprom.h60
-rw-r--r--drivers/staging/cxt1e1/pmcc4.h155
-rw-r--r--drivers/staging/cxt1e1/pmcc4_cpld.h124
-rw-r--r--drivers/staging/cxt1e1/pmcc4_defs.h82
-rw-r--r--drivers/staging/cxt1e1/pmcc4_drv.c1860
-rw-r--r--drivers/staging/cxt1e1/pmcc4_ioctls.h81
-rw-r--r--drivers/staging/cxt1e1/pmcc4_private.h296
-rw-r--r--drivers/staging/cxt1e1/pmcc4_sysdep.h62
-rw-r--r--drivers/staging/cxt1e1/sbe_bid.h61
-rw-r--r--drivers/staging/cxt1e1/sbe_promformat.h157
-rw-r--r--drivers/staging/cxt1e1/sbecom_inline_linux.h310
-rw-r--r--drivers/staging/cxt1e1/sbecrc.c137
-rw-r--r--drivers/staging/cxt1e1/sbeid.c217
-rw-r--r--drivers/staging/cxt1e1/sbeproc.c358
-rw-r--r--drivers/staging/cxt1e1/sbeproc.h52
-rw-r--r--drivers/staging/cxt1e1/sbew_ioc.h136
-rw-r--r--drivers/staging/dream/Kconfig11
-rw-r--r--drivers/staging/dream/Makefile3
-rw-r--r--drivers/staging/dream/TODO1
-rw-r--r--drivers/staging/dream/camera/msm_vfe8x_proc.c2
-rw-r--r--drivers/staging/dream/pmem.c27
-rw-r--r--drivers/staging/dream/qdsp5/audio_out.c9
-rw-r--r--drivers/staging/dream/smd/Kconfig26
-rw-r--r--drivers/staging/dream/smd/Makefile7
-rw-r--r--drivers/staging/dream/smd/rpc_server_dog_keepalive.c68
-rw-r--r--drivers/staging/dream/smd/rpc_server_time_remote.c77
-rw-r--r--drivers/staging/dream/smd/smd_private.h171
-rw-r--r--drivers/staging/dream/smd/smd_qmi.c855
-rw-r--r--drivers/staging/dream/smd/smd_rpcrouter.c1261
-rw-r--r--drivers/staging/dream/smd/smd_rpcrouter.h193
-rw-r--r--drivers/staging/dream/smd/smd_rpcrouter_device.c377
-rw-r--r--drivers/staging/dream/smd/smd_rpcrouter_servers.c230
-rw-r--r--drivers/staging/dream/smd/smd_tty.c208
-rw-r--r--drivers/staging/dream/synaptics_i2c_rmi.c28
-rw-r--r--drivers/staging/dt3155/allocator.c16
-rw-r--r--drivers/staging/dt3155/allocator.h4
-rw-r--r--drivers/staging/dt3155/dt3155.h44
-rw-r--r--drivers/staging/dt3155/dt3155_drv.c380
-rw-r--r--drivers/staging/dt3155/dt3155_io.c24
-rw-r--r--drivers/staging/dt3155/dt3155_isr.c297
-rw-r--r--drivers/staging/dt3155/dt3155_isr.h2
-rw-r--r--drivers/staging/dt3155v4l/Kconfig20
-rw-r--r--drivers/staging/dt3155v4l/Makefile1
-rw-r--r--drivers/staging/dt3155v4l/dt3155v4l.c1200
-rw-r--r--drivers/staging/dt3155v4l/dt3155v4l.h224
-rw-r--r--drivers/staging/echo/echo.c2
-rw-r--r--drivers/staging/et131x/et1310_address_map.h7
-rw-r--r--drivers/staging/et131x/et1310_eeprom.c8
-rw-r--r--drivers/staging/et131x/et1310_phy.c2
-rw-r--r--drivers/staging/et131x/et1310_rx.c55
-rw-r--r--drivers/staging/et131x/et1310_rx.h5
-rw-r--r--drivers/staging/et131x/et1310_tx.c2
-rw-r--r--drivers/staging/et131x/et131x_initpci.c12
-rw-r--r--drivers/staging/et131x/et131x_isr.c6
-rw-r--r--drivers/staging/et131x/et131x_netdev.c20
-rw-r--r--drivers/staging/frontier/alphatrack.c34
-rw-r--r--drivers/staging/frontier/tranzport.c36
-rw-r--r--drivers/staging/go7007/go7007-fw.c12
-rw-r--r--drivers/staging/go7007/go7007-usb.c3
-rw-r--r--drivers/staging/go7007/go7007-v4l2.c4
-rw-r--r--drivers/staging/go7007/saa7134-go7007.c11
-rw-r--r--drivers/staging/go7007/wis-saa7113.c1
-rw-r--r--drivers/staging/go7007/wis-saa7115.c1
-rw-r--r--drivers/staging/go7007/wis-tw9903.c1
-rw-r--r--drivers/staging/hv/Kconfig8
-rw-r--r--drivers/staging/hv/Makefile11
-rw-r--r--drivers/staging/hv/TODO3
-rw-r--r--drivers/staging/hv/blkvsc.c (renamed from drivers/staging/hv/BlkVsc.c)4
-rw-r--r--drivers/staging/hv/blkvsc_drv.c45
-rw-r--r--drivers/staging/hv/channel.c (renamed from drivers/staging/hv/Channel.c)195
-rw-r--r--drivers/staging/hv/channel.h (renamed from drivers/staging/hv/Channel.h)2
-rw-r--r--drivers/staging/hv/channel_interface.c (renamed from drivers/staging/hv/ChannelInterface.c)2
-rw-r--r--drivers/staging/hv/channel_interface.h (renamed from drivers/staging/hv/ChannelInterface.h)2
-rw-r--r--drivers/staging/hv/channel_mgmt.c (renamed from drivers/staging/hv/ChannelMgmt.c)228
-rw-r--r--drivers/staging/hv/channel_mgmt.h (renamed from drivers/staging/hv/ChannelMgmt.h)6
-rw-r--r--drivers/staging/hv/connection.c (renamed from drivers/staging/hv/Connection.c)31
-rw-r--r--drivers/staging/hv/hv.c (renamed from drivers/staging/hv/Hv.c)28
-rw-r--r--drivers/staging/hv/hv.h (renamed from drivers/staging/hv/Hv.h)0
-rw-r--r--drivers/staging/hv/hv_utils.c295
-rw-r--r--drivers/staging/hv/logging.h7
-rw-r--r--drivers/staging/hv/netvsc.c (renamed from drivers/staging/hv/NetVsc.c)104
-rw-r--r--drivers/staging/hv/netvsc.h (renamed from drivers/staging/hv/NetVsc.h)7
-rw-r--r--drivers/staging/hv/netvsc_api.h (renamed from drivers/staging/hv/NetVscApi.h)9
-rw-r--r--drivers/staging/hv/netvsc_drv.c243
-rw-r--r--drivers/staging/hv/osd.c70
-rw-r--r--drivers/staging/hv/ring_buffer.c (renamed from drivers/staging/hv/RingBuffer.c)16
-rw-r--r--drivers/staging/hv/ring_buffer.h (renamed from drivers/staging/hv/RingBuffer.h)0
-rw-r--r--drivers/staging/hv/rndis.h2
-rw-r--r--drivers/staging/hv/rndis_filter.c (renamed from drivers/staging/hv/RndisFilter.c)56
-rw-r--r--drivers/staging/hv/rndis_filter.h (renamed from drivers/staging/hv/RndisFilter.h)2
-rw-r--r--drivers/staging/hv/storvsc.c (renamed from drivers/staging/hv/StorVsc.c)54
-rw-r--r--drivers/staging/hv/storvsc_api.h (renamed from drivers/staging/hv/StorVscApi.h)2
-rw-r--r--drivers/staging/hv/storvsc_drv.c54
-rw-r--r--drivers/staging/hv/utils.h119
-rw-r--r--drivers/staging/hv/version_info.h (renamed from drivers/staging/hv/VersionInfo.h)7
-rw-r--r--drivers/staging/hv/vmbus.c (renamed from drivers/staging/hv/Vmbus.c)33
-rw-r--r--drivers/staging/hv/vmbus.h2
-rw-r--r--drivers/staging/hv/vmbus_api.h (renamed from drivers/staging/hv/VmbusApi.h)18
-rw-r--r--drivers/staging/hv/vmbus_channel_interface.h (renamed from drivers/staging/hv/VmbusChannelInterface.h)0
-rw-r--r--drivers/staging/hv/vmbus_drv.c96
-rw-r--r--drivers/staging/hv/vmbus_packet_format.h (renamed from drivers/staging/hv/VmbusPacketFormat.h)1
-rw-r--r--drivers/staging/hv/vmbus_private.h (renamed from drivers/staging/hv/VmbusPrivate.h)12
-rw-r--r--drivers/staging/hv/vstorage.h2
-rw-r--r--drivers/staging/iio/Documentation/iio_utils.h266
-rw-r--r--drivers/staging/iio/Documentation/lis3l02dqbuffersimple.c237
-rw-r--r--drivers/staging/iio/Documentation/sysfs-class-iio294
-rw-r--r--drivers/staging/iio/Documentation/userspace.txt2
-rw-r--r--drivers/staging/iio/Kconfig2
-rw-r--r--drivers/staging/iio/Makefile2
-rw-r--r--drivers/staging/iio/accel/Kconfig25
-rw-r--r--drivers/staging/iio/accel/Makefile11
-rw-r--r--drivers/staging/iio/accel/accel.h12
-rw-r--r--drivers/staging/iio/accel/adis16209.h193
-rw-r--r--drivers/staging/iio/accel/adis16209_core.c615
-rw-r--r--drivers/staging/iio/accel/adis16209_ring.c266
-rw-r--r--drivers/staging/iio/accel/adis16209_trigger.c124
-rw-r--r--drivers/staging/iio/accel/adis16220.h147
-rw-r--r--drivers/staging/iio/accel/adis16220_core.c670
-rw-r--r--drivers/staging/iio/accel/adis16240.h218
-rw-r--r--drivers/staging/iio/accel/adis16240_core.c599
-rw-r--r--drivers/staging/iio/accel/adis16240_ring.c254
-rw-r--r--drivers/staging/iio/accel/adis16240_trigger.c124
-rw-r--r--drivers/staging/iio/accel/inclinometer.h23
-rw-r--r--drivers/staging/iio/accel/kxsd9.c88
-rw-r--r--drivers/staging/iio/accel/lis3l02dq.h4
-rw-r--r--drivers/staging/iio/accel/lis3l02dq_core.c173
-rw-r--r--drivers/staging/iio/accel/lis3l02dq_ring.c24
-rw-r--r--drivers/staging/iio/accel/sca3000.h2
-rw-r--r--drivers/staging/iio/accel/sca3000_core.c178
-rw-r--r--drivers/staging/iio/accel/sca3000_ring.c35
-rw-r--r--drivers/staging/iio/adc/Kconfig11
-rw-r--r--drivers/staging/iio/adc/Makefile2
-rw-r--r--drivers/staging/iio/adc/adc.h15
-rw-r--r--drivers/staging/iio/adc/max1363.h122
-rw-r--r--drivers/staging/iio/adc/max1363_core.c1107
-rw-r--r--drivers/staging/iio/adc/max1363_ring.c82
-rw-r--r--drivers/staging/iio/chrdev.h2
-rw-r--r--drivers/staging/iio/gyro/Kconfig13
-rw-r--r--drivers/staging/iio/gyro/Makefile7
-rw-r--r--drivers/staging/iio/gyro/adis16260.h175
-rw-r--r--drivers/staging/iio/gyro/adis16260_core.c661
-rw-r--r--drivers/staging/iio/gyro/adis16260_ring.c256
-rw-r--r--drivers/staging/iio/gyro/adis16260_trigger.c124
-rw-r--r--drivers/staging/iio/gyro/gyro.h43
-rw-r--r--drivers/staging/iio/iio.h47
-rw-r--r--drivers/staging/iio/imu/Kconfig33
-rw-r--r--drivers/staging/iio/imu/Makefile14
-rw-r--r--drivers/staging/iio/imu/adis16300.h194
-rw-r--r--drivers/staging/iio/imu/adis16300_core.c768
-rw-r--r--drivers/staging/iio/imu/adis16300_ring.c233
-rw-r--r--drivers/staging/iio/imu/adis16300_trigger.c127
-rw-r--r--drivers/staging/iio/imu/adis16350.h193
-rw-r--r--drivers/staging/iio/imu/adis16350_core.c736
-rw-r--r--drivers/staging/iio/imu/adis16350_ring.c286
-rw-r--r--drivers/staging/iio/imu/adis16350_trigger.c127
-rw-r--r--drivers/staging/iio/imu/adis16400.h229
-rw-r--r--drivers/staging/iio/imu/adis16400_core.c800
-rw-r--r--drivers/staging/iio/imu/adis16400_ring.c245
-rw-r--r--drivers/staging/iio/imu/adis16400_trigger.c127
-rw-r--r--drivers/staging/iio/industrialio-core.c60
-rw-r--r--drivers/staging/iio/industrialio-ring.c75
-rw-r--r--drivers/staging/iio/industrialio-trigger.c20
-rw-r--r--drivers/staging/iio/light/tsl2563.c18
-rw-r--r--drivers/staging/iio/magnetometer/magnet.h31
-rw-r--r--drivers/staging/iio/ring_generic.h50
-rw-r--r--drivers/staging/iio/ring_sw.c50
-rw-r--r--drivers/staging/iio/sysfs.h15
-rw-r--r--drivers/staging/iio/trigger/iio-trig-gpio.c131
-rw-r--r--drivers/staging/iio/trigger/iio-trig-periodic-rtc.c4
-rw-r--r--drivers/staging/line6/control.h166
-rw-r--r--drivers/staging/line6/driver.c10
-rw-r--r--drivers/staging/line6/dumprequest.c3
-rw-r--r--drivers/staging/line6/pod.c6
-rw-r--r--drivers/staging/line6/variax.c5
-rw-r--r--drivers/staging/memrar/Kconfig15
-rw-r--r--drivers/staging/memrar/Makefile2
-rw-r--r--drivers/staging/memrar/TODO43
-rw-r--r--drivers/staging/memrar/memrar-abi89
-rw-r--r--drivers/staging/memrar/memrar.h155
-rw-r--r--drivers/staging/memrar/memrar_allocator.c432
-rw-r--r--drivers/staging/memrar/memrar_allocator.h149
-rw-r--r--drivers/staging/memrar/memrar_handler.c996
-rw-r--r--drivers/staging/netwave/Kconfig11
-rw-r--r--drivers/staging/netwave/Makefile1
-rw-r--r--drivers/staging/netwave/TODO7
-rw-r--r--drivers/staging/netwave/netwave_cs.c1369
-rw-r--r--drivers/staging/otus/80211core/cagg.c34
-rw-r--r--drivers/staging/otus/80211core/ccmd.c3
-rw-r--r--drivers/staging/otus/80211core/cfunc.c3
-rw-r--r--drivers/staging/otus/80211core/cic.c9
-rw-r--r--drivers/staging/otus/80211core/cinit.c3
-rw-r--r--drivers/staging/otus/80211core/cmm.c144
-rw-r--r--drivers/staging/otus/80211core/cmmap.c103
-rw-r--r--drivers/staging/otus/80211core/cmmsta.c123
-rw-r--r--drivers/staging/otus/80211core/coid.c3
-rw-r--r--drivers/staging/otus/80211core/cpsmgr.c3
-rw-r--r--drivers/staging/otus/80211core/ctxrx.c81
-rw-r--r--drivers/staging/otus/80211core/queue.c5
-rw-r--r--drivers/staging/otus/80211core/ratectrl.c3
-rw-r--r--drivers/staging/otus/hal/hpani.c33
-rw-r--r--drivers/staging/otus/hal/hpani.h7
-rw-r--r--drivers/staging/otus/hal/hpfw2.c2
-rw-r--r--drivers/staging/otus/hal/hpfwu.c2
-rw-r--r--drivers/staging/otus/hal/hpfwu_2k.c2
-rw-r--r--drivers/staging/otus/hal/hpfwu_BA.c2
-rw-r--r--drivers/staging/otus/hal/hpfwu_OTUS_RC.c2
-rw-r--r--drivers/staging/otus/hal/hpfwuinit.c2
-rw-r--r--drivers/staging/otus/hal/hpmain.c30
-rw-r--r--drivers/staging/otus/hal/hpreg.c1586
-rw-r--r--drivers/staging/otus/ioctl.c37
-rw-r--r--drivers/staging/otus/usbdrv.c6
-rw-r--r--drivers/staging/otus/wwrap.c1
-rw-r--r--drivers/staging/otus/zdusb.c6
-rw-r--r--drivers/staging/panel/panel.c5
-rw-r--r--drivers/staging/phison/phison.c4
-rw-r--r--drivers/staging/poch/Kconfig6
-rw-r--r--drivers/staging/poch/Makefile1
-rw-r--r--drivers/staging/poch/README136
-rw-r--r--drivers/staging/poch/poch.c1443
-rw-r--r--drivers/staging/poch/poch.h35
-rw-r--r--drivers/staging/pohmelfs/config.c41
-rw-r--r--drivers/staging/pohmelfs/crypto.c38
-rw-r--r--drivers/staging/pohmelfs/dir.c14
-rw-r--r--drivers/staging/pohmelfs/inode.c74
-rw-r--r--drivers/staging/pohmelfs/net.c38
-rw-r--r--drivers/staging/pohmelfs/netfs.h17
-rw-r--r--drivers/staging/quatech_usb2/quatech_usb2.c12
-rw-r--r--drivers/staging/ramzswap/TODO5
-rw-r--r--drivers/staging/ramzswap/ramzswap_drv.c667
-rw-r--r--drivers/staging/ramzswap/ramzswap_drv.h51
-rw-r--r--drivers/staging/ramzswap/ramzswap_ioctl.h14
-rw-r--r--drivers/staging/rar_register/Kconfig28
-rw-r--r--drivers/staging/rar_register/rar_register.c610
-rw-r--r--drivers/staging/rar_register/rar_register.h62
-rw-r--r--drivers/staging/rt2860/chip/mac_pci.h41
-rw-r--r--drivers/staging/rt2860/chip/mac_usb.h55
-rw-r--r--drivers/staging/rt2860/chip/rtmp_mac.h52
-rw-r--r--drivers/staging/rt2860/chip/rtmp_phy.h338
-rw-r--r--drivers/staging/rt2860/chips/rt3070.c4
-rw-r--r--drivers/staging/rt2860/chips/rt3090.c4
-rw-r--r--drivers/staging/rt2860/chips/rt30xx.c9
-rw-r--r--drivers/staging/rt2860/common/cmm_aes.c2
-rw-r--r--drivers/staging/rt2860/common/cmm_data.c8
-rw-r--r--drivers/staging/rt2860/common/cmm_mac_pci.c2
-rw-r--r--drivers/staging/rt2860/common/cmm_mac_usb.c2
-rw-r--r--drivers/staging/rt2860/common/cmm_wpa.c18
-rw-r--r--drivers/staging/rt2860/common/rtmp_init.c15
-rw-r--r--drivers/staging/rt2860/common/spectrum.c7
-rw-r--r--drivers/staging/rt2860/iface/rtmp_usb.h4
-rw-r--r--drivers/staging/rt2860/mlme.h2
-rw-r--r--drivers/staging/rt2860/pci_main_dev.c37
-rw-r--r--drivers/staging/rt2860/rt_linux.c51
-rw-r--r--drivers/staging/rt2860/rt_linux.h14
-rw-r--r--drivers/staging/rt2860/rt_main_dev.c20
-rw-r--r--drivers/staging/rt2860/rt_pci_rbus.c24
-rw-r--r--drivers/staging/rt2860/rt_usb.c20
-rw-r--r--drivers/staging/rt2860/rtmp.h412
-rw-r--r--drivers/staging/rt2860/sta/assoc.c1
-rw-r--r--drivers/staging/rt2860/sta_ioctl.c15
-rw-r--r--drivers/staging/rt2860/usb_main_dev.c5
-rw-r--r--drivers/staging/rt2870/Kconfig2
-rw-r--r--drivers/staging/rt2870/common/rtusb_bulk.c42
-rw-r--r--drivers/staging/rt2870/common/rtusb_data.c8
-rw-r--r--drivers/staging/rt2870/common/rtusb_io.c39
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_crypt.c6
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_ccmp.c3
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_tkip.c3
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_wep.c3
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_module.c7
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_rx.c4
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c20
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_softmac_wx.c4
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_wx.c6
-rw-r--r--drivers/staging/rtl8187se/r8180_core.c2076
-rw-r--r--drivers/staging/rtl8187se/r8180_rtl8225z2.c129
-rw-r--r--drivers/staging/rtl8192e/Makefile1
-rw-r--r--drivers/staging/rtl8192e/ieee80211.h2
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211.h2
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211_crypt.c6
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211_crypt_ccmp.c3
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211_crypt_tkip.c3
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211_crypt_wep.c3
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211_module.c9
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211_rx.c2
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211_softmac.c17
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211_wx.c6
-rw-r--r--drivers/staging/rtl8192e/ieee80211/rtl819x_Qos.h168
-rw-r--r--drivers/staging/rtl8192e/ieee80211/rtl819x_TSProc.c38
-rw-r--r--drivers/staging/rtl8192e/r8190_rtl8256.c4
-rw-r--r--drivers/staging/rtl8192e/r8192E_core.c14
-rw-r--r--drivers/staging/rtl8192e/r8192_pm.c6
-rw-r--r--drivers/staging/rtl8192e/r8192_pm.h4
-rw-r--r--drivers/staging/rtl8192su/Kconfig1
-rw-r--r--drivers/staging/rtl8192su/Makefile2
-rw-r--r--drivers/staging/rtl8192su/ieee80211/ieee80211.h2
-rw-r--r--drivers/staging/rtl8192su/ieee80211/ieee80211_crypt.c6
-rw-r--r--drivers/staging/rtl8192su/ieee80211/ieee80211_crypt_ccmp.c3
-rw-r--r--drivers/staging/rtl8192su/ieee80211/ieee80211_crypt_tkip.c3
-rw-r--r--drivers/staging/rtl8192su/ieee80211/ieee80211_crypt_wep.c3
-rw-r--r--drivers/staging/rtl8192su/ieee80211/ieee80211_module.c9
-rw-r--r--drivers/staging/rtl8192su/ieee80211/ieee80211_r8192s.h24
-rw-r--r--drivers/staging/rtl8192su/ieee80211/ieee80211_rx.c2
-rw-r--r--drivers/staging/rtl8192su/ieee80211/ieee80211_softmac.c17
-rw-r--r--drivers/staging/rtl8192su/ieee80211/ieee80211_wx.c6
-rw-r--r--drivers/staging/rtl8192su/ieee80211/rtl819x_Qos.h1
-rw-r--r--drivers/staging/rtl8192su/ieee80211/rtl819x_TSProc.c12
-rw-r--r--drivers/staging/rtl8192su/r8180_93cx6.c146
-rw-r--r--drivers/staging/rtl8192su/r8180_93cx6.h40
-rw-r--r--drivers/staging/rtl8192su/r8192SU_led.c2347
-rw-r--r--drivers/staging/rtl8192su/r8192SU_led.h93
-rw-r--r--drivers/staging/rtl8192su/r8192S_firmware.c503
-rw-r--r--drivers/staging/rtl8192su/r8192S_phy.c194
-rw-r--r--drivers/staging/rtl8192su/r8192U.h41
-rw-r--r--drivers/staging/rtl8192su/r8192U_core.c626
-rw-r--r--drivers/staging/rtl8192su/r819xU_cmdpkt.c663
-rw-r--r--drivers/staging/rtl8192su/r819xU_cmdpkt.h366
-rw-r--r--drivers/staging/rtl8192u/dot11d.h52
-rw-r--r--drivers/staging/rtl8192u/ieee80211.h548
-rw-r--r--drivers/staging/rtl8192u/ieee80211/api.c4
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211.h2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.c6
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c3
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c3
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c3
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_module.c9
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c20
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c1
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c7
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_Qos.h3
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c9
-rw-r--r--drivers/staging/rtl8192u/ieee80211_crypt.h2
-rw-r--r--drivers/staging/rtl8192u/r8192U_core.c12
-rw-r--r--drivers/staging/rtl8192u/r819xU_cmdpkt.c1
-rw-r--r--drivers/staging/sep/sep_driver.c1
-rw-r--r--drivers/staging/serqt_usb2/serqt_usb2.c26
-rw-r--r--drivers/staging/slicoss/slicoss.c6
-rw-r--r--drivers/staging/sm7xx/smtcfb.c18
-rw-r--r--drivers/staging/sm7xx/smtcfb.h2
-rw-r--r--drivers/staging/strip/Kconfig22
-rw-r--r--drivers/staging/strip/Makefile1
-rw-r--r--drivers/staging/strip/TODO7
-rw-r--r--drivers/staging/strip/strip.c2823
-rw-r--r--drivers/staging/ti-st/Kconfig25
-rw-r--r--drivers/staging/ti-st/Makefile7
-rw-r--r--drivers/staging/ti-st/TODO19
-rw-r--r--drivers/staging/ti-st/bt_drv.c502
-rw-r--r--drivers/staging/ti-st/bt_drv.h61
-rw-r--r--drivers/staging/ti-st/fm.h13
-rw-r--r--drivers/staging/ti-st/st.h90
-rw-r--r--drivers/staging/ti-st/st_core.c1062
-rw-r--r--drivers/staging/ti-st/st_core.h98
-rw-r--r--drivers/staging/ti-st/st_kim.c754
-rw-r--r--drivers/staging/ti-st/st_kim.h150
-rw-r--r--drivers/staging/ti-st/st_ll.c147
-rw-r--r--drivers/staging/ti-st/st_ll.h62
-rw-r--r--drivers/staging/ti-st/sysfs-uim16
-rw-r--r--drivers/staging/tm6000/Kconfig32
-rw-r--r--drivers/staging/tm6000/Makefile17
-rw-r--r--drivers/staging/tm6000/README22
-rw-r--r--drivers/staging/tm6000/tm6000-alsa.c414
-rw-r--r--drivers/staging/tm6000/tm6000-cards.c973
-rw-r--r--drivers/staging/tm6000/tm6000-core.c602
-rw-r--r--drivers/staging/tm6000/tm6000-dvb.c346
-rw-r--r--drivers/staging/tm6000/tm6000-i2c.c370
-rw-r--r--drivers/staging/tm6000/tm6000-regs.h541
-rw-r--r--drivers/staging/tm6000/tm6000-stds.c873
-rw-r--r--drivers/staging/tm6000/tm6000-usb-isoc.h53
-rw-r--r--drivers/staging/tm6000/tm6000-video.c1557
-rw-r--r--drivers/staging/tm6000/tm6000.h301
-rw-r--r--drivers/staging/udlfb/udlfb.c73
-rw-r--r--drivers/staging/usbip/stub_rx.c6
-rw-r--r--drivers/staging/usbip/usbip_common.c2
-rw-r--r--drivers/staging/usbip/usbip_common.h2
-rw-r--r--drivers/staging/usbip/vhci.h2
-rw-r--r--drivers/staging/usbip/vhci_hcd.c2
-rw-r--r--drivers/staging/usbip/vhci_tx.c2
-rw-r--r--drivers/staging/vme/boards/vme_vmivme7805.c1
-rw-r--r--drivers/staging/vme/bridges/vme_ca91cx42.c115
-rw-r--r--drivers/staging/vme/bridges/vme_tsi148.c396
-rw-r--r--drivers/staging/vme/bridges/vme_tsi148.h26
-rw-r--r--drivers/staging/vme/devices/vme_user.c18
-rw-r--r--drivers/staging/vme/vme.c4
-rw-r--r--drivers/staging/vt6655/80211hdr.h46
-rw-r--r--drivers/staging/vt6655/80211mgr.c88
-rw-r--r--drivers/staging/vt6655/80211mgr.h88
-rw-r--r--drivers/staging/vt6655/IEEE11h.c6
-rw-r--r--drivers/staging/vt6655/IEEE11h.h2
-rw-r--r--drivers/staging/vt6655/aes_ccmp.c10
-rw-r--r--drivers/staging/vt6655/baseband.c64
-rw-r--r--drivers/staging/vt6655/baseband.h52
-rw-r--r--drivers/staging/vt6655/bssdb.c214
-rw-r--r--drivers/staging/vt6655/bssdb.h148
-rw-r--r--drivers/staging/vt6655/card.c214
-rw-r--r--drivers/staging/vt6655/card.h158
-rw-r--r--drivers/staging/vt6655/datarate.c52
-rw-r--r--drivers/staging/vt6655/datarate.h36
-rw-r--r--drivers/staging/vt6655/desc.h12
-rw-r--r--drivers/staging/vt6655/device.h28
-rw-r--r--drivers/staging/vt6655/device_main.c99
-rw-r--r--drivers/staging/vt6655/dpc.c184
-rw-r--r--drivers/staging/vt6655/dpc.h6
-rw-r--r--drivers/staging/vt6655/hostap.c13
-rw-r--r--drivers/staging/vt6655/hostap.h4
-rw-r--r--drivers/staging/vt6655/ioctl.c32
-rw-r--r--drivers/staging/vt6655/ioctl.h10
-rw-r--r--drivers/staging/vt6655/iwctl.c31
-rw-r--r--drivers/staging/vt6655/key.c36
-rw-r--r--drivers/staging/vt6655/key.h30
-rw-r--r--drivers/staging/vt6655/mac.c8
-rw-r--r--drivers/staging/vt6655/mac.h52
-rw-r--r--drivers/staging/vt6655/mib.c6
-rw-r--r--drivers/staging/vt6655/mib.h2
-rw-r--r--drivers/staging/vt6655/michael.c24
-rw-r--r--drivers/staging/vt6655/michael.h8
-rw-r--r--drivers/staging/vt6655/power.c26
-rw-r--r--drivers/staging/vt6655/power.h28
-rw-r--r--drivers/staging/vt6655/rc4.c4
-rw-r--r--drivers/staging/vt6655/rc4.h2
-rw-r--r--drivers/staging/vt6655/rf.c22
-rw-r--r--drivers/staging/vt6655/rf.h14
-rw-r--r--drivers/staging/vt6655/rxtx.c524
-rw-r--r--drivers/staging/vt6655/rxtx.h84
-rw-r--r--drivers/staging/vt6655/srom.c54
-rw-r--r--drivers/staging/vt6655/srom.h2
-rw-r--r--drivers/staging/vt6655/tether.c2
-rw-r--r--drivers/staging/vt6655/tether.h31
-rw-r--r--drivers/staging/vt6655/tkip.c2
-rw-r--r--drivers/staging/vt6655/tkip.h2
-rw-r--r--drivers/staging/vt6655/ttype.h22
-rw-r--r--drivers/staging/vt6655/vntwifi.c142
-rw-r--r--drivers/staging/vt6655/vntwifi.h142
-rw-r--r--drivers/staging/vt6655/wcmd.c104
-rw-r--r--drivers/staging/vt6655/wcmd.h26
-rw-r--r--drivers/staging/vt6655/wctl.c4
-rw-r--r--drivers/staging/vt6655/wmgr.c672
-rw-r--r--drivers/staging/vt6655/wmgr.h96
-rw-r--r--drivers/staging/vt6655/wpa.c14
-rw-r--r--drivers/staging/vt6655/wpa.h14
-rw-r--r--drivers/staging/vt6655/wpa2.c16
-rw-r--r--drivers/staging/vt6655/wpa2.h14
-rw-r--r--drivers/staging/vt6655/wpactl.c19
-rw-r--r--drivers/staging/vt6655/wroute.c6
-rw-r--r--drivers/staging/vt6656/80211hdr.h83
-rw-r--r--drivers/staging/vt6656/80211mgr.c88
-rw-r--r--drivers/staging/vt6656/80211mgr.h136
-rw-r--r--drivers/staging/vt6656/aes_ccmp.c564
-rw-r--r--drivers/staging/vt6656/aes_ccmp.h2
-rw-r--r--drivers/staging/vt6656/baseband.c114
-rw-r--r--drivers/staging/vt6656/baseband.h59
-rw-r--r--drivers/staging/vt6656/bssdb.c402
-rw-r--r--drivers/staging/vt6656/bssdb.h252
-rw-r--r--drivers/staging/vt6656/card.c68
-rw-r--r--drivers/staging/vt6656/card.h62
-rw-r--r--drivers/staging/vt6656/channel.c12
-rw-r--r--drivers/staging/vt6656/channel.h10
-rw-r--r--drivers/staging/vt6656/control.c87
-rw-r--r--drivers/staging/vt6656/control.h31
-rw-r--r--drivers/staging/vt6656/datarate.c71
-rw-r--r--drivers/staging/vt6656/datarate.h39
-rw-r--r--drivers/staging/vt6656/desc.h12
-rw-r--r--drivers/staging/vt6656/device.h218
-rw-r--r--drivers/staging/vt6656/dpc.c305
-rw-r--r--drivers/staging/vt6656/dpc.h27
-rw-r--r--drivers/staging/vt6656/firmware.c6
-rw-r--r--drivers/staging/vt6656/firmware.h9
-rw-r--r--drivers/staging/vt6656/hostap.c27
-rw-r--r--drivers/staging/vt6656/hostap.h9
-rw-r--r--drivers/staging/vt6656/int.c251
-rw-r--r--drivers/staging/vt6656/int.h12
-rw-r--r--drivers/staging/vt6656/iocmd.h353
-rw-r--r--drivers/staging/vt6656/ioctl.c47
-rw-r--r--drivers/staging/vt6656/ioctl.h15
-rw-r--r--drivers/staging/vt6656/iowpa.h4
-rw-r--r--drivers/staging/vt6656/iwctl.c52
-rw-r--r--drivers/staging/vt6656/iwctl.h5
-rw-r--r--drivers/staging/vt6656/key.c87
-rw-r--r--drivers/staging/vt6656/key.h62
-rw-r--r--drivers/staging/vt6656/mac.c14
-rw-r--r--drivers/staging/vt6656/mac.h9
-rw-r--r--drivers/staging/vt6656/main_usb.c229
-rw-r--r--drivers/staging/vt6656/mib.c115
-rw-r--r--drivers/staging/vt6656/mib.h192
-rw-r--r--drivers/staging/vt6656/michael.c181
-rw-r--r--drivers/staging/vt6656/michael.h12
-rw-r--r--drivers/staging/vt6656/power.c58
-rw-r--r--drivers/staging/vt6656/power.h45
-rw-r--r--drivers/staging/vt6656/rc4.c84
-rw-r--r--drivers/staging/vt6656/rc4.h13
-rw-r--r--drivers/staging/vt6656/rf.c32
-rw-r--r--drivers/staging/vt6656/rf.h33
-rw-r--r--drivers/staging/vt6656/rndis.h3
-rw-r--r--drivers/staging/vt6656/rxtx.c874
-rw-r--r--drivers/staging/vt6656/rxtx.h46
-rw-r--r--drivers/staging/vt6656/srom.h2
-rw-r--r--drivers/staging/vt6656/tcrc.c23
-rw-r--r--drivers/staging/vt6656/tcrc.h11
-rw-r--r--drivers/staging/vt6656/tether.c45
-rw-r--r--drivers/staging/vt6656/tether.h33
-rw-r--r--drivers/staging/vt6656/tkip.c2
-rw-r--r--drivers/staging/vt6656/tkip.h7
-rw-r--r--drivers/staging/vt6656/tmacro.h4
-rw-r--r--drivers/staging/vt6656/ttype.h57
-rw-r--r--drivers/staging/vt6656/upc.h8
-rw-r--r--drivers/staging/vt6656/usbpipe.c138
-rw-r--r--drivers/staging/vt6656/usbpipe.h51
-rw-r--r--drivers/staging/vt6656/wcmd.c172
-rw-r--r--drivers/staging/vt6656/wcmd.h36
-rw-r--r--drivers/staging/vt6656/wctl.c23
-rw-r--r--drivers/staging/vt6656/wctl.h13
-rw-r--r--drivers/staging/vt6656/wmgr.c960
-rw-r--r--drivers/staging/vt6656/wmgr.h180
-rw-r--r--drivers/staging/vt6656/wpa.c14
-rw-r--r--drivers/staging/vt6656/wpa.h16
-rw-r--r--drivers/staging/vt6656/wpa2.c50
-rw-r--r--drivers/staging/vt6656/wpa2.h20
-rw-r--r--drivers/staging/vt6656/wpactl.c25
-rw-r--r--drivers/staging/vt6656/wpactl.h9
-rw-r--r--drivers/staging/wavelan/Kconfig38
-rw-r--r--drivers/staging/wavelan/Makefile2
-rw-r--r--drivers/staging/wavelan/TODO7
-rw-r--r--drivers/staging/wavelan/i82586.h413
-rw-r--r--drivers/staging/wavelan/wavelan.c4383
-rw-r--r--drivers/staging/wavelan/wavelan.h370
-rw-r--r--drivers/staging/wavelan/wavelan.p.h696
-rw-r--r--drivers/staging/wavelan/wavelan_cs.c4610
-rw-r--r--drivers/staging/wavelan/wavelan_cs.h386
-rw-r--r--drivers/staging/wavelan/wavelan_cs.p.h766
-rw-r--r--drivers/staging/winbond/TODO (renamed from drivers/staging/winbond/README)3
-rw-r--r--drivers/staging/winbond/core.h14
-rw-r--r--drivers/staging/winbond/localpara.h452
-rw-r--r--drivers/staging/winbond/mac_structures.h342
-rw-r--r--drivers/staging/winbond/mds.c324
-rw-r--r--drivers/staging/winbond/mds_f.h20
-rw-r--r--drivers/staging/winbond/mds_s.h173
-rw-r--r--drivers/staging/winbond/mlme_s.h288
-rw-r--r--drivers/staging/winbond/mlmetxrx.c62
-rw-r--r--drivers/staging/winbond/mlmetxrx_f.h6
-rw-r--r--drivers/staging/winbond/mto.c299
-rw-r--r--drivers/staging/winbond/mto.h179
-rw-r--r--drivers/staging/winbond/phy_calibration.c9
-rw-r--r--drivers/staging/winbond/phy_calibration.h177
-rw-r--r--drivers/staging/winbond/reg.c3985
-rw-r--r--drivers/staging/winbond/scan_s.h152
-rw-r--r--drivers/staging/winbond/sme_api.h211
-rw-r--r--drivers/staging/winbond/sysdef.h18
-rw-r--r--drivers/staging/winbond/wb35reg.c618
-rw-r--r--drivers/staging/winbond/wb35reg_f.h104
-rw-r--r--drivers/staging/winbond/wb35reg_s.h221
-rw-r--r--drivers/staging/winbond/wb35rx.c258
-rw-r--r--drivers/staging/winbond/wb35tx_f.h18
-rw-r--r--drivers/staging/winbond/wbhal_f.h137
-rw-r--r--drivers/staging/winbond/wbhal_s.h502
-rw-r--r--drivers/staging/winbond/wblinux_f.h19
-rw-r--r--drivers/staging/winbond/wbusb.c193
-rw-r--r--drivers/staging/winbond/wbusb_s.h23
-rw-r--r--drivers/staging/wlags49_h2/Kconfig2
-rw-r--r--drivers/staging/wlags49_h2/README.wlags492
-rw-r--r--drivers/staging/wlags49_h2/ap_h2.c80
-rw-r--r--drivers/staging/wlags49_h2/debug.h104
-rw-r--r--drivers/staging/wlags49_h2/dhf.c143
-rw-r--r--drivers/staging/wlags49_h2/dhf.h56
-rw-r--r--drivers/staging/wlags49_h2/dhfcfg.h118
-rw-r--r--drivers/staging/wlags49_h2/hcf.c15
-rw-r--r--drivers/staging/wlags49_h2/wl_cs.c39
-rw-r--r--drivers/staging/wlags49_h2/wl_cs.h4
-rw-r--r--drivers/staging/wlags49_h2/wl_internal.h8
-rw-r--r--drivers/staging/wlags49_h2/wl_main.c3
-rw-r--r--drivers/staging/wlags49_h2/wl_netdev.c51
-rw-r--r--drivers/staging/wlags49_h2/wl_priv.c11
-rw-r--r--drivers/staging/wlags49_h2/wl_profile.c957
-rw-r--r--drivers/staging/wlags49_h2/wl_sysfs.c3
-rw-r--r--drivers/staging/wlags49_h2/wl_wext.c9
-rw-r--r--drivers/staging/wlags49_h25/Kconfig2
-rw-r--r--drivers/staging/wlan-ng/hfa384x_usb.c154
-rw-r--r--drivers/staging/wlan-ng/p80211conv.c5
-rw-r--r--drivers/staging/wlan-ng/p80211req.c5
-rw-r--r--drivers/staging/wlan-ng/p80211wext.c399
-rw-r--r--drivers/staging/wlan-ng/prism2fw.c36
-rw-r--r--drivers/staging/wlan-ng/prism2sta.c71
-rw-r--r--drivers/staging/wlan-ng/prism2usb.c65
-rw-r--r--drivers/telephony/ixj.c15
-rw-r--r--drivers/telephony/ixj_pcmcia.c3
-rw-r--r--drivers/usb/atm/speedtch.c5
-rw-r--r--drivers/usb/atm/ueagle-atm.c347
-rw-r--r--drivers/usb/c67x00/c67x00-hcd.h2
-rw-r--r--drivers/usb/class/cdc-acm.c22
-rw-r--r--drivers/usb/class/cdc-acm.h4
-rw-r--r--drivers/usb/class/cdc-wdm.c38
-rw-r--r--drivers/usb/class/usblp.c2
-rw-r--r--drivers/usb/core/buffer.c2
-rw-r--r--drivers/usb/core/config.c214
-rw-r--r--drivers/usb/core/devices.c19
-rw-r--r--drivers/usb/core/devio.c3
-rw-r--r--drivers/usb/core/driver.c60
-rw-r--r--drivers/usb/core/generic.c2
-rw-r--r--drivers/usb/core/hcd-pci.c2
-rw-r--r--drivers/usb/core/hcd.c246
-rw-r--r--drivers/usb/core/hub.c30
-rw-r--r--drivers/usb/core/inode.c3
-rw-r--r--drivers/usb/core/message.c133
-rw-r--r--drivers/usb/core/quirks.c4
-rw-r--r--drivers/usb/core/sysfs.c25
-rw-r--r--drivers/usb/core/urb.c18
-rw-r--r--drivers/usb/core/usb.c96
-rw-r--r--drivers/usb/early/ehci-dbgp.c120
-rw-r--r--drivers/usb/gadget/Kconfig58
-rw-r--r--drivers/usb/gadget/Makefile8
-rw-r--r--drivers/usb/gadget/at91_udc.c7
-rw-r--r--drivers/usb/gadget/atmel_usba_udc.c3
-rw-r--r--drivers/usb/gadget/composite.c60
-rw-r--r--drivers/usb/gadget/config.c4
-rw-r--r--drivers/usb/gadget/dummy_hcd.c4
-rw-r--r--drivers/usb/gadget/epautoconf.c12
-rw-r--r--drivers/usb/gadget/f_acm.c32
-rw-r--r--drivers/usb/gadget/f_ecm.c33
-rw-r--r--drivers/usb/gadget/f_fs.c2442
-rw-r--r--drivers/usb/gadget/f_hid.c673
-rw-r--r--drivers/usb/gadget/f_mass_storage.c138
-rw-r--r--drivers/usb/gadget/f_rndis.c33
-rw-r--r--drivers/usb/gadget/f_uvc.c661
-rw-r--r--drivers/usb/gadget/f_uvc.h376
-rw-r--r--drivers/usb/gadget/fsl_mxc_udc.c (renamed from drivers/usb/gadget/fsl_mx3_udc.c)14
-rw-r--r--drivers/usb/gadget/fsl_qe_udc.c7
-rw-r--r--drivers/usb/gadget/fsl_udc_core.c2
-rw-r--r--drivers/usb/gadget/g_ffs.c426
-rw-r--r--drivers/usb/gadget/hid.c298
-rw-r--r--drivers/usb/gadget/printer.c2
-rw-r--r--drivers/usb/gadget/pxa27x_udc.h2
-rw-r--r--drivers/usb/gadget/storage_common.c2
-rw-r--r--drivers/usb/gadget/u_ether.c4
-rw-r--r--drivers/usb/gadget/uvc.h241
-rw-r--r--drivers/usb/gadget/uvc_queue.c583
-rw-r--r--drivers/usb/gadget/uvc_queue.h89
-rw-r--r--drivers/usb/gadget/uvc_v4l2.c374
-rw-r--r--drivers/usb/gadget/uvc_video.c386
-rw-r--r--drivers/usb/gadget/webcam.c399
-rw-r--r--drivers/usb/host/Kconfig15
-rw-r--r--drivers/usb/host/ehci-au1xxx.c27
-rw-r--r--drivers/usb/host/ehci-fsl.c2
-rw-r--r--drivers/usb/host/ehci-hcd.c3
-rw-r--r--drivers/usb/host/ehci-hub.c182
-rw-r--r--drivers/usb/host/ehci-mxc.c4
-rw-r--r--drivers/usb/host/ehci-omap.c23
-rw-r--r--drivers/usb/host/ehci-pci.c18
-rw-r--r--drivers/usb/host/ehci-ppc-of.c11
-rw-r--r--drivers/usb/host/ehci-q.c2
-rw-r--r--drivers/usb/host/ehci-xilinx-of.c9
-rw-r--r--drivers/usb/host/ehci.h18
-rw-r--r--drivers/usb/host/fhci-dbg.c2
-rw-r--r--drivers/usb/host/fhci-hcd.c13
-rw-r--r--drivers/usb/host/fhci-hub.c2
-rw-r--r--drivers/usb/host/fhci-mem.c2
-rw-r--r--drivers/usb/host/fhci-q.c2
-rw-r--r--drivers/usb/host/fhci-sched.c2
-rw-r--r--drivers/usb/host/fhci-tds.c2
-rw-r--r--drivers/usb/host/fhci.h11
-rw-r--r--drivers/usb/host/imx21-hcd.c2
-rw-r--r--drivers/usb/host/isp116x-hcd.c2
-rw-r--r--drivers/usb/host/isp1362-hcd.c6
-rw-r--r--drivers/usb/host/isp1760-hcd.c29
-rw-r--r--drivers/usb/host/isp1760-if.c22
-rw-r--r--drivers/usb/host/ohci-hcd.c33
-rw-r--r--drivers/usb/host/ohci-omap3.c735
-rw-r--r--drivers/usb/host/ohci-ppc-of.c15
-rw-r--r--drivers/usb/host/oxu210hp-hcd.c31
-rw-r--r--drivers/usb/host/r8a66597-hcd.c39
-rw-r--r--drivers/usb/host/sl811-hcd.c60
-rw-r--r--drivers/usb/host/sl811_cs.c28
-rw-r--r--drivers/usb/host/u132-hcd.c6
-rw-r--r--drivers/usb/host/uhci-hcd.c2
-rw-r--r--drivers/usb/host/whci/debug.c2
-rw-r--r--drivers/usb/host/whci/qset.c6
-rw-r--r--drivers/usb/host/xhci-dbg.c24
-rw-r--r--drivers/usb/host/xhci-hub.c39
-rw-r--r--drivers/usb/host/xhci-mem.c489
-rw-r--r--drivers/usb/host/xhci-pci.c8
-rw-r--r--drivers/usb/host/xhci-ring.c329
-rw-r--r--drivers/usb/host/xhci.c416
-rw-r--r--drivers/usb/host/xhci.h112
-rw-r--r--drivers/usb/misc/appledisplay.c6
-rw-r--r--drivers/usb/misc/ftdi-elan.c20
-rw-r--r--drivers/usb/misc/iowarrior.c12
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb.c13
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb_con.c8
-rw-r--r--drivers/usb/misc/usblcd.c8
-rw-r--r--drivers/usb/misc/usbtest.c17
-rw-r--r--drivers/usb/mon/mon_bin.c27
-rw-r--r--drivers/usb/mon/mon_main.c3
-rw-r--r--drivers/usb/mon/mon_stat.c3
-rw-r--r--drivers/usb/mon/mon_text.c6
-rw-r--r--drivers/usb/musb/Kconfig6
-rw-r--r--drivers/usb/musb/Makefile14
-rw-r--r--drivers/usb/musb/blackfin.c96
-rw-r--r--drivers/usb/musb/davinci.c2
-rw-r--r--drivers/usb/musb/musb_core.c147
-rw-r--r--drivers/usb/musb/musb_core.h10
-rw-r--r--drivers/usb/musb/musb_debug.h13
-rw-r--r--drivers/usb/musb/musb_debugfs.c294
-rw-r--r--drivers/usb/musb/musb_gadget_ep0.c25
-rw-r--r--drivers/usb/musb/musb_regs.h10
-rw-r--r--drivers/usb/musb/musb_virthub.c4
-rw-r--r--drivers/usb/musb/musbhsdma.h16
-rw-r--r--drivers/usb/musb/omap2430.c29
-rw-r--r--drivers/usb/musb/tusb6010.c2
-rw-r--r--drivers/usb/otg/isp1301_omap.c2
-rw-r--r--drivers/usb/otg/twl4030-usb.c108
-rw-r--r--drivers/usb/otg/ulpi.c50
-rw-r--r--drivers/usb/serial/Kconfig23
-rw-r--r--drivers/usb/serial/Makefile2
-rw-r--r--drivers/usb/serial/aircable.c499
-rw-r--r--drivers/usb/serial/ark3116.c111
-rw-r--r--drivers/usb/serial/belkin_sa.c130
-rw-r--r--drivers/usb/serial/belkin_sa.h10
-rw-r--r--drivers/usb/serial/ch341.c5
-rw-r--r--drivers/usb/serial/console.c27
-rw-r--r--drivers/usb/serial/cp210x.c63
-rw-r--r--drivers/usb/serial/cypress_m8.c242
-rw-r--r--drivers/usb/serial/cypress_m8.h53
-rw-r--r--drivers/usb/serial/digi_acceleport.c4
-rw-r--r--drivers/usb/serial/empeg.c401
-rw-r--r--drivers/usb/serial/ftdi_sio.c457
-rw-r--r--drivers/usb/serial/ftdi_sio.h126
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h42
-rw-r--r--drivers/usb/serial/generic.c330
-rw-r--r--drivers/usb/serial/io_edgeport.c3
-rw-r--r--drivers/usb/serial/io_edgeport.h16
-rw-r--r--drivers/usb/serial/io_ionsp.h95
-rw-r--r--drivers/usb/serial/io_ti.c228
-rw-r--r--drivers/usb/serial/io_ti.h92
-rw-r--r--drivers/usb/serial/io_usbvend.h87
-rw-r--r--drivers/usb/serial/ipaq.c357
-rw-r--r--drivers/usb/serial/ipaq.h54
-rw-r--r--drivers/usb/serial/ipw.c184
-rw-r--r--drivers/usb/serial/ir-usb.c272
-rw-r--r--drivers/usb/serial/iuu_phoenix.c30
-rw-r--r--drivers/usb/serial/kl5kusb105.c436
-rw-r--r--drivers/usb/serial/kl5kusb105.h47
-rw-r--r--drivers/usb/serial/kobil_sct.c3
-rw-r--r--drivers/usb/serial/kobil_sct.h75
-rw-r--r--drivers/usb/serial/mct_u232.c7
-rw-r--r--drivers/usb/serial/mct_u232.h254
-rw-r--r--drivers/usb/serial/mos7720.c1130
-rw-r--r--drivers/usb/serial/mos7840.c1
-rw-r--r--drivers/usb/serial/option.c841
-rw-r--r--drivers/usb/serial/oti6858.c254
-rw-r--r--drivers/usb/serial/pl2303.c430
-rw-r--r--drivers/usb/serial/pl2303.h6
-rw-r--r--drivers/usb/serial/qcaux.c5
-rw-r--r--drivers/usb/serial/qcserial.c64
-rw-r--r--drivers/usb/serial/safe_serial.c231
-rw-r--r--drivers/usb/serial/spcp8x5.c407
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.c179
-rw-r--r--drivers/usb/serial/usb-serial.c47
-rw-r--r--drivers/usb/serial/usb-wwan.h67
-rw-r--r--drivers/usb/serial/usb_debug.c12
-rw-r--r--drivers/usb/serial/usb_wwan.c665
-rw-r--r--drivers/usb/serial/visor.c344
-rw-r--r--drivers/usb/serial/visor.h9
-rw-r--r--drivers/usb/serial/zio.c64
-rw-r--r--drivers/usb/storage/isd200.c4
-rw-r--r--drivers/usb/storage/onetouch.c12
-rw-r--r--drivers/usb/storage/transport.c2
-rw-r--r--drivers/usb/storage/unusual_devs.h24
-rw-r--r--drivers/usb/storage/usb.c87
-rw-r--r--drivers/usb/storage/usb.h3
-rw-r--r--drivers/usb/usb-skeleton.c10
-rw-r--r--drivers/usb/wusbcore/wa-xfer.c4
-rw-r--r--drivers/usb/wusbcore/wusbhc.h4
-rw-r--r--drivers/vhost/net.c18
-rw-r--r--drivers/vhost/vhost.c77
-rw-r--r--drivers/video/Kconfig17
-rw-r--r--drivers/video/amifb.c49
-rw-r--r--drivers/video/arcfb.c8
-rw-r--r--drivers/video/aty/atyfb_base.c4
-rw-r--r--drivers/video/aty/radeon_base.c4
-rw-r--r--drivers/video/backlight/88pm860x_bl.c2
-rw-r--r--drivers/video/backlight/Kconfig116
-rw-r--r--drivers/video/backlight/Makefile4
-rw-r--r--drivers/video/backlight/adp8860_bl.c838
-rw-r--r--drivers/video/backlight/adx_bl.c4
-rw-r--r--drivers/video/backlight/ep93xx_bl.c160
-rw-r--r--drivers/video/backlight/l4f00242t03.c11
-rw-r--r--drivers/video/backlight/max8925_bl.c1
-rw-r--r--drivers/video/backlight/mbp_nvidia_bl.c47
-rw-r--r--drivers/video/backlight/pcf50633-backlight.c190
-rw-r--r--drivers/video/backlight/s6e63m0.c920
-rw-r--r--drivers/video/backlight/s6e63m0_gamma.h266
-rw-r--r--drivers/video/bf54x-lq043fb.c7
-rw-r--r--drivers/video/bfin-lq035q1-fb.c252
-rw-r--r--drivers/video/bfin-t350mcqb-fb.c22
-rw-r--r--drivers/video/bw2.c7
-rw-r--r--drivers/video/cg14.c7
-rw-r--r--drivers/video/cg3.c7
-rw-r--r--drivers/video/cg6.c9
-rw-r--r--drivers/video/cirrusfb.c1
-rw-r--r--drivers/video/cobalt_lcdfb.c2
-rw-r--r--drivers/video/da8xx-fb.c301
-rw-r--r--drivers/video/efifb.c11
-rw-r--r--drivers/video/fb_defio.c42
-rw-r--r--drivers/video/fbmem.c77
-rw-r--r--drivers/video/fbsysfs.c1
-rw-r--r--drivers/video/ffb.c9
-rw-r--r--drivers/video/fm2fb.c1
-rw-r--r--drivers/video/fsl-diu-fb.c10
-rw-r--r--drivers/video/hgafb.c10
-rw-r--r--drivers/video/hitfb.c8
-rw-r--r--drivers/video/intelfb/intelfb.h4
-rw-r--r--drivers/video/leo.c7
-rw-r--r--drivers/video/mb862xx/mb862xxfb.c8
-rw-r--r--drivers/video/mx3fb.c3
-rw-r--r--drivers/video/nuc900fb.c2
-rw-r--r--drivers/video/offb.c28
-rw-r--r--drivers/video/omap2/displays/Kconfig9
-rw-r--r--drivers/video/omap2/displays/Makefile1
-rw-r--r--drivers/video/omap2/displays/panel-acx565akm.c819
-rw-r--r--drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c78
-rw-r--r--drivers/video/omap2/displays/panel-taal.c143
-rw-r--r--drivers/video/omap2/dss/Kconfig6
-rw-r--r--drivers/video/omap2/dss/Makefile3
-rw-r--r--drivers/video/omap2/dss/core.c85
-rw-r--r--drivers/video/omap2/dss/display.c9
-rw-r--r--drivers/video/omap2/dss/dss.c24
-rw-r--r--drivers/video/omap2/dss/dss.h50
-rw-r--r--drivers/video/omap2/dss/manager.c21
-rw-r--r--drivers/video/omap2/dss/sdi.c26
-rw-r--r--drivers/video/omap2/dss/venc.c15
-rw-r--r--drivers/video/omap2/omapfb/omapfb-ioctl.c5
-rw-r--r--drivers/video/omap2/omapfb/omapfb-sysfs.c25
-rw-r--r--drivers/video/p9100.c7
-rw-r--r--drivers/video/platinumfb.c9
-rw-r--r--drivers/video/s3c2410fb.c10
-rw-r--r--drivers/video/s3fb.c101
-rw-r--r--drivers/video/sgivwfb.c10
-rw-r--r--drivers/video/sis/sis_main.c2
-rw-r--r--drivers/video/sunxvr1000.c9
-rw-r--r--drivers/video/tcx.c7
-rw-r--r--drivers/video/vesafb.c11
-rw-r--r--drivers/video/vfb.c4
-rw-r--r--drivers/video/vga16fb.c36
-rw-r--r--drivers/video/via/Makefile4
-rw-r--r--drivers/video/via/accel.c137
-rw-r--r--drivers/video/via/accel.h40
-rw-r--r--drivers/video/via/chip.h8
-rw-r--r--drivers/video/via/dvi.c37
-rw-r--r--drivers/video/via/global.h1
-rw-r--r--drivers/video/via/hw.c307
-rw-r--r--drivers/video/via/hw.h20
-rw-r--r--drivers/video/via/ioctl.h2
-rw-r--r--drivers/video/via/lcd.c31
-rw-r--r--drivers/video/via/lcd.h2
-rw-r--r--drivers/video/via/share.h20
-rw-r--r--drivers/video/via/via-core.c668
-rw-r--r--drivers/video/via/via-gpio.c285
-rw-r--r--drivers/video/via/via_i2c.c232
-rw-r--r--drivers/video/via/via_modesetting.c126
-rw-r--r--drivers/video/via/via_modesetting.h38
-rw-r--r--drivers/video/via/via_utility.c1
-rw-r--r--drivers/video/via/viafbdev.c192
-rw-r--r--drivers/video/via/viafbdev.h14
-rw-r--r--drivers/video/via/viamode.c15
-rw-r--r--drivers/video/via/vt1636.c34
-rw-r--r--drivers/video/via/vt1636.h2
-rw-r--r--drivers/video/w100fb.c10
-rw-r--r--drivers/video/xilinxfb.c23
-rw-r--r--drivers/virtio/virtio_balloon.c17
-rw-r--r--drivers/virtio/virtio_pci.c2
-rw-r--r--drivers/virtio/virtio_ring.c44
-rw-r--r--drivers/vlynq/Kconfig2
-rw-r--r--drivers/w1/slaves/w1_ds2431.c4
-rw-r--r--drivers/w1/slaves/w1_ds2433.c4
-rw-r--r--drivers/w1/slaves/w1_ds2760.c2
-rw-r--r--drivers/w1/w1.c4
-rw-r--r--drivers/watchdog/Kconfig28
-rw-r--r--drivers/watchdog/Makefile1
-rw-r--r--drivers/watchdog/bfin_wdt.c19
-rw-r--r--drivers/watchdog/booke_wdt.c6
-rw-r--r--drivers/watchdog/cpwd.c9
-rw-r--r--drivers/watchdog/eurotechwdt.c1
-rw-r--r--drivers/watchdog/gef_wdt.c8
-rw-r--r--drivers/watchdog/iTCO_vendor_support.c11
-rw-r--r--drivers/watchdog/iTCO_wdt.c29
-rw-r--r--drivers/watchdog/imx2_wdt.c358
-rw-r--r--drivers/watchdog/mpc8xxx_wdt.c10
-rw-r--r--drivers/watchdog/mpcore_wdt.c21
-rw-r--r--drivers/watchdog/pc87413_wdt.c9
-rw-r--r--drivers/watchdog/pcwd_usb.c6
-rw-r--r--drivers/watchdog/pnx833x_wdt.c11
-rw-r--r--drivers/watchdog/rdc321x_wdt.c53
-rw-r--r--drivers/watchdog/riowd.c7
-rw-r--r--drivers/watchdog/s3c2410_wdt.c9
-rw-r--r--drivers/watchdog/shwdt.c2
-rw-r--r--drivers/watchdog/twl4030_wdt.c2
-rw-r--r--drivers/watchdog/wdt.c2
-rw-r--r--drivers/watchdog/wdt977.c2
-rw-r--r--drivers/xen/manage.c28
-rw-r--r--drivers/zorro/proc.c6
-rw-r--r--drivers/zorro/zorro-driver.c24
-rw-r--r--drivers/zorro/zorro-sysfs.c13
-rw-r--r--drivers/zorro/zorro.c243
-rw-r--r--firmware/Makefile2
-rw-r--r--firmware/bnx2/bnx2-mips-09-5.0.0.j15.fw.ihex6081
-rw-r--r--firmware/bnx2/bnx2-mips-09-5.0.0.j9.fw.ihex6058
-rw-r--r--fs/9p/v9fs_vfs.h2
-rw-r--r--fs/9p/vfs_dir.c8
-rw-r--r--fs/9p/vfs_file.c17
-rw-r--r--fs/9p/vfs_inode.c111
-rw-r--r--fs/9p/vfs_super.c55
-rw-r--r--fs/Makefile2
-rw-r--r--fs/adfs/dir.c2
-rw-r--r--fs/adfs/file.c2
-rw-r--r--fs/adfs/inode.c3
-rw-r--r--fs/affs/affs.h2
-rw-r--r--fs/affs/file.c4
-rw-r--r--fs/affs/namei.c2
-rw-r--r--fs/afs/dir.c6
-rw-r--r--fs/afs/file.c64
-rw-r--r--fs/afs/internal.h3
-rw-r--r--fs/afs/mntpt.c6
-rw-r--r--fs/afs/write.c3
-rw-r--r--fs/aio.c71
-rw-r--r--fs/attr.c50
-rw-r--r--fs/autofs/root.c1
-rw-r--r--fs/autofs4/dev-ioctl.c18
-rw-r--r--fs/autofs4/root.c27
-rw-r--r--fs/bad_inode.c3
-rw-r--r--fs/bfs/dir.c6
-rw-r--r--fs/block_dev.c346
-rw-r--r--fs/btrfs/acl.c4
-rw-r--r--fs/btrfs/async-thread.c1
-rw-r--r--fs/btrfs/btrfs_inode.h3
-rw-r--r--fs/btrfs/ctree.c109
-rw-r--r--fs/btrfs/ctree.h165
-rw-r--r--fs/btrfs/delayed-ref.c101
-rw-r--r--fs/btrfs/delayed-ref.h3
-rw-r--r--fs/btrfs/disk-io.c169
-rw-r--r--fs/btrfs/disk-io.h4
-rw-r--r--fs/btrfs/extent-tree.c2257
-rw-r--r--fs/btrfs/extent_io.c85
-rw-r--r--fs/btrfs/extent_io.h14
-rw-r--r--fs/btrfs/file-item.c28
-rw-r--r--fs/btrfs/file.c169
-rw-r--r--fs/btrfs/inode-item.c27
-rw-r--r--fs/btrfs/inode.c1724
-rw-r--r--fs/btrfs/ioctl.c211
-rw-r--r--fs/btrfs/ordered-data.c82
-rw-r--r--fs/btrfs/ordered-data.h9
-rw-r--r--fs/btrfs/relocation.c1971
-rw-r--r--fs/btrfs/root-tree.c23
-rw-r--r--fs/btrfs/super.c35
-rw-r--r--fs/btrfs/transaction.c232
-rw-r--r--fs/btrfs/transaction.h24
-rw-r--r--fs/btrfs/tree-defrag.c7
-rw-r--r--fs/btrfs/tree-log.c241
-rw-r--r--fs/btrfs/tree-log.h2
-rw-r--r--fs/btrfs/volumes.c17
-rw-r--r--fs/btrfs/xattr.c14
-rw-r--r--fs/btrfs/xattr.h6
-rw-r--r--fs/buffer.c149
-rw-r--r--fs/cachefiles/internal.h1
-rw-r--r--fs/cachefiles/namei.c98
-rw-r--r--fs/cachefiles/security.c4
-rw-r--r--fs/ceph/addr.c17
-rw-r--r--fs/ceph/auth.c8
-rw-r--r--fs/ceph/auth.h8
-rw-r--r--fs/ceph/auth_none.c9
-rw-r--r--fs/ceph/auth_x.c31
-rw-r--r--fs/ceph/caps.c47
-rw-r--r--fs/ceph/ceph_fs.h83
-rw-r--r--fs/ceph/ceph_strings.c16
-rw-r--r--fs/ceph/debugfs.c13
-rw-r--r--fs/ceph/dir.c52
-rw-r--r--fs/ceph/export.c16
-rw-r--r--fs/ceph/file.c21
-rw-r--r--fs/ceph/inode.c103
-rw-r--r--fs/ceph/ioctl.c2
-rw-r--r--fs/ceph/mds_client.c426
-rw-r--r--fs/ceph/mds_client.h6
-rw-r--r--fs/ceph/messenger.c114
-rw-r--r--fs/ceph/messenger.h12
-rw-r--r--fs/ceph/mon_client.c262
-rw-r--r--fs/ceph/mon_client.h27
-rw-r--r--fs/ceph/msgpool.c180
-rw-r--r--fs/ceph/msgpool.h12
-rw-r--r--fs/ceph/msgr.h21
-rw-r--r--fs/ceph/osd_client.c131
-rw-r--r--fs/ceph/osd_client.h3
-rw-r--r--fs/ceph/osdmap.c31
-rw-r--r--fs/ceph/osdmap.h2
-rw-r--r--fs/ceph/pagelist.c2
-rw-r--r--fs/ceph/rados.h24
-rw-r--r--fs/ceph/snap.c2
-rw-r--r--fs/ceph/super.c163
-rw-r--r--fs/ceph/super.h33
-rw-r--r--fs/ceph/xattr.c35
-rw-r--r--fs/cifs/asn1.c103
-rw-r--r--fs/cifs/cifs_debug.c48
-rw-r--r--fs/cifs/cifs_debug.h42
-rw-r--r--fs/cifs/cifs_dfs_ref.c34
-rw-r--r--fs/cifs/cifs_spnego.c6
-rw-r--r--fs/cifs/cifs_unicode.c5
-rw-r--r--fs/cifs/cifsacl.c76
-rw-r--r--fs/cifs/cifsencrypt.c10
-rw-r--r--fs/cifs/cifsfs.c163
-rw-r--r--fs/cifs/cifsfs.h4
-rw-r--r--fs/cifs/cifsglob.h10
-rw-r--r--fs/cifs/cifsproto.h30
-rw-r--r--fs/cifs/cifssmb.c437
-rw-r--r--fs/cifs/connect.c639
-rw-r--r--fs/cifs/dir.c91
-rw-r--r--fs/cifs/dns_resolve.c16
-rw-r--r--fs/cifs/export.c2
-rw-r--r--fs/cifs/file.c224
-rw-r--r--fs/cifs/inode.c144
-rw-r--r--fs/cifs/ioctl.c10
-rw-r--r--fs/cifs/link.c10
-rw-r--r--fs/cifs/misc.c81
-rw-r--r--fs/cifs/netmisc.c16
-rw-r--r--fs/cifs/readdir.c85
-rw-r--r--fs/cifs/sess.c81
-rw-r--r--fs/cifs/transport.c92
-rw-r--r--fs/cifs/xattr.c40
-rw-r--r--fs/coda/coda_int.h3
-rw-r--r--fs/coda/file.c6
-rw-r--r--fs/coda/pioctl.c76
-rw-r--r--fs/coda/psdev.c5
-rw-r--r--fs/compat.c134
-rw-r--r--fs/configfs/dir.c4
-rw-r--r--fs/configfs/inode.c9
-rw-r--r--fs/dcache.c20
-rw-r--r--fs/debugfs/file.c21
-rw-r--r--fs/devpts/inode.c9
-rw-r--r--fs/direct-io.c123
-rw-r--r--fs/dlm/lock.c5
-rw-r--r--fs/dlm/user.c88
-rw-r--r--fs/drop_caches.c24
-rw-r--r--fs/ecryptfs/ecryptfs_kernel.h5
-rw-r--r--fs/ecryptfs/file.c6
-rw-r--r--fs/ecryptfs/inode.c52
-rw-r--r--fs/ecryptfs/main.c166
-rw-r--r--fs/ecryptfs/mmap.c19
-rw-r--r--fs/ecryptfs/read_write.c13
-rw-r--r--fs/ecryptfs/super.c22
-rw-r--r--fs/eventpoll.c3
-rw-r--r--fs/exec.c204
-rw-r--r--fs/exofs/dir.c2
-rw-r--r--fs/exofs/file.c7
-rw-r--r--fs/exofs/inode.c41
-rw-r--r--fs/ext2/acl.c4
-rw-r--r--fs/ext2/balloc.c6
-rw-r--r--fs/ext2/ext2.h3
-rw-r--r--fs/ext2/file.c7
-rw-r--r--fs/ext2/ialloc.c21
-rw-r--r--fs/ext2/inode.c160
-rw-r--r--fs/ext2/super.c117
-rw-r--r--fs/ext2/xattr.c12
-rw-r--r--fs/ext2/xattr.h12
-rw-r--r--fs/ext2/xattr_security.c2
-rw-r--r--fs/ext2/xattr_trusted.c2
-rw-r--r--fs/ext2/xattr_user.c2
-rw-r--r--fs/ext3/acl.c4
-rw-r--r--fs/ext3/balloc.c6
-rw-r--r--fs/ext3/dir.c2
-rw-r--r--fs/ext3/fsync.c27
-rw-r--r--fs/ext3/ialloc.c13
-rw-r--r--fs/ext3/inode.c2
-rw-r--r--fs/ext3/super.c115
-rw-r--r--fs/ext3/xattr.c10
-rw-r--r--fs/ext3/xattr.h12
-rw-r--r--fs/ext3/xattr_security.c2
-rw-r--r--fs/ext3/xattr_trusted.c2
-rw-r--r--fs/ext3/xattr_user.c2
-rw-r--r--fs/ext4/acl.c4
-rw-r--r--fs/ext4/balloc.c5
-rw-r--r--fs/ext4/block_validity.c4
-rw-r--r--fs/ext4/dir.c26
-rw-r--r--fs/ext4/ext4.h169
-rw-r--r--fs/ext4/ext4_jbd2.h8
-rw-r--r--fs/ext4/extents.c417
-rw-r--r--fs/ext4/file.c2
-rw-r--r--fs/ext4/fsync.c47
-rw-r--r--fs/ext4/ialloc.c101
-rw-r--r--fs/ext4/inode.c725
-rw-r--r--fs/ext4/ioctl.c27
-rw-r--r--fs/ext4/mballoc.c120
-rw-r--r--fs/ext4/migrate.c2
-rw-r--r--fs/ext4/move_extent.c13
-rw-r--r--fs/ext4/namei.c61
-rw-r--r--fs/ext4/resize.c3
-rw-r--r--fs/ext4/super.c117
-rw-r--r--fs/ext4/symlink.c2
-rw-r--r--fs/ext4/xattr.c49
-rw-r--r--fs/ext4/xattr.h12
-rw-r--r--fs/ext4/xattr_security.c2
-rw-r--r--fs/ext4/xattr_trusted.c2
-rw-r--r--fs/ext4/xattr_user.c2
-rw-r--r--fs/fat/cache.c13
-rw-r--r--fs/fat/dir.c28
-rw-r--r--fs/fat/fat.h22
-rw-r--r--fs/fat/file.c59
-rw-r--r--fs/fat/inode.c43
-rw-r--r--fs/fat/misc.c22
-rw-r--r--fs/fcntl.c71
-rw-r--r--fs/file_table.c21
-rw-r--r--fs/freevxfs/vxfs_lookup.c2
-rw-r--r--fs/fs-writeback.c108
-rw-r--r--fs/fscache/object-list.c2
-rw-r--r--fs/fuse/dev.c528
-rw-r--r--fs/fuse/dir.c5
-rw-r--r--fs/fuse/file.c48
-rw-r--r--fs/fuse/fuse_i.h6
-rw-r--r--fs/generic_acl.c4
-rw-r--r--fs/gfs2/acl.c6
-rw-r--r--fs/gfs2/acl.h2
-rw-r--r--fs/gfs2/aops.c16
-rw-r--r--fs/gfs2/bmap.c17
-rw-r--r--fs/gfs2/dir.c2
-rw-r--r--fs/gfs2/export.c2
-rw-r--r--fs/gfs2/file.c11
-rw-r--r--fs/gfs2/glock.c3
-rw-r--r--fs/gfs2/incore.h11
-rw-r--r--fs/gfs2/inode.c103
-rw-r--r--fs/gfs2/inode.h4
-rw-r--r--fs/gfs2/log.c160
-rw-r--r--fs/gfs2/log.h30
-rw-r--r--fs/gfs2/lops.c2
-rw-r--r--fs/gfs2/main.c2
-rw-r--r--fs/gfs2/meta_io.c5
-rw-r--r--fs/gfs2/ops_fstype.c19
-rw-r--r--fs/gfs2/ops_inode.c5
-rw-r--r--fs/gfs2/quota.c114
-rw-r--r--fs/gfs2/rgrp.c81
-rw-r--r--fs/gfs2/super.c11
-rw-r--r--fs/gfs2/super.h2
-rw-r--r--fs/gfs2/sys.c6
-rw-r--r--fs/gfs2/trans.c18
-rw-r--r--fs/gfs2/xattr.c6
-rw-r--r--fs/hfsplus/dir.c2
-rw-r--r--fs/hfsplus/hfsplus_fs.h3
-rw-r--r--fs/hfsplus/inode.c2
-rw-r--r--fs/hfsplus/ioctl.c12
-rw-r--r--fs/hostfs/hostfs_kern.c4
-rw-r--r--fs/hpfs/file.c4
-rw-r--r--fs/hpfs/hpfs_fn.h2
-rw-r--r--fs/hppfs/hppfs.c2
-rw-r--r--fs/hugetlbfs/inode.c2
-rw-r--r--fs/inode.c28
-rw-r--r--fs/internal.h2
-rw-r--r--fs/ioctl.c15
-rw-r--r--fs/isofs/dir.c1
-rw-r--r--fs/jbd/commit.c8
-rw-r--r--fs/jbd/journal.c33
-rw-r--r--fs/jbd2/checkpoint.c3
-rw-r--r--fs/jbd2/commit.c6
-rw-r--r--fs/jbd2/journal.c2
-rw-r--r--fs/jbd2/transaction.c5
-rw-r--r--fs/jffs2/acl.c4
-rw-r--r--fs/jffs2/acl.h4
-rw-r--r--fs/jffs2/background.c3
-rw-r--r--fs/jffs2/erase.c12
-rw-r--r--fs/jffs2/file.c4
-rw-r--r--fs/jffs2/fs.c14
-rw-r--r--fs/jffs2/gc.c17
-rw-r--r--fs/jffs2/nodelist.h10
-rw-r--r--fs/jffs2/nodemgmt.c28
-rw-r--r--fs/jffs2/os-linux.h5
-rw-r--r--fs/jffs2/scan.c4
-rw-r--r--fs/jffs2/security.c2
-rw-r--r--fs/jffs2/super.c2
-rw-r--r--fs/jffs2/wbuf.c8
-rw-r--r--fs/jffs2/xattr.c8
-rw-r--r--fs/jffs2/xattr.h8
-rw-r--r--fs/jffs2/xattr_trusted.c2
-rw-r--r--fs/jffs2/xattr_user.c2
-rw-r--r--fs/jfs/file.c6
-rw-r--r--fs/jfs/jfs_dmap.c2
-rw-r--r--fs/jfs/jfs_inode.c12
-rw-r--r--fs/jfs/jfs_inode.h2
-rw-r--r--fs/jfs/super.c29
-rw-r--r--fs/libfs.c143
-rw-r--r--fs/logfs/dev_bdev.c6
-rw-r--r--fs/logfs/dev_mtd.c26
-rw-r--r--fs/logfs/dir.c2
-rw-r--r--fs/logfs/file.c20
-rw-r--r--fs/logfs/gc.c49
-rw-r--r--fs/logfs/inode.c15
-rw-r--r--fs/logfs/journal.c7
-rw-r--r--fs/logfs/logfs.h17
-rw-r--r--fs/logfs/logfs_abi.h10
-rw-r--r--fs/logfs/readwrite.c19
-rw-r--r--fs/logfs/segment.c7
-rw-r--r--fs/logfs/super.c22
-rw-r--r--fs/minix/bitmap.c5
-rw-r--r--fs/minix/dir.c7
-rw-r--r--fs/minix/file.c2
-rw-r--r--fs/minix/itree_v2.c27
-rw-r--r--fs/minix/minix.h2
-rw-r--r--fs/minix/namei.c11
-rw-r--r--fs/namei.c34
-rw-r--r--fs/namespace.c19
-rw-r--r--fs/ncpfs/dir.c3
-rw-r--r--fs/ncpfs/file.c4
-rw-r--r--fs/ncpfs/ioctl.c27
-rw-r--r--fs/nfs/client.c55
-rw-r--r--fs/nfs/delegation.c2
-rw-r--r--fs/nfs/dir.c148
-rw-r--r--fs/nfs/file.c20
-rw-r--r--fs/nfs/fscache.c3
-rw-r--r--fs/nfs/getroot.c191
-rw-r--r--fs/nfs/inode.c58
-rw-r--r--fs/nfs/internal.h4
-rw-r--r--fs/nfs/iostat.h6
-rw-r--r--fs/nfs/namespace.c20
-rw-r--r--fs/nfs/nfs3acl.c23
-rw-r--r--fs/nfs/nfs3proc.c128
-rw-r--r--fs/nfs/nfs3xdr.c2
-rw-r--r--fs/nfs/nfs4_fs.h8
-rw-r--r--fs/nfs/nfs4namespace.c12
-rw-r--r--fs/nfs/nfs4proc.c172
-rw-r--r--fs/nfs/nfs4state.c36
-rw-r--r--fs/nfs/nfs4xdr.c22
-rw-r--r--fs/nfs/nfsroot.c14
-rw-r--r--fs/nfs/pagelist.c14
-rw-r--r--fs/nfs/proc.c144
-rw-r--r--fs/nfs/read.c4
-rw-r--r--fs/nfs/super.c151
-rw-r--r--fs/nfs/unlink.c4
-rw-r--r--fs/nfs/write.c20
-rw-r--r--fs/nfsd/export.c44
-rw-r--r--fs/nfsd/nfs4callback.c140
-rw-r--r--fs/nfsd/nfs4proc.c50
-rw-r--r--fs/nfsd/nfs4recover.c87
-rw-r--r--fs/nfsd/nfs4state.c376
-rw-r--r--fs/nfsd/nfs4xdr.c27
-rw-r--r--fs/nfsd/nfsctl.c68
-rw-r--r--fs/nfsd/nfsd.h6
-rw-r--r--fs/nfsd/nfssvc.c2
-rw-r--r--fs/nfsd/state.h47
-rw-r--r--fs/nfsd/vfs.c13
-rw-r--r--fs/nfsd/vfs.h1
-rw-r--r--fs/nfsd/xdr4.h11
-rw-r--r--fs/nilfs2/alloc.c154
-rw-r--r--fs/nilfs2/alloc.h7
-rw-r--r--fs/nilfs2/btree.c91
-rw-r--r--fs/nilfs2/btree.h23
-rw-r--r--fs/nilfs2/file.c4
-rw-r--r--fs/nilfs2/inode.c15
-rw-r--r--fs/nilfs2/nilfs.h2
-rw-r--r--fs/nilfs2/recovery.c2
-rw-r--r--fs/nilfs2/segbuf.c70
-rw-r--r--fs/nilfs2/segbuf.h10
-rw-r--r--fs/nilfs2/segment.c157
-rw-r--r--fs/nilfs2/segment.h6
-rw-r--r--fs/nilfs2/super.c218
-rw-r--r--fs/nilfs2/the_nilfs.c14
-rw-r--r--fs/notify/inotify/inotify.c88
-rw-r--r--fs/notify/inotify/inotify_fsnotify.c2
-rw-r--r--fs/notify/inotify/inotify_user.c16
-rw-r--r--fs/ntfs/dir.c5
-rw-r--r--fs/ntfs/file.c37
-rw-r--r--fs/ocfs2/Makefile1
-rw-r--r--fs/ocfs2/acl.c4
-rw-r--r--fs/ocfs2/alloc.c908
-rw-r--r--fs/ocfs2/alloc.h12
-rw-r--r--fs/ocfs2/aops.c3
-rw-r--r--fs/ocfs2/blockcheck.c4
-rw-r--r--fs/ocfs2/cluster/masklog.c1
-rw-r--r--fs/ocfs2/cluster/masklog.h1
-rw-r--r--fs/ocfs2/cluster/tcp.c3
-rw-r--r--fs/ocfs2/dir.c75
-rw-r--r--fs/ocfs2/dlm/dlmast.c8
-rw-r--r--fs/ocfs2/dlm/dlmcommon.h4
-rw-r--r--fs/ocfs2/dlm/dlmconvert.c4
-rw-r--r--fs/ocfs2/dlm/dlmdomain.c28
-rw-r--r--fs/ocfs2/dlm/dlmlock.c6
-rw-r--r--fs/ocfs2/dlm/dlmmaster.c30
-rw-r--r--fs/ocfs2/dlm/dlmrecovery.c27
-rw-r--r--fs/ocfs2/dlm/dlmthread.c16
-rw-r--r--fs/ocfs2/dlm/dlmunlock.c3
-rw-r--r--fs/ocfs2/dlmglue.c3
-rw-r--r--fs/ocfs2/file.c251
-rw-r--r--fs/ocfs2/inode.c45
-rw-r--r--fs/ocfs2/inode.h2
-rw-r--r--fs/ocfs2/journal.c26
-rw-r--r--fs/ocfs2/journal.h15
-rw-r--r--fs/ocfs2/localalloc.c275
-rw-r--r--fs/ocfs2/localalloc.h3
-rw-r--r--fs/ocfs2/mmap.c48
-rw-r--r--fs/ocfs2/namei.c100
-rw-r--r--fs/ocfs2/ocfs2.h22
-rw-r--r--fs/ocfs2/ocfs2_fs.h144
-rw-r--r--fs/ocfs2/quota.h12
-rw-r--r--fs/ocfs2/quota_global.c351
-rw-r--r--fs/ocfs2/quota_local.c183
-rw-r--r--fs/ocfs2/refcounttree.c74
-rw-r--r--fs/ocfs2/refcounttree.h4
-rw-r--r--fs/ocfs2/reservations.c847
-rw-r--r--fs/ocfs2/reservations.h159
-rw-r--r--fs/ocfs2/resize.c19
-rw-r--r--fs/ocfs2/suballoc.c688
-rw-r--r--fs/ocfs2/suballoc.h21
-rw-r--r--fs/ocfs2/super.c142
-rw-r--r--fs/ocfs2/super.h7
-rw-r--r--fs/ocfs2/xattr.c115
-rw-r--r--fs/ocfs2/xattr.h12
-rw-r--r--fs/omfs/file.c2
-rw-r--r--fs/omfs/inode.c5
-rw-r--r--fs/open.c166
-rw-r--r--fs/partitions/acorn.c68
-rw-r--r--fs/partitions/acorn.h10
-rw-r--r--fs/partitions/amiga.c13
-rw-r--r--fs/partitions/amiga.h2
-rw-r--r--fs/partitions/atari.c8
-rw-r--r--fs/partitions/atari.h2
-rw-r--r--fs/partitions/check.c84
-rw-r--r--fs/partitions/check.h12
-rw-r--r--fs/partitions/efi.c93
-rw-r--r--fs/partitions/efi.h2
-rw-r--r--fs/partitions/ibm.c21
-rw-r--r--fs/partitions/ibm.h2
-rw-r--r--fs/partitions/karma.c4
-rw-r--r--fs/partitions/karma.h2
-rw-r--r--fs/partitions/ldm.c107
-rw-r--r--fs/partitions/ldm.h2
-rw-r--r--fs/partitions/mac.c13
-rw-r--r--fs/partitions/mac.h2
-rw-r--r--fs/partitions/msdos.c87
-rw-r--r--fs/partitions/msdos.h2
-rw-r--r--fs/partitions/osf.c4
-rw-r--r--fs/partitions/osf.h2
-rw-r--r--fs/partitions/sgi.c6
-rw-r--r--fs/partitions/sgi.h2
-rw-r--r--fs/partitions/sun.c6
-rw-r--r--fs/partitions/sun.h2
-rw-r--r--fs/partitions/sysv68.c6
-rw-r--r--fs/partitions/sysv68.h2
-rw-r--r--fs/partitions/ultrix.c4
-rw-r--r--fs/partitions/ultrix.h2
-rw-r--r--fs/pipe.c133
-rw-r--r--fs/proc/array.c7
-rw-r--r--fs/proc/base.c26
-rw-r--r--fs/proc/generic.c15
-rw-r--r--fs/proc/inode.c4
-rw-r--r--fs/proc/kcore.c3
-rw-r--r--fs/proc/kmsg.c1
-rw-r--r--fs/proc/root.c1
-rw-r--r--fs/proc/task_mmu.c23
-rw-r--r--fs/proc/vmcore.c1
-rw-r--r--fs/qnx4/dir.c3
-rw-r--r--fs/quota/dquot.c408
-rw-r--r--fs/quota/quota.c99
-rw-r--r--fs/quota/quota_tree.c50
-rw-r--r--fs/quota/quota_tree.h6
-rw-r--r--fs/quota/quota_v1.c4
-rw-r--r--fs/quota/quota_v2.c6
-rw-r--r--fs/ramfs/file-mmu.c3
-rw-r--r--fs/ramfs/file-nommu.c9
-rw-r--r--fs/ramfs/inode.c22
-rw-r--r--fs/read_write.c17
-rw-r--r--fs/reiserfs/dir.c9
-rw-r--r--fs/reiserfs/file.c8
-rw-r--r--fs/reiserfs/inode.c3
-rw-r--r--fs/reiserfs/namei.c18
-rw-r--r--fs/reiserfs/super.c48
-rw-r--r--fs/reiserfs/xattr.c16
-rw-r--r--fs/reiserfs/xattr_acl.c4
-rw-r--r--fs/reiserfs/xattr_security.c2
-rw-r--r--fs/reiserfs/xattr_trusted.c2
-rw-r--r--fs/reiserfs/xattr_user.c2
-rw-r--r--fs/smbfs/dir.c3
-rw-r--r--fs/smbfs/file.c5
-rw-r--r--fs/smbfs/inode.c2
-rw-r--r--fs/smbfs/ioctl.c10
-rw-r--r--fs/smbfs/proto.h2
-rw-r--r--fs/smbfs/symlink.c1
-rw-r--r--fs/splice.c151
-rw-r--r--fs/squashfs/Kconfig11
-rw-r--r--fs/squashfs/Makefile2
-rw-r--r--fs/squashfs/inode.c92
-rw-r--r--fs/squashfs/namei.c6
-rw-r--r--fs/squashfs/squashfs.h12
-rw-r--r--fs/squashfs/squashfs_fs.h76
-rw-r--r--fs/squashfs/squashfs_fs_i.h3
-rw-r--r--fs/squashfs/squashfs_fs_sb.h3
-rw-r--r--fs/squashfs/super.c30
-rw-r--r--fs/squashfs/symlink.c11
-rw-r--r--fs/squashfs/xattr.c323
-rw-r--r--fs/squashfs/xattr.h46
-rw-r--r--fs/squashfs/xattr_id.c100
-rw-r--r--fs/statfs.c196
-rw-r--r--fs/super.c334
-rw-r--r--fs/sync.c94
-rw-r--r--fs/sysfs/bin.c26
-rw-r--r--fs/sysfs/dir.c114
-rw-r--r--fs/sysfs/file.c17
-rw-r--r--fs/sysfs/group.c6
-rw-r--r--fs/sysfs/inode.c14
-rw-r--r--fs/sysfs/mount.c95
-rw-r--r--fs/sysfs/symlink.c36
-rw-r--r--fs/sysfs/sysfs.h34
-rw-r--r--fs/sysv/dir.c4
-rw-r--r--fs/sysv/file.c2
-rw-r--r--fs/sysv/ialloc.c11
-rw-r--r--fs/sysv/inode.c1
-rw-r--r--fs/timerfd.c25
-rw-r--r--fs/ubifs/dir.c9
-rw-r--r--fs/ubifs/file.c17
-rw-r--r--fs/ubifs/io.c1
-rw-r--r--fs/ubifs/ubifs.h4
-rw-r--r--fs/udf/balloc.c43
-rw-r--r--fs/udf/dir.c5
-rw-r--r--fs/udf/file.c71
-rw-r--r--fs/udf/ialloc.c32
-rw-r--r--fs/udf/inode.c5
-rw-r--r--fs/udf/namei.c30
-rw-r--r--fs/udf/super.c13
-rw-r--r--fs/udf/udfdecl.h4
-rw-r--r--fs/ufs/balloc.c24
-rw-r--r--fs/ufs/dir.c2
-rw-r--r--fs/ufs/file.c5
-rw-r--r--fs/ufs/ialloc.c23
-rw-r--r--fs/ufs/inode.c6
-rw-r--r--fs/ufs/namei.c18
-rw-r--r--fs/ufs/super.c112
-rw-r--r--fs/ufs/symlink.c8
-rw-r--r--fs/ufs/truncate.c24
-rw-r--r--fs/ufs/ufs.h2
-rw-r--r--fs/ufs/ufs_fs.h1
-rw-r--r--fs/xattr.c14
-rw-r--r--fs/xfs/Makefile1
-rw-r--r--fs/xfs/linux-2.6/xfs_acl.c4
-rw-r--r--fs/xfs/linux-2.6/xfs_aops.c231
-rw-r--r--fs/xfs/linux-2.6/xfs_buf.c36
-rw-r--r--fs/xfs/linux-2.6/xfs_buf.h2
-rw-r--r--fs/xfs/linux-2.6/xfs_file.c12
-rw-r--r--fs/xfs/linux-2.6/xfs_ioctl.c4
-rw-r--r--fs/xfs/linux-2.6/xfs_ioctl32.c4
-rw-r--r--fs/xfs/linux-2.6/xfs_iops.c5
-rw-r--r--fs/xfs/linux-2.6/xfs_quotaops.c9
-rw-r--r--fs/xfs/linux-2.6/xfs_super.c25
-rw-r--r--fs/xfs/linux-2.6/xfs_super.h2
-rw-r--r--fs/xfs/linux-2.6/xfs_sync.c91
-rw-r--r--fs/xfs/linux-2.6/xfs_trace.c4
-rw-r--r--fs/xfs/linux-2.6/xfs_trace.h233
-rw-r--r--fs/xfs/linux-2.6/xfs_xattr.c8
-rw-r--r--fs/xfs/quota/xfs_dquot.c199
-rw-r--r--fs/xfs/quota/xfs_dquot.h35
-rw-r--r--fs/xfs/quota/xfs_dquot_item.c30
-rw-r--r--fs/xfs/quota/xfs_qm.c609
-rw-r--r--fs/xfs/quota/xfs_qm.h23
-rw-r--r--fs/xfs/quota/xfs_qm_stats.c2
-rw-r--r--fs/xfs/quota/xfs_qm_syscalls.c162
-rw-r--r--fs/xfs/quota/xfs_quota_priv.h102
-rw-r--r--fs/xfs/quota/xfs_trans_dquot.c29
-rw-r--r--fs/xfs/xfs_acl.h4
-rw-r--r--fs/xfs/xfs_ag.h24
-rw-r--r--fs/xfs/xfs_alloc.c357
-rw-r--r--fs/xfs/xfs_alloc.h7
-rw-r--r--fs/xfs/xfs_alloc_btree.c2
-rw-r--r--fs/xfs/xfs_bmap.c2
-rw-r--r--fs/xfs/xfs_buf_item.c221
-rw-r--r--fs/xfs/xfs_buf_item.h20
-rw-r--r--fs/xfs/xfs_error.c32
-rw-r--r--fs/xfs/xfs_error.h9
-rw-r--r--fs/xfs/xfs_extfree_item.c18
-rw-r--r--fs/xfs/xfs_inode.c2
-rw-r--r--fs/xfs/xfs_inode_item.c21
-rw-r--r--fs/xfs/xfs_iomap.c123
-rw-r--r--fs/xfs/xfs_iomap.h47
-rw-r--r--fs/xfs/xfs_log.c796
-rw-r--r--fs/xfs/xfs_log.h27
-rw-r--r--fs/xfs/xfs_log_cil.c725
-rw-r--r--fs/xfs/xfs_log_priv.h130
-rw-r--r--fs/xfs/xfs_log_recover.c355
-rw-r--r--fs/xfs/xfs_log_recover.h2
-rw-r--r--fs/xfs/xfs_mount.c7
-rw-r--r--fs/xfs/xfs_mount.h1
-rw-r--r--fs/xfs/xfs_quota.h3
-rw-r--r--fs/xfs/xfs_trans.c810
-rw-r--r--fs/xfs/xfs_trans.h58
-rw-r--r--fs/xfs/xfs_trans_buf.c233
-rw-r--r--fs/xfs/xfs_trans_item.c114
-rw-r--r--fs/xfs/xfs_trans_priv.h15
-rw-r--r--fs/xfs/xfs_types.h2
-rw-r--r--include/acpi/acexcep.h2
-rw-r--r--include/acpi/acoutput.h2
-rw-r--r--include/acpi/acpi_bus.h2
-rw-r--r--include/acpi/acpi_drivers.h3
-rw-r--r--include/acpi/acpi_hest.h12
-rw-r--r--include/acpi/acpiosxf.h4
-rw-r--r--include/acpi/acpixf.h43
-rw-r--r--include/acpi/actbl2.h27
-rw-r--r--include/acpi/actypes.h44
-rw-r--r--include/acpi/apei.h34
-rw-r--r--include/acpi/atomicio.h10
-rw-r--r--include/acpi/hed.h18
-rw-r--r--include/acpi/processor.h13
-rw-r--r--include/acpi/video.h16
-rw-r--r--include/asm-generic/atomic.h10
-rw-r--r--include/asm-generic/bitops/arch_hweight.h25
-rw-r--r--include/asm-generic/bitops/const_hweight.h42
-rw-r--r--include/asm-generic/bitops/hweight.h8
-rw-r--r--include/asm-generic/bug.h34
-rw-r--r--include/asm-generic/dma-mapping-common.h20
-rw-r--r--include/asm-generic/gpio.h11
-rw-r--r--include/asm-generic/kmap_types.h6
-rw-r--r--include/asm-generic/scatterlist.h17
-rw-r--r--include/asm-generic/topology.h3
-rw-r--r--include/asm-generic/vmlinux.lds.h8
-rw-r--r--include/crypto/algapi.h40
-rw-r--r--include/drm/drmP.h3
-rw-r--r--include/drm/drm_crtc.h43
-rw-r--r--include/drm/drm_crtc_helper.h9
-rw-r--r--include/drm/drm_edid.h5
-rw-r--r--include/drm/drm_fb_helper.h67
-rw-r--r--include/drm/drm_fixed.h (renamed from drivers/gpu/drm/radeon/radeon_fixed.h)40
-rw-r--r--include/drm/radeon_drm.h1
-rw-r--r--include/drm/ttm/ttm_bo_api.h46
-rw-r--r--include/drm/ttm/ttm_bo_driver.h57
-rw-r--r--include/drm/ttm/ttm_page_alloc.h74
-rw-r--r--include/linux/Kbuild1
-rw-r--r--include/linux/acpi.h8
-rw-r--r--include/linux/ahci_platform.h29
-rw-r--r--include/linux/aio.h5
-rw-r--r--include/linux/altera_jtaguart.h16
-rw-r--r--include/linux/altera_uart.h14
-rw-r--r--include/linux/amba/mmci.h23
-rw-r--r--include/linux/amba/pl022.h32
-rw-r--r--include/linux/amba/pl330.h45
-rw-r--r--include/linux/ata.h2
-rw-r--r--include/linux/backing-dev.h6
-rw-r--r--include/linux/bitops.h30
-rw-r--r--include/linux/blkdev.h72
-rw-r--r--include/linux/buffer_head.h11
-rw-r--r--include/linux/caif/caif_socket.h165
-rw-r--r--include/linux/caif/if_caif.h34
-rw-r--r--include/linux/can/dev.h1
-rw-r--r--include/linux/can/platform/mcp251x.h4
-rw-r--r--include/linux/can/platform/sja1000.h2
-rw-r--r--include/linux/cgroup.h2
-rw-r--r--include/linux/clocksource.h19
-rw-r--r--include/linux/compaction.h89
-rw-r--r--include/linux/compat.h4
-rw-r--r--include/linux/completion.h2
-rw-r--r--include/linux/cper.h314
-rw-r--r--include/linux/cpufreq.h30
-rw-r--r--include/linux/cpuidle.h8
-rw-r--r--include/linux/cpuset.h65
-rw-r--r--include/linux/cred.h1
-rw-r--r--include/linux/crypto.h6
-rw-r--r--include/linux/dcache.h14
-rw-r--r--include/linux/dcbnl.h2
-rw-r--r--include/linux/debugfs.h2
-rw-r--r--include/linux/debugobjects.h11
-rw-r--r--include/linux/device.h24
-rw-r--r--include/linux/dma-mapping.h25
-rw-r--r--include/linux/dmaengine.h127
-rw-r--r--include/linux/dqblk_xfs.h9
-rw-r--r--include/linux/drbd.h5
-rw-r--r--include/linux/drbd_limits.h16
-rw-r--r--include/linux/drbd_nl.h5
-rw-r--r--include/linux/ds2782_battery.h8
-rw-r--r--include/linux/dynamic_debug.h2
-rw-r--r--include/linux/elevator.h6
-rw-r--r--include/linux/elf.h1
-rw-r--r--include/linux/elfcore.h4
-rw-r--r--include/linux/err.h10
-rw-r--r--include/linux/ethtool.h116
-rw-r--r--include/linux/ext2_fs_sb.h9
-rw-r--r--include/linux/ext3_fs.h2
-rw-r--r--include/linux/fb.h26
-rw-r--r--include/linux/fcntl.h6
-rw-r--r--include/linux/fec.h21
-rw-r--r--include/linux/file.h1
-rw-r--r--include/linux/filter.h3
-rw-r--r--include/linux/firewire.h5
-rw-r--r--include/linux/firmware.h1
-rw-r--r--include/linux/fs.h83
-rw-r--r--include/linux/ftrace.h61
-rw-r--r--include/linux/ftrace_event.h107
-rw-r--r--include/linux/fuse.h5
-rw-r--r--include/linux/generic_acl.h4
-rw-r--r--include/linux/genetlink.h8
-rw-r--r--include/linux/gfp.h18
-rw-r--r--include/linux/gpio.h5
-rw-r--r--include/linux/gsmmux.h25
-rw-r--r--include/linux/hdpu_features.h26
-rw-r--r--include/linux/hid.h10
-rw-r--r--include/linux/highmem.h2
-rw-r--r--include/linux/hrtimer.h2
-rw-r--r--include/linux/hw_breakpoint.h25
-rw-r--r--include/linux/i2c-omap.h9
-rw-r--r--include/linux/i2c.h4
-rw-r--r--include/linux/i2c/adp8860.h154
-rw-r--r--include/linux/i2c/max732x.h3
-rw-r--r--include/linux/i2c/pca953x.h2
-rw-r--r--include/linux/i2c/twl.h14
-rw-r--r--include/linux/ide.h6
-rw-r--r--include/linux/ieee80211.h4
-rw-r--r--include/linux/if.h2
-rw-r--r--include/linux/if_arp.h1
-rw-r--r--include/linux/if_ether.h1
-rw-r--r--include/linux/if_link.h131
-rw-r--r--include/linux/if_macvlan.h3
-rw-r--r--include/linux/if_packet.h1
-rw-r--r--include/linux/if_pppol2tp.h16
-rw-r--r--include/linux/if_pppox.h9
-rw-r--r--include/linux/if_tun.h2
-rw-r--r--include/linux/if_x25.h26
-rw-r--r--include/linux/in6.h5
-rw-r--r--include/linux/init_task.h14
-rw-r--r--include/linux/input.h13
-rw-r--r--include/linux/input/ad714x.h63
-rw-r--r--include/linux/input/tps6507x-ts.h24
-rw-r--r--include/linux/interrupt.h32
-rw-r--r--include/linux/ioport.h4
-rw-r--r--include/linux/ipv6.h18
-rw-r--r--include/linux/irq.h1
-rw-r--r--include/linux/isapnp.h8
-rw-r--r--include/linux/ivtvfb.h1
-rw-r--r--include/linux/jbd.h3
-rw-r--r--include/linux/jffs2.h4
-rw-r--r--include/linux/joystick.h4
-rw-r--r--include/linux/kdb.h117
-rw-r--r--include/linux/kernel.h43
-rw-r--r--include/linux/kfifo.h4
-rw-r--r--include/linux/kgdb.h55
-rw-r--r--include/linux/kmod.h64
-rw-r--r--include/linux/kobj_map.h9
-rw-r--r--include/linux/kobject.h38
-rw-r--r--include/linux/kref.h3
-rw-r--r--include/linux/ks8842.h34
-rw-r--r--include/linux/ktime.h10
-rw-r--r--include/linux/kvm.h26
-rw-r--r--include/linux/kvm_host.h16
-rw-r--r--include/linux/l2tp.h163
-rw-r--r--include/linux/lcd.h23
-rw-r--r--include/linux/leds.h12
-rw-r--r--include/linux/libata.h116
-rw-r--r--include/linux/lis3lv02d.h12
-rw-r--r--include/linux/lockdep.h8
-rw-r--r--include/linux/lsm_audit.h2
-rw-r--r--include/linux/matroxfb.h3
-rw-r--r--include/linux/memcontrol.h19
-rw-r--r--include/linux/memory_hotplug.h1
-rw-r--r--include/linux/mempolicy.h15
-rw-r--r--include/linux/meye.h12
-rw-r--r--include/linux/mfd/88pm860x.h7
-rw-r--r--include/linux/mfd/ab4500.h262
-rw-r--r--include/linux/mfd/ab8500.h128
-rw-r--r--include/linux/mfd/abx500.h (renamed from include/linux/mfd/ab3100.h)134
-rw-r--r--include/linux/mfd/davinci_voicecodec.h126
-rw-r--r--include/linux/mfd/janz.h54
-rw-r--r--include/linux/mfd/mc13783.h66
-rw-r--r--include/linux/mfd/pcf50633/backlight.h51
-rw-r--r--include/linux/mfd/pcf50633/core.h4
-rw-r--r--include/linux/mfd/rdc321x.h26
-rw-r--r--include/linux/mfd/sh_mobile_sdhi.h6
-rw-r--r--include/linux/mfd/tc35892.h132
-rw-r--r--include/linux/mfd/tmio.h11
-rw-r--r--include/linux/mfd/tps6507x.h169
-rw-r--r--include/linux/mfd/wm831x/core.h5
-rw-r--r--include/linux/mfd/wm8350/audio.h2
-rw-r--r--include/linux/mfd/wm8994/core.h53
-rw-r--r--include/linux/mfd/wm8994/pdata.h1
-rw-r--r--include/linux/migrate.h6
-rw-r--r--include/linux/miscdevice.h2
-rw-r--r--include/linux/mlx4/device.h4
-rw-r--r--include/linux/mlx4/qp.h7
-rw-r--r--include/linux/mm.h11
-rw-r--r--include/linux/mmc/host.h5
-rw-r--r--include/linux/mmc/sdhci-spear.h42
-rw-r--r--include/linux/mmc/sdio.h2
-rw-r--r--include/linux/mmc/sdio_func.h3
-rw-r--r--include/linux/mmc/sh_mmcif.h39
-rw-r--r--include/linux/mmzone.h20
-rw-r--r--include/linux/mod_devicetable.h42
-rw-r--r--include/linux/module.h6
-rw-r--r--include/linux/mroute.h20
-rw-r--r--include/linux/mroute6.h35
-rw-r--r--include/linux/msm_mdp.h78
-rw-r--r--include/linux/mtd/blktrans.h15
-rw-r--r--include/linux/mtd/cfi.h30
-rw-r--r--include/linux/mtd/flashchip.h4
-rw-r--r--include/linux/mtd/map.h3
-rw-r--r--include/linux/mtd/mtd.h8
-rw-r--r--include/linux/mtd/mtdram.h2
-rw-r--r--include/linux/mtd/nand.h25
-rw-r--r--include/linux/mtd/onenand.h9
-rw-r--r--include/linux/mtd/sh_flctl.h7
-rw-r--r--include/linux/ncp_fs.h2
-rw-r--r--include/linux/net.h14
-rw-r--r--include/linux/netdevice.h306
-rw-r--r--include/linux/netfilter/Kbuild1
-rw-r--r--include/linux/netfilter/nf_conntrack_common.h1
-rw-r--r--include/linux/netfilter/nf_conntrack_tuple_common.h3
-rw-r--r--include/linux/netfilter/x_tables.h95
-rw-r--r--include/linux/netfilter/xt_CONNMARK.h22
-rw-r--r--include/linux/netfilter/xt_MARK.h6
-rw-r--r--include/linux/netfilter/xt_TEE.h12
-rw-r--r--include/linux/netfilter/xt_connmark.h11
-rw-r--r--include/linux/netfilter/xt_mark.h4
-rw-r--r--include/linux/netfilter/xt_recent.h7
-rw-r--r--include/linux/netfilter_bridge.h29
-rw-r--r--include/linux/netfilter_ipv6/ip6_tables.h4
-rw-r--r--include/linux/netlink.h4
-rw-r--r--include/linux/netpoll.h13
-rw-r--r--include/linux/nfs_fs.h14
-rw-r--r--include/linux/nfs_fs_sb.h1
-rw-r--r--include/linux/nfs_xdr.h7
-rw-r--r--include/linux/nfsd/nfsfh.h6
-rw-r--r--include/linux/nilfs2_fs.h17
-rw-r--r--include/linux/nl80211.h76
-rw-r--r--include/linux/notifier.h15
-rw-r--r--include/linux/of_device.h4
-rw-r--r--include/linux/of_fdt.h4
-rw-r--r--include/linux/of_platform.h6
-rw-r--r--include/linux/padata.h55
-rw-r--r--include/linux/page_cgroup.h5
-rw-r--r--include/linux/pci.h13
-rw-r--r--include/linux/pci_ids.h4
-rw-r--r--include/linux/pci_regs.h6
-rw-r--r--include/linux/pda_power.h2
-rw-r--r--include/linux/perf_event.h111
-rw-r--r--include/linux/phy.h13
-rw-r--r--include/linux/pipe_fs_i.h13
-rw-r--r--include/linux/platform_device.h6
-rw-r--r--include/linux/pm_qos_params.h14
-rw-r--r--include/linux/pm_runtime.h7
-rw-r--r--include/linux/pm_wakeup.h38
-rw-r--r--include/linux/power_supply.h6
-rw-r--r--include/linux/ppp_channel.h3
-rw-r--r--include/linux/ptrace.h12
-rw-r--r--include/linux/quota.h44
-rw-r--r--include/linux/quotaops.h127
-rw-r--r--include/linux/ramfs.h5
-rw-r--r--include/linux/random.h28
-rw-r--r--include/linux/ratelimit.h13
-rw-r--r--include/linux/rbtree.h5
-rw-r--r--include/linux/rculist.h42
-rw-r--r--include/linux/rcupdate.h50
-rw-r--r--include/linux/rcutiny.h31
-rw-r--r--include/linux/rcutree.h7
-rw-r--r--include/linux/regulator/machine.h9
-rw-r--r--include/linux/reiserfs_acl.h4
-rw-r--r--include/linux/reiserfs_xattr.h6
-rw-r--r--include/linux/ring_buffer.h10
-rw-r--r--include/linux/rio.h55
-rw-r--r--include/linux/rio_drv.h6
-rw-r--r--include/linux/rio_ids.h14
-rw-r--r--include/linux/rio_regs.h80
-rw-r--r--include/linux/rmap.h27
-rw-r--r--include/linux/rtc-v3020.h2
-rw-r--r--include/linux/rtnetlink.h7
-rw-r--r--include/linux/sched.h100
-rw-r--r--include/linux/sdhci-pltfm.h35
-rw-r--r--include/linux/security.h180
-rw-r--r--include/linux/sem.h4
-rw-r--r--include/linux/serial_core.h5
-rw-r--r--include/linux/serial_sci.h4
-rw-r--r--include/linux/serio.h1
-rw-r--r--include/linux/sfi.h24
-rw-r--r--include/linux/sh_clk.h150
-rw-r--r--include/linux/sh_dma.h (renamed from arch/sh/include/asm/dmaengine.h)109
-rw-r--r--include/linux/sh_intc.h26
-rw-r--r--include/linux/skbuff.h83
-rw-r--r--include/linux/slab_def.h24
-rw-r--r--include/linux/slob_def.h8
-rw-r--r--include/linux/slub_def.h19
-rw-r--r--include/linux/snmp.h2
-rw-r--r--include/linux/socket.h5
-rw-r--r--include/linux/spi/spi_bitbang.h101
-rw-r--r--include/linux/spi/wl12xx.h2
-rw-r--r--include/linux/splice.h7
-rw-r--r--include/linux/srcu.h6
-rw-r--r--include/linux/ssb/ssb.h4
-rw-r--r--include/linux/ssb/ssb_driver_chipcommon.h15
-rw-r--r--include/linux/ssb/ssb_regs.h239
-rw-r--r--include/linux/stmmac.h1
-rw-r--r--include/linux/stop_machine.h122
-rw-r--r--include/linux/sunrpc/auth.h1
-rw-r--r--include/linux/sunrpc/auth_gss.h1
-rw-r--r--include/linux/sunrpc/gss_api.h8
-rw-r--r--include/linux/sunrpc/gss_krb5.h184
-rw-r--r--include/linux/sunrpc/metrics.h7
-rw-r--r--include/linux/sunrpc/sched.h20
-rw-r--r--include/linux/sunrpc/xdr.h8
-rw-r--r--include/linux/sunrpc/xprt.h13
-rw-r--r--include/linux/swap.h20
-rw-r--r--include/linux/swiotlb.h10
-rw-r--r--include/linux/syscalls.h57
-rw-r--r--include/linux/sysctl.h2
-rw-r--r--include/linux/sysfs.h26
-rw-r--r--include/linux/sysrq.h23
-rw-r--r--include/linux/tboot.h1
-rw-r--r--include/linux/tca6416_keypad.h34
-rw-r--r--include/linux/threads.h9
-rw-r--r--include/linux/tick.h5
-rw-r--r--include/linux/timb_dma.h55
-rw-r--r--include/linux/time.h1
-rw-r--r--include/linux/timer.h10
-rw-r--r--include/linux/timex.h5
-rw-r--r--include/linux/tipc.h36
-rw-r--r--include/linux/tipc_config.h1
-rw-r--r--include/linux/topology.h112
-rw-r--r--include/linux/tracepoint.h196
-rw-r--r--include/linux/tty.h5
-rw-r--r--include/linux/types.h4
-rw-r--r--include/linux/uinput.h10
-rw-r--r--include/linux/usb.h137
-rw-r--r--include/linux/usb/Kbuild1
-rw-r--r--include/linux/usb/atmel_usba_udc.h2
-rw-r--r--include/linux/usb/audio-v2.h394
-rw-r--r--include/linux/usb/audio.h233
-rw-r--r--include/linux/usb/cdc.h94
-rw-r--r--include/linux/usb/ch11.h (renamed from drivers/usb/core/hub.h)60
-rw-r--r--include/linux/usb/ch9.h16
-rw-r--r--include/linux/usb/composite.h1
-rw-r--r--include/linux/usb/ehci_def.h6
-rw-r--r--include/linux/usb/functionfs.h199
-rw-r--r--include/linux/usb/g_hid.h32
-rw-r--r--include/linux/usb/gadget.h6
-rw-r--r--include/linux/usb/gadgetfs.h2
-rw-r--r--include/linux/usb/hcd.h (renamed from drivers/usb/core/hcd.h)75
-rw-r--r--include/linux/usb/langwell_udc.h2
-rw-r--r--include/linux/usb/musb.h40
-rw-r--r--include/linux/usb/ncm.h114
-rw-r--r--include/linux/usb/net2280.h6
-rw-r--r--include/linux/usb/quirks.h4
-rw-r--r--include/linux/usb/rndis_host.h66
-rw-r--r--include/linux/usb/serial.h36
-rw-r--r--include/linux/usb/ulpi.h140
-rw-r--r--include/linux/usb/usbnet.h40
-rw-r--r--include/linux/usb/wusb-wa.h2
-rw-r--r--include/linux/uuid.h70
-rw-r--r--include/linux/via-core.h219
-rw-r--r--include/linux/via-gpio.h14
-rw-r--r--include/linux/via_i2c.h (renamed from drivers/video/via/via_i2c.h)24
-rw-r--r--include/linux/videodev2.h75
-rw-r--r--include/linux/virtio.h55
-rw-r--r--include/linux/virtio_blk.h5
-rw-r--r--include/linux/virtio_console.h25
-rw-r--r--include/linux/vmstat.h4
-rw-r--r--include/linux/wait.h184
-rw-r--r--include/linux/wireless.h4
-rw-r--r--include/linux/writeback.h18
-rw-r--r--include/linux/xattr.h2
-rw-r--r--include/linux/z2_battery.h17
-rw-r--r--include/linux/zorro.h22
-rw-r--r--include/media/ak881x.h25
-rw-r--r--include/media/davinci/vpfe_capture.h2
-rw-r--r--include/media/ir-common.h74
-rw-r--r--include/media/ir-core.h142
-rw-r--r--include/media/ir-kbd-i2c.h6
-rw-r--r--include/media/rc-map.h121
-rw-r--r--include/media/sh_vou.h34
-rw-r--r--include/media/soc_camera.h12
-rw-r--r--include/media/v4l2-chip-ident.h127
-rw-r--r--include/media/v4l2-common.h27
-rw-r--r--include/media/v4l2-dev.h5
-rw-r--r--include/media/v4l2-event.h67
-rw-r--r--include/media/v4l2-fh.h65
-rw-r--r--include/media/v4l2-ioctl.h7
-rw-r--r--include/media/v4l2-mem2mem.h201
-rw-r--r--include/media/v4l2-subdev.h70
-rw-r--r--include/media/videobuf-core.h35
-rw-r--r--include/media/videobuf-dma-sg.h26
-rw-r--r--include/media/videobuf-vmalloc.h12
-rw-r--r--include/net/9p/9p.h33
-rw-r--r--include/net/9p/client.h2
-rw-r--r--include/net/af_unix.h20
-rw-r--r--include/net/bluetooth/hci_core.h6
-rw-r--r--include/net/bluetooth/l2cap.h41
-rw-r--r--include/net/caif/caif_dev.h103
-rw-r--r--include/net/caif/caif_device.h55
-rw-r--r--include/net/caif/caif_layer.h283
-rw-r--r--include/net/caif/cfcnfg.h140
-rw-r--r--include/net/caif/cfctrl.h139
-rw-r--r--include/net/caif/cffrml.h16
-rw-r--r--include/net/caif/cfmuxl.h22
-rw-r--r--include/net/caif/cfpkt.h274
-rw-r--r--include/net/caif/cfserl.h12
-rw-r--r--include/net/caif/cfsrvl.h56
-rw-r--r--include/net/cfg80211.h47
-rw-r--r--include/net/cls_cgroup.h63
-rw-r--r--include/net/dn_fib.h4
-rw-r--r--include/net/dst.h83
-rw-r--r--include/net/fib_rules.h3
-rw-r--r--include/net/flow.h23
-rw-r--r--include/net/icmp.h11
-rw-r--r--include/net/if_inet6.h25
-rw-r--r--include/net/inet6_connection_sock.h2
-rw-r--r--include/net/inet_connection_sock.h5
-rw-r--r--include/net/inet_sock.h1
-rw-r--r--include/net/inet_timewait_sock.h4
-rw-r--r--include/net/ip.h15
-rw-r--r--include/net/ip6_fib.h29
-rw-r--r--include/net/ip6_route.h4
-rw-r--r--include/net/ipv6.h16
-rw-r--r--include/net/iw_handler.h2
-rw-r--r--include/net/mac80211.h151
-rw-r--r--include/net/mld.h75
-rw-r--r--include/net/neighbour.h14
-rw-r--r--include/net/netfilter/nf_conntrack_core.h2
-rw-r--r--include/net/netns/generic.h9
-rw-r--r--include/net/netns/ipv4.h15
-rw-r--r--include/net/netns/ipv6.h14
-rw-r--r--include/net/pkt_sched.h2
-rw-r--r--include/net/raw.h13
-rw-r--r--include/net/route.h17
-rw-r--r--include/net/sch_generic.h1
-rw-r--r--include/net/sctp/sctp.h4
-rw-r--r--include/net/sctp/sm.h3
-rw-r--r--include/net/sctp/structs.h69
-rw-r--r--include/net/snmp.h31
-rw-r--r--include/net/sock.h210
-rw-r--r--include/net/tcp.h34
-rw-r--r--include/net/tipc/tipc.h16
-rw-r--r--include/net/transp_v6.h3
-rw-r--r--include/net/x25.h10
-rw-r--r--include/net/x25device.h1
-rw-r--r--include/net/xfrm.h19
-rw-r--r--include/pcmcia/cs.h19
-rw-r--r--include/pcmcia/ds.h29
-rw-r--r--include/pcmcia/mem_op.h116
-rw-r--r--include/pcmcia/ss.h7
-rw-r--r--include/rdma/ib_verbs.h11
-rw-r--r--include/scsi/Kbuild1
-rw-r--r--include/scsi/fc/fc_fcp.h1
-rw-r--r--include/scsi/fc_encode.h18
-rw-r--r--include/scsi/libfc.h29
-rw-r--r--include/scsi/libfcoe.h10
-rw-r--r--include/scsi/scsi.h3
-rw-r--r--include/scsi/scsi_transport_fc.h2
-rw-r--r--include/sound/asound.h20
-rw-r--r--include/sound/es1688.h11
-rw-r--r--include/sound/info.h24
-rw-r--r--include/sound/jack.h8
-rw-r--r--include/sound/pcm.h3
-rw-r--r--include/sound/soc-dai.h7
-rw-r--r--include/sound/soc-dapm.h8
-rw-r--r--include/sound/soc.h61
-rw-r--r--include/sound/tlv320aic3x.h17
-rw-r--r--include/sound/tlv320dac33-plat.h1
-rw-r--r--include/sound/uda134x.h1
-rw-r--r--include/sound/version.h2
-rw-r--r--include/sound/wm8903.h249
-rw-r--r--include/sound/wm8904.h110
-rw-r--r--include/sound/wm8960.h24
-rw-r--r--include/sound/wm9090.h28
-rw-r--r--include/trace/define_trace.h5
-rw-r--r--include/trace/events/ext4.h100
-rw-r--r--include/trace/events/kvm.h1
-rw-r--r--include/trace/events/lock.h55
-rw-r--r--include/trace/events/module.h18
-rw-r--r--include/trace/events/napi.h10
-rw-r--r--include/trace/events/sched.h32
-rw-r--r--include/trace/events/scsi.h345
-rw-r--r--include/trace/events/signal.h52
-rw-r--r--include/trace/ftrace.h284
-rw-r--r--include/trace/syscall.h10
-rw-r--r--include/video/da8xx-fb.h1
-rw-r--r--include/video/sh_mobile_lcdc.h2
-rw-r--r--init/Kconfig30
-rw-r--r--init/main.c4
-rw-r--r--ipc/mqueue.c76
-rw-r--r--ipc/msg.c12
-rw-r--r--ipc/sem.c322
-rw-r--r--ipc/shm.c11
-rw-r--r--ipc/util.c4
-rw-r--r--kernel/Makefile4
-rw-r--r--kernel/acct.c37
-rw-r--r--kernel/capability.c1
-rw-r--r--kernel/cgroup.c51
-rw-r--r--kernel/cgroup_freezer.c21
-rw-r--r--kernel/compat.c25
-rw-r--r--kernel/cpu.c157
-rw-r--r--kernel/cpuset.c145
-rw-r--r--kernel/cred-internals.h21
-rw-r--r--kernel/cred.c65
-rw-r--r--kernel/debug/Makefile6
-rw-r--r--kernel/debug/debug_core.c983
-rw-r--r--kernel/debug/debug_core.h81
-rw-r--r--kernel/debug/gdbstub.c1017
-rw-r--r--kernel/debug/kdb/.gitignore1
-rw-r--r--kernel/debug/kdb/Makefile25
-rw-r--r--kernel/debug/kdb/kdb_bp.c564
-rw-r--r--kernel/debug/kdb/kdb_bt.c210
-rw-r--r--kernel/debug/kdb/kdb_cmds35
-rw-r--r--kernel/debug/kdb/kdb_debugger.c169
-rw-r--r--kernel/debug/kdb/kdb_io.c826
-rw-r--r--kernel/debug/kdb/kdb_keyboard.c212
-rw-r--r--kernel/debug/kdb/kdb_main.c2849
-rw-r--r--kernel/debug/kdb/kdb_private.h300
-rw-r--r--kernel/debug/kdb/kdb_support.c927
-rw-r--r--kernel/exit.c43
-rw-r--r--kernel/fork.c55
-rw-r--r--kernel/groups.c6
-rw-r--r--kernel/hrtimer.c69
-rw-r--r--kernel/hw_breakpoint.c196
-rw-r--r--kernel/irq/handle.c3
-rw-r--r--kernel/irq/manage.c89
-rw-r--r--kernel/irq/proc.c60
-rw-r--r--kernel/kallsyms.c21
-rw-r--r--kernel/kexec.c6
-rw-r--r--kernel/kgdb.c1764
-rw-r--r--kernel/kmod.c193
-rw-r--r--kernel/kprobes.c132
-rw-r--r--kernel/ksysfs.c3
-rw-r--r--kernel/lockdep.c93
-rw-r--r--kernel/lockdep_internals.h72
-rw-r--r--kernel/lockdep_proc.c58
-rw-r--r--kernel/module.c30
-rw-r--r--kernel/mutex.c7
-rw-r--r--kernel/padata.c189
-rw-r--r--kernel/panic.c27
-rw-r--r--kernel/perf_event.c770
-rw-r--r--kernel/pid.c7
-rw-r--r--kernel/pm_qos_params.c218
-rw-r--r--kernel/posix-cpu-timers.c310
-rw-r--r--kernel/posix-timers.c11
-rw-r--r--kernel/power/Makefile3
-rw-r--r--kernel/power/block_io.c103
-rw-r--r--kernel/power/power.h27
-rw-r--r--kernel/power/snapshot.c145
-rw-r--r--kernel/power/swap.c333
-rw-r--r--kernel/power/user.c37
-rw-r--r--kernel/printk.c25
-rw-r--r--kernel/profile.c12
-rw-r--r--kernel/ptrace.c38
-rw-r--r--kernel/rcupdate.c19
-rw-r--r--kernel/rcutiny.c35
-rw-r--r--kernel/rcutiny_plugin.h39
-rw-r--r--kernel/rcutorture.c4
-rw-r--r--kernel/rcutree.c131
-rw-r--r--kernel/rcutree.h2
-rw-r--r--kernel/rcutree_plugin.h69
-rw-r--r--kernel/rcutree_trace.c4
-rw-r--r--kernel/relay.c17
-rw-r--r--kernel/resource.c16
-rw-r--r--kernel/sched.c804
-rw-r--r--kernel/sched_clock.c1
-rw-r--r--kernel/sched_debug.c118
-rw-r--r--kernel/sched_fair.c350
-rw-r--r--kernel/sched_features.h55
-rw-r--r--kernel/sched_idletask.c8
-rw-r--r--kernel/sched_rt.c15
-rw-r--r--kernel/signal.c63
-rw-r--r--kernel/smp.c2
-rw-r--r--kernel/softirq.c4
-rw-r--r--kernel/stop_machine.c537
-rw-r--r--kernel/sys.c37
-rw-r--r--kernel/sysctl.c613
-rw-r--r--kernel/sysctl_binary.c10
-rw-r--r--kernel/time.c11
-rw-r--r--kernel/time/clocksource.c48
-rw-r--r--kernel/time/ntp.c2
-rw-r--r--kernel/time/tick-sched.c84
-rw-r--r--kernel/time/timekeeping.c35
-rw-r--r--kernel/time/timer_list.c1
-rw-r--r--kernel/timer.c149
-rw-r--r--kernel/trace/Kconfig11
-rw-r--r--kernel/trace/Makefile1
-rw-r--r--kernel/trace/blktrace.c138
-rw-r--r--kernel/trace/ftrace.c36
-rw-r--r--kernel/trace/kmemtrace.c70
-rw-r--r--kernel/trace/ring_buffer.c194
-rw-r--r--kernel/trace/ring_buffer_benchmark.c5
-rw-r--r--kernel/trace/trace.c202
-rw-r--r--kernel/trace/trace.h56
-rw-r--r--kernel/trace/trace_branch.c8
-rw-r--r--kernel/trace/trace_entries.h12
-rw-r--r--kernel/trace/trace_event_perf.c185
-rw-r--r--kernel/trace/trace_events.c139
-rw-r--r--kernel/trace/trace_events_filter.c30
-rw-r--r--kernel/trace/trace_export.c16
-rw-r--r--kernel/trace/trace_functions_graph.c176
-rw-r--r--kernel/trace/trace_hw_branches.c312
-rw-r--r--kernel/trace/trace_irqsoff.c271
-rw-r--r--kernel/trace/trace_kprobe.c648
-rw-r--r--kernel/trace/trace_ksym.c26
-rw-r--r--kernel/trace/trace_output.c155
-rw-r--r--kernel/trace/trace_output.h2
-rw-r--r--kernel/trace/trace_sched_switch.c21
-rw-r--r--kernel/trace/trace_sched_wakeup.c29
-rw-r--r--kernel/trace/trace_selftest.c64
-rw-r--r--kernel/trace/trace_syscalls.c146
-rw-r--r--kernel/trace/trace_workqueue.c26
-rw-r--r--kernel/tracepoint.c91
-rw-r--r--kernel/user.c11
-rw-r--r--kernel/user_namespace.c4
-rw-r--r--kernel/workqueue.c45
-rw-r--r--lib/Kconfig.debug49
-rw-r--r--lib/Kconfig.kgdb24
-rw-r--r--lib/Makefile8
-rw-r--r--lib/atomic64.c4
-rw-r--r--lib/atomic64_test.c165
-rw-r--r--lib/btree.c3
-rw-r--r--lib/bug.c2
-rw-r--r--lib/cpu-notifier-error-inject.c63
-rw-r--r--lib/crc32.c24
-rw-r--r--lib/debugobjects.c63
-rw-r--r--lib/dynamic_debug.c2
-rw-r--r--lib/gen_crc32table.c47
-rw-r--r--lib/hexdump.c54
-rw-r--r--lib/hweight.c19
-rw-r--r--lib/idr.c7
-rw-r--r--lib/kobject.c115
-rw-r--r--lib/kobject_uevent.c109
-rw-r--r--lib/kref.c15
-rw-r--r--lib/radix-tree.c4
-rw-r--r--lib/random32.c38
-rw-r--r--lib/rbtree.c48
-rw-r--r--lib/rwsem.c5
-rw-r--r--lib/swiotlb.c31
-rw-r--r--lib/uuid.c53
-rw-r--r--lib/vsprintf.c69
-rw-r--r--mm/Kconfig17
-rw-r--r--mm/Makefile1
-rw-r--r--mm/backing-dev.c15
-rw-r--r--mm/compaction.c605
-rw-r--r--mm/filemap.c57
-rw-r--r--mm/highmem.c2
-rw-r--r--mm/hugetlb.c14
-rw-r--r--mm/ksm.c4
-rw-r--r--mm/memcontrol.c712
-rw-r--r--mm/memory.c13
-rw-r--r--mm/memory_hotplug.c36
-rw-r--r--mm/mempolicy.c227
-rw-r--r--mm/migrate.c74
-rw-r--r--mm/mincore.c263
-rw-r--r--mm/mlock.c41
-rw-r--r--mm/msync.c2
-rw-r--r--mm/nommu.c32
-rw-r--r--mm/oom_kill.c5
-rw-r--r--mm/page-writeback.c44
-rw-r--r--mm/page_alloc.c319
-rw-r--r--mm/percpu-km.c104
-rw-r--r--mm/percpu-vm.c451
-rw-r--r--mm/percpu.c585
-rw-r--r--mm/readahead.c2
-rw-r--r--mm/rmap.c49
-rw-r--r--mm/shmem.c140
-rw-r--r--mm/slab.c249
-rw-r--r--mm/slob.c8
-rw-r--r--mm/slub.c85
-rw-r--r--mm/sparse.c9
-rw-r--r--mm/swap.c1
-rw-r--r--mm/swapfile.c14
-rw-r--r--mm/truncate.c10
-rw-r--r--mm/vmscan.c213
-rw-r--r--mm/vmstat.c253
-rw-r--r--net/802/garp.c4
-rw-r--r--net/8021q/vlan.c8
-rw-r--r--net/8021q/vlan_core.c2
-rw-r--r--net/8021q/vlan_dev.c18
-rw-r--r--net/9p/client.c70
-rw-r--r--net/9p/protocol.c8
-rw-r--r--net/9p/trans_rdma.c1
-rw-r--r--net/9p/trans_virtio.c6
-rw-r--r--net/Kconfig8
-rw-r--r--net/Makefile2
-rw-r--r--net/appletalk/ddp.c2
-rw-r--r--net/atm/br2684.c1
-rw-r--r--net/atm/common.c30
-rw-r--r--net/atm/lec.c6
-rw-r--r--net/atm/mpc.c32
-rw-r--r--net/atm/mpoa_caches.c20
-rw-r--r--net/atm/proc.c10
-rw-r--r--net/atm/signaling.c2
-rw-r--r--net/atm/svc.c62
-rw-r--r--net/ax25/af_ax25.c8
-rw-r--r--net/bluetooth/Kconfig13
-rw-r--r--net/bluetooth/af_bluetooth.c6
-rw-r--r--net/bluetooth/bnep/core.c8
-rw-r--r--net/bluetooth/bnep/netdev.c20
-rw-r--r--net/bluetooth/cmtp/cmtp.h2
-rw-r--r--net/bluetooth/cmtp/core.c4
-rw-r--r--net/bluetooth/hci_core.c27
-rw-r--r--net/bluetooth/hci_sysfs.c34
-rw-r--r--net/bluetooth/hidp/core.c10
-rw-r--r--net/bluetooth/hidp/hidp.h4
-rw-r--r--net/bluetooth/l2cap.c1115
-rw-r--r--net/bluetooth/rfcomm/sock.c8
-rw-r--r--net/bluetooth/rfcomm/tty.c2
-rw-r--r--net/bluetooth/sco.c31
-rw-r--r--net/bridge/Kconfig6
-rw-r--r--net/bridge/br.c2
-rw-r--r--net/bridge/br_device.c131
-rw-r--r--net/bridge/br_fdb.c9
-rw-r--r--net/bridge/br_forward.c51
-rw-r--r--net/bridge/br_if.c14
-rw-r--r--net/bridge/br_input.c12
-rw-r--r--net/bridge/br_ioctl.c2
-rw-r--r--net/bridge/br_multicast.c690
-rw-r--r--net/bridge/br_netfilter.c263
-rw-r--r--net/bridge/br_netlink.c8
-rw-r--r--net/bridge/br_notify.c11
-rw-r--r--net/bridge/br_private.h57
-rw-r--r--net/bridge/br_stp.c11
-rw-r--r--net/bridge/br_stp_bpdu.c2
-rw-r--r--net/bridge/br_stp_if.c16
-rw-r--r--net/bridge/br_stp_timer.c24
-rw-r--r--net/bridge/br_sysfs_br.c2
-rw-r--r--net/bridge/br_sysfs_if.c32
-rw-r--r--net/bridge/netfilter/ebt_802_3.c8
-rw-r--r--net/bridge/netfilter/ebt_among.c27
-rw-r--r--net/bridge/netfilter/ebt_arp.c10
-rw-r--r--net/bridge/netfilter/ebt_arpreply.c10
-rw-r--r--net/bridge/netfilter/ebt_dnat.c12
-rw-r--r--net/bridge/netfilter/ebt_ip.c18
-rw-r--r--net/bridge/netfilter/ebt_ip6.c39
-rw-r--r--net/bridge/netfilter/ebt_limit.c11
-rw-r--r--net/bridge/netfilter/ebt_log.c10
-rw-r--r--net/bridge/netfilter/ebt_mark.c12
-rw-r--r--net/bridge/netfilter/ebt_mark_m.c12
-rw-r--r--net/bridge/netfilter/ebt_nflog.c8
-rw-r--r--net/bridge/netfilter/ebt_pkttype.c8
-rw-r--r--net/bridge/netfilter/ebt_redirect.c12
-rw-r--r--net/bridge/netfilter/ebt_snat.c12
-rw-r--r--net/bridge/netfilter/ebt_stp.c10
-rw-r--r--net/bridge/netfilter/ebt_ulog.c38
-rw-r--r--net/bridge/netfilter/ebt_vlan.c54
-rw-r--r--net/bridge/netfilter/ebtables.c56
-rw-r--r--net/caif/Kconfig45
-rw-r--r--net/caif/Makefile26
-rw-r--r--net/caif/caif_config_util.c87
-rw-r--r--net/caif/caif_dev.c417
-rw-r--r--net/caif/caif_socket.c1231
-rw-r--r--net/caif/cfcnfg.c470
-rw-r--r--net/caif/cfctrl.c652
-rw-r--r--net/caif/cfdbgl.c40
-rw-r--r--net/caif/cfdgml.c108
-rw-r--r--net/caif/cffrml.c151
-rw-r--r--net/caif/cfmuxl.c252
-rw-r--r--net/caif/cfpkt_skbuff.c580
-rw-r--r--net/caif/cfrfml.c108
-rw-r--r--net/caif/cfserl.c193
-rw-r--r--net/caif/cfsrvl.c198
-rw-r--r--net/caif/cfutill.c115
-rw-r--r--net/caif/cfveil.c107
-rw-r--r--net/caif/cfvidl.c65
-rw-r--r--net/caif/chnl_net.c467
-rw-r--r--net/can/bcm.c2
-rw-r--r--net/core/Makefile2
-rw-r--r--net/core/datagram.c23
-rw-r--r--net/core/dev.c1424
-rw-r--r--net/core/dev_addr_lists.c741
-rw-r--r--net/core/dev_mcast.c232
-rw-r--r--net/core/drop_monitor.c12
-rw-r--r--net/core/dst.c45
-rw-r--r--net/core/ethtool.c152
-rw-r--r--net/core/fib_rules.c31
-rw-r--r--net/core/filter.c7
-rw-r--r--net/core/flow.c405
-rw-r--r--net/core/neighbour.c1
-rw-r--r--net/core/net-sysfs.c377
-rw-r--r--net/core/net-sysfs.h1
-rw-r--r--net/core/net_namespace.c95
-rw-r--r--net/core/netpoll.c26
-rw-r--r--net/core/pktgen.c58
-rw-r--r--net/core/rtnetlink.c375
-rw-r--r--net/core/skbuff.c72
-rw-r--r--net/core/sock.c130
-rw-r--r--net/core/stream.c22
-rw-r--r--net/core/sysctl_net_core.c75
-rw-r--r--net/dccp/ccids/ccid3.c2
-rw-r--r--net/dccp/dccp.h4
-rw-r--r--net/dccp/input.c8
-rw-r--r--net/dccp/ipv4.c2
-rw-r--r--net/dccp/ipv6.c7
-rw-r--r--net/dccp/options.c2
-rw-r--r--net/dccp/output.c18
-rw-r--r--net/dccp/proto.c2
-rw-r--r--net/dccp/timer.c4
-rw-r--r--net/decnet/af_decnet.c32
-rw-r--r--net/decnet/dn_dev.c15
-rw-r--r--net/decnet/dn_neigh.c9
-rw-r--r--net/decnet/dn_nsp_in.c3
-rw-r--r--net/decnet/dn_route.c29
-rw-r--r--net/decnet/dn_rules.c22
-rw-r--r--net/dsa/slave.c14
-rw-r--r--net/ethernet/eth.c4
-rw-r--r--net/ieee802154/wpan-class.c7
-rw-r--r--net/ipv4/Kconfig22
-rw-r--r--net/ipv4/af_inet.c57
-rw-r--r--net/ipv4/arp.c8
-rw-r--r--net/ipv4/cipso_ipv4.c2
-rw-r--r--net/ipv4/devinet.c4
-rw-r--r--net/ipv4/fib_rules.c22
-rw-r--r--net/ipv4/fib_trie.c2
-rw-r--r--net/ipv4/icmp.c11
-rw-r--r--net/ipv4/igmp.c4
-rw-r--r--net/ipv4/inet_connection_sock.c10
-rw-r--r--net/ipv4/inet_hashtables.c2
-rw-r--r--net/ipv4/ip_forward.c4
-rw-r--r--net/ipv4/ip_gre.c10
-rw-r--r--net/ipv4/ip_input.c8
-rw-r--r--net/ipv4/ip_options.c10
-rw-r--r--net/ipv4/ip_output.c35
-rw-r--r--net/ipv4/ip_sockglue.c20
-rw-r--r--net/ipv4/ipconfig.c2
-rw-r--r--net/ipv4/ipip.c7
-rw-r--r--net/ipv4/ipmr.c928
-rw-r--r--net/ipv4/netfilter.c6
-rw-r--r--net/ipv4/netfilter/arp_tables.c102
-rw-r--r--net/ipv4/netfilter/arpt_mangle.c4
-rw-r--r--net/ipv4/netfilter/ip_queue.c4
-rw-r--r--net/ipv4/netfilter/ip_tables.c260
-rw-r--r--net/ipv4/netfilter/ipt_CLUSTERIP.c77
-rw-r--r--net/ipv4/netfilter/ipt_ECN.c23
-rw-r--r--net/ipv4/netfilter/ipt_LOG.c19
-rw-r--r--net/ipv4/netfilter/ipt_MASQUERADE.c18
-rw-r--r--net/ipv4/netfilter/ipt_NETMAP.c16
-rw-r--r--net/ipv4/netfilter/ipt_REDIRECT.c16
-rw-r--r--net/ipv4/netfilter/ipt_REJECT.c19
-rw-r--r--net/ipv4/netfilter/ipt_ULOG.c47
-rw-r--r--net/ipv4/netfilter/ipt_addrtype.c28
-rw-r--r--net/ipv4/netfilter/ipt_ah.c28
-rw-r--r--net/ipv4/netfilter/ipt_ecn.c19
-rw-r--r--net/ipv4/netfilter/iptable_filter.c2
-rw-r--r--net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c10
-rw-r--r--net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c7
-rw-r--r--net/ipv4/netfilter/nf_nat_h323.c17
-rw-r--r--net/ipv4/netfilter/nf_nat_rule.c21
-rw-r--r--net/ipv4/netfilter/nf_nat_snmp_basic.c16
-rw-r--r--net/ipv4/netfilter/nf_nat_standalone.c7
-rw-r--r--net/ipv4/netfilter/nf_nat_tftp.c1
-rw-r--r--net/ipv4/proc.c1
-rw-r--r--net/ipv4/raw.c6
-rw-r--r--net/ipv4/route.c154
-rw-r--r--net/ipv4/sysctl_net_ipv4.c17
-rw-r--r--net/ipv4/tcp.c55
-rw-r--r--net/ipv4/tcp_input.c12
-rw-r--r--net/ipv4/tcp_ipv4.c43
-rw-r--r--net/ipv4/tcp_minisocks.c1
-rw-r--r--net/ipv4/tcp_output.c25
-rw-r--r--net/ipv4/tcp_timer.c8
-rw-r--r--net/ipv4/udp.c50
-rw-r--r--net/ipv4/xfrm4_input.c6
-rw-r--r--net/ipv4/xfrm4_output.c2
-rw-r--r--net/ipv4/xfrm4_policy.c22
-rw-r--r--net/ipv6/Kconfig14
-rw-r--r--net/ipv6/addrconf.c886
-rw-r--r--net/ipv6/addrlabel.c8
-rw-r--r--net/ipv6/af_inet6.c3
-rw-r--r--net/ipv6/datagram.c116
-rw-r--r--net/ipv6/fib6_rules.c3
-rw-r--r--net/ipv6/icmp.c7
-rw-r--r--net/ipv6/inet6_connection_sock.c4
-rw-r--r--net/ipv6/ip6_fib.c16
-rw-r--r--net/ipv6/ip6_flowlabel.c3
-rw-r--r--net/ipv6/ip6_input.c4
-rw-r--r--net/ipv6/ip6_output.c105
-rw-r--r--net/ipv6/ip6_tunnel.c8
-rw-r--r--net/ipv6/ip6mr.c944
-rw-r--r--net/ipv6/ipv6_sockglue.c86
-rw-r--r--net/ipv6/mcast.c143
-rw-r--r--net/ipv6/ndisc.c6
-rw-r--r--net/ipv6/netfilter.c25
-rw-r--r--net/ipv6/netfilter/ip6_queue.c4
-rw-r--r--net/ipv6/netfilter/ip6_tables.c236
-rw-r--r--net/ipv6/netfilter/ip6t_LOG.c20
-rw-r--r--net/ipv6/netfilter/ip6t_REJECT.c32
-rw-r--r--net/ipv6/netfilter/ip6t_ah.c18
-rw-r--r--net/ipv6/netfilter/ip6t_eui64.c4
-rw-r--r--net/ipv6/netfilter/ip6t_frag.c18
-rw-r--r--net/ipv6/netfilter/ip6t_hbh.c33
-rw-r--r--net/ipv6/netfilter/ip6t_ipv6header.c8
-rw-r--r--net/ipv6/netfilter/ip6t_mh.c21
-rw-r--r--net/ipv6/netfilter/ip6t_rt.c20
-rw-r--r--net/ipv6/netfilter/ip6table_filter.c2
-rw-r--r--net/ipv6/netfilter/ip6table_mangle.c2
-rw-r--r--net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c14
-rw-r--r--net/ipv6/netfilter/nf_conntrack_reasm.c2
-rw-r--r--net/ipv6/proc.c2
-rw-r--r--net/ipv6/raw.c18
-rw-r--r--net/ipv6/route.c2
-rw-r--r--net/ipv6/sit.c8
-rw-r--r--net/ipv6/tcp_ipv6.c68
-rw-r--r--net/ipv6/udp.c42
-rw-r--r--net/ipv6/xfrm6_input.c2
-rw-r--r--net/ipv6/xfrm6_output.c4
-rw-r--r--net/ipv6/xfrm6_policy.c31
-rw-r--r--net/irda/af_irda.c14
-rw-r--r--net/irda/ircomm/ircomm_param.c2
-rw-r--r--net/irda/iriap.c2
-rw-r--r--net/irda/irnet/irnet_irda.c3
-rw-r--r--net/iucv/af_iucv.c24
-rw-r--r--net/iucv/iucv.c9
-rw-r--r--net/key/af_key.c10
-rw-r--r--net/l2tp/Kconfig107
-rw-r--r--net/l2tp/Makefile12
-rw-r--r--net/l2tp/l2tp_core.c1666
-rw-r--r--net/l2tp/l2tp_core.h304
-rw-r--r--net/l2tp/l2tp_debugfs.c341
-rw-r--r--net/l2tp/l2tp_eth.c334
-rw-r--r--net/l2tp/l2tp_ip.c679
-rw-r--r--net/l2tp/l2tp_netlink.c840
-rw-r--r--net/l2tp/l2tp_ppp.c1837
-rw-r--r--net/llc/af_llc.c12
-rw-r--r--net/llc/llc_core.c6
-rw-r--r--net/llc/llc_sap.c2
-rw-r--r--net/mac80211/Kconfig17
-rw-r--r--net/mac80211/Makefile3
-rw-r--r--net/mac80211/agg-rx.c80
-rw-r--r--net/mac80211/agg-tx.c16
-rw-r--r--net/mac80211/cfg.c122
-rw-r--r--net/mac80211/chan.c127
-rw-r--r--net/mac80211/debugfs.h1
-rw-r--r--net/mac80211/debugfs_netdev.c12
-rw-r--r--net/mac80211/debugfs_sta.c79
-rw-r--r--net/mac80211/driver-ops.h33
-rw-r--r--net/mac80211/driver-trace.h333
-rw-r--r--net/mac80211/ht.c3
-rw-r--r--net/mac80211/ibss.c46
-rw-r--r--net/mac80211/ieee80211_i.h51
-rw-r--r--net/mac80211/iface.c124
-rw-r--r--net/mac80211/main.c28
-rw-r--r--net/mac80211/mesh.c6
-rw-r--r--net/mac80211/mesh.h2
-rw-r--r--net/mac80211/mesh_hwmp.c5
-rw-r--r--net/mac80211/mesh_plink.c2
-rw-r--r--net/mac80211/mlme.c315
-rw-r--r--net/mac80211/pm.c2
-rw-r--r--net/mac80211/rc80211_minstrel.c2
-rw-r--r--net/mac80211/rc80211_minstrel.h11
-rw-r--r--net/mac80211/rc80211_minstrel_debugfs.c41
-rw-r--r--net/mac80211/rx.c108
-rw-r--r--net/mac80211/scan.c126
-rw-r--r--net/mac80211/sta_info.c107
-rw-r--r--net/mac80211/sta_info.h14
-rw-r--r--net/mac80211/status.c21
-rw-r--r--net/mac80211/tx.c26
-rw-r--r--net/mac80211/util.c36
-rw-r--r--net/mac80211/work.c62
-rw-r--r--net/netfilter/Kconfig133
-rw-r--r--net/netfilter/Makefile9
-rw-r--r--net/netfilter/ipvs/ip_vs_ftp.c10
-rw-r--r--net/netfilter/ipvs/ip_vs_proto.c28
-rw-r--r--net/netfilter/ipvs/ip_vs_proto_ah_esp.c14
-rw-r--r--net/netfilter/ipvs/ip_vs_sync.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_xmit.c16
-rw-r--r--net/netfilter/nf_conntrack_amanda.c2
-rw-r--r--net/netfilter/nf_conntrack_core.c16
-rw-r--r--net/netfilter/nf_conntrack_ecache.c12
-rw-r--r--net/netfilter/nf_conntrack_ftp.c4
-rw-r--r--net/netfilter/nf_conntrack_h323_main.c9
-rw-r--r--net/netfilter/nf_conntrack_irc.c4
-rw-r--r--net/netfilter/nf_conntrack_netlink.c30
-rw-r--r--net/netfilter/nf_conntrack_proto.c8
-rw-r--r--net/netfilter/nf_conntrack_proto_sctp.c4
-rw-r--r--net/netfilter/nf_conntrack_sip.c16
-rw-r--r--net/netfilter/nf_conntrack_standalone.c9
-rw-r--r--net/netfilter/nf_conntrack_tftp.c4
-rw-r--r--net/netfilter/nf_internals.h2
-rw-r--r--net/netfilter/nf_log.c6
-rw-r--r--net/netfilter/nf_queue.c3
-rw-r--r--net/netfilter/nfnetlink.c7
-rw-r--r--net/netfilter/nfnetlink_log.c4
-rw-r--r--net/netfilter/nfnetlink_queue.c3
-rw-r--r--net/netfilter/x_tables.c128
-rw-r--r--net/netfilter/xt_CLASSIFY.c2
-rw-r--r--net/netfilter/xt_CONNMARK.c113
-rw-r--r--net/netfilter/xt_CONNSECMARK.c29
-rw-r--r--net/netfilter/xt_CT.c25
-rw-r--r--net/netfilter/xt_DSCP.c18
-rw-r--r--net/netfilter/xt_HL.c30
-rw-r--r--net/netfilter/xt_LED.c93
-rw-r--r--net/netfilter/xt_MARK.c56
-rw-r--r--net/netfilter/xt_NFLOG.c10
-rw-r--r--net/netfilter/xt_NFQUEUE.c50
-rw-r--r--net/netfilter/xt_NOTRACK.c2
-rw-r--r--net/netfilter/xt_RATEEST.c20
-rw-r--r--net/netfilter/xt_SECMARK.c48
-rw-r--r--net/netfilter/xt_TCPMSS.c41
-rw-r--r--net/netfilter/xt_TCPOPTSTRIP.c7
-rw-r--r--net/netfilter/xt_TEE.c309
-rw-r--r--net/netfilter/xt_TPROXY.c12
-rw-r--r--net/netfilter/xt_TRACE.c2
-rw-r--r--net/netfilter/xt_cluster.c21
-rw-r--r--net/netfilter/xt_comment.c2
-rw-r--r--net/netfilter/xt_connbytes.c22
-rw-r--r--net/netfilter/xt_connlimit.c24
-rw-r--r--net/netfilter/xt_connmark.c104
-rw-r--r--net/netfilter/xt_conntrack.c23
-rw-r--r--net/netfilter/xt_dccp.c18
-rw-r--r--net/netfilter/xt_dscp.c18
-rw-r--r--net/netfilter/xt_esp.c28
-rw-r--r--net/netfilter/xt_hashlimit.c346
-rw-r--r--net/netfilter/xt_helper.c18
-rw-r--r--net/netfilter/xt_hl.c16
-rw-r--r--net/netfilter/xt_iprange.c5
-rw-r--r--net/netfilter/xt_length.c4
-rw-r--r--net/netfilter/xt_limit.c15
-rw-r--r--net/netfilter/xt_mac.c23
-rw-r--r--net/netfilter/xt_mark.c37
-rw-r--r--net/netfilter/xt_multiport.c103
-rw-r--r--net/netfilter/xt_osf.c12
-rw-r--r--net/netfilter/xt_owner.c2
-rw-r--r--net/netfilter/xt_physdev.c18
-rw-r--r--net/netfilter/xt_pkttype.c2
-rw-r--r--net/netfilter/xt_policy.c31
-rw-r--r--net/netfilter/xt_quota.c10
-rw-r--r--net/netfilter/xt_rateest.c10
-rw-r--r--net/netfilter/xt_realm.c2
-rw-r--r--net/netfilter/xt_recent.c189
-rw-r--r--net/netfilter/xt_sctp.c57
-rw-r--r--net/netfilter/xt_socket.c11
-rw-r--r--net/netfilter/xt_state.c50
-rw-r--r--net/netfilter/xt_statistic.c14
-rw-r--r--net/netfilter/xt_string.c68
-rw-r--r--net/netfilter/xt_tcpmss.c4
-rw-r--r--net/netfilter/xt_tcpudp.c38
-rw-r--r--net/netfilter/xt_time.c16
-rw-r--r--net/netfilter/xt_u32.c5
-rw-r--r--net/netlabel/netlabel_addrlist.h2
-rw-r--r--net/netlabel/netlabel_unlabeled.c1
-rw-r--r--net/netlink/af_netlink.c23
-rw-r--r--net/netlink/genetlink.c6
-rw-r--r--net/netrom/af_netrom.c8
-rw-r--r--net/packet/af_packet.c69
-rw-r--r--net/phonet/pep.c10
-rw-r--r--net/phonet/pn_dev.c23
-rw-r--r--net/phonet/socket.c2
-rw-r--r--net/rds/af_rds.c11
-rw-r--r--net/rds/cong.c2
-rw-r--r--net/rds/ib_cm.c3
-rw-r--r--net/rds/ib_rdma.c5
-rw-r--r--net/rds/ib_recv.c4
-rw-r--r--net/rds/ib_send.c20
-rw-r--r--net/rds/iw_cm.c4
-rw-r--r--net/rds/iw_recv.c4
-rw-r--r--net/rds/iw_send.c3
-rw-r--r--net/rds/loop.c7
-rw-r--r--net/rds/rdma.c4
-rw-r--r--net/rds/rdma_transport.c5
-rw-r--r--net/rds/rds.h4
-rw-r--r--net/rds/recv.c2
-rw-r--r--net/rds/send.c40
-rw-r--r--net/rds/tcp_connect.c2
-rw-r--r--net/rds/tcp_recv.c1
-rw-r--r--net/rds/tcp_send.c4
-rw-r--r--net/rds/threads.c2
-rw-r--r--net/rfkill/core.c53
-rw-r--r--net/rose/af_rose.c8
-rw-r--r--net/rxrpc/af_rxrpc.c12
-rw-r--r--net/rxrpc/ar-recvmsg.c6
-rw-r--r--net/sched/act_api.c65
-rw-r--r--net/sched/act_gact.c4
-rw-r--r--net/sched/act_ipt.c9
-rw-r--r--net/sched/act_mirred.c6
-rw-r--r--net/sched/act_pedit.c11
-rw-r--r--net/sched/act_simple.c4
-rw-r--r--net/sched/cls_api.c30
-rw-r--r--net/sched/cls_cgroup.c50
-rw-r--r--net/sched/cls_flow.c1
-rw-r--r--net/sched/cls_u32.c10
-rw-r--r--net/sched/ematch.c3
-rw-r--r--net/sched/sch_api.c135
-rw-r--r--net/sched/sch_generic.c21
-rw-r--r--net/sched/sch_hfsc.c7
-rw-r--r--net/sched/sch_ingress.c1
-rw-r--r--net/sched/sch_mq.c1
-rw-r--r--net/sched/sch_multiq.c1
-rw-r--r--net/sched/sch_prio.c1
-rw-r--r--net/sched/sch_red.c1
-rw-r--r--net/sched/sch_sfq.c10
-rw-r--r--net/sched/sch_tbf.c6
-rw-r--r--net/sctp/Kconfig12
-rw-r--r--net/sctp/Makefile3
-rw-r--r--net/sctp/associola.c15
-rw-r--r--net/sctp/chunk.c4
-rw-r--r--net/sctp/endpointola.c2
-rw-r--r--net/sctp/input.c22
-rw-r--r--net/sctp/ipv6.c27
-rw-r--r--net/sctp/output.c27
-rw-r--r--net/sctp/outqueue.c96
-rw-r--r--net/sctp/probe.c214
-rw-r--r--net/sctp/proc.c3
-rw-r--r--net/sctp/protocol.c9
-rw-r--r--net/sctp/sm_make_chunk.c28
-rw-r--r--net/sctp/sm_sideeffect.c47
-rw-r--r--net/sctp/socket.c41
-rw-r--r--net/sctp/transport.c67
-rw-r--r--net/sctp/ulpqueue.c2
-rw-r--r--net/socket.c132
-rw-r--r--net/sunrpc/auth.c19
-rw-r--r--net/sunrpc/auth_gss/Makefile2
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c89
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_crypto.c697
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_keys.c336
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_mech.c584
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_seal.c155
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_seqnum.c83
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_unseal.c113
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_wrap.c404
-rw-r--r--net/sunrpc/auth_gss/gss_mech_switch.c21
-rw-r--r--net/sunrpc/auth_gss/gss_spkm3_mech.c5
-rw-r--r--net/sunrpc/auth_gss/gss_spkm3_token.c2
-rw-r--r--net/sunrpc/auth_gss/svcauth_gss.c17
-rw-r--r--net/sunrpc/bc_svc.c2
-rw-r--r--net/sunrpc/cache.c73
-rw-r--r--net/sunrpc/clnt.c20
-rw-r--r--net/sunrpc/rpc_pipe.c18
-rw-r--r--net/sunrpc/rpcb_clnt.c2
-rw-r--r--net/sunrpc/sched.c26
-rw-r--r--net/sunrpc/stats.c29
-rw-r--r--net/sunrpc/svc_xprt.c6
-rw-r--r--net/sunrpc/svcsock.c40
-rw-r--r--net/sunrpc/xdr.c1
-rw-r--r--net/sunrpc/xprt.c66
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_recvfrom.c3
-rw-r--r--net/sunrpc/xprtrdma/transport.c31
-rw-r--r--net/sunrpc/xprtsock.c73
-rw-r--r--net/sysctl_net.c1
-rw-r--r--net/tipc/addr.c32
-rw-r--r--net/tipc/addr.h37
-rw-r--r--net/tipc/bcast.c149
-rw-r--r--net/tipc/bcast.h117
-rw-r--r--net/tipc/bearer.c16
-rw-r--r--net/tipc/bearer.h16
-rw-r--r--net/tipc/cluster.c2
-rw-r--r--net/tipc/config.c68
-rw-r--r--net/tipc/core.c26
-rw-r--r--net/tipc/core.h27
-rw-r--r--net/tipc/discover.c8
-rw-r--r--net/tipc/link.c102
-rw-r--r--net/tipc/link.h35
-rw-r--r--net/tipc/msg.c94
-rw-r--r--net/tipc/msg.h99
-rw-r--r--net/tipc/name_distr.c2
-rw-r--r--net/tipc/name_table.c2
-rw-r--r--net/tipc/net.c8
-rw-r--r--net/tipc/node.c14
-rw-r--r--net/tipc/port.c27
-rw-r--r--net/tipc/port.h2
-rw-r--r--net/tipc/socket.c26
-rw-r--r--net/tipc/subscr.c15
-rw-r--r--net/unix/af_unix.c25
-rw-r--r--net/unix/garbage.c13
-rw-r--r--net/wimax/op-reset.c2
-rw-r--r--net/wimax/op-rfkill.c2
-rw-r--r--net/wimax/op-state-get.c2
-rw-r--r--net/wimax/stack.c4
-rw-r--r--net/wireless/chan.c56
-rw-r--r--net/wireless/core.c3
-rw-r--r--net/wireless/core.h27
-rw-r--r--net/wireless/ibss.c5
-rw-r--r--net/wireless/mlme.c52
-rw-r--r--net/wireless/nl80211.c334
-rw-r--r--net/wireless/nl80211.h6
-rw-r--r--net/wireless/reg.c6
-rw-r--r--net/wireless/scan.c4
-rw-r--r--net/wireless/sme.c36
-rw-r--r--net/wireless/util.c24
-rw-r--r--net/wireless/wext-compat.c15
-rw-r--r--net/wireless/wext-core.c134
-rw-r--r--net/wireless/wext-sme.c2
-rw-r--r--net/x25/af_x25.c42
-rw-r--r--net/x25/x25_dev.c36
-rw-r--r--net/x25/x25_in.c2
-rw-r--r--net/x25/x25_out.c5
-rw-r--r--net/xfrm/xfrm_hash.h9
-rw-r--r--net/xfrm/xfrm_policy.c848
-rw-r--r--net/xfrm/xfrm_state.c5
-rw-r--r--net/xfrm/xfrm_user.c22
-rw-r--r--samples/tracepoints/tp-samples-trace.h4
-rw-r--r--samples/tracepoints/tracepoint-probe-sample.c13
-rw-r--r--samples/tracepoints/tracepoint-probe-sample2.c7
-rw-r--r--scripts/Makefile.lib4
-rwxr-xr-xscripts/checkpatch.pl31
-rw-r--r--scripts/gen_initramfs_list.sh1
-rwxr-xr-xscripts/get_maintainer.pl66
-rw-r--r--scripts/kconfig/streamline_config.pl9
-rw-r--r--scripts/kconfig/util.c2
-rw-r--r--scripts/mod/file2alias.c57
-rwxr-xr-xscripts/package/mkspec2
-rw-r--r--scripts/selinux/genheaders/genheaders.c2
-rw-r--r--security/capability.c76
-rw-r--r--security/commoncap.c6
-rw-r--r--security/device_cgroup.c2
-rw-r--r--security/integrity/ima/Kconfig5
-rw-r--r--security/integrity/ima/ima.h2
-rw-r--r--security/integrity/ima/ima_audit.c2
-rw-r--r--security/integrity/ima/ima_crypto.c4
-rw-r--r--security/integrity/ima/ima_fs.c38
-rw-r--r--security/integrity/ima/ima_iint.c10
-rw-r--r--security/integrity/ima/ima_init.c2
-rw-r--r--security/integrity/ima/ima_main.c2
-rw-r--r--security/integrity/ima/ima_policy.c107
-rw-r--r--security/integrity/ima/ima_queue.c4
-rw-r--r--security/keys/internal.h12
-rw-r--r--security/keys/key.c47
-rw-r--r--security/keys/keyctl.c67
-rw-r--r--security/keys/keyring.c273
-rw-r--r--security/keys/permission.c2
-rw-r--r--security/keys/proc.c2
-rw-r--r--security/keys/process_keys.c9
-rw-r--r--security/keys/request_key.c87
-rw-r--r--security/lsm_audit.c2
-rw-r--r--security/min_addr.c2
-rw-r--r--security/security.c75
-rw-r--r--security/selinux/avc.c3
-rw-r--r--security/selinux/hooks.c121
-rw-r--r--security/selinux/include/initial_sid_to_string.h2
-rw-r--r--security/selinux/include/netlabel.h8
-rw-r--r--security/selinux/include/objsec.h1
-rw-r--r--security/selinux/netlabel.c14
-rw-r--r--security/selinux/netlink.c1
-rw-r--r--security/selinux/nlmsgtab.c1
-rw-r--r--security/selinux/selinuxfs.c44
-rw-r--r--security/selinux/ss/mls.c2
-rw-r--r--security/selinux/ss/policydb.c7
-rw-r--r--security/selinux/ss/services.c49
-rw-r--r--security/smack/smack_lsm.c11
-rw-r--r--security/tomoyo/Makefile2
-rw-r--r--security/tomoyo/common.c157
-rw-r--r--security/tomoyo/common.h121
-rw-r--r--security/tomoyo/domain.c143
-rw-r--r--security/tomoyo/file.c250
-rw-r--r--security/tomoyo/gc.c55
-rw-r--r--security/tomoyo/path_group.c172
-rw-r--r--security/tomoyo/realpath.c30
-rw-r--r--sound/aoa/core/gpio-pmf.c9
-rw-r--r--sound/aoa/fabrics/layout.c2
-rw-r--r--sound/aoa/soundbus/core.c8
-rw-r--r--sound/aoa/soundbus/i2sbus/control.c2
-rw-r--r--sound/aoa/soundbus/i2sbus/core.c8
-rw-r--r--sound/aoa/soundbus/sysfs.c4
-rw-r--r--sound/atmel/Kconfig2
-rw-r--r--sound/atmel/ac97c.c355
-rw-r--r--sound/core/control.c5
-rw-r--r--sound/core/info.c74
-rw-r--r--sound/core/jack.c71
-rw-r--r--sound/core/oss/mixer_oss.c5
-rw-r--r--sound/core/oss/pcm_oss.c5
-rw-r--r--sound/core/pcm.c3
-rw-r--r--sound/core/pcm_lib.c13
-rw-r--r--sound/core/pcm_native.c109
-rw-r--r--sound/core/rawmidi.c5
-rw-r--r--sound/core/seq/seq_clientmgr.c6
-rw-r--r--sound/core/sound.c73
-rw-r--r--sound/core/timer.c6
-rw-r--r--sound/drivers/opl4/opl4_proc.c83
-rw-r--r--sound/drivers/pcsp/pcsp.h2
-rw-r--r--sound/drivers/pcsp/pcsp_input.c4
-rw-r--r--sound/drivers/pcsp/pcsp_lib.c12
-rw-r--r--sound/i2c/i2c.c18
-rw-r--r--sound/isa/Kconfig16
-rw-r--r--sound/isa/es1688/es1688.c225
-rw-r--r--sound/isa/es1688/es1688_lib.c47
-rw-r--r--sound/isa/gus/gus_mem_proc.c48
-rw-r--r--sound/isa/gus/gusextreme.c26
-rw-r--r--sound/isa/sb/Makefile2
-rw-r--r--sound/isa/sb/es968.c248
-rw-r--r--sound/mips/au1x00.c1
-rw-r--r--sound/oss/dmasound/dmasound_atari.c5
-rw-r--r--sound/oss/dmasound/dmasound_paula.c51
-rw-r--r--sound/pci/Kconfig32
-rw-r--r--sound/pci/Makefile1
-rw-r--r--sound/pci/asihpi/Makefile5
-rw-r--r--sound/pci/asihpi/asihpi.c3002
-rw-r--r--sound/pci/asihpi/hpi.h2007
-rw-r--r--sound/pci/asihpi/hpi6000.c1838
-rw-r--r--sound/pci/asihpi/hpi6000.h70
-rw-r--r--sound/pci/asihpi/hpi6205.c2326
-rw-r--r--sound/pci/asihpi/hpi6205.h93
-rw-r--r--sound/pci/asihpi/hpi_internal.h1646
-rw-r--r--sound/pci/asihpi/hpicmn.c631
-rw-r--r--sound/pci/asihpi/hpicmn.h64
-rw-r--r--sound/pci/asihpi/hpidebug.c225
-rw-r--r--sound/pci/asihpi/hpidebug.h385
-rw-r--r--sound/pci/asihpi/hpidspcd.c172
-rw-r--r--sound/pci/asihpi/hpidspcd.h104
-rw-r--r--sound/pci/asihpi/hpifunc.c3877
-rw-r--r--sound/pci/asihpi/hpimsginit.c130
-rw-r--r--sound/pci/asihpi/hpimsginit.h40
-rw-r--r--sound/pci/asihpi/hpimsgx.c907
-rw-r--r--sound/pci/asihpi/hpimsgx.h36
-rw-r--r--sound/pci/asihpi/hpioctl.c484
-rw-r--r--sound/pci/asihpi/hpioctl.h38
-rw-r--r--sound/pci/asihpi/hpios.c91
-rw-r--r--sound/pci/asihpi/hpios.h169
-rw-r--r--sound/pci/asihpi/hpipcida.h37
-rw-r--r--sound/pci/aw2/aw2-alsa.c11
-rw-r--r--sound/pci/cs4281.c40
-rw-r--r--sound/pci/cs46xx/cs46xx_lib.c19
-rw-r--r--sound/pci/emu10k1/emu10k1_main.c2
-rw-r--r--sound/pci/emu10k1/emufx.c36
-rw-r--r--sound/pci/emu10k1/emuproc.c51
-rw-r--r--sound/pci/es1968.c129
-rw-r--r--sound/pci/hda/Kconfig1
-rw-r--r--sound/pci/hda/hda_codec.c76
-rw-r--r--sound/pci/hda/hda_codec.h3
-rw-r--r--sound/pci/hda/hda_intel.c120
-rw-r--r--sound/pci/hda/hda_local.h2
-rw-r--r--sound/pci/hda/patch_analog.c21
-rw-r--r--sound/pci/hda/patch_conexant.c167
-rw-r--r--sound/pci/hda/patch_hdmi.c10
-rw-r--r--sound/pci/hda/patch_intelhdmi.c21
-rw-r--r--sound/pci/hda/patch_realtek.c323
-rw-r--r--sound/pci/hda/patch_sigmatel.c23
-rw-r--r--sound/pci/ice1712/aureon.c89
-rw-r--r--sound/pci/ice1712/maya44.c6
-rw-r--r--sound/pci/maestro3.c139
-rw-r--r--sound/pci/mixart/mixart.c79
-rw-r--r--sound/pci/oxygen/xonar_cs43xx.c3
-rw-r--r--sound/pcmcia/pdaudiocf/pdaudiocf.c12
-rw-r--r--sound/pcmcia/pdaudiocf/pdaudiocf.h1
-rw-r--r--sound/pcmcia/vx/vxpocket.c10
-rw-r--r--sound/pcmcia/vx/vxpocket.h1
-rw-r--r--sound/ppc/tumbler.c12
-rw-r--r--sound/soc/atmel/atmel-pcm.c14
-rw-r--r--sound/soc/blackfin/Kconfig9
-rw-r--r--sound/soc/blackfin/Makefile4
-rw-r--r--sound/soc/blackfin/bf5xx-ad193x.c (renamed from sound/soc/blackfin/bf5xx-ad1938.c)66
-rw-r--r--sound/soc/blackfin/bf5xx-sport.h28
-rw-r--r--sound/soc/codecs/Kconfig16
-rw-r--r--sound/soc/codecs/Makefile10
-rw-r--r--sound/soc/codecs/ad1836.c2
-rw-r--r--sound/soc/codecs/ad1938.c522
-rw-r--r--sound/soc/codecs/ad1938.h100
-rw-r--r--sound/soc/codecs/ad193x.c547
-rw-r--r--sound/soc/codecs/ad193x.h81
-rw-r--r--sound/soc/codecs/ak4104.c2
-rw-r--r--sound/soc/codecs/ak4535.c11
-rw-r--r--sound/soc/codecs/ak4642.c177
-rw-r--r--sound/soc/codecs/ak4671.c2
-rw-r--r--sound/soc/codecs/cq93vc.c299
-rw-r--r--sound/soc/codecs/cq93vc.h29
-rw-r--r--sound/soc/codecs/cs4270.c20
-rw-r--r--sound/soc/codecs/cx20442.c2
-rw-r--r--sound/soc/codecs/da7210.c157
-rw-r--r--sound/soc/codecs/ssm2602.c17
-rw-r--r--sound/soc/codecs/stac9766.c5
-rw-r--r--sound/soc/codecs/tlv320aic23.c1
-rw-r--r--sound/soc/codecs/tlv320aic26.c14
-rw-r--r--sound/soc/codecs/tlv320aic3x.c118
-rw-r--r--sound/soc/codecs/tlv320dac33.c543
-rw-r--r--sound/soc/codecs/tpa6130a2.c103
-rw-r--r--sound/soc/codecs/twl4030.c203
-rw-r--r--sound/soc/codecs/twl6040.c1246
-rw-r--r--sound/soc/codecs/twl6040.h141
-rw-r--r--sound/soc/codecs/uda134x.c29
-rw-r--r--sound/soc/codecs/uda1380.c5
-rw-r--r--sound/soc/codecs/wm8350.c107
-rw-r--r--sound/soc/codecs/wm8350.h3
-rw-r--r--sound/soc/codecs/wm8400.c34
-rw-r--r--sound/soc/codecs/wm8510.c2
-rw-r--r--sound/soc/codecs/wm8523.c10
-rw-r--r--sound/soc/codecs/wm8580.c4
-rw-r--r--sound/soc/codecs/wm8711.c8
-rw-r--r--sound/soc/codecs/wm8728.c2
-rw-r--r--sound/soc/codecs/wm8731.c66
-rw-r--r--sound/soc/codecs/wm8750.c347
-rw-r--r--sound/soc/codecs/wm8753.c8
-rw-r--r--sound/soc/codecs/wm8776.c6
-rw-r--r--sound/soc/codecs/wm8900.c10
-rw-r--r--sound/soc/codecs/wm8903.c217
-rw-r--r--sound/soc/codecs/wm8903.h221
-rw-r--r--sound/soc/codecs/wm8904.c59
-rw-r--r--sound/soc/codecs/wm8904.h97
-rw-r--r--sound/soc/codecs/wm8940.c5
-rw-r--r--sound/soc/codecs/wm8955.c16
-rw-r--r--sound/soc/codecs/wm8960.c215
-rw-r--r--sound/soc/codecs/wm8960.h11
-rw-r--r--sound/soc/codecs/wm8961.c6
-rw-r--r--sound/soc/codecs/wm8971.c12
-rw-r--r--sound/soc/codecs/wm8974.c6
-rw-r--r--sound/soc/codecs/wm8978.c12
-rw-r--r--sound/soc/codecs/wm8988.c8
-rw-r--r--sound/soc/codecs/wm8990.c26
-rw-r--r--sound/soc/codecs/wm8993.c24
-rw-r--r--sound/soc/codecs/wm8994.c264
-rw-r--r--sound/soc/codecs/wm8994.h8
-rw-r--r--sound/soc/codecs/wm9081.c18
-rw-r--r--sound/soc/codecs/wm9090.c773
-rw-r--r--sound/soc/codecs/wm9090.h715
-rw-r--r--sound/soc/codecs/wm9712.c3
-rw-r--r--sound/soc/codecs/wm9713.c16
-rw-r--r--sound/soc/codecs/wm_hubs.c18
-rw-r--r--sound/soc/davinci/Kconfig27
-rw-r--r--sound/soc/davinci/Makefile2
-rw-r--r--sound/soc/davinci/davinci-evm.c61
-rw-r--r--sound/soc/davinci/davinci-vcif.c274
-rw-r--r--sound/soc/davinci/davinci-vcif.h28
-rw-r--r--sound/soc/fsl/mpc5200_dma.c6
-rw-r--r--sound/soc/fsl/mpc5200_psc_ac97.c2
-rw-r--r--sound/soc/fsl/mpc5200_psc_i2s.c4
-rw-r--r--sound/soc/fsl/mpc8610_hpcd.c10
-rw-r--r--sound/soc/imx/Kconfig8
-rw-r--r--sound/soc/imx/Makefile3
-rw-r--r--sound/soc/imx/imx-pcm-dma-mx2.c7
-rw-r--r--sound/soc/imx/wm1133-ev1.c308
-rw-r--r--sound/soc/omap/Kconfig19
-rw-r--r--sound/soc/omap/Makefile4
-rw-r--r--sound/soc/omap/mcpdm.c548
-rw-r--r--sound/soc/omap/omap-mcbsp.c66
-rw-r--r--sound/soc/omap/omap3pandora.c2
-rw-r--r--sound/soc/omap/rx51.c294
-rw-r--r--sound/soc/omap/sdp4430.c233
-rw-r--r--sound/soc/omap/zoom2.c3
-rw-r--r--sound/soc/pxa/Kconfig9
-rw-r--r--sound/soc/pxa/Makefile2
-rw-r--r--sound/soc/pxa/pxa-ssp.c135
-rw-r--r--sound/soc/pxa/spitz.c43
-rw-r--r--sound/soc/pxa/z2.c246
-rw-r--r--sound/soc/s3c24xx/Kconfig12
-rw-r--r--sound/soc/s3c24xx/Makefile2
-rw-r--r--sound/soc/s3c24xx/jive_wm8750.c7
-rw-r--r--sound/soc/s3c24xx/neo1973_gta02_wm8753.c8
-rw-r--r--sound/soc/s3c24xx/regs-i2s-v2.h (renamed from arch/arm/plat-samsung/include/plat/regs-s3c2412-iis.h)51
-rw-r--r--sound/soc/s3c24xx/s3c-i2s-v2.c205
-rw-r--r--sound/soc/s3c24xx/s3c-i2s-v2.h15
-rw-r--r--sound/soc/s3c24xx/s3c2412-i2s.c77
-rw-r--r--sound/soc/s3c24xx/s3c2412-i2s.h6
-rw-r--r--sound/soc/s3c24xx/s3c64xx-i2s-v4.c209
-rw-r--r--sound/soc/s3c24xx/s3c64xx-i2s.c69
-rw-r--r--sound/soc/s3c24xx/s3c64xx-i2s.h18
-rw-r--r--sound/soc/s3c24xx/smdk64xx_wm8580.c6
-rw-r--r--sound/soc/sh/Kconfig3
-rw-r--r--sound/soc/sh/fsi-ak4642.c14
-rw-r--r--sound/soc/sh/fsi.c191
-rw-r--r--sound/soc/sh/siu.h3
-rw-r--r--sound/soc/sh/siu_dai.c2
-rw-r--r--sound/soc/sh/siu_pcm.c9
-rw-r--r--sound/soc/soc-cache.c134
-rw-r--r--sound/soc/soc-core.c255
-rw-r--r--sound/soc/soc-dapm.c217
-rw-r--r--sound/soc/soc-jack.c43
-rw-r--r--sound/soc/txx9/txx9aclc.c7
-rw-r--r--sound/sparc/amd7930.c7
-rw-r--r--sound/sparc/cs4231.c13
-rw-r--r--sound/sparc/dbri.c9
-rw-r--r--sound/usb/Kconfig4
-rw-r--r--sound/usb/Makefile26
-rw-r--r--sound/usb/caiaq/control.c99
-rw-r--r--sound/usb/caiaq/device.c16
-rw-r--r--sound/usb/caiaq/device.h24
-rw-r--r--sound/usb/caiaq/input.c163
-rw-r--r--sound/usb/card.c652
-rw-r--r--sound/usb/card.h105
-rw-r--r--sound/usb/debug.h15
-rw-r--r--sound/usb/endpoint.c394
-rw-r--r--sound/usb/endpoint.h11
-rw-r--r--sound/usb/format.c432
-rw-r--r--sound/usb/format.h9
-rw-r--r--sound/usb/helper.c113
-rw-r--r--sound/usb/helper.h32
-rw-r--r--sound/usb/midi.c (renamed from sound/usb/usbmidi.c)127
-rw-r--r--sound/usb/midi.h50
-rw-r--r--sound/usb/misc/Makefile2
-rw-r--r--sound/usb/misc/ua101.c (renamed from sound/usb/ua101.c)19
-rw-r--r--sound/usb/mixer.c (renamed from sound/usb/usbmixer.c)933
-rw-r--r--sound/usb/mixer.h55
-rw-r--r--sound/usb/mixer_maps.c (renamed from sound/usb/usbmixer_maps.c)4
-rw-r--r--sound/usb/mixer_quirks.c412
-rw-r--r--sound/usb/mixer_quirks.h13
-rw-r--r--sound/usb/pcm.c958
-rw-r--r--sound/usb/pcm.h14
-rw-r--r--sound/usb/proc.c168
-rw-r--r--sound/usb/proc.h8
-rw-r--r--sound/usb/quirks-table.h (renamed from sound/usb/usbquirks.h)79
-rw-r--r--sound/usb/quirks.c595
-rw-r--r--sound/usb/quirks.h23
-rw-r--r--sound/usb/urb.c995
-rw-r--r--sound/usb/urb.h21
-rw-r--r--sound/usb/usbaudio.c4050
-rw-r--r--sound/usb/usbaudio.h94
-rw-r--r--sound/usb/usx2y/us122l.c1
-rw-r--r--sound/usb/usx2y/usbusx2y.h1
-rw-r--r--tools/perf/Documentation/perf-annotate.txt2
-rw-r--r--tools/perf/Documentation/perf-bench.txt8
-rw-r--r--tools/perf/Documentation/perf-buildid-cache.txt4
-rw-r--r--tools/perf/Documentation/perf-diff.txt2
-rw-r--r--tools/perf/Documentation/perf-inject.txt35
-rw-r--r--tools/perf/Documentation/perf-kmem.txt2
-rw-r--r--tools/perf/Documentation/perf-kvm.txt68
-rw-r--r--tools/perf/Documentation/perf-list.txt33
-rw-r--r--tools/perf/Documentation/perf-probe.txt21
-rw-r--r--tools/perf/Documentation/perf-record.txt8
-rw-r--r--tools/perf/Documentation/perf-sched.txt4
-rw-r--r--tools/perf/Documentation/perf-stat.txt7
-rw-r--r--tools/perf/Documentation/perf-test.txt22
-rw-r--r--tools/perf/Documentation/perf-trace-perl.txt6
-rw-r--r--tools/perf/Documentation/perf-trace-python.txt16
-rw-r--r--tools/perf/Documentation/perf-trace.txt2
-rw-r--r--tools/perf/Makefile388
-rw-r--r--tools/perf/arch/powerpc/Makefile4
-rw-r--r--tools/perf/arch/powerpc/util/dwarf-regs.c88
-rw-r--r--tools/perf/arch/x86/Makefile4
-rw-r--r--tools/perf/arch/x86/util/dwarf-regs.c75
-rw-r--r--tools/perf/bench/mem-memcpy.c3
-rw-r--r--tools/perf/bench/sched-messaging.c10
-rw-r--r--tools/perf/bench/sched-pipe.c2
-rw-r--r--tools/perf/builtin-annotate.c300
-rw-r--r--tools/perf/builtin-bench.c4
-rw-r--r--tools/perf/builtin-buildid-cache.c2
-rw-r--r--tools/perf/builtin-buildid-list.c8
-rw-r--r--tools/perf/builtin-diff.c76
-rw-r--r--tools/perf/builtin-help.c8
-rw-r--r--tools/perf/builtin-inject.c228
-rw-r--r--tools/perf/builtin-kmem.c22
-rw-r--r--tools/perf/builtin-kvm.c144
-rw-r--r--tools/perf/builtin-lock.c693
-rw-r--r--tools/perf/builtin-probe.c290
-rw-r--r--tools/perf/builtin-record.c565
-rw-r--r--tools/perf/builtin-report.c240
-rw-r--r--tools/perf/builtin-sched.c41
-rw-r--r--tools/perf/builtin-stat.c179
-rw-r--r--tools/perf/builtin-test.c281
-rw-r--r--tools/perf/builtin-timechart.c119
-rw-r--r--tools/perf/builtin-top.c298
-rw-r--r--tools/perf/builtin-trace.c97
-rw-r--r--tools/perf/builtin.h3
-rw-r--r--tools/perf/command-list.txt3
-rw-r--r--tools/perf/perf-archive.sh3
-rw-r--r--tools/perf/perf.c29
-rw-r--r--tools/perf/perf.h9
-rw-r--r--tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Util.pm6
-rw-r--r--tools/perf/scripts/perl/bin/check-perf-trace-record2
-rw-r--r--tools/perf/scripts/perl/bin/failed-syscalls-record2
-rw-r--r--tools/perf/scripts/perl/bin/failed-syscalls-report8
-rw-r--r--tools/perf/scripts/perl/bin/rw-by-file-record3
-rw-r--r--tools/perf/scripts/perl/bin/rw-by-file-report8
-rw-r--r--tools/perf/scripts/perl/bin/rw-by-pid-record2
-rw-r--r--tools/perf/scripts/perl/bin/rw-by-pid-report2
-rw-r--r--tools/perf/scripts/perl/bin/rwtop-record2
-rw-r--r--tools/perf/scripts/perl/bin/rwtop-report23
-rw-r--r--tools/perf/scripts/perl/bin/wakeup-latency-record2
-rw-r--r--tools/perf/scripts/perl/bin/wakeup-latency-report2
-rw-r--r--tools/perf/scripts/perl/bin/workqueue-stats-record2
-rw-r--r--tools/perf/scripts/perl/bin/workqueue-stats-report2
-rw-r--r--tools/perf/scripts/perl/failed-syscalls.pl6
-rw-r--r--tools/perf/scripts/perl/rw-by-pid.pl60
-rw-r--r--tools/perf/scripts/perl/rwtop.pl199
-rw-r--r--tools/perf/scripts/perl/wakeup-latency.pl12
-rw-r--r--tools/perf/scripts/perl/workqueue-stats.pl12
-rw-r--r--tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py3
-rw-r--r--tools/perf/scripts/python/bin/failed-syscalls-by-pid-record2
-rw-r--r--tools/perf/scripts/python/bin/failed-syscalls-by-pid-report8
-rw-r--r--tools/perf/scripts/python/bin/sctop-record2
-rw-r--r--tools/perf/scripts/python/bin/sctop-report24
-rw-r--r--tools/perf/scripts/python/bin/syscall-counts-by-pid-record2
-rw-r--r--tools/perf/scripts/python/bin/syscall-counts-by-pid-report8
-rw-r--r--tools/perf/scripts/python/bin/syscall-counts-record2
-rw-r--r--tools/perf/scripts/python/bin/syscall-counts-report8
-rw-r--r--tools/perf/scripts/python/sctop.py78
-rwxr-xr-xtools/perf/util/PERF-VERSION-GEN6
-rw-r--r--tools/perf/util/abspath.c81
-rw-r--r--tools/perf/util/bitmap.c21
-rw-r--r--tools/perf/util/build-id.c24
-rw-r--r--tools/perf/util/build-id.h2
-rw-r--r--tools/perf/util/cache.h67
-rw-r--r--tools/perf/util/callchain.c122
-rw-r--r--tools/perf/util/callchain.h11
-rw-r--r--tools/perf/util/color.c48
-rw-r--r--tools/perf/util/color.h4
-rw-r--r--tools/perf/util/config.c461
-rw-r--r--tools/perf/util/debug.c8
-rw-r--r--tools/perf/util/debug.h30
-rw-r--r--tools/perf/util/event.c368
-rw-r--r--tools/perf/util/event.h66
-rw-r--r--tools/perf/util/exec_cmd.c6
-rw-r--r--tools/perf/util/exec_cmd.h1
-rw-r--r--tools/perf/util/header.c509
-rw-r--r--tools/perf/util/header.h39
-rw-r--r--tools/perf/util/help.c30
-rw-r--r--tools/perf/util/hist.c637
-rw-r--r--tools/perf/util/hist.h122
-rw-r--r--tools/perf/util/hweight.c31
-rw-r--r--tools/perf/util/include/asm/bitops.h18
-rw-r--r--tools/perf/util/include/asm/hweight.h8
-rw-r--r--tools/perf/util/include/dwarf-regs.h8
-rw-r--r--tools/perf/util/include/linux/bitmap.h38
-rw-r--r--tools/perf/util/include/linux/bitops.h20
-rw-r--r--tools/perf/util/include/linux/compiler.h2
-rw-r--r--tools/perf/util/include/linux/kernel.h11
-rw-r--r--tools/perf/util/map.c409
-rw-r--r--tools/perf/util/map.h131
-rw-r--r--tools/perf/util/newt.c1167
-rw-r--r--tools/perf/util/parse-events.c44
-rw-r--r--tools/perf/util/parse-events.h1
-rw-r--r--tools/perf/util/parse-options.c55
-rw-r--r--tools/perf/util/parse-options.h29
-rw-r--r--tools/perf/util/path.c204
-rw-r--r--tools/perf/util/probe-event.c1580
-rw-r--r--tools/perf/util/probe-event.h130
-rw-r--r--tools/perf/util/probe-finder.c1059
-rw-r--r--tools/perf/util/probe-finder.h70
-rw-r--r--tools/perf/util/pstack.c75
-rw-r--r--tools/perf/util/pstack.h12
-rw-r--r--tools/perf/util/quote.c433
-rw-r--r--tools/perf/util/quote.h39
-rw-r--r--tools/perf/util/run-command.c90
-rw-r--r--tools/perf/util/run-command.h30
-rw-r--r--tools/perf/util/scripting-engines/trace-event-perl.c3
-rw-r--r--tools/perf/util/scripting-engines/trace-event-python.c4
-rw-r--r--tools/perf/util/session.c570
-rw-r--r--tools/perf/util/session.h122
-rw-r--r--tools/perf/util/sigchain.c2
-rw-r--r--tools/perf/util/sigchain.h1
-rw-r--r--tools/perf/util/sort.c153
-rw-r--r--tools/perf/util/sort.h35
-rw-r--r--tools/perf/util/strbuf.c229
-rw-r--r--tools/perf/util/strbuf.h45
-rw-r--r--tools/perf/util/string.c45
-rw-r--r--tools/perf/util/string.h18
-rw-r--r--tools/perf/util/symbol.c608
-rw-r--r--tools/perf/util/symbol.h81
-rw-r--r--tools/perf/util/thread.c242
-rw-r--r--tools/perf/util/thread.h53
-rw-r--r--tools/perf/util/trace-event-info.c35
-rw-r--r--tools/perf/util/trace-event-parse.c120
-rw-r--r--tools/perf/util/trace-event-read.c123
-rw-r--r--tools/perf/util/trace-event.h15
-rw-r--r--tools/perf/util/util.c22
-rw-r--r--tools/perf/util/util.h187
-rw-r--r--tools/perf/util/wrapper.c110
-rw-r--r--tools/usb/ffs-test.c554
-rw-r--r--tools/usb/testusb.c547
-rw-r--r--usr/Makefile5
-rw-r--r--usr/initramfs_data.lzo.S29
-rw-r--r--virt/kvm/assigned-dev.c8
-rw-r--r--virt/kvm/coalesced_mmio.c6
-rw-r--r--virt/kvm/ioapic.c30
-rw-r--r--virt/kvm/ioapic.h2
-rw-r--r--virt/kvm/iommu.c4
-rw-r--r--virt/kvm/kvm_main.c63
7908 files changed, 581696 insertions, 242050 deletions
diff --git a/Documentation/00-INDEX b/Documentation/00-INDEX
index 06b982affe76..dd10b51b4e65 100644
--- a/Documentation/00-INDEX
+++ b/Documentation/00-INDEX
@@ -250,6 +250,8 @@ numastat.txt
- info on how to read Numa policy hit/miss statistics in sysfs.
oops-tracing.txt
- how to decode those nasty internal kernel error dump messages.
+padata.txt
+	- an introduction to the "padata" parallel execution API.
parisc/
- directory with info on using Linux on PA-RISC architecture.
parport.txt
diff --git a/Documentation/ABI/obsolete/sysfs-bus-usb b/Documentation/ABI/obsolete/sysfs-bus-usb
new file mode 100644
index 000000000000..bd096d33fbc7
--- /dev/null
+++ b/Documentation/ABI/obsolete/sysfs-bus-usb
@@ -0,0 +1,31 @@
+What: /sys/bus/usb/devices/.../power/level
+Date: March 2007
+KernelVersion: 2.6.21
+Contact: Alan Stern <stern@rowland.harvard.edu>
+Description:
+ Each USB device directory will contain a file named
+ power/level. This file holds a power-level setting for
+ the device, either "on" or "auto".
+
+ "on" means that the device is not allowed to autosuspend,
+ although normal suspends for system sleep will still
+ be honored. "auto" means the device will autosuspend
+ and autoresume in the usual manner, according to the
+ capabilities of its driver.
+
+ During normal use, devices should be left in the "auto"
+ level. The "on" level is meant for administrative uses.
+ If you want to suspend a device immediately but leave it
+ free to wake up in response to I/O requests, you should
+ write "0" to power/autosuspend.
+
+		Devices not capable of proper suspend and resume should be
+		left in the "on" level. Although the USB spec requires
+		devices to support suspend/resume, many of them do not.
+		In fact, so many don't that, by default, the USB core
+		initializes all non-hub devices in the "on" level. Some
+		drivers may change this setting when they are bound.
+
+ This file is deprecated and will be removed after 2010.
+ Use the power/control file instead; it does exactly the
+ same thing.
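
The power/control attribute referenced above is an ordinary sysfs text file, so a userspace tool can flip a device between "on" and "auto" with plain file I/O. The sketch below is illustrative only; the device path /sys/bus/usb/devices/1-1 is a hypothetical example and must be replaced with a real device directory.

/*
 * Minimal sketch (assumed device path): select the autosuspend policy
 * via power/control, the non-deprecated replacement for power/level.
 */
#include <stdio.h>

int main(void)
{
	const char *ctl = "/sys/bus/usb/devices/1-1/power/control";
	FILE *f = fopen(ctl, "w");

	if (!f) {
		perror(ctl);
		return 1;
	}
	fputs("auto", f);	/* "auto" permits autosuspend, "on" forbids it */
	fclose(f);
	return 0;
}

Writing "on" instead keeps the device fully powered, matching the behaviour described for the deprecated level file.
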
diff --git a/Documentation/ABI/obsolete/sysfs-class-rfkill b/Documentation/ABI/obsolete/sysfs-class-rfkill
new file mode 100644
index 000000000000..4201d5b05515
--- /dev/null
+++ b/Documentation/ABI/obsolete/sysfs-class-rfkill
@@ -0,0 +1,29 @@
+rfkill - radio frequency (RF) connector kill switch support
+
+For details on this subsystem, see Documentation/rfkill.txt.
+
+What: /sys/class/rfkill/rfkill[0-9]+/state
+Date: 09-Jul-2007
+KernelVersion:	v2.6.22
+Contact: linux-wireless@vger.kernel.org
+Description: Current state of the transmitter.
+		This file is deprecated and scheduled to be removed in 2014,
+		because it is not possible to express the 'soft and hard block'
+ state of the rfkill driver.
+Values: A numeric value.
+ 0: RFKILL_STATE_SOFT_BLOCKED
+ transmitter is turned off by software
+ 1: RFKILL_STATE_UNBLOCKED
+ transmitter is (potentially) active
+ 2: RFKILL_STATE_HARD_BLOCKED
+ transmitter is forced off by something outside of
+ the driver's control.
+
+What: /sys/class/rfkill/rfkill[0-9]+/claim
+Date: 09-Jul-2007
+KernelVersion v2.6.22
+Contact: linux-wireless@vger.kernel.org
+Description:	This file is deprecated because there is no longer a way to
+		claim control over just a single rfkill instance.
+ This file is scheduled to be removed in 2012.
+Values: 0: Kernel handles events
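
Since the deprecated state file simply reports one of the three numeric values listed above, a small reader makes the mapping concrete. This is only a sketch; rfkill0 stands in for whichever instance exists on a given system.

/* Sketch: print the meaning of the deprecated rfkill "state" value. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/class/rfkill/rfkill0/state", "r");
	int state;

	if (!f || fscanf(f, "%d", &state) != 1) {
		perror("state");
		return 1;
	}
	fclose(f);

	switch (state) {
	case 0:
		puts("RFKILL_STATE_SOFT_BLOCKED (turned off by software)");
		break;
	case 1:
		puts("RFKILL_STATE_UNBLOCKED (transmitter may be active)");
		break;
	case 2:
		puts("RFKILL_STATE_HARD_BLOCKED (forced off outside driver control)");
		break;
	default:
		puts("unexpected value");
	}
	return 0;
}
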
diff --git a/Documentation/ABI/stable/sysfs-class-rfkill b/Documentation/ABI/stable/sysfs-class-rfkill
new file mode 100644
index 000000000000..097f522c33bb
--- /dev/null
+++ b/Documentation/ABI/stable/sysfs-class-rfkill
@@ -0,0 +1,67 @@
+rfkill - radio frequency (RF) connector kill switch support
+
+For details on this subsystem, see Documentation/rfkill.txt.
+
+For the deprecated /sys/class/rfkill/*/state and
+/sys/class/rfkill/*/claim knobs of this interface, see
+Documentation/ABI/obsolete/sysfs-class-rfkill.
+
+What: /sys/class/rfkill
+Date: 09-Jul-2007
+KernelVersion: v2.6.22
+Contact:	linux-wireless@vger.kernel.org
+Description: The rfkill class subsystem folder.
+ Each registered rfkill driver is represented by an rfkillX
+		subfolder (X being an integer >= 0).
+
+
+What: /sys/class/rfkill/rfkill[0-9]+/name
+Date: 09-Jul-2007
+KernelVersion:	v2.6.22
+Contact: linux-wireless@vger.kernel.org
+Description: Name assigned by driver to this key (interface or driver name).
+Values: arbitrary string.
+
+
+What: /sys/class/rfkill/rfkill[0-9]+/type
+Date: 09-Jul-2007
+KernelVersion:	v2.6.22
+Contact: linux-wireless@vger.kernel.org
+Description: Driver type string ("wlan", "bluetooth", etc).
+Values: See include/linux/rfkill.h.
+
+
+What: /sys/class/rfkill/rfkill[0-9]+/persistent
+Date: 09-Jul-2007
+KernelVersion:	v2.6.22
+Contact: linux-wireless@vger.kernel.org
+Description: Whether the soft blocked state is initialised from non-volatile
+ storage at startup.
+Values: A numeric value.
+ 0: false
+ 1: true
+
+
+What: /sys/class/rfkill/rfkill[0-9]+/hard
+Date: 12-March-2010
+KernelVersion:	v2.6.34
+Contact: linux-wireless@vger.kernel.org
+Description: Current hardblock state. This file is read only.
+Values: A numeric value.
+ 0: inactive
+ The transmitter is (potentially) active.
+ 1: active
+ The transmitter is forced off by something outside of
+ the driver's control.
+
+
+What: /sys/class/rfkill/rfkill[0-9]+/soft
+Date: 12-March-2010
+KernelVersion:	v2.6.34
+Contact: linux-wireless@vger.kernel.org
+Description: Current softblock state. This file is read and write.
+Values: A numeric value.
+ 0: inactive
+ The transmitter is (potentially) active.
+ 1: active
+ The transmitter is turned off by software.
diff --git a/Documentation/ABI/testing/sysfs-bus-pci b/Documentation/ABI/testing/sysfs-bus-pci
index 25be3250f7d6..428676cfa61e 100644
--- a/Documentation/ABI/testing/sysfs-bus-pci
+++ b/Documentation/ABI/testing/sysfs-bus-pci
@@ -133,6 +133,46 @@ Description:
The symbolic link points to the PCI device sysfs entry of the
Physical Function this device associates with.
+
+What: /sys/bus/pci/slots/...
+Date: April 2005 (possibly older)
+KernelVersion: 2.6.12 (possibly older)
+Contact: linux-pci@vger.kernel.org
+Description:
+ When the appropriate driver is loaded, it will create a
+ directory per claimed physical PCI slot in
+ /sys/bus/pci/slots/. The names of these directories are
+ specific to the driver, which in turn, are specific to the
+		specific to the driver, which in turn is specific to the
+		platform, but in general they should match the label on the
+
+ The drivers that can create slot directories include the
+ PCI hotplug drivers, and as of 2.6.27, the pci_slot driver.
+
+ The slot directories contain, at a minimum, a file named
+ 'address' which contains the PCI bus:device:function tuple.
+ Other files may appear as well, but are specific to the
+ driver.
+
+What: /sys/bus/pci/slots/.../function[0-7]
+Date: March 2010
+KernelVersion: 2.6.35
+Contact: linux-pci@vger.kernel.org
+Description:
+ If PCI slot directories (as described above) are created,
+ and the physical slot is actually populated with a device,
+ symbolic links in the slot directory pointing to the
+ device's PCI functions are created as well.
+
+What: /sys/bus/pci/devices/.../slot
+Date: March 2010
+KernelVersion: 2.6.35
+Contact: linux-pci@vger.kernel.org
+Description:
+ If PCI slot directories (as described above) are created,
+ a symbolic link pointing to the slot directory will be
+ created as well.
+
What: /sys/bus/pci/slots/.../module
Date: June 2009
Contact: linux-pci@vger.kernel.org
diff --git a/Documentation/ABI/testing/sysfs-bus-usb b/Documentation/ABI/testing/sysfs-bus-usb
index bcebb9eaedce..294aa864a60a 100644
--- a/Documentation/ABI/testing/sysfs-bus-usb
+++ b/Documentation/ABI/testing/sysfs-bus-usb
@@ -14,34 +14,6 @@ Description:
The autosuspend delay for newly-created devices is set to
the value of the usbcore.autosuspend module parameter.
-What: /sys/bus/usb/devices/.../power/level
-Date: March 2007
-KernelVersion: 2.6.21
-Contact: Alan Stern <stern@rowland.harvard.edu>
-Description:
- Each USB device directory will contain a file named
- power/level. This file holds a power-level setting for
- the device, either "on" or "auto".
-
- "on" means that the device is not allowed to autosuspend,
- although normal suspends for system sleep will still
- be honored. "auto" means the device will autosuspend
- and autoresume in the usual manner, according to the
- capabilities of its driver.
-
- During normal use, devices should be left in the "auto"
- level. The "on" level is meant for administrative uses.
- If you want to suspend a device immediately but leave it
- free to wake up in response to I/O requests, you should
- write "0" to power/autosuspend.
-
- Device not capable of proper suspend and resume should be
- left in the "on" level. Although the USB spec requires
- devices to support suspend/resume, many of them do not.
- In fact so many don't that by default, the USB core
- initializes all non-hub devices in the "on" level. Some
- drivers may change this setting when they are bound.
-
What: /sys/bus/usb/devices/.../power/persist
Date: May 2007
KernelVersion: 2.6.23
diff --git a/Documentation/ABI/testing/sysfs-class-power b/Documentation/ABI/testing/sysfs-class-power
new file mode 100644
index 000000000000..78c7baca3587
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-power
@@ -0,0 +1,20 @@
+What: /sys/class/power/ds2760-battery.*/charge_now
+Date: May 2010
+KernelVersion: 2.6.35
+Contact: Daniel Mack <daniel@caiaq.de>
+Description:
+		This file is writable and can be used to set the current
+		coulomb counter value inside the battery monitor chip. This
+ is needed for unavoidable corrections of aging batteries.
+ A userspace daemon can monitor the battery charging logic
+		and, once the counter drops outside reasonable bounds, take
+ appropriate action.
+
+What: /sys/class/power/ds2760-battery.*/charge_full
+Date: May 2010
+KernelVersion: 2.6.35
+Contact: Daniel Mack <daniel@caiaq.de>
+Description:
+		This file is writable and can be used to set the assumed
+ battery 'full level'. As batteries age, this value has to be
+ amended over time.
diff --git a/Documentation/ABI/testing/sysfs-devices-memory b/Documentation/ABI/testing/sysfs-devices-memory
index bf1627b02a03..aba7d989208c 100644
--- a/Documentation/ABI/testing/sysfs-devices-memory
+++ b/Documentation/ABI/testing/sysfs-devices-memory
@@ -43,7 +43,7 @@ Date: September 2008
Contact: Badari Pulavarty <pbadari@us.ibm.com>
Description:
The file /sys/devices/system/memory/memoryX/state
- is read-write. When read, it's contents show the
+ is read-write. When read, its contents show the
online/offline state of the memory section. When written,
root can toggle the the online/offline state of a removable
memory section (see removable file description above)
diff --git a/Documentation/ABI/testing/sysfs-devices-node b/Documentation/ABI/testing/sysfs-devices-node
new file mode 100644
index 000000000000..453a210c3ceb
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-devices-node
@@ -0,0 +1,7 @@
+What: /sys/devices/system/node/nodeX/compact
+Date: February 2010
+Contact: Mel Gorman <mel@csn.ul.ie>
+Description:
+ When this file is written to, all memory within that node
+ will be compacted. When it completes, memory will be freed
+		into blocks which have as many contiguous pages as possible.
diff --git a/Documentation/ABI/testing/sysfs-devices-platform-_UDC_-gadget b/Documentation/ABI/testing/sysfs-devices-platform-_UDC_-gadget
new file mode 100644
index 000000000000..34034027b13c
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-devices-platform-_UDC_-gadget
@@ -0,0 +1,9 @@
+What: /sys/devices/platform/_UDC_/gadget/suspended
+Date: April 2010
+Contact: Fabien Chouteau <fabien.chouteau@barco.com>
+Description:
+		Show the suspend state of a USB composite gadget.
+ 1 -> suspended
+ 0 -> resumed
+
+ (_UDC_ is the name of the USB Device Controller driver)
diff --git a/Documentation/ABI/testing/sysfs-driver-hid-picolcd b/Documentation/ABI/testing/sysfs-driver-hid-picolcd
new file mode 100644
index 000000000000..08579e7e1e89
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-driver-hid-picolcd
@@ -0,0 +1,43 @@
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/operation_mode
+Date: March 2010
+Contact: Bruno Prémont <bonbons@linux-vserver.org>
+Description: Make it possible to switch the PicoLCD device between LCD
+ (firmware) and bootloader (flasher) operation modes.
+
+ Reading: returns list of available modes, the active mode being
+ enclosed in brackets ('[' and ']')
+
+ Writing: causes operation mode switch. Permitted values are
+ the non-active mode names listed when read.
+
+		Note: when switching modes, the current PicoLCD HID device gets
+		disconnected and reconnects after the above delay (see the
+		operation_mode_delay attribute for its value).
+
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/operation_mode_delay
+Date: April 2010
+Contact: Bruno Prémont <bonbons@linux-vserver.org>
+Description: Delay PicoLCD waits before restarting in new mode when
+ operation_mode has changed.
+
+ Reading/Writing: It is expressed in ms and permitted range is
+ 0..30000ms.
+
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/fb_update_rate
+Date: March 2010
+Contact: Bruno Prémont <bonbons@linux-vserver.org>
+Description: Make it possible to adjust defio refresh rate.
+
+ Reading: returns list of available refresh rates (expressed in Hz),
+ the active refresh rate being enclosed in brackets ('[' and ']')
+
+ Writing: accepts new refresh rate expressed in integer Hz
+ within permitted rates.
+
+		Note: As the device can barely do 2 complete refreshes a second,
+		it only makes sense to adjust this value if only one or two
+		tiles get changed and it's not appropriate to expect the application
+		to flush its tiny changes explicitly at higher than the default rate.
+
diff --git a/Documentation/ABI/testing/sysfs-driver-hid-prodikeys b/Documentation/ABI/testing/sysfs-driver-hid-prodikeys
new file mode 100644
index 000000000000..05d988c29a83
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-driver-hid-prodikeys
@@ -0,0 +1,29 @@
+What: /sys/bus/hid/drivers/prodikeys/.../channel
+Date: April 2010
+KernelVersion: 2.6.34
+Contact: Don Prince <dhprince.devel@yahoo.co.uk>
+Description:
+		Allows control (via software) of the midi channel to which
+		the pc-midi keyboard will output midi data.
+ Range: 0..15
+ Type: Read/write
+What: /sys/bus/hid/drivers/prodikeys/.../sustain
+Date: April 2010
+KernelVersion: 2.6.34
+Contact: Don Prince <dhprince.devel@yahoo.co.uk>
+Description:
+		Allows control (via software) of the sustain duration of a
+ note held by the pc-midi driver.
+ 0 means sustain mode is disabled.
+ Range: 0..5000 (milliseconds)
+ Type: Read/write
+What: /sys/bus/hid/drivers/prodikeys/.../octave
+Date: April 2010
+KernelVersion: 2.6.34
+Contact: Don Prince <dhprince.devel@yahoo.co.uk>
+Description:
+ Controls the octave shift modifier in the pc-midi driver.
+ The octave can be shifted via software up/down 2 octaves.
+		0 means no octave shift.
+ Range: -2..2 (minus 2 to plus 2)
+ Type: Read/Write
diff --git a/Documentation/ABI/testing/sysfs-driver-hid-roccat-kone b/Documentation/ABI/testing/sysfs-driver-hid-roccat-kone
new file mode 100644
index 000000000000..88340a23ce91
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-driver-hid-roccat-kone
@@ -0,0 +1,111 @@
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/actual_dpi
+Date: March 2010
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: It is possible to switch the dpi setting of the mouse with the
+ press of a button.
+ When read, this file returns the raw number of the actual dpi
+ setting reported by the mouse. This number has to be further
+		processed to obtain the real dpi value.
+
+ VALUE DPI
+ 1 800
+ 2 1200
+ 3 1600
+ 4 2000
+ 5 2400
+ 6 3200
+
+ This file is readonly.
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/actual_profile
+Date: March 2010
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: When read, this file returns the number of the actual profile.
+ This file is readonly.
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/firmware_version
+Date: March 2010
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: When read, this file returns the raw integer version number of the
+ firmware reported by the mouse. Using the integer value eases
+		further usage in other programs. To obtain the real version
+		number, the decimal point has to be shifted 2 positions to the
+		left. E.g. a returned value of 138 means version 1.38.
+ This file is readonly.
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/kone_driver_version
+Date: March 2010
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: When read, this file returns the driver version.
+ The format of the string is "v<major>.<minor>.<patchlevel>".
+ This attribute is used by the userland tools to find the sysfs-
+		paths of installed kone-mice and determine the capabilities of
+ the driver. Versions of this driver for old kernels replace
+ usbhid instead of generic-usb. The way to scan for this file
+ has been chosen to provide a consistent way for all supported
+ kernel versions.
+ This file is readonly.
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/profile[1-5]
+Date: March 2010
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: The mouse can store 5 profiles which can be switched by the
+		press of a button. A profile holds information like button
+ mappings, sensitivity, the colors of the 5 leds and light
+ effects.
+ When read, these files return the respective profile. The
+ returned data is 975 bytes in size.
+ When written, this file lets one write the respective profile
+ data back to the mouse. The data has to be 975 bytes long.
+		The mouse will reject invalid data; however, the profile number
+		stored in the profile data doesn't need to match the number of
+		the profile slot being written.
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/settings
+Date: March 2010
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: When read, this file returns the settings stored in the mouse.
+ The size of the data is 36 bytes and holds information like the
+ startup_profile, tcu state and calibration_data.
+		When written, this file lets one write settings back to the mouse.
+ The data has to be 36 bytes long. The mouse will reject invalid
+ data.
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/startup_profile
+Date: March 2010
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: The integer value of this attribute ranges from 1 to 5.
+ When read, this attribute returns the number of the profile
+ that's active when the mouse is powered on.
+ When written, this file sets the number of the startup profile
+ and the mouse activates this profile immediately.
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/tcu
+Date: March 2010
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: The mouse has a "Tracking Control Unit" which lets the user
+ calibrate the laser power to fit the mousepad surface.
+ When read, this file returns the current state of the TCU,
+ where 0 means off and 1 means on.
+ Writing 0 in this file will switch the TCU off.
+ Writing 1 in this file will start the calibration which takes
+ around 6 seconds to complete and activates the TCU.
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/weight
+Date: March 2010
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: The mouse can be equipped with one of four supplied weights
+		ranging from 5 to 20 grams which are recognized by the mouse,
+		and the fitted weight can be read out. When read, this file returns the
+ raw value returned by the mouse which eases further processing
+ in other software.
+ The values map to the weights as follows:
+
+ VALUE WEIGHT
+ 0 none
+ 1 5g
+ 2 10g
+ 3 15g
+ 4 20g
+
+ This file is readonly.
diff --git a/Documentation/ABI/testing/sysfs-firmware-sfi b/Documentation/ABI/testing/sysfs-firmware-sfi
new file mode 100644
index 000000000000..4be7d44aeacf
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-firmware-sfi
@@ -0,0 +1,15 @@
+What: /sys/firmware/sfi/tables/
+Date: May 2010
+Contact: Len Brown <lenb@kernel.org>
+Description:
+ SFI defines a number of small static memory tables
+ so the kernel can get platform information from firmware.
+
+ The tables are defined in the latest SFI specification:
+ http://simplefirmware.org/documentation
+
+ While the tables are used by the kernel, user-space
+ can observe them this way:
+
+ # cd /sys/firmware/sfi/tables
+ # cat $TABLENAME > $TABLENAME.bin
diff --git a/Documentation/ABI/testing/sysfs-wacom b/Documentation/ABI/testing/sysfs-wacom
new file mode 100644
index 000000000000..1517976e25c4
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-wacom
@@ -0,0 +1,10 @@
+What: /sys/class/hidraw/hidraw*/device/speed
+Date: April 2010
+KernelVersion:	2.6.35
+Contact: linux-bluetooth@vger.kernel.org
+Description:
+ The /sys/class/hidraw/hidraw*/device/speed file controls
+		the reporting speed of the Wacom Bluetooth tablet. Reading from
+		this file returns 1 if the tablet reports in high speed mode
+		or 0 otherwise. Writing one of these values to this file
+		switches the reporting speed.
diff --git a/Documentation/Changes b/Documentation/Changes
index f08b313cd235..eca9f6e6fbe6 100644
--- a/Documentation/Changes
+++ b/Documentation/Changes
@@ -49,7 +49,7 @@ o oprofile 0.9 # oprofiled --version
o udev 081 # udevinfo -V
o grub 0.93 # grub --version
o mcelog 0.6
-o iptables 1.4.1 # iptables -V
+o iptables 1.4.2 # iptables -V
Kernel compilation
diff --git a/Documentation/DMA-API-HOWTO.txt b/Documentation/DMA-API-HOWTO.txt
index 52618ab069ad..98ce51796f71 100644
--- a/Documentation/DMA-API-HOWTO.txt
+++ b/Documentation/DMA-API-HOWTO.txt
@@ -639,6 +639,36 @@ is planned to completely remove virt_to_bus() and bus_to_virt() as
they are entirely deprecated. Some ports already do not provide these
as it is impossible to correctly support them.
+ Handling Errors
+
+DMA address space is limited on some architectures and an allocation
+failure can be determined by:
+
+- checking if dma_alloc_coherent returns NULL or dma_map_sg returns 0
+
+- checking the returned dma_addr_t of dma_map_single and dma_map_page
+ by using dma_mapping_error():
+
+ dma_addr_t dma_handle;
+
+ dma_handle = dma_map_single(dev, addr, size, direction);
+ if (dma_mapping_error(dev, dma_handle)) {
+ /*
+ * reduce current DMA mapping usage,
+ * delay and try again later or
+ * reset driver.
+ */
+ }
+
+Networking drivers must call dev_kfree_skb to free the socket buffer
+and return NETDEV_TX_OK if the DMA mapping fails on the transmit hook
+(ndo_start_xmit). This means that the socket buffer is just dropped in
+the failure case.
+
+SCSI drivers must return SCSI_MLQUEUE_HOST_BUSY if the DMA mapping
+fails in the queuecommand hook. This means that the SCSI subsystem
+passes the command to the driver again later.
+
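+A minimal sketch of the networking pattern described above might look
+like this (hypothetical "foo" driver; the struct device used for DMA
+mapping is assumed to live in the driver's private data):
+
+	static netdev_tx_t foo_start_xmit(struct sk_buff *skb,
+					  struct net_device *netdev)
+	{
+		struct foo_priv *priv = netdev_priv(netdev);
+		dma_addr_t mapping;
+
+		mapping = dma_map_single(priv->dev, skb->data, skb->len,
+					 DMA_TO_DEVICE);
+		if (dma_mapping_error(priv->dev, mapping)) {
+			/* mapping failed: drop the packet, report success */
+			dev_kfree_skb(skb);
+			return NETDEV_TX_OK;
+		}
+
+		/* ... hand the mapped buffer to the hardware ... */
+		return NETDEV_TX_OK;
+	}
+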
Optimizing Unmap State Space Consumption
On many platforms, dma_unmap_{single,page}() is simply a nop.
@@ -703,46 +733,29 @@ to "Closing".
1) Struct scatterlist requirements.
- Struct scatterlist must contain, at a minimum, the following
- members:
+ Don't invent the architecture specific struct scatterlist; just use
+ <asm-generic/scatterlist.h>. You need to enable
+ CONFIG_NEED_SG_DMA_LENGTH if the architecture supports IOMMUs
+ (including software IOMMU).
- struct page *page;
- unsigned int offset;
- unsigned int length;
+2) ARCH_KMALLOC_MINALIGN
- The base address is specified by a "page+offset" pair.
+  Architectures must ensure that a kmalloc'ed buffer is
+ DMA-safe. Drivers and subsystems depend on it. If an architecture
+ isn't fully DMA-coherent (i.e. hardware doesn't ensure that data in
+ the CPU cache is identical to data in main memory),
+ ARCH_KMALLOC_MINALIGN must be set so that the memory allocator
+  makes sure that a kmalloc'ed buffer doesn't share a cache line with
+  other data. See arch/arm/include/asm/cache.h as an example.
- Previous versions of struct scatterlist contained a "void *address"
- field that was sometimes used instead of page+offset. As of Linux
- 2.5., page+offset is always used, and the "address" field has been
- deleted.
-
-2) More to come...
-
- Handling Errors
-
-DMA address space is limited on some architectures and an allocation
-failure can be determined by:
-
-- checking if dma_alloc_coherent returns NULL or dma_map_sg returns 0
-
-- checking the returned dma_addr_t of dma_map_single and dma_map_page
- by using dma_mapping_error():
-
- dma_addr_t dma_handle;
-
- dma_handle = dma_map_single(dev, addr, size, direction);
- if (dma_mapping_error(dev, dma_handle)) {
- /*
- * reduce current DMA mapping usage,
- * delay and try again later or
- * reset driver.
- */
- }
+ Note that ARCH_KMALLOC_MINALIGN is about DMA memory alignment
+ constraints. You don't need to worry about the architecture data
+ alignment constraints (e.g. the alignment constraints about 64-bit
+ objects).
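+
+  As a rough illustration (not the exact contents of any particular
+  architecture's header), a non-DMA-coherent architecture might define
+  something like the following in its asm/cache.h:
+
+	#define L1_CACHE_SHIFT		5
+	#define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
+
+	/*
+	 * kmalloc()ed memory may be used for DMA, so buffers must not
+	 * share a cache line with other data on such architectures.
+	 */
+	#define ARCH_KMALLOC_MINALIGN	L1_CACHE_BYTES
+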
Closing
-This document, and the API itself, would not be in it's current
+This document, and the API itself, would not be in its current
form without the feedback and suggestions from numerous individuals.
We would like to specifically mention, in no particular order, the
following people:
diff --git a/Documentation/DocBook/Makefile b/Documentation/DocBook/Makefile
index 325cfd1d6d99..c7e5dc7e8cb3 100644
--- a/Documentation/DocBook/Makefile
+++ b/Documentation/DocBook/Makefile
@@ -14,7 +14,7 @@ DOCBOOKS := z8530book.xml mcabook.xml device-drivers.xml \
genericirq.xml s390-drivers.xml uio-howto.xml scsi.xml \
mac80211.xml debugobjects.xml sh.xml regulator.xml \
alsa-driver-api.xml writing-an-alsa-driver.xml \
- tracepoint.xml media.xml
+ tracepoint.xml media.xml drm.xml
###
# The build process is as follows (targets):
diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl
new file mode 100644
index 000000000000..7583dc7cf64d
--- /dev/null
+++ b/Documentation/DocBook/drm.tmpl
@@ -0,0 +1,839 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
+ "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
+
+<book id="drmDevelopersGuide">
+ <bookinfo>
+ <title>Linux DRM Developer's Guide</title>
+
+ <copyright>
+ <year>2008-2009</year>
+ <holder>
+ Intel Corporation (Jesse Barnes &lt;jesse.barnes@intel.com&gt;)
+ </holder>
+ </copyright>
+
+ <legalnotice>
+ <para>
+ The contents of this file may be used under the terms of the GNU
+ General Public License version 2 (the "GPL") as distributed in
+ the kernel source COPYING file.
+ </para>
+ </legalnotice>
+ </bookinfo>
+
+<toc></toc>
+
+ <!-- Introduction -->
+
+ <chapter id="drmIntroduction">
+ <title>Introduction</title>
+ <para>
+ The Linux DRM layer contains code intended to support the needs
+ of complex graphics devices, usually containing programmable
+ pipelines well suited to 3D graphics acceleration. Graphics
+ drivers in the kernel can make use of DRM functions to make
+ tasks like memory management, interrupt handling and DMA easier,
+ and provide a uniform interface to applications.
+ </para>
+ <para>
+ A note on versions: this guide covers features found in the DRM
+ tree, including the TTM memory manager, output configuration and
+ mode setting, and the new vblank internals, in addition to all
+ the regular features found in current kernels.
+ </para>
+ <para>
+ [Insert diagram of typical DRM stack here]
+ </para>
+ </chapter>
+
+ <!-- Internals -->
+
+ <chapter id="drmInternals">
+ <title>DRM Internals</title>
+ <para>
+ This chapter documents DRM internals relevant to driver authors
+ and developers working to add support for the latest features to
+ existing drivers.
+ </para>
+ <para>
+ First, we'll go over some typical driver initialization
+ requirements, like setting up command buffers, creating an
+ initial output configuration, and initializing core services.
+ Subsequent sections will cover core internals in more detail,
+ providing implementation notes and examples.
+ </para>
+ <para>
+ The DRM layer provides several services to graphics drivers,
+ many of them driven by the application interfaces it provides
+ through libdrm, the library that wraps most of the DRM ioctls.
+ These include vblank event handling, memory
+ management, output management, framebuffer management, command
+ submission &amp; fencing, suspend/resume support, and DMA
+ services.
+ </para>
+ <para>
+ The core of every DRM driver is struct drm_device. Drivers
+      will typically statically initialize a drm_driver structure,
+ then pass it to drm_init() at load time.
+ </para>
+
+ <!-- Internals: driver init -->
+
+ <sect1>
+ <title>Driver initialization</title>
+ <para>
+ Before calling the DRM initialization routines, the driver must
+      first create and fill out a struct drm_driver structure.
+ </para>
+ <programlisting>
+ static struct drm_driver driver = {
+ /* don't use mtrr's here, the Xserver or user space app should
+ * deal with them for intel hardware.
+ */
+ .driver_features =
+ DRIVER_USE_AGP | DRIVER_REQUIRE_AGP |
+ DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_MODESET,
+ .load = i915_driver_load,
+ .unload = i915_driver_unload,
+ .firstopen = i915_driver_firstopen,
+ .lastclose = i915_driver_lastclose,
+ .preclose = i915_driver_preclose,
+ .save = i915_save,
+ .restore = i915_restore,
+ .device_is_agp = i915_driver_device_is_agp,
+ .get_vblank_counter = i915_get_vblank_counter,
+ .enable_vblank = i915_enable_vblank,
+ .disable_vblank = i915_disable_vblank,
+ .irq_preinstall = i915_driver_irq_preinstall,
+ .irq_postinstall = i915_driver_irq_postinstall,
+ .irq_uninstall = i915_driver_irq_uninstall,
+ .irq_handler = i915_driver_irq_handler,
+ .reclaim_buffers = drm_core_reclaim_buffers,
+ .get_map_ofs = drm_core_get_map_ofs,
+ .get_reg_ofs = drm_core_get_reg_ofs,
+ .fb_probe = intelfb_probe,
+ .fb_remove = intelfb_remove,
+ .fb_resize = intelfb_resize,
+ .master_create = i915_master_create,
+ .master_destroy = i915_master_destroy,
+#if defined(CONFIG_DEBUG_FS)
+ .debugfs_init = i915_debugfs_init,
+ .debugfs_cleanup = i915_debugfs_cleanup,
+#endif
+ .gem_init_object = i915_gem_init_object,
+ .gem_free_object = i915_gem_free_object,
+ .gem_vm_ops = &amp;i915_gem_vm_ops,
+ .ioctls = i915_ioctls,
+ .fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .ioctl = drm_ioctl,
+ .mmap = drm_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = i915_compat_ioctl,
+#endif
+ },
+ .pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+ .probe = probe,
+ .remove = __devexit_p(drm_cleanup_pci),
+ },
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .patchlevel = DRIVER_PATCHLEVEL,
+ };
+ </programlisting>
+ <para>
+ In the example above, taken from the i915 DRM driver, the driver
+ sets several flags indicating what core features it supports.
+ We'll go over the individual callbacks in later sections. Since
+ flags indicate which features your driver supports to the DRM
+ core, you need to set most of them prior to calling drm_init(). Some,
+      like DRIVER_MODESET, can be set later based on user supplied parameters,
+ but that's the exception rather than the rule.
+ </para>
+ <variablelist>
+ <title>Driver flags</title>
+ <varlistentry>
+ <term>DRIVER_USE_AGP</term>
+ <listitem><para>
+ Driver uses AGP interface
+ </para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>DRIVER_REQUIRE_AGP</term>
+ <listitem><para>
+ Driver needs AGP interface to function.
+ </para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>DRIVER_USE_MTRR</term>
+ <listitem>
+ <para>
+ Driver uses MTRR interface for mapping memory. Deprecated.
+ </para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>DRIVER_PCI_DMA</term>
+ <listitem><para>
+ Driver is capable of PCI DMA. Deprecated.
+ </para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>DRIVER_SG</term>
+ <listitem><para>
+ Driver can perform scatter/gather DMA. Deprecated.
+ </para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>DRIVER_HAVE_DMA</term>
+ <listitem><para>Driver supports DMA. Deprecated.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>DRIVER_HAVE_IRQ</term><term>DRIVER_IRQ_SHARED</term>
+ <listitem>
+ <para>
+            DRIVER_HAVE_IRQ indicates whether the driver has an IRQ
+ handler, DRIVER_IRQ_SHARED indicates whether the device &amp;
+ handler support shared IRQs (note that this is required of
+ PCI drivers).
+ </para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>DRIVER_DMA_QUEUE</term>
+ <listitem>
+ <para>
+ If the driver queues DMA requests and completes them
+ asynchronously, this flag should be set. Deprecated.
+ </para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>DRIVER_FB_DMA</term>
+ <listitem>
+ <para>
+ Driver supports DMA to/from the framebuffer. Deprecated.
+ </para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term>DRIVER_MODESET</term>
+ <listitem>
+ <para>
+ Driver supports mode setting interfaces.
+ </para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ <para>
+ In this specific case, the driver requires AGP and supports
+ IRQs. DMA, as we'll see, is handled by device specific ioctls
+ in this case. It also supports the kernel mode setting APIs, though
+ unlike in the actual i915 driver source, this example unconditionally
+ exports KMS capability.
+ </para>
+ </sect1>
+
+ <!-- Internals: driver load -->
+
+ <sect1>
+ <title>Driver load</title>
+ <para>
+ In the previous section, we saw what a typical drm_driver
+ structure might look like. One of the more important fields in
+ the structure is the hook for the load function.
+ </para>
+ <programlisting>
+ static struct drm_driver driver = {
+ ...
+ .load = i915_driver_load,
+ ...
+ };
+ </programlisting>
+ <para>
+ The load function has many responsibilities: allocating a driver
+ private structure, specifying supported performance counters,
+ configuring the device (e.g. mapping registers &amp; command
+ buffers), initializing the memory manager, and setting up the
+ initial output configuration.
+ </para>
+ <para>
+ Note that the tasks performed at driver load time must not
+ conflict with DRM client requirements. For instance, if user
+ level mode setting drivers are in use, it would be problematic
+ to perform output discovery &amp; configuration at load time.
+ Likewise, if pre-memory management aware user level drivers are
+ in use, memory management and command buffer setup may need to
+ be omitted. These requirements are driver specific, and care
+ needs to be taken to keep both old and new applications and
+ libraries working. The i915 driver supports the "modeset"
+ module parameter to control whether advanced features are
+ enabled at load time or in legacy fashion. If compatibility is
+ a concern (e.g. with drivers converted over to the new interfaces
+ from the old ones), care must be taken to prevent incompatible
+ device initialization and control with the currently active
+ userspace drivers.
+ </para>
+
+ <sect2>
+ <title>Driver private &amp; performance counters</title>
+ <para>
+ The driver private hangs off the main drm_device structure and
+ can be used for tracking various device specific bits of
+ information, like register offsets, command buffer status,
+ register state for suspend/resume, etc. At load time, a
+        driver can simply allocate one and set drm_device.dev_private
+        appropriately; at unload time the driver can free it and set
+        drm_device.dev_private to NULL.
+ </para>
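+      <para>
+        A minimal sketch of this pattern (for a hypothetical "foo" driver;
+        names and error handling are illustrative only) might look like:
+      </para>
+      <programlisting>
+      static int foo_driver_load(struct drm_device *dev, unsigned long flags)
+      {
+              struct foo_private *dev_priv;
+
+              dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
+              if (!dev_priv)
+                      return -ENOMEM;
+              dev->dev_private = dev_priv;
+              /* ... map registers, set up the memory manager, etc. ... */
+              return 0;
+      }
+
+      static int foo_driver_unload(struct drm_device *dev)
+      {
+              kfree(dev->dev_private);
+              dev->dev_private = NULL;
+              return 0;
+      }
+      </programlisting>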
+ <para>
+ The DRM supports several counters which can be used for rough
+ performance characterization. Note that the DRM stat counter
+ system is not often used by applications, and supporting
+ additional counters is completely optional.
+ </para>
+ <para>
+ These interfaces are deprecated and should not be used. If performance
+ monitoring is desired, the developer should investigate and
+ potentially enhance the kernel perf and tracing infrastructure to export
+ GPU related performance information to performance monitoring
+ tools and applications.
+ </para>
+ </sect2>
+
+ <sect2>
+ <title>Configuring the device</title>
+ <para>
+ Obviously, device configuration will be device specific.
+ However, there are several common operations: finding a
+ device's PCI resources, mapping them, and potentially setting
+ up an IRQ handler.
+ </para>
+ <para>
+ Finding &amp; mapping resources is fairly straightforward. The
+ DRM wrapper functions, drm_get_resource_start() and
+ drm_get_resource_len() can be used to find BARs on the given
+ drm_device struct. Once those values have been retrieved, the
+ driver load function can call drm_addmap() to create a new
+ mapping for the BAR in question. Note you'll probably want a
+ drm_local_map_t in your driver private structure to track any
+ mappings you create.
+<!-- !Fdrivers/gpu/drm/drm_bufs.c drm_get_resource_* -->
+<!-- !Finclude/drm/drmP.h drm_local_map_t -->
+ </para>
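+      <para>
+        For example, mapping the register BAR might look like the following
+        sketch (the BAR number and the driver private fields are
+        hypothetical):
+      </para>
+      <programlisting>
+      unsigned long base, size;
+      int ret;
+
+      /* assume BAR 0 holds the MMIO registers */
+      base = drm_get_resource_start(dev, 0);
+      size = drm_get_resource_len(dev, 0);
+      ret = drm_addmap(dev, base, size, _DRM_REGISTERS,
+                       _DRM_KERNEL | _DRM_DRIVER, &amp;dev_priv->mmio_map);
+      </programlisting>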
+ <para>
+      If compatibility with other operating systems isn't a concern
+ (DRM drivers can run under various BSD variants and OpenSolaris),
+ native Linux calls can be used for the above, e.g. pci_resource_*
+ and iomap*/iounmap. See the Linux device driver book for more
+ info.
+ </para>
+ <para>
+ Once you have a register map, you can use the DRM_READn() and
+ DRM_WRITEn() macros to access the registers on your device, or
+ use driver specific versions to offset into your MMIO space
+ relative to a driver specific base pointer (see I915_READ for
+ example).
+ </para>
+ <para>
+ If your device supports interrupt generation, you may want to
+      set up an interrupt handler at driver load time as well.  This
+ is done using the drm_irq_install() function. If your device
+ supports vertical blank interrupts, it should call
+ drm_vblank_init() to initialize the core vblank handling code before
+ enabling interrupts on your device. This ensures the vblank related
+ structures are allocated and allows the core to handle vblank events.
+ </para>
+<!--!Fdrivers/char/drm/drm_irq.c drm_irq_install-->
+ <para>
+ Once your interrupt handler is registered (it'll use your
+ drm_driver.irq_handler as the actual interrupt handling
+ function), you can safely enable interrupts on your device,
+ assuming any other state your interrupt handler uses is also
+ initialized.
+ </para>
+ <para>
+ Another task that may be necessary during configuration is
+ mapping the video BIOS. On many devices, the VBIOS describes
+ device configuration, LCD panel timings (if any), and contains
+ flags indicating device state. Mapping the BIOS can be done
+ using the pci_map_rom() call, a convenience function that
+ takes care of mapping the actual ROM, whether it has been
+ shadowed into memory (typically at address 0xc0000) or exists
+ on the PCI device in the ROM BAR. Note that once you've
+ mapped the ROM and extracted any necessary information, be
+ sure to unmap it; on many devices the ROM address decoder is
+ shared with other BARs, so leaving it mapped can cause
+ undesired behavior like hangs or memory corruption.
+<!--!Fdrivers/pci/rom.c pci_map_rom-->
+ </para>
+ </sect2>
+
+ <sect2>
+ <title>Memory manager initialization</title>
+ <para>
+ In order to allocate command buffers, cursor memory, scanout
+ buffers, etc., as well as support the latest features provided
+ by packages like Mesa and the X.Org X server, your driver
+ should support a memory manager.
+ </para>
+ <para>
+ If your driver supports memory management (it should!), you'll
+      need to set that up at load time as well.  How you initialize
+ it depends on which memory manager you're using, TTM or GEM.
+ </para>
+ <sect3>
+ <title>TTM initialization</title>
+ <para>
+ TTM (for Translation Table Manager) manages video memory and
+ aperture space for graphics devices. TTM supports both UMA devices
+ and devices with dedicated video RAM (VRAM), i.e. most discrete
+ graphics devices. If your device has dedicated RAM, supporting
+        TTM is desirable. TTM also integrates tightly with your
+ driver specific buffer execution function. See the radeon
+ driver for examples.
+ </para>
+ <para>
+ The core TTM structure is the ttm_bo_driver struct. It contains
+ several fields with function pointers for initializing the TTM,
+ allocating and freeing memory, waiting for command completion
+ and fence synchronization, and memory migration. See the
+ radeon_ttm.c file for an example of usage.
+ </para>
+ <para>
+ The ttm_global_reference structure is made up of several fields:
+ </para>
+ <programlisting>
+ struct ttm_global_reference {
+ enum ttm_global_types global_type;
+ size_t size;
+ void *object;
+ int (*init) (struct ttm_global_reference *);
+ void (*release) (struct ttm_global_reference *);
+ };
+ </programlisting>
+ <para>
+ There should be one global reference structure for your memory
+ manager as a whole, and there will be others for each object
+ created by the memory manager at runtime. Your global TTM should
+ have a type of TTM_GLOBAL_TTM_MEM. The size field for the global
+ object should be sizeof(struct ttm_mem_global), and the init and
+ release hooks should point at your driver specific init and
+ release routines, which will probably eventually call
+ ttm_mem_global_init and ttm_mem_global_release respectively.
+ </para>
+ <para>
+ Once your global TTM accounting structure is set up and initialized
+ (done by calling ttm_global_item_ref on the global object you
+ just created), you'll need to create a buffer object TTM to
+ provide a pool for buffer object allocation by clients and the
+ kernel itself. The type of this object should be TTM_GLOBAL_TTM_BO,
+ and its size should be sizeof(struct ttm_bo_global). Again,
+ driver specific init and release functions can be provided,
+ likely eventually calling ttm_bo_global_init and
+ ttm_bo_global_release, respectively. Also like the previous
+ object, ttm_global_item_ref is used to create an initial reference
+        count for the TTM, which will call your initialization function.
+ </para>
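+      <para>
+        A condensed sketch of this setup (hypothetical "foo" driver,
+        error handling trimmed) might look like:
+      </para>
+      <programlisting>
+      static int foo_ttm_mem_global_init(struct ttm_global_reference *ref)
+      {
+              return ttm_mem_global_init(ref->object);
+      }
+
+      static void foo_ttm_mem_global_release(struct ttm_global_reference *ref)
+      {
+              ttm_mem_global_release(ref->object);
+      }
+
+      static int foo_ttm_global_init(struct foo_private *dev_priv)
+      {
+              struct ttm_global_reference *global_ref = &amp;dev_priv->mem_global_ref;
+
+              global_ref->global_type = TTM_GLOBAL_TTM_MEM;
+              global_ref->size = sizeof(struct ttm_mem_global);
+              global_ref->init = &amp;foo_ttm_mem_global_init;
+              global_ref->release = &amp;foo_ttm_mem_global_release;
+              return ttm_global_item_ref(global_ref);
+      }
+      </programlisting>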
+ </sect3>
+ <sect3>
+ <title>GEM initialization</title>
+ <para>
+ GEM is an alternative to TTM, designed specifically for UMA
+ devices. It has simpler initialization and execution requirements
+ than TTM, but has no VRAM management capability. Core GEM
+ initialization is comprised of a basic drm_mm_init call to create
+ a GTT DRM MM object, which provides an address space pool for
+ object allocation. In a KMS configuration, the driver will
+ need to allocate and initialize a command ring buffer following
+ basic GEM initialization. Most UMA devices have a so-called
+ "stolen" memory region, which provides space for the initial
+ framebuffer and large, contiguous memory regions required by the
+ device. This space is not typically managed by GEM, and must
+ be initialized separately into its own DRM MM object.
+ </para>
+ <para>
+ Initialization will be driver specific, and will depend on
+ the architecture of the device. In the case of Intel
+ integrated graphics chips like 965GM, GEM initialization can
+ be done by calling the internal GEM init function,
+ i915_gem_do_init(). Since the 965GM is a UMA device
+ (i.e. it doesn't have dedicated VRAM), GEM will manage
+ making regular RAM available for GPU operations. Memory set
+ aside by the BIOS (called "stolen" memory by the i915
+ driver) will be managed by the DRM memrange allocator; the
+ rest of the aperture will be managed by GEM.
+ <programlisting>
+ /* Basic memrange allocator for stolen space (aka vram) */
+ drm_memrange_init(&amp;dev_priv->vram, 0, prealloc_size);
+ /* Let GEM Manage from end of prealloc space to end of aperture */
+ i915_gem_do_init(dev, prealloc_size, agp_size);
+ </programlisting>
+<!--!Edrivers/char/drm/drm_memrange.c-->
+ </para>
+ <para>
+ Once the memory manager has been set up, we can allocate the
+ command buffer. In the i915 case, this is also done with a
+ GEM function, i915_gem_init_ringbuffer().
+ </para>
+ </sect3>
+ </sect2>
+
+ <sect2>
+ <title>Output configuration</title>
+ <para>
+ The final initialization task is output configuration. This involves
+ finding and initializing the CRTCs, encoders and connectors
+ for your device, creating an initial configuration and
+ registering a framebuffer console driver.
+ </para>
+ <sect3>
+ <title>Output discovery and initialization</title>
+ <para>
+ Several core functions exist to create CRTCs, encoders and
+ connectors, namely drm_crtc_init(), drm_connector_init() and
+ drm_encoder_init(), along with several "helper" functions to
+ perform common tasks.
+ </para>
+ <para>
+ Connectors should be registered with sysfs once they've been
+ detected and initialized, using the
+ drm_sysfs_connector_add() function. Likewise, when they're
+ removed from the system, they should be destroyed with
+ drm_sysfs_connector_remove().
+ </para>
+ <programlisting>
+<![CDATA[
+void intel_crt_init(struct drm_device *dev)
+{
+ struct drm_connector *connector;
+ struct intel_output *intel_output;
+
+ intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL);
+ if (!intel_output)
+ return;
+
+ connector = &intel_output->base;
+ drm_connector_init(dev, &intel_output->base,
+ &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
+
+ drm_encoder_init(dev, &intel_output->enc, &intel_crt_enc_funcs,
+ DRM_MODE_ENCODER_DAC);
+
+ drm_mode_connector_attach_encoder(&intel_output->base,
+ &intel_output->enc);
+
+ /* Set up the DDC bus. */
+ intel_output->ddc_bus = intel_i2c_create(dev, GPIOA, "CRTDDC_A");
+ if (!intel_output->ddc_bus) {
+ dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
+ "failed.\n");
+ return;
+ }
+
+ intel_output->type = INTEL_OUTPUT_ANALOG;
+ connector->interlace_allowed = 0;
+ connector->doublescan_allowed = 0;
+
+ drm_encoder_helper_add(&intel_output->enc, &intel_crt_helper_funcs);
+ drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
+
+ drm_sysfs_connector_add(connector);
+}
+]]>
+ </programlisting>
+ <para>
+ In the example above (again, taken from the i915 driver), a
+ CRT connector and encoder combination is created. A device
+ specific i2c bus is also created, for fetching EDID data and
+ performing monitor detection. Once the process is complete,
+        the new connector is registered with sysfs, to make its
+ properties available to applications.
+ </para>
+ <sect4>
+ <title>Helper functions and core functions</title>
+ <para>
+ Since many PC-class graphics devices have similar display output
+ designs, the DRM provides a set of helper functions to make
+ output management easier. The core helper routines handle
+ encoder re-routing and disabling of unused functions following
+ mode set. Using the helpers is optional, but recommended for
+ devices with PC-style architectures (i.e. a set of display planes
+ for feeding pixels to encoders which are in turn routed to
+ connectors). Devices with more complex requirements needing
+ finer grained management can opt to use the core callbacks
+ directly.
+ </para>
+ <para>
+ [Insert typical diagram here.] [Insert OMAP style config here.]
+ </para>
+ </sect4>
+ <para>
+ For each encoder, CRTC and connector, several functions must
+ be provided, depending on the object type. Encoder objects
+        should provide a DPMS (basically on/off) function, mode fixup
+ (for converting requested modes into native hardware timings),
+ and prepare, set and commit functions for use by the core DRM
+ helper functions. Connector helpers need to provide mode fetch and
+ validity functions as well as an encoder matching function for
+        returning an ideal encoder for a given connector.  The core
+ connector functions include a DPMS callback, (deprecated)
+ save/restore routines, detection, mode probing, property handling,
+ and cleanup functions.
+ </para>
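+      <para>
+        A sketch of the corresponding helper structures for a hypothetical
+        "foo" output follows (the callback implementations are assumed to
+        exist elsewhere in the driver):
+      </para>
+      <programlisting>
+      static const struct drm_encoder_helper_funcs foo_encoder_helper_funcs = {
+              .dpms = foo_encoder_dpms,
+              .mode_fixup = foo_encoder_mode_fixup,
+              .prepare = foo_encoder_prepare,
+              .mode_set = foo_encoder_mode_set,
+              .commit = foo_encoder_commit,
+      };
+
+      static const struct drm_connector_helper_funcs foo_connector_helper_funcs = {
+              .get_modes = foo_connector_get_modes,
+              .mode_valid = foo_connector_mode_valid,
+              .best_encoder = foo_connector_best_encoder,
+      };
+      </programlisting>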
+<!--!Edrivers/char/drm/drm_crtc.h-->
+<!--!Edrivers/char/drm/drm_crtc.c-->
+<!--!Edrivers/char/drm/drm_crtc_helper.c-->
+ </sect3>
+ </sect2>
+ </sect1>
+
+ <!-- Internals: vblank handling -->
+
+ <sect1>
+ <title>VBlank event handling</title>
+ <para>
+ The DRM core exposes two vertical blank related ioctls:
+ DRM_IOCTL_WAIT_VBLANK and DRM_IOCTL_MODESET_CTL.
+<!--!Edrivers/char/drm/drm_irq.c-->
+ </para>
+ <para>
+ DRM_IOCTL_WAIT_VBLANK takes a struct drm_wait_vblank structure
+ as its argument, and is used to block or request a signal when a
+ specified vblank event occurs.
+ </para>
+ <para>
+ DRM_IOCTL_MODESET_CTL should be called by application level
+ drivers before and after mode setting, since on many devices the
+ vertical blank counter will be reset at that time. Internally,
+ the DRM snapshots the last vblank count when the ioctl is called
+ with the _DRM_PRE_MODESET command so that the counter won't go
+ backwards (which is dealt with when _DRM_POST_MODESET is used).
+ </para>
+ <para>
+ To support the functions above, the DRM core provides several
+ helper functions for tracking vertical blank counters, and
+ requires drivers to provide several callbacks:
+ get_vblank_counter(), enable_vblank() and disable_vblank(). The
+ core uses get_vblank_counter() to keep the counter accurate
+ across interrupt disable periods. It should return the current
+ vertical blank event count, which is often tracked in a device
+ register. The enable and disable vblank callbacks should enable
+ and disable vertical blank interrupts, respectively. In the
+ absence of DRM clients waiting on vblank events, the core DRM
+ code will use the disable_vblank() function to disable
+ interrupts, which saves power. They'll be re-enabled again when
+ a client calls the vblank wait ioctl above.
+ </para>
+ <para>
+ Devices that don't provide a count register can simply use an
+ internal atomic counter incremented on every vertical blank
+ interrupt, and can make their enable and disable vblank
+ functions into no-ops.
+ </para>
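+    <para>
+      For instance, a driver without a hardware counter register might keep
+      a per-CRTC atomic counter in its private structure, as in this sketch
+      (the field and function names are hypothetical):
+    </para>
+    <programlisting>
+    u32 foo_get_vblank_counter(struct drm_device *dev, int crtc)
+    {
+            struct foo_private *dev_priv = dev->dev_private;
+
+            return atomic_read(&amp;dev_priv->vblank_count[crtc]);
+    }
+
+    /* In the interrupt handler, for each vblank interrupt received: */
+    atomic_inc(&amp;dev_priv->vblank_count[crtc]);
+    drm_handle_vblank(dev, crtc);
+    </programlisting>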
+ </sect1>
+
+ <sect1>
+ <title>Memory management</title>
+ <para>
+ The memory manager lies at the heart of many DRM operations, and
+ is also required to support advanced client features like OpenGL
+ pbuffers. The DRM currently contains two memory managers, TTM
+ and GEM.
+ </para>
+
+ <sect2>
+ <title>The Translation Table Manager (TTM)</title>
+ <para>
+ TTM was developed by Tungsten Graphics, primarily by Thomas
+ Hellström, and is intended to be a flexible, high performance
+ graphics memory manager.
+ </para>
+ <para>
+ Drivers wishing to support TTM must fill out a drm_bo_driver
+ structure.
+ </para>
+ <para>
+ TTM design background and information belongs here.
+ </para>
+ </sect2>
+
+ <sect2>
+ <title>The Graphics Execution Manager (GEM)</title>
+ <para>
+ GEM is an Intel project, authored by Eric Anholt and Keith
+ Packard. It provides simpler interfaces than TTM, and is well
+ suited for UMA devices.
+ </para>
+ <para>
+ GEM-enabled drivers must provide gem_init_object() and
+ gem_free_object() callbacks to support the core memory
+ allocation routines. They should also provide several driver
+ specific ioctls to support command execution, pinning, buffer
+ read &amp; write, mapping, and domain ownership transfers.
+ </para>
+ <para>
+ On a fundamental level, GEM involves several operations: memory
+ allocation and freeing, command execution, and aperture management
+ at command execution time. Buffer object allocation is relatively
+ straightforward and largely provided by Linux's shmem layer, which
+ provides memory to back each object. When mapped into the GTT
+ or used in a command buffer, the backing pages for an object are
+ flushed to memory and marked write combined so as to be coherent
+ with the GPU. Likewise, when the GPU finishes rendering to an object,
+ if the CPU accesses it, it must be made coherent with the CPU's view
+ of memory, usually involving GPU cache flushing of various kinds.
+ This core CPU&lt;-&gt;GPU coherency management is provided by the GEM
+ set domain function, which evaluates an object's current domain and
+ performs any necessary flushing or synchronization to put the object
+ into the desired coherency domain (note that the object may be busy,
+ i.e. an active render target; in that case the set domain function
+ will block the client and wait for rendering to complete before
+ performing any necessary flushing operations).
+ </para>
+ <para>
+ Perhaps the most important GEM function is providing a command
+ execution interface to clients. Client programs construct command
+ buffers containing references to previously allocated memory objects
+ and submit them to GEM. At that point, GEM will take care to bind
+ all the objects into the GTT, execute the buffer, and provide
+ necessary synchronization between clients accessing the same buffers.
+ This often involves evicting some objects from the GTT and re-binding
+ others (a fairly expensive operation), and providing relocation
+ support which hides fixed GTT offsets from clients. Clients must
+ take care not to submit command buffers that reference more objects
+ than can fit in the GTT or GEM will reject them and no rendering
+ will occur. Similarly, if several objects in the buffer require
+ fence registers to be allocated for correct rendering (e.g. 2D blits
+ on pre-965 chips), care must be taken not to require more fence
+ registers than are available to the client. Such resource management
+ should be abstracted from the client in libdrm.
+ </para>
+ </sect2>
+
+ </sect1>
+
+ <!-- Output management -->
+ <sect1>
+ <title>Output management</title>
+ <para>
+ At the core of the DRM output management code is a set of
+ structures representing CRTCs, encoders and connectors.
+ </para>
+ <para>
+ A CRTC is an abstraction representing a part of the chip that
+ contains a pointer to a scanout buffer. Therefore, the number
+ of CRTCs available determines how many independent scanout
+ buffers can be active at any given time. The CRTC structure
+ contains several fields to support this: a pointer to some video
+ memory, a display mode, and an (x, y) offset into the video
+ memory to support panning or configurations where one piece of
+ video memory spans multiple CRTCs.
+ </para>
+ <para>
+ An encoder takes pixel data from a CRTC and converts it to a
+ format suitable for any attached connectors. On some devices,
+ it may be possible to have a CRTC send data to more than one
+ encoder. In that case, both encoders would receive data from
+ the same scanout buffer, resulting in a "cloned" display
+ configuration across the connectors attached to each encoder.
+ </para>
+ <para>
+ A connector is the final destination for pixel data on a device,
+ and usually connects directly to an external display device like
+ a monitor or laptop panel. A connector can only be attached to
+ one encoder at a time. The connector is also the structure
+ where information about the attached display is kept, so it
+ contains fields for display data, EDID data, DPMS &amp;
+ connection status, and information about modes supported on the
+ attached displays.
+ </para>
+<!--!Edrivers/char/drm/drm_crtc.c-->
+ </sect1>
+
+ <sect1>
+ <title>Framebuffer management</title>
+ <para>
+ In order to set a mode on a given CRTC, encoder and connector
+ configuration, clients need to provide a framebuffer object which
+ will provide a source of pixels for the CRTC to deliver to the encoder(s)
+ and ultimately the connector(s) in the configuration. A framebuffer
+ is fundamentally a driver specific memory object, made into an opaque
+ handle by the DRM addfb function. Once an fb has been created this
+ way it can be passed to the KMS mode setting routines for use in
+ a configuration.
+ </para>
+ </sect1>
+
+ <sect1>
+ <title>Command submission &amp; fencing</title>
+ <para>
+ This should cover a few device specific command submission
+ implementations.
+ </para>
+ </sect1>
+
+ <sect1>
+ <title>Suspend/resume</title>
+ <para>
+ The DRM core provides some suspend/resume code, but drivers
+ wanting full suspend/resume support should provide save() and
+ restore() functions. These will be called at suspend,
+ hibernate, or resume time, and should perform any state save or
+ restore required by your device across suspend or hibernate
+ states.
+ </para>
+ </sect1>
+
+ <sect1>
+ <title>DMA services</title>
+ <para>
+ This should cover how DMA mapping etc. is supported by the core.
+ These functions are deprecated and should not be used.
+ </para>
+ </sect1>
+ </chapter>
+
+ <!-- External interfaces -->
+
+ <chapter id="drmExternals">
+ <title>Userland interfaces</title>
+ <para>
+ The DRM core exports several interfaces to applications,
+ generally intended to be used through corresponding libdrm
+ wrapper functions. In addition, drivers export device specific
+ interfaces for use by userspace drivers &amp; device aware
+ applications through ioctls and sysfs files.
+ </para>
+ <para>
+ External interfaces include: memory mapping, context management,
+ DMA operations, AGP management, vblank control, fence
+ management, memory management, and output management.
+ </para>
+ <para>
+ Cover generic ioctls and sysfs layout here. Only need high
+ level info, since man pages will cover the rest.
+ </para>
+ </chapter>
+
+ <!-- API reference -->
+
+ <appendix id="drmDriverApi">
+ <title>DRM Driver API</title>
+ <para>
+ Include auto-generated API reference here (need to reference it
+ from paragraphs above too).
+ </para>
+ </appendix>
+
+</book>
diff --git a/Documentation/DocBook/kgdb.tmpl b/Documentation/DocBook/kgdb.tmpl
index 5cff41a5fa7c..55f12ac37acd 100644
--- a/Documentation/DocBook/kgdb.tmpl
+++ b/Documentation/DocBook/kgdb.tmpl
@@ -4,7 +4,7 @@
<book id="kgdbOnLinux">
<bookinfo>
- <title>Using kgdb and the kgdb Internals</title>
+ <title>Using kgdb, kdb and the kernel debugger internals</title>
<authorgroup>
<author>
@@ -17,33 +17,8 @@
</affiliation>
</author>
</authorgroup>
-
- <authorgroup>
- <author>
- <firstname>Tom</firstname>
- <surname>Rini</surname>
- <affiliation>
- <address>
- <email>trini@kernel.crashing.org</email>
- </address>
- </affiliation>
- </author>
- </authorgroup>
-
- <authorgroup>
- <author>
- <firstname>Amit S.</firstname>
- <surname>Kale</surname>
- <affiliation>
- <address>
- <email>amitkale@linsyssoft.com</email>
- </address>
- </affiliation>
- </author>
- </authorgroup>
-
<copyright>
- <year>2008</year>
+ <year>2008,2010</year>
<holder>Wind River Systems, Inc.</holder>
</copyright>
<copyright>
@@ -69,41 +44,76 @@
<chapter id="Introduction">
<title>Introduction</title>
<para>
- kgdb is a source level debugger for linux kernel. It is used along
- with gdb to debug a linux kernel. The expectation is that gdb can
- be used to "break in" to the kernel to inspect memory, variables
- and look through call stack information similar to what an
- application developer would use gdb for. It is possible to place
- breakpoints in kernel code and perform some limited execution
- stepping.
+ The kernel has two different debugger front ends (kdb and kgdb)
+ which interface to the debug core. It is possible to use either
+ of the debugger front ends and dynamically transition between them
+ if you configure the kernel properly at compile and runtime.
+ </para>
+ <para>
+    Kdb is a simplistic shell-style interface which you can use on a
+    system console with a keyboard or serial console.  You can use it
+ to inspect memory, registers, process lists, dmesg, and even set
+ breakpoints to stop in a certain location. Kdb is not a source
+ level debugger, although you can set breakpoints and execute some
+ basic kernel run control. Kdb is mainly aimed at doing some
+ analysis to aid in development or diagnosing kernel problems. You
+ can access some symbols by name in kernel built-ins or in kernel
+ modules if the code was built
+ with <symbol>CONFIG_KALLSYMS</symbol>.
+ </para>
+ <para>
+ Kgdb is intended to be used as a source level debugger for the
+ Linux kernel. It is used along with gdb to debug a Linux kernel.
+ The expectation is that gdb can be used to "break in" to the
+ kernel to inspect memory, variables and look through call stack
+ information similar to the way an application developer would use
+ gdb to debug an application. It is possible to place breakpoints
+ in kernel code and perform some limited execution stepping.
</para>
<para>
- Two machines are required for using kgdb. One of these machines is a
- development machine and the other is a test machine. The kernel
- to be debugged runs on the test machine. The development machine
- runs an instance of gdb against the vmlinux file which contains
- the symbols (not boot image such as bzImage, zImage, uImage...).
- In gdb the developer specifies the connection parameters and
- connects to kgdb. The type of connection a developer makes with
- gdb depends on the availability of kgdb I/O modules compiled as
- builtin's or kernel modules in the test machine's kernel.
+ Two machines are required for using kgdb. One of these machines is
+ a development machine and the other is the target machine. The
+ kernel to be debugged runs on the target machine. The development
+ machine runs an instance of gdb against the vmlinux file which
+ contains the symbols (not boot image such as bzImage, zImage,
+ uImage...). In gdb the developer specifies the connection
+ parameters and connects to kgdb. The type of connection a
+ developer makes with gdb depends on the availability of kgdb I/O
+      modules compiled as built-ins or loadable kernel modules in the target
+      machine's kernel.
</para>
</chapter>
<chapter id="CompilingAKernel">
- <title>Compiling a kernel</title>
+ <title>Compiling a kernel</title>
+ <para>
+ <itemizedlist>
+ <listitem><para>In order to enable compilation of kdb, you must first enable kgdb.</para></listitem>
+ <listitem><para>The kgdb test compile options are described in the kgdb test suite chapter.</para></listitem>
+ </itemizedlist>
+ </para>
+ <sect1 id="CompileKGDB">
+ <title>Kernel config options for kgdb</title>
<para>
To enable <symbol>CONFIG_KGDB</symbol> you should first turn on
"Prompt for development and/or incomplete code/drivers"
(CONFIG_EXPERIMENTAL) in "General setup", then under the
- "Kernel debugging" select "KGDB: kernel debugging with remote gdb".
+ "Kernel debugging" select "KGDB: kernel debugger".
+ </para>
+ <para>
+ While it is not a hard requirement that you have symbols in your
+ vmlinux file, gdb tends not to be very useful without the symbolic
+ data, so you will want to turn
+ on <symbol>CONFIG_DEBUG_INFO</symbol> which is called "Compile the
+ kernel with debug info" in the config menu.
</para>
<para>
It is advised, but not required that you turn on the
- CONFIG_FRAME_POINTER kernel option. This option inserts code to
- into the compiled executable which saves the frame information in
- registers or on the stack at different points which will allow a
- debugger such as gdb to more accurately construct stack back traces
- while debugging the kernel.
+ <symbol>CONFIG_FRAME_POINTER</symbol> kernel option which is called "Compile the
+ kernel with frame pointers" in the config menu. This option
+      inserts code into the compiled executable which saves the frame
+ information in registers or on the stack at different points which
+ allows a debugger such as gdb to more accurately construct
+ stack back traces while debugging the kernel.
</para>
<para>
If the architecture that you are using supports the kernel option
@@ -116,38 +126,160 @@
this option.
</para>
<para>
- Next you should choose one of more I/O drivers to interconnect debugging
- host and debugged target. Early boot debugging requires a KGDB
- I/O driver that supports early debugging and the driver must be
- built into the kernel directly. Kgdb I/O driver configuration
- takes place via kernel or module parameters, see following
- chapter.
+      Next you should choose one or more I/O drivers to interconnect
+ debugging host and debugged target. Early boot debugging requires
+ a KGDB I/O driver that supports early debugging and the driver
+ must be built into the kernel directly. Kgdb I/O driver
+ configuration takes place via kernel or module parameters which
+      you can learn more about in the section that describes the
+ parameter "kgdboc".
</para>
- <para>
- The kgdb test compile options are described in the kgdb test suite chapter.
+ <para>Here is an example set of .config symbols to enable or
+ disable for kgdb:
+ <itemizedlist>
+ <listitem><para># CONFIG_DEBUG_RODATA is not set</para></listitem>
+ <listitem><para>CONFIG_FRAME_POINTER=y</para></listitem>
+ <listitem><para>CONFIG_KGDB=y</para></listitem>
+ <listitem><para>CONFIG_KGDB_SERIAL_CONSOLE=y</para></listitem>
+ </itemizedlist>
</para>
-
+ </sect1>
+ <sect1 id="CompileKDB">
+ <title>Kernel config options for kdb</title>
+ <para>Kdb is quite a bit more complex than the simple gdbstub
+ sitting on top of the kernel's debug core. Kdb must implement a
+ shell, and also adds some helper functions in other parts of the
+ kernel, responsible for printing out interesting data such as what
+ you would see if you ran "lsmod", or "ps". In order to build kdb
+ into the kernel you follow the same steps as you would for kgdb.
+ </para>
+ <para>The main config option for kdb
+ is <symbol>CONFIG_KGDB_KDB</symbol> which is called "KGDB_KDB:
+ include kdb frontend for kgdb" in the config menu. In theory you
+ would have already also selected an I/O driver such as the
+ CONFIG_KGDB_SERIAL_CONSOLE interface if you plan on using kdb on a
+ serial port, when you were configuring kgdb.
+ </para>
+ <para>If you want to use a PS/2-style keyboard with kdb, you would
+ select CONFIG_KDB_KEYBOARD which is called "KGDB_KDB: keyboard as
+ input device" in the config menu. The CONFIG_KDB_KEYBOARD option
+ is not used for anything in the gdb interface to kgdb. The
+ CONFIG_KDB_KEYBOARD option only works with kdb.
+ </para>
+ <para>Here is an example set of .config symbols to enable/disable kdb:
+ <itemizedlist>
+ <listitem><para># CONFIG_DEBUG_RODATA is not set</para></listitem>
+ <listitem><para>CONFIG_FRAME_POINTER=y</para></listitem>
+ <listitem><para>CONFIG_KGDB=y</para></listitem>
+ <listitem><para>CONFIG_KGDB_SERIAL_CONSOLE=y</para></listitem>
+ <listitem><para>CONFIG_KGDB_KDB=y</para></listitem>
+ <listitem><para>CONFIG_KDB_KEYBOARD=y</para></listitem>
+ </itemizedlist>
+ </para>
+ </sect1>
</chapter>
- <chapter id="EnableKGDB">
- <title>Enable kgdb for debugging</title>
- <para>
- In order to use kgdb you must activate it by passing configuration
- information to one of the kgdb I/O drivers. If you do not pass any
- configuration information kgdb will not do anything at all. Kgdb
- will only actively hook up to the kernel trap hooks if a kgdb I/O
- driver is loaded and configured. If you unconfigure a kgdb I/O
- driver, kgdb will unregister all the kernel hook points.
+ <chapter id="kgdbKernelArgs">
+ <title>Kernel Debugger Boot Arguments</title>
+ <para>This section describes the various runtime kernel
+ parameters that affect the configuration of the kernel debugger.
+ The following chapter covers using kdb and kgdb as well as
+ provides some examples of the configuration parameters.</para>
+ <sect1 id="kgdboc">
+ <title>Kernel parameter: kgdboc</title>
+    <para>The kgdboc driver name was originally an abbreviation meant to
+ stand for "kgdb over console". Today it is the primary mechanism
+ to configure how to communicate from gdb to kgdb as well as the
+ devices you want to use to interact with the kdb shell.
+ </para>
+ <para>For kgdb/gdb, kgdboc is designed to work with a single serial
+ port. It is intended to cover the circumstance where you want to
+ use a serial console as your primary console as well as using it to
+ perform kernel debugging. It is also possible to use kgdb on a
+ serial port which is not designated as a system console. Kgdboc
+ may be configured as a kernel built-in or a kernel loadable module.
+ You can only make use of <constant>kgdbwait</constant> and early
+ debugging if you build kgdboc into the kernel as a built-in.
</para>
+ <sect2 id="kgdbocArgs">
+ <title>kgdboc arguments</title>
+ <para>Usage: <constant>kgdboc=[kbd][[,]serial_device][,baud]</constant></para>
+ <sect3 id="kgdbocArgs1">
+ <title>Using loadable module or built-in</title>
<para>
- All drivers can be reconfigured at run time, if
- <symbol>CONFIG_SYSFS</symbol> and <symbol>CONFIG_MODULES</symbol>
- are enabled, by echo'ing a new config string to
- <constant>/sys/module/&lt;driver&gt;/parameter/&lt;option&gt;</constant>.
- The driver can be unconfigured by passing an empty string. You cannot
- change the configuration while the debugger is attached. Make sure
- to detach the debugger with the <constant>detach</constant> command
- prior to trying unconfigure a kgdb I/O driver.
+ <orderedlist>
+ <listitem><para>As a kernel built-in:</para>
+ <para>Use the kernel boot argument: <constant>kgdboc=&lt;tty-device&gt;,[baud]</constant></para></listitem>
+ <listitem>
+ <para>As a kernel loadable module:</para>
+ <para>Use the command: <constant>modprobe kgdboc kgdboc=&lt;tty-device&gt;,[baud]</constant></para>
+    <para>Here are two examples of how you might format the kgdboc
+ string. The first is for an x86 target using the first serial port.
+ The second example is for the ARM Versatile AB using the second
+ serial port.
+ <orderedlist>
+ <listitem><para><constant>kgdboc=ttyS0,115200</constant></para></listitem>
+ <listitem><para><constant>kgdboc=ttyAMA1,115200</constant></para></listitem>
+ </orderedlist>
</para>
+ </listitem>
+ </orderedlist></para>
+ </sect3>
+ <sect3 id="kgdbocArgs2">
+ <title>Configure kgdboc at runtime with sysfs</title>
+    <para>At run time you can enable or disable kgdboc by echoing
+    parameters into sysfs.  Here are two examples:</para>
+ <orderedlist>
+ <listitem><para>Enable kgdboc on ttyS0</para>
+ <para><constant>echo ttyS0 &gt; /sys/module/kgdboc/parameters/kgdboc</constant></para></listitem>
+ <listitem><para>Disable kgdboc</para>
+ <para><constant>echo "" &gt; /sys/module/kgdboc/parameters/kgdboc</constant></para></listitem>
+ </orderedlist>
+ <para>NOTE: You do not need to specify the baud if you are
+    configuring the console on a tty which is already configured or
+ open.</para>
+ </sect3>
+ <sect3 id="kgdbocArgs3">
+ <title>More examples</title>
+      <para>You can configure kgdboc to use the keyboard, a serial device,
+      or both, depending on whether you are using kdb, kgdb, or both, in
+      one of the following scenarios.
+ <orderedlist>
+ <listitem><para>kdb and kgdb over only a serial port</para>
+ <para><constant>kgdboc=&lt;serial_device&gt;[,baud]</constant></para>
+ <para>Example: <constant>kgdboc=ttyS0,115200</constant></para>
+ </listitem>
+ <listitem><para>kdb and kgdb with keyboard and a serial port</para>
+ <para><constant>kgdboc=kbd,&lt;serial_device&gt;[,baud]</constant></para>
+ <para>Example: <constant>kgdboc=kbd,ttyS0,115200</constant></para>
+ </listitem>
+ <listitem><para>kdb with a keyboard</para>
+ <para><constant>kgdboc=kbd</constant></para>
+ </listitem>
+ </orderedlist>
+ </para>
+ </sect3>
+ <para>NOTE: Kgdboc does not support interrupting the target via the
+ gdb remote protocol. You must manually send a sysrq-g unless you
+ have a proxy that splits console output to a terminal program.
+ A console proxy has a separate TCP port for the debugger and a separate
+ TCP port for the "human" console. The proxy can take care of sending
+ the sysrq-g for you.
+ </para>
+ <para>When using kgdboc with no debugger proxy, you can end up
+ connecting the debugger at one of two entry points. If an
+ exception occurs after you have loaded kgdboc, a message should
+ print on the console stating it is waiting for the debugger. In
+ this case you disconnect your terminal program and then connect the
+ debugger in its place. If you want to interrupt the target system
+ and forcibly enter a debug session you have to issue a Sysrq
+ sequence and then type the letter <constant>g</constant>. Then
+ you disconnect the terminal session and connect gdb. Your options
+ if you don't like this are to hack gdb to send the sysrq-g for you
+ as well as on the initial connect, or to use a debugger proxy that
+ allows an unmodified gdb to do the debugging.
+ </para>
+ </sect2>
+ </sect1>
<sect1 id="kgdbwait">
<title>Kernel parameter: kgdbwait</title>
<para>
@@ -162,103 +294,204 @@
</para>
<para>
The kernel will stop and wait as early as the I/O driver and
- architecture will allow when you use this option. If you build the
- kgdb I/O driver as a kernel module kgdbwait will not do anything.
+      architecture allow when you use this option. If you build the
+ kgdb I/O driver as a loadable kernel module kgdbwait will not do
+ anything.
</para>
</sect1>
- <sect1 id="kgdboc">
- <title>Kernel parameter: kgdboc</title>
- <para>
- The kgdboc driver was originally an abbreviation meant to stand for
- "kgdb over console". Kgdboc is designed to work with a single
- serial port. It was meant to cover the circumstance
- where you wanted to use a serial console as your primary console as
- well as using it to perform kernel debugging. Of course you can
- also use kgdboc without assigning a console to the same port.
+ <sect1 id="kgdbcon">
+ <title>Kernel parameter: kgdbcon</title>
+ <para> The kgdbcon feature allows you to see printk() messages
+ inside gdb while gdb is connected to the kernel. Kdb does not make
+ use of the kgdbcon feature.
+ </para>
+ <para>Kgdb supports using the gdb serial protocol to send console
+ messages to the debugger when the debugger is connected and running.
+ There are two ways to activate this feature.
+ <orderedlist>
+ <listitem><para>Activate with the kernel command line option:</para>
+ <para><constant>kgdbcon</constant></para>
+ </listitem>
+ <listitem><para>Use sysfs before configuring an I/O driver</para>
+ <para>
+ <constant>echo 1 &gt; /sys/module/kgdb/parameters/kgdb_use_con</constant>
+ </para>
+ <para>
+ NOTE: If you do this after you configure the kgdb I/O driver, the
+      setting will not take effect until the next time the I/O driver is
+      reconfigured.
+ </para>
+ </listitem>
+  </orderedlist>
+    </para>
+  <para>IMPORTANT NOTE: You cannot use kgdboc + kgdbcon on a tty that is an
+  active system console. An example of incorrect usage is <constant>console=ttyS0,115200 kgdboc=ttyS0 kgdbcon</constant>
+  </para>
+  <para>It is possible to use this option with kgdboc on a tty that is not a system console.
+  </para>
- <sect2 id="UsingKgdboc">
- <title>Using kgdboc</title>
- <para>
- You can configure kgdboc via sysfs or a module or kernel boot line
- parameter depending on if you build with CONFIG_KGDBOC as a module
- or built-in.
- <orderedlist>
- <listitem><para>From the module load or build-in</para>
- <para><constant>kgdboc=&lt;tty-device&gt;,[baud]</constant></para>
+ </sect1>
+ </chapter>
+ <chapter id="usingKDB">
+ <title>Using kdb</title>
<para>
- The example here would be if your console port was typically ttyS0, you would use something like <constant>kgdboc=ttyS0,115200</constant> or on the ARM Versatile AB you would likely use <constant>kgdboc=ttyAMA0,115200</constant>
+ </para>
+ <sect1 id="quickKDBserial">
+ <title>Quick start for kdb on a serial port</title>
+ <para>This is a quick example of how to use kdb.</para>
+ <para><orderedlist>
+ <listitem><para>Boot kernel with arguments:
+ <itemizedlist>
+ <listitem><para><constant>console=ttyS0,115200 kgdboc=ttyS0,115200</constant></para></listitem>
+ </itemizedlist></para>
+ <para>OR</para>
+    <para>Configure kgdboc after the kernel has booted; assuming you are using a serial port console:
+ <itemizedlist>
+ <listitem><para><constant>echo ttyS0 &gt; /sys/module/kgdboc/parameters/kgdboc</constant></para></listitem>
+ </itemizedlist>
</para>
</listitem>
- <listitem><para>From sysfs</para>
- <para><constant>echo ttyS0 &gt; /sys/module/kgdboc/parameters/kgdboc</constant></para>
+ <listitem><para>Enter the kernel debugger manually or by waiting for an oops or fault. There are several ways you can enter the kernel debugger manually; all involve using the sysrq-g, which means you must have enabled CONFIG_MAGIC_SYSRQ=y in your kernel config.</para>
+ <itemizedlist>
+ <listitem><para>When logged in as root or with a super user session you can run:</para>
+ <para><constant>echo g &gt; /proc/sysrq-trigger</constant></para></listitem>
+ <listitem><para>Example using minicom 2.2</para>
+ <para>Press: <constant>Control-a</constant></para>
+ <para>Press: <constant>f</constant></para>
+ <para>Press: <constant>g</constant></para>
</listitem>
- </orderedlist>
- </para>
- <para>
- NOTE: Kgdboc does not support interrupting the target via the
- gdb remote protocol. You must manually send a sysrq-g unless you
- have a proxy that splits console output to a terminal problem and
- has a separate port for the debugger to connect to that sends the
- sysrq-g for you.
+ <listitem><para>When you have telneted to a terminal server that supports sending a remote break</para>
+ <para>Press: <constant>Control-]</constant></para>
+ <para>Type in:<constant>send break</constant></para>
+ <para>Press: <constant>Enter</constant></para>
+ <para>Press: <constant>g</constant></para>
+ </listitem>
+ </itemizedlist>
+ </listitem>
+ <listitem><para>From the kdb prompt you can run the "help" command to see a complete list of the commands that are available.</para>
+ <para>Some useful commands in kdb include:
+ <itemizedlist>
+ <listitem><para>lsmod -- Shows where kernel modules are loaded</para></listitem>
+ <listitem><para>ps -- Displays only the active processes</para></listitem>
+ <listitem><para>ps A -- Shows all the processes</para></listitem>
+ <listitem><para>summary -- Shows kernel version info and memory usage</para></listitem>
+ <listitem><para>bt -- Get a backtrace of the current process using dump_stack()</para></listitem>
+ <listitem><para>dmesg -- View the kernel syslog buffer</para></listitem>
+ <listitem><para>go -- Continue the system</para></listitem>
+ </itemizedlist>
</para>
- <para>When using kgdboc with no debugger proxy, you can end up
- connecting the debugger for one of two entry points. If an
- exception occurs after you have loaded kgdboc a message should print
- on the console stating it is waiting for the debugger. In case you
- disconnect your terminal program and then connect the debugger in
- its place. If you want to interrupt the target system and forcibly
- enter a debug session you have to issue a Sysrq sequence and then
- type the letter <constant>g</constant>. Then you disconnect the
- terminal session and connect gdb. Your options if you don't like
- this are to hack gdb to send the sysrq-g for you as well as on the
- initial connect, or to use a debugger proxy that allows an
- unmodified gdb to do the debugging.
+ </listitem>
+ <listitem>
+ <para>When you are done using kdb you need to consider rebooting the
+    system or using the "go" command to resume normal kernel
+ execution. If you have paused the kernel for a lengthy period of
+ time, applications that rely on timely networking or anything to do
+ with real wall clock time could be adversely affected, so you
+ should take this into consideration when using the kernel
+ debugger.</para>
+ </listitem>
+ </orderedlist></para>
+ </sect1>
+ <sect1 id="quickKDBkeyboard">
+ <title>Quick start for kdb using a keyboard connected console</title>
+ <para>This is a quick example of how to use kdb with a keyboard.</para>
+ <para><orderedlist>
+ <listitem><para>Boot kernel with arguments:
+ <itemizedlist>
+ <listitem><para><constant>kgdboc=kbd</constant></para></listitem>
+ </itemizedlist></para>
+ <para>OR</para>
+    <para>Configure kgdboc after the kernel has booted:
+ <itemizedlist>
+ <listitem><para><constant>echo kbd &gt; /sys/module/kgdboc/parameters/kgdboc</constant></para></listitem>
+ </itemizedlist>
</para>
- </sect2>
+ </listitem>
+ <listitem><para>Enter the kernel debugger manually or by waiting for an oops or fault. There are several ways you can enter the kernel debugger manually; all involve using the sysrq-g, which means you must have enabled CONFIG_MAGIC_SYSRQ=y in your kernel config.</para>
+ <itemizedlist>
+ <listitem><para>When logged in as root or with a super user session you can run:</para>
+ <para><constant>echo g &gt; /proc/sysrq-trigger</constant></para></listitem>
+ <listitem><para>Example using a laptop keyboard</para>
+ <para>Press and hold down: <constant>Alt</constant></para>
+ <para>Press and hold down: <constant>Fn</constant></para>
+ <para>Press and release the key with the label: <constant>SysRq</constant></para>
+ <para>Release: <constant>Fn</constant></para>
+ <para>Press and release: <constant>g</constant></para>
+ <para>Release: <constant>Alt</constant></para>
+ </listitem>
+ <listitem><para>Example using a PS/2 101-key keyboard</para>
+ <para>Press and hold down: <constant>Alt</constant></para>
+ <para>Press and release the key with the label: <constant>SysRq</constant></para>
+ <para>Press and release: <constant>g</constant></para>
+ <para>Release: <constant>Alt</constant></para>
+ </listitem>
+ </itemizedlist>
+ </listitem>
+ <listitem>
+ <para>Now type in a kdb command such as "help", "dmesg", "bt" or "go" to continue kernel execution.</para>
+ </listitem>
+ </orderedlist></para>
</sect1>
- <sect1 id="kgdbcon">
- <title>Kernel parameter: kgdbcon</title>
- <para>
- Kgdb supports using the gdb serial protocol to send console messages
- to the debugger when the debugger is connected and running. There
- are two ways to activate this feature.
+ </chapter>
+ <chapter id="EnableKGDB">
+ <title>Using kgdb / gdb</title>
+ <para>In order to use kgdb you must activate it by passing
+ configuration information to one of the kgdb I/O drivers. If you
+ do not pass any configuration information kgdb will not do anything
+ at all. Kgdb will only actively hook up to the kernel trap hooks
+ if a kgdb I/O driver is loaded and configured. If you unconfigure
+ a kgdb I/O driver, kgdb will unregister all the kernel hook points.
+ </para>
+ <para> All kgdb I/O drivers can be reconfigured at run time, if
+ <symbol>CONFIG_SYSFS</symbol> and <symbol>CONFIG_MODULES</symbol>
+ are enabled, by echo'ing a new config string to
+     <constant>/sys/module/&lt;driver&gt;/parameters/&lt;option&gt;</constant>.
+ The driver can be unconfigured by passing an empty string. You cannot
+ change the configuration while the debugger is attached. Make sure
+ to detach the debugger with the <constant>detach</constant> command
+ prior to trying to unconfigure a kgdb I/O driver.
+ </para>
+ <sect1 id="ConnectingGDB">
+ <title>Connecting with gdb to a serial port</title>
<orderedlist>
- <listitem><para>Activate with the kernel command line option:</para>
- <para><constant>kgdbcon</constant></para>
+ <listitem><para>Configure kgdboc</para>
+ <para>Boot kernel with arguments:
+ <itemizedlist>
+ <listitem><para><constant>kgdboc=ttyS0,115200</constant></para></listitem>
+ </itemizedlist></para>
+ <para>OR</para>
+    <para>Configure kgdboc after the kernel has booted:
+ <itemizedlist>
+ <listitem><para><constant>echo ttyS0 &gt; /sys/module/kgdboc/parameters/kgdboc</constant></para></listitem>
+ </itemizedlist></para>
</listitem>
- <listitem><para>Use sysfs before configuring an io driver</para>
- <para>
- <constant>echo 1 &gt; /sys/module/kgdb/parameters/kgdb_use_con</constant>
- </para>
- <para>
- NOTE: If you do this after you configure the kgdb I/O driver, the
- setting will not take effect until the next point the I/O is
- reconfigured.
- </para>
+ <listitem>
+ <para>Stop kernel execution (break into the debugger)</para>
+      <para>In order to connect with gdb via kgdboc, the kernel must
+ first be stopped. There are several ways to stop the kernel which
+ include using kgdbwait as a boot argument, via a sysrq-g, or running
+ the kernel until it takes an exception where it waits for the
+ debugger to attach.
+ <itemizedlist>
+ <listitem><para>When logged in as root or with a super user session you can run:</para>
+ <para><constant>echo g &gt; /proc/sysrq-trigger</constant></para></listitem>
+ <listitem><para>Example using minicom 2.2</para>
+ <para>Press: <constant>Control-a</constant></para>
+ <para>Press: <constant>f</constant></para>
+ <para>Press: <constant>g</constant></para>
</listitem>
- </orderedlist>
- </para>
- <para>
- IMPORTANT NOTE: Using this option with kgdb over the console
- (kgdboc) is not supported.
+ <listitem><para>When you have telneted to a terminal server that supports sending a remote break</para>
+ <para>Press: <constant>Control-]</constant></para>
+ <para>Type in:<constant>send break</constant></para>
+ <para>Press: <constant>Enter</constant></para>
+ <para>Press: <constant>g</constant></para>
+ </listitem>
+ </itemizedlist>
</para>
- </sect1>
- </chapter>
- <chapter id="ConnectingGDB">
- <title>Connecting gdb</title>
- <para>
- If you are using kgdboc, you need to have used kgdbwait as a boot
- argument, issued a sysrq-g, or the system you are going to debug
- has already taken an exception and is waiting for the debugger to
- attach before you can connect gdb.
- </para>
- <para>
- If you are not using different kgdb I/O driver other than kgdboc,
- you should be able to connect and the target will automatically
- respond.
- </para>
+ </listitem>
+ <listitem>
+      <para>Connect from gdb</para>
<para>
- Example (using a serial port):
+ Example (using a directly connected port):
</para>
<programlisting>
% gdb ./vmlinux
@@ -266,7 +499,7 @@
(gdb) target remote /dev/ttyS0
</programlisting>
<para>
- Example (kgdb to a terminal server on tcp port 2012):
+ Example (kgdb to a terminal server on TCP port 2012):
</para>
<programlisting>
% gdb ./vmlinux
@@ -283,6 +516,83 @@
communications. You do this prior to issuing the <constant>target
remote</constant> command by typing in: <constant>set debug remote 1</constant>
</para>
+ </listitem>
+ </orderedlist>
+ <para>Remember if you continue in gdb, and need to "break in" again,
+  you need to issue another sysrq-g.  It is easy to create a simple
+ entry point by putting a breakpoint at <constant>sys_sync</constant>
+ and then you can run "sync" from a shell or script to break into the
+ debugger.</para>
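+  <para>As an alternative to the sys_sync breakpoint trick, kernel code you
+  control can trap straight into the debug core itself. The following is a
+  hedged, minimal sketch of a hypothetical helper module; it assumes the
+  exported kgdb_breakpoint() entry point declared in linux/kgdb.h and is
+  only meant to illustrate the idea, not to be the recommended method.</para>
+  <programlisting>
+#include &lt;linux/init.h&gt;
+#include &lt;linux/module.h&gt;
+#include &lt;linux/kgdb.h&gt;
+
+/* Hypothetical helper: loading this module drops into the attached
+ * debugger by raising the architecture's kgdb breakpoint trap. */
+static int __init dbg_entry_init(void)
+{
+	kgdb_breakpoint();	/* returns once the debugger continues */
+	return 0;
+}
+module_init(dbg_entry_init);
+
+MODULE_LICENSE("GPL");
+  </programlisting>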
+ </sect1>
+ </chapter>
+ <chapter id="switchKdbKgdb">
+ <title>kgdb and kdb interoperability</title>
+ <para>It is possible to transition between kdb and kgdb dynamically.
+ The debug core will remember which you used the last time and
+ automatically start in the same mode.</para>
+ <sect1>
+ <title>Switching between kdb and kgdb</title>
+ <sect2>
+ <title>Switching from kgdb to kdb</title>
+ <para>
+ There are two ways to switch from kgdb to kdb: you can use gdb to
+ issue a maintenance packet, or you can blindly type the command $3#33.
+    Whenever the kernel debugger stops in kgdb mode it will print the
+ message <constant>KGDB or $3#33 for KDB</constant>. It is important
+ to note that you have to type the sequence correctly in one pass.
+ You cannot type a backspace or delete because kgdb will interpret
+ that as part of the debug stream.
+ <orderedlist>
+ <listitem><para>Change from kgdb to kdb by blindly typing:</para>
+ <para><constant>$3#33</constant></para></listitem>
+ <listitem><para>Change from kgdb to kdb with gdb</para>
+ <para><constant>maintenance packet 3</constant></para>
+ <para>NOTE: Now you must kill gdb. Typically you press control-z and
+ issue the command: kill -9 %</para></listitem>
+ </orderedlist>
+ </para>
+ </sect2>
+ <sect2>
+ <title>Change from kdb to kgdb</title>
+ <para>There are two ways you can change from kdb to kgdb. You can
+ manually enter kgdb mode by issuing the kgdb command from the kdb
+ shell prompt, or you can connect gdb while the kdb shell prompt is
+ active. The kdb shell looks for the typical first commands that gdb
+ would issue with the gdb remote protocol and if it sees one of those
+ commands it automatically changes into kgdb mode.</para>
+ <orderedlist>
+ <listitem><para>From kdb issue the command:</para>
+ <para><constant>kgdb</constant></para>
+ <para>Now disconnect your terminal program and connect gdb in its place</para></listitem>
+ <listitem><para>At the kdb prompt, disconnect the terminal program and connect gdb in its place.</para></listitem>
+ </orderedlist>
+ </sect2>
+ </sect1>
+ <sect1>
+ <title>Running kdb commands from gdb</title>
+ <para>It is possible to run a limited set of kdb commands from gdb,
+ using the gdb monitor command. You don't want to execute any of the
+ run control or breakpoint operations, because it can disrupt the
+    run control or breakpoint operations, because they can disrupt the
+ breakpoints and run control operations if you have gdb connected.
+ The more useful commands to run are things like lsmod, dmesg, ps or
+ possibly some of the memory information commands. To see all the kdb
+ commands you can run <constant>monitor help</constant>.</para>
+ <para>Example:
+ <informalexample><programlisting>
+(gdb) monitor ps
+1 idle process (state I) and
+27 sleeping system daemon (state M) processes suppressed,
+use 'ps A' to see all.
+Task Addr Pid Parent [*] cpu State Thread Command
+
+0xc78291d0 1 0 0 0 S 0xc7829404 init
+0xc7954150 942 1 0 0 S 0xc7954384 dropbear
+0xc78789c0 944 1 0 0 S 0xc7878bf4 sh
+(gdb)
+ </programlisting></informalexample>
+ </para>
+ </sect1>
</chapter>
<chapter id="KGDBTestSuite">
<title>kgdb Test Suite</title>
@@ -309,34 +619,36 @@
</para>
</chapter>
<chapter id="CommonBackEndReq">
- <title>KGDB Internals</title>
+ <title>Kernel Debugger Internals</title>
<sect1 id="kgdbArchitecture">
<title>Architecture Specifics</title>
<para>
- Kgdb is organized into three basic components:
+ The kernel debugger is organized into a number of components:
<orderedlist>
- <listitem><para>kgdb core</para>
+ <listitem><para>The debug core</para>
<para>
- The kgdb core is found in kernel/kgdb.c. It contains:
+	 The debug core is found in kernel/debug/debug_core.c.  It contains:
<itemizedlist>
- <listitem><para>All the logic to implement the gdb serial protocol</para></listitem>
- <listitem><para>A generic OS exception handler which includes sync'ing the processors into a stopped state on an multi cpu system.</para></listitem>
+ <listitem><para>A generic OS exception handler which includes
+	  sync'ing the processors into a stopped state on a multi-CPU
+ system.</para></listitem>
<listitem><para>The API to talk to the kgdb I/O drivers</para></listitem>
- <listitem><para>The API to make calls to the arch specific kgdb implementation</para></listitem>
+ <listitem><para>The API to make calls to the arch-specific kgdb implementation</para></listitem>
<listitem><para>The logic to perform safe memory reads and writes to memory while using the debugger</para></listitem>
<listitem><para>A full implementation for software breakpoints unless overridden by the arch</para></listitem>
+ <listitem><para>The API to invoke either the kdb or kgdb frontend to the debug core.</para></listitem>
</itemizedlist>
</para>
</listitem>
- <listitem><para>kgdb arch specific implementation</para>
+ <listitem><para>kgdb arch-specific implementation</para>
<para>
This implementation is generally found in arch/*/kernel/kgdb.c.
As an example, arch/x86/kernel/kgdb.c contains the specifics to
implement HW breakpoint as well as the initialization to
dynamically register and unregister for the trap handlers on
- this architecture. The arch specific portion implements:
+ this architecture. The arch-specific portion implements:
<itemizedlist>
- <listitem><para>contains an arch specific trap catcher which
+ <listitem><para>contains an arch-specific trap catcher which
      invokes kgdb_handle_exception() to start kgdb doing its
work</para></listitem>
      <listitem><para>translation between the gdb-specific packet format and pt_regs</para></listitem>
@@ -347,11 +659,35 @@
</itemizedlist>
</para>
</listitem>
+ <listitem><para>gdbstub frontend (aka kgdb)</para>
+ <para>The gdbstub is located in kernel/debug/gdbstub.c. It contains:</para>
+ <itemizedlist>
+ <listitem><para>All the logic to implement the gdb serial protocol</para></listitem>
+ </itemizedlist>
+ </listitem>
+ <listitem><para>kdb frontend</para>
+ <para>The kdb debugger shell is broken down into a number of
+ components. The kdb core is located in kernel/debug/kdb. There
+ are a number of helper functions in some of the other kernel
+ components to make it possible for kdb to examine and report
+ information about the kernel without taking locks that could
+      cause a kernel deadlock.  The kdb core implements the following functionality.</para>
+ <itemizedlist>
+ <listitem><para>A simple shell</para></listitem>
+ <listitem><para>The kdb core command set</para></listitem>
+ <listitem><para>A registration API to register additional kdb shell commands.</para>
+ <para>A good example of a self-contained kdb module is the "ftdump" command for dumping the ftrace buffer. See: kernel/trace/trace_kdb.c</para></listitem>
+ <listitem><para>The implementation for kdb_printf() which
+ emits messages directly to I/O drivers, bypassing the kernel
+ log.</para></listitem>
+ <listitem><para>SW / HW breakpoint management for the kdb shell</para></listitem>
+ </itemizedlist>
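+        <para>As a rough illustration of the registration API mentioned
+        above, the following hedged sketch registers a trivial "hello"
+        command from a module. It assumes the kdb_register(), kdb_printf()
+        and kdb_unregister() helpers exported by the kdb core (API as of
+        this writing); the command name and message are hypothetical.</para>
+        <programlisting>
+#include &lt;linux/kdb.h&gt;
+#include &lt;linux/module.h&gt;
+
+/* Hypothetical kdb command: prints a message at the kdb prompt. */
+static int kdb_hello(int argc, const char **argv)
+{
+	kdb_printf("hello from kdb (argc=%d)\n", argc);
+	return 0;
+}
+
+static int __init kdb_hello_init(void)
+{
+	/* cmd, handler, usage string, help text, minimum abbreviation */
+	return kdb_register("hello", kdb_hello, "", "print a test message", 0);
+}
+
+static void __exit kdb_hello_exit(void)
+{
+	kdb_unregister("hello");
+}
+module_init(kdb_hello_init);
+module_exit(kdb_hello_exit);
+
+MODULE_LICENSE("GPL");
+        </programlisting>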
+ </listitem>
<listitem><para>kgdb I/O driver</para>
<para>
- Each kgdb I/O driver has to provide an implemenation for the following:
+ Each kgdb I/O driver has to provide an implementation for the following:
<itemizedlist>
- <listitem><para>configuration via builtin or module</para></listitem>
+ <listitem><para>configuration via built-in or module</para></listitem>
<listitem><para>dynamic configuration and kgdb hook registration calls</para></listitem>
<listitem><para>read and write character interface</para></listitem>
<listitem><para>A cleanup handler for unconfiguring from the kgdb core</para></listitem>
@@ -416,15 +752,15 @@
     underlying low level hardware driver having "polling hooks"
     to which the tty driver is attached.  In the initial
     implementation of kgdboc, the serial_core was changed to expose a
- low level uart hook for doing polled mode reading and writing of a
+ low level UART hook for doing polled mode reading and writing of a
single character while in an atomic context. When kgdb makes an I/O
request to the debugger, kgdboc invokes a call back in the serial
- core which in turn uses the call back in the uart driver. It is
- certainly possible to extend kgdboc to work with non-uart based
+ core which in turn uses the call back in the UART driver. It is
+ certainly possible to extend kgdboc to work with non-UART based
consoles in the future.
</para>
<para>
- When using kgdboc with a uart, the uart driver must implement two callbacks in the <constant>struct uart_ops</constant>. Example from drivers/8250.c:<programlisting>
+ When using kgdboc with a UART, the UART driver must implement two callbacks in the <constant>struct uart_ops</constant>. Example from drivers/8250.c:<programlisting>
#ifdef CONFIG_CONSOLE_POLL
.poll_get_char = serial8250_get_poll_char,
.poll_put_char = serial8250_put_poll_char,
@@ -434,7 +770,7 @@
<constant>#ifdef CONFIG_CONSOLE_POLL</constant>, as shown above.
Keep in mind that polling hooks have to be implemented in such a way
that they can be called from an atomic context and have to restore
- the state of the uart chip on return such that the system can return
+ the state of the UART chip on return such that the system can return
to normal when the debugger detaches. You need to be very careful
     with any kind of lock you consider, because failing here is most likely
going to mean pressing the reset button.
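     A hedged sketch of what such hooks might look like for a hypothetical
     memory-mapped UART follows; the MYUART_* register offsets and bits are
     invented for illustration, and only the busy-wait structure and the
     poll_get_char/poll_put_char signatures are the point.
     <programlisting>
#ifdef CONFIG_CONSOLE_POLL
/* Polled-mode hooks: called from atomic context, so no interrupts and
 * no sleeping locks - just spin until the hypothetical UART is ready. */
static int myuart_poll_get_char(struct uart_port *port)
{
	while (!(readl(port->membase + MYUART_STATUS) &amp; MYUART_RX_READY))
		cpu_relax();
	return readl(port->membase + MYUART_RXDATA) &amp; 0xff;
}

static void myuart_poll_put_char(struct uart_port *port, unsigned char c)
{
	while (!(readl(port->membase + MYUART_STATUS) &amp; MYUART_TX_EMPTY))
		cpu_relax();
	writel(c, port->membase + MYUART_TXDATA);
}
#endif /* CONFIG_CONSOLE_POLL */
     </programlisting>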
@@ -453,6 +789,10 @@
<itemizedlist>
<listitem><para>Jason Wessel<email>jason.wessel@windriver.com</email></para></listitem>
</itemizedlist>
+ In Jan 2010 this document was updated to include kdb.
+ <itemizedlist>
+ <listitem><para>Jason Wessel<email>jason.wessel@windriver.com</email></para></listitem>
+ </itemizedlist>
</para>
</chapter>
</book>
diff --git a/Documentation/DocBook/libata.tmpl b/Documentation/DocBook/libata.tmpl
index ff3e5bec1c24..8c5411cfeaf0 100644
--- a/Documentation/DocBook/libata.tmpl
+++ b/Documentation/DocBook/libata.tmpl
@@ -81,16 +81,14 @@ void (*port_disable) (struct ata_port *);
</programlisting>
<para>
- Called from ata_bus_probe() and ata_bus_reset() error paths,
- as well as when unregistering from the SCSI module (rmmod, hot
- unplug).
+ Called from ata_bus_probe() error path, as well as when
+ unregistering from the SCSI module (rmmod, hot unplug).
This function should do whatever needs to be done to take the
port out of use. In most cases, ata_port_disable() can be used
as this hook.
</para>
<para>
Called from ata_bus_probe() on a failed probe.
- Called from ata_bus_reset() on a failed bus reset.
Called from ata_scsi_release().
</para>
@@ -227,6 +225,18 @@ u8 (*sff_check_altstatus)(struct ata_port *ap);
</sect2>
+ <sect2><title>Write specific ATA shadow register</title>
+ <programlisting>
+void (*sff_set_devctl)(struct ata_port *ap, u8 ctl);
+ </programlisting>
+
+ <para>
+ Write the device control ATA shadow register to the hardware.
+ Most drivers don't need to define this.
+ </para>
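+   <para>
+   For drivers that do provide it, a hedged sketch of what the hook might
+   look like for a hypothetical MMIO-based SFF controller is shown below;
+   it assumes the driver has already mapped the device control register
+   into ioaddr.ctl_addr.
+   </para>
+
+   <programlisting>
+/* Illustrative only: write the taskfile device control value. */
+static void mydrv_sff_set_devctl(struct ata_port *ap, u8 ctl)
+{
+	iowrite8(ctl, ap->ioaddr.ctl_addr);
+}
+   </programlisting>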
+
+ </sect2>
+
<sect2><title>Select ATA device on bus</title>
<programlisting>
void (*sff_dev_select)(struct ata_port *ap, unsigned int device);
@@ -477,7 +487,7 @@ void (*host_stop) (struct ata_host_set *host_set);
allocates space for a legacy IDE PRD table and returns.
</para>
<para>
- ->port_stop() is called after ->host_stop(). It's sole function
+ ->port_stop() is called after ->host_stop(). Its sole function
is to release DMA/memory resources, now that they are no longer
actively being used. Many drivers also free driver-private
data from port at this time.
diff --git a/Documentation/DocBook/media-entities.tmpl b/Documentation/DocBook/media-entities.tmpl
index c725cb852c54..5d4d40f429a5 100644
--- a/Documentation/DocBook/media-entities.tmpl
+++ b/Documentation/DocBook/media-entities.tmpl
@@ -17,6 +17,7 @@
<!ENTITY VIDIOC-DBG-G-REGISTER "<link linkend='vidioc-dbg-g-register'><constant>VIDIOC_DBG_G_REGISTER</constant></link>">
<!ENTITY VIDIOC-DBG-S-REGISTER "<link linkend='vidioc-dbg-g-register'><constant>VIDIOC_DBG_S_REGISTER</constant></link>">
<!ENTITY VIDIOC-DQBUF "<link linkend='vidioc-qbuf'><constant>VIDIOC_DQBUF</constant></link>">
+<!ENTITY VIDIOC-DQEVENT "<link linkend='vidioc-dqevent'><constant>VIDIOC_DQEVENT</constant></link>">
<!ENTITY VIDIOC-ENCODER-CMD "<link linkend='vidioc-encoder-cmd'><constant>VIDIOC_ENCODER_CMD</constant></link>">
<!ENTITY VIDIOC-ENUMAUDIO "<link linkend='vidioc-enumaudio'><constant>VIDIOC_ENUMAUDIO</constant></link>">
<!ENTITY VIDIOC-ENUMAUDOUT "<link linkend='vidioc-enumaudioout'><constant>VIDIOC_ENUMAUDOUT</constant></link>">
@@ -60,6 +61,7 @@
<!ENTITY VIDIOC-REQBUFS "<link linkend='vidioc-reqbufs'><constant>VIDIOC_REQBUFS</constant></link>">
<!ENTITY VIDIOC-STREAMOFF "<link linkend='vidioc-streamon'><constant>VIDIOC_STREAMOFF</constant></link>">
<!ENTITY VIDIOC-STREAMON "<link linkend='vidioc-streamon'><constant>VIDIOC_STREAMON</constant></link>">
+<!ENTITY VIDIOC-SUBSCRIBE-EVENT "<link linkend='vidioc-subscribe-event'><constant>VIDIOC_SUBSCRIBE_EVENT</constant></link>">
<!ENTITY VIDIOC-S-AUDIO "<link linkend='vidioc-g-audio'><constant>VIDIOC_S_AUDIO</constant></link>">
<!ENTITY VIDIOC-S-AUDOUT "<link linkend='vidioc-g-audioout'><constant>VIDIOC_S_AUDOUT</constant></link>">
<!ENTITY VIDIOC-S-CROP "<link linkend='vidioc-g-crop'><constant>VIDIOC_S_CROP</constant></link>">
@@ -83,6 +85,7 @@
<!ENTITY VIDIOC-TRY-ENCODER-CMD "<link linkend='vidioc-encoder-cmd'><constant>VIDIOC_TRY_ENCODER_CMD</constant></link>">
<!ENTITY VIDIOC-TRY-EXT-CTRLS "<link linkend='vidioc-g-ext-ctrls'><constant>VIDIOC_TRY_EXT_CTRLS</constant></link>">
<!ENTITY VIDIOC-TRY-FMT "<link linkend='vidioc-g-fmt'><constant>VIDIOC_TRY_FMT</constant></link>">
+<!ENTITY VIDIOC-UNSUBSCRIBE-EVENT "<link linkend='vidioc-subscribe-event'><constant>VIDIOC_UNSUBSCRIBE_EVENT</constant></link>">
<!-- Types -->
<!ENTITY v4l2-std-id "<link linkend='v4l2-std-id'>v4l2_std_id</link>">
@@ -141,6 +144,9 @@
<!ENTITY v4l2-enc-idx "struct&nbsp;<link linkend='v4l2-enc-idx'>v4l2_enc_idx</link>">
<!ENTITY v4l2-enc-idx-entry "struct&nbsp;<link linkend='v4l2-enc-idx-entry'>v4l2_enc_idx_entry</link>">
<!ENTITY v4l2-encoder-cmd "struct&nbsp;<link linkend='v4l2-encoder-cmd'>v4l2_encoder_cmd</link>">
+<!ENTITY v4l2-event "struct&nbsp;<link linkend='v4l2-event'>v4l2_event</link>">
+<!ENTITY v4l2-event-subscription "struct&nbsp;<link linkend='v4l2-event-subscription'>v4l2_event_subscription</link>">
+<!ENTITY v4l2-event-vsync "struct&nbsp;<link linkend='v4l2-event-vsync'>v4l2_event_vsync</link>">
<!ENTITY v4l2-ext-control "struct&nbsp;<link linkend='v4l2-ext-control'>v4l2_ext_control</link>">
<!ENTITY v4l2-ext-controls "struct&nbsp;<link linkend='v4l2-ext-controls'>v4l2_ext_controls</link>">
<!ENTITY v4l2-fmtdesc "struct&nbsp;<link linkend='v4l2-fmtdesc'>v4l2_fmtdesc</link>">
@@ -200,6 +206,7 @@
<!ENTITY sub-controls SYSTEM "v4l/controls.xml">
<!ENTITY sub-dev-capture SYSTEM "v4l/dev-capture.xml">
<!ENTITY sub-dev-codec SYSTEM "v4l/dev-codec.xml">
+<!ENTITY sub-dev-event SYSTEM "v4l/dev-event.xml">
<!ENTITY sub-dev-effect SYSTEM "v4l/dev-effect.xml">
<!ENTITY sub-dev-osd SYSTEM "v4l/dev-osd.xml">
<!ENTITY sub-dev-output SYSTEM "v4l/dev-output.xml">
@@ -292,6 +299,8 @@
<!ENTITY sub-v4l2grab-c SYSTEM "v4l/v4l2grab.c.xml">
<!ENTITY sub-videodev2-h SYSTEM "v4l/videodev2.h.xml">
<!ENTITY sub-v4l2 SYSTEM "v4l/v4l2.xml">
+<!ENTITY sub-dqevent SYSTEM "v4l/vidioc-dqevent.xml">
+<!ENTITY sub-subscribe-event SYSTEM "v4l/vidioc-subscribe-event.xml">
<!ENTITY sub-intro SYSTEM "dvb/intro.xml">
<!ENTITY sub-frontend SYSTEM "dvb/frontend.xml">
<!ENTITY sub-dvbproperty SYSTEM "dvb/dvbproperty.xml">
@@ -381,3 +390,5 @@
<!ENTITY reqbufs SYSTEM "v4l/vidioc-reqbufs.xml">
<!ENTITY s-hw-freq-seek SYSTEM "v4l/vidioc-s-hw-freq-seek.xml">
<!ENTITY streamon SYSTEM "v4l/vidioc-streamon.xml">
+<!ENTITY dqevent SYSTEM "v4l/vidioc-dqevent.xml">
+<!ENTITY subscribe_event SYSTEM "v4l/vidioc-subscribe-event.xml">
diff --git a/Documentation/DocBook/mtdnand.tmpl b/Documentation/DocBook/mtdnand.tmpl
index 133cd6c3f3c1..020ac80d4682 100644
--- a/Documentation/DocBook/mtdnand.tmpl
+++ b/Documentation/DocBook/mtdnand.tmpl
@@ -269,7 +269,7 @@ static void board_hwcontrol(struct mtd_info *mtd, int cmd)
information about the device.
</para>
<programlisting>
-int __init board_init (void)
+static int __init board_init (void)
{
struct nand_chip *this;
int err = 0;
diff --git a/Documentation/DocBook/sh.tmpl b/Documentation/DocBook/sh.tmpl
index 0c3dc4c69dd1..d858d92cf6d9 100644
--- a/Documentation/DocBook/sh.tmpl
+++ b/Documentation/DocBook/sh.tmpl
@@ -19,13 +19,17 @@
</authorgroup>
<copyright>
- <year>2008</year>
+ <year>2008-2010</year>
<holder>Paul Mundt</holder>
</copyright>
<copyright>
- <year>2008</year>
+ <year>2008-2010</year>
<holder>Renesas Technology Corp.</holder>
</copyright>
+ <copyright>
+ <year>2010</year>
+ <holder>Renesas Electronics Corp.</holder>
+ </copyright>
<legalnotice>
<para>
@@ -77,7 +81,7 @@
</chapter>
<chapter id="clk">
<title>Clock Framework Extensions</title>
-!Iarch/sh/include/asm/clock.h
+!Iinclude/linux/sh_clk.h
</chapter>
<chapter id="mach">
<title>Machine Specific Interfaces</title>
diff --git a/Documentation/DocBook/v4l/compat.xml b/Documentation/DocBook/v4l/compat.xml
index b9dbdf9e6d29..b42b935913cd 100644
--- a/Documentation/DocBook/v4l/compat.xml
+++ b/Documentation/DocBook/v4l/compat.xml
@@ -2332,15 +2332,26 @@ more information.</para>
</listitem>
</orderedlist>
</section>
- </section>
+ <section>
+ <title>V4L2 in Linux 2.6.34</title>
+ <orderedlist>
+ <listitem>
+ <para>Added
+<constant>V4L2_CID_IRIS_ABSOLUTE</constant> and
+<constant>V4L2_CID_IRIS_RELATIVE</constant> controls to the
+ <link linkend="camera-controls">Camera controls class</link>.
+ </para>
+ </listitem>
+ </orderedlist>
+ </section>
- <section id="other">
- <title>Relation of V4L2 to other Linux multimedia APIs</title>
+ <section id="other">
+ <title>Relation of V4L2 to other Linux multimedia APIs</title>
- <section id="xvideo">
- <title>X Video Extension</title>
+ <section id="xvideo">
+ <title>X Video Extension</title>
- <para>The X Video Extension (abbreviated XVideo or just Xv) is
+ <para>The X Video Extension (abbreviated XVideo or just Xv) is
an extension of the X Window system, implemented for example by the
XFree86 project. Its scope is similar to V4L2, an API to video capture
and output devices for X clients. Xv allows applications to display
@@ -2351,7 +2362,7 @@ capture or output still images in XPixmaps<footnote>
extension available across many operating systems and
architectures.</para>
- <para>Because the driver is embedded into the X server Xv has a
+ <para>Because the driver is embedded into the X server Xv has a
number of advantages over the V4L2 <link linkend="overlay">video
overlay interface</link>. The driver can easily determine the overlay
target, &ie; visible graphics memory or off-screen buffers for a
@@ -2360,16 +2371,16 @@ overlay, scaling or color-keying, or the clipping functions of the
video capture hardware, always in sync with drawing operations or
windows moving or changing their stacking order.</para>
- <para>To combine the advantages of Xv and V4L a special Xv
+ <para>To combine the advantages of Xv and V4L a special Xv
driver exists in XFree86 and XOrg, just programming any overlay capable
Video4Linux device it finds. To enable it
<filename>/etc/X11/XF86Config</filename> must contain these lines:</para>
- <para><screen>
+ <para><screen>
Section "Module"
Load "v4l"
EndSection</screen></para>
- <para>As of XFree86 4.2 this driver still supports only V4L
+ <para>As of XFree86 4.2 this driver still supports only V4L
ioctls, however it should work just fine with all V4L2 devices through
the V4L2 backward-compatibility layer. Since V4L2 permits multiple
opens it is possible (if supported by the V4L2 driver) to capture
@@ -2377,83 +2388,84 @@ video while an X client requested video overlay. Restrictions of
simultaneous capturing and overlay are discussed in <xref
linkend="overlay" /> apply.</para>
- <para>Only marginally related to V4L2, XFree86 extended Xv to
+ <para>Only marginally related to V4L2, XFree86 extended Xv to
support hardware YUV to RGB conversion and scaling for faster video
playback, and added an interface to MPEG-2 decoding hardware. This API
is useful to display images captured with V4L2 devices.</para>
- </section>
+ </section>
- <section>
- <title>Digital Video</title>
+ <section>
+ <title>Digital Video</title>
- <para>V4L2 does not support digital terrestrial, cable or
+ <para>V4L2 does not support digital terrestrial, cable or
satellite broadcast. A separate project aiming at digital receivers
exists. You can find its homepage at <ulink
url="http://linuxtv.org">http://linuxtv.org</ulink>. The Linux DVB API
has no connection to the V4L2 API except that drivers for hybrid
hardware may support both.</para>
- </section>
+ </section>
- <section>
- <title>Audio Interfaces</title>
+ <section>
+ <title>Audio Interfaces</title>
- <para>[to do - OSS/ALSA]</para>
+ <para>[to do - OSS/ALSA]</para>
+ </section>
</section>
- </section>
- <section id="experimental">
- <title>Experimental API Elements</title>
+ <section id="experimental">
+ <title>Experimental API Elements</title>
- <para>The following V4L2 API elements are currently experimental
+ <para>The following V4L2 API elements are currently experimental
and may change in the future.</para>
- <itemizedlist>
- <listitem>
- <para>Video Output Overlay (OSD) Interface, <xref
+ <itemizedlist>
+ <listitem>
+ <para>Video Output Overlay (OSD) Interface, <xref
linkend="osd" />.</para>
- </listitem>
+ </listitem>
<listitem>
- <para><constant>V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY</constant>,
+ <para><constant>V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY</constant>,
&v4l2-buf-type;, <xref linkend="v4l2-buf-type" />.</para>
- </listitem>
- <listitem>
- <para><constant>V4L2_CAP_VIDEO_OUTPUT_OVERLAY</constant>,
+ </listitem>
+ <listitem>
+ <para><constant>V4L2_CAP_VIDEO_OUTPUT_OVERLAY</constant>,
&VIDIOC-QUERYCAP; ioctl, <xref linkend="device-capabilities" />.</para>
- </listitem>
- <listitem>
- <para>&VIDIOC-ENUM-FRAMESIZES; and
+ </listitem>
+ <listitem>
+ <para>&VIDIOC-ENUM-FRAMESIZES; and
&VIDIOC-ENUM-FRAMEINTERVALS; ioctls.</para>
- </listitem>
- <listitem>
- <para>&VIDIOC-G-ENC-INDEX; ioctl.</para>
- </listitem>
- <listitem>
- <para>&VIDIOC-ENCODER-CMD; and &VIDIOC-TRY-ENCODER-CMD;
+ </listitem>
+ <listitem>
+ <para>&VIDIOC-G-ENC-INDEX; ioctl.</para>
+ </listitem>
+ <listitem>
+ <para>&VIDIOC-ENCODER-CMD; and &VIDIOC-TRY-ENCODER-CMD;
ioctls.</para>
- </listitem>
- <listitem>
- <para>&VIDIOC-DBG-G-REGISTER; and &VIDIOC-DBG-S-REGISTER;
+ </listitem>
+ <listitem>
+ <para>&VIDIOC-DBG-G-REGISTER; and &VIDIOC-DBG-S-REGISTER;
ioctls.</para>
- </listitem>
- <listitem>
- <para>&VIDIOC-DBG-G-CHIP-IDENT; ioctl.</para>
- </listitem>
- </itemizedlist>
- </section>
+ </listitem>
+ <listitem>
+ <para>&VIDIOC-DBG-G-CHIP-IDENT; ioctl.</para>
+ </listitem>
+ </itemizedlist>
+ </section>
- <section id="obsolete">
- <title>Obsolete API Elements</title>
+ <section id="obsolete">
+ <title>Obsolete API Elements</title>
- <para>The following V4L2 API elements were superseded by new
+ <para>The following V4L2 API elements were superseded by new
interfaces and should not be implemented in new drivers.</para>
- <itemizedlist>
- <listitem>
- <para><constant>VIDIOC_G_MPEGCOMP</constant> and
+ <itemizedlist>
+ <listitem>
+ <para><constant>VIDIOC_G_MPEGCOMP</constant> and
<constant>VIDIOC_S_MPEGCOMP</constant> ioctls. Use Extended Controls,
<xref linkend="extended-controls" />.</para>
- </listitem>
- </itemizedlist>
+ </listitem>
+ </itemizedlist>
+ </section>
</section>
<!--
diff --git a/Documentation/DocBook/v4l/controls.xml b/Documentation/DocBook/v4l/controls.xml
index f46450610412..8408caaee276 100644
--- a/Documentation/DocBook/v4l/controls.xml
+++ b/Documentation/DocBook/v4l/controls.xml
@@ -267,6 +267,12 @@ minimum value disables backlight compensation.</entry>
<entry>Chroma automatic gain control.</entry>
</row>
<row>
+ <entry><constant>V4L2_CID_CHROMA_GAIN</constant></entry>
+ <entry>integer</entry>
+ <entry>Adjusts the Chroma gain control (for use when chroma AGC
+ is disabled).</entry>
+ </row>
+ <row>
<entry><constant>V4L2_CID_COLOR_KILLER</constant></entry>
<entry>boolean</entry>
<entry>Enable the color killer (&ie; force a black &amp; white image in case of a weak video signal).</entry>
@@ -277,8 +283,15 @@ minimum value disables backlight compensation.</entry>
<entry>Selects a color effect. Possible values for
<constant>enum v4l2_colorfx</constant> are:
<constant>V4L2_COLORFX_NONE</constant> (0),
-<constant>V4L2_COLORFX_BW</constant> (1) and
-<constant>V4L2_COLORFX_SEPIA</constant> (2).</entry>
+<constant>V4L2_COLORFX_BW</constant> (1),
+<constant>V4L2_COLORFX_SEPIA</constant> (2),
+<constant>V4L2_COLORFX_NEGATIVE</constant> (3),
+<constant>V4L2_COLORFX_EMBOSS</constant> (4),
+<constant>V4L2_COLORFX_SKETCH</constant> (5),
+<constant>V4L2_COLORFX_SKY_BLUE</constant> (6),
+<constant>V4L2_COLORFX_GRASS_GREEN</constant> (7),
+<constant>V4L2_COLORFX_SKIN_WHITEN</constant> (8) and
+<constant>V4L2_COLORFX_VIVID</constant> (9).</entry>
</row>
<row>
<entry><constant>V4L2_CID_ROTATE</constant></entry>
@@ -1825,6 +1838,25 @@ wide-angle direction. The zoom speed unit is driver-specific.</entry>
<row><entry></entry></row>
<row>
+ <entry spanname="id"><constant>V4L2_CID_IRIS_ABSOLUTE</constant>&nbsp;</entry>
+ <entry>integer</entry>
+ </row><row><entry spanname="descr">This control sets the
+camera's aperture to the specified value. The unit is undefined.
+Larger values open the iris wider, smaller values close it.</entry>
+ </row>
+ <row><entry></entry></row>
+
+ <row>
+ <entry spanname="id"><constant>V4L2_CID_IRIS_RELATIVE</constant>&nbsp;</entry>
+ <entry>integer</entry>
+ </row><row><entry spanname="descr">This control modifies the
+camera's aperture by the specified amount. The unit is undefined.
+Positive values open the iris one step further, negative values close
+it one step further. This is a write-only control.</entry>
+ </row>
+ <row><entry></entry></row>
+
+ <row>
<entry spanname="id"><constant>V4L2_CID_PRIVACY</constant>&nbsp;</entry>
<entry>boolean</entry>
</row><row><entry spanname="descr">Prevent video from being acquired
diff --git a/Documentation/DocBook/v4l/dev-event.xml b/Documentation/DocBook/v4l/dev-event.xml
new file mode 100644
index 000000000000..be5a98fb4fab
--- /dev/null
+++ b/Documentation/DocBook/v4l/dev-event.xml
@@ -0,0 +1,31 @@
+ <title>Event Interface</title>
+
+  <para>The V4L2 event interface provides a means for the user to be
+  notified immediately of certain conditions taking place on a device.
+  These might include start-of-frame or loss-of-signal events, for
+  example.
+ </para>
+
+  <para>To receive events, the user must first subscribe to the events of
+  interest using the &VIDIOC-SUBSCRIBE-EVENT; ioctl. Once an event type is
+  subscribed, events of that type can be dequeued using the
+  &VIDIOC-DQEVENT; ioctl. Events may be unsubscribed using the
+  VIDIOC_UNSUBSCRIBE_EVENT ioctl. The special event type V4L2_EVENT_ALL may
+  be used to unsubscribe all the events the driver supports.</para>
+
+ <para>The event subscriptions and event queues are specific to file
+ handles. Subscribing an event on one file handle does not affect
+ other file handles.
+ </para>
+
+  <para>The information on dequeueable events is obtained by using the select
+  or poll system calls on video devices. V4L2 events are signalled as POLLPRI
+  events by the poll system call and as exceptions by the select system
+  call.</para>
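+  <para>As a rough, hedged illustration of the calls described above (not a
+  complete program), the following sketch subscribes to vsync events on an
+  already-open video device file descriptor, waits for one with poll, and
+  dequeues it. Error handling is minimal and the helper name is invented.
+  </para>
+
+  <programlisting>
+#include &lt;string.h&gt;
+#include &lt;poll.h&gt;
+#include &lt;sys/ioctl.h&gt;
+#include &lt;linux/videodev2.h&gt;
+
+static int wait_for_vsync(int fd)
+{
+	struct v4l2_event_subscription sub;
+	struct v4l2_event ev;
+	struct pollfd pfd = { .fd = fd, .events = POLLPRI };
+
+	memset(&amp;sub, 0, sizeof(sub));
+	sub.type = V4L2_EVENT_VSYNC;
+	if (ioctl(fd, VIDIOC_SUBSCRIBE_EVENT, &amp;sub) &lt; 0)
+		return -1;
+
+	/* V4L2 events show up as POLLPRI (select would report an exception) */
+	if (poll(&amp;pfd, 1, -1) &lt; 0)
+		return -1;
+
+	memset(&amp;ev, 0, sizeof(ev));
+	if (ioctl(fd, VIDIOC_DQEVENT, &amp;ev) &lt; 0)
+		return -1;
+
+	return ev.u.vsync.field;	/* which field the vsync refers to */
+}
+  </programlisting>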
+
+ <!--
+Local Variables:
+mode: sgml
+sgml-parent-document: "v4l2.sgml"
+indent-tabs-mode: nil
+End:
+ -->
diff --git a/Documentation/DocBook/v4l/io.xml b/Documentation/DocBook/v4l/io.xml
index e870330cbf77..d424886beda0 100644
--- a/Documentation/DocBook/v4l/io.xml
+++ b/Documentation/DocBook/v4l/io.xml
@@ -702,6 +702,16 @@ They can be both cleared however, then the buffer is in "dequeued"
state, in the application domain to say so.</entry>
</row>
<row>
+ <entry><constant>V4L2_BUF_FLAG_ERROR</constant></entry>
+ <entry>0x0040</entry>
+ <entry>When this flag is set, the buffer has been dequeued
+ successfully, although the data might have been corrupted.
+ This is recoverable, streaming may continue as normal and
+ the buffer may be reused normally.
+	    This is recoverable; streaming may continue as normal and
+ ioctl is called.</entry>
+ </row>
+ <row>
<entry><constant>V4L2_BUF_FLAG_KEYFRAME</constant></entry>
<entry>0x0008</entry>
<entry>Drivers set or clear this flag when calling the
@@ -918,8 +928,8 @@ order</emphasis>.</para>
<para>When the driver provides or accepts images field by field
rather than interleaved, it is also important applications understand
-how the fields combine to frames. We distinguish between top and
-bottom fields, the <emphasis>spatial order</emphasis>: The first line
+how the fields combine to frames. We distinguish between top (aka odd) and
+bottom (aka even) fields, the <emphasis>spatial order</emphasis>: The first line
of the top field is the first line of an interlaced frame, the first
line of the bottom field is the second line of that frame.</para>
@@ -972,12 +982,12 @@ between <constant>V4L2_FIELD_TOP</constant> and
<row>
<entry><constant>V4L2_FIELD_TOP</constant></entry>
<entry>2</entry>
- <entry>Images consist of the top field only.</entry>
+ <entry>Images consist of the top (aka odd) field only.</entry>
</row>
<row>
<entry><constant>V4L2_FIELD_BOTTOM</constant></entry>
<entry>3</entry>
- <entry>Images consist of the bottom field only.
+ <entry>Images consist of the bottom (aka even) field only.
Applications may wish to prevent a device from capturing interlaced
images because they will have "comb" or "feathering" artefacts around
moving objects.</entry>
diff --git a/Documentation/DocBook/v4l/pixfmt.xml b/Documentation/DocBook/v4l/pixfmt.xml
index 885968d6a2fc..c4ad0a8e42dc 100644
--- a/Documentation/DocBook/v4l/pixfmt.xml
+++ b/Documentation/DocBook/v4l/pixfmt.xml
@@ -792,6 +792,18 @@ http://www.thedirks.org/winnov/</ulink></para></entry>
<entry>'YYUV'</entry>
<entry>unknown</entry>
</row>
+ <row id="V4L2-PIX-FMT-Y4">
+ <entry><constant>V4L2_PIX_FMT_Y4</constant></entry>
+ <entry>'Y04 '</entry>
+ <entry>Old 4-bit greyscale format. Only the least significant 4 bits of each byte are used,
+the other bits are set to 0.</entry>
+ </row>
+ <row id="V4L2-PIX-FMT-Y6">
+ <entry><constant>V4L2_PIX_FMT_Y6</constant></entry>
+ <entry>'Y06 '</entry>
+ <entry>Old 6-bit greyscale format. Only the least significant 6 bits of each byte are used,
+the other bits are set to 0.</entry>
+ </row>
</tbody>
</tgroup>
</table>
diff --git a/Documentation/DocBook/v4l/v4l2.xml b/Documentation/DocBook/v4l/v4l2.xml
index 060105af49e5..9737243377a3 100644
--- a/Documentation/DocBook/v4l/v4l2.xml
+++ b/Documentation/DocBook/v4l/v4l2.xml
@@ -401,6 +401,7 @@ and discussions on the V4L mailing list.</revremark>
<section id="ttx"> &sub-dev-teletext; </section>
<section id="radio"> &sub-dev-radio; </section>
<section id="rds"> &sub-dev-rds; </section>
+ <section id="event"> &sub-dev-event; </section>
</chapter>
<chapter id="driver">
@@ -426,6 +427,7 @@ and discussions on the V4L mailing list.</revremark>
&sub-cropcap;
&sub-dbg-g-chip-ident;
&sub-dbg-g-register;
+ &sub-dqevent;
&sub-encoder-cmd;
&sub-enumaudio;
&sub-enumaudioout;
@@ -467,6 +469,7 @@ and discussions on the V4L mailing list.</revremark>
&sub-reqbufs;
&sub-s-hw-freq-seek;
&sub-streamon;
+ &sub-subscribe-event;
<!-- End of ioctls. -->
&sub-mmap;
&sub-munmap;
diff --git a/Documentation/DocBook/v4l/videodev2.h.xml b/Documentation/DocBook/v4l/videodev2.h.xml
index 068325940658..865b06d9e679 100644
--- a/Documentation/DocBook/v4l/videodev2.h.xml
+++ b/Documentation/DocBook/v4l/videodev2.h.xml
@@ -1018,6 +1018,13 @@ enum <link linkend="v4l2-colorfx">v4l2_colorfx</link> {
V4L2_COLORFX_NONE = 0,
V4L2_COLORFX_BW = 1,
V4L2_COLORFX_SEPIA = 2,
+ V4L2_COLORFX_NEGATIVE = 3,
+ V4L2_COLORFX_EMBOSS = 4,
+ V4L2_COLORFX_SKETCH = 5,
+ V4L2_COLORFX_SKY_BLUE = 6,
+ V4L2_COLORFX_GRASS_GREEN = 7,
+ V4L2_COLORFX_SKIN_WHITEN = 8,
+	V4L2_COLORFX_VIVID = 9,
};
#define V4L2_CID_AUTOBRIGHTNESS (V4L2_CID_BASE+32)
#define V4L2_CID_BAND_STOP_FILTER (V4L2_CID_BASE+33)
@@ -1271,6 +1278,9 @@ enum <link linkend="v4l2-exposure-auto-type">v4l2_exposure_auto_type</link> {
#define V4L2_CID_PRIVACY (V4L2_CID_CAMERA_CLASS_BASE+16)
+#define V4L2_CID_IRIS_ABSOLUTE (V4L2_CID_CAMERA_CLASS_BASE+17)
+#define V4L2_CID_IRIS_RELATIVE (V4L2_CID_CAMERA_CLASS_BASE+18)
+
/* FM Modulator class control IDs */
#define V4L2_CID_FM_TX_CLASS_BASE (V4L2_CTRL_CLASS_FM_TX | 0x900)
#define V4L2_CID_FM_TX_CLASS (V4L2_CTRL_CLASS_FM_TX | 1)
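A short user-space sketch of selecting one of the new colour effects listed
above (fd is assumed to be an open V4L2 device; whether a particular effect is
implemented is driver dependent):

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    static int set_sketch_effect(int fd)
    {
            struct v4l2_control ctrl;

            memset(&ctrl, 0, sizeof(ctrl));
            ctrl.id = V4L2_CID_COLORFX;
            ctrl.value = V4L2_COLORFX_SKETCH;

            /* Returns -1 with errno set (e.g. EINVAL) if the effect is unsupported. */
            return ioctl(fd, VIDIOC_S_CTRL, &ctrl);
    }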
diff --git a/Documentation/DocBook/v4l/vidioc-dqevent.xml b/Documentation/DocBook/v4l/vidioc-dqevent.xml
new file mode 100644
index 000000000000..4e0a7cc30812
--- /dev/null
+++ b/Documentation/DocBook/v4l/vidioc-dqevent.xml
@@ -0,0 +1,131 @@
+<refentry id="vidioc-dqevent">
+ <refmeta>
+ <refentrytitle>ioctl VIDIOC_DQEVENT</refentrytitle>
+ &manvol;
+ </refmeta>
+
+ <refnamediv>
+ <refname>VIDIOC_DQEVENT</refname>
+ <refpurpose>Dequeue event</refpurpose>
+ </refnamediv>
+
+ <refsynopsisdiv>
+ <funcsynopsis>
+ <funcprototype>
+ <funcdef>int <function>ioctl</function></funcdef>
+ <paramdef>int <parameter>fd</parameter></paramdef>
+ <paramdef>int <parameter>request</parameter></paramdef>
+ <paramdef>struct v4l2_event
+*<parameter>argp</parameter></paramdef>
+ </funcprototype>
+ </funcsynopsis>
+ </refsynopsisdiv>
+
+ <refsect1>
+ <title>Arguments</title>
+
+ <variablelist>
+ <varlistentry>
+ <term><parameter>fd</parameter></term>
+ <listitem>
+ <para>&fd;</para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><parameter>request</parameter></term>
+ <listitem>
+ <para>VIDIOC_DQEVENT</para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><parameter>argp</parameter></term>
+ <listitem>
+ <para></para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </refsect1>
+
+ <refsect1>
+ <title>Description</title>
+
+ <para>Dequeue an event from a video device. No input is required
+ for this ioctl. All the fields of the &v4l2-event; structure are
+    filled by the driver. The file handle also signals an exceptional
+    condition, which the application may detect by e.g. using the
+    select system call.</para>
+
+ <table frame="none" pgwide="1" id="v4l2-event">
+ <title>struct <structname>v4l2_event</structname></title>
+ <tgroup cols="4">
+ &cs-str;
+ <tbody valign="top">
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>type</structfield></entry>
+ <entry></entry>
+ <entry>Type of the event.</entry>
+ </row>
+ <row>
+ <entry>union</entry>
+ <entry><structfield>u</structfield></entry>
+ <entry></entry>
+ <entry></entry>
+ </row>
+ <row>
+ <entry></entry>
+ <entry>&v4l2-event-vsync;</entry>
+ <entry><structfield>vsync</structfield></entry>
+ <entry>Event data for event V4L2_EVENT_VSYNC.
+ </entry>
+ </row>
+ <row>
+ <entry></entry>
+ <entry>__u8</entry>
+ <entry><structfield>data</structfield>[64]</entry>
+ <entry>Event data. Defined by the event type. The union
+	    should be used to define easily accessible types for
+ events.</entry>
+ </row>
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>pending</structfield></entry>
+ <entry></entry>
+ <entry>Number of pending events excluding this one.</entry>
+ </row>
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>sequence</structfield></entry>
+ <entry></entry>
+ <entry>Event sequence number. The sequence number is
+ incremented for every subscribed event that takes place.
+ If sequence numbers are not contiguous it means that
+ events have been lost.
+ </entry>
+ </row>
+ <row>
+ <entry>struct timespec</entry>
+ <entry><structfield>timestamp</structfield></entry>
+ <entry></entry>
+ <entry>Event timestamp.</entry>
+ </row>
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>reserved</structfield>[9]</entry>
+ <entry></entry>
+ <entry>Reserved for future extensions. Drivers must set
+ the array to zero.</entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </table>
+
+ </refsect1>
+</refentry>
+<!--
+Local Variables:
+mode: sgml
+sgml-parent-document: "v4l2.sgml"
+indent-tabs-mode: nil
+End:
+-->
diff --git a/Documentation/DocBook/v4l/vidioc-enuminput.xml b/Documentation/DocBook/v4l/vidioc-enuminput.xml
index 71b868e2fb8f..476fe1d2bba0 100644
--- a/Documentation/DocBook/v4l/vidioc-enuminput.xml
+++ b/Documentation/DocBook/v4l/vidioc-enuminput.xml
@@ -283,7 +283,7 @@ input/output interface to linux-media@vger.kernel.org on 19 Oct 2009.
<entry>This input supports setting DV presets by using VIDIOC_S_DV_PRESET.</entry>
</row>
<row>
- <entry><constant>V4L2_OUT_CAP_CUSTOM_TIMINGS</constant></entry>
+ <entry><constant>V4L2_IN_CAP_CUSTOM_TIMINGS</constant></entry>
<entry>0x00000002</entry>
<entry>This input supports setting custom video timings by using VIDIOC_S_DV_TIMINGS.</entry>
</row>
diff --git a/Documentation/DocBook/v4l/vidioc-qbuf.xml b/Documentation/DocBook/v4l/vidioc-qbuf.xml
index b843bd7b3897..ab691ebf3b93 100644
--- a/Documentation/DocBook/v4l/vidioc-qbuf.xml
+++ b/Documentation/DocBook/v4l/vidioc-qbuf.xml
@@ -111,7 +111,11 @@ from the driver's outgoing queue. They just set the
and <structfield>reserved</structfield>
fields of a &v4l2-buffer; as above, when <constant>VIDIOC_DQBUF</constant>
is called with a pointer to this structure the driver fills the
-remaining fields or returns an error code.</para>
+remaining fields or returns an error code. The driver may also set
+<constant>V4L2_BUF_FLAG_ERROR</constant> in the <structfield>flags</structfield>
+field. It indicates a non-critical (recoverable) streaming error. In such a case
+the application may continue as normal, but should be aware that data in the
+dequeued buffer might be corrupted.</para>
<para>By default <constant>VIDIOC_DQBUF</constant> blocks when no
buffer is in the outgoing queue. When the
@@ -158,7 +162,13 @@ enqueue a user pointer buffer.</para>
<para><constant>VIDIOC_DQBUF</constant> failed due to an
internal error. Can also indicate temporary problems like signal
loss. Note the driver might dequeue an (empty) buffer despite
-returning an error, or even stop capturing.</para>
+returning an error, or even stop capturing. Reusing such a buffer may be unsafe
+though and its details (e.g. <structfield>index</structfield>) may not be
+returned either. It is recommended that drivers indicate recoverable errors
+by setting the <constant>V4L2_BUF_FLAG_ERROR</constant> flag and returning 0 instead.
+In that case the application should be able to safely reuse the buffer and
+continue streaming.
+ </para>
</listitem>
</varlistentry>
</variablelist>
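A minimal sketch of the dequeue behaviour described above (fd is assumed to be
a memory-mapped capture device that is already streaming; error handling is
abbreviated and the surrounding buffer management is omitted):

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    static int dequeue_one(int fd)
    {
            struct v4l2_buffer buf;

            memset(&buf, 0, sizeof(buf));
            buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
            buf.memory = V4L2_MEMORY_MMAP;

            if (ioctl(fd, VIDIOC_DQBUF, &buf) < 0)
                    return -1;      /* possibly fatal, see the EIO description */

            if (buf.flags & V4L2_BUF_FLAG_ERROR) {
                    /* Recoverable: the data may be corrupt, but the buffer
                     * itself can be processed or re-queued as usual. */
            }

            /* ... use buf.bytesused bytes from buffer buf.index, then re-queue ... */
            return ioctl(fd, VIDIOC_QBUF, &buf);
    }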
diff --git a/Documentation/DocBook/v4l/vidioc-queryctrl.xml b/Documentation/DocBook/v4l/vidioc-queryctrl.xml
index 4876ff1a1a04..8e0e055ac934 100644
--- a/Documentation/DocBook/v4l/vidioc-queryctrl.xml
+++ b/Documentation/DocBook/v4l/vidioc-queryctrl.xml
@@ -325,7 +325,7 @@ should be part of the control documentation.</entry>
<entry>n/a</entry>
<entry>This is not a control. When
<constant>VIDIOC_QUERYCTRL</constant> is called with a control ID
-equal to a control class code (see <xref linkend="ctrl-class" />), the
+equal to a control class code (see <xref linkend="ctrl-class" />) + 1, the
ioctl returns the name of the control class and this control type.
Older drivers which do not support this feature return an
&EINVAL;.</entry>
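For example, a user-space sketch of the convention above, querying the name of
the user control class (class codes are taken from videodev2.h; error handling
omitted):

    #include <string.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    static void print_user_class_name(int fd)
    {
            struct v4l2_queryctrl qc;

            memset(&qc, 0, sizeof(qc));
            qc.id = V4L2_CTRL_CLASS_USER + 1;       /* control class code + 1 */

            if (ioctl(fd, VIDIOC_QUERYCTRL, &qc) == 0 &&
                qc.type == V4L2_CTRL_TYPE_CTRL_CLASS)
                    printf("control class: %s\n", qc.name);
    }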
diff --git a/Documentation/DocBook/v4l/vidioc-reqbufs.xml b/Documentation/DocBook/v4l/vidioc-reqbufs.xml
index 1c0816372074..69800ae23348 100644
--- a/Documentation/DocBook/v4l/vidioc-reqbufs.xml
+++ b/Documentation/DocBook/v4l/vidioc-reqbufs.xml
@@ -61,7 +61,7 @@ fields of the <structname>v4l2_requestbuffers</structname> structure.
They set the <structfield>type</structfield> field to the respective
stream or buffer type, the <structfield>count</structfield> field to
the desired number of buffers, <structfield>memory</structfield>
-must be set to the requested I/O method and the reserved array
+must be set to the requested I/O method and the <structfield>reserved</structfield> array
must be zeroed. When the ioctl
is called with a pointer to this structure the driver will attempt to allocate
the requested number of buffers and it stores the actual number
diff --git a/Documentation/DocBook/v4l/vidioc-subscribe-event.xml b/Documentation/DocBook/v4l/vidioc-subscribe-event.xml
new file mode 100644
index 000000000000..8b501791aa68
--- /dev/null
+++ b/Documentation/DocBook/v4l/vidioc-subscribe-event.xml
@@ -0,0 +1,133 @@
+<refentry id="vidioc-subscribe-event">
+ <refmeta>
+ <refentrytitle>ioctl VIDIOC_SUBSCRIBE_EVENT, VIDIOC_UNSUBSCRIBE_EVENT</refentrytitle>
+ &manvol;
+ </refmeta>
+
+ <refnamediv>
+ <refname>VIDIOC_SUBSCRIBE_EVENT, VIDIOC_UNSUBSCRIBE_EVENT</refname>
+ <refpurpose>Subscribe or unsubscribe event</refpurpose>
+ </refnamediv>
+
+ <refsynopsisdiv>
+ <funcsynopsis>
+ <funcprototype>
+ <funcdef>int <function>ioctl</function></funcdef>
+ <paramdef>int <parameter>fd</parameter></paramdef>
+ <paramdef>int <parameter>request</parameter></paramdef>
+ <paramdef>struct v4l2_event_subscription
+*<parameter>argp</parameter></paramdef>
+ </funcprototype>
+ </funcsynopsis>
+ </refsynopsisdiv>
+
+ <refsect1>
+ <title>Arguments</title>
+
+ <variablelist>
+ <varlistentry>
+ <term><parameter>fd</parameter></term>
+ <listitem>
+ <para>&fd;</para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><parameter>request</parameter></term>
+ <listitem>
+ <para>VIDIOC_SUBSCRIBE_EVENT, VIDIOC_UNSUBSCRIBE_EVENT</para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><parameter>argp</parameter></term>
+ <listitem>
+ <para></para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </refsect1>
+
+ <refsect1>
+ <title>Description</title>
+
+    <para>Subscribe or unsubscribe a V4L2 event. Subscribed events are
+ dequeued by using the &VIDIOC-DQEVENT; ioctl.</para>
+
+ <table frame="none" pgwide="1" id="v4l2-event-subscription">
+ <title>struct <structname>v4l2_event_subscription</structname></title>
+ <tgroup cols="3">
+ &cs-str;
+ <tbody valign="top">
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>type</structfield></entry>
+ <entry>Type of the event.</entry>
+ </row>
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>reserved</structfield>[7]</entry>
+ <entry>Reserved for future extensions. Drivers and applications
+ must set the array to zero.</entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </table>
+
+ <table frame="none" pgwide="1" id="event-type">
+ <title>Event Types</title>
+ <tgroup cols="3">
+ &cs-def;
+ <tbody valign="top">
+ <row>
+ <entry><constant>V4L2_EVENT_ALL</constant></entry>
+ <entry>0</entry>
+	    <entry>All events. V4L2_EVENT_ALL is valid only with
+	    VIDIOC_UNSUBSCRIBE_EVENT, for unsubscribing all events at once.
+ </entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_EVENT_VSYNC</constant></entry>
+ <entry>1</entry>
+ <entry>This event is triggered on the vertical sync.
+ This event has &v4l2-event-vsync; associated with it.
+ </entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_EVENT_EOS</constant></entry>
+ <entry>2</entry>
+ <entry>This event is triggered when the end of a stream is reached.
+ This is typically used with MPEG decoders to report to the application
+ when the last of the MPEG stream has been decoded.
+ </entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_EVENT_PRIVATE_START</constant></entry>
+ <entry>0x08000000</entry>
+ <entry>Base event number for driver-private events.</entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </table>
+
+ <table frame="none" pgwide="1" id="v4l2-event-vsync">
+ <title>struct <structname>v4l2_event_vsync</structname></title>
+ <tgroup cols="3">
+ &cs-str;
+ <tbody valign="top">
+ <row>
+ <entry>__u8</entry>
+ <entry><structfield>field</structfield></entry>
+ <entry>The upcoming field. See &v4l2-field;.</entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </table>
+
+ </refsect1>
+</refentry>
+<!--
+Local Variables:
+mode: sgml
+sgml-parent-document: "v4l2.sgml"
+indent-tabs-mode: nil
+End:
+-->
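Putting the two new ioctls together, a user-space consumer might look roughly
like the sketch below (fd is an open V4L2 device on a driver that supports
events; error handling is omitted and the printed fields are illustrative
only):

    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/select.h>
    #include <linux/videodev2.h>

    static void wait_for_vsync(int fd)
    {
            struct v4l2_event_subscription sub;
            struct v4l2_event ev;
            fd_set efds;

            memset(&sub, 0, sizeof(sub));
            sub.type = V4L2_EVENT_VSYNC;
            ioctl(fd, VIDIOC_SUBSCRIBE_EVENT, &sub);

            /* Pending events are signalled as an exceptional condition. */
            FD_ZERO(&efds);
            FD_SET(fd, &efds);
            select(fd + 1, NULL, NULL, &efds, NULL);

            if (ioctl(fd, VIDIOC_DQEVENT, &ev) == 0)
                    printf("event %u: upcoming field %u, %u more pending\n",
                           ev.type, (unsigned)ev.u.vsync.field,
                           ev.pending);

            ioctl(fd, VIDIOC_UNSUBSCRIBE_EVENT, &sub);
    }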
diff --git a/Documentation/DocBook/writing-an-alsa-driver.tmpl b/Documentation/DocBook/writing-an-alsa-driver.tmpl
index 0d0f7b4d4b1a..0ba149de2608 100644
--- a/Documentation/DocBook/writing-an-alsa-driver.tmpl
+++ b/Documentation/DocBook/writing-an-alsa-driver.tmpl
@@ -5518,34 +5518,41 @@ struct _snd_pcm_runtime {
]]>
</programlisting>
</informalexample>
+
+      For the raw data, the <structfield>size</structfield> field must be
+ set properly. This specifies the maximum size of the proc file access.
</para>
<para>
- The callback is much more complicated than the text-file
- version. You need to use a low-level I/O functions such as
+	  The read/write callbacks of raw mode are more direct than the text-mode ones.
+	  You need to use low-level I/O functions such as
<function>copy_from/to_user()</function> to transfer the
data.
<informalexample>
<programlisting>
<![CDATA[
- static long my_file_io_read(struct snd_info_entry *entry,
+ static ssize_t my_file_io_read(struct snd_info_entry *entry,
void *file_private_data,
struct file *file,
char *buf,
- unsigned long count,
- unsigned long pos)
+ size_t count,
+ loff_t pos)
{
- long size = count;
- if (pos + size > local_max_size)
- size = local_max_size - pos;
- if (copy_to_user(buf, local_data + pos, size))
+ if (copy_to_user(buf, local_data + pos, count))
return -EFAULT;
- return size;
+ return count;
}
]]>
</programlisting>
</informalexample>
+
+ If the size of the info entry has been set up properly,
+ <structfield>count</structfield> and <structfield>pos</structfield> are
+      guaranteed to lie within 0 and the given size, so you don't
+      have to check the range in the callbacks unless some other
+      condition requires it.
+
</para>
</chapter>
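For context, a raw-mode entry with the read callback shown above is typically
registered along the following lines (a sketch only; "card", "chip" and the
4096-byte size are placeholder values):

    static struct snd_info_entry_ops my_file_io_ops = {
            .read = my_file_io_read,
    };

    /* ... in the chip constructor, after the card has been created ... */
    struct snd_info_entry *entry;

    if (!snd_card_proc_new(card, "my-file", &entry)) {
            entry->content = SNDRV_INFO_CONTENT_DATA;
            entry->private_data = chip;
            entry->size = 4096;             /* bounds proc file access, see above */
            entry->c.ops = &my_file_io_ops;
    }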
diff --git a/Documentation/DocBook/writing_usb_driver.tmpl b/Documentation/DocBook/writing_usb_driver.tmpl
index eeff19ca831b..bd97a13fa5ae 100644
--- a/Documentation/DocBook/writing_usb_driver.tmpl
+++ b/Documentation/DocBook/writing_usb_driver.tmpl
@@ -342,7 +342,7 @@ static inline void skel_delete (struct usb_skel *dev)
{
kfree (dev->bulk_in_buffer);
if (dev->bulk_out_buffer != NULL)
- usb_buffer_free (dev->udev, dev->bulk_out_size,
+ usb_free_coherent (dev->udev, dev->bulk_out_size,
dev->bulk_out_buffer,
dev->write_urb->transfer_dma);
usb_free_urb (dev->write_urb);
diff --git a/Documentation/PCI/pci-error-recovery.txt b/Documentation/PCI/pci-error-recovery.txt
index e83f2ea76415..898ded24510d 100644
--- a/Documentation/PCI/pci-error-recovery.txt
+++ b/Documentation/PCI/pci-error-recovery.txt
@@ -216,7 +216,7 @@ The driver should return one of the following result codes:
- PCI_ERS_RESULT_NEED_RESET
Driver returns this if it thinks the device is not
- recoverable in it's current state and it needs a slot
+ recoverable in its current state and it needs a slot
reset to proceed.
- PCI_ERS_RESULT_DISCONNECT
@@ -241,7 +241,7 @@ in working condition.
The driver is not supposed to restart normal driver I/O operations
at this point. It should limit itself to "probing" the device to
-check it's recoverability status. If all is right, then the platform
+check its recoverability status. If all is right, then the platform
will call resume() once all drivers have ack'd link_reset().
Result codes:
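For orientation, the result codes above are returned from the callbacks in
struct pci_error_handlers. A rough sketch of a driver wiring them up follows;
the "foo" names are hypothetical and the decisions shown (always requesting a
slot reset) are illustrative only:

    #include <linux/pci.h>

    static pci_ers_result_t foo_error_detected(struct pci_dev *pdev,
                                               enum pci_channel_state state)
    {
            /* Device not recoverable in its current state: ask for a slot reset. */
            return PCI_ERS_RESULT_NEED_RESET;
    }

    static pci_ers_result_t foo_slot_reset(struct pci_dev *pdev)
    {
            /* Probe recoverability only; normal I/O is not restarted here. */
            return PCI_ERS_RESULT_RECOVERED;
    }

    static void foo_resume(struct pci_dev *pdev)
    {
            /* Normal operation may resume now. */
    }

    static struct pci_error_handlers foo_err_handlers = {
            .error_detected = foo_error_detected,
            .slot_reset     = foo_slot_reset,
            .resume         = foo_resume,
    };

    /* Hooked up via the .err_handler field of the driver's struct pci_driver. */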
diff --git a/Documentation/PCI/pcieaer-howto.txt b/Documentation/PCI/pcieaer-howto.txt
index be21001ab144..26d3d945c3c2 100644
--- a/Documentation/PCI/pcieaer-howto.txt
+++ b/Documentation/PCI/pcieaer-howto.txt
@@ -13,7 +13,7 @@ Reporting (AER) driver and provides information on how to use it, as
well as how to enable the drivers of endpoint devices to conform with
PCI Express AER driver.
-1.2 Copyright © Intel Corporation 2006.
+1.2 Copyright (C) Intel Corporation 2006.
1.3 What is the PCI Express AER Driver?
@@ -71,15 +71,11 @@ console. If it's a correctable error, it is outputed as a warning.
Otherwise, it is printed as an error. So users could choose different
log level to filter out correctable error messages.
-Below shows an example.
-+------ PCI-Express Device Error -----+
-Error Severity : Uncorrected (Fatal)
-PCIE Bus Error type : Transaction Layer
-Unsupported Request : First
-Requester ID : 0500
-VendorID=8086h, DeviceID=0329h, Bus=05h, Device=00h, Function=00h
-TLB Header:
-04000001 00200a03 05010000 00050100
+Below shows an example:
+0000:50:00.0: PCIe Bus Error: severity=Uncorrected (Fatal), type=Transaction Layer, id=0500(Requester ID)
+0000:50:00.0: device [8086:0329] error status/mask=00100000/00000000
+0000:50:00.0: [20] Unsupported Request (First)
+0000:50:00.0: TLP Header: 04000001 00200a03 05010000 00050100
In the example, 'Requester ID' means the ID of the device who sends
the error message to root port. Pls. refer to pci express specs for
@@ -112,7 +108,7 @@ but the PCI Express link itself is fully functional. Fatal errors, on
the other hand, cause the link to be unreliable.
When AER is enabled, a PCI Express device will automatically send an
-error message to the PCIE root port above it when the device captures
+error message to the PCIe root port above it when the device captures
an error. The Root Port, upon receiving an error reporting message,
internally processes and logs the error message in its PCI Express
capability structure. Error information being logged includes storing
@@ -198,8 +194,9 @@ to reset link, AER port service driver is required to provide the
function to reset link. Firstly, kernel looks for if the upstream
component has an aer driver. If it has, kernel uses the reset_link
callback of the aer driver. If the upstream component has no aer driver
-and the port is downstream port, we will use the aer driver of the
-root port who reports the AER error. As for upstream ports,
+and the port is a downstream port, we will perform a hot reset as the
+default by setting the Secondary Bus Reset bit of the Bridge Control
+register associated with the downstream port. As for upstream ports,
they should provide their own aer service drivers with reset_link
function. If error_detected returns PCI_ERS_RESULT_CAN_RECOVER and
reset_link returns PCI_ERS_RESULT_RECOVERED, the error handling goes
@@ -253,11 +250,11 @@ cleanup uncorrectable status register. Pls. refer to section 3.3.
4. Software error injection
-Debugging PCIE AER error recovery code is quite difficult because it
+Debugging PCIe AER error recovery code is quite difficult because it
is hard to trigger real hardware errors. Software based error
-injection can be used to fake various kinds of PCIE errors.
+injection can be used to fake various kinds of PCIe errors.
-First you should enable PCIE AER software error injection in kernel
+First you should enable PCIe AER software error injection in kernel
configuration, that is, following item should be in your .config.
CONFIG_PCIEAER_INJECT=y or CONFIG_PCIEAER_INJECT=m
diff --git a/Documentation/RCU/stallwarn.txt b/Documentation/RCU/stallwarn.txt
index 1423d2570d78..44c6dcc93d6d 100644
--- a/Documentation/RCU/stallwarn.txt
+++ b/Documentation/RCU/stallwarn.txt
@@ -3,35 +3,79 @@ Using RCU's CPU Stall Detector
The CONFIG_RCU_CPU_STALL_DETECTOR kernel config parameter enables
RCU's CPU stall detector, which detects conditions that unduly delay
RCU grace periods. The stall detector's idea of what constitutes
-"unduly delayed" is controlled by a pair of C preprocessor macros:
+"unduly delayed" is controlled by a set of C preprocessor macros:
RCU_SECONDS_TILL_STALL_CHECK
This macro defines the period of time that RCU will wait from
the beginning of a grace period until it issues an RCU CPU
- stall warning. It is normally ten seconds.
+ stall warning. This time period is normally ten seconds.
RCU_SECONDS_TILL_STALL_RECHECK
This macro defines the period of time that RCU will wait after
- issuing a stall warning until it issues another stall warning.
- It is normally set to thirty seconds.
+ issuing a stall warning until it issues another stall warning
+ for the same stall. This time period is normally set to thirty
+ seconds.
RCU_STALL_RAT_DELAY
- The CPU stall detector tries to make the offending CPU rat on itself,
- as this often gives better-quality stack traces. However, if
- the offending CPU does not detect its own stall in the number
- of jiffies specified by RCU_STALL_RAT_DELAY, then other CPUs will
- complain. This is normally set to two jiffies.
+ The CPU stall detector tries to make the offending CPU print its
+ own warnings, as this often gives better-quality stack traces.
+ However, if the offending CPU does not detect its own stall in
+ the number of jiffies specified by RCU_STALL_RAT_DELAY, then
+ some other CPU will complain. This delay is normally set to
+ two jiffies.
-The following problems can result in an RCU CPU stall warning:
+When a CPU detects that it is stalling, it will print a message similar
+to the following:
+
+INFO: rcu_sched_state detected stall on CPU 5 (t=2500 jiffies)
+
+This message indicates that CPU 5 detected that it was causing a stall,
+and that the stall was affecting RCU-sched. This message will normally be
+followed by a stack dump of the offending CPU. On TREE_RCU kernel builds,
+RCU and RCU-sched are implemented by the same underlying mechanism,
+while on TREE_PREEMPT_RCU kernel builds, RCU is instead implemented
+by rcu_preempt_state.
+
+On the other hand, if the offending CPU fails to print out a stall-warning
+message quickly enough, some other CPU will print a message similar to
+the following:
+
+INFO: rcu_bh_state detected stalls on CPUs/tasks: { 3 5 } (detected by 2, 2502 jiffies)
+
+This message indicates that CPU 2 detected that CPUs 3 and 5 were both
+causing stalls, and that the stall was affecting RCU-bh. This message
+will normally be followed by stack dumps for each CPU. Please note that
+TREE_PREEMPT_RCU builds can be stalled by tasks as well as by CPUs,
+and that the tasks will be indicated by PID, for example, "P3421".
+It is even possible for a rcu_preempt_state stall to be caused by both
+CPUs -and- tasks, in which case the offending CPUs and tasks will all
+be called out in the list.
+
+Finally, if the grace period ends just as the stall warning starts
+printing, there will be a spurious stall-warning message:
+
+INFO: rcu_bh_state detected stalls on CPUs/tasks: { } (detected by 4, 2502 jiffies)
+
+This is rare, but does happen from time to time in real life.
+
+So your kernel printed an RCU CPU stall warning. The next question is
+"What caused it?" The following problems can result in RCU CPU stall
+warnings:
o A CPU looping in an RCU read-side critical section.
-o A CPU looping with interrupts disabled.
+o A CPU looping with interrupts disabled. This condition can
+ result in RCU-sched and RCU-bh stalls.
-o A CPU looping with preemption disabled.
+o A CPU looping with preemption disabled. This condition can
+ result in RCU-sched stalls and, if ksoftirqd is in use, RCU-bh
+ stalls.
+
+o A CPU looping with bottom halves disabled. This condition can
+ result in RCU-sched and RCU-bh stalls.
o For !CONFIG_PREEMPT kernels, a CPU looping anywhere in the kernel
without invoking schedule().
@@ -39,20 +83,24 @@ o For !CONFIG_PREEMPT kernels, a CPU looping anywhere in the kernel
o A bug in the RCU implementation.
o A hardware failure. This is quite unlikely, but has occurred
- at least once in a former life. A CPU failed in a running system,
+ at least once in real life. A CPU failed in a running system,
becoming unresponsive, but not causing an immediate crash.
This resulted in a series of RCU CPU stall warnings, eventually
leading the realization that the CPU had failed.
-The RCU, RCU-sched, and RCU-bh implementations have CPU stall warning.
-SRCU does not do so directly, but its calls to synchronize_sched() will
-result in RCU-sched detecting any CPU stalls that might be occurring.
-
-To diagnose the cause of the stall, inspect the stack traces. The offending
-function will usually be near the top of the stack. If you have a series
-of stall warnings from a single extended stall, comparing the stack traces
-can often help determine where the stall is occurring, which will usually
-be in the function nearest the top of the stack that stays the same from
-trace to trace.
+The RCU, RCU-sched, and RCU-bh implementations have CPU stall
+warnings. SRCU does not have its own CPU stall warnings, but its
+calls to synchronize_sched() will result in RCU-sched detecting
+RCU-sched-related CPU stalls. Please note that RCU only detects
+CPU stalls when there is a grace period in progress. No grace period,
+no CPU stall warnings.
+
+To diagnose the cause of the stall, inspect the stack traces.
+The offending function will usually be near the top of the stack.
+If you have a series of stall warnings from a single extended stall,
+comparing the stack traces can often help determine where the stall
+is occurring, which will usually be in the function nearest the top of
+that portion of the stack which remains the same from trace to trace.
+If you can reliably trigger the stall, ftrace can be quite helpful.
RCU bugs can often be debugged with the help of CONFIG_RCU_TRACE.
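For reference, the first cause in the list above (a CPU looping in an RCU
read-side critical section) corresponds to kernel code shaped roughly like the
deliberately broken sketch below; it exists only to show what the detector
complains about and must not be copied into real code:

    #include <linux/rcupdate.h>
    #include <asm/processor.h>

    static void rcu_stall_demo(void)
    {
            rcu_read_lock();
            for (;;)
                    cpu_relax();    /* never reaches rcu_read_unlock() */
            rcu_read_unlock();      /* unreachable: grace periods now stall */
    }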
diff --git a/Documentation/RCU/torture.txt b/Documentation/RCU/torture.txt
index 0e50bc2aa1e2..5d9016795fd8 100644
--- a/Documentation/RCU/torture.txt
+++ b/Documentation/RCU/torture.txt
@@ -182,16 +182,6 @@ Similarly, sched_expedited RCU provides the following:
sched_expedited-torture: Reader Pipe: 12660320201 95875 0 0 0 0 0 0 0 0 0
sched_expedited-torture: Reader Batch: 12660424885 0 0 0 0 0 0 0 0 0 0
sched_expedited-torture: Free-Block Circulation: 1090795 1090795 1090794 1090793 1090792 1090791 1090790 1090789 1090788 1090787 0
- state: -1 / 0:0 3:0 4:0
-
-As before, the first four lines are similar to those for RCU.
-The last line shows the task-migration state. The first number is
--1 if synchronize_sched_expedited() is idle, -2 if in the process of
-posting wakeups to the migration kthreads, and N when waiting on CPU N.
-Each of the colon-separated fields following the "/" is a CPU:state pair.
-Valid states are "0" for idle, "1" for waiting for quiescent state,
-"2" for passed through quiescent state, and "3" when a race with a
-CPU-hotplug event forces use of the synchronize_sched() primitive.
USAGE
diff --git a/Documentation/RCU/trace.txt b/Documentation/RCU/trace.txt
index 8608fd85e921..efd8cc95c06b 100644
--- a/Documentation/RCU/trace.txt
+++ b/Documentation/RCU/trace.txt
@@ -256,23 +256,23 @@ o Each element of the form "1/1 0:127 ^0" represents one struct
The output of "cat rcu/rcu_pending" looks as follows:
rcu_sched:
- 0 np=255892 qsp=53936 cbr=0 cng=14417 gpc=10033 gps=24320 nf=6445 nn=146741
- 1 np=261224 qsp=54638 cbr=0 cng=25723 gpc=16310 gps=2849 nf=5912 nn=155792
- 2 np=237496 qsp=49664 cbr=0 cng=2762 gpc=45478 gps=1762 nf=1201 nn=136629
- 3 np=236249 qsp=48766 cbr=0 cng=286 gpc=48049 gps=1218 nf=207 nn=137723
- 4 np=221310 qsp=46850 cbr=0 cng=26 gpc=43161 gps=4634 nf=3529 nn=123110
- 5 np=237332 qsp=48449 cbr=0 cng=54 gpc=47920 gps=3252 nf=201 nn=137456
- 6 np=219995 qsp=46718 cbr=0 cng=50 gpc=42098 gps=6093 nf=4202 nn=120834
- 7 np=249893 qsp=49390 cbr=0 cng=72 gpc=38400 gps=17102 nf=41 nn=144888
+ 0 np=255892 qsp=53936 rpq=85 cbr=0 cng=14417 gpc=10033 gps=24320 nf=6445 nn=146741
+ 1 np=261224 qsp=54638 rpq=33 cbr=0 cng=25723 gpc=16310 gps=2849 nf=5912 nn=155792
+ 2 np=237496 qsp=49664 rpq=23 cbr=0 cng=2762 gpc=45478 gps=1762 nf=1201 nn=136629
+ 3 np=236249 qsp=48766 rpq=98 cbr=0 cng=286 gpc=48049 gps=1218 nf=207 nn=137723
+ 4 np=221310 qsp=46850 rpq=7 cbr=0 cng=26 gpc=43161 gps=4634 nf=3529 nn=123110
+ 5 np=237332 qsp=48449 rpq=9 cbr=0 cng=54 gpc=47920 gps=3252 nf=201 nn=137456
+ 6 np=219995 qsp=46718 rpq=12 cbr=0 cng=50 gpc=42098 gps=6093 nf=4202 nn=120834
+ 7 np=249893 qsp=49390 rpq=42 cbr=0 cng=72 gpc=38400 gps=17102 nf=41 nn=144888
rcu_bh:
- 0 np=146741 qsp=1419 cbr=0 cng=6 gpc=0 gps=0 nf=2 nn=145314
- 1 np=155792 qsp=12597 cbr=0 cng=0 gpc=4 gps=8 nf=3 nn=143180
- 2 np=136629 qsp=18680 cbr=0 cng=0 gpc=7 gps=6 nf=0 nn=117936
- 3 np=137723 qsp=2843 cbr=0 cng=0 gpc=10 gps=7 nf=0 nn=134863
- 4 np=123110 qsp=12433 cbr=0 cng=0 gpc=4 gps=2 nf=0 nn=110671
- 5 np=137456 qsp=4210 cbr=0 cng=0 gpc=6 gps=5 nf=0 nn=133235
- 6 np=120834 qsp=9902 cbr=0 cng=0 gpc=6 gps=3 nf=2 nn=110921
- 7 np=144888 qsp=26336 cbr=0 cng=0 gpc=8 gps=2 nf=0 nn=118542
+ 0 np=146741 qsp=1419 rpq=6 cbr=0 cng=6 gpc=0 gps=0 nf=2 nn=145314
+ 1 np=155792 qsp=12597 rpq=3 cbr=0 cng=0 gpc=4 gps=8 nf=3 nn=143180
+ 2 np=136629 qsp=18680 rpq=1 cbr=0 cng=0 gpc=7 gps=6 nf=0 nn=117936
+ 3 np=137723 qsp=2843 rpq=0 cbr=0 cng=0 gpc=10 gps=7 nf=0 nn=134863
+ 4 np=123110 qsp=12433 rpq=0 cbr=0 cng=0 gpc=4 gps=2 nf=0 nn=110671
+ 5 np=137456 qsp=4210 rpq=1 cbr=0 cng=0 gpc=6 gps=5 nf=0 nn=133235
+ 6 np=120834 qsp=9902 rpq=2 cbr=0 cng=0 gpc=6 gps=3 nf=2 nn=110921
+ 7 np=144888 qsp=26336 rpq=0 cbr=0 cng=0 gpc=8 gps=2 nf=0 nn=118542
As always, this is once again split into "rcu_sched" and "rcu_bh"
portions, with CONFIG_TREE_PREEMPT_RCU kernels having an additional
@@ -284,6 +284,9 @@ o "np" is the number of times that __rcu_pending() has been invoked
o "qsp" is the number of times that the RCU was waiting for a
quiescent state from this CPU.
+o "rpq" is the number of times that the CPU had passed through
+ a quiescent state, but not yet reported it to RCU.
+
o "cbr" is the number of times that this CPU had RCU callbacks
that had passed through a grace period, and were thus ready
to be invoked.
diff --git a/Documentation/Smack.txt b/Documentation/Smack.txt
index 34614b4c708e..e9dab41c0fe0 100644
--- a/Documentation/Smack.txt
+++ b/Documentation/Smack.txt
@@ -73,7 +73,7 @@ NOTE: Smack labels are limited to 23 characters. The attr command
If you don't do anything special all users will get the floor ("_")
label when they log in. If you do want to log in via the hacked ssh
at other labels use the attr command to set the smack value on the
-home directory and it's contents.
+home directory and its contents.
You can add access rules in /etc/smack/accesses. They take the form:
diff --git a/Documentation/SubmitChecklist b/Documentation/SubmitChecklist
index 8916ca48bc95..da0382daa395 100644
--- a/Documentation/SubmitChecklist
+++ b/Documentation/SubmitChecklist
@@ -18,6 +18,8 @@ kernel patches.
2b: Passes allnoconfig, allmodconfig
+2c: Builds successfully when using O=builddir
+
3: Builds on multiple CPU architectures by using local cross-compile tools
or some other build farm.
@@ -95,3 +97,13 @@ kernel patches.
25: If any ioctl's are added by the patch, then also update
Documentation/ioctl/ioctl-number.txt.
+
+26: If your modified source code depends on or uses any of the kernel
+ APIs or features that are related to the following kconfig symbols,
+ then test multiple builds with the related kconfig symbols disabled
+ and/or =m (if that option is available) [not all of these at the
+ same time, just various/random combinations of them]:
+
+ CONFIG_SMP, CONFIG_SYSFS, CONFIG_PROC_FS, CONFIG_INPUT, CONFIG_PCI,
+ CONFIG_BLOCK, CONFIG_PM, CONFIG_HOTPLUG, CONFIG_MAGIC_SYSRQ,
+	CONFIG_NET, CONFIG_INET=n (but the latter with CONFIG_NET=y)
diff --git a/Documentation/SubmittingDrivers b/Documentation/SubmittingDrivers
index 99e72a81fa2f..4947fd8fb182 100644
--- a/Documentation/SubmittingDrivers
+++ b/Documentation/SubmittingDrivers
@@ -130,6 +130,8 @@ Linux kernel master tree:
ftp.??.kernel.org:/pub/linux/kernel/...
?? == your country code, such as "us", "uk", "fr", etc.
+ http://git.kernel.org/?p=linux/kernel/git/torvalds/linux-2.6.git
+
Linux kernel mailing list:
linux-kernel@vger.kernel.org
[mail majordomo@vger.kernel.org to subscribe]
@@ -160,3 +162,6 @@ How to NOT write kernel driver by Arjan van de Ven:
Kernel Janitor:
http://janitor.kernelnewbies.org/
+
+GIT, Fast Version Control System:
+ http://git-scm.com/
diff --git a/Documentation/acpi/apei/einj.txt b/Documentation/acpi/apei/einj.txt
new file mode 100644
index 000000000000..dfab71848dc8
--- /dev/null
+++ b/Documentation/acpi/apei/einj.txt
@@ -0,0 +1,59 @@
+ APEI Error INJection
+ ~~~~~~~~~~~~~~~~~~~~
+
+EINJ provides a hardware error injection mechanism.
+It is very useful for debugging and testing other APEI and RAS features.
+
+To use EINJ, make sure the following are enabled in your kernel
+configuration:
+
+CONFIG_DEBUG_FS
+CONFIG_ACPI_APEI
+CONFIG_ACPI_APEI_EINJ
+
+The user interface of EINJ is the debug file system (debugfs), under the
+directory apei/einj. The following files are provided.
+
+- available_error_type
+ Reading this file returns the error injection capability of the
+ platform, that is, which error types are supported. The error type
+  definitions are as follows; the left field is the error type value, the
+  right field is the error description.
+
+ 0x00000001 Processor Correctable
+ 0x00000002 Processor Uncorrectable non-fatal
+ 0x00000004 Processor Uncorrectable fatal
+ 0x00000008 Memory Correctable
+ 0x00000010 Memory Uncorrectable non-fatal
+ 0x00000020 Memory Uncorrectable fatal
+ 0x00000040 PCI Express Correctable
+ 0x00000080 PCI Express Uncorrectable fatal
+ 0x00000100 PCI Express Uncorrectable non-fatal
+ 0x00000200 Platform Correctable
+ 0x00000400 Platform Uncorrectable non-fatal
+ 0x00000800 Platform Uncorrectable fatal
+
+  The format of the file contents is as above, except that only the
+  available error types are listed.
+
+- error_type
+ This file is used to set the error type value. The error type value
+ is defined in "available_error_type" description.
+
+- error_inject
+ Write any integer to this file to trigger the error
+ injection. Before this, please specify all necessary error
+ parameters.
+
+- param1
+  This file is used to set the first error parameter value. The effect of
+  the parameter depends on the error_type specified. For memory errors,
+  this is the physical memory address.
+
+- param2
+  This file is used to set the second error parameter value. The effect of
+  the parameter depends on the error_type specified. For memory errors,
+  this is the physical memory address mask.
+
+For more information about EINJ, please refer to ACPI specification
+version 4.0, section 17.5.
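A rough user-space illustration of the interface above, written against the
conventional debugfs mount point (the /sys/kernel/debug path and the chosen
error type are assumptions, not part of the interface description):

    #include <stdio.h>

    static int write_einj(const char *name, const char *val)
    {
            char path[128];
            FILE *f;

            snprintf(path, sizeof(path), "/sys/kernel/debug/apei/einj/%s", name);
            f = fopen(path, "w");
            if (!f)
                    return -1;
            fprintf(f, "%s\n", val);
            return fclose(f);
    }

    int main(void)
    {
            write_einj("error_type", "0x8");        /* Memory Correctable */
            write_einj("error_inject", "1");        /* trigger the injection */
            return 0;
    }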
diff --git a/Documentation/arm/00-INDEX b/Documentation/arm/00-INDEX
index 82e418d648d0..7f5fc3ba9c91 100644
--- a/Documentation/arm/00-INDEX
+++ b/Documentation/arm/00-INDEX
@@ -20,6 +20,8 @@ Samsung-S3C24XX
- S3C24XX ARM Linux Overview
Sharp-LH
- Linux on Sharp LH79524 and LH7A40X System On a Chip (SOC)
+SPEAr
+ - ST SPEAr platform Linux Overview
VFP/
- Release notes for Linux Kernel Vector Floating Point support code
empeg/
diff --git a/Documentation/arm/SA1100/ADSBitsy b/Documentation/arm/SA1100/ADSBitsy
index 7197a9e958ee..f9f62e8c0719 100644
--- a/Documentation/arm/SA1100/ADSBitsy
+++ b/Documentation/arm/SA1100/ADSBitsy
@@ -32,7 +32,7 @@ Notes:
- The flash on board is divided into 3 partitions.
You should be careful to use flash on board.
- It's partition is different from GraphicsClient Plus and GraphicsMaster
+ Its partition is different from GraphicsClient Plus and GraphicsMaster
- 16bpp mode requires a different cable than what ships with the board.
Contact ADS or look through the manual to wire your own. Currently,
diff --git a/Documentation/arm/SPEAr/overview.txt b/Documentation/arm/SPEAr/overview.txt
new file mode 100644
index 000000000000..253a35c6f782
--- /dev/null
+++ b/Documentation/arm/SPEAr/overview.txt
@@ -0,0 +1,60 @@
+ SPEAr ARM Linux Overview
+ ==========================
+
+Introduction
+------------
+
+ SPEAr (Structured Processor Enhanced Architecture).
+ weblink : http://www.st.com/spear
+
+ The ST Microelectronics SPEAr range of ARM9/CortexA9 System-on-Chip CPUs are
+ supported by the 'spear' platform of ARM Linux. Currently SPEAr300,
+ SPEAr310, SPEAr320 and SPEAr600 SOCs are supported. Support for the SPEAr13XX
+ series is in progress.
+
+ Hierarchy in SPEAr is as follows:
+
+ SPEAr (Platform)
+ - SPEAr3XX (3XX SOC series, based on ARM9)
+ - SPEAr300 (SOC)
+ - SPEAr300_EVB (Evaluation Board)
+ - SPEAr310 (SOC)
+ - SPEAr310_EVB (Evaluation Board)
+ - SPEAr320 (SOC)
+ - SPEAr320_EVB (Evaluation Board)
+ - SPEAr6XX (6XX SOC series, based on ARM9)
+ - SPEAr600 (SOC)
+ - SPEAr600_EVB (Evaluation Board)
+ - SPEAr13XX (13XX SOC series, based on ARM CORTEXA9)
+ - SPEAr1300 (SOC)
+
+ Configuration
+ -------------
+
+ A generic configuration is provided for each machine, and can be used as the
+ default by
+ make spear600_defconfig
+ make spear300_defconfig
+ make spear310_defconfig
+ make spear320_defconfig
+
+ Layout
+ ------
+
+ The common files for multiple machine families (SPEAr3XX, SPEAr6XX and
+ SPEAr13XX) are located in the platform code contained in arch/arm/plat-spear
+ with headers in plat/.
+
+  Each machine series has a directory named arch/arm/mach-spear followed by the
+  series name, e.g. mach-spear3xx, mach-spear6xx and mach-spear13xx.
+
+  The common file for machines of the spear3xx family is mach-spear3xx/spear3xx.c
+  and for spear6xx it is mach-spear6xx/spear6xx.c. mach-spear* also contains
+  soc/machine specific files, like spear300.c, spear310.c, spear320.c and
+  spear600.c, as well as board specific files for each machine type.
+
+
+ Document Author
+ ---------------
+
+ Viresh Kumar, (c) 2010 ST Microelectronics
diff --git a/Documentation/arm/Samsung-S3C24XX/GPIO.txt b/Documentation/arm/Samsung-S3C24XX/GPIO.txt
index 2af2cf39915f..816d6071669e 100644
--- a/Documentation/arm/Samsung-S3C24XX/GPIO.txt
+++ b/Documentation/arm/Samsung-S3C24XX/GPIO.txt
@@ -12,6 +12,8 @@ Introduction
of the s3c2410 GPIO system, please read the Samsung provided
data-sheet/users manual to find out the complete list.
+  See Documentation/arm/Samsung/GPIO.txt for the core implementation.
+
GPIOLIB
-------
@@ -24,8 +26,60 @@ GPIOLIB
listed below will be removed (they may be marked as __deprecated
in the near future).
- - s3c2410_gpio_getpin
- - s3c2410_gpio_setpin
+ The following functions now either have a s3c_ specific variant
+ or are merged into gpiolib. See the definitions in
+ arch/arm/plat-samsung/include/plat/gpio-cfg.h:
+
+ s3c2410_gpio_setpin() gpio_set_value() or gpio_direction_output()
+ s3c2410_gpio_getpin() gpio_get_value() or gpio_direction_input()
+ s3c2410_gpio_getirq() gpio_to_irq()
+ s3c2410_gpio_cfgpin() s3c_gpio_cfgpin()
+ s3c2410_gpio_getcfg() s3c_gpio_getcfg()
+ s3c2410_gpio_pullup() s3c_gpio_setpull()
+
+
+GPIOLIB conversion
+------------------
+
+If you need to convert your board or driver to use gpiolib from the existing
+s3c2410 API, then here are some notes on the process; a short conversion
+sketch follows these notes.
+
+1) If your board is exclusively using a GPIO, say to control peripheral
+   power, then it will need to claim the gpio with gpio_request() before
+ it can use it.
+
+ It is recommended to check the return value, with at least WARN_ON()
+ during initialisation.
+
+2) The s3c2410_gpio_cfgpin() can be directly replaced with s3c_gpio_cfgpin()
+ as they have the same arguments, and can either take the pin specific
+ values, or the more generic special-function-number arguments.
+
+3) s3c2410_gpio_pullup() changes have the problem that whilst the
+ s3c2410_gpio_pullup(x, 1) can be easily translated to the
+ s3c_gpio_setpull(x, S3C_GPIO_PULL_NONE), the s3c2410_gpio_pullup(x, 0)
+   case is not so easy.
+
+ The s3c2410_gpio_pullup(x, 0) case enables the pull-up (or in the case
+ of some of the devices, a pull-down) and as such the new API distinguishes
+ between the UP and DOWN case. There is currently no 'just turn on' setting
+ which may be required if this becomes a problem.
+
+4) s3c2410_gpio_setpin() can be replaced by gpio_set_value(); the old call
+   does not implicitly configure the relevant gpio to output, so the gpio
+   direction should be changed before using gpio_set_value().
+
+5) s3c2410_gpio_getpin() is replaceable by gpio_get_value() if the pin
+ has been set to input. It is currently unknown what the behaviour is
+ when using gpio_get_value() on an output pin (s3c2410_gpio_getpin
+ would return the value the pin is supposed to be outputting).
+
+6) s3c2410_gpio_getirq() should be directly replaceable with the
+ gpio_to_irq() call.
+
+The s3c2410_gpio and gpio_ calls have always operated on the same gpio
+numberspace, so there is no problem with converting the gpio numbering
+between the calls.
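A minimal conversion sketch following the notes above (the pin is passed in by
the caller, e.g. S3C2410_GPA(0); header locations and error handling may differ
per board):

    #include <linux/kernel.h>
    #include <linux/gpio.h>
    #include <plat/gpio-cfg.h>

    static int example_claim_and_drive(unsigned int pin)
    {
            int ret;

            ret = gpio_request(pin, "example");
            if (WARN_ON(ret))
                    return ret;

            /* The new API states the pull explicitly: up, down or none. */
            s3c_gpio_setpull(pin, S3C_GPIO_PULL_NONE);

            /* gpio_set_value() does not change the direction, so set it first. */
            ret = gpio_direction_output(pin, 1);
            if (ret == 0)
                    gpio_set_value(pin, 0);

            return ret;
    }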
Headers
@@ -54,6 +108,11 @@ PIN Numbers
eg S3C2410_GPA(0) or S3C2410_GPF(1). These defines are used to tell
the GPIO functions which pin is to be used.
+ With the conversion to gpiolib, there is no longer a direct conversion
+ from gpio pin number to register base address as in earlier kernels. This
+ is due to the number space required for newer SoCs where the later
+ GPIOs are not contiguous.
+
Configuring a pin
-----------------
@@ -71,6 +130,8 @@ Configuring a pin
which would turn GPA(0) into the lowest Address line A0, and set
GPE(8) to be connected to the SDIO/MMC controller's SDDAT1 line.
+ The s3c_gpio_cfgpin() call is a functional replacement for this call.
+
Reading the current configuration
---------------------------------
@@ -82,6 +143,9 @@ Reading the current configuration
The return value will be from the same set of values which can be
passed to s3c2410_gpio_cfgpin().
+ The s3c_gpio_getcfg() call should be a functional replacement for
+ this call.
+
Configuring a pull-up resistor
------------------------------
@@ -95,6 +159,10 @@ Configuring a pull-up resistor
Where the to value is zero to set the pull-up off, and 1 to enable
the specified pull-up. Any other values are currently undefined.
+ The s3c_gpio_setpull() offers similar functionality, but with the
+ ability to encode whether the pull is up or down. Currently there
+ is no 'just on' state, so up or down must be selected.
+
Getting the state of a PIN
--------------------------
@@ -106,6 +174,9 @@ Getting the state of a PIN
This will return either zero or non-zero. Do not count on this
function returning 1 if the pin is set.
+ This call is now implemented by the relevant gpiolib calls, convert
+ your board or driver to use gpiolib.
+
Setting the state of a PIN
--------------------------
@@ -117,6 +188,9 @@ Setting the state of a PIN
Which sets the given pin to the value. Use 0 to write 0, and 1 to
set the output to 1.
+ This call is now implemented by the relevant gpiolib calls, convert
+ your board or driver to use gpiolib.
+
Getting the IRQ number associated with a PIN
--------------------------------------------
@@ -128,6 +202,9 @@ Getting the IRQ number associated with a PIN
Note, not all pins have an IRQ.
+ This call is now implemented by the relevant gpiolib calls, convert
+ your board or driver to use gpiolib.
+
Authour
-------
diff --git a/Documentation/arm/Samsung-S3C24XX/Overview.txt b/Documentation/arm/Samsung-S3C24XX/Overview.txt
index 081892df4fda..c12bfc1a00c9 100644
--- a/Documentation/arm/Samsung-S3C24XX/Overview.txt
+++ b/Documentation/arm/Samsung-S3C24XX/Overview.txt
@@ -8,10 +8,16 @@ Introduction
The Samsung S3C24XX range of ARM9 System-on-Chip CPUs are supported
by the 's3c2410' architecture of ARM Linux. Currently the S3C2410,
- S3C2412, S3C2413, S3C2440, S3C2442 and S3C2443 devices are supported.
+  S3C2412, S3C2413, S3C2416, S3C2440, S3C2442, S3C2443 and S3C2450 devices
+ are supported.
Support for the S3C2400 and S3C24A0 series are in progress.
+ The S3C2416 and S3C2450 devices are very similar and S3C2450 support is
+ included under the arch/arm/mach-s3c2416 directory. Note, whilst core
+ support for these SoCs is in, work on some of the extra peripherals
+ and extra interrupts is still ongoing.
+
Configuration
-------------
@@ -209,6 +215,13 @@ GPIO
Newer kernels carry GPIOLIB, and support is being moved towards
this with some of the older support in line to be removed.
+ As of v2.6.34, the move towards using gpiolib support is almost
+  complete, and very few of the old calls are left.
+
+ See Documentation/arm/Samsung-S3C24XX/GPIO.txt for the S3C24XX specific
+ support and Documentation/arm/Samsung/GPIO.txt for the core Samsung
+ implementation.
+
Clock Management
----------------
diff --git a/Documentation/arm/Samsung/GPIO.txt b/Documentation/arm/Samsung/GPIO.txt
new file mode 100644
index 000000000000..05850c62abeb
--- /dev/null
+++ b/Documentation/arm/Samsung/GPIO.txt
@@ -0,0 +1,42 @@
+ Samsung GPIO implementation
+ ===========================
+
+Introduction
+------------
+
+This outlines the Samsung GPIO implementation and the architecture
+specific calls provided alongside the drivers/gpio core.
+
+
+S3C24XX (Legacy)
+----------------
+
+See Documentation/arm/Samsung-S3C24XX/GPIO.txt for more information
+about these devices. Their implementation is being brought into line
+with the core samsung implementation described in this document.
+
+
+GPIOLIB integration
+-------------------
+
+The gpio implementation uses gpiolib as much as possible, only providing
+specific calls for the items that require Samsung specific handling, such
+as pin special-function or pull resistor control.
+
+GPIO numbering is synchronised between the Samsung and gpiolib system.
+
+
+PIN configuration
+-----------------
+
+Pin configuration is specific to the Samsung architecture, with each SoC
+registering the necessary information for the core gpio configuration
+implementation to configure pins as necessary.
+
+The s3c_gpio_cfgpin() and s3c_gpio_setpull() calls provide the means for a
+driver or machine to change gpio configuration.
+
+See arch/arm/plat-samsung/include/plat/gpio-cfg.h for more information
+on these functions.
+
+
diff --git a/Documentation/arm/Samsung/Overview.txt b/Documentation/arm/Samsung/Overview.txt
index 7cced1fea9c3..c3094ea51aa7 100644
--- a/Documentation/arm/Samsung/Overview.txt
+++ b/Documentation/arm/Samsung/Overview.txt
@@ -13,9 +13,10 @@ Introduction
- S3C24XX: See Documentation/arm/Samsung-S3C24XX/Overview.txt for full list
- S3C64XX: S3C6400 and S3C6410
- - S5PC6440
-
- S5PC100 and S5PC110 support is currently being merged
+ - S5P6440
+ - S5P6442
+ - S5PC100
+ - S5PC110 / S5PV210
S3C24XX Systems
@@ -35,7 +36,10 @@ Configuration
unifying all the SoCs into one kernel.
s5p6440_defconfig - S5P6440 specific default configuration
+ s5p6442_defconfig - S5P6442 specific default configuration
s5pc100_defconfig - S5PC100 specific default configuration
+ s5pc110_defconfig - S5PC110 specific default configuration
+ s5pv210_defconfig - S5PV210 specific default configuration
Layout
@@ -50,18 +54,27 @@ Layout
specific information. It contains the base clock, GPIO and device definitions
to get the system running.
- plat-s3c is the s3c24xx/s3c64xx platform directory, although it is currently
- involved in other builds this will be phased out once the relevant code is
- moved elsewhere.
-
plat-s3c24xx is for s3c24xx specific builds, see the S3C24XX docs.
- plat-s3c64xx is for the s3c64xx specific bits, see the S3C24XX docs.
+ plat-s5p is for s5p specific builds, and contains common support for the
+ S5P specific systems. Not all S5Ps use all the features in this directory
+ due to differences in the hardware.
+
+
+Layout changes
+--------------
+
+ The old plat-s3c and plat-s5pc1xx directories have been removed, with
+ support moved to either plat-samsung or plat-s5p as necessary. These moves
+  were made to simplify the include and dependency issues involved with having
+ so many different platform directories.
- plat-s5p is for s5p specific builds, more to be added.
+  It was decided to remove plat-s5pc1xx as some of the support was already
+  in plat-s5p or plat-samsung; with the S5PC110 support added alongside the
+  S5PV210, the only remaining user was the S5PC100. The S5PC100 specific
+  items were moved to arch/arm/mach-s5pc100.
- [ to finish ]
Port Contributors
diff --git a/Documentation/arm/Sharp-LH/ADC-LH7-Touchscreen b/Documentation/arm/Sharp-LH/ADC-LH7-Touchscreen
index 1e6a23fdf2fc..dc460f055647 100644
--- a/Documentation/arm/Sharp-LH/ADC-LH7-Touchscreen
+++ b/Documentation/arm/Sharp-LH/ADC-LH7-Touchscreen
@@ -7,7 +7,7 @@ The driver only implements a four-wire touch panel protocol.
The touchscreen driver is maintenance free except for the pen-down or
touch threshold. Some resistive displays and board combinations may
-require tuning of this threshold. The driver exposes some of it's
+require tuning of this threshold. The driver exposes some of its
internal state in the sys filesystem. If the kernel is configured
with it, CONFIG_SYSFS, and sysfs is mounted at /sys, there will be a
directory
diff --git a/Documentation/atomic_ops.txt b/Documentation/atomic_ops.txt
index 396bec3b74ed..ac4d47187122 100644
--- a/Documentation/atomic_ops.txt
+++ b/Documentation/atomic_ops.txt
@@ -320,7 +320,7 @@ counter decrement would not become globally visible until the
obj->active update does.
As a historical note, 32-bit Sparc used to only allow usage of
-24-bits of it's atomic_t type. This was because it used 8 bits
+24-bits of its atomic_t type. This was because it used 8 bits
as a spinlock for SMP safety. Sparc32 lacked a "compare and swap"
type instruction. However, 32-bit Sparc has since been moved over
to a "hash table of spinlocks" scheme, that allows the full 32-bit
diff --git a/Documentation/blackfin/bfin-gpio-notes.txt b/Documentation/blackfin/bfin-gpio-notes.txt
index 9898c7ded7d3..f731c1e56475 100644
--- a/Documentation/blackfin/bfin-gpio-notes.txt
+++ b/Documentation/blackfin/bfin-gpio-notes.txt
@@ -43,7 +43,7 @@
void bfin_gpio_irq_free(unsigned gpio);
The request functions will record the function state for a certain pin,
- the free functions will clear it's function state.
+ the free functions will clear its function state.
Once a pin is requested, it can't be requested again before it is freed by
previous caller, otherwise kernel will dump stacks, and the request
function fail.
diff --git a/Documentation/cachetlb.txt b/Documentation/cachetlb.txt
index 2b5f823abd03..9164ae3b83bc 100644
--- a/Documentation/cachetlb.txt
+++ b/Documentation/cachetlb.txt
@@ -5,7 +5,7 @@
This document describes the cache/tlb flushing interfaces called
by the Linux VM subsystem. It enumerates over each interface,
-describes it's intended purpose, and what side effect is expected
+describes its intended purpose, and what side effect is expected
after the interface is invoked.
The side effects described below are stated for a uniprocessor
@@ -231,7 +231,7 @@ require a whole different set of interfaces to handle properly.
The biggest problem is that of virtual aliasing in the data cache
of a processor.
-Is your port susceptible to virtual aliasing in it's D-cache?
+Is your port susceptible to virtual aliasing in its D-cache?
Well, if your D-cache is virtually indexed, is larger in size than
PAGE_SIZE, and does not prevent multiple cache lines for the same
physical address from existing at once, you have this problem.
@@ -249,7 +249,7 @@ one way to solve this (in particular SPARC_FLAG_MMAPSHARED).
Next, you have to solve the D-cache aliasing issue for all
other cases. Please keep in mind that fact that, for a given page
mapped into some user address space, there is always at least one more
-mapping, that of the kernel in it's linear mapping starting at
+mapping, that of the kernel in its linear mapping starting at
PAGE_OFFSET. So immediately, once the first user maps a given
physical page into its address space, by implication the D-cache
aliasing problem has the potential to exist since the kernel already
diff --git a/Documentation/cgroups/blkio-controller.txt b/Documentation/cgroups/blkio-controller.txt
index 630879cd9a42..48e0b21b0059 100644
--- a/Documentation/cgroups/blkio-controller.txt
+++ b/Documentation/cgroups/blkio-controller.txt
@@ -17,6 +17,9 @@ HOWTO
You can do a very simple testing of running two dd threads in two different
cgroups. Here is what you can do.
+- Enable Block IO controller
+ CONFIG_BLK_CGROUP=y
+
- Enable group scheduling in CFQ
CONFIG_CFQ_GROUP_IOSCHED=y
@@ -54,32 +57,52 @@ cgroups. Here is what you can do.
Various user visible config options
===================================
-CONFIG_CFQ_GROUP_IOSCHED
- - Enables group scheduling in CFQ. Currently only 1 level of group
- creation is allowed.
-
-CONFIG_DEBUG_CFQ_IOSCHED
- - Enables some debugging messages in blktrace. Also creates extra
- cgroup file blkio.dequeue.
-
-Config options selected automatically
-=====================================
-These config options are not user visible and are selected/deselected
-automatically based on IO scheduler configuration.
-
CONFIG_BLK_CGROUP
- - Block IO controller. Selected by CONFIG_CFQ_GROUP_IOSCHED.
+ - Block IO controller.
CONFIG_DEBUG_BLK_CGROUP
- - Debug help. Selected by CONFIG_DEBUG_CFQ_IOSCHED.
+ - Debug help. Right now some additional stats file show up in cgroup
+ if this option is enabled.
+
+CONFIG_CFQ_GROUP_IOSCHED
+ - Enables group scheduling in CFQ. Currently only 1 level of group
+ creation is allowed.
Details of cgroup files
=======================
- blkio.weight
- - Specifies per cgroup weight.
-
+	  - Specifies per cgroup weight. This is the default weight of the group
+	    on all the devices until and unless overridden by a per-device rule
+	    (see blkio.weight_device).
Currently allowed range of weights is from 100 to 1000.
+- blkio.weight_device
+ - One can specify per cgroup per device rules using this interface.
+ These rules override the default value of group weight as specified
+ by blkio.weight.
+
+ Following is the format.
+
+ #echo dev_maj:dev_minor weight > /path/to/cgroup/blkio.weight_device
+ Configure weight=300 on /dev/sdb (8:16) in this cgroup
+ # echo 8:16 300 > blkio.weight_device
+ # cat blkio.weight_device
+ dev weight
+ 8:16 300
+
+ Configure weight=500 on /dev/sda (8:0) in this cgroup
+ # echo 8:0 500 > blkio.weight_device
+ # cat blkio.weight_device
+ dev weight
+ 8:0 500
+ 8:16 300
+
+ Remove specific weight for /dev/sda in this cgroup
+ # echo 8:0 0 > blkio.weight_device
+ # cat blkio.weight_device
+ dev weight
+ 8:16 300
+
- blkio.time
- disk time allocated to cgroup per device in milliseconds. First
two fields specify the major and minor number of the device and
@@ -92,13 +115,105 @@ Details of cgroup files
third field specifies the number of sectors transferred by the
group to/from the device.
+- blkio.io_service_bytes
+ - Number of bytes transferred to/from the disk by the group. These
+ are further divided by the type of operation - read or write, sync
+ or async. First two fields specify the major and minor number of the
+ device, third field specifies the operation type and the fourth field
+ specifies the number of bytes.
+
+- blkio.io_serviced
+ - Number of IOs completed to/from the disk by the group. These
+ are further divided by the type of operation - read or write, sync
+ or async. First two fields specify the major and minor number of the
+ device, third field specifies the operation type and the fourth field
+ specifies the number of IOs.
+
+- blkio.io_service_time
+ - Total amount of time between request dispatch and request completion
+ for the IOs done by this cgroup. This is in nanoseconds to make it
+ meaningful for flash devices too. For devices with queue depth of 1,
+ this time represents the actual service time. When queue_depth > 1,
+ that is no longer true as requests may be served out of order. This
+ may cause the service time for a given IO to include the service time
+ of multiple IOs when served out of order which may result in total
+ io_service_time > actual time elapsed. This time is further divided by
+ the type of operation - read or write, sync or async. First two fields
+ specify the major and minor number of the device, third field
+ specifies the operation type and the fourth field specifies the
+ io_service_time in ns.
+
+- blkio.io_wait_time
+ - Total amount of time the IOs for this cgroup spent waiting in the
+ scheduler queues for service. This can be greater than the total time
+ elapsed since it is cumulative io_wait_time for all IOs. It is not a
+ measure of total time the cgroup spent waiting but rather a measure of
+ the wait_time for its individual IOs. For devices with queue_depth > 1
+ this metric does not include the time spent waiting for service once
+ the IO is dispatched to the device but till it actually gets serviced
+ (there might be a time lag here due to re-ordering of requests by the
+ device). This is in nanoseconds to make it meaningful for flash
+ devices too. This time is further divided by the type of operation -
+ read or write, sync or async. First two fields specify the major and
+ minor number of the device, third field specifies the operation type
+ and the fourth field specifies the io_wait_time in ns.
+
+- blkio.io_merged
+ - Total number of bios/requests merged into requests belonging to this
+ cgroup. This is further divided by the type of operation - read or
+ write, sync or async.
+
+- blkio.io_queued
+ - Total number of requests queued up at any given instant for this
+ cgroup. This is further divided by the type of operation - read or
+ write, sync or async.
+
+- blkio.avg_queue_size
+ - Debugging aid only enabled if CONFIG_DEBUG_BLK_CGROUP=y.
+ The average queue size for this cgroup over the entire time of this
+ cgroup's existence. Queue size samples are taken each time one of the
+ queues of this cgroup gets a timeslice.
+
+- blkio.group_wait_time
+ - Debugging aid only enabled if CONFIG_DEBUG_BLK_CGROUP=y.
+ This is the amount of time the cgroup had to wait since it became busy
+ (i.e., went from 0 to 1 request queued) to get a timeslice for one of
+ its queues. This is different from the io_wait_time which is the
+ cumulative total of the amount of time spent by each IO in that cgroup
+ waiting in the scheduler queue. This is in nanoseconds. If this is
+ read when the cgroup is in a waiting (for timeslice) state, the stat
+ will only report the group_wait_time accumulated till the last time it
+ got a timeslice and will not include the current delta.
+
+- blkio.empty_time
+ - Debugging aid only enabled if CONFIG_DEBUG_BLK_CGROUP=y.
+ This is the amount of time a cgroup spends without any pending
+ requests when not being served, i.e., it does not include any time
+ spent idling for one of the queues of the cgroup. This is in
+ nanoseconds. If this is read when the cgroup is in an empty state,
+ the stat will only report the empty_time accumulated till the last
+ time it had a pending request and will not include the current delta.
+
+- blkio.idle_time
+ - Debugging aid only enabled if CONFIG_DEBUG_BLK_CGROUP=y.
+ This is the amount of time spent by the IO scheduler idling for a
+	  given cgroup in anticipation of a better request than the existing ones
+ from other queues/cgroups. This is in nanoseconds. If this is read
+ when the cgroup is in an idling state, the stat will only report the
+ idle_time accumulated till the last idle period and will not include
+ the current delta.
+
- blkio.dequeue
- - Debugging aid only enabled if CONFIG_DEBUG_CFQ_IOSCHED=y. This
+ - Debugging aid only enabled if CONFIG_DEBUG_BLK_CGROUP=y. This
gives the statistics about how many a times a group was dequeued
from service tree of the device. First two fields specify the major
and minor number of the device and third field specifies the number
of times a group was dequeued from a particular device.
+- blkio.reset_stats
+ - Writing an int to this file will result in resetting all the stats
+ for that cgroup.
+
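+
+	  For illustration only, a minimal userspace sketch that reads one of the
+	  per-device stat files described above; the cgroup path is a placeholder
+	  and error handling is kept to a minimum.
+
+	  /* Minimal sketch: dump blkio.io_service_bytes for one cgroup. */
+	  #include <stdio.h>
+
+	  int main(void)
+	  {
+	          unsigned int major, minor;
+	          unsigned long long value;
+	          char op[16];
+	          FILE *f = fopen("/cgroups/test1/blkio.io_service_bytes", "r");
+
+	          if (!f)
+	                  return 1;
+	          /* Each line is "<major>:<minor> <operation> <value>"; the final
+	           * "Total <value>" summary line does not match and ends the loop. */
+	          while (fscanf(f, "%u:%u %15s %llu", &major, &minor, op, &value) == 4)
+	                  printf("%u:%u %s %llu bytes\n", major, minor, op, value);
+	          fclose(f);
+	          return 0;
+	  }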
CFQ sysfs tunable
=================
/sys/block/<disk>/queue/iosched/group_isolation
diff --git a/Documentation/cgroups/cgroups.txt b/Documentation/cgroups/cgroups.txt
index a1ca5924faff..b34823ff1646 100644
--- a/Documentation/cgroups/cgroups.txt
+++ b/Documentation/cgroups/cgroups.txt
@@ -339,7 +339,7 @@ To mount a cgroup hierarchy with all available subsystems, type:
The "xxx" is not interpreted by the cgroup code, but will appear in
/proc/mounts so may be any useful identifying string that you like.
-To mount a cgroup hierarchy with just the cpuset and numtasks
+To mount a cgroup hierarchy with just the cpuset and memory
subsystems, type:
# mount -t cgroup -o cpuset,memory hier1 /dev/cgroup
@@ -572,7 +572,7 @@ void cancel_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
Called when a task attach operation has failed after can_attach() has succeeded.
A subsystem whose can_attach() has some side-effects should provide this
-function, so that the subsytem can implement a rollback. If not, not necessary.
+function, so that the subsystem can implement a rollback. If not, not necessary.
This will be called only about subsystems whose can_attach() operation have
succeeded.
diff --git a/Documentation/cgroups/cpusets.txt b/Documentation/cgroups/cpusets.txt
index 4160df82b3f5..51682ab2dd1a 100644
--- a/Documentation/cgroups/cpusets.txt
+++ b/Documentation/cgroups/cpusets.txt
@@ -42,7 +42,7 @@ Nodes to a set of tasks. In this document "Memory Node" refers to
an on-line node that contains memory.
Cpusets constrain the CPU and Memory placement of tasks to only
-the resources within a tasks current cpuset. They form a nested
+the resources within a task's current cpuset. They form a nested
hierarchy visible in a virtual file system. These are the essential
hooks, beyond what is already present, required to manage dynamic
job placement on large systems.
@@ -53,11 +53,11 @@ Documentation/cgroups/cgroups.txt.
Requests by a task, using the sched_setaffinity(2) system call to
include CPUs in its CPU affinity mask, and using the mbind(2) and
set_mempolicy(2) system calls to include Memory Nodes in its memory
-policy, are both filtered through that tasks cpuset, filtering out any
+policy, are both filtered through that task's cpuset, filtering out any
CPUs or Memory Nodes not in that cpuset. The scheduler will not
schedule a task on a CPU that is not allowed in its cpus_allowed
vector, and the kernel page allocator will not allocate a page on a
-node that is not allowed in the requesting tasks mems_allowed vector.
+node that is not allowed in the requesting task's mems_allowed vector.
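For illustration, a small userspace sketch of this filtering on the CPU side:
requested CPUs outside the calling task's cpuset are masked out, and if nothing
remains the call fails. The CPU numbers below are arbitrary placeholders.

	#define _GNU_SOURCE
	#include <stdio.h>
	#include <sched.h>
	#include <errno.h>
	#include <string.h>

	int main(void)
	{
		cpu_set_t set;

		CPU_ZERO(&set);
		CPU_SET(0, &set);
		CPU_SET(1, &set);

		/* CPUs not in the task's cpuset are filtered out; an empty
		 * intersection makes the call fail. */
		if (sched_setaffinity(0, sizeof(set), &set) < 0)
			printf("rejected: %s\n", strerror(errno));
		else
			printf("affinity set within the cpuset\n");
		return 0;
	}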
User level code may create and destroy cpusets by name in the cgroup
virtual file system, manage the attributes and permissions of these
@@ -121,9 +121,9 @@ Cpusets extends these two mechanisms as follows:
- Each task in the system is attached to a cpuset, via a pointer
in the task structure to a reference counted cgroup structure.
- Calls to sched_setaffinity are filtered to just those CPUs
- allowed in that tasks cpuset.
+ allowed in that task's cpuset.
- Calls to mbind and set_mempolicy are filtered to just
- those Memory Nodes allowed in that tasks cpuset.
+ those Memory Nodes allowed in that task's cpuset.
- The root cpuset contains all the systems CPUs and Memory
Nodes.
- For any cpuset, one can define child cpusets containing a subset
@@ -141,11 +141,11 @@ into the rest of the kernel, none in performance critical paths:
- in init/main.c, to initialize the root cpuset at system boot.
- in fork and exit, to attach and detach a task from its cpuset.
- in sched_setaffinity, to mask the requested CPUs by what's
- allowed in that tasks cpuset.
+ allowed in that task's cpuset.
- in sched.c migrate_live_tasks(), to keep migrating tasks within
the CPUs allowed by their cpuset, if possible.
- in the mbind and set_mempolicy system calls, to mask the requested
- Memory Nodes by what's allowed in that tasks cpuset.
+ Memory Nodes by what's allowed in that task's cpuset.
- in page_alloc.c, to restrict memory to allowed nodes.
- in vmscan.c, to restrict page recovery to the current cpuset.
@@ -155,7 +155,7 @@ new system calls are added for cpusets - all support for querying and
modifying cpusets is via this cpuset file system.
The /proc/<pid>/status file for each task has four added lines,
-displaying the tasks cpus_allowed (on which CPUs it may be scheduled)
+displaying the task's cpus_allowed (on which CPUs it may be scheduled)
and mems_allowed (on which Memory Nodes it may obtain memory),
in the two formats seen in the following example:
@@ -323,17 +323,17 @@ stack segment pages of a task.
By default, both kinds of memory spreading are off, and memory
pages are allocated on the node local to where the task is running,
-except perhaps as modified by the tasks NUMA mempolicy or cpuset
+except perhaps as modified by the task's NUMA mempolicy or cpuset
configuration, so long as sufficient free memory pages are available.
When new cpusets are created, they inherit the memory spread settings
of their parent.
Setting memory spreading causes allocations for the affected page
-or slab caches to ignore the tasks NUMA mempolicy and be spread
+or slab caches to ignore the task's NUMA mempolicy and be spread
instead. Tasks using mbind() or set_mempolicy() calls to set NUMA
mempolicies will not notice any change in these calls as a result of
-their containing tasks memory spread settings. If memory spreading
+their containing task's memory spread settings. If memory spreading
is turned off, then the currently specified NUMA mempolicy once again
applies to memory page allocations.
@@ -357,7 +357,7 @@ pages from the node returned by cpuset_mem_spread_node().
The cpuset_mem_spread_node() routine is also simple. It uses the
value of a per-task rotor cpuset_mem_spread_rotor to select the next
-node in the current tasks mems_allowed to prefer for the allocation.
+node in the current task's mems_allowed to prefer for the allocation.
This memory placement policy is also known (in other contexts) as
round-robin or interleave.
@@ -594,7 +594,7 @@ is attached, is subtle.
If a cpuset has its Memory Nodes modified, then for each task attached
to that cpuset, the next time that the kernel attempts to allocate
a page of memory for that task, the kernel will notice the change
-in the tasks cpuset, and update its per-task memory placement to
+in the task's cpuset, and update its per-task memory placement to
remain within the new cpusets memory placement. If the task was using
mempolicy MPOL_BIND, and the nodes to which it was bound overlap with
its new cpuset, then the task will continue to use whatever subset
@@ -603,13 +603,13 @@ was using MPOL_BIND and now none of its MPOL_BIND nodes are allowed
in the new cpuset, then the task will be essentially treated as if it
was MPOL_BIND bound to the new cpuset (even though its NUMA placement,
as queried by get_mempolicy(), doesn't change). If a task is moved
-from one cpuset to another, then the kernel will adjust the tasks
+from one cpuset to another, then the kernel will adjust the task's
memory placement, as above, the next time that the kernel attempts
to allocate a page of memory for that task.
If a cpuset has its 'cpuset.cpus' modified, then each task in that cpuset
will have its allowed CPU placement changed immediately. Similarly,
-if a tasks pid is written to another cpusets 'cpuset.tasks' file, then its
+if a task's pid is written to another cpusets 'cpuset.tasks' file, then its
allowed CPU placement is changed immediately. If such a task had been
bound to some subset of its cpuset using the sched_setaffinity() call,
the task will be allowed to run on any CPU allowed in its new cpuset,
@@ -626,16 +626,16 @@ cpusets memory placement policy 'cpuset.mems' subsequently changes.
If the cpuset flag file 'cpuset.memory_migrate' is set true, then when
tasks are attached to that cpuset, any pages that task had
allocated to it on nodes in its previous cpuset are migrated
-to the tasks new cpuset. The relative placement of the page within
+to the task's new cpuset. The relative placement of the page within
the cpuset is preserved during these migration operations if possible.
For example if the page was on the second valid node of the prior cpuset
then the page will be placed on the second valid node of the new cpuset.
-Also if 'cpuset.memory_migrate' is set true, then if that cpusets
+Also if 'cpuset.memory_migrate' is set true, then if that cpuset's
'cpuset.mems' file is modified, pages allocated to tasks in that
cpuset, that were on nodes in the previous setting of 'cpuset.mems',
will be moved to nodes in the new setting of 'mems.'
-Pages that were not in the tasks prior cpuset, or in the cpusets
+Pages that were not in the task's prior cpuset, or in the cpuset's
prior 'cpuset.mems' setting, will not be moved.
There is an exception to the above. If hotplug functionality is used
@@ -655,7 +655,7 @@ There is a second exception to the above. GFP_ATOMIC requests are
kernel internal allocations that must be satisfied, immediately.
The kernel may drop some request, in rare cases even panic, if a
GFP_ATOMIC alloc fails. If the request cannot be satisfied within
-the current tasks cpuset, then we relax the cpuset, and look for
+the current task's cpuset, then we relax the cpuset, and look for
memory anywhere we can find it. It's better to violate the cpuset
than stress the kernel.
diff --git a/Documentation/cgroups/memcg_test.txt b/Documentation/cgroups/memcg_test.txt
index f7f68b2ac199..b7eececfb195 100644
--- a/Documentation/cgroups/memcg_test.txt
+++ b/Documentation/cgroups/memcg_test.txt
@@ -244,7 +244,7 @@ Under below explanation, we assume CONFIG_MEM_RES_CTRL_SWAP=y.
we have to check if OLDPAGE/NEWPAGE is a valid page after commit().
8. LRU
- Each memcg has its own private LRU. Now, it's handling is under global
+ Each memcg has its own private LRU. Now, its handling is under global
VM's control (means that it's handled under global zone->lru_lock).
Almost all routines around memcg's LRU is called by global LRU's
list management functions under zone->lru_lock().
diff --git a/Documentation/cgroups/memory.txt b/Documentation/cgroups/memory.txt
index 3a6aecd078ba..7781857dc940 100644
--- a/Documentation/cgroups/memory.txt
+++ b/Documentation/cgroups/memory.txt
@@ -1,18 +1,15 @@
Memory Resource Controller
NOTE: The Memory Resource Controller has been generically been referred
-to as the memory controller in this document. Do not confuse memory controller
-used here with the memory controller that is used in hardware.
+ to as the memory controller in this document. Do not confuse memory
+ controller used here with the memory controller that is used in hardware.
-Salient features
-
-a. Enable control of Anonymous, Page Cache (mapped and unmapped) and
- Swap Cache memory pages.
-b. The infrastructure allows easy addition of other types of memory to control
-c. Provides *zero overhead* for non memory controller users
-d. Provides a double LRU: global memory pressure causes reclaim from the
- global LRU; a cgroup on hitting a limit, reclaims from the per
- cgroup LRU
+(For editors)
+In this document:
+	When we mention a cgroup (cgroupfs's directory) with the memory controller,
+	we call it a "memory cgroup". In git logs and source code you will see that
+	patch titles and function names tend to use "memcg"; in this document, we
+	avoid that abbreviation.
Benefits and Purpose of the memory controller
@@ -33,6 +30,45 @@ d. A CD/DVD burner could control the amount of memory used by the
e. There are several other use cases, find one or use the controller just
for fun (to learn and hack on the VM subsystem).
+Current Status: linux-2.6.34-mmotm(development version of 2010/April)
+
+Features:
+ - accounting anonymous pages, file caches, swap caches usage and limiting them.
+ - private LRU and reclaim routine. (system's global LRU and private LRU
+ work independently from each other)
+ - optionally, memory+swap usage can be accounted and limited.
+ - hierarchical accounting
+ - soft limit
+ - moving (recharging) a task's charges when the task migrates is selectable.
+ - usage threshold notifier
+ - oom-killer disable knob and oom-notifier
+ - Root cgroup has no limit controls.
+
+ Kernel memory and Hugepages are not under control yet. We just manage
+ pages on LRU. To add more controls, we have to take care of performance.
+
+Brief summary of control files.
+
+ tasks # attach a task(thread) and show list of threads
+ cgroup.procs # show list of processes
+ cgroup.event_control # an interface for event_fd()
+ memory.usage_in_bytes # show current memory(RSS+Cache) usage.
+ memory.memsw.usage_in_bytes # show current memory+Swap usage
+ memory.limit_in_bytes # set/show limit of memory usage
+ memory.memsw.limit_in_bytes # set/show limit of memory+Swap usage
+ memory.failcnt			 # show the number of times memory usage hit the limit
+ memory.memsw.failcnt		 # show the number of times memory+Swap usage hit the limit
+ memory.max_usage_in_bytes # show max memory usage recorded
+ memory.memsw.max_usage_in_bytes	 # show max memory+Swap usage recorded
+ memory.soft_limit_in_bytes # set/show soft limit of memory usage
+ memory.stat # show various statistics
+ memory.use_hierarchy # set/show hierarchical account enabled
+ memory.force_empty # trigger forced move charge to parent
+ memory.swappiness # set/show swappiness parameter of vmscan
+ (See sysctl's vm.swappiness)
+ memory.move_charge_at_immigrate # set/show controls of moving charges
+ memory.oom_control # set/show oom controls.
+
1. History
The memory controller has a long history. A request for comments for the memory
@@ -106,14 +142,14 @@ the necessary data structures and check if the cgroup that is being charged
is over its limit. If it is then reclaim is invoked on the cgroup.
More details can be found in the reclaim section of this document.
If everything goes well, a page meta-data-structure called page_cgroup is
-allocated and associated with the page. This routine also adds the page to
-the per cgroup LRU.
+updated. page_cgroup has its own LRU on cgroup.
+(*) page_cgroup structure is allocated at boot/memory-hotplug time.
2.2.1 Accounting details
All mapped anon pages (RSS) and cache pages (Page Cache) are accounted.
-(some pages which never be reclaimable and will not be on global LRU
- are not accounted. we just accounts pages under usual vm management.)
+Some pages which are never reclaimable and will not be on the global LRU
+are not accounted. We just account pages under usual VM management.
RSS pages are accounted at page_fault unless they've already been accounted
for earlier. A file page will be accounted for as Page Cache when it's
@@ -121,12 +157,19 @@ inserted into inode (radix-tree). While it's mapped into the page tables of
processes, duplicate accounting is carefully avoided.
A RSS page is unaccounted when it's fully unmapped. A PageCache page is
-unaccounted when it's removed from radix-tree.
+unaccounted when it's removed from radix-tree. Even if RSS pages are fully
+unmapped (by kswapd), they may exist as SwapCache in the system until they
+are really freed. Such SwapCaches are also accounted.
+A swapped-in page is not accounted until it's mapped.
+
+Note: The kernel does swapin-readahead and reads multiple swap entries at once.
+This means swapped-in pages may belong to tasks other than the one that
+caused the page fault. So, we avoid accounting at swap-in I/O.
At page migration, accounting information is kept.
-Note: we just account pages-on-lru because our purpose is to control amount
-of used pages. not-on-lru pages are tend to be out-of-control from vm view.
+Note: we just account pages-on-LRU because our purpose is to control amount
+of used pages; not-on-LRU pages tend to be out-of-control from VM view.
2.3 Shared Page Accounting
@@ -143,6 +186,7 @@ caller of swapoff rather than the users of shmem.
2.4 Swap Extension (CONFIG_CGROUP_MEM_RES_CTLR_SWAP)
+
Swap Extension allows you to record charge for swap. A swapped-in page is
charged back to original page allocator if possible.
@@ -150,13 +194,20 @@ When swap is accounted, following files are added.
- memory.memsw.usage_in_bytes.
- memory.memsw.limit_in_bytes.
-usage of mem+swap is limited by memsw.limit_in_bytes.
+memsw means memory+swap. Usage of memory+swap is limited by
+memsw.limit_in_bytes.
-* why 'mem+swap' rather than swap.
+Example: Assume a system with 4G of swap. A task which allocates 6G of memory
+(by mistake) under 2G memory limitation will use all swap.
+In this case, setting memsw.limit_in_bytes=3G will prevent bad use of swap.
+By using memsw limit, you can avoid system OOM which can be caused by swap
+shortage.
+
+* why 'memory+swap' rather than swap.
The global LRU(kswapd) can swap out arbitrary pages. Swap-out means
to move account from memory to swap...there is no change in usage of
-mem+swap. In other words, when we want to limit the usage of swap without
-affecting global LRU, mem+swap limit is better than just limiting swap from
+memory+swap. In other words, when we want to limit the usage of swap without
+affecting global LRU, memory+swap limit is better than just limiting swap from
OS point of view.
* What happens when a cgroup hits memory.memsw.limit_in_bytes
@@ -168,12 +219,12 @@ it by cgroup.
2.5 Reclaim
-Each cgroup maintains a per cgroup LRU that consists of an active
-and inactive list. When a cgroup goes over its limit, we first try
+Each cgroup maintains a per cgroup LRU which has the same structure as
+global VM. When a cgroup goes over its limit, we first try
to reclaim memory from the cgroup so as to make space for the new
pages that the cgroup has touched. If the reclaim is unsuccessful,
an OOM routine is invoked to select and kill the bulkiest task in the
-cgroup.
+cgroup. (See 10. OOM Control below.)
The reclaim algorithm has not been modified for cgroups, except that
pages that are selected for reclaiming come from the per cgroup LRU
@@ -184,13 +235,22 @@ limits on the root cgroup.
Note2: When panic_on_oom is set to "2", the whole system will panic.
-2. Locking
+When an OOM event notifier is registered, the event will be delivered.
+(See oom_control section)
+
+2.6 Locking
-The memory controller uses the following hierarchy
+ lock_page_cgroup()/unlock_page_cgroup() should not be called under
+ mapping->tree_lock.
-1. zone->lru_lock is used for selecting pages to be isolated
-2. mem->per_zone->lru_lock protects the per cgroup LRU (per zone)
-3. lock_page_cgroup() is used to protect page->page_cgroup
+ The other lock order is as follows:
+ PG_locked.
+ mm->page_table_lock
+ zone->lru_lock
+ lock_page_cgroup.
+ In many cases, just lock_page_cgroup() is called.
+ The per-zone-per-cgroup LRU (cgroup's private LRU) is just guarded by
+ zone->lru_lock; it has no lock of its own.
3. User Interface
@@ -199,6 +259,7 @@ The memory controller uses the following hierarchy
a. Enable CONFIG_CGROUPS
b. Enable CONFIG_RESOURCE_COUNTERS
c. Enable CONFIG_CGROUP_MEM_RES_CTLR
+d. Enable CONFIG_CGROUP_MEM_RES_CTLR_SWAP (to use swap extension)
1. Prepare the cgroups
# mkdir -p /cgroups
@@ -206,31 +267,28 @@ c. Enable CONFIG_CGROUP_MEM_RES_CTLR
2. Make the new group and move bash into it
# mkdir /cgroups/0
-# echo $$ > /cgroups/0/tasks
+# echo $$ > /cgroups/0/tasks
-Since now we're in the 0 cgroup,
-We can alter the memory limit:
+Since now we're in the 0 cgroup, we can alter the memory limit:
# echo 4M > /cgroups/0/memory.limit_in_bytes
NOTE: We can use a suffix (k, K, m, M, g or G) to indicate values in kilo,
-mega or gigabytes.
+mega or gigabytes. (Here, Kilo, Mega, Giga are Kibibytes, Mebibytes, Gibibytes.)
+
NOTE: We can write "-1" to reset the *.limit_in_bytes(unlimited).
NOTE: We cannot set limits on the root cgroup any more.
# cat /cgroups/0/memory.limit_in_bytes
4194304
-NOTE: The interface has now changed to display the usage in bytes
-instead of pages
-
We can check the usage:
# cat /cgroups/0/memory.usage_in_bytes
1216512
A successful write to this file does not guarantee a successful set of
-this limit to the value written into the file. This can be due to a
+this limit to the value written into the file. This can be due to a
number of factors, such as rounding up to page boundaries or the total
-availability of memory on the system. The user is required to re-read
+availability of memory on the system. The user is required to re-read
this file after a write to guarantee the value committed by the kernel.
# echo 1 > memory.limit_in_bytes
@@ -245,15 +303,23 @@ caches, RSS and Active pages/Inactive pages are shown.
4. Testing
-Balbir posted lmbench, AIM9, LTP and vmmstress results [10] and [11].
-Apart from that v6 has been tested with several applications and regular
-daily use. The controller has also been tested on the PPC64, x86_64 and
-UML platforms.
+For testing features and implementation, see memcg_test.txt.
+
+Performance testing is also important. To see the pure memory controller's
+overhead, testing on tmpfs will give you good numbers for its small overheads.
+Example: do a kernel make on tmpfs.
+
+Page-fault scalability is also important. When measuring parallel page
+faults, a multi-process test may be better than a multi-thread test because
+the latter has noise from shared objects/status.
+
+But the above two test extreme situations.
+Trying a usual workload under the memory controller is always helpful.
4.1 Troubleshooting
Sometimes a user might find that the application under a cgroup is
-terminated. There are several causes for this:
+terminated by OOM killer. There are several causes for this:
1. The cgroup limit is too low (just too low to do anything useful)
2. The user is using anonymous memory and swap is turned off or too low
@@ -261,23 +327,29 @@ terminated. There are several causes for this:
A sync followed by echo 1 > /proc/sys/vm/drop_caches will help get rid of
some of the pages cached in the cgroup (page cache pages).
+To know what happens, disabling the OOM killer (see 10. OOM Control below)
+and observing what happens will be helpful.
+
4.2 Task migration
-When a task migrates from one cgroup to another, it's charge is not
+When a task migrates from one cgroup to another, its charge is not
carried forward by default. The pages allocated from the original cgroup still
remain charged to it, the charge is dropped when the page is freed or
reclaimed.
-Note: You can move charges of a task along with task migration. See 8.
+You can move charges of a task along with task migration.
+See 8. "Move charges at task migration"
4.3 Removing a cgroup
A cgroup can be removed by rmdir, but as discussed in sections 4.1 and 4.2, a
cgroup might have some charge associated with it, even though all
-tasks have migrated away from it.
-Such charges are freed(at default) or moved to its parent. When moved,
-both of RSS and CACHES are moved to parent.
-If both of them are busy, rmdir() returns -EBUSY. See 5.1 Also.
+tasks have migrated away from it (because we charge against pages, not
+against tasks).
+
+Such charges are freed or moved to the parent cgroup. When moved, both RSS
+and CACHES are moved to the parent.
+rmdir() may return -EBUSY if freeing/moving fails. See 5.1 also.
Charges recorded in swap information is not updated at removal of cgroup.
Recorded information is discarded and a cgroup which uses swap (swapcache)
@@ -293,10 +365,10 @@ will be charged as a new owner of it.
# echo 0 > memory.force_empty
- Almost all pages tracked by this memcg will be unmapped and freed. Some of
- pages cannot be freed because it's locked or in-use. Such pages are moved
- to parent and this cgroup will be empty. But this may return -EBUSY in
- some too busy case.
+ Almost all pages tracked by this memory cgroup will be unmapped and freed.
+ Some pages cannot be freed because they are locked or in-use. Such pages are
+ moved to parent and this cgroup will be empty. This may return -EBUSY if
+ VM is too busy to free/move all pages immediately.
Typical use case of this interface is that calling this before rmdir().
Because rmdir() moves all pages to parent, some out-of-use page caches can be
@@ -306,19 +378,41 @@ will be charged as a new owner of it.
memory.stat file includes following statistics
+# per-memory cgroup local status
cache - # of bytes of page cache memory.
rss - # of bytes of anonymous and swap cache memory.
+mapped_file - # of bytes of mapped file (includes tmpfs/shmem)
pgpgin - # of pages paged in (equivalent to # of charging events).
pgpgout - # of pages paged out (equivalent to # of uncharging events).
-active_anon - # of bytes of anonymous and swap cache memory on active
- lru list.
+swap - # of bytes of swap usage
inactive_anon - # of bytes of anonymous memory and swap cache memory on
- inactive lru list.
-active_file - # of bytes of file-backed memory on active lru list.
-inactive_file - # of bytes of file-backed memory on inactive lru list.
+			inactive LRU list.
+active_anon - # of bytes of anonymous and swap cache memory on active
+			LRU list.
+inactive_file - # of bytes of file-backed memory on inactive LRU list.
+active_file - # of bytes of file-backed memory on active LRU list.
unevictable - # of bytes of memory that cannot be reclaimed (mlocked etc).
-The following additional stats are dependent on CONFIG_DEBUG_VM.
+# status considering hierarchy (see memory.use_hierarchy settings)
+
+hierarchical_memory_limit - # of bytes of memory limit with regard to hierarchy
+ under which the memory cgroup is
+hierarchical_memsw_limit - # of bytes of memory+swap limit with regard to
+ hierarchy under which memory cgroup is.
+
+total_cache - sum of all children's "cache"
+total_rss - sum of all children's "rss"
+total_mapped_file	- sum of all children's "mapped_file"
+total_pgpgin - sum of all children's "pgpgin"
+total_pgpgout - sum of all children's "pgpgout"
+total_swap - sum of all children's "swap"
+total_inactive_anon - sum of all children's "inactive_anon"
+total_active_anon - sum of all children's "active_anon"
+total_inactive_file - sum of all children's "inactive_file"
+total_active_file - sum of all children's "active_file"
+total_unevictable - sum of all children's "unevictable"
+
+# The following additional stats are dependent on CONFIG_DEBUG_VM.
inactive_ratio - VM internal parameter. (see mm/page_alloc.c)
recent_rotated_anon - VM internal parameter. (see mm/vmscan.c)
@@ -327,24 +421,37 @@ recent_scanned_anon - VM internal parameter. (see mm/vmscan.c)
recent_scanned_file - VM internal parameter. (see mm/vmscan.c)
Memo:
- recent_rotated means recent frequency of lru rotation.
- recent_scanned means recent # of scans to lru.
+ recent_rotated means recent frequency of LRU rotation.
+ recent_scanned means recent # of scans to LRU.
showing for better debug please see the code for meanings.
Note:
Only anonymous and swap cache memory is listed as part of 'rss' stat.
This should not be confused with the true 'resident set size' or the
- amount of physical memory used by the cgroup. Per-cgroup rss
- accounting is not done yet.
+ amount of physical memory used by the cgroup.
+	'rss + mapped_file' will give you the resident set size of the cgroup.
+	(Note: file and shmem may be shared among other cgroups. In that case,
+	 mapped_file is accounted only when the memory cgroup is the owner of
+	 the page cache.)
5.3 swappiness
- Similar to /proc/sys/vm/swappiness, but affecting a hierarchy of groups only.
- Following cgroups' swappiness can't be changed.
- - root cgroup (uses /proc/sys/vm/swappiness).
- - a cgroup which uses hierarchy and it has child cgroup.
- - a cgroup which uses hierarchy and not the root of hierarchy.
+Similar to /proc/sys/vm/swappiness, but affecting a hierarchy of groups only.
+Following cgroups' swappiness can't be changed.
+- root cgroup (uses /proc/sys/vm/swappiness).
+- a cgroup which uses hierarchy and has other cgroup(s) below it.
+- a cgroup which uses hierarchy and not the root of hierarchy.
+
+5.4 failcnt
+
+A memory cgroup provides memory.failcnt and memory.memsw.failcnt files.
+This failcnt(== failure count) shows the number of times that a usage counter
+hit its limit. When a memory cgroup hits a limit, failcnt increases and
+memory under it will be reclaimed.
+
+You can reset failcnt by writing 0 to failcnt file.
+# echo 0 > .../memory.failcnt
6. Hierarchy support
@@ -363,13 +470,13 @@ hierarchy
In the diagram above, with hierarchical accounting enabled, all memory
usage of e, is accounted to its ancestors up until the root (i.e, c and root),
-that has memory.use_hierarchy enabled. If one of the ancestors goes over its
+that has memory.use_hierarchy enabled. If one of the ancestors goes over its
limit, the reclaim algorithm reclaims from the tasks in the ancestor and the
children of the ancestor.
6.1 Enabling hierarchical accounting and reclaim
-The memory controller by default disables the hierarchy feature. Support
+A memory cgroup by default disables the hierarchy feature. Support
can be enabled by writing 1 to memory.use_hierarchy file of the root cgroup
# echo 1 > memory.use_hierarchy
@@ -379,10 +486,10 @@ The feature can be disabled by
# echo 0 > memory.use_hierarchy
NOTE1: Enabling/disabling will fail if the cgroup already has other
-cgroups created below it.
+ cgroups created below it.
NOTE2: When panic_on_oom is set to "2", the whole system will panic in
-case of an oom event in any cgroup.
+ case of an OOM event in any cgroup.
7. Soft limits
@@ -392,7 +499,7 @@ is to allow control groups to use as much of the memory as needed, provided
a. There is no memory contention
b. They do not exceed their hard limit
-When the system detects memory contention or low memory control groups
+When the system detects memory contention or low memory, control groups
are pushed back to their soft limits. If the soft limit of each control
group is very high, they are pushed back as much as possible to make
sure that one control group does not starve the others of memory.
@@ -406,7 +513,7 @@ it gets invoked from balance_pgdat (kswapd).
7.1 Interface
Soft limits can be setup by using the following commands (in this example we
-assume a soft limit of 256 megabytes)
+assume a soft limit of 256 MiB)
# echo 256M > memory.soft_limit_in_bytes
@@ -442,7 +549,7 @@ Note: Charges are moved only when you move mm->owner, IOW, a leader of a thread
Note: If we cannot find enough space for the task in the destination cgroup, we
try to make space by reclaiming memory. Task migration may fail if we
cannot make enough space.
-Note: It can take several seconds if you move charges in giga bytes order.
+Note: It can take several seconds if you move a large amount of charges.
And if you want disable it again:
@@ -451,21 +558,27 @@ And if you want disable it again:
8.2 Type of charges which can be move
Each bits of move_charge_at_immigrate has its own meaning about what type of
-charges should be moved.
+charges should be moved. But in any case, it must be noted that an account of
+a page or a swap can be moved only when it is charged to the task's current
+(old) memory cgroup.
bit | what type of charges would be moved ?
-----+------------------------------------------------------------------------
0 | A charge of an anonymous page(or swap of it) used by the target task.
| Those pages and swaps must be used only by the target task. You must
| enable Swap Extension(see 2.4) to enable move of swap charges.
-
-Note: Those pages and swaps must be charged to the old cgroup.
-Note: More type of pages(e.g. file cache, shmem,) will be supported by other
- bits in future.
+ -----+------------------------------------------------------------------------
+ 1 | A charge of file pages(normal file, tmpfs file(e.g. ipc shared memory)
+ | and swaps of tmpfs file) mmapped by the target task. Unlike the case of
+ | anonymous pages, file pages(and swaps) in the range mmapped by the task
+ | will be moved even if the task hasn't done page fault, i.e. they might
+ | not be the task's "RSS", but other task's "RSS" that maps the same file.
+ | And mapcount of the page is ignored(the page can be moved even if
+ | page_mapcount(page) > 1). You must enable Swap Extension(see 2.4) to
+ | enable move of swap charges.
8.3 TODO
-- Add support for other types of pages(e.g. file cache, shmem, etc.).
- Implement madvise(2) to let users decide the vma to be moved or not to be
moved.
- All of moving charge operations are done under cgroup_mutex. It's not good
@@ -473,22 +586,61 @@ Note: More type of pages(e.g. file cache, shmem,) will be supported by other
9. Memory thresholds
-Memory controler implements memory thresholds using cgroups notification
+Memory cgroup implements memory thresholds using cgroups notification
API (see cgroups.txt). It allows to register multiple memory and memsw
thresholds and gets notifications when it crosses.
To register a threshold application need:
- - create an eventfd using eventfd(2);
- - open memory.usage_in_bytes or memory.memsw.usage_in_bytes;
- - write string like "<event_fd> <memory.usage_in_bytes> <threshold>" to
- cgroup.event_control.
+- create an eventfd using eventfd(2);
+- open memory.usage_in_bytes or memory.memsw.usage_in_bytes;
+- write string like "<event_fd> <fd of memory.usage_in_bytes> <threshold>" to
+ cgroup.event_control.
Application will be notified through eventfd when memory usage crosses
threshold in any direction.
It's applicable for root and non-root cgroup.
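For illustration, a minimal sketch of this registration sequence from
userspace, assuming a memory cgroup mounted at /cgroups/0; the path and the
32M threshold are placeholders and error handling is omitted.

	#include <stdio.h>
	#include <string.h>
	#include <stdint.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/eventfd.h>

	int main(void)
	{
		char buf[64];
		uint64_t count;
		int efd = eventfd(0, 0);	/* eventfd(2) */
		int ufd = open("/cgroups/0/memory.usage_in_bytes", O_RDONLY);
		int cfd = open("/cgroups/0/cgroup.event_control", O_WRONLY);

		/* "<event_fd> <fd of memory.usage_in_bytes> <threshold>" */
		snprintf(buf, sizeof(buf), "%d %d %llu", efd, ufd, 32ULL << 20);
		write(cfd, buf, strlen(buf));

		/* blocks until usage crosses the threshold in either direction */
		read(efd, &count, sizeof(count));
		printf("threshold crossed %llu time(s)\n",
		       (unsigned long long)count);
		return 0;
	}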
-10. TODO
+10. OOM Control
+
+memory.oom_control file is for OOM notification and other controls.
+
+Memory cgroup implements an OOM notifier using the cgroup notification
+API (see cgroups.txt). It allows registering multiple OOM notification
+deliveries and getting a notification when an OOM occurs.
+
+To register a notifier, an application needs to:
+ - create an eventfd using eventfd(2)
+ - open memory.oom_control file
+ - write string like "<event_fd> <fd of memory.oom_control>" to
+ cgroup.event_control
+
+Application will be notified through eventfd when OOM happens.
+OOM notification doesn't work for root cgroup.
+
+You can disable the OOM-killer by writing "1" to the memory.oom_control file, as:
+
+	# echo 1 > memory.oom_control
+
+This operation is only allowed for the top cgroup of a sub-hierarchy.
+If the OOM-killer is disabled, tasks under the cgroup will hang/sleep
+in the memory cgroup's OOM-waitqueue when they request accountable memory.
+
+To let them run again, you have to relax the memory cgroup's OOM status by
+	* enlarging the limit or reducing usage.
+To reduce usage,
+	* kill some tasks.
+	* move some tasks to another group with account migration.
+	* remove some files (on tmpfs?)
+
+Then, the stopped tasks will work again.
+
+When read, the current OOM status is shown:
+ oom_kill_disable 0 or 1 (if 1, oom-killer is disabled)
+ under_oom 0 or 1 (if 1, the memory cgroup is under OOM, tasks may
+ be stopped.)
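+
+For illustration, a small sketch that disables the OOM killer for one memory
+cgroup and then dumps this status; the cgroup path is a placeholder and error
+handling is omitted.
+
+	#include <stdio.h>
+	#include <fcntl.h>
+	#include <unistd.h>
+
+	int main(void)
+	{
+		char status[256];
+		ssize_t n;
+		int fd = open("/cgroups/0/memory.oom_control", O_RDWR);
+
+		write(fd, "1", 1);		/* oom_kill_disable = 1 */
+
+		lseek(fd, 0, SEEK_SET);
+		n = read(fd, status, sizeof(status) - 1);
+		if (n > 0) {
+			status[n] = '\0';
+			fputs(status, stdout);	/* oom_kill_disable / under_oom */
+		}
+		close(fd);
+		return 0;
+	}
+
+Registering for OOM notification follows the same eventfd pattern shown in
+section 9, except that the string written to cgroup.event_control is
+"<event_fd> <fd of memory.oom_control>".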
+
+11. TODO
1. Add support for accounting huge pages (as a separate controller)
2. Make per-cgroup scanner reclaim not-shared pages first
diff --git a/Documentation/connector/connector.txt b/Documentation/connector/connector.txt
index 78c9466a9aa8..e5c5f5e6ab70 100644
--- a/Documentation/connector/connector.txt
+++ b/Documentation/connector/connector.txt
@@ -88,7 +88,7 @@ int cn_netlink_send(struct cn_msg *msg, u32 __groups, int gfp_mask);
int gfp_mask - GFP mask.
Note: When registering new callback user, connector core assigns
- netlink group to the user which is equal to it's id.idx.
+ netlink group to the user which is equal to its id.idx.
/*****************************************/
Protocol description.
diff --git a/Documentation/credentials.txt b/Documentation/credentials.txt
index df03169782ea..a2db35287003 100644
--- a/Documentation/credentials.txt
+++ b/Documentation/credentials.txt
@@ -408,9 +408,6 @@ This should be used inside the RCU read lock, as in the following example:
...
}
-A function need not get RCU read lock to use __task_cred() if it is holding a
-spinlock at the time as this implicitly holds the RCU read lock.
-
Should it be necessary to hold another task's credentials for a long period of
time, and possibly to sleep whilst doing so, then the caller should get a
reference on them using:
@@ -426,17 +423,16 @@ credentials, hiding the RCU magic from the caller:
uid_t task_uid(task) Task's real UID
uid_t task_euid(task) Task's effective UID
-If the caller is holding a spinlock or the RCU read lock at the time anyway,
-then:
+If the caller is holding the RCU read lock at the time anyway, then:
__task_cred(task)->uid
__task_cred(task)->euid
should be used instead. Similarly, if multiple aspects of a task's credentials
-need to be accessed, RCU read lock or a spinlock should be used, __task_cred()
-called, the result stored in a temporary pointer and then the credential
-aspects called from that before dropping the lock. This prevents the
-potentially expensive RCU magic from being invoked multiple times.
+need to be accessed, RCU read lock should be used, __task_cred() called, the
+result stored in a temporary pointer and then the credential aspects called
+from that before dropping the lock. This prevents the potentially expensive
+RCU magic from being invoked multiple times.
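+
+A minimal sketch of that pattern (the surrounding function and variable names
+are illustrative only):
+
+	const struct cred *cred;
+	uid_t uid;
+	gid_t gid;
+
+	rcu_read_lock();
+	cred = __task_cred(task);	/* one RCU dereference ...           */
+	uid = cred->uid;		/* ... then read as many credential  */
+	gid = cred->gid;		/* fields as needed from the pointer */
+	rcu_read_unlock();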
Should some other single aspect of another task's credentials need to be
accessed, then this can be used:
diff --git a/Documentation/development-process/2.Process b/Documentation/development-process/2.Process
index d750321acd5a..97726eba6102 100644
--- a/Documentation/development-process/2.Process
+++ b/Documentation/development-process/2.Process
@@ -151,7 +151,7 @@ The stages that a patch goes through are, generally:
well.
- Wider review. When the patch is getting close to ready for mainline
- inclusion, it will be accepted by a relevant subsystem maintainer -
+ inclusion, it should be accepted by a relevant subsystem maintainer -
though this acceptance is not a guarantee that the patch will make it
all the way to the mainline. The patch will show up in the maintainer's
subsystem tree and into the staging trees (described below). When the
@@ -159,6 +159,15 @@ The stages that a patch goes through are, generally:
the discovery of any problems resulting from the integration of this
patch with work being done by others.
+- Please note that most maintainers also have day jobs, so merging
+ your patch may not be their highest priority. If your patch is
+ getting feedback about changes that are needed, you should either
+ make those changes or justify why they should not be made. If your
+ patch has no review complaints but is not being merged by its
+ appropriate subsystem or driver maintainer, you should be persistent
+ in updating the patch to the current kernel so that it applies cleanly
+ and keep sending it for review and merging.
+
- Merging into the mainline. Eventually, a successful patch will be
merged into the mainline repository managed by Linus Torvalds. More
comments and/or problems may surface at this time; it is important that
@@ -258,12 +267,8 @@ an appropriate subsystem tree or be sent directly to Linus. In a typical
development cycle, approximately 10% of the patches going into the mainline
get there via -mm.
-The current -mm patch can always be found from the front page of
-
- http://kernel.org/
-
-Those who want to see the current state of -mm can get the "-mm of the
-moment" tree, found at:
+The current -mm patch is available in the "mmotm" (-mm of the moment)
+directory at:
http://userweb.kernel.org/~akpm/mmotm/
@@ -298,6 +303,12 @@ volatility of linux-next tends to make it a difficult development target.
See http://lwn.net/Articles/289013/ for more information on this topic, and
stay tuned; much is still in flux where linux-next is involved.
+Besides the mmotm and linux-next trees, the kernel source tree now contains
+the drivers/staging/ directory and many sub-directories for drivers or
+filesystems that are on their way to being added to the kernel tree
+proper, but they remain in drivers/staging/ while they still need more
+work.
+
2.5: TOOLS
@@ -319,9 +330,9 @@ developers; even if they do not use it for their own work, they'll need git
to keep up with what other developers (and the mainline) are doing.
Git is now packaged by almost all Linux distributions. There is a home
-page at
+page at:
- http://git.or.cz/
+ http://git-scm.com/
That page has pointers to documentation and tutorials. One should be
aware, in particular, of the Kernel Hacker's Guide to git, which has
diff --git a/Documentation/development-process/7.AdvancedTopics b/Documentation/development-process/7.AdvancedTopics
index a2cf74093aa1..837179447e17 100644
--- a/Documentation/development-process/7.AdvancedTopics
+++ b/Documentation/development-process/7.AdvancedTopics
@@ -25,7 +25,7 @@ long document in its own right. Instead, the focus here will be on how git
fits into the kernel development process in particular. Developers who
wish to come up to speed with git will find more information at:
- http://git.or.cz/
+ http://git-scm.com/
http://www.kernel.org/pub/software/scm/git/docs/user-manual.html
diff --git a/Documentation/devices.txt b/Documentation/devices.txt
index 53d64d382343..1d83d124056c 100644
--- a/Documentation/devices.txt
+++ b/Documentation/devices.txt
@@ -443,6 +443,8 @@ Your cooperation is appreciated.
231 = /dev/snapshot System memory snapshot device
232 = /dev/kvm Kernel-based virtual machine (hardware virtualization extensions)
233 = /dev/kmview View-OS A process with a view
+ 234 = /dev/btrfs-control Btrfs control device
+ 235 = /dev/autofs Autofs control device
240-254 Reserved for local use
255 Reserved for MISC_DYNAMIC_MINOR
diff --git a/Documentation/dvb/ci.txt b/Documentation/dvb/ci.txt
index 2ecd834585e6..4a0c2b56e690 100644
--- a/Documentation/dvb/ci.txt
+++ b/Documentation/dvb/ci.txt
@@ -41,7 +41,7 @@ This application requires the following to function properly as of now.
* Cards that fall in this category
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-At present the cards that fall in this category are the Twinhan and it's
+At present the cards that fall in this category are the Twinhan and its
clones, these cards are available as VVMER, Tomato, Hercules, Orange and
so on.
diff --git a/Documentation/dvb/contributors.txt b/Documentation/dvb/contributors.txt
index 4865addebe1c..47c30098dab6 100644
--- a/Documentation/dvb/contributors.txt
+++ b/Documentation/dvb/contributors.txt
@@ -1,7 +1,7 @@
Thanks go to the following people for patches and contributions:
Michael Hunold <m.hunold@gmx.de>
- for the initial saa7146 driver and it's recent overhaul
+ for the initial saa7146 driver and its recent overhaul
Christian Theiss
for his work on the initial Linux DVB driver
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 05df0b7514b6..672be0109d02 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -241,16 +241,6 @@ Who: Thomas Gleixner <tglx@linutronix.de>
---------------------------
-What (Why):
- - xt_recent: the old ipt_recent proc dir
- (superseded by /proc/net/xt_recent)
-
-When: January 2009 or Linux 2.7.0, whichever comes first
-Why: Superseded by newer revisions or modules
-Who: Jan Engelhardt <jengelh@computergmbh.de>
-
----------------------------
-
What: GPIO autorequest on gpio_direction_{input,output}() in gpiolib
When: February 2010
Why: All callers should use explicit gpio_request()/gpio_free().
@@ -520,26 +510,21 @@ Who: Hans de Goede <hdegoede@redhat.com>
----------------------------
-What: corgikbd, spitzkbd, tosakbd driver
-When: 2.6.35
-Files: drivers/input/keyboard/{corgi,spitz,tosa}kbd.c
-Why: We now have a generic GPIO based matrix keyboard driver that
- are fully capable of handling all the keys on these devices.
- The original drivers manipulate the GPIO registers directly
- and so are difficult to maintain.
-Who: Eric Miao <eric.y.miao@gmail.com>
+What: sysfs-class-rfkill state file
+When: Feb 2014
+Files: net/rfkill/core.c
+Why: Documented as obsolete since Feb 2010. This file is limited to 3
+ states while the rfkill drivers can have 4 states.
+Who: anybody or Florian Mickler <florian@mickler.org>
----------------------------
-What: corgi_ssp and corgi_ts driver
-When: 2.6.35
-Files: arch/arm/mach-pxa/corgi_ssp.c, drivers/input/touchscreen/corgi_ts.c
-Why: The corgi touchscreen is now deprecated in favour of the generic
- ads7846.c driver. The noise reduction technique used in corgi_ts.c,
- that's to wait till vsync before ADC sampling, is also integrated into
- ads7846 driver now. Provided that the original driver is not generic
- and is difficult to maintain, it will be removed later.
-Who: Eric Miao <eric.y.miao@gmail.com>
+What: sysfs-class-rfkill claim file
+When: Feb 2012
+Files: net/rfkill/core.c
+Why: It has not been possible to claim an rfkill driver since 2007. This is
+ documented as obsolete since Feb 2010.
+Who: anybody or Florian Mickler <florian@mickler.org>
----------------------------
@@ -564,6 +549,16 @@ Who: Avi Kivity <avi@redhat.com>
----------------------------
+What: xtime, wall_to_monotonic
+When: 2.6.36+
+Files: kernel/time/timekeeping.c include/linux/time.h
+Why: Cleaning up timekeeping internal values. Please use
+ existing timekeeping accessor functions to access
+ the equivalent functionality.
+Who: John Stultz <johnstul@us.ibm.com>
+
+----------------------------
+
What: KVM kernel-allocated memory slots
When: July 2010
Why: Since 2.6.25, kvm supports user-allocated memory slots, which are
@@ -592,6 +587,35 @@ Who: Len Brown <len.brown@intel.com>
----------------------------
+What: iwlwifi 50XX module parameters
+When: 2.6.40
+Why: The "..50" modules parameters were used to configure 5000 series and
+ up devices; different set of module parameters also available for 4965
+ with same functionalities. Consolidate both set into single place
+ in drivers/net/wireless/iwlwifi/iwl-agn.c
+
+Who: Wey-Yi Guy <wey-yi.w.guy@intel.com>
+
+----------------------------
+
+What: iwl4965 alias support
+When: 2.6.40
+Why: Internal alias support has been present in module-init-tools for some
+ time, so the MODULE_ALIAS("iwl4965") boilerplate aliases can be removed
+ with no impact.
+
+Who: Wey-Yi Guy <wey-yi.w.guy@intel.com>
+
+---------------------------
+
+What: xt_NOTRACK
+Files: net/netfilter/xt_NOTRACK.c
+When: April 2011
+Why: Superseded by xt_CT
+Who: Netfilter developer team <netfilter-devel@vger.kernel.org>
+
+---------------------------
+
What: video4linux /dev/vtx teletext API support
When: 2.6.35
Files: drivers/media/video/saa5246a.c drivers/media/video/saa5249.c
@@ -612,3 +636,23 @@ Why: The vtx device nodes have been superseded by vbi device nodes
provided by the vtx API, then that functionality should be build
around the sliced VBI API instead.
Who: Hans Verkuil <hverkuil@xs4all.nl>
+
+----------------------------
+
+What: IRQF_DISABLED
+When: 2.6.36
+Why: The flag is a NOOP as we run interrupt handlers with interrupts disabled
+Who: Thomas Gleixner <tglx@linutronix.de>
+
+----------------------------
+
+What: old ieee1394 subsystem (CONFIG_IEEE1394)
+When: 2.6.37
+Files: drivers/ieee1394/ except init_ohci1394_dma.c
+Why: superseded by drivers/firewire/ (CONFIG_FIREWIRE) which offers more
+ features, better performance, and better security, all with smaller
+ and more modern code base
+Who: Stefan Richter <stefanr@s5r6.in-berlin.de>
+
+----------------------------
+
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index 06bbbed71206..96d4293607ec 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -178,7 +178,7 @@ prototypes:
locking rules:
All except set_page_dirty may block
- BKL PageLocked(page) i_sem
+ BKL PageLocked(page) i_mutex
writepage: no yes, unlocks (see below)
readpage: no yes, unlocks
sync_page: no maybe
@@ -380,7 +380,7 @@ prototypes:
int (*open) (struct inode *, struct file *);
int (*flush) (struct file *);
int (*release) (struct inode *, struct file *);
- int (*fsync) (struct file *, struct dentry *, int datasync);
+ int (*fsync) (struct file *, int datasync);
int (*aio_fsync) (struct kiocb *, int datasync);
int (*fasync) (int, struct file *, int);
int (*lock) (struct file *, int, struct file_lock *);
@@ -429,8 +429,9 @@ check_flags: no
implementations. If your fs is not using generic_file_llseek, you
need to acquire and release the appropriate locks in your ->llseek().
For many filesystems, it is probably safe to acquire the inode
-semaphore. Note some filesystems (i.e. remote ones) provide no
-protection for i_size so you will need to use the BKL.
+mutex or just to use i_size_read() instead.
+Note: this does not protect the file->f_pos against concurrent modifications
+since this is something that userspace has to take care of.
Note: ext2_release() was *the* source of contention on fs-intensive
loads and dropping BKL on ->release() helps to get rid of that (we still
diff --git a/Documentation/filesystems/autofs4-mount-control.txt b/Documentation/filesystems/autofs4-mount-control.txt
index 8f78ded4b648..51986bf08a4d 100644
--- a/Documentation/filesystems/autofs4-mount-control.txt
+++ b/Documentation/filesystems/autofs4-mount-control.txt
@@ -146,7 +146,7 @@ found to be inadequate, in this case. The Generic Netlink system was
used for this as raw Netlink would lead to a significant increase in
complexity. There's no question that the Generic Netlink system is an
elegant solution for common case ioctl functions but it's not a complete
-replacement probably because it's primary purpose in life is to be a
+replacement probably because its primary purpose in life is to be a
message bus implementation rather than specifically an ioctl replacement.
While it would be possible to work around this there is one concern
that lead to the decision to not use it. This is that the autofs
diff --git a/Documentation/filesystems/ceph.txt b/Documentation/filesystems/ceph.txt
index 0660c9f5deef..763d8ebbbebd 100644
--- a/Documentation/filesystems/ceph.txt
+++ b/Documentation/filesystems/ceph.txt
@@ -90,7 +90,7 @@ Mount Options
Specify the IP and/or port the client should bind to locally.
There is normally not much reason to do this. If the IP is not
specified, the client's IP address is determined by looking at the
- address it's connection to the monitor originates from.
+ address its connection to the monitor originates from.
wsize=X
Specify the maximum write size in bytes. By default there is no
diff --git a/Documentation/filesystems/dlmfs.txt b/Documentation/filesystems/dlmfs.txt
index c50bbb2d52b4..1b528b2ad809 100644
--- a/Documentation/filesystems/dlmfs.txt
+++ b/Documentation/filesystems/dlmfs.txt
@@ -47,7 +47,7 @@ You'll want to start heartbeating on a volume which all the nodes in
your lockspace can access. The easiest way to do this is via
ocfs2_hb_ctl (distributed with ocfs2-tools). Right now it requires
that an OCFS2 file system be in place so that it can automatically
-find it's heartbeat area, though it will eventually support heartbeat
+find its heartbeat area, though it will eventually support heartbeat
against raw disks.
Please see the ocfs2_hb_ctl and mkfs.ocfs2 manual pages distributed
diff --git a/Documentation/filesystems/ext3.txt b/Documentation/filesystems/ext3.txt
index 867c5b50cb42..272f80d5f966 100644
--- a/Documentation/filesystems/ext3.txt
+++ b/Documentation/filesystems/ext3.txt
@@ -59,8 +59,19 @@ commit=nrsec (*) Ext3 can be told to sync all its data and metadata
Setting it to very large values will improve
performance.
-barrier=1 This enables/disables barriers. barrier=0 disables
- it, barrier=1 enables it.
+barrier=<0(*)|1> This enables/disables the use of write barriers in
+barrier the jbd code. barrier=0 disables, barrier=1 enables.
+nobarrier (*) This also requires an IO stack which can support
+ barriers, and if jbd gets an error on a barrier
+ write, it will disable again with a warning.
+ Write barriers enforce proper on-disk ordering
+ of journal commits, making volatile disk write caches
+ safe to use, at some performance penalty. If
+ your disks are battery-backed in one way or another,
+ disabling barriers may safely improve performance.
+ The mount options "barrier" and "nobarrier" can
+ also be used to enable or disable barriers, for
+ consistency with other ext3 mount options.
orlov (*) This enables the new Orlov block allocator. It is
enabled by default.
diff --git a/Documentation/filesystems/fiemap.txt b/Documentation/filesystems/fiemap.txt
index 606233cd4618..1b805a0efbb0 100644
--- a/Documentation/filesystems/fiemap.txt
+++ b/Documentation/filesystems/fiemap.txt
@@ -38,7 +38,7 @@ flags, it will return EBADR and the contents of fm_flags will contain
the set of flags which caused the error. If the kernel is compatible
with all flags passed, the contents of fm_flags will be unmodified.
It is up to userspace to determine whether rejection of a particular
-flag is fatal to it's operation. This scheme is intended to allow the
+flag is fatal to its operation. This scheme is intended to allow the
fiemap interface to grow in the future but without losing
compatibility with old software.
@@ -56,7 +56,7 @@ If this flag is set, the kernel will sync the file before mapping extents.
* FIEMAP_FLAG_XATTR
If this flag is set, the extents returned will describe the inodes
-extended attribute lookup tree, instead of it's data tree.
+extended attribute lookup tree, instead of its data tree.
Extent Mapping
@@ -89,7 +89,7 @@ struct fiemap_extent {
};
All offsets and lengths are in bytes and mirror those on disk. It is valid
-for an extents logical offset to start before the request or it's logical
+for an extents logical offset to start before the request or its logical
length to extend past the request. Unless FIEMAP_EXTENT_NOT_ALIGNED is
returned, fe_logical, fe_physical, and fe_length will be aligned to the
block size of the file system. With the exception of extents flagged as
@@ -125,7 +125,7 @@ been allocated for the file yet.
* FIEMAP_EXTENT_DELALLOC
- This will also set FIEMAP_EXTENT_UNKNOWN.
-Delayed allocation - while there is data for this extent, it's
+Delayed allocation - while there is data for this extent, its
physical location has not been allocated yet.
* FIEMAP_EXTENT_ENCODED
@@ -159,7 +159,7 @@ Data is located within a meta data block.
Data is packed into a block with data from other files.
* FIEMAP_EXTENT_UNWRITTEN
-Unwritten extent - the extent is allocated but it's data has not been
+Unwritten extent - the extent is allocated but its data has not been
initialized. This indicates the extent's data will be all zero if read
through the filesystem but the contents are undefined if read directly from
the device.
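
For illustration, a minimal userspace sketch of calling the ioctl with a
fixed-size extent buffer; the file name and extent count are placeholders and
error handling is omitted.

	#include <stdio.h>
	#include <stdlib.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/fs.h>
	#include <linux/fiemap.h>

	int main(void)
	{
		unsigned int i, n = 32;
		int fd = open("/tmp/somefile", O_RDONLY);
		struct fiemap *fm = calloc(1, sizeof(*fm) +
					   n * sizeof(struct fiemap_extent));

		fm->fm_start = 0;
		fm->fm_length = FIEMAP_MAX_OFFSET;	/* map the whole file */
		fm->fm_flags = FIEMAP_FLAG_SYNC;	/* sync before mapping */
		fm->fm_extent_count = n;

		if (ioctl(fd, FS_IOC_FIEMAP, fm) == 0) {
			for (i = 0; i < fm->fm_mapped_extents; i++)
				printf("logical %llu physical %llu len %llu flags 0x%x\n",
				       (unsigned long long)fm->fm_extents[i].fe_logical,
				       (unsigned long long)fm->fm_extents[i].fe_physical,
				       (unsigned long long)fm->fm_extents[i].fe_length,
				       fm->fm_extents[i].fe_flags);
		}
		free(fm);
		close(fd);
		return 0;
	}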
@@ -176,7 +176,7 @@ VFS -> File System Implementation
File systems wishing to support fiemap must implement a ->fiemap callback on
their inode_operations structure. The fs ->fiemap call is responsible for
-defining it's set of supported fiemap flags, and calling a helper function on
+defining its set of supported fiemap flags, and calling a helper function on
each discovered extent:
struct inode_operations {
diff --git a/Documentation/filesystems/fuse.txt b/Documentation/filesystems/fuse.txt
index 397a41adb4c3..13af4a49e7db 100644
--- a/Documentation/filesystems/fuse.txt
+++ b/Documentation/filesystems/fuse.txt
@@ -91,7 +91,7 @@ Mount options
'default_permissions'
By default FUSE doesn't check file access permissions, the
- filesystem is free to implement it's access policy or leave it to
+ filesystem is free to implement its access policy or leave it to
the underlying file access mechanism (e.g. in case of network
filesystems). This option enables permission checking, restricting
access based on file mode. It is usually useful together with the
@@ -171,7 +171,7 @@ or may honor them by sending a reply to the _original_ request, with
the error set to EINTR.
It is also possible that there's a race between processing the
-original request and it's INTERRUPT request. There are two possibilities:
+original request and its INTERRUPT request. There are two possibilities:
1) The INTERRUPT request is processed before the original request is
processed
diff --git a/Documentation/filesystems/gfs2.txt b/Documentation/filesystems/gfs2.txt
index 5e3ab8f3beff..0b59c0200912 100644
--- a/Documentation/filesystems/gfs2.txt
+++ b/Documentation/filesystems/gfs2.txt
@@ -1,7 +1,7 @@
Global File System
------------------
-http://sources.redhat.com/cluster/
+http://sources.redhat.com/cluster/wiki/
GFS is a cluster file system. It allows a cluster of computers to
simultaneously use a block device that is shared between them (with FC,
@@ -36,11 +36,11 @@ GFS2 is not on-disk compatible with previous versions of GFS, but it
is pretty close.
The following man pages can be found at the URL above:
- fsck.gfs2 to repair a filesystem
- gfs2_grow to expand a filesystem online
- gfs2_jadd to add journals to a filesystem online
- gfs2_tool to manipulate, examine and tune a filesystem
+ fsck.gfs2 to repair a filesystem
+ gfs2_grow to expand a filesystem online
+ gfs2_jadd to add journals to a filesystem online
+ gfs2_tool to manipulate, examine and tune a filesystem
gfs2_quota to examine and change quota values in a filesystem
gfs2_convert to convert a gfs filesystem to gfs2 in-place
mount.gfs2 to help mount(8) mount a filesystem
- mkfs.gfs2 to make a filesystem
+ mkfs.gfs2 to make a filesystem
diff --git a/Documentation/filesystems/hpfs.txt b/Documentation/filesystems/hpfs.txt
index fa45c3baed98..74630bd504fb 100644
--- a/Documentation/filesystems/hpfs.txt
+++ b/Documentation/filesystems/hpfs.txt
@@ -103,7 +103,7 @@ to analyze or change OS2SYS.INI.
Codepages
HPFS can contain several uppercasing tables for several codepages and each
-file has a pointer to codepage it's name is in. However OS/2 was created in
+file has a pointer to codepage its name is in. However OS/2 was created in
America where people don't care much about codepages and so multiple codepages
support is quite buggy. I have Czech OS/2 working in codepage 852 on my disk.
Once I booted English OS/2 working in cp 850 and I created a file on my 852
diff --git a/Documentation/filesystems/logfs.txt b/Documentation/filesystems/logfs.txt
index e64c94ba401a..bca42c22a143 100644
--- a/Documentation/filesystems/logfs.txt
+++ b/Documentation/filesystems/logfs.txt
@@ -59,7 +59,7 @@ Levels
------
Garbage collection (GC) may fail if all data is written
-indiscriminately. One requirement of GC is that data is seperated
+indiscriminately. One requirement of GC is that data is separated
roughly according to the distance between the tree root and the data.
Effectively that means all file data is on level 0, indirect blocks
are on levels 1, 2, 3 4 or 5 for 1x, 2x, 3x, 4x or 5x indirect blocks,
@@ -67,7 +67,7 @@ respectively. Inode file data is on level 6 for the inodes and 7-11
for indirect blocks.
Each segment contains objects of a single level only. As a result,
-each level requires its own seperate segment to be open for writing.
+each level requires its own separate segment to be open for writing.
Inode File
----------
@@ -106,9 +106,9 @@ Vim
---
By cleverly predicting the life time of data, it is possible to
-seperate long-living data from short-living data and thereby reduce
+separate long-living data from short-living data and thereby reduce
the GC overhead later. Each type of distinc life expectency (vim) can
-have a seperate segment open for writing. Each (level, vim) tupel can
+have a separate segment open for writing. Each (level, vim) tuple can
be open just once. If an open segment with unknown vim is encountered
at mount time, it is closed and ignored henceforth.
diff --git a/Documentation/filesystems/nfs/nfs41-server.txt b/Documentation/filesystems/nfs/nfs41-server.txt
index 6a53a84afc72..04884914a1c8 100644
--- a/Documentation/filesystems/nfs/nfs41-server.txt
+++ b/Documentation/filesystems/nfs/nfs41-server.txt
@@ -137,7 +137,7 @@ NS*| OPENATTR | OPT | | Section 18.17 |
| READ | REQ | | Section 18.22 |
| READDIR | REQ | | Section 18.23 |
| READLINK | OPT | | Section 18.24 |
-NS | RECLAIM_COMPLETE | REQ | | Section 18.51 |
+ | RECLAIM_COMPLETE | REQ | | Section 18.51 |
| RELEASE_LOCKOWNER | MNI | | N/A |
| REMOVE | REQ | | Section 18.25 |
| RENAME | REQ | | Section 18.26 |
diff --git a/Documentation/filesystems/nfs/rpc-cache.txt b/Documentation/filesystems/nfs/rpc-cache.txt
index 8a382bea6808..ebcaaee21616 100644
--- a/Documentation/filesystems/nfs/rpc-cache.txt
+++ b/Documentation/filesystems/nfs/rpc-cache.txt
@@ -185,7 +185,7 @@ failed lookup meant a definite 'no'.
request/response format
-----------------------
-While each cache is free to use it's own format for requests
+While each cache is free to use its own format for requests
and responses over channel, the following is recommended as
appropriate and support routines are available to help:
Each request or response record should be printable ASCII
diff --git a/Documentation/filesystems/nilfs2.txt b/Documentation/filesystems/nilfs2.txt
index cf6d0d85ca82..d3e7673995eb 100644
--- a/Documentation/filesystems/nilfs2.txt
+++ b/Documentation/filesystems/nilfs2.txt
@@ -50,8 +50,8 @@ NILFS2 supports the following mount options:
(*) == default
nobarrier Disables barriers.
-errors=continue(*) Keep going on a filesystem error.
-errors=remount-ro Remount the filesystem read-only on an error.
+errors=continue Keep going on a filesystem error.
+errors=remount-ro(*) Remount the filesystem read-only on an error.
errors=panic Panic and halt the machine if an error occurs.
cp=n Specify the checkpoint-number of the snapshot to be
mounted. Checkpoints and snapshots are listed by lscp
diff --git a/Documentation/filesystems/ocfs2.txt b/Documentation/filesystems/ocfs2.txt
index c58b9f5ba002..1f7ae144f6d8 100644
--- a/Documentation/filesystems/ocfs2.txt
+++ b/Documentation/filesystems/ocfs2.txt
@@ -80,3 +80,10 @@ user_xattr (*) Enables Extended User Attributes.
nouser_xattr Disables Extended User Attributes.
acl Enables POSIX Access Control Lists support.
noacl (*) Disables POSIX Access Control Lists support.
+resv_level=2 (*) Set how aggressive allocation reservations will be.
+ Valid values are between 0 (reservations off) and 8
+ (maximum space for reservations).
+dir_resv_level= (*) By default, directory reservations will scale with file
+ reservations - users should rarely need to change this
+ value. If allocation reservations are turned off, this
+ option will have no effect.
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index a4f30faa4f1f..9fb6cbe70bde 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -305,7 +305,7 @@ Table 1-4: Contents of the stat files (as of 2.6.30-rc7)
cgtime guest time of the task children in jiffies
..............................................................................
-The /proc/PID/map file containing the currently mapped memory regions and
+The /proc/PID/maps file containing the currently mapped memory regions and
their access permissions.
The format is:
@@ -316,7 +316,7 @@ address perms offset dev inode pathname
08049000-0804a000 rw-p 00001000 03:00 8312 /opt/test
0804a000-0806b000 rw-p 00000000 00:00 0 [heap]
a7cb1000-a7cb2000 ---p 00000000 00:00 0
-a7cb2000-a7eb2000 rw-p 00000000 00:00 0 [threadstack:001ff4b4]
+a7cb2000-a7eb2000 rw-p 00000000 00:00 0
a7eb2000-a7eb3000 ---p 00000000 00:00 0
a7eb3000-a7ed5000 rw-p 00000000 00:00 0
a7ed5000-a8008000 r-xp 00000000 03:00 4222 /lib/libc.so.6
@@ -352,7 +352,6 @@ is not associated with a file:
[stack] = the stack of the main process
[vdso] = the "virtual dynamic shared object",
the kernel system call handler
- [threadstack:xxxxxxxx] = the stack of the thread, xxxxxxxx is the stack size
or if empty, the mapping is anonymous.
@@ -566,6 +565,10 @@ The default_smp_affinity mask applies to all non-active IRQs, which are the
IRQs which have not yet been allocated/activated, and hence which lack a
/proc/irq/[0-9]* directory.
+The node file on an SMP system shows the node to which the device using the IRQ
+reports itself as being attached. This hardware locality information does not
+include information about any possible driver locality preference.
+
prof_cpu_mask specifies which CPUs are to be profiled by the system wide
profiler. Default value is ffffffff (all cpus).
@@ -965,7 +968,7 @@ your system and how much traffic was routed over those devices:
...] 1375103 17405 0 0 0 0 0 0
...] 1703981 5535 0 0 0 3 0 0
-In addition, each Channel Bond interface has it's own directory. For
+In addition, each Channel Bond interface has its own directory. For
example, the bond0 device will have a directory called /proc/net/bond0/.
It will contain information that is specific to that bond, such as the
current slaves of the bond, the link status of the slaves, and how
@@ -1362,7 +1365,7 @@ been accounted as having caused 1MB of write.
In other words: The number of bytes which this process caused to not happen,
by truncating pagecache. A task can cause "negative" IO too. If this task
truncates some dirty pagecache, some IO which another task has been accounted
-for (in it's write_bytes) will not be happening. We _could_ just subtract that
+for (in its write_bytes) will not be happening. We _could_ just subtract that
from the truncating task's write_bytes, but there is information loss in doing
that.
diff --git a/Documentation/filesystems/smbfs.txt b/Documentation/filesystems/smbfs.txt
index f673ef0de0f7..194fb0decd2c 100644
--- a/Documentation/filesystems/smbfs.txt
+++ b/Documentation/filesystems/smbfs.txt
@@ -3,6 +3,6 @@ protocol used by Windows for Workgroups, Windows 95 and Windows NT.
Smbfs was inspired by Samba, the program written by Andrew Tridgell
that turns any Unix host into a file server for DOS or Windows clients.
-Smbfs is a SMB client, but uses parts of samba for it's operation. For
+Smbfs is a SMB client, but uses parts of samba for its operation. For
more info on samba, including documentation, please go to
http://www.samba.org/ and then on to your nearest mirror.
diff --git a/Documentation/filesystems/squashfs.txt b/Documentation/filesystems/squashfs.txt
index b324c033035a..203f7202cc9e 100644
--- a/Documentation/filesystems/squashfs.txt
+++ b/Documentation/filesystems/squashfs.txt
@@ -38,7 +38,8 @@ Hard link support: yes no
Real inode numbers: yes no
32-bit uids/gids: yes no
File creation time: yes no
-Xattr and ACL support: no no
+Xattr support: yes no
+ACL support: no no
Squashfs compresses data, inodes and directories. In addition, inode and
directory data are highly compacted, and packed on byte boundaries. Each
@@ -58,7 +59,7 @@ obtained from this site also.
3. SQUASHFS FILESYSTEM DESIGN
-----------------------------
-A squashfs filesystem consists of seven parts, packed together on a byte
+A squashfs filesystem consists of a maximum of eight parts, packed together on a byte
alignment:
---------------
@@ -80,6 +81,9 @@ alignment:
|---------------|
| uid/gid |
| lookup table |
+ |---------------|
+ | xattr |
+ | table |
---------------
Compressed data blocks are written to the filesystem as files are read from
@@ -192,6 +196,26 @@ This table is stored compressed into metadata blocks. A second index table is
used to locate these. This second index table for speed of access (and because
it is small) is read at mount time and cached in memory.
+3.7 Xattr table
+---------------
+
+The xattr table contains extended attributes for each inode. The xattrs
+for each inode are stored in a list, each list entry containing a type,
+name and value field. The type field encodes the xattr prefix
+("user.", "trusted." etc) and it also encodes how the name/value fields
+should be interpreted. Currently the type indicates whether the value
+is stored inline (in which case the value field contains the xattr value),
+or if it is stored out of line (in which case the value field stores a
+reference to where the actual value is stored). This allows large values
+to be stored out of line, improving scanning and lookup performance, and it
+also allows values to be de-duplicated, the value being stored once, and
+all other occurrences holding an out of line reference to that value.
+
+The xattr lists are packed into compressed 8K metadata blocks.
+To reduce overhead in inodes, rather than storing the on-disk
+location of the xattr list inside each inode, a 32-bit xattr id
+is stored. This xattr id is mapped into the location of the xattr
+list using a second xattr id lookup table.
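+
+As a purely illustrative sketch of this indirection (the structure and field
+names below are invented for this document and are not the on-disk format),
+the id -> xattr list mapping amounts to:
+
+	/* one entry per xattr id in the lookup table */
+	struct xattr_id_entry {
+		unsigned long long	location;	/* start of the xattr list */
+		unsigned int		count;		/* number of xattrs in the list */
+		unsigned int		size;		/* total size of the list */
+	};
+
+	/* map the 32-bit id stored in an inode to its xattr list location */
+	static unsigned long long xattr_list_location(
+			const struct xattr_id_entry *table, unsigned int id)
+	{
+		return table[id].location;
+	}
+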
4. TODOS AND OUTSTANDING ISSUES
-------------------------------
@@ -199,9 +223,7 @@ it is small) is read at mount time and cached in memory.
4.1 Todo list
-------------
-Implement Xattr and ACL support. The Squashfs 4.0 filesystem layout has hooks
-for these but the code has not been written. Once the code has been written
-the existing layout should not require modification.
+Implement ACL support.
4.2 Squashfs internal cache
---------------------------
diff --git a/Documentation/filesystems/sysfs-tagging.txt b/Documentation/filesystems/sysfs-tagging.txt
new file mode 100644
index 000000000000..caaaf1266d8f
--- /dev/null
+++ b/Documentation/filesystems/sysfs-tagging.txt
@@ -0,0 +1,42 @@
+Sysfs tagging
+-------------
+
+(Taken almost verbatim from Eric Biederman's netns tagging patch
+commit msg)
+
+The problem. Network devices show up in sysfs and with the network
+namespace active multiple devices with the same name can show up in
+the same directory, ouch!
+
+To avoid that problem and allow existing applications in network
+namespaces to see the same interface that is currently presented in
+sysfs, sysfs now has tagging directory support.
+
+By using the network namespace pointers as tags to separate out the
+sysfs directory entries, we ensure that we don't have conflicts in the
+directories and that applications only see a limited set of the network
+devices.
+
+Each sysfs directory entry may be tagged with zero or one
+namespaces. A sysfs_dirent is augmented with a void *s_ns. If a
+directory entry is tagged, then sysfs_dirent->s_flags will have a
+flag between KOBJ_NS_TYPE_NONE and KOBJ_NS_TYPES, and s_ns will
+point to the namespace to which it belongs.
+
+Each sysfs superblock's sysfs_super_info contains an array void
+*ns[KOBJ_NS_TYPES]. When a task in a tagging namespace
+kobj_nstype first mounts sysfs, a new superblock is created. It
+will be differentiated from other sysfs mounts by having its
+s_fs_info->ns[kobj_nstype] set to the new namespace. Note that
+through bind mounting and mounts propagation, a task can easily view
+the contents of other namespaces' sysfs mounts. Therefore, when a
+namespace exits, it will call kobj_ns_exit() to invalidate any
+sysfs_dirent->s_ns pointers pointing to it.
+
+Users of this interface (a short sketch follows this list):
+- define a type in the kobj_ns_type enumeration.
+- call kobj_ns_type_register() with its kobj_ns_type_operations which has
+ - current_ns() which returns current's namespace
+ - netlink_ns() which returns a socket's namespace
+  - initial_ns() which returns the initial namespace
+- call kobj_ns_exit() when an individual tag is no longer valid
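+
+A minimal sketch of such a registration (the structure layout, return types
+and the names used below are illustrative assumptions for this document, not
+a verbatim copy of the kernel headers):
+
+	struct kobj_ns_type_operations {
+		enum kobj_ns_type type;
+		const void *(*current_ns)(void);	/* current task's namespace */
+		const void *(*netlink_ns)(struct sock *sk); /* a socket's namespace */
+		const void *(*initial_ns)(void);	/* the initial namespace */
+	};
+
+	/* hypothetical network namespace user */
+	static const struct kobj_ns_type_operations example_net_ns_ops = {
+		.type		= KOBJ_NS_TYPE_NET,
+		.current_ns	= example_current_ns,
+		.netlink_ns	= example_netlink_ns,
+		.initial_ns	= example_initial_ns,
+	};
+
+	kobj_ns_type_register(&example_net_ns_ops);
+	/* ... later, when an individual tag is no longer valid: kobj_ns_exit() */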
diff --git a/Documentation/filesystems/tmpfs.txt b/Documentation/filesystems/tmpfs.txt
index fe09a2cb1858..98ef55124158 100644
--- a/Documentation/filesystems/tmpfs.txt
+++ b/Documentation/filesystems/tmpfs.txt
@@ -94,11 +94,19 @@ NodeList format is a comma-separated list of decimal numbers and ranges,
a range being two hyphen-separated decimal numbers, the smallest and
largest node numbers in the range. For example, mpol=bind:0-3,5,7,9-15
+A memory policy with a valid NodeList will be saved, as specified, for
+use at file creation time. When a task allocates a file in the file
+system, the mount option memory policy will be applied with a NodeList,
+if any, modified by the calling task's cpuset constraints
+[See Documentation/cgroups/cpusets.txt] and any optional flags, listed
+below. If the resulting NodeList is the empty set, the effective memory
+policy for the file will revert to "default" policy.
+
NUMA memory allocation policies have optional flags that can be used in
conjunction with their modes. These optional flags can be specified
when tmpfs is mounted by appending them to the mode before the NodeList.
See Documentation/vm/numa_memory_policy.txt for a list of all available
-memory allocation policy mode flags.
+memory allocation policy mode flags and their effect on memory policy.
=static is equivalent to MPOL_F_STATIC_NODES
=relative is equivalent to MPOL_F_RELATIVE_NODES
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index 3de2f32edd90..94677e7dcb13 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -72,7 +72,7 @@ structure (this is the kernel-side implementation of file
descriptors). The freshly allocated file structure is initialized with
a pointer to the dentry and a set of file operation member functions.
These are taken from the inode data. The open() file method is then
-called so the specific filesystem implementation can do it's work. You
+called so the specific filesystem implementation can do its work. You
can see that this is another switch performed by the VFS. The file
structure is placed into the file descriptor table for the process.
@@ -401,11 +401,16 @@ otherwise noted.
started might not be in the page cache at the end of the
walk).
- truncate: called by the VFS to change the size of a file. The
+ truncate: Deprecated. This will not be called if ->setsize is defined.
+ Called by the VFS to change the size of a file. The
i_size field of the inode is set to the desired size by the
VFS before this method is called. This method is called by
the truncate(2) system call and related functionality.
+ Note: ->truncate and vmtruncate are deprecated. Do not add new
+ instances/calls of these. Filesystems should be converted to do their
+ truncate sequence via ->setattr().
+
permission: called by the VFS to check for access rights on a POSIX-like
filesystem.
@@ -729,7 +734,7 @@ struct file_operations {
int (*open) (struct inode *, struct file *);
int (*flush) (struct file *);
int (*release) (struct inode *, struct file *);
- int (*fsync) (struct file *, struct dentry *, int datasync);
+ int (*fsync) (struct file *, int datasync);
int (*aio_fsync) (struct kiocb *, int datasync);
int (*fasync) (int, struct file *, int);
int (*lock) (struct file *, int, struct file_lock *);
diff --git a/Documentation/filesystems/xfs-delayed-logging-design.txt b/Documentation/filesystems/xfs-delayed-logging-design.txt
new file mode 100644
index 000000000000..d8119e9d2d60
--- /dev/null
+++ b/Documentation/filesystems/xfs-delayed-logging-design.txt
@@ -0,0 +1,816 @@
+XFS Delayed Logging Design
+--------------------------
+
+Introduction to Re-logging in XFS
+---------------------------------
+
+XFS logging is a combination of logical and physical logging. Some objects,
+such as inodes and dquots, are logged in logical format where the details
+logged are made up of the changes to in-core structures rather than on-disk
+structures. Other objects - typically buffers - have their physical changes
+logged. The reason for these differences is to reduce the amount of log space
+required for objects that are frequently logged. Some parts of inodes are more
+frequently logged than others, and inodes are typically more frequently logged
+than any other object (except maybe the superblock buffer) so keeping the
+amount of metadata logged low is of prime importance.
+
+The reason that this is such a concern is that XFS allows multiple separate
+modifications to a single object to be carried in the log at any given time.
+This allows the log to avoid needing to flush each change to disk before
+recording a new change to the object. XFS does this via a method called
+"re-logging". Conceptually, this is quite simple - all it requires is that any
+new change to the object is recorded with a *new copy* of all the existing
+changes in the new transaction that is written to the log.
+
+That is, if we have a sequence of changes A through to F, and the object was
+written to disk after change D, we would see in the log the following series
+of transactions, their contents and the log sequence number (LSN) of the
+transaction:
+
+	Transaction		Contents	LSN
+	   A			   A		   X
+	   B			  A+B		  X+n
+	   C			 A+B+C		 X+n+m
+	   D			A+B+C+D		X+n+m+o
+	    <object written to disk>
+	   E			   E		   Y (> X+n+m+o)
+	   F			  E+F		  Y+p
+
+In other words, each time an object is relogged, the new transaction contains
+the aggregation of all the previous changes currently held only in the log.
+
+This relogging technique also allows objects to be moved forward in the log so
+that an object being relogged does not prevent the tail of the log from ever
+moving forward. This can be seen in the table above by the changing
+(increasing) LSN of each subsequent transaction - the LSN is effectively a
+direct encoding of the location in the log of the transaction.
+
+This relogging is also used to implement long-running, multiple-commit
+transactions. These transactions are known as rolling transactions, and require
+a special log reservation known as a permanent transaction reservation. A
+typical example of a rolling transaction is the removal of extents from an
+inode which can only be done at a rate of two extents per transaction because
+of reservation size limitations. Hence a rolling extent removal transaction
+keeps relogging the inode and btree buffers as they get modified in each
+removal operation. This keeps them moving forward in the log as the operation
+progresses, ensuring that the current operation never gets blocked by itself if the
+log wraps around.
+
+Hence it can be seen that the relogging operation is fundamental to the correct
+working of the XFS journalling subsystem. From the above description, most
+people should be able to see why XFS metadata operations write so much to
+the log - repeated operations to the same objects write the same changes to
+the log over and over again. Worse is the fact that objects tend to get
+dirtier as they get relogged, so each subsequent transaction is writing more
+metadata into the log.
+
+Another feature of the XFS transaction subsystem is that most transactions are
+asynchronous. That is, they don't commit to disk until either a log buffer is
+filled (a log buffer can hold multiple transactions) or a synchronous operation
+forces the log buffers holding the transactions to disk. This means that XFS is
+doing aggregation of transactions in memory - batching them, if you like - to
+minimise the impact of the log IO on transaction throughput.
+
+The limitation on asynchronous transaction throughput is the number and size of
+log buffers made available by the log manager. By default there are 8 log
+buffers available and the size of each is 32kB - the size can be increased up
+to 256kB by use of a mount option.
+
+Effectively, this gives us the maximum bound of outstanding metadata changes
+that can be made to the filesystem at any point in time - if all the log
+buffers are full and under IO, then no more transactions can be committed until
+the current batch completes. It is now common for a single CPU core to
+be able to issue enough transactions to keep the log buffers full and under
+IO permanently. Hence the XFS journalling subsystem can be considered to be IO
+bound.
+
+Delayed Logging: Concepts
+-------------------------
+
+The key thing to note about the asynchronous logging combined with the
+relogging technique XFS uses is that we can be relogging changed objects
+multiple times before they are committed to disk in the log buffers. If we
+return to the previous relogging example, it is entirely possible that
+transactions A through D are committed to disk in the same log buffer.
+
+That is, a single log buffer may contain multiple copies of the same object,
+but only one of those copies needs to be there - the last one "D", as it
+contains all the previous changes. In other words, we have one
+necessary copy in the log buffer, and three stale copies that are simply
+wasting space. When we are doing repeated operations on the same set of
+objects, these "stale objects" can be over 90% of the space used in the log
+buffers. It is clear that reducing the number of stale objects written to the
+log would greatly reduce the amount of metadata we write to the log, and this
+is the fundamental goal of delayed logging.
+
+From a conceptual point of view, XFS is already doing relogging in memory (where
+memory == log buffer), only it is doing it extremely inefficiently. It is using
+logical to physical formatting to do the relogging because there is no
+infrastructure to keep track of logical changes in memory prior to physically
+formatting the changes in a transaction to the log buffer. Hence we cannot avoid
+accumulating stale objects in the log buffers.
+
+Delayed logging is the name we've given to keeping and tracking transactional
+changes to objects in memory outside the log buffer infrastructure. Because of
+the relogging concept fundamental to the XFS journalling subsystem, this is
+actually relatively easy to do - all the changes to logged items are already
+tracked in the current infrastructure. The big problem is how to accumulate
+them and get them to the log in a consistent, recoverable manner.
+Describing the problems and how they have been solved is the focus of this
+document.
+
+One of the key changes that delayed logging makes to the operation of the
+journalling subsystem is that it disassociates the amount of outstanding
+metadata changes from the size and number of log buffers available. In other
+words, instead of there only being a maximum of 2MB of transaction changes not
+written to the log at any point in time, there may be a much greater amount
+being accumulated in memory. Hence the potential for loss of metadata on a
+crash is much greater than for the existing logging mechanism.
+
+It should be noted that this does not change the guarantee that log recovery
+will result in a consistent filesystem. What it does mean is that as far as the
+recovered filesystem is concerned, there may be many thousands of transactions
+that simply did not occur as a result of the crash. This makes it even more
+important that applications that care about their data use fsync() where they
+need to ensure application level data integrity is maintained.
+
+It should be noted that delayed logging is not an innovative new concept that
+warrants rigorous proofs to determine whether it is correct or not. The method
+of accumulating changes in memory for some period before writing them to the
+log is used effectively in many filesystems including ext3 and ext4. Hence
+no time is spent in this document trying to convince the reader that the
+concept is sound. Instead it is simply considered a "solved problem" and as
+such implementing it in XFS is purely an exercise in software engineering.
+
+The fundamental requirements for delayed logging in XFS are simple:
+
+ 1. Reduce the amount of metadata written to the log by at least
+ an order of magnitude.
+ 2. Supply sufficient statistics to validate Requirement #1.
+ 3. Supply sufficient new tracing infrastructure to be able to debug
+ problems with the new code.
+ 4. No on-disk format change (metadata or log format).
+ 5. Enable and disable with a mount option.
+ 6. No performance regressions for synchronous transaction workloads.
+
+Delayed Logging: Design
+-----------------------
+
+Storing Changes
+
+The problem with accumulating changes at a logical level (i.e. just using the
+existing log item dirty region tracking) is that when it comes to writing the
+changes to the log buffers, we need to ensure that the object we are formatting
+is not changing while we do this. This requires locking the object to prevent
+concurrent modification. Hence flushing the logical changes to the log would
+require us to lock every object, format them, and then unlock them again.
+
+This introduces lots of scope for deadlocks with transactions that are already
+running. For example, a transaction has object A locked and modified, but needs
+the delayed logging tracking lock to commit the transaction. However, the
+flushing thread has the delayed logging tracking lock already held, and is
+trying to get the lock on object A to flush it to the log buffer. This appears
+to be an unsolvable deadlock condition, and it was solving this problem that
+was the barrier to implementing delayed logging for so long.
+
+The solution is relatively simple - it just took a long time to recognise it.
+Put simply, the current logging code formats the changes to each item into a
+vector array that points to the changed regions in the item. The log write code
+simply copies the memory these vectors point to into the log buffer during
+transaction commit while the item is locked in the transaction. Instead of
+using the log buffer as the destination of the formatting code, we can use an
+allocated memory buffer big enough to fit the formatted vector.
+
+If we then copy the vector into the memory buffer and rewrite the vector to
+point to the memory buffer rather than the object itself, we now have a copy of
+the changes in a format that is compatible with the log buffer writing code,
+one that does not require us to lock the item to access it. This formatting and
+rewriting can all be done while the object is locked during transaction commit,
+resulting in a vector that is transactionally consistent and can be accessed
+without needing to lock the owning item.
+
+Hence we avoid the need to lock items when we need to flush outstanding
+asynchronous transactions to the log. The differences between the existing
+formatting method and the delayed logging formatting can be seen in the
+diagram below.
+
+Current format log vector:
+
+Object +---------------------------------------------+
+Vector 1 +----+
+Vector 2 +----+
+Vector 3 +----------+
+
+After formatting:
+
+Log Buffer +-V1-+-V2-+----V3----+
+
+Delayed logging vector:
+
+Object +---------------------------------------------+
+Vector 1 +----+
+Vector 2 +----+
+Vector 3 +----------+
+
+After formatting:
+
+Memory Buffer +-V1-+-V2-+----V3----+
+Vector 1 +----+
+Vector 2 +----+
+Vector 3 +----------+
+
+The memory buffer and associated vector need to be passed as a single object,
+but still need to be associated with the parent object so if the object is
+relogged we can replace the current memory buffer with a new memory buffer that
+contains the latest changes.
+
+The reason for keeping the vector around after we've formatted the memory
+buffer is to support splitting vectors across log buffer boundaries correctly.
+If we don't keep the vector around, we do not know where the region boundaries
+are in the item, so we'd need a new encapsulation method for regions in the log
+buffer writing (i.e. double encapsulation). This would be an on-disk format
+change and as such is not desirable. It also means we'd have to write the log
+region headers in the formatting stage, which is problematic as there is per
+region state that needs to be placed into the headers during the log write.
+
+Hence we need to keep the vector, but by attaching the memory buffer to it and
+rewriting the vector addresses to point at the memory buffer we end up with a
+self-describing object that can be passed to the log buffer write code to be
+handled in exactly the same manner as the existing log vectors are handled.
+Hence we avoid needing a new on-disk format to handle items that have been
+relogged in memory.
+
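+The rewriting step can be pictured with a small sketch (the names and types
+below are invented for this document, not taken from the XFS source):
+
+	struct log_vec_region {
+		void	*addr;		/* points at a changed region */
+		int	len;		/* length of that region */
+	};
+
+	/*
+	 * Copy each formatted region into one allocated buffer and rewrite
+	 * the vector to point into that buffer instead of the live object.
+	 */
+	static void detach_changes(struct log_vec_region *vec, int nvecs,
+				   char *buf)
+	{
+		int i;
+
+		for (i = 0; i < nvecs; i++) {
+			memcpy(buf, vec[i].addr, vec[i].len);
+			vec[i].addr = buf;	/* now points at the stable copy */
+			buf += vec[i].len;
+		}
+	}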
+
+Tracking Changes
+
+Now that we can record transactional changes in memory in a form that allows
+them to be used without limitations, we need to be able to track and accumulate
+them so that they can be written to the log at some later point in time. The
+log item is the natural place to store this vector and buffer, and it also
+makes sense for it to be the object used to track committed objects, as it
+will always exist once the object has been included in a transaction.
+
+The log item is already used to track the log items that have been written to
+the log but not yet written to disk. Such log items are considered "active"
+and as such are stored in the Active Item List (AIL), which is an LSN-ordered
+doubly linked list. Items are inserted into this list during log buffer IO
+completion, after which they are unpinned and can be written to disk. An object
+that is in the AIL can be relogged, which causes the object to be pinned again
+and then moved forward in the AIL when the log buffer IO completes for that
+transaction.
+
+Essentially, this shows that an item that is in the AIL can still be modified
+and relogged, so any tracking must be separate from the AIL infrastructure. As
+such, we cannot reuse the AIL list pointers for tracking committed items, nor
+can we store state in any field that is protected by the AIL lock. Hence the
+committed item tracking needs its own locks, lists and state fields in the log
+item.
+
+Similar to the AIL, tracking of committed items is done through a new list
+called the Committed Item List (CIL). The list tracks log items that have been
+committed and have formatted memory buffers attached to them. It tracks objects
+in transaction commit order, so when an object is relogged it is removed from
+its place in the list and re-inserted at the tail. This is entirely arbitrary
+and done to make it easy for debugging - the last items in the list are the
+ones that are most recently modified. Ordering of the CIL is not necessary for
+transactional integrity (as discussed in the next section) so the ordering is
+done for convenience/sanity of the developers.
+
+
+Delayed Logging: Checkpoints
+
+When we have a log synchronisation event, commonly known as a "log force",
+all the items in the CIL must be written into the log via the log buffers.
+We need to write these items in the order that they exist in the CIL, and they
+need to be written as an atomic transaction. The need for all the objects to be
+written as an atomic transaction comes from the requirements of relogging and
+log replay - all the changes in all the objects in a given transaction must
+either be completely replayed during log recovery, or not replayed at all. If
+a transaction is not replayed because it is not complete in the log, then
+no later transactions should be replayed, either.
+
+To fulfill this requirement, we need to write the entire CIL in a single log
+transaction. Fortunately, the XFS log code has no fixed limit on the size of a
+transaction, nor does the log replay code. The only fundamental limit is that
+the transaction cannot be larger than just under half the size of the log. The
+reason for this limit is that to find the head and tail of the log, there must
+be at least one complete transaction in the log at any given time. If a
+transaction is larger than half the log, then there is the possibility that a
+crash during the write of such a transaction could partially overwrite the
+only complete previous transaction in the log. This will result in a recovery
+failure and an inconsistent filesystem and hence we must enforce the maximum
+size of a checkpoint to be slightly less than half the log.
+
+Apart from this size requirement, a checkpoint transaction looks no different
+to any other transaction - it contains a transaction header, a series of
+formatted log items and a commit record at the tail. From a recovery
+perspective, the checkpoint transaction is also no different - just a lot
+bigger with a lot more items in it. The worst case effect of this is that we
+might need to tune the recovery transaction object hash size.
+
+Because the checkpoint is just another transaction and all the changes to log
+items are stored as log vectors, we can use the existing log buffer writing
+code to write the changes into the log. To do this efficiently, we need to
+minimise the time we hold the CIL locked while writing the checkpoint
+transaction. The current log write code enables us to do this easily with the
+way it separates the writing of the transaction contents (the log vectors) from
+the transaction commit record, but tracking this requires us to have a
+per-checkpoint context that travels through the log write process through to
+checkpoint completion.
+
+Hence a checkpoint has a context that tracks the state of the current
+checkpoint from initiation to checkpoint completion. A new context is initiated
+at the same time a checkpoint transaction is started. That is, when we remove
+all the current items from the CIL during a checkpoint operation, we move all
+those changes into the current checkpoint context. We then initialise a new
+context and attach that to the CIL for aggregation of new transactions.
+
+This allows us to unlock the CIL immediately after transfer of all the
+committed items and effectively allow new transactions to be issued while we
+are formatting the checkpoint into the log. It also allows concurrent
+checkpoints to be written into the log buffers in the case of log force heavy
+workloads, just like the existing transaction commit code does. This, however,
+requires that we strictly order the commit records in the log so that
+checkpoint sequence order is maintained during log replay.
+
+To allow an item to be written into a checkpoint transaction at the same time
+another transaction modifies the item and inserts the log item into the new
+CIL, the checkpoint transaction commit code cannot use log items to store the
+list of log vectors that need to be written into the transaction. Hence log
+vectors need to be able to be chained together to allow them to be
+detached from the log items. That is, when the CIL is flushed the memory
+buffer and log vector attached to each log item needs to be attached to the
+checkpoint context so that the log item can be released. In diagrammatic form,
+the CIL would look like this before the flush:
+
+ CIL Head
+ |
+ V
+ Log Item <-> log vector 1 -> memory buffer
+ | -> vector array
+ V
+ Log Item <-> log vector 2 -> memory buffer
+ | -> vector array
+ V
+ ......
+ |
+ V
+ Log Item <-> log vector N-1 -> memory buffer
+ | -> vector array
+ V
+ Log Item <-> log vector N -> memory buffer
+ -> vector array
+
+And after the flush the CIL head is empty, and the checkpoint context log
+vector list would look like:
+
+ Checkpoint Context
+ |
+ V
+ log vector 1 -> memory buffer
+ | -> vector array
+ | -> Log Item
+ V
+ log vector 2 -> memory buffer
+ | -> vector array
+ | -> Log Item
+ V
+ ......
+ |
+ V
+ log vector N-1 -> memory buffer
+ | -> vector array
+ | -> Log Item
+ V
+ log vector N -> memory buffer
+ -> vector array
+ -> Log Item
+
+Once this transfer is done, the CIL can be unlocked and new transactions can
+start, while the checkpoint flush code works over the log vector chain to
+commit the checkpoint.
+
+Once the checkpoint is written into the log buffers, the checkpoint context is
+attached to the log buffer that the commit record was written to along with a
+completion callback. Log IO completion will call that callback, which can then
+run transaction committed processing for the log items (i.e. insert into AIL
+and unpin) in the log vector chain and then free the log vector chain and
+checkpoint context.
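+
+To make the ownership transfer described above a little more concrete, a
+checkpoint context could be pictured as something like the following (the
+field names are invented for this document, not lifted from the
+implementation):
+
+	struct checkpoint_context {
+		unsigned long long	sequence;	/* checkpoint sequence number */
+		struct log_vec		*lv_chain;	/* vectors owned after the flush */
+		unsigned long long	commit_lsn;	/* LSN of the commit record */
+		struct list_head	committing;	/* entry in the committing list */
+		/* completion callback run from log IO completion */
+		void			(*committed)(struct checkpoint_context *ctx);
+	};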
+
+Discussion Point: I am uncertain as to whether the log item is the most
+efficient way to track vectors, even though it seems like the natural way to do
+it. The fact that we walk the log items (in the CIL) just to chain the log
+vectors and break the link between the log item and the log vector means that
+we take a cache line hit for the log item list modification, then another for
+the log vector chaining. If we track by the log vectors, then we only need to
+break the link between the log item and the log vector, which means we should
+dirty only the log item cachelines. Normally I wouldn't be concerned about one
+vs two dirty cachelines except for the fact I've seen upwards of 80,000 log
+vectors in one checkpoint transaction. I'd guess this is a "measure and
+compare" situation that can be done after a working and reviewed implementation
+is in the dev tree....
+
+Delayed Logging: Checkpoint Sequencing
+
+One of the key aspects of the XFS transaction subsystem is that it tags
+committed transactions with the log sequence number of the transaction commit.
+This allows transactions to be issued asynchronously even though there may be
+future operations that cannot be completed until that transaction is fully
+committed to the log. In the rare case that a dependent operation occurs (e.g.
+re-using a freed metadata extent for a data extent), a special, optimised log
+force can be issued to force the dependent transaction to disk immediately.
+
+To do this, transactions need to record the LSN of the commit record of the
+transaction. This LSN comes directly from the log buffer the transaction is
+written into. While this works just fine for the existing transaction
+mechanism, it does not work for delayed logging because transactions are not
+written directly into the log buffers. Hence some other method of sequencing
+transactions is required.
+
+As discussed in the checkpoint section, delayed logging uses per-checkpoint
+contexts, and as such it is simple to assign a sequence number to each
+checkpoint. Because the switching of checkpoint contexts must be done
+atomically, it is simple to ensure that each new context has a monotonically
+increasing sequence number assigned to it without the need for an external
+atomic counter - we can just take the current context sequence number and add
+one to it for the new context.
+
+Then, instead of assigning a log buffer LSN to the transaction commit LSN
+during the commit, we can assign the current checkpoint sequence. This allows
+operations that track transactions that have not yet completed to know what
+checkpoint sequence needs to be committed before they can continue. As a
+result, the code that forces the log to a specific LSN now needs to ensure that
+the log forces to a specific checkpoint.
+
+To ensure that we can do this, we need to track all the checkpoint contexts
+that are currently committing to the log. When we flush a checkpoint, the
+context gets added to a "committing" list which can be searched. When a
+checkpoint commit completes, it is removed from the committing list. Because
+the checkpoint context records the LSN of the commit record for the checkpoint,
+we can also wait on the log buffer that contains the commit record, thereby
+using the existing log force mechanisms to execute synchronous forces.
+
+It should be noted that the synchronous forces may need to be extended with
+mitigation algorithms similar to the current log buffer code to allow
+aggregation of multiple synchronous transactions if there are already
+synchronous transactions being flushed. Investigation of the performance of the
+current design is needed before making any decisions here.
+
+The main concern with log forces is to ensure that all the previous checkpoints
+are also committed to disk before the one we need to wait for. Therefore we
+need to check that all the prior contexts in the committing list are also
+complete before waiting on the one we need to complete. We do this
+synchronisation in the log force code so that we don't need to wait anywhere
+else for such serialisation - it only matters when we do a log force.
+
+The only remaining complexity is that a log force now also has to handle the
+case where the forcing sequence number is the same as the current context. That
+is, we need to flush the CIL and potentially wait for it to complete. This is a
+simple addition to the existing log forcing code to check the sequence numbers
+and push if required. Indeed, placing the current sequence checkpoint flush in
+the log force code enables the current mechanism for issuing synchronous
+transactions to remain untouched (i.e. commit an asynchronous transaction, then
+force the log at the LSN of that transaction) and so the higher level code
+behaves the same regardless of whether delayed logging is being used or not.
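+
+Put as a sketch, reusing the illustrative checkpoint context structure from
+earlier (current_ctx, committing_list, flush_cil() and
+wait_for_commit_record() are all assumptions made for this document):
+
+	/* force the log up to and including checkpoint sequence 'seq' */
+	static void force_to_sequence(unsigned long long seq)
+	{
+		struct checkpoint_context *ctx;
+
+		if (current_ctx->sequence == seq)
+			flush_cil();		/* push the current context too */
+
+		/* wait for this and all prior committing contexts to complete */
+		list_for_each_entry(ctx, &committing_list, committing)
+			if (ctx->sequence <= seq)
+				wait_for_commit_record(ctx);
+	}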
+
+Delayed Logging: Checkpoint Log Space Accounting
+
+The big issue for a checkpoint transaction is the log space reservation for the
+transaction. We don't know how big a checkpoint transaction is going to be
+ahead of time, nor how many log buffers it will take to write out, nor the
+number of split log vector regions that are going to be used. We can track the
+amount of log space required as we add items to the commit item list, but we
+still need to reserve the space in the log for the checkpoint.
+
+A typical transaction reserves enough space in the log for the worst case space
+usage of the transaction. The reservation accounts for log record headers,
+transaction and region headers, headers for split regions, buffer tail padding,
+etc. as well as the actual space for all the changed metadata in the
+transaction. While some of this is fixed overhead, much of it is dependent on
+the size of the transaction and the number of regions being logged (the number
+of log vectors in the transaction).
+
+An example of the differences would be logging directory changes versus logging
+inode changes. If you modify lots of inode cores (e.g. chmod -R g+w *), then
+there are lots of transactions that only contain an inode core and an inode log
+format structure. That is, two vectors totaling roughly 150 bytes. If we modify
+10,000 inodes, we have about 1.5MB of metadata to write in 20,000 vectors. Each
+vector is 12 bytes, so the total to be logged is approximately 1.75MB. In
+comparison, if we are logging full directory buffers, they are typically 4KB
+each, so in 1.5MB of directory buffers we'd have roughly 400 buffers and a
+buffer format structure for each buffer - roughly 800 vectors or 1.51MB total
+space. From this, it should be obvious that a static log space reservation is
+not particularly flexible, and it is difficult to select the "optimal value" for
+all workloads.
+
+Further, if we are going to use a static reservation, which bit of the entire
+reservation does it cover? We account for space used by the transaction
+reservation by tracking the space currently used by the object in the CIL and
+then calculating the increase or decrease in space used as the object is
+relogged. This allows for a checkpoint reservation to only have to account for
+log buffer metadata used such as log header records.
+
+However, even using a static reservation for just the log metadata is
+problematic. Typically log record headers use at least 16KB of log space per
+1MB of log space consumed (512 bytes per 32k) and the reservation needs to be
+large enough to handle arbitrary sized checkpoint transactions. This
+reservation needs to be made before the checkpoint is started, and we need to
+be able to reserve the space without sleeping. For an 8MB checkpoint, we need a
+reservation of around 150KB, which is a non-trivial amount of space.
+
+A static reservation needs to manipulate the log grant counters - we can take a
+permanent reservation on the space, but we still need to make sure we refresh
+the write reservation (the actual space available to the transaction) after
+every checkpoint transaction completion. Unfortunately, if this space is not
+available when required, then the regrant code will sleep waiting for it.
+
+The problem with this is that it can lead to deadlocks as we may need to commit
+checkpoints to be able to free up log space (refer back to the description of
+rolling transactions for an example of this). Hence we *must* always have
+space available in the log if we are to use static reservations, and that is
+very difficult and complex to arrange. It is possible to do, but there is a
+simpler way.
+
+The simpler way of doing this is tracking the entire log space used by the
+items in the CIL and using this to dynamically calculate the amount of log
+space required by the log metadata. If this log metadata space changes as a
+result of a transaction commit inserting a new memory buffer into the CIL, then
+the difference in space required is removed from the transaction that causes
+the change. Transactions at this level will *always* have enough space
+available in their reservation for this as they have already reserved the
+maximal amount of log metadata space they require, and such a delta reservation
+will always be less than or equal to the maximal amount in the reservation.
+
+Hence we can grow the checkpoint transaction reservation dynamically as items
+are added to the CIL and avoid the need for reserving and regranting log space
+up front. This avoids deadlocks and removes a blocking point from the
+checkpoint flush code.
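+
+A rough sketch of this accounting (the types and names here are invented for
+this document):
+
+	struct cil		{ long space_used; };	/* per-checkpoint running total */
+	struct cil_item		{ int old_len; };	/* last formatted size of the item */
+	struct reservation	{ long space_left; };	/* committing transaction's reservation */
+
+	/*
+	 * On (re-)insertion into the CIL, charge only the change in formatted
+	 * size to the committing transaction's reservation.
+	 */
+	static void cil_account_item(struct cil *cil, struct cil_item *item,
+				     int new_len, struct reservation *res)
+	{
+		int diff = new_len - item->old_len;	/* may be negative */
+
+		cil->space_used += diff;
+		res->space_left -= diff;
+		item->old_len = new_len;
+	}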
+
+As mentioned earlier, transactions can't grow to more than half the size of the
+log. Hence as part of the reservation growing, we need to also check the size
+of the reservation against the maximum allowed transaction size. If we reach
+the maximum threshold, we need to push the CIL to the log. This is effectively
+a "background flush" and is done on demand. This is identical to
+a CIL push triggered by a log force, except that there is no waiting for the
+checkpoint commit to complete. This background push is checked and executed by
+transaction commit code.
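+
+In sketch form (names again invented for this document), the check amounts to:
+
+	/* after accounting a commit, push in the background past the threshold */
+	if (cil->space_used >= max_checkpoint_size(log))
+		push_cil_background(cil);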
+
+If the transaction subsystem goes idle while we still have items in the CIL,
+they will be flushed by the periodic log force issued by the xfssyncd. This log
+force will push the CIL to disk, and if the transaction subsystem stays idle,
+allow the idle log to be covered (effectively marked clean) in exactly the same
+manner that is done for the existing logging method. A discussion point is
+whether this log force needs to be done more frequently than the current rate
+which is once every 30s.
+
+
+Delayed Logging: Log Item Pinning
+
+Currently log items are pinned during transaction commit while the items are
+still locked. This happens just after the items are formatted, though it could
+be done any time before the items are unlocked. The result of this mechanism is
+that items get pinned once for every transaction that is committed to the log
+buffers. Hence items that are relogged in the log buffers will have a pin count
+for every outstanding transaction they were dirtied in. When each of these
+transactions is completed, they will unpin the item once. As a result, the item
+only becomes unpinned when all the transactions complete and there are no
+pending transactions. Thus the pinning and unpinning of a log item is symmetric
+as there is a 1:1 relationship with transaction commit and log item completion.
+
+For delayed logging, however, we have an asymmetric transaction commit to
+completion relationship. Every time an object is relogged in the CIL it goes
+through the commit process without a corresponding completion being registered.
+That is, we now have a many-to-one relationship between transaction commit and
+log item completion. The result of this is that pinning and unpinning of the
+log items becomes unbalanced if we retain the "pin on transaction commit, unpin
+on transaction completion" model.
+
+To keep pin/unpin symmetry, the algorithm needs to change to a "pin on
+insertion into the CIL, unpin on checkpoint completion". In other words, the
+pinning and unpinning becomes symmetric around a checkpoint context. We have to
+pin the object the first time it is inserted into the CIL - if it is already in
+the CIL during a transaction commit, then we do not pin it again. Because there
+can be multiple outstanding checkpoint contexts, we can still see elevated pin
+counts, but as each checkpoint completes the pin count will retain the correct
+value according to its context.
+
+Just to make matters slightly more complex, this checkpoint level context
+for the pin count means that the pinning of an item must take place under the
+CIL commit/flush lock. If we pin the object outside this lock, we cannot
+guarantee which context the pin count is associated with. This is because
+pinning the item depends on whether the item is present in the
+current CIL or not. If we don't lock out the CIL flush before we check and pin
+the object, we have a race with the CIL being flushed between the check and the pin
+(or not pinning, as the case may be). Hence we must hold the CIL flush/commit
+lock to guarantee that we pin the items correctly.
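+
+Expressed as a sketch (this extends the illustrative structures used earlier
+in this document with a pin flag, a CIL list entry and an item list;
+pin_item() and unpin_item() are likewise assumptions):
+
+	/* called during transaction commit, under the CIL commit/flush lock */
+	static void cil_insert_item(struct cil *cil, struct cil_item *item)
+	{
+		if (!item->in_cil) {
+			pin_item(item);			/* first insertion: pin once */
+			item->in_cil = true;
+		} else {
+			list_del(&item->cil_entry);	/* relogged: move to the tail */
+		}
+		list_add_tail(&item->cil_entry, &cil->items);
+	}
+
+	/* run from checkpoint (log IO) completion for each item it contained */
+	static void checkpoint_item_completed(struct cil_item *item)
+	{
+		unpin_item(item);	/* balances the pin taken at first insertion */
+	}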
+
+Delayed Logging: Concurrent Scalability
+
+A fundamental requirement for the CIL is that accesses through transaction
+commits must scale to many concurrent commits. The current transaction commit
+code does not break down even when there are transactions coming from 2048
+processors at once. The current transaction code does not go any faster than if
+there was only one CPU using it, but it does not slow down either.
+
+As a result, the delayed logging transaction commit code needs to be designed
+for concurrency from the ground up. It is obvious that there are serialisation
+points in the design - the three important ones are:
+
+ 1. Locking out new transaction commits while flushing the CIL
+ 2. Adding items to the CIL and updating item space accounting
+ 3. Checkpoint commit ordering
+
+Looking at the transaction commit and CIL flushing interactions, it is clear
+that we have a many-to-one interaction here. That is, the only restriction on
+the number of concurrent transactions that can be trying to commit at once is
+the amount of space available in the log for their reservations. The practical
+limit here is in the order of several hundred concurrent transactions for a
+128MB log, which means that it is generally one per CPU in a machine.
+
+The amount of time a transaction commit needs to hold out a flush is a
+relatively long period of time - the pinning of log items needs to be done
+while we are holding out a CIL flush, so at the moment that means it is held
+across the formatting of the objects into memory buffers (i.e. while memcpy()s
+are in progress). Ultimately a two pass algorithm where the formatting is done
+separately to the pinning of objects could be used to reduce the hold time of
+the transaction commit side.
+
+Because of the number of potential transaction commit side holders, the lock
+really needs to be a sleeping lock - if the CIL flush takes the lock, we do not
+want every other CPU in the machine spinning on the CIL lock. Given that
+flushing the CIL could involve walking a list of tens of thousands of log
+items, it will get held for a significant time and so spin contention is a
+significant concern. Preventing lots of CPUs spinning doing nothing is the
+main reason for choosing a sleeping lock even though nothing in either the
+transaction commit or CIL flush side sleeps with the lock held.
+
+It should also be noted that CIL flushing is also a relatively rare operation
+compared to transaction commit for asynchronous transaction workloads - only
+time will tell if using a read-write semaphore for exclusion will limit
+transaction commit concurrency due to cache line bouncing of the lock on the
+read side.
+
+The second serialisation point is on the transaction commit side where items
+are inserted into the CIL. Because transactions can enter this code
+concurrently, the CIL needs to be protected separately from the above
+commit/flush exclusion. It also needs to be an exclusive lock but it is only
+held for a very short time and so a spin lock is appropriate here. It is
+possible that this lock will become a contention point, but given the short
+hold time once per transaction I think that contention is unlikely.
+
+The final serialisation point is the checkpoint commit record ordering code
+that is run as part of the checkpoint commit and log force sequencing. The code
+path that triggers a CIL flush (i.e. whatever triggers the log force) will enter
+an ordering loop after writing all the log vectors into the log buffers but
+before writing the commit record. This loop walks the list of committing
+checkpoints and needs to block waiting for checkpoints to complete their commit
+record write. As a result it needs a lock and a wait variable. Log force
+sequencing also requires the same lock, list walk, and blocking mechanism to
+ensure completion of checkpoints.
+
+These two sequencing operations can use the same mechanism even though the
+events they are waiting for are different. The checkpoint commit record
+sequencing needs to wait until checkpoint contexts contain a commit LSN
+(obtained through completion of a commit record write) while log force
+sequencing needs to wait until previous checkpoint contexts are removed from
+the committing list (i.e. they've completed). A simple wait variable and
+broadcast wakeups (thundering herds) have been used to implement these two
+serialisation queues. They use the same lock as the CIL, too. If we see too
+much contention on the CIL lock, or too many context switches as a result of
+the broadcast wakeups, these operations can be put under a new spinlock and
+given separate wait lists to reduce lock contention and the number of processes
+woken by the wrong event.
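+
+The ordering loop described above amounts to "wait until every earlier
+committing checkpoint has recorded a commit LSN". In the kernel this is a
+spinlock plus a wait queue; pthreads has no direct equivalent, so the sketch
+below (hypothetical names, not the XFS code) uses a mutex and a condition
+variable with broadcast wakeups:
+
+        #include <pthread.h>
+
+        struct ckpt_ctx {
+                struct ckpt_ctx *next;
+                long            sequence;
+                long            commit_lsn;     /* 0 until the record is written */
+        };
+
+        static pthread_mutex_t cil_lock = PTHREAD_MUTEX_INITIALIZER;
+        static pthread_cond_t  commit_wait = PTHREAD_COND_INITIALIZER;
+        static struct ckpt_ctx *committing;     /* list of committing checkpoints */
+
+        /* Block until all checkpoints earlier than @seq have a commit LSN. */
+        static void wait_for_prior_commits(long seq)
+        {
+                struct ckpt_ctx *c;
+
+                pthread_mutex_lock(&cil_lock);
+        restart:
+                for (c = committing; c; c = c->next) {
+                        if (c->sequence >= seq)
+                                continue;
+                        if (c->commit_lsn == 0) {
+                                pthread_cond_wait(&commit_wait, &cil_lock);
+                                goto restart;   /* the list may have changed */
+                        }
+                }
+                pthread_mutex_unlock(&cil_lock);
+        }
+
+        /* Called once a checkpoint's commit record has been written. */
+        static void commit_record_written(struct ckpt_ctx *c, long lsn)
+        {
+                pthread_mutex_lock(&cil_lock);
+                c->commit_lsn = lsn;
+                pthread_cond_broadcast(&commit_wait);   /* thundering herd */
+                pthread_mutex_unlock(&cil_lock);
+        }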
+
+
+Lifecycle Changes
+
+The existing log item life cycle is as follows:
+
+ 1. Transaction allocate
+ 2. Transaction reserve
+ 3. Lock item
+ 4. Join item to transaction
+ If not already attached,
+ Allocate log item
+ Attach log item to owner item
+ Attach log item to transaction
+ 5. Modify item
+ Record modifications in log item
+ 6. Transaction commit
+ Pin item in memory
+ Format item into log buffer
+ Write commit LSN into transaction
+ Unlock item
+ Attach transaction to log buffer
+
+ <log buffer IO dispatched>
+ <log buffer IO completes>
+
+ 7. Transaction completion
+ Mark log item committed
+ Insert log item into AIL
+ Write commit LSN into log item
+ Unpin log item
+ 8. AIL traversal
+ Lock item
+ Mark log item clean
+ Flush item to disk
+
+ <item IO completion>
+
+ 9. Log item removed from AIL
+ Moves log tail
+ Item unlocked
+
+Essentially, steps 1-6 operate independently from step 7, which is also
+independent of steps 8-9. An item can be locked in steps 1-6 or steps 8-9 at
+the same time step 7 is occurring, but only one of steps 1-6 or steps 8-9 can
+be active at any one time. If the log item is in the AIL or between steps 6
+and 7 and steps 1-6 are re-entered, then the item is relogged. Only when steps
+8-9 are entered and completed is the object considered clean.
+
+With delayed logging, there are new steps inserted into the life cycle:
+
+ 1. Transaction allocate
+ 2. Transaction reserve
+ 3. Lock item
+ 4. Join item to transaction
+ If not already attached,
+ Allocate log item
+ Attach log item to owner item
+ Attach log item to transaction
+ 5. Modify item
+ Record modifications in log item
+ 6. Transaction commit
+ Pin item in memory if not pinned in CIL
+ Format item into log vector + buffer
+ Attach log vector and buffer to log item
+ Insert log item into CIL
+ Write CIL context sequence into transaction
+ Unlock item
+
+ <next log force>
+
+ 7. CIL push
+ lock CIL flush
+ Chain log vectors and buffers together
+ Remove items from CIL
+ unlock CIL flush
+ write log vectors into log
+ sequence commit records
+ attach checkpoint context to log buffer
+
+ <log buffer IO dispatched>
+ <log buffer IO completes>
+
+ 8. Checkpoint completion
+ Mark log item committed
+ Insert item into AIL
+ Write commit LSN into log item
+ Unpin log item
+ 9. AIL traversal
+ Lock item
+ Mark log item clean
+ Flush item to disk
+ <item IO completion>
+ 10. Log item removed from AIL
+ Moves log tail
+ Item unlocked
+
+From this, it can be seen that the only life cycle differences between the two
+logging methods are in the middle of the life cycle - they still have the same
+beginning and end and execution constraints. The differences lie in the
+committing of the log items to the log itself and in the completion processing.
+Hence delayed logging should not introduce any constraints on log item
+behaviour, allocation or freeing that don't already exist.
+
+As a result of this zero-impact "insertion" of delayed logging infrastructure
+and the design of the internal structures to avoid on disk format changes, we
+can basically switch between delayed logging and the existing mechanism with a
+mount option. Fundamentally, there is no reason why the log manager would not
+be able to swap methods automatically and transparently depending on load
+characteristics, but this should not be necessary if delayed logging works as
+designed.
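+
+For example, assuming the experimental option keeps the name used during
+development ('delaylog', with 'nodelaylog' selecting the existing mechanism),
+enabling it would be as simple as:
+
+        # mount -o delaylog /dev/sdb1 /mnt/scratch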
+
+Roadmap:
+
+2.6.35 Inclusion in mainline as an experimental mount option
+ => approximately 2-3 months to merge window
+ => needs to be in xfs-dev tree in 4-6 weeks
+ => code is nearing readiness for review
+
+2.6.37 Remove experimental tag from mount option
+ => should be roughly 6 months after initial merge
+ => enough time to:
+ => gain confidence and fix problems reported by early
+ adopters (a.k.a. guinea pigs)
+ => address worst performance regressions and undesired
+ behaviours
+ => start tuning/optimising code for parallelism
+ => start tuning/optimising algorithms consuming
+ excessive CPU time
+
+2.6.39 Switch default mount option to use delayed logging
+ => should be roughly 12 months after initial merge
+ => enough time to shake out remaining problems before next round of
+ enterprise distro kernel rebases
diff --git a/Documentation/hwmon/dme1737 b/Documentation/hwmon/dme1737
index 001d2e70bc11..fc5df7654d63 100644
--- a/Documentation/hwmon/dme1737
+++ b/Documentation/hwmon/dme1737
@@ -9,11 +9,15 @@ Supported chips:
* SMSC SCH3112, SCH3114, SCH3116
Prefix: 'sch311x'
Addresses scanned: none, address read from Super-I/O config space
- Datasheet: http://www.nuhorizons.com/FeaturedProducts/Volume1/SMSC/311x.pdf
+ Datasheet: Available on the Internet
* SMSC SCH5027
Prefix: 'sch5027'
Addresses scanned: I2C 0x2c, 0x2d, 0x2e
Datasheet: Provided by SMSC upon request and under NDA
+ * SMSC SCH5127
+ Prefix: 'sch5127'
+ Addresses scanned: none, address read from Super-I/O config space
+ Datasheet: Provided by SMSC upon request and under NDA
Authors:
Juerg Haefliger <juergh@gmail.com>
@@ -36,8 +40,8 @@ Description
-----------
This driver implements support for the hardware monitoring capabilities of the
-SMSC DME1737 and Asus A8000 (which are the same), SMSC SCH5027, and SMSC
-SCH311x Super-I/O chips. These chips feature monitoring of 3 temp sensors
+SMSC DME1737 and Asus A8000 (which are the same), SMSC SCH5027, SCH311x,
+and SCH5127 Super-I/O chips. These chips feature monitoring of 3 temp sensors
temp[1-3] (2 remote diodes and 1 internal), 7 voltages in[0-6] (6 external and
1 internal) and up to 6 fan speeds fan[1-6]. Additionally, the chips implement
up to 5 PWM outputs pwm[1-3,5-6] for controlling fan speeds both manually and
@@ -48,14 +52,14 @@ Fan[3-6] and pwm[3,5-6] are optional features and their availability depends on
the configuration of the chip. The driver will detect which features are
present during initialization and create the sysfs attributes accordingly.
-For the SCH311x, fan[1-3] and pwm[1-3] are always present and fan[4-6] and
-pwm[5-6] don't exist.
+For the SCH311x and SCH5127, fan[1-3] and pwm[1-3] are always present and
+fan[4-6] and pwm[5-6] don't exist.
The hardware monitoring features of the DME1737, A8000, and SCH5027 are only
-accessible via SMBus, while the SCH311x only provides access via the ISA bus.
-The driver will therefore register itself as an I2C client driver if it detects
-a DME1737, A8000, or SCH5027 and as a platform driver if it detects a SCH311x
-chip.
+accessible via SMBus, while the SCH311x and SCH5127 only provide access via
+the ISA bus. The driver will therefore register itself as an I2C client driver
+if it detects a DME1737, A8000, or SCH5027 and as a platform driver if it
+detects a SCH311x or SCH5127 chip.
Voltage Monitoring
@@ -76,7 +80,7 @@ DME1737, A8000:
in6: Vbat (+3.0V) 0V - 4.38V
SCH311x:
- in0: +2.5V 0V - 6.64V
+ in0: +2.5V 0V - 3.32V
in1: Vccp (processor core) 0V - 2V
in2: VCC (internal +3.3V) 0V - 4.38V
in3: +5V 0V - 6.64V
@@ -93,6 +97,15 @@ SCH5027:
in5: VTR (+3.3V standby) 0V - 4.38V
in6: Vbat (+3.0V) 0V - 4.38V
+SCH5127:
+ in0: +2.5 0V - 3.32V
+ in1: Vccp (processor core) 0V - 3V
+ in2: VCC (internal +3.3V) 0V - 4.38V
+ in3: V2_IN 0V - 1.5V
+ in4: V1_IN 0V - 1.5V
+ in5: VTR (+3.3V standby) 0V - 4.38V
+ in6: Vbat (+3.0V) 0V - 4.38V
+
Each voltage input has associated min and max limits which trigger an alarm
when crossed.
@@ -293,3 +306,21 @@ pwm[1-3]_auto_point1_pwm RW Auto PWM pwm point. Auto_point1 is the
pwm[1-3]_auto_point2_pwm RO Auto PWM pwm point. Auto_point2 is the
full-speed duty-cycle which is hard-
wired to 255 (100% duty-cycle).
+
+Chip Differences
+----------------
+
+Feature dme1737 sch311x sch5027 sch5127
+-------------------------------------------------------
+temp[1-3]_offset yes yes
+vid yes
+zone3 yes yes yes
+zone[1-3]_hyst yes yes
+pwm min/off yes yes
+fan3 opt yes opt yes
+pwm3 opt yes opt yes
+fan4 opt opt
+fan5 opt opt
+pwm5 opt opt
+fan6 opt opt
+pwm6 opt opt
diff --git a/Documentation/hwmon/lm63 b/Documentation/hwmon/lm63
index 31660bf97979..b9843eab1afb 100644
--- a/Documentation/hwmon/lm63
+++ b/Documentation/hwmon/lm63
@@ -7,6 +7,11 @@ Supported chips:
Addresses scanned: I2C 0x4c
Datasheet: Publicly available at the National Semiconductor website
http://www.national.com/pf/LM/LM63.html
+ * National Semiconductor LM64
+ Prefix: 'lm64'
+ Addresses scanned: I2C 0x18 and 0x4e
+ Datasheet: Publicly available at the National Semiconductor website
+ http://www.national.com/pf/LM/LM64.html
Author: Jean Delvare <khali@linux-fr.org>
@@ -55,3 +60,5 @@ The lm63 driver will not update its values more frequently than every
second; reading them more often will do no harm, but will return 'old'
values.
+The LM64 is effectively an LM63 with GPIO lines. The driver does not
+support these GPIO lines at present.
diff --git a/Documentation/hwmon/lm85 b/Documentation/hwmon/lm85
index a13680871bc7..a76aefeeb68a 100644
--- a/Documentation/hwmon/lm85
+++ b/Documentation/hwmon/lm85
@@ -157,7 +157,7 @@ temperature configuration points:
There are three PWM outputs. The LM85 datasheet suggests that the
pwm3 output control both fan3 and fan4. Each PWM can be individually
-configured and assigned to a zone for it's control value. Each PWM can be
+configured and assigned to a zone for its control value. Each PWM can be
configured individually according to the following options.
* pwm#_auto_pwm_min - this specifies the PWM value for temp#_auto_temp_off
diff --git a/Documentation/hwmon/ltc4245 b/Documentation/hwmon/ltc4245
index 02838a47d862..86b5880d8502 100644
--- a/Documentation/hwmon/ltc4245
+++ b/Documentation/hwmon/ltc4245
@@ -72,9 +72,7 @@ in6_min_alarm 5v output undervoltage alarm
in7_min_alarm 3v output undervoltage alarm
in8_min_alarm Vee (-12v) output undervoltage alarm
-in9_input GPIO #1 voltage data
-in10_input GPIO #2 voltage data
-in11_input GPIO #3 voltage data
+in9_input GPIO voltage data
power1_input 12v power usage (mW)
power2_input 5v power usage (mW)
diff --git a/Documentation/hwmon/sysfs-interface b/Documentation/hwmon/sysfs-interface
index 3de6b0bcb147..d4e2917c6f18 100644
--- a/Documentation/hwmon/sysfs-interface
+++ b/Documentation/hwmon/sysfs-interface
@@ -80,9 +80,9 @@ All entries (except name) are optional, and should only be created in a
given driver if the chip has the feature.
-********
-* Name *
-********
+*********************
+* Global attributes *
+*********************
name The chip name.
This should be a short, lowercase string, not containing
@@ -91,6 +91,13 @@ name The chip name.
I2C devices get this attribute created automatically.
RO
+update_rate The rate at which the chip will update readings.
+ Unit: millisecond
+ RW
+ Some devices have a variable update rate. This attribute
+ can be used to change the update rate to the desired
+			value.
+
************
* Voltages *
diff --git a/Documentation/hwmon/tmp102 b/Documentation/hwmon/tmp102
new file mode 100644
index 000000000000..8454a7763122
--- /dev/null
+++ b/Documentation/hwmon/tmp102
@@ -0,0 +1,26 @@
+Kernel driver tmp102
+====================
+
+Supported chips:
+ * Texas Instruments TMP102
+ Prefix: 'tmp102'
+ Addresses scanned: none
+ Datasheet: http://focus.ti.com/docs/prod/folders/print/tmp102.html
+
+Author:
+ Steven King <sfking@fdwdc.com>
+
+Description
+-----------
+
+The Texas Instruments TMP102 implements one temperature sensor. Limits can be
+set through the Overtemperature Shutdown register and Hysteresis register. The
+sensor is accurate to 0.5 degree over the range of -25 to +85 C, and to 1.0
+degree from -40 to +125 C. Resolution of the sensor is 0.0625 degree. The
+operating temperature has a minimum of -55 C and a maximum of +150 C.
+
+The TMP102 has a programmable update rate that can select between 8, 4, 1, and
+0.5 Hz. (Currently the driver only supports the default of 4 Hz).
+
+The driver provides the common sysfs-interface for temperatures (see
+Documentation/hwmon/sysfs-interface under Temperatures).
diff --git a/Documentation/i2c/busses/i2c-i801 b/Documentation/i2c/busses/i2c-i801
index e1bb5b261693..e307914a3eda 100644
--- a/Documentation/i2c/busses/i2c-i801
+++ b/Documentation/i2c/busses/i2c-i801
@@ -27,7 +27,13 @@ Authors:
Module Parameters
-----------------
-None.
+* disable_features (bit vector)
+Disable selected features normally supported by the device. This makes it
+possible to work around possible driver or hardware bugs if the feature in
+question doesn't work as intended for whatever reason. Bit values:
+ 1 disable SMBus PEC
+ 2 disable the block buffer
+ 8 disable the I2C block read functionality
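+
+For example, to disable only the block buffer (a usage sketch; bit values can
+be combined to disable several features at once):
+
+  # modprobe i2c-i801 disable_features=0x2
+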
Description
diff --git a/Documentation/input/joystick.txt b/Documentation/input/joystick.txt
index 154d767b2acb..8007b7ca87bf 100644
--- a/Documentation/input/joystick.txt
+++ b/Documentation/input/joystick.txt
@@ -402,7 +402,7 @@ for the port of the SoundFusion is supported by the cs461x.c module.
~~~~~~~~~~~~~~~~~~~~~~~~
The Live! has a special PCI gameport, which, although it doesn't provide
any "Enhanced" stuff like 4DWave and friends, is quite a bit faster than
-it's ISA counterparts. It also requires special support, hence the
+its ISA counterparts. It also requires special support, hence the
emu10k1-gp.c module for it instead of the normal ns558.c one.
3.15 SoundBlaster 64 and 128 - ES1370 and ES1371, ESS Solo1 and S3 SonicVibes
diff --git a/Documentation/intel_txt.txt b/Documentation/intel_txt.txt
index f40a1f030019..5dc59b04a71f 100644
--- a/Documentation/intel_txt.txt
+++ b/Documentation/intel_txt.txt
@@ -126,7 +126,7 @@ o Tboot then applies an (optional) user-defined launch policy to
o Tboot adjusts the e820 table provided by the bootloader to reserve
its own location in memory as well as to reserve certain other
TXT-related regions.
-o As part of it's launch, tboot DMA protects all of RAM (using the
+o As part of its launch, tboot DMA protects all of RAM (using the
VT-d PMRs). Thus, the kernel must be booted with 'intel_iommu=on'
in order to remove this blanket protection and use VT-d's
page-level protection.
@@ -161,13 +161,15 @@ o In order to put a system into any of the sleep states after a TXT
has been restored, it will restore the TPM PCRs and then
transfer control back to the kernel's S3 resume vector.
In order to preserve system integrity across S3, the kernel
- provides tboot with a set of memory ranges (kernel
- code/data/bss, S3 resume code, and AP trampoline) that tboot
- will calculate a MAC (message authentication code) over and then
- seal with the TPM. On resume and once the measured environment
- has been re-established, tboot will re-calculate the MAC and
- verify it against the sealed value. Tboot's policy determines
- what happens if the verification fails.
+ provides tboot with a set of memory ranges (RAM and RESERVED_KERN
+ in the e820 table, but not any memory that BIOS might alter over
+ the S3 transition) that tboot will calculate a MAC (message
+ authentication code) over and then seal with the TPM. On resume
+ and once the measured environment has been re-established, tboot
+ will re-calculate the MAC and verify it against the sealed value.
+ Tboot's policy determines what happens if the verification fails.
+ Note that the c/s 194 of tboot which has the new MAC code supports
+ this.
That's pretty much it for TXT support.
diff --git a/Documentation/kbuild/kconfig-language.txt b/Documentation/kbuild/kconfig-language.txt
index c412c245848f..b472e4e0ba67 100644
--- a/Documentation/kbuild/kconfig-language.txt
+++ b/Documentation/kbuild/kconfig-language.txt
@@ -181,7 +181,7 @@ Expressions are listed in decreasing order of precedence.
(7) Returns the result of max(/expr/, /expr/).
An expression can have a value of 'n', 'm' or 'y' (or 0, 1, 2
-respectively for calculations). A menu entry becomes visible when it's
+respectively for calculations). A menu entry becomes visible when its
expression evaluates to 'm' or 'y'.
There are two types of symbols: constant and non-constant symbols.
diff --git a/Documentation/kbuild/kconfig.txt b/Documentation/kbuild/kconfig.txt
index 49efae703979..b2cb16ebcb16 100644
--- a/Documentation/kbuild/kconfig.txt
+++ b/Documentation/kbuild/kconfig.txt
@@ -96,7 +96,7 @@ Environment variables for 'silentoldconfig'
KCONFIG_NOSILENTUPDATE
--------------------------------------------------
If this variable has a non-blank value, it prevents silent kernel
-config udpates (requires explicit updates).
+config updates (requires explicit updates).
KCONFIG_AUTOCONFIG
--------------------------------------------------
diff --git a/Documentation/kernel-docs.txt b/Documentation/kernel-docs.txt
index 28cdc2af2131..ec8d31ee12e0 100644
--- a/Documentation/kernel-docs.txt
+++ b/Documentation/kernel-docs.txt
@@ -116,7 +116,7 @@
Author: Ingo Molnar, Gadi Oxman and Miguel de Icaza.
URL: http://www.linuxjournal.com/article.php?sid=2391
Keywords: RAID, MD driver.
- Description: Linux Journal Kernel Korner article. Here is it's
+ Description: Linux Journal Kernel Korner article. Here is its
abstract: "A description of the implementation of the RAID-1,
RAID-4 and RAID-5 personalities of the MD device driver in the
Linux kernel, providing users with high performance and reliable,
@@ -127,7 +127,7 @@
URL: http://www.linuxjournal.com/article.php?sid=1219
Keywords: device driver, module, loading/unloading modules,
allocating resources.
- Description: Linux Journal Kernel Korner article. Here is it's
+ Description: Linux Journal Kernel Korner article. Here is its
abstract: "This is the first of a series of four articles
co-authored by Alessandro Rubini and Georg Zezchwitz which present
a practical approach to writing Linux device drivers as kernel
@@ -141,7 +141,7 @@
Keywords: character driver, init_module, clean_up module,
autodetection, mayor number, minor number, file operations,
open(), close().
- Description: Linux Journal Kernel Korner article. Here is it's
+ Description: Linux Journal Kernel Korner article. Here is its
abstract: "This article, the second of four, introduces part of
the actual code to create custom module implementing a character
device driver. It describes the code for module initialization and
@@ -152,7 +152,7 @@
URL: http://www.linuxjournal.com/article.php?sid=1221
Keywords: read(), write(), select(), ioctl(), blocking/non
blocking mode, interrupt handler.
- Description: Linux Journal Kernel Korner article. Here is it's
+ Description: Linux Journal Kernel Korner article. Here is its
abstract: "This article, the third of four on writing character
device drivers, introduces concepts of reading, writing, and using
ioctl-calls".
@@ -161,7 +161,7 @@
Author: Alessandro Rubini and Georg v. Zezschwitz.
URL: http://www.linuxjournal.com/article.php?sid=1222
Keywords: interrupts, irqs, DMA, bottom halves, task queues.
- Description: Linux Journal Kernel Korner article. Here is it's
+ Description: Linux Journal Kernel Korner article. Here is its
abstract: "This is the fourth in a series of articles about
writing character device drivers as loadable kernel modules. This
month, we further investigate the field of interrupt handling.
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 0c6c56076d19..1808f1157f30 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -58,6 +58,7 @@ parameter is applicable:
ISAPNP ISA PnP code is enabled.
ISDN Appropriate ISDN support is enabled.
JOY Appropriate joystick support is enabled.
+ KGDB Kernel debugger support is enabled.
KVM Kernel Virtual Machine support is enabled.
LIBATA Libata driver is enabled
LP Printer support is enabled.
@@ -99,6 +100,7 @@ parameter is applicable:
SWSUSP Software suspend (hibernation) is enabled.
SUSPEND System suspend states are enabled.
FTRACE Function tracing enabled.
+ TPM TPM drivers are enabled.
TS Appropriate touchscreen support is enabled.
UMS USB Mass Storage support is enabled.
USB USB support is enabled.
@@ -143,14 +145,14 @@ and is between 256 and 4096 characters. It is defined in the file
acpi= [HW,ACPI,X86]
Advanced Configuration and Power Interface
- Format: { force | off | ht | strict | noirq | rsdt }
+ Format: { force | off | strict | noirq | rsdt }
force -- enable ACPI if default was off
off -- disable ACPI if default was on
noirq -- do not use ACPI for IRQ routing
- ht -- run only enough ACPI to enable Hyper Threading
strict -- Be less tolerant of platforms that are not
strictly ACPI specification compliant.
rsdt -- prefer RSDT over (default) XSDT
+ copy_dsdt -- copy DSDT to memory
See also Documentation/power/pm.txt, pci=noacpi
@@ -287,9 +289,6 @@ and is between 256 and 4096 characters. It is defined in the file
advansys= [HW,SCSI]
See header of drivers/scsi/advansys.c.
- advwdt= [HW,WDT] Advantech WDT
- Format: <iostart>,<iostop>
-
aedsp16= [HW,OSS] Audio Excel DSP 16
Format: <io>,<irq>,<dma>,<mss_io>,<mpu_io>,<mpu_irq>
See also header of sound/oss/aedsp16.c.
@@ -710,6 +709,12 @@ and is between 256 and 4096 characters. It is defined in the file
The VGA output is eventually overwritten by the real
console.
+ ekgdboc= [X86,KGDB] Allow early kernel console debugging
+ ekgdboc=kbd
+
+			This is designed to be used in conjunction with
+ the boot argument: earlyprintk=vga
+
eata= [HW,SCSI]
edd= [EDD]
@@ -752,13 +757,14 @@ and is between 256 and 4096 characters. It is defined in the file
Default value is 0.
Value can be changed at runtime via /selinux/enforce.
+ erst_disable [ACPI]
+ Disable Error Record Serialization Table (ERST)
+ support.
+
ether= [HW,NET] Ethernet cards parameters
This option is obsoleted by the "netdev=" option, which
has equivalent usage. See its documentation for details.
- eurwdt= [HW,WDT] Eurotech CPU-1220/1410 onboard watchdog.
- Format: <io>[,<irq>]
-
failslab=
fail_page_alloc=
fail_make_request=[KNL]
@@ -786,8 +792,12 @@ and is between 256 and 4096 characters. It is defined in the file
as early as possible in order to facilitate early
boot debugging.
- ftrace_dump_on_oops
+ ftrace_dump_on_oops[=orig_cpu]
[FTRACE] will dump the trace buffers on oops.
+ If no parameter is passed, ftrace will dump
+ buffers of all CPUs, but if you pass orig_cpu, it will
+ dump only the buffer of the CPU that triggered the
+ oops.
ftrace_filter=[function-list]
[FTRACE] Limit the functions traced by the function
@@ -845,6 +855,11 @@ and is between 256 and 4096 characters. It is defined in the file
hd= [EIDE] (E)IDE hard drive subsystem geometry
Format: <cyl>,<head>,<sect>
+ hest_disable [ACPI]
+ Disable Hardware Error Source Table (HEST) support;
+ corresponding firmware-first mode error processing
+ logic will be disabled.
+
highmem=nn[KMG] [KNL,BOOT] forces the highmem zone to have an exact
size of <nn>. This works even on boxes that have no
highmem otherwise. This also works to reduce highmem
@@ -1114,10 +1129,26 @@ and is between 256 and 4096 characters. It is defined in the file
use the HighMem zone if it exists, and the Normal
zone if it does not.
- kgdboc= [HW] kgdb over consoles.
- Requires a tty driver that supports console polling.
- (only serial supported for now)
- Format: <serial_device>[,baud]
+ kgdbdbgp= [KGDB,HW] kgdb over EHCI usb debug port.
+ Format: <Controller#>[,poll interval]
+ The controller # is the number of the ehci usb debug
+ port as it is probed via PCI. The poll interval is
+			optional and is the number of seconds between
+ each poll cycle to the debug port in case you need
+ the functionality for interrupting the kernel with
+ gdb or control-c on the dbgp connection. When
+ not using this parameter you use sysrq-g to break into
+ the kernel debugger.
+
+ kgdboc= [KGDB,HW] kgdb over consoles.
+ Requires a tty driver that supports console polling,
+ or a supported polling keyboard driver (non-usb).
+ Serial only format: <serial_device>[,baud]
+ keyboard only format: kbd
+ keyboard and serial format: kbd,<serial_device>[,baud]
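+			Example (ttyS0 and the baud rate are illustrative):
+			  kgdboc=kbd,ttyS0,115200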
+
+ kgdbwait [KGDB] Stop kernel execution and enter the
+ kernel debugger at the earliest opportunity.
kmac= [MIPS] korina ethernet MAC address.
Configure the RouterBoard 532 series on-chip
@@ -1229,6 +1260,8 @@ and is between 256 and 4096 characters. It is defined in the file
* nohrst, nosrst, norst: suppress hard, soft
and both resets.
+ * dump_id: dump IDENTIFY data.
+
If there are multiple matching configurations changing
the same attribute, the last one is used.
@@ -2238,9 +2271,6 @@ and is between 256 and 4096 characters. It is defined in the file
sched_debug [KNL] Enables verbose scheduler debug messages.
- sc1200wdt= [HW,WDT] SC1200 WDT (watchdog) driver
- Format: <io>[,<timeout>[,<isapnp>]]
-
scsi_debug_*= [SCSI]
See drivers/scsi/scsi_debug.c.
@@ -2612,6 +2642,15 @@ and is between 256 and 4096 characters. It is defined in the file
tp720= [HW,PS2]
+ tpm_suspend_pcr=[HW,TPM]
+ Format: integer pcr id
+ Specify that at suspend time, the tpm driver
+ should extend the specified pcr with zeros,
+ as a workaround for some chips which fail to
+ flush the last written pcr on TPM_SaveState.
+ This will guarantee that all the other pcrs
+ are saved.
+
trace_buf_size=nn[KMG]
[FTRACE] will set tracing buffer size.
@@ -2820,8 +2859,10 @@ and is between 256 and 4096 characters. It is defined in the file
wd7000= [HW,SCSI]
See header of drivers/scsi/wd7000.c.
- wdt= [WDT] Watchdog
- See Documentation/watchdog/wdt.txt.
+ watchdog timers [HW,WDT] For information on watchdog timers,
+ see Documentation/watchdog/watchdog-parameters.txt
+ or other driver-specific files in the
+ Documentation/watchdog/ directory.
x2apic_phys [X86-64,APIC] Use x2apic physical mode instead of
default x2apic cluster mode on platforms
diff --git a/Documentation/kprobes.txt b/Documentation/kprobes.txt
index 2f9115c0ae62..6653017680dd 100644
--- a/Documentation/kprobes.txt
+++ b/Documentation/kprobes.txt
@@ -165,8 +165,8 @@ the user entry_handler invocation is also skipped.
1.4 How Does Jump Optimization Work?
-If you configured your kernel with CONFIG_OPTPROBES=y (currently
-this option is supported on x86/x86-64, non-preemptive kernel) and
+If your kernel is built with CONFIG_OPTPROBES=y (currently this flag
+is automatically set to 'y' on x86/x86-64, non-preemptive kernels) and
the "debug.kprobes_optimization" kernel parameter is set to 1 (see
sysctl(8)), Kprobes tries to reduce probe-hit overhead by using a jump
instruction instead of a breakpoint instruction at each probepoint.
@@ -271,8 +271,6 @@ tweak the kernel's execution path, you need to suppress optimization,
using one of the following techniques:
- Specify an empty function for the kprobe's post_handler or break_handler.
or
-- Config CONFIG_OPTPROBES=n.
- or
- Execute 'sysctl -w debug.kprobes_optimization=n'
2. Architectures Supported
@@ -307,10 +305,6 @@ it useful to "Compile the kernel with debug info" (CONFIG_DEBUG_INFO),
so you can use "objdump -d -l vmlinux" to see the source-to-object
code mapping.
-If you want to reduce probing overhead, set "Kprobes jump optimization
-support" (CONFIG_OPTPROBES) to "y". You can find this option under the
-"Kprobes" line.
-
4. API Reference
The Kprobes API includes a "register" function and an "unregister"
@@ -332,7 +326,7 @@ occurs during execution of kp->pre_handler or kp->post_handler,
or during single-stepping of the probed instruction, Kprobes calls
kp->fault_handler. Any or all handlers can be NULL. If kp->flags
is set KPROBE_FLAG_DISABLED, that kp will be registered but disabled,
-so, it's handlers aren't hit until calling enable_kprobe(kp).
+so, its handlers aren't hit until calling enable_kprobe(kp).
NOTE:
1. With the introduction of the "symbol_name" field to struct kprobe,
diff --git a/Documentation/kvm/api.txt b/Documentation/kvm/api.txt
index c6416a398163..a237518e51b9 100644
--- a/Documentation/kvm/api.txt
+++ b/Documentation/kvm/api.txt
@@ -656,6 +656,7 @@ struct kvm_clock_data {
4.29 KVM_GET_VCPU_EVENTS
Capability: KVM_CAP_VCPU_EVENTS
+Extended by: KVM_CAP_INTR_SHADOW
Architectures: x86
Type: vm ioctl
Parameters: struct kvm_vcpu_event (out)
@@ -676,7 +677,7 @@ struct kvm_vcpu_events {
__u8 injected;
__u8 nr;
__u8 soft;
- __u8 pad;
+ __u8 shadow;
} interrupt;
struct {
__u8 injected;
@@ -688,9 +689,13 @@ struct kvm_vcpu_events {
__u32 flags;
};
+KVM_VCPUEVENT_VALID_SHADOW may be set in the flags field to signal that
+interrupt.shadow contains a valid state. Otherwise, this field is undefined.
+
4.30 KVM_SET_VCPU_EVENTS
Capability: KVM_CAP_VCPU_EVENTS
+Extended by: KVM_CAP_INTR_SHADOW
Architectures: x86
Type: vm ioctl
Parameters: struct kvm_vcpu_event (in)
@@ -709,6 +714,183 @@ current in-kernel state. The bits are:
KVM_VCPUEVENT_VALID_NMI_PENDING - transfer nmi.pending to the kernel
KVM_VCPUEVENT_VALID_SIPI_VECTOR - transfer sipi_vector
+If KVM_CAP_INTR_SHADOW is available, KVM_VCPUEVENT_VALID_SHADOW can be set in
+the flags field to signal that interrupt.shadow contains a valid state and
+shall be written into the VCPU.
+
+4.32 KVM_GET_DEBUGREGS
+
+Capability: KVM_CAP_DEBUGREGS
+Architectures: x86
+Type: vm ioctl
+Parameters: struct kvm_debugregs (out)
+Returns: 0 on success, -1 on error
+
+Reads debug registers from the vcpu.
+
+struct kvm_debugregs {
+ __u64 db[4];
+ __u64 dr6;
+ __u64 dr7;
+ __u64 flags;
+ __u64 reserved[9];
+};
+
+4.33 KVM_SET_DEBUGREGS
+
+Capability: KVM_CAP_DEBUGREGS
+Architectures: x86
+Type: vm ioctl
+Parameters: struct kvm_debugregs (in)
+Returns: 0 on success, -1 on error
+
+Writes debug registers into the vcpu.
+
+See KVM_GET_DEBUGREGS for the data structure. The flags field is not yet
+used and must be cleared on entry.
+
+4.34 KVM_SET_USER_MEMORY_REGION
+
+Capability: KVM_CAP_USER_MEM
+Architectures: all
+Type: vm ioctl
+Parameters: struct kvm_userspace_memory_region (in)
+Returns: 0 on success, -1 on error
+
+struct kvm_userspace_memory_region {
+ __u32 slot;
+ __u32 flags;
+ __u64 guest_phys_addr;
+ __u64 memory_size; /* bytes */
+ __u64 userspace_addr; /* start of the userspace allocated memory */
+};
+
+/* for kvm_memory_region::flags */
+#define KVM_MEM_LOG_DIRTY_PAGES 1UL
+
+This ioctl allows the user to create or modify a guest physical memory
+slot. When changing an existing slot, it may be moved in the guest
+physical memory space, or its flags may be modified. It may not be
+resized. Slots may not overlap in guest physical address space.
+
+Memory for the region is taken starting at the address denoted by the
+field userspace_addr, which must point at user addressable memory for
+the entire memory slot size. Any object may back this memory, including
+anonymous memory, ordinary files, and hugetlbfs.
+
+It is recommended that the lower 21 bits of guest_phys_addr and userspace_addr
+be identical. This allows large pages in the guest to be backed by large
+pages in the host.
+
+The flags field supports just one flag, KVM_MEM_LOG_DIRTY_PAGES, which
+instructs kvm to keep track of writes to memory within the slot. See
+the KVM_GET_DIRTY_LOG ioctl.
+
+When the KVM_CAP_SYNC_MMU capability is available, changes in the backing of
+the memory region are automatically reflected into the guest.  For example, an
+mmap() that affects the region will be made visible immediately.  Another example
+is madvise(MADV_DROP).
+
+It is recommended to use this API instead of the KVM_SET_MEMORY_REGION ioctl.
+The KVM_SET_MEMORY_REGION ioctl does not allow fine-grained control over memory
+allocation and is deprecated.
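+
+A minimal usage sketch from userspace (error handling trimmed; vm_fd comes
+from KVM_CREATE_VM, 'mem'/'mem_size' describe an mmap()ed backing area, and
+<linux/kvm.h> provides the structure and ioctl number):
+
+struct kvm_userspace_memory_region region = {
+	.slot            = 0,
+	.flags           = 0,
+	.guest_phys_addr = 0,
+	.memory_size     = mem_size,
+	.userspace_addr  = (__u64)(unsigned long)mem,
+};
+
+if (ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region) < 0)
+	perror("KVM_SET_USER_MEMORY_REGION");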
+
+4.35 KVM_SET_TSS_ADDR
+
+Capability: KVM_CAP_SET_TSS_ADDR
+Architectures: x86
+Type: vm ioctl
+Parameters: unsigned long tss_address (in)
+Returns: 0 on success, -1 on error
+
+This ioctl defines the physical address of a three-page region in the guest
+physical address space. The region must be within the first 4GB of the
+guest physical address space and must not conflict with any memory slot
+or any mmio address. The guest may malfunction if it accesses this memory
+region.
+
+This ioctl is required on Intel-based hosts. This is needed on Intel hardware
+because of a quirk in the virtualization implementation (see the internals
+documentation when it pops into existence).
+
+4.36 KVM_ENABLE_CAP
+
+Capability: KVM_CAP_ENABLE_CAP
+Architectures: ppc
+Type: vcpu ioctl
+Parameters: struct kvm_enable_cap (in)
+Returns: 0 on success; -1 on error
+
+Not all extensions are enabled by default. Using this ioctl the application
+can enable an extension, making it available to the guest.
+
+On systems that do not support this ioctl, it always fails. On systems that
+do support it, it only works for extensions that are supported for enablement.
+
+To check if a capability can be enabled, the KVM_CHECK_EXTENSION ioctl should
+be used.
+
+struct kvm_enable_cap {
+ /* in */
+ __u32 cap;
+
+The capability that is supposed to get enabled.
+
+ __u32 flags;
+
+A bitfield indicating future enhancements. Has to be 0 for now.
+
+ __u64 args[4];
+
+Arguments for enabling a feature. If a feature needs initial values to
+function properly, this is the place to put them.
+
+ __u8 pad[64];
+};
+
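+A usage sketch (KVM_CAP_PPC_OSI is only an example capability; any extension
+reported as available by KVM_CHECK_EXTENSION and supported for enabling can
+be used):
+
+struct kvm_enable_cap enable = {
+	.cap = KVM_CAP_PPC_OSI,
+};
+
+if (ioctl(vcpu_fd, KVM_ENABLE_CAP, &enable) < 0)
+	perror("KVM_ENABLE_CAP");
+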
+4.37 KVM_GET_MP_STATE
+
+Capability: KVM_CAP_MP_STATE
+Architectures: x86, ia64
+Type: vcpu ioctl
+Parameters: struct kvm_mp_state (out)
+Returns: 0 on success; -1 on error
+
+struct kvm_mp_state {
+ __u32 mp_state;
+};
+
+Returns the vcpu's current "multiprocessing state" (though also valid on
+uniprocessor guests).
+
+Possible values are:
+
+ - KVM_MP_STATE_RUNNABLE: the vcpu is currently running
+ - KVM_MP_STATE_UNINITIALIZED: the vcpu is an application processor (AP)
+ which has not yet received an INIT signal
+ - KVM_MP_STATE_INIT_RECEIVED: the vcpu has received an INIT signal, and is
+ now ready for a SIPI
+ - KVM_MP_STATE_HALTED: the vcpu has executed a HLT instruction and
+ is waiting for an interrupt
+ - KVM_MP_STATE_SIPI_RECEIVED: the vcpu has just received a SIPI (vector
+                                 accessible via KVM_GET_VCPU_EVENTS)
+
+This ioctl is only useful after KVM_CREATE_IRQCHIP. Without an in-kernel
+irqchip, the multiprocessing state must be maintained by userspace.
+
+4.38 KVM_SET_MP_STATE
+
+Capability: KVM_CAP_MP_STATE
+Architectures: x86, ia64
+Type: vcpu ioctl
+Parameters: struct kvm_mp_state (in)
+Returns: 0 on success; -1 on error
+
+Sets the vcpu's current "multiprocessing state"; see KVM_GET_MP_STATE for
+arguments.
+
+This ioctl is only useful after KVM_CREATE_IRQCHIP. Without an in-kernel
+irqchip, the multiprocessing state must be maintained by userspace.
5. The kvm_run structure
@@ -820,6 +1002,13 @@ executed a memory-mapped I/O instruction which could not be satisfied
by kvm. The 'data' member contains the written data if 'is_write' is
true, and should be filled by application code otherwise.
+NOTE: For KVM_EXIT_IO, KVM_EXIT_MMIO and KVM_EXIT_OSI, the corresponding
+operations are complete (and guest state is consistent) only after userspace
+has re-entered the kernel with KVM_RUN. The kernel side will first finish
+incomplete operations and then check for pending signals. Userspace
+can re-enter the guest with an unmasked signal pending to complete
+pending operations.
+
/* KVM_EXIT_HYPERCALL */
struct {
__u64 nr;
@@ -829,7 +1018,9 @@ true, and should be filled by application code otherwise.
__u32 pad;
} hypercall;
-Unused.
+Unused. This was once used for 'hypercall to userspace'. To implement
+such functionality, use KVM_EXIT_IO (x86) or KVM_EXIT_MMIO (all except s390).
+Note KVM_EXIT_IO is significantly faster than KVM_EXIT_MMIO.
/* KVM_EXIT_TPR_ACCESS */
struct {
@@ -870,6 +1061,19 @@ s390 specific.
powerpc specific.
+ /* KVM_EXIT_OSI */
+ struct {
+ __u64 gprs[32];
+ } osi;
+
+MOL uses a special hypercall interface it calls 'OSI'. To enable it, we catch
+hypercalls and exit with this exit struct that contains all the guest gprs.
+
+If exit_reason is KVM_EXIT_OSI, then the vcpu has triggered such a hypercall.
+Userspace can now handle the hypercall and when it's done modify the gprs as
+necessary. Upon guest entry all guest GPRs will then be replaced by the values
+in this struct.
+
/* Fix the size of the union. */
char padding[256];
};
diff --git a/Documentation/kvm/cpuid.txt b/Documentation/kvm/cpuid.txt
new file mode 100644
index 000000000000..14a12ea92b7f
--- /dev/null
+++ b/Documentation/kvm/cpuid.txt
@@ -0,0 +1,42 @@
+KVM CPUID bits
+Glauber Costa <glommer@redhat.com>, Red Hat Inc, 2010
+=====================================================
+
+A guest running on a kvm host, can check some of its features using
+cpuid. This is not always guaranteed to work, since userspace can
+mask-out some, or even all KVM-related cpuid features before launching
+a guest.
+
+KVM cpuid functions are:
+
+function: KVM_CPUID_SIGNATURE (0x40000000)
+returns : eax = 0,
+ ebx = 0x4b4d564b,
+ ecx = 0x564b4d56,
+ edx = 0x4d.
+Note that this value in ebx, ecx and edx corresponds to the string "KVMKVMKVM".
+This function queries the presence of KVM cpuid leaves.
+
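+A guest-side check for this signature might look like the following sketch
+(GCC on x86; an illustration only, not part of the KVM ABI documentation):
+
+#include <stdio.h>
+#include <string.h>
+#include <cpuid.h>
+
+int main(void)
+{
+	unsigned int eax, ebx, ecx, edx;
+	char sig[13];
+
+	__cpuid(0x40000000, eax, ebx, ecx, edx);
+	(void)eax;		/* highest KVM cpuid leaf, unused here */
+	memcpy(sig + 0, &ebx, 4);
+	memcpy(sig + 4, &ecx, 4);
+	memcpy(sig + 8, &edx, 4);
+	sig[12] = '\0';
+
+	printf("hypervisor signature: %s\n", sig);
+	return strcmp(sig, "KVMKVMKVM") != 0;
+}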
+
+function: KVM_CPUID_FEATURES (0x40000001)
+returns : ebx, ecx, edx = 0
+          eax = an OR'ed group of (1 << flag), where each flag is:
+
+
+flag || value || meaning
+=============================================================================
+KVM_FEATURE_CLOCKSOURCE || 0 || kvmclock available at msrs
+ || || 0x11 and 0x12.
+------------------------------------------------------------------------------
+KVM_FEATURE_NOP_IO_DELAY || 1 || not necessary to perform delays
+ || || on PIO operations.
+------------------------------------------------------------------------------
+KVM_FEATURE_MMU_OP || 2 || deprecated.
+------------------------------------------------------------------------------
+KVM_FEATURE_CLOCKSOURCE2 || 3 || kvmclock available at msrs
+ || || 0x4b564d00 and 0x4b564d01
+------------------------------------------------------------------------------
+KVM_FEATURE_CLOCKSOURCE_STABLE_BIT || 24 || host will warn if no guest-side
+ || || per-cpu warps are expected in
+ || || kvmclock.
+------------------------------------------------------------------------------
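+
+Guest code can then read the feature leaf and test the individual bits listed
+above, e.g. (a sketch following on from the signature check;
+use_new_kvmclock_msrs() is a hypothetical placeholder for guest setup code):
+
+	unsigned int eax, ebx, ecx, edx;
+
+	__cpuid(0x40000001, eax, ebx, ecx, edx);
+	if (eax & (1 << 3))		/* KVM_FEATURE_CLOCKSOURCE2 */
+		use_new_kvmclock_msrs();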
diff --git a/Documentation/kvm/mmu.txt b/Documentation/kvm/mmu.txt
new file mode 100644
index 000000000000..aaed6ab9d7ab
--- /dev/null
+++ b/Documentation/kvm/mmu.txt
@@ -0,0 +1,304 @@
+The x86 kvm shadow mmu
+======================
+
+The mmu (in arch/x86/kvm, files mmu.[ch] and paging_tmpl.h) is responsible
+for presenting a standard x86 mmu to the guest, while translating guest
+physical addresses to host physical addresses.
+
+The mmu code attempts to satisfy the following requirements:
+
+- correctness: the guest should not be able to determine that it is running
+ on an emulated mmu except for timing (we attempt to comply
+ with the specification, not emulate the characteristics of
+ a particular implementation such as tlb size)
+- security: the guest must not be able to touch host memory not assigned
+ to it
+- performance: minimize the performance penalty imposed by the mmu
+- scaling: need to scale to large memory and large vcpu guests
+- hardware: support the full range of x86 virtualization hardware
+- integration: Linux memory management code must be in control of guest memory
+ so that swapping, page migration, page merging, transparent
+ hugepages, and similar features work without change
+- dirty tracking: report writes to guest memory to enable live migration
+ and framebuffer-based displays
+- footprint: keep the amount of pinned kernel memory low (most memory
+ should be shrinkable)
+- reliability: avoid multipage or GFP_ATOMIC allocations
+
+Acronyms
+========
+
+pfn host page frame number
+hpa host physical address
+hva host virtual address
+gfn guest frame number
+gpa guest physical address
+gva guest virtual address
+ngpa nested guest physical address
+ngva nested guest virtual address
+pte page table entry (used also to refer generically to paging structure
+ entries)
+gpte guest pte (referring to gfns)
+spte shadow pte (referring to pfns)
+tdp two dimensional paging (vendor neutral term for NPT and EPT)
+
+Virtual and real hardware supported
+===================================
+
+The mmu supports first-generation mmu hardware, which allows an atomic switch
+of the current paging mode and cr3 during guest entry, as well as
+two-dimensional paging (AMD's NPT and Intel's EPT). The emulated hardware
+it exposes is the traditional 2/3/4 level x86 mmu, with support for global
+pages, pae, pse, pse36, cr0.wp, and 1GB pages. Work is in progress to support
+exposing NPT capable hardware on NPT capable hosts.
+
+Translation
+===========
+
+The primary job of the mmu is to program the processor's mmu to translate
+addresses for the guest. Different translations are required at different
+times:
+
+- when guest paging is disabled, we translate guest physical addresses to
+ host physical addresses (gpa->hpa)
+- when guest paging is enabled, we translate guest virtual addresses, to
+ guest physical addresses, to host physical addresses (gva->gpa->hpa)
+- when the guest launches a guest of its own, we translate nested guest
+ virtual addresses, to nested guest physical addresses, to guest physical
+ addresses, to host physical addresses (ngva->ngpa->gpa->hpa)
+
+The primary challenge is to encode between 1 and 3 translations into hardware
+that support only 1 (traditional) and 2 (tdp) translations. When the
+number of required translations matches the hardware, the mmu operates in
+direct mode; otherwise it operates in shadow mode (see below).
+
+Memory
+======
+
+Guest memory (gpa) is part of the user address space of the process that is
+using kvm. Userspace defines the translation between guest addresses and user
+addresses (gpa->hva); note that two gpas may alias to the same hva, but not
+vice versa.
+
+These hvas may be backed using any method available to the host: anonymous
+memory, file backed memory, and device memory. Memory might be paged by the
+host at any time.
+
+Events
+======
+
+The mmu is driven by events, some from the guest, some from the host.
+
+Guest generated events:
+- writes to control registers (especially cr3)
+- invlpg/invlpga instruction execution
+- access to missing or protected translations
+
+Host generated events:
+- changes in the gpa->hpa translation (either through gpa->hva changes or
+ through hva->hpa changes)
+- memory pressure (the shrinker)
+
+Shadow pages
+============
+
+The principal data structure is the shadow page, 'struct kvm_mmu_page'. A
+shadow page contains 512 sptes, which can be either leaf or nonleaf sptes. A
+shadow page may contain a mix of leaf and nonleaf sptes.
+
+A nonleaf spte allows the hardware mmu to reach the leaf pages and
+is not related to a translation directly. It points to other shadow pages.
+
+A leaf spte corresponds to either one or two translations encoded into
+one paging structure entry. These are always the lowest level of the
+translation stack, with optional higher level translations left to NPT/EPT.
+Leaf ptes point at guest pages.
+
+The following table shows translations encoded by leaf ptes, with higher-level
+translations in parentheses:
+
+ Non-nested guests:
+ nonpaging: gpa->hpa
+ paging: gva->gpa->hpa
+ paging, tdp: (gva->)gpa->hpa
+ Nested guests:
+ non-tdp: ngva->gpa->hpa (*)
+ tdp: (ngva->)ngpa->gpa->hpa
+
+(*) the guest hypervisor will encode the ngva->gpa translation into its page
+ tables if npt is not present
+
+Shadow pages contain the following information:
+ role.level:
+ The level in the shadow paging hierarchy that this shadow page belongs to.
+ 1=4k sptes, 2=2M sptes, 3=1G sptes, etc.
+ role.direct:
+ If set, leaf sptes reachable from this page are for a linear range.
+ Examples include real mode translation, large guest pages backed by small
+ host pages, and gpa->hpa translations when NPT or EPT is active.
+ The linear range starts at (gfn << PAGE_SHIFT) and its size is determined
+ by role.level (2MB for first level, 1GB for second level, 0.5TB for third
+ level, 256TB for fourth level)
+ If clear, this page corresponds to a guest page table denoted by the gfn
+ field.
+ role.quadrant:
+ When role.cr4_pae=0, the guest uses 32-bit gptes while the host uses 64-bit
+ sptes. That means a guest page table contains more ptes than the host,
+ so multiple shadow pages are needed to shadow one guest page.
+ For first-level shadow pages, role.quadrant can be 0 or 1 and denotes the
+ first or second 512-gpte block in the guest page table. For second-level
+ page tables, each 32-bit gpte is converted to two 64-bit sptes
+ (since each first-level guest page is shadowed by two first-level
+ shadow pages) so role.quadrant takes values in the range 0..3. Each
+ quadrant maps 1GB virtual address space.
+ role.access:
+ Inherited guest access permissions in the form uwx. Note execute
+ permission is positive, not negative.
+ role.invalid:
+ The page is invalid and should not be used. It is a root page that is
+ currently pinned (by a cpu hardware register pointing to it); once it is
+ unpinned it will be destroyed.
+ role.cr4_pae:
+ Contains the value of cr4.pae for which the page is valid (e.g. whether
+ 32-bit or 64-bit gptes are in use).
+ role.cr4_nxe:
+ Contains the value of efer.nxe for which the page is valid.
+ role.cr0_wp:
+ Contains the value of cr0.wp for which the page is valid.
+ gfn:
+ Either the guest page table containing the translations shadowed by this
+ page, or the base page frame for linear translations. See role.direct.
+ spt:
+ A pageful of 64-bit sptes containing the translations for this page.
+ Accessed by both kvm and hardware.
+ The page pointed to by spt will have its page->private pointing back
+ at the shadow page structure.
+ sptes in spt point either at guest pages, or at lower-level shadow pages.
+ Specifically, if sp1 and sp2 are shadow pages, then sp1->spt[n] may point
+ at __pa(sp2->spt). sp2 will point back at sp1 through parent_pte.
+ The spt array forms a DAG structure with the shadow page as a node, and
+ guest pages as leaves.
+ gfns:
+ An array of 512 guest frame numbers, one for each present pte. Used to
+ perform a reverse map from a pte to a gfn.
+ slot_bitmap:
+ A bitmap containing one bit per memory slot. If the page contains a pte
+ mapping a page from memory slot n, then bit n of slot_bitmap will be set
+ (if a page is aliased among several slots, then it is not guaranteed that
+ all slots will be marked).
+  Used during dirty logging to avoid scanning a shadow page if none of its
+ pages need tracking.
+ root_count:
+ A counter keeping track of how many hardware registers (guest cr3 or
+ pdptrs) are now pointing at the page. While this counter is nonzero, the
+ page cannot be destroyed. See role.invalid.
+ multimapped:
+ Whether there exist multiple sptes pointing at this page.
+ parent_pte/parent_ptes:
+ If multimapped is zero, parent_pte points at the single spte that points at
+ this page's spt. Otherwise, parent_ptes points at a data structure
+ with a list of parent_ptes.
+ unsync:
+ If true, then the translations in this page may not match the guest's
+ translation. This is equivalent to the state of the tlb when a pte is
+ changed but before the tlb entry is flushed. Accordingly, unsync ptes
+ are synchronized when the guest executes invlpg or flushes its tlb by
+ other means. Valid for leaf pages.
+ unsync_children:
+ How many sptes in the page point at pages that are unsync (or have
+ unsynchronized children).
+ unsync_child_bitmap:
+ A bitmap indicating which sptes in spt point (directly or indirectly) at
+  pages that may be unsynchronized. Used to quickly locate all unsynchronized
+ pages reachable from a given page.
+
+Reverse map
+===========
+
+The mmu maintains a reverse mapping whereby all ptes mapping a page can be
+reached given its gfn. This is used, for example, when swapping out a page.
+
+Synchronized and unsynchronized pages
+=====================================
+
+The guest uses two events to synchronize its tlb and page tables: tlb flushes
+and page invalidations (invlpg).
+
+A tlb flush means that we need to synchronize all sptes reachable from the
+guest's cr3. This is expensive, so we keep all guest page tables write
+protected, and synchronize sptes to gptes when a gpte is written.
+
+A special case is when a guest page table is reachable from the current
+guest cr3. In this case, the guest is obliged to issue an invlpg instruction
+before using the translation. We take advantage of that by removing write
+protection from the guest page, and allowing the guest to modify it freely.
+We synchronize modified gptes when the guest invokes invlpg. This reduces
+the amount of emulation we have to do when the guest modifies multiple gptes,
+or when a guest page is no longer used as a page table and is used for
+random guest data.
+
+As a side effect we have to resynchronize all reachable unsynchronized shadow
+pages on a tlb flush.
+
+
+Reaction to events
+==================
+
+- guest page fault (or npt page fault, or ept violation)
+
+This is the most complicated event. The cause of a page fault can be:
+
+ - a true guest fault (the guest translation won't allow the access) (*)
+ - access to a missing translation
+ - access to a protected translation
+ - when logging dirty pages, memory is write protected
+ - synchronized shadow pages are write protected (*)
+ - access to untranslatable memory (mmio)
+
+ (*) not applicable in direct mode
+
+Handling a page fault is performed as follows:
+
+ - if needed, walk the guest page tables to determine the guest translation
+ (gva->gpa or ngpa->gpa)
+ - if permissions are insufficient, reflect the fault back to the guest
+ - determine the host page
+ - if this is an mmio request, there is no host page; call the emulator
+ to emulate the instruction instead
+ - walk the shadow page table to find the spte for the translation,
+ instantiating missing intermediate page tables as necessary
+ - try to unsynchronize the page
+ - if successful, we can let the guest continue and modify the gpte
+ - emulate the instruction
+ - if failed, unshadow the page and let the guest continue
+ - update any translations that were modified by the instruction
+
+invlpg handling:
+
+ - walk the shadow page hierarchy and drop affected translations
+ - try to reinstantiate the indicated translation in the hope that the
+ guest will use it in the near future
+
+Guest control register updates:
+
+- mov to cr3
+ - look up new shadow roots
+ - synchronize newly reachable shadow pages
+
+- mov to cr0/cr4/efer
+ - set up mmu context for new paging mode
+ - look up new shadow roots
+ - synchronize newly reachable shadow pages
+
+Host translation updates:
+
+ - mmu notifier called with updated hva
+ - look up affected sptes through reverse map
+ - drop (or update) translations
+
+Further reading
+===============
+
+- NPT presentation from KVM Forum 2008
+ http://www.linux-kvm.org/wiki/images/c/c8/KvmForum2008%24kdf2008_21.pdf
+
diff --git a/Documentation/laptops/laptop-mode.txt b/Documentation/laptops/laptop-mode.txt
index 2c3c35093023..0bf25eebce94 100644
--- a/Documentation/laptops/laptop-mode.txt
+++ b/Documentation/laptops/laptop-mode.txt
@@ -207,7 +207,7 @@ Tips & Tricks
* Drew Scott Daniels observed: "I don't know why, but when I decrease the number
of colours that my display uses it consumes less battery power. I've seen
this on powerbooks too. I hope that this is a piece of information that
- might be useful to the Laptop Mode patch or it's users."
+ might be useful to the Laptop Mode patch or its users."
* In syslog.conf, you can prefix entries with a dash ``-'' to omit syncing the
file after every logging. When you're using laptop-mode and your disk doesn't
diff --git a/Documentation/laptops/thinkpad-acpi.txt b/Documentation/laptops/thinkpad-acpi.txt
index 39c0a09d0105..fc15538d8b46 100644
--- a/Documentation/laptops/thinkpad-acpi.txt
+++ b/Documentation/laptops/thinkpad-acpi.txt
@@ -292,13 +292,13 @@ sysfs notes:
Warning: when in NVRAM mode, the volume up/down/mute
keys are synthesized according to changes in the mixer,
- so you have to use volume up or volume down to unmute,
- as per the ThinkPad volume mixer user interface. When
- in ACPI event mode, volume up/down/mute are reported as
- separate events, but this behaviour may be corrected in
- future releases of this driver, in which case the
- ThinkPad volume mixer user interface semantics will be
- enforced.
+ which uses a single volume up or volume down hotkey
+ press to unmute, as per the ThinkPad volume mixer user
+ interface. When in ACPI event mode, volume up/down/mute
+ events are reported by the firmware and can behave
+ differently (and that behaviour changes with firmware
+ version -- not just with firmware models -- as well as
+ OSI(Linux) state).
hotkey_poll_freq:
frequency in Hz for hot key polling. It must be between
@@ -309,7 +309,7 @@ sysfs notes:
will cause hot key presses that require NVRAM polling
to never be reported.
- Setting hotkey_poll_freq too low will cause repeated
+ Setting hotkey_poll_freq too low may cause repeated
pressings of the same hot key to be misreported as a
single key press, or to not even be detected at all.
The recommended polling frequency is 10Hz.
@@ -397,6 +397,7 @@ ACPI Scan
event code Key Notes
0x1001 0x00 FN+F1 -
+
0x1002 0x01 FN+F2 IBM: battery (rare)
Lenovo: Screen lock
@@ -404,7 +405,8 @@ event code Key Notes
this hot key, even with hot keys
disabled or with Fn+F3 masked
off
- IBM: screen lock
+ IBM: screen lock, often turns
+ off the ThinkLight as side-effect
Lenovo: battery
0x1004 0x03 FN+F4 Sleep button (ACPI sleep button
@@ -433,7 +435,8 @@ event code Key Notes
Do you feel lucky today?
0x1008 0x07 FN+F8 IBM: toggle screen expand
- Lenovo: configure UltraNav
+ Lenovo: configure UltraNav,
+ or toggle screen expand
0x1009 0x08 FN+F9 -
.. .. ..
@@ -444,7 +447,7 @@ event code Key Notes
either through the ACPI event,
or through a hotkey event.
The firmware may refuse to
- generate further FN+F4 key
+ generate further FN+F12 key
press events until a S3 or S4
ACPI sleep cycle is performed,
or some time passes.
@@ -512,15 +515,19 @@ events for switches:
SW_RFKILL_ALL T60 and later hardware rfkill rocker switch
SW_TABLET_MODE Tablet ThinkPads HKEY events 0x5009 and 0x500A
-Non hot-key ACPI HKEY event map:
+Non hotkey ACPI HKEY event map:
+-------------------------------
+
+Events that are not propagated by the driver, except for legacy
+compatibility purposes when hotkey_report_mode is set to 1:
+
0x5001 Lid closed
0x5002 Lid opened
0x5009 Tablet swivel: switched to tablet mode
0x500A Tablet swivel: switched to normal mode
0x7000 Radio Switch may have changed state
-The above events are not propagated by the driver, except for legacy
-compatibility purposes when hotkey_report_mode is set to 1.
+Events that are never propagated by the driver:
0x2304 System is waking up from suspend to undock
0x2305 System is waking up from suspend to eject bay
@@ -528,14 +535,39 @@ compatibility purposes when hotkey_report_mode is set to 1.
0x2405 System is waking up from hibernation to eject bay
0x5010 Brightness level changed/control event
-The above events are never propagated by the driver.
+Events that are propagated by the driver to userspace:
+0x2313 ALARM: System is waking up from suspend because
+ the battery is nearly empty
+0x2413 ALARM: System is waking up from hibernation because
+ the battery is nearly empty
0x3003 Bay ejection (see 0x2x05) complete, can sleep again
+0x3006 Bay hotplug request (hint to power up SATA link when
+ the optical drive tray is ejected)
0x4003 Undocked (see 0x2x04), can sleep again
0x500B Tablet pen inserted into its storage bay
0x500C Tablet pen removed from its storage bay
-
-The above events are propagated by the driver.
+0x6011 ALARM: battery is too hot
+0x6012 ALARM: battery is extremely hot
+0x6021 ALARM: a sensor is too hot
+0x6022 ALARM: a sensor is extremely hot
+0x6030 System thermal table changed
+
+Battery nearly empty alarms are a last-resort attempt to get the
+operating system to hibernate or shut down cleanly (0x2313), or shut
+down cleanly (0x2413) before power is lost. They must be acted upon, as
+the wakeup caused by the firmware will have negated most safety nets...
+
+When any of the "too hot" alarms happen, according to Lenovo the user
+should suspend or hibernate the laptop (and in the case of battery
+alarms, unplug the AC adapter) to let it cool down. These alarms do
+signal that something is wrong; they should never happen under normal
+operating conditions.
+
+The "extremely hot" alarms are emergencies. According to Lenovo, the
+operating system is to force either an immediate suspend or hibernate
+cycle, or a system shutdown. Obviously, something is very wrong if this
+happens.
Compatibility notes:
diff --git a/Documentation/lguest/lguest.c b/Documentation/lguest/lguest.c
index 3119f5db75bd..e9ce3c554514 100644
--- a/Documentation/lguest/lguest.c
+++ b/Documentation/lguest/lguest.c
@@ -263,7 +263,7 @@ static u8 *get_feature_bits(struct device *dev)
* Launcher virtual with an offset.
*
* This can be tough to get your head around, but usually it just means that we
- * use these trivial conversion functions when the Guest gives us it's
+ * use these trivial conversion functions when the Guest gives us its
* "physical" addresses:
*/
static void *from_guest_phys(unsigned long addr)
diff --git a/Documentation/md.txt b/Documentation/md.txt
index 188f4768f1d5..e4e893ef3e01 100644
--- a/Documentation/md.txt
+++ b/Documentation/md.txt
@@ -136,7 +136,7 @@ raid_disks != 0.
Then uninitialized devices can be added with ADD_NEW_DISK. The
structure passed to ADD_NEW_DISK must specify the state of the device
-and it's role in the array.
+and its role in the array.
Once started with RUN_ARRAY, uninitialized spares can be added with
HOT_ADD_DISK.
diff --git a/Documentation/netlabel/lsm_interface.txt b/Documentation/netlabel/lsm_interface.txt
index 98dd9f7430f2..638c74f7de7f 100644
--- a/Documentation/netlabel/lsm_interface.txt
+++ b/Documentation/netlabel/lsm_interface.txt
@@ -38,7 +38,7 @@ Depending on the exact configuration, translation between the network packet
label and the internal LSM security identifier can be time consuming. The
NetLabel label mapping cache is a caching mechanism which can be used to
sidestep much of this overhead once a mapping has been established. Once the
-LSM has received a packet, used NetLabel to decode it's security attributes,
+LSM has received a packet, used NetLabel to decode its security attributes,
and translated the security attributes into a LSM internal identifier the LSM
can use the NetLabel caching functions to associate the LSM internal
identifier with the network packet's label. This means that in the future
diff --git a/Documentation/networking/caif/Linux-CAIF.txt b/Documentation/networking/caif/Linux-CAIF.txt
new file mode 100644
index 000000000000..7fe7a9a33a4f
--- /dev/null
+++ b/Documentation/networking/caif/Linux-CAIF.txt
@@ -0,0 +1,212 @@
+Linux CAIF
+===========
+copyright (C) ST-Ericsson AB 2010
+Author: Sjur Brendeland/ sjur.brandeland@stericsson.com
+License terms: GNU General Public License (GPL) version 2
+
+
+Introduction
+------------
+CAIF is a MUX protocol used by ST-Ericsson cellular modems for
+communication between Modem and host. The host processes can open virtual AT
+channels, initiate GPRS Data connections, Video channels and Utility Channels.
+The Utility Channels are general purpose pipes between modem and host.
+
+ST-Ericsson modems support a number of transports between modem
+and host. Currently, UART and Loopback are available for Linux.
+
+
+Architecture:
+------------
+The implementation of CAIF is divided into:
+* CAIF Socket Layer, Kernel API, and Net Device.
+* CAIF Core Protocol Implementation
+* CAIF Link Layer, implemented as NET devices.
+
+
+ RTNL
+ !
+ ! +------+ +------+ +------+
+ ! +------+! +------+! +------+!
+ ! ! Sock !! !Kernel!! ! Net !!
+ ! ! API !+ ! API !+ ! Dev !+ <- CAIF Client APIs
+ ! +------+ +------! +------+
+ ! ! ! !
+ ! +----------!----------+
+ ! +------+ <- CAIF Protocol Implementation
+ +-------> ! CAIF !
+ ! Core !
+ +------+
+ +--------!--------+
+ ! !
+ +------+ +-----+
+ ! ! ! TTY ! <- Link Layer (Net Devices)
+ +------+ +-----+
+
+
+Using the Kernel API
+----------------------
+The Kernel API is used for accessing CAIF channels from the
+kernel.
+The user of the API has to implement two callbacks for receive
+and control.
+The receive callback gives a CAIF packet as a SKB. The control
+callback will notify of channel initialization complete, and
+flow-on/flow-off.
+
+
+  struct caif_device caif_dev = {
+    .caif_config = {
+      .name = "MYDEV",
+      .type = CAIF_CHTY_AT
+    },
+    .receive_cb = my_receive,   /* receive callback, gets a CAIF packet as SKB */
+    .control_cb = my_control,   /* control callback: init complete, flow-on/off */
+  };
+  caif_add_device(&caif_dev);
+  caif_transmit(&caif_dev, skb);
+
+See the caif_kernel.h for details about the CAIF kernel API.
+
+
+I M P L E M E N T A T I O N
+===========================
+===========================
+
+CAIF Core Protocol Layer
+=========================================
+
+CAIF Core layer implements the CAIF protocol as defined by ST-Ericsson.
+It implements the CAIF protocol stack in a layered approach, where
+each layer described in the specification is implemented as a separate layer.
+The architecture is inspired by the design patterns "Protocol Layer" and
+"Protocol Packet".
+
+== CAIF structure ==
+The Core CAIF implementation contains:
+ - Simple implementation of CAIF.
+ - Layered architecture (a la Streams), each layer in the CAIF
+ specification is implemented in a separate c-file.
+ - Clients must implement PHY layer to access physical HW
+ with receive and transmit functions.
+ - Clients must call configuration function to add PHY layer.
+ - Clients must implement CAIF layer to consume/produce
+ CAIF payload with receive and transmit functions.
+ - Clients must call configuration function to add and connect the
+ Client layer.
+ - When receiving / transmitting CAIF Packets (cfpkt), ownership is passed
+ to the called function (except for framing layers' receive functions
+ or if a transmit function returns an error, in which case the caller
+ must free the packet).
+
+Layered Architecture
+--------------------
+The CAIF protocol can be divided into two parts: Support functions and Protocol
+Implementation. The support functions include:
+
+ - CFPKT CAIF Packet. Implementation of CAIF Protocol Packet. The
+ CAIF Packet has functions for creating, destroying and adding content
+ and for adding/extracting header and trailers to protocol packets.
+
+ - CFLST CAIF list implementation.
+
+ - CFGLUE CAIF Glue. Contains OS Specifics, such as memory
+ allocation, endianness, etc.
+
+The CAIF Protocol implementation contains:
+
+ - CFCNFG CAIF Configuration layer. Configures the CAIF Protocol
+ Stack and provides a Client interface for adding Link-Layer and
+ Driver interfaces on top of the CAIF Stack.
+
+ - CFCTRL CAIF Control layer. Encodes and Decodes control messages
+ such as enumeration and channel setup. Also matches request and
+ response messages.
+
+ - CFSERVL General CAIF Service Layer functionality; handles flow
+ control and remote shutdown requests.
+
+ - CFVEI CAIF VEI layer. Handles CAIF AT Channels on VEI (Virtual
+ External Interface). This layer encodes/decodes VEI frames.
+
+ - CFDGML CAIF Datagram layer. Handles CAIF Datagram layer (IP
+ traffic), encodes/decodes Datagram frames.
+
+ - CFMUX CAIF Mux layer. Handles multiplexing between multiple
+ physical bearers and multiple channels such as VEI, Datagram, etc.
+ The MUX keeps track of the existing CAIF Channels and
+    Physical Instances and selects the appropriate instance based
+    on Channel-ID and Physical-ID.
+
+ - CFFRML CAIF Framing layer. Handles Framing i.e. Frame length
+ and frame checksum.
+
+ - CFSERL CAIF Serial layer. Handles concatenation/split of frames
+ into CAIF Frames with correct length.
+
+
+
+ +---------+
+ | Config |
+ | CFCNFG |
+ +---------+
+ !
+ +---------+ +---------+ +---------+
+ | AT | | Control | | Datagram|
+ | CFVEIL | | CFCTRL | | CFDGML |
+ +---------+ +---------+ +---------+
+ \_____________!______________/
+ !
+ +---------+
+ | MUX |
+ | |
+ +---------+
+ _____!_____
+ / \
+ +---------+ +---------+
+ | CFFRML | | CFFRML |
+ | Framing | | Framing |
+ +---------+ +---------+
+ ! !
+ +---------+ +---------+
+ | | | Serial |
+ | | | CFSERL |
+ +---------+ +---------+
+
+
+In this layered approach the following "rules" apply.
+ - All layers embed the same structure "struct cflayer"
+ - A layer does not depend on any other layer's private data.
+ - Layers are stacked by setting the pointers
+ layer->up , layer->dn
+ - In order to send data upwards, each layer should do
+ layer->up->receive(layer->up, packet);
+ - In order to send data downwards, each layer should do
+ layer->dn->transmit(layer->dn, packet);
+
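+As a minimal sketch (not taken from the sources; the my_* names are
+illustrative and the callback signatures are assumed from the calls
+shown above), a pass-through layer following these rules could look
+like:
+
+  struct my_layer {
+    struct cflayer layer;   /* all layers embed struct cflayer */
+    /* private data for this layer goes here */
+  };
+
+  static int my_receive(struct cflayer *layr, struct cfpkt *pkt)
+  {
+    /* strip/inspect this layer's header here, then pass upwards */
+    return layr->up->receive(layr->up, pkt);
+  }
+
+  static int my_transmit(struct cflayer *layr, struct cfpkt *pkt)
+  {
+    /* add this layer's header here, then pass downwards */
+    return layr->dn->transmit(layr->dn, pkt);
+  }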
+
+Linux Driver Implementation
+===========================
+
+Linux GPRS Net Device and CAIF socket are implemented on top of the
+CAIF Core protocol. The Net device and CAIF socket have an instance of
+'struct cflayer', just like the CAIF Core protocol stack.
+Net device and Socket implement the 'receive()' function defined by
+'struct cflayer', just like the rest of the CAIF stack. In this way, packet
+transmission and reception are handled just as in the rest of the layers:
+the 'dn->transmit()' function is called in order to transmit data.
+
+The layer on top of the CAIF Core implementation is
+sometimes referred to as the "Client layer".
+
+
+Configuration of Link Layer
+---------------------------
+The Link Layer is implemented as Linux net devices (struct net_device).
+Payload handling and registration is done using standard Linux mechanisms.
+
+The CAIF Protocol relies on a lossless link layer and does not implement
+retransmission. This implies that packet drops must not happen.
+Therefore a flow-control mechanism is implemented where the physical
+interface can initiate flow stop for all CAIF Channels.
diff --git a/Documentation/networking/caif/README b/Documentation/networking/caif/README
new file mode 100644
index 000000000000..757ccfaa1385
--- /dev/null
+++ b/Documentation/networking/caif/README
@@ -0,0 +1,109 @@
+Copyright (C) ST-Ericsson AB 2010
+Author: Sjur Brendeland/ sjur.brandeland@stericsson.com
+License terms: GNU General Public License (GPL) version 2
+---------------------------------------------------------
+
+=== Start ===
+If you have compiled CAIF as modules, do:
+
+$ modprobe crc_ccitt
+$ modprobe caif
+$ modprobe caif_socket
+$ modprobe chnl_net
+
+
+=== Preparing the setup with a STE modem ===
+
+If you are working on integration of CAIF you should make sure
+that the kernel is built with module support.
+
+There are some things that need to be tweaked to get the host TTY correctly
+set up to talk to the modem.
+Since the CAIF stack is running in the kernel and we want to use the existing
+TTY, we are installing our physical serial driver as a line discipline above
+the TTY device.
+
+To achieve this we need to install the N_CAIF ldisc from user space.
+The benefit is that we can hook up to any TTY.
+
+The use of the start-of-frame extension (STX) must also be enabled with
+the module parameter "ser_use_stx".
+
+A frame checksum is normally always used on UART; this can also be
+controlled with the module parameter "ser_use_fcs".
+
+$ modprobe caif_serial ser_ttyname=/dev/ttyS0 ser_use_stx=yes
+$ ifconfig caif_ttyS0 up
+
+PLEASE NOTE: There is a limitation in Android shell.
+ It only accepts one argument to insmod/modprobe!
+
+=== Troubleshooting ===
+
+There are debugfs parameters provided for serial communication.
+/sys/kernel/debug/caif_serial/<tty-name>/
+
+* ser_state: Prints the bit-mask status where
+ - 0x02 means SENDING, this is a transient state.
+ - 0x10 means FLOW_OFF_SENT, i.e. the previous frame has not been sent
+ and is blocking further send operation. Flow OFF has been propagated
+ to all CAIF Channels using this TTY.
+
+* tty_status: Prints the bit-mask tty status information
+ - 0x01 - tty->warned is on.
+ - 0x02 - tty->low_latency is on.
+ - 0x04 - tty->packed is on.
+ - 0x08 - tty->flow_stopped is on.
+ - 0x10 - tty->hw_stopped is on.
+ - 0x20 - tty->stopped is on.
+
+* last_tx_msg: Binary blob. Prints the last transmitted frame.
+  This can be printed with
+  $ od --format=x1 /sys/kernel/debug/caif_serial/<tty>/last_tx_msg.
+ The first two tx messages sent look like this. Note: The initial
+ byte 02 is start of frame extension (STX) used for re-syncing
+ upon errors.
+
+ - Enumeration:
+ 0000000 02 05 00 00 03 01 d2 02
+ | | | | | |
+ STX(1) | | | |
+ Length(2)| | |
+ Control Channel(1)
+ Command:Enumeration(1)
+ Link-ID(1)
+ Checksum(2)
+ - Channel Setup:
+ 0000000 02 07 00 00 00 21 a1 00 48 df
+ | | | | | | | |
+ STX(1) | | | | | |
+ Length(2)| | | | |
+ Control Channel(1)
+ Command:Channel Setup(1)
+ Channel Type(1)
+ Priority and Link-ID(1)
+ Endpoint(1)
+ Checksum(2)
+
+* last_rx_msg: Prints the last received frame.
+ The RX messages for LinkSetup look almost identical but they have the
+ bit 0x20 set in the command bit, and Channel Setup has added one byte
+ before Checksum containing Channel ID.
+ NOTE: Several CAIF Messages might be concatenated. The maximum debug
+ buffer size is 128 bytes.
+
+=== Error Scenarios ===
+- last_tx_msg contains channel setup message and last_rx_msg is empty ->
+  The host seems to be able to send over the UART; at least the CAIF ldisc
+  gets notified that sending is completed.
+
+- last_tx_msg contains enumeration message and last_rx_msg is empty ->
+  The host is not able to send the message over the UART; the tty has not
+  been able to complete the transmit operation.
+
+- If /sys/kernel/debug/caif_serial/<tty>/tty_status is non-zero there
+  might be problems transmitting over UART.
+  E.g. if the host and modem wiring is not correct, you will typically see
+  tty_status = 0x10 (hw_stopped) and ser_state = 0x10 (FLOW_OFF_SENT).
+  You will probably see the enumeration message in last_tx_msg
+  and an empty last_rx_msg.
diff --git a/Documentation/networking/ifenslave.c b/Documentation/networking/ifenslave.c
index 1b96ccda3836..2bac9618c345 100644
--- a/Documentation/networking/ifenslave.c
+++ b/Documentation/networking/ifenslave.c
@@ -756,7 +756,7 @@ static int enslave(char *master_ifname, char *slave_ifname)
*/
if (abi_ver < 1) {
/* For old ABI, the master needs to be
- * down before setting it's hwaddr
+ * down before setting its hwaddr
*/
res = set_if_down(master_ifname, master_flags.ifr_flags);
if (res) {
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 8b72c88ba213..d0536b5a4e01 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -588,6 +588,37 @@ ip_local_port_range - 2 INTEGERS
(i.e. by default) range 1024-4999 is enough to issue up to
2000 connections per second to systems supporting timestamps.
+ip_local_reserved_ports - list of comma separated ranges
+ Specify the ports which are reserved for known third-party
+ applications. These ports will not be used by automatic port
+ assignments (e.g. when calling connect() or bind() with port
+ number 0). Explicit port allocation behavior is unchanged.
+
+ The format used for both input and output is a comma separated
+ list of ranges (e.g. "1,2-4,10-10" for ports 1, 2, 3, 4 and
+ 10). Writing to the file will clear all previously reserved
+ ports and update the current list with the one given in the
+ input.
+
+ Note that ip_local_port_range and ip_local_reserved_ports
+ settings are independent and both are considered by the kernel
+ when determining which ports are available for automatic port
+ assignments.
+
+ You can reserve ports which are not in the current
+ ip_local_port_range, e.g.:
+
+ $ cat /proc/sys/net/ipv4/ip_local_port_range
+ 32000 61000
+ $ cat /proc/sys/net/ipv4/ip_local_reserved_ports
+ 8080,9148
+
+	although this is redundant. However, such a setting is useful
+	if the port range is later changed to a value that will
+	include the reserved ports.
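+
+	Because a write replaces the whole list, adding one more port
+	means writing the complete new list again (the port numbers
+	below are only examples):
+
+	$ cat /proc/sys/net/ipv4/ip_local_reserved_ports
+	8080,9148
+	# echo "8080,9148,49001" > /proc/sys/net/ipv4/ip_local_reserved_ports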
+
+ Default: Empty
+
ip_nonlocal_bind - BOOLEAN
If set, allows processes to bind() to non-local IP addresses,
which can be quite useful - but may break some applications.
diff --git a/Documentation/networking/l2tp.txt b/Documentation/networking/l2tp.txt
index 63214b280e00..e7bf3979facb 100644
--- a/Documentation/networking/l2tp.txt
+++ b/Documentation/networking/l2tp.txt
@@ -1,44 +1,95 @@
-This brief document describes how to use the kernel's PPPoL2TP driver
-to provide L2TP functionality. L2TP is a protocol that tunnels one or
-more PPP sessions over a UDP tunnel. It is commonly used for VPNs
+This document describes how to use the kernel's L2TP drivers to
+provide L2TP functionality. L2TP is a protocol that tunnels one or
+more sessions over an IP tunnel. It is commonly used for VPNs
(L2TP/IPSec) and by ISPs to tunnel subscriber PPP sessions over an IP
-network infrastructure.
+network infrastructure. With L2TPv3, it is also useful as a Layer-2
+tunneling infrastructure.
+
+Features
+========
+
+L2TPv2 (PPP over L2TP (UDP tunnels)).
+L2TPv3 ethernet pseudowires.
+L2TPv3 PPP pseudowires.
+L2TPv3 IP encapsulation.
+Netlink sockets for L2TPv3 configuration management.
+
+History
+=======
+
+The original pppol2tp driver was introduced in 2.6.23 and provided
+L2TPv2 functionality (rfc2661). L2TPv2 is used to tunnel one or more PPP
+sessions over a UDP tunnel.
+
+L2TPv3 (rfc3931) changes the protocol to allow different frame types
+to be passed over an L2TP tunnel by moving the PPP-specific parts of
+the protocol out of the core L2TP packet headers. Each frame type is
+known as a pseudowire type. Ethernet, PPP, HDLC, Frame Relay and ATM
+pseudowires for L2TP are defined in separate RFC standards. Another
+change for L2TPv3 is that it can be carried directly over IP with no
+UDP header (UDP is optional). It is also possible to create static
+unmanaged L2TPv3 tunnels manually without a control protocol
+(userspace daemon) to manage them.
+
+To support L2TPv3, the original pppol2tp driver was split up to
+separate the L2TP and PPP functionality. Existing L2TPv2 userspace
+apps should be unaffected as the original pppol2tp sockets API is
+retained. L2TPv3, however, uses netlink to manage L2TPv3 tunnels and
+sessions.
Design
======
-The PPPoL2TP driver, drivers/net/pppol2tp.c, provides a mechanism by
-which PPP frames carried through an L2TP session are passed through
-the kernel's PPP subsystem. The standard PPP daemon, pppd, handles all
-PPP interaction with the peer. PPP network interfaces are created for
-each local PPP endpoint.
-
-The L2TP protocol http://www.faqs.org/rfcs/rfc2661.html defines L2TP
-control and data frames. L2TP control frames carry messages between
-L2TP clients/servers and are used to setup / teardown tunnels and
-sessions. An L2TP client or server is implemented in userspace and
-will use a regular UDP socket per tunnel. L2TP data frames carry PPP
-frames, which may be PPP control or PPP data. The kernel's PPP
+The L2TP protocol separates control and data frames. The L2TP kernel
+drivers handle only L2TP data frames; control frames are always
+handled by userspace. L2TP control frames carry messages between L2TP
+clients/servers and are used to setup / teardown tunnels and
+sessions. An L2TP client or server is implemented in userspace.
+
+Each L2TP tunnel is implemented using a UDP or L2TPIP socket; L2TPIP
+provides L2TPv3 IP encapsulation (no UDP) and is implemented using a
+new l2tpip socket family. The tunnel socket is typically created by
+userspace, though for unmanaged L2TPv3 tunnels, the socket can also be
+created by the kernel. Each L2TP session (pseudowire) gets a network
+interface instance. In the case of PPP, these interfaces are created
+indirectly by pppd using a pppol2tp socket. In the case of ethernet,
+the netdevice is created upon a netlink request to create an L2TPv3
+ethernet pseudowire.
+
+For PPP, the PPPoL2TP driver, net/l2tp/l2tp_ppp.c, provides a
+mechanism by which PPP frames carried through an L2TP session are
+passed through the kernel's PPP subsystem. The standard PPP daemon,
+pppd, handles all PPP interaction with the peer. PPP network
+interfaces are created for each local PPP endpoint. The kernel's PPP
subsystem arranges for PPP control frames to be delivered to pppd,
while data frames are forwarded as usual.
+For ethernet, the L2TPETH driver, net/l2tp/l2tp_eth.c, implements a
+netdevice driver, managing virtual ethernet devices, one per
+pseudowire. These interfaces can be managed using standard Linux tools
+such as "ip" and "ifconfig". If only IP frames are passed over the
+tunnel, the interface can be given the IP addresses of itself and its
+peer. If non-IP frames are to be passed over the tunnel, the interface
+can be added to a bridge using brctl. All L2TP datapath protocol
+functions are handled by the L2TP core driver.
+
Each tunnel and session within a tunnel is assigned a unique tunnel_id
and session_id. These ids are carried in the L2TP header of every
-control and data packet. The pppol2tp driver uses them to lookup
-internal tunnel and/or session contexts. Zero tunnel / session ids are
-treated specially - zero ids are never assigned to tunnels or sessions
-in the network. In the driver, the tunnel context keeps a pointer to
-the tunnel UDP socket. The session context keeps a pointer to the
-PPPoL2TP socket, as well as other data that lets the driver interface
-to the kernel PPP subsystem.
-
-Note that the pppol2tp kernel driver handles only L2TP data frames;
-L2TP control frames are simply passed up to userspace in the UDP
-tunnel socket. The kernel handles all datapath aspects of the
-protocol, including data packet resequencing (if enabled).
-
-There are a number of requirements on the userspace L2TP daemon in
-order to use the pppol2tp driver.
+control and data packet. (Actually, in L2TPv3, the tunnel_id isn't
+present in data frames - it is inferred from the IP connection on
+which the packet was received.) The L2TP driver uses the ids to lookup
+internal tunnel and/or session contexts to determine how to handle the
+packet. Zero tunnel / session ids are treated specially - zero ids are
+never assigned to tunnels or sessions in the network. In the driver,
+the tunnel context keeps a reference to the tunnel UDP or L2TPIP
+socket. The session context holds data that lets the driver interface
+to the kernel's network frame type subsystems, i.e. PPP, ethernet.
+
+Userspace Programming
+=====================
+
+For L2TPv2, there are a number of requirements on the userspace L2TP
+daemon in order to use the pppol2tp driver.
1. Use a UDP socket per tunnel.
@@ -86,6 +137,35 @@ In addition to the standard PPP ioctls, a PPPIOCGL2TPSTATS is provided
to retrieve tunnel and session statistics from the kernel using the
PPPoX socket of the appropriate tunnel or session.
+For L2TPv3, userspace must use the netlink API defined in
+include/linux/l2tp.h to manage tunnel and session contexts. The
+general procedure to create a new L2TP tunnel with one session is:-
+
+1. Open a GENL socket using L2TP_GENL_NAME for configuring the kernel
+ using netlink.
+
+2. Create a UDP or L2TPIP socket for the tunnel.
+
+3. Create a new L2TP tunnel using a L2TP_CMD_TUNNEL_CREATE
+ request. Set attributes according to desired tunnel parameters,
+ referencing the UDP or L2TPIP socket created in the previous step.
+
+4. Create a new L2TP session in the tunnel using a
+ L2TP_CMD_SESSION_CREATE request.
+
+The tunnel and all of its sessions are closed when the tunnel socket
+is closed. The netlink API may also be used to delete sessions and
+tunnels. Configuration and status info may be set or read using netlink.
+
+The L2TP driver also supports static (unmanaged) L2TPv3 tunnels. These
+are where there is no L2TP control message exchange with the peer to
+setup the tunnel; the tunnel is configured manually at each end of the
+tunnel. There is no need for an L2TP userspace application in this
+case -- the tunnel socket is created by the kernel and configured
+using parameters sent in the L2TP_CMD_TUNNEL_CREATE netlink
+request. The "ip" utility of iproute2 has commands for managing static
+L2TPv3 tunnels; do "ip l2tp help" for more information.
+
Debugging
=========
@@ -102,6 +182,69 @@ PPPOL2TP_MSG_CONTROL userspace - kernel interface
PPPOL2TP_MSG_SEQ sequence numbers handling
PPPOL2TP_MSG_DATA data packets
+If enabled, files under an l2tp debugfs directory can be used to dump
+kernel state about L2TP tunnels and sessions. To access it, the
+debugfs filesystem must first be mounted.
+
+# mount -t debugfs debugfs /debug
+
+Files under the l2tp directory can then be accessed.
+
+# cat /debug/l2tp/tunnels
+
+The debugfs files should not be used by applications to obtain L2TP
+state information because the file format is subject to change. (It is
+implemented to provide extra debug information to help diagnose
+problems.) Users should use the netlink API.
+
+/proc/net/pppol2tp is also provided for backwards compatibility with
+the original pppol2tp driver. It lists information about L2TPv2
+tunnels and sessions only. Its use is discouraged.
+
+Unmanaged L2TPv3 Tunnels
+========================
+
+Some commercial L2TP products support unmanaged L2TPv3 ethernet
+tunnels, where there is no L2TP control protocol; tunnels are
+configured at each side manually. New commands are available in
+iproute2's ip utility to support this.
+
+To create an L2TPv3 ethernet pseudowire between local host 192.168.1.1
+and peer 192.168.1.2, using IP addresses 10.5.1.1 and 10.5.1.2 for the
+tunnel endpoints:-
+
+# modprobe l2tp_eth
+# modprobe l2tp_netlink
+
+# ip l2tp add tunnel tunnel_id 1 peer_tunnel_id 1 udp_sport 5000 \
+ udp_dport 5000 encap udp local 192.168.1.1 remote 192.168.1.2
+# ip l2tp add session tunnel_id 1 session_id 1 peer_session_id 1
+# ifconfig -a
+# ip addr add 10.5.1.2/32 peer 10.5.1.1/32 dev l2tpeth0
+# ifconfig l2tpeth0 up
+
+Choose IP addresses to be the address of a local IP interface and that
+of the remote system. The IP addresses of the l2tpeth0 interface can be
+anything suitable.
+
+Repeat the above at the peer, with ports, tunnel/session ids and IP
+addresses reversed. The tunnel and session IDs can be any non-zero
+32-bit number, but the values must be reversed at the peer.
+
+Host 1 Host2
+udp_sport=5000 udp_sport=5001
+udp_dport=5001 udp_dport=5000
+tunnel_id=42 tunnel_id=45
+peer_tunnel_id=45 peer_tunnel_id=42
+session_id=128 session_id=5196755
+peer_session_id=5196755 peer_session_id=128
+
+When done at both ends of the tunnel, it should be possible to send
+data over the network. e.g.
+
+# ping 10.5.1.1
+
+
Sample Userspace Code
=====================
@@ -158,12 +301,48 @@ Sample Userspace Code
}
return 0;
+Internal Implementation
+=======================
+
+The driver keeps a struct l2tp_tunnel context per L2TP tunnel and a
+struct l2tp_session context for each session. The l2tp_tunnel is
+always associated with a UDP or L2TP/IP socket and keeps a list of
+sessions in the tunnel. The l2tp_session context keeps kernel state
+about the session. It has private data which is used for data specific
+to the session type. With L2TPv2, the session always carried PPP
+traffic. With L2TPv3, the session can also carry ethernet frames
+(ethernet pseudowire) or other data types such as ATM, HDLC or Frame
+Relay.
+
+When a tunnel is first opened, the reference count on the socket is
+increased using sock_hold(). This ensures that the kernel socket
+cannot be removed while L2TP's data structures reference it.
+
+Some L2TP sessions also have a socket (PPP pseudowires) while others
+do not (ethernet pseudowires). We can't use the socket reference count
+as the reference count for session contexts. The L2TP implementation
+therefore has its own internal reference counts on the session
+contexts.
+
+To Do
+=====
+
+Add L2TP tunnel switching support. This would route tunneled traffic
+from one L2TP tunnel into another. Specified in
+http://tools.ietf.org/html/draft-ietf-l2tpext-tunnel-switching-08
+
+Add L2TPv3 VLAN pseudowire support.
+
+Add L2TPv3 IP pseudowire support.
+
+Add L2TPv3 ATM pseudowire support.
+
Miscellaneous
-============
+=============
-The PPPoL2TP driver was developed as part of the OpenL2TP project by
+The L2TP drivers were developed as part of the OpenL2TP project by
Katalix Systems Ltd. OpenL2TP is a full-featured L2TP client / server,
designed from the ground up to have the L2TP datapath in the
kernel. The project also implemented the pppol2tp plugin for pppd
which allows pppd to use the kernel driver. Details can be found at
-http://openl2tp.sourceforge.net.
+http://www.openl2tp.org.
diff --git a/Documentation/networking/packet_mmap.txt b/Documentation/networking/packet_mmap.txt
index 09ab0d290326..98f71a5cef00 100644
--- a/Documentation/networking/packet_mmap.txt
+++ b/Documentation/networking/packet_mmap.txt
@@ -100,7 +100,7 @@ by the kernel.
The destruction of the socket and all associated resources
is done by a simple call to close(fd).
-Next I will describe PACKET_MMAP settings and it's constraints,
+Next I will describe PACKET_MMAP settings and its constraints,
also the mapping of the circular buffer in the user process and
the use of this buffer.
@@ -432,7 +432,7 @@ TP_STATUS_LOSING : indicates there were packet drops from last time
the PACKET_STATISTICS option.
TP_STATUS_CSUMNOTREADY: currently it's used for outgoing IP packets which
- it's checksum will be done in hardware. So while
+ its checksum will be done in hardware. So while
reading the packet we should not try to check the
checksum.
diff --git a/Documentation/networking/x25-iface.txt b/Documentation/networking/x25-iface.txt
index 975cc87ebdd1..78f662ee0622 100644
--- a/Documentation/networking/x25-iface.txt
+++ b/Documentation/networking/x25-iface.txt
@@ -20,23 +20,23 @@ the rest of the skbuff, if any more information does exist.
Packet Layer to Device Driver
-----------------------------
-First Byte = 0x00
+First Byte = 0x00 (X25_IFACE_DATA)
This indicates that the rest of the skbuff contains data to be transmitted
over the LAPB link. The LAPB link should already exist before any data is
passed down.
-First Byte = 0x01
+First Byte = 0x01 (X25_IFACE_CONNECT)
Establish the LAPB link. If the link is already established then the connect
confirmation message should be returned as soon as possible.
-First Byte = 0x02
+First Byte = 0x02 (X25_IFACE_DISCONNECT)
Terminate the LAPB link. If it is already disconnected then the disconnect
confirmation message should be returned as soon as possible.
-First Byte = 0x03
+First Byte = 0x03 (X25_IFACE_PARAMS)
LAPB parameters. To be defined.
@@ -44,22 +44,22 @@ LAPB parameters. To be defined.
Device Driver to Packet Layer
-----------------------------
-First Byte = 0x00
+First Byte = 0x00 (X25_IFACE_DATA)
This indicates that the rest of the skbuff contains data that has been
received over the LAPB link.
-First Byte = 0x01
+First Byte = 0x01 (X25_IFACE_CONNECT)
LAPB link has been established. The same message is used for both a LAPB
link connect_confirmation and a connect_indication.
-First Byte = 0x02
+First Byte = 0x02 (X25_IFACE_DISCONNECT)
LAPB link has been terminated. This same message is used for both a LAPB
link disconnect_confirmation and a disconnect_indication.
-First Byte = 0x03
+First Byte = 0x03 (X25_IFACE_PARAMS)
LAPB parameters. To be defined.
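+
+As a rough sketch (not taken from an existing driver; the skb handling
+shown is illustrative), a device driver passing a received LAPB frame up
+to the packet layer would prepend the marker byte first:
+
+	unsigned char *ptr;
+
+	ptr = skb_push(skb, 1);		/* make room for the marker byte */
+	*ptr = X25_IFACE_DATA;		/* "data received over the LAPB link" */
+	/* ... then hand the skb to the packet layer, e.g. via netif_rx() */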
diff --git a/Documentation/oops-tracing.txt b/Documentation/oops-tracing.txt
index c10c022b911c..6fe9001b9263 100644
--- a/Documentation/oops-tracing.txt
+++ b/Documentation/oops-tracing.txt
@@ -256,9 +256,13 @@ characters, each representing a particular tainted value.
9: 'A' if the ACPI table has been overridden.
10: 'W' if a warning has previously been issued by the kernel.
+ (Though some warnings may set more specific taint flags.)
11: 'C' if a staging driver has been loaded.
+ 12: 'I' if the kernel is working around a severe bug in the platform
+ firmware (BIOS or similar).
+
The primary reason for the 'Tainted: ' string is to tell kernel
debuggers if this is a clean kernel or if anything unusual has
occurred. Tainting is permanent: even if an offending module is
diff --git a/Documentation/padata.txt b/Documentation/padata.txt
new file mode 100644
index 000000000000..269d7d0d8335
--- /dev/null
+++ b/Documentation/padata.txt
@@ -0,0 +1,107 @@
+The padata parallel execution mechanism
+Last updated for 2.6.34
+
+Padata is a mechanism by which the kernel can farm work out to be done in
+parallel on multiple CPUs while retaining the ordering of tasks. It was
+developed for use with the IPsec code, which needs to be able to perform
+encryption and decryption on large numbers of packets without reordering
+those packets. The crypto developers made a point of writing padata in a
+sufficiently general fashion that it could be put to other uses as well.
+
+The first step in using padata is to set up a padata_instance structure for
+overall control of how tasks are to be run:
+
+ #include <linux/padata.h>
+
+ struct padata_instance *padata_alloc(const struct cpumask *cpumask,
+ struct workqueue_struct *wq);
+
+The cpumask describes which processors will be used to execute work
+submitted to this instance. The workqueue wq is where the work will
+actually be done; it should be a multithreaded queue, naturally.
+
+There are functions for enabling and disabling the instance:
+
+ void padata_start(struct padata_instance *pinst);
+ void padata_stop(struct padata_instance *pinst);
+
+These functions literally do nothing beyond setting or clearing the
+"padata_start() was called" flag; if that flag is not set, other functions
+will refuse to work.
+
+The list of CPUs to be used can be adjusted with these functions:
+
+ int padata_set_cpumask(struct padata_instance *pinst,
+ cpumask_var_t cpumask);
+ int padata_add_cpu(struct padata_instance *pinst, int cpu);
+ int padata_remove_cpu(struct padata_instance *pinst, int cpu);
+
+Changing the CPU mask has the look of an expensive operation, though, so it
+probably should not be done with great frequency.
+
+Actually submitting work to the padata instance requires the creation of a
+padata_priv structure:
+
+ struct padata_priv {
+ /* Other stuff here... */
+ void (*parallel)(struct padata_priv *padata);
+ void (*serial)(struct padata_priv *padata);
+ };
+
+This structure will almost certainly be embedded within some larger
+structure specific to the work to be done. Most of its fields are private to
+padata, but the structure should be zeroed at initialization time, and the
+parallel() and serial() functions should be provided. Those functions will
+be called in the process of getting the work done as we will see
+momentarily.
+
+The submission of work is done with:
+
+ int padata_do_parallel(struct padata_instance *pinst,
+ struct padata_priv *padata, int cb_cpu);
+
+The pinst and padata structures must be set up as described above; cb_cpu
+specifies which CPU will be used for the final callback when the work is
+done; it must be in the current instance's CPU mask. The return value from
+padata_do_parallel() is a little strange; zero is an error return
+indicating that the caller forgot the padata_start() formalities. -EBUSY
+means that somebody, somewhere else is messing with the instance's CPU
+mask, while -EINVAL is a complaint about cb_cpu not being in that CPU mask.
+If all goes well, this function will return -EINPROGRESS, indicating that
+the work is in progress.
+
+Each task submitted to padata_do_parallel() will, in turn, be passed to
+exactly one call to the above-mentioned parallel() function, on one CPU, so
+true parallelism is achieved by submitting multiple tasks. Despite the
+fact that the workqueue is used to make these calls, parallel() is run with
+software interrupts disabled and thus cannot sleep. The parallel()
+function gets the padata_priv structure pointer as its lone parameter;
+information about the actual work to be done is probably obtained by using
+container_of() to find the enclosing structure.
+
+Note that parallel() has no return value; the padata subsystem assumes that
+parallel() will take responsibility for the task from this point. The work
+need not be completed during this call, but, if parallel() leaves work
+outstanding, it should be prepared to be called again with a new job before
+the previous one completes. When a task does complete, parallel() (or
+whatever function actually finishes the job) should inform padata of the
+fact with a call to:
+
+ void padata_do_serial(struct padata_priv *padata);
+
+At some point in the future, padata_do_serial() will trigger a call to the
+serial() function in the padata_priv structure. That call will happen on
+the CPU requested in the initial call to padata_do_parallel(); it, too, is
+done through the workqueue, but with local software interrupts disabled.
+Note that this call may be deferred for a while since the padata code takes
+pains to ensure that tasks are completed in the order in which they were
+submitted.
+
+The one remaining function in the padata API should be called to clean up
+when a padata instance is no longer needed:
+
+ void padata_free(struct padata_instance *pinst);
+
+This function will busy-wait while any remaining tasks are completed, so it
+might be best not to call it while there is work outstanding. Shutting
+down the workqueue, if necessary, should be done separately.
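+
+Putting it together, a minimal usage sketch (the my_* names and the
+kzalloc-based lifetime are illustrative, not part of the padata API; the
+padata_instance is assumed to have been set up with padata_alloc() and
+padata_start() as described above):
+
+	#include <linux/padata.h>
+	#include <linux/kernel.h>
+	#include <linux/slab.h>
+	#include <linux/errno.h>
+
+	struct my_work {
+		struct padata_priv padata;	/* embedded, zeroed before use */
+		/* ... data describing the actual job ... */
+	};
+
+	static void my_parallel(struct padata_priv *padata)
+	{
+		struct my_work *work = container_of(padata, struct my_work, padata);
+
+		/* do the heavy lifting here; softirqs are disabled, no sleeping */
+
+		padata_do_serial(&work->padata);	/* job done, serialize it */
+	}
+
+	static void my_serial(struct padata_priv *padata)
+	{
+		struct my_work *work = container_of(padata, struct my_work, padata);
+
+		/* runs on cb_cpu, in submission order; publish results here */
+		kfree(work);
+	}
+
+	static int my_submit(struct padata_instance *pinst, int cb_cpu)
+	{
+		struct my_work *work = kzalloc(sizeof(*work), GFP_KERNEL);
+
+		if (!work)
+			return -ENOMEM;
+		work->padata.parallel = my_parallel;
+		work->padata.serial = my_serial;
+
+		/* -EINPROGRESS from padata_do_parallel() means "accepted" */
+		return padata_do_parallel(pinst, &work->padata, cb_cpu);
+	}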
diff --git a/Documentation/pcmcia/driver-changes.txt b/Documentation/pcmcia/driver-changes.txt
index 446f43b309df..61bc4e943116 100644
--- a/Documentation/pcmcia/driver-changes.txt
+++ b/Documentation/pcmcia/driver-changes.txt
@@ -1,4 +1,17 @@
This file details changes in 2.6 which affect PCMCIA card driver authors:
+* No dev_node_t (as of 2.6.35)
+ There is no more need to fill out a "dev_node_t" structure.
+
+* New IRQ request rules (as of 2.6.35)
+ Instead of the old pcmcia_request_irq() interface, drivers may now
+ choose between:
+ - calling request_irq/free_irq directly. Use the IRQ from *p_dev->irq.
+ - use pcmcia_request_irq(p_dev, handler_t); the PCMCIA core will
+ clean up automatically on calls to pcmcia_disable_device() or
+ device ejection.
+ - drivers still not capable of IRQF_SHARED (or not telling us so) may
+ use the deprecated pcmcia_request_exclusive_irq() for the time
+ being; they might receive a shared IRQ nonetheless.
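+
+  A minimal sketch of the first option (my_isr and the "my_card" name are
+  illustrative, not from an existing driver):
+
+	static irqreturn_t my_isr(int irq, void *dev_id)
+	{
+		/* ... handle the interrupt ... */
+		return IRQ_HANDLED;
+	}
+
+	/* in the config/probe path: */
+	ret = request_irq(p_dev->irq, my_isr, IRQF_SHARED, "my_card", p_dev);
+	...
+	/* in the release path: */
+	free_irq(p_dev->irq, p_dev);
+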
* no cs_error / CS_CHECK / CONFIG_PCMCIA_DEBUG (as of 2.6.33)
Instead of the cs_error() callback or the CS_CHECK() macro, please use
diff --git a/Documentation/power/devices.txt b/Documentation/power/devices.txt
index c9abbd86bc18..57080cd74575 100644
--- a/Documentation/power/devices.txt
+++ b/Documentation/power/devices.txt
@@ -1,7 +1,13 @@
+Device Power Management
+
+Copyright (c) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
+Copyright (c) 2010 Alan Stern <stern@rowland.harvard.edu>
+
+
Most of the code in Linux is device drivers, so most of the Linux power
-management code is also driver-specific. Most drivers will do very little;
-others, especially for platforms with small batteries (like cell phones),
-will do a lot.
+management (PM) code is also driver-specific. Most drivers will do very
+little; others, especially for platforms with small batteries (like cell
+phones), will do a lot.
This writeup gives an overview of how drivers interact with system-wide
power management goals, emphasizing the models and interfaces that are
@@ -15,9 +21,10 @@ Drivers will use one or both of these models to put devices into low-power
states:
System Sleep model:
- Drivers can enter low power states as part of entering system-wide
- low-power states like "suspend-to-ram", or (mostly for systems with
- disks) "hibernate" (suspend-to-disk).
+ Drivers can enter low-power states as part of entering system-wide
+ low-power states like "suspend" (also known as "suspend-to-RAM"), or
+ (mostly for systems with disks) "hibernation" (also known as
+ "suspend-to-disk").
This is something that device, bus, and class drivers collaborate on
by implementing various role-specific suspend and resume methods to
@@ -25,33 +32,41 @@ states:
them without loss of data.
Some drivers can manage hardware wakeup events, which make the system
- leave that low-power state. This feature may be disabled using the
- relevant /sys/devices/.../power/wakeup file; enabling it may cost some
- power usage, but let the whole system enter low power states more often.
+ leave the low-power state. This feature may be enabled or disabled
+ using the relevant /sys/devices/.../power/wakeup file (for Ethernet
+ drivers the ioctl interface used by ethtool may also be used for this
+ purpose); enabling it may cost some power usage, but let the whole
+ system enter low-power states more often.
Runtime Power Management model:
- Drivers may also enter low power states while the system is running,
- independently of other power management activity. Upstream drivers
- will normally not know (or care) if the device is in some low power
- state when issuing requests; the driver will auto-resume anything
- that's needed when it gets a request.
-
- This doesn't have, or need much infrastructure; it's just something you
- should do when writing your drivers. For example, clk_disable() unused
- clocks as part of minimizing power drain for currently-unused hardware.
- Of course, sometimes clusters of drivers will collaborate with each
- other, which could involve task-specific power management.
-
-There's not a lot to be said about those low power states except that they
-are very system-specific, and often device-specific. Also, that if enough
-drivers put themselves into low power states (at "runtime"), the effect may be
-the same as entering some system-wide low-power state (system sleep) ... and
-that synergies exist, so that several drivers using runtime pm might put the
-system into a state where even deeper power saving options are available.
-
-Most suspended devices will have quiesced all I/O: no more DMA or irqs, no
-more data read or written, and requests from upstream drivers are no longer
-accepted. A given bus or platform may have different requirements though.
+ Devices may also be put into low-power states while the system is
+ running, independently of other power management activity in principle.
+ However, devices are not generally independent of each other (for
+ example, a parent device cannot be suspended unless all of its child
+ devices have been suspended). Moreover, depending on the bus type the
+ device is on, it may be necessary to carry out some bus-specific
+ operations on the device for this purpose. Devices put into low power
+ states at run time may require special handling during system-wide power
+ transitions (suspend or hibernation).
+
+ For these reasons not only the device driver itself, but also the
+ appropriate subsystem (bus type, device type or device class) driver and
+ the PM core are involved in runtime power management. As in the system
+ sleep power management case, they need to collaborate by implementing
+ various role-specific suspend and resume methods, so that the hardware
+ is cleanly powered down and reactivated without data or service loss.
+
+There's not a lot to be said about those low-power states except that they are
+very system-specific, and often device-specific. Also, that if enough devices
+have been put into low-power states (at runtime), the effect may be very similar
+to entering some system-wide low-power state (system sleep) ... and that
+synergies exist, so that several drivers using runtime PM might put the system
+into a state where even deeper power saving options are available.
+
+Most suspended devices will have quiesced all I/O: no more DMA or IRQs (except
+for wakeup events), no more data read or written, and requests from upstream
+drivers are no longer accepted. A given bus or platform may have different
+requirements though.
Examples of hardware wakeup events include an alarm from a real time clock,
network wake-on-LAN packets, keyboard or mouse activity, and media insertion
@@ -60,129 +75,152 @@ or removal (for PCMCIA, MMC/SD, USB, and so on).
Interfaces for Entering System Sleep States
===========================================
-Most of the programming interfaces a device driver needs to know about
-relate to that first model: entering a system-wide low power state,
-rather than just minimizing power consumption by one device.
-
-
-Bus Driver Methods
-------------------
-The core methods to suspend and resume devices reside in struct bus_type.
-These are mostly of interest to people writing infrastructure for busses
-like PCI or USB, or because they define the primitives that device drivers
-may need to apply in domain-specific ways to their devices:
-
-struct bus_type {
- ...
- int (*suspend)(struct device *dev, pm_message_t state);
- int (*resume)(struct device *dev);
+There are programming interfaces provided for subsystems (bus type, device type,
+device class) and device drivers to allow them to participate in the power
+management of devices they are concerned with. These interfaces cover both
+system sleep and runtime power management.
+
+
+Device Power Management Operations
+----------------------------------
+Device power management operations, at the subsystem level as well as at the
+device driver level, are implemented by defining and populating objects of type
+struct dev_pm_ops:
+
+struct dev_pm_ops {
+ int (*prepare)(struct device *dev);
+ void (*complete)(struct device *dev);
+ int (*suspend)(struct device *dev);
+ int (*resume)(struct device *dev);
+ int (*freeze)(struct device *dev);
+ int (*thaw)(struct device *dev);
+ int (*poweroff)(struct device *dev);
+ int (*restore)(struct device *dev);
+ int (*suspend_noirq)(struct device *dev);
+ int (*resume_noirq)(struct device *dev);
+ int (*freeze_noirq)(struct device *dev);
+ int (*thaw_noirq)(struct device *dev);
+ int (*poweroff_noirq)(struct device *dev);
+ int (*restore_noirq)(struct device *dev);
+ int (*runtime_suspend)(struct device *dev);
+ int (*runtime_resume)(struct device *dev);
+ int (*runtime_idle)(struct device *dev);
};
-Bus drivers implement those methods as appropriate for the hardware and
-the drivers using it; PCI works differently from USB, and so on. Not many
-people write bus drivers; most driver code is a "device driver" that
-builds on top of bus-specific framework code.
+This structure is defined in include/linux/pm.h and the methods included in it
+are also described in that file. Their roles will be explained in what follows.
+For now, it should be sufficient to remember that the last three methods are
+specific to runtime power management while the remaining ones are used during
+system-wide power transitions.
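+
+As an illustration only (mydrv_* is not an existing driver; which
+callbacks a driver actually needs depends on the hardware), a driver
+might populate such an object as follows:
+
+#include <linux/pm.h>
+
+static int mydrv_suspend(struct device *dev)
+{
+	/* quiesce the device and put it into a low-power state */
+	return 0;
+}
+
+static int mydrv_resume(struct device *dev)
+{
+	/* restore context and bring the device back to full power */
+	return 0;
+}
+
+static const struct dev_pm_ops mydrv_pm_ops = {
+	.suspend = mydrv_suspend,
+	.resume = mydrv_resume,
+	/* callbacks left unset are simply skipped by the PM core */
+};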
-For more information on these driver calls, see the description later;
-they are called in phases for every device, respecting the parent-child
-sequencing in the driver model tree. Note that as this is being written,
-only the suspend() and resume() are widely available; not many bus drivers
-leverage all of those phases, or pass them down to lower driver levels.
+There also is a deprecated "old" or "legacy" interface for power management
+operations available at least for some subsystems. This approach does not use
+struct dev_pm_ops objects and it is suitable only for implementing system sleep
+power management methods. Therefore it is not described in this document, so
+please refer directly to the source code for more information about it.
-/sys/devices/.../power/wakeup files
------------------------------------
-All devices in the driver model have two flags to control handling of
-wakeup events, which are hardware signals that can force the device and/or
-system out of a low power state. These are initialized by bus or device
-driver code using device_init_wakeup(dev,can_wakeup).
+Subsystem-Level Methods
+-----------------------
+The core methods to suspend and resume devices reside in struct dev_pm_ops
+pointed to by the pm member of struct bus_type, struct device_type and
+struct class. They are mostly of interest to the people writing infrastructure
+for buses, like PCI or USB, or device type and device class drivers.
-The "can_wakeup" flag just records whether the device (and its driver) can
-physically support wakeup events. When that flag is clear, the sysfs
-"wakeup" file is empty, and device_may_wakeup() returns false.
+Bus drivers implement these methods as appropriate for the hardware and the
+drivers using it; PCI works differently from USB, and so on. Not many people
+write subsystem-level drivers; most driver code is a "device driver" that builds
+on top of bus-specific framework code.
-For devices that can issue wakeup events, a separate flag controls whether
-that device should try to use its wakeup mechanism. The initial value of
-device_may_wakeup() will be true, so that the device's "wakeup" file holds
-the value "enabled". Userspace can change that to "disabled" so that
-device_may_wakeup() returns false; or change it back to "enabled" (so that
-it returns true again).
+For more information on these driver calls, see the description later;
+they are called in phases for every device, respecting the parent-child
+sequencing in the driver model tree.
-EXAMPLE: PCI Device Driver Methods
+/sys/devices/.../power/wakeup files
-----------------------------------
-PCI framework software calls these methods when the PCI device driver bound
-to a device device has provided them:
-
-struct pci_driver {
- ...
- int (*suspend)(struct pci_device *pdev, pm_message_t state);
- int (*suspend_late)(struct pci_device *pdev, pm_message_t state);
+All devices in the driver model have two flags to control handling of wakeup
+events (hardware signals that can force the device and/or system out of a low
+power state). These flags are initialized by bus or device driver code using
+device_set_wakeup_capable() and device_set_wakeup_enable(), defined in
+include/linux/pm_wakeup.h.
- int (*resume_early)(struct pci_device *pdev);
- int (*resume)(struct pci_device *pdev);
-};
-
-Drivers will implement those methods, and call PCI-specific procedures
-like pci_set_power_state(), pci_enable_wake(), pci_save_state(), and
-pci_restore_state() to manage PCI-specific mechanisms. (PCI config space
-could be saved during driver probe, if it weren't for the fact that some
-systems rely on userspace tweaking using setpci.) Devices are suspended
-before their bridges enter low power states, and likewise bridges resume
-before their devices.
-
-
-Upper Layers of Driver Stacks
------------------------------
-Device drivers generally have at least two interfaces, and the methods
-sketched above are the ones which apply to the lower level (nearer PCI, USB,
-or other bus hardware). The network and block layers are examples of upper
-level interfaces, as is a character device talking to userspace.
-
-Power management requests normally need to flow through those upper levels,
-which often use domain-oriented requests like "blank that screen". In
-some cases those upper levels will have power management intelligence that
-relates to end-user activity, or other devices that work in cooperation.
-
-When those interfaces are structured using class interfaces, there is a
-standard way to have the upper layer stop issuing requests to a given
-class device (and restart later):
-
-struct class {
- ...
- int (*suspend)(struct device *dev, pm_message_t state);
- int (*resume)(struct device *dev);
-};
-
-Those calls are issued in specific phases of the process by which the
-system enters a low power "suspend" state, or resumes from it.
-
-
-Calling Drivers to Enter System Sleep States
-============================================
-When the system enters a low power state, each device's driver is asked
-to suspend the device by putting it into state compatible with the target
+The "can_wakeup" flag just records whether the device (and its driver) can
+physically support wakeup events. The device_set_wakeup_capable() routine
+affects this flag. The "should_wakeup" flag controls whether the device should
+try to use its wakeup mechanism. device_set_wakeup_enable() affects this flag;
+for the most part drivers should not change its value. The initial value of
+should_wakeup is supposed to be false for the majority of devices; the major
+exceptions are power buttons, keyboards, and Ethernet adapters whose WoL
+(wake-on-LAN) feature has been set up with ethtool.
+
+Whether or not a device is capable of issuing wakeup events is a hardware
+matter, and the kernel is responsible for keeping track of it. By contrast,
+whether or not a wakeup-capable device should issue wakeup events is a policy
+decision, and it is managed by user space through a sysfs attribute: the
+power/wakeup file. User space can write the strings "enabled" or "disabled" to
+set or clear the should_wakeup flag, respectively. Reads from the file will
+return the corresponding string if can_wakeup is true, but if can_wakeup is
+false then reads will return an empty string, to indicate that the device
+doesn't support wakeup events. (But even though the file appears empty, writes
+will still affect the should_wakeup flag.)
+
+The device_may_wakeup() routine returns true only if both flags are set.
+Drivers should check this routine when putting devices in a low-power state
+during a system sleep transition, to see whether or not to enable the devices'
+wakeup mechanisms. However for runtime power management, wakeup events should
+be enabled whenever the device and driver both support them, regardless of the
+should_wakeup flag.
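+
+For example (a sketch only; struct mychip and its irq field are
+illustrative), a driver's system-sleep suspend callback might do:
+
+	struct mychip {
+		int irq;
+		/* ... */
+	};
+
+	static int mychip_suspend(struct device *dev)
+	{
+		struct mychip *chip = dev_get_drvdata(dev);
+
+		/* ... quiesce the hardware ... */
+		if (device_may_wakeup(dev))
+			enable_irq_wake(chip->irq);	/* arm the wakeup source */
+		return 0;
+	}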
+
+
+/sys/devices/.../power/control files
+------------------------------------
+Each device in the driver model has a flag to control whether it is subject to
+runtime power management. This flag, called runtime_auto, is initialized by the
+bus type (or generally subsystem) code using pm_runtime_allow() or
+pm_runtime_forbid(); the default is to allow runtime power management.
+
+The setting can be adjusted by user space by writing either "on" or "auto" to
+the device's power/control sysfs file. Writing "auto" calls pm_runtime_allow(),
+setting the flag and allowing the device to be runtime power-managed by its
+driver. Writing "on" calls pm_runtime_forbid(), clearing the flag, returning
+the device to full power if it was in a low-power state, and preventing the
+device from being runtime power-managed. User space can check the current value
+of the runtime_auto flag by reading the file.
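+
+For example (the elided device path is as in the text above):
+
+	# cat /sys/devices/.../power/control
+	auto
+	# echo on > /sys/devices/.../power/control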
+
+The device's runtime_auto flag has no effect on the handling of system-wide
+power transitions. In particular, the device can (and in the majority of cases
+should and will) be put into a low-power state during a system-wide transition
+to a sleep state even though its runtime_auto flag is clear.
+
+For more information about the runtime power management framework, refer to
+Documentation/power/runtime_pm.txt.
+
+
+Calling Drivers to Enter and Leave System Sleep States
+======================================================
+When the system goes into a sleep state, each device's driver is asked to
+suspend the device by putting it into a state compatible with the target
system state. That's usually some version of "off", but the details are
system-specific. Also, wakeup-enabled devices will usually stay partly
functional in order to wake the system.
-When the system leaves that low power state, the device's driver is asked
-to resume it. The suspend and resume operations always go together, and
-both are multi-phase operations.
+When the system leaves that low-power state, the device's driver is asked to
+resume it by returning it to full power. The suspend and resume operations
+always go together, and both are multi-phase operations.
-For simple drivers, suspend might quiesce the device using the class code
-and then turn its hardware as "off" as possible with late_suspend. The
+For simple drivers, suspend might quiesce the device using class code
+and then turn its hardware as "off" as possible during suspend_noirq. The
matching resume calls would then completely reinitialize the hardware
before reactivating its class I/O queues.
-More power-aware drivers drivers will use more than one device low power
-state, either at runtime or during system sleep states, and might trigger
-system wakeup events.
+More power-aware drivers might prepare the devices for triggering system wakeup
+events.
Call Sequence Guarantees
------------------------
-To ensure that bridges and similar links needed to talk to a device are
+To ensure that bridges and similar links needing to talk to a device are
available when the device is suspended or resumed, the device tree is
walked in a bottom-up order to suspend devices. A top-down order is
used to resume those devices.
@@ -194,67 +232,310 @@ its parent; and can't be removed or suspended after that parent.
The policy is that the device tree should match hardware bus topology.
(Or at least the control bus, for devices which use multiple busses.)
In particular, this means that a device registration may fail if the parent of
-the device is suspending (ie. has been chosen by the PM core as the next
+the device is suspending (i.e. has been chosen by the PM core as the next
device to suspend) or has already suspended, as well as after all of the other
devices have been suspended. Device drivers must be prepared to cope with such
situations.
-Suspending Devices
-------------------
-Suspending a given device is done in several phases. Suspending the
-system always includes every phase, executing calls for every device
-before the next phase begins. Not all busses or classes support all
-these callbacks; and not all drivers use all the callbacks.
+System Power Management Phases
+------------------------------
+Suspending or resuming the system is done in several phases. Different phases
+are used for standby or memory sleep states ("suspend-to-RAM") and the
+hibernation state ("suspend-to-disk"). Each phase involves executing callbacks
+for every device before the next phase begins. Not all busses or classes
+support all these callbacks and not all drivers use all the callbacks. The
+various phases always run after tasks have been frozen and before they are
+unfrozen. Furthermore, the *_noirq phases run at a time when IRQ handlers have
+been disabled (except for those marked with the IRQ_WAKEUP flag).
-The phases are seen by driver notifications issued in this order:
+Most phases use bus, type, and class callbacks (that is, methods defined in
+dev->bus->pm, dev->type->pm, and dev->class->pm). The prepare and complete
+phases are exceptions; they use only bus callbacks. When multiple callbacks
+are used in a phase, they are invoked in the order: <class, type, bus> during
+power-down transitions and in the opposite order during power-up transitions.
+For example, during the suspend phase the PM core invokes
- 1 class.suspend(dev, message) is called after tasks are frozen, for
- devices associated with a class that has such a method. This
- method may sleep.
+ dev->class->pm.suspend(dev);
+ dev->type->pm.suspend(dev);
+ dev->bus->pm.suspend(dev);
- Since I/O activity usually comes from such higher layers, this is
- a good place to quiesce all drivers of a given type (and keep such
- code out of those drivers).
+before moving on to the next device, whereas during the resume phase the core
+invokes
- 2 bus.suspend(dev, message) is called next. This method may sleep,
- and is often morphed into a device driver call with bus-specific
- parameters and/or rules.
+ dev->bus->pm.resume(dev);
+ dev->type->pm.resume(dev);
+ dev->class->pm.resume(dev);
- This call should handle parts of device suspend logic that require
- sleeping. It probably does work to quiesce the device which hasn't
- been abstracted into class.suspend().
+These callbacks may in turn invoke device- or driver-specific methods stored in
+dev->driver->pm, but they don't have to.
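+
+For instance, a bus type's suspend callback might simply forward to the
+driver's own method, along the lines of the purely illustrative sketch below
+(the foo_* name is invented):
+
+#include <linux/device.h>
+#include <linux/pm.h>
+
+static int foo_bus_suspend(struct device *dev)
+{
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+	/* Run the driver-specific method, if the driver provides one. */
+	if (pm && pm->suspend)
+		return pm->suspend(dev);
+
+	return 0;
+}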
-The pm_message_t parameter is currently used to refine those semantics
-(described later).
-At the end of those phases, drivers should normally have stopped all I/O
-transactions (DMA, IRQs), saved enough state that they can re-initialize
-or restore previous state (as needed by the hardware), and placed the
-device into a low-power state. On many platforms they will also use
-clk_disable() to gate off one or more clock sources; sometimes they will
-also switch off power supplies, or reduce voltages. Drivers which have
-runtime PM support may already have performed some or all of the steps
-needed to prepare for the upcoming system sleep state.
+Entering System Suspend
+-----------------------
+When the system goes into the standby or memory sleep state, the phases are:
+
+ prepare, suspend, suspend_noirq.
+
+ 1. The prepare phase is meant to prevent races by preventing new devices
+ from being registered; the PM core would never know that all the
+ children of a device had been suspended if new children could be
+ registered at will. (By contrast, devices may be unregistered at any
+ time.) Unlike the other suspend-related phases, during the prepare
+ phase the device tree is traversed top-down.
+
+ The prepare phase uses only a bus callback. After the callback method
+ returns, no new children may be registered below the device. The method
+ may also prepare the device or driver in some way for the upcoming
+ system power transition, but it should not put the device into a
+ low-power state.
+
+ 2. The suspend methods should quiesce the device to stop it from performing
+ I/O. They also may save the device registers and put it into the
+ appropriate low-power state, depending on the bus type the device is on,
+ and they may enable wakeup events.
+
+ 3. The suspend_noirq phase occurs after IRQ handlers have been disabled,
+ which means that the driver's interrupt handler will not be called while
+ the callback method is running. The methods should save the values of
+ the device's registers that weren't saved previously and finally put the
+ device into the appropriate low-power state.
+
+ The majority of subsystems and device drivers need not implement this
+ callback. However, bus types allowing devices to share interrupt
+ vectors, like PCI, generally need it; otherwise a driver might encounter
+ an error during the suspend phase by fielding a shared interrupt
+ generated by some other device after its own device had been set to low
+ power.
+
+At the end of these phases, drivers should have stopped all I/O transactions
+(DMA, IRQs), saved enough state that they can re-initialize or restore previous
+state (as needed by the hardware), and placed the device into a low-power state.
+On many platforms they will gate off one or more clock sources; sometimes they
+will also switch off power supplies or reduce voltages. (Drivers supporting
+runtime PM may already have performed some or all of these steps.)
+
+If device_may_wakeup(dev) returns true, the device should be prepared for
+generating hardware wakeup signals to trigger a system wakeup event when the
+system is in the sleep state. For example, enable_irq_wake() might identify
+GPIO signals hooked up to a switch or other external hardware, and
+pci_enable_wake() does something similar for the PCI PME signal.
+
+If any of these callbacks returns an error, the system won't enter the desired
+low-power state. Instead the PM core will unwind its actions by resuming all
+the devices that were suspended.
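+
+To make the division of work between the suspend and suspend_noirq phases
+concrete, a hypothetical driver might split its activity roughly as in the
+sketch below; the callback bodies only describe the intended work and the
+foo_* names are invented:
+
+#include <linux/device.h>
+#include <linux/pm.h>
+
+static int foo_suspend(struct device *dev)
+{
+	/*
+	 * Quiesce the device here: stop I/O queues, cancel pending work,
+	 * disable DMA, and (if device_may_wakeup(dev)) arm wakeup signals.
+	 */
+	return 0;
+}
+
+static int foo_suspend_noirq(struct device *dev)
+{
+	/*
+	 * IRQ handlers are disabled at this point; save any remaining
+	 * register state and put the hardware into its low-power state.
+	 */
+	return 0;
+}
+
+static const struct dev_pm_ops foo_pm_ops = {
+	.suspend	= foo_suspend,
+	.suspend_noirq	= foo_suspend_noirq,
+};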
+
+
+Leaving System Suspend
+----------------------
+When resuming from standby or memory sleep, the phases are:
+
+ resume_noirq, resume, complete.
+
+ 1. The resume_noirq callback methods should perform any actions needed
+ before the driver's interrupt handlers are invoked. This generally
+ means undoing the actions of the suspend_noirq phase. If the bus type
+ permits devices to share interrupt vectors, like PCI, the method should
+ bring the device and its driver into a state in which the driver can
+ recognize if the device is the source of incoming interrupts, if any,
+ and handle them correctly.
+
+ For example, the PCI bus type's ->pm.resume_noirq() puts the device into
+ the full-power state (D0 in the PCI terminology) and restores the
+ standard configuration registers of the device. Then it calls the
+ device driver's ->pm.resume_noirq() method to perform device-specific
+ actions.
+
+ 2. The resume methods should bring the device back to its operating
+ state, so that it can perform normal I/O. This generally involves
+ undoing the actions of the suspend phase.
+
+ 3. The complete phase uses only a bus callback. The method should undo the
+ actions of the prepare phase. Note, however, that new children may be
+ registered below the device as soon as the resume callbacks occur; it's
+ not necessary to wait until the complete phase.
+
+At the end of these phases, drivers should be as functional as they were before
+suspending: I/O can be performed using DMA and IRQs, and the relevant clocks are
+gated on. Even if the device was in a low-power state before the system sleep
+because of runtime power management, afterwards it should be back in its
+full-power state. There are multiple reasons why it's best to do this; they are
+discussed in more detail in Documentation/power/runtime_pm.txt.
-When any driver sees that its device_can_wakeup(dev), it should make sure
-to use the relevant hardware signals to trigger a system wakeup event.
-For example, enable_irq_wake() might identify GPIO signals hooked up to
-a switch or other external hardware, and pci_enable_wake() does something
-similar for PCI's PME# signal.
+However, the details here may again be platform-specific. For example,
+some systems support multiple "run" states, and the mode in effect at
+the end of resume might not be the one which preceded suspension.
+That means the availability of certain clocks or power supplies may have
+changed, which could easily affect how a driver works.
+
+Drivers need to be able to handle hardware which has been reset since the
+suspend methods were called, for example by complete reinitialization.
+This may be the hardest part, and the one most protected by NDA'd documents
+and chip errata. It's simplest if the hardware state hasn't changed since
+the suspend was carried out, but that can't be guaranteed (in fact, it usually
+is not the case).
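+
+For instance, a driver might detect whether its hardware lost state across the
+sleep transition by checking a value it wrote before suspending; everything in
+the sketch below (the register offset, the magic value, the use of drvdata for
+the register mapping) is hypothetical:
+
+#include <linux/device.h>
+#include <linux/io.h>
+
+#define FOO_SCRATCH_REG		0x40		/* hypothetical scratch register */
+#define FOO_SCRATCH_MAGIC	0x5afe5afe	/* value written during suspend */
+
+static int foo_resume(struct device *dev)
+{
+	void __iomem *base = dev_get_drvdata(dev);	/* hypothetical mapping */
+
+	if (readl(base + FOO_SCRATCH_REG) != FOO_SCRATCH_MAGIC) {
+		/*
+		 * The hardware was reset or powered off while the system
+		 * slept, so a full reinitialization is required here.
+		 */
+	}
+
+	return 0;
+}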
+
+Drivers must also be prepared to notice that the device has been removed
+while the system was powered down, whenever that's physically possible.
+PCMCIA, MMC, USB, Firewire, SCSI, and even IDE are common examples of busses
+where common Linux platforms will see such removal. Details of how drivers
+will notice and handle such removals are currently bus-specific, and often
+involve a separate thread.
+
+These callbacks may return an error value, but the PM core will ignore such
+errors since there's nothing it can do about them other than printing them in
+the system log.
+
+
+Entering Hibernation
+--------------------
+Hibernating the system is more complicated than putting it into the standby or
+memory sleep state, because it involves creating and saving a system image.
+Therefore there are more phases for hibernation, with a different set of
+callbacks. These phases always run after tasks have been frozen and memory has
+been freed.
+
+The general procedure for hibernation is to quiesce all devices (freeze), create
+an image of the system memory while everything is stable, reactivate all
+devices (thaw), write the image to permanent storage, and finally shut down the
+system (poweroff). The phases used to accomplish this are:
+
+ prepare, freeze, freeze_noirq, thaw_noirq, thaw, complete,
+ prepare, poweroff, poweroff_noirq
+
+ 1. The prepare phase is discussed in the "Entering System Suspend" section
+ above.
+
+ 2. The freeze methods should quiesce the device so that it doesn't generate
+ IRQs or DMA, and they may need to save the values of device registers.
+ However the device does not have to be put in a low-power state, and to
+ save time it's best not to do so. Also, the device should not be
+ prepared to generate wakeup events.
+
+ 3. The freeze_noirq phase is analogous to the suspend_noirq phase discussed
+ above, except again that the device should not be put in a low-power
+ state and should not be allowed to generate wakeup events.
+
+At this point the system image is created. All devices should be inactive and
+the contents of memory should remain undisturbed while this happens, so that the
+image forms an atomic snapshot of the system state.
+
+ 4. The thaw_noirq phase is analogous to the resume_noirq phase discussed
+ above. The main difference is that its methods can assume the device is
+ in the same state as at the end of the freeze_noirq phase.
+
+ 5. The thaw phase is analogous to the resume phase discussed above. Its
+ methods should bring the device back to an operating state, so that it
+ can be used for saving the image if necessary.
+
+ 6. The complete phase is discussed in the "Leaving System Suspend" section
+ above.
+
+At this point the system image is saved, and the devices then need to be
+prepared for the upcoming system shutdown. This is much like suspending them
+before putting the system into the standby or memory sleep state, and the phases
+are similar.
+
+ 7. The prepare phase is discussed above.
+
+ 8. The poweroff phase is analogous to the suspend phase.
+
+ 9. The poweroff_noirq phase is analogous to the suspend_noirq phase.
+
+The poweroff and poweroff_noirq callbacks should do essentially the same things
+as the suspend and suspend_noirq callbacks. The only notable difference is that
+they need not store the device register values, because the registers should
+already have been stored during the freeze or freeze_noirq phases.
+
+
+Leaving Hibernation
+-------------------
+Resuming from hibernation is, again, more complicated than resuming from a sleep
+state in which the contents of main memory are preserved, because it requires
+a system image to be loaded into memory and the pre-hibernation memory contents
+to be restored before control can be passed back to the image kernel.
+
+Although in principle the image might be loaded into memory and the
+pre-hibernation memory contents restored by the boot loader, in practice this
+can't be done because boot loaders aren't smart enough and there is no
+established protocol for passing the necessary information. So instead, the
+boot loader loads a fresh instance of the kernel, called the boot kernel, into
+memory and passes control to it in the usual way. Then the boot kernel reads
+the system image, restores the pre-hibernation memory contents, and passes
+control to the image kernel. Thus two different kernels are involved in
+resuming from hibernation. In fact, the boot kernel may be completely different
+from the image kernel: a different configuration and even a different version.
+This has important consequences for device drivers and their subsystems.
+
+To be able to load the system image into memory, the boot kernel needs to
+include at least a subset of device drivers allowing it to access the storage
+medium containing the image, although it doesn't need to include all of the
+drivers present in the image kernel. After the image has been loaded, the
+devices managed by the boot kernel need to be prepared for passing control back
+to the image kernel. This is very similar to the initial steps involved in
+creating a system image, and it is accomplished in the same way, using prepare,
+freeze, and freeze_noirq phases. However the devices affected by these phases
+are only those having drivers in the boot kernel; other devices will still be in
+whatever state the boot loader left them.
+
+Should the restoration of the pre-hibernation memory contents fail, the boot
+kernel would go through the "thawing" procedure described above, using the
+thaw_noirq, thaw, and complete phases, and then continue running normally. This
+happens only rarely. Most often the pre-hibernation memory contents are
+restored successfully and control is passed to the image kernel, which then
+becomes responsible for bringing the system back to the working state.
+
+To achieve this, the image kernel must restore the devices' pre-hibernation
+functionality. The operation is much like waking up from the memory sleep
+state, although it involves different phases:
+
+ restore_noirq, restore, complete
+
+ 1. The restore_noirq phase is analogous to the resume_noirq phase.
+
+ 2. The restore phase is analogous to the resume phase.
+
+ 3. The complete phase is discussed above.
+
+The main difference from resume[_noirq] is that restore[_noirq] must assume the
+device has been accessed and reconfigured by the boot loader or the boot kernel.
+Consequently the state of the device may be different from the state remembered
+from the freeze and freeze_noirq phases. The device may even need to be reset
+and completely re-initialized. In many cases this difference doesn't matter, so
+the resume[_noirq] and restore[_noirq] method pointers can be set to the same
+routines. Nevertheless, different callback pointers are used in case there is a
+situation where it actually matters.
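+
+A driver for which the distinction doesn't matter might therefore populate its
+struct dev_pm_ops along the lines of the sketch below; the foo_* callbacks are
+invented, the bodies only describe the intended work, and the freeze routine
+deliberately skips the low-power transition as described above:
+
+#include <linux/device.h>
+#include <linux/pm.h>
+
+static int foo_suspend(struct device *dev)
+{
+	/* Quiesce the device and put it into a low-power state. */
+	return 0;
+}
+
+static int foo_freeze(struct device *dev)
+{
+	/* Quiesce the device, but leave it at full power; don't arm wakeup. */
+	return 0;
+}
+
+static int foo_resume(struct device *dev)
+{
+	/* Reinitialize the hardware; this is also good enough for restore. */
+	return 0;
+}
+
+static int foo_thaw(struct device *dev)
+{
+	/* Reactivate the device so it can be used for saving the image. */
+	return 0;
+}
+
+static const struct dev_pm_ops foo_pm_ops = {
+	.suspend  = foo_suspend,
+	.resume   = foo_resume,
+	.freeze   = foo_freeze,
+	.thaw     = foo_thaw,
+	.poweroff = foo_suspend,	/* registers were saved during freeze */
+	.restore  = foo_resume,		/* same routine as resume */
+};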
-If a driver (or bus, or class) fails it suspend method, the system won't
-enter the desired low power state; it will resume all the devices it's
-suspended so far.
-Note that drivers may need to perform different actions based on the target
-system lowpower/sleep state. At this writing, there are only platform
-specific APIs through which drivers could determine those target states.
+System Devices
+--------------
+System devices (sysdevs) follow a slightly different API, which can be found in
+
+ include/linux/sysdev.h
+ drivers/base/sys.c
+
+System devices will be suspended with interrupts disabled, and after all other
+devices have been suspended. On resume, they will be resumed before any other
+devices, and also with interrupts disabled. These things occur in special
+"sysdev_driver" phases, which affect only system devices.
+
+Thus, after the suspend_noirq (or freeze_noirq or poweroff_noirq) phase, when
+the non-boot CPUs are all offline and IRQs are disabled on the remaining online
+CPU, then a sysdev_driver.suspend phase is carried out, and the system enters a
+sleep state (or a system image is created). During resume (or after the image
+has been created or loaded) a sysdev_driver.resume phase is carried out, IRQs
+are enabled on the only online CPU, the non-boot CPUs are enabled, and the
+resume_noirq (or thaw_noirq or restore_noirq) phase begins.
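+
+A bare sketch of such a driver is shown below; the foo_* names are invented
+and only the suspend/resume members of struct sysdev_driver are shown, on the
+assumption that the signatures match include/linux/sysdev.h of this era:
+
+#include <linux/sysdev.h>
+
+static int foo_sysdev_suspend(struct sys_device *sysdev, pm_message_t state)
+{
+	/* Runs with IRQs disabled, after every normal device has suspended. */
+	return 0;
+}
+
+static int foo_sysdev_resume(struct sys_device *sysdev)
+{
+	/* Runs with IRQs still disabled, before any normal device resumes. */
+	return 0;
+}
+
+static struct sysdev_driver foo_sysdev_driver = {
+	.suspend = foo_sysdev_suspend,
+	.resume  = foo_sysdev_resume,
+};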
+
+Code to actually enter and exit the system-wide low power state sometimes
+involves hardware details that are only known to the boot firmware, and
+may leave a CPU running software (from SRAM or flash memory) that monitors
+the system and manages its wakeup sequence.
Device Low Power (suspend) States
---------------------------------
-Device low-power states aren't very standard. One device might only handle
+Device low-power states aren't standard. One device might only handle
"on" and "off, while another might support a dozen different versions of
"on" (how many engines are active?), plus a state that gets back to "on"
faster than from a full "off".
@@ -265,7 +546,7 @@ PCI device may not perform DMA or issue IRQs, and any wakeup events it
issues would be issued through the PME# bus signal. Plus, there are
several PCI-standard device states, some of which are optional.
-In contrast, integrated system-on-chip processors often use irqs as the
+In contrast, integrated system-on-chip processors often use IRQs as the
wakeup event sources (so drivers would call enable_irq_wake) and might
be able to treat DMA completion as a wakeup event (sometimes DMA can stay
active too, it'd only be the CPU and some peripherals that sleep).
@@ -284,120 +565,17 @@ ways; the aforementioned LCD might be active in one product's "standby",
but a different product using the same SOC might work differently.
-Meaning of pm_message_t.event
------------------------------
-Parameters to suspend calls include the device affected and a message of
-type pm_message_t, which has one field: the event. If driver does not
-recognize the event code, suspend calls may abort the request and return
-a negative errno. However, most drivers will be fine if they implement
-PM_EVENT_SUSPEND semantics for all messages.
+Power Management Notifiers
+--------------------------
+There are some operations that cannot be carried out by the power management
+callbacks discussed above, because the callbacks occur too late or too early.
+To handle these cases, subsystems and device drivers may register power
+management notifiers that are called before tasks are frozen and after they have
+been thawed. Generally speaking, the PM notifiers are suitable for performing
+actions that either require user space to be available, or at least won't
+interfere with user space.
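+
+For example, a driver that needs user space to be available (say, to fetch
+firmware) before tasks are frozen might register a notifier as in the sketch
+below; the foo_* names are invented and the actions are only indicated by
+comments:
+
+#include <linux/notifier.h>
+#include <linux/suspend.h>
+
+static int foo_pm_notifier(struct notifier_block *nb, unsigned long event,
+			   void *unused)
+{
+	switch (event) {
+	case PM_HIBERNATION_PREPARE:
+	case PM_SUSPEND_PREPARE:
+		/* Tasks are about to be frozen; user space is still usable. */
+		break;
+	case PM_POST_HIBERNATION:
+	case PM_POST_SUSPEND:
+		/* Tasks have been thawed again; release any resources. */
+		break;
+	}
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block foo_pm_nb = {
+	.notifier_call = foo_pm_notifier,
+};
+
+/* Typically called from the driver's init code: */
+/*	register_pm_notifier(&foo_pm_nb); */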
-The event codes are used to refine the goal of suspending the device, and
-mostly matter when creating or resuming system memory image snapshots, as
-used with suspend-to-disk:
-
- PM_EVENT_SUSPEND -- quiesce the driver and put hardware into a low-power
- state. When used with system sleep states like "suspend-to-RAM" or
- "standby", the upcoming resume() call will often be able to rely on
- state kept in hardware, or issue system wakeup events.
-
- PM_EVENT_HIBERNATE -- Put hardware into a low-power state and enable wakeup
- events as appropriate. It is only used with hibernation
- (suspend-to-disk) and few devices are able to wake up the system from
- this state; most are completely powered off.
-
- PM_EVENT_FREEZE -- quiesce the driver, but don't necessarily change into
- any low power mode. A system snapshot is about to be taken, often
- followed by a call to the driver's resume() method. Neither wakeup
- events nor DMA are allowed.
-
- PM_EVENT_PRETHAW -- quiesce the driver, knowing that the upcoming resume()
- will restore a suspend-to-disk snapshot from a different kernel image.
- Drivers that are smart enough to look at their hardware state during
- resume() processing need that state to be correct ... a PRETHAW could
- be used to invalidate that state (by resetting the device), like a
- shutdown() invocation would before a kexec() or system halt. Other
- drivers might handle this the same way as PM_EVENT_FREEZE. Neither
- wakeup events nor DMA are allowed.
-
-To enter "standby" (ACPI S1) or "Suspend to RAM" (STR, ACPI S3) states, or
-the similarly named APM states, only PM_EVENT_SUSPEND is used; the other event
-codes are used for hibernation ("Suspend to Disk", STD, ACPI S4).
-
-There's also PM_EVENT_ON, a value which never appears as a suspend event
-but is sometimes used to record the "not suspended" device state.
-
-
-Resuming Devices
-----------------
-Resuming is done in multiple phases, much like suspending, with all
-devices processing each phase's calls before the next phase begins.
-
-The phases are seen by driver notifications issued in this order:
-
- 1 bus.resume(dev) reverses the effects of bus.suspend(). This may
- be morphed into a device driver call with bus-specific parameters;
- implementations may sleep.
-
- 2 class.resume(dev) is called for devices associated with a class
- that has such a method. Implementations may sleep.
-
- This reverses the effects of class.suspend(), and would usually
- reactivate the device's I/O queue.
-
-At the end of those phases, drivers should normally be as functional as
-they were before suspending: I/O can be performed using DMA and IRQs, and
-the relevant clocks are gated on. The device need not be "fully on"; it
-might be in a runtime lowpower/suspend state that acts as if it were.
-
-However, the details here may again be platform-specific. For example,
-some systems support multiple "run" states, and the mode in effect at
-the end of resume() might not be the one which preceded suspension.
-That means availability of certain clocks or power supplies changed,
-which could easily affect how a driver works.
-
-
-Drivers need to be able to handle hardware which has been reset since the
-suspend methods were called, for example by complete reinitialization.
-This may be the hardest part, and the one most protected by NDA'd documents
-and chip errata. It's simplest if the hardware state hasn't changed since
-the suspend() was called, but that can't always be guaranteed.
-
-Drivers must also be prepared to notice that the device has been removed
-while the system was powered off, whenever that's physically possible.
-PCMCIA, MMC, USB, Firewire, SCSI, and even IDE are common examples of busses
-where common Linux platforms will see such removal. Details of how drivers
-will notice and handle such removals are currently bus-specific, and often
-involve a separate thread.
-
-
-Note that the bus-specific runtime PM wakeup mechanism can exist, and might
-be defined to share some of the same driver code as for system wakeup. For
-example, a bus-specific device driver's resume() method might be used there,
-so it wouldn't only be called from bus.resume() during system-wide wakeup.
-See bus-specific information about how runtime wakeup events are handled.
-
-
-System Devices
---------------
-System devices follow a slightly different API, which can be found in
-
- include/linux/sysdev.h
- drivers/base/sys.c
-
-System devices will only be suspended with interrupts disabled, and after
-all other devices have been suspended. On resume, they will be resumed
-before any other devices, and also with interrupts disabled.
-
-That is, IRQs are disabled, the suspend_late() phase begins, then the
-sysdev_driver.suspend() phase, and the system enters a sleep state. Then
-the sysdev_driver.resume() phase begins, followed by the resume_early()
-phase, after which IRQs are enabled.
-
-Code to actually enter and exit the system-wide low power state sometimes
-involves hardware details that are only known to the boot firmware, and
-may leave a CPU running software (from SRAM or flash memory) that monitors
-the system and manages its wakeup sequence.
+For details refer to Documentation/power/notifiers.txt.
Runtime Power Management
@@ -407,82 +585,23 @@ running. This feature is useful for devices that are not being used, and
can offer significant power savings on a running system. These devices
often support a range of runtime power states, which might use names such
as "off", "sleep", "idle", "active", and so on. Those states will in some
-cases (like PCI) be partially constrained by a bus the device uses, and will
+cases (like PCI) be partially constrained by the bus the device uses, and will
usually include hardware states that are also used in system sleep states.
-However, note that if a driver puts a device into a runtime low power state
-and the system then goes into a system-wide sleep state, it normally ought
-to resume into that runtime low power state rather than "full on". Such
-distinctions would be part of the driver-internal state machine for that
-hardware; the whole point of runtime power management is to be sure that
-drivers are decoupled in that way from the state machine governing phases
-of the system-wide power/sleep state transitions.
-
-
-Power Saving Techniques
------------------------
-Normally runtime power management is handled by the drivers without specific
-userspace or kernel intervention, by device-aware use of techniques like:
-
- Using information provided by other system layers
- - stay deeply "off" except between open() and close()
- - if transceiver/PHY indicates "nobody connected", stay "off"
- - application protocols may include power commands or hints
-
- Using fewer CPU cycles
- - using DMA instead of PIO
- - removing timers, or making them lower frequency
- - shortening "hot" code paths
- - eliminating cache misses
- - (sometimes) offloading work to device firmware
-
- Reducing other resource costs
- - gating off unused clocks in software (or hardware)
- - switching off unused power supplies
- - eliminating (or delaying/merging) IRQs
- - tuning DMA to use word and/or burst modes
-
- Using device-specific low power states
- - using lower voltages
- - avoiding needless DMA transfers
-
-Read your hardware documentation carefully to see the opportunities that
-may be available. If you can, measure the actual power usage and check
-it against the budget established for your project.
-
-
-Examples: USB hosts, system timer, system CPU
-----------------------------------------------
-USB host controllers make interesting, if complex, examples. In many cases
-these have no work to do: no USB devices are connected, or all of them are
-in the USB "suspend" state. Linux host controller drivers can then disable
-periodic DMA transfers that would otherwise be a constant power drain on the
-memory subsystem, and enter a suspend state. In power-aware controllers,
-entering that suspend state may disable the clock used with USB signaling,
-saving a certain amount of power.
-
-The controller will be woken from that state (with an IRQ) by changes to the
-signal state on the data lines of a given port, for example by an existing
-peripheral requesting "remote wakeup" or by plugging a new peripheral. The
-same wakeup mechanism usually works from "standby" sleep states, and on some
-systems also from "suspend to RAM" (or even "suspend to disk") states.
-(Except that ACPI may be involved instead of normal IRQs, on some hardware.)
-
-System devices like timers and CPUs may have special roles in the platform
-power management scheme. For example, system timers using a "dynamic tick"
-approach don't just save CPU cycles (by eliminating needless timer IRQs),
-but they may also open the door to using lower power CPU "idle" states that
-cost more than a jiffie to enter and exit. On x86 systems these are states
-like "C3"; note that periodic DMA transfers from a USB host controller will
-also prevent entry to a C3 state, much like a periodic timer IRQ.
-
-That kind of runtime mechanism interaction is common. "System On Chip" (SOC)
-processors often have low power idle modes that can't be entered unless
-certain medium-speed clocks (often 12 or 48 MHz) are gated off. When the
-drivers gate those clocks effectively, then the system idle task may be able
-to use the lower power idle modes and thereby increase battery life.
-
-If the CPU can have a "cpufreq" driver, there also may be opportunities
-to shift to lower voltage settings and reduce the power cost of executing
-a given number of instructions. (Without voltage adjustment, it's rare
-for cpufreq to save much power; the cost-per-instruction must go down.)
+A system-wide power transition can be started while some devices are in low
+power states due to runtime power management. The system sleep PM callbacks
+should recognize such situations and react to them appropriately, but the
+necessary actions are subsystem-specific.
+
+In some cases the decision may be made at the subsystem level while in other
+cases the device driver may be left to decide. In some cases it may be
+desirable to leave a suspended device in that state during a system-wide power
+transition, but in other cases the device must be put back into the full-power
+state temporarily, for example so that its system wakeup capability can be
+disabled. This all depends on the hardware and the design of the subsystem and
+device driver in question.
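+
+As one possible approach, a driver that must reprogram its hardware before the
+system-wide transition could start its suspend callback by bringing the device
+back to full power; this is only a sketch (the foo_* name is invented) and
+other arrangements are equally valid:
+
+#include <linux/device.h>
+#include <linux/pm_runtime.h>
+
+static int foo_suspend(struct device *dev)
+{
+	/*
+	 * If the device was runtime-suspended, return it to full power so
+	 * that, for example, its wakeup setting can be reconfigured for the
+	 * target system sleep state.
+	 */
+	pm_runtime_resume(dev);
+
+	/* ... usual system suspend handling goes here ... */
+	return 0;
+}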
+
+During system-wide resume from a sleep state it's best to put devices into the
+full-power state, as explained in Documentation/power/runtime_pm.txt. Refer to
+that document for more information regarding this particular issue as well as
+for information on the device runtime power management framework in general.
diff --git a/Documentation/power/pci.txt b/Documentation/power/pci.txt
index dd8fe43888d3..62328d76b55b 100644
--- a/Documentation/power/pci.txt
+++ b/Documentation/power/pci.txt
@@ -1,299 +1,1025 @@
-
PCI Power Management
-~~~~~~~~~~~~~~~~~~~~
-An overview of the concepts and the related functions in the Linux kernel
+Copyright (c) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
+
+An overview of concepts and the Linux kernel's interfaces related to PCI power
+management. Based on previous work by Patrick Mochel <mochel@transmeta.com>
+(and others).
-Patrick Mochel <mochel@transmeta.com>
-(and others)
+This document only covers the aspects of power management specific to PCI
+devices. For general description of the kernel's interfaces related to device
+power management refer to Documentation/power/devices.txt and
+Documentation/power/runtime_pm.txt.
---------------------------------------------------------------------------
-1. Overview
-2. How the PCI Subsystem Does Power Management
-3. PCI Utility Functions
-4. PCI Device Drivers
-5. Resources
-
-1. Overview
-~~~~~~~~~~~
-
-The PCI Power Management Specification was introduced between the PCI 2.1 and
-PCI 2.2 Specifications. It a standard interface for controlling various
-power management operations.
-
-Implementation of the PCI PM Spec is optional, as are several sub-components of
-it. If a device supports the PCI PM Spec, the device will have an 8 byte
-capability field in its PCI configuration space. This field is used to describe
-and control the standard PCI power management features.
-
-The PCI PM spec defines 4 operating states for devices (D0 - D3) and for buses
-(B0 - B3). The higher the number, the less power the device consumes. However,
-the higher the number, the longer the latency is for the device to return to
-an operational state (D0).
-
-There are actually two D3 states. When someone talks about D3, they usually
-mean D3hot, which corresponds to an ACPI D2 state (power is reduced, the
-device may lose some context). But they may also mean D3cold, which is an
-ACPI D3 state (power is fully off, all state was discarded); or both.
-
-Bus power management is not covered in this version of this document.
-
-Note that all PCI devices support D0 and D3cold by default, regardless of
-whether or not they implement any of the PCI PM spec.
-
-The possible state transitions that a device can undergo are:
-
-+---------------------------+
-| Current State | New State |
-+---------------------------+
-| D0 | D1, D2, D3|
-+---------------------------+
-| D1 | D2, D3 |
-+---------------------------+
-| D2 | D3 |
-+---------------------------+
-| D1, D2, D3 | D0 |
-+---------------------------+
-
-Note that when the system is entering a global suspend state, all devices will
-be placed into D3 and when resuming, all devices will be placed into D0.
-However, when the system is running, other state transitions are possible.
-
-2. How The PCI Subsystem Handles Power Management
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The PCI suspend/resume functionality is accessed indirectly via the Power
-Management subsystem. At boot, the PCI driver registers a power management
-callback with that layer. Upon entering a suspend state, the PM layer iterates
-through all of its registered callbacks. This currently takes place only during
-APM state transitions.
-
-Upon going to sleep, the PCI subsystem walks its device tree twice. Both times,
-it does a depth first walk of the device tree. The first walk saves each of the
-device's state and checks for devices that will prevent the system from entering
-a global power state. The next walk then places the devices in a low power
+1. Hardware and Platform Support for PCI Power Management
+2. PCI Subsystem and Device Power Management
+3. PCI Device Drivers and Power Management
+4. Resources
+
+
+1. Hardware and Platform Support for PCI Power Management
+=========================================================
+
+1.1. Native and Platform-Based Power Management
+-----------------------------------------------
+In general, power management is a feature allowing one to save energy by putting
+devices into states in which they draw less power (low-power states) at the
+price of reduced functionality or performance.
+
+Usually, a device is put into a low-power state when it is underutilized or
+completely inactive. However, when it is necessary to use the device once
+again, it has to be put back into the "fully functional" state (full-power
+state). This may happen when there are some data for the device to handle or
+as a result of an external event requiring the device to be active, which may
+be signaled by the device itself.
+
+PCI devices may be put into low-power states in two ways, by using the device
+capabilities introduced by the PCI Bus Power Management Interface Specification,
+or with the help of platform firmware, such as an ACPI BIOS. In the first
+approach, which is referred to as native PCI power management (native PCI PM)
+in what follows, the device power state is changed as a result of writing a
+specific value into one of its standard configuration registers. The second
+approach requires the platform firmware to provide special methods that may be
+used by the kernel to change the device's power state.
+
+Devices supporting the native PCI PM usually can generate wakeup signals called
+Power Management Events (PMEs) to let the kernel know about external events
+requiring the device to be active. After receiving a PME the kernel is supposed
+to put the device that sent it into the full-power state. However, the PCI Bus
+Power Management Interface Specification doesn't define any standard method of
+delivering the PME from the device to the CPU and the operating system kernel.
+It is assumed that the platform firmware will perform this task and therefore,
+even though a PCI device is set up to generate PMEs, it also may be necessary to
+prepare the platform firmware for notifying the CPU of the PMEs coming from the
+device (e.g. by generating interrupts).
+
+In turn, if the methods provided by the platform firmware are used for changing
+the power state of a device, usually the platform also provides a method for
+preparing the device to generate wakeup signals. In that case, however, it
+often also is necessary to prepare the device for generating PMEs using the
+native PCI PM mechanism, because the method provided by the platform depends on
+that.
+
+Thus in many situations both the native and the platform-based power management
+mechanisms have to be used simultaneously to obtain the desired result.
+
+1.2. Native PCI Power Management
+--------------------------------
+The PCI Bus Power Management Interface Specification (PCI PM Spec) was
+introduced between the PCI 2.1 and PCI 2.2 Specifications. It defined a
+standard interface for performing various operations related to power
+management.
+
+The implementation of the PCI PM Spec is optional for conventional PCI devices,
+but it is mandatory for PCI Express devices. If a device supports the PCI PM
+Spec, it has an 8 byte power management capability field in its PCI
+configuration space. This field is used to describe and control the standard
+features related to the native PCI power management.
+
+The PCI PM Spec defines 4 operating states for devices (D0-D3) and for buses
+(B0-B3). The higher the number, the less power is drawn by the device or bus
+in that state. However, the higher the number, the longer the latency for
+the device or bus to return to the full-power state (D0 or B0, respectively).
+
+There are two variants of the D3 state defined by the specification. The first
+one is D3hot, referred to as the software accessible D3, because devices can be
+programmed to go into it. The second one, D3cold, is the state that PCI devices
+are in when the supply voltage (Vcc) is removed from them. It is not possible
+to program a PCI device to go into D3cold, although there may be a programmable
+interface for putting the bus the device is on into a state in which Vcc is
+removed from all devices on the bus.
+
+PCI bus power management, however, is not supported by the Linux kernel at the
+time of this writing and therefore it is not covered by this document.
+
+Note that every PCI device can be in the full-power state (D0) or in D3cold,
+regardless of whether or not it implements the PCI PM Spec. In addition to
+that, if the PCI PM Spec is implemented by the device, it must support D3hot
+as well as D0. The support for the D1 and D2 power states is optional.
+
+PCI devices supporting the PCI PM Spec can be programmed to go to any of the
+supported low-power states (except for D3cold). While in D1-D3hot the
+standard configuration registers of the device must be accessible to software
+(i.e. the device is required to respond to PCI configuration accesses), although
+its I/O and memory spaces are then disabled. This allows the device to be
+programmatically put into D0. Thus the kernel can switch the device back and
+forth between D0 and the supported low-power states (except for D3cold) and the
+possible power state transitions the device can undergo are the following:
+
++----------------------------+
+| Current State | New State |
++----------------------------+
+| D0 | D1, D2, D3 |
++----------------------------+
+| D1 | D2, D3 |
++----------------------------+
+| D2 | D3 |
++----------------------------+
+| D1, D2, D3 | D0 |
++----------------------------+
+
+The transition from D3cold to D0 occurs when the supply voltage is provided to
+the device (i.e. power is restored). In that case the device returns to D0 with
+a full power-on reset sequence and the power-on defaults are restored to the
+device by hardware just as at initial power up.
+
+PCI devices supporting the PCI PM Spec can be programmed to generate PMEs
+while in a low-power state (D1-D3), but they are not required to be capable
+of generating PMEs from all supported low-power states. In particular, the
+capability of generating PMEs from D3cold is optional and depends on the
+presence of additional voltage (3.3Vaux) allowing the device to remain
+sufficiently active to generate a wakeup signal.
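+
+In the kernel these configuration space manipulations are wrapped by helpers
+such as pci_set_power_state(); the fragment below is only a sketch of what a
+D0 <-> D3hot transition looks like in isolation (drivers normally leave this
+to the PCI core, as Section 2 explains; the foo_* name is invented):
+
+#include <linux/pci.h>
+
+static void foo_toggle_power_state(struct pci_dev *pdev)
+{
+	/* Program the device into D3hot through its PM capability registers. */
+	pci_set_power_state(pdev, PCI_D3hot);
+
+	/* ... later, bring it back to full power (D0). */
+	pci_set_power_state(pdev, PCI_D0);
+}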
+
+1.3. ACPI Device Power Management
+---------------------------------
+The platform firmware support for the power management of PCI devices is
+system-specific. However, if the system in question is compliant with the
+Advanced Configuration and Power Interface (ACPI) Specification, like the
+majority of x86-based systems, it is supposed to implement device power
+management interfaces defined by the ACPI standard.
+
+For this purpose the ACPI BIOS provides special functions called "control
+methods" that may be executed by the kernel to perform specific tasks, such as
+putting a device into a low-power state. These control methods are encoded
+using special byte-code language called the ACPI Machine Language (AML) and
+stored in the machine's BIOS. The kernel loads them from the BIOS and executes
+them as needed using an AML interpreter that translates the AML byte code into
+computations and memory or I/O space accesses. This way, in theory, a BIOS
+writer can provide the kernel with a means to perform actions depending
+on the system design in a system-specific fashion.
+
+ACPI control methods may be divided into global control methods, which are not
+associated with any particular devices, and device control methods, which have
+to be defined separately for each device that is to be handled with the help of
+the platform. This means, in particular, that ACPI device control methods can
+only be used to handle devices that the BIOS writer knew about in advance. The
+ACPI methods used for device power management fall into that category.
+
+The ACPI specification assumes that devices can be in one of four power states
+labeled as D0, D1, D2, and D3 that roughly correspond to the native PCI PM
+D0-D3 states (although the difference between D3hot and D3cold is not taken
+into account by ACPI). Moreover, for each power state of a device there is a
+set of power resources that have to be enabled for the device to be put into
+that state. These power resources are controlled (i.e. enabled or disabled)
+with the help of their own control methods, _ON and _OFF, that have to be
+defined individually for each of them.
+
+To put a device into the ACPI power state Dx (where x is a number between 0 and
+3 inclusive) the kernel is supposed to (1) enable the power resources required
+by the device in this state using their _ON control methods and (2) execute the
+_PSx control method defined for the device. In addition to that, if the device
+is going to be put into a low-power state (D1-D3) and is supposed to generate
+wakeup signals from that state, the _DSW (or _PSW, replaced with _DSW by ACPI
+3.0) control method defined for it has to be executed before _PSx. Power
+resources that are not required by the device in the target power state and are
+not required any more by any other device should be disabled (by executing their
+_OFF control methods). If the current power state of the device is D3, it can
+only be put into D0 this way.
+
+However, quite often the power states of devices are changed during a
+system-wide transition into a sleep state or back into the working state. ACPI
+defines four system sleep states, S1, S2, S3, and S4, and denotes the system
+working state as S0. In general, the target system sleep (or working) state
+determines the highest power (lowest number) state the device can be put
+into and the kernel is supposed to obtain this information by executing the
+device's _SxD control method (where x is a number between 0 and 4 inclusive).
+If the device is required to wake up the system from the target sleep state, the
+lowest power (highest number) state it can be put into is also determined by the
+target state of the system. The kernel is then supposed to use the device's
+_SxW control method to obtain the number of that state. It also is supposed to
+use the device's _PRW control method to learn which power resources need to be
+enabled for the device to be able to generate wakeup signals.
+
+1.4. Wakeup Signaling
+---------------------
+Wakeup signals generated by PCI devices, either as native PCI PMEs, or as
+a result of the execution of the _DSW (or _PSW) ACPI control method before
+putting the device into a low-power state, have to be caught and handled as
+appropriate. If they are sent while the system is in the working state
+(ACPI S0), they should be translated into interrupts so that the kernel can
+put the devices generating them into the full-power state and take care of the
+events that triggered them. In turn, if they are sent while the system is
+sleeping, they should cause the system's core logic to trigger wakeup.
+
+On ACPI-based systems wakeup signals sent by conventional PCI devices are
+converted into ACPI General-Purpose Events (GPEs) which are hardware signals
+from the system core logic generated in response to various events that need to
+be acted upon. Every GPE is associated with one or more sources of potentially
+interesting events. In particular, a GPE may be associated with a PCI device
+capable of signaling wakeup. The information on the connections between GPEs
+and event sources is recorded in the system's ACPI BIOS from where it can be
+read by the kernel.
+
+If a PCI device known to the system's ACPI BIOS signals wakeup, the GPE
+associated with it (if there is one) is triggered. The GPEs associated with PCI
+bridges may also be triggered in response to a wakeup signal from one of the
+devices below the bridge (this also is the case for root bridges) and, for
+example, native PCI PMEs from devices unknown to the system's ACPI BIOS may be
+handled this way.
+
+A GPE may be triggered when the system is sleeping (i.e. when it is in one of
+the ACPI S1-S4 states), in which case system wakeup is started by its core logic
+(the device that was the source of the signal causing the system wakeup to occur
+may be identified later). The GPEs used in such situations are referred to as
+wakeup GPEs.
+
+Usually, however, GPEs are also triggered when the system is in the working
+state (ACPI S0) and in that case the system's core logic generates a System
+Control Interrupt (SCI) to notify the kernel of the event. Then, the SCI
+handler identifies the GPE that caused the interrupt to be generated which,
+in turn, allows the kernel to identify the source of the event (that may be
+a PCI device signaling wakeup). The GPEs used for notifying the kernel of
+events occurring while the system is in the working state are referred to as
+runtime GPEs.
+
+Unfortunately, there is no standard way of handling wakeup signals sent by
+conventional PCI devices on systems that are not ACPI-based, but there is one
+for PCI Express devices. Namely, the PCI Express Base Specification introduced
+a native mechanism for converting native PCI PMEs into interrupts generated by
+root ports. For conventional PCI devices native PMEs are out-of-band, so they
+are routed separately and they need not pass through bridges (in principle they
+may be routed directly to the system's core logic), but for PCI Express devices
+they are in-band messages that have to pass through the PCI Express hierarchy,
+including the root port on the path from the device to the Root Complex. Thus
+it was possible to introduce a mechanism by which a root port generates an
+interrupt whenever it receives a PME message from one of the devices below it.
+The PCI Express Requester ID of the device that sent the PME message is then
+recorded in one of the root port's configuration registers from where it may be
+read by the interrupt handler allowing the device to be identified. [PME
+messages sent by PCI Express endpoints integrated with the Root Complex don't
+pass through root ports, but instead they cause a Root Complex Event Collector
+(if there is one) to generate interrupts.]
+
+In principle the native PCI Express PME signaling may also be used on ACPI-based
+systems along with the GPEs, but to use it the kernel has to ask the system's
+ACPI BIOS to release control of root port configuration registers. The ACPI
+BIOS, however, is not required to allow the kernel to control these registers
+and if it doesn't do that, the kernel must not modify their contents. Of course
+the native PCI Express PME signaling cannot be used by the kernel in that case.
+
+
+2. PCI Subsystem and Device Power Management
+============================================
+
+2.1. Device Power Management Callbacks
+--------------------------------------
+The PCI Subsystem participates in the power management of PCI devices in a
+number of ways. First of all, it provides an intermediate code layer between
+the device power management core (PM core) and PCI device drivers.
+Specifically, the pm field of the PCI subsystem's struct bus_type object,
+pci_bus_type, points to a struct dev_pm_ops object, pci_dev_pm_ops, containing
+pointers to several device power management callbacks:
+
+const struct dev_pm_ops pci_dev_pm_ops = {
+ .prepare = pci_pm_prepare,
+ .complete = pci_pm_complete,
+ .suspend = pci_pm_suspend,
+ .resume = pci_pm_resume,
+ .freeze = pci_pm_freeze,
+ .thaw = pci_pm_thaw,
+ .poweroff = pci_pm_poweroff,
+ .restore = pci_pm_restore,
+ .suspend_noirq = pci_pm_suspend_noirq,
+ .resume_noirq = pci_pm_resume_noirq,
+ .freeze_noirq = pci_pm_freeze_noirq,
+ .thaw_noirq = pci_pm_thaw_noirq,
+ .poweroff_noirq = pci_pm_poweroff_noirq,
+ .restore_noirq = pci_pm_restore_noirq,
+ .runtime_suspend = pci_pm_runtime_suspend,
+ .runtime_resume = pci_pm_runtime_resume,
+ .runtime_idle = pci_pm_runtime_idle,
+};
+
+These callbacks are executed by the PM core in various situations related to
+device power management and they, in turn, execute power management callbacks
+provided by PCI device drivers. They also perform power management operations
+involving some standard configuration registers of PCI devices that device
+drivers need not know or care about.
+
+The structure representing a PCI device, struct pci_dev, contains several fields
+that these callbacks operate on:
+
+struct pci_dev {
+ ...
+ pci_power_t current_state; /* Current operating state. */
+ int pm_cap; /* PM capability offset in the
+ configuration space */
+ unsigned int pme_support:5; /* Bitmask of states from which PME#
+ can be generated */
+ unsigned int pme_interrupt:1;/* Is native PCIe PME signaling used? */
+ unsigned int d1_support:1; /* Low power state D1 is supported */
+ unsigned int d2_support:1; /* Low power state D2 is supported */
+ unsigned int no_d1d2:1; /* D1 and D2 are forbidden */
+ unsigned int wakeup_prepared:1; /* Device prepared for wake up */
+ unsigned int d3_delay; /* D3->D0 transition time in ms */
+ ...
+};
+
+They also indirectly use some fields of the struct device that is embedded in
+struct pci_dev.
+
+2.2. Device Initialization
+--------------------------
+The PCI subsystem's first task related to device power management is to
+prepare the device for power management and initialize the fields of struct
+pci_dev used for this purpose. This happens in two functions defined in
+drivers/pci/pci.c, pci_pm_init() and platform_pci_wakeup_init().
+
+The first of these functions checks if the device supports native PCI PM
+and if that's the case the offset of its power management capability structure
+in the configuration space is stored in the pm_cap field of the device's struct
+pci_dev object. Next, the function checks which PCI low-power states are
+supported by the device and from which low-power states the device can generate
+native PCI PMEs. The power management fields of the device's struct pci_dev and
+the struct device embedded in it are updated accordingly and the generation of
+PMEs by the device is disabled.
+
+The second function checks if the device can be prepared to signal wakeup with
+the help of the platform firmware, such as the ACPI BIOS. If that is the case,
+the function updates the wakeup fields in struct device embedded in the
+device's struct pci_dev and uses the firmware-provided method to prevent the
+device from signaling wakeup.
+
+At this point the device is ready for power management. For driverless devices,
+however, this functionality is limited to a few basic operations carried out
+during system-wide transitions to a sleep state and back to the working state.
+
+2.3. Runtime Device Power Management
+------------------------------------
+The PCI subsystem plays a vital role in the runtime power management of PCI
+devices. For this purpose it uses the general runtime power management
+(runtime PM) framework described in Documentation/power/runtime_pm.txt.
+Namely, it provides subsystem-level callbacks:
+
+ pci_pm_runtime_suspend()
+ pci_pm_runtime_resume()
+ pci_pm_runtime_idle()
+
+that are executed by the core runtime PM routines. It also implements the
+entire mechanics necessary for handling runtime wakeup signals from PCI devices
+in low-power states, which at the time of this writing works for both the native
+PCI Express PME signaling and the ACPI GPE-based wakeup signaling described in
+Section 1.
+
+First, a PCI device is put into a low-power state, or suspended, with the help
+of pm_schedule_suspend() or pm_runtime_suspend() which for PCI devices call
+pci_pm_runtime_suspend() to do the actual job. For this to work, the device's
+driver has to provide a pm->runtime_suspend() callback (see below), which is
+run by pci_pm_runtime_suspend() as the first action. If the driver's callback
+returns successfully, the device's standard configuration registers are saved,
+the device is prepared to generate wakeup signals and, finally, it is put into
+the target low-power state.
+
+The low-power state to put the device into is the lowest-power (highest number)
+state from which it can signal wakeup. The exact method of signaling wakeup is
+system-dependent and is determined by the PCI subsystem on the basis of the
+reported capabilities of the device and the platform firmware. To prepare the
+device for signaling wakeup and put it into the selected low-power state, the
+PCI subsystem can use the platform firmware as well as the device's native PCI
+PM capabilities, if supported.
+
+It is expected that the device driver's pm->runtime_suspend() callback will
+not attempt to prepare the device for signaling wakeup or to put it into a
+low-power state. The driver ought to leave these tasks to the PCI subsystem
+that has all of the information necessary to perform them.
+
+A suspended device is brought back into the "active" state, or resumed,
+with the help of pm_request_resume() or pm_runtime_resume() which both call
+pci_pm_runtime_resume() for PCI devices. Again, this only works if the device's
+driver provides a pm->runtime_resume() callback (see below). However, before
+the driver's callback is executed, pci_pm_runtime_resume() brings the device
+back into the full-power state, prevents it from signaling wakeup while in that
+state and restores its standard configuration registers. Thus the driver's
+callback need not worry about the PCI-specific aspects of the device resume.
+
+Note that generally pci_pm_runtime_resume() may be called in two different
+situations. First, it may be called at the request of the device's driver, for
+example if there are some data for it to process. Second, it may be called
+as a result of a wakeup signal from the device itself (this sometimes is
+referred to as "remote wakeup"). Of course, for this purpose the wakeup signal
+is handled in one of the ways described in Section 1 and finally converted into
+a notification for the PCI subsystem after the source device has been
+identified.
+
+The pci_pm_runtime_idle() function, called for PCI devices by pm_runtime_idle()
+and pm_request_idle(), executes the device driver's pm->runtime_idle()
+callback, if defined, and if that callback doesn't return an error code (or is
+not present at all), suspends the device with the help of pm_runtime_suspend().
+Sometimes pci_pm_runtime_idle() is called automatically by the PM core (for
+example, it is called right after the device has been resumed), in which case
+it is expected to suspend the device if that makes sense.  Usually,
+however, the PCI subsystem doesn't really know if the device really can be
+suspended, so it lets the device's driver decide by running its
+pm->runtime_idle() callback.
+
+2.4. System-Wide Power Transitions
+----------------------------------
+There are a few different types of system-wide power transitions, described in
+Documentation/power/devices.txt. Each of them requires devices to be handled
+in a specific way and the PM core executes subsystem-level power management
+callbacks for this purpose. They are executed in phases such that each phase
+involves executing the same subsystem-level callback for every device belonging
+to the given subsystem before the next phase begins. These phases always run
+after tasks have been frozen.
+
+2.4.1. System Suspend
+
+When the system is going into a sleep state in which the contents of memory will
+be preserved, such as one of the ACPI sleep states S1-S3, the phases are:
+
+ prepare, suspend, suspend_noirq.
+
+The following PCI bus type's callbacks, respectively, are used in these phases:
+
+ pci_pm_prepare()
+ pci_pm_suspend()
+ pci_pm_suspend_noirq()
+
+The pci_pm_prepare() routine first puts the device into the "fully functional"
+state with the help of pm_runtime_resume(). Then, it executes the device
+driver's pm->prepare() callback if defined (i.e. if the driver's struct
+dev_pm_ops object is present and the prepare pointer in that object is valid).
+
+The pci_pm_suspend() routine first checks if the device's driver implements
+legacy PCI suspend routines (see Section 3), in which case the driver's legacy
+suspend callback is executed, if present, and its result is returned. Next, if
+the device's driver doesn't provide a struct dev_pm_ops object (containing
+pointers to the driver's callbacks), pci_pm_default_suspend() is called, which
+simply turns off the device's bus master capability and runs
+pcibios_disable_device() to disable it, unless the device is a bridge (PCI
+bridges are ignored by this routine). Next, the device driver's pm->suspend()
+callback is executed, if defined, and its result is returned if it fails.
+Finally, pci_fixup_device() is called to apply hardware suspend quirks related
+to the device if necessary.
+
+Note that the suspend phase is carried out asynchronously for PCI devices, so
+the pci_pm_suspend() callback may be executed in parallel for any pair of PCI
+devices that don't depend on each other in a known way (i.e. none of the paths
+in the device tree from the root bridge to a leaf device contains both of them).
+
+The pci_pm_suspend_noirq() routine is executed after suspend_device_irqs() has
+been called, which means that the device driver's interrupt handler won't be
+invoked while this routine is running. It first checks if the device's driver
+implements legacy PCI suspend routines (Section 3), in which case the legacy
+late suspend routine is called and its result is returned (the standard
+configuration registers of the device are saved if the driver's callback hasn't
+done that). Second, if the device driver's struct dev_pm_ops object is not
+present, the device's standard configuration registers are saved and the routine
+returns success. Otherwise the device driver's pm->suspend_noirq() callback is
+executed, if present, and its result is returned if it fails. Next, if the
+device's standard configuration registers haven't been saved yet (one of the
+device driver's callbacks executed earlier might have done that),
+pci_pm_suspend_noirq() saves them, prepares the device to signal wakeup (if
+necessary) and puts it into a low-power state.
+
+The low-power state to put the device into is the lowest-power (highest number)
+state from which it can signal wakeup while the system is in the target sleep
+state. Just like in the runtime PM case described above, the mechanism of
+signaling wakeup is system-dependent and determined by the PCI subsystem, which
+is also responsible for preparing the device to signal wakeup from the system's
+target sleep state as appropriate.
+
+PCI device drivers (that don't implement legacy power management callbacks) are
+generally not expected to prepare devices for signaling wakeup or to put them
+into low-power states. However, if one of the driver's suspend callbacks
+(pm->suspend() or pm->suspend_noirq()) saves the device's standard configuration
+registers, pci_pm_suspend_noirq() will assume that the device has been prepared
+to signal wakeup and put into a low-power state by the driver (the driver is
+then assumed to have used the helper functions provided by the PCI subsystem for
+this purpose). PCI device drivers are not encouraged to do that, but in some
+rare cases doing that in the driver may be the optimum approach.
+
+2.4.2. System Resume
+
+When the system is undergoing a transition from a sleep state in which the
+contents of memory have been preserved, such as one of the ACPI sleep states
+S1-S3, into the working state (ACPI S0), the phases are:
+
+ resume_noirq, resume, complete.
+
+The following PCI bus type's callbacks, respectively, are executed in these
+phases:
+
+ pci_pm_resume_noirq()
+ pci_pm_resume()
+ pci_pm_complete()
+
+The pci_pm_resume_noirq() routine first puts the device into the full-power
+state, restores its standard configuration registers and applies early resume
+hardware quirks related to the device, if necessary. This is done
+unconditionally, regardless of whether or not the device's driver implements
+legacy PCI power management callbacks (this way all PCI devices are in the
+full-power state and their standard configuration registers have been restored
+when their interrupt handlers are invoked for the first time during resume,
+which allows the kernel to avoid problems with the handling of shared interrupts
+by drivers whose devices are still suspended). If legacy PCI power management
+callbacks (see Section 3) are implemented by the device's driver, the legacy
+early resume callback is executed and its result is returned. Otherwise, the
+device driver's pm->resume_noirq() callback is executed, if defined, and its
+result is returned.
+
+The pci_pm_resume() routine first checks if the device's standard configuration
+registers have been restored and restores them if that's not the case (this
+only is necessary in the error path during a failing suspend). Next, resume
+hardware quirks related to the device are applied, if necessary, and if the
+device's driver implements legacy PCI power management callbacks (see
+Section 3), the driver's legacy resume callback is executed and its result is
+returned. Otherwise, the device's wakeup signaling mechanisms are blocked and
+its driver's pm->resume() callback is executed, if defined (the callback's
+result is then returned).
+
+The resume phase is carried out asynchronously for PCI devices, like the
+suspend phase described above, which means that if two PCI devices don't depend
+on each other in a known way, the pci_pm_resume() routine may be executed for
+the both of them in parallel.
+
+The pci_pm_complete() routine only executes the device driver's pm->complete()
+callback, if defined.
+
+2.4.3. System Hibernation
+
+System hibernation is more complicated than system suspend, because it requires
+a system image to be created and written into a persistent storage medium. The
+image is created atomically and all devices are quiesced, or frozen, before that
+happens.
+
+The freezing of devices is carried out after enough memory has been freed (at
+the time of this writing the image creation requires at least 50% of system RAM
+to be free) in the following three phases:
+
+ prepare, freeze, freeze_noirq
+
+that correspond to the PCI bus type's callbacks:
+
+ pci_pm_prepare()
+ pci_pm_freeze()
+ pci_pm_freeze_noirq()
+
+This means that the prepare phase is exactly the same as for system suspend.
+The other two phases, however, are different.
+
+The pci_pm_freeze() routine is quite similar to pci_pm_suspend(), but it runs
+the device driver's pm->freeze() callback, if defined, instead of pm->suspend(),
+and it doesn't apply the suspend-related hardware quirks. It is executed
+asynchronously for different PCI devices that don't depend on each other in a
+known way.
+
+The pci_pm_freeze_noirq() routine, in turn, is similar to
+pci_pm_suspend_noirq(), but it calls the device driver's pm->freeze_noirq()
+routine instead of pm->suspend_noirq(). It also doesn't attempt to prepare the
+device for signaling wakeup and put it into a low-power state. Still, it saves
+the device's standard configuration registers if they haven't been saved by one
+of the driver's callbacks.
+
+Once the image has been created, it has to be saved. However, at this point all
+devices are frozen and they cannot handle I/O, while their ability to handle
+I/O is obviously necessary for the image saving. Thus they have to be brought
+back to the fully functional state and this is done in the following phases:
+
+ thaw_noirq, thaw, complete
+
+using the following PCI bus type's callbacks:
+
+ pci_pm_thaw_noirq()
+ pci_pm_thaw()
+ pci_pm_complete()
+
+respectively.
+
+The first of them, pci_pm_thaw_noirq(), is analogous to pci_pm_resume_noirq(),
+but it doesn't put the device into the full power state and doesn't attempt to
+restore its standard configuration registers. It also executes the device
+driver's pm->thaw_noirq() callback, if defined, instead of pm->resume_noirq().
+
+The pci_pm_thaw() routine is similar to pci_pm_resume(), but it runs the device
+driver's pm->thaw() callback instead of pm->resume(). It is executed
+asynchronously for different PCI devices that don't depend on each other in a
+known way.
+
+The complete phase is the same as for system resume.
+
+After saving the image, devices need to be powered down before the system can
+enter the target sleep state (ACPI S4 for ACPI-based systems). This is done in
+three phases:
+
+ prepare, poweroff, poweroff_noirq
+
+where the prepare phase is exactly the same as for system suspend. The other
+two phases are analogous to the suspend and suspend_noirq phases, respectively.
+The PCI subsystem-level callbacks they correspond to
+
+ pci_pm_poweroff()
+ pci_pm_poweroff_noirq()
+
+work in analogy with pci_pm_suspend() and pci_pm_suspend_noirq(), respectively,
+although they don't attempt to save the device's standard configuration
+registers.
+
+2.4.4. System Restore
+
+System restore requires a hibernation image to be loaded into memory and the
+pre-hibernation memory contents to be restored before the pre-hibernation system
+activity can be resumed.
+
+As described in Documentation/power/devices.txt, the hibernation image is loaded
+into memory by a fresh instance of the kernel, called the boot kernel, which in
+turn is loaded and run by a boot loader in the usual way. After the boot kernel
+has loaded the image, it needs to replace its own code and data with the code
+and data of the "hibernated" kernel stored within the image, called the image
+kernel. For this purpose all devices are frozen just like before creating
+the image during hibernation, in the
+
+ prepare, freeze, freeze_noirq
+
+phases described above. However, the devices affected by these phases are only
+those having drivers in the boot kernel; other devices will still be in
+whatever state the boot loader left them in.
+
+Should the restoration of the pre-hibernation memory contents fail, the boot
+kernel would go through the "thawing" procedure described above, using the
+thaw_noirq, thaw, and complete phases (that will only affect the devices having
+drivers in the boot kernel), and then continue running normally.
+
+If the pre-hibernation memory contents are restored successfully, which is the
+usual situation, control is passed to the image kernel, which then becomes
+responsible for bringing the system back to the working state. To achieve this,
+it must restore the devices' pre-hibernation functionality, which is done much
+like waking up from the memory sleep state, although it involves different
+phases:
+
+ restore_noirq, restore, complete
+
+The first two of these are analogous to the resume_noirq and resume phases
+described above, respectively, and correspond to the following PCI subsystem
+callbacks:
+
+ pci_pm_restore_noirq()
+ pci_pm_restore()
+
+These callbacks work in analogy with pci_pm_resume_noirq() and pci_pm_resume(),
+respectively, but they execute the device driver's pm->restore_noirq() and
+pm->restore() callbacks, if available.
+
+The complete phase is carried out in exactly the same way as during system
+resume.
+
+
+3. PCI Device Drivers and Power Management
+==========================================
+
+3.1. Power Management Callbacks
+-------------------------------
+PCI device drivers participate in power management by providing callbacks to be
+executed by the PCI subsystem's power management routines described above and by
+controlling the runtime power management of their devices.
+
+At the time of this writing there are two ways to define power management
+callbacks for a PCI device driver, the recommended one, based on using a
+dev_pm_ops structure described in Documentation/power/devices.txt, and the
+"legacy" one, in which the .suspend(), .suspend_late(), .resume_early(), and
+.resume() callbacks from struct pci_driver are used. The legacy approach,
+however, doesn't allow one to define runtime power management callbacks and is
+not really suitable for any new drivers. Therefore it is not covered by this
+document (refer to the source code to learn more about it).
+
+It is recommended that all PCI device drivers define a struct dev_pm_ops object
+containing pointers to power management (PM) callbacks that will be executed by
+the PCI subsystem's PM routines in various circumstances. A pointer to the
+driver's struct dev_pm_ops object has to be assigned to the driver.pm field in
+its struct pci_driver object. Once that has happened, the "legacy" PM callbacks
+in struct pci_driver are ignored (even if they are not NULL).
+
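+For illustration only, a minimal sketch of such a driver (all of the "foo_*"
+names, including foo_probe(), foo_remove() and foo_id_table, are hypothetical
+and assumed to be defined elsewhere) might look like this:
+
+	static int foo_suspend(struct device *dev)
+	{
+		/* quiesce the device; the PCI core saves its config space
+		 * and puts it into a low-power state afterwards */
+		return 0;
+	}
+
+	static int foo_resume(struct device *dev)
+	{
+		/* reprogram the device; the PCI core has already restored
+		 * its config space and brought it back to full power */
+		return 0;
+	}
+
+	static const struct dev_pm_ops foo_pm_ops = {
+		.suspend = foo_suspend,
+		.resume  = foo_resume,
+	};
+
+	static struct pci_driver foo_driver = {
+		.name       = "foo",
+		.id_table   = foo_id_table,
+		.probe      = foo_probe,
+		.remove     = foo_remove,
+		.driver.pm  = &foo_pm_ops,
+	};
+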
+The PM callbacks in struct dev_pm_ops are not mandatory and if they are not
+defined (i.e. the respective fields of struct dev_pm_ops are unset) the PCI
+subsystem will handle the device in a simplified default manner. If they are
+defined, though, they are expected to behave as described in the following
+subsections.
+
+3.1.1. prepare()
+
+The prepare() callback is executed during system suspend, during hibernation
+(when a hibernation image is about to be created), during power-off after
+saving a hibernation image and during system restore, when a hibernation image
+has just been loaded into memory.
+
+This callback is only necessary if the driver's device has children that in
+general may be registered at any time. In that case the role of the prepare()
+callback is to prevent new children of the device from being registered until
+one of the resume_noirq(), thaw_noirq(), or restore_noirq() callbacks is run.
+
+In addition to that the prepare() callback may carry out some operations
+preparing the device to be suspended, although it should not allocate memory
+(if additional memory is required to suspend the device, it has to be
+preallocated earlier, for example in a suspend/hibernate notifier as described
+in Documentation/power/notifiers.txt).
+
+3.1.2. suspend()
+
+The suspend() callback is only executed during system suspend, after prepare()
+callbacks have been executed for all devices in the system.
+
+This callback is expected to quiesce the device and prepare it to be put into a
+low-power state by the PCI subsystem.  It is not required (in fact it is not
+even recommended) that a PCI driver's suspend() callback save the standard
+configuration registers of the device, prepare it for waking up the system, or
+put it into a low-power state. All of these operations can very well be taken
+care of by the PCI subsystem, without the driver's participation.
+
+However, in some rare cases it is convenient to carry out these operations in
+a PCI driver. Then, pci_save_state(), pci_prepare_to_sleep(), and
+pci_set_power_state() should be used to save the device's standard configuration
+registers, to prepare it for system wakeup (if necessary), and to put it into a
+low-power state, respectively. Moreover, if the driver calls pci_save_state(),
+the PCI subsystem will not execute either pci_prepare_to_sleep(), or
+pci_set_power_state() for its device, so the driver is then responsible for
+handling the device as appropriate.
+
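+In that rare case, a rough sketch of a suspend() callback doing this itself,
+following the description above (pdev is the driver's struct pci_dev pointer,
+D3hot is only an illustrative choice of state and error handling is omitted),
+could be:
+
+	pci_save_state(pdev);			/* save standard config registers */
+	pci_prepare_to_sleep(pdev);		/* prepare system wakeup, if necessary */
+	pci_set_power_state(pdev, PCI_D3hot);	/* put the device into a low-power state */
+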
+While the suspend() callback is being executed, the driver's interrupt handler
+can be invoked to handle an interrupt from the device, so all suspend-related
+operations relying on the driver's ability to handle interrupts should be
+carried out in this callback.
+
+3.1.3. suspend_noirq()
+
+The suspend_noirq() callback is only executed during system suspend, after
+suspend() callbacks have been executed for all devices in the system and
+after device interrupts have been disabled by the PM core.
+
+The difference between suspend_noirq() and suspend() is that the driver's
+interrupt handler will not be invoked while suspend_noirq() is running. Thus
+suspend_noirq() can carry out operations that would cause race conditions to
+arise if they were performed in suspend().
+
+3.1.4. freeze()
+
+The freeze() callback is hibernation-specific and is executed in two situations,
+during hibernation, after prepare() callbacks have been executed for all devices
+in preparation for the creation of a system image, and during restore,
+after a system image has been loaded into memory from persistent storage and the
+prepare() callbacks have been executed for all devices.
+
+The role of this callback is analogous to the role of the suspend() callback
+described above. In fact, they only need to be different in the rare cases when
+the driver takes the responsibility for putting the device into a low-power
state.
-The first walk allows a graceful recovery in the event of a failure, since none
-of the devices have actually been powered down.
-
-In both walks, in particular the second, all children of a bridge are touched
-before the actual bridge itself. This allows the bridge to retain power while
-its children are being accessed.
-
-Upon resuming from sleep, just the opposite must be true: all bridges must be
-powered on and restored before their children are powered on. This is easily
-accomplished with a breadth-first walk of the PCI device tree.
-
-
-3. PCI Utility Functions
-~~~~~~~~~~~~~~~~~~~~~~~~
-
-These are helper functions designed to be called by individual device drivers.
-Assuming that a device behaves as advertised, these should be applicable in most
-cases. However, results may vary.
-
-Note that these functions are never implicitly called for the driver. The driver
-is always responsible for deciding when and if to call these.
-
-
-pci_save_state
---------------
-
-Usage:
- pci_save_state(struct pci_dev *dev);
-
-Description:
- Save first 64 bytes of PCI config space, along with any additional
- PCI-Express or PCI-X information.
-
-
-pci_restore_state
------------------
-
-Usage:
- pci_restore_state(struct pci_dev *dev);
-
-Description:
- Restore previously saved config space.
-
-
-pci_set_power_state
--------------------
-
-Usage:
- pci_set_power_state(struct pci_dev *dev, pci_power_t state);
-
-Description:
- Transition device to low power state using PCI PM Capabilities
- registers.
-
- Will fail under one of the following conditions:
- - If state is less than current state, but not D0 (illegal transition)
- - Device doesn't support PM Capabilities
- - Device does not support requested state
-
-
-pci_enable_wake
----------------
-
-Usage:
- pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable);
-
-Description:
- Enable device to generate PME# during low power state using PCI PM
- Capabilities.
-
- Checks whether if device supports generating PME# from requested state
- and fail if it does not, unless enable == 0 (request is to disable wake
- events, which is implicit if it doesn't even support it in the first
- place).
-
- Note that the PMC Register in the device's PM Capabilities has a bitmask
- of the states it supports generating PME# from. D3hot is bit 3 and
- D3cold is bit 4. So, while a value of 4 as the state may not seem
- semantically correct, it is.
-
-
-4. PCI Device Drivers
-~~~~~~~~~~~~~~~~~~~~~
-
-These functions are intended for use by individual drivers, and are defined in
-struct pci_driver:
-
- int (*suspend) (struct pci_dev *dev, pm_message_t state);
- int (*resume) (struct pci_dev *dev);
-
-
-suspend
--------
-
-Usage:
-
-if (dev->driver && dev->driver->suspend)
- dev->driver->suspend(dev,state);
-
-A driver uses this function to actually transition the device into a low power
-state. This should include disabling I/O, IRQs, and bus-mastering, as well as
-physically transitioning the device to a lower power state; it may also include
-calls to pci_enable_wake().
-
-Bus mastering may be disabled by doing:
-
-pci_disable_device(dev);
-
-For devices that support the PCI PM Spec, this may be used to set the device's
-power state to match the suspend() parameter:
-
-pci_set_power_state(dev,state);
-
-The driver is also responsible for disabling any other device-specific features
-(e.g blanking screen, turning off on-card memory, etc).
-
-The driver should be sure to track the current state of the device, as it may
-obviate the need for some operations.
-
-The driver should update the current_state field in its pci_dev structure in
-this function, except for PM-capable devices when pci_set_power_state is used.
-
-resume
-------
-
-Usage:
-
-if (dev->driver && dev->driver->resume)
- dev->driver->resume(dev)
+In those cases the freeze() callback should not prepare the device for system
+wakeup or put it into a low-power state.  Still, either it or freeze_noirq()
+should save the device's standard configuration registers using pci_save_state().
-The resume callback may be called from any power state, and is always meant to
-transition the device to the D0 state.
+3.1.5. freeze_noirq()
-The driver is responsible for reenabling any features of the device that had
-been disabled during previous suspend calls, such as IRQs and bus mastering,
-as well as calling pci_restore_state().
+The freeze_noirq() callback is hibernation-specific. It is executed during
+hibernation, after prepare() and freeze() callbacks have been executed for all
+devices in preparation for the creation of a system image, and during restore,
+after a system image has been loaded into memory and after prepare() and
+freeze() callbacks have been executed for all devices. It is always executed
+after device interrupts have been disabled by the PM core.
-If the device is currently in D3, it may need to be reinitialized in resume().
+The role of this callback is analogous to the role of the suspend_noirq()
+callback described above and it very rarely is necessary to define
+freeze_noirq().
- * Some types of devices, like bus controllers, will preserve context in D3hot
- (using Vcc power). Their drivers will often want to avoid re-initializing
- them after re-entering D0 (perhaps to avoid resetting downstream devices).
+The difference between freeze_noirq() and freeze() is analogous to the
+difference between suspend_noirq() and suspend().
- * Other kinds of devices in D3hot will discard device context as part of a
- soft reset when re-entering the D0 state.
-
- * Devices resuming from D3cold always go through a power-on reset. Some
- device context can also be preserved using Vaux power.
+3.1.6. poweroff()
- * Some systems hide D3cold resume paths from drivers. For example, on PCs
- the resume path for suspend-to-disk often runs BIOS powerup code, which
- will sometimes re-initialize the device.
+The poweroff() callback is hibernation-specific. It is executed when the system
+is about to be powered off after saving a hibernation image to persistent
+storage.  prepare() callbacks are executed for all devices before poweroff() is
+called.
-To handle resets during D3 to D0 transitions, it may be convenient to share
-device initialization code between probe() and resume(). Device parameters
-can also be saved before the driver suspends into D3, avoiding re-probe.
+The role of this callback is analogous to the role of the suspend() and freeze()
+callbacks described above, although it does not need to save the contents of
+the device's registers. In particular, if the driver wants to put the device
+into a low-power state itself instead of allowing the PCI subsystem to do that,
+the poweroff() callback should use pci_prepare_to_sleep() and
+pci_set_power_state() to prepare the device for system wakeup and to put it
+into a low-power state, respectively, but it need not save the device's standard
+configuration registers.
-If the device supports the PCI PM Spec, it can use this to physically transition
-the device to D0:
+3.1.7. poweroff_noirq()
-pci_set_power_state(dev,0);
+The poweroff_noirq() callback is hibernation-specific. It is executed after
+poweroff() callbacks have been executed for all devices in the system.
-Note that if the entire system is transitioning out of a global sleep state, all
-devices will be placed in the D0 state, so this is not necessary. However, in
-the event that the device is placed in the D3 state during normal operation,
-this call is necessary. It is impossible to determine which of the two events is
-taking place in the driver, so it is always a good idea to make that call.
+The role of this callback is analogous to the role of the suspend_noirq() and
+freeze_noirq() callbacks described above, but it does not need to save the
+contents of the device's registers.
-The driver should take note of the state that it is resuming from in order to
-ensure correct (and speedy) operation.
+The difference between poweroff_noirq() and poweroff() is analogous to the
+difference between suspend_noirq() and suspend().
-The driver should update the current_state field in its pci_dev structure in
-this function, except for PM-capable devices when pci_set_power_state is used.
+3.1.8. resume_noirq()
+The resume_noirq() callback is only executed during system resume, after the
+PM core has enabled the non-boot CPUs. The driver's interrupt handler will not
+be invoked while resume_noirq() is running, so this callback can carry out
+operations that might race with the interrupt handler.
+Since the PCI subsystem unconditionally puts all devices into the full power
+state in the resume_noirq phase of system resume and restores their standard
+configuration registers, resume_noirq() is usually not necessary. In general
+it should only be used for performing operations that would lead to race
+conditions if carried out by resume().
-A reference implementation
--------------------------
-.suspend()
-{
- /* driver specific operations */
+3.1.9. resume()
- /* Disable IRQ */
- free_irq();
- /* If using MSI */
- pci_disable_msi();
+The resume() callback is only executed during system resume, after
+resume_noirq() callbacks have been executed for all devices in the system and
+device interrupts have been enabled by the PM core.
- pci_save_state();
- pci_enable_wake();
- /* Disable IO/bus master/irq router */
- pci_disable_device();
- pci_set_power_state(pci_choose_state());
-}
+This callback is responsible for restoring the pre-suspend configuration of the
+device and bringing it back to the fully functional state. The device should be
+able to process I/O in a usual way after resume() has returned.
-.resume()
-{
- pci_set_power_state(PCI_D0);
- pci_restore_state();
- /* device's irq possibly is changed, driver should take care */
- pci_enable_device();
- pci_set_master();
+3.1.10. thaw_noirq()
- /* if using MSI, device's vector possibly is changed */
- pci_enable_msi();
+The thaw_noirq() callback is hibernation-specific. It is executed after a
+system image has been created and the non-boot CPUs have been enabled by the PM
+core, in the thaw_noirq phase of hibernation. It also may be executed if the
+loading of a hibernation image fails during system restore (it is then executed
+after enabling the non-boot CPUs). The driver's interrupt handler will not be
+invoked while thaw_noirq() is running.
- request_irq();
- /* driver specific operations; */
-}
+The role of this callback is analogous to the role of resume_noirq(). The
+difference between these two callbacks is that thaw_noirq() is executed after
+freeze() and freeze_noirq(), so in general it does not need to modify the
+contents of the device's registers.
-This is a typical implementation. Drivers can slightly change the order
-of the operations in the implementation, ignore some operations or add
-more driver specific operations in it, but drivers should do something like
-this on the whole.
+3.1.11. thaw()
-5. Resources
-~~~~~~~~~~~~
+The thaw() callback is hibernation-specific. It is executed after thaw_noirq()
+callbacks have been executed for all devices in the system and after device
+interrupts have been enabled by the PM core.
-PCI Local Bus Specification
-PCI Bus Power Management Interface Specification
+This callback is responsible for restoring the pre-freeze configuration of
+the device, so that it will work in a usual way after thaw() has returned.
- http://www.pcisig.com
+3.1.12. restore_noirq()
+The restore_noirq() callback is hibernation-specific. It is executed in the
+restore_noirq phase of hibernation, when the boot kernel has passed control to
+the image kernel and the non-boot CPUs have been enabled by the image kernel's
+PM core.
+
+This callback is analogous to resume_noirq() with the exception that it cannot
+make any assumption on the previous state of the device, even if the BIOS (or
+generally the platform firmware) is known to preserve that state over a
+suspend-resume cycle.
+
+For the vast majority of PCI device drivers there is no difference between
+resume_noirq() and restore_noirq().
+
+3.1.13. restore()
+
+The restore() callback is hibernation-specific. It is executed after
+restore_noirq() callbacks have been executed for all devices in the system and
+after the PM core has enabled device drivers' interrupt handlers to be invoked.
+
+This callback is analogous to resume(), just like restore_noirq() is analogous
+to resume_noirq(). Consequently, the difference between restore_noirq() and
+restore() is analogous to the difference between resume_noirq() and resume().
+
+For the vast majority of PCI device drivers there is no difference between
+resume() and restore().
+
+3.1.14. complete()
+
+The complete() callback is executed in the following situations:
+ - during system resume, after resume() callbacks have been executed for all
+ devices,
+ - during hibernation, before saving the system image, after thaw() callbacks
+ have been executed for all devices,
+ - during system restore, when the system is going back to its pre-hibernation
+ state, after restore() callbacks have been executed for all devices.
+It also may be executed if the loading of a hibernation image into memory fails
+(in that case it is run after thaw() callbacks have been executed for all
+devices that have drivers in the boot kernel).
+
+This callback is entirely optional, although it may be necessary if the
+prepare() callback performs operations that need to be reversed.
+
+3.1.15. runtime_suspend()
+
+The runtime_suspend() callback is specific to device runtime power management
+(runtime PM). It is executed by the PM core's runtime PM framework when the
+device is about to be suspended (i.e. quiesced and put into a low-power state)
+at run time.
+
+This callback is responsible for freezing the device and preparing it to be
+put into a low-power state, but it must allow the PCI subsystem to perform all
+of the PCI-specific actions necessary for suspending the device.
+
+3.1.16. runtime_resume()
+
+The runtime_resume() callback is specific to device runtime PM. It is executed
+by the PM core's runtime PM framework when the device is about to be resumed
+(i.e. put into the full-power state and programmed to process I/O normally) at
+run time.
+
+This callback is responsible for restoring the normal functionality of the
+device after it has been put into the full-power state by the PCI subsystem.
+The device is expected to be able to process I/O in the usual way after
+runtime_resume() has returned.
+
+3.1.17. runtime_idle()
+
+The runtime_idle() callback is specific to device runtime PM. It is executed
+by the PM core's runtime PM framework whenever it may be desirable to suspend
+the device according to the PM core's information. In particular, it is
+automatically executed right after runtime_resume() has returned in case the
+resume of the device has happened as a result of a spurious event.
+
+This callback is optional, but if it is not implemented or if it returns 0, the
+PCI subsystem will call pm_runtime_suspend() for the device, which in turn will
+cause the driver's runtime_suspend() callback to be executed.
+
+3.1.18. Pointing Multiple Callback Pointers to One Routine
+
+Although in principle each of the callbacks described in the previous
+subsections can be defined as a separate function, it often is convenient to
+point two or more members of struct dev_pm_ops to the same routine. There are
+a few convenience macros that can be used for this purpose.
+
+The SIMPLE_DEV_PM_OPS macro declares a struct dev_pm_ops object with one
+suspend routine pointed to by the .suspend(), .freeze(), and .poweroff()
+members and one resume routine pointed to by the .resume(), .thaw(), and
+.restore() members. The other function pointers in this struct dev_pm_ops are
+unset.
+
+The UNIVERSAL_DEV_PM_OPS macro is similar to SIMPLE_DEV_PM_OPS, but it
+additionally sets the .runtime_resume() pointer to the same value as
+.resume() (and .thaw(), and .restore()) and the .runtime_suspend() pointer to
+the same value as .suspend() (and .freeze() and .poweroff()).
+
+The SET_SYSTEM_SLEEP_PM_OPS macro can be used inside a declaration of struct
+dev_pm_ops to indicate that one suspend routine is to be pointed to by the
+.suspend(), .freeze(), and .poweroff() members and one resume routine is to
+be pointed to by the .resume(), .thaw(), and .restore() members.
+
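+For example, a driver using the same routines for all of the system sleep
+transitions (the "foo_*" names below are hypothetical) might simply declare:
+
+	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
+
+while a driver that also provides its own runtime PM callbacks might combine
+SET_SYSTEM_SLEEP_PM_OPS with explicit initializers:
+
+	static const struct dev_pm_ops foo_pm_ops = {
+		SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
+		.runtime_suspend = foo_runtime_suspend,
+		.runtime_resume  = foo_runtime_resume,
+	};
+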
+3.2. Device Runtime Power Management
+------------------------------------
+In addition to providing device power management callbacks PCI device drivers
+are responsible for controlling the runtime power management (runtime PM) of
+their devices.
+
+The PCI device runtime PM is optional, but it is recommended that PCI device
+drivers implement it at least in the cases where there is a reliable way of
+verifying that the device is not used (like when the network cable is detached
+from an Ethernet adapter or there are no devices attached to a USB controller).
+
+To support the PCI runtime PM the driver first needs to implement the
+runtime_suspend() and runtime_resume() callbacks. It also may need to implement
+the runtime_idle() callback to prevent the device from being suspended again
+every time right after the runtime_resume() callback has returned
+(alternatively, the runtime_suspend() callback will have to check if the
+device should really be suspended and return -EAGAIN if that is not the case).
+
+The runtime PM of PCI devices is disabled by default. It is also blocked by
+pci_pm_init() that runs the pm_runtime_forbid() helper function. If a PCI
+driver implements the runtime PM callbacks and intends to use the runtime PM
+framework provided by the PM core and the PCI subsystem, it should enable this
+feature by executing the pm_runtime_enable() helper function. However, the
+driver should not call the pm_runtime_allow() helper function unblocking
+the runtime PM of the device. Instead, it should allow user space or some
+platform-specific code to do that (user space can do it via sysfs), although
+once it has called pm_runtime_enable(), it must be prepared to handle the
+runtime PM of the device correctly as soon as pm_runtime_allow() is called
+(which may happen at any time). [It also is possible that user space causes
+pm_runtime_allow() to be called via sysfs before the driver is loaded, so in
+fact the driver has to be prepared to handle the runtime PM of the device as
+soon as it calls pm_runtime_enable().]
+
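+As a rough sketch of that pattern (pdev is the driver's struct pci_dev
+pointer; note that pm_runtime_allow() is deliberately not called here):
+
+	/* in probe(), once the device has been fully set up */
+	pm_runtime_enable(&pdev->dev);
+
+	/* in remove(), before tearing the device down */
+	pm_runtime_disable(&pdev->dev);
+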
+The runtime PM framework works by processing requests to suspend or resume
+devices, or to check if they are idle (in which cases it is reasonable to
+subsequently request that they be suspended). These requests are represented
+by work items put into the power management workqueue, pm_wq. Although there
+are a few situations in which power management requests are automatically
+queued by the PM core (for example, after processing a request to resume a
+device the PM core automatically queues a request to check if the device is
+idle), device drivers are generally responsible for queuing power management
+requests for their devices. For this purpose they should use the runtime PM
+helper functions provided by the PM core, discussed in
+Documentation/power/runtime_pm.txt.
+
+Devices can also be suspended and resumed synchronously, without placing a
+request into pm_wq. In the majority of cases this also is done by their
+drivers that use helper functions provided by the PM core for this purpose.
+
+For more information on the runtime PM of devices refer to
+Documentation/power/runtime_pm.txt.
+
+
+4. Resources
+============
+
+PCI Local Bus Specification, Rev. 3.0
+PCI Bus Power Management Interface Specification, Rev. 1.2
+Advanced Configuration and Power Interface (ACPI) Specification, Rev. 3.0b
+PCI Express Base Specification, Rev. 2.0
+Documentation/power/devices.txt
+Documentation/power/runtime_pm.txt
diff --git a/Documentation/power/pm_qos_interface.txt b/Documentation/power/pm_qos_interface.txt
index c40866e8b957..bfed898a03fc 100644
--- a/Documentation/power/pm_qos_interface.txt
+++ b/Documentation/power/pm_qos_interface.txt
@@ -18,44 +18,46 @@ and pm_qos_params.h. This is done because having the available parameters
being runtime configurable or changeable from a driver was seen as too easy to
abuse.
-For each parameter a list of performance requirements is maintained along with
+For each parameter a list of performance requests is maintained along with
an aggregated target value. The aggregated target value is updated with
-changes to the requirement list or elements of the list. Typically the
-aggregated target value is simply the max or min of the requirement values held
+changes to the request list or elements of the list. Typically the
+aggregated target value is simply the max or min of the request values held
in the parameter list elements.
From kernel mode the use of this interface is simple:
-pm_qos_add_requirement(param_id, name, target_value):
-Will insert a named element in the list for that identified PM_QOS parameter
-with the target value. Upon change to this list the new target is recomputed
-and any registered notifiers are called only if the target value is now
-different.
-pm_qos_update_requirement(param_id, name, new_target_value):
-Will search the list identified by the param_id for the named list element and
-then update its target value, calling the notification tree if the aggregated
-target is changed. with that name is already registered.
+handle = pm_qos_add_request(param_class, target_value):
+Will insert an element into the list for that identified PM_QOS class with the
+target value. Upon change to this list the new target is recomputed and any
+registered notifiers are called only if the target value is now different.
+Clients of pm_qos need to save the returned handle.
-pm_qos_remove_requirement(param_id, name):
-Will search the identified list for the named element and remove it, after
-removal it will update the aggregate target and call the notification tree if
-the target was changed as a result of removing the named requirement.
+void pm_qos_update_request(handle, new_target_value):
+Will update the list element pointed to by the handle with the new target value
+and recompute the new aggregated target, calling the notification tree if the
+target is changed.
+
+void pm_qos_remove_request(handle):
+Will remove the element. After removal it will update the aggregate target and
+call the notification tree if the target was changed as a result of removing
+the request.
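+
+Putting these together, a hypothetical kernel-mode user of the cpu_dma_latency
+class (the values and the use of the handle are purely illustrative) might do:
+
+	handle = pm_qos_add_request(PM_QOS_CPU_DMA_LATENCY, 20);
+	/* ... latency-sensitive processing ... */
+	pm_qos_update_request(handle, 100);
+	/* ... */
+	pm_qos_remove_request(handle);
+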
From user mode:
-Only processes can register a pm_qos requirement. To provide for automatic
-cleanup for process the interface requires the process to register its
-parameter requirements in the following way:
+Only processes can register a pm_qos request. To provide for automatic
+cleanup of a process, the interface requires the process to register its
+parameter requests in the following way:
To register the default pm_qos target for the specific parameter, the process
must open one of /dev/[cpu_dma_latency, network_latency, network_throughput]
As long as the device node is held open that process has a registered
-requirement on the parameter. The name of the requirement is "process_<PID>"
-derived from the current->pid from within the open system call.
+request on the parameter.
-To change the requested target value the process needs to write a s32 value to
-the open device node. This translates to a pm_qos_update_requirement call.
+To change the requested target value the process needs to write an s32 value to
+the open device node. Alternatively the user mode program could write a hex
+string for the value using a 10 character long format, e.g. "0x12345678". This
+translates to a pm_qos_update_request call.
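+
+A minimal user-space sketch of this (error handling and #includes omitted; the
+request stays in effect for as long as the file descriptor is kept open):
+
+	int32_t target = 20;	/* microseconds */
+	int fd = open("/dev/cpu_dma_latency", O_RDWR);
+
+	write(fd, &target, sizeof(target));
+	/* ... latency-sensitive work ... */
+	close(fd);		/* removes the request */
+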
To remove the user mode request for a target value simply close the device
node.
diff --git a/Documentation/power/regulator/consumer.txt b/Documentation/power/regulator/consumer.txt
index cdebb5145c25..55c4175d8099 100644
--- a/Documentation/power/regulator/consumer.txt
+++ b/Documentation/power/regulator/consumer.txt
@@ -8,11 +8,11 @@ Please see overview.txt for a description of the terms used in this text.
1. Consumer Regulator Access (static & dynamic drivers)
=======================================================
-A consumer driver can get access to it's supply regulator by calling :-
+A consumer driver can get access to its supply regulator by calling :-
regulator = regulator_get(dev, "Vcc");
-The consumer passes in it's struct device pointer and power supply ID. The core
+The consumer passes in its struct device pointer and power supply ID. The core
then finds the correct regulator by consulting a machine specific lookup table.
If the lookup is successful then this call will return a pointer to the struct
regulator that supplies this consumer.
@@ -34,7 +34,7 @@ usually be called in your device drivers probe() and remove() respectively.
2. Regulator Output Enable & Disable (static & dynamic drivers)
====================================================================
-A consumer can enable it's power supply by calling:-
+A consumer can enable its power supply by calling:-
int regulator_enable(regulator);
@@ -49,7 +49,7 @@ int regulator_is_enabled(regulator);
This will return > zero when the regulator is enabled.
-A consumer can disable it's supply when no longer needed by calling :-
+A consumer can disable its supply when no longer needed by calling :-
int regulator_disable(regulator);
@@ -140,7 +140,7 @@ by calling :-
int regulator_set_optimum_mode(struct regulator *regulator, int load_uA);
This will cause the core to recalculate the total load on the regulator (based
-on all it's consumers) and change operating mode (if necessary and permitted)
+on all its consumers) and change operating mode (if necessary and permitted)
to best match the current operating load.
The load_uA value can be determined from the consumers datasheet. e.g.most
diff --git a/Documentation/power/regulator/machine.txt b/Documentation/power/regulator/machine.txt
index 63728fed620b..bdec39b9bd75 100644
--- a/Documentation/power/regulator/machine.txt
+++ b/Documentation/power/regulator/machine.txt
@@ -52,7 +52,7 @@ static struct regulator_init_data regulator1_data = {
};
Regulator-1 supplies power to Regulator-2. This relationship must be registered
-with the core so that Regulator-1 is also enabled when Consumer A enables it's
+with the core so that Regulator-1 is also enabled when Consumer A enables its
supply (Regulator-2). The supply regulator is set by the supply_regulator_dev
field below:-
diff --git a/Documentation/power/regulator/overview.txt b/Documentation/power/regulator/overview.txt
index ffd185bb6054..9363e056188a 100644
--- a/Documentation/power/regulator/overview.txt
+++ b/Documentation/power/regulator/overview.txt
@@ -35,16 +35,16 @@ Some terms used in this document:-
o Consumer - Electronic device that is supplied power by a regulator.
Consumers can be classified into two types:-
- Static: consumer does not change it's supply voltage or
+ Static: consumer does not change its supply voltage or
current limit. It only needs to enable or disable it's
- power supply. It's supply voltage is set by the hardware,
+ power supply. Its supply voltage is set by the hardware,
bootloader, firmware or kernel board initialisation code.
Dynamic: consumer needs to change it's supply voltage or
current limit to meet operation demands.
- o Power Domain - Electronic circuit that is supplied it's input power by the
+ o Power Domain - Electronic circuit that is supplied its input power by the
output power of a regulator, switch or by another power
domain.
diff --git a/Documentation/power/userland-swsusp.txt b/Documentation/power/userland-swsusp.txt
index b967cd9137d6..81680f9f5909 100644
--- a/Documentation/power/userland-swsusp.txt
+++ b/Documentation/power/userland-swsusp.txt
@@ -24,6 +24,10 @@ assumed to be in the resume mode. The device cannot be open for simultaneous
reading and writing. It is also impossible to have the device open more than
once at a time.
+Even opening the device has side effects. Data structures are
+allocated, and PM_HIBERNATION_PREPARE / PM_RESTORE_PREPARE chains are
+called.
+
The ioctl() commands recognized by the device are:
SNAPSHOT_FREEZE - freeze user space processes (the current process is
diff --git a/Documentation/powerpc/booting-without-of.txt b/Documentation/powerpc/booting-without-of.txt
index 79f533f38c61..46d22105aa07 100644
--- a/Documentation/powerpc/booting-without-of.txt
+++ b/Documentation/powerpc/booting-without-of.txt
@@ -1289,7 +1289,7 @@ link between a device node and its interrupt parent in
the interrupt tree. The value of interrupt-parent is the
phandle of the parent node.
-If the interrupt-parent property is not defined for a node, it's
+If the interrupt-parent property is not defined for a node, its
interrupt parent is assumed to be an ancestor in the node's
_device tree_ hierarchy.
diff --git a/Documentation/powerpc/dts-bindings/4xx/reboot.txt b/Documentation/powerpc/dts-bindings/4xx/reboot.txt
new file mode 100644
index 000000000000..d7217260589c
--- /dev/null
+++ b/Documentation/powerpc/dts-bindings/4xx/reboot.txt
@@ -0,0 +1,18 @@
+Reboot property to control system reboot on PPC4xx systems:
+
+By setting "reset_type" to one of the following values, the default
+software reset mechanism may be overridden. Here are the possible values of
+"reset_type":
+
+ 1 - PPC4xx core reset
+ 2 - PPC4xx chip reset
+ 3 - PPC4xx system reset (default)
+
+Example:
+
+ cpu@0 {
+ device_type = "cpu";
+ model = "PowerPC,440SPe";
+ ...
+ reset-type = <2>; /* Use chip-reset */
+ };
diff --git a/Documentation/powerpc/dts-bindings/fsl/8xxx_gpio.txt b/Documentation/powerpc/dts-bindings/fsl/8xxx_gpio.txt
index d015dcec4011..b0019eb5330e 100644
--- a/Documentation/powerpc/dts-bindings/fsl/8xxx_gpio.txt
+++ b/Documentation/powerpc/dts-bindings/fsl/8xxx_gpio.txt
@@ -11,7 +11,7 @@ Required properties:
83xx, "fsl,mpc8572-gpio" for 85xx and "fsl,mpc8610-gpio" for 86xx.
- #gpio-cells : Should be two. The first cell is the pin number and the
second cell is used to specify optional parameters (currently unused).
- - interrupts : Interrupt mapping for GPIO IRQ (currently unused).
+ - interrupts : Interrupt mapping for GPIO IRQ.
- interrupt-parent : Phandle for the interrupt controller that
services interrupts for this device.
- gpio-controller : Marks the port as GPIO controller.
@@ -38,3 +38,23 @@ Example of gpio-controller nodes for a MPC8347 SoC:
See booting-without-of.txt for details of how to specify GPIO
information for devices.
+
+To use GPIO pins as interrupt sources for peripherals, specify the
+GPIO controller as the interrupt parent and define GPIO number +
+trigger mode using the interrupts property, which is defined like
+this:
+
+interrupts = <number trigger>, where:
+ - number: GPIO pin (0..31)
+ - trigger: trigger mode:
+ 2 = trigger on falling edge
+ 3 = trigger on both edges
+
+Example of device using this is:
+
+ funkyfpga@0 {
+ compatible = "funky-fpga";
+ ...
+ interrupts = <4 3>;
+ interrupt-parent = <&gpio1>;
+ };
diff --git a/Documentation/powerpc/dts-bindings/xilinx.txt b/Documentation/powerpc/dts-bindings/xilinx.txt
index ea68046bb9cb..299d0923537b 100644
--- a/Documentation/powerpc/dts-bindings/xilinx.txt
+++ b/Documentation/powerpc/dts-bindings/xilinx.txt
@@ -11,7 +11,7 @@
control how the core is synthesized. Historically, the EDK tool would
extract the device parameters relevant to device drivers and copy them
into an 'xparameters.h' in the form of #define symbols. This tells the
- device drivers how the IP cores are configured, but it requres the kernel
+ device drivers how the IP cores are configured, but it requires the kernel
to be recompiled every time the FPGA bitstream is resynthesized.
The new approach is to export the parameters into the device tree and
diff --git a/Documentation/powerpc/phyp-assisted-dump.txt b/Documentation/powerpc/phyp-assisted-dump.txt
index c4682b982a2e..ad340205d96a 100644
--- a/Documentation/powerpc/phyp-assisted-dump.txt
+++ b/Documentation/powerpc/phyp-assisted-dump.txt
@@ -19,7 +19,7 @@ dump offers several strong, practical advantages:
immediately available to the system for normal use.
-- After the dump is completed, no further reboots are
required; the system will be fully usable, and running
- in it's normal, production mode on it normal kernel.
+ in its normal, production mode on its normal kernel.
The above can only be accomplished by coordination with,
and assistance from the hypervisor. The procedure is
diff --git a/Documentation/rbtree.txt b/Documentation/rbtree.txt
index aae8355d3166..221f38be98f4 100644
--- a/Documentation/rbtree.txt
+++ b/Documentation/rbtree.txt
@@ -190,3 +190,61 @@ Example:
for (node = rb_first(&mytree); node; node = rb_next(node))
printk("key=%s\n", rb_entry(node, struct mytype, node)->keystring);
+Support for Augmented rbtrees
+-----------------------------
+
+Augmented rbtree is an rbtree with "some" additional data stored in each node.
+This data can be used to add new functionality to the rbtree.
+The augmented rbtree is an optional feature built on top of the basic rbtree
+infrastructure. An rbtree user who wants this feature will have the augment
+callback function in rb_root initialized.
+
+This callback function will be called from rbtree core routines whenever
+a node has a change in one or both of its children. It is the responsibility
+of the callback function to recalculate the additional data that is in the
+rb node using the new children information. Note that if this new additional
+data affects the parent node's additional data, then the callback function has
+to handle it and do the recursive updates.
+
+
+An interval tree is an example of an augmented rb tree. Reference -
+"Introduction to Algorithms" by Cormen, Leiserson, Rivest and Stein.
+More details about interval trees:
+
+A classical rbtree has a single key and cannot be directly used to store
+interval ranges like [lo:hi] and do a quick lookup for any overlap with a new
+lo:hi or to find whether there is an exact match for a new lo:hi.
+
+However, rbtree can be augmented to store such interval ranges in a structured
+way making it possible to do efficient lookup and exact match.
+
+This "extra information" stored in each node is the maximum hi
+(max_hi) value among all the nodes that are its descendents. This
+information can be maintained at each node just be looking at the node
+and its immediate children. And this will be used in O(log n) lookup
+for lowest match (lowest start address among all possible matches)
+with something like:
+
+find_lowest_match(lo, hi, node)
+{
+ lowest_match = NULL;
+ while (node) {
+ if (max_hi(node->left) > lo) {
+ // Lowest overlap if any must be on left side
+ node = node->left;
+ } else if (overlap(lo, hi, node)) {
+ lowest_match = node;
+ break;
+ } else if (lo > node->lo) {
+ // Lowest overlap if any must be on right side
+ node = node->right;
+ } else {
+ break;
+ }
+ }
+ return lowest_match;
+}
+
+Finding an exact match amounts to first finding the lowest match and then
+following successor nodes looking for an exact match, until the start of a
+node is beyond the hi value we are looking for.
diff --git a/Documentation/rfkill.txt b/Documentation/rfkill.txt
index b4860509c319..83668e5dd17f 100644
--- a/Documentation/rfkill.txt
+++ b/Documentation/rfkill.txt
@@ -99,37 +99,15 @@ system. Also, it is possible to switch all rfkill drivers (or all drivers of
a specified type) into a state which also updates the default state for
hotplugged devices.
-After an application opens /dev/rfkill, it can read the current state of
-all devices, and afterwards can poll the descriptor for hotplug or state
-change events.
-
-Applications must ignore operations (the "op" field) they do not handle,
-this allows the API to be extended in the future.
-
-Additionally, each rfkill device is registered in sysfs and there has the
-following attributes:
-
- name: Name assigned by driver to this key (interface or driver name).
- type: Driver type string ("wlan", "bluetooth", etc).
- persistent: Whether the soft blocked state is initialised from
- non-volatile storage at startup.
- state: Current state of the transmitter
- 0: RFKILL_STATE_SOFT_BLOCKED
- transmitter is turned off by software
- 1: RFKILL_STATE_UNBLOCKED
- transmitter is (potentially) active
- 2: RFKILL_STATE_HARD_BLOCKED
- transmitter is forced off by something outside of
- the driver's control.
- This file is deprecated because it can only properly show
- three of the four possible states, soft-and-hard-blocked is
- missing.
- claim: 0: Kernel handles events
- This file is deprecated because there no longer is a way to
- claim just control over a single rfkill instance.
-
-rfkill devices also issue uevents (with an action of "change"), with the
-following environment variables set:
+After an application opens /dev/rfkill, it can read the current state of all
+devices. Changes can be obtained either by polling the descriptor for
+hotplug or state change events or by listening for uevents emitted by the
+rfkill core framework.
+
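+A minimal sketch of such a reader (error handling and #includes omitted; see
+the definition of struct rfkill_event in include/linux/rfkill.h) might be:
+
+	struct rfkill_event ev;
+	int fd = open("/dev/rfkill", O_RDONLY);
+
+	while (read(fd, &ev, sizeof(ev)) > 0) {
+		/* ev.op, ev.type, ev.idx, ev.soft and ev.hard describe
+		 * the device and the change that has just happened */
+	}
+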
+Additionally, each rfkill device is registered in sysfs and emits uevents.
+
+rfkill devices issue uevents (with an action of "change"), with the following
+environment variables set:
RFKILL_NAME
RFKILL_STATE
@@ -137,3 +115,7 @@ RFKILL_TYPE
The contents of these variables corresponds to the "name", "state" and
"type" sysfs files explained above.
+
+
+For further details consult Documentation/ABI/stable/dev-rfkill and
+Documentation/ABI/stable/sysfs-class-rfkill.
diff --git a/Documentation/rt-mutex-design.txt b/Documentation/rt-mutex-design.txt
index 4b736d24da7a..8df0b782c4d7 100644
--- a/Documentation/rt-mutex-design.txt
+++ b/Documentation/rt-mutex-design.txt
@@ -657,7 +657,7 @@ here.
The waiter structure has a "task" field that points to the task that is blocked
on the mutex. This field can be NULL the first time it goes through the loop
-or if the task is a pending owner and had it's mutex stolen. If the "task"
+or if the task is a pending owner and had its mutex stolen. If the "task"
field is NULL then we need to set up the accounting for it.
Task blocks on mutex
diff --git a/Documentation/scheduler/sched-design-CFS.txt b/Documentation/scheduler/sched-design-CFS.txt
index 6f33593e59e2..8239ebbcddce 100644
--- a/Documentation/scheduler/sched-design-CFS.txt
+++ b/Documentation/scheduler/sched-design-CFS.txt
@@ -211,7 +211,7 @@ provide fair CPU time to each such task group. For example, it may be
desirable to first provide fair CPU time to each user on the system and then to
each task belonging to a user.
-CONFIG_GROUP_SCHED strives to achieve exactly that. It lets tasks to be
+CONFIG_CGROUP_SCHED strives to achieve exactly that. It lets tasks be
grouped and divides CPU time fairly among such groups.
CONFIG_RT_GROUP_SCHED permits to group real-time (i.e., SCHED_FIFO and
@@ -220,38 +220,11 @@ SCHED_RR) tasks.
CONFIG_FAIR_GROUP_SCHED permits to group CFS (i.e., SCHED_NORMAL and
SCHED_BATCH) tasks.
-At present, there are two (mutually exclusive) mechanisms to group tasks for
-CPU bandwidth control purposes:
-
- - Based on user id (CONFIG_USER_SCHED)
-
- With this option, tasks are grouped according to their user id.
-
- - Based on "cgroup" pseudo filesystem (CONFIG_CGROUP_SCHED)
-
- This options needs CONFIG_CGROUPS to be defined, and lets the administrator
+ These options need CONFIG_CGROUPS to be defined, and let the administrator
create arbitrary groups of tasks, using the "cgroup" pseudo filesystem. See
Documentation/cgroups/cgroups.txt for more information about this filesystem.
-Only one of these options to group tasks can be chosen and not both.
-
-When CONFIG_USER_SCHED is defined, a directory is created in sysfs for each new
-user and a "cpu_share" file is added in that directory.
-
- # cd /sys/kernel/uids
- # cat 512/cpu_share # Display user 512's CPU share
- 1024
- # echo 2048 > 512/cpu_share # Modify user 512's CPU share
- # cat 512/cpu_share # Display user 512's CPU share
- 2048
- #
-
-CPU bandwidth between two users is divided in the ratio of their CPU shares.
-For example: if you would like user "root" to get twice the bandwidth of user
-"guest," then set the cpu_share for both the users such that "root"'s cpu_share
-is twice "guest"'s cpu_share.
-
-When CONFIG_CGROUP_SCHED is defined, a "cpu.shares" file is created for each
+When CONFIG_FAIR_GROUP_SCHED is defined, a "cpu.shares" file is created for each
group created using the pseudo filesystem. See example steps below to create
task groups and modify their CPU share using the "cgroups" pseudo filesystem.
@@ -273,24 +246,3 @@ task groups and modify their CPU share using the "cgroups" pseudo filesystem.
# #Launch gmplayer (or your favourite movie player)
# echo <movie_player_pid> > multimedia/tasks
-
-8. Implementation note: user namespaces
-
-User namespaces are intended to be hierarchical. But they are currently
-only partially implemented. Each of those has ramifications for CFS.
-
-First, since user namespaces are hierarchical, the /sys/kernel/uids
-presentation is inadequate. Eventually we will likely want to use sysfs
-tagging to provide private views of /sys/kernel/uids within each user
-namespace.
-
-Second, the hierarchical nature is intended to support completely
-unprivileged use of user namespaces. So if using user groups, then
-we want the users in a user namespace to be children of the user
-who created it.
-
-That is currently unimplemented. So instead, every user in a new
-user namespace will receive 1024 shares just like any user in the
-initial user namespace. Note that at the moment creation of a new
-user namespace requires each of CAP_SYS_ADMIN, CAP_SETUID, and
-CAP_SETGID.
diff --git a/Documentation/scheduler/sched-rt-group.txt b/Documentation/scheduler/sched-rt-group.txt
index 86eabe6c3419..605b0d40329d 100644
--- a/Documentation/scheduler/sched-rt-group.txt
+++ b/Documentation/scheduler/sched-rt-group.txt
@@ -126,23 +126,12 @@ priority!
2.3 Basis for grouping tasks
----------------------------
-There are two compile-time settings for allocating CPU bandwidth. These are
-configured using the "Basis for grouping tasks" multiple choice menu under
-General setup > Group CPU Scheduler:
-
-a. CONFIG_USER_SCHED (aka "Basis for grouping tasks" = "user id")
-
-This lets you use the virtual files under
-"/sys/kernel/uids/<uid>/cpu_rt_runtime_us" to control he CPU time reserved for
-each user .
-
-The other option is:
-
-.o CONFIG_CGROUP_SCHED (aka "Basis for grouping tasks" = "Control groups")
+Enabling CONFIG_RT_GROUP_SCHED lets you explicitly allocate real
+CPU bandwidth to task groups.
This uses the /cgroup virtual file system and
"/cgroup/<cgroup>/cpu.rt_runtime_us" to control the CPU time reserved for each
-control group instead.
+control group.
For more information on working with control groups, you should read
Documentation/cgroups/cgroups.txt as well.
@@ -161,8 +150,7 @@ For now, this can be simplified to just the following (but see Future plans):
===============
There is work in progress to make the scheduling period for each group
-("/sys/kernel/uids/<uid>/cpu_rt_period_us" or
-"/cgroup/<cgroup>/cpu.rt_period_us" respectively) configurable as well.
+("/cgroup/<cgroup>/cpu.rt_period_us") configurable as well.
The constraint on the period is that a subgroup must have a smaller or
equal period to its parent. But realistically its not very useful _yet_
diff --git a/Documentation/scsi/ChangeLog.lpfc b/Documentation/scsi/ChangeLog.lpfc
index 2ffc1148eb95..e759e92e286d 100644
--- a/Documentation/scsi/ChangeLog.lpfc
+++ b/Documentation/scsi/ChangeLog.lpfc
@@ -707,7 +707,7 @@ Changes from 20040920 to 20041018
* Integrate patches from Christoph Hellwig: two new helpers common
to lpfc_sli_resume_iocb and lpfc_sli_issue_iocb - singificant
cleanup of those two functions - the unused SLI_IOCB_USE_TXQ is
- gone - lpfc_sli_issue_iocb_wait loses it's flags argument
+ gone - lpfc_sli_issue_iocb_wait loses its flags argument
totally.
* Fix in lpfc_sli.c: we can not store a 5 bit value in a 4-bit
field.
@@ -1028,7 +1028,7 @@ Changes from 20040614 to 20040709
* Remove the need for buf_tmo.
* Changed ULP_BDE64 to struct ulp_bde64.
* Changed ULP_BDE to struct ulp_bde.
- * Cleanup lpfc_os_return_scsi_cmd() and it's call path.
+ * Cleanup lpfc_os_return_scsi_cmd() and its call path.
* Removed lpfc_no_device_delay.
* Consolidating lpfc_hba_put_event() into lpfc_put_event().
* Removed following attributes and their functionality:
diff --git a/Documentation/scsi/FlashPoint.txt b/Documentation/scsi/FlashPoint.txt
index d5acaa300a46..1540a92f6d2b 100644
--- a/Documentation/scsi/FlashPoint.txt
+++ b/Documentation/scsi/FlashPoint.txt
@@ -71,7 +71,7 @@ peters@mylex.com
Ever since its introduction last October, the BusLogic FlashPoint LT has
been problematic for members of the Linux community, in that no Linux
-drivers have been available for this new Ultra SCSI product. Despite it's
+drivers have been available for this new Ultra SCSI product. Despite its
officially being positioned as a desktop workstation product, and not being
particularly well suited for a high performance multitasking operating
system like Linux, the FlashPoint LT has been touted by computer system
diff --git a/Documentation/scsi/dtc3x80.txt b/Documentation/scsi/dtc3x80.txt
index e8ae6230ab3e..1d7af9f9a8ed 100644
--- a/Documentation/scsi/dtc3x80.txt
+++ b/Documentation/scsi/dtc3x80.txt
@@ -12,7 +12,7 @@ The 3180 does not. Otherwise, they are identical.
The DTC3x80 does not support DMA but it does have Pseudo-DMA which is
supported by the driver.
-It's DTC406 scsi chip is supposedly compatible with the NCR 53C400.
+Its DTC406 scsi chip is supposedly compatible with the NCR 53C400.
It is memory mapped, uses an IRQ, but no dma or io-port. There is
internal DMA, between SCSI bus and an on-chip 128-byte buffer. Double
buffering is done automagically by the chip. Data is transferred
diff --git a/Documentation/scsi/ncr53c8xx.txt b/Documentation/scsi/ncr53c8xx.txt
index 08e2b4d04aab..cda5f8fa2c66 100644
--- a/Documentation/scsi/ncr53c8xx.txt
+++ b/Documentation/scsi/ncr53c8xx.txt
@@ -1479,7 +1479,7 @@ Wide16 SCSI.
Enabling serial NVRAM support enables detection of the serial NVRAM included
on Symbios and some Symbios compatible host adaptors, and Tekram boards. The
serial NVRAM is used by Symbios and Tekram to hold set up parameters for the
-host adaptor and it's attached drives.
+host adaptor and its attached drives.
The Symbios NVRAM also holds data on the boot order of host adaptors in a
system with more than one host adaptor. This enables the order of scanning
diff --git a/Documentation/scsi/osst.txt b/Documentation/scsi/osst.txt
index f536907e241d..2b21890bc983 100644
--- a/Documentation/scsi/osst.txt
+++ b/Documentation/scsi/osst.txt
@@ -40,7 +40,7 @@ behavior looks very much the same as st to the userspace applications.
History
-------
-In the first place, osst shared it's identity very much with st. That meant
+In the first place, osst shared its identity very much with st. That meant
that it used the same kernel structures and the same device node as st.
So you could only have either of them being present in the kernel. This has
been fixed by registering an own device, now.
diff --git a/Documentation/scsi/scsi_fc_transport.txt b/Documentation/scsi/scsi_fc_transport.txt
index aec6549ab097..e00192de4d1c 100644
--- a/Documentation/scsi/scsi_fc_transport.txt
+++ b/Documentation/scsi/scsi_fc_transport.txt
@@ -70,7 +70,7 @@ Overview:
up to an administrative entity controlling the vport. For example,
if vports are to be associated with virtual machines, a XEN mgmt
utility would be responsible for creating wwpn/wwnn's for the vport,
- using it's own naming authority and OUI. (Note: it already does this
+ using its own naming authority and OUI. (Note: it already does this
for virtual MAC addresses).
@@ -81,7 +81,7 @@ Device Trees and Vport Objects:
with rports and scsi target objects underneath it. Currently the FC
transport creates the vport object and places it under the scsi_host
object corresponding to the physical adapter. The LLDD will allocate
- a new scsi_host for the vport and link it's object under the vport.
+ a new scsi_host for the vport and link its object under the vport.
The remainder of the tree under the vports scsi_host is the same
as the non-NPIV case. The transport is written currently to easily
allow the parent of the vport to be something other than the scsi_host.
diff --git a/Documentation/scsi/sym53c8xx_2.txt b/Documentation/scsi/sym53c8xx_2.txt
index eb9a7b905b64..6f63b7989679 100644
--- a/Documentation/scsi/sym53c8xx_2.txt
+++ b/Documentation/scsi/sym53c8xx_2.txt
@@ -687,7 +687,7 @@ maintain the driver code.
Enabling serial NVRAM support enables detection of the serial NVRAM included
on Symbios and some Symbios compatible host adaptors, and Tekram boards. The
serial NVRAM is used by Symbios and Tekram to hold set up parameters for the
-host adaptor and it's attached drives.
+host adaptor and its attached drives.
The Symbios NVRAM also holds data on the boot order of host adaptors in a
system with more than one host adaptor. This information is no longer used
diff --git a/Documentation/sound/alsa/ALSA-Configuration.txt b/Documentation/sound/alsa/ALSA-Configuration.txt
index bfcbbf88c44d..2075bbb8b3e2 100644
--- a/Documentation/sound/alsa/ALSA-Configuration.txt
+++ b/Documentation/sound/alsa/ALSA-Configuration.txt
@@ -227,6 +227,16 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
The power-management is supported.
+ Module snd-asihpi
+ -----------------
+
+ Module for AudioScience ASI soundcards
+
+ enable_hpi_hwdep - enable HPI hwdep for AudioScience soundcard
+
+ This module supports multiple cards.
+ The driver requires the firmware loader support on kernel.
+
Module snd-atiixp
-----------------
@@ -622,28 +632,23 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
The power-management is supported.
- Module snd-es968
- ----------------
-
- Module for sound cards based on ESS ES968 chip (PnP only).
-
- This module supports multiple cards, PnP and autoprobe.
-
- The power-management is supported.
-
Module snd-es1688
-----------------
Module for ESS AudioDrive ES-1688 and ES-688 sound cards.
- port - port # for ES-1688 chip (0x220,0x240,0x260)
- fm_port - port # for OPL3 (option; share the same port as default)
+ isapnp - ISA PnP detection - 0 = disable, 1 = enable (default)
mpu_port - port # for MPU-401 port (0x300,0x310,0x320,0x330), -1 = disable (default)
- irq - IRQ # for ES-1688 chip (5,7,9,10)
mpu_irq - IRQ # for MPU-401 port (5,7,9,10)
+ fm_port - port # for OPL3 (option; share the same port as default)
+
+ with isapnp=0, the following additional options are available:
+ port - port # for ES-1688 chip (0x220,0x240,0x260)
+ irq - IRQ # for ES-1688 chip (5,7,9,10)
dma8 - DMA # for ES-1688 chip (0,1,3)
- This module supports multiple cards and autoprobe (without MPU-401 port).
+ This module supports multiple cards and autoprobe (without MPU-401 port)
+ and PnP with the ES968 chip.
Module snd-es18xx
-----------------
diff --git a/Documentation/sound/alsa/HD-Audio.txt b/Documentation/sound/alsa/HD-Audio.txt
index 98d14cb8a85d..bdafdbd32561 100644
--- a/Documentation/sound/alsa/HD-Audio.txt
+++ b/Documentation/sound/alsa/HD-Audio.txt
@@ -204,7 +204,6 @@ generic parser regardless of the codec. Usually the codec-specific
parser is much better than the generic parser (as now). Thus this
option is more about the debugging purpose.
-
Speaker and Headphone Output
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
One of the most frequent (and obvious) bugs with HD-audio is the
@@ -600,6 +599,9 @@ probing, the proc file is available, so you can get the raw codec
information before modified by the driver. Of course, the driver
isn't usable with `probe_only=1`. But you can continue the
configuration via hwdep sysfs file if hda-reconfig option is enabled.
+Using `probe_only` mask 2 skips the reset of HDA codecs (use
+`probe_only=3` as the module option). The hwdep interface can then be
+used to examine the codec state as initialized by the BIOS.
hda-verb
diff --git a/Documentation/sound/alsa/soc/dapm.txt b/Documentation/sound/alsa/soc/dapm.txt
index 9ac842be9b4f..05bf5a0eee41 100644
--- a/Documentation/sound/alsa/soc/dapm.txt
+++ b/Documentation/sound/alsa/soc/dapm.txt
@@ -188,8 +188,8 @@ The WM8731 output mixer has 3 inputs (sources)
3. Mic Sidetone Input
Each input in this example has a kcontrol associated with it (defined in example
-above) and is connected to the output mixer via it's kcontrol name. We can now
-connect the destination widget (wrt audio signal) with it's source widgets.
+above) and is connected to the output mixer via its kcontrol name. We can now
+connect the destination widget (wrt audio signal) with its source widgets.
/* output mixer */
{"Output Mixer", "Line Bypass Switch", "Line Input"},
diff --git a/Documentation/sound/alsa/soc/machine.txt b/Documentation/sound/alsa/soc/machine.txt
index bab7711ce963..2524c75557df 100644
--- a/Documentation/sound/alsa/soc/machine.txt
+++ b/Documentation/sound/alsa/soc/machine.txt
@@ -67,7 +67,7 @@ static struct snd_soc_dai_link corgi_dai = {
.ops = &corgi_ops,
};
-struct snd_soc_card then sets up the machine with it's DAIs. e.g.
+struct snd_soc_card then sets up the machine with its DAIs. e.g.
/* corgi audio machine driver */
static struct snd_soc_card snd_soc_corgi = {
diff --git a/Documentation/sound/alsa/soc/overview.txt b/Documentation/sound/alsa/soc/overview.txt
index 1e4c6d3655f2..138ac88c1461 100644
--- a/Documentation/sound/alsa/soc/overview.txt
+++ b/Documentation/sound/alsa/soc/overview.txt
@@ -33,7 +33,7 @@ features :-
and machines.
* Easy I2S/PCM audio interface setup between codec and SoC. Each SoC
- interface and codec registers it's audio interface capabilities with the
+ interface and codec registers its audio interface capabilities with the
core and are subsequently matched and configured when the application
hardware parameters are known.
diff --git a/Documentation/sparse.txt b/Documentation/sparse.txt
index 34c76a55bc04..9b659c79a547 100644
--- a/Documentation/sparse.txt
+++ b/Documentation/sparse.txt
@@ -54,12 +54,12 @@ Getting sparse
~~~~~~~~~~~~~~
You can get latest released versions from the Sparse homepage at
-http://www.kernel.org/pub/linux/kernel/people/josh/sparse/
+https://sparse.wiki.kernel.org/index.php/Main_Page
Alternatively, you can get snapshots of the latest development version
of sparse using git to clone..
- git://git.kernel.org/pub/scm/linux/kernel/git/josh/sparse.git
+ git://git.kernel.org/pub/scm/devel/sparse/sparse.git
DaveJ has hourly generated tarballs of the git tree available at..
diff --git a/Documentation/spi/ep93xx_spi b/Documentation/spi/ep93xx_spi
new file mode 100644
index 000000000000..6325f5b48635
--- /dev/null
+++ b/Documentation/spi/ep93xx_spi
@@ -0,0 +1,95 @@
+Cirrus EP93xx SPI controller driver HOWTO
+=========================================
+
+The ep93xx_spi driver brings SPI master support for the EP93xx SPI
+controller. Chip selects are implemented with GPIO lines.
+
+NOTE: If possible, don't use SFRMOUT (SFRM1) signal as a chip select. It will
+not work correctly (it cannot be controlled by software). Use GPIO lines
+instead.
+
+Sample configuration
+====================
+
+Typically driver configuration is done in platform board files (the files under
+arch/arm/mach-ep93xx/*.c). In this example we configure MMC over SPI through
+this driver on the TS-7260 board. You can adapt the code to suit your needs.
+
+This example uses EGPIO9 as SD/MMC card chip select (this is wired in DIO1
+header on the board).
+
+You need to select CONFIG_MMC_SPI to use the mmc_spi driver.
+
+arch/arm/mach-ep93xx/ts72xx.c:
+
+...
+#include <linux/gpio.h>
+#include <linux/spi/spi.h>
+
+#include <mach/ep93xx_spi.h>
+
+/* this is our GPIO line used for chip select */
+#define MMC_CHIP_SELECT_GPIO EP93XX_GPIO_LINE_EGPIO9
+
+static int ts72xx_mmc_spi_setup(struct spi_device *spi)
+{
+ int err;
+
+ err = gpio_request(MMC_CHIP_SELECT_GPIO, spi->modalias);
+ if (err)
+ return err;
+
+ gpio_direction_output(MMC_CHIP_SELECT_GPIO, 1);
+
+ return 0;
+}
+
+static void ts72xx_mmc_spi_cleanup(struct spi_device *spi)
+{
+ gpio_set_value(MMC_CHIP_SELECT_GPIO, 1);
+ gpio_direction_input(MMC_CHIP_SELECT_GPIO);
+ gpio_free(MMC_CHIP_SELECT_GPIO);
+}
+
+static void ts72xx_mmc_spi_cs_control(struct spi_device *spi, int value)
+{
+ gpio_set_value(MMC_CHIP_SELECT_GPIO, value);
+}
+
+static struct ep93xx_spi_chip_ops ts72xx_mmc_spi_ops = {
+ .setup = ts72xx_mmc_spi_setup,
+ .cleanup = ts72xx_mmc_spi_cleanup,
+ .cs_control = ts72xx_mmc_spi_cs_control,
+};
+
+static struct spi_board_info ts72xx_spi_devices[] __initdata = {
+ {
+ .modalias = "mmc_spi",
+ .controller_data = &ts72xx_mmc_spi_ops,
+ /*
+ * We use 10 MHz even though the maximum is 7.4 MHz. The driver
+ * will limit it automatically to max. frequency.
+ */
+ .max_speed_hz = 10 * 1000 * 1000,
+ .bus_num = 0,
+ .chip_select = 0,
+ .mode = SPI_MODE_0,
+ },
+};
+
+static struct ep93xx_spi_info ts72xx_spi_info = {
+ .num_chipselect = ARRAY_SIZE(ts72xx_spi_devices),
+};
+
+static void __init ts72xx_init_machine(void)
+{
+ ...
+ ep93xx_register_spi(&ts72xx_spi_info, ts72xx_spi_devices,
+ ARRAY_SIZE(ts72xx_spi_devices));
+}
+
+Thanks to
+=========
+Martin Guy, H. Hartley Sweeten and others who helped me during development of
+the driver. Simplemachines.it donated me a Sim.One board which I used for
+testing the driver on the EP9307.
diff --git a/Documentation/spi/spidev_fdx.c b/Documentation/spi/spidev_fdx.c
index fc354f760384..36ec0774ca0b 100644
--- a/Documentation/spi/spidev_fdx.c
+++ b/Documentation/spi/spidev_fdx.c
@@ -58,10 +58,10 @@ static void do_msg(int fd, int len)
len = sizeof buf;
buf[0] = 0xaa;
- xfer[0].tx_buf = (__u64) buf;
+ xfer[0].tx_buf = (unsigned long)buf;
xfer[0].len = 1;
- xfer[1].rx_buf = (__u64) buf;
+ xfer[1].rx_buf = (unsigned long) buf;
xfer[1].len = len;
status = ioctl(fd, SPI_IOC_MESSAGE(2), xfer);
diff --git a/Documentation/sysctl/net.txt b/Documentation/sysctl/net.txt
index df38ef046f8d..cbd05ffc606b 100644
--- a/Documentation/sysctl/net.txt
+++ b/Documentation/sysctl/net.txt
@@ -84,6 +84,16 @@ netdev_max_backlog
Maximum number of packets, queued on the INPUT side, when the interface
receives packets faster than kernel can process them.
+netdev_tstamp_prequeue
+----------------------
+
+If set to 0, RX packet timestamps can be sampled after RPS processing, when
+the target CPU processes packets. This might add a small delay to the
+timestamps, but permits the load to be distributed over several CPUs.
+
+If set to 1 (default), timestamps are sampled as soon as possible, before
+queueing.
+
optmem_max
----------
diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt
index 6c7d18c53f84..5fdbb612aeb8 100644
--- a/Documentation/sysctl/vm.txt
+++ b/Documentation/sysctl/vm.txt
@@ -19,6 +19,7 @@ files can be found in mm/swap.c.
Currently, these files are in /proc/sys/vm:
- block_dump
+- compact_memory
- dirty_background_bytes
- dirty_background_ratio
- dirty_bytes
@@ -26,6 +27,7 @@ Currently, these files are in /proc/sys/vm:
- dirty_ratio
- dirty_writeback_centisecs
- drop_caches
+- extfrag_threshold
- hugepages_treat_as_movable
- hugetlb_shm_group
- laptop_mode
@@ -64,6 +66,15 @@ information on block I/O debugging is in Documentation/laptops/laptop-mode.txt.
==============================================================
+compact_memory
+
+Available only when CONFIG_COMPACTION is set. When 1 is written to the file,
+all zones are compacted such that free memory is available in contiguous
+blocks where possible. This can be important, for example, in the allocation of
+huge pages, although processes will also directly compact memory as required.
+
+==============================================================
+
dirty_background_bytes
Contains the amount of dirty memory at which the pdflush background writeback
@@ -139,6 +150,20 @@ user should run `sync' first.
==============================================================
+extfrag_threshold
+
+This parameter affects whether the kernel will compact memory or direct
+reclaim to satisfy a high-order allocation. /proc/extfrag_index shows what
+the fragmentation index for each order is in each zone in the system. Values
+tending towards 0 imply allocations would fail due to lack of memory,
+values towards 1000 imply failures are due to fragmentation and -1 implies
+that the allocation will succeed as long as watermarks are met.
+
+The kernel will not compact memory in a zone if the
+fragmentation index is <= extfrag_threshold. The default value is 500.
+
+==============================================================
+
hugepages_treat_as_movable
This parameter is only useful when kernelcore= is specified at boot time to
diff --git a/Documentation/sysfs-rules.txt b/Documentation/sysfs-rules.txt
index 5d8bc2cd250c..c1a1fd636bf9 100644
--- a/Documentation/sysfs-rules.txt
+++ b/Documentation/sysfs-rules.txt
@@ -125,7 +125,7 @@ versions of the sysfs interface.
- Block
The converted block subsystem at /sys/class/block or
/sys/subsystem/block will contain the links for disks and partitions
- at the same level, never in a hierarchy. Assuming the block subsytem to
+ at the same level, never in a hierarchy. Assuming the block subsystem to
contain only disks and not partition devices in the same flat list is
a bug in the application.
diff --git a/Documentation/sysrq.txt b/Documentation/sysrq.txt
index d56a01775423..5c17196c8fe9 100644
--- a/Documentation/sysrq.txt
+++ b/Documentation/sysrq.txt
@@ -177,13 +177,13 @@ virtual console (ALT+Fn) and then back again should also help.
* I hit SysRq, but nothing seems to happen, what's wrong?
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-There are some keyboards that send different scancodes for SysRq than the
-pre-defined 0x54. So if SysRq doesn't work out of the box for a certain
-keyboard, run 'showkey -s' to find out the proper scancode sequence. Then
-use 'setkeycodes <sequence> 84' to define this sequence to the usual SysRq
-code (84 is decimal for 0x54). It's probably best to put this command in a
-boot script. Oh, and by the way, you exit 'showkey' by not typing anything
-for ten seconds.
+There are some keyboards that produce a different keycode for SysRq than the
+pre-defined value of 99 (see KEY_SYSRQ in include/linux/input.h), or which
+don't have a SysRq key at all. In these cases, run 'showkey -s' to find an
+appropriate scancode sequence, and use 'setkeycodes <sequence> 99' to map
+this sequence to the usual SysRq code (e.g., 'setkeycodes e05b 99'). It's
+probably best to put this command in a boot script. Oh, and by the way, you
+exit 'showkey' by not typing anything for ten seconds.
* I want to add SysRQ key events to a module, how does it work?
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/Documentation/timers/hpet_example.c b/Documentation/timers/hpet_example.c
index f9ce2d9fdfd5..4bfafb7bc4c5 100644
--- a/Documentation/timers/hpet_example.c
+++ b/Documentation/timers/hpet_example.c
@@ -10,7 +10,6 @@
#include <sys/types.h>
#include <sys/wait.h>
#include <signal.h>
-#include <fcntl.h>
#include <errno.h>
#include <sys/time.h>
#include <linux/hpet.h>
@@ -24,7 +23,6 @@ extern void hpet_read(int, const char **);
#include <sys/poll.h>
#include <sys/ioctl.h>
-#include <signal.h>
struct hpet_command {
char *command;
diff --git a/Documentation/trace/events.txt b/Documentation/trace/events.txt
index 02ac6ed38b2d..09bd8e902989 100644
--- a/Documentation/trace/events.txt
+++ b/Documentation/trace/events.txt
@@ -90,7 +90,8 @@ In order to facilitate early boot debugging, use boot option:
trace_event=[event-list]
-The format of this boot option is the same as described in section 2.1.
+event-list is a comma-separated list of events. See section 2.1 for event
+format.
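+
+For example (the event names here are only an illustration; any events listed
+per section 2.1 can be used):
+
+trace_event=sched:sched_switch,sched:sched_wakeup
+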
3. Defining an event-enabled tracepoint
=======================================
@@ -238,7 +239,7 @@ subsystem's filter file.
For convenience, filters for every event in a subsystem can be set or
cleared as a group by writing a filter expression into the filter file
-at the root of the subsytem. Note however, that if a filter for any
+at the root of the subsystem. Note however, that if a filter for any
event within the subsystem lacks a field specified in the subsystem
filter, or if the filter can't be applied for any other reason, the
filter for that event will retain its previous setting. This can
@@ -250,7 +251,7 @@ fields can be guaranteed to propagate successfully to all events.
Here are a few subsystem filter examples that also illustrate the
above points:
-Clear the filters on all events in the sched subsytem:
+Clear the filters on all events in the sched subsystem:
# cd /sys/kernel/debug/tracing/events/sched
# echo 0 > filter
@@ -260,7 +261,7 @@ none
none
Set a filter using only common fields for all events in the sched
-subsytem (all events end up with the same filter):
+subsystem (all events end up with the same filter):
# cd /sys/kernel/debug/tracing/events/sched
# echo common_pid == 0 > filter
@@ -270,7 +271,7 @@ common_pid == 0
common_pid == 0
Attempt to set a filter using a non-common field for all events in the
-sched subsytem (all events but those that have a prev_pid field retain
+sched subsystem (all events but those that have a prev_pid field retain
their old filters):
# cd /sys/kernel/debug/tracing/events/sched
diff --git a/Documentation/trace/ftrace.txt b/Documentation/trace/ftrace.txt
index 03485bfbd797..557c1edeccaf 100644
--- a/Documentation/trace/ftrace.txt
+++ b/Documentation/trace/ftrace.txt
@@ -155,6 +155,9 @@ of ftrace. Here is a list of some of the key files:
to be traced. Echoing names of functions into this file
will limit the trace to only those functions.
+ This interface also allows for commands to be used. See the
+ "Filter commands" section for more details.
+
set_ftrace_notrace:
This has an effect opposite to that of
@@ -1337,12 +1340,14 @@ ftrace_dump_on_oops must be set. To set ftrace_dump_on_oops, one
can either use the sysctl function or set it via the proc system
interface.
- sysctl kernel.ftrace_dump_on_oops=1
+ sysctl kernel.ftrace_dump_on_oops=n
or
- echo 1 > /proc/sys/kernel/ftrace_dump_on_oops
+ echo n > /proc/sys/kernel/ftrace_dump_on_oops
+If n = 1, ftrace will dump the buffers of all CPUs; if n = 2, ftrace will
+only dump the buffer of the CPU that triggered the oops.
Here's an example of such a dump after a null pointer
dereference in a kernel module:
@@ -1822,6 +1827,47 @@ this special filter via:
echo > set_graph_function
+Filter commands
+---------------
+
+A few commands are supported by the set_ftrace_filter interface.
+Trace commands have the following format:
+
+<function>:<command>:<parameter>
+
+The following commands are supported:
+
+- mod
+ This command enables function filtering per module. The
+ parameter defines the module. For example, if only the write*
+ functions in the ext3 module are desired, run:
+
+ echo 'write*:mod:ext3' > set_ftrace_filter
+
+ This command interacts with the filter in the same way as
+ filtering based on function names. Thus, adding more functions
+ in a different module is accomplished by appending (>>) to the
+ filter file. Remove specific module functions by prepending
+ '!':
+
+ echo '!writeback*:mod:ext3' >> set_ftrace_filter
+
+- traceon/traceoff
+ These commands turn tracing on and off when the specified
+ functions are hit. The parameter determines how many times the
+ tracing system is turned on and off. If unspecified, there is
+ no limit. For example, to disable tracing when a schedule bug
+ is hit the first 5 times, run:
+
+ echo '__schedule_bug:traceoff:5' > set_ftrace_filter
+
+ These commands are cumulative whether or not they are appended
+ to set_ftrace_filter. To remove a command, prepend it by '!'
+ and drop the parameter:
+
+ echo '!__schedule_bug:traceoff' > set_ftrace_filter
+
+
trace_pipe
----------
diff --git a/Documentation/trace/kprobetrace.txt b/Documentation/trace/kprobetrace.txt
index a9100b28eb84..ec94748ae65b 100644
--- a/Documentation/trace/kprobetrace.txt
+++ b/Documentation/trace/kprobetrace.txt
@@ -40,7 +40,9 @@ Synopsis of kprobe_events
$stack : Fetch stack address.
$retval : Fetch return value.(*)
+|-offs(FETCHARG) : Fetch memory at FETCHARG +|- offs address.(**)
- NAME=FETCHARG: Set NAME as the argument name of FETCHARG.
+ NAME=FETCHARG : Set NAME as the argument name of FETCHARG.
+ FETCHARG:TYPE : Set TYPE as the type of FETCHARG. Currently, basic types
+ (u8/u16/u32/u64/s8/s16/s32/s64) are supported.
(*) only for return probe.
(**) this is useful for fetching a field of data structures.
diff --git a/Documentation/usb/WUSB-Design-overview.txt b/Documentation/usb/WUSB-Design-overview.txt
index c480e9c32dbd..4c5e37939344 100644
--- a/Documentation/usb/WUSB-Design-overview.txt
+++ b/Documentation/usb/WUSB-Design-overview.txt
@@ -381,7 +381,7 @@ descriptor that gives us the status of the transfer, its identification
we issue another URB to read into the destination buffer the chunk of
data coming out of the remote endpoint. Done, wait for the next guy. The
callbacks for the URBs issued from here are the ones that will declare
-the xfer complete at some point and call it's callback.
+the xfer complete at some point and call its callback.
Seems simple, but the implementation is not trivial.
diff --git a/Documentation/usb/bulk-streams.txt b/Documentation/usb/bulk-streams.txt
new file mode 100644
index 000000000000..ffc02021863e
--- /dev/null
+++ b/Documentation/usb/bulk-streams.txt
@@ -0,0 +1,78 @@
+Background
+==========
+
+Bulk endpoint streams were added in the USB 3.0 specification. Streams allow a
+device driver to overload a bulk endpoint so that multiple transfers can be
+queued at once.
+
+Streams are defined in sections 4.4.6.4 and 8.12.1.4 of the Universal Serial Bus
+3.0 specification at http://www.usb.org/developers/docs/ The USB Attached SCSI
+Protocol, which uses streams to queue multiple SCSI commands, can be found on
+the T10 website (http://t10.org/).
+
+
+Device-side implications
+========================
+
+Once a buffer has been queued to a stream ring, the device is notified (through
+an out-of-band mechanism on another endpoint) that data is ready for that stream
+ID. The device then tells the host which "stream" it wants to start. The host
+can also initiate a transfer on a stream without the device asking, but the
+device can refuse that transfer. Devices can switch between streams at any
+time.
+
+
+Driver implications
+===================
+
+int usb_alloc_streams(struct usb_interface *interface,
+ struct usb_host_endpoint **eps, unsigned int num_eps,
+ unsigned int num_streams, gfp_t mem_flags);
+
+Device drivers will call this API to request that the host controller driver
+allocate memory so the driver can use up to num_streams stream IDs. They must
+pass an array of usb_host_endpoints that need to be set up with similar stream
+IDs. This is to ensure that a UASP driver will be able to use the same stream
+ID for the bulk IN and OUT endpoints used in a Bi-directional command sequence.
+
+The return value is an error condition (if one of the endpoints doesn't support
+streams, or the xHCI driver ran out of memory), or the number of streams the
+host controller allocated for this endpoint. The xHCI host controller hardware
+declares how many stream IDs it can support, and each bulk endpoint on a
+SuperSpeed device will say how many stream IDs it can handle. Therefore,
+drivers should be able to deal with being allocated fewer stream IDs than they
+requested.
+
+Do NOT call this function if you have URBs enqueued for any of the endpoints
+passed in as arguments. Do not call this function to request fewer than two
+streams.
+
+Drivers will only be allowed to call this API once for the same endpoint
+without calling usb_free_streams(). This is a simplification for the xHCI host
+controller driver, and may change in the future.
+
+
+Picking new Stream IDs to use
+=============================
+
+Stream ID 0 is reserved, and should not be used to communicate with devices. If
+usb_alloc_streams() returns with a value of N, you may use streams 1 through N.
+To queue an URB for a specific stream, set the urb->stream_id value. If the
+endpoint does not support streams, an error will be returned.
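+
+As a sketch only (error handling trimmed; "intf", "eps" and "urb" are assumed
+to have been prepared by the driver already):
+
+	/* ask for up to 16 stream IDs on the bulk IN/OUT endpoint pair */
+	int streams = usb_alloc_streams(intf, eps, 2, 16, GFP_KERNEL);
+
+	if (streams < 0)
+		return streams;	/* streams not supported, or out of memory */
+
+	/* queue this URB on stream 1; IDs 1 through streams are usable */
+	urb->stream_id = 1;
+	usb_submit_urb(urb, GFP_KERNEL);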
+
+Note that new API to choose the next stream ID will have to be added if the xHCI
+driver supports secondary stream IDs.
+
+
+Clean up
+========
+
+If a driver wishes to stop using streams to communicate with the device, it
+should call
+
+void usb_free_streams(struct usb_interface *interface,
+ struct usb_host_endpoint **eps, unsigned int num_eps,
+ gfp_t mem_flags);
+
+All stream IDs will be deallocated when the driver releases the interface, to
+ensure that drivers that don't support streams will be able to use the endpoint.
diff --git a/Documentation/usb/dma.txt b/Documentation/usb/dma.txt
index cfdcd16e3abf..84ef865237db 100644
--- a/Documentation/usb/dma.txt
+++ b/Documentation/usb/dma.txt
@@ -16,11 +16,11 @@ OR: they can now be DMA-aware.
manage dma mappings for existing dma-ready buffers (see below).
- URBs have an additional "transfer_dma" field, as well as a transfer_flags
- bit saying if it's valid. (Control requests also have "setup_dma" and a
- corresponding transfer_flags bit.)
+ bit saying if it's valid. (Control requests also have "setup_dma", but
+ drivers must not use it.)
-- "usbcore" will map those DMA addresses, if a DMA-aware driver didn't do
- it first and set URB_NO_TRANSFER_DMA_MAP or URB_NO_SETUP_DMA_MAP. HCDs
+- "usbcore" will map this DMA address, if a DMA-aware driver didn't do
+ it first and set URB_NO_TRANSFER_DMA_MAP. HCDs
don't manage dma mappings for URBs.
- There's a new "generic DMA API", parts of which are usable by USB device
@@ -43,22 +43,16 @@ and effects like cache-trashing can impose subtle penalties.
kind of addresses to store in urb->transfer_buffer and urb->transfer_dma.
You'd also set URB_NO_TRANSFER_DMA_MAP in urb->transfer_flags:
- void *usb_buffer_alloc (struct usb_device *dev, size_t size,
+ void *usb_alloc_coherent (struct usb_device *dev, size_t size,
int mem_flags, dma_addr_t *dma);
- void usb_buffer_free (struct usb_device *dev, size_t size,
+ void usb_free_coherent (struct usb_device *dev, size_t size,
void *addr, dma_addr_t dma);
Most drivers should *NOT* be using these primitives; they don't need
to use this type of memory ("dma-coherent"), and memory returned from
kmalloc() will work just fine.
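+
+  For the drivers that do need dma-coherent memory, a minimal sketch (error
+  checking omitted; "dev", "size" and an already-allocated "urb" are assumed
+  to exist in the driver) could look like:
+
+	urb->transfer_buffer = usb_alloc_coherent(dev, size, GFP_KERNEL,
+						  &urb->transfer_dma);
+	urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+	/* ... submit the urb; once the buffer is no longer needed: ... */
+	usb_free_coherent(dev, size, urb->transfer_buffer, urb->transfer_dma);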
- For control transfers you can use the buffer primitives or not for each
- of the transfer buffer and setup buffer independently. Set the flag bits
- URB_NO_TRANSFER_DMA_MAP and URB_NO_SETUP_DMA_MAP to indicate which
- buffers you have prepared. For non-control transfers URB_NO_SETUP_DMA_MAP
- is ignored.
-
The memory buffer returned is "dma-coherent"; sometimes you might need to
force a consistent memory access ordering by using memory barriers. It's
not using a streaming DMA mapping, so it's good for small transfers on
@@ -130,8 +124,8 @@ of Documentation/PCI/PCI-DMA-mapping.txt, titled "What memory is DMA-able?")
void usb_buffer_unmap (struct urb *urb);
The calls manage urb->transfer_dma for you, and set URB_NO_TRANSFER_DMA_MAP
- so that usbcore won't map or unmap the buffer. The same goes for
- urb->setup_dma and URB_NO_SETUP_DMA_MAP for control requests.
+ so that usbcore won't map or unmap the buffer. They cannot be used for
+ setup_packet buffers in control requests.
Note that several of those interfaces are currently commented out, since
they don't have current users. See the source code. Other than the dmasync
diff --git a/Documentation/usb/gadget_hid.txt b/Documentation/usb/gadget_hid.txt
new file mode 100644
index 000000000000..f4a51f567427
--- /dev/null
+++ b/Documentation/usb/gadget_hid.txt
@@ -0,0 +1,445 @@
+
+ Linux USB HID gadget driver
+
+Introduction
+
+ The HID Gadget driver provides emulation of USB Human Interface
+ Devices (HID). The basic HID handling is done in the kernel,
+ and HID reports can be sent/received through I/O on the
+ /dev/hidgX character devices.
+
+ For more details about HID, see the developer page on
+ http://www.usb.org/developers/hidpage/
+
+Configuration
+
+ g_hid is a platform driver, so to use it you need to add
+ struct platform_device(s) to your platform code defining the
+ HID function descriptors you want to use - E.G. something
+ like:
+
+#include <linux/platform_device.h>
+#include <linux/usb/g_hid.h>
+
+/* hid descriptor for a keyboard */
+static struct hidg_func_descriptor my_hid_data = {
+ .subclass = 0, /* No subclass */
+ .protocol = 1, /* Keyboard */
+ .report_length = 8,
+ .report_desc_length = 63,
+ .report_desc = {
+ 0x05, 0x01, /* USAGE_PAGE (Generic Desktop) */
+ 0x09, 0x06, /* USAGE (Keyboard) */
+ 0xa1, 0x01, /* COLLECTION (Application) */
+ 0x05, 0x07, /* USAGE_PAGE (Keyboard) */
+ 0x19, 0xe0, /* USAGE_MINIMUM (Keyboard LeftControl) */
+ 0x29, 0xe7, /* USAGE_MAXIMUM (Keyboard Right GUI) */
+ 0x15, 0x00, /* LOGICAL_MINIMUM (0) */
+ 0x25, 0x01, /* LOGICAL_MAXIMUM (1) */
+ 0x75, 0x01, /* REPORT_SIZE (1) */
+ 0x95, 0x08, /* REPORT_COUNT (8) */
+ 0x81, 0x02, /* INPUT (Data,Var,Abs) */
+ 0x95, 0x01, /* REPORT_COUNT (1) */
+ 0x75, 0x08, /* REPORT_SIZE (8) */
+ 0x81, 0x03, /* INPUT (Cnst,Var,Abs) */
+ 0x95, 0x05, /* REPORT_COUNT (5) */
+ 0x75, 0x01, /* REPORT_SIZE (1) */
+ 0x05, 0x08, /* USAGE_PAGE (LEDs) */
+ 0x19, 0x01, /* USAGE_MINIMUM (Num Lock) */
+ 0x29, 0x05, /* USAGE_MAXIMUM (Kana) */
+ 0x91, 0x02, /* OUTPUT (Data,Var,Abs) */
+ 0x95, 0x01, /* REPORT_COUNT (1) */
+ 0x75, 0x03, /* REPORT_SIZE (3) */
+ 0x91, 0x03, /* OUTPUT (Cnst,Var,Abs) */
+ 0x95, 0x06, /* REPORT_COUNT (6) */
+ 0x75, 0x08, /* REPORT_SIZE (8) */
+ 0x15, 0x00, /* LOGICAL_MINIMUM (0) */
+ 0x25, 0x65, /* LOGICAL_MAXIMUM (101) */
+ 0x05, 0x07, /* USAGE_PAGE (Keyboard) */
+ 0x19, 0x00, /* USAGE_MINIMUM (Reserved) */
+ 0x29, 0x65, /* USAGE_MAXIMUM (Keyboard Application) */
+ 0x81, 0x00, /* INPUT (Data,Ary,Abs) */
+ 0xc0 /* END_COLLECTION */
+ }
+};
+
+static struct platform_device my_hid = {
+ .name = "hidg",
+ .id = 0,
+ .num_resources = 0,
+ .resource = 0,
+ .dev.platform_data = &my_hid_data,
+};
+
+ You can add as many HID functions as you want, only limited by
+ the amount of interrupt endpoints your gadget driver supports.
+
+Send and receive HID reports
+
+ HID reports can be sent/received using read/write on the
+ /dev/hidgX character devices. See below for an example program
+ to do this.
+
+ hid_gadget_test is a small interactive program to test the HID
+ gadget driver. To use, point it at a hidg device and set the
+ device type (keyboard / mouse / joystick) - E.G.:
+
+ # hid_gadget_test /dev/hidg0 keyboard
+
+ You are now in the prompt of hid_gadget_test. You can type any
+ combination of options and values. Available options and
+ values are listed at program start. In keyboard mode you can
+ send up to six values.
+
+ For example type: g i s t r --left-shift
+
+ Hit return and the corresponding report will be sent by the
+ HID gadget.
+
+ Another interesting example is the caps lock test. Type
+  --caps-lock and hit return. A report is then sent by the
+ gadget and you should receive the host answer, corresponding
+ to the caps lock LED status.
+
+ --caps-lock
+ recv report:2
+
+ With this command:
+
+ # hid_gadget_test /dev/hidg1 mouse
+
+ You can test the mouse emulation. Values are two signed numbers.
+
+
+Sample code
+
+/* hid_gadget_test */
+
+#include <string.h>
+#include <stdio.h>
+#include <ctype.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/select.h>
+
+#define BUF_LEN 512
+
+struct options {
+ const char *opt;
+ unsigned char val;
+};
+
+static struct options kmod[] = {
+ {.opt = "--left-ctrl", .val = 0x01},
+ {.opt = "--right-ctrl", .val = 0x10},
+ {.opt = "--left-shift", .val = 0x02},
+ {.opt = "--right-shift", .val = 0x20},
+ {.opt = "--left-alt", .val = 0x04},
+ {.opt = "--right-alt", .val = 0x40},
+ {.opt = "--left-meta", .val = 0x08},
+ {.opt = "--right-meta", .val = 0x80},
+ {.opt = NULL}
+};
+
+static struct options kval[] = {
+ {.opt = "--return", .val = 0x28},
+ {.opt = "--esc", .val = 0x29},
+ {.opt = "--bckspc", .val = 0x2a},
+ {.opt = "--tab", .val = 0x2b},
+ {.opt = "--spacebar", .val = 0x2c},
+ {.opt = "--caps-lock", .val = 0x39},
+ {.opt = "--f1", .val = 0x3a},
+ {.opt = "--f2", .val = 0x3b},
+ {.opt = "--f3", .val = 0x3c},
+ {.opt = "--f4", .val = 0x3d},
+ {.opt = "--f5", .val = 0x3e},
+ {.opt = "--f6", .val = 0x3f},
+ {.opt = "--f7", .val = 0x40},
+ {.opt = "--f8", .val = 0x41},
+ {.opt = "--f9", .val = 0x42},
+ {.opt = "--f10", .val = 0x43},
+ {.opt = "--f11", .val = 0x44},
+ {.opt = "--f12", .val = 0x45},
+ {.opt = "--insert", .val = 0x49},
+ {.opt = "--home", .val = 0x4a},
+ {.opt = "--pageup", .val = 0x4b},
+ {.opt = "--del", .val = 0x4c},
+ {.opt = "--end", .val = 0x4d},
+ {.opt = "--pagedown", .val = 0x4e},
+ {.opt = "--right", .val = 0x4f},
+ {.opt = "--left", .val = 0x50},
+ {.opt = "--down", .val = 0x51},
+ {.opt = "--kp-enter", .val = 0x58},
+ {.opt = "--up", .val = 0x52},
+ {.opt = "--num-lock", .val = 0x53},
+ {.opt = NULL}
+};
+
+int keyboard_fill_report(char report[8], char buf[BUF_LEN], int *hold)
+{
+ char *tok = strtok(buf, " ");
+ int key = 0;
+ int i = 0;
+
+ for (; tok != NULL; tok = strtok(NULL, " ")) {
+
+ if (strcmp(tok, "--quit") == 0)
+ return -1;
+
+ if (strcmp(tok, "--hold") == 0) {
+ *hold = 1;
+ continue;
+ }
+
+ if (key < 6) {
+ for (i = 0; kval[i].opt != NULL; i++)
+ if (strcmp(tok, kval[i].opt) == 0) {
+ report[2 + key++] = kval[i].val;
+ break;
+ }
+ if (kval[i].opt != NULL)
+ continue;
+ }
+
+ if (key < 6)
+ if (islower(tok[0])) {
+ report[2 + key++] = (tok[0] - ('a' - 0x04));
+ continue;
+ }
+
+ for (i = 0; kmod[i].opt != NULL; i++)
+ if (strcmp(tok, kmod[i].opt) == 0) {
+ report[0] = report[0] | kmod[i].val;
+ break;
+ }
+ if (kmod[i].opt != NULL)
+ continue;
+
+ if (key < 6)
+ fprintf(stderr, "unknown option: %s\n", tok);
+ }
+ return 8;
+}
+
+static struct options mmod[] = {
+ {.opt = "--b1", .val = 0x01},
+ {.opt = "--b2", .val = 0x02},
+ {.opt = "--b3", .val = 0x04},
+ {.opt = NULL}
+};
+
+int mouse_fill_report(char report[8], char buf[BUF_LEN], int *hold)
+{
+ char *tok = strtok(buf, " ");
+ int mvt = 0;
+ int i = 0;
+ for (; tok != NULL; tok = strtok(NULL, " ")) {
+
+ if (strcmp(tok, "--quit") == 0)
+ return -1;
+
+ if (strcmp(tok, "--hold") == 0) {
+ *hold = 1;
+ continue;
+ }
+
+ for (i = 0; mmod[i].opt != NULL; i++)
+ if (strcmp(tok, mmod[i].opt) == 0) {
+ report[0] = report[0] | mmod[i].val;
+ break;
+ }
+ if (mmod[i].opt != NULL)
+ continue;
+
+ if (!(tok[0] == '-' && tok[1] == '-') && mvt < 2) {
+ errno = 0;
+ report[1 + mvt++] = (char)strtol(tok, NULL, 0);
+ if (errno != 0) {
+ fprintf(stderr, "Bad value:'%s'\n", tok);
+				report[1 + --mvt] = 0; /* drop the bad value */
+ }
+ continue;
+ }
+
+ fprintf(stderr, "unknown option: %s\n", tok);
+ }
+ return 3;
+}
+
+static struct options jmod[] = {
+ {.opt = "--b1", .val = 0x10},
+ {.opt = "--b2", .val = 0x20},
+ {.opt = "--b3", .val = 0x40},
+ {.opt = "--b4", .val = 0x80},
+ {.opt = "--hat1", .val = 0x00},
+ {.opt = "--hat2", .val = 0x01},
+ {.opt = "--hat3", .val = 0x02},
+ {.opt = "--hat4", .val = 0x03},
+ {.opt = "--hatneutral", .val = 0x04},
+ {.opt = NULL}
+};
+
+int joystick_fill_report(char report[8], char buf[BUF_LEN], int *hold)
+{
+ char *tok = strtok(buf, " ");
+ int mvt = 0;
+ int i = 0;
+
+ *hold = 1;
+
+ /* set default hat position: neutral */
+ report[3] = 0x04;
+
+ for (; tok != NULL; tok = strtok(NULL, " ")) {
+
+ if (strcmp(tok, "--quit") == 0)
+ return -1;
+
+ for (i = 0; jmod[i].opt != NULL; i++)
+ if (strcmp(tok, jmod[i].opt) == 0) {
+ report[3] = (report[3] & 0xF0) | jmod[i].val;
+ break;
+ }
+ if (jmod[i].opt != NULL)
+ continue;
+
+ if (!(tok[0] == '-' && tok[1] == '-') && mvt < 3) {
+ errno = 0;
+ report[mvt++] = (char)strtol(tok, NULL, 0);
+ if (errno != 0) {
+ fprintf(stderr, "Bad value:'%s'\n", tok);
+				report[--mvt] = 0; /* drop the bad value */
+ }
+ continue;
+ }
+
+ fprintf(stderr, "unknown option: %s\n", tok);
+ }
+ return 4;
+}
+
+void print_options(char c)
+{
+ int i = 0;
+
+ if (c == 'k') {
+ printf(" keyboard options:\n"
+ " --hold\n");
+ for (i = 0; kmod[i].opt != NULL; i++)
+ printf("\t\t%s\n", kmod[i].opt);
+ printf("\n keyboard values:\n"
+ " [a-z] or\n");
+ for (i = 0; kval[i].opt != NULL; i++)
+ printf("\t\t%-8s%s", kval[i].opt, i % 2 ? "\n" : "");
+ printf("\n");
+ } else if (c == 'm') {
+ printf(" mouse options:\n"
+ " --hold\n");
+ for (i = 0; mmod[i].opt != NULL; i++)
+ printf("\t\t%s\n", mmod[i].opt);
+ printf("\n mouse values:\n"
+ " Two signed numbers\n"
+ "--quit to close\n");
+ } else {
+ printf(" joystick options:\n");
+ for (i = 0; jmod[i].opt != NULL; i++)
+ printf("\t\t%s\n", jmod[i].opt);
+ printf("\n joystick values:\n"
+ " three signed numbers\n"
+ "--quit to close\n");
+ }
+}
+
+int main(int argc, const char *argv[])
+{
+ const char *filename = NULL;
+ int fd = 0;
+ char buf[BUF_LEN];
+ int cmd_len;
+ char report[8];
+ int to_send = 8;
+ int hold = 0;
+ fd_set rfds;
+ int retval, i;
+
+ if (argc < 3) {
+ fprintf(stderr, "Usage: %s devname mouse|keyboard|joystick\n",
+ argv[0]);
+ return 1;
+ }
+
+ if (argv[2][0] != 'k' && argv[2][0] != 'm' && argv[2][0] != 'j')
+ return 2;
+
+ filename = argv[1];
+
+ if ((fd = open(filename, O_RDWR, 0666)) == -1) {
+ perror(filename);
+ return 3;
+ }
+
+ print_options(argv[2][0]);
+
+ while (42) {
+
+ FD_ZERO(&rfds);
+ FD_SET(STDIN_FILENO, &rfds);
+ FD_SET(fd, &rfds);
+
+ retval = select(fd + 1, &rfds, NULL, NULL, NULL);
+ if (retval == -1 && errno == EINTR)
+ continue;
+ if (retval < 0) {
+ perror("select()");
+ return 4;
+ }
+
+ if (FD_ISSET(fd, &rfds)) {
+ cmd_len = read(fd, buf, BUF_LEN - 1);
+ printf("recv report:");
+ for (i = 0; i < cmd_len; i++)
+ printf(" %02x", buf[i]);
+ printf("\n");
+ }
+
+ if (FD_ISSET(STDIN_FILENO, &rfds)) {
+ memset(report, 0x0, sizeof(report));
+ cmd_len = read(STDIN_FILENO, buf, BUF_LEN - 1);
+
+ if (cmd_len == 0)
+ break;
+
+ buf[cmd_len - 1] = '\0';
+ hold = 0;
+
+ memset(report, 0x0, sizeof(report));
+ if (argv[2][0] == 'k')
+ to_send = keyboard_fill_report(report, buf, &hold);
+ else if (argv[2][0] == 'm')
+ to_send = mouse_fill_report(report, buf, &hold);
+ else
+ to_send = joystick_fill_report(report, buf, &hold);
+
+ if (to_send == -1)
+ break;
+
+ if (write(fd, report, to_send) != to_send) {
+ perror(filename);
+ return 5;
+ }
+ if (!hold) {
+ memset(report, 0x0, sizeof(report));
+ if (write(fd, report, to_send) != to_send) {
+ perror(filename);
+ return 6;
+ }
+ }
+ }
+ }
+
+ close(fd);
+ return 0;
+}
diff --git a/Documentation/usb/power-management.txt b/Documentation/usb/power-management.txt
index 2790ad48cfc2..b29d8e56cf28 100644
--- a/Documentation/usb/power-management.txt
+++ b/Documentation/usb/power-management.txt
@@ -107,7 +107,9 @@ allowed to issue dynamic suspends.
The user interface for controlling dynamic PM is located in the power/
subdirectory of each USB device's sysfs directory, that is, in
/sys/bus/usb/devices/.../power/ where "..." is the device's ID. The
-relevant attribute files are: wakeup, level, and autosuspend.
+relevant attribute files are: wakeup, control, and autosuspend.
+(There may also be a file named "level"; this file was deprecated
+as of the 2.6.35 kernel and replaced by the "control" file.)
power/wakeup
@@ -120,7 +122,7 @@ relevant attribute files are: wakeup, level, and autosuspend.
while the device is suspended, the change won't take
effect until the following suspend.)
- power/level
+ power/control
This file contains one of two words: "on" or "auto".
You can write those words to the file to change the
@@ -148,14 +150,15 @@ relevant attribute files are: wakeup, level, and autosuspend.
never to autosuspend. You can write a number to the
file to change the autosuspend idle-delay time.
-Writing "-1" to power/autosuspend and writing "on" to power/level do
+Writing "-1" to power/autosuspend and writing "on" to power/control do
essentially the same thing -- they both prevent the device from being
autosuspended. Yes, this is a redundancy in the API.
(In 2.6.21 writing "0" to power/autosuspend would prevent the device
from being autosuspended; the behavior was changed in 2.6.22. The
power/autosuspend attribute did not exist prior to 2.6.21, and the
-power/level attribute did not exist prior to 2.6.22.)
+power/level attribute did not exist prior to 2.6.22. power/control
+was added in 2.6.34.)
Changing the default idle-delay time
@@ -212,7 +215,7 @@ among printers and scanners, but plenty of other types of device have
the same deficiency.
For this reason, by default the kernel disables autosuspend (the
-power/level attribute is initialized to "on") for all devices other
+power/control attribute is initialized to "on") for all devices other
than hubs. Hubs, at least, appear to be reasonably well-behaved in
this regard.
@@ -373,7 +376,7 @@ usb_autopm_put_interface() in its close or release routine. But other
patterns are possible.
The autosuspend attempts mentioned above will often fail for one
-reason or another. For example, the power/level attribute might be
+reason or another. For example, the power/control attribute might be
set to "on", or another interface in the same device might not be
idle. This is perfectly normal. If the reason for failure was that
the device hasn't been idle for long enough, a timer is scheduled to
@@ -394,12 +397,12 @@ Drivers can enable autosuspend for their devices by calling
in their probe() routine, if they know that the device is capable of
suspending and resuming correctly. This is exactly equivalent to
-writing "auto" to the device's power/level attribute. Likewise,
-writing "auto" to the device's power/level attribute. Likewise,
+writing "auto" to the device's power/control attribute. Likewise,
drivers can disable autosuspend by calling
usb_disable_autosuspend(struct usb_device *udev);
-This is exactly the same as writing "on" to the power/level attribute.
+This is exactly the same as writing "on" to the power/control attribute.
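+
+As an illustration only (a hypothetical driver; the autosuspend call itself is
+the point here), the enable case could be done from probe() like this:
+
+	static int mydrv_probe(struct usb_interface *intf,
+			       const struct usb_device_id *id)
+	{
+		/* ... normal probe work ... */
+
+		/* this device is known to suspend and resume correctly */
+		usb_enable_autosuspend(interface_to_usbdev(intf));
+		return 0;
+	}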
Sometimes a driver needs to make sure that remote wakeup is enabled
during autosuspend. For example, there's not much point
diff --git a/Documentation/usb/usb-serial.txt b/Documentation/usb/usb-serial.txt
index ff2c1ff57ba2..f4d214510259 100644
--- a/Documentation/usb/usb-serial.txt
+++ b/Documentation/usb/usb-serial.txt
@@ -194,6 +194,10 @@ FTDI Single Port Serial Driver
This is a single port DB-25 serial adapter.
+ Devices supported include:
+ -TripNav TN-200 USB GPS
+ -Navis Engineering Bureau CH-4711 USB GPS
+
For any questions or problems with this driver, please contact Bill Ryder.
@@ -216,7 +220,7 @@ Cypress M8 CY4601 Family Serial Driver
Devices supported:
- -DeLorme's USB Earthmate (SiRF Star II lp arch)
+ -DeLorme's USB Earthmate GPS (SiRF Star II lp arch)
-Cypress HID->COM RS232 adapter
Note: Cypress Semiconductor claims no affiliation with the
@@ -392,9 +396,10 @@ REINER SCT cyberJack pinpad/e-com USB chipcard reader
Prolific PL2303 Driver
This driver supports any device that has the PL2303 chip from Prolific
- in it. This includes a number of single port USB to serial
- converters and USB GPS devices. Devices from Aten (the UC-232) and
- IO-Data work with this driver, as does the DCU-11 mobile-phone cable.
+ in it. This includes a number of single port USB to serial converters,
+ more than 70% of USB GPS devices (in 2010), and some USB UPSes. Devices
+ from Aten (the UC-232) and IO-Data work with this driver, as does
+ the DCU-11 mobile-phone cable.
For any questions or problems with this driver, please contact Greg
Kroah-Hartman at greg@kroah.com
@@ -435,6 +440,22 @@ Winchiphead CH341 Driver
For any questions or problems with this driver, please contact
frank@kingswood-consulting.co.uk.
+Moschip MCS7720, MCS7715 driver
+
+ These chips are present in devices sold by various manufacturers, such as Syba
+ and Cables Unlimited. There may be others. The 7720 provides two serial
+ ports, and the 7715 provides one serial and one standard PC parallel port.
+ Support for the 7715's parallel port is enabled by a separate option, which
+ will not appear unless parallel port support is first enabled at the top-level
+ of the Device Drivers config menu. Currently only compatibility mode is
+ supported on the parallel port (no ECP/EPP).
+
+ TODO:
+ - Implement ECP/EPP modes for the parallel port.
+ - Baud rates higher than 115200 are currently broken.
+ - Devices with a single serial port based on the Moschip MCS7703 may work
+ with this driver with a simple addition to the usb_device_id table. I
+ don't have one of these devices, so I can't say for sure.
Generic Serial driver
diff --git a/Documentation/video4linux/CARDLIST.bttv b/Documentation/video4linux/CARDLIST.bttv
index f11c583295e9..4739d5684305 100644
--- a/Documentation/video4linux/CARDLIST.bttv
+++ b/Documentation/video4linux/CARDLIST.bttv
@@ -100,7 +100,7 @@
99 -> AD-TVK503
100 -> Hercules Smart TV Stereo
101 -> Pace TV & Radio Card
-102 -> IVC-200 [0000:a155,0001:a155,0002:a155,0003:a155,0100:a155,0101:a155,0102:a155,0103:a155]
+102 -> IVC-200 [0000:a155,0001:a155,0002:a155,0003:a155,0100:a155,0101:a155,0102:a155,0103:a155,0800:a155,0801:a155,0802:a155,0803:a155]
103 -> Grand X-Guard / Trust 814PCI [0304:0102]
104 -> Nebula Electronics DigiTV [0071:0101]
105 -> ProVideo PV143 [aa00:1430,aa00:1431,aa00:1432,aa00:1433,aa03:1433]
diff --git a/Documentation/video4linux/CARDLIST.cx88 b/Documentation/video4linux/CARDLIST.cx88
index 7ec3c4e4b60f..f2510541373b 100644
--- a/Documentation/video4linux/CARDLIST.cx88
+++ b/Documentation/video4linux/CARDLIST.cx88
@@ -82,3 +82,4 @@
81 -> Leadtek WinFast DTV1800 Hybrid [107d:6654]
82 -> WinFast DTV2000 H rev. J [107d:6f2b]
83 -> Prof 7301 DVB-S/S2 [b034:3034]
+ 84 -> Samsung SMT 7020 DVB-S [18ac:dc00,18ac:dccd]
diff --git a/Documentation/video4linux/CARDLIST.em28xx b/Documentation/video4linux/CARDLIST.em28xx
index 0c166ff003a0..3a623aaeae5f 100644
--- a/Documentation/video4linux/CARDLIST.em28xx
+++ b/Documentation/video4linux/CARDLIST.em28xx
@@ -1,5 +1,5 @@
0 -> Unknown EM2800 video grabber (em2800) [eb1a:2800]
- 1 -> Unknown EM2750/28xx video grabber (em2820/em2840) [eb1a:2710,eb1a:2820,eb1a:2821,eb1a:2860,eb1a:2861,eb1a:2862,eb1a:2870,eb1a:2881,eb1a:2883,eb1a:2868]
+ 1 -> Unknown EM2750/28xx video grabber (em2820/em2840) [eb1a:2710,eb1a:2820,eb1a:2821,eb1a:2860,eb1a:2861,eb1a:2862,eb1a:2863,eb1a:2870,eb1a:2881,eb1a:2883,eb1a:2868]
2 -> Terratec Cinergy 250 USB (em2820/em2840) [0ccd:0036]
3 -> Pinnacle PCTV USB 2 (em2820/em2840) [2304:0208]
4 -> Hauppauge WinTV USB 2 (em2820/em2840) [2040:4200,2040:4201]
@@ -27,6 +27,7 @@
26 -> Hercules Smart TV USB 2.0 (em2820/em2840)
27 -> Pinnacle PCTV USB 2 (Philips FM1216ME) (em2820/em2840)
28 -> Leadtek Winfast USB II Deluxe (em2820/em2840)
+ 29 -> EM2860/TVP5150 Reference Design (em2860)
30 -> Videology 20K14XUSB USB2.0 (em2820/em2840)
31 -> Usbgear VD204v9 (em2821)
32 -> Supercomp USB 2.0 TV (em2821)
@@ -70,3 +71,4 @@
72 -> Gadmei UTV330+ (em2861)
73 -> Reddo DVB-C USB TV Box (em2870)
74 -> Actionmaster/LinXcel/Digitus VC211A (em2800)
+ 75 -> Dikom DK300 (em2882)
diff --git a/Documentation/video4linux/CARDLIST.saa7134 b/Documentation/video4linux/CARDLIST.saa7134
index b4a767060ed7..070f2576707e 100644
--- a/Documentation/video4linux/CARDLIST.saa7134
+++ b/Documentation/video4linux/CARDLIST.saa7134
@@ -175,3 +175,6 @@
174 -> Asus Europa Hybrid OEM [1043:4847]
175 -> Leadtek Winfast DTV1000S [107d:6655]
176 -> Beholder BeholdTV 505 RDS [0000:5051]
+177 -> Hawell HW-404M7
+179 -> Beholder BeholdTV H7 [5ace:7190]
+180 -> Beholder BeholdTV A7 [5ace:7090]
diff --git a/Documentation/video4linux/extract_xc3028.pl b/Documentation/video4linux/extract_xc3028.pl
index 2cb816047fc1..47877deae6d7 100644
--- a/Documentation/video4linux/extract_xc3028.pl
+++ b/Documentation/video4linux/extract_xc3028.pl
@@ -5,12 +5,18 @@
#
# In order to use, you need to:
# 1) Download the windows driver with something like:
+# Version 2.4
+# wget http://www.twinhan.com/files/AW/BDA T/20080303_V1.0.6.7.zip
+# or wget http://www.stefanringel.de/pub/20080303_V1.0.6.7.zip
+# Version 2.7
# wget http://www.steventoth.net/linux/xc5000/HVR-12x0-14x0-17x0_1_25_25271_WHQL.zip
-# 2) Extract the file hcw85bda.sys from the zip into the current dir:
+# 2) Extract the files from the zip into the current dir:
+# unzip -j 20080303_V1.0.6.7.zip 20080303_v1.0.6.7/UDXTTM6000.sys
# unzip -j HVR-12x0-14x0-17x0_1_25_25271_WHQL.zip Driver85/hcw85bda.sys
# 3) run the script:
# ./extract_xc3028.pl
-# 4) copy the generated file:
+# 4) copy the generated files:
+# cp xc3028-v24.fw /lib/firmware
# cp xc3028-v27.fw /lib/firmware
#use strict;
@@ -135,7 +141,7 @@ sub write_hunk_fix_endian($$)
}
}
-sub main_firmware($$$$)
+sub main_firmware_24($$$$)
{
my $out;
my $j=0;
@@ -146,8 +152,774 @@ sub main_firmware($$$$)
for ($j = length($name); $j <32; $j++) {
$name = $name.chr(0);
+ }
+
+ open OUTFILE, ">$outfile";
+ syswrite(OUTFILE, $name);
+ write_le16($version);
+ write_le16($nr_desc);
+
+ #
+ # Firmware 0, type: BASE FW F8MHZ (0x00000003), id: (0000000000000000), size: 6635
+ #
+
+ write_le32(0x00000003); # Type
+ write_le64(0x00000000, 0x00000000); # ID
+ write_le32(6635); # Size
+ write_hunk_fix_endian(257752, 6635);
+
+ #
+ # Firmware 1, type: BASE FW F8MHZ MTS (0x00000007), id: (0000000000000000), size: 6635
+ #
+
+ write_le32(0x00000007); # Type
+ write_le64(0x00000000, 0x00000000); # ID
+ write_le32(6635); # Size
+ write_hunk_fix_endian(264392, 6635);
+
+ #
+ # Firmware 2, type: BASE FW FM (0x00000401), id: (0000000000000000), size: 6525
+ #
+
+ write_le32(0x00000401); # Type
+ write_le64(0x00000000, 0x00000000); # ID
+ write_le32(6525); # Size
+ write_hunk_fix_endian(271040, 6525);
+
+ #
+ # Firmware 3, type: BASE FW FM INPUT1 (0x00000c01), id: (0000000000000000), size: 6539
+ #
+
+ write_le32(0x00000c01); # Type
+ write_le64(0x00000000, 0x00000000); # ID
+ write_le32(6539); # Size
+ write_hunk_fix_endian(277568, 6539);
+
+ #
+ # Firmware 4, type: BASE FW (0x00000001), id: (0000000000000000), size: 6633
+ #
+
+ write_le32(0x00000001); # Type
+ write_le64(0x00000000, 0x00000000); # ID
+ write_le32(6633); # Size
+ write_hunk_fix_endian(284120, 6633);
+
+ #
+ # Firmware 5, type: BASE FW MTS (0x00000005), id: (0000000000000000), size: 6617
+ #
+
+ write_le32(0x00000005); # Type
+ write_le64(0x00000000, 0x00000000); # ID
+ write_le32(6617); # Size
+ write_hunk_fix_endian(290760, 6617);
+
+ #
+ # Firmware 6, type: STD FW (0x00000000), id: PAL/BG A2/A (0000000100000007), size: 161
+ #
+
+ write_le32(0x00000000); # Type
+ write_le64(0x00000001, 0x00000007); # ID
+ write_le32(161); # Size
+ write_hunk_fix_endian(297384, 161);
+
+ #
+ # Firmware 7, type: STD FW MTS (0x00000004), id: PAL/BG A2/A (0000000100000007), size: 169
+ #
+
+ write_le32(0x00000004); # Type
+ write_le64(0x00000001, 0x00000007); # ID
+ write_le32(169); # Size
+ write_hunk_fix_endian(297552, 169);
+
+ #
+ # Firmware 8, type: STD FW (0x00000000), id: PAL/BG A2/B (0000000200000007), size: 161
+ #
+
+ write_le32(0x00000000); # Type
+ write_le64(0x00000002, 0x00000007); # ID
+ write_le32(161); # Size
+ write_hunk_fix_endian(297728, 161);
+
+ #
+ # Firmware 9, type: STD FW MTS (0x00000004), id: PAL/BG A2/B (0000000200000007), size: 169
+ #
+
+ write_le32(0x00000004); # Type
+ write_le64(0x00000002, 0x00000007); # ID
+ write_le32(169); # Size
+ write_hunk_fix_endian(297896, 169);
+
+ #
+ # Firmware 10, type: STD FW (0x00000000), id: PAL/BG NICAM/A (0000000400000007), size: 161
+ #
+
+ write_le32(0x00000000); # Type
+ write_le64(0x00000004, 0x00000007); # ID
+ write_le32(161); # Size
+ write_hunk_fix_endian(298072, 161);
+
+ #
+ # Firmware 11, type: STD FW MTS (0x00000004), id: PAL/BG NICAM/A (0000000400000007), size: 169
+ #
+
+ write_le32(0x00000004); # Type
+ write_le64(0x00000004, 0x00000007); # ID
+ write_le32(169); # Size
+ write_hunk_fix_endian(298240, 169);
+
+ #
+ # Firmware 12, type: STD FW (0x00000000), id: PAL/BG NICAM/B (0000000800000007), size: 161
+ #
+
+ write_le32(0x00000000); # Type
+ write_le64(0x00000008, 0x00000007); # ID
+ write_le32(161); # Size
+ write_hunk_fix_endian(298416, 161);
+
+ #
+ # Firmware 13, type: STD FW MTS (0x00000004), id: PAL/BG NICAM/B (0000000800000007), size: 169
+ #
+
+ write_le32(0x00000004); # Type
+ write_le64(0x00000008, 0x00000007); # ID
+ write_le32(169); # Size
+ write_hunk_fix_endian(298584, 169);
+
+ #
+ # Firmware 14, type: STD FW (0x00000000), id: PAL/DK A2 (00000003000000e0), size: 161
+ #
+
+ write_le32(0x00000000); # Type
+ write_le64(0x00000003, 0x000000e0); # ID
+ write_le32(161); # Size
+ write_hunk_fix_endian(298760, 161);
+
+ #
+ # Firmware 15, type: STD FW MTS (0x00000004), id: PAL/DK A2 (00000003000000e0), size: 169
+ #
+
+ write_le32(0x00000004); # Type
+ write_le64(0x00000003, 0x000000e0); # ID
+ write_le32(169); # Size
+ write_hunk_fix_endian(298928, 169);
+
+ #
+ # Firmware 16, type: STD FW (0x00000000), id: PAL/DK NICAM (0000000c000000e0), size: 161
+ #
+
+ write_le32(0x00000000); # Type
+ write_le64(0x0000000c, 0x000000e0); # ID
+ write_le32(161); # Size
+ write_hunk_fix_endian(299104, 161);
+
+ #
+ # Firmware 17, type: STD FW MTS (0x00000004), id: PAL/DK NICAM (0000000c000000e0), size: 169
+ #
+
+ write_le32(0x00000004); # Type
+ write_le64(0x0000000c, 0x000000e0); # ID
+ write_le32(169); # Size
+ write_hunk_fix_endian(299272, 169);
+
+ #
+ # Firmware 18, type: STD FW (0x00000000), id: SECAM/K1 (0000000000200000), size: 161
+ #
+
+ write_le32(0x00000000); # Type
+ write_le64(0x00000000, 0x00200000); # ID
+ write_le32(161); # Size
+ write_hunk_fix_endian(299448, 161);
+
+ #
+ # Firmware 19, type: STD FW MTS (0x00000004), id: SECAM/K1 (0000000000200000), size: 169
+ #
+
+ write_le32(0x00000004); # Type
+ write_le64(0x00000000, 0x00200000); # ID
+ write_le32(169); # Size
+ write_hunk_fix_endian(299616, 169);
+
+ #
+ # Firmware 20, type: STD FW (0x00000000), id: SECAM/K3 (0000000004000000), size: 161
+ #
+
+ write_le32(0x00000000); # Type
+ write_le64(0x00000000, 0x04000000); # ID
+ write_le32(161); # Size
+ write_hunk_fix_endian(299792, 161);
+
+ #
+ # Firmware 21, type: STD FW MTS (0x00000004), id: SECAM/K3 (0000000004000000), size: 169
+ #
+
+ write_le32(0x00000004); # Type
+ write_le64(0x00000000, 0x04000000); # ID
+ write_le32(169); # Size
+ write_hunk_fix_endian(299960, 169);
+
+ #
+ # Firmware 22, type: STD FW D2633 DTV6 ATSC (0x00010030), id: (0000000000000000), size: 149
+ #
+
+ write_le32(0x00010030); # Type
+ write_le64(0x00000000, 0x00000000); # ID
+ write_le32(149); # Size
+ write_hunk_fix_endian(300136, 149);
+
+ #
+ # Firmware 23, type: STD FW D2620 DTV6 QAM (0x00000068), id: (0000000000000000), size: 149
+ #
+
+ write_le32(0x00000068); # Type
+ write_le64(0x00000000, 0x00000000); # ID
+ write_le32(149); # Size
+ write_hunk_fix_endian(300296, 149);
+
+ #
+ # Firmware 24, type: STD FW D2633 DTV6 QAM (0x00000070), id: (0000000000000000), size: 149
+ #
+
+ write_le32(0x00000070); # Type
+ write_le64(0x00000000, 0x00000000); # ID
+ write_le32(149); # Size
+ write_hunk_fix_endian(300448, 149);
+
+ #
+ # Firmware 25, type: STD FW D2620 DTV7 (0x00000088), id: (0000000000000000), size: 149
+ #
+
+ write_le32(0x00000088); # Type
+ write_le64(0x00000000, 0x00000000); # ID
+ write_le32(149); # Size
+ write_hunk_fix_endian(300608, 149);
+
+ #
+ # Firmware 26, type: STD FW D2633 DTV7 (0x00000090), id: (0000000000000000), size: 149
+ #
+
+ write_le32(0x00000090); # Type
+ write_le64(0x00000000, 0x00000000); # ID
+ write_le32(149); # Size
+ write_hunk_fix_endian(300760, 149);
+
+ #
+ # Firmware 27, type: STD FW D2620 DTV78 (0x00000108), id: (0000000000000000), size: 149
+ #
+
+ write_le32(0x00000108); # Type
+ write_le64(0x00000000, 0x00000000); # ID
+ write_le32(149); # Size
+ write_hunk_fix_endian(300920, 149);
+
+ #
+ # Firmware 28, type: STD FW D2633 DTV78 (0x00000110), id: (0000000000000000), size: 149
+ #
+
+ write_le32(0x00000110); # Type
+ write_le64(0x00000000, 0x00000000); # ID
+ write_le32(149); # Size
+ write_hunk_fix_endian(301072, 149);
+
+ #
+ # Firmware 29, type: STD FW D2620 DTV8 (0x00000208), id: (0000000000000000), size: 149
+ #
+
+ write_le32(0x00000208); # Type
+ write_le64(0x00000000, 0x00000000); # ID
+ write_le32(149); # Size
+ write_hunk_fix_endian(301232, 149);
+
+ #
+ # Firmware 30, type: STD FW D2633 DTV8 (0x00000210), id: (0000000000000000), size: 149
+ #
+
+ write_le32(0x00000210); # Type
+ write_le64(0x00000000, 0x00000000); # ID
+ write_le32(149); # Size
+ write_hunk_fix_endian(301384, 149);
+
+ #
+ # Firmware 31, type: STD FW FM (0x00000400), id: (0000000000000000), size: 135
+ #
+
+ write_le32(0x00000400); # Type
+ write_le64(0x00000000, 0x00000000); # ID
+ write_le32(135); # Size
+ write_hunk_fix_endian(301554, 135);
+
+ #
+ # Firmware 32, type: STD FW (0x00000000), id: PAL/I (0000000000000010), size: 161
+ #
+
+ write_le32(0x00000000); # Type
+ write_le64(0x00000000, 0x00000010); # ID
+ write_le32(161); # Size
+ write_hunk_fix_endian(301688, 161);
+
+ #
+ # Firmware 33, type: STD FW MTS (0x00000004), id: PAL/I (0000000000000010), size: 169
+ #
+
+ write_le32(0x00000004); # Type
+ write_le64(0x00000000, 0x00000010); # ID
+ write_le32(169); # Size
+ write_hunk_fix_endian(301856, 169);
+
+ #
+ # Firmware 34, type: STD FW (0x00000000), id: SECAM/L AM (0000001000400000), size: 169
+ #
+
+ #
+ # Firmware 35, type: STD FW (0x00000000), id: SECAM/L NICAM (0000000c00400000), size: 161
+ #
+
+ write_le32(0x00000000); # Type
+ write_le64(0x0000000c, 0x00400000); # ID
+ write_le32(161); # Size
+ write_hunk_fix_endian(302032, 161);
+
+ #
+ # Firmware 36, type: STD FW (0x00000000), id: SECAM/Lc (0000000000800000), size: 161
+ #
+
+ write_le32(0x00000000); # Type
+ write_le64(0x00000000, 0x00800000); # ID
+ write_le32(161); # Size
+ write_hunk_fix_endian(302200, 161);
+
+ #
+ # Firmware 37, type: STD FW (0x00000000), id: NTSC/M Kr (0000000000008000), size: 161
+ #
+
+ write_le32(0x00000000); # Type
+ write_le64(0x00000000, 0x00008000); # ID
+ write_le32(161); # Size
+ write_hunk_fix_endian(302368, 161);
+
+ #
+ # Firmware 38, type: STD FW LCD (0x00001000), id: NTSC/M Kr (0000000000008000), size: 161
+ #
+
+ write_le32(0x00001000); # Type
+ write_le64(0x00000000, 0x00008000); # ID
+ write_le32(161); # Size
+ write_hunk_fix_endian(302536, 161);
+
+ #
+ # Firmware 39, type: STD FW LCD NOGD (0x00003000), id: NTSC/M Kr (0000000000008000), size: 161
+ #
+
+ write_le32(0x00003000); # Type
+ write_le64(0x00000000, 0x00008000); # ID
+ write_le32(161); # Size
+ write_hunk_fix_endian(302704, 161);
+
+ #
+ # Firmware 40, type: STD FW MTS (0x00000004), id: NTSC/M Kr (0000000000008000), size: 169
+ #
+
+ write_le32(0x00000004); # Type
+ write_le64(0x00000000, 0x00008000); # ID
+ write_le32(169); # Size
+ write_hunk_fix_endian(302872, 169);
+
+ #
+ # Firmware 41, type: STD FW (0x00000000), id: NTSC PAL/M PAL/N (000000000000b700), size: 161
+ #
+
+ write_le32(0x00000000); # Type
+ write_le64(0x00000000, 0x0000b700); # ID
+ write_le32(161); # Size
+ write_hunk_fix_endian(303048, 161);
+
+ #
+ # Firmware 42, type: STD FW LCD (0x00001000), id: NTSC PAL/M PAL/N (000000000000b700), size: 161
+ #
+
+ write_le32(0x00001000); # Type
+ write_le64(0x00000000, 0x0000b700); # ID
+ write_le32(161); # Size
+ write_hunk_fix_endian(303216, 161);
+
+ #
+ # Firmware 43, type: STD FW LCD NOGD (0x00003000), id: NTSC PAL/M PAL/N (000000000000b700), size: 161
+ #
+
+ write_le32(0x00003000); # Type
+ write_le64(0x00000000, 0x0000b700); # ID
+ write_le32(161); # Size
+ write_hunk_fix_endian(303384, 161);
+
+ #
+ # Firmware 44, type: STD FW (0x00000000), id: NTSC/M Jp (0000000000002000), size: 161
+ #
+
+ write_le32(0x00000000); # Type
+ write_le64(0x00000000, 0x00002000); # ID
+ write_le32(161); # Size
+ write_hunk_fix_endian(303552, 161);
+
+ #
+ # Firmware 45, type: STD FW MTS (0x00000004), id: NTSC PAL/M PAL/N (000000000000b700), size: 169
+ #
+
+ write_le32(0x00000004); # Type
+ write_le64(0x00000000, 0x0000b700); # ID
+ write_le32(169); # Size
+ write_hunk_fix_endian(303720, 169);
+
+ #
+ # Firmware 46, type: STD FW MTS LCD (0x00001004), id: NTSC PAL/M PAL/N (000000000000b700), size: 169
+ #
+
+ write_le32(0x00001004); # Type
+ write_le64(0x00000000, 0x0000b700); # ID
+ write_le32(169); # Size
+ write_hunk_fix_endian(303896, 169);
+
+ #
+ # Firmware 47, type: STD FW MTS LCD NOGD (0x00003004), id: NTSC PAL/M PAL/N (000000000000b700), size: 169
+ #
+
+ write_le32(0x00003004); # Type
+ write_le64(0x00000000, 0x0000b700); # ID
+ write_le32(169); # Size
+ write_hunk_fix_endian(304072, 169);
+
+ #
+ # Firmware 48, type: SCODE FW HAS IF (0x60000000), IF = 3.28 MHz id: (0000000000000000), size: 192
+ #
+
+ write_le32(0x60000000); # Type
+ write_le64(0x00000000, 0x00000000); # ID
+ write_le16(3280); # IF
+ write_le32(192); # Size
+ write_hunk(309048, 192);
+
+ #
+ # Firmware 49, type: SCODE FW HAS IF (0x60000000), IF = 3.30 MHz id: (0000000000000000), size: 192
+ #
+
+# write_le32(0x60000000); # Type
+# write_le64(0x00000000, 0x00000000); # ID
+# write_le16(3300); # IF
+# write_le32(192); # Size
+# write_hunk(304440, 192);
+
+ #
+ # Firmware 50, type: SCODE FW HAS IF (0x60000000), IF = 3.44 MHz id: (0000000000000000), size: 192
+ #
+
+ write_le32(0x60000000); # Type
+ write_le64(0x00000000, 0x00000000); # ID
+ write_le16(3440); # IF
+ write_le32(192); # Size
+ write_hunk(309432, 192);
+
+ #
+ # Firmware 51, type: SCODE FW HAS IF (0x60000000), IF = 3.46 MHz id: (0000000000000000), size: 192
+ #
+
+ write_le32(0x60000000); # Type
+ write_le64(0x00000000, 0x00000000); # ID
+ write_le16(3460); # IF
+ write_le32(192); # Size
+ write_hunk(309624, 192);
+
+ #
+ # Firmware 52, type: SCODE FW DTV6 ATSC OREN36 HAS IF (0x60210020), IF = 3.80 MHz id: (0000000000000000), size: 192
+ #
+
+ write_le32(0x60210020); # Type
+ write_le64(0x00000000, 0x00000000); # ID
+ write_le16(3800); # IF
+ write_le32(192); # Size
+ write_hunk(306936, 192);
+
+ #
+ # Firmware 53, type: SCODE FW HAS IF (0x60000000), IF = 4.00 MHz id: (0000000000000000), size: 192
+ #
+
+ write_le32(0x60000000); # Type
+ write_le64(0x00000000, 0x00000000); # ID
+ write_le16(4000); # IF
+ write_le32(192); # Size
+ write_hunk(309240, 192);
+
+ #
+ # Firmware 54, type: SCODE FW DTV6 ATSC TOYOTA388 HAS IF (0x60410020), IF = 4.08 MHz id: (0000000000000000), size: 192
+ #
+
+ write_le32(0x60410020); # Type
+ write_le64(0x00000000, 0x00000000); # ID
+ write_le16(4080); # IF
+ write_le32(192); # Size
+ write_hunk(307128, 192);
+
+ #
+ # Firmware 55, type: SCODE FW HAS IF (0x60000000), IF = 4.20 MHz id: (0000000000000000), size: 192
+ #
+
+ write_le32(0x60000000); # Type
+ write_le64(0x00000000, 0x00000000); # ID
+ write_le16(4200); # IF
+ write_le32(192); # Size
+ write_hunk(308856, 192);
+
+ #
+ # Firmware 56, type: SCODE FW MONO HAS IF (0x60008000), IF = 4.32 MHz id: NTSC/M Kr (0000000000008000), size: 192
+ #
+
+ write_le32(0x60008000); # Type
+ write_le64(0x00000000, 0x00008000); # ID
+ write_le16(4320); # IF
+ write_le32(192); # Size
+ write_hunk(305208, 192);
+
+ #
+ # Firmware 57, type: SCODE FW HAS IF (0x60000000), IF = 4.45 MHz id: (0000000000000000), size: 192
+ #
+
+ write_le32(0x60000000); # Type
+ write_le64(0x00000000, 0x00000000); # ID
+ write_le16(4450); # IF
+ write_le32(192); # Size
+ write_hunk(309816, 192);
+
+ #
+ # Firmware 58, type: SCODE FW MTS LCD NOGD MONO IF HAS IF (0x6002b004), IF = 4.50 MHz id: NTSC PAL/M PAL/N (000000000000b700), size: 192
+ #
+
+ write_le32(0x6002b004); # Type
+ write_le64(0x00000000, 0x0000b700); # ID
+ write_le16(4500); # IF
+ write_le32(192); # Size
+ write_hunk(304824, 192);
+
+ #
+ # Firmware 59, type: SCODE FW LCD NOGD IF HAS IF (0x60023000), IF = 4.60 MHz id: NTSC/M Kr (0000000000008000), size: 192
+ #
+
+ write_le32(0x60023000); # Type
+ write_le64(0x00000000, 0x00008000); # ID
+ write_le16(4600); # IF
+ write_le32(192); # Size
+ write_hunk(305016, 192);
+
+ #
+ # Firmware 60, type: SCODE FW DTV6 QAM DTV7 DTV78 DTV8 ZARLINK456 HAS IF (0x620003e0), IF = 4.76 MHz id: (0000000000000000), size: 192
+ #
+
+ write_le32(0x620003e0); # Type
+ write_le64(0x00000000, 0x00000000); # ID
+ write_le16(4760); # IF
+ write_le32(192); # Size
+ write_hunk(304440, 192);
+
+ #
+ # Firmware 61, type: SCODE FW HAS IF (0x60000000), IF = 4.94 MHz id: (0000000000000000), size: 192
+ #
+
+ write_le32(0x60000000); # Type
+ write_le64(0x00000000, 0x00000000); # ID
+ write_le16(4940); # IF
+ write_le32(192); # Size
+ write_hunk(308664, 192);
+
+ #
+ # Firmware 62, type: SCODE FW HAS IF (0x60000000), IF = 5.26 MHz id: (0000000000000000), size: 192
+ #
+
+ write_le32(0x60000000); # Type
+ write_le64(0x00000000, 0x00000000); # ID
+ write_le16(5260); # IF
+ write_le32(192); # Size
+ write_hunk(307704, 192);
+
+ #
+ # Firmware 63, type: SCODE FW MONO HAS IF (0x60008000), IF = 5.32 MHz id: PAL/BG A2 NICAM (0000000f00000007), size: 192
+ #
+
+ write_le32(0x60008000); # Type
+ write_le64(0x0000000f, 0x00000007); # ID
+ write_le16(5320); # IF
+ write_le32(192); # Size
+ write_hunk(307896, 192);
+
+ #
+ # Firmware 64, type: SCODE FW DTV7 DTV78 DTV8 DIBCOM52 CHINA HAS IF (0x65000380), IF = 5.40 MHz id: (0000000000000000), size: 192
+ #
+
+ write_le32(0x65000380); # Type
+ write_le64(0x00000000, 0x00000000); # ID
+ write_le16(5400); # IF
+ write_le32(192); # Size
+ write_hunk(304248, 192);
+
+ #
+ # Firmware 65, type: SCODE FW DTV6 ATSC OREN538 HAS IF (0x60110020), IF = 5.58 MHz id: (0000000000000000), size: 192
+ #
+
+ write_le32(0x60110020); # Type
+ write_le64(0x00000000, 0x00000000); # ID
+ write_le16(5580); # IF
+ write_le32(192); # Size
+ write_hunk(306744, 192);
+
+ #
+ # Firmware 66, type: SCODE FW HAS IF (0x60000000), IF = 5.64 MHz id: PAL/BG A2 (0000000300000007), size: 192
+ #
+
+ write_le32(0x60000000); # Type
+ write_le64(0x00000003, 0x00000007); # ID
+ write_le16(5640); # IF
+ write_le32(192); # Size
+ write_hunk(305592, 192);
+
+ #
+ # Firmware 67, type: SCODE FW HAS IF (0x60000000), IF = 5.74 MHz id: PAL/BG NICAM (0000000c00000007), size: 192
+ #
+
+ write_le32(0x60000000); # Type
+ write_le64(0x0000000c, 0x00000007); # ID
+ write_le16(5740); # IF
+ write_le32(192); # Size
+ write_hunk(305784, 192);
+
+ #
+ # Firmware 68, type: SCODE FW HAS IF (0x60000000), IF = 5.90 MHz id: (0000000000000000), size: 192
+ #
+
+ write_le32(0x60000000); # Type
+ write_le64(0x00000000, 0x00000000); # ID
+ write_le16(5900); # IF
+ write_le32(192); # Size
+ write_hunk(307512, 192);
+
+ #
+ # Firmware 69, type: SCODE FW MONO HAS IF (0x60008000), IF = 6.00 MHz id: PAL/DK PAL/I SECAM/K3 SECAM/L SECAM/Lc NICAM (0000000c04c000f0), size: 192
+ #
+
+ write_le32(0x60008000); # Type
+ write_le64(0x0000000c, 0x04c000f0); # ID
+ write_le16(6000); # IF
+ write_le32(192); # Size
+ write_hunk(305576, 192);
+
+ #
+ # Firmware 70, type: SCODE FW DTV6 QAM ATSC LG60 F6MHZ HAS IF (0x68050060), IF = 6.20 MHz id: (0000000000000000), size: 192
+ #
+
+ write_le32(0x68050060); # Type
+ write_le64(0x00000000, 0x00000000); # ID
+ write_le16(6200); # IF
+ write_le32(192); # Size
+ write_hunk(306552, 192);
+
+ #
+ # Firmware 71, type: SCODE FW HAS IF (0x60000000), IF = 6.24 MHz id: PAL/I (0000000000000010), size: 192
+ #
+
+ write_le32(0x60000000); # Type
+ write_le64(0x00000000, 0x00000010); # ID
+ write_le16(6240); # IF
+ write_le32(192); # Size
+ write_hunk(305400, 192);
+
+ #
+ # Firmware 72, type: SCODE FW MONO HAS IF (0x60008000), IF = 6.32 MHz id: SECAM/K1 (0000000000200000), size: 192
+ #
+
+ write_le32(0x60008000); # Type
+ write_le64(0x00000000, 0x00200000); # ID
+ write_le16(6320); # IF
+ write_le32(192); # Size
+ write_hunk(308472, 192);
+
+ #
+ # Firmware 73, type: SCODE FW HAS IF (0x60000000), IF = 6.34 MHz id: SECAM/K1 (0000000000200000), size: 192
+ #
+
+ write_le32(0x60000000); # Type
+ write_le64(0x00000000, 0x00200000); # ID
+ write_le16(6340); # IF
+ write_le32(192); # Size
+ write_hunk(306360, 192);
+
+ #
+ # Firmware 74, type: SCODE FW MONO HAS IF (0x60008000), IF = 6.50 MHz id: PAL/DK SECAM/K3 SECAM/L NICAM (0000000c044000e0), size: 192
+ #
+
+ write_le32(0x60008000); # Type
+ write_le64(0x0000000c, 0x044000e0); # ID
+ write_le16(6500); # IF
+ write_le32(192); # Size
+ write_hunk(308280, 192);
+
+ #
+ # Firmware 75, type: SCODE FW DTV6 ATSC ATI638 HAS IF (0x60090020), IF = 6.58 MHz id: (0000000000000000), size: 192
+ #
+
+ write_le32(0x60090020); # Type
+ write_le64(0x00000000, 0x00000000); # ID
+ write_le16(6580); # IF
+ write_le32(192); # Size
+ write_hunk(304632, 192);
+
+ #
+ # Firmware 76, type: SCODE FW HAS IF (0x60000000), IF = 6.60 MHz id: PAL/DK A2 (00000003000000e0), size: 192
+ #
+
+ write_le32(0x60000000); # Type
+ write_le64(0x00000003, 0x000000e0); # ID
+ write_le16(6600); # IF
+ write_le32(192); # Size
+ write_hunk(306168, 192);
+
+ #
+ # Firmware 77, type: SCODE FW MONO HAS IF (0x60008000), IF = 6.68 MHz id: PAL/DK A2 (00000003000000e0), size: 192
+ #
+
+ write_le32(0x60008000); # Type
+ write_le64(0x00000003, 0x000000e0); # ID
+ write_le16(6680); # IF
+ write_le32(192); # Size
+ write_hunk(308088, 192);
+
+ #
+ # Firmware 78, type: SCODE FW DTV6 ATSC TOYOTA794 HAS IF (0x60810020), IF = 8.14 MHz id: (0000000000000000), size: 192
+ #
+
+ write_le32(0x60810020); # Type
+ write_le64(0x00000000, 0x00000000); # ID
+ write_le16(8140); # IF
+ write_le32(192); # Size
+ write_hunk(307320, 192);
+
+ #
+ # Firmware 79, type: SCODE FW HAS IF (0x60000000), IF = 8.20 MHz id: (0000000000000000), size: 192
+ #
+
+# write_le32(0x60000000); # Type
+# write_le64(0x00000000, 0x00000000); # ID
+# write_le16(8200); # IF
+# write_le32(192); # Size
+# write_hunk(308088, 192);
}
+sub main_firmware_27($$$$)
+{
+ my $out;
+ my $j=0;
+ my $outfile = shift;
+ my $name = shift;
+ my $version = shift;
+ my $nr_desc = shift;
+
+ for ($j = length($name); $j <32; $j++) {
+ $name = $name.chr(0);
+ }
+
open OUTFILE, ">$outfile";
syswrite(OUTFILE, $name);
write_le16($version);
@@ -906,20 +1678,39 @@ sub main_firmware($$$$)
write_hunk(812856, 192);
}
+
sub extract_firmware {
- my $sourcefile = "hcw85bda.sys";
- my $hash = "0e44dbf63bb0169d57446aec21881ff2";
- my $outfile = "xc3028-v27.fw";
- my $name = "xc2028 firmware";
- my $version = 519;
- my $nr_desc = 80;
+ my $sourcefile_24 = "UDXTTM6000.sys";
+ my $hash_24 = "cb9deb5508a5e150af2880f5b0066d78";
+ my $outfile_24 = "xc3028-v24.fw";
+ my $name_24 = "xc2028 firmware";
+ my $version_24 = 516;
+ my $nr_desc_24 = 77;
+ my $out;
+
+ my $sourcefile_27 = "hcw85bda.sys";
+ my $hash_27 = "0e44dbf63bb0169d57446aec21881ff2";
+ my $outfile_27 = "xc3028-v27.fw";
+ my $name_27 = "xc2028 firmware";
+ my $version_27 = 519;
+ my $nr_desc_27 = 80;
my $out;
- verify($sourcefile, $hash);
+ if (-e $sourcefile_24) {
+ verify($sourcefile_24, $hash_24);
+
+ open INFILE, "<$sourcefile_24";
+ main_firmware_24($outfile_24, $name_24, $version_24, $nr_desc_24);
+ close INFILE;
+ }
- open INFILE, "<$sourcefile";
- main_firmware($outfile, $name, $version, $nr_desc);
- close INFILE;
+ if (-e $sourcefile_27) {
+ verify($sourcefile_27, $hash_27);
+
+ open INFILE, "<$sourcefile_27";
+ main_firmware_27($outfile_27, $name_27, $version_27, $nr_desc_27);
+ close INFILE;
+ }
}
extract_firmware;
diff --git a/Documentation/video4linux/gspca.txt b/Documentation/video4linux/gspca.txt
index 181b9e6fd984..8f3f5d33327c 100644
--- a/Documentation/video4linux/gspca.txt
+++ b/Documentation/video4linux/gspca.txt
@@ -50,6 +50,8 @@ zc3xx 0458:700f Genius VideoCam Web V2
sonixj 0458:7025 Genius Eye 311Q
sn9c20x 0458:7029 Genius Look 320s
sonixj 0458:702e Genius Slim 310 NB
+sn9c20x 0458:704a Genius Slim 1320
+sn9c20x 0458:704c Genius i-Look 1321
sn9c20x 045e:00f4 LifeCam VX-6000 (SN9C20x + OV9650)
sonixj 045e:00f5 MicroSoft VX3000
sonixj 045e:00f7 MicroSoft VX1000
@@ -305,12 +307,14 @@ sonixj 0c45:6138 Sn9c120 Mo4000
sonixj 0c45:613a Microdia Sonix PC Camera
sonixj 0c45:613b Surfer SN-206
sonixj 0c45:613c Sonix Pccam168
+sonixj 0c45:6142 Hama PC-Webcam AC-150
sonixj 0c45:6143 Sonix Pccam168
sonixj 0c45:6148 Digitus DA-70811/ZSMC USB PC Camera ZS211/Microdia
sonixj 0c45:614a Frontech E-Ccam (JIL-2225)
sn9c20x 0c45:6240 PC Camera (SN9C201 + MT9M001)
sn9c20x 0c45:6242 PC Camera (SN9C201 + MT9M111)
sn9c20x 0c45:6248 PC Camera (SN9C201 + OV9655)
+sn9c20x 0c45:624c PC Camera (SN9C201 + MT9M112)
sn9c20x 0c45:624e PC Camera (SN9C201 + SOI968)
sn9c20x 0c45:624f PC Camera (SN9C201 + OV9650)
sn9c20x 0c45:6251 PC Camera (SN9C201 + OV9650)
@@ -323,6 +327,7 @@ sn9c20x 0c45:627f PC Camera (SN9C201 + OV9650)
sn9c20x 0c45:6280 PC Camera (SN9C202 + MT9M001)
sn9c20x 0c45:6282 PC Camera (SN9C202 + MT9M111)
sn9c20x 0c45:6288 PC Camera (SN9C202 + OV9655)
+sn9c20x 0c45:628c PC Camera (SN9C201 + MT9M112)
sn9c20x 0c45:628e PC Camera (SN9C202 + SOI968)
sn9c20x 0c45:628f PC Camera (SN9C202 + OV9650)
sn9c20x 0c45:62a0 PC Camera (SN9C202 + OV7670)
diff --git a/Documentation/video4linux/sh_mobile_ceu_camera.txt b/Documentation/video4linux/sh_mobile_ceu_camera.txt
index 2ae16349a78d..cb47e723af74 100644
--- a/Documentation/video4linux/sh_mobile_ceu_camera.txt
+++ b/Documentation/video4linux/sh_mobile_ceu_camera.txt
@@ -17,18 +17,18 @@ Generic scaling / cropping scheme
-2-- -\
| --\
| --\
-+-5-- -\ -- -3--
-| ---\
-| --- -4-- -\
-| -\
-| - -6--
++-5-- . -- -3-- -\
+| `... -\
+| `... -4-- . - -7..
+| `.
+| `. .6--
|
-| - -6'-
-| -/
-| --- -4'- -/
-| ---/
-+-5'- -/
-| -- -3'-
+| . .6'-
+| .´
+| ... -4'- .´
+| ...´ - -7'.
++-5'- .´ -/
+| -- -3'- -/
| --/
| --/
-2'- -/
@@ -36,7 +36,11 @@ Generic scaling / cropping scheme
|
-1'-
-Produced by user requests:
+In the above chart, minuses and slashes represent "real" data amounts; points
+and accents represent "useful" data, basically the CEU-scaled and cropped
+output, mapped back onto the client's source plane.
+
+Such a configuration can be produced by user requests:
S_CROP(left / top = (5) - (1), width / height = (5') - (5))
S_FMT(width / height = (6') - (6))
@@ -106,52 +110,30 @@ window:
S_CROP
------
-If old scale applied to new crop is invalid produce nearest new scale possible
-
-1. Calculate current combined scales.
-
- scale_comb = (((4') - (4)) / ((6') - (6))) * (((2') - (2)) / ((3') - (3)))
-
-2. Apply iterative sensor S_CROP for new input window.
-
-3. If old combined scales applied to new crop produce an impossible user window,
-adjust scales to produce nearest possible window.
-
- width_u_out = ((5') - (5)) / scale_comb
+The API at http://v4l2spec.bytesex.org/spec/x1904.htm says:
- if (width_u_out > max)
- scale_comb = ((5') - (5)) / max;
- else if (width_u_out < min)
- scale_comb = ((5') - (5)) / min;
+"...specification does not define an origin or units. However by convention
+drivers should horizontally count unscaled samples relative to 0H."
-4. Issue G_CROP to retrieve actual input window.
+We choose to follow this advice and interpret cropping units as client input
+pixels.
-5. Using actual input window and calculated combined scales calculate sensor
-target output window.
-
- width_s_out = ((3') - (3)) = ((2') - (2)) / scale_comb
-
-6. Apply iterative S_FMT for new sensor target output window.
-
-7. Issue G_FMT to retrieve the actual sensor output window.
-
-8. Calculate sensor scales.
-
- scale_s = ((3') - (3)) / ((2') - (2))
+Cropping is performed in the following 6 steps:
-9. Calculate sensor output subwindow to be cropped on CEU by applying sensor
-scales to the requested window.
+1. Request exactly the user rectangle from the sensor.
- width_ceu = ((5') - (5)) / scale_s
+2. If the obtained rectangle is smaller, iterate until a larger one is
+   obtained. Result: sensor cropped to 2 : 2', target crop 5 : 5', current
+   output format 6' - 6.
-10. Use CEU cropping for above calculated window.
+3. In the previous step the sensor has tried to preserve its output frame as
+   well as possible, but it could have changed. Retrieve it again.
-11. Calculate CEU scales from sensor scales from results of (10) and user window
-from (3)
+4. Sensor scaled to 3 : 3'. Sensor's scale is (2' - 2) / (3' - 3). Calculate
+ intermediate window: 4' - 4 = (5' - 5) * (3' - 3) / (2' - 2)
- scale_ceu = calc_scale(((5') - (5)), &width_u_out)
+5. Calculate and apply host scale = (6' - 6) / (4' - 4)
-12. Apply CEU scales.
+6. Calculate and apply host crop: 6 - 7 = (5 - 2) * (6' - 6) / (5' - 5)
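+
+A worked example of steps 4 - 6, with made-up widths (heights are handled in
+exactly the same way): take (2') - (2) = 1280, (3') - (3) = 640,
+(5') - (5) = 320, (6') - (6) = 160 and (5) - (2) = 128. Step 4 then gives the
+intermediate window (4') - (4) = 320 * 640 / 1280 = 160, step 5 gives a host
+scale of 160 / 160 = 1 (no CEU scaling), and step 6 gives a host crop of
+(6) - (7) = 128 * 160 / 320 = 64.
+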
--
Author: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
diff --git a/Documentation/video4linux/v4l2-framework.txt b/Documentation/video4linux/v4l2-framework.txt
index 5155700c206b..e831aaca66f8 100644
--- a/Documentation/video4linux/v4l2-framework.txt
+++ b/Documentation/video4linux/v4l2-framework.txt
@@ -545,12 +545,11 @@ unregister them:
This will remove the device nodes from sysfs (causing udev to remove them
from /dev).
-After video_unregister_device() returns no new opens can be done.
-
-However, in the case of USB devices some application might still have one
-of these device nodes open. You should block all new accesses to read,
-write, poll, etc. except possibly for certain ioctl operations like
-queueing buffers.
+After video_unregister_device() returns, no new opens can be done. However,
+in the case of USB devices some application might still have one of these
+device nodes open. So after the unregister, all file operations will return
+an error as well, except for the ioctl and unlocked_ioctl file operations:
+those will still be passed on, since some buffer ioctls may still be needed.
When the last user of the video device node exits, then the vdev->release()
callback is called and you can do the final cleanup there.
@@ -609,3 +608,135 @@ scatter/gather method (videobuf-dma-sg), DMA with linear access
Please see Documentation/video4linux/videobuf for more information on how
to use the videobuf layer.
+
+struct v4l2_fh
+--------------
+
+struct v4l2_fh provides a way to easily keep file handle specific data
+that is used by the V4L2 framework. Using v4l2_fh is optional for
+drivers.
+
+The users of v4l2_fh (in the V4L2 framework, not the driver) know
+whether a driver uses v4l2_fh as its file->private_data pointer by
+testing the V4L2_FL_USES_V4L2_FH bit in video_device->flags.
+
+Useful functions:
+
+- v4l2_fh_init()
+
+ Initialise the file handle. This *MUST* be performed in the driver's
+ v4l2_file_operations->open() handler.
+
+- v4l2_fh_add()
+
+ Add a v4l2_fh to video_device file handle list. May be called after
+ initialising the file handle.
+
+- v4l2_fh_del()
+
+ Unassociate the file handle from video_device(). The file handle
+ exit function may now be called.
+
+- v4l2_fh_exit()
+
+ Uninitialise the file handle. After uninitialisation the v4l2_fh
+ memory can be freed.
+
+struct v4l2_fh is allocated as a part of the driver's own file handle
+structure and is set to file->private_data in the driver's open
+function. Drivers can extract their own file handle
+structure by using the container_of macro. Example:
+
+struct my_fh {
+ int blah;
+ struct v4l2_fh fh;
+};
+
+...
+
+int my_open(struct file *file)
+{
+ struct my_fh *my_fh;
+ struct video_device *vfd;
+ int ret;
+
+ ...
+
+ ret = v4l2_fh_init(&my_fh->fh, vfd);
+ if (ret)
+ return ret;
+
+ v4l2_fh_add(&my_fh->fh);
+
+ file->private_data = &my_fh->fh;
+
+ ...
+}
+
+int my_release(struct file *file)
+{
+ struct v4l2_fh *fh = file->private_data;
+ struct my_fh *my_fh = container_of(fh, struct my_fh, fh);
+
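+	/* Typically v4l2_fh_del(fh) and then v4l2_fh_exit(fh) are called here,
+	 * after which the containing my_fh structure may be freed. */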
+ ...
+}
+
+V4L2 events
+-----------
+
+The V4L2 events provide a generic way to pass events to user space.
+The driver must use v4l2_fh to be able to support V4L2 events.
+
+Useful functions:
+
+- v4l2_event_alloc()
+
+ To use events, the driver must allocate events for the file handle. By
+ calling the function more than once, the driver may ensure that at least n
+ events in total have been allocated. The function must not be called in
+ atomic context.
+
+- v4l2_event_queue()
+
+ Queue events to video device. The driver's only responsibility is to fill
+ in the type and the data fields. The other fields will be filled in by
+ V4L2.
+
+- v4l2_event_subscribe()
+
+ The video_device->ioctl_ops->vidioc_subscribe_event handler must check that
+ the driver is able to produce events with the specified event id, and then
+ call v4l2_event_subscribe() to subscribe to the event.
+
+- v4l2_event_unsubscribe()
+
+ vidioc_unsubscribe_event in struct v4l2_ioctl_ops. A driver may call
+ v4l2_event_unsubscribe() directly unless it wants to be involved in the
+ unsubscription process.
+
+ The special type V4L2_EVENT_ALL may be used to unsubscribe all events. The
+ drivers may want to handle this in a special way.
+
+- v4l2_event_pending()
+
+ Returns the number of pending events. Useful when implementing poll.
+
+Drivers do not initialise events directly. The events are initialised
+through v4l2_fh_init() if video_device->ioctl_ops->vidioc_subscribe_event is
+non-NULL. This *MUST* be performed in the driver's
+v4l2_file_operations->open() handler.
+
+Events are delivered to user space through the poll system call. The driver
+can use the v4l2_fh->events->wait wait_queue_head_t as the argument for
+poll_wait().
+
+There are standard and private events. New standard events must use the
+smallest available event type. Drivers must allocate their events from
+their own class, starting from the class base. The class base is
+V4L2_EVENT_PRIVATE_START + n * 1000 where n is the lowest available number.
+The first event type in the class is reserved for future use, so the first
+available event type is 'class base + 1'.
+
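+A minimal, hedged sketch of a driver queueing a private event and reporting
+it from poll (my_report_done and my_poll are made-up names; the v4l2_*
+symbols are the ones described above, and returning POLLPRI follows the
+usual V4L2 convention for events):
+
+static void my_report_done(struct video_device *vdev)
+{
+	struct v4l2_event ev;
+
+	memset(&ev, 0, sizeof(ev));
+	/* first usable type in the driver's first private class */
+	ev.type = V4L2_EVENT_PRIVATE_START + 1;
+	v4l2_event_queue(vdev, &ev);	/* V4L2 fills in the other fields */
+}
+
+static unsigned int my_poll(struct file *file, poll_table *wait)
+{
+	struct v4l2_fh *fh = file->private_data;
+
+	poll_wait(file, &fh->events->wait, wait);
+	return v4l2_event_pending(fh) ? POLLPRI : 0;
+}
+
+A real driver would usually also report buffer readiness from the same poll
+handler.
+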
+An example of how the V4L2 events may be used can be found in the OMAP
+3 ISP driver, available at <URL:http://gitorious.org/omap3camera> as of
+this writing.
diff --git a/Documentation/vm/map_hugetlb.c b/Documentation/vm/map_hugetlb.c
index 9969c7d9f985..eda1a6d3578a 100644
--- a/Documentation/vm/map_hugetlb.c
+++ b/Documentation/vm/map_hugetlb.c
@@ -19,7 +19,7 @@
#define PROTECTION (PROT_READ | PROT_WRITE)
#ifndef MAP_HUGETLB
-#define MAP_HUGETLB 0x40
+#define MAP_HUGETLB 0x40000 /* arch specific */
#endif
/* Only ia64 requires this */
diff --git a/Documentation/vm/numa b/Documentation/vm/numa
index e93ad9425e2a..a200a386429d 100644
--- a/Documentation/vm/numa
+++ b/Documentation/vm/numa
@@ -1,41 +1,149 @@
Started Nov 1999 by Kanoj Sarcar <kanoj@sgi.com>
-The intent of this file is to have an uptodate, running commentary
-from different people about NUMA specific code in the Linux vm.
-
-What is NUMA? It is an architecture where the memory access times
-for different regions of memory from a given processor varies
-according to the "distance" of the memory region from the processor.
-Each region of memory to which access times are the same from any
-cpu, is called a node. On such architectures, it is beneficial if
-the kernel tries to minimize inter node communications. Schemes
-for this range from kernel text and read-only data replication
-across nodes, and trying to house all the data structures that
-key components of the kernel need on memory on that node.
-
-Currently, all the numa support is to provide efficient handling
-of widely discontiguous physical memory, so architectures which
-are not NUMA but can have huge holes in the physical address space
-can use the same code. All this code is bracketed by CONFIG_DISCONTIGMEM.
-
-The initial port includes NUMAizing the bootmem allocator code by
-encapsulating all the pieces of information into a bootmem_data_t
-structure. Node specific calls have been added to the allocator.
-In theory, any platform which uses the bootmem allocator should
-be able to put the bootmem and mem_map data structures anywhere
-it deems best.
-
-Each node's page allocation data structures have also been encapsulated
-into a pg_data_t. The bootmem_data_t is just one part of this. To
-make the code look uniform between NUMA and regular UMA platforms,
-UMA platforms have a statically allocated pg_data_t too (contig_page_data).
-For the sake of uniformity, the function num_online_nodes() is also defined
-for all platforms. As we run benchmarks, we might decide to NUMAize
-more variables like low_on_memory, nr_free_pages etc into the pg_data_t.
-
-The NUMA aware page allocation code currently tries to allocate pages
-from different nodes in a round robin manner. This will be changed to
-do concentratic circle search, starting from current node, once the
-NUMA port achieves more maturity. The call alloc_pages_node has been
-added, so that drivers can make the call and not worry about whether
-it is running on a NUMA or UMA platform.
+What is NUMA?
+
+This question can be answered from a couple of perspectives: the
+hardware view and the Linux software view.
+
+From the hardware perspective, a NUMA system is a computer platform that
+comprises multiple components or assemblies each of which may contain 0
+or more CPUs, local memory, and/or IO buses. For brevity and to
+disambiguate the hardware view of these physical components/assemblies
+from the software abstraction thereof, we'll call the components/assemblies
+'cells' in this document.
+
+Each of the 'cells' may be viewed as an SMP [symmetric multi-processor] subset
+of the system--although some components necessary for a stand-alone SMP system
+may not be populated on any given cell. The cells of the NUMA system are
+connected together with some sort of system interconnect--e.g., a crossbar or
+point-to-point link are common types of NUMA system interconnects. Both of
+these types of interconnects can be aggregated to create NUMA platforms with
+cells at multiple distances from other cells.
+
+For Linux, the NUMA platforms of interest are primarily what is known as Cache
+Coherent NUMA or ccNUMA systems. With ccNUMA systems, all memory is visible
+to and accessible from any CPU attached to any cell and cache coherency
+is handled in hardware by the processor caches and/or the system interconnect.
+
+Memory access time and effective memory bandwidth varies depending on how far
+away the cell containing the CPU or IO bus making the memory access is from the
+cell containing the target memory. For example, access to memory by CPUs
+attached to the same cell will experience faster access times and higher
+bandwidths than accesses to memory on other, remote cells. NUMA platforms
+can have cells at multiple remote distances from any given cell.
+
+Platform vendors don't build NUMA systems just to make software developers'
+lives interesting. Rather, this architecture is a means to provide scalable
+memory bandwidth. However, to achieve scalable memory bandwidth, system and
+application software must arrange for a large majority of the memory references
+[cache misses] to be to "local" memory--memory on the same cell, if any--or
+to the closest cell with memory.
+
+This leads to the Linux software view of a NUMA system:
+
+Linux divides the system's hardware resources into multiple software
+abstractions called "nodes". Linux maps the nodes onto the physical cells
+of the hardware platform, abstracting away some of the details for some
+architectures. As with physical cells, software nodes may contain 0 or more
+CPUs, memory and/or IO buses. And, again, memory accesses to memory on
+"closer" nodes--nodes that map to closer cells--will generally experience
+faster access times and higher effective bandwidth than accesses to more
+remote cells.
+
+For some architectures, such as x86, Linux will "hide" any node representing a
+physical cell that has no memory attached, and reassign any CPUs attached to
+that cell to a node representing a cell that does have memory. Thus, on
+these architectures, one cannot assume that all CPUs that Linux associates with
+a given node will see the same local memory access times and bandwidth.
+
+In addition, for some architectures, again x86 is an example, Linux supports
+the emulation of additional nodes. For NUMA emulation, Linux will carve up
+the existing nodes--or the system memory for non-NUMA platforms--into multiple
+nodes. Each emulated node will manage a fraction of the underlying cells'
+physical memory. NUMA emulation is useful for testing NUMA kernel and
+application features on non-NUMA platforms, and as a sort of memory resource
+management mechanism when used together with cpusets.
+[see Documentation/cgroups/cpusets.txt]
+
+For each node with memory, Linux constructs an independent memory management
+subsystem, complete with its own free page lists, in-use page lists, usage
+statistics and locks to mediate access. In addition, Linux constructs for
+each memory zone [one or more of DMA, DMA32, NORMAL, HIGH_MEMORY, MOVABLE],
+an ordered "zonelist". A zonelist specifies the zones/nodes to visit when a
+selected zone/node cannot satisfy the allocation request. This situation,
+when a zone has no available memory to satisfy a request, is called
+"overflow" or "fallback".
+
+Because some nodes contain multiple zones containing different types of
+memory, Linux must decide whether to order the zonelists such that allocations
+fall back to the same zone type on a different node, or to a different zone
+type on the same node. This is an important consideration because some zones,
+such as DMA or DMA32, represent relatively scarce resources. Linux chooses
+a default zonelist order based on the sizes of the various zone types relative
+to the total memory of the node and the total memory of the system. The
+default zonelist order may be overridden using the numa_zonelist_order kernel
+boot parameter or sysctl. [see Documentation/kernel-parameters.txt and
+Documentation/sysctl/vm.txt]
+
+By default, Linux will attempt to satisfy memory allocation requests from the
+node to which the CPU that executes the request is assigned. Specifically,
+Linux will attempt to allocate from the first node in the appropriate zonelist
+for the node where the request originates. This is called "local allocation."
+If the "local" node cannot satisfy the request, the kernel will examine other
+nodes' zones in the selected zonelist looking for the first zone in the list
+that can satisfy the request.
+
+Local allocation will tend to keep subsequent access to the allocated memory
+"local" to the underlying physical resources and off the system interconnect--
+as long as the task on whose behalf the kernel allocated some memory does not
+later migrate away from that memory. The Linux scheduler is aware of the
+NUMA topology of the platform--embodied in the "scheduling domains" data
+structures [see Documentation/scheduler/sched-domains.txt]--and the scheduler
+attempts to minimize task migration to distant scheduling domains. However,
+the scheduler does not take a task's NUMA footprint into account directly.
+Thus, under sufficient imbalance, tasks can migrate between nodes, remote
+from their initial node and kernel data structures.
+
+System administrators and application designers can restrict a task's migration
+to improve NUMA locality using various CPU affinity command line interfaces,
+such as taskset(1) and numactl(1), and program interfaces such as
+sched_setaffinity(2). Further, one can modify the kernel's default local
+allocation behavior using Linux NUMA memory policy.
+[see Documentation/vm/numa_memory_policy.txt]
+
+System administrators can restrict the CPUs and nodes' memories that a non-
+privileged user can specify in the scheduling or NUMA commands and functions
+using control groups and cpusets. [see Documentation/cgroups/cpusets.txt]
+
+On architectures that do not hide memoryless nodes, Linux will include only
+zones [nodes] with memory in the zonelists. This means that for a memoryless
+node the "local memory node"--the node of the first zone in CPU's node's
+zonelist--will not be the node itself. Rather, it will be the node that the
+kernel selected as the nearest node with memory when it built the zonelists.
+So, by default, local allocations will succeed with the kernel supplying the
+closest available memory. This is a consequence of the same mechanism that
+allows such allocations to fallback to other nearby nodes when a node that
+does contain memory overflows.
+
+Some kernel allocations do not want or cannot tolerate this allocation fallback
+behavior. Rather they want to be sure they get memory from the specified node
+or get notified that the node has no free memory. This is usually the case when
+a subsystem allocates per CPU memory resources, for example.
+
+A typical model for making such an allocation is to obtain the node id of the
+node to which the "current CPU" is attached using one of the kernel's
+numa_node_id() or CPU_to_node() functions and then request memory from only
+the node id returned. When such an allocation fails, the requesting subsystem
+may revert to its own fallback path. The slab kernel memory allocator is an
+example of this. Or, the subsystem may choose to disable or not to enable
+itself on allocation failure. The kernel profiling subsystem is an example of
+this.
+
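+A hedged sketch of that pattern (my_alloc_local is a made-up name, and error
+handling on failure is left to the calling subsystem):
+
+	void *my_alloc_local(size_t size)
+	{
+		int nid = numa_node_id();	/* node of the executing CPU */
+
+		/* __GFP_THISNODE: fail rather than fall back to another node */
+		return kmalloc_node(size, GFP_KERNEL | __GFP_THISNODE, nid);
+	}
+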
+If the architecture supports--does not hide--memoryless nodes, then CPUs
+attached to memoryless nodes would always incur the fallback path overhead
+or some subsystems would fail to initialize if they attempted to allocate
+memory exclusively from a node without memory. To support such
+architectures transparently, kernel subsystems can use the numa_mem_id()
+or cpu_to_mem() function to locate the "local memory node" for the calling or
+specified CPU. Again, this is the same node from which default, local page
+allocations will be attempted.
diff --git a/Documentation/vm/numa_memory_policy.txt b/Documentation/vm/numa_memory_policy.txt
index be45dbb9d7f2..6690fc34ef6d 100644
--- a/Documentation/vm/numa_memory_policy.txt
+++ b/Documentation/vm/numa_memory_policy.txt
@@ -45,7 +45,7 @@ most general to most specific:
to establish the task policy for a child task exec()'d from an
executable image that has no awareness of memory policy. See the
MEMORY POLICY APIS section, below, for an overview of the system call
- that a task may use to set/change it's task/process policy.
+ that a task may use to set/change its task/process policy.
In a multi-threaded task, task policies apply only to the thread
[Linux kernel task] that installs the policy and any threads
@@ -301,7 +301,7 @@ decrement this reference count, respectively. mpol_put() will only free
the structure back to the mempolicy kmem cache when the reference count
goes to zero.
-When a new memory policy is allocated, it's reference count is initialized
+When a new memory policy is allocated, its reference count is initialized
+When a new memory policy is allocated, its reference count is initialized
to '1', representing the reference held by the task that is installing the
new policy. When a pointer to a memory policy structure is stored in another
structure, another reference is added, as the task's reference will be dropped
diff --git a/Documentation/w1/w1.generic b/Documentation/w1/w1.generic
index e3333eec4320..212f4ac31c01 100644
--- a/Documentation/w1/w1.generic
+++ b/Documentation/w1/w1.generic
@@ -25,7 +25,7 @@ When a w1 master driver registers with the w1 subsystem, the following occurs:
- sysfs entries for that w1 master are created
- the w1 bus is periodically searched for new slave devices
-When a device is found on the bus, w1 core checks if driver for it's family is
+When a device is found on the bus, w1 core checks if driver for its family is
+When a device is found on the bus, w1 core checks if driver for its family is
loaded. If so, the family driver is attached to the slave.
If there is no driver for the family, default one is assigned, which allows to perform
almost any kind of operations. Each logical operation is a transaction
diff --git a/Documentation/watchdog/00-INDEX b/Documentation/watchdog/00-INDEX
index c3ea47e507fe..ee994513a9b1 100644
--- a/Documentation/watchdog/00-INDEX
+++ b/Documentation/watchdog/00-INDEX
@@ -1,10 +1,15 @@
00-INDEX
- this file.
+hpwdt.txt
+ - information on the HP iLO2 NMI watchdog
pcwd-watchdog.txt
- documentation for Berkshire Products PC Watchdog ISA cards.
src/
- directory holding watchdog related example programs.
watchdog-api.txt
- description of the Linux Watchdog driver API.
+watchdog-parameters.txt
+ - information on driver parameters (for drivers other than
+ the ones that have driver-specific files here)
wdt.txt
- description of the Watchdog Timer Interfaces for Linux.
diff --git a/Documentation/watchdog/watchdog-parameters.txt b/Documentation/watchdog/watchdog-parameters.txt
new file mode 100644
index 000000000000..41c95cc1dc1f
--- /dev/null
+++ b/Documentation/watchdog/watchdog-parameters.txt
@@ -0,0 +1,390 @@
+This file provides information on the module parameters of many of
+the Linux watchdog drivers. Watchdog driver parameter specs should
+be listed here unless the driver has its own driver-specific information
+file.
+
+
+See Documentation/kernel-parameters.txt for information on
+providing kernel parameters for builtin drivers versus loadable
+modules.
+
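+For example (illustrative values only, using the advantechwdt parameters
+listed below), a built-in driver takes its parameters on the kernel command
+line as
+	advantechwdt.timeout=60 advantechwdt.nowayout=1
+while the same driver built as a module takes them as
+	modprobe advantechwdt timeout=60 nowayout=1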
+
+-------------------------------------------------
+acquirewdt:
+wdt_stop: Acquire WDT 'stop' io port (default 0x43)
+wdt_start: Acquire WDT 'start' io port (default 0x443)
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+advantechwdt:
+wdt_stop: Advantech WDT 'stop' io port (default 0x443)
+wdt_start: Advantech WDT 'start' io port (default 0x443)
+timeout: Watchdog timeout in seconds. 1<= timeout <=63, default=60.
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+alim1535_wdt:
+timeout: Watchdog timeout in seconds. (0 < timeout < 18000, default=60
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+alim7101_wdt:
+timeout: Watchdog timeout in seconds. (1<=timeout<=3600, default=30
+use_gpio: Use the gpio watchdog (required by old cobalt boards).
+ default=0/off/no
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+ar7_wdt:
+margin: Watchdog margin in seconds (default=60)
+nowayout: Disable watchdog shutdown on close
+ (default=kernel config parameter)
+-------------------------------------------------
+at32ap700x_wdt:
+timeout: Timeout value. Limited to be 1 or 2 seconds. (default=2)
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+at91rm9200_wdt:
+wdt_time: Watchdog time in seconds. (default=5)
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+at91sam9_wdt:
+heartbeat: Watchdog heartbeats in seconds. (default = 15)
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+bcm47xx_wdt:
+wdt_time: Watchdog time in seconds. (default=30)
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+bfin_wdt:
+timeout: Watchdog timeout in seconds. (1<=timeout<=((2^32)/SCLK), default=20)
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+coh901327_wdt:
+margin: Watchdog margin in seconds (default 60s)
+-------------------------------------------------
+cpu5wdt:
+port: base address of watchdog card, default is 0x91
+verbose: be verbose, default is 0 (no)
+ticks: count down ticks, default is 10000
+-------------------------------------------------
+cpwd:
+wd0_timeout: Default watchdog0 timeout in 1/10secs
+wd1_timeout: Default watchdog1 timeout in 1/10secs
+wd2_timeout: Default watchdog2 timeout in 1/10secs
+-------------------------------------------------
+davinci_wdt:
+heartbeat: Watchdog heartbeat period in seconds from 1 to 600, default 60
+-------------------------------------------------
+ep93xx_wdt:
+nowayout: Watchdog cannot be stopped once started
+timeout: Watchdog timeout in seconds. (1<=timeout<=3600, default=TBD)
+-------------------------------------------------
+eurotechwdt:
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+io: Eurotech WDT io port (default=0x3f0)
+irq: Eurotech WDT irq (default=10)
+ev: Eurotech WDT event type (default is `int')
+-------------------------------------------------
+gef_wdt:
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+geodewdt:
+timeout: Watchdog timeout in seconds. 1<= timeout <=131, default=60.
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+i6300esb:
+heartbeat: Watchdog heartbeat in seconds. (1<heartbeat<2046, default=30)
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+iTCO_wdt:
+heartbeat: Watchdog heartbeat in seconds.
+ (2<heartbeat<39 (TCO v1) or 613 (TCO v2), default=30)
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+iTCO_vendor_support:
+vendorsupport: iTCO vendor specific support mode, default=0 (none),
+ 1=SuperMicro Pent3, 2=SuperMicro Pent4+, 911=Broken SMI BIOS
+-------------------------------------------------
+ib700wdt:
+timeout: Watchdog timeout in seconds. 0<= timeout <=30, default=30.
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+ibmasr:
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+indydog:
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+iop_wdt:
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+it8712f_wdt:
+margin: Watchdog margin in seconds (default 60)
+nowayout: Disable watchdog shutdown on close
+ (default=kernel config parameter)
+-------------------------------------------------
+it87_wdt:
+nogameport: Forbid the activation of game port, default=0
+exclusive: Watchdog exclusive device open, default=1
+timeout: Watchdog timeout in seconds, default=60
+testmode: Watchdog test mode (1 = no reboot), default=0
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+ixp2000_wdt:
+heartbeat: Watchdog heartbeat in seconds (default 60s)
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+ixp4xx_wdt:
+heartbeat: Watchdog heartbeat in seconds (default 60s)
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+ks8695_wdt:
+wdt_time: Watchdog time in seconds. (default=5)
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+machzwd:
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+action: after watchdog resets, generate:
+ 0 = RESET(*) 1 = SMI 2 = NMI 3 = SCI
+-------------------------------------------------
+max63xx_wdt:
+heartbeat: Watchdog heartbeat period in seconds from 1 to 60, default 60
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+nodelay: Force selection of a timeout setting without initial delay
+ (max6373/74 only, default=0)
+-------------------------------------------------
+mixcomwd:
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+mpc8xxx_wdt:
+timeout: Watchdog timeout in ticks. (0<timeout<65536, default=65535)
+reset: Watchdog Interrupt/Reset Mode. 0 = interrupt, 1 = reset
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+mpcore_wdt:
+mpcore_margin: MPcore timer margin in seconds.
+ (0 < mpcore_margin < 65536, default=60)
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+mpcore_noboot: MPcore watchdog action, set to 1 to ignore reboots,
+	 0 to reboot (default=0)
+-------------------------------------------------
+mv64x60_wdt:
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+nuc900_wdt:
+heartbeat: Watchdog heartbeats in seconds.
+ (default = 15)
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+omap_wdt:
+timer_margin: initial watchdog timeout (in seconds)
+-------------------------------------------------
+orion_wdt:
+heartbeat: Initial watchdog heartbeat in seconds
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+pc87413_wdt:
+io: pc87413 WDT I/O port (default: io).
+timeout: Watchdog timeout in minutes (default=timeout).
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+pika_wdt:
+heartbeat: Watchdog heartbeats in seconds. (default = 15)
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+pnx4008_wdt:
+heartbeat: Watchdog heartbeat period in seconds from 1 to 60, default 19
+nowayout: Set to 1 to keep watchdog running after device release
+-------------------------------------------------
+pnx833x_wdt:
+timeout: Watchdog timeout in ticks of the 68MHz clock, default=2040000000 (30 seconds)
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+start_enabled: Watchdog is started on module insertion (default=1)
+-------------------------------------------------
+rc32434_wdt:
+timeout: Watchdog timeout value, in seconds (default=20)
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+riowd:
+riowd_timeout: Watchdog timeout in minutes (default=1)
+-------------------------------------------------
+s3c2410_wdt:
+tmr_margin: Watchdog tmr_margin in seconds. (default=15)
+tmr_atboot: Watchdog is started at boot time if set to 1, default=0
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+soft_noboot: Watchdog action, set to 1 to ignore reboots, 0 to reboot
+debug: Watchdog debug, set to >1 for debug, (default 0)
+-------------------------------------------------
+sa1100_wdt:
+margin: Watchdog margin in seconds (default 60s)
+-------------------------------------------------
+sb_wdog:
+timeout: Watchdog timeout in microseconds (max/default 8388607 or 8.3ish secs)
+-------------------------------------------------
+sbc60xxwdt:
+wdt_stop: SBC60xx WDT 'stop' io port (default 0x45)
+wdt_start: SBC60xx WDT 'start' io port (default 0x443)
+timeout: Watchdog timeout in seconds. (1<=timeout<=3600, default=30)
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+sbc7240_wdt:
+timeout: Watchdog timeout in seconds. (1<=timeout<=255, default=30)
+nowayout: Disable watchdog when closing device file
+-------------------------------------------------
+sbc8360:
+timeout: Index into timeout table (0-63) (default=27 (60s))
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+sbc_epx_c3:
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+sbc_fitpc2_wdt:
+margin: Watchdog margin in seconds (default 60s)
+nowayout: Watchdog cannot be stopped once started
+-------------------------------------------------
+sc1200wdt:
+isapnp: When set to 0, the driver's ISA PnP support will be disabled (default=1)
+io: io port
+timeout: range is 0-255 minutes, default is 1
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+sc520_wdt:
+timeout: Watchdog timeout in seconds. (1 <= timeout <= 3600, default=30)
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+sch311x_wdt:
+force_id: Override the detected device ID
+therm_trip: Should a ThermTrip trigger the reset generator
+timeout: Watchdog timeout in seconds. 1<= timeout <=15300, default=60
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+scx200_wdt:
+margin: Watchdog margin in seconds
+nowayout: Disable watchdog shutdown on close
+-------------------------------------------------
+shwdt:
+clock_division_ratio: Clock division ratio. Valid ranges are from 0x5 (1.31ms)
+ to 0x7 (5.25ms). (default=7)
+heartbeat: Watchdog heartbeat in seconds. (1 <= heartbeat <= 3600, default=30)
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+smsc37b787_wdt:
+timeout: range is 1-255 units, default is 60
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+softdog:
+soft_margin: Watchdog soft_margin in seconds.
+ (0 < soft_margin < 65536, default=60)
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+soft_noboot: Softdog action, set to 1 to ignore reboots, 0 to reboot
+ (default=0)
+-------------------------------------------------
+stmp3xxx_wdt:
+heartbeat: Watchdog heartbeat period in seconds from 1 to 4194304, default 19
+-------------------------------------------------
+ts72xx_wdt:
+timeout: Watchdog timeout in seconds. (1 <= timeout <= 8, default=8)
+nowayout: Disable watchdog shutdown on close
+-------------------------------------------------
+twl4030_wdt:
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+txx9wdt:
+timeout: Watchdog timeout in seconds. (0<timeout<N, default=60)
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+w83627hf_wdt:
+wdt_io: w83627hf/thf WDT io port (default 0x2E)
+timeout: Watchdog timeout in seconds. 1 <= timeout <= 255, default=60.
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+w83697hf_wdt:
+wdt_io: w83697hf/hg WDT io port (default 0x2e, 0 = autodetect)
+timeout: Watchdog timeout in seconds. 1<= timeout <=255 (default=60)
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+early_disable: Watchdog gets disabled at boot time (default=1)
+-------------------------------------------------
+w83697ug_wdt:
+wdt_io: w83697ug/uf WDT io port (default 0x2e)
+timeout: Watchdog timeout in seconds. 1<= timeout <=255 (default=60)
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+w83877f_wdt:
+timeout: Watchdog timeout in seconds. (1<=timeout<=3600, default=30)
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+w83977f_wdt:
+timeout: Watchdog timeout in seconds (15..7635, default=45)
+testmode: Watchdog testmode (1 = no reboot), default=0
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+wafer5823wdt:
+timeout: Watchdog timeout in seconds. 1 <= timeout <= 255, default=60.
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+wdt285:
+soft_margin: Watchdog timeout in seconds (default=60)
+-------------------------------------------------
+wdt977:
+timeout: Watchdog timeout in seconds (60..15300, default=60)
+testmode: Watchdog testmode (1 = no reboot), default=0
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+wm831x_wdt:
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
+wm8350_wdt:
+nowayout: Watchdog cannot be stopped once started
+ (default=kernel config parameter)
+-------------------------------------------------
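The parameters listed above are ordinary Linux module parameters. For a watchdog driver built as a module they are normally given on the modprobe command line; for a driver built into the kernel they can usually be passed on the kernel command line with a <driver>.<parameter>= prefix. An illustrative example using the softdog driver (values chosen for illustration only):

  modprobe softdog soft_margin=120 nowayout=1

or, with the driver built into the kernel:

  softdog.soft_margin=120 softdog.nowayout=1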
diff --git a/Documentation/watchdog/wdt.txt b/Documentation/watchdog/wdt.txt
index 03fd756d976d..061c2e35384f 100644
--- a/Documentation/watchdog/wdt.txt
+++ b/Documentation/watchdog/wdt.txt
@@ -14,14 +14,22 @@ reboot will depend on the state of the machines and interrupts. The hardware
boards physically pull the machine down off their own onboard timers and
will reboot from almost anything.
-A second temperature monitoring interface is available on the WDT501P cards
+A second temperature monitoring interface is available on the WDT501P cards.
This provides /dev/temperature. This is the machine internal temperature in
degrees Fahrenheit. Each read returns a single byte giving the temperature.
The third interface logs kernel messages on additional alert events.
-The wdt card cannot be safely probed for. Instead you need to pass
-wdt=ioaddr,irq as a boot parameter - eg "wdt=0x240,11".
+The ICS ISA-bus wdt card cannot be safely probed for. Instead you need to
+pass IO address and IRQ boot parameters. E.g.:
+ wdt.io=0x240 wdt.irq=11
+
+Other "wdt" driver parameters are:
+ heartbeat Watchdog heartbeat in seconds (default 60)
+ nowayout Watchdog cannot be stopped once started (kernel
+ build parameter)
+ tachometer WDT501-P Fan Tachometer support (0=disable, default=0)
+ type WDT501-P Card type (500 or 501, default=500)
Features
--------
@@ -40,4 +48,3 @@ Minor numbers are however allocated for it.
Example Watchdog Driver: see Documentation/watchdog/src/watchdog-simple.c
-
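When the wdt driver is built as a module rather than into the kernel, the same settings are passed to modprobe instead of on the boot command line. An illustrative invocation (the I/O address, IRQ and card type are example values and must match the actual board's configuration):

  modprobe wdt io=0x240 irq=11 type=501 tachometer=1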
diff --git a/MAINTAINERS b/MAINTAINERS
index d5b0b1b6dc52..13608bd2e791 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -131,19 +131,12 @@ L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/typhoon*
-3W-9XXX SATA-RAID CONTROLLER DRIVER
-M: Adam Radford <linuxraid@amcc.com>
+3WARE SAS/SATA-RAID SCSI DRIVERS (3W-XXXX, 3W-9XXX, 3W-SAS)
+M: Adam Radford <linuxraid@lsi.com>
L: linux-scsi@vger.kernel.org
-W: http://www.amcc.com
+W: http://www.lsi.com
S: Supported
-F: drivers/scsi/3w-9xxx*
-
-3W-XXXX ATA-RAID CONTROLLER DRIVER
-M: Adam Radford <linuxraid@amcc.com>
-L: linux-scsi@vger.kernel.org
-W: http://www.amcc.com
-S: Supported
-F: drivers/scsi/3w-xxxx*
+F: drivers/scsi/3w-*
53C700 AND 53C700-66 SCSI DRIVER
M: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>
@@ -586,6 +579,12 @@ F: drivers/mtd/nand/bcm_umi_bch.c
F: drivers/mtd/nand/bcm_umi_hamming.c
F: drivers/mtd/nand/nand_bcm_umi.h
+ARM/CAVIUM NETWORKS CNS3XXX MACHINE SUPPORT
+M: Anton Vorontsov <avorontsov@mvista.com>
+S: Maintained
+F: arch/arm/mach-cns3xxx/
+T: git git://git.infradead.org/users/cbou/linux-cns3xxx.git
+
ARM/CIRRUS LOGIC EP93XX ARM ARCHITECTURE
M: Hartley Sweeten <hsweeten@visionengravers.com>
M: Ryan Mallon <ryan@bluewatersys.com>
@@ -775,11 +774,10 @@ M: Philipp Zabel <philipp.zabel@gmail.com>
S: Maintained
ARM/Marvell Loki/Kirkwood/MV78xx0/Orion SOC support
-M: Lennert Buytenhek <buytenh@marvell.com>
-M: Nicolas Pitre <nico@marvell.com>
+M: Lennert Buytenhek <kernel@wantstofly.org>
+M: Nicolas Pitre <nico@fluxnic.net>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-T: git git://git.marvell.com/orion
-S: Maintained
+S: Odd Fixes
F: arch/arm/mach-loki/
F: arch/arm/mach-kirkwood/
F: arch/arm/mach-mv78xx0/
@@ -814,6 +812,7 @@ ARM/QUALCOMM MSM MACHINE SUPPORT
M: David Brown <davidb@codeaurora.org>
M: Daniel Walker <dwalker@codeaurora.org>
M: Bryan Huntsman <bryanh@codeaurora.org>
+L: linux-arm-msm@vger.kernel.org
F: arch/arm/mach-msm/
F: drivers/video/msm/
F: drivers/mmc/host/msm_sdcc.c
@@ -970,6 +969,18 @@ M: Wan ZongShun <mcuos.com@gmail.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
W: http://www.mcuos.com
S: Maintained
+F: arch/arm/mach-w90x900/
+F: arch/arm/mach-nuc93x/
+F: drivers/input/keyboard/w90p910_keypad.c
+F: drivers/input/touchscreen/w90p910_ts.c
+F: drivers/watchdog/nuc900_wdt.c
+F: drivers/net/arm/w90p910_ether.c
+F: drivers/mtd/nand/w90p910_nand.c
+F: drivers/rtc/rtc-nuc900.c
+F: drivers/spi/spi_nuc900.c
+F: drivers/usb/host/ehci-w90x900.c
+F: drivers/video/nuc900fb.c
+F:	sound/soc/nuc900/
ARM/U300 MACHINE SUPPORT
M: Linus Walleij <linus.walleij@stericsson.com>
@@ -994,6 +1005,20 @@ W: http://www.arm.linux.org.uk/
S: Maintained
F: arch/arm/vfp/
+ARM/VOIPAC PXA270 SUPPORT
+M: Marek Vasut <marek.vasut@gmail.com>
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S: Maintained
+F: arch/arm/mach-pxa/vpac270.c
+F: arch/arm/mach-pxa/include/mach-pxa/vpac270.h
+
+ARM/ZIPIT Z2 SUPPORT
+M: Marek Vasut <marek.vasut@gmail.com>
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S: Maintained
+F: arch/arm/mach-pxa/z2.c
+F: arch/arm/mach-pxa/include/mach-pxa/z2.h
+
ASC7621 HARDWARE MONITOR DRIVER
M: George Joseph <george.joseph@fairview5.com>
L: lm-sensors@lm-sensors.org
@@ -1501,9 +1526,10 @@ M: Andy Whitcroft <apw@canonical.com>
S: Supported
F: scripts/checkpatch.pl
-CISCO 10G ETHERNET DRIVER
+CISCO VIC ETHERNET NIC DRIVER
M: Scott Feldman <scofeldm@cisco.com>
-M: Joe Eykholt <jeykholt@cisco.com>
+M: Vasanthy Kolluri <vkolluri@cisco.com>
+M: Roopa Prabhu <roprabhu@cisco.com>
S: Supported
F: drivers/net/enic/
@@ -1729,6 +1755,20 @@ W: http://www.openfabrics.org
S: Supported
F: drivers/infiniband/hw/cxgb3/
+CXGB4 ETHERNET DRIVER (CXGB4)
+M: Dimitris Michailidis <dm@chelsio.com>
+L: netdev@vger.kernel.org
+W: http://www.chelsio.com
+S: Supported
+F: drivers/net/cxgb4/
+
+CXGB4 IWARP RNIC DRIVER (IW_CXGB4)
+M: Steve Wise <swise@chelsio.com>
+L: linux-rdma@vger.kernel.org
+W: http://www.openfabrics.org
+S: Supported
+F: drivers/infiniband/hw/cxgb4/
+
CYBERPRO FB DRIVER
M: Russell King <linux@arm.linux.org.uk>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -2667,16 +2707,12 @@ F: Documentation/timers/hpet.txt
F: drivers/char/hpet.c
F: include/linux/hpet.h
-HPET: i386
-M: "Venkatesh Pallipadi (Venki)" <venkatesh.pallipadi@intel.com>
+HPET: x86
+M: "Venkatesh Pallipadi (Venki)" <venki@google.com>
S: Maintained
F: arch/x86/kernel/hpet.c
F: arch/x86/include/asm/hpet.h
-HPET: x86_64
-M: Vojtech Pavlik <vojtech@suse.cz>
-S: Maintained
-
HPET: ACPI
M: Bob Picco <bob.picco@hp.com>
S: Maintained
@@ -2717,6 +2753,7 @@ M: "Ben Dooks (embedded platforms)" <ben-linux@fluff.org>
L: linux-i2c@vger.kernel.org
W: http://i2c.wiki.kernel.org/
T: quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-i2c/
+T: git git://git.fluff.org/bjdooks/linux.git
S: Maintained
F: Documentation/i2c/
F: drivers/i2c/
@@ -2850,6 +2887,13 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/dtor/input.git
S: Maintained
F: drivers/input/
+INTEL IDLE DRIVER
+M: Len Brown <lenb@kernel.org>
+L: linux-pm@lists.linux-foundation.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux-idle-2.6.git
+S: Supported
+F: drivers/idle/intel_idle.c
+
INTEL FRAMEBUFFER DRIVER (excluding 810 and 815)
M: Maik Broemme <mbroemme@plusserver.de>
L: linux-fbdev@vger.kernel.org
@@ -2953,6 +2997,17 @@ S: Odd Fixes
F: Documentation/networking/README.ipw2200
F: drivers/net/wireless/ipw2x00/ipw2200.*
+INTEL(R) TRUSTED EXECUTION TECHNOLOGY (TXT)
+M: Joseph Cihula <joseph.cihula@intel.com>
+M: Shane Wang <shane.wang@intel.com>
+L: tboot-devel@lists.sourceforge.net
+W: http://tboot.sourceforge.net
+T: Mercurial http://www.bughost.org/repos.hg/tboot.hg
+S: Supported
+F: Documentation/intel_txt.txt
+F: include/linux/tboot.h
+F: arch/x86/kernel/tboot.c
+
INTEL WIRELESS WIMAX CONNECTION 2400
M: Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
M: linux-wimax@intel.com
@@ -3002,10 +3057,9 @@ F: net/ipv4/netfilter/ipt_MASQUERADE.c
IP1000A 10/100/1000 GIGABIT ETHERNET DRIVER
M: Francois Romieu <romieu@fr.zoreil.com>
M: Sorbica Shieh <sorbica@icplus.com.tw>
-M: Jesse Huang <jesse@icplus.com.tw>
L: netdev@vger.kernel.org
S: Maintained
-F: drivers/net/ipg.c
+F: drivers/net/ipg.*
IPATH DRIVER
M: Ralph Campbell <infinipath@qlogic.com>
@@ -3284,15 +3338,17 @@ F: include/linux/key-type.h
F: include/keys/
F: security/keys/
-KGDB
+KGDB / KDB /debug_core
M: Jason Wessel <jason.wessel@windriver.com>
+W: http://kgdb.wiki.kernel.org/
L: kgdb-bugreport@lists.sourceforge.net
S: Maintained
F: Documentation/DocBook/kgdb.tmpl
F: drivers/misc/kgdbts.c
F: drivers/serial/kgdboc.c
+F: include/linux/kdb.h
F: include/linux/kgdb.h
-F: kernel/kgdb.c
+F: kernel/debug/
KMEMCHECK
M: Vegard Nossum <vegardno@ifi.uio.no>
@@ -3624,7 +3680,8 @@ F: drivers/net/wireless/mwl8k.c
MARVELL SOC MMC/SD/SDIO CONTROLLER DRIVER
M: Nicolas Pitre <nico@fluxnic.net>
-S: Maintained
+S: Odd Fixes
+F: drivers/mmc/host/mvsdio.*
MARVELL YUKON / SYSKONNECT DRIVER
M: Mirko Lindner <mlindner@syskonnect.de>
@@ -3852,7 +3909,6 @@ M: Ramkrishna Vepa <ram.vepa@neterion.com>
M: Rastapur Santosh <santosh.rastapur@neterion.com>
M: Sivakumar Subramani <sivakumar.subramani@neterion.com>
M: Sreenivasa Honnur <sreenivasa.honnur@neterion.com>
-M: Anil Murthy <anil.murthy@neterion.com>
L: netdev@vger.kernel.org
W: http://trac.neterion.com/cgi-bin/trac.cgi/wiki/Linux?Anonymous
W: http://trac.neterion.com/cgi-bin/trac.cgi/wiki/X3100Linux?Anonymous
@@ -3957,6 +4013,7 @@ F: net/rfkill/
F: net/wireless/
F: include/net/ieee80211*
F: include/linux/wireless.h
+F: include/linux/iw_handler.h
F: drivers/net/wireless/
NETWORKING DRIVERS
@@ -4165,6 +4222,7 @@ OPROFILE
M: Robert Richter <robert.richter@amd.com>
L: oprofile-list@lists.sf.net
S: Maintained
+F: arch/*/include/asm/oprofile*.h
F: arch/*/oprofile/
F: drivers/oprofile/
F: include/linux/oprofile.h
@@ -4353,13 +4411,13 @@ M: Paul Mackerras <paulus@samba.org>
M: Ingo Molnar <mingo@elte.hu>
M: Arnaldo Carvalho de Melo <acme@redhat.com>
S: Supported
-F: kernel/perf_event.c
+F: kernel/perf_event*.c
F: include/linux/perf_event.h
-F: arch/*/kernel/perf_event.c
-F: arch/*/kernel/*/perf_event.c
-F: arch/*/kernel/*/*/perf_event.c
+F: arch/*/kernel/perf_event*.c
+F: arch/*/kernel/*/perf_event*.c
+F: arch/*/kernel/*/*/perf_event*.c
F: arch/*/include/asm/perf_event.h
-F: arch/*/lib/perf_event.c
+F: arch/*/lib/perf_event*.c
F: arch/*/kernel/perf_callchain.c
F: tools/perf/
@@ -4577,6 +4635,14 @@ S: Supported
F: Documentation/scsi/LICENSE.qla2xxx
F: drivers/scsi/qla2xxx/
+QLOGIC QLA4XXX iSCSI DRIVER
+M: Ravi Anand <ravi.anand@qlogic.com>
+M: Vikas Chaudhary <vikas.chaudhary@qlogic.com>
+M: iscsi-driver@qlogic.com
+L: linux-scsi@vger.kernel.org
+S: Supported
+F: drivers/scsi/qla4xxx/
+
QLOGIC QLA3XXX NETWORK DRIVER
M: Ron Mercer <ron.mercer@qlogic.com>
M: linux-driver@qlogic.com
@@ -4587,6 +4653,7 @@ F: drivers/net/qla3xxx.*
QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER
M: Amit Kumar Salecha <amit.salecha@qlogic.com>
+M: Anirban Chakraborty <anirban.chakraborty@qlogic.com>
M: linux-driver@qlogic.com
L: netdev@vger.kernel.org
S: Supported
@@ -4716,6 +4783,12 @@ S: Maintained
F: Documentation/rfkill.txt
F: net/rfkill/
+RICOH SMARTMEDIA/XD DRIVER
+M: Maxim Levitsky <maximlevitsky@gmail.com>
+S: Maintained
+F:	drivers/mtd/nand/r852.c
+F:	drivers/mtd/nand/r852.h
+
RISCOM8 DRIVER
S: Orphan
F: Documentation/serial/riscom8.txt
@@ -4770,6 +4843,9 @@ W: http://www.ibm.com/developerworks/linux/linux390/
S: Supported
F: arch/s390/
F: drivers/s390/
+F: fs/partitions/ibm.c
+F: Documentation/s390/
+F: Documentation/DocBook/s390*
S390 NETWORK DRIVERS
M: Ursula Braun <ursula.braun@de.ibm.com>
@@ -4938,6 +5014,12 @@ L: linux-mmc@vger.kernel.org
S: Maintained
F: drivers/mmc/host/sdhci-s3c.c
+SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) ST SPEAR DRIVER
+M: Viresh Kumar <viresh.kumar@st.com>
+L: linux-mmc@vger.kernel.org
+S: Maintained
+F: drivers/mmc/host/sdhci-spear.c
+
SECURITY SUBSYSTEM
M: James Morris <jmorris@namei.org>
L: linux-security-module@vger.kernel.org (suggested Cc:)
@@ -5121,7 +5203,7 @@ F: mm/sl?b.c
SMC91x ETHERNET DRIVER
M: Nicolas Pitre <nico@fluxnic.net>
-S: Maintained
+S: Odd Fixes
F: drivers/net/smc91x.*
SMSC47B397 HARDWARE MONITOR DRIVER
@@ -5252,6 +5334,46 @@ F: drivers/serial/sunsu.c
F: drivers/serial/sunzilog.c
F: drivers/serial/sunzilog.h
+SPEAR PLATFORM SUPPORT
+M: Viresh Kumar <viresh.kumar@st.com>
+W: http://www.st.com/spear
+S: Maintained
+F: arch/arm/plat-spear/
+
+SPEAR3XX MACHINE SUPPORT
+M: Viresh Kumar <viresh.kumar@st.com>
+W: http://www.st.com/spear
+S: Maintained
+F: arch/arm/mach-spear3xx/
+
+SPEAR6XX MACHINE SUPPORT
+M: Rajeev Kumar <rajeev-dlh.kumar@st.com>
+W: http://www.st.com/spear
+S: Maintained
+F: arch/arm/mach-spear6xx/
+
+SPEAR CLOCK FRAMEWORK SUPPORT
+M: Viresh Kumar <viresh.kumar@st.com>
+W: http://www.st.com/spear
+S: Maintained
+F: arch/arm/mach-spear*/clock.c
+F: arch/arm/mach-spear*/include/mach/clkdev.h
+F: arch/arm/plat-spear/clock.c
+F: arch/arm/plat-spear/include/plat/clock.h and clkdev.h
+
+SPEAR PAD MULTIPLEXING SUPPORT
+M: Viresh Kumar <viresh.kumar@st.com>
+W: http://www.st.com/spear
+S: Maintained
+F: arch/arm/plat-spear/include/plat/padmux.h
+F: arch/arm/plat-spear/padmux.c
+F: arch/arm/mach-spear*/spear*xx.c
+F: arch/arm/mach-spear*/include/mach/generic.h
+F: arch/arm/mach-spear3xx/spear3*0.c
+F: arch/arm/mach-spear3xx/spear3*0_evb.c
+F: arch/arm/mach-spear6xx/spear600.c
+F: arch/arm/mach-spear6xx/spear600_evb.c
+
SPECIALIX IO8+ MULTIPORT SERIAL CARD DRIVER
M: Roger Wolff <R.E.Wolff@BitWizard.nl>
S: Supported
@@ -5492,7 +5614,7 @@ S: Maintained
F: drivers/mmc/host/tmio_mmc.*
TMPFS (SHMEM FILESYSTEM)
-M: Hugh Dickins <hugh.dickins@tiscali.co.uk>
+M: Hugh Dickins <hughd@google.com>
L: linux-mm@kvack.org
S: Maintained
F: include/linux/shmem_fs.h
@@ -5880,7 +6002,7 @@ M: Laurent Pinchart <laurent.pinchart@skynet.be>
L: linux-uvc-devel@lists.berlios.de (subscribers-only)
L: linux-media@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git
-W: http://linux-uvc.berlios.de
+W: http://www.ideasonboard.org/uvc/
S: Maintained
F: drivers/media/video/uvc/
diff --git a/Makefile b/Makefile
index 701bc65b3952..6e39ec701cbf 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 2
PATCHLEVEL = 6
-SUBLEVEL = 34
-EXTRAVERSION = -rc7
+SUBLEVEL = 35
+EXTRAVERSION = -rc1
NAME = Sheep on Meth
# *DOCUMENTATION*
diff --git a/arch/Kconfig b/arch/Kconfig
index e5eb1337a537..acda512da2e2 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -42,15 +42,10 @@ config KPROBES
If in doubt, say "N".
config OPTPROBES
- bool "Kprobes jump optimization support (EXPERIMENTAL)"
- default y
- depends on KPROBES
+ def_bool y
+ depends on KPROBES && HAVE_OPTPROBES
depends on !PREEMPT
- depends on HAVE_OPTPROBES
select KALLSYMS_ALL
- help
- This option will allow kprobes to optimize breakpoint to
- a jump for reducing its overhead.
config HAVE_EFFICIENT_UNALIGNED_ACCESS
bool
@@ -142,6 +137,17 @@ config HAVE_HW_BREAKPOINT
bool
depends on PERF_EVENTS
+config HAVE_MIXED_BREAKPOINTS_REGS
+ bool
+ depends on HAVE_HW_BREAKPOINT
+ help
+ Depending on the arch implementation of hardware breakpoints,
+ some of them have separate registers for data and instruction
+	  breakpoint addresses, while others use mixed registers to store
+	  them and define the access type in a control register.
+	  Select this option if your arch implements breakpoints in the
+ latter fashion.
+
config HAVE_USER_RETURN_NOTIFIER
bool
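HAVE_MIXED_BREAKPOINTS_REGS is not user-visible; an architecture whose hardware stores data and instruction breakpoint addresses in the same registers, with the access type kept in a control register, is expected to select it from its own Kconfig. A minimal sketch, using a made-up architecture symbol purely for illustration:

config MYARCH
	def_bool y
	select HAVE_HW_BREAKPOINT
	select HAVE_MIXED_BREAKPOINTS_REGS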
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 75291fdd379f..3e2e540a0f2a 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -51,9 +51,8 @@ config GENERIC_TIME
bool
default y
-config ARCH_USES_GETTIMEOFFSET
- bool
- default y
+config GENERIC_CMOS_UPDATE
+ def_bool y
config ZONE_DMA
bool
@@ -62,6 +61,9 @@ config ZONE_DMA
config NEED_DMA_MAP_STATE
def_bool y
+config NEED_SG_DMA_LENGTH
+ def_bool y
+
config GENERIC_ISA_DMA
bool
default y
diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index 610dff44d94b..e756d04b6cd5 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -17,8 +17,8 @@
#define ATOMIC_INIT(i) ( (atomic_t) { (i) } )
#define ATOMIC64_INIT(i) ( (atomic64_t) { (i) } )
-#define atomic_read(v) ((v)->counter + 0)
-#define atomic64_read(v) ((v)->counter + 0)
+#define atomic_read(v) (*(volatile int *)&(v)->counter)
+#define atomic64_read(v) (*(volatile long *)&(v)->counter)
#define atomic_set(v,i) ((v)->counter = (i))
#define atomic64_set(v,i) ((v)->counter = (i))
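The new definitions replace the "+ 0" trick with a cast through a volatile pointer, which forces the compiler to emit a real load of (v)->counter on every atomic_read()/atomic64_read() call rather than reusing a value cached in a register; this is the same idea as the kernel's ACCESS_ONCE() helper. A stand-alone C sketch of the idiom, with illustrative names rather than the kernel's:

	/* Illustrative only: every call performs a fresh load of *p,
	 * so the compiler cannot hoist or cache it across a loop. */
	static inline int read_once_int(const int *p)
	{
		return *(const volatile int *)p;
	}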
diff --git a/arch/alpha/include/asm/bitops.h b/arch/alpha/include/asm/bitops.h
index 15f3ae25c511..1dce24bc455a 100644
--- a/arch/alpha/include/asm/bitops.h
+++ b/arch/alpha/include/asm/bitops.h
@@ -405,29 +405,31 @@ static inline int fls(int x)
#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
/* Whee. EV67 can calculate it directly. */
-static inline unsigned long hweight64(unsigned long w)
+static inline unsigned long __arch_hweight64(unsigned long w)
{
return __kernel_ctpop(w);
}
-static inline unsigned int hweight32(unsigned int w)
+static inline unsigned int __arch_hweight32(unsigned int w)
{
- return hweight64(w);
+ return __arch_hweight64(w);
}
-static inline unsigned int hweight16(unsigned int w)
+static inline unsigned int __arch_hweight16(unsigned int w)
{
- return hweight64(w & 0xffff);
+ return __arch_hweight64(w & 0xffff);
}
-static inline unsigned int hweight8(unsigned int w)
+static inline unsigned int __arch_hweight8(unsigned int w)
{
- return hweight64(w & 0xff);
+ return __arch_hweight64(w & 0xff);
}
#else
-#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/arch_hweight.h>
#endif
+#include <asm-generic/bitops/const_hweight.h>
+
#endif /* __KERNEL__ */
#include <asm-generic/bitops/find.h>
@@ -436,22 +438,20 @@ static inline unsigned int hweight8(unsigned int w)
/*
* Every architecture must define this function. It's the fastest
- * way of searching a 140-bit bitmap where the first 100 bits are
- * unlikely to be set. It's guaranteed that at least one of the 140
- * bits is set.
+ * way of searching a 100-bit bitmap. It's guaranteed that at least
+ * one of the 100 bits is set.
*/
static inline unsigned long
-sched_find_first_bit(unsigned long b[3])
+sched_find_first_bit(const unsigned long b[2])
{
- unsigned long b0 = b[0], b1 = b[1], b2 = b[2];
- unsigned long ofs;
+ unsigned long b0, b1, ofs, tmp;
- ofs = (b1 ? 64 : 128);
- b1 = (b1 ? b1 : b2);
- ofs = (b0 ? 0 : ofs);
- b0 = (b0 ? b0 : b1);
+ b0 = b[0];
+ b1 = b[1];
+ ofs = (b0 ? 0 : 64);
+ tmp = (b0 ? b0 : b1);
- return __ffs(b0) + ofs;
+ return __ffs(tmp) + ofs;
}
#include <asm-generic/bitops/ext2-non-atomic.h>
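The rewritten sched_find_first_bit() scans a two-word bitmap (128 bits of storage for the 100 priorities): if b[0] has any bit set it is searched with offset 0, otherwise b[1] is searched with offset 64, and __ffs() of the chosen word plus the offset is returned. As a small worked example with illustrative values, b[0] == 0 and b[1] == 0x10 gives __ffs(0x10) + 64 = 4 + 64 = 68, i.e. the first set bit is bit 68.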
diff --git a/arch/alpha/include/asm/scatterlist.h b/arch/alpha/include/asm/scatterlist.h
index 440747ca6349..5728c52a7412 100644
--- a/arch/alpha/include/asm/scatterlist.h
+++ b/arch/alpha/include/asm/scatterlist.h
@@ -1,24 +1,7 @@
#ifndef _ALPHA_SCATTERLIST_H
#define _ALPHA_SCATTERLIST_H
-#include <asm/page.h>
-#include <asm/types.h>
-
-struct scatterlist {
-#ifdef CONFIG_DEBUG_SG
- unsigned long sg_magic;
-#endif
- unsigned long page_link;
- unsigned int offset;
-
- unsigned int length;
-
- dma_addr_t dma_address;
- __u32 dma_length;
-};
-
-#define sg_dma_address(sg) ((sg)->dma_address)
-#define sg_dma_len(sg) ((sg)->dma_length)
+#include <asm-generic/scatterlist.h>
#define ISA_DMA_THRESHOLD (~0UL)
diff --git a/arch/alpha/include/asm/thread_info.h b/arch/alpha/include/asm/thread_info.h
index b3e888638bb7..6f32f9c84a2d 100644
--- a/arch/alpha/include/asm/thread_info.h
+++ b/arch/alpha/include/asm/thread_info.h
@@ -77,7 +77,7 @@ register struct thread_info *__current_thread_info __asm__("$8");
#define TIF_UAC_NOPRINT 10 /* see sysinfo.h */
#define TIF_UAC_NOFIX 11
#define TIF_UAC_SIGBUS 12
-#define TIF_MEMDIE 13
+#define TIF_MEMDIE 13 /* is terminating due to OOM killer */
#define TIF_RESTORE_SIGMASK 14 /* restore signal mask in do_signal */
#define TIF_FREEZE 16 /* is freezing for suspend */
diff --git a/arch/alpha/kernel/pci-sysfs.c b/arch/alpha/kernel/pci-sysfs.c
index d979e7c7bc4b..a5fffc882c72 100644
--- a/arch/alpha/kernel/pci-sysfs.c
+++ b/arch/alpha/kernel/pci-sysfs.c
@@ -53,6 +53,7 @@ static int __pci_mmap_fits(struct pci_dev *pdev, int num,
/**
* pci_mmap_resource - map a PCI resource into user memory space
+ * @filp: open sysfs file
* @kobj: kobject for mapping
* @attr: struct bin_attribute for the file being mapped
* @vma: struct vm_area_struct passed into the mmap
@@ -60,7 +61,8 @@ static int __pci_mmap_fits(struct pci_dev *pdev, int num,
*
* Use the bus mapping routines to map a PCI resource into userspace.
*/
-static int pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr,
+static int pci_mmap_resource(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
struct vm_area_struct *vma, int sparse)
{
struct pci_dev *pdev = to_pci_dev(container_of(kobj,
@@ -89,14 +91,14 @@ static int pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr,
return hose_mmap_page_range(pdev->sysdata, vma, mmap_type, sparse);
}
-static int pci_mmap_resource_sparse(struct kobject *kobj,
+static int pci_mmap_resource_sparse(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr,
struct vm_area_struct *vma)
{
-	return pci_mmap_resource(kobj, attr, vma, 1);
+	return pci_mmap_resource(filp, kobj, attr, vma, 1);
}
-static int pci_mmap_resource_dense(struct kobject *kobj,
+static int pci_mmap_resource_dense(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr,
struct vm_area_struct *vma)
{
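These prototype changes follow a tree-wide sysfs change in which binary attribute callbacks, including ->mmap(), gained a struct file * as their first argument; the wrappers here gain the new first argument to match. The resulting callback shape, sketched from the hunks above:

	static int pci_mmap_resource_sparse(struct file *filp, struct kobject *kobj,
					    struct bin_attribute *attr,
					    struct vm_area_struct *vma);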
diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c
index 5d0826654c61..1efbed82c0fd 100644
--- a/arch/alpha/kernel/time.c
+++ b/arch/alpha/kernel/time.c
@@ -51,6 +51,7 @@
#include <linux/mc146818rtc.h>
#include <linux/time.h>
#include <linux/timex.h>
+#include <linux/clocksource.h>
#include "proto.h"
#include "irq_impl.h"
@@ -75,8 +76,6 @@ static struct {
__u32 last_time;
/* ticks/cycle * 2^48 */
unsigned long scaled_ticks_per_cycle;
- /* last time the CMOS clock got updated */
- time_t last_rtc_update;
/* partial unused tick */
unsigned long partial_tick;
} state;
@@ -91,6 +90,52 @@ static inline __u32 rpcc(void)
return result;
}
+int update_persistent_clock(struct timespec now)
+{
+ return set_rtc_mmss(now.tv_sec);
+}
+
+void read_persistent_clock(struct timespec *ts)
+{
+ unsigned int year, mon, day, hour, min, sec, epoch;
+
+ sec = CMOS_READ(RTC_SECONDS);
+ min = CMOS_READ(RTC_MINUTES);
+ hour = CMOS_READ(RTC_HOURS);
+ day = CMOS_READ(RTC_DAY_OF_MONTH);
+ mon = CMOS_READ(RTC_MONTH);
+ year = CMOS_READ(RTC_YEAR);
+
+ if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
+ sec = bcd2bin(sec);
+ min = bcd2bin(min);
+ hour = bcd2bin(hour);
+ day = bcd2bin(day);
+ mon = bcd2bin(mon);
+ year = bcd2bin(year);
+ }
+
+ /* PC-like is standard; used for year >= 70 */
+ epoch = 1900;
+ if (year < 20)
+ epoch = 2000;
+ else if (year >= 20 && year < 48)
+ /* NT epoch */
+ epoch = 1980;
+ else if (year >= 48 && year < 70)
+ /* Digital UNIX epoch */
+ epoch = 1952;
+
+ printk(KERN_INFO "Using epoch = %d\n", epoch);
+
+ if ((year += epoch) < 1970)
+ year += 100;
+
+ ts->tv_sec = mktime(year, mon, day, hour, min, sec);
+}
+
+
+
/*
* timer_interrupt() needs to keep up the real-time clock,
* as well as call the "do_timer()" routine every clocktick
@@ -123,19 +168,6 @@ irqreturn_t timer_interrupt(int irq, void *dev)
if (nticks)
do_timer(nticks);
- /*
- * If we have an externally synchronized Linux clock, then update
- * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
- * called as close as possible to 500 ms before the new second starts.
- */
- if (ntp_synced()
- && xtime.tv_sec > state.last_rtc_update + 660
- && xtime.tv_nsec >= 500000 - ((unsigned) TICK_SIZE) / 2
- && xtime.tv_nsec <= 500000 + ((unsigned) TICK_SIZE) / 2) {
- int tmp = set_rtc_mmss(xtime.tv_sec);
- state.last_rtc_update = xtime.tv_sec - (tmp ? 600 : 0);
- }
-
write_sequnlock(&xtime_lock);
#ifndef CONFIG_SMP
@@ -301,10 +333,38 @@ rpcc_after_update_in_progress(void)
return rpcc();
}
+#ifndef CONFIG_SMP
+/* Until and unless we figure out how to get cpu cycle counters
+ in sync and keep them there, we can't use the rpcc. */
+static cycle_t read_rpcc(struct clocksource *cs)
+{
+ cycle_t ret = (cycle_t)rpcc();
+ return ret;
+}
+
+static struct clocksource clocksource_rpcc = {
+ .name = "rpcc",
+ .rating = 300,
+ .read = read_rpcc,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS
+};
+
+static inline void register_rpcc_clocksource(long cycle_freq)
+{
+ clocksource_calc_mult_shift(&clocksource_rpcc, cycle_freq, 4);
+ clocksource_register(&clocksource_rpcc);
+}
+#else /* !CONFIG_SMP */
+static inline void register_rpcc_clocksource(long cycle_freq)
+{
+}
+#endif /* !CONFIG_SMP */
+
void __init
time_init(void)
{
- unsigned int year, mon, day, hour, min, sec, cc1, cc2, epoch;
+ unsigned int cc1, cc2;
unsigned long cycle_freq, tolerance;
long diff;
@@ -348,53 +408,17 @@ time_init(void)
bogomips yet, but this is close on a 500Mhz box. */
__delay(1000000);
- sec = CMOS_READ(RTC_SECONDS);
- min = CMOS_READ(RTC_MINUTES);
- hour = CMOS_READ(RTC_HOURS);
- day = CMOS_READ(RTC_DAY_OF_MONTH);
- mon = CMOS_READ(RTC_MONTH);
- year = CMOS_READ(RTC_YEAR);
-
- if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
- sec = bcd2bin(sec);
- min = bcd2bin(min);
- hour = bcd2bin(hour);
- day = bcd2bin(day);
- mon = bcd2bin(mon);
- year = bcd2bin(year);
- }
-
- /* PC-like is standard; used for year >= 70 */
- epoch = 1900;
- if (year < 20)
- epoch = 2000;
- else if (year >= 20 && year < 48)
- /* NT epoch */
- epoch = 1980;
- else if (year >= 48 && year < 70)
- /* Digital UNIX epoch */
- epoch = 1952;
-
- printk(KERN_INFO "Using epoch = %d\n", epoch);
-
- if ((year += epoch) < 1970)
- year += 100;
-
- xtime.tv_sec = mktime(year, mon, day, hour, min, sec);
- xtime.tv_nsec = 0;
-
- wall_to_monotonic.tv_sec -= xtime.tv_sec;
- wall_to_monotonic.tv_nsec = 0;
if (HZ > (1<<16)) {
extern void __you_loose (void);
__you_loose();
}
+ register_rpcc_clocksource(cycle_freq);
+
state.last_time = cc1;
state.scaled_ticks_per_cycle
= ((unsigned long) HZ << FIX_SHIFT) / cycle_freq;
- state.last_rtc_update = 0;
state.partial_tick = 0L;
/* Startup the timer source. */
@@ -402,44 +426,6 @@ time_init(void)
}
/*
- * Use the cycle counter to estimate an displacement from the last time
- * tick. Unfortunately the Alpha designers made only the low 32-bits of
- * the cycle counter active, so we overflow on 8.2 seconds on a 500MHz
- * part. So we can't do the "find absolute time in terms of cycles" thing
- * that the other ports do.
- */
-u32 arch_gettimeoffset(void)
-{
-#ifdef CONFIG_SMP
- /* Until and unless we figure out how to get cpu cycle counters
- in sync and keep them there, we can't use the rpcc tricks. */
- return 0;
-#else
- unsigned long delta_cycles, delta_usec, partial_tick;
-
- delta_cycles = rpcc() - state.last_time;
- partial_tick = state.partial_tick;
- /*
- * usec = cycles * ticks_per_cycle * 2**48 * 1e6 / (2**48 * ticks)
- * = cycles * (s_t_p_c) * 1e6 / (2**48 * ticks)
- * = cycles * (s_t_p_c) * 15625 / (2**42 * ticks)
- *
- * which, given a 600MHz cycle and a 1024Hz tick, has a
- * dynamic range of about 1.7e17, which is less than the
- * 1.8e19 in an unsigned long, so we are safe from overflow.
- *
- * Round, but with .5 up always, since .5 to even is harder
- * with no clear gain.
- */
-
- delta_usec = (delta_cycles * state.scaled_ticks_per_cycle
- + partial_tick) * 15625;
- delta_usec = ((delta_usec / ((1UL << (FIX_SHIFT-6-1)) * HZ)) + 1) / 2;
- return delta_usec * 1000;
-#endif
-}
-
-/*
* In order to set the CMOS clock precisely, set_rtc_mmss has to be
* called 500 ms after the second nowtime has started, because when
* nowtime is written into the registers of the CMOS clock, it will
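Taken together, the time.c changes move alpha off arch_gettimeoffset(): the CMOS RTC is now handled through the generic read_persistent_clock()/update_persistent_clock() hooks, and on non-SMP configurations the 32-bit rpcc cycle counter is registered as a clocksource via clocksource_calc_mult_shift() and clocksource_register(). As a worked example of the epoch table in read_persistent_clock() above (the value is illustrative), an RTC year byte of 25 falls in the 20..47 range, so the NT epoch 1980 is chosen and the resulting year is 1980 + 25 = 2005.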
diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
index 00a31deaa96e..fadd5f882ff9 100644
--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
@@ -142,7 +142,6 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
goto bad_area;
}
- survive:
/* If for any reason at all we couldn't handle the fault,
make sure we exit gracefully rather than endlessly redo
the fault. */
@@ -188,16 +187,10 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
/* We ran out of memory, or some other thing happened to us that
made us unable to handle the page fault gracefully. */
out_of_memory:
- if (is_global_init(current)) {
- yield();
- down_read(&mm->mmap_sem);
- goto survive;
- }
- printk(KERN_ALERT "VM: killing process %s(%d)\n",
- current->comm, task_pid_nr(current));
if (!user_mode(regs))
goto no_context;
- do_group_exit(SIGKILL);
+ pagefault_out_of_memory();
+ return;
do_sigbus:
/* Send a sigbus, regardless of whether we were in kernel
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 92622eb5cc0d..1f254bd6c937 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -13,7 +13,7 @@ config ARM
select RTC_LIB
select SYS_SUPPORTS_APM_EMULATION
select GENERIC_ATOMIC64 if (!CPU_32v6K)
- select HAVE_OPROFILE
+ select HAVE_OPROFILE if (HAVE_PERF_EVENTS)
select HAVE_ARCH_KGDB
select HAVE_KPROBES if (!XIP_KERNEL)
select HAVE_KRETPROBES if (HAVE_KPROBES)
@@ -21,6 +21,7 @@ config ARM
select HAVE_GENERIC_DMA_COHERENT
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_LZO
+ select HAVE_KERNEL_LZMA
select HAVE_PERF_EVENTS
select PERF_USE_VMALLOC
help
@@ -42,6 +43,11 @@ config GENERIC_GPIO
config GENERIC_TIME
bool
+ default y
+
+config ARCH_USES_GETTIMEOFFSET
+ bool
+ default n
config GENERIC_CLOCKEVENTS
bool
@@ -175,28 +181,6 @@ config ARM_L1_CACHE_SHIFT_6
help
Setting ARM L1 cache line size to 64 Bytes.
-if OPROFILE
-
-config OPROFILE_ARMV6
- def_bool y
- depends on CPU_V6 && !SMP
- select OPROFILE_ARM11_CORE
-
-config OPROFILE_MPCORE
- def_bool y
- depends on CPU_V6 && SMP
- select OPROFILE_ARM11_CORE
-
-config OPROFILE_ARM11_CORE
- bool
-
-config OPROFILE_ARMV7
- def_bool y
- depends on CPU_V7 && !SMP
- bool
-
-endif
-
config VECTORS_BASE
hex
default 0xffff0000 if MMU || CPU_HIGH_VECTOR
@@ -231,6 +215,7 @@ config ARCH_AAEC2000
select CPU_ARM920T
select ARM_AMBA
select HAVE_CLK
+ select ARCH_USES_GETTIMEOFFSET
help
This enables support for systems based on the Agilent AAEC-2000
@@ -238,21 +223,22 @@ config ARCH_INTEGRATOR
bool "ARM Ltd. Integrator family"
select ARM_AMBA
select ARCH_HAS_CPUFREQ
- select HAVE_CLK
select COMMON_CLKDEV
- select ICST525
+ select ICST
+ select GENERIC_CLOCKEVENTS
+ select PLAT_VERSATILE
help
Support for ARM's Integrator platform.
config ARCH_REALVIEW
bool "ARM Ltd. RealView family"
select ARM_AMBA
- select HAVE_CLK
select COMMON_CLKDEV
- select ICST307
- select GENERIC_TIME
+ select ICST
select GENERIC_CLOCKEVENTS
select ARCH_WANT_OPTIONAL_GPIOLIB
+ select PLAT_VERSATILE
+ select ARM_TIMER_SP804
select GPIO_PL061 if GPIOLIB
help
This enables support for ARM Ltd RealView boards.
@@ -261,20 +247,33 @@ config ARCH_VERSATILE
bool "ARM Ltd. Versatile family"
select ARM_AMBA
select ARM_VIC
- select HAVE_CLK
select COMMON_CLKDEV
- select ICST307
- select GENERIC_TIME
+ select ICST
select GENERIC_CLOCKEVENTS
select ARCH_WANT_OPTIONAL_GPIOLIB
+ select PLAT_VERSATILE
+ select ARM_TIMER_SP804
help
This enables support for ARM Ltd Versatile board.
+config ARCH_VEXPRESS
+ bool "ARM Ltd. Versatile Express family"
+ select ARCH_WANT_OPTIONAL_GPIOLIB
+ select ARM_AMBA
+ select ARM_TIMER_SP804
+ select COMMON_CLKDEV
+ select GENERIC_CLOCKEVENTS
+ select HAVE_CLK
+ select ICST
+ select PLAT_VERSATILE
+ help
+ This enables support for the ARM Ltd Versatile Express boards.
+
config ARCH_AT91
bool "Atmel AT91"
- select GENERIC_GPIO
select ARCH_REQUIRE_GPIOLIB
select HAVE_CLK
+ select ARCH_USES_GETTIMEOFFSET
help
This enables support for systems based on the Atmel AT91RM9200,
AT91SAM9 and AT91CAP9 processors.
@@ -285,7 +284,6 @@ config ARCH_BCMRING
select CPU_V6
select ARM_AMBA
select COMMON_CLKDEV
- select GENERIC_TIME
select GENERIC_CLOCKEVENTS
select ARCH_WANT_OPTIONAL_GPIOLIB
help
@@ -294,14 +292,23 @@ config ARCH_BCMRING
config ARCH_CLPS711X
bool "Cirrus Logic CLPS711x/EP721x-based"
select CPU_ARM720T
+ select ARCH_USES_GETTIMEOFFSET
help
Support for Cirrus Logic 711x/721x based boards.
+config ARCH_CNS3XXX
+ bool "Cavium Networks CNS3XXX family"
+ select CPU_V6
+ select GENERIC_CLOCKEVENTS
+ select ARM_GIC
+ help
+ Support for Cavium Networks CNS3XXX platform.
+
config ARCH_GEMINI
bool "Cortina Systems Gemini"
select CPU_FA526
- select GENERIC_GPIO
select ARCH_REQUIRE_GPIOLIB
+ select ARCH_USES_GETTIMEOFFSET
help
Support for the Cortina Systems Gemini family SoCs
@@ -310,6 +317,7 @@ config ARCH_EBSA110
select CPU_SA110
select ISA
select NO_IOPORT
+ select ARCH_USES_GETTIMEOFFSET
help
This is an evaluation board for the StrongARM processor available
from Digital. It has limited hardware on-board, including an
@@ -321,11 +329,10 @@ config ARCH_EP93XX
select CPU_ARM920T
select ARM_AMBA
select ARM_VIC
- select GENERIC_GPIO
- select HAVE_CLK
select COMMON_CLKDEV
select ARCH_REQUIRE_GPIOLIB
select ARCH_HAS_HOLES_MEMORYMODEL
+ select ARCH_USES_GETTIMEOFFSET
help
This enables support for the Cirrus EP93xx series of CPUs.
@@ -333,16 +340,15 @@ config ARCH_FOOTBRIDGE
bool "FootBridge"
select CPU_SA110
select FOOTBRIDGE
+ select ARCH_USES_GETTIMEOFFSET
help
Support for systems based on the DC21285 companion chip
("FootBridge"), such as the Simtec CATS and the Rebel NetWinder.
config ARCH_MXC
bool "Freescale MXC/iMX-based"
- select GENERIC_TIME
select GENERIC_CLOCKEVENTS
select ARCH_REQUIRE_GPIOLIB
- select HAVE_CLK
select COMMON_CLKDEV
help
Support for Freescale MXC/iMX-based family of processors
@@ -350,12 +356,9 @@ config ARCH_MXC
config ARCH_STMP3XXX
bool "Freescale STMP3xxx"
select CPU_ARM926T
- select HAVE_CLK
select COMMON_CLKDEV
select ARCH_REQUIRE_GPIOLIB
- select GENERIC_TIME
select GENERIC_CLOCKEVENTS
- select GENERIC_GPIO
select USB_ARCH_HAS_EHCI
help
Support for systems based on the Freescale 3xxx CPUs.
@@ -365,7 +368,6 @@ config ARCH_NETX
select CPU_ARM926T
select ARM_VIC
select GENERIC_CLOCKEVENTS
- select GENERIC_TIME
help
This enables support for systems based on the Hilscher NetX Soc
@@ -373,6 +375,7 @@ config ARCH_H720X
bool "Hynix HMS720x-based"
select CPU_ARM720T
select ISA_DMA_API
+ select ARCH_USES_GETTIMEOFFSET
help
This enables support for systems based on the Hynix HMS720x
@@ -393,7 +396,6 @@ config ARCH_IOP32X
select CPU_XSCALE
select PLAT_IOP
select PCI
- select GENERIC_GPIO
select ARCH_REQUIRE_GPIOLIB
help
Support for Intel's 80219 and IOP32X (XScale) family of
@@ -405,7 +407,6 @@ config ARCH_IOP33X
select CPU_XSCALE
select PLAT_IOP
select PCI
- select GENERIC_GPIO
select ARCH_REQUIRE_GPIOLIB
help
Support for Intel's IOP33X (XScale) family of processors.
@@ -415,6 +416,7 @@ config ARCH_IXP23XX
depends on MMU
select CPU_XSC3
select PCI
+ select ARCH_USES_GETTIMEOFFSET
help
Support for Intel's IXP23xx (XScale) family of processors.
@@ -423,6 +425,7 @@ config ARCH_IXP2000
depends on MMU
select CPU_XSCALE
select PCI
+ select ARCH_USES_GETTIMEOFFSET
help
Support for Intel's IXP2400/2800 (XScale) family of processors.
@@ -431,7 +434,6 @@ config ARCH_IXP4XX
depends on MMU
select CPU_XSCALE
select GENERIC_GPIO
- select GENERIC_TIME
select GENERIC_CLOCKEVENTS
select DMABOUNCE if PCI
help
@@ -441,6 +443,7 @@ config ARCH_L7200
bool "LinkUp-L7200"
select CPU_ARM720T
select FIQ
+ select ARCH_USES_GETTIMEOFFSET
help
Say Y here if you intend to run this kernel on a LinkUp Systems
L7200 Software Development Board which uses an ARM720T processor.
@@ -454,9 +457,7 @@ config ARCH_L7200
config ARCH_DOVE
bool "Marvell Dove"
select PCI
- select GENERIC_GPIO
select ARCH_REQUIRE_GPIOLIB
- select GENERIC_TIME
select GENERIC_CLOCKEVENTS
select PLAT_ORION
help
@@ -466,9 +467,7 @@ config ARCH_KIRKWOOD
bool "Marvell Kirkwood"
select CPU_FEROCEON
select PCI
- select GENERIC_GPIO
select ARCH_REQUIRE_GPIOLIB
- select GENERIC_TIME
select GENERIC_CLOCKEVENTS
select PLAT_ORION
help
@@ -478,7 +477,6 @@ config ARCH_KIRKWOOD
config ARCH_LOKI
bool "Marvell Loki (88RC8480)"
select CPU_FEROCEON
- select GENERIC_TIME
select GENERIC_CLOCKEVENTS
select PLAT_ORION
help
@@ -488,9 +486,7 @@ config ARCH_MV78XX0
bool "Marvell MV78xx0"
select CPU_FEROCEON
select PCI
- select GENERIC_GPIO
select ARCH_REQUIRE_GPIOLIB
- select GENERIC_TIME
select GENERIC_CLOCKEVENTS
select PLAT_ORION
help
@@ -502,9 +498,7 @@ config ARCH_ORION5X
depends on MMU
select CPU_FEROCEON
select PCI
- select GENERIC_GPIO
select ARCH_REQUIRE_GPIOLIB
- select GENERIC_TIME
select GENERIC_CLOCKEVENTS
select PLAT_ORION
help
@@ -515,11 +509,8 @@ config ARCH_ORION5X
config ARCH_MMP
bool "Marvell PXA168/910/MMP2"
depends on MMU
- select GENERIC_GPIO
select ARCH_REQUIRE_GPIOLIB
- select HAVE_CLK
select COMMON_CLKDEV
- select GENERIC_TIME
select GENERIC_CLOCKEVENTS
select TICK_ONESHOT
select PLAT_PXA
@@ -529,8 +520,8 @@ config ARCH_MMP
config ARCH_KS8695
bool "Micrel/Kendin KS8695"
select CPU_ARM922T
- select GENERIC_GPIO
- select ARCH_REQUIRE_GPIOLIB
+ select ARCH_REQUIRE_GPIOLIB
+ select ARCH_USES_GETTIMEOFFSET
help
Support for Micrel/Kendin KS8695 "Centaur" (ARM922T) based
System-on-Chip devices.
@@ -539,7 +530,6 @@ config ARCH_NS9XXX
bool "NetSilicon NS9xxx"
select CPU_ARM926T
select GENERIC_GPIO
- select GENERIC_TIME
select GENERIC_CLOCKEVENTS
select HAVE_CLK
help
@@ -552,10 +542,7 @@ config ARCH_W90X900
bool "Nuvoton W90X900 CPU"
select CPU_ARM926T
select ARCH_REQUIRE_GPIOLIB
- select GENERIC_GPIO
- select HAVE_CLK
select COMMON_CLKDEV
- select GENERIC_TIME
select GENERIC_CLOCKEVENTS
help
Support for Nuvoton (Winbond logic dept.) ARM9 processor,
@@ -569,7 +556,6 @@ config ARCH_W90X900
config ARCH_NUC93X
bool "Nuvoton NUC93X CPU"
select CPU_ARM926T
- select HAVE_CLK
select COMMON_CLKDEV
help
Support for Nuvoton (Winbond logic dept.) NUC93X MCU,The NUC93X is a
@@ -578,8 +564,8 @@ config ARCH_NUC93X
config ARCH_PNX4008
bool "Philips Nexperia PNX4008 Mobile"
select CPU_ARM926T
- select HAVE_CLK
select COMMON_CLKDEV
+ select ARCH_USES_GETTIMEOFFSET
help
This enables support for Philips PNX4008 mobile platform.
@@ -588,11 +574,8 @@ config ARCH_PXA
depends on MMU
select ARCH_MTD_XIP
select ARCH_HAS_CPUFREQ
- select GENERIC_GPIO
- select HAVE_CLK
select COMMON_CLKDEV
select ARCH_REQUIRE_GPIOLIB
- select GENERIC_TIME
select GENERIC_CLOCKEVENTS
select TICK_ONESHOT
select PLAT_PXA
@@ -601,14 +584,14 @@ config ARCH_PXA
config ARCH_MSM
bool "Qualcomm MSM"
- select CPU_V6
- select GENERIC_TIME
+ select HAVE_CLK
select GENERIC_CLOCKEVENTS
help
- Support for Qualcomm MSM7K based systems. This runs on the ARM11
- apps processor of the MSM7K and depends on a shared memory
- interface to the ARM9 modem processor which runs the baseband stack
- and controls some vital subsystems (clock and power control, etc).
+ Support for Qualcomm MSM/QSD based systems. This runs on the
+ apps processor of the MSM/QSD and depends on a shared memory
+ interface to the modem processor which runs the baseband
+ stack and controls some vital subsystems
+ (clock and power control, etc).
config ARCH_SHMOBILE
bool "Renesas SH-Mobile"
@@ -625,6 +608,7 @@ config ARCH_RPC
select ISA_DMA_API
select NO_IOPORT
select ARCH_SPARSEMEM_ENABLE
+ select ARCH_USES_GETTIMEOFFSET
help
On the Acorn Risc-PC, Linux can support the internal IDE disk and
CD-ROM interface, serial and parallel port, and the floppy drive.
@@ -637,8 +621,6 @@ config ARCH_SA1100
select ARCH_MTD_XIP
select ARCH_HAS_CPUFREQ
select CPU_FREQ
- select GENERIC_GPIO
- select GENERIC_TIME
select GENERIC_CLOCKEVENTS
select HAVE_CLK
select TICK_ONESHOT
@@ -647,23 +629,28 @@ config ARCH_SA1100
Support for StrongARM 11x0 based boards.
config ARCH_S3C2410
- bool "Samsung S3C2410, S3C2412, S3C2413, S3C2440, S3C2442, S3C2443"
+ bool "Samsung S3C2410, S3C2412, S3C2413, S3C2416, S3C2440, S3C2442, S3C2443, S3C2450"
select GENERIC_GPIO
select ARCH_HAS_CPUFREQ
select HAVE_CLK
+ select ARCH_USES_GETTIMEOFFSET
help
Samsung S3C2410X CPU based systems, such as the Simtec Electronics
BAST (<http://www.simtec.co.uk/products/EB110ITX/>), the IPAQ 1940 or
the Samsung SMDK2410 development board (and derivatives).
+ Note, the S3C2416 and the S3C2450 are so close that they even share
+	the same SoC ID code. This means that there is no separate machine
+ directory (no arch/arm/mach-s3c2450) as the S3C2416 was first.
+
config ARCH_S3C64XX
bool "Samsung S3C64XX"
select PLAT_SAMSUNG
select CPU_V6
- select GENERIC_GPIO
select ARM_VIC
select HAVE_CLK
select NO_IOPORT
+ select ARCH_USES_GETTIMEOFFSET
select ARCH_HAS_CPUFREQ
select ARCH_REQUIRE_GPIOLIB
select SAMSUNG_CLKSRC
@@ -684,6 +671,7 @@ config ARCH_S5P6440
select CPU_V6
select GENERIC_GPIO
select HAVE_CLK
+ select ARCH_USES_GETTIMEOFFSET
help
Samsung S5P6440 CPU based systems
@@ -692,17 +680,19 @@ config ARCH_S5P6442
select CPU_V6
select GENERIC_GPIO
select HAVE_CLK
+ select ARCH_USES_GETTIMEOFFSET
help
Samsung S5P6442 CPU based systems
-config ARCH_S5PC1XX
- bool "Samsung S5PC1XX"
+config ARCH_S5PC100
+ bool "Samsung S5PC100"
select GENERIC_GPIO
select HAVE_CLK
select CPU_V7
select ARM_L1_CACHE_SHIFT_6
+ select ARCH_USES_GETTIMEOFFSET
help
- Samsung S5PC1XX series based systems
+ Samsung S5PC100 series based systems
config ARCH_S5PV210
bool "Samsung S5PV210/S5PC110"
@@ -710,6 +700,7 @@ config ARCH_S5PV210
select GENERIC_GPIO
select HAVE_CLK
select ARM_L1_CACHE_SHIFT_6
+ select ARCH_USES_GETTIMEOFFSET
help
Samsung S5PV210/S5PC110 series based systems
@@ -720,6 +711,7 @@ config ARCH_SHARK
select ISA_DMA
select ZONE_DMA
select PCI
+ select ARCH_USES_GETTIMEOFFSET
help
Support for the StrongARM based Digital DNARD machine, also known
as "Shark" (<http://www.shark-linux.de/shark.html>).
@@ -729,6 +721,7 @@ config ARCH_LH7A40X
select CPU_ARM922T
select ARCH_DISCONTIGMEM_ENABLE if !LH7A40X_CONTIGMEM
select ARCH_SPARSEMEM_ENABLE if !LH7A40X_CONTIGMEM
+ select ARCH_USES_GETTIMEOFFSET
help
Say Y here for systems based on one of the Sharp LH7A40X
System on a Chip processors. These CPUs include an ARM922T
@@ -742,9 +735,7 @@ config ARCH_U300
select HAVE_TCM
select ARM_AMBA
select ARM_VIC
- select GENERIC_TIME
select GENERIC_CLOCKEVENTS
- select HAVE_CLK
select COMMON_CLKDEV
select GENERIC_GPIO
help
@@ -754,9 +745,9 @@ config ARCH_U8500
bool "ST-Ericsson U8500 Series"
select CPU_V7
select ARM_AMBA
- select GENERIC_TIME
select GENERIC_CLOCKEVENTS
select COMMON_CLKDEV
+ select ARCH_REQUIRE_GPIOLIB
help
Support for ST-Ericsson's Ux500 architecture
@@ -765,23 +756,16 @@ config ARCH_NOMADIK
select ARM_AMBA
select ARM_VIC
select CPU_ARM926T
- select HAVE_CLK
select COMMON_CLKDEV
- select GENERIC_TIME
select GENERIC_CLOCKEVENTS
- select GENERIC_GPIO
select ARCH_REQUIRE_GPIOLIB
help
Support for the Nomadik platform by ST-Ericsson
config ARCH_DAVINCI
bool "TI DaVinci"
- select CPU_ARM926T
- select GENERIC_TIME
select GENERIC_CLOCKEVENTS
- select GENERIC_GPIO
select ARCH_REQUIRE_GPIOLIB
- select HAVE_CLK
select ZONE_DMA
select HAVE_IDE
select COMMON_CLKDEV
@@ -792,16 +776,24 @@ config ARCH_DAVINCI
config ARCH_OMAP
bool "TI OMAP"
- select GENERIC_GPIO
select HAVE_CLK
select ARCH_REQUIRE_GPIOLIB
select ARCH_HAS_CPUFREQ
- select GENERIC_TIME
select GENERIC_CLOCKEVENTS
select ARCH_HAS_HOLES_MEMORYMODEL
help
Support for TI's OMAP platform (OMAP1 and OMAP2).
+config PLAT_SPEAR
+ bool "ST SPEAr"
+ select ARM_AMBA
+ select ARCH_REQUIRE_GPIOLIB
+ select COMMON_CLKDEV
+ select GENERIC_CLOCKEVENTS
+ select HAVE_CLK
+ help
+ Support for ST's SPEAr platform (SPEAr3xx, SPEAr6xx and SPEAr13xx).
+
endchoice
#
@@ -817,6 +809,8 @@ source "arch/arm/mach-bcmring/Kconfig"
source "arch/arm/mach-clps711x/Kconfig"
+source "arch/arm/mach-cns3xxx/Kconfig"
+
source "arch/arm/mach-davinci/Kconfig"
source "arch/arm/mach-dove/Kconfig"
@@ -886,12 +880,14 @@ source "arch/arm/mach-sa1100/Kconfig"
source "arch/arm/plat-samsung/Kconfig"
source "arch/arm/plat-s3c24xx/Kconfig"
source "arch/arm/plat-s5p/Kconfig"
-source "arch/arm/plat-s5pc1xx/Kconfig"
+
+source "arch/arm/plat-spear/Kconfig"
if ARCH_S3C2410
source "arch/arm/mach-s3c2400/Kconfig"
source "arch/arm/mach-s3c2410/Kconfig"
source "arch/arm/mach-s3c2412/Kconfig"
+source "arch/arm/mach-s3c2416/Kconfig"
source "arch/arm/mach-s3c2440/Kconfig"
source "arch/arm/mach-s3c2443/Kconfig"
endif
@@ -904,9 +900,7 @@ source "arch/arm/mach-s5p6440/Kconfig"
source "arch/arm/mach-s5p6442/Kconfig"
-if ARCH_S5PC1XX
source "arch/arm/mach-s5pc100/Kconfig"
-endif
source "arch/arm/mach-s5pv210/Kconfig"
@@ -920,6 +914,8 @@ source "arch/arm/mach-ux500/Kconfig"
source "arch/arm/mach-versatile/Kconfig"
+source "arch/arm/mach-vexpress/Kconfig"
+
source "arch/arm/mach-w90x900/Kconfig"
# Definitions to make life easier
@@ -929,7 +925,6 @@ config ARCH_ACORN
config PLAT_IOP
bool
select GENERIC_CLOCKEVENTS
- select GENERIC_TIME
config PLAT_ORION
bool
@@ -937,6 +932,12 @@ config PLAT_ORION
config PLAT_PXA
bool
+config PLAT_VERSATILE
+ bool
+
+config ARM_TIMER_SP804
+ bool
+
source arch/arm/mm/Kconfig
config IWMMXT
@@ -1065,6 +1066,10 @@ config PCI
your box. Other bus systems are ISA, EISA, MicroChannel (MCA) or
VESA. If you have PCI, say Y, otherwise N.
+config PCI_DOMAINS
+ bool
+ depends on PCI
+
config PCI_SYSCALL
def_bool PCI
@@ -1093,10 +1098,11 @@ source "kernel/time/Kconfig"
config SMP
bool "Symmetric Multi-Processing (EXPERIMENTAL)"
depends on EXPERIMENTAL && (REALVIEW_EB_ARM11MP || REALVIEW_EB_A9MP ||\
- MACH_REALVIEW_PB11MP || MACH_REALVIEW_PBX || ARCH_OMAP4 || ARCH_U8500)
+ MACH_REALVIEW_PB11MP || MACH_REALVIEW_PBX || ARCH_OMAP4 ||\
+ ARCH_U8500 || ARCH_VEXPRESS_CA9X4)
depends on GENERIC_CLOCKEVENTS
select USE_GENERIC_SMP_HELPERS
- select HAVE_ARM_SCU if (ARCH_REALVIEW || ARCH_OMAP4 || ARCH_U8500)
+ select HAVE_ARM_SCU if (ARCH_REALVIEW || ARCH_OMAP4 || ARCH_U8500 || ARCH_VEXPRESS_CA9X4)
help
This enables support for systems with more than one CPU. If you have
a system with only one CPU, like most personal computers, say N. If
@@ -1277,7 +1283,7 @@ config HIGHPTE
config HW_PERF_EVENTS
bool "Enable hardware performance counter support for perf events"
- depends on PERF_EVENTS && CPU_HAS_PMU && (CPU_V6 || CPU_V7)
+ depends on PERF_EVENTS && CPU_HAS_PMU
default y
help
Enable hardware performance counter support for perf events. If
@@ -1415,6 +1421,17 @@ config CMDLINE
time by entering them here. As a minimum, you should specify the
memory size and the root device (e.g., mem=64M root=/dev/nfs).
+config CMDLINE_FORCE
+ bool "Always use the default kernel command string"
+ depends on CMDLINE != ""
+ help
+ Always use the default kernel command string, even if the boot
+ loader passes other arguments to the kernel.
+ This is useful if you cannot or don't want to change the
+ command-line options your boot loader passes to the kernel.
+
+ If unsure, say N.
+
config XIP_KERNEL
bool "Kernel Execute-In-Place from ROM"
depends on !ZBOOT_ROM
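CMDLINE_FORCE can only be enabled when CONFIG_CMDLINE is non-empty; with both set, anything the boot loader passes is ignored and the built-in string is used as-is. An illustrative .config fragment (the command-line contents are examples only):

  CONFIG_CMDLINE="console=ttyAMA0,115200 root=/dev/nfs"
  CONFIG_CMDLINE_FORCE=y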
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index ed820e737a8a..64ba313724d2 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -110,6 +110,8 @@ CHECKFLAGS += -D__arm__
head-y := arch/arm/kernel/head$(MMUEXT).o arch/arm/kernel/init_task.o
textofs-y := 0x00008000
textofs-$(CONFIG_ARCH_CLPS711X) := 0x00028000
+# We don't want the htc bootloader to corrupt kernel during resume
+textofs-$(CONFIG_PM_H1940) := 0x00108000
# SA1111 DMA bug: we don't want the kernel to live in precious DMA-able memory
ifeq ($(CONFIG_ARCH_SA1100),y)
textofs-$(CONFIG_SA1111) := 0x00208000
@@ -121,6 +123,7 @@ machine-$(CONFIG_ARCH_AAEC2000) := aaec2000
machine-$(CONFIG_ARCH_AT91) := at91
machine-$(CONFIG_ARCH_BCMRING) := bcmring
machine-$(CONFIG_ARCH_CLPS711X) := clps711x
+machine-$(CONFIG_ARCH_CNS3XXX) := cns3xxx
machine-$(CONFIG_ARCH_DAVINCI) := davinci
machine-$(CONFIG_ARCH_DOVE) := dove
machine-$(CONFIG_ARCH_EBSA110) := ebsa110
@@ -160,12 +163,12 @@ machine-$(CONFIG_ARCH_PNX4008) := pnx4008
machine-$(CONFIG_ARCH_PXA) := pxa
machine-$(CONFIG_ARCH_REALVIEW) := realview
machine-$(CONFIG_ARCH_RPC) := rpc
-machine-$(CONFIG_ARCH_S3C2410) := s3c2410 s3c2400 s3c2412 s3c2440 s3c2443
+machine-$(CONFIG_ARCH_S3C2410) := s3c2410 s3c2400 s3c2412 s3c2416 s3c2440 s3c2443
machine-$(CONFIG_ARCH_S3C24A0) := s3c24a0
machine-$(CONFIG_ARCH_S3C64XX) := s3c64xx
machine-$(CONFIG_ARCH_S5P6440) := s5p6440
machine-$(CONFIG_ARCH_S5P6442) := s5p6442
-machine-$(CONFIG_ARCH_S5PC1XX) := s5pc100
+machine-$(CONFIG_ARCH_S5PC100) := s5pc100
machine-$(CONFIG_ARCH_S5PV210) := s5pv210
machine-$(CONFIG_ARCH_SA1100) := sa1100
machine-$(CONFIG_ARCH_SHARK) := shark
@@ -175,9 +178,14 @@ machine-$(CONFIG_ARCH_STMP37XX) := stmp37xx
machine-$(CONFIG_ARCH_U300) := u300
machine-$(CONFIG_ARCH_U8500) := ux500
machine-$(CONFIG_ARCH_VERSATILE) := versatile
+machine-$(CONFIG_ARCH_VEXPRESS) := vexpress
machine-$(CONFIG_ARCH_W90X900) := w90x900
machine-$(CONFIG_ARCH_NUC93X) := nuc93x
machine-$(CONFIG_FOOTBRIDGE) := footbridge
+machine-$(CONFIG_MACH_SPEAR300) := spear3xx
+machine-$(CONFIG_MACH_SPEAR310) := spear3xx
+machine-$(CONFIG_MACH_SPEAR320) := spear3xx
+machine-$(CONFIG_MACH_SPEAR600) := spear6xx
# Platform directory name. This list is sorted alphanumerically
# by CONFIG_* macro name.
@@ -190,8 +198,9 @@ plat-$(CONFIG_PLAT_NOMADIK) := nomadik
plat-$(CONFIG_PLAT_ORION) := orion
plat-$(CONFIG_PLAT_PXA) := pxa
plat-$(CONFIG_PLAT_S3C24XX) := s3c24xx samsung
-plat-$(CONFIG_PLAT_S5PC1XX) := s5pc1xx samsung
plat-$(CONFIG_PLAT_S5P) := s5p samsung
+plat-$(CONFIG_PLAT_SPEAR) := spear
+plat-$(CONFIG_PLAT_VERSATILE) := versatile
ifeq ($(CONFIG_ARCH_EBSA110),y)
# This is what happens if you forget the IOCS16 line.
diff --git a/arch/arm/boot/bootp/bootp.lds b/arch/arm/boot/bootp/bootp.lds
index 8e3d81ce695e..fc54394f4340 100644
--- a/arch/arm/boot/bootp/bootp.lds
+++ b/arch/arm/boot/bootp/bootp.lds
@@ -19,7 +19,7 @@ SECTIONS
initrd_size = initrd_end - initrd_start;
_etext = .;
}
-
+
.stab 0 : { *(.stab) }
.stabstr 0 : { *(.stabstr) }
.stab.excl 0 : { *(.stab.excl) }
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index 97c89e7de7d3..53faa9063a03 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -65,6 +65,7 @@ SEDFLAGS = s/TEXT_START/$(ZTEXTADDR)/;s/BSS_START/$(ZBSSADDR)/
suffix_$(CONFIG_KERNEL_GZIP) = gzip
suffix_$(CONFIG_KERNEL_LZO) = lzo
+suffix_$(CONFIG_KERNEL_LZMA) = lzma
targets := vmlinux vmlinux.lds \
piggy.$(suffix_y) piggy.$(suffix_y).o \
diff --git a/arch/arm/boot/compressed/decompress.c b/arch/arm/boot/compressed/decompress.c
index 9c097073ce4c..4c72a97bc3e1 100644
--- a/arch/arm/boot/compressed/decompress.c
+++ b/arch/arm/boot/compressed/decompress.c
@@ -40,6 +40,10 @@ extern void error(char *);
#include "../../../../lib/decompress_unlzo.c"
#endif
+#ifdef CONFIG_KERNEL_LZMA
+#include "../../../../lib/decompress_unlzma.c"
+#endif
+
void do_decompress(u8 *input, int len, u8 *output, void (*error)(char *x))
{
decompress(input, len, NULL, NULL, output, NULL, error);
diff --git a/arch/arm/boot/compressed/piggy.lzma.S b/arch/arm/boot/compressed/piggy.lzma.S
new file mode 100644
index 000000000000..d7e69cffbc0a
--- /dev/null
+++ b/arch/arm/boot/compressed/piggy.lzma.S
@@ -0,0 +1,6 @@
+ .section .piggydata,#alloc
+ .globl input_data
+input_data:
+ .incbin "arch/arm/boot/compressed/piggy.lzma"
+ .globl input_data_end
+input_data_end:
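
The piggy.lzma.S stub above only embeds the compressed payload and exports the two input_data symbols; a minimal sketch of how such symbols are typically handed to the do_decompress() wrapper shown earlier in decompress.c (the real consumer is the existing compressed/misc.c, so the function name here is hypothetical):

#include <linux/types.h>

/* Symbols emitted by piggy.lzma.S; the consumer function name is made up. */
extern u8 input_data[], input_data_end[];

extern void do_decompress(u8 *input, int len, u8 *output,
			  void (*error)(char *x));

static void example_unpack(u8 *output, void (*error)(char *x))
{
	/* Hand the embedded payload to whichever decompressor was built in */
	do_decompress(input_data, input_data_end - input_data, output, error);
}
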
diff --git a/arch/arm/common/Kconfig b/arch/arm/common/Kconfig
index 4efbb9df0444..0a34c8186924 100644
--- a/arch/arm/common/Kconfig
+++ b/arch/arm/common/Kconfig
@@ -12,10 +12,10 @@ config ARM_VIC_NR
The maximum number of VICs available in the system, for
power management.
-config ICST525
+config ICST
bool
-config ICST307
+config PL330
bool
config SA1111
@@ -40,3 +40,4 @@ config SHARP_SCOOP
config COMMON_CLKDEV
bool
+ select HAVE_CLK
diff --git a/arch/arm/common/Makefile b/arch/arm/common/Makefile
index 76be7ff2a7ca..e6e8664a9413 100644
--- a/arch/arm/common/Makefile
+++ b/arch/arm/common/Makefile
@@ -4,8 +4,8 @@
obj-$(CONFIG_ARM_GIC) += gic.o
obj-$(CONFIG_ARM_VIC) += vic.o
-obj-$(CONFIG_ICST525) += icst525.o
-obj-$(CONFIG_ICST307) += icst307.o
+obj-$(CONFIG_ICST) += icst.o
+obj-$(CONFIG_PL330) += pl330.o
obj-$(CONFIG_SA1111) += sa1111.o
obj-$(CONFIG_PCI_HOST_VIA82C505) += via82c505.o
obj-$(CONFIG_DMABOUNCE) += dmabounce.o
diff --git a/arch/arm/common/clkdev.c b/arch/arm/common/clkdev.c
index dba4c1da63ed..e2b2bb66e094 100644
--- a/arch/arm/common/clkdev.c
+++ b/arch/arm/common/clkdev.c
@@ -53,12 +53,13 @@ static struct clk *clk_find(const char *dev_id, const char *con_id)
continue;
match += 1;
}
- if (match == 0)
- continue;
if (match > best) {
clk = p->clk;
- best = match;
+ if (match != 3)
+ best = match;
+ else
+ break;
}
}
return clk;
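
The clk_find() change above makes an exact match terminate the scan: a partial match (device or connection id only) merely becomes the current best, while a full match of both ids is taken immediately. A hedged restatement of that scoring as a standalone helper; the 2-points-for-dev_id, 1-point-for-con_id weighting is assumed from the surrounding clkdev code, which is only partly visible in this hunk:

#include <linux/string.h>

/*
 * Hypothetical helper mirroring the matching rule: an entry that names a
 * dev_id scores 2 when it matches, an entry that names a con_id scores 1,
 * and a mismatch on any id it names disqualifies it (-1).  A total of 3
 * is an exact match and ends the search in clk_find().
 */
static int clkdev_match_score(const char *entry_dev, const char *entry_con,
			      const char *dev_id, const char *con_id)
{
	int match = 0;

	if (entry_dev) {
		if (!dev_id || strcmp(entry_dev, dev_id))
			return -1;
		match += 2;
	}
	if (entry_con) {
		if (!con_id || strcmp(entry_con, con_id))
			return -1;
		match += 1;
	}

	return match;
}
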
diff --git a/arch/arm/common/icst.c b/arch/arm/common/icst.c
new file mode 100644
index 000000000000..9a7f09cff300
--- /dev/null
+++ b/arch/arm/common/icst.c
@@ -0,0 +1,100 @@
+/*
+ * linux/arch/arm/common/icst.c
+ *
+ * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Support functions for calculating clocks/divisors for the ICST307
+ * and ICST525 clock generators. See http://www.icst.com/ for more
+ * information on these devices.
+ *
+ * The two devices are almost identical; only their s2div and idx2s
+ * tables differ.
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include <asm/hardware/icst.h>
+
+/*
+ * Divisors for each OD setting.
+ */
+const unsigned char icst307_s2div[8] = { 10, 2, 8, 4, 5, 7, 3, 6 };
+const unsigned char icst525_s2div[8] = { 10, 2, 8, 4, 5, 7, 9, 6 };
+EXPORT_SYMBOL(icst307_s2div);
+EXPORT_SYMBOL(icst525_s2div);
+
+unsigned long icst_hz(const struct icst_params *p, struct icst_vco vco)
+{
+ return p->ref * 2 * (vco.v + 8) / ((vco.r + 2) * p->s2div[vco.s]);
+}
+
+EXPORT_SYMBOL(icst_hz);
+
+/*
+ * Ascending divisor S values.
+ */
+const unsigned char icst307_idx2s[8] = { 1, 6, 3, 4, 7, 5, 2, 0 };
+const unsigned char icst525_idx2s[8] = { 1, 3, 4, 7, 5, 2, 6, 0 };
+EXPORT_SYMBOL(icst307_idx2s);
+EXPORT_SYMBOL(icst525_idx2s);
+
+struct icst_vco
+icst_hz_to_vco(const struct icst_params *p, unsigned long freq)
+{
+ struct icst_vco vco = { .s = 1, .v = p->vd_max, .r = p->rd_max };
+ unsigned long f;
+ unsigned int i = 0, rd, best = (unsigned int)-1;
+
+ /*
+ * First, find the PLL output divisor such
+ * that the PLL output is within spec.
+ */
+ do {
+ f = freq * p->s2div[p->idx2s[i]];
+
+ if (f > p->vco_min && f <= p->vco_max)
+ break;
+ } while (i < 8);
+
+ if (i >= 8)
+ return vco;
+
+ vco.s = p->idx2s[i];
+
+ /*
+ * Now find the closest divisor combination
+ * which gives a PLL output of 'f'.
+ */
+ for (rd = p->rd_min; rd <= p->rd_max; rd++) {
+ unsigned long fref_div, f_pll;
+ unsigned int vd;
+ int f_diff;
+
+ fref_div = (2 * p->ref) / rd;
+
+ vd = (f + fref_div / 2) / fref_div;
+ if (vd < p->vd_min || vd > p->vd_max)
+ continue;
+
+ f_pll = fref_div * vd;
+ f_diff = f_pll - f;
+ if (f_diff < 0)
+ f_diff = -f_diff;
+
+ if ((unsigned)f_diff < best) {
+ vco.v = vd - 8;
+ vco.r = rd - 2;
+ if (f_diff == 0)
+ break;
+ best = f_diff;
+ }
+ }
+
+ return vco;
+}
+
+EXPORT_SYMBOL(icst_hz_to_vco);
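
As a quick sanity check of icst_hz() above, a hedged example with made-up parameter and VCO values (no real board uses exactly these numbers): with a 24 MHz reference, v = 4, r = 22 and s = 1 (s2div[1] = 2 in both tables), the result is 24e6 * 2 * 12 / (24 * 2) = 12 MHz.

#include <asm/hardware/icst.h>

/* Hypothetical parameters; only the fields used by icst_hz() and
 * icst_hz_to_vco() in the file above are filled in. */
static const struct icst_params example_params = {
	.ref		= 24000000,	/* assumed 24 MHz reference */
	.vco_min	= 6000000,
	.vco_max	= 200000000,
	.vd_min		= 4,
	.vd_max		= 511,
	.rd_min		= 1,
	.rd_max		= 127,
	.s2div		= icst307_s2div,
	.idx2s		= icst307_idx2s,
};

static unsigned long example_rate(void)
{
	/* 24e6 * 2 * (4 + 8) / ((22 + 2) * s2div[1]) = 12000000 */
	struct icst_vco vco = { .v = 4, .r = 22, .s = 1 };

	return icst_hz(&example_params, vco);
}
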
diff --git a/arch/arm/common/icst307.c b/arch/arm/common/icst307.c
deleted file mode 100644
index 6d094c157540..000000000000
--- a/arch/arm/common/icst307.c
+++ /dev/null
@@ -1,161 +0,0 @@
-/*
- * linux/arch/arm/common/icst307.c
- *
- * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Support functions for calculating clocks/divisors for the ICST307
- * clock generators. See http://www.icst.com/ for more information
- * on these devices.
- *
- * This is an almost identical implementation to the ICST525 clock generator.
- * The s2div and idx2s files are different
- */
-#include <linux/module.h>
-#include <linux/kernel.h>
-
-#include <asm/hardware/icst307.h>
-
-/*
- * Divisors for each OD setting.
- */
-static unsigned char s2div[8] = { 10, 2, 8, 4, 5, 7, 3, 6 };
-
-unsigned long icst307_khz(const struct icst307_params *p, struct icst307_vco vco)
-{
- return p->ref * 2 * (vco.v + 8) / ((vco.r + 2) * s2div[vco.s]);
-}
-
-EXPORT_SYMBOL(icst307_khz);
-
-/*
- * Ascending divisor S values.
- */
-static unsigned char idx2s[8] = { 1, 6, 3, 4, 7, 5, 2, 0 };
-
-struct icst307_vco
-icst307_khz_to_vco(const struct icst307_params *p, unsigned long freq)
-{
- struct icst307_vco vco = { .s = 1, .v = p->vd_max, .r = p->rd_max };
- unsigned long f;
- unsigned int i = 0, rd, best = (unsigned int)-1;
-
- /*
- * First, find the PLL output divisor such
- * that the PLL output is within spec.
- */
- do {
- f = freq * s2div[idx2s[i]];
-
- /*
- * f must be between 6MHz and 200MHz (3.3 or 5V)
- */
- if (f > 6000 && f <= p->vco_max)
- break;
- } while (i < ARRAY_SIZE(idx2s));
-
- if (i >= ARRAY_SIZE(idx2s))
- return vco;
-
- vco.s = idx2s[i];
-
- /*
- * Now find the closest divisor combination
- * which gives a PLL output of 'f'.
- */
- for (rd = p->rd_min; rd <= p->rd_max; rd++) {
- unsigned long fref_div, f_pll;
- unsigned int vd;
- int f_diff;
-
- fref_div = (2 * p->ref) / rd;
-
- vd = (f + fref_div / 2) / fref_div;
- if (vd < p->vd_min || vd > p->vd_max)
- continue;
-
- f_pll = fref_div * vd;
- f_diff = f_pll - f;
- if (f_diff < 0)
- f_diff = -f_diff;
-
- if ((unsigned)f_diff < best) {
- vco.v = vd - 8;
- vco.r = rd - 2;
- if (f_diff == 0)
- break;
- best = f_diff;
- }
- }
-
- return vco;
-}
-
-EXPORT_SYMBOL(icst307_khz_to_vco);
-
-struct icst307_vco
-icst307_ps_to_vco(const struct icst307_params *p, unsigned long period)
-{
- struct icst307_vco vco = { .s = 1, .v = p->vd_max, .r = p->rd_max };
- unsigned long f, ps;
- unsigned int i = 0, rd, best = (unsigned int)-1;
-
- ps = 1000000000UL / p->vco_max;
-
- /*
- * First, find the PLL output divisor such
- * that the PLL output is within spec.
- */
- do {
- f = period / s2div[idx2s[i]];
-
- /*
- * f must be between 6MHz and 200MHz (3.3 or 5V)
- */
- if (f >= ps && f < 1000000000UL / 6000 + 1)
- break;
- } while (i < ARRAY_SIZE(idx2s));
-
- if (i >= ARRAY_SIZE(idx2s))
- return vco;
-
- vco.s = idx2s[i];
-
- ps = 500000000UL / p->ref;
-
- /*
- * Now find the closest divisor combination
- * which gives a PLL output of 'f'.
- */
- for (rd = p->rd_min; rd <= p->rd_max; rd++) {
- unsigned long f_in_div, f_pll;
- unsigned int vd;
- int f_diff;
-
- f_in_div = ps * rd;
-
- vd = (f_in_div + f / 2) / f;
- if (vd < p->vd_min || vd > p->vd_max)
- continue;
-
- f_pll = (f_in_div + vd / 2) / vd;
- f_diff = f_pll - f;
- if (f_diff < 0)
- f_diff = -f_diff;
-
- if ((unsigned)f_diff < best) {
- vco.v = vd - 8;
- vco.r = rd - 2;
- if (f_diff == 0)
- break;
- best = f_diff;
- }
- }
-
- return vco;
-}
-
-EXPORT_SYMBOL(icst307_ps_to_vco);
diff --git a/arch/arm/common/icst525.c b/arch/arm/common/icst525.c
deleted file mode 100644
index 3d377c5bdef6..000000000000
--- a/arch/arm/common/icst525.c
+++ /dev/null
@@ -1,160 +0,0 @@
-/*
- * linux/arch/arm/common/icst525.c
- *
- * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Support functions for calculating clocks/divisors for the ICST525
- * clock generators. See http://www.icst.com/ for more information
- * on these devices.
- */
-#include <linux/module.h>
-#include <linux/kernel.h>
-
-#include <asm/hardware/icst525.h>
-
-/*
- * Divisors for each OD setting.
- */
-static unsigned char s2div[8] = { 10, 2, 8, 4, 5, 7, 9, 6 };
-
-unsigned long icst525_khz(const struct icst525_params *p, struct icst525_vco vco)
-{
- return p->ref * 2 * (vco.v + 8) / ((vco.r + 2) * s2div[vco.s]);
-}
-
-EXPORT_SYMBOL(icst525_khz);
-
-/*
- * Ascending divisor S values.
- */
-static unsigned char idx2s[] = { 1, 3, 4, 7, 5, 2, 6, 0 };
-
-struct icst525_vco
-icst525_khz_to_vco(const struct icst525_params *p, unsigned long freq)
-{
- struct icst525_vco vco = { .s = 1, .v = p->vd_max, .r = p->rd_max };
- unsigned long f;
- unsigned int i = 0, rd, best = (unsigned int)-1;
-
- /*
- * First, find the PLL output divisor such
- * that the PLL output is within spec.
- */
- do {
- f = freq * s2div[idx2s[i]];
-
- /*
- * f must be between 10MHz and
- * 320MHz (5V) or 200MHz (3V)
- */
- if (f > 10000 && f <= p->vco_max)
- break;
- } while (i < ARRAY_SIZE(idx2s));
-
- if (i >= ARRAY_SIZE(idx2s))
- return vco;
-
- vco.s = idx2s[i];
-
- /*
- * Now find the closest divisor combination
- * which gives a PLL output of 'f'.
- */
- for (rd = p->rd_min; rd <= p->rd_max; rd++) {
- unsigned long fref_div, f_pll;
- unsigned int vd;
- int f_diff;
-
- fref_div = (2 * p->ref) / rd;
-
- vd = (f + fref_div / 2) / fref_div;
- if (vd < p->vd_min || vd > p->vd_max)
- continue;
-
- f_pll = fref_div * vd;
- f_diff = f_pll - f;
- if (f_diff < 0)
- f_diff = -f_diff;
-
- if ((unsigned)f_diff < best) {
- vco.v = vd - 8;
- vco.r = rd - 2;
- if (f_diff == 0)
- break;
- best = f_diff;
- }
- }
-
- return vco;
-}
-
-EXPORT_SYMBOL(icst525_khz_to_vco);
-
-struct icst525_vco
-icst525_ps_to_vco(const struct icst525_params *p, unsigned long period)
-{
- struct icst525_vco vco = { .s = 1, .v = p->vd_max, .r = p->rd_max };
- unsigned long f, ps;
- unsigned int i = 0, rd, best = (unsigned int)-1;
-
- ps = 1000000000UL / p->vco_max;
-
- /*
- * First, find the PLL output divisor such
- * that the PLL output is within spec.
- */
- do {
- f = period / s2div[idx2s[i]];
-
- /*
- * f must be between 10MHz and
- * 320MHz (5V) or 200MHz (3V)
- */
- if (f >= ps && f < 100000)
- break;
- } while (i < ARRAY_SIZE(idx2s));
-
- if (i >= ARRAY_SIZE(idx2s))
- return vco;
-
- vco.s = idx2s[i];
-
- ps = 500000000UL / p->ref;
-
- /*
- * Now find the closest divisor combination
- * which gives a PLL output of 'f'.
- */
- for (rd = p->rd_min; rd <= p->rd_max; rd++) {
- unsigned long f_in_div, f_pll;
- unsigned int vd;
- int f_diff;
-
- f_in_div = ps * rd;
-
- vd = (f_in_div + f / 2) / f;
- if (vd < p->vd_min || vd > p->vd_max)
- continue;
-
- f_pll = (f_in_div + vd / 2) / vd;
- f_diff = f_pll - f;
- if (f_diff < 0)
- f_diff = -f_diff;
-
- if ((unsigned)f_diff < best) {
- vco.v = vd - 8;
- vco.r = rd - 2;
- if (f_diff == 0)
- break;
- best = f_diff;
- }
- }
-
- return vco;
-}
-
-EXPORT_SYMBOL(icst525_ps_to_vco);
diff --git a/arch/arm/common/pl330.c b/arch/arm/common/pl330.c
new file mode 100644
index 000000000000..5ebbab6242a7
--- /dev/null
+++ b/arch/arm/common/pl330.c
@@ -0,0 +1,1966 @@
+/* linux/arch/arm/common/pl330.c
+ *
+ * Copyright (C) 2010 Samsung Electronics Co Ltd.
+ * Jaswinder Singh <jassi.brar@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/hardware/pl330.h>
+
+/* Register and Bit field Definitions */
+#define DS 0x0
+#define DS_ST_STOP 0x0
+#define DS_ST_EXEC 0x1
+#define DS_ST_CMISS 0x2
+#define DS_ST_UPDTPC 0x3
+#define DS_ST_WFE 0x4
+#define DS_ST_ATBRR 0x5
+#define DS_ST_QBUSY 0x6
+#define DS_ST_WFP 0x7
+#define DS_ST_KILL 0x8
+#define DS_ST_CMPLT 0x9
+#define DS_ST_FLTCMP 0xe
+#define DS_ST_FAULT 0xf
+
+#define DPC 0x4
+#define INTEN 0x20
+#define ES 0x24
+#define INTSTATUS 0x28
+#define INTCLR 0x2c
+#define FSM 0x30
+#define FSC 0x34
+#define FTM 0x38
+
+#define _FTC 0x40
+#define FTC(n) (_FTC + (n)*0x4)
+
+#define _CS 0x100
+#define CS(n) (_CS + (n)*0x8)
+#define CS_CNS (1 << 21)
+
+#define _CPC 0x104
+#define CPC(n) (_CPC + (n)*0x8)
+
+#define _SA 0x400
+#define SA(n) (_SA + (n)*0x20)
+
+#define _DA 0x404
+#define DA(n) (_DA + (n)*0x20)
+
+#define _CC 0x408
+#define CC(n) (_CC + (n)*0x20)
+
+#define CC_SRCINC (1 << 0)
+#define CC_DSTINC (1 << 14)
+#define CC_SRCPRI (1 << 8)
+#define CC_DSTPRI (1 << 22)
+#define CC_SRCNS (1 << 9)
+#define CC_DSTNS (1 << 23)
+#define CC_SRCIA (1 << 10)
+#define CC_DSTIA (1 << 24)
+#define CC_SRCBRSTLEN_SHFT 4
+#define CC_DSTBRSTLEN_SHFT 18
+#define CC_SRCBRSTSIZE_SHFT 1
+#define CC_DSTBRSTSIZE_SHFT 15
+#define CC_SRCCCTRL_SHFT 11
+#define CC_SRCCCTRL_MASK 0x7
+#define CC_DSTCCTRL_SHFT 25
+#define CC_DRCCCTRL_MASK 0x7
+#define CC_SWAP_SHFT 28
+
+#define _LC0 0x40c
+#define LC0(n) (_LC0 + (n)*0x20)
+
+#define _LC1 0x410
+#define LC1(n) (_LC1 + (n)*0x20)
+
+#define DBGSTATUS 0xd00
+#define DBG_BUSY (1 << 0)
+
+#define DBGCMD 0xd04
+#define DBGINST0 0xd08
+#define DBGINST1 0xd0c
+
+#define CR0 0xe00
+#define CR1 0xe04
+#define CR2 0xe08
+#define CR3 0xe0c
+#define CR4 0xe10
+#define CRD 0xe14
+
+#define PERIPH_ID 0xfe0
+#define PCELL_ID 0xff0
+
+#define CR0_PERIPH_REQ_SET (1 << 0)
+#define CR0_BOOT_EN_SET (1 << 1)
+#define CR0_BOOT_MAN_NS (1 << 2)
+#define CR0_NUM_CHANS_SHIFT 4
+#define CR0_NUM_CHANS_MASK 0x7
+#define CR0_NUM_PERIPH_SHIFT 12
+#define CR0_NUM_PERIPH_MASK 0x1f
+#define CR0_NUM_EVENTS_SHIFT 17
+#define CR0_NUM_EVENTS_MASK 0x1f
+
+#define CR1_ICACHE_LEN_SHIFT 0
+#define CR1_ICACHE_LEN_MASK 0x7
+#define CR1_NUM_ICACHELINES_SHIFT 4
+#define CR1_NUM_ICACHELINES_MASK 0xf
+
+#define CRD_DATA_WIDTH_SHIFT 0
+#define CRD_DATA_WIDTH_MASK 0x7
+#define CRD_WR_CAP_SHIFT 4
+#define CRD_WR_CAP_MASK 0x7
+#define CRD_WR_Q_DEP_SHIFT 8
+#define CRD_WR_Q_DEP_MASK 0xf
+#define CRD_RD_CAP_SHIFT 12
+#define CRD_RD_CAP_MASK 0x7
+#define CRD_RD_Q_DEP_SHIFT 16
+#define CRD_RD_Q_DEP_MASK 0xf
+#define CRD_DATA_BUFF_SHIFT 20
+#define CRD_DATA_BUFF_MASK 0x3ff
+
+#define PART 0x330
+#define DESIGNER 0x41
+#define REVISION 0x0
+#define INTEG_CFG 0x0
+#define PERIPH_ID_VAL ((PART << 0) | (DESIGNER << 12) \
+ | (REVISION << 20) | (INTEG_CFG << 24))
+
+#define PCELL_ID_VAL 0xb105f00d
+
+#define PL330_STATE_STOPPED (1 << 0)
+#define PL330_STATE_EXECUTING (1 << 1)
+#define PL330_STATE_WFE (1 << 2)
+#define PL330_STATE_FAULTING (1 << 3)
+#define PL330_STATE_COMPLETING (1 << 4)
+#define PL330_STATE_WFP (1 << 5)
+#define PL330_STATE_KILLING (1 << 6)
+#define PL330_STATE_FAULT_COMPLETING (1 << 7)
+#define PL330_STATE_CACHEMISS (1 << 8)
+#define PL330_STATE_UPDTPC (1 << 9)
+#define PL330_STATE_ATBARRIER (1 << 10)
+#define PL330_STATE_QUEUEBUSY (1 << 11)
+#define PL330_STATE_INVALID (1 << 15)
+
+#define PL330_STABLE_STATES (PL330_STATE_STOPPED | PL330_STATE_EXECUTING \
+ | PL330_STATE_WFE | PL330_STATE_FAULTING)
+
+#define CMD_DMAADDH 0x54
+#define CMD_DMAEND 0x00
+#define CMD_DMAFLUSHP 0x35
+#define CMD_DMAGO 0xa0
+#define CMD_DMALD 0x04
+#define CMD_DMALDP 0x25
+#define CMD_DMALP 0x20
+#define CMD_DMALPEND 0x28
+#define CMD_DMAKILL 0x01
+#define CMD_DMAMOV 0xbc
+#define CMD_DMANOP 0x18
+#define CMD_DMARMB 0x12
+#define CMD_DMASEV 0x34
+#define CMD_DMAST 0x08
+#define CMD_DMASTP 0x29
+#define CMD_DMASTZ 0x0c
+#define CMD_DMAWFE 0x36
+#define CMD_DMAWFP 0x30
+#define CMD_DMAWMB 0x13
+
+#define SZ_DMAADDH 3
+#define SZ_DMAEND 1
+#define SZ_DMAFLUSHP 2
+#define SZ_DMALD 1
+#define SZ_DMALDP 2
+#define SZ_DMALP 2
+#define SZ_DMALPEND 2
+#define SZ_DMAKILL 1
+#define SZ_DMAMOV 6
+#define SZ_DMANOP 1
+#define SZ_DMARMB 1
+#define SZ_DMASEV 2
+#define SZ_DMAST 1
+#define SZ_DMASTP 2
+#define SZ_DMASTZ 1
+#define SZ_DMAWFE 2
+#define SZ_DMAWFP 2
+#define SZ_DMAWMB 1
+#define SZ_DMAGO 6
+
+#define BRST_LEN(ccr) ((((ccr) >> CC_SRCBRSTLEN_SHFT) & 0xf) + 1)
+#define BRST_SIZE(ccr) (1 << (((ccr) >> CC_SRCBRSTSIZE_SHFT) & 0x7))
+
+#define BYTE_TO_BURST(b, ccr) ((b) / BRST_SIZE(ccr) / BRST_LEN(ccr))
+#define BURST_TO_BYTE(c, ccr) ((c) * BRST_SIZE(ccr) * BRST_LEN(ccr))
+
+/*
+ * With 256 bytes, we can do more than 2.5MB and 5MB xfers per req
+ * at 1 byte/burst for P<->M and M<->M respectively.
+ * For a typical scenario, at 1 word/burst, 10MB and 20MB xfers per req
+ * should be enough for P<->M and M<->M respectively.
+ */
+#define MCODE_BUFF_PER_REQ 256
+
+/*
+ * Mark a _pl330_req as free.
+ * We do it by writing DMAEND as the first instruction
+ * because no valid request is going to have DMAEND as
+ * its first instruction to execute.
+ */
+#define MARK_FREE(req) do { \
+ _emit_END(0, (req)->mc_cpu); \
+ (req)->mc_len = 0; \
+ } while (0)
+
+/* If the _pl330_req is available to the client */
+#define IS_FREE(req) (*((u8 *)((req)->mc_cpu)) == CMD_DMAEND)
+
+/* Use this _only_ to wait on transient states */
+#define UNTIL(t, s) while (!(_state(t) & (s))) cpu_relax();
+
+#ifdef PL330_DEBUG_MCGEN
+static unsigned cmd_line;
+#define PL330_DBGCMD_DUMP(off, x...) do { \
+ printk("%x:", cmd_line); \
+ printk(x); \
+ cmd_line += off; \
+ } while (0)
+#define PL330_DBGMC_START(addr) (cmd_line = addr)
+#else
+#define PL330_DBGCMD_DUMP(off, x...) do {} while (0)
+#define PL330_DBGMC_START(addr) do {} while (0)
+#endif
+
+struct _xfer_spec {
+ u32 ccr;
+ struct pl330_req *r;
+ struct pl330_xfer *x;
+};
+
+enum dmamov_dst {
+ SAR = 0,
+ CCR,
+ DAR,
+};
+
+enum pl330_dst {
+ SRC = 0,
+ DST,
+};
+
+enum pl330_cond {
+ SINGLE,
+ BURST,
+ ALWAYS,
+};
+
+struct _pl330_req {
+ u32 mc_bus;
+ void *mc_cpu;
+ /* Number of bytes taken to setup MC for the req */
+ u32 mc_len;
+ struct pl330_req *r;
+ /* Hook to attach to DMAC's list of reqs with due callback */
+ struct list_head rqd;
+};
+
+/* ToBeDone for tasklet */
+struct _pl330_tbd {
+ bool reset_dmac;
+ bool reset_mngr;
+ u8 reset_chan;
+};
+
+/* A DMAC Thread */
+struct pl330_thread {
+ u8 id;
+ int ev;
+ /* If the channel is not yet acquired by any client */
+ bool free;
+ /* Parent DMAC */
+ struct pl330_dmac *dmac;
+ /* Only two at a time */
+ struct _pl330_req req[2];
+ /* Index of the last submitted request */
+ unsigned lstenq;
+};
+
+enum pl330_dmac_state {
+ UNINIT,
+ INIT,
+ DYING,
+};
+
+/* A DMAC */
+struct pl330_dmac {
+ spinlock_t lock;
+ /* Holds list of reqs with due callbacks */
+ struct list_head req_done;
+ /* Pointer to platform specific stuff */
+ struct pl330_info *pinfo;
+ /* Maximum possible events/irqs */
+ int events[32];
+ /* BUS address of MicroCode buffer */
+ u32 mcode_bus;
+ /* CPU address of MicroCode buffer */
+ void *mcode_cpu;
+ /* List of all Channel threads */
+ struct pl330_thread *channels;
+ /* Pointer to the MANAGER thread */
+ struct pl330_thread *manager;
+ /* To handle bad news in interrupt */
+ struct tasklet_struct tasks;
+ struct _pl330_tbd dmac_tbd;
+ /* State of DMAC operation */
+ enum pl330_dmac_state state;
+};
+
+static inline void _callback(struct pl330_req *r, enum pl330_op_err err)
+{
+ if (r && r->xfer_cb)
+ r->xfer_cb(r->token, err);
+}
+
+static inline bool _queue_empty(struct pl330_thread *thrd)
+{
+ return (IS_FREE(&thrd->req[0]) && IS_FREE(&thrd->req[1]))
+ ? true : false;
+}
+
+static inline bool _queue_full(struct pl330_thread *thrd)
+{
+ return (IS_FREE(&thrd->req[0]) || IS_FREE(&thrd->req[1]))
+ ? false : true;
+}
+
+static inline bool is_manager(struct pl330_thread *thrd)
+{
+ struct pl330_dmac *pl330 = thrd->dmac;
+
+ /* MANAGER is indexed at the end */
+ if (thrd->id == pl330->pinfo->pcfg.num_chan)
+ return true;
+ else
+ return false;
+}
+
+/* If manager of the thread is in Non-Secure mode */
+static inline bool _manager_ns(struct pl330_thread *thrd)
+{
+ struct pl330_dmac *pl330 = thrd->dmac;
+
+ return (pl330->pinfo->pcfg.mode & DMAC_MODE_NS) ? true : false;
+}
+
+static inline u32 get_id(struct pl330_info *pi, u32 off)
+{
+ void __iomem *regs = pi->base;
+ u32 id = 0;
+
+ id |= (readb(regs + off + 0x0) << 0);
+ id |= (readb(regs + off + 0x4) << 8);
+ id |= (readb(regs + off + 0x8) << 16);
+ id |= (readb(regs + off + 0xc) << 24);
+
+ return id;
+}
+
+static inline u32 _emit_ADDH(unsigned dry_run, u8 buf[],
+ enum pl330_dst da, u16 val)
+{
+ if (dry_run)
+ return SZ_DMAADDH;
+
+ buf[0] = CMD_DMAADDH;
+ buf[0] |= (da << 1);
+ *((u16 *)&buf[1]) = val;
+
+ PL330_DBGCMD_DUMP(SZ_DMAADDH, "\tDMAADDH %s %u\n",
+ da == 1 ? "DA" : "SA", val);
+
+ return SZ_DMAADDH;
+}
+
+static inline u32 _emit_END(unsigned dry_run, u8 buf[])
+{
+ if (dry_run)
+ return SZ_DMAEND;
+
+ buf[0] = CMD_DMAEND;
+
+ PL330_DBGCMD_DUMP(SZ_DMAEND, "\tDMAEND\n");
+
+ return SZ_DMAEND;
+}
+
+static inline u32 _emit_FLUSHP(unsigned dry_run, u8 buf[], u8 peri)
+{
+ if (dry_run)
+ return SZ_DMAFLUSHP;
+
+ buf[0] = CMD_DMAFLUSHP;
+
+ peri &= 0x1f;
+ peri <<= 3;
+ buf[1] = peri;
+
+ PL330_DBGCMD_DUMP(SZ_DMAFLUSHP, "\tDMAFLUSHP %u\n", peri >> 3);
+
+ return SZ_DMAFLUSHP;
+}
+
+static inline u32 _emit_LD(unsigned dry_run, u8 buf[], enum pl330_cond cond)
+{
+ if (dry_run)
+ return SZ_DMALD;
+
+ buf[0] = CMD_DMALD;
+
+ if (cond == SINGLE)
+ buf[0] |= (0 << 1) | (1 << 0);
+ else if (cond == BURST)
+ buf[0] |= (1 << 1) | (1 << 0);
+
+ PL330_DBGCMD_DUMP(SZ_DMALD, "\tDMALD%c\n",
+ cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'));
+
+ return SZ_DMALD;
+}
+
+static inline u32 _emit_LDP(unsigned dry_run, u8 buf[],
+ enum pl330_cond cond, u8 peri)
+{
+ if (dry_run)
+ return SZ_DMALDP;
+
+ buf[0] = CMD_DMALDP;
+
+ if (cond == BURST)
+ buf[0] |= (1 << 1);
+
+ peri &= 0x1f;
+ peri <<= 3;
+ buf[1] = peri;
+
+ PL330_DBGCMD_DUMP(SZ_DMALDP, "\tDMALDP%c %u\n",
+ cond == SINGLE ? 'S' : 'B', peri >> 3);
+
+ return SZ_DMALDP;
+}
+
+static inline u32 _emit_LP(unsigned dry_run, u8 buf[],
+ unsigned loop, u8 cnt)
+{
+ if (dry_run)
+ return SZ_DMALP;
+
+ buf[0] = CMD_DMALP;
+
+ if (loop)
+ buf[0] |= (1 << 1);
+
+ cnt--; /* DMAC increments by 1 internally */
+ buf[1] = cnt;
+
+ PL330_DBGCMD_DUMP(SZ_DMALP, "\tDMALP_%c %u\n", loop ? '1' : '0', cnt);
+
+ return SZ_DMALP;
+}
+
+struct _arg_LPEND {
+ enum pl330_cond cond;
+ bool forever;
+ unsigned loop;
+ u8 bjump;
+};
+
+static inline u32 _emit_LPEND(unsigned dry_run, u8 buf[],
+ const struct _arg_LPEND *arg)
+{
+ enum pl330_cond cond = arg->cond;
+ bool forever = arg->forever;
+ unsigned loop = arg->loop;
+ u8 bjump = arg->bjump;
+
+ if (dry_run)
+ return SZ_DMALPEND;
+
+ buf[0] = CMD_DMALPEND;
+
+ if (loop)
+ buf[0] |= (1 << 2);
+
+ if (!forever)
+ buf[0] |= (1 << 4);
+
+ if (cond == SINGLE)
+ buf[0] |= (0 << 1) | (1 << 0);
+ else if (cond == BURST)
+ buf[0] |= (1 << 1) | (1 << 0);
+
+ buf[1] = bjump;
+
+ PL330_DBGCMD_DUMP(SZ_DMALPEND, "\tDMALP%s%c_%c bjmpto_%x\n",
+ forever ? "FE" : "END",
+ cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'),
+ loop ? '1' : '0',
+ bjump);
+
+ return SZ_DMALPEND;
+}
+
+static inline u32 _emit_KILL(unsigned dry_run, u8 buf[])
+{
+ if (dry_run)
+ return SZ_DMAKILL;
+
+ buf[0] = CMD_DMAKILL;
+
+ return SZ_DMAKILL;
+}
+
+static inline u32 _emit_MOV(unsigned dry_run, u8 buf[],
+ enum dmamov_dst dst, u32 val)
+{
+ if (dry_run)
+ return SZ_DMAMOV;
+
+ buf[0] = CMD_DMAMOV;
+ buf[1] = dst;
+ *((u32 *)&buf[2]) = val;
+
+ PL330_DBGCMD_DUMP(SZ_DMAMOV, "\tDMAMOV %s 0x%x\n",
+ dst == SAR ? "SAR" : (dst == DAR ? "DAR" : "CCR"), val);
+
+ return SZ_DMAMOV;
+}
+
+static inline u32 _emit_NOP(unsigned dry_run, u8 buf[])
+{
+ if (dry_run)
+ return SZ_DMANOP;
+
+ buf[0] = CMD_DMANOP;
+
+ PL330_DBGCMD_DUMP(SZ_DMANOP, "\tDMANOP\n");
+
+ return SZ_DMANOP;
+}
+
+static inline u32 _emit_RMB(unsigned dry_run, u8 buf[])
+{
+ if (dry_run)
+ return SZ_DMARMB;
+
+ buf[0] = CMD_DMARMB;
+
+ PL330_DBGCMD_DUMP(SZ_DMARMB, "\tDMARMB\n");
+
+ return SZ_DMARMB;
+}
+
+static inline u32 _emit_SEV(unsigned dry_run, u8 buf[], u8 ev)
+{
+ if (dry_run)
+ return SZ_DMASEV;
+
+ buf[0] = CMD_DMASEV;
+
+ ev &= 0x1f;
+ ev <<= 3;
+ buf[1] = ev;
+
+ PL330_DBGCMD_DUMP(SZ_DMASEV, "\tDMASEV %u\n", ev >> 3);
+
+ return SZ_DMASEV;
+}
+
+static inline u32 _emit_ST(unsigned dry_run, u8 buf[], enum pl330_cond cond)
+{
+ if (dry_run)
+ return SZ_DMAST;
+
+ buf[0] = CMD_DMAST;
+
+ if (cond == SINGLE)
+ buf[0] |= (0 << 1) | (1 << 0);
+ else if (cond == BURST)
+ buf[0] |= (1 << 1) | (1 << 0);
+
+ PL330_DBGCMD_DUMP(SZ_DMAST, "\tDMAST%c\n",
+ cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'));
+
+ return SZ_DMAST;
+}
+
+static inline u32 _emit_STP(unsigned dry_run, u8 buf[],
+ enum pl330_cond cond, u8 peri)
+{
+ if (dry_run)
+ return SZ_DMASTP;
+
+ buf[0] = CMD_DMASTP;
+
+ if (cond == BURST)
+ buf[0] |= (1 << 1);
+
+ peri &= 0x1f;
+ peri <<= 3;
+ buf[1] = peri;
+
+ PL330_DBGCMD_DUMP(SZ_DMASTP, "\tDMASTP%c %u\n",
+ cond == SINGLE ? 'S' : 'B', peri >> 3);
+
+ return SZ_DMASTP;
+}
+
+static inline u32 _emit_STZ(unsigned dry_run, u8 buf[])
+{
+ if (dry_run)
+ return SZ_DMASTZ;
+
+ buf[0] = CMD_DMASTZ;
+
+ PL330_DBGCMD_DUMP(SZ_DMASTZ, "\tDMASTZ\n");
+
+ return SZ_DMASTZ;
+}
+
+static inline u32 _emit_WFE(unsigned dry_run, u8 buf[], u8 ev,
+ unsigned invalidate)
+{
+ if (dry_run)
+ return SZ_DMAWFE;
+
+ buf[0] = CMD_DMAWFE;
+
+ ev &= 0x1f;
+ ev <<= 3;
+ buf[1] = ev;
+
+ if (invalidate)
+ buf[1] |= (1 << 1);
+
+ PL330_DBGCMD_DUMP(SZ_DMAWFE, "\tDMAWFE %u%s\n",
+ ev >> 3, invalidate ? ", I" : "");
+
+ return SZ_DMAWFE;
+}
+
+static inline u32 _emit_WFP(unsigned dry_run, u8 buf[],
+ enum pl330_cond cond, u8 peri)
+{
+ if (dry_run)
+ return SZ_DMAWFP;
+
+ buf[0] = CMD_DMAWFP;
+
+ if (cond == SINGLE)
+ buf[0] |= (0 << 1) | (0 << 0);
+ else if (cond == BURST)
+ buf[0] |= (1 << 1) | (0 << 0);
+ else
+ buf[0] |= (0 << 1) | (1 << 0);
+
+ peri &= 0x1f;
+ peri <<= 3;
+ buf[1] = peri;
+
+ PL330_DBGCMD_DUMP(SZ_DMAWFP, "\tDMAWFP%c %u\n",
+ cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'P'), peri >> 3);
+
+ return SZ_DMAWFP;
+}
+
+static inline u32 _emit_WMB(unsigned dry_run, u8 buf[])
+{
+ if (dry_run)
+ return SZ_DMAWMB;
+
+ buf[0] = CMD_DMAWMB;
+
+ PL330_DBGCMD_DUMP(SZ_DMAWMB, "\tDMAWMB\n");
+
+ return SZ_DMAWMB;
+}
+
+struct _arg_GO {
+ u8 chan;
+ u32 addr;
+ unsigned ns;
+};
+
+static inline u32 _emit_GO(unsigned dry_run, u8 buf[],
+ const struct _arg_GO *arg)
+{
+ u8 chan = arg->chan;
+ u32 addr = arg->addr;
+ unsigned ns = arg->ns;
+
+ if (dry_run)
+ return SZ_DMAGO;
+
+ buf[0] = CMD_DMAGO;
+ buf[0] |= (ns << 1);
+
+ buf[1] = chan & 0x7;
+
+ *((u32 *)&buf[2]) = addr;
+
+ return SZ_DMAGO;
+}
+
+#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
+
+/* Returns Time-Out */
+static bool _until_dmac_idle(struct pl330_thread *thrd)
+{
+ void __iomem *regs = thrd->dmac->pinfo->base;
+ unsigned long loops = msecs_to_loops(5);
+
+ do {
+ /* Until Manager is Idle */
+ if (!(readl(regs + DBGSTATUS) & DBG_BUSY))
+ break;
+
+ cpu_relax();
+ } while (--loops);
+
+ if (!loops)
+ return true;
+
+ return false;
+}
+
+static inline void _execute_DBGINSN(struct pl330_thread *thrd,
+ u8 insn[], bool as_manager)
+{
+ void __iomem *regs = thrd->dmac->pinfo->base;
+ u32 val;
+
+ val = (insn[0] << 16) | (insn[1] << 24);
+ if (!as_manager) {
+ val |= (1 << 0);
+ val |= (thrd->id << 8); /* Channel Number */
+ }
+ writel(val, regs + DBGINST0);
+
+ val = *((u32 *)&insn[2]);
+ writel(val, regs + DBGINST1);
+
+ /* If timed out due to halted state-machine */
+ if (_until_dmac_idle(thrd)) {
+ dev_err(thrd->dmac->pinfo->dev, "DMAC halted!\n");
+ return;
+ }
+
+ /* Get going */
+ writel(0, regs + DBGCMD);
+}
+
+static inline u32 _state(struct pl330_thread *thrd)
+{
+ void __iomem *regs = thrd->dmac->pinfo->base;
+ u32 val;
+
+ if (is_manager(thrd))
+ val = readl(regs + DS) & 0xf;
+ else
+ val = readl(regs + CS(thrd->id)) & 0xf;
+
+ switch (val) {
+ case DS_ST_STOP:
+ return PL330_STATE_STOPPED;
+ case DS_ST_EXEC:
+ return PL330_STATE_EXECUTING;
+ case DS_ST_CMISS:
+ return PL330_STATE_CACHEMISS;
+ case DS_ST_UPDTPC:
+ return PL330_STATE_UPDTPC;
+ case DS_ST_WFE:
+ return PL330_STATE_WFE;
+ case DS_ST_FAULT:
+ return PL330_STATE_FAULTING;
+ case DS_ST_ATBRR:
+ if (is_manager(thrd))
+ return PL330_STATE_INVALID;
+ else
+ return PL330_STATE_ATBARRIER;
+ case DS_ST_QBUSY:
+ if (is_manager(thrd))
+ return PL330_STATE_INVALID;
+ else
+ return PL330_STATE_QUEUEBUSY;
+ case DS_ST_WFP:
+ if (is_manager(thrd))
+ return PL330_STATE_INVALID;
+ else
+ return PL330_STATE_WFP;
+ case DS_ST_KILL:
+ if (is_manager(thrd))
+ return PL330_STATE_INVALID;
+ else
+ return PL330_STATE_KILLING;
+ case DS_ST_CMPLT:
+ if (is_manager(thrd))
+ return PL330_STATE_INVALID;
+ else
+ return PL330_STATE_COMPLETING;
+ case DS_ST_FLTCMP:
+ if (is_manager(thrd))
+ return PL330_STATE_INVALID;
+ else
+ return PL330_STATE_FAULT_COMPLETING;
+ default:
+ return PL330_STATE_INVALID;
+ }
+}
+
+/* If the request 'req' of thread 'thrd' is currently active */
+static inline bool _req_active(struct pl330_thread *thrd,
+ struct _pl330_req *req)
+{
+ void __iomem *regs = thrd->dmac->pinfo->base;
+ u32 buf = req->mc_bus, pc = readl(regs + CPC(thrd->id));
+
+ if (IS_FREE(req))
+ return false;
+
+ return (pc >= buf && pc <= buf + req->mc_len) ? true : false;
+}
+
+/* Returns 0 if the thread is inactive, ID of active req + 1 otherwise */
+static inline unsigned _thrd_active(struct pl330_thread *thrd)
+{
+ if (_req_active(thrd, &thrd->req[0]))
+ return 1; /* First req active */
+
+ if (_req_active(thrd, &thrd->req[1]))
+ return 2; /* Second req active */
+
+ return 0;
+}
+
+static void _stop(struct pl330_thread *thrd)
+{
+ void __iomem *regs = thrd->dmac->pinfo->base;
+ u8 insn[6] = {0, 0, 0, 0, 0, 0};
+
+ if (_state(thrd) == PL330_STATE_FAULT_COMPLETING)
+ UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING);
+
+ /* Return if nothing needs to be done */
+ if (_state(thrd) == PL330_STATE_COMPLETING
+ || _state(thrd) == PL330_STATE_KILLING
+ || _state(thrd) == PL330_STATE_STOPPED)
+ return;
+
+ _emit_KILL(0, insn);
+
+ /* Stop generating interrupts for SEV */
+ writel(readl(regs + INTEN) & ~(1 << thrd->ev), regs + INTEN);
+
+ _execute_DBGINSN(thrd, insn, is_manager(thrd));
+}
+
+/* Start doing req 'idx' of thread 'thrd' */
+static bool _trigger(struct pl330_thread *thrd)
+{
+ void __iomem *regs = thrd->dmac->pinfo->base;
+ struct _pl330_req *req;
+ struct pl330_req *r;
+ struct _arg_GO go;
+ unsigned ns;
+ u8 insn[6] = {0, 0, 0, 0, 0, 0};
+
+ /* Return if already ACTIVE */
+ if (_state(thrd) != PL330_STATE_STOPPED)
+ return true;
+
+ if (!IS_FREE(&thrd->req[1 - thrd->lstenq]))
+ req = &thrd->req[1 - thrd->lstenq];
+ else if (!IS_FREE(&thrd->req[thrd->lstenq]))
+ req = &thrd->req[thrd->lstenq];
+ else
+ req = NULL;
+
+ /* Return if no request */
+ if (!req || !req->r)
+ return true;
+
+ r = req->r;
+
+ if (r->cfg)
+ ns = r->cfg->nonsecure ? 1 : 0;
+ else if (readl(regs + CS(thrd->id)) & CS_CNS)
+ ns = 1;
+ else
+ ns = 0;
+
+ /* See 'Abort Sources' point-4 at Page 2-25 */
+ if (_manager_ns(thrd) && !ns)
+ dev_info(thrd->dmac->pinfo->dev, "%s:%d Recipe for ABORT!\n",
+ __func__, __LINE__);
+
+ go.chan = thrd->id;
+ go.addr = req->mc_bus;
+ go.ns = ns;
+ _emit_GO(0, insn, &go);
+
+ /* Set to generate interrupts for SEV */
+ writel(readl(regs + INTEN) | (1 << thrd->ev), regs + INTEN);
+
+ /* Only manager can execute GO */
+ _execute_DBGINSN(thrd, insn, true);
+
+ return true;
+}
+
+static bool _start(struct pl330_thread *thrd)
+{
+ switch (_state(thrd)) {
+ case PL330_STATE_FAULT_COMPLETING:
+ UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING);
+
+ if (_state(thrd) == PL330_STATE_KILLING)
+ UNTIL(thrd, PL330_STATE_STOPPED)
+
+ case PL330_STATE_FAULTING:
+ _stop(thrd);
+
+ case PL330_STATE_KILLING:
+ case PL330_STATE_COMPLETING:
+ UNTIL(thrd, PL330_STATE_STOPPED)
+
+ case PL330_STATE_STOPPED:
+ return _trigger(thrd);
+
+ case PL330_STATE_WFP:
+ case PL330_STATE_QUEUEBUSY:
+ case PL330_STATE_ATBARRIER:
+ case PL330_STATE_UPDTPC:
+ case PL330_STATE_CACHEMISS:
+ case PL330_STATE_EXECUTING:
+ return true;
+
+ case PL330_STATE_WFE: /* For RESUME, nothing yet */
+ default:
+ return false;
+ }
+}
+
+static inline int _ldst_memtomem(unsigned dry_run, u8 buf[],
+ const struct _xfer_spec *pxs, int cyc)
+{
+ int off = 0;
+
+ while (cyc--) {
+ off += _emit_LD(dry_run, &buf[off], ALWAYS);
+ off += _emit_RMB(dry_run, &buf[off]);
+ off += _emit_ST(dry_run, &buf[off], ALWAYS);
+ off += _emit_WMB(dry_run, &buf[off]);
+ }
+
+ return off;
+}
+
+static inline int _ldst_devtomem(unsigned dry_run, u8 buf[],
+ const struct _xfer_spec *pxs, int cyc)
+{
+ int off = 0;
+
+ while (cyc--) {
+ off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->r->peri);
+ off += _emit_LDP(dry_run, &buf[off], SINGLE, pxs->r->peri);
+ off += _emit_ST(dry_run, &buf[off], ALWAYS);
+ off += _emit_FLUSHP(dry_run, &buf[off], pxs->r->peri);
+ }
+
+ return off;
+}
+
+static inline int _ldst_memtodev(unsigned dry_run, u8 buf[],
+ const struct _xfer_spec *pxs, int cyc)
+{
+ int off = 0;
+
+ while (cyc--) {
+ off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->r->peri);
+ off += _emit_LD(dry_run, &buf[off], ALWAYS);
+ off += _emit_STP(dry_run, &buf[off], SINGLE, pxs->r->peri);
+ off += _emit_FLUSHP(dry_run, &buf[off], pxs->r->peri);
+ }
+
+ return off;
+}
+
+static int _bursts(unsigned dry_run, u8 buf[],
+ const struct _xfer_spec *pxs, int cyc)
+{
+ int off = 0;
+
+ switch (pxs->r->rqtype) {
+ case MEMTODEV:
+ off += _ldst_memtodev(dry_run, &buf[off], pxs, cyc);
+ break;
+ case DEVTOMEM:
+ off += _ldst_devtomem(dry_run, &buf[off], pxs, cyc);
+ break;
+ case MEMTOMEM:
+ off += _ldst_memtomem(dry_run, &buf[off], pxs, cyc);
+ break;
+ default:
+ off += 0x40000000; /* Scare off the Client */
+ break;
+ }
+
+ return off;
+}
+
+/* Returns bytes consumed and updates bursts */
+static inline int _loop(unsigned dry_run, u8 buf[],
+ unsigned long *bursts, const struct _xfer_spec *pxs)
+{
+ int cyc, cycmax, szlp, szlpend, szbrst, off;
+ unsigned lcnt0, lcnt1, ljmp0, ljmp1;
+ struct _arg_LPEND lpend;
+
+ /* Max iterations possible in DMALP is 256 */
+ if (*bursts >= 256*256) {
+ lcnt1 = 256;
+ lcnt0 = 256;
+ cyc = *bursts / lcnt1 / lcnt0;
+ } else if (*bursts > 256) {
+ lcnt1 = 256;
+ lcnt0 = *bursts / lcnt1;
+ cyc = 1;
+ } else {
+ lcnt1 = *bursts;
+ lcnt0 = 0;
+ cyc = 1;
+ }
+
+ szlp = _emit_LP(1, buf, 0, 0);
+ szbrst = _bursts(1, buf, pxs, 1);
+
+ lpend.cond = ALWAYS;
+ lpend.forever = false;
+ lpend.loop = 0;
+ lpend.bjump = 0;
+ szlpend = _emit_LPEND(1, buf, &lpend);
+
+ if (lcnt0) {
+ szlp *= 2;
+ szlpend *= 2;
+ }
+
+ /*
+ * Max bursts that we can unroll due to limit on the
+ * size of backward jump that can be encoded in DMALPEND
+ * which is 8-bits and hence 255
+ */
+ cycmax = (255 - (szlp + szlpend)) / szbrst;
+
+ cyc = (cycmax < cyc) ? cycmax : cyc;
+
+ off = 0;
+
+ if (lcnt0) {
+ off += _emit_LP(dry_run, &buf[off], 0, lcnt0);
+ ljmp0 = off;
+ }
+
+ off += _emit_LP(dry_run, &buf[off], 1, lcnt1);
+ ljmp1 = off;
+
+ off += _bursts(dry_run, &buf[off], pxs, cyc);
+
+ lpend.cond = ALWAYS;
+ lpend.forever = false;
+ lpend.loop = 1;
+ lpend.bjump = off - ljmp1;
+ off += _emit_LPEND(dry_run, &buf[off], &lpend);
+
+ if (lcnt0) {
+ lpend.cond = ALWAYS;
+ lpend.forever = false;
+ lpend.loop = 0;
+ lpend.bjump = off - ljmp0;
+ off += _emit_LPEND(dry_run, &buf[off], &lpend);
+ }
+
+ *bursts = lcnt1 * cyc;
+ if (lcnt0)
+ *bursts *= lcnt0;
+
+ return off;
+}
+
+static inline int _setup_loops(unsigned dry_run, u8 buf[],
+ const struct _xfer_spec *pxs)
+{
+ struct pl330_xfer *x = pxs->x;
+ u32 ccr = pxs->ccr;
+ unsigned long c, bursts = BYTE_TO_BURST(x->bytes, ccr);
+ int off = 0;
+
+ while (bursts) {
+ c = bursts;
+ off += _loop(dry_run, &buf[off], &c, pxs);
+ bursts -= c;
+ }
+
+ return off;
+}
+
+static inline int _setup_xfer(unsigned dry_run, u8 buf[],
+ const struct _xfer_spec *pxs)
+{
+ struct pl330_xfer *x = pxs->x;
+ int off = 0;
+
+ /* DMAMOV SAR, x->src_addr */
+ off += _emit_MOV(dry_run, &buf[off], SAR, x->src_addr);
+ /* DMAMOV DAR, x->dst_addr */
+ off += _emit_MOV(dry_run, &buf[off], DAR, x->dst_addr);
+
+ /* Setup Loop(s) */
+ off += _setup_loops(dry_run, &buf[off], pxs);
+
+ return off;
+}
+
+/*
+ * A req is a sequence of one or more xfer units.
+ * Returns the number of bytes taken to setup the MC for the req.
+ */
+static int _setup_req(unsigned dry_run, struct pl330_thread *thrd,
+ unsigned index, struct _xfer_spec *pxs)
+{
+ struct _pl330_req *req = &thrd->req[index];
+ struct pl330_xfer *x;
+ u8 *buf = req->mc_cpu;
+ int off = 0;
+
+ PL330_DBGMC_START(req->mc_bus);
+
+ /* DMAMOV CCR, ccr */
+ off += _emit_MOV(dry_run, &buf[off], CCR, pxs->ccr);
+
+ x = pxs->r->x;
+ do {
+ /* Error if xfer length is not aligned at burst size */
+ if (x->bytes % (BRST_SIZE(pxs->ccr) * BRST_LEN(pxs->ccr)))
+ return -EINVAL;
+
+ pxs->x = x;
+ off += _setup_xfer(dry_run, &buf[off], pxs);
+
+ x = x->next;
+ } while (x);
+
+ /* DMASEV peripheral/event */
+ off += _emit_SEV(dry_run, &buf[off], thrd->ev);
+ /* DMAEND */
+ off += _emit_END(dry_run, &buf[off]);
+
+ return off;
+}
+
+static inline u32 _prepare_ccr(const struct pl330_reqcfg *rqc)
+{
+ u32 ccr = 0;
+
+ if (rqc->src_inc)
+ ccr |= CC_SRCINC;
+
+ if (rqc->dst_inc)
+ ccr |= CC_DSTINC;
+
+ /* We set same protection levels for Src and DST for now */
+ if (rqc->privileged)
+ ccr |= CC_SRCPRI | CC_DSTPRI;
+ if (rqc->nonsecure)
+ ccr |= CC_SRCNS | CC_DSTNS;
+ if (rqc->insnaccess)
+ ccr |= CC_SRCIA | CC_DSTIA;
+
+ ccr |= (((rqc->brst_len - 1) & 0xf) << CC_SRCBRSTLEN_SHFT);
+ ccr |= (((rqc->brst_len - 1) & 0xf) << CC_DSTBRSTLEN_SHFT);
+
+ ccr |= (rqc->brst_size << CC_SRCBRSTSIZE_SHFT);
+ ccr |= (rqc->brst_size << CC_DSTBRSTSIZE_SHFT);
+
+ ccr |= (rqc->dcctl << CC_SRCCCTRL_SHFT);
+ ccr |= (rqc->scctl << CC_DSTCCTRL_SHFT);
+
+ ccr |= (rqc->swap << CC_SWAP_SHFT);
+
+ return ccr;
+}
+
+static inline bool _is_valid(u32 ccr)
+{
+ enum pl330_dstcachectrl dcctl;
+ enum pl330_srccachectrl scctl;
+
+ dcctl = (ccr >> CC_DSTCCTRL_SHFT) & CC_DRCCCTRL_MASK;
+ scctl = (ccr >> CC_SRCCCTRL_SHFT) & CC_SRCCCTRL_MASK;
+
+ if (dcctl == DINVALID1 || dcctl == DINVALID2
+ || scctl == SINVALID1 || scctl == SINVALID2)
+ return false;
+ else
+ return true;
+}
+
+/*
+ * Submit a list of xfers after which the client wants notification.
+ * Client is not notified after each xfer unit, just once after all
+ * xfer units are done or some error occurs.
+ */
+int pl330_submit_req(void *ch_id, struct pl330_req *r)
+{
+ struct pl330_thread *thrd = ch_id;
+ struct pl330_dmac *pl330;
+ struct pl330_info *pi;
+ struct _xfer_spec xs;
+ unsigned long flags;
+ void __iomem *regs;
+ unsigned idx;
+ u32 ccr;
+ int ret = 0;
+
+ /* No Req or Unacquired Channel or DMAC */
+ if (!r || !thrd || thrd->free)
+ return -EINVAL;
+
+ pl330 = thrd->dmac;
+ pi = pl330->pinfo;
+ regs = pi->base;
+
+ if (pl330->state == DYING
+ || pl330->dmac_tbd.reset_chan & (1 << thrd->id)) {
+ dev_info(thrd->dmac->pinfo->dev, "%s:%d\n",
+ __func__, __LINE__);
+ return -EAGAIN;
+ }
+
+ /* If request for non-existing peripheral */
+ if (r->rqtype != MEMTOMEM && r->peri >= pi->pcfg.num_peri) {
+ dev_info(thrd->dmac->pinfo->dev,
+ "%s:%d Invalid peripheral(%u)!\n",
+ __func__, __LINE__, r->peri);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&pl330->lock, flags);
+
+ if (_queue_full(thrd)) {
+ ret = -EAGAIN;
+ goto xfer_exit;
+ }
+
+ /* Prefer Secure Channel */
+ if (!_manager_ns(thrd))
+ r->cfg->nonsecure = 0;
+ else
+ r->cfg->nonsecure = 1;
+
+ /* Use last settings, if not provided */
+ if (r->cfg)
+ ccr = _prepare_ccr(r->cfg);
+ else
+ ccr = readl(regs + CC(thrd->id));
+
+ /* If this req doesn't have valid xfer settings */
+ if (!_is_valid(ccr)) {
+ ret = -EINVAL;
+ dev_info(thrd->dmac->pinfo->dev, "%s:%d Invalid CCR(%x)!\n",
+ __func__, __LINE__, ccr);
+ goto xfer_exit;
+ }
+
+ idx = IS_FREE(&thrd->req[0]) ? 0 : 1;
+
+ xs.ccr = ccr;
+ xs.r = r;
+
+ /* First dry run to check if req is acceptable */
+ ret = _setup_req(1, thrd, idx, &xs);
+ if (ret < 0)
+ goto xfer_exit;
+
+ if (ret > pi->mcbufsz / 2) {
+ dev_info(thrd->dmac->pinfo->dev,
+ "%s:%d Trying increasing mcbufsz\n",
+ __func__, __LINE__);
+ ret = -ENOMEM;
+ goto xfer_exit;
+ }
+
+ /* Hook the request */
+ thrd->lstenq = idx;
+ thrd->req[idx].mc_len = _setup_req(0, thrd, idx, &xs);
+ thrd->req[idx].r = r;
+
+ ret = 0;
+
+xfer_exit:
+ spin_unlock_irqrestore(&pl330->lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(pl330_submit_req);
+
+static void pl330_dotask(unsigned long data)
+{
+ struct pl330_dmac *pl330 = (struct pl330_dmac *) data;
+ struct pl330_info *pi = pl330->pinfo;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&pl330->lock, flags);
+
+ /* If the DMAC itself has gone nuts */
+ if (pl330->dmac_tbd.reset_dmac) {
+ pl330->state = DYING;
+ /* Reset the manager too */
+ pl330->dmac_tbd.reset_mngr = true;
+ /* Clear the reset flag */
+ pl330->dmac_tbd.reset_dmac = false;
+ }
+
+ if (pl330->dmac_tbd.reset_mngr) {
+ _stop(pl330->manager);
+ /* Reset all channels */
+ pl330->dmac_tbd.reset_chan = (1 << pi->pcfg.num_chan) - 1;
+ /* Clear the reset flag */
+ pl330->dmac_tbd.reset_mngr = false;
+ }
+
+ for (i = 0; i < pi->pcfg.num_chan; i++) {
+
+ if (pl330->dmac_tbd.reset_chan & (1 << i)) {
+ struct pl330_thread *thrd = &pl330->channels[i];
+ void __iomem *regs = pi->base;
+ enum pl330_op_err err;
+
+ _stop(thrd);
+
+ if (readl(regs + FSC) & (1 << thrd->id))
+ err = PL330_ERR_FAIL;
+ else
+ err = PL330_ERR_ABORT;
+
+ spin_unlock_irqrestore(&pl330->lock, flags);
+
+ _callback(thrd->req[1 - thrd->lstenq].r, err);
+ _callback(thrd->req[thrd->lstenq].r, err);
+
+ spin_lock_irqsave(&pl330->lock, flags);
+
+ thrd->req[0].r = NULL;
+ thrd->req[1].r = NULL;
+ MARK_FREE(&thrd->req[0]);
+ MARK_FREE(&thrd->req[1]);
+
+ /* Clear the reset flag */
+ pl330->dmac_tbd.reset_chan &= ~(1 << i);
+ }
+ }
+
+ spin_unlock_irqrestore(&pl330->lock, flags);
+
+ return;
+}
+
+/* Returns 1 if state was updated, 0 otherwise */
+int pl330_update(const struct pl330_info *pi)
+{
+ struct _pl330_req *rqdone;
+ struct pl330_dmac *pl330;
+ unsigned long flags;
+ void __iomem *regs;
+ u32 val;
+ int id, ev, ret = 0;
+
+ if (!pi || !pi->pl330_data)
+ return 0;
+
+ regs = pi->base;
+ pl330 = pi->pl330_data;
+
+ spin_lock_irqsave(&pl330->lock, flags);
+
+ val = readl(regs + FSM) & 0x1;
+ if (val)
+ pl330->dmac_tbd.reset_mngr = true;
+ else
+ pl330->dmac_tbd.reset_mngr = false;
+
+ val = readl(regs + FSC) & ((1 << pi->pcfg.num_chan) - 1);
+ pl330->dmac_tbd.reset_chan |= val;
+ if (val) {
+ int i = 0;
+ while (i < pi->pcfg.num_chan) {
+ if (val & (1 << i)) {
+ dev_info(pi->dev,
+ "Reset Channel-%d\t CS-%x FTC-%x\n",
+ i, readl(regs + CS(i)),
+ readl(regs + FTC(i)));
+ _stop(&pl330->channels[i]);
+ }
+ i++;
+ }
+ }
+
+ /* Check which event happened, i.e. which thread notified */
+ val = readl(regs + ES);
+ if (pi->pcfg.num_events < 32
+ && val & ~((1 << pi->pcfg.num_events) - 1)) {
+ pl330->dmac_tbd.reset_dmac = true;
+ dev_err(pi->dev, "%s:%d Unexpected!\n", __func__, __LINE__);
+ ret = 1;
+ goto updt_exit;
+ }
+
+ for (ev = 0; ev < pi->pcfg.num_events; ev++) {
+ if (val & (1 << ev)) { /* Event occurred */
+ struct pl330_thread *thrd;
+ u32 inten = readl(regs + INTEN);
+ int active;
+
+ /* Clear the event */
+ if (inten & (1 << ev))
+ writel(1 << ev, regs + INTCLR);
+
+ ret = 1;
+
+ id = pl330->events[ev];
+
+ thrd = &pl330->channels[id];
+
+ active = _thrd_active(thrd);
+ if (!active) /* Aborted */
+ continue;
+
+ active -= 1;
+
+ rqdone = &thrd->req[active];
+ MARK_FREE(rqdone);
+
+ /* Get going again ASAP */
+ _start(thrd);
+
+ /* For now, just make a list of callbacks to be done */
+ list_add_tail(&rqdone->rqd, &pl330->req_done);
+ }
+ }
+
+ /* Now that we are in no hurry, do the callbacks */
+ while (!list_empty(&pl330->req_done)) {
+ rqdone = container_of(pl330->req_done.next,
+ struct _pl330_req, rqd);
+
+ list_del_init(&rqdone->rqd);
+
+ spin_unlock_irqrestore(&pl330->lock, flags);
+ _callback(rqdone->r, PL330_ERR_NONE);
+ spin_lock_irqsave(&pl330->lock, flags);
+ }
+
+updt_exit:
+ spin_unlock_irqrestore(&pl330->lock, flags);
+
+ if (pl330->dmac_tbd.reset_dmac
+ || pl330->dmac_tbd.reset_mngr
+ || pl330->dmac_tbd.reset_chan) {
+ ret = 1;
+ tasklet_schedule(&pl330->tasks);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(pl330_update);
+
+int pl330_chan_ctrl(void *ch_id, enum pl330_chan_op op)
+{
+ struct pl330_thread *thrd = ch_id;
+ struct pl330_dmac *pl330;
+ unsigned long flags;
+ int ret = 0, active;
+
+ if (!thrd || thrd->free || thrd->dmac->state == DYING)
+ return -EINVAL;
+
+ pl330 = thrd->dmac;
+
+ spin_lock_irqsave(&pl330->lock, flags);
+
+ switch (op) {
+ case PL330_OP_FLUSH:
+ /* Make sure the channel is stopped */
+ _stop(thrd);
+
+ thrd->req[0].r = NULL;
+ thrd->req[1].r = NULL;
+ MARK_FREE(&thrd->req[0]);
+ MARK_FREE(&thrd->req[1]);
+ break;
+
+ case PL330_OP_ABORT:
+ active = _thrd_active(thrd);
+
+ /* Make sure the channel is stopped */
+ _stop(thrd);
+
+ /* ABORT is only for the active req */
+ if (!active)
+ break;
+
+ active--;
+
+ thrd->req[active].r = NULL;
+ MARK_FREE(&thrd->req[active]);
+
+ /* Start the next */
+ case PL330_OP_START:
+ if (!_start(thrd))
+ ret = -EIO;
+ break;
+
+ default:
+ ret = -EINVAL;
+ }
+
+ spin_unlock_irqrestore(&pl330->lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL(pl330_chan_ctrl);
+
+int pl330_chan_status(void *ch_id, struct pl330_chanstatus *pstatus)
+{
+ struct pl330_thread *thrd = ch_id;
+ struct pl330_dmac *pl330;
+ struct pl330_info *pi;
+ void __iomem *regs;
+ int active;
+ u32 val;
+
+ if (!pstatus || !thrd || thrd->free)
+ return -EINVAL;
+
+ pl330 = thrd->dmac;
+ pi = pl330->pinfo;
+ regs = pi->base;
+
+ /* The client should remove the DMAC and add again */
+ if (pl330->state == DYING)
+ pstatus->dmac_halted = true;
+ else
+ pstatus->dmac_halted = false;
+
+ val = readl(regs + FSC);
+ if (val & (1 << thrd->id))
+ pstatus->faulting = true;
+ else
+ pstatus->faulting = false;
+
+ active = _thrd_active(thrd);
+
+ if (!active) {
+ /* Indicate that the thread is not running */
+ pstatus->top_req = NULL;
+ pstatus->wait_req = NULL;
+ } else {
+ active--;
+ pstatus->top_req = thrd->req[active].r;
+ pstatus->wait_req = !IS_FREE(&thrd->req[1 - active])
+ ? thrd->req[1 - active].r : NULL;
+ }
+
+ pstatus->src_addr = readl(regs + SA(thrd->id));
+ pstatus->dst_addr = readl(regs + DA(thrd->id));
+
+ return 0;
+}
+EXPORT_SYMBOL(pl330_chan_status);
+
+/* Reserve an event */
+static inline int _alloc_event(struct pl330_thread *thrd)
+{
+ struct pl330_dmac *pl330 = thrd->dmac;
+ struct pl330_info *pi = pl330->pinfo;
+ int ev;
+
+ for (ev = 0; ev < pi->pcfg.num_events; ev++)
+ if (pl330->events[ev] == -1) {
+ pl330->events[ev] = thrd->id;
+ return ev;
+ }
+
+ return -1;
+}
+
+/* Upon success, returns IdentityToken for the
+ * allocated channel, NULL otherwise.
+ */
+void *pl330_request_channel(const struct pl330_info *pi)
+{
+ struct pl330_thread *thrd = NULL;
+ struct pl330_dmac *pl330;
+ unsigned long flags;
+ int chans, i;
+
+ if (!pi || !pi->pl330_data)
+ return NULL;
+
+ pl330 = pi->pl330_data;
+
+ if (pl330->state == DYING)
+ return NULL;
+
+ chans = pi->pcfg.num_chan;
+
+ spin_lock_irqsave(&pl330->lock, flags);
+
+ for (i = 0; i < chans; i++) {
+ thrd = &pl330->channels[i];
+ if (thrd->free) {
+ thrd->ev = _alloc_event(thrd);
+ if (thrd->ev >= 0) {
+ thrd->free = false;
+ thrd->lstenq = 1;
+ thrd->req[0].r = NULL;
+ MARK_FREE(&thrd->req[0]);
+ thrd->req[1].r = NULL;
+ MARK_FREE(&thrd->req[1]);
+ break;
+ }
+ }
+ thrd = NULL;
+ }
+
+ spin_unlock_irqrestore(&pl330->lock, flags);
+
+ return thrd;
+}
+EXPORT_SYMBOL(pl330_request_channel);
+
+/* Release an event */
+static inline void _free_event(struct pl330_thread *thrd, int ev)
+{
+ struct pl330_dmac *pl330 = thrd->dmac;
+ struct pl330_info *pi = pl330->pinfo;
+
+ /* If the event is valid and was held by the thread */
+ if (ev >= 0 && ev < pi->pcfg.num_events
+ && pl330->events[ev] == thrd->id)
+ pl330->events[ev] = -1;
+}
+
+void pl330_release_channel(void *ch_id)
+{
+ struct pl330_thread *thrd = ch_id;
+ struct pl330_dmac *pl330;
+ unsigned long flags;
+
+ if (!thrd || thrd->free)
+ return;
+
+ _stop(thrd);
+
+ _callback(thrd->req[1 - thrd->lstenq].r, PL330_ERR_ABORT);
+ _callback(thrd->req[thrd->lstenq].r, PL330_ERR_ABORT);
+
+ pl330 = thrd->dmac;
+
+ spin_lock_irqsave(&pl330->lock, flags);
+ _free_event(thrd, thrd->ev);
+ thrd->free = true;
+ spin_unlock_irqrestore(&pl330->lock, flags);
+}
+EXPORT_SYMBOL(pl330_release_channel);
+
+/* Initialize the structure for PL330 configuration, that can be used
+ * by the client driver to make best use of the DMAC
+ */
+static void read_dmac_config(struct pl330_info *pi)
+{
+ void __iomem *regs = pi->base;
+ u32 val;
+
+ val = readl(regs + CRD) >> CRD_DATA_WIDTH_SHIFT;
+ val &= CRD_DATA_WIDTH_MASK;
+ pi->pcfg.data_bus_width = 8 * (1 << val);
+
+ val = readl(regs + CRD) >> CRD_DATA_BUFF_SHIFT;
+ val &= CRD_DATA_BUFF_MASK;
+ pi->pcfg.data_buf_dep = val + 1;
+
+ val = readl(regs + CR0) >> CR0_NUM_CHANS_SHIFT;
+ val &= CR0_NUM_CHANS_MASK;
+ val += 1;
+ pi->pcfg.num_chan = val;
+
+ val = readl(regs + CR0);
+ if (val & CR0_PERIPH_REQ_SET) {
+ val = (val >> CR0_NUM_PERIPH_SHIFT) & CR0_NUM_PERIPH_MASK;
+ val += 1;
+ pi->pcfg.num_peri = val;
+ pi->pcfg.peri_ns = readl(regs + CR4);
+ } else {
+ pi->pcfg.num_peri = 0;
+ }
+
+ val = readl(regs + CR0);
+ if (val & CR0_BOOT_MAN_NS)
+ pi->pcfg.mode |= DMAC_MODE_NS;
+ else
+ pi->pcfg.mode &= ~DMAC_MODE_NS;
+
+ val = readl(regs + CR0) >> CR0_NUM_EVENTS_SHIFT;
+ val &= CR0_NUM_EVENTS_MASK;
+ val += 1;
+ pi->pcfg.num_events = val;
+
+ pi->pcfg.irq_ns = readl(regs + CR3);
+
+ pi->pcfg.periph_id = get_id(pi, PERIPH_ID);
+ pi->pcfg.pcell_id = get_id(pi, PCELL_ID);
+}
+
+static inline void _reset_thread(struct pl330_thread *thrd)
+{
+ struct pl330_dmac *pl330 = thrd->dmac;
+ struct pl330_info *pi = pl330->pinfo;
+
+ thrd->req[0].mc_cpu = pl330->mcode_cpu
+ + (thrd->id * pi->mcbufsz);
+ thrd->req[0].mc_bus = pl330->mcode_bus
+ + (thrd->id * pi->mcbufsz);
+ thrd->req[0].r = NULL;
+ MARK_FREE(&thrd->req[0]);
+
+ thrd->req[1].mc_cpu = thrd->req[0].mc_cpu
+ + pi->mcbufsz / 2;
+ thrd->req[1].mc_bus = thrd->req[0].mc_bus
+ + pi->mcbufsz / 2;
+ thrd->req[1].r = NULL;
+ MARK_FREE(&thrd->req[1]);
+}
+
+static int dmac_alloc_threads(struct pl330_dmac *pl330)
+{
+ struct pl330_info *pi = pl330->pinfo;
+ int chans = pi->pcfg.num_chan;
+ struct pl330_thread *thrd;
+ int i;
+
+ /* Allocate 1 Manager and 'chans' Channel threads */
+ pl330->channels = kzalloc((1 + chans) * sizeof(*thrd),
+ GFP_KERNEL);
+ if (!pl330->channels)
+ return -ENOMEM;
+
+ /* Init Channel threads */
+ for (i = 0; i < chans; i++) {
+ thrd = &pl330->channels[i];
+ thrd->id = i;
+ thrd->dmac = pl330;
+ _reset_thread(thrd);
+ thrd->free = true;
+ }
+
+ /* MANAGER is indexed at the end */
+ thrd = &pl330->channels[chans];
+ thrd->id = chans;
+ thrd->dmac = pl330;
+ thrd->free = false;
+ pl330->manager = thrd;
+
+ return 0;
+}
+
+static int dmac_alloc_resources(struct pl330_dmac *pl330)
+{
+ struct pl330_info *pi = pl330->pinfo;
+ int chans = pi->pcfg.num_chan;
+ int ret;
+
+ /*
+ * Alloc MicroCode buffer for 'chans' Channel threads.
+ * A channel's buffer offset is (Channel_Id * MCODE_BUFF_PERCHAN)
+ */
+ pl330->mcode_cpu = dma_alloc_coherent(pi->dev,
+ chans * pi->mcbufsz,
+ &pl330->mcode_bus, GFP_KERNEL);
+ if (!pl330->mcode_cpu) {
+ dev_err(pi->dev, "%s:%d Can't allocate memory!\n",
+ __func__, __LINE__);
+ return -ENOMEM;
+ }
+
+ ret = dmac_alloc_threads(pl330);
+ if (ret) {
+ dev_err(pi->dev, "%s:%d Can't to create channels for DMAC!\n",
+ __func__, __LINE__);
+ dma_free_coherent(pi->dev,
+ chans * pi->mcbufsz,
+ pl330->mcode_cpu, pl330->mcode_bus);
+ return ret;
+ }
+
+ return 0;
+}
+
+int pl330_add(struct pl330_info *pi)
+{
+ struct pl330_dmac *pl330;
+ void __iomem *regs;
+ int i, ret;
+
+ if (!pi || !pi->dev)
+ return -EINVAL;
+
+ /* If already added */
+ if (pi->pl330_data)
+ return -EINVAL;
+
+ /*
+ * If the SoC can perform reset on the DMAC, then do it
+ * before reading its configuration.
+ */
+ if (pi->dmac_reset)
+ pi->dmac_reset(pi);
+
+ regs = pi->base;
+
+ /* Check if we can handle this DMAC */
+ if (get_id(pi, PERIPH_ID) != PERIPH_ID_VAL
+ || get_id(pi, PCELL_ID) != PCELL_ID_VAL) {
+ dev_err(pi->dev, "PERIPH_ID 0x%x, PCELL_ID 0x%x !\n",
+ readl(regs + PERIPH_ID), readl(regs + PCELL_ID));
+ return -EINVAL;
+ }
+
+ /* Read the configuration of the DMAC */
+ read_dmac_config(pi);
+
+ if (pi->pcfg.num_events == 0) {
+ dev_err(pi->dev, "%s:%d Can't work without events!\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ pl330 = kzalloc(sizeof(*pl330), GFP_KERNEL);
+ if (!pl330) {
+ dev_err(pi->dev, "%s:%d Can't allocate memory!\n",
+ __func__, __LINE__);
+ return -ENOMEM;
+ }
+
+ /* Assign the info structure and private data */
+ pl330->pinfo = pi;
+ pi->pl330_data = pl330;
+
+ spin_lock_init(&pl330->lock);
+
+ INIT_LIST_HEAD(&pl330->req_done);
+
+ /* Use default MC buffer size if not provided */
+ if (!pi->mcbufsz)
+ pi->mcbufsz = MCODE_BUFF_PER_REQ * 2;
+
+ /* Mark all events as free */
+ for (i = 0; i < pi->pcfg.num_events; i++)
+ pl330->events[i] = -1;
+
+ /* Allocate resources needed by the DMAC */
+ ret = dmac_alloc_resources(pl330);
+ if (ret) {
+ dev_err(pi->dev, "Unable to create channels for DMAC\n");
+ kfree(pl330);
+ return ret;
+ }
+
+ tasklet_init(&pl330->tasks, pl330_dotask, (unsigned long) pl330);
+
+ pl330->state = INIT;
+
+ return 0;
+}
+EXPORT_SYMBOL(pl330_add);
+
+static int dmac_free_threads(struct pl330_dmac *pl330)
+{
+ struct pl330_info *pi = pl330->pinfo;
+ int chans = pi->pcfg.num_chan;
+ struct pl330_thread *thrd;
+ int i;
+
+ /* Release Channel threads */
+ for (i = 0; i < chans; i++) {
+ thrd = &pl330->channels[i];
+ pl330_release_channel((void *)thrd);
+ }
+
+ /* Free memory */
+ kfree(pl330->channels);
+
+ return 0;
+}
+
+static void dmac_free_resources(struct pl330_dmac *pl330)
+{
+ struct pl330_info *pi = pl330->pinfo;
+ int chans = pi->pcfg.num_chan;
+
+ dmac_free_threads(pl330);
+
+ dma_free_coherent(pi->dev, chans * pi->mcbufsz,
+ pl330->mcode_cpu, pl330->mcode_bus);
+}
+
+void pl330_del(struct pl330_info *pi)
+{
+ struct pl330_dmac *pl330;
+
+ if (!pi || !pi->pl330_data)
+ return;
+
+ pl330 = pi->pl330_data;
+
+ pl330->state = UNINIT;
+
+ tasklet_kill(&pl330->tasks);
+
+ /* Free DMAC resources */
+ dmac_free_resources(pl330);
+
+ kfree(pl330);
+ pi->pl330_data = NULL;
+}
+EXPORT_SYMBOL(pl330_del);
diff --git a/arch/arm/common/vic.c b/arch/arm/common/vic.c
index 1cf999ade4bc..ba65f6eedca6 100644
--- a/arch/arm/common/vic.c
+++ b/arch/arm/common/vic.c
@@ -266,13 +266,53 @@ static int vic_set_wake(unsigned int irq, unsigned int on)
#endif /* CONFIG_PM */
static struct irq_chip vic_chip = {
- .name = "VIC",
- .ack = vic_ack_irq,
- .mask = vic_mask_irq,
- .unmask = vic_unmask_irq,
- .set_wake = vic_set_wake,
+ .name = "VIC",
+ .ack = vic_ack_irq,
+ .mask = vic_mask_irq,
+ .unmask = vic_unmask_irq,
+ .set_wake = vic_set_wake,
};
+static void __init vic_disable(void __iomem *base)
+{
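+ /* Route all sources to IRQ, then disable and clear every interrupt */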
+ writel(0, base + VIC_INT_SELECT);
+ writel(0, base + VIC_INT_ENABLE);
+ writel(~0, base + VIC_INT_ENABLE_CLEAR);
+ writel(0, base + VIC_IRQ_STATUS);
+ writel(0, base + VIC_ITCR);
+ writel(~0, base + VIC_INT_SOFT_CLEAR);
+}
+
+static void __init vic_clear_interrupts(void __iomem *base)
+{
+ unsigned int i;
+
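+ /* Acknowledge and drain any pending vectored interrupts */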
+ writel(0, base + VIC_PL190_VECT_ADDR);
+ for (i = 0; i < 19; i++) {
+ unsigned int value;
+
+ value = readl(base + VIC_PL190_VECT_ADDR);
+ writel(value, base + VIC_PL190_VECT_ADDR);
+ }
+}
+
+static void __init vic_set_irq_sources(void __iomem *base,
+ unsigned int irq_start, u32 vic_sources)
+{
+ unsigned int i;
+
+ for (i = 0; i < 32; i++) {
+ if (vic_sources & (1 << i)) {
+ unsigned int irq = irq_start + i;
+
+ set_irq_chip(irq, &vic_chip);
+ set_irq_chip_data(irq, base);
+ set_irq_handler(irq, handle_level_irq);
+ set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+ }
+ }
+}
+
/*
* The PL190 cell from ARM has been modified by ST to handle 64 interrupts.
* The original cell has 32 interrupts, while the modified one has 64,
@@ -287,13 +327,7 @@ static void __init vic_init_st(void __iomem *base, unsigned int irq_start,
int vic_2nd_block = ((unsigned long)base & ~PAGE_MASK) != 0;
/* Disable all interrupts initially. */
-
- writel(0, base + VIC_INT_SELECT);
- writel(0, base + VIC_INT_ENABLE);
- writel(~0, base + VIC_INT_ENABLE_CLEAR);
- writel(0, base + VIC_IRQ_STATUS);
- writel(0, base + VIC_ITCR);
- writel(~0, base + VIC_INT_SOFT_CLEAR);
+ vic_disable(base);
/*
* Make sure we clear all existing interrupts. The vector registers
@@ -302,13 +336,8 @@ static void __init vic_init_st(void __iomem *base, unsigned int irq_start,
* the second base address, which is 0x20 in the page
*/
if (vic_2nd_block) {
- writel(0, base + VIC_PL190_VECT_ADDR);
- for (i = 0; i < 19; i++) {
- unsigned int value;
+ vic_clear_interrupts(base);
- value = readl(base + VIC_PL190_VECT_ADDR);
- writel(value, base + VIC_PL190_VECT_ADDR);
- }
/* ST has 16 vectors as well, but we don't enable them by now */
for (i = 0; i < 16; i++) {
void __iomem *reg = base + VIC_VECT_CNTL0 + (i * 4);
@@ -318,16 +347,7 @@ static void __init vic_init_st(void __iomem *base, unsigned int irq_start,
writel(32, base + VIC_PL190_DEF_VECT_ADDR);
}
- for (i = 0; i < 32; i++) {
- if (vic_sources & (1 << i)) {
- unsigned int irq = irq_start + i;
-
- set_irq_chip(irq, &vic_chip);
- set_irq_chip_data(irq, base);
- set_irq_handler(irq, handle_level_irq);
- set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
- }
- }
+ vic_set_irq_sources(base, irq_start, vic_sources);
}
/**
@@ -365,37 +385,14 @@ void __init vic_init(void __iomem *base, unsigned int irq_start,
}
/* Disable all interrupts initially. */
+ vic_disable(base);
- writel(0, base + VIC_INT_SELECT);
- writel(0, base + VIC_INT_ENABLE);
- writel(~0, base + VIC_INT_ENABLE_CLEAR);
- writel(0, base + VIC_IRQ_STATUS);
- writel(0, base + VIC_ITCR);
- writel(~0, base + VIC_INT_SOFT_CLEAR);
-
- /*
- * Make sure we clear all existing interrupts
- */
- writel(0, base + VIC_PL190_VECT_ADDR);
- for (i = 0; i < 19; i++) {
- unsigned int value;
-
- value = readl(base + VIC_PL190_VECT_ADDR);
- writel(value, base + VIC_PL190_VECT_ADDR);
- }
+ /* Make sure we clear all existing interrupts */
+ vic_clear_interrupts(base);
vic_init2(base);
- for (i = 0; i < 32; i++) {
- if (vic_sources & (1 << i)) {
- unsigned int irq = irq_start + i;
-
- set_irq_chip(irq, &vic_chip);
- set_irq_chip_data(irq, base);
- set_irq_handler(irq, handle_level_irq);
- set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
- }
- }
+ vic_set_irq_sources(base, irq_start, vic_sources);
vic_pm_register(base, irq_start, resume_sources);
}
diff --git a/arch/arm/configs/am3517_evm_defconfig b/arch/arm/configs/am3517_evm_defconfig
index 66a10b50d938..e4f4fb522bac 100644
--- a/arch/arm/configs/am3517_evm_defconfig
+++ b/arch/arm/configs/am3517_evm_defconfig
@@ -422,15 +422,29 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
#
# CONFIG_NET_PKTGEN is not set
# CONFIG_HAMRADIO is not set
-# CONFIG_CAN is not set
+CONFIG_CAN=y
+CONFIG_CAN_RAW=y
+CONFIG_CAN_BCM=y
+
+#
+# CAN Device Drivers
+#
+CONFIG_CAN_VCAN=y
+CONFIG_CAN_DEV=y
+CONFIG_CAN_CALC_BITTIMING=y
+CONFIG_CAN_TI_HECC=y
+# CONFIG_CAN_SJA1000 is not set
+
+#
+# CAN USB interfaces
+#
+# CONFIG_CAN_EMS_USB is not set
+CONFIG_CAN_DEBUG_DEVICES=y
# CONFIG_IRDA is not set
# CONFIG_BT is not set
# CONFIG_AF_RXRPC is not set
CONFIG_WIRELESS=y
# CONFIG_CFG80211 is not set
-CONFIG_CFG80211_DEFAULT_PS_VALUE=0
-# CONFIG_WIRELESS_OLD_REGULATORY is not set
-# CONFIG_WIRELESS_EXT is not set
# CONFIG_LIB80211 is not set
#
@@ -517,7 +531,75 @@ CONFIG_SCSI_LOWLEVEL=y
# CONFIG_SCSI_OSD_INITIATOR is not set
# CONFIG_ATA is not set
# CONFIG_MD is not set
-# CONFIG_NETDEVICES is not set
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+# CONFIG_VETH is not set
+CONFIG_PHYLIB=y
+
+#
+# MII PHY device drivers
+#
+# CONFIG_MARVELL_PHY is not set
+# CONFIG_DAVICOM_PHY is not set
+# CONFIG_QSEMI_PHY is not set
+# CONFIG_LXT_PHY is not set
+# CONFIG_CICADA_PHY is not set
+# CONFIG_VITESSE_PHY is not set
+# CONFIG_SMSC_PHY is not set
+# CONFIG_BROADCOM_PHY is not set
+# CONFIG_ICPLUS_PHY is not set
+# CONFIG_REALTEK_PHY is not set
+# CONFIG_NATIONAL_PHY is not set
+# CONFIG_STE10XP is not set
+# CONFIG_LSI_ET1011C_PHY is not set
+# CONFIG_FIXED_PHY is not set
+# CONFIG_MDIO_BITBANG is not set
+CONFIG_NET_ETHERNET=y
+# CONFIG_MII is not set
+# CONFIG_AX88796 is not set
+# CONFIG_SMC91X is not set
+CONFIG_TI_DAVINCI_EMAC=y
+# CONFIG_DM9000 is not set
+# CONFIG_ETHOC is not set
+# CONFIG_SMC911X is not set
+# CONFIG_SMSC911X is not set
+# CONFIG_DNET is not set
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
+# CONFIG_B44 is not set
+# CONFIG_KS8842 is not set
+# CONFIG_KS8851_MLL is not set
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+# CONFIG_WLAN is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
+
+#
+# USB Network Adapters
+#
+# CONFIG_USB_CATC is not set
+# CONFIG_USB_KAWETH is not set
+# CONFIG_USB_PEGASUS is not set
+# CONFIG_USB_RTL8150 is not set
+# CONFIG_USB_USBNET is not set
+# CONFIG_WAN is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
# CONFIG_ISDN is not set
# CONFIG_PHONE is not set
@@ -692,7 +774,57 @@ CONFIG_SSB_POSSIBLE=y
#
# CONFIG_VGASTATE is not set
# CONFIG_VIDEO_OUTPUT_CONTROL is not set
-# CONFIG_FB is not set
+CONFIG_FB=y
+# CONFIG_FIRMWARE_EDID is not set
+# CONFIG_FB_DDC is not set
+# CONFIG_FB_BOOT_VESA_SUPPORT is not set
+CONFIG_FB_CFB_FILLRECT=y
+CONFIG_FB_CFB_COPYAREA=y
+CONFIG_FB_CFB_IMAGEBLIT=y
+# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
+# CONFIG_FB_SYS_FILLRECT is not set
+# CONFIG_FB_SYS_COPYAREA is not set
+# CONFIG_FB_SYS_IMAGEBLIT is not set
+# CONFIG_FB_FOREIGN_ENDIAN is not set
+# CONFIG_FB_SYS_FOPS is not set
+# CONFIG_FB_SVGALIB is not set
+# CONFIG_FB_MACMODES is not set
+# CONFIG_FB_BACKLIGHT is not set
+# CONFIG_FB_MODE_HELPERS is not set
+# CONFIG_FB_TILEBLITTING is not set
+
+#
+# Frame buffer hardware drivers
+#
+# CONFIG_FB_ARMCLCD is not set
+# CONFIG_FB_S1D13XXX is not set
+# CONFIG_FB_VIRTUAL is not set
+# CONFIG_FB_METRONOME is not set
+# CONFIG_FB_MB862XX is not set
+# CONFIG_FB_BROADSHEET is not set
+# CONFIG_FB_OMAP_BOOTLOADER_INIT is not set
+CONFIG_OMAP2_VRAM=y
+CONFIG_OMAP2_VRFB=y
+CONFIG_OMAP2_DSS=y
+CONFIG_OMAP2_VRAM_SIZE=4
+CONFIG_OMAP2_DSS_DEBUG_SUPPORT=y
+# CONFIG_OMAP2_DSS_RFBI is not set
+CONFIG_OMAP2_DSS_VENC=y
+# CONFIG_OMAP2_DSS_SDI is not set
+# CONFIG_OMAP2_DSS_DSI is not set
+# CONFIG_OMAP2_DSS_FAKE_VSYNC is not set
+CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK=4
+CONFIG_FB_OMAP2=y
+CONFIG_FB_OMAP2_DEBUG_SUPPORT=y
+# CONFIG_FB_OMAP2_FORCE_AUTO_UPDATE is not set
+CONFIG_FB_OMAP2_NUM_FBS=3
+
+#
+# OMAP2/3 Display Device Drivers
+#
+CONFIG_PANEL_GENERIC=y
+# CONFIG_PANEL_SHARP_LS037V7DW01 is not set
+CONFIG_PANEL_SHARP_LQ043T1DG01=y
# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
#
diff --git a/arch/arm/configs/ams_delta_defconfig b/arch/arm/configs/ams_delta_defconfig
index 3b3a3775bbf4..6d8a0c891f80 100644
--- a/arch/arm/configs/ams_delta_defconfig
+++ b/arch/arm/configs/ams_delta_defconfig
@@ -47,6 +47,7 @@ CONFIG_SYSVIPC_SYSCTL=y
# CONFIG_TASKSTATS is not set
# CONFIG_UTS_NS is not set
# CONFIG_AUDIT is not set
+CONFIG_TREE_PREEMPT_RCU=y
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
CONFIG_SYSFS_DEPRECATED=y
@@ -95,9 +96,8 @@ CONFIG_KMOD=y
# Block layer
#
CONFIG_BLOCK=y
-# CONFIG_LBD is not set
+# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_IO_TRACE is not set
-# CONFIG_LSF is not set
#
# IO Schedulers
@@ -699,6 +699,7 @@ CONFIG_SERIO=y
CONFIG_SERIO_SERPORT=y
CONFIG_SERIO_LIBPS2=y
# CONFIG_SERIO_RAW is not set
+CONFIG_SERIO_AMS_DELTA=y
# CONFIG_GAMEPORT is not set
#
@@ -835,7 +836,8 @@ CONFIG_DAB=y
#
# Graphics support
#
-# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_LCD_CLASS_DEVICE=y
#
# Display device support
@@ -1283,7 +1285,7 @@ CONFIG_DEBUG_PREEMPT=y
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
# CONFIG_DEBUG_KOBJECT is not set
-CONFIG_DEBUG_BUGVERBOSE=y
+# CONFIG_DEBUG_BUGVERBOSE is not set
# CONFIG_DEBUG_INFO is not set
# CONFIG_DEBUG_VM is not set
# CONFIG_DEBUG_LIST is not set
diff --git a/arch/arm/configs/cns3420vb_defconfig b/arch/arm/configs/cns3420vb_defconfig
new file mode 100644
index 000000000000..d5c088149e46
--- /dev/null
+++ b/arch/arm/configs/cns3420vb_defconfig
@@ -0,0 +1,831 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.34-rc6
+# Sun May 2 21:58:08 2010
+#
+CONFIG_ARM=y
+CONFIG_SYS_SUPPORTS_APM_EMULATION=y
+CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_HAVE_PROC_CPU=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_HAVE_LATENCYTOP_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_HARDIRQS_SW_RESEND=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_NEED_DMA_MAP_STATE=y
+CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
+CONFIG_OPROFILE_ARMV6=y
+CONFIG_OPROFILE_ARM11_CORE=y
+CONFIG_VECTORS_BASE=0xffff0000
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_LOCALVERSION=""
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_LZO=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_LZO is not set
+# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+# CONFIG_BSD_PROCESS_ACCT is not set
+
+#
+# RCU Subsystem
+#
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_TINY_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
+# CONFIG_TREE_RCU_TRACE is not set
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_CGROUPS=y
+# CONFIG_CGROUP_DEBUG is not set
+# CONFIG_CGROUP_NS is not set
+# CONFIG_CGROUP_FREEZER is not set
+# CONFIG_CGROUP_DEVICE is not set
+# CONFIG_CPUSETS is not set
+# CONFIG_CGROUP_CPUACCT is not set
+# CONFIG_RESOURCE_COUNTERS is not set
+# CONFIG_CGROUP_SCHED is not set
+CONFIG_SYSFS_DEPRECATED=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_RELAY=y
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_USER_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+CONFIG_RD_BZIP2=y
+CONFIG_RD_LZMA=y
+CONFIG_RD_LZO=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
+# CONFIG_EMBEDDED is not set
+CONFIG_UID16=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_AIO=y
+CONFIG_HAVE_PERF_EVENTS=y
+CONFIG_PERF_USE_VMALLOC=y
+
+#
+# Kernel Performance Events And Counters
+#
+# CONFIG_PERF_EVENTS is not set
+# CONFIG_PERF_COUNTERS is not set
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_COMPAT_BRK=y
+CONFIG_SLAB=y
+# CONFIG_SLUB is not set
+# CONFIG_SLOB is not set
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=m
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_KPROBES is not set
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
+CONFIG_SLOW_WORK=y
+# CONFIG_SLOW_WORK_DEBUG is not set
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_BLOCK=y
+CONFIG_LBDAF=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=m
+# CONFIG_CFQ_GROUP_IOSCHED is not set
+CONFIG_DEFAULT_DEADLINE=y
+# CONFIG_DEFAULT_CFQ is not set
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="deadline"
+# CONFIG_INLINE_SPIN_TRYLOCK is not set
+# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK is not set
+# CONFIG_INLINE_SPIN_LOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
+CONFIG_INLINE_SPIN_UNLOCK=y
+# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
+CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
+# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_READ_TRYLOCK is not set
+# CONFIG_INLINE_READ_LOCK is not set
+# CONFIG_INLINE_READ_LOCK_BH is not set
+# CONFIG_INLINE_READ_LOCK_IRQ is not set
+# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
+CONFIG_INLINE_READ_UNLOCK=y
+# CONFIG_INLINE_READ_UNLOCK_BH is not set
+CONFIG_INLINE_READ_UNLOCK_IRQ=y
+# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_WRITE_TRYLOCK is not set
+# CONFIG_INLINE_WRITE_LOCK is not set
+# CONFIG_INLINE_WRITE_LOCK_BH is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
+CONFIG_INLINE_WRITE_UNLOCK=y
+# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
+CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
+# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
+# CONFIG_MUTEX_SPIN_ON_OWNER is not set
+# CONFIG_FREEZER is not set
+
+#
+# System Type
+#
+CONFIG_MMU=y
+# CONFIG_ARCH_AAEC2000 is not set
+# CONFIG_ARCH_INTEGRATOR is not set
+# CONFIG_ARCH_REALVIEW is not set
+# CONFIG_ARCH_VERSATILE is not set
+# CONFIG_ARCH_AT91 is not set
+# CONFIG_ARCH_BCMRING is not set
+# CONFIG_ARCH_CLPS711X is not set
+CONFIG_ARCH_CNS3XXX=y
+# CONFIG_ARCH_GEMINI is not set
+# CONFIG_ARCH_EBSA110 is not set
+# CONFIG_ARCH_EP93XX is not set
+# CONFIG_ARCH_FOOTBRIDGE is not set
+# CONFIG_ARCH_MXC is not set
+# CONFIG_ARCH_STMP3XXX is not set
+# CONFIG_ARCH_NETX is not set
+# CONFIG_ARCH_H720X is not set
+# CONFIG_ARCH_IOP13XX is not set
+# CONFIG_ARCH_IOP32X is not set
+# CONFIG_ARCH_IOP33X is not set
+# CONFIG_ARCH_IXP23XX is not set
+# CONFIG_ARCH_IXP2000 is not set
+# CONFIG_ARCH_IXP4XX is not set
+# CONFIG_ARCH_L7200 is not set
+# CONFIG_ARCH_DOVE is not set
+# CONFIG_ARCH_KIRKWOOD is not set
+# CONFIG_ARCH_LOKI is not set
+# CONFIG_ARCH_MV78XX0 is not set
+# CONFIG_ARCH_ORION5X is not set
+# CONFIG_ARCH_MMP is not set
+# CONFIG_ARCH_KS8695 is not set
+# CONFIG_ARCH_NS9XXX is not set
+# CONFIG_ARCH_W90X900 is not set
+# CONFIG_ARCH_NUC93X is not set
+# CONFIG_ARCH_PNX4008 is not set
+# CONFIG_ARCH_PXA is not set
+# CONFIG_ARCH_MSM is not set
+# CONFIG_ARCH_SHMOBILE is not set
+# CONFIG_ARCH_RPC is not set
+# CONFIG_ARCH_SA1100 is not set
+# CONFIG_ARCH_S3C2410 is not set
+# CONFIG_ARCH_S3C64XX is not set
+# CONFIG_ARCH_S5P6440 is not set
+# CONFIG_ARCH_S5P6442 is not set
+# CONFIG_ARCH_S5PC1XX is not set
+# CONFIG_ARCH_S5PV210 is not set
+# CONFIG_ARCH_SHARK is not set
+# CONFIG_ARCH_LH7A40X is not set
+# CONFIG_ARCH_U300 is not set
+# CONFIG_ARCH_U8500 is not set
+# CONFIG_ARCH_NOMADIK is not set
+# CONFIG_ARCH_DAVINCI is not set
+# CONFIG_ARCH_OMAP is not set
+
+#
+# CNS3XXX platform type
+#
+CONFIG_MACH_CNS3420VB=y
+
+#
+# Processor Type
+#
+CONFIG_CPU_V6=y
+# CONFIG_CPU_32v6K is not set
+CONFIG_CPU_32v6=y
+CONFIG_CPU_ABRT_EV6=y
+CONFIG_CPU_PABRT_V6=y
+CONFIG_CPU_CACHE_V6=y
+CONFIG_CPU_CACHE_VIPT=y
+CONFIG_CPU_COPY_V6=y
+CONFIG_CPU_TLB_V6=y
+CONFIG_CPU_HAS_ASID=y
+CONFIG_CPU_CP15=y
+CONFIG_CPU_CP15_MMU=y
+
+#
+# Processor Features
+#
+CONFIG_ARM_THUMB=y
+# CONFIG_CPU_ICACHE_DISABLE is not set
+# CONFIG_CPU_DCACHE_DISABLE is not set
+# CONFIG_CPU_BPREDICT_DISABLE is not set
+CONFIG_ARM_L1_CACHE_SHIFT=5
+CONFIG_CPU_HAS_PMU=y
+# CONFIG_ARM_ERRATA_411920 is not set
+CONFIG_ARM_GIC=y
+
+#
+# Bus support
+#
+# CONFIG_PCI_SYSCALL is not set
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_PCCARD is not set
+
+#
+# Kernel Features
+#
+# CONFIG_NO_HZ is not set
+# CONFIG_HIGH_RES_TIMERS is not set
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+CONFIG_VMSPLIT_3G=y
+# CONFIG_VMSPLIT_2G is not set
+# CONFIG_VMSPLIT_1G is not set
+CONFIG_PAGE_OFFSET=0xC0000000
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+# CONFIG_PREEMPT is not set
+CONFIG_HZ=100
+CONFIG_AEABI=y
+CONFIG_OABI_COMPAT=y
+# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set
+# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set
+# CONFIG_HIGHMEM is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4
+# CONFIG_PHYS_ADDR_T_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=0
+CONFIG_VIRT_TO_BUS=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+CONFIG_ALIGNMENT_TRAP=y
+# CONFIG_UACCESS_WITH_MEMCPY is not set
+
+#
+# Boot options
+#
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_CMDLINE="console=ttyS0,38400 mem=128M root=/dev/mmcblk0p1 ro rootwait"
+# CONFIG_XIP_KERNEL is not set
+# CONFIG_KEXEC is not set
+
+#
+# CPU Power Management
+#
+# CONFIG_CPU_IDLE is not set
+
+#
+# Floating point emulation
+#
+
+#
+# At least one emulation must be selected
+#
+# CONFIG_FPE_NWFPE is not set
+# CONFIG_FPE_FASTFPE is not set
+# CONFIG_VFP is not set
+
+#
+# Userspace binary formats
+#
+CONFIG_BINFMT_ELF=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_HAVE_AOUT=y
+# CONFIG_BINFMT_AOUT is not set
+# CONFIG_BINFMT_MISC is not set
+
+#
+# Power management options
+#
+# CONFIG_PM is not set
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+# CONFIG_NET is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=y
+# CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_EXTRA_FIRMWARE=""
+# CONFIG_SYS_HYPERVISOR is not set
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
+# CONFIG_MTD_CONCAT is not set
+CONFIG_MTD_PARTITIONS=y
+# CONFIG_MTD_REDBOOT_PARTS is not set
+CONFIG_MTD_CMDLINE_PARTS=y
+# CONFIG_MTD_AFS_PARTS is not set
+# CONFIG_MTD_AR7_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLKDEVS=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+# CONFIG_MTD_OOPS is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+CONFIG_MTD_CFI=y
+# CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_GEN_PROBE=y
+# CONFIG_MTD_CFI_ADV_OPTIONS is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+# CONFIG_MTD_CFI_INTELEXT is not set
+CONFIG_MTD_CFI_AMDSTD=y
+# CONFIG_MTD_CFI_STAA is not set
+CONFIG_MTD_CFI_UTIL=y
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+CONFIG_MTD_PHYSMAP=y
+# CONFIG_MTD_PHYSMAP_COMPAT is not set
+# CONFIG_MTD_ARM_INTEGRATOR is not set
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+# CONFIG_MTD_NAND is not set
+# CONFIG_MTD_ONENAND is not set
+
+#
+# LPDDR flash memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
+
+#
+# UBI - Unsorted block images
+#
+# CONFIG_MTD_UBI is not set
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=y
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+
+#
+# DRBD disabled because PROC_FS, INET or CONNECTOR not selected
+#
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=20000
+# CONFIG_BLK_DEV_XIP is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_MISC_DEVICES is not set
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+CONFIG_SCSI_MOD=y
+# CONFIG_RAID_ATTRS is not set
+CONFIG_SCSI=y
+CONFIG_SCSI_DMA=y
+# CONFIG_SCSI_TGT is not set
+# CONFIG_SCSI_NETLINK is not set
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=y
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+# CONFIG_BLK_DEV_SR is not set
+# CONFIG_CHR_DEV_SG is not set
+# CONFIG_CHR_DEV_SCH is not set
+# CONFIG_SCSI_MULTI_LUN is not set
+# CONFIG_SCSI_CONSTANTS is not set
+# CONFIG_SCSI_LOGGING is not set
+# CONFIG_SCSI_SCAN_ASYNC is not set
+CONFIG_SCSI_WAIT_SCAN=m
+
+#
+# SCSI Transports
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+# CONFIG_SCSI_SAS_LIBSAS is not set
+# CONFIG_SCSI_SRP_ATTRS is not set
+CONFIG_SCSI_LOWLEVEL=y
+# CONFIG_LIBFC is not set
+# CONFIG_LIBFCOE is not set
+# CONFIG_SCSI_DEBUG is not set
+# CONFIG_SCSI_DH is not set
+# CONFIG_SCSI_OSD_INITIATOR is not set
+CONFIG_ATA=y
+# CONFIG_ATA_NONSTANDARD is not set
+CONFIG_ATA_VERBOSE_ERROR=y
+# CONFIG_SATA_PMP is not set
+# CONFIG_ATA_SFF is not set
+# CONFIG_MD is not set
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+# CONFIG_INPUT_FF_MEMLESS is not set
+# CONFIG_INPUT_POLLDEV is not set
+# CONFIG_INPUT_SPARSEKMAP is not set
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_EVDEV is not set
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TABLET is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_VT_HW_CONSOLE_BINDING is not set
+CONFIG_DEVKMEM=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=4
+CONFIG_SERIAL_8250_RUNTIME_UARTS=4
+# CONFIG_SERIAL_8250_EXTENDED is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_TIMBERDALE is not set
+CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=16
+# CONFIG_IPMI_HANDLER is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_R3964 is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
+# CONFIG_I2C is not set
+# CONFIG_SPI is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+# CONFIG_HWMON is not set
+# CONFIG_THERMAL is not set
+# CONFIG_WATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+# CONFIG_SSB is not set
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_REGULATOR is not set
+# CONFIG_MEDIA_SUPPORT is not set
+
+#
+# Graphics support
+#
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+# CONFIG_FB is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
+
+#
+# Console display driver support
+#
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_DUMMY_CONSOLE=y
+# CONFIG_SOUND is not set
+# CONFIG_HID_SUPPORT is not set
+# CONFIG_USB_SUPPORT is not set
+CONFIG_MMC=y
+# CONFIG_MMC_DEBUG is not set
+# CONFIG_MMC_UNSAFE_RESUME is not set
+
+#
+# MMC/SD/SDIO Card Drivers
+#
+CONFIG_MMC_BLOCK=y
+CONFIG_MMC_BLOCK_BOUNCE=y
+# CONFIG_SDIO_UART is not set
+# CONFIG_MMC_TEST is not set
+
+#
+# MMC/SD/SDIO Host Controller Drivers
+#
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+# CONFIG_MEMSTICK is not set
+# CONFIG_NEW_LEDS is not set
+# CONFIG_ACCESSIBILITY is not set
+CONFIG_RTC_LIB=y
+# CONFIG_RTC_CLASS is not set
+# CONFIG_DMADEVICES is not set
+# CONFIG_AUXDISPLAY is not set
+# CONFIG_UIO is not set
+
+#
+# TI VLYNQ
+#
+# CONFIG_STAGING is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+# CONFIG_EXT2_FS_POSIX_ACL is not set
+# CONFIG_EXT2_FS_SECURITY is not set
+# CONFIG_EXT2_FS_XIP is not set
+# CONFIG_EXT3_FS is not set
+# CONFIG_EXT4_FS is not set
+CONFIG_FS_MBCACHE=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
+CONFIG_DNOTIFY=y
+CONFIG_INOTIFY=y
+CONFIG_INOTIFY_USER=y
+# CONFIG_QUOTA is not set
+# CONFIG_AUTOFS_FS is not set
+CONFIG_AUTOFS4_FS=y
+# CONFIG_FUSE_FS is not set
+
+#
+# Caches
+#
+CONFIG_FSCACHE=y
+# CONFIG_FSCACHE_STATS is not set
+# CONFIG_FSCACHE_HISTOGRAM is not set
+# CONFIG_FSCACHE_DEBUG is not set
+# CONFIG_FSCACHE_OBJECT_LIST is not set
+# CONFIG_CACHEFILES is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+# CONFIG_MSDOS_FS is not set
+# CONFIG_VFAT_FS is not set
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_HUGETLB_PAGE is not set
+# CONFIG_CONFIGFS_FS is not set
+CONFIG_MISC_FILESYSTEMS=y
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_JFFS2_FS is not set
+# CONFIG_LOGFS is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_SQUASHFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+# CONFIG_NLS is not set
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_FRAME_WARN=1024
+# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_STRIP_ASM_SYMS is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_DEBUG_FS=y
+# CONFIG_HEADERS_CHECK is not set
+# CONFIG_DEBUG_KERNEL is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_FRAME_POINTER=y
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_LKDTM is not set
+# CONFIG_LATENCYTOP is not set
+# CONFIG_SYSCTL_SYSCALL_CHECK is not set
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_RING_BUFFER=y
+CONFIG_RING_BUFFER_ALLOW_SWAP=y
+CONFIG_TRACING_SUPPORT=y
+# CONFIG_FTRACE is not set
+# CONFIG_DYNAMIC_DEBUG is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_ARM_UNWIND is not set
+# CONFIG_DEBUG_USER is not set
+# CONFIG_OC_ETM is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
+# CONFIG_DEFAULT_SECURITY_SELINUX is not set
+# CONFIG_DEFAULT_SECURITY_SMACK is not set
+# CONFIG_DEFAULT_SECURITY_TOMOYO is not set
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_DEFAULT_SECURITY=""
+# CONFIG_CRYPTO is not set
+# CONFIG_BINARY_PRINTF is not set
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+CONFIG_GENERIC_FIND_LAST_BIT=y
+CONFIG_CRC_CCITT=y
+# CONFIG_CRC16 is not set
+# CONFIG_CRC_T10DIF is not set
+# CONFIG_CRC_ITU_T is not set
+CONFIG_CRC32=y
+# CONFIG_CRC7 is not set
+# CONFIG_LIBCRC32C is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_LZO_DECOMPRESS=y
+CONFIG_DECOMPRESS_GZIP=y
+CONFIG_DECOMPRESS_BZIP2=y
+CONFIG_DECOMPRESS_LZMA=y
+CONFIG_DECOMPRESS_LZO=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
+CONFIG_GENERIC_ATOMIC64=y
diff --git a/arch/arm/configs/devkit8000_defconfig b/arch/arm/configs/devkit8000_defconfig
index 61a817e8cf81..c7a68202fa3f 100644
--- a/arch/arm/configs/devkit8000_defconfig
+++ b/arch/arm/configs/devkit8000_defconfig
@@ -1,13 +1,14 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.33-rc6
-# Thu Feb 4 15:42:56 2010
+# Linux kernel version: 2.6.34-rc2
+# Wed Mar 24 13:27:25 2010
#
CONFIG_ARM=y
CONFIG_SYS_SUPPORTS_APM_EMULATION=y
CONFIG_GENERIC_GPIO=y
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_HAVE_PROC_CPU=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_STACKTRACE_SUPPORT=y
CONFIG_HAVE_LATENCYTOP_SUPPORT=y
@@ -19,7 +20,9 @@ CONFIG_RWSEM_GENERIC_SPINLOCK=y
CONFIG_ARCH_HAS_CPUFREQ=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_NEED_DMA_MAP_STATE=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
+CONFIG_ARM_L1_CACHE_SHIFT_6=y
CONFIG_VECTORS_BASE=0xffff0000
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
CONFIG_CONSTRUCTORS=y
@@ -60,11 +63,6 @@ CONFIG_RCU_FANOUT=32
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_GROUP_SCHED=y
-CONFIG_FAIR_GROUP_SCHED=y
-# CONFIG_RT_GROUP_SCHED is not set
-CONFIG_USER_SCHED=y
-# CONFIG_CGROUP_SCHED is not set
# CONFIG_CGROUPS is not set
# CONFIG_SYSFS_DEPRECATED_V2 is not set
# CONFIG_RELAY is not set
@@ -96,10 +94,14 @@ CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
CONFIG_AIO=y
+CONFIG_HAVE_PERF_EVENTS=y
+CONFIG_PERF_USE_VMALLOC=y
#
# Kernel Performance Events And Counters
#
+# CONFIG_PERF_EVENTS is not set
+# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_COMPAT_BRK=y
CONFIG_SLAB=y
@@ -170,7 +172,7 @@ CONFIG_INLINE_WRITE_UNLOCK=y
CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
# CONFIG_MUTEX_SPIN_ON_OWNER is not set
-# CONFIG_FREEZER is not set
+CONFIG_FREEZER=y
#
# System Type
@@ -181,6 +183,7 @@ CONFIG_MMU=y
# CONFIG_ARCH_REALVIEW is not set
# CONFIG_ARCH_VERSATILE is not set
# CONFIG_ARCH_AT91 is not set
+# CONFIG_ARCH_BCMRING is not set
# CONFIG_ARCH_CLPS711X is not set
# CONFIG_ARCH_GEMINI is not set
# CONFIG_ARCH_EBSA110 is not set
@@ -190,7 +193,6 @@ CONFIG_MMU=y
# CONFIG_ARCH_STMP3XXX is not set
# CONFIG_ARCH_NETX is not set
# CONFIG_ARCH_H720X is not set
-# CONFIG_ARCH_NOMADIK is not set
# CONFIG_ARCH_IOP13XX is not set
# CONFIG_ARCH_IOP32X is not set
# CONFIG_ARCH_IOP33X is not set
@@ -207,21 +209,26 @@ CONFIG_MMU=y
# CONFIG_ARCH_KS8695 is not set
# CONFIG_ARCH_NS9XXX is not set
# CONFIG_ARCH_W90X900 is not set
+# CONFIG_ARCH_NUC93X is not set
# CONFIG_ARCH_PNX4008 is not set
# CONFIG_ARCH_PXA is not set
# CONFIG_ARCH_MSM is not set
+# CONFIG_ARCH_SHMOBILE is not set
# CONFIG_ARCH_RPC is not set
# CONFIG_ARCH_SA1100 is not set
# CONFIG_ARCH_S3C2410 is not set
# CONFIG_ARCH_S3C64XX is not set
+# CONFIG_ARCH_S5P6440 is not set
+# CONFIG_ARCH_S5P6442 is not set
# CONFIG_ARCH_S5PC1XX is not set
+# CONFIG_ARCH_S5PV210 is not set
# CONFIG_ARCH_SHARK is not set
# CONFIG_ARCH_LH7A40X is not set
# CONFIG_ARCH_U300 is not set
+# CONFIG_ARCH_U8500 is not set
+# CONFIG_ARCH_NOMADIK is not set
# CONFIG_ARCH_DAVINCI is not set
CONFIG_ARCH_OMAP=y
-# CONFIG_ARCH_BCMRING is not set
-# CONFIG_ARCH_U8500 is not set
#
# TI OMAP Implementations
@@ -237,16 +244,20 @@ CONFIG_ARCH_OMAP3=y
# OMAP Feature Selections
#
# CONFIG_OMAP_RESET_CLOCKS is not set
-# CONFIG_OMAP_MUX is not set
+CONFIG_OMAP_MUX=y
+# CONFIG_OMAP_MUX_DEBUG is not set
+CONFIG_OMAP_MUX_WARNINGS=y
CONFIG_OMAP_MCBSP=y
# CONFIG_OMAP_MBOX_FWK is not set
# CONFIG_OMAP_MPU_TIMER is not set
CONFIG_OMAP_32K_TIMER=y
+# CONFIG_OMAP3_L2_AUX_SECURE_SAVE_RESTORE is not set
CONFIG_OMAP_32K_TIMER_HZ=128
CONFIG_OMAP_DM_TIMER=y
# CONFIG_OMAP_PM_NONE is not set
CONFIG_OMAP_PM_NOOP=y
CONFIG_ARCH_OMAP3430=y
+CONFIG_OMAP_PACKAGE_CUS=y
#
# OMAP Board Type
@@ -295,6 +306,7 @@ CONFIG_ARM_THUMB=y
# CONFIG_CPU_BPREDICT_DISABLE is not set
CONFIG_HAS_TLS_REG=y
CONFIG_ARM_L1_CACHE_SHIFT=6
+CONFIG_CPU_HAS_PMU=y
# CONFIG_ARM_ERRATA_430973 is not set
# CONFIG_ARM_ERRATA_458693 is not set
# CONFIG_ARM_ERRATA_460075 is not set
@@ -387,7 +399,14 @@ CONFIG_HAVE_AOUT=y
#
# Power management options
#
-# CONFIG_PM is not set
+CONFIG_PM=y
+# CONFIG_PM_DEBUG is not set
+CONFIG_PM_SLEEP=y
+CONFIG_SUSPEND=y
+CONFIG_SUSPEND_FREEZER=y
+# CONFIG_APM_EMULATION is not set
+# CONFIG_PM_RUNTIME is not set
+CONFIG_PM_OPS=y
CONFIG_ARCH_SUSPEND_POSSIBLE=y
CONFIG_NET=y
@@ -395,7 +414,6 @@ CONFIG_NET=y
# Networking options
#
CONFIG_PACKET=y
-# CONFIG_PACKET_MMAP is not set
CONFIG_UNIX=y
CONFIG_XFRM=y
# CONFIG_XFRM_USER is not set
@@ -666,6 +684,7 @@ CONFIG_HAVE_IDE=y
#
# SCSI device support
#
+CONFIG_SCSI_MOD=y
# CONFIG_RAID_ATTRS is not set
CONFIG_SCSI=y
CONFIG_SCSI_DMA=y
@@ -717,6 +736,7 @@ CONFIG_NET_ETHERNET=y
CONFIG_MII=y
# CONFIG_AX88796 is not set
# CONFIG_SMC91X is not set
+# CONFIG_TI_DAVINCI_EMAC is not set
CONFIG_DM9000=y
CONFIG_DM9000_DEBUGLEVEL=4
CONFIG_DM9000_FORCE_SIMPLE_PHY_POLL=y
@@ -863,6 +883,7 @@ CONFIG_SERIAL_8250_RSA=y
# CONFIG_SERIAL_MAX3100 is not set
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_TIMBERDALE is not set
CONFIG_UNIX98_PTYS=y
# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
# CONFIG_LEGACY_PTYS is not set
@@ -891,6 +912,7 @@ CONFIG_I2C_HELPER_AUTO=y
# CONFIG_I2C_OCORES is not set
CONFIG_I2C_OMAP=y
# CONFIG_I2C_SIMTEC is not set
+# CONFIG_I2C_XILINX is not set
#
# External I2C/SMBus adapter drivers
@@ -904,15 +926,9 @@ CONFIG_I2C_OMAP=y
#
# CONFIG_I2C_PCA_PLATFORM is not set
# CONFIG_I2C_STUB is not set
-
-#
-# Miscellaneous I2C Chip support
-#
-# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
# CONFIG_I2C_DEBUG_BUS is not set
-# CONFIG_I2C_DEBUG_CHIP is not set
CONFIG_SPI=y
# CONFIG_SPI_DEBUG is not set
CONFIG_SPI_MASTER=y
@@ -944,10 +960,12 @@ CONFIG_GPIOLIB=y
#
# Memory mapped GPIO expanders:
#
+# CONFIG_GPIO_IT8761E is not set
#
# I2C GPIO expanders:
#
+# CONFIG_GPIO_MAX7300 is not set
# CONFIG_GPIO_MAX732X is not set
# CONFIG_GPIO_PCA953X is not set
# CONFIG_GPIO_PCF857X is not set
@@ -984,10 +1002,12 @@ CONFIG_SSB_POSSIBLE=y
# Multifunction device drivers
#
CONFIG_MFD_CORE=y
+# CONFIG_MFD_88PM860X is not set
# CONFIG_MFD_SM501 is not set
# CONFIG_MFD_ASIC3 is not set
# CONFIG_HTC_EGPIO is not set
# CONFIG_HTC_PASIC3 is not set
+# CONFIG_HTC_I2CPLD is not set
# CONFIG_TPS65010 is not set
CONFIG_TWL4030_CORE=y
CONFIG_TWL4030_POWER=y
@@ -998,22 +1018,25 @@ CONFIG_TWL4030_CODEC=y
# CONFIG_MFD_TC6393XB is not set
# CONFIG_PMIC_DA903X is not set
# CONFIG_PMIC_ADP5520 is not set
+# CONFIG_MFD_MAX8925 is not set
# CONFIG_MFD_WM8400 is not set
# CONFIG_MFD_WM831X is not set
# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_WM8994 is not set
# CONFIG_MFD_PCF50633 is not set
# CONFIG_MFD_MC13783 is not set
# CONFIG_AB3100_CORE is not set
# CONFIG_EZX_PCAP is not set
-# CONFIG_MFD_88PM8607 is not set
# CONFIG_AB4500_CORE is not set
CONFIG_REGULATOR=y
# CONFIG_REGULATOR_DEBUG is not set
+# CONFIG_REGULATOR_DUMMY is not set
# CONFIG_REGULATOR_FIXED_VOLTAGE is not set
# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set
# CONFIG_REGULATOR_USERSPACE_CONSUMER is not set
# CONFIG_REGULATOR_BQ24022 is not set
# CONFIG_REGULATOR_MAX1586 is not set
+# CONFIG_REGULATOR_MAX8649 is not set
# CONFIG_REGULATOR_MAX8660 is not set
CONFIG_REGULATOR_TWL4030=y
# CONFIG_REGULATOR_LP3971 is not set
@@ -1072,7 +1095,6 @@ CONFIG_OMAP2_DSS_VENC=y
CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK=0
CONFIG_FB_OMAP2=y
CONFIG_FB_OMAP2_DEBUG_SUPPORT=y
-# CONFIG_FB_OMAP2_FORCE_AUTO_UPDATE is not set
CONFIG_FB_OMAP2_NUM_FBS=3
#
@@ -1080,7 +1102,9 @@ CONFIG_FB_OMAP2_NUM_FBS=3
#
CONFIG_PANEL_GENERIC=y
# CONFIG_PANEL_SHARP_LS037V7DW01 is not set
-CONFIG_PANEL_INNOLUX_AT070TN83=y
+# CONFIG_PANEL_SHARP_LQ043T1DG01 is not set
+# CONFIG_PANEL_TOPPOLY_TDO35S is not set
+# CONFIG_PANEL_TPO_TD043MTEA1 is not set
# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
#
@@ -1136,6 +1160,7 @@ CONFIG_SND_ARM=y
CONFIG_SND_SPI=y
CONFIG_SND_USB=y
# CONFIG_SND_USB_AUDIO is not set
+# CONFIG_SND_USB_UA101 is not set
# CONFIG_SND_USB_CAIAQ is not set
CONFIG_SND_SOC=y
CONFIG_SND_OMAP_SOC=y
@@ -1147,42 +1172,44 @@ CONFIG_SND_SOC_TWL4030=y
# CONFIG_SOUND_PRIME is not set
CONFIG_HID_SUPPORT=y
CONFIG_HID=y
-CONFIG_HIDRAW=y
+# CONFIG_HIDRAW is not set
#
# USB Input Devices
#
CONFIG_USB_HID=y
# CONFIG_HID_PID is not set
-CONFIG_USB_HIDDEV=y
+# CONFIG_USB_HIDDEV is not set
#
# Special HID drivers
#
-CONFIG_HID_A4TECH=y
-CONFIG_HID_APPLE=y
-CONFIG_HID_BELKIN=y
-CONFIG_HID_CHERRY=y
-CONFIG_HID_CHICONY=y
-CONFIG_HID_CYPRESS=y
+# CONFIG_HID_3M_PCT is not set
+# CONFIG_HID_A4TECH is not set
+# CONFIG_HID_APPLE is not set
+# CONFIG_HID_BELKIN is not set
+# CONFIG_HID_CHERRY is not set
+# CONFIG_HID_CHICONY is not set
+# CONFIG_HID_CYPRESS is not set
# CONFIG_HID_DRAGONRISE is not set
-CONFIG_HID_EZKEY=y
+# CONFIG_HID_EZKEY is not set
# CONFIG_HID_KYE is not set
-CONFIG_HID_GYRATION=y
+# CONFIG_HID_GYRATION is not set
# CONFIG_HID_TWINHAN is not set
# CONFIG_HID_KENSINGTON is not set
-CONFIG_HID_LOGITECH=y
-# CONFIG_LOGITECH_FF is not set
-# CONFIG_LOGIRUMBLEPAD2_FF is not set
-CONFIG_HID_MICROSOFT=y
-CONFIG_HID_MONTEREY=y
+# CONFIG_HID_LOGITECH is not set
+# CONFIG_HID_MICROSOFT is not set
+# CONFIG_HID_MOSART is not set
+# CONFIG_HID_MONTEREY is not set
# CONFIG_HID_NTRIG is not set
-CONFIG_HID_PANTHERLORD=y
-# CONFIG_PANTHERLORD_FF is not set
-CONFIG_HID_PETALYNX=y
-CONFIG_HID_SAMSUNG=y
-CONFIG_HID_SONY=y
-CONFIG_HID_SUNPLUS=y
+# CONFIG_HID_ORTEK is not set
+# CONFIG_HID_PANTHERLORD is not set
+# CONFIG_HID_PETALYNX is not set
+# CONFIG_HID_QUANTA is not set
+# CONFIG_HID_SAMSUNG is not set
+# CONFIG_HID_SONY is not set
+# CONFIG_HID_STANTUM is not set
+# CONFIG_HID_SUNPLUS is not set
# CONFIG_HID_GREENASIA is not set
# CONFIG_HID_SMARTJOYPLUS is not set
# CONFIG_HID_TOPSEED is not set
@@ -1193,7 +1220,7 @@ CONFIG_USB_ARCH_HAS_HCD=y
CONFIG_USB_ARCH_HAS_OHCI=y
CONFIG_USB_ARCH_HAS_EHCI=y
CONFIG_USB=y
-# CONFIG_USB_DEBUG is not set
+CONFIG_USB_DEBUG=y
CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
#
@@ -1202,7 +1229,7 @@ CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
# CONFIG_USB_DEVICEFS is not set
# CONFIG_USB_DEVICE_CLASS is not set
# CONFIG_USB_DYNAMIC_MINORS is not set
-# CONFIG_USB_OTG is not set
+CONFIG_USB_OTG=y
# CONFIG_USB_OTG_WHITELIST is not set
# CONFIG_USB_OTG_BLACKLIST_HUB is not set
CONFIG_USB_MON=y
@@ -1230,15 +1257,15 @@ CONFIG_USB_MUSB_SOC=y
#
# OMAP 343x high speed USB support
#
-CONFIG_USB_MUSB_HOST=y
+# CONFIG_USB_MUSB_HOST is not set
# CONFIG_USB_MUSB_PERIPHERAL is not set
-# CONFIG_USB_MUSB_OTG is not set
-# CONFIG_USB_GADGET_MUSB_HDRC is not set
+CONFIG_USB_MUSB_OTG=y
+CONFIG_USB_GADGET_MUSB_HDRC=y
CONFIG_USB_MUSB_HDRC_HCD=y
# CONFIG_MUSB_PIO_ONLY is not set
CONFIG_USB_INVENTRA_DMA=y
# CONFIG_USB_TI_CPPI_DMA is not set
-# CONFIG_USB_MUSB_DEBUG is not set
+CONFIG_USB_MUSB_DEBUG=y
#
# USB Device Class drivers
@@ -1291,7 +1318,6 @@ CONFIG_USB_STORAGE=m
# CONFIG_USB_RIO500 is not set
# CONFIG_USB_LEGOTOWER is not set
# CONFIG_USB_LCD is not set
-# CONFIG_USB_BERRY_CHARGE is not set
# CONFIG_USB_LED is not set
# CONFIG_USB_CYPRESS_CY7C63 is not set
# CONFIG_USB_CYTHERM is not set
@@ -1304,9 +1330,8 @@ CONFIG_USB_STORAGE=m
# CONFIG_USB_IOWARRIOR is not set
# CONFIG_USB_TEST is not set
# CONFIG_USB_ISIGHTFW is not set
-# CONFIG_USB_VST is not set
CONFIG_USB_GADGET=y
-# CONFIG_USB_GADGET_DEBUG is not set
+CONFIG_USB_GADGET_DEBUG=y
# CONFIG_USB_GADGET_DEBUG_FILES is not set
CONFIG_USB_GADGET_VBUS_DRAW=2
CONFIG_USB_GADGET_SELECTED=y
@@ -1314,8 +1339,7 @@ CONFIG_USB_GADGET_SELECTED=y
# CONFIG_USB_GADGET_ATMEL_USBA is not set
# CONFIG_USB_GADGET_FSL_USB2 is not set
# CONFIG_USB_GADGET_LH7A40X is not set
-CONFIG_USB_GADGET_OMAP=y
-CONFIG_USB_OMAP=y
+# CONFIG_USB_GADGET_OMAP is not set
# CONFIG_USB_GADGET_PXA25X is not set
# CONFIG_USB_GADGET_R8A66597 is not set
# CONFIG_USB_GADGET_PXA27X is not set
@@ -1330,19 +1354,20 @@ CONFIG_USB_OMAP=y
# CONFIG_USB_GADGET_GOKU is not set
# CONFIG_USB_GADGET_LANGWELL is not set
# CONFIG_USB_GADGET_DUMMY_HCD is not set
-# CONFIG_USB_GADGET_DUALSPEED is not set
+CONFIG_USB_GADGET_DUALSPEED=y
# CONFIG_USB_ZERO is not set
-CONFIG_USB_AUDIO=m
-CONFIG_USB_ETH=m
-CONFIG_USB_ETH_RNDIS=y
-CONFIG_USB_ETH_EEM=y
-CONFIG_USB_GADGETFS=m
+# CONFIG_USB_AUDIO is not set
+CONFIG_USB_ETH=y
+# CONFIG_USB_ETH_RNDIS is not set
+# CONFIG_USB_ETH_EEM is not set
+# CONFIG_USB_GADGETFS is not set
# CONFIG_USB_FILE_STORAGE is not set
# CONFIG_USB_MASS_STORAGE is not set
-CONFIG_USB_G_SERIAL=m
+# CONFIG_USB_G_SERIAL is not set
# CONFIG_USB_MIDI_GADGET is not set
-CONFIG_USB_G_PRINTER=m
+# CONFIG_USB_G_PRINTER is not set
# CONFIG_USB_CDC_COMPOSITE is not set
+# CONFIG_USB_G_NOKIA is not set
# CONFIG_USB_G_MULTI is not set
#
@@ -1373,8 +1398,6 @@ CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=m
# CONFIG_MMC_OMAP is not set
CONFIG_MMC_OMAP_HS=y
-# CONFIG_MMC_AT91 is not set
-# CONFIG_MMC_ATMELMCI is not set
CONFIG_MMC_SPI=m
# CONFIG_MEMSTICK is not set
CONFIG_NEW_LEDS=y
@@ -1392,11 +1415,11 @@ CONFIG_LEDS_GPIO_PLATFORM=y
# CONFIG_LEDS_REGULATOR is not set
# CONFIG_LEDS_BD2802 is not set
# CONFIG_LEDS_LT3593 is not set
+CONFIG_LEDS_TRIGGERS=y
#
# LED Triggers
#
-CONFIG_LEDS_TRIGGERS=y
# CONFIG_LEDS_TRIGGER_TIMER is not set
CONFIG_LEDS_TRIGGER_HEARTBEAT=y
# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
@@ -1580,6 +1603,7 @@ CONFIG_UBIFS_FS=y
CONFIG_UBIFS_FS_LZO=y
CONFIG_UBIFS_FS_ZLIB=y
# CONFIG_UBIFS_FS_DEBUG is not set
+# CONFIG_LOGFS is not set
CONFIG_CRAMFS=y
# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
@@ -1606,6 +1630,7 @@ CONFIG_SUNRPC_GSS=y
CONFIG_RPCSEC_GSS_KRB5=y
# CONFIG_RPCSEC_GSS_SPKM3 is not set
# CONFIG_SMB_FS is not set
+# CONFIG_CEPH_FS is not set
# CONFIG_CIFS is not set
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
diff --git a/arch/arm/configs/mmp2_defconfig b/arch/arm/configs/mmp2_defconfig
index 03f76cfc941c..4b55dcb60029 100644
--- a/arch/arm/configs/mmp2_defconfig
+++ b/arch/arm/configs/mmp2_defconfig
@@ -1,13 +1,14 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.33-rc2
-# Tue Jan 5 13:55:22 2010
+# Linux kernel version: 2.6.34-rc5
+# Wed Apr 28 11:23:19 2010
#
CONFIG_ARM=y
CONFIG_SYS_SUPPORTS_APM_EMULATION=y
CONFIG_GENERIC_GPIO=y
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_HAVE_PROC_CPU=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_STACKTRACE_SUPPORT=y
CONFIG_HAVE_LATENCYTOP_SUPPORT=y
@@ -18,6 +19,7 @@ CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_RWSEM_GENERIC_SPINLOCK=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_NEED_DMA_MAP_STATE=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
CONFIG_VECTORS_BASE=0xffff0000
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
@@ -32,6 +34,12 @@ CONFIG_LOCK_KERNEL=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_LOCALVERSION=""
CONFIG_LOCALVERSION_AUTO=y
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_LZO=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_LZO is not set
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
@@ -52,7 +60,6 @@ CONFIG_RCU_FANOUT=32
# CONFIG_TREE_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_GROUP_SCHED is not set
# CONFIG_CGROUPS is not set
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
@@ -85,10 +92,14 @@ CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
CONFIG_AIO=y
+CONFIG_HAVE_PERF_EVENTS=y
+CONFIG_PERF_USE_VMALLOC=y
#
# Kernel Performance Events And Counters
#
+# CONFIG_PERF_EVENTS is not set
+# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_COMPAT_BRK=y
CONFIG_SLAB=y
@@ -104,6 +115,7 @@ CONFIG_HAVE_CLK=y
#
# GCOV-based kernel profiling
#
+# CONFIG_GCOV_KERNEL is not set
# CONFIG_SLOW_WORK is not set
CONFIG_HAVE_GENERIC_DMA_COHERENT=y
CONFIG_SLABINFO=y
@@ -170,6 +182,7 @@ CONFIG_MMU=y
# CONFIG_ARCH_REALVIEW is not set
# CONFIG_ARCH_VERSATILE is not set
# CONFIG_ARCH_AT91 is not set
+# CONFIG_ARCH_BCMRING is not set
# CONFIG_ARCH_CLPS711X is not set
# CONFIG_ARCH_GEMINI is not set
# CONFIG_ARCH_EBSA110 is not set
@@ -179,7 +192,6 @@ CONFIG_MMU=y
# CONFIG_ARCH_STMP3XXX is not set
# CONFIG_ARCH_NETX is not set
# CONFIG_ARCH_H720X is not set
-# CONFIG_ARCH_NOMADIK is not set
# CONFIG_ARCH_IOP13XX is not set
# CONFIG_ARCH_IOP32X is not set
# CONFIG_ARCH_IOP33X is not set
@@ -196,21 +208,26 @@ CONFIG_ARCH_MMP=y
# CONFIG_ARCH_KS8695 is not set
# CONFIG_ARCH_NS9XXX is not set
# CONFIG_ARCH_W90X900 is not set
+# CONFIG_ARCH_NUC93X is not set
# CONFIG_ARCH_PNX4008 is not set
# CONFIG_ARCH_PXA is not set
# CONFIG_ARCH_MSM is not set
+# CONFIG_ARCH_SHMOBILE is not set
# CONFIG_ARCH_RPC is not set
# CONFIG_ARCH_SA1100 is not set
# CONFIG_ARCH_S3C2410 is not set
# CONFIG_ARCH_S3C64XX is not set
+# CONFIG_ARCH_S5P6440 is not set
+# CONFIG_ARCH_S5P6442 is not set
# CONFIG_ARCH_S5PC1XX is not set
+# CONFIG_ARCH_S5PV210 is not set
# CONFIG_ARCH_SHARK is not set
# CONFIG_ARCH_LH7A40X is not set
# CONFIG_ARCH_U300 is not set
+# CONFIG_ARCH_U8500 is not set
+# CONFIG_ARCH_NOMADIK is not set
# CONFIG_ARCH_DAVINCI is not set
# CONFIG_ARCH_OMAP is not set
-# CONFIG_ARCH_BCMRING is not set
-# CONFIG_ARCH_U8500 is not set
# CONFIG_MACH_TAVOREVB is not set
#
@@ -218,8 +235,10 @@ CONFIG_ARCH_MMP=y
#
# CONFIG_MACH_ASPENITE is not set
# CONFIG_MACH_ZYLONITE2 is not set
+# CONFIG_MACH_AVENGERS_LITE is not set
# CONFIG_MACH_TTC_DKB is not set
CONFIG_MACH_FLINT=y
+CONFIG_MACH_MARVELL_JASPER=y
CONFIG_CPU_MMP2=y
CONFIG_PLAT_PXA=y
@@ -246,7 +265,10 @@ CONFIG_ARM_THUMB=y
# CONFIG_CPU_ICACHE_DISABLE is not set
# CONFIG_CPU_DCACHE_DISABLE is not set
# CONFIG_CPU_BPREDICT_DISABLE is not set
+CONFIG_OUTER_CACHE=y
+CONFIG_CACHE_TAUROS2=y
CONFIG_ARM_L1_CACHE_SHIFT=5
+CONFIG_CPU_HAS_PMU=y
# CONFIG_ARM_ERRATA_411920 is not set
CONFIG_COMMON_CLKDEV=y
@@ -298,7 +320,7 @@ CONFIG_ALIGNMENT_TRAP=y
#
CONFIG_ZBOOT_ROM_TEXT=0x0
CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE="root=/dev/nfs rootfstype=nfs nfsroot=192.168.1.100:/nfsroot/ ip=192.168.1.101:192.168.1.100::255.255.255.0::eth0:on console=ttyS0,115200 mem=128M user_debug=255"
+CONFIG_CMDLINE="root=/dev/nfs rootfstype=nfs nfsroot=192.168.1.100:/nfsroot/ ip=192.168.1.101:192.168.1.100::255.255.255.0::eth0:on console=ttyS2,38400 mem=128M user_debug=255"
# CONFIG_XIP_KERNEL is not set
# CONFIG_KEXEC is not set
@@ -338,7 +360,6 @@ CONFIG_NET=y
# Networking options
#
CONFIG_PACKET=y
-# CONFIG_PACKET_MMAP is not set
CONFIG_UNIX=y
CONFIG_XFRM=y
# CONFIG_XFRM_USER is not set
@@ -532,6 +553,7 @@ CONFIG_HAVE_IDE=y
#
# SCSI device support
#
+CONFIG_SCSI_MOD=y
# CONFIG_RAID_ATTRS is not set
# CONFIG_SCSI is not set
# CONFIG_SCSI_DMA is not set
@@ -640,6 +662,7 @@ CONFIG_SERIAL_PXA=y
CONFIG_SERIAL_PXA_CONSOLE=y
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_TIMBERDALE is not set
CONFIG_UNIX98_PTYS=y
# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
# CONFIG_LEGACY_PTYS is not set
@@ -667,6 +690,7 @@ CONFIG_I2C_HELPER_AUTO=y
CONFIG_I2C_PXA=y
# CONFIG_I2C_PXA_SLAVE is not set
# CONFIG_I2C_SIMTEC is not set
+# CONFIG_I2C_XILINX is not set
#
# External I2C/SMBus adapter drivers
@@ -679,15 +703,9 @@ CONFIG_I2C_PXA=y
#
# CONFIG_I2C_PCA_PLATFORM is not set
# CONFIG_I2C_STUB is not set
-
-#
-# Miscellaneous I2C Chip support
-#
-# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
# CONFIG_I2C_DEBUG_BUS is not set
-# CONFIG_I2C_DEBUG_CHIP is not set
# CONFIG_SPI is not set
#
@@ -702,13 +720,16 @@ CONFIG_GPIOLIB=y
#
# Memory mapped GPIO expanders:
#
+# CONFIG_GPIO_IT8761E is not set
#
# I2C GPIO expanders:
#
+# CONFIG_GPIO_MAX7300 is not set
# CONFIG_GPIO_MAX732X is not set
# CONFIG_GPIO_PCA953X is not set
# CONFIG_GPIO_PCF857X is not set
+# CONFIG_GPIO_ADP5588 is not set
#
# PCI GPIO expanders:
@@ -737,10 +758,12 @@ CONFIG_SSB_POSSIBLE=y
# Multifunction device drivers
#
CONFIG_MFD_CORE=y
+# CONFIG_MFD_88PM860X is not set
# CONFIG_MFD_SM501 is not set
# CONFIG_MFD_ASIC3 is not set
# CONFIG_HTC_EGPIO is not set
# CONFIG_HTC_PASIC3 is not set
+# CONFIG_HTC_I2CPLD is not set
# CONFIG_TPS65010 is not set
# CONFIG_TWL4030_CORE is not set
# CONFIG_MFD_TMIO is not set
@@ -749,24 +772,27 @@ CONFIG_MFD_CORE=y
# CONFIG_MFD_TC6393XB is not set
# CONFIG_PMIC_DA903X is not set
# CONFIG_PMIC_ADP5520 is not set
+CONFIG_MFD_MAX8925=y
# CONFIG_MFD_WM8400 is not set
# CONFIG_MFD_WM831X is not set
# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_WM8994 is not set
# CONFIG_MFD_PCF50633 is not set
# CONFIG_AB3100_CORE is not set
-CONFIG_MFD_88PM8607=y
CONFIG_REGULATOR=y
# CONFIG_REGULATOR_DEBUG is not set
+# CONFIG_REGULATOR_DUMMY is not set
# CONFIG_REGULATOR_FIXED_VOLTAGE is not set
# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set
# CONFIG_REGULATOR_USERSPACE_CONSUMER is not set
# CONFIG_REGULATOR_BQ24022 is not set
# CONFIG_REGULATOR_MAX1586 is not set
-CONFIG_REGULATOR_MAX8660=y
+CONFIG_REGULATOR_MAX8649=y
+# CONFIG_REGULATOR_MAX8660 is not set
+CONFIG_REGULATOR_MAX8925=y
# CONFIG_REGULATOR_LP3971 is not set
# CONFIG_REGULATOR_TPS65023 is not set
# CONFIG_REGULATOR_TPS6507X is not set
-CONFIG_REGULATOR_88PM8607=y
# CONFIG_MEDIA_SUPPORT is not set
#
@@ -781,6 +807,7 @@ CONFIG_LCD_CLASS_DEVICE=y
# CONFIG_LCD_PLATFORM is not set
CONFIG_BACKLIGHT_CLASS_DEVICE=y
CONFIG_BACKLIGHT_GENERIC=y
+CONFIG_BACKLIGHT_MAX8925=y
#
# Display device support
@@ -821,6 +848,7 @@ CONFIG_RTC_INTF_DEV=y
# CONFIG_RTC_DRV_DS1374 is not set
# CONFIG_RTC_DRV_DS1672 is not set
# CONFIG_RTC_DRV_MAX6900 is not set
+CONFIG_RTC_DRV_MAX8925=y
# CONFIG_RTC_DRV_RS5C372 is not set
# CONFIG_RTC_DRV_ISL1208 is not set
# CONFIG_RTC_DRV_X1205 is not set
@@ -872,7 +900,6 @@ CONFIG_RTC_INTF_DEV=y
# CONFIG_EXT2_FS is not set
# CONFIG_EXT3_FS is not set
# CONFIG_EXT4_FS is not set
-CONFIG_EXT4_USE_FOR_EXT23=y
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
CONFIG_FS_POSIX_ACL=y
@@ -883,7 +910,7 @@ CONFIG_FS_POSIX_ACL=y
# CONFIG_NILFS2_FS is not set
CONFIG_FILE_LOCKING=y
CONFIG_FSNOTIFY=y
-CONFIG_DNOTIFY=y
+# CONFIG_DNOTIFY is not set
CONFIG_INOTIFY=y
CONFIG_INOTIFY_USER=y
# CONFIG_QUOTA is not set
@@ -940,6 +967,7 @@ CONFIG_JFFS2_ZLIB=y
# CONFIG_JFFS2_LZO is not set
CONFIG_JFFS2_RTIME=y
# CONFIG_JFFS2_RUBIN is not set
+# CONFIG_LOGFS is not set
CONFIG_CRAMFS=y
# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
@@ -967,6 +995,7 @@ CONFIG_SUNRPC_GSS=y
CONFIG_RPCSEC_GSS_KRB5=y
# CONFIG_RPCSEC_GSS_SPKM3 is not set
# CONFIG_SMB_FS is not set
+# CONFIG_CEPH_FS is not set
# CONFIG_CIFS is not set
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
@@ -990,7 +1019,7 @@ CONFIG_FRAME_WARN=1024
CONFIG_MAGIC_SYSRQ=y
# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_UNUSED_SYMBOLS is not set
-# CONFIG_DEBUG_FS is not set
+CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
CONFIG_DEBUG_KERNEL=y
# CONFIG_DEBUG_SHIRQ is not set
@@ -1032,6 +1061,7 @@ CONFIG_DEBUG_MEMORY_INIT=y
# CONFIG_BACKTRACE_SELF_TEST is not set
# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
+# CONFIG_LKDTM is not set
# CONFIG_FAULT_INJECTION is not set
# CONFIG_LATENCYTOP is not set
# CONFIG_SYSCTL_SYSCALL_CHECK is not set
@@ -1052,6 +1082,7 @@ CONFIG_BRANCH_PROFILE_NONE=y
# CONFIG_KMEMTRACE is not set
# CONFIG_WORKQUEUE_TRACER is not set
# CONFIG_BLK_DEV_IO_TRACE is not set
+CONFIG_DYNAMIC_DEBUG=y
# CONFIG_SAMPLES is not set
CONFIG_HAVE_ARCH_KGDB=y
# CONFIG_KGDB is not set
@@ -1059,9 +1090,7 @@ CONFIG_ARM_UNWIND=y
CONFIG_DEBUG_USER=y
CONFIG_DEBUG_ERRORS=y
# CONFIG_DEBUG_STACK_USAGE is not set
-CONFIG_DEBUG_LL=y
-# CONFIG_EARLY_PRINTK is not set
-# CONFIG_DEBUG_ICEDCC is not set
+# CONFIG_DEBUG_LL is not set
# CONFIG_OC_ETM is not set
#
diff --git a/arch/arm/configs/mx51_defconfig b/arch/arm/configs/mx51_defconfig
index c88e9527a8ec..a708fd6d6ffe 100644
--- a/arch/arm/configs/mx51_defconfig
+++ b/arch/arm/configs/mx51_defconfig
@@ -809,7 +809,22 @@ CONFIG_SSB_POSSIBLE=y
CONFIG_DUMMY_CONSOLE=y
# CONFIG_SOUND is not set
# CONFIG_HID_SUPPORT is not set
-# CONFIG_USB_SUPPORT is not set
+CONFIG_USB_SUPPORT=y
+CONFIG_USB_ARCH_HAS_HCD=y
+# CONFIG_USB_ARCH_HAS_OHCI is not set
+CONFIG_USB_ARCH_HAS_EHCI=y
+CONFIG_USB=y
+
+#
+# USB Host Controller Drivers
+#
+# CONFIG_USB_C67X00_HCD is not set
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_ROOT_HUB_TT=y
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_EHCI_MXC=y
+
+
CONFIG_MMC=y
# CONFIG_MMC_DEBUG is not set
# CONFIG_MMC_UNSAFE_RESUME is not set
diff --git a/arch/arm/configs/omap3_defconfig b/arch/arm/configs/omap3_defconfig
index d6ad92177324..94dfcf0aa674 100644
--- a/arch/arm/configs/omap3_defconfig
+++ b/arch/arm/configs/omap3_defconfig
@@ -1,13 +1,14 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.33-rc5
-# Tue Jan 26 11:05:31 2010
+# Linux kernel version: 2.6.34-rc7
+# Thu May 13 10:54:43 2010
#
CONFIG_ARM=y
CONFIG_SYS_SUPPORTS_APM_EMULATION=y
CONFIG_GENERIC_GPIO=y
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_HAVE_PROC_CPU=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_STACKTRACE_SUPPORT=y
CONFIG_HAVE_LATENCYTOP_SUPPORT=y
@@ -19,7 +20,9 @@ CONFIG_RWSEM_GENERIC_SPINLOCK=y
CONFIG_ARCH_HAS_CPUFREQ=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_NEED_DMA_MAP_STATE=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
+CONFIG_ARM_L1_CACHE_SHIFT_6=y
CONFIG_OPROFILE_ARMV6=y
CONFIG_OPROFILE_ARM11_CORE=y
CONFIG_OPROFILE_ARMV7=y
@@ -63,12 +66,7 @@ CONFIG_RCU_FANOUT=32
# CONFIG_TREE_RCU_TRACE is not set
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_GROUP_SCHED=y
-CONFIG_FAIR_GROUP_SCHED=y
-# CONFIG_RT_GROUP_SCHED is not set
-CONFIG_USER_SCHED=y
-# CONFIG_CGROUP_SCHED is not set
+CONFIG_LOG_BUF_SHIFT=16
# CONFIG_CGROUPS is not set
# CONFIG_SYSFS_DEPRECATED_V2 is not set
# CONFIG_RELAY is not set
@@ -100,17 +98,21 @@ CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
CONFIG_AIO=y
+CONFIG_HAVE_PERF_EVENTS=y
+CONFIG_PERF_USE_VMALLOC=y
#
# Kernel Performance Events And Counters
#
+CONFIG_PERF_EVENTS=y
+# CONFIG_PERF_COUNTERS is not set
+# CONFIG_DEBUG_PERF_USE_VMALLOC is not set
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_COMPAT_BRK=y
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
CONFIG_PROFILING=y
-CONFIG_TRACEPOINTS=y
CONFIG_OPROFILE=y
CONFIG_HAVE_OPROFILE=y
CONFIG_KPROBES=y
@@ -189,6 +191,7 @@ CONFIG_MMU=y
# CONFIG_ARCH_REALVIEW is not set
# CONFIG_ARCH_VERSATILE is not set
# CONFIG_ARCH_AT91 is not set
+# CONFIG_ARCH_BCMRING is not set
# CONFIG_ARCH_CLPS711X is not set
# CONFIG_ARCH_GEMINI is not set
# CONFIG_ARCH_EBSA110 is not set
@@ -198,7 +201,6 @@ CONFIG_MMU=y
# CONFIG_ARCH_STMP3XXX is not set
# CONFIG_ARCH_NETX is not set
# CONFIG_ARCH_H720X is not set
-# CONFIG_ARCH_NOMADIK is not set
# CONFIG_ARCH_IOP13XX is not set
# CONFIG_ARCH_IOP32X is not set
# CONFIG_ARCH_IOP33X is not set
@@ -215,21 +217,26 @@ CONFIG_MMU=y
# CONFIG_ARCH_KS8695 is not set
# CONFIG_ARCH_NS9XXX is not set
# CONFIG_ARCH_W90X900 is not set
+# CONFIG_ARCH_NUC93X is not set
# CONFIG_ARCH_PNX4008 is not set
# CONFIG_ARCH_PXA is not set
# CONFIG_ARCH_MSM is not set
+# CONFIG_ARCH_SHMOBILE is not set
# CONFIG_ARCH_RPC is not set
# CONFIG_ARCH_SA1100 is not set
# CONFIG_ARCH_S3C2410 is not set
# CONFIG_ARCH_S3C64XX is not set
+# CONFIG_ARCH_S5P6440 is not set
+# CONFIG_ARCH_S5P6442 is not set
# CONFIG_ARCH_S5PC1XX is not set
+# CONFIG_ARCH_S5PV210 is not set
# CONFIG_ARCH_SHARK is not set
# CONFIG_ARCH_LH7A40X is not set
# CONFIG_ARCH_U300 is not set
+# CONFIG_ARCH_U8500 is not set
+# CONFIG_ARCH_NOMADIK is not set
# CONFIG_ARCH_DAVINCI is not set
CONFIG_ARCH_OMAP=y
-# CONFIG_ARCH_BCMRING is not set
-# CONFIG_ARCH_U8500 is not set
#
# TI OMAP Implementations
@@ -254,6 +261,7 @@ CONFIG_OMAP_MCBSP=y
# CONFIG_OMAP_MBOX_FWK is not set
# CONFIG_OMAP_MPU_TIMER is not set
CONFIG_OMAP_32K_TIMER=y
+# CONFIG_OMAP3_L2_AUX_SECURE_SAVE_RESTORE is not set
CONFIG_OMAP_32K_TIMER_HZ=128
CONFIG_OMAP_DM_TIMER=y
# CONFIG_OMAP_PM_NONE is not set
@@ -264,7 +272,7 @@ CONFIG_MACH_OMAP_GENERIC=y
# OMAP Core Type
#
CONFIG_ARCH_OMAP2420=y
-# CONFIG_ARCH_OMAP2430 is not set
+CONFIG_ARCH_OMAP2430=y
CONFIG_ARCH_OMAP3430=y
CONFIG_OMAP_PACKAGE_CBB=y
CONFIG_OMAP_PACKAGE_CUS=y
@@ -276,8 +284,9 @@ CONFIG_OMAP_PACKAGE_CBP=y
CONFIG_MACH_OMAP2_TUSB6010=y
CONFIG_MACH_OMAP_H4=y
CONFIG_MACH_OMAP_APOLLON=y
-# CONFIG_MACH_OMAP_2430SDP is not set
+CONFIG_MACH_OMAP_2430SDP=y
CONFIG_MACH_OMAP3_BEAGLE=y
+CONFIG_MACH_DEVKIT8000=y
CONFIG_MACH_OMAP_LDP=y
CONFIG_MACH_OVERO=y
CONFIG_MACH_OMAP3EVM=y
@@ -294,6 +303,7 @@ CONFIG_MACH_OMAP_ZOOM2=y
CONFIG_MACH_OMAP_ZOOM3=y
CONFIG_MACH_CM_T35=y
CONFIG_MACH_IGEP0020=y
+CONFIG_MACH_SBC3530=y
CONFIG_MACH_OMAP_3630SDP=y
CONFIG_MACH_OMAP_4430SDP=y
# CONFIG_OMAP3_EMU is not set
@@ -330,11 +340,16 @@ CONFIG_ARM_THUMBEE=y
# CONFIG_CPU_DCACHE_DISABLE is not set
# CONFIG_CPU_BPREDICT_DISABLE is not set
CONFIG_HAS_TLS_REG=y
+CONFIG_OUTER_CACHE=y
+CONFIG_OUTER_CACHE_SYNC=y
+CONFIG_CACHE_L2X0=y
CONFIG_ARM_L1_CACHE_SHIFT=6
+CONFIG_CPU_HAS_PMU=y
# CONFIG_ARM_ERRATA_411920 is not set
# CONFIG_ARM_ERRATA_430973 is not set
# CONFIG_ARM_ERRATA_458693 is not set
# CONFIG_ARM_ERRATA_460075 is not set
+# CONFIG_PL310_ERRATA_588369 is not set
CONFIG_ARM_GIC=y
CONFIG_COMMON_CLKDEV=y
@@ -368,6 +383,7 @@ CONFIG_ARCH_HAS_HOLES_MEMORYMODEL=y
# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set
# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set
# CONFIG_HIGHMEM is not set
+CONFIG_HW_PERF_EVENTS=y
CONFIG_SELECT_MEMORY_MODEL=y
CONFIG_FLATMEM_MANUAL=y
# CONFIG_DISCONTIGMEM_MANUAL is not set
@@ -390,7 +406,7 @@ CONFIG_ALIGNMENT_TRAP=y
#
CONFIG_ZBOOT_ROM_TEXT=0x0
CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE="root=/dev/nfs nfsroot=192.168.0.1:/home/user/buildroot ip=192.168.0.2:192.168.0.1:192.168.0.1:255.255.255.0:tgt:eth0:off rw console=ttyS2,115200n8"
+CONFIG_CMDLINE="root=/dev/mmcblk0p2 rootwait console=ttyS2,115200"
# CONFIG_XIP_KERNEL is not set
CONFIG_KEXEC=y
CONFIG_ATAGS_PROC=y
@@ -443,7 +459,8 @@ CONFIG_BINFMT_MISC=y
#
CONFIG_PM=y
CONFIG_PM_DEBUG=y
-CONFIG_PM_VERBOSE=y
+# CONFIG_PM_ADVANCED_DEBUG is not set
+# CONFIG_PM_VERBOSE is not set
CONFIG_CAN_PM_TRACE=y
CONFIG_PM_SLEEP=y
CONFIG_SUSPEND=y
@@ -451,6 +468,7 @@ CONFIG_SUSPEND=y
CONFIG_SUSPEND_FREEZER=y
# CONFIG_APM_EMULATION is not set
CONFIG_PM_RUNTIME=y
+CONFIG_PM_OPS=y
CONFIG_ARCH_SUSPEND_POSSIBLE=y
CONFIG_NET=y
@@ -458,7 +476,6 @@ CONFIG_NET=y
# Networking options
#
CONFIG_PACKET=y
-CONFIG_PACKET_MMAP=y
CONFIG_UNIX=y
CONFIG_XFRM=y
CONFIG_XFRM_USER=y
@@ -544,7 +561,6 @@ CONFIG_NETFILTER_ADVANCED=y
#
# CONFIG_NET_PKTGEN is not set
# CONFIG_NET_TCPPROBE is not set
-# CONFIG_NET_DROP_MONITOR is not set
# CONFIG_HAMRADIO is not set
# CONFIG_CAN is not set
# CONFIG_IRDA is not set
@@ -584,7 +600,7 @@ CONFIG_CFG80211=y
# CONFIG_CFG80211_REG_DEBUG is not set
CONFIG_CFG80211_DEFAULT_PS=y
# CONFIG_CFG80211_DEBUGFS is not set
-CONFIG_WIRELESS_OLD_REGULATORY=y
+# CONFIG_CFG80211_INTERNAL_REGDB is not set
CONFIG_CFG80211_WEXT=y
CONFIG_WIRELESS_EXT_SYSFS=y
CONFIG_LIB80211=y
@@ -676,7 +692,6 @@ CONFIG_MTD_CFI_UTIL=y
# CONFIG_MTD_COMPLEX_MAPPINGS is not set
# CONFIG_MTD_PHYSMAP is not set
# CONFIG_MTD_ARM_INTEGRATOR is not set
-CONFIG_MTD_OMAP_NOR=y
# CONFIG_MTD_PLATRAM is not set
#
@@ -754,6 +769,7 @@ CONFIG_MISC_DEVICES=y
# CONFIG_ICS932S401 is not set
# CONFIG_ENCLOSURE_SERVICES is not set
# CONFIG_ISL29003 is not set
+# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_DS1682 is not set
# CONFIG_TI_DAC7512 is not set
# CONFIG_C2PORT is not set
@@ -773,6 +789,7 @@ CONFIG_HAVE_IDE=y
#
# SCSI device support
#
+CONFIG_SCSI_MOD=y
# CONFIG_RAID_ATTRS is not set
CONFIG_SCSI=y
CONFIG_SCSI_DMA=y
@@ -839,12 +856,14 @@ CONFIG_SMSC_PHY=y
# CONFIG_NATIONAL_PHY is not set
# CONFIG_STE10XP is not set
# CONFIG_LSI_ET1011C_PHY is not set
+# CONFIG_MICREL_PHY is not set
# CONFIG_FIXED_PHY is not set
# CONFIG_MDIO_BITBANG is not set
CONFIG_NET_ETHERNET=y
CONFIG_MII=y
# CONFIG_AX88796 is not set
CONFIG_SMC91X=y
+# CONFIG_TI_DAVINCI_EMAC is not set
# CONFIG_DM9000 is not set
# CONFIG_ENC28J60 is not set
# CONFIG_ETHOC is not set
@@ -881,6 +900,7 @@ CONFIG_LIBERTAS_USB=y
CONFIG_LIBERTAS_SDIO=y
# CONFIG_LIBERTAS_SPI is not set
CONFIG_LIBERTAS_DEBUG=y
+# CONFIG_LIBERTAS_MESH is not set
# CONFIG_P54_COMMON is not set
# CONFIG_RT2X00 is not set
# CONFIG_WL12XX is not set
@@ -902,6 +922,7 @@ CONFIG_USB_NET_AX8817X=y
CONFIG_USB_NET_CDCETHER=y
# CONFIG_USB_NET_CDC_EEM is not set
# CONFIG_USB_NET_DM9601 is not set
+# CONFIG_USB_NET_SMSC75XX is not set
# CONFIG_USB_NET_SMSC95XX is not set
# CONFIG_USB_NET_GL620A is not set
CONFIG_USB_NET_NET1080=y
@@ -917,6 +938,8 @@ CONFIG_USB_EPSON2888=y
CONFIG_USB_KC2190=y
CONFIG_USB_NET_ZAURUS=y
# CONFIG_USB_NET_INT51X1 is not set
+# CONFIG_USB_IPHETH is not set
+# CONFIG_USB_SIERRA_NET is not set
# CONFIG_WAN is not set
# CONFIG_PPP is not set
# CONFIG_SLIP is not set
@@ -1012,6 +1035,7 @@ CONFIG_INPUT_MISC=y
# CONFIG_INPUT_YEALINK is not set
# CONFIG_INPUT_CM109 is not set
CONFIG_INPUT_TWL4030_PWRBUTTON=y
+# CONFIG_INPUT_TWL4030_VIBRA is not set
# CONFIG_INPUT_UINPUT is not set
# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set
@@ -1055,6 +1079,7 @@ CONFIG_SERIAL_8250_RSA=y
# CONFIG_SERIAL_MAX3100 is not set
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_TIMBERDALE is not set
CONFIG_UNIX98_PTYS=y
# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
# CONFIG_LEGACY_PTYS is not set
@@ -1083,6 +1108,7 @@ CONFIG_I2C_HELPER_AUTO=y
# CONFIG_I2C_OCORES is not set
CONFIG_I2C_OMAP=y
# CONFIG_I2C_SIMTEC is not set
+# CONFIG_I2C_XILINX is not set
#
# External I2C/SMBus adapter drivers
@@ -1096,15 +1122,9 @@ CONFIG_I2C_OMAP=y
#
# CONFIG_I2C_PCA_PLATFORM is not set
# CONFIG_I2C_STUB is not set
-
-#
-# Miscellaneous I2C Chip support
-#
-# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
# CONFIG_I2C_DEBUG_BUS is not set
-# CONFIG_I2C_DEBUG_CHIP is not set
CONFIG_SPI=y
# CONFIG_SPI_DEBUG is not set
CONFIG_SPI_MASTER=y
@@ -1136,10 +1156,12 @@ CONFIG_GPIO_SYSFS=y
#
# Memory mapped GPIO expanders:
#
+# CONFIG_GPIO_IT8761E is not set
#
# I2C GPIO expanders:
#
+# CONFIG_GPIO_MAX7300 is not set
# CONFIG_GPIO_MAX732X is not set
# CONFIG_GPIO_PCA953X is not set
# CONFIG_GPIO_PCF857X is not set
@@ -1204,10 +1226,11 @@ CONFIG_HWMON=y
# CONFIG_SENSORS_ADM1029 is not set
# CONFIG_SENSORS_ADM1031 is not set
# CONFIG_SENSORS_ADM9240 is not set
+# CONFIG_SENSORS_ADT7411 is not set
# CONFIG_SENSORS_ADT7462 is not set
# CONFIG_SENSORS_ADT7470 is not set
-# CONFIG_SENSORS_ADT7473 is not set
# CONFIG_SENSORS_ADT7475 is not set
+# CONFIG_SENSORS_ASC7621 is not set
# CONFIG_SENSORS_ATXP1 is not set
# CONFIG_SENSORS_DS1621 is not set
# CONFIG_SENSORS_F71805F is not set
@@ -1262,7 +1285,7 @@ CONFIG_HWMON=y
# CONFIG_SENSORS_LIS3_I2C is not set
# CONFIG_THERMAL is not set
CONFIG_WATCHDOG=y
-CONFIG_WATCHDOG_NOWAYOUT=y
+# CONFIG_WATCHDOG_NOWAYOUT is not set
#
# Watchdog Device Drivers
@@ -1270,6 +1293,7 @@ CONFIG_WATCHDOG_NOWAYOUT=y
# CONFIG_SOFT_WATCHDOG is not set
CONFIG_OMAP_WATCHDOG=y
CONFIG_TWL4030_WATCHDOG=y
+# CONFIG_MAX63XX_WATCHDOG is not set
#
# USB-based Watchdog Cards
@@ -1286,14 +1310,16 @@ CONFIG_SSB_POSSIBLE=y
# Multifunction device drivers
#
CONFIG_MFD_CORE=y
+# CONFIG_MFD_88PM860X is not set
# CONFIG_MFD_SM501 is not set
# CONFIG_MFD_ASIC3 is not set
# CONFIG_HTC_EGPIO is not set
# CONFIG_HTC_PASIC3 is not set
+# CONFIG_HTC_I2CPLD is not set
# CONFIG_TPS65010 is not set
-# CONFIG_MENELAUS is not set
+CONFIG_MENELAUS=y
CONFIG_TWL4030_CORE=y
-# CONFIG_TWL4030_POWER is not set
+CONFIG_TWL4030_POWER=y
CONFIG_TWL4030_CODEC=y
# CONFIG_MFD_TMIO is not set
# CONFIG_MFD_T7L66XB is not set
@@ -1301,27 +1327,30 @@ CONFIG_TWL4030_CODEC=y
# CONFIG_MFD_TC6393XB is not set
# CONFIG_PMIC_DA903X is not set
# CONFIG_PMIC_ADP5520 is not set
+# CONFIG_MFD_MAX8925 is not set
# CONFIG_MFD_WM8400 is not set
# CONFIG_MFD_WM831X is not set
# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_WM8994 is not set
# CONFIG_MFD_PCF50633 is not set
# CONFIG_MFD_MC13783 is not set
# CONFIG_AB3100_CORE is not set
# CONFIG_EZX_PCAP is not set
-# CONFIG_MFD_88PM8607 is not set
# CONFIG_AB4500_CORE is not set
CONFIG_REGULATOR=y
# CONFIG_REGULATOR_DEBUG is not set
-CONFIG_REGULATOR_FIXED_VOLTAGE=y
+# CONFIG_REGULATOR_DUMMY is not set
+# CONFIG_REGULATOR_FIXED_VOLTAGE is not set
# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set
# CONFIG_REGULATOR_USERSPACE_CONSUMER is not set
# CONFIG_REGULATOR_BQ24022 is not set
# CONFIG_REGULATOR_MAX1586 is not set
+# CONFIG_REGULATOR_MAX8649 is not set
# CONFIG_REGULATOR_MAX8660 is not set
CONFIG_REGULATOR_TWL4030=y
# CONFIG_REGULATOR_LP3971 is not set
-# CONFIG_REGULATOR_TPS65023 is not set
-# CONFIG_REGULATOR_TPS6507X is not set
+CONFIG_REGULATOR_TPS65023=y
+CONFIG_REGULATOR_TPS6507X=y
# CONFIG_MEDIA_SUPPORT is not set
#
@@ -1333,9 +1362,9 @@ CONFIG_FB=y
CONFIG_FIRMWARE_EDID=y
# CONFIG_FB_DDC is not set
# CONFIG_FB_BOOT_VESA_SUPPORT is not set
-CONFIG_FB_CFB_FILLRECT=y
-CONFIG_FB_CFB_COPYAREA=y
-CONFIG_FB_CFB_IMAGEBLIT=y
+# CONFIG_FB_CFB_FILLRECT is not set
+# CONFIG_FB_CFB_COPYAREA is not set
+# CONFIG_FB_CFB_IMAGEBLIT is not set
# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
# CONFIG_FB_SYS_FILLRECT is not set
# CONFIG_FB_SYS_COPYAREA is not set
@@ -1358,19 +1387,12 @@ CONFIG_FB_TILEBLITTING=y
# CONFIG_FB_METRONOME is not set
# CONFIG_FB_MB862XX is not set
# CONFIG_FB_BROADSHEET is not set
-CONFIG_FB_OMAP=y
+# CONFIG_FB_OMAP is not set
CONFIG_FB_OMAP_LCD_VGA=y
-# CONFIG_FB_OMAP_031M3R is not set
-# CONFIG_FB_OMAP_048M3R is not set
-CONFIG_FB_OMAP_079M3R=y
-# CONFIG_FB_OMAP_092M9R is not set
-# CONFIG_FB_OMAP_LCDC_EXTERNAL is not set
-# CONFIG_FB_OMAP_LCD_MIPID is not set
-# CONFIG_FB_OMAP_BOOTLOADER_INIT is not set
-CONFIG_FB_OMAP_CONSISTENT_DMA_SIZE=2
# CONFIG_OMAP2_DSS is not set
CONFIG_BACKLIGHT_LCD_SUPPORT=y
CONFIG_LCD_CLASS_DEVICE=y
+# CONFIG_LCD_L4F00242T03 is not set
# CONFIG_LCD_LMS283GF05 is not set
# CONFIG_LCD_LTV350QV is not set
# CONFIG_LCD_ILI9320 is not set
@@ -1448,6 +1470,7 @@ CONFIG_SND_ARM=y
CONFIG_SND_SPI=y
CONFIG_SND_USB=y
CONFIG_SND_USB_AUDIO=y
+# CONFIG_SND_USB_UA101 is not set
# CONFIG_SND_USB_CAIAQ is not set
CONFIG_SND_SOC=y
CONFIG_SND_OMAP_SOC=y
@@ -1479,6 +1502,7 @@ CONFIG_USB_HID=y
#
# Special HID drivers
#
+# CONFIG_HID_3M_PCT is not set
# CONFIG_HID_A4TECH is not set
# CONFIG_HID_APPLE is not set
# CONFIG_HID_BELKIN is not set
@@ -1492,13 +1516,18 @@ CONFIG_USB_HID=y
# CONFIG_HID_TWINHAN is not set
# CONFIG_HID_KENSINGTON is not set
# CONFIG_HID_LOGITECH is not set
+# CONFIG_HID_MAGICMOUSE is not set
# CONFIG_HID_MICROSOFT is not set
+# CONFIG_HID_MOSART is not set
# CONFIG_HID_MONTEREY is not set
# CONFIG_HID_NTRIG is not set
+# CONFIG_HID_ORTEK is not set
# CONFIG_HID_PANTHERLORD is not set
# CONFIG_HID_PETALYNX is not set
+# CONFIG_HID_QUANTA is not set
# CONFIG_HID_SAMSUNG is not set
# CONFIG_HID_SONY is not set
+# CONFIG_HID_STANTUM is not set
# CONFIG_HID_SUNPLUS is not set
# CONFIG_HID_GREENASIA is not set
# CONFIG_HID_SMARTJOYPLUS is not set
@@ -1545,6 +1574,10 @@ CONFIG_USB_MUSB_HDRC=y
CONFIG_USB_MUSB_SOC=y
#
+# OMAP 243x high speed USB support
+#
+
+#
# OMAP 343x high speed USB support
#
# CONFIG_USB_MUSB_HOST is not set
@@ -1608,7 +1641,6 @@ CONFIG_USB_LIBUSUAL=y
# CONFIG_USB_RIO500 is not set
# CONFIG_USB_LEGOTOWER is not set
# CONFIG_USB_LCD is not set
-# CONFIG_USB_BERRY_CHARGE is not set
# CONFIG_USB_LED is not set
# CONFIG_USB_CYPRESS_CY7C63 is not set
# CONFIG_USB_CYTHERM is not set
@@ -1621,7 +1653,6 @@ CONFIG_USB_LIBUSUAL=y
# CONFIG_USB_IOWARRIOR is not set
CONFIG_USB_TEST=y
# CONFIG_USB_ISIGHTFW is not set
-# CONFIG_USB_VST is not set
CONFIG_USB_GADGET=y
CONFIG_USB_GADGET_DEBUG=y
CONFIG_USB_GADGET_DEBUG_FILES=y
@@ -1659,6 +1690,7 @@ CONFIG_USB_ZERO=m
# CONFIG_USB_MIDI_GADGET is not set
# CONFIG_USB_G_PRINTER is not set
# CONFIG_USB_CDC_COMPOSITE is not set
+# CONFIG_USB_G_NOKIA is not set
# CONFIG_USB_G_MULTI is not set
#
@@ -1686,10 +1718,8 @@ CONFIG_SDIO_UART=y
# MMC/SD/SDIO Host Controller Drivers
#
# CONFIG_MMC_SDHCI is not set
-# CONFIG_MMC_OMAP is not set
+CONFIG_MMC_OMAP=y
CONFIG_MMC_OMAP_HS=y
-# CONFIG_MMC_AT91 is not set
-# CONFIG_MMC_ATMELMCI is not set
# CONFIG_MMC_SPI is not set
# CONFIG_MEMSTICK is not set
CONFIG_NEW_LEDS=y
@@ -1707,11 +1737,11 @@ CONFIG_LEDS_GPIO_PLATFORM=y
# CONFIG_LEDS_REGULATOR is not set
# CONFIG_LEDS_BD2802 is not set
# CONFIG_LEDS_LT3593 is not set
+CONFIG_LEDS_TRIGGERS=y
#
# LED Triggers
#
-CONFIG_LEDS_TRIGGERS=y
CONFIG_LEDS_TRIGGER_TIMER=y
CONFIG_LEDS_TRIGGER_HEARTBEAT=y
# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
@@ -1751,6 +1781,7 @@ CONFIG_RTC_INTF_DEV=y
# CONFIG_RTC_DRV_PCF8583 is not set
# CONFIG_RTC_DRV_M41T80 is not set
# CONFIG_RTC_DRV_BQ32K is not set
+CONFIG_RTC_DRV_TWL92330=y
CONFIG_RTC_DRV_TWL4030=y
# CONFIG_RTC_DRV_S35390A is not set
# CONFIG_RTC_DRV_FM3130 is not set
@@ -1799,6 +1830,11 @@ CONFIG_RTC_DRV_TWL4030=y
# CONFIG_STAGING is not set
#
+# CBUS support
+#
+# CONFIG_CBUS is not set
+
+#
# File systems
#
CONFIG_EXT2_FS=y
@@ -1826,6 +1862,7 @@ CONFIG_INOTIFY_USER=y
CONFIG_QUOTA=y
# CONFIG_QUOTA_NETLINK_INTERFACE is not set
CONFIG_PRINT_QUOTA_WARNING=y
+# CONFIG_QUOTA_DEBUG is not set
CONFIG_QUOTA_TREE=y
# CONFIG_QFMT_V1 is not set
CONFIG_QFMT_V2=y
@@ -1897,6 +1934,7 @@ CONFIG_UBIFS_FS=y
CONFIG_UBIFS_FS_LZO=y
CONFIG_UBIFS_FS_ZLIB=y
# CONFIG_UBIFS_FS_DEBUG is not set
+# CONFIG_LOGFS is not set
CONFIG_CRAMFS=y
# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
@@ -1924,6 +1962,7 @@ CONFIG_SUNRPC_GSS=y
CONFIG_RPCSEC_GSS_KRB5=y
# CONFIG_RPCSEC_GSS_SPKM3 is not set
# CONFIG_SMB_FS is not set
+# CONFIG_CEPH_FS is not set
# CONFIG_CIFS is not set
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
@@ -2024,6 +2063,7 @@ CONFIG_DEBUG_SPINLOCK=y
CONFIG_DEBUG_MUTEXES=y
CONFIG_DEBUG_LOCK_ALLOC=y
CONFIG_PROVE_LOCKING=y
+# CONFIG_PROVE_RCU is not set
CONFIG_LOCKDEP=y
CONFIG_LOCK_STAT=y
# CONFIG_DEBUG_LOCKDEP is not set
@@ -2053,13 +2093,9 @@ CONFIG_DEBUG_INFO=y
# CONFIG_LATENCYTOP is not set
# CONFIG_SYSCTL_SYSCALL_CHECK is not set
# CONFIG_PAGE_POISONING is not set
-CONFIG_NOP_TRACER=y
CONFIG_HAVE_FUNCTION_TRACER=y
CONFIG_RING_BUFFER=y
-CONFIG_EVENT_TRACING=y
-CONFIG_CONTEXT_SWITCH_TRACER=y
CONFIG_RING_BUFFER_ALLOW_SWAP=y
-CONFIG_TRACING=y
CONFIG_TRACING_SUPPORT=y
CONFIG_FTRACE=y
# CONFIG_FUNCTION_TRACER is not set
@@ -2199,7 +2235,7 @@ CONFIG_CRYPTO_LZO=y
#
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_HW=y
-CONFIG_BINARY_PRINTF=y
+# CONFIG_BINARY_PRINTF is not set
#
# Library routines
@@ -2222,3 +2258,4 @@ CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
CONFIG_NLATTR=y
+CONFIG_GENERIC_ATOMIC64=y
diff --git a/arch/arm/configs/omap3_evm_defconfig b/arch/arm/configs/omap3_evm_defconfig
index a6dd6d1af806..b02e371b0997 100644
--- a/arch/arm/configs/omap3_evm_defconfig
+++ b/arch/arm/configs/omap3_evm_defconfig
@@ -911,7 +911,56 @@ CONFIG_DAB=y
#
# CONFIG_VGASTATE is not set
CONFIG_VIDEO_OUTPUT_CONTROL=m
-# CONFIG_FB is not set
+CONFIG_FB=y
+# CONFIG_FIRMWARE_EDID is not set
+# CONFIG_FB_DDC is not set
+# CONFIG_FB_BOOT_VESA_SUPPORT is not set
+CONFIG_FB_CFB_FILLRECT=y
+CONFIG_FB_CFB_COPYAREA=y
+CONFIG_FB_CFB_IMAGEBLIT=y
+# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
+# CONFIG_FB_SYS_FILLRECT is not set
+# CONFIG_FB_SYS_COPYAREA is not set
+# CONFIG_FB_SYS_IMAGEBLIT is not set
+# CONFIG_FB_FOREIGN_ENDIAN is not set
+# CONFIG_FB_SYS_FOPS is not set
+# CONFIG_FB_SVGALIB is not set
+# CONFIG_FB_MACMODES is not set
+# CONFIG_FB_BACKLIGHT is not set
+# CONFIG_FB_MODE_HELPERS is not set
+# CONFIG_FB_TILEBLITTING is not set
+
+#
+# Frame buffer hardware drivers
+#
+# CONFIG_FB_S1D13XXX is not set
+# CONFIG_FB_VIRTUAL is not set
+# CONFIG_FB_METRONOME is not set
+# CONFIG_FB_MB862XX is not set
+# CONFIG_FB_BROADSHEET is not set
+# CONFIG_FB_OMAP_BOOTLOADER_INIT is not set
+CONFIG_OMAP2_VRAM=y
+CONFIG_OMAP2_VRFB=y
+CONFIG_OMAP2_DSS=y
+CONFIG_OMAP2_VRAM_SIZE=4
+# CONFIG_OMAP2_DSS_DEBUG_SUPPORT is not set
+# CONFIG_OMAP2_DSS_RFBI is not set
+CONFIG_OMAP2_DSS_VENC=y
+# CONFIG_OMAP2_DSS_SDI is not set
+# CONFIG_OMAP2_DSS_DSI is not set
+# CONFIG_OMAP2_DSS_FAKE_VSYNC is not set
+CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK=4
+CONFIG_FB_OMAP2=y
+# CONFIG_FB_OMAP2_DEBUG_SUPPORT is not set
+# CONFIG_FB_OMAP2_FORCE_AUTO_UPDATE is not set
+CONFIG_FB_OMAP2_NUM_FBS=3
+
+#
+# OMAP2/3 Display Device Drivers
+#
+CONFIG_PANEL_GENERIC=y
+# CONFIG_PANEL_SAMSUNG_LTE430WQ_F0C is not set
+CONFIG_PANEL_SHARP_LS037V7DW01=y
# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
#
diff --git a/arch/arm/configs/omap3_stalker_lks_defconfig b/arch/arm/configs/omap3_stalker_lks_defconfig
new file mode 100644
index 000000000000..83365f075cea
--- /dev/null
+++ b/arch/arm/configs/omap3_stalker_lks_defconfig
@@ -0,0 +1,1691 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.34-rc7
+# Mon May 17 16:57:28 2010
+#
+CONFIG_ARM=y
+CONFIG_SYS_SUPPORTS_APM_EMULATION=y
+CONFIG_GENERIC_GPIO=y
+CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_HAVE_PROC_CPU=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_HAVE_LATENCYTOP_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_HARDIRQS_SW_RESEND=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+CONFIG_ARCH_HAS_CPUFREQ=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_NEED_DMA_MAP_STATE=y
+CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
+CONFIG_ARM_L1_CACHE_SHIFT_6=y
+CONFIG_VECTORS_BASE=0xffff0000
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_LZO=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_LZO is not set
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+# CONFIG_POSIX_MQUEUE is not set
+CONFIG_BSD_PROCESS_ACCT=y
+# CONFIG_BSD_PROCESS_ACCT_V3 is not set
+# CONFIG_TASKSTATS is not set
+# CONFIG_AUDIT is not set
+
+#
+# RCU Subsystem
+#
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_TINY_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_IKCONFIG is not set
+CONFIG_LOG_BUF_SHIFT=14
+# CONFIG_CGROUPS is not set
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
+# CONFIG_RELAY is not set
+# CONFIG_NAMESPACES is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
+# CONFIG_RD_LZO is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
+CONFIG_EMBEDDED=y
+CONFIG_UID16=y
+# CONFIG_SYSCTL_SYSCALL is not set
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+CONFIG_KALLSYMS_EXTRA_PASS=y
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_AIO=y
+CONFIG_HAVE_PERF_EVENTS=y
+CONFIG_PERF_USE_VMALLOC=y
+
+#
+# Kernel Performance Events And Counters
+#
+# CONFIG_PERF_EVENTS is not set
+# CONFIG_PERF_COUNTERS is not set
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_COMPAT_BRK=y
+CONFIG_SLAB=y
+# CONFIG_SLUB is not set
+# CONFIG_SLOB is not set
+# CONFIG_PROFILING is not set
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_KPROBES is not set
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_HAVE_CLK=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
+# CONFIG_SLOW_WORK is not set
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_BLOCK=y
+CONFIG_LBDAF=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+# CONFIG_DEFAULT_DEADLINE is not set
+CONFIG_DEFAULT_CFQ=y
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="cfq"
+# CONFIG_INLINE_SPIN_TRYLOCK is not set
+# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK is not set
+# CONFIG_INLINE_SPIN_LOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
+CONFIG_INLINE_SPIN_UNLOCK=y
+# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
+CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
+# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_READ_TRYLOCK is not set
+# CONFIG_INLINE_READ_LOCK is not set
+# CONFIG_INLINE_READ_LOCK_BH is not set
+# CONFIG_INLINE_READ_LOCK_IRQ is not set
+# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
+CONFIG_INLINE_READ_UNLOCK=y
+# CONFIG_INLINE_READ_UNLOCK_BH is not set
+CONFIG_INLINE_READ_UNLOCK_IRQ=y
+# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_WRITE_TRYLOCK is not set
+# CONFIG_INLINE_WRITE_LOCK is not set
+# CONFIG_INLINE_WRITE_LOCK_BH is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
+CONFIG_INLINE_WRITE_UNLOCK=y
+# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
+CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
+# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
+# CONFIG_MUTEX_SPIN_ON_OWNER is not set
+CONFIG_FREEZER=y
+
+#
+# System Type
+#
+CONFIG_MMU=y
+# CONFIG_ARCH_AAEC2000 is not set
+# CONFIG_ARCH_INTEGRATOR is not set
+# CONFIG_ARCH_REALVIEW is not set
+# CONFIG_ARCH_VERSATILE is not set
+# CONFIG_ARCH_AT91 is not set
+# CONFIG_ARCH_BCMRING is not set
+# CONFIG_ARCH_CLPS711X is not set
+# CONFIG_ARCH_GEMINI is not set
+# CONFIG_ARCH_EBSA110 is not set
+# CONFIG_ARCH_EP93XX is not set
+# CONFIG_ARCH_FOOTBRIDGE is not set
+# CONFIG_ARCH_MXC is not set
+# CONFIG_ARCH_STMP3XXX is not set
+# CONFIG_ARCH_NETX is not set
+# CONFIG_ARCH_H720X is not set
+# CONFIG_ARCH_IOP13XX is not set
+# CONFIG_ARCH_IOP32X is not set
+# CONFIG_ARCH_IOP33X is not set
+# CONFIG_ARCH_IXP23XX is not set
+# CONFIG_ARCH_IXP2000 is not set
+# CONFIG_ARCH_IXP4XX is not set
+# CONFIG_ARCH_L7200 is not set
+# CONFIG_ARCH_DOVE is not set
+# CONFIG_ARCH_KIRKWOOD is not set
+# CONFIG_ARCH_LOKI is not set
+# CONFIG_ARCH_MV78XX0 is not set
+# CONFIG_ARCH_ORION5X is not set
+# CONFIG_ARCH_MMP is not set
+# CONFIG_ARCH_KS8695 is not set
+# CONFIG_ARCH_NS9XXX is not set
+# CONFIG_ARCH_W90X900 is not set
+# CONFIG_ARCH_NUC93X is not set
+# CONFIG_ARCH_PNX4008 is not set
+# CONFIG_ARCH_PXA is not set
+# CONFIG_ARCH_MSM is not set
+# CONFIG_ARCH_SHMOBILE is not set
+# CONFIG_ARCH_RPC is not set
+# CONFIG_ARCH_SA1100 is not set
+# CONFIG_ARCH_S3C2410 is not set
+# CONFIG_ARCH_S3C64XX is not set
+# CONFIG_ARCH_S5P6440 is not set
+# CONFIG_ARCH_S5P6442 is not set
+# CONFIG_ARCH_S5PC1XX is not set
+# CONFIG_ARCH_S5PV210 is not set
+# CONFIG_ARCH_SHARK is not set
+# CONFIG_ARCH_LH7A40X is not set
+# CONFIG_ARCH_U300 is not set
+# CONFIG_ARCH_U8500 is not set
+# CONFIG_ARCH_NOMADIK is not set
+# CONFIG_ARCH_DAVINCI is not set
+CONFIG_ARCH_OMAP=y
+
+#
+# TI OMAP Implementations
+#
+CONFIG_ARCH_OMAP_OTG=y
+# CONFIG_ARCH_OMAP1 is not set
+CONFIG_ARCH_OMAP2PLUS=y
+# CONFIG_ARCH_OMAP2 is not set
+CONFIG_ARCH_OMAP3=y
+# CONFIG_ARCH_OMAP4 is not set
+
+#
+# OMAP Feature Selections
+#
+CONFIG_OMAP_RESET_CLOCKS=y
+CONFIG_OMAP_MUX=y
+# CONFIG_OMAP_MUX_DEBUG is not set
+CONFIG_OMAP_MUX_WARNINGS=y
+# CONFIG_OMAP_MCBSP is not set
+# CONFIG_OMAP_MBOX_FWK is not set
+# CONFIG_OMAP_MPU_TIMER is not set
+CONFIG_OMAP_32K_TIMER=y
+# CONFIG_OMAP3_L2_AUX_SECURE_SAVE_RESTORE is not set
+CONFIG_OMAP_32K_TIMER_HZ=128
+CONFIG_OMAP_DM_TIMER=y
+# CONFIG_OMAP_PM_NONE is not set
+CONFIG_OMAP_PM_NOOP=y
+CONFIG_ARCH_OMAP3430=y
+CONFIG_OMAP_PACKAGE_CUS=y
+
+#
+# OMAP Board Type
+#
+# CONFIG_MACH_OMAP3_BEAGLE is not set
+# CONFIG_MACH_DEVKIT8000 is not set
+# CONFIG_MACH_OMAP_LDP is not set
+# CONFIG_MACH_OVERO is not set
+# CONFIG_MACH_OMAP3EVM is not set
+# CONFIG_MACH_OMAP3517EVM is not set
+# CONFIG_MACH_OMAP3_PANDORA is not set
+# CONFIG_MACH_OMAP3_TOUCHBOOK is not set
+# CONFIG_MACH_OMAP_3430SDP is not set
+# CONFIG_MACH_NOKIA_RX51 is not set
+# CONFIG_MACH_OMAP_ZOOM2 is not set
+# CONFIG_MACH_OMAP_ZOOM3 is not set
+# CONFIG_MACH_CM_T35 is not set
+# CONFIG_MACH_IGEP0020 is not set
+CONFIG_MACH_SBC3530=y
+# CONFIG_MACH_OMAP_3630SDP is not set
+# CONFIG_OMAP3_EMU is not set
+# CONFIG_OMAP3_SDRC_AC_TIMING is not set
+
+#
+# Processor Type
+#
+CONFIG_CPU_32v6K=y
+CONFIG_CPU_V7=y
+CONFIG_CPU_32v7=y
+CONFIG_CPU_ABRT_EV7=y
+CONFIG_CPU_PABRT_V7=y
+CONFIG_CPU_CACHE_V7=y
+CONFIG_CPU_CACHE_VIPT=y
+CONFIG_CPU_COPY_V6=y
+CONFIG_CPU_TLB_V7=y
+CONFIG_CPU_HAS_ASID=y
+CONFIG_CPU_CP15=y
+CONFIG_CPU_CP15_MMU=y
+
+#
+# Processor Features
+#
+CONFIG_ARM_THUMB=y
+# CONFIG_ARM_THUMBEE is not set
+# CONFIG_CPU_ICACHE_DISABLE is not set
+# CONFIG_CPU_DCACHE_DISABLE is not set
+# CONFIG_CPU_BPREDICT_DISABLE is not set
+CONFIG_HAS_TLS_REG=y
+CONFIG_ARM_L1_CACHE_SHIFT=6
+CONFIG_CPU_HAS_PMU=y
+# CONFIG_ARM_ERRATA_430973 is not set
+# CONFIG_ARM_ERRATA_458693 is not set
+# CONFIG_ARM_ERRATA_460075 is not set
+CONFIG_COMMON_CLKDEV=y
+
+#
+# Bus support
+#
+# CONFIG_PCI_SYSCALL is not set
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_PCCARD is not set
+
+#
+# Kernel Features
+#
+CONFIG_TICK_ONESHOT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+CONFIG_VMSPLIT_3G=y
+# CONFIG_VMSPLIT_2G is not set
+# CONFIG_VMSPLIT_1G is not set
+CONFIG_PAGE_OFFSET=0xC0000000
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+# CONFIG_PREEMPT is not set
+CONFIG_HZ=128
+# CONFIG_THUMB2_KERNEL is not set
+CONFIG_AEABI=y
+CONFIG_OABI_COMPAT=y
+CONFIG_ARCH_HAS_HOLES_MEMORYMODEL=y
+# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set
+# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set
+# CONFIG_HIGHMEM is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4
+# CONFIG_PHYS_ADDR_T_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=0
+CONFIG_VIRT_TO_BUS=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+# CONFIG_LEDS is not set
+CONFIG_ALIGNMENT_TRAP=y
+# CONFIG_UACCESS_WITH_MEMCPY is not set
+
+#
+# Boot options
+#
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_CMDLINE="root=/dev/nfs nfsroot=192.168.0.1:/home/user/buildroot ip=192.168.0.2:192.168.0.1:192.168.0.1:255.255.255.0:tgt:eth0:off rw console=ttyS2,115200n8"
+# CONFIG_XIP_KERNEL is not set
+# CONFIG_KEXEC is not set
+
+#
+# CPU Power Management
+#
+# CONFIG_CPU_FREQ is not set
+# CONFIG_CPU_IDLE is not set
+
+#
+# Floating point emulation
+#
+
+#
+# At least one emulation must be selected
+#
+CONFIG_FPE_NWFPE=y
+# CONFIG_FPE_NWFPE_XP is not set
+# CONFIG_FPE_FASTFPE is not set
+CONFIG_VFP=y
+CONFIG_VFPv3=y
+CONFIG_NEON=y
+
+#
+# Userspace binary formats
+#
+CONFIG_BINFMT_ELF=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_HAVE_AOUT=y
+# CONFIG_BINFMT_AOUT is not set
+CONFIG_BINFMT_MISC=y
+
+#
+# Power management options
+#
+CONFIG_PM=y
+CONFIG_PM_DEBUG=y
+# CONFIG_PM_ADVANCED_DEBUG is not set
+# CONFIG_PM_VERBOSE is not set
+CONFIG_CAN_PM_TRACE=y
+CONFIG_PM_SLEEP=y
+CONFIG_SUSPEND=y
+CONFIG_SUSPEND_FREEZER=y
+# CONFIG_APM_EMULATION is not set
+# CONFIG_PM_RUNTIME is not set
+CONFIG_PM_OPS=y
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM=y
+# CONFIG_XFRM_USER is not set
+# CONFIG_XFRM_SUB_POLICY is not set
+# CONFIG_XFRM_MIGRATE is not set
+# CONFIG_XFRM_STATISTICS is not set
+CONFIG_NET_KEY=y
+# CONFIG_NET_KEY_MIGRATE is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+# CONFIG_INET_TUNNEL is not set
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
+CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_LRO is not set
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
+# CONFIG_IPV6 is not set
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NETFILTER is not set
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_NET_DSA is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
+# CONFIG_NET_SCHED is not set
+# CONFIG_DCB is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_AF_RXRPC is not set
+CONFIG_WIRELESS=y
+# CONFIG_CFG80211 is not set
+# CONFIG_LIB80211 is not set
+
+#
+# CFG80211 needs to be enabled for MAC80211
+#
+# CONFIG_WIMAX is not set
+# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+# CONFIG_FW_LOADER is not set
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_CONNECTOR is not set
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
+CONFIG_MTD_CONCAT=y
+CONFIG_MTD_PARTITIONS=y
+# CONFIG_MTD_REDBOOT_PARTS is not set
+CONFIG_MTD_CMDLINE_PARTS=y
+# CONFIG_MTD_AFS_PARTS is not set
+# CONFIG_MTD_AR7_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLKDEVS=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+# CONFIG_MTD_OOPS is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+CONFIG_MTD_CFI=y
+# CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_GEN_PROBE=y
+# CONFIG_MTD_CFI_ADV_OPTIONS is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+CONFIG_MTD_CFI_INTELEXT=y
+# CONFIG_MTD_CFI_AMDSTD is not set
+# CONFIG_MTD_CFI_STAA is not set
+CONFIG_MTD_CFI_UTIL=y
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+# CONFIG_MTD_PHYSMAP is not set
+# CONFIG_MTD_ARM_INTEGRATOR is not set
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_DATAFLASH is not set
+# CONFIG_MTD_M25P80 is not set
+# CONFIG_MTD_SST25L is not set
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+CONFIG_MTD_NAND=y
+# CONFIG_MTD_NAND_VERIFY_WRITE is not set
+# CONFIG_MTD_NAND_ECC_SMC is not set
+# CONFIG_MTD_NAND_MUSEUM_IDS is not set
+# CONFIG_MTD_NAND_GPIO is not set
+# CONFIG_MTD_NAND_OMAP2 is not set
+CONFIG_MTD_NAND_IDS=y
+# CONFIG_MTD_NAND_DISKONCHIP is not set
+# CONFIG_MTD_NAND_NANDSIM is not set
+# CONFIG_MTD_NAND_PLATFORM is not set
+# CONFIG_MTD_ALAUDA is not set
+CONFIG_MTD_ONENAND=y
+CONFIG_MTD_ONENAND_VERIFY_WRITE=y
+# CONFIG_MTD_ONENAND_GENERIC is not set
+CONFIG_MTD_ONENAND_OMAP2=y
+# CONFIG_MTD_ONENAND_OTP is not set
+# CONFIG_MTD_ONENAND_2X_PROGRAM is not set
+# CONFIG_MTD_ONENAND_SIM is not set
+
+#
+# LPDDR flash memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
+
+#
+# UBI - Unsorted block images
+#
+# CONFIG_MTD_UBI is not set
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=y
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+
+#
+# DRBD disabled because PROC_FS, INET or CONNECTOR not selected
+#
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_UB is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=16384
+# CONFIG_BLK_DEV_XIP is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+# CONFIG_MG_DISK is not set
+# CONFIG_MISC_DEVICES is not set
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+CONFIG_SCSI_MOD=y
+# CONFIG_RAID_ATTRS is not set
+CONFIG_SCSI=y
+CONFIG_SCSI_DMA=y
+# CONFIG_SCSI_TGT is not set
+# CONFIG_SCSI_NETLINK is not set
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=y
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+# CONFIG_BLK_DEV_SR is not set
+# CONFIG_CHR_DEV_SG is not set
+# CONFIG_CHR_DEV_SCH is not set
+# CONFIG_SCSI_MULTI_LUN is not set
+# CONFIG_SCSI_CONSTANTS is not set
+# CONFIG_SCSI_LOGGING is not set
+# CONFIG_SCSI_SCAN_ASYNC is not set
+CONFIG_SCSI_WAIT_SCAN=m
+
+#
+# SCSI Transports
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+# CONFIG_SCSI_ISCSI_ATTRS is not set
+# CONFIG_SCSI_SAS_LIBSAS is not set
+# CONFIG_SCSI_SRP_ATTRS is not set
+CONFIG_SCSI_LOWLEVEL=y
+# CONFIG_ISCSI_TCP is not set
+# CONFIG_LIBFC is not set
+# CONFIG_LIBFCOE is not set
+# CONFIG_SCSI_DEBUG is not set
+# CONFIG_SCSI_DH is not set
+# CONFIG_SCSI_OSD_INITIATOR is not set
+# CONFIG_ATA is not set
+# CONFIG_MD is not set
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+# CONFIG_VETH is not set
+CONFIG_PHYLIB=y
+
+#
+# MII PHY device drivers
+#
+# CONFIG_MARVELL_PHY is not set
+# CONFIG_DAVICOM_PHY is not set
+# CONFIG_QSEMI_PHY is not set
+# CONFIG_LXT_PHY is not set
+# CONFIG_CICADA_PHY is not set
+# CONFIG_VITESSE_PHY is not set
+# CONFIG_SMSC_PHY is not set
+# CONFIG_BROADCOM_PHY is not set
+# CONFIG_ICPLUS_PHY is not set
+# CONFIG_REALTEK_PHY is not set
+# CONFIG_NATIONAL_PHY is not set
+# CONFIG_STE10XP is not set
+# CONFIG_LSI_ET1011C_PHY is not set
+# CONFIG_MICREL_PHY is not set
+# CONFIG_FIXED_PHY is not set
+# CONFIG_MDIO_BITBANG is not set
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=y
+# CONFIG_AX88796 is not set
+# CONFIG_SMC91X is not set
+# CONFIG_TI_DAVINCI_EMAC is not set
+# CONFIG_DM9000 is not set
+# CONFIG_ENC28J60 is not set
+# CONFIG_ETHOC is not set
+# CONFIG_SMC911X is not set
+CONFIG_SMSC911X=y
+# CONFIG_DNET is not set
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
+# CONFIG_B44 is not set
+# CONFIG_KS8842 is not set
+# CONFIG_KS8851 is not set
+# CONFIG_KS8851_MLL is not set
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+CONFIG_WLAN=y
+# CONFIG_USB_ZD1201 is not set
+# CONFIG_HOSTAP is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
+
+#
+# USB Network Adapters
+#
+# CONFIG_USB_CATC is not set
+# CONFIG_USB_KAWETH is not set
+# CONFIG_USB_PEGASUS is not set
+# CONFIG_USB_RTL8150 is not set
+# CONFIG_USB_USBNET is not set
+# CONFIG_USB_IPHETH is not set
+# CONFIG_WAN is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_ISDN is not set
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+# CONFIG_INPUT_FF_MEMLESS is not set
+# CONFIG_INPUT_POLLDEV is not set
+# CONFIG_INPUT_SPARSEKMAP is not set
+
+#
+# Userland interfaces
+#
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_JOYDEV is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+CONFIG_INPUT_KEYBOARD=y
+# CONFIG_KEYBOARD_ADP5588 is not set
+# CONFIG_KEYBOARD_ATKBD is not set
+# CONFIG_QT2160 is not set
+# CONFIG_KEYBOARD_LKKBD is not set
+# CONFIG_KEYBOARD_GPIO is not set
+# CONFIG_KEYBOARD_MATRIX is not set
+# CONFIG_KEYBOARD_MAX7359 is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_OPENCORES is not set
+# CONFIG_KEYBOARD_STOWAWAY is not set
+# CONFIG_KEYBOARD_SUNKBD is not set
+CONFIG_KEYBOARD_TWL4030=y
+# CONFIG_KEYBOARD_XTKBD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TABLET is not set
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_ADS7846=y
+# CONFIG_TOUCHSCREEN_AD7877 is not set
+# CONFIG_TOUCHSCREEN_AD7879_I2C is not set
+# CONFIG_TOUCHSCREEN_AD7879_SPI is not set
+# CONFIG_TOUCHSCREEN_AD7879 is not set
+# CONFIG_TOUCHSCREEN_DYNAPRO is not set
+# CONFIG_TOUCHSCREEN_EETI is not set
+# CONFIG_TOUCHSCREEN_FUJITSU is not set
+# CONFIG_TOUCHSCREEN_GUNZE is not set
+# CONFIG_TOUCHSCREEN_ELO is not set
+# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set
+# CONFIG_TOUCHSCREEN_MCS5000 is not set
+# CONFIG_TOUCHSCREEN_MTOUCH is not set
+# CONFIG_TOUCHSCREEN_INEXIO is not set
+# CONFIG_TOUCHSCREEN_MK712 is not set
+# CONFIG_TOUCHSCREEN_PENMOUNT is not set
+# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set
+# CONFIG_TOUCHSCREEN_TOUCHWIN is not set
+# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set
+# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
+# CONFIG_TOUCHSCREEN_TSC2007 is not set
+# CONFIG_TOUCHSCREEN_W90X900 is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_VT_HW_CONSOLE_BINDING is not set
+CONFIG_DEVKMEM=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=32
+CONFIG_SERIAL_8250_RUNTIME_UARTS=4
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_MANY_PORTS=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_SERIAL_8250_DETECT_IRQ=y
+CONFIG_SERIAL_8250_RSA=y
+
+#
+# Non-8250 serial port support
+#
+# CONFIG_SERIAL_MAX3100 is not set
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_TIMBERDALE is not set
+CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_IPMI_HANDLER is not set
+CONFIG_HW_RANDOM=y
+# CONFIG_HW_RANDOM_TIMERIOMEM is not set
+# CONFIG_R3964 is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
+CONFIG_I2C=y
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_HELPER_AUTO=y
+
+#
+# I2C Hardware Bus support
+#
+
+#
+# I2C system bus drivers (mostly embedded / system-on-chip)
+#
+# CONFIG_I2C_DESIGNWARE is not set
+# CONFIG_I2C_GPIO is not set
+# CONFIG_I2C_OCORES is not set
+CONFIG_I2C_OMAP=y
+# CONFIG_I2C_SIMTEC is not set
+# CONFIG_I2C_XILINX is not set
+
+#
+# External I2C/SMBus adapter drivers
+#
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_TAOS_EVM is not set
+# CONFIG_I2C_TINY_USB is not set
+
+#
+# Other I2C/SMBus bus drivers
+#
+# CONFIG_I2C_PCA_PLATFORM is not set
+# CONFIG_I2C_STUB is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+CONFIG_SPI=y
+# CONFIG_SPI_DEBUG is not set
+CONFIG_SPI_MASTER=y
+
+#
+# SPI Master Controller Drivers
+#
+# CONFIG_SPI_BITBANG is not set
+# CONFIG_SPI_GPIO is not set
+CONFIG_SPI_OMAP24XX=y
+# CONFIG_SPI_XILINX is not set
+# CONFIG_SPI_DESIGNWARE is not set
+
+#
+# SPI Protocol Masters
+#
+# CONFIG_SPI_SPIDEV is not set
+# CONFIG_SPI_TLE62X0 is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
+CONFIG_ARCH_REQUIRE_GPIOLIB=y
+CONFIG_GPIOLIB=y
+# CONFIG_DEBUG_GPIO is not set
+# CONFIG_GPIO_SYSFS is not set
+
+#
+# Memory mapped GPIO expanders:
+#
+# CONFIG_GPIO_IT8761E is not set
+
+#
+# I2C GPIO expanders:
+#
+# CONFIG_GPIO_MAX7300 is not set
+# CONFIG_GPIO_MAX732X is not set
+# CONFIG_GPIO_PCA953X is not set
+# CONFIG_GPIO_PCF857X is not set
+CONFIG_GPIO_TWL4030=y
+# CONFIG_GPIO_ADP5588 is not set
+
+#
+# PCI GPIO expanders:
+#
+
+#
+# SPI GPIO expanders:
+#
+# CONFIG_GPIO_MAX7301 is not set
+# CONFIG_GPIO_MCP23S08 is not set
+# CONFIG_GPIO_MC33880 is not set
+
+#
+# AC97 GPIO expanders:
+#
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+# CONFIG_HWMON is not set
+# CONFIG_THERMAL is not set
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_NOWAYOUT=y
+
+#
+# Watchdog Device Drivers
+#
+# CONFIG_SOFT_WATCHDOG is not set
+CONFIG_OMAP_WATCHDOG=y
+# CONFIG_TWL4030_WATCHDOG is not set
+# CONFIG_MAX63XX_WATCHDOG is not set
+
+#
+# USB-based Watchdog Cards
+#
+# CONFIG_USBPCWATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+# CONFIG_SSB is not set
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_88PM860X is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_MFD_ASIC3 is not set
+# CONFIG_HTC_EGPIO is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_HTC_I2CPLD is not set
+# CONFIG_TPS65010 is not set
+CONFIG_TWL4030_CORE=y
+# CONFIG_TWL4030_POWER is not set
+# CONFIG_TWL4030_CODEC is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_T7L66XB is not set
+# CONFIG_MFD_TC6387XB is not set
+# CONFIG_MFD_TC6393XB is not set
+# CONFIG_PMIC_DA903X is not set
+# CONFIG_PMIC_ADP5520 is not set
+# CONFIG_MFD_MAX8925 is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM831X is not set
+# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_WM8994 is not set
+# CONFIG_MFD_PCF50633 is not set
+# CONFIG_MFD_MC13783 is not set
+# CONFIG_AB3100_CORE is not set
+# CONFIG_EZX_PCAP is not set
+# CONFIG_AB4500_CORE is not set
+CONFIG_REGULATOR=y
+# CONFIG_REGULATOR_DEBUG is not set
+# CONFIG_REGULATOR_DUMMY is not set
+# CONFIG_REGULATOR_FIXED_VOLTAGE is not set
+# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set
+# CONFIG_REGULATOR_USERSPACE_CONSUMER is not set
+# CONFIG_REGULATOR_BQ24022 is not set
+# CONFIG_REGULATOR_MAX1586 is not set
+# CONFIG_REGULATOR_MAX8649 is not set
+# CONFIG_REGULATOR_MAX8660 is not set
+CONFIG_REGULATOR_TWL4030=y
+# CONFIG_REGULATOR_LP3971 is not set
+# CONFIG_REGULATOR_TPS65023 is not set
+# CONFIG_REGULATOR_TPS6507X is not set
+# CONFIG_MEDIA_SUPPORT is not set
+
+#
+# Graphics support
+#
+# CONFIG_VGASTATE is not set
+CONFIG_VIDEO_OUTPUT_CONTROL=m
+# CONFIG_FB is not set
+# CONFIG_OMAP2_DSS is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
+
+#
+# Console display driver support
+#
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_DUMMY_CONSOLE=y
+# CONFIG_SOUND is not set
+CONFIG_HID_SUPPORT=y
+CONFIG_HID=y
+# CONFIG_HIDRAW is not set
+
+#
+# USB Input Devices
+#
+CONFIG_USB_HID=y
+# CONFIG_HID_PID is not set
+# CONFIG_USB_HIDDEV is not set
+
+#
+# Special HID drivers
+#
+# CONFIG_HID_3M_PCT is not set
+# CONFIG_HID_A4TECH is not set
+# CONFIG_HID_APPLE is not set
+# CONFIG_HID_BELKIN is not set
+# CONFIG_HID_CHERRY is not set
+# CONFIG_HID_CHICONY is not set
+# CONFIG_HID_CYPRESS is not set
+# CONFIG_HID_DRAGONRISE is not set
+# CONFIG_HID_EZKEY is not set
+# CONFIG_HID_KYE is not set
+# CONFIG_HID_GYRATION is not set
+# CONFIG_HID_TWINHAN is not set
+# CONFIG_HID_KENSINGTON is not set
+# CONFIG_HID_LOGITECH is not set
+# CONFIG_HID_MICROSOFT is not set
+# CONFIG_HID_MOSART is not set
+# CONFIG_HID_MONTEREY is not set
+# CONFIG_HID_NTRIG is not set
+# CONFIG_HID_ORTEK is not set
+# CONFIG_HID_PANTHERLORD is not set
+# CONFIG_HID_PETALYNX is not set
+# CONFIG_HID_QUANTA is not set
+# CONFIG_HID_SAMSUNG is not set
+# CONFIG_HID_SONY is not set
+# CONFIG_HID_STANTUM is not set
+# CONFIG_HID_SUNPLUS is not set
+# CONFIG_HID_GREENASIA is not set
+# CONFIG_HID_SMARTJOYPLUS is not set
+# CONFIG_HID_TOPSEED is not set
+# CONFIG_HID_THRUSTMASTER is not set
+# CONFIG_HID_ZEROPLUS is not set
+CONFIG_USB_SUPPORT=y
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
+CONFIG_USB_ARCH_HAS_EHCI=y
+CONFIG_USB=y
+# CONFIG_USB_DEBUG is not set
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+
+#
+# Miscellaneous USB options
+#
+CONFIG_USB_DEVICEFS=y
+# CONFIG_USB_DEVICE_CLASS is not set
+# CONFIG_USB_DYNAMIC_MINORS is not set
+CONFIG_USB_OTG=y
+# CONFIG_USB_OTG_WHITELIST is not set
+# CONFIG_USB_OTG_BLACKLIST_HUB is not set
+CONFIG_USB_MON=y
+# CONFIG_USB_WUSB is not set
+# CONFIG_USB_WUSB_CBAF is not set
+
+#
+# USB Host Controller Drivers
+#
+# CONFIG_USB_C67X00_HCD is not set
+# CONFIG_USB_EHCI_HCD is not set
+# CONFIG_USB_OXU210HP_HCD is not set
+# CONFIG_USB_ISP116X_HCD is not set
+# CONFIG_USB_ISP1760_HCD is not set
+# CONFIG_USB_ISP1362_HCD is not set
+# CONFIG_USB_OHCI_HCD is not set
+# CONFIG_USB_SL811_HCD is not set
+# CONFIG_USB_R8A66597_HCD is not set
+# CONFIG_USB_HWA_HCD is not set
+CONFIG_USB_MUSB_HDRC=y
+CONFIG_USB_MUSB_SOC=y
+
+#
+# OMAP 343x high speed USB support
+#
+# CONFIG_USB_MUSB_HOST is not set
+# CONFIG_USB_MUSB_PERIPHERAL is not set
+CONFIG_USB_MUSB_OTG=y
+CONFIG_USB_GADGET_MUSB_HDRC=y
+CONFIG_USB_MUSB_HDRC_HCD=y
+# CONFIG_MUSB_PIO_ONLY is not set
+CONFIG_USB_INVENTRA_DMA=y
+# CONFIG_USB_TI_CPPI_DMA is not set
+# CONFIG_USB_MUSB_DEBUG is not set
+
+#
+# USB Device Class drivers
+#
+# CONFIG_USB_ACM is not set
+# CONFIG_USB_PRINTER is not set
+# CONFIG_USB_WDM is not set
+# CONFIG_USB_TMC is not set
+
+#
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
+#
+
+#
+# also be needed; see USB_STORAGE Help for more info
+#
+CONFIG_USB_STORAGE=y
+# CONFIG_USB_STORAGE_DEBUG is not set
+# CONFIG_USB_STORAGE_DATAFAB is not set
+# CONFIG_USB_STORAGE_FREECOM is not set
+# CONFIG_USB_STORAGE_ISD200 is not set
+# CONFIG_USB_STORAGE_USBAT is not set
+# CONFIG_USB_STORAGE_SDDR09 is not set
+# CONFIG_USB_STORAGE_SDDR55 is not set
+# CONFIG_USB_STORAGE_JUMPSHOT is not set
+# CONFIG_USB_STORAGE_ALAUDA is not set
+# CONFIG_USB_STORAGE_ONETOUCH is not set
+# CONFIG_USB_STORAGE_KARMA is not set
+# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set
+# CONFIG_USB_LIBUSUAL is not set
+
+#
+# USB Imaging devices
+#
+# CONFIG_USB_MDC800 is not set
+# CONFIG_USB_MICROTEK is not set
+
+#
+# USB port drivers
+#
+# CONFIG_USB_SERIAL is not set
+
+#
+# USB Miscellaneous drivers
+#
+# CONFIG_USB_EMI62 is not set
+# CONFIG_USB_EMI26 is not set
+# CONFIG_USB_ADUTUX is not set
+# CONFIG_USB_SEVSEG is not set
+# CONFIG_USB_RIO500 is not set
+# CONFIG_USB_LEGOTOWER is not set
+# CONFIG_USB_LCD is not set
+# CONFIG_USB_LED is not set
+# CONFIG_USB_CYPRESS_CY7C63 is not set
+# CONFIG_USB_CYTHERM is not set
+# CONFIG_USB_IDMOUSE is not set
+# CONFIG_USB_FTDI_ELAN is not set
+# CONFIG_USB_APPLEDISPLAY is not set
+# CONFIG_USB_SISUSBVGA is not set
+# CONFIG_USB_LD is not set
+# CONFIG_USB_TRANCEVIBRATOR is not set
+# CONFIG_USB_IOWARRIOR is not set
+CONFIG_USB_TEST=y
+# CONFIG_USB_ISIGHTFW is not set
+CONFIG_USB_GADGET=y
+# CONFIG_USB_GADGET_DEBUG is not set
+# CONFIG_USB_GADGET_DEBUG_FILES is not set
+# CONFIG_USB_GADGET_DEBUG_FS is not set
+CONFIG_USB_GADGET_VBUS_DRAW=2
+CONFIG_USB_GADGET_SELECTED=y
+# CONFIG_USB_GADGET_AT91 is not set
+# CONFIG_USB_GADGET_ATMEL_USBA is not set
+# CONFIG_USB_GADGET_FSL_USB2 is not set
+# CONFIG_USB_GADGET_LH7A40X is not set
+# CONFIG_USB_GADGET_OMAP is not set
+# CONFIG_USB_GADGET_PXA25X is not set
+# CONFIG_USB_GADGET_R8A66597 is not set
+# CONFIG_USB_GADGET_PXA27X is not set
+# CONFIG_USB_GADGET_S3C_HSOTG is not set
+# CONFIG_USB_GADGET_IMX is not set
+# CONFIG_USB_GADGET_S3C2410 is not set
+# CONFIG_USB_GADGET_M66592 is not set
+# CONFIG_USB_GADGET_AMD5536UDC is not set
+# CONFIG_USB_GADGET_FSL_QE is not set
+# CONFIG_USB_GADGET_CI13XXX is not set
+# CONFIG_USB_GADGET_NET2280 is not set
+# CONFIG_USB_GADGET_GOKU is not set
+# CONFIG_USB_GADGET_LANGWELL is not set
+# CONFIG_USB_GADGET_DUMMY_HCD is not set
+CONFIG_USB_GADGET_DUALSPEED=y
+CONFIG_USB_ZERO=m
+# CONFIG_USB_ZERO_HNPTEST is not set
+# CONFIG_USB_AUDIO is not set
+# CONFIG_USB_ETH is not set
+# CONFIG_USB_GADGETFS is not set
+# CONFIG_USB_FILE_STORAGE is not set
+# CONFIG_USB_MASS_STORAGE is not set
+# CONFIG_USB_G_SERIAL is not set
+# CONFIG_USB_MIDI_GADGET is not set
+# CONFIG_USB_G_PRINTER is not set
+# CONFIG_USB_CDC_COMPOSITE is not set
+# CONFIG_USB_G_NOKIA is not set
+# CONFIG_USB_G_MULTI is not set
+
+#
+# OTG and related infrastructure
+#
+CONFIG_USB_OTG_UTILS=y
+# CONFIG_USB_GPIO_VBUS is not set
+# CONFIG_ISP1301_OMAP is not set
+# CONFIG_USB_ULPI is not set
+CONFIG_TWL4030_USB=y
+# CONFIG_NOP_USB_XCEIV is not set
+CONFIG_MMC=y
+# CONFIG_MMC_DEBUG is not set
+# CONFIG_MMC_UNSAFE_RESUME is not set
+
+#
+# MMC/SD/SDIO Card Drivers
+#
+CONFIG_MMC_BLOCK=y
+CONFIG_MMC_BLOCK_BOUNCE=y
+# CONFIG_SDIO_UART is not set
+# CONFIG_MMC_TEST is not set
+
+#
+# MMC/SD/SDIO Host Controller Drivers
+#
+# CONFIG_MMC_SDHCI is not set
+# CONFIG_MMC_OMAP is not set
+CONFIG_MMC_OMAP_HS=y
+# CONFIG_MMC_SPI is not set
+# CONFIG_MEMSTICK is not set
+# CONFIG_NEW_LEDS is not set
+# CONFIG_ACCESSIBILITY is not set
+CONFIG_RTC_LIB=y
+# CONFIG_RTC_CLASS is not set
+# CONFIG_DMADEVICES is not set
+# CONFIG_AUXDISPLAY is not set
+# CONFIG_UIO is not set
+
+#
+# TI VLYNQ
+#
+# CONFIG_STAGING is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+# CONFIG_EXT3_FS_XATTR is not set
+# CONFIG_EXT4_FS is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
+CONFIG_DNOTIFY=y
+CONFIG_INOTIFY=y
+CONFIG_INOTIFY_USER=y
+CONFIG_QUOTA=y
+# CONFIG_QUOTA_NETLINK_INTERFACE is not set
+CONFIG_PRINT_QUOTA_WARNING=y
+# CONFIG_QUOTA_DEBUG is not set
+CONFIG_QUOTA_TREE=y
+# CONFIG_QFMT_V1 is not set
+CONFIG_QFMT_V2=y
+CONFIG_QUOTACTL=y
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_HUGETLB_PAGE is not set
+# CONFIG_CONFIGFS_FS is not set
+CONFIG_MISC_FILESYSTEMS=y
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_FS_DEBUG=0
+CONFIG_JFFS2_FS_WRITEBUFFER=y
+# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
+# CONFIG_JFFS2_SUMMARY is not set
+# CONFIG_JFFS2_FS_XATTR is not set
+CONFIG_JFFS2_COMPRESSION_OPTIONS=y
+CONFIG_JFFS2_ZLIB=y
+# CONFIG_JFFS2_LZO is not set
+CONFIG_JFFS2_RTIME=y
+# CONFIG_JFFS2_RUBIN is not set
+# CONFIG_JFFS2_CMODE_NONE is not set
+CONFIG_JFFS2_CMODE_PRIORITY=y
+# CONFIG_JFFS2_CMODE_SIZE is not set
+# CONFIG_JFFS2_CMODE_FAVOURLZO is not set
+# CONFIG_LOGFS is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_SQUASHFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V3_ACL is not set
+CONFIG_NFS_V4=y
+# CONFIG_NFS_V4_1 is not set
+CONFIG_ROOT_NFS=y
+# CONFIG_NFSD is not set
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+CONFIG_SUNRPC_GSS=y
+CONFIG_RPCSEC_GSS_KRB5=y
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CEPH_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_OSF_PARTITION is not set
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
+# CONFIG_MAC_PARTITION is not set
+CONFIG_MSDOS_PARTITION=y
+# CONFIG_BSD_DISKLABEL is not set
+# CONFIG_MINIX_SUBPARTITION is not set
+# CONFIG_SOLARIS_X86_PARTITION is not set
+# CONFIG_UNIXWARE_DISKLABEL is not set
+# CONFIG_LDM_PARTITION is not set
+# CONFIG_SGI_PARTITION is not set
+# CONFIG_ULTRIX_PARTITION is not set
+# CONFIG_SUN_PARTITION is not set
+# CONFIG_KARMA_PARTITION is not set
+# CONFIG_EFI_PARTITION is not set
+# CONFIG_SYSV68_PARTITION is not set
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=y
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+# CONFIG_NLS_ASCII is not set
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_UTF8 is not set
+# CONFIG_DLM is not set
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+CONFIG_ENABLE_WARN_DEPRECATED=y
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_STRIP_ASM_SYMS is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_DEBUG_FS=y
+# CONFIG_HEADERS_CHECK is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_SHIRQ is not set
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+CONFIG_DETECT_HUNG_TASK=y
+# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_TIMER_STATS is not set
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_KMEMLEAK is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+CONFIG_DEBUG_MUTEXES=y
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_LOCK_STAT is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+# CONFIG_DEBUG_KOBJECT is not set
+# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_INFO=y
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+# CONFIG_DEBUG_MEMORY_INIT is not set
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_DEBUG_SG is not set
+# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_DEBUG_CREDENTIALS is not set
+# CONFIG_BOOT_PRINTK_DELAY is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
+# CONFIG_LKDTM is not set
+# CONFIG_FAULT_INJECTION is not set
+# CONFIG_LATENCYTOP is not set
+# CONFIG_SYSCTL_SYSCALL_CHECK is not set
+# CONFIG_PAGE_POISONING is not set
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_TRACING_SUPPORT=y
+CONFIG_FTRACE=y
+# CONFIG_FUNCTION_TRACER is not set
+# CONFIG_IRQSOFF_TRACER is not set
+# CONFIG_SCHED_TRACER is not set
+# CONFIG_ENABLE_DEFAULT_TRACERS is not set
+# CONFIG_BOOT_TRACER is not set
+CONFIG_BRANCH_PROFILE_NONE=y
+# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
+# CONFIG_PROFILE_ALL_BRANCHES is not set
+# CONFIG_STACK_TRACER is not set
+# CONFIG_KMEMTRACE is not set
+# CONFIG_WORKQUEUE_TRACER is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_DYNAMIC_DEBUG is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_KGDB is not set
+CONFIG_ARM_UNWIND=y
+# CONFIG_DEBUG_USER is not set
+# CONFIG_DEBUG_ERRORS is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+CONFIG_DEBUG_LL=y
+# CONFIG_EARLY_PRINTK is not set
+# CONFIG_DEBUG_ICEDCC is not set
+# CONFIG_OC_ETM is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
+# CONFIG_DEFAULT_SECURITY_SELINUX is not set
+# CONFIG_DEFAULT_SECURITY_SMACK is not set
+# CONFIG_DEFAULT_SECURITY_TOMOYO is not set
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_DEFAULT_SECURITY=""
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
+CONFIG_CRYPTO_AEAD2=y
+CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_BLKCIPHER2=y
+CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_PCOMP=y
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
+# CONFIG_CRYPTO_GF128MUL is not set
+# CONFIG_CRYPTO_NULL is not set
+CONFIG_CRYPTO_WORKQUEUE=y
+# CONFIG_CRYPTO_CRYPTD is not set
+# CONFIG_CRYPTO_AUTHENC is not set
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_SEQIV is not set
+
+#
+# Block modes
+#
+CONFIG_CRYPTO_CBC=y
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+CONFIG_CRYPTO_ECB=m
+# CONFIG_CRYPTO_LRW is not set
+CONFIG_CRYPTO_PCBC=m
+# CONFIG_CRYPTO_XTS is not set
+
+#
+# Hash modes
+#
+# CONFIG_CRYPTO_HMAC is not set
+# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
+
+#
+# Digest
+#
+CONFIG_CRYPTO_CRC32C=y
+# CONFIG_CRYPTO_GHASH is not set
+# CONFIG_CRYPTO_MD4 is not set
+CONFIG_CRYPTO_MD5=y
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
+# CONFIG_CRYPTO_SHA1 is not set
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
+# CONFIG_CRYPTO_AES is not set
+# CONFIG_CRYPTO_ANUBIS is not set
+# CONFIG_CRYPTO_ARC4 is not set
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+CONFIG_CRYPTO_DES=y
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+
+#
+# Compression
+#
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_ZLIB is not set
+# CONFIG_CRYPTO_LZO is not set
+
+#
+# Random Number Generation
+#
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_HW=y
+# CONFIG_BINARY_PRINTF is not set
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+CONFIG_GENERIC_FIND_LAST_BIT=y
+CONFIG_CRC_CCITT=y
+# CONFIG_CRC16 is not set
+# CONFIG_CRC_T10DIF is not set
+# CONFIG_CRC_ITU_T is not set
+CONFIG_CRC32=y
+# CONFIG_CRC7 is not set
+CONFIG_LIBCRC32C=y
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_DECOMPRESS_GZIP=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
+CONFIG_NLATTR=y
diff --git a/arch/arm/configs/omap_4430sdp_defconfig b/arch/arm/configs/omap_4430sdp_defconfig
index a96bca290cd1..1fb04567f6e1 100644
--- a/arch/arm/configs/omap_4430sdp_defconfig
+++ b/arch/arm/configs/omap_4430sdp_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.32
-# Sun Dec 6 23:37:45 2009
+# Linux kernel version: 2.6.34-rc7
+# Wed May 12 12:26:05 2010
#
CONFIG_ARM=y
CONFIG_SYS_SUPPORTS_APM_EMULATION=y
@@ -9,6 +9,7 @@ CONFIG_GENERIC_GPIO=y
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y
+CONFIG_HAVE_PROC_CPU=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_STACKTRACE_SUPPORT=y
CONFIG_LOCKDEP_SUPPORT=y
@@ -20,6 +21,7 @@ CONFIG_RWSEM_GENERIC_SPINLOCK=y
CONFIG_ARCH_HAS_CPUFREQ=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_NEED_DMA_MAP_STATE=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
CONFIG_VECTORS_BASE=0xffff0000
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
@@ -33,28 +35,33 @@ CONFIG_LOCK_KERNEL=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_LOCALVERSION=""
CONFIG_LOCALVERSION_AUTO=y
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_LZO=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_LZO is not set
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
+# CONFIG_POSIX_MQUEUE is not set
CONFIG_BSD_PROCESS_ACCT=y
# CONFIG_BSD_PROCESS_ACCT_V3 is not set
+# CONFIG_TASKSTATS is not set
+# CONFIG_AUDIT is not set
#
# RCU Subsystem
#
CONFIG_TREE_RCU=y
# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_TINY_RCU is not set
# CONFIG_RCU_TRACE is not set
CONFIG_RCU_FANOUT=32
# CONFIG_RCU_FANOUT_EXACT is not set
# CONFIG_TREE_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_GROUP_SCHED=y
-CONFIG_FAIR_GROUP_SCHED=y
-# CONFIG_RT_GROUP_SCHED is not set
-CONFIG_USER_SCHED=y
-# CONFIG_CGROUP_SCHED is not set
# CONFIG_CGROUPS is not set
# CONFIG_SYSFS_DEPRECATED_V2 is not set
# CONFIG_RELAY is not set
@@ -64,6 +71,7 @@ CONFIG_INITRAMFS_SOURCE=""
CONFIG_RD_GZIP=y
# CONFIG_RD_BZIP2 is not set
# CONFIG_RD_LZMA is not set
+# CONFIG_RD_LZO is not set
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_SYSCTL=y
CONFIG_ANON_INODES=y
@@ -85,10 +93,14 @@ CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
CONFIG_AIO=y
+CONFIG_HAVE_PERF_EVENTS=y
+CONFIG_PERF_USE_VMALLOC=y
#
# Kernel Performance Events And Counters
#
+# CONFIG_PERF_EVENTS is not set
+# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_SLUB_DEBUG=y
CONFIG_COMPAT_BRK=y
@@ -127,14 +139,41 @@ CONFIG_LBDAF=y
# IO Schedulers
#
CONFIG_IOSCHED_NOOP=y
-CONFIG_IOSCHED_AS=y
CONFIG_IOSCHED_DEADLINE=y
CONFIG_IOSCHED_CFQ=y
-CONFIG_DEFAULT_AS=y
# CONFIG_DEFAULT_DEADLINE is not set
-# CONFIG_DEFAULT_CFQ is not set
+CONFIG_DEFAULT_CFQ=y
# CONFIG_DEFAULT_NOOP is not set
-CONFIG_DEFAULT_IOSCHED="anticipatory"
+CONFIG_DEFAULT_IOSCHED="cfq"
+# CONFIG_INLINE_SPIN_TRYLOCK is not set
+# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK is not set
+# CONFIG_INLINE_SPIN_LOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
+# CONFIG_INLINE_SPIN_UNLOCK is not set
+# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
+# CONFIG_INLINE_SPIN_UNLOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_READ_TRYLOCK is not set
+# CONFIG_INLINE_READ_LOCK is not set
+# CONFIG_INLINE_READ_LOCK_BH is not set
+# CONFIG_INLINE_READ_LOCK_IRQ is not set
+# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
+# CONFIG_INLINE_READ_UNLOCK is not set
+# CONFIG_INLINE_READ_UNLOCK_BH is not set
+# CONFIG_INLINE_READ_UNLOCK_IRQ is not set
+# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_WRITE_TRYLOCK is not set
+# CONFIG_INLINE_WRITE_LOCK is not set
+# CONFIG_INLINE_WRITE_LOCK_BH is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
+# CONFIG_INLINE_WRITE_UNLOCK is not set
+# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
+# CONFIG_INLINE_WRITE_UNLOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
+CONFIG_MUTEX_SPIN_ON_OWNER=y
# CONFIG_FREEZER is not set
#
@@ -146,6 +185,7 @@ CONFIG_MMU=y
# CONFIG_ARCH_REALVIEW is not set
# CONFIG_ARCH_VERSATILE is not set
# CONFIG_ARCH_AT91 is not set
+# CONFIG_ARCH_BCMRING is not set
# CONFIG_ARCH_CLPS711X is not set
# CONFIG_ARCH_GEMINI is not set
# CONFIG_ARCH_EBSA110 is not set
@@ -155,7 +195,6 @@ CONFIG_MMU=y
# CONFIG_ARCH_STMP3XXX is not set
# CONFIG_ARCH_NETX is not set
# CONFIG_ARCH_H720X is not set
-# CONFIG_ARCH_NOMADIK is not set
# CONFIG_ARCH_IOP13XX is not set
# CONFIG_ARCH_IOP32X is not set
# CONFIG_ARCH_IOP33X is not set
@@ -163,6 +202,7 @@ CONFIG_MMU=y
# CONFIG_ARCH_IXP2000 is not set
# CONFIG_ARCH_IXP4XX is not set
# CONFIG_ARCH_L7200 is not set
+# CONFIG_ARCH_DOVE is not set
# CONFIG_ARCH_KIRKWOOD is not set
# CONFIG_ARCH_LOKI is not set
# CONFIG_ARCH_MV78XX0 is not set
@@ -171,25 +211,32 @@ CONFIG_MMU=y
# CONFIG_ARCH_KS8695 is not set
# CONFIG_ARCH_NS9XXX is not set
# CONFIG_ARCH_W90X900 is not set
+# CONFIG_ARCH_NUC93X is not set
# CONFIG_ARCH_PNX4008 is not set
# CONFIG_ARCH_PXA is not set
# CONFIG_ARCH_MSM is not set
+# CONFIG_ARCH_SHMOBILE is not set
# CONFIG_ARCH_RPC is not set
# CONFIG_ARCH_SA1100 is not set
# CONFIG_ARCH_S3C2410 is not set
# CONFIG_ARCH_S3C64XX is not set
+# CONFIG_ARCH_S5P6440 is not set
+# CONFIG_ARCH_S5P6442 is not set
# CONFIG_ARCH_S5PC1XX is not set
+# CONFIG_ARCH_S5PV210 is not set
# CONFIG_ARCH_SHARK is not set
# CONFIG_ARCH_LH7A40X is not set
# CONFIG_ARCH_U300 is not set
+# CONFIG_ARCH_U8500 is not set
+# CONFIG_ARCH_NOMADIK is not set
# CONFIG_ARCH_DAVINCI is not set
CONFIG_ARCH_OMAP=y
-# CONFIG_ARCH_BCMRING is not set
#
# TI OMAP Implementations
#
# CONFIG_ARCH_OMAP1 is not set
+CONFIG_ARCH_OMAP2PLUS=y
# CONFIG_ARCH_OMAP2 is not set
# CONFIG_ARCH_OMAP3 is not set
CONFIG_ARCH_OMAP4=y
@@ -205,10 +252,6 @@ CONFIG_OMAP_MCBSP=y
CONFIG_OMAP_32K_TIMER=y
CONFIG_OMAP_32K_TIMER_HZ=128
CONFIG_OMAP_DM_TIMER=y
-# CONFIG_OMAP_LL_DEBUG_UART1 is not set
-# CONFIG_OMAP_LL_DEBUG_UART2 is not set
-CONFIG_OMAP_LL_DEBUG_UART3=y
-# CONFIG_OMAP_LL_DEBUG_NONE is not set
# CONFIG_OMAP_PM_NONE is not set
CONFIG_OMAP_PM_NOOP=y
@@ -243,13 +286,16 @@ CONFIG_CPU_CP15_MMU=y
# CONFIG_CPU_BPREDICT_DISABLE is not set
CONFIG_HAS_TLS_REG=y
CONFIG_OUTER_CACHE=y
+CONFIG_OUTER_CACHE_SYNC=y
CONFIG_CACHE_L2X0=y
CONFIG_ARM_L1_CACHE_SHIFT=5
+CONFIG_CPU_HAS_PMU=y
# CONFIG_ARM_ERRATA_430973 is not set
# CONFIG_ARM_ERRATA_458693 is not set
# CONFIG_ARM_ERRATA_460075 is not set
CONFIG_PL310_ERRATA_588369=y
CONFIG_ARM_GIC=y
+CONFIG_COMMON_CLKDEV=y
#
# Bus support
@@ -280,6 +326,7 @@ CONFIG_HZ=128
# CONFIG_THUMB2_KERNEL is not set
CONFIG_AEABI=y
CONFIG_OABI_COMPAT=y
+CONFIG_ARCH_HAS_HOLES_MEMORYMODEL=y
# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set
# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set
# CONFIG_HIGHMEM is not set
@@ -294,8 +341,6 @@ CONFIG_SPLIT_PTLOCK_CPUS=4
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
CONFIG_VIRT_TO_BUS=y
-CONFIG_HAVE_MLOCK=y
-CONFIG_HAVE_MLOCKED_PAGE_BIT=y
# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
# CONFIG_LEDS is not set
@@ -343,7 +388,83 @@ CONFIG_BINFMT_MISC=y
#
# CONFIG_PM is not set
CONFIG_ARCH_SUSPEND_POSSIBLE=y
-# CONFIG_NET is not set
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_UNIX is not set
+CONFIG_XFRM=y
+# CONFIG_XFRM_USER is not set
+# CONFIG_XFRM_SUB_POLICY is not set
+# CONFIG_XFRM_MIGRATE is not set
+# CONFIG_XFRM_STATISTICS is not set
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+# CONFIG_INET_TUNNEL is not set
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
+CONFIG_INET_XFRM_MODE_BEET=y
+CONFIG_INET_LRO=y
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
+# CONFIG_IPV6 is not set
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NETFILTER is not set
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_NET_DSA is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
+# CONFIG_NET_SCHED is not set
+# CONFIG_DCB is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_AF_RXRPC is not set
+# CONFIG_WIRELESS is not set
+# CONFIG_WIMAX is not set
+# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
#
# Device Drivers
@@ -360,17 +481,24 @@ CONFIG_PREVENT_FIRMWARE_BUILD=y
# CONFIG_DEBUG_DRIVER is not set
# CONFIG_DEBUG_DEVRES is not set
# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_CONNECTOR is not set
# CONFIG_MTD is not set
# CONFIG_PARPORT is not set
CONFIG_BLK_DEV=y
# CONFIG_BLK_DEV_COW_COMMON is not set
CONFIG_BLK_DEV_LOOP=y
# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+
+#
+# DRBD disabled because PROC_FS, INET or CONNECTOR not selected
+#
+# CONFIG_BLK_DEV_NBD is not set
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=16
CONFIG_BLK_DEV_RAM_SIZE=16384
# CONFIG_BLK_DEV_XIP is not set
# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
# CONFIG_MG_DISK is not set
# CONFIG_MISC_DEVICES is not set
CONFIG_HAVE_IDE=y
@@ -379,12 +507,56 @@ CONFIG_HAVE_IDE=y
#
# SCSI device support
#
+CONFIG_SCSI_MOD=y
# CONFIG_RAID_ATTRS is not set
# CONFIG_SCSI is not set
# CONFIG_SCSI_DMA is not set
# CONFIG_SCSI_NETLINK is not set
# CONFIG_ATA is not set
# CONFIG_MD is not set
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+# CONFIG_VETH is not set
+# CONFIG_PHYLIB is not set
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=y
+# CONFIG_AX88796 is not set
+# CONFIG_SMC91X is not set
+# CONFIG_DM9000 is not set
+# CONFIG_ENC28J60 is not set
+# CONFIG_ETHOC is not set
+# CONFIG_SMC911X is not set
+# CONFIG_SMSC911X is not set
+# CONFIG_DNET is not set
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
+# CONFIG_B44 is not set
+# CONFIG_KS8842 is not set
+CONFIG_KS8851=y
+# CONFIG_KS8851_MLL is not set
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+# CONFIG_WLAN is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
+# CONFIG_WAN is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_ISDN is not set
# CONFIG_PHONE is not set
#
@@ -393,6 +565,7 @@ CONFIG_HAVE_IDE=y
CONFIG_INPUT=y
# CONFIG_INPUT_FF_MEMLESS is not set
# CONFIG_INPUT_POLLDEV is not set
+# CONFIG_INPUT_SPARSEKMAP is not set
#
# Userland interfaces
@@ -445,8 +618,10 @@ CONFIG_SERIAL_8250_RSA=y
#
# Non-8250 serial port support
#
+# CONFIG_SERIAL_MAX3100 is not set
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_TIMBERDALE is not set
CONFIG_UNIX98_PTYS=y
# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
# CONFIG_LEGACY_PTYS is not set
@@ -456,8 +631,58 @@ CONFIG_HW_RANDOM=y
# CONFIG_R3964 is not set
# CONFIG_RAW_DRIVER is not set
# CONFIG_TCG_TPM is not set
-# CONFIG_I2C is not set
-# CONFIG_SPI is not set
+CONFIG_I2C=y
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_HELPER_AUTO=y
+
+#
+# I2C Hardware Bus support
+#
+
+#
+# I2C system bus drivers (mostly embedded / system-on-chip)
+#
+# CONFIG_I2C_DESIGNWARE is not set
+# CONFIG_I2C_GPIO is not set
+# CONFIG_I2C_OCORES is not set
+CONFIG_I2C_OMAP=y
+# CONFIG_I2C_SIMTEC is not set
+# CONFIG_I2C_XILINX is not set
+
+#
+# External I2C/SMBus adapter drivers
+#
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_TAOS_EVM is not set
+
+#
+# Other I2C/SMBus bus drivers
+#
+# CONFIG_I2C_PCA_PLATFORM is not set
+# CONFIG_I2C_STUB is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+CONFIG_SPI=y
+# CONFIG_SPI_DEBUG is not set
+CONFIG_SPI_MASTER=y
+
+#
+# SPI Master Controller Drivers
+#
+# CONFIG_SPI_BITBANG is not set
+# CONFIG_SPI_GPIO is not set
+CONFIG_SPI_OMAP24XX=y
+# CONFIG_SPI_XILINX is not set
+# CONFIG_SPI_DESIGNWARE is not set
+
+#
+# SPI Protocol Masters
+#
+# CONFIG_SPI_SPIDEV is not set
+# CONFIG_SPI_TLE62X0 is not set
#
# PPS support
@@ -471,10 +696,17 @@ CONFIG_GPIOLIB=y
#
# Memory mapped GPIO expanders:
#
+# CONFIG_GPIO_IT8761E is not set
#
# I2C GPIO expanders:
#
+# CONFIG_GPIO_MAX7300 is not set
+# CONFIG_GPIO_MAX732X is not set
+# CONFIG_GPIO_PCA953X is not set
+# CONFIG_GPIO_PCF857X is not set
+# CONFIG_GPIO_TWL4030 is not set
+# CONFIG_GPIO_ADP5588 is not set
#
# PCI GPIO expanders:
@@ -483,6 +715,9 @@ CONFIG_GPIOLIB=y
#
# SPI GPIO expanders:
#
+# CONFIG_GPIO_MAX7301 is not set
+# CONFIG_GPIO_MCP23S08 is not set
+# CONFIG_GPIO_MC33880 is not set
#
# AC97 GPIO expanders:
@@ -492,7 +727,15 @@ CONFIG_GPIOLIB=y
# CONFIG_HWMON is not set
# CONFIG_THERMAL is not set
CONFIG_WATCHDOG=y
+# CONFIG_WATCHDOG_NOWAYOUT is not set
+
+#
+# Watchdog Device Drivers
+#
+# CONFIG_SOFT_WATCHDOG is not set
CONFIG_OMAP_WATCHDOG=y
+# CONFIG_TWL4030_WATCHDOG is not set
+# CONFIG_MAX63XX_WATCHDOG is not set
CONFIG_SSB_POSSIBLE=y
#
@@ -504,15 +747,46 @@ CONFIG_SSB_POSSIBLE=y
# Multifunction device drivers
#
# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_88PM860X is not set
# CONFIG_MFD_SM501 is not set
# CONFIG_MFD_ASIC3 is not set
# CONFIG_HTC_EGPIO is not set
# CONFIG_HTC_PASIC3 is not set
+# CONFIG_HTC_I2CPLD is not set
+# CONFIG_TPS65010 is not set
+CONFIG_TWL4030_CORE=y
+# CONFIG_TWL4030_POWER is not set
+# CONFIG_TWL4030_CODEC is not set
# CONFIG_MFD_TMIO is not set
# CONFIG_MFD_T7L66XB is not set
# CONFIG_MFD_TC6387XB is not set
# CONFIG_MFD_TC6393XB is not set
-# CONFIG_REGULATOR is not set
+# CONFIG_PMIC_DA903X is not set
+# CONFIG_PMIC_ADP5520 is not set
+# CONFIG_MFD_MAX8925 is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM831X is not set
+# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_WM8994 is not set
+# CONFIG_MFD_PCF50633 is not set
+# CONFIG_MFD_MC13783 is not set
+# CONFIG_AB3100_CORE is not set
+# CONFIG_EZX_PCAP is not set
+# CONFIG_AB4500_CORE is not set
+CONFIG_REGULATOR=y
+# CONFIG_REGULATOR_DEBUG is not set
+# CONFIG_REGULATOR_DUMMY is not set
+# CONFIG_REGULATOR_FIXED_VOLTAGE is not set
+# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set
+# CONFIG_REGULATOR_USERSPACE_CONSUMER is not set
+# CONFIG_REGULATOR_BQ24022 is not set
+# CONFIG_REGULATOR_MAX1586 is not set
+# CONFIG_REGULATOR_MAX8649 is not set
+# CONFIG_REGULATOR_MAX8660 is not set
+CONFIG_REGULATOR_TWL4030=y
+# CONFIG_REGULATOR_LP3971 is not set
+# CONFIG_REGULATOR_TPS65023 is not set
+# CONFIG_REGULATOR_TPS6507X is not set
# CONFIG_MEDIA_SUPPORT is not set
#
@@ -536,12 +810,94 @@ CONFIG_DUMMY_CONSOLE=y
# CONFIG_SOUND is not set
# CONFIG_HID_SUPPORT is not set
# CONFIG_USB_SUPPORT is not set
-# CONFIG_MMC is not set
+CONFIG_MMC=y
+# CONFIG_MMC_DEBUG is not set
+# CONFIG_MMC_UNSAFE_RESUME is not set
+
+#
+# MMC/SD/SDIO Card Drivers
+#
+CONFIG_MMC_BLOCK=y
+CONFIG_MMC_BLOCK_BOUNCE=y
+# CONFIG_SDIO_UART is not set
+# CONFIG_MMC_TEST is not set
+
+#
+# MMC/SD/SDIO Host Controller Drivers
+#
+# CONFIG_MMC_SDHCI is not set
+# CONFIG_MMC_OMAP is not set
+CONFIG_MMC_OMAP_HS=y
# CONFIG_MEMSTICK is not set
# CONFIG_NEW_LEDS is not set
# CONFIG_ACCESSIBILITY is not set
CONFIG_RTC_LIB=y
-# CONFIG_RTC_CLASS is not set
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_HCTOSYS=y
+CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
+# CONFIG_RTC_DEBUG is not set
+
+#
+# RTC interfaces
+#
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
+# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
+# CONFIG_RTC_DRV_TEST is not set
+
+#
+# I2C RTC drivers
+#
+# CONFIG_RTC_DRV_DS1307 is not set
+# CONFIG_RTC_DRV_DS1374 is not set
+# CONFIG_RTC_DRV_DS1672 is not set
+# CONFIG_RTC_DRV_MAX6900 is not set
+# CONFIG_RTC_DRV_RS5C372 is not set
+# CONFIG_RTC_DRV_ISL1208 is not set
+# CONFIG_RTC_DRV_X1205 is not set
+# CONFIG_RTC_DRV_PCF8563 is not set
+# CONFIG_RTC_DRV_PCF8583 is not set
+# CONFIG_RTC_DRV_M41T80 is not set
+# CONFIG_RTC_DRV_BQ32K is not set
+CONFIG_RTC_DRV_TWL4030=y
+# CONFIG_RTC_DRV_S35390A is not set
+# CONFIG_RTC_DRV_FM3130 is not set
+# CONFIG_RTC_DRV_RX8581 is not set
+# CONFIG_RTC_DRV_RX8025 is not set
+
+#
+# SPI RTC drivers
+#
+# CONFIG_RTC_DRV_M41T94 is not set
+# CONFIG_RTC_DRV_DS1305 is not set
+# CONFIG_RTC_DRV_DS1390 is not set
+# CONFIG_RTC_DRV_MAX6902 is not set
+# CONFIG_RTC_DRV_R9701 is not set
+# CONFIG_RTC_DRV_RS5C348 is not set
+# CONFIG_RTC_DRV_DS3234 is not set
+# CONFIG_RTC_DRV_PCF2123 is not set
+
+#
+# Platform RTC drivers
+#
+# CONFIG_RTC_DRV_CMOS is not set
+# CONFIG_RTC_DRV_DS1286 is not set
+# CONFIG_RTC_DRV_DS1511 is not set
+# CONFIG_RTC_DRV_DS1553 is not set
+# CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_STK17TA8 is not set
+# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T35 is not set
+# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_MSM6242 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
+# CONFIG_RTC_DRV_RP5C01 is not set
+# CONFIG_RTC_DRV_V3020 is not set
+
+#
+# on-CPU RTC drivers
+#
# CONFIG_DMADEVICES is not set
# CONFIG_AUXDISPLAY is not set
# CONFIG_UIO is not set
@@ -564,9 +920,10 @@ CONFIG_EXT3_FS=y
CONFIG_JBD=y
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
-# CONFIG_FS_POSIX_ACL is not set
+CONFIG_FS_POSIX_ACL=y
# CONFIG_XFS_FS is not set
# CONFIG_GFS2_FS is not set
+# CONFIG_OCFS2_FS is not set
# CONFIG_BTRFS_FS is not set
# CONFIG_NILFS2_FS is not set
CONFIG_FILE_LOCKING=y
@@ -575,7 +932,9 @@ CONFIG_DNOTIFY=y
CONFIG_INOTIFY=y
CONFIG_INOTIFY_USER=y
CONFIG_QUOTA=y
+# CONFIG_QUOTA_NETLINK_INTERFACE is not set
CONFIG_PRINT_QUOTA_WARNING=y
+# CONFIG_QUOTA_DEBUG is not set
CONFIG_QUOTA_TREE=y
# CONFIG_QFMT_V1 is not set
CONFIG_QFMT_V2=y
@@ -624,6 +983,7 @@ CONFIG_MISC_FILESYSTEMS=y
# CONFIG_BEFS_FS is not set
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
+# CONFIG_LOGFS is not set
# CONFIG_CRAMFS is not set
# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
@@ -634,6 +994,28 @@ CONFIG_MISC_FILESYSTEMS=y
# CONFIG_ROMFS_FS is not set
# CONFIG_SYSV_FS is not set
# CONFIG_UFS_FS is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=y
+# CONFIG_NFS_V4_1 is not set
+CONFIG_ROOT_NFS=y
+# CONFIG_NFSD is not set
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_ACL_SUPPORT=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+CONFIG_SUNRPC_GSS=y
+CONFIG_RPCSEC_GSS_KRB5=y
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CEPH_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
#
# Partition Types
@@ -696,6 +1078,7 @@ CONFIG_NLS_ISO8859_1=y
# CONFIG_NLS_KOI8_R is not set
# CONFIG_NLS_KOI8_U is not set
# CONFIG_NLS_UTF8 is not set
+# CONFIG_DLM is not set
#
# Kernel hacking
@@ -750,13 +1133,11 @@ CONFIG_FRAME_POINTER=y
# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
# CONFIG_FAULT_INJECTION is not set
+# CONFIG_SYSCTL_SYSCALL_CHECK is not set
# CONFIG_PAGE_POISONING is not set
CONFIG_HAVE_FUNCTION_TRACER=y
CONFIG_TRACING_SUPPORT=y
# CONFIG_FTRACE is not set
-# CONFIG_BRANCH_PROFILE_NONE is not set
-# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
-# CONFIG_PROFILE_ALL_BRANCHES is not set
# CONFIG_SAMPLES is not set
CONFIG_HAVE_ARCH_KGDB=y
# CONFIG_KGDB is not set
@@ -765,6 +1146,7 @@ CONFIG_HAVE_ARCH_KGDB=y
# CONFIG_DEBUG_ERRORS is not set
# CONFIG_DEBUG_STACK_USAGE is not set
# CONFIG_DEBUG_LL is not set
+# CONFIG_OC_ETM is not set
#
# Security options
@@ -772,7 +1154,11 @@ CONFIG_HAVE_ARCH_KGDB=y
# CONFIG_KEYS is not set
# CONFIG_SECURITY is not set
# CONFIG_SECURITYFS is not set
-# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+# CONFIG_DEFAULT_SECURITY_SELINUX is not set
+# CONFIG_DEFAULT_SECURITY_SMACK is not set
+# CONFIG_DEFAULT_SECURITY_TOMOYO is not set
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_DEFAULT_SECURITY=""
CONFIG_CRYPTO=y
#
@@ -791,6 +1177,7 @@ CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_MANAGER2=y
# CONFIG_CRYPTO_GF128MUL is not set
# CONFIG_CRYPTO_NULL is not set
+# CONFIG_CRYPTO_PCRYPT is not set
CONFIG_CRYPTO_WORKQUEUE=y
# CONFIG_CRYPTO_CRYPTD is not set
# CONFIG_CRYPTO_AUTHENC is not set
@@ -889,3 +1276,4 @@ CONFIG_DECOMPRESS_GZIP=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
+CONFIG_NLATTR=y
diff --git a/arch/arm/configs/rx51_defconfig b/arch/arm/configs/rx51_defconfig
index 473f9e13f08b..56d4928cd4c9 100644
--- a/arch/arm/configs/rx51_defconfig
+++ b/arch/arm/configs/rx51_defconfig
@@ -784,6 +784,7 @@ CONFIG_INPUT_KEYBOARD=y
# CONFIG_KEYBOARD_NEWTON is not set
# CONFIG_KEYBOARD_STOWAWAY is not set
CONFIG_KEYBOARD_GPIO=m
+CONFIG_KEYBOARD_TWL4030=y
# CONFIG_INPUT_MOUSE is not set
# CONFIG_INPUT_JOYSTICK is not set
# CONFIG_INPUT_TABLET is not set
@@ -809,6 +810,7 @@ CONFIG_INPUT_MISC=y
# CONFIG_INPUT_POWERMATE is not set
# CONFIG_INPUT_YEALINK is not set
# CONFIG_INPUT_CM109 is not set
+CONFIG_INPUT_TWL4030_PWRBUTTON=y
CONFIG_INPUT_UINPUT=m
#
@@ -1110,7 +1112,40 @@ CONFIG_RADIO_ADAPTERS=y
#
# CONFIG_VGASTATE is not set
# CONFIG_VIDEO_OUTPUT_CONTROL is not set
-# CONFIG_FB is not set
+CONFIG_FB=y
+CONFIG_FB_CFB_FILLRECT=y
+CONFIG_FB_CFB_COPYAREA=y
+CONFIG_FB_CFB_IMAGEBLIT=y
+
+# Frame buffer hardware drivers
+#
+CONFIG_OMAP2_VRAM=y
+CONFIG_OMAP2_VRFB=y
+CONFIG_OMAP2_DSS=y
+CONFIG_OMAP2_VRAM_SIZE=0
+# CONFIG_OMAP2_DSS_DEBUG_SUPPORT is not set
+# CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS is not set
+# CONFIG_OMAP2_DSS_DPI is not set
+# CONFIG_OMAP2_DSS_RFBI is not set
+# CONFIG_OMAP2_DSS_VENC is not set
+CONFIG_OMAP2_DSS_SDI=y
+# CONFIG_OMAP2_DSS_DSI is not set
+# CONFIG_OMAP2_DSS_FAKE_VSYNC is not set
+CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK=0
+CONFIG_FB_OMAP2=y
+CONFIG_FB_OMAP2_DEBUG_SUPPORT=y
+CONFIG_FB_OMAP2_NUM_FBS=3
+
+#
+# OMAP2/3 Display Device Drivers
+#
+# CONFIG_PANEL_GENERIC is not set
+# CONFIG_PANEL_SHARP_LS037V7DW01 is not set
+# CONFIG_PANEL_SHARP_LQ043T1DG01 is not set
+# CONFIG_PANEL_TOPPOLY_TDO35S is not set
+# CONFIG_PANEL_TPO_TD043MTEA1 is not set
+CONFIG_PANEL_ACX565AKM=y
+
# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
#
@@ -1127,6 +1162,8 @@ CONFIG_DISPLAY_SUPPORT=y
#
# CONFIG_VGA_CONSOLE is not set
CONFIG_DUMMY_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_LOGO=y
CONFIG_SOUND=y
# CONFIG_SOUND_OSS_CORE is not set
CONFIG_SND=y
diff --git a/arch/arm/configs/s3c2410_defconfig b/arch/arm/configs/s3c2410_defconfig
index 8e94c3caeb8c..44cea2ddd22b 100644
--- a/arch/arm/configs/s3c2410_defconfig
+++ b/arch/arm/configs/s3c2410_defconfig
@@ -1,14 +1,15 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.30-rc2
+# Linux kernel version: 2.6.34
+# Fri May 28 19:15:48 2010
#
CONFIG_ARM=y
CONFIG_HAVE_PWM=y
CONFIG_SYS_SUPPORTS_APM_EMULATION=y
CONFIG_GENERIC_GPIO=y
-# CONFIG_GENERIC_TIME is not set
-# CONFIG_GENERIC_CLOCKEVENTS is not set
-CONFIG_MMU=y
+CONFIG_GENERIC_TIME=y
+CONFIG_ARCH_USES_GETTIMEOFFSET=y
+CONFIG_HAVE_PROC_CPU=y
CONFIG_NO_IOPORT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_STACKTRACE_SUPPORT=y
@@ -18,13 +19,14 @@ CONFIG_TRACE_IRQFLAGS_SUPPORT=y
CONFIG_HARDIRQS_SW_RESEND=y
CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_RWSEM_GENERIC_SPINLOCK=y
-# CONFIG_ARCH_HAS_ILOG2_U32 is not set
-# CONFIG_ARCH_HAS_ILOG2_U64 is not set
+CONFIG_ARCH_HAS_CPUFREQ=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_NEED_DMA_MAP_STATE=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
CONFIG_VECTORS_BASE=0xffff0000
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
#
# General setup
@@ -34,6 +36,13 @@ CONFIG_BROKEN_ON_SMP=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_LOCALVERSION=""
CONFIG_LOCALVERSION_AUTO=y
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_LZO=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_LZO is not set
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
@@ -45,15 +54,16 @@ CONFIG_SYSVIPC_SYSCTL=y
#
# RCU Subsystem
#
-CONFIG_CLASSIC_RCU=y
-# CONFIG_TREE_RCU is not set
-# CONFIG_PREEMPT_RCU is not set
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_TINY_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
# CONFIG_TREE_RCU_TRACE is not set
-# CONFIG_PREEMPT_RCU_TRACE is not set
CONFIG_IKCONFIG=m
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=16
-# CONFIG_GROUP_SCHED is not set
# CONFIG_CGROUPS is not set
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
@@ -69,6 +79,7 @@ CONFIG_INITRAMFS_SOURCE=""
CONFIG_RD_GZIP=y
CONFIG_RD_BZIP2=y
CONFIG_RD_LZMA=y
+CONFIG_RD_LZO=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_SYSCTL=y
CONFIG_ANON_INODES=y
@@ -78,7 +89,6 @@ CONFIG_SYSCTL_SYSCALL=y
CONFIG_KALLSYMS=y
# CONFIG_KALLSYMS_ALL is not set
# CONFIG_KALLSYMS_EXTRA_PASS is not set
-# CONFIG_STRIP_ASM_SYMS is not set
CONFIG_HOTPLUG=y
CONFIG_PRINTK=y
CONFIG_BUG=y
@@ -91,19 +101,30 @@ CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
CONFIG_AIO=y
+CONFIG_HAVE_PERF_EVENTS=y
+CONFIG_PERF_USE_VMALLOC=y
+
+#
+# Kernel Performance Events And Counters
+#
+# CONFIG_PERF_EVENTS is not set
+# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_COMPAT_BRK=y
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
# CONFIG_PROFILING is not set
-# CONFIG_MARKERS is not set
CONFIG_HAVE_OPROFILE=y
# CONFIG_KPROBES is not set
CONFIG_HAVE_KPROBES=y
CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_CLK=y
-# CONFIG_SLOW_WORK is not set
+
+#
+# GCOV-based kernel profiling
+#
+CONFIG_SLOW_WORK=y
CONFIG_HAVE_GENERIC_DMA_COHERENT=y
CONFIG_SLABINFO=y
CONFIG_RT_MUTEXES=y
@@ -115,7 +136,7 @@ CONFIG_MODULE_UNLOAD=y
# CONFIG_MODVERSIONS is not set
# CONFIG_MODULE_SRCVERSION_ALL is not set
CONFIG_BLOCK=y
-# CONFIG_LBD is not set
+CONFIG_LBDAF=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_BLK_DEV_INTEGRITY is not set
@@ -123,32 +144,64 @@ CONFIG_BLOCK=y
# IO Schedulers
#
CONFIG_IOSCHED_NOOP=y
-CONFIG_IOSCHED_AS=y
CONFIG_IOSCHED_DEADLINE=y
CONFIG_IOSCHED_CFQ=y
-CONFIG_DEFAULT_AS=y
# CONFIG_DEFAULT_DEADLINE is not set
-# CONFIG_DEFAULT_CFQ is not set
+CONFIG_DEFAULT_CFQ=y
# CONFIG_DEFAULT_NOOP is not set
-CONFIG_DEFAULT_IOSCHED="anticipatory"
+CONFIG_DEFAULT_IOSCHED="cfq"
+# CONFIG_INLINE_SPIN_TRYLOCK is not set
+# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK is not set
+# CONFIG_INLINE_SPIN_LOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
+CONFIG_INLINE_SPIN_UNLOCK=y
+# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
+CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
+# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_READ_TRYLOCK is not set
+# CONFIG_INLINE_READ_LOCK is not set
+# CONFIG_INLINE_READ_LOCK_BH is not set
+# CONFIG_INLINE_READ_LOCK_IRQ is not set
+# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
+CONFIG_INLINE_READ_UNLOCK=y
+# CONFIG_INLINE_READ_UNLOCK_BH is not set
+CONFIG_INLINE_READ_UNLOCK_IRQ=y
+# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_WRITE_TRYLOCK is not set
+# CONFIG_INLINE_WRITE_LOCK is not set
+# CONFIG_INLINE_WRITE_LOCK_BH is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
+CONFIG_INLINE_WRITE_UNLOCK=y
+# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
+CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
+# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
+# CONFIG_MUTEX_SPIN_ON_OWNER is not set
CONFIG_FREEZER=y
#
# System Type
#
+CONFIG_MMU=y
# CONFIG_ARCH_AAEC2000 is not set
# CONFIG_ARCH_INTEGRATOR is not set
# CONFIG_ARCH_REALVIEW is not set
# CONFIG_ARCH_VERSATILE is not set
+# CONFIG_ARCH_VEXPRESS is not set
# CONFIG_ARCH_AT91 is not set
+# CONFIG_ARCH_BCMRING is not set
# CONFIG_ARCH_CLPS711X is not set
+# CONFIG_ARCH_CNS3XXX is not set
+# CONFIG_ARCH_GEMINI is not set
# CONFIG_ARCH_EBSA110 is not set
# CONFIG_ARCH_EP93XX is not set
-# CONFIG_ARCH_GEMINI is not set
# CONFIG_ARCH_FOOTBRIDGE is not set
+# CONFIG_ARCH_MXC is not set
+# CONFIG_ARCH_STMP3XXX is not set
# CONFIG_ARCH_NETX is not set
# CONFIG_ARCH_H720X is not set
-# CONFIG_ARCH_IMX is not set
# CONFIG_ARCH_IOP13XX is not set
# CONFIG_ARCH_IOP32X is not set
# CONFIG_ARCH_IOP33X is not set
@@ -156,42 +209,37 @@ CONFIG_FREEZER=y
# CONFIG_ARCH_IXP2000 is not set
# CONFIG_ARCH_IXP4XX is not set
# CONFIG_ARCH_L7200 is not set
+# CONFIG_ARCH_DOVE is not set
# CONFIG_ARCH_KIRKWOOD is not set
-# CONFIG_ARCH_KS8695 is not set
-# CONFIG_ARCH_NS9XXX is not set
# CONFIG_ARCH_LOKI is not set
# CONFIG_ARCH_MV78XX0 is not set
-# CONFIG_ARCH_MXC is not set
# CONFIG_ARCH_ORION5X is not set
+# CONFIG_ARCH_MMP is not set
+# CONFIG_ARCH_KS8695 is not set
+# CONFIG_ARCH_NS9XXX is not set
+# CONFIG_ARCH_W90X900 is not set
+# CONFIG_ARCH_NUC93X is not set
# CONFIG_ARCH_PNX4008 is not set
# CONFIG_ARCH_PXA is not set
-# CONFIG_ARCH_MMP is not set
+# CONFIG_ARCH_MSM is not set
+# CONFIG_ARCH_SHMOBILE is not set
# CONFIG_ARCH_RPC is not set
# CONFIG_ARCH_SA1100 is not set
CONFIG_ARCH_S3C2410=y
# CONFIG_ARCH_S3C64XX is not set
+# CONFIG_ARCH_S5P6440 is not set
+# CONFIG_ARCH_S5P6442 is not set
+# CONFIG_ARCH_S5PC100 is not set
+# CONFIG_ARCH_S5PV210 is not set
# CONFIG_ARCH_SHARK is not set
# CONFIG_ARCH_LH7A40X is not set
+# CONFIG_ARCH_U300 is not set
+# CONFIG_ARCH_U8500 is not set
+# CONFIG_ARCH_NOMADIK is not set
# CONFIG_ARCH_DAVINCI is not set
# CONFIG_ARCH_OMAP is not set
-# CONFIG_ARCH_MSM is not set
-# CONFIG_ARCH_W90X900 is not set
-CONFIG_PLAT_S3C24XX=y
-CONFIG_S3C2410_CLOCK=y
-CONFIG_S3C24XX_DCLK=y
-CONFIG_CPU_S3C244X=y
-CONFIG_S3C24XX_PWM=y
-CONFIG_S3C24XX_GPIO_EXTRA=128
-CONFIG_S3C24XX_GPIO_EXTRA64=y
-CONFIG_S3C24XX_GPIO_EXTRA128=y
-CONFIG_PM_SIMTEC=y
-CONFIG_S3C2410_DMA=y
-# CONFIG_S3C2410_DMA_DEBUG is not set
-CONFIG_S3C_ADC=y
-CONFIG_MACH_SMDK=y
-CONFIG_PLAT_S3C=y
-CONFIG_CPU_LLSERIAL_S3C2410=y
-CONFIG_CPU_LLSERIAL_S3C2440=y
+# CONFIG_PLAT_SPEAR is not set
+CONFIG_PLAT_SAMSUNG=y
#
# Boot options
@@ -199,15 +247,44 @@ CONFIG_CPU_LLSERIAL_S3C2440=y
# CONFIG_S3C_BOOT_WATCHDOG is not set
CONFIG_S3C_BOOT_ERROR_RESET=y
CONFIG_S3C_BOOT_UART_FORCE_FIFO=y
+CONFIG_S3C_LOWLEVEL_UART_PORT=0
+CONFIG_SAMSUNG_CLKSRC=y
+CONFIG_S3C_GPIO_CFG_S3C24XX=y
+CONFIG_S3C_GPIO_PULL_UPDOWN=y
+CONFIG_S3C_GPIO_PULL_UP=y
+CONFIG_SAMSUNG_GPIO_EXTRA=0
+CONFIG_S3C_GPIO_SPACE=0
+CONFIG_S3C_ADC=y
+CONFIG_S3C_DEV_HSMMC=y
+CONFIG_S3C_DEV_HSMMC1=y
+CONFIG_S3C_DEV_HWMON=y
+CONFIG_S3C_DEV_FB=y
+CONFIG_S3C_DEV_USB_HOST=y
+CONFIG_S3C_DEV_WDT=y
+CONFIG_S3C_DEV_NAND=y
+CONFIG_S3C_DMA=y
#
# Power management
#
# CONFIG_SAMSUNG_PM_DEBUG is not set
# CONFIG_SAMSUNG_PM_CHECK is not set
-CONFIG_S3C_LOWLEVEL_UART_PORT=0
-CONFIG_S3C_GPIO_SPACE=0
-CONFIG_S3C_DEV_HSMMC=y
+CONFIG_PLAT_S3C24XX=y
+CONFIG_CPU_LLSERIAL_S3C2410=y
+CONFIG_CPU_LLSERIAL_S3C2440=y
+CONFIG_S3C2410_CLOCK=y
+CONFIG_S3C2443_CLOCK=y
+CONFIG_S3C24XX_DCLK=y
+CONFIG_S3C24XX_PWM=y
+CONFIG_S3C24XX_GPIO_EXTRA=128
+CONFIG_S3C24XX_GPIO_EXTRA64=y
+CONFIG_S3C24XX_GPIO_EXTRA128=y
+CONFIG_PM_SIMTEC=y
+CONFIG_S3C2410_DMA=y
+# CONFIG_S3C2410_DMA_DEBUG is not set
+CONFIG_MACH_SMDK=y
+CONFIG_S3C24XX_SIMTEC_AUDIO=y
+CONFIG_S3C2410_SETUP_TS=y
#
# S3C2400 Machines
@@ -224,8 +301,10 @@ CONFIG_MACH_BAST_IDE=y
#
CONFIG_ARCH_SMDK2410=y
CONFIG_ARCH_H1940=y
+# CONFIG_H1940BT is not set
CONFIG_PM_H1940=y
CONFIG_MACH_N30=y
+CONFIG_MACH_N35=y
CONFIG_ARCH_BAST=y
CONFIG_MACH_OTOM=y
CONFIG_MACH_AML_M5900=y
@@ -246,26 +325,35 @@ CONFIG_MACH_SMDK2413=y
CONFIG_MACH_S3C2413=y
CONFIG_MACH_SMDK2412=y
CONFIG_MACH_VSTMS=y
+CONFIG_CPU_S3C2416=y
+CONFIG_S3C2416_DMA=y
+
+#
+# S3C2416 Machines
+#
+CONFIG_MACH_SMDK2416=y
CONFIG_CPU_S3C2440=y
+CONFIG_CPU_S3C2442=y
+CONFIG_CPU_S3C244X=y
+CONFIG_S3C2440_XTAL_12000000=y
+CONFIG_S3C2440_XTAL_16934400=y
CONFIG_S3C2440_DMA=y
#
-# S3C2440 Machines
+# S3C2440 and S3C2442 Machines
#
CONFIG_MACH_ANUBIS=y
+CONFIG_MACH_NEO1973_GTA02=y
CONFIG_MACH_OSIRIS=y
+CONFIG_MACH_OSIRIS_DVS=m
CONFIG_MACH_RX3715=y
CONFIG_ARCH_S3C2440=y
CONFIG_MACH_NEXCODER_2440=y
CONFIG_SMDK2440_CPU2440=y
+CONFIG_SMDK2440_CPU2442=y
CONFIG_MACH_AT2440EVB=y
-CONFIG_CPU_S3C2442=y
CONFIG_MACH_MINI2440=y
-
-#
-# S3C2442 Machines
-#
-CONFIG_SMDK2440_CPU2442=y
+CONFIG_MACH_RX1950=y
CONFIG_CPU_S3C2443=y
CONFIG_S3C2443_DMA=y
@@ -283,7 +371,7 @@ CONFIG_CPU_32v4T=y
CONFIG_CPU_32v5=y
CONFIG_CPU_ABRT_EV4T=y
CONFIG_CPU_ABRT_EV5TJ=y
-CONFIG_CPU_PABRT_NOIFAR=y
+CONFIG_CPU_PABRT_LEGACY=y
CONFIG_CPU_CACHE_V4WT=y
CONFIG_CPU_CACHE_VIVT=y
CONFIG_CPU_COPY_V4WB=y
@@ -299,7 +387,7 @@ CONFIG_CPU_CP15_MMU=y
# CONFIG_CPU_DCACHE_DISABLE is not set
# CONFIG_CPU_DCACHE_WRITETHROUGH is not set
# CONFIG_CPU_CACHE_ROUND_ROBIN is not set
-# CONFIG_OUTER_CACHE is not set
+CONFIG_ARM_L1_CACHE_SHIFT=5
#
# Bus support
@@ -316,10 +404,11 @@ CONFIG_VMSPLIT_3G=y
# CONFIG_VMSPLIT_2G is not set
# CONFIG_VMSPLIT_1G is not set
CONFIG_PAGE_OFFSET=0xC0000000
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
# CONFIG_PREEMPT is not set
CONFIG_HZ=200
# CONFIG_AEABI is not set
-CONFIG_ARCH_FLATMEM_HAS_HOLES=y
# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set
# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set
# CONFIG_HIGHMEM is not set
@@ -330,14 +419,14 @@ CONFIG_FLATMEM_MANUAL=y
CONFIG_FLATMEM=y
CONFIG_FLAT_NODE_MEM_MAP=y
CONFIG_PAGEFLAGS_EXTENDED=y
-CONFIG_SPLIT_PTLOCK_CPUS=4096
+CONFIG_SPLIT_PTLOCK_CPUS=999999
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
CONFIG_VIRT_TO_BUS=y
-CONFIG_UNEVICTABLE_LRU=y
-CONFIG_HAVE_MLOCK=y
-CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
CONFIG_ALIGNMENT_TRAP=y
+# CONFIG_UACCESS_WITH_MEMCPY is not set
#
# Boot options
@@ -345,12 +434,14 @@ CONFIG_ALIGNMENT_TRAP=y
CONFIG_ZBOOT_ROM_TEXT=0x0
CONFIG_ZBOOT_ROM_BSS=0x0
CONFIG_CMDLINE="root=/dev/hda1 ro init=/bin/bash console=ttySAC0"
+# CONFIG_CMDLINE_FORCE is not set
# CONFIG_XIP_KERNEL is not set
# CONFIG_KEXEC is not set
#
# CPU Power Management
#
+# CONFIG_CPU_FREQ is not set
# CONFIG_CPU_IDLE is not set
#
@@ -384,6 +475,8 @@ CONFIG_PM_SLEEP=y
CONFIG_SUSPEND=y
CONFIG_SUSPEND_FREEZER=y
CONFIG_APM_EMULATION=m
+# CONFIG_PM_RUNTIME is not set
+CONFIG_PM_OPS=y
CONFIG_ARCH_SUSPEND_POSSIBLE=y
CONFIG_NET=y
@@ -391,7 +484,6 @@ CONFIG_NET=y
# Networking options
#
CONFIG_PACKET=y
-# CONFIG_PACKET_MMAP is not set
CONFIG_UNIX=y
CONFIG_XFRM=y
CONFIG_XFRM_USER=m
@@ -442,7 +534,9 @@ CONFIG_TCP_CONG_ILLINOIS=m
# CONFIG_DEFAULT_BIC is not set
CONFIG_DEFAULT_CUBIC=y
# CONFIG_DEFAULT_HTCP is not set
+# CONFIG_DEFAULT_HYBLA is not set
# CONFIG_DEFAULT_VEGAS is not set
+# CONFIG_DEFAULT_VENO is not set
# CONFIG_DEFAULT_WESTWOOD is not set
# CONFIG_DEFAULT_RENO is not set
CONFIG_DEFAULT_TCP_CONG="cubic"
@@ -463,6 +557,7 @@ CONFIG_INET6_XFRM_MODE_TUNNEL=m
CONFIG_INET6_XFRM_MODE_BEET=m
CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
CONFIG_IPV6_SIT=m
+# CONFIG_IPV6_SIT_6RD is not set
CONFIG_IPV6_NDISC_NODETYPE=y
CONFIG_IPV6_TUNNEL=m
# CONFIG_IPV6_MULTIPLE_TABLES is not set
@@ -498,8 +593,19 @@ CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_CT_NETLINK=m
# CONFIG_NETFILTER_TPROXY is not set
CONFIG_NETFILTER_XTABLES=m
+
+#
+# Xtables combined modules
+#
+CONFIG_NETFILTER_XT_MARK=m
+CONFIG_NETFILTER_XT_CONNMARK=m
+
+#
+# Xtables targets
+#
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
+# CONFIG_NETFILTER_XT_TARGET_CT is not set
# CONFIG_NETFILTER_XT_TARGET_DSCP is not set
CONFIG_NETFILTER_XT_TARGET_HL=m
CONFIG_NETFILTER_XT_TARGET_LED=m
@@ -508,9 +614,14 @@ CONFIG_NETFILTER_XT_TARGET_NFLOG=m
CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
# CONFIG_NETFILTER_XT_TARGET_NOTRACK is not set
CONFIG_NETFILTER_XT_TARGET_RATEEST=m
+# CONFIG_NETFILTER_XT_TARGET_TEE is not set
# CONFIG_NETFILTER_XT_TARGET_TRACE is not set
CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
# CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP is not set
+
+#
+# Xtables matches
+#
CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
CONFIG_NETFILTER_XT_MATCH_COMMENT=m
CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
@@ -529,6 +640,7 @@ CONFIG_NETFILTER_XT_MATCH_LIMIT=m
CONFIG_NETFILTER_XT_MATCH_MAC=m
CONFIG_NETFILTER_XT_MATCH_MARK=m
CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+# CONFIG_NETFILTER_XT_MATCH_OSF is not set
CONFIG_NETFILTER_XT_MATCH_OWNER=m
CONFIG_NETFILTER_XT_MATCH_POLICY=m
CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
@@ -536,7 +648,6 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m
CONFIG_NETFILTER_XT_MATCH_RATEEST=m
CONFIG_NETFILTER_XT_MATCH_REALM=m
CONFIG_NETFILTER_XT_MATCH_RECENT=m
-# CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT is not set
CONFIG_NETFILTER_XT_MATCH_SCTP=m
CONFIG_NETFILTER_XT_MATCH_STATE=m
CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
@@ -556,6 +667,7 @@ CONFIG_IP_VS_TAB_BITS=12
# CONFIG_IP_VS_PROTO_UDP is not set
# CONFIG_IP_VS_PROTO_ESP is not set
# CONFIG_IP_VS_PROTO_AH is not set
+# CONFIG_IP_VS_PROTO_SCTP is not set
#
# IPVS scheduler
@@ -639,8 +751,10 @@ CONFIG_IP6_NF_MANGLE=m
CONFIG_IP6_NF_RAW=m
# CONFIG_IP_DCCP is not set
# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
+# CONFIG_L2TP is not set
# CONFIG_BRIDGE is not set
# CONFIG_NET_DSA is not set
# CONFIG_VLAN_8021Q is not set
@@ -653,6 +767,7 @@ CONFIG_IP6_NF_RAW=m
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
# CONFIG_NET_SCHED is not set
CONFIG_NET_CLS_ROUTE=y
# CONFIG_DCB is not set
@@ -666,6 +781,7 @@ CONFIG_NET_CLS_ROUTE=y
# CONFIG_IRDA is not set
CONFIG_BT=m
CONFIG_BT_L2CAP=m
+# CONFIG_BT_L2CAP_EXT_FEATURES is not set
CONFIG_BT_SCO=m
CONFIG_BT_RFCOMM=m
CONFIG_BT_RFCOMM_TTY=y
@@ -687,19 +803,22 @@ CONFIG_BT_HCIBCM203X=m
CONFIG_BT_HCIBPA10X=m
CONFIG_BT_HCIBFUSB=m
CONFIG_BT_HCIVHCI=m
+# CONFIG_BT_MRVL is not set
# CONFIG_AF_RXRPC is not set
CONFIG_WIRELESS=y
+CONFIG_WEXT_CORE=y
+CONFIG_WEXT_PROC=y
CONFIG_CFG80211=m
+# CONFIG_NL80211_TESTMODE is not set
+# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set
# CONFIG_CFG80211_REG_DEBUG is not set
-# CONFIG_WIRELESS_OLD_REGULATORY is not set
-CONFIG_WIRELESS_EXT=y
+CONFIG_CFG80211_DEFAULT_PS=y
+# CONFIG_CFG80211_INTERNAL_REGDB is not set
+CONFIG_CFG80211_WEXT=y
CONFIG_WIRELESS_EXT_SYSFS=y
# CONFIG_LIB80211 is not set
CONFIG_MAC80211=m
-
-#
-# Rate control algorithm selection
-#
+CONFIG_MAC80211_HAS_RC=y
CONFIG_MAC80211_RC_MINSTREL=y
# CONFIG_MAC80211_RC_DEFAULT_PID is not set
CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y
@@ -710,6 +829,7 @@ CONFIG_MAC80211_LEDS=y
# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
# CONFIG_NET_9P is not set
+# CONFIG_CAIF is not set
#
# Device Drivers
@@ -719,6 +839,7 @@ CONFIG_MAC80211_LEDS=y
# Generic Driver Options
#
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
CONFIG_FW_LOADER=y
@@ -730,9 +851,9 @@ CONFIG_EXTRA_FIRMWARE=""
# CONFIG_CONNECTOR is not set
CONFIG_MTD=y
# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
# CONFIG_MTD_CONCAT is not set
CONFIG_MTD_PARTITIONS=y
-# CONFIG_MTD_TESTS is not set
CONFIG_MTD_REDBOOT_PARTS=y
CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-1
CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED=y
@@ -752,6 +873,7 @@ CONFIG_MTD_BLOCK=y
# CONFIG_INFTL is not set
# CONFIG_RFD_FTL is not set
# CONFIG_SSFDC is not set
+# CONFIG_SM_FTL is not set
# CONFIG_MTD_OOPS is not set
#
@@ -793,6 +915,7 @@ CONFIG_MTD_ROM=y
#
# CONFIG_MTD_DATAFLASH is not set
# CONFIG_MTD_M25P80 is not set
+# CONFIG_MTD_SST25L is not set
# CONFIG_MTD_SLRAM is not set
# CONFIG_MTD_PHRAM is not set
# CONFIG_MTD_MTDRAM is not set
@@ -805,9 +928,12 @@ CONFIG_MTD_ROM=y
# CONFIG_MTD_DOC2001 is not set
# CONFIG_MTD_DOC2001PLUS is not set
CONFIG_MTD_NAND=y
-# CONFIG_MTD_NAND_VERIFY_WRITE is not set
+CONFIG_MTD_NAND_ECC=y
# CONFIG_MTD_NAND_ECC_SMC is not set
+# CONFIG_MTD_NAND_VERIFY_WRITE is not set
+# CONFIG_MTD_SM_COMMON is not set
# CONFIG_MTD_NAND_MUSEUM_IDS is not set
+CONFIG_MTD_NAND_DENALI_SCRATCH_REG_ADDR=0xFF108018
# CONFIG_MTD_NAND_GPIO is not set
CONFIG_MTD_NAND_IDS=y
CONFIG_MTD_NAND_S3C2410=y
@@ -843,6 +969,10 @@ CONFIG_BLK_DEV=y
# CONFIG_BLK_DEV_COW_COMMON is not set
CONFIG_BLK_DEV_LOOP=y
# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+
+#
+# DRBD disabled because PROC_FS, INET or CONNECTOR not selected
+#
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_UB=m
CONFIG_BLK_DEV_RAM=y
@@ -851,19 +981,26 @@ CONFIG_BLK_DEV_RAM_SIZE=4096
# CONFIG_BLK_DEV_XIP is not set
# CONFIG_CDROM_PKTCDVD is not set
CONFIG_ATA_OVER_ETH=m
+# CONFIG_MG_DISK is not set
CONFIG_MISC_DEVICES=y
+# CONFIG_AD525X_DPOT is not set
# CONFIG_ICS932S401 is not set
# CONFIG_ENCLOSURE_SERVICES is not set
# CONFIG_ISL29003 is not set
+# CONFIG_SENSORS_TSL2550 is not set
+# CONFIG_DS1682 is not set
+# CONFIG_TI_DAC7512 is not set
# CONFIG_C2PORT is not set
#
# EEPROM support
#
-CONFIG_EEPROM_AT24=m
+CONFIG_EEPROM_AT24=y
CONFIG_EEPROM_AT25=m
CONFIG_EEPROM_LEGACY=m
+# CONFIG_EEPROM_MAX6875 is not set
CONFIG_EEPROM_93CX6=m
+# CONFIG_IWMC3200TOP is not set
CONFIG_HAVE_IDE=y
CONFIG_IDE=y
@@ -890,6 +1027,7 @@ CONFIG_BLK_DEV_PLATFORM=y
#
# SCSI device support
#
+CONFIG_SCSI_MOD=y
# CONFIG_RAID_ATTRS is not set
CONFIG_SCSI=y
CONFIG_SCSI_DMA=y
@@ -907,10 +1045,6 @@ CONFIG_BLK_DEV_SR=m
CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=y
CONFIG_CHR_DEV_SCH=m
-
-#
-# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
-#
CONFIG_SCSI_MULTI_LUN=y
CONFIG_SCSI_CONSTANTS=y
# CONFIG_SCSI_LOGGING is not set
@@ -951,7 +1085,6 @@ CONFIG_SCSI_LOWLEVEL=y
CONFIG_HAVE_PATA_PLATFORM=y
# CONFIG_MD is not set
CONFIG_NETDEVICES=y
-CONFIG_COMPAT_NET_DEV_OPS=y
# CONFIG_DUMMY is not set
# CONFIG_BONDING is not set
# CONFIG_MACVLAN is not set
@@ -989,16 +1122,30 @@ CONFIG_DM9000_DEBUGLEVEL=4
# CONFIG_NET_PCI is not set
# CONFIG_B44 is not set
# CONFIG_CS89x0 is not set
+# CONFIG_KS8842 is not set
+# CONFIG_KS8851 is not set
+# CONFIG_KS8851_MLL is not set
# CONFIG_NET_POCKET is not set
CONFIG_NETDEV_1000=y
CONFIG_NETDEV_10000=y
# CONFIG_TR is not set
-
-#
-# Wireless LAN
-#
-# CONFIG_WLAN_PRE80211 is not set
-# CONFIG_WLAN_80211 is not set
+CONFIG_WLAN=y
+# CONFIG_LIBERTAS_THINFIRM is not set
+# CONFIG_AT76C50X_USB is not set
+# CONFIG_USB_ZD1201 is not set
+# CONFIG_USB_NET_RNDIS_WLAN is not set
+# CONFIG_RTL8187 is not set
+# CONFIG_MAC80211_HWSIM is not set
+# CONFIG_ATH_COMMON is not set
+# CONFIG_B43 is not set
+# CONFIG_B43LEGACY is not set
+# CONFIG_HOSTAP is not set
+# CONFIG_IWM is not set
+# CONFIG_LIBERTAS is not set
+# CONFIG_P54_COMMON is not set
+# CONFIG_RT2X00 is not set
+# CONFIG_WL12XX is not set
+# CONFIG_ZD1211RW is not set
#
# Enable WiMAX (Networking options) to see the WiMAX drivers
@@ -1012,6 +1159,7 @@ CONFIG_NETDEV_10000=y
# CONFIG_USB_PEGASUS is not set
# CONFIG_USB_RTL8150 is not set
# CONFIG_USB_USBNET is not set
+# CONFIG_USB_IPHETH is not set
# CONFIG_WAN is not set
# CONFIG_PLIP is not set
# CONFIG_PPP is not set
@@ -1020,6 +1168,7 @@ CONFIG_NETDEV_10000=y
# CONFIG_NETPOLL is not set
# CONFIG_NET_POLL_CONTROLLER is not set
# CONFIG_ISDN is not set
+# CONFIG_PHONE is not set
#
# Input device support
@@ -1027,6 +1176,7 @@ CONFIG_NETDEV_10000=y
CONFIG_INPUT=y
CONFIG_INPUT_FF_MEMLESS=m
# CONFIG_INPUT_POLLDEV is not set
+# CONFIG_INPUT_SPARSEKMAP is not set
#
# Userland interfaces
@@ -1043,13 +1193,20 @@ CONFIG_INPUT_EVDEV=y
# Input Device Drivers
#
CONFIG_INPUT_KEYBOARD=y
+# CONFIG_KEYBOARD_ADP5588 is not set
CONFIG_KEYBOARD_ATKBD=y
-# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_QT2160 is not set
# CONFIG_KEYBOARD_LKKBD is not set
-# CONFIG_KEYBOARD_XTKBD is not set
+# CONFIG_KEYBOARD_GPIO is not set
+# CONFIG_KEYBOARD_TCA6416 is not set
+# CONFIG_KEYBOARD_MATRIX is not set
+# CONFIG_KEYBOARD_LM8323 is not set
+# CONFIG_KEYBOARD_MAX7359 is not set
# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_OPENCORES is not set
# CONFIG_KEYBOARD_STOWAWAY is not set
-# CONFIG_KEYBOARD_GPIO is not set
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_XTKBD is not set
CONFIG_INPUT_MOUSE=y
CONFIG_MOUSE_PS2=y
CONFIG_MOUSE_PS2_ALPS=y
@@ -1057,6 +1214,7 @@ CONFIG_MOUSE_PS2_LOGIPS2PP=y
CONFIG_MOUSE_PS2_SYNAPTICS=y
CONFIG_MOUSE_PS2_TRACKPOINT=y
# CONFIG_MOUSE_PS2_ELANTECH is not set
+# CONFIG_MOUSE_PS2_SENTELIC is not set
# CONFIG_MOUSE_PS2_TOUCHKIT is not set
# CONFIG_MOUSE_SERIAL is not set
CONFIG_MOUSE_APPLETOUCH=m
@@ -1066,6 +1224,7 @@ CONFIG_MOUSE_BCM5974=m
# CONFIG_MOUSE_PC110PAD is not set
# CONFIG_MOUSE_VSXXXAA is not set
# CONFIG_MOUSE_GPIO is not set
+# CONFIG_MOUSE_SYNAPTICS_I2C is not set
CONFIG_INPUT_JOYSTICK=y
CONFIG_JOYSTICK_ANALOG=m
CONFIG_JOYSTICK_A3D=m
@@ -1102,10 +1261,15 @@ CONFIG_INPUT_TOUCHSCREEN=y
# CONFIG_TOUCHSCREEN_AD7879_I2C is not set
# CONFIG_TOUCHSCREEN_AD7879_SPI is not set
# CONFIG_TOUCHSCREEN_AD7879 is not set
+# CONFIG_TOUCHSCREEN_DYNAPRO is not set
+# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set
+# CONFIG_TOUCHSCREEN_EETI is not set
# CONFIG_TOUCHSCREEN_FUJITSU is not set
+# CONFIG_TOUCHSCREEN_S3C2410 is not set
# CONFIG_TOUCHSCREEN_GUNZE is not set
# CONFIG_TOUCHSCREEN_ELO is not set
# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set
+# CONFIG_TOUCHSCREEN_MCS5000 is not set
# CONFIG_TOUCHSCREEN_MTOUCH is not set
# CONFIG_TOUCHSCREEN_INEXIO is not set
# CONFIG_TOUCHSCREEN_MK712 is not set
@@ -1126,9 +1290,16 @@ CONFIG_TOUCHSCREEN_USB_IRTOUCH=y
CONFIG_TOUCHSCREEN_USB_IDEALTEK=y
CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH=y
CONFIG_TOUCHSCREEN_USB_GOTOP=y
+CONFIG_TOUCHSCREEN_USB_JASTEC=y
+CONFIG_TOUCHSCREEN_USB_E2I=y
+CONFIG_TOUCHSCREEN_USB_ZYTRONIC=y
+CONFIG_TOUCHSCREEN_USB_ETT_TC5UH=y
+CONFIG_TOUCHSCREEN_USB_NEXIO=y
# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
# CONFIG_TOUCHSCREEN_TSC2007 is not set
+# CONFIG_TOUCHSCREEN_W90X900 is not set
CONFIG_INPUT_MISC=y
+# CONFIG_INPUT_AD714X is not set
CONFIG_INPUT_ATI_REMOTE=m
CONFIG_INPUT_ATI_REMOTE2=m
CONFIG_INPUT_KEYSPAN_REMOTE=m
@@ -1136,6 +1307,8 @@ CONFIG_INPUT_POWERMATE=m
CONFIG_INPUT_YEALINK=m
CONFIG_INPUT_CM109=m
CONFIG_INPUT_UINPUT=m
+# CONFIG_INPUT_PCF50633_PMU is not set
+# CONFIG_INPUT_PCF8574 is not set
CONFIG_INPUT_GPIO_ROTARY_ENCODER=m
#
@@ -1146,6 +1319,7 @@ CONFIG_SERIO_SERPORT=y
# CONFIG_SERIO_PARKBD is not set
CONFIG_SERIO_LIBPS2=y
# CONFIG_SERIO_RAW is not set
+# CONFIG_SERIO_ALTERA_PS2 is not set
CONFIG_GAMEPORT=m
# CONFIG_GAMEPORT_NS558 is not set
# CONFIG_GAMEPORT_L4 is not set
@@ -1167,10 +1341,9 @@ CONFIG_SERIAL_NONSTANDARD=y
# CONFIG_MOXA_INTELLIO is not set
# CONFIG_MOXA_SMARTIO is not set
# CONFIG_N_HDLC is not set
+# CONFIG_N_GSM is not set
# CONFIG_RISCOM8 is not set
# CONFIG_SPECIALIX is not set
-# CONFIG_SX is not set
-# CONFIG_RIO is not set
# CONFIG_STALDRV is not set
#
@@ -1195,6 +1368,7 @@ CONFIG_SERIAL_8250_SHARE_IRQ=y
# Non-8250 serial port support
#
CONFIG_SERIAL_SAMSUNG=y
+CONFIG_SERIAL_SAMSUNG_UARTS_4=y
CONFIG_SERIAL_SAMSUNG_UARTS=4
# CONFIG_SERIAL_SAMSUNG_DEBUG is not set
CONFIG_SERIAL_SAMSUNG_CONSOLE=y
@@ -1204,6 +1378,9 @@ CONFIG_SERIAL_S3C2440=y
# CONFIG_SERIAL_MAX3100 is not set
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_TIMBERDALE is not set
+# CONFIG_SERIAL_ALTERA_JTAGUART is not set
+# CONFIG_SERIAL_ALTERA_UART is not set
CONFIG_UNIX98_PTYS=y
# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
CONFIG_LEGACY_PTYS=y
@@ -1221,6 +1398,7 @@ CONFIG_HW_RANDOM=y
CONFIG_DEVPORT=y
CONFIG_I2C=y
CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
CONFIG_I2C_CHARDEV=m
CONFIG_I2C_HELPER_AUTO=y
CONFIG_I2C_ALGOBIT=y
@@ -1232,10 +1410,12 @@ CONFIG_I2C_ALGOBIT=y
#
# I2C system bus drivers (mostly embedded / system-on-chip)
#
+# CONFIG_I2C_DESIGNWARE is not set
# CONFIG_I2C_GPIO is not set
# CONFIG_I2C_OCORES is not set
CONFIG_I2C_S3C2410=y
CONFIG_I2C_SIMTEC=y
+# CONFIG_I2C_XILINX is not set
#
# External I2C/SMBus adapter drivers
@@ -1252,20 +1432,9 @@ CONFIG_I2C_SIMTEC=y
# CONFIG_I2C_PCA_ISA is not set
# CONFIG_I2C_PCA_PLATFORM is not set
# CONFIG_I2C_STUB is not set
-
-#
-# Miscellaneous I2C Chip support
-#
-# CONFIG_DS1682 is not set
-# CONFIG_SENSORS_PCF8574 is not set
-# CONFIG_PCF8575 is not set
-# CONFIG_SENSORS_PCA9539 is not set
-# CONFIG_SENSORS_MAX6875 is not set
-# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
# CONFIG_I2C_DEBUG_BUS is not set
-# CONFIG_I2C_DEBUG_CHIP is not set
CONFIG_SPI=y
# CONFIG_SPI_DEBUG is not set
CONFIG_SPI_MASTER=y
@@ -1278,13 +1447,21 @@ CONFIG_SPI_BITBANG=m
CONFIG_SPI_GPIO=m
# CONFIG_SPI_LM70_LLP is not set
CONFIG_SPI_S3C24XX=m
+# CONFIG_SPI_S3C24XX_FIQ is not set
CONFIG_SPI_S3C24XX_GPIO=m
+# CONFIG_SPI_XILINX is not set
+# CONFIG_SPI_DESIGNWARE is not set
#
# SPI Protocol Masters
#
CONFIG_SPI_SPIDEV=m
CONFIG_SPI_TLE62X0=m
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
CONFIG_ARCH_REQUIRE_GPIOLIB=y
CONFIG_GPIOLIB=y
# CONFIG_DEBUG_GPIO is not set
@@ -1293,13 +1470,16 @@ CONFIG_GPIOLIB=y
#
# Memory mapped GPIO expanders:
#
+# CONFIG_GPIO_IT8761E is not set
#
# I2C GPIO expanders:
#
+# CONFIG_GPIO_MAX7300 is not set
# CONFIG_GPIO_MAX732X is not set
# CONFIG_GPIO_PCA953X is not set
# CONFIG_GPIO_PCF857X is not set
+# CONFIG_GPIO_ADP5588 is not set
#
# PCI GPIO expanders:
@@ -1310,10 +1490,29 @@ CONFIG_GPIOLIB=y
#
# CONFIG_GPIO_MAX7301 is not set
# CONFIG_GPIO_MCP23S08 is not set
+# CONFIG_GPIO_MC33880 is not set
+
+#
+# AC97 GPIO expanders:
+#
# CONFIG_W1 is not set
-# CONFIG_POWER_SUPPLY is not set
+CONFIG_POWER_SUPPLY=y
+# CONFIG_POWER_SUPPLY_DEBUG is not set
+# CONFIG_PDA_POWER is not set
+# CONFIG_APM_POWER is not set
+# CONFIG_TEST_POWER is not set
+# CONFIG_BATTERY_DS2760 is not set
+# CONFIG_BATTERY_DS2782 is not set
+# CONFIG_BATTERY_BQ27x00 is not set
+# CONFIG_BATTERY_MAX17040 is not set
+# CONFIG_CHARGER_PCF50633 is not set
CONFIG_HWMON=y
CONFIG_HWMON_VID=m
+# CONFIG_HWMON_DEBUG_CHIP is not set
+
+#
+# Native drivers
+#
# CONFIG_SENSORS_AD7414 is not set
# CONFIG_SENSORS_AD7418 is not set
# CONFIG_SENSORS_ADCXX is not set
@@ -1323,10 +1522,11 @@ CONFIG_HWMON_VID=m
# CONFIG_SENSORS_ADM1029 is not set
# CONFIG_SENSORS_ADM1031 is not set
# CONFIG_SENSORS_ADM9240 is not set
+# CONFIG_SENSORS_ADT7411 is not set
# CONFIG_SENSORS_ADT7462 is not set
# CONFIG_SENSORS_ADT7470 is not set
-# CONFIG_SENSORS_ADT7473 is not set
# CONFIG_SENSORS_ADT7475 is not set
+# CONFIG_SENSORS_ASC7621 is not set
# CONFIG_SENSORS_ATXP1 is not set
# CONFIG_SENSORS_DS1621 is not set
# CONFIG_SENSORS_F71805F is not set
@@ -1338,6 +1538,7 @@ CONFIG_HWMON_VID=m
# CONFIG_SENSORS_IT87 is not set
# CONFIG_SENSORS_LM63 is not set
# CONFIG_SENSORS_LM70 is not set
+# CONFIG_SENSORS_LM73 is not set
CONFIG_SENSORS_LM75=m
# CONFIG_SENSORS_LM77 is not set
CONFIG_SENSORS_LM78=m
@@ -1358,12 +1559,17 @@ CONFIG_SENSORS_LM85=m
# CONFIG_SENSORS_PC87427 is not set
# CONFIG_SENSORS_PCF8591 is not set
# CONFIG_SENSORS_SHT15 is not set
+# CONFIG_SENSORS_S3C is not set
# CONFIG_SENSORS_DME1737 is not set
# CONFIG_SENSORS_SMSC47M1 is not set
# CONFIG_SENSORS_SMSC47M192 is not set
# CONFIG_SENSORS_SMSC47B397 is not set
# CONFIG_SENSORS_ADS7828 is not set
+# CONFIG_SENSORS_ADS7871 is not set
+# CONFIG_SENSORS_AMC6821 is not set
# CONFIG_SENSORS_THMC50 is not set
+# CONFIG_SENSORS_TMP401 is not set
+# CONFIG_SENSORS_TMP421 is not set
# CONFIG_SENSORS_VT1211 is not set
# CONFIG_SENSORS_W83781D is not set
# CONFIG_SENSORS_W83791D is not set
@@ -1374,9 +1580,8 @@ CONFIG_SENSORS_LM85=m
# CONFIG_SENSORS_W83627HF is not set
# CONFIG_SENSORS_W83627EHF is not set
# CONFIG_SENSORS_LIS3_SPI is not set
-# CONFIG_HWMON_DEBUG_CHIP is not set
+# CONFIG_SENSORS_LIS3_I2C is not set
# CONFIG_THERMAL is not set
-# CONFIG_THERMAL_HWMON is not set
CONFIG_WATCHDOG=y
# CONFIG_WATCHDOG_NOWAYOUT is not set
@@ -1385,6 +1590,7 @@ CONFIG_WATCHDOG=y
#
# CONFIG_SOFT_WATCHDOG is not set
CONFIG_S3C2410_WATCHDOG=y
+# CONFIG_MAX63XX_WATCHDOG is not set
#
# ISA-based Watchdog Cards
@@ -1408,213 +1614,36 @@ CONFIG_SSB_POSSIBLE=y
# Multifunction device drivers
#
# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_88PM860X is not set
CONFIG_MFD_SM501=y
# CONFIG_MFD_SM501_GPIO is not set
# CONFIG_MFD_ASIC3 is not set
# CONFIG_HTC_EGPIO is not set
# CONFIG_HTC_PASIC3 is not set
+# CONFIG_HTC_I2CPLD is not set
# CONFIG_UCB1400_CORE is not set
-# CONFIG_TPS65010 is not set
+CONFIG_TPS65010=m
# CONFIG_TWL4030_CORE is not set
# CONFIG_MFD_TMIO is not set
# CONFIG_MFD_T7L66XB is not set
# CONFIG_MFD_TC6387XB is not set
# CONFIG_MFD_TC6393XB is not set
# CONFIG_PMIC_DA903X is not set
+# CONFIG_PMIC_ADP5520 is not set
+# CONFIG_MFD_MAX8925 is not set
# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM831X is not set
# CONFIG_MFD_WM8350_I2C is not set
-# CONFIG_MFD_PCF50633 is not set
-
-#
-# Multimedia devices
-#
-
-#
-# Multimedia core support
-#
-CONFIG_VIDEO_DEV=m
-CONFIG_VIDEO_V4L2_COMMON=m
-CONFIG_VIDEO_ALLOW_V4L1=y
-CONFIG_VIDEO_V4L1_COMPAT=y
-CONFIG_DVB_CORE=m
-CONFIG_VIDEO_MEDIA=m
-
-#
-# Multimedia drivers
-#
-CONFIG_MEDIA_ATTACH=y
-CONFIG_MEDIA_TUNER=m
-# CONFIG_MEDIA_TUNER_CUSTOMISE is not set
-CONFIG_MEDIA_TUNER_SIMPLE=m
-CONFIG_MEDIA_TUNER_TDA8290=m
-CONFIG_MEDIA_TUNER_TDA827X=m
-CONFIG_MEDIA_TUNER_TDA18271=m
-CONFIG_MEDIA_TUNER_TDA9887=m
-CONFIG_MEDIA_TUNER_TEA5761=m
-CONFIG_MEDIA_TUNER_TEA5767=m
-CONFIG_MEDIA_TUNER_MT20XX=m
-CONFIG_MEDIA_TUNER_MT2060=m
-CONFIG_MEDIA_TUNER_MT2266=m
-CONFIG_MEDIA_TUNER_QT1010=m
-CONFIG_MEDIA_TUNER_XC2028=m
-CONFIG_MEDIA_TUNER_XC5000=m
-CONFIG_MEDIA_TUNER_MXL5005S=m
-CONFIG_MEDIA_TUNER_MXL5007T=m
-CONFIG_MEDIA_TUNER_MC44S803=m
-CONFIG_VIDEO_V4L2=m
-CONFIG_VIDEO_V4L1=m
-CONFIG_VIDEOBUF_GEN=m
-CONFIG_VIDEOBUF_VMALLOC=m
-CONFIG_VIDEO_TVEEPROM=m
-CONFIG_VIDEO_CAPTURE_DRIVERS=y
-# CONFIG_VIDEO_ADV_DEBUG is not set
-# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set
-CONFIG_VIDEO_HELPER_CHIPS_AUTO=y
-CONFIG_VIDEO_VIVI=m
-CONFIG_VIDEO_PMS=m
-CONFIG_VIDEO_BWQCAM=m
-CONFIG_VIDEO_CQCAM=m
-CONFIG_VIDEO_W9966=m
-CONFIG_VIDEO_CPIA=m
-CONFIG_VIDEO_CPIA_PP=m
-CONFIG_VIDEO_CPIA_USB=m
-CONFIG_VIDEO_CPIA2=m
-CONFIG_VIDEO_SAA5246A=m
-CONFIG_VIDEO_SAA5249=m
-CONFIG_VIDEO_AU0828=m
-# CONFIG_SOC_CAMERA is not set
-CONFIG_V4L_USB_DRIVERS=y
-# CONFIG_USB_VIDEO_CLASS is not set
-CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y
-CONFIG_USB_GSPCA=m
-# CONFIG_USB_M5602 is not set
-# CONFIG_USB_STV06XX is not set
-# CONFIG_USB_GSPCA_CONEX is not set
-# CONFIG_USB_GSPCA_ETOMS is not set
-# CONFIG_USB_GSPCA_FINEPIX is not set
-# CONFIG_USB_GSPCA_MARS is not set
-# CONFIG_USB_GSPCA_MR97310A is not set
-# CONFIG_USB_GSPCA_OV519 is not set
-# CONFIG_USB_GSPCA_OV534 is not set
-# CONFIG_USB_GSPCA_PAC207 is not set
-# CONFIG_USB_GSPCA_PAC7311 is not set
-# CONFIG_USB_GSPCA_SONIXB is not set
-# CONFIG_USB_GSPCA_SONIXJ is not set
-# CONFIG_USB_GSPCA_SPCA500 is not set
-# CONFIG_USB_GSPCA_SPCA501 is not set
-# CONFIG_USB_GSPCA_SPCA505 is not set
-# CONFIG_USB_GSPCA_SPCA506 is not set
-# CONFIG_USB_GSPCA_SPCA508 is not set
-# CONFIG_USB_GSPCA_SPCA561 is not set
-# CONFIG_USB_GSPCA_SQ905 is not set
-# CONFIG_USB_GSPCA_SQ905C is not set
-# CONFIG_USB_GSPCA_STK014 is not set
-# CONFIG_USB_GSPCA_SUNPLUS is not set
-# CONFIG_USB_GSPCA_T613 is not set
-# CONFIG_USB_GSPCA_TV8532 is not set
-# CONFIG_USB_GSPCA_VC032X is not set
-# CONFIG_USB_GSPCA_ZC3XX is not set
-# CONFIG_VIDEO_PVRUSB2 is not set
-# CONFIG_VIDEO_HDPVR is not set
-# CONFIG_VIDEO_EM28XX is not set
-# CONFIG_VIDEO_CX231XX is not set
-# CONFIG_VIDEO_USBVISION is not set
-# CONFIG_USB_VICAM is not set
-# CONFIG_USB_IBMCAM is not set
-# CONFIG_USB_KONICAWC is not set
-# CONFIG_USB_QUICKCAM_MESSENGER is not set
-# CONFIG_USB_ET61X251 is not set
-# CONFIG_VIDEO_OVCAMCHIP is not set
-# CONFIG_USB_OV511 is not set
-# CONFIG_USB_SE401 is not set
-# CONFIG_USB_SN9C102 is not set
-# CONFIG_USB_STV680 is not set
-# CONFIG_USB_ZC0301 is not set
-# CONFIG_USB_PWC is not set
-CONFIG_USB_PWC_INPUT_EVDEV=y
-# CONFIG_USB_ZR364XX is not set
-# CONFIG_USB_STKWEBCAM is not set
-# CONFIG_USB_S2255 is not set
-CONFIG_RADIO_ADAPTERS=y
-CONFIG_RADIO_CADET=m
-CONFIG_RADIO_RTRACK=m
-CONFIG_RADIO_RTRACK2=m
-CONFIG_RADIO_AZTECH=m
-CONFIG_RADIO_GEMTEK=m
-CONFIG_RADIO_SF16FMI=m
-CONFIG_RADIO_SF16FMR2=m
-CONFIG_RADIO_TERRATEC=m
-CONFIG_RADIO_TRUST=m
-CONFIG_RADIO_TYPHOON=m
-CONFIG_RADIO_TYPHOON_PROC_FS=y
-CONFIG_RADIO_ZOLTRIX=m
-CONFIG_USB_DSBR=m
-CONFIG_USB_SI470X=m
-CONFIG_USB_MR800=m
-CONFIG_RADIO_TEA5764=m
-CONFIG_DVB_DYNAMIC_MINORS=y
-CONFIG_DVB_CAPTURE_DRIVERS=y
-# CONFIG_TTPCI_EEPROM is not set
-
-#
-# Supported USB Adapters
-#
-CONFIG_DVB_USB=m
-# CONFIG_DVB_USB_DEBUG is not set
-# CONFIG_DVB_USB_A800 is not set
-CONFIG_DVB_USB_DIBUSB_MB=m
-# CONFIG_DVB_USB_DIBUSB_MB_FAULTY is not set
-CONFIG_DVB_USB_DIBUSB_MC=m
-CONFIG_DVB_USB_DIB0700=m
-CONFIG_DVB_USB_UMT_010=m
-CONFIG_DVB_USB_CXUSB=m
-CONFIG_DVB_USB_M920X=m
-# CONFIG_DVB_USB_GL861 is not set
-# CONFIG_DVB_USB_AU6610 is not set
-# CONFIG_DVB_USB_DIGITV is not set
-# CONFIG_DVB_USB_VP7045 is not set
-# CONFIG_DVB_USB_VP702X is not set
-# CONFIG_DVB_USB_GP8PSK is not set
-# CONFIG_DVB_USB_NOVA_T_USB2 is not set
-# CONFIG_DVB_USB_TTUSB2 is not set
-# CONFIG_DVB_USB_DTT200U is not set
-# CONFIG_DVB_USB_OPERA1 is not set
-CONFIG_DVB_USB_AF9005=m
-# CONFIG_DVB_USB_AF9005_REMOTE is not set
-# CONFIG_DVB_USB_DW2102 is not set
-# CONFIG_DVB_USB_CINERGY_T2 is not set
-# CONFIG_DVB_USB_ANYSEE is not set
-# CONFIG_DVB_USB_DTV5100 is not set
-# CONFIG_DVB_USB_AF9015 is not set
-# CONFIG_DVB_USB_CE6230 is not set
-# CONFIG_DVB_SIANO_SMS1XXX is not set
-
-#
-# Supported FlexCopII (B2C2) Adapters
-#
-# CONFIG_DVB_B2C2_FLEXCOP is not set
-
-#
-# Supported DVB Frontends
-#
-# CONFIG_DVB_FE_CUSTOMISE is not set
-CONFIG_DVB_CX22702=m
-CONFIG_DVB_TDA1004X=m
-CONFIG_DVB_MT352=m
-CONFIG_DVB_ZL10353=m
-CONFIG_DVB_DIB3000MB=m
-CONFIG_DVB_DIB3000MC=m
-CONFIG_DVB_DIB7000M=m
-CONFIG_DVB_DIB7000P=m
-CONFIG_DVB_LGDT330X=m
-CONFIG_DVB_LGDT3305=m
-CONFIG_DVB_AU8522=m
-CONFIG_DVB_S5H1411=m
-CONFIG_DVB_PLL=m
-CONFIG_DVB_TUNER_DIB0070=m
-CONFIG_DVB_LGS8GL5=m
-CONFIG_DAB=y
-CONFIG_USB_DABUSB=m
+# CONFIG_MFD_WM8994 is not set
+CONFIG_MFD_PCF50633=y
+# CONFIG_MFD_MC13783 is not set
+# CONFIG_PCF50633_ADC is not set
+CONFIG_PCF50633_GPIO=y
+# CONFIG_AB3100_CORE is not set
+# CONFIG_EZX_PCAP is not set
+# CONFIG_AB4500_CORE is not set
+# CONFIG_REGULATOR is not set
+# CONFIG_MEDIA_SUPPORT is not set
#
# Graphics support
@@ -1653,6 +1682,8 @@ CONFIG_FB_SM501=y
# CONFIG_FB_BROADSHEET is not set
CONFIG_BACKLIGHT_LCD_SUPPORT=y
CONFIG_LCD_CLASS_DEVICE=m
+# CONFIG_LCD_L4F00242T03 is not set
+# CONFIG_LCD_LMS283GF05 is not set
# CONFIG_LCD_LTV350QV is not set
# CONFIG_LCD_ILI9320 is not set
# CONFIG_LCD_TDO24M is not set
@@ -1682,6 +1713,7 @@ CONFIG_FONT_8x16=y
# CONFIG_LOGO is not set
CONFIG_SOUND=y
CONFIG_SOUND_OSS_CORE=y
+CONFIG_SOUND_OSS_CORE_PRECLAIM=y
CONFIG_SND=y
CONFIG_SND_TIMER=y
CONFIG_SND_PCM=y
@@ -1701,36 +1733,44 @@ CONFIG_SND_VERBOSE_PROCFS=y
CONFIG_SND_VERBOSE_PRINTK=y
# CONFIG_SND_DEBUG is not set
CONFIG_SND_VMASTER=y
+CONFIG_SND_RAWMIDI_SEQ=m
+# CONFIG_SND_OPL3_LIB_SEQ is not set
+# CONFIG_SND_OPL4_LIB_SEQ is not set
+# CONFIG_SND_SBAWE_SEQ is not set
+# CONFIG_SND_EMU10K1_SEQ is not set
CONFIG_SND_AC97_CODEC=m
# CONFIG_SND_DRIVERS is not set
# CONFIG_SND_ARM is not set
# CONFIG_SND_SPI is not set
CONFIG_SND_USB=y
CONFIG_SND_USB_AUDIO=m
+# CONFIG_SND_USB_UA101 is not set
CONFIG_SND_USB_CAIAQ=m
# CONFIG_SND_USB_CAIAQ_INPUT is not set
CONFIG_SND_SOC=y
CONFIG_SND_SOC_AC97_BUS=y
CONFIG_SND_S3C24XX_SOC=y
-CONFIG_SND_S3C24XX_SOC_I2S=m
+CONFIG_SND_S3C24XX_SOC_I2S=y
CONFIG_SND_S3C_I2SV2_SOC=m
CONFIG_SND_S3C2412_SOC_I2S=m
-CONFIG_SND_S3C2443_SOC_AC97=m
+CONFIG_SND_S3C_SOC_AC97=m
+# CONFIG_SND_S3C24XX_SOC_NEO1973_GTA02_WM8753 is not set
CONFIG_SND_S3C24XX_SOC_JIVE_WM8750=m
CONFIG_SND_S3C24XX_SOC_SMDK2443_WM9710=m
CONFIG_SND_S3C24XX_SOC_LN2440SBC_ALC650=m
-CONFIG_SND_S3C24XX_SOC_S3C24XX_UDA134X=m
+CONFIG_SND_S3C24XX_SOC_S3C24XX_UDA134X=y
+# CONFIG_SND_S3C24XX_SOC_SIMTEC_TLV320AIC23 is not set
+# CONFIG_SND_S3C24XX_SOC_SIMTEC_HERMES is not set
CONFIG_SND_SOC_I2C_AND_SPI=y
# CONFIG_SND_SOC_ALL_CODECS is not set
CONFIG_SND_SOC_AC97_CODEC=m
-CONFIG_SND_SOC_L3=m
-CONFIG_SND_SOC_UDA134X=m
+CONFIG_SND_SOC_L3=y
+CONFIG_SND_SOC_UDA134X=y
CONFIG_SND_SOC_WM8750=m
# CONFIG_SOUND_PRIME is not set
CONFIG_AC97_BUS=y
CONFIG_HID_SUPPORT=y
CONFIG_HID=y
-# CONFIG_HID_DEBUG is not set
# CONFIG_HIDRAW is not set
#
@@ -1743,6 +1783,8 @@ CONFIG_HID=y
# Special HID drivers
#
CONFIG_HID_APPLE=m
+# CONFIG_HID_MAGICMOUSE is not set
+# CONFIG_HID_WACOM is not set
CONFIG_USB_SUPPORT=y
CONFIG_USB_ARCH_HAS_HCD=y
CONFIG_USB_ARCH_HAS_OHCI=y
@@ -1757,8 +1799,6 @@ CONFIG_USB=y
CONFIG_USB_DEVICEFS=y
CONFIG_USB_DEVICE_CLASS=y
# CONFIG_USB_DYNAMIC_MINORS is not set
-# CONFIG_USB_SUSPEND is not set
-# CONFIG_USB_OTG is not set
CONFIG_USB_MON=y
# CONFIG_USB_WUSB is not set
# CONFIG_USB_WUSB_CBAF is not set
@@ -1770,6 +1810,7 @@ CONFIG_USB_MON=y
# CONFIG_USB_OXU210HP_HCD is not set
# CONFIG_USB_ISP116X_HCD is not set
# CONFIG_USB_ISP1760_HCD is not set
+# CONFIG_USB_ISP1362_HCD is not set
CONFIG_USB_OHCI_HCD=y
# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set
# CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set
@@ -1854,6 +1895,7 @@ CONFIG_USB_SERIAL_FTDI_SIO=y
CONFIG_USB_SERIAL_NAVMAN=m
CONFIG_USB_SERIAL_PL2303=y
# CONFIG_USB_SERIAL_OTI6858 is not set
+# CONFIG_USB_SERIAL_QCAUX is not set
# CONFIG_USB_SERIAL_QUALCOMM is not set
# CONFIG_USB_SERIAL_SPCP8X5 is not set
# CONFIG_USB_SERIAL_HP4X is not set
@@ -1864,9 +1906,12 @@ CONFIG_USB_SERIAL_PL2303=y
# CONFIG_USB_SERIAL_TI is not set
# CONFIG_USB_SERIAL_CYBERJACK is not set
# CONFIG_USB_SERIAL_XIRCOM is not set
+CONFIG_USB_SERIAL_WWAN=m
CONFIG_USB_SERIAL_OPTION=m
# CONFIG_USB_SERIAL_OMNINET is not set
# CONFIG_USB_SERIAL_OPTICON is not set
+# CONFIG_USB_SERIAL_VIVOPAY_SERIAL is not set
+# CONFIG_USB_SERIAL_ZIO is not set
# CONFIG_USB_SERIAL_DEBUG is not set
#
@@ -1879,7 +1924,6 @@ CONFIG_USB_SEVSEG=m
CONFIG_USB_RIO500=m
CONFIG_USB_LEGOTOWER=m
CONFIG_USB_LCD=m
-CONFIG_USB_BERRY_CHARGE=m
CONFIG_USB_LED=m
CONFIG_USB_CYPRESS_CY7C63=m
CONFIG_USB_CYTHERM=m
@@ -1891,13 +1935,13 @@ CONFIG_USB_TRANCEVIBRATOR=m
CONFIG_USB_IOWARRIOR=m
CONFIG_USB_TEST=m
# CONFIG_USB_ISIGHTFW is not set
-# CONFIG_USB_VST is not set
# CONFIG_USB_GADGET is not set
#
# OTG and related infrastructure
#
# CONFIG_USB_GPIO_VBUS is not set
+# CONFIG_USB_ULPI is not set
# CONFIG_NOP_USB_XCEIV is not set
CONFIG_MMC=y
# CONFIG_MMC_DEBUG is not set
@@ -1915,10 +1959,15 @@ CONFIG_MMC_TEST=m
# MMC/SD/SDIO Host Controller Drivers
#
CONFIG_MMC_SDHCI=m
+# CONFIG_MMC_SDHCI_PLTFM is not set
+# CONFIG_MMC_SDHCI_S3C is not set
CONFIG_MMC_SPI=m
CONFIG_MMC_S3C=y
+# CONFIG_MMC_S3C_HW_SDIO_IRQ is not set
+CONFIG_MMC_S3C_PIO=y
+# CONFIG_MMC_S3C_DMA is not set
+# CONFIG_MMC_S3C_PIODMA is not set
# CONFIG_MEMSTICK is not set
-# CONFIG_ACCESSIBILITY is not set
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=m
@@ -1930,26 +1979,28 @@ CONFIG_LEDS_H1940=m
CONFIG_LEDS_PCA9532=m
CONFIG_LEDS_GPIO=m
CONFIG_LEDS_GPIO_PLATFORM=y
-CONFIG_LEDS_LP5521=m
+# CONFIG_LEDS_LP3944 is not set
CONFIG_LEDS_PCA955X=m
CONFIG_LEDS_DAC124S085=m
CONFIG_LEDS_PWM=m
CONFIG_LEDS_BD2802=m
+# CONFIG_LEDS_LT3593 is not set
+CONFIG_LEDS_TRIGGERS=y
#
# LED Triggers
#
-CONFIG_LEDS_TRIGGERS=y
CONFIG_LEDS_TRIGGER_TIMER=m
# CONFIG_LEDS_TRIGGER_IDE_DISK is not set
CONFIG_LEDS_TRIGGER_HEARTBEAT=m
-CONFIG_LEDS_TRIGGER_BACKLIGHT=m
+CONFIG_LEDS_TRIGGER_BACKLIGHT=y
CONFIG_LEDS_TRIGGER_GPIO=m
CONFIG_LEDS_TRIGGER_DEFAULT_ON=m
#
# iptables trigger is under Netfilter config (LED target)
#
+# CONFIG_ACCESSIBILITY is not set
CONFIG_RTC_LIB=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_HCTOSYS=y
@@ -1978,9 +2029,11 @@ CONFIG_RTC_INTF_DEV=y
# CONFIG_RTC_DRV_PCF8563 is not set
# CONFIG_RTC_DRV_PCF8583 is not set
# CONFIG_RTC_DRV_M41T80 is not set
+# CONFIG_RTC_DRV_BQ32K is not set
# CONFIG_RTC_DRV_S35390A is not set
# CONFIG_RTC_DRV_FM3130 is not set
# CONFIG_RTC_DRV_RX8581 is not set
+# CONFIG_RTC_DRV_RX8025 is not set
#
# SPI RTC drivers
@@ -1992,6 +2045,7 @@ CONFIG_RTC_INTF_DEV=y
# CONFIG_RTC_DRV_R9701 is not set
# CONFIG_RTC_DRV_RS5C348 is not set
# CONFIG_RTC_DRV_DS3234 is not set
+# CONFIG_RTC_DRV_PCF2123 is not set
#
# Platform RTC drivers
@@ -2005,8 +2059,11 @@ CONFIG_RTC_INTF_DEV=y
# CONFIG_RTC_DRV_M48T86 is not set
# CONFIG_RTC_DRV_M48T35 is not set
# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_MSM6242 is not set
# CONFIG_RTC_DRV_BQ4802 is not set
+# CONFIG_RTC_DRV_RP5C01 is not set
# CONFIG_RTC_DRV_V3020 is not set
+# CONFIG_RTC_DRV_PCF50633 is not set
#
# on-CPU RTC drivers
@@ -2014,7 +2071,6 @@ CONFIG_RTC_INTF_DEV=y
CONFIG_RTC_DRV_S3C=y
# CONFIG_DMADEVICES is not set
# CONFIG_AUXDISPLAY is not set
-# CONFIG_REGULATOR is not set
# CONFIG_UIO is not set
# CONFIG_STAGING is not set
@@ -2032,20 +2088,23 @@ CONFIG_EXT3_FS_XATTR=y
CONFIG_EXT3_FS_POSIX_ACL=y
# CONFIG_EXT3_FS_SECURITY is not set
CONFIG_EXT4_FS=m
-# CONFIG_EXT4DEV_COMPAT is not set
CONFIG_EXT4_FS_XATTR=y
CONFIG_EXT4_FS_POSIX_ACL=y
# CONFIG_EXT4_FS_SECURITY is not set
+# CONFIG_EXT4_DEBUG is not set
CONFIG_JBD=y
CONFIG_JBD2=m
CONFIG_FS_MBCACHE=y
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
CONFIG_FS_POSIX_ACL=y
-CONFIG_FILE_LOCKING=y
# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
CONFIG_DNOTIFY=y
CONFIG_INOTIFY=y
CONFIG_INOTIFY_USER=y
@@ -2053,6 +2112,7 @@ CONFIG_INOTIFY_USER=y
CONFIG_AUTOFS_FS=m
CONFIG_AUTOFS4_FS=m
CONFIG_FUSE_FS=m
+# CONFIG_CUSE is not set
CONFIG_GENERIC_ACL=y
#
@@ -2111,6 +2171,7 @@ CONFIG_JFFS2_ZLIB=y
# CONFIG_JFFS2_LZO is not set
CONFIG_JFFS2_RTIME=y
# CONFIG_JFFS2_RUBIN is not set
+# CONFIG_LOGFS is not set
CONFIG_CRAMFS=y
CONFIG_SQUASHFS=m
# CONFIG_SQUASHFS_EMBEDDED is not set
@@ -2127,7 +2188,6 @@ CONFIG_ROMFS_BACKED_BY_BLOCK=y
CONFIG_ROMFS_ON_BLOCK=y
# CONFIG_SYSV_FS is not set
# CONFIG_UFS_FS is not set
-# CONFIG_NILFS2_FS is not set
CONFIG_NETWORK_FILESYSTEMS=y
CONFIG_NFS_FS=y
CONFIG_NFS_V3=y
@@ -2149,6 +2209,7 @@ CONFIG_SUNRPC_GSS=m
CONFIG_RPCSEC_GSS_KRB5=m
# CONFIG_RPCSEC_GSS_SPKM3 is not set
# CONFIG_SMB_FS is not set
+# CONFIG_CEPH_FS is not set
CONFIG_CIFS=m
# CONFIG_CIFS_STATS is not set
# CONFIG_CIFS_WEAK_PW_HASH is not set
@@ -2230,6 +2291,7 @@ CONFIG_ENABLE_WARN_DEPRECATED=y
CONFIG_ENABLE_MUST_CHECK=y
CONFIG_FRAME_WARN=1024
CONFIG_MAGIC_SYSRQ=y
+# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_UNUSED_SYMBOLS is not set
# CONFIG_DEBUG_FS is not set
# CONFIG_HEADERS_CHECK is not set
@@ -2246,6 +2308,7 @@ CONFIG_SCHED_DEBUG=y
# CONFIG_TIMER_STATS is not set
# CONFIG_DEBUG_OBJECTS is not set
# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_KMEMLEAK is not set
# CONFIG_DEBUG_RT_MUTEXES is not set
# CONFIG_RT_MUTEX_TESTER is not set
# CONFIG_DEBUG_SPINLOCK is not set
@@ -2264,32 +2327,34 @@ CONFIG_DEBUG_MEMORY_INIT=y
# CONFIG_DEBUG_LIST is not set
# CONFIG_DEBUG_SG is not set
# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_DEBUG_CREDENTIALS is not set
CONFIG_FRAME_POINTER=y
# CONFIG_BOOT_PRINTK_DELAY is not set
# CONFIG_RCU_TORTURE_TEST is not set
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
# CONFIG_BACKTRACE_SELF_TEST is not set
# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
# CONFIG_FAULT_INJECTION is not set
# CONFIG_LATENCYTOP is not set
CONFIG_SYSCTL_SYSCALL_CHECK=y
# CONFIG_PAGE_POISONING is not set
CONFIG_HAVE_FUNCTION_TRACER=y
CONFIG_TRACING_SUPPORT=y
-
-#
-# Tracers
-#
+CONFIG_FTRACE=y
# CONFIG_FUNCTION_TRACER is not set
+# CONFIG_IRQSOFF_TRACER is not set
# CONFIG_SCHED_TRACER is not set
-# CONFIG_CONTEXT_SWITCH_TRACER is not set
-# CONFIG_EVENT_TRACER is not set
+# CONFIG_ENABLE_DEFAULT_TRACERS is not set
# CONFIG_BOOT_TRACER is not set
-# CONFIG_TRACE_BRANCH_PROFILING is not set
+CONFIG_BRANCH_PROFILE_NONE=y
+# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
+# CONFIG_PROFILE_ALL_BRANCHES is not set
# CONFIG_STACK_TRACER is not set
# CONFIG_KMEMTRACE is not set
# CONFIG_WORKQUEUE_TRACER is not set
# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_ATOMIC64_SELFTEST is not set
# CONFIG_SAMPLES is not set
CONFIG_HAVE_ARCH_KGDB=y
# CONFIG_KGDB is not set
@@ -2297,7 +2362,9 @@ CONFIG_DEBUG_USER=y
CONFIG_DEBUG_ERRORS=y
# CONFIG_DEBUG_STACK_USAGE is not set
CONFIG_DEBUG_LL=y
+# CONFIG_EARLY_PRINTK is not set
# CONFIG_DEBUG_ICEDCC is not set
+# CONFIG_OC_ETM is not set
CONFIG_DEBUG_S3C_UART=0
#
@@ -2306,13 +2373,16 @@ CONFIG_DEBUG_S3C_UART=0
# CONFIG_KEYS is not set
# CONFIG_SECURITY is not set
# CONFIG_SECURITYFS is not set
-# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+# CONFIG_DEFAULT_SECURITY_SELINUX is not set
+# CONFIG_DEFAULT_SECURITY_SMACK is not set
+# CONFIG_DEFAULT_SECURITY_TOMOYO is not set
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_DEFAULT_SECURITY=""
CONFIG_CRYPTO=y
#
# Crypto core or helper
#
-# CONFIG_CRYPTO_FIPS is not set
CONFIG_CRYPTO_ALGAPI=m
CONFIG_CRYPTO_ALGAPI2=m
CONFIG_CRYPTO_AEAD=m
@@ -2355,11 +2425,13 @@ CONFIG_CRYPTO_ECB=m
#
CONFIG_CRYPTO_HMAC=m
# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
#
# Digest
#
CONFIG_CRYPTO_CRC32C=m
+# CONFIG_CRYPTO_GHASH is not set
# CONFIG_CRYPTO_MD4 is not set
CONFIG_CRYPTO_MD5=m
# CONFIG_CRYPTO_MICHAEL_MIC is not set
@@ -2420,9 +2492,11 @@ CONFIG_CRC7=m
CONFIG_LIBCRC32C=m
CONFIG_ZLIB_INFLATE=y
CONFIG_ZLIB_DEFLATE=y
+CONFIG_LZO_DECOMPRESS=y
CONFIG_DECOMPRESS_GZIP=y
CONFIG_DECOMPRESS_BZIP2=y
CONFIG_DECOMPRESS_LZMA=y
+CONFIG_DECOMPRESS_LZO=y
CONFIG_TEXTSEARCH=y
CONFIG_TEXTSEARCH_KMP=m
CONFIG_TEXTSEARCH_BM=m
@@ -2430,3 +2504,4 @@ CONFIG_TEXTSEARCH_FSM=m
CONFIG_HAS_IOMEM=y
CONFIG_HAS_DMA=y
CONFIG_NLATTR=y
+CONFIG_GENERIC_ATOMIC64=y
diff --git a/arch/arm/configs/s3c6400_defconfig b/arch/arm/configs/s3c6400_defconfig
index 5e7d4c1b8fc1..2b642386f030 100644
--- a/arch/arm/configs/s3c6400_defconfig
+++ b/arch/arm/configs/s3c6400_defconfig
@@ -1,11 +1,15 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.33-rc4
-# Tue Jan 19 13:12:40 2010
+# Linux kernel version: 2.6.34
+# Fri May 28 19:05:39 2010
#
CONFIG_ARM=y
+CONFIG_HAVE_PWM=y
CONFIG_SYS_SUPPORTS_APM_EMULATION=y
CONFIG_GENERIC_GPIO=y
+CONFIG_GENERIC_TIME=y
+CONFIG_ARCH_USES_GETTIMEOFFSET=y
+CONFIG_HAVE_PROC_CPU=y
CONFIG_NO_IOPORT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_STACKTRACE_SUPPORT=y
@@ -18,6 +22,7 @@ CONFIG_RWSEM_GENERIC_SPINLOCK=y
CONFIG_ARCH_HAS_CPUFREQ=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_NEED_DMA_MAP_STATE=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
CONFIG_VECTORS_BASE=0xffff0000
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
@@ -32,6 +37,7 @@ CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_LOCALVERSION=""
CONFIG_LOCALVERSION_AUTO=y
CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_LZMA=y
CONFIG_HAVE_KERNEL_LZO=y
CONFIG_KERNEL_GZIP=y
# CONFIG_KERNEL_BZIP2 is not set
@@ -53,7 +59,6 @@ CONFIG_RCU_FANOUT=32
# CONFIG_TREE_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=17
-# CONFIG_GROUP_SCHED is not set
# CONFIG_CGROUPS is not set
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
@@ -89,10 +94,14 @@ CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
CONFIG_AIO=y
+CONFIG_HAVE_PERF_EVENTS=y
+CONFIG_PERF_USE_VMALLOC=y
#
# Kernel Performance Events And Counters
#
+# CONFIG_PERF_EVENTS is not set
+# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_SLUB_DEBUG=y
CONFIG_COMPAT_BRK=y
@@ -164,7 +173,7 @@ CONFIG_DEFAULT_IOSCHED="cfq"
# CONFIG_INLINE_WRITE_UNLOCK_IRQ is not set
# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
# CONFIG_MUTEX_SPIN_ON_OWNER is not set
-# CONFIG_FREEZER is not set
+CONFIG_FREEZER=y
#
# System Type
@@ -174,8 +183,11 @@ CONFIG_MMU=y
# CONFIG_ARCH_INTEGRATOR is not set
# CONFIG_ARCH_REALVIEW is not set
# CONFIG_ARCH_VERSATILE is not set
+# CONFIG_ARCH_VEXPRESS is not set
# CONFIG_ARCH_AT91 is not set
+# CONFIG_ARCH_BCMRING is not set
# CONFIG_ARCH_CLPS711X is not set
+# CONFIG_ARCH_CNS3XXX is not set
# CONFIG_ARCH_GEMINI is not set
# CONFIG_ARCH_EBSA110 is not set
# CONFIG_ARCH_EP93XX is not set
@@ -184,7 +196,6 @@ CONFIG_MMU=y
# CONFIG_ARCH_STMP3XXX is not set
# CONFIG_ARCH_NETX is not set
# CONFIG_ARCH_H720X is not set
-# CONFIG_ARCH_NOMADIK is not set
# CONFIG_ARCH_IOP13XX is not set
# CONFIG_ARCH_IOP32X is not set
# CONFIG_ARCH_IOP33X is not set
@@ -201,70 +212,89 @@ CONFIG_MMU=y
# CONFIG_ARCH_KS8695 is not set
# CONFIG_ARCH_NS9XXX is not set
# CONFIG_ARCH_W90X900 is not set
+# CONFIG_ARCH_NUC93X is not set
# CONFIG_ARCH_PNX4008 is not set
# CONFIG_ARCH_PXA is not set
# CONFIG_ARCH_MSM is not set
+# CONFIG_ARCH_SHMOBILE is not set
# CONFIG_ARCH_RPC is not set
# CONFIG_ARCH_SA1100 is not set
# CONFIG_ARCH_S3C2410 is not set
CONFIG_ARCH_S3C64XX=y
# CONFIG_ARCH_S5P6440 is not set
-# CONFIG_ARCH_S5PC1XX is not set
+# CONFIG_ARCH_S5P6442 is not set
+# CONFIG_ARCH_S5PC100 is not set
+# CONFIG_ARCH_S5PV210 is not set
# CONFIG_ARCH_SHARK is not set
# CONFIG_ARCH_LH7A40X is not set
# CONFIG_ARCH_U300 is not set
+# CONFIG_ARCH_U8500 is not set
+# CONFIG_ARCH_NOMADIK is not set
# CONFIG_ARCH_DAVINCI is not set
# CONFIG_ARCH_OMAP is not set
-# CONFIG_ARCH_BCMRING is not set
-# CONFIG_ARCH_U8500 is not set
+# CONFIG_PLAT_SPEAR is not set
CONFIG_PLAT_SAMSUNG=y
+
+#
+# Boot options
+#
+CONFIG_S3C_BOOT_ERROR_RESET=y
+CONFIG_S3C_BOOT_UART_FORCE_FIFO=y
+CONFIG_S3C_LOWLEVEL_UART_PORT=0
CONFIG_SAMSUNG_CLKSRC=y
CONFIG_SAMSUNG_IRQ_VIC_TIMER=y
CONFIG_SAMSUNG_IRQ_UART=y
+CONFIG_SAMSUNG_GPIOLIB_4BIT=y
CONFIG_S3C_GPIO_CFG_S3C24XX=y
CONFIG_S3C_GPIO_CFG_S3C64XX=y
CONFIG_S3C_GPIO_PULL_UPDOWN=y
CONFIG_SAMSUNG_GPIO_EXTRA=0
+CONFIG_S3C_GPIO_SPACE=0
+CONFIG_S3C_GPIO_TRACK=y
# CONFIG_S3C_ADC is not set
CONFIG_S3C_DEV_HSMMC=y
CONFIG_S3C_DEV_HSMMC1=y
+CONFIG_S3C_DEV_HSMMC2=y
+CONFIG_S3C_DEV_HWMON=y
CONFIG_S3C_DEV_I2C1=y
CONFIG_S3C_DEV_FB=y
CONFIG_S3C_DEV_USB_HOST=y
CONFIG_S3C_DEV_USB_HSOTG=y
+CONFIG_S3C_DEV_WDT=y
CONFIG_S3C_DEV_NAND=y
-CONFIG_PLAT_S3C64XX=y
-CONFIG_CPU_S3C6400_INIT=y
-CONFIG_CPU_S3C6400_CLOCK=y
-# CONFIG_S3C64XX_DMA is not set
-CONFIG_S3C64XX_SETUP_I2C0=y
-CONFIG_S3C64XX_SETUP_I2C1=y
-CONFIG_S3C64XX_SETUP_FB_24BPP=y
-CONFIG_S3C64XX_SETUP_SDHCI_GPIO=y
-CONFIG_PLAT_S3C=y
-
-#
-# Boot options
-#
-CONFIG_S3C_BOOT_ERROR_RESET=y
-CONFIG_S3C_BOOT_UART_FORCE_FIFO=y
+CONFIG_S3C_DEV_RTC=y
+CONFIG_SAMSUNG_DEV_ADC=y
+CONFIG_SAMSUNG_DEV_TS=y
+CONFIG_S3C_DMA=y
#
# Power management
#
-CONFIG_S3C_LOWLEVEL_UART_PORT=0
-CONFIG_S3C_GPIO_SPACE=0
-CONFIG_S3C_GPIO_TRACK=y
-# CONFIG_MACH_SMDK6400 is not set
+# CONFIG_SAMSUNG_PM_DEBUG is not set
+# CONFIG_S3C_PM_DEBUG_LED_SMDK is not set
+# CONFIG_SAMSUNG_PM_CHECK is not set
+CONFIG_SAMSUNG_WAKEMASK=y
+CONFIG_PLAT_S3C64XX=y
+CONFIG_CPU_S3C6400=y
CONFIG_CPU_S3C6410=y
-CONFIG_S3C6410_SETUP_SDHCI=y
-# CONFIG_MACH_ANW6410 is not set
+CONFIG_S3C64XX_DMA=y
+CONFIG_S3C64XX_SETUP_SDHCI=y
+CONFIG_S3C64XX_SETUP_I2C0=y
+CONFIG_S3C64XX_SETUP_I2C1=y
+CONFIG_S3C64XX_SETUP_FB_24BPP=y
+CONFIG_S3C64XX_SETUP_SDHCI_GPIO=y
+CONFIG_MACH_SMDK6400=y
+CONFIG_MACH_ANW6410=y
CONFIG_MACH_SMDK6410=y
CONFIG_SMDK6410_SD_CH0=y
# CONFIG_SMDK6410_SD_CH1 is not set
# CONFIG_SMDK6410_WM1190_EV1 is not set
-# CONFIG_MACH_NCP is not set
-# CONFIG_MACH_HMT is not set
+# CONFIG_SMDK6410_WM1192_EV1 is not set
+CONFIG_MACH_NCP=y
+CONFIG_MACH_HMT=y
+CONFIG_MACH_SMARTQ=y
+CONFIG_MACH_SMARTQ5=y
+CONFIG_MACH_SMARTQ7=y
#
# Processor Type
@@ -290,6 +320,8 @@ CONFIG_ARM_THUMB=y
# CONFIG_CPU_DCACHE_DISABLE is not set
# CONFIG_CPU_BPREDICT_DISABLE is not set
CONFIG_ARM_L1_CACHE_SHIFT=5
+CONFIG_ARM_DMA_MEM_BUFFERABLE=y
+CONFIG_CPU_HAS_PMU=y
# CONFIG_ARM_ERRATA_411920 is not set
CONFIG_ARM_VIC=y
CONFIG_ARM_VIC_NR=2
@@ -339,6 +371,7 @@ CONFIG_ALIGNMENT_TRAP=y
CONFIG_ZBOOT_ROM_TEXT=0
CONFIG_ZBOOT_ROM_BSS=0
CONFIG_CMDLINE="console=ttySAC0,115200 root=/dev/ram init=/linuxrc initrd=0x51000000,6M ramdisk_size=6144"
+# CONFIG_CMDLINE_FORCE is not set
# CONFIG_XIP_KERNEL is not set
# CONFIG_KEXEC is not set
@@ -371,7 +404,14 @@ CONFIG_HAVE_AOUT=y
#
# Power management options
#
-# CONFIG_PM is not set
+CONFIG_PM=y
+# CONFIG_PM_DEBUG is not set
+CONFIG_PM_SLEEP=y
+CONFIG_SUSPEND=y
+CONFIG_SUSPEND_FREEZER=y
+# CONFIG_APM_EMULATION is not set
+# CONFIG_PM_RUNTIME is not set
+CONFIG_PM_OPS=y
CONFIG_ARCH_SUSPEND_POSSIBLE=y
# CONFIG_NET is not set
@@ -392,7 +432,96 @@ CONFIG_EXTRA_FIRMWARE=""
# CONFIG_DEBUG_DRIVER is not set
# CONFIG_DEBUG_DEVRES is not set
# CONFIG_SYS_HYPERVISOR is not set
-# CONFIG_MTD is not set
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
+# CONFIG_MTD_CONCAT is not set
+# CONFIG_MTD_PARTITIONS is not set
+
+#
+# User Modules And Translation Layers
+#
+# CONFIG_MTD_CHAR is not set
+# CONFIG_MTD_BLKDEVS is not set
+# CONFIG_MTD_BLOCK is not set
+# CONFIG_MTD_BLOCK_RO is not set
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+# CONFIG_SM_FTL is not set
+# CONFIG_MTD_OOPS is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+# CONFIG_MTD_CFI is not set
+# CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_DATAFLASH is not set
+# CONFIG_MTD_M25P80 is not set
+# CONFIG_MTD_SST25L is not set
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_ECC=y
+# CONFIG_MTD_NAND_ECC_SMC is not set
+# CONFIG_MTD_NAND_VERIFY_WRITE is not set
+# CONFIG_MTD_SM_COMMON is not set
+# CONFIG_MTD_NAND_MUSEUM_IDS is not set
+CONFIG_MTD_NAND_DENALI_SCRATCH_REG_ADDR=0xFF108018
+# CONFIG_MTD_NAND_GPIO is not set
+CONFIG_MTD_NAND_IDS=y
+CONFIG_MTD_NAND_S3C2410=y
+# CONFIG_MTD_NAND_S3C2410_DEBUG is not set
+# CONFIG_MTD_NAND_S3C2410_HWECC is not set
+# CONFIG_MTD_NAND_S3C2410_CLKSTOP is not set
+# CONFIG_MTD_NAND_DISKONCHIP is not set
+# CONFIG_MTD_NAND_PLATFORM is not set
+# CONFIG_MTD_ALAUDA is not set
+# CONFIG_MTD_ONENAND is not set
+
+#
+# LPDDR flash memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
+
+#
+# UBI - Unsorted block images
+#
+# CONFIG_MTD_UBI is not set
# CONFIG_PARPORT is not set
CONFIG_BLK_DEV=y
# CONFIG_BLK_DEV_COW_COMMON is not set
@@ -402,6 +531,7 @@ CONFIG_BLK_DEV_LOOP=y
#
# DRBD disabled because PROC_FS, INET or CONNECTOR not selected
#
+# CONFIG_BLK_DEV_UB is not set
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=16
CONFIG_BLK_DEV_RAM_SIZE=4096
@@ -413,13 +543,16 @@ CONFIG_MISC_DEVICES=y
# CONFIG_ICS932S401 is not set
# CONFIG_ENCLOSURE_SERVICES is not set
# CONFIG_ISL29003 is not set
+# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_DS1682 is not set
+# CONFIG_TI_DAC7512 is not set
# CONFIG_C2PORT is not set
#
# EEPROM support
#
CONFIG_EEPROM_AT24=y
+# CONFIG_EEPROM_AT25 is not set
# CONFIG_EEPROM_LEGACY is not set
# CONFIG_EEPROM_MAX6875 is not set
# CONFIG_EEPROM_93CX6 is not set
@@ -430,6 +563,7 @@ CONFIG_HAVE_IDE=y
#
# SCSI device support
#
+CONFIG_SCSI_MOD=y
# CONFIG_RAID_ATTRS is not set
# CONFIG_SCSI is not set
# CONFIG_SCSI_DMA is not set
@@ -466,6 +600,7 @@ CONFIG_KEYBOARD_ATKBD=y
# CONFIG_QT2160 is not set
# CONFIG_KEYBOARD_LKKBD is not set
# CONFIG_KEYBOARD_GPIO is not set
+# CONFIG_KEYBOARD_TCA6416 is not set
# CONFIG_KEYBOARD_MATRIX is not set
# CONFIG_KEYBOARD_MAX7359 is not set
# CONFIG_KEYBOARD_NEWTON is not set
@@ -527,12 +662,17 @@ CONFIG_SERIAL_8250_RUNTIME_UARTS=4
# Non-8250 serial port support
#
CONFIG_SERIAL_SAMSUNG=y
+CONFIG_SERIAL_SAMSUNG_UARTS_4=y
CONFIG_SERIAL_SAMSUNG_UARTS=4
# CONFIG_SERIAL_SAMSUNG_DEBUG is not set
CONFIG_SERIAL_SAMSUNG_CONSOLE=y
CONFIG_SERIAL_S3C6400=y
+# CONFIG_SERIAL_MAX3100 is not set
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_TIMBERDALE is not set
+# CONFIG_SERIAL_ALTERA_JTAGUART is not set
+# CONFIG_SERIAL_ALTERA_UART is not set
CONFIG_UNIX98_PTYS=y
# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
CONFIG_LEGACY_PTYS=y
@@ -561,28 +701,41 @@ CONFIG_I2C_HELPER_AUTO=y
# CONFIG_I2C_OCORES is not set
CONFIG_I2C_S3C2410=y
# CONFIG_I2C_SIMTEC is not set
+# CONFIG_I2C_XILINX is not set
#
# External I2C/SMBus adapter drivers
#
# CONFIG_I2C_PARPORT_LIGHT is not set
# CONFIG_I2C_TAOS_EVM is not set
+# CONFIG_I2C_TINY_USB is not set
#
# Other I2C/SMBus bus drivers
#
# CONFIG_I2C_PCA_PLATFORM is not set
# CONFIG_I2C_STUB is not set
-
-#
-# Miscellaneous I2C Chip support
-#
-# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
# CONFIG_I2C_DEBUG_BUS is not set
-# CONFIG_I2C_DEBUG_CHIP is not set
-# CONFIG_SPI is not set
+CONFIG_SPI=y
+# CONFIG_SPI_DEBUG is not set
+CONFIG_SPI_MASTER=y
+
+#
+# SPI Master Controller Drivers
+#
+CONFIG_SPI_BITBANG=m
+CONFIG_SPI_GPIO=m
+CONFIG_SPI_S3C64XX=m
+# CONFIG_SPI_XILINX is not set
+# CONFIG_SPI_DESIGNWARE is not set
+
+#
+# SPI Protocol Masters
+#
+# CONFIG_SPI_SPIDEV is not set
+# CONFIG_SPI_TLE62X0 is not set
#
# PPS support
@@ -596,10 +749,12 @@ CONFIG_GPIOLIB=y
#
# Memory mapped GPIO expanders:
#
+# CONFIG_GPIO_IT8761E is not set
#
# I2C GPIO expanders:
#
+# CONFIG_GPIO_MAX7300 is not set
# CONFIG_GPIO_MAX732X is not set
# CONFIG_GPIO_PCA953X is not set
# CONFIG_GPIO_PCF857X is not set
@@ -612,6 +767,9 @@ CONFIG_GPIOLIB=y
#
# SPI GPIO expanders:
#
+# CONFIG_GPIO_MAX7301 is not set
+# CONFIG_GPIO_MCP23S08 is not set
+# CONFIG_GPIO_MC33880 is not set
#
# AC97 GPIO expanders:
@@ -627,16 +785,18 @@ CONFIG_HWMON=y
#
# CONFIG_SENSORS_AD7414 is not set
# CONFIG_SENSORS_AD7418 is not set
+# CONFIG_SENSORS_ADCXX is not set
# CONFIG_SENSORS_ADM1021 is not set
# CONFIG_SENSORS_ADM1025 is not set
# CONFIG_SENSORS_ADM1026 is not set
# CONFIG_SENSORS_ADM1029 is not set
# CONFIG_SENSORS_ADM1031 is not set
# CONFIG_SENSORS_ADM9240 is not set
+# CONFIG_SENSORS_ADT7411 is not set
# CONFIG_SENSORS_ADT7462 is not set
# CONFIG_SENSORS_ADT7470 is not set
-# CONFIG_SENSORS_ADT7473 is not set
# CONFIG_SENSORS_ADT7475 is not set
+# CONFIG_SENSORS_ASC7621 is not set
# CONFIG_SENSORS_ATXP1 is not set
# CONFIG_SENSORS_DS1621 is not set
# CONFIG_SENSORS_F71805F is not set
@@ -647,6 +807,7 @@ CONFIG_HWMON=y
# CONFIG_SENSORS_GL520SM is not set
# CONFIG_SENSORS_IT87 is not set
# CONFIG_SENSORS_LM63 is not set
+# CONFIG_SENSORS_LM70 is not set
# CONFIG_SENSORS_LM73 is not set
# CONFIG_SENSORS_LM75 is not set
# CONFIG_SENSORS_LM77 is not set
@@ -661,6 +822,7 @@ CONFIG_HWMON=y
# CONFIG_SENSORS_LTC4215 is not set
# CONFIG_SENSORS_LTC4245 is not set
# CONFIG_SENSORS_LM95241 is not set
+# CONFIG_SENSORS_MAX1111 is not set
# CONFIG_SENSORS_MAX1619 is not set
# CONFIG_SENSORS_MAX6650 is not set
# CONFIG_SENSORS_PC87360 is not set
@@ -672,6 +834,7 @@ CONFIG_HWMON=y
# CONFIG_SENSORS_SMSC47M192 is not set
# CONFIG_SENSORS_SMSC47B397 is not set
# CONFIG_SENSORS_ADS7828 is not set
+# CONFIG_SENSORS_ADS7871 is not set
# CONFIG_SENSORS_AMC6821 is not set
# CONFIG_SENSORS_THMC50 is not set
# CONFIG_SENSORS_TMP401 is not set
@@ -685,9 +848,11 @@ CONFIG_HWMON=y
# CONFIG_SENSORS_W83L786NG is not set
# CONFIG_SENSORS_W83627HF is not set
# CONFIG_SENSORS_W83627EHF is not set
+# CONFIG_SENSORS_LIS3_SPI is not set
# CONFIG_SENSORS_LIS3_I2C is not set
# CONFIG_THERMAL is not set
# CONFIG_WATCHDOG is not set
+CONFIG_HAVE_S3C2410_WATCHDOG=y
CONFIG_SSB_POSSIBLE=y
#
@@ -699,10 +864,13 @@ CONFIG_SSB_POSSIBLE=y
# Multifunction device drivers
#
# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_88PM860X is not set
# CONFIG_MFD_SM501 is not set
# CONFIG_MFD_ASIC3 is not set
# CONFIG_HTC_EGPIO is not set
# CONFIG_HTC_PASIC3 is not set
+# CONFIG_HTC_I2CPLD is not set
+# CONFIG_UCB1400_CORE is not set
# CONFIG_TPS65010 is not set
# CONFIG_TWL4030_CORE is not set
# CONFIG_MFD_TMIO is not set
@@ -711,12 +879,16 @@ CONFIG_SSB_POSSIBLE=y
# CONFIG_MFD_TC6393XB is not set
# CONFIG_PMIC_DA903X is not set
# CONFIG_PMIC_ADP5520 is not set
+# CONFIG_MFD_MAX8925 is not set
# CONFIG_MFD_WM8400 is not set
# CONFIG_MFD_WM831X is not set
# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_WM8994 is not set
# CONFIG_MFD_PCF50633 is not set
+# CONFIG_MFD_MC13783 is not set
# CONFIG_AB3100_CORE is not set
-# CONFIG_MFD_88PM8607 is not set
+# CONFIG_EZX_PCAP is not set
+# CONFIG_AB4500_CORE is not set
# CONFIG_REGULATOR is not set
# CONFIG_MEDIA_SUPPORT is not set
@@ -725,8 +897,47 @@ CONFIG_SSB_POSSIBLE=y
#
# CONFIG_VGASTATE is not set
# CONFIG_VIDEO_OUTPUT_CONTROL is not set
-# CONFIG_FB is not set
-# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+CONFIG_FB=y
+# CONFIG_FIRMWARE_EDID is not set
+# CONFIG_FB_DDC is not set
+# CONFIG_FB_BOOT_VESA_SUPPORT is not set
+CONFIG_FB_CFB_FILLRECT=y
+CONFIG_FB_CFB_COPYAREA=y
+CONFIG_FB_CFB_IMAGEBLIT=y
+# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
+# CONFIG_FB_SYS_FILLRECT is not set
+# CONFIG_FB_SYS_COPYAREA is not set
+# CONFIG_FB_SYS_IMAGEBLIT is not set
+# CONFIG_FB_FOREIGN_ENDIAN is not set
+# CONFIG_FB_SYS_FOPS is not set
+# CONFIG_FB_SVGALIB is not set
+# CONFIG_FB_MACMODES is not set
+# CONFIG_FB_BACKLIGHT is not set
+# CONFIG_FB_MODE_HELPERS is not set
+# CONFIG_FB_TILEBLITTING is not set
+
+#
+# Frame buffer hardware drivers
+#
+# CONFIG_FB_S1D13XXX is not set
+CONFIG_FB_S3C=y
+# CONFIG_FB_S3C_DEBUG_REGWRITE is not set
+# CONFIG_FB_VIRTUAL is not set
+# CONFIG_FB_METRONOME is not set
+# CONFIG_FB_MB862XX is not set
+# CONFIG_FB_BROADSHEET is not set
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_LCD_CLASS_DEVICE=y
+# CONFIG_LCD_L4F00242T03 is not set
+# CONFIG_LCD_LMS283GF05 is not set
+CONFIG_LCD_LTV350QV=y
+# CONFIG_LCD_ILI9320 is not set
+# CONFIG_LCD_TDO24M is not set
+# CONFIG_LCD_VGG2432A4 is not set
+# CONFIG_LCD_PLATFORM is not set
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_BACKLIGHT_GENERIC=y
+CONFIG_BACKLIGHT_PWM=y
#
# Display device support
@@ -738,33 +949,246 @@ CONFIG_SSB_POSSIBLE=y
#
# CONFIG_VGA_CONSOLE is not set
CONFIG_DUMMY_CONSOLE=y
-# CONFIG_SOUND is not set
+# CONFIG_FRAMEBUFFER_CONSOLE is not set
+# CONFIG_LOGO is not set
+CONFIG_SOUND=y
+CONFIG_SOUND_OSS_CORE=y
+CONFIG_SOUND_OSS_CORE_PRECLAIM=y
+CONFIG_SND=m
+CONFIG_SND_TIMER=m
+CONFIG_SND_PCM=m
+CONFIG_SND_JACK=y
+# CONFIG_SND_SEQUENCER is not set
+CONFIG_SND_OSSEMUL=y
+CONFIG_SND_MIXER_OSS=m
+CONFIG_SND_PCM_OSS=m
+CONFIG_SND_PCM_OSS_PLUGINS=y
+# CONFIG_SND_DYNAMIC_MINORS is not set
+CONFIG_SND_SUPPORT_OLD_API=y
+CONFIG_SND_VERBOSE_PROCFS=y
+# CONFIG_SND_VERBOSE_PRINTK is not set
+# CONFIG_SND_DEBUG is not set
+# CONFIG_SND_RAWMIDI_SEQ is not set
+# CONFIG_SND_OPL3_LIB_SEQ is not set
+# CONFIG_SND_OPL4_LIB_SEQ is not set
+# CONFIG_SND_SBAWE_SEQ is not set
+# CONFIG_SND_EMU10K1_SEQ is not set
+CONFIG_SND_DRIVERS=y
+# CONFIG_SND_DUMMY is not set
+# CONFIG_SND_MTPAV is not set
+# CONFIG_SND_SERIAL_U16550 is not set
+# CONFIG_SND_MPU401 is not set
+CONFIG_SND_ARM=y
+CONFIG_SND_SPI=y
+CONFIG_SND_USB=y
+# CONFIG_SND_USB_AUDIO is not set
+# CONFIG_SND_USB_UA101 is not set
+# CONFIG_SND_USB_CAIAQ is not set
+CONFIG_SND_SOC=m
+CONFIG_SND_SOC_AC97_BUS=y
+CONFIG_SND_S3C24XX_SOC=m
+CONFIG_SND_S3C_SOC_AC97=m
+# CONFIG_SND_S3C64XX_SOC_WM8580 is not set
+CONFIG_SND_SOC_SMDK_WM9713=m
+CONFIG_SND_SOC_I2C_AND_SPI=m
+# CONFIG_SND_SOC_ALL_CODECS is not set
+CONFIG_SND_SOC_WM9713=m
+# CONFIG_SOUND_PRIME is not set
+CONFIG_AC97_BUS=m
CONFIG_HID_SUPPORT=y
CONFIG_HID=y
# CONFIG_HIDRAW is not set
+
+#
+# USB Input Devices
+#
+CONFIG_USB_HID=y
# CONFIG_HID_PID is not set
+# CONFIG_USB_HIDDEV is not set
#
# Special HID drivers
#
+# CONFIG_HID_3M_PCT is not set
+CONFIG_HID_A4TECH=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_BELKIN=y
+# CONFIG_HID_CANDO is not set
+CONFIG_HID_CHERRY=y
+CONFIG_HID_CHICONY=y
+# CONFIG_HID_PRODIKEYS is not set
+CONFIG_HID_CYPRESS=y
+# CONFIG_HID_DRAGONRISE is not set
+# CONFIG_HID_EGALAX is not set
+CONFIG_HID_EZKEY=y
+CONFIG_HID_KYE=y
+# CONFIG_HID_GYRATION is not set
+# CONFIG_HID_TWINHAN is not set
+CONFIG_HID_KENSINGTON=y
+CONFIG_HID_LOGITECH=y
+# CONFIG_LOGITECH_FF is not set
+# CONFIG_LOGIRUMBLEPAD2_FF is not set
+# CONFIG_LOGIG940_FF is not set
+CONFIG_HID_MICROSOFT=y
+# CONFIG_HID_MOSART is not set
+CONFIG_HID_MONTEREY=y
+# CONFIG_HID_NTRIG is not set
+# CONFIG_HID_ORTEK is not set
+# CONFIG_HID_PANTHERLORD is not set
+# CONFIG_HID_PETALYNX is not set
+# CONFIG_HID_PICOLCD is not set
+# CONFIG_HID_QUANTA is not set
+# CONFIG_HID_ROCCAT_KONE is not set
+# CONFIG_HID_SAMSUNG is not set
+# CONFIG_HID_SONY is not set
+# CONFIG_HID_STANTUM is not set
+# CONFIG_HID_SUNPLUS is not set
+# CONFIG_HID_GREENASIA is not set
+# CONFIG_HID_SMARTJOYPLUS is not set
+# CONFIG_HID_TOPSEED is not set
+# CONFIG_HID_THRUSTMASTER is not set
+# CONFIG_HID_ZEROPLUS is not set
+# CONFIG_HID_ZYDACRON is not set
CONFIG_USB_SUPPORT=y
CONFIG_USB_ARCH_HAS_HCD=y
CONFIG_USB_ARCH_HAS_OHCI=y
# CONFIG_USB_ARCH_HAS_EHCI is not set
-# CONFIG_USB is not set
+CONFIG_USB=y
+# CONFIG_USB_DEBUG is not set
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+
+#
+# Miscellaneous USB options
+#
+CONFIG_USB_DEVICEFS=y
+CONFIG_USB_DEVICE_CLASS=y
+# CONFIG_USB_DYNAMIC_MINORS is not set
+# CONFIG_USB_MON is not set
+# CONFIG_USB_WUSB is not set
+# CONFIG_USB_WUSB_CBAF is not set
+
+#
+# USB Host Controller Drivers
+#
+# CONFIG_USB_C67X00_HCD is not set
+# CONFIG_USB_OXU210HP_HCD is not set
+# CONFIG_USB_ISP116X_HCD is not set
+# CONFIG_USB_ISP1760_HCD is not set
+# CONFIG_USB_ISP1362_HCD is not set
+CONFIG_USB_OHCI_HCD=y
+# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set
+# CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set
+CONFIG_USB_OHCI_LITTLE_ENDIAN=y
+# CONFIG_USB_SL811_HCD is not set
+# CONFIG_USB_R8A66597_HCD is not set
+# CONFIG_USB_HWA_HCD is not set
+# CONFIG_USB_MUSB_HDRC is not set
#
-# Enable Host or Gadget support to see Inventra options
+# USB Device Class drivers
#
+CONFIG_USB_ACM=m
+CONFIG_USB_PRINTER=m
+# CONFIG_USB_WDM is not set
+# CONFIG_USB_TMC is not set
#
# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
#
+
+#
+# also be needed; see USB_STORAGE Help for more info
+#
+# CONFIG_USB_LIBUSUAL is not set
+
+#
+# USB Imaging devices
+#
+# CONFIG_USB_MDC800 is not set
+
+#
+# USB port drivers
+#
+CONFIG_USB_SERIAL=m
+# CONFIG_USB_EZUSB is not set
+CONFIG_USB_SERIAL_GENERIC=y
+# CONFIG_USB_SERIAL_AIRCABLE is not set
+# CONFIG_USB_SERIAL_ARK3116 is not set
+# CONFIG_USB_SERIAL_BELKIN is not set
+# CONFIG_USB_SERIAL_CH341 is not set
+# CONFIG_USB_SERIAL_WHITEHEAT is not set
+# CONFIG_USB_SERIAL_DIGI_ACCELEPORT is not set
+# CONFIG_USB_SERIAL_CP210X is not set
+# CONFIG_USB_SERIAL_CYPRESS_M8 is not set
+CONFIG_USB_SERIAL_EMPEG=m
+CONFIG_USB_SERIAL_FTDI_SIO=m
+# CONFIG_USB_SERIAL_FUNSOFT is not set
+# CONFIG_USB_SERIAL_VISOR is not set
+# CONFIG_USB_SERIAL_IPAQ is not set
+# CONFIG_USB_SERIAL_IR is not set
+# CONFIG_USB_SERIAL_EDGEPORT is not set
+# CONFIG_USB_SERIAL_EDGEPORT_TI is not set
+# CONFIG_USB_SERIAL_GARMIN is not set
+# CONFIG_USB_SERIAL_IPW is not set
+# CONFIG_USB_SERIAL_IUU is not set
+# CONFIG_USB_SERIAL_KEYSPAN_PDA is not set
+# CONFIG_USB_SERIAL_KEYSPAN is not set
+# CONFIG_USB_SERIAL_KLSI is not set
+# CONFIG_USB_SERIAL_KOBIL_SCT is not set
+# CONFIG_USB_SERIAL_MCT_U232 is not set
+# CONFIG_USB_SERIAL_MOS7720 is not set
+# CONFIG_USB_SERIAL_MOS7840 is not set
+# CONFIG_USB_SERIAL_MOTOROLA is not set
+# CONFIG_USB_SERIAL_NAVMAN is not set
+CONFIG_USB_SERIAL_PL2303=m
+# CONFIG_USB_SERIAL_OTI6858 is not set
+# CONFIG_USB_SERIAL_QCAUX is not set
+# CONFIG_USB_SERIAL_QUALCOMM is not set
+# CONFIG_USB_SERIAL_SPCP8X5 is not set
+# CONFIG_USB_SERIAL_HP4X is not set
+# CONFIG_USB_SERIAL_SAFE is not set
+# CONFIG_USB_SERIAL_SIEMENS_MPI is not set
+# CONFIG_USB_SERIAL_SIERRAWIRELESS is not set
+# CONFIG_USB_SERIAL_SYMBOL is not set
+# CONFIG_USB_SERIAL_TI is not set
+# CONFIG_USB_SERIAL_CYBERJACK is not set
+# CONFIG_USB_SERIAL_XIRCOM is not set
+# CONFIG_USB_SERIAL_OPTION is not set
+# CONFIG_USB_SERIAL_OMNINET is not set
+# CONFIG_USB_SERIAL_OPTICON is not set
+# CONFIG_USB_SERIAL_VIVOPAY_SERIAL is not set
+# CONFIG_USB_SERIAL_ZIO is not set
+# CONFIG_USB_SERIAL_DEBUG is not set
+
+#
+# USB Miscellaneous drivers
+#
+# CONFIG_USB_EMI62 is not set
+# CONFIG_USB_EMI26 is not set
+# CONFIG_USB_ADUTUX is not set
+# CONFIG_USB_SEVSEG is not set
+# CONFIG_USB_RIO500 is not set
+# CONFIG_USB_LEGOTOWER is not set
+# CONFIG_USB_LCD is not set
+# CONFIG_USB_LED is not set
+# CONFIG_USB_CYPRESS_CY7C63 is not set
+# CONFIG_USB_CYTHERM is not set
+# CONFIG_USB_IDMOUSE is not set
+# CONFIG_USB_FTDI_ELAN is not set
+# CONFIG_USB_APPLEDISPLAY is not set
+# CONFIG_USB_LD is not set
+# CONFIG_USB_TRANCEVIBRATOR is not set
+# CONFIG_USB_IOWARRIOR is not set
+# CONFIG_USB_TEST is not set
+# CONFIG_USB_ISIGHTFW is not set
# CONFIG_USB_GADGET is not set
#
# OTG and related infrastructure
#
+# CONFIG_USB_GPIO_VBUS is not set
+# CONFIG_USB_ULPI is not set
+# CONFIG_NOP_USB_XCEIV is not set
CONFIG_MMC=y
CONFIG_MMC_DEBUG=y
CONFIG_MMC_UNSAFE_RESUME=y
@@ -784,20 +1208,80 @@ CONFIG_MMC_SDHCI=y
# CONFIG_MMC_SDHCI_PLTFM is not set
CONFIG_MMC_SDHCI_S3C=y
# CONFIG_MMC_SDHCI_S3C_DMA is not set
-# CONFIG_MMC_AT91 is not set
-# CONFIG_MMC_ATMELMCI is not set
+# CONFIG_MMC_SPI is not set
# CONFIG_MEMSTICK is not set
# CONFIG_NEW_LEDS is not set
# CONFIG_ACCESSIBILITY is not set
CONFIG_RTC_LIB=y
-# CONFIG_RTC_CLASS is not set
-# CONFIG_DMADEVICES is not set
-# CONFIG_AUXDISPLAY is not set
-# CONFIG_UIO is not set
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_HCTOSYS=y
+CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
+# CONFIG_RTC_DEBUG is not set
+
+#
+# RTC interfaces
+#
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
+# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
+# CONFIG_RTC_DRV_TEST is not set
+
+#
+# I2C RTC drivers
+#
+# CONFIG_RTC_DRV_DS1307 is not set
+# CONFIG_RTC_DRV_DS1374 is not set
+# CONFIG_RTC_DRV_DS1672 is not set
+# CONFIG_RTC_DRV_MAX6900 is not set
+# CONFIG_RTC_DRV_RS5C372 is not set
+# CONFIG_RTC_DRV_ISL1208 is not set
+# CONFIG_RTC_DRV_X1205 is not set
+# CONFIG_RTC_DRV_PCF8563 is not set
+# CONFIG_RTC_DRV_PCF8583 is not set
+# CONFIG_RTC_DRV_M41T80 is not set
+# CONFIG_RTC_DRV_BQ32K is not set
+# CONFIG_RTC_DRV_S35390A is not set
+# CONFIG_RTC_DRV_FM3130 is not set
+# CONFIG_RTC_DRV_RX8581 is not set
+# CONFIG_RTC_DRV_RX8025 is not set
+
+#
+# SPI RTC drivers
+#
+# CONFIG_RTC_DRV_M41T94 is not set
+# CONFIG_RTC_DRV_DS1305 is not set
+# CONFIG_RTC_DRV_DS1390 is not set
+# CONFIG_RTC_DRV_MAX6902 is not set
+# CONFIG_RTC_DRV_R9701 is not set
+# CONFIG_RTC_DRV_RS5C348 is not set
+# CONFIG_RTC_DRV_DS3234 is not set
+# CONFIG_RTC_DRV_PCF2123 is not set
+
+#
+# Platform RTC drivers
+#
+# CONFIG_RTC_DRV_CMOS is not set
+# CONFIG_RTC_DRV_DS1286 is not set
+# CONFIG_RTC_DRV_DS1511 is not set
+# CONFIG_RTC_DRV_DS1553 is not set
+# CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_STK17TA8 is not set
+# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T35 is not set
+# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_MSM6242 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
+# CONFIG_RTC_DRV_RP5C01 is not set
+# CONFIG_RTC_DRV_V3020 is not set
#
-# TI VLYNQ
+# on-CPU RTC drivers
#
+CONFIG_RTC_DRV_S3C=y
+# CONFIG_DMADEVICES is not set
+# CONFIG_AUXDISPLAY is not set
+# CONFIG_UIO is not set
# CONFIG_STAGING is not set
#
@@ -869,6 +1353,8 @@ CONFIG_MISC_FILESYSTEMS=y
# CONFIG_BEFS_FS is not set
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
+# CONFIG_JFFS2_FS is not set
+# CONFIG_LOGFS is not set
CONFIG_CRAMFS=y
# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
@@ -889,7 +1375,46 @@ CONFIG_ROMFS_ON_BLOCK=y
#
# CONFIG_PARTITION_ADVANCED is not set
CONFIG_MSDOS_PARTITION=y
-# CONFIG_NLS is not set
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+# CONFIG_NLS_CODEPAGE_437 is not set
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+# CONFIG_NLS_ASCII is not set
+# CONFIG_NLS_ISO8859_1 is not set
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_UTF8 is not set
#
# Kernel hacking
@@ -952,6 +1477,7 @@ CONFIG_HAVE_FUNCTION_TRACER=y
CONFIG_TRACING_SUPPORT=y
CONFIG_FTRACE=y
# CONFIG_FUNCTION_TRACER is not set
+# CONFIG_IRQSOFF_TRACER is not set
# CONFIG_SCHED_TRACER is not set
# CONFIG_ENABLE_DEFAULT_TRACERS is not set
# CONFIG_BOOT_TRACER is not set
@@ -962,6 +1488,7 @@ CONFIG_BRANCH_PROFILE_NONE=y
# CONFIG_KMEMTRACE is not set
# CONFIG_WORKQUEUE_TRACER is not set
# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_ATOMIC64_SELFTEST is not set
# CONFIG_SAMPLES is not set
CONFIG_HAVE_ARCH_KGDB=y
# CONFIG_KGDB is not set
diff --git a/arch/arm/configs/s5p6440_defconfig b/arch/arm/configs/s5p6440_defconfig
index 279a15e53114..532e987beb4d 100644
--- a/arch/arm/configs/s5p6440_defconfig
+++ b/arch/arm/configs/s5p6440_defconfig
@@ -1,11 +1,14 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.33-rc2
-# Sat Jan 9 16:33:55 2010
+# Linux kernel version: 2.6.34
+# Wed May 26 19:04:32 2010
#
CONFIG_ARM=y
CONFIG_SYS_SUPPORTS_APM_EMULATION=y
CONFIG_GENERIC_GPIO=y
+CONFIG_GENERIC_TIME=y
+CONFIG_ARCH_USES_GETTIMEOFFSET=y
+CONFIG_HAVE_PROC_CPU=y
CONFIG_NO_IOPORT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_STACKTRACE_SUPPORT=y
@@ -17,6 +20,7 @@ CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_RWSEM_GENERIC_SPINLOCK=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_NEED_DMA_MAP_STATE=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
CONFIG_VECTORS_BASE=0xffff0000
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
@@ -30,6 +34,13 @@ CONFIG_BROKEN_ON_SMP=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_LOCALVERSION=""
CONFIG_LOCALVERSION_AUTO=y
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_LZO=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_LZO is not set
CONFIG_SWAP=y
# CONFIG_SYSVIPC is not set
# CONFIG_BSD_PROCESS_ACCT is not set
@@ -46,7 +57,6 @@ CONFIG_RCU_FANOUT=32
# CONFIG_TREE_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=17
-# CONFIG_GROUP_SCHED is not set
# CONFIG_CGROUPS is not set
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
@@ -60,6 +70,7 @@ CONFIG_INITRAMFS_SOURCE=""
CONFIG_RD_GZIP=y
CONFIG_RD_BZIP2=y
CONFIG_RD_LZMA=y
+CONFIG_RD_LZO=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_SYSCTL=y
CONFIG_ANON_INODES=y
@@ -81,10 +92,14 @@ CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
CONFIG_AIO=y
+CONFIG_HAVE_PERF_EVENTS=y
+CONFIG_PERF_USE_VMALLOC=y
#
# Kernel Performance Events And Counters
#
+# CONFIG_PERF_EVENTS is not set
+# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_SLUB_DEBUG=y
CONFIG_COMPAT_BRK=y
@@ -166,8 +181,11 @@ CONFIG_MMU=y
# CONFIG_ARCH_INTEGRATOR is not set
# CONFIG_ARCH_REALVIEW is not set
# CONFIG_ARCH_VERSATILE is not set
+# CONFIG_ARCH_VEXPRESS is not set
# CONFIG_ARCH_AT91 is not set
+# CONFIG_ARCH_BCMRING is not set
# CONFIG_ARCH_CLPS711X is not set
+# CONFIG_ARCH_CNS3XXX is not set
# CONFIG_ARCH_GEMINI is not set
# CONFIG_ARCH_EBSA110 is not set
# CONFIG_ARCH_EP93XX is not set
@@ -176,7 +194,6 @@ CONFIG_MMU=y
# CONFIG_ARCH_STMP3XXX is not set
# CONFIG_ARCH_NETX is not set
# CONFIG_ARCH_H720X is not set
-# CONFIG_ARCH_NOMADIK is not set
# CONFIG_ARCH_IOP13XX is not set
# CONFIG_ARCH_IOP32X is not set
# CONFIG_ARCH_IOP33X is not set
@@ -193,44 +210,56 @@ CONFIG_MMU=y
# CONFIG_ARCH_KS8695 is not set
# CONFIG_ARCH_NS9XXX is not set
# CONFIG_ARCH_W90X900 is not set
+# CONFIG_ARCH_NUC93X is not set
# CONFIG_ARCH_PNX4008 is not set
# CONFIG_ARCH_PXA is not set
# CONFIG_ARCH_MSM is not set
+# CONFIG_ARCH_SHMOBILE is not set
# CONFIG_ARCH_RPC is not set
# CONFIG_ARCH_SA1100 is not set
# CONFIG_ARCH_S3C2410 is not set
# CONFIG_ARCH_S3C64XX is not set
CONFIG_ARCH_S5P6440=y
-# CONFIG_ARCH_S5PC1XX is not set
+# CONFIG_ARCH_S5P6442 is not set
+# CONFIG_ARCH_S5PC100 is not set
+# CONFIG_ARCH_S5PV210 is not set
# CONFIG_ARCH_SHARK is not set
# CONFIG_ARCH_LH7A40X is not set
# CONFIG_ARCH_U300 is not set
+# CONFIG_ARCH_U8500 is not set
+# CONFIG_ARCH_NOMADIK is not set
# CONFIG_ARCH_DAVINCI is not set
# CONFIG_ARCH_OMAP is not set
-# CONFIG_ARCH_BCMRING is not set
-# CONFIG_ARCH_U8500 is not set
+# CONFIG_PLAT_SPEAR is not set
CONFIG_PLAT_SAMSUNG=y
-CONFIG_SAMSUNG_CLKSRC=y
-CONFIG_SAMSUNG_IRQ_VIC_TIMER=y
-CONFIG_SAMSUNG_IRQ_UART=y
-CONFIG_SAMSUNG_GPIO_EXTRA=0
-CONFIG_PLAT_S3C=y
#
# Boot options
#
CONFIG_S3C_BOOT_ERROR_RESET=y
CONFIG_S3C_BOOT_UART_FORCE_FIFO=y
+CONFIG_S3C_LOWLEVEL_UART_PORT=1
+CONFIG_SAMSUNG_CLKSRC=y
+CONFIG_SAMSUNG_IRQ_VIC_TIMER=y
+CONFIG_SAMSUNG_IRQ_UART=y
+CONFIG_SAMSUNG_GPIOLIB_4BIT=y
+CONFIG_S3C_GPIO_CFG_S3C24XX=y
+CONFIG_S3C_GPIO_CFG_S3C64XX=y
+CONFIG_S3C_GPIO_PULL_UPDOWN=y
+CONFIG_S5P_GPIO_DRVSTR=y
+CONFIG_SAMSUNG_GPIO_EXTRA=0
+CONFIG_S3C_GPIO_SPACE=0
+CONFIG_S3C_GPIO_TRACK=y
+# CONFIG_S3C_ADC is not set
+CONFIG_S3C_DEV_WDT=y
+CONFIG_SAMSUNG_DEV_ADC=y
+CONFIG_SAMSUNG_DEV_TS=y
+CONFIG_S3C_PL330_DMA=y
#
# Power management
#
-CONFIG_S3C_LOWLEVEL_UART_PORT=1
-CONFIG_S3C_GPIO_SPACE=0
-CONFIG_S3C_GPIO_TRACK=y
CONFIG_PLAT_S5P=y
-CONFIG_CPU_S5P6440_INIT=y
-CONFIG_CPU_S5P6440_CLOCK=y
CONFIG_CPU_S5P6440=y
CONFIG_MACH_SMDK6440=y
@@ -258,9 +287,12 @@ CONFIG_ARM_THUMB=y
# CONFIG_CPU_DCACHE_DISABLE is not set
# CONFIG_CPU_BPREDICT_DISABLE is not set
CONFIG_ARM_L1_CACHE_SHIFT=5
+CONFIG_ARM_DMA_MEM_BUFFERABLE=y
+CONFIG_CPU_HAS_PMU=y
# CONFIG_ARM_ERRATA_411920 is not set
CONFIG_ARM_VIC=y
CONFIG_ARM_VIC_NR=2
+CONFIG_PL330=y
#
# Bus support
@@ -307,6 +339,7 @@ CONFIG_ALIGNMENT_TRAP=y
CONFIG_ZBOOT_ROM_TEXT=0
CONFIG_ZBOOT_ROM_BSS=0
CONFIG_CMDLINE="root=/dev/ram0 rw ramdisk=8192 initrd=0x20800000,8M console=ttySAC1,115200 init=/linuxrc"
+# CONFIG_CMDLINE_FORCE is not set
# CONFIG_XIP_KERNEL is not set
# CONFIG_KEXEC is not set
@@ -382,6 +415,7 @@ CONFIG_HAVE_IDE=y
#
# SCSI device support
#
+CONFIG_SCSI_MOD=y
# CONFIG_RAID_ATTRS is not set
CONFIG_SCSI=y
CONFIG_SCSI_DMA=y
@@ -470,7 +504,9 @@ CONFIG_MOUSE_PS2_TRACKPOINT=y
CONFIG_INPUT_TOUCHSCREEN=y
# CONFIG_TOUCHSCREEN_AD7879 is not set
# CONFIG_TOUCHSCREEN_DYNAPRO is not set
+# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set
# CONFIG_TOUCHSCREEN_FUJITSU is not set
+# CONFIG_TOUCHSCREEN_S3C2410 is not set
# CONFIG_TOUCHSCREEN_GUNZE is not set
# CONFIG_TOUCHSCREEN_ELO is not set
# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set
@@ -518,12 +554,16 @@ CONFIG_SERIAL_8250_RUNTIME_UARTS=3
# Non-8250 serial port support
#
CONFIG_SERIAL_SAMSUNG=y
+CONFIG_SERIAL_SAMSUNG_UARTS_4=y
CONFIG_SERIAL_SAMSUNG_UARTS=4
# CONFIG_SERIAL_SAMSUNG_DEBUG is not set
CONFIG_SERIAL_SAMSUNG_CONSOLE=y
-CONFIG_SERIAL_S5P6440=y
+CONFIG_SERIAL_S3C6400=y
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_TIMBERDALE is not set
+# CONFIG_SERIAL_ALTERA_JTAGUART is not set
+# CONFIG_SERIAL_ALTERA_UART is not set
CONFIG_UNIX98_PTYS=y
# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
CONFIG_LEGACY_PTYS=y
@@ -549,6 +589,7 @@ CONFIG_GPIOLIB=y
#
# Memory mapped GPIO expanders:
#
+# CONFIG_GPIO_IT8761E is not set
#
# I2C GPIO expanders:
@@ -570,6 +611,7 @@ CONFIG_GPIOLIB=y
# CONFIG_HWMON is not set
# CONFIG_THERMAL is not set
# CONFIG_WATCHDOG is not set
+CONFIG_HAVE_S3C2410_WATCHDOG=y
CONFIG_SSB_POSSIBLE=y
#
@@ -626,10 +668,6 @@ CONFIG_RTC_LIB=y
# CONFIG_DMADEVICES is not set
# CONFIG_AUXDISPLAY is not set
# CONFIG_UIO is not set
-
-#
-# TI VLYNQ
-#
# CONFIG_STAGING is not set
#
@@ -704,6 +742,7 @@ CONFIG_MISC_FILESYSTEMS=y
# CONFIG_BEFS_FS is not set
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
+# CONFIG_LOGFS is not set
CONFIG_CRAMFS=y
# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
@@ -826,6 +865,7 @@ CONFIG_HAVE_FUNCTION_TRACER=y
CONFIG_TRACING_SUPPORT=y
CONFIG_FTRACE=y
# CONFIG_FUNCTION_TRACER is not set
+# CONFIG_IRQSOFF_TRACER is not set
# CONFIG_SCHED_TRACER is not set
# CONFIG_ENABLE_DEFAULT_TRACERS is not set
# CONFIG_BOOT_TRACER is not set
@@ -836,6 +876,7 @@ CONFIG_BRANCH_PROFILE_NONE=y
# CONFIG_KMEMTRACE is not set
# CONFIG_WORKQUEUE_TRACER is not set
# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_ATOMIC64_SELFTEST is not set
# CONFIG_SAMPLES is not set
CONFIG_HAVE_ARCH_KGDB=y
# CONFIG_KGDB is not set
@@ -962,8 +1003,10 @@ CONFIG_CRC32=y
# CONFIG_CRC7 is not set
# CONFIG_LIBCRC32C is not set
CONFIG_ZLIB_INFLATE=y
+CONFIG_LZO_DECOMPRESS=y
CONFIG_DECOMPRESS_GZIP=y
CONFIG_DECOMPRESS_BZIP2=y
CONFIG_DECOMPRESS_LZMA=y
+CONFIG_DECOMPRESS_LZO=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_DMA=y
diff --git a/arch/arm/configs/s5p6442_defconfig b/arch/arm/configs/s5p6442_defconfig
index 74e20bfc0487..068219b360f5 100644
--- a/arch/arm/configs/s5p6442_defconfig
+++ b/arch/arm/configs/s5p6442_defconfig
@@ -1,11 +1,14 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.33-rc4
-# Mon Jan 25 08:50:28 2010
+# Linux kernel version: 2.6.34
+# Wed May 26 19:04:34 2010
#
CONFIG_ARM=y
CONFIG_SYS_SUPPORTS_APM_EMULATION=y
CONFIG_GENERIC_GPIO=y
+CONFIG_GENERIC_TIME=y
+CONFIG_ARCH_USES_GETTIMEOFFSET=y
+CONFIG_HAVE_PROC_CPU=y
CONFIG_NO_IOPORT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_STACKTRACE_SUPPORT=y
@@ -17,6 +20,7 @@ CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_RWSEM_GENERIC_SPINLOCK=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_NEED_DMA_MAP_STATE=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
CONFIG_VECTORS_BASE=0xffff0000
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
@@ -31,6 +35,7 @@ CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_LOCALVERSION=""
CONFIG_LOCALVERSION_AUTO=y
CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_LZMA=y
CONFIG_HAVE_KERNEL_LZO=y
CONFIG_KERNEL_GZIP=y
# CONFIG_KERNEL_BZIP2 is not set
@@ -52,7 +57,6 @@ CONFIG_RCU_FANOUT=32
# CONFIG_TREE_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=17
-# CONFIG_GROUP_SCHED is not set
# CONFIG_CGROUPS is not set
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
@@ -88,10 +92,14 @@ CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
CONFIG_AIO=y
+CONFIG_HAVE_PERF_EVENTS=y
+CONFIG_PERF_USE_VMALLOC=y
#
# Kernel Performance Events And Counters
#
+# CONFIG_PERF_EVENTS is not set
+# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_SLUB_DEBUG=y
CONFIG_COMPAT_BRK=y
@@ -173,8 +181,11 @@ CONFIG_MMU=y
# CONFIG_ARCH_INTEGRATOR is not set
# CONFIG_ARCH_REALVIEW is not set
# CONFIG_ARCH_VERSATILE is not set
+# CONFIG_ARCH_VEXPRESS is not set
# CONFIG_ARCH_AT91 is not set
+# CONFIG_ARCH_BCMRING is not set
# CONFIG_ARCH_CLPS711X is not set
+# CONFIG_ARCH_CNS3XXX is not set
# CONFIG_ARCH_GEMINI is not set
# CONFIG_ARCH_EBSA110 is not set
# CONFIG_ARCH_EP93XX is not set
@@ -183,7 +194,6 @@ CONFIG_MMU=y
# CONFIG_ARCH_STMP3XXX is not set
# CONFIG_ARCH_NETX is not set
# CONFIG_ARCH_H720X is not set
-# CONFIG_ARCH_NOMADIK is not set
# CONFIG_ARCH_IOP13XX is not set
# CONFIG_ARCH_IOP32X is not set
# CONFIG_ARCH_IOP33X is not set
@@ -200,24 +210,35 @@ CONFIG_MMU=y
# CONFIG_ARCH_KS8695 is not set
# CONFIG_ARCH_NS9XXX is not set
# CONFIG_ARCH_W90X900 is not set
+# CONFIG_ARCH_NUC93X is not set
# CONFIG_ARCH_PNX4008 is not set
# CONFIG_ARCH_PXA is not set
# CONFIG_ARCH_MSM is not set
+# CONFIG_ARCH_SHMOBILE is not set
# CONFIG_ARCH_RPC is not set
# CONFIG_ARCH_SA1100 is not set
# CONFIG_ARCH_S3C2410 is not set
# CONFIG_ARCH_S3C64XX is not set
# CONFIG_ARCH_S5P6440 is not set
CONFIG_ARCH_S5P6442=y
-# CONFIG_ARCH_S5PC1XX is not set
+# CONFIG_ARCH_S5PC100 is not set
+# CONFIG_ARCH_S5PV210 is not set
# CONFIG_ARCH_SHARK is not set
# CONFIG_ARCH_LH7A40X is not set
# CONFIG_ARCH_U300 is not set
+# CONFIG_ARCH_U8500 is not set
+# CONFIG_ARCH_NOMADIK is not set
# CONFIG_ARCH_DAVINCI is not set
# CONFIG_ARCH_OMAP is not set
-# CONFIG_ARCH_BCMRING is not set
-# CONFIG_ARCH_U8500 is not set
+# CONFIG_PLAT_SPEAR is not set
CONFIG_PLAT_SAMSUNG=y
+
+#
+# Boot options
+#
+# CONFIG_S3C_BOOT_ERROR_RESET is not set
+CONFIG_S3C_BOOT_UART_FORCE_FIFO=y
+CONFIG_S3C_LOWLEVEL_UART_PORT=1
CONFIG_SAMSUNG_CLKSRC=y
CONFIG_SAMSUNG_IRQ_VIC_TIMER=y
CONFIG_SAMSUNG_IRQ_UART=y
@@ -225,22 +246,16 @@ CONFIG_SAMSUNG_GPIOLIB_4BIT=y
CONFIG_S3C_GPIO_CFG_S3C24XX=y
CONFIG_S3C_GPIO_CFG_S3C64XX=y
CONFIG_S3C_GPIO_PULL_UPDOWN=y
+CONFIG_S5P_GPIO_DRVSTR=y
CONFIG_SAMSUNG_GPIO_EXTRA=0
+CONFIG_S3C_GPIO_SPACE=0
+CONFIG_S3C_GPIO_TRACK=y
# CONFIG_S3C_ADC is not set
+CONFIG_S3C_PL330_DMA=y
#
# Power management
#
-CONFIG_PLAT_S3C=y
-
-#
-# Boot options
-#
-# CONFIG_S3C_BOOT_ERROR_RESET is not set
-CONFIG_S3C_BOOT_UART_FORCE_FIFO=y
-CONFIG_S3C_LOWLEVEL_UART_PORT=1
-CONFIG_S3C_GPIO_SPACE=0
-CONFIG_S3C_GPIO_TRACK=y
CONFIG_PLAT_S5P=y
CONFIG_CPU_S5P6442=y
CONFIG_MACH_SMDK6442=y
@@ -269,9 +284,12 @@ CONFIG_ARM_THUMB=y
# CONFIG_CPU_DCACHE_DISABLE is not set
# CONFIG_CPU_BPREDICT_DISABLE is not set
CONFIG_ARM_L1_CACHE_SHIFT=5
+CONFIG_ARM_DMA_MEM_BUFFERABLE=y
+CONFIG_CPU_HAS_PMU=y
# CONFIG_ARM_ERRATA_411920 is not set
CONFIG_ARM_VIC=y
CONFIG_ARM_VIC_NR=2
+CONFIG_PL330=y
#
# Bus support
@@ -318,6 +336,7 @@ CONFIG_ALIGNMENT_TRAP=y
CONFIG_ZBOOT_ROM_TEXT=0
CONFIG_ZBOOT_ROM_BSS=0
CONFIG_CMDLINE="root=/dev/ram0 rw ramdisk=8192 initrd=0x20800000,8M console=ttySAC1,115200 init=/linuxrc"
+# CONFIG_CMDLINE_FORCE is not set
# CONFIG_XIP_KERNEL is not set
# CONFIG_KEXEC is not set
@@ -394,6 +413,7 @@ CONFIG_HAVE_IDE=y
#
# SCSI device support
#
+CONFIG_SCSI_MOD=y
# CONFIG_RAID_ATTRS is not set
CONFIG_SCSI=y
CONFIG_SCSI_DMA=y
@@ -462,6 +482,7 @@ CONFIG_INPUT_EVDEV=y
CONFIG_INPUT_TOUCHSCREEN=y
# CONFIG_TOUCHSCREEN_AD7879 is not set
# CONFIG_TOUCHSCREEN_DYNAPRO is not set
+# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set
# CONFIG_TOUCHSCREEN_FUJITSU is not set
# CONFIG_TOUCHSCREEN_GUNZE is not set
# CONFIG_TOUCHSCREEN_ELO is not set
@@ -515,6 +536,9 @@ CONFIG_SERIAL_SAMSUNG_CONSOLE=y
CONFIG_SERIAL_S5PV210=y
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_TIMBERDALE is not set
+# CONFIG_SERIAL_ALTERA_JTAGUART is not set
+# CONFIG_SERIAL_ALTERA_UART is not set
CONFIG_UNIX98_PTYS=y
# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
CONFIG_LEGACY_PTYS=y
@@ -540,6 +564,7 @@ CONFIG_GPIOLIB=y
#
# Memory mapped GPIO expanders:
#
+# CONFIG_GPIO_IT8761E is not set
#
# I2C GPIO expanders:
@@ -613,10 +638,6 @@ CONFIG_RTC_LIB=y
# CONFIG_DMADEVICES is not set
# CONFIG_AUXDISPLAY is not set
# CONFIG_UIO is not set
-
-#
-# TI VLYNQ
-#
# CONFIG_STAGING is not set
#
@@ -685,6 +706,7 @@ CONFIG_MISC_FILESYSTEMS=y
# CONFIG_BEFS_FS is not set
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
+# CONFIG_LOGFS is not set
CONFIG_CRAMFS=y
# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
@@ -824,6 +846,7 @@ CONFIG_HAVE_FUNCTION_TRACER=y
CONFIG_TRACING_SUPPORT=y
CONFIG_FTRACE=y
# CONFIG_FUNCTION_TRACER is not set
+# CONFIG_IRQSOFF_TRACER is not set
# CONFIG_SCHED_TRACER is not set
# CONFIG_ENABLE_DEFAULT_TRACERS is not set
# CONFIG_BOOT_TRACER is not set
@@ -834,6 +857,7 @@ CONFIG_BRANCH_PROFILE_NONE=y
# CONFIG_KMEMTRACE is not set
# CONFIG_WORKQUEUE_TRACER is not set
# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_ATOMIC64_SELFTEST is not set
# CONFIG_SAMPLES is not set
CONFIG_HAVE_ARCH_KGDB=y
# CONFIG_KGDB is not set
diff --git a/arch/arm/configs/s5pc100_defconfig b/arch/arm/configs/s5pc100_defconfig
index dc108afc060c..ebc6245b9fca 100644
--- a/arch/arm/configs/s5pc100_defconfig
+++ b/arch/arm/configs/s5pc100_defconfig
@@ -1,12 +1,14 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.30
-# Wed Jul 1 15:53:07 2009
+# Linux kernel version: 2.6.34
+# Wed May 26 19:04:35 2010
#
CONFIG_ARM=y
CONFIG_SYS_SUPPORTS_APM_EMULATION=y
CONFIG_GENERIC_GPIO=y
-CONFIG_MMU=y
+CONFIG_GENERIC_TIME=y
+CONFIG_ARCH_USES_GETTIMEOFFSET=y
+CONFIG_HAVE_PROC_CPU=y
CONFIG_NO_IOPORT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_STACKTRACE_SUPPORT=y
@@ -18,7 +20,9 @@ CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_RWSEM_GENERIC_SPINLOCK=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_NEED_DMA_MAP_STATE=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
+CONFIG_ARM_L1_CACHE_SHIFT_6=y
CONFIG_VECTORS_BASE=0xffff0000
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
CONFIG_CONSTRUCTORS=y
@@ -31,6 +35,13 @@ CONFIG_BROKEN_ON_SMP=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_LOCALVERSION=""
CONFIG_LOCALVERSION_AUTO=y
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_LZO=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_LZO is not set
CONFIG_SWAP=y
# CONFIG_SYSVIPC is not set
# CONFIG_BSD_PROCESS_ACCT is not set
@@ -38,14 +49,15 @@ CONFIG_SWAP=y
#
# RCU Subsystem
#
-CONFIG_CLASSIC_RCU=y
-# CONFIG_TREE_RCU is not set
-# CONFIG_PREEMPT_RCU is not set
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_TINY_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
# CONFIG_TREE_RCU_TRACE is not set
-# CONFIG_PREEMPT_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=17
-# CONFIG_GROUP_SCHED is not set
# CONFIG_CGROUPS is not set
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
@@ -59,6 +71,7 @@ CONFIG_INITRAMFS_SOURCE=""
CONFIG_RD_GZIP=y
CONFIG_RD_BZIP2=y
CONFIG_RD_LZMA=y
+CONFIG_RD_LZO=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_SYSCTL=y
CONFIG_ANON_INODES=y
@@ -80,19 +93,21 @@ CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
CONFIG_AIO=y
+CONFIG_HAVE_PERF_EVENTS=y
+CONFIG_PERF_USE_VMALLOC=y
#
-# Performance Counters
+# Kernel Performance Events And Counters
#
+# CONFIG_PERF_EVENTS is not set
+# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_SLUB_DEBUG=y
-# CONFIG_STRIP_ASM_SYMS is not set
CONFIG_COMPAT_BRK=y
# CONFIG_SLAB is not set
CONFIG_SLUB=y
# CONFIG_SLOB is not set
# CONFIG_PROFILING is not set
-# CONFIG_MARKERS is not set
CONFIG_HAVE_OPROFILE=y
# CONFIG_KPROBES is not set
CONFIG_HAVE_KPROBES=y
@@ -122,25 +137,56 @@ CONFIG_LBDAF=y
# IO Schedulers
#
CONFIG_IOSCHED_NOOP=y
-CONFIG_IOSCHED_AS=y
CONFIG_IOSCHED_DEADLINE=y
CONFIG_IOSCHED_CFQ=y
-# CONFIG_DEFAULT_AS is not set
# CONFIG_DEFAULT_DEADLINE is not set
CONFIG_DEFAULT_CFQ=y
# CONFIG_DEFAULT_NOOP is not set
CONFIG_DEFAULT_IOSCHED="cfq"
+# CONFIG_INLINE_SPIN_TRYLOCK is not set
+# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK is not set
+# CONFIG_INLINE_SPIN_LOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
+# CONFIG_INLINE_SPIN_UNLOCK is not set
+# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
+# CONFIG_INLINE_SPIN_UNLOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_READ_TRYLOCK is not set
+# CONFIG_INLINE_READ_LOCK is not set
+# CONFIG_INLINE_READ_LOCK_BH is not set
+# CONFIG_INLINE_READ_LOCK_IRQ is not set
+# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
+# CONFIG_INLINE_READ_UNLOCK is not set
+# CONFIG_INLINE_READ_UNLOCK_BH is not set
+# CONFIG_INLINE_READ_UNLOCK_IRQ is not set
+# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_WRITE_TRYLOCK is not set
+# CONFIG_INLINE_WRITE_LOCK is not set
+# CONFIG_INLINE_WRITE_LOCK_BH is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
+# CONFIG_INLINE_WRITE_UNLOCK is not set
+# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
+# CONFIG_INLINE_WRITE_UNLOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
+# CONFIG_MUTEX_SPIN_ON_OWNER is not set
# CONFIG_FREEZER is not set
#
# System Type
#
+CONFIG_MMU=y
# CONFIG_ARCH_AAEC2000 is not set
# CONFIG_ARCH_INTEGRATOR is not set
# CONFIG_ARCH_REALVIEW is not set
# CONFIG_ARCH_VERSATILE is not set
+# CONFIG_ARCH_VEXPRESS is not set
# CONFIG_ARCH_AT91 is not set
+# CONFIG_ARCH_BCMRING is not set
# CONFIG_ARCH_CLPS711X is not set
+# CONFIG_ARCH_CNS3XXX is not set
# CONFIG_ARCH_GEMINI is not set
# CONFIG_ARCH_EBSA110 is not set
# CONFIG_ARCH_EP93XX is not set
@@ -156,6 +202,7 @@ CONFIG_DEFAULT_IOSCHED="cfq"
# CONFIG_ARCH_IXP2000 is not set
# CONFIG_ARCH_IXP4XX is not set
# CONFIG_ARCH_L7200 is not set
+# CONFIG_ARCH_DOVE is not set
# CONFIG_ARCH_KIRKWOOD is not set
# CONFIG_ARCH_LOKI is not set
# CONFIG_ARCH_MV78XX0 is not set
@@ -164,39 +211,64 @@ CONFIG_DEFAULT_IOSCHED="cfq"
# CONFIG_ARCH_KS8695 is not set
# CONFIG_ARCH_NS9XXX is not set
# CONFIG_ARCH_W90X900 is not set
+# CONFIG_ARCH_NUC93X is not set
# CONFIG_ARCH_PNX4008 is not set
# CONFIG_ARCH_PXA is not set
# CONFIG_ARCH_MSM is not set
+# CONFIG_ARCH_SHMOBILE is not set
# CONFIG_ARCH_RPC is not set
# CONFIG_ARCH_SA1100 is not set
# CONFIG_ARCH_S3C2410 is not set
# CONFIG_ARCH_S3C64XX is not set
-CONFIG_ARCH_S5PC1XX=y
+# CONFIG_ARCH_S5P6440 is not set
+# CONFIG_ARCH_S5P6442 is not set
+CONFIG_ARCH_S5PC100=y
+# CONFIG_ARCH_S5PV210 is not set
# CONFIG_ARCH_SHARK is not set
# CONFIG_ARCH_LH7A40X is not set
# CONFIG_ARCH_U300 is not set
+# CONFIG_ARCH_U8500 is not set
+# CONFIG_ARCH_NOMADIK is not set
# CONFIG_ARCH_DAVINCI is not set
# CONFIG_ARCH_OMAP is not set
-CONFIG_PLAT_S3C=y
+# CONFIG_PLAT_SPEAR is not set
+CONFIG_PLAT_SAMSUNG=y
#
# Boot options
#
# CONFIG_S3C_BOOT_ERROR_RESET is not set
CONFIG_S3C_BOOT_UART_FORCE_FIFO=y
+CONFIG_S3C_LOWLEVEL_UART_PORT=0
+CONFIG_SAMSUNG_CLKSRC=y
+CONFIG_SAMSUNG_IRQ_VIC_TIMER=y
+CONFIG_SAMSUNG_IRQ_UART=y
+CONFIG_SAMSUNG_GPIOLIB_4BIT=y
+CONFIG_S3C_GPIO_CFG_S3C24XX=y
+CONFIG_S3C_GPIO_CFG_S3C64XX=y
+CONFIG_S3C_GPIO_PULL_UPDOWN=y
+CONFIG_S5P_GPIO_DRVSTR=y
+CONFIG_SAMSUNG_GPIO_EXTRA=0
+CONFIG_S3C_GPIO_SPACE=0
+CONFIG_S3C_GPIO_TRACK=y
+# CONFIG_S3C_ADC is not set
+CONFIG_S3C_DEV_HSMMC=y
+CONFIG_S3C_DEV_HSMMC1=y
+CONFIG_S3C_DEV_HSMMC2=y
+CONFIG_S3C_DEV_I2C1=y
+CONFIG_S3C_DEV_FB=y
+CONFIG_S3C_PL330_DMA=y
#
# Power management
#
-CONFIG_S3C_LOWLEVEL_UART_PORT=0
-CONFIG_S3C_GPIO_SPACE=0
-CONFIG_S3C_GPIO_TRACK=y
-CONFIG_S3C_GPIO_PULL_UPDOWN=y
-CONFIG_PLAT_S5PC1XX=y
-CONFIG_CPU_S5PC100_INIT=y
-CONFIG_CPU_S5PC100_CLOCK=y
-CONFIG_S5PC100_SETUP_I2C0=y
+CONFIG_PLAT_S5P=y
+CONFIG_S5P_EXT_INT=y
CONFIG_CPU_S5PC100=y
+CONFIG_S5PC100_SETUP_FB_24BPP=y
+CONFIG_S5PC100_SETUP_I2C1=y
+CONFIG_S5PC100_SETUP_SDHCI=y
+CONFIG_S5PC100_SETUP_SDHCI_GPIO=y
CONFIG_MACH_SMDKC100=y
#
@@ -206,7 +278,7 @@ CONFIG_CPU_32v6K=y
CONFIG_CPU_V7=y
CONFIG_CPU_32v7=y
CONFIG_CPU_ABRT_EV7=y
-CONFIG_CPU_PABRT_IFAR=y
+CONFIG_CPU_PABRT_V7=y
CONFIG_CPU_CACHE_V7=y
CONFIG_CPU_CACHE_VIPT=y
CONFIG_CPU_COPY_V6=y
@@ -224,11 +296,15 @@ CONFIG_ARM_THUMB=y
# CONFIG_CPU_DCACHE_DISABLE is not set
# CONFIG_CPU_BPREDICT_DISABLE is not set
CONFIG_HAS_TLS_REG=y
+CONFIG_ARM_L1_CACHE_SHIFT=6
+CONFIG_ARM_DMA_MEM_BUFFERABLE=y
+CONFIG_CPU_HAS_PMU=y
# CONFIG_ARM_ERRATA_430973 is not set
# CONFIG_ARM_ERRATA_458693 is not set
# CONFIG_ARM_ERRATA_460075 is not set
CONFIG_ARM_VIC=y
CONFIG_ARM_VIC_NR=2
+CONFIG_PL330=y
#
# Bus support
@@ -244,8 +320,11 @@ CONFIG_VMSPLIT_3G=y
# CONFIG_VMSPLIT_2G is not set
# CONFIG_VMSPLIT_1G is not set
CONFIG_PAGE_OFFSET=0xC0000000
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
# CONFIG_PREEMPT is not set
CONFIG_HZ=100
+# CONFIG_THUMB2_KERNEL is not set
CONFIG_AEABI=y
CONFIG_OABI_COMPAT=y
# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set
@@ -258,12 +337,11 @@ CONFIG_FLATMEM_MANUAL=y
CONFIG_FLATMEM=y
CONFIG_FLAT_NODE_MEM_MAP=y
CONFIG_PAGEFLAGS_EXTENDED=y
-CONFIG_SPLIT_PTLOCK_CPUS=4
+CONFIG_SPLIT_PTLOCK_CPUS=999999
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
CONFIG_VIRT_TO_BUS=y
-CONFIG_HAVE_MLOCK=y
-CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
CONFIG_ALIGNMENT_TRAP=y
# CONFIG_UACCESS_WITH_MEMCPY is not set
@@ -274,6 +352,7 @@ CONFIG_ALIGNMENT_TRAP=y
CONFIG_ZBOOT_ROM_TEXT=0
CONFIG_ZBOOT_ROM_BSS=0
CONFIG_CMDLINE="root=/dev/mtdblock2 rootfstype=cramfs init=/linuxrc console=ttySAC2,115200 mem=128M"
+# CONFIG_CMDLINE_FORCE is not set
# CONFIG_XIP_KERNEL is not set
# CONFIG_KEXEC is not set
@@ -317,6 +396,7 @@ CONFIG_ARCH_SUSPEND_POSSIBLE=y
# Generic Driver Options
#
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
CONFIG_FW_LOADER=y
@@ -331,6 +411,10 @@ CONFIG_BLK_DEV=y
# CONFIG_BLK_DEV_COW_COMMON is not set
CONFIG_BLK_DEV_LOOP=y
# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+
+#
+# DRBD disabled because PROC_FS, INET or CONNECTOR not selected
+#
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=16
CONFIG_BLK_DEV_RAM_SIZE=8192
@@ -338,9 +422,12 @@ CONFIG_BLK_DEV_RAM_SIZE=8192
# CONFIG_CDROM_PKTCDVD is not set
# CONFIG_MG_DISK is not set
CONFIG_MISC_DEVICES=y
+# CONFIG_AD525X_DPOT is not set
# CONFIG_ICS932S401 is not set
# CONFIG_ENCLOSURE_SERVICES is not set
# CONFIG_ISL29003 is not set
+# CONFIG_SENSORS_TSL2550 is not set
+# CONFIG_DS1682 is not set
# CONFIG_C2PORT is not set
#
@@ -350,18 +437,21 @@ CONFIG_EEPROM_AT24=y
# CONFIG_EEPROM_LEGACY is not set
# CONFIG_EEPROM_MAX6875 is not set
# CONFIG_EEPROM_93CX6 is not set
+# CONFIG_IWMC3200TOP is not set
CONFIG_HAVE_IDE=y
# CONFIG_IDE is not set
#
# SCSI device support
#
+CONFIG_SCSI_MOD=y
# CONFIG_RAID_ATTRS is not set
# CONFIG_SCSI is not set
# CONFIG_SCSI_DMA is not set
# CONFIG_SCSI_NETLINK is not set
# CONFIG_ATA is not set
# CONFIG_MD is not set
+# CONFIG_PHONE is not set
#
# Input device support
@@ -369,6 +459,7 @@ CONFIG_HAVE_IDE=y
CONFIG_INPUT=y
# CONFIG_INPUT_FF_MEMLESS is not set
# CONFIG_INPUT_POLLDEV is not set
+# CONFIG_INPUT_SPARSEKMAP is not set
#
# Userland interfaces
@@ -385,13 +476,19 @@ CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
# Input Device Drivers
#
CONFIG_INPUT_KEYBOARD=y
+# CONFIG_KEYBOARD_ADP5588 is not set
CONFIG_KEYBOARD_ATKBD=y
-# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_QT2160 is not set
# CONFIG_KEYBOARD_LKKBD is not set
-# CONFIG_KEYBOARD_XTKBD is not set
+# CONFIG_KEYBOARD_GPIO is not set
+# CONFIG_KEYBOARD_TCA6416 is not set
+# CONFIG_KEYBOARD_MATRIX is not set
+# CONFIG_KEYBOARD_MAX7359 is not set
# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_OPENCORES is not set
# CONFIG_KEYBOARD_STOWAWAY is not set
-# CONFIG_KEYBOARD_GPIO is not set
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_XTKBD is not set
CONFIG_INPUT_MOUSE=y
CONFIG_MOUSE_PS2=y
CONFIG_MOUSE_PS2_ALPS=y
@@ -399,6 +496,7 @@ CONFIG_MOUSE_PS2_LOGIPS2PP=y
CONFIG_MOUSE_PS2_SYNAPTICS=y
CONFIG_MOUSE_PS2_TRACKPOINT=y
# CONFIG_MOUSE_PS2_ELANTECH is not set
+# CONFIG_MOUSE_PS2_SENTELIC is not set
# CONFIG_MOUSE_PS2_TOUCHKIT is not set
# CONFIG_MOUSE_SERIAL is not set
# CONFIG_MOUSE_APPLETOUCH is not set
@@ -418,6 +516,7 @@ CONFIG_SERIO=y
CONFIG_SERIO_SERPORT=y
CONFIG_SERIO_LIBPS2=y
# CONFIG_SERIO_RAW is not set
+# CONFIG_SERIO_ALTERA_PS2 is not set
# CONFIG_GAMEPORT is not set
#
@@ -444,11 +543,16 @@ CONFIG_SERIAL_8250_RUNTIME_UARTS=4
# Non-8250 serial port support
#
CONFIG_SERIAL_SAMSUNG=y
-CONFIG_SERIAL_SAMSUNG_UARTS=3
+CONFIG_SERIAL_SAMSUNG_UARTS_4=y
+CONFIG_SERIAL_SAMSUNG_UARTS=4
# CONFIG_SERIAL_SAMSUNG_DEBUG is not set
CONFIG_SERIAL_SAMSUNG_CONSOLE=y
+CONFIG_SERIAL_S3C6400=y
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_TIMBERDALE is not set
+# CONFIG_SERIAL_ALTERA_JTAGUART is not set
+# CONFIG_SERIAL_ALTERA_UART is not set
CONFIG_UNIX98_PTYS=y
# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
CONFIG_LEGACY_PTYS=y
@@ -461,6 +565,7 @@ CONFIG_HW_RANDOM=y
# CONFIG_TCG_TPM is not set
CONFIG_I2C=y
CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_HELPER_AUTO=y
@@ -471,9 +576,11 @@ CONFIG_I2C_HELPER_AUTO=y
#
# I2C system bus drivers (mostly embedded / system-on-chip)
#
+# CONFIG_I2C_DESIGNWARE is not set
# CONFIG_I2C_GPIO is not set
# CONFIG_I2C_OCORES is not set
# CONFIG_I2C_SIMTEC is not set
+# CONFIG_I2C_XILINX is not set
#
# External I2C/SMBus adapter drivers
@@ -486,20 +593,15 @@ CONFIG_I2C_HELPER_AUTO=y
#
# CONFIG_I2C_PCA_PLATFORM is not set
# CONFIG_I2C_STUB is not set
-
-#
-# Miscellaneous I2C Chip support
-#
-# CONFIG_DS1682 is not set
-# CONFIG_SENSORS_PCF8574 is not set
-# CONFIG_PCF8575 is not set
-# CONFIG_SENSORS_PCA9539 is not set
-# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
# CONFIG_I2C_DEBUG_BUS is not set
-# CONFIG_I2C_DEBUG_CHIP is not set
# CONFIG_SPI is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
CONFIG_ARCH_REQUIRE_GPIOLIB=y
CONFIG_GPIOLIB=y
# CONFIG_DEBUG_GPIO is not set
@@ -508,13 +610,16 @@ CONFIG_GPIOLIB=y
#
# Memory mapped GPIO expanders:
#
+# CONFIG_GPIO_IT8761E is not set
#
# I2C GPIO expanders:
#
+# CONFIG_GPIO_MAX7300 is not set
# CONFIG_GPIO_MAX732X is not set
# CONFIG_GPIO_PCA953X is not set
# CONFIG_GPIO_PCF857X is not set
+# CONFIG_GPIO_ADP5588 is not set
#
# PCI GPIO expanders:
@@ -523,10 +628,19 @@ CONFIG_GPIOLIB=y
#
# SPI GPIO expanders:
#
+
+#
+# AC97 GPIO expanders:
+#
# CONFIG_W1 is not set
# CONFIG_POWER_SUPPLY is not set
CONFIG_HWMON=y
# CONFIG_HWMON_VID is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
+
+#
+# Native drivers
+#
# CONFIG_SENSORS_AD7414 is not set
# CONFIG_SENSORS_AD7418 is not set
# CONFIG_SENSORS_ADM1021 is not set
@@ -535,10 +649,11 @@ CONFIG_HWMON=y
# CONFIG_SENSORS_ADM1029 is not set
# CONFIG_SENSORS_ADM1031 is not set
# CONFIG_SENSORS_ADM9240 is not set
+# CONFIG_SENSORS_ADT7411 is not set
# CONFIG_SENSORS_ADT7462 is not set
# CONFIG_SENSORS_ADT7470 is not set
-# CONFIG_SENSORS_ADT7473 is not set
# CONFIG_SENSORS_ADT7475 is not set
+# CONFIG_SENSORS_ASC7621 is not set
# CONFIG_SENSORS_ATXP1 is not set
# CONFIG_SENSORS_DS1621 is not set
# CONFIG_SENSORS_F71805F is not set
@@ -549,6 +664,7 @@ CONFIG_HWMON=y
# CONFIG_SENSORS_GL520SM is not set
# CONFIG_SENSORS_IT87 is not set
# CONFIG_SENSORS_LM63 is not set
+# CONFIG_SENSORS_LM73 is not set
# CONFIG_SENSORS_LM75 is not set
# CONFIG_SENSORS_LM77 is not set
# CONFIG_SENSORS_LM78 is not set
@@ -573,8 +689,10 @@ CONFIG_HWMON=y
# CONFIG_SENSORS_SMSC47M192 is not set
# CONFIG_SENSORS_SMSC47B397 is not set
# CONFIG_SENSORS_ADS7828 is not set
+# CONFIG_SENSORS_AMC6821 is not set
# CONFIG_SENSORS_THMC50 is not set
# CONFIG_SENSORS_TMP401 is not set
+# CONFIG_SENSORS_TMP421 is not set
# CONFIG_SENSORS_VT1211 is not set
# CONFIG_SENSORS_W83781D is not set
# CONFIG_SENSORS_W83791D is not set
@@ -584,9 +702,8 @@ CONFIG_HWMON=y
# CONFIG_SENSORS_W83L786NG is not set
# CONFIG_SENSORS_W83627HF is not set
# CONFIG_SENSORS_W83627EHF is not set
-# CONFIG_HWMON_DEBUG_CHIP is not set
+# CONFIG_SENSORS_LIS3_I2C is not set
# CONFIG_THERMAL is not set
-# CONFIG_THERMAL_HWMON is not set
# CONFIG_WATCHDOG is not set
CONFIG_SSB_POSSIBLE=y
@@ -599,10 +716,12 @@ CONFIG_SSB_POSSIBLE=y
# Multifunction device drivers
#
# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_88PM860X is not set
# CONFIG_MFD_SM501 is not set
# CONFIG_MFD_ASIC3 is not set
# CONFIG_HTC_EGPIO is not set
# CONFIG_HTC_PASIC3 is not set
+# CONFIG_HTC_I2CPLD is not set
# CONFIG_TPS65010 is not set
# CONFIG_TWL4030_CORE is not set
# CONFIG_MFD_TMIO is not set
@@ -610,10 +729,15 @@ CONFIG_SSB_POSSIBLE=y
# CONFIG_MFD_TC6387XB is not set
# CONFIG_MFD_TC6393XB is not set
# CONFIG_PMIC_DA903X is not set
+# CONFIG_PMIC_ADP5520 is not set
+# CONFIG_MFD_MAX8925 is not set
# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM831X is not set
# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_WM8994 is not set
# CONFIG_MFD_PCF50633 is not set
# CONFIG_AB3100_CORE is not set
+# CONFIG_REGULATOR is not set
# CONFIG_MEDIA_SUPPORT is not set
#
@@ -637,7 +761,6 @@ CONFIG_DUMMY_CONSOLE=y
# CONFIG_SOUND is not set
CONFIG_HID_SUPPORT=y
CONFIG_HID=y
-CONFIG_HID_DEBUG=y
# CONFIG_HIDRAW is not set
# CONFIG_HID_PID is not set
@@ -680,13 +803,12 @@ CONFIG_SDIO_UART=y
CONFIG_MMC_SDHCI=y
# CONFIG_MMC_SDHCI_PLTFM is not set
# CONFIG_MEMSTICK is not set
-# CONFIG_ACCESSIBILITY is not set
# CONFIG_NEW_LEDS is not set
+# CONFIG_ACCESSIBILITY is not set
CONFIG_RTC_LIB=y
# CONFIG_RTC_CLASS is not set
# CONFIG_DMADEVICES is not set
# CONFIG_AUXDISPLAY is not set
-# CONFIG_REGULATOR is not set
# CONFIG_UIO is not set
# CONFIG_STAGING is not set
@@ -710,6 +832,7 @@ CONFIG_FS_POSIX_ACL=y
# CONFIG_XFS_FS is not set
# CONFIG_GFS2_FS is not set
# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
CONFIG_FILE_LOCKING=y
CONFIG_FSNOTIFY=y
CONFIG_DNOTIFY=y
@@ -758,6 +881,7 @@ CONFIG_MISC_FILESYSTEMS=y
# CONFIG_BEFS_FS is not set
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
+# CONFIG_LOGFS is not set
CONFIG_CRAMFS=y
# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
@@ -772,7 +896,6 @@ CONFIG_ROMFS_BACKED_BY_BLOCK=y
CONFIG_ROMFS_ON_BLOCK=y
# CONFIG_SYSV_FS is not set
# CONFIG_UFS_FS is not set
-# CONFIG_NILFS2_FS is not set
#
# Partition Types
@@ -789,6 +912,7 @@ CONFIG_ENABLE_WARN_DEPRECATED=y
CONFIG_ENABLE_MUST_CHECK=y
CONFIG_FRAME_WARN=1024
CONFIG_MAGIC_SYSRQ=y
+# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_UNUSED_SYMBOLS is not set
# CONFIG_DEBUG_FS is not set
# CONFIG_HEADERS_CHECK is not set
@@ -826,11 +950,13 @@ CONFIG_DEBUG_MEMORY_INIT=y
# CONFIG_DEBUG_LIST is not set
# CONFIG_DEBUG_SG is not set
# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_DEBUG_CREDENTIALS is not set
# CONFIG_BOOT_PRINTK_DELAY is not set
# CONFIG_RCU_TORTURE_TEST is not set
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
# CONFIG_BACKTRACE_SELF_TEST is not set
# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
# CONFIG_FAULT_INJECTION is not set
# CONFIG_LATENCYTOP is not set
CONFIG_SYSCTL_SYSCALL_CHECK=y
@@ -839,6 +965,7 @@ CONFIG_HAVE_FUNCTION_TRACER=y
CONFIG_TRACING_SUPPORT=y
CONFIG_FTRACE=y
# CONFIG_FUNCTION_TRACER is not set
+# CONFIG_IRQSOFF_TRACER is not set
# CONFIG_SCHED_TRACER is not set
# CONFIG_ENABLE_DEFAULT_TRACERS is not set
# CONFIG_BOOT_TRACER is not set
@@ -849,6 +976,7 @@ CONFIG_BRANCH_PROFILE_NONE=y
# CONFIG_KMEMTRACE is not set
# CONFIG_WORKQUEUE_TRACER is not set
# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_ATOMIC64_SELFTEST is not set
# CONFIG_SAMPLES is not set
CONFIG_HAVE_ARCH_KGDB=y
# CONFIG_KGDB is not set
@@ -857,8 +985,9 @@ CONFIG_DEBUG_USER=y
CONFIG_DEBUG_ERRORS=y
# CONFIG_DEBUG_STACK_USAGE is not set
CONFIG_DEBUG_LL=y
+# CONFIG_EARLY_PRINTK is not set
# CONFIG_DEBUG_ICEDCC is not set
-CONFIG_DEBUG_S3C_PORT=y
+# CONFIG_OC_ETM is not set
CONFIG_DEBUG_S3C_UART=0
#
@@ -867,7 +996,11 @@ CONFIG_DEBUG_S3C_UART=0
# CONFIG_KEYS is not set
# CONFIG_SECURITY is not set
# CONFIG_SECURITYFS is not set
-# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+# CONFIG_DEFAULT_SECURITY_SELINUX is not set
+# CONFIG_DEFAULT_SECURITY_SMACK is not set
+# CONFIG_DEFAULT_SECURITY_TOMOYO is not set
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_DEFAULT_SECURITY=""
# CONFIG_CRYPTO is not set
# CONFIG_BINARY_PRINTF is not set
@@ -884,8 +1017,10 @@ CONFIG_CRC32=y
# CONFIG_CRC7 is not set
# CONFIG_LIBCRC32C is not set
CONFIG_ZLIB_INFLATE=y
+CONFIG_LZO_DECOMPRESS=y
CONFIG_DECOMPRESS_GZIP=y
CONFIG_DECOMPRESS_BZIP2=y
CONFIG_DECOMPRESS_LZMA=y
+CONFIG_DECOMPRESS_LZO=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_DMA=y
diff --git a/arch/arm/configs/s5pc110_defconfig b/arch/arm/configs/s5pc110_defconfig
index 6ea636131ac8..c4de360b0f69 100644
--- a/arch/arm/configs/s5pc110_defconfig
+++ b/arch/arm/configs/s5pc110_defconfig
@@ -1,11 +1,14 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.33-rc4
-# Wed Feb 24 15:36:54 2010
+# Linux kernel version: 2.6.34
+# Wed May 26 19:04:37 2010
#
CONFIG_ARM=y
CONFIG_SYS_SUPPORTS_APM_EMULATION=y
CONFIG_GENERIC_GPIO=y
+CONFIG_GENERIC_TIME=y
+CONFIG_ARCH_USES_GETTIMEOFFSET=y
+CONFIG_HAVE_PROC_CPU=y
CONFIG_NO_IOPORT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_STACKTRACE_SUPPORT=y
@@ -17,6 +20,7 @@ CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_RWSEM_GENERIC_SPINLOCK=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_NEED_DMA_MAP_STATE=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
CONFIG_ARM_L1_CACHE_SHIFT_6=y
CONFIG_VECTORS_BASE=0xffff0000
@@ -33,6 +37,7 @@ CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_LOCALVERSION=""
CONFIG_LOCALVERSION_AUTO=y
CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_LZMA=y
CONFIG_HAVE_KERNEL_LZO=y
CONFIG_KERNEL_GZIP=y
# CONFIG_KERNEL_BZIP2 is not set
@@ -54,7 +59,6 @@ CONFIG_RCU_FANOUT=32
# CONFIG_TREE_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=17
-# CONFIG_GROUP_SCHED is not set
# CONFIG_CGROUPS is not set
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
@@ -90,10 +94,14 @@ CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
CONFIG_AIO=y
+CONFIG_HAVE_PERF_EVENTS=y
+CONFIG_PERF_USE_VMALLOC=y
#
# Kernel Performance Events And Counters
#
+# CONFIG_PERF_EVENTS is not set
+# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_SLUB_DEBUG=y
CONFIG_COMPAT_BRK=y
@@ -175,8 +183,11 @@ CONFIG_MMU=y
# CONFIG_ARCH_INTEGRATOR is not set
# CONFIG_ARCH_REALVIEW is not set
# CONFIG_ARCH_VERSATILE is not set
+# CONFIG_ARCH_VEXPRESS is not set
# CONFIG_ARCH_AT91 is not set
+# CONFIG_ARCH_BCMRING is not set
# CONFIG_ARCH_CLPS711X is not set
+# CONFIG_ARCH_CNS3XXX is not set
# CONFIG_ARCH_GEMINI is not set
# CONFIG_ARCH_EBSA110 is not set
# CONFIG_ARCH_EP93XX is not set
@@ -185,7 +196,6 @@ CONFIG_MMU=y
# CONFIG_ARCH_STMP3XXX is not set
# CONFIG_ARCH_NETX is not set
# CONFIG_ARCH_H720X is not set
-# CONFIG_ARCH_NOMADIK is not set
# CONFIG_ARCH_IOP13XX is not set
# CONFIG_ARCH_IOP32X is not set
# CONFIG_ARCH_IOP33X is not set
@@ -202,24 +212,27 @@ CONFIG_MMU=y
# CONFIG_ARCH_KS8695 is not set
# CONFIG_ARCH_NS9XXX is not set
# CONFIG_ARCH_W90X900 is not set
+# CONFIG_ARCH_NUC93X is not set
# CONFIG_ARCH_PNX4008 is not set
# CONFIG_ARCH_PXA is not set
# CONFIG_ARCH_MSM is not set
+# CONFIG_ARCH_SHMOBILE is not set
# CONFIG_ARCH_RPC is not set
# CONFIG_ARCH_SA1100 is not set
# CONFIG_ARCH_S3C2410 is not set
# CONFIG_ARCH_S3C64XX is not set
# CONFIG_ARCH_S5P6440 is not set
# CONFIG_ARCH_S5P6442 is not set
-# CONFIG_ARCH_S5PC1XX is not set
+# CONFIG_ARCH_S5PC100 is not set
CONFIG_ARCH_S5PV210=y
# CONFIG_ARCH_SHARK is not set
# CONFIG_ARCH_LH7A40X is not set
# CONFIG_ARCH_U300 is not set
+# CONFIG_ARCH_U8500 is not set
+# CONFIG_ARCH_NOMADIK is not set
# CONFIG_ARCH_DAVINCI is not set
# CONFIG_ARCH_OMAP is not set
-# CONFIG_ARCH_BCMRING is not set
-# CONFIG_ARCH_U8500 is not set
+# CONFIG_PLAT_SPEAR is not set
CONFIG_PLAT_SAMSUNG=y
#
@@ -235,16 +248,22 @@ CONFIG_SAMSUNG_GPIOLIB_4BIT=y
CONFIG_S3C_GPIO_CFG_S3C24XX=y
CONFIG_S3C_GPIO_CFG_S3C64XX=y
CONFIG_S3C_GPIO_PULL_UPDOWN=y
+CONFIG_S5P_GPIO_DRVSTR=y
CONFIG_SAMSUNG_GPIO_EXTRA=0
CONFIG_S3C_GPIO_SPACE=0
CONFIG_S3C_GPIO_TRACK=y
# CONFIG_S3C_ADC is not set
+CONFIG_S3C_DEV_WDT=y
+CONFIG_S3C_PL330_DMA=y
#
# Power management
#
CONFIG_PLAT_S5P=y
+CONFIG_S5P_EXT_INT=y
CONFIG_CPU_S5PV210=y
+# CONFIG_MACH_AQUILA is not set
+# CONFIG_MACH_GONI is not set
# CONFIG_MACH_SMDKV210 is not set
CONFIG_MACH_SMDKC110=y
@@ -274,11 +293,14 @@ CONFIG_ARM_THUMB=y
# CONFIG_CPU_BPREDICT_DISABLE is not set
CONFIG_HAS_TLS_REG=y
CONFIG_ARM_L1_CACHE_SHIFT=6
+CONFIG_ARM_DMA_MEM_BUFFERABLE=y
+CONFIG_CPU_HAS_PMU=y
# CONFIG_ARM_ERRATA_430973 is not set
# CONFIG_ARM_ERRATA_458693 is not set
# CONFIG_ARM_ERRATA_460075 is not set
CONFIG_ARM_VIC=y
CONFIG_ARM_VIC_NR=2
+CONFIG_PL330=y
#
# Bus support
@@ -327,6 +349,7 @@ CONFIG_ALIGNMENT_TRAP=y
CONFIG_ZBOOT_ROM_TEXT=0
CONFIG_ZBOOT_ROM_BSS=0
CONFIG_CMDLINE="root=/dev/ram0 rw ramdisk=8192 initrd=0x20800000,8M console=ttySAC1,115200 init=/linuxrc"
+# CONFIG_CMDLINE_FORCE is not set
# CONFIG_XIP_KERNEL is not set
# CONFIG_KEXEC is not set
@@ -404,6 +427,7 @@ CONFIG_HAVE_IDE=y
#
# SCSI device support
#
+CONFIG_SCSI_MOD=y
# CONFIG_RAID_ATTRS is not set
CONFIG_SCSI=y
CONFIG_SCSI_DMA=y
@@ -472,6 +496,7 @@ CONFIG_INPUT_EVDEV=y
CONFIG_INPUT_TOUCHSCREEN=y
# CONFIG_TOUCHSCREEN_AD7879 is not set
# CONFIG_TOUCHSCREEN_DYNAPRO is not set
+# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set
# CONFIG_TOUCHSCREEN_FUJITSU is not set
# CONFIG_TOUCHSCREEN_GUNZE is not set
# CONFIG_TOUCHSCREEN_ELO is not set
@@ -526,6 +551,9 @@ CONFIG_SERIAL_SAMSUNG_CONSOLE=y
CONFIG_SERIAL_S5PV210=y
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_TIMBERDALE is not set
+# CONFIG_SERIAL_ALTERA_JTAGUART is not set
+# CONFIG_SERIAL_ALTERA_UART is not set
CONFIG_UNIX98_PTYS=y
# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
CONFIG_LEGACY_PTYS=y
@@ -551,6 +579,7 @@ CONFIG_GPIOLIB=y
#
# Memory mapped GPIO expanders:
#
+# CONFIG_GPIO_IT8761E is not set
#
# I2C GPIO expanders:
@@ -572,6 +601,7 @@ CONFIG_GPIOLIB=y
# CONFIG_HWMON is not set
# CONFIG_THERMAL is not set
# CONFIG_WATCHDOG is not set
+CONFIG_HAVE_S3C2410_WATCHDOG=y
CONFIG_SSB_POSSIBLE=y
#
@@ -624,10 +654,6 @@ CONFIG_RTC_LIB=y
# CONFIG_DMADEVICES is not set
# CONFIG_AUXDISPLAY is not set
# CONFIG_UIO is not set
-
-#
-# TI VLYNQ
-#
# CONFIG_STAGING is not set
#
@@ -696,6 +722,7 @@ CONFIG_MISC_FILESYSTEMS=y
# CONFIG_BEFS_FS is not set
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
+# CONFIG_LOGFS is not set
CONFIG_CRAMFS=y
# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
@@ -835,6 +862,8 @@ CONFIG_HAVE_FUNCTION_TRACER=y
CONFIG_TRACING_SUPPORT=y
CONFIG_FTRACE=y
# CONFIG_FUNCTION_TRACER is not set
+# CONFIG_IRQSOFF_TRACER is not set
+# CONFIG_PREEMPT_TRACER is not set
# CONFIG_SCHED_TRACER is not set
# CONFIG_ENABLE_DEFAULT_TRACERS is not set
# CONFIG_BOOT_TRACER is not set
@@ -845,6 +874,7 @@ CONFIG_BRANCH_PROFILE_NONE=y
# CONFIG_KMEMTRACE is not set
# CONFIG_WORKQUEUE_TRACER is not set
# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_ATOMIC64_SELFTEST is not set
# CONFIG_SAMPLES is not set
CONFIG_HAVE_ARCH_KGDB=y
# CONFIG_KGDB is not set
diff --git a/arch/arm/configs/s5pv210_defconfig b/arch/arm/configs/s5pv210_defconfig
index 3f7d47491b54..e2f5bce29828 100644
--- a/arch/arm/configs/s5pv210_defconfig
+++ b/arch/arm/configs/s5pv210_defconfig
@@ -1,11 +1,14 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.33-rc4
-# Wed Feb 24 15:36:16 2010
+# Linux kernel version: 2.6.34
+# Wed May 26 19:04:39 2010
#
CONFIG_ARM=y
CONFIG_SYS_SUPPORTS_APM_EMULATION=y
CONFIG_GENERIC_GPIO=y
+CONFIG_GENERIC_TIME=y
+CONFIG_ARCH_USES_GETTIMEOFFSET=y
+CONFIG_HAVE_PROC_CPU=y
CONFIG_NO_IOPORT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_STACKTRACE_SUPPORT=y
@@ -17,6 +20,7 @@ CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_RWSEM_GENERIC_SPINLOCK=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_NEED_DMA_MAP_STATE=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
CONFIG_ARM_L1_CACHE_SHIFT_6=y
CONFIG_VECTORS_BASE=0xffff0000
@@ -33,6 +37,7 @@ CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_LOCALVERSION=""
CONFIG_LOCALVERSION_AUTO=y
CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_LZMA=y
CONFIG_HAVE_KERNEL_LZO=y
CONFIG_KERNEL_GZIP=y
# CONFIG_KERNEL_BZIP2 is not set
@@ -54,7 +59,6 @@ CONFIG_RCU_FANOUT=32
# CONFIG_TREE_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=17
-# CONFIG_GROUP_SCHED is not set
# CONFIG_CGROUPS is not set
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
@@ -90,10 +94,14 @@ CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
CONFIG_AIO=y
+CONFIG_HAVE_PERF_EVENTS=y
+CONFIG_PERF_USE_VMALLOC=y
#
# Kernel Performance Events And Counters
#
+# CONFIG_PERF_EVENTS is not set
+# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_SLUB_DEBUG=y
CONFIG_COMPAT_BRK=y
@@ -175,8 +183,11 @@ CONFIG_MMU=y
# CONFIG_ARCH_INTEGRATOR is not set
# CONFIG_ARCH_REALVIEW is not set
# CONFIG_ARCH_VERSATILE is not set
+# CONFIG_ARCH_VEXPRESS is not set
# CONFIG_ARCH_AT91 is not set
+# CONFIG_ARCH_BCMRING is not set
# CONFIG_ARCH_CLPS711X is not set
+# CONFIG_ARCH_CNS3XXX is not set
# CONFIG_ARCH_GEMINI is not set
# CONFIG_ARCH_EBSA110 is not set
# CONFIG_ARCH_EP93XX is not set
@@ -185,7 +196,6 @@ CONFIG_MMU=y
# CONFIG_ARCH_STMP3XXX is not set
# CONFIG_ARCH_NETX is not set
# CONFIG_ARCH_H720X is not set
-# CONFIG_ARCH_NOMADIK is not set
# CONFIG_ARCH_IOP13XX is not set
# CONFIG_ARCH_IOP32X is not set
# CONFIG_ARCH_IOP33X is not set
@@ -202,24 +212,27 @@ CONFIG_MMU=y
# CONFIG_ARCH_KS8695 is not set
# CONFIG_ARCH_NS9XXX is not set
# CONFIG_ARCH_W90X900 is not set
+# CONFIG_ARCH_NUC93X is not set
# CONFIG_ARCH_PNX4008 is not set
# CONFIG_ARCH_PXA is not set
# CONFIG_ARCH_MSM is not set
+# CONFIG_ARCH_SHMOBILE is not set
# CONFIG_ARCH_RPC is not set
# CONFIG_ARCH_SA1100 is not set
# CONFIG_ARCH_S3C2410 is not set
# CONFIG_ARCH_S3C64XX is not set
# CONFIG_ARCH_S5P6440 is not set
# CONFIG_ARCH_S5P6442 is not set
-# CONFIG_ARCH_S5PC1XX is not set
+# CONFIG_ARCH_S5PC100 is not set
CONFIG_ARCH_S5PV210=y
# CONFIG_ARCH_SHARK is not set
# CONFIG_ARCH_LH7A40X is not set
# CONFIG_ARCH_U300 is not set
+# CONFIG_ARCH_U8500 is not set
+# CONFIG_ARCH_NOMADIK is not set
# CONFIG_ARCH_DAVINCI is not set
# CONFIG_ARCH_OMAP is not set
-# CONFIG_ARCH_BCMRING is not set
-# CONFIG_ARCH_U8500 is not set
+# CONFIG_PLAT_SPEAR is not set
CONFIG_PLAT_SAMSUNG=y
#
@@ -235,16 +248,24 @@ CONFIG_SAMSUNG_GPIOLIB_4BIT=y
CONFIG_S3C_GPIO_CFG_S3C24XX=y
CONFIG_S3C_GPIO_CFG_S3C64XX=y
CONFIG_S3C_GPIO_PULL_UPDOWN=y
+CONFIG_S5P_GPIO_DRVSTR=y
CONFIG_SAMSUNG_GPIO_EXTRA=0
CONFIG_S3C_GPIO_SPACE=0
CONFIG_S3C_GPIO_TRACK=y
# CONFIG_S3C_ADC is not set
+CONFIG_S3C_DEV_WDT=y
+CONFIG_SAMSUNG_DEV_ADC=y
+CONFIG_SAMSUNG_DEV_TS=y
+CONFIG_S3C_PL330_DMA=y
#
# Power management
#
CONFIG_PLAT_S5P=y
+CONFIG_S5P_EXT_INT=y
CONFIG_CPU_S5PV210=y
+# CONFIG_MACH_AQUILA is not set
+# CONFIG_MACH_GONI is not set
CONFIG_MACH_SMDKV210=y
# CONFIG_MACH_SMDKC110 is not set
@@ -274,11 +295,14 @@ CONFIG_ARM_THUMB=y
# CONFIG_CPU_BPREDICT_DISABLE is not set
CONFIG_HAS_TLS_REG=y
CONFIG_ARM_L1_CACHE_SHIFT=6
+CONFIG_ARM_DMA_MEM_BUFFERABLE=y
+CONFIG_CPU_HAS_PMU=y
# CONFIG_ARM_ERRATA_430973 is not set
# CONFIG_ARM_ERRATA_458693 is not set
# CONFIG_ARM_ERRATA_460075 is not set
CONFIG_ARM_VIC=y
CONFIG_ARM_VIC_NR=2
+CONFIG_PL330=y
#
# Bus support
@@ -327,6 +351,7 @@ CONFIG_ALIGNMENT_TRAP=y
CONFIG_ZBOOT_ROM_TEXT=0
CONFIG_ZBOOT_ROM_BSS=0
CONFIG_CMDLINE="root=/dev/ram0 rw ramdisk=8192 initrd=0x20800000,8M console=ttySAC1,115200 init=/linuxrc"
+# CONFIG_CMDLINE_FORCE is not set
# CONFIG_XIP_KERNEL is not set
# CONFIG_KEXEC is not set
@@ -404,6 +429,7 @@ CONFIG_HAVE_IDE=y
#
# SCSI device support
#
+CONFIG_SCSI_MOD=y
# CONFIG_RAID_ATTRS is not set
CONFIG_SCSI=y
CONFIG_SCSI_DMA=y
@@ -472,7 +498,9 @@ CONFIG_INPUT_EVDEV=y
CONFIG_INPUT_TOUCHSCREEN=y
# CONFIG_TOUCHSCREEN_AD7879 is not set
# CONFIG_TOUCHSCREEN_DYNAPRO is not set
+# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set
# CONFIG_TOUCHSCREEN_FUJITSU is not set
+# CONFIG_TOUCHSCREEN_S3C2410 is not set
# CONFIG_TOUCHSCREEN_GUNZE is not set
# CONFIG_TOUCHSCREEN_ELO is not set
# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set
@@ -526,6 +554,9 @@ CONFIG_SERIAL_SAMSUNG_CONSOLE=y
CONFIG_SERIAL_S5PV210=y
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_TIMBERDALE is not set
+# CONFIG_SERIAL_ALTERA_JTAGUART is not set
+# CONFIG_SERIAL_ALTERA_UART is not set
CONFIG_UNIX98_PTYS=y
# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
CONFIG_LEGACY_PTYS=y
@@ -551,6 +582,7 @@ CONFIG_GPIOLIB=y
#
# Memory mapped GPIO expanders:
#
+# CONFIG_GPIO_IT8761E is not set
#
# I2C GPIO expanders:
@@ -572,6 +604,7 @@ CONFIG_GPIOLIB=y
# CONFIG_HWMON is not set
# CONFIG_THERMAL is not set
# CONFIG_WATCHDOG is not set
+CONFIG_HAVE_S3C2410_WATCHDOG=y
CONFIG_SSB_POSSIBLE=y
#
@@ -624,10 +657,6 @@ CONFIG_RTC_LIB=y
# CONFIG_DMADEVICES is not set
# CONFIG_AUXDISPLAY is not set
# CONFIG_UIO is not set
-
-#
-# TI VLYNQ
-#
# CONFIG_STAGING is not set
#
@@ -696,6 +725,7 @@ CONFIG_MISC_FILESYSTEMS=y
# CONFIG_BEFS_FS is not set
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
+# CONFIG_LOGFS is not set
CONFIG_CRAMFS=y
# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
@@ -835,6 +865,8 @@ CONFIG_HAVE_FUNCTION_TRACER=y
CONFIG_TRACING_SUPPORT=y
CONFIG_FTRACE=y
# CONFIG_FUNCTION_TRACER is not set
+# CONFIG_IRQSOFF_TRACER is not set
+# CONFIG_PREEMPT_TRACER is not set
# CONFIG_SCHED_TRACER is not set
# CONFIG_ENABLE_DEFAULT_TRACERS is not set
# CONFIG_BOOT_TRACER is not set
@@ -845,6 +877,7 @@ CONFIG_BRANCH_PROFILE_NONE=y
# CONFIG_KMEMTRACE is not set
# CONFIG_WORKQUEUE_TRACER is not set
# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_ATOMIC64_SELFTEST is not set
# CONFIG_SAMPLES is not set
CONFIG_HAVE_ARCH_KGDB=y
# CONFIG_KGDB is not set
diff --git a/arch/arm/configs/spear300_defconfig b/arch/arm/configs/spear300_defconfig
new file mode 100644
index 000000000000..35e64d1cb750
--- /dev/null
+++ b/arch/arm/configs/spear300_defconfig
@@ -0,0 +1,773 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.32
+# Tue Mar 23 14:36:23 2010
+#
+CONFIG_ARM=y
+CONFIG_SYS_SUPPORTS_APM_EMULATION=y
+CONFIG_GENERIC_GPIO=y
+CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_HAVE_LATENCYTOP_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_HARDIRQS_SW_RESEND=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
+CONFIG_VECTORS_BASE=0xffff0000
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+CONFIG_BSD_PROCESS_ACCT=y
+# CONFIG_BSD_PROCESS_ACCT_V3 is not set
+
+#
+# RCU Subsystem
+#
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_IKCONFIG is not set
+CONFIG_LOG_BUF_SHIFT=17
+# CONFIG_GROUP_SCHED is not set
+# CONFIG_CGROUPS is not set
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
+# CONFIG_RELAY is not set
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_USER_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+CONFIG_RD_BZIP2=y
+CONFIG_RD_LZMA=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
+# CONFIG_EMBEDDED is not set
+CONFIG_UID16=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+CONFIG_KALLSYMS_EXTRA_PASS=y
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_AIO=y
+
+#
+# Kernel Performance Events And Counters
+#
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_SLUB_DEBUG=y
+CONFIG_COMPAT_BRK=y
+# CONFIG_SLAB is not set
+CONFIG_SLUB=y
+# CONFIG_SLOB is not set
+# CONFIG_PROFILING is not set
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_KPROBES is not set
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_HAVE_CLK=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
+# CONFIG_SLOW_WORK is not set
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+CONFIG_MODVERSIONS=y
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_BLOCK=y
+CONFIG_LBDAF=y
+CONFIG_BLK_DEV_BSG=y
+# CONFIG_BLK_DEV_INTEGRITY is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+# CONFIG_DEFAULT_AS is not set
+# CONFIG_DEFAULT_DEADLINE is not set
+CONFIG_DEFAULT_CFQ=y
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="cfq"
+# CONFIG_FREEZER is not set
+
+#
+# System Type
+#
+CONFIG_MMU=y
+# CONFIG_ARCH_AAEC2000 is not set
+# CONFIG_ARCH_INTEGRATOR is not set
+# CONFIG_ARCH_REALVIEW is not set
+# CONFIG_ARCH_VERSATILE is not set
+# CONFIG_ARCH_AT91 is not set
+# CONFIG_ARCH_CLPS711X is not set
+# CONFIG_ARCH_GEMINI is not set
+# CONFIG_ARCH_EBSA110 is not set
+# CONFIG_ARCH_EP93XX is not set
+# CONFIG_ARCH_FOOTBRIDGE is not set
+# CONFIG_ARCH_MXC is not set
+# CONFIG_ARCH_STMP3XXX is not set
+# CONFIG_ARCH_NETX is not set
+# CONFIG_ARCH_H720X is not set
+# CONFIG_ARCH_NOMADIK is not set
+# CONFIG_ARCH_IOP13XX is not set
+# CONFIG_ARCH_IOP32X is not set
+# CONFIG_ARCH_IOP33X is not set
+# CONFIG_ARCH_IXP23XX is not set
+# CONFIG_ARCH_IXP2000 is not set
+# CONFIG_ARCH_IXP4XX is not set
+# CONFIG_ARCH_L7200 is not set
+# CONFIG_ARCH_KIRKWOOD is not set
+# CONFIG_ARCH_LOKI is not set
+# CONFIG_ARCH_MV78XX0 is not set
+# CONFIG_ARCH_ORION5X is not set
+# CONFIG_ARCH_MMP is not set
+# CONFIG_ARCH_KS8695 is not set
+# CONFIG_ARCH_NS9XXX is not set
+# CONFIG_ARCH_W90X900 is not set
+# CONFIG_ARCH_PNX4008 is not set
+# CONFIG_ARCH_PXA is not set
+# CONFIG_ARCH_MSM is not set
+# CONFIG_ARCH_RPC is not set
+# CONFIG_ARCH_SA1100 is not set
+# CONFIG_ARCH_S3C2410 is not set
+# CONFIG_ARCH_S3C64XX is not set
+# CONFIG_ARCH_S5PC1XX is not set
+# CONFIG_ARCH_SHARK is not set
+# CONFIG_ARCH_LH7A40X is not set
+# CONFIG_ARCH_U300 is not set
+# CONFIG_ARCH_DAVINCI is not set
+# CONFIG_ARCH_OMAP is not set
+# CONFIG_ARCH_BCMRING is not set
+CONFIG_PLAT_SPEAR=y
+CONFIG_ARCH_SPEAR3XX=y
+# CONFIG_ARCH_SPEAR6XX is not set
+CONFIG_MACH_SPEAR300=y
+# CONFIG_MACH_SPEAR310 is not set
+# CONFIG_MACH_SPEAR320 is not set
+CONFIG_BOARD_SPEAR300_EVB=y
+
+#
+# Processor Type
+#
+CONFIG_CPU_32=y
+CONFIG_CPU_ARM926T=y
+CONFIG_CPU_32v5=y
+CONFIG_CPU_ABRT_EV5TJ=y
+CONFIG_CPU_PABRT_LEGACY=y
+CONFIG_CPU_CACHE_VIVT=y
+CONFIG_CPU_COPY_V4WB=y
+CONFIG_CPU_TLB_V4WBI=y
+CONFIG_CPU_CP15=y
+CONFIG_CPU_CP15_MMU=y
+
+#
+# Processor Features
+#
+CONFIG_ARM_THUMB=y
+# CONFIG_CPU_ICACHE_DISABLE is not set
+# CONFIG_CPU_DCACHE_DISABLE is not set
+# CONFIG_CPU_DCACHE_WRITETHROUGH is not set
+# CONFIG_CPU_CACHE_ROUND_ROBIN is not set
+CONFIG_ARM_L1_CACHE_SHIFT=5
+CONFIG_ARM_VIC=y
+CONFIG_ARM_VIC_NR=2
+CONFIG_COMMON_CLKDEV=y
+
+#
+# Bus support
+#
+CONFIG_ARM_AMBA=y
+# CONFIG_PCI_SYSCALL is not set
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_PCCARD is not set
+
+#
+# Kernel Features
+#
+# CONFIG_NO_HZ is not set
+# CONFIG_HIGH_RES_TIMERS is not set
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+CONFIG_VMSPLIT_3G=y
+# CONFIG_VMSPLIT_2G is not set
+# CONFIG_VMSPLIT_1G is not set
+CONFIG_PAGE_OFFSET=0xC0000000
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+# CONFIG_PREEMPT is not set
+CONFIG_HZ=100
+# CONFIG_AEABI is not set
+# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set
+# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set
+# CONFIG_HIGHMEM is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4096
+# CONFIG_PHYS_ADDR_T_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=0
+CONFIG_VIRT_TO_BUS=y
+CONFIG_HAVE_MLOCK=y
+CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+CONFIG_ALIGNMENT_TRAP=y
+# CONFIG_UACCESS_WITH_MEMCPY is not set
+
+#
+# Boot options
+#
+CONFIG_ZBOOT_ROM_TEXT=0
+CONFIG_ZBOOT_ROM_BSS=0
+CONFIG_CMDLINE=""
+# CONFIG_XIP_KERNEL is not set
+# CONFIG_KEXEC is not set
+
+#
+# CPU Power Management
+#
+# CONFIG_CPU_IDLE is not set
+
+#
+# Floating point emulation
+#
+
+#
+# At least one emulation must be selected
+#
+# CONFIG_FPE_NWFPE is not set
+# CONFIG_FPE_FASTFPE is not set
+# CONFIG_VFP is not set
+
+#
+# Userspace binary formats
+#
+CONFIG_BINFMT_ELF=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_HAVE_AOUT=y
+# CONFIG_BINFMT_AOUT is not set
+CONFIG_BINFMT_MISC=y
+
+#
+# Power management options
+#
+# CONFIG_PM is not set
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+# CONFIG_NET is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=y
+CONFIG_FIRMWARE_IN_KERNEL=y
+CONFIG_EXTRA_FIRMWARE=""
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_MTD is not set
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_COW_COMMON is not set
+# CONFIG_BLK_DEV_LOOP is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=16384
+# CONFIG_BLK_DEV_XIP is not set
+# CONFIG_CDROM_PKTCDVD is not set
+CONFIG_MISC_DEVICES=y
+# CONFIG_ENCLOSURE_SERVICES is not set
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+# CONFIG_EEPROM_93CX6 is not set
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+# CONFIG_SCSI is not set
+# CONFIG_SCSI_DMA is not set
+# CONFIG_SCSI_NETLINK is not set
+# CONFIG_ATA is not set
+# CONFIG_MD is not set
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+CONFIG_INPUT_FF_MEMLESS=y
+# CONFIG_INPUT_POLLDEV is not set
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_EVDEV is not set
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TABLET is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Hardware I/O ports
+#
+CONFIG_SERIO=y
+CONFIG_SERIO_SERPORT=y
+# CONFIG_SERIO_AMBAKMI is not set
+# CONFIG_SERIO_RAW is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_VT_HW_CONSOLE_BINDING is not set
+CONFIG_DEVKMEM=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+# CONFIG_SERIAL_AMBA_PL010 is not set
+CONFIG_SERIAL_AMBA_PL011=y
+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_IPMI_HANDLER is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_R3964 is not set
+CONFIG_RAW_DRIVER=y
+CONFIG_MAX_RAW_DEVS=8192
+# CONFIG_TCG_TPM is not set
+# CONFIG_I2C is not set
+# CONFIG_SPI is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
+CONFIG_ARCH_REQUIRE_GPIOLIB=y
+CONFIG_GPIOLIB=y
+# CONFIG_DEBUG_GPIO is not set
+CONFIG_GPIO_SYSFS=y
+
+#
+# Memory mapped GPIO expanders:
+#
+CONFIG_GPIO_PL061=y
+
+#
+# I2C GPIO expanders:
+#
+
+#
+# PCI GPIO expanders:
+#
+
+#
+# SPI GPIO expanders:
+#
+
+#
+# AC97 GPIO expanders:
+#
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+# CONFIG_HWMON is not set
+# CONFIG_THERMAL is not set
+# CONFIG_WATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+# CONFIG_SSB is not set
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_MFD_ASIC3 is not set
+# CONFIG_HTC_EGPIO is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_T7L66XB is not set
+# CONFIG_MFD_TC6387XB is not set
+# CONFIG_MFD_TC6393XB is not set
+# CONFIG_REGULATOR is not set
+# CONFIG_MEDIA_SUPPORT is not set
+
+#
+# Graphics support
+#
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+# CONFIG_FB is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
+
+#
+# Console display driver support
+#
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_DUMMY_CONSOLE=y
+# CONFIG_SOUND is not set
+# CONFIG_HID_SUPPORT is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_MMC is not set
+# CONFIG_MEMSTICK is not set
+# CONFIG_NEW_LEDS is not set
+# CONFIG_ACCESSIBILITY is not set
+CONFIG_RTC_LIB=y
+# CONFIG_RTC_CLASS is not set
+# CONFIG_DMADEVICES is not set
+# CONFIG_AUXDISPLAY is not set
+# CONFIG_UIO is not set
+
+#
+# TI VLYNQ
+#
+# CONFIG_STAGING is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+# CONFIG_EXT2_FS_POSIX_ACL is not set
+CONFIG_EXT2_FS_SECURITY=y
+# CONFIG_EXT2_FS_XIP is not set
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT3_FS_XATTR=y
+# CONFIG_EXT3_FS_POSIX_ACL is not set
+CONFIG_EXT3_FS_SECURITY=y
+# CONFIG_EXT4_FS is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_FS_MBCACHE=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
+CONFIG_DNOTIFY=y
+# CONFIG_INOTIFY is not set
+CONFIG_INOTIFY_USER=y
+# CONFIG_QUOTA is not set
+# CONFIG_AUTOFS_FS is not set
+CONFIG_AUTOFS4_FS=m
+# CONFIG_FUSE_FS is not set
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="ascii"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_HUGETLB_PAGE is not set
+# CONFIG_CONFIGFS_FS is not set
+CONFIG_MISC_FILESYSTEMS=y
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_SQUASHFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_OSF_PARTITION is not set
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
+# CONFIG_MAC_PARTITION is not set
+CONFIG_MSDOS_PARTITION=y
+# CONFIG_BSD_DISKLABEL is not set
+# CONFIG_MINIX_SUBPARTITION is not set
+# CONFIG_SOLARIS_X86_PARTITION is not set
+# CONFIG_UNIXWARE_DISKLABEL is not set
+# CONFIG_LDM_PARTITION is not set
+# CONFIG_SGI_PARTITION is not set
+# CONFIG_ULTRIX_PARTITION is not set
+# CONFIG_SUN_PARTITION is not set
+# CONFIG_KARMA_PARTITION is not set
+# CONFIG_EFI_PARTITION is not set
+# CONFIG_SYSV68_PARTITION is not set
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NLS_CODEPAGE_437=y
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+CONFIG_NLS_ASCII=m
+# CONFIG_NLS_ISO8859_1 is not set
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_UTF8 is not set
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+CONFIG_ENABLE_WARN_DEPRECATED=y
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_STRIP_ASM_SYMS is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_DEBUG_FS=y
+# CONFIG_HEADERS_CHECK is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_SHIRQ is not set
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+CONFIG_DETECT_HUNG_TASK=y
+# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
+CONFIG_SCHED_DEBUG=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_TIMER_STATS is not set
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_SLUB_DEBUG_ON is not set
+# CONFIG_SLUB_STATS is not set
+# CONFIG_DEBUG_KMEMLEAK is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+CONFIG_DEBUG_SPINLOCK=y
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_LOCK_STAT is not set
+CONFIG_DEBUG_SPINLOCK_SLEEP=y
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+# CONFIG_DEBUG_KOBJECT is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+CONFIG_DEBUG_INFO=y
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+CONFIG_DEBUG_MEMORY_INIT=y
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_DEBUG_SG is not set
+# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_DEBUG_CREDENTIALS is not set
+CONFIG_FRAME_POINTER=y
+# CONFIG_BOOT_PRINTK_DELAY is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+CONFIG_RCU_CPU_STALL_DETECTOR=y
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
+# CONFIG_FAULT_INJECTION is not set
+# CONFIG_LATENCYTOP is not set
+# CONFIG_SYSCTL_SYSCALL_CHECK is not set
+# CONFIG_PAGE_POISONING is not set
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_TRACING_SUPPORT=y
+CONFIG_FTRACE=y
+# CONFIG_FUNCTION_TRACER is not set
+# CONFIG_IRQSOFF_TRACER is not set
+# CONFIG_SCHED_TRACER is not set
+# CONFIG_ENABLE_DEFAULT_TRACERS is not set
+# CONFIG_BOOT_TRACER is not set
+CONFIG_BRANCH_PROFILE_NONE=y
+# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
+# CONFIG_PROFILE_ALL_BRANCHES is not set
+# CONFIG_STACK_TRACER is not set
+# CONFIG_KMEMTRACE is not set
+# CONFIG_WORKQUEUE_TRACER is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_DYNAMIC_DEBUG is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_KGDB is not set
+# CONFIG_DEBUG_USER is not set
+# CONFIG_DEBUG_ERRORS is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+# CONFIG_DEBUG_LL is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
+# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+# CONFIG_CRYPTO is not set
+# CONFIG_BINARY_PRINTF is not set
+
+#
+# Library routines
+#
+CONFIG_GENERIC_FIND_LAST_BIT=y
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
+# CONFIG_CRC_T10DIF is not set
+# CONFIG_CRC_ITU_T is not set
+# CONFIG_CRC32 is not set
+# CONFIG_CRC7 is not set
+# CONFIG_LIBCRC32C is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_DECOMPRESS_GZIP=y
+CONFIG_DECOMPRESS_BZIP2=y
+CONFIG_DECOMPRESS_LZMA=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
diff --git a/arch/arm/configs/spear310_defconfig b/arch/arm/configs/spear310_defconfig
new file mode 100644
index 000000000000..cbbfd290bba8
--- /dev/null
+++ b/arch/arm/configs/spear310_defconfig
@@ -0,0 +1,775 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.32
+# Tue Mar 23 14:37:01 2010
+#
+CONFIG_ARM=y
+CONFIG_SYS_SUPPORTS_APM_EMULATION=y
+CONFIG_GENERIC_GPIO=y
+CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_HAVE_LATENCYTOP_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_HARDIRQS_SW_RESEND=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
+CONFIG_VECTORS_BASE=0xffff0000
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+CONFIG_BSD_PROCESS_ACCT=y
+# CONFIG_BSD_PROCESS_ACCT_V3 is not set
+
+#
+# RCU Subsystem
+#
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_IKCONFIG is not set
+CONFIG_LOG_BUF_SHIFT=17
+# CONFIG_GROUP_SCHED is not set
+# CONFIG_CGROUPS is not set
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
+# CONFIG_RELAY is not set
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_USER_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+CONFIG_RD_BZIP2=y
+CONFIG_RD_LZMA=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
+# CONFIG_EMBEDDED is not set
+CONFIG_UID16=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+CONFIG_KALLSYMS_EXTRA_PASS=y
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_AIO=y
+
+#
+# Kernel Performance Events And Counters
+#
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_SLUB_DEBUG=y
+CONFIG_COMPAT_BRK=y
+# CONFIG_SLAB is not set
+CONFIG_SLUB=y
+# CONFIG_SLOB is not set
+# CONFIG_PROFILING is not set
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_KPROBES is not set
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_HAVE_CLK=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
+# CONFIG_SLOW_WORK is not set
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+CONFIG_MODVERSIONS=y
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_BLOCK=y
+CONFIG_LBDAF=y
+CONFIG_BLK_DEV_BSG=y
+# CONFIG_BLK_DEV_INTEGRITY is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+# CONFIG_DEFAULT_AS is not set
+# CONFIG_DEFAULT_DEADLINE is not set
+CONFIG_DEFAULT_CFQ=y
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="cfq"
+# CONFIG_FREEZER is not set
+
+#
+# System Type
+#
+CONFIG_MMU=y
+# CONFIG_ARCH_AAEC2000 is not set
+# CONFIG_ARCH_INTEGRATOR is not set
+# CONFIG_ARCH_REALVIEW is not set
+# CONFIG_ARCH_VERSATILE is not set
+# CONFIG_ARCH_AT91 is not set
+# CONFIG_ARCH_CLPS711X is not set
+# CONFIG_ARCH_GEMINI is not set
+# CONFIG_ARCH_EBSA110 is not set
+# CONFIG_ARCH_EP93XX is not set
+# CONFIG_ARCH_FOOTBRIDGE is not set
+# CONFIG_ARCH_MXC is not set
+# CONFIG_ARCH_STMP3XXX is not set
+# CONFIG_ARCH_NETX is not set
+# CONFIG_ARCH_H720X is not set
+# CONFIG_ARCH_NOMADIK is not set
+# CONFIG_ARCH_IOP13XX is not set
+# CONFIG_ARCH_IOP32X is not set
+# CONFIG_ARCH_IOP33X is not set
+# CONFIG_ARCH_IXP23XX is not set
+# CONFIG_ARCH_IXP2000 is not set
+# CONFIG_ARCH_IXP4XX is not set
+# CONFIG_ARCH_L7200 is not set
+# CONFIG_ARCH_KIRKWOOD is not set
+# CONFIG_ARCH_LOKI is not set
+# CONFIG_ARCH_MV78XX0 is not set
+# CONFIG_ARCH_ORION5X is not set
+# CONFIG_ARCH_MMP is not set
+# CONFIG_ARCH_KS8695 is not set
+# CONFIG_ARCH_NS9XXX is not set
+# CONFIG_ARCH_W90X900 is not set
+# CONFIG_ARCH_PNX4008 is not set
+# CONFIG_ARCH_PXA is not set
+# CONFIG_ARCH_MSM is not set
+# CONFIG_ARCH_RPC is not set
+# CONFIG_ARCH_SA1100 is not set
+# CONFIG_ARCH_S3C2410 is not set
+# CONFIG_ARCH_S3C64XX is not set
+# CONFIG_ARCH_S5PC1XX is not set
+# CONFIG_ARCH_SHARK is not set
+# CONFIG_ARCH_LH7A40X is not set
+# CONFIG_ARCH_U300 is not set
+# CONFIG_ARCH_DAVINCI is not set
+# CONFIG_ARCH_OMAP is not set
+# CONFIG_ARCH_BCMRING is not set
+CONFIG_PLAT_SPEAR=y
+CONFIG_ARCH_SPEAR3XX=y
+# CONFIG_ARCH_SPEAR6XX is not set
+# CONFIG_MACH_SPEAR300 is not set
+CONFIG_MACH_SPEAR310=y
+# CONFIG_MACH_SPEAR320 is not set
+# CONFIG_BOARD_SPEAR300_EVB is not set
+CONFIG_BOARD_SPEAR310_EVB=y
+
+#
+# Processor Type
+#
+CONFIG_CPU_32=y
+CONFIG_CPU_ARM926T=y
+CONFIG_CPU_32v5=y
+CONFIG_CPU_ABRT_EV5TJ=y
+CONFIG_CPU_PABRT_LEGACY=y
+CONFIG_CPU_CACHE_VIVT=y
+CONFIG_CPU_COPY_V4WB=y
+CONFIG_CPU_TLB_V4WBI=y
+CONFIG_CPU_CP15=y
+CONFIG_CPU_CP15_MMU=y
+
+#
+# Processor Features
+#
+CONFIG_ARM_THUMB=y
+# CONFIG_CPU_ICACHE_DISABLE is not set
+# CONFIG_CPU_DCACHE_DISABLE is not set
+# CONFIG_CPU_DCACHE_WRITETHROUGH is not set
+# CONFIG_CPU_CACHE_ROUND_ROBIN is not set
+CONFIG_ARM_L1_CACHE_SHIFT=5
+CONFIG_ARM_VIC=y
+CONFIG_ARM_VIC_NR=2
+CONFIG_COMMON_CLKDEV=y
+
+#
+# Bus support
+#
+CONFIG_ARM_AMBA=y
+# CONFIG_PCI_SYSCALL is not set
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_PCCARD is not set
+
+#
+# Kernel Features
+#
+# CONFIG_NO_HZ is not set
+# CONFIG_HIGH_RES_TIMERS is not set
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+CONFIG_VMSPLIT_3G=y
+# CONFIG_VMSPLIT_2G is not set
+# CONFIG_VMSPLIT_1G is not set
+CONFIG_PAGE_OFFSET=0xC0000000
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+# CONFIG_PREEMPT is not set
+CONFIG_HZ=100
+# CONFIG_AEABI is not set
+# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set
+# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set
+# CONFIG_HIGHMEM is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4096
+# CONFIG_PHYS_ADDR_T_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=0
+CONFIG_VIRT_TO_BUS=y
+CONFIG_HAVE_MLOCK=y
+CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+CONFIG_ALIGNMENT_TRAP=y
+# CONFIG_UACCESS_WITH_MEMCPY is not set
+
+#
+# Boot options
+#
+CONFIG_ZBOOT_ROM_TEXT=0
+CONFIG_ZBOOT_ROM_BSS=0
+CONFIG_CMDLINE=""
+# CONFIG_XIP_KERNEL is not set
+# CONFIG_KEXEC is not set
+
+#
+# CPU Power Management
+#
+# CONFIG_CPU_IDLE is not set
+
+#
+# Floating point emulation
+#
+
+#
+# At least one emulation must be selected
+#
+# CONFIG_FPE_NWFPE is not set
+# CONFIG_FPE_FASTFPE is not set
+# CONFIG_VFP is not set
+
+#
+# Userspace binary formats
+#
+CONFIG_BINFMT_ELF=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_HAVE_AOUT=y
+# CONFIG_BINFMT_AOUT is not set
+CONFIG_BINFMT_MISC=y
+
+#
+# Power management options
+#
+# CONFIG_PM is not set
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+# CONFIG_NET is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=y
+CONFIG_FIRMWARE_IN_KERNEL=y
+CONFIG_EXTRA_FIRMWARE=""
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_MTD is not set
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_COW_COMMON is not set
+# CONFIG_BLK_DEV_LOOP is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=16384
+# CONFIG_BLK_DEV_XIP is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_MG_DISK is not set
+CONFIG_MISC_DEVICES=y
+# CONFIG_ENCLOSURE_SERVICES is not set
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+# CONFIG_EEPROM_93CX6 is not set
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+# CONFIG_SCSI is not set
+# CONFIG_SCSI_DMA is not set
+# CONFIG_SCSI_NETLINK is not set
+# CONFIG_ATA is not set
+# CONFIG_MD is not set
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+CONFIG_INPUT_FF_MEMLESS=y
+# CONFIG_INPUT_POLLDEV is not set
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_EVDEV is not set
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TABLET is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Hardware I/O ports
+#
+CONFIG_SERIO=y
+CONFIG_SERIO_SERPORT=y
+# CONFIG_SERIO_AMBAKMI is not set
+# CONFIG_SERIO_RAW is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_VT_HW_CONSOLE_BINDING is not set
+CONFIG_DEVKMEM=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+# CONFIG_SERIAL_AMBA_PL010 is not set
+CONFIG_SERIAL_AMBA_PL011=y
+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_IPMI_HANDLER is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_R3964 is not set
+CONFIG_RAW_DRIVER=y
+CONFIG_MAX_RAW_DEVS=8192
+# CONFIG_TCG_TPM is not set
+# CONFIG_I2C is not set
+# CONFIG_SPI is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
+CONFIG_ARCH_REQUIRE_GPIOLIB=y
+CONFIG_GPIOLIB=y
+# CONFIG_DEBUG_GPIO is not set
+CONFIG_GPIO_SYSFS=y
+
+#
+# Memory mapped GPIO expanders:
+#
+CONFIG_GPIO_PL061=y
+
+#
+# I2C GPIO expanders:
+#
+
+#
+# PCI GPIO expanders:
+#
+
+#
+# SPI GPIO expanders:
+#
+
+#
+# AC97 GPIO expanders:
+#
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+# CONFIG_HWMON is not set
+# CONFIG_THERMAL is not set
+# CONFIG_WATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+# CONFIG_SSB is not set
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_MFD_ASIC3 is not set
+# CONFIG_HTC_EGPIO is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_T7L66XB is not set
+# CONFIG_MFD_TC6387XB is not set
+# CONFIG_MFD_TC6393XB is not set
+# CONFIG_REGULATOR is not set
+# CONFIG_MEDIA_SUPPORT is not set
+
+#
+# Graphics support
+#
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+# CONFIG_FB is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
+
+#
+# Console display driver support
+#
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_DUMMY_CONSOLE=y
+# CONFIG_SOUND is not set
+# CONFIG_HID_SUPPORT is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_MMC is not set
+# CONFIG_MEMSTICK is not set
+# CONFIG_NEW_LEDS is not set
+# CONFIG_ACCESSIBILITY is not set
+CONFIG_RTC_LIB=y
+# CONFIG_RTC_CLASS is not set
+# CONFIG_DMADEVICES is not set
+# CONFIG_AUXDISPLAY is not set
+# CONFIG_UIO is not set
+
+#
+# TI VLYNQ
+#
+# CONFIG_STAGING is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+# CONFIG_EXT2_FS_POSIX_ACL is not set
+CONFIG_EXT2_FS_SECURITY=y
+# CONFIG_EXT2_FS_XIP is not set
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT3_FS_XATTR=y
+# CONFIG_EXT3_FS_POSIX_ACL is not set
+CONFIG_EXT3_FS_SECURITY=y
+# CONFIG_EXT4_FS is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_FS_MBCACHE=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
+CONFIG_DNOTIFY=y
+# CONFIG_INOTIFY is not set
+CONFIG_INOTIFY_USER=y
+# CONFIG_QUOTA is not set
+# CONFIG_AUTOFS_FS is not set
+CONFIG_AUTOFS4_FS=m
+# CONFIG_FUSE_FS is not set
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="ascii"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_HUGETLB_PAGE is not set
+# CONFIG_CONFIGFS_FS is not set
+CONFIG_MISC_FILESYSTEMS=y
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_SQUASHFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_OSF_PARTITION is not set
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
+# CONFIG_MAC_PARTITION is not set
+CONFIG_MSDOS_PARTITION=y
+# CONFIG_BSD_DISKLABEL is not set
+# CONFIG_MINIX_SUBPARTITION is not set
+# CONFIG_SOLARIS_X86_PARTITION is not set
+# CONFIG_UNIXWARE_DISKLABEL is not set
+# CONFIG_LDM_PARTITION is not set
+# CONFIG_SGI_PARTITION is not set
+# CONFIG_ULTRIX_PARTITION is not set
+# CONFIG_SUN_PARTITION is not set
+# CONFIG_KARMA_PARTITION is not set
+# CONFIG_EFI_PARTITION is not set
+# CONFIG_SYSV68_PARTITION is not set
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NLS_CODEPAGE_437=y
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+CONFIG_NLS_ASCII=m
+# CONFIG_NLS_ISO8859_1 is not set
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_UTF8 is not set
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+CONFIG_ENABLE_WARN_DEPRECATED=y
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_STRIP_ASM_SYMS is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_DEBUG_FS=y
+# CONFIG_HEADERS_CHECK is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_SHIRQ is not set
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+CONFIG_DETECT_HUNG_TASK=y
+# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
+CONFIG_SCHED_DEBUG=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_TIMER_STATS is not set
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_SLUB_DEBUG_ON is not set
+# CONFIG_SLUB_STATS is not set
+# CONFIG_DEBUG_KMEMLEAK is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+CONFIG_DEBUG_SPINLOCK=y
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_LOCK_STAT is not set
+CONFIG_DEBUG_SPINLOCK_SLEEP=y
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+# CONFIG_DEBUG_KOBJECT is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+CONFIG_DEBUG_INFO=y
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+CONFIG_DEBUG_MEMORY_INIT=y
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_DEBUG_SG is not set
+# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_DEBUG_CREDENTIALS is not set
+CONFIG_FRAME_POINTER=y
+# CONFIG_BOOT_PRINTK_DELAY is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+CONFIG_RCU_CPU_STALL_DETECTOR=y
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
+# CONFIG_FAULT_INJECTION is not set
+# CONFIG_LATENCYTOP is not set
+# CONFIG_SYSCTL_SYSCALL_CHECK is not set
+# CONFIG_PAGE_POISONING is not set
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_TRACING_SUPPORT=y
+CONFIG_FTRACE=y
+# CONFIG_FUNCTION_TRACER is not set
+# CONFIG_IRQSOFF_TRACER is not set
+# CONFIG_SCHED_TRACER is not set
+# CONFIG_ENABLE_DEFAULT_TRACERS is not set
+# CONFIG_BOOT_TRACER is not set
+CONFIG_BRANCH_PROFILE_NONE=y
+# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
+# CONFIG_PROFILE_ALL_BRANCHES is not set
+# CONFIG_STACK_TRACER is not set
+# CONFIG_KMEMTRACE is not set
+# CONFIG_WORKQUEUE_TRACER is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_DYNAMIC_DEBUG is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_KGDB is not set
+# CONFIG_DEBUG_USER is not set
+# CONFIG_DEBUG_ERRORS is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+# CONFIG_DEBUG_LL is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
+# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+# CONFIG_CRYPTO is not set
+# CONFIG_BINARY_PRINTF is not set
+
+#
+# Library routines
+#
+CONFIG_GENERIC_FIND_LAST_BIT=y
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
+# CONFIG_CRC_T10DIF is not set
+# CONFIG_CRC_ITU_T is not set
+# CONFIG_CRC32 is not set
+# CONFIG_CRC7 is not set
+# CONFIG_LIBCRC32C is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_DECOMPRESS_GZIP=y
+CONFIG_DECOMPRESS_BZIP2=y
+CONFIG_DECOMPRESS_LZMA=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
diff --git a/arch/arm/configs/spear320_defconfig b/arch/arm/configs/spear320_defconfig
new file mode 100644
index 000000000000..2ae3c110a21a
--- /dev/null
+++ b/arch/arm/configs/spear320_defconfig
@@ -0,0 +1,775 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.32
+# Tue Mar 23 14:37:12 2010
+#
+CONFIG_ARM=y
+CONFIG_SYS_SUPPORTS_APM_EMULATION=y
+CONFIG_GENERIC_GPIO=y
+CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_HAVE_LATENCYTOP_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_HARDIRQS_SW_RESEND=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
+CONFIG_VECTORS_BASE=0xffff0000
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+CONFIG_BSD_PROCESS_ACCT=y
+# CONFIG_BSD_PROCESS_ACCT_V3 is not set
+
+#
+# RCU Subsystem
+#
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_IKCONFIG is not set
+CONFIG_LOG_BUF_SHIFT=17
+# CONFIG_GROUP_SCHED is not set
+# CONFIG_CGROUPS is not set
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
+# CONFIG_RELAY is not set
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_USER_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+CONFIG_RD_BZIP2=y
+CONFIG_RD_LZMA=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
+# CONFIG_EMBEDDED is not set
+CONFIG_UID16=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+CONFIG_KALLSYMS_EXTRA_PASS=y
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_AIO=y
+
+#
+# Kernel Performance Events And Counters
+#
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_SLUB_DEBUG=y
+CONFIG_COMPAT_BRK=y
+# CONFIG_SLAB is not set
+CONFIG_SLUB=y
+# CONFIG_SLOB is not set
+# CONFIG_PROFILING is not set
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_KPROBES is not set
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_HAVE_CLK=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
+# CONFIG_SLOW_WORK is not set
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+CONFIG_MODVERSIONS=y
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_BLOCK=y
+CONFIG_LBDAF=y
+CONFIG_BLK_DEV_BSG=y
+# CONFIG_BLK_DEV_INTEGRITY is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+# CONFIG_DEFAULT_AS is not set
+# CONFIG_DEFAULT_DEADLINE is not set
+CONFIG_DEFAULT_CFQ=y
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="cfq"
+# CONFIG_FREEZER is not set
+
+#
+# System Type
+#
+CONFIG_MMU=y
+# CONFIG_ARCH_AAEC2000 is not set
+# CONFIG_ARCH_INTEGRATOR is not set
+# CONFIG_ARCH_REALVIEW is not set
+# CONFIG_ARCH_VERSATILE is not set
+# CONFIG_ARCH_AT91 is not set
+# CONFIG_ARCH_CLPS711X is not set
+# CONFIG_ARCH_GEMINI is not set
+# CONFIG_ARCH_EBSA110 is not set
+# CONFIG_ARCH_EP93XX is not set
+# CONFIG_ARCH_FOOTBRIDGE is not set
+# CONFIG_ARCH_MXC is not set
+# CONFIG_ARCH_STMP3XXX is not set
+# CONFIG_ARCH_NETX is not set
+# CONFIG_ARCH_H720X is not set
+# CONFIG_ARCH_NOMADIK is not set
+# CONFIG_ARCH_IOP13XX is not set
+# CONFIG_ARCH_IOP32X is not set
+# CONFIG_ARCH_IOP33X is not set
+# CONFIG_ARCH_IXP23XX is not set
+# CONFIG_ARCH_IXP2000 is not set
+# CONFIG_ARCH_IXP4XX is not set
+# CONFIG_ARCH_L7200 is not set
+# CONFIG_ARCH_KIRKWOOD is not set
+# CONFIG_ARCH_LOKI is not set
+# CONFIG_ARCH_MV78XX0 is not set
+# CONFIG_ARCH_ORION5X is not set
+# CONFIG_ARCH_MMP is not set
+# CONFIG_ARCH_KS8695 is not set
+# CONFIG_ARCH_NS9XXX is not set
+# CONFIG_ARCH_W90X900 is not set
+# CONFIG_ARCH_PNX4008 is not set
+# CONFIG_ARCH_PXA is not set
+# CONFIG_ARCH_MSM is not set
+# CONFIG_ARCH_RPC is not set
+# CONFIG_ARCH_SA1100 is not set
+# CONFIG_ARCH_S3C2410 is not set
+# CONFIG_ARCH_S3C64XX is not set
+# CONFIG_ARCH_S5PC1XX is not set
+# CONFIG_ARCH_SHARK is not set
+# CONFIG_ARCH_LH7A40X is not set
+# CONFIG_ARCH_U300 is not set
+# CONFIG_ARCH_DAVINCI is not set
+# CONFIG_ARCH_OMAP is not set
+# CONFIG_ARCH_BCMRING is not set
+CONFIG_PLAT_SPEAR=y
+CONFIG_ARCH_SPEAR3XX=y
+# CONFIG_ARCH_SPEAR6XX is not set
+# CONFIG_MACH_SPEAR300 is not set
+# CONFIG_MACH_SPEAR310 is not set
+CONFIG_MACH_SPEAR320=y
+# CONFIG_BOARD_SPEAR300_EVB is not set
+CONFIG_BOARD_SPEAR320_EVB=y
+
+#
+# Processor Type
+#
+CONFIG_CPU_32=y
+CONFIG_CPU_ARM926T=y
+CONFIG_CPU_32v5=y
+CONFIG_CPU_ABRT_EV5TJ=y
+CONFIG_CPU_PABRT_LEGACY=y
+CONFIG_CPU_CACHE_VIVT=y
+CONFIG_CPU_COPY_V4WB=y
+CONFIG_CPU_TLB_V4WBI=y
+CONFIG_CPU_CP15=y
+CONFIG_CPU_CP15_MMU=y
+
+#
+# Processor Features
+#
+CONFIG_ARM_THUMB=y
+# CONFIG_CPU_ICACHE_DISABLE is not set
+# CONFIG_CPU_DCACHE_DISABLE is not set
+# CONFIG_CPU_DCACHE_WRITETHROUGH is not set
+# CONFIG_CPU_CACHE_ROUND_ROBIN is not set
+CONFIG_ARM_L1_CACHE_SHIFT=5
+CONFIG_ARM_VIC=y
+CONFIG_ARM_VIC_NR=2
+CONFIG_COMMON_CLKDEV=y
+
+#
+# Bus support
+#
+CONFIG_ARM_AMBA=y
+# CONFIG_PCI_SYSCALL is not set
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_PCCARD is not set
+
+#
+# Kernel Features
+#
+# CONFIG_NO_HZ is not set
+# CONFIG_HIGH_RES_TIMERS is not set
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+CONFIG_VMSPLIT_3G=y
+# CONFIG_VMSPLIT_2G is not set
+# CONFIG_VMSPLIT_1G is not set
+CONFIG_PAGE_OFFSET=0xC0000000
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+# CONFIG_PREEMPT is not set
+CONFIG_HZ=100
+# CONFIG_AEABI is not set
+# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set
+# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set
+# CONFIG_HIGHMEM is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4096
+# CONFIG_PHYS_ADDR_T_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=0
+CONFIG_VIRT_TO_BUS=y
+CONFIG_HAVE_MLOCK=y
+CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+CONFIG_ALIGNMENT_TRAP=y
+# CONFIG_UACCESS_WITH_MEMCPY is not set
+
+#
+# Boot options
+#
+CONFIG_ZBOOT_ROM_TEXT=0
+CONFIG_ZBOOT_ROM_BSS=0
+CONFIG_CMDLINE=""
+# CONFIG_XIP_KERNEL is not set
+# CONFIG_KEXEC is not set
+
+#
+# CPU Power Management
+#
+# CONFIG_CPU_IDLE is not set
+
+#
+# Floating point emulation
+#
+
+#
+# At least one emulation must be selected
+#
+# CONFIG_FPE_NWFPE is not set
+# CONFIG_FPE_FASTFPE is not set
+# CONFIG_VFP is not set
+
+#
+# Userspace binary formats
+#
+CONFIG_BINFMT_ELF=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_HAVE_AOUT=y
+# CONFIG_BINFMT_AOUT is not set
+CONFIG_BINFMT_MISC=y
+
+#
+# Power management options
+#
+# CONFIG_PM is not set
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+# CONFIG_NET is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=y
+CONFIG_FIRMWARE_IN_KERNEL=y
+CONFIG_EXTRA_FIRMWARE=""
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_MTD is not set
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_COW_COMMON is not set
+# CONFIG_BLK_DEV_LOOP is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=16384
+# CONFIG_BLK_DEV_XIP is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_MG_DISK is not set
+CONFIG_MISC_DEVICES=y
+# CONFIG_ENCLOSURE_SERVICES is not set
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+# CONFIG_EEPROM_93CX6 is not set
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+# CONFIG_SCSI is not set
+# CONFIG_SCSI_DMA is not set
+# CONFIG_SCSI_NETLINK is not set
+# CONFIG_ATA is not set
+# CONFIG_MD is not set
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+CONFIG_INPUT_FF_MEMLESS=y
+# CONFIG_INPUT_POLLDEV is not set
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_EVDEV is not set
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TABLET is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Hardware I/O ports
+#
+CONFIG_SERIO=y
+CONFIG_SERIO_SERPORT=y
+# CONFIG_SERIO_AMBAKMI is not set
+# CONFIG_SERIO_RAW is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_VT_HW_CONSOLE_BINDING is not set
+CONFIG_DEVKMEM=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+# CONFIG_SERIAL_AMBA_PL010 is not set
+CONFIG_SERIAL_AMBA_PL011=y
+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_IPMI_HANDLER is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_R3964 is not set
+CONFIG_RAW_DRIVER=y
+CONFIG_MAX_RAW_DEVS=8192
+# CONFIG_TCG_TPM is not set
+# CONFIG_I2C is not set
+# CONFIG_SPI is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
+CONFIG_ARCH_REQUIRE_GPIOLIB=y
+CONFIG_GPIOLIB=y
+# CONFIG_DEBUG_GPIO is not set
+CONFIG_GPIO_SYSFS=y
+
+#
+# Memory mapped GPIO expanders:
+#
+CONFIG_GPIO_PL061=y
+
+#
+# I2C GPIO expanders:
+#
+
+#
+# PCI GPIO expanders:
+#
+
+#
+# SPI GPIO expanders:
+#
+
+#
+# AC97 GPIO expanders:
+#
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+# CONFIG_HWMON is not set
+# CONFIG_THERMAL is not set
+# CONFIG_WATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+# CONFIG_SSB is not set
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_MFD_ASIC3 is not set
+# CONFIG_HTC_EGPIO is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_T7L66XB is not set
+# CONFIG_MFD_TC6387XB is not set
+# CONFIG_MFD_TC6393XB is not set
+# CONFIG_REGULATOR is not set
+# CONFIG_MEDIA_SUPPORT is not set
+
+#
+# Graphics support
+#
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+# CONFIG_FB is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
+
+#
+# Console display driver support
+#
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_DUMMY_CONSOLE=y
+# CONFIG_SOUND is not set
+# CONFIG_HID_SUPPORT is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_MMC is not set
+# CONFIG_MEMSTICK is not set
+# CONFIG_NEW_LEDS is not set
+# CONFIG_ACCESSIBILITY is not set
+CONFIG_RTC_LIB=y
+# CONFIG_RTC_CLASS is not set
+# CONFIG_DMADEVICES is not set
+# CONFIG_AUXDISPLAY is not set
+# CONFIG_UIO is not set
+
+#
+# TI VLYNQ
+#
+# CONFIG_STAGING is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+# CONFIG_EXT2_FS_POSIX_ACL is not set
+CONFIG_EXT2_FS_SECURITY=y
+# CONFIG_EXT2_FS_XIP is not set
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT3_FS_XATTR=y
+# CONFIG_EXT3_FS_POSIX_ACL is not set
+CONFIG_EXT3_FS_SECURITY=y
+# CONFIG_EXT4_FS is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_FS_MBCACHE=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
+CONFIG_DNOTIFY=y
+# CONFIG_INOTIFY is not set
+CONFIG_INOTIFY_USER=y
+# CONFIG_QUOTA is not set
+# CONFIG_AUTOFS_FS is not set
+CONFIG_AUTOFS4_FS=m
+# CONFIG_FUSE_FS is not set
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="ascii"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_HUGETLB_PAGE is not set
+# CONFIG_CONFIGFS_FS is not set
+CONFIG_MISC_FILESYSTEMS=y
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_SQUASHFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_OSF_PARTITION is not set
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
+# CONFIG_MAC_PARTITION is not set
+CONFIG_MSDOS_PARTITION=y
+# CONFIG_BSD_DISKLABEL is not set
+# CONFIG_MINIX_SUBPARTITION is not set
+# CONFIG_SOLARIS_X86_PARTITION is not set
+# CONFIG_UNIXWARE_DISKLABEL is not set
+# CONFIG_LDM_PARTITION is not set
+# CONFIG_SGI_PARTITION is not set
+# CONFIG_ULTRIX_PARTITION is not set
+# CONFIG_SUN_PARTITION is not set
+# CONFIG_KARMA_PARTITION is not set
+# CONFIG_EFI_PARTITION is not set
+# CONFIG_SYSV68_PARTITION is not set
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NLS_CODEPAGE_437=y
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+CONFIG_NLS_ASCII=m
+# CONFIG_NLS_ISO8859_1 is not set
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_UTF8 is not set
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+CONFIG_ENABLE_WARN_DEPRECATED=y
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_STRIP_ASM_SYMS is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_DEBUG_FS=y
+# CONFIG_HEADERS_CHECK is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_SHIRQ is not set
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+CONFIG_DETECT_HUNG_TASK=y
+# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
+CONFIG_SCHED_DEBUG=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_TIMER_STATS is not set
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_SLUB_DEBUG_ON is not set
+# CONFIG_SLUB_STATS is not set
+# CONFIG_DEBUG_KMEMLEAK is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+CONFIG_DEBUG_SPINLOCK=y
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_LOCK_STAT is not set
+CONFIG_DEBUG_SPINLOCK_SLEEP=y
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+# CONFIG_DEBUG_KOBJECT is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+CONFIG_DEBUG_INFO=y
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+CONFIG_DEBUG_MEMORY_INIT=y
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_DEBUG_SG is not set
+# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_DEBUG_CREDENTIALS is not set
+CONFIG_FRAME_POINTER=y
+# CONFIG_BOOT_PRINTK_DELAY is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+CONFIG_RCU_CPU_STALL_DETECTOR=y
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
+# CONFIG_FAULT_INJECTION is not set
+# CONFIG_LATENCYTOP is not set
+# CONFIG_SYSCTL_SYSCALL_CHECK is not set
+# CONFIG_PAGE_POISONING is not set
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_TRACING_SUPPORT=y
+CONFIG_FTRACE=y
+# CONFIG_FUNCTION_TRACER is not set
+# CONFIG_IRQSOFF_TRACER is not set
+# CONFIG_SCHED_TRACER is not set
+# CONFIG_ENABLE_DEFAULT_TRACERS is not set
+# CONFIG_BOOT_TRACER is not set
+CONFIG_BRANCH_PROFILE_NONE=y
+# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
+# CONFIG_PROFILE_ALL_BRANCHES is not set
+# CONFIG_STACK_TRACER is not set
+# CONFIG_KMEMTRACE is not set
+# CONFIG_WORKQUEUE_TRACER is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_DYNAMIC_DEBUG is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_KGDB is not set
+# CONFIG_DEBUG_USER is not set
+# CONFIG_DEBUG_ERRORS is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+# CONFIG_DEBUG_LL is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
+# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+# CONFIG_CRYPTO is not set
+# CONFIG_BINARY_PRINTF is not set
+
+#
+# Library routines
+#
+CONFIG_GENERIC_FIND_LAST_BIT=y
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
+# CONFIG_CRC_T10DIF is not set
+# CONFIG_CRC_ITU_T is not set
+# CONFIG_CRC32 is not set
+# CONFIG_CRC7 is not set
+# CONFIG_LIBCRC32C is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_DECOMPRESS_GZIP=y
+CONFIG_DECOMPRESS_BZIP2=y
+CONFIG_DECOMPRESS_LZMA=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
diff --git a/arch/arm/configs/spear600_defconfig b/arch/arm/configs/spear600_defconfig
new file mode 100644
index 000000000000..c85a02924ec5
--- /dev/null
+++ b/arch/arm/configs/spear600_defconfig
@@ -0,0 +1,760 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.32
+# Tue Mar 23 14:37:26 2010
+#
+CONFIG_ARM=y
+CONFIG_SYS_SUPPORTS_APM_EMULATION=y
+CONFIG_GENERIC_GPIO=y
+CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_HAVE_LATENCYTOP_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_HARDIRQS_SW_RESEND=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
+CONFIG_VECTORS_BASE=0xffff0000
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+CONFIG_BSD_PROCESS_ACCT=y
+# CONFIG_BSD_PROCESS_ACCT_V3 is not set
+
+#
+# RCU Subsystem
+#
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_IKCONFIG is not set
+CONFIG_LOG_BUF_SHIFT=17
+# CONFIG_GROUP_SCHED is not set
+# CONFIG_CGROUPS is not set
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
+# CONFIG_RELAY is not set
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_USER_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+CONFIG_RD_BZIP2=y
+CONFIG_RD_LZMA=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
+# CONFIG_EMBEDDED is not set
+CONFIG_UID16=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+CONFIG_KALLSYMS_EXTRA_PASS=y
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_AIO=y
+
+#
+# Kernel Performance Events And Counters
+#
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_SLUB_DEBUG=y
+CONFIG_COMPAT_BRK=y
+# CONFIG_SLAB is not set
+CONFIG_SLUB=y
+# CONFIG_SLOB is not set
+# CONFIG_PROFILING is not set
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_KPROBES is not set
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_HAVE_CLK=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
+# CONFIG_SLOW_WORK is not set
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+CONFIG_MODVERSIONS=y
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_BLOCK=y
+CONFIG_LBDAF=y
+CONFIG_BLK_DEV_BSG=y
+# CONFIG_BLK_DEV_INTEGRITY is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+# CONFIG_DEFAULT_AS is not set
+# CONFIG_DEFAULT_DEADLINE is not set
+CONFIG_DEFAULT_CFQ=y
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="cfq"
+# CONFIG_FREEZER is not set
+
+#
+# System Type
+#
+CONFIG_MMU=y
+# CONFIG_ARCH_AAEC2000 is not set
+# CONFIG_ARCH_INTEGRATOR is not set
+# CONFIG_ARCH_REALVIEW is not set
+# CONFIG_ARCH_VERSATILE is not set
+# CONFIG_ARCH_AT91 is not set
+# CONFIG_ARCH_CLPS711X is not set
+# CONFIG_ARCH_GEMINI is not set
+# CONFIG_ARCH_EBSA110 is not set
+# CONFIG_ARCH_EP93XX is not set
+# CONFIG_ARCH_FOOTBRIDGE is not set
+# CONFIG_ARCH_MXC is not set
+# CONFIG_ARCH_STMP3XXX is not set
+# CONFIG_ARCH_NETX is not set
+# CONFIG_ARCH_H720X is not set
+# CONFIG_ARCH_NOMADIK is not set
+# CONFIG_ARCH_IOP13XX is not set
+# CONFIG_ARCH_IOP32X is not set
+# CONFIG_ARCH_IOP33X is not set
+# CONFIG_ARCH_IXP23XX is not set
+# CONFIG_ARCH_IXP2000 is not set
+# CONFIG_ARCH_IXP4XX is not set
+# CONFIG_ARCH_L7200 is not set
+# CONFIG_ARCH_KIRKWOOD is not set
+# CONFIG_ARCH_LOKI is not set
+# CONFIG_ARCH_MV78XX0 is not set
+# CONFIG_ARCH_ORION5X is not set
+# CONFIG_ARCH_MMP is not set
+# CONFIG_ARCH_KS8695 is not set
+# CONFIG_ARCH_NS9XXX is not set
+# CONFIG_ARCH_W90X900 is not set
+# CONFIG_ARCH_PNX4008 is not set
+# CONFIG_ARCH_PXA is not set
+# CONFIG_ARCH_MSM is not set
+# CONFIG_ARCH_RPC is not set
+# CONFIG_ARCH_SA1100 is not set
+# CONFIG_ARCH_S3C2410 is not set
+# CONFIG_ARCH_S3C64XX is not set
+# CONFIG_ARCH_S5PC1XX is not set
+# CONFIG_ARCH_SHARK is not set
+# CONFIG_ARCH_LH7A40X is not set
+# CONFIG_ARCH_U300 is not set
+# CONFIG_ARCH_DAVINCI is not set
+# CONFIG_ARCH_OMAP is not set
+# CONFIG_ARCH_BCMRING is not set
+CONFIG_PLAT_SPEAR=y
+# CONFIG_ARCH_SPEAR3XX is not set
+CONFIG_ARCH_SPEAR6XX=y
+# CONFIG_MACH_SPEAR300 is not set
+# CONFIG_MACH_SPEAR310 is not set
+# CONFIG_MACH_SPEAR320 is not set
+# CONFIG_BOARD_SPEAR300_EVB is not set
+CONFIG_MACH_SPEAR600=y
+CONFIG_BOARD_SPEAR600_EVB=y
+
+#
+# Processor Type
+#
+CONFIG_CPU_32=y
+CONFIG_CPU_ARM926T=y
+CONFIG_CPU_32v5=y
+CONFIG_CPU_ABRT_EV5TJ=y
+CONFIG_CPU_PABRT_LEGACY=y
+CONFIG_CPU_CACHE_VIVT=y
+CONFIG_CPU_COPY_V4WB=y
+CONFIG_CPU_TLB_V4WBI=y
+CONFIG_CPU_CP15=y
+CONFIG_CPU_CP15_MMU=y
+
+#
+# Processor Features
+#
+CONFIG_ARM_THUMB=y
+# CONFIG_CPU_ICACHE_DISABLE is not set
+# CONFIG_CPU_DCACHE_DISABLE is not set
+# CONFIG_CPU_DCACHE_WRITETHROUGH is not set
+# CONFIG_CPU_CACHE_ROUND_ROBIN is not set
+CONFIG_ARM_L1_CACHE_SHIFT=5
+CONFIG_ARM_VIC=y
+CONFIG_ARM_VIC_NR=2
+CONFIG_COMMON_CLKDEV=y
+
+#
+# Bus support
+#
+CONFIG_ARM_AMBA=y
+# CONFIG_PCI_SYSCALL is not set
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_PCCARD is not set
+
+#
+# Kernel Features
+#
+# CONFIG_NO_HZ is not set
+# CONFIG_HIGH_RES_TIMERS is not set
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+CONFIG_VMSPLIT_3G=y
+# CONFIG_VMSPLIT_2G is not set
+# CONFIG_VMSPLIT_1G is not set
+CONFIG_PAGE_OFFSET=0xC0000000
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+# CONFIG_PREEMPT is not set
+CONFIG_HZ=100
+# CONFIG_AEABI is not set
+# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set
+# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set
+# CONFIG_HIGHMEM is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4096
+# CONFIG_PHYS_ADDR_T_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=0
+CONFIG_VIRT_TO_BUS=y
+CONFIG_HAVE_MLOCK=y
+CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+CONFIG_ALIGNMENT_TRAP=y
+# CONFIG_UACCESS_WITH_MEMCPY is not set
+
+#
+# Boot options
+#
+CONFIG_ZBOOT_ROM_TEXT=0
+CONFIG_ZBOOT_ROM_BSS=0
+CONFIG_CMDLINE=""
+# CONFIG_XIP_KERNEL is not set
+# CONFIG_KEXEC is not set
+
+#
+# CPU Power Management
+#
+# CONFIG_CPU_IDLE is not set
+
+#
+# Floating point emulation
+#
+
+#
+# At least one emulation must be selected
+#
+# CONFIG_FPE_NWFPE is not set
+# CONFIG_FPE_FASTFPE is not set
+# CONFIG_VFP is not set
+
+#
+# Userspace binary formats
+#
+CONFIG_BINFMT_ELF=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_HAVE_AOUT=y
+# CONFIG_BINFMT_AOUT is not set
+CONFIG_BINFMT_MISC=y
+# CONFIG_ARTHUR is not set
+
+#
+# Power management options
+#
+# CONFIG_PM is not set
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+# CONFIG_NET is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=y
+CONFIG_FIRMWARE_IN_KERNEL=y
+CONFIG_EXTRA_FIRMWARE=""
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_MTD is not set
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_COW_COMMON is not set
+# CONFIG_BLK_DEV_LOOP is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=16384
+# CONFIG_BLK_DEV_XIP is not set
+# CONFIG_CDROM_PKTCDVD is not set
+CONFIG_MISC_DEVICES=y
+# CONFIG_ENCLOSURE_SERVICES is not set
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+# CONFIG_EEPROM_93CX6 is not set
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+# CONFIG_SCSI is not set
+# CONFIG_SCSI_DMA is not set
+# CONFIG_SCSI_NETLINK is not set
+# CONFIG_ATA is not set
+# CONFIG_MD is not set
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+CONFIG_INPUT_FF_MEMLESS=y
+# CONFIG_INPUT_POLLDEV is not set
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_EVDEV is not set
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Hardware I/O ports
+#
+CONFIG_SERIO=y
+CONFIG_SERIO_SERPORT=y
+# CONFIG_SERIO_AMBAKMI is not set
+# CONFIG_SERIO_RAW is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_VT_HW_CONSOLE_BINDING is not set
+CONFIG_DEVKMEM=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+# CONFIG_SERIAL_AMBA_PL010 is not set
+CONFIG_SERIAL_AMBA_PL011=y
+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_R3964 is not set
+CONFIG_RAW_DRIVER=y
+CONFIG_MAX_RAW_DEVS=8192
+# CONFIG_TCG_TPM is not set
+# CONFIG_I2C is not set
+# CONFIG_SPI is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
+CONFIG_ARCH_REQUIRE_GPIOLIB=y
+CONFIG_GPIOLIB=y
+# CONFIG_DEBUG_GPIO is not set
+CONFIG_GPIO_SYSFS=y
+
+#
+# Memory mapped GPIO expanders:
+#
+CONFIG_GPIO_PL061=y
+
+#
+# I2C GPIO expanders:
+#
+
+#
+# PCI GPIO expanders:
+#
+
+#
+# SPI GPIO expanders:
+#
+
+#
+# AC97 GPIO expanders:
+#
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+# CONFIG_HWMON is not set
+# CONFIG_THERMAL is not set
+# CONFIG_WATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+# CONFIG_SSB is not set
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_T7L66XB is not set
+# CONFIG_MFD_TC6387XB is not set
+# CONFIG_REGULATOR is not set
+# CONFIG_MEDIA_SUPPORT is not set
+
+#
+# Graphics support
+#
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+# CONFIG_FB is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
+
+#
+# Console display driver support
+#
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_DUMMY_CONSOLE=y
+# CONFIG_HID_SUPPORT is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_MMC is not set
+# CONFIG_MEMSTICK is not set
+# CONFIG_NEW_LEDS is not set
+# CONFIG_ACCESSIBILITY is not set
+CONFIG_RTC_LIB=y
+# CONFIG_RTC_CLASS is not set
+# CONFIG_DMADEVICES is not set
+# CONFIG_AUXDISPLAY is not set
+# CONFIG_UIO is not set
+
+#
+# TI VLYNQ
+#
+# CONFIG_STAGING is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+# CONFIG_EXT2_FS_POSIX_ACL is not set
+CONFIG_EXT2_FS_SECURITY=y
+# CONFIG_EXT2_FS_XIP is not set
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT3_FS_XATTR=y
+# CONFIG_EXT3_FS_POSIX_ACL is not set
+CONFIG_EXT3_FS_SECURITY=y
+# CONFIG_EXT4_FS is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_FS_MBCACHE=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
+CONFIG_DNOTIFY=y
+# CONFIG_INOTIFY is not set
+CONFIG_INOTIFY_USER=y
+# CONFIG_QUOTA is not set
+# CONFIG_AUTOFS_FS is not set
+CONFIG_AUTOFS4_FS=m
+# CONFIG_FUSE_FS is not set
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="ascii"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_HUGETLB_PAGE is not set
+# CONFIG_CONFIGFS_FS is not set
+CONFIG_MISC_FILESYSTEMS=y
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_SQUASHFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_OSF_PARTITION is not set
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
+# CONFIG_MAC_PARTITION is not set
+CONFIG_MSDOS_PARTITION=y
+# CONFIG_BSD_DISKLABEL is not set
+# CONFIG_MINIX_SUBPARTITION is not set
+# CONFIG_SOLARIS_X86_PARTITION is not set
+# CONFIG_UNIXWARE_DISKLABEL is not set
+# CONFIG_LDM_PARTITION is not set
+# CONFIG_SGI_PARTITION is not set
+# CONFIG_ULTRIX_PARTITION is not set
+# CONFIG_SUN_PARTITION is not set
+# CONFIG_KARMA_PARTITION is not set
+# CONFIG_EFI_PARTITION is not set
+# CONFIG_SYSV68_PARTITION is not set
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NLS_CODEPAGE_437=y
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+CONFIG_NLS_ASCII=m
+# CONFIG_NLS_ISO8859_1 is not set
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_UTF8 is not set
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+CONFIG_ENABLE_WARN_DEPRECATED=y
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_STRIP_ASM_SYMS is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_DEBUG_FS=y
+# CONFIG_HEADERS_CHECK is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_SHIRQ is not set
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+CONFIG_DETECT_HUNG_TASK=y
+# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
+CONFIG_SCHED_DEBUG=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_TIMER_STATS is not set
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_SLUB_DEBUG_ON is not set
+# CONFIG_SLUB_STATS is not set
+# CONFIG_DEBUG_KMEMLEAK is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+CONFIG_DEBUG_SPINLOCK=y
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_LOCK_STAT is not set
+CONFIG_DEBUG_SPINLOCK_SLEEP=y
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+# CONFIG_DEBUG_KOBJECT is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+CONFIG_DEBUG_INFO=y
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+CONFIG_DEBUG_MEMORY_INIT=y
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_DEBUG_SG is not set
+# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_DEBUG_CREDENTIALS is not set
+CONFIG_FRAME_POINTER=y
+# CONFIG_BOOT_PRINTK_DELAY is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+CONFIG_RCU_CPU_STALL_DETECTOR=y
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
+# CONFIG_FAULT_INJECTION is not set
+# CONFIG_LATENCYTOP is not set
+# CONFIG_SYSCTL_SYSCALL_CHECK is not set
+# CONFIG_PAGE_POISONING is not set
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_TRACING_SUPPORT=y
+CONFIG_FTRACE=y
+# CONFIG_FUNCTION_TRACER is not set
+# CONFIG_IRQSOFF_TRACER is not set
+# CONFIG_SCHED_TRACER is not set
+# CONFIG_ENABLE_DEFAULT_TRACERS is not set
+# CONFIG_BOOT_TRACER is not set
+CONFIG_BRANCH_PROFILE_NONE=y
+# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
+# CONFIG_PROFILE_ALL_BRANCHES is not set
+# CONFIG_STACK_TRACER is not set
+# CONFIG_KMEMTRACE is not set
+# CONFIG_WORKQUEUE_TRACER is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_DYNAMIC_DEBUG is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_KGDB is not set
+# CONFIG_DEBUG_USER is not set
+# CONFIG_DEBUG_ERRORS is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+# CONFIG_DEBUG_LL is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
+# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+# CONFIG_CRYPTO is not set
+# CONFIG_BINARY_PRINTF is not set
+
+#
+# Library routines
+#
+CONFIG_GENERIC_FIND_LAST_BIT=y
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
+# CONFIG_CRC_T10DIF is not set
+# CONFIG_CRC_ITU_T is not set
+# CONFIG_CRC32 is not set
+# CONFIG_CRC7 is not set
+# CONFIG_LIBCRC32C is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_DECOMPRESS_GZIP=y
+CONFIG_DECOMPRESS_BZIP2=y
+CONFIG_DECOMPRESS_LZMA=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
diff --git a/arch/arm/configs/stamp9g20_defconfig b/arch/arm/configs/stamp9g20_defconfig
new file mode 100644
index 000000000000..06a8293c61ca
--- /dev/null
+++ b/arch/arm/configs/stamp9g20_defconfig
@@ -0,0 +1,1456 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.34-rc1
+# Wed Mar 17 16:38:03 2010
+#
+CONFIG_ARM=y
+CONFIG_SYS_SUPPORTS_APM_EMULATION=y
+CONFIG_GENERIC_GPIO=y
+CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_HAVE_PROC_CPU=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_HAVE_LATENCYTOP_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_HARDIRQS_SW_RESEND=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
+CONFIG_VECTORS_BASE=0xffff0000
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_LOCK_KERNEL=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_LOCALVERSION=""
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_LZO=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_LZO is not set
+# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+# CONFIG_POSIX_MQUEUE is not set
+# CONFIG_BSD_PROCESS_ACCT is not set
+# CONFIG_TASKSTATS is not set
+# CONFIG_AUDIT is not set
+
+#
+# RCU Subsystem
+#
+# CONFIG_TREE_RCU is not set
+CONFIG_TREE_PREEMPT_RCU=y
+# CONFIG_TINY_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_IKCONFIG is not set
+CONFIG_LOG_BUF_SHIFT=14
+# CONFIG_CGROUPS is not set
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
+# CONFIG_RELAY is not set
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_USER_NS is not set
+# CONFIG_PID_NS is not set
+# CONFIG_NET_NS is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+CONFIG_RD_BZIP2=y
+CONFIG_RD_LZMA=y
+CONFIG_RD_LZO=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
+# CONFIG_EMBEDDED is not set
+CONFIG_UID16=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_AIO=y
+CONFIG_HAVE_PERF_EVENTS=y
+CONFIG_PERF_USE_VMALLOC=y
+
+#
+# Kernel Performance Events And Counters
+#
+# CONFIG_PERF_EVENTS is not set
+# CONFIG_PERF_COUNTERS is not set
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_COMPAT_BRK=y
+CONFIG_SLAB=y
+# CONFIG_SLUB is not set
+# CONFIG_SLOB is not set
+# CONFIG_PROFILING is not set
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_KPROBES is not set
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_HAVE_CLK=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_SLOW_WORK is not set
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_BLOCK=y
+# CONFIG_LBDAF is not set
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+# CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_IOSCHED_CFQ=y
+# CONFIG_DEFAULT_DEADLINE is not set
+CONFIG_DEFAULT_CFQ=y
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="cfq"
+# CONFIG_INLINE_SPIN_TRYLOCK is not set
+# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK is not set
+# CONFIG_INLINE_SPIN_LOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
+# CONFIG_INLINE_SPIN_UNLOCK is not set
+# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
+# CONFIG_INLINE_SPIN_UNLOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_READ_TRYLOCK is not set
+# CONFIG_INLINE_READ_LOCK is not set
+# CONFIG_INLINE_READ_LOCK_BH is not set
+# CONFIG_INLINE_READ_LOCK_IRQ is not set
+# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
+# CONFIG_INLINE_READ_UNLOCK is not set
+# CONFIG_INLINE_READ_UNLOCK_BH is not set
+# CONFIG_INLINE_READ_UNLOCK_IRQ is not set
+# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_WRITE_TRYLOCK is not set
+# CONFIG_INLINE_WRITE_LOCK is not set
+# CONFIG_INLINE_WRITE_LOCK_BH is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
+# CONFIG_INLINE_WRITE_UNLOCK is not set
+# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
+# CONFIG_INLINE_WRITE_UNLOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
+# CONFIG_MUTEX_SPIN_ON_OWNER is not set
+CONFIG_FREEZER=y
+
+#
+# System Type
+#
+CONFIG_MMU=y
+# CONFIG_ARCH_AAEC2000 is not set
+# CONFIG_ARCH_INTEGRATOR is not set
+# CONFIG_ARCH_REALVIEW is not set
+# CONFIG_ARCH_VERSATILE is not set
+CONFIG_ARCH_AT91=y
+# CONFIG_ARCH_CLPS711X is not set
+# CONFIG_ARCH_GEMINI is not set
+# CONFIG_ARCH_EBSA110 is not set
+# CONFIG_ARCH_EP93XX is not set
+# CONFIG_ARCH_FOOTBRIDGE is not set
+# CONFIG_ARCH_MXC is not set
+# CONFIG_ARCH_STMP3XXX is not set
+# CONFIG_ARCH_NETX is not set
+# CONFIG_ARCH_H720X is not set
+# CONFIG_ARCH_NOMADIK is not set
+# CONFIG_ARCH_IOP13XX is not set
+# CONFIG_ARCH_IOP32X is not set
+# CONFIG_ARCH_IOP33X is not set
+# CONFIG_ARCH_IXP23XX is not set
+# CONFIG_ARCH_IXP2000 is not set
+# CONFIG_ARCH_IXP4XX is not set
+# CONFIG_ARCH_L7200 is not set
+# CONFIG_ARCH_DOVE is not set
+# CONFIG_ARCH_KIRKWOOD is not set
+# CONFIG_ARCH_LOKI is not set
+# CONFIG_ARCH_MV78XX0 is not set
+# CONFIG_ARCH_ORION5X is not set
+# CONFIG_ARCH_MMP is not set
+# CONFIG_ARCH_KS8695 is not set
+# CONFIG_ARCH_NS9XXX is not set
+# CONFIG_ARCH_W90X900 is not set
+# CONFIG_ARCH_NUC93X is not set
+# CONFIG_ARCH_PNX4008 is not set
+# CONFIG_ARCH_PXA is not set
+# CONFIG_ARCH_MSM is not set
+# CONFIG_ARCH_RPC is not set
+# CONFIG_ARCH_SA1100 is not set
+# CONFIG_ARCH_S3C2410 is not set
+# CONFIG_ARCH_S3C64XX is not set
+# CONFIG_ARCH_S5PC1XX is not set
+# CONFIG_ARCH_SHARK is not set
+# CONFIG_ARCH_LH7A40X is not set
+# CONFIG_ARCH_U300 is not set
+# CONFIG_ARCH_DAVINCI is not set
+# CONFIG_ARCH_OMAP is not set
+# CONFIG_ARCH_BCMRING is not set
+# CONFIG_ARCH_U8500 is not set
+CONFIG_HAVE_AT91_USART3=y
+CONFIG_HAVE_AT91_USART4=y
+CONFIG_HAVE_AT91_USART5=y
+
+#
+# Atmel AT91 System-on-Chip
+#
+# CONFIG_ARCH_AT91RM9200 is not set
+# CONFIG_ARCH_AT91SAM9260 is not set
+# CONFIG_ARCH_AT91SAM9261 is not set
+# CONFIG_ARCH_AT91SAM9G10 is not set
+# CONFIG_ARCH_AT91SAM9263 is not set
+# CONFIG_ARCH_AT91SAM9RL is not set
+CONFIG_ARCH_AT91SAM9G20=y
+# CONFIG_ARCH_AT91SAM9G45 is not set
+# CONFIG_ARCH_AT91CAP9 is not set
+# CONFIG_ARCH_AT572D940HF is not set
+# CONFIG_ARCH_AT91X40 is not set
+CONFIG_AT91_PMC_UNIT=y
+
+#
+# AT91SAM9G20 Board Type
+#
+# CONFIG_MACH_AT91SAM9G20EK is not set
+# CONFIG_MACH_AT91SAM9G20EK_2MMC is not set
+# CONFIG_MACH_CPU9G20 is not set
+CONFIG_MACH_PORTUXG20=y
+CONFIG_MACH_STAMP9G20=y
+
+#
+# AT91 Board Options
+#
+
+#
+# AT91 Feature Selections
+#
+CONFIG_AT91_PROGRAMMABLE_CLOCKS=y
+CONFIG_AT91_SLOW_CLOCK=y
+CONFIG_AT91_TIMER_HZ=100
+CONFIG_AT91_EARLY_DBGU=y
+# CONFIG_AT91_EARLY_USART0 is not set
+# CONFIG_AT91_EARLY_USART1 is not set
+# CONFIG_AT91_EARLY_USART2 is not set
+# CONFIG_AT91_EARLY_USART3 is not set
+# CONFIG_AT91_EARLY_USART4 is not set
+# CONFIG_AT91_EARLY_USART5 is not set
+
+#
+# Processor Type
+#
+CONFIG_CPU_ARM926T=y
+CONFIG_CPU_32v5=y
+CONFIG_CPU_ABRT_EV5TJ=y
+CONFIG_CPU_PABRT_LEGACY=y
+CONFIG_CPU_CACHE_VIVT=y
+CONFIG_CPU_COPY_V4WB=y
+CONFIG_CPU_TLB_V4WBI=y
+CONFIG_CPU_CP15=y
+CONFIG_CPU_CP15_MMU=y
+
+#
+# Processor Features
+#
+CONFIG_ARM_THUMB=y
+# CONFIG_CPU_ICACHE_DISABLE is not set
+# CONFIG_CPU_DCACHE_DISABLE is not set
+# CONFIG_CPU_DCACHE_WRITETHROUGH is not set
+# CONFIG_CPU_CACHE_ROUND_ROBIN is not set
+CONFIG_ARM_L1_CACHE_SHIFT=5
+
+#
+# Bus support
+#
+# CONFIG_PCI_SYSCALL is not set
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_PCCARD is not set
+
+#
+# Kernel Features
+#
+CONFIG_TICK_ONESHOT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+CONFIG_VMSPLIT_3G=y
+# CONFIG_VMSPLIT_2G is not set
+# CONFIG_VMSPLIT_1G is not set
+CONFIG_PAGE_OFFSET=0xC0000000
+# CONFIG_PREEMPT_NONE is not set
+# CONFIG_PREEMPT_VOLUNTARY is not set
+CONFIG_PREEMPT=y
+CONFIG_HZ=100
+CONFIG_AEABI=y
+# CONFIG_OABI_COMPAT is not set
+# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set
+# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set
+# CONFIG_HIGHMEM is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=999999
+# CONFIG_PHYS_ADDR_T_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=0
+CONFIG_VIRT_TO_BUS=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+# CONFIG_LEDS is not set
+CONFIG_ALIGNMENT_TRAP=y
+# CONFIG_UACCESS_WITH_MEMCPY is not set
+
+#
+# Boot options
+#
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_CMDLINE="mem=64M console=ttyS0,115200 initrd=0x21100000,3145728 root=/dev/ram0 rw"
+# CONFIG_XIP_KERNEL is not set
+CONFIG_KEXEC=y
+CONFIG_ATAGS_PROC=y
+
+#
+# CPU Power Management
+#
+CONFIG_CPU_IDLE=y
+CONFIG_CPU_IDLE_GOV_LADDER=y
+CONFIG_CPU_IDLE_GOV_MENU=y
+
+#
+# Floating point emulation
+#
+
+#
+# At least one emulation must be selected
+#
+# CONFIG_VFP is not set
+
+#
+# Userspace binary formats
+#
+CONFIG_BINFMT_ELF=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_HAVE_AOUT=y
+# CONFIG_BINFMT_AOUT is not set
+# CONFIG_BINFMT_MISC is not set
+
+#
+# Power management options
+#
+CONFIG_PM=y
+# CONFIG_PM_DEBUG is not set
+CONFIG_PM_SLEEP=y
+CONFIG_SUSPEND=y
+CONFIG_SUSPEND_FREEZER=y
+# CONFIG_APM_EMULATION is not set
+# CONFIG_PM_RUNTIME is not set
+CONFIG_PM_OPS=y
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+# CONFIG_IP_PNP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+# CONFIG_INET_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
+# CONFIG_IPV6 is not set
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NETFILTER is not set
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_NET_DSA is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
+# CONFIG_NET_SCHED is not set
+# CONFIG_DCB is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_AF_RXRPC is not set
+# CONFIG_WIRELESS is not set
+# CONFIG_WIMAX is not set
+# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=y
+CONFIG_FIRMWARE_IN_KERNEL=y
+CONFIG_EXTRA_FIRMWARE=""
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_CONNECTOR is not set
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
+CONFIG_MTD_CONCAT=y
+CONFIG_MTD_PARTITIONS=y
+# CONFIG_MTD_REDBOOT_PARTS is not set
+CONFIG_MTD_CMDLINE_PARTS=y
+# CONFIG_MTD_AFS_PARTS is not set
+# CONFIG_MTD_AR7_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLKDEVS=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+# CONFIG_MTD_OOPS is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+# CONFIG_MTD_CFI is not set
+# CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+CONFIG_MTD_DATAFLASH=y
+# CONFIG_MTD_DATAFLASH_WRITE_VERIFY is not set
+# CONFIG_MTD_DATAFLASH_OTP is not set
+# CONFIG_MTD_M25P80 is not set
+# CONFIG_MTD_SST25L is not set
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+CONFIG_MTD_NAND=y
+# CONFIG_MTD_NAND_VERIFY_WRITE is not set
+# CONFIG_MTD_NAND_ECC_SMC is not set
+# CONFIG_MTD_NAND_MUSEUM_IDS is not set
+# CONFIG_MTD_NAND_GPIO is not set
+CONFIG_MTD_NAND_IDS=y
+# CONFIG_MTD_NAND_DISKONCHIP is not set
+CONFIG_MTD_NAND_ATMEL=y
+# CONFIG_MTD_NAND_ATMEL_ECC_HW is not set
+CONFIG_MTD_NAND_ATMEL_ECC_SOFT=y
+# CONFIG_MTD_NAND_ATMEL_ECC_NONE is not set
+# CONFIG_MTD_NAND_NANDSIM is not set
+# CONFIG_MTD_NAND_PLATFORM is not set
+# CONFIG_MTD_ALAUDA is not set
+# CONFIG_MTD_ONENAND is not set
+
+#
+# LPDDR flash memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
+
+#
+# UBI - Unsorted block images
+#
+# CONFIG_MTD_UBI is not set
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=y
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+
+#
+# DRBD disabled because PROC_FS, INET or CONNECTOR not selected
+#
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_UB is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=8192
+# CONFIG_BLK_DEV_XIP is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+# CONFIG_MG_DISK is not set
+# CONFIG_MISC_DEVICES is not set
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+CONFIG_SCSI=y
+CONFIG_SCSI_DMA=y
+# CONFIG_SCSI_TGT is not set
+# CONFIG_SCSI_NETLINK is not set
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=y
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+# CONFIG_BLK_DEV_SR is not set
+# CONFIG_CHR_DEV_SG is not set
+# CONFIG_CHR_DEV_SCH is not set
+CONFIG_SCSI_MULTI_LUN=y
+# CONFIG_SCSI_CONSTANTS is not set
+# CONFIG_SCSI_LOGGING is not set
+# CONFIG_SCSI_SCAN_ASYNC is not set
+CONFIG_SCSI_WAIT_SCAN=m
+
+#
+# SCSI Transports
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+# CONFIG_SCSI_ISCSI_ATTRS is not set
+# CONFIG_SCSI_SAS_LIBSAS is not set
+# CONFIG_SCSI_SRP_ATTRS is not set
+# CONFIG_SCSI_LOWLEVEL is not set
+# CONFIG_SCSI_DH is not set
+# CONFIG_SCSI_OSD_INITIATOR is not set
+# CONFIG_ATA is not set
+# CONFIG_MD is not set
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+# CONFIG_VETH is not set
+CONFIG_PHYLIB=y
+
+#
+# MII PHY device drivers
+#
+# CONFIG_MARVELL_PHY is not set
+# CONFIG_DAVICOM_PHY is not set
+# CONFIG_QSEMI_PHY is not set
+# CONFIG_LXT_PHY is not set
+# CONFIG_CICADA_PHY is not set
+# CONFIG_VITESSE_PHY is not set
+# CONFIG_SMSC_PHY is not set
+# CONFIG_BROADCOM_PHY is not set
+# CONFIG_ICPLUS_PHY is not set
+# CONFIG_REALTEK_PHY is not set
+# CONFIG_NATIONAL_PHY is not set
+# CONFIG_STE10XP is not set
+# CONFIG_LSI_ET1011C_PHY is not set
+# CONFIG_FIXED_PHY is not set
+# CONFIG_MDIO_BITBANG is not set
+CONFIG_NET_ETHERNET=y
+# CONFIG_MII is not set
+CONFIG_MACB=y
+# CONFIG_AX88796 is not set
+# CONFIG_SMC91X is not set
+# CONFIG_DM9000 is not set
+# CONFIG_ENC28J60 is not set
+# CONFIG_ETHOC is not set
+# CONFIG_SMC911X is not set
+# CONFIG_SMSC911X is not set
+# CONFIG_DNET is not set
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
+# CONFIG_B44 is not set
+# CONFIG_KS8842 is not set
+# CONFIG_KS8851 is not set
+# CONFIG_KS8851_MLL is not set
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+# CONFIG_WLAN is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
+
+#
+# USB Network Adapters
+#
+# CONFIG_USB_CATC is not set
+# CONFIG_USB_KAWETH is not set
+# CONFIG_USB_PEGASUS is not set
+# CONFIG_USB_RTL8150 is not set
+# CONFIG_USB_USBNET is not set
+# CONFIG_WAN is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_ISDN is not set
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+# CONFIG_INPUT_FF_MEMLESS is not set
+# CONFIG_INPUT_POLLDEV is not set
+# CONFIG_INPUT_SPARSEKMAP is not set
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=320
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=240
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_EVDEV is not set
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TABLET is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_VT_HW_CONSOLE_BINDING is not set
+CONFIG_DEVKMEM=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_ATMEL=y
+CONFIG_SERIAL_ATMEL_CONSOLE=y
+CONFIG_SERIAL_ATMEL_PDC=y
+# CONFIG_SERIAL_ATMEL_TTYAT is not set
+# CONFIG_SERIAL_MAX3100 is not set
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_TIMBERDALE is not set
+CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_IPMI_HANDLER is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_R3964 is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
+CONFIG_I2C=y
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_HELPER_AUTO=y
+CONFIG_I2C_ALGOBIT=y
+
+#
+# I2C Hardware Bus support
+#
+
+#
+# I2C system bus drivers (mostly embedded / system-on-chip)
+#
+# CONFIG_I2C_DESIGNWARE is not set
+CONFIG_I2C_GPIO=y
+# CONFIG_I2C_OCORES is not set
+# CONFIG_I2C_SIMTEC is not set
+# CONFIG_I2C_XILINX is not set
+
+#
+# External I2C/SMBus adapter drivers
+#
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_TAOS_EVM is not set
+# CONFIG_I2C_TINY_USB is not set
+
+#
+# Other I2C/SMBus bus drivers
+#
+# CONFIG_I2C_PCA_PLATFORM is not set
+# CONFIG_I2C_STUB is not set
+
+#
+# Miscellaneous I2C Chip support
+#
+# CONFIG_SENSORS_TSL2550 is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
+CONFIG_SPI=y
+CONFIG_SPI_MASTER=y
+
+#
+# SPI Master Controller Drivers
+#
+CONFIG_SPI_ATMEL=y
+# CONFIG_SPI_BITBANG is not set
+# CONFIG_SPI_GPIO is not set
+# CONFIG_SPI_XILINX is not set
+# CONFIG_SPI_DESIGNWARE is not set
+
+#
+# SPI Protocol Masters
+#
+CONFIG_SPI_SPIDEV=y
+# CONFIG_SPI_TLE62X0 is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
+CONFIG_ARCH_REQUIRE_GPIOLIB=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_SYSFS=y
+
+#
+# Memory mapped GPIO expanders:
+#
+# CONFIG_GPIO_IT8761E is not set
+
+#
+# I2C GPIO expanders:
+#
+# CONFIG_GPIO_MAX7300 is not set
+# CONFIG_GPIO_MAX732X is not set
+# CONFIG_GPIO_PCA953X is not set
+# CONFIG_GPIO_PCF857X is not set
+# CONFIG_GPIO_ADP5588 is not set
+
+#
+# PCI GPIO expanders:
+#
+
+#
+# SPI GPIO expanders:
+#
+# CONFIG_GPIO_MAX7301 is not set
+# CONFIG_GPIO_MCP23S08 is not set
+# CONFIG_GPIO_MC33880 is not set
+
+#
+# AC97 GPIO expanders:
+#
+CONFIG_W1=y
+
+#
+# 1-wire Bus Masters
+#
+# CONFIG_W1_MASTER_DS2490 is not set
+# CONFIG_W1_MASTER_DS2482 is not set
+# CONFIG_W1_MASTER_DS1WM is not set
+CONFIG_W1_MASTER_GPIO=y
+
+#
+# 1-wire Slaves
+#
+CONFIG_W1_SLAVE_THERM=y
+# CONFIG_W1_SLAVE_SMEM is not set
+CONFIG_W1_SLAVE_DS2431=y
+# CONFIG_W1_SLAVE_DS2433 is not set
+# CONFIG_W1_SLAVE_DS2760 is not set
+# CONFIG_W1_SLAVE_BQ27000 is not set
+# CONFIG_POWER_SUPPLY is not set
+# CONFIG_HWMON is not set
+# CONFIG_THERMAL is not set
+CONFIG_WATCHDOG=y
+# CONFIG_WATCHDOG_NOWAYOUT is not set
+
+#
+# Watchdog Device Drivers
+#
+# CONFIG_SOFT_WATCHDOG is not set
+# CONFIG_MAX63XX_WATCHDOG is not set
+CONFIG_AT91SAM9X_WATCHDOG=y
+
+#
+# USB-based Watchdog Cards
+#
+# CONFIG_USBPCWATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+# CONFIG_SSB is not set
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_88PM860X is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_MFD_ASIC3 is not set
+# CONFIG_HTC_EGPIO is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_HTC_I2CPLD is not set
+# CONFIG_TPS65010 is not set
+# CONFIG_TWL4030_CORE is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_T7L66XB is not set
+# CONFIG_MFD_TC6387XB is not set
+# CONFIG_MFD_TC6393XB is not set
+# CONFIG_PMIC_DA903X is not set
+# CONFIG_PMIC_ADP5520 is not set
+# CONFIG_MFD_MAX8925 is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM831X is not set
+# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_WM8994 is not set
+# CONFIG_MFD_PCF50633 is not set
+# CONFIG_MFD_MC13783 is not set
+# CONFIG_AB3100_CORE is not set
+# CONFIG_EZX_PCAP is not set
+# CONFIG_AB4500_CORE is not set
+# CONFIG_REGULATOR is not set
+# CONFIG_MEDIA_SUPPORT is not set
+
+#
+# Graphics support
+#
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+# CONFIG_FB is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
+
+#
+# Console display driver support
+#
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_DUMMY_CONSOLE=y
+# CONFIG_SOUND is not set
+# CONFIG_HID_SUPPORT is not set
+CONFIG_USB_SUPPORT=y
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
+# CONFIG_USB_ARCH_HAS_EHCI is not set
+CONFIG_USB=y
+# CONFIG_USB_DEBUG is not set
+# CONFIG_USB_ANNOUNCE_NEW_DEVICES is not set
+
+#
+# Miscellaneous USB options
+#
+CONFIG_USB_DEVICEFS=y
+# CONFIG_USB_DEVICE_CLASS is not set
+# CONFIG_USB_DYNAMIC_MINORS is not set
+# CONFIG_USB_OTG is not set
+CONFIG_USB_MON=y
+# CONFIG_USB_WUSB is not set
+# CONFIG_USB_WUSB_CBAF is not set
+
+#
+# USB Host Controller Drivers
+#
+# CONFIG_USB_C67X00_HCD is not set
+# CONFIG_USB_OXU210HP_HCD is not set
+# CONFIG_USB_ISP116X_HCD is not set
+# CONFIG_USB_ISP1760_HCD is not set
+# CONFIG_USB_ISP1362_HCD is not set
+CONFIG_USB_OHCI_HCD=y
+# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set
+# CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set
+CONFIG_USB_OHCI_LITTLE_ENDIAN=y
+# CONFIG_USB_SL811_HCD is not set
+# CONFIG_USB_R8A66597_HCD is not set
+# CONFIG_USB_HWA_HCD is not set
+# CONFIG_USB_MUSB_HDRC is not set
+# CONFIG_USB_GADGET_MUSB_HDRC is not set
+
+#
+# USB Device Class drivers
+#
+# CONFIG_USB_ACM is not set
+# CONFIG_USB_PRINTER is not set
+# CONFIG_USB_WDM is not set
+# CONFIG_USB_TMC is not set
+
+#
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
+#
+
+#
+# also be needed; see USB_STORAGE Help for more info
+#
+CONFIG_USB_STORAGE=y
+# CONFIG_USB_STORAGE_DEBUG is not set
+# CONFIG_USB_STORAGE_DATAFAB is not set
+# CONFIG_USB_STORAGE_FREECOM is not set
+# CONFIG_USB_STORAGE_ISD200 is not set
+# CONFIG_USB_STORAGE_USBAT is not set
+# CONFIG_USB_STORAGE_SDDR09 is not set
+# CONFIG_USB_STORAGE_SDDR55 is not set
+# CONFIG_USB_STORAGE_JUMPSHOT is not set
+# CONFIG_USB_STORAGE_ALAUDA is not set
+# CONFIG_USB_STORAGE_ONETOUCH is not set
+# CONFIG_USB_STORAGE_KARMA is not set
+# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set
+# CONFIG_USB_LIBUSUAL is not set
+
+#
+# USB Imaging devices
+#
+# CONFIG_USB_MDC800 is not set
+# CONFIG_USB_MICROTEK is not set
+
+#
+# USB port drivers
+#
+# CONFIG_USB_SERIAL is not set
+
+#
+# USB Miscellaneous drivers
+#
+# CONFIG_USB_EMI62 is not set
+# CONFIG_USB_EMI26 is not set
+# CONFIG_USB_ADUTUX is not set
+# CONFIG_USB_SEVSEG is not set
+# CONFIG_USB_RIO500 is not set
+# CONFIG_USB_LEGOTOWER is not set
+# CONFIG_USB_LCD is not set
+# CONFIG_USB_LED is not set
+# CONFIG_USB_CYPRESS_CY7C63 is not set
+# CONFIG_USB_CYTHERM is not set
+# CONFIG_USB_IDMOUSE is not set
+# CONFIG_USB_FTDI_ELAN is not set
+# CONFIG_USB_APPLEDISPLAY is not set
+# CONFIG_USB_LD is not set
+# CONFIG_USB_TRANCEVIBRATOR is not set
+# CONFIG_USB_IOWARRIOR is not set
+# CONFIG_USB_TEST is not set
+# CONFIG_USB_ISIGHTFW is not set
+CONFIG_USB_GADGET=m
+# CONFIG_USB_GADGET_DEBUG_FILES is not set
+CONFIG_USB_GADGET_VBUS_DRAW=2
+CONFIG_USB_GADGET_SELECTED=y
+CONFIG_USB_GADGET_AT91=y
+CONFIG_USB_AT91=m
+# CONFIG_USB_GADGET_ATMEL_USBA is not set
+# CONFIG_USB_GADGET_FSL_USB2 is not set
+# CONFIG_USB_GADGET_LH7A40X is not set
+# CONFIG_USB_GADGET_OMAP is not set
+# CONFIG_USB_GADGET_PXA25X is not set
+# CONFIG_USB_GADGET_R8A66597 is not set
+# CONFIG_USB_GADGET_PXA27X is not set
+# CONFIG_USB_GADGET_S3C_HSOTG is not set
+# CONFIG_USB_GADGET_IMX is not set
+# CONFIG_USB_GADGET_S3C2410 is not set
+# CONFIG_USB_GADGET_M66592 is not set
+# CONFIG_USB_GADGET_AMD5536UDC is not set
+# CONFIG_USB_GADGET_FSL_QE is not set
+# CONFIG_USB_GADGET_CI13XXX is not set
+# CONFIG_USB_GADGET_NET2280 is not set
+# CONFIG_USB_GADGET_GOKU is not set
+# CONFIG_USB_GADGET_LANGWELL is not set
+# CONFIG_USB_GADGET_DUMMY_HCD is not set
+# CONFIG_USB_GADGET_DUALSPEED is not set
+CONFIG_USB_ZERO=m
+# CONFIG_USB_AUDIO is not set
+CONFIG_USB_ETH=m
+CONFIG_USB_ETH_RNDIS=y
+# CONFIG_USB_ETH_EEM is not set
+# CONFIG_USB_GADGETFS is not set
+CONFIG_USB_FILE_STORAGE=m
+# CONFIG_USB_FILE_STORAGE_TEST is not set
+# CONFIG_USB_MASS_STORAGE is not set
+CONFIG_USB_G_SERIAL=m
+# CONFIG_USB_MIDI_GADGET is not set
+# CONFIG_USB_G_PRINTER is not set
+# CONFIG_USB_CDC_COMPOSITE is not set
+# CONFIG_USB_G_NOKIA is not set
+# CONFIG_USB_G_MULTI is not set
+
+#
+# OTG and related infrastructure
+#
+# CONFIG_USB_GPIO_VBUS is not set
+# CONFIG_USB_ULPI is not set
+# CONFIG_NOP_USB_XCEIV is not set
+CONFIG_MMC=y
+# CONFIG_MMC_DEBUG is not set
+# CONFIG_MMC_UNSAFE_RESUME is not set
+
+#
+# MMC/SD/SDIO Card Drivers
+#
+CONFIG_MMC_BLOCK=y
+CONFIG_MMC_BLOCK_BOUNCE=y
+# CONFIG_SDIO_UART is not set
+# CONFIG_MMC_TEST is not set
+
+#
+# MMC/SD/SDIO Host Controller Drivers
+#
+# CONFIG_MMC_SDHCI is not set
+# CONFIG_MMC_AT91 is not set
+CONFIG_MMC_ATMELMCI=y
+# CONFIG_MMC_SPI is not set
+# CONFIG_MEMSTICK is not set
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+
+#
+# LED drivers
+#
+# CONFIG_LEDS_PCA9532 is not set
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_GPIO_PLATFORM=y
+# CONFIG_LEDS_LP3944 is not set
+# CONFIG_LEDS_PCA955X is not set
+# CONFIG_LEDS_DAC124S085 is not set
+# CONFIG_LEDS_BD2802 is not set
+# CONFIG_LEDS_LT3593 is not set
+
+#
+# LED Triggers
+#
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
+# CONFIG_LEDS_TRIGGER_GPIO is not set
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+
+#
+# iptables trigger is under Netfilter config (LED target)
+#
+# CONFIG_ACCESSIBILITY is not set
+CONFIG_RTC_LIB=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_HCTOSYS=y
+CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
+# CONFIG_RTC_DEBUG is not set
+
+#
+# RTC interfaces
+#
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
+# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
+# CONFIG_RTC_DRV_TEST is not set
+
+#
+# I2C RTC drivers
+#
+# CONFIG_RTC_DRV_DS1307 is not set
+# CONFIG_RTC_DRV_DS1374 is not set
+# CONFIG_RTC_DRV_DS1672 is not set
+# CONFIG_RTC_DRV_MAX6900 is not set
+# CONFIG_RTC_DRV_RS5C372 is not set
+# CONFIG_RTC_DRV_ISL1208 is not set
+# CONFIG_RTC_DRV_X1205 is not set
+# CONFIG_RTC_DRV_PCF8563 is not set
+# CONFIG_RTC_DRV_PCF8583 is not set
+# CONFIG_RTC_DRV_M41T80 is not set
+# CONFIG_RTC_DRV_BQ32K is not set
+# CONFIG_RTC_DRV_S35390A is not set
+# CONFIG_RTC_DRV_FM3130 is not set
+# CONFIG_RTC_DRV_RX8581 is not set
+# CONFIG_RTC_DRV_RX8025 is not set
+
+#
+# SPI RTC drivers
+#
+# CONFIG_RTC_DRV_M41T94 is not set
+# CONFIG_RTC_DRV_DS1305 is not set
+# CONFIG_RTC_DRV_DS1390 is not set
+# CONFIG_RTC_DRV_MAX6902 is not set
+# CONFIG_RTC_DRV_R9701 is not set
+# CONFIG_RTC_DRV_RS5C348 is not set
+# CONFIG_RTC_DRV_DS3234 is not set
+# CONFIG_RTC_DRV_PCF2123 is not set
+
+#
+# Platform RTC drivers
+#
+# CONFIG_RTC_DRV_CMOS is not set
+# CONFIG_RTC_DRV_DS1286 is not set
+# CONFIG_RTC_DRV_DS1511 is not set
+# CONFIG_RTC_DRV_DS1553 is not set
+# CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_STK17TA8 is not set
+# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T35 is not set
+# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_MSM6242 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
+# CONFIG_RTC_DRV_RP5C01 is not set
+# CONFIG_RTC_DRV_V3020 is not set
+
+#
+# on-CPU RTC drivers
+#
+CONFIG_RTC_DRV_AT91SAM9=y
+CONFIG_RTC_DRV_AT91SAM9_RTT=0
+CONFIG_RTC_DRV_AT91SAM9_GPBR=0
+# CONFIG_DMADEVICES is not set
+# CONFIG_AUXDISPLAY is not set
+# CONFIG_UIO is not set
+
+#
+# TI VLYNQ
+#
+# CONFIG_STAGING is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT3_FS_XATTR=y
+# CONFIG_EXT3_FS_POSIX_ACL is not set
+# CONFIG_EXT3_FS_SECURITY is not set
+# CONFIG_EXT4_FS is not set
+CONFIG_JBD=y
+CONFIG_FS_MBCACHE=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
+CONFIG_DNOTIFY=y
+CONFIG_INOTIFY=y
+CONFIG_INOTIFY_USER=y
+# CONFIG_QUOTA is not set
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=y
+# CONFIG_MSDOS_FS is not set
+CONFIG_VFAT_FS=y
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_HUGETLB_PAGE is not set
+# CONFIG_CONFIGFS_FS is not set
+CONFIG_MISC_FILESYSTEMS=y
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_FS_DEBUG=0
+CONFIG_JFFS2_FS_WRITEBUFFER=y
+# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
+CONFIG_JFFS2_SUMMARY=y
+# CONFIG_JFFS2_FS_XATTR is not set
+# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
+CONFIG_JFFS2_ZLIB=y
+# CONFIG_JFFS2_LZO is not set
+CONFIG_JFFS2_RTIME=y
+# CONFIG_JFFS2_RUBIN is not set
+# CONFIG_LOGFS is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_SQUASHFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V3_ACL is not set
+# CONFIG_NFS_V4 is not set
+# CONFIG_NFSD is not set
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=y
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+CONFIG_NLS_CODEPAGE_850=y
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+# CONFIG_NLS_ASCII is not set
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+CONFIG_NLS_ISO8859_15=y
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+CONFIG_NLS_UTF8=y
+# CONFIG_DLM is not set
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+CONFIG_ENABLE_WARN_DEPRECATED=y
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
+# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_STRIP_ASM_SYMS is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+# CONFIG_DEBUG_FS is not set
+# CONFIG_HEADERS_CHECK is not set
+# CONFIG_DEBUG_KERNEL is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_FRAME_POINTER=y
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_LATENCYTOP is not set
+# CONFIG_SYSCTL_SYSCALL_CHECK is not set
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_TRACING_SUPPORT=y
+# CONFIG_FTRACE is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_ARM_UNWIND is not set
+# CONFIG_DEBUG_USER is not set
+# CONFIG_OC_ETM is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
+# CONFIG_DEFAULT_SECURITY_SELINUX is not set
+# CONFIG_DEFAULT_SECURITY_SMACK is not set
+# CONFIG_DEFAULT_SECURITY_TOMOYO is not set
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_DEFAULT_SECURITY=""
+# CONFIG_CRYPTO is not set
+# CONFIG_BINARY_PRINTF is not set
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+CONFIG_GENERIC_FIND_LAST_BIT=y
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
+# CONFIG_CRC_T10DIF is not set
+# CONFIG_CRC_ITU_T is not set
+CONFIG_CRC32=y
+# CONFIG_CRC7 is not set
+# CONFIG_LIBCRC32C is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_LZO_DECOMPRESS=y
+CONFIG_DECOMPRESS_GZIP=y
+CONFIG_DECOMPRESS_BZIP2=y
+CONFIG_DECOMPRESS_LZMA=y
+CONFIG_DECOMPRESS_LZO=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
+CONFIG_NLATTR=y
+CONFIG_GENERIC_ATOMIC64=y
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index e8ddec2cb158..a0162fa94564 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -24,7 +24,7 @@
* strex/ldrex monitor on some implementations. The reason we can use it for
* atomic_set() is the clrex or dummy strex done on every exception return.
*/
-#define atomic_read(v) ((v)->counter)
+#define atomic_read(v) (*(volatile int *)&(v)->counter)
#define atomic_set(v,i) (((v)->counter) = (i))
#if __LINUX_ARM_ARCH__ >= 6
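The cast to (volatile int *) forces a fresh load of v->counter on every call; without it the compiler is free to keep the value cached in a register across a polling loop. A minimal sketch of the difference, using a hypothetical user-space polling loop rather than real kernel code:

/* Illustration only: why the volatile cast matters for a polled read. */
typedef struct { int counter; } example_atomic_t;

#define example_read_plain(v)    ((v)->counter)
#define example_read_volatile(v) (*(volatile int *)&(v)->counter)

static void wait_for_flag(example_atomic_t *flag)
{
	/* With example_read_plain() the compiler may hoist the load out of
	 * the loop and spin forever; the volatile form re-reads memory on
	 * every iteration. */
	while (example_read_volatile(flag) == 0)
		;
}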
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 0d08d4170b64..4656a24058d2 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -371,6 +371,10 @@ static inline void __flush_icache_all(void)
#ifdef CONFIG_ARM_ERRATA_411920
extern void v6_icache_inval_all(void);
v6_icache_inval_all();
+#elif defined(CONFIG_SMP) && __LINUX_ARM_ARCH__ >= 7
+ asm("mcr p15, 0, %0, c7, c1, 0 @ invalidate I-cache inner shareable\n"
+ :
+ : "r" (0));
#else
asm("mcr p15, 0, %0, c7, c5, 0 @ invalidate I-cache\n"
:
diff --git a/arch/arm/include/asm/hardirq.h b/arch/arm/include/asm/hardirq.h
index 182310b99195..6d7485aff955 100644
--- a/arch/arm/include/asm/hardirq.h
+++ b/arch/arm/include/asm/hardirq.h
@@ -12,7 +12,9 @@ typedef struct {
#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
-#if NR_IRQS > 256
+#if NR_IRQS > 512
+#define HARDIRQ_BITS 10
+#elif NR_IRQS > 256
#define HARDIRQ_BITS 9
#else
#define HARDIRQ_BITS 8
diff --git a/arch/arm/include/asm/hardware/arm_timer.h b/arch/arm/include/asm/hardware/arm_timer.h
index 04be3bdf46b8..c0f4e7bf22de 100644
--- a/arch/arm/include/asm/hardware/arm_timer.h
+++ b/arch/arm/include/asm/hardware/arm_timer.h
@@ -1,21 +1,30 @@
#ifndef __ASM_ARM_HARDWARE_ARM_TIMER_H
#define __ASM_ARM_HARDWARE_ARM_TIMER_H
-#define TIMER_LOAD 0x00
-#define TIMER_VALUE 0x04
-#define TIMER_CTRL 0x08
-#define TIMER_CTRL_ONESHOT (1 << 0)
-#define TIMER_CTRL_32BIT (1 << 1)
-#define TIMER_CTRL_DIV1 (0 << 2)
-#define TIMER_CTRL_DIV16 (1 << 2)
-#define TIMER_CTRL_DIV256 (2 << 2)
-#define TIMER_CTRL_IE (1 << 5) /* Interrupt Enable (versatile only) */
-#define TIMER_CTRL_PERIODIC (1 << 6)
-#define TIMER_CTRL_ENABLE (1 << 7)
+/*
+ * ARM timer implementation, found in Integrator, Versatile and Realview
+ * platforms. Not all platforms support all registers and bits in these
+ * registers, so we mark them with A for Integrator AP, C for Integrator
+ * CP, V for Versatile and R for Realview.
+ *
+ * Integrator AP has 16-bit timers, Integrator CP, Versatile and Realview
+ * can have 16-bit or 32-bit selectable via a bit in the control register.
+ */
+#define TIMER_LOAD 0x00 /* ACVR rw */
+#define TIMER_VALUE 0x04 /* ACVR ro */
+#define TIMER_CTRL 0x08 /* ACVR rw */
+#define TIMER_CTRL_ONESHOT (1 << 0) /* CVR */
+#define TIMER_CTRL_32BIT (1 << 1) /* CVR */
+#define TIMER_CTRL_DIV1 (0 << 2) /* ACVR */
+#define TIMER_CTRL_DIV16 (1 << 2) /* ACVR */
+#define TIMER_CTRL_DIV256 (2 << 2) /* ACVR */
+#define TIMER_CTRL_IE (1 << 5) /* VR */
+#define TIMER_CTRL_PERIODIC (1 << 6) /* ACVR */
+#define TIMER_CTRL_ENABLE (1 << 7) /* ACVR */
-#define TIMER_INTCLR 0x0c
-#define TIMER_RIS 0x10
-#define TIMER_MIS 0x14
-#define TIMER_BGLOAD 0x18
+#define TIMER_INTCLR 0x0c /* ACVR wo */
+#define TIMER_RIS 0x10 /* CVR ro */
+#define TIMER_MIS 0x14 /* CVR ro */
+#define TIMER_BGLOAD 0x18 /* CVR rw */
#endif
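As a usage illustration of the annotated register map above, a hedged sketch of programming one of these SP804-style timers for a periodic tick; the base pointer, the clock rate and the use of HZ are assumptions supplied by platform code:

#include <linux/io.h>

static void example_timer_start_periodic(void __iomem *base, unsigned long clk_hz)
{
	writel(0, base + TIMER_CTRL);			/* stop the timer */
	writel(clk_hz / HZ, base + TIMER_LOAD);		/* reload value for one tick */
	writel(TIMER_CTRL_32BIT | TIMER_CTRL_PERIODIC |
	       TIMER_CTRL_IE | TIMER_CTRL_ENABLE,
	       base + TIMER_CTRL);			/* 32-bit, periodic, IRQ enabled */
	/* The interrupt handler acknowledges by writing any value to TIMER_INTCLR. */
}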
diff --git a/arch/arm/include/asm/hardware/cache-l2x0.h b/arch/arm/include/asm/hardware/cache-l2x0.h
index cdb9022716fd..6bcba48800fe 100644
--- a/arch/arm/include/asm/hardware/cache-l2x0.h
+++ b/arch/arm/include/asm/hardware/cache-l2x0.h
@@ -21,6 +21,9 @@
#define __ASM_ARM_HARDWARE_L2X0_H
#define L2X0_CACHE_ID 0x000
+#define L2X0_CACHE_ID_PART_MASK (0xf << 6)
+#define L2X0_CACHE_ID_PART_L210 (1 << 6)
+#define L2X0_CACHE_ID_PART_L310 (3 << 6)
#define L2X0_CACHE_TYPE 0x004
#define L2X0_CTRL 0x100
#define L2X0_AUX_CTRL 0x104
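The new part-number masks let initialisation code tell an L210 from an L310 (PL310) controller before applying part-specific setup. A hedged sketch, assuming an already ioremap'ed controller base:

#include <linux/io.h>

static int example_l2x0_is_l310(void __iomem *l2x0_base)
{
	u32 cache_id = readl(l2x0_base + L2X0_CACHE_ID);

	/* Bits [9:6] hold the part number: 1 = L210, 3 = L310. */
	return (cache_id & L2X0_CACHE_ID_PART_MASK) == L2X0_CACHE_ID_PART_L310;
}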
diff --git a/arch/arm/include/asm/hardware/icst.h b/arch/arm/include/asm/hardware/icst.h
new file mode 100644
index 000000000000..10382a3dcec9
--- /dev/null
+++ b/arch/arm/include/asm/hardware/icst.h
@@ -0,0 +1,59 @@
+/*
+ * arch/arm/include/asm/hardware/icst.h
+ *
+ * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Support functions for calculating clocks/divisors for the ICST
+ * clock generators. See http://www.icst.com/ for more information
+ * on these devices.
+ */
+#ifndef ASMARM_HARDWARE_ICST_H
+#define ASMARM_HARDWARE_ICST_H
+
+struct icst_params {
+ unsigned long ref;
+ unsigned long vco_max; /* inclusive */
+ unsigned long vco_min; /* exclusive */
+ unsigned short vd_min; /* inclusive */
+ unsigned short vd_max; /* inclusive */
+ unsigned char rd_min; /* inclusive */
+ unsigned char rd_max; /* inclusive */
+ const unsigned char *s2div; /* chip specific s2div array */
+ const unsigned char *idx2s; /* chip specific idx2s array */
+};
+
+struct icst_vco {
+ unsigned short v;
+ unsigned char r;
+ unsigned char s;
+};
+
+unsigned long icst_hz(const struct icst_params *p, struct icst_vco vco);
+struct icst_vco icst_hz_to_vco(const struct icst_params *p, unsigned long freq);
+
+/*
+ * ICST307 VCO frequency must be between 6MHz and 200MHz (3.3 or 5V).
+ * This frequency is pre-output divider.
+ */
+#define ICST307_VCO_MIN 6000000
+#define ICST307_VCO_MAX 200000000
+
+extern const unsigned char icst307_s2div[];
+extern const unsigned char icst307_idx2s[];
+
+/*
+ * ICST525 VCO frequency must be between 10MHz and 200MHz (3V) or 320MHz (5V).
+ * This frequency is pre-output divider.
+ */
+#define ICST525_VCO_MIN 10000000
+#define ICST525_VCO_MAX_3V 200000000
+#define ICST525_VCO_MAX_5V 320000000
+
+extern const unsigned char icst525_s2div[];
+extern const unsigned char icst525_idx2s[];
+
+#endif
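A rough sketch of how a platform might use the consolidated API; the parameter values below are illustrative placeholders, not taken from any particular board file:

#include <asm/hardware/icst.h>

static const struct icst_params example_params = {
	.ref		= 24000000,		/* 24 MHz reference clock (assumed) */
	.vco_max	= ICST307_VCO_MAX,
	.vco_min	= ICST307_VCO_MIN,
	.vd_min		= 4 + 8,
	.vd_max		= 511 + 8,
	.rd_min		= 1 + 2,
	.rd_max		= 127 + 2,
	.s2div		= icst307_s2div,
	.idx2s		= icst307_idx2s,
};

static unsigned long example_round_rate(unsigned long rate)
{
	struct icst_vco vco = icst_hz_to_vco(&example_params, rate);

	return icst_hz(&example_params, vco);	/* closest rate the VCO can generate */
}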
diff --git a/arch/arm/include/asm/hardware/icst307.h b/arch/arm/include/asm/hardware/icst307.h
deleted file mode 100644
index 554f128a1046..000000000000
--- a/arch/arm/include/asm/hardware/icst307.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * arch/arm/include/asm/hardware/icst307.h
- *
- * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Support functions for calculating clocks/divisors for the ICS307
- * clock generators. See http://www.icst.com/ for more information
- * on these devices.
- *
- * This file is similar to the icst525.h file
- */
-#ifndef ASMARM_HARDWARE_ICST307_H
-#define ASMARM_HARDWARE_ICST307_H
-
-struct icst307_params {
- unsigned long ref;
- unsigned long vco_max; /* inclusive */
- unsigned short vd_min; /* inclusive */
- unsigned short vd_max; /* inclusive */
- unsigned char rd_min; /* inclusive */
- unsigned char rd_max; /* inclusive */
-};
-
-struct icst307_vco {
- unsigned short v;
- unsigned char r;
- unsigned char s;
-};
-
-unsigned long icst307_khz(const struct icst307_params *p, struct icst307_vco vco);
-struct icst307_vco icst307_khz_to_vco(const struct icst307_params *p, unsigned long freq);
-struct icst307_vco icst307_ps_to_vco(const struct icst307_params *p, unsigned long period);
-
-#endif
diff --git a/arch/arm/include/asm/hardware/icst525.h b/arch/arm/include/asm/hardware/icst525.h
deleted file mode 100644
index 58f0dc43e2ed..000000000000
--- a/arch/arm/include/asm/hardware/icst525.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * arch/arm/include/asm/hardware/icst525.h
- *
- * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Support functions for calculating clocks/divisors for the ICST525
- * clock generators. See http://www.icst.com/ for more information
- * on these devices.
- */
-#ifndef ASMARM_HARDWARE_ICST525_H
-#define ASMARM_HARDWARE_ICST525_H
-
-struct icst525_params {
- unsigned long ref;
- unsigned long vco_max; /* inclusive */
- unsigned short vd_min; /* inclusive */
- unsigned short vd_max; /* inclusive */
- unsigned char rd_min; /* inclusive */
- unsigned char rd_max; /* inclusive */
-};
-
-struct icst525_vco {
- unsigned short v;
- unsigned char r;
- unsigned char s;
-};
-
-unsigned long icst525_khz(const struct icst525_params *p, struct icst525_vco vco);
-struct icst525_vco icst525_khz_to_vco(const struct icst525_params *p, unsigned long freq);
-struct icst525_vco icst525_ps_to_vco(const struct icst525_params *p, unsigned long period);
-
-#endif
diff --git a/arch/arm/include/asm/hardware/pl330.h b/arch/arm/include/asm/hardware/pl330.h
new file mode 100644
index 000000000000..575fa8186ca0
--- /dev/null
+++ b/arch/arm/include/asm/hardware/pl330.h
@@ -0,0 +1,217 @@
+/* linux/include/asm/hardware/pl330.h
+ *
+ * Copyright (C) 2010 Samsung Electronics Co. Ltd.
+ * Jaswinder Singh <jassi.brar@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __PL330_CORE_H
+#define __PL330_CORE_H
+
+#define PL330_MAX_CHAN 8
+#define PL330_MAX_IRQS 32
+#define PL330_MAX_PERI 32
+
+enum pl330_srccachectrl {
+ SCCTRL0 = 0, /* Noncacheable and nonbufferable */
+ SCCTRL1, /* Bufferable only */
+ SCCTRL2, /* Cacheable, but do not allocate */
+ SCCTRL3, /* Cacheable and bufferable, but do not allocate */
+ SINVALID1,
+ SINVALID2,
+ SCCTRL6, /* Cacheable write-through, allocate on reads only */
+ SCCTRL7, /* Cacheable write-back, allocate on reads only */
+};
+
+enum pl330_dstcachectrl {
+ DCCTRL0 = 0, /* Noncacheable and nonbufferable */
+ DCCTRL1, /* Bufferable only */
+ DCCTRL2, /* Cacheable, but do not allocate */
+ DCCTRL3, /* Cacheable and bufferable, but do not allocate */
+ DINVALID1 = 8,
+ DINVALID2,
+ DCCTRL6, /* Cacheable write-through, allocate on writes only */
+ DCCTRL7, /* Cacheable write-back, allocate on writes only */
+};
+
+/* Populated by the PL330 core driver for DMA API driver's info */
+struct pl330_config {
+ u32 periph_id;
+ u32 pcell_id;
+#define DMAC_MODE_NS (1 << 0)
+ unsigned int mode;
+ unsigned int data_bus_width:10; /* In number of bits */
+ unsigned int data_buf_dep:10;
+ unsigned int num_chan:4;
+ unsigned int num_peri:6;
+ u32 peri_ns;
+ unsigned int num_events:6;
+ u32 irq_ns;
+};
+
+/* Handle to the DMAC provided to the PL330 core */
+struct pl330_info {
+ /* Owning device */
+ struct device *dev;
+ /* Size of MicroCode buffers for each channel. */
+ unsigned mcbufsz;
+ /* ioremap'ed address of PL330 registers. */
+ void __iomem *base;
+ /* Client can freely use it. */
+ void *client_data;
+ /* PL330 core data, Client must not touch it. */
+ void *pl330_data;
+ /* Populated by the PL330 core driver during pl330_add */
+ struct pl330_config pcfg;
+ /*
+ * If the DMAC has some reset mechanism, then the
+ * client may want to provide pointer to the method.
+ */
+ void (*dmac_reset)(struct pl330_info *pi);
+};
+
+enum pl330_byteswap {
+ SWAP_NO = 0,
+ SWAP_2,
+ SWAP_4,
+ SWAP_8,
+ SWAP_16,
+};
+
+/**
+ * Request Configuration.
+ * The PL330 core does not modify this and uses the last
+ * working configuration if the request doesn't provide any.
+ *
+ * The Client may want to provide this info only for the
+ * first request and a request with new settings.
+ */
+struct pl330_reqcfg {
+ /* Address Incrementing */
+ unsigned dst_inc:1;
+ unsigned src_inc:1;
+
+ /*
+ * For now, the SRC & DST protection levels
+ * and burst size/length are assumed same.
+ */
+ bool nonsecure;
+ bool privileged;
+ bool insnaccess;
+ unsigned brst_len:5;
+ unsigned brst_size:3; /* in power of 2 */
+
+ enum pl330_dstcachectrl dcctl;
+ enum pl330_srccachectrl scctl;
+ enum pl330_byteswap swap;
+};
+
+/*
+ * One cycle of DMAC operation.
+ * There may be more than one xfer in a request.
+ */
+struct pl330_xfer {
+ u32 src_addr;
+ u32 dst_addr;
+ /* Size to xfer */
+ u32 bytes;
+ /*
+ * Pointer to next xfer in the list.
+ * The last xfer in the req must point to NULL.
+ */
+ struct pl330_xfer *next;
+};
+
+/* The xfer callbacks are made with one of these arguments. */
+enum pl330_op_err {
+	/* All xfers in the request completed successfully. */
+ PL330_ERR_NONE,
+ /* If req aborted due to global error. */
+ PL330_ERR_ABORT,
+ /* If req failed due to problem with Channel. */
+ PL330_ERR_FAIL,
+};
+
+enum pl330_reqtype {
+ MEMTOMEM,
+ MEMTODEV,
+ DEVTOMEM,
+ DEVTODEV,
+};
+
+/* A request defining Scatter-Gather List ending with NULL xfer. */
+struct pl330_req {
+ enum pl330_reqtype rqtype;
+ /* Index of peripheral for the xfer. */
+ unsigned peri:5;
+ /* Unique token for this xfer, set by the client. */
+ void *token;
+ /* Callback to be called after xfer. */
+ void (*xfer_cb)(void *token, enum pl330_op_err err);
+	/* If NULL, the req is executed with the last-set parameters. */
+ struct pl330_reqcfg *cfg;
+ /* Pointer to first xfer in the request. */
+ struct pl330_xfer *x;
+};
+
+/*
+ * To know the status of the channel and DMAC, the client
+ * provides a pointer to this structure. The PL330 core
+ * fills it with current information.
+ */
+struct pl330_chanstatus {
+ /*
+ * If the DMAC engine halted due to some error,
+	 * the client should remove and re-add the DMAC.
+ */
+ bool dmac_halted;
+ /*
+ * If channel is halted due to some error,
+ * the client should ABORT/FLUSH and START the channel.
+ */
+ bool faulting;
+ /* Location of last load */
+ u32 src_addr;
+ /* Location of last store */
+ u32 dst_addr;
+ /*
+ * Pointer to the currently active req, NULL if channel is
+ * inactive, even though the requests may be present.
+ */
+ struct pl330_req *top_req;
+ /* Pointer to req waiting second in the queue if any. */
+ struct pl330_req *wait_req;
+};
+
+enum pl330_chan_op {
+ /* Start the channel */
+ PL330_OP_START,
+ /* Abort the active xfer */
+ PL330_OP_ABORT,
+ /* Stop xfer and flush queue */
+ PL330_OP_FLUSH,
+};
+
+extern int pl330_add(struct pl330_info *);
+extern void pl330_del(struct pl330_info *pi);
+extern int pl330_update(const struct pl330_info *pi);
+extern void pl330_release_channel(void *ch_id);
+extern void *pl330_request_channel(const struct pl330_info *pi);
+extern int pl330_chan_status(void *ch_id, struct pl330_chanstatus *pstatus);
+extern int pl330_chan_ctrl(void *ch_id, enum pl330_chan_op op);
+extern int pl330_submit_req(void *ch_id, struct pl330_req *r);
+
+#endif /* __PL330_CORE_H */
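To make the request flow concrete, a hedged sketch of a client submitting a single memory-to-memory transfer through this API; the pl330_info, addresses and completion handling are placeholders, and real code would wait for the callback before releasing the channel:

static void example_xfer_done(void *token, enum pl330_op_err err)
{
	pr_info("pl330 xfer %p finished: %d\n", token, err);
}

static int example_memcpy(struct pl330_info *pi, u32 dst, u32 src, u32 bytes)
{
	struct pl330_xfer x = {
		.src_addr = src,
		.dst_addr = dst,
		.bytes	  = bytes,
		.next	  = NULL,		/* single xfer in this request */
	};
	struct pl330_req r = {
		.rqtype	 = MEMTOMEM,
		.token	 = &x,
		.xfer_cb = example_xfer_done,
		.cfg	 = NULL,		/* reuse the last-set configuration */
		.x	 = &x,
	};
	void *ch = pl330_request_channel(pi);
	int ret;

	if (!ch)
		return -EBUSY;

	ret = pl330_submit_req(ch, &r);
	if (!ret)
		ret = pl330_chan_ctrl(ch, PL330_OP_START);

	pl330_release_channel(ch);
	return ret;
}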
diff --git a/arch/arm/include/asm/hardware/sp810.h b/arch/arm/include/asm/hardware/sp810.h
new file mode 100644
index 000000000000..a101f10bb5b1
--- /dev/null
+++ b/arch/arm/include/asm/hardware/sp810.h
@@ -0,0 +1,59 @@
+/*
+ * arch/arm/include/asm/hardware/sp810.h
+ *
+ * ARM PrimeXsys System Controller SP810 header file
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __ASM_ARM_SP810_H
+#define __ASM_ARM_SP810_H
+
+#include <linux/io.h>
+
+/* sysctl registers offset */
+#define SCCTRL 0x000
+#define SCSYSSTAT 0x004
+#define SCIMCTRL 0x008
+#define SCIMSTAT 0x00C
+#define SCXTALCTRL 0x010
+#define SCPLLCTRL 0x014
+#define SCPLLFCTRL 0x018
+#define SCPERCTRL0 0x01C
+#define SCPERCTRL1 0x020
+#define SCPEREN 0x024
+#define SCPERDIS 0x028
+#define SCPERCLKEN 0x02C
+#define SCPERSTAT 0x030
+#define SCSYSID0 0xEE0
+#define SCSYSID1 0xEE4
+#define SCSYSID2 0xEE8
+#define SCSYSID3 0xEEC
+#define SCITCR 0xF00
+#define SCITIR0 0xF04
+#define SCITIR1 0xF08
+#define SCITOR 0xF0C
+#define SCCNTCTRL 0xF10
+#define SCCNTDATA 0xF14
+#define SCCNTSTEP 0xF18
+#define SCPERIPHID0 0xFE0
+#define SCPERIPHID1 0xFE4
+#define SCPERIPHID2 0xFE8
+#define SCPERIPHID3 0xFEC
+#define SCPCELLID0 0xFF0
+#define SCPCELLID1 0xFF4
+#define SCPCELLID2 0xFF8
+#define SCPCELLID3 0xFFC
+
+static inline void sysctl_soft_reset(void __iomem *base)
+{
+ /* writing any value to SCSYSSTAT reg will reset system */
+ writel(0, base + SCSYSSTAT);
+}
+
+#endif /* __ASM_ARM_SP810_H */
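A small hedged example of a machine restart hook built on the helper above; EXAMPLE_SP810_BASE stands in for the platform's real system-controller address:

#include <linux/io.h>
#include <asm/hardware/sp810.h>

static void example_restart(void)
{
	void __iomem *sysctl = ioremap(EXAMPLE_SP810_BASE, SZ_4K);

	if (sysctl)
		sysctl_soft_reset(sysctl);	/* any write to SCSYSSTAT resets the system */
}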
diff --git a/arch/arm/include/asm/ioctls.h b/arch/arm/include/asm/ioctls.h
index a91d8a1523cf..7f0b6d13296a 100644
--- a/arch/arm/include/asm/ioctls.h
+++ b/arch/arm/include/asm/ioctls.h
@@ -53,6 +53,9 @@
#define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
#define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */
+#define TIOCGRS485 0x542E
+#define TIOCSRS485 0x542F
+
#define FIONCLEX 0x5450 /* these numbers need to be adjusted. */
#define FIOCLEX 0x5451
#define FIOASYNC 0x5452
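The two new ioctl numbers expose RS-485 configuration through struct serial_rs485 from linux/serial.h. A hedged user-space sketch, assuming the underlying serial driver supports the feature:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/serial.h>

/* Device path is illustrative only. */
static int example_enable_rs485(const char *dev)
{
	struct serial_rs485 rs485 = { 0 };
	int fd = open(dev, O_RDWR);

	if (fd < 0)
		return -1;
	if (ioctl(fd, TIOCGRS485, &rs485) == 0) {
		rs485.flags |= SER_RS485_ENABLED;
		ioctl(fd, TIOCSRS485, &rs485);
	}
	return fd;
}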
diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
index c4b2ea3fbe42..e51b1e81df05 100644
--- a/arch/arm/include/asm/kmap_types.h
+++ b/arch/arm/include/asm/kmap_types.h
@@ -20,6 +20,7 @@ enum km_type {
KM_SOFTIRQ1,
KM_L1_CACHE,
KM_L2_CACHE,
+ KM_KDB,
KM_TYPE_NR
};
diff --git a/arch/arm/include/asm/mach/pci.h b/arch/arm/include/asm/mach/pci.h
index a38bdc7afa34..52f0da1e97df 100644
--- a/arch/arm/include/asm/mach/pci.h
+++ b/arch/arm/include/asm/mach/pci.h
@@ -8,10 +8,16 @@
* published by the Free Software Foundation.
*/
+#ifndef __ASM_MACH_PCI_H
+#define __ASM_MACH_PCI_H
+
struct pci_sys_data;
struct pci_bus;
struct hw_pci {
+#ifdef CONFIG_PCI_DOMAINS
+ int domain;
+#endif
struct list_head buses;
int nr_controllers;
int (*setup)(int nr, struct pci_sys_data *);
@@ -26,6 +32,9 @@ struct hw_pci {
* Per-controller structure
*/
struct pci_sys_data {
+#ifdef CONFIG_PCI_DOMAINS
+ int domain;
+#endif
struct list_head node;
int busnr; /* primary bus number */
u64 mem_offset; /* bus->cpu memory mapping offset */
@@ -70,3 +79,5 @@ extern int pci_v3_setup(int nr, struct pci_sys_data *);
extern struct pci_bus *pci_v3_scan_bus(int nr, struct pci_sys_data *);
extern void pci_v3_preinit(void);
extern void pci_v3_postinit(void);
+
+#endif /* __ASM_MACH_PCI_H */
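With the optional domain field, a board that registers more than one host controller can place each in its own PCI domain. A hedged board-file sketch; the setup and scan callbacks are hypothetical board-provided functions:

static struct hw_pci example_pci __initdata = {
#ifdef CONFIG_PCI_DOMAINS
	.domain		= 0,			/* this controller's domain number */
#endif
	.nr_controllers	= 1,
	.setup		= example_pci_setup,	/* board-provided */
	.scan		= example_pci_scan,	/* board-provided */
};

static int __init example_pci_init(void)
{
	pci_common_init(&example_pci);
	return 0;
}
subsys_initcall(example_pci_init);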
diff --git a/arch/arm/include/asm/mach/time.h b/arch/arm/include/asm/mach/time.h
index 8bffc3ff3acf..35d408f6dccf 100644
--- a/arch/arm/include/asm/mach/time.h
+++ b/arch/arm/include/asm/mach/time.h
@@ -38,7 +38,7 @@ struct sys_timer {
void (*init)(void);
void (*suspend)(void);
void (*resume)(void);
-#ifndef CONFIG_GENERIC_TIME
+#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
unsigned long (*offset)(void);
#endif
};
diff --git a/arch/arm/include/asm/pci.h b/arch/arm/include/asm/pci.h
index 47980118d0a5..92e2a833693d 100644
--- a/arch/arm/include/asm/pci.h
+++ b/arch/arm/include/asm/pci.h
@@ -4,8 +4,23 @@
#ifdef __KERNEL__
#include <asm-generic/pci-dma-compat.h>
+#include <asm/mach/pci.h> /* for pci_sys_data */
#include <mach/hardware.h> /* for PCIBIOS_MIN_* */
+#ifdef CONFIG_PCI_DOMAINS
+static inline int pci_domain_nr(struct pci_bus *bus)
+{
+ struct pci_sys_data *root = bus->sysdata;
+
+ return root->domain;
+}
+
+static inline int pci_proc_domain(struct pci_bus *bus)
+{
+ return pci_domain_nr(bus);
+}
+#endif /* CONFIG_PCI_DOMAINS */
+
#ifdef CONFIG_PCI_HOST_ITE8152
/* ITE bridge requires setting latency timer to avoid early bus access
termination by PCI bus master devices
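As a rough illustration of the new domain plumbing, a hypothetical platform with two host bridges could assign each hw_pci a distinct domain, which pcibios_init_hw() copies into pci_sys_data and pci_domain_nr() then reports back (illustrative sketch only, not part of the diff; the board_* callbacks are assumed helpers):

#include <linux/init.h>
#include <linux/pci.h>
#include <asm/mach/pci.h>

static struct hw_pci board_pci0 __initdata = {
#ifdef CONFIG_PCI_DOMAINS
	.domain		= 0,
#endif
	.nr_controllers	= 1,
	.setup		= board_pci_setup,	/* assumed helper */
	.scan		= board_pci_scan_bus,	/* assumed helper */
	.map_irq	= board_pci_map_irq,	/* assumed helper */
};

static struct hw_pci board_pci1 __initdata = {
#ifdef CONFIG_PCI_DOMAINS
	.domain		= 1,
#endif
	.nr_controllers	= 1,
	.setup		= board_pci_setup,
	.scan		= board_pci_scan_bus,
	.map_irq	= board_pci_map_irq,
};

static int __init board_pci_init(void)
{
	pci_common_init(&board_pci0);
	pci_common_init(&board_pci1);	/* devices on this tree report pci_domain_nr() == 1 */
	return 0;
}
subsys_initcall(board_pci_init);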
diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h
index 49e3049aba32..48837e6d8887 100644
--- a/arch/arm/include/asm/perf_event.h
+++ b/arch/arm/include/asm/perf_event.h
@@ -28,4 +28,21 @@ set_perf_event_pending(void)
* same indexes here for consistency. */
#define PERF_EVENT_INDEX_OFFSET 1
+/* ARM perf PMU IDs for use by internal perf clients. */
+enum arm_perf_pmu_ids {
+ ARM_PERF_PMU_ID_XSCALE1 = 0,
+ ARM_PERF_PMU_ID_XSCALE2,
+ ARM_PERF_PMU_ID_V6,
+ ARM_PERF_PMU_ID_V6MP,
+ ARM_PERF_PMU_ID_CA8,
+ ARM_PERF_PMU_ID_CA9,
+ ARM_NUM_PMU_IDS,
+};
+
+extern enum arm_perf_pmu_ids
+armpmu_get_pmu_id(void);
+
+extern int
+armpmu_get_max_events(void);
+
#endif /* __ARM_PERF_EVENT_H__ */
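A hedged sketch of how an in-kernel client might consume the two new exports (hypothetical code, not part of the diff above):

#include <linux/errno.h>
#include <linux/kernel.h>
#include <asm/perf_event.h>

static int example_report_pmu(void)
{
	enum arm_perf_pmu_ids id = armpmu_get_pmu_id();
	int counters = armpmu_get_max_events();

	/* armpmu_get_pmu_id() returns -ENODEV when no PMU has been detected. */
	if (id < 0 || counters == 0)
		return -ENODEV;

	pr_info("ARM PMU id %d, %d counters\n", id, counters);
	return 0;
}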
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 11397687f42c..ab68cf1ef80f 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -314,7 +314,7 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)
#define pgprot_writecombine(prot) \
__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE)
-#if __LINUX_ARM_ARCH__ >= 7
+#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
#define pgprot_dmacoherent(prot) \
__pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_BUFFERABLE)
#else
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
index 2829b9f981a1..8ccea012722c 100644
--- a/arch/arm/include/asm/pmu.h
+++ b/arch/arm/include/asm/pmu.h
@@ -12,33 +12,33 @@
#ifndef __ARM_PMU_H__
#define __ARM_PMU_H__
-#ifdef CONFIG_CPU_HAS_PMU
-
-struct pmu_irqs {
- const int *irqs;
- int num_irqs;
+enum arm_pmu_type {
+ ARM_PMU_DEVICE_CPU = 0,
+ ARM_NUM_PMU_DEVICES,
};
+#ifdef CONFIG_CPU_HAS_PMU
+
/**
* reserve_pmu() - reserve the hardware performance counters
*
* Reserve the hardware performance counters in the system for exclusive use.
- * The 'struct pmu_irqs' for the system is returned on success, ERR_PTR()
+ * The platform_device for the system is returned on success, ERR_PTR()
* encoded error on failure.
*/
-extern const struct pmu_irqs *
-reserve_pmu(void);
+extern struct platform_device *
+reserve_pmu(enum arm_pmu_type device);
/**
* release_pmu() - Relinquish control of the performance counters
*
* Release the performance counters and allow someone else to use them.
* Callers must have disabled the counters and released IRQs before calling
- * this. The 'struct pmu_irqs' returned from reserve_pmu() must be passed as
+ * this. The platform_device returned from reserve_pmu() must be passed as
* a cookie.
*/
extern int
-release_pmu(const struct pmu_irqs *irqs);
+release_pmu(struct platform_device *pdev);
/**
* init_pmu() - Initialise the PMU.
@@ -48,24 +48,26 @@ release_pmu(const struct pmu_irqs *irqs);
* the actual hardware initialisation.
*/
extern int
-init_pmu(void);
+init_pmu(enum arm_pmu_type device);
#else /* CONFIG_CPU_HAS_PMU */
-static inline const struct pmu_irqs *
-reserve_pmu(void)
+#include <linux/err.h>
+
+static inline struct platform_device *
+reserve_pmu(enum arm_pmu_type device)
{
return ERR_PTR(-ENODEV);
}
static inline int
-release_pmu(const struct pmu_irqs *irqs)
+release_pmu(struct platform_device *pdev)
{
return -ENODEV;
}
static inline int
-init_pmu(void)
+init_pmu(enum arm_pmu_type device)
{
return -ENODEV;
}
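To make the reworked interface concrete, a client claiming the CPU PMU now receives a platform_device and pulls its IRQs from the resource table, roughly as in the hypothetical sketch below (not part of the diff above):

#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <asm/pmu.h>

static irqreturn_t example_pmu_irq(int irq, void *dev)
{
	return IRQ_HANDLED;
}

static int example_claim_cpu_pmu(void)
{
	struct platform_device *pdev = reserve_pmu(ARM_PMU_DEVICE_CPU);
	int irq, err;

	if (IS_ERR(pdev))
		return PTR_ERR(pdev);

	err = init_pmu(ARM_PMU_DEVICE_CPU);
	if (err)
		goto out;

	/* IRQs now come from the platform_device's resources. */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		err = irq;
		goto out;
	}

	err = request_irq(irq, example_pmu_irq,
			  IRQF_DISABLED | IRQF_NOBALANCING, "example-pmu", NULL);
	if (!err)
		free_irq(irq, NULL);
out:
	release_pmu(pdev);
	return err;
}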
diff --git a/arch/arm/include/asm/scatterlist.h b/arch/arm/include/asm/scatterlist.h
index ca0a37d03400..2f87870d9347 100644
--- a/arch/arm/include/asm/scatterlist.h
+++ b/arch/arm/include/asm/scatterlist.h
@@ -3,25 +3,6 @@
#include <asm/memory.h>
#include <asm/types.h>
-
-struct scatterlist {
-#ifdef CONFIG_DEBUG_SG
- unsigned long sg_magic;
-#endif
- unsigned long page_link;
- unsigned int offset; /* buffer offset */
- dma_addr_t dma_address; /* dma address */
- unsigned int length; /* length */
-};
-
-/*
- * These macros should be used after a pci_map_sg call has been done
- * to get bus addresses of each of the SG entries and their lengths.
- * You should only work with the number of sg entries pci_map_sg
- * returns, or alternatively stop on the first sg_dma_len(sg) which
- * is 0.
- */
-#define sg_dma_address(sg) ((sg)->dma_address)
-#define sg_dma_len(sg) ((sg)->length)
+#include <asm-generic/scatterlist.h>
#endif /* _ASMARM_SCATTERLIST_H */
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
index e0d763be1846..3d05190797cb 100644
--- a/arch/arm/include/asm/smp.h
+++ b/arch/arm/include/asm/smp.h
@@ -82,7 +82,7 @@ struct secondary_data {
extern struct secondary_data secondary_data;
extern int __cpu_disable(void);
-extern int mach_cpu_disable(unsigned int cpu);
+extern int platform_cpu_disable(unsigned int cpu);
extern void __cpu_die(unsigned int cpu);
extern void cpu_die(void);
diff --git a/arch/arm/include/asm/smp_twd.h b/arch/arm/include/asm/smp_twd.h
index 7be0978b2625..634f357be6bb 100644
--- a/arch/arm/include/asm/smp_twd.h
+++ b/arch/arm/include/asm/smp_twd.h
@@ -1,6 +1,23 @@
#ifndef __ASMARM_SMP_TWD_H
#define __ASMARM_SMP_TWD_H
+#define TWD_TIMER_LOAD 0x00
+#define TWD_TIMER_COUNTER 0x04
+#define TWD_TIMER_CONTROL 0x08
+#define TWD_TIMER_INTSTAT 0x0C
+
+#define TWD_WDOG_LOAD 0x20
+#define TWD_WDOG_COUNTER 0x24
+#define TWD_WDOG_CONTROL 0x28
+#define TWD_WDOG_INTSTAT 0x2C
+#define TWD_WDOG_RESETSTAT 0x30
+#define TWD_WDOG_DISABLE 0x34
+
+#define TWD_TIMER_CONTROL_ENABLE (1 << 0)
+#define TWD_TIMER_CONTROL_ONESHOT (0 << 1)
+#define TWD_TIMER_CONTROL_PERIODIC (1 << 1)
+#define TWD_TIMER_CONTROL_IT_ENABLE (1 << 2)
+
struct clock_event_device;
extern void __iomem *twd_base;
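With the register offsets exported here, platform code could in principle program the private timer directly; a hypothetical sketch (not part of the diff above) of a periodic setup using the new definitions:

#include <linux/io.h>
#include <asm/smp_twd.h>

static void example_twd_start_periodic(unsigned long reload)
{
	/* twd_base is assumed to have been set up by the platform already. */
	writel(reload, twd_base + TWD_TIMER_LOAD);
	writel(TWD_TIMER_CONTROL_ENABLE | TWD_TIMER_CONTROL_PERIODIC |
	       TWD_TIMER_CONTROL_IT_ENABLE,
	       twd_base + TWD_TIMER_CONTROL);
}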
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
index 4ace45ec3ef8..5f4f48002734 100644
--- a/arch/arm/include/asm/system.h
+++ b/arch/arm/include/asm/system.h
@@ -141,7 +141,7 @@ extern unsigned int user_debug;
#ifdef CONFIG_ARCH_HAS_BARRIERS
#include <mach/barriers.h>
-#elif __LINUX_ARM_ARCH__ >= 7 || defined(CONFIG_SMP)
+#elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP)
#define mb() do { dsb(); outer_sync(); } while (0)
#define rmb() dmb()
#define wmb() mb()
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index b74970ec02c4..763e29fa8530 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -141,7 +141,7 @@ extern void vfp_flush_hwstate(struct thread_info *);
#define TIF_SYSCALL_TRACE 8
#define TIF_POLLING_NRFLAG 16
#define TIF_USING_IWMMXT 17
-#define TIF_MEMDIE 18
+#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
#define TIF_FREEZE 19
#define TIF_RESTORE_SIGMASK 20
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index e085e2c545eb..bd863d8608cd 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -46,6 +46,9 @@
#define TLB_V7_UIS_FULL (1 << 20)
#define TLB_V7_UIS_ASID (1 << 21)
+/* Inner Shareable BTB operation (ARMv7 MP extensions) */
+#define TLB_V7_IS_BTB (1 << 22)
+
#define TLB_L2CLEAN_FR (1 << 29) /* Feroceon */
#define TLB_DCLEAN (1 << 30)
#define TLB_WB (1 << 31)
@@ -183,7 +186,7 @@
#endif
#ifdef CONFIG_SMP
-#define v7wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BTB | \
+#define v7wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_V7_IS_BTB | \
TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | TLB_V7_UIS_ASID)
#else
#define v7wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BTB | \
@@ -339,6 +342,12 @@ static inline void local_flush_tlb_all(void)
dsb();
isb();
}
+ if (tlb_flag(TLB_V7_IS_BTB)) {
+ /* flush the branch target cache */
+ asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero) : "cc");
+ dsb();
+ isb();
+ }
}
static inline void local_flush_tlb_mm(struct mm_struct *mm)
@@ -376,6 +385,12 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm)
asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc");
dsb();
}
+ if (tlb_flag(TLB_V7_IS_BTB)) {
+ /* flush the branch target cache */
+ asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero) : "cc");
+ dsb();
+ isb();
+ }
}
static inline void
@@ -416,6 +431,12 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc");
dsb();
}
+ if (tlb_flag(TLB_V7_IS_BTB)) {
+ /* flush the branch target cache */
+ asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero) : "cc");
+ dsb();
+ isb();
+ }
}
static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
@@ -454,6 +475,12 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
dsb();
isb();
}
+ if (tlb_flag(TLB_V7_IS_BTB)) {
+ /* flush the branch target cache */
+ asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero) : "cc");
+ dsb();
+ isb();
+ }
}
/*
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
index bd397e0b663e..c6273a3bfc25 100644
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -527,6 +527,9 @@ static void __init pcibios_init_hw(struct hw_pci *hw)
if (!sys)
panic("PCI: unable to allocate sys data!");
+#ifdef CONFIG_PCI_DOMAINS
+ sys->domain = hw->domain;
+#endif
sys->hw = hw;
sys->busnr = busnr;
sys->swizzle = hw->swizzle;
diff --git a/arch/arm/kernel/dma.c b/arch/arm/kernel/dma.c
index 7d5b9fb01e71..2c4a185f92cd 100644
--- a/arch/arm/kernel/dma.c
+++ b/arch/arm/kernel/dma.c
@@ -16,6 +16,8 @@
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>
+#include <linux/seq_file.h>
+#include <linux/proc_fs.h>
#include <asm/dma.h>
@@ -264,3 +266,37 @@ int get_dma_residue(unsigned int chan)
return ret;
}
EXPORT_SYMBOL(get_dma_residue);
+
+#ifdef CONFIG_PROC_FS
+static int proc_dma_show(struct seq_file *m, void *v)
+{
+ int i;
+
+ for (i = 0 ; i < MAX_DMA_CHANNELS ; i++) {
+ dma_t *dma = dma_channel(i);
+ if (dma && dma->lock)
+ seq_printf(m, "%2d: %s\n", i, dma->device_id);
+ }
+ return 0;
+}
+
+static int proc_dma_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, proc_dma_show, NULL);
+}
+
+static const struct file_operations proc_dma_operations = {
+ .open = proc_dma_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int __init proc_dma_init(void)
+{
+ proc_create("dma", 0, NULL, &proc_dma_operations);
+ return 0;
+}
+
+__initcall(proc_dma_init);
+#endif
diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c
index a5b846b9895d..c868a8864117 100644
--- a/arch/arm/kernel/kgdb.c
+++ b/arch/arm/kernel/kgdb.c
@@ -98,6 +98,11 @@ sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task)
gdb_regs[_CPSR] = thread_regs->ARM_cpsr;
}
+void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
+{
+ regs->ARM_pc = pc;
+}
+
static int compiled_break;
int kgdb_arch_handle_exception(int exception_vector, int signo,
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 9e70f2053f9a..c45768614c8a 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -16,7 +16,9 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/perf_event.h>
+#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
@@ -26,7 +28,7 @@
#include <asm/pmu.h>
#include <asm/stacktrace.h>
-static const struct pmu_irqs *pmu_irqs;
+static struct platform_device *pmu_device;
/*
* Hardware lock to serialize accesses to PMU registers. Needed for the
@@ -67,8 +69,18 @@ struct cpu_hw_events {
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
+/* PMU names. */
+static const char *arm_pmu_names[] = {
+ [ARM_PERF_PMU_ID_XSCALE1] = "xscale1",
+ [ARM_PERF_PMU_ID_XSCALE2] = "xscale2",
+ [ARM_PERF_PMU_ID_V6] = "v6",
+ [ARM_PERF_PMU_ID_V6MP] = "v6mpcore",
+ [ARM_PERF_PMU_ID_CA8] = "ARMv7 Cortex-A8",
+ [ARM_PERF_PMU_ID_CA9] = "ARMv7 Cortex-A9",
+};
+
struct arm_pmu {
- char *name;
+ enum arm_perf_pmu_ids id;
irqreturn_t (*handle_irq)(int irq_num, void *dev);
void (*enable)(struct hw_perf_event *evt, int idx);
void (*disable)(struct hw_perf_event *evt, int idx);
@@ -87,6 +99,30 @@ struct arm_pmu {
/* Set at runtime when we know what CPU type we are. */
static const struct arm_pmu *armpmu;
+enum arm_perf_pmu_ids
+armpmu_get_pmu_id(void)
+{
+ int id = -ENODEV;
+
+ if (armpmu != NULL)
+ id = armpmu->id;
+
+ return id;
+}
+EXPORT_SYMBOL_GPL(armpmu_get_pmu_id);
+
+int
+armpmu_get_max_events(void)
+{
+ int max_events = 0;
+
+ if (armpmu != NULL)
+ max_events = armpmu->num_events;
+
+ return max_events;
+}
+EXPORT_SYMBOL_GPL(armpmu_get_max_events);
+
#define HW_OP_UNSUPPORTED 0xFFFF
#define C(_x) \
@@ -314,38 +350,44 @@ validate_group(struct perf_event *event)
static int
armpmu_reserve_hardware(void)
{
- int i;
- int err;
+ int i, err = -ENODEV, irq;
- pmu_irqs = reserve_pmu();
- if (IS_ERR(pmu_irqs)) {
+ pmu_device = reserve_pmu(ARM_PMU_DEVICE_CPU);
+ if (IS_ERR(pmu_device)) {
pr_warning("unable to reserve pmu\n");
- return PTR_ERR(pmu_irqs);
+ return PTR_ERR(pmu_device);
}
- init_pmu();
+ init_pmu(ARM_PMU_DEVICE_CPU);
- if (pmu_irqs->num_irqs < 1) {
+ if (pmu_device->num_resources < 1) {
pr_err("no irqs for PMUs defined\n");
return -ENODEV;
}
- for (i = 0; i < pmu_irqs->num_irqs; ++i) {
- err = request_irq(pmu_irqs->irqs[i], armpmu->handle_irq,
+ for (i = 0; i < pmu_device->num_resources; ++i) {
+ irq = platform_get_irq(pmu_device, i);
+ if (irq < 0)
+ continue;
+
+ err = request_irq(irq, armpmu->handle_irq,
IRQF_DISABLED | IRQF_NOBALANCING,
"armpmu", NULL);
if (err) {
- pr_warning("unable to request IRQ%d for ARM "
- "perf counters\n", pmu_irqs->irqs[i]);
+ pr_warning("unable to request IRQ%d for ARM perf "
+ "counters\n", irq);
break;
}
}
if (err) {
- for (i = i - 1; i >= 0; --i)
- free_irq(pmu_irqs->irqs[i], NULL);
- release_pmu(pmu_irqs);
- pmu_irqs = NULL;
+ for (i = i - 1; i >= 0; --i) {
+ irq = platform_get_irq(pmu_device, i);
+ if (irq >= 0)
+ free_irq(irq, NULL);
+ }
+ release_pmu(pmu_device);
+ pmu_device = NULL;
}
return err;
@@ -354,14 +396,17 @@ armpmu_reserve_hardware(void)
static void
armpmu_release_hardware(void)
{
- int i;
+ int i, irq;
- for (i = pmu_irqs->num_irqs - 1; i >= 0; --i)
- free_irq(pmu_irqs->irqs[i], NULL);
+ for (i = pmu_device->num_resources - 1; i >= 0; --i) {
+ irq = platform_get_irq(pmu_device, i);
+ if (irq >= 0)
+ free_irq(irq, NULL);
+ }
armpmu->stop();
- release_pmu(pmu_irqs);
- pmu_irqs = NULL;
+ release_pmu(pmu_device);
+ pmu_device = NULL;
}
static atomic_t active_events = ATOMIC_INIT(0);
@@ -1144,7 +1189,7 @@ armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc,
}
static const struct arm_pmu armv6pmu = {
- .name = "v6",
+ .id = ARM_PERF_PMU_ID_V6,
.handle_irq = armv6pmu_handle_irq,
.enable = armv6pmu_enable_event,
.disable = armv6pmu_disable_event,
@@ -1167,7 +1212,7 @@ static const struct arm_pmu armv6pmu = {
* reset the period and enable the interrupt reporting.
*/
static const struct arm_pmu armv6mpcore_pmu = {
- .name = "v6mpcore",
+ .id = ARM_PERF_PMU_ID_V6MP,
.handle_irq = armv6pmu_handle_irq,
.enable = armv6pmu_enable_event,
.disable = armv6mpcore_pmu_disable_event,
@@ -1197,10 +1242,6 @@ static const struct arm_pmu armv6mpcore_pmu = {
* counter and all 4 performance counters together can be reset separately.
*/
-#define ARMV7_PMU_CORTEX_A8_NAME "ARMv7 Cortex-A8"
-
-#define ARMV7_PMU_CORTEX_A9_NAME "ARMv7 Cortex-A9"
-
/* Common ARMv7 event types */
enum armv7_perf_types {
ARMV7_PERFCTR_PMNC_SW_INCR = 0x00,
@@ -2079,6 +2120,803 @@ static u32 __init armv7_reset_read_pmnc(void)
return nb_cnt + 1;
}
+/*
+ * ARMv5 [xscale] Performance counter handling code.
+ *
+ * Based on xscale OProfile code.
+ *
+ * There are two variants of the xscale PMU that we support:
+ * - xscale1pmu: 2 event counters and a cycle counter
+ * - xscale2pmu: 4 event counters and a cycle counter
+ * The two variants share event definitions, but have different
+ * PMU structures.
+ */
+
+enum xscale_perf_types {
+ XSCALE_PERFCTR_ICACHE_MISS = 0x00,
+ XSCALE_PERFCTR_ICACHE_NO_DELIVER = 0x01,
+ XSCALE_PERFCTR_DATA_STALL = 0x02,
+ XSCALE_PERFCTR_ITLB_MISS = 0x03,
+ XSCALE_PERFCTR_DTLB_MISS = 0x04,
+ XSCALE_PERFCTR_BRANCH = 0x05,
+ XSCALE_PERFCTR_BRANCH_MISS = 0x06,
+ XSCALE_PERFCTR_INSTRUCTION = 0x07,
+ XSCALE_PERFCTR_DCACHE_FULL_STALL = 0x08,
+ XSCALE_PERFCTR_DCACHE_FULL_STALL_CONTIG = 0x09,
+ XSCALE_PERFCTR_DCACHE_ACCESS = 0x0A,
+ XSCALE_PERFCTR_DCACHE_MISS = 0x0B,
+ XSCALE_PERFCTR_DCACHE_WRITE_BACK = 0x0C,
+ XSCALE_PERFCTR_PC_CHANGED = 0x0D,
+ XSCALE_PERFCTR_BCU_REQUEST = 0x10,
+ XSCALE_PERFCTR_BCU_FULL = 0x11,
+ XSCALE_PERFCTR_BCU_DRAIN = 0x12,
+ XSCALE_PERFCTR_BCU_ECC_NO_ELOG = 0x14,
+ XSCALE_PERFCTR_BCU_1_BIT_ERR = 0x15,
+ XSCALE_PERFCTR_RMW = 0x16,
+ /* XSCALE_PERFCTR_CCNT is not hardware defined */
+ XSCALE_PERFCTR_CCNT = 0xFE,
+ XSCALE_PERFCTR_UNUSED = 0xFF,
+};
+
+enum xscale_counters {
+ XSCALE_CYCLE_COUNTER = 1,
+ XSCALE_COUNTER0,
+ XSCALE_COUNTER1,
+ XSCALE_COUNTER2,
+ XSCALE_COUNTER3,
+};
+
+static const unsigned xscale_perf_map[PERF_COUNT_HW_MAX] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = XSCALE_PERFCTR_CCNT,
+ [PERF_COUNT_HW_INSTRUCTIONS] = XSCALE_PERFCTR_INSTRUCTION,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
+ [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = XSCALE_PERFCTR_BRANCH,
+ [PERF_COUNT_HW_BRANCH_MISSES] = XSCALE_PERFCTR_BRANCH_MISS,
+ [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
+};
+
+static const unsigned xscale_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+ [C(L1D)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = XSCALE_PERFCTR_DCACHE_ACCESS,
+ [C(RESULT_MISS)] = XSCALE_PERFCTR_DCACHE_MISS,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = XSCALE_PERFCTR_DCACHE_ACCESS,
+ [C(RESULT_MISS)] = XSCALE_PERFCTR_DCACHE_MISS,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(L1I)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = XSCALE_PERFCTR_ICACHE_MISS,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = XSCALE_PERFCTR_ICACHE_MISS,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(LL)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(DTLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = XSCALE_PERFCTR_DTLB_MISS,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = XSCALE_PERFCTR_DTLB_MISS,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(ITLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = XSCALE_PERFCTR_ITLB_MISS,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = XSCALE_PERFCTR_ITLB_MISS,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(BPU)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+};
+
+#define XSCALE_PMU_ENABLE 0x001
+#define XSCALE_PMN_RESET 0x002
+#define XSCALE_CCNT_RESET 0x004
+#define XSCALE_PMU_RESET (XSCALE_CCNT_RESET | XSCALE_PMN_RESET)
+#define XSCALE_PMU_CNT64 0x008
+
+static inline int
+xscalepmu_event_map(int config)
+{
+ int mapping = xscale_perf_map[config];
+ if (HW_OP_UNSUPPORTED == mapping)
+ mapping = -EOPNOTSUPP;
+ return mapping;
+}
+
+static u64
+xscalepmu_raw_event(u64 config)
+{
+ return config & 0xff;
+}
+
+#define XSCALE1_OVERFLOWED_MASK 0x700
+#define XSCALE1_CCOUNT_OVERFLOW 0x400
+#define XSCALE1_COUNT0_OVERFLOW 0x100
+#define XSCALE1_COUNT1_OVERFLOW 0x200
+#define XSCALE1_CCOUNT_INT_EN 0x040
+#define XSCALE1_COUNT0_INT_EN 0x010
+#define XSCALE1_COUNT1_INT_EN 0x020
+#define XSCALE1_COUNT0_EVT_SHFT 12
+#define XSCALE1_COUNT0_EVT_MASK (0xff << XSCALE1_COUNT0_EVT_SHFT)
+#define XSCALE1_COUNT1_EVT_SHFT 20
+#define XSCALE1_COUNT1_EVT_MASK (0xff << XSCALE1_COUNT1_EVT_SHFT)
+
+static inline u32
+xscale1pmu_read_pmnc(void)
+{
+ u32 val;
+ asm volatile("mrc p14, 0, %0, c0, c0, 0" : "=r" (val));
+ return val;
+}
+
+static inline void
+xscale1pmu_write_pmnc(u32 val)
+{
+ /* the upper 4 bits and bits 7 and 11 are write-as-0 */
+ val &= 0xffff77f;
+ asm volatile("mcr p14, 0, %0, c0, c0, 0" : : "r" (val));
+}
+
+static inline int
+xscale1_pmnc_counter_has_overflowed(unsigned long pmnc,
+ enum xscale_counters counter)
+{
+ int ret = 0;
+
+ switch (counter) {
+ case XSCALE_CYCLE_COUNTER:
+ ret = pmnc & XSCALE1_CCOUNT_OVERFLOW;
+ break;
+ case XSCALE_COUNTER0:
+ ret = pmnc & XSCALE1_COUNT0_OVERFLOW;
+ break;
+ case XSCALE_COUNTER1:
+ ret = pmnc & XSCALE1_COUNT1_OVERFLOW;
+ break;
+ default:
+ WARN_ONCE(1, "invalid counter number (%d)\n", counter);
+ }
+
+ return ret;
+}
+
+static irqreturn_t
+xscale1pmu_handle_irq(int irq_num, void *dev)
+{
+ unsigned long pmnc;
+ struct perf_sample_data data;
+ struct cpu_hw_events *cpuc;
+ struct pt_regs *regs;
+ int idx;
+
+ /*
+ * NOTE: there's an A stepping erratum that states if an overflow
+ * bit already exists and another occurs, the previous
+ * Overflow bit gets cleared. There's no workaround.
+ * Fixed in B stepping or later.
+ */
+ pmnc = xscale1pmu_read_pmnc();
+
+ /*
+ * Write the value back to clear the overflow flags. Overflow
+ * flags remain in pmnc for use below. We also disable the PMU
+ * while we process the interrupt.
+ */
+ xscale1pmu_write_pmnc(pmnc & ~XSCALE_PMU_ENABLE);
+
+ if (!(pmnc & XSCALE1_OVERFLOWED_MASK))
+ return IRQ_NONE;
+
+ regs = get_irq_regs();
+
+ perf_sample_data_init(&data, 0);
+
+ cpuc = &__get_cpu_var(cpu_hw_events);
+ for (idx = 0; idx <= armpmu->num_events; ++idx) {
+ struct perf_event *event = cpuc->events[idx];
+ struct hw_perf_event *hwc;
+
+ if (!test_bit(idx, cpuc->active_mask))
+ continue;
+
+ if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx))
+ continue;
+
+ hwc = &event->hw;
+ armpmu_event_update(event, hwc, idx);
+ data.period = event->hw.last_period;
+ if (!armpmu_event_set_period(event, hwc, idx))
+ continue;
+
+ if (perf_event_overflow(event, 0, &data, regs))
+ armpmu->disable(hwc, idx);
+ }
+
+ perf_event_do_pending();
+
+ /*
+ * Re-enable the PMU.
+ */
+ pmnc = xscale1pmu_read_pmnc() | XSCALE_PMU_ENABLE;
+ xscale1pmu_write_pmnc(pmnc);
+
+ return IRQ_HANDLED;
+}
+
+static void
+xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx)
+{
+ unsigned long val, mask, evt, flags;
+
+ switch (idx) {
+ case XSCALE_CYCLE_COUNTER:
+ mask = 0;
+ evt = XSCALE1_CCOUNT_INT_EN;
+ break;
+ case XSCALE_COUNTER0:
+ mask = XSCALE1_COUNT0_EVT_MASK;
+ evt = (hwc->config_base << XSCALE1_COUNT0_EVT_SHFT) |
+ XSCALE1_COUNT0_INT_EN;
+ break;
+ case XSCALE_COUNTER1:
+ mask = XSCALE1_COUNT1_EVT_MASK;
+ evt = (hwc->config_base << XSCALE1_COUNT1_EVT_SHFT) |
+ XSCALE1_COUNT1_INT_EN;
+ break;
+ default:
+ WARN_ONCE(1, "invalid counter number (%d)\n", idx);
+ return;
+ }
+
+ spin_lock_irqsave(&pmu_lock, flags);
+ val = xscale1pmu_read_pmnc();
+ val &= ~mask;
+ val |= evt;
+ xscale1pmu_write_pmnc(val);
+ spin_unlock_irqrestore(&pmu_lock, flags);
+}
+
+static void
+xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx)
+{
+ unsigned long val, mask, evt, flags;
+
+ switch (idx) {
+ case XSCALE_CYCLE_COUNTER:
+ mask = XSCALE1_CCOUNT_INT_EN;
+ evt = 0;
+ break;
+ case XSCALE_COUNTER0:
+ mask = XSCALE1_COUNT0_INT_EN | XSCALE1_COUNT0_EVT_MASK;
+ evt = XSCALE_PERFCTR_UNUSED << XSCALE1_COUNT0_EVT_SHFT;
+ break;
+ case XSCALE_COUNTER1:
+ mask = XSCALE1_COUNT1_INT_EN | XSCALE1_COUNT1_EVT_MASK;
+ evt = XSCALE_PERFCTR_UNUSED << XSCALE1_COUNT1_EVT_SHFT;
+ break;
+ default:
+ WARN_ONCE(1, "invalid counter number (%d)\n", idx);
+ return;
+ }
+
+ spin_lock_irqsave(&pmu_lock, flags);
+ val = xscale1pmu_read_pmnc();
+ val &= ~mask;
+ val |= evt;
+ xscale1pmu_write_pmnc(val);
+ spin_unlock_irqrestore(&pmu_lock, flags);
+}
+
+static int
+xscale1pmu_get_event_idx(struct cpu_hw_events *cpuc,
+ struct hw_perf_event *event)
+{
+ if (XSCALE_PERFCTR_CCNT == event->config_base) {
+ if (test_and_set_bit(XSCALE_CYCLE_COUNTER, cpuc->used_mask))
+ return -EAGAIN;
+
+ return XSCALE_CYCLE_COUNTER;
+ } else {
+ if (!test_and_set_bit(XSCALE_COUNTER1, cpuc->used_mask)) {
+ return XSCALE_COUNTER1;
+ }
+
+ if (!test_and_set_bit(XSCALE_COUNTER0, cpuc->used_mask)) {
+ return XSCALE_COUNTER0;
+ }
+
+ return -EAGAIN;
+ }
+}
+
+static void
+xscale1pmu_start(void)
+{
+ unsigned long flags, val;
+
+ spin_lock_irqsave(&pmu_lock, flags);
+ val = xscale1pmu_read_pmnc();
+ val |= XSCALE_PMU_ENABLE;
+ xscale1pmu_write_pmnc(val);
+ spin_unlock_irqrestore(&pmu_lock, flags);
+}
+
+static void
+xscale1pmu_stop(void)
+{
+ unsigned long flags, val;
+
+ spin_lock_irqsave(&pmu_lock, flags);
+ val = xscale1pmu_read_pmnc();
+ val &= ~XSCALE_PMU_ENABLE;
+ xscale1pmu_write_pmnc(val);
+ spin_unlock_irqrestore(&pmu_lock, flags);
+}
+
+static inline u32
+xscale1pmu_read_counter(int counter)
+{
+ u32 val = 0;
+
+ switch (counter) {
+ case XSCALE_CYCLE_COUNTER:
+ asm volatile("mrc p14, 0, %0, c1, c0, 0" : "=r" (val));
+ break;
+ case XSCALE_COUNTER0:
+ asm volatile("mrc p14, 0, %0, c2, c0, 0" : "=r" (val));
+ break;
+ case XSCALE_COUNTER1:
+ asm volatile("mrc p14, 0, %0, c3, c0, 0" : "=r" (val));
+ break;
+ }
+
+ return val;
+}
+
+static inline void
+xscale1pmu_write_counter(int counter, u32 val)
+{
+ switch (counter) {
+ case XSCALE_CYCLE_COUNTER:
+ asm volatile("mcr p14, 0, %0, c1, c0, 0" : : "r" (val));
+ break;
+ case XSCALE_COUNTER0:
+ asm volatile("mcr p14, 0, %0, c2, c0, 0" : : "r" (val));
+ break;
+ case XSCALE_COUNTER1:
+ asm volatile("mcr p14, 0, %0, c3, c0, 0" : : "r" (val));
+ break;
+ }
+}
+
+static const struct arm_pmu xscale1pmu = {
+ .id = ARM_PERF_PMU_ID_XSCALE1,
+ .handle_irq = xscale1pmu_handle_irq,
+ .enable = xscale1pmu_enable_event,
+ .disable = xscale1pmu_disable_event,
+ .event_map = xscalepmu_event_map,
+ .raw_event = xscalepmu_raw_event,
+ .read_counter = xscale1pmu_read_counter,
+ .write_counter = xscale1pmu_write_counter,
+ .get_event_idx = xscale1pmu_get_event_idx,
+ .start = xscale1pmu_start,
+ .stop = xscale1pmu_stop,
+ .num_events = 3,
+ .max_period = (1LLU << 32) - 1,
+};
+
+#define XSCALE2_OVERFLOWED_MASK 0x01f
+#define XSCALE2_CCOUNT_OVERFLOW 0x001
+#define XSCALE2_COUNT0_OVERFLOW 0x002
+#define XSCALE2_COUNT1_OVERFLOW 0x004
+#define XSCALE2_COUNT2_OVERFLOW 0x008
+#define XSCALE2_COUNT3_OVERFLOW 0x010
+#define XSCALE2_CCOUNT_INT_EN 0x001
+#define XSCALE2_COUNT0_INT_EN 0x002
+#define XSCALE2_COUNT1_INT_EN 0x004
+#define XSCALE2_COUNT2_INT_EN 0x008
+#define XSCALE2_COUNT3_INT_EN 0x010
+#define XSCALE2_COUNT0_EVT_SHFT 0
+#define XSCALE2_COUNT0_EVT_MASK (0xff << XSCALE2_COUNT0_EVT_SHFT)
+#define XSCALE2_COUNT1_EVT_SHFT 8
+#define XSCALE2_COUNT1_EVT_MASK (0xff << XSCALE2_COUNT1_EVT_SHFT)
+#define XSCALE2_COUNT2_EVT_SHFT 16
+#define XSCALE2_COUNT2_EVT_MASK (0xff << XSCALE2_COUNT2_EVT_SHFT)
+#define XSCALE2_COUNT3_EVT_SHFT 24
+#define XSCALE2_COUNT3_EVT_MASK (0xff << XSCALE2_COUNT3_EVT_SHFT)
+
+static inline u32
+xscale2pmu_read_pmnc(void)
+{
+ u32 val;
+ asm volatile("mrc p14, 0, %0, c0, c1, 0" : "=r" (val));
+ /* bits 1-2 and 4-23 are read-unpredictable */
+ return val & 0xff000009;
+}
+
+static inline void
+xscale2pmu_write_pmnc(u32 val)
+{
+ /* bits 4-23 are write-as-0, 24-31 are write ignored */
+ val &= 0xf;
+ asm volatile("mcr p14, 0, %0, c0, c1, 0" : : "r" (val));
+}
+
+static inline u32
+xscale2pmu_read_overflow_flags(void)
+{
+ u32 val;
+ asm volatile("mrc p14, 0, %0, c5, c1, 0" : "=r" (val));
+ return val;
+}
+
+static inline void
+xscale2pmu_write_overflow_flags(u32 val)
+{
+ asm volatile("mcr p14, 0, %0, c5, c1, 0" : : "r" (val));
+}
+
+static inline u32
+xscale2pmu_read_event_select(void)
+{
+ u32 val;
+ asm volatile("mrc p14, 0, %0, c8, c1, 0" : "=r" (val));
+ return val;
+}
+
+static inline void
+xscale2pmu_write_event_select(u32 val)
+{
+ asm volatile("mcr p14, 0, %0, c8, c1, 0" : : "r"(val));
+}
+
+static inline u32
+xscale2pmu_read_int_enable(void)
+{
+ u32 val;
+ asm volatile("mrc p14, 0, %0, c4, c1, 0" : "=r" (val));
+ return val;
+}
+
+static void
+xscale2pmu_write_int_enable(u32 val)
+{
+ asm volatile("mcr p14, 0, %0, c4, c1, 0" : : "r" (val));
+}
+
+static inline int
+xscale2_pmnc_counter_has_overflowed(unsigned long of_flags,
+ enum xscale_counters counter)
+{
+ int ret = 0;
+
+ switch (counter) {
+ case XSCALE_CYCLE_COUNTER:
+ ret = of_flags & XSCALE2_CCOUNT_OVERFLOW;
+ break;
+ case XSCALE_COUNTER0:
+ ret = of_flags & XSCALE2_COUNT0_OVERFLOW;
+ break;
+ case XSCALE_COUNTER1:
+ ret = of_flags & XSCALE2_COUNT1_OVERFLOW;
+ break;
+ case XSCALE_COUNTER2:
+ ret = of_flags & XSCALE2_COUNT2_OVERFLOW;
+ break;
+ case XSCALE_COUNTER3:
+ ret = of_flags & XSCALE2_COUNT3_OVERFLOW;
+ break;
+ default:
+ WARN_ONCE(1, "invalid counter number (%d)\n", counter);
+ }
+
+ return ret;
+}
+
+static irqreturn_t
+xscale2pmu_handle_irq(int irq_num, void *dev)
+{
+ unsigned long pmnc, of_flags;
+ struct perf_sample_data data;
+ struct cpu_hw_events *cpuc;
+ struct pt_regs *regs;
+ int idx;
+
+ /* Disable the PMU. */
+ pmnc = xscale2pmu_read_pmnc();
+ xscale2pmu_write_pmnc(pmnc & ~XSCALE_PMU_ENABLE);
+
+ /* Check the overflow flag register. */
+ of_flags = xscale2pmu_read_overflow_flags();
+ if (!(of_flags & XSCALE2_OVERFLOWED_MASK))
+ return IRQ_NONE;
+
+ /* Clear the overflow bits. */
+ xscale2pmu_write_overflow_flags(of_flags);
+
+ regs = get_irq_regs();
+
+ perf_sample_data_init(&data, 0);
+
+ cpuc = &__get_cpu_var(cpu_hw_events);
+ for (idx = 0; idx <= armpmu->num_events; ++idx) {
+ struct perf_event *event = cpuc->events[idx];
+ struct hw_perf_event *hwc;
+
+ if (!test_bit(idx, cpuc->active_mask))
+ continue;
+
+ if (!xscale2_pmnc_counter_has_overflowed(of_flags, idx))
+ continue;
+
+ hwc = &event->hw;
+ armpmu_event_update(event, hwc, idx);
+ data.period = event->hw.last_period;
+ if (!armpmu_event_set_period(event, hwc, idx))
+ continue;
+
+ if (perf_event_overflow(event, 0, &data, regs))
+ armpmu->disable(hwc, idx);
+ }
+
+ perf_event_do_pending();
+
+ /*
+ * Re-enable the PMU.
+ */
+ pmnc = xscale2pmu_read_pmnc() | XSCALE_PMU_ENABLE;
+ xscale2pmu_write_pmnc(pmnc);
+
+ return IRQ_HANDLED;
+}
+
+static void
+xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
+{
+ unsigned long flags, ien, evtsel;
+
+ ien = xscale2pmu_read_int_enable();
+ evtsel = xscale2pmu_read_event_select();
+
+ switch (idx) {
+ case XSCALE_CYCLE_COUNTER:
+ ien |= XSCALE2_CCOUNT_INT_EN;
+ break;
+ case XSCALE_COUNTER0:
+ ien |= XSCALE2_COUNT0_INT_EN;
+ evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
+ evtsel |= hwc->config_base << XSCALE2_COUNT0_EVT_SHFT;
+ break;
+ case XSCALE_COUNTER1:
+ ien |= XSCALE2_COUNT1_INT_EN;
+ evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
+ evtsel |= hwc->config_base << XSCALE2_COUNT1_EVT_SHFT;
+ break;
+ case XSCALE_COUNTER2:
+ ien |= XSCALE2_COUNT2_INT_EN;
+ evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
+ evtsel |= hwc->config_base << XSCALE2_COUNT2_EVT_SHFT;
+ break;
+ case XSCALE_COUNTER3:
+ ien |= XSCALE2_COUNT3_INT_EN;
+ evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
+ evtsel |= hwc->config_base << XSCALE2_COUNT3_EVT_SHFT;
+ break;
+ default:
+ WARN_ONCE(1, "invalid counter number (%d)\n", idx);
+ return;
+ }
+
+ spin_lock_irqsave(&pmu_lock, flags);
+ xscale2pmu_write_event_select(evtsel);
+ xscale2pmu_write_int_enable(ien);
+ spin_unlock_irqrestore(&pmu_lock, flags);
+}
+
+static void
+xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
+{
+ unsigned long flags, ien, evtsel;
+
+ ien = xscale2pmu_read_int_enable();
+ evtsel = xscale2pmu_read_event_select();
+
+ switch (idx) {
+ case XSCALE_CYCLE_COUNTER:
+ ien &= ~XSCALE2_CCOUNT_INT_EN;
+ break;
+ case XSCALE_COUNTER0:
+ ien &= ~XSCALE2_COUNT0_INT_EN;
+ evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
+ evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT0_EVT_SHFT;
+ break;
+ case XSCALE_COUNTER1:
+ ien &= ~XSCALE2_COUNT1_INT_EN;
+ evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
+ evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT1_EVT_SHFT;
+ break;
+ case XSCALE_COUNTER2:
+ ien &= ~XSCALE2_COUNT2_INT_EN;
+ evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
+ evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT2_EVT_SHFT;
+ break;
+ case XSCALE_COUNTER3:
+ ien &= ~XSCALE2_COUNT3_INT_EN;
+ evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
+ evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT3_EVT_SHFT;
+ break;
+ default:
+ WARN_ONCE(1, "invalid counter number (%d)\n", idx);
+ return;
+ }
+
+ spin_lock_irqsave(&pmu_lock, flags);
+ xscale2pmu_write_event_select(evtsel);
+ xscale2pmu_write_int_enable(ien);
+ spin_unlock_irqrestore(&pmu_lock, flags);
+}
+
+static int
+xscale2pmu_get_event_idx(struct cpu_hw_events *cpuc,
+ struct hw_perf_event *event)
+{
+ int idx = xscale1pmu_get_event_idx(cpuc, event);
+ if (idx >= 0)
+ goto out;
+
+ if (!test_and_set_bit(XSCALE_COUNTER3, cpuc->used_mask))
+ idx = XSCALE_COUNTER3;
+ else if (!test_and_set_bit(XSCALE_COUNTER2, cpuc->used_mask))
+ idx = XSCALE_COUNTER2;
+out:
+ return idx;
+}
+
+static void
+xscale2pmu_start(void)
+{
+ unsigned long flags, val;
+
+ spin_lock_irqsave(&pmu_lock, flags);
+ val = xscale2pmu_read_pmnc() & ~XSCALE_PMU_CNT64;
+ val |= XSCALE_PMU_ENABLE;
+ xscale2pmu_write_pmnc(val);
+ spin_unlock_irqrestore(&pmu_lock, flags);
+}
+
+static void
+xscale2pmu_stop(void)
+{
+ unsigned long flags, val;
+
+ spin_lock_irqsave(&pmu_lock, flags);
+ val = xscale2pmu_read_pmnc();
+ val &= ~XSCALE_PMU_ENABLE;
+ xscale2pmu_write_pmnc(val);
+ spin_unlock_irqrestore(&pmu_lock, flags);
+}
+
+static inline u32
+xscale2pmu_read_counter(int counter)
+{
+ u32 val = 0;
+
+ switch (counter) {
+ case XSCALE_CYCLE_COUNTER:
+ asm volatile("mrc p14, 0, %0, c1, c1, 0" : "=r" (val));
+ break;
+ case XSCALE_COUNTER0:
+ asm volatile("mrc p14, 0, %0, c0, c2, 0" : "=r" (val));
+ break;
+ case XSCALE_COUNTER1:
+ asm volatile("mrc p14, 0, %0, c1, c2, 0" : "=r" (val));
+ break;
+ case XSCALE_COUNTER2:
+ asm volatile("mrc p14, 0, %0, c2, c2, 0" : "=r" (val));
+ break;
+ case XSCALE_COUNTER3:
+ asm volatile("mrc p14, 0, %0, c3, c2, 0" : "=r" (val));
+ break;
+ }
+
+ return val;
+}
+
+static inline void
+xscale2pmu_write_counter(int counter, u32 val)
+{
+ switch (counter) {
+ case XSCALE_CYCLE_COUNTER:
+ asm volatile("mcr p14, 0, %0, c1, c1, 0" : : "r" (val));
+ break;
+ case XSCALE_COUNTER0:
+ asm volatile("mcr p14, 0, %0, c0, c2, 0" : : "r" (val));
+ break;
+ case XSCALE_COUNTER1:
+ asm volatile("mcr p14, 0, %0, c1, c2, 0" : : "r" (val));
+ break;
+ case XSCALE_COUNTER2:
+ asm volatile("mcr p14, 0, %0, c2, c2, 0" : : "r" (val));
+ break;
+ case XSCALE_COUNTER3:
+ asm volatile("mcr p14, 0, %0, c3, c2, 0" : : "r" (val));
+ break;
+ }
+}
+
+static const struct arm_pmu xscale2pmu = {
+ .id = ARM_PERF_PMU_ID_XSCALE2,
+ .handle_irq = xscale2pmu_handle_irq,
+ .enable = xscale2pmu_enable_event,
+ .disable = xscale2pmu_disable_event,
+ .event_map = xscalepmu_event_map,
+ .raw_event = xscalepmu_raw_event,
+ .read_counter = xscale2pmu_read_counter,
+ .write_counter = xscale2pmu_write_counter,
+ .get_event_idx = xscale2pmu_get_event_idx,
+ .start = xscale2pmu_start,
+ .stop = xscale2pmu_stop,
+ .num_events = 5,
+ .max_period = (1LLU << 32) - 1,
+};
+
static int __init
init_hw_perf_events(void)
{
@@ -2086,7 +2924,7 @@ init_hw_perf_events(void)
unsigned long implementor = (cpuid & 0xFF000000) >> 24;
unsigned long part_number = (cpuid & 0xFFF0);
- /* We only support ARM CPUs implemented by ARM at the moment. */
+ /* ARM Ltd CPUs. */
if (0x41 == implementor) {
switch (part_number) {
case 0xB360: /* ARM1136 */
@@ -2105,7 +2943,7 @@ init_hw_perf_events(void)
perf_max_events = armv6mpcore_pmu.num_events;
break;
case 0xC080: /* Cortex-A8 */
- armv7pmu.name = ARMV7_PMU_CORTEX_A8_NAME;
+ armv7pmu.id = ARM_PERF_PMU_ID_CA8;
memcpy(armpmu_perf_cache_map, armv7_a8_perf_cache_map,
sizeof(armv7_a8_perf_cache_map));
armv7pmu.event_map = armv7_a8_pmu_event_map;
@@ -2117,7 +2955,7 @@ init_hw_perf_events(void)
perf_max_events = armv7pmu.num_events;
break;
case 0xC090: /* Cortex-A9 */
- armv7pmu.name = ARMV7_PMU_CORTEX_A9_NAME;
+ armv7pmu.id = ARM_PERF_PMU_ID_CA9;
memcpy(armpmu_perf_cache_map, armv7_a9_perf_cache_map,
sizeof(armv7_a9_perf_cache_map));
armv7pmu.event_map = armv7_a9_pmu_event_map;
@@ -2128,15 +2966,33 @@ init_hw_perf_events(void)
armv7pmu.num_events = armv7_reset_read_pmnc();
perf_max_events = armv7pmu.num_events;
break;
- default:
- pr_info("no hardware support available\n");
- perf_max_events = -1;
+ }
+ /* Intel CPUs [xscale]. */
+ } else if (0x69 == implementor) {
+ part_number = (cpuid >> 13) & 0x7;
+ switch (part_number) {
+ case 1:
+ armpmu = &xscale1pmu;
+ memcpy(armpmu_perf_cache_map, xscale_perf_cache_map,
+ sizeof(xscale_perf_cache_map));
+ perf_max_events = xscale1pmu.num_events;
+ break;
+ case 2:
+ armpmu = &xscale2pmu;
+ memcpy(armpmu_perf_cache_map, xscale_perf_cache_map,
+ sizeof(xscale_perf_cache_map));
+ perf_max_events = xscale2pmu.num_events;
+ break;
}
}
- if (armpmu)
+ if (armpmu) {
pr_info("enabled with %s PMU driver, %d counters available\n",
- armpmu->name, armpmu->num_events);
+ arm_pmu_names[armpmu->id], armpmu->num_events);
+ } else {
+ pr_info("no hardware support available\n");
+ perf_max_events = -1;
+ }
return 0;
}
diff --git a/arch/arm/kernel/pmu.c b/arch/arm/kernel/pmu.c
index a124312e343f..b8af96ea62e6 100644
--- a/arch/arm/kernel/pmu.c
+++ b/arch/arm/kernel/pmu.c
@@ -2,6 +2,7 @@
* linux/arch/arm/kernel/pmu.c
*
* Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles
+ * Copyright (C) 2010 ARM Ltd, Will Deacon
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -9,65 +10,78 @@
*
*/
+#define pr_fmt(fmt) "PMU: " fmt
+
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/platform_device.h>
#include <asm/pmu.h>
-/*
- * Define the IRQs for the system. We could use something like a platform
- * device but that seems fairly heavyweight for this. Also, the performance
- * counters can't be removed or hotplugged.
- *
- * Ordering is important: init_pmu() will use the ordering to set the affinity
- * to the corresponding core. e.g. the first interrupt will go to cpu 0, the
- * second goes to cpu 1 etc.
- */
-static const int irqs[] = {
-#if defined(CONFIG_ARCH_OMAP2)
- 3,
-#elif defined(CONFIG_ARCH_BCMRING)
- IRQ_PMUIRQ,
-#elif defined(CONFIG_MACH_REALVIEW_EB)
- IRQ_EB11MP_PMU_CPU0,
- IRQ_EB11MP_PMU_CPU1,
- IRQ_EB11MP_PMU_CPU2,
- IRQ_EB11MP_PMU_CPU3,
-#elif defined(CONFIG_ARCH_OMAP3)
- INT_34XX_BENCH_MPU_EMUL,
-#elif defined(CONFIG_ARCH_IOP32X)
- IRQ_IOP32X_CORE_PMU,
-#elif defined(CONFIG_ARCH_IOP33X)
- IRQ_IOP33X_CORE_PMU,
-#elif defined(CONFIG_ARCH_PXA)
- IRQ_PMU,
-#endif
-};
+static volatile long pmu_lock;
+
+static struct platform_device *pmu_devices[ARM_NUM_PMU_DEVICES];
+
+static int __devinit pmu_device_probe(struct platform_device *pdev)
+{
+
+ if (pdev->id < 0 || pdev->id >= ARM_NUM_PMU_DEVICES) {
+ pr_warning("received registration request for unknown "
+ "device %d\n", pdev->id);
+ return -EINVAL;
+ }
+
+ if (pmu_devices[pdev->id])
+ pr_warning("registering new PMU device type %d overwrites "
+ "previous registration!\n", pdev->id);
+ else
+ pr_info("registered new PMU device of type %d\n",
+ pdev->id);
-static const struct pmu_irqs pmu_irqs = {
- .irqs = irqs,
- .num_irqs = ARRAY_SIZE(irqs),
+ pmu_devices[pdev->id] = pdev;
+ return 0;
+}
+
+static struct platform_driver pmu_driver = {
+ .driver = {
+ .name = "arm-pmu",
+ },
+ .probe = pmu_device_probe,
};
-static volatile long pmu_lock;
+static int __init register_pmu_driver(void)
+{
+ return platform_driver_register(&pmu_driver);
+}
+device_initcall(register_pmu_driver);
-const struct pmu_irqs *
-reserve_pmu(void)
+struct platform_device *
+reserve_pmu(enum arm_pmu_type device)
{
- return test_and_set_bit_lock(0, &pmu_lock) ? ERR_PTR(-EBUSY) :
- &pmu_irqs;
+ struct platform_device *pdev;
+
+ if (test_and_set_bit_lock(device, &pmu_lock)) {
+ pdev = ERR_PTR(-EBUSY);
+ } else if (pmu_devices[device] == NULL) {
+ clear_bit_unlock(device, &pmu_lock);
+ pdev = ERR_PTR(-ENODEV);
+ } else {
+ pdev = pmu_devices[device];
+ }
+
+ return pdev;
}
EXPORT_SYMBOL_GPL(reserve_pmu);
int
-release_pmu(const struct pmu_irqs *irqs)
+release_pmu(struct platform_device *pdev)
{
- if (WARN_ON(irqs != &pmu_irqs))
+ if (WARN_ON(pdev != pmu_devices[pdev->id]))
return -EINVAL;
- clear_bit_unlock(0, &pmu_lock);
+ clear_bit_unlock(pdev->id, &pmu_lock);
return 0;
}
EXPORT_SYMBOL_GPL(release_pmu);
@@ -87,17 +101,42 @@ set_irq_affinity(int irq,
#endif
}
-int
-init_pmu(void)
+static int
+init_cpu_pmu(void)
{
int i, err = 0;
+ struct platform_device *pdev = pmu_devices[ARM_PMU_DEVICE_CPU];
+
+ if (!pdev) {
+ err = -ENODEV;
+ goto out;
+ }
- for (i = 0; i < pmu_irqs.num_irqs; ++i) {
- err = set_irq_affinity(pmu_irqs.irqs[i], i);
+ for (i = 0; i < pdev->num_resources; ++i) {
+ err = set_irq_affinity(platform_get_irq(pdev, i), i);
if (err)
break;
}
+out:
+ return err;
+}
+
+int
+init_pmu(enum arm_pmu_type device)
+{
+ int err = 0;
+
+ switch (device) {
+ case ARM_PMU_DEVICE_CPU:
+ err = init_cpu_pmu();
+ break;
+ default:
+ pr_warning("attempt to initialise unknown device %d\n",
+ device);
+ err = -EINVAL;
+ }
+
return err;
}
EXPORT_SYMBOL_GPL(init_pmu);
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index c91c77b54dea..122d999bdc7c 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -593,6 +593,7 @@ static int __init parse_tag_revision(const struct tag *tag)
__tagtable(ATAG_REVISION, parse_tag_revision);
+#ifndef CONFIG_CMDLINE_FORCE
static int __init parse_tag_cmdline(const struct tag *tag)
{
strlcpy(default_command_line, tag->u.cmdline.cmdline, COMMAND_LINE_SIZE);
@@ -600,6 +601,7 @@ static int __init parse_tag_cmdline(const struct tag *tag)
}
__tagtable(ATAG_CMDLINE, parse_tag_cmdline);
+#endif /* CONFIG_CMDLINE_FORCE */
/*
* Scan the tag table for this tag, and call its parse function.
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index a01194e583ff..b8c3d0f689d9 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -168,7 +168,7 @@ int __cpu_disable(void)
struct task_struct *p;
int ret;
- ret = mach_cpu_disable(cpu);
+ ret = platform_cpu_disable(cpu);
if (ret)
return ret;
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index ea02a7b1c244..7c5f0c024db7 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -21,23 +21,6 @@
#include <asm/smp_twd.h>
#include <asm/hardware/gic.h>
-#define TWD_TIMER_LOAD 0x00
-#define TWD_TIMER_COUNTER 0x04
-#define TWD_TIMER_CONTROL 0x08
-#define TWD_TIMER_INTSTAT 0x0C
-
-#define TWD_WDOG_LOAD 0x20
-#define TWD_WDOG_COUNTER 0x24
-#define TWD_WDOG_CONTROL 0x28
-#define TWD_WDOG_INTSTAT 0x2C
-#define TWD_WDOG_RESETSTAT 0x30
-#define TWD_WDOG_DISABLE 0x34
-
-#define TWD_TIMER_CONTROL_ENABLE (1 << 0)
-#define TWD_TIMER_CONTROL_ONESHOT (0 << 1)
-#define TWD_TIMER_CONTROL_PERIODIC (1 << 1)
-#define TWD_TIMER_CONTROL_IT_ENABLE (1 << 2)
-
/* set up by the platform code */
void __iomem *twd_base;
diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c
index 28753805d2d1..38c261f9951c 100644
--- a/arch/arm/kernel/time.c
+++ b/arch/arm/kernel/time.c
@@ -72,12 +72,15 @@ unsigned long profile_pc(struct pt_regs *regs)
EXPORT_SYMBOL(profile_pc);
#endif
-#ifndef CONFIG_GENERIC_TIME
-static unsigned long dummy_gettimeoffset(void)
+#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
+u32 arch_gettimeoffset(void)
{
+ if (system_timer->offset != NULL)
+ return system_timer->offset() * 1000;
+
return 0;
}
-#endif
+#endif /* CONFIG_ARCH_USES_GETTIMEOFFSET */
#ifdef CONFIG_LEDS_TIMER
static inline void do_leds(void)
@@ -93,63 +96,6 @@ static inline void do_leds(void)
#define do_leds()
#endif
-#ifndef CONFIG_GENERIC_TIME
-void do_gettimeofday(struct timeval *tv)
-{
- unsigned long flags;
- unsigned long seq;
- unsigned long usec, sec;
-
- do {
- seq = read_seqbegin_irqsave(&xtime_lock, flags);
- usec = system_timer->offset();
- sec = xtime.tv_sec;
- usec += xtime.tv_nsec / 1000;
- } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
-
- /* usec may have gone up a lot: be safe */
- while (usec >= 1000000) {
- usec -= 1000000;
- sec++;
- }
-
- tv->tv_sec = sec;
- tv->tv_usec = usec;
-}
-
-EXPORT_SYMBOL(do_gettimeofday);
-
-int do_settimeofday(struct timespec *tv)
-{
- time_t wtm_sec, sec = tv->tv_sec;
- long wtm_nsec, nsec = tv->tv_nsec;
-
- if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
- return -EINVAL;
-
- write_seqlock_irq(&xtime_lock);
- /*
- * This is revolting. We need to set "xtime" correctly. However, the
- * value in this location is the value at the most recent update of
- * wall time. Discover what correction gettimeofday() would have
- * done, and then undo it!
- */
- nsec -= system_timer->offset() * NSEC_PER_USEC;
-
- wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
- wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
-
- set_normalized_timespec(&xtime, sec, nsec);
- set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
-
- ntp_clear();
- write_sequnlock_irq(&xtime_lock);
- clock_was_set();
- return 0;
-}
-
-EXPORT_SYMBOL(do_settimeofday);
-#endif /* !CONFIG_GENERIC_TIME */
#ifndef CONFIG_GENERIC_CLOCKEVENTS
/*
@@ -214,10 +160,6 @@ device_initcall(timer_init_sysfs);
void __init time_init(void)
{
-#ifndef CONFIG_GENERIC_TIME
- if (system_timer->offset == NULL)
- system_timer->offset = dummy_gettimeoffset;
-#endif
system_timer->init();
}
diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c
index 50292cd9c120..dd81a918c106 100644
--- a/arch/arm/kernel/unwind.c
+++ b/arch/arm/kernel/unwind.c
@@ -26,6 +26,7 @@
* http://infocenter.arm.com/help/topic/com.arm.doc.subset.swdev.abi/index.html
*/
+#ifndef __CHECKER__
#if !defined (__ARM_EABI__)
#warning Your compiler does not have EABI support.
#warning ARM unwind is known to compile only with EABI compilers.
@@ -34,6 +35,7 @@
#warning Your compiler is too buggy; it is known to not compile ARM unwind support.
#warning Change compiler or disable ARM_UNWIND option.
#endif
+#endif /* __CHECKER__ */
#include <linux/kernel.h>
#include <linux/init.h>
diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
index 5e3f99620c04..14a0d988c82c 100644
--- a/arch/arm/lib/clear_user.S
+++ b/arch/arm/lib/clear_user.S
@@ -45,6 +45,7 @@ USER( strnebt r2, [r0])
mov r0, #0
ldmfd sp!, {r1, pc}
ENDPROC(__clear_user)
+ENDPROC(__clear_user_std)
.pushsection .fixup,"ax"
.align 0
diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
index 027b69bdbad1..d066df686e17 100644
--- a/arch/arm/lib/copy_to_user.S
+++ b/arch/arm/lib/copy_to_user.S
@@ -93,6 +93,7 @@ WEAK(__copy_to_user)
#include "copy_template.S"
ENDPROC(__copy_to_user)
+ENDPROC(__copy_to_user_std)
.pushsection .fixup,"ax"
.align 0
diff --git a/arch/arm/mach-at91/Kconfig b/arch/arm/mach-at91/Kconfig
index 2db43a5ddd9b..841eaf8f27e2 100644
--- a/arch/arm/mach-at91/Kconfig
+++ b/arch/arm/mach-at91/Kconfig
@@ -23,14 +23,12 @@ choice
config ARCH_AT91RM9200
bool "AT91RM9200"
select CPU_ARM920T
- select GENERIC_TIME
select GENERIC_CLOCKEVENTS
select HAVE_AT91_USART3
config ARCH_AT91SAM9260
bool "AT91SAM9260 or AT91SAM9XE"
select CPU_ARM926T
- select GENERIC_TIME
select GENERIC_CLOCKEVENTS
select HAVE_AT91_USART3
select HAVE_AT91_USART4
@@ -39,28 +37,24 @@ config ARCH_AT91SAM9260
config ARCH_AT91SAM9261
bool "AT91SAM9261"
select CPU_ARM926T
- select GENERIC_TIME
select GENERIC_CLOCKEVENTS
select HAVE_FB_ATMEL
config ARCH_AT91SAM9G10
bool "AT91SAM9G10"
select CPU_ARM926T
- select GENERIC_TIME
select GENERIC_CLOCKEVENTS
select HAVE_FB_ATMEL
config ARCH_AT91SAM9263
bool "AT91SAM9263"
select CPU_ARM926T
- select GENERIC_TIME
select GENERIC_CLOCKEVENTS
select HAVE_FB_ATMEL
config ARCH_AT91SAM9RL
bool "AT91SAM9RL"
select CPU_ARM926T
- select GENERIC_TIME
select GENERIC_CLOCKEVENTS
select HAVE_AT91_USART3
select HAVE_FB_ATMEL
@@ -68,7 +62,6 @@ config ARCH_AT91SAM9RL
config ARCH_AT91SAM9G20
bool "AT91SAM9G20"
select CPU_ARM926T
- select GENERIC_TIME
select GENERIC_CLOCKEVENTS
select HAVE_AT91_USART3
select HAVE_AT91_USART4
@@ -77,7 +70,6 @@ config ARCH_AT91SAM9G20
config ARCH_AT91SAM9G45
bool "AT91SAM9G45"
select CPU_ARM926T
- select GENERIC_TIME
select GENERIC_CLOCKEVENTS
select HAVE_AT91_USART3
select HAVE_FB_ATMEL
@@ -85,18 +77,17 @@ config ARCH_AT91SAM9G45
config ARCH_AT91CAP9
bool "AT91CAP9"
select CPU_ARM926T
- select GENERIC_TIME
select GENERIC_CLOCKEVENTS
select HAVE_FB_ATMEL
config ARCH_AT572D940HF
bool "AT572D940HF"
select CPU_ARM926T
- select GENERIC_TIME
select GENERIC_CLOCKEVENTS
config ARCH_AT91X40
bool "AT91x40"
+ select ARCH_USES_GETTIMEOFFSET
endchoice
@@ -360,6 +351,19 @@ config MACH_CPU9G20
Select this if you are using Eukrea Electromatique's
CPU9G20 Board <http://www.eukrea.com/>
+config MACH_PORTUXG20
+ bool "taskit PortuxG20"
+ help
+ Select this if you are using taskit's PortuxG20.
+ <http://www.taskit.de/en/>
+
+config MACH_STAMP9G20
+ bool "taskit Stamp9G20 CPU module"
+ help
+ Select this if you are using taskit's Stamp9G20 CPU module on its
+ evaluation board.
+ <http://www.taskit.de/en/>
+
endif
# ----------------------------------------------------------
diff --git a/arch/arm/mach-at91/Makefile b/arch/arm/mach-at91/Makefile
index d4004557532a..c1f821e58222 100644
--- a/arch/arm/mach-at91/Makefile
+++ b/arch/arm/mach-at91/Makefile
@@ -63,6 +63,8 @@ obj-$(CONFIG_MACH_AT91SAM9RLEK) += board-sam9rlek.o
obj-$(CONFIG_MACH_AT91SAM9G20EK) += board-sam9g20ek.o
obj-$(CONFIG_MACH_AT91SAM9G20EK_2MMC) += board-sam9g20ek-2slot-mmc.o
obj-$(CONFIG_MACH_CPU9G20) += board-cpu9krea.o
+obj-$(CONFIG_MACH_STAMP9G20) += board-stamp9g20.o
+obj-$(CONFIG_MACH_PORTUXG20) += board-stamp9g20.o
# AT91SAM9G45 board-specific support
obj-$(CONFIG_MACH_AT91SAM9G45EKES) += board-sam9m10g45ek.o
diff --git a/arch/arm/mach-at91/board-sam9m10g45ek.c b/arch/arm/mach-at91/board-sam9m10g45ek.c
index 98f9f4bc9396..ee800595594d 100644
--- a/arch/arm/mach-at91/board-sam9m10g45ek.c
+++ b/arch/arm/mach-at91/board-sam9m10g45ek.c
@@ -25,7 +25,6 @@
#include <linux/leds.h>
#include <linux/clk.h>
-#include <mach/hardware.h>
#include <video/atmel_lcdc.h>
#include <asm/setup.h>
diff --git a/arch/arm/mach-at91/board-stamp9g20.c b/arch/arm/mach-at91/board-stamp9g20.c
new file mode 100644
index 000000000000..87958274290f
--- /dev/null
+++ b/arch/arm/mach-at91/board-stamp9g20.c
@@ -0,0 +1,315 @@
+/*
+ * Copyright (C) 2010 Christian Glindkamp <christian.glindkamp@taskit.de>
+ * taskit GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/mm.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/w1-gpio.h>
+
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+
+#include <mach/board.h>
+#include <mach/at91sam9_smc.h>
+
+#include "sam9_smc.h"
+#include "generic.h"
+
+
+static void __init portuxg20_map_io(void)
+{
+ /* Initialize processor: 18.432 MHz crystal */
+ at91sam9260_initialize(18432000);
+
+ /* DBGU on ttyS0. (Rx & Tx only) */
+ at91_register_uart(0, 0, 0);
+
+ /* USART0 on ttyS1. (Rx, Tx, CTS, RTS, DTR, DSR, DCD, RI) */
+ at91_register_uart(AT91SAM9260_ID_US0, 1, ATMEL_UART_CTS | ATMEL_UART_RTS
+ | ATMEL_UART_DTR | ATMEL_UART_DSR
+ | ATMEL_UART_DCD | ATMEL_UART_RI);
+
+ /* USART1 on ttyS2. (Rx, Tx, CTS, RTS) */
+ at91_register_uart(AT91SAM9260_ID_US1, 2, ATMEL_UART_CTS | ATMEL_UART_RTS);
+
+ /* USART2 on ttyS3. (Rx, Tx, CTS, RTS) */
+ at91_register_uart(AT91SAM9260_ID_US2, 3, ATMEL_UART_CTS | ATMEL_UART_RTS);
+
+ /* USART4 on ttyS5. (Rx, Tx only) */
+ at91_register_uart(AT91SAM9260_ID_US4, 5, 0);
+
+ /* USART5 on ttyS6. (Rx, Tx only) */
+ at91_register_uart(AT91SAM9260_ID_US5, 6, 0);
+
+ /* set serial console to ttyS0 (ie, DBGU) */
+ at91_set_serial_console(0);
+}
+
+static void __init stamp9g20_map_io(void)
+{
+ /* Initialize processor: 18.432 MHz crystal */
+ at91sam9260_initialize(18432000);
+
+ /* DBGU on ttyS0. (Rx & Tx only) */
+ at91_register_uart(0, 0, 0);
+
+ /* USART0 on ttyS1. (Rx, Tx, CTS, RTS, DTR, DSR, DCD, RI) */
+ at91_register_uart(AT91SAM9260_ID_US0, 1, ATMEL_UART_CTS | ATMEL_UART_RTS
+ | ATMEL_UART_DTR | ATMEL_UART_DSR
+ | ATMEL_UART_DCD | ATMEL_UART_RI);
+
+ /* set serial console to ttyS0 (ie, DBGU) */
+ at91_set_serial_console(0);
+}
+
+static void __init init_irq(void)
+{
+ at91sam9260_init_interrupts(NULL);
+}
+
+
+/*
+ * NAND flash
+ */
+static struct atmel_nand_data __initdata nand_data = {
+ .ale = 21,
+ .cle = 22,
+ .rdy_pin = AT91_PIN_PC13,
+ .enable_pin = AT91_PIN_PC14,
+ .bus_width_16 = 0,
+};
+
+static struct sam9_smc_config __initdata nand_smc_config = {
+ .ncs_read_setup = 0,
+ .nrd_setup = 2,
+ .ncs_write_setup = 0,
+ .nwe_setup = 2,
+
+ .ncs_read_pulse = 4,
+ .nrd_pulse = 4,
+ .ncs_write_pulse = 4,
+ .nwe_pulse = 4,
+
+ .read_cycle = 7,
+ .write_cycle = 7,
+
+ .mode = AT91_SMC_READMODE | AT91_SMC_WRITEMODE | AT91_SMC_EXNWMODE_DISABLE | AT91_SMC_DBW_8,
+ .tdf_cycles = 3,
+};
+
+static void __init add_device_nand(void)
+{
+ /* configure chip-select 3 (NAND) */
+ sam9_smc_configure(3, &nand_smc_config);
+
+ at91_add_device_nand(&nand_data);
+}
+
+
+/*
+ * MCI (SD/MMC)
+ * det_pin, wp_pin and vcc_pin are not connected
+ */
+#if defined(CONFIG_MMC_ATMELMCI) || defined(CONFIG_MMC_ATMELMCI_MODULE)
+static struct mci_platform_data __initdata mmc_data = {
+ .slot[0] = {
+ .bus_width = 4,
+ },
+};
+#else
+static struct at91_mmc_data __initdata mmc_data = {
+ .slot_b = 0,
+ .wire4 = 1,
+};
+#endif
+
+
+/*
+ * USB Host port
+ */
+static struct at91_usbh_data __initdata usbh_data = {
+ .ports = 2,
+};
+
+
+/*
+ * USB Device port
+ */
+static struct at91_udc_data __initdata portuxg20_udc_data = {
+ .vbus_pin = AT91_PIN_PC7,
+ .pullup_pin = 0, /* pull-up driven by UDC */
+};
+
+static struct at91_udc_data __initdata stamp9g20_udc_data = {
+ .vbus_pin = AT91_PIN_PA22,
+ .pullup_pin = 0, /* pull-up driven by UDC */
+};
+
+
+/*
+ * MACB Ethernet device
+ */
+static struct at91_eth_data __initdata macb_data = {
+ .phy_irq_pin = AT91_PIN_PA28,
+ .is_rmii = 1,
+};
+
+
+/*
+ * LEDs
+ */
+static struct gpio_led portuxg20_leds[] = {
+ {
+ .name = "LED2",
+ .gpio = AT91_PIN_PC5,
+ .default_trigger = "none",
+ }, {
+ .name = "LED3",
+ .gpio = AT91_PIN_PC4,
+ .default_trigger = "none",
+ }, {
+ .name = "LED4",
+ .gpio = AT91_PIN_PC10,
+ .default_trigger = "heartbeat",
+ }
+};
+
+static struct gpio_led stamp9g20_leds[] = {
+ {
+ .name = "D8",
+ .gpio = AT91_PIN_PB18,
+ .active_low = 1,
+ .default_trigger = "none",
+ }, {
+ .name = "D9",
+ .gpio = AT91_PIN_PB19,
+ .active_low = 1,
+ .default_trigger = "none",
+ }, {
+ .name = "D10",
+ .gpio = AT91_PIN_PB20,
+ .active_low = 1,
+ .default_trigger = "heartbeat",
+ }
+};
+
+
+/*
+ * SPI devices
+ */
+static struct spi_board_info portuxg20_spi_devices[] = {
+ {
+ .modalias = "spidev",
+ .chip_select = 0,
+ .max_speed_hz = 1 * 1000 * 1000,
+ .bus_num = 0,
+ }, {
+ .modalias = "spidev",
+ .chip_select = 0,
+ .max_speed_hz = 1 * 1000 * 1000,
+ .bus_num = 1,
+ },
+};
+
+
+/*
+ * Dallas 1-Wire
+ */
+static struct w1_gpio_platform_data w1_gpio_pdata = {
+ .pin = AT91_PIN_PA29,
+ .is_open_drain = 1,
+};
+
+static struct platform_device w1_device = {
+ .name = "w1-gpio",
+ .id = -1,
+ .dev.platform_data = &w1_gpio_pdata,
+};
+
+void add_w1(void)
+{
+ at91_set_GPIO_periph(w1_gpio_pdata.pin, 1);
+ at91_set_multi_drive(w1_gpio_pdata.pin, 1);
+ platform_device_register(&w1_device);
+}
+
+
+static void __init generic_board_init(void)
+{
+ /* Serial */
+ at91_add_device_serial();
+ /* NAND */
+ add_device_nand();
+ /* MMC */
+#if defined(CONFIG_MMC_ATMELMCI) || defined(CONFIG_MMC_ATMELMCI_MODULE)
+ at91_add_device_mci(0, &mmc_data);
+#else
+ at91_add_device_mmc(0, &mmc_data);
+#endif
+ /* USB Host */
+ at91_add_device_usbh(&usbh_data);
+ /* Ethernet */
+ at91_add_device_eth(&macb_data);
+ /* I2C */
+ at91_add_device_i2c(NULL, 0);
+ /* W1 */
+ add_w1();
+}
+
+static void __init portuxg20_board_init(void)
+{
+ generic_board_init();
+ /* SPI */
+ at91_add_device_spi(portuxg20_spi_devices, ARRAY_SIZE(portuxg20_spi_devices));
+ /* USB Device */
+ at91_add_device_udc(&portuxg20_udc_data);
+ /* LEDs */
+ at91_gpio_leds(portuxg20_leds, ARRAY_SIZE(portuxg20_leds));
+}
+
+static void __init stamp9g20_board_init(void)
+{
+ generic_board_init();
+ /* USB Device */
+ at91_add_device_udc(&stamp9g20_udc_data);
+ /* LEDs */
+ at91_gpio_leds(stamp9g20_leds, ARRAY_SIZE(stamp9g20_leds));
+}
+
+MACHINE_START(PORTUXG20, "taskit PortuxG20")
+ /* Maintainer: taskit GmbH */
+ .phys_io = AT91_BASE_SYS,
+ .io_pg_offst = (AT91_VA_BASE_SYS >> 18) & 0xfffc,
+ .boot_params = AT91_SDRAM_BASE + 0x100,
+ .timer = &at91sam926x_timer,
+ .map_io = portuxg20_map_io,
+ .init_irq = init_irq,
+ .init_machine = portuxg20_board_init,
+MACHINE_END
+
+MACHINE_START(STAMP9G20, "taskit Stamp9G20")
+ /* Maintainer: taskit GmbH */
+ .phys_io = AT91_BASE_SYS,
+ .io_pg_offst = (AT91_VA_BASE_SYS >> 18) & 0xfffc,
+ .boot_params = AT91_SDRAM_BASE + 0x100,
+ .timer = &at91sam926x_timer,
+ .map_io = stamp9g20_map_io,
+ .init_irq = init_irq,
+ .init_machine = stamp9g20_board_init,
+MACHINE_END
diff --git a/arch/arm/mach-at91/include/mach/board.h b/arch/arm/mach-at91/include/mach/board.h
index ceaec6c16eb2..df2ed848c9f8 100644
--- a/arch/arm/mach-at91/include/mach/board.h
+++ b/arch/arm/mach-at91/include/mach/board.h
@@ -39,6 +39,7 @@
#include <linux/usb/atmel_usba_udc.h>
#include <linux/atmel-mci.h>
#include <sound/atmel-ac97c.h>
+#include <linux/serial.h>
/* USB Device */
struct at91_udc_data {
@@ -143,9 +144,10 @@ extern struct platform_device *atmel_default_console_device;
extern void __init __deprecated at91_init_serial(struct at91_uart_config *config);
struct atmel_uart_data {
- short use_dma_tx; /* use transmit DMA? */
- short use_dma_rx; /* use receive DMA? */
- void __iomem *regs; /* virtual base address, if any */
+ short use_dma_tx; /* use transmit DMA? */
+ short use_dma_rx; /* use receive DMA? */
+ void __iomem *regs; /* virt. base address, if any */
+ struct serial_rs485 rs485; /* rs485 settings */
};
extern void __init at91_add_device_serial(void);
diff --git a/arch/arm/mach-at91/include/mach/cpu.h b/arch/arm/mach-at91/include/mach/cpu.h
index 5a0650101d45..833659d1200a 100644
--- a/arch/arm/mach-at91/include/mach/cpu.h
+++ b/arch/arm/mach-at91/include/mach/cpu.h
@@ -21,7 +21,7 @@
#define ARCH_ID_AT91SAM9260 0x019803a0
#define ARCH_ID_AT91SAM9261 0x019703a0
#define ARCH_ID_AT91SAM9263 0x019607a0
-#define ARCH_ID_AT91SAM9G10 0x819903a0
+#define ARCH_ID_AT91SAM9G10 0x019903a0
#define ARCH_ID_AT91SAM9G20 0x019905a0
#define ARCH_ID_AT91SAM9RL64 0x019b03a0
#define ARCH_ID_AT91SAM9G45 0x819b05a0
@@ -108,7 +108,7 @@ static inline unsigned long at91cap9_rev_identify(void)
#endif
#ifdef CONFIG_ARCH_AT91SAM9G10
-#define cpu_is_at91sam9g10() (at91_cpu_identify() == ARCH_ID_AT91SAM9G10)
+#define cpu_is_at91sam9g10() ((at91_cpu_identify() & ~AT91_CIDR_EXT) == ARCH_ID_AT91SAM9G10)
#else
#define cpu_is_at91sam9g10() (0)
#endif
diff --git a/arch/arm/mach-at91/include/mach/system.h b/arch/arm/mach-at91/include/mach/system.h
index 5268af3933c2..c80e090b3670 100644
--- a/arch/arm/mach-at91/include/mach/system.h
+++ b/arch/arm/mach-at91/include/mach/system.h
@@ -24,21 +24,24 @@
#include <mach/hardware.h>
#include <mach/at91_st.h>
#include <mach/at91_dbgu.h>
+#include <mach/at91_pmc.h>
static inline void arch_idle(void)
{
+#ifndef CONFIG_DEBUG_KERNEL
/*
* Disable the processor clock. The processor will be automatically
* re-enabled by an interrupt or by a reset.
*/
-// at91_sys_write(AT91_PMC_SCDR, AT91_PMC_PCK);
-
+ at91_sys_write(AT91_PMC_SCDR, AT91_PMC_PCK);
+#else
/*
* Set the processor (CP15) into 'Wait for Interrupt' mode.
* Unlike disabling the processor clock via the PMC (above)
* this allows the processor to be woken via JTAG.
*/
cpu_do_idle();
+#endif
}
void (*at91_arch_reset)(void);
diff --git a/arch/arm/mach-bcmring/arch.c b/arch/arm/mach-bcmring/arch.c
index 53dd2a9eecf9..2f139196d63d 100644
--- a/arch/arm/mach-bcmring/arch.c
+++ b/arch/arm/mach-bcmring/arch.c
@@ -29,6 +29,7 @@
#include <asm/setup.h>
#include <asm/mach-types.h>
#include <asm/mach/time.h>
+#include <asm/pmu.h>
#include <asm/mach/arch.h>
#include <mach/dma.h>
@@ -85,8 +86,23 @@ static struct platform_device nand_device = {
.num_resources = ARRAY_SIZE(nand_resource),
};
+static struct resource pmu_resource = {
+ .start = IRQ_PMUIRQ,
+ .end = IRQ_PMUIRQ,
+ .flags = IORESOURCE_IRQ,
+};
+
+static struct platform_device pmu_device = {
+ .name = "arm-pmu",
+ .id = ARM_PMU_DEVICE_CPU,
+ .resource = &pmu_resource,
+ .num_resources = 1,
+};
+
+
static struct platform_device *devices[] __initdata = {
&nand_device,
+ &pmu_device,
};
/****************************************************************************
diff --git a/arch/arm/mach-clps711x/Makefile.boot b/arch/arm/mach-clps711x/Makefile.boot
index d3d29339e149..a51fcef64fe0 100644
--- a/arch/arm/mach-clps711x/Makefile.boot
+++ b/arch/arm/mach-clps711x/Makefile.boot
@@ -1,7 +1,6 @@
# The standard locations for stuff on CLPS711x type processors
- zreladdr-y := 0xc0028000
+ zreladdr-y := 0xc0028000
params_phys-y := 0xc0000100
# Should probably have some agreement on these...
initrd_phys-$(CONFIG_ARCH_P720T) := 0xc0400000
initrd_phys-$(CONFIG_ARCH_CDB89712) := 0x00700000
-
diff --git a/arch/arm/mach-clps711x/mm.c b/arch/arm/mach-clps711x/mm.c
index a7b4591205a3..986592176767 100644
--- a/arch/arm/mach-clps711x/mm.c
+++ b/arch/arm/mach-clps711x/mm.c
@@ -22,7 +22,6 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
-#include <linux/bootmem.h>
#include <asm/sizes.h>
#include <mach/hardware.h>
diff --git a/arch/arm/mach-cns3xxx/Kconfig b/arch/arm/mach-cns3xxx/Kconfig
new file mode 100644
index 000000000000..9ebfcc46feb1
--- /dev/null
+++ b/arch/arm/mach-cns3xxx/Kconfig
@@ -0,0 +1,12 @@
+menu "CNS3XXX platform type"
+ depends on ARCH_CNS3XXX
+
+config MACH_CNS3420VB
+ bool "Support for CNS3420 Validation Board"
+ help
+ Include support for the Cavium Networks CNS3420 MPCore Platform
+ Baseboard.
+ This is a platform with an on-board ARM11 MPCore and has support
+ for USB, USB-OTG, MMC/SD/SDIO, SATA, PCI-E, etc.
+
+endmenu
diff --git a/arch/arm/mach-cns3xxx/Makefile b/arch/arm/mach-cns3xxx/Makefile
new file mode 100644
index 000000000000..427507a2d696
--- /dev/null
+++ b/arch/arm/mach-cns3xxx/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_ARCH_CNS3XXX) += core.o pm.o
+obj-$(CONFIG_MACH_CNS3420VB) += cns3420vb.o
diff --git a/arch/arm/mach-cns3xxx/Makefile.boot b/arch/arm/mach-cns3xxx/Makefile.boot
new file mode 100644
index 000000000000..777012865220
--- /dev/null
+++ b/arch/arm/mach-cns3xxx/Makefile.boot
@@ -0,0 +1,3 @@
+ zreladdr-y := 0x00008000
+params_phys-y := 0x00000100
+initrd_phys-y := 0x00C00000
diff --git a/arch/arm/mach-cns3xxx/cns3420vb.c b/arch/arm/mach-cns3xxx/cns3420vb.c
new file mode 100644
index 000000000000..2e30c8288740
--- /dev/null
+++ b/arch/arm/mach-cns3xxx/cns3420vb.c
@@ -0,0 +1,148 @@
+/*
+ * Cavium Networks CNS3420 Validation Board
+ *
+ * Copyright 2000 Deep Blue Solutions Ltd
+ * Copyright 2008 ARM Limited
+ * Copyright 2008 Cavium Networks
+ * Scott Shu
+ * Copyright 2010 MontaVista Software, LLC.
+ * Anton Vorontsov <avorontsov@mvista.com>
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/compiler.h>
+#include <linux/io.h>
+#include <linux/serial_core.h>
+#include <linux/serial_8250.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/physmap.h>
+#include <linux/mtd/partitions.h>
+#include <asm/setup.h>
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <asm/mach/time.h>
+#include <mach/hardware.h>
+#include <mach/cns3xxx.h>
+#include <mach/irqs.h>
+#include "core.h"
+
+/*
+ * NOR Flash
+ */
+static struct mtd_partition cns3420_nor_partitions[] = {
+ {
+ .name = "uboot",
+ .size = 0x00040000,
+ .offset = 0,
+ .mask_flags = MTD_WRITEABLE,
+ }, {
+ .name = "kernel",
+ .size = 0x004C0000,
+ .offset = MTDPART_OFS_APPEND,
+ }, {
+ .name = "filesystem",
+ .size = 0x7000000,
+ .offset = MTDPART_OFS_APPEND,
+ }, {
+ .name = "filesystem2",
+ .size = 0x0AE0000,
+ .offset = MTDPART_OFS_APPEND,
+ }, {
+ .name = "ubootenv",
+ .size = MTDPART_SIZ_FULL,
+ .offset = MTDPART_OFS_APPEND,
+ },
+};
+
+static struct physmap_flash_data cns3420_nor_pdata = {
+ .width = 2,
+ .parts = cns3420_nor_partitions,
+ .nr_parts = ARRAY_SIZE(cns3420_nor_partitions),
+};
+
+static struct resource cns3420_nor_res = {
+ .start = CNS3XXX_FLASH_BASE,
+ .end = CNS3XXX_FLASH_BASE + SZ_128M - 1,
+ .flags = IORESOURCE_MEM | IORESOURCE_MEM_32BIT,
+};
+
+static struct platform_device cns3420_nor_pdev = {
+ .name = "physmap-flash",
+ .id = 0,
+ .resource = &cns3420_nor_res,
+ .num_resources = 1,
+ .dev = {
+ .platform_data = &cns3420_nor_pdata,
+ },
+};
+
+/*
+ * UART
+ */
+static void __init cns3420_early_serial_setup(void)
+{
+#ifdef CONFIG_SERIAL_8250_CONSOLE
+ static struct uart_port cns3420_serial_port = {
+ .membase = (void __iomem *)CNS3XXX_UART0_BASE_VIRT,
+ .mapbase = CNS3XXX_UART0_BASE,
+ .irq = IRQ_CNS3XXX_UART0,
+ .iotype = UPIO_MEM,
+ .flags = UPF_BOOT_AUTOCONF | UPF_FIXED_TYPE,
+ .regshift = 2,
+ .uartclk = 24000000,
+ .line = 0,
+ .type = PORT_16550A,
+ .fifosize = 16,
+ };
+
+ early_serial_setup(&cns3420_serial_port);
+#endif
+}
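With the uart_port above clocked at 24 MHz, the standard 16550 divisor for a 115200-baud early console works out to uartclk / (16 * baud) = 24,000,000 / (16 * 115,200) ≈ 13; this is only a plausibility check on the uartclk value, since the actual divisor programming is left to the 8250 driver.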
+
+/*
+ * Initialization
+ */
+static struct platform_device *cns3420_pdevs[] __initdata = {
+ &cns3420_nor_pdev,
+};
+
+static void __init cns3420_init(void)
+{
+ platform_add_devices(cns3420_pdevs, ARRAY_SIZE(cns3420_pdevs));
+
+ pm_power_off = cns3xxx_power_off;
+}
+
+static struct map_desc cns3420_io_desc[] __initdata = {
+ {
+ .virtual = CNS3XXX_UART0_BASE_VIRT,
+ .pfn = __phys_to_pfn(CNS3XXX_UART0_BASE),
+ .length = SZ_4K,
+ .type = MT_DEVICE,
+ },
+};
+
+static void __init cns3420_map_io(void)
+{
+ cns3xxx_map_io();
+ iotable_init(cns3420_io_desc, ARRAY_SIZE(cns3420_io_desc));
+
+ cns3420_early_serial_setup();
+}
+
+MACHINE_START(CNS3420VB, "Cavium Networks CNS3420 Validation Board")
+ .phys_io = CNS3XXX_UART0_BASE,
+ .io_pg_offst = (CNS3XXX_UART0_BASE_VIRT >> 18) & 0xfffc,
+ .boot_params = 0x00000100,
+ .map_io = cns3420_map_io,
+ .init_irq = cns3xxx_init_irq,
+ .timer = &cns3xxx_timer,
+ .init_machine = cns3420_init,
+MACHINE_END
diff --git a/arch/arm/mach-cns3xxx/core.c b/arch/arm/mach-cns3xxx/core.c
new file mode 100644
index 000000000000..9ca4d581016f
--- /dev/null
+++ b/arch/arm/mach-cns3xxx/core.c
@@ -0,0 +1,249 @@
+/*
+ * Copyright 1999 - 2003 ARM Limited
+ * Copyright 2000 Deep Blue Solutions Ltd
+ * Copyright 2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/clockchips.h>
+#include <linux/io.h>
+#include <asm/mach/map.h>
+#include <asm/mach/time.h>
+#include <asm/mach/irq.h>
+#include <asm/hardware/gic.h>
+#include <mach/cns3xxx.h>
+#include "core.h"
+
+static struct map_desc cns3xxx_io_desc[] __initdata = {
+ {
+ .virtual = CNS3XXX_TC11MP_TWD_BASE_VIRT,
+ .pfn = __phys_to_pfn(CNS3XXX_TC11MP_TWD_BASE),
+ .length = SZ_4K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT,
+ .pfn = __phys_to_pfn(CNS3XXX_TC11MP_GIC_CPU_BASE),
+ .length = SZ_4K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT,
+ .pfn = __phys_to_pfn(CNS3XXX_TC11MP_GIC_DIST_BASE),
+ .length = SZ_4K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = CNS3XXX_TIMER1_2_3_BASE_VIRT,
+ .pfn = __phys_to_pfn(CNS3XXX_TIMER1_2_3_BASE),
+ .length = SZ_4K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = CNS3XXX_GPIOA_BASE_VIRT,
+ .pfn = __phys_to_pfn(CNS3XXX_GPIOA_BASE),
+ .length = SZ_4K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = CNS3XXX_GPIOB_BASE_VIRT,
+ .pfn = __phys_to_pfn(CNS3XXX_GPIOB_BASE),
+ .length = SZ_4K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = CNS3XXX_MISC_BASE_VIRT,
+ .pfn = __phys_to_pfn(CNS3XXX_MISC_BASE),
+ .length = SZ_4K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = CNS3XXX_PM_BASE_VIRT,
+ .pfn = __phys_to_pfn(CNS3XXX_PM_BASE),
+ .length = SZ_4K,
+ .type = MT_DEVICE,
+ },
+};
+
+void __init cns3xxx_map_io(void)
+{
+ iotable_init(cns3xxx_io_desc, ARRAY_SIZE(cns3xxx_io_desc));
+}
+
+/* used by entry-macro.S */
+void __iomem *gic_cpu_base_addr;
+
+void __init cns3xxx_init_irq(void)
+{
+ gic_cpu_base_addr = __io(CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT);
+ gic_dist_init(0, __io(CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT), 29);
+ gic_cpu_init(0, gic_cpu_base_addr);
+}
+
+void cns3xxx_power_off(void)
+{
+ u32 __iomem *pm_base = __io(CNS3XXX_PM_BASE_VIRT);
+ u32 clkctrl;
+
+ printk(KERN_INFO "powering system down...\n");
+
+ clkctrl = readl(pm_base + PM_SYS_CLK_CTRL_OFFSET);
+ clkctrl &= 0xfffff1ff;
+ clkctrl |= (0x5 << 9); /* Hibernate */
+ writel(clkctrl, pm_base + PM_SYS_CLK_CTRL_OFFSET);
+
+}
+
+/*
+ * Timer
+ */
+static void __iomem *cns3xxx_tmr1;
+
+static void cns3xxx_timer_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *clk)
+{
+ unsigned long ctrl = readl(cns3xxx_tmr1 + TIMER1_2_CONTROL_OFFSET);
+ int pclk = cns3xxx_cpu_clock() / 8;
+ int reload;
+
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ reload = pclk * 20 / (3 * HZ) * 0x25000;
+ writel(reload, cns3xxx_tmr1 + TIMER1_AUTO_RELOAD_OFFSET);
+ ctrl |= (1 << 0) | (1 << 2) | (1 << 9);
+ break;
+ case CLOCK_EVT_MODE_ONESHOT:
+ /* period set, and timer enabled in 'next_event' hook */
+ ctrl |= (1 << 2) | (1 << 9);
+ break;
+ case CLOCK_EVT_MODE_UNUSED:
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ default:
+ ctrl = 0;
+ }
+
+ writel(ctrl, cns3xxx_tmr1 + TIMER1_2_CONTROL_OFFSET);
+}
+
+static int cns3xxx_timer_set_next_event(unsigned long evt,
+ struct clock_event_device *unused)
+{
+ unsigned long ctrl = readl(cns3xxx_tmr1 + TIMER1_2_CONTROL_OFFSET);
+
+ writel(evt, cns3xxx_tmr1 + TIMER1_AUTO_RELOAD_OFFSET);
+ writel(ctrl | (1 << 0), cns3xxx_tmr1 + TIMER1_2_CONTROL_OFFSET);
+
+ return 0;
+}
+
+static struct clock_event_device cns3xxx_tmr1_clockevent = {
+ .name = "cns3xxx timer1",
+ .shift = 8,
+ .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
+ .set_mode = cns3xxx_timer_set_mode,
+ .set_next_event = cns3xxx_timer_set_next_event,
+ .rating = 350,
+ .cpumask = cpu_all_mask,
+};
+
+static void __init cns3xxx_clockevents_init(unsigned int timer_irq)
+{
+ cns3xxx_tmr1_clockevent.irq = timer_irq;
+ cns3xxx_tmr1_clockevent.mult =
+ div_sc((cns3xxx_cpu_clock() >> 3) * 1000000, NSEC_PER_SEC,
+ cns3xxx_tmr1_clockevent.shift);
+ cns3xxx_tmr1_clockevent.max_delta_ns =
+ clockevent_delta2ns(0xffffffff, &cns3xxx_tmr1_clockevent);
+ cns3xxx_tmr1_clockevent.min_delta_ns =
+ clockevent_delta2ns(0xf, &cns3xxx_tmr1_clockevent);
+
+ clockevents_register_device(&cns3xxx_tmr1_clockevent);
+}
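To make the mult/shift arithmetic above concrete (assuming a 500 MHz CPU clock for illustration): div_sc() computes mult = (rate << shift) / NSEC_PER_SEC, the timer rate here is cns3xxx_cpu_clock() >> 3 = 62 (integer MHz) scaled to 62,000,000 Hz, so mult ≈ (62,000,000 * 256) / 10^9 ≈ 15. The clockevents core later turns a requested delay of t nanoseconds into roughly (t * mult) >> shift timer ticks before calling set_next_event().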
+
+/*
+ * IRQ handler for the timer
+ */
+static irqreturn_t cns3xxx_timer_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *evt = &cns3xxx_tmr1_clockevent;
+ u32 __iomem *stat = cns3xxx_tmr1 + TIMER1_2_INTERRUPT_STATUS_OFFSET;
+ u32 val;
+
+ /* Clear the interrupt */
+ val = readl(stat);
+ writel(val & ~(1 << 2), stat);
+
+ evt->event_handler(evt);
+
+ return IRQ_HANDLED;
+}
+
+static struct irqaction cns3xxx_timer_irq = {
+ .name = "timer",
+ .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+ .handler = cns3xxx_timer_interrupt,
+};
+
+/*
+ * Set up the clock source and clock events devices
+ */
+static void __init __cns3xxx_timer_init(unsigned int timer_irq)
+{
+ u32 val;
+ u32 irq_mask;
+
+ /*
+ * Initialise to a known state (all timers off)
+ */
+
+ /* disable timer1 and timer2 */
+ writel(0, cns3xxx_tmr1 + TIMER1_2_CONTROL_OFFSET);
+ /* stop free running timer3 */
+ writel(0, cns3xxx_tmr1 + TIMER_FREERUN_CONTROL_OFFSET);
+
+ /* timer1 */
+ writel(0x5C800, cns3xxx_tmr1 + TIMER1_COUNTER_OFFSET);
+ writel(0x5C800, cns3xxx_tmr1 + TIMER1_AUTO_RELOAD_OFFSET);
+
+ writel(0, cns3xxx_tmr1 + TIMER1_MATCH_V1_OFFSET);
+ writel(0, cns3xxx_tmr1 + TIMER1_MATCH_V2_OFFSET);
+
+ /* mask timer1 match irqs, leave the timer1 overflow irq unmasked */
+ irq_mask = readl(cns3xxx_tmr1 + TIMER1_2_INTERRUPT_MASK_OFFSET);
+ irq_mask &= ~(1 << 2);
+ irq_mask |= 0x03;
+ writel(irq_mask, cns3xxx_tmr1 + TIMER1_2_INTERRUPT_MASK_OFFSET);
+
+ /* down counter */
+ val = readl(cns3xxx_tmr1 + TIMER1_2_CONTROL_OFFSET);
+ val |= (1 << 9);
+ writel(val, cns3xxx_tmr1 + TIMER1_2_CONTROL_OFFSET);
+
+ /* timer2 */
+ writel(0, cns3xxx_tmr1 + TIMER2_MATCH_V1_OFFSET);
+ writel(0, cns3xxx_tmr1 + TIMER2_MATCH_V2_OFFSET);
+
+ /* mask timer2 irqs */
+ irq_mask = readl(cns3xxx_tmr1 + TIMER1_2_INTERRUPT_MASK_OFFSET);
+ irq_mask |= ((1 << 3) | (1 << 4) | (1 << 5));
+ writel(irq_mask, cns3xxx_tmr1 + TIMER1_2_INTERRUPT_MASK_OFFSET);
+
+ /* down counter */
+ val = readl(cns3xxx_tmr1 + TIMER1_2_CONTROL_OFFSET);
+ val |= (1 << 10);
+ writel(val, cns3xxx_tmr1 + TIMER1_2_CONTROL_OFFSET);
+
+ /* Make irqs happen for the system timer */
+ setup_irq(timer_irq, &cns3xxx_timer_irq);
+
+ cns3xxx_clockevents_init(timer_irq);
+}
+
+static void __init cns3xxx_timer_init(void)
+{
+ cns3xxx_tmr1 = __io(CNS3XXX_TIMER1_2_3_BASE_VIRT);
+
+ __cns3xxx_timer_init(IRQ_CNS3XXX_TIMER0);
+}
+
+struct sys_timer cns3xxx_timer = {
+ .init = cns3xxx_timer_init,
+};
diff --git a/arch/arm/mach-cns3xxx/core.h b/arch/arm/mach-cns3xxx/core.h
new file mode 100644
index 000000000000..6b33ec11346e
--- /dev/null
+++ b/arch/arm/mach-cns3xxx/core.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2000 Deep Blue Solutions Ltd
+ * Copyright 2004 ARM Limited
+ * Copyright 2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __CNS3XXX_CORE_H
+#define __CNS3XXX_CORE_H
+
+extern void __iomem *gic_cpu_base_addr;
+extern struct sys_timer cns3xxx_timer;
+
+void __init cns3xxx_map_io(void);
+void __init cns3xxx_init_irq(void);
+void cns3xxx_power_off(void);
+void cns3xxx_pwr_power_up(unsigned int block);
+void cns3xxx_pwr_power_down(unsigned int block);
+
+#endif /* __CNS3XXX_CORE_H */
diff --git a/arch/arm/mach-cns3xxx/include/mach/cns3xxx.h b/arch/arm/mach-cns3xxx/include/mach/cns3xxx.h
new file mode 100644
index 000000000000..8a2f5a21d4ee
--- /dev/null
+++ b/arch/arm/mach-cns3xxx/include/mach/cns3xxx.h
@@ -0,0 +1,635 @@
+/*
+ * Copyright 2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __MACH_BOARD_CNS3XXXH
+#define __MACH_BOARD_CNS3XXXH
+
+/*
+ * Memory map
+ */
+#define CNS3XXX_FLASH_BASE 0x10000000 /* Flash/SRAM Memory Bank 0 */
+#define CNS3XXX_FLASH_SIZE SZ_256M
+
+#define CNS3XXX_DDR2SDRAM_BASE 0x20000000 /* DDR2 SDRAM Memory */
+
+#define CNS3XXX_SPI_FLASH_BASE 0x60000000 /* SPI Serial Flash Memory */
+
+#define CNS3XXX_SWITCH_BASE 0x70000000 /* Switch and HNAT Control */
+#define CNS3XXX_SWITCH_BASE_VIRT 0xFFF00000
+
+#define CNS3XXX_PPE_BASE 0x70001000 /* HNAT */
+#define CNS3XXX_PPE_BASE_VIRT 0xFFF50000
+
+#define CNS3XXX_EMBEDDED_SRAM_BASE 0x70002000 /* HNAT Embedded SRAM */
+#define CNS3XXX_EMBEDDED_SRAM_BASE_VIRT 0xFFF60000
+
+#define CNS3XXX_SSP_BASE 0x71000000 /* Synchronous Serial Port - SPI/PCM/I2C */
+#define CNS3XXX_SSP_BASE_VIRT 0xFFF01000
+
+#define CNS3XXX_DMC_BASE 0x72000000 /* DMC Control (DDR2 SDRAM) */
+#define CNS3XXX_DMC_BASE_VIRT 0xFFF02000
+
+#define CNS3XXX_SMC_BASE 0x73000000 /* SMC Control */
+#define CNS3XXX_SMC_BASE_VIRT 0xFFF03000
+
+#define SMC_MEMC_STATUS_OFFSET 0x000
+#define SMC_MEMIF_CFG_OFFSET 0x004
+#define SMC_MEMC_CFG_SET_OFFSET 0x008
+#define SMC_MEMC_CFG_CLR_OFFSET 0x00C
+#define SMC_DIRECT_CMD_OFFSET 0x010
+#define SMC_SET_CYCLES_OFFSET 0x014
+#define SMC_SET_OPMODE_OFFSET 0x018
+#define SMC_REFRESH_PERIOD_0_OFFSET 0x020
+#define SMC_REFRESH_PERIOD_1_OFFSET 0x024
+#define SMC_SRAM_CYCLES0_0_OFFSET 0x100
+#define SMC_NAND_CYCLES0_0_OFFSET 0x100
+#define SMC_OPMODE0_0_OFFSET 0x104
+#define SMC_SRAM_CYCLES0_1_OFFSET 0x120
+#define SMC_NAND_CYCLES0_1_OFFSET 0x120
+#define SMC_OPMODE0_1_OFFSET 0x124
+#define SMC_USER_STATUS_OFFSET 0x200
+#define SMC_USER_CONFIG_OFFSET 0x204
+#define SMC_ECC_STATUS_OFFSET 0x300
+#define SMC_ECC_MEMCFG_OFFSET 0x304
+#define SMC_ECC_MEMCOMMAND1_OFFSET 0x308
+#define SMC_ECC_MEMCOMMAND2_OFFSET 0x30C
+#define SMC_ECC_ADDR0_OFFSET 0x310
+#define SMC_ECC_ADDR1_OFFSET 0x314
+#define SMC_ECC_VALUE0_OFFSET 0x318
+#define SMC_ECC_VALUE1_OFFSET 0x31C
+#define SMC_ECC_VALUE2_OFFSET 0x320
+#define SMC_ECC_VALUE3_OFFSET 0x324
+#define SMC_PERIPH_ID_0_OFFSET 0xFE0
+#define SMC_PERIPH_ID_1_OFFSET 0xFE4
+#define SMC_PERIPH_ID_2_OFFSET 0xFE8
+#define SMC_PERIPH_ID_3_OFFSET 0xFEC
+#define SMC_PCELL_ID_0_OFFSET 0xFF0
+#define SMC_PCELL_ID_1_OFFSET 0xFF4
+#define SMC_PCELL_ID_2_OFFSET 0xFF8
+#define SMC_PCELL_ID_3_OFFSET 0xFFC
+
+#define CNS3XXX_GPIOA_BASE 0x74000000 /* GPIO port A */
+#define CNS3XXX_GPIOA_BASE_VIRT 0xFFF04000
+
+#define CNS3XXX_GPIOB_BASE 0x74800000 /* GPIO port B */
+#define CNS3XXX_GPIOB_BASE_VIRT 0xFFF05000
+
+#define CNS3XXX_RTC_BASE 0x75000000 /* Real Time Clock */
+#define CNS3XXX_RTC_BASE_VIRT 0xFFF06000
+
+#define RTC_SEC_OFFSET 0x00
+#define RTC_MIN_OFFSET 0x04
+#define RTC_HOUR_OFFSET 0x08
+#define RTC_DAY_OFFSET 0x0C
+#define RTC_SEC_ALM_OFFSET 0x10
+#define RTC_MIN_ALM_OFFSET 0x14
+#define RTC_HOUR_ALM_OFFSET 0x18
+#define RTC_REC_OFFSET 0x1C
+#define RTC_CTRL_OFFSET 0x20
+#define RTC_INTR_STS_OFFSET 0x34
+
+#define CNS3XXX_MISC_BASE 0x76000000 /* Misc Control */
+#define CNS3XXX_MISC_BASE_VIRT 0xFFF07000 /* Misc Control */
+
+#define CNS3XXX_PM_BASE 0x77000000 /* Power Management Control */
+#define CNS3XXX_PM_BASE_VIRT 0xFFF08000
+
+#define PM_CLK_GATE_OFFSET 0x00
+#define PM_SOFT_RST_OFFSET 0x04
+#define PM_HS_CFG_OFFSET 0x08
+#define PM_CACTIVE_STA_OFFSET 0x0C
+#define PM_PWR_STA_OFFSET 0x10
+#define PM_SYS_CLK_CTRL_OFFSET 0x14
+#define PM_PLL_LCD_I2S_CTRL_OFFSET 0x18
+#define PM_PLL_HM_PD_OFFSET 0x1C
+
+#define CNS3XXX_UART0_BASE 0x78000000 /* UART 0 */
+#define CNS3XXX_UART0_BASE_VIRT 0xFFF09000
+
+#define CNS3XXX_UART1_BASE 0x78400000 /* UART 1 */
+#define CNS3XXX_UART1_BASE_VIRT 0xFFF0A000
+
+#define CNS3XXX_UART2_BASE 0x78800000 /* UART 2 */
+#define CNS3XXX_UART2_BASE_VIRT 0xFFF0B000
+
+#define CNS3XXX_DMAC_BASE 0x79000000 /* Generic DMA Control */
+#define CNS3XXX_DMAC_BASE_VIRT 0xFFF0D000
+
+#define CNS3XXX_CORESIGHT_BASE 0x7A000000 /* CoreSight */
+#define CNS3XXX_CORESIGHT_BASE_VIRT 0xFFF0E000
+
+#define CNS3XXX_CRYPTO_BASE 0x7B000000 /* Crypto */
+#define CNS3XXX_CRYPTO_BASE_VIRT 0xFFF0F000
+
+#define CNS3XXX_I2S_BASE 0x7C000000 /* I2S */
+#define CNS3XXX_I2S_BASE_VIRT 0xFFF10000
+
+#define CNS3XXX_TIMER1_2_3_BASE 0x7C800000 /* Timer */
+#define CNS3XXX_TIMER1_2_3_BASE_VIRT 0xFFF10800
+
+#define TIMER1_COUNTER_OFFSET 0x00
+#define TIMER1_AUTO_RELOAD_OFFSET 0x04
+#define TIMER1_MATCH_V1_OFFSET 0x08
+#define TIMER1_MATCH_V2_OFFSET 0x0C
+
+#define TIMER2_COUNTER_OFFSET 0x10
+#define TIMER2_AUTO_RELOAD_OFFSET 0x14
+#define TIMER2_MATCH_V1_OFFSET 0x18
+#define TIMER2_MATCH_V2_OFFSET 0x1C
+
+#define TIMER1_2_CONTROL_OFFSET 0x30
+#define TIMER1_2_INTERRUPT_STATUS_OFFSET 0x34
+#define TIMER1_2_INTERRUPT_MASK_OFFSET 0x38
+
+#define TIMER_FREERUN_OFFSET 0x40
+#define TIMER_FREERUN_CONTROL_OFFSET 0x44
+
+#define CNS3XXX_HCIE_BASE 0x7D000000 /* HCIE Control */
+#define CNS3XXX_HCIE_BASE_VIRT 0xFFF30000
+
+#define CNS3XXX_RAID_BASE 0x7E000000 /* RAID Control */
+#define CNS3XXX_RAID_BASE_VIRT 0xFFF12000
+
+#define CNS3XXX_AXI_IXC_BASE 0x7F000000 /* AXI IXC */
+#define CNS3XXX_AXI_IXC_BASE_VIRT 0xFFF13000
+
+#define CNS3XXX_CLCD_BASE 0x80000000 /* LCD Control */
+#define CNS3XXX_CLCD_BASE_VIRT 0xFFF14000
+
+#define CNS3XXX_USBOTG_BASE 0x81000000 /* USB OTG Control */
+#define CNS3XXX_USBOTG_BASE_VIRT 0xFFF15000
+
+#define CNS3XXX_USB_BASE 0x82000000 /* USB Host Control */
+#define CNS3XXX_USB_BASE_VIRT 0xFFF16000
+
+#define CNS3XXX_SATA2_BASE 0x83000000 /* SATA */
+#define CNS3XXX_SATA2_SIZE SZ_16M
+#define CNS3XXX_SATA2_BASE_VIRT 0xFFF17000
+
+#define CNS3XXX_CAMERA_BASE 0x84000000 /* Camera Interface */
+#define CNS3XXX_CAMERA_BASE_VIRT 0xFFF18000
+
+#define CNS3XXX_SDIO_BASE 0x85000000 /* SDIO */
+#define CNS3XXX_SDIO_BASE_VIRT 0xFFF19000
+
+#define CNS3XXX_I2S_TDM_BASE 0x86000000 /* I2S TDM */
+#define CNS3XXX_I2S_TDM_BASE_VIRT 0xFFF1A000
+
+#define CNS3XXX_2DG_BASE 0x87000000 /* 2D Graphic Control */
+#define CNS3XXX_2DG_BASE_VIRT 0xFFF1B000
+
+#define CNS3XXX_USB_OHCI_BASE 0x88000000 /* USB OHCI */
+#define CNS3XXX_USB_OHCI_BASE_VIRT 0xFFF1C000
+
+#define CNS3XXX_L2C_BASE 0x92000000 /* L2 Cache Control */
+#define CNS3XXX_L2C_BASE_VIRT 0xFFF27000
+
+#define CNS3XXX_PCIE0_MEM_BASE 0xA0000000 /* PCIe Port 0 IO/Memory Space */
+#define CNS3XXX_PCIE0_MEM_BASE_VIRT 0xE0000000
+
+#define CNS3XXX_PCIE0_HOST_BASE 0xAB000000 /* PCIe Port 0 RC Base */
+#define CNS3XXX_PCIE0_HOST_BASE_VIRT 0xE1000000
+
+#define CNS3XXX_PCIE0_IO_BASE 0xAC000000 /* PCIe Port 0 */
+#define CNS3XXX_PCIE0_IO_BASE_VIRT 0xE2000000
+
+#define CNS3XXX_PCIE0_CFG0_BASE 0xAD000000 /* PCIe Port 0 CFG Type 0 */
+#define CNS3XXX_PCIE0_CFG0_BASE_VIRT 0xE3000000
+
+#define CNS3XXX_PCIE0_CFG1_BASE 0xAE000000 /* PCIe Port 0 CFG Type 1 */
+#define CNS3XXX_PCIE0_CFG1_BASE_VIRT 0xE4000000
+
+#define CNS3XXX_PCIE0_MSG_BASE 0xAF000000 /* PCIe Port 0 Message Space */
+#define CNS3XXX_PCIE0_MSG_BASE_VIRT 0xE5000000
+
+#define CNS3XXX_PCIE1_MEM_BASE 0xB0000000 /* PCIe Port 1 IO/Memory Space */
+#define CNS3XXX_PCIE1_MEM_BASE_VIRT 0xE8000000
+
+#define CNS3XXX_PCIE1_HOST_BASE 0xBB000000 /* PCIe Port 1 RC Base */
+#define CNS3XXX_PCIE1_HOST_BASE_VIRT 0xE9000000
+
+#define CNS3XXX_PCIE1_IO_BASE 0xBC000000 /* PCIe Port 1 */
+#define CNS3XXX_PCIE1_IO_BASE_VIRT 0xEA000000
+
+#define CNS3XXX_PCIE1_CFG0_BASE 0xBD000000 /* PCIe Port 1 CFG Type 0 */
+#define CNS3XXX_PCIE1_CFG0_BASE_VIRT 0xEB000000
+
+#define CNS3XXX_PCIE1_CFG1_BASE 0xBE000000 /* PCIe Port 1 CFG Type 1 */
+#define CNS3XXX_PCIE1_CFG1_BASE_VIRT 0xEC000000
+
+#define CNS3XXX_PCIE1_MSG_BASE 0xBF000000 /* PCIe Port 1 Message Space */
+#define CNS3XXX_PCIE1_MSG_BASE_VIRT 0xED000000
+
+/*
+ * Test chip peripheral and FPGA GIC regions
+ */
+#define CNS3XXX_TC11MP_SCU_BASE 0x90000000 /* IRQ, Test chip */
+#define CNS3XXX_TC11MP_SCU_BASE_VIRT 0xFF000000
+
+#define CNS3XXX_TC11MP_GIC_CPU_BASE 0x90000100 /* Test chip interrupt controller CPU interface */
+#define CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT 0xFF000100
+
+#define CNS3XXX_TC11MP_TWD_BASE 0x90000600
+#define CNS3XXX_TC11MP_TWD_BASE_VIRT 0xFF000600
+
+#define CNS3XXX_TC11MP_GIC_DIST_BASE 0x90001000 /* Test chip interrupt controller distributor */
+#define CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT 0xFF001000
+
+#define CNS3XXX_TC11MP_L220_BASE 0x92002000 /* L220 registers */
+#define CNS3XXX_TC11MP_L220_BASE_VIRT 0xFF002000
+
+/*
+ * Misc block
+ */
+#define MISC_MEM_MAP(offs) (void __iomem *)(CNS3XXX_MISC_BASE_VIRT + (offs))
+#define MISC_MEM_MAP_VALUE(offset) (*((volatile unsigned int *)(CNS3XXX_MISC_BASE_VIRT + (offset))))
+
+#define MISC_MEMORY_REMAP_REG MISC_MEM_MAP_VALUE(0x00)
+#define MISC_CHIP_CONFIG_REG MISC_MEM_MAP_VALUE(0x04)
+#define MISC_DEBUG_PROBE_DATA_REG MISC_MEM_MAP_VALUE(0x08)
+#define MISC_DEBUG_PROBE_SELECTION_REG MISC_MEM_MAP_VALUE(0x0C)
+#define MISC_IO_PIN_FUNC_SELECTION_REG MISC_MEM_MAP_VALUE(0x10)
+#define MISC_GPIOA_PIN_ENABLE_REG MISC_MEM_MAP_VALUE(0x14)
+#define MISC_GPIOB_PIN_ENABLE_REG MISC_MEM_MAP_VALUE(0x18)
+#define MISC_IO_PAD_DRIVE_STRENGTH_CTRL_A MISC_MEM_MAP_VALUE(0x1C)
+#define MISC_IO_PAD_DRIVE_STRENGTH_CTRL_B MISC_MEM_MAP_VALUE(0x20)
+#define MISC_GPIOA_15_0_PULL_CTRL_REG MISC_MEM_MAP_VALUE(0x24)
+#define MISC_GPIOA_16_31_PULL_CTRL_REG MISC_MEM_MAP_VALUE(0x28)
+#define MISC_GPIOB_15_0_PULL_CTRL_REG MISC_MEM_MAP_VALUE(0x2C)
+#define MISC_GPIOB_16_31_PULL_CTRL_REG MISC_MEM_MAP_VALUE(0x30)
+#define MISC_IO_PULL_CTRL_REG MISC_MEM_MAP_VALUE(0x34)
+#define MISC_E_FUSE_31_0_REG MISC_MEM_MAP_VALUE(0x40)
+#define MISC_E_FUSE_63_32_REG MISC_MEM_MAP_VALUE(0x44)
+#define MISC_E_FUSE_95_64_REG MISC_MEM_MAP_VALUE(0x48)
+#define MISC_E_FUSE_127_96_REG MISC_MEM_MAP_VALUE(0x4C)
+#define MISC_SOFTWARE_TEST_1_REG MISC_MEM_MAP_VALUE(0x50)
+#define MISC_SOFTWARE_TEST_2_REG MISC_MEM_MAP_VALUE(0x54)
+
+#define MISC_SATA_POWER_MODE MISC_MEM_MAP_VALUE(0x310)
+
+#define MISC_USB_CFG_REG MISC_MEM_MAP_VALUE(0x800)
+#define MISC_USB_STS_REG MISC_MEM_MAP_VALUE(0x804)
+#define MISC_USBPHY00_CFG_REG MISC_MEM_MAP_VALUE(0x808)
+#define MISC_USBPHY01_CFG_REG MISC_MEM_MAP_VALUE(0x80c)
+#define MISC_USBPHY10_CFG_REG MISC_MEM_MAP_VALUE(0x810)
+#define MISC_USBPHY11_CFG_REG MISC_MEM_MAP_VALUE(0x814)
+
+#define MISC_PCIEPHY_CMCTL(x) MISC_MEM_MAP(0x900 + (x) * 0x004)
+#define MISC_PCIEPHY_CTL(x) MISC_MEM_MAP(0x940 + (x) * 0x100)
+#define MISC_PCIE_AXIS_AWMISC(x) MISC_MEM_MAP(0x944 + (x) * 0x100)
+#define MISC_PCIE_AXIS_ARMISC(x) MISC_MEM_MAP(0x948 + (x) * 0x100)
+#define MISC_PCIE_AXIS_RMISC(x) MISC_MEM_MAP(0x94C + (x) * 0x100)
+#define MISC_PCIE_AXIS_BMISC(x) MISC_MEM_MAP(0x950 + (x) * 0x100)
+#define MISC_PCIE_AXIM_RMISC(x) MISC_MEM_MAP(0x954 + (x) * 0x100)
+#define MISC_PCIE_AXIM_BMISC(x) MISC_MEM_MAP(0x958 + (x) * 0x100)
+#define MISC_PCIE_CTRL(x) MISC_MEM_MAP(0x95C + (x) * 0x100)
+#define MISC_PCIE_PM_DEBUG(x) MISC_MEM_MAP(0x960 + (x) * 0x100)
+#define MISC_PCIE_RFC_DEBUG(x) MISC_MEM_MAP(0x964 + (x) * 0x100)
+#define MISC_PCIE_CXPL_DEBUGL(x) MISC_MEM_MAP(0x968 + (x) * 0x100)
+#define MISC_PCIE_CXPL_DEBUGH(x) MISC_MEM_MAP(0x96C + (x) * 0x100)
+#define MISC_PCIE_DIAG_DEBUGH(x) MISC_MEM_MAP(0x970 + (x) * 0x100)
+#define MISC_PCIE_W1CLR(x) MISC_MEM_MAP(0x974 + (x) * 0x100)
+#define MISC_PCIE_INT_MASK(x) MISC_MEM_MAP(0x978 + (x) * 0x100)
+#define MISC_PCIE_INT_STATUS(x) MISC_MEM_MAP(0x97C + (x) * 0x100)
+
+/*
+ * Power management and clock control
+ */
+#define PMU_REG_VALUE(offset) (*((volatile unsigned int *)(CNS3XXX_PM_BASE_VIRT + (offset))))
+
+#define PM_CLK_GATE_REG PMU_REG_VALUE(0x000)
+#define PM_SOFT_RST_REG PMU_REG_VALUE(0x004)
+#define PM_HS_CFG_REG PMU_REG_VALUE(0x008)
+#define PM_CACTIVE_STA_REG PMU_REG_VALUE(0x00C)
+#define PM_PWR_STA_REG PMU_REG_VALUE(0x010)
+#define PM_CLK_CTRL_REG PMU_REG_VALUE(0x014)
+#define PM_PLL_LCD_I2S_CTRL_REG PMU_REG_VALUE(0x018)
+#define PM_PLL_HM_PD_CTRL_REG PMU_REG_VALUE(0x01C)
+#define PM_REGULAT_CTRL_REG PMU_REG_VALUE(0x020)
+#define PM_WDT_CTRL_REG PMU_REG_VALUE(0x024)
+#define PM_WU_CTRL0_REG PMU_REG_VALUE(0x028)
+#define PM_WU_CTRL1_REG PMU_REG_VALUE(0x02C)
+#define PM_CSR_REG PMU_REG_VALUE(0x030)
+
+/* PM_CLK_GATE_REG */
+#define PM_CLK_GATE_REG_OFFSET_SDIO (25)
+#define PM_CLK_GATE_REG_OFFSET_GPU (24)
+#define PM_CLK_GATE_REG_OFFSET_CIM (23)
+#define PM_CLK_GATE_REG_OFFSET_LCDC (22)
+#define PM_CLK_GATE_REG_OFFSET_I2S (21)
+#define PM_CLK_GATE_REG_OFFSET_RAID (20)
+#define PM_CLK_GATE_REG_OFFSET_SATA (19)
+#define PM_CLK_GATE_REG_OFFSET_PCIE(x) (17 + (x))
+#define PM_CLK_GATE_REG_OFFSET_USB_HOST (16)
+#define PM_CLK_GATE_REG_OFFSET_USB_OTG (15)
+#define PM_CLK_GATE_REG_OFFSET_TIMER (14)
+#define PM_CLK_GATE_REG_OFFSET_CRYPTO (13)
+#define PM_CLK_GATE_REG_OFFSET_HCIE (12)
+#define PM_CLK_GATE_REG_OFFSET_SWITCH (11)
+#define PM_CLK_GATE_REG_OFFSET_GPIO (10)
+#define PM_CLK_GATE_REG_OFFSET_UART3 (9)
+#define PM_CLK_GATE_REG_OFFSET_UART2 (8)
+#define PM_CLK_GATE_REG_OFFSET_UART1 (7)
+#define PM_CLK_GATE_REG_OFFSET_RTC (5)
+#define PM_CLK_GATE_REG_OFFSET_GDMA (4)
+#define PM_CLK_GATE_REG_OFFSET_SPI_PCM_I2C (3)
+#define PM_CLK_GATE_REG_OFFSET_SMC_NFI (1)
+#define PM_CLK_GATE_REG_MASK (0x03FFFFBA)
+
+/* PM_SOFT_RST_REG */
+#define PM_SOFT_RST_REG_OFFST_WARM_RST_FLAG (31)
+#define PM_SOFT_RST_REG_OFFST_CPU1 (29)
+#define PM_SOFT_RST_REG_OFFST_CPU0 (28)
+#define PM_SOFT_RST_REG_OFFST_SDIO (25)
+#define PM_SOFT_RST_REG_OFFST_GPU (24)
+#define PM_SOFT_RST_REG_OFFST_CIM (23)
+#define PM_SOFT_RST_REG_OFFST_LCDC (22)
+#define PM_SOFT_RST_REG_OFFST_I2S (21)
+#define PM_SOFT_RST_REG_OFFST_RAID (20)
+#define PM_SOFT_RST_REG_OFFST_SATA (19)
+#define PM_SOFT_RST_REG_OFFST_PCIE(x) (17 + (x))
+#define PM_SOFT_RST_REG_OFFST_USB_HOST (16)
+#define PM_SOFT_RST_REG_OFFST_USB_OTG (15)
+#define PM_SOFT_RST_REG_OFFST_TIMER (14)
+#define PM_SOFT_RST_REG_OFFST_CRYPTO (13)
+#define PM_SOFT_RST_REG_OFFST_HCIE (12)
+#define PM_SOFT_RST_REG_OFFST_SWITCH (11)
+#define PM_SOFT_RST_REG_OFFST_GPIO (10)
+#define PM_SOFT_RST_REG_OFFST_UART3 (9)
+#define PM_SOFT_RST_REG_OFFST_UART2 (8)
+#define PM_SOFT_RST_REG_OFFST_UART1 (7)
+#define PM_SOFT_RST_REG_OFFST_RTC (5)
+#define PM_SOFT_RST_REG_OFFST_GDMA (4)
+#define PM_SOFT_RST_REG_OFFST_SPI_PCM_I2C (3)
+#define PM_SOFT_RST_REG_OFFST_DMC (2)
+#define PM_SOFT_RST_REG_OFFST_SMC_NFI (1)
+#define PM_SOFT_RST_REG_OFFST_GLOBAL (0)
+#define PM_SOFT_RST_REG_MASK (0xF3FFFFBF)
+
+/* PM_HS_CFG_REG */
+#define PM_HS_CFG_REG_OFFSET_SDIO (25)
+#define PM_HS_CFG_REG_OFFSET_GPU (24)
+#define PM_HS_CFG_REG_OFFSET_CIM (23)
+#define PM_HS_CFG_REG_OFFSET_LCDC (22)
+#define PM_HS_CFG_REG_OFFSET_I2S (21)
+#define PM_HS_CFG_REG_OFFSET_RAID (20)
+#define PM_HS_CFG_REG_OFFSET_SATA (19)
+#define PM_HS_CFG_REG_OFFSET_PCIE1 (18)
+#define PM_HS_CFG_REG_OFFSET_PCIE0 (17)
+#define PM_HS_CFG_REG_OFFSET_USB_HOST (16)
+#define PM_HS_CFG_REG_OFFSET_USB_OTG (15)
+#define PM_HS_CFG_REG_OFFSET_TIMER (14)
+#define PM_HS_CFG_REG_OFFSET_CRYPTO (13)
+#define PM_HS_CFG_REG_OFFSET_HCIE (12)
+#define PM_HS_CFG_REG_OFFSET_SWITCH (11)
+#define PM_HS_CFG_REG_OFFSET_GPIO (10)
+#define PM_HS_CFG_REG_OFFSET_UART3 (9)
+#define PM_HS_CFG_REG_OFFSET_UART2 (8)
+#define PM_HS_CFG_REG_OFFSET_UART1 (7)
+#define PM_HS_CFG_REG_OFFSET_RTC (5)
+#define PM_HS_CFG_REG_OFFSET_GDMA (4)
+#define PM_HS_CFG_REG_OFFSET_SPI_PCM_I2S (3)
+#define PM_HS_CFG_REG_OFFSET_DMC (2)
+#define PM_HS_CFG_REG_OFFSET_SMC_NFI (1)
+#define PM_HS_CFG_REG_MASK (0x03FFFFBE)
+#define PM_HS_CFG_REG_MASK_SUPPORT (0x01100806)
+
+/* PM_CACTIVE_STA_REG */
+#define PM_CACTIVE_STA_REG_OFFSET_SDIO (25)
+#define PM_CACTIVE_STA_REG_OFFSET_GPU (24)
+#define PM_CACTIVE_STA_REG_OFFSET_CIM (23)
+#define PM_CACTIVE_STA_REG_OFFSET_LCDC (22)
+#define PM_CACTIVE_STA_REG_OFFSET_I2S (21)
+#define PM_CACTIVE_STA_REG_OFFSET_RAID (20)
+#define PM_CACTIVE_STA_REG_OFFSET_SATA (19)
+#define PM_CACTIVE_STA_REG_OFFSET_PCIE1 (18)
+#define PM_CACTIVE_STA_REG_OFFSET_PCIE0 (17)
+#define PM_CACTIVE_STA_REG_OFFSET_USB_HOST (16)
+#define PM_CACTIVE_STA_REG_OFFSET_USB_OTG (15)
+#define PM_CACTIVE_STA_REG_OFFSET_TIMER (14)
+#define PM_CACTIVE_STA_REG_OFFSET_CRYPTO (13)
+#define PM_CACTIVE_STA_REG_OFFSET_HCIE (12)
+#define PM_CACTIVE_STA_REG_OFFSET_SWITCH (11)
+#define PM_CACTIVE_STA_REG_OFFSET_GPIO (10)
+#define PM_CACTIVE_STA_REG_OFFSET_UART3 (9)
+#define PM_CACTIVE_STA_REG_OFFSET_UART2 (8)
+#define PM_CACTIVE_STA_REG_OFFSET_UART1 (7)
+#define PM_CACTIVE_STA_REG_OFFSET_RTC (5)
+#define PM_CACTIVE_STA_REG_OFFSET_GDMA (4)
+#define PM_CACTIVE_STA_REG_OFFSET_SPI_PCM_I2S (3)
+#define PM_CACTIVE_STA_REG_OFFSET_DMC (2)
+#define PM_CACTIVE_STA_REG_OFFSET_SMC_NFI (1)
+#define PM_CACTIVE_STA_REG_MASK (0x03FFFFBE)
+
+/* PM_PWR_STA_REG */
+#define PM_PWR_STA_REG_REG_OFFSET_SDIO (25)
+#define PM_PWR_STA_REG_REG_OFFSET_GPU (24)
+#define PM_PWR_STA_REG_REG_OFFSET_CIM (23)
+#define PM_PWR_STA_REG_REG_OFFSET_LCDC (22)
+#define PM_PWR_STA_REG_REG_OFFSET_I2S (21)
+#define PM_PWR_STA_REG_REG_OFFSET_RAID (20)
+#define PM_PWR_STA_REG_REG_OFFSET_SATA (19)
+#define PM_PWR_STA_REG_REG_OFFSET_PCIE1 (18)
+#define PM_PWR_STA_REG_REG_OFFSET_PCIE0 (17)
+#define PM_PWR_STA_REG_REG_OFFSET_USB_HOST (16)
+#define PM_PWR_STA_REG_REG_OFFSET_USB_OTG (15)
+#define PM_PWR_STA_REG_REG_OFFSET_TIMER (14)
+#define PM_PWR_STA_REG_REG_OFFSET_CRYPTO (13)
+#define PM_PWR_STA_REG_REG_OFFSET_HCIE (12)
+#define PM_PWR_STA_REG_REG_OFFSET_SWITCH (11)
+#define PM_PWR_STA_REG_REG_OFFSET_GPIO (10)
+#define PM_PWR_STA_REG_REG_OFFSET_UART3 (9)
+#define PM_PWR_STA_REG_REG_OFFSET_UART2 (8)
+#define PM_PWR_STA_REG_REG_OFFSET_UART1 (7)
+#define PM_PWR_STA_REG_REG_OFFSET_RTC (5)
+#define PM_PWR_STA_REG_REG_OFFSET_GDMA (4)
+#define PM_PWR_STA_REG_REG_OFFSET_SPI_PCM_I2S (3)
+#define PM_PWR_STA_REG_REG_OFFSET_DMC (2)
+#define PM_PWR_STA_REG_REG_OFFSET_SMC_NFI (1)
+#define PM_PWR_STA_REG_REG_MASK (0x03FFFFBE)
+
+/* PM_CLK_CTRL_REG */
+#define PM_CLK_CTRL_REG_OFFSET_I2S_MCLK (31)
+#define PM_CLK_CTRL_REG_OFFSET_DDR2_CHG_EN (30)
+#define PM_CLK_CTRL_REG_OFFSET_PCIE_REF1_EN (29)
+#define PM_CLK_CTRL_REG_OFFSET_PCIE_REF0_EN (28)
+#define PM_CLK_CTRL_REG_OFFSET_TIMER_SIM_MODE (27)
+#define PM_CLK_CTRL_REG_OFFSET_I2SCLK_DIV (24)
+#define PM_CLK_CTRL_REG_OFFSET_I2SCLK_SEL (22)
+#define PM_CLK_CTRL_REG_OFFSET_CLKOUT_DIV (20)
+#define PM_CLK_CTRL_REG_OFFSET_CLKOUT_SEL (16)
+#define PM_CLK_CTRL_REG_OFFSET_MDC_DIV (14)
+#define PM_CLK_CTRL_REG_OFFSET_CRYPTO_CLK_SEL (12)
+#define PM_CLK_CTRL_REG_OFFSET_CPU_PWR_MODE (9)
+#define PM_CLK_CTRL_REG_OFFSET_PLL_DDR2_SEL (7)
+#define PM_CLK_CTRL_REG_OFFSET_DIV_IMMEDIATE (6)
+#define PM_CLK_CTRL_REG_OFFSET_CPU_CLK_DIV (4)
+#define PM_CLK_CTRL_REG_OFFSET_PLL_CPU_SEL (0)
+
+#define PM_CPU_CLK_DIV(DIV) { \
+ PM_CLK_CTRL_REG &= ~((0x3) << PM_CLK_CTRL_REG_OFFSET_CPU_CLK_DIV); \
+ PM_CLK_CTRL_REG |= (((DIV)&0x3) << PM_CLK_CTRL_REG_OFFSET_CPU_CLK_DIV); \
+}
+
+#define PM_PLL_CPU_SEL(CPU) { \
+ PM_CLK_CTRL_REG &= ~((0xF) << PM_CLK_CTRL_REG_OFFSET_PLL_CPU_SEL); \
+ PM_CLK_CTRL_REG |= (((CPU)&0xF) << PM_CLK_CTRL_REG_OFFSET_PLL_CPU_SEL); \
+}
+
+/* PM_PLL_LCD_I2S_CTRL_REG */
+#define PM_PLL_LCD_I2S_CTRL_REG_OFFSET_MCLK_SMC_DIV (22)
+#define PM_PLL_LCD_I2S_CTRL_REG_OFFSET_R_SEL (17)
+#define PM_PLL_LCD_I2S_CTRL_REG_OFFSET_PLL_LCD_P (11)
+#define PM_PLL_LCD_I2S_CTRL_REG_OFFSET_PLL_LCD_M (3)
+#define PM_PLL_LCD_I2S_CTRL_REG_OFFSET_PLL_LCD_S (0)
+
+/* PM_PLL_HM_PD_CTRL_REG */
+#define PM_PLL_HM_PD_CTRL_REG_OFFSET_SATA_PHY1 (11)
+#define PM_PLL_HM_PD_CTRL_REG_OFFSET_SATA_PHY0 (10)
+#define PM_PLL_HM_PD_CTRL_REG_OFFSET_PLL_I2SCD (6)
+#define PM_PLL_HM_PD_CTRL_REG_OFFSET_PLL_I2S (5)
+#define PM_PLL_HM_PD_CTRL_REG_OFFSET_PLL_LCD (4)
+#define PM_PLL_HM_PD_CTRL_REG_OFFSET_PLL_USB (3)
+#define PM_PLL_HM_PD_CTRL_REG_OFFSET_PLL_RGMII (2)
+#define PM_PLL_HM_PD_CTRL_REG_MASK (0x00000C7C)
+
+/* PM_WDT_CTRL_REG */
+#define PM_WDT_CTRL_REG_OFFSET_RESET_CPU_ONLY (0)
+
+/* PM_CSR_REG - Clock Scaling Register */
+#define PM_CSR_REG_OFFSET_CSR_EN (30)
+#define PM_CSR_REG_OFFSET_CSR_NUM (0)
+
+#define CNS3XXX_PWR_CLK_EN(BLOCK) (0x1<<PM_CLK_GATE_REG_OFFSET_##BLOCK)
+
+/* Software reset */
+#define CNS3XXX_PWR_SOFTWARE_RST(BLOCK) (0x1<<PM_SOFT_RST_REG_OFFST_##BLOCK)
+
+/*
+ * CNS3XXX supports several power saving modes, as follows:
+ * DFS, IDLE, HALT, DOZE, SLEEP, Hibernate
+ */
+#define CNS3XXX_PWR_CPU_MODE_DFS (0)
+#define CNS3XXX_PWR_CPU_MODE_IDLE (1)
+#define CNS3XXX_PWR_CPU_MODE_HALT (2)
+#define CNS3XXX_PWR_CPU_MODE_DOZE (3)
+#define CNS3XXX_PWR_CPU_MODE_SLEEP (4)
+#define CNS3XXX_PWR_CPU_MODE_HIBERNATE (5)
+
+#define CNS3XXX_PWR_PLL(BLOCK) (0x1<<PM_PLL_HM_PD_CTRL_REG_OFFSET_##BLOCK)
+#define CNS3XXX_PWR_PLL_ALL PM_PLL_HM_PD_CTRL_REG_MASK
+
+/* Change CPU frequency and divider */
+#define CNS3XXX_PWR_PLL_CPU_300MHZ (0)
+#define CNS3XXX_PWR_PLL_CPU_333MHZ (1)
+#define CNS3XXX_PWR_PLL_CPU_366MHZ (2)
+#define CNS3XXX_PWR_PLL_CPU_400MHZ (3)
+#define CNS3XXX_PWR_PLL_CPU_433MHZ (4)
+#define CNS3XXX_PWR_PLL_CPU_466MHZ (5)
+#define CNS3XXX_PWR_PLL_CPU_500MHZ (6)
+#define CNS3XXX_PWR_PLL_CPU_533MHZ (7)
+#define CNS3XXX_PWR_PLL_CPU_566MHZ (8)
+#define CNS3XXX_PWR_PLL_CPU_600MHZ (9)
+#define CNS3XXX_PWR_PLL_CPU_633MHZ (10)
+#define CNS3XXX_PWR_PLL_CPU_666MHZ (11)
+#define CNS3XXX_PWR_PLL_CPU_700MHZ (12)
+
+#define CNS3XXX_PWR_CPU_CLK_DIV_BY1 (0)
+#define CNS3XXX_PWR_CPU_CLK_DIV_BY2 (1)
+#define CNS3XXX_PWR_CPU_CLK_DIV_BY4 (2)
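A minimal sketch of how the clock-control helpers above fit together (illustrative only, not taken from this patch; it assumes the PM register window is already mapped so PM_CLK_CTRL_REG is accessible):

	/* Hypothetical example: run the CPU from the 500 MHz PLL setting, divided by 2. */
	PM_PLL_CPU_SEL(CNS3XXX_PWR_PLL_CPU_500MHZ);	/* PLL_CPU_SEL field = 6 */
	PM_CPU_CLK_DIV(CNS3XXX_PWR_CPU_CLK_DIV_BY2);	/* CPU_CLK_DIV field = 1, i.e. /2 */
	/* cns3xxx_cpu_clock() would then report 250 (MHz). */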
+
+/* Change DDR2 frequency */
+#define CNS3XXX_PWR_PLL_DDR2_200MHZ (0)
+#define CNS3XXX_PWR_PLL_DDR2_266MHZ (1)
+#define CNS3XXX_PWR_PLL_DDR2_333MHZ (2)
+#define CNS3XXX_PWR_PLL_DDR2_400MHZ (3)
+
+void cns3xxx_pwr_soft_rst(unsigned int block);
+void cns3xxx_pwr_clk_en(unsigned int block);
+int cns3xxx_cpu_clock(void);
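Similarly, a hedged sketch of bringing up one peripheral with the helpers declared above (again illustrative, not part of the patch): a platform init path might ungate the block's clock and release its soft reset before registering the device:

	/* Hypothetical example: enable and reset the UART1 clock/reset domain. */
	cns3xxx_pwr_clk_en(CNS3XXX_PWR_CLK_EN(UART1));
	cns3xxx_pwr_soft_rst(CNS3XXX_PWR_SOFTWARE_RST(UART1));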
+
+/*
+ * ARM11 MPCore interrupt sources (primary GIC)
+ */
+#define IRQ_CNS3XXX_PMU (IRQ_TC11MP_GIC_START + 0)
+#define IRQ_CNS3XXX_SDIO (IRQ_TC11MP_GIC_START + 1)
+#define IRQ_CNS3XXX_L2CC (IRQ_TC11MP_GIC_START + 2)
+#define IRQ_CNS3XXX_RTC (IRQ_TC11MP_GIC_START + 3)
+#define IRQ_CNS3XXX_I2S (IRQ_TC11MP_GIC_START + 4)
+#define IRQ_CNS3XXX_PCM (IRQ_TC11MP_GIC_START + 5)
+#define IRQ_CNS3XXX_SPI (IRQ_TC11MP_GIC_START + 6)
+#define IRQ_CNS3XXX_I2C (IRQ_TC11MP_GIC_START + 7)
+#define IRQ_CNS3XXX_CIM (IRQ_TC11MP_GIC_START + 8)
+#define IRQ_CNS3XXX_GPU (IRQ_TC11MP_GIC_START + 9)
+#define IRQ_CNS3XXX_LCD (IRQ_TC11MP_GIC_START + 10)
+#define IRQ_CNS3XXX_GPIOA (IRQ_TC11MP_GIC_START + 11)
+#define IRQ_CNS3XXX_GPIOB (IRQ_TC11MP_GIC_START + 12)
+#define IRQ_CNS3XXX_UART0 (IRQ_TC11MP_GIC_START + 13)
+#define IRQ_CNS3XXX_UART1 (IRQ_TC11MP_GIC_START + 14)
+#define IRQ_CNS3XXX_UART2 (IRQ_TC11MP_GIC_START + 15)
+#define IRQ_CNS3XXX_ARM11 (IRQ_TC11MP_GIC_START + 16)
+
+#define IRQ_CNS3XXX_SW_STATUS (IRQ_TC11MP_GIC_START + 17)
+#define IRQ_CNS3XXX_SW_R0TXC (IRQ_TC11MP_GIC_START + 18)
+#define IRQ_CNS3XXX_SW_R0RXC (IRQ_TC11MP_GIC_START + 19)
+#define IRQ_CNS3XXX_SW_R0QE (IRQ_TC11MP_GIC_START + 20)
+#define IRQ_CNS3XXX_SW_R0QF (IRQ_TC11MP_GIC_START + 21)
+#define IRQ_CNS3XXX_SW_R1TXC (IRQ_TC11MP_GIC_START + 22)
+#define IRQ_CNS3XXX_SW_R1RXC (IRQ_TC11MP_GIC_START + 23)
+#define IRQ_CNS3XXX_SW_R1QE (IRQ_TC11MP_GIC_START + 24)
+#define IRQ_CNS3XXX_SW_R1QF (IRQ_TC11MP_GIC_START + 25)
+#define IRQ_CNS3XXX_SW_PPE (IRQ_TC11MP_GIC_START + 26)
+
+#define IRQ_CNS3XXX_CRYPTO (IRQ_TC11MP_GIC_START + 27)
+#define IRQ_CNS3XXX_HCIE (IRQ_TC11MP_GIC_START + 28)
+#define IRQ_CNS3XXX_PCIE0_DEVICE (IRQ_TC11MP_GIC_START + 29)
+#define IRQ_CNS3XXX_PCIE1_DEVICE (IRQ_TC11MP_GIC_START + 30)
+#define IRQ_CNS3XXX_USB_OTG (IRQ_TC11MP_GIC_START + 31)
+#define IRQ_CNS3XXX_USB_EHCI (IRQ_TC11MP_GIC_START + 32)
+#define IRQ_CNS3XXX_SATA (IRQ_TC11MP_GIC_START + 33)
+#define IRQ_CNS3XXX_RAID (IRQ_TC11MP_GIC_START + 34)
+#define IRQ_CNS3XXX_SMC (IRQ_TC11MP_GIC_START + 35)
+
+#define IRQ_CNS3XXX_DMAC_ABORT (IRQ_TC11MP_GIC_START + 36)
+#define IRQ_CNS3XXX_DMAC0 (IRQ_TC11MP_GIC_START + 37)
+#define IRQ_CNS3XXX_DMAC1 (IRQ_TC11MP_GIC_START + 38)
+#define IRQ_CNS3XXX_DMAC2 (IRQ_TC11MP_GIC_START + 39)
+#define IRQ_CNS3XXX_DMAC3 (IRQ_TC11MP_GIC_START + 40)
+#define IRQ_CNS3XXX_DMAC4 (IRQ_TC11MP_GIC_START + 41)
+#define IRQ_CNS3XXX_DMAC5 (IRQ_TC11MP_GIC_START + 42)
+#define IRQ_CNS3XXX_DMAC6 (IRQ_TC11MP_GIC_START + 43)
+#define IRQ_CNS3XXX_DMAC7 (IRQ_TC11MP_GIC_START + 44)
+#define IRQ_CNS3XXX_DMAC8 (IRQ_TC11MP_GIC_START + 45)
+#define IRQ_CNS3XXX_DMAC9 (IRQ_TC11MP_GIC_START + 46)
+#define IRQ_CNS3XXX_DMAC10 (IRQ_TC11MP_GIC_START + 47)
+#define IRQ_CNS3XXX_DMAC11 (IRQ_TC11MP_GIC_START + 48)
+#define IRQ_CNS3XXX_DMAC12 (IRQ_TC11MP_GIC_START + 49)
+#define IRQ_CNS3XXX_DMAC13 (IRQ_TC11MP_GIC_START + 50)
+#define IRQ_CNS3XXX_DMAC14 (IRQ_TC11MP_GIC_START + 51)
+#define IRQ_CNS3XXX_DMAC15 (IRQ_TC11MP_GIC_START + 52)
+#define IRQ_CNS3XXX_DMAC16 (IRQ_TC11MP_GIC_START + 53)
+#define IRQ_CNS3XXX_DMAC17 (IRQ_TC11MP_GIC_START + 54)
+
+#define IRQ_CNS3XXX_PCIE0_RC (IRQ_TC11MP_GIC_START + 55)
+#define IRQ_CNS3XXX_PCIE1_RC (IRQ_TC11MP_GIC_START + 56)
+#define IRQ_CNS3XXX_TIMER0 (IRQ_TC11MP_GIC_START + 57)
+#define IRQ_CNS3XXX_TIMER1 (IRQ_TC11MP_GIC_START + 58)
+#define IRQ_CNS3XXX_USB_OHCI (IRQ_TC11MP_GIC_START + 59)
+#define IRQ_CNS3XXX_TIMER2 (IRQ_TC11MP_GIC_START + 60)
+#define IRQ_CNS3XXX_EXTERNAL_PIN0 (IRQ_TC11MP_GIC_START + 61)
+#define IRQ_CNS3XXX_EXTERNAL_PIN1 (IRQ_TC11MP_GIC_START + 62)
+#define IRQ_CNS3XXX_EXTERNAL_PIN2 (IRQ_TC11MP_GIC_START + 63)
+
+#define NR_IRQS_CNS3XXX (IRQ_TC11MP_GIC_START + 64)
+
+#if !defined(NR_IRQS) || (NR_IRQS < NR_IRQS_CNS3XXX)
+#undef NR_IRQS
+#define NR_IRQS NR_IRQS_CNS3XXX
+#endif
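Since IRQ_TC11MP_GIC_START is 32 (see mach/irqs.h) and the table above reserves 64 GIC sources, NR_IRQS_CNS3XXX, and therefore NR_IRQS after this override, works out to 32 + 64 = 96.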
+
+#endif /* __MACH_BOARD_CNS3XXX_H */
diff --git a/arch/arm/mach-cns3xxx/include/mach/debug-macro.S b/arch/arm/mach-cns3xxx/include/mach/debug-macro.S
new file mode 100644
index 000000000000..d16ce7eb00e9
--- /dev/null
+++ b/arch/arm/mach-cns3xxx/include/mach/debug-macro.S
@@ -0,0 +1,21 @@
+/*
+ * Debugging macro include header
+ *
+ * Copyright 1994-1999 Russell King
+ * Copyright 2008 Cavium Networks
+ * Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ */
+
+ .macro addruart,rx
+ mrc p15, 0, \rx, c1, c0
+ tst \rx, #1 @ MMU enabled?
+ moveq \rx, #0x10000000
+ movne \rx, #0xf0000000 @ virtual base
+ orr \rx, \rx, #0x00009000
+ .endm
+
+#include <asm/hardware/debug-pl01x.S>
diff --git a/arch/arm/mach-cns3xxx/include/mach/entry-macro.S b/arch/arm/mach-cns3xxx/include/mach/entry-macro.S
new file mode 100644
index 000000000000..5e1c5545680f
--- /dev/null
+++ b/arch/arm/mach-cns3xxx/include/mach/entry-macro.S
@@ -0,0 +1,82 @@
+/*
+ * Low-level IRQ helper macros for Cavium Networks platforms
+ *
+ * Copyright 2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#include <mach/hardware.h>
+#include <asm/hardware/gic.h>
+
+ .macro disable_fiq
+ .endm
+
+ .macro get_irqnr_preamble, base, tmp
+ ldr \base, =gic_cpu_base_addr
+ ldr \base, [\base]
+ .endm
+
+ .macro arch_ret_to_user, tmp1, tmp2
+ .endm
+
+ /*
+ * The interrupt numbering scheme is defined in the
+ * interrupt controller spec. To wit:
+ *
+ * Interrupts 0-15 are IPI
+ * 16-28 are reserved
+ * 29-31 are local. We allow 30 to be used for the watchdog.
+ * 32-1020 are global
+ * 1021-1022 are reserved
+ * 1023 is "spurious" (no interrupt)
+ *
+ * For now, we ignore all local interrupts so only return an interrupt if it's
+ * between 30 and 1020. The test_for_ipi routine below will pick up on IPIs.
+ *
+ * A simple read from the controller will tell us the number of the highest
+ * priority enabled interrupt. We then just need to check whether it is in the
+ * valid range for an IRQ (30-1020 inclusive).
+ */
+
+ .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
+
+ ldr \irqstat, [\base, #GIC_CPU_INTACK] /* bits 12-10 = src CPU, 9-0 = int # */
+
+ ldr \tmp, =1021
+
+ bic \irqnr, \irqstat, #0x1c00
+
+ cmp \irqnr, #29
+ cmpcc \irqnr, \irqnr
+ cmpne \irqnr, \tmp
+ cmpcs \irqnr, \irqnr
+
+ .endm
+
+ /* We assume that irqstat (the raw value of the IRQ acknowledge
+ * register) is preserved from the macro above.
+ * If there is an IPI, we immediately signal end of interrupt on the
+ * controller, since this requires the original irqstat value which
+ * we won't easily be able to recreate later.
+ */
+
+ .macro test_for_ipi, irqnr, irqstat, base, tmp
+ bic \irqnr, \irqstat, #0x1c00
+ cmp \irqnr, #16
+ strcc \irqstat, [\base, #GIC_CPU_EOI]
+ cmpcs \irqnr, \irqnr
+ .endm
+
+ /* As above, this assumes that irqstat and base are preserved.. */
+
+ .macro test_for_ltirq, irqnr, irqstat, base, tmp
+ bic \irqnr, \irqstat, #0x1c00
+ mov \tmp, #0
+ cmp \irqnr, #29
+ moveq \tmp, #1
+ streq \irqstat, [\base, #GIC_CPU_EOI]
+ cmp \tmp, #0
+ .endm
diff --git a/arch/arm/mach-cns3xxx/include/mach/hardware.h b/arch/arm/mach-cns3xxx/include/mach/hardware.h
new file mode 100644
index 000000000000..57e09836f9d7
--- /dev/null
+++ b/arch/arm/mach-cns3xxx/include/mach/hardware.h
@@ -0,0 +1,22 @@
+/*
+ * This file contains the hardware definitions of the Cavium Networks boards.
+ *
+ * Copyright 2003 ARM Limited.
+ * Copyright 2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __MACH_HARDWARE_H
+#define __MACH_HARDWARE_H
+
+#include <asm/sizes.h>
+
+/* PCI I/O and memory definitions */
+#define PCIBIOS_MIN_IO 0x00000000
+#define PCIBIOS_MIN_MEM 0x00000000
+#define pcibios_assign_all_busses() 1
+
+#endif
diff --git a/arch/arm/mach-cns3xxx/include/mach/io.h b/arch/arm/mach-cns3xxx/include/mach/io.h
new file mode 100644
index 000000000000..33b6fc1ece7c
--- /dev/null
+++ b/arch/arm/mach-cns3xxx/include/mach/io.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright 2008 Cavium Networks
+ * Copyright 2003 ARM Limited
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ */
+#ifndef __MACH_IO_H
+#define __MACH_IO_H
+
+#define IO_SPACE_LIMIT 0xffffffff
+
+#define __io(a) __typesafe_io(a)
+#define __mem_pci(a) (a)
+
+#endif
diff --git a/arch/arm/mach-cns3xxx/include/mach/irqs.h b/arch/arm/mach-cns3xxx/include/mach/irqs.h
new file mode 100644
index 000000000000..2ab96f8085c8
--- /dev/null
+++ b/arch/arm/mach-cns3xxx/include/mach/irqs.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright 2000 Deep Blue Solutions Ltd.
+ * Copyright 2003 ARM Limited
+ * Copyright 2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __MACH_IRQS_H
+#define __MACH_IRQS_H
+
+#define IRQ_LOCALTIMER 29
+#define IRQ_LOCALWDOG 30
+#define IRQ_TC11MP_GIC_START 32
+
+#include <mach/cns3xxx.h>
+
+#ifndef NR_IRQS
+#error "NR_IRQS not defined by the board-specific files"
+#endif
+
+#endif
diff --git a/arch/arm/mach-cns3xxx/include/mach/memory.h b/arch/arm/mach-cns3xxx/include/mach/memory.h
new file mode 100644
index 000000000000..3b6b769b7a27
--- /dev/null
+++ b/arch/arm/mach-cns3xxx/include/mach/memory.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2003 ARM Limited
+ * Copyright 2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __MACH_MEMORY_H
+#define __MACH_MEMORY_H
+
+/*
+ * Physical DRAM offset.
+ */
+#define PHYS_OFFSET UL(0x00000000)
+
+#define __phys_to_bus(x) ((x) + PHYS_OFFSET)
+#define __bus_to_phys(x) ((x) - PHYS_OFFSET)
+
+#define __virt_to_bus(v) __phys_to_bus(__virt_to_phys(v))
+#define __bus_to_virt(b) __phys_to_virt(__bus_to_phys(b))
+#define __pfn_to_bus(p) __phys_to_bus(__pfn_to_phys(p))
+#define __bus_to_pfn(b) __phys_to_pfn(__bus_to_phys(b))
+
+#endif
diff --git a/arch/arm/mach-cns3xxx/include/mach/system.h b/arch/arm/mach-cns3xxx/include/mach/system.h
new file mode 100644
index 000000000000..58bb03ae3cf4
--- /dev/null
+++ b/arch/arm/mach-cns3xxx/include/mach/system.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2000 Deep Blue Solutions Ltd
+ * Copyright 2003 ARM Limited
+ * Copyright 2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __MACH_SYSTEM_H
+#define __MACH_SYSTEM_H
+
+#include <linux/io.h>
+#include <asm/proc-fns.h>
+#include <mach/hardware.h>
+
+static inline void arch_idle(void)
+{
+ /*
+ * This should do all the clock switching
+ * and wait for interrupt tricks
+ */
+ cpu_do_idle();
+}
+
+void arch_reset(char mode, const char *cmd);
+
+#endif
diff --git a/arch/arm/mach-cns3xxx/include/mach/timex.h b/arch/arm/mach-cns3xxx/include/mach/timex.h
new file mode 100644
index 000000000000..1fd04217cacb
--- /dev/null
+++ b/arch/arm/mach-cns3xxx/include/mach/timex.h
@@ -0,0 +1,12 @@
+/*
+ * Cavium Networks architecture timex specifications
+ *
+ * Copyright 2003 ARM Limited
+ * Copyright 2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#define CLOCK_TICK_RATE (50000000 / 16)
diff --git a/arch/arm/mach-cns3xxx/include/mach/uncompress.h b/arch/arm/mach-cns3xxx/include/mach/uncompress.h
new file mode 100644
index 000000000000..de8ead9b91f7
--- /dev/null
+++ b/arch/arm/mach-cns3xxx/include/mach/uncompress.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2003 ARM Limited
+ * Copyright 2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/mach-types.h>
+#include <mach/hardware.h>
+#include <mach/cns3xxx.h>
+
+#define AMBA_UART_DR(base) (*(volatile unsigned char *)((base) + 0x00))
+#define AMBA_UART_LCRH(base) (*(volatile unsigned char *)((base) + 0x2c))
+#define AMBA_UART_CR(base) (*(volatile unsigned char *)((base) + 0x30))
+#define AMBA_UART_FR(base) (*(volatile unsigned char *)((base) + 0x18))
+
+/*
+ * Return the UART base address
+ */
+static inline unsigned long get_uart_base(void)
+{
+ if (machine_is_cns3420vb())
+ return CNS3XXX_UART0_BASE;
+ else
+ return 0;
+}
+
+/*
+ * This does not append a newline
+ */
+static inline void putc(int c)
+{
+ unsigned long base = get_uart_base();
+
+ while (AMBA_UART_FR(base) & (1 << 5))
+ barrier();
+
+ AMBA_UART_DR(base) = c;
+}
+
+static inline void flush(void)
+{
+ unsigned long base = get_uart_base();
+
+ while (AMBA_UART_FR(base) & (1 << 3))
+ barrier();
+}
+
+/*
+ * nothing to do
+ */
+#define arch_decomp_setup()
+#define arch_decomp_wdog()
diff --git a/arch/arm/mach-cns3xxx/include/mach/vmalloc.h b/arch/arm/mach-cns3xxx/include/mach/vmalloc.h
new file mode 100644
index 000000000000..4d381ec05278
--- /dev/null
+++ b/arch/arm/mach-cns3xxx/include/mach/vmalloc.h
@@ -0,0 +1,11 @@
+/*
+ * Copyright 2000 Russell King.
+ * Copyright 2003 ARM Limited
+ * Copyright 2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#define VMALLOC_END 0xd8000000
diff --git a/arch/arm/mach-cns3xxx/pm.c b/arch/arm/mach-cns3xxx/pm.c
new file mode 100644
index 000000000000..725e1a4fc231
--- /dev/null
+++ b/arch/arm/mach-cns3xxx/pm.c
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <mach/system.h>
+#include <mach/cns3xxx.h>
+
+void cns3xxx_pwr_clk_en(unsigned int block)
+{
+ PM_CLK_GATE_REG |= (block & PM_CLK_GATE_REG_MASK);
+}
+
+void cns3xxx_pwr_power_up(unsigned int block)
+{
+ PM_PLL_HM_PD_CTRL_REG &= ~(block & CNS3XXX_PWR_PLL_ALL);
+
+ /* Wait 300us for the PLL output clock to lock. */
+ udelay(300);
+};
+
+void cns3xxx_pwr_power_down(unsigned int block)
+{
+ /* write '1' to power down */
+ PM_PLL_HM_PD_CTRL_REG |= (block & CNS3XXX_PWR_PLL_ALL);
+};
+
+static void cns3xxx_pwr_soft_rst_force(unsigned int block)
+{
+ /*
+ * Bits 0, 28 and 29 are reset by programming them low only;
+ * all other blocks are reset by programming low and then high.
+ */
+ if (block & 0x30000001) {
+ PM_SOFT_RST_REG &= ~(block & PM_SOFT_RST_REG_MASK);
+ } else {
+ PM_SOFT_RST_REG &= ~(block & PM_SOFT_RST_REG_MASK);
+ PM_SOFT_RST_REG |= (block & PM_SOFT_RST_REG_MASK);
+ }
+}
+
+void cns3xxx_pwr_soft_rst(unsigned int block)
+{
+ static unsigned int soft_reset;
+
+ if (soft_reset & block) {
+ /* SPI/I2C/GPIO use the same block, reset once. */
+ return;
+ } else {
+ soft_reset |= block;
+ }
+ cns3xxx_pwr_soft_rst_force(block);
+}
+
+void arch_reset(char mode, const char *cmd)
+{
+ /*
+ * To reset, assert the global software reset in the
+ * power management block.
+ */
+ cns3xxx_pwr_soft_rst(CNS3XXX_PWR_SOFTWARE_RST(GLOBAL));
+}
+
+/*
+ * cns3xxx_cpu_clock - return CPU/L2 clock (in MHz)
+ * aclk: cpu clock/2
+ * hclk: cpu clock/4
+ * pclk: cpu clock/8
+ */
+int cns3xxx_cpu_clock(void)
+{
+ int cpu;
+ int cpu_sel;
+ int div_sel;
+
+ cpu_sel = (PM_CLK_CTRL_REG >> PM_CLK_CTRL_REG_OFFSET_PLL_CPU_SEL) & 0xf;
+ div_sel = (PM_CLK_CTRL_REG >> PM_CLK_CTRL_REG_OFFSET_CPU_CLK_DIV) & 0x3;
+
+ cpu = (300 + ((cpu_sel / 3) * 100) + ((cpu_sel % 3) * 33)) >> div_sel;
+
+ return cpu;
+}
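cns3xxx_cpu_clock() decodes PLL_CPU_SEL into a base frequency that starts at 300 MHz and climbs in roughly 33 MHz steps, then halves it once per CPU_CLK_DIV step. A worked example with purely illustrative register values:

/*
 * Illustrative only: cpu_sel = 5, div_sel = 1
 *   cpu = (300 + (5 / 3) * 100 + (5 % 3) * 33) >> 1
 *       = (300 + 100 + 66) >> 1
 *       = 233	(MHz)
 */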
diff --git a/arch/arm/mach-davinci/Kconfig b/arch/arm/mach-davinci/Kconfig
index 0ebe185610bf..0316e201ada0 100644
--- a/arch/arm/mach-davinci/Kconfig
+++ b/arch/arm/mach-davinci/Kconfig
@@ -7,6 +7,7 @@ config CP_INTC
bool
config ARCH_DAVINCI_DMx
+ select CPU_ARM926T
bool
menu "TI DaVinci Implementations"
@@ -41,6 +42,7 @@ config ARCH_DAVINCI_DA850
select ARCH_HAS_CPUFREQ
config ARCH_DAVINCI_DA8XX
+ select CPU_ARM926T
bool
config ARCH_DAVINCI_DM365
diff --git a/arch/arm/mach-davinci/board-da830-evm.c b/arch/arm/mach-davinci/board-da830-evm.c
index dc19870b23cd..212d97084bd7 100644
--- a/arch/arm/mach-davinci/board-da830-evm.c
+++ b/arch/arm/mach-davinci/board-da830-evm.c
@@ -33,9 +33,6 @@
#define DA830_EVM_PHY_MASK 0x0
#define DA830_EVM_MDIO_FREQUENCY 2200000 /* PHY bus frequency */
-#define DA830_EMIF25_ASYNC_DATA_CE3_BASE 0x62000000
-#define DA830_EMIF25_CONTROL_BASE 0x68000000
-
/*
* USB1 VBUS is controlled by GPIO1[15], over-current is reported on GPIO2[4].
*/
@@ -157,7 +154,7 @@ static __init void da830_evm_usb_init(void)
__func__, ret);
}
- ret = da8xx_pinmux_setup(da830_evm_usb11_pins);
+ ret = davinci_cfg_reg_list(da830_evm_usb11_pins);
if (ret) {
pr_warning("%s: USB 1.1 PinMux setup failed: %d\n",
__func__, ret);
@@ -229,15 +226,22 @@ static const short da830_evm_mmc_sd_pins[] = {
};
#define DA830_MMCSD_WP_PIN GPIO_TO_PIN(2, 1)
+#define DA830_MMCSD_CD_PIN GPIO_TO_PIN(2, 2)
static int da830_evm_mmc_get_ro(int index)
{
return gpio_get_value(DA830_MMCSD_WP_PIN);
}
+static int da830_evm_mmc_get_cd(int index)
+{
+ return !gpio_get_value(DA830_MMCSD_CD_PIN);
+}
+
static struct davinci_mmc_config da830_evm_mmc_config = {
.get_ro = da830_evm_mmc_get_ro,
- .wires = 4,
+ .get_cd = da830_evm_mmc_get_cd,
+ .wires = 8,
.max_freq = 50000000,
.caps = MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED,
.version = MMC_CTLR_VERSION_2,
@@ -247,7 +251,7 @@ static inline void da830_evm_init_mmc(void)
{
int ret;
- ret = da8xx_pinmux_setup(da830_evm_mmc_sd_pins);
+ ret = davinci_cfg_reg_list(da830_evm_mmc_sd_pins);
if (ret) {
pr_warning("da830_evm_init: mmc/sd mux setup failed: %d\n",
ret);
@@ -262,6 +266,14 @@ static inline void da830_evm_init_mmc(void)
}
gpio_direction_input(DA830_MMCSD_WP_PIN);
+ ret = gpio_request(DA830_MMCSD_CD_PIN, "MMC CD");
+ if (ret) {
+ pr_warning("da830_evm_init: can not open GPIO %d\n",
+ DA830_MMCSD_CD_PIN);
+ return;
+ }
+ gpio_direction_input(DA830_MMCSD_CD_PIN);
+
ret = da8xx_register_mmcsd0(&da830_evm_mmc_config);
if (ret) {
pr_warning("da830_evm_init: mmc/sd registration failed: %d\n",
@@ -360,13 +372,13 @@ static struct davinci_nand_pdata da830_evm_nand_pdata = {
static struct resource da830_evm_nand_resources[] = {
[0] = { /* First memory resource is NAND I/O window */
- .start = DA830_EMIF25_ASYNC_DATA_CE3_BASE,
- .end = DA830_EMIF25_ASYNC_DATA_CE3_BASE + PAGE_SIZE - 1,
+ .start = DA8XX_AEMIF_CS3_BASE,
+ .end = DA8XX_AEMIF_CS3_BASE + PAGE_SIZE - 1,
.flags = IORESOURCE_MEM,
},
[1] = { /* Second memory resource is AEMIF control registers */
- .start = DA830_EMIF25_CONTROL_BASE,
- .end = DA830_EMIF25_CONTROL_BASE + SZ_32K - 1,
+ .start = DA8XX_AEMIF_CTL_BASE,
+ .end = DA8XX_AEMIF_CTL_BASE + SZ_32K - 1,
.flags = IORESOURCE_MEM,
},
};
@@ -392,7 +404,7 @@ static inline void da830_evm_init_nand(int mux_mode)
return;
}
- ret = da8xx_pinmux_setup(da830_evm_emif25_pins);
+ ret = davinci_cfg_reg_list(da830_evm_emif25_pins);
if (ret)
pr_warning("da830_evm_init: emif25 mux setup failed: %d\n",
ret);
@@ -412,7 +424,7 @@ static inline void da830_evm_init_lcdc(int mux_mode)
{
int ret;
- ret = da8xx_pinmux_setup(da830_lcdcntl_pins);
+ ret = davinci_cfg_reg_list(da830_lcdcntl_pins);
if (ret)
pr_warning("da830_evm_init: lcdcntl mux setup failed: %d\n",
ret);
@@ -492,7 +504,7 @@ static __init void da830_evm_init(void)
pr_warning("da830_evm_init: edma registration failed: %d\n",
ret);
- ret = da8xx_pinmux_setup(da830_i2c0_pins);
+ ret = davinci_cfg_reg_list(da830_i2c0_pins);
if (ret)
pr_warning("da830_evm_init: i2c0 mux setup failed: %d\n",
ret);
@@ -508,7 +520,7 @@ static __init void da830_evm_init(void)
soc_info->emac_pdata->mdio_max_freq = DA830_EVM_MDIO_FREQUENCY;
soc_info->emac_pdata->rmii_en = 1;
- ret = da8xx_pinmux_setup(da830_cpgmac_pins);
+ ret = davinci_cfg_reg_list(da830_cpgmac_pins);
if (ret)
pr_warning("da830_evm_init: cpgmac mux setup failed: %d\n",
ret);
@@ -527,7 +539,7 @@ static __init void da830_evm_init(void)
i2c_register_board_info(1, da830_evm_i2c_devices,
ARRAY_SIZE(da830_evm_i2c_devices));
- ret = da8xx_pinmux_setup(da830_evm_mcasp1_pins);
+ ret = davinci_cfg_reg_list(da830_evm_mcasp1_pins);
if (ret)
pr_warning("da830_evm_init: mcasp1 mux setup failed: %d\n",
ret);
@@ -549,14 +561,6 @@ static int __init da830_evm_console_init(void)
console_initcall(da830_evm_console_init);
#endif
-static __init void da830_evm_irq_init(void)
-{
- struct davinci_soc_info *soc_info = &davinci_soc_info;
-
- cp_intc_init((void __iomem *)DA8XX_CP_INTC_VIRT, DA830_N_CP_INTC_IRQ,
- soc_info->intc_irq_prios);
-}
-
static void __init da830_evm_map_io(void)
{
da830_init();
@@ -567,7 +571,7 @@ MACHINE_START(DAVINCI_DA830_EVM, "DaVinci DA830/OMAP-L137 EVM")
.io_pg_offst = (__IO_ADDRESS(IO_PHYS) >> 18) & 0xfffc,
.boot_params = (DA8XX_DDR_BASE + 0x100),
.map_io = da830_evm_map_io,
- .init_irq = da830_evm_irq_init,
+ .init_irq = cp_intc_init,
.timer = &davinci_timer,
.init_machine = da830_evm_init,
MACHINE_END
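The board code above switches from the DA8xx-only da8xx_pinmux_setup() to the generic davinci_cfg_reg_list(), which walks a -1 terminated array of mux descriptors. A minimal sketch of the calling convention; the pin names and the function are hypothetical:

static const short example_pins[] = {
	EXAMPLE_PIN_A, EXAMPLE_PIN_B,	/* hypothetical mux enum entries */
	-1
};

static void __init example_mux_init(void)
{
	if (davinci_cfg_reg_list(example_pins))
		pr_warning("example: pinmux setup failed\n");
}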
diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c
index 411284d0b0fa..2ec3095ffb7b 100644
--- a/arch/arm/mach-davinci/board-da850-evm.c
+++ b/arch/arm/mach-davinci/board-da850-evm.c
@@ -17,6 +17,7 @@
#include <linux/i2c.h>
#include <linux/i2c/at24.h>
#include <linux/i2c/pca953x.h>
+#include <linux/mfd/tps6507x.h>
#include <linux/gpio.h>
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
@@ -24,6 +25,8 @@
#include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>
#include <linux/regulator/machine.h>
+#include <linux/mfd/tps6507x.h>
+#include <linux/input/tps6507x-ts.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@@ -206,12 +209,12 @@ static __init void da850_evm_setup_nor_nand(void)
int ret = 0;
if (ui_card_detected & !HAS_MMC) {
- ret = da8xx_pinmux_setup(da850_nand_pins);
+ ret = davinci_cfg_reg_list(da850_nand_pins);
if (ret)
pr_warning("da850_evm_init: nand mux setup failed: "
"%d\n", ret);
- ret = da8xx_pinmux_setup(da850_nor_pins);
+ ret = davinci_cfg_reg_list(da850_nor_pins);
if (ret)
pr_warning("da850_evm_init: nor mux setup failed: %d\n",
ret);
@@ -533,10 +536,24 @@ struct regulator_init_data tps65070_regulator_data[] = {
},
};
+static struct touchscreen_init_data tps6507x_touchscreen_data = {
+ .poll_period = 30, /* ms between touch samples */
+ .min_pressure = 0x30, /* minimum pressure to trigger touch */
+ .vref = 0, /* turn off vref when not using A/D */
+ .vendor = 0, /* /sys/class/input/input?/id/vendor */
+ .product = 65070, /* /sys/class/input/input?/id/product */
+ .version = 0x100, /* /sys/class/input/input?/id/version */
+};
+
+static struct tps6507x_board tps_board = {
+ .tps6507x_pmic_init_data = &tps65070_regulator_data[0],
+ .tps6507x_ts_init_data = &tps6507x_touchscreen_data,
+};
+
static struct i2c_board_info __initdata da850evm_tps65070_info[] = {
{
I2C_BOARD_INFO("tps6507x", 0x48),
- .platform_data = &tps65070_regulator_data[0],
+ .platform_data = &tps_board,
},
};
@@ -568,12 +585,12 @@ static int __init da850_evm_config_emac(void)
if (rmii_en) {
val |= BIT(8);
- ret = da8xx_pinmux_setup(da850_rmii_pins);
+ ret = davinci_cfg_reg_list(da850_rmii_pins);
pr_info("EMAC: RMII PHY configured, MII PHY will not be"
" functional\n");
} else {
val &= ~BIT(8);
- ret = da8xx_pinmux_setup(da850_cpgmac_pins);
+ ret = davinci_cfg_reg_list(da850_cpgmac_pins);
pr_info("EMAC: MII PHY configured, RMII PHY will not be"
" functional\n");
}
@@ -626,7 +643,7 @@ static __init void da850_evm_init(void)
pr_warning("da850_evm_init: edma registration failed: %d\n",
ret);
- ret = da8xx_pinmux_setup(da850_i2c0_pins);
+ ret = davinci_cfg_reg_list(da850_i2c0_pins);
if (ret)
pr_warning("da850_evm_init: i2c0 mux setup failed: %d\n",
ret);
@@ -643,7 +660,7 @@ static __init void da850_evm_init(void)
ret);
if (HAS_MMC) {
- ret = da8xx_pinmux_setup(da850_mmcsd0_pins);
+ ret = davinci_cfg_reg_list(da850_mmcsd0_pins);
if (ret)
pr_warning("da850_evm_init: mmcsd0 mux setup failed:"
" %d\n", ret);
@@ -679,20 +696,20 @@ static __init void da850_evm_init(void)
__raw_writel(0, IO_ADDRESS(DA8XX_UART1_BASE) + 0x30);
__raw_writel(0, IO_ADDRESS(DA8XX_UART0_BASE) + 0x30);
- ret = da8xx_pinmux_setup(da850_mcasp_pins);
+ ret = davinci_cfg_reg_list(da850_mcasp_pins);
if (ret)
pr_warning("da850_evm_init: mcasp mux setup failed: %d\n",
ret);
da8xx_register_mcasp(0, &da850_evm_snd_data);
- ret = da8xx_pinmux_setup(da850_lcdcntl_pins);
+ ret = davinci_cfg_reg_list(da850_lcdcntl_pins);
if (ret)
pr_warning("da850_evm_init: lcdcntl mux setup failed: %d\n",
ret);
/* Handle board specific muxing for LCD here */
- ret = da8xx_pinmux_setup(da850_evm_lcdc_pins);
+ ret = davinci_cfg_reg_list(da850_evm_lcdc_pins);
if (ret)
pr_warning("da850_evm_init: evm specific lcd mux setup "
"failed: %d\n", ret);
@@ -736,14 +753,6 @@ static int __init da850_evm_console_init(void)
console_initcall(da850_evm_console_init);
#endif
-static __init void da850_evm_irq_init(void)
-{
- struct davinci_soc_info *soc_info = &davinci_soc_info;
-
- cp_intc_init((void __iomem *)DA8XX_CP_INTC_VIRT, DA850_N_CP_INTC_IRQ,
- soc_info->intc_irq_prios);
-}
-
static void __init da850_evm_map_io(void)
{
da850_init();
@@ -754,7 +763,7 @@ MACHINE_START(DAVINCI_DA850_EVM, "DaVinci DA850/OMAP-L138 EVM")
.io_pg_offst = (__IO_ADDRESS(IO_PHYS) >> 18) & 0xfffc,
.boot_params = (DA8XX_DDR_BASE + 0x100),
.map_io = da850_evm_map_io,
- .init_irq = da850_evm_irq_init,
+ .init_irq = cp_intc_init,
.timer = &davinci_timer,
.init_machine = da850_evm_init,
MACHINE_END
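The TPS6507x platform data is now wrapped in a single struct tps6507x_board, so one I2C device entry can hand the MFD parent both the regulator and the touchscreen init data. A sketch of how another board might fill the same struct; the regulator array is hypothetical and leaving the touchscreen pointer NULL assumes the sub-driver tolerates missing data:

static struct tps6507x_board example_tps_board = {
	.tps6507x_pmic_init_data = &example_regulator_data[0],	/* hypothetical */
	.tps6507x_ts_init_data	 = NULL,	/* board without a resistive panel */
};

static struct i2c_board_info __initdata example_pmic_info[] = {
	{
		I2C_BOARD_INFO("tps6507x", 0x48),
		.platform_data = &example_tps_board,
	},
};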
diff --git a/arch/arm/mach-davinci/board-dm355-evm.c b/arch/arm/mach-davinci/board-dm355-evm.c
index aa48e3f69715..a3191015efee 100644
--- a/arch/arm/mach-davinci/board-dm355-evm.c
+++ b/arch/arm/mach-davinci/board-dm355-evm.c
@@ -33,9 +33,6 @@
#include <mach/mmc.h>
#include <mach/usb.h>
-#define DAVINCI_ASYNC_EMIF_CONTROL_BASE 0x01e10000
-#define DAVINCI_ASYNC_EMIF_DATA_CE0_BASE 0x02000000
-
/* NOTE: this is geared for the standard config, with a socketed
* 2 GByte Micron NAND (MT29F16G08FAA) using 128KB sectors. If you
* swap chips, maybe with a different block size, partitioning may
@@ -86,12 +83,12 @@ static struct davinci_nand_pdata davinci_nand_data = {
static struct resource davinci_nand_resources[] = {
{
- .start = DAVINCI_ASYNC_EMIF_DATA_CE0_BASE,
- .end = DAVINCI_ASYNC_EMIF_DATA_CE0_BASE + SZ_32M - 1,
+ .start = DM355_ASYNC_EMIF_DATA_CE0_BASE,
+ .end = DM355_ASYNC_EMIF_DATA_CE0_BASE + SZ_32M - 1,
.flags = IORESOURCE_MEM,
}, {
- .start = DAVINCI_ASYNC_EMIF_CONTROL_BASE,
- .end = DAVINCI_ASYNC_EMIF_CONTROL_BASE + SZ_4K - 1,
+ .start = DM355_ASYNC_EMIF_CONTROL_BASE,
+ .end = DM355_ASYNC_EMIF_CONTROL_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
};
@@ -353,17 +350,12 @@ static __init void dm355_evm_init(void)
dm355_init_asp1(ASP1_TX_EVT_EN | ASP1_RX_EVT_EN, &dm355_evm_snd_data);
}
-static __init void dm355_evm_irq_init(void)
-{
- davinci_irq_init();
-}
-
MACHINE_START(DAVINCI_DM355_EVM, "DaVinci DM355 EVM")
.phys_io = IO_PHYS,
.io_pg_offst = (__IO_ADDRESS(IO_PHYS) >> 18) & 0xfffc,
.boot_params = (0x80000100),
.map_io = dm355_evm_map_io,
- .init_irq = dm355_evm_irq_init,
+ .init_irq = davinci_irq_init,
.timer = &davinci_timer,
.init_machine = dm355_evm_init,
MACHINE_END
diff --git a/arch/arm/mach-davinci/board-dm355-leopard.c b/arch/arm/mach-davinci/board-dm355-leopard.c
index 21f32eb41e8c..f1d8132cf0c3 100644
--- a/arch/arm/mach-davinci/board-dm355-leopard.c
+++ b/arch/arm/mach-davinci/board-dm355-leopard.c
@@ -30,9 +30,6 @@
#include <mach/mmc.h>
#include <mach/usb.h>
-#define DAVINCI_ASYNC_EMIF_CONTROL_BASE 0x01e10000
-#define DAVINCI_ASYNC_EMIF_DATA_CE0_BASE 0x02000000
-
/* NOTE: this is geared for the standard config, with a socketed
* 2 GByte Micron NAND (MT29F16G08FAA) using 128KB sectors. If you
* swap chips, maybe with a different block size, partitioning may
@@ -82,12 +79,12 @@ static struct davinci_nand_pdata davinci_nand_data = {
static struct resource davinci_nand_resources[] = {
{
- .start = DAVINCI_ASYNC_EMIF_DATA_CE0_BASE,
- .end = DAVINCI_ASYNC_EMIF_DATA_CE0_BASE + SZ_32M - 1,
+ .start = DM355_ASYNC_EMIF_DATA_CE0_BASE,
+ .end = DM355_ASYNC_EMIF_DATA_CE0_BASE + SZ_32M - 1,
.flags = IORESOURCE_MEM,
}, {
- .start = DAVINCI_ASYNC_EMIF_CONTROL_BASE,
- .end = DAVINCI_ASYNC_EMIF_CONTROL_BASE + SZ_4K - 1,
+ .start = DM355_ASYNC_EMIF_CONTROL_BASE,
+ .end = DM355_ASYNC_EMIF_CONTROL_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
};
@@ -272,17 +269,12 @@ static __init void dm355_leopard_init(void)
ARRAY_SIZE(dm355_leopard_spi_info));
}
-static __init void dm355_leopard_irq_init(void)
-{
- davinci_irq_init();
-}
-
MACHINE_START(DM355_LEOPARD, "DaVinci DM355 leopard")
.phys_io = IO_PHYS,
.io_pg_offst = (__IO_ADDRESS(IO_PHYS) >> 18) & 0xfffc,
.boot_params = (0x80000100),
.map_io = dm355_leopard_map_io,
- .init_irq = dm355_leopard_irq_init,
+ .init_irq = davinci_irq_init,
.timer = &davinci_timer,
.init_machine = dm355_leopard_init,
MACHINE_END
diff --git a/arch/arm/mach-davinci/board-dm365-evm.c b/arch/arm/mach-davinci/board-dm365-evm.c
index df4ab2105869..84acef1d0b3d 100644
--- a/arch/arm/mach-davinci/board-dm365-evm.c
+++ b/arch/arm/mach-davinci/board-dm365-evm.c
@@ -54,11 +54,6 @@ static inline int have_tvp7002(void)
return 0;
}
-
-#define DM365_ASYNC_EMIF_CONTROL_BASE 0x01d10000
-#define DM365_ASYNC_EMIF_DATA_CE0_BASE 0x02000000
-#define DM365_ASYNC_EMIF_DATA_CE1_BASE 0x04000000
-
#define DM365_EVM_PHY_MASK (0x2)
#define DM365_EVM_MDIO_FREQUENCY (2200000) /* PHY bus frequency */
@@ -605,7 +600,11 @@ static __init void dm365_evm_init(void)
/* maybe setup mmc1/etc ... _after_ mmc0 */
evm_init_cpld();
+#ifdef CONFIG_SND_DM365_AIC3X_CODEC
dm365_init_asp(&dm365_evm_snd_data);
+#elif defined(CONFIG_SND_DM365_VOICE_CODEC)
+ dm365_init_vc(&dm365_evm_snd_data);
+#endif
dm365_init_rtc();
dm365_init_ks(&dm365evm_ks_data);
@@ -613,17 +612,12 @@ static __init void dm365_evm_init(void)
ARRAY_SIZE(dm365_evm_spi_info));
}
-static __init void dm365_evm_irq_init(void)
-{
- davinci_irq_init();
-}
-
MACHINE_START(DAVINCI_DM365_EVM, "DaVinci DM365 EVM")
.phys_io = IO_PHYS,
.io_pg_offst = (__IO_ADDRESS(IO_PHYS) >> 18) & 0xfffc,
.boot_params = (0x80000100),
.map_io = dm365_evm_map_io,
- .init_irq = dm365_evm_irq_init,
+ .init_irq = davinci_irq_init,
.timer = &davinci_timer,
.init_machine = dm365_evm_init,
MACHINE_END
diff --git a/arch/arm/mach-davinci/board-dm644x-evm.c b/arch/arm/mach-davinci/board-dm644x-evm.c
index 976e11b7fa4a..34c8b418cd72 100644
--- a/arch/arm/mach-davinci/board-dm644x-evm.c
+++ b/arch/arm/mach-davinci/board-dm644x-evm.c
@@ -41,14 +41,6 @@
#define DM644X_EVM_PHY_MASK (0x2)
#define DM644X_EVM_MDIO_FREQUENCY (2200000) /* PHY bus frequency */
-#define DAVINCI_CFC_ATA_BASE 0x01C66000
-
-#define DAVINCI_ASYNC_EMIF_CONTROL_BASE 0x01e00000
-#define DAVINCI_ASYNC_EMIF_DATA_CE0_BASE 0x02000000
-#define DAVINCI_ASYNC_EMIF_DATA_CE1_BASE 0x04000000
-#define DAVINCI_ASYNC_EMIF_DATA_CE2_BASE 0x06000000
-#define DAVINCI_ASYNC_EMIF_DATA_CE3_BASE 0x08000000
-
#define LXT971_PHY_ID (0x001378e2)
#define LXT971_PHY_MASK (0xfffffff0)
@@ -92,8 +84,8 @@ static struct physmap_flash_data davinci_evm_norflash_data = {
/* NOTE: CFI probe will correctly detect flash part as 32M, but EMIF
* limits addresses to 16M, so using addresses past 16M will wrap */
static struct resource davinci_evm_norflash_resource = {
- .start = DAVINCI_ASYNC_EMIF_DATA_CE0_BASE,
- .end = DAVINCI_ASYNC_EMIF_DATA_CE0_BASE + SZ_16M - 1,
+ .start = DM644X_ASYNC_EMIF_DATA_CE0_BASE,
+ .end = DM644X_ASYNC_EMIF_DATA_CE0_BASE + SZ_16M - 1,
.flags = IORESOURCE_MEM,
};
@@ -111,7 +103,7 @@ static struct platform_device davinci_evm_norflash_device = {
 * It may be used instead of the (default) NOR chip to boot, using TI's
* tools to install the secondary boot loader (UBL) and U-Boot.
*/
-struct mtd_partition davinci_evm_nandflash_partition[] = {
+static struct mtd_partition davinci_evm_nandflash_partition[] = {
/* Bootloader layout depends on whose u-boot is installed, but we
* can hide all the details.
* - block 0 for u-boot environment ... in mainline u-boot
@@ -154,12 +146,12 @@ static struct davinci_nand_pdata davinci_evm_nandflash_data = {
static struct resource davinci_evm_nandflash_resource[] = {
{
- .start = DAVINCI_ASYNC_EMIF_DATA_CE0_BASE,
- .end = DAVINCI_ASYNC_EMIF_DATA_CE0_BASE + SZ_16M - 1,
+ .start = DM644X_ASYNC_EMIF_DATA_CE0_BASE,
+ .end = DM644X_ASYNC_EMIF_DATA_CE0_BASE + SZ_16M - 1,
.flags = IORESOURCE_MEM,
}, {
- .start = DAVINCI_ASYNC_EMIF_CONTROL_BASE,
- .end = DAVINCI_ASYNC_EMIF_CONTROL_BASE + SZ_4K - 1,
+ .start = DM644X_ASYNC_EMIF_CONTROL_BASE,
+ .end = DM644X_ASYNC_EMIF_CONTROL_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
};
@@ -258,32 +250,6 @@ static struct platform_device rtc_dev = {
.id = -1,
};
-static struct resource ide_resources[] = {
- {
- .start = DAVINCI_CFC_ATA_BASE,
- .end = DAVINCI_CFC_ATA_BASE + 0x7ff,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = IRQ_IDE,
- .end = IRQ_IDE,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static u64 ide_dma_mask = DMA_BIT_MASK(32);
-
-static struct platform_device ide_dev = {
- .name = "palm_bk3710",
- .id = -1,
- .resource = ide_resources,
- .num_resources = ARRAY_SIZE(ide_resources),
- .dev = {
- .dma_mask = &ide_dma_mask,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- },
-};
-
static struct snd_platform_data dm644x_evm_snd_data;
/*----------------------------------------------------------------------*/
@@ -704,10 +670,7 @@ static __init void davinci_evm_init(void)
pr_warning("WARNING: both IDE and Flash are "
"enabled, but they share AEMIF pins.\n"
"\tDisable IDE for NAND/NOR support.\n");
- davinci_cfg_reg(DM644X_HPIEN_DISABLE);
- davinci_cfg_reg(DM644X_ATAEN);
- davinci_cfg_reg(DM644X_HDIREN);
- platform_device_register(&ide_dev);
+ davinci_init_ide();
} else if (HAS_NAND || HAS_NOR) {
davinci_cfg_reg(DM644X_HPIEN_DISABLE);
davinci_cfg_reg(DM644X_ATAEN_DISABLE);
@@ -741,18 +704,13 @@ static __init void davinci_evm_init(void)
}
-static __init void davinci_evm_irq_init(void)
-{
- davinci_irq_init();
-}
-
MACHINE_START(DAVINCI_EVM, "DaVinci DM644x EVM")
/* Maintainer: MontaVista Software <source@mvista.com> */
.phys_io = IO_PHYS,
.io_pg_offst = (__IO_ADDRESS(IO_PHYS) >> 18) & 0xfffc,
.boot_params = (DAVINCI_DDR_BASE + 0x100),
.map_io = davinci_evm_map_io,
- .init_irq = davinci_evm_irq_init,
+ .init_irq = davinci_irq_init,
.timer = &davinci_timer,
.init_machine = davinci_evm_init,
MACHINE_END
diff --git a/arch/arm/mach-davinci/board-dm646x-evm.c b/arch/arm/mach-davinci/board-dm646x-evm.c
index 5ba3cb2daaa0..6d8889342c9f 100644
--- a/arch/arm/mach-davinci/board-dm646x-evm.c
+++ b/arch/arm/mach-davinci/board-dm646x-evm.c
@@ -80,17 +80,14 @@ static struct davinci_nand_pdata davinci_nand_data = {
.options = 0,
};
-#define DAVINCI_ASYNC_EMIF_CONTROL_BASE 0x20008000
-#define DAVINCI_ASYNC_EMIF_DATA_CE0_BASE 0x42000000
-
static struct resource davinci_nand_resources[] = {
{
- .start = DAVINCI_ASYNC_EMIF_DATA_CE0_BASE,
- .end = DAVINCI_ASYNC_EMIF_DATA_CE0_BASE + SZ_32M - 1,
+ .start = DM646X_ASYNC_EMIF_CS2_SPACE_BASE,
+ .end = DM646X_ASYNC_EMIF_CS2_SPACE_BASE + SZ_32M - 1,
.flags = IORESOURCE_MEM,
}, {
- .start = DAVINCI_ASYNC_EMIF_CONTROL_BASE,
- .end = DAVINCI_ASYNC_EMIF_CONTROL_BASE + SZ_4K - 1,
+ .start = DM646X_ASYNC_EMIF_CONTROL_BASE,
+ .end = DM646X_ASYNC_EMIF_CONTROL_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
};
@@ -736,17 +733,12 @@ static __init void evm_init(void)
platform_device_register(&davinci_nand_device);
if (HAS_ATA)
- dm646x_init_ide();
+ davinci_init_ide();
soc_info->emac_pdata->phy_mask = DM646X_EVM_PHY_MASK;
soc_info->emac_pdata->mdio_max_freq = DM646X_EVM_MDIO_FREQUENCY;
}
-static __init void davinci_dm646x_evm_irq_init(void)
-{
- davinci_irq_init();
-}
-
#define DM646X_EVM_REF_FREQ 27000000
#define DM6467T_EVM_REF_FREQ 33000000
@@ -763,7 +755,7 @@ MACHINE_START(DAVINCI_DM6467_EVM, "DaVinci DM646x EVM")
.io_pg_offst = (__IO_ADDRESS(IO_PHYS) >> 18) & 0xfffc,
.boot_params = (0x80000100),
.map_io = davinci_map_io,
- .init_irq = davinci_dm646x_evm_irq_init,
+ .init_irq = davinci_irq_init,
.timer = &davinci_timer,
.init_machine = evm_init,
MACHINE_END
@@ -773,7 +765,7 @@ MACHINE_START(DAVINCI_DM6467TEVM, "DaVinci DM6467T EVM")
.io_pg_offst = (__IO_ADDRESS(IO_PHYS) >> 18) & 0xfffc,
.boot_params = (0x80000100),
.map_io = davinci_map_io,
- .init_irq = davinci_dm646x_evm_irq_init,
+ .init_irq = davinci_irq_init,
.timer = &davinci_timer,
.init_machine = evm_init,
MACHINE_END
diff --git a/arch/arm/mach-davinci/board-neuros-osd2.c b/arch/arm/mach-davinci/board-neuros-osd2.c
index bd9ca079b69d..4c30e929bbf9 100644
--- a/arch/arm/mach-davinci/board-neuros-osd2.c
+++ b/arch/arm/mach-davinci/board-neuros-osd2.c
@@ -31,6 +31,7 @@
#include <asm/mach/arch.h>
#include <mach/dm644x.h>
+#include <mach/common.h>
#include <mach/i2c.h>
#include <mach/serial.h>
#include <mach/mux.h>
@@ -41,11 +42,6 @@
#define NEUROS_OSD2_PHY_MASK 0x2
#define NEUROS_OSD2_MDIO_FREQUENCY 2200000 /* PHY bus frequency */
-#define DAVINCI_CFC_ATA_BASE 0x01C66000
-
-#define DAVINCI_ASYNC_EMIF_CONTROL_BASE 0x01e00000
-#define DAVINCI_ASYNC_EMIF_DATA_CE0_BASE 0x02000000
-
#define LXT971_PHY_ID 0x001378e2
#define LXT971_PHY_MASK 0xfffffff0
@@ -60,7 +56,7 @@
#define NAND_BLOCK_SIZE SZ_128K
-struct mtd_partition davinci_ntosd2_nandflash_partition[] = {
+static struct mtd_partition davinci_ntosd2_nandflash_partition[] = {
{
/* UBL (a few copies) plus U-Boot */
.name = "bootloader",
@@ -98,12 +94,12 @@ static struct davinci_nand_pdata davinci_ntosd2_nandflash_data = {
static struct resource davinci_ntosd2_nandflash_resource[] = {
{
- .start = DAVINCI_ASYNC_EMIF_DATA_CE0_BASE,
- .end = DAVINCI_ASYNC_EMIF_DATA_CE0_BASE + SZ_16M - 1,
+ .start = DM644X_ASYNC_EMIF_DATA_CE0_BASE,
+ .end = DM644X_ASYNC_EMIF_DATA_CE0_BASE + SZ_16M - 1,
.flags = IORESOURCE_MEM,
}, {
- .start = DAVINCI_ASYNC_EMIF_CONTROL_BASE,
- .end = DAVINCI_ASYNC_EMIF_CONTROL_BASE + SZ_4K - 1,
+ .start = DM644X_ASYNC_EMIF_CONTROL_BASE,
+ .end = DM644X_ASYNC_EMIF_CONTROL_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
};
@@ -130,32 +126,6 @@ static struct platform_device davinci_fb_device = {
.num_resources = 0,
};
-static struct resource ide_resources[] = {
- {
- .start = DAVINCI_CFC_ATA_BASE,
- .end = DAVINCI_CFC_ATA_BASE + 0x7ff,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = IRQ_IDE,
- .end = IRQ_IDE,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static u64 ide_dma_mask = DMA_BIT_MASK(32);
-
-static struct platform_device ide_dev = {
- .name = "palm_bk3710",
- .id = -1,
- .resource = ide_resources,
- .num_resources = ARRAY_SIZE(ide_resources),
- .dev = {
- .dma_mask = &ide_dma_mask,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- },
-};
-
static struct snd_platform_data dm644x_ntosd2_snd_data;
static struct gpio_led ntosd2_leds[] = {
@@ -259,10 +229,7 @@ static __init void davinci_ntosd2_init(void)
pr_warning("WARNING: both IDE and Flash are "
"enabled, but they share AEMIF pins.\n"
"\tDisable IDE for NAND/NOR support.\n");
- davinci_cfg_reg(DM644X_HPIEN_DISABLE);
- davinci_cfg_reg(DM644X_ATAEN);
- davinci_cfg_reg(DM644X_HDIREN);
- platform_device_register(&ide_dev);
+ davinci_init_ide();
} else if (HAS_NAND) {
davinci_cfg_reg(DM644X_HPIEN_DISABLE);
davinci_cfg_reg(DM644X_ATAEN_DISABLE);
@@ -306,18 +273,13 @@ static __init void davinci_ntosd2_init(void)
davinci_setup_mmc(0, &davinci_ntosd2_mmc_config);
}
-static __init void davinci_ntosd2_irq_init(void)
-{
- davinci_irq_init();
-}
-
MACHINE_START(NEUROS_OSD2, "Neuros OSD2")
/* Maintainer: Neuros Technologies <neuros@groups.google.com> */
.phys_io = IO_PHYS,
.io_pg_offst = (__IO_ADDRESS(IO_PHYS) >> 18) & 0xfffc,
.boot_params = (DAVINCI_DDR_BASE + 0x100),
.map_io = davinci_ntosd2_map_io,
- .init_irq = davinci_ntosd2_irq_init,
+ .init_irq = davinci_irq_init,
.timer = &davinci_timer,
.init_machine = davinci_ntosd2_init,
MACHINE_END
diff --git a/arch/arm/mach-davinci/board-sffsdr.c b/arch/arm/mach-davinci/board-sffsdr.c
index 08d373bfcc8a..23e664a1a802 100644
--- a/arch/arm/mach-davinci/board-sffsdr.c
+++ b/arch/arm/mach-davinci/board-sffsdr.c
@@ -45,10 +45,7 @@
#define SFFSDR_PHY_MASK (0x2)
#define SFFSDR_MDIO_FREQUENCY (2200000) /* PHY bus frequency */
-#define DAVINCI_ASYNC_EMIF_CONTROL_BASE 0x01e00000
-#define DAVINCI_ASYNC_EMIF_DATA_CE0_BASE 0x02000000
-
-struct mtd_partition davinci_sffsdr_nandflash_partition[] = {
+static struct mtd_partition davinci_sffsdr_nandflash_partition[] = {
/* U-Boot Environment: Block 0
* UBL: Block 1
* U-Boot: Blocks 6-7 (256 kb)
@@ -76,12 +73,12 @@ static struct flash_platform_data davinci_sffsdr_nandflash_data = {
static struct resource davinci_sffsdr_nandflash_resource[] = {
{
- .start = DAVINCI_ASYNC_EMIF_DATA_CE0_BASE,
- .end = DAVINCI_ASYNC_EMIF_DATA_CE0_BASE + SZ_16M - 1,
+ .start = DM644X_ASYNC_EMIF_DATA_CE0_BASE,
+ .end = DM644X_ASYNC_EMIF_DATA_CE0_BASE + SZ_16M - 1,
.flags = IORESOURCE_MEM,
}, {
- .start = DAVINCI_ASYNC_EMIF_CONTROL_BASE,
- .end = DAVINCI_ASYNC_EMIF_CONTROL_BASE + SZ_4K - 1,
+ .start = DM644X_ASYNC_EMIF_CONTROL_BASE,
+ .end = DM644X_ASYNC_EMIF_CONTROL_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
};
@@ -155,18 +152,13 @@ static __init void davinci_sffsdr_init(void)
davinci_cfg_reg(DM644X_VLYNQWD);
}
-static __init void davinci_sffsdr_irq_init(void)
-{
- davinci_irq_init();
-}
-
MACHINE_START(SFFSDR, "Lyrtech SFFSDR")
/* Maintainer: Hugo Villeneuve hugo.villeneuve@lyrtech.com */
.phys_io = IO_PHYS,
.io_pg_offst = (__IO_ADDRESS(IO_PHYS) >> 18) & 0xfffc,
.boot_params = (DAVINCI_DDR_BASE + 0x100),
.map_io = davinci_sffsdr_map_io,
- .init_irq = davinci_sffsdr_irq_init,
+ .init_irq = davinci_irq_init,
.timer = &davinci_timer,
.init_machine = davinci_sffsdr_init,
MACHINE_END
diff --git a/arch/arm/mach-davinci/cdce949.c b/arch/arm/mach-davinci/cdce949.c
index aec375690543..ba8b12b2913b 100644
--- a/arch/arm/mach-davinci/cdce949.c
+++ b/arch/arm/mach-davinci/cdce949.c
@@ -19,6 +19,7 @@
#include <linux/i2c.h>
#include <mach/clock.h>
+#include <mach/cdce949.h>
#include "clock.h"
diff --git a/arch/arm/mach-davinci/clock.c b/arch/arm/mach-davinci/clock.c
index bf6218ee94e1..054c303caead 100644
--- a/arch/arm/mach-davinci/clock.c
+++ b/arch/arm/mach-davinci/clock.c
@@ -22,6 +22,7 @@
#include <mach/hardware.h>
+#include <mach/clock.h>
#include <mach/psc.h>
#include <mach/cputype.h>
#include "clock.h"
@@ -42,7 +43,8 @@ static void __clk_enable(struct clk *clk)
if (clk->parent)
__clk_enable(clk->parent);
if (clk->usecount++ == 0 && (clk->flags & CLK_PSC))
- davinci_psc_config(psc_domain(clk), clk->gpsc, clk->lpsc, 1);
+ davinci_psc_config(psc_domain(clk), clk->gpsc, clk->lpsc,
+ PSC_STATE_ENABLE);
}
static void __clk_disable(struct clk *clk)
@@ -51,7 +53,9 @@ static void __clk_disable(struct clk *clk)
return;
if (--clk->usecount == 0 && !(clk->flags & CLK_PLL) &&
(clk->flags & CLK_PSC))
- davinci_psc_config(psc_domain(clk), clk->gpsc, clk->lpsc, 0);
+ davinci_psc_config(psc_domain(clk), clk->gpsc, clk->lpsc,
+ (clk->flags & PSC_SWRSTDISABLE) ?
+ PSC_STATE_SWRSTDISABLE : PSC_STATE_DISABLE);
if (clk->parent)
__clk_disable(clk->parent);
}
@@ -233,7 +237,10 @@ static int __init clk_disable_unused(void)
continue;
pr_info("Clocks: disable unused %s\n", ck->name);
- davinci_psc_config(psc_domain(ck), ck->gpsc, ck->lpsc, 0);
+
+ davinci_psc_config(psc_domain(ck), ck->gpsc, ck->lpsc,
+ (ck->flags & PSC_SWRSTDISABLE) ?
+ PSC_STATE_SWRSTDISABLE : PSC_STATE_DISABLE);
}
spin_unlock_irq(&clockfw_lock);
@@ -272,7 +279,7 @@ static unsigned long clk_sysclk_recalc(struct clk *clk)
v = __raw_readl(pll->base + clk->div_reg);
if (v & PLLDIV_EN) {
- plldiv = (v & PLLDIV_RATIO_MASK) + 1;
+ plldiv = (v & pll->div_ratio_mask) + 1;
if (plldiv)
rate /= plldiv;
}
@@ -295,7 +302,6 @@ static unsigned long clk_pllclk_recalc(struct clk *clk)
struct pll_data *pll = clk->pll_data;
unsigned long rate = clk->rate;
- pll->base = IO_ADDRESS(pll->phys_base);
ctrl = __raw_readl(pll->base + PLLCTL);
rate = pll->input_rate = clk->parent->rate;
@@ -312,7 +318,7 @@ static unsigned long clk_pllclk_recalc(struct clk *clk)
if (pll->flags & PLL_HAS_PREDIV) {
prediv = __raw_readl(pll->base + PREDIV);
if (prediv & PLLDIV_EN)
- prediv = (prediv & PLLDIV_RATIO_MASK) + 1;
+ prediv = (prediv & pll->div_ratio_mask) + 1;
else
prediv = 1;
}
@@ -324,7 +330,7 @@ static unsigned long clk_pllclk_recalc(struct clk *clk)
if (pll->flags & PLL_HAS_POSTDIV) {
postdiv = __raw_readl(pll->base + POSTDIV);
if (postdiv & PLLDIV_EN)
- postdiv = (postdiv & PLLDIV_RATIO_MASK) + 1;
+ postdiv = (postdiv & pll->div_ratio_mask) + 1;
else
postdiv = 1;
}
@@ -451,6 +457,18 @@ int __init davinci_clk_init(struct clk_lookup *clocks)
clk->recalc = clk_leafclk_recalc;
}
+ if (clk->pll_data) {
+ struct pll_data *pll = clk->pll_data;
+
+ if (!pll->div_ratio_mask)
+ pll->div_ratio_mask = PLLDIV_RATIO_MASK;
+
+ if (pll->phys_base && !pll->base) {
+ pll->base = ioremap(pll->phys_base, SZ_4K);
+ WARN_ON(!pll->base);
+ }
+ }
+
if (clk->recalc)
clk->rate = clk->recalc(clk);
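With the PSC next-state now passed explicitly, a clock that must drop to SwRstDisable rather than Disable when released only needs the new flag in its declaration. A sketch, with the name and LPSC number purely illustrative:

static struct clk example_clk = {
	.name	= "example",
	.lpsc	= 12,			/* illustrative LPSC number */
	.flags	= CLK_PSC | PSC_SWRSTDISABLE,
};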
diff --git a/arch/arm/mach-davinci/clock.h b/arch/arm/mach-davinci/clock.h
index aa0a61150325..01e36483ac3d 100644
--- a/arch/arm/mach-davinci/clock.h
+++ b/arch/arm/mach-davinci/clock.h
@@ -76,6 +76,7 @@ struct pll_data {
u32 num;
u32 flags;
u32 input_rate;
+ u32 div_ratio_mask;
};
#define PLL_HAS_PREDIV 0x01
#define PLL_HAS_POSTDIV 0x02
@@ -101,10 +102,11 @@ struct clk {
/* Clock flags: SoC-specific flags start at BIT(16) */
#define ALWAYS_ENABLED BIT(1)
-#define CLK_PSC BIT(2)
-#define PSC_DSP BIT(3) /* PSC uses DSP domain, not ARM */
+#define CLK_PSC BIT(2)
+#define PSC_DSP BIT(3) /* PSC uses DSP domain, not ARM */
#define CLK_PLL BIT(4) /* PLL-derived clock */
-#define PRE_PLL BIT(5) /* source is before PLL mult/div */
+#define PRE_PLL BIT(5) /* source is before PLL mult/div */
+#define PSC_SWRSTDISABLE BIT(6) /* Disable state is SwRstDisable */
#define CLK(dev, con, ck) \
{ \
@@ -118,6 +120,7 @@ int davinci_set_pllrate(struct pll_data *pll, unsigned int prediv,
unsigned int mult, unsigned int postdiv);
extern struct platform_device davinci_wdt_device;
+extern void davinci_watchdog_reset(struct platform_device *);
#endif
diff --git a/arch/arm/mach-davinci/common.c b/arch/arm/mach-davinci/common.c
index 94f27cbcd55a..1d2557394235 100644
--- a/arch/arm/mach-davinci/common.c
+++ b/arch/arm/mach-davinci/common.c
@@ -37,26 +37,43 @@ void davinci_get_mac_addr(struct memory_accessor *mem_acc, void *context)
pr_info("Read MAC addr from EEPROM: %pM\n", mac_addr);
}
-static struct davinci_id * __init davinci_get_id(u32 jtag_id)
+static int __init davinci_init_id(struct davinci_soc_info *soc_info)
{
- int i;
- struct davinci_id *dip;
- u8 variant = (jtag_id & 0xf0000000) >> 28;
- u16 part_no = (jtag_id & 0x0ffff000) >> 12;
+ int i;
+ struct davinci_id *dip;
+ u8 variant;
+ u16 part_no;
+ void __iomem *base;
+
+ base = ioremap(soc_info->jtag_id_reg, SZ_4K);
+ if (!base) {
+ pr_err("Unable to map JTAG ID register\n");
+ return -ENOMEM;
+ }
+
+ soc_info->jtag_id = __raw_readl(base);
+ iounmap(base);
+
+ variant = (soc_info->jtag_id & 0xf0000000) >> 28;
+ part_no = (soc_info->jtag_id & 0x0ffff000) >> 12;
- for (i = 0, dip = davinci_soc_info.ids; i < davinci_soc_info.ids_num;
+ for (i = 0, dip = soc_info->ids; i < soc_info->ids_num;
i++, dip++)
/* Don't care about the manufacturer right now */
- if ((dip->part_no == part_no) && (dip->variant == variant))
- return dip;
-
- return NULL;
+ if ((dip->part_no == part_no) && (dip->variant == variant)) {
+ soc_info->cpu_id = dip->cpu_id;
+ pr_info("DaVinci %s variant 0x%x\n", dip->name,
+ dip->variant);
+ return 0;
+ }
+
+ pr_err("Unknown DaVinci JTAG ID 0x%x\n", soc_info->jtag_id);
+ return -EINVAL;
}
void __init davinci_common_init(struct davinci_soc_info *soc_info)
{
int ret;
- struct davinci_id *dip;
if (!soc_info) {
ret = -EINVAL;
@@ -77,22 +94,16 @@ void __init davinci_common_init(struct davinci_soc_info *soc_info)
local_flush_tlb_all();
flush_cache_all();
+ if (!davinci_soc_info.reset)
+ davinci_soc_info.reset = davinci_watchdog_reset;
+
/*
* We want to check CPU revision early for cpu_is_xxxx() macros.
* IO space mapping must be initialized before we can do that.
*/
- davinci_soc_info.jtag_id = __raw_readl(davinci_soc_info.jtag_id_base);
-
- dip = davinci_get_id(davinci_soc_info.jtag_id);
- if (!dip) {
- ret = -EINVAL;
- pr_err("Unknown DaVinci JTAG ID 0x%x\n",
- davinci_soc_info.jtag_id);
+ ret = davinci_init_id(&davinci_soc_info);
+ if (ret < 0)
goto err;
- }
-
- davinci_soc_info.cpu_id = dip->cpu_id;
- pr_info("DaVinci %s variant 0x%x\n", dip->name, dip->variant);
if (davinci_soc_info.cpu_clks) {
ret = davinci_clk_init(davinci_soc_info.cpu_clks);
@@ -101,8 +112,6 @@ void __init davinci_common_init(struct davinci_soc_info *soc_info)
goto err;
}
- davinci_intc_base = davinci_soc_info.intc_base;
- davinci_intc_type = davinci_soc_info.intc_type;
return;
err:
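davinci_init_id() now maps the JTAG ID register itself and splits the word into a 4-bit variant (bits 31:28) and a 16-bit part number (bits 27:12). A worked example with an illustrative register value:

/*
 * Illustrative only: jtag_id = 0x1b7df02f
 *   variant = (0x1b7df02f & 0xf0000000) >> 28 = 0x1
 *   part_no = (0x1b7df02f & 0x0ffff000) >> 12 = 0xb7df
 */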
diff --git a/arch/arm/mach-davinci/cp_intc.c b/arch/arm/mach-davinci/cp_intc.c
index 37311d1830eb..bb4c40ecb803 100644
--- a/arch/arm/mach-davinci/cp_intc.c
+++ b/arch/arm/mach-davinci/cp_intc.c
@@ -13,18 +13,17 @@
#include <linux/irq.h>
#include <linux/io.h>
+#include <mach/common.h>
#include <mach/cp_intc.h>
-static void __iomem *cp_intc_base;
-
static inline unsigned int cp_intc_read(unsigned offset)
{
- return __raw_readl(cp_intc_base + offset);
+ return __raw_readl(davinci_intc_base + offset);
}
static inline void cp_intc_write(unsigned long value, unsigned offset)
{
- __raw_writel(value, cp_intc_base + offset);
+ __raw_writel(value, davinci_intc_base + offset);
}
static void cp_intc_ack_irq(unsigned int irq)
@@ -100,13 +99,18 @@ static struct irq_chip cp_intc_irq_chip = {
.set_wake = cp_intc_set_wake,
};
-void __init cp_intc_init(void __iomem *base, unsigned short num_irq,
- u8 *irq_prio)
+void __init cp_intc_init(void)
{
+ unsigned long num_irq = davinci_soc_info.intc_irq_num;
+ u8 *irq_prio = davinci_soc_info.intc_irq_prios;
+ u32 *host_map = davinci_soc_info.intc_host_map;
unsigned num_reg = BITS_TO_LONGS(num_irq);
int i;
- cp_intc_base = base;
+ davinci_intc_type = DAVINCI_INTC_TYPE_CP_INTC;
+ davinci_intc_base = ioremap(davinci_soc_info.intc_base, SZ_8K);
+ if (WARN_ON(!davinci_intc_base))
+ return;
cp_intc_write(0, CP_INTC_GLOBAL_ENABLE);
@@ -157,6 +161,10 @@ void __init cp_intc_init(void __iomem *base, unsigned short num_irq,
cp_intc_write(0x0f0f0f0f, CP_INTC_CHAN_MAP(i));
}
+ if (host_map)
+ for (i = 0; host_map[i] != -1; i++)
+ cp_intc_write(host_map[i], CP_INTC_HOST_MAP(i));
+
/* Set up genirq dispatching for cp_intc */
for (i = 0; i < num_irq; i++) {
set_irq_chip(i, &cp_intc_irq_chip);
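cp_intc_init() now pulls everything from davinci_soc_info, including an optional channel-to-host map terminated by -1. A sketch of how a SoC table could supply one; the mapping values are illustrative:

static u32 example_intc_host_map[] = { 2, 0, 0, 0, -1 };

static struct davinci_soc_info example_soc_info = {
	/* ... other fields as in the SoC files below ... */
	.intc_host_map	= example_intc_host_map,
};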
diff --git a/arch/arm/mach-davinci/da830.c b/arch/arm/mach-davinci/da830.c
index 122e61a9f505..23e9eda5a377 100644
--- a/arch/arm/mach-davinci/da830.c
+++ b/arch/arm/mach-davinci/da830.c
@@ -19,6 +19,7 @@
#include <mach/common.h>
#include <mach/time.h>
#include <mach/da8xx.h>
+#include <mach/gpio.h>
#include "clock.h"
#include "mux.h"
@@ -410,7 +411,7 @@ static struct clk_lookup da830_clks[] = {
CLK("davinci-mcasp.0", NULL, &mcasp0_clk),
CLK("davinci-mcasp.1", NULL, &mcasp1_clk),
CLK("davinci-mcasp.2", NULL, &mcasp2_clk),
- CLK("musb_hdrc", NULL, &usb20_clk),
+ CLK(NULL, "usb20", &usb20_clk),
CLK(NULL, "aemif", &aemif_clk),
CLK(NULL, "aintc", &aintc_clk),
CLK(NULL, "secu_mgr", &secu_mgr_clk),
@@ -1126,10 +1127,7 @@ static struct map_desc da830_io_desc[] = {
},
};
-static void __iomem *da830_psc_bases[] = {
- IO_ADDRESS(DA8XX_PSC0_BASE),
- IO_ADDRESS(DA8XX_PSC1_BASE),
-};
+static u32 da830_psc_bases[] = { DA8XX_PSC0_BASE, DA8XX_PSC1_BASE };
/* Contents of JTAG ID register used to identify exact cpu type */
static struct davinci_id da830_ids[] = {
@@ -1158,14 +1156,14 @@ static struct davinci_id da830_ids[] = {
static struct davinci_timer_instance da830_timer_instance[2] = {
{
- .base = IO_ADDRESS(DA8XX_TIMER64P0_BASE),
+ .base = DA8XX_TIMER64P0_BASE,
.bottom_irq = IRQ_DA8XX_TINT12_0,
.top_irq = IRQ_DA8XX_TINT34_0,
.cmp_off = DA830_CMP12_0,
.cmp_irq = IRQ_DA830_T12CMPINT0_0,
},
{
- .base = IO_ADDRESS(DA8XX_TIMER64P1_BASE),
+ .base = DA8XX_TIMER64P1_BASE,
.bottom_irq = IRQ_DA8XX_TINT12_1,
.top_irq = IRQ_DA8XX_TINT34_1,
.cmp_off = DA830_CMP12_0,
@@ -1187,34 +1185,33 @@ static struct davinci_timer_info da830_timer_info = {
static struct davinci_soc_info davinci_soc_info_da830 = {
.io_desc = da830_io_desc,
.io_desc_num = ARRAY_SIZE(da830_io_desc),
+ .jtag_id_reg = DA8XX_SYSCFG0_BASE + DA8XX_JTAG_ID_REG,
.ids = da830_ids,
.ids_num = ARRAY_SIZE(da830_ids),
.cpu_clks = da830_clks,
.psc_bases = da830_psc_bases,
.psc_bases_num = ARRAY_SIZE(da830_psc_bases),
+ .pinmux_base = DA8XX_SYSCFG0_BASE + 0x120,
.pinmux_pins = da830_pins,
.pinmux_pins_num = ARRAY_SIZE(da830_pins),
- .intc_base = (void __iomem *)DA8XX_CP_INTC_VIRT,
+ .intc_base = DA8XX_CP_INTC_BASE,
.intc_type = DAVINCI_INTC_TYPE_CP_INTC,
.intc_irq_prios = da830_default_priorities,
.intc_irq_num = DA830_N_CP_INTC_IRQ,
.timer_info = &da830_timer_info,
- .gpio_base = IO_ADDRESS(DA8XX_GPIO_BASE),
+ .gpio_type = GPIO_TYPE_DAVINCI,
+ .gpio_base = DA8XX_GPIO_BASE,
.gpio_num = 128,
.gpio_irq = IRQ_DA8XX_GPIO0,
.serial_dev = &da8xx_serial_device,
.emac_pdata = &da8xx_emac_pdata,
+ .reset_device = &da8xx_wdt_device,
};
void __init da830_init(void)
{
- da8xx_syscfg0_base = ioremap(DA8XX_SYSCFG0_BASE, SZ_4K);
- if (WARN(!da8xx_syscfg0_base, "Unable to map syscfg0 module"))
- return;
-
- davinci_soc_info_da830.jtag_id_base =
- DA8XX_SYSCFG0_VIRT(DA8XX_JTAG_ID_REG);
- davinci_soc_info_da830.pinmux_base = DA8XX_SYSCFG0_VIRT(0x120);
-
davinci_common_init(&davinci_soc_info_da830);
+
+ da8xx_syscfg0_base = ioremap(DA8XX_SYSCFG0_BASE, SZ_4K);
+ WARN(!da8xx_syscfg0_base, "Unable to map syscfg0 module");
}
diff --git a/arch/arm/mach-davinci/da850.c b/arch/arm/mach-davinci/da850.c
index d0fd7566712a..6b8331bf8cf3 100644
--- a/arch/arm/mach-davinci/da850.c
+++ b/arch/arm/mach-davinci/da850.c
@@ -27,6 +27,7 @@
#include <mach/da8xx.h>
#include <mach/cpufreq.h>
#include <mach/pm.h>
+#include <mach/gpio.h>
#include "clock.h"
#include "mux.h"
@@ -781,10 +782,7 @@ static struct map_desc da850_io_desc[] = {
},
};
-static void __iomem *da850_psc_bases[] = {
- IO_ADDRESS(DA8XX_PSC0_BASE),
- IO_ADDRESS(DA8XX_PSC1_BASE),
-};
+static u32 da850_psc_bases[] = { DA8XX_PSC0_BASE, DA8XX_PSC1_BASE };
/* Contents of JTAG ID register used to identify exact cpu type */
static struct davinci_id da850_ids[] = {
@@ -799,22 +797,22 @@ static struct davinci_id da850_ids[] = {
static struct davinci_timer_instance da850_timer_instance[4] = {
{
- .base = IO_ADDRESS(DA8XX_TIMER64P0_BASE),
+ .base = DA8XX_TIMER64P0_BASE,
.bottom_irq = IRQ_DA8XX_TINT12_0,
.top_irq = IRQ_DA8XX_TINT34_0,
},
{
- .base = IO_ADDRESS(DA8XX_TIMER64P1_BASE),
+ .base = DA8XX_TIMER64P1_BASE,
.bottom_irq = IRQ_DA8XX_TINT12_1,
.top_irq = IRQ_DA8XX_TINT34_1,
},
{
- .base = IO_ADDRESS(DA850_TIMER64P2_BASE),
+ .base = DA850_TIMER64P2_BASE,
.bottom_irq = IRQ_DA850_TINT12_2,
.top_irq = IRQ_DA850_TINT34_2,
},
{
- .base = IO_ADDRESS(DA850_TIMER64P3_BASE),
+ .base = DA850_TIMER64P3_BASE,
.bottom_irq = IRQ_DA850_TINT12_3,
.top_irq = IRQ_DA850_TINT34_3,
},
@@ -1072,31 +1070,37 @@ no_ddrpll_mem:
static struct davinci_soc_info davinci_soc_info_da850 = {
.io_desc = da850_io_desc,
.io_desc_num = ARRAY_SIZE(da850_io_desc),
+ .jtag_id_reg = DA8XX_SYSCFG0_BASE + DA8XX_JTAG_ID_REG,
.ids = da850_ids,
.ids_num = ARRAY_SIZE(da850_ids),
.cpu_clks = da850_clks,
.psc_bases = da850_psc_bases,
.psc_bases_num = ARRAY_SIZE(da850_psc_bases),
+ .pinmux_base = DA8XX_SYSCFG0_BASE + 0x120,
.pinmux_pins = da850_pins,
.pinmux_pins_num = ARRAY_SIZE(da850_pins),
- .intc_base = (void __iomem *)DA8XX_CP_INTC_VIRT,
+ .intc_base = DA8XX_CP_INTC_BASE,
.intc_type = DAVINCI_INTC_TYPE_CP_INTC,
.intc_irq_prios = da850_default_priorities,
.intc_irq_num = DA850_N_CP_INTC_IRQ,
.timer_info = &da850_timer_info,
- .gpio_base = IO_ADDRESS(DA8XX_GPIO_BASE),
+ .gpio_type = GPIO_TYPE_DAVINCI,
+ .gpio_base = DA8XX_GPIO_BASE,
.gpio_num = 144,
.gpio_irq = IRQ_DA8XX_GPIO0,
.serial_dev = &da8xx_serial_device,
.emac_pdata = &da8xx_emac_pdata,
.sram_dma = DA8XX_ARM_RAM_BASE,
.sram_len = SZ_8K,
+ .reset_device = &da8xx_wdt_device,
};
void __init da850_init(void)
{
unsigned int v;
+ davinci_common_init(&davinci_soc_info_da850);
+
da8xx_syscfg0_base = ioremap(DA8XX_SYSCFG0_BASE, SZ_4K);
if (WARN(!da8xx_syscfg0_base, "Unable to map syscfg0 module"))
return;
@@ -1105,12 +1109,6 @@ void __init da850_init(void)
if (WARN(!da8xx_syscfg1_base, "Unable to map syscfg1 module"))
return;
- davinci_soc_info_da850.jtag_id_base =
- DA8XX_SYSCFG0_VIRT(DA8XX_JTAG_ID_REG);
- davinci_soc_info_da850.pinmux_base = DA8XX_SYSCFG0_VIRT(0x120);
-
- davinci_common_init(&davinci_soc_info_da850);
-
/*
* Move the clock source of Async3 domain to PLL1 SYSCLK2.
 * This helps keep the peripherals on this domain insulated
diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c
index 0a96791d3b0f..8cda729be273 100644
--- a/arch/arm/mach-davinci/devices-da8xx.c
+++ b/arch/arm/mach-davinci/devices-da8xx.c
@@ -326,7 +326,7 @@ static struct resource da8xx_watchdog_resources[] = {
},
};
-struct platform_device davinci_wdt_device = {
+struct platform_device da8xx_wdt_device = {
.name = "watchdog",
.id = -1,
.num_resources = ARRAY_SIZE(da8xx_watchdog_resources),
@@ -335,7 +335,7 @@ struct platform_device davinci_wdt_device = {
int __init da8xx_register_watchdog(void)
{
- return platform_device_register(&davinci_wdt_device);
+ return platform_device_register(&da8xx_wdt_device);
}
static struct resource da8xx_emac_resources[] = {
@@ -584,10 +584,17 @@ static struct platform_device da8xx_rtc_device = {
int da8xx_register_rtc(void)
{
int ret;
+ void __iomem *base;
+
+ base = ioremap(DA8XX_RTC_BASE, SZ_4K);
+ if (WARN_ON(!base))
+ return -ENOMEM;
/* Unlock the rtc's registers */
- __raw_writel(0x83e70b13, IO_ADDRESS(DA8XX_RTC_BASE + 0x6c));
- __raw_writel(0x95a4f1e0, IO_ADDRESS(DA8XX_RTC_BASE + 0x70));
+ __raw_writel(0x83e70b13, base + 0x6c);
+ __raw_writel(0x95a4f1e0, base + 0x70);
+
+ iounmap(base);
ret = platform_device_register(&da8xx_rtc_device);
if (!ret)
diff --git a/arch/arm/mach-davinci/devices.c b/arch/arm/mach-davinci/devices.c
index 147949650c25..8b7201e4c79c 100644
--- a/arch/arm/mach-davinci/devices.c
+++ b/arch/arm/mach-davinci/devices.c
@@ -23,7 +23,10 @@
#include <mach/mmc.h>
#include <mach/time.h>
+#include "clock.h"
+
#define DAVINCI_I2C_BASE 0x01C21000
+#define DAVINCI_ATA_BASE 0x01C66000
#define DAVINCI_MMCSD0_BASE 0x01E10000
#define DM355_MMCSD0_BASE 0x01E11000
#define DM355_MMCSD1_BASE 0x01E00000
@@ -58,6 +61,49 @@ void __init davinci_init_i2c(struct davinci_i2c_platform_data *pdata)
(void) platform_device_register(&davinci_i2c_device);
}
+static struct resource ide_resources[] = {
+ {
+ .start = DAVINCI_ATA_BASE,
+ .end = DAVINCI_ATA_BASE + 0x7ff,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = IRQ_IDE,
+ .end = IRQ_IDE,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static u64 ide_dma_mask = DMA_BIT_MASK(32);
+
+static struct platform_device ide_device = {
+ .name = "palm_bk3710",
+ .id = -1,
+ .resource = ide_resources,
+ .num_resources = ARRAY_SIZE(ide_resources),
+ .dev = {
+ .dma_mask = &ide_dma_mask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+};
+
+void __init davinci_init_ide(void)
+{
+ if (cpu_is_davinci_dm644x()) {
+ davinci_cfg_reg(DM644X_HPIEN_DISABLE);
+ davinci_cfg_reg(DM644X_ATAEN);
+ davinci_cfg_reg(DM644X_HDIREN);
+ } else if (cpu_is_davinci_dm646x()) {
+ /* IRQ_DM646X_IDE is the same as IRQ_IDE */
+ davinci_cfg_reg(DM646X_ATAEN);
+ } else {
+ WARN_ON(1);
+ return;
+ }
+
+ platform_device_register(&ide_device);
+}
+
#if defined(CONFIG_MMC_DAVINCI) || defined(CONFIG_MMC_DAVINCI_MODULE)
static u64 mmcsd0_dma_mask = DMA_BIT_MASK(32);
@@ -251,12 +297,12 @@ static void davinci_init_wdt(void)
struct davinci_timer_instance davinci_timer_instance[2] = {
{
- .base = IO_ADDRESS(DAVINCI_TIMER0_BASE),
+ .base = DAVINCI_TIMER0_BASE,
.bottom_irq = IRQ_TINT0_TINT12,
.top_irq = IRQ_TINT0_TINT34,
},
{
- .base = IO_ADDRESS(DAVINCI_TIMER1_BASE),
+ .base = DAVINCI_TIMER1_BASE,
.bottom_irq = IRQ_TINT1_TINT12,
.top_irq = IRQ_TINT1_TINT34,
},
diff --git a/arch/arm/mach-davinci/dm355.c b/arch/arm/mach-davinci/dm355.c
index 3dc0a88712eb..383478116ef5 100644
--- a/arch/arm/mach-davinci/dm355.c
+++ b/arch/arm/mach-davinci/dm355.c
@@ -450,11 +450,6 @@ void __init dm355_init_spi0(unsigned chipselect_mask,
/*----------------------------------------------------------------------*/
-#define PINMUX0 0x00
-#define PINMUX1 0x04
-#define PINMUX2 0x08
-#define PINMUX3 0x0c
-#define PINMUX4 0x10
#define INTMUX 0x18
#define EVTMUX 0x1c
@@ -788,9 +783,7 @@ static struct davinci_id dm355_ids[] = {
},
};
-static void __iomem *dm355_psc_bases[] = {
- IO_ADDRESS(DAVINCI_PWR_SLEEP_CNTRL_BASE),
-};
+static u32 dm355_psc_bases[] = { DAVINCI_PWR_SLEEP_CNTRL_BASE };
/*
* T0_BOT: Timer 0, bottom: clockevent source for hrtimers
@@ -798,7 +791,7 @@ static void __iomem *dm355_psc_bases[] = {
* T1_BOT: Timer 1, bottom: (used by DSP in TI DSPLink code)
* T1_TOP: Timer 1, top : <unused>
*/
-struct davinci_timer_info dm355_timer_info = {
+static struct davinci_timer_info dm355_timer_info = {
.timers = davinci_timer_instance,
.clockevent_id = T0_BOT,
.clocksource_id = T0_TOP,
@@ -845,26 +838,28 @@ static struct platform_device dm355_serial_device = {
static struct davinci_soc_info davinci_soc_info_dm355 = {
.io_desc = dm355_io_desc,
.io_desc_num = ARRAY_SIZE(dm355_io_desc),
- .jtag_id_base = IO_ADDRESS(0x01c40028),
+ .jtag_id_reg = 0x01c40028,
.ids = dm355_ids,
.ids_num = ARRAY_SIZE(dm355_ids),
.cpu_clks = dm355_clks,
.psc_bases = dm355_psc_bases,
.psc_bases_num = ARRAY_SIZE(dm355_psc_bases),
- .pinmux_base = IO_ADDRESS(DAVINCI_SYSTEM_MODULE_BASE),
+ .pinmux_base = DAVINCI_SYSTEM_MODULE_BASE,
.pinmux_pins = dm355_pins,
.pinmux_pins_num = ARRAY_SIZE(dm355_pins),
- .intc_base = IO_ADDRESS(DAVINCI_ARM_INTC_BASE),
+ .intc_base = DAVINCI_ARM_INTC_BASE,
.intc_type = DAVINCI_INTC_TYPE_AINTC,
.intc_irq_prios = dm355_default_priorities,
.intc_irq_num = DAVINCI_N_AINTC_IRQ,
.timer_info = &dm355_timer_info,
- .gpio_base = IO_ADDRESS(DAVINCI_GPIO_BASE),
+ .gpio_type = GPIO_TYPE_DAVINCI,
+ .gpio_base = DAVINCI_GPIO_BASE,
.gpio_num = 104,
.gpio_irq = IRQ_DM355_GPIOBNK0,
.serial_dev = &dm355_serial_device,
.sram_dma = 0x00010000,
.sram_len = SZ_32K,
+ .reset_device = &davinci_wdt_device,
};
void __init dm355_init_asp1(u32 evt_enable, struct snd_platform_data *pdata)
diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c
index 0d6ee583f65c..a146849d78f0 100644
--- a/arch/arm/mach-davinci/dm365.c
+++ b/arch/arm/mach-davinci/dm365.c
@@ -467,11 +467,6 @@ static struct clk_lookup dm365_clks[] = {
/*----------------------------------------------------------------------*/
-#define PINMUX0 0x00
-#define PINMUX1 0x04
-#define PINMUX2 0x08
-#define PINMUX3 0x0c
-#define PINMUX4 0x10
#define INTMUX 0x18
#define EVTMUX 0x1c
@@ -490,11 +485,14 @@ MUX_CFG(DM365, SD1_DATA0, 4, 22, 3, 1, false)
MUX_CFG(DM365, I2C_SDA, 3, 23, 3, 2, false)
MUX_CFG(DM365, I2C_SCL, 3, 21, 3, 2, false)
-MUX_CFG(DM365, AEMIF_AR, 2, 0, 3, 1, false)
+MUX_CFG(DM365, AEMIF_AR_A14, 2, 0, 3, 1, false)
+MUX_CFG(DM365, AEMIF_AR_BA0, 2, 0, 3, 2, false)
MUX_CFG(DM365, AEMIF_A3, 2, 2, 3, 1, false)
MUX_CFG(DM365, AEMIF_A7, 2, 4, 3, 1, false)
MUX_CFG(DM365, AEMIF_D15_8, 2, 6, 1, 1, false)
MUX_CFG(DM365, AEMIF_CE0, 2, 7, 1, 0, false)
+MUX_CFG(DM365, AEMIF_CE1, 2, 8, 1, 0, false)
+MUX_CFG(DM365, AEMIF_WE_OE, 2, 9, 1, 0, false)
MUX_CFG(DM365, MCBSP0_BDX, 0, 23, 1, 1, false)
MUX_CFG(DM365, MCBSP0_X, 0, 22, 1, 1, false)
@@ -573,9 +571,17 @@ MUX_CFG(DM365, SPI4_SDO, 4, 16, 3, 1, false)
MUX_CFG(DM365, SPI4_SDENA0, 4, 20, 3, 1, false)
MUX_CFG(DM365, SPI4_SDENA1, 4, 16, 3, 2, false)
+MUX_CFG(DM365, CLKOUT0, 4, 20, 3, 3, false)
+MUX_CFG(DM365, CLKOUT1, 4, 16, 3, 3, false)
+MUX_CFG(DM365, CLKOUT2, 4, 8, 3, 3, false)
+
MUX_CFG(DM365, GPIO20, 3, 21, 3, 0, false)
+MUX_CFG(DM365, GPIO30, 4, 6, 3, 0, false)
+MUX_CFG(DM365, GPIO31, 4, 8, 3, 0, false)
+MUX_CFG(DM365, GPIO32, 4, 10, 3, 0, false)
MUX_CFG(DM365, GPIO33, 4, 12, 3, 0, false)
MUX_CFG(DM365, GPIO40, 4, 26, 3, 0, false)
+MUX_CFG(DM365, GPIO64_57, 2, 6, 1, 0, false)
MUX_CFG(DM365, VOUT_FIELD, 1, 18, 3, 1, false)
MUX_CFG(DM365, VOUT_FIELD_G81, 1, 18, 3, 0, false)
@@ -1006,11 +1012,9 @@ static struct davinci_id dm365_ids[] = {
},
};
-static void __iomem *dm365_psc_bases[] = {
- IO_ADDRESS(DAVINCI_PWR_SLEEP_CNTRL_BASE),
-};
+static u32 dm365_psc_bases[] = { DAVINCI_PWR_SLEEP_CNTRL_BASE };
-struct davinci_timer_info dm365_timer_info = {
+static struct davinci_timer_info dm365_timer_info = {
.timers = davinci_timer_instance,
.clockevent_id = T0_BOT,
.clocksource_id = T0_TOP,
@@ -1049,21 +1053,22 @@ static struct platform_device dm365_serial_device = {
static struct davinci_soc_info davinci_soc_info_dm365 = {
.io_desc = dm365_io_desc,
.io_desc_num = ARRAY_SIZE(dm365_io_desc),
- .jtag_id_base = IO_ADDRESS(0x01c40028),
+ .jtag_id_reg = 0x01c40028,
.ids = dm365_ids,
.ids_num = ARRAY_SIZE(dm365_ids),
.cpu_clks = dm365_clks,
.psc_bases = dm365_psc_bases,
.psc_bases_num = ARRAY_SIZE(dm365_psc_bases),
- .pinmux_base = IO_ADDRESS(DAVINCI_SYSTEM_MODULE_BASE),
+ .pinmux_base = DAVINCI_SYSTEM_MODULE_BASE,
.pinmux_pins = dm365_pins,
.pinmux_pins_num = ARRAY_SIZE(dm365_pins),
- .intc_base = IO_ADDRESS(DAVINCI_ARM_INTC_BASE),
+ .intc_base = DAVINCI_ARM_INTC_BASE,
.intc_type = DAVINCI_INTC_TYPE_AINTC,
.intc_irq_prios = dm365_default_priorities,
.intc_irq_num = DAVINCI_N_AINTC_IRQ,
.timer_info = &dm365_timer_info,
- .gpio_base = IO_ADDRESS(DAVINCI_GPIO_BASE),
+ .gpio_type = GPIO_TYPE_DAVINCI,
+ .gpio_base = DAVINCI_GPIO_BASE,
.gpio_num = 104,
.gpio_irq = IRQ_DM365_GPIO0,
.gpio_unbanked = 8, /* really 16 ... skip muxed GPIOs */
@@ -1071,6 +1076,7 @@ static struct davinci_soc_info davinci_soc_info_dm365 = {
.emac_pdata = &dm365_emac_pdata,
.sram_dma = 0x00010000,
.sram_len = SZ_32K,
+ .reset_device = &davinci_wdt_device,
};
void __init dm365_init_asp(struct snd_platform_data *pdata)
diff --git a/arch/arm/mach-davinci/dm644x.c b/arch/arm/mach-davinci/dm644x.c
index 2f2ae8bc77bb..7ad15208b841 100644
--- a/arch/arm/mach-davinci/dm644x.c
+++ b/arch/arm/mach-davinci/dm644x.c
@@ -277,7 +277,7 @@ static struct clk timer2_clk = {
.usecount = 1, /* REVISIT: why can't this be disabled? */
};
-struct clk_lookup dm644x_clks[] = {
+static struct clk_lookup dm644x_clks[] = {
CLK(NULL, "ref", &ref_clk),
CLK(NULL, "pll1", &pll1_clk),
CLK(NULL, "pll1_sysclk1", &pll1_sysclk1),
@@ -350,9 +350,6 @@ static struct platform_device dm644x_emac_device = {
.resource = dm644x_emac_resources,
};
-#define PINMUX0 0x00
-#define PINMUX1 0x04
-
/*
* Device specific mux setup
*
@@ -677,9 +674,7 @@ static struct davinci_id dm644x_ids[] = {
},
};
-static void __iomem *dm644x_psc_bases[] = {
- IO_ADDRESS(DAVINCI_PWR_SLEEP_CNTRL_BASE),
-};
+static u32 dm644x_psc_bases[] = { DAVINCI_PWR_SLEEP_CNTRL_BASE };
/*
* T0_BOT: Timer 0, bottom: clockevent source for hrtimers
@@ -687,7 +682,7 @@ static void __iomem *dm644x_psc_bases[] = {
* T1_BOT: Timer 1, bottom: (used by DSP in TI DSPLink code)
* T1_TOP: Timer 1, top : <unused>
*/
-struct davinci_timer_info dm644x_timer_info = {
+static struct davinci_timer_info dm644x_timer_info = {
.timers = davinci_timer_instance,
.clockevent_id = T0_BOT,
.clocksource_id = T0_TOP,
@@ -734,27 +729,29 @@ static struct platform_device dm644x_serial_device = {
static struct davinci_soc_info davinci_soc_info_dm644x = {
.io_desc = dm644x_io_desc,
.io_desc_num = ARRAY_SIZE(dm644x_io_desc),
- .jtag_id_base = IO_ADDRESS(0x01c40028),
+ .jtag_id_reg = 0x01c40028,
.ids = dm644x_ids,
.ids_num = ARRAY_SIZE(dm644x_ids),
.cpu_clks = dm644x_clks,
.psc_bases = dm644x_psc_bases,
.psc_bases_num = ARRAY_SIZE(dm644x_psc_bases),
- .pinmux_base = IO_ADDRESS(DAVINCI_SYSTEM_MODULE_BASE),
+ .pinmux_base = DAVINCI_SYSTEM_MODULE_BASE,
.pinmux_pins = dm644x_pins,
.pinmux_pins_num = ARRAY_SIZE(dm644x_pins),
- .intc_base = IO_ADDRESS(DAVINCI_ARM_INTC_BASE),
+ .intc_base = DAVINCI_ARM_INTC_BASE,
.intc_type = DAVINCI_INTC_TYPE_AINTC,
.intc_irq_prios = dm644x_default_priorities,
.intc_irq_num = DAVINCI_N_AINTC_IRQ,
.timer_info = &dm644x_timer_info,
- .gpio_base = IO_ADDRESS(DAVINCI_GPIO_BASE),
+ .gpio_type = GPIO_TYPE_DAVINCI,
+ .gpio_base = DAVINCI_GPIO_BASE,
.gpio_num = 71,
.gpio_irq = IRQ_GPIOBNK0,
.serial_dev = &dm644x_serial_device,
.emac_pdata = &dm644x_emac_pdata,
.sram_dma = 0x00008000,
.sram_len = SZ_16K,
+ .reset_device = &davinci_wdt_device,
};
void __init dm644x_init_asp(struct snd_platform_data *pdata)
diff --git a/arch/arm/mach-davinci/dm646x.c b/arch/arm/mach-davinci/dm646x.c
index 893baf4ad37d..94045656cff6 100644
--- a/arch/arm/mach-davinci/dm646x.c
+++ b/arch/arm/mach-davinci/dm646x.c
@@ -311,7 +311,7 @@ static struct clk vpif1_clk = {
.flags = ALWAYS_ENABLED,
};
-struct clk_lookup dm646x_clks[] = {
+static struct clk_lookup dm646x_clks[] = {
CLK(NULL, "ref", &ref_clk),
CLK(NULL, "aux", &aux_clkin),
CLK(NULL, "pll1", &pll1_clk),
@@ -401,9 +401,6 @@ static struct platform_device dm646x_emac_device = {
.resource = dm646x_emac_resources,
};
-#define PINMUX0 0x00
-#define PINMUX1 0x04
-
/*
* Device specific mux setup
*
@@ -596,32 +593,6 @@ static struct platform_device dm646x_edma_device = {
.resource = edma_resources,
};
-static struct resource ide_resources[] = {
- {
- .start = DM646X_ATA_REG_BASE,
- .end = DM646X_ATA_REG_BASE + 0x7ff,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = IRQ_DM646X_IDE,
- .end = IRQ_DM646X_IDE,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static u64 ide_dma_mask = DMA_BIT_MASK(32);
-
-static struct platform_device ide_dev = {
- .name = "palm_bk3710",
- .id = -1,
- .resource = ide_resources,
- .num_resources = ARRAY_SIZE(ide_resources),
- .dev = {
- .dma_mask = &ide_dma_mask,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- },
-};
-
static struct resource dm646x_mcasp0_resources[] = {
{
.name = "mcasp0",
@@ -787,9 +758,7 @@ static struct davinci_id dm646x_ids[] = {
},
};
-static void __iomem *dm646x_psc_bases[] = {
- IO_ADDRESS(DAVINCI_PWR_SLEEP_CNTRL_BASE),
-};
+static u32 dm646x_psc_bases[] = { DAVINCI_PWR_SLEEP_CNTRL_BASE };
/*
* T0_BOT: Timer 0, bottom: clockevent source for hrtimers
@@ -797,7 +766,7 @@ static void __iomem *dm646x_psc_bases[] = {
* T1_BOT: Timer 1, bottom: (used by DSP in TI DSPLink code)
* T1_TOP: Timer 1, top : <unused>
*/
-struct davinci_timer_info dm646x_timer_info = {
+static struct davinci_timer_info dm646x_timer_info = {
.timers = davinci_timer_instance,
.clockevent_id = T0_BOT,
.clocksource_id = T0_TOP,
@@ -844,35 +813,31 @@ static struct platform_device dm646x_serial_device = {
static struct davinci_soc_info davinci_soc_info_dm646x = {
.io_desc = dm646x_io_desc,
.io_desc_num = ARRAY_SIZE(dm646x_io_desc),
- .jtag_id_base = IO_ADDRESS(0x01c40028),
+ .jtag_id_reg = 0x01c40028,
.ids = dm646x_ids,
.ids_num = ARRAY_SIZE(dm646x_ids),
.cpu_clks = dm646x_clks,
.psc_bases = dm646x_psc_bases,
.psc_bases_num = ARRAY_SIZE(dm646x_psc_bases),
- .pinmux_base = IO_ADDRESS(DAVINCI_SYSTEM_MODULE_BASE),
+ .pinmux_base = DAVINCI_SYSTEM_MODULE_BASE,
.pinmux_pins = dm646x_pins,
.pinmux_pins_num = ARRAY_SIZE(dm646x_pins),
- .intc_base = IO_ADDRESS(DAVINCI_ARM_INTC_BASE),
+ .intc_base = DAVINCI_ARM_INTC_BASE,
.intc_type = DAVINCI_INTC_TYPE_AINTC,
.intc_irq_prios = dm646x_default_priorities,
.intc_irq_num = DAVINCI_N_AINTC_IRQ,
.timer_info = &dm646x_timer_info,
- .gpio_base = IO_ADDRESS(DAVINCI_GPIO_BASE),
+ .gpio_type = GPIO_TYPE_DAVINCI,
+ .gpio_base = DAVINCI_GPIO_BASE,
.gpio_num = 43, /* Only 33 usable */
.gpio_irq = IRQ_DM646X_GPIOBNK0,
.serial_dev = &dm646x_serial_device,
.emac_pdata = &dm646x_emac_pdata,
.sram_dma = 0x10010000,
.sram_len = SZ_32K,
+ .reset_device = &davinci_wdt_device,
};
-void __init dm646x_init_ide()
-{
- davinci_cfg_reg(DM646X_ATAEN);
- platform_device_register(&ide_dev);
-}
-
void __init dm646x_init_mcasp0(struct snd_platform_data *pdata)
{
dm646x_mcasp0_device.dev.platform_data = pdata;
diff --git a/arch/arm/mach-davinci/dma.c b/arch/arm/mach-davinci/dma.c
index 53137387aee1..d33827aadda7 100644
--- a/arch/arm/mach-davinci/dma.c
+++ b/arch/arm/mach-davinci/dma.c
@@ -243,7 +243,7 @@ struct edma {
} intr_data[EDMA_MAX_DMACH];
};
-static struct edma *edma_info[EDMA_MAX_CC];
+static struct edma *edma_cc[EDMA_MAX_CC];
static int arch_num_cc;
/* dummy param set used to (re)initialize parameter RAM slots */
@@ -261,7 +261,7 @@ static void map_dmach_queue(unsigned ctlr, unsigned ch_no,
/* default to low priority queue */
if (queue_no == EVENTQ_DEFAULT)
- queue_no = edma_info[ctlr]->default_queue;
+ queue_no = edma_cc[ctlr]->default_queue;
queue_no &= 7;
edma_modify_array(ctlr, EDMA_DMAQNUM, (ch_no >> 3),
@@ -310,29 +310,27 @@ setup_dma_interrupt(unsigned lch,
ctlr = EDMA_CTLR(lch);
lch = EDMA_CHAN_SLOT(lch);
- if (!callback) {
+ if (!callback)
edma_shadow0_write_array(ctlr, SH_IECR, lch >> 5,
- (1 << (lch & 0x1f)));
- }
+ BIT(lch & 0x1f));
- edma_info[ctlr]->intr_data[lch].callback = callback;
- edma_info[ctlr]->intr_data[lch].data = data;
+ edma_cc[ctlr]->intr_data[lch].callback = callback;
+ edma_cc[ctlr]->intr_data[lch].data = data;
if (callback) {
edma_shadow0_write_array(ctlr, SH_ICR, lch >> 5,
- (1 << (lch & 0x1f)));
+ BIT(lch & 0x1f));
edma_shadow0_write_array(ctlr, SH_IESR, lch >> 5,
- (1 << (lch & 0x1f)));
+ BIT(lch & 0x1f));
}
}
static int irq2ctlr(int irq)
{
- if (irq >= edma_info[0]->irq_res_start &&
- irq <= edma_info[0]->irq_res_end)
+ if (irq >= edma_cc[0]->irq_res_start && irq <= edma_cc[0]->irq_res_end)
return 0;
- else if (irq >= edma_info[1]->irq_res_start &&
- irq <= edma_info[1]->irq_res_end)
+ else if (irq >= edma_cc[1]->irq_res_start &&
+ irq <= edma_cc[1]->irq_res_end)
return 1;
return -1;
@@ -353,15 +351,17 @@ static irqreturn_t dma_irq_handler(int irq, void *data)
dev_dbg(data, "dma_irq_handler\n");
- if ((edma_shadow0_read_array(ctlr, SH_IPR, 0) == 0)
- && (edma_shadow0_read_array(ctlr, SH_IPR, 1) == 0))
+ if ((edma_shadow0_read_array(ctlr, SH_IPR, 0) == 0) &&
+ (edma_shadow0_read_array(ctlr, SH_IPR, 1) == 0))
return IRQ_NONE;
while (1) {
int j;
- if (edma_shadow0_read_array(ctlr, SH_IPR, 0))
+ if (edma_shadow0_read_array(ctlr, SH_IPR, 0) &
+ edma_shadow0_read_array(ctlr, SH_IER, 0))
j = 0;
- else if (edma_shadow0_read_array(ctlr, SH_IPR, 1))
+ else if (edma_shadow0_read_array(ctlr, SH_IPR, 1) &
+ edma_shadow0_read_array(ctlr, SH_IER, 1))
j = 1;
else
break;
@@ -369,17 +369,17 @@ static irqreturn_t dma_irq_handler(int irq, void *data)
edma_shadow0_read_array(ctlr, SH_IPR, j));
for (i = 0; i < 32; i++) {
int k = (j << 5) + i;
- if (edma_shadow0_read_array(ctlr, SH_IPR, j) &
- (1 << i)) {
+ if ((edma_shadow0_read_array(ctlr, SH_IPR, j) & BIT(i))
+ && (edma_shadow0_read_array(ctlr,
+ SH_IER, j) & BIT(i))) {
/* Clear the corresponding IPR bits */
edma_shadow0_write_array(ctlr, SH_ICR, j,
- (1 << i));
- if (edma_info[ctlr]->intr_data[k].callback) {
- edma_info[ctlr]->intr_data[k].callback(
+ BIT(i));
+ if (edma_cc[ctlr]->intr_data[k].callback)
+ edma_cc[ctlr]->intr_data[k].callback(
k, DMA_COMPLETE,
- edma_info[ctlr]->intr_data[k].
+ edma_cc[ctlr]->intr_data[k].
data);
- }
}
}
cnt++;
@@ -423,19 +423,19 @@ static irqreturn_t dma_ccerr_handler(int irq, void *data)
for (i = 0; i < 32; i++) {
int k = (j << 5) + i;
if (edma_read_array(ctlr, EDMA_EMR, j) &
- (1 << i)) {
+ BIT(i)) {
/* Clear the corresponding EMR bits */
edma_write_array(ctlr, EDMA_EMCR, j,
- 1 << i);
+ BIT(i));
/* Clear any SER */
edma_shadow0_write_array(ctlr, SH_SECR,
- j, (1 << i));
- if (edma_info[ctlr]->intr_data[k].
+ j, BIT(i));
+ if (edma_cc[ctlr]->intr_data[k].
callback) {
- edma_info[ctlr]->intr_data[k].
+ edma_cc[ctlr]->intr_data[k].
callback(k,
DMA_CC_ERROR,
- edma_info[ctlr]->intr_data
+ edma_cc[ctlr]->intr_data
[k].data);
}
}
@@ -444,11 +444,11 @@ static irqreturn_t dma_ccerr_handler(int irq, void *data)
dev_dbg(data, "QEMR %02x\n",
edma_read(ctlr, EDMA_QEMR));
for (i = 0; i < 8; i++) {
- if (edma_read(ctlr, EDMA_QEMR) & (1 << i)) {
+ if (edma_read(ctlr, EDMA_QEMR) & BIT(i)) {
/* Clear the corresponding IPR bits */
- edma_write(ctlr, EDMA_QEMCR, 1 << i);
+ edma_write(ctlr, EDMA_QEMCR, BIT(i));
edma_shadow0_write(ctlr, SH_QSECR,
- (1 << i));
+ BIT(i));
/* NOTE: not reported!! */
}
@@ -460,20 +460,19 @@ static irqreturn_t dma_ccerr_handler(int irq, void *data)
* to just write CCERRCLR with CCERR value...
*/
for (i = 0; i < 8; i++) {
- if (edma_read(ctlr, EDMA_CCERR) & (1 << i)) {
+ if (edma_read(ctlr, EDMA_CCERR) & BIT(i)) {
/* Clear the corresponding IPR bits */
- edma_write(ctlr, EDMA_CCERRCLR, 1 << i);
+ edma_write(ctlr, EDMA_CCERRCLR, BIT(i));
/* NOTE: not reported!! */
}
}
}
- if ((edma_read_array(ctlr, EDMA_EMR, 0) == 0)
- && (edma_read_array(ctlr, EDMA_EMR, 1) == 0)
- && (edma_read(ctlr, EDMA_QEMR) == 0)
- && (edma_read(ctlr, EDMA_CCERR) == 0)) {
+ if ((edma_read_array(ctlr, EDMA_EMR, 0) == 0) &&
+ (edma_read_array(ctlr, EDMA_EMR, 1) == 0) &&
+ (edma_read(ctlr, EDMA_QEMR) == 0) &&
+ (edma_read(ctlr, EDMA_CCERR) == 0))
break;
- }
cnt++;
if (cnt > 10)
break;
@@ -511,9 +510,9 @@ static int reserve_contiguous_slots(int ctlr, unsigned int id,
int stop_slot = start_slot;
DECLARE_BITMAP(tmp_inuse, EDMA_MAX_PARAMENTRY);
- for (i = start_slot; i < edma_info[ctlr]->num_slots; ++i) {
+ for (i = start_slot; i < edma_cc[ctlr]->num_slots; ++i) {
j = EDMA_CHAN_SLOT(i);
- if (!test_and_set_bit(j, edma_info[ctlr]->edma_inuse)) {
+ if (!test_and_set_bit(j, edma_cc[ctlr]->edma_inuse)) {
/* Record our current beginning slot */
if (count == num_slots)
stop_slot = i;
@@ -529,8 +528,9 @@ static int reserve_contiguous_slots(int ctlr, unsigned int id,
if (id == EDMA_CONT_PARAMS_FIXED_EXACT) {
stop_slot = i;
break;
- } else
+ } else {
count = num_slots;
+ }
}
}
@@ -540,12 +540,12 @@ static int reserve_contiguous_slots(int ctlr, unsigned int id,
* of contiguous parameter RAM slots but do not find the exact number
* requested as we may reach the total number of parameter RAM slots
*/
- if (i == edma_info[ctlr]->num_slots)
+ if (i == edma_cc[ctlr]->num_slots)
stop_slot = i;
for (j = start_slot; j < stop_slot; j++)
if (test_bit(j, tmp_inuse))
- clear_bit(j, edma_info[ctlr]->edma_inuse);
+ clear_bit(j, edma_cc[ctlr]->edma_inuse);
if (count)
return -EBUSY;
@@ -567,7 +567,7 @@ static int prepare_unused_channel_list(struct device *dev, void *data)
(int)pdev->resource[i].start >= 0) {
ctlr = EDMA_CTLR(pdev->resource[i].start);
clear_bit(EDMA_CHAN_SLOT(pdev->resource[i].start),
- edma_info[ctlr]->edma_unused);
+ edma_cc[ctlr]->edma_unused);
}
}
@@ -641,14 +641,13 @@ int edma_alloc_channel(int channel,
for (i = 0; i < arch_num_cc; i++) {
channel = 0;
for (;;) {
- channel = find_next_bit(edma_info[i]->
- edma_unused,
- edma_info[i]->num_channels,
+ channel = find_next_bit(edma_cc[i]->edma_unused,
+ edma_cc[i]->num_channels,
channel);
- if (channel == edma_info[i]->num_channels)
+ if (channel == edma_cc[i]->num_channels)
break;
if (!test_and_set_bit(channel,
- edma_info[i]->edma_inuse)) {
+ edma_cc[i]->edma_inuse)) {
done = 1;
ctlr = i;
break;
@@ -660,14 +659,14 @@ int edma_alloc_channel(int channel,
}
if (!done)
return -ENOMEM;
- } else if (channel >= edma_info[ctlr]->num_channels) {
+ } else if (channel >= edma_cc[ctlr]->num_channels) {
return -EINVAL;
- } else if (test_and_set_bit(channel, edma_info[ctlr]->edma_inuse)) {
+ } else if (test_and_set_bit(channel, edma_cc[ctlr]->edma_inuse)) {
return -EBUSY;
}
/* ensure access through shadow region 0 */
- edma_or_array2(ctlr, EDMA_DRAE, 0, channel >> 5, 1 << (channel & 0x1f));
+ edma_or_array2(ctlr, EDMA_DRAE, 0, channel >> 5, BIT(channel & 0x1f));
/* ensure no events are pending */
edma_stop(EDMA_CTLR_CHAN(ctlr, channel));
@@ -703,7 +702,7 @@ void edma_free_channel(unsigned channel)
ctlr = EDMA_CTLR(channel);
channel = EDMA_CHAN_SLOT(channel);
- if (channel >= edma_info[ctlr]->num_channels)
+ if (channel >= edma_cc[ctlr]->num_channels)
return;
setup_dma_interrupt(channel, NULL, NULL);
@@ -711,7 +710,7 @@ void edma_free_channel(unsigned channel)
memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(channel),
&dummy_paramset, PARM_SIZE);
- clear_bit(channel, edma_info[ctlr]->edma_inuse);
+ clear_bit(channel, edma_cc[ctlr]->edma_inuse);
}
EXPORT_SYMBOL(edma_free_channel);
@@ -735,20 +734,19 @@ int edma_alloc_slot(unsigned ctlr, int slot)
slot = EDMA_CHAN_SLOT(slot);
if (slot < 0) {
- slot = edma_info[ctlr]->num_channels;
+ slot = edma_cc[ctlr]->num_channels;
for (;;) {
- slot = find_next_zero_bit(edma_info[ctlr]->edma_inuse,
- edma_info[ctlr]->num_slots, slot);
- if (slot == edma_info[ctlr]->num_slots)
+ slot = find_next_zero_bit(edma_cc[ctlr]->edma_inuse,
+ edma_cc[ctlr]->num_slots, slot);
+ if (slot == edma_cc[ctlr]->num_slots)
return -ENOMEM;
- if (!test_and_set_bit(slot,
- edma_info[ctlr]->edma_inuse))
+ if (!test_and_set_bit(slot, edma_cc[ctlr]->edma_inuse))
break;
}
- } else if (slot < edma_info[ctlr]->num_channels ||
- slot >= edma_info[ctlr]->num_slots) {
+ } else if (slot < edma_cc[ctlr]->num_channels ||
+ slot >= edma_cc[ctlr]->num_slots) {
return -EINVAL;
- } else if (test_and_set_bit(slot, edma_info[ctlr]->edma_inuse)) {
+ } else if (test_and_set_bit(slot, edma_cc[ctlr]->edma_inuse)) {
return -EBUSY;
}
@@ -774,13 +772,13 @@ void edma_free_slot(unsigned slot)
ctlr = EDMA_CTLR(slot);
slot = EDMA_CHAN_SLOT(slot);
- if (slot < edma_info[ctlr]->num_channels ||
- slot >= edma_info[ctlr]->num_slots)
+ if (slot < edma_cc[ctlr]->num_channels ||
+ slot >= edma_cc[ctlr]->num_slots)
return;
memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot),
&dummy_paramset, PARM_SIZE);
- clear_bit(slot, edma_info[ctlr]->edma_inuse);
+ clear_bit(slot, edma_cc[ctlr]->edma_inuse);
}
EXPORT_SYMBOL(edma_free_slot);
@@ -818,8 +816,8 @@ int edma_alloc_cont_slots(unsigned ctlr, unsigned int id, int slot, int count)
* of slots
*/
if ((id != EDMA_CONT_PARAMS_ANY) &&
- (slot < edma_info[ctlr]->num_channels ||
- slot >= edma_info[ctlr]->num_slots))
+ (slot < edma_cc[ctlr]->num_channels ||
+ slot >= edma_cc[ctlr]->num_slots))
return -EINVAL;
/*
@@ -828,13 +826,13 @@ int edma_alloc_cont_slots(unsigned ctlr, unsigned int id, int slot, int count)
* channels
*/
if (count < 1 || count >
- (edma_info[ctlr]->num_slots - edma_info[ctlr]->num_channels))
+ (edma_cc[ctlr]->num_slots - edma_cc[ctlr]->num_channels))
return -EINVAL;
switch (id) {
case EDMA_CONT_PARAMS_ANY:
return reserve_contiguous_slots(ctlr, id, count,
- edma_info[ctlr]->num_channels);
+ edma_cc[ctlr]->num_channels);
case EDMA_CONT_PARAMS_FIXED_EXACT:
case EDMA_CONT_PARAMS_FIXED_NOT_EXACT:
return reserve_contiguous_slots(ctlr, id, count, slot);
@@ -866,8 +864,8 @@ int edma_free_cont_slots(unsigned slot, int count)
ctlr = EDMA_CTLR(slot);
slot = EDMA_CHAN_SLOT(slot);
- if (slot < edma_info[ctlr]->num_channels ||
- slot >= edma_info[ctlr]->num_slots ||
+ if (slot < edma_cc[ctlr]->num_channels ||
+ slot >= edma_cc[ctlr]->num_slots ||
count < 1)
return -EINVAL;
@@ -877,7 +875,7 @@ int edma_free_cont_slots(unsigned slot, int count)
memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot_to_free),
&dummy_paramset, PARM_SIZE);
- clear_bit(slot_to_free, edma_info[ctlr]->edma_inuse);
+ clear_bit(slot_to_free, edma_cc[ctlr]->edma_inuse);
}
return 0;
@@ -907,7 +905,7 @@ void edma_set_src(unsigned slot, dma_addr_t src_port,
ctlr = EDMA_CTLR(slot);
slot = EDMA_CHAN_SLOT(slot);
- if (slot < edma_info[ctlr]->num_slots) {
+ if (slot < edma_cc[ctlr]->num_slots) {
unsigned int i = edma_parm_read(ctlr, PARM_OPT, slot);
if (mode) {
@@ -945,7 +943,7 @@ void edma_set_dest(unsigned slot, dma_addr_t dest_port,
ctlr = EDMA_CTLR(slot);
slot = EDMA_CHAN_SLOT(slot);
- if (slot < edma_info[ctlr]->num_slots) {
+ if (slot < edma_cc[ctlr]->num_slots) {
unsigned int i = edma_parm_read(ctlr, PARM_OPT, slot);
if (mode) {
@@ -1005,7 +1003,7 @@ void edma_set_src_index(unsigned slot, s16 src_bidx, s16 src_cidx)
ctlr = EDMA_CTLR(slot);
slot = EDMA_CHAN_SLOT(slot);
- if (slot < edma_info[ctlr]->num_slots) {
+ if (slot < edma_cc[ctlr]->num_slots) {
edma_parm_modify(ctlr, PARM_SRC_DST_BIDX, slot,
0xffff0000, src_bidx);
edma_parm_modify(ctlr, PARM_SRC_DST_CIDX, slot,
@@ -1031,7 +1029,7 @@ void edma_set_dest_index(unsigned slot, s16 dest_bidx, s16 dest_cidx)
ctlr = EDMA_CTLR(slot);
slot = EDMA_CHAN_SLOT(slot);
- if (slot < edma_info[ctlr]->num_slots) {
+ if (slot < edma_cc[ctlr]->num_slots) {
edma_parm_modify(ctlr, PARM_SRC_DST_BIDX, slot,
0x0000ffff, dest_bidx << 16);
edma_parm_modify(ctlr, PARM_SRC_DST_CIDX, slot,
@@ -1078,7 +1076,7 @@ void edma_set_transfer_params(unsigned slot,
ctlr = EDMA_CTLR(slot);
slot = EDMA_CHAN_SLOT(slot);
- if (slot < edma_info[ctlr]->num_slots) {
+ if (slot < edma_cc[ctlr]->num_slots) {
edma_parm_modify(ctlr, PARM_LINK_BCNTRLD, slot,
0x0000ffff, bcnt_rld << 16);
if (sync_mode == ASYNC)
@@ -1108,9 +1106,9 @@ void edma_link(unsigned from, unsigned to)
ctlr_to = EDMA_CTLR(to);
to = EDMA_CHAN_SLOT(to);
- if (from >= edma_info[ctlr_from]->num_slots)
+ if (from >= edma_cc[ctlr_from]->num_slots)
return;
- if (to >= edma_info[ctlr_to]->num_slots)
+ if (to >= edma_cc[ctlr_to]->num_slots)
return;
edma_parm_modify(ctlr_from, PARM_LINK_BCNTRLD, from, 0xffff0000,
PARM_OFFSET(to));
@@ -1131,7 +1129,7 @@ void edma_unlink(unsigned from)
ctlr = EDMA_CTLR(from);
from = EDMA_CHAN_SLOT(from);
- if (from >= edma_info[ctlr]->num_slots)
+ if (from >= edma_cc[ctlr]->num_slots)
return;
edma_parm_or(ctlr, PARM_LINK_BCNTRLD, from, 0xffff);
}
@@ -1158,7 +1156,7 @@ void edma_write_slot(unsigned slot, const struct edmacc_param *param)
ctlr = EDMA_CTLR(slot);
slot = EDMA_CHAN_SLOT(slot);
- if (slot >= edma_info[ctlr]->num_slots)
+ if (slot >= edma_cc[ctlr]->num_slots)
return;
memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot), param,
PARM_SIZE);
@@ -1180,7 +1178,7 @@ void edma_read_slot(unsigned slot, struct edmacc_param *param)
ctlr = EDMA_CTLR(slot);
slot = EDMA_CHAN_SLOT(slot);
- if (slot >= edma_info[ctlr]->num_slots)
+ if (slot >= edma_cc[ctlr]->num_slots)
return;
memcpy_fromio(param, edmacc_regs_base[ctlr] + PARM_OFFSET(slot),
PARM_SIZE);
@@ -1205,8 +1203,8 @@ void edma_pause(unsigned channel)
ctlr = EDMA_CTLR(channel);
channel = EDMA_CHAN_SLOT(channel);
- if (channel < edma_info[ctlr]->num_channels) {
- unsigned int mask = (1 << (channel & 0x1f));
+ if (channel < edma_cc[ctlr]->num_channels) {
+ unsigned int mask = BIT(channel & 0x1f);
edma_shadow0_write_array(ctlr, SH_EECR, channel >> 5, mask);
}
@@ -1226,8 +1224,8 @@ void edma_resume(unsigned channel)
ctlr = EDMA_CTLR(channel);
channel = EDMA_CHAN_SLOT(channel);
- if (channel < edma_info[ctlr]->num_channels) {
- unsigned int mask = (1 << (channel & 0x1f));
+ if (channel < edma_cc[ctlr]->num_channels) {
+ unsigned int mask = BIT(channel & 0x1f);
edma_shadow0_write_array(ctlr, SH_EESR, channel >> 5, mask);
}
@@ -1252,12 +1250,12 @@ int edma_start(unsigned channel)
ctlr = EDMA_CTLR(channel);
channel = EDMA_CHAN_SLOT(channel);
- if (channel < edma_info[ctlr]->num_channels) {
+ if (channel < edma_cc[ctlr]->num_channels) {
int j = channel >> 5;
- unsigned int mask = (1 << (channel & 0x1f));
+ unsigned int mask = BIT(channel & 0x1f);
/* EDMA channels without event association */
- if (test_bit(channel, edma_info[ctlr]->edma_unused)) {
+ if (test_bit(channel, edma_cc[ctlr]->edma_unused)) {
pr_debug("EDMA: ESR%d %08x\n", j,
edma_shadow0_read_array(ctlr, SH_ESR, j));
edma_shadow0_write_array(ctlr, SH_ESR, j, mask);
@@ -1298,9 +1296,9 @@ void edma_stop(unsigned channel)
ctlr = EDMA_CTLR(channel);
channel = EDMA_CHAN_SLOT(channel);
- if (channel < edma_info[ctlr]->num_channels) {
+ if (channel < edma_cc[ctlr]->num_channels) {
int j = channel >> 5;
- unsigned int mask = (1 << (channel & 0x1f));
+ unsigned int mask = BIT(channel & 0x1f);
edma_shadow0_write_array(ctlr, SH_EECR, j, mask);
edma_shadow0_write_array(ctlr, SH_ECR, j, mask);
@@ -1337,9 +1335,9 @@ void edma_clean_channel(unsigned channel)
ctlr = EDMA_CTLR(channel);
channel = EDMA_CHAN_SLOT(channel);
- if (channel < edma_info[ctlr]->num_channels) {
+ if (channel < edma_cc[ctlr]->num_channels) {
int j = (channel >> 5);
- unsigned int mask = 1 << (channel & 0x1f);
+ unsigned int mask = BIT(channel & 0x1f);
pr_debug("EDMA: EMR%d %08x\n", j,
edma_read_array(ctlr, EDMA_EMR, j));
@@ -1348,7 +1346,7 @@ void edma_clean_channel(unsigned channel)
edma_write_array(ctlr, EDMA_EMCR, j, mask);
/* Clear any SER */
edma_shadow0_write_array(ctlr, SH_SECR, j, mask);
- edma_write(ctlr, EDMA_CCERRCLR, (1 << 16) | 0x3);
+ edma_write(ctlr, EDMA_CCERRCLR, BIT(16) | BIT(1) | BIT(0));
}
}
EXPORT_SYMBOL(edma_clean_channel);
@@ -1365,12 +1363,12 @@ void edma_clear_event(unsigned channel)
ctlr = EDMA_CTLR(channel);
channel = EDMA_CHAN_SLOT(channel);
- if (channel >= edma_info[ctlr]->num_channels)
+ if (channel >= edma_cc[ctlr]->num_channels)
return;
if (channel < 32)
- edma_write(ctlr, EDMA_ECR, 1 << channel);
+ edma_write(ctlr, EDMA_ECR, BIT(channel));
else
- edma_write(ctlr, EDMA_ECRH, 1 << (channel - 32));
+ edma_write(ctlr, EDMA_ECRH, BIT(channel - 32));
}
EXPORT_SYMBOL(edma_clear_event);
@@ -1402,8 +1400,9 @@ static int __init edma_probe(struct platform_device *pdev)
break;
else
return -ENODEV;
- } else
+ } else {
found = 1;
+ }
len[j] = resource_size(r[j]);
@@ -1420,38 +1419,37 @@ static int __init edma_probe(struct platform_device *pdev)
goto fail1;
}
- edma_info[j] = kmalloc(sizeof(struct edma), GFP_KERNEL);
- if (!edma_info[j]) {
+ edma_cc[j] = kmalloc(sizeof(struct edma), GFP_KERNEL);
+ if (!edma_cc[j]) {
status = -ENOMEM;
goto fail1;
}
- memset(edma_info[j], 0, sizeof(struct edma));
+ memset(edma_cc[j], 0, sizeof(struct edma));
- edma_info[j]->num_channels = min_t(unsigned, info[j].n_channel,
+ edma_cc[j]->num_channels = min_t(unsigned, info[j].n_channel,
EDMA_MAX_DMACH);
- edma_info[j]->num_slots = min_t(unsigned, info[j].n_slot,
+ edma_cc[j]->num_slots = min_t(unsigned, info[j].n_slot,
EDMA_MAX_PARAMENTRY);
- edma_info[j]->num_cc = min_t(unsigned, info[j].n_cc,
- EDMA_MAX_CC);
+ edma_cc[j]->num_cc = min_t(unsigned, info[j].n_cc, EDMA_MAX_CC);
- edma_info[j]->default_queue = info[j].default_queue;
- if (!edma_info[j]->default_queue)
- edma_info[j]->default_queue = EVENTQ_1;
+ edma_cc[j]->default_queue = info[j].default_queue;
+ if (!edma_cc[j]->default_queue)
+ edma_cc[j]->default_queue = EVENTQ_1;
dev_dbg(&pdev->dev, "DMA REG BASE ADDR=%p\n",
edmacc_regs_base[j]);
- for (i = 0; i < edma_info[j]->num_slots; i++)
+ for (i = 0; i < edma_cc[j]->num_slots; i++)
memcpy_toio(edmacc_regs_base[j] + PARM_OFFSET(i),
&dummy_paramset, PARM_SIZE);
/* Mark all channels as unused */
- memset(edma_info[j]->edma_unused, 0xff,
- sizeof(edma_info[j]->edma_unused));
+ memset(edma_cc[j]->edma_unused, 0xff,
+ sizeof(edma_cc[j]->edma_unused));
sprintf(irq_name, "edma%d", j);
irq[j] = platform_get_irq_byname(pdev, irq_name);
- edma_info[j]->irq_res_start = irq[j];
+ edma_cc[j]->irq_res_start = irq[j];
status = request_irq(irq[j], dma_irq_handler, 0, "edma",
&pdev->dev);
if (status < 0) {
@@ -1462,7 +1460,7 @@ static int __init edma_probe(struct platform_device *pdev)
sprintf(irq_name, "edma%d_err", j);
err_irq[j] = platform_get_irq_byname(pdev, irq_name);
- edma_info[j]->irq_res_end = err_irq[j];
+ edma_cc[j]->irq_res_end = err_irq[j];
status = request_irq(err_irq[j], dma_ccerr_handler, 0,
"edma_error", &pdev->dev);
if (status < 0) {
@@ -1475,7 +1473,7 @@ static int __init edma_probe(struct platform_device *pdev)
* specified. This way, long transfers on the low priority queue
* started by the codec engine will not cause audio defects.
*/
- for (i = 0; i < edma_info[j]->num_channels; i++)
+ for (i = 0; i < edma_cc[j]->num_channels; i++)
map_dmach_queue(j, i, EVENTQ_1);
queue_tc_mapping = info[j].queue_tc_mapping;
@@ -1538,7 +1536,7 @@ fail1:
release_mem_region(r[i]->start, len[i]);
if (edmacc_regs_base[i])
iounmap(edmacc_regs_base[i]);
- kfree(edma_info[i]);
+ kfree(edma_cc[i]);
}
return status;
}
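
Throughout dma.c the patch replaces open-coded (1 << n) masks with the BIT() macro from <linux/bitops.h> (BIT(nr) expands to (1UL << (nr))), alongside the edma_info -> edma_cc rename. A minimal sketch of the masking pattern, with a hypothetical write helper standing in for the real EDMA accessors:

#include <linux/kernel.h>
#include <linux/bitops.h>	/* BIT(nr) == (1UL << (nr)) */
#include <linux/types.h>

/* hypothetical stand-in for edma_shadow0_write_array(ctlr, reg, bank, mask) */
static void example_write_array(int bank, u32 mask)
{
	pr_debug("bank %d mask %08x\n", bank, mask);
}

static void example_disable_event(unsigned channel)
{
	/* 32 events per register: pick the register bank, then the bit in it */
	int bank = channel >> 5;
	u32 mask = BIT(channel & 0x1f);

	example_write_array(bank, mask);
}
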
diff --git a/arch/arm/mach-davinci/gpio.c b/arch/arm/mach-davinci/gpio.c
index 744755b53236..bf0ff587e46a 100644
--- a/arch/arm/mach-davinci/gpio.c
+++ b/arch/arm/mach-davinci/gpio.c
@@ -20,46 +20,92 @@
#include <asm/mach/irq.h>
-static DEFINE_SPINLOCK(gpio_lock);
-
-struct davinci_gpio {
- struct gpio_chip chip;
- struct gpio_controller *__iomem regs;
- int irq_base;
+struct davinci_gpio_regs {
+ u32 dir;
+ u32 out_data;
+ u32 set_data;
+ u32 clr_data;
+ u32 in_data;
+ u32 set_rising;
+ u32 clr_rising;
+ u32 set_falling;
+ u32 clr_falling;
+ u32 intstat;
};
-static struct davinci_gpio chips[DIV_ROUND_UP(DAVINCI_N_GPIO, 32)];
+#define chip2controller(chip) \
+ container_of(chip, struct davinci_gpio_controller, chip)
+
+static struct davinci_gpio_controller chips[DIV_ROUND_UP(DAVINCI_N_GPIO, 32)];
+static void __iomem *gpio_base;
+
+static struct davinci_gpio_regs __iomem __init *gpio2regs(unsigned gpio)
+{
+ void __iomem *ptr;
+
+ if (gpio < 32 * 1)
+ ptr = gpio_base + 0x10;
+ else if (gpio < 32 * 2)
+ ptr = gpio_base + 0x38;
+ else if (gpio < 32 * 3)
+ ptr = gpio_base + 0x60;
+ else if (gpio < 32 * 4)
+ ptr = gpio_base + 0x88;
+ else if (gpio < 32 * 5)
+ ptr = gpio_base + 0xb0;
+ else
+ ptr = NULL;
+ return ptr;
+}
-/* create a non-inlined version */
-static struct gpio_controller __iomem * __init gpio2controller(unsigned gpio)
+static inline struct davinci_gpio_regs __iomem *irq2regs(int irq)
{
- return __gpio_to_controller(gpio);
+ struct davinci_gpio_regs __iomem *g;
+
+ g = (__force struct davinci_gpio_regs __iomem *)get_irq_chip_data(irq);
+
+ return g;
}
static int __init davinci_gpio_irq_setup(void);
/*--------------------------------------------------------------------------*/
-/*
- * board setup code *MUST* set PINMUX0 and PINMUX1 as
- * needed, and enable the GPIO clock.
- */
-
-static int davinci_direction_in(struct gpio_chip *chip, unsigned offset)
+/* board setup code *MUST* setup pinmux and enable the GPIO clock. */
+static inline int __davinci_direction(struct gpio_chip *chip,
+ unsigned offset, bool out, int value)
{
- struct davinci_gpio *d = container_of(chip, struct davinci_gpio, chip);
- struct gpio_controller *__iomem g = d->regs;
+ struct davinci_gpio_controller *d = chip2controller(chip);
+ struct davinci_gpio_regs __iomem *g = d->regs;
+ unsigned long flags;
u32 temp;
+ u32 mask = 1 << offset;
- spin_lock(&gpio_lock);
+ spin_lock_irqsave(&d->lock, flags);
temp = __raw_readl(&g->dir);
- temp |= (1 << offset);
+ if (out) {
+ temp &= ~mask;
+ __raw_writel(mask, value ? &g->set_data : &g->clr_data);
+ } else {
+ temp |= mask;
+ }
__raw_writel(temp, &g->dir);
- spin_unlock(&gpio_lock);
+ spin_unlock_irqrestore(&d->lock, flags);
return 0;
}
+static int davinci_direction_in(struct gpio_chip *chip, unsigned offset)
+{
+ return __davinci_direction(chip, offset, false, 0);
+}
+
+static int
+davinci_direction_out(struct gpio_chip *chip, unsigned offset, int value)
+{
+ return __davinci_direction(chip, offset, true, value);
+}
+
/*
* Read the pin's value (works even if it's set up as output);
* returns zero/nonzero.
@@ -69,37 +115,20 @@ static int davinci_direction_in(struct gpio_chip *chip, unsigned offset)
*/
static int davinci_gpio_get(struct gpio_chip *chip, unsigned offset)
{
- struct davinci_gpio *d = container_of(chip, struct davinci_gpio, chip);
- struct gpio_controller *__iomem g = d->regs;
+ struct davinci_gpio_controller *d = chip2controller(chip);
+ struct davinci_gpio_regs __iomem *g = d->regs;
return (1 << offset) & __raw_readl(&g->in_data);
}
-static int
-davinci_direction_out(struct gpio_chip *chip, unsigned offset, int value)
-{
- struct davinci_gpio *d = container_of(chip, struct davinci_gpio, chip);
- struct gpio_controller *__iomem g = d->regs;
- u32 temp;
- u32 mask = 1 << offset;
-
- spin_lock(&gpio_lock);
- temp = __raw_readl(&g->dir);
- temp &= ~mask;
- __raw_writel(mask, value ? &g->set_data : &g->clr_data);
- __raw_writel(temp, &g->dir);
- spin_unlock(&gpio_lock);
- return 0;
-}
-
/*
* Assuming the pin is muxed as a gpio output, set its output value.
*/
static void
davinci_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
- struct davinci_gpio *d = container_of(chip, struct davinci_gpio, chip);
- struct gpio_controller *__iomem g = d->regs;
+ struct davinci_gpio_controller *d = chip2controller(chip);
+ struct davinci_gpio_regs __iomem *g = d->regs;
__raw_writel((1 << offset), value ? &g->set_data : &g->clr_data);
}
@@ -109,6 +138,10 @@ static int __init davinci_gpio_setup(void)
int i, base;
unsigned ngpio;
struct davinci_soc_info *soc_info = &davinci_soc_info;
+ struct davinci_gpio_regs *regs;
+
+ if (soc_info->gpio_type != GPIO_TYPE_DAVINCI)
+ return 0;
/*
* The gpio banks conceptually expose a segmented bitmap,
@@ -124,6 +157,10 @@ static int __init davinci_gpio_setup(void)
if (WARN_ON(DAVINCI_N_GPIO < ngpio))
ngpio = DAVINCI_N_GPIO;
+ gpio_base = ioremap(soc_info->gpio_base, SZ_4K);
+ if (WARN_ON(!gpio_base))
+ return -ENOMEM;
+
for (i = 0, base = 0; base < ngpio; i++, base += 32) {
chips[i].chip.label = "DaVinci";
@@ -137,11 +174,20 @@ static int __init davinci_gpio_setup(void)
if (chips[i].chip.ngpio > 32)
chips[i].chip.ngpio = 32;
- chips[i].regs = gpio2controller(base);
+ spin_lock_init(&chips[i].lock);
+
+ regs = gpio2regs(base);
+ chips[i].regs = regs;
+ chips[i].set_data = &regs->set_data;
+ chips[i].clr_data = &regs->clr_data;
+ chips[i].in_data = &regs->in_data;
gpiochip_add(&chips[i].chip);
}
+ soc_info->gpio_ctlrs = chips;
+ soc_info->gpio_ctlrs_num = DIV_ROUND_UP(ngpio, 32);
+
davinci_gpio_irq_setup();
return 0;
}
@@ -161,7 +207,7 @@ pure_initcall(davinci_gpio_setup);
static void gpio_irq_disable(unsigned irq)
{
- struct gpio_controller *__iomem g = get_irq_chip_data(irq);
+ struct davinci_gpio_regs __iomem *g = irq2regs(irq);
u32 mask = (u32) get_irq_data(irq);
__raw_writel(mask, &g->clr_falling);
@@ -170,7 +216,7 @@ static void gpio_irq_disable(unsigned irq)
static void gpio_irq_enable(unsigned irq)
{
- struct gpio_controller *__iomem g = get_irq_chip_data(irq);
+ struct davinci_gpio_regs __iomem *g = irq2regs(irq);
u32 mask = (u32) get_irq_data(irq);
unsigned status = irq_desc[irq].status;
@@ -186,7 +232,7 @@ static void gpio_irq_enable(unsigned irq)
static int gpio_irq_type(unsigned irq, unsigned trigger)
{
- struct gpio_controller *__iomem g = get_irq_chip_data(irq);
+ struct davinci_gpio_regs __iomem *g = irq2regs(irq);
u32 mask = (u32) get_irq_data(irq);
if (trigger & ~(IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
@@ -215,7 +261,7 @@ static struct irq_chip gpio_irqchip = {
static void
gpio_irq_handler(unsigned irq, struct irq_desc *desc)
{
- struct gpio_controller *__iomem g = get_irq_chip_data(irq);
+ struct davinci_gpio_regs __iomem *g = irq2regs(irq);
u32 mask = 0xffff;
/* we only care about one bank */
@@ -253,7 +299,7 @@ gpio_irq_handler(unsigned irq, struct irq_desc *desc)
static int gpio_to_irq_banked(struct gpio_chip *chip, unsigned offset)
{
- struct davinci_gpio *d = container_of(chip, struct davinci_gpio, chip);
+ struct davinci_gpio_controller *d = chip2controller(chip);
if (d->irq_base >= 0)
return d->irq_base + offset;
@@ -276,7 +322,7 @@ static int gpio_to_irq_unbanked(struct gpio_chip *chip, unsigned offset)
static int gpio_irq_type_unbanked(unsigned irq, unsigned trigger)
{
- struct gpio_controller *__iomem g = get_irq_chip_data(irq);
+ struct davinci_gpio_regs __iomem *g = irq2regs(irq);
u32 mask = (u32) get_irq_data(irq);
if (trigger & ~(IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
@@ -305,7 +351,7 @@ static int __init davinci_gpio_irq_setup(void)
u32 binten = 0;
unsigned ngpio, bank_irq;
struct davinci_soc_info *soc_info = &davinci_soc_info;
- struct gpio_controller *__iomem g;
+ struct davinci_gpio_regs __iomem *g;
ngpio = soc_info->gpio_num;
@@ -354,7 +400,7 @@ static int __init davinci_gpio_irq_setup(void)
gpio_irqchip_unbanked.set_type = gpio_irq_type_unbanked;
/* default trigger: both edges */
- g = gpio2controller(0);
+ g = gpio2regs(0);
__raw_writel(~0, &g->set_falling);
__raw_writel(~0, &g->set_rising);
@@ -362,7 +408,7 @@ static int __init davinci_gpio_irq_setup(void)
for (gpio = 0; gpio < soc_info->gpio_unbanked; gpio++, irq++) {
set_irq_chip(irq, &gpio_irqchip_unbanked);
set_irq_data(irq, (void *) __gpio_mask(gpio));
- set_irq_chip_data(irq, g);
+ set_irq_chip_data(irq, (__force void *) g);
irq_desc[irq].status |= IRQ_TYPE_EDGE_BOTH;
}
@@ -379,18 +425,18 @@ static int __init davinci_gpio_irq_setup(void)
unsigned i;
/* disabled by default, enabled only as needed */
- g = gpio2controller(gpio);
+ g = gpio2regs(gpio);
__raw_writel(~0, &g->clr_falling);
__raw_writel(~0, &g->clr_rising);
/* set up all irqs in this bank */
set_irq_chained_handler(bank_irq, gpio_irq_handler);
- set_irq_chip_data(bank_irq, g);
- set_irq_data(bank_irq, (void *)irq);
+ set_irq_chip_data(bank_irq, (__force void *) g);
+ set_irq_data(bank_irq, (void *) irq);
for (i = 0; i < 16 && gpio < ngpio; i++, irq++, gpio++) {
set_irq_chip(irq, &gpio_irqchip);
- set_irq_chip_data(irq, g);
+ set_irq_chip_data(irq, (__force void *) g);
set_irq_data(irq, (void *) __gpio_mask(gpio));
set_irq_handler(irq, handle_simple_irq);
set_irq_flags(irq, IRQF_VALID);
@@ -403,7 +449,7 @@ done:
/* BINTEN -- per-bank interrupt enable. genirq would also let these
* bits be set/cleared dynamically.
*/
- __raw_writel(binten, soc_info->gpio_base + 0x08);
+ __raw_writel(binten, gpio_base + 0x08);
printk(KERN_INFO "DaVinci: %d gpio irqs\n", irq - gpio_to_irq(0));
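
The rewritten gpio.c recovers the per-bank controller from the generic gpio_chip with container_of(), wrapped by the chip2controller() macro above, and keeps a per-controller spinlock instead of one global gpio_lock. A minimal sketch of the same idiom, independent of the DaVinci register layout; all names here are illustrative:

#include <linux/kernel.h>	/* container_of() */
#include <linux/gpio.h>
#include <linux/io.h>
#include <linux/spinlock.h>

/* hypothetical controller embedding the generic gpio_chip */
struct example_gpio_controller {
	struct gpio_chip	chip;
	spinlock_t		lock;
	void __iomem		*regs;
};

static int example_gpio_get(struct gpio_chip *chip, unsigned offset)
{
	struct example_gpio_controller *d =
		container_of(chip, struct example_gpio_controller, chip);

	/* chip is embedded in d, so container_of() recovers the private state */
	return (__raw_readl(d->regs) >> offset) & 1;
}
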
diff --git a/arch/arm/mach-davinci/include/mach/common.h b/arch/arm/mach-davinci/include/mach/common.h
index 50a955f05ef9..a57cba21e21e 100644
--- a/arch/arm/mach-davinci/include/mach/common.h
+++ b/arch/arm/mach-davinci/include/mach/common.h
@@ -12,6 +12,9 @@
#ifndef __ARCH_ARM_MACH_DAVINCI_COMMON_H
#define __ARCH_ARM_MACH_DAVINCI_COMMON_H
+#include <linux/compiler.h>
+#include <linux/types.h>
+
struct sys_timer;
extern struct sys_timer davinci_timer;
@@ -21,7 +24,7 @@ extern void __iomem *davinci_intc_base;
extern int davinci_intc_type;
struct davinci_timer_instance {
- void __iomem *base;
+ u32 base;
u32 bottom_irq;
u32 top_irq;
unsigned long cmp_off;
@@ -34,39 +37,54 @@ struct davinci_timer_info {
unsigned int clocksource_id;
};
-/* SoC specific init support */
+struct davinci_gpio_controller;
+
+/*
+ * SoC info passed into common davinci modules.
+ *
+ * Base addresses in this structure should be physical and not virtual.
+ * Modules that take such base addresses, should internally ioremap() them to
+ * use.
+ */
struct davinci_soc_info {
struct map_desc *io_desc;
unsigned long io_desc_num;
u32 cpu_id;
u32 jtag_id;
- void __iomem *jtag_id_base;
+ u32 jtag_id_reg;
struct davinci_id *ids;
unsigned long ids_num;
struct clk_lookup *cpu_clks;
- void __iomem **psc_bases;
+ u32 *psc_bases;
unsigned long psc_bases_num;
- void __iomem *pinmux_base;
+ u32 pinmux_base;
const struct mux_config *pinmux_pins;
unsigned long pinmux_pins_num;
- void __iomem *intc_base;
+ u32 intc_base;
int intc_type;
u8 *intc_irq_prios;
unsigned long intc_irq_num;
+ u32 *intc_host_map;
struct davinci_timer_info *timer_info;
- void __iomem *gpio_base;
+ int gpio_type;
+ u32 gpio_base;
unsigned gpio_num;
unsigned gpio_irq;
unsigned gpio_unbanked;
+ struct davinci_gpio_controller *gpio_ctlrs;
+ int gpio_ctlrs_num;
struct platform_device *serial_dev;
struct emac_platform_data *emac_pdata;
dma_addr_t sram_dma;
unsigned sram_len;
+ struct platform_device *reset_device;
+ void (*reset)(struct platform_device *);
};
extern struct davinci_soc_info davinci_soc_info;
extern void davinci_common_init(struct davinci_soc_info *soc_info);
+extern void davinci_init_ide(void);
/* standard place to map on-chip SRAMs; they *may* support DMA */
#define SRAM_VIRT 0xfffe0000
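
The comment added to struct davinci_soc_info states the new convention: the structure carries physical addresses and each consumer ioremap()s what it needs, as irq.c and mux.c do later in this patch. A minimal sketch of a consumer following that rule; the register offset is illustrative and SZ_4K is assumed available through the usual ARM headers:

#include <linux/init.h>
#include <linux/io.h>
#include <mach/common.h>	/* davinci_soc_info */

static void __iomem *example_base;

static int __init example_consumer_init(void)
{
	/* intc_base is now a physical address; map it before touching registers */
	example_base = ioremap(davinci_soc_info.intc_base, SZ_4K);
	if (WARN_ON(!example_base))
		return -ENOMEM;

	__raw_writel(~0, example_base + 0x08);	/* illustrative register write */
	return 0;
}
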
diff --git a/arch/arm/mach-davinci/include/mach/cp_intc.h b/arch/arm/mach-davinci/include/mach/cp_intc.h
index c4d27eec8064..4e8190eed673 100644
--- a/arch/arm/mach-davinci/include/mach/cp_intc.h
+++ b/arch/arm/mach-davinci/include/mach/cp_intc.h
@@ -51,7 +51,6 @@
#define CP_INTC_HOST_PRIO_VECTOR(n) (0x1600 + (n << 2))
#define CP_INTC_VECTOR_ADDR(n) (0x2000 + (n << 2))
-void __init cp_intc_init(void __iomem *base, unsigned short num_irq,
- u8 *irq_prio);
+void __init cp_intc_init(void);
#endif /* __ASM_HARDWARE_CP_INTC_H */
diff --git a/arch/arm/mach-davinci/include/mach/cputype.h b/arch/arm/mach-davinci/include/mach/cputype.h
index 189b1ff13642..cea6b8972043 100644
--- a/arch/arm/mach-davinci/include/mach/cputype.h
+++ b/arch/arm/mach-davinci/include/mach/cputype.h
@@ -33,6 +33,7 @@ struct davinci_id {
#define DAVINCI_CPU_ID_DM365 0x03650000
#define DAVINCI_CPU_ID_DA830 0x08300000
#define DAVINCI_CPU_ID_DA850 0x08500000
+#define DAVINCI_CPU_ID_TNETV107X 0x0b8a0000
#define IS_DAVINCI_CPU(type, id) \
static inline int is_davinci_ ##type(void) \
@@ -46,6 +47,7 @@ IS_DAVINCI_CPU(dm355, DAVINCI_CPU_ID_DM355)
IS_DAVINCI_CPU(dm365, DAVINCI_CPU_ID_DM365)
IS_DAVINCI_CPU(da830, DAVINCI_CPU_ID_DA830)
IS_DAVINCI_CPU(da850, DAVINCI_CPU_ID_DA850)
+IS_DAVINCI_CPU(tnetv107x, DAVINCI_CPU_ID_TNETV107X)
#ifdef CONFIG_ARCH_DAVINCI_DM644x
#define cpu_is_davinci_dm644x() is_davinci_dm644x()
@@ -83,4 +85,10 @@ IS_DAVINCI_CPU(da850, DAVINCI_CPU_ID_DA850)
#define cpu_is_davinci_da850() 0
#endif
+#ifdef CONFIG_ARCH_DAVINCI_TNETV107X
+#define cpu_is_davinci_tnetv107x() is_davinci_tnetv107x()
+#else
+#define cpu_is_davinci_tnetv107x() 0
+#endif
+
#endif
diff --git a/arch/arm/mach-davinci/include/mach/da8xx.h b/arch/arm/mach-davinci/include/mach/da8xx.h
index 03acfd39042b..1b31a9aa8fba 100644
--- a/arch/arm/mach-davinci/include/mach/da8xx.h
+++ b/arch/arm/mach-davinci/include/mach/da8xx.h
@@ -64,27 +64,6 @@ extern void __iomem *da8xx_syscfg1_base;
#define DA8XX_DDR2_CTL_BASE 0xb0000000
#define DA8XX_ARM_RAM_BASE 0xffff0000
-#define PINMUX0 0x00
-#define PINMUX1 0x04
-#define PINMUX2 0x08
-#define PINMUX3 0x0c
-#define PINMUX4 0x10
-#define PINMUX5 0x14
-#define PINMUX6 0x18
-#define PINMUX7 0x1c
-#define PINMUX8 0x20
-#define PINMUX9 0x24
-#define PINMUX10 0x28
-#define PINMUX11 0x2c
-#define PINMUX12 0x30
-#define PINMUX13 0x34
-#define PINMUX14 0x38
-#define PINMUX15 0x3c
-#define PINMUX16 0x40
-#define PINMUX17 0x44
-#define PINMUX18 0x48
-#define PINMUX19 0x4c
-
void __init da830_init(void);
void __init da850_init(void);
@@ -108,6 +87,8 @@ extern struct emac_platform_data da8xx_emac_pdata;
extern struct da8xx_lcdc_platform_data sharp_lcd035q3dg01_pdata;
extern struct da8xx_lcdc_platform_data sharp_lk043t1dg01_pdata;
+extern struct platform_device da8xx_wdt_device;
+
extern const short da830_emif25_pins[];
extern const short da830_spi0_pins[];
extern const short da830_spi1_pins[];
@@ -146,10 +127,4 @@ extern const short da850_mmcsd0_pins[];
extern const short da850_nand_pins[];
extern const short da850_nor_pins[];
-#ifdef CONFIG_DAVINCI_MUX
-int da8xx_pinmux_setup(const short pins[]);
-#else
-static inline int da8xx_pinmux_setup(const short pins[]) { return 0; }
-#endif
-
#endif /* __ASM_ARCH_DAVINCI_DA8XX_H */
diff --git a/arch/arm/mach-davinci/include/mach/dm355.h b/arch/arm/mach-davinci/include/mach/dm355.h
index 85536d8e8336..36dff4a0ce3f 100644
--- a/arch/arm/mach-davinci/include/mach/dm355.h
+++ b/arch/arm/mach-davinci/include/mach/dm355.h
@@ -15,6 +15,9 @@
#include <mach/asp.h>
#include <media/davinci/vpfe_capture.h>
+#define DM355_ASYNC_EMIF_CONTROL_BASE 0x01E10000
+#define DM355_ASYNC_EMIF_DATA_CE0_BASE 0x02000000
+
#define ASP1_TX_EVT_EN 1
#define ASP1_RX_EVT_EN 2
diff --git a/arch/arm/mach-davinci/include/mach/dm365.h b/arch/arm/mach-davinci/include/mach/dm365.h
index 3a37b5a6983c..ea5df3b49ec4 100644
--- a/arch/arm/mach-davinci/include/mach/dm365.h
+++ b/arch/arm/mach-davinci/include/mach/dm365.h
@@ -36,6 +36,10 @@
#define DAVINCI_DMA_VC_TX 2
#define DAVINCI_DMA_VC_RX 3
+#define DM365_ASYNC_EMIF_CONTROL_BASE 0x01D10000
+#define DM365_ASYNC_EMIF_DATA_CE0_BASE 0x02000000
+#define DM365_ASYNC_EMIF_DATA_CE1_BASE 0x04000000
+
void __init dm365_init(void);
void __init dm365_init_asp(struct snd_platform_data *pdata);
void __init dm365_init_vc(struct snd_platform_data *pdata);
diff --git a/arch/arm/mach-davinci/include/mach/dm644x.h b/arch/arm/mach-davinci/include/mach/dm644x.h
index 1a8b09ccc3c8..6fca568a0fd2 100644
--- a/arch/arm/mach-davinci/include/mach/dm644x.h
+++ b/arch/arm/mach-davinci/include/mach/dm644x.h
@@ -34,6 +34,12 @@
#define DM644X_EMAC_MDIO_OFFSET (0x4000)
#define DM644X_EMAC_CNTRL_RAM_SIZE (0x2000)
+#define DM644X_ASYNC_EMIF_CONTROL_BASE 0x01E00000
+#define DM644X_ASYNC_EMIF_DATA_CE0_BASE 0x02000000
+#define DM644X_ASYNC_EMIF_DATA_CE1_BASE 0x04000000
+#define DM644X_ASYNC_EMIF_DATA_CE2_BASE 0x06000000
+#define DM644X_ASYNC_EMIF_DATA_CE3_BASE 0x08000000
+
void __init dm644x_init(void);
void __init dm644x_init_asp(struct snd_platform_data *pdata);
void dm644x_set_vpfe_config(struct vpfe_config *cfg);
diff --git a/arch/arm/mach-davinci/include/mach/dm646x.h b/arch/arm/mach-davinci/include/mach/dm646x.h
index 846da98b619a..add6f794a362 100644
--- a/arch/arm/mach-davinci/include/mach/dm646x.h
+++ b/arch/arm/mach-davinci/include/mach/dm646x.h
@@ -25,10 +25,10 @@
#define DM646X_EMAC_MDIO_OFFSET (0x4000)
#define DM646X_EMAC_CNTRL_RAM_SIZE (0x2000)
-#define DM646X_ATA_REG_BASE (0x01C66000)
+#define DM646X_ASYNC_EMIF_CONTROL_BASE 0x20008000
+#define DM646X_ASYNC_EMIF_CS2_SPACE_BASE 0x42000000
void __init dm646x_init(void);
-void __init dm646x_init_ide(void);
void __init dm646x_init_mcasp0(struct snd_platform_data *pdata);
void __init dm646x_init_mcasp1(struct snd_platform_data *pdata);
void __init dm646x_board_setup_refclk(struct clk *clk);
diff --git a/arch/arm/mach-davinci/include/mach/gpio.h b/arch/arm/mach-davinci/include/mach/gpio.h
index f3b8ef878158..504cc180a60b 100644
--- a/arch/arm/mach-davinci/include/mach/gpio.h
+++ b/arch/arm/mach-davinci/include/mach/gpio.h
@@ -14,6 +14,8 @@
#define __DAVINCI_GPIO_H
#include <linux/io.h>
+#include <linux/spinlock.h>
+
#include <asm-generic/gpio.h>
#include <mach/irqs.h>
@@ -21,6 +23,10 @@
#define DAVINCI_GPIO_BASE 0x01C67000
+enum davinci_gpio_type {
+ GPIO_TYPE_DAVINCI = 0,
+};
+
/*
* basic gpio routines
*
@@ -45,17 +51,14 @@
/* Convert GPIO signal to GPIO pin number */
#define GPIO_TO_PIN(bank, gpio) (16 * (bank) + (gpio))
-struct gpio_controller {
- u32 dir;
- u32 out_data;
- u32 set_data;
- u32 clr_data;
- u32 in_data;
- u32 set_rising;
- u32 clr_rising;
- u32 set_falling;
- u32 clr_falling;
- u32 intstat;
+struct davinci_gpio_controller {
+ struct gpio_chip chip;
+ int irq_base;
+ spinlock_t lock;
+ void __iomem *regs;
+ void __iomem *set_data;
+ void __iomem *clr_data;
+ void __iomem *in_data;
};
/* The __gpio_to_controller() and __gpio_mask() functions inline to constants
@@ -67,25 +70,16 @@ struct gpio_controller {
*
* These are NOT part of the cross-platform GPIO interface
*/
-static inline struct gpio_controller *__iomem
+static inline struct davinci_gpio_controller *
__gpio_to_controller(unsigned gpio)
{
- void *__iomem ptr;
- void __iomem *base = davinci_soc_info.gpio_base;
-
- if (gpio < 32 * 1)
- ptr = base + 0x10;
- else if (gpio < 32 * 2)
- ptr = base + 0x38;
- else if (gpio < 32 * 3)
- ptr = base + 0x60;
- else if (gpio < 32 * 4)
- ptr = base + 0x88;
- else if (gpio < 32 * 5)
- ptr = base + 0xb0;
- else
- ptr = NULL;
- return ptr;
+ struct davinci_gpio_controller *ctlrs = davinci_soc_info.gpio_ctlrs;
+ int index = gpio / 32;
+
+ if (!ctlrs || index >= davinci_soc_info.gpio_ctlrs_num)
+ return NULL;
+
+ return ctlrs + index;
}
static inline u32 __gpio_mask(unsigned gpio)
@@ -101,16 +95,16 @@ static inline u32 __gpio_mask(unsigned gpio)
*/
static inline void gpio_set_value(unsigned gpio, int value)
{
- if (__builtin_constant_p(value) && gpio < DAVINCI_N_GPIO) {
- struct gpio_controller *__iomem g;
- u32 mask;
+ if (__builtin_constant_p(value) && gpio < davinci_soc_info.gpio_num) {
+ struct davinci_gpio_controller *ctlr;
+ u32 mask;
- g = __gpio_to_controller(gpio);
+ ctlr = __gpio_to_controller(gpio);
mask = __gpio_mask(gpio);
if (value)
- __raw_writel(mask, &g->set_data);
+ __raw_writel(mask, ctlr->set_data);
else
- __raw_writel(mask, &g->clr_data);
+ __raw_writel(mask, ctlr->clr_data);
return;
}
@@ -128,18 +122,18 @@ static inline void gpio_set_value(unsigned gpio, int value)
*/
static inline int gpio_get_value(unsigned gpio)
{
- struct gpio_controller *__iomem g;
+ struct davinci_gpio_controller *ctlr;
- if (!__builtin_constant_p(gpio) || gpio >= DAVINCI_N_GPIO)
+ if (!__builtin_constant_p(gpio) || gpio >= davinci_soc_info.gpio_num)
return __gpio_get_value(gpio);
- g = __gpio_to_controller(gpio);
- return __gpio_mask(gpio) & __raw_readl(&g->in_data);
+ ctlr = __gpio_to_controller(gpio);
+ return __gpio_mask(gpio) & __raw_readl(ctlr->in_data);
}
static inline int gpio_cansleep(unsigned gpio)
{
- if (__builtin_constant_p(gpio) && gpio < DAVINCI_N_GPIO)
+ if (__builtin_constant_p(gpio) && gpio < davinci_soc_info.gpio_num)
return 0;
else
return __gpio_cansleep(gpio);
diff --git a/arch/arm/mach-davinci/include/mach/irqs.h b/arch/arm/mach-davinci/include/mach/irqs.h
index 354af71798dc..ec76c7775c2e 100644
--- a/arch/arm/mach-davinci/include/mach/irqs.h
+++ b/arch/arm/mach-davinci/include/mach/irqs.h
@@ -401,6 +401,103 @@
#define DA850_N_CP_INTC_IRQ 101
+
+/* TNETV107X specific interrupts */
+#define IRQ_TNETV107X_TDM1_TXDMA 0
+#define IRQ_TNETV107X_EXT_INT_0 1
+#define IRQ_TNETV107X_EXT_INT_1 2
+#define IRQ_TNETV107X_GPIO_INT12 3
+#define IRQ_TNETV107X_GPIO_INT13 4
+#define IRQ_TNETV107X_TIMER_0_TINT12 5
+#define IRQ_TNETV107X_TIMER_1_TINT12 6
+#define IRQ_TNETV107X_UART0 7
+#define IRQ_TNETV107X_TDM1_RXDMA 8
+#define IRQ_TNETV107X_MCDMA_INT0 9
+#define IRQ_TNETV107X_MCDMA_INT1 10
+#define IRQ_TNETV107X_TPCC 11
+#define IRQ_TNETV107X_TPCC_INT0 12
+#define IRQ_TNETV107X_TPCC_INT1 13
+#define IRQ_TNETV107X_TPCC_INT2 14
+#define IRQ_TNETV107X_TPCC_INT3 15
+#define IRQ_TNETV107X_TPTC0 16
+#define IRQ_TNETV107X_TPTC1 17
+#define IRQ_TNETV107X_TIMER_0_TINT34 18
+#define IRQ_TNETV107X_ETHSS 19
+#define IRQ_TNETV107X_TIMER_1_TINT34 20
+#define IRQ_TNETV107X_DSP2ARM_INT0 21
+#define IRQ_TNETV107X_DSP2ARM_INT1 22
+#define IRQ_TNETV107X_ARM_NPMUIRQ 23
+#define IRQ_TNETV107X_USB1 24
+#define IRQ_TNETV107X_VLYNQ 25
+#define IRQ_TNETV107X_UART0_DMATX 26
+#define IRQ_TNETV107X_UART0_DMARX 27
+#define IRQ_TNETV107X_TDM1_TXMCSP 28
+#define IRQ_TNETV107X_SSP 29
+#define IRQ_TNETV107X_MCDMA_INT2 30
+#define IRQ_TNETV107X_MCDMA_INT3 31
+#define IRQ_TNETV107X_TDM_CODECIF_EOT 32
+#define IRQ_TNETV107X_IMCOP_SQR_ARM 33
+#define IRQ_TNETV107X_USB0 34
+#define IRQ_TNETV107X_USB_CDMA 35
+#define IRQ_TNETV107X_LCD 36
+#define IRQ_TNETV107X_KEYPAD 37
+#define IRQ_TNETV107X_KEYPAD_FREE 38
+#define IRQ_TNETV107X_RNG 39
+#define IRQ_TNETV107X_PKA 40
+#define IRQ_TNETV107X_TDM0_TXDMA 41
+#define IRQ_TNETV107X_TDM0_RXDMA 42
+#define IRQ_TNETV107X_TDM0_TXMCSP 43
+#define IRQ_TNETV107X_TDM0_RXMCSP 44
+#define IRQ_TNETV107X_TDM1_RXMCSP 45
+#define IRQ_TNETV107X_SDIO1 46
+#define IRQ_TNETV107X_SDIO0 47
+#define IRQ_TNETV107X_TSC 48
+#define IRQ_TNETV107X_TS 49
+#define IRQ_TNETV107X_UART1 50
+#define IRQ_TNETV107X_MBX_LITE 51
+#define IRQ_TNETV107X_GPIO_INT00 52
+#define IRQ_TNETV107X_GPIO_INT01 53
+#define IRQ_TNETV107X_GPIO_INT02 54
+#define IRQ_TNETV107X_GPIO_INT03 55
+#define IRQ_TNETV107X_UART2 56
+#define IRQ_TNETV107X_UART2_DMATX 57
+#define IRQ_TNETV107X_UART2_DMARX 58
+#define IRQ_TNETV107X_IMCOP_IMX 59
+#define IRQ_TNETV107X_IMCOP_VLCD 60
+#define IRQ_TNETV107X_AES 61
+#define IRQ_TNETV107X_DES 62
+#define IRQ_TNETV107X_SHAMD5 63
+#define IRQ_TNETV107X_TPCC_ERR 68
+#define IRQ_TNETV107X_TPCC_PROT 69
+#define IRQ_TNETV107X_TPTC0_ERR 70
+#define IRQ_TNETV107X_TPTC1_ERR 71
+#define IRQ_TNETV107X_UART0_ERR 72
+#define IRQ_TNETV107X_UART1_ERR 73
+#define IRQ_TNETV107X_AEMIF_ERR 74
+#define IRQ_TNETV107X_DDR_ERR 75
+#define IRQ_TNETV107X_WDTARM_INT0 76
+#define IRQ_TNETV107X_MCDMA_ERR 77
+#define IRQ_TNETV107X_GPIO_ERR 78
+#define IRQ_TNETV107X_MPU_ADDR 79
+#define IRQ_TNETV107X_MPU_PROT 80
+#define IRQ_TNETV107X_IOPU_ADDR 81
+#define IRQ_TNETV107X_IOPU_PROT 82
+#define IRQ_TNETV107X_KEYPAD_ADDR_ERR 83
+#define IRQ_TNETV107X_WDT0_ADDR_ERR 84
+#define IRQ_TNETV107X_WDT1_ADDR_ERR 85
+#define IRQ_TNETV107X_CLKCTL_ADDR_ERR 86
+#define IRQ_TNETV107X_PLL_UNLOCK 87
+#define IRQ_TNETV107X_WDTDSP_INT0 88
+#define IRQ_TNETV107X_SEC_CTRL_VIOLATION 89
+#define IRQ_TNETV107X_KEY_MNG_VIOLATION 90
+#define IRQ_TNETV107X_PBIST_CPU 91
+#define IRQ_TNETV107X_WDTARM 92
+#define IRQ_TNETV107X_PSC 93
+#define IRQ_TNETV107X_MMC0 94
+#define IRQ_TNETV107X_MMC1 95
+
+#define TNETV107X_N_CP_INTC_IRQ 96
+
/* da850 currently has the most gpio pins (144) */
#define DAVINCI_N_GPIO 144
/* da850 currently has the most irqs so use DA850_N_CP_INTC_IRQ */
diff --git a/arch/arm/mach-davinci/include/mach/mmc.h b/arch/arm/mach-davinci/include/mach/mmc.h
index 5a85e24f3673..d4f1e9675069 100644
--- a/arch/arm/mach-davinci/include/mach/mmc.h
+++ b/arch/arm/mach-davinci/include/mach/mmc.h
@@ -22,6 +22,9 @@ struct davinci_mmc_config {
/* Version of the MMC/SD controller */
u8 version;
+
+ /* Number of sg segments */
+ u8 nr_sg;
};
void davinci_setup_mmc(int module, struct davinci_mmc_config *config);
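
The new nr_sg field lets board code bound the host controller's scatter-gather list. A hedged sketch of board-level usage; the wires and max_freq fields are assumed to be the pre-existing members of davinci_mmc_config (only nr_sg is added here), and all values are purely illustrative:

#include <linux/init.h>
#include <mach/mmc.h>

static struct davinci_mmc_config example_mmc_config = {
	.wires		= 4,		/* assumed existing field: bus width */
	.max_freq	= 50000000,	/* assumed existing field: max clock */
	.nr_sg		= 16,		/* segments the host may map per request */
};

static void __init example_board_mmc_init(void)
{
	davinci_setup_mmc(0, &example_mmc_config);
}
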
diff --git a/arch/arm/mach-davinci/include/mach/mux.h b/arch/arm/mach-davinci/include/mach/mux.h
index 2a68c1d8a24b..de11aac76a80 100644
--- a/arch/arm/mach-davinci/include/mach/mux.h
+++ b/arch/arm/mach-davinci/include/mach/mux.h
@@ -194,11 +194,14 @@ enum davinci_dm365_index {
DM365_I2C_SCL,
/* AEMIF */
- DM365_AEMIF_AR,
+ DM365_AEMIF_AR_A14,
+ DM365_AEMIF_AR_BA0,
DM365_AEMIF_A3,
DM365_AEMIF_A7,
DM365_AEMIF_D15_8,
DM365_AEMIF_CE0,
+ DM365_AEMIF_CE1,
+ DM365_AEMIF_WE_OE,
/* ASP0 function */
DM365_MCBSP0_BDX,
@@ -287,10 +290,19 @@ enum davinci_dm365_index {
DM365_SPI4_SDENA0,
DM365_SPI4_SDENA1,
+ /* Clock */
+ DM365_CLKOUT0,
+ DM365_CLKOUT1,
+ DM365_CLKOUT2,
+
/* GPIO */
DM365_GPIO20,
+ DM365_GPIO30,
+ DM365_GPIO31,
+ DM365_GPIO32,
DM365_GPIO33,
DM365_GPIO40,
+ DM365_GPIO64_57,
/* Video */
DM365_VOUT_FIELD,
@@ -904,12 +916,288 @@ enum davinci_da850_index {
DA850_RTC_ALARM,
};
+enum davinci_tnetv107x_index {
+ TNETV107X_ASR_A00,
+ TNETV107X_GPIO32,
+ TNETV107X_ASR_A01,
+ TNETV107X_GPIO33,
+ TNETV107X_ASR_A02,
+ TNETV107X_GPIO34,
+ TNETV107X_ASR_A03,
+ TNETV107X_GPIO35,
+ TNETV107X_ASR_A04,
+ TNETV107X_GPIO36,
+ TNETV107X_ASR_A05,
+ TNETV107X_GPIO37,
+ TNETV107X_ASR_A06,
+ TNETV107X_GPIO38,
+ TNETV107X_ASR_A07,
+ TNETV107X_GPIO39,
+ TNETV107X_ASR_A08,
+ TNETV107X_GPIO40,
+ TNETV107X_ASR_A09,
+ TNETV107X_GPIO41,
+ TNETV107X_ASR_A10,
+ TNETV107X_GPIO42,
+ TNETV107X_ASR_A11,
+ TNETV107X_BOOT_STRP_0,
+ TNETV107X_ASR_A12,
+ TNETV107X_BOOT_STRP_1,
+ TNETV107X_ASR_A13,
+ TNETV107X_GPIO43,
+ TNETV107X_ASR_A14,
+ TNETV107X_GPIO44,
+ TNETV107X_ASR_A15,
+ TNETV107X_GPIO45,
+ TNETV107X_ASR_A16,
+ TNETV107X_GPIO46,
+ TNETV107X_ASR_A17,
+ TNETV107X_GPIO47,
+ TNETV107X_ASR_A18,
+ TNETV107X_GPIO48,
+ TNETV107X_SDIO1_DATA3_0,
+ TNETV107X_ASR_A19,
+ TNETV107X_GPIO49,
+ TNETV107X_SDIO1_DATA2_0,
+ TNETV107X_ASR_A20,
+ TNETV107X_GPIO50,
+ TNETV107X_SDIO1_DATA1_0,
+ TNETV107X_ASR_A21,
+ TNETV107X_GPIO51,
+ TNETV107X_SDIO1_DATA0_0,
+ TNETV107X_ASR_A22,
+ TNETV107X_GPIO52,
+ TNETV107X_SDIO1_CMD_0,
+ TNETV107X_ASR_A23,
+ TNETV107X_GPIO53,
+ TNETV107X_SDIO1_CLK_0,
+ TNETV107X_ASR_BA_1,
+ TNETV107X_GPIO54,
+ TNETV107X_SYS_PLL_CLK,
+ TNETV107X_ASR_CS0,
+ TNETV107X_ASR_CS1,
+ TNETV107X_ASR_CS2,
+ TNETV107X_TDM_PLL_CLK,
+ TNETV107X_ASR_CS3,
+ TNETV107X_ETH_PHY_CLK,
+ TNETV107X_ASR_D00,
+ TNETV107X_GPIO55,
+ TNETV107X_ASR_D01,
+ TNETV107X_GPIO56,
+ TNETV107X_ASR_D02,
+ TNETV107X_GPIO57,
+ TNETV107X_ASR_D03,
+ TNETV107X_GPIO58,
+ TNETV107X_ASR_D04,
+ TNETV107X_GPIO59_0,
+ TNETV107X_ASR_D05,
+ TNETV107X_GPIO60_0,
+ TNETV107X_ASR_D06,
+ TNETV107X_GPIO61_0,
+ TNETV107X_ASR_D07,
+ TNETV107X_GPIO62_0,
+ TNETV107X_ASR_D08,
+ TNETV107X_GPIO63_0,
+ TNETV107X_ASR_D09,
+ TNETV107X_GPIO64_0,
+ TNETV107X_ASR_D10,
+ TNETV107X_SDIO1_DATA3_1,
+ TNETV107X_ASR_D11,
+ TNETV107X_SDIO1_DATA2_1,
+ TNETV107X_ASR_D12,
+ TNETV107X_SDIO1_DATA1_1,
+ TNETV107X_ASR_D13,
+ TNETV107X_SDIO1_DATA0_1,
+ TNETV107X_ASR_D14,
+ TNETV107X_SDIO1_CMD_1,
+ TNETV107X_ASR_D15,
+ TNETV107X_SDIO1_CLK_1,
+ TNETV107X_ASR_OE,
+ TNETV107X_BOOT_STRP_2,
+ TNETV107X_ASR_RNW,
+ TNETV107X_GPIO29_0,
+ TNETV107X_ASR_WAIT,
+ TNETV107X_GPIO30_0,
+ TNETV107X_ASR_WE,
+ TNETV107X_BOOT_STRP_3,
+ TNETV107X_ASR_WE_DQM0,
+ TNETV107X_GPIO31,
+ TNETV107X_LCD_PD17_0,
+ TNETV107X_ASR_WE_DQM1,
+ TNETV107X_ASR_BA0_0,
+ TNETV107X_VLYNQ_CLK,
+ TNETV107X_GPIO14,
+ TNETV107X_LCD_PD19_0,
+ TNETV107X_VLYNQ_RXD0,
+ TNETV107X_GPIO15,
+ TNETV107X_LCD_PD20_0,
+ TNETV107X_VLYNQ_RXD1,
+ TNETV107X_GPIO16,
+ TNETV107X_LCD_PD21_0,
+ TNETV107X_VLYNQ_TXD0,
+ TNETV107X_GPIO17,
+ TNETV107X_LCD_PD22_0,
+ TNETV107X_VLYNQ_TXD1,
+ TNETV107X_GPIO18,
+ TNETV107X_LCD_PD23_0,
+ TNETV107X_SDIO0_CLK,
+ TNETV107X_GPIO19,
+ TNETV107X_SDIO0_CMD,
+ TNETV107X_GPIO20,
+ TNETV107X_SDIO0_DATA0,
+ TNETV107X_GPIO21,
+ TNETV107X_SDIO0_DATA1,
+ TNETV107X_GPIO22,
+ TNETV107X_SDIO0_DATA2,
+ TNETV107X_GPIO23,
+ TNETV107X_SDIO0_DATA3,
+ TNETV107X_GPIO24,
+ TNETV107X_EMU0,
+ TNETV107X_EMU1,
+ TNETV107X_RTCK,
+ TNETV107X_TRST_N,
+ TNETV107X_TCK,
+ TNETV107X_TDI,
+ TNETV107X_TDO,
+ TNETV107X_TMS,
+ TNETV107X_TDM1_CLK,
+ TNETV107X_TDM1_RX,
+ TNETV107X_TDM1_TX,
+ TNETV107X_TDM1_FS,
+ TNETV107X_KEYPAD_R0,
+ TNETV107X_KEYPAD_R1,
+ TNETV107X_KEYPAD_R2,
+ TNETV107X_KEYPAD_R3,
+ TNETV107X_KEYPAD_R4,
+ TNETV107X_KEYPAD_R5,
+ TNETV107X_KEYPAD_R6,
+ TNETV107X_GPIO12,
+ TNETV107X_KEYPAD_R7,
+ TNETV107X_GPIO10,
+ TNETV107X_KEYPAD_C0,
+ TNETV107X_KEYPAD_C1,
+ TNETV107X_KEYPAD_C2,
+ TNETV107X_KEYPAD_C3,
+ TNETV107X_KEYPAD_C4,
+ TNETV107X_KEYPAD_C5,
+ TNETV107X_KEYPAD_C6,
+ TNETV107X_GPIO13,
+ TNETV107X_TEST_CLK_IN,
+ TNETV107X_KEYPAD_C7,
+ TNETV107X_GPIO11,
+ TNETV107X_SSP0_0,
+ TNETV107X_SCC_DCLK,
+ TNETV107X_LCD_PD20_1,
+ TNETV107X_SSP0_1,
+ TNETV107X_SCC_CS_N,
+ TNETV107X_LCD_PD21_1,
+ TNETV107X_SSP0_2,
+ TNETV107X_SCC_D,
+ TNETV107X_LCD_PD22_1,
+ TNETV107X_SSP0_3,
+ TNETV107X_SCC_RESETN,
+ TNETV107X_LCD_PD23_1,
+ TNETV107X_SSP1_0,
+ TNETV107X_GPIO25,
+ TNETV107X_UART2_CTS,
+ TNETV107X_SSP1_1,
+ TNETV107X_GPIO26,
+ TNETV107X_UART2_RD,
+ TNETV107X_SSP1_2,
+ TNETV107X_GPIO27,
+ TNETV107X_UART2_RTS,
+ TNETV107X_SSP1_3,
+ TNETV107X_GPIO28,
+ TNETV107X_UART2_TD,
+ TNETV107X_UART0_CTS,
+ TNETV107X_UART0_RD,
+ TNETV107X_UART0_RTS,
+ TNETV107X_UART0_TD,
+ TNETV107X_UART1_RD,
+ TNETV107X_UART1_TD,
+ TNETV107X_LCD_AC_NCS,
+ TNETV107X_LCD_HSYNC_RNW,
+ TNETV107X_LCD_VSYNC_A0,
+ TNETV107X_LCD_MCLK,
+ TNETV107X_LCD_PD16_0,
+ TNETV107X_LCD_PCLK_E,
+ TNETV107X_LCD_PD00,
+ TNETV107X_LCD_PD01,
+ TNETV107X_LCD_PD02,
+ TNETV107X_LCD_PD03,
+ TNETV107X_LCD_PD04,
+ TNETV107X_LCD_PD05,
+ TNETV107X_LCD_PD06,
+ TNETV107X_LCD_PD07,
+ TNETV107X_LCD_PD08,
+ TNETV107X_GPIO59_1,
+ TNETV107X_LCD_PD09,
+ TNETV107X_GPIO60_1,
+ TNETV107X_LCD_PD10,
+ TNETV107X_ASR_BA0_1,
+ TNETV107X_GPIO61_1,
+ TNETV107X_LCD_PD11,
+ TNETV107X_GPIO62_1,
+ TNETV107X_LCD_PD12,
+ TNETV107X_GPIO63_1,
+ TNETV107X_LCD_PD13,
+ TNETV107X_GPIO64_1,
+ TNETV107X_LCD_PD14,
+ TNETV107X_GPIO29_1,
+ TNETV107X_LCD_PD15,
+ TNETV107X_GPIO30_1,
+ TNETV107X_EINT0,
+ TNETV107X_GPIO08,
+ TNETV107X_EINT1,
+ TNETV107X_GPIO09,
+ TNETV107X_GPIO00,
+ TNETV107X_LCD_PD20_2,
+ TNETV107X_TDM_CLK_IN_2,
+ TNETV107X_GPIO01,
+ TNETV107X_LCD_PD21_2,
+ TNETV107X_24M_CLK_OUT_1,
+ TNETV107X_GPIO02,
+ TNETV107X_LCD_PD22_2,
+ TNETV107X_GPIO03,
+ TNETV107X_LCD_PD23_2,
+ TNETV107X_GPIO04,
+ TNETV107X_LCD_PD16_1,
+ TNETV107X_USB0_RXERR,
+ TNETV107X_GPIO05,
+ TNETV107X_LCD_PD17_1,
+ TNETV107X_TDM_CLK_IN_1,
+ TNETV107X_GPIO06,
+ TNETV107X_LCD_PD18,
+ TNETV107X_24M_CLK_OUT_2,
+ TNETV107X_GPIO07,
+ TNETV107X_LCD_PD19_1,
+ TNETV107X_USB1_RXERR,
+ TNETV107X_ETH_PLL_CLK,
+ TNETV107X_MDIO,
+ TNETV107X_MDC,
+ TNETV107X_AIC_MUTE_STAT_N,
+ TNETV107X_TDM0_CLK,
+ TNETV107X_AIC_HNS_EN_N,
+ TNETV107X_TDM0_FS,
+ TNETV107X_AIC_HDS_EN_STAT_N,
+ TNETV107X_TDM0_TX,
+ TNETV107X_AIC_HNF_EN_STAT_N,
+ TNETV107X_TDM0_RX,
+};
+
+#define PINMUX(x) (4 * (x))
+
#ifdef CONFIG_DAVINCI_MUX
/* setup pin muxing */
extern int davinci_cfg_reg(unsigned long reg_cfg);
+extern int davinci_cfg_reg_list(const short pins[]);
#else
/* boot loader does it all (no warnings from CONFIG_DAVINCI_MUX_WARNINGS) */
static inline int davinci_cfg_reg(unsigned long reg_cfg) { return 0; }
+static inline int davinci_cfg_reg_list(const short pins[])
+{
+ return 0;
+}
#endif
#endif /* __INC_MACH_MUX_H */
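
PINMUX(x) replaces the per-SoC PINMUX0..PINMUXn defines: the pinmux registers sit at consecutive 32-bit words, so register x lives at offset 4 * x. A small sketch of touching such a register, assuming a pinmux_base that has already been ioremap()ed the way mux.c does later in this patch:

#include <linux/io.h>
#include <mach/mux.h>		/* PINMUX(x) == (4 * (x)) */

/* sketch: read-modify-write of pinmux register 5; the bits are illustrative */
static void example_set_mux_bits(void __iomem *pinmux_base, u32 bits)
{
	u32 val;

	val = __raw_readl(pinmux_base + PINMUX(5));
	val |= bits;
	__raw_writel(val, pinmux_base + PINMUX(5));
}
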
diff --git a/arch/arm/mach-davinci/include/mach/psc.h b/arch/arm/mach-davinci/include/mach/psc.h
index 651f6d8158fa..983da6e4554c 100644
--- a/arch/arm/mach-davinci/include/mach/psc.h
+++ b/arch/arm/mach-davinci/include/mach/psc.h
@@ -180,6 +180,53 @@
#define DA8XX_LPSC1_CR_P3_SS 26
#define DA8XX_LPSC1_L3_CBA_RAM 31
+/* TNETV107X LPSC Assignments */
+#define TNETV107X_LPSC_ARM 0
+#define TNETV107X_LPSC_GEM 1
+#define TNETV107X_LPSC_DDR2_PHY 2
+#define TNETV107X_LPSC_TPCC 3
+#define TNETV107X_LPSC_TPTC0 4
+#define TNETV107X_LPSC_TPTC1 5
+#define TNETV107X_LPSC_RAM 6
+#define TNETV107X_LPSC_MBX_LITE 7
+#define TNETV107X_LPSC_LCD 8
+#define TNETV107X_LPSC_ETHSS 9
+#define TNETV107X_LPSC_AEMIF 10
+#define TNETV107X_LPSC_CHIP_CFG 11
+#define TNETV107X_LPSC_TSC 12
+#define TNETV107X_LPSC_ROM 13
+#define TNETV107X_LPSC_UART2 14
+#define TNETV107X_LPSC_PKTSEC 15
+#define TNETV107X_LPSC_SECCTL 16
+#define TNETV107X_LPSC_KEYMGR 17
+#define TNETV107X_LPSC_KEYPAD 18
+#define TNETV107X_LPSC_GPIO 19
+#define TNETV107X_LPSC_MDIO 20
+#define TNETV107X_LPSC_SDIO0 21
+#define TNETV107X_LPSC_UART0 22
+#define TNETV107X_LPSC_UART1 23
+#define TNETV107X_LPSC_TIMER0 24
+#define TNETV107X_LPSC_TIMER1 25
+#define TNETV107X_LPSC_WDT_ARM 26
+#define TNETV107X_LPSC_WDT_DSP 27
+#define TNETV107X_LPSC_SSP 28
+#define TNETV107X_LPSC_TDM0 29
+#define TNETV107X_LPSC_VLYNQ 30
+#define TNETV107X_LPSC_MCDMA 31
+#define TNETV107X_LPSC_USB0 32
+#define TNETV107X_LPSC_TDM1 33
+#define TNETV107X_LPSC_DEBUGSS 34
+#define TNETV107X_LPSC_ETHSS_RGMII 35
+#define TNETV107X_LPSC_SYSTEM 36
+#define TNETV107X_LPSC_IMCOP 37
+#define TNETV107X_LPSC_SPARE 38
+#define TNETV107X_LPSC_SDIO1 39
+#define TNETV107X_LPSC_USB1 40
+#define TNETV107X_LPSC_USBSS 41
+#define TNETV107X_LPSC_DDR2_EMIF1_VRST 42
+#define TNETV107X_LPSC_DDR2_EMIF2_VCTL_RST 43
+#define TNETV107X_LPSC_MAX 44
+
/* PSC register offsets */
#define EPCPR 0x070
#define PTCMD 0x120
@@ -189,13 +236,19 @@
#define MDSTAT 0x800
#define MDCTL 0xA00
+/* PSC module states */
+#define PSC_STATE_SWRSTDISABLE 0
+#define PSC_STATE_SYNCRST 1
+#define PSC_STATE_DISABLE 2
+#define PSC_STATE_ENABLE 3
+
#define MDSTAT_STATE_MASK 0x1f
#ifndef __ASSEMBLER__
extern int davinci_psc_is_clk_active(unsigned int ctlr, unsigned int id);
extern void davinci_psc_config(unsigned int domain, unsigned int ctlr,
- unsigned int id, char enable);
+ unsigned int id, u32 next_state);
#endif
diff --git a/arch/arm/mach-davinci/include/mach/serial.h b/arch/arm/mach-davinci/include/mach/serial.h
index a584697a9e70..f6c4f34909a2 100644
--- a/arch/arm/mach-davinci/include/mach/serial.h
+++ b/arch/arm/mach-davinci/include/mach/serial.h
@@ -13,7 +13,6 @@
#include <mach/hardware.h>
-#define DAVINCI_MAX_NR_UARTS 3
#define DAVINCI_UART0_BASE (IO_PHYS + 0x20000)
#define DAVINCI_UART1_BASE (IO_PHYS + 0x20400)
#define DAVINCI_UART2_BASE (IO_PHYS + 0x20800)
diff --git a/arch/arm/mach-davinci/include/mach/system.h b/arch/arm/mach-davinci/include/mach/system.h
index 5a7d7581b8ce..e65629c20769 100644
--- a/arch/arm/mach-davinci/include/mach/system.h
+++ b/arch/arm/mach-davinci/include/mach/system.h
@@ -11,7 +11,7 @@
#ifndef __ASM_ARCH_SYSTEM_H
#define __ASM_ARCH_SYSTEM_H
-extern void davinci_watchdog_reset(void);
+#include <mach/common.h>
static inline void arch_idle(void)
{
@@ -20,7 +20,8 @@ static inline void arch_idle(void)
static inline void arch_reset(char mode, const char *cmd)
{
- davinci_watchdog_reset();
+ if (davinci_soc_info.reset)
+ davinci_soc_info.reset(davinci_soc_info.reset_device);
}
#endif /* __ASM_ARCH_SYSTEM_H */
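
arch_reset() no longer hard-codes the watchdog: it calls whatever reset hook the SoC registered in davinci_soc_info, passing the reset_device also stored there (dm646x above points it at davinci_wdt_device). A hedged sketch of how a SoC file might wire this up; the platform_device-taking signature of davinci_watchdog_reset() is an assumption, not something shown in this hunk:

#include <linux/platform_device.h>
#include <mach/common.h>

/* sketch only: helper name and signature are assumptions */
static void example_soc_reset(struct platform_device *pdev)
{
	davinci_watchdog_reset(pdev);
}

static struct davinci_soc_info example_soc_info = {
	/* ...the usual fields, as in davinci_soc_info_dm646x above... */
	.reset		= example_soc_reset,
	.reset_device	= &davinci_wdt_device,
};
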
diff --git a/arch/arm/mach-davinci/io.c b/arch/arm/mach-davinci/io.c
index a1c0b6b99edf..8ea60a8b2495 100644
--- a/arch/arm/mach-davinci/io.c
+++ b/arch/arm/mach-davinci/io.c
@@ -12,19 +12,29 @@
#include <linux/io.h>
#include <asm/tlb.h>
+#include <asm/mach/map.h>
-#define BETWEEN(p, st, sz) ((p) >= (st) && (p) < ((st) + (sz)))
-#define XLATE(p, pst, vst) ((void __iomem *)((p) - (pst) + (vst)))
+#include <mach/common.h>
/*
* Intercept ioremap() requests for addresses in our fixed mapping regions.
*/
void __iomem *davinci_ioremap(unsigned long p, size_t size, unsigned int type)
{
- if (BETWEEN(p, IO_PHYS, IO_SIZE))
- return XLATE(p, IO_PHYS, IO_VIRT);
+ struct map_desc *desc = davinci_soc_info.io_desc;
+ int desc_num = davinci_soc_info.io_desc_num;
+ int i;
- return __arm_ioremap_caller(p, size, type, __builtin_return_address(0));
+ for (i = 0; i < desc_num; i++, desc++) {
+ unsigned long iophys = __pfn_to_phys(desc->pfn);
+ unsigned long iosize = desc->length;
+
+ if (p >= iophys && (p + size) <= (iophys + iosize))
+ return __io(desc->virtual + p - iophys);
+ }
+
+ return __arm_ioremap_caller(p, size, type,
+ __builtin_return_address(0));
}
EXPORT_SYMBOL(davinci_ioremap);
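
davinci_ioremap() now resolves requests against the SoC's static io_desc map instead of the fixed IO_PHYS window, so physical ranges covered by a map_desc entry come back as their fixed virtual addresses without creating a new mapping. A minimal sketch of the kind of entry it walks; the addresses are illustrative, not a real SoC layout, and SZ_4M is assumed available through the usual ARM headers:

#include <asm/mach/map.h>
#include <asm/memory.h>		/* __phys_to_pfn() */

static struct map_desc example_io_desc[] __initdata = {
	{
		.virtual	= 0xfec00000,		/* fixed virtual window */
		.pfn		= __phys_to_pfn(0x01c00000),
		.length		= SZ_4M,
		.type		= MT_DEVICE,
	},
};
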
diff --git a/arch/arm/mach-davinci/irq.c b/arch/arm/mach-davinci/irq.c
index af92ffee8471..784ddf3c5ad4 100644
--- a/arch/arm/mach-davinci/irq.c
+++ b/arch/arm/mach-davinci/irq.c
@@ -116,6 +116,11 @@ void __init davinci_irq_init(void)
unsigned i;
const u8 *davinci_def_priorities = davinci_soc_info.intc_irq_prios;
+ davinci_intc_type = DAVINCI_INTC_TYPE_AINTC;
+ davinci_intc_base = ioremap(davinci_soc_info.intc_base, SZ_4K);
+ if (WARN_ON(!davinci_intc_base))
+ return;
+
/* Clear all interrupt requests */
davinci_irq_writel(~0x0, FIQ_REG0_OFFSET);
davinci_irq_writel(~0x0, FIQ_REG1_OFFSET);
@@ -148,7 +153,7 @@ void __init davinci_irq_init(void)
}
/* set up genirq dispatch for ARM INTC */
- for (i = 0; i < DAVINCI_N_AINTC_IRQ; i++) {
+ for (i = 0; i < davinci_soc_info.intc_irq_num; i++) {
set_irq_chip(i, &davinci_irq_chip_0);
set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
if (i != IRQ_TINT1_TINT34)
diff --git a/arch/arm/mach-davinci/mux.c b/arch/arm/mach-davinci/mux.c
index f757e83415f3..f34a8dcdae2b 100644
--- a/arch/arm/mach-davinci/mux.c
+++ b/arch/arm/mach-davinci/mux.c
@@ -22,6 +22,8 @@
#include <mach/mux.h>
#include <mach/common.h>
+static void __iomem *pinmux_base;
+
/*
* Sets the DAVINCI MUX register based on the table
*/
@@ -29,14 +31,19 @@ int __init_or_module davinci_cfg_reg(const unsigned long index)
{
static DEFINE_SPINLOCK(mux_spin_lock);
struct davinci_soc_info *soc_info = &davinci_soc_info;
- void __iomem *base = soc_info->pinmux_base;
unsigned long flags;
const struct mux_config *cfg;
unsigned int reg_orig = 0, reg = 0;
unsigned int mask, warn = 0;
- if (!soc_info->pinmux_pins)
- BUG();
+ if (WARN_ON(!soc_info->pinmux_pins))
+ return -ENODEV;
+
+ if (!pinmux_base) {
+ pinmux_base = ioremap(soc_info->pinmux_base, SZ_4K);
+ if (WARN_ON(!pinmux_base))
+ return -ENOMEM;
+ }
if (index >= soc_info->pinmux_pins_num) {
printk(KERN_ERR "Invalid pin mux index: %lu (%lu)\n",
@@ -57,7 +64,7 @@ int __init_or_module davinci_cfg_reg(const unsigned long index)
unsigned tmp1, tmp2;
spin_lock_irqsave(&mux_spin_lock, flags);
- reg_orig = __raw_readl(base + cfg->mux_reg);
+ reg_orig = __raw_readl(pinmux_base + cfg->mux_reg);
mask = (cfg->mask << cfg->mask_offset);
tmp1 = reg_orig & mask;
@@ -69,7 +76,7 @@ int __init_or_module davinci_cfg_reg(const unsigned long index)
if (tmp1 != tmp2)
warn = 1;
- __raw_writel(reg, base + cfg->mux_reg);
+ __raw_writel(reg, pinmux_base + cfg->mux_reg);
spin_unlock_irqrestore(&mux_spin_lock, flags);
}
@@ -91,7 +98,7 @@ int __init_or_module davinci_cfg_reg(const unsigned long index)
}
EXPORT_SYMBOL(davinci_cfg_reg);
-int da8xx_pinmux_setup(const short pins[])
+int __init_or_module davinci_cfg_reg_list(const short pins[])
{
int i, error = -EINVAL;
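
Editor's note: with the rename of da8xx_pinmux_setup() to davinci_cfg_reg_list() and the switch from BUG() to error returns, board code can configure a pin list and check the result. A hedged usage sketch, with illustrative pin enum values and a -1 terminator assumed by convention:

/*
 * Hedged board-side sketch (pin names and terminator are assumptions).
 */
static const short my_board_uart_pins[] __initconst = {
	DA850_UART2_TXD, DA850_UART2_RXD,
	-1
};

static int __init my_board_pinmux(void)
{
	int ret = davinci_cfg_reg_list(my_board_uart_pins);

	if (ret)
		pr_warning("pinmux setup failed: %d\n", ret);
	return ret;
}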
diff --git a/arch/arm/mach-davinci/mux.h b/arch/arm/mach-davinci/mux.h
index adc869413371..5aad1e7dd210 100644
--- a/arch/arm/mach-davinci/mux.h
+++ b/arch/arm/mach-davinci/mux.h
@@ -20,7 +20,7 @@
.name = #desc, \
.debug = dbg, \
.mux_reg_name = "PINMUX"#muxreg, \
- .mux_reg = PINMUX##muxreg, \
+ .mux_reg = PINMUX(muxreg), \
.mask_offset = mode_offset, \
.mask = mode_mask, \
.mode = mux_mode, \
diff --git a/arch/arm/mach-davinci/psc.c b/arch/arm/mach-davinci/psc.c
index adf6b5c7f1e5..1b15dbd0a77b 100644
--- a/arch/arm/mach-davinci/psc.c
+++ b/arch/arm/mach-davinci/psc.c
@@ -38,8 +38,9 @@ int __init davinci_psc_is_clk_active(unsigned int ctlr, unsigned int id)
return 0;
}
- psc_base = soc_info->psc_bases[ctlr];
+ psc_base = ioremap(soc_info->psc_bases[ctlr], SZ_4K);
mdstat = __raw_readl(psc_base + MDSTAT + 4 * id);
+ iounmap(psc_base);
/* if clocked, state can be "Enable" or "SyncReset" */
return mdstat & BIT(12);
@@ -47,12 +48,11 @@ int __init davinci_psc_is_clk_active(unsigned int ctlr, unsigned int id)
/* Enable or disable a PSC domain */
void davinci_psc_config(unsigned int domain, unsigned int ctlr,
- unsigned int id, char enable)
+ unsigned int id, u32 next_state)
{
u32 epcpr, ptcmd, ptstat, pdstat, pdctl1, mdstat, mdctl;
void __iomem *psc_base;
struct davinci_soc_info *soc_info = &davinci_soc_info;
- u32 next_state = enable ? 0x3 : 0x2; /* 0x3 enables, 0x2 disables */
if (!soc_info->psc_bases || (ctlr >= soc_info->psc_bases_num)) {
pr_warning("PSC: Bad psc data: 0x%x[%d]\n",
@@ -60,7 +60,7 @@ void davinci_psc_config(unsigned int domain, unsigned int ctlr,
return;
}
- psc_base = soc_info->psc_bases[ctlr];
+ psc_base = ioremap(soc_info->psc_bases[ctlr], SZ_4K);
mdctl = __raw_readl(psc_base + MDCTL + 4 * id);
mdctl &= ~MDSTAT_STATE_MASK;
@@ -100,4 +100,6 @@ void davinci_psc_config(unsigned int domain, unsigned int ctlr,
do {
mdstat = __raw_readl(psc_base + MDSTAT + 4 * id);
} while (!((mdstat & MDSTAT_STATE_MASK) == next_state));
+
+ iounmap(psc_base);
}
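
Editor's note: davinci_psc_config() now takes an explicit next_state instead of a boolean enable, so callers can also request SwRstDisable or SyncReset. A hedged sketch of the new call convention; the controller, domain and module id below are placeholders:

/*
 * Hedged sketch of the new interface. PSC_STATE_* come from <mach/psc.h>
 * as added earlier in this patch; the LPSC id is illustrative.
 */
#include <mach/psc.h>

static void example_psc_toggle(void)
{
	/* was: davinci_psc_config(domain, ctlr, id, 1); */
	davinci_psc_config(0, 0, DAVINCI_LPSC_UART0, PSC_STATE_ENABLE);

	/* was: davinci_psc_config(domain, ctlr, id, 0); */
	davinci_psc_config(0, 0, DAVINCI_LPSC_UART0, PSC_STATE_DISABLE);
}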
diff --git a/arch/arm/mach-davinci/serial.c b/arch/arm/mach-davinci/serial.c
index 7ce5ba086575..1875740fe27c 100644
--- a/arch/arm/mach-davinci/serial.c
+++ b/arch/arm/mach-davinci/serial.c
@@ -35,14 +35,20 @@ static inline unsigned int serial_read_reg(struct plat_serial8250_port *up,
int offset)
{
offset <<= up->regshift;
- return (unsigned int)__raw_readl(IO_ADDRESS(up->mapbase) + offset);
+
+ WARN_ONCE(!up->membase, "unmapped read: uart[%d]\n", offset);
+
+ return (unsigned int)__raw_readl(up->membase + offset);
}
static inline void serial_write_reg(struct plat_serial8250_port *p, int offset,
int value)
{
offset <<= p->regshift;
- __raw_writel(value, IO_ADDRESS(p->mapbase) + offset);
+
+ WARN_ONCE(!p->membase, "unmapped write: uart[%d]\n", offset);
+
+ __raw_writel(value, p->membase + offset);
}
static void __init davinci_serial_reset(struct plat_serial8250_port *p)
@@ -77,20 +83,32 @@ int __init davinci_serial_init(struct davinci_uart_config *info)
* Make sure the serial ports are muxed on at this point.
* You have to mux them off in device drivers later on if not needed.
*/
- for (i = 0; i < DAVINCI_MAX_NR_UARTS; i++, p++) {
+ for (i = 0; p->flags; i++, p++) {
if (!(info->enabled_uarts & (1 << i)))
continue;
sprintf(name, "uart%d", i);
uart_clk = clk_get(dev, name);
- if (IS_ERR(uart_clk))
+ if (IS_ERR(uart_clk)) {
printk(KERN_ERR "%s:%d: failed to get UART%d clock\n",
__func__, __LINE__, i);
- else {
- clk_enable(uart_clk);
- p->uartclk = clk_get_rate(uart_clk);
- davinci_serial_reset(p);
+ continue;
}
+
+ clk_enable(uart_clk);
+ p->uartclk = clk_get_rate(uart_clk);
+
+ if (!p->membase && p->mapbase) {
+ p->membase = ioremap(p->mapbase, SZ_4K);
+
+ if (p->membase)
+ p->flags &= ~UPF_IOREMAP;
+ else
+ pr_err("uart regs ioremap failed\n");
+ }
+
+ if (p->membase && p->type != PORT_AR7)
+ davinci_serial_reset(p);
}
return platform_device_register(soc_info->serial_dev);
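
Editor's note: the UART loop no longer runs to a fixed DAVINCI_MAX_NR_UARTS; it stops at the first table entry with no flags and ioremaps membase on demand. A hedged sketch of a compatible 8250 port table; base address, IRQ name and flags are taken from typical DaVinci usage and are assumptions here:

/*
 * Hedged sketch: flag-terminated plat_serial8250_port table. The sentinel
 * entry with .flags == 0 ends the loop above.
 */
#include <linux/serial_8250.h>

static struct plat_serial8250_port my_serial_platform_data[] = {
	{
		.mapbase	= DAVINCI_UART0_BASE,
		.irq		= IRQ_UARTINT0,
		.flags		= UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_IOREMAP,
		.iotype		= UPIO_MEM,
		.regshift	= 2,
	},
	{
		.flags		= 0,	/* terminator: loop stops here */
	},
};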
diff --git a/arch/arm/mach-davinci/time.c b/arch/arm/mach-davinci/time.c
index 9e0b106b4f5f..0f21c36e65dd 100644
--- a/arch/arm/mach-davinci/time.c
+++ b/arch/arm/mach-davinci/time.c
@@ -197,32 +197,36 @@ static void __init timer_init(void)
{
struct davinci_soc_info *soc_info = &davinci_soc_info;
struct davinci_timer_instance *dtip = soc_info->timer_info->timers;
+ void __iomem *base[2];
int i;
/* Global init of each 64-bit timer as a whole */
for(i=0; i<2; i++) {
u32 tgcr;
- void __iomem *base = dtip[i].base;
+
+ base[i] = ioremap(dtip[i].base, SZ_4K);
+ if (WARN_ON(!base[i]))
+ continue;
/* Disabled, Internal clock source */
- __raw_writel(0, base + TCR);
+ __raw_writel(0, base[i] + TCR);
/* reset both timers, no pre-scaler for timer34 */
tgcr = 0;
- __raw_writel(tgcr, base + TGCR);
+ __raw_writel(tgcr, base[i] + TGCR);
/* Set both timers to unchained 32-bit */
tgcr = TGCR_TIMMODE_32BIT_UNCHAINED << TGCR_TIMMODE_SHIFT;
- __raw_writel(tgcr, base + TGCR);
+ __raw_writel(tgcr, base[i] + TGCR);
/* Unreset timers */
tgcr |= (TGCR_UNRESET << TGCR_TIM12RS_SHIFT) |
(TGCR_UNRESET << TGCR_TIM34RS_SHIFT);
- __raw_writel(tgcr, base + TGCR);
+ __raw_writel(tgcr, base[i] + TGCR);
/* Init both counters to zero */
- __raw_writel(0, base + TIM12);
- __raw_writel(0, base + TIM34);
+ __raw_writel(0, base[i] + TIM12);
+ __raw_writel(0, base[i] + TIM34);
}
/* Init of each timer as a 32-bit timer */
@@ -231,7 +235,9 @@ static void __init timer_init(void)
int timer = ID_TO_TIMER(t->id);
u32 irq;
- t->base = dtip[timer].base;
+ t->base = base[timer];
+ if (!t->base)
+ continue;
if (IS_TIMER_BOT(t->id)) {
t->enamode_shift = 6;
@@ -361,13 +367,13 @@ static void __init davinci_timer_init(void)
}
}
- /* init timer hw */
- timer_init();
-
timer_clk = clk_get(NULL, "timer0");
BUG_ON(IS_ERR(timer_clk));
clk_enable(timer_clk);
+ /* init timer hw */
+ timer_init();
+
davinci_clock_tick_rate = clk_get_rate(timer_clk);
/* setup clocksource */
@@ -399,13 +405,16 @@ struct sys_timer davinci_timer = {
/* reset board using watchdog timer */
-void davinci_watchdog_reset(void)
+void davinci_watchdog_reset(struct platform_device *pdev)
{
u32 tgcr, wdtcr;
- struct platform_device *pdev = &davinci_wdt_device;
- void __iomem *base = IO_ADDRESS(pdev->resource[0].start);
+ void __iomem *base;
struct clk *wd_clk;
+ base = ioremap(pdev->resource[0].start, SZ_4K);
+ if (WARN_ON(!base))
+ return;
+
wd_clk = clk_get(&pdev->dev, NULL);
if (WARN_ON(IS_ERR(wd_clk)))
return;
diff --git a/arch/arm/mach-ep93xx/adssphere.c b/arch/arm/mach-ep93xx/adssphere.c
index caf6d5154aec..3a1a855bfdca 100644
--- a/arch/arm/mach-ep93xx/adssphere.c
+++ b/arch/arm/mach-ep93xx/adssphere.c
@@ -41,7 +41,7 @@ static struct platform_device adssphere_flash = {
.resource = &adssphere_flash_resource,
};
-static struct ep93xx_eth_data adssphere_eth_data = {
+static struct ep93xx_eth_data __initdata adssphere_eth_data = {
.phy_id = 1,
};
diff --git a/arch/arm/mach-ep93xx/clock.c b/arch/arm/mach-ep93xx/clock.c
index 5f80092b6ace..e29bdef9b2e2 100644
--- a/arch/arm/mach-ep93xx/clock.c
+++ b/arch/arm/mach-ep93xx/clock.c
@@ -96,6 +96,10 @@ static struct clk clk_keypad = {
.enable_mask = EP93XX_SYSCON_KEYTCHCLKDIV_KEN,
.set_rate = set_keytchclk_rate,
};
+static struct clk clk_spi = {
+ .parent = &clk_xtali,
+ .rate = EP93XX_EXT_CLK_RATE,
+};
static struct clk clk_pwm = {
.parent = &clk_xtali,
.rate = EP93XX_EXT_CLK_RATE,
@@ -186,6 +190,7 @@ static struct clk_lookup clocks[] = {
INIT_CK("ep93xx-ohci", NULL, &clk_usb_host),
INIT_CK("ep93xx-keypad", NULL, &clk_keypad),
INIT_CK("ep93xx-fb", NULL, &clk_video),
+ INIT_CK("ep93xx-spi.0", NULL, &clk_spi),
INIT_CK(NULL, "pwm_clk", &clk_pwm),
INIT_CK(NULL, "m2p0", &clk_m2p0),
INIT_CK(NULL, "m2p1", &clk_m2p1),
@@ -473,6 +478,14 @@ static int __init ep93xx_clock_init(void)
/* Initialize the pll2 derived clocks */
clk_usb_host.rate = clk_pll2.rate / (((value >> 28) & 0xf) + 1);
+ /*
+ * EP93xx SSP clock rate was doubled in version E2. For more information
+ * see:
+ * http://www.cirrus.com/en/pubs/appNote/AN273REV4.pdf
+ */
+ if (ep93xx_chip_revision() < EP93XX_CHIP_REV_E2)
+ clk_spi.rate /= 2;
+
pr_info("PLL1 running at %ld MHz, PLL2 at %ld MHz\n",
clk_pll1.rate / 1000000, clk_pll2.rate / 1000000);
pr_info("FCLK %ld MHz, HCLK %ld MHz, PCLK %ld MHz\n",
diff --git a/arch/arm/mach-ep93xx/core.c b/arch/arm/mach-ep93xx/core.c
index 90fb591cbffa..9092677f63eb 100644
--- a/arch/arm/mach-ep93xx/core.c
+++ b/arch/arm/mach-ep93xx/core.c
@@ -31,10 +31,12 @@
#include <linux/amba/serial.h>
#include <linux/i2c.h>
#include <linux/i2c-gpio.h>
+#include <linux/spi/spi.h>
#include <mach/hardware.h>
#include <mach/fb.h>
#include <mach/ep93xx_keypad.h>
+#include <mach/ep93xx_spi.h>
#include <asm/mach/map.h>
#include <asm/mach/time.h>
@@ -222,6 +224,20 @@ void ep93xx_devcfg_set_clear(unsigned int set_bits, unsigned int clear_bits)
}
EXPORT_SYMBOL(ep93xx_devcfg_set_clear);
+/**
+ * ep93xx_chip_revision() - returns the EP93xx chip revision
+ *
+ * See <mach/platform.h> for more information.
+ */
+unsigned int ep93xx_chip_revision(void)
+{
+ unsigned int v;
+
+ v = __raw_readl(EP93XX_SYSCON_SYSCFG);
+ v &= EP93XX_SYSCON_SYSCFG_REV_MASK;
+ v >>= EP93XX_SYSCON_SYSCFG_REV_SHIFT;
+ return v;
+}
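
Editor's note: this helper decodes the SYSCFG revision field; the clock.c hunk earlier in this patch uses it to halve the SSP clock on pre-E2 silicon. A small, hedged usage sketch (the function name is illustrative):

/*
 * Hedged sketch showing the revision check pattern used by the SPI clock
 * setup in this series.
 */
static void __init report_ep93xx_revision(void)
{
	unsigned int rev = ep93xx_chip_revision();

	/* Revisions below EP93XX_CHIP_REV_E2 run the SSP clock at half rate. */
	pr_info("EP93xx silicon revision %u (%s E2)\n", rev,
		rev < EP93XX_CHIP_REV_E2 ? "pre" : ">=");
}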
/*************************************************************************
* EP93xx peripheral handling
@@ -330,6 +346,10 @@ static struct platform_device ep93xx_ohci_device = {
.resource = ep93xx_ohci_resources,
};
+
+/*************************************************************************
+ * EP93xx ethernet peripheral handling
+ *************************************************************************/
static struct ep93xx_eth_data ep93xx_eth_data;
static struct resource ep93xx_eth_resource[] = {
@@ -354,6 +374,12 @@ static struct platform_device ep93xx_eth_device = {
.resource = ep93xx_eth_resource,
};
+/**
+ * ep93xx_register_eth - Register the built-in ethernet platform device.
+ * @data: platform specific ethernet configuration (__initdata)
+ * @copy_addr: flag indicating that the MAC address should be copied
+ * from the IndAd registers (as programmed by the bootloader)
+ */
void __init ep93xx_register_eth(struct ep93xx_eth_data *data, int copy_addr)
{
if (copy_addr)
@@ -370,11 +396,19 @@ void __init ep93xx_register_eth(struct ep93xx_eth_data *data, int copy_addr)
static struct i2c_gpio_platform_data ep93xx_i2c_data;
static struct platform_device ep93xx_i2c_device = {
- .name = "i2c-gpio",
- .id = 0,
- .dev.platform_data = &ep93xx_i2c_data,
+ .name = "i2c-gpio",
+ .id = 0,
+ .dev = {
+ .platform_data = &ep93xx_i2c_data,
+ },
};
+/**
+ * ep93xx_register_i2c - Register the i2c platform device.
+ * @data: platform specific i2c-gpio configuration (__initdata)
+ * @devices: platform specific i2c bus device information (__initdata)
+ * @num: the number of devices on the i2c bus
+ */
void __init ep93xx_register_i2c(struct i2c_gpio_platform_data *data,
struct i2c_board_info *devices, int num)
{
@@ -398,17 +432,67 @@ void __init ep93xx_register_i2c(struct i2c_gpio_platform_data *data,
platform_device_register(&ep93xx_i2c_device);
}
+/*************************************************************************
+ * EP93xx SPI peripheral handling
+ *************************************************************************/
+static struct ep93xx_spi_info ep93xx_spi_master_data;
+
+static struct resource ep93xx_spi_resources[] = {
+ {
+ .start = EP93XX_SPI_PHYS_BASE,
+ .end = EP93XX_SPI_PHYS_BASE + 0x18 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = IRQ_EP93XX_SSP,
+ .end = IRQ_EP93XX_SSP,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device ep93xx_spi_device = {
+ .name = "ep93xx-spi",
+ .id = 0,
+ .dev = {
+ .platform_data = &ep93xx_spi_master_data,
+ },
+ .num_resources = ARRAY_SIZE(ep93xx_spi_resources),
+ .resource = ep93xx_spi_resources,
+};
+
+/**
+ * ep93xx_register_spi() - registers spi platform device
+ * @info: ep93xx board specific spi master info (__initdata)
+ * @devices: SPI devices to register (__initdata)
+ * @num: number of SPI devices to register
+ *
+ * This function registers platform device for the EP93xx SPI controller and
+ * also makes sure that SPI pins are muxed so that I2S is not using those pins.
+ */
+void __init ep93xx_register_spi(struct ep93xx_spi_info *info,
+ struct spi_board_info *devices, int num)
+{
+ /*
+ * When SPI is used, we need to make sure that I2S is muxed off from
+ * SPI pins.
+ */
+ ep93xx_devcfg_clear_bits(EP93XX_SYSCON_DEVCFG_I2SONSSP);
+
+ ep93xx_spi_master_data = *info;
+ spi_register_board_info(devices, num);
+ platform_device_register(&ep93xx_spi_device);
+}
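
Editor's note: a hedged board-side sketch of registering the new SPI master with one chip select and a single device. The board names and the "m25p80" modalias are assumptions for illustration, not taken from this patch:

/*
 * Hedged sketch: register the EP93xx SPI master from board init code.
 */
static struct ep93xx_spi_info my_spi_info __initdata = {
	.num_chipselect	= 1,
};

static struct spi_board_info my_spi_devices[] __initdata = {
	{
		.modalias	= "m25p80",		/* illustrative slave */
		.max_speed_hz	= 10 * 1000 * 1000,
		.bus_num	= 0,
		.chip_select	= 0,
	},
};

static void __init my_board_init_spi(void)
{
	ep93xx_register_spi(&my_spi_info, my_spi_devices,
			    ARRAY_SIZE(my_spi_devices));
}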
/*************************************************************************
* EP93xx LEDs
*************************************************************************/
static struct gpio_led ep93xx_led_pins[] = {
{
- .name = "platform:grled",
- .gpio = EP93XX_GPIO_LINE_GRLED,
+ .name = "platform:grled",
+ .gpio = EP93XX_GPIO_LINE_GRLED,
}, {
- .name = "platform:rdled",
- .gpio = EP93XX_GPIO_LINE_RDLED,
+ .name = "platform:rdled",
+ .gpio = EP93XX_GPIO_LINE_RDLED,
},
};
@@ -528,7 +612,7 @@ static struct platform_device ep93xx_fb_device = {
.name = "ep93xx-fb",
.id = -1,
.dev = {
- .platform_data = &ep93xxfb_data,
+ .platform_data = &ep93xxfb_data,
.coherent_dma_mask = DMA_BIT_MASK(32),
.dma_mask = &ep93xx_fb_device.dev.coherent_dma_mask,
},
@@ -536,6 +620,10 @@ static struct platform_device ep93xx_fb_device = {
.resource = ep93xx_fb_resource,
};
+/**
+ * ep93xx_register_fb - Register the framebuffer platform device.
+ * @data: platform specific framebuffer configuration (__initdata)
+ */
void __init ep93xx_register_fb(struct ep93xxfb_mach_info *data)
{
ep93xxfb_data = *data;
@@ -546,6 +634,8 @@ void __init ep93xx_register_fb(struct ep93xxfb_mach_info *data)
/*************************************************************************
* EP93xx matrix keypad peripheral handling
*************************************************************************/
+static struct ep93xx_keypad_platform_data ep93xx_keypad_data;
+
static struct resource ep93xx_keypad_resource[] = {
{
.start = EP93XX_KEY_MATRIX_PHYS_BASE,
@@ -559,15 +649,22 @@ static struct resource ep93xx_keypad_resource[] = {
};
static struct platform_device ep93xx_keypad_device = {
- .name = "ep93xx-keypad",
- .id = -1,
- .num_resources = ARRAY_SIZE(ep93xx_keypad_resource),
- .resource = ep93xx_keypad_resource,
+ .name = "ep93xx-keypad",
+ .id = -1,
+ .dev = {
+ .platform_data = &ep93xx_keypad_data,
+ },
+ .num_resources = ARRAY_SIZE(ep93xx_keypad_resource),
+ .resource = ep93xx_keypad_resource,
};
+/**
+ * ep93xx_register_keypad - Register the keypad platform device.
+ * @data: platform specific keypad configuration (__initdata)
+ */
void __init ep93xx_register_keypad(struct ep93xx_keypad_platform_data *data)
{
- ep93xx_keypad_device.dev.platform_data = data;
+ ep93xx_keypad_data = *data;
platform_device_register(&ep93xx_keypad_device);
}
diff --git a/arch/arm/mach-ep93xx/edb93xx.c b/arch/arm/mach-ep93xx/edb93xx.c
index d22d67ac8b99..3884182cd362 100644
--- a/arch/arm/mach-ep93xx/edb93xx.c
+++ b/arch/arm/mach-ep93xx/edb93xx.c
@@ -74,7 +74,7 @@ static void __init edb93xx_register_flash(void)
}
}
-static struct ep93xx_eth_data edb93xx_eth_data = {
+static struct ep93xx_eth_data __initdata edb93xx_eth_data = {
.phy_id = 1,
};
@@ -82,7 +82,7 @@ static struct ep93xx_eth_data edb93xx_eth_data = {
/*************************************************************************
* EDB93xx i2c peripheral handling
*************************************************************************/
-static struct i2c_gpio_platform_data edb93xx_i2c_gpio_data = {
+static struct i2c_gpio_platform_data __initdata edb93xx_i2c_gpio_data = {
.sda_pin = EP93XX_GPIO_LINE_EEDAT,
.sda_is_open_drain = 0,
.scl_pin = EP93XX_GPIO_LINE_EECLK,
diff --git a/arch/arm/mach-ep93xx/gesbc9312.c b/arch/arm/mach-ep93xx/gesbc9312.c
index 3da7ca816d19..a809618e9f05 100644
--- a/arch/arm/mach-ep93xx/gesbc9312.c
+++ b/arch/arm/mach-ep93xx/gesbc9312.c
@@ -41,7 +41,7 @@ static struct platform_device gesbc9312_flash = {
.resource = &gesbc9312_flash_resource,
};
-static struct ep93xx_eth_data gesbc9312_eth_data = {
+static struct ep93xx_eth_data __initdata gesbc9312_eth_data = {
.phy_id = 1,
};
diff --git a/arch/arm/mach-ep93xx/include/mach/ep93xx-regs.h b/arch/arm/mach-ep93xx/include/mach/ep93xx-regs.h
index 93e2ecc79ceb..b1e096f0c2d2 100644
--- a/arch/arm/mach-ep93xx/include/mach/ep93xx-regs.h
+++ b/arch/arm/mach-ep93xx/include/mach/ep93xx-regs.h
@@ -106,6 +106,7 @@
#define EP93XX_AAC_BASE EP93XX_APB_IOMEM(0x00080000)
+#define EP93XX_SPI_PHYS_BASE EP93XX_APB_PHYS(0x000a0000)
#define EP93XX_SPI_BASE EP93XX_APB_IOMEM(0x000a0000)
#define EP93XX_IRDA_BASE EP93XX_APB_IOMEM(0x000b0000)
diff --git a/arch/arm/mach-ep93xx/include/mach/ep93xx_spi.h b/arch/arm/mach-ep93xx/include/mach/ep93xx_spi.h
new file mode 100644
index 000000000000..0a37961b3453
--- /dev/null
+++ b/arch/arm/mach-ep93xx/include/mach/ep93xx_spi.h
@@ -0,0 +1,27 @@
+#ifndef __ASM_MACH_EP93XX_SPI_H
+#define __ASM_MACH_EP93XX_SPI_H
+
+struct spi_device;
+
+/**
+ * struct ep93xx_spi_info - EP93xx specific SPI descriptor
+ * @num_chipselect: number of chip selects on this board, must be
+ * at least one
+ */
+struct ep93xx_spi_info {
+ int num_chipselect;
+};
+
+/**
+ * struct ep93xx_spi_chip_ops - operation callbacks for SPI slave device
+ * @setup: setup the chip select mechanism
+ * @cleanup: cleanup the chip select mechanism
+ * @cs_control: control the device chip select
+ */
+struct ep93xx_spi_chip_ops {
+ int (*setup)(struct spi_device *spi);
+ void (*cleanup)(struct spi_device *spi);
+ void (*cs_control)(struct spi_device *spi, int value);
+};
+
+#endif /* __ASM_MACH_EP93XX_SPI_H */
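
Editor's note: the chip ops give boards a hook for driving chip selects, typically via GPIO. A hedged sketch of one possible implementation; the GPIO line and the exact way the controller driver invokes these hooks are assumptions:

/*
 * Hedged sketch: per-device chip-select ops using a GPIO line.
 */
#include <linux/gpio.h>
#include <linux/spi/spi.h>
#include <mach/ep93xx_spi.h>

#define MY_SPI_CS_GPIO	EP93XX_GPIO_LINE_EGPIO9	/* illustrative line */

static int my_cs_setup(struct spi_device *spi)
{
	int err = gpio_request(MY_SPI_CS_GPIO, spi->modalias);

	if (err)
		return err;
	return gpio_direction_output(MY_SPI_CS_GPIO, 1);	/* deasserted */
}

static void my_cs_cleanup(struct spi_device *spi)
{
	gpio_free(MY_SPI_CS_GPIO);
}

static void my_cs_control(struct spi_device *spi, int value)
{
	gpio_set_value(MY_SPI_CS_GPIO, value);
}

static struct ep93xx_spi_chip_ops my_spi_chip_ops = {
	.setup		= my_cs_setup,
	.cleanup	= my_cs_cleanup,
	.cs_control	= my_cs_control,
};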
diff --git a/arch/arm/mach-ep93xx/include/mach/platform.h b/arch/arm/mach-ep93xx/include/mach/platform.h
index c6dc14dbca18..9a4413dd44bb 100644
--- a/arch/arm/mach-ep93xx/include/mach/platform.h
+++ b/arch/arm/mach-ep93xx/include/mach/platform.h
@@ -6,9 +6,11 @@
struct i2c_gpio_platform_data;
struct i2c_board_info;
+struct spi_board_info;
struct platform_device;
struct ep93xxfb_mach_info;
struct ep93xx_keypad_platform_data;
+struct ep93xx_spi_info;
struct ep93xx_eth_data
{
@@ -33,9 +35,19 @@ static inline void ep93xx_devcfg_clear_bits(unsigned int bits)
ep93xx_devcfg_set_clear(0x00, bits);
}
+#define EP93XX_CHIP_REV_D0 3
+#define EP93XX_CHIP_REV_D1 4
+#define EP93XX_CHIP_REV_E0 5
+#define EP93XX_CHIP_REV_E1 6
+#define EP93XX_CHIP_REV_E2 7
+
+unsigned int ep93xx_chip_revision(void);
+
void ep93xx_register_eth(struct ep93xx_eth_data *data, int copy_addr);
void ep93xx_register_i2c(struct i2c_gpio_platform_data *data,
struct i2c_board_info *devices, int num);
+void ep93xx_register_spi(struct ep93xx_spi_info *info,
+ struct spi_board_info *devices, int num);
void ep93xx_register_fb(struct ep93xxfb_mach_info *data);
void ep93xx_register_pwm(int pwm0, int pwm1);
int ep93xx_pwm_acquire_gpio(struct platform_device *pdev);
diff --git a/arch/arm/mach-ep93xx/include/mach/ts72xx.h b/arch/arm/mach-ep93xx/include/mach/ts72xx.h
index 93107d88ff3a..0eabec62cd9d 100644
--- a/arch/arm/mach-ep93xx/include/mach/ts72xx.h
+++ b/arch/arm/mach-ep93xx/include/mach/ts72xx.h
@@ -9,9 +9,6 @@
* febff000 22000000 4K model number register
* febfe000 22400000 4K options register
* febfd000 22800000 4K options register #2
- * febfc000 [67]0000000 4K NAND data register
- * febfb000 [67]0400000 4K NAND control register
- * febfa000 [67]0800000 4K NAND busy register
* febf9000 10800000 4K TS-5620 RTC index register
* febf8000 11700000 4K TS-5620 RTC data register
*/
@@ -41,22 +38,6 @@
#define TS72XX_OPTIONS2_TS9420_BOOT 0x02
-#define TS72XX_NAND1_DATA_PHYS_BASE 0x60000000
-#define TS72XX_NAND2_DATA_PHYS_BASE 0x70000000
-#define TS72XX_NAND_DATA_VIRT_BASE 0xfebfc000
-#define TS72XX_NAND_DATA_SIZE 0x00001000
-
-#define TS72XX_NAND1_CONTROL_PHYS_BASE 0x60400000
-#define TS72XX_NAND2_CONTROL_PHYS_BASE 0x70400000
-#define TS72XX_NAND_CONTROL_VIRT_BASE 0xfebfb000
-#define TS72XX_NAND_CONTROL_SIZE 0x00001000
-
-#define TS72XX_NAND1_BUSY_PHYS_BASE 0x60800000
-#define TS72XX_NAND2_BUSY_PHYS_BASE 0x70800000
-#define TS72XX_NAND_BUSY_VIRT_BASE 0xfebfa000
-#define TS72XX_NAND_BUSY_SIZE 0x00001000
-
-
#define TS72XX_RTC_INDEX_VIRT_BASE 0xfebf9000
#define TS72XX_RTC_INDEX_PHYS_BASE 0x10800000
#define TS72XX_RTC_INDEX_SIZE 0x00001000
diff --git a/arch/arm/mach-ep93xx/micro9.c b/arch/arm/mach-ep93xx/micro9.c
index c33360e82868..1cc911b4efa6 100644
--- a/arch/arm/mach-ep93xx/micro9.c
+++ b/arch/arm/mach-ep93xx/micro9.c
@@ -80,7 +80,7 @@ static void __init micro9_register_flash(void)
/*************************************************************************
* Micro9 Ethernet
*************************************************************************/
-static struct ep93xx_eth_data micro9_eth_data = {
+static struct ep93xx_eth_data __initdata micro9_eth_data = {
.phy_id = 0x1f,
};
diff --git a/arch/arm/mach-ep93xx/simone.c b/arch/arm/mach-ep93xx/simone.c
index cd93990f1b99..388aec95f60e 100644
--- a/arch/arm/mach-ep93xx/simone.c
+++ b/arch/arm/mach-ep93xx/simone.c
@@ -49,17 +49,17 @@ static struct platform_device simone_flash = {
},
};
-static struct ep93xx_eth_data simone_eth_data = {
+static struct ep93xx_eth_data __initdata simone_eth_data = {
.phy_id = 1,
};
-static struct ep93xxfb_mach_info simone_fb_info = {
+static struct ep93xxfb_mach_info __initdata simone_fb_info = {
.num_modes = EP93XXFB_USE_MODEDB,
.bpp = 16,
.flags = EP93XXFB_USE_SDCSN0 | EP93XXFB_PCLK_FALLING,
};
-static struct i2c_gpio_platform_data simone_i2c_gpio_data = {
+static struct i2c_gpio_platform_data __initdata simone_i2c_gpio_data = {
.sda_pin = EP93XX_GPIO_LINE_EEDAT,
.sda_is_open_drain = 0,
.scl_pin = EP93XX_GPIO_LINE_EECLK,
diff --git a/arch/arm/mach-ep93xx/snappercl15.c b/arch/arm/mach-ep93xx/snappercl15.c
index 51134b0382ca..38deaee40397 100644
--- a/arch/arm/mach-ep93xx/snappercl15.c
+++ b/arch/arm/mach-ep93xx/snappercl15.c
@@ -125,11 +125,11 @@ static struct platform_device snappercl15_nand_device = {
.num_resources = ARRAY_SIZE(snappercl15_nand_resource),
};
-static struct ep93xx_eth_data snappercl15_eth_data = {
+static struct ep93xx_eth_data __initdata snappercl15_eth_data = {
.phy_id = 1,
};
-static struct i2c_gpio_platform_data snappercl15_i2c_gpio_data = {
+static struct i2c_gpio_platform_data __initdata snappercl15_i2c_gpio_data = {
.sda_pin = EP93XX_GPIO_LINE_EEDAT,
.sda_is_open_drain = 0,
.scl_pin = EP93XX_GPIO_LINE_EECLK,
@@ -145,7 +145,7 @@ static struct i2c_board_info __initdata snappercl15_i2c_data[] = {
},
};
-static struct ep93xxfb_mach_info snappercl15_fb_info = {
+static struct ep93xxfb_mach_info __initdata snappercl15_fb_info = {
.num_modes = EP93XXFB_USE_MODEDB,
.bpp = 16,
};
diff --git a/arch/arm/mach-ep93xx/ts72xx.c b/arch/arm/mach-ep93xx/ts72xx.c
index fac1ec7a60fb..ae7319e588c7 100644
--- a/arch/arm/mach-ep93xx/ts72xx.c
+++ b/arch/arm/mach-ep93xx/ts72xx.c
@@ -10,12 +10,16 @@
* your option) any later version.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/m48t86.h>
#include <linux/mtd/physmap.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
#include <mach/hardware.h>
#include <mach/ts72xx.h>
@@ -54,92 +58,162 @@ static struct map_desc ts72xx_io_desc[] __initdata = {
}
};
-static struct map_desc ts72xx_nand_io_desc[] __initdata = {
- {
- .virtual = TS72XX_NAND_DATA_VIRT_BASE,
- .pfn = __phys_to_pfn(TS72XX_NAND1_DATA_PHYS_BASE),
- .length = TS72XX_NAND_DATA_SIZE,
- .type = MT_DEVICE,
- }, {
- .virtual = TS72XX_NAND_CONTROL_VIRT_BASE,
- .pfn = __phys_to_pfn(TS72XX_NAND1_CONTROL_PHYS_BASE),
- .length = TS72XX_NAND_CONTROL_SIZE,
- .type = MT_DEVICE,
- }, {
- .virtual = TS72XX_NAND_BUSY_VIRT_BASE,
- .pfn = __phys_to_pfn(TS72XX_NAND1_BUSY_PHYS_BASE),
- .length = TS72XX_NAND_BUSY_SIZE,
- .type = MT_DEVICE,
+static void __init ts72xx_map_io(void)
+{
+ ep93xx_map_io();
+ iotable_init(ts72xx_io_desc, ARRAY_SIZE(ts72xx_io_desc));
+}
+
+
+/*************************************************************************
+ * NAND flash
+ *************************************************************************/
+#define TS72XX_NAND_CONTROL_ADDR_LINE 22 /* 0xN0400000 */
+#define TS72XX_NAND_BUSY_ADDR_LINE 23 /* 0xN0800000 */
+
+static void ts72xx_nand_hwcontrol(struct mtd_info *mtd,
+ int cmd, unsigned int ctrl)
+{
+ struct nand_chip *chip = mtd->priv;
+
+ if (ctrl & NAND_CTRL_CHANGE) {
+ void __iomem *addr = chip->IO_ADDR_R;
+ unsigned char bits;
+
+ addr += (1 << TS72XX_NAND_CONTROL_ADDR_LINE);
+
+ bits = __raw_readb(addr) & ~0x07;
+ bits |= (ctrl & NAND_NCE) << 2; /* bit 0 -> bit 2 */
+ bits |= (ctrl & NAND_CLE); /* bit 1 -> bit 1 */
+ bits |= (ctrl & NAND_ALE) >> 2; /* bit 2 -> bit 0 */
+
+ __raw_writeb(bits, addr);
}
-};
-static struct map_desc ts72xx_alternate_nand_io_desc[] __initdata = {
+ if (cmd != NAND_CMD_NONE)
+ __raw_writeb(cmd, chip->IO_ADDR_W);
+}
+
+static int ts72xx_nand_device_ready(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ void __iomem *addr = chip->IO_ADDR_R;
+
+ addr += (1 << TS72XX_NAND_BUSY_ADDR_LINE);
+
+ return !!(__raw_readb(addr) & 0x20);
+}
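
Editor's note: the control and busy registers sit on dedicated address lines above the NAND data window, so both are derived from IO_ADDR_R instead of needing their own static mappings. A worked illustration of the resulting addresses:

/*
 * Illustration of the address-line trick above, assuming the primary NAND
 * window at chip select 6 (0x60000000) picked in ts72xx_register_flash():
 *
 *   data:    0x60000000                        (IO_ADDR_R / IO_ADDR_W)
 *   control: 0x60000000 + (1 << 22) = 0x60400000
 *   busy:    0x60000000 + (1 << 23) = 0x60800000
 *
 * which matches the fixed physical addresses the removed static NAND
 * io_desc tables used to map.
 */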
+
+static const char *ts72xx_nand_part_probes[] = { "cmdlinepart", NULL };
+
+#define TS72XX_BOOTROM_PART_SIZE (SZ_16K)
+#define TS72XX_REDBOOT_PART_SIZE (SZ_2M + SZ_1M)
+
+static struct mtd_partition ts72xx_nand_parts[] = {
{
- .virtual = TS72XX_NAND_DATA_VIRT_BASE,
- .pfn = __phys_to_pfn(TS72XX_NAND2_DATA_PHYS_BASE),
- .length = TS72XX_NAND_DATA_SIZE,
- .type = MT_DEVICE,
+ .name = "TS-BOOTROM",
+ .offset = 0,
+ .size = TS72XX_BOOTROM_PART_SIZE,
+ .mask_flags = MTD_WRITEABLE, /* force read-only */
}, {
- .virtual = TS72XX_NAND_CONTROL_VIRT_BASE,
- .pfn = __phys_to_pfn(TS72XX_NAND2_CONTROL_PHYS_BASE),
- .length = TS72XX_NAND_CONTROL_SIZE,
- .type = MT_DEVICE,
+ .name = "Linux",
+ .offset = MTDPART_OFS_APPEND,
+ .size = 0, /* filled in later */
}, {
- .virtual = TS72XX_NAND_BUSY_VIRT_BASE,
- .pfn = __phys_to_pfn(TS72XX_NAND2_BUSY_PHYS_BASE),
- .length = TS72XX_NAND_BUSY_SIZE,
- .type = MT_DEVICE,
- }
+ .name = "RedBoot",
+ .offset = MTDPART_OFS_APPEND,
+ .size = MTDPART_SIZ_FULL,
+ .mask_flags = MTD_WRITEABLE, /* force read-only */
+ },
};
-static void __init ts72xx_map_io(void)
+static void ts72xx_nand_set_parts(uint64_t size,
+ struct platform_nand_chip *chip)
{
- ep93xx_map_io();
- iotable_init(ts72xx_io_desc, ARRAY_SIZE(ts72xx_io_desc));
+ /* Factory TS-72xx boards only come with 32MiB or 128MiB NAND options */
+ if (size == SZ_32M || size == SZ_128M) {
+ /* Set the "Linux" partition size */
+ ts72xx_nand_parts[1].size = size - TS72XX_REDBOOT_PART_SIZE;
- /*
- * The TS-7200 has NOR flash, the other models have NAND flash.
- */
- if (!board_is_ts7200()) {
- if (is_ts9420_installed()) {
- iotable_init(ts72xx_alternate_nand_io_desc,
- ARRAY_SIZE(ts72xx_alternate_nand_io_desc));
- } else {
- iotable_init(ts72xx_nand_io_desc,
- ARRAY_SIZE(ts72xx_nand_io_desc));
- }
+ chip->partitions = ts72xx_nand_parts;
+ chip->nr_partitions = ARRAY_SIZE(ts72xx_nand_parts);
+ } else {
+ pr_warning("Unknown nand disk size:%lluMiB\n", size >> 20);
}
}
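
Editor's note: a worked example of the partition sizing above, for the 128MiB factory option (32MiB works the same way):

/*
 *   TS-BOOTROM : offset 0,              size 16KiB            (read-only)
 *   Linux      : offset 16KiB,          size 128MiB - 3MiB = 125MiB
 *   RedBoot    : offset 16KiB + 125MiB, remainder (3MiB - 16KiB, read-only)
 *
 * i.e. ts72xx_nand_parts[1].size = size - TS72XX_REDBOOT_PART_SIZE, with
 * RedBoot taking whatever is left (MTDPART_SIZ_FULL).
 */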
+static struct platform_nand_data ts72xx_nand_data = {
+ .chip = {
+ .nr_chips = 1,
+ .chip_offset = 0,
+ .chip_delay = 15,
+ .part_probe_types = ts72xx_nand_part_probes,
+ .set_parts = ts72xx_nand_set_parts,
+ },
+ .ctrl = {
+ .cmd_ctrl = ts72xx_nand_hwcontrol,
+ .dev_ready = ts72xx_nand_device_ready,
+ },
+};
+
+static struct resource ts72xx_nand_resource[] = {
+ {
+ .start = 0, /* filled in later */
+ .end = 0, /* filled in later */
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device ts72xx_nand_flash = {
+ .name = "gen_nand",
+ .id = -1,
+ .dev.platform_data = &ts72xx_nand_data,
+ .resource = ts72xx_nand_resource,
+ .num_resources = ARRAY_SIZE(ts72xx_nand_resource),
+};
+
+
/*************************************************************************
* NOR flash (TS-7200 only)
*************************************************************************/
-static struct physmap_flash_data ts72xx_flash_data = {
+static struct physmap_flash_data ts72xx_nor_data = {
.width = 2,
};
-static struct resource ts72xx_flash_resource = {
+static struct resource ts72xx_nor_resource = {
.start = EP93XX_CS6_PHYS_BASE,
.end = EP93XX_CS6_PHYS_BASE + SZ_16M - 1,
.flags = IORESOURCE_MEM,
};
-static struct platform_device ts72xx_flash = {
- .name = "physmap-flash",
- .id = 0,
- .dev = {
- .platform_data = &ts72xx_flash_data,
- },
- .num_resources = 1,
- .resource = &ts72xx_flash_resource,
+static struct platform_device ts72xx_nor_flash = {
+ .name = "physmap-flash",
+ .id = 0,
+ .dev.platform_data = &ts72xx_nor_data,
+ .resource = &ts72xx_nor_resource,
+ .num_resources = 1,
};
static void __init ts72xx_register_flash(void)
{
- if (board_is_ts7200())
- platform_device_register(&ts72xx_flash);
+ if (board_is_ts7200()) {
+ platform_device_register(&ts72xx_nor_flash);
+ } else {
+ resource_size_t start;
+
+ if (is_ts9420_installed())
+ start = EP93XX_CS7_PHYS_BASE;
+ else
+ start = EP93XX_CS6_PHYS_BASE;
+
+ ts72xx_nand_resource[0].start = start;
+ ts72xx_nand_resource[0].end = start + SZ_16M - 1;
+
+ platform_device_register(&ts72xx_nand_flash);
+ }
}
+
static unsigned char ts72xx_rtc_readbyte(unsigned long addr)
{
__raw_writeb(addr, TS72XX_RTC_INDEX_VIRT_BASE);
@@ -186,7 +260,7 @@ static struct platform_device ts72xx_wdt_device = {
.resource = ts72xx_wdt_resources,
};
-static struct ep93xx_eth_data ts72xx_eth_data = {
+static struct ep93xx_eth_data __initdata ts72xx_eth_data = {
.phy_id = 1,
};
diff --git a/arch/arm/mach-footbridge/ebsa285-pci.c b/arch/arm/mach-footbridge/ebsa285-pci.c
index 720c0bac1702..e5ab5bddbc8c 100644
--- a/arch/arm/mach-footbridge/ebsa285-pci.c
+++ b/arch/arm/mach-footbridge/ebsa285-pci.c
@@ -20,9 +20,9 @@ static int __init ebsa285_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
if (dev->vendor == PCI_VENDOR_ID_CONTAQ &&
dev->device == PCI_DEVICE_ID_CONTAQ_82C693)
switch (PCI_FUNC(dev->devfn)) {
- case 1: return 14;
- case 2: return 15;
- case 3: return 12;
+ case 1: return 14;
+ case 2: return 15;
+ case 3: return 12;
}
return irqmap_ebsa285[(slot + pin) & 3];
diff --git a/arch/arm/mach-h720x/common.h b/arch/arm/mach-h720x/common.h
index d8798dbc44f8..7dd5fa604efc 100644
--- a/arch/arm/mach-h720x/common.h
+++ b/arch/arm/mach-h720x/common.h
@@ -14,13 +14,13 @@
*/
extern unsigned long h720x_gettimeoffset(void);
-extern void __init h720x_init_irq (void);
+extern void __init h720x_init_irq(void);
extern void __init h720x_map_io(void);
#ifdef CONFIG_ARCH_H7202
extern struct sys_timer h7202_timer;
extern void __init init_hw_h7202(void);
-extern void __init h7202_init_irq (void);
+extern void __init h7202_init_irq(void);
extern void __init h7202_init_time(void);
#endif
diff --git a/arch/arm/mach-integrator/Kconfig b/arch/arm/mach-integrator/Kconfig
index df97d16390e3..27db275b367c 100644
--- a/arch/arm/mach-integrator/Kconfig
+++ b/arch/arm/mach-integrator/Kconfig
@@ -11,6 +11,7 @@ config ARCH_INTEGRATOR_AP
config ARCH_INTEGRATOR_CP
bool "Support Integrator/CP platform"
select ARCH_CINTEGRATOR
+ select ARM_TIMER_SP804
help
Include support for the ARM(R) Integrator CP platform.
diff --git a/arch/arm/mach-integrator/Makefile b/arch/arm/mach-integrator/Makefile
index 6a5ef8d30b10..ebeef966e1f5 100644
--- a/arch/arm/mach-integrator/Makefile
+++ b/arch/arm/mach-integrator/Makefile
@@ -4,7 +4,7 @@
# Object file lists.
-obj-y := clock.o core.o lm.o
+obj-y := core.o lm.o
obj-$(CONFIG_ARCH_INTEGRATOR_AP) += integrator_ap.o
obj-$(CONFIG_ARCH_INTEGRATOR_CP) += integrator_cp.o
diff --git a/arch/arm/mach-integrator/common.h b/arch/arm/mach-integrator/common.h
deleted file mode 100644
index 609c49de3d47..000000000000
--- a/arch/arm/mach-integrator/common.h
+++ /dev/null
@@ -1,2 +0,0 @@
-extern void integrator_time_init(unsigned long, unsigned int);
-extern unsigned long integrator_gettimeoffset(void);
diff --git a/arch/arm/mach-integrator/core.c b/arch/arm/mach-integrator/core.c
index 8b390e36ba69..b02cfc06e0ae 100644
--- a/arch/arm/mach-integrator/core.c
+++ b/arch/arm/mach-integrator/core.c
@@ -24,15 +24,13 @@
#include <asm/clkdev.h>
#include <mach/clkdev.h>
#include <mach/hardware.h>
+#include <mach/platform.h>
#include <asm/irq.h>
-#include <asm/hardware/arm_timer.h>
#include <mach/cm.h>
#include <asm/system.h>
#include <asm/leds.h>
#include <asm/mach/time.h>
-#include "common.h"
-
static struct amba_pl010_data integrator_uart_data;
static struct amba_device rtc_device = {
@@ -163,8 +161,8 @@ arch_initcall(integrator_init);
* UART0 7 6
* UART1 5 4
*/
-#define SC_CTRLC (IO_ADDRESS(INTEGRATOR_SC_BASE) + INTEGRATOR_SC_CTRLC_OFFSET)
-#define SC_CTRLS (IO_ADDRESS(INTEGRATOR_SC_BASE) + INTEGRATOR_SC_CTRLS_OFFSET)
+#define SC_CTRLC IO_ADDRESS(INTEGRATOR_SC_CTRLC)
+#define SC_CTRLS IO_ADDRESS(INTEGRATOR_SC_CTRLS)
static void integrator_uart_set_mctrl(struct amba_device *dev, void __iomem *base, unsigned int mctrl)
{
@@ -196,7 +194,7 @@ static struct amba_pl010_data integrator_uart_data = {
.set_mctrl = integrator_uart_set_mctrl,
};
-#define CM_CTRL IO_ADDRESS(INTEGRATOR_HDR_BASE) + INTEGRATOR_HDR_CTRL_OFFSET
+#define CM_CTRL IO_ADDRESS(INTEGRATOR_HDR_CTRL)
static DEFINE_SPINLOCK(cm_lock);
@@ -217,120 +215,3 @@ void cm_control(u32 mask, u32 set)
}
EXPORT_SYMBOL(cm_control);
-
-/*
- * Where is the timer (VA)?
- */
-#define TIMER0_VA_BASE (IO_ADDRESS(INTEGRATOR_CT_BASE)+0x00000000)
-#define TIMER1_VA_BASE (IO_ADDRESS(INTEGRATOR_CT_BASE)+0x00000100)
-#define TIMER2_VA_BASE (IO_ADDRESS(INTEGRATOR_CT_BASE)+0x00000200)
-#define VA_IC_BASE IO_ADDRESS(INTEGRATOR_IC_BASE)
-
-/*
- * How long is the timer interval?
- */
-#define TIMER_INTERVAL (TICKS_PER_uSEC * mSEC_10)
-#if TIMER_INTERVAL >= 0x100000
-#define TICKS2USECS(x) (256 * (x) / TICKS_PER_uSEC)
-#elif TIMER_INTERVAL >= 0x10000
-#define TICKS2USECS(x) (16 * (x) / TICKS_PER_uSEC)
-#else
-#define TICKS2USECS(x) ((x) / TICKS_PER_uSEC)
-#endif
-
-static unsigned long timer_reload;
-
-/*
- * Returns number of ms since last clock interrupt. Note that interrupts
- * will have been disabled by do_gettimeoffset()
- */
-unsigned long integrator_gettimeoffset(void)
-{
- unsigned long ticks1, ticks2, status;
-
- /*
- * Get the current number of ticks. Note that there is a race
- * condition between us reading the timer and checking for
- * an interrupt. We get around this by ensuring that the
- * counter has not reloaded between our two reads.
- */
- ticks2 = readl(TIMER1_VA_BASE + TIMER_VALUE) & 0xffff;
- do {
- ticks1 = ticks2;
- status = __raw_readl(VA_IC_BASE + IRQ_RAW_STATUS);
- ticks2 = readl(TIMER1_VA_BASE + TIMER_VALUE) & 0xffff;
- } while (ticks2 > ticks1);
-
- /*
- * Number of ticks since last interrupt.
- */
- ticks1 = timer_reload - ticks2;
-
- /*
- * Interrupt pending? If so, we've reloaded once already.
- */
- if (status & (1 << IRQ_TIMERINT1))
- ticks1 += timer_reload;
-
- /*
- * Convert the ticks to usecs
- */
- return TICKS2USECS(ticks1);
-}
-
-/*
- * IRQ handler for the timer
- */
-static irqreturn_t
-integrator_timer_interrupt(int irq, void *dev_id)
-{
- /*
- * clear the interrupt
- */
- writel(1, TIMER1_VA_BASE + TIMER_INTCLR);
-
- timer_tick();
-
- return IRQ_HANDLED;
-}
-
-static struct irqaction integrator_timer_irq = {
- .name = "Integrator Timer Tick",
- .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
- .handler = integrator_timer_interrupt,
-};
-
-/*
- * Set up timer interrupt, and return the current time in seconds.
- */
-void __init integrator_time_init(unsigned long reload, unsigned int ctrl)
-{
- unsigned int timer_ctrl = TIMER_CTRL_ENABLE | TIMER_CTRL_PERIODIC;
-
- timer_reload = reload;
- timer_ctrl |= ctrl;
-
- if (timer_reload > 0x100000) {
- timer_reload >>= 8;
- timer_ctrl |= TIMER_CTRL_DIV256;
- } else if (timer_reload > 0x010000) {
- timer_reload >>= 4;
- timer_ctrl |= TIMER_CTRL_DIV16;
- }
-
- /*
- * Initialise to a known state (all timers off)
- */
- writel(0, TIMER0_VA_BASE + TIMER_CTRL);
- writel(0, TIMER1_VA_BASE + TIMER_CTRL);
- writel(0, TIMER2_VA_BASE + TIMER_CTRL);
-
- writel(timer_reload, TIMER1_VA_BASE + TIMER_LOAD);
- writel(timer_reload, TIMER1_VA_BASE + TIMER_VALUE);
- writel(timer_ctrl, TIMER1_VA_BASE + TIMER_CTRL);
-
- /*
- * Make irqs happen for the system timer
- */
- setup_irq(IRQ_TIMERINT1, &integrator_timer_irq);
-}
diff --git a/arch/arm/mach-integrator/cpu.c b/arch/arm/mach-integrator/cpu.c
index f77f20255045..a3fbcb3adc29 100644
--- a/arch/arm/mach-integrator/cpu.c
+++ b/arch/arm/mach-integrator/cpu.c
@@ -19,32 +19,39 @@
#include <linux/io.h>
#include <mach/hardware.h>
+#include <mach/platform.h>
#include <asm/mach-types.h>
-#include <asm/hardware/icst525.h>
+#include <asm/hardware/icst.h>
static struct cpufreq_driver integrator_driver;
-#define CM_ID (IO_ADDRESS(INTEGRATOR_HDR_BASE)+INTEGRATOR_HDR_ID_OFFSET)
-#define CM_OSC (IO_ADDRESS(INTEGRATOR_HDR_BASE)+INTEGRATOR_HDR_OSC_OFFSET)
-#define CM_STAT (IO_ADDRESS(INTEGRATOR_HDR_BASE)+INTEGRATOR_HDR_STAT_OFFSET)
-#define CM_LOCK (IO_ADDRESS(INTEGRATOR_HDR_BASE)+INTEGRATOR_HDR_LOCK_OFFSET)
+#define CM_ID IO_ADDRESS(INTEGRATOR_HDR_ID)
+#define CM_OSC IO_ADDRESS(INTEGRATOR_HDR_OSC)
+#define CM_STAT IO_ADDRESS(INTEGRATOR_HDR_STAT)
+#define CM_LOCK IO_ADDRESS(INTEGRATOR_HDR_LOCK)
-static const struct icst525_params lclk_params = {
- .ref = 24000,
- .vco_max = 320000,
+static const struct icst_params lclk_params = {
+ .ref = 24000000,
+ .vco_max = ICST525_VCO_MAX_5V,
+ .vco_min = ICST525_VCO_MIN,
.vd_min = 8,
.vd_max = 132,
.rd_min = 24,
.rd_max = 24,
+ .s2div = icst525_s2div,
+ .idx2s = icst525_idx2s,
};
-static const struct icst525_params cclk_params = {
- .ref = 24000,
- .vco_max = 320000,
+static const struct icst_params cclk_params = {
+ .ref = 24000000,
+ .vco_max = ICST525_VCO_MAX_5V,
+ .vco_min = ICST525_VCO_MIN,
.vd_min = 12,
.vd_max = 160,
.rd_min = 24,
.rd_max = 24,
+ .s2div = icst525_s2div,
+ .idx2s = icst525_idx2s,
};
/*
@@ -52,17 +59,17 @@ static const struct icst525_params cclk_params = {
*/
static int integrator_verify_policy(struct cpufreq_policy *policy)
{
- struct icst525_vco vco;
+ struct icst_vco vco;
cpufreq_verify_within_limits(policy,
policy->cpuinfo.min_freq,
policy->cpuinfo.max_freq);
- vco = icst525_khz_to_vco(&cclk_params, policy->max);
- policy->max = icst525_khz(&cclk_params, vco);
+ vco = icst_hz_to_vco(&cclk_params, policy->max * 1000);
+ policy->max = icst_hz(&cclk_params, vco) / 1000;
- vco = icst525_khz_to_vco(&cclk_params, policy->min);
- policy->min = icst525_khz(&cclk_params, vco);
+ vco = icst_hz_to_vco(&cclk_params, policy->min * 1000);
+ policy->min = icst_hz(&cclk_params, vco) / 1000;
cpufreq_verify_within_limits(policy,
policy->cpuinfo.min_freq,
@@ -78,7 +85,7 @@ static int integrator_set_target(struct cpufreq_policy *policy,
{
cpumask_t cpus_allowed;
int cpu = policy->cpu;
- struct icst525_vco vco;
+ struct icst_vco vco;
struct cpufreq_freqs freqs;
u_int cm_osc;
@@ -104,17 +111,17 @@ static int integrator_set_target(struct cpufreq_policy *policy,
}
vco.v = cm_osc & 255;
vco.r = 22;
- freqs.old = icst525_khz(&cclk_params, vco);
+ freqs.old = icst_hz(&cclk_params, vco) / 1000;
- /* icst525_khz_to_vco rounds down -- so we need the next
+ /* icst_hz_to_vco rounds down -- so we need the next
* larger freq in case of CPUFREQ_RELATION_L.
*/
if (relation == CPUFREQ_RELATION_L)
target_freq += 999;
if (target_freq > policy->max)
target_freq = policy->max;
- vco = icst525_khz_to_vco(&cclk_params, target_freq);
- freqs.new = icst525_khz(&cclk_params, vco);
+ vco = icst_hz_to_vco(&cclk_params, target_freq * 1000);
+ freqs.new = icst_hz(&cclk_params, vco) / 1000;
freqs.cpu = policy->cpu;
@@ -154,7 +161,7 @@ static unsigned int integrator_get(unsigned int cpu)
cpumask_t cpus_allowed;
unsigned int current_freq;
u_int cm_osc;
- struct icst525_vco vco;
+ struct icst_vco vco;
cpus_allowed = current->cpus_allowed;
@@ -172,7 +179,7 @@ static unsigned int integrator_get(unsigned int cpu)
vco.v = cm_osc & 255;
vco.r = 22;
- current_freq = icst525_khz(&cclk_params, vco); /* current freq */
+ current_freq = icst_hz(&cclk_params, vco) / 1000; /* current freq */
set_cpus_allowed(current, cpus_allowed);
diff --git a/arch/arm/mach-integrator/impd1.c b/arch/arm/mach-integrator/impd1.c
index 41b10725cef7..fd684bf205e5 100644
--- a/arch/arm/mach-integrator/impd1.c
+++ b/arch/arm/mach-integrator/impd1.c
@@ -25,7 +25,7 @@
#include <asm/clkdev.h>
#include <mach/clkdev.h>
-#include <asm/hardware/icst525.h>
+#include <asm/hardware/icst.h>
#include <mach/lm.h>
#include <mach/impd1.h>
#include <asm/sizes.h>
@@ -41,32 +41,25 @@ struct impd1_module {
struct clk_lookup *clks[3];
};
-static const struct icst525_params impd1_vco_params = {
- .ref = 24000, /* 24 MHz */
- .vco_max = 200000, /* 200 MHz */
+static const struct icst_params impd1_vco_params = {
+ .ref = 24000000, /* 24 MHz */
+ .vco_max = ICST525_VCO_MAX_3V,
+ .vco_min = ICST525_VCO_MIN,
.vd_min = 12,
.vd_max = 519,
.rd_min = 3,
.rd_max = 120,
+ .s2div = icst525_s2div,
+ .idx2s = icst525_idx2s,
};
-static void impd1_setvco(struct clk *clk, struct icst525_vco vco)
+static void impd1_setvco(struct clk *clk, struct icst_vco vco)
{
struct impd1_module *impd1 = clk->data;
- int vconr = clk - impd1->vcos;
- u32 val;
-
- val = vco.v | (vco.r << 9) | (vco.s << 16);
+ u32 val = vco.v | (vco.r << 9) | (vco.s << 16);
writel(0xa05f, impd1->base + IMPD1_LOCK);
- switch (vconr) {
- case 0:
- writel(val, impd1->base + IMPD1_OSC1);
- break;
- case 1:
- writel(val, impd1->base + IMPD1_OSC2);
- break;
- }
+ writel(val, clk->vcoreg);
writel(0, impd1->base + IMPD1_LOCK);
#ifdef DEBUG
@@ -74,11 +67,17 @@ static void impd1_setvco(struct clk *clk, struct icst525_vco vco)
vco.r = (val >> 9) & 0x7f;
vco.s = (val >> 16) & 7;
- pr_debug("IM-PD1: VCO%d clock is %ld kHz\n",
- vconr, icst525_khz(&impd1_vco_params, vco));
+ pr_debug("IM-PD1: VCO%d clock is %ld Hz\n",
+ vconr, icst525_hz(&impd1_vco_params, vco));
#endif
}
+static const struct clk_ops impd1_clk_ops = {
+ .round = icst_clk_round,
+ .set = icst_clk_set,
+ .setvco = impd1_setvco,
+};
+
void impd1_tweak_control(struct device *dev, u32 mask, u32 val)
{
struct impd1_module *impd1 = dev_get_drvdata(dev);
@@ -374,11 +373,13 @@ static int impd1_probe(struct lm_device *dev)
(unsigned long)dev->resource.start);
for (i = 0; i < ARRAY_SIZE(impd1->vcos); i++) {
+ impd1->vcos[i].ops = &impd1_clk_ops,
impd1->vcos[i].owner = THIS_MODULE,
impd1->vcos[i].params = &impd1_vco_params,
- impd1->vcos[i].data = impd1,
- impd1->vcos[i].setvco = impd1_setvco;
+ impd1->vcos[i].data = impd1;
}
+ impd1->vcos[0].vcoreg = impd1->base + IMPD1_OSC1;
+ impd1->vcos[1].vcoreg = impd1->base + IMPD1_OSC2;
impd1->clks[0] = clkdev_alloc(&impd1->vcos[0], NULL, "lm%x:01000",
dev->id);
diff --git a/arch/arm/mach-integrator/include/mach/clkdev.h b/arch/arm/mach-integrator/include/mach/clkdev.h
index 9293e410832a..bfe07679faec 100644
--- a/arch/arm/mach-integrator/include/mach/clkdev.h
+++ b/arch/arm/mach-integrator/include/mach/clkdev.h
@@ -2,14 +2,15 @@
#define __ASM_MACH_CLKDEV_H
#include <linux/module.h>
-#include <asm/hardware/icst525.h>
+#include <plat/clock.h>
struct clk {
unsigned long rate;
+ const struct clk_ops *ops;
struct module *owner;
- const struct icst525_params *params;
+ const struct icst_params *params;
+ void __iomem *vcoreg;
void *data;
- void (*setvco)(struct clk *, struct icst525_vco vco);
};
static inline int __clk_get(struct clk *clk)
diff --git a/arch/arm/mach-integrator/include/mach/entry-macro.S b/arch/arm/mach-integrator/include/mach/entry-macro.S
index 7649c57acb53..3d029c9f3ef6 100644
--- a/arch/arm/mach-integrator/include/mach/entry-macro.S
+++ b/arch/arm/mach-integrator/include/mach/entry-macro.S
@@ -8,6 +8,7 @@
* warranty of any kind, whether express or implied.
*/
#include <mach/hardware.h>
+#include <mach/platform.h>
#include <mach/irqs.h>
.macro disable_fiq
diff --git a/arch/arm/mach-integrator/include/mach/hardware.h b/arch/arm/mach-integrator/include/mach/hardware.h
index d795642fad22..8e26360ce9a3 100644
--- a/arch/arm/mach-integrator/include/mach/hardware.h
+++ b/arch/arm/mach-integrator/include/mach/hardware.h
@@ -23,7 +23,6 @@
#define __ASM_ARCH_HARDWARE_H
#include <asm/sizes.h>
-#include <mach/platform.h>
/*
* Where in virtual memory the IO devices (timers, system controllers
@@ -36,17 +35,19 @@
#define PCIO_BASE PCI_IO_VADDR
#define PCIMEM_BASE PCI_MEMORY_VADDR
-#ifdef CONFIG_MMU
-/* macro to get at IO space when running virtually */
-#define IO_ADDRESS(x) (((x) >> 4) + IO_BASE)
-#else
-#define IO_ADDRESS(x) (x)
-#endif
-
#define pcibios_assign_all_busses() 1
#define PCIBIOS_MIN_IO 0x6000
#define PCIBIOS_MIN_MEM 0x00100000
+/* macro to get at IO space when running virtually */
+#ifdef CONFIG_MMU
+#define IO_ADDRESS(x) (((x) & 0x000fffff) | (((x) >> 4) & 0x0ff00000) | IO_BASE)
+#else
+#define IO_ADDRESS(x) (x)
+#endif
+
+#define __io_address(n) ((void __iomem *)IO_ADDRESS(n))
+
#endif
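
Editor's note: the old IO_ADDRESS() simply shifted the physical address right by four; the new form keeps the low 20 bits and nibble-shifts only the upper byte, so high peripherals such as the CP's 0xC9000000 GPIO also get unique virtual addresses. Worked examples, assuming the platform's usual IO_BASE of 0xf0000000:

/*
 *   0x16000000 (UART0)   -> (0x00000 | 0x01600000 | 0xf0000000) = 0xf1600000
 *   0xc9000000 (CP GPIO) -> (0x00000 | 0x0c900000 | 0xf0000000) = 0xfc900000
 *
 * which matches the "Logical/Physical" mapping comments added to
 * integrator_cp.c later in this patch (f1600000 <- 16000000,
 * fc900000 <- c9000000).
 */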
diff --git a/arch/arm/mach-integrator/include/mach/platform.h b/arch/arm/mach-integrator/include/mach/platform.h
index e00a2624f269..5e6ea5cfea6e 100644
--- a/arch/arm/mach-integrator/include/mach/platform.h
+++ b/arch/arm/mach-integrator/include/mach/platform.h
@@ -23,9 +23,6 @@
*
* Integrator address map
*
- * NOTE: This is a multi-hosted header file for use with uHAL and
- * supported debuggers.
- *
* ***********************************************************************/
#ifndef __address_h
@@ -290,12 +287,14 @@
#define INTEGRATOR_DBG_LEDS (INTEGRATOR_DBG_BASE + INTEGRATOR_DBG_LEDS_OFFSET)
#define INTEGRATOR_DBG_SWITCH (INTEGRATOR_DBG_BASE + INTEGRATOR_DBG_SWITCH_OFFSET)
+#define INTEGRATOR_AP_GPIO_BASE 0x1B000000 /* GPIO */
-#if defined(CONFIG_ARCH_INTEGRATOR_AP)
-#define INTEGRATOR_GPIO_BASE 0x1B000000 /* GPIO */
-#elif defined(CONFIG_ARCH_INTEGRATOR_CP)
-#define INTEGRATOR_GPIO_BASE 0xC9000000 /* GPIO */
-#endif
+#define INTEGRATOR_CP_MMC_BASE 0x1C000000 /* MMC */
+#define INTEGRATOR_CP_AACI_BASE 0x1D000000 /* AACI */
+#define INTEGRATOR_CP_ETH_BASE 0xC8000000 /* Ethernet */
+#define INTEGRATOR_CP_GPIO_BASE 0xC9000000 /* GPIO */
+#define INTEGRATOR_CP_SIC_BASE 0xCA000000 /* SIC */
+#define INTEGRATOR_CP_CTL_BASE 0xCB000000 /* CP system control */
/* ------------------------------------------------------------------------
* KMI keyboard/mouse definitions
@@ -328,20 +327,6 @@
*/
#define PHYS_PCI_V3_BASE 0x62000000
-#define PCI_DRAMSIZE INTEGRATOR_SSRAM_SIZE
-
-/* 'export' these to UHAL */
-#define UHAL_PCI_IO PCI_IO_BASE
-#define UHAL_PCI_MEM PCI_MEM_BASE
-#define UHAL_PCI_ALLOC_IO_BASE 0x00004000
-#define UHAL_PCI_ALLOC_MEM_BASE PCI_MEM_BASE
-#define UHAL_PCI_MAX_SLOT 20
-
-/* ========================================================================
- * Start of uHAL definitions
- * ========================================================================
- */
-
/* ------------------------------------------------------------------------
* Integrator Interrupt Controllers
* ------------------------------------------------------------------------
@@ -389,7 +374,7 @@
*/
/* ------------------------------------------------------------------------
- * LED's - The header LED is not accessible via the uHAL API
+ * LED's
* ------------------------------------------------------------------------
*
*/
@@ -402,34 +387,18 @@
#define LED_BANK INTEGRATOR_DBG_LEDS
/*
- * Memory definitions - run uHAL out of SSRAM.
- *
- */
-#define uHAL_MEMORY_SIZE INTEGRATOR_SSRAM_SIZE
-
-/*
- * Clean base - dummy
- *
- */
-#define CLEAN_BASE INTEGRATOR_BOOT_ROM_HI
-
-/*
* Timer definitions
*
* Only use timer 1 & 2
* (both run at 24MHz and will need the clock divider set to 16).
*
- * Timer 0 runs at bus frequency and therefore could vary and currently
- * uHAL can't handle that.
- *
+ * Timer 0 runs at bus frequency
*/
#define INTEGRATOR_TIMER0_BASE INTEGRATOR_CT_BASE
#define INTEGRATOR_TIMER1_BASE (INTEGRATOR_CT_BASE + 0x100)
#define INTEGRATOR_TIMER2_BASE (INTEGRATOR_CT_BASE + 0x200)
-#define MAX_TIMER 2
-#define MAX_PERIOD 699050
#define TICKS_PER_uSEC 24
/*
@@ -437,14 +406,9 @@
*
*/
#define mSEC_1 1000
-#define mSEC_5 (mSEC_1 * 5)
#define mSEC_10 (mSEC_1 * 10)
-#define mSEC_25 (mSEC_1 * 25)
-#define SEC_1 (mSEC_1 * 1000)
#define INTEGRATOR_CSR_BASE 0x10000000
#define INTEGRATOR_CSR_SIZE 0x10000000
#endif
-
-/* END */
diff --git a/arch/arm/mach-integrator/integrator_ap.c b/arch/arm/mach-integrator/integrator_ap.c
index 8138a7e24562..227cf4d05088 100644
--- a/arch/arm/mach-integrator/integrator_ap.c
+++ b/arch/arm/mach-integrator/integrator_ap.c
@@ -27,9 +27,14 @@
#include <linux/sysdev.h>
#include <linux/amba/bus.h>
#include <linux/amba/kmi.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
#include <linux/io.h>
#include <mach/hardware.h>
+#include <mach/platform.h>
+#include <asm/hardware/arm_timer.h>
#include <asm/irq.h>
#include <asm/setup.h>
#include <asm/param.h> /* HZ */
@@ -43,8 +48,6 @@
#include <asm/mach/map.h>
#include <asm/mach/time.h>
-#include "common.h"
-
/*
* All IO addresses are mapped onto VA 0xFFFx.xxxx, where x.xxxx
* is the (PA >> 12).
@@ -55,7 +58,7 @@
#define VA_IC_BASE IO_ADDRESS(INTEGRATOR_IC_BASE)
#define VA_SC_BASE IO_ADDRESS(INTEGRATOR_SC_BASE)
#define VA_EBI_BASE IO_ADDRESS(INTEGRATOR_EBI_BASE)
-#define VA_CMIC_BASE IO_ADDRESS(INTEGRATOR_HDR_BASE) + INTEGRATOR_HDR_IC_OFFSET
+#define VA_CMIC_BASE IO_ADDRESS(INTEGRATOR_HDR_IC)
/*
* Logical Physical
@@ -117,8 +120,8 @@ static struct map_desc ap_io_desc[] __initdata = {
.length = SZ_4K,
.type = MT_DEVICE
}, {
- .virtual = IO_ADDRESS(INTEGRATOR_GPIO_BASE),
- .pfn = __phys_to_pfn(INTEGRATOR_GPIO_BASE),
+ .virtual = IO_ADDRESS(INTEGRATOR_AP_GPIO_BASE),
+ .pfn = __phys_to_pfn(INTEGRATOR_AP_GPIO_BASE),
.length = SZ_4K,
.type = MT_DEVICE
}, {
@@ -334,14 +337,163 @@ static void __init ap_init(void)
}
}
+/*
+ * Where is the timer (VA)?
+ */
+#define TIMER0_VA_BASE IO_ADDRESS(INTEGRATOR_TIMER0_BASE)
+#define TIMER1_VA_BASE IO_ADDRESS(INTEGRATOR_TIMER1_BASE)
+#define TIMER2_VA_BASE IO_ADDRESS(INTEGRATOR_TIMER2_BASE)
+
+/*
+ * How long is the timer interval?
+ */
+#define TIMER_INTERVAL (TICKS_PER_uSEC * mSEC_10)
+#if TIMER_INTERVAL >= 0x100000
+#define TICKS2USECS(x) (256 * (x) / TICKS_PER_uSEC)
+#elif TIMER_INTERVAL >= 0x10000
+#define TICKS2USECS(x) (16 * (x) / TICKS_PER_uSEC)
+#else
+#define TICKS2USECS(x) ((x) / TICKS_PER_uSEC)
+#endif
+
+static unsigned long timer_reload;
+
+static void __iomem * const clksrc_base = (void __iomem *)TIMER2_VA_BASE;
+
+static cycle_t timersp_read(struct clocksource *cs)
+{
+ return ~(readl(clksrc_base + TIMER_VALUE) & 0xffff);
+}
+
+static struct clocksource clocksource_timersp = {
+ .name = "timer2",
+ .rating = 200,
+ .read = timersp_read,
+ .mask = CLOCKSOURCE_MASK(16),
+ .shift = 16,
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static void integrator_clocksource_init(u32 khz)
+{
+ struct clocksource *cs = &clocksource_timersp;
+ void __iomem *base = clksrc_base;
+ u32 ctrl = TIMER_CTRL_ENABLE;
+
+ if (khz >= 1500) {
+ khz /= 16;
+ ctrl = TIMER_CTRL_DIV16;
+ }
+
+ writel(ctrl, base + TIMER_CTRL);
+ writel(0xffff, base + TIMER_LOAD);
+
+ cs->mult = clocksource_khz2mult(khz, cs->shift);
+ clocksource_register(cs);
+}
+
+static void __iomem * const clkevt_base = (void __iomem *)TIMER1_VA_BASE;
+
+/*
+ * IRQ handler for the timer
+ */
+static irqreturn_t integrator_timer_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *evt = dev_id;
+
+ /* clear the interrupt */
+ writel(1, clkevt_base + TIMER_INTCLR);
+
+ evt->event_handler(evt);
+
+ return IRQ_HANDLED;
+}
+
+static void clkevt_set_mode(enum clock_event_mode mode, struct clock_event_device *evt)
+{
+ u32 ctrl = readl(clkevt_base + TIMER_CTRL) & ~TIMER_CTRL_ENABLE;
+
+ BUG_ON(mode == CLOCK_EVT_MODE_ONESHOT);
+
+ if (mode == CLOCK_EVT_MODE_PERIODIC) {
+ writel(ctrl, clkevt_base + TIMER_CTRL);
+ writel(timer_reload, clkevt_base + TIMER_LOAD);
+ ctrl |= TIMER_CTRL_PERIODIC | TIMER_CTRL_ENABLE;
+ }
+
+ writel(ctrl, clkevt_base + TIMER_CTRL);
+}
+
+static int clkevt_set_next_event(unsigned long next, struct clock_event_device *evt)
+{
+ unsigned long ctrl = readl(clkevt_base + TIMER_CTRL);
+
+ writel(ctrl & ~TIMER_CTRL_ENABLE, clkevt_base + TIMER_CTRL);
+ writel(next, clkevt_base + TIMER_LOAD);
+ writel(ctrl | TIMER_CTRL_ENABLE, clkevt_base + TIMER_CTRL);
+
+ return 0;
+}
+
+static struct clock_event_device integrator_clockevent = {
+ .name = "timer1",
+ .shift = 34,
+ .features = CLOCK_EVT_FEAT_PERIODIC,
+ .set_mode = clkevt_set_mode,
+ .set_next_event = clkevt_set_next_event,
+ .rating = 300,
+ .cpumask = cpu_all_mask,
+};
+
+static struct irqaction integrator_timer_irq = {
+ .name = "timer",
+ .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+ .handler = integrator_timer_interrupt,
+ .dev_id = &integrator_clockevent,
+};
+
+static void integrator_clockevent_init(u32 khz)
+{
+ struct clock_event_device *evt = &integrator_clockevent;
+ unsigned int ctrl = 0;
+
+ if (khz * 1000 > 0x100000 * HZ) {
+ khz /= 256;
+ ctrl |= TIMER_CTRL_DIV256;
+ } else if (khz * 1000 > 0x10000 * HZ) {
+ khz /= 16;
+ ctrl |= TIMER_CTRL_DIV16;
+ }
+
+ timer_reload = khz * 1000 / HZ;
+ writel(ctrl, clkevt_base + TIMER_CTRL);
+
+ evt->irq = IRQ_TIMERINT1;
+ evt->mult = div_sc(khz, NSEC_PER_MSEC, evt->shift);
+ evt->max_delta_ns = clockevent_delta2ns(0xffff, evt);
+ evt->min_delta_ns = clockevent_delta2ns(0xf, evt);
+
+ setup_irq(IRQ_TIMERINT1, &integrator_timer_irq);
+ clockevents_register_device(evt);
+}
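
Editor's note: worked numbers for the AP timer setup above, assuming HZ == 100 (the usual default for this platform at the time):

/*
 *   khz = TICKS_PER_uSEC * 1000 = 24 * 1000 = 24000     (24 MHz reference)
 *
 *   clockevent: 24000 * 1000 > 0x10000 * HZ but not > 0x100000 * HZ, so the
 *   /16 prescaler is selected; khz becomes 1500 and
 *   timer_reload = 1500 * 1000 / HZ = 15000, which fits the 16-bit counter.
 *
 *   clocksource: khz >= 1500, so timer2 also runs through the /16 prescaler
 *   at 1.5 MHz as a free-running 16-bit down-counter.
 */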
+
+/*
+ * Set up timer(s).
+ */
static void __init ap_init_timer(void)
{
- integrator_time_init(1000000 * TICKS_PER_uSEC / HZ, 0);
+ u32 khz = TICKS_PER_uSEC * 1000;
+
+ writel(0, TIMER0_VA_BASE + TIMER_CTRL);
+ writel(0, TIMER1_VA_BASE + TIMER_CTRL);
+ writel(0, TIMER2_VA_BASE + TIMER_CTRL);
+
+ integrator_clocksource_init(khz);
+ integrator_clockevent_init(khz);
}
static struct sys_timer ap_timer = {
.init = ap_init_timer,
- .offset = integrator_gettimeoffset,
};
MACHINE_START(INTEGRATOR, "ARM-Integrator")
diff --git a/arch/arm/mach-integrator/integrator_cp.c b/arch/arm/mach-integrator/integrator_cp.c
index 15e6cc5a352f..cde57b2b83b5 100644
--- a/arch/arm/mach-integrator/integrator_cp.c
+++ b/arch/arm/mach-integrator/integrator_cp.c
@@ -25,10 +25,12 @@
#include <asm/clkdev.h>
#include <mach/clkdev.h>
#include <mach/hardware.h>
+#include <mach/platform.h>
#include <asm/irq.h>
#include <asm/setup.h>
#include <asm/mach-types.h>
-#include <asm/hardware/icst525.h>
+#include <asm/hardware/arm_timer.h>
+#include <asm/hardware/icst.h>
#include <mach/cm.h>
#include <mach/lm.h>
@@ -39,24 +41,20 @@
#include <asm/mach/map.h>
#include <asm/mach/time.h>
-#include "common.h"
-
-#define INTCP_PA_MMC_BASE 0x1c000000
-#define INTCP_PA_AACI_BASE 0x1d000000
+#include <plat/timer-sp.h>
#define INTCP_PA_FLASH_BASE 0x24000000
#define INTCP_FLASH_SIZE SZ_32M
#define INTCP_PA_CLCD_BASE 0xc0000000
-#define INTCP_VA_CIC_BASE IO_ADDRESS(INTEGRATOR_HDR_BASE) + 0x40
+#define INTCP_VA_CIC_BASE IO_ADDRESS(INTEGRATOR_HDR_BASE + 0x40)
#define INTCP_VA_PIC_BASE IO_ADDRESS(INTEGRATOR_IC_BASE)
-#define INTCP_VA_SIC_BASE IO_ADDRESS(0xca000000)
+#define INTCP_VA_SIC_BASE IO_ADDRESS(INTEGRATOR_CP_SIC_BASE)
-#define INTCP_PA_ETH_BASE 0xc8000000
#define INTCP_ETH_SIZE 0x10
-#define INTCP_VA_CTRL_BASE IO_ADDRESS(0xcb000000)
+#define INTCP_VA_CTRL_BASE IO_ADDRESS(INTEGRATOR_CP_CTL_BASE)
#define INTCP_FLASHPROG 0x04
#define CINTEGRATOR_FLASHPROG_FLVPPEN (1 << 0)
#define CINTEGRATOR_FLASHPROG_FLWREN (1 << 1)
@@ -71,7 +69,9 @@
* f1600000 16000000 UART 0
* f1700000 17000000 UART 1
* f1a00000 1a000000 Debug LEDs
- * f1b00000 1b000000 GPIO
+ * fc900000 c9000000 GPIO
+ * fca00000 ca000000 SIC
+ * fcb00000 cb000000 CP system control
*/
static struct map_desc intcp_io_desc[] __initdata = {
@@ -116,18 +116,18 @@ static struct map_desc intcp_io_desc[] __initdata = {
.length = SZ_4K,
.type = MT_DEVICE
}, {
- .virtual = IO_ADDRESS(INTEGRATOR_GPIO_BASE),
- .pfn = __phys_to_pfn(INTEGRATOR_GPIO_BASE),
+ .virtual = IO_ADDRESS(INTEGRATOR_CP_GPIO_BASE),
+ .pfn = __phys_to_pfn(INTEGRATOR_CP_GPIO_BASE),
.length = SZ_4K,
.type = MT_DEVICE
}, {
- .virtual = IO_ADDRESS(0xca000000),
- .pfn = __phys_to_pfn(0xca000000),
+ .virtual = IO_ADDRESS(INTEGRATOR_CP_SIC_BASE),
+ .pfn = __phys_to_pfn(INTEGRATOR_CP_SIC_BASE),
.length = SZ_4K,
.type = MT_DEVICE
}, {
- .virtual = IO_ADDRESS(0xcb000000),
- .pfn = __phys_to_pfn(0xcb000000),
+ .virtual = IO_ADDRESS(INTEGRATOR_CP_CTL_BASE),
+ .pfn = __phys_to_pfn(INTEGRATOR_CP_CTL_BASE),
.length = SZ_4K,
.type = MT_DEVICE
}
@@ -266,33 +266,43 @@ static void __init intcp_init_irq(void)
/*
* Clock handling
*/
-#define CM_LOCK (IO_ADDRESS(INTEGRATOR_HDR_BASE)+INTEGRATOR_HDR_LOCK_OFFSET)
-#define CM_AUXOSC (IO_ADDRESS(INTEGRATOR_HDR_BASE)+0x1c)
+#define CM_LOCK (__io_address(INTEGRATOR_HDR_BASE)+INTEGRATOR_HDR_LOCK_OFFSET)
+#define CM_AUXOSC (__io_address(INTEGRATOR_HDR_BASE)+0x1c)
-static const struct icst525_params cp_auxvco_params = {
- .ref = 24000,
- .vco_max = 320000,
+static const struct icst_params cp_auxvco_params = {
+ .ref = 24000000,
+ .vco_max = ICST525_VCO_MAX_5V,
+ .vco_min = ICST525_VCO_MIN,
.vd_min = 8,
.vd_max = 263,
.rd_min = 3,
.rd_max = 65,
+ .s2div = icst525_s2div,
+ .idx2s = icst525_idx2s,
};
-static void cp_auxvco_set(struct clk *clk, struct icst525_vco vco)
+static void cp_auxvco_set(struct clk *clk, struct icst_vco vco)
{
u32 val;
- val = readl(CM_AUXOSC) & ~0x7ffff;
+ val = readl(clk->vcoreg) & ~0x7ffff;
val |= vco.v | (vco.r << 9) | (vco.s << 16);
writel(0xa05f, CM_LOCK);
- writel(val, CM_AUXOSC);
+ writel(val, clk->vcoreg);
writel(0, CM_LOCK);
}
+static const struct clk_ops cp_auxclk_ops = {
+ .round = icst_clk_round,
+ .set = icst_clk_set,
+ .setvco = cp_auxvco_set,
+};
+
static struct clk cp_auxclk = {
+ .ops = &cp_auxclk_ops,
.params = &cp_auxvco_params,
- .setvco = cp_auxvco_set,
+ .vcoreg = CM_AUXOSC,
};
static struct clk_lookup cp_lookups[] = {
@@ -363,8 +373,8 @@ static struct platform_device intcp_flash_device = {
static struct resource smc91x_resources[] = {
[0] = {
- .start = INTCP_PA_ETH_BASE,
- .end = INTCP_PA_ETH_BASE + INTCP_ETH_SIZE - 1,
+ .start = INTEGRATOR_CP_ETH_BASE,
+ .end = INTEGRATOR_CP_ETH_BASE + INTCP_ETH_SIZE - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
@@ -394,8 +404,8 @@ static struct platform_device *intcp_devs[] __initdata = {
*/
static unsigned int mmc_status(struct device *dev)
{
- unsigned int status = readl(IO_ADDRESS(0xca000000) + 4);
- writel(8, IO_ADDRESS(0xcb000000) + 8);
+ unsigned int status = readl(IO_ADDRESS(0xca000000 + 4));
+ writel(8, IO_ADDRESS(INTEGRATOR_CP_CTL_BASE + 8));
return status & 8;
}
@@ -413,8 +423,8 @@ static struct amba_device mmc_device = {
.platform_data = &mmc_data,
},
.res = {
- .start = INTCP_PA_MMC_BASE,
- .end = INTCP_PA_MMC_BASE + SZ_4K - 1,
+ .start = INTEGRATOR_CP_MMC_BASE,
+ .end = INTEGRATOR_CP_MMC_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
.irq = { IRQ_CP_MMCIINT0, IRQ_CP_MMCIINT1 },
@@ -426,8 +436,8 @@ static struct amba_device aaci_device = {
.init_name = "mb:1d",
},
.res = {
- .start = INTCP_PA_AACI_BASE,
- .end = INTCP_PA_AACI_BASE + SZ_4K - 1,
+ .start = INTEGRATOR_CP_AACI_BASE,
+ .end = INTEGRATOR_CP_AACI_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
.irq = { IRQ_CP_AACIINT, NO_IRQ },
@@ -567,16 +577,22 @@ static void __init intcp_init(void)
}
}
-#define TIMER_CTRL_IE (1 << 5) /* Interrupt Enable */
+#define TIMER0_VA_BASE __io_address(INTEGRATOR_TIMER0_BASE)
+#define TIMER1_VA_BASE __io_address(INTEGRATOR_TIMER1_BASE)
+#define TIMER2_VA_BASE __io_address(INTEGRATOR_TIMER2_BASE)
static void __init intcp_timer_init(void)
{
- integrator_time_init(1000000 / HZ, TIMER_CTRL_IE);
+ writel(0, TIMER0_VA_BASE + TIMER_CTRL);
+ writel(0, TIMER1_VA_BASE + TIMER_CTRL);
+ writel(0, TIMER2_VA_BASE + TIMER_CTRL);
+
+ sp804_clocksource_init(TIMER2_VA_BASE);
+ sp804_clockevents_init(TIMER1_VA_BASE, IRQ_TIMERINT1);
}
static struct sys_timer cp_timer = {
.init = intcp_timer_init,
- .offset = integrator_gettimeoffset,
};
MACHINE_START(CINTEGRATOR, "ARM-IntegratorCP")
diff --git a/arch/arm/mach-integrator/leds.c b/arch/arm/mach-integrator/leds.c
index 8dcc823f4135..28be186adb89 100644
--- a/arch/arm/mach-integrator/leds.c
+++ b/arch/arm/mach-integrator/leds.c
@@ -27,6 +27,7 @@
#include <linux/io.h>
#include <mach/hardware.h>
+#include <mach/platform.h>
#include <asm/leds.h>
#include <asm/system.h>
#include <asm/mach-types.h>
diff --git a/arch/arm/mach-integrator/pci_v3.c b/arch/arm/mach-integrator/pci_v3.c
index ffbd349363af..9cef0590d5aa 100644
--- a/arch/arm/mach-integrator/pci_v3.c
+++ b/arch/arm/mach-integrator/pci_v3.c
@@ -29,6 +29,7 @@
#include <linux/io.h>
#include <mach/hardware.h>
+#include <mach/platform.h>
#include <asm/irq.h>
#include <asm/signal.h>
#include <asm/system.h>
@@ -389,9 +390,9 @@ static int __init pci_v3_setup_resources(struct resource **resource)
* means I can't get additional information on the reason for the pm2fb
* problems. I suppose I'll just have to mind-meld with the machine. ;)
*/
-#define SC_PCI (IO_ADDRESS(INTEGRATOR_SC_BASE) + INTEGRATOR_SC_PCIENABLE_OFFSET)
-#define SC_LBFADDR (IO_ADDRESS(INTEGRATOR_SC_BASE) + 0x20)
-#define SC_LBFCODE (IO_ADDRESS(INTEGRATOR_SC_BASE) + 0x24)
+#define SC_PCI IO_ADDRESS(INTEGRATOR_SC_PCIENABLE)
+#define SC_LBFADDR IO_ADDRESS(INTEGRATOR_SC_BASE + 0x20)
+#define SC_LBFCODE IO_ADDRESS(INTEGRATOR_SC_BASE + 0x24)
static int
v3_pci_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
diff --git a/arch/arm/mach-iop32x/n2100.c b/arch/arm/mach-iop32x/n2100.c
index 5d99039286eb..f108a31afc2b 100644
--- a/arch/arm/mach-iop32x/n2100.c
+++ b/arch/arm/mach-iop32x/n2100.c
@@ -176,7 +176,7 @@ static struct plat_serial8250_port n2100_serial_port[] = {
.mapbase = N2100_UART,
.membase = (char *)N2100_UART,
.irq = 0,
- .flags = UPF_SKIP_TEST,
+ .flags = UPF_SKIP_TEST | UPF_AUTO_IRQ | UPF_SHARE_IRQ,
.iotype = UPIO_MEM,
.regshift = 0,
.uartclk = 1843200,
diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c
index 71728d36d501..0bce09799d18 100644
--- a/arch/arm/mach-ixp4xx/common.c
+++ b/arch/arm/mach-ixp4xx/common.c
@@ -21,7 +21,6 @@
#include <linux/tty.h>
#include <linux/platform_device.h>
#include <linux/serial_core.h>
-#include <linux/bootmem.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/time.h>
diff --git a/arch/arm/mach-kirkwood/Kconfig b/arch/arm/mach-kirkwood/Kconfig
index 17879a876be6..29b2163b1fe3 100644
--- a/arch/arm/mach-kirkwood/Kconfig
+++ b/arch/arm/mach-kirkwood/Kconfig
@@ -38,6 +38,12 @@ config MACH_ESATA_SHEEVAPLUG
Say 'Y' here if you want your kernel to support the
Marvell eSATA SheevaPlug Reference Board.
+config MACH_GURUPLUG
+ bool "Marvell GuruPlug Reference Board"
+ help
+ Say 'Y' here if you want your kernel to support the
+ Marvell GuruPlug Reference Board.
+
config MACH_TS219
bool "QNAP TS-110, TS-119, TS-210, TS-219 and TS-219P Turbo NAS"
help
@@ -81,6 +87,18 @@ config MACH_INETSPACE_V2
Say 'Y' here if you want your kernel to support the
LaCie Internet Space v2 NAS.
+config MACH_NET2BIG_V2
+ bool "LaCie 2Big Network v2 NAS Board"
+ help
+ Say 'Y' here if you want your kernel to support the
+ LaCie 2Big Network v2 NAS.
+
+config MACH_NET5BIG_V2
+ bool "LaCie 5Big Network v2 NAS Board"
+ help
+ Say 'Y' here if you want your kernel to support the
+ LaCie 5Big Network v2 NAS.
+
endmenu
endif
diff --git a/arch/arm/mach-kirkwood/Makefile b/arch/arm/mach-kirkwood/Makefile
index a5530e36ba3e..c0cd5d362002 100644
--- a/arch/arm/mach-kirkwood/Makefile
+++ b/arch/arm/mach-kirkwood/Makefile
@@ -6,10 +6,13 @@ obj-$(CONFIG_MACH_RD88F6281) += rd88f6281-setup.o
obj-$(CONFIG_MACH_MV88F6281GTW_GE) += mv88f6281gtw_ge-setup.o
obj-$(CONFIG_MACH_SHEEVAPLUG) += sheevaplug-setup.o
obj-$(CONFIG_MACH_ESATA_SHEEVAPLUG) += sheevaplug-setup.o
+obj-$(CONFIG_MACH_GURUPLUG) += guruplug-setup.o
obj-$(CONFIG_MACH_TS219) += ts219-setup.o tsx1x-common.o
obj-$(CONFIG_MACH_TS41X) += ts41x-setup.o tsx1x-common.o
obj-$(CONFIG_MACH_OPENRD) += openrd-setup.o
obj-$(CONFIG_MACH_NETSPACE_V2) += netspace_v2-setup.o
obj-$(CONFIG_MACH_INETSPACE_V2) += netspace_v2-setup.o
+obj-$(CONFIG_MACH_NET2BIG_V2) += netxbig_v2-setup.o
+obj-$(CONFIG_MACH_NET5BIG_V2) += netxbig_v2-setup.o
obj-$(CONFIG_CPU_IDLE) += cpuidle.o
diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
index f759ca243925..6072eaa5e66a 100644
--- a/arch/arm/mach-kirkwood/common.c
+++ b/arch/arm/mach-kirkwood/common.c
@@ -305,6 +305,15 @@ void __init kirkwood_nand_init(struct mtd_partition *parts, int nr_parts,
platform_device_register(&kirkwood_nand_flash);
}
+void __init kirkwood_nand_init_rnb(struct mtd_partition *parts, int nr_parts,
+ int (*dev_ready)(struct mtd_info *))
+{
+ kirkwood_clk_ctrl |= CGC_RUNIT;
+ kirkwood_nand_data.parts = parts;
+ kirkwood_nand_data.nr_parts = nr_parts;
+ kirkwood_nand_data.dev_ready = dev_ready;
+ platform_device_register(&kirkwood_nand_flash);
+}
/*****************************************************************************
* SoC RTC
diff --git a/arch/arm/mach-kirkwood/common.h b/arch/arm/mach-kirkwood/common.h
index d7de43464358..05e8a8a5692e 100644
--- a/arch/arm/mach-kirkwood/common.h
+++ b/arch/arm/mach-kirkwood/common.h
@@ -16,6 +16,7 @@ struct mv643xx_eth_platform_data;
struct mv_sata_platform_data;
struct mvsdio_platform_data;
struct mtd_partition;
+struct mtd_info;
/*
* Basic Kirkwood init functions used early by machine-setup.
@@ -41,6 +42,7 @@ void kirkwood_i2c_init(void);
void kirkwood_uart0_init(void);
void kirkwood_uart1_init(void);
void kirkwood_nand_init(struct mtd_partition *parts, int nr_parts, int delay);
+void kirkwood_nand_init_rnb(struct mtd_partition *parts, int nr_parts, int (*dev_ready)(struct mtd_info *));
extern int kirkwood_tclk;
extern struct sys_timer kirkwood_timer;
diff --git a/arch/arm/mach-kirkwood/guruplug-setup.c b/arch/arm/mach-kirkwood/guruplug-setup.c
new file mode 100644
index 000000000000..54d07c89d4ff
--- /dev/null
+++ b/arch/arm/mach-kirkwood/guruplug-setup.c
@@ -0,0 +1,131 @@
+/*
+ * arch/arm/mach-kirkwood/guruplug-setup.c
+ *
+ * Marvell GuruPlug Reference Board Setup
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/partitions.h>
+#include <linux/ata_platform.h>
+#include <linux/mv643xx_eth.h>
+#include <linux/gpio.h>
+#include <linux/leds.h>
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <mach/kirkwood.h>
+#include <plat/mvsdio.h>
+#include "common.h"
+#include "mpp.h"
+
+static struct mtd_partition guruplug_nand_parts[] = {
+ {
+ .name = "u-boot",
+ .offset = 0,
+ .size = SZ_1M
+ }, {
+ .name = "uImage",
+ .offset = MTDPART_OFS_NXTBLK,
+ .size = SZ_4M
+ }, {
+ .name = "root",
+ .offset = MTDPART_OFS_NXTBLK,
+ .size = MTDPART_SIZ_FULL
+ },
+};
+
+static struct mv643xx_eth_platform_data guruplug_ge00_data = {
+ .phy_addr = MV643XX_ETH_PHY_ADDR(0),
+};
+
+static struct mv643xx_eth_platform_data guruplug_ge01_data = {
+ .phy_addr = MV643XX_ETH_PHY_ADDR(1),
+};
+
+static struct mv_sata_platform_data guruplug_sata_data = {
+ .n_ports = 1,
+};
+
+static struct mvsdio_platform_data guruplug_mvsdio_data = {
+ /* unfortunately the CD signal has not been connected */
+};
+
+static struct gpio_led guruplug_led_pins[] = {
+ {
+ .name = "guruplug:red:health",
+ .gpio = 46,
+ .active_low = 1,
+ },
+ {
+ .name = "guruplug:green:health",
+ .gpio = 47,
+ .active_low = 1,
+ },
+ {
+ .name = "guruplug:red:wmode",
+ .gpio = 48,
+ .active_low = 1,
+ },
+ {
+ .name = "guruplug:green:wmode",
+ .gpio = 49,
+ .active_low = 1,
+ },
+};
+
+static struct gpio_led_platform_data guruplug_led_data = {
+ .leds = guruplug_led_pins,
+ .num_leds = ARRAY_SIZE(guruplug_led_pins),
+};
+
+static struct platform_device guruplug_leds = {
+ .name = "leds-gpio",
+ .id = -1,
+ .dev = {
+ .platform_data = &guruplug_led_data,
+ }
+};
+
+static unsigned int guruplug_mpp_config[] __initdata = {
+ MPP46_GPIO, /* M_RLED */
+ MPP47_GPIO, /* M_GLED */
+ MPP48_GPIO, /* B_RLED */
+ MPP49_GPIO, /* B_GLED */
+ 0
+};
+
+static void __init guruplug_init(void)
+{
+ /*
+ * Basic setup. Needs to be called early.
+ */
+ kirkwood_init();
+ kirkwood_mpp_conf(guruplug_mpp_config);
+
+ kirkwood_uart0_init();
+ kirkwood_nand_init(ARRAY_AND_SIZE(guruplug_nand_parts), 25);
+
+ kirkwood_ehci_init();
+ kirkwood_ge00_init(&guruplug_ge00_data);
+ kirkwood_ge01_init(&guruplug_ge01_data);
+ kirkwood_sata_init(&guruplug_sata_data);
+ kirkwood_sdio_init(&guruplug_mvsdio_data);
+
+ platform_device_register(&guruplug_leds);
+}
+
+MACHINE_START(GURUPLUG, "Marvell GuruPlug Reference Board")
+ /* Maintainer: Siddarth Gore <gores@marvell.com> */
+ .phys_io = KIRKWOOD_REGS_PHYS_BASE,
+ .io_pg_offst = ((KIRKWOOD_REGS_VIRT_BASE) >> 18) & 0xfffc,
+ .boot_params = 0x00000100,
+ .init_machine = guruplug_init,
+ .map_io = kirkwood_map_io,
+ .init_irq = kirkwood_init_irq,
+ .timer = &kirkwood_timer,
+MACHINE_END
diff --git a/arch/arm/mach-kirkwood/netxbig_v2-setup.c b/arch/arm/mach-kirkwood/netxbig_v2-setup.c
new file mode 100644
index 000000000000..8a2bb0228e4f
--- /dev/null
+++ b/arch/arm/mach-kirkwood/netxbig_v2-setup.c
@@ -0,0 +1,415 @@
+/*
+ * arch/arm/mach-kirkwood/netxbig_v2-setup.c
+ *
+ * LaCie 2Big and 5Big Network v2 board setup
+ *
+ * Copyright (C) 2010 Simon Guinot <sguinot@lacie.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/physmap.h>
+#include <linux/spi/flash.h>
+#include <linux/spi/spi.h>
+#include <linux/ata_platform.h>
+#include <linux/mv643xx_eth.h>
+#include <linux/i2c.h>
+#include <linux/i2c/at24.h>
+#include <linux/input.h>
+#include <linux/gpio.h>
+#include <linux/gpio_keys.h>
+#include <linux/leds.h>
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/time.h>
+#include <mach/kirkwood.h>
+#include <plat/time.h>
+#include "common.h"
+#include "mpp.h"
+
+/*****************************************************************************
+ * 512KB SPI Flash on Boot Device (MACRONIX MX25L4005)
+ ****************************************************************************/
+
+static struct mtd_partition netxbig_v2_flash_parts[] = {
+ {
+ .name = "u-boot",
+ .size = MTDPART_SIZ_FULL,
+ .offset = 0,
+ .mask_flags = MTD_WRITEABLE, /* force read-only */
+ },
+};
+
+static const struct flash_platform_data netxbig_v2_flash = {
+ .type = "mx25l4005a",
+ .name = "spi_flash",
+ .parts = netxbig_v2_flash_parts,
+ .nr_parts = ARRAY_SIZE(netxbig_v2_flash_parts),
+};
+
+static struct spi_board_info __initdata netxbig_v2_spi_slave_info[] = {
+ {
+ .modalias = "m25p80",
+ .platform_data = &netxbig_v2_flash,
+ .irq = -1,
+ .max_speed_hz = 20000000,
+ .bus_num = 0,
+ .chip_select = 0,
+ },
+};
+
+/*****************************************************************************
+ * Ethernet
+ ****************************************************************************/
+
+static struct mv643xx_eth_platform_data netxbig_v2_ge00_data = {
+ .phy_addr = MV643XX_ETH_PHY_ADDR(8),
+};
+
+static struct mv643xx_eth_platform_data netxbig_v2_ge01_data = {
+ .phy_addr = MV643XX_ETH_PHY_ADDR(0),
+};
+
+/*****************************************************************************
+ * I2C devices
+ ****************************************************************************/
+
+static struct at24_platform_data at24c04 = {
+ .byte_len = SZ_4K / 8,
+ .page_size = 16,
+};
+
+/*
+ * i2c addr | chip | description
+ * 0x50 | HT24LC04 | eeprom (512B)
+ */
+
+static struct i2c_board_info __initdata netxbig_v2_i2c_info[] = {
+ {
+ I2C_BOARD_INFO("24c04", 0x50),
+ .platform_data = &at24c04,
+ }
+};
+
+/*****************************************************************************
+ * SATA
+ ****************************************************************************/
+
+static struct mv_sata_platform_data netxbig_v2_sata_data = {
+ .n_ports = 2,
+};
+
+static int __initdata netxbig_v2_gpio_hdd_power[] = { 16, 17, 41, 42, 43 };
+
+static void __init netxbig_v2_sata_power_init(void)
+{
+ int i;
+ int err;
+ int hdd_nb;
+
+ if (machine_is_net2big_v2())
+ hdd_nb = 2;
+ else
+ hdd_nb = 5;
+
+ /* Power up all hard disks. */
+ for (i = 0; i < hdd_nb; i++) {
+ err = gpio_request(netxbig_v2_gpio_hdd_power[i], NULL);
+ if (err == 0) {
+ err = gpio_direction_output(
+ netxbig_v2_gpio_hdd_power[i], 1);
+ /* Free the HDD power GPIOs. This allows user-space to
+ * configure them via the gpiolib sysfs interface. */
+ gpio_free(netxbig_v2_gpio_hdd_power[i]);
+ }
+ if (err)
+ pr_err("netxbig_v2: failed to power up HDD%d\n", i + 1);
+ }
+}
+
+/*****************************************************************************
+ * GPIO keys
+ ****************************************************************************/
+
+#define NETXBIG_V2_GPIO_SWITCH_POWER_ON 13
+#define NETXBIG_V2_GPIO_SWITCH_POWER_OFF 15
+#define NETXBIG_V2_GPIO_FUNC_BUTTON 34
+
+#define NETXBIG_V2_SWITCH_POWER_ON 0x1
+#define NETXBIG_V2_SWITCH_POWER_OFF 0x2
+
+static struct gpio_keys_button netxbig_v2_buttons[] = {
+ [0] = {
+ .type = EV_SW,
+ .code = NETXBIG_V2_SWITCH_POWER_ON,
+ .gpio = NETXBIG_V2_GPIO_SWITCH_POWER_ON,
+ .desc = "Back power switch (on|auto)",
+ .active_low = 1,
+ },
+ [1] = {
+ .type = EV_SW,
+ .code = NETXBIG_V2_SWITCH_POWER_OFF,
+ .gpio = NETXBIG_V2_GPIO_SWITCH_POWER_OFF,
+ .desc = "Back power switch (auto|off)",
+ .active_low = 1,
+ },
+ [2] = {
+ .code = KEY_OPTION,
+ .gpio = NETXBIG_V2_GPIO_FUNC_BUTTON,
+ .desc = "Function button",
+ .active_low = 1,
+ },
+};
+
+static struct gpio_keys_platform_data netxbig_v2_button_data = {
+ .buttons = netxbig_v2_buttons,
+ .nbuttons = ARRAY_SIZE(netxbig_v2_buttons),
+};
+
+static struct platform_device netxbig_v2_gpio_buttons = {
+ .name = "gpio-keys",
+ .id = -1,
+ .dev = {
+ .platform_data = &netxbig_v2_button_data,
+ },
+};
+
+/*****************************************************************************
+ * GPIO LEDs
+ ****************************************************************************/
+
+/*
+ * The LEDs are controlled by a CPLD and can be configured through a GPIO
+ * extension bus:
+ *
+ * - address register : bit [0-2] -> GPIO [47-49]
+ * - data register : bit [0-2] -> GPIO [44-46]
+ * - enable register : GPIO 29
+ *
+ * Address register selection:
+ *
+ * addr | register
+ * ----------------------------
+ * 0 | front LED
+ * 1 | front LED brightness
+ * 2 | HDD LED brightness
+ * 3 | HDD1 LED
+ * 4 | HDD2 LED
+ * 5 | HDD3 LED
+ * 6 | HDD4 LED
+ * 7 | HDD5 LED
+ *
+ * Data register configuration:
+ *
+ * data | LED brightness
+ * -------------------------------------------------
+ * 0 | min (off)
+ * - | -
+ * 7 | max
+ *
+ * data | front LED mode
+ * -------------------------------------------------
+ * 0 | fix off
+ * 1 | fix blue on
+ * 2 | fix red on
+ * 3 | blink blue on=1 sec and blue off=1 sec
+ * 4 | blink red on=1 sec and red off=1 sec
+ * 5 | blink blue on=2.5 sec and red on=0.5 sec
+ * 6 | blink blue on=1 sec and red on=1 sec
+ * 7 | blink blue on=0.5 sec and blue off=2.5 sec
+ *
+ * data | HDD LED mode
+ * -------------------------------------------------
+ * 0 | fix blue on
+ * 1 | SATA activity blink
+ * 2 | fix red on
+ * 3 | blink blue on=1 sec and blue off=1 sec
+ * 4 | blink red on=1 sec and red off=1 sec
+ * 5 | blink blue on=2.5 sec and red on=0.5 sec
+ * 6 | blink blue on=1 sec and red on=1 sec
+ * 7 | blink blue on=0.5 sec and blue off=2.5 sec
+ */
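+
+/*
+ * Illustrative sketch only, not part of the original patch: given the
+ * register map above, latching one value into a CPLD register could look
+ * roughly like this. The helper name is hypothetical, and requesting the
+ * GPIOs and setting their direction is assumed to be done elsewhere.
+ */
+static void __maybe_unused netxbig_v2_cpld_write(unsigned int addr,
+ unsigned int data)
+{
+ int i;
+
+ /* address register: bit [0-2] -> GPIO [47-49] */
+ for (i = 0; i < 3; i++)
+ gpio_set_value(47 + i, (addr >> i) & 1);
+ /* data register: bit [0-2] -> GPIO [44-46] */
+ for (i = 0; i < 3; i++)
+ gpio_set_value(44 + i, (data >> i) & 1);
+ /* pulse the enable line (GPIO 29) to latch address and data */
+ gpio_set_value(29, 1);
+ gpio_set_value(29, 0);
+}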
+
+/*****************************************************************************
+ * Timer
+ ****************************************************************************/
+
+static void netxbig_v2_timer_init(void)
+{
+ kirkwood_tclk = 166666667;
+ orion_time_init(IRQ_KIRKWOOD_BRIDGE, kirkwood_tclk);
+}
+
+struct sys_timer netxbig_v2_timer = {
+ .init = netxbig_v2_timer_init,
+};
+
+/*****************************************************************************
+ * General Setup
+ ****************************************************************************/
+
+static unsigned int net2big_v2_mpp_config[] __initdata = {
+ MPP0_SPI_SCn,
+ MPP1_SPI_MOSI,
+ MPP2_SPI_SCK,
+ MPP3_SPI_MISO,
+ MPP6_SYSRST_OUTn,
+ MPP7_GPO, /* Request power-off */
+ MPP8_TW_SDA,
+ MPP9_TW_SCK,
+ MPP10_UART0_TXD,
+ MPP11_UART0_RXD,
+ MPP13_GPIO, /* Rear power switch (on|auto) */
+ MPP14_GPIO, /* USB fuse alarm */
+ MPP15_GPIO, /* Rear power switch (auto|off) */
+ MPP16_GPIO, /* SATA HDD1 power */
+ MPP17_GPIO, /* SATA HDD2 power */
+ MPP20_SATA1_ACTn,
+ MPP21_SATA0_ACTn,
+ MPP24_GPIO, /* USB mode select */
+ MPP26_GPIO, /* USB device vbus */
+ MPP28_GPIO, /* USB enable host vbus */
+ MPP29_GPIO, /* CPLD extension ALE */
+ MPP34_GPIO, /* Rear Push button */
+ MPP35_GPIO, /* Inhibit switch power-off */
+ MPP36_GPIO, /* SATA HDD1 presence */
+ MPP37_GPIO, /* SATA HDD2 presence */
+ MPP40_GPIO, /* eSATA presence */
+ MPP44_GPIO, /* CPLD extension (data 0) */
+ MPP45_GPIO, /* CPLD extension (data 1) */
+ MPP46_GPIO, /* CPLD extension (data 2) */
+ MPP47_GPIO, /* CPLD extension (addr 0) */
+ MPP48_GPIO, /* CPLD extension (addr 1) */
+ MPP49_GPIO, /* CPLD extension (addr 2) */
+ 0
+};
+
+static unsigned int net5big_v2_mpp_config[] __initdata = {
+ MPP0_SPI_SCn,
+ MPP1_SPI_MOSI,
+ MPP2_SPI_SCK,
+ MPP3_SPI_MISO,
+ MPP6_SYSRST_OUTn,
+ MPP7_GPO, /* Request power-off */
+ MPP8_TW_SDA,
+ MPP9_TW_SCK,
+ MPP10_UART0_TXD,
+ MPP11_UART0_RXD,
+ MPP13_GPIO, /* Rear power switch (on|auto) */
+ MPP14_GPIO, /* USB fuse alarm */
+ MPP15_GPIO, /* Rear power switch (auto|off) */
+ MPP16_GPIO, /* SATA HDD1 power */
+ MPP17_GPIO, /* SATA HDD2 power */
+ MPP20_GE1_0,
+ MPP21_GE1_1,
+ MPP22_GE1_2,
+ MPP23_GE1_3,
+ MPP24_GE1_4,
+ MPP25_GE1_5,
+ MPP26_GE1_6,
+ MPP27_GE1_7,
+ MPP28_GPIO, /* USB enable host vbus */
+ MPP29_GPIO, /* CPLD extension ALE */
+ MPP30_GE1_10,
+ MPP31_GE1_11,
+ MPP32_GE1_12,
+ MPP33_GE1_13,
+ MPP34_GPIO, /* Rear Push button */
+ MPP35_GPIO, /* Inhibit switch power-off */
+ MPP36_GPIO, /* SATA HDD1 presence */
+ MPP37_GPIO, /* SATA HDD2 presence */
+ MPP38_GPIO, /* SATA HDD3 presence */
+ MPP39_GPIO, /* SATA HDD4 presence */
+ MPP40_GPIO, /* SATA HDD5 presence */
+ MPP41_GPIO, /* SATA HDD3 power */
+ MPP42_GPIO, /* SATA HDD4 power */
+ MPP43_GPIO, /* SATA HDD5 power */
+ MPP44_GPIO, /* CPLD extension (data 0) */
+ MPP45_GPIO, /* CPLD extension (data 1) */
+ MPP46_GPIO, /* CPLD extension (data 2) */
+ MPP47_GPIO, /* CPLD extension (addr 0) */
+ MPP48_GPIO, /* CPLD extension (addr 1) */
+ MPP49_GPIO, /* CPLD extension (addr 2) */
+ 0
+};
+
+#define NETXBIG_V2_GPIO_POWER_OFF 7
+
+static void netxbig_v2_power_off(void)
+{
+ gpio_set_value(NETXBIG_V2_GPIO_POWER_OFF, 1);
+}
+
+static void __init netxbig_v2_init(void)
+{
+ /*
+ * Basic setup. Needs to be called early.
+ */
+ kirkwood_init();
+ if (machine_is_net2big_v2())
+ kirkwood_mpp_conf(net2big_v2_mpp_config);
+ else
+ kirkwood_mpp_conf(net5big_v2_mpp_config);
+
+ netxbig_v2_sata_power_init();
+
+ kirkwood_ehci_init();
+ kirkwood_ge00_init(&netxbig_v2_ge00_data);
+ if (machine_is_net5big_v2())
+ kirkwood_ge01_init(&netxbig_v2_ge01_data);
+ kirkwood_sata_init(&netxbig_v2_sata_data);
+ kirkwood_uart0_init();
+ spi_register_board_info(netxbig_v2_spi_slave_info,
+ ARRAY_SIZE(netxbig_v2_spi_slave_info));
+ kirkwood_spi_init();
+ kirkwood_i2c_init();
+ i2c_register_board_info(0, netxbig_v2_i2c_info,
+ ARRAY_SIZE(netxbig_v2_i2c_info));
+
+ platform_device_register(&netxbig_v2_gpio_buttons);
+
+ if (gpio_request(NETXBIG_V2_GPIO_POWER_OFF, "power-off") == 0 &&
+ gpio_direction_output(NETXBIG_V2_GPIO_POWER_OFF, 0) == 0)
+ pm_power_off = netxbig_v2_power_off;
+ else
+ pr_err("netxbig_v2: failed to configure power-off GPIO\n");
+}
+
+#ifdef CONFIG_MACH_NET2BIG_V2
+MACHINE_START(NET2BIG_V2, "LaCie 2Big Network v2")
+ .phys_io = KIRKWOOD_REGS_PHYS_BASE,
+ .io_pg_offst = ((KIRKWOOD_REGS_VIRT_BASE) >> 18) & 0xfffc,
+ .boot_params = 0x00000100,
+ .init_machine = netxbig_v2_init,
+ .map_io = kirkwood_map_io,
+ .init_irq = kirkwood_init_irq,
+ .timer = &netxbig_v2_timer,
+MACHINE_END
+#endif
+
+#ifdef CONFIG_MACH_NET5BIG_V2
+MACHINE_START(NET5BIG_V2, "LaCie 5Big Network v2")
+ .phys_io = KIRKWOOD_REGS_PHYS_BASE,
+ .io_pg_offst = ((KIRKWOOD_REGS_VIRT_BASE) >> 18) & 0xfffc,
+ .boot_params = 0x00000100,
+ .init_machine = netxbig_v2_init,
+ .map_io = kirkwood_map_io,
+ .init_irq = kirkwood_init_irq,
+ .timer = &netxbig_v2_timer,
+MACHINE_END
+#endif
diff --git a/arch/arm/mach-mmp/aspenite.c b/arch/arm/mach-mmp/aspenite.c
index a2d307ec0420..244655d323ea 100644
--- a/arch/arm/mach-mmp/aspenite.c
+++ b/arch/arm/mach-mmp/aspenite.c
@@ -59,6 +59,13 @@ static unsigned long common_pin_config[] __initdata = {
/* UART1 */
GPIO107_UART1_RXD,
GPIO108_UART1_TXD,
+
+ /* SSP1 */
+ GPIO113_I2S_MCLK,
+ GPIO114_I2S_FRM,
+ GPIO115_I2S_BCLK,
+ GPIO116_I2S_RXD,
+ GPIO117_I2S_TXD,
};
static struct smc91x_platdata smc91x_info = {
@@ -123,12 +130,18 @@ static struct pxa3xx_nand_platform_data aspenite_nand_info = {
.nr_parts = ARRAY_SIZE(aspenite_nand_partitions),
};
+static struct i2c_board_info aspenite_i2c_info[] __initdata = {
+ { I2C_BOARD_INFO("wm8753", 0x1b), },
+};
+
static void __init common_init(void)
{
mfp_config(ARRAY_AND_SIZE(common_pin_config));
/* on-chip devices */
pxa168_add_uart(1);
+ pxa168_add_twsi(1, NULL, ARRAY_AND_SIZE(aspenite_i2c_info));
+ pxa168_add_ssp(1);
pxa168_add_nand(&aspenite_nand_info);
/* off-chip devices */
diff --git a/arch/arm/mach-mmp/include/mach/gpio.h b/arch/arm/mach-mmp/include/mach/gpio.h
index ab26d13295c4..ee8b02ed8011 100644
--- a/arch/arm/mach-mmp/include/mach/gpio.h
+++ b/arch/arm/mach-mmp/include/mach/gpio.h
@@ -10,7 +10,7 @@
#define BANK_OFF(n) (((n) < 3) ? (n) << 2 : 0x100 + (((n) - 3) << 2))
#define GPIO_REG(x) (*((volatile u32 *)(GPIO_REGS_VIRT + (x))))
-#define NR_BUILTIN_GPIO (128)
+#define NR_BUILTIN_GPIO (192)
#define gpio_to_bank(gpio) ((gpio) >> 5)
#define gpio_to_irq(gpio) (IRQ_GPIO_START + (gpio))
diff --git a/arch/arm/mach-mmp/include/mach/irqs.h b/arch/arm/mach-mmp/include/mach/irqs.h
index 02701196ea03..b379cdec4d38 100644
--- a/arch/arm/mach-mmp/include/mach/irqs.h
+++ b/arch/arm/mach-mmp/include/mach/irqs.h
@@ -5,10 +5,10 @@
* Interrupt numbers for PXA168
*/
#define IRQ_PXA168_NONE (-1)
-#define IRQ_PXA168_SSP3 0
-#define IRQ_PXA168_SSP2 1
-#define IRQ_PXA168_SSP1 2
-#define IRQ_PXA168_SSP0 3
+#define IRQ_PXA168_SSP4 0
+#define IRQ_PXA168_SSP3 1
+#define IRQ_PXA168_SSP2 2
+#define IRQ_PXA168_SSP1 3
#define IRQ_PXA168_PMIC_INT 4
#define IRQ_PXA168_RTC_INT 5
#define IRQ_PXA168_RTC_ALARM 6
@@ -20,7 +20,7 @@
#define IRQ_PXA168_TIMER2 14
#define IRQ_PXA168_TIMER3 15
#define IRQ_PXA168_CMU 16
-#define IRQ_PXA168_SSP4 17
+#define IRQ_PXA168_SSP5 17
#define IRQ_PXA168_MSP_WAKEUP 19
#define IRQ_PXA168_CF_WAKEUP 20
#define IRQ_PXA168_XD_WAKEUP 21
diff --git a/arch/arm/mach-mmp/include/mach/mfp-mmp2.h b/arch/arm/mach-mmp/include/mach/mfp-mmp2.h
index 9f9f8143e272..761c2dacc079 100644
--- a/arch/arm/mach-mmp/include/mach/mfp-mmp2.h
+++ b/arch/arm/mach-mmp/include/mach/mfp-mmp2.h
@@ -9,6 +9,175 @@
#define MFP_DRIVE_FAST (0x8 << 13)
/* GPIO */
+#define GPIO0_GPIO0 MFP_CFG(GPIO0, AF0)
+#define GPIO1_GPIO1 MFP_CFG(GPIO1, AF0)
+#define GPIO2_GPIO2 MFP_CFG(GPIO2, AF0)
+#define GPIO3_GPIO3 MFP_CFG(GPIO3, AF0)
+#define GPIO4_GPIO4 MFP_CFG(GPIO4, AF0)
+#define GPIO5_GPIO5 MFP_CFG(GPIO5, AF0)
+#define GPIO6_GPIO6 MFP_CFG(GPIO6, AF0)
+#define GPIO7_GPIO7 MFP_CFG(GPIO7, AF0)
+#define GPIO8_GPIO8 MFP_CFG(GPIO8, AF0)
+#define GPIO9_GPIO9 MFP_CFG(GPIO9, AF0)
+#define GPIO10_GPIO10 MFP_CFG(GPIO10, AF0)
+#define GPIO11_GPIO11 MFP_CFG(GPIO11, AF0)
+#define GPIO12_GPIO12 MFP_CFG(GPIO12, AF0)
+#define GPIO13_GPIO13 MFP_CFG(GPIO13, AF0)
+#define GPIO14_GPIO14 MFP_CFG(GPIO14, AF0)
+#define GPIO15_GPIO15 MFP_CFG(GPIO15, AF0)
+#define GPIO16_GPIO16 MFP_CFG(GPIO16, AF0)
+#define GPIO17_GPIO17 MFP_CFG(GPIO17, AF0)
+#define GPIO18_GPIO18 MFP_CFG(GPIO18, AF0)
+#define GPIO19_GPIO19 MFP_CFG(GPIO19, AF0)
+#define GPIO20_GPIO20 MFP_CFG(GPIO20, AF0)
+#define GPIO21_GPIO21 MFP_CFG(GPIO21, AF0)
+#define GPIO22_GPIO22 MFP_CFG(GPIO22, AF0)
+#define GPIO23_GPIO23 MFP_CFG(GPIO23, AF0)
+#define GPIO24_GPIO24 MFP_CFG(GPIO24, AF0)
+#define GPIO25_GPIO25 MFP_CFG(GPIO25, AF0)
+#define GPIO26_GPIO26 MFP_CFG(GPIO26, AF0)
+#define GPIO27_GPIO27 MFP_CFG(GPIO27, AF0)
+#define GPIO28_GPIO28 MFP_CFG(GPIO28, AF0)
+#define GPIO29_GPIO29 MFP_CFG(GPIO29, AF0)
+#define GPIO30_GPIO30 MFP_CFG(GPIO30, AF0)
+#define GPIO31_GPIO31 MFP_CFG(GPIO31, AF0)
+#define GPIO32_GPIO32 MFP_CFG(GPIO32, AF0)
+#define GPIO33_GPIO33 MFP_CFG(GPIO33, AF0)
+#define GPIO34_GPIO34 MFP_CFG(GPIO34, AF0)
+#define GPIO35_GPIO35 MFP_CFG(GPIO35, AF0)
+#define GPIO36_GPIO36 MFP_CFG(GPIO36, AF0)
+#define GPIO37_GPIO37 MFP_CFG(GPIO37, AF0)
+#define GPIO38_GPIO38 MFP_CFG(GPIO38, AF0)
+#define GPIO39_GPIO39 MFP_CFG(GPIO39, AF0)
+#define GPIO40_GPIO40 MFP_CFG(GPIO40, AF0)
+#define GPIO41_GPIO41 MFP_CFG(GPIO41, AF0)
+#define GPIO42_GPIO42 MFP_CFG(GPIO42, AF0)
+#define GPIO43_GPIO43 MFP_CFG(GPIO43, AF0)
+#define GPIO44_GPIO44 MFP_CFG(GPIO44, AF0)
+#define GPIO45_GPIO45 MFP_CFG(GPIO45, AF0)
+#define GPIO46_GPIO46 MFP_CFG(GPIO46, AF0)
+#define GPIO47_GPIO47 MFP_CFG(GPIO47, AF0)
+#define GPIO48_GPIO48 MFP_CFG(GPIO48, AF0)
+#define GPIO49_GPIO49 MFP_CFG(GPIO49, AF0)
+#define GPIO50_GPIO50 MFP_CFG(GPIO50, AF0)
+#define GPIO51_GPIO51 MFP_CFG(GPIO51, AF0)
+#define GPIO52_GPIO52 MFP_CFG(GPIO52, AF0)
+#define GPIO53_GPIO53 MFP_CFG(GPIO53, AF0)
+#define GPIO54_GPIO54 MFP_CFG(GPIO54, AF0)
+#define GPIO55_GPIO55 MFP_CFG(GPIO55, AF0)
+#define GPIO56_GPIO56 MFP_CFG(GPIO56, AF0)
+#define GPIO57_GPIO57 MFP_CFG(GPIO57, AF0)
+#define GPIO58_GPIO58 MFP_CFG(GPIO58, AF0)
+#define GPIO59_GPIO59 MFP_CFG(GPIO59, AF0)
+#define GPIO60_GPIO60 MFP_CFG(GPIO60, AF0)
+#define GPIO61_GPIO61 MFP_CFG(GPIO61, AF0)
+#define GPIO62_GPIO62 MFP_CFG(GPIO62, AF0)
+#define GPIO63_GPIO63 MFP_CFG(GPIO63, AF0)
+#define GPIO64_GPIO64 MFP_CFG(GPIO64, AF0)
+#define GPIO65_GPIO65 MFP_CFG(GPIO65, AF0)
+#define GPIO66_GPIO66 MFP_CFG(GPIO66, AF0)
+#define GPIO67_GPIO67 MFP_CFG(GPIO67, AF0)
+#define GPIO68_GPIO68 MFP_CFG(GPIO68, AF0)
+#define GPIO69_GPIO69 MFP_CFG(GPIO69, AF0)
+#define GPIO70_GPIO70 MFP_CFG(GPIO70, AF0)
+#define GPIO71_GPIO71 MFP_CFG(GPIO71, AF0)
+#define GPIO72_GPIO72 MFP_CFG(GPIO72, AF0)
+#define GPIO73_GPIO73 MFP_CFG(GPIO73, AF0)
+#define GPIO74_GPIO74 MFP_CFG(GPIO74, AF0)
+#define GPIO75_GPIO75 MFP_CFG(GPIO75, AF0)
+#define GPIO76_GPIO76 MFP_CFG(GPIO76, AF0)
+#define GPIO77_GPIO77 MFP_CFG(GPIO77, AF0)
+#define GPIO78_GPIO78 MFP_CFG(GPIO78, AF0)
+#define GPIO79_GPIO79 MFP_CFG(GPIO79, AF0)
+#define GPIO80_GPIO80 MFP_CFG(GPIO80, AF0)
+#define GPIO81_GPIO81 MFP_CFG(GPIO81, AF0)
+#define GPIO82_GPIO82 MFP_CFG(GPIO82, AF0)
+#define GPIO83_GPIO83 MFP_CFG(GPIO83, AF0)
+#define GPIO84_GPIO84 MFP_CFG(GPIO84, AF0)
+#define GPIO85_GPIO85 MFP_CFG(GPIO85, AF0)
+#define GPIO86_GPIO86 MFP_CFG(GPIO86, AF0)
+#define GPIO87_GPIO87 MFP_CFG(GPIO87, AF0)
+#define GPIO88_GPIO88 MFP_CFG(GPIO88, AF0)
+#define GPIO89_GPIO89 MFP_CFG(GPIO89, AF0)
+#define GPIO90_GPIO90 MFP_CFG(GPIO90, AF0)
+#define GPIO91_GPIO91 MFP_CFG(GPIO91, AF0)
+#define GPIO92_GPIO92 MFP_CFG(GPIO92, AF0)
+#define GPIO93_GPIO93 MFP_CFG(GPIO93, AF0)
+#define GPIO94_GPIO94 MFP_CFG(GPIO94, AF0)
+#define GPIO95_GPIO95 MFP_CFG(GPIO95, AF0)
+#define GPIO96_GPIO96 MFP_CFG(GPIO96, AF0)
+#define GPIO97_GPIO97 MFP_CFG(GPIO97, AF0)
+#define GPIO98_GPIO98 MFP_CFG(GPIO98, AF0)
+#define GPIO99_GPIO99 MFP_CFG(GPIO99, AF0)
+#define GPIO100_GPIO100 MFP_CFG(GPIO100, AF0)
+#define GPIO101_GPIO101 MFP_CFG(GPIO101, AF0)
+#define GPIO102_GPIO102 MFP_CFG(GPIO102, AF1)
+#define GPIO103_GPIO103 MFP_CFG(GPIO103, AF1)
+#define GPIO104_GPIO104 MFP_CFG(GPIO104, AF1)
+#define GPIO105_GPIO105 MFP_CFG(GPIO105, AF1)
+#define GPIO106_GPIO106 MFP_CFG(GPIO106, AF1)
+#define GPIO107_GPIO107 MFP_CFG(GPIO107, AF1)
+#define GPIO108_GPIO108 MFP_CFG(GPIO108, AF1)
+#define GPIO109_GPIO109 MFP_CFG(GPIO109, AF1)
+#define GPIO110_GPIO110 MFP_CFG(GPIO110, AF1)
+#define GPIO111_GPIO111 MFP_CFG(GPIO111, AF1)
+#define GPIO112_GPIO112 MFP_CFG(GPIO112, AF1)
+#define GPIO113_GPIO113 MFP_CFG(GPIO113, AF1)
+#define GPIO114_GPIO114 MFP_CFG(GPIO114, AF0)
+#define GPIO115_GPIO115 MFP_CFG(GPIO115, AF0)
+#define GPIO116_GPIO116 MFP_CFG(GPIO116, AF0)
+#define GPIO117_GPIO117 MFP_CFG(GPIO117, AF0)
+#define GPIO118_GPIO118 MFP_CFG(GPIO118, AF0)
+#define GPIO119_GPIO119 MFP_CFG(GPIO119, AF0)
+#define GPIO120_GPIO120 MFP_CFG(GPIO120, AF0)
+#define GPIO121_GPIO121 MFP_CFG(GPIO121, AF0)
+#define GPIO122_GPIO122 MFP_CFG(GPIO122, AF0)
+#define GPIO123_GPIO123 MFP_CFG(GPIO123, AF0)
+#define GPIO124_GPIO124 MFP_CFG(GPIO124, AF0)
+#define GPIO125_GPIO125 MFP_CFG(GPIO125, AF0)
+#define GPIO126_GPIO126 MFP_CFG(GPIO126, AF0)
+#define GPIO127_GPIO127 MFP_CFG(GPIO127, AF0)
+#define GPIO128_GPIO128 MFP_CFG(GPIO128, AF0)
+#define GPIO129_GPIO129 MFP_CFG(GPIO129, AF0)
+#define GPIO130_GPIO130 MFP_CFG(GPIO130, AF0)
+#define GPIO131_GPIO131 MFP_CFG(GPIO131, AF0)
+#define GPIO132_GPIO132 MFP_CFG(GPIO132, AF0)
+#define GPIO133_GPIO133 MFP_CFG(GPIO133, AF0)
+#define GPIO134_GPIO134 MFP_CFG(GPIO134, AF0)
+#define GPIO135_GPIO135 MFP_CFG(GPIO135, AF0)
+#define GPIO136_GPIO136 MFP_CFG(GPIO136, AF0)
+#define GPIO137_GPIO137 MFP_CFG(GPIO137, AF0)
+#define GPIO138_GPIO138 MFP_CFG(GPIO138, AF0)
+#define GPIO139_GPIO139 MFP_CFG(GPIO139, AF0)
+#define GPIO140_GPIO140 MFP_CFG(GPIO140, AF0)
+#define GPIO141_GPIO141 MFP_CFG(GPIO141, AF0)
+#define GPIO142_GPIO142 MFP_CFG(GPIO142, AF1)
+#define GPIO143_GPIO143 MFP_CFG(GPIO143, AF1)
+#define GPIO144_GPIO144 MFP_CFG(GPIO144, AF1)
+#define GPIO145_GPIO145 MFP_CFG(GPIO145, AF1)
+#define GPIO146_GPIO146 MFP_CFG(GPIO146, AF1)
+#define GPIO147_GPIO147 MFP_CFG(GPIO147, AF1)
+#define GPIO148_GPIO148 MFP_CFG(GPIO148, AF1)
+#define GPIO149_GPIO149 MFP_CFG(GPIO149, AF1)
+#define GPIO150_GPIO150 MFP_CFG(GPIO150, AF1)
+#define GPIO151_GPIO151 MFP_CFG(GPIO151, AF1)
+#define GPIO152_GPIO152 MFP_CFG(GPIO152, AF1)
+#define GPIO153_GPIO153 MFP_CFG(GPIO153, AF1)
+#define GPIO154_GPIO154 MFP_CFG(GPIO154, AF1)
+#define GPIO155_GPIO155 MFP_CFG(GPIO155, AF1)
+#define GPIO156_GPIO156 MFP_CFG(GPIO156, AF1)
+#define GPIO157_GPIO157 MFP_CFG(GPIO157, AF1)
+#define GPIO158_GPIO158 MFP_CFG(GPIO158, AF1)
+#define GPIO159_GPIO159 MFP_CFG(GPIO159, AF1)
+#define GPIO160_GPIO160 MFP_CFG(GPIO160, AF1)
+#define GPIO161_GPIO161 MFP_CFG(GPIO161, AF1)
+#define GPIO162_GPIO162 MFP_CFG(GPIO162, AF1)
+#define GPIO163_GPIO163 MFP_CFG(GPIO163, AF1)
+#define GPIO164_GPIO164 MFP_CFG(GPIO164, AF1)
+#define GPIO165_GPIO165 MFP_CFG(GPIO165, AF1)
+#define GPIO166_GPIO166 MFP_CFG(GPIO166, AF1)
+#define GPIO167_GPIO167 MFP_CFG(GPIO167, AF1)
+#define GPIO168_GPIO168 MFP_CFG(GPIO168, AF1)
/* DFI */
#define GPIO108_DFI_D15 MFP_CFG(GPIO108, AF0)
@@ -47,7 +216,6 @@
/* Ethernet */
#define GPIO155_SM_ADVMUX MFP_CFG(GPIO155, AF2)
-#define GPIO155_GPIO155 MFP_CFG(GPIO155, AF1)
/* UART1 */
#define GPIO45_UART1_RXD MFP_CFG(GPIO45, AF1)
@@ -159,6 +327,8 @@
#define GPIO44_TWSI2_SDA MFP_CFG_DRV(GPIO44, AF1, SLOW)
#define GPIO71_TWSI3_SCL MFP_CFG_DRV(GPIO71, AF1, SLOW)
#define GPIO72_TWSI3_SDA MFP_CFG_DRV(GPIO72, AF1, SLOW)
+#define TWSI4_SCL MFP_CFG_DRV(TWSI4_SCL, AF0, SLOW)
+#define TWSI4_SDA MFP_CFG_DRV(TWSI4_SDA, AF0, SLOW)
#define GPIO99_TWSI5_SCL MFP_CFG_DRV(GPIO99, AF4, SLOW)
#define GPIO100_TWSI5_SDA MFP_CFG_DRV(GPIO100, AF4, SLOW)
#define GPIO97_TWSI6_SCL MFP_CFG_DRV(GPIO97, AF2, SLOW)
@@ -218,21 +388,6 @@
#define GPIO69_CAM_MCLK MFP_CFG_DRV(GPIO69, AF1, FAST)
#define GPIO70_CAM_PCLK MFP_CFG_DRV(GPIO70, AF1, FAST)
-/* Wifi */
-#define GPIO45_GPIO45 MFP_CFG(GPIO45, AF0)
-#define GPIO46_GPIO46 MFP_CFG(GPIO46, AF0)
-#define GPIO21_GPIO21 MFP_CFG(GPIO21, AF0)
-#define GPIO22_GPIO22 MFP_CFG(GPIO22, AF0)
-#define GPIO55_GPIO55 MFP_CFG(GPIO55, AF0)
-#define GPIO56_GPIO56 MFP_CFG(GPIO56, AF0)
-#define GPIO57_GPIO57 MFP_CFG(GPIO57, AF0)
-#define GPIO58_GPIO58 MFP_CFG(GPIO58, AF0)
-
-/* Codec*/
-#define GPIO23_GPIO23 MFP_CFG(GPIO23, AF0)
-
-#define GPIO101_GPIO101 MFP_CFG(GPIO101, AF0)
-
/* PMIC */
#define PMIC_PMIC_INT MFP_CFG(PMIC_INT, AF0)
diff --git a/arch/arm/mach-mmp/include/mach/mmp2.h b/arch/arm/mach-mmp/include/mach/mmp2.h
index 459f3be9cfb2..fec220bd5046 100644
--- a/arch/arm/mach-mmp/include/mach/mmp2.h
+++ b/arch/arm/mach-mmp/include/mach/mmp2.h
@@ -39,17 +39,17 @@ static inline int mmp2_add_twsi(int id, struct i2c_pxa_platform_data *data,
int ret;
switch (id) {
- case 0: d = &mmp2_device_twsi1; break;
- case 1: d = &mmp2_device_twsi2; break;
- case 2: d = &mmp2_device_twsi3; break;
- case 3: d = &mmp2_device_twsi4; break;
- case 4: d = &mmp2_device_twsi5; break;
- case 5: d = &mmp2_device_twsi6; break;
+ case 1: d = &mmp2_device_twsi1; break;
+ case 2: d = &mmp2_device_twsi2; break;
+ case 3: d = &mmp2_device_twsi3; break;
+ case 4: d = &mmp2_device_twsi4; break;
+ case 5: d = &mmp2_device_twsi5; break;
+ case 6: d = &mmp2_device_twsi6; break;
default:
return -EINVAL;
}
- ret = i2c_register_board_info(id, info, size);
+ ret = i2c_register_board_info(id - 1, info, size);
if (ret)
return ret;
diff --git a/arch/arm/mach-mmp/include/mach/pxa168.h b/arch/arm/mach-mmp/include/mach/pxa168.h
index 3ad612cbdf09..3b2bd5d5eb05 100644
--- a/arch/arm/mach-mmp/include/mach/pxa168.h
+++ b/arch/arm/mach-mmp/include/mach/pxa168.h
@@ -14,6 +14,11 @@ extern struct pxa_device_desc pxa168_device_pwm1;
extern struct pxa_device_desc pxa168_device_pwm2;
extern struct pxa_device_desc pxa168_device_pwm3;
extern struct pxa_device_desc pxa168_device_pwm4;
+extern struct pxa_device_desc pxa168_device_ssp1;
+extern struct pxa_device_desc pxa168_device_ssp2;
+extern struct pxa_device_desc pxa168_device_ssp3;
+extern struct pxa_device_desc pxa168_device_ssp4;
+extern struct pxa_device_desc pxa168_device_ssp5;
extern struct pxa_device_desc pxa168_device_nand;
static inline int pxa168_add_uart(int id)
@@ -67,6 +72,22 @@ static inline int pxa168_add_pwm(int id)
return pxa_register_device(d, NULL, 0);
}
+static inline int pxa168_add_ssp(int id)
+{
+ struct pxa_device_desc *d = NULL;
+
+ switch (id) {
+ case 1: d = &pxa168_device_ssp1; break;
+ case 2: d = &pxa168_device_ssp2; break;
+ case 3: d = &pxa168_device_ssp3; break;
+ case 4: d = &pxa168_device_ssp4; break;
+ case 5: d = &pxa168_device_ssp5; break;
+ default:
+ return -EINVAL;
+ }
+ return pxa_register_device(d, NULL, 0);
+}
+
static inline int pxa168_add_nand(struct pxa3xx_nand_platform_data *info)
{
return pxa_register_device(&pxa168_device_nand, info, sizeof(*info));
diff --git a/arch/arm/mach-mmp/include/mach/regs-apbc.h b/arch/arm/mach-mmp/include/mach/regs-apbc.h
index 712af03fd1af..1a96585336ba 100644
--- a/arch/arm/mach-mmp/include/mach/regs-apbc.h
+++ b/arch/arm/mach-mmp/include/mach/regs-apbc.h
@@ -26,8 +26,6 @@
#define APBC_PXA168_PWM2 APBC_REG(0x010)
#define APBC_PXA168_PWM3 APBC_REG(0x014)
#define APBC_PXA168_PWM4 APBC_REG(0x018)
-#define APBC_PXA168_SSP1 APBC_REG(0x01c)
-#define APBC_PXA168_SSP2 APBC_REG(0x020)
#define APBC_PXA168_RTC APBC_REG(0x028)
#define APBC_PXA168_TWSI0 APBC_REG(0x02c)
#define APBC_PXA168_KPC APBC_REG(0x030)
@@ -35,14 +33,16 @@
#define APBC_PXA168_AIB APBC_REG(0x03c)
#define APBC_PXA168_SW_JTAG APBC_REG(0x040)
#define APBC_PXA168_ONEWIRE APBC_REG(0x048)
-#define APBC_PXA168_SSP3 APBC_REG(0x04c)
#define APBC_PXA168_ASFAR APBC_REG(0x050)
#define APBC_PXA168_ASSAR APBC_REG(0x054)
-#define APBC_PXA168_SSP4 APBC_REG(0x058)
-#define APBC_PXA168_SSP5 APBC_REG(0x05c)
#define APBC_PXA168_TWSI1 APBC_REG(0x06c)
#define APBC_PXA168_UART3 APBC_REG(0x070)
#define APBC_PXA168_AC97 APBC_REG(0x084)
+#define APBC_PXA168_SSP1 APBC_REG(0x81c)
+#define APBC_PXA168_SSP2 APBC_REG(0x820)
+#define APBC_PXA168_SSP3 APBC_REG(0x84c)
+#define APBC_PXA168_SSP4 APBC_REG(0x858)
+#define APBC_PXA168_SSP5 APBC_REG(0x85c)
/*
* APB Clock register offsets for PXA910
diff --git a/arch/arm/mach-mmp/include/mach/regs-smc.h b/arch/arm/mach-mmp/include/mach/regs-smc.h
new file mode 100644
index 000000000000..e484d40d71bd
--- /dev/null
+++ b/arch/arm/mach-mmp/include/mach/regs-smc.h
@@ -0,0 +1,37 @@
+/*
+ * linux/arch/arm/mach-mmp/include/mach/regs-smc.h
+ *
+ * Static Memory Controller Registers
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_MACH_REGS_SMC_H
+#define __ASM_MACH_REGS_SMC_H
+
+#include <mach/addr-map.h>
+
+#define SMC_VIRT_BASE (AXI_VIRT_BASE + 0x83800)
+#define SMC_REG(x) (SMC_VIRT_BASE + (x))
+
+#define SMC_MSC0 SMC_REG(0x0020)
+#define SMC_MSC1 SMC_REG(0x0024)
+#define SMC_SXCNFG0 SMC_REG(0x0030)
+#define SMC_SXCNFG1 SMC_REG(0x0034)
+#define SMC_MEMCLKCFG SMC_REG(0x0068)
+#define SMC_CSDFICFG0 SMC_REG(0x0090)
+#define SMC_CSDFICFG1 SMC_REG(0x0094)
+#define SMC_CLK_RET_DEL SMC_REG(0x00b0)
+#define SMC_ADV_RET_DEL SMC_REG(0x00b4)
+#define SMC_CSADRMAP0 SMC_REG(0x00c0)
+#define SMC_CSADRMAP1 SMC_REG(0x00c4)
+#define SMC_WE_AP0 SMC_REG(0x00e0)
+#define SMC_WE_AP1 SMC_REG(0x00e4)
+#define SMC_OE_AP0 SMC_REG(0x00f0)
+#define SMC_OE_AP1 SMC_REG(0x00f4)
+#define SMC_ADV_AP0 SMC_REG(0x0100)
+#define SMC_ADV_AP1 SMC_REG(0x0104)
+
+#endif /* __ASM_MACH_REGS_SMC_H */
diff --git a/arch/arm/mach-mmp/include/mach/timex.h b/arch/arm/mach-mmp/include/mach/timex.h
index 6cebbd0ca8f4..70c9f1d88c02 100644
--- a/arch/arm/mach-mmp/include/mach/timex.h
+++ b/arch/arm/mach-mmp/include/mach/timex.h
@@ -6,4 +6,8 @@
* published by the Free Software Foundation.
*/
+#ifdef CONFIG_CPU_MMP2
+#define CLOCK_TICK_RATE 6500000
+#else
#define CLOCK_TICK_RATE 3250000
+#endif
diff --git a/arch/arm/mach-mmp/jasper.c b/arch/arm/mach-mmp/jasper.c
index cfd4d66ef800..d77dd41d60e1 100644
--- a/arch/arm/mach-mmp/jasper.c
+++ b/arch/arm/mach-mmp/jasper.c
@@ -15,12 +15,16 @@
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/gpio.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/max8649.h>
+#include <linux/mfd/max8925.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <mach/addr-map.h>
#include <mach/mfp-mmp2.h>
#include <mach/mmp2.h>
+#include <mach/irqs.h>
#include "common.h"
@@ -58,6 +62,63 @@ static unsigned long jasper_pin_config[] __initdata = {
GPIO149_ND_CLE,
GPIO112_ND_RDY0,
GPIO160_ND_RDY1,
+
+ /* PMIC */
+ PMIC_PMIC_INT | MFP_LPM_EDGE_FALL,
+};
+
+static struct regulator_consumer_supply max8649_supply[] = {
+ REGULATOR_SUPPLY("vcc_core", NULL),
+};
+
+static struct regulator_init_data max8649_init_data = {
+ .constraints = {
+ .name = "vcc_core range",
+ .min_uV = 1150000,
+ .max_uV = 1280000,
+ .always_on = 1,
+ .boot_on = 1,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE,
+ },
+ .num_consumer_supplies = 1,
+ .consumer_supplies = &max8649_supply[0],
+};
+
+static struct max8649_platform_data jasper_max8649_info = {
+ .mode = 2, /* VID1 = 1, VID0 = 0 */
+ .extclk = 0,
+ .ramp_timing = MAX8649_RAMP_32MV,
+ .regulator = &max8649_init_data,
+};
+
+static struct max8925_backlight_pdata jasper_backlight_data = {
+ .dual_string = 0,
+};
+
+static struct max8925_power_pdata jasper_power_data = {
+ .batt_detect = 0, /* can't detect battery by ID pin */
+ .topoff_threshold = MAX8925_TOPOFF_THR_10PER,
+ .fast_charge = MAX8925_FCHG_1000MA,
+};
+
+static struct max8925_platform_data jasper_max8925_info = {
+ .backlight = &jasper_backlight_data,
+ .power = &jasper_power_data,
+ .irq_base = IRQ_BOARD_START,
+};
+
+static struct i2c_board_info jasper_twsi1_info[] = {
+ [0] = {
+ .type = "max8649",
+ .addr = 0x60,
+ .platform_data = &jasper_max8649_info,
+ },
+ [1] = {
+ .type = "max8925",
+ .addr = 0x3c,
+ .irq = IRQ_MMP2_PMIC,
+ .platform_data = &jasper_max8925_info,
+ },
};
static void __init jasper_init(void)
@@ -67,6 +128,9 @@ static void __init jasper_init(void)
/* on-chip devices */
mmp2_add_uart(1);
mmp2_add_uart(3);
+ mmp2_add_twsi(1, NULL, ARRAY_AND_SIZE(jasper_twsi1_info));
+
+ regulator_has_full_constraints();
}
MACHINE_START(MARVELL_JASPER, "Jasper Development Platform")
diff --git a/arch/arm/mach-mmp/mmp2.c b/arch/arm/mach-mmp/mmp2.c
index 72eb9daeea99..7f5eb059bb01 100644
--- a/arch/arm/mach-mmp/mmp2.c
+++ b/arch/arm/mach-mmp/mmp2.c
@@ -15,11 +15,14 @@
#include <linux/init.h>
#include <linux/io.h>
+#include <asm/hardware/cache-tauros2.h>
+
#include <mach/addr-map.h>
#include <mach/regs-apbc.h>
#include <mach/regs-apmu.h>
#include <mach/cputype.h>
#include <mach/irqs.h>
+#include <mach/dma.h>
#include <mach/mfp.h>
#include <mach/gpio.h>
#include <mach/devices.h>
@@ -32,7 +35,50 @@
#define APMASK(i) (GPIO_REGS_VIRT + BANK_OFF(i) + 0x9c)
static struct mfp_addr_map mmp2_addr_map[] __initdata = {
+
+ MFP_ADDR_X(GPIO0, GPIO58, 0x54),
+ MFP_ADDR_X(GPIO59, GPIO73, 0x280),
+ MFP_ADDR_X(GPIO74, GPIO101, 0x170),
+
+ MFP_ADDR(GPIO102, 0x0),
+ MFP_ADDR(GPIO103, 0x4),
+ MFP_ADDR(GPIO104, 0x1fc),
+ MFP_ADDR(GPIO105, 0x1f8),
+ MFP_ADDR(GPIO106, 0x1f4),
+ MFP_ADDR(GPIO107, 0x1f0),
+ MFP_ADDR(GPIO108, 0x21c),
+ MFP_ADDR(GPIO109, 0x218),
+ MFP_ADDR(GPIO110, 0x214),
+ MFP_ADDR(GPIO111, 0x200),
+ MFP_ADDR(GPIO112, 0x244),
+ MFP_ADDR(GPIO113, 0x25c),
+ MFP_ADDR(GPIO114, 0x164),
+ MFP_ADDR_X(GPIO115, GPIO122, 0x260),
+
+ MFP_ADDR(GPIO123, 0x148),
+ MFP_ADDR_X(GPIO124, GPIO141, 0xc),
+
+ MFP_ADDR(GPIO142, 0x8),
+ MFP_ADDR_X(GPIO143, GPIO151, 0x220),
+ MFP_ADDR_X(GPIO152, GPIO153, 0x248),
+ MFP_ADDR_X(GPIO154, GPIO155, 0x254),
+ MFP_ADDR_X(GPIO156, GPIO159, 0x14c),
+
+ MFP_ADDR(GPIO160, 0x250),
+ MFP_ADDR(GPIO161, 0x210),
+ MFP_ADDR(GPIO162, 0x20c),
+ MFP_ADDR(GPIO163, 0x208),
+ MFP_ADDR(GPIO164, 0x204),
+ MFP_ADDR(GPIO165, 0x1ec),
+ MFP_ADDR(GPIO166, 0x1e8),
+ MFP_ADDR(GPIO167, 0x1e4),
+ MFP_ADDR(GPIO168, 0x1e0),
+
+ MFP_ADDR_X(TWSI1_SCL, TWSI1_SDA, 0x140),
+ MFP_ADDR_X(TWSI4_SCL, TWSI4_SDA, 0x2bc),
+
MFP_ADDR(PMIC_INT, 0x2c4),
+ MFP_ADDR(CLK_REQ, 0x160),
MFP_ADDR_END,
};
@@ -99,9 +145,13 @@ static struct clk_lookup mmp2_clkregs[] = {
static int __init mmp2_init(void)
{
if (cpu_is_mmp2()) {
+#ifdef CONFIG_CACHE_TAUROS2
+ tauros2_init();
+#endif
mfp_init_base(MFPR_VIRT_BASE);
mfp_init_addr(mmp2_addr_map);
- clks_register(ARRAY_AND_SIZE(mmp2_clkregs));
+ pxa_init_dma(IRQ_MMP2_DMA_RIQ, 16);
+ clkdev_add_table(ARRAY_AND_SIZE(mmp2_clkregs));
}
return 0;
diff --git a/arch/arm/mach-mmp/pxa168.c b/arch/arm/mach-mmp/pxa168.c
index 1873c821df90..652ae660634c 100644
--- a/arch/arm/mach-mmp/pxa168.c
+++ b/arch/arm/mach-mmp/pxa168.c
@@ -72,6 +72,11 @@ static APBC_CLK(pwm1, PXA168_PWM1, 1, 13000000);
static APBC_CLK(pwm2, PXA168_PWM2, 1, 13000000);
static APBC_CLK(pwm3, PXA168_PWM3, 1, 13000000);
static APBC_CLK(pwm4, PXA168_PWM4, 1, 13000000);
+static APBC_CLK(ssp1, PXA168_SSP1, 4, 0);
+static APBC_CLK(ssp2, PXA168_SSP2, 4, 0);
+static APBC_CLK(ssp3, PXA168_SSP3, 4, 0);
+static APBC_CLK(ssp4, PXA168_SSP4, 4, 0);
+static APBC_CLK(ssp5, PXA168_SSP5, 4, 0);
static APMU_CLK(nand, NAND, 0x01db, 208000000);
@@ -85,6 +90,11 @@ static struct clk_lookup pxa168_clkregs[] = {
INIT_CLKREG(&clk_pwm2, "pxa168-pwm.1", NULL),
INIT_CLKREG(&clk_pwm3, "pxa168-pwm.2", NULL),
INIT_CLKREG(&clk_pwm4, "pxa168-pwm.3", NULL),
+ INIT_CLKREG(&clk_ssp1, "pxa168-ssp.0", NULL),
+ INIT_CLKREG(&clk_ssp2, "pxa168-ssp.1", NULL),
+ INIT_CLKREG(&clk_ssp3, "pxa168-ssp.2", NULL),
+ INIT_CLKREG(&clk_ssp4, "pxa168-ssp.3", NULL),
+ INIT_CLKREG(&clk_ssp5, "pxa168-ssp.4", NULL),
INIT_CLKREG(&clk_nand, "pxa3xx-nand", NULL),
};
@@ -132,3 +142,8 @@ PXA168_DEVICE(pwm2, "pxa168-pwm", 1, NONE, 0xd401a400, 0x10);
PXA168_DEVICE(pwm3, "pxa168-pwm", 2, NONE, 0xd401a800, 0x10);
PXA168_DEVICE(pwm4, "pxa168-pwm", 3, NONE, 0xd401ac00, 0x10);
PXA168_DEVICE(nand, "pxa3xx-nand", -1, NAND, 0xd4283000, 0x80, 97, 99);
+PXA168_DEVICE(ssp1, "pxa168-ssp", 0, SSP1, 0xd401b000, 0x40, 52, 53);
+PXA168_DEVICE(ssp2, "pxa168-ssp", 1, SSP2, 0xd401c000, 0x40, 54, 55);
+PXA168_DEVICE(ssp3, "pxa168-ssp", 2, SSP3, 0xd401f000, 0x40, 56, 57);
+PXA168_DEVICE(ssp4, "pxa168-ssp", 3, SSP4, 0xd4020000, 0x40, 58, 59);
+PXA168_DEVICE(ssp5, "pxa168-ssp", 4, SSP5, 0xd4021000, 0x40, 60, 61);
diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig
index f780086befd7..47264a76eeb3 100644
--- a/arch/arm/mach-msm/Kconfig
+++ b/arch/arm/mach-msm/Kconfig
@@ -1,7 +1,80 @@
if ARCH_MSM
-comment "MSM Board Type"
+choice
+ prompt "Qualcomm MSM SoC Type"
+ default ARCH_MSM7X00A
+
+config ARCH_MSM7X00A
+ bool "MSM7x00A / MSM7x01A"
+ select ARCH_MSM_ARM11
+ select MSM_SMD
+ select MSM_SMD_PKG3
+ select CPU_V6
+
+config ARCH_MSM7X30
+ bool "MSM7x30"
+ select ARCH_MSM_SCORPION
+ select MSM_SMD
+ select MSM_VIC
+ select CPU_V7
+ select MSM_REMOTE_SPINLOCK_DEKKERS
+
+config ARCH_QSD8X50
+ bool "QSD8X50"
+ select ARCH_MSM_SCORPION
+ select MSM_SMD
+ select MSM_VIC
+ select CPU_V7
+ select MSM_REMOTE_SPINLOCK_LDREX
+endchoice
+
+config MSM_SOC_REV_A
+ bool
+
+config ARCH_MSM_ARM11
+ bool
+config ARCH_MSM_SCORPION
+ bool
+
+config MSM_VIC
+ bool
+
+menu "Qualcomm MSM Board Type"
+
+config MACH_HALIBUT
depends on ARCH_MSM
+ depends on ARCH_MSM7X00A
+ bool "Halibut Board (QCT SURF7201A)"
+ help
+ Support for the Qualcomm SURF7201A eval board.
+
+config MACH_TROUT
+ depends on ARCH_MSM
+ depends on ARCH_MSM7X00A
+ bool "HTC Dream (aka trout)"
+ help
+ Support for the HTC Dream, T-Mobile G1, Android ADP1 devices.
+
+config MACH_MSM7X30_SURF
+ depends on ARCH_MSM7X30
+ bool "MSM7x30 SURF"
+ help
+ Support for the Qualcomm MSM7x30 SURF eval board.
+
+config MACH_QSD8X50_SURF
+ depends on ARCH_QSD8X50
+ bool "QSD8x50 SURF"
+ help
+ Support for the Qualcomm QSD8x50 SURF eval board.
+
+config MACH_QSD8X50A_ST1_5
+ depends on ARCH_QSD8X50
+ select MSM_SOC_REV_A
+ bool "QSD8x50A ST1.5"
+ help
+ Support for the Qualcomm ST1.5.
+
+endmenu
config MSM_DEBUG_UART
int
@@ -27,17 +100,10 @@ choice
bool "UART3"
endchoice
-config MACH_HALIBUT
- depends on ARCH_MSM
- default y
- bool "Halibut Board (QCT SURF7201A)"
- help
- Support for the Qualcomm SURF7201A eval board.
+config MSM_SMD_PKG3
+ bool
-config MACH_TROUT
- default y
- bool "HTC Dream (aka trout)"
- help
- Support for the HTC Dream, T-Mobile G1, Android ADP1 devices.
+config MSM_SMD
+ bool
endif
diff --git a/arch/arm/mach-msm/Makefile b/arch/arm/mach-msm/Makefile
index 91e6f5c95dc1..66677f0acaed 100644
--- a/arch/arm/mach-msm/Makefile
+++ b/arch/arm/mach-msm/Makefile
@@ -1,9 +1,22 @@
-obj-y += io.o idle.o irq.o timer.o dma.o
-obj-y += devices.o
obj-y += proc_comm.o
+obj-y += io.o idle.o timer.o dma.o
obj-y += vreg.o
-obj-y += clock.o clock-7x01a.o
+obj-y += acpuclock-arm11.o
+obj-y += clock.o clock-pcom.o
+obj-y += gpio.o
-obj-$(CONFIG_MACH_HALIBUT) += board-halibut.o
+ifdef CONFIG_MSM_VIC
+obj-y += irq-vic.o
+else
+obj-y += irq.o
+endif
+
+obj-$(CONFIG_ARCH_QSD8X50) += sirc.o
+obj-$(CONFIG_MSM_SMD) += smd.o smd_debug.o
+obj-$(CONFIG_MSM_SMD) += last_radio_log.o
+
+obj-$(CONFIG_MACH_TROUT) += board-trout.o devices-msm7x00.o
+obj-$(CONFIG_MACH_HALIBUT) += board-halibut.o devices-msm7x00.o
+obj-$(CONFIG_ARCH_MSM7X30) += board-msm7x30.o devices-msm7x30.o
+obj-$(CONFIG_ARCH_QSD8X50) += board-qsd8x50.o devices-qsd8x50.o
-obj-$(CONFIG_MACH_TROUT) += board-dream.o
diff --git a/arch/arm/mach-msm/acpuclock-arm11.c b/arch/arm/mach-msm/acpuclock-arm11.c
new file mode 100644
index 000000000000..af5e85b91d02
--- /dev/null
+++ b/arch/arm/mach-msm/acpuclock-arm11.c
@@ -0,0 +1,526 @@
+/* arch/arm/mach-msm/acpuclock.c
+ *
+ * MSM architecture clock driver
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2007 QUALCOMM Incorporated
+ * Author: San Mehat <san@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/cpufreq.h>
+#include <linux/mutex.h>
+#include <linux/io.h>
+#include <mach/board.h>
+#include <mach/msm_iomap.h>
+
+#include "proc_comm.h"
+#include "acpuclock.h"
+
+
+#define A11S_CLK_CNTL_ADDR (MSM_CSR_BASE + 0x100)
+#define A11S_CLK_SEL_ADDR (MSM_CSR_BASE + 0x104)
+#define A11S_VDD_SVS_PLEVEL_ADDR (MSM_CSR_BASE + 0x124)
+
+/*
+ * ARM11 clock configuration for specific ACPU speeds
+ */
+
+#define ACPU_PLL_TCXO -1
+#define ACPU_PLL_0 0
+#define ACPU_PLL_1 1
+#define ACPU_PLL_2 2
+#define ACPU_PLL_3 3
+
+#define PERF_SWITCH_DEBUG 0
+#define PERF_SWITCH_STEP_DEBUG 0
+
+struct clock_state
+{
+ struct clkctl_acpu_speed *current_speed;
+ struct mutex lock;
+ uint32_t acpu_switch_time_us;
+ uint32_t max_speed_delta_khz;
+ uint32_t vdd_switch_time_us;
+ unsigned long power_collapse_khz;
+ unsigned long wait_for_irq_khz;
+};
+
+static struct clk *ebi1_clk;
+static struct clock_state drv_state = { 0 };
+
+static void __init acpuclk_init(void);
+
+/* MSM7201A Levels 3-6 all correspond to 1.2V, level 7 corresponds to 1.325V. */
+enum {
+ VDD_0 = 0,
+ VDD_1 = 1,
+ VDD_2 = 2,
+ VDD_3 = 3,
+ VDD_4 = 3,
+ VDD_5 = 3,
+ VDD_6 = 3,
+ VDD_7 = 7,
+ VDD_END
+};
+
+struct clkctl_acpu_speed {
+ unsigned int a11clk_khz;
+ int pll;
+ unsigned int a11clk_src_sel;
+ unsigned int a11clk_src_div;
+ unsigned int ahbclk_khz;
+ unsigned int ahbclk_div;
+ int vdd;
+ unsigned int axiclk_khz;
+ unsigned long lpj; /* loops_per_jiffy */
+/* Index in acpu_freq_tbl[] for steppings. */
+ short down;
+ short up;
+};
+
+/*
+ * ACPU speed table. Complete table is shown but certain speeds are commented
+ * out to optimize speed switching. Initialize loops_per_jiffy to 0.
+ *
+ * Table stepping up/down is optimized for 256 MHz jumps while staying on the
+ * same PLL.
+ */
+#if (0)
+static struct clkctl_acpu_speed acpu_freq_tbl[] = {
+ { 19200, ACPU_PLL_TCXO, 0, 0, 19200, 0, VDD_0, 30720, 0, 0, 8 },
+ { 61440, ACPU_PLL_0, 4, 3, 61440, 0, VDD_0, 30720, 0, 0, 8 },
+ { 81920, ACPU_PLL_0, 4, 2, 40960, 1, VDD_0, 61440, 0, 0, 8 },
+ { 96000, ACPU_PLL_1, 1, 7, 48000, 1, VDD_0, 61440, 0, 0, 9 },
+ { 122880, ACPU_PLL_0, 4, 1, 61440, 1, VDD_3, 61440, 0, 0, 8 },
+ { 128000, ACPU_PLL_1, 1, 5, 64000, 1, VDD_3, 61440, 0, 0, 12 },
+ { 176000, ACPU_PLL_2, 2, 5, 88000, 1, VDD_3, 61440, 0, 0, 11 },
+ { 192000, ACPU_PLL_1, 1, 3, 64000, 2, VDD_3, 61440, 0, 0, 12 },
+ { 245760, ACPU_PLL_0, 4, 0, 81920, 2, VDD_4, 61440, 0, 0, 12 },
+ { 256000, ACPU_PLL_1, 1, 2, 128000, 2, VDD_5, 128000, 0, 0, 12 },
+ { 264000, ACPU_PLL_2, 2, 3, 88000, 2, VDD_5, 128000, 0, 6, 13 },
+ { 352000, ACPU_PLL_2, 2, 2, 88000, 3, VDD_5, 128000, 0, 6, 13 },
+ { 384000, ACPU_PLL_1, 1, 1, 128000, 2, VDD_6, 128000, 0, 5, -1 },
+ { 528000, ACPU_PLL_2, 2, 1, 132000, 3, VDD_7, 128000, 0, 11, -1 },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+};
+#else /* Table of freq we currently use. */
+static struct clkctl_acpu_speed acpu_freq_tbl[] = {
+ { 19200, ACPU_PLL_TCXO, 0, 0, 19200, 0, VDD_0, 30720, 0, 0, 4 },
+ { 122880, ACPU_PLL_0, 4, 1, 61440, 1, VDD_3, 61440, 0, 0, 4 },
+ { 128000, ACPU_PLL_1, 1, 5, 64000, 1, VDD_3, 61440, 0, 0, 6 },
+ { 176000, ACPU_PLL_2, 2, 5, 88000, 1, VDD_3, 61440, 0, 0, 5 },
+ { 245760, ACPU_PLL_0, 4, 0, 81920, 2, VDD_4, 61440, 0, 0, 5 },
+ { 352000, ACPU_PLL_2, 2, 2, 88000, 3, VDD_5, 128000, 0, 3, 7 },
+ { 384000, ACPU_PLL_1, 1, 1, 128000, 2, VDD_6, 128000, 0, 2, -1 },
+ { 528000, ACPU_PLL_2, 2, 1, 132000, 3, VDD_7, 128000, 0, 5, -1 },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+};
+#endif
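+
+/*
+ * Worked example (illustrative): using the active (#else) table above and
+ * assuming the board sets max_speed_delta_khz to 256000, as the 256 MHz
+ * comment implies, a switch from 19200 kHz to 528000 kHz steps through the
+ * 'up' indices as 19200 -> 245760 -> 352000, and then jumps straight to
+ * 528000 once the remaining delta fits within 256 MHz.
+ */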
+
+
+#ifdef CONFIG_CPU_FREQ_TABLE
+static struct cpufreq_frequency_table freq_table[] = {
+ { 0, 122880 },
+ { 1, 128000 },
+ { 2, 245760 },
+ { 3, 384000 },
+ { 4, 528000 },
+ { 5, CPUFREQ_TABLE_END },
+};
+#endif
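+
+/*
+ * Note: every frequency listed in freq_table[] above also appears in
+ * acpu_freq_tbl[]; acpuclk_set_rate() returns -EINVAL for any rate it
+ * cannot find in that table, so the two tables must stay in sync.
+ */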
+
+static int pc_pll_request(unsigned id, unsigned on)
+{
+ int res;
+ on = !!on;
+
+#if PERF_SWITCH_DEBUG
+ if (on)
+ printk(KERN_DEBUG "Enabling PLL %d\n", id);
+ else
+ printk(KERN_DEBUG "Disabling PLL %d\n", id);
+#endif
+
+ res = msm_proc_comm(PCOM_CLKCTL_RPC_PLL_REQUEST, &id, &on);
+ if (res < 0)
+ return res;
+
+#if PERF_SWITCH_DEBUG
+ if (on)
+ printk(KERN_DEBUG "PLL %d enabled\n", id);
+ else
+ printk(KERN_DEBUG "PLL %d disabled\n", id);
+#endif
+ return res;
+}
+
+
+/*----------------------------------------------------------------------------
+ * ARM11 'owned' clock control
+ *---------------------------------------------------------------------------*/
+
+unsigned long acpuclk_power_collapse(void) {
+ int ret = acpuclk_get_rate();
+ ret *= 1000;
+ if (ret > drv_state.power_collapse_khz)
+ acpuclk_set_rate(drv_state.power_collapse_khz, 1);
+ return ret;
+}
+
+unsigned long acpuclk_get_wfi_rate(void)
+{
+ return drv_state.wait_for_irq_khz;
+}
+
+unsigned long acpuclk_wait_for_irq(void) {
+ int ret = acpuclk_get_rate();
+ ret *= 1000;
+ if (ret > drv_state.wait_for_irq_khz)
+ acpuclk_set_rate(drv_state.wait_for_irq_khz, 1);
+ return ret;
+}
+
+static int acpuclk_set_vdd_level(int vdd)
+{
+ uint32_t current_vdd;
+
+ current_vdd = readl(A11S_VDD_SVS_PLEVEL_ADDR) & 0x07;
+
+#if PERF_SWITCH_DEBUG
+ printk(KERN_DEBUG "acpuclock: Switching VDD from %u -> %d\n",
+ current_vdd, vdd);
+#endif
+ writel((1 << 7) | (vdd << 3), A11S_VDD_SVS_PLEVEL_ADDR);
+ udelay(drv_state.vdd_switch_time_us);
+ if ((readl(A11S_VDD_SVS_PLEVEL_ADDR) & 0x7) != vdd) {
+#if PERF_SWITCH_DEBUG
+ printk(KERN_ERR "acpuclock: VDD set failed\n");
+#endif
+ return -EIO;
+ }
+
+#if PERF_SWITCH_DEBUG
+ printk(KERN_DEBUG "acpuclock: VDD switched\n");
+#endif
+ return 0;
+}
+
+/* Set proper dividers for the given clock speed. */
+static void acpuclk_set_div(const struct clkctl_acpu_speed *hunt_s) {
+ uint32_t reg_clkctl, reg_clksel, clk_div;
+
+ /* AHB_CLK_DIV */
+ clk_div = (readl(A11S_CLK_SEL_ADDR) >> 1) & 0x03;
+ /*
+ * If the new clock divider is higher than the previous, then
+ * program the divider before switching the clock
+ */
+ if (hunt_s->ahbclk_div > clk_div) {
+ reg_clksel = readl(A11S_CLK_SEL_ADDR);
+ reg_clksel &= ~(0x3 << 1);
+ reg_clksel |= (hunt_s->ahbclk_div << 1);
+ writel(reg_clksel, A11S_CLK_SEL_ADDR);
+ }
+ if ((readl(A11S_CLK_SEL_ADDR) & 0x01) == 0) {
+ /* SRC0 */
+
+ /* Program clock source */
+ reg_clkctl = readl(A11S_CLK_CNTL_ADDR);
+ reg_clkctl &= ~(0x07 << 4);
+ reg_clkctl |= (hunt_s->a11clk_src_sel << 4);
+ writel(reg_clkctl, A11S_CLK_CNTL_ADDR);
+
+ /* Program clock divider */
+ reg_clkctl = readl(A11S_CLK_CNTL_ADDR);
+ reg_clkctl &= ~0xf;
+ reg_clkctl |= hunt_s->a11clk_src_div;
+ writel(reg_clkctl, A11S_CLK_CNTL_ADDR);
+
+ /* Program clock source selection */
+ reg_clksel = readl(A11S_CLK_SEL_ADDR);
+ reg_clksel |= 1; /* CLK_SEL_SRC1NO == SRC1 */
+ writel(reg_clksel, A11S_CLK_SEL_ADDR);
+ } else {
+ /* SRC1 */
+
+ /* Program clock source */
+ reg_clkctl = readl(A11S_CLK_CNTL_ADDR);
+ reg_clkctl &= ~(0x07 << 12);
+ reg_clkctl |= (hunt_s->a11clk_src_sel << 12);
+ writel(reg_clkctl, A11S_CLK_CNTL_ADDR);
+
+ /* Program clock divider */
+ reg_clkctl = readl(A11S_CLK_CNTL_ADDR);
+ reg_clkctl &= ~(0xf << 8);
+ reg_clkctl |= (hunt_s->a11clk_src_div << 8);
+ writel(reg_clkctl, A11S_CLK_CNTL_ADDR);
+
+ /* Program clock source selection */
+ reg_clksel = readl(A11S_CLK_SEL_ADDR);
+ reg_clksel &= ~1; /* CLK_SEL_SRC1NO == SRC0 */
+ writel(reg_clksel, A11S_CLK_SEL_ADDR);
+ }
+
+ /*
+ * If the new clock divider is lower than the previous, then
+ * program the divider after switching the clock
+ */
+ if (hunt_s->ahbclk_div < clk_div) {
+ reg_clksel = readl(A11S_CLK_SEL_ADDR);
+ reg_clksel &= ~(0x3 << 1);
+ reg_clksel |= (hunt_s->ahbclk_div << 1);
+ writel(reg_clksel, A11S_CLK_SEL_ADDR);
+ }
+}
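+
+/*
+ * The divider ordering above is deliberate: the AHB divider is raised before
+ * the source switch and lowered after it, presumably so the AHB clock never
+ * overshoots its target during the transition. For example (illustrative),
+ * stepping from 245760 kHz (ahbclk_div 2) to 352000 kHz (ahbclk_div 3), the
+ * divider is written first, briefly slowing AHB to 245760/4 = 61440 kHz,
+ * instead of letting it spike to 352000/3 ~= 117333 kHz, above the 88000 kHz
+ * target.
+ */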
+
+int acpuclk_set_rate(unsigned long rate, int for_power_collapse)
+{
+ uint32_t reg_clkctl;
+ struct clkctl_acpu_speed *cur_s, *tgt_s, *strt_s;
+ int rc = 0;
+ unsigned int plls_enabled = 0, pll;
+
+ strt_s = cur_s = drv_state.current_speed;
+
+ WARN_ONCE(cur_s == NULL, "acpuclk_set_rate: not initialized\n");
+ if (cur_s == NULL)
+ return -ENOENT;
+
+ if (rate == (cur_s->a11clk_khz * 1000))
+ return 0;
+
+ for (tgt_s = acpu_freq_tbl; tgt_s->a11clk_khz != 0; tgt_s++) {
+ if (tgt_s->a11clk_khz == (rate / 1000))
+ break;
+ }
+
+ if (tgt_s->a11clk_khz == 0)
+ return -EINVAL;
+
+	/* Choose the highest speed at or below 'rate' with the same PLL. */
+ if (for_power_collapse && tgt_s->a11clk_khz < cur_s->a11clk_khz) {
+ while (tgt_s->pll != ACPU_PLL_TCXO && tgt_s->pll != cur_s->pll)
+ tgt_s--;
+ }
+
+ if (strt_s->pll != ACPU_PLL_TCXO)
+ plls_enabled |= 1 << strt_s->pll;
+
+ if (!for_power_collapse) {
+ mutex_lock(&drv_state.lock);
+ if (strt_s->pll != tgt_s->pll && tgt_s->pll != ACPU_PLL_TCXO) {
+ rc = pc_pll_request(tgt_s->pll, 1);
+ if (rc < 0) {
+ pr_err("PLL%d enable failed (%d)\n",
+ tgt_s->pll, rc);
+ goto out;
+ }
+ plls_enabled |= 1 << tgt_s->pll;
+ }
+ /* Increase VDD if needed. */
+ if (tgt_s->vdd > cur_s->vdd) {
+ if ((rc = acpuclk_set_vdd_level(tgt_s->vdd)) < 0) {
+ printk(KERN_ERR "Unable to switch ACPU vdd\n");
+ goto out;
+ }
+ }
+ }
+
+	/* Set wait states for the CPU in between frequency changes. */
+ reg_clkctl = readl(A11S_CLK_CNTL_ADDR);
+ reg_clkctl |= (100 << 16); /* set WT_ST_CNT */
+ writel(reg_clkctl, A11S_CLK_CNTL_ADDR);
+
+#if PERF_SWITCH_DEBUG
+ printk(KERN_INFO "acpuclock: Switching from ACPU rate %u -> %u\n",
+ strt_s->a11clk_khz * 1000, tgt_s->a11clk_khz * 1000);
+#endif
+
+ while (cur_s != tgt_s) {
+ /*
+		 * Always jump to the target frequency if it is within 256 MHz,
+		 * regardless of PLL. If the difference is greater, use the
+		 * predefined steppings in the table.
+ */
+ int d = abs((int)(cur_s->a11clk_khz - tgt_s->a11clk_khz));
+ if (d > drv_state.max_speed_delta_khz) {
+ /* Step up or down depending on target vs current. */
+ int clk_index = tgt_s->a11clk_khz > cur_s->a11clk_khz ?
+ cur_s->up : cur_s->down;
+ if (clk_index < 0) { /* This should not happen. */
+ printk(KERN_ERR "cur:%u target: %u\n",
+ cur_s->a11clk_khz, tgt_s->a11clk_khz);
+ rc = -EINVAL;
+ goto out;
+ }
+ cur_s = &acpu_freq_tbl[clk_index];
+ } else {
+ cur_s = tgt_s;
+ }
+#if PERF_SWITCH_STEP_DEBUG
+ printk(KERN_DEBUG "%s: STEP khz = %u, pll = %d\n",
+ __FUNCTION__, cur_s->a11clk_khz, cur_s->pll);
+#endif
+		if (!for_power_collapse && cur_s->pll != ACPU_PLL_TCXO
+ && !(plls_enabled & (1 << cur_s->pll))) {
+ rc = pc_pll_request(cur_s->pll, 1);
+ if (rc < 0) {
+ pr_err("PLL%d enable failed (%d)\n",
+ cur_s->pll, rc);
+ goto out;
+ }
+ plls_enabled |= 1 << cur_s->pll;
+ }
+
+ acpuclk_set_div(cur_s);
+ drv_state.current_speed = cur_s;
+ /* Re-adjust lpj for the new clock speed. */
+ loops_per_jiffy = cur_s->lpj;
+ udelay(drv_state.acpu_switch_time_us);
+ }
+
+ /* Nothing else to do for power collapse. */
+ if (for_power_collapse)
+ return 0;
+
+ /* Disable PLLs we are not using anymore. */
+ plls_enabled &= ~(1 << tgt_s->pll);
+ for (pll = ACPU_PLL_0; pll <= ACPU_PLL_2; pll++)
+ if (plls_enabled & (1 << pll)) {
+ rc = pc_pll_request(pll, 0);
+ if (rc < 0) {
+ pr_err("PLL%d disable failed (%d)\n", pll, rc);
+ goto out;
+ }
+ }
+
+ /* Change the AXI bus frequency if we can. */
+ if (strt_s->axiclk_khz != tgt_s->axiclk_khz) {
+ rc = clk_set_rate(ebi1_clk, tgt_s->axiclk_khz * 1000);
+ if (rc < 0)
+ pr_err("Setting AXI min rate failed!\n");
+ }
+
+ /* Drop VDD level if we can. */
+ if (tgt_s->vdd < strt_s->vdd) {
+ if (acpuclk_set_vdd_level(tgt_s->vdd) < 0)
+ printk(KERN_ERR "acpuclock: Unable to drop ACPU vdd\n");
+ }
+
+#if PERF_SWITCH_DEBUG
+ printk(KERN_DEBUG "%s: ACPU speed change complete\n", __FUNCTION__);
+#endif
+out:
+ if (!for_power_collapse)
+ mutex_unlock(&drv_state.lock);
+ return rc;
+}
+
+static void __init acpuclk_init(void)
+{
+ struct clkctl_acpu_speed *speed;
+ uint32_t div, sel;
+ int rc;
+
+ /*
+ * Determine the rate of ACPU clock
+ */
+
+ if (!(readl(A11S_CLK_SEL_ADDR) & 0x01)) { /* CLK_SEL_SRC1N0 */
+ /* CLK_SRC0_SEL */
+ sel = (readl(A11S_CLK_CNTL_ADDR) >> 12) & 0x7;
+ /* CLK_SRC0_DIV */
+ div = (readl(A11S_CLK_CNTL_ADDR) >> 8) & 0x0f;
+ } else {
+ /* CLK_SRC1_SEL */
+ sel = (readl(A11S_CLK_CNTL_ADDR) >> 4) & 0x07;
+ /* CLK_SRC1_DIV */
+ div = readl(A11S_CLK_CNTL_ADDR) & 0x0f;
+ }
+
+ for (speed = acpu_freq_tbl; speed->a11clk_khz != 0; speed++) {
+ if (speed->a11clk_src_sel == sel
+ && (speed->a11clk_src_div == div))
+ break;
+ }
+ if (speed->a11clk_khz == 0) {
+ printk(KERN_WARNING "Warning - ACPU clock reports invalid speed\n");
+ return;
+ }
+
+ drv_state.current_speed = speed;
+
+ rc = clk_set_rate(ebi1_clk, speed->axiclk_khz * 1000);
+ if (rc < 0)
+ pr_err("Setting AXI min rate failed!\n");
+
+ printk(KERN_INFO "ACPU running at %d KHz\n", speed->a11clk_khz);
+}
+
+unsigned long acpuclk_get_rate(void)
+{
+ WARN_ONCE(drv_state.current_speed == NULL,
+ "acpuclk_get_rate: not initialized\n");
+ if (drv_state.current_speed)
+ return drv_state.current_speed->a11clk_khz;
+ else
+ return 0;
+}
+
+uint32_t acpuclk_get_switch_time(void)
+{
+ return drv_state.acpu_switch_time_us;
+}
+
+/*----------------------------------------------------------------------------
+ * Clock driver initialization
+ *---------------------------------------------------------------------------*/
+
+/* Initialize the lpj field in the acpu_freq_tbl. */
+static void __init lpj_init(void)
+{
+ int i;
+ const struct clkctl_acpu_speed *base_clk = drv_state.current_speed;
+ for (i = 0; acpu_freq_tbl[i].a11clk_khz; i++) {
+ acpu_freq_tbl[i].lpj = cpufreq_scale(loops_per_jiffy,
+ base_clk->a11clk_khz,
+ acpu_freq_tbl[i].a11clk_khz);
+ }
+}
+
+void __init msm_acpu_clock_init(struct msm_acpu_clock_platform_data *clkdata)
+{
+ pr_info("acpu_clock_init()\n");
+
+ ebi1_clk = clk_get(NULL, "ebi1_clk");
+
+ mutex_init(&drv_state.lock);
+ drv_state.acpu_switch_time_us = clkdata->acpu_switch_time_us;
+ drv_state.max_speed_delta_khz = clkdata->max_speed_delta_khz;
+ drv_state.vdd_switch_time_us = clkdata->vdd_switch_time_us;
+ drv_state.power_collapse_khz = clkdata->power_collapse_khz;
+ drv_state.wait_for_irq_khz = clkdata->wait_for_irq_khz;
+ acpuclk_init();
+ lpj_init();
+#ifdef CONFIG_CPU_FREQ_TABLE
+ cpufreq_frequency_table_get_attr(freq_table, smp_processor_id());
+#endif
+}
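+
+/*
+ * Illustrative sketch only: a board file supplies the tuning values consumed
+ * above through msm_acpu_clock_platform_data. The field names below come
+ * from the assignments in msm_acpu_clock_init(); the values are made-up
+ * examples, not taken from any particular board:
+ *
+ *	static struct msm_acpu_clock_platform_data example_clock_data = {
+ *		.acpu_switch_time_us	= 50,
+ *		.max_speed_delta_khz	= 256000,
+ *		.vdd_switch_time_us	= 62,
+ *		.power_collapse_khz	= 19200,
+ *		.wait_for_irq_khz	= 128000,
+ *	};
+ *
+ *	msm_acpu_clock_init(&example_clock_data);
+ */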
diff --git a/arch/arm/mach-msm/acpuclock.h b/arch/arm/mach-msm/acpuclock.h
new file mode 100644
index 000000000000..415de2eb9a5e
--- /dev/null
+++ b/arch/arm/mach-msm/acpuclock.h
@@ -0,0 +1,32 @@
+/* arch/arm/mach-msm/acpuclock.h
+ *
+ * MSM architecture clock driver header
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2007 QUALCOMM Incorporated
+ * Author: San Mehat <san@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_ACPUCLOCK_H
+#define __ARCH_ARM_MACH_MSM_ACPUCLOCK_H
+
+int acpuclk_set_rate(unsigned long rate, int for_power_collapse);
+unsigned long acpuclk_get_rate(void);
+uint32_t acpuclk_get_switch_time(void);
+unsigned long acpuclk_wait_for_irq(void);
+unsigned long acpuclk_power_collapse(void);
+unsigned long acpuclk_get_wfi_rate(void);
+
+
+#endif
+
diff --git a/arch/arm/mach-msm/board-halibut.c b/arch/arm/mach-msm/board-halibut.c
index e61967dde9a1..7bd72e8f127e 100644
--- a/arch/arm/mach-msm/board-halibut.c
+++ b/arch/arm/mach-msm/board-halibut.c
@@ -26,6 +26,7 @@
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/flash.h>
+#include <asm/setup.h>
#include <mach/irqs.h>
#include <mach/board.h>
@@ -77,14 +78,28 @@ static void __init halibut_init(void)
platform_add_devices(devices, ARRAY_SIZE(devices));
}
+static void __init halibut_fixup(struct machine_desc *desc, struct tag *tags,
+ char **cmdline, struct meminfo *mi)
+{
+	mi->nr_banks = 1;
+ mi->bank[0].start = PHYS_OFFSET;
+ mi->bank[0].node = PHYS_TO_NID(PHYS_OFFSET);
+ mi->bank[0].size = (101*1024*1024);
+}
+
static void __init halibut_map_io(void)
{
msm_map_common_io();
- msm_clock_init();
+ msm_clock_init(msm_clocks_7x01a, msm_num_clocks_7x01a);
}
MACHINE_START(HALIBUT, "Halibut Board (QCT SURF7200A)")
+#ifdef CONFIG_MSM_DEBUG_UART
+ .phys_io = MSM_DEBUG_UART_PHYS,
+ .io_pg_offst = ((MSM_DEBUG_UART_BASE) >> 18) & 0xfffc,
+#endif
.boot_params = 0x10000100,
+ .fixup = halibut_fixup,
.map_io = halibut_map_io,
.init_irq = halibut_init_irq,
.init_machine = halibut_init,
diff --git a/arch/arm/mach-msm/board-mahimahi.c b/arch/arm/mach-msm/board-mahimahi.c
new file mode 100644
index 000000000000..bcbefdfe7b5e
--- /dev/null
+++ b/arch/arm/mach-msm/board-mahimahi.c
@@ -0,0 +1,87 @@
+/* linux/arch/arm/mach-msm/board-mahimahi.c
+ *
+ * Copyright (C) 2009 Google, Inc.
+ * Copyright (C) 2009 HTC Corporation.
+ * Author: Dima Zavin <dima@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <asm/setup.h>
+
+#include <mach/board.h>
+#include <mach/hardware.h>
+#include <mach/system.h>
+
+#include "board-mahimahi.h"
+#include "devices.h"
+#include "proc_comm.h"
+
+static uint debug_uart;
+
+module_param_named(debug_uart, debug_uart, uint, 0);
+
+static struct platform_device *devices[] __initdata = {
+#if !defined(CONFIG_MSM_SERIAL_DEBUGGER)
+ &msm_device_uart1,
+#endif
+ &msm_device_uart_dm1,
+ &msm_device_nand,
+};
+
+static void __init mahimahi_init(void)
+{
+ platform_add_devices(devices, ARRAY_SIZE(devices));
+}
+
+static void __init mahimahi_fixup(struct machine_desc *desc, struct tag *tags,
+ char **cmdline, struct meminfo *mi)
+{
+ mi->nr_banks = 2;
+ mi->bank[0].start = PHYS_OFFSET;
+ mi->bank[0].node = PHYS_TO_NID(PHYS_OFFSET);
+ mi->bank[0].size = (219*1024*1024);
+ mi->bank[1].start = MSM_HIGHMEM_BASE;
+ mi->bank[1].node = PHYS_TO_NID(MSM_HIGHMEM_BASE);
+ mi->bank[1].size = MSM_HIGHMEM_SIZE;
+}
+
+static void __init mahimahi_map_io(void)
+{
+ msm_map_common_io();
+ msm_clock_init();
+}
+
+extern struct sys_timer msm_timer;
+
+MACHINE_START(MAHIMAHI, "mahimahi")
+#ifdef CONFIG_MSM_DEBUG_UART
+ .phys_io = MSM_DEBUG_UART_PHYS,
+ .io_pg_offst = ((MSM_DEBUG_UART_BASE) >> 18) & 0xfffc,
+#endif
+ .boot_params = 0x20000100,
+ .fixup = mahimahi_fixup,
+ .map_io = mahimahi_map_io,
+ .init_irq = msm_init_irq,
+ .init_machine = mahimahi_init,
+ .timer = &msm_timer,
+MACHINE_END
diff --git a/arch/arm/mach-msm/board-msm7x27.c b/arch/arm/mach-msm/board-msm7x27.c
new file mode 100644
index 000000000000..db9381b85bf0
--- /dev/null
+++ b/arch/arm/mach-msm/board-msm7x27.c
@@ -0,0 +1,178 @@
+/*
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ * Author: Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/input.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/power_supply.h>
+
+#include <mach/hardware.h>
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <asm/mach/flash.h>
+#include <asm/setup.h>
+#ifdef CONFIG_CACHE_L2X0
+#include <asm/hardware/cache-l2x0.h>
+#endif
+
+#include <mach/vreg.h>
+#include <mach/mpp.h>
+#include <mach/gpio.h>
+#include <mach/board.h>
+#include <mach/msm_iomap.h>
+
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+
+#include "devices.h"
+#include "socinfo.h"
+#include "clock.h"
+
+static struct resource smc91x_resources[] = {
+ [0] = {
+ .start = 0x9C004300,
+ .end = 0x9C0043ff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = MSM_GPIO_TO_INT(132),
+ .end = MSM_GPIO_TO_INT(132),
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device smc91x_device = {
+ .name = "smc91x",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(smc91x_resources),
+ .resource = smc91x_resources,
+};
+
+static struct platform_device *devices[] __initdata = {
+ &msm_device_uart3,
+ &msm_device_smd,
+ &msm_device_dmov,
+ &msm_device_nand,
+ &smc91x_device,
+};
+
+extern struct sys_timer msm_timer;
+
+static void __init msm7x2x_init_irq(void)
+{
+ msm_init_irq();
+}
+
+static void __init msm7x2x_init(void)
+{
+ if (socinfo_init() < 0)
+ BUG();
+
+ if (machine_is_msm7x25_ffa() || machine_is_msm7x27_ffa()) {
+ smc91x_resources[0].start = 0x98000300;
+ smc91x_resources[0].end = 0x980003ff;
+ smc91x_resources[1].start = MSM_GPIO_TO_INT(85);
+ smc91x_resources[1].end = MSM_GPIO_TO_INT(85);
+ if (gpio_tlmm_config(GPIO_CFG(85, 0,
+ GPIO_INPUT,
+ GPIO_PULL_DOWN,
+ GPIO_2MA),
+ GPIO_ENABLE)) {
+ printk(KERN_ERR
+ "%s: Err: Config GPIO-85 INT\n",
+ __func__);
+ }
+ }
+
+ platform_add_devices(devices, ARRAY_SIZE(devices));
+}
+
+static void __init msm7x2x_map_io(void)
+{
+ msm_map_common_io();
+	/* This is technically dependent on the SoC, but we use the machine_is
+	 * macros since socinfo is not available this early, and there are
+	 * plans to restructure the code in a way that will eliminate the
+	 * need for socinfo.
+	 */
+ if (machine_is_msm7x27_surf() || machine_is_msm7x27_ffa())
+ msm_clock_init(msm_clocks_7x27, msm_num_clocks_7x27);
+
+ if (machine_is_msm7x25_surf() || machine_is_msm7x25_ffa())
+ msm_clock_init(msm_clocks_7x25, msm_num_clocks_7x25);
+
+#ifdef CONFIG_CACHE_L2X0
+ if (machine_is_msm7x27_surf() || machine_is_msm7x27_ffa()) {
+		/* 7x27 has a 256KB L2 cache:
+		 * 64KB/way, 4-way associativity;
+		 * R/W latency: 3 cycles;
+		 * evmon/parity/share disabled. */
+ l2x0_init(MSM_L2CC_BASE, 0x00068012, 0xfe000000);
+ }
+#endif
+}
+
+MACHINE_START(MSM7X27_SURF, "QCT MSM7x27 SURF")
+#ifdef CONFIG_MSM_DEBUG_UART
+ .phys_io = MSM_DEBUG_UART_PHYS,
+ .io_pg_offst = ((MSM_DEBUG_UART_BASE) >> 18) & 0xfffc,
+#endif
+ .boot_params = PHYS_OFFSET + 0x100,
+ .map_io = msm7x2x_map_io,
+ .init_irq = msm7x2x_init_irq,
+ .init_machine = msm7x2x_init,
+ .timer = &msm_timer,
+MACHINE_END
+
+MACHINE_START(MSM7X27_FFA, "QCT MSM7x27 FFA")
+#ifdef CONFIG_MSM_DEBUG_UART
+ .phys_io = MSM_DEBUG_UART_PHYS,
+ .io_pg_offst = ((MSM_DEBUG_UART_BASE) >> 18) & 0xfffc,
+#endif
+ .boot_params = PHYS_OFFSET + 0x100,
+ .map_io = msm7x2x_map_io,
+ .init_irq = msm7x2x_init_irq,
+ .init_machine = msm7x2x_init,
+ .timer = &msm_timer,
+MACHINE_END
+
+MACHINE_START(MSM7X25_SURF, "QCT MSM7x25 SURF")
+#ifdef CONFIG_MSM_DEBUG_UART
+ .phys_io = MSM_DEBUG_UART_PHYS,
+ .io_pg_offst = ((MSM_DEBUG_UART_BASE) >> 18) & 0xfffc,
+#endif
+ .boot_params = PHYS_OFFSET + 0x100,
+ .map_io = msm7x2x_map_io,
+ .init_irq = msm7x2x_init_irq,
+ .init_machine = msm7x2x_init,
+ .timer = &msm_timer,
+MACHINE_END
+
+MACHINE_START(MSM7X25_FFA, "QCT MSM7x25 FFA")
+#ifdef CONFIG_MSM_DEBUG_UART
+ .phys_io = MSM_DEBUG_UART_PHYS,
+ .io_pg_offst = ((MSM_DEBUG_UART_BASE) >> 18) & 0xfffc,
+#endif
+ .boot_params = PHYS_OFFSET + 0x100,
+ .map_io = msm7x2x_map_io,
+ .init_irq = msm7x2x_init_irq,
+ .init_machine = msm7x2x_init,
+ .timer = &msm_timer,
+MACHINE_END
diff --git a/arch/arm/mach-msm/board-msm7x30.c b/arch/arm/mach-msm/board-msm7x30.c
new file mode 100644
index 000000000000..e32981928c77
--- /dev/null
+++ b/arch/arm/mach-msm/board-msm7x30.c
@@ -0,0 +1,119 @@
+/* Copyright (c) 2009-2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/smsc911x.h>
+
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <asm/setup.h>
+
+#include <mach/gpio.h>
+#include <mach/board.h>
+#include <mach/memory.h>
+#include <mach/msm_iomap.h>
+#include <mach/dma.h>
+
+#include <mach/vreg.h>
+#include "devices.h"
+#include "proc_comm.h"
+
+extern struct sys_timer msm_timer;
+
+#ifdef CONFIG_SERIAL_MSM_CONSOLE
+static struct msm_gpio uart2_config_data[] = {
+ { GPIO_CFG(49, 2, GPIO_OUTPUT, GPIO_PULL_DOWN, GPIO_2MA), "UART2_RFR"},
+ { GPIO_CFG(50, 2, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), "UART2_CTS"},
+ { GPIO_CFG(51, 2, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), "UART2_Rx"},
+ { GPIO_CFG(52, 2, GPIO_OUTPUT, GPIO_PULL_DOWN, GPIO_2MA), "UART2_Tx"},
+};
+
+static void msm7x30_init_uart2(void)
+{
+ msm_gpios_request_enable(uart2_config_data,
+ ARRAY_SIZE(uart2_config_data));
+
+}
+#endif
+
+static struct platform_device *devices[] __initdata = {
+#if defined(CONFIG_SERIAL_MSM) || defined(CONFIG_MSM_SERIAL_DEBUGGER)
+ &msm_device_uart2,
+#endif
+
+};
+
+static void __init msm7x30_init_irq(void)
+{
+ msm_init_irq();
+}
+
+static void __init msm7x30_init(void)
+{
+ platform_add_devices(devices, ARRAY_SIZE(devices));
+#ifdef CONFIG_SERIAL_MSM_CONSOLE
+ msm7x30_init_uart2();
+#endif
+
+}
+
+static void __init msm7x30_map_io(void)
+{
+ msm_map_msm7x30_io();
+ msm_clock_init(msm_clocks_7x30, msm_num_clocks_7x30);
+}
+
+MACHINE_START(MSM7X30_SURF, "QCT MSM7X30 SURF")
+#ifdef CONFIG_MSM_DEBUG_UART
+ .phys_io = MSM_DEBUG_UART_PHYS,
+ .io_pg_offst = ((MSM_DEBUG_UART_BASE) >> 18) & 0xfffc,
+#endif
+ .boot_params = PHYS_OFFSET + 0x100,
+ .map_io = msm7x30_map_io,
+ .init_irq = msm7x30_init_irq,
+ .init_machine = msm7x30_init,
+ .timer = &msm_timer,
+MACHINE_END
+
+MACHINE_START(MSM7X30_FFA, "QCT MSM7X30 FFA")
+#ifdef CONFIG_MSM_DEBUG_UART
+ .phys_io = MSM_DEBUG_UART_PHYS,
+ .io_pg_offst = ((MSM_DEBUG_UART_BASE) >> 18) & 0xfffc,
+#endif
+ .boot_params = PHYS_OFFSET + 0x100,
+ .map_io = msm7x30_map_io,
+ .init_irq = msm7x30_init_irq,
+ .init_machine = msm7x30_init,
+ .timer = &msm_timer,
+MACHINE_END
+
+MACHINE_START(MSM7X30_FLUID, "QCT MSM7X30 FLUID")
+#ifdef CONFIG_MSM_DEBUG_UART
+ .phys_io = MSM_DEBUG_UART_PHYS,
+ .io_pg_offst = ((MSM_DEBUG_UART_BASE) >> 18) & 0xfffc,
+#endif
+ .boot_params = PHYS_OFFSET + 0x100,
+ .map_io = msm7x30_map_io,
+ .init_irq = msm7x30_init_irq,
+ .init_machine = msm7x30_init,
+ .timer = &msm_timer,
+MACHINE_END
diff --git a/arch/arm/mach-msm/board-qsd8x50.c b/arch/arm/mach-msm/board-qsd8x50.c
new file mode 100644
index 000000000000..e3cc80792d6c
--- /dev/null
+++ b/arch/arm/mach-msm/board-qsd8x50.c
@@ -0,0 +1,93 @@
+/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <asm/io.h>
+#include <asm/setup.h>
+
+#include <mach/board.h>
+#include <mach/irqs.h>
+#include <mach/sirc.h>
+#include <mach/gpio.h>
+
+#include "devices.h"
+
+extern struct sys_timer msm_timer;
+
+static struct msm_gpio uart3_config_data[] = {
+ { GPIO_CFG(86, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), "UART2_Rx"},
+ { GPIO_CFG(87, 1, GPIO_OUTPUT, GPIO_PULL_DOWN, GPIO_2MA), "UART2_Tx"},
+};
+
+static struct platform_device *devices[] __initdata = {
+ &msm_device_uart3,
+};
+
+static void msm8x50_init_uart3(void)
+{
+ msm_gpios_request_enable(uart3_config_data,
+ ARRAY_SIZE(uart3_config_data));
+}
+
+static void __init qsd8x50_map_io(void)
+{
+ msm_map_qsd8x50_io();
+ msm_clock_init(msm_clocks_8x50, msm_num_clocks_8x50);
+}
+
+static void __init qsd8x50_init_irq(void)
+{
+ msm_init_irq();
+ msm_init_sirc();
+}
+
+static void __init qsd8x50_init(void)
+{
+ msm8x50_init_uart3();
+ platform_add_devices(devices, ARRAY_SIZE(devices));
+}
+
+MACHINE_START(QSD8X50_SURF, "QCT QSD8X50 SURF")
+#ifdef CONFIG_MSM_DEBUG_UART
+ .phys_io = MSM_DEBUG_UART_PHYS,
+ .io_pg_offst = ((MSM_DEBUG_UART_BASE) >> 18) & 0xfffc,
+#endif
+ .boot_params = PHYS_OFFSET + 0x100,
+ .map_io = qsd8x50_map_io,
+ .init_irq = qsd8x50_init_irq,
+ .init_machine = qsd8x50_init,
+ .timer = &msm_timer,
+MACHINE_END
+
+MACHINE_START(QSD8X50A_ST1_5, "QCT QSD8X50A ST1.5")
+#ifdef CONFIG_MSM_DEBUG_UART
+ .phys_io = MSM_DEBUG_UART_PHYS,
+ .io_pg_offst = ((MSM_DEBUG_UART_BASE) >> 18) & 0xfffc,
+#endif
+ .boot_params = PHYS_OFFSET + 0x100,
+ .map_io = qsd8x50_map_io,
+ .init_irq = qsd8x50_init_irq,
+ .init_machine = qsd8x50_init,
+ .timer = &msm_timer,
+MACHINE_END
diff --git a/arch/arm/mach-msm/board-sapphire.c b/arch/arm/mach-msm/board-sapphire.c
new file mode 100644
index 000000000000..2bc1b9d5623e
--- /dev/null
+++ b/arch/arm/mach-msm/board-sapphire.c
@@ -0,0 +1,118 @@
+/* linux/arch/arm/mach-msm/board-sapphire.c
+ * Copyright (C) 2007-2009 HTC Corporation.
+ * Author: Thomas Tsai <thomas_tsai@htc.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+*/
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/sysdev.h>
+
+#include <linux/delay.h>
+
+#include <asm/gpio.h>
+#include <mach/hardware.h>
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <asm/mach/flash.h>
+#include <asm/system.h>
+#include <mach/system.h>
+#include <mach/vreg.h>
+#include <mach/board.h>
+
+#include <asm/io.h>
+#include <asm/delay.h>
+#include <asm/setup.h>
+
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+
+#include "gpio_chip.h"
+#include "board-sapphire.h"
+#include "proc_comm.h"
+#include "devices.h"
+
+void msm_init_irq(void);
+void msm_init_gpio(void);
+
+static struct platform_device *devices[] __initdata = {
+ &msm_device_smd,
+ &msm_device_dmov,
+ &msm_device_nand,
+ &msm_device_uart1,
+ &msm_device_uart3,
+};
+
+extern struct sys_timer msm_timer;
+
+static void __init sapphire_init_irq(void)
+{
+ msm_init_irq();
+}
+
+static void __init sapphire_init(void)
+{
+ platform_add_devices(devices, ARRAY_SIZE(devices));
+}
+
+static struct map_desc sapphire_io_desc[] __initdata = {
+ {
+ .virtual = SAPPHIRE_CPLD_BASE,
+ .pfn = __phys_to_pfn(SAPPHIRE_CPLD_START),
+ .length = SAPPHIRE_CPLD_SIZE,
+ .type = MT_DEVICE_NONSHARED
+ }
+};
+
+static void __init sapphire_fixup(struct machine_desc *desc, struct tag *tags,
+ char **cmdline, struct meminfo *mi)
+{
+ int smi_sz = parse_tag_smi((const struct tag *)tags);
+
+ mi->nr_banks = 1;
+ mi->bank[0].start = PHYS_OFFSET;
+ mi->bank[0].node = PHYS_TO_NID(PHYS_OFFSET);
+ if (smi_sz == 32) {
+ mi->bank[0].size = (84*1024*1024);
+ } else if (smi_sz == 64) {
+ mi->bank[0].size = (101*1024*1024);
+ } else {
+		/* Fall back to the default when the SMI size is unavailable. */
+ smi_sz = 64;
+ mi->bank[0].size = (101*1024*1024);
+ }
+}
+
+static void __init sapphire_map_io(void)
+{
+ msm_map_common_io();
+ iotable_init(sapphire_io_desc, ARRAY_SIZE(sapphire_io_desc));
+ msm_clock_init();
+}
+
+MACHINE_START(SAPPHIRE, "sapphire")
+/* Maintainer: Brian Swetland <swetland@google.com> */
+#ifdef CONFIG_MSM_DEBUG_UART
+ .phys_io = MSM_DEBUG_UART_PHYS,
+ .io_pg_offst = ((MSM_DEBUG_UART_BASE) >> 18) & 0xfffc,
+#endif
+ .boot_params = PHYS_OFFSET + 0x100,
+ .fixup = sapphire_fixup,
+ .map_io = sapphire_map_io,
+ .init_irq = sapphire_init_irq,
+ .init_machine = sapphire_init,
+ .timer = &msm_timer,
+MACHINE_END
diff --git a/arch/arm/mach-msm/board-dream.c b/arch/arm/mach-msm/board-trout.c
index 21afa8513168..dca5a5f062dc 100644
--- a/arch/arm/mach-msm/board-dream.c
+++ b/arch/arm/mach-msm/board-trout.c
@@ -1,4 +1,4 @@
-/* linux/arch/arm/mach-msm/board-dream.c
+/* linux/arch/arm/mach-msm/board-trout.c
*
* Copyright (C) 2009 Google, Inc.
* Author: Brian Swetland <swetland@google.com>
@@ -28,7 +28,7 @@
#include <mach/msm_iomap.h>
#include "devices.h"
-#include "board-dream.h"
+#include "board-trout.h"
static struct platform_device *devices[] __initdata = {
&msm_device_uart3,
@@ -78,12 +78,14 @@ static void __init trout_map_io(void)
writeb(0x80, TROUT_CPLD_BASE + 0x00);
#endif
- msm_clock_init();
+ msm_clock_init(msm_clocks_7x01a, msm_num_clocks_7x01a);
}
MACHINE_START(TROUT, "HTC Dream")
+#ifdef CONFIG_MSM_DEBUG_UART
.phys_io = MSM_DEBUG_UART_PHYS,
.io_pg_offst = ((MSM_DEBUG_UART_BASE) >> 18) & 0xfffc,
+#endif
.boot_params = 0x10000100,
.fixup = trout_fixup,
.map_io = trout_map_io,
diff --git a/arch/arm/mach-msm/board-dream.h b/arch/arm/mach-msm/board-trout.h
index 4f345a5a0a61..4f345a5a0a61 100644
--- a/arch/arm/mach-msm/board-dream.h
+++ b/arch/arm/mach-msm/board-trout.h
diff --git a/arch/arm/mach-msm/clock-7x01a.c b/arch/arm/mach-msm/clock-7x01a.c
deleted file mode 100644
index 62230a3428ee..000000000000
--- a/arch/arm/mach-msm/clock-7x01a.c
+++ /dev/null
@@ -1,126 +0,0 @@
-/* arch/arm/mach-msm/clock-7x01a.c
- *
- * Clock tables for MSM7X01A
- *
- * Copyright (C) 2007 Google, Inc.
- * Copyright (c) 2007 QUALCOMM Incorporated
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
-
-#include "clock.h"
-#include "devices.h"
-
-/* clock IDs used by the modem processor */
-
-#define ACPU_CLK 0 /* Applications processor clock */
-#define ADM_CLK 1 /* Applications data mover clock */
-#define ADSP_CLK 2 /* ADSP clock */
-#define EBI1_CLK 3 /* External bus interface 1 clock */
-#define EBI2_CLK 4 /* External bus interface 2 clock */
-#define ECODEC_CLK 5 /* External CODEC clock */
-#define EMDH_CLK 6 /* External MDDI host clock */
-#define GP_CLK 7 /* General purpose clock */
-#define GRP_CLK 8 /* Graphics clock */
-#define I2C_CLK 9 /* I2C clock */
-#define ICODEC_RX_CLK 10 /* Internal CODEX RX clock */
-#define ICODEC_TX_CLK 11 /* Internal CODEX TX clock */
-#define IMEM_CLK 12 /* Internal graphics memory clock */
-#define MDC_CLK 13 /* MDDI client clock */
-#define MDP_CLK 14 /* Mobile display processor clock */
-#define PBUS_CLK 15 /* Peripheral bus clock */
-#define PCM_CLK 16 /* PCM clock */
-#define PMDH_CLK 17 /* Primary MDDI host clock */
-#define SDAC_CLK 18 /* Stereo DAC clock */
-#define SDC1_CLK 19 /* Secure Digital Card clocks */
-#define SDC1_PCLK 20
-#define SDC2_CLK 21
-#define SDC2_PCLK 22
-#define SDC3_CLK 23
-#define SDC3_PCLK 24
-#define SDC4_CLK 25
-#define SDC4_PCLK 26
-#define TSIF_CLK 27 /* Transport Stream Interface clocks */
-#define TSIF_REF_CLK 28
-#define TV_DAC_CLK 29 /* TV clocks */
-#define TV_ENC_CLK 30
-#define UART1_CLK 31 /* UART clocks */
-#define UART2_CLK 32
-#define UART3_CLK 33
-#define UART1DM_CLK 34
-#define UART2DM_CLK 35
-#define USB_HS_CLK 36 /* High speed USB core clock */
-#define USB_HS_PCLK 37 /* High speed USB pbus clock */
-#define USB_OTG_CLK 38 /* Full speed USB clock */
-#define VDC_CLK 39 /* Video controller clock */
-#define VFE_CLK 40 /* Camera / Video Front End clock */
-#define VFE_MDC_CLK 41 /* VFE MDDI client clock */
-
-#define NR_CLKS 42
-
-#define CLOCK(clk_name, clk_id, clk_dev, clk_flags) { \
- .name = clk_name, \
- .id = clk_id, \
- .flags = clk_flags, \
- .dev = clk_dev, \
- }
-
-#define OFF CLKFLAG_AUTO_OFF
-#define MINMAX CLKFLAG_USE_MIN_MAX_TO_SET
-
-struct clk msm_clocks[] = {
- CLOCK("adm_clk", ADM_CLK, NULL, 0),
- CLOCK("adsp_clk", ADSP_CLK, NULL, 0),
- CLOCK("ebi1_clk", EBI1_CLK, NULL, 0),
- CLOCK("ebi2_clk", EBI2_CLK, NULL, 0),
- CLOCK("ecodec_clk", ECODEC_CLK, NULL, 0),
- CLOCK("emdh_clk", EMDH_CLK, NULL, OFF),
- CLOCK("gp_clk", GP_CLK, NULL, 0),
- CLOCK("grp_clk", GRP_CLK, NULL, OFF),
- CLOCK("i2c_clk", I2C_CLK, &msm_device_i2c.dev, 0),
- CLOCK("icodec_rx_clk", ICODEC_RX_CLK, NULL, 0),
- CLOCK("icodec_tx_clk", ICODEC_TX_CLK, NULL, 0),
- CLOCK("imem_clk", IMEM_CLK, NULL, OFF),
- CLOCK("mdc_clk", MDC_CLK, NULL, 0),
- CLOCK("mdp_clk", MDP_CLK, NULL, OFF),
- CLOCK("pbus_clk", PBUS_CLK, NULL, 0),
- CLOCK("pcm_clk", PCM_CLK, NULL, 0),
- CLOCK("pmdh_clk", PMDH_CLK, NULL, OFF | MINMAX),
- CLOCK("sdac_clk", SDAC_CLK, NULL, OFF),
- CLOCK("sdc_clk", SDC1_CLK, &msm_device_sdc1.dev, OFF),
- CLOCK("sdc_pclk", SDC1_PCLK, &msm_device_sdc1.dev, OFF),
- CLOCK("sdc_clk", SDC2_CLK, &msm_device_sdc2.dev, OFF),
- CLOCK("sdc_pclk", SDC2_PCLK, &msm_device_sdc2.dev, OFF),
- CLOCK("sdc_clk", SDC3_CLK, &msm_device_sdc3.dev, OFF),
- CLOCK("sdc_pclk", SDC3_PCLK, &msm_device_sdc3.dev, OFF),
- CLOCK("sdc_clk", SDC4_CLK, &msm_device_sdc4.dev, OFF),
- CLOCK("sdc_pclk", SDC4_PCLK, &msm_device_sdc4.dev, OFF),
- CLOCK("tsif_clk", TSIF_CLK, NULL, 0),
- CLOCK("tsif_ref_clk", TSIF_REF_CLK, NULL, 0),
- CLOCK("tv_dac_clk", TV_DAC_CLK, NULL, 0),
- CLOCK("tv_enc_clk", TV_ENC_CLK, NULL, 0),
- CLOCK("uart_clk", UART1_CLK, &msm_device_uart1.dev, OFF),
- CLOCK("uart_clk", UART2_CLK, &msm_device_uart2.dev, 0),
- CLOCK("uart_clk", UART3_CLK, &msm_device_uart3.dev, OFF),
- CLOCK("uart1dm_clk", UART1DM_CLK, NULL, OFF),
- CLOCK("uart2dm_clk", UART2DM_CLK, NULL, 0),
- CLOCK("usb_hs_clk", USB_HS_CLK, &msm_device_hsusb.dev, OFF),
- CLOCK("usb_hs_pclk", USB_HS_PCLK, &msm_device_hsusb.dev, OFF),
- CLOCK("usb_otg_clk", USB_OTG_CLK, NULL, 0),
- CLOCK("vdc_clk", VDC_CLK, NULL, OFF | MINMAX),
- CLOCK("vfe_clk", VFE_CLK, NULL, OFF),
- CLOCK("vfe_mdc_clk", VFE_MDC_CLK, NULL, OFF),
-};
-
-unsigned msm_num_clocks = ARRAY_SIZE(msm_clocks);
diff --git a/arch/arm/mach-msm/clock-7x30.h b/arch/arm/mach-msm/clock-7x30.h
new file mode 100644
index 000000000000..e16f72f32829
--- /dev/null
+++ b/arch/arm/mach-msm/clock-7x30.h
@@ -0,0 +1,168 @@
+/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of Code Aurora Forum, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_CLOCK_7X30_H
+#define __ARCH_ARM_MACH_MSM_CLOCK_7X30_H
+
+enum {
+ L_7X30_NONE_CLK = -1,
+ L_7X30_ADM_CLK,
+ L_7X30_I2C_CLK,
+ L_7X30_I2C_2_CLK,
+ L_7X30_QUP_I2C_CLK,
+ L_7X30_UART1DM_CLK,
+ L_7X30_UART1DM_P_CLK,
+ L_7X30_UART2DM_CLK,
+ L_7X30_UART2DM_P_CLK,
+ L_7X30_EMDH_CLK,
+ L_7X30_EMDH_P_CLK,
+ L_7X30_PMDH_CLK,
+ L_7X30_PMDH_P_CLK,
+ L_7X30_GRP_2D_CLK,
+ L_7X30_GRP_2D_P_CLK,
+ L_7X30_GRP_3D_SRC_CLK,
+ L_7X30_GRP_3D_CLK,
+ L_7X30_GRP_3D_P_CLK,
+ L_7X30_IMEM_CLK,
+ L_7X30_SDC1_CLK,
+ L_7X30_SDC1_P_CLK,
+ L_7X30_SDC2_CLK,
+ L_7X30_SDC2_P_CLK,
+ L_7X30_SDC3_CLK,
+ L_7X30_SDC3_P_CLK,
+ L_7X30_SDC4_CLK,
+ L_7X30_SDC4_P_CLK,
+ L_7X30_MDP_CLK,
+ L_7X30_MDP_P_CLK,
+ L_7X30_MDP_LCDC_PCLK_CLK,
+ L_7X30_MDP_LCDC_PAD_PCLK_CLK,
+ L_7X30_MDP_VSYNC_CLK,
+ L_7X30_MI2S_CODEC_RX_M_CLK,
+ L_7X30_MI2S_CODEC_RX_S_CLK,
+ L_7X30_MI2S_CODEC_TX_M_CLK,
+ L_7X30_MI2S_CODEC_TX_S_CLK,
+ L_7X30_MI2S_M_CLK,
+ L_7X30_MI2S_S_CLK,
+ L_7X30_LPA_CODEC_CLK,
+ L_7X30_LPA_CORE_CLK,
+ L_7X30_LPA_P_CLK,
+ L_7X30_MIDI_CLK,
+ L_7X30_MDC_CLK,
+ L_7X30_ROTATOR_IMEM_CLK,
+ L_7X30_ROTATOR_P_CLK,
+ L_7X30_SDAC_M_CLK,
+ L_7X30_SDAC_CLK,
+ L_7X30_UART1_CLK,
+ L_7X30_UART2_CLK,
+ L_7X30_UART3_CLK,
+ L_7X30_TV_CLK,
+ L_7X30_TV_DAC_CLK,
+ L_7X30_TV_ENC_CLK,
+ L_7X30_HDMI_CLK,
+ L_7X30_TSIF_REF_CLK,
+ L_7X30_TSIF_P_CLK,
+ L_7X30_USB_HS_SRC_CLK,
+ L_7X30_USB_HS_CLK,
+ L_7X30_USB_HS_CORE_CLK,
+ L_7X30_USB_HS_P_CLK,
+ L_7X30_USB_HS2_CLK,
+ L_7X30_USB_HS2_CORE_CLK,
+ L_7X30_USB_HS2_P_CLK,
+ L_7X30_USB_HS3_CLK,
+ L_7X30_USB_HS3_CORE_CLK,
+ L_7X30_USB_HS3_P_CLK,
+ L_7X30_VFE_CLK,
+ L_7X30_VFE_P_CLK,
+ L_7X30_VFE_MDC_CLK,
+ L_7X30_VFE_CAMIF_CLK,
+ L_7X30_CAMIF_PAD_P_CLK,
+ L_7X30_CAM_M_CLK,
+ L_7X30_JPEG_CLK,
+ L_7X30_JPEG_P_CLK,
+ L_7X30_VPE_CLK,
+ L_7X30_MFC_CLK,
+ L_7X30_MFC_DIV2_CLK,
+ L_7X30_MFC_P_CLK,
+ L_7X30_SPI_CLK,
+ L_7X30_SPI_P_CLK,
+ L_7X30_CSI0_CLK,
+ L_7X30_CSI0_VFE_CLK,
+ L_7X30_CSI0_P_CLK,
+ L_7X30_CSI1_CLK,
+ L_7X30_CSI1_VFE_CLK,
+ L_7X30_CSI1_P_CLK,
+ L_7X30_GLBL_ROOT_CLK,
+
+ L_7X30_AXI_LI_VG_CLK,
+ L_7X30_AXI_LI_GRP_CLK,
+ L_7X30_AXI_LI_JPEG_CLK,
+ L_7X30_AXI_GRP_2D_CLK,
+ L_7X30_AXI_MFC_CLK,
+ L_7X30_AXI_VPE_CLK,
+ L_7X30_AXI_LI_VFE_CLK,
+ L_7X30_AXI_LI_APPS_CLK,
+ L_7X30_AXI_MDP_CLK,
+ L_7X30_AXI_IMEM_CLK,
+ L_7X30_AXI_LI_ADSP_A_CLK,
+ L_7X30_AXI_ROTATOR_CLK,
+
+ L_7X30_NR_CLKS
+};
+
+struct clk_ops;
+extern struct clk_ops clk_ops_7x30;
+
+struct clk_ops *clk_7x30_is_local(uint32_t id);
+int clk_7x30_init(void);
+
+void pll_enable(uint32_t pll);
+void pll_disable(uint32_t pll);
+
+extern int internal_pwr_rail_ctl_auto(unsigned rail_id, bool enable);
+
+#define CLK_7X30(clk_name, clk_id, clk_dev, clk_flags) { \
+ .name = clk_name, \
+ .id = L_7X30_##clk_id, \
+ .remote_id = P_##clk_id, \
+ .flags = clk_flags, \
+ .dev = clk_dev, \
+ .dbg_name = #clk_id, \
+ }
+
+#define CLK_7X30S(clk_name, l_id, r_id, clk_dev, clk_flags) { \
+ .name = clk_name, \
+ .id = L_7X30_##l_id, \
+ .remote_id = P_##r_id, \
+ .flags = clk_flags, \
+ .dev = clk_dev, \
+ .dbg_name = #l_id, \
+ }
+
+#endif
+
diff --git a/arch/arm/mach-msm/clock-pcom.c b/arch/arm/mach-msm/clock-pcom.c
new file mode 100644
index 000000000000..a3b45627eb4a
--- /dev/null
+++ b/arch/arm/mach-msm/clock-pcom.c
@@ -0,0 +1,131 @@
+/*
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2007-2010, Code Aurora Forum. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/ctype.h>
+#include <linux/stddef.h>
+#include <mach/clk.h>
+
+#include "proc_comm.h"
+#include "clock.h"
+
+/*
+ * glue for the proc_comm interface
+ */
+int pc_clk_enable(unsigned id)
+{
+ int rc = msm_proc_comm(PCOM_CLKCTL_RPC_ENABLE, &id, NULL);
+ if (rc < 0)
+ return rc;
+ else
+ return (int)id < 0 ? -EINVAL : 0;
+}
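+
+/*
+ * Note on the idiom above (and in the helpers below): msm_proc_comm() takes
+ * 'id' by reference, and the '(int)id < 0' check suggests the remote
+ * processor writes a status back through that argument, so a negative value
+ * is treated as the modem rejecting the request. This is an inference from
+ * the call pattern, not a documented proc_comm guarantee.
+ */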
+
+void pc_clk_disable(unsigned id)
+{
+ msm_proc_comm(PCOM_CLKCTL_RPC_DISABLE, &id, NULL);
+}
+
+int pc_clk_reset(unsigned id, enum clk_reset_action action)
+{
+ int rc;
+
+ if (action == CLK_RESET_ASSERT)
+ rc = msm_proc_comm(PCOM_CLKCTL_RPC_RESET_ASSERT, &id, NULL);
+ else
+ rc = msm_proc_comm(PCOM_CLKCTL_RPC_RESET_DEASSERT, &id, NULL);
+
+ if (rc < 0)
+ return rc;
+ else
+ return (int)id < 0 ? -EINVAL : 0;
+}
+
+int pc_clk_set_rate(unsigned id, unsigned rate)
+{
+ /* The rate _might_ be rounded off to the nearest KHz value by the
+ * remote function. So a return value of 0 doesn't necessarily mean
+ * that the exact rate was set successfully.
+ */
+ int rc = msm_proc_comm(PCOM_CLKCTL_RPC_SET_RATE, &id, &rate);
+ if (rc < 0)
+ return rc;
+ else
+ return (int)id < 0 ? -EINVAL : 0;
+}
+
+int pc_clk_set_min_rate(unsigned id, unsigned rate)
+{
+ int rc = msm_proc_comm(PCOM_CLKCTL_RPC_MIN_RATE, &id, &rate);
+ if (rc < 0)
+ return rc;
+ else
+ return (int)id < 0 ? -EINVAL : 0;
+}
+
+int pc_clk_set_max_rate(unsigned id, unsigned rate)
+{
+ int rc = msm_proc_comm(PCOM_CLKCTL_RPC_MAX_RATE, &id, &rate);
+ if (rc < 0)
+ return rc;
+ else
+ return (int)id < 0 ? -EINVAL : 0;
+}
+
+int pc_clk_set_flags(unsigned id, unsigned flags)
+{
+ int rc = msm_proc_comm(PCOM_CLKCTL_RPC_SET_FLAGS, &id, &flags);
+ if (rc < 0)
+ return rc;
+ else
+ return (int)id < 0 ? -EINVAL : 0;
+}
+
+unsigned pc_clk_get_rate(unsigned id)
+{
+ if (msm_proc_comm(PCOM_CLKCTL_RPC_RATE, &id, NULL))
+ return 0;
+ else
+ return id;
+}
+
+unsigned pc_clk_is_enabled(unsigned id)
+{
+ if (msm_proc_comm(PCOM_CLKCTL_RPC_ENABLED, &id, NULL))
+ return 0;
+ else
+ return id;
+}
+
+long pc_clk_round_rate(unsigned id, unsigned rate)
+{
+
+	/* Not really supported; pc_clk_set_rate() does rounding on its own. */
+ return rate;
+}
+
+struct clk_ops clk_ops_pcom = {
+ .enable = pc_clk_enable,
+ .disable = pc_clk_disable,
+ .auto_off = pc_clk_disable,
+ .reset = pc_clk_reset,
+ .set_rate = pc_clk_set_rate,
+ .set_min_rate = pc_clk_set_min_rate,
+ .set_max_rate = pc_clk_set_max_rate,
+ .set_flags = pc_clk_set_flags,
+ .get_rate = pc_clk_get_rate,
+ .is_enabled = pc_clk_is_enabled,
+ .round_rate = pc_clk_round_rate,
+};
diff --git a/arch/arm/mach-msm/clock-pcom.h b/arch/arm/mach-msm/clock-pcom.h
new file mode 100644
index 000000000000..17d027b23501
--- /dev/null
+++ b/arch/arm/mach-msm/clock-pcom.h
@@ -0,0 +1,153 @@
+/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of Code Aurora Forum, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_CLOCK_PCOM_H
+#define __ARCH_ARM_MACH_MSM_CLOCK_PCOM_H
+
+/* clock IDs used by the modem processor */
+
+#define P_ACPU_CLK 0 /* Applications processor clock */
+#define P_ADM_CLK 1 /* Applications data mover clock */
+#define P_ADSP_CLK 2 /* ADSP clock */
+#define P_EBI1_CLK 3 /* External bus interface 1 clock */
+#define P_EBI2_CLK 4 /* External bus interface 2 clock */
+#define P_ECODEC_CLK 5 /* External CODEC clock */
+#define P_EMDH_CLK 6 /* External MDDI host clock */
+#define P_GP_CLK 7 /* General purpose clock */
+#define P_GRP_3D_CLK 8 /* Graphics clock */
+#define P_I2C_CLK 9 /* I2C clock */
+#define P_ICODEC_RX_CLK	10	/* Internal CODEC RX clock */
+#define P_ICODEC_TX_CLK	11	/* Internal CODEC TX clock */
+#define P_IMEM_CLK 12 /* Internal graphics memory clock */
+#define P_MDC_CLK 13 /* MDDI client clock */
+#define P_MDP_CLK 14 /* Mobile display processor clock */
+#define P_PBUS_CLK 15 /* Peripheral bus clock */
+#define P_PCM_CLK 16 /* PCM clock */
+#define P_PMDH_CLK 17 /* Primary MDDI host clock */
+#define P_SDAC_CLK 18 /* Stereo DAC clock */
+#define P_SDC1_CLK 19 /* Secure Digital Card clocks */
+#define P_SDC1_P_CLK 20
+#define P_SDC2_CLK 21
+#define P_SDC2_P_CLK 22
+#define P_SDC3_CLK 23
+#define P_SDC3_P_CLK 24
+#define P_SDC4_CLK 25
+#define P_SDC4_P_CLK 26
+#define P_TSIF_CLK 27 /* Transport Stream Interface clocks */
+#define P_TSIF_REF_CLK 28
+#define P_TV_DAC_CLK 29 /* TV clocks */
+#define P_TV_ENC_CLK 30
+#define P_UART1_CLK 31 /* UART clocks */
+#define P_UART2_CLK 32
+#define P_UART3_CLK 33
+#define P_UART1DM_CLK 34
+#define P_UART2DM_CLK 35
+#define P_USB_HS_CLK 36 /* High speed USB core clock */
+#define P_USB_HS_P_CLK 37 /* High speed USB pbus clock */
+#define P_USB_OTG_CLK 38 /* Full speed USB clock */
+#define P_VDC_CLK 39 /* Video controller clock */
+#define P_VFE_MDC_CLK		40	/* VFE MDDI client clock */
+#define P_VFE_CLK		41	/* Camera / Video Front End clock */
+#define P_MDP_LCDC_PCLK_CLK 42
+#define P_MDP_LCDC_PAD_PCLK_CLK 43
+#define P_MDP_VSYNC_CLK 44
+#define P_SPI_CLK 45
+#define P_VFE_AXI_CLK 46
+#define P_USB_HS2_CLK		47	/* High speed USB 2 clock */
+#define P_USB_HS2_P_CLK		48	/* High speed USB 2 pbus clock */
+#define P_USB_HS3_CLK		49	/* High speed USB 3 clock */
+#define P_USB_HS3_P_CLK		50	/* High speed USB 3 pbus clock */
+#define P_GRP_3D_P_CLK 51 /* Graphics pbus clock */
+#define P_USB_PHY_CLK 52 /* USB PHY clock */
+#define P_USB_HS_CORE_CLK 53 /* High speed USB 1 core clock */
+#define P_USB_HS2_CORE_CLK 54 /* High speed USB 2 core clock */
+#define P_USB_HS3_CORE_CLK 55 /* High speed USB 3 core clock */
+#define P_CAM_M_CLK 56
+#define P_CAMIF_PAD_P_CLK 57
+#define P_GRP_2D_CLK 58
+#define P_GRP_2D_P_CLK 59
+#define P_I2S_CLK 60
+#define P_JPEG_CLK 61
+#define P_JPEG_P_CLK 62
+#define P_LPA_CODEC_CLK 63
+#define P_LPA_CORE_CLK 64
+#define P_LPA_P_CLK 65
+#define P_MDC_IO_CLK 66
+#define P_MDC_P_CLK 67
+#define P_MFC_CLK 68
+#define P_MFC_DIV2_CLK 69
+#define P_MFC_P_CLK 70
+#define P_QUP_I2C_CLK 71
+#define P_ROTATOR_IMEM_CLK 72
+#define P_ROTATOR_P_CLK 73
+#define P_VFE_CAMIF_CLK 74
+#define P_VFE_P_CLK 75
+#define P_VPE_CLK 76
+#define P_I2C_2_CLK 77
+#define P_MI2S_CODEC_RX_S_CLK 78
+#define P_MI2S_CODEC_RX_M_CLK 79
+#define P_MI2S_CODEC_TX_S_CLK 80
+#define P_MI2S_CODEC_TX_M_CLK 81
+#define P_PMDH_P_CLK 82
+#define P_EMDH_P_CLK 83
+#define P_SPI_P_CLK 84
+#define P_TSIF_P_CLK 85
+#define P_MDP_P_CLK 86
+#define P_SDAC_M_CLK 87
+#define P_MI2S_S_CLK 88
+#define P_MI2S_M_CLK 89
+#define P_AXI_ROTATOR_CLK 90
+#define P_HDMI_CLK 91
+#define P_CSI0_CLK 92
+#define P_CSI0_VFE_CLK 93
+#define P_CSI0_P_CLK 94
+#define P_CSI1_CLK 95
+#define P_CSI1_VFE_CLK 96
+#define P_CSI1_P_CLK 97
+#define P_GSBI_CLK 98
+#define P_GSBI_P_CLK 99
+
+#define P_NR_CLKS 100
+
+struct clk_ops;
+extern struct clk_ops clk_ops_pcom;
+
+int pc_clk_reset(unsigned id, enum clk_reset_action action);
+
+#define CLK_PCOM(clk_name, clk_id, clk_dev, clk_flags) { \
+ .name = clk_name, \
+ .id = P_##clk_id, \
+ .remote_id = P_##clk_id, \
+ .ops = &clk_ops_pcom, \
+ .flags = clk_flags, \
+ .dev = clk_dev, \
+ .dbg_name = #clk_id, \
+ }
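+
+/*
+ * Example usage (illustrative, modelled on the entries that the removed
+ * clock-7x01a.c defined with CLOCK()): a board clock table entry for UART3
+ * driven over proc_comm could be written as
+ *
+ *	CLK_PCOM("uart_clk", UART3_CLK, &msm_device_uart3.dev, OFF),
+ *
+ * which yields a struct clk whose id and remote_id are both P_UART3_CLK and
+ * whose ops point at clk_ops_pcom.
+ */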
+
+#endif
diff --git a/arch/arm/mach-msm/clock.c b/arch/arm/mach-msm/clock.c
index 3b1ce36f1032..9cb1276ab749 100644
--- a/arch/arm/mach-msm/clock.c
+++ b/arch/arm/mach-msm/clock.c
@@ -1,7 +1,7 @@
/* arch/arm/mach-msm/clock.c
*
* Copyright (C) 2007 Google, Inc.
- * Copyright (c) 2007 QUALCOMM Incorporated
+ * Copyright (c) 2007-2010, Code Aurora Forum. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -22,68 +22,27 @@
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/spinlock.h>
+#include <linux/debugfs.h>
+#include <linux/ctype.h>
+#include <linux/pm_qos_params.h>
+#include <mach/clk.h>
#include "clock.h"
#include "proc_comm.h"
+#include "clock-7x30.h"
static DEFINE_MUTEX(clocks_mutex);
static DEFINE_SPINLOCK(clocks_lock);
static LIST_HEAD(clocks);
+struct clk *msm_clocks;
+unsigned msm_num_clocks;
/*
- * glue for the proc_comm interface
+ * Bitmap of enabled clocks, excluding ACPU which is always
+ * enabled
*/
-static inline int pc_clk_enable(unsigned id)
-{
- return msm_proc_comm(PCOM_CLKCTL_RPC_ENABLE, &id, NULL);
-}
-
-static inline void pc_clk_disable(unsigned id)
-{
- msm_proc_comm(PCOM_CLKCTL_RPC_DISABLE, &id, NULL);
-}
-
-static inline int pc_clk_set_rate(unsigned id, unsigned rate)
-{
- return msm_proc_comm(PCOM_CLKCTL_RPC_SET_RATE, &id, &rate);
-}
-
-static inline int pc_clk_set_min_rate(unsigned id, unsigned rate)
-{
- return msm_proc_comm(PCOM_CLKCTL_RPC_MIN_RATE, &id, &rate);
-}
-
-static inline int pc_clk_set_max_rate(unsigned id, unsigned rate)
-{
- return msm_proc_comm(PCOM_CLKCTL_RPC_MAX_RATE, &id, &rate);
-}
-
-static inline int pc_clk_set_flags(unsigned id, unsigned flags)
-{
- return msm_proc_comm(PCOM_CLKCTL_RPC_SET_FLAGS, &id, &flags);
-}
-
-static inline unsigned pc_clk_get_rate(unsigned id)
-{
- if (msm_proc_comm(PCOM_CLKCTL_RPC_RATE, &id, NULL))
- return 0;
- else
- return id;
-}
-
-static inline unsigned pc_clk_is_enabled(unsigned id)
-{
- if (msm_proc_comm(PCOM_CLKCTL_RPC_ENABLED, &id, NULL))
- return 0;
- else
- return id;
-}
-
-static inline int pc_pll_request(unsigned id, unsigned on)
-{
- on = !!on;
- return msm_proc_comm(PCOM_CLKCTL_RPC_PLL_REQUEST, &id, &on);
-}
+static DECLARE_BITMAP(clock_map_enabled, NR_CLKS);
+static DEFINE_SPINLOCK(clock_map_lock);
/*
* Standard clock functions defined in include/linux/clk.h
@@ -119,8 +78,12 @@ int clk_enable(struct clk *clk)
unsigned long flags;
spin_lock_irqsave(&clocks_lock, flags);
clk->count++;
- if (clk->count == 1)
- pc_clk_enable(clk->id);
+ if (clk->count == 1) {
+ clk->ops->enable(clk->id);
+ spin_lock(&clock_map_lock);
+ clock_map_enabled[BIT_WORD(clk->id)] |= BIT_MASK(clk->id);
+ spin_unlock(&clock_map_lock);
+ }
spin_unlock_irqrestore(&clocks_lock, flags);
return 0;
}
@@ -132,31 +95,54 @@ void clk_disable(struct clk *clk)
spin_lock_irqsave(&clocks_lock, flags);
BUG_ON(clk->count == 0);
clk->count--;
- if (clk->count == 0)
- pc_clk_disable(clk->id);
+ if (clk->count == 0) {
+ clk->ops->disable(clk->id);
+ spin_lock(&clock_map_lock);
+ clock_map_enabled[BIT_WORD(clk->id)] &= ~BIT_MASK(clk->id);
+ spin_unlock(&clock_map_lock);
+ }
spin_unlock_irqrestore(&clocks_lock, flags);
}
EXPORT_SYMBOL(clk_disable);
+int clk_reset(struct clk *clk, enum clk_reset_action action)
+{
+ if (!clk->ops->reset)
+ clk->ops->reset = &pc_clk_reset;
+ return clk->ops->reset(clk->remote_id, action);
+}
+EXPORT_SYMBOL(clk_reset);
+
unsigned long clk_get_rate(struct clk *clk)
{
- return pc_clk_get_rate(clk->id);
+ return clk->ops->get_rate(clk->id);
}
EXPORT_SYMBOL(clk_get_rate);
int clk_set_rate(struct clk *clk, unsigned long rate)
{
- int ret;
- if (clk->flags & CLKFLAG_USE_MIN_MAX_TO_SET) {
- ret = pc_clk_set_max_rate(clk->id, rate);
- if (ret)
- return ret;
- return pc_clk_set_min_rate(clk->id, rate);
- }
- return pc_clk_set_rate(clk->id, rate);
+ return clk->ops->set_rate(clk->id, rate);
}
EXPORT_SYMBOL(clk_set_rate);
+long clk_round_rate(struct clk *clk, unsigned long rate)
+{
+ return clk->ops->round_rate(clk->id, rate);
+}
+EXPORT_SYMBOL(clk_round_rate);
+
+int clk_set_min_rate(struct clk *clk, unsigned long rate)
+{
+ return clk->ops->set_min_rate(clk->id, rate);
+}
+EXPORT_SYMBOL(clk_set_min_rate);
+
+int clk_set_max_rate(struct clk *clk, unsigned long rate)
+{
+ return clk->ops->set_max_rate(clk->id, rate);
+}
+EXPORT_SYMBOL(clk_set_max_rate);
+
int clk_set_parent(struct clk *clk, struct clk *parent)
{
return -ENOSYS;
@@ -173,22 +159,153 @@ int clk_set_flags(struct clk *clk, unsigned long flags)
{
if (clk == NULL || IS_ERR(clk))
return -EINVAL;
- return pc_clk_set_flags(clk->id, flags);
+ return clk->ops->set_flags(clk->id, flags);
}
EXPORT_SYMBOL(clk_set_flags);
+/* EBI1 is the only shared clock that several clients want to vote on as of
+ * this commit. If this changes in the future, then it might be better to
+ * make clk_min_rate handle the voting or make ebi1_clk_set_min_rate more
+ * generic to support different clocks.
+ */
+static struct clk *ebi1_clk;
-void __init msm_clock_init(void)
+static void __init set_clock_ops(struct clk *clk)
+{
+ if (!clk->ops) {
+ clk->ops = &clk_ops_pcom;
+ clk->id = clk->remote_id;
+ }
+}
+
+void __init msm_clock_init(struct clk *clock_tbl, unsigned num_clocks)
{
unsigned n;
spin_lock_init(&clocks_lock);
mutex_lock(&clocks_mutex);
- for (n = 0; n < msm_num_clocks; n++)
+ msm_clocks = clock_tbl;
+ msm_num_clocks = num_clocks;
+ for (n = 0; n < msm_num_clocks; n++) {
+ set_clock_ops(&msm_clocks[n]);
list_add_tail(&msm_clocks[n].list, &clocks);
+ }
mutex_unlock(&clocks_mutex);
+
+ ebi1_clk = clk_get(NULL, "ebi1_clk");
+ BUG_ON(ebi1_clk == NULL);
+
+}
+
+#if defined(CONFIG_DEBUG_FS)
+static struct clk *msm_clock_get_nth(unsigned index)
+{
+ if (index < msm_num_clocks)
+ return msm_clocks + index;
+ else
+ return NULL;
+}
+
+static int clock_debug_rate_set(void *data, u64 val)
+{
+ struct clk *clock = data;
+ int ret;
+
+ /* Only increases to the max rate will succeed, but that's actually good
+ * for debugging purposes, so we don't check for errors. */
+ if (clock->flags & CLK_MAX)
+ clk_set_max_rate(clock, val);
+ if (clock->flags & CLK_MIN)
+ ret = clk_set_min_rate(clock, val);
+ else
+ ret = clk_set_rate(clock, val);
+ if (ret != 0)
+ printk(KERN_ERR "clk_set%s_rate failed (%d)\n",
+ (clock->flags & CLK_MIN) ? "_min" : "", ret);
+ return ret;
+}
+
+static int clock_debug_rate_get(void *data, u64 *val)
+{
+ struct clk *clock = data;
+ *val = clk_get_rate(clock);
+ return 0;
+}
+
+static int clock_debug_enable_set(void *data, u64 val)
+{
+ struct clk *clock = data;
+ int rc = 0;
+
+ if (val)
+ rc = clock->ops->enable(clock->id);
+ else
+ clock->ops->disable(clock->id);
+
+ return rc;
}
+static int clock_debug_enable_get(void *data, u64 *val)
+{
+ struct clk *clock = data;
+
+ *val = clock->ops->is_enabled(clock->id);
+
+ return 0;
+}
+
+static int clock_debug_local_get(void *data, u64 *val)
+{
+ struct clk *clock = data;
+
+ *val = clock->ops != &clk_ops_pcom;
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(clock_rate_fops, clock_debug_rate_get,
+ clock_debug_rate_set, "%llu\n");
+DEFINE_SIMPLE_ATTRIBUTE(clock_enable_fops, clock_debug_enable_get,
+ clock_debug_enable_set, "%llu\n");
+DEFINE_SIMPLE_ATTRIBUTE(clock_local_fops, clock_debug_local_get,
+ NULL, "%llu\n");
+
+static int __init clock_debug_init(void)
+{
+ struct dentry *dent_rate, *dent_enable, *dent_local;
+ struct clk *clock;
+ unsigned n = 0;
+ char temp[50], *ptr;
+
+ dent_rate = debugfs_create_dir("clk_rate", NULL);
+ if (IS_ERR(dent_rate))
+ return PTR_ERR(dent_rate);
+
+ dent_enable = debugfs_create_dir("clk_enable", NULL);
+ if (IS_ERR(dent_enable))
+ return PTR_ERR(dent_enable);
+
+ dent_local = debugfs_create_dir("clk_local", NULL);
+ if (IS_ERR(dent_local))
+ return PTR_ERR(dent_local);
+
+ while ((clock = msm_clock_get_nth(n++)) != NULL) {
+ strncpy(temp, clock->dbg_name, ARRAY_SIZE(temp)-1);
+ temp[ARRAY_SIZE(temp)-1] = '\0';
+ for (ptr = temp; *ptr; ptr++)
+ *ptr = tolower(*ptr);
+ debugfs_create_file(temp, 0644, dent_rate,
+ clock, &clock_rate_fops);
+ debugfs_create_file(temp, 0644, dent_enable,
+ clock, &clock_enable_fops);
+ debugfs_create_file(temp, S_IRUGO, dent_local,
+ clock, &clock_local_fops);
+ }
+ return 0;
+}
+
+device_initcall(clock_debug_init);
+#endif
+
/* The bootloader and/or AMSS may have left various clocks enabled.
* Disable any clocks that belong to us (CLKFLAG_AUTO_OFF) but have
* not been explicitly enabled by a clk_enable() call.
@@ -205,7 +322,7 @@ static int __init clock_late_init(void)
spin_lock_irqsave(&clocks_lock, flags);
if (!clk->count) {
count++;
- pc_clk_disable(clk->id);
+ clk->ops->auto_off(clk->id);
}
spin_unlock_irqrestore(&clocks_lock, flags);
}
@@ -216,3 +333,4 @@ static int __init clock_late_init(void)
}
late_initcall(clock_late_init);
+
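For orientation, here is a minimal consumer-side sketch of the refactored clock API above. The device pointer, clock name and rate are invented for illustration; the point is that clk_set_rate()/clk_enable() now dispatch through clk->ops, and only the first clk_enable() (and the matching last clk_disable()) reaches the hardware because of the per-clock reference count.

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_start_uart_clock(struct device *dev)
{
	struct clk *clk;
	int ret;

	clk = clk_get(dev, "uart_clk");		/* looked up in the per-SoC clock table */
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_set_rate(clk, 1843200);	/* dispatches to clk->ops->set_rate() */
	if (!ret)
		ret = clk_enable(clk);		/* count 0 -> 1: calls clk->ops->enable() */

	if (ret)
		clk_put(clk);
	return ret;
}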
diff --git a/arch/arm/mach-msm/clock.h b/arch/arm/mach-msm/clock.h
index f875e1544e5f..c270b552ed13 100644
--- a/arch/arm/mach-msm/clock.h
+++ b/arch/arm/mach-msm/clock.h
@@ -1,7 +1,7 @@
/* arch/arm/mach-msm/clock.h
*
* Copyright (C) 2007 Google, Inc.
- * Copyright (c) 2007 QUALCOMM Incorporated
+ * Copyright (c) 2007-2010, Code Aurora Forum. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -18,6 +18,10 @@
#define __ARCH_ARM_MACH_MSM_CLOCK_H
#include <linux/list.h>
+#include <mach/clk.h>
+
+#include "clock-pcom.h"
+#include "clock-7x30.h"
#define CLKFLAG_INVERT 0x00000001
#define CLKFLAG_NOINVERT 0x00000002
@@ -25,14 +29,32 @@
#define CLKFLAG_NORESET 0x00000008
#define CLK_FIRST_AVAILABLE_FLAG 0x00000100
-#define CLKFLAG_USE_MIN_MAX_TO_SET 0x00000200
-#define CLKFLAG_AUTO_OFF 0x00000400
+#define CLKFLAG_AUTO_OFF 0x00000200
+#define CLKFLAG_MIN 0x00000400
+#define CLKFLAG_MAX 0x00000800
+
+struct clk_ops {
+ int (*enable)(unsigned id);
+ void (*disable)(unsigned id);
+ void (*auto_off)(unsigned id);
+ int (*reset)(unsigned id, enum clk_reset_action action);
+ int (*set_rate)(unsigned id, unsigned rate);
+ int (*set_min_rate)(unsigned id, unsigned rate);
+ int (*set_max_rate)(unsigned id, unsigned rate);
+ int (*set_flags)(unsigned id, unsigned flags);
+ unsigned (*get_rate)(unsigned id);
+ unsigned (*is_enabled)(unsigned id);
+ long (*round_rate)(unsigned id, unsigned rate);
+};
struct clk {
uint32_t id;
+ uint32_t remote_id;
uint32_t count;
uint32_t flags;
const char *name;
+ struct clk_ops *ops;
+ const char *dbg_name;
struct list_head list;
struct device *dev;
};
@@ -41,8 +63,47 @@ struct clk {
#define A11S_CLK_SEL_ADDR (MSM_CSR_BASE + 0x104)
#define A11S_VDD_SVS_PLEVEL_ADDR (MSM_CSR_BASE + 0x124)
-extern struct clk msm_clocks[];
-extern unsigned msm_num_clocks;
+#ifdef CONFIG_DEBUG_FS
+#define CLOCK_DBG_NAME(x) .dbg_name = x,
+#else
+#define CLOCK_DBG_NAME(x)
+#endif
+
+#define CLOCK(clk_name, clk_id, clk_dev, clk_flags) { \
+ .name = clk_name, \
+ .id = clk_id, \
+ .flags = clk_flags, \
+ .dev = clk_dev, \
+ CLOCK_DBG_NAME(#clk_id) \
+ }
+
+#define OFF CLKFLAG_AUTO_OFF
+#define CLK_MIN CLKFLAG_MIN
+#define CLK_MAX CLKFLAG_MAX
+#define CLK_MINMAX (CLK_MIN | CLK_MAX)
+#define NR_CLKS P_NR_CLKS
+
+enum {
+ PLL_0 = 0,
+ PLL_1,
+ PLL_2,
+ PLL_3,
+ PLL_4,
+ PLL_5,
+ PLL_6,
+ NUM_PLL
+};
+
+enum clkvote_client {
+ CLKVOTE_ACPUCLK = 0,
+ CLKVOTE_PMQOS,
+ CLKVOTE_MAX,
+};
+
+int msm_clock_require_tcxo(unsigned long *reason, int nbits);
+int msm_clock_get_name(uint32_t id, char *name, uint32_t size);
+int ebi1_clk_set_min_rate(enum clkvote_client client, unsigned long rate);
+unsigned long clk_get_max_axi_khz(void);
#endif
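To make the new table format concrete, here is roughly what one CLOCK() entry expands to with CONFIG_DEBUG_FS enabled. UART1_CLK and the uart1 platform device are only examples, and the includes assume the usual mach-msm file context:

#include "clock.h"
#include "devices.h"

/* CLOCK("uart_clk", UART1_CLK, &msm_device_uart1.dev, OFF) is roughly: */
static struct clk example_uart1_clk = {
	.name		= "uart_clk",
	.id		= UART1_CLK,
	.flags		= CLKFLAG_AUTO_OFF,		/* OFF */
	.dev		= &msm_device_uart1.dev,
	.dbg_name	= "UART1_CLK",			/* CLOCK_DBG_NAME(#clk_id) */
	/* .ops/.remote_id are left to the SoC wrapper macros (CLK_PCOM() and
	 * friends) or defaulted to clk_ops_pcom by set_clock_ops() at init. */
};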
diff --git a/arch/arm/mach-msm/devices.c b/arch/arm/mach-msm/devices-msm7x00.c
index 31b6b30e98bf..fde9d8f69f10 100644
--- a/arch/arm/mach-msm/devices.c
+++ b/arch/arm/mach-msm/devices-msm7x00.c
@@ -24,6 +24,10 @@
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
+
+#include "clock.h"
+#include <mach/mmc.h>
+
static struct resource resources_uart1[] = {
{
.start = INT_UART1,
@@ -163,8 +167,19 @@ static struct resource resources_sdc1[] = {
},
{
.start = INT_SDC1_0,
+ .end = INT_SDC1_0,
+ .flags = IORESOURCE_IRQ,
+ .name = "cmd_irq",
+ },
+ {
+ .start = INT_SDC1_1,
.end = INT_SDC1_1,
.flags = IORESOURCE_IRQ,
+ .name = "pio_irq",
+ },
+ {
+ .flags = IORESOURCE_IRQ | IORESOURCE_DISABLED,
+ .name = "status_irq"
},
{
.start = 8,
@@ -181,8 +196,19 @@ static struct resource resources_sdc2[] = {
},
{
.start = INT_SDC2_0,
+ .end = INT_SDC2_0,
+ .flags = IORESOURCE_IRQ,
+ .name = "cmd_irq",
+ },
+ {
+ .start = INT_SDC2_1,
.end = INT_SDC2_1,
.flags = IORESOURCE_IRQ,
+ .name = "pio_irq",
+ },
+ {
+ .flags = IORESOURCE_IRQ | IORESOURCE_DISABLED,
+ .name = "status_irq"
},
{
.start = 8,
@@ -199,8 +225,19 @@ static struct resource resources_sdc3[] = {
},
{
.start = INT_SDC3_0,
+ .end = INT_SDC3_0,
+ .flags = IORESOURCE_IRQ,
+ .name = "cmd_irq",
+ },
+ {
+ .start = INT_SDC3_1,
.end = INT_SDC3_1,
.flags = IORESOURCE_IRQ,
+ .name = "pio_irq",
+ },
+ {
+ .flags = IORESOURCE_IRQ | IORESOURCE_DISABLED,
+ .name = "status_irq"
},
{
.start = 8,
@@ -217,8 +254,19 @@ static struct resource resources_sdc4[] = {
},
{
.start = INT_SDC4_0,
+ .end = INT_SDC4_0,
+ .flags = IORESOURCE_IRQ,
+ .name = "cmd_irq",
+ },
+ {
+ .start = INT_SDC4_1,
.end = INT_SDC4_1,
.flags = IORESOURCE_IRQ,
+ .name = "pio_irq",
+ },
+ {
+ .flags = IORESOURCE_IRQ | IORESOURCE_DISABLED,
+ .name = "status_irq"
},
{
.start = 8,
@@ -266,3 +314,80 @@ struct platform_device msm_device_sdc4 = {
.coherent_dma_mask = 0xffffffff,
},
};
+
+static struct platform_device *msm_sdcc_devices[] __initdata = {
+ &msm_device_sdc1,
+ &msm_device_sdc2,
+ &msm_device_sdc3,
+ &msm_device_sdc4,
+};
+
+int __init msm_add_sdcc(unsigned int controller, struct mmc_platform_data *plat,
+ unsigned int stat_irq, unsigned long stat_irq_flags)
+{
+ struct platform_device *pdev;
+ struct resource *res;
+
+ if (controller < 1 || controller > 4)
+ return -EINVAL;
+
+ pdev = msm_sdcc_devices[controller-1];
+ pdev->dev.platform_data = plat;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "status_irq");
+ if (!res)
+ return -EINVAL;
+ else if (stat_irq) {
+ res->start = res->end = stat_irq;
+ res->flags &= ~IORESOURCE_DISABLED;
+ res->flags |= stat_irq_flags;
+ }
+
+ return platform_device_register(pdev);
+}
+
+struct clk msm_clocks_7x01a[] = {
+ CLK_PCOM("adm_clk", ADM_CLK, NULL, 0),
+ CLK_PCOM("adsp_clk", ADSP_CLK, NULL, 0),
+ CLK_PCOM("ebi1_clk", EBI1_CLK, NULL, 0),
+ CLK_PCOM("ebi2_clk", EBI2_CLK, NULL, 0),
+ CLK_PCOM("ecodec_clk", ECODEC_CLK, NULL, 0),
+ CLK_PCOM("emdh_clk", EMDH_CLK, NULL, OFF),
+ CLK_PCOM("gp_clk", GP_CLK, NULL, 0),
+ CLK_PCOM("grp_clk", GRP_3D_CLK, NULL, OFF),
+ CLK_PCOM("i2c_clk", I2C_CLK, &msm_device_i2c.dev, 0),
+ CLK_PCOM("icodec_rx_clk", ICODEC_RX_CLK, NULL, 0),
+ CLK_PCOM("icodec_tx_clk", ICODEC_TX_CLK, NULL, 0),
+ CLK_PCOM("imem_clk", IMEM_CLK, NULL, OFF),
+ CLK_PCOM("mdc_clk", MDC_CLK, NULL, 0),
+ CLK_PCOM("mdp_clk", MDP_CLK, NULL, OFF),
+ CLK_PCOM("pbus_clk", PBUS_CLK, NULL, 0),
+ CLK_PCOM("pcm_clk", PCM_CLK, NULL, 0),
+ CLK_PCOM("pmdh_clk", PMDH_CLK, NULL, OFF),
+ CLK_PCOM("sdac_clk", SDAC_CLK, NULL, OFF),
+ CLK_PCOM("sdc_clk", SDC1_CLK, &msm_device_sdc1.dev, OFF),
+ CLK_PCOM("sdc_pclk", SDC1_P_CLK, &msm_device_sdc1.dev, OFF),
+ CLK_PCOM("sdc_clk", SDC2_CLK, &msm_device_sdc2.dev, OFF),
+ CLK_PCOM("sdc_pclk", SDC2_P_CLK, &msm_device_sdc2.dev, OFF),
+ CLK_PCOM("sdc_clk", SDC3_CLK, &msm_device_sdc3.dev, OFF),
+ CLK_PCOM("sdc_pclk", SDC3_P_CLK, &msm_device_sdc3.dev, OFF),
+ CLK_PCOM("sdc_clk", SDC4_CLK, &msm_device_sdc4.dev, OFF),
+ CLK_PCOM("sdc_pclk", SDC4_P_CLK, &msm_device_sdc4.dev, OFF),
+ CLK_PCOM("tsif_clk", TSIF_CLK, NULL, 0),
+ CLK_PCOM("tsif_ref_clk", TSIF_REF_CLK, NULL, 0),
+ CLK_PCOM("tv_dac_clk", TV_DAC_CLK, NULL, 0),
+ CLK_PCOM("tv_enc_clk", TV_ENC_CLK, NULL, 0),
+ CLK_PCOM("uart_clk", UART1_CLK, &msm_device_uart1.dev, OFF),
+ CLK_PCOM("uart_clk", UART2_CLK, &msm_device_uart2.dev, 0),
+ CLK_PCOM("uart_clk", UART3_CLK, &msm_device_uart3.dev, OFF),
+ CLK_PCOM("uart1dm_clk", UART1DM_CLK, NULL, OFF),
+ CLK_PCOM("uart2dm_clk", UART2DM_CLK, NULL, 0),
+ CLK_PCOM("usb_hs_clk", USB_HS_CLK, &msm_device_hsusb.dev, OFF),
+ CLK_PCOM("usb_hs_pclk", USB_HS_P_CLK, &msm_device_hsusb.dev, OFF),
+ CLK_PCOM("usb_otg_clk", USB_OTG_CLK, NULL, 0),
+ CLK_PCOM("vdc_clk", VDC_CLK, NULL, OFF),
+ CLK_PCOM("vfe_clk", VFE_CLK, NULL, OFF),
+ CLK_PCOM("vfe_mdc_clk", VFE_MDC_CLK, NULL, OFF),
+};
+
+unsigned msm_num_clocks_7x01a = ARRAY_SIZE(msm_clocks_7x01a);
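As a usage sketch, a board file would register one of these SDCC instances roughly as follows. The GPIO number, trigger flags and platform data are placeholders, and msm_add_sdcc() is assumed to be declared in one of the mach/board headers (it is only defined above in this hunk):

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <mach/irqs.h>
#include <mach/mmc.h>

static int __init example_board_add_sdc1(struct mmc_platform_data *plat)
{
	/* card-detect on GPIO 29 is invented; pass stat_irq = 0 to leave it disabled */
	int ret = msm_add_sdcc(1, plat, MSM_GPIO_TO_INT(29),
			       IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING);

	if (ret)
		pr_err("SDCC1 registration failed: %d\n", ret);
	return ret;
}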
diff --git a/arch/arm/mach-msm/devices-msm7x30.c b/arch/arm/mach-msm/devices-msm7x30.c
new file mode 100644
index 000000000000..b449e8ad2904
--- /dev/null
+++ b/arch/arm/mach-msm/devices-msm7x30.c
@@ -0,0 +1,128 @@
+/*
+ * Copyright (C) 2008 Google, Inc.
+ * Copyright (c) 2008-2010, Code Aurora Forum. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+
+#include <linux/dma-mapping.h>
+#include <mach/irqs.h>
+#include <mach/msm_iomap.h>
+#include <mach/dma.h>
+#include <mach/board.h>
+
+#include "devices.h"
+#include "smd_private.h"
+
+#include <asm/mach/flash.h>
+
+#include "clock-pcom.h"
+
+#include <mach/mmc.h>
+
+static struct resource resources_uart2[] = {
+ {
+ .start = INT_UART2,
+ .end = INT_UART2,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .start = MSM_UART2_PHYS,
+ .end = MSM_UART2_PHYS + MSM_UART2_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+struct platform_device msm_device_uart2 = {
+ .name = "msm_serial",
+ .id = 1,
+ .num_resources = ARRAY_SIZE(resources_uart2),
+ .resource = resources_uart2,
+};
+
+struct clk msm_clocks_7x30[] = {
+ CLK_PCOM("adm_clk", ADM_CLK, NULL, 0),
+ CLK_PCOM("adsp_clk", ADSP_CLK, NULL, 0),
+ CLK_PCOM("cam_m_clk", CAM_M_CLK, NULL, 0),
+ CLK_PCOM("camif_pad_pclk", CAMIF_PAD_P_CLK, NULL, OFF),
+ CLK_PCOM("ebi1_clk", EBI1_CLK, NULL, CLK_MIN),
+ CLK_PCOM("ecodec_clk", ECODEC_CLK, NULL, 0),
+ CLK_PCOM("emdh_clk", EMDH_CLK, NULL, OFF | CLK_MINMAX),
+ CLK_PCOM("emdh_pclk", EMDH_P_CLK, NULL, OFF),
+ CLK_PCOM("gp_clk", GP_CLK, NULL, 0),
+ CLK_PCOM("grp_2d_clk", GRP_2D_CLK, NULL, 0),
+ CLK_PCOM("grp_2d_pclk", GRP_2D_P_CLK, NULL, 0),
+ CLK_PCOM("grp_clk", GRP_3D_CLK, NULL, 0),
+ CLK_PCOM("grp_pclk", GRP_3D_P_CLK, NULL, 0),
+ CLK_7X30S("grp_src_clk", GRP_3D_SRC_CLK, GRP_3D_CLK, NULL, 0),
+ CLK_PCOM("hdmi_clk", HDMI_CLK, NULL, 0),
+ CLK_PCOM("imem_clk", IMEM_CLK, NULL, OFF),
+ CLK_PCOM("jpeg_clk", JPEG_CLK, NULL, OFF),
+ CLK_PCOM("jpeg_pclk", JPEG_P_CLK, NULL, OFF),
+ CLK_PCOM("lpa_codec_clk", LPA_CODEC_CLK, NULL, 0),
+ CLK_PCOM("lpa_core_clk", LPA_CORE_CLK, NULL, 0),
+ CLK_PCOM("lpa_pclk", LPA_P_CLK, NULL, 0),
+ CLK_PCOM("mdc_clk", MDC_CLK, NULL, 0),
+ CLK_PCOM("mddi_clk", PMDH_CLK, NULL, OFF | CLK_MINMAX),
+ CLK_PCOM("mddi_pclk", PMDH_P_CLK, NULL, 0),
+ CLK_PCOM("mdp_clk", MDP_CLK, NULL, OFF),
+ CLK_PCOM("mdp_pclk", MDP_P_CLK, NULL, 0),
+ CLK_PCOM("mdp_lcdc_pclk_clk", MDP_LCDC_PCLK_CLK, NULL, 0),
+ CLK_PCOM("mdp_lcdc_pad_pclk_clk", MDP_LCDC_PAD_PCLK_CLK, NULL, 0),
+ CLK_PCOM("mdp_vsync_clk", MDP_VSYNC_CLK, NULL, 0),
+ CLK_PCOM("mfc_clk", MFC_CLK, NULL, 0),
+ CLK_PCOM("mfc_div2_clk", MFC_DIV2_CLK, NULL, 0),
+ CLK_PCOM("mfc_pclk", MFC_P_CLK, NULL, 0),
+ CLK_PCOM("mi2s_m_clk", MI2S_M_CLK, NULL, 0),
+ CLK_PCOM("mi2s_s_clk", MI2S_S_CLK, NULL, 0),
+ CLK_PCOM("mi2s_codec_rx_m_clk", MI2S_CODEC_RX_M_CLK, NULL, 0),
+ CLK_PCOM("mi2s_codec_rx_s_clk", MI2S_CODEC_RX_S_CLK, NULL, 0),
+ CLK_PCOM("mi2s_codec_tx_m_clk", MI2S_CODEC_TX_M_CLK, NULL, 0),
+ CLK_PCOM("mi2s_codec_tx_s_clk", MI2S_CODEC_TX_S_CLK, NULL, 0),
+ CLK_PCOM("pbus_clk", PBUS_CLK, NULL, CLK_MIN),
+ CLK_PCOM("pcm_clk", PCM_CLK, NULL, 0),
+ CLK_PCOM("rotator_clk", AXI_ROTATOR_CLK, NULL, 0),
+ CLK_PCOM("rotator_imem_clk", ROTATOR_IMEM_CLK, NULL, OFF),
+ CLK_PCOM("rotator_pclk", ROTATOR_P_CLK, NULL, OFF),
+ CLK_PCOM("sdac_clk", SDAC_CLK, NULL, OFF),
+ CLK_PCOM("spi_clk", SPI_CLK, NULL, 0),
+ CLK_PCOM("spi_pclk", SPI_P_CLK, NULL, 0),
+ CLK_7X30S("tv_src_clk", TV_CLK, TV_ENC_CLK, NULL, 0),
+ CLK_PCOM("tv_dac_clk", TV_DAC_CLK, NULL, 0),
+ CLK_PCOM("tv_enc_clk", TV_ENC_CLK, NULL, 0),
+ CLK_PCOM("uart_clk", UART2_CLK, &msm_device_uart2.dev, 0),
+ CLK_PCOM("usb_hs_clk", USB_HS_CLK, NULL, OFF),
+ CLK_PCOM("usb_hs_pclk", USB_HS_P_CLK, NULL, OFF),
+ CLK_PCOM("usb_hs_core_clk", USB_HS_CORE_CLK, NULL, OFF),
+ CLK_PCOM("usb_hs2_clk", USB_HS2_CLK, NULL, OFF),
+ CLK_PCOM("usb_hs2_pclk", USB_HS2_P_CLK, NULL, OFF),
+ CLK_PCOM("usb_hs2_core_clk", USB_HS2_CORE_CLK, NULL, OFF),
+ CLK_PCOM("usb_hs3_clk", USB_HS3_CLK, NULL, OFF),
+ CLK_PCOM("usb_hs3_pclk", USB_HS3_P_CLK, NULL, OFF),
+ CLK_PCOM("usb_hs3_core_clk", USB_HS3_CORE_CLK, NULL, OFF),
+ CLK_PCOM("vdc_clk", VDC_CLK, NULL, OFF | CLK_MIN),
+ CLK_PCOM("vfe_camif_clk", VFE_CAMIF_CLK, NULL, 0),
+ CLK_PCOM("vfe_clk", VFE_CLK, NULL, 0),
+ CLK_PCOM("vfe_mdc_clk", VFE_MDC_CLK, NULL, 0),
+ CLK_PCOM("vfe_pclk", VFE_P_CLK, NULL, OFF),
+ CLK_PCOM("vpe_clk", VPE_CLK, NULL, 0),
+
+ /* 7x30 v2 hardware only. */
+ CLK_PCOM("csi_clk", CSI0_CLK, NULL, 0),
+ CLK_PCOM("csi_pclk", CSI0_P_CLK, NULL, 0),
+ CLK_PCOM("csi_vfe_clk", CSI0_VFE_CLK, NULL, 0),
+};
+
+unsigned msm_num_clocks_7x30 = ARRAY_SIZE(msm_clocks_7x30);
+
diff --git a/arch/arm/mach-msm/devices-qsd8x50.c b/arch/arm/mach-msm/devices-qsd8x50.c
new file mode 100644
index 000000000000..4d4a50785e34
--- /dev/null
+++ b/arch/arm/mach-msm/devices-qsd8x50.c
@@ -0,0 +1,92 @@
+/*
+ * Copyright (C) 2008 Google, Inc.
+ * Copyright (c) 2008-2010, Code Aurora Forum. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+
+#include <linux/dma-mapping.h>
+#include <mach/irqs.h>
+#include <mach/msm_iomap.h>
+#include <mach/dma.h>
+#include <mach/board.h>
+
+#include "devices.h"
+
+#include <asm/mach/flash.h>
+
+#include <mach/mmc.h>
+
+static struct resource resources_uart3[] = {
+ {
+ .start = INT_UART3,
+ .end = INT_UART3,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .start = MSM_UART3_PHYS,
+ .end = MSM_UART3_PHYS + MSM_UART3_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+struct platform_device msm_device_uart3 = {
+ .name = "msm_serial",
+ .id = 2,
+ .num_resources = ARRAY_SIZE(resources_uart3),
+ .resource = resources_uart3,
+};
+
+struct clk msm_clocks_8x50[] = {
+ CLK_PCOM("adm_clk", ADM_CLK, NULL, 0),
+ CLK_PCOM("ebi1_clk", EBI1_CLK, NULL, CLK_MIN),
+ CLK_PCOM("ebi2_clk", EBI2_CLK, NULL, 0),
+ CLK_PCOM("ecodec_clk", ECODEC_CLK, NULL, 0),
+ CLK_PCOM("emdh_clk", EMDH_CLK, NULL, OFF | CLK_MINMAX),
+ CLK_PCOM("gp_clk", GP_CLK, NULL, 0),
+ CLK_PCOM("grp_clk", GRP_3D_CLK, NULL, 0),
+ CLK_PCOM("icodec_rx_clk", ICODEC_RX_CLK, NULL, 0),
+ CLK_PCOM("icodec_tx_clk", ICODEC_TX_CLK, NULL, 0),
+ CLK_PCOM("imem_clk", IMEM_CLK, NULL, OFF),
+ CLK_PCOM("mdc_clk", MDC_CLK, NULL, 0),
+ CLK_PCOM("mddi_clk", PMDH_CLK, NULL, OFF | CLK_MINMAX),
+ CLK_PCOM("mdp_clk", MDP_CLK, NULL, OFF),
+ CLK_PCOM("mdp_lcdc_pclk_clk", MDP_LCDC_PCLK_CLK, NULL, 0),
+ CLK_PCOM("mdp_lcdc_pad_pclk_clk", MDP_LCDC_PAD_PCLK_CLK, NULL, 0),
+ CLK_PCOM("mdp_vsync_clk", MDP_VSYNC_CLK, NULL, 0),
+ CLK_PCOM("pbus_clk", PBUS_CLK, NULL, CLK_MIN),
+ CLK_PCOM("pcm_clk", PCM_CLK, NULL, 0),
+ CLK_PCOM("sdac_clk", SDAC_CLK, NULL, OFF),
+ CLK_PCOM("spi_clk", SPI_CLK, NULL, 0),
+ CLK_PCOM("tsif_clk", TSIF_CLK, NULL, 0),
+ CLK_PCOM("tsif_ref_clk", TSIF_REF_CLK, NULL, 0),
+ CLK_PCOM("tv_dac_clk", TV_DAC_CLK, NULL, 0),
+ CLK_PCOM("tv_enc_clk", TV_ENC_CLK, NULL, 0),
+ CLK_PCOM("uart_clk", UART3_CLK, &msm_device_uart3.dev, OFF),
+ CLK_PCOM("usb_hs_clk", USB_HS_CLK, NULL, OFF),
+ CLK_PCOM("usb_hs_pclk", USB_HS_P_CLK, NULL, OFF),
+ CLK_PCOM("usb_otg_clk", USB_OTG_CLK, NULL, 0),
+ CLK_PCOM("vdc_clk", VDC_CLK, NULL, OFF | CLK_MIN),
+ CLK_PCOM("vfe_clk", VFE_CLK, NULL, OFF),
+ CLK_PCOM("vfe_mdc_clk", VFE_MDC_CLK, NULL, OFF),
+ CLK_PCOM("vfe_axi_clk", VFE_AXI_CLK, NULL, OFF),
+ CLK_PCOM("usb_hs2_clk", USB_HS2_CLK, NULL, OFF),
+ CLK_PCOM("usb_hs2_pclk", USB_HS2_P_CLK, NULL, OFF),
+ CLK_PCOM("usb_hs3_clk", USB_HS3_CLK, NULL, OFF),
+ CLK_PCOM("usb_hs3_pclk", USB_HS3_P_CLK, NULL, OFF),
+ CLK_PCOM("usb_phy_clk", USB_PHY_CLK, NULL, 0),
+};
+
+unsigned msm_num_clocks_8x50 = ARRAY_SIZE(msm_clocks_8x50);
+
diff --git a/arch/arm/mach-msm/devices.h b/arch/arm/mach-msm/devices.h
index 0744c4a27d6a..568443e76423 100644
--- a/arch/arm/mach-msm/devices.h
+++ b/arch/arm/mach-msm/devices.h
@@ -16,6 +16,8 @@
#ifndef __ARCH_ARM_MACH_MSM_DEVICES_H
#define __ARCH_ARM_MACH_MSM_DEVICES_H
+#include "clock.h"
+
extern struct platform_device msm_device_uart1;
extern struct platform_device msm_device_uart2;
extern struct platform_device msm_device_uart3;
@@ -33,4 +35,13 @@ extern struct platform_device msm_device_smd;
extern struct platform_device msm_device_nand;
+extern struct clk msm_clocks_7x01a[];
+extern unsigned msm_num_clocks_7x01a;
+
+extern struct clk msm_clocks_7x30[];
+extern unsigned msm_num_clocks_7x30;
+
+extern struct clk msm_clocks_8x50[];
+extern unsigned msm_num_clocks_8x50;
+
#endif
diff --git a/arch/arm/mach-msm/dma.c b/arch/arm/mach-msm/dma.c
index f5420f9585c5..d029d1f5f9e2 100644
--- a/arch/arm/mach-msm/dma.c
+++ b/arch/arm/mach-msm/dma.c
@@ -13,6 +13,8 @@
*
*/
+#include <linux/clk.h>
+#include <linux/err.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <mach/dma.h>
@@ -26,6 +28,7 @@ enum {
};
static DEFINE_SPINLOCK(msm_dmov_lock);
+static struct clk *msm_dmov_clk;
static unsigned int channel_active;
static struct list_head ready_commands[MSM_DMOV_CHANNEL_COUNT];
static struct list_head active_commands[MSM_DMOV_CHANNEL_COUNT];
@@ -54,6 +57,9 @@ void msm_dmov_enqueue_cmd(unsigned id, struct msm_dmov_cmd *cmd)
unsigned int status;
spin_lock_irqsave(&msm_dmov_lock, irq_flags);
+ if (!channel_active)
+ clk_enable(msm_dmov_clk);
+ dsb();
status = readl(DMOV_STATUS(id));
if (list_empty(&ready_commands[id]) &&
(status & DMOV_STATUS_CMD_PTR_RDY)) {
@@ -63,6 +69,8 @@ void msm_dmov_enqueue_cmd(unsigned id, struct msm_dmov_cmd *cmd)
writel(DMOV_CONFIG_IRQ_EN, DMOV_CONFIG(id));
}
#endif
+ if (cmd->execute_func)
+ cmd->execute_func(cmd);
PRINT_IO("msm_dmov_enqueue_cmd(%d), start command, status %x\n", id, status);
list_add_tail(&cmd->list, &active_commands[id]);
if (!channel_active)
@@ -70,6 +78,8 @@ void msm_dmov_enqueue_cmd(unsigned id, struct msm_dmov_cmd *cmd)
channel_active |= 1U << id;
writel(cmd->cmdptr, DMOV_CMD_PTR(id));
} else {
+ if (!channel_active)
+ clk_disable(msm_dmov_clk);
if (list_empty(&active_commands[id]))
PRINT_ERROR("msm_dmov_enqueue_cmd(%d), error datamover stalled, status %x\n", id, status);
@@ -108,6 +118,7 @@ int msm_dmov_exec_cmd(unsigned id, unsigned int cmdptr)
cmd.dmov_cmd.cmdptr = cmdptr;
cmd.dmov_cmd.complete_func = dmov_exec_cmdptr_complete_func;
+ cmd.dmov_cmd.execute_func = NULL;
cmd.id = id;
init_completion(&cmd.complete);
@@ -165,6 +176,7 @@ static irqreturn_t msm_datamover_irq_handler(int irq, void *dev_id)
"for %p, result %x\n", id, cmd, ch_result);
if (cmd) {
list_del(&cmd->list);
+ dsb();
cmd->complete_func(cmd, ch_result, NULL);
}
}
@@ -181,6 +193,7 @@ static irqreturn_t msm_datamover_irq_handler(int irq, void *dev_id)
PRINT_FLOW("msm_datamover_irq_handler id %d, flush, result %x, flush0 %x\n", id, ch_result, errdata.flush[0]);
if (cmd) {
list_del(&cmd->list);
+ dsb();
cmd->complete_func(cmd, ch_result, &errdata);
}
}
@@ -198,6 +211,7 @@ static irqreturn_t msm_datamover_irq_handler(int irq, void *dev_id)
PRINT_ERROR("msm_datamover_irq_handler id %d, error, result %x, flush0 %x\n", id, ch_result, errdata.flush[0]);
if (cmd) {
list_del(&cmd->list);
+ dsb();
cmd->complete_func(cmd, ch_result, &errdata);
}
/* this does not seem to work, once we get an error */
@@ -210,6 +224,8 @@ static irqreturn_t msm_datamover_irq_handler(int irq, void *dev_id)
cmd = list_entry(ready_commands[id].next, typeof(*cmd), list);
list_del(&cmd->list);
list_add_tail(&cmd->list, &active_commands[id]);
+ if (cmd->execute_func)
+ cmd->execute_func(cmd);
PRINT_FLOW("msm_datamover_irq_handler id %d, start command\n", id);
writel(cmd->cmdptr, DMOV_CMD_PTR(id));
}
@@ -219,8 +235,10 @@ static irqreturn_t msm_datamover_irq_handler(int irq, void *dev_id)
PRINT_FLOW("msm_datamover_irq_handler id %d, status %x\n", id, ch_status);
}
- if (!channel_active)
- disable_irq(INT_ADM_AARM);
+ if (!channel_active) {
+ disable_irq_nosync(INT_ADM_AARM);
+ clk_disable(msm_dmov_clk);
+ }
spin_unlock_irqrestore(&msm_dmov_lock, irq_flags);
return IRQ_HANDLED;
@@ -230,11 +248,17 @@ static int __init msm_init_datamover(void)
{
int i;
int ret;
+ struct clk *clk;
+
for (i = 0; i < MSM_DMOV_CHANNEL_COUNT; i++) {
INIT_LIST_HEAD(&ready_commands[i]);
INIT_LIST_HEAD(&active_commands[i]);
writel(DMOV_CONFIG_IRQ_EN | DMOV_CONFIG_FORCE_TOP_PTR_RSLT | DMOV_CONFIG_FORCE_FLUSH_RSLT, DMOV_CONFIG(i));
}
+ clk = clk_get(NULL, "adm_clk");
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+ msm_dmov_clk = clk;
ret = request_irq(INT_ADM_AARM, msm_datamover_irq_handler, 0, "msmdatamover", NULL);
if (ret)
return ret;
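A sketch of how a client is expected to use the new execute_func hook: the callback is invoked under the dmov spinlock, immediately before the command pointer is written, so per-command controller setup happens while the ADM clock is guaranteed to be on. The structure layout and channel handling below are illustrative only.

#include <linux/dma-mapping.h>
#include <mach/dma.h>

struct example_xfer {
	struct msm_dmov_cmd	dmov_cmd;
	dma_addr_t		cmdptr_phys;	/* physical address of the command-pointer list */
};

static void example_execute(struct msm_dmov_cmd *cmd)
{
	/* last-minute controller programming; runs with the data mover clocked */
}

static void example_complete(struct msm_dmov_cmd *cmd, unsigned int result,
			     struct msm_dmov_errdata *err)
{
	/* reclaim buffers, complete the waiting request, etc. */
}

static void example_submit(struct example_xfer *xfer, unsigned int channel)
{
	xfer->dmov_cmd.cmdptr = DMOV_CMD_PTR_LIST | DMOV_CMD_ADDR(xfer->cmdptr_phys);
	xfer->dmov_cmd.complete_func = example_complete;
	xfer->dmov_cmd.execute_func = example_execute;
	msm_dmov_enqueue_cmd(channel, &xfer->dmov_cmd);
}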
diff --git a/arch/arm/mach-msm/gpio.c b/arch/arm/mach-msm/gpio.c
new file mode 100644
index 000000000000..bc32c845c7b0
--- /dev/null
+++ b/arch/arm/mach-msm/gpio.c
@@ -0,0 +1,85 @@
+/* linux/arch/arm/mach-msm/gpio.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <mach/gpio.h>
+#include "proc_comm.h"
+
+int gpio_tlmm_config(unsigned config, unsigned disable)
+{
+ return msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX, &config, &disable);
+}
+EXPORT_SYMBOL(gpio_tlmm_config);
+
+int msm_gpios_enable(const struct msm_gpio *table, int size)
+{
+ int rc;
+ int i;
+ const struct msm_gpio *g;
+ for (i = 0; i < size; i++) {
+ g = table + i;
+ rc = gpio_tlmm_config(g->gpio_cfg, GPIO_ENABLE);
+ if (rc) {
+ pr_err("gpio_tlmm_config(0x%08x, GPIO_ENABLE)"
+ " <%s> failed: %d\n",
+ g->gpio_cfg, g->label ?: "?", rc);
+ pr_err("pin %d func %d dir %d pull %d drvstr %d\n",
+ GPIO_PIN(g->gpio_cfg), GPIO_FUNC(g->gpio_cfg),
+ GPIO_DIR(g->gpio_cfg), GPIO_PULL(g->gpio_cfg),
+ GPIO_DRVSTR(g->gpio_cfg));
+ goto err;
+ }
+ }
+ return 0;
+err:
+ msm_gpios_disable(table, i);
+ return rc;
+}
+EXPORT_SYMBOL(msm_gpios_enable);
+
+void msm_gpios_disable(const struct msm_gpio *table, int size)
+{
+ int rc;
+ int i;
+ const struct msm_gpio *g;
+ for (i = size-1; i >= 0; i--) {
+ g = table + i;
+ rc = gpio_tlmm_config(g->gpio_cfg, GPIO_DISABLE);
+ if (rc) {
+ pr_err("gpio_tlmm_config(0x%08x, GPIO_DISABLE)"
+ " <%s> failed: %d\n",
+ g->gpio_cfg, g->label ?: "?", rc);
+ pr_err("pin %d func %d dir %d pull %d drvstr %d\n",
+ GPIO_PIN(g->gpio_cfg), GPIO_FUNC(g->gpio_cfg),
+ GPIO_DIR(g->gpio_cfg), GPIO_PULL(g->gpio_cfg),
+ GPIO_DRVSTR(g->gpio_cfg));
+ }
+ }
+}
+EXPORT_SYMBOL(msm_gpios_disable);
+
+int msm_gpios_request_enable(const struct msm_gpio *table, int size)
+{
+ int rc = msm_gpios_enable(table, size);
+ return rc;
+}
+EXPORT_SYMBOL(msm_gpios_request_enable);
+
+void msm_gpios_disable_free(const struct msm_gpio *table, int size)
+{
+ msm_gpios_disable(table, size);
+}
+EXPORT_SYMBOL(msm_gpios_disable_free);
diff --git a/arch/arm/mach-msm/include/mach/board.h b/arch/arm/mach-msm/include/mach/board.h
index 264d62e519f3..e302fbdc439b 100644
--- a/arch/arm/mach-msm/include/mach/board.h
+++ b/arch/arm/mach-msm/include/mach/board.h
@@ -21,18 +21,24 @@
/* platform device data structures */
-struct msm_mddi_platform_data
+struct msm_acpu_clock_platform_data
{
- void (*panel_power)(int on);
- unsigned has_vsync_irq:1;
+ uint32_t acpu_switch_time_us;
+ uint32_t max_speed_delta_khz;
+ uint32_t vdd_switch_time_us;
+ unsigned long power_collapse_khz;
+ unsigned long wait_for_irq_khz;
};
+struct clk;
+
/* common init routines for use by arch/arm/mach-msm/board-*.c */
void __init msm_add_devices(void);
void __init msm_map_common_io(void);
void __init msm_init_irq(void);
void __init msm_init_gpio(void);
-void __init msm_clock_init(void);
+void __init msm_clock_init(struct clk *clock_tbl, unsigned num_clocks);
+void __init msm_acpu_clock_init(struct msm_acpu_clock_platform_data *);
#endif
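With the prototype change above, each machine now passes its own clock table down; a minimal board-init sketch (the board itself is hypothetical, the 7x01a table and count come from devices.h):

#include <mach/board.h>

#include "devices.h"

static void __init example_msm7x01a_init(void)
{
	msm_add_devices();
	msm_clock_init(msm_clocks_7x01a, msm_num_clocks_7x01a);
}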
diff --git a/arch/arm/mach-msm/include/mach/clk.h b/arch/arm/mach-msm/include/mach/clk.h
new file mode 100644
index 000000000000..c05ca40478c7
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/clk.h
@@ -0,0 +1,57 @@
+/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of Code Aurora Forum, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#ifndef __MACH_CLK_H
+#define __MACH_CLK_H
+
+/* Magic rate value for use with PM QOS to request the board's maximum
+ * supported AXI rate. PM QOS will only pass positive s32 rate values
+ * through to the clock driver, so INT_MAX is used.
+ */
+#define MSM_AXI_MAX_FREQ LONG_MAX
+
+enum clk_reset_action {
+ CLK_RESET_DEASSERT = 0,
+ CLK_RESET_ASSERT = 1
+};
+
+struct clk;
+
+/* Rate is minimum clock rate in Hz */
+int clk_set_min_rate(struct clk *clk, unsigned long rate);
+
+/* Rate is maximum clock rate in Hz */
+int clk_set_max_rate(struct clk *clk, unsigned long rate);
+
+/* Assert/Deassert reset to a hardware block associated with a clock */
+int clk_reset(struct clk *clk, enum clk_reset_action action);
+
+/* Set clock-specific configuration parameters */
+int clk_set_flags(struct clk *clk, unsigned long flags);
+
+#endif
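A short sketch of the MSM-specific calls declared here; the clock handle, rates and reset sequencing are illustrative rather than taken from a real driver:

#include <linux/clk.h>
#include <mach/clk.h>

static int example_reprogram_block(struct clk *clk)
{
	int ret;

	ret = clk_reset(clk, CLK_RESET_ASSERT);		/* hold the block in reset */
	if (ret)
		return ret;

	ret = clk_set_min_rate(clk, 61440000);		/* floor, in Hz */
	if (!ret)
		ret = clk_set_max_rate(clk, 122880000);	/* ceiling, in Hz */
	if (!ret)
		ret = clk_reset(clk, CLK_RESET_DEASSERT);

	return ret;
}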
diff --git a/arch/arm/mach-msm/include/mach/dma.h b/arch/arm/mach-msm/include/mach/dma.h
index 5ab5bdffab07..00f9bbfadbe6 100644
--- a/arch/arm/mach-msm/include/mach/dma.h
+++ b/arch/arm/mach-msm/include/mach/dma.h
@@ -28,6 +28,8 @@ struct msm_dmov_cmd {
void (*complete_func)(struct msm_dmov_cmd *cmd,
unsigned int result,
struct msm_dmov_errdata *err);
+ void (*execute_func)(struct msm_dmov_cmd *cmd);
+ void *data;
};
void msm_dmov_enqueue_cmd(unsigned id, struct msm_dmov_cmd *cmd);
@@ -41,40 +43,42 @@ int msm_dmov_exec_cmd(unsigned id, unsigned int cmdptr);
#define DMOV_SD2(off, ch) (MSM_DMOV_BASE + 0x0800 + (off) + ((ch) << 2))
#define DMOV_SD3(off, ch) (MSM_DMOV_BASE + 0x0C00 + (off) + ((ch) << 2))
-/* only security domain 3 is available to the ARM11
- * SD0 -> mARM trusted, SD1 -> mARM nontrusted, SD2 -> aDSP, SD3 -> aARM
- */
+#if defined(CONFIG_ARCH_MSM7X30)
+#define DMOV_SD_AARM DMOV_SD2
+#else
+#define DMOV_SD_AARM DMOV_SD3
+#endif
-#define DMOV_CMD_PTR(ch) DMOV_SD3(0x000, ch)
+#define DMOV_CMD_PTR(ch) DMOV_SD_AARM(0x000, ch)
#define DMOV_CMD_LIST (0 << 29) /* does not work */
#define DMOV_CMD_PTR_LIST (1 << 29) /* works */
#define DMOV_CMD_INPUT_CFG (2 << 29) /* untested */
#define DMOV_CMD_OUTPUT_CFG (3 << 29) /* untested */
#define DMOV_CMD_ADDR(addr) ((addr) >> 3)
-#define DMOV_RSLT(ch) DMOV_SD3(0x040, ch)
+#define DMOV_RSLT(ch) DMOV_SD_AARM(0x040, ch)
#define DMOV_RSLT_VALID (1 << 31) /* 0 == host has emptied the result fifo */
#define DMOV_RSLT_ERROR (1 << 3)
#define DMOV_RSLT_FLUSH (1 << 2)
#define DMOV_RSLT_DONE (1 << 1) /* top pointer done */
#define DMOV_RSLT_USER (1 << 0) /* command with FR force result */
-#define DMOV_FLUSH0(ch) DMOV_SD3(0x080, ch)
-#define DMOV_FLUSH1(ch) DMOV_SD3(0x0C0, ch)
-#define DMOV_FLUSH2(ch) DMOV_SD3(0x100, ch)
-#define DMOV_FLUSH3(ch) DMOV_SD3(0x140, ch)
-#define DMOV_FLUSH4(ch) DMOV_SD3(0x180, ch)
-#define DMOV_FLUSH5(ch) DMOV_SD3(0x1C0, ch)
+#define DMOV_FLUSH0(ch) DMOV_SD_AARM(0x080, ch)
+#define DMOV_FLUSH1(ch) DMOV_SD_AARM(0x0C0, ch)
+#define DMOV_FLUSH2(ch) DMOV_SD_AARM(0x100, ch)
+#define DMOV_FLUSH3(ch) DMOV_SD_AARM(0x140, ch)
+#define DMOV_FLUSH4(ch) DMOV_SD_AARM(0x180, ch)
+#define DMOV_FLUSH5(ch) DMOV_SD_AARM(0x1C0, ch)
-#define DMOV_STATUS(ch) DMOV_SD3(0x200, ch)
+#define DMOV_STATUS(ch) DMOV_SD_AARM(0x200, ch)
#define DMOV_STATUS_RSLT_COUNT(n) (((n) >> 29))
#define DMOV_STATUS_CMD_COUNT(n) (((n) >> 27) & 3)
#define DMOV_STATUS_RSLT_VALID (1 << 1)
#define DMOV_STATUS_CMD_PTR_RDY (1 << 0)
-#define DMOV_ISR DMOV_SD3(0x380, 0)
-
-#define DMOV_CONFIG(ch) DMOV_SD3(0x300, ch)
+#define DMOV_ISR DMOV_SD_AARM(0x380, 0)
+
+#define DMOV_CONFIG(ch) DMOV_SD_AARM(0x300, ch)
#define DMOV_CONFIG_FORCE_TOP_PTR_RSLT (1 << 2)
#define DMOV_CONFIG_FORCE_FLUSH_RSLT (1 << 1)
#define DMOV_CONFIG_IRQ_EN (1 << 0)
diff --git a/arch/arm/mach-msm/include/mach/gpio.h b/arch/arm/mach-msm/include/mach/gpio.h
new file mode 100644
index 000000000000..262b441b4374
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/gpio.h
@@ -0,0 +1,142 @@
+/*
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2009-2010, Code Aurora Forum. All rights reserved.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __ASM_ARCH_MSM_GPIO_H
+#define __ASM_ARCH_MSM_GPIO_H
+
+/**
+ * struct msm_gpio - GPIO pin description
+ * @gpio_cfg: configuration bitmap, as per gpio_tlmm_config()
+ * @label: textual label
+ *
+ * GPIOs are usually operated on in sets.
+ * This struct gathers all the information for one GPIO in a single place
+ * and facilitates the group operations provided by msm_gpios_xxx().
+ */
+struct msm_gpio {
+ u32 gpio_cfg;
+ const char *label;
+};
+
+/**
+ * msm_gpios_request_enable() - request and enable a set of GPIOs
+ *
+ * Requests and configures a set of GPIOs.
+ * In case of error, all operations are rolled back.
+ * Returns 0 on success or an error code.
+ *
+ * @table: GPIO table
+ * @size: number of entries in @table
+ */
+int msm_gpios_request_enable(const struct msm_gpio *table, int size);
+
+/**
+ * msm_gpios_disable_free() - disable and free a set of GPIOs
+ *
+ * @table: GPIO table
+ * @size: number of entries in @table
+ */
+void msm_gpios_disable_free(const struct msm_gpio *table, int size);
+
+/**
+ * msm_gpios_request() - request a set of GPIOs
+ * In case of error, all operations are rolled back.
+ * Returns 0 on success or an error code.
+ *
+ * @table: GPIO table
+ * @size: number of entries in @table
+ */
+int msm_gpios_request(const struct msm_gpio *table, int size);
+
+/**
+ * msm_gpios_free() - free a set of GPIOs
+ *
+ * @table: GPIO table
+ * @size: number of entries in @table
+ */
+void msm_gpios_free(const struct msm_gpio *table, int size);
+
+/**
+ * msm_gpios_enable() - enable a set of GPIOs
+ * In case of error, all operations are rolled back.
+ * Returns 0 on success or an error code.
+ *
+ * @table: GPIO table
+ * @size: number of entries in @table
+ */
+int msm_gpios_enable(const struct msm_gpio *table, int size);
+
+/**
+ * msm_gpios_disable() - disable a set of GPIOs
+ *
+ * @table: GPIO table
+ * @size: number of entries in @table
+ */
+void msm_gpios_disable(const struct msm_gpio *table, int size);
+
+/* GPIO TLMM (Top Level Multiplexing) Definitions */
+
+/* GPIO TLMM: Function -- GPIO specific */
+
+/* GPIO TLMM: Direction */
+enum {
+ GPIO_INPUT,
+ GPIO_OUTPUT,
+};
+
+/* GPIO TLMM: Pullup/Pulldown */
+enum {
+ GPIO_NO_PULL,
+ GPIO_PULL_DOWN,
+ GPIO_KEEPER,
+ GPIO_PULL_UP,
+};
+
+/* GPIO TLMM: Drive Strength */
+enum {
+ GPIO_2MA,
+ GPIO_4MA,
+ GPIO_6MA,
+ GPIO_8MA,
+ GPIO_10MA,
+ GPIO_12MA,
+ GPIO_14MA,
+ GPIO_16MA,
+};
+
+enum {
+ GPIO_ENABLE,
+ GPIO_DISABLE,
+};
+
+#define GPIO_CFG(gpio, func, dir, pull, drvstr) \
+ ((((gpio) & 0x3FF) << 4) | \
+ ((func) & 0xf) | \
+ (((dir) & 0x1) << 14) | \
+ (((pull) & 0x3) << 15) | \
+ (((drvstr) & 0xF) << 17))
+
+/**
+ * Extract the individual fields from a gpio_tlmm_config() bitmap.
+ */
+#define GPIO_PIN(gpio_cfg) (((gpio_cfg) >> 4) & 0x3ff)
+#define GPIO_FUNC(gpio_cfg) (((gpio_cfg) >> 0) & 0xf)
+#define GPIO_DIR(gpio_cfg) (((gpio_cfg) >> 14) & 0x1)
+#define GPIO_PULL(gpio_cfg) (((gpio_cfg) >> 15) & 0x3)
+#define GPIO_DRVSTR(gpio_cfg) (((gpio_cfg) >> 17) & 0xf)
+
+int gpio_tlmm_config(unsigned config, unsigned disable);
+
+#endif /* __ASM_ARCH_MSM_GPIO_H */
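Tying the pieces together, a hypothetical board table built with GPIO_CFG() and handed to the group helpers; pin numbers, mux functions and labels are made up:

#include <linux/kernel.h>
#include <mach/gpio.h>

static const struct msm_gpio example_uart_gpios[] = {
	{ GPIO_CFG(45, 1, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_2MA), "uart_tx" },
	{ GPIO_CFG(46, 1, GPIO_INPUT,  GPIO_PULL_UP, GPIO_2MA), "uart_rx" },
};

static int example_setup_uart_pins(void)
{
	/* configures both pins, or rolls everything back and returns an error */
	return msm_gpios_request_enable(example_uart_gpios,
					ARRAY_SIZE(example_uart_gpios));
}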
diff --git a/arch/arm/mach-msm/include/mach/io.h b/arch/arm/mach-msm/include/mach/io.h
index aab964591db4..c35b29f9ac0f 100644
--- a/arch/arm/mach-msm/include/mach/io.h
+++ b/arch/arm/mach-msm/include/mach/io.h
@@ -26,4 +26,9 @@ void __iomem *__msm_ioremap(unsigned long phys_addr, size_t size, unsigned int m
#define __io(a) __typesafe_io(a)
#define __mem_pci(a) (a)
+void msm_map_qsd8x50_io(void);
+void msm_map_msm7x30_io(void);
+
+extern unsigned int msm_shared_ram_phys;
+
#endif
diff --git a/arch/arm/mach-msm/include/mach/irqs-7x00.h b/arch/arm/mach-msm/include/mach/irqs-7x00.h
new file mode 100644
index 000000000000..f1fe70612fe9
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/irqs-7x00.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ * Author: Brian Swetland <swetland@google.com>
+ */
+
+#ifndef __ASM_ARCH_MSM_IRQS_7X00_H
+#define __ASM_ARCH_MSM_IRQS_7X00_H
+
+/* MSM ARM11 Interrupt Numbers */
+/* See 80-VE113-1 A, pp219-221 */
+
+#define INT_A9_M2A_0 0
+#define INT_A9_M2A_1 1
+#define INT_A9_M2A_2 2
+#define INT_A9_M2A_3 3
+#define INT_A9_M2A_4 4
+#define INT_A9_M2A_5 5
+#define INT_A9_M2A_6 6
+#define INT_GP_TIMER_EXP 7
+#define INT_DEBUG_TIMER_EXP 8
+#define INT_UART1 9
+#define INT_UART2 10
+#define INT_UART3 11
+#define INT_UART1_RX 12
+#define INT_UART2_RX 13
+#define INT_UART3_RX 14
+#define INT_USB_OTG 15
+#define INT_MDDI_PRI 16
+#define INT_MDDI_EXT 17
+#define INT_MDDI_CLIENT 18
+#define INT_MDP 19
+#define INT_GRAPHICS 20
+#define INT_ADM_AARM 21
+#define INT_ADSP_A11 22
+#define INT_ADSP_A9_A11 23
+#define INT_SDC1_0 24
+#define INT_SDC1_1 25
+#define INT_SDC2_0 26
+#define INT_SDC2_1 27
+#define INT_KEYSENSE 28
+#define INT_TCHSCRN_SSBI 29
+#define INT_TCHSCRN1 30
+#define INT_TCHSCRN2 31
+
+#define INT_GPIO_GROUP1 (32 + 0)
+#define INT_GPIO_GROUP2 (32 + 1)
+#define INT_PWB_I2C (32 + 2)
+#define INT_SOFTRESET (32 + 3)
+#define INT_NAND_WR_ER_DONE (32 + 4)
+#define INT_NAND_OP_DONE (32 + 5)
+#define INT_PBUS_ARM11 (32 + 6)
+#define INT_AXI_MPU_SMI (32 + 7)
+#define INT_AXI_MPU_EBI1 (32 + 8)
+#define INT_AD_HSSD (32 + 9)
+#define INT_ARM11_PMU (32 + 10)
+#define INT_ARM11_DMA (32 + 11)
+#define INT_TSIF_IRQ (32 + 12)
+#define INT_UART1DM_IRQ (32 + 13)
+#define INT_UART1DM_RX (32 + 14)
+#define INT_USB_HS (32 + 15)
+#define INT_SDC3_0 (32 + 16)
+#define INT_SDC3_1 (32 + 17)
+#define INT_SDC4_0 (32 + 18)
+#define INT_SDC4_1 (32 + 19)
+#define INT_UART2DM_RX (32 + 20)
+#define INT_UART2DM_IRQ (32 + 21)
+
+/* 22-31 are reserved */
+
+#define NR_MSM_IRQS 64
+#define NR_GPIO_IRQS 122
+#define NR_BOARD_IRQS 64
+
+#endif
diff --git a/arch/arm/mach-msm/include/mach/irqs-7x30.h b/arch/arm/mach-msm/include/mach/irqs-7x30.h
new file mode 100644
index 000000000000..67c5396514fe
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/irqs-7x30.h
@@ -0,0 +1,170 @@
+/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of Code Aurora Forum, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __ASM_ARCH_MSM_IRQS_7X30_H
+#define __ASM_ARCH_MSM_IRQS_7X30_H
+
+/* MSM ACPU Interrupt Numbers */
+
+#define INT_DEBUG_TIMER_EXP 0
+#define INT_GPT0_TIMER_EXP 1
+#define INT_GPT1_TIMER_EXP 2
+#define INT_WDT0_ACCSCSSBARK 3
+#define INT_WDT1_ACCSCSSBARK 4
+#define INT_AVS_SVIC 5
+#define INT_AVS_SVIC_SW_DONE 6
+#define INT_SC_DBG_RX_FULL 7
+#define INT_SC_DBG_TX_EMPTY 8
+#define INT_ARM11_PM 9
+#define INT_AVS_REQ_DOWN 10
+#define INT_AVS_REQ_UP 11
+#define INT_SC_ACG 12
+/* SCSS_VICFIQSTS0[13:15] are RESERVED */
+#define INT_L2_SVICCPUIRPTREQ 16
+#define INT_L2_SVICDMANSIRPTREQ 17
+#define INT_L2_SVICDMASIRPTREQ 18
+#define INT_L2_SVICSLVIRPTREQ 19
+#define INT_AD5A_MPROC_APPS_0 20
+#define INT_AD5A_MPROC_APPS_1 21
+#define INT_A9_M2A_0 22
+#define INT_A9_M2A_1 23
+#define INT_A9_M2A_2 24
+#define INT_A9_M2A_3 25
+#define INT_A9_M2A_4 26
+#define INT_A9_M2A_5 27
+#define INT_A9_M2A_6 28
+#define INT_A9_M2A_7 29
+#define INT_A9_M2A_8 30
+#define INT_A9_M2A_9 31
+
+#define INT_AXI_EBI1_SC (32 + 0)
+#define INT_IMEM_ERR (32 + 1)
+#define INT_AXI_EBI0_SC (32 + 2)
+#define INT_PBUS_SC_IRQC (32 + 3)
+#define INT_PERPH_BUS_BPM (32 + 4)
+#define INT_CC_TEMP_SENSE (32 + 5)
+#define INT_UXMC_EBI0 (32 + 6)
+#define INT_UXMC_EBI1 (32 + 7)
+#define INT_EBI2_OP_DONE (32 + 8)
+#define INT_EBI2_WR_ER_DONE (32 + 9)
+#define INT_TCSR_SPSS_CE (32 + 10)
+#define INT_EMDH (32 + 11)
+#define INT_PMDH (32 + 12)
+#define INT_MDC (32 + 13)
+#define INT_MIDI_TO_SUPSS (32 + 14)
+#define INT_LPA_2 (32 + 15)
+#define INT_GPIO_GROUP1_SECURE (32 + 16)
+#define INT_GPIO_GROUP2_SECURE (32 + 17)
+#define INT_GPIO_GROUP1 (32 + 18)
+#define INT_GPIO_GROUP2 (32 + 19)
+#define INT_MPRPH_SOFTRESET (32 + 20)
+#define INT_PWB_I2C (32 + 21)
+#define INT_PWB_I2C_2 (32 + 22)
+#define INT_TSSC_SAMPLE (32 + 23)
+#define INT_TSSC_PENUP (32 + 24)
+#define INT_TCHSCRN_SSBI (32 + 25)
+#define INT_FM_RDS (32 + 26)
+#define INT_KEYSENSE (32 + 27)
+#define INT_USB_OTG_HS (32 + 28)
+#define INT_USB_OTG_HS2 (32 + 29)
+#define INT_USB_OTG_HS3 (32 + 30)
+#define INT_CSI (32 + 31)
+
+#define INT_SPI_OUTPUT (64 + 0)
+#define INT_SPI_INPUT (64 + 1)
+#define INT_SPI_ERROR (64 + 2)
+#define INT_UART1 (64 + 3)
+#define INT_UART1_RX (64 + 4)
+#define INT_UART2 (64 + 5)
+#define INT_UART2_RX (64 + 6)
+#define INT_UART3 (64 + 7)
+#define INT_UART3_RX (64 + 8)
+#define INT_UART1DM_IRQ (64 + 9)
+#define INT_UART1DM_RX (64 + 10)
+#define INT_UART2DM_IRQ (64 + 11)
+#define INT_UART2DM_RX (64 + 12)
+#define INT_TSIF (64 + 13)
+#define INT_ADM_SC1 (64 + 14)
+#define INT_ADM_SC2 (64 + 15)
+#define INT_MDP (64 + 16)
+#define INT_VPE (64 + 17)
+#define INT_GRP_2D (64 + 18)
+#define INT_GRP_3D (64 + 19)
+#define INT_ROTATOR (64 + 20)
+#define INT_MFC720 (64 + 21)
+#define INT_JPEG (64 + 22)
+#define INT_VFE (64 + 23)
+#define INT_TV_ENC (64 + 24)
+#define INT_PMIC_SSBI (64 + 25)
+#define INT_MPM_1 (64 + 26)
+#define INT_TCSR_SPSS_SAMPLE (64 + 27)
+#define INT_TCSR_SPSS_PENUP (64 + 28)
+#define INT_MPM_2 (64 + 29)
+#define INT_SDC1_0 (64 + 30)
+#define INT_SDC1_1 (64 + 31)
+
+#define INT_SDC3_0 (96 + 0)
+#define INT_SDC3_1 (96 + 1)
+#define INT_SDC2_0 (96 + 2)
+#define INT_SDC2_1 (96 + 3)
+#define INT_SDC4_0 (96 + 4)
+#define INT_SDC4_1 (96 + 5)
+#define INT_PWB_QUP_IN (96 + 6)
+#define INT_PWB_QUP_OUT (96 + 7)
+#define INT_PWB_QUP_ERR (96 + 8)
+#define INT_SCSS_WDT0_BITE (96 + 9)
+/* SCSS_VICFIQSTS3[10:31] are RESERVED */
+
+/* Retrofit universal macro names */
+#define INT_ADM_AARM INT_ADM_SC2
+#define INT_USB_HS INT_USB_OTG_HS
+#define INT_USB_OTG INT_USB_OTG_HS
+#define INT_TCHSCRN1 INT_TSSC_SAMPLE
+#define INT_TCHSCRN2 INT_TSSC_PENUP
+#define INT_GP_TIMER_EXP INT_GPT0_TIMER_EXP
+#define INT_ADSP_A11 INT_AD5A_MPROC_APPS_0
+#define INT_ADSP_A9_A11 INT_AD5A_MPROC_APPS_1
+#define INT_MDDI_EXT INT_EMDH
+#define INT_MDDI_PRI INT_PMDH
+#define INT_MDDI_CLIENT INT_MDC
+#define INT_NAND_WR_ER_DONE INT_EBI2_WR_ER_DONE
+#define INT_NAND_OP_DONE INT_EBI2_OP_DONE
+
+#define NR_MSM_IRQS 128
+#define NR_GPIO_IRQS 182
+#define PMIC8058_IRQ_BASE (NR_MSM_IRQS + NR_GPIO_IRQS)
+#define NR_PMIC8058_GPIO_IRQS 40
+#define NR_PMIC8058_MPP_IRQS 12
+#define NR_PMIC8058_MISC_IRQS 8
+#define NR_PMIC8058_IRQS (NR_PMIC8058_GPIO_IRQS +\
+ NR_PMIC8058_MPP_IRQS +\
+ NR_PMIC8058_MISC_IRQS)
+#define NR_BOARD_IRQS NR_PMIC8058_IRQS
+
+#endif /* __ASM_ARCH_MSM_IRQS_7X30_H */
diff --git a/arch/arm/mach-msm/include/mach/irqs-8x50.h b/arch/arm/mach-msm/include/mach/irqs-8x50.h
new file mode 100644
index 000000000000..de3d8fe24e4e
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/irqs-8x50.h
@@ -0,0 +1,105 @@
+/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of Code Aurora Forum, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __ASM_ARCH_MSM_IRQS_8XXX_H
+#define __ASM_ARCH_MSM_IRQS_8XXX_H
+
+/* MSM ACPU Interrupt Numbers */
+
+#define INT_A9_M2A_0 0
+#define INT_A9_M2A_1 1
+#define INT_A9_M2A_2 2
+#define INT_A9_M2A_3 3
+#define INT_A9_M2A_4 4
+#define INT_A9_M2A_5 5
+#define INT_A9_M2A_6 6
+#define INT_GP_TIMER_EXP 7
+#define INT_DEBUG_TIMER_EXP 8
+#define INT_SIRC_0 9
+#define INT_SDC3_0 10
+#define INT_SDC3_1 11
+#define INT_SDC4_0 12
+#define INT_SDC4_1 13
+#define INT_AD6_EXT_VFR 14
+#define INT_USB_OTG 15
+#define INT_MDDI_PRI 16
+#define INT_MDDI_EXT 17
+#define INT_MDDI_CLIENT 18
+#define INT_MDP 19
+#define INT_GRAPHICS 20
+#define INT_ADM_AARM 21
+#define INT_ADSP_A11 22
+#define INT_ADSP_A9_A11 23
+#define INT_SDC1_0 24
+#define INT_SDC1_1 25
+#define INT_SDC2_0 26
+#define INT_SDC2_1 27
+#define INT_KEYSENSE 28
+#define INT_TCHSCRN_SSBI 29
+#define INT_TCHSCRN1 30
+#define INT_TCHSCRN2 31
+
+#define INT_TCSR_MPRPH_SC1 (32 + 0)
+#define INT_USB_FS2 (32 + 1)
+#define INT_PWB_I2C (32 + 2)
+#define INT_SOFTRESET (32 + 3)
+#define INT_NAND_WR_ER_DONE (32 + 4)
+#define INT_NAND_OP_DONE (32 + 5)
+#define INT_TCSR_MPRPH_SC2 (32 + 6)
+#define INT_OP_PEN (32 + 7)
+#define INT_AD_HSSD (32 + 8)
+#define INT_ARM11_PM (32 + 9)
+#define INT_SDMA_NON_SECURE (32 + 10)
+#define INT_TSIF_IRQ (32 + 11)
+#define INT_UART1DM_IRQ (32 + 12)
+#define INT_UART1DM_RX (32 + 13)
+#define INT_SDMA_SECURE (32 + 14)
+#define INT_SI2S_SLAVE (32 + 15)
+#define INT_SC_I2CPU (32 + 16)
+#define INT_SC_DBG_RDTRFULL (32 + 17)
+#define INT_SC_DBG_WDTRFULL (32 + 18)
+#define INT_SCPLL_CTL_DONE (32 + 19)
+#define INT_UART2DM_IRQ (32 + 20)
+#define INT_UART2DM_RX (32 + 21)
+#define INT_VDC_MEC (32 + 22)
+#define INT_VDC_DB (32 + 23)
+#define INT_VDC_AXI (32 + 24)
+#define INT_VFE (32 + 25)
+#define INT_USB_HS (32 + 26)
+#define INT_AUDIO_OUT0 (32 + 27)
+#define INT_AUDIO_OUT1 (32 + 28)
+#define INT_CRYPTO (32 + 29)
+#define INT_AD6M_IDLE (32 + 30)
+#define INT_SIRC_1 (32 + 31)
+
+#define NR_GPIO_IRQS 165
+#define NR_MSM_IRQS 64
+#define NR_BOARD_IRQS 64
+
+#endif
diff --git a/arch/arm/mach-msm/include/mach/irqs.h b/arch/arm/mach-msm/include/mach/irqs.h
index 9dd4cf8a2693..164d355c96ea 100644
--- a/arch/arm/mach-msm/include/mach/irqs.h
+++ b/arch/arm/mach-msm/include/mach/irqs.h
@@ -1,6 +1,6 @@
-/* arch/arm/mach-msm/include/mach/irqs.h
- *
+/*
* Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2008-2010, Code Aurora Forum. All rights reserved.
* Author: Brian Swetland <swetland@google.com>
*
* This software is licensed under the terms of the GNU General Public
@@ -17,74 +17,21 @@
#ifndef __ASM_ARCH_MSM_IRQS_H
#define __ASM_ARCH_MSM_IRQS_H
-/* MSM ARM11 Interrupt Numbers */
-/* See 80-VE113-1 A, pp219-221 */
-
-#define INT_A9_M2A_0 0
-#define INT_A9_M2A_1 1
-#define INT_A9_M2A_2 2
-#define INT_A9_M2A_3 3
-#define INT_A9_M2A_4 4
-#define INT_A9_M2A_5 5
-#define INT_A9_M2A_6 6
-#define INT_GP_TIMER_EXP 7
-#define INT_DEBUG_TIMER_EXP 8
-#define INT_UART1 9
-#define INT_UART2 10
-#define INT_UART3 11
-#define INT_UART1_RX 12
-#define INT_UART2_RX 13
-#define INT_UART3_RX 14
-#define INT_USB_OTG 15
-#define INT_MDDI_PRI 16
-#define INT_MDDI_EXT 17
-#define INT_MDDI_CLIENT 18
-#define INT_MDP 19
-#define INT_GRAPHICS 20
-#define INT_ADM_AARM 21
-#define INT_ADSP_A11 22
-#define INT_ADSP_A9_A11 23
-#define INT_SDC1_0 24
-#define INT_SDC1_1 25
-#define INT_SDC2_0 26
-#define INT_SDC2_1 27
-#define INT_KEYSENSE 28
-#define INT_TCHSCRN_SSBI 29
-#define INT_TCHSCRN1 30
-#define INT_TCHSCRN2 31
-
-#define INT_GPIO_GROUP1 (32 + 0)
-#define INT_GPIO_GROUP2 (32 + 1)
-#define INT_PWB_I2C (32 + 2)
-#define INT_SOFTRESET (32 + 3)
-#define INT_NAND_WR_ER_DONE (32 + 4)
-#define INT_NAND_OP_DONE (32 + 5)
-#define INT_PBUS_ARM11 (32 + 6)
-#define INT_AXI_MPU_SMI (32 + 7)
-#define INT_AXI_MPU_EBI1 (32 + 8)
-#define INT_AD_HSSD (32 + 9)
-#define INT_ARM11_PMU (32 + 10)
-#define INT_ARM11_DMA (32 + 11)
-#define INT_TSIF_IRQ (32 + 12)
-#define INT_UART1DM_IRQ (32 + 13)
-#define INT_UART1DM_RX (32 + 14)
-#define INT_USB_HS (32 + 15)
-#define INT_SDC3_0 (32 + 16)
-#define INT_SDC3_1 (32 + 17)
-#define INT_SDC4_0 (32 + 18)
-#define INT_SDC4_1 (32 + 19)
-#define INT_UART2DM_RX (32 + 20)
-#define INT_UART2DM_IRQ (32 + 21)
-
-/* 22-31 are reserved */
-
#define MSM_IRQ_BIT(irq) (1 << ((irq) & 31))
-#define NR_MSM_IRQS 64
-#define NR_GPIO_IRQS 122
-#define NR_BOARD_IRQS 64
-#define NR_IRQS (NR_MSM_IRQS + NR_GPIO_IRQS + NR_BOARD_IRQS)
+#if defined(CONFIG_ARCH_MSM7X30)
+#include "irqs-7x30.h"
+#elif defined(CONFIG_ARCH_QSD8X50)
+#include "irqs-8x50.h"
+#include "sirc.h"
+#elif defined(CONFIG_ARCH_MSM_ARM11)
+#include "irqs-7x00.h"
+#else
+#error "Unknown architecture specification"
+#endif
+#define NR_IRQS (NR_MSM_IRQS + NR_GPIO_IRQS + NR_BOARD_IRQS)
#define MSM_GPIO_TO_INT(n) (NR_MSM_IRQS + (n))
+#define MSM_INT_TO_REG(base, irq) ((base) + (irq) / 32)
#endif
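MSM_GPIO_TO_INT() itself is unchanged; only the per-SoC NR_MSM_IRQS constant it adds differs between the headers selected above. For example (GPIO number, trigger flags and handler are placeholders):

#include <linux/interrupt.h>
#include <mach/irqs.h>

static irqreturn_t example_gpio_isr(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int example_request_gpio_irq(void)
{
	/* GPIO 83 is arbitrary; its IRQ number is offset by the SoC's NR_MSM_IRQS */
	return request_irq(MSM_GPIO_TO_INT(83), example_gpio_isr,
			   IRQF_TRIGGER_FALLING, "example-gpio", NULL);
}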
diff --git a/arch/arm/mach-msm/include/mach/memory.h b/arch/arm/mach-msm/include/mach/memory.h
index f4698baec976..50c7847e6002 100644
--- a/arch/arm/mach-msm/include/mach/memory.h
+++ b/arch/arm/mach-msm/include/mach/memory.h
@@ -17,7 +17,15 @@
#define __ASM_ARCH_MEMORY_H
/* physical offset of RAM */
+#if defined(CONFIG_ARCH_QSD8X50) && defined(CONFIG_MSM_SOC_REV_A)
+#define PHYS_OFFSET UL(0x00000000)
+#elif defined(CONFIG_ARCH_QSD8X50)
+#define PHYS_OFFSET UL(0x20000000)
+#elif defined(CONFIG_ARCH_MSM7X30)
+#define PHYS_OFFSET UL(0x00200000)
+#else
#define PHYS_OFFSET UL(0x10000000)
+#endif
#endif
diff --git a/arch/arm/mach-msm/include/mach/msm_fb.h b/arch/arm/mach-msm/include/mach/msm_fb.h
new file mode 100644
index 000000000000..1f4fc81b3d8f
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/msm_fb.h
@@ -0,0 +1,147 @@
+/* arch/arm/mach-msm/include/mach/msm_fb.h
+ *
+ * Internal shared definitions for various MSM framebuffer parts.
+ *
+ * Copyright (C) 2007 Google Incorporated
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MSM_FB_H_
+#define _MSM_FB_H_
+
+#include <linux/device.h>
+
+struct mddi_info;
+
+struct msm_fb_data {
+ int xres; /* x resolution in pixels */
+ int yres; /* y resolution in pixels */
+ int width; /* display width in mm */
+ int height; /* display height in mm */
+ unsigned output_format;
+};
+
+struct msmfb_callback {
+ void (*func)(struct msmfb_callback *);
+};
+
+enum {
+ MSM_MDDI_PMDH_INTERFACE,
+ MSM_MDDI_EMDH_INTERFACE,
+ MSM_EBI2_INTERFACE,
+};
+
+#define MSMFB_CAP_PARTIAL_UPDATES (1 << 0)
+
+struct msm_panel_data {
+ /* turns off the fb memory */
+ int (*suspend)(struct msm_panel_data *);
+ /* turns on the fb memory */
+ int (*resume)(struct msm_panel_data *);
+ /* turns off the panel */
+ int (*blank)(struct msm_panel_data *);
+ /* turns on the panel */
+ int (*unblank)(struct msm_panel_data *);
+ void (*wait_vsync)(struct msm_panel_data *);
+ void (*request_vsync)(struct msm_panel_data *, struct msmfb_callback *);
+ void (*clear_vsync)(struct msm_panel_data *);
+ /* from the enum above */
+ unsigned interface_type;
+ /* data to be passed to the fb driver */
+ struct msm_fb_data *fb_data;
+
+ /* capabilities supported by the panel */
+ uint32_t caps;
+};
+
+struct msm_mddi_client_data {
+ void (*suspend)(struct msm_mddi_client_data *);
+ void (*resume)(struct msm_mddi_client_data *);
+ void (*activate_link)(struct msm_mddi_client_data *);
+ void (*remote_write)(struct msm_mddi_client_data *, uint32_t val,
+ uint32_t reg);
+ uint32_t (*remote_read)(struct msm_mddi_client_data *, uint32_t reg);
+ void (*auto_hibernate)(struct msm_mddi_client_data *, int);
+ /* custom data that needs to be passed from the board file to a
+ * particular client */
+ void *private_client_data;
+ struct resource *fb_resource;
+ /* from the list above */
+ unsigned interface_type;
+};
+
+struct msm_mddi_platform_data {
+ unsigned int clk_rate;
+ void (*power_client)(struct msm_mddi_client_data *, int on);
+
+ /* fixup the mfr name, product id */
+ void (*fixup)(uint16_t *mfr_name, uint16_t *product_id);
+
+ struct resource *fb_resource; /*optional*/
+ /* number of clients in the list that follows */
+ int num_clients;
+ /* array of client information of clients */
+ struct {
+ unsigned product_id; /* mfr id in top 16 bits, product id
+ * in lower 16 bits
+ */
+ char *name; /* the device name will be the platform
+ * device name registered for the client,
+ * it should match the name of the associated
+ * driver
+ */
+ unsigned id; /* id for mddi client device node, will also
+ * be used as device id of panel devices, if
+ * the client device will have multiple panels
+ * space must be left here for them
+ */
+ void *client_data; /* required private client data */
+ unsigned int clk_rate; /* optional: if the client requires a
+ * different mddi clk rate
+ */
+ } client_platform_data[];
+};
+
+struct mdp_blit_req;
+struct fb_info;
+struct mdp_device {
+ struct device dev;
+ void (*dma)(struct mdp_device *mpd, uint32_t addr,
+ uint32_t stride, uint32_t w, uint32_t h, uint32_t x,
+ uint32_t y, struct msmfb_callback *callback, int interface);
+ void (*dma_wait)(struct mdp_device *mdp);
+ int (*blit)(struct mdp_device *mdp, struct fb_info *fb,
+ struct mdp_blit_req *req);
+ void (*set_grp_disp)(struct mdp_device *mdp, uint32_t disp_id);
+};
+
+struct class_interface;
+int register_mdp_client(struct class_interface *class_intf);
+
+/**** private client data structs go below this line ***/
+
+struct msm_mddi_bridge_platform_data {
+ /* from board file */
+ int (*init)(struct msm_mddi_bridge_platform_data *,
+ struct msm_mddi_client_data *);
+ int (*uninit)(struct msm_mddi_bridge_platform_data *,
+ struct msm_mddi_client_data *);
+ /* passed to panel for use by the fb driver */
+ int (*blank)(struct msm_mddi_bridge_platform_data *,
+ struct msm_mddi_client_data *);
+ int (*unblank)(struct msm_mddi_bridge_platform_data *,
+ struct msm_mddi_client_data *);
+ struct msm_fb_data fb_data;
+};
+
+
+
+#endif
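
[Editor's aside, not part of the patch: a minimal sketch of how a board file might populate msm_mddi_platform_data for one MDDI client, following the field comments above. The client name, product id and mddi_power() body are illustrative assumptions, and the static initialization of the flexible client_platform_data[] array relies on the GCC extension the MSM board files already use.]

	#include <mach/msm_fb.h>

	/* Illustrative only: hypothetical board wiring for a single MDDI client. */
	static void mddi_power(struct msm_mddi_client_data *cdata, int on)
	{
		/* board-specific: toggle the panel/MDDI regulators here */
	}

	static struct msm_mddi_platform_data example_mddi_pdata = {
		.clk_rate	= 122880000,
		.power_client	= mddi_power,
		.num_clients	= 1,
		.client_platform_data = {
			{
				/* mfr id in the top 16 bits, product id in the low 16 */
				.product_id	= (0xd263 << 16) | 0x0000,
				/* must match the client driver's platform_device name */
				.name		= "mddi_c_d263_0000",
				.id		= 0,
				.client_data	= NULL,
				.clk_rate	= 0,
			},
		},
	};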
diff --git a/arch/arm/mach-msm/include/mach/msm_iomap-7x00.h b/arch/arm/mach-msm/include/mach/msm_iomap-7x00.h
new file mode 100644
index 000000000000..cfff0e74f128
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/msm_iomap-7x00.h
@@ -0,0 +1,139 @@
+/* arch/arm/mach-msm/include/mach/msm_iomap.h
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Author: Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ * The MSM peripherals are spread across 768MB of physical space,
+ * which makes a simple IO_ADDRESS macro that slides them into the
+ * right virtual location impractical. Instead, we will
+ * provide a master phys->virt mapping for peripherals here.
+ *
+ */
+
+#ifndef __ASM_ARCH_MSM_IOMAP_7X00_H
+#define __ASM_ARCH_MSM_IOMAP_7X00_H
+
+#include <asm/sizes.h>
+
+/* Physical base address and size of peripherals.
+ * Ordered by the virtual base addresses they will be mapped at.
+ *
+ * MSM_VIC_BASE must be a value that can be loaded via a "mov"
+ * instruction, otherwise entry-macro.S will not compile.
+ *
+ * If you add or remove entries here, you'll want to edit the
+ * msm_io_desc array in arch/arm/mach-msm/io.c to reflect your
+ * changes.
+ *
+ */
+
+#ifdef __ASSEMBLY__
+#define IOMEM(x) x
+#else
+#define IOMEM(x) ((void __force __iomem *)(x))
+#endif
+
+#define MSM_VIC_BASE IOMEM(0xE0000000)
+#define MSM_VIC_PHYS 0xC0000000
+#define MSM_VIC_SIZE SZ_4K
+
+#define MSM_CSR_BASE IOMEM(0xE0001000)
+#define MSM_CSR_PHYS 0xC0100000
+#define MSM_CSR_SIZE SZ_4K
+
+#define MSM_GPT_PHYS MSM_CSR_PHYS
+#define MSM_GPT_BASE MSM_CSR_BASE
+#define MSM_GPT_SIZE SZ_4K
+
+#define MSM_DMOV_BASE IOMEM(0xE0002000)
+#define MSM_DMOV_PHYS 0xA9700000
+#define MSM_DMOV_SIZE SZ_4K
+
+#define MSM_GPIO1_BASE IOMEM(0xE0003000)
+#define MSM_GPIO1_PHYS 0xA9200000
+#define MSM_GPIO1_SIZE SZ_4K
+
+#define MSM_GPIO2_BASE IOMEM(0xE0004000)
+#define MSM_GPIO2_PHYS 0xA9300000
+#define MSM_GPIO2_SIZE SZ_4K
+
+#define MSM_CLK_CTL_BASE IOMEM(0xE0005000)
+#define MSM_CLK_CTL_PHYS 0xA8600000
+#define MSM_CLK_CTL_SIZE SZ_4K
+
+#define MSM_SHARED_RAM_BASE IOMEM(0xE0100000)
+#define MSM_SHARED_RAM_PHYS 0x01F00000
+#define MSM_SHARED_RAM_SIZE SZ_1M
+
+#define MSM_UART1_PHYS 0xA9A00000
+#define MSM_UART1_SIZE SZ_4K
+
+#define MSM_UART2_PHYS 0xA9B00000
+#define MSM_UART2_SIZE SZ_4K
+
+#define MSM_UART3_PHYS 0xA9C00000
+#define MSM_UART3_SIZE SZ_4K
+
+#ifdef CONFIG_MSM_DEBUG_UART
+#define MSM_DEBUG_UART_BASE 0xE1000000
+#if CONFIG_MSM_DEBUG_UART == 1
+#define MSM_DEBUG_UART_PHYS MSM_UART1_PHYS
+#elif CONFIG_MSM_DEBUG_UART == 2
+#define MSM_DEBUG_UART_PHYS MSM_UART2_PHYS
+#elif CONFIG_MSM_DEBUG_UART == 3
+#define MSM_DEBUG_UART_PHYS MSM_UART3_PHYS
+#endif
+#define MSM_DEBUG_UART_SIZE SZ_4K
+#endif
+
+#define MSM_SDC1_PHYS 0xA0400000
+#define MSM_SDC1_SIZE SZ_4K
+
+#define MSM_SDC2_PHYS 0xA0500000
+#define MSM_SDC2_SIZE SZ_4K
+
+#define MSM_SDC3_PHYS 0xA0600000
+#define MSM_SDC3_SIZE SZ_4K
+
+#define MSM_SDC4_PHYS 0xA0700000
+#define MSM_SDC4_SIZE SZ_4K
+
+#define MSM_I2C_PHYS 0xA9900000
+#define MSM_I2C_SIZE SZ_4K
+
+#define MSM_HSUSB_PHYS 0xA0800000
+#define MSM_HSUSB_SIZE SZ_4K
+
+#define MSM_PMDH_PHYS 0xAA600000
+#define MSM_PMDH_SIZE SZ_4K
+
+#define MSM_EMDH_PHYS 0xAA700000
+#define MSM_EMDH_SIZE SZ_4K
+
+#define MSM_MDP_PHYS 0xAA200000
+#define MSM_MDP_SIZE 0x000F0000
+
+#define MSM_MDC_PHYS 0xAA500000
+#define MSM_MDC_SIZE SZ_1M
+
+#define MSM_AD5_PHYS 0xAC000000
+#define MSM_AD5_SIZE (SZ_1M*13)
+
+
+#if defined(CONFIG_ARCH_MSM7X30)
+#define MSM_GCC_BASE IOMEM(0xF8009000)
+#define MSM_GCC_PHYS 0xC0182000
+#define MSM_GCC_SIZE SZ_4K
+#endif
+
+#endif
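
[Editor's aside, not part of the patch: a sketch of how these constants are consumed once io.c has entered the corresponding _PHYS/_SIZE pair into the iotable; the register offset is a placeholder, not a documented register.]

	#include <linux/io.h>
	#include <mach/msm_iomap.h>

	/* Sketch: read a clock-controller register through the fixed virtual
	 * mapping established by msm_map_common_io().  The offset is illustrative.
	 */
	static inline u32 example_clk_ctl_read(unsigned off)
	{
		return readl(MSM_CLK_CTL_BASE + off);
	}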
diff --git a/arch/arm/mach-msm/include/mach/msm_iomap-7x30.h b/arch/arm/mach-msm/include/mach/msm_iomap-7x30.h
new file mode 100644
index 000000000000..8a00c2defbc1
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/msm_iomap-7x30.h
@@ -0,0 +1,122 @@
+/*
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2008-2010, Code Aurora Forum. All rights reserved.
+ * Author: Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ * The MSM peripherals are spread across 768MB of physical space,
+ * which makes a simple IO_ADDRESS macro that slides them into the
+ * right virtual location impractical. Instead, we will
+ * provide a master phys->virt mapping for peripherals here.
+ *
+ */
+
+#ifndef __ASM_ARCH_MSM_IOMAP_7X30_H
+#define __ASM_ARCH_MSM_IOMAP_7X30_H
+
+/* Physical base address and size of peripherals.
+ * Ordered by the virtual base addresses they will be mapped at.
+ *
+ * MSM_VIC_BASE must be a value that can be loaded via a "mov"
+ * instruction, otherwise entry-macro.S will not compile.
+ *
+ * If you add or remove entries here, you'll want to edit the
+ * msm_io_desc array in arch/arm/mach-msm/io.c to reflect your
+ * changes.
+ *
+ */
+
+#define MSM_VIC_BASE IOMEM(0xE0000000)
+#define MSM_VIC_PHYS 0xC0080000
+#define MSM_VIC_SIZE SZ_4K
+
+#define MSM_CSR_BASE IOMEM(0xE0001000)
+#define MSM_CSR_PHYS 0xC0100000
+#define MSM_CSR_SIZE SZ_4K
+
+#define MSM_TMR_PHYS MSM_CSR_PHYS
+#define MSM_TMR_BASE MSM_CSR_BASE
+#define MSM_TMR_SIZE SZ_4K
+
+#define MSM_GPT_BASE (MSM_TMR_BASE + 0x4)
+#define MSM_DGT_BASE (MSM_TMR_BASE + 0x24)
+
+#define MSM_DMOV_BASE IOMEM(0xE0002000)
+#define MSM_DMOV_PHYS 0xAC400000
+#define MSM_DMOV_SIZE SZ_4K
+
+#define MSM_GPIO1_BASE IOMEM(0xE0003000)
+#define MSM_GPIO1_PHYS 0xAC001000
+#define MSM_GPIO1_SIZE SZ_4K
+
+#define MSM_GPIO2_BASE IOMEM(0xE0004000)
+#define MSM_GPIO2_PHYS 0xAC101000
+#define MSM_GPIO2_SIZE SZ_4K
+
+#define MSM_CLK_CTL_BASE IOMEM(0xE0005000)
+#define MSM_CLK_CTL_PHYS 0xAB800000
+#define MSM_CLK_CTL_SIZE SZ_4K
+
+#define MSM_CLK_CTL_SH2_BASE IOMEM(0xE0006000)
+#define MSM_CLK_CTL_SH2_PHYS 0xABA01000
+#define MSM_CLK_CTL_SH2_SIZE SZ_4K
+
+#define MSM_ACC_BASE IOMEM(0xE0007000)
+#define MSM_ACC_PHYS 0xC0101000
+#define MSM_ACC_SIZE SZ_4K
+
+#define MSM_SAW_BASE IOMEM(0xE0008000)
+#define MSM_SAW_PHYS 0xC0102000
+#define MSM_SAW_SIZE SZ_4K
+
+#define MSM_GCC_BASE IOMEM(0xE0009000)
+#define MSM_GCC_PHYS 0xC0182000
+#define MSM_GCC_SIZE SZ_4K
+
+#define MSM_TCSR_BASE IOMEM(0xE000A000)
+#define MSM_TCSR_PHYS 0xAB600000
+#define MSM_TCSR_SIZE SZ_4K
+
+#define MSM_SHARED_RAM_BASE IOMEM(0xE0100000)
+#define MSM_SHARED_RAM_PHYS 0x00100000
+#define MSM_SHARED_RAM_SIZE SZ_1M
+
+#define MSM_UART1_PHYS 0xACA00000
+#define MSM_UART1_SIZE SZ_4K
+
+#define MSM_UART2_PHYS 0xACB00000
+#define MSM_UART2_SIZE SZ_4K
+
+#define MSM_UART3_PHYS 0xACC00000
+#define MSM_UART3_SIZE SZ_4K
+
+#ifdef CONFIG_MSM_DEBUG_UART
+#define MSM_DEBUG_UART_BASE 0xE1000000
+#if CONFIG_MSM_DEBUG_UART == 1
+#define MSM_DEBUG_UART_PHYS MSM_UART1_PHYS
+#elif CONFIG_MSM_DEBUG_UART == 2
+#define MSM_DEBUG_UART_PHYS MSM_UART2_PHYS
+#elif CONFIG_MSM_DEBUG_UART == 3
+#define MSM_DEBUG_UART_PHYS MSM_UART3_PHYS
+#endif
+#define MSM_DEBUG_UART_SIZE SZ_4K
+#endif
+
+#define MSM_MDC_BASE IOMEM(0xE0200000)
+#define MSM_MDC_PHYS 0xAA500000
+#define MSM_MDC_SIZE SZ_1M
+
+#define MSM_AD5_BASE IOMEM(0xE0300000)
+#define MSM_AD5_PHYS 0xA7000000
+#define MSM_AD5_SIZE (SZ_1M*13)
+
+#endif
diff --git a/arch/arm/mach-msm/include/mach/msm_iomap-8x50.h b/arch/arm/mach-msm/include/mach/msm_iomap-8x50.h
new file mode 100644
index 000000000000..acc819eb76e5
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/msm_iomap-8x50.h
@@ -0,0 +1,147 @@
+/*
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2008-2010, Code Aurora Forum. All rights reserved.
+ * Author: Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ * The MSM peripherals are spread across 768MB of physical space,
+ * which makes a simple IO_ADDRESS macro that slides them into the
+ * right virtual location impractical. Instead, we will
+ * provide a master phys->virt mapping for peripherals here.
+ *
+ */
+
+#ifndef __ASM_ARCH_MSM_IOMAP_8X50_H
+#define __ASM_ARCH_MSM_IOMAP_8X50_H
+
+/* Physical base address and size of peripherals.
+ * Ordered by the virtual base addresses they will be mapped at.
+ *
+ * MSM_VIC_BASE must be a value that can be loaded via a "mov"
+ * instruction, otherwise entry-macro.S will not compile.
+ *
+ * If you add or remove entries here, you'll want to edit the
+ * msm_io_desc array in arch/arm/mach-msm/io.c to reflect your
+ * changes.
+ *
+ */
+
+#define MSM_VIC_BASE IOMEM(0xE0000000)
+#define MSM_VIC_PHYS 0xAC000000
+#define MSM_VIC_SIZE SZ_4K
+
+#define MSM_CSR_BASE IOMEM(0xE0001000)
+#define MSM_CSR_PHYS 0xAC100000
+#define MSM_CSR_SIZE SZ_4K
+
+#define MSM_TMR_PHYS MSM_CSR_PHYS
+#define MSM_TMR_BASE MSM_CSR_BASE
+#define MSM_TMR_SIZE SZ_4K
+
+#define MSM_GPT_BASE MSM_TMR_BASE
+#define MSM_DGT_BASE (MSM_TMR_BASE + 0x10)
+
+#define MSM_DMOV_BASE IOMEM(0xE0002000)
+#define MSM_DMOV_PHYS 0xA9700000
+#define MSM_DMOV_SIZE SZ_4K
+
+#define MSM_GPIO1_BASE IOMEM(0xE0003000)
+#define MSM_GPIO1_PHYS 0xA9000000
+#define MSM_GPIO1_SIZE SZ_4K
+
+#define MSM_GPIO2_BASE IOMEM(0xE0004000)
+#define MSM_GPIO2_PHYS 0xA9100000
+#define MSM_GPIO2_SIZE SZ_4K
+
+#define MSM_CLK_CTL_BASE IOMEM(0xE0005000)
+#define MSM_CLK_CTL_PHYS 0xA8600000
+#define MSM_CLK_CTL_SIZE SZ_4K
+
+#define MSM_SIRC_BASE IOMEM(0xE1006000)
+#define MSM_SIRC_PHYS 0xAC200000
+#define MSM_SIRC_SIZE SZ_4K
+
+#define MSM_SCPLL_BASE IOMEM(0xE1007000)
+#define MSM_SCPLL_PHYS 0xA8800000
+#define MSM_SCPLL_SIZE SZ_4K
+
+#ifdef CONFIG_MSM_SOC_REV_A
+#define MSM_SMI_BASE 0xE0000000
+#else
+#define MSM_SMI_BASE 0x00000000
+#endif
+
+#define MSM_SHARED_RAM_BASE IOMEM(0xE0100000)
+#define MSM_SHARED_RAM_PHYS (MSM_SMI_BASE + 0x00100000)
+#define MSM_SHARED_RAM_SIZE SZ_1M
+
+#define MSM_UART1_PHYS 0xA9A00000
+#define MSM_UART1_SIZE SZ_4K
+
+#define MSM_UART2_PHYS 0xA9B00000
+#define MSM_UART2_SIZE SZ_4K
+
+#define MSM_UART3_PHYS 0xA9C00000
+#define MSM_UART3_SIZE SZ_4K
+
+#ifdef CONFIG_MSM_DEBUG_UART
+#define MSM_DEBUG_UART_BASE 0xE1000000
+#if CONFIG_MSM_DEBUG_UART == 1
+#define MSM_DEBUG_UART_PHYS MSM_UART1_PHYS
+#elif CONFIG_MSM_DEBUG_UART == 2
+#define MSM_DEBUG_UART_PHYS MSM_UART2_PHYS
+#elif CONFIG_MSM_DEBUG_UART == 3
+#define MSM_DEBUG_UART_PHYS MSM_UART3_PHYS
+#endif
+#define MSM_DEBUG_UART_SIZE SZ_4K
+#endif
+
+#define MSM_MDC_BASE IOMEM(0xE0200000)
+#define MSM_MDC_PHYS 0xAA500000
+#define MSM_MDC_SIZE SZ_1M
+
+#define MSM_AD5_BASE IOMEM(0xE0300000)
+#define MSM_AD5_PHYS 0xAC000000
+#define MSM_AD5_SIZE (SZ_1M*13)
+
+
+#define MSM_I2C_SIZE SZ_4K
+#define MSM_I2C_PHYS 0xA9900000
+
+#define MSM_HSUSB_PHYS 0xA0800000
+#define MSM_HSUSB_SIZE SZ_1K
+
+#define MSM_NAND_PHYS 0xA0A00000
+
+
+#define MSM_TSIF_PHYS (0xa0100000)
+#define MSM_TSIF_SIZE (0x200)
+
+#define MSM_TSSC_PHYS 0xAA300000
+
+#define MSM_UART1DM_PHYS 0xA0200000
+#define MSM_UART2DM_PHYS 0xA0900000
+
+
+#define MSM_SDC1_PHYS 0xA0400000
+#define MSM_SDC1_SIZE SZ_4K
+
+#define MSM_SDC2_PHYS 0xA0500000
+#define MSM_SDC2_SIZE SZ_4K
+
+#define MSM_SDC3_PHYS 0xA0600000
+#define MSM_SDC3_SIZE SZ_4K
+
+#define MSM_SDC4_PHYS 0xA0700000
+#define MSM_SDC4_SIZE SZ_4K
+
+#endif
diff --git a/arch/arm/mach-msm/include/mach/msm_iomap.h b/arch/arm/mach-msm/include/mach/msm_iomap.h
index 9dae1a98c77a..e6b1821cc4ea 100644
--- a/arch/arm/mach-msm/include/mach/msm_iomap.h
+++ b/arch/arm/mach-msm/include/mach/msm_iomap.h
@@ -1,6 +1,6 @@
-/* arch/arm/mach-msm/include/mach/msm_iomap.h
- *
+/*
* Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2008-2010, Code Aurora Forum. All rights reserved.
* Author: Brian Swetland <swetland@google.com>
*
* This software is licensed under the terms of the GNU General Public
@@ -43,91 +43,12 @@
#define IOMEM(x) ((void __force __iomem *)(x))
#endif
-#define MSM_VIC_BASE IOMEM(0xE0000000)
-#define MSM_VIC_PHYS 0xC0000000
-#define MSM_VIC_SIZE SZ_4K
-
-#define MSM_CSR_BASE IOMEM(0xE0001000)
-#define MSM_CSR_PHYS 0xC0100000
-#define MSM_CSR_SIZE SZ_4K
-
-#define MSM_GPT_PHYS MSM_CSR_PHYS
-#define MSM_GPT_BASE MSM_CSR_BASE
-#define MSM_GPT_SIZE SZ_4K
-
-#define MSM_DMOV_BASE IOMEM(0xE0002000)
-#define MSM_DMOV_PHYS 0xA9700000
-#define MSM_DMOV_SIZE SZ_4K
-
-#define MSM_GPIO1_BASE IOMEM(0xE0003000)
-#define MSM_GPIO1_PHYS 0xA9200000
-#define MSM_GPIO1_SIZE SZ_4K
-
-#define MSM_GPIO2_BASE IOMEM(0xE0004000)
-#define MSM_GPIO2_PHYS 0xA9300000
-#define MSM_GPIO2_SIZE SZ_4K
-
-#define MSM_CLK_CTL_BASE IOMEM(0xE0005000)
-#define MSM_CLK_CTL_PHYS 0xA8600000
-#define MSM_CLK_CTL_SIZE SZ_4K
-
-#define MSM_SHARED_RAM_BASE IOMEM(0xE0100000)
-#define MSM_SHARED_RAM_PHYS 0x01F00000
-#define MSM_SHARED_RAM_SIZE SZ_1M
-
-#define MSM_UART1_PHYS 0xA9A00000
-#define MSM_UART1_SIZE SZ_4K
-
-#define MSM_UART2_PHYS 0xA9B00000
-#define MSM_UART2_SIZE SZ_4K
-
-#define MSM_UART3_PHYS 0xA9C00000
-#define MSM_UART3_SIZE SZ_4K
-
-#ifdef CONFIG_MSM_DEBUG_UART
-#define MSM_DEBUG_UART_BASE 0xE1000000
-#if CONFIG_MSM_DEBUG_UART == 1
-#define MSM_DEBUG_UART_PHYS MSM_UART1_PHYS
-#elif CONFIG_MSM_DEBUG_UART == 2
-#define MSM_DEBUG_UART_PHYS MSM_UART2_PHYS
-#elif CONFIG_MSM_DEBUG_UART == 3
-#define MSM_DEBUG_UART_PHYS MSM_UART3_PHYS
-#endif
-#define MSM_DEBUG_UART_SIZE SZ_4K
+#if defined(CONFIG_ARCH_MSM7X30)
+#include "msm_iomap-7x30.h"
+#elif defined(CONFIG_ARCH_QSD8X50)
+#include "msm_iomap-8x50.h"
+#else
+#include "msm_iomap-7x00.h"
#endif
-#define MSM_SDC1_PHYS 0xA0400000
-#define MSM_SDC1_SIZE SZ_4K
-
-#define MSM_SDC2_PHYS 0xA0500000
-#define MSM_SDC2_SIZE SZ_4K
-
-#define MSM_SDC3_PHYS 0xA0600000
-#define MSM_SDC3_SIZE SZ_4K
-
-#define MSM_SDC4_PHYS 0xA0700000
-#define MSM_SDC4_SIZE SZ_4K
-
-#define MSM_I2C_PHYS 0xA9900000
-#define MSM_I2C_SIZE SZ_4K
-
-#define MSM_HSUSB_PHYS 0xA0800000
-#define MSM_HSUSB_SIZE SZ_4K
-
-#define MSM_PMDH_PHYS 0xAA600000
-#define MSM_PMDH_SIZE SZ_4K
-
-#define MSM_EMDH_PHYS 0xAA700000
-#define MSM_EMDH_SIZE SZ_4K
-
-#define MSM_MDP_PHYS 0xAA200000
-#define MSM_MDP_SIZE 0x000F0000
-
-#define MSM_MDC_PHYS 0xAA500000
-#define MSM_MDC_SIZE SZ_1M
-
-#define MSM_AD5_PHYS 0xAC000000
-#define MSM_AD5_SIZE (SZ_1M*13)
-
-
#endif
diff --git a/arch/arm/mach-msm/include/mach/msm_smd.h b/arch/arm/mach-msm/include/mach/msm_smd.h
new file mode 100644
index 000000000000..029463ec8756
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/msm_smd.h
@@ -0,0 +1,109 @@
+/* linux/include/asm-arm/arch-msm/msm_smd.h
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Author: Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ASM_ARCH_MSM_SMD_H
+#define __ASM_ARCH_MSM_SMD_H
+
+typedef struct smd_channel smd_channel_t;
+
+extern int (*msm_check_for_modem_crash)(void);
+
+/* warning: notify() may be called before smd_open() returns */
+int smd_open(const char *name, smd_channel_t **ch, void *priv,
+ void (*notify)(void *priv, unsigned event));
+
+#define SMD_EVENT_DATA 1
+#define SMD_EVENT_OPEN 2
+#define SMD_EVENT_CLOSE 3
+
+int smd_close(smd_channel_t *ch);
+
+/* passing a null pointer for data reads and discards */
+int smd_read(smd_channel_t *ch, void *data, int len);
+
+/* Write to stream channels may do a partial write and return
+** the length actually written.
+** Write to packet channels will never do a partial write --
+** it will return the requested length written or an error.
+*/
+int smd_write(smd_channel_t *ch, const void *data, int len);
+int smd_write_atomic(smd_channel_t *ch, const void *data, int len);
+
+int smd_write_avail(smd_channel_t *ch);
+int smd_read_avail(smd_channel_t *ch);
+
+/* Returns the total size of the current packet being read.
+** Returns 0 if no packets available or a stream channel.
+*/
+int smd_cur_packet_size(smd_channel_t *ch);
+
+/* used for tty unthrottling and the like -- causes the notify()
+** callback to be called from the same lock context as is used
+** when it is called from channel updates
+*/
+void smd_kick(smd_channel_t *ch);
+
+
+#if 0
+/* these are interruptible waits which block until the specified
+** number of bytes are readable or writable.
+*/
+int smd_wait_until_readable(smd_channel_t *ch, int bytes);
+int smd_wait_until_writable(smd_channel_t *ch, int bytes);
+#endif
+
+typedef enum {
+ SMD_PORT_DS = 0,
+ SMD_PORT_DIAG,
+ SMD_PORT_RPC_CALL,
+ SMD_PORT_RPC_REPLY,
+ SMD_PORT_BT,
+ SMD_PORT_CONTROL,
+ SMD_PORT_MEMCPY_SPARE1,
+ SMD_PORT_DATA1,
+ SMD_PORT_DATA2,
+ SMD_PORT_DATA3,
+ SMD_PORT_DATA4,
+ SMD_PORT_DATA5,
+ SMD_PORT_DATA6,
+ SMD_PORT_DATA7,
+ SMD_PORT_DATA8,
+ SMD_PORT_DATA9,
+ SMD_PORT_DATA10,
+ SMD_PORT_DATA11,
+ SMD_PORT_DATA12,
+ SMD_PORT_DATA13,
+ SMD_PORT_DATA14,
+ SMD_PORT_DATA15,
+ SMD_PORT_DATA16,
+ SMD_PORT_DATA17,
+ SMD_PORT_DATA18,
+ SMD_PORT_DATA19,
+ SMD_PORT_DATA20,
+ SMD_PORT_GPS_NMEA,
+ SMD_PORT_BRIDGE_1,
+ SMD_PORT_BRIDGE_2,
+ SMD_PORT_BRIDGE_3,
+ SMD_PORT_BRIDGE_4,
+ SMD_PORT_BRIDGE_5,
+ SMD_PORT_LOOPBACK,
+ SMD_PORT_CS_APPS_MODEM,
+ SMD_PORT_CS_APPS_DSP,
+ SMD_PORT_CS_MODEM_DSP,
+ SMD_NUM_PORTS,
+} smd_port_id_type;
+
+#endif
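
[Editor's aside, not part of the patch: a minimal sketch of the API this header exports, assuming a channel named "SMD_DATA1" exists on the modem side; the name and payload are illustrative. Per the warning above, notify() may fire before smd_open() returns, so the callback tolerates a not-yet-assigned channel pointer.]

	#include <linux/kernel.h>
	#include <mach/msm_smd.h>

	static smd_channel_t *example_ch;

	/* May run before smd_open() returns; keep it short and non-blocking. */
	static void example_notify(void *priv, unsigned event)
	{
		if (event == SMD_EVENT_DATA && example_ch) {
			char buf[64];
			int avail = smd_read_avail(example_ch);

			if (avail > 0)
				smd_read(example_ch, buf, min(avail, (int)sizeof(buf)));
		}
	}

	static int example_open(void)
	{
		int ret = smd_open("SMD_DATA1", &example_ch, NULL, example_notify);

		if (ret)
			return ret;
		/* stream channels may write fewer bytes than requested */
		return smd_write(example_ch, "hello", 5);
	}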
diff --git a/arch/arm/mach-msm/include/mach/sirc.h b/arch/arm/mach-msm/include/mach/sirc.h
new file mode 100644
index 000000000000..7281337ee28d
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/sirc.h
@@ -0,0 +1,115 @@
+/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of Code Aurora Forum, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __ASM_ARCH_MSM_SIRC_H
+#define __ASM_ARCH_MSM_SIRC_H
+
+struct sirc_regs_t {
+ void *int_enable;
+ void *int_enable_clear;
+ void *int_enable_set;
+ void *int_type;
+ void *int_polarity;
+ void *int_clear;
+};
+
+struct sirc_cascade_regs {
+ void *int_status;
+ unsigned int cascade_irq;
+};
+
+void msm_init_sirc(void);
+void msm_sirc_enter_sleep(void);
+void msm_sirc_exit_sleep(void);
+
+#if defined(CONFIG_ARCH_MSM_SCORPION)
+
+#include <mach/msm_iomap.h>
+
+/*
+ * Secondary interrupt controller interrupts
+ */
+
+#define FIRST_SIRC_IRQ (NR_MSM_IRQS + NR_GPIO_IRQS)
+
+#define INT_UART1 (FIRST_SIRC_IRQ + 0)
+#define INT_UART2 (FIRST_SIRC_IRQ + 1)
+#define INT_UART3 (FIRST_SIRC_IRQ + 2)
+#define INT_UART1_RX (FIRST_SIRC_IRQ + 3)
+#define INT_UART2_RX (FIRST_SIRC_IRQ + 4)
+#define INT_UART3_RX (FIRST_SIRC_IRQ + 5)
+#define INT_SPI_INPUT (FIRST_SIRC_IRQ + 6)
+#define INT_SPI_OUTPUT (FIRST_SIRC_IRQ + 7)
+#define INT_SPI_ERROR (FIRST_SIRC_IRQ + 8)
+#define INT_GPIO_GROUP1 (FIRST_SIRC_IRQ + 9)
+#define INT_GPIO_GROUP2 (FIRST_SIRC_IRQ + 10)
+#define INT_GPIO_GROUP1_SECURE (FIRST_SIRC_IRQ + 11)
+#define INT_GPIO_GROUP2_SECURE (FIRST_SIRC_IRQ + 12)
+#define INT_AVS_SVIC (FIRST_SIRC_IRQ + 13)
+#define INT_AVS_REQ_UP (FIRST_SIRC_IRQ + 14)
+#define INT_AVS_REQ_DOWN (FIRST_SIRC_IRQ + 15)
+#define INT_PBUS_ERR (FIRST_SIRC_IRQ + 16)
+#define INT_AXI_ERR (FIRST_SIRC_IRQ + 17)
+#define INT_SMI_ERR (FIRST_SIRC_IRQ + 18)
+#define INT_EBI1_ERR (FIRST_SIRC_IRQ + 19)
+#define INT_IMEM_ERR (FIRST_SIRC_IRQ + 20)
+#define INT_TEMP_SENSOR (FIRST_SIRC_IRQ + 21)
+#define INT_TV_ENC (FIRST_SIRC_IRQ + 22)
+#define INT_GRP2D (FIRST_SIRC_IRQ + 23)
+#define INT_GSBI_QUP (FIRST_SIRC_IRQ + 24)
+#define INT_SC_ACG (FIRST_SIRC_IRQ + 25)
+#define INT_WDT0 (FIRST_SIRC_IRQ + 26)
+#define INT_WDT1 (FIRST_SIRC_IRQ + 27)
+
+#if defined(CONFIG_MSM_SOC_REV_A)
+#define NR_SIRC_IRQS 28
+#define SIRC_MASK 0x0FFFFFFF
+#else
+#define NR_SIRC_IRQS 23
+#define SIRC_MASK 0x007FFFFF
+#endif
+
+#define LAST_SIRC_IRQ (FIRST_SIRC_IRQ + NR_SIRC_IRQS - 1)
+
+#define SPSS_SIRC_INT_SELECT (MSM_SIRC_BASE + 0x00)
+#define SPSS_SIRC_INT_ENABLE (MSM_SIRC_BASE + 0x04)
+#define SPSS_SIRC_INT_ENABLE_CLEAR (MSM_SIRC_BASE + 0x08)
+#define SPSS_SIRC_INT_ENABLE_SET (MSM_SIRC_BASE + 0x0C)
+#define SPSS_SIRC_INT_TYPE (MSM_SIRC_BASE + 0x10)
+#define SPSS_SIRC_INT_POLARITY (MSM_SIRC_BASE + 0x14)
+#define SPSS_SIRC_SECURITY (MSM_SIRC_BASE + 0x18)
+#define SPSS_SIRC_IRQ_STATUS (MSM_SIRC_BASE + 0x1C)
+#define SPSS_SIRC_IRQ1_STATUS (MSM_SIRC_BASE + 0x20)
+#define SPSS_SIRC_RAW_STATUS (MSM_SIRC_BASE + 0x24)
+#define SPSS_SIRC_INT_CLEAR (MSM_SIRC_BASE + 0x28)
+#define SPSS_SIRC_SOFT_INT (MSM_SIRC_BASE + 0x2C)
+
+#endif
+
+#endif
diff --git a/arch/arm/mach-msm/include/mach/system.h b/arch/arm/mach-msm/include/mach/system.h
index 574ccc493daf..d2e83f42ba16 100644
--- a/arch/arm/mach-msm/include/mach/system.h
+++ b/arch/arm/mach-msm/include/mach/system.h
@@ -21,3 +21,8 @@ static inline void arch_reset(char mode, const char *cmd)
{
for (;;) ; /* depends on IPC w/ other core */
}
+
+/* low level hardware reset hook -- for example, hitting the
+ * PSHOLD line on the PMIC to hard reset the system
+ */
+extern void (*msm_hw_reset_hook)(void);
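
[Editor's aside, not part of the patch: a hedged sketch of how a board or PMIC driver might install this hook; the PS_HOLD handling is entirely board-specific and left as a stub here.]

	#include <linux/init.h>
	#include <mach/system.h>

	/* Illustrative: hard reset by asserting the PMIC PS_HOLD line. */
	static void example_pmic_hard_reset(void)
	{
		/* board/PMIC-specific register poke goes here */
	}

	static void __init example_board_reset_setup(void)
	{
		msm_hw_reset_hook = example_pmic_hard_reset;
	}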
diff --git a/arch/arm/mach-msm/include/mach/vreg.h b/arch/arm/mach-msm/include/mach/vreg.h
index 9f9e25cb718e..6626e7864e28 100644
--- a/arch/arm/mach-msm/include/mach/vreg.h
+++ b/arch/arm/mach-msm/include/mach/vreg.h
@@ -23,7 +23,7 @@ struct vreg *vreg_get(struct device *dev, const char *id);
void vreg_put(struct vreg *vreg);
int vreg_enable(struct vreg *vreg);
-void vreg_disable(struct vreg *vreg);
+int vreg_disable(struct vreg *vreg);
int vreg_set_level(struct vreg *vreg, unsigned mv);
#endif
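
[Editor's aside, not part of the patch: since vreg_disable() now returns an int, callers can propagate failures. A sketch with a made-up regulator id and level; the IS_ERR check assumes vreg_get() returns an ERR_PTR on failure, which is how the mach-msm implementation of this era behaves.]

	#include <linux/err.h>
	#include <mach/vreg.h>

	static int example_vreg_cycle(struct device *dev)
	{
		struct vreg *v = vreg_get(dev, "gp2");	/* "gp2" is illustrative */
		int ret;

		if (IS_ERR(v))
			return PTR_ERR(v);

		ret = vreg_set_level(v, 2850);		/* millivolts */
		if (!ret)
			ret = vreg_enable(v);
		if (!ret)
			ret = vreg_disable(v);		/* return code now checked */

		vreg_put(v);
		return ret;
	}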
diff --git a/arch/arm/mach-msm/io.c b/arch/arm/mach-msm/io.c
index 05f96b780aa6..1c05060b5f3b 100644
--- a/arch/arm/mach-msm/io.c
+++ b/arch/arm/mach-msm/io.c
@@ -1,8 +1,9 @@
/* arch/arm/mach-msm/io.c
*
- * MSM7K io support
+ * MSM7K, QSD io support
*
* Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2008-2010, Code Aurora Forum. All rights reserved.
* Author: Brian Swetland <swetland@google.com>
*
* This software is licensed under the terms of the GNU General Public
@@ -34,6 +35,8 @@
.type = MT_DEVICE_NONSHARED, \
}
+#if defined(CONFIG_ARCH_MSM7X00A) || defined(CONFIG_ARCH_MSM7X27) \
+ || defined(CONFIG_ARCH_MSM7X25)
static struct map_desc msm_io_desc[] __initdata = {
MSM_DEVICE(VIC),
MSM_DEVICE(CSR),
@@ -45,9 +48,12 @@ static struct map_desc msm_io_desc[] __initdata = {
#ifdef CONFIG_MSM_DEBUG_UART
MSM_DEVICE(DEBUG_UART),
#endif
+#ifdef CONFIG_ARCH_MSM7X30
+ MSM_DEVICE(GCC),
+#endif
{
.virtual = (unsigned long) MSM_SHARED_RAM_BASE,
- .pfn = __phys_to_pfn(MSM_SHARED_RAM_PHYS),
+ .pfn = __phys_to_pfn(MSM_SHARED_RAM_PHYS),
.length = MSM_SHARED_RAM_SIZE,
.type = MT_DEVICE,
},
@@ -60,9 +66,72 @@ void __init msm_map_common_io(void)
* pages are peripheral interface or not.
*/
asm("mcr p15, 0, %0, c15, c2, 4" : : "r" (0));
-
iotable_init(msm_io_desc, ARRAY_SIZE(msm_io_desc));
}
+#endif
+
+#ifdef CONFIG_ARCH_QSD8X50
+static struct map_desc qsd8x50_io_desc[] __initdata = {
+ MSM_DEVICE(VIC),
+ MSM_DEVICE(CSR),
+ MSM_DEVICE(TMR),
+ MSM_DEVICE(DMOV),
+ MSM_DEVICE(GPIO1),
+ MSM_DEVICE(GPIO2),
+ MSM_DEVICE(CLK_CTL),
+ MSM_DEVICE(SIRC),
+ MSM_DEVICE(SCPLL),
+ MSM_DEVICE(AD5),
+ MSM_DEVICE(MDC),
+#ifdef CONFIG_MSM_DEBUG_UART
+ MSM_DEVICE(DEBUG_UART),
+#endif
+ {
+ .virtual = (unsigned long) MSM_SHARED_RAM_BASE,
+ .pfn = __phys_to_pfn(MSM_SHARED_RAM_PHYS),
+ .length = MSM_SHARED_RAM_SIZE,
+ .type = MT_DEVICE,
+ },
+};
+
+void __init msm_map_qsd8x50_io(void)
+{
+ iotable_init(qsd8x50_io_desc, ARRAY_SIZE(qsd8x50_io_desc));
+}
+#endif /* CONFIG_ARCH_QSD8X50 */
+
+#ifdef CONFIG_ARCH_MSM7X30
+static struct map_desc msm7x30_io_desc[] __initdata = {
+ MSM_DEVICE(VIC),
+ MSM_DEVICE(CSR),
+ MSM_DEVICE(TMR),
+ MSM_DEVICE(DMOV),
+ MSM_DEVICE(GPIO1),
+ MSM_DEVICE(GPIO2),
+ MSM_DEVICE(CLK_CTL),
+ MSM_DEVICE(CLK_CTL_SH2),
+ MSM_DEVICE(AD5),
+ MSM_DEVICE(MDC),
+ MSM_DEVICE(ACC),
+ MSM_DEVICE(SAW),
+ MSM_DEVICE(GCC),
+ MSM_DEVICE(TCSR),
+#ifdef CONFIG_MSM_DEBUG_UART
+ MSM_DEVICE(DEBUG_UART),
+#endif
+ {
+ .virtual = (unsigned long) MSM_SHARED_RAM_BASE,
+ .pfn = __phys_to_pfn(MSM_SHARED_RAM_PHYS),
+ .length = MSM_SHARED_RAM_SIZE,
+ .type = MT_DEVICE,
+ },
+};
+
+void __init msm_map_msm7x30_io(void)
+{
+ iotable_init(msm7x30_io_desc, ARRAY_SIZE(msm7x30_io_desc));
+}
+#endif /* CONFIG_ARCH_MSM7X30 */
void __iomem *
__msm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
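
[Editor's aside, not part of the patch: the new per-SoC map functions are meant to be wired into a machine descriptor's .map_io hook. A rough sketch along the lines of a QSD8x50 reference board; the real board file carries more fields and its own init paths.]

	#include <asm/mach/arch.h>
	#include <asm/mach-types.h>

	/* Sketch: select the QSD8x50 iotable early in boot. */
	static void __init example_qsd8x50_map_io(void)
	{
		msm_map_qsd8x50_io();
	}

	MACHINE_START(QSD8X50_SURF, "QCT QSD8X50 SURF")
		.map_io		= example_qsd8x50_map_io,
		/* .init_irq, .init_machine and .timer omitted in this sketch */
	MACHINE_END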
diff --git a/arch/arm/mach-msm/irq-vic.c b/arch/arm/mach-msm/irq-vic.c
new file mode 100644
index 000000000000..99f2c3473033
--- /dev/null
+++ b/arch/arm/mach-msm/irq-vic.c
@@ -0,0 +1,365 @@
+/*
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/timer.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+
+#include <asm/cacheflush.h>
+
+#include <mach/hardware.h>
+
+#include <mach/msm_iomap.h>
+
+#include "smd_private.h"
+
+enum {
+ IRQ_DEBUG_SLEEP_INT_TRIGGER = 1U << 0,
+ IRQ_DEBUG_SLEEP_INT = 1U << 1,
+ IRQ_DEBUG_SLEEP_ABORT = 1U << 2,
+ IRQ_DEBUG_SLEEP = 1U << 3,
+ IRQ_DEBUG_SLEEP_REQUEST = 1U << 4,
+};
+static int msm_irq_debug_mask;
+module_param_named(debug_mask, msm_irq_debug_mask, int,
+ S_IRUGO | S_IWUSR | S_IWGRP);
+
+#define VIC_REG(off) (MSM_VIC_BASE + (off))
+#define VIC_INT_TO_REG_ADDR(base, irq) (base + (irq / 32) * 4)
+#define VIC_INT_TO_REG_INDEX(irq) ((irq >> 5) & 3)
+
+#define VIC_INT_SELECT0 VIC_REG(0x0000) /* 1: FIQ, 0: IRQ */
+#define VIC_INT_SELECT1 VIC_REG(0x0004) /* 1: FIQ, 0: IRQ */
+#define VIC_INT_SELECT2 VIC_REG(0x0008) /* 1: FIQ, 0: IRQ */
+#define VIC_INT_SELECT3 VIC_REG(0x000C) /* 1: FIQ, 0: IRQ */
+#define VIC_INT_EN0 VIC_REG(0x0010)
+#define VIC_INT_EN1 VIC_REG(0x0014)
+#define VIC_INT_EN2 VIC_REG(0x0018)
+#define VIC_INT_EN3 VIC_REG(0x001C)
+#define VIC_INT_ENCLEAR0 VIC_REG(0x0020)
+#define VIC_INT_ENCLEAR1 VIC_REG(0x0024)
+#define VIC_INT_ENCLEAR2 VIC_REG(0x0028)
+#define VIC_INT_ENCLEAR3 VIC_REG(0x002C)
+#define VIC_INT_ENSET0 VIC_REG(0x0030)
+#define VIC_INT_ENSET1 VIC_REG(0x0034)
+#define VIC_INT_ENSET2 VIC_REG(0x0038)
+#define VIC_INT_ENSET3 VIC_REG(0x003C)
+#define VIC_INT_TYPE0 VIC_REG(0x0040) /* 1: EDGE, 0: LEVEL */
+#define VIC_INT_TYPE1 VIC_REG(0x0044) /* 1: EDGE, 0: LEVEL */
+#define VIC_INT_TYPE2 VIC_REG(0x0048) /* 1: EDGE, 0: LEVEL */
+#define VIC_INT_TYPE3 VIC_REG(0x004C) /* 1: EDGE, 0: LEVEL */
+#define VIC_INT_POLARITY0 VIC_REG(0x0050) /* 1: NEG, 0: POS */
+#define VIC_INT_POLARITY1 VIC_REG(0x0054) /* 1: NEG, 0: POS */
+#define VIC_INT_POLARITY2 VIC_REG(0x0058) /* 1: NEG, 0: POS */
+#define VIC_INT_POLARITY3 VIC_REG(0x005C) /* 1: NEG, 0: POS */
+#define VIC_NO_PEND_VAL VIC_REG(0x0060)
+
+#if defined(CONFIG_ARCH_MSM_SCORPION)
+#define VIC_NO_PEND_VAL_FIQ VIC_REG(0x0064)
+#define VIC_INT_MASTEREN VIC_REG(0x0068) /* 1: IRQ, 2: FIQ */
+#define VIC_CONFIG VIC_REG(0x006C) /* 1: USE SC VIC */
+#else
+#define VIC_INT_MASTEREN VIC_REG(0x0064) /* 1: IRQ, 2: FIQ */
+#define VIC_PROTECTION VIC_REG(0x006C) /* 1: ENABLE */
+#define VIC_CONFIG VIC_REG(0x0068) /* 1: USE ARM1136 VIC */
+#endif
+
+#define VIC_IRQ_STATUS0 VIC_REG(0x0080)
+#define VIC_IRQ_STATUS1 VIC_REG(0x0084)
+#define VIC_IRQ_STATUS2 VIC_REG(0x0088)
+#define VIC_IRQ_STATUS3 VIC_REG(0x008C)
+#define VIC_FIQ_STATUS0 VIC_REG(0x0090)
+#define VIC_FIQ_STATUS1 VIC_REG(0x0094)
+#define VIC_FIQ_STATUS2 VIC_REG(0x0098)
+#define VIC_FIQ_STATUS3 VIC_REG(0x009C)
+#define VIC_RAW_STATUS0 VIC_REG(0x00A0)
+#define VIC_RAW_STATUS1 VIC_REG(0x00A4)
+#define VIC_RAW_STATUS2 VIC_REG(0x00A8)
+#define VIC_RAW_STATUS3 VIC_REG(0x00AC)
+#define VIC_INT_CLEAR0 VIC_REG(0x00B0)
+#define VIC_INT_CLEAR1 VIC_REG(0x00B4)
+#define VIC_INT_CLEAR2 VIC_REG(0x00B8)
+#define VIC_INT_CLEAR3 VIC_REG(0x00BC)
+#define VIC_SOFTINT0 VIC_REG(0x00C0)
+#define VIC_SOFTINT1 VIC_REG(0x00C4)
+#define VIC_SOFTINT2 VIC_REG(0x00C8)
+#define VIC_SOFTINT3 VIC_REG(0x00CC)
+#define VIC_IRQ_VEC_RD VIC_REG(0x00D0) /* pending int # */
+#define VIC_IRQ_VEC_PEND_RD VIC_REG(0x00D4) /* pending vector addr */
+#define VIC_IRQ_VEC_WR VIC_REG(0x00D8)
+
+#if defined(CONFIG_ARCH_MSM_SCORPION)
+#define VIC_FIQ_VEC_RD VIC_REG(0x00DC)
+#define VIC_FIQ_VEC_PEND_RD VIC_REG(0x00E0)
+#define VIC_FIQ_VEC_WR VIC_REG(0x00E4)
+#define VIC_IRQ_IN_SERVICE VIC_REG(0x00E8)
+#define VIC_IRQ_IN_STACK VIC_REG(0x00EC)
+#define VIC_FIQ_IN_SERVICE VIC_REG(0x00F0)
+#define VIC_FIQ_IN_STACK VIC_REG(0x00F4)
+#define VIC_TEST_BUS_SEL VIC_REG(0x00F8)
+#define VIC_IRQ_CTRL_CONFIG VIC_REG(0x00FC)
+#else
+#define VIC_IRQ_IN_SERVICE VIC_REG(0x00E0)
+#define VIC_IRQ_IN_STACK VIC_REG(0x00E4)
+#define VIC_TEST_BUS_SEL VIC_REG(0x00E8)
+#endif
+
+#define VIC_VECTPRIORITY(n) VIC_REG(0x0200+((n) * 4))
+#define VIC_VECTADDR(n) VIC_REG(0x0400+((n) * 4))
+
+#if defined(CONFIG_ARCH_MSM7X30)
+#define VIC_NUM_REGS 4
+#else
+#define VIC_NUM_REGS 2
+#endif
+
+#if VIC_NUM_REGS == 2
+#define DPRINT_REGS(base_reg, format, ...) \
+ printk(KERN_INFO format " %x %x\n", ##__VA_ARGS__, \
+ readl(base_reg ## 0), readl(base_reg ## 1))
+#define DPRINT_ARRAY(array, format, ...) \
+ printk(KERN_INFO format " %x %x\n", ##__VA_ARGS__, \
+ array[0], array[1])
+#elif VIC_NUM_REGS == 4
+#define DPRINT_REGS(base_reg, format, ...) \
+ printk(KERN_INFO format " %x %x %x %x\n", ##__VA_ARGS__, \
+ readl(base_reg ## 0), readl(base_reg ## 1), \
+ readl(base_reg ## 2), readl(base_reg ## 3))
+#define DPRINT_ARRAY(array, format, ...) \
+ printk(KERN_INFO format " %x %x %x %x\n", ##__VA_ARGS__, \
+ array[0], array[1], \
+ array[2], array[3])
+#else
+#error "VIC_NUM_REGS set to illegal value"
+#endif
+
+static uint32_t msm_irq_smsm_wake_enable[2];
+static struct {
+ uint32_t int_en[2];
+ uint32_t int_type;
+ uint32_t int_polarity;
+ uint32_t int_select;
+} msm_irq_shadow_reg[VIC_NUM_REGS];
+static uint32_t msm_irq_idle_disable[VIC_NUM_REGS];
+
+#define SMSM_FAKE_IRQ (0xff)
+static uint8_t msm_irq_to_smsm[NR_IRQS] = {
+ [INT_MDDI_EXT] = 1,
+ [INT_MDDI_PRI] = 2,
+ [INT_MDDI_CLIENT] = 3,
+ [INT_USB_OTG] = 4,
+
+ [INT_PWB_I2C] = 5,
+ [INT_SDC1_0] = 6,
+ [INT_SDC1_1] = 7,
+ [INT_SDC2_0] = 8,
+
+ [INT_SDC2_1] = 9,
+ [INT_ADSP_A9_A11] = 10,
+ [INT_UART1] = 11,
+ [INT_UART2] = 12,
+
+ [INT_UART3] = 13,
+ [INT_UART1_RX] = 14,
+ [INT_UART2_RX] = 15,
+ [INT_UART3_RX] = 16,
+
+ [INT_UART1DM_IRQ] = 17,
+ [INT_UART1DM_RX] = 18,
+ [INT_KEYSENSE] = 19,
+#if !defined(CONFIG_ARCH_MSM7X30)
+ [INT_AD_HSSD] = 20,
+#endif
+
+ [INT_NAND_WR_ER_DONE] = 21,
+ [INT_NAND_OP_DONE] = 22,
+ [INT_TCHSCRN1] = 23,
+ [INT_TCHSCRN2] = 24,
+
+ [INT_TCHSCRN_SSBI] = 25,
+ [INT_USB_HS] = 26,
+ [INT_UART2DM_RX] = 27,
+ [INT_UART2DM_IRQ] = 28,
+
+ [INT_SDC4_1] = 29,
+ [INT_SDC4_0] = 30,
+ [INT_SDC3_1] = 31,
+ [INT_SDC3_0] = 32,
+
+ /* fake wakeup interrupts */
+ [INT_GPIO_GROUP1] = SMSM_FAKE_IRQ,
+ [INT_GPIO_GROUP2] = SMSM_FAKE_IRQ,
+ [INT_A9_M2A_0] = SMSM_FAKE_IRQ,
+ [INT_A9_M2A_1] = SMSM_FAKE_IRQ,
+ [INT_A9_M2A_5] = SMSM_FAKE_IRQ,
+ [INT_GP_TIMER_EXP] = SMSM_FAKE_IRQ,
+ [INT_DEBUG_TIMER_EXP] = SMSM_FAKE_IRQ,
+ [INT_ADSP_A11] = SMSM_FAKE_IRQ,
+#ifdef CONFIG_ARCH_QSD8X50
+ [INT_SIRC_0] = SMSM_FAKE_IRQ,
+ [INT_SIRC_1] = SMSM_FAKE_IRQ,
+#endif
+};
+
+static inline void msm_irq_write_all_regs(void __iomem *base, unsigned int val)
+{
+ int i;
+
+ for (i = 0; i < VIC_NUM_REGS; i++)
+ writel(val, base + (i * 4));
+}
+
+static void msm_irq_ack(unsigned int irq)
+{
+ void __iomem *reg = VIC_INT_TO_REG_ADDR(VIC_INT_CLEAR0, irq);
+ irq = 1 << (irq & 31);
+ writel(irq, reg);
+}
+
+static void msm_irq_mask(unsigned int irq)
+{
+ void __iomem *reg = VIC_INT_TO_REG_ADDR(VIC_INT_ENCLEAR0, irq);
+ unsigned index = VIC_INT_TO_REG_INDEX(irq);
+ uint32_t mask = 1UL << (irq & 31);
+ int smsm_irq = msm_irq_to_smsm[irq];
+
+ msm_irq_shadow_reg[index].int_en[0] &= ~mask;
+ writel(mask, reg);
+ if (smsm_irq == 0)
+ msm_irq_idle_disable[index] &= ~mask;
+ else {
+ mask = 1UL << (smsm_irq - 1);
+ msm_irq_smsm_wake_enable[0] &= ~mask;
+ }
+}
+
+static void msm_irq_unmask(unsigned int irq)
+{
+ void __iomem *reg = VIC_INT_TO_REG_ADDR(VIC_INT_ENSET0, irq);
+ unsigned index = VIC_INT_TO_REG_INDEX(irq);
+ uint32_t mask = 1UL << (irq & 31);
+ int smsm_irq = msm_irq_to_smsm[irq];
+
+ msm_irq_shadow_reg[index].int_en[0] |= mask;
+ writel(mask, reg);
+
+ if (smsm_irq == 0)
+ msm_irq_idle_disable[index] |= mask;
+ else {
+ mask = 1UL << (smsm_irq - 1);
+ msm_irq_smsm_wake_enable[0] |= mask;
+ }
+}
+
+static int msm_irq_set_wake(unsigned int irq, unsigned int on)
+{
+ unsigned index = VIC_INT_TO_REG_INDEX(irq);
+ uint32_t mask = 1UL << (irq & 31);
+ int smsm_irq = msm_irq_to_smsm[irq];
+
+ if (smsm_irq == 0) {
+ printk(KERN_ERR "msm_irq_set_wake: bad wakeup irq %d\n", irq);
+ return -EINVAL;
+ }
+ if (on)
+ msm_irq_shadow_reg[index].int_en[1] |= mask;
+ else
+ msm_irq_shadow_reg[index].int_en[1] &= ~mask;
+
+ if (smsm_irq == SMSM_FAKE_IRQ)
+ return 0;
+
+ mask = 1UL << (smsm_irq - 1);
+ if (on)
+ msm_irq_smsm_wake_enable[1] |= mask;
+ else
+ msm_irq_smsm_wake_enable[1] &= ~mask;
+ return 0;
+}
+
+static int msm_irq_set_type(unsigned int irq, unsigned int flow_type)
+{
+ void __iomem *treg = VIC_INT_TO_REG_ADDR(VIC_INT_TYPE0, irq);
+ void __iomem *preg = VIC_INT_TO_REG_ADDR(VIC_INT_POLARITY0, irq);
+ unsigned index = VIC_INT_TO_REG_INDEX(irq);
+ int b = 1 << (irq & 31);
+ uint32_t polarity;
+ uint32_t type;
+
+ polarity = msm_irq_shadow_reg[index].int_polarity;
+ if (flow_type & (IRQF_TRIGGER_FALLING | IRQF_TRIGGER_LOW))
+ polarity |= b;
+ if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_HIGH))
+ polarity &= ~b;
+ writel(polarity, preg);
+ msm_irq_shadow_reg[index].int_polarity = polarity;
+
+ type = msm_irq_shadow_reg[index].int_type;
+ if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) {
+ type |= b;
+ irq_desc[irq].handle_irq = handle_edge_irq;
+ }
+ if (flow_type & (IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW)) {
+ type &= ~b;
+ irq_desc[irq].handle_irq = handle_level_irq;
+ }
+ writel(type, treg);
+ msm_irq_shadow_reg[index].int_type = type;
+ return 0;
+}
+
+static struct irq_chip msm_irq_chip = {
+ .name = "msm",
+ .disable = msm_irq_mask,
+ .ack = msm_irq_ack,
+ .mask = msm_irq_mask,
+ .unmask = msm_irq_unmask,
+ .set_wake = msm_irq_set_wake,
+ .set_type = msm_irq_set_type,
+};
+
+void __init msm_init_irq(void)
+{
+ unsigned n;
+
+ /* select level interrupts */
+ msm_irq_write_all_regs(VIC_INT_TYPE0, 0);
+
+ /* select highlevel interrupts */
+ msm_irq_write_all_regs(VIC_INT_POLARITY0, 0);
+
+ /* select IRQ for all INTs */
+ msm_irq_write_all_regs(VIC_INT_SELECT0, 0);
+
+ /* disable all INTs */
+ msm_irq_write_all_regs(VIC_INT_EN0, 0);
+
+ /* don't use vic */
+ writel(0, VIC_CONFIG);
+
+ /* enable interrupt controller */
+ writel(3, VIC_INT_MASTEREN);
+
+ for (n = 0; n < NR_MSM_IRQS; n++) {
+ set_irq_chip(n, &msm_irq_chip);
+ set_irq_handler(n, handle_level_irq);
+ set_irq_flags(n, IRQF_VALID);
+ }
+}
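
[Editor's aside, not part of the patch: a worked example of the VIC register-indexing arithmetic used above.]

	/* Illustrative only:
	 *   irq = 35
	 *   VIC_INT_TO_REG_ADDR(VIC_INT_ENSET0, 35) = VIC_INT_ENSET0 + (35 / 32) * 4
	 *                                            = VIC_INT_ENSET1
	 *   VIC_INT_TO_REG_INDEX(35)                 = (35 >> 5) & 3 = 1
	 *   bit within the register                  = 35 & 31       = 3
	 * so unmasking irq 35 writes (1 << 3) to VIC_INT_ENSET1 and updates
	 * msm_irq_shadow_reg[1].
	 */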
diff --git a/arch/arm/mach-msm/irq.c b/arch/arm/mach-msm/irq.c
index 69ca0dd79bdf..6c8d5f8caef3 100644
--- a/arch/arm/mach-msm/irq.c
+++ b/arch/arm/mach-msm/irq.c
@@ -101,11 +101,11 @@ static int msm_irq_set_type(unsigned int irq, unsigned int flow_type)
if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) {
writel(readl(treg) | b, treg);
- set_irq_handler(irq, handle_edge_irq);
+ irq_desc[irq].handle_irq = handle_edge_irq;
}
if (flow_type & (IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW)) {
writel(readl(treg) & (~b), treg);
- set_irq_handler(irq, handle_level_irq);
+ irq_desc[irq].handle_irq = handle_level_irq;
}
return 0;
}
diff --git a/arch/arm/mach-msm/last_radio_log.c b/arch/arm/mach-msm/last_radio_log.c
new file mode 100644
index 000000000000..b64ba5a98686
--- /dev/null
+++ b/arch/arm/mach-msm/last_radio_log.c
@@ -0,0 +1,82 @@
+/* arch/arm/mach-msm/last_radio_log.c
+ *
+ * Extract the log from a modem crash though SMEM
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/proc_fs.h>
+#include <linux/uaccess.h>
+
+#include "smd_private.h"
+
+static void *radio_log_base;
+static size_t radio_log_size;
+
+extern void *smem_item(unsigned id, unsigned *size);
+
+static ssize_t last_radio_log_read(struct file *file, char __user *buf,
+ size_t len, loff_t *offset)
+{
+ loff_t pos = *offset;
+ ssize_t count;
+
+ if (pos >= radio_log_size)
+ return 0;
+
+ count = min(len, (size_t)(radio_log_size - pos));
+ if (copy_to_user(buf, radio_log_base + pos, count)) {
+ pr_err("%s: copy to user failed\n", __func__);
+ return -EFAULT;
+ }
+
+ *offset += count;
+ return count;
+}
+
+static struct file_operations last_radio_log_fops = {
+ .read = last_radio_log_read
+};
+
+void msm_init_last_radio_log(struct module *owner)
+{
+ struct proc_dir_entry *entry;
+
+ if (last_radio_log_fops.owner) {
+ pr_err("%s: already claimed\n", __func__);
+ return;
+ }
+
+ radio_log_base = smem_item(SMEM_CLKREGIM_BSP, &radio_log_size);
+ if (!radio_log_base) {
+ pr_err("%s: could not retrieve SMEM_CLKREGIM_BSP\n", __func__);
+ return;
+ }
+
+ entry = create_proc_entry("last_radio_log", S_IFREG | S_IRUGO, NULL);
+ if (!entry) {
+ pr_err("%s: could not create proc entry for radio log\n",
+ __func__);
+ return;
+ }
+
+ pr_err("%s: last radio log is %d bytes long\n", __func__,
+ radio_log_size);
+ last_radio_log_fops.owner = owner;
+ entry->proc_fops = &last_radio_log_fops;
+ entry->size = radio_log_size;
+}
+EXPORT_SYMBOL(msm_init_last_radio_log);
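
[Editor's aside, not part of the patch: a sketch of the intended caller; whichever module owns the SMEM log registers itself so the /proc/last_radio_log file pins that module. The caller shown is hypothetical.]

	#include <linux/module.h>

	extern void msm_init_last_radio_log(struct module *owner);

	static int __init example_radio_log_init(void)
	{
		msm_init_last_radio_log(THIS_MODULE);
		return 0;
	}
	module_init(example_radio_log_init);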
diff --git a/arch/arm/mach-msm/proc_comm.c b/arch/arm/mach-msm/proc_comm.c
index 915ee704ed3c..67e701c7f183 100644
--- a/arch/arm/mach-msm/proc_comm.c
+++ b/arch/arm/mach-msm/proc_comm.c
@@ -23,11 +23,18 @@
#include "proc_comm.h"
-#define MSM_A2M_INT(n) (MSM_CSR_BASE + 0x400 + (n) * 4)
+static inline void msm_a2m_int(uint32_t irq)
+{
+#if defined(CONFIG_ARCH_MSM7X30)
+ writel(1 << irq, MSM_GCC_BASE + 0x8);
+#else
+ writel(1, MSM_CSR_BASE + 0x400 + (irq * 4));
+#endif
+}
static inline void notify_other_proc_comm(void)
{
- writel(1, MSM_A2M_INT(6));
+ msm_a2m_int(6);
}
#define APP_COMMAND 0x00
@@ -107,4 +114,17 @@ int msm_proc_comm(unsigned cmd, unsigned *data1, unsigned *data2)
return ret;
}
-
+/*
+ * We need to wait for the ARM9 to at least partially boot
+ * up before we can continue. Since the ARM9 does resource
+ * allocation, if we don't wait we could end up crashing or in
+ * an unknown state. This function should be called early to
+ * wait on the ARM9.
+ */
+void __init proc_comm_boot_wait(void)
+{
+ void __iomem *base = MSM_SHARED_RAM_BASE;
+
+ proc_comm_wait_for(base + MDM_STATUS, PCOM_READY);
+
+}
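
[Editor's aside, not part of the patch: per the comment above, the intended call site is early machine init, before any proc_comm traffic. The board init function name is hypothetical.]

	#include "proc_comm.h"

	static void __init example_board_init(void)
	{
		/* Block until the modem reports PCOM_READY. */
		proc_comm_boot_wait();

		/* subsequent msm_proc_comm() calls are now safe */
	}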
diff --git a/arch/arm/mach-msm/proc_comm.h b/arch/arm/mach-msm/proc_comm.h
index 834760f25692..12da4cacd4a8 100644
--- a/arch/arm/mach-msm/proc_comm.h
+++ b/arch/arm/mach-msm/proc_comm.h
@@ -16,6 +16,8 @@
#ifndef _ARCH_ARM_MACH_MSM_PROC_COMM_H_
#define _ARCH_ARM_MACH_MSM_PROC_COMM_H_
+#include <linux/init.h>
+
enum {
PCOM_CMD_IDLE = 0x0,
PCOM_CMD_DONE,
@@ -62,19 +64,104 @@ enum {
PCOM_RESET_CHIP_IMM,
PCOM_PM_VID_EN,
PCOM_VREG_PULLDOWN,
+ PCOM_GET_MODEM_VERSION,
+ PCOM_CLK_REGIME_SEC_RESET,
+ PCOM_CLK_REGIME_SEC_RESET_ASSERT,
+ PCOM_CLK_REGIME_SEC_RESET_DEASSERT,
+ PCOM_CLK_REGIME_SEC_PLL_REQUEST_WRP,
+ PCOM_CLK_REGIME_SEC_ENABLE,
+ PCOM_CLK_REGIME_SEC_DISABLE,
+ PCOM_CLK_REGIME_SEC_IS_ON,
+ PCOM_CLK_REGIME_SEC_SEL_CLK_INV,
+ PCOM_CLK_REGIME_SEC_SEL_CLK_SRC,
+ PCOM_CLK_REGIME_SEC_SEL_CLK_DIV,
+ PCOM_CLK_REGIME_SEC_ICODEC_CLK_ENABLE,
+ PCOM_CLK_REGIME_SEC_ICODEC_CLK_DISABLE,
+ PCOM_CLK_REGIME_SEC_SEL_SPEED,
+ PCOM_CLK_REGIME_SEC_CONFIG_GP_CLK_WRP,
+ PCOM_CLK_REGIME_SEC_CONFIG_MDH_CLK_WRP,
+ PCOM_CLK_REGIME_SEC_USB_XTAL_ON,
+ PCOM_CLK_REGIME_SEC_USB_XTAL_OFF,
+ PCOM_CLK_REGIME_SEC_SET_QDSP_DME_MODE,
+ PCOM_CLK_REGIME_SEC_SWITCH_ADSP_CLK,
+ PCOM_CLK_REGIME_SEC_GET_MAX_ADSP_CLK_KHZ,
+ PCOM_CLK_REGIME_SEC_GET_I2C_CLK_KHZ,
+ PCOM_CLK_REGIME_SEC_MSM_GET_CLK_FREQ_KHZ,
+ PCOM_CLK_REGIME_SEC_SEL_VFE_SRC,
+ PCOM_CLK_REGIME_SEC_MSM_SEL_CAMCLK,
+ PCOM_CLK_REGIME_SEC_MSM_SEL_LCDCLK,
+ PCOM_CLK_REGIME_SEC_VFE_RAIL_OFF,
+ PCOM_CLK_REGIME_SEC_VFE_RAIL_ON,
+ PCOM_CLK_REGIME_SEC_GRP_RAIL_OFF,
+ PCOM_CLK_REGIME_SEC_GRP_RAIL_ON,
+ PCOM_CLK_REGIME_SEC_VDC_RAIL_OFF,
+ PCOM_CLK_REGIME_SEC_VDC_RAIL_ON,
+ PCOM_CLK_REGIME_SEC_LCD_CTRL,
+ PCOM_CLK_REGIME_SEC_REGISTER_FOR_CPU_RESOURCE,
+ PCOM_CLK_REGIME_SEC_DEREGISTER_FOR_CPU_RESOURCE,
+ PCOM_CLK_REGIME_SEC_RESOURCE_REQUEST_WRP,
+ PCOM_CLK_REGIME_MSM_SEC_SEL_CLK_OWNER,
+ PCOM_CLK_REGIME_SEC_DEVMAN_REQUEST_WRP,
+ PCOM_GPIO_CONFIG,
+ PCOM_GPIO_CONFIGURE_GROUP,
+ PCOM_GPIO_TLMM_SET_PORT,
+ PCOM_GPIO_TLMM_CONFIG_EX,
+ PCOM_SET_FTM_BOOT_COUNT,
+ PCOM_RESERVED0,
+ PCOM_RESERVED1,
+ PCOM_CUSTOMER_CMD1,
+ PCOM_CUSTOMER_CMD2,
+ PCOM_CUSTOMER_CMD3,
+ PCOM_CLK_REGIME_ENTER_APPSBL_CHG_MODE,
+ PCOM_CLK_REGIME_EXIT_APPSBL_CHG_MODE,
+ PCOM_CLK_REGIME_SEC_RAIL_DISABLE,
+ PCOM_CLK_REGIME_SEC_RAIL_ENABLE,
+ PCOM_CLK_REGIME_SEC_RAIL_CONTROL,
+ PCOM_SET_SW_WATCHDOG_STATE,
+ PCOM_PM_MPP_CONFIG_DIGITAL_INPUT,
+ PCOM_PM_MPP_CONFIG_I_SINK,
+ PCOM_RESERVED_101,
+ PCOM_MSM_HSUSB_PHY_RESET,
+ PCOM_GET_BATT_MV_LEVEL,
+ PCOM_CHG_USB_IS_PC_CONNECTED,
+ PCOM_CHG_USB_IS_CHARGER_CONNECTED,
+ PCOM_CHG_USB_IS_DISCONNECTED,
+ PCOM_CHG_USB_IS_AVAILABLE,
+ PCOM_CLK_REGIME_SEC_MSM_SEL_FREQ,
+ PCOM_CLK_REGIME_SEC_SET_PCLK_AXI_POLICY,
+ PCOM_CLKCTL_RPC_RESET_ASSERT,
+ PCOM_CLKCTL_RPC_RESET_DEASSERT,
+ PCOM_CLKCTL_RPC_RAIL_ON,
+ PCOM_CLKCTL_RPC_RAIL_OFF,
+ PCOM_CLKCTL_RPC_RAIL_ENABLE,
+ PCOM_CLKCTL_RPC_RAIL_DISABLE,
+ PCOM_CLKCTL_RPC_RAIL_CONTROL,
+ PCOM_CLKCTL_RPC_MIN_MSMC1,
PCOM_NUM_CMDS,
};
enum {
- PCOM_INVALID_STATUS = 0x0,
- PCOM_READY,
- PCOM_CMD_RUNNING,
- PCOM_CMD_SUCCESS,
- PCOM_CMD_FAIL,
+ PCOM_INVALID_STATUS = 0x0,
+ PCOM_READY,
+ PCOM_CMD_RUNNING,
+ PCOM_CMD_SUCCESS,
+ PCOM_CMD_FAIL,
+ PCOM_CMD_FAIL_FALSE_RETURNED,
+ PCOM_CMD_FAIL_CMD_OUT_OF_BOUNDS_SERVER,
+ PCOM_CMD_FAIL_CMD_OUT_OF_BOUNDS_CLIENT,
+ PCOM_CMD_FAIL_CMD_UNREGISTERED,
+ PCOM_CMD_FAIL_CMD_LOCKED,
+ PCOM_CMD_FAIL_SERVER_NOT_YET_READY,
+ PCOM_CMD_FAIL_BAD_DESTINATION,
+ PCOM_CMD_FAIL_SERVER_RESET,
+ PCOM_CMD_FAIL_SMSM_NOT_INIT,
+ PCOM_CMD_FAIL_PROC_COMM_BUSY,
+ PCOM_CMD_FAIL_PROC_COMM_NOT_INIT,
+
};
/* List of VREGs that support the Pull Down Resistor setting. */
-enum {
+enum vreg_pdown_id {
PM_VREG_PDOWN_MSMA_ID,
PM_VREG_PDOWN_MSMP_ID,
PM_VREG_PDOWN_MSME1_ID, /* Not supported in Panoramix */
@@ -131,6 +218,11 @@ enum {
PM_VREG_PDOWN_XO_ID = PM_VREG_PDOWN_TCXO_ID
};
+enum {
+ PCOM_CLKRGM_APPS_RESET_USB_PHY = 34,
+ PCOM_CLKRGM_APPS_RESET_USBH = 37,
+};
+
/* gpio info for PCOM_RPC_GPIO_TLMM_CONFIG_EX */
#define GPIO_ENABLE 0
@@ -161,5 +253,6 @@ enum {
(((drvstr) & 0xF) << 17))
int msm_proc_comm(unsigned cmd, unsigned *data1, unsigned *data2);
+void __init proc_comm_boot_wait(void);
#endif
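
[Editor's aside, not part of the patch: a hedged example of issuing one of the newly listed commands. That the result comes back in data1 is an assumption made for illustration; the exact argument semantics of PCOM_GET_BATT_MV_LEVEL are not spelled out in this header.]

	#include "proc_comm.h"

	/* Sketch: ask the modem for the battery voltage. */
	static int example_get_batt_mv(unsigned *mv)
	{
		unsigned data1 = 0, data2 = 0;
		int ret = msm_proc_comm(PCOM_GET_BATT_MV_LEVEL, &data1, &data2);

		if (!ret)
			*mv = data1;	/* assumed: value returned via data1 */
		return ret;
	}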
diff --git a/arch/arm/mach-msm/sirc.c b/arch/arm/mach-msm/sirc.c
new file mode 100644
index 000000000000..b0794524ba6e
--- /dev/null
+++ b/arch/arm/mach-msm/sirc.c
@@ -0,0 +1,177 @@
+/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <asm/irq.h>
+
+static unsigned int int_enable;
+static unsigned int wake_enable;
+
+static struct sirc_regs_t sirc_regs = {
+ .int_enable = SPSS_SIRC_INT_ENABLE,
+ .int_enable_clear = SPSS_SIRC_INT_ENABLE_CLEAR,
+ .int_enable_set = SPSS_SIRC_INT_ENABLE_SET,
+ .int_type = SPSS_SIRC_INT_TYPE,
+ .int_polarity = SPSS_SIRC_INT_POLARITY,
+ .int_clear = SPSS_SIRC_INT_CLEAR,
+};
+
+static struct sirc_cascade_regs sirc_reg_table[] = {
+ {
+ .int_status = SPSS_SIRC_IRQ_STATUS,
+ .cascade_irq = INT_SIRC_0,
+ }
+};
+
+static unsigned int save_type;
+static unsigned int save_polarity;
+
+/* Mask off the given interrupt. Keep the int_enable mask in sync with
+ the enable reg, so it can be restored after power collapse. */
+static void sirc_irq_mask(unsigned int irq)
+{
+ unsigned int mask;
+
+
+ mask = 1 << (irq - FIRST_SIRC_IRQ);
+ writel(mask, sirc_regs.int_enable_clear);
+ int_enable &= ~mask;
+ return;
+}
+
+/* Unmask the given interrupt. Keep the int_enable mask in sync with
+ the enable reg, so it can be restored after power collapse. */
+static void sirc_irq_unmask(unsigned int irq)
+{
+ unsigned int mask;
+
+ mask = 1 << (irq - FIRST_SIRC_IRQ);
+ writel(mask, sirc_regs.int_enable_set);
+ int_enable |= mask;
+ return;
+}
+
+static void sirc_irq_ack(unsigned int irq)
+{
+ unsigned int mask;
+
+ mask = 1 << (irq - FIRST_SIRC_IRQ);
+ writel(mask, sirc_regs.int_clear);
+ return;
+}
+
+static int sirc_irq_set_wake(unsigned int irq, unsigned int on)
+{
+ unsigned int mask;
+
+ /* Used to set the interrupt enable mask during power collapse. */
+ mask = 1 << (irq - FIRST_SIRC_IRQ);
+ if (on)
+ wake_enable |= mask;
+ else
+ wake_enable &= ~mask;
+
+ return 0;
+}
+
+static int sirc_irq_set_type(unsigned int irq, unsigned int flow_type)
+{
+ unsigned int mask;
+ unsigned int val;
+
+ mask = 1 << (irq - FIRST_SIRC_IRQ);
+ val = readl(sirc_regs.int_polarity);
+
+ if (flow_type & (IRQF_TRIGGER_LOW | IRQF_TRIGGER_FALLING))
+ val |= mask;
+ else
+ val &= ~mask;
+
+ writel(val, sirc_regs.int_polarity);
+
+ val = readl(sirc_regs.int_type);
+ if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) {
+ val |= mask;
+ irq_desc[irq].handle_irq = handle_edge_irq;
+ } else {
+ val &= ~mask;
+ irq_desc[irq].handle_irq = handle_level_irq;
+ }
+
+ writel(val, sirc_regs.int_type);
+
+ return 0;
+}
+
+/* Finds the pending interrupt on the passed cascade irq and redrives it */
+static void sirc_irq_handler(unsigned int irq, struct irq_desc *desc)
+{
+ unsigned int reg = 0;
+ unsigned int sirq;
+ unsigned int status;
+
+ while ((reg < ARRAY_SIZE(sirc_reg_table)) &&
+ (sirc_reg_table[reg].cascade_irq != irq))
+ reg++;
+
+ status = readl(sirc_reg_table[reg].int_status);
+ status &= SIRC_MASK;
+ if (status == 0)
+ return;
+
+ for (sirq = 0;
+ (sirq < NR_SIRC_IRQS) && ((status & (1U << sirq)) == 0);
+ sirq++)
+ ;
+ generic_handle_irq(sirq+FIRST_SIRC_IRQ);
+
+ desc->chip->ack(irq);
+}
+
+static struct irq_chip sirc_irq_chip = {
+ .name = "sirc",
+ .ack = sirc_irq_ack,
+ .mask = sirc_irq_mask,
+ .unmask = sirc_irq_unmask,
+ .set_wake = sirc_irq_set_wake,
+ .set_type = sirc_irq_set_type,
+};
+
+void __init msm_init_sirc(void)
+{
+ int i;
+
+ int_enable = 0;
+ wake_enable = 0;
+
+ for (i = FIRST_SIRC_IRQ; i < LAST_SIRC_IRQ; i++) {
+ set_irq_chip(i, &sirc_irq_chip);
+ set_irq_handler(i, handle_edge_irq);
+ set_irq_flags(i, IRQF_VALID);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(sirc_reg_table); i++) {
+ set_irq_chained_handler(sirc_reg_table[i].cascade_irq,
+ sirc_irq_handler);
+ set_irq_wake(sirc_reg_table[i].cascade_irq, 1);
+ }
+ return;
+}
+
diff --git a/drivers/staging/dream/smd/smd.c b/arch/arm/mach-msm/smd.c
index 8f35be7193fb..cf11d414b425 100644
--- a/drivers/staging/dream/smd/smd.c
+++ b/arch/arm/mach-msm/smd.c
@@ -26,14 +26,16 @@
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
-#include <linux/io.h>
#include <mach/msm_smd.h>
-#include <mach/msm_iomap.h>
#include <mach/system.h>
#include "smd_private.h"
-#include "../../../../arch/arm/mach-msm/proc_comm.h"
+#include "proc_comm.h"
+
+#if defined(CONFIG_ARCH_QSD8X50)
+#define CONFIG_QDSP6 1
+#endif
void (*msm_hw_reset_hook)(void);
@@ -46,24 +48,38 @@ enum {
static int msm_smd_debug_mask;
+struct shared_info {
+ int ready;
+ unsigned state;
+};
+
+static unsigned dummy_state[SMSM_STATE_COUNT];
+
+static struct shared_info smd_info = {
+ .state = (unsigned) &dummy_state,
+};
+
module_param_named(debug_mask, msm_smd_debug_mask,
int, S_IRUGO | S_IWUSR | S_IWGRP);
-void *smem_find(unsigned id, unsigned size);
-static void smd_diag(void);
-
static unsigned last_heap_free = 0xffffffff;
-#define MSM_A2M_INT(n) (MSM_CSR_BASE + 0x400 + (n) * 4)
-
static inline void notify_other_smsm(void)
{
- writel(1, MSM_A2M_INT(5));
+ msm_a2m_int(5);
+#ifdef CONFIG_QDSP6
+ msm_a2m_int(8);
+#endif
}
-static inline void notify_other_smd(void)
+static inline void notify_modem_smd(void)
{
- writel(1, MSM_A2M_INT(0));
+ msm_a2m_int(0);
+}
+
+static inline void notify_dsp_smd(void)
+{
+ msm_a2m_int(8);
}
static void smd_diag(void)
@@ -92,166 +108,53 @@ static void handle_modem_crash(void)
;
}
-extern int (*msm_check_for_modem_crash)(void);
+uint32_t raw_smsm_get_state(enum smsm_state_item item)
+{
+ return readl(smd_info.state + item * 4);
+}
static int check_for_modem_crash(void)
{
- struct smsm_shared *smsm;
-
- smsm = smem_find(ID_SHARED_STATE, 2 * sizeof(struct smsm_shared));
-
- /* if the modem's not ready yet, we have to hope for the best */
- if (!smsm)
- return 0;
-
- if (smsm[1].state & SMSM_RESET) {
+ if (raw_smsm_get_state(SMSM_STATE_MODEM) & SMSM_RESET) {
handle_modem_crash();
return -1;
- } else {
- return 0;
}
+ return 0;
}
-#define SMD_SS_CLOSED 0x00000000
-#define SMD_SS_OPENING 0x00000001
-#define SMD_SS_OPENED 0x00000002
-#define SMD_SS_FLUSHING 0x00000003
-#define SMD_SS_CLOSING 0x00000004
-#define SMD_SS_RESET 0x00000005
-#define SMD_SS_RESET_OPENING 0x00000006
-
-#define SMD_BUF_SIZE 8192
-#define SMD_CHANNELS 64
-
-#define SMD_HEADER_SIZE 20
-
-
/* the spinlock is used to synchronize between the
-** irq handler and code that mutates the channel
-** list or fiddles with channel state
-*/
-static DEFINE_SPINLOCK(smd_lock);
-static DEFINE_SPINLOCK(smem_lock);
+ * irq handler and code that mutates the channel
+ * list or fiddles with channel state
+ */
+DEFINE_SPINLOCK(smd_lock);
+DEFINE_SPINLOCK(smem_lock);
/* the mutex is used during open() and close()
-** operations to avoid races while creating or
-** destroying smd_channel structures
-*/
+ * operations to avoid races while creating or
+ * destroying smd_channel structures
+ */
static DEFINE_MUTEX(smd_creation_mutex);
static int smd_initialized;
-struct smd_alloc_elm {
- char name[20];
- uint32_t cid;
- uint32_t ctype;
- uint32_t ref_count;
-};
-
-struct smd_half_channel {
- unsigned state;
- unsigned char fDSR;
- unsigned char fCTS;
- unsigned char fCD;
- unsigned char fRI;
- unsigned char fHEAD;
- unsigned char fTAIL;
- unsigned char fSTATE;
- unsigned char fUNUSED;
- unsigned tail;
- unsigned head;
- unsigned char data[SMD_BUF_SIZE];
-};
-
-struct smd_shared {
- struct smd_half_channel ch0;
- struct smd_half_channel ch1;
-};
-
-struct smd_channel {
- volatile struct smd_half_channel *send;
- volatile struct smd_half_channel *recv;
- struct list_head ch_list;
-
- unsigned current_packet;
- unsigned n;
- void *priv;
- void (*notify)(void *priv, unsigned flags);
-
- int (*read)(smd_channel_t *ch, void *data, int len);
- int (*write)(smd_channel_t *ch, const void *data, int len);
- int (*read_avail)(smd_channel_t *ch);
- int (*write_avail)(smd_channel_t *ch);
-
- void (*update_state)(smd_channel_t *ch);
- unsigned last_state;
-
- char name[32];
- struct platform_device pdev;
-};
-
-static LIST_HEAD(smd_ch_closed_list);
-static LIST_HEAD(smd_ch_list);
+LIST_HEAD(smd_ch_closed_list);
+LIST_HEAD(smd_ch_list_modem);
+LIST_HEAD(smd_ch_list_dsp);
static unsigned char smd_ch_allocated[64];
static struct work_struct probe_work;
-static void smd_alloc_channel(const char *name, uint32_t cid, uint32_t type);
-
-static void smd_channel_probe_worker(struct work_struct *work)
-{
- struct smd_alloc_elm *shared;
- unsigned n;
-
- shared = smem_find(ID_CH_ALLOC_TBL, sizeof(*shared) * 64);
-
- for (n = 0; n < 64; n++) {
- if (smd_ch_allocated[n])
- continue;
- if (!shared[n].ref_count)
- continue;
- if (!shared[n].name[0])
- continue;
- smd_alloc_channel(shared[n].name,
- shared[n].cid,
- shared[n].ctype);
- smd_ch_allocated[n] = 1;
- }
-}
-
-static char *chstate(unsigned n)
-{
- switch (n) {
- case SMD_SS_CLOSED:
- return "CLOSED";
- case SMD_SS_OPENING:
- return "OPENING";
- case SMD_SS_OPENED:
- return "OPENED";
- case SMD_SS_FLUSHING:
- return "FLUSHING";
- case SMD_SS_CLOSING:
- return "CLOSING";
- case SMD_SS_RESET:
- return "RESET";
- case SMD_SS_RESET_OPENING:
- return "ROPENING";
- default:
- return "UNKNOWN";
- }
-}
-
/* how many bytes are available for reading */
static int smd_stream_read_avail(struct smd_channel *ch)
{
- return (ch->recv->head - ch->recv->tail) & (SMD_BUF_SIZE - 1);
+ return (ch->recv->head - ch->recv->tail) & ch->fifo_mask;
}
/* how many bytes we are free to write */
static int smd_stream_write_avail(struct smd_channel *ch)
{
- return (SMD_BUF_SIZE - 1) -
- ((ch->send->head - ch->send->tail) & (SMD_BUF_SIZE - 1));
+ return ch->fifo_mask -
+ ((ch->send->head - ch->send->tail) & ch->fifo_mask);
}
static int smd_packet_read_avail(struct smd_channel *ch)
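
The stream FIFO arithmetic above works because head and tail are indices into a power-of-two ring: (head - tail) & fifo_mask gives the number of queued bytes regardless of wrap-around, and write space is capped at fifo_mask bytes so a completely full ring never looks identical to an empty one. A small self-contained sketch of the same math; the 8192-byte size mirrors SMD_BUF_SIZE, the sample head/tail values are illustrative:

#include <assert.h>
#include <stdio.h>

#define FIFO_SIZE 8192u			/* must be a power of two */
#define FIFO_MASK (FIFO_SIZE - 1)

static unsigned read_avail(unsigned head, unsigned tail)
{
	return (head - tail) & FIFO_MASK;
}

static unsigned write_avail(unsigned head, unsigned tail)
{
	/* one byte is always left unused so head == tail means "empty" */
	return FIFO_MASK - ((head - tail) & FIFO_MASK);
}

int main(void)
{
	/* writer has wrapped past the end of the ring, reader has not */
	unsigned head = 10, tail = 8100;

	assert(read_avail(head, tail) == (10 + FIFO_SIZE - 8100) % FIFO_SIZE);
	printf("read %u, write %u\n", read_avail(head, tail),
	       write_avail(head, tail));
	return 0;
}
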
@@ -283,26 +186,26 @@ static unsigned ch_read_buffer(struct smd_channel *ch, void **ptr)
{
unsigned head = ch->recv->head;
unsigned tail = ch->recv->tail;
- *ptr = (void *) (ch->recv->data + tail);
+ *ptr = (void *) (ch->recv_data + tail);
if (tail <= head)
return head - tail;
else
- return SMD_BUF_SIZE - tail;
+ return ch->fifo_size - tail;
}
/* advance the fifo read pointer after data from ch_read_buffer is consumed */
static void ch_read_done(struct smd_channel *ch, unsigned count)
{
BUG_ON(count > smd_stream_read_avail(ch));
- ch->recv->tail = (ch->recv->tail + count) & (SMD_BUF_SIZE - 1);
- ch->recv->fTAIL = 1;
+ ch->recv->tail = (ch->recv->tail + count) & ch->fifo_mask;
+ ch->send->fTAIL = 1;
}
/* basic read interface to ch_read_{buffer,done} used
-** by smd_*_read() and update_packet_state()
-** will read-and-discard if the _data pointer is null
-*/
+ * by smd_*_read() and update_packet_state()
+ * will read-and-discard if the _data pointer is null
+ */
static int ch_read(struct smd_channel *ch, void *_data, int len)
{
void *ptr;
@@ -357,15 +260,15 @@ static unsigned ch_write_buffer(struct smd_channel *ch, void **ptr)
{
unsigned head = ch->send->head;
unsigned tail = ch->send->tail;
- *ptr = (void *) (ch->send->data + head);
+ *ptr = (void *) (ch->send_data + head);
if (head < tail) {
return tail - head - 1;
} else {
if (tail == 0)
- return SMD_BUF_SIZE - head - 1;
+ return ch->fifo_size - head - 1;
else
- return SMD_BUF_SIZE - head;
+ return ch->fifo_size - head;
}
}
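
ch_write_buffer reports only the contiguous free span starting at head, so placing one payload may take two passes: a chunk up to the end of the ring, then a chunk from offset 0 on the next call. The tail == 0 special case trims one byte so head can never catch up to tail. A standalone sketch of the same computation (the size and sample indices are illustrative):

#include <stdio.h>

#define FIFO_SIZE 8192u

/* contiguous bytes writable at 'head' without wrapping */
static unsigned contig_write_space(unsigned head, unsigned tail)
{
	if (head < tail)
		return tail - head - 1;
	if (tail == 0)
		return FIFO_SIZE - head - 1;	/* keep one byte free */
	return FIFO_SIZE - head;		/* wrap handled on the next call */
}

int main(void)
{
	printf("%u\n", contig_write_space(8000, 100));	/* 192: up to end of ring */
	printf("%u\n", contig_write_space(8000, 0));	/* 191: one byte reserved */
	printf("%u\n", contig_write_space(50, 4000));	/* 3949: tail - head - 1 */
	return 0;
}
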
@@ -375,24 +278,24 @@ static unsigned ch_write_buffer(struct smd_channel *ch, void **ptr)
static void ch_write_done(struct smd_channel *ch, unsigned count)
{
BUG_ON(count > smd_stream_write_avail(ch));
- ch->send->head = (ch->send->head + count) & (SMD_BUF_SIZE - 1);
+ ch->send->head = (ch->send->head + count) & ch->fifo_mask;
ch->send->fHEAD = 1;
}
-static void hc_set_state(volatile struct smd_half_channel *hc, unsigned n)
+static void ch_set_state(struct smd_channel *ch, unsigned n)
{
if (n == SMD_SS_OPENED) {
- hc->fDSR = 1;
- hc->fCTS = 1;
- hc->fCD = 1;
+ ch->send->fDSR = 1;
+ ch->send->fCTS = 1;
+ ch->send->fCD = 1;
} else {
- hc->fDSR = 0;
- hc->fCTS = 0;
- hc->fCD = 0;
+ ch->send->fDSR = 0;
+ ch->send->fCTS = 0;
+ ch->send->fCD = 0;
}
- hc->state = n;
- hc->fSTATE = 1;
- notify_other_smd();
+ ch->send->state = n;
+ ch->send->fSTATE = 1;
+ ch->notify_other_cpu();
}
static void do_smd_probe(void)
@@ -409,15 +312,14 @@ static void smd_state_change(struct smd_channel *ch,
{
ch->last_state = next;
- pr_info("SMD: ch %d %s -> %s\n", ch->n,
- chstate(last), chstate(next));
+ pr_info("SMD: ch %d %d -> %d\n", ch->n, last, next);
switch (next) {
case SMD_SS_OPENING:
ch->recv->tail = 0;
case SMD_SS_OPENED:
if (ch->send->state != SMD_SS_OPENED)
- hc_set_state(ch->send, SMD_SS_OPENED);
+ ch_set_state(ch, SMD_SS_OPENED);
ch->notify(ch->priv, SMD_EVENT_OPEN);
break;
case SMD_SS_FLUSHING:
@@ -428,7 +330,7 @@ static void smd_state_change(struct smd_channel *ch,
}
}
-static irqreturn_t smd_irq_handler(int irq, void *data)
+static void handle_smd_irq(struct list_head *list, void (*notify)(void))
{
unsigned long flags;
struct smd_channel *ch;
@@ -437,7 +339,7 @@ static irqreturn_t smd_irq_handler(int irq, void *data)
unsigned tmp;
spin_lock_irqsave(&smd_lock, flags);
- list_for_each_entry(ch, &smd_ch_list, ch_list) {
+ list_for_each_entry(ch, list, ch_list) {
ch_flags = 0;
if (ch_is_open(ch)) {
if (ch->recv->fHEAD) {
@@ -465,67 +367,66 @@ static irqreturn_t smd_irq_handler(int irq, void *data)
}
}
if (do_notify)
- notify_other_smd();
+ notify();
spin_unlock_irqrestore(&smd_lock, flags);
do_smd_probe();
+}
+
+static irqreturn_t smd_modem_irq_handler(int irq, void *data)
+{
+ handle_smd_irq(&smd_ch_list_modem, notify_modem_smd);
return IRQ_HANDLED;
}
+#if defined(CONFIG_QDSP6)
+static irqreturn_t smd_dsp_irq_handler(int irq, void *data)
+{
+ handle_smd_irq(&smd_ch_list_dsp, notify_dsp_smd);
+ return IRQ_HANDLED;
+}
+#endif
+
static void smd_fake_irq_handler(unsigned long arg)
{
- smd_irq_handler(0, NULL);
+ handle_smd_irq(&smd_ch_list_modem, notify_modem_smd);
+ handle_smd_irq(&smd_ch_list_dsp, notify_dsp_smd);
}
static DECLARE_TASKLET(smd_fake_irq_tasklet, smd_fake_irq_handler, 0);
+static inline int smd_need_int(struct smd_channel *ch)
+{
+ if (ch_is_open(ch)) {
+ if (ch->recv->fHEAD || ch->recv->fTAIL || ch->recv->fSTATE)
+ return 1;
+ if (ch->recv->state != ch->last_state)
+ return 1;
+ }
+ return 0;
+}
+
void smd_sleep_exit(void)
{
unsigned long flags;
struct smd_channel *ch;
- unsigned tmp;
int need_int = 0;
spin_lock_irqsave(&smd_lock, flags);
- list_for_each_entry(ch, &smd_ch_list, ch_list) {
- if (ch_is_open(ch)) {
- if (ch->recv->fHEAD) {
- if (msm_smd_debug_mask & MSM_SMD_DEBUG)
- pr_info("smd_sleep_exit ch %d fHEAD "
- "%x %x %x\n",
- ch->n, ch->recv->fHEAD,
- ch->recv->head, ch->recv->tail);
- need_int = 1;
- break;
- }
- if (ch->recv->fTAIL) {
- if (msm_smd_debug_mask & MSM_SMD_DEBUG)
- pr_info("smd_sleep_exit ch %d fTAIL "
- "%x %x %x\n",
- ch->n, ch->recv->fTAIL,
- ch->send->head, ch->send->tail);
- need_int = 1;
- break;
- }
- if (ch->recv->fSTATE) {
- if (msm_smd_debug_mask & MSM_SMD_DEBUG)
- pr_info("smd_sleep_exit ch %d fSTATE %x"
- "\n", ch->n, ch->recv->fSTATE);
- need_int = 1;
- break;
- }
- tmp = ch->recv->state;
- if (tmp != ch->last_state) {
- if (msm_smd_debug_mask & MSM_SMD_DEBUG)
- pr_info("smd_sleep_exit ch %d "
- "state %x != %x\n",
- ch->n, tmp, ch->last_state);
- need_int = 1;
- break;
- }
+ list_for_each_entry(ch, &smd_ch_list_modem, ch_list) {
+ if (smd_need_int(ch)) {
+ need_int = 1;
+ break;
+ }
+ }
+ list_for_each_entry(ch, &smd_ch_list_dsp, ch_list) {
+ if (smd_need_int(ch)) {
+ need_int = 1;
+ break;
}
}
spin_unlock_irqrestore(&smd_lock, flags);
do_smd_probe();
+
if (need_int) {
if (msm_smd_debug_mask & MSM_SMD_DEBUG)
pr_info("smd_sleep_exit need interrupt\n");
@@ -550,12 +451,19 @@ void smd_kick(smd_channel_t *ch)
ch->notify(ch->priv, SMD_EVENT_CLOSE);
}
ch->notify(ch->priv, SMD_EVENT_DATA);
- notify_other_smd();
+ ch->notify_other_cpu();
spin_unlock_irqrestore(&smd_lock, flags);
}
-static int smd_is_packet(int chn)
+static int smd_is_packet(int chn, unsigned type)
{
+ type &= SMD_KIND_MASK;
+ if (type == SMD_KIND_PACKET)
+ return 1;
+ if (type == SMD_KIND_STREAM)
+ return 0;
+
+ /* older AMSS reports SMD_KIND_UNKNOWN always */
if ((chn > 4) || (chn == 1))
return 1;
else
@@ -585,7 +493,7 @@ static int smd_stream_write(smd_channel_t *ch, const void *_data, int len)
break;
}
- notify_other_smd();
+ ch->notify_other_cpu();
return orig_len - len;
}
@@ -618,7 +526,7 @@ static int smd_stream_read(smd_channel_t *ch, void *data, int len)
r = ch_read(ch, data, len);
if (r > 0)
- notify_other_smd();
+ ch->notify_other_cpu();
return r;
}
@@ -636,7 +544,7 @@ static int smd_packet_read(smd_channel_t *ch, void *data, int len)
r = ch_read(ch, data, len);
if (r > 0)
- notify_other_smd();
+ ch->notify_other_cpu();
spin_lock_irqsave(&smd_lock, flags);
ch->current_packet -= r;
@@ -646,28 +554,31 @@ static int smd_packet_read(smd_channel_t *ch, void *data, int len)
return r;
}
-static void smd_alloc_channel(const char *name, uint32_t cid, uint32_t type)
+static int smd_alloc_channel(const char *name, uint32_t cid, uint32_t type)
{
struct smd_channel *ch;
- struct smd_shared *shared;
-
- shared = smem_alloc(ID_SMD_CHANNELS + cid, sizeof(*shared));
- if (!shared) {
- pr_err("smd_alloc_channel() cid %d does not exist\n", cid);
- return;
- }
ch = kzalloc(sizeof(struct smd_channel), GFP_KERNEL);
if (ch == 0) {
pr_err("smd_alloc_channel() out of memory\n");
- return;
+ return -1;
}
-
- ch->send = &shared->ch0;
- ch->recv = &shared->ch1;
ch->n = cid;
- if (smd_is_packet(cid)) {
+ if (_smd_alloc_channel(ch)) {
+ kfree(ch);
+ return -1;
+ }
+
+ ch->fifo_mask = ch->fifo_size - 1;
+ ch->type = type;
+
+ if ((type & SMD_TYPE_MASK) == SMD_TYPE_APPS_MODEM)
+ ch->notify_other_cpu = notify_modem_smd;
+ else
+ ch->notify_other_cpu = notify_dsp_smd;
+
+ if (smd_is_packet(cid, type)) {
ch->read = smd_packet_read;
ch->write = smd_packet_write;
ch->read_avail = smd_packet_read_avail;
@@ -681,20 +592,60 @@ static void smd_alloc_channel(const char *name, uint32_t cid, uint32_t type)
ch->update_state = update_stream_state;
}
- memcpy(ch->name, "SMD_", 4);
+ if ((type & 0xff) == 0)
+ memcpy(ch->name, "SMD_", 4);
+ else
+ memcpy(ch->name, "DSP_", 4);
memcpy(ch->name + 4, name, 20);
ch->name[23] = 0;
ch->pdev.name = ch->name;
ch->pdev.id = -1;
- pr_info("smd_alloc_channel() '%s' cid=%d, shared=%p\n",
- ch->name, ch->n, shared);
+ pr_info("smd_alloc_channel() cid=%02d size=%05d '%s'\n",
+ ch->n, ch->fifo_size, ch->name);
mutex_lock(&smd_creation_mutex);
list_add(&ch->ch_list, &smd_ch_closed_list);
mutex_unlock(&smd_creation_mutex);
platform_device_register(&ch->pdev);
+ return 0;
+}
+
+static void smd_channel_probe_worker(struct work_struct *work)
+{
+ struct smd_alloc_elm *shared;
+ unsigned ctype;
+ unsigned type;
+ unsigned n;
+
+ shared = smem_find(ID_CH_ALLOC_TBL, sizeof(*shared) * 64);
+ if (!shared) {
+ pr_err("smd: cannot find allocation table\n");
+ return;
+ }
+ for (n = 0; n < 64; n++) {
+ if (smd_ch_allocated[n])
+ continue;
+ if (!shared[n].ref_count)
+ continue;
+ if (!shared[n].name[0])
+ continue;
+ ctype = shared[n].ctype;
+ type = ctype & SMD_TYPE_MASK;
+
+ /* DAL channels are stream but neither the modem,
+ * nor the DSP correctly indicate this. Fixup manually.
+ */
+ if (!memcmp(shared[n].name, "DAL", 3))
+ ctype = (ctype & (~SMD_KIND_MASK)) | SMD_KIND_STREAM;
+
+ type = shared[n].ctype & SMD_TYPE_MASK;
+ if ((type == SMD_TYPE_APPS_MODEM) ||
+ (type == SMD_TYPE_APPS_DSP))
+ if (!smd_alloc_channel(shared[n].name, shared[n].cid, ctype))
+ smd_ch_allocated[n] = 1;
+ }
}
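
The allocation-table walk above keys everything off the shared ctype word: the low byte selects the edge (apps<->modem or apps<->DSP) and bits 8-11 the kind (stream vs. packet), which is why DAL entries are patched to SMD_KIND_STREAM before smd_alloc_channel() sees them. A small decode sketch using the same masks from smd_private.h; the sample value is made up:

#include <stdio.h>

#define SMD_TYPE_MASK		0x0FF
#define SMD_TYPE_APPS_MODEM	0x000
#define SMD_TYPE_APPS_DSP	0x001

#define SMD_KIND_MASK		0xF00
#define SMD_KIND_STREAM		0x100
#define SMD_KIND_PACKET		0x200

int main(void)
{
	unsigned ctype = 0x201;		/* hypothetical: packet channel to the DSP */

	printf("type %s, kind %s\n",
	       (ctype & SMD_TYPE_MASK) == SMD_TYPE_APPS_DSP ?
			"apps<->dsp" : "apps<->modem",
	       (ctype & SMD_KIND_MASK) == SMD_KIND_PACKET ?
			"packet" : "stream/unknown");
	return 0;
}
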
static void do_nothing_notify(void *priv, unsigned flags)
@@ -744,7 +695,11 @@ int smd_open(const char *name, smd_channel_t **_ch,
*_ch = ch;
spin_lock_irqsave(&smd_lock, flags);
- list_add(&ch->ch_list, &smd_ch_list);
+
+ if ((ch->type & SMD_TYPE_MASK) == SMD_TYPE_APPS_MODEM)
+ list_add(&ch->ch_list, &smd_ch_list_modem);
+ else
+ list_add(&ch->ch_list, &smd_ch_list_dsp);
/* If the remote side is CLOSING, we need to get it to
* move to OPENING (which we'll do by moving from CLOSED to
@@ -756,9 +711,9 @@ int smd_open(const char *name, smd_channel_t **_ch,
*/
if (ch->recv->state == SMD_SS_CLOSING) {
ch->send->head = 0;
- hc_set_state(ch->send, SMD_SS_OPENING);
+ ch_set_state(ch, SMD_SS_OPENING);
} else {
- hc_set_state(ch->send, SMD_SS_OPENED);
+ ch_set_state(ch, SMD_SS_OPENED);
}
spin_unlock_irqrestore(&smd_lock, flags);
smd_kick(ch);
@@ -778,7 +733,7 @@ int smd_close(smd_channel_t *ch)
spin_lock_irqsave(&smd_lock, flags);
ch->notify = do_nothing_notify;
list_del(&ch->ch_list);
- hc_set_state(ch->send, SMD_SS_CLOSED);
+ ch_set_state(ch, SMD_SS_CLOSED);
spin_unlock_irqrestore(&smd_lock, flags);
mutex_lock(&smd_creation_mutex);
@@ -798,6 +753,16 @@ int smd_write(smd_channel_t *ch, const void *data, int len)
return ch->write(ch, data, len);
}
+int smd_write_atomic(smd_channel_t *ch, const void *data, int len)
+{
+ unsigned long flags;
+ int res;
+ spin_lock_irqsave(&smd_lock, flags);
+ res = ch->write(ch, data, len);
+ spin_unlock_irqrestore(&smd_lock, flags);
+ return res;
+}
+
int smd_read_avail(smd_channel_t *ch)
{
return ch->read_avail(ch);
@@ -831,7 +796,7 @@ void *smem_alloc(unsigned id, unsigned size)
return smem_find(id, size);
}
-static void *_smem_find(unsigned id, unsigned *size)
+void *smem_item(unsigned id, unsigned *size)
{
struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
struct smem_heap_entry *toc = shared->heap_toc;
@@ -842,6 +807,8 @@ static void *_smem_find(unsigned id, unsigned *size)
if (toc[id].allocated) {
*size = toc[id].size;
return (void *) (MSM_SHARED_RAM_BASE + toc[id].offset);
+ } else {
+ *size = 0;
}
return 0;
@@ -852,7 +819,7 @@ void *smem_find(unsigned id, unsigned size_in)
unsigned size;
void *ptr;
- ptr = _smem_find(id, &size);
+ ptr = smem_item(id, &size);
if (!ptr)
return 0;
@@ -869,204 +836,125 @@ void *smem_find(unsigned id, unsigned size_in)
static irqreturn_t smsm_irq_handler(int irq, void *data)
{
unsigned long flags;
- struct smsm_shared *smsm;
+ unsigned apps, modm;
spin_lock_irqsave(&smem_lock, flags);
- smsm = smem_alloc(ID_SHARED_STATE,
- 2 * sizeof(struct smsm_shared));
- if (smsm == 0) {
- pr_info("<SM NO STATE>\n");
- } else {
- unsigned apps = smsm[0].state;
- unsigned modm = smsm[1].state;
-
- if (msm_smd_debug_mask & MSM_SMSM_DEBUG)
- pr_info("<SM %08x %08x>\n", apps, modm);
- if (modm & SMSM_RESET) {
- handle_modem_crash();
- } else {
- apps |= SMSM_INIT;
- if (modm & SMSM_SMDINIT)
- apps |= SMSM_SMDINIT;
- if (modm & SMSM_RPCINIT)
- apps |= SMSM_RPCINIT;
- }
+ apps = raw_smsm_get_state(SMSM_STATE_APPS);
+ modm = raw_smsm_get_state(SMSM_STATE_MODEM);
+
+ if (msm_smd_debug_mask & MSM_SMSM_DEBUG)
+ pr_info("<SM %08x %08x>\n", apps, modm);
+ if (modm & SMSM_RESET)
+ handle_modem_crash();
+
+ do_smd_probe();
- if (smsm[0].state != apps) {
- if (msm_smd_debug_mask & MSM_SMSM_DEBUG)
- pr_info("<SM %08x NOTIFY>\n", apps);
- smsm[0].state = apps;
- do_smd_probe();
- notify_other_smsm();
- }
- }
spin_unlock_irqrestore(&smem_lock, flags);
return IRQ_HANDLED;
}
-int smsm_change_state(uint32_t clear_mask, uint32_t set_mask)
+int smsm_change_state(enum smsm_state_item item,
+ uint32_t clear_mask, uint32_t set_mask)
{
+ unsigned long addr = smd_info.state + item * 4;
unsigned long flags;
- struct smsm_shared *smsm;
+ unsigned state;
+
+ if (!smd_info.ready)
+ return -EIO;
spin_lock_irqsave(&smem_lock, flags);
- smsm = smem_alloc(ID_SHARED_STATE,
- 2 * sizeof(struct smsm_shared));
-
- if (smsm) {
- if (smsm[1].state & SMSM_RESET)
- handle_modem_crash();
- smsm[0].state = (smsm[0].state & ~clear_mask) | set_mask;
- if (msm_smd_debug_mask & MSM_SMSM_DEBUG)
- pr_info("smsm_change_state %x\n",
- smsm[0].state);
- notify_other_smsm();
- }
+ if (raw_smsm_get_state(SMSM_STATE_MODEM) & SMSM_RESET)
+ handle_modem_crash();
+
+ state = (readl(addr) & ~clear_mask) | set_mask;
+ writel(state, addr);
+
+ if (msm_smd_debug_mask & MSM_SMSM_DEBUG)
+ pr_info("smsm_change_state %d %x\n", item, state);
+ notify_other_smsm();
spin_unlock_irqrestore(&smem_lock, flags);
- if (smsm == NULL) {
- pr_err("smsm_change_state <SM NO STATE>\n");
- return -EIO;
- }
return 0;
}
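
smsm_change_state() is a masked read-modify-write on the local processor's state word: bits in clear_mask are dropped, bits in set_mask are added, and the update happens under smem_lock so concurrent local callers cannot lose each other's bits. A tiny sketch of just the update rule, with masks picked for illustration:

#include <stdio.h>
#include <stdint.h>

#define SMSM_INIT	0x00000001
#define SMSM_RUN	0x00000100
#define SMSM_SLEEP	0x00004000

static uint32_t change_state(uint32_t old, uint32_t clear_mask, uint32_t set_mask)
{
	return (old & ~clear_mask) | set_mask;
}

int main(void)
{
	uint32_t state = SMSM_INIT | SMSM_SLEEP;

	/* leave sleep, announce that we are running */
	state = change_state(state, SMSM_SLEEP, SMSM_RUN);
	printf("%08x\n", state);	/* prints 00000101 */
	return 0;
}
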
-uint32_t smsm_get_state(void)
+uint32_t smsm_get_state(enum smsm_state_item item)
{
unsigned long flags;
- struct smsm_shared *smsm;
uint32_t rv;
spin_lock_irqsave(&smem_lock, flags);
- smsm = smem_alloc(ID_SHARED_STATE,
- 2 * sizeof(struct smsm_shared));
+ rv = readl(smd_info.state + item * 4);
- if (smsm)
- rv = smsm[1].state;
- else
- rv = 0;
-
- if (rv & SMSM_RESET)
+ if (item == SMSM_STATE_MODEM && (rv & SMSM_RESET))
handle_modem_crash();
spin_unlock_irqrestore(&smem_lock, flags);
- if (smsm == NULL)
- pr_err("smsm_get_state <SM NO STATE>\n");
return rv;
}
+#ifdef CONFIG_ARCH_MSM_SCORPION
+
int smsm_set_sleep_duration(uint32_t delay)
{
- uint32_t *ptr;
+ struct msm_dem_slave_data *ptr;
- ptr = smem_alloc(SMEM_SMSM_SLEEP_DELAY, sizeof(*ptr));
+ ptr = smem_find(SMEM_APPS_DEM_SLAVE_DATA, sizeof(*ptr));
if (ptr == NULL) {
- pr_err("smsm_set_sleep_duration <SM NO SLEEP_DELAY>\n");
+ pr_err("smsm_set_sleep_duration <SM NO APPS_DEM_SLAVE_DATA>\n");
return -EIO;
}
if (msm_smd_debug_mask & MSM_SMSM_DEBUG)
pr_info("smsm_set_sleep_duration %d -> %d\n",
- *ptr, delay);
- *ptr = delay;
+ ptr->sleep_time, delay);
+ ptr->sleep_time = delay;
return 0;
}
-int smsm_set_interrupt_info(struct smsm_interrupt_info *info)
+#else
+
+int smsm_set_sleep_duration(uint32_t delay)
{
- struct smsm_interrupt_info *ptr;
+ uint32_t *ptr;
- ptr = smem_alloc(SMEM_SMSM_INT_INFO, sizeof(*ptr));
+ ptr = smem_find(SMEM_SMSM_SLEEP_DELAY, sizeof(*ptr));
if (ptr == NULL) {
- pr_err("smsm_set_sleep_duration <SM NO INT_INFO>\n");
+ pr_err("smsm_set_sleep_duration <SM NO SLEEP_DELAY>\n");
return -EIO;
}
if (msm_smd_debug_mask & MSM_SMSM_DEBUG)
- pr_info("smsm_set_interrupt_info %x %x -> %x %x\n",
- ptr->aArm_en_mask, ptr->aArm_interrupts_pending,
- info->aArm_en_mask, info->aArm_interrupts_pending);
- *ptr = *info;
+ pr_info("smsm_set_sleep_duration %d -> %d\n",
+ *ptr, delay);
+ *ptr = delay;
return 0;
}
-#define MAX_NUM_SLEEP_CLIENTS 64
-#define MAX_SLEEP_NAME_LEN 8
-
-#define NUM_GPIO_INT_REGISTERS 6
-#define GPIO_SMEM_NUM_GROUPS 2
-#define GPIO_SMEM_MAX_PC_INTERRUPTS 8
-
-struct tramp_gpio_save {
- unsigned int enable;
- unsigned int detect;
- unsigned int polarity;
-};
-
-struct tramp_gpio_smem {
- uint16_t num_fired[GPIO_SMEM_NUM_GROUPS];
- uint16_t fired[GPIO_SMEM_NUM_GROUPS][GPIO_SMEM_MAX_PC_INTERRUPTS];
- uint32_t enabled[NUM_GPIO_INT_REGISTERS];
- uint32_t detection[NUM_GPIO_INT_REGISTERS];
- uint32_t polarity[NUM_GPIO_INT_REGISTERS];
-};
-
-
-void smsm_print_sleep_info(void)
-{
- unsigned long flags;
- uint32_t *ptr;
- struct tramp_gpio_smem *gpio;
- struct smsm_interrupt_info *int_info;
-
-
- spin_lock_irqsave(&smem_lock, flags);
-
- ptr = smem_alloc(SMEM_SMSM_SLEEP_DELAY, sizeof(*ptr));
- if (ptr)
- pr_info("SMEM_SMSM_SLEEP_DELAY: %x\n", *ptr);
-
- ptr = smem_alloc(SMEM_SMSM_LIMIT_SLEEP, sizeof(*ptr));
- if (ptr)
- pr_info("SMEM_SMSM_LIMIT_SLEEP: %x\n", *ptr);
-
- ptr = smem_alloc(SMEM_SLEEP_POWER_COLLAPSE_DISABLED, sizeof(*ptr));
- if (ptr)
- pr_info("SMEM_SLEEP_POWER_COLLAPSE_DISABLED: %x\n", *ptr);
-
- int_info = smem_alloc(SMEM_SMSM_INT_INFO, sizeof(*int_info));
- if (int_info)
- pr_info("SMEM_SMSM_INT_INFO %x %x %x\n",
- int_info->aArm_en_mask,
- int_info->aArm_interrupts_pending,
- int_info->aArm_wakeup_reason);
-
- gpio = smem_alloc(SMEM_GPIO_INT, sizeof(*gpio));
- if (gpio) {
- int i;
- for (i = 0; i < NUM_GPIO_INT_REGISTERS; i++)
- pr_info("SMEM_GPIO_INT: %d: e %x d %x p %x\n",
- i, gpio->enabled[i], gpio->detection[i],
- gpio->polarity[i]);
-
- for (i = 0; i < GPIO_SMEM_NUM_GROUPS; i++)
- pr_info("SMEM_GPIO_INT: %d: f %d: %d %d...\n",
- i, gpio->num_fired[i], gpio->fired[i][0],
- gpio->fired[i][1]);
- }
-
- spin_unlock_irqrestore(&smem_lock, flags);
-}
+#endif
int smd_core_init(void)
{
int r;
pr_info("smd_core_init()\n");
- r = request_irq(INT_A9_M2A_0, smd_irq_handler,
+ /* wait for essential items to be initialized */
+ for (;;) {
+ unsigned size;
+ void *state;
+ state = smem_item(SMEM_SMSM_SHARED_STATE, &size);
+ if (size == SMSM_V1_SIZE || size == SMSM_V2_SIZE) {
+ smd_info.state = (unsigned)state;
+ break;
+ }
+ }
+
+ smd_info.ready = 1;
+
+ r = request_irq(INT_A9_M2A_0, smd_modem_irq_handler,
IRQF_TRIGGER_RISING, "smd_dev", 0);
if (r < 0)
return r;
@@ -1084,215 +972,42 @@ int smd_core_init(void)
if (r < 0)
pr_err("smd_core_init: enable_irq_wake failed for A9_M2A_5\n");
- /* we may have missed a signal while booting -- fake
- * an interrupt to make sure we process any existing
- * state
- */
- smsm_irq_handler(0, 0);
-
- pr_info("smd_core_init() done\n");
-
- return 0;
-}
-
-#if defined(CONFIG_DEBUG_FS)
-
-static int dump_ch(char *buf, int max, int n,
- struct smd_half_channel *s,
- struct smd_half_channel *r)
-{
- return scnprintf(
- buf, max,
- "ch%02d:"
- " %8s(%04d/%04d) %c%c%c%c%c%c%c <->"
- " %8s(%04d/%04d) %c%c%c%c%c%c%c\n", n,
- chstate(s->state), s->tail, s->head,
- s->fDSR ? 'D' : 'd',
- s->fCTS ? 'C' : 'c',
- s->fCD ? 'C' : 'c',
- s->fRI ? 'I' : 'i',
- s->fHEAD ? 'W' : 'w',
- s->fTAIL ? 'R' : 'r',
- s->fSTATE ? 'S' : 's',
- chstate(r->state), r->tail, r->head,
- r->fDSR ? 'D' : 'd',
- r->fCTS ? 'R' : 'r',
- r->fCD ? 'C' : 'c',
- r->fRI ? 'I' : 'i',
- r->fHEAD ? 'W' : 'w',
- r->fTAIL ? 'R' : 'r',
- r->fSTATE ? 'S' : 's'
- );
-}
-
-static int debug_read_stat(char *buf, int max)
-{
- struct smsm_shared *smsm;
- char *msg;
- int i = 0;
-
- smsm = smem_find(ID_SHARED_STATE,
- 2 * sizeof(struct smsm_shared));
-
- msg = smem_find(ID_DIAG_ERR_MSG, SZ_DIAG_ERR_MSG);
-
- if (smsm) {
- if (smsm[1].state & SMSM_RESET)
- i += scnprintf(buf + i, max - i,
- "smsm: ARM9 HAS CRASHED\n");
- i += scnprintf(buf + i, max - i, "smsm: a9: %08x a11: %08x\n",
- smsm[0].state, smsm[1].state);
- } else {
- i += scnprintf(buf + i, max - i, "smsm: cannot find\n");
- }
- if (msg) {
- msg[SZ_DIAG_ERR_MSG - 1] = 0;
- i += scnprintf(buf + i, max - i, "diag: '%s'\n", msg);
- }
- return i;
-}
-
-static int debug_read_mem(char *buf, int max)
-{
- unsigned n;
- struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
- struct smem_heap_entry *toc = shared->heap_toc;
- int i = 0;
-
- i += scnprintf(buf + i, max - i,
- "heap: init=%d free=%d remain=%d\n",
- shared->heap_info.initialized,
- shared->heap_info.free_offset,
- shared->heap_info.heap_remaining);
-
- for (n = 0; n < SMEM_NUM_ITEMS; n++) {
- if (toc[n].allocated == 0)
- continue;
- i += scnprintf(buf + i, max - i,
- "%04d: offsed %08x size %08x\n",
- n, toc[n].offset, toc[n].size);
- }
- return i;
-}
-
-static int debug_read_ch(char *buf, int max)
-{
- struct smd_shared *shared;
- int n, i = 0;
-
- for (n = 0; n < SMD_CHANNELS; n++) {
- shared = smem_find(ID_SMD_CHANNELS + n,
- sizeof(struct smd_shared));
- if (shared == 0)
- continue;
- i += dump_ch(buf + i, max - i, n, &shared->ch0, &shared->ch1);
- }
-
- return i;
-}
-
-static int debug_read_version(char *buf, int max)
-{
- struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
- unsigned version = shared->version[VERSION_MODEM];
- return sprintf(buf, "%d.%d\n", version >> 16, version & 0xffff);
-}
-
-static int debug_read_build_id(char *buf, int max)
-{
- unsigned size;
- void *data;
-
- data = _smem_find(SMEM_HW_SW_BUILD_ID, &size);
- if (!data)
- return 0;
-
- if (size >= max)
- size = max;
- memcpy(buf, data, size);
-
- return size;
-}
-
-static int debug_read_alloc_tbl(char *buf, int max)
-{
- struct smd_alloc_elm *shared;
- int n, i = 0;
-
- shared = smem_find(ID_CH_ALLOC_TBL, sizeof(*shared) * 64);
-
- for (n = 0; n < 64; n++) {
- if (shared[n].ref_count == 0)
- continue;
- i += scnprintf(buf + i, max - i,
- "%03d: %20s cid=%02d ctype=%d ref_count=%d\n",
- n, shared[n].name, shared[n].cid,
- shared[n].ctype, shared[n].ref_count);
+#if defined(CONFIG_QDSP6)
+ r = request_irq(INT_ADSP_A11, smd_dsp_irq_handler,
+ IRQF_TRIGGER_RISING, "smd_dsp", 0);
+ if (r < 0) {
+ free_irq(INT_A9_M2A_0, 0);
+ free_irq(INT_A9_M2A_5, 0);
+ return r;
}
+#endif
- return i;
-}
-
-static int debug_boom(char *buf, int max)
-{
- unsigned ms = 5000;
- msm_proc_comm(PCOM_RESET_MODEM, &ms, 0);
- return 0;
-}
+ /* check for any SMD channels that may already exist */
+ do_smd_probe();
-#define DEBUG_BUFMAX 4096
-static char debug_buffer[DEBUG_BUFMAX];
+ /* indicate that we're up and running */
+ smsm_change_state(SMSM_STATE_APPS,
+ ~0, SMSM_INIT | SMSM_SMDINIT | SMSM_RPCINIT | SMSM_RUN);
+#ifdef CONFIG_ARCH_MSM_SCORPION
+ smsm_change_state(SMSM_STATE_APPS_DEM, ~0, 0);
+#endif
-static ssize_t debug_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- int (*fill)(char *buf, int max) = file->private_data;
- int bsize = fill(debug_buffer, DEBUG_BUFMAX);
- return simple_read_from_buffer(buf, count, ppos, debug_buffer, bsize);
-}
+ pr_info("smd_core_init() done\n");
-static int debug_open(struct inode *inode, struct file *file)
-{
- file->private_data = inode->i_private;
return 0;
}
-static const struct file_operations debug_ops = {
- .read = debug_read,
- .open = debug_open,
-};
-
-static void debug_create(const char *name, mode_t mode,
- struct dentry *dent,
- int (*fill)(char *buf, int max))
-{
- debugfs_create_file(name, mode, dent, fill, &debug_ops);
-}
-
-static void smd_debugfs_init(void)
-{
- struct dentry *dent;
-
- dent = debugfs_create_dir("smd", 0);
- if (IS_ERR(dent))
- return;
-
- debug_create("ch", 0444, dent, debug_read_ch);
- debug_create("stat", 0444, dent, debug_read_stat);
- debug_create("mem", 0444, dent, debug_read_mem);
- debug_create("version", 0444, dent, debug_read_version);
- debug_create("tbl", 0444, dent, debug_read_alloc_tbl);
- debug_create("build", 0444, dent, debug_read_build_id);
- debug_create("boom", 0444, dent, debug_boom);
-}
-#else
-static void smd_debugfs_init(void) {}
-#endif
-
static int __init msm_smd_probe(struct platform_device *pdev)
{
pr_info("smd_init()\n");
+ /*
+ * If we haven't waited for the ARM9 to boot up till now,
+ * then we need to wait here. Otherwise this should just
+ * return immediately.
+ */
+ proc_comm_boot_wait();
+
INIT_WORK(&probe_work, smd_channel_probe_worker);
if (smd_core_init()) {
@@ -1304,7 +1019,8 @@ static int __init msm_smd_probe(struct platform_device *pdev)
msm_check_for_modem_crash = check_for_modem_crash;
- smd_debugfs_init();
+ msm_init_last_radio_log(THIS_MODULE);
+
smd_initialized = 1;
return 0;
diff --git a/arch/arm/mach-msm/smd_debug.c b/arch/arm/mach-msm/smd_debug.c
new file mode 100644
index 000000000000..3b2dd717b788
--- /dev/null
+++ b/arch/arm/mach-msm/smd_debug.c
@@ -0,0 +1,315 @@
+/* arch/arm/mach-msm/smd_debug.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Author: Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/list.h>
+
+#include <mach/msm_iomap.h>
+
+#include "smd_private.h"
+
+#if defined(CONFIG_DEBUG_FS)
+
+static char *chstate(unsigned n)
+{
+ switch (n) {
+ case SMD_SS_CLOSED:
+ return "CLOSED";
+ case SMD_SS_OPENING:
+ return "OPENING";
+ case SMD_SS_OPENED:
+ return "OPENED";
+ case SMD_SS_FLUSHING:
+ return "FLUSHING";
+ case SMD_SS_CLOSING:
+ return "CLOSING";
+ case SMD_SS_RESET:
+ return "RESET";
+ case SMD_SS_RESET_OPENING:
+ return "ROPENING";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+
+static int dump_ch(char *buf, int max, struct smd_channel *ch)
+{
+ volatile struct smd_half_channel *s = ch->send;
+ volatile struct smd_half_channel *r = ch->recv;
+
+ return scnprintf(
+ buf, max,
+ "ch%02d:"
+ " %8s(%05d/%05d) %c%c%c%c%c%c%c <->"
+ " %8s(%05d/%05d) %c%c%c%c%c%c%c '%s'\n", ch->n,
+ chstate(s->state), s->tail, s->head,
+ s->fDSR ? 'D' : 'd',
+ s->fCTS ? 'C' : 'c',
+ s->fCD ? 'C' : 'c',
+ s->fRI ? 'I' : 'i',
+ s->fHEAD ? 'W' : 'w',
+ s->fTAIL ? 'R' : 'r',
+ s->fSTATE ? 'S' : 's',
+ chstate(r->state), r->tail, r->head,
+ r->fDSR ? 'D' : 'd',
+ r->fCTS ? 'R' : 'r',
+ r->fCD ? 'C' : 'c',
+ r->fRI ? 'I' : 'i',
+ r->fHEAD ? 'W' : 'w',
+ r->fTAIL ? 'R' : 'r',
+ r->fSTATE ? 'S' : 's',
+ ch->name
+ );
+}
+
+static int debug_read_stat(char *buf, int max)
+{
+ char *msg;
+ int i = 0;
+
+ msg = smem_find(ID_DIAG_ERR_MSG, SZ_DIAG_ERR_MSG);
+
+ if (raw_smsm_get_state(SMSM_STATE_MODEM) & SMSM_RESET)
+ i += scnprintf(buf + i, max - i,
+ "smsm: ARM9 HAS CRASHED\n");
+
+ i += scnprintf(buf + i, max - i, "smsm: a9: %08x a11: %08x\n",
+ raw_smsm_get_state(SMSM_STATE_MODEM),
+ raw_smsm_get_state(SMSM_STATE_APPS));
+#ifdef CONFIG_ARCH_MSM_SCORPION
+ i += scnprintf(buf + i, max - i, "smsm dem: apps: %08x modem: %08x "
+ "qdsp6: %08x power: %08x time: %08x\n",
+ raw_smsm_get_state(SMSM_STATE_APPS_DEM),
+ raw_smsm_get_state(SMSM_STATE_MODEM_DEM),
+ raw_smsm_get_state(SMSM_STATE_QDSP6_DEM),
+ raw_smsm_get_state(SMSM_STATE_POWER_MASTER_DEM),
+ raw_smsm_get_state(SMSM_STATE_TIME_MASTER_DEM));
+#endif
+ if (msg) {
+ msg[SZ_DIAG_ERR_MSG - 1] = 0;
+ i += scnprintf(buf + i, max - i, "diag: '%s'\n", msg);
+ }
+ return i;
+}
+
+static int debug_read_mem(char *buf, int max)
+{
+ unsigned n;
+ struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
+ struct smem_heap_entry *toc = shared->heap_toc;
+ int i = 0;
+
+ i += scnprintf(buf + i, max - i,
+ "heap: init=%d free=%d remain=%d\n",
+ shared->heap_info.initialized,
+ shared->heap_info.free_offset,
+ shared->heap_info.heap_remaining);
+
+ for (n = 0; n < SMEM_NUM_ITEMS; n++) {
+ if (toc[n].allocated == 0)
+ continue;
+ i += scnprintf(buf + i, max - i,
+ "%04d: offset %08x size %08x\n",
+ n, toc[n].offset, toc[n].size);
+ }
+ return i;
+}
+
+static int debug_read_ch(char *buf, int max)
+{
+ struct smd_channel *ch;
+ unsigned long flags;
+ int i = 0;
+
+ spin_lock_irqsave(&smd_lock, flags);
+ list_for_each_entry(ch, &smd_ch_list_dsp, ch_list)
+ i += dump_ch(buf + i, max - i, ch);
+ list_for_each_entry(ch, &smd_ch_list_modem, ch_list)
+ i += dump_ch(buf + i, max - i, ch);
+ list_for_each_entry(ch, &smd_ch_closed_list, ch_list)
+ i += dump_ch(buf + i, max - i, ch);
+ spin_unlock_irqrestore(&smd_lock, flags);
+
+ return i;
+}
+
+static int debug_read_version(char *buf, int max)
+{
+ struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
+ unsigned version = shared->version[VERSION_MODEM];
+ return sprintf(buf, "%d.%d\n", version >> 16, version & 0xffff);
+}
+
+static int debug_read_build_id(char *buf, int max)
+{
+ unsigned size;
+ void *data;
+
+ data = smem_item(SMEM_HW_SW_BUILD_ID, &size);
+ if (!data)
+ return 0;
+
+ if (size >= max)
+ size = max;
+ memcpy(buf, data, size);
+
+ return size;
+}
+
+static int debug_read_alloc_tbl(char *buf, int max)
+{
+ struct smd_alloc_elm *shared;
+ int n, i = 0;
+
+ shared = smem_find(ID_CH_ALLOC_TBL, sizeof(*shared) * 64);
+
+ for (n = 0; n < 64; n++) {
+ if (shared[n].ref_count == 0)
+ continue;
+ i += scnprintf(buf + i, max - i,
+ "%03d: %-20s cid=%02d type=%03d "
+ "kind=%02d ref_count=%d\n",
+ n, shared[n].name, shared[n].cid,
+ shared[n].ctype & 0xff,
+ (shared[n].ctype >> 8) & 0xf,
+ shared[n].ref_count);
+ }
+
+ return i;
+}
+
+#define DEBUG_BUFMAX 4096
+static char debug_buffer[DEBUG_BUFMAX];
+
+static ssize_t debug_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ int (*fill)(char *buf, int max) = file->private_data;
+ int bsize = fill(debug_buffer, DEBUG_BUFMAX);
+ return simple_read_from_buffer(buf, count, ppos, debug_buffer, bsize);
+}
+
+static int debug_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static const struct file_operations debug_ops = {
+ .read = debug_read,
+ .open = debug_open,
+};
+
+static void debug_create(const char *name, mode_t mode,
+ struct dentry *dent,
+ int (*fill)(char *buf, int max))
+{
+ debugfs_create_file(name, mode, dent, fill, &debug_ops);
+}
+
+static int smd_debugfs_init(void)
+{
+ struct dentry *dent;
+
+ dent = debugfs_create_dir("smd", 0);
+ if (IS_ERR(dent))
+ return 1;
+
+ debug_create("ch", 0444, dent, debug_read_ch);
+ debug_create("stat", 0444, dent, debug_read_stat);
+ debug_create("mem", 0444, dent, debug_read_mem);
+ debug_create("version", 0444, dent, debug_read_version);
+ debug_create("tbl", 0444, dent, debug_read_alloc_tbl);
+ debug_create("build", 0444, dent, debug_read_build_id);
+
+ return 0;
+}
+
+late_initcall(smd_debugfs_init);
+#endif
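
Each debugfs file above stores its fill callback in inode->i_private; debug_read() regenerates the whole text into a static buffer on every read and lets simple_read_from_buffer() clip the copy to the caller's count and current file offset. A userspace approximation of that offset/length clipping, not the kernel helper itself, just to show why short reads and repeated reads behave sensibly:

#include <stdio.h>
#include <string.h>

/* Rough analogue of simple_read_from_buffer(): copy at most 'count' bytes
 * starting at *ppos from a buffer of 'avail' bytes, then advance *ppos. */
static long read_from_buffer(char *to, size_t count, long *ppos,
			     const char *from, size_t avail)
{
	size_t pos = (size_t)*ppos;

	if (pos >= avail)
		return 0;			/* EOF */
	if (count > avail - pos)
		count = avail - pos;
	memcpy(to, from + pos, count);
	*ppos += count;
	return (long)count;
}

int main(void)
{
	const char text[] = "ch00: OPENED ...\n";
	char out[8];
	long pos = 0, n;

	while ((n = read_from_buffer(out, sizeof(out), &pos, text, strlen(text))) > 0)
		fwrite(out, 1, n, stdout);
	return 0;
}
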
+
+
+#define MAX_NUM_SLEEP_CLIENTS 64
+#define MAX_SLEEP_NAME_LEN 8
+
+#define NUM_GPIO_INT_REGISTERS 6
+#define GPIO_SMEM_NUM_GROUPS 2
+#define GPIO_SMEM_MAX_PC_INTERRUPTS 8
+
+struct tramp_gpio_save {
+ unsigned int enable;
+ unsigned int detect;
+ unsigned int polarity;
+};
+
+struct tramp_gpio_smem {
+ uint16_t num_fired[GPIO_SMEM_NUM_GROUPS];
+ uint16_t fired[GPIO_SMEM_NUM_GROUPS][GPIO_SMEM_MAX_PC_INTERRUPTS];
+ uint32_t enabled[NUM_GPIO_INT_REGISTERS];
+ uint32_t detection[NUM_GPIO_INT_REGISTERS];
+ uint32_t polarity[NUM_GPIO_INT_REGISTERS];
+};
+
+
+void smsm_print_sleep_info(void)
+{
+ unsigned long flags;
+ uint32_t *ptr;
+ struct tramp_gpio_smem *gpio;
+ struct smsm_interrupt_info *int_info;
+
+
+ spin_lock_irqsave(&smem_lock, flags);
+
+ ptr = smem_alloc(SMEM_SMSM_SLEEP_DELAY, sizeof(*ptr));
+ if (ptr)
+ pr_info("SMEM_SMSM_SLEEP_DELAY: %x\n", *ptr);
+
+ ptr = smem_alloc(SMEM_SMSM_LIMIT_SLEEP, sizeof(*ptr));
+ if (ptr)
+ pr_info("SMEM_SMSM_LIMIT_SLEEP: %x\n", *ptr);
+
+ ptr = smem_alloc(SMEM_SLEEP_POWER_COLLAPSE_DISABLED, sizeof(*ptr));
+ if (ptr)
+ pr_info("SMEM_SLEEP_POWER_COLLAPSE_DISABLED: %x\n", *ptr);
+
+#ifndef CONFIG_ARCH_MSM_SCORPION
+ int_info = smem_alloc(SMEM_SMSM_INT_INFO, sizeof(*int_info));
+ if (int_info)
+ pr_info("SMEM_SMSM_INT_INFO %x %x %x\n",
+ int_info->interrupt_mask,
+ int_info->pending_interrupts,
+ int_info->wakeup_reason);
+
+ gpio = smem_alloc(SMEM_GPIO_INT, sizeof(*gpio));
+ if (gpio) {
+ int i;
+ for (i = 0; i < NUM_GPIO_INT_REGISTERS; i++)
+ pr_info("SMEM_GPIO_INT: %d: e %x d %x p %x\n",
+ i, gpio->enabled[i], gpio->detection[i],
+ gpio->polarity[i]);
+
+ for (i = 0; i < GPIO_SMEM_NUM_GROUPS; i++)
+ pr_info("SMEM_GPIO_INT: %d: f %d: %d %d...\n",
+ i, gpio->num_fired[i], gpio->fired[i][0],
+ gpio->fired[i][1]);
+ }
+#else
+#endif
+ spin_unlock_irqrestore(&smem_lock, flags);
+}
+
diff --git a/arch/arm/mach-msm/smd_private.h b/arch/arm/mach-msm/smd_private.h
new file mode 100644
index 000000000000..727bfe68aa9b
--- /dev/null
+++ b/arch/arm/mach-msm/smd_private.h
@@ -0,0 +1,403 @@
+/* arch/arm/mach-msm/smd_private.h
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2007 QUALCOMM Incorporated
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef _ARCH_ARM_MACH_MSM_MSM_SMD_PRIVATE_H_
+#define _ARCH_ARM_MACH_MSM_MSM_SMD_PRIVATE_H_
+
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/io.h>
+
+#include <mach/msm_iomap.h>
+
+struct smem_heap_info {
+ unsigned initialized;
+ unsigned free_offset;
+ unsigned heap_remaining;
+ unsigned reserved;
+};
+
+struct smem_heap_entry {
+ unsigned allocated;
+ unsigned offset;
+ unsigned size;
+ unsigned reserved;
+};
+
+struct smem_proc_comm {
+ unsigned command;
+ unsigned status;
+ unsigned data1;
+ unsigned data2;
+};
+
+#define PC_APPS 0
+#define PC_MODEM 1
+
+#define VERSION_SMD 0
+#define VERSION_QDSP6 4
+#define VERSION_APPS_SBL 6
+#define VERSION_MODEM_SBL 7
+#define VERSION_APPS 8
+#define VERSION_MODEM 9
+
+struct smem_shared {
+ struct smem_proc_comm proc_comm[4];
+ unsigned version[32];
+ struct smem_heap_info heap_info;
+ struct smem_heap_entry heap_toc[512];
+};
+
+#define SMSM_V1_SIZE (sizeof(unsigned) * 8)
+#define SMSM_V2_SIZE (sizeof(unsigned) * 4)
+
+#ifdef CONFIG_MSM_SMD_PKG3
+struct smsm_interrupt_info {
+ uint32_t interrupt_mask;
+ uint32_t pending_interrupts;
+ uint32_t wakeup_reason;
+};
+#else
+#define DEM_MAX_PORT_NAME_LEN (20)
+struct msm_dem_slave_data {
+ uint32_t sleep_time;
+ uint32_t interrupt_mask;
+ uint32_t resources_used;
+ uint32_t reserved1;
+
+ uint32_t wakeup_reason;
+ uint32_t pending_interrupts;
+ uint32_t rpc_prog;
+ uint32_t rpc_proc;
+ char smd_port_name[DEM_MAX_PORT_NAME_LEN];
+ uint32_t reserved2;
+};
+#endif
+
+#define SZ_DIAG_ERR_MSG 0xC8
+#define ID_DIAG_ERR_MSG SMEM_DIAG_ERR_MESSAGE
+#define ID_SMD_CHANNELS SMEM_SMD_BASE_ID
+#define ID_SHARED_STATE SMEM_SMSM_SHARED_STATE
+#define ID_CH_ALLOC_TBL SMEM_CHANNEL_ALLOC_TBL
+
+#define SMSM_INIT 0x00000001
+#define SMSM_SMDINIT 0x00000008
+#define SMSM_RPCINIT 0x00000020
+#define SMSM_RESET 0x00000040
+#define SMSM_RSA 0x00000080
+#define SMSM_RUN 0x00000100
+#define SMSM_PWRC 0x00000200
+#define SMSM_TIMEWAIT 0x00000400
+#define SMSM_TIMEINIT 0x00000800
+#define SMSM_PWRC_EARLY_EXIT 0x00001000
+#define SMSM_WFPI 0x00002000
+#define SMSM_SLEEP 0x00004000
+#define SMSM_SLEEPEXIT 0x00008000
+#define SMSM_APPS_REBOOT 0x00020000
+#define SMSM_SYSTEM_POWER_DOWN 0x00040000
+#define SMSM_SYSTEM_REBOOT 0x00080000
+#define SMSM_SYSTEM_DOWNLOAD 0x00100000
+#define SMSM_PWRC_SUSPEND 0x00200000
+#define SMSM_APPS_SHUTDOWN 0x00400000
+#define SMSM_SMD_LOOPBACK 0x00800000
+#define SMSM_RUN_QUIET 0x01000000
+#define SMSM_MODEM_WAIT 0x02000000
+#define SMSM_MODEM_BREAK 0x04000000
+#define SMSM_MODEM_CONTINUE 0x08000000
+#define SMSM_UNKNOWN 0x80000000
+
+#define SMSM_WKUP_REASON_RPC 0x00000001
+#define SMSM_WKUP_REASON_INT 0x00000002
+#define SMSM_WKUP_REASON_GPIO 0x00000004
+#define SMSM_WKUP_REASON_TIMER 0x00000008
+#define SMSM_WKUP_REASON_ALARM 0x00000010
+#define SMSM_WKUP_REASON_RESET 0x00000020
+
+#ifdef CONFIG_ARCH_MSM7X00A
+enum smsm_state_item {
+ SMSM_STATE_APPS = 1,
+ SMSM_STATE_MODEM = 3,
+ SMSM_STATE_COUNT,
+};
+#else
+enum smsm_state_item {
+ SMSM_STATE_APPS,
+ SMSM_STATE_MODEM,
+ SMSM_STATE_HEXAGON,
+ SMSM_STATE_APPS_DEM,
+ SMSM_STATE_MODEM_DEM,
+ SMSM_STATE_QDSP6_DEM,
+ SMSM_STATE_POWER_MASTER_DEM,
+ SMSM_STATE_TIME_MASTER_DEM,
+ SMSM_STATE_COUNT,
+};
+#endif
+
+void *smem_alloc(unsigned id, unsigned size);
+int smsm_change_state(enum smsm_state_item item, uint32_t clear_mask, uint32_t set_mask);
+uint32_t smsm_get_state(enum smsm_state_item item);
+int smsm_set_sleep_duration(uint32_t delay);
+void smsm_print_sleep_info(void);
+
+#define SMEM_NUM_SMD_CHANNELS 64
+
+typedef enum {
+ /* fixed items */
+ SMEM_PROC_COMM = 0,
+ SMEM_HEAP_INFO,
+ SMEM_ALLOCATION_TABLE,
+ SMEM_VERSION_INFO,
+ SMEM_HW_RESET_DETECT,
+ SMEM_AARM_WARM_BOOT,
+ SMEM_DIAG_ERR_MESSAGE,
+ SMEM_SPINLOCK_ARRAY,
+ SMEM_MEMORY_BARRIER_LOCATION,
+
+ /* dynamic items */
+ SMEM_AARM_PARTITION_TABLE,
+ SMEM_AARM_BAD_BLOCK_TABLE,
+ SMEM_RESERVE_BAD_BLOCKS,
+ SMEM_WM_UUID,
+ SMEM_CHANNEL_ALLOC_TBL,
+ SMEM_SMD_BASE_ID,
+ SMEM_SMEM_LOG_IDX = SMEM_SMD_BASE_ID + SMEM_NUM_SMD_CHANNELS,
+ SMEM_SMEM_LOG_EVENTS,
+ SMEM_SMEM_STATIC_LOG_IDX,
+ SMEM_SMEM_STATIC_LOG_EVENTS,
+ SMEM_SMEM_SLOW_CLOCK_SYNC,
+ SMEM_SMEM_SLOW_CLOCK_VALUE,
+ SMEM_BIO_LED_BUF,
+ SMEM_SMSM_SHARED_STATE,
+ SMEM_SMSM_INT_INFO,
+ SMEM_SMSM_SLEEP_DELAY,
+ SMEM_SMSM_LIMIT_SLEEP,
+ SMEM_SLEEP_POWER_COLLAPSE_DISABLED,
+ SMEM_KEYPAD_KEYS_PRESSED,
+ SMEM_KEYPAD_STATE_UPDATED,
+ SMEM_KEYPAD_STATE_IDX,
+ SMEM_GPIO_INT,
+ SMEM_MDDI_LCD_IDX,
+ SMEM_MDDI_HOST_DRIVER_STATE,
+ SMEM_MDDI_LCD_DISP_STATE,
+ SMEM_LCD_CUR_PANEL,
+ SMEM_MARM_BOOT_SEGMENT_INFO,
+ SMEM_AARM_BOOT_SEGMENT_INFO,
+ SMEM_SLEEP_STATIC,
+ SMEM_SCORPION_FREQUENCY,
+ SMEM_SMD_PROFILES,
+ SMEM_TSSC_BUSY,
+ SMEM_HS_SUSPEND_FILTER_INFO,
+ SMEM_BATT_INFO,
+ SMEM_APPS_BOOT_MODE,
+ SMEM_VERSION_FIRST,
+ SMEM_VERSION_LAST = SMEM_VERSION_FIRST + 24,
+ SMEM_OSS_RRCASN1_BUF1,
+ SMEM_OSS_RRCASN1_BUF2,
+ SMEM_ID_VENDOR0,
+ SMEM_ID_VENDOR1,
+ SMEM_ID_VENDOR2,
+ SMEM_HW_SW_BUILD_ID,
+ SMEM_SMD_BLOCK_PORT_BASE_ID,
+ SMEM_SMD_BLOCK_PORT_PROC0_HEAP = SMEM_SMD_BLOCK_PORT_BASE_ID + SMEM_NUM_SMD_CHANNELS,
+ SMEM_SMD_BLOCK_PORT_PROC1_HEAP = SMEM_SMD_BLOCK_PORT_PROC0_HEAP + SMEM_NUM_SMD_CHANNELS,
+ SMEM_I2C_MUTEX = SMEM_SMD_BLOCK_PORT_PROC1_HEAP + SMEM_NUM_SMD_CHANNELS,
+ SMEM_SCLK_CONVERSION,
+ SMEM_SMD_SMSM_INTR_MUX,
+ SMEM_SMSM_CPU_INTR_MASK,
+ SMEM_APPS_DEM_SLAVE_DATA,
+ SMEM_QDSP6_DEM_SLAVE_DATA,
+ SMEM_CLKREGIM_BSP,
+ SMEM_CLKREGIM_SOURCES,
+ SMEM_SMD_FIFO_BASE_ID,
+ SMEM_USABLE_RAM_PARTITION_TABLE = SMEM_SMD_FIFO_BASE_ID + SMEM_NUM_SMD_CHANNELS,
+ SMEM_POWER_ON_STATUS_INFO,
+ SMEM_DAL_AREA,
+ SMEM_SMEM_LOG_POWER_IDX,
+ SMEM_SMEM_LOG_POWER_WRAP,
+ SMEM_SMEM_LOG_POWER_EVENTS,
+ SMEM_ERR_CRASH_LOG,
+ SMEM_ERR_F3_TRACE_LOG,
+ SMEM_NUM_ITEMS,
+} smem_mem_type;
+
+
+#define SMD_SS_CLOSED 0x00000000
+#define SMD_SS_OPENING 0x00000001
+#define SMD_SS_OPENED 0x00000002
+#define SMD_SS_FLUSHING 0x00000003
+#define SMD_SS_CLOSING 0x00000004
+#define SMD_SS_RESET 0x00000005
+#define SMD_SS_RESET_OPENING 0x00000006
+
+#define SMD_BUF_SIZE 8192
+#define SMD_CHANNELS 64
+
+#define SMD_HEADER_SIZE 20
+
+struct smd_alloc_elm {
+ char name[20];
+ uint32_t cid;
+ uint32_t ctype;
+ uint32_t ref_count;
+};
+
+struct smd_half_channel {
+ unsigned state;
+ unsigned char fDSR;
+ unsigned char fCTS;
+ unsigned char fCD;
+ unsigned char fRI;
+ unsigned char fHEAD;
+ unsigned char fTAIL;
+ unsigned char fSTATE;
+ unsigned char fUNUSED;
+ unsigned tail;
+ unsigned head;
+} __attribute__(( aligned(4), packed ));
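
With packed plus 4-byte alignment this half-channel header is exactly 20 bytes on a typical 32-bit-unsigned ABI: one state word, eight flag bytes, then the tail and head indices. A quick compile-time check of that layout (a sketch assuming C11 and the usual type sizes, not part of the driver):

#include <assert.h>

struct half_channel_sketch {
	unsigned state;
	unsigned char fDSR, fCTS, fCD, fRI;
	unsigned char fHEAD, fTAIL, fSTATE, fUNUSED;
	unsigned tail;
	unsigned head;
} __attribute__((aligned(4), packed));

/* 4 + 8 + 4 + 4 bytes with no padding */
static_assert(sizeof(struct half_channel_sketch) == 20, "unexpected layout");

int main(void) { return 0; }
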
+
+/* Only used on SMD package v3 on msm7201a */
+struct smd_shared_v1 {
+ struct smd_half_channel ch0;
+ unsigned char data0[SMD_BUF_SIZE];
+ struct smd_half_channel ch1;
+ unsigned char data1[SMD_BUF_SIZE];
+};
+
+/* Used on SMD package v4 */
+struct smd_shared_v2 {
+ struct smd_half_channel ch0;
+ struct smd_half_channel ch1;
+};
+
+struct smd_channel {
+ volatile struct smd_half_channel *send;
+ volatile struct smd_half_channel *recv;
+ unsigned char *send_data;
+ unsigned char *recv_data;
+
+ unsigned fifo_mask;
+ unsigned fifo_size;
+ unsigned current_packet;
+ unsigned n;
+
+ struct list_head ch_list;
+
+ void *priv;
+ void (*notify)(void *priv, unsigned flags);
+
+ int (*read)(struct smd_channel *ch, void *data, int len);
+ int (*write)(struct smd_channel *ch, const void *data, int len);
+ int (*read_avail)(struct smd_channel *ch);
+ int (*write_avail)(struct smd_channel *ch);
+
+ void (*update_state)(struct smd_channel *ch);
+ unsigned last_state;
+ void (*notify_other_cpu)(void);
+ unsigned type;
+
+ char name[32];
+ struct platform_device pdev;
+};
+
+#define SMD_TYPE_MASK 0x0FF
+#define SMD_TYPE_APPS_MODEM 0x000
+#define SMD_TYPE_APPS_DSP 0x001
+#define SMD_TYPE_MODEM_DSP 0x002
+
+#define SMD_KIND_MASK 0xF00
+#define SMD_KIND_UNKNOWN 0x000
+#define SMD_KIND_STREAM 0x100
+#define SMD_KIND_PACKET 0x200
+
+extern struct list_head smd_ch_closed_list;
+extern struct list_head smd_ch_list_modem;
+extern struct list_head smd_ch_list_dsp;
+
+extern spinlock_t smd_lock;
+extern spinlock_t smem_lock;
+
+void *smem_find(unsigned id, unsigned size);
+void *smem_item(unsigned id, unsigned *size);
+uint32_t raw_smsm_get_state(enum smsm_state_item item);
+
+extern void msm_init_last_radio_log(struct module *);
+
+#ifdef CONFIG_MSM_SMD_PKG3
+/*
+ * This allocator assumes an SMD Package v3 which only exists on
+ * MSM7x00 SoCs.
+ */
+static inline int _smd_alloc_channel(struct smd_channel *ch)
+{
+ struct smd_shared_v1 *shared1;
+
+ shared1 = smem_alloc(ID_SMD_CHANNELS + ch->n, sizeof(*shared1));
+ if (!shared1) {
+ pr_err("smd_alloc_channel() cid %d does not exist\n", ch->n);
+ return -1;
+ }
+ ch->send = &shared1->ch0;
+ ch->recv = &shared1->ch1;
+ ch->send_data = shared1->data0;
+ ch->recv_data = shared1->data1;
+ ch->fifo_size = SMD_BUF_SIZE;
+ return 0;
+}
+#else
+/*
+ * This allocator assumes an SMD Package v4, the most common
+ * and the default.
+ */
+static inline int _smd_alloc_channel(struct smd_channel *ch)
+{
+ struct smd_shared_v2 *shared2;
+ void *buffer;
+ unsigned buffer_sz;
+
+ shared2 = smem_alloc(SMEM_SMD_BASE_ID + ch->n, sizeof(*shared2));
+ buffer = smem_item(SMEM_SMD_FIFO_BASE_ID + ch->n, &buffer_sz);
+
+ if (!buffer)
+ return -1;
+
+ /* buffer must be a power-of-two size */
+ if (buffer_sz & (buffer_sz - 1))
+ return -1;
+
+ buffer_sz /= 2;
+ ch->send = &shared2->ch0;
+ ch->recv = &shared2->ch1;
+ ch->send_data = buffer;
+ ch->recv_data = buffer + buffer_sz;
+ ch->fifo_size = buffer_sz;
+ return 0;
+}
+#endif /* CONFIG_MSM_SMD_PKG3 */
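
The package v4 allocator fetches one SMEM item holding both FIFOs back to back, rejects it unless the total size is a power of two, and gives each direction half; fifo_mask = fifo_size - 1 in smd_alloc_channel() only works because of that power-of-two guarantee. A standalone sketch of the check and the split (the buffer sizes are examples):

#include <stdio.h>

struct fifo_pair {
	unsigned send_off, recv_off, fifo_size;
};

/* split one shared buffer of 'sz' bytes into equal send/recv rings */
static int split_fifo(unsigned sz, struct fifo_pair *out)
{
	if (sz == 0 || (sz & (sz - 1)))
		return -1;		/* not a power of two, the mask trick breaks */
	out->fifo_size = sz / 2;
	out->send_off = 0;
	out->recv_off = out->fifo_size;
	return 0;
}

int main(void)
{
	struct fifo_pair p;

	if (split_fifo(16384, &p) == 0)
		printf("per-direction fifo %u bytes, mask %#x\n",
		       p.fifo_size, p.fifo_size - 1);
	if (split_fifo(12000, &p) != 0)
		printf("12000 rejected: not a power of two\n");
	return 0;
}
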
+
+#if defined(CONFIG_ARCH_MSM7X30)
+static inline void msm_a2m_int(uint32_t irq)
+{
+ writel(1 << irq, MSM_GCC_BASE + 0x8);
+}
+#else
+static inline void msm_a2m_int(uint32_t irq)
+{
+ writel(1, MSM_CSR_BASE + 0x400 + (irq * 4));
+}
+#endif /* CONFIG_ARCH_MSM7X30 */
+
+
+#endif
diff --git a/arch/arm/mach-msm/timer.c b/arch/arm/mach-msm/timer.c
index 4855b8ca5101..dec5ca622d7d 100644
--- a/arch/arm/mach-msm/timer.c
+++ b/arch/arm/mach-msm/timer.c
@@ -25,7 +25,9 @@
#include <asm/mach/time.h>
#include <mach/msm_iomap.h>
+#ifndef MSM_DGT_BASE
#define MSM_DGT_BASE (MSM_GPT_BASE + 0x10)
+#endif
#define MSM_DGT_SHIFT (5)
#define TIMER_MATCH_VAL 0x0000
diff --git a/arch/arm/mach-msm/vreg.c b/arch/arm/mach-msm/vreg.c
index fcb0b9f25684..a9103bc6615f 100644
--- a/arch/arm/mach-msm/vreg.c
+++ b/arch/arm/mach-msm/vreg.c
@@ -1,6 +1,7 @@
/* arch/arm/mach-msm/vreg.c
*
* Copyright (C) 2008 Google, Inc.
+ * Copyright (c) 2009, Code Aurora Forum. All rights reserved.
* Author: Brian Swetland <swetland@google.com>
*
* This software is licensed under the terms of the GNU General Public
@@ -18,6 +19,7 @@
#include <linux/device.h>
#include <linux/init.h>
#include <linux/debugfs.h>
+#include <linux/string.h>
#include <mach/vreg.h>
#include "proc_comm.h"
@@ -25,42 +27,62 @@
struct vreg {
const char *name;
unsigned id;
+ int status;
+ unsigned refcnt;
};
-#define VREG(_name, _id) { .name = _name, .id = _id, }
+#define VREG(_name, _id, _status, _refcnt) \
+ { .name = _name, .id = _id, .status = _status, .refcnt = _refcnt }
static struct vreg vregs[] = {
- VREG("msma", 0),
- VREG("msmp", 1),
- VREG("msme1", 2),
- VREG("msmc1", 3),
- VREG("msmc2", 4),
- VREG("gp3", 5),
- VREG("msme2", 6),
- VREG("gp4", 7),
- VREG("gp1", 8),
- VREG("tcxo", 9),
- VREG("pa", 10),
- VREG("rftx", 11),
- VREG("rfrx1", 12),
- VREG("rfrx2", 13),
- VREG("synt", 14),
- VREG("wlan", 15),
- VREG("usb", 16),
- VREG("boost", 17),
- VREG("mmc", 18),
- VREG("ruim", 19),
- VREG("msmc0", 20),
- VREG("gp2", 21),
- VREG("gp5", 22),
- VREG("gp6", 23),
- VREG("rf", 24),
- VREG("rf_vco", 26),
- VREG("mpll", 27),
- VREG("s2", 28),
- VREG("s3", 29),
- VREG("rfubm", 30),
- VREG("ncp", 31),
+ VREG("msma", 0, 0, 0),
+ VREG("msmp", 1, 0, 0),
+ VREG("msme1", 2, 0, 0),
+ VREG("msmc1", 3, 0, 0),
+ VREG("msmc2", 4, 0, 0),
+ VREG("gp3", 5, 0, 0),
+ VREG("msme2", 6, 0, 0),
+ VREG("gp4", 7, 0, 0),
+ VREG("gp1", 8, 0, 0),
+ VREG("tcxo", 9, 0, 0),
+ VREG("pa", 10, 0, 0),
+ VREG("rftx", 11, 0, 0),
+ VREG("rfrx1", 12, 0, 0),
+ VREG("rfrx2", 13, 0, 0),
+ VREG("synt", 14, 0, 0),
+ VREG("wlan", 15, 0, 0),
+ VREG("usb", 16, 0, 0),
+ VREG("boost", 17, 0, 0),
+ VREG("mmc", 18, 0, 0),
+ VREG("ruim", 19, 0, 0),
+ VREG("msmc0", 20, 0, 0),
+ VREG("gp2", 21, 0, 0),
+ VREG("gp5", 22, 0, 0),
+ VREG("gp6", 23, 0, 0),
+ VREG("rf", 24, 0, 0),
+ VREG("rf_vco", 26, 0, 0),
+ VREG("mpll", 27, 0, 0),
+ VREG("s2", 28, 0, 0),
+ VREG("s3", 29, 0, 0),
+ VREG("rfubm", 30, 0, 0),
+ VREG("ncp", 31, 0, 0),
+ VREG("gp7", 32, 0, 0),
+ VREG("gp8", 33, 0, 0),
+ VREG("gp9", 34, 0, 0),
+ VREG("gp10", 35, 0, 0),
+ VREG("gp11", 36, 0, 0),
+ VREG("gp12", 37, 0, 0),
+ VREG("gp13", 38, 0, 0),
+ VREG("gp14", 39, 0, 0),
+ VREG("gp15", 40, 0, 0),
+ VREG("gp16", 41, 0, 0),
+ VREG("gp17", 42, 0, 0),
+ VREG("s4", 43, 0, 0),
+ VREG("usb2", 44, 0, 0),
+ VREG("wlan2", 45, 0, 0),
+ VREG("xo_out", 46, 0, 0),
+ VREG("lvsw0", 47, 0, 0),
+ VREG("lvsw1", 48, 0, 0),
};
struct vreg *vreg_get(struct device *dev, const char *id)
@@ -70,7 +92,7 @@ struct vreg *vreg_get(struct device *dev, const char *id)
if (!strcmp(vregs[n].name, id))
return vregs + n;
}
- return 0;
+ return ERR_PTR(-ENOENT);
}
void vreg_put(struct vreg *vreg)
@@ -81,20 +103,39 @@ int vreg_enable(struct vreg *vreg)
{
unsigned id = vreg->id;
unsigned enable = 1;
- return msm_proc_comm(PCOM_VREG_SWITCH, &id, &enable);
+
+ if (vreg->refcnt == 0)
+ vreg->status = msm_proc_comm(PCOM_VREG_SWITCH, &id, &enable);
+
+ if ((vreg->refcnt < UINT_MAX) && (!vreg->status))
+ vreg->refcnt++;
+
+ return vreg->status;
}
-void vreg_disable(struct vreg *vreg)
+int vreg_disable(struct vreg *vreg)
{
unsigned id = vreg->id;
unsigned enable = 0;
- msm_proc_comm(PCOM_VREG_SWITCH, &id, &enable);
+
+ if (!vreg->refcnt)
+ return 0;
+
+ if (vreg->refcnt == 1)
+ vreg->status = msm_proc_comm(PCOM_VREG_SWITCH, &id, &enable);
+
+ if (!vreg->status)
+ vreg->refcnt--;
+
+ return vreg->status;
}
int vreg_set_level(struct vreg *vreg, unsigned mv)
{
unsigned id = vreg->id;
- return msm_proc_comm(PCOM_VREG_SET_LEVEL, &id, &mv);
+
+ vreg->status = msm_proc_comm(PCOM_VREG_SET_LEVEL, &id, &mv);
+ return vreg->status;
}
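
With the new refcounting, vreg_enable() only issues the PCOM_VREG_SWITCH proc_comm call on the 0 -> 1 transition and vreg_disable() only on 1 -> 0, so nested users of the same regulator no longer power it off underneath each other. A minimal userspace model of that behaviour, with the proc_comm call stubbed out:

#include <stdio.h>
#include <limits.h>

struct vreg_sketch {
	unsigned refcnt;
	int status;
};

static int hw_switch(int on)		/* stand-in for msm_proc_comm() */
{
	printf("proc_comm: switch %s\n", on ? "on" : "off");
	return 0;
}

static int enable(struct vreg_sketch *v)
{
	if (v->refcnt == 0)
		v->status = hw_switch(1);
	if (v->refcnt < UINT_MAX && !v->status)
		v->refcnt++;
	return v->status;
}

static int disable(struct vreg_sketch *v)
{
	if (!v->refcnt)
		return 0;
	if (v->refcnt == 1)
		v->status = hw_switch(0);
	if (!v->status)
		v->refcnt--;
	return v->status;
}

int main(void)
{
	struct vreg_sketch v = { 0, 0 };

	enable(&v);	/* switches on */
	enable(&v);	/* refcount only */
	disable(&v);	/* refcount only */
	disable(&v);	/* switches off */
	return 0;
}
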
#if defined(CONFIG_DEBUG_FS)
@@ -118,24 +159,59 @@ static int vreg_debug_set(void *data, u64 val)
static int vreg_debug_get(void *data, u64 *val)
{
- return -ENOSYS;
+ struct vreg *vreg = data;
+
+ if (!vreg->status)
+ *val = 0;
+ else
+ *val = 1;
+
+ return 0;
+}
+
+static int vreg_debug_count_set(void *data, u64 val)
+{
+ struct vreg *vreg = data;
+ if (val > UINT_MAX)
+ val = UINT_MAX;
+ vreg->refcnt = val;
+ return 0;
+}
+
+static int vreg_debug_count_get(void *data, u64 *val)
+{
+ struct vreg *vreg = data;
+
+ *val = vreg->refcnt;
+
+ return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(vreg_fops, vreg_debug_get, vreg_debug_set, "%llu\n");
+DEFINE_SIMPLE_ATTRIBUTE(vreg_count_fops, vreg_debug_count_get,
+ vreg_debug_count_set, "%llu\n");
static int __init vreg_debug_init(void)
{
struct dentry *dent;
int n;
+ char name[32];
+ const char *refcnt_name = "_refcnt";
dent = debugfs_create_dir("vreg", 0);
if (IS_ERR(dent))
return 0;
- for (n = 0; n < ARRAY_SIZE(vregs); n++)
+ for (n = 0; n < ARRAY_SIZE(vregs); n++) {
(void) debugfs_create_file(vregs[n].name, 0644,
dent, vregs + n, &vreg_fops);
+ strlcpy(name, vregs[n].name, sizeof(name));
+ strlcat(name, refcnt_name, sizeof(name));
+ (void) debugfs_create_file(name, 0644,
+ dent, vregs + n, &vreg_count_fops);
+ }
+
return 0;
}
diff --git a/arch/arm/mach-mx2/devices.c b/arch/arm/mach-mx2/devices.c
index b91e412f7b3e..a0aeb8a4adc1 100644
--- a/arch/arm/mach-mx2/devices.c
+++ b/arch/arm/mach-mx2/devices.c
@@ -109,12 +109,7 @@ DEFINE_IMX_GPT_DEVICE(4, MX27_GPT5_BASE_ADDR, MX27_INT_GPT5);
DEFINE_IMX_GPT_DEVICE(5, MX27_GPT6_BASE_ADDR, MX27_INT_GPT6);
#endif
-/*
- * Watchdog:
- * - i.MX1
- * - i.MX21
- * - i.MX27
- */
+/* Watchdog: i.MX1 has a separate driver, i.MX21 and i.MX27 are equal */
static struct resource mxc_wdt_resources[] = {
{
.start = MX2x_WDOG_BASE_ADDR,
@@ -124,7 +119,7 @@ static struct resource mxc_wdt_resources[] = {
};
struct platform_device mxc_wdt = {
- .name = "mxc_wdt",
+ .name = "imx2-wdt",
.id = 0,
.num_resources = ARRAY_SIZE(mxc_wdt_resources),
.resource = mxc_wdt_resources,
@@ -483,8 +478,8 @@ int __init mxc_register_gpios(void)
#ifdef CONFIG_MACH_MX21
static struct resource mx21_usbhc_resources[] = {
{
- .start = MX21_BASE_ADDR,
- .end = MX21_BASE_ADDR + 0x1FFF,
+ .start = MX21_USBOTG_BASE_ADDR,
+ .end = MX21_USBOTG_BASE_ADDR + SZ_8K - 1,
.flags = IORESOURCE_MEM,
},
{
diff --git a/arch/arm/mach-mx2/mach-pca100.c b/arch/arm/mach-mx2/mach-pca100.c
index 778fff230918..a87422ed4ff5 100644
--- a/arch/arm/mach-mx2/mach-pca100.c
+++ b/arch/arm/mach-mx2/mach-pca100.c
@@ -145,6 +145,7 @@ static struct mxc_nand_platform_data pca100_nand_board_info = {
static struct platform_device *platform_devices[] __initdata = {
&mxc_w1_master_device,
&mxc_fec_device,
+ &mxc_wdt,
};
static struct imxi2c_platform_data pca100_i2c_1_data = {
diff --git a/arch/arm/mach-mx2/mach-pcm038.c b/arch/arm/mach-mx2/mach-pcm038.c
index 035fbe046ec0..36c89431679a 100644
--- a/arch/arm/mach-mx2/mach-pcm038.c
+++ b/arch/arm/mach-mx2/mach-pcm038.c
@@ -182,6 +182,7 @@ static struct platform_device *platform_devices[] __initdata = {
&mxc_w1_master_device,
&mxc_fec_device,
&pcm038_sram_mtd_device,
+ &mxc_wdt,
};
/* On pcm038 there's a sram attached to CS1, we enable the chipselect here and
diff --git a/arch/arm/mach-mx2/pcm970-baseboard.c b/arch/arm/mach-mx2/pcm970-baseboard.c
index 4aafd5b8b85b..f490a406d57e 100644
--- a/arch/arm/mach-mx2/pcm970-baseboard.c
+++ b/arch/arm/mach-mx2/pcm970-baseboard.c
@@ -201,9 +201,9 @@ static struct resource pcm970_sja1000_resources[] = {
};
struct sja1000_platform_data pcm970_sja1000_platform_data = {
- .clock = 16000000 / 2,
- .ocr = 0x40 | 0x18,
- .cdr = 0x40,
+ .osc_freq = 16000000,
+ .ocr = OCR_TX1_PULLDOWN | OCR_TX0_PUSHPULL,
+ .cdr = CDR_CBP,
};
static struct platform_device pcm970_sja1000 = {
diff --git a/arch/arm/mach-mx25/devices.c b/arch/arm/mach-mx25/devices.c
index 3f4b8a0b5fac..3a405fa400eb 100644
--- a/arch/arm/mach-mx25/devices.c
+++ b/arch/arm/mach-mx25/devices.c
@@ -500,3 +500,18 @@ struct platform_device mx25_fb_device = {
.coherent_dma_mask = 0xFFFFFFFF,
},
};
+
+static struct resource mxc_wdt_resources[] = {
+ {
+ .start = MX25_WDOG_BASE_ADDR,
+ .end = MX25_WDOG_BASE_ADDR + SZ_16K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+struct platform_device mxc_wdt = {
+ .name = "imx2-wdt",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(mxc_wdt_resources),
+ .resource = mxc_wdt_resources,
+};
diff --git a/arch/arm/mach-mx25/devices.h b/arch/arm/mach-mx25/devices.h
index 39560e13bc0d..cee12c0a0be6 100644
--- a/arch/arm/mach-mx25/devices.h
+++ b/arch/arm/mach-mx25/devices.h
@@ -21,3 +21,4 @@ extern struct platform_device mx25_fec_device;
extern struct platform_device mxc_nand_device;
extern struct platform_device mx25_rtc_device;
extern struct platform_device mx25_fb_device;
+extern struct platform_device mxc_wdt;
diff --git a/arch/arm/mach-mx3/Kconfig b/arch/arm/mach-mx3/Kconfig
index 170f68e46dd5..344753fdf25e 100644
--- a/arch/arm/mach-mx3/Kconfig
+++ b/arch/arm/mach-mx3/Kconfig
@@ -82,6 +82,7 @@ config MACH_MX31MOBOARD
config MACH_MX31LILLY
bool "Support MX31 LILLY-1131 platforms (INCO startec)"
select ARCH_MX31
+ select MXC_ULPI if USB_ULPI
help
Include support for mx31 based LILLY1131 modules. This includes
specific configurations for the board and its peripherals.
diff --git a/arch/arm/mach-mx3/devices.c b/arch/arm/mach-mx3/devices.c
index f8911154a9fa..db7acd6e9101 100644
--- a/arch/arm/mach-mx3/devices.c
+++ b/arch/arm/mach-mx3/devices.c
@@ -582,12 +582,50 @@ static struct resource imx_wdt_resources[] = {
};
struct platform_device imx_wdt_device0 = {
- .name = "imx-wdt",
+ .name = "imx2-wdt",
.id = 0,
.num_resources = ARRAY_SIZE(imx_wdt_resources),
.resource = imx_wdt_resources,
};
+static struct resource imx_rtc_resources[] = {
+ {
+ .start = MX31_RTC_BASE_ADDR,
+ .end = MX31_RTC_BASE_ADDR + 0x3fff,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = MX31_INT_RTC,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+struct platform_device imx_rtc_device0 = {
+ .name = "mxc_rtc",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(imx_rtc_resources),
+ .resource = imx_rtc_resources,
+};
+
+static struct resource imx_kpp_resources[] = {
+ {
+ .start = MX3x_KPP_BASE_ADDR,
+ .end = MX3x_KPP_BASE_ADDR + 0xf,
+ .flags = IORESOURCE_MEM
+ }, {
+ .start = MX3x_INT_KPP,
+ .end = MX3x_INT_KPP,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+struct platform_device imx_kpp_device = {
+ .name = "imx-keypad",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(imx_kpp_resources),
+ .resource = imx_kpp_resources,
+};
+
static int __init mx3_devices_init(void)
{
if (cpu_is_mx31()) {
diff --git a/arch/arm/mach-mx3/devices.h b/arch/arm/mach-mx3/devices.h
index 4f77eb501274..2c3c8646a29e 100644
--- a/arch/arm/mach-mx3/devices.h
+++ b/arch/arm/mach-mx3/devices.h
@@ -27,3 +27,5 @@ extern struct platform_device imx_ssi_device0;
extern struct platform_device imx_ssi_device1;
extern struct platform_device imx_ssi_device1;
extern struct platform_device imx_wdt_device0;
+extern struct platform_device imx_rtc_device0;
+extern struct platform_device imx_kpp_device;
diff --git a/arch/arm/mach-mx3/mach-mx31_3ds.c b/arch/arm/mach-mx3/mach-mx31_3ds.c
index f54af1e29ca4..58e57291b79d 100644
--- a/arch/arm/mach-mx3/mach-mx31_3ds.c
+++ b/arch/arm/mach-mx3/mach-mx31_3ds.c
@@ -16,6 +16,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#include <linux/delay.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/clk.h>
@@ -26,6 +27,8 @@
#include <linux/mfd/mc13783.h>
#include <linux/spi/spi.h>
#include <linux/regulator/machine.h>
+#include <linux/fsl_devices.h>
+#include <linux/input/matrix_keypad.h>
#include <mach/hardware.h>
#include <asm/mach-types.h>
@@ -65,6 +68,50 @@ static int mx31_3ds_pins[] = {
MX31_PIN_CSPI2_SS2__SS2, /*CS for MC13783 */
/* MC13783 IRQ */
IOMUX_MODE(MX31_PIN_GPIO1_3, IOMUX_CONFIG_GPIO),
+ /* USB OTG reset */
+ IOMUX_MODE(MX31_PIN_USB_PWR, IOMUX_CONFIG_GPIO),
+ /* USB OTG */
+ MX31_PIN_USBOTG_DATA0__USBOTG_DATA0,
+ MX31_PIN_USBOTG_DATA1__USBOTG_DATA1,
+ MX31_PIN_USBOTG_DATA2__USBOTG_DATA2,
+ MX31_PIN_USBOTG_DATA3__USBOTG_DATA3,
+ MX31_PIN_USBOTG_DATA4__USBOTG_DATA4,
+ MX31_PIN_USBOTG_DATA5__USBOTG_DATA5,
+ MX31_PIN_USBOTG_DATA6__USBOTG_DATA6,
+ MX31_PIN_USBOTG_DATA7__USBOTG_DATA7,
+ MX31_PIN_USBOTG_CLK__USBOTG_CLK,
+ MX31_PIN_USBOTG_DIR__USBOTG_DIR,
+ MX31_PIN_USBOTG_NXT__USBOTG_NXT,
+ MX31_PIN_USBOTG_STP__USBOTG_STP,
+ /*Keyboard*/
+ MX31_PIN_KEY_ROW0_KEY_ROW0,
+ MX31_PIN_KEY_ROW1_KEY_ROW1,
+ MX31_PIN_KEY_ROW2_KEY_ROW2,
+ MX31_PIN_KEY_COL0_KEY_COL0,
+ MX31_PIN_KEY_COL1_KEY_COL1,
+ MX31_PIN_KEY_COL2_KEY_COL2,
+ MX31_PIN_KEY_COL3_KEY_COL3,
+};
+
+/*
+ * Matrix keyboard
+ */
+
+static const uint32_t mx31_3ds_keymap[] = {
+ KEY(0, 0, KEY_UP),
+ KEY(0, 1, KEY_DOWN),
+ KEY(1, 0, KEY_RIGHT),
+ KEY(1, 1, KEY_LEFT),
+ KEY(1, 2, KEY_ENTER),
+ KEY(2, 0, KEY_F6),
+ KEY(2, 1, KEY_F8),
+ KEY(2, 2, KEY_F9),
+ KEY(2, 3, KEY_F10),
+};
+
+static struct matrix_keymap_data mx31_3ds_keymap_data = {
+ .keymap = mx31_3ds_keymap,
+ .keymap_size = ARRAY_SIZE(mx31_3ds_keymap),
};
/* Regulators */
@@ -126,6 +173,41 @@ static struct mxc_nand_platform_data imx31_3ds_nand_flash_pdata = {
#endif
};
+/*
+ * USB OTG
+ */
+
+#define USB_PAD_CFG (PAD_CTL_DRV_MAX | PAD_CTL_SRE_FAST | PAD_CTL_HYS_CMOS | \
+ PAD_CTL_ODE_CMOS | PAD_CTL_100K_PU)
+
+#define USBOTG_RST_B IOMUX_TO_GPIO(MX31_PIN_USB_PWR)
+
+static void mx31_3ds_usbotg_init(void)
+{
+ mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA0, USB_PAD_CFG);
+ mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA1, USB_PAD_CFG);
+ mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA2, USB_PAD_CFG);
+ mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA3, USB_PAD_CFG);
+ mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA4, USB_PAD_CFG);
+ mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA5, USB_PAD_CFG);
+ mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA6, USB_PAD_CFG);
+ mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA7, USB_PAD_CFG);
+ mxc_iomux_set_pad(MX31_PIN_USBOTG_CLK, USB_PAD_CFG);
+ mxc_iomux_set_pad(MX31_PIN_USBOTG_DIR, USB_PAD_CFG);
+ mxc_iomux_set_pad(MX31_PIN_USBOTG_NXT, USB_PAD_CFG);
+ mxc_iomux_set_pad(MX31_PIN_USBOTG_STP, USB_PAD_CFG);
+
+ gpio_request(USBOTG_RST_B, "otgusb-reset");
+ gpio_direction_output(USBOTG_RST_B, 0);
+ mdelay(1);
+ gpio_set_value(USBOTG_RST_B, 1);
+}
+
+static struct fsl_usb2_platform_data usbotg_pdata = {
+ .operating_mode = FSL_USB2_DR_DEVICE,
+ .phy_mode = FSL_USB2_PHY_ULPI,
+};
+
static struct imxuart_platform_data uart_pdata = {
.flags = IMXUART_HAVE_RTSCTS,
};
@@ -315,6 +397,11 @@ static void __init mxc_board_init(void)
spi_register_board_info(mx31_3ds_spi_devs,
ARRAY_SIZE(mx31_3ds_spi_devs));
+ mxc_register_device(&imx_kpp_device, &mx31_3ds_keymap_data);
+
+ mx31_3ds_usbotg_init();
+ mxc_register_device(&mxc_otg_udc_device, &usbotg_pdata);
+
if (!mx31_3ds_init_expio())
platform_device_register(&smsc911x_device);
}
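Note on the reset sequence added in mx31_3ds_usbotg_init() above: the gpio_request() return value is ignored. A minimal sketch of the same ULPI PHY reset with error checking follows; the helper name and messages are illustrative only, not part of this patch.

static int __init mx31_3ds_usbotg_reset(void)
{
	int err;

	/* USBOTG_RST_B is the GPIO defined above via IOMUX_TO_GPIO() */
	err = gpio_request(USBOTG_RST_B, "otgusb-reset");
	if (err) {
		pr_err("mx31_3ds: cannot request OTG reset GPIO: %d\n", err);
		return err;
	}
	gpio_direction_output(USBOTG_RST_B, 0);	/* assert reset */
	mdelay(1);				/* let the PHY settle */
	gpio_set_value(USBOTG_RST_B, 1);	/* release reset */
	return 0;
}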
diff --git a/arch/arm/mach-mx3/mach-mx31lilly.c b/arch/arm/mach-mx3/mach-mx31lilly.c
index 80847b04c063..d3d5877c750e 100644
--- a/arch/arm/mach-mx3/mach-mx31lilly.c
+++ b/arch/arm/mach-mx3/mach-mx31lilly.c
@@ -27,12 +27,15 @@
#include <linux/types.h>
#include <linux/init.h>
#include <linux/clk.h>
+#include <linux/gpio.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/smsc911x.h>
#include <linux/mtd/physmap.h>
#include <linux/spi/spi.h>
#include <linux/mfd/mc13783.h>
+#include <linux/usb/otg.h>
+#include <linux/usb/ulpi.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@@ -44,6 +47,8 @@
#include <mach/iomux-mx3.h>
#include <mach/board-mx31lilly.h>
#include <mach/spi.h>
+#include <mach/mxc_ehci.h>
+#include <mach/ulpi.h>
#include "devices.h"
@@ -108,6 +113,137 @@ static struct platform_device physmap_flash_device = {
.num_resources = 1,
};
+/* USB */
+
+#define USB_PAD_CFG (PAD_CTL_DRV_MAX | PAD_CTL_SRE_FAST | PAD_CTL_HYS_CMOS | \
+ PAD_CTL_ODE_CMOS | PAD_CTL_100K_PU)
+
+static int usbotg_init(struct platform_device *pdev)
+{
+ unsigned int pins[] = {
+ MX31_PIN_USBOTG_DATA0__USBOTG_DATA0,
+ MX31_PIN_USBOTG_DATA1__USBOTG_DATA1,
+ MX31_PIN_USBOTG_DATA2__USBOTG_DATA2,
+ MX31_PIN_USBOTG_DATA3__USBOTG_DATA3,
+ MX31_PIN_USBOTG_DATA4__USBOTG_DATA4,
+ MX31_PIN_USBOTG_DATA5__USBOTG_DATA5,
+ MX31_PIN_USBOTG_DATA6__USBOTG_DATA6,
+ MX31_PIN_USBOTG_DATA7__USBOTG_DATA7,
+ MX31_PIN_USBOTG_CLK__USBOTG_CLK,
+ MX31_PIN_USBOTG_DIR__USBOTG_DIR,
+ MX31_PIN_USBOTG_NXT__USBOTG_NXT,
+ MX31_PIN_USBOTG_STP__USBOTG_STP,
+ };
+
+ mxc_iomux_setup_multiple_pins(pins, ARRAY_SIZE(pins), "USB OTG");
+
+ mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA0, USB_PAD_CFG);
+ mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA1, USB_PAD_CFG);
+ mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA2, USB_PAD_CFG);
+ mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA3, USB_PAD_CFG);
+ mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA4, USB_PAD_CFG);
+ mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA5, USB_PAD_CFG);
+ mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA6, USB_PAD_CFG);
+ mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA7, USB_PAD_CFG);
+ mxc_iomux_set_pad(MX31_PIN_USBOTG_CLK, USB_PAD_CFG);
+ mxc_iomux_set_pad(MX31_PIN_USBOTG_DIR, USB_PAD_CFG);
+ mxc_iomux_set_pad(MX31_PIN_USBOTG_NXT, USB_PAD_CFG);
+ mxc_iomux_set_pad(MX31_PIN_USBOTG_STP, USB_PAD_CFG);
+
+ mxc_iomux_set_gpr(MUX_PGP_USB_4WIRE, true);
+ mxc_iomux_set_gpr(MUX_PGP_USB_COMMON, true);
+
+ /* chip select */
+ mxc_iomux_alloc_pin(IOMUX_MODE(MX31_PIN_DTR_DCE2, IOMUX_CONFIG_GPIO),
+ "USBOTG_CS");
+ gpio_request(IOMUX_TO_GPIO(MX31_PIN_DTR_DCE2), "USBH1 CS");
+ gpio_direction_output(IOMUX_TO_GPIO(MX31_PIN_DTR_DCE2), 0);
+
+ return 0;
+}
+
+static int usbh1_init(struct platform_device *pdev)
+{
+ int pins[] = {
+ MX31_PIN_CSPI1_MOSI__USBH1_RXDM,
+ MX31_PIN_CSPI1_MISO__USBH1_RXDP,
+ MX31_PIN_CSPI1_SS0__USBH1_TXDM,
+ MX31_PIN_CSPI1_SS1__USBH1_TXDP,
+ MX31_PIN_CSPI1_SS2__USBH1_RCV,
+ MX31_PIN_CSPI1_SCLK__USBH1_OEB,
+ MX31_PIN_CSPI1_SPI_RDY__USBH1_FS,
+ };
+
+ mxc_iomux_setup_multiple_pins(pins, ARRAY_SIZE(pins), "USB H1");
+
+ mxc_iomux_set_pad(MX31_PIN_CSPI1_MOSI, USB_PAD_CFG);
+ mxc_iomux_set_pad(MX31_PIN_CSPI1_MISO, USB_PAD_CFG);
+ mxc_iomux_set_pad(MX31_PIN_CSPI1_SS0, USB_PAD_CFG);
+ mxc_iomux_set_pad(MX31_PIN_CSPI1_SS1, USB_PAD_CFG);
+ mxc_iomux_set_pad(MX31_PIN_CSPI1_SS2, USB_PAD_CFG);
+ mxc_iomux_set_pad(MX31_PIN_CSPI1_SCLK, USB_PAD_CFG);
+ mxc_iomux_set_pad(MX31_PIN_CSPI1_SPI_RDY, USB_PAD_CFG);
+
+ mxc_iomux_set_gpr(MUX_PGP_USB_SUSPEND, true);
+
+ return 0;
+}
+
+static int usbh2_init(struct platform_device *pdev)
+{
+ int pins[] = {
+ MX31_PIN_USBH2_DATA0__USBH2_DATA0,
+ MX31_PIN_USBH2_DATA1__USBH2_DATA1,
+ MX31_PIN_USBH2_CLK__USBH2_CLK,
+ MX31_PIN_USBH2_DIR__USBH2_DIR,
+ MX31_PIN_USBH2_NXT__USBH2_NXT,
+ MX31_PIN_USBH2_STP__USBH2_STP,
+ };
+
+ mxc_iomux_setup_multiple_pins(pins, ARRAY_SIZE(pins), "USB H2");
+
+ mxc_iomux_set_pad(MX31_PIN_USBH2_CLK, USB_PAD_CFG);
+ mxc_iomux_set_pad(MX31_PIN_USBH2_DIR, USB_PAD_CFG);
+ mxc_iomux_set_pad(MX31_PIN_USBH2_NXT, USB_PAD_CFG);
+ mxc_iomux_set_pad(MX31_PIN_USBH2_STP, USB_PAD_CFG);
+ mxc_iomux_set_pad(MX31_PIN_USBH2_DATA0, USB_PAD_CFG);
+ mxc_iomux_set_pad(MX31_PIN_USBH2_DATA1, USB_PAD_CFG);
+ mxc_iomux_set_pad(MX31_PIN_SRXD6, USB_PAD_CFG);
+ mxc_iomux_set_pad(MX31_PIN_STXD6, USB_PAD_CFG);
+ mxc_iomux_set_pad(MX31_PIN_SFS3, USB_PAD_CFG);
+ mxc_iomux_set_pad(MX31_PIN_SCK3, USB_PAD_CFG);
+ mxc_iomux_set_pad(MX31_PIN_SRXD3, USB_PAD_CFG);
+ mxc_iomux_set_pad(MX31_PIN_STXD3, USB_PAD_CFG);
+
+ mxc_iomux_set_gpr(MUX_PGP_UH2, true);
+
+ /* chip select */
+ mxc_iomux_alloc_pin(IOMUX_MODE(MX31_PIN_DTR_DCE1, IOMUX_CONFIG_GPIO),
+ "USBH2_CS");
+ gpio_request(IOMUX_TO_GPIO(MX31_PIN_DTR_DCE1), "USBH2 CS");
+ gpio_direction_output(IOMUX_TO_GPIO(MX31_PIN_DTR_DCE1), 0);
+
+ return 0;
+}
+
+static struct mxc_usbh_platform_data usbotg_pdata = {
+ .init = usbotg_init,
+ .portsc = MXC_EHCI_MODE_ULPI | MXC_EHCI_UTMI_8BIT,
+ .flags = MXC_EHCI_POWER_PINS_ENABLED,
+};
+
+static struct mxc_usbh_platform_data usbh1_pdata = {
+ .init = usbh1_init,
+ .portsc = MXC_EHCI_MODE_UTMI | MXC_EHCI_SERIAL,
+ .flags = MXC_EHCI_POWER_PINS_ENABLED | MXC_EHCI_INTERFACE_SINGLE_UNI,
+};
+
+static struct mxc_usbh_platform_data usbh2_pdata = {
+ .init = usbh2_init,
+ .portsc = MXC_EHCI_MODE_ULPI | MXC_EHCI_UTMI_8BIT,
+ .flags = MXC_EHCI_POWER_PINS_ENABLED,
+};
+
static struct platform_device *devices[] __initdata = {
&smsc91x_device,
&physmap_flash_device,
@@ -183,6 +319,15 @@ static void __init mx31lilly_board_init(void)
spi_register_board_info(&mc13783_dev, 1);
platform_add_devices(devices, ARRAY_SIZE(devices));
+
+ /* USB */
+ usbotg_pdata.otg = otg_ulpi_create(&mxc_ulpi_access_ops,
+ USB_OTG_DRV_VBUS | USB_OTG_DRV_VBUS_EXT);
+ usbh2_pdata.otg = otg_ulpi_create(&mxc_ulpi_access_ops,
+ USB_OTG_DRV_VBUS | USB_OTG_DRV_VBUS_EXT);
+
+ mxc_register_device(&mxc_usbh1, &usbh1_pdata);
+ mxc_register_device(&mxc_usbh2, &usbh2_pdata);
}
static void __init mx31lilly_timer_init(void)
diff --git a/arch/arm/mach-mx3/mach-mx31moboard.c b/arch/arm/mach-mx3/mach-mx31moboard.c
index fccb9207b78d..62b5e40165df 100644
--- a/arch/arm/mach-mx3/mach-mx31moboard.c
+++ b/arch/arm/mach-mx3/mach-mx31moboard.c
@@ -18,7 +18,6 @@
#include <linux/delay.h>
#include <linux/dma-mapping.h>
-#include <linux/fsl_devices.h>
#include <linux/gfp.h>
#include <linux/gpio.h>
#include <linux/init.h>
@@ -221,11 +220,54 @@ static struct mc13783_regulator_init_data moboard_regulators[] = {
},
};
+static struct mc13783_led_platform_data moboard_led[] = {
+ {
+ .id = MC13783_LED_R1,
+ .name = "coreboard-led-4:red",
+ .max_current = 2,
+ },
+ {
+ .id = MC13783_LED_G1,
+ .name = "coreboard-led-4:green",
+ .max_current = 2,
+ },
+ {
+ .id = MC13783_LED_B1,
+ .name = "coreboard-led-4:blue",
+ .max_current = 2,
+ },
+ {
+ .id = MC13783_LED_R2,
+ .name = "coreboard-led-5:red",
+ .max_current = 3,
+ },
+ {
+ .id = MC13783_LED_G2,
+ .name = "coreboard-led-5:green",
+ .max_current = 3,
+ },
+ {
+ .id = MC13783_LED_B2,
+ .name = "coreboard-led-5:blue",
+ .max_current = 3,
+ },
+};
+
+static struct mc13783_leds_platform_data moboard_leds = {
+ .num_leds = ARRAY_SIZE(moboard_led),
+ .led = moboard_led,
+ .flags = MC13783_LED_SLEWLIMTC,
+ .abmode = MC13783_LED_AB_DISABLED,
+ .tc1_period = MC13783_LED_PERIOD_10MS,
+ .tc2_period = MC13783_LED_PERIOD_10MS,
+};
+
static struct mc13783_platform_data moboard_pmic = {
.regulators = moboard_regulators,
.num_regulators = ARRAY_SIZE(moboard_regulators),
+ .leds = &moboard_leds,
.flags = MC13783_USE_REGULATOR | MC13783_USE_RTC |
- MC13783_USE_ADC,
+ MC13783_USE_ADC | MC13783_USE_LED,
};
static struct spi_board_info moboard_spi_board_info[] __initdata = {
@@ -306,84 +348,56 @@ static struct imxmmc_platform_data sdhc1_pdata = {
* this pin is dedicated for all mx31moboard systems, so we do it here
*/
#define USB_RESET_B IOMUX_TO_GPIO(MX31_PIN_GPIO1_0)
-
-static void usb_xcvr_reset(void)
-{
- gpio_request(USB_RESET_B, "usb-reset");
- gpio_direction_output(USB_RESET_B, 0);
- mdelay(1);
- gpio_set_value(USB_RESET_B, 1);
-}
-
#define USB_PAD_CFG (PAD_CTL_DRV_MAX | PAD_CTL_SRE_FAST | PAD_CTL_HYS_CMOS | \
- PAD_CTL_ODE_CMOS | PAD_CTL_100K_PU)
+ PAD_CTL_ODE_CMOS)
#define OTG_EN_B IOMUX_TO_GPIO(MX31_PIN_USB_OC)
-
-static void moboard_usbotg_init(void)
-{
- mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA0, USB_PAD_CFG);
- mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA1, USB_PAD_CFG);
- mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA2, USB_PAD_CFG);
- mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA3, USB_PAD_CFG);
- mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA4, USB_PAD_CFG);
- mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA5, USB_PAD_CFG);
- mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA6, USB_PAD_CFG);
- mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA7, USB_PAD_CFG);
- mxc_iomux_set_pad(MX31_PIN_USBOTG_CLK, USB_PAD_CFG);
- mxc_iomux_set_pad(MX31_PIN_USBOTG_DIR, USB_PAD_CFG);
- mxc_iomux_set_pad(MX31_PIN_USBOTG_NXT, USB_PAD_CFG);
- mxc_iomux_set_pad(MX31_PIN_USBOTG_STP, USB_PAD_CFG);
-
- gpio_request(OTG_EN_B, "usb-udc-en");
- gpio_direction_output(OTG_EN_B, 0);
-}
-
-static struct fsl_usb2_platform_data usb_pdata = {
- .operating_mode = FSL_USB2_DR_DEVICE,
- .phy_mode = FSL_USB2_PHY_ULPI,
-};
-
-#if defined(CONFIG_USB_ULPI)
-
#define USBH2_EN_B IOMUX_TO_GPIO(MX31_PIN_SCK6)
-static int moboard_usbh2_hw_init(struct platform_device *pdev)
+static void usb_xcvr_reset(void)
{
- int ret;
+ mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA0, USB_PAD_CFG | PAD_CTL_100K_PD);
+ mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA1, USB_PAD_CFG | PAD_CTL_100K_PD);
+ mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA2, USB_PAD_CFG | PAD_CTL_100K_PD);
+ mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA3, USB_PAD_CFG | PAD_CTL_100K_PD);
+ mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA4, USB_PAD_CFG | PAD_CTL_100K_PD);
+ mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA5, USB_PAD_CFG | PAD_CTL_100K_PD);
+ mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA6, USB_PAD_CFG | PAD_CTL_100K_PD);
+ mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA7, USB_PAD_CFG | PAD_CTL_100K_PD);
+ mxc_iomux_set_pad(MX31_PIN_USBOTG_CLK, USB_PAD_CFG | PAD_CTL_100K_PU);
+ mxc_iomux_set_pad(MX31_PIN_USBOTG_DIR, USB_PAD_CFG | PAD_CTL_100K_PU);
+ mxc_iomux_set_pad(MX31_PIN_USBOTG_NXT, USB_PAD_CFG | PAD_CTL_100K_PU);
+ mxc_iomux_set_pad(MX31_PIN_USBOTG_STP, USB_PAD_CFG | PAD_CTL_100K_PU);
mxc_iomux_set_gpr(MUX_PGP_UH2, true);
+ mxc_iomux_set_pad(MX31_PIN_USBH2_CLK, USB_PAD_CFG | PAD_CTL_100K_PU);
+ mxc_iomux_set_pad(MX31_PIN_USBH2_DIR, USB_PAD_CFG | PAD_CTL_100K_PU);
+ mxc_iomux_set_pad(MX31_PIN_USBH2_NXT, USB_PAD_CFG | PAD_CTL_100K_PU);
+ mxc_iomux_set_pad(MX31_PIN_USBH2_STP, USB_PAD_CFG | PAD_CTL_100K_PU);
+ mxc_iomux_set_pad(MX31_PIN_USBH2_DATA0, USB_PAD_CFG | PAD_CTL_100K_PD);
+ mxc_iomux_set_pad(MX31_PIN_USBH2_DATA1, USB_PAD_CFG | PAD_CTL_100K_PD);
+ mxc_iomux_set_pad(MX31_PIN_SRXD6, USB_PAD_CFG | PAD_CTL_100K_PD);
+ mxc_iomux_set_pad(MX31_PIN_STXD6, USB_PAD_CFG | PAD_CTL_100K_PD);
+ mxc_iomux_set_pad(MX31_PIN_SFS3, USB_PAD_CFG | PAD_CTL_100K_PD);
+ mxc_iomux_set_pad(MX31_PIN_SCK3, USB_PAD_CFG | PAD_CTL_100K_PD);
+ mxc_iomux_set_pad(MX31_PIN_SRXD3, USB_PAD_CFG | PAD_CTL_100K_PD);
+ mxc_iomux_set_pad(MX31_PIN_STXD3, USB_PAD_CFG | PAD_CTL_100K_PD);
- mxc_iomux_set_pad(MX31_PIN_USBH2_CLK, USB_PAD_CFG);
- mxc_iomux_set_pad(MX31_PIN_USBH2_DIR, USB_PAD_CFG);
- mxc_iomux_set_pad(MX31_PIN_USBH2_NXT, USB_PAD_CFG);
- mxc_iomux_set_pad(MX31_PIN_USBH2_STP, USB_PAD_CFG);
- mxc_iomux_set_pad(MX31_PIN_USBH2_DATA0, USB_PAD_CFG);
- mxc_iomux_set_pad(MX31_PIN_USBH2_DATA1, USB_PAD_CFG);
- mxc_iomux_set_pad(MX31_PIN_SRXD6, USB_PAD_CFG);
- mxc_iomux_set_pad(MX31_PIN_STXD6, USB_PAD_CFG);
- mxc_iomux_set_pad(MX31_PIN_SFS3, USB_PAD_CFG);
- mxc_iomux_set_pad(MX31_PIN_SCK3, USB_PAD_CFG);
- mxc_iomux_set_pad(MX31_PIN_SRXD3, USB_PAD_CFG);
- mxc_iomux_set_pad(MX31_PIN_STXD3, USB_PAD_CFG);
-
- ret = gpio_request(USBH2_EN_B, "usbh2-en");
- if (ret)
- return ret;
+ gpio_request(OTG_EN_B, "usb-udc-en");
+ gpio_direction_output(OTG_EN_B, 0);
+ gpio_request(USBH2_EN_B, "usbh2-en");
gpio_direction_output(USBH2_EN_B, 0);
- return 0;
+ gpio_request(USB_RESET_B, "usb-reset");
+ gpio_direction_output(USB_RESET_B, 0);
+ mdelay(1);
+ gpio_set_value(USB_RESET_B, 1);
+ mdelay(1);
}
-static int moboard_usbh2_hw_exit(struct platform_device *pdev)
-{
- gpio_free(USBH2_EN_B);
- return 0;
-}
+#if defined(CONFIG_USB_ULPI)
static struct mxc_usbh_platform_data usbh2_pdata = {
- .init = moboard_usbh2_hw_init,
- .exit = moboard_usbh2_hw_exit,
.portsc = MXC_EHCI_MODE_ULPI | MXC_EHCI_UTMI_8BIT,
.flags = MXC_EHCI_POWER_PINS_ENABLED,
};
@@ -508,8 +522,6 @@ static void __init mxc_board_init(void)
usb_xcvr_reset();
- moboard_usbotg_init();
- mxc_register_device(&mxc_otg_udc_device, &usb_pdata);
moboard_usbh2_init();
switch (mx31moboard_baseboard) {
@@ -522,7 +534,8 @@ static void __init mxc_board_init(void)
mx31moboard_marxbot_init();
break;
case MX31SMARTBOT:
- mx31moboard_smartbot_init();
+ case MX31EYEBOT:
+ mx31moboard_smartbot_init(mx31moboard_baseboard);
break;
default:
printk(KERN_ERR "Illegal mx31moboard_baseboard type %d\n",
diff --git a/arch/arm/mach-mx3/mach-pcm037.c b/arch/arm/mach-mx3/mach-pcm037.c
index 2df1ec55a97e..cce410662383 100644
--- a/arch/arm/mach-mx3/mach-pcm037.c
+++ b/arch/arm/mach-mx3/mach-pcm037.c
@@ -449,6 +449,7 @@ static int __init pcm037_camera_alloc_dma(const size_t buf_size)
static struct platform_device *devices[] __initdata = {
&pcm037_flash,
&pcm037_sram_device,
+ &imx_wdt_device0,
&pcm037_mt9t031,
&pcm037_mt9v022,
};
@@ -530,9 +531,9 @@ static struct resource pcm970_sja1000_resources[] = {
};
struct sja1000_platform_data pcm970_sja1000_platform_data = {
- .clock = 16000000 / 2,
- .ocr = 0x40 | 0x18,
- .cdr = 0x40,
+ .osc_freq = 16000000,
+ .ocr = OCR_TX1_PULLDOWN | OCR_TX0_PUSHPULL,
+ .cdr = CDR_CBP,
};
static struct platform_device pcm970_sja1000 = {
diff --git a/arch/arm/mach-mx3/mach-pcm043.c b/arch/arm/mach-mx3/mach-pcm043.c
index 1bf1ec2eef5e..78d9185a9d4b 100644
--- a/arch/arm/mach-mx3/mach-pcm043.c
+++ b/arch/arm/mach-mx3/mach-pcm043.c
@@ -150,6 +150,7 @@ static struct i2c_board_info pcm043_i2c_devices[] = {
static struct platform_device *devices[] __initdata = {
&pcm043_flash,
&mxc_fec_device,
+ &imx_wdt_device0,
};
static struct pad_desc pcm043_pads[] = {
diff --git a/arch/arm/mach-mx3/mx31lite-db.c b/arch/arm/mach-mx3/mx31lite-db.c
index 093c595ca581..5f05bfbec380 100644
--- a/arch/arm/mach-mx3/mx31lite-db.c
+++ b/arch/arm/mach-mx3/mx31lite-db.c
@@ -206,5 +206,6 @@ void __init mx31lite_db_init(void)
mxc_register_device(&mxc_spi_device0, &spi0_pdata);
platform_device_register(&litekit_led_device);
mxc_register_device(&imx_wdt_device0, NULL);
+ mxc_register_device(&imx_rtc_device0, NULL);
}
diff --git a/arch/arm/mach-mx3/mx31moboard-devboard.c b/arch/arm/mach-mx3/mx31moboard-devboard.c
index 11b906ce7eae..582299cb2c08 100644
--- a/arch/arm/mach-mx3/mx31moboard-devboard.c
+++ b/arch/arm/mach-mx3/mx31moboard-devboard.c
@@ -22,6 +22,7 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/types.h>
+#include <linux/fsl_devices.h>
#include <linux/usb/otg.h>
@@ -213,6 +214,12 @@ static int __init devboard_usbh1_init(void)
return mxc_register_device(&mxc_usbh1, &usbh1_pdata);
}
+
+static struct fsl_usb2_platform_data usb_pdata = {
+ .operating_mode = FSL_USB2_DR_DEVICE,
+ .phy_mode = FSL_USB2_PHY_ULPI,
+};
+
/*
* system init for baseboard usage. Will be called by mx31moboard init.
*/
@@ -229,5 +236,7 @@ void __init mx31moboard_devboard_init(void)
devboard_init_sel_gpios();
+ mxc_register_device(&mxc_otg_udc_device, &usb_pdata);
+
devboard_usbh1_init();
}
diff --git a/arch/arm/mach-mx3/mx31moboard-marxbot.c b/arch/arm/mach-mx3/mx31moboard-marxbot.c
index ffb105e14d88..4930f8c27e66 100644
--- a/arch/arm/mach-mx3/mx31moboard-marxbot.c
+++ b/arch/arm/mach-mx3/mx31moboard-marxbot.c
@@ -25,6 +25,7 @@
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/types.h>
+#include <linux/fsl_devices.h>
#include <linux/usb/otg.h>
@@ -329,6 +330,11 @@ static int __init marxbot_usbh1_init(void)
return mxc_register_device(&mxc_usbh1, &usbh1_pdata);
}
+static struct fsl_usb2_platform_data usb_pdata = {
+ .operating_mode = FSL_USB2_DR_DEVICE,
+ .phy_mode = FSL_USB2_PHY_ULPI,
+};
+
/*
* system init for baseboard usage. Will be called by mx31moboard init.
*/
@@ -356,5 +362,7 @@ void __init mx31moboard_marxbot_init(void)
gpio_direction_input(IOMUX_TO_GPIO(MX31_PIN_LCS0));
gpio_export(IOMUX_TO_GPIO(MX31_PIN_LCS0), false);
+ mxc_register_device(&mxc_otg_udc_device, &usb_pdata);
+
marxbot_usbh1_init();
}
diff --git a/arch/arm/mach-mx3/mx31moboard-smartbot.c b/arch/arm/mach-mx3/mx31moboard-smartbot.c
index 52a69fc8b14f..293eea6d9d97 100644
--- a/arch/arm/mach-mx3/mx31moboard-smartbot.c
+++ b/arch/arm/mach-mx3/mx31moboard-smartbot.c
@@ -23,11 +23,18 @@
#include <linux/i2c.h>
#include <linux/platform_device.h>
#include <linux/types.h>
+#include <linux/fsl_devices.h>
+
+#include <linux/usb/otg.h>
+#include <linux/usb/ulpi.h>
#include <mach/common.h>
#include <mach/hardware.h>
#include <mach/imx-uart.h>
#include <mach/iomux-mx3.h>
+#include <mach/board-mx31moboard.h>
+#include <mach/mxc_ehci.h>
+#include <mach/ulpi.h>
#include <media/soc_camera.h>
@@ -116,10 +123,33 @@ static int __init smartbot_cam_init(void)
return 0;
}
+static struct fsl_usb2_platform_data usb_pdata = {
+ .operating_mode = FSL_USB2_DR_DEVICE,
+ .phy_mode = FSL_USB2_PHY_ULPI,
+};
+
+#if defined(CONFIG_USB_ULPI)
+
+static struct mxc_usbh_platform_data otg_host_pdata = {
+ .portsc = MXC_EHCI_MODE_ULPI | MXC_EHCI_UTMI_8BIT,
+ .flags = MXC_EHCI_POWER_PINS_ENABLED,
+};
+
+static int __init smartbot_otg_host_init(void)
+{
+ otg_host_pdata.otg = otg_ulpi_create(&mxc_ulpi_access_ops,
+ USB_OTG_DRV_VBUS | USB_OTG_DRV_VBUS_EXT);
+
+ return mxc_register_device(&mxc_otg_host, &otg_host_pdata);
+}
+#else
+static inline int smartbot_otg_host_init(void) { return 0; }
+#endif
+
#define POWER_EN IOMUX_TO_GPIO(MX31_PIN_DTR_DCE1)
#define DSPIC_RST_B IOMUX_TO_GPIO(MX31_PIN_DSR_DCE1)
#define TRSLAT_RST_B IOMUX_TO_GPIO(MX31_PIN_RI_DCE1)
-#define SEL3 IOMUX_TO_GPIO(MX31_PIN_DCD_DCE1)
+#define TRSLAT_SRC_CHOICE IOMUX_TO_GPIO(MX31_PIN_DCD_DCE1)
static void smartbot_resets_init(void)
{
@@ -138,15 +168,15 @@ static void smartbot_resets_init(void)
gpio_export(TRSLAT_RST_B, false);
}
- if (!gpio_request(SEL3, "sel3")) {
- gpio_direction_input(SEL3);
- gpio_export(SEL3, true);
+ if (!gpio_request(TRSLAT_SRC_CHOICE, "translator-src-choice")) {
+ gpio_direction_output(TRSLAT_SRC_CHOICE, 0);
+ gpio_export(TRSLAT_SRC_CHOICE, false);
}
}
/*
* system init for baseboard usage. Will be called by mx31moboard init.
*/
-void __init mx31moboard_smartbot_init(void)
+void __init mx31moboard_smartbot_init(int board)
{
printk(KERN_INFO "Initializing mx31smartbot peripherals\n");
@@ -155,6 +185,19 @@ void __init mx31moboard_smartbot_init(void)
mxc_register_device(&mxc_uart_device1, &uart_pdata);
+
+ switch (board) {
+ case MX31SMARTBOT:
+ mxc_register_device(&mxc_otg_udc_device, &usb_pdata);
+ break;
+ case MX31EYEBOT:
+ smartbot_otg_host_init();
+ break;
+ default:
+ printk(KERN_WARNING "Unknown board %d, USB OTG not initialized\n",
+ board);
+ }
+
smartbot_resets_init();
smartbot_cam_init();
diff --git a/arch/arm/mach-mx5/board-mx51_babbage.c b/arch/arm/mach-mx5/board-mx51_babbage.c
index ee67a71db80d..ed885f9d7b73 100644
--- a/arch/arm/mach-mx5/board-mx51_babbage.c
+++ b/arch/arm/mach-mx5/board-mx51_babbage.c
@@ -12,11 +12,16 @@
#include <linux/init.h>
#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/fsl_devices.h>
#include <mach/common.h>
#include <mach/hardware.h>
#include <mach/imx-uart.h>
#include <mach/iomux-mx51.h>
+#include <mach/mxc_ehci.h>
#include <asm/irq.h>
#include <asm/setup.h>
@@ -26,6 +31,18 @@
#include "devices.h"
+#define BABBAGE_USB_HUB_RESET (0*32 + 7) /* GPIO_1_7 */
+#define BABBAGE_USBH1_STP (0*32 + 27) /* GPIO_1_27 */
+#define BABBAGE_PHY_RESET (1*32 + 5) /* GPIO_2_5 */
+
+/* USB_CTRL_1 */
+#define MX51_USB_CTRL_1_OFFSET 0x10
+#define MX51_USB_CTRL_UH1_EXT_CLK_EN (1 << 25)
+
+#define MX51_USB_PLLDIV_12_MHZ 0x00
+#define MX51_USB_PLL_DIV_19_2_MHZ 0x01
+#define MX51_USB_PLL_DIV_24_MHZ 0x02
+
static struct platform_device *devices[] __initdata = {
&mxc_fec_device,
};
@@ -46,6 +63,22 @@ static struct pad_desc mx51babbage_pads[] = {
MX51_PAD_EIM_D26__UART3_TXD,
MX51_PAD_EIM_D27__UART3_RTS,
MX51_PAD_EIM_D24__UART3_CTS,
+
+ /* USB HOST1 */
+ MX51_PAD_USBH1_CLK__USBH1_CLK,
+ MX51_PAD_USBH1_DIR__USBH1_DIR,
+ MX51_PAD_USBH1_NXT__USBH1_NXT,
+ MX51_PAD_USBH1_DATA0__USBH1_DATA0,
+ MX51_PAD_USBH1_DATA1__USBH1_DATA1,
+ MX51_PAD_USBH1_DATA2__USBH1_DATA2,
+ MX51_PAD_USBH1_DATA3__USBH1_DATA3,
+ MX51_PAD_USBH1_DATA4__USBH1_DATA4,
+ MX51_PAD_USBH1_DATA5__USBH1_DATA5,
+ MX51_PAD_USBH1_DATA6__USBH1_DATA6,
+ MX51_PAD_USBH1_DATA7__USBH1_DATA7,
+
+ /* USB HUB reset line */
+ MX51_PAD_GPIO_1_7__GPIO1_7,
};
/* Serial ports */
@@ -66,15 +99,149 @@ static inline void mxc_init_imx_uart(void)
}
#endif /* SERIAL_IMX */
+static int gpio_usbh1_active(void)
+{
+ struct pad_desc usbh1stp_gpio = MX51_PAD_USBH1_STP__GPIO_1_27;
+ struct pad_desc phyreset_gpio = MX51_PAD_EIM_D21__GPIO_2_5;
+ int ret;
+
+ /* Set USBH1_STP to GPIO and toggle it */
+ mxc_iomux_v3_setup_pad(&usbh1stp_gpio);
+ ret = gpio_request(BABBAGE_USBH1_STP, "usbh1_stp");
+
+ if (ret) {
+ pr_debug("failed to get MX51_PAD_USBH1_STP__GPIO_1_27: %d\n", ret);
+ return ret;
+ }
+ gpio_direction_output(BABBAGE_USBH1_STP, 0);
+ gpio_set_value(BABBAGE_USBH1_STP, 1);
+ msleep(100);
+ gpio_free(BABBAGE_USBH1_STP);
+
+ /* De-assert USB PHY RESETB */
+ mxc_iomux_v3_setup_pad(&phyreset_gpio);
+ ret = gpio_request(BABBAGE_PHY_RESET, "phy_reset");
+
+ if (ret) {
+ pr_debug("failed to get MX51_PAD_EIM_D21__GPIO_2_5: %d\n", ret);
+ return ret;
+ }
+ gpio_direction_output(BABBAGE_PHY_RESET, 1);
+ return 0;
+}
+
+static inline void babbage_usbhub_reset(void)
+{
+ int ret;
+
+ /* Bring USB hub out of reset */
+ ret = gpio_request(BABBAGE_USB_HUB_RESET, "GPIO1_7");
+ if (ret) {
+ printk(KERN_ERR"failed to get GPIO_USB_HUB_RESET: %d\n", ret);
+ return;
+ }
+ gpio_direction_output(BABBAGE_USB_HUB_RESET, 0);
+
+ /* USB HUB RESET - De-assert USB HUB RESET_N */
+ msleep(1);
+ gpio_set_value(BABBAGE_USB_HUB_RESET, 0);
+ msleep(1);
+ gpio_set_value(BABBAGE_USB_HUB_RESET, 1);
+}
+
+/* This function is board specific: the PLL divider bit mask will also
+ * differ on other Freescale SoCs, thus a common bitmask is not
+ * possible and this cannot be placed in plat-mxc/ehci.c. */
+static int initialize_otg_port(struct platform_device *pdev)
+{
+ u32 v;
+ void __iomem *usb_base;
+ u32 usbother_base;
+
+ usb_base = ioremap(MX51_OTG_BASE_ADDR, SZ_4K);
+ usbother_base = usb_base + MX5_USBOTHER_REGS_OFFSET;
+
+ /* Set the PHY clock to 19.2MHz */
+ v = __raw_readl(usbother_base + MXC_USB_PHY_CTR_FUNC2_OFFSET);
+ v &= ~MX5_USB_UTMI_PHYCTRL1_PLLDIV_MASK;
+ v |= MX51_USB_PLL_DIV_19_2_MHZ;
+ __raw_writel(v, usbother_base + MXC_USB_PHY_CTR_FUNC2_OFFSET);
+ iounmap(usb_base);
+ return 0;
+}
+
+static int initialize_usbh1_port(struct platform_device *pdev)
+{
+ u32 v;
+ void __iomem *usb_base;
+ void __iomem *usbother_base;
+
+ usb_base = ioremap(MX51_OTG_BASE_ADDR, SZ_4K);
+ usbother_base = usb_base + MX5_USBOTHER_REGS_OFFSET;
+
+ /* The clock for the USBH1 ULPI port will come externally from the PHY. */
+ v = __raw_readl(usbother_base + MX51_USB_CTRL_1_OFFSET);
+ __raw_writel(v | MX51_USB_CTRL_UH1_EXT_CLK_EN, usbother_base + MX51_USB_CTRL_1_OFFSET);
+ iounmap(usb_base);
+ return 0;
+}
+
+static struct mxc_usbh_platform_data dr_utmi_config = {
+ .init = initialize_otg_port,
+ .portsc = MXC_EHCI_UTMI_16BIT,
+ .flags = MXC_EHCI_INTERNAL_PHY,
+};
+
+static struct fsl_usb2_platform_data usb_pdata = {
+ .operating_mode = FSL_USB2_DR_DEVICE,
+ .phy_mode = FSL_USB2_PHY_UTMI_WIDE,
+};
+
+static struct mxc_usbh_platform_data usbh1_config = {
+ .init = initialize_usbh1_port,
+ .portsc = MXC_EHCI_MODE_ULPI,
+ .flags = (MXC_EHCI_POWER_PINS_ENABLED | MXC_EHCI_ITC_NO_THRESHOLD),
+};
+
+static int otg_mode_host;
+
+static int __init babbage_otg_mode(char *options)
+{
+ if (!strcmp(options, "host"))
+ otg_mode_host = 1;
+ else if (!strcmp(options, "device"))
+ otg_mode_host = 0;
+ else
+ pr_info("otg_mode neither \"host\" nor \"device\". "
+ "Defaulting to device\n");
+ return 0;
+}
+__setup("otg_mode=", babbage_otg_mode);
+
/*
* Board specific initialization.
*/
static void __init mxc_board_init(void)
{
+ struct pad_desc usbh1stp = MX51_PAD_USBH1_STP__USBH1_STP;
+
mxc_iomux_v3_setup_multiple_pads(mx51babbage_pads,
ARRAY_SIZE(mx51babbage_pads));
mxc_init_imx_uart();
platform_add_devices(devices, ARRAY_SIZE(devices));
+
+ if (otg_mode_host)
+ mxc_register_device(&mxc_usbdr_host_device, &dr_utmi_config);
+ else {
+ initialize_otg_port(NULL);
+ mxc_register_device(&mxc_usbdr_udc_device, &usb_pdata);
+ }
+
+ gpio_usbh1_active();
+ mxc_register_device(&mxc_usbh1_device, &usbh1_config);
+ /* setback USBH1_STP to be function */
+ mxc_iomux_v3_setup_pad(&usbh1stp);
+ babbage_usbhub_reset();
}
static void __init mx51_babbage_timer_init(void)
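Two remarks on the Babbage USB setup above. First, otg_mode=host on the kernel command line selects the EHCI host path registered through mxc_usbdr_host_device; any other value (or none) falls through to the fsl-usb2-udc gadget. Second, initialize_otg_port() does not check the ioremap() result; below is a hedged sketch of the same PLL-divider write with minimal error handling, where the helper name is hypothetical and only the macros already introduced by this patch are used.

static int __init babbage_set_usb_phy_clk(void)
{
	void __iomem *usb_base, *usbother_base;
	u32 v;

	usb_base = ioremap(MX51_OTG_BASE_ADDR, SZ_4K);
	if (!usb_base)
		return -ENOMEM;
	usbother_base = usb_base + MX5_USBOTHER_REGS_OFFSET;

	/* select the 19.2 MHz reference, as in initialize_otg_port() */
	v = __raw_readl(usbother_base + MXC_USB_PHY_CTR_FUNC2_OFFSET);
	v &= ~MX5_USB_UTMI_PHYCTRL1_PLLDIV_MASK;
	v |= MX51_USB_PLL_DIV_19_2_MHZ;
	__raw_writel(v, usbother_base + MXC_USB_PHY_CTR_FUNC2_OFFSET);

	iounmap(usb_base);
	return 0;
}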
diff --git a/arch/arm/mach-mx5/clock-mx51.c b/arch/arm/mach-mx5/clock-mx51.c
index 1ee6ce4087b8..d9f612d3370e 100644
--- a/arch/arm/mach-mx5/clock-mx51.c
+++ b/arch/arm/mach-mx5/clock-mx51.c
@@ -37,6 +37,7 @@ static struct clk lp_apm_clk;
static struct clk periph_apm_clk;
static struct clk ahb_clk;
static struct clk ipg_clk;
+static struct clk usboh3_clk;
#define MAX_DPLL_WAIT_TRIES 1000 /* 1000 * udelay(1) = 1ms */
@@ -570,6 +571,35 @@ static int _clk_uart_set_parent(struct clk *clk, struct clk *parent)
return 0;
}
+static unsigned long clk_usboh3_get_rate(struct clk *clk)
+{
+ u32 reg, prediv, podf;
+ unsigned long parent_rate;
+
+ parent_rate = clk_get_rate(clk->parent);
+
+ reg = __raw_readl(MXC_CCM_CSCDR1);
+ prediv = ((reg & MXC_CCM_CSCDR1_USBOH3_CLK_PRED_MASK) >>
+ MXC_CCM_CSCDR1_USBOH3_CLK_PRED_OFFSET) + 1;
+ podf = ((reg & MXC_CCM_CSCDR1_USBOH3_CLK_PODF_MASK) >>
+ MXC_CCM_CSCDR1_USBOH3_CLK_PODF_OFFSET) + 1;
+
+ return parent_rate / (prediv * podf);
+}
+
+static int _clk_usboh3_set_parent(struct clk *clk, struct clk *parent)
+{
+ u32 reg, mux;
+
+ mux = _get_mux(parent, &pll1_sw_clk, &pll2_sw_clk, &pll3_sw_clk,
+ &lp_apm_clk);
+ reg = __raw_readl(MXC_CCM_CSCMR1) & ~MXC_CCM_CSCMR1_USBOH3_CLK_SEL_MASK;
+ reg |= mux << MXC_CCM_CSCMR1_USBOH3_CLK_SEL_OFFSET;
+ __raw_writel(reg, MXC_CCM_CSCMR1);
+
+ return 0;
+}
+
static unsigned long get_high_reference_clock_rate(struct clk *clk)
{
return external_high_reference;
@@ -691,6 +721,12 @@ static struct clk uart_root_clk = {
.set_parent = _clk_uart_set_parent,
};
+static struct clk usboh3_clk = {
+ .parent = &pll2_sw_clk,
+ .get_rate = clk_usboh3_get_rate,
+ .set_parent = _clk_usboh3_set_parent,
+};
+
static struct clk ahb_max_clk = {
.parent = &ahb_clk,
.enable_reg = MXC_CCM_CCGR0,
@@ -779,6 +815,12 @@ static struct clk_lookup lookups[] = {
_REGISTER_CLOCK("imx-uart.2", NULL, uart3_clk)
_REGISTER_CLOCK(NULL, "gpt", gpt_clk)
_REGISTER_CLOCK("fec.0", NULL, fec_clk)
+ _REGISTER_CLOCK("mxc-ehci.0", "usb", usboh3_clk)
+ _REGISTER_CLOCK("mxc-ehci.0", "usb_ahb", ahb_clk)
+ _REGISTER_CLOCK("mxc-ehci.1", "usb", usboh3_clk)
+ _REGISTER_CLOCK("mxc-ehci.1", "usb_ahb", ahb_clk)
+ _REGISTER_CLOCK("fsl-usb2-udc", "usb", usboh3_clk)
+ _REGISTER_CLOCK("fsl-usb2-udc", "usb_ahb", ahb_clk)
};
static void clk_tree_init(void)
@@ -819,6 +861,9 @@ int __init mx51_clocks_init(unsigned long ckil, unsigned long osc,
clk_enable(&cpu_clk);
clk_enable(&main_bus_clk);
+ /* set the usboh3_clk parent to pll2_sw_clk */
+ clk_set_parent(&usboh3_clk, &pll2_sw_clk);
+
/* System timer */
mxc_timer_init(&gpt_clk, MX51_IO_ADDRESS(MX51_GPT1_BASE_ADDR),
MX51_MXC_INT_GPT);
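For clarity, clk_usboh3_get_rate() above computes rate = parent_rate / ((PRED + 1) * (PODF + 1)), with both divider fields read from CSCDR1. A worked example with assumed, purely illustrative values:

/* Illustrative only: assume pll2_sw_clk runs at 600 MHz and CSCDR1
 * encodes PRED = 4 and PODF = 1 (the hardware stores divider - 1). */
u32 prediv = 4 + 1;					/* divide by 5 */
u32 podf = 1 + 1;					/* divide by 2 */
unsigned long rate = 600000000UL / (prediv * podf);	/* 60 MHz */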
diff --git a/arch/arm/mach-mx5/devices.c b/arch/arm/mach-mx5/devices.c
index d6fd3961ade9..7130449aacdc 100644
--- a/arch/arm/mach-mx5/devices.c
+++ b/arch/arm/mach-mx5/devices.c
@@ -1,5 +1,6 @@
/*
* Copyright 2009 Amit Kucheria <amit.kucheria@canonical.com>
+ * Copyright (C) 2010 Freescale Semiconductor, Inc.
*
* The code contained herein is licensed under the GNU General Public
* License. You may obtain a copy of the GNU General Public License
@@ -10,8 +11,11 @@
*/
#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/gpio.h>
#include <mach/hardware.h>
#include <mach/imx-uart.h>
+#include <mach/irqs.h>
static struct resource uart0[] = {
{
@@ -89,8 +93,109 @@ struct platform_device mxc_fec_device = {
.resource = mxc_fec_resources,
};
-/* Dummy definition to allow compiling in AVIC and TZIC simultaneously */
+static u64 usb_dma_mask = DMA_BIT_MASK(32);
+
+static struct resource usbotg_resources[] = {
+ {
+ .start = MX51_OTG_BASE_ADDR,
+ .end = MX51_OTG_BASE_ADDR + 0x1ff,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = MX51_MXC_INT_USB_OTG,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+/* OTG gadget device */
+struct platform_device mxc_usbdr_udc_device = {
+ .name = "fsl-usb2-udc",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(usbotg_resources),
+ .resource = usbotg_resources,
+ .dev = {
+ .dma_mask = &usb_dma_mask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+};
+
+struct platform_device mxc_usbdr_host_device = {
+ .name = "mxc-ehci",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(usbotg_resources),
+ .resource = usbotg_resources,
+ .dev = {
+ .dma_mask = &usb_dma_mask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+};
+
+static struct resource usbh1_resources[] = {
+ {
+ .start = MX51_OTG_BASE_ADDR + 0x200,
+ .end = MX51_OTG_BASE_ADDR + 0x200 + 0x1ff,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = MX51_MXC_INT_USB_H1,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+struct platform_device mxc_usbh1_device = {
+ .name = "mxc-ehci",
+ .id = 1,
+ .num_resources = ARRAY_SIZE(usbh1_resources),
+ .resource = usbh1_resources,
+ .dev = {
+ .dma_mask = &usb_dma_mask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+};
+
+static struct resource mxc_wdt_resources[] = {
+ {
+ .start = MX51_WDOG_BASE_ADDR,
+ .end = MX51_WDOG_BASE_ADDR + SZ_16K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+struct platform_device mxc_wdt = {
+ .name = "imx2-wdt",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(mxc_wdt_resources),
+ .resource = mxc_wdt_resources,
+};
+
+static struct mxc_gpio_port mxc_gpio_ports[] = {
+ {
+ .chip.label = "gpio-0",
+ .base = MX51_IO_ADDRESS(MX51_GPIO1_BASE_ADDR),
+ .irq = MX51_MXC_INT_GPIO1_LOW,
+ .virtual_irq_start = MXC_GPIO_IRQ_START
+ },
+ {
+ .chip.label = "gpio-1",
+ .base = MX51_IO_ADDRESS(MX51_GPIO2_BASE_ADDR),
+ .irq = MX51_MXC_INT_GPIO2_LOW,
+ .virtual_irq_start = MXC_GPIO_IRQ_START + 32 * 1
+ },
+ {
+ .chip.label = "gpio-2",
+ .base = MX51_IO_ADDRESS(MX51_GPIO3_BASE_ADDR),
+ .irq = MX51_MXC_INT_GPIO3_LOW,
+ .virtual_irq_start = MXC_GPIO_IRQ_START + 32 * 2
+ },
+ {
+ .chip.label = "gpio-3",
+ .base = MX51_IO_ADDRESS(MX51_GPIO4_BASE_ADDR),
+ .irq = MX51_MXC_INT_GPIO4_LOW,
+ .virtual_irq_start = MXC_GPIO_IRQ_START + 32 * 3
+ },
+};
+
int __init mxc_register_gpios(void)
{
- return 0;
+ return mxc_gpio_init(mxc_gpio_ports, ARRAY_SIZE(mxc_gpio_ports));
}
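The four mxc_gpio_ports entries above each claim a 32-interrupt window, so pin N of bank B lands at MXC_GPIO_IRQ_START + 32 * B + N. A hypothetical helper spelling out that mapping (not part of the patch):

/* Illustrative only: bank and pin are zero-based */
static inline int mx51_gpio_virtual_irq(int bank, int pin)
{
	return MXC_GPIO_IRQ_START + bank * 32 + pin;
}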
diff --git a/arch/arm/mach-mx5/devices.h b/arch/arm/mach-mx5/devices.h
index f339ab8c19be..c879ae71cd5b 100644
--- a/arch/arm/mach-mx5/devices.h
+++ b/arch/arm/mach-mx5/devices.h
@@ -2,3 +2,7 @@ extern struct platform_device mxc_uart_device0;
extern struct platform_device mxc_uart_device1;
extern struct platform_device mxc_uart_device2;
extern struct platform_device mxc_fec_device;
+extern struct platform_device mxc_usbdr_host_device;
+extern struct platform_device mxc_usbh1_device;
+extern struct platform_device mxc_usbdr_udc_device;
+extern struct platform_device mxc_wdt;
diff --git a/arch/arm/mach-nomadik/Kconfig b/arch/arm/mach-nomadik/Kconfig
index 3c5e0f522e9c..71f3ea623974 100644
--- a/arch/arm/mach-nomadik/Kconfig
+++ b/arch/arm/mach-nomadik/Kconfig
@@ -6,6 +6,7 @@ config MACH_NOMADIK_8815NHK
bool "ST 8815 Nomadik Hardware Kit (evaluation board)"
select NOMADIK_8815
select HAS_MTU
+ select NOMADIK_GPIO
endmenu
diff --git a/arch/arm/mach-nomadik/Makefile b/arch/arm/mach-nomadik/Makefile
index 36f67fb207d2..a6bbd1a7b4e7 100644
--- a/arch/arm/mach-nomadik/Makefile
+++ b/arch/arm/mach-nomadik/Makefile
@@ -7,7 +7,7 @@
# Object file lists.
-obj-y += clock.o gpio.o
+obj-y += clock.o
# Cpu revision
obj-$(CONFIG_NOMADIK_8815) += cpu-8815.o
diff --git a/arch/arm/mach-nomadik/board-nhk8815.c b/arch/arm/mach-nomadik/board-nhk8815.c
index ab3712c86d2b..841d459ad59d 100644
--- a/arch/arm/mach-nomadik/board-nhk8815.c
+++ b/arch/arm/mach-nomadik/board-nhk8815.c
@@ -32,7 +32,6 @@
#include <mach/setup.h>
#include <mach/nand.h>
#include <mach/fsmc.h>
-#include "clock.h"
/* Initial value for SRC control register: all timers use MXTAL/8 source */
#define SRC_CR_INIT_MASK 0x00007fff
@@ -202,11 +201,6 @@ static struct amba_device *amba_devs[] __initdata = {
&uart1_device,
};
-/* We have a fixed clock alone, by now */
-static struct clk nhk8815_clk_48 = {
- .rate = 48*1000*1000,
-};
-
static struct resource nhk8815_eth_resources[] = {
{
.name = "smc91x-regs",
@@ -276,10 +270,8 @@ static void __init nhk8815_platform_init(void)
platform_add_devices(nhk8815_platform_devices,
ARRAY_SIZE(nhk8815_platform_devices));
- for (i = 0; i < ARRAY_SIZE(amba_devs); i++) {
- nmdk_clk_create(&nhk8815_clk_48, amba_devs[i]->dev.init_name);
+ for (i = 0; i < ARRAY_SIZE(amba_devs); i++)
amba_device_register(amba_devs[i], &iomem_resource);
- }
}
MACHINE_START(NOMADIK, "NHK8815")
diff --git a/arch/arm/mach-nomadik/clock.c b/arch/arm/mach-nomadik/clock.c
index 9f92502a0083..2c471fc451d7 100644
--- a/arch/arm/mach-nomadik/clock.c
+++ b/arch/arm/mach-nomadik/clock.c
@@ -32,14 +32,37 @@ void clk_disable(struct clk *clk)
}
EXPORT_SYMBOL(clk_disable);
-/* Create a clock structure with the given name */
-int nmdk_clk_create(struct clk *clk, const char *dev_id)
-{
- struct clk_lookup *clkdev;
+/* We only have a single fixed clock, for now */
+static struct clk clk_48 = {
+ .rate = 48 * 1000 * 1000,
+};
+
+/*
+ * Catch-all default clock to satisfy drivers using the clk API. We don't
+ * model the actual hardware clocks yet.
+ */
+static struct clk clk_default;
- clkdev = clkdev_alloc(clk, NULL, dev_id);
- if (!clkdev)
- return -ENOMEM;
- clkdev_add(clkdev);
+#define CLK(_clk, dev) \
+ { \
+ .clk = _clk, \
+ .dev_id = dev, \
+ }
+
+static struct clk_lookup lookups[] = {
+ CLK(&clk_48, "uart0"),
+ CLK(&clk_48, "uart1"),
+ CLK(&clk_default, "gpio.0"),
+ CLK(&clk_default, "gpio.1"),
+ CLK(&clk_default, "gpio.2"),
+ CLK(&clk_default, "gpio.3"),
+ CLK(&clk_default, "rng"),
+};
+
+static int __init clk_init(void)
+{
+ clkdev_add_table(lookups, ARRAY_SIZE(lookups));
return 0;
}
+
+arch_initcall(clk_init);
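With the clkdev table above, a consumer matched by dev_id picks up the fixed 48 MHz clock through the ordinary clk API. A minimal, hypothetical consumer sketch, assuming the usual clk_get()/clk_enable()/clk_get_rate() calls provided by this clock.c and the clkdev layer:

/* Illustrative only: probe-time lookup for a device whose dev_id is "uart0" */
static int nhk8815_clk_demo(struct device *dev)
{
	struct clk *clk = clk_get(dev, NULL);

	if (IS_ERR(clk))
		return PTR_ERR(clk);
	clk_enable(clk);
	pr_info("clock rate: %lu Hz\n", clk_get_rate(clk));	/* 48000000 for uart0 */
	clk_put(clk);
	return 0;
}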
diff --git a/arch/arm/mach-nomadik/clock.h b/arch/arm/mach-nomadik/clock.h
index 235faec7f627..5563985a2cc7 100644
--- a/arch/arm/mach-nomadik/clock.h
+++ b/arch/arm/mach-nomadik/clock.h
@@ -11,4 +11,3 @@
struct clk {
unsigned long rate;
};
-extern int nmdk_clk_create(struct clk *clk, const char *dev_id);
diff --git a/arch/arm/mach-nomadik/cpu-8815.c b/arch/arm/mach-nomadik/cpu-8815.c
index 9bf33b30a025..91c3c901b469 100644
--- a/arch/arm/mach-nomadik/cpu-8815.c
+++ b/arch/arm/mach-nomadik/cpu-8815.c
@@ -20,6 +20,7 @@
#include <linux/init.h>
#include <linux/device.h>
#include <linux/amba/bus.h>
+#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <mach/hardware.h>
@@ -30,60 +31,66 @@
#include <asm/cacheflush.h>
#include <asm/hardware/cache-l2x0.h>
+#define __MEM_4K_RESOURCE(x) \
+ .res = {.start = (x), .end = (x) + SZ_4K - 1, .flags = IORESOURCE_MEM}
+
/* The 8815 has 4 GPIO blocks, let's register them immediately */
+
+#define GPIO_RESOURCE(block) \
+ { \
+ .start = NOMADIK_GPIO##block##_BASE, \
+ .end = NOMADIK_GPIO##block##_BASE + SZ_4K - 1, \
+ .flags = IORESOURCE_MEM, \
+ }, \
+ { \
+ .start = IRQ_GPIO##block, \
+ .end = IRQ_GPIO##block, \
+ .flags = IORESOURCE_IRQ, \
+ }
+
+#define GPIO_DEVICE(block) \
+ { \
+ .name = "gpio", \
+ .id = block, \
+ .num_resources = 2, \
+ .resource = &cpu8815_gpio_resources[block * 2], \
+ .dev = { \
+ .platform_data = &cpu8815_gpio[block], \
+ }, \
+ }
+
static struct nmk_gpio_platform_data cpu8815_gpio[] = {
{
.name = "GPIO-0-31",
.first_gpio = 0,
.first_irq = NOMADIK_GPIO_TO_IRQ(0),
- .parent_irq = IRQ_GPIO0,
}, {
.name = "GPIO-32-63",
.first_gpio = 32,
.first_irq = NOMADIK_GPIO_TO_IRQ(32),
- .parent_irq = IRQ_GPIO1,
}, {
.name = "GPIO-64-95",
.first_gpio = 64,
.first_irq = NOMADIK_GPIO_TO_IRQ(64),
- .parent_irq = IRQ_GPIO2,
}, {
.name = "GPIO-96-127", /* 124..127 not routed to pin */
.first_gpio = 96,
.first_irq = NOMADIK_GPIO_TO_IRQ(96),
- .parent_irq = IRQ_GPIO3,
}
};
-#define __MEM_4K_RESOURCE(x) \
- .res = {.start = (x), .end = (x) + SZ_4K - 1, .flags = IORESOURCE_MEM}
+static struct resource cpu8815_gpio_resources[] = {
+ GPIO_RESOURCE(0),
+ GPIO_RESOURCE(1),
+ GPIO_RESOURCE(2),
+ GPIO_RESOURCE(3),
+};
-static struct amba_device cpu8815_amba_gpio[] = {
- {
- .dev = {
- .init_name = "gpio0",
- .platform_data = cpu8815_gpio + 0,
- },
- __MEM_4K_RESOURCE(NOMADIK_GPIO0_BASE),
- }, {
- .dev = {
- .init_name = "gpio1",
- .platform_data = cpu8815_gpio + 1,
- },
- __MEM_4K_RESOURCE(NOMADIK_GPIO1_BASE),
- }, {
- .dev = {
- .init_name = "gpio2",
- .platform_data = cpu8815_gpio + 2,
- },
- __MEM_4K_RESOURCE(NOMADIK_GPIO2_BASE),
- }, {
- .dev = {
- .init_name = "gpio3",
- .platform_data = cpu8815_gpio + 3,
- },
- __MEM_4K_RESOURCE(NOMADIK_GPIO3_BASE),
- },
+static struct platform_device cpu8815_platform_gpio[] = {
+ GPIO_DEVICE(0),
+ GPIO_DEVICE(1),
+ GPIO_DEVICE(2),
+ GPIO_DEVICE(3),
};
static struct amba_device cpu8815_amba_rng = {
@@ -93,11 +100,14 @@ static struct amba_device cpu8815_amba_rng = {
__MEM_4K_RESOURCE(NOMADIK_RNG_BASE),
};
+static struct platform_device *platform_devs[] __initdata = {
+ cpu8815_platform_gpio + 0,
+ cpu8815_platform_gpio + 1,
+ cpu8815_platform_gpio + 2,
+ cpu8815_platform_gpio + 3,
+};
+
static struct amba_device *amba_devs[] __initdata = {
- cpu8815_amba_gpio + 0,
- cpu8815_amba_gpio + 1,
- cpu8815_amba_gpio + 2,
- cpu8815_amba_gpio + 3,
&cpu8815_amba_rng
};
@@ -105,6 +115,7 @@ static int __init cpu8815_init(void)
{
int i;
+ platform_add_devices(platform_devs, ARRAY_SIZE(platform_devs));
for (i = 0; i < ARRAY_SIZE(amba_devs); i++)
amba_device_register(amba_devs[i], &iomem_resource);
return 0;
diff --git a/arch/arm/mach-nomadik/include/mach/gpio.h b/arch/arm/mach-nomadik/include/mach/gpio.h
index 61577c9f9a7d..7a81a0420343 100644
--- a/arch/arm/mach-nomadik/include/mach/gpio.h
+++ b/arch/arm/mach-nomadik/include/mach/gpio.h
@@ -1,71 +1,6 @@
-/*
- * Structures and registers for GPIO access in the Nomadik SoC
- *
- * Copyright (C) 2008 STMicroelectronics
- * Author: Prafulla WADASKAR <prafulla.wadaskar@st.com>
- * Copyright (C) 2009 Alessandro Rubini <rubini@unipv.it>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
#ifndef __ASM_ARCH_GPIO_H
#define __ASM_ARCH_GPIO_H
-#include <asm-generic/gpio.h>
-
-/*
- * These currently cause a function call to happen, they may be optimized
- * if needed by adding cpu-specific defines to identify blocks
- * (see mach-pxa/include/mach/gpio.h as an example using GPLR etc)
- */
-#define gpio_get_value __gpio_get_value
-#define gpio_set_value __gpio_set_value
-#define gpio_cansleep __gpio_cansleep
-#define gpio_to_irq __gpio_to_irq
-
-/*
- * "nmk_gpio" and "NMK_GPIO" stand for "Nomadik GPIO", leaving
- * the "gpio" namespace for generic and cross-machine functions
- */
-
-/* Register in the logic block */
-#define NMK_GPIO_DAT 0x00
-#define NMK_GPIO_DATS 0x04
-#define NMK_GPIO_DATC 0x08
-#define NMK_GPIO_PDIS 0x0c
-#define NMK_GPIO_DIR 0x10
-#define NMK_GPIO_DIRS 0x14
-#define NMK_GPIO_DIRC 0x18
-#define NMK_GPIO_SLPC 0x1c
-#define NMK_GPIO_AFSLA 0x20
-#define NMK_GPIO_AFSLB 0x24
-
-#define NMK_GPIO_RIMSC 0x40
-#define NMK_GPIO_FIMSC 0x44
-#define NMK_GPIO_IS 0x48
-#define NMK_GPIO_IC 0x4c
-#define NMK_GPIO_RWIMSC 0x50
-#define NMK_GPIO_FWIMSC 0x54
-#define NMK_GPIO_WKS 0x58
-
-/* Alternate functions: function C is set in hw by setting both A and B */
-#define NMK_GPIO_ALT_GPIO 0
-#define NMK_GPIO_ALT_A 1
-#define NMK_GPIO_ALT_B 2
-#define NMK_GPIO_ALT_C (NMK_GPIO_ALT_A | NMK_GPIO_ALT_B)
-
-extern int nmk_gpio_set_mode(int gpio, int gpio_mode);
-extern int nmk_gpio_get_mode(int gpio);
-
-/*
- * Platform data to register a block: only the initial gpio/irq number.
- */
-struct nmk_gpio_platform_data {
- char *name;
- int first_gpio;
- int first_irq;
- int parent_irq;
-};
+#include <plat/gpio.h>
#endif /* __ASM_ARCH_GPIO_H */
diff --git a/arch/arm/mach-omap1/Kconfig b/arch/arm/mach-omap1/Kconfig
index 27f489747bbd..b18d7c28ab7a 100644
--- a/arch/arm/mach-omap1/Kconfig
+++ b/arch/arm/mach-omap1/Kconfig
@@ -152,6 +152,16 @@ config MACH_AMS_DELTA
Support for the Amstrad E3 (codename Delta) videophone. Say Y here
if you have such a device.
+config AMS_DELTA_FIQ
+ bool "Fast Interrupt Request (FIQ) support for the E3"
+ depends on MACH_AMS_DELTA
+ select FIQ
+ help
+ Provide a FIQ handler for the E3.
+ This allows for fast handling of interrupts generated
+ by the clock line of the E3 mailboard (or a PS/2 keyboard)
+ connected to the GPIO based external keyboard port.
+
config MACH_OMAP_GENERIC
bool "Generic OMAP board"
depends on ARCH_OMAP1 && (ARCH_OMAP15XX || ARCH_OMAP16XX)
diff --git a/arch/arm/mach-omap1/Makefile b/arch/arm/mach-omap1/Makefile
index b6a537c875b8..ea231c7a550a 100644
--- a/arch/arm/mach-omap1/Makefile
+++ b/arch/arm/mach-omap1/Makefile
@@ -37,6 +37,7 @@ obj-$(CONFIG_MACH_OMAP_PALMZ71) += board-palmz71.o
obj-$(CONFIG_MACH_OMAP_PALMTT) += board-palmtt.o
obj-$(CONFIG_MACH_NOKIA770) += board-nokia770.o
obj-$(CONFIG_MACH_AMS_DELTA) += board-ams-delta.o
+obj-$(CONFIG_AMS_DELTA_FIQ) += ams-delta-fiq.o ams-delta-fiq-handler.o
obj-$(CONFIG_MACH_SX1) += board-sx1.o board-sx1-mmc.o
obj-$(CONFIG_MACH_HERALD) += board-htcherald.o
diff --git a/arch/arm/mach-omap1/ams-delta-fiq-handler.S b/arch/arm/mach-omap1/ams-delta-fiq-handler.S
new file mode 100644
index 000000000000..927d5a181760
--- /dev/null
+++ b/arch/arm/mach-omap1/ams-delta-fiq-handler.S
@@ -0,0 +1,278 @@
+/*
+ * linux/arch/arm/mach-omap1/ams-delta-fiq-handler.S
+ *
+ * Based on linux/arch/arm/lib/floppydma.S
+ * Renamed and modified to work with 2.6 kernel by Matt Callow
+ * Copyright (C) 1995, 1996 Russell King
+ * Copyright (C) 2004 Pete Trapps
+ * Copyright (C) 2006 Matt Callow
+ * Copyright (C) 2010 Janusz Krzysztofik
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+
+#include <plat/io.h>
+#include <plat/board-ams-delta.h>
+
+#include <mach/ams-delta-fiq.h>
+
+/*
+ * GPIO related definitions, copied from arch/arm/plat-omap/gpio.c.
+ * Unfortunately, those were not placed in a separate header file.
+ */
+#define OMAP1510_GPIO_BASE 0xFFFCE000
+#define OMAP1510_GPIO_DATA_INPUT 0x00
+#define OMAP1510_GPIO_DATA_OUTPUT 0x04
+#define OMAP1510_GPIO_DIR_CONTROL 0x08
+#define OMAP1510_GPIO_INT_CONTROL 0x0c
+#define OMAP1510_GPIO_INT_MASK 0x10
+#define OMAP1510_GPIO_INT_STATUS 0x14
+#define OMAP1510_GPIO_PIN_CONTROL 0x18
+
+/* GPIO register bitmasks */
+#define KEYBRD_DATA_MASK (0x1 << AMS_DELTA_GPIO_PIN_KEYBRD_DATA)
+#define KEYBRD_CLK_MASK (0x1 << AMS_DELTA_GPIO_PIN_KEYBRD_CLK)
+#define MODEM_IRQ_MASK (0x1 << AMS_DELTA_GPIO_PIN_MODEM_IRQ)
+#define HOOK_SWITCH_MASK (0x1 << AMS_DELTA_GPIO_PIN_HOOK_SWITCH)
+#define OTHERS_MASK (MODEM_IRQ_MASK | HOOK_SWITCH_MASK)
+
+/* IRQ handler register bitmasks */
+#define DEFERRED_FIQ_MASK (0x1 << (INT_DEFERRED_FIQ % IH2_BASE))
+#define GPIO_BANK1_MASK (0x1 << INT_GPIO_BANK1)
+
+/* Driver buffer byte offsets */
+#define BUF_MASK (FIQ_MASK * 4)
+#define BUF_STATE (FIQ_STATE * 4)
+#define BUF_KEYS_CNT (FIQ_KEYS_CNT * 4)
+#define BUF_TAIL_OFFSET (FIQ_TAIL_OFFSET * 4)
+#define BUF_HEAD_OFFSET (FIQ_HEAD_OFFSET * 4)
+#define BUF_BUF_LEN (FIQ_BUF_LEN * 4)
+#define BUF_KEY (FIQ_KEY * 4)
+#define BUF_MISSED_KEYS (FIQ_MISSED_KEYS * 4)
+#define BUF_BUFFER_START (FIQ_BUFFER_START * 4)
+#define BUF_GPIO_INT_MASK (FIQ_GPIO_INT_MASK * 4)
+#define BUF_KEYS_HICNT (FIQ_KEYS_HICNT * 4)
+#define BUF_IRQ_PEND (FIQ_IRQ_PEND * 4)
+#define BUF_SIR_CODE_L1 (FIQ_SIR_CODE_L1 * 4)
+#define BUF_SIR_CODE_L2 (IRQ_SIR_CODE_L2 * 4)
+#define BUF_CNT_INT_00 (FIQ_CNT_INT_00 * 4)
+#define BUF_CNT_INT_KEY (FIQ_CNT_INT_KEY * 4)
+#define BUF_CNT_INT_MDM (FIQ_CNT_INT_MDM * 4)
+#define BUF_CNT_INT_03 (FIQ_CNT_INT_03 * 4)
+#define BUF_CNT_INT_HSW (FIQ_CNT_INT_HSW * 4)
+#define BUF_CNT_INT_05 (FIQ_CNT_INT_05 * 4)
+#define BUF_CNT_INT_06 (FIQ_CNT_INT_06 * 4)
+#define BUF_CNT_INT_07 (FIQ_CNT_INT_07 * 4)
+#define BUF_CNT_INT_08 (FIQ_CNT_INT_08 * 4)
+#define BUF_CNT_INT_09 (FIQ_CNT_INT_09 * 4)
+#define BUF_CNT_INT_10 (FIQ_CNT_INT_10 * 4)
+#define BUF_CNT_INT_11 (FIQ_CNT_INT_11 * 4)
+#define BUF_CNT_INT_12 (FIQ_CNT_INT_12 * 4)
+#define BUF_CNT_INT_13 (FIQ_CNT_INT_13 * 4)
+#define BUF_CNT_INT_14 (FIQ_CNT_INT_14 * 4)
+#define BUF_CNT_INT_15 (FIQ_CNT_INT_15 * 4)
+#define BUF_CIRC_BUFF (FIQ_CIRC_BUFF * 4)
+
+
+/*
+ * Register usage
+ * r8 - temporary
+ * r9 - the driver buffer
+ * r10 - temporary
+ * r11 - interrupts mask
+ * r12 - base pointers
+ * r13 - interrupts status
+ */
+
+ .text
+
+ .global qwerty_fiqin_end
+
+ENTRY(qwerty_fiqin_start)
+ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+ @ FIQ interrupt handler
+ ldr r12, omap_ih1_base @ set pointer to level1 handler
+
+ ldr r11, [r12, #IRQ_MIR_REG_OFFSET] @ fetch interrupts mask
+
+ ldr r13, [r12, #IRQ_ITR_REG_OFFSET] @ fetch interrupts status
+ bics r13, r13, r11 @ clear masked - any left?
+ beq exit @ none - spurious FIQ? exit
+
+ ldr r10, [r12, #IRQ_SIR_FIQ_REG_OFFSET] @ get requested interrupt number
+
+ mov r8, #2 @ reset FIQ agreement
+ str r8, [r12, #IRQ_CONTROL_REG_OFFSET]
+
+ cmp r10, #INT_GPIO_BANK1 @ is it GPIO bank interrupt?
+ beq gpio @ yes - process it
+
+ mov r8, #1
+ orr r8, r11, r8, lsl r10 @ mask spurious interrupt
+ str r8, [r12, #IRQ_MIR_REG_OFFSET]
+exit:
+ subs pc, lr, #4 @ return from FIQ
+ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+
+
+ @@@@@@@@@@@@@@@@@@@@@@@@@@@
+gpio: @ GPIO bank interrupt handler
+ ldr r12, omap1510_gpio_base @ set base pointer to GPIO bank
+
+ ldr r11, [r12, #OMAP1510_GPIO_INT_MASK] @ fetch GPIO interrupts mask
+restart:
+ ldr r13, [r12, #OMAP1510_GPIO_INT_STATUS] @ fetch status bits
+ bics r13, r13, r11 @ clear masked - any left?
+ beq exit @ no - spurious interrupt? exit
+
+ orr r11, r11, r13 @ mask all requested interrupts
+ str r11, [r12, #OMAP1510_GPIO_INT_MASK]
+
+ ands r10, r13, #KEYBRD_CLK_MASK @ extract keyboard status - set?
+ beq hksw @ no - try next source
+
+
+ @@@@@@@@@@@@@@@@@@@@@@
+ @ Keyboard clock FIQ mode interrupt handler
+ @ r10 now contains KEYBRD_CLK_MASK, use it
+ str r10, [r12, #OMAP1510_GPIO_INT_STATUS] @ ack the interrupt
+ bic r11, r11, r10 @ unmask it
+ str r11, [r12, #OMAP1510_GPIO_INT_MASK]
+
+ @ Process keyboard data
+ ldr r8, [r12, #OMAP1510_GPIO_DATA_INPUT] @ fetch GPIO input
+
+ ldr r10, [r9, #BUF_STATE] @ fetch kbd interface state
+ cmp r10, #0 @ are we expecting start bit?
+ bne data @ no - go to data processing
+
+ ands r8, r8, #KEYBRD_DATA_MASK @ check start bit - detected?
+ beq hksw @ no - try next source
+
+ @ r8 contains KEYBRD_DATA_MASK, use it
+ str r8, [r9, #BUF_STATE] @ enter data processing state
+ @ r10 already contains 0, reuse it
+ str r10, [r9, #BUF_KEY] @ clear keycode
+ mov r10, #2 @ reset input bit mask
+ str r10, [r9, #BUF_MASK]
+
+ @ Mask other GPIO line interrupts till key done
+ str r11, [r9, #BUF_GPIO_INT_MASK] @ save mask for later restore
+ mvn r11, #KEYBRD_CLK_MASK @ prepare all except kbd mask
+ str r11, [r12, #OMAP1510_GPIO_INT_MASK] @ store into the mask register
+
+ b restart @ restart
+
+data: ldr r10, [r9, #BUF_MASK] @ fetch current input bit mask
+
+ @ r8 still contains GPIO input bits
+ ands r8, r8, #KEYBRD_DATA_MASK @ is keyboard data line low?
+ ldreq r8, [r9, #BUF_KEY] @ yes - fetch collected so far,
+ orreq r8, r8, r10 @ set 1 at current mask position
+ streq r8, [r9, #BUF_KEY] @ and save back
+
+ mov r10, r10, lsl #1 @ shift mask left
+ bics r10, r10, #0x800 @ have we got all the bits?
+ strne r10, [r9, #BUF_MASK] @ not yet - store the mask
+ bne restart @ and restart
+
+ @ r10 already contains 0, reuse it
+ str r10, [r9, #BUF_STATE] @ reset state to start
+
+ @ Key done - restore interrupt mask
+ ldr r10, [r9, #BUF_GPIO_INT_MASK] @ fetch saved mask
+ and r11, r11, r10 @ unmask all saved as unmasked
+ str r11, [r12, #OMAP1510_GPIO_INT_MASK] @ restore into the mask register
+
+ @ Try appending the keycode to the circular buffer
+ ldr r10, [r9, #BUF_KEYS_CNT] @ get saved keystrokes count
+ ldr r8, [r9, #BUF_BUF_LEN] @ get buffer size
+ cmp r10, r8 @ is buffer full?
+ beq hksw @ yes - key lost, next source
+
+ add r10, r10, #1 @ increment keystrokes counter
+ str r10, [r9, #BUF_KEYS_CNT]
+
+ ldr r10, [r9, #BUF_TAIL_OFFSET] @ get buffer tail offset
+ @ r8 already contains buffer size
+ cmp r10, r8 @ end of buffer?
+ moveq r10, #0 @ yes - rewind to buffer start
+
+ ldr r12, [r9, #BUF_BUFFER_START] @ get buffer start address
+ add r12, r12, r10, LSL #2 @ calculate buffer tail address
+ ldr r8, [r9, #BUF_KEY] @ get last keycode
+ str r8, [r12] @ append it to the buffer tail
+
+ add r10, r10, #1 @ increment buffer tail offset
+ str r10, [r9, #BUF_TAIL_OFFSET]
+
+ ldr r10, [r9, #BUF_CNT_INT_KEY] @ increment interrupts counter
+ add r10, r10, #1
+ str r10, [r9, #BUF_CNT_INT_KEY]
+ @@@@@@@@@@@@@@@@@@@@@@@@
+
+
+hksw: @ Is hook switch interrupt requested?
+ tst r13, #HOOK_SWITCH_MASK @ is hook switch status bit set?
+ beq mdm @ no - try next source
+
+
+ @@@@@@@@@@@@@@@@@@@@@@@@
+ @ Hook switch interrupt FIQ mode simple handler
+
+ @ Don't toggle active edge, the switch always bounces
+
+ @ Increment hook switch interrupt counter
+ ldr r10, [r9, #BUF_CNT_INT_HSW]
+ add r10, r10, #1
+ str r10, [r9, #BUF_CNT_INT_HSW]
+ @@@@@@@@@@@@@@@@@@@@@@@@
+
+
+mdm: @ Is it a modem interrupt?
+ tst r13, #MODEM_IRQ_MASK @ is modem status bit set?
+ beq irq @ no - check for next interrupt
+
+
+ @@@@@@@@@@@@@@@@@@@@@@@@
+ @ Modem FIQ mode interrupt handler stub
+
+ @ Increment modem interrupt counter
+ ldr r10, [r9, #BUF_CNT_INT_MDM]
+ add r10, r10, #1
+ str r10, [r9, #BUF_CNT_INT_MDM]
+ @@@@@@@@@@@@@@@@@@@@@@@@
+
+
+irq: @ Place deferred_fiq interrupt request
+ ldr r12, deferred_fiq_ih_base @ set pointer to IRQ handler
+ mov r10, #DEFERRED_FIQ_MASK @ set deferred_fiq bit
+ str r10, [r12, #IRQ_ISR_REG_OFFSET] @ place it in the ISR register
+
+ ldr r12, omap1510_gpio_base @ set pointer back to GPIO bank
+ b restart @ check for next GPIO interrupt
+ @@@@@@@@@@@@@@@@@@@@@@@@@@@
+
+
+/*
+ * Virtual addresses for IO
+ */
+omap_ih1_base:
+ .word OMAP1_IO_ADDRESS(OMAP_IH1_BASE)
+deferred_fiq_ih_base:
+ .word OMAP1_IO_ADDRESS(DEFERRED_FIQ_IH_BASE)
+omap1510_gpio_base:
+ .word OMAP1_IO_ADDRESS(OMAP1510_GPIO_BASE)
+qwerty_fiqin_end:
+
+/*
+ * Check the size of the FIQ handler: it is copied to 0xffff001c
+ * and must not extend beyond 0xffff0200.
+ */
+.if (qwerty_fiqin_end - qwerty_fiqin_start) > (0x200 - 0x1c)
+ .err
+.endif
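A note on the size check above: the handler is copied to the FIQ vector area at 0xffff001c and must end before 0xffff0200, which leaves 0x200 - 0x1c = 0x1e4 (484) bytes for the code and its literal pool; the .err directive fails the build if that budget is exceeded.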
diff --git a/arch/arm/mach-omap1/ams-delta-fiq.c b/arch/arm/mach-omap1/ams-delta-fiq.c
new file mode 100644
index 000000000000..6c994e2d8879
--- /dev/null
+++ b/arch/arm/mach-omap1/ams-delta-fiq.c
@@ -0,0 +1,155 @@
+/*
+ * Amstrad E3 FIQ handling
+ *
+ * Copyright (C) 2009 Janusz Krzysztofik
+ * Copyright (c) 2006 Matt Callow
+ * Copyright (c) 2004 Amstrad Plc
+ * Copyright (C) 2001 RidgeRun, Inc.
+ *
+ * Parts of this code are taken from linux/arch/arm/mach-omap/irq.c
+ * in the MontaVista 2.4 kernel (and the Amstrad changes therein)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/io.h>
+
+#include <plat/board-ams-delta.h>
+
+#include <asm/fiq.h>
+#include <mach/ams-delta-fiq.h>
+
+static struct fiq_handler fh = {
+ .name = "ams-delta-fiq"
+};
+
+/*
+ * This buffer is shared between FIQ and IRQ contexts.
+ * The FIQ and IRQ isrs can both read and write it.
+ * It is structured as a header section of several 32-bit slots,
+ * followed by the circular buffer where the FIQ isr stores
+ * keystrokes received from the qwerty keyboard.
+ * See ams-delta-fiq.h for details of offsets.
+ */
+unsigned int fiq_buffer[1024];
+EXPORT_SYMBOL(fiq_buffer);
+
+static unsigned int irq_counter[16];
+
+static irqreturn_t deferred_fiq(int irq, void *dev_id)
+{
+ struct irq_desc *irq_desc;
+ struct irq_chip *irq_chip = NULL;
+ int gpio, irq_num, fiq_count;
+
+ irq_desc = irq_to_desc(IH_GPIO_BASE);
+ if (irq_desc)
+ irq_chip = irq_desc->chip;
+
+ /*
+ * For each handled GPIO interrupt, keep calling its interrupt handler
+ * until the IRQ counter catches up with the FIQ-incremented counter.
+ */
+ for (gpio = AMS_DELTA_GPIO_PIN_KEYBRD_CLK;
+ gpio <= AMS_DELTA_GPIO_PIN_HOOK_SWITCH; gpio++) {
+ irq_num = gpio_to_irq(gpio);
+ fiq_count = fiq_buffer[FIQ_CNT_INT_00 + gpio];
+
+ while (irq_counter[gpio] < fiq_count) {
+ if (gpio != AMS_DELTA_GPIO_PIN_KEYBRD_CLK) {
+ /*
+ * handle_edge_irq(), which OMAP GPIO edge
+ * interrupts default to, expects the
+ * interrupt to be unmasked already.
+ */
+ if (irq_chip && irq_chip->unmask)
+ irq_chip->unmask(irq_num);
+ }
+ generic_handle_irq(irq_num);
+
+ irq_counter[gpio]++;
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+void __init ams_delta_init_fiq(void)
+{
+ void *fiqhandler_start;
+ unsigned int fiqhandler_length;
+ struct pt_regs FIQ_regs;
+ unsigned long val, offset;
+ int i, retval;
+
+ fiqhandler_start = &qwerty_fiqin_start;
+ fiqhandler_length = &qwerty_fiqin_end - &qwerty_fiqin_start;
+ pr_info("Installing fiq handler from %p, length 0x%x\n",
+ fiqhandler_start, fiqhandler_length);
+
+ retval = claim_fiq(&fh);
+ if (retval) {
+ pr_err("ams_delta_init_fiq(): couldn't claim FIQ, ret=%d\n",
+ retval);
+ return;
+ }
+
+ retval = request_irq(INT_DEFERRED_FIQ, deferred_fiq,
+ IRQ_TYPE_EDGE_RISING, "deferred_fiq", NULL);
+ if (retval < 0) {
+ pr_err("Failed to get deferred_fiq IRQ, ret=%d\n", retval);
+ release_fiq(&fh);
+ return;
+ }
+ /*
+ * Since no set_type() method is provided by OMAP irq chip,
+ * switch to edge triggered interrupt type manually.
+ */
+ offset = IRQ_ILR0_REG_OFFSET + INT_DEFERRED_FIQ * 0x4;
+ val = omap_readl(DEFERRED_FIQ_IH_BASE + offset) & ~(1 << 1);
+ omap_writel(val, DEFERRED_FIQ_IH_BASE + offset);
+
+ set_fiq_handler(fiqhandler_start, fiqhandler_length);
+
+ /*
+ * Initialise the buffer which is shared
+ * between FIQ mode and IRQ mode
+ */
+ fiq_buffer[FIQ_GPIO_INT_MASK] = 0;
+ fiq_buffer[FIQ_MASK] = 0;
+ fiq_buffer[FIQ_STATE] = 0;
+ fiq_buffer[FIQ_KEY] = 0;
+ fiq_buffer[FIQ_KEYS_CNT] = 0;
+ fiq_buffer[FIQ_KEYS_HICNT] = 0;
+ fiq_buffer[FIQ_TAIL_OFFSET] = 0;
+ fiq_buffer[FIQ_HEAD_OFFSET] = 0;
+ fiq_buffer[FIQ_BUF_LEN] = 256;
+ fiq_buffer[FIQ_MISSED_KEYS] = 0;
+ fiq_buffer[FIQ_BUFFER_START] =
+ (unsigned int) &fiq_buffer[FIQ_CIRC_BUFF];
+
+ for (i = FIQ_CNT_INT_00; i <= FIQ_CNT_INT_15; i++)
+ fiq_buffer[i] = 0;
+
+ /*
+ * FIQ mode r9 always points to the fiq_buffer, because the FIQ isr
+ * will run in an unpredictable context. The fiq_buffer is the FIQ isr's
+ * only means of communication with the IRQ level and other kernel
+ * context code.
+ */
+ FIQ_regs.ARM_r9 = (unsigned int)fiq_buffer;
+ set_fiq_regs(&FIQ_regs);
+
+ pr_info("request_fiq(): fiq_buffer = %p\n", fiq_buffer);
+
+ /*
+ * Redirect GPIO interrupts to FIQ
+ */
+ offset = IRQ_ILR0_REG_OFFSET + INT_GPIO_BANK1 * 0x4;
+ val = omap_readl(OMAP_IH1_BASE + offset) | 1;
+ omap_writel(val, OMAP_IH1_BASE + offset);
+}
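
The two register pokes in ams_delta_init_fiq() above (forcing INT_DEFERRED_FIQ to edge triggering and routing the GPIO bank 1 interrupt to FIQ) follow the same read-modify-write pattern on an OMAP1 interrupt level register (ILR). A minimal sketch of that pattern, assuming the bit meanings inferred from the code above (bit 0 = route to FIQ, bit 1 = level rather than edge triggering); omap1_ilr_modify() is a hypothetical helper, not part of the patch:

/*
 * Illustrative sketch only: read-modify-write of an OMAP1 ILR entry.
 * The bit meanings (bit 0 = FIQ routing, bit 1 = level triggering)
 * are inferred from how ams_delta_init_fiq() uses the register.
 */
static void omap1_ilr_modify(unsigned long ih_base, int irq,
			     unsigned long clear, unsigned long set)
{
	unsigned long offset = IRQ_ILR0_REG_OFFSET + irq * 0x4;
	unsigned long val;

	val = omap_readl(ih_base + offset);
	val = (val & ~clear) | set;
	omap_writel(val, ih_base + offset);
}

/* Route GPIO bank 1 interrupts to FIQ, as done at the end of init:      */
/*	omap1_ilr_modify(OMAP_IH1_BASE, INT_GPIO_BANK1, 0, 1);            */
/* Switch INT_DEFERRED_FIQ to edge triggering, as done earlier:          */
/*	omap1_ilr_modify(DEFERRED_FIQ_IH_BASE, INT_DEFERRED_FIQ, 1 << 1, 0); */
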
diff --git a/arch/arm/mach-omap1/board-ams-delta.c b/arch/arm/mach-omap1/board-ams-delta.c
index 7fc11c34b696..fdd1dd53fa9c 100644
--- a/arch/arm/mach-omap1/board-ams-delta.c
+++ b/arch/arm/mach-omap1/board-ams-delta.c
@@ -33,6 +33,8 @@
#include <plat/board.h>
#include <plat/common.h>
+#include <mach/ams-delta-fiq.h>
+
static u8 ams_delta_latch1_reg;
static u16 ams_delta_latch2_reg;
@@ -236,6 +238,10 @@ static void __init ams_delta_init(void)
omap_usb_init(&ams_delta_usb_config);
platform_add_devices(ams_delta_devices, ARRAY_SIZE(ams_delta_devices));
+#ifdef CONFIG_AMS_DELTA_FIQ
+ ams_delta_init_fiq();
+#endif
+
omap_writew(omap_readw(ARM_RSTCT1) | 0x0004, ARM_RSTCT1);
}
@@ -263,8 +269,18 @@ static struct platform_device ams_delta_modem_device = {
static int __init ams_delta_modem_init(void)
{
+ int err;
+
omap_cfg_reg(M14_1510_GPIO2);
- ams_delta_modem_ports[0].irq = gpio_to_irq(2);
+ ams_delta_modem_ports[0].irq =
+ gpio_to_irq(AMS_DELTA_GPIO_PIN_MODEM_IRQ);
+
+ err = gpio_request(AMS_DELTA_GPIO_PIN_MODEM_IRQ, "modem");
+ if (err) {
+ pr_err("Couldn't request gpio pin for modem\n");
+ return err;
+ }
+ gpio_direction_input(AMS_DELTA_GPIO_PIN_MODEM_IRQ);
ams_delta_latch2_write(
AMS_DELTA_LATCH2_MODEM_NRESET | AMS_DELTA_LATCH2_MODEM_CODEC,
diff --git a/arch/arm/mach-omap1/board-htcherald.c b/arch/arm/mach-omap1/board-htcherald.c
index e36639f66150..8e313b4b99a9 100644
--- a/arch/arm/mach-omap1/board-htcherald.c
+++ b/arch/arm/mach-omap1/board-htcherald.c
@@ -28,7 +28,6 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/input.h>
-#include <linux/bootmem.h>
#include <linux/io.h>
#include <linux/gpio.h>
diff --git a/arch/arm/mach-omap1/clock.c b/arch/arm/mach-omap1/clock.c
index e0aec1007a0d..6bbb1b8b8294 100644
--- a/arch/arm/mach-omap1/clock.c
+++ b/arch/arm/mach-omap1/clock.c
@@ -578,7 +578,7 @@ int omap1_clk_set_rate(struct clk *clk, unsigned long rate)
#ifdef CONFIG_OMAP_RESET_CLOCKS
-void __init omap1_clk_disable_unused(struct clk *clk)
+void omap1_clk_disable_unused(struct clk *clk)
{
__u32 regval32;
diff --git a/arch/arm/mach-omap1/clock.h b/arch/arm/mach-omap1/clock.h
index a4190afb8614..75d0d7d90bff 100644
--- a/arch/arm/mach-omap1/clock.h
+++ b/arch/arm/mach-omap1/clock.h
@@ -39,7 +39,7 @@ extern long omap1_clk_round_rate_ckctl_arm(struct clk *clk, unsigned long rate);
extern unsigned long omap1_watchdog_recalc(struct clk *clk);
#ifdef CONFIG_OMAP_RESET_CLOCKS
-extern void __init omap1_clk_disable_unused(struct clk *clk);
+extern void omap1_clk_disable_unused(struct clk *clk);
#else
#define omap1_clk_disable_unused NULL
#endif
diff --git a/arch/arm/mach-omap1/include/mach/ams-delta-fiq.h b/arch/arm/mach-omap1/include/mach/ams-delta-fiq.h
new file mode 100644
index 000000000000..7a2df29400ca
--- /dev/null
+++ b/arch/arm/mach-omap1/include/mach/ams-delta-fiq.h
@@ -0,0 +1,79 @@
+/*
+ * arch/arm/mach-omap1/include/mach/ams-delta-fiq.h
+ *
+ * Taken from the original Amstrad modifications to fiq.h
+ *
+ * Copyright (c) 2004 Amstrad Plc
+ * Copyright (c) 2006 Matt Callow
+ * Copyright (c) 2010 Janusz Krzysztofik
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __AMS_DELTA_FIQ_H
+#define __AMS_DELTA_FIQ_H
+
+#include <plat/irqs.h>
+
+/*
+ * Interrupt number used for passing control from FIQ to IRQ.
+ * IRQ12, described as reserved, has been selected.
+ */
+#define INT_DEFERRED_FIQ INT_1510_RES12
+/*
+ * Base address of the interrupt handler that INT_DEFERRED_FIQ belongs to.
+ */
+#if (INT_DEFERRED_FIQ < IH2_BASE)
+#define DEFERRED_FIQ_IH_BASE OMAP_IH1_BASE
+#else
+#define DEFERRED_FIQ_IH_BASE OMAP_IH2_BASE
+#endif
+
+/*
+ * These are the offsets from the beginning of the fiq_buffer. They are put here
+ * since the buffer and header need to be accessed by drivers servicing devices
+ * which generate GPIO interrupts - e.g. keyboard, modem, hook switch.
+ */
+#define FIQ_MASK 0
+#define FIQ_STATE 1
+#define FIQ_KEYS_CNT 2
+#define FIQ_TAIL_OFFSET 3
+#define FIQ_HEAD_OFFSET 4
+#define FIQ_BUF_LEN 5
+#define FIQ_KEY 6
+#define FIQ_MISSED_KEYS 7
+#define FIQ_BUFFER_START 8
+#define FIQ_GPIO_INT_MASK 9
+#define FIQ_KEYS_HICNT 10
+#define FIQ_IRQ_PEND 11
+#define FIQ_SIR_CODE_L1 12
+#define IRQ_SIR_CODE_L2 13
+
+#define FIQ_CNT_INT_00 14
+#define FIQ_CNT_INT_KEY 15
+#define FIQ_CNT_INT_MDM 16
+#define FIQ_CNT_INT_03 17
+#define FIQ_CNT_INT_HSW 18
+#define FIQ_CNT_INT_05 19
+#define FIQ_CNT_INT_06 20
+#define FIQ_CNT_INT_07 21
+#define FIQ_CNT_INT_08 22
+#define FIQ_CNT_INT_09 23
+#define FIQ_CNT_INT_10 24
+#define FIQ_CNT_INT_11 25
+#define FIQ_CNT_INT_12 26
+#define FIQ_CNT_INT_13 27
+#define FIQ_CNT_INT_14 28
+#define FIQ_CNT_INT_15 29
+
+#define FIQ_CIRC_BUFF 30 /* Start of circular buffer */
+
+#ifndef __ASSEMBLER__
+extern unsigned int fiq_buffer[];
+extern unsigned char qwerty_fiqin_start, qwerty_fiqin_end;
+
+extern void __init ams_delta_init_fiq(void);
+#endif
+
+#endif
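
Seen from the IRQ side, the header slots above describe a simple ring: the FIQ handler stores keystrokes at FIQ_BUFFER_START and advances FIQ_HEAD_OFFSET, while the consumer advances FIQ_TAIL_OFFSET, wrapping at FIQ_BUF_LEN. A minimal consumer sketch under that assumed head/tail convention; the exact protocol and the handle_key() callback are illustrative, not defined by this patch:

/*
 * Illustrative consumer of the shared fiq_buffer[].  handle_key() is a
 * hypothetical callback; the head/tail convention is assumed, not
 * specified by the patch.
 */
static void drain_qwerty_keys(void (*handle_key)(unsigned int key))
{
	unsigned int *circ = (unsigned int *)fiq_buffer[FIQ_BUFFER_START];
	unsigned int tail = fiq_buffer[FIQ_TAIL_OFFSET];

	while (tail != fiq_buffer[FIQ_HEAD_OFFSET]) {
		handle_key(circ[tail]);
		if (++tail >= fiq_buffer[FIQ_BUF_LEN])
			tail = 0;
		fiq_buffer[FIQ_TAIL_OFFSET] = tail;
	}
}
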
diff --git a/arch/arm/mach-omap1/include/mach/debug-macro.S b/arch/arm/mach-omap1/include/mach/debug-macro.S
index b6d9584544b4..e8a8cf36b7f0 100644
--- a/arch/arm/mach-omap1/include/mach/debug-macro.S
+++ b/arch/arm/mach-omap1/include/mach/debug-macro.S
@@ -13,6 +13,8 @@
#include <linux/serial_reg.h>
+#include <asm/memory.h>
+
#include <plat/serial.h>
.pushsection .data
@@ -37,23 +39,12 @@ omap_uart_virt: .word 0x0
cmp \rx, #0 @ is port configured?
bne 99f @ already configured
- /* Check 7XX UART1 scratchpad register for uart to use */
+ /* Check the debug UART configuration set in uncompress.h */
mrc p15, 0, \rx, c1, c0
tst \rx, #1 @ MMU enabled?
- moveq \rx, #0xff000000 @ physical base address
- movne \rx, #0xfe000000 @ virtual base
- orr \rx, \rx, #0x00fb0000 @ OMAP1UART1
- ldrb \rx, [\rx, #(UART_SCR << OMAP7XX_PORT_SHIFT)]
- cmp \rx, #0 @ anything in 7XX scratchpad?
- bne 10f @ found 7XX uart
-
- /* Check 15xx/16xx UART1 scratchpad register for uart to use */
- mrc p15, 0, \rx, c1, c0
- tst \rx, #1 @ MMU enabled?
- moveq \rx, #0xff000000 @ physical base address
- movne \rx, #0xfe000000 @ virtual base
- orr \rx, \rx, #0x00fb0000 @ OMAP1UART1
- ldrb \rx, [\rx, #(UART_SCR << OMAP_PORT_SHIFT)]
+ ldreq \rx, =OMAP_UART_INFO
+ ldrne \rx, =__phys_to_virt(OMAP_UART_INFO)
+ ldr \rx, [\rx, #0]
/* Select the UART to use based on the UART1 scratchpad value */
10: cmp \rx, #0 @ no port configured?
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 2455dcc744a0..b31b6f123122 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -10,6 +10,7 @@ config ARCH_OMAP2420
config ARCH_OMAP2430
bool "OMAP2430 support"
depends on ARCH_OMAP2
+ select ARCH_OMAP_OTG
config ARCH_OMAP3430
bool "OMAP3430 support"
@@ -141,6 +142,12 @@ config MACH_IGEP0020
depends on ARCH_OMAP3
select OMAP_PACKAGE_CBB
+config MACH_SBC3530
+ bool "OMAP3 SBC STALKER board"
+ depends on ARCH_OMAP3
+ select OMAP_PACKAGE_CUS
+ select OMAP_MUX
+
config MACH_OMAP_3630SDP
bool "OMAP3630 SDP board"
depends on ARCH_OMAP3
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index 4b9fc57770db..ea52b034e963 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -22,7 +22,7 @@ obj-$(CONFIG_OMAP_MCBSP) += mcbsp.o
# SMP support ONLY available for OMAP4
obj-$(CONFIG_SMP) += omap-smp.o omap-headsmp.o
obj-$(CONFIG_LOCAL_TIMERS) += timer-mpu.o
-obj-$(CONFIG_ARCH_OMAP4) += omap44xx-smc.o
+obj-$(CONFIG_ARCH_OMAP4) += omap44xx-smc.o omap4-common.o
AFLAGS_omap44xx-smc.o :=-Wa,-march=armv7-a
@@ -89,10 +89,7 @@ obj-$(CONFIG_OMAP3_EMU) += emu.o
obj-$(CONFIG_OMAP_MBOX_FWK) += mailbox_mach.o
mailbox_mach-objs := mailbox.o
-iommu-y += iommu2.o
-iommu-$(CONFIG_ARCH_OMAP3) += omap3-iommu.o
-
-obj-$(CONFIG_OMAP_IOMMU) += $(iommu-y)
+obj-$(CONFIG_OMAP_IOMMU) := iommu2.o omap-iommu.o
i2c-omap-$(CONFIG_I2C_OMAP) := i2c.o
obj-y += $(i2c-omap-m) $(i2c-omap-y)
@@ -122,6 +119,7 @@ obj-$(CONFIG_MACH_NOKIA_N8X0) += board-n8x0.o
obj-$(CONFIG_MACH_NOKIA_RX51) += board-rx51.o \
board-rx51-sdram.o \
board-rx51-peripherals.o \
+ board-rx51-video.o \
hsmmc.o
obj-$(CONFIG_MACH_OMAP_ZOOM2) += board-zoom2.o \
board-zoom-peripherals.o \
@@ -140,10 +138,13 @@ obj-$(CONFIG_MACH_IGEP0020) += board-igep0020.o \
hsmmc.o
obj-$(CONFIG_MACH_OMAP3_TOUCHBOOK) += board-omap3touchbook.o \
hsmmc.o
-obj-$(CONFIG_MACH_OMAP_4430SDP) += board-4430sdp.o
+obj-$(CONFIG_MACH_OMAP_4430SDP) += board-4430sdp.o \
+ hsmmc.o
obj-$(CONFIG_MACH_OMAP3517EVM) += board-am3517evm.o
+obj-$(CONFIG_MACH_SBC3530) += board-omap3stalker.o \
+ hsmmc.o
# Platform specific device init code
obj-y += usb-musb.o
obj-$(CONFIG_MACH_OMAP2_TUSB6010) += usb-tusb6010.o
diff --git a/arch/arm/mach-omap2/board-2430sdp.c b/arch/arm/mach-omap2/board-2430sdp.c
index 01d113ff9fcf..a11a575745e4 100644
--- a/arch/arm/mach-omap2/board-2430sdp.c
+++ b/arch/arm/mach-omap2/board-2430sdp.c
@@ -174,9 +174,18 @@ static struct i2c_board_info __initdata sdp2430_i2c_boardinfo[] = {
},
};
+static struct i2c_board_info __initdata sdp2430_i2c1_boardinfo[] = {
+ {
+ I2C_BOARD_INFO("isp1301_omap", 0x2D),
+ .flags = I2C_CLIENT_WAKE,
+ .irq = OMAP_GPIO_IRQ(78),
+ },
+};
+
static int __init omap2430_i2c_init(void)
{
- omap_register_i2c_bus(1, 400, NULL, 0);
+ omap_register_i2c_bus(1, 100, sdp2430_i2c1_boardinfo,
+ ARRAY_SIZE(sdp2430_i2c1_boardinfo));
omap_register_i2c_bus(2, 2600, sdp2430_i2c_boardinfo,
ARRAY_SIZE(sdp2430_i2c_boardinfo));
return 0;
@@ -198,6 +207,15 @@ static struct omap_musb_board_data musb_board_data = {
.mode = MUSB_OTG,
.power = 100,
};
+static struct omap_usb_config sdp2430_usb_config __initdata = {
+ .otg = 1,
+#ifdef CONFIG_USB_GADGET_OMAP
+ .hmc_mode = 0x0,
+#elif defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
+ .hmc_mode = 0x1,
+#endif
+ .pins[0] = 3,
+};
static void __init omap_2430sdp_init(void)
{
@@ -208,6 +226,7 @@ static void __init omap_2430sdp_init(void)
platform_add_devices(sdp2430_devices, ARRAY_SIZE(sdp2430_devices));
omap_serial_init();
omap2_hsmmc_init(mmc);
+ omap_usb_init(&sdp2430_usb_config);
usb_musb_init(&musb_board_data);
board_smc91x_init();
diff --git a/arch/arm/mach-omap2/board-3430sdp.c b/arch/arm/mach-omap2/board-3430sdp.c
index 5822bcf7b15f..f474a80b8867 100644
--- a/arch/arm/mach-omap2/board-3430sdp.c
+++ b/arch/arm/mach-omap2/board-3430sdp.c
@@ -137,9 +137,7 @@ static void ads7846_dev_init(void)
}
gpio_direction_input(ts_gpio);
-
- omap_set_gpio_debounce(ts_gpio, 1);
- omap_set_gpio_debounce_time(ts_gpio, 0xa);
+ gpio_set_debounce(ts_gpio, 310);
}
static int ads7846_get_pendown_state(void)
@@ -150,6 +148,7 @@ static int ads7846_get_pendown_state(void)
static struct ads7846_platform_data tsc2046_config __initdata = {
.get_pendown_state = ads7846_get_pendown_state,
.keep_vref_on = 1,
+ .wakeup = true,
};
diff --git a/arch/arm/mach-omap2/board-4430sdp.c b/arch/arm/mach-omap2/board-4430sdp.c
index b88f28c5814b..e4a5d66b83b8 100644
--- a/arch/arm/mach-omap2/board-4430sdp.c
+++ b/arch/arm/mach-omap2/board-4430sdp.c
@@ -18,8 +18,12 @@
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/usb/otg.h>
+#include <linux/spi/spi.h>
+#include <linux/i2c/twl.h>
+#include <linux/regulator/machine.h>
#include <mach/hardware.h>
+#include <mach/omap4-common.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
@@ -29,8 +33,77 @@
#include <plat/control.h>
#include <plat/timer-gp.h>
#include <plat/usb.h>
-#include <asm/hardware/gic.h>
-#include <asm/hardware/cache-l2x0.h>
+#include <plat/mmc.h>
+#include "hsmmc.h"
+
+#define ETH_KS8851_IRQ 34
+#define ETH_KS8851_POWER_ON 48
+#define ETH_KS8851_QUART 138
+
+static struct spi_board_info sdp4430_spi_board_info[] __initdata = {
+ {
+ .modalias = "ks8851",
+ .bus_num = 1,
+ .chip_select = 0,
+ .max_speed_hz = 24000000,
+ .irq = ETH_KS8851_IRQ,
+ },
+};
+
+static int omap_ethernet_init(void)
+{
+ int status;
+
+ /* Request of GPIO lines */
+
+ status = gpio_request(ETH_KS8851_POWER_ON, "eth_power");
+ if (status) {
+ pr_err("Cannot request GPIO %d\n", ETH_KS8851_POWER_ON);
+ return status;
+ }
+
+ status = gpio_request(ETH_KS8851_QUART, "quart");
+ if (status) {
+ pr_err("Cannot request GPIO %d\n", ETH_KS8851_QUART);
+ goto error1;
+ }
+
+ status = gpio_request(ETH_KS8851_IRQ, "eth_irq");
+ if (status) {
+ pr_err("Cannot request GPIO %d\n", ETH_KS8851_IRQ);
+ goto error2;
+ }
+
+ /* Configuration of requested GPIO lines */
+
+ status = gpio_direction_output(ETH_KS8851_POWER_ON, 1);
+ if (status) {
+ pr_err("Cannot set output GPIO %d\n", ETH_KS8851_IRQ);
+ goto error3;
+ }
+
+ status = gpio_direction_output(ETH_KS8851_QUART, 1);
+ if (status) {
+ pr_err("Cannot set output GPIO %d\n", ETH_KS8851_QUART);
+ goto error3;
+ }
+
+ status = gpio_direction_input(ETH_KS8851_IRQ);
+ if (status) {
+ pr_err("Cannot set input GPIO %d\n", ETH_KS8851_IRQ);
+ goto error3;
+ }
+
+ return 0;
+
+error3:
+ gpio_free(ETH_KS8851_IRQ);
+error2:
+ gpio_free(ETH_KS8851_QUART);
+error1:
+ gpio_free(ETH_KS8851_POWER_ON);
+ return status;
+}
static struct platform_device sdp4430_lcd_device = {
.name = "sdp4430_lcd",
@@ -49,50 +122,6 @@ static struct omap_board_config_kernel sdp4430_config[] __initdata = {
{ OMAP_TAG_LCD, &sdp4430_lcd_config },
};
-#ifdef CONFIG_CACHE_L2X0
-static int __init omap_l2_cache_init(void)
-{
- extern void omap_smc1(u32 fn, u32 arg);
- void __iomem *l2cache_base;
-
- /* To avoid code running on other OMAPs in
- * multi-omap builds
- */
- if (!cpu_is_omap44xx())
- return -ENODEV;
-
- /* Static mapping, never released */
- l2cache_base = ioremap(OMAP44XX_L2CACHE_BASE, SZ_4K);
- BUG_ON(!l2cache_base);
-
- /* Enable PL310 L2 Cache controller */
- omap_smc1(0x102, 0x1);
-
- /* 32KB way size, 16-way associativity,
- * parity disabled
- */
- l2x0_init(l2cache_base, 0x0e050000, 0xc0000fff);
-
- return 0;
-}
-early_initcall(omap_l2_cache_init);
-#endif
-
-static void __init gic_init_irq(void)
-{
- void __iomem *base;
-
- /* Static mapping, never released */
- base = ioremap(OMAP44XX_GIC_DIST_BASE, SZ_4K);
- BUG_ON(!base);
- gic_dist_init(0, base, 29);
-
- /* Static mapping, never released */
- gic_cpu_base_addr = ioremap(OMAP44XX_GIC_CPU_BASE, SZ_512);
- BUG_ON(!gic_cpu_base_addr);
- gic_cpu_init(0, gic_cpu_base_addr);
-}
-
static void __init omap_4430sdp_init_irq(void)
{
omap_board_config = sdp4430_config;
@@ -111,15 +140,254 @@ static struct omap_musb_board_data musb_board_data = {
.power = 100,
};
+static struct omap2_hsmmc_info mmc[] = {
+ {
+ .mmc = 1,
+ .wires = 8,
+ .gpio_wp = -EINVAL,
+ },
+ {
+ .mmc = 2,
+ .wires = 8,
+ .gpio_cd = -EINVAL,
+ .gpio_wp = -EINVAL,
+ .nonremovable = true,
+ },
+ {} /* Terminator */
+};
+
+static struct regulator_consumer_supply sdp4430_vmmc_supply[] = {
+ {
+ .supply = "vmmc",
+ .dev_name = "mmci-omap-hs.0",
+ },
+ {
+ .supply = "vmmc",
+ .dev_name = "mmci-omap-hs.1",
+ },
+};
+
+static int omap4_twl6030_hsmmc_late_init(struct device *dev)
+{
+ int ret = 0;
+ struct platform_device *pdev = container_of(dev,
+ struct platform_device, dev);
+ struct omap_mmc_platform_data *pdata = dev->platform_data;
+
+ /* Setting MMC1 Card detect Irq */
+ if (pdev->id == 0)
+ pdata->slots[0].card_detect_irq = TWL6030_IRQ_BASE +
+ MMCDETECT_INTR_OFFSET;
+ return ret;
+}
+
+static __init void omap4_twl6030_hsmmc_set_late_init(struct device *dev)
+{
+ struct omap_mmc_platform_data *pdata = dev->platform_data;
+
+ pdata->init = omap4_twl6030_hsmmc_late_init;
+}
+
+static int __init omap4_twl6030_hsmmc_init(struct omap2_hsmmc_info *controllers)
+{
+ struct omap2_hsmmc_info *c;
+
+ omap2_hsmmc_init(controllers);
+ for (c = controllers; c->mmc; c++)
+ omap4_twl6030_hsmmc_set_late_init(c->dev);
+
+ return 0;
+}
+
+static struct regulator_init_data sdp4430_vaux1 = {
+ .constraints = {
+ .min_uV = 1000000,
+ .max_uV = 3000000,
+ .apply_uV = true,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL
+ | REGULATOR_MODE_STANDBY,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE
+ | REGULATOR_CHANGE_MODE
+ | REGULATOR_CHANGE_STATUS,
+ },
+};
+
+static struct regulator_init_data sdp4430_vaux2 = {
+ .constraints = {
+ .min_uV = 1200000,
+ .max_uV = 2800000,
+ .apply_uV = true,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL
+ | REGULATOR_MODE_STANDBY,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE
+ | REGULATOR_CHANGE_MODE
+ | REGULATOR_CHANGE_STATUS,
+ },
+};
+
+static struct regulator_init_data sdp4430_vaux3 = {
+ .constraints = {
+ .min_uV = 1000000,
+ .max_uV = 3000000,
+ .apply_uV = true,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL
+ | REGULATOR_MODE_STANDBY,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE
+ | REGULATOR_CHANGE_MODE
+ | REGULATOR_CHANGE_STATUS,
+ },
+};
+
+/* VMMC1 for MMC1 card */
+static struct regulator_init_data sdp4430_vmmc = {
+ .constraints = {
+ .min_uV = 1200000,
+ .max_uV = 3000000,
+ .apply_uV = true,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL
+ | REGULATOR_MODE_STANDBY,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE
+ | REGULATOR_CHANGE_MODE
+ | REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = 2,
+ .consumer_supplies = sdp4430_vmmc_supply,
+};
+
+static struct regulator_init_data sdp4430_vpp = {
+ .constraints = {
+ .min_uV = 1800000,
+ .max_uV = 2500000,
+ .apply_uV = true,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL
+ | REGULATOR_MODE_STANDBY,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE
+ | REGULATOR_CHANGE_MODE
+ | REGULATOR_CHANGE_STATUS,
+ },
+};
+
+static struct regulator_init_data sdp4430_vusim = {
+ .constraints = {
+ .min_uV = 1200000,
+ .max_uV = 2900000,
+ .apply_uV = true,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL
+ | REGULATOR_MODE_STANDBY,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE
+ | REGULATOR_CHANGE_MODE
+ | REGULATOR_CHANGE_STATUS,
+ },
+};
+
+static struct regulator_init_data sdp4430_vana = {
+ .constraints = {
+ .min_uV = 2100000,
+ .max_uV = 2100000,
+ .apply_uV = true,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL
+ | REGULATOR_MODE_STANDBY,
+ .valid_ops_mask = REGULATOR_CHANGE_MODE
+ | REGULATOR_CHANGE_STATUS,
+ },
+};
+
+static struct regulator_init_data sdp4430_vcxio = {
+ .constraints = {
+ .min_uV = 1800000,
+ .max_uV = 1800000,
+ .apply_uV = true,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL
+ | REGULATOR_MODE_STANDBY,
+ .valid_ops_mask = REGULATOR_CHANGE_MODE
+ | REGULATOR_CHANGE_STATUS,
+ },
+};
+
+static struct regulator_init_data sdp4430_vdac = {
+ .constraints = {
+ .min_uV = 1800000,
+ .max_uV = 1800000,
+ .apply_uV = true,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL
+ | REGULATOR_MODE_STANDBY,
+ .valid_ops_mask = REGULATOR_CHANGE_MODE
+ | REGULATOR_CHANGE_STATUS,
+ },
+};
+
+static struct regulator_init_data sdp4430_vusb = {
+ .constraints = {
+ .min_uV = 3300000,
+ .max_uV = 3300000,
+ .apply_uV = true,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL
+ | REGULATOR_MODE_STANDBY,
+ .valid_ops_mask = REGULATOR_CHANGE_MODE
+ | REGULATOR_CHANGE_STATUS,
+ },
+};
+
+static struct twl4030_platform_data sdp4430_twldata = {
+ .irq_base = TWL6030_IRQ_BASE,
+ .irq_end = TWL6030_IRQ_END,
+
+ /* Regulators */
+ .vmmc = &sdp4430_vmmc,
+ .vpp = &sdp4430_vpp,
+ .vusim = &sdp4430_vusim,
+ .vana = &sdp4430_vana,
+ .vcxio = &sdp4430_vcxio,
+ .vdac = &sdp4430_vdac,
+ .vusb = &sdp4430_vusb,
+ .vaux1 = &sdp4430_vaux1,
+ .vaux2 = &sdp4430_vaux2,
+ .vaux3 = &sdp4430_vaux3,
+};
+
+static struct i2c_board_info __initdata sdp4430_i2c_boardinfo[] = {
+ {
+ I2C_BOARD_INFO("twl6030", 0x48),
+ .flags = I2C_CLIENT_WAKE,
+ .irq = OMAP44XX_IRQ_SYS_1N,
+ .platform_data = &sdp4430_twldata,
+ },
+};
+static int __init omap4_i2c_init(void)
+{
+ /*
+ * Phoenix Audio IC needs I2C1 to
+ * start with 400 kHz or less
+ */
+ omap_register_i2c_bus(1, 400, sdp4430_i2c_boardinfo,
+ ARRAY_SIZE(sdp4430_i2c_boardinfo));
+ omap_register_i2c_bus(2, 400, NULL, 0);
+ omap_register_i2c_bus(3, 400, NULL, 0);
+ omap_register_i2c_bus(4, 400, NULL, 0);
+ return 0;
+}
static void __init omap_4430sdp_init(void)
{
+ int status;
+
+ omap4_i2c_init();
platform_add_devices(sdp4430_devices, ARRAY_SIZE(sdp4430_devices));
omap_serial_init();
+ omap4_twl6030_hsmmc_init(mmc);
/* OMAP4 SDP uses internal transceiver so register nop transceiver */
usb_nop_xceiv_register();
/* FIXME: allow multi-omap to boot until musb is updated for omap4 */
if (!cpu_is_omap44xx())
usb_musb_init(&musb_board_data);
+
+ status = omap_ethernet_init();
+ if (status) {
+ pr_err("Ethernet initialization failed: %d\n", status);
+ } else {
+ sdp4430_spi_board_info[0].irq = gpio_to_irq(ETH_KS8851_IRQ);
+ spi_register_board_info(sdp4430_spi_board_info,
+ ARRAY_SIZE(sdp4430_spi_board_info));
+ }
}
static void __init omap_4430sdp_map_io(void)
diff --git a/arch/arm/mach-omap2/board-am3517evm.c b/arch/arm/mach-omap2/board-am3517evm.c
index c1c4389fbd8f..af383a876943 100644
--- a/arch/arm/mach-omap2/board-am3517evm.c
+++ b/arch/arm/mach-omap2/board-am3517evm.c
@@ -21,6 +21,8 @@
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <linux/i2c/pca953x.h>
+#include <linux/can/platform/ti_hecc.h>
+#include <linux/davinci_emac.h>
#include <mach/hardware.h>
#include <mach/am35xx.h>
@@ -30,16 +32,111 @@
#include <plat/board.h>
#include <plat/common.h>
+#include <plat/control.h>
#include <plat/usb.h>
#include <plat/display.h>
#include "mux.h"
+#define AM35XX_EVM_PHY_MASK (0xF)
+#define AM35XX_EVM_MDIO_FREQUENCY (1000000)
+
+static struct emac_platform_data am3517_evm_emac_pdata = {
+ .phy_mask = AM35XX_EVM_PHY_MASK,
+ .mdio_max_freq = AM35XX_EVM_MDIO_FREQUENCY,
+ .rmii_en = 1,
+};
+
+static struct resource am3517_emac_resources[] = {
+ {
+ .start = AM35XX_IPSS_EMAC_BASE,
+ .end = AM35XX_IPSS_EMAC_BASE + 0x3FFFF,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = INT_35XX_EMAC_C0_RXTHRESH_IRQ,
+ .end = INT_35XX_EMAC_C0_RXTHRESH_IRQ,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .start = INT_35XX_EMAC_C0_RX_PULSE_IRQ,
+ .end = INT_35XX_EMAC_C0_RX_PULSE_IRQ,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .start = INT_35XX_EMAC_C0_TX_PULSE_IRQ,
+ .end = INT_35XX_EMAC_C0_TX_PULSE_IRQ,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .start = INT_35XX_EMAC_C0_MISC_PULSE_IRQ,
+ .end = INT_35XX_EMAC_C0_MISC_PULSE_IRQ,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device am3517_emac_device = {
+ .name = "davinci_emac",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(am3517_emac_resources),
+ .resource = am3517_emac_resources,
+};
+
+static void am3517_enable_ethernet_int(void)
+{
+ u32 regval;
+
+ regval = omap_ctrl_readl(AM35XX_CONTROL_LVL_INTR_CLEAR);
+ regval = (regval | AM35XX_CPGMAC_C0_RX_PULSE_CLR |
+ AM35XX_CPGMAC_C0_TX_PULSE_CLR |
+ AM35XX_CPGMAC_C0_MISC_PULSE_CLR |
+ AM35XX_CPGMAC_C0_RX_THRESH_CLR);
+ omap_ctrl_writel(regval, AM35XX_CONTROL_LVL_INTR_CLEAR);
+ regval = omap_ctrl_readl(AM35XX_CONTROL_LVL_INTR_CLEAR);
+}
+
+static void am3517_disable_ethernet_int(void)
+{
+ u32 regval;
+
+ regval = omap_ctrl_readl(AM35XX_CONTROL_LVL_INTR_CLEAR);
+ regval = (regval | AM35XX_CPGMAC_C0_RX_PULSE_CLR |
+ AM35XX_CPGMAC_C0_TX_PULSE_CLR);
+ omap_ctrl_writel(regval, AM35XX_CONTROL_LVL_INTR_CLEAR);
+ regval = omap_ctrl_readl(AM35XX_CONTROL_LVL_INTR_CLEAR);
+}
+
+void am3517_evm_ethernet_init(struct emac_platform_data *pdata)
+{
+ unsigned int regval;
+
+ pdata->ctrl_reg_offset = AM35XX_EMAC_CNTRL_OFFSET;
+ pdata->ctrl_mod_reg_offset = AM35XX_EMAC_CNTRL_MOD_OFFSET;
+ pdata->ctrl_ram_offset = AM35XX_EMAC_CNTRL_RAM_OFFSET;
+ pdata->mdio_reg_offset = AM35XX_EMAC_MDIO_OFFSET;
+ pdata->ctrl_ram_size = AM35XX_EMAC_CNTRL_RAM_SIZE;
+ pdata->version = EMAC_VERSION_2;
+ pdata->hw_ram_addr = AM35XX_EMAC_HW_RAM_ADDR;
+ pdata->interrupt_enable = am3517_enable_ethernet_int;
+ pdata->interrupt_disable = am3517_disable_ethernet_int;
+ am3517_emac_device.dev.platform_data = pdata;
+ platform_device_register(&am3517_emac_device);
+
+ regval = omap_ctrl_readl(AM35XX_CONTROL_IP_SW_RESET);
+ regval = regval & (~(AM35XX_CPGMACSS_SW_RST));
+ omap_ctrl_writel(regval, AM35XX_CONTROL_IP_SW_RESET);
+ regval = omap_ctrl_readl(AM35XX_CONTROL_IP_SW_RESET);
+
+ return ;
+}
+
+
+
#define LCD_PANEL_PWR 176
#define LCD_PANEL_BKLIGHT_PWR 182
#define LCD_PANEL_PWM 181
-static struct i2c_board_info __initdata am3517evm_i2c_boardinfo[] = {
+static struct i2c_board_info __initdata am3517evm_i2c1_boardinfo[] = {
{
I2C_BOARD_INFO("s35390a", 0x30),
.type = "s35390a",
@@ -69,7 +166,7 @@ static void __init am3517_evm_rtc_init(void)
gpio_free(GPIO_RTCS35390A_IRQ);
return;
}
- am3517evm_i2c_boardinfo[0].irq = gpio_to_irq(GPIO_RTCS35390A_IRQ);
+ am3517evm_i2c1_boardinfo[0].irq = gpio_to_irq(GPIO_RTCS35390A_IRQ);
}
/*
@@ -80,7 +177,7 @@ static void __init am3517_evm_rtc_init(void)
static struct pca953x_platform_data am3517evm_gpio_expander_info_0 = {
.gpio_base = OMAP_MAX_GPIO_LINES,
};
-static struct i2c_board_info __initdata am3517evm_tca6416_info_0[] = {
+static struct i2c_board_info __initdata am3517evm_i2c2_boardinfo[] = {
{
I2C_BOARD_INFO("tca6416", 0x21),
.platform_data = &am3517evm_gpio_expander_info_0,
@@ -94,7 +191,7 @@ static struct pca953x_platform_data am3517evm_ui_gpio_expander_info_1 = {
static struct pca953x_platform_data am3517evm_ui_gpio_expander_info_2 = {
.gpio_base = OMAP_MAX_GPIO_LINES + 32,
};
-static struct i2c_board_info __initdata am3517evm_ui_tca6416_info[] = {
+static struct i2c_board_info __initdata am3517evm_i2c3_boardinfo[] = {
{
I2C_BOARD_INFO("tca6416", 0x20),
.platform_data = &am3517evm_ui_gpio_expander_info_1,
@@ -108,10 +205,10 @@ static struct i2c_board_info __initdata am3517evm_ui_tca6416_info[] = {
static int __init am3517_evm_i2c_init(void)
{
omap_register_i2c_bus(1, 400, NULL, 0);
- omap_register_i2c_bus(2, 400, am3517evm_tca6416_info_0,
- ARRAY_SIZE(am3517evm_tca6416_info_0));
- omap_register_i2c_bus(3, 400, am3517evm_ui_tca6416_info,
- ARRAY_SIZE(am3517evm_ui_tca6416_info));
+ omap_register_i2c_bus(2, 400, am3517evm_i2c2_boardinfo,
+ ARRAY_SIZE(am3517evm_i2c2_boardinfo));
+ omap_register_i2c_bus(3, 400, am3517evm_i2c3_boardinfo,
+ ARRAY_SIZE(am3517evm_i2c3_boardinfo));
return 0;
}
@@ -119,6 +216,8 @@ static int __init am3517_evm_i2c_init(void)
static int lcd_enabled;
static int dvi_enabled;
+#if defined(CONFIG_PANEL_SHARP_LQ043T1DG01) || \
+ defined(CONFIG_PANEL_SHARP_LQ043T1DG01_MODULE)
static void __init am3517_evm_display_init(void)
{
int r;
@@ -162,6 +261,9 @@ err_2:
err_1:
gpio_free(LCD_PANEL_BKLIGHT_PWR);
}
+#else
+static void __init am3517_evm_display_init(void) {}
+#endif
static int am3517_evm_panel_enable_lcd(struct omap_dss_device *dssdev)
{
@@ -275,7 +377,12 @@ static void __init am3517_evm_init_irq(void)
static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = {
.port_mode[0] = EHCI_HCD_OMAP_MODE_PHY,
+#if defined(CONFIG_PANEL_SHARP_LQ043T1DG01) || \
+ defined(CONFIG_PANEL_SHARP_LQ043T1DG01_MODULE)
+ .port_mode[1] = EHCI_HCD_OMAP_MODE_UNKNOWN,
+#else
.port_mode[1] = EHCI_HCD_OMAP_MODE_PHY,
+#endif
.port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN,
.phy_reset = true,
@@ -292,6 +399,42 @@ static struct omap_board_mux board_mux[] __initdata = {
#define board_mux NULL
#endif
+
+static struct resource am3517_hecc_resources[] = {
+ {
+ .start = AM35XX_IPSS_HECC_BASE,
+ .end = AM35XX_IPSS_HECC_BASE + 0x3FFF,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = INT_35XX_HECC0_IRQ,
+ .end = INT_35XX_HECC0_IRQ,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device am3517_hecc_device = {
+ .name = "ti_hecc",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(am3517_hecc_resources),
+ .resource = am3517_hecc_resources,
+};
+
+static struct ti_hecc_platform_data am3517_evm_hecc_pdata = {
+ .scc_hecc_offset = AM35XX_HECC_SCC_HECC_OFFSET,
+ .scc_ram_offset = AM35XX_HECC_SCC_RAM_OFFSET,
+ .hecc_ram_offset = AM35XX_HECC_RAM_OFFSET,
+ .mbx_offset = AM35XX_HECC_MBOX_OFFSET,
+ .int_line = AM35XX_HECC_INT_LINE,
+ .version = AM35XX_HECC_VERSION,
+};
+
+static void am3517_evm_hecc_init(struct ti_hecc_platform_data *pdata)
+{
+ am3517_hecc_device.dev.platform_data = pdata;
+ platform_device_register(&am3517_hecc_device);
+}
+
static void __init am3517_evm_init(void)
{
omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
@@ -305,14 +448,17 @@ static void __init am3517_evm_init(void)
/* Configure GPIO for EHCI port */
omap_mux_init_gpio(57, OMAP_PIN_OUTPUT);
usb_ehci_init(&ehci_pdata);
+ am3517_evm_hecc_init(&am3517_evm_hecc_pdata);
/* DSS */
am3517_evm_display_init();
/* RTC - S35390A */
am3517_evm_rtc_init();
- i2c_register_board_info(1, am3517evm_i2c_boardinfo,
- ARRAY_SIZE(am3517evm_i2c_boardinfo));
+ i2c_register_board_info(1, am3517evm_i2c1_boardinfo,
+ ARRAY_SIZE(am3517evm_i2c1_boardinfo));
+ /* Ethernet */
+ am3517_evm_ethernet_init(&am3517_evm_emac_pdata);
}
static void __init am3517_evm_map_io(void)
diff --git a/arch/arm/mach-omap2/board-cm-t35.c b/arch/arm/mach-omap2/board-cm-t35.c
index 2de4f79f03a0..e679a2cc86c3 100644
--- a/arch/arm/mach-omap2/board-cm-t35.c
+++ b/arch/arm/mach-omap2/board-cm-t35.c
@@ -45,6 +45,7 @@
#include <plat/gpmc.h>
#include <plat/usb.h>
#include <plat/display.h>
+#include <plat/mcspi.h>
#include <mach/hardware.h>
diff --git a/arch/arm/mach-omap2/board-devkit8000.c b/arch/arm/mach-omap2/board-devkit8000.c
index 47e3af2166d4..77022b588816 100644
--- a/arch/arm/mach-omap2/board-devkit8000.c
+++ b/arch/arm/mach-omap2/board-devkit8000.c
@@ -633,8 +633,163 @@ static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = {
.reset_gpio_port[2] = -EINVAL
};
+static struct omap_board_mux board_mux[] __initdata = {
+ /* nCS and IRQ for Devkit8000 ethernet */
+ OMAP3_MUX(GPMC_NCS6, OMAP_MUX_MODE0),
+ OMAP3_MUX(ETK_D11, OMAP_MUX_MODE4 | OMAP_PIN_INPUT_PULLUP),
+
+ /* McSPI 2*/
+ OMAP3_MUX(MCSPI2_CLK, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(MCSPI2_SIMO, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(MCSPI2_SOMI, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(MCSPI2_CS0, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(MCSPI2_CS1, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+
+ /* PENDOWN GPIO */
+ OMAP3_MUX(ETK_D13, OMAP_MUX_MODE4 | OMAP_PIN_INPUT),
+
+ /* mUSB */
+ OMAP3_MUX(HSUSB0_CLK, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(HSUSB0_STP, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(HSUSB0_DIR, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(HSUSB0_NXT, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(HSUSB0_DATA0, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(HSUSB0_DATA1, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(HSUSB0_DATA2, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(HSUSB0_DATA3, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(HSUSB0_DATA4, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(HSUSB0_DATA5, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(HSUSB0_DATA6, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(HSUSB0_DATA7, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+
+ /* USB 1 */
+ OMAP3_MUX(ETK_CTL, OMAP_MUX_MODE3 | OMAP_PIN_INPUT),
+ OMAP3_MUX(ETK_CLK, OMAP_MUX_MODE3 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(ETK_D8, OMAP_MUX_MODE3 | OMAP_PIN_INPUT),
+ OMAP3_MUX(ETK_D9, OMAP_MUX_MODE3 | OMAP_PIN_INPUT),
+ OMAP3_MUX(ETK_D0, OMAP_MUX_MODE3 | OMAP_PIN_INPUT),
+ OMAP3_MUX(ETK_D1, OMAP_MUX_MODE3 | OMAP_PIN_INPUT),
+ OMAP3_MUX(ETK_D2, OMAP_MUX_MODE3 | OMAP_PIN_INPUT),
+ OMAP3_MUX(ETK_D3, OMAP_MUX_MODE3 | OMAP_PIN_INPUT),
+ OMAP3_MUX(ETK_D4, OMAP_MUX_MODE3 | OMAP_PIN_INPUT),
+ OMAP3_MUX(ETK_D5, OMAP_MUX_MODE3 | OMAP_PIN_INPUT),
+ OMAP3_MUX(ETK_D6, OMAP_MUX_MODE3 | OMAP_PIN_INPUT),
+ OMAP3_MUX(ETK_D7, OMAP_MUX_MODE3 | OMAP_PIN_INPUT),
+
+ /* MMC 1 */
+ OMAP3_MUX(SDMMC1_CLK, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(SDMMC1_CMD, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(SDMMC1_DAT0, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(SDMMC1_DAT1, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(SDMMC1_DAT2, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(SDMMC1_DAT3, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(SDMMC1_DAT4, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(SDMMC1_DAT5, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(SDMMC1_DAT6, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(SDMMC1_DAT7, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+
+ /* McBSP 2 */
+ OMAP3_MUX(MCBSP2_FSX, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(MCBSP2_CLKX, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(MCBSP2_DR, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(MCBSP2_DX, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+
+ /* I2C 1 */
+ OMAP3_MUX(I2C1_SCL, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(I2C1_SDA, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+
+ /* I2C 2 */
+ OMAP3_MUX(I2C2_SCL, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(I2C2_SDA, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+
+ /* I2C 3 */
+ OMAP3_MUX(I2C3_SCL, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(I2C3_SDA, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+
+ /* I2C 4 */
+ OMAP3_MUX(I2C4_SCL, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(I2C4_SDA, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+
+ /* serial ports */
+ OMAP3_MUX(MCBSP3_CLKX, OMAP_MUX_MODE1 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(MCBSP3_FSX, OMAP_MUX_MODE1 | OMAP_PIN_INPUT),
+ OMAP3_MUX(UART1_TX, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(UART1_RX, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+
+ /* DSS */
+ OMAP3_MUX(DSS_PCLK, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_HSYNC, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_VSYNC, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_ACBIAS, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_DATA0, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_DATA1, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_DATA2, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_DATA3, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_DATA4, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_DATA5, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_DATA6, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_DATA7, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_DATA8, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_DATA9, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_DATA10, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_DATA11, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_DATA12, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_DATA13, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_DATA14, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_DATA15, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_DATA16, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_DATA17, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_DATA18, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_DATA19, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_DATA20, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_DATA21, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_DATA22, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_DATA23, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+
+ /* expansion port */
+ /* McSPI 1 */
+ OMAP3_MUX(MCSPI1_CLK, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(MCSPI1_SIMO, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(MCSPI1_SOMI, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(MCSPI1_CS0, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLDOWN),
+ OMAP3_MUX(MCSPI1_CS3, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLDOWN),
+
+ /* HDQ */
+ OMAP3_MUX(HDQ_SIO, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+
+ /* McSPI4 */
+ OMAP3_MUX(MCBSP1_CLKR, OMAP_MUX_MODE1 | OMAP_PIN_INPUT),
+ OMAP3_MUX(MCBSP1_DX, OMAP_MUX_MODE1 | OMAP_PIN_INPUT),
+ OMAP3_MUX(MCBSP1_DR, OMAP_MUX_MODE1 | OMAP_PIN_INPUT),
+ OMAP3_MUX(MCBSP1_FSX, OMAP_MUX_MODE1 | OMAP_PIN_INPUT_PULLUP),
+
+ /* MMC 2 */
+ OMAP3_MUX(SDMMC2_DAT4, OMAP_MUX_MODE1 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(SDMMC2_DAT5, OMAP_MUX_MODE1 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(SDMMC2_DAT6, OMAP_MUX_MODE1 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(SDMMC2_DAT7, OMAP_MUX_MODE1 | OMAP_PIN_INPUT),
+
+ /* I2C3 */
+ OMAP3_MUX(I2C3_SCL, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(I2C3_SDA, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+
+ OMAP3_MUX(MCBSP1_CLKX, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(MCBSP_CLKS, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(MCBSP1_FSR, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT),
+
+ OMAP3_MUX(GPMC_NCS7, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(GPMC_NCS3, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT),
+
+ /* TPS IRQ */
+ OMAP3_MUX(SYS_NIRQ, OMAP_MUX_MODE0 | OMAP_WAKEUP_EN | \
+ OMAP_PIN_INPUT_PULLUP),
+
+ { .reg_offset = OMAP_MUX_TERMINATOR },
+};
+
static void __init devkit8000_init(void)
{
+ omap3_mux_init(board_mux, OMAP_PACKAGE_CUS);
omap_serial_init();
omap_dm9000_init();
diff --git a/arch/arm/mach-omap2/board-ldp.c b/arch/arm/mach-omap2/board-ldp.c
index 5fcb52e71298..fefd7e6e9779 100644
--- a/arch/arm/mach-omap2/board-ldp.c
+++ b/arch/arm/mach-omap2/board-ldp.c
@@ -209,8 +209,7 @@ static void ads7846_dev_init(void)
}
gpio_direction_input(ts_gpio);
- omap_set_gpio_debounce(ts_gpio, 1);
- omap_set_gpio_debounce_time(ts_gpio, 0xa);
+ gpio_set_debounce(ts_gpio, 310);
}
static int ads7846_get_pendown_state(void)
diff --git a/arch/arm/mach-omap2/board-omap3beagle.c b/arch/arm/mach-omap2/board-omap3beagle.c
index 962d377970e9..69b154cdc75d 100644
--- a/arch/arm/mach-omap2/board-omap3beagle.c
+++ b/arch/arm/mach-omap2/board-omap3beagle.c
@@ -39,6 +39,7 @@
#include <plat/board.h>
#include <plat/common.h>
+#include <plat/display.h>
#include <plat/gpmc.h>
#include <plat/nand.h>
#include <plat/usb.h>
@@ -106,6 +107,77 @@ static struct platform_device omap3beagle_nand_device = {
.resource = &omap3beagle_nand_resource,
};
+/* DSS */
+
+static int beagle_enable_dvi(struct omap_dss_device *dssdev)
+{
+ if (gpio_is_valid(dssdev->reset_gpio))
+ gpio_set_value(dssdev->reset_gpio, 1);
+
+ return 0;
+}
+
+static void beagle_disable_dvi(struct omap_dss_device *dssdev)
+{
+ if (gpio_is_valid(dssdev->reset_gpio))
+ gpio_set_value(dssdev->reset_gpio, 0);
+}
+
+static struct omap_dss_device beagle_dvi_device = {
+ .type = OMAP_DISPLAY_TYPE_DPI,
+ .name = "dvi",
+ .driver_name = "generic_panel",
+ .phy.dpi.data_lines = 24,
+ .reset_gpio = 170,
+ .platform_enable = beagle_enable_dvi,
+ .platform_disable = beagle_disable_dvi,
+};
+
+static struct omap_dss_device beagle_tv_device = {
+ .name = "tv",
+ .driver_name = "venc",
+ .type = OMAP_DISPLAY_TYPE_VENC,
+ .phy.venc.type = OMAP_DSS_VENC_TYPE_SVIDEO,
+};
+
+static struct omap_dss_device *beagle_dss_devices[] = {
+ &beagle_dvi_device,
+ &beagle_tv_device,
+};
+
+static struct omap_dss_board_info beagle_dss_data = {
+ .num_devices = ARRAY_SIZE(beagle_dss_devices),
+ .devices = beagle_dss_devices,
+ .default_device = &beagle_dvi_device,
+};
+
+static struct platform_device beagle_dss_device = {
+ .name = "omapdss",
+ .id = -1,
+ .dev = {
+ .platform_data = &beagle_dss_data,
+ },
+};
+
+static struct regulator_consumer_supply beagle_vdac_supply =
+ REGULATOR_SUPPLY("vdda_dac", "omapdss");
+
+static struct regulator_consumer_supply beagle_vdvi_supply =
+ REGULATOR_SUPPLY("vdds_dsi", "omapdss");
+
+static void __init beagle_display_init(void)
+{
+ int r;
+
+ r = gpio_request(beagle_dvi_device.reset_gpio, "DVI reset");
+ if (r < 0) {
+ printk(KERN_ERR "Unable to get DVI reset GPIO\n");
+ return;
+ }
+
+ gpio_direction_output(beagle_dvi_device.reset_gpio, 0);
+}
+
#include "sdram-micron-mt46h32m32lf-6.h"
static struct omap2_hsmmc_info mmc[] = {
@@ -117,15 +189,6 @@ static struct omap2_hsmmc_info mmc[] = {
{} /* Terminator */
};
-static struct platform_device omap3_beagle_lcd_device = {
- .name = "omap3beagle_lcd",
- .id = -1,
-};
-
-static struct omap_lcd_config omap3_beagle_lcd_config __initdata = {
- .ctrl_name = "internal",
-};
-
static struct regulator_consumer_supply beagle_vmmc1_supply = {
.supply = "vmmc",
};
@@ -181,16 +244,6 @@ static struct twl4030_gpio_platform_data beagle_gpio_data = {
.setup = beagle_twl_gpio_setup,
};
-static struct regulator_consumer_supply beagle_vdac_supply = {
- .supply = "vdac",
- .dev = &omap3_beagle_lcd_device.dev,
-};
-
-static struct regulator_consumer_supply beagle_vdvi_supply = {
- .supply = "vdvi",
- .dev = &omap3_beagle_lcd_device.dev,
-};
-
/* VMMC1 for MMC1 pins CMD, CLK, DAT0..DAT3 (20 mA, plus card == max 220 mA) */
static struct regulator_init_data beagle_vmmc1 = {
.constraints = {
@@ -349,14 +402,8 @@ static struct platform_device keys_gpio = {
},
};
-static struct omap_board_config_kernel omap3_beagle_config[] __initdata = {
- { OMAP_TAG_LCD, &omap3_beagle_lcd_config },
-};
-
static void __init omap3_beagle_init_irq(void)
{
- omap_board_config = omap3_beagle_config;
- omap_board_config_size = ARRAY_SIZE(omap3_beagle_config);
omap2_init_common_hw(mt46h32m32lf6_sdrc_params,
mt46h32m32lf6_sdrc_params);
omap_init_irq();
@@ -367,9 +414,9 @@ static void __init omap3_beagle_init_irq(void)
}
static struct platform_device *omap3_beagle_devices[] __initdata = {
- &omap3_beagle_lcd_device,
&leds_gpio,
&keys_gpio,
+ &beagle_dss_device,
};
static void __init omap3beagle_flash_init(void)
@@ -456,6 +503,8 @@ static void __init omap3_beagle_init(void)
/* Ensure SDRC pins are mux'd for self-refresh */
omap_mux_init_signal("sdrc_cke0", OMAP_PIN_OUTPUT);
omap_mux_init_signal("sdrc_cke1", OMAP_PIN_OUTPUT);
+
+ beagle_display_init();
}
static void __init omap3_beagle_map_io(void)
diff --git a/arch/arm/mach-omap2/board-omap3evm.c b/arch/arm/mach-omap2/board-omap3evm.c
index 017bb2f4f7d2..b95261013812 100644
--- a/arch/arm/mach-omap2/board-omap3evm.c
+++ b/arch/arm/mach-omap2/board-omap3evm.c
@@ -579,9 +579,7 @@ static void ads7846_dev_init(void)
printk(KERN_ERR "can't get ads7846 pen down GPIO\n");
gpio_direction_input(OMAP3_EVM_TS_GPIO);
-
- omap_set_gpio_debounce(OMAP3_EVM_TS_GPIO, 1);
- omap_set_gpio_debounce_time(OMAP3_EVM_TS_GPIO, 0xa);
+ gpio_set_debounce(OMAP3_EVM_TS_GPIO, 310);
}
static int ads7846_get_pendown_state(void)
@@ -600,6 +598,7 @@ struct ads7846_platform_data ads7846_config = {
.get_pendown_state = ads7846_get_pendown_state,
.keep_vref_on = 1,
.settle_delay_usecs = 150,
+ .wakeup = true,
};
static struct omap2_mcspi_device_config ads7846_mcspi_config = {
@@ -651,11 +650,10 @@ static struct ehci_hcd_omap_platform_data ehci_pdata __initdata = {
#ifdef CONFIG_OMAP_MUX
static struct omap_board_mux board_mux[] __initdata = {
OMAP3_MUX(SYS_NIRQ, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP |
- OMAP_PIN_OFF_INPUT_PULLUP |
+ OMAP_PIN_OFF_INPUT_PULLUP | OMAP_PIN_OFF_OUTPUT_LOW |
OMAP_PIN_OFF_WAKEUPENABLE),
OMAP3_MUX(MCSPI1_CS1, OMAP_MUX_MODE4 | OMAP_PIN_INPUT_PULLUP |
- OMAP_PIN_OFF_INPUT_PULLUP |
- OMAP_PIN_OFF_WAKEUPENABLE),
+ OMAP_PIN_OFF_INPUT_PULLUP | OMAP_PIN_OFF_OUTPUT_LOW),
{ .reg_offset = OMAP_MUX_TERMINATOR },
};
#else
@@ -702,6 +700,9 @@ static void __init omap3_evm_init(void)
omap_mux_init_gpio(21, OMAP_PIN_INPUT_PULLUP);
ehci_pdata.reset_gpio_port[1] = 21;
+ /* EVM REV >= E can supply 500mA with EXTVBUS programming */
+ musb_board_data.power = 500;
+ musb_board_data.extvbus = 1;
} else {
/* setup EHCI phy reset on MDC */
omap_mux_init_gpio(135, OMAP_PIN_OUTPUT);
diff --git a/arch/arm/mach-omap2/board-omap3pandora.c b/arch/arm/mach-omap2/board-omap3pandora.c
index 395d049bf010..db06dc910ba7 100644
--- a/arch/arm/mach-omap2/board-omap3pandora.c
+++ b/arch/arm/mach-omap2/board-omap3pandora.c
@@ -130,8 +130,8 @@ static struct platform_device pandora_keys_gpio = {
static void __init pandora_keys_gpio_init(void)
{
/* set debounce time for GPIO banks 4 and 6 */
- omap_set_gpio_debounce_time(32 * 3, GPIO_DEBOUNCE_TIME);
- omap_set_gpio_debounce_time(32 * 5, GPIO_DEBOUNCE_TIME);
+ gpio_set_debounce(32 * 3, GPIO_DEBOUNCE_TIME);
+ gpio_set_debounce(32 * 5, GPIO_DEBOUNCE_TIME);
}
static int board_keymap[] = {
diff --git a/arch/arm/mach-omap2/board-omap3stalker.c b/arch/arm/mach-omap2/board-omap3stalker.c
new file mode 100644
index 000000000000..f848ba8dbc16
--- /dev/null
+++ b/arch/arm/mach-omap2/board-omap3stalker.c
@@ -0,0 +1,672 @@
+/*
+ * linux/arch/arm/mach-omap2/board-omap3stalker.c
+ *
+ * Copyright (C) 2008 Guangzhou EMA-Tech
+ *
+ * Modified from mach-omap2/board-omap3evm.c
+ *
+ * Initial code: Syed Mohammed Khasim
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/leds.h>
+#include <linux/gpio.h>
+#include <linux/input.h>
+#include <linux/gpio_keys.h>
+
+#include <linux/regulator/machine.h>
+#include <linux/i2c/twl.h>
+
+#include <mach/hardware.h>
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <asm/mach/flash.h>
+
+#include <plat/board.h>
+#include <plat/common.h>
+#include <plat/gpmc.h>
+#include <plat/nand.h>
+#include <plat/usb.h>
+#include <plat/timer-gp.h>
+#include <plat/display.h>
+
+#include <plat/mcspi.h>
+#include <linux/input/matrix_keypad.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/ads7846.h>
+#include <linux/interrupt.h>
+#include <linux/smsc911x.h>
+#include <linux/i2c/at24.h>
+
+#include "sdram-micron-mt46h32m32lf-6.h"
+#include "mux.h"
+#include "hsmmc.h"
+
+#if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE)
+#define OMAP3STALKER_ETHR_START 0x2c000000
+#define OMAP3STALKER_ETHR_SIZE 1024
+#define OMAP3STALKER_ETHR_GPIO_IRQ 19
+#define OMAP3STALKER_SMC911X_CS 5
+
+static struct resource omap3stalker_smsc911x_resources[] = {
+ [0] = {
+ .start = OMAP3STALKER_ETHR_START,
+ .end =
+ (OMAP3STALKER_ETHR_START + OMAP3STALKER_ETHR_SIZE - 1),
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = OMAP_GPIO_IRQ(OMAP3STALKER_ETHR_GPIO_IRQ),
+ .end = OMAP_GPIO_IRQ(OMAP3STALKER_ETHR_GPIO_IRQ),
+ .flags = (IORESOURCE_IRQ | IRQF_TRIGGER_LOW),
+ },
+};
+
+static struct smsc911x_platform_config smsc911x_config = {
+ .phy_interface = PHY_INTERFACE_MODE_MII,
+ .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
+ .irq_type = SMSC911X_IRQ_TYPE_OPEN_DRAIN,
+ .flags = (SMSC911X_USE_32BIT | SMSC911X_SAVE_MAC_ADDRESS),
+};
+
+static struct platform_device omap3stalker_smsc911x_device = {
+ .name = "smsc911x",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(omap3stalker_smsc911x_resources),
+ .resource = &omap3stalker_smsc911x_resources[0],
+ .dev = {
+ .platform_data = &smsc911x_config,
+ },
+};
+
+static inline void __init omap3stalker_init_eth(void)
+{
+ int eth_cs;
+ struct clk *l3ck;
+ unsigned int rate;
+
+ eth_cs = OMAP3STALKER_SMC911X_CS;
+
+ l3ck = clk_get(NULL, "l3_ck");
+ if (IS_ERR(l3ck))
+ rate = 100000000;
+ else
+ rate = clk_get_rate(l3ck);
+
+ omap_mux_init_gpio(19, OMAP_PIN_INPUT_PULLUP);
+ if (gpio_request(OMAP3STALKER_ETHR_GPIO_IRQ, "SMC911x irq") < 0) {
+ printk(KERN_ERR
+ "Failed to request GPIO%d for smc911x IRQ\n",
+ OMAP3STALKER_ETHR_GPIO_IRQ);
+ return;
+ }
+
+ gpio_direction_input(OMAP3STALKER_ETHR_GPIO_IRQ);
+
+ platform_device_register(&omap3stalker_smsc911x_device);
+}
+
+#else
+static inline void __init omap3stalker_init_eth(void)
+{
+ return;
+}
+#endif
+
+/*
+ * OMAP3 DSS control signals
+ */
+
+#define DSS_ENABLE_GPIO 199
+#define LCD_PANEL_BKLIGHT_GPIO 210
+#define ENABLE_VPLL2_DEV_GRP 0xE0
+
+static int lcd_enabled;
+static int dvi_enabled;
+
+static void __init omap3_stalker_display_init(void)
+{
+ return;
+}
+
+static int omap3_stalker_enable_lcd(struct omap_dss_device *dssdev)
+{
+ if (dvi_enabled) {
+ printk(KERN_ERR "cannot enable LCD, DVI is enabled\n");
+ return -EINVAL;
+ }
+ gpio_set_value(DSS_ENABLE_GPIO, 1);
+ gpio_set_value(LCD_PANEL_BKLIGHT_GPIO, 1);
+ lcd_enabled = 1;
+ return 0;
+}
+
+static void omap3_stalker_disable_lcd(struct omap_dss_device *dssdev)
+{
+ gpio_set_value(DSS_ENABLE_GPIO, 0);
+ gpio_set_value(LCD_PANEL_BKLIGHT_GPIO, 0);
+ lcd_enabled = 0;
+}
+
+static struct omap_dss_device omap3_stalker_lcd_device = {
+ .name = "lcd",
+ .driver_name = "generic_panel",
+ .phy.dpi.data_lines = 24,
+ .type = OMAP_DISPLAY_TYPE_DPI,
+ .platform_enable = omap3_stalker_enable_lcd,
+ .platform_disable = omap3_stalker_disable_lcd,
+};
+
+static int omap3_stalker_enable_tv(struct omap_dss_device *dssdev)
+{
+ return 0;
+}
+
+static void omap3_stalker_disable_tv(struct omap_dss_device *dssdev)
+{
+}
+
+static struct omap_dss_device omap3_stalker_tv_device = {
+ .name = "tv",
+ .driver_name = "venc",
+ .type = OMAP_DISPLAY_TYPE_VENC,
+#if defined(CONFIG_OMAP2_VENC_OUT_TYPE_SVIDEO)
+ .phy.venc.type = OMAP_DSS_VENC_TYPE_SVIDEO,
+#elif defined(CONFIG_OMAP2_VENC_OUT_TYPE_COMPOSITE)
+ .u.venc.type = OMAP_DSS_VENC_TYPE_COMPOSITE,
+#endif
+ .platform_enable = omap3_stalker_enable_tv,
+ .platform_disable = omap3_stalker_disable_tv,
+};
+
+static int omap3_stalker_enable_dvi(struct omap_dss_device *dssdev)
+{
+ if (lcd_enabled) {
+ printk(KERN_ERR "cannot enable DVI, LCD is enabled\n");
+ return -EINVAL;
+ }
+ gpio_set_value(DSS_ENABLE_GPIO, 1);
+ dvi_enabled = 1;
+ return 0;
+}
+
+static void omap3_stalker_disable_dvi(struct omap_dss_device *dssdev)
+{
+ gpio_set_value(DSS_ENABLE_GPIO, 0);
+ dvi_enabled = 0;
+}
+
+static struct omap_dss_device omap3_stalker_dvi_device = {
+ .name = "dvi",
+ .driver_name = "generic_panel",
+ .type = OMAP_DISPLAY_TYPE_DPI,
+ .phy.dpi.data_lines = 24,
+ .platform_enable = omap3_stalker_enable_dvi,
+ .platform_disable = omap3_stalker_disable_dvi,
+};
+
+static struct omap_dss_device *omap3_stalker_dss_devices[] = {
+ &omap3_stalker_lcd_device,
+ &omap3_stalker_tv_device,
+ &omap3_stalker_dvi_device,
+};
+
+static struct omap_dss_board_info omap3_stalker_dss_data = {
+ .num_devices = ARRAY_SIZE(omap3_stalker_dss_devices),
+ .devices = omap3_stalker_dss_devices,
+ .default_device = &omap3_stalker_dvi_device,
+};
+
+static struct platform_device omap3_stalker_dss_device = {
+ .name = "omapdss",
+ .id = -1,
+ .dev = {
+ .platform_data = &omap3_stalker_dss_data,
+ },
+};
+
+static struct regulator_consumer_supply omap3stalker_vmmc1_supply = {
+ .supply = "vmmc",
+};
+
+static struct regulator_consumer_supply omap3stalker_vsim_supply = {
+ .supply = "vmmc_aux",
+};
+
+/* VMMC1 for MMC1 pins CMD, CLK, DAT0..DAT3 (20 mA, plus card == max 220 mA) */
+static struct regulator_init_data omap3stalker_vmmc1 = {
+ .constraints = {
+ .min_uV = 1850000,
+ .max_uV = 3150000,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL
+ | REGULATOR_MODE_STANDBY,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE
+ | REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = 1,
+ .consumer_supplies = &omap3stalker_vmmc1_supply,
+};
+
+/* VSIM for MMC1 pins DAT4..DAT7 (2 mA, plus card == max 50 mA) */
+static struct regulator_init_data omap3stalker_vsim = {
+ .constraints = {
+ .min_uV = 1800000,
+ .max_uV = 3000000,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL
+ | REGULATOR_MODE_STANDBY,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE
+ | REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = 1,
+ .consumer_supplies = &omap3stalker_vsim_supply,
+};
+
+static struct omap2_hsmmc_info mmc[] = {
+ {
+ .mmc = 1,
+ .wires = 4,
+ .gpio_cd = -EINVAL,
+ .gpio_wp = 23,
+ },
+ {} /* Terminator */
+};
+
+static struct gpio_keys_button gpio_buttons[] = {
+ {
+ .code = BTN_EXTRA,
+ .gpio = 18,
+ .desc = "user",
+ .wakeup = 1,
+ },
+};
+
+static struct gpio_keys_platform_data gpio_key_info = {
+ .buttons = gpio_buttons,
+ .nbuttons = ARRAY_SIZE(gpio_buttons),
+};
+
+static struct platform_device keys_gpio = {
+ .name = "gpio-keys",
+ .id = -1,
+ .dev = {
+ .platform_data = &gpio_key_info,
+ },
+};
+
+static struct gpio_led gpio_leds[] = {
+ {
+ .name = "stalker:D8:usr0",
+ .default_trigger = "default-on",
+ .gpio = 126,
+ },
+ {
+ .name = "stalker:D9:usr1",
+ .default_trigger = "default-on",
+ .gpio = 127,
+ },
+ {
+ .name = "stalker:D3:mmc0",
+ .gpio = -EINVAL, /* gets replaced */
+ .active_low = true,
+ .default_trigger = "mmc0",
+ },
+ {
+ .name = "stalker:D4:heartbeat",
+ .gpio = -EINVAL, /* gets replaced */
+ .active_low = true,
+ .default_trigger = "heartbeat",
+ },
+};
+
+static struct gpio_led_platform_data gpio_led_info = {
+ .leds = gpio_leds,
+ .num_leds = ARRAY_SIZE(gpio_leds),
+};
+
+static struct platform_device leds_gpio = {
+ .name = "leds-gpio",
+ .id = -1,
+ .dev = {
+ .platform_data = &gpio_led_info,
+ },
+};
+
+static int
+omap3stalker_twl_gpio_setup(struct device *dev,
+ unsigned gpio, unsigned ngpio)
+{
+ /* gpio + 0 is "mmc0_cd" (input/IRQ) */
+ omap_mux_init_gpio(23, OMAP_PIN_INPUT);
+ mmc[0].gpio_cd = gpio + 0;
+ omap2_hsmmc_init(mmc);
+
+ /* link regulators to MMC adapters */
+ omap3stalker_vmmc1_supply.dev = mmc[0].dev;
+ omap3stalker_vsim_supply.dev = mmc[0].dev;
+
+ /*
+ * Most GPIOs are for USB OTG. Some are mostly sent to
+ * the P2 connector; notably LEDA for the LCD backlight.
+ */
+
+ /* TWL4030_GPIO_MAX + 0 == ledA, LCD Backlight control */
+ gpio_request(gpio + TWL4030_GPIO_MAX, "EN_LCD_BKL");
+ gpio_direction_output(gpio + TWL4030_GPIO_MAX, 0);
+
+ /* gpio + 7 == DVI Enable */
+ gpio_request(gpio + 7, "EN_DVI");
+ gpio_direction_output(gpio + 7, 0);
+
+ /* TWL4030_GPIO_MAX + 1 == ledB (out, mmc0) */
+ gpio_leds[2].gpio = gpio + TWL4030_GPIO_MAX + 1;
+ /* GPIO + 13 == ledsync (out, heartbeat) */
+ gpio_leds[3].gpio = gpio + 13;
+
+ platform_device_register(&leds_gpio);
+ return 0;
+}
+
+static struct twl4030_gpio_platform_data omap3stalker_gpio_data = {
+ .gpio_base = OMAP_MAX_GPIO_LINES,
+ .irq_base = TWL4030_GPIO_IRQ_BASE,
+ .irq_end = TWL4030_GPIO_IRQ_END,
+ .use_leds = true,
+ .setup = omap3stalker_twl_gpio_setup,
+};
+
+static struct twl4030_usb_data omap3stalker_usb_data = {
+ .usb_mode = T2_USB_MODE_ULPI,
+};
+
+static int board_keymap[] = {
+ KEY(0, 0, KEY_LEFT),
+ KEY(0, 1, KEY_DOWN),
+ KEY(0, 2, KEY_ENTER),
+ KEY(0, 3, KEY_M),
+
+ KEY(1, 0, KEY_RIGHT),
+ KEY(1, 1, KEY_UP),
+ KEY(1, 2, KEY_I),
+ KEY(1, 3, KEY_N),
+
+ KEY(2, 0, KEY_A),
+ KEY(2, 1, KEY_E),
+ KEY(2, 2, KEY_J),
+ KEY(2, 3, KEY_O),
+
+ KEY(3, 0, KEY_B),
+ KEY(3, 1, KEY_F),
+ KEY(3, 2, KEY_K),
+ KEY(3, 3, KEY_P)
+};
+
+static struct matrix_keymap_data board_map_data = {
+ .keymap = board_keymap,
+ .keymap_size = ARRAY_SIZE(board_keymap),
+};
+
+static struct twl4030_keypad_data omap3stalker_kp_data = {
+ .keymap_data = &board_map_data,
+ .rows = 4,
+ .cols = 4,
+ .rep = 1,
+};
+
+static struct twl4030_madc_platform_data omap3stalker_madc_data = {
+ .irq_line = 1,
+};
+
+static struct twl4030_codec_audio_data omap3stalker_audio_data = {
+ .audio_mclk = 26000000,
+};
+
+static struct twl4030_codec_data omap3stalker_codec_data = {
+ .audio_mclk = 26000000,
+ .audio = &omap3stalker_audio_data,
+};
+
+static struct regulator_consumer_supply omap3_stalker_vdda_dac_supply = {
+ .supply = "vdda_dac",
+ .dev = &omap3_stalker_dss_device.dev,
+};
+
+/* VDAC for DSS driving S-Video */
+static struct regulator_init_data omap3_stalker_vdac = {
+ .constraints = {
+ .min_uV = 1800000,
+ .max_uV = 1800000,
+ .apply_uV = true,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL
+ | REGULATOR_MODE_STANDBY,
+ .valid_ops_mask = REGULATOR_CHANGE_MODE
+ | REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = 1,
+ .consumer_supplies = &omap3_stalker_vdda_dac_supply,
+};
+
+/* VPLL2 for digital video outputs */
+static struct regulator_consumer_supply omap3_stalker_vpll2_supply = {
+ .supply = "vdds_dsi",
+ .dev = &omap3_stalker_lcd_device.dev,
+};
+
+static struct regulator_init_data omap3_stalker_vpll2 = {
+ .constraints = {
+ .name = "VDVI",
+ .min_uV = 1800000,
+ .max_uV = 1800000,
+ .apply_uV = true,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL
+ | REGULATOR_MODE_STANDBY,
+ .valid_ops_mask = REGULATOR_CHANGE_MODE
+ | REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = 1,
+ .consumer_supplies = &omap3_stalker_vpll2_supply,
+};
+
+static struct twl4030_platform_data omap3stalker_twldata = {
+ .irq_base = TWL4030_IRQ_BASE,
+ .irq_end = TWL4030_IRQ_END,
+
+ /* platform_data for children goes here */
+ .keypad = &omap3stalker_kp_data,
+ .madc = &omap3stalker_madc_data,
+ .usb = &omap3stalker_usb_data,
+ .gpio = &omap3stalker_gpio_data,
+ .codec = &omap3stalker_codec_data,
+ .vdac = &omap3_stalker_vdac,
+ .vpll2 = &omap3_stalker_vpll2,
+};
+
+static struct i2c_board_info __initdata omap3stalker_i2c_boardinfo[] = {
+ {
+ I2C_BOARD_INFO("twl4030", 0x48),
+ .flags = I2C_CLIENT_WAKE,
+ .irq = INT_34XX_SYS_NIRQ,
+ .platform_data = &omap3stalker_twldata,
+ },
+};
+
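+/*
+ * 64 kbit FRAM: byte_len works out to (64 * 1024) / 8 = 8192 bytes, and
+ * page_size is set to the whole array, presumably because FRAM has no
+ * page-write restriction.
+ */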
+static struct at24_platform_data fram_info = {
+ .byte_len = (64 * 1024) / 8,
+ .page_size = 8192,
+ .flags = AT24_FLAG_ADDR16 | AT24_FLAG_IRUGO,
+};
+
+static struct i2c_board_info __initdata omap3stalker_i2c_boardinfo3[] = {
+ {
+ I2C_BOARD_INFO("24c64", 0x50),
+ .flags = I2C_CLIENT_WAKE,
+ .platform_data = &fram_info,
+ },
+};
+
+static int __init omap3_stalker_i2c_init(void)
+{
+ /*
+	 * REVISIT: These entries can be set in omap3stalker_twldata
+	 * after a merge with the MFD tree.
+ */
+ omap3stalker_twldata.vmmc1 = &omap3stalker_vmmc1;
+ omap3stalker_twldata.vsim = &omap3stalker_vsim;
+
+ omap_register_i2c_bus(1, 2600, omap3stalker_i2c_boardinfo,
+ ARRAY_SIZE(omap3stalker_i2c_boardinfo));
+ omap_register_i2c_bus(2, 400, NULL, 0);
+ omap_register_i2c_bus(3, 400, omap3stalker_i2c_boardinfo3,
+ ARRAY_SIZE(omap3stalker_i2c_boardinfo3));
+ return 0;
+}
+
+#define OMAP3_STALKER_TS_GPIO 175
+static void ads7846_dev_init(void)
+{
+ if (gpio_request(OMAP3_STALKER_TS_GPIO, "ADS7846 pendown") < 0)
+ printk(KERN_ERR "can't get ads7846 pen down GPIO\n");
+
+ gpio_direction_input(OMAP3_STALKER_TS_GPIO);
+
+ omap_set_gpio_debounce(OMAP3_STALKER_TS_GPIO, 1);
+ omap_set_gpio_debounce_time(OMAP3_STALKER_TS_GPIO, 0xa);
+}
+
+static int ads7846_get_pendown_state(void)
+{
+ return !gpio_get_value(OMAP3_STALKER_TS_GPIO);
+}
+
+static struct ads7846_platform_data ads7846_config = {
+ .x_max = 0x0fff,
+ .y_max = 0x0fff,
+ .x_plate_ohms = 180,
+ .pressure_max = 255,
+ .debounce_max = 10,
+ .debounce_tol = 3,
+ .debounce_rep = 1,
+ .get_pendown_state = ads7846_get_pendown_state,
+ .keep_vref_on = 1,
+ .settle_delay_usecs = 150,
+};
+
+static struct omap2_mcspi_device_config ads7846_mcspi_config = {
+ .turbo_mode = 0,
+ .single_channel = 1, /* 0: slave, 1: master */
+};
+
+struct spi_board_info omap3stalker_spi_board_info[] = {
+ [0] = {
+ .modalias = "ads7846",
+ .bus_num = 1,
+ .chip_select = 0,
+ .max_speed_hz = 1500000,
+ .controller_data = &ads7846_mcspi_config,
+ .irq = OMAP_GPIO_IRQ(OMAP3_STALKER_TS_GPIO),
+ .platform_data = &ads7846_config,
+ },
+};
+
+static struct omap_board_config_kernel omap3_stalker_config[] __initdata = {
+};
+
+static void __init omap3_stalker_init_irq(void)
+{
+ omap_board_config = omap3_stalker_config;
+ omap_board_config_size = ARRAY_SIZE(omap3_stalker_config);
+ omap2_init_common_hw(mt46h32m32lf6_sdrc_params, NULL);
+ omap_init_irq();
+#ifdef CONFIG_OMAP_32K_TIMER
+ omap2_gp_clockevent_set_gptimer(12);
+#endif
+ omap_gpio_init();
+}
+
+static struct platform_device *omap3_stalker_devices[] __initdata = {
+ &omap3_stalker_dss_device,
+ &keys_gpio,
+};
+
+static struct ehci_hcd_omap_platform_data ehci_pdata __initconst = {
+ .port_mode[0] = EHCI_HCD_OMAP_MODE_UNKNOWN,
+ .port_mode[1] = EHCI_HCD_OMAP_MODE_PHY,
+ .port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN,
+
+ .phy_reset = true,
+ .reset_gpio_port[0] = -EINVAL,
+ .reset_gpio_port[1] = 21,
+ .reset_gpio_port[2] = -EINVAL,
+};
+
+#ifdef CONFIG_OMAP_MUX
+static struct omap_board_mux board_mux[] __initdata = {
+ OMAP3_MUX(SYS_NIRQ, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP |
+ OMAP_PIN_OFF_INPUT_PULLUP | OMAP_PIN_OFF_WAKEUPENABLE),
+ OMAP3_MUX(MCSPI1_CS1, OMAP_MUX_MODE4 | OMAP_PIN_INPUT_PULLUP |
+ OMAP_PIN_OFF_INPUT_PULLUP | OMAP_PIN_OFF_WAKEUPENABLE),
+ {.reg_offset = OMAP_MUX_TERMINATOR},
+};
+#else
+#define board_mux NULL
+#endif
+
+static struct omap_musb_board_data musb_board_data = {
+ .interface_type = MUSB_INTERFACE_ULPI,
+ .mode = MUSB_OTG,
+ .power = 100,
+};
+
+static void __init omap3_stalker_init(void)
+{
+ omap3_mux_init(board_mux, OMAP_PACKAGE_CUS);
+
+ omap3_stalker_i2c_init();
+
+ platform_add_devices(omap3_stalker_devices,
+ ARRAY_SIZE(omap3_stalker_devices));
+
+ spi_register_board_info(omap3stalker_spi_board_info,
+ ARRAY_SIZE(omap3stalker_spi_board_info));
+
+ omap_serial_init();
+ usb_musb_init(&musb_board_data);
+ usb_ehci_init(&ehci_pdata);
+ ads7846_dev_init();
+
+ omap_mux_init_gpio(21, OMAP_PIN_OUTPUT);
+ omap_mux_init_gpio(18, OMAP_PIN_INPUT_PULLUP);
+
+ omap3stalker_init_eth();
+ omap3_stalker_display_init();
+	/* Ensure SDRC pins are mux'd for self-refresh */
+ omap_mux_init_signal("sdr_cke0", OMAP_PIN_OUTPUT);
+ omap_mux_init_signal("sdr_cke1", OMAP_PIN_OUTPUT);
+}
+
+static void __init omap3_stalker_map_io(void)
+{
+ omap2_set_globals_343x();
+ omap34xx_map_common_io();
+}
+
+MACHINE_START(SBC3530, "OMAP3 STALKER")
+ /* Maintainer: Jason Lam -lzg@ema-tech.com */
+ .phys_io = 0x48000000,
+ .io_pg_offst = ((0xfa000000) >> 18) & 0xfffc,
+ .boot_params = 0x80000100,
+ .map_io = omap3_stalker_map_io,
+ .init_irq = omap3_stalker_init_irq,
+ .init_machine = omap3_stalker_init,
+ .timer = &omap_timer,
+MACHINE_END
diff --git a/arch/arm/mach-omap2/board-omap3touchbook.c b/arch/arm/mach-omap2/board-omap3touchbook.c
index 2504d41f923e..2f5f8233dd5b 100644
--- a/arch/arm/mach-omap2/board-omap3touchbook.c
+++ b/arch/arm/mach-omap2/board-omap3touchbook.c
@@ -328,8 +328,7 @@ static void __init omap3_ads7846_init(void)
}
gpio_direction_input(OMAP3_TS_GPIO);
- omap_set_gpio_debounce(OMAP3_TS_GPIO, 1);
- omap_set_gpio_debounce_time(OMAP3_TS_GPIO, 0xa);
+ gpio_set_debounce(OMAP3_TS_GPIO, 310);
}
static struct ads7846_platform_data ads7846_config = {
diff --git a/arch/arm/mach-omap2/board-overo.c b/arch/arm/mach-omap2/board-overo.c
index 8848c7c5ce48..79ac41400c21 100644
--- a/arch/arm/mach-omap2/board-overo.c
+++ b/arch/arm/mach-omap2/board-overo.c
@@ -63,6 +63,8 @@
#define OVERO_SMSC911X_CS 5
#define OVERO_SMSC911X_GPIO 176
+#define OVERO_SMSC911X2_CS 4
+#define OVERO_SMSC911X2_GPIO 65
#if defined(CONFIG_TOUCHSCREEN_ADS7846) || \
defined(CONFIG_TOUCHSCREEN_ADS7846_MODULE)
@@ -137,6 +139,16 @@ static struct resource overo_smsc911x_resources[] = {
},
};
+static struct resource overo_smsc911x2_resources[] = {
+ {
+ .name = "smsc911x2-memory",
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL,
+ },
+};
+
static struct smsc911x_platform_config overo_smsc911x_config = {
.irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
.irq_type = SMSC911X_IRQ_TYPE_OPEN_DRAIN,
@@ -146,7 +158,7 @@ static struct smsc911x_platform_config overo_smsc911x_config = {
static struct platform_device overo_smsc911x_device = {
.name = "smsc911x",
- .id = -1,
+ .id = 0,
.num_resources = ARRAY_SIZE(overo_smsc911x_resources),
.resource = overo_smsc911x_resources,
.dev = {
diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c
index 4377a4cf36eb..abdf321c2d41 100644
--- a/arch/arm/mach-omap2/board-rx51-peripherals.c
+++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
@@ -45,6 +45,8 @@
/* list all spi devices here */
enum {
RX51_SPI_WL1251,
+ RX51_SPI_MIPID, /* LCD panel */
+ RX51_SPI_TSC2005, /* Touch Controller */
};
static struct wl12xx_platform_data wl1251_pdata;
@@ -54,6 +56,16 @@ static struct omap2_mcspi_device_config wl1251_mcspi_config = {
.single_channel = 1,
};
+static struct omap2_mcspi_device_config mipid_mcspi_config = {
+ .turbo_mode = 0,
+ .single_channel = 1,
+};
+
+static struct omap2_mcspi_device_config tsc2005_mcspi_config = {
+ .turbo_mode = 0,
+ .single_channel = 1,
+};
+
static struct spi_board_info rx51_peripherals_spi_board_info[] __initdata = {
[RX51_SPI_WL1251] = {
.modalias = "wl1251",
@@ -64,6 +76,22 @@ static struct spi_board_info rx51_peripherals_spi_board_info[] __initdata = {
.controller_data = &wl1251_mcspi_config,
.platform_data = &wl1251_pdata,
},
+ [RX51_SPI_MIPID] = {
+ .modalias = "acx565akm",
+ .bus_num = 1,
+ .chip_select = 2,
+ .max_speed_hz = 6000000,
+ .controller_data = &mipid_mcspi_config,
+ },
+ [RX51_SPI_TSC2005] = {
+ .modalias = "tsc2005",
+ .bus_num = 1,
+ .chip_select = 0,
+ /* .irq = OMAP_GPIO_IRQ(RX51_TSC2005_IRQ_GPIO),*/
+ .max_speed_hz = 6000000,
+ .controller_data = &tsc2005_mcspi_config,
+ /* .platform_data = &tsc2005_config,*/
+ },
};
#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
@@ -277,7 +305,7 @@ static struct regulator_consumer_supply rx51_vmmc1_supply = {
.dev_name = "mmci-omap-hs.0",
};
-static struct regulator_consumer_supply rx51_vmmc2_supply = {
+static struct regulator_consumer_supply rx51_vaux3_supply = {
.supply = "vmmc",
.dev_name = "mmci-omap-hs.1",
};
@@ -287,6 +315,48 @@ static struct regulator_consumer_supply rx51_vsim_supply = {
.dev_name = "mmci-omap-hs.1",
};
+static struct regulator_consumer_supply rx51_vmmc2_supplies[] = {
+ /* tlv320aic3x analog supplies */
+ {
+ .supply = "AVDD",
+ .dev_name = "2-0018",
+ },
+ {
+ .supply = "DRVDD",
+ .dev_name = "2-0018",
+ },
+	/* Keep vmmc as the last item; it is skipped on newer boards */
+ {
+ .supply = "vmmc",
+ .dev_name = "mmci-omap-hs.1",
+ },
+};
+
+static struct regulator_consumer_supply rx51_vio_supplies[] = {
+ /* tlv320aic3x digital supplies */
+ {
+ .supply = "IOVDD",
+ .dev_name = "2-0018"
+ },
+ {
+ .supply = "DVDD",
+ .dev_name = "2-0018"
+ },
+};
+
+#if defined(CONFIG_FB_OMAP2) || defined(CONFIG_FB_OMAP2_MODULE)
+extern struct platform_device rx51_display_device;
+#endif
+
+static struct regulator_consumer_supply rx51_vaux1_consumers[] = {
+#if defined(CONFIG_FB_OMAP2) || defined(CONFIG_FB_OMAP2_MODULE)
+ {
+ .supply = "vdds_sdi",
+ .dev = &rx51_display_device.dev,
+ },
+#endif
+};
+
static struct regulator_init_data rx51_vaux1 = {
.constraints = {
.name = "V28",
@@ -297,6 +367,8 @@ static struct regulator_init_data rx51_vaux1 = {
.valid_ops_mask = REGULATOR_CHANGE_MODE
| REGULATOR_CHANGE_STATUS,
},
+ .num_consumer_supplies = ARRAY_SIZE(rx51_vaux1_consumers),
+ .consumer_supplies = rx51_vaux1_consumers,
};
static struct regulator_init_data rx51_vaux2 = {
@@ -338,7 +410,7 @@ static struct regulator_init_data rx51_vaux3_mmc = {
| REGULATOR_CHANGE_STATUS,
},
.num_consumer_supplies = 1,
- .consumer_supplies = &rx51_vmmc2_supply,
+ .consumer_supplies = &rx51_vaux3_supply,
};
static struct regulator_init_data rx51_vaux4 = {
@@ -370,9 +442,9 @@ static struct regulator_init_data rx51_vmmc1 = {
static struct regulator_init_data rx51_vmmc2 = {
.constraints = {
- .name = "VMMC2_30",
- .min_uV = 1850000,
- .max_uV = 3150000,
+ .name = "V28_A",
+ .min_uV = 2800000,
+ .max_uV = 3000000,
.apply_uV = true,
.valid_modes_mask = REGULATOR_MODE_NORMAL
| REGULATOR_MODE_STANDBY,
@@ -380,8 +452,8 @@ static struct regulator_init_data rx51_vmmc2 = {
| REGULATOR_CHANGE_MODE
| REGULATOR_CHANGE_STATUS,
},
- .num_consumer_supplies = 1,
- .consumer_supplies = &rx51_vmmc2_supply,
+ .num_consumer_supplies = ARRAY_SIZE(rx51_vmmc2_supplies),
+ .consumer_supplies = rx51_vmmc2_supplies,
};
static struct regulator_init_data rx51_vsim = {
@@ -411,6 +483,20 @@ static struct regulator_init_data rx51_vdac = {
},
};
+static struct regulator_init_data rx51_vio = {
+ .constraints = {
+ .min_uV = 1800000,
+ .max_uV = 1800000,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL
+ | REGULATOR_MODE_STANDBY,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE
+ | REGULATOR_CHANGE_MODE
+ | REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(rx51_vio_supplies),
+ .consumer_supplies = rx51_vio_supplies,
+};
+
static int rx51_twlgpio_setup(struct device *dev, unsigned gpio, unsigned n)
{
/* FIXME this gpio setup is just a placeholder for now */
@@ -618,6 +704,7 @@ static struct twl4030_platform_data rx51_twldata __initdata = {
.vmmc1 = &rx51_vmmc1,
.vsim = &rx51_vsim,
.vdac = &rx51_vdac,
+ .vio = &rx51_vio,
};
static struct i2c_board_info __initdata rx51_peripherals_i2c_board_info_1[] = {
@@ -629,18 +716,27 @@ static struct i2c_board_info __initdata rx51_peripherals_i2c_board_info_1[] = {
},
};
+static struct i2c_board_info __initdata rx51_peripherals_i2c_board_info_2[] = {
+ {
+ I2C_BOARD_INFO("tlv320aic3x", 0x18),
+ },
+};
+
static int __init rx51_i2c_init(void)
{
if ((system_rev >= SYSTEM_REV_S_USES_VAUX3 && system_rev < 0x100) ||
- system_rev >= SYSTEM_REV_B_USES_VAUX3)
+ system_rev >= SYSTEM_REV_B_USES_VAUX3) {
rx51_twldata.vaux3 = &rx51_vaux3_mmc;
- else {
+ /* Only older boards use VMMC2 for internal MMC */
+ rx51_vmmc2.num_consumer_supplies--;
+ } else {
rx51_twldata.vaux3 = &rx51_vaux3_cam;
- rx51_twldata.vmmc2 = &rx51_vmmc2;
}
+ rx51_twldata.vmmc2 = &rx51_vmmc2;
omap_register_i2c_bus(1, 2200, rx51_peripherals_i2c_board_info_1,
- ARRAY_SIZE(rx51_peripherals_i2c_board_info_1));
- omap_register_i2c_bus(2, 100, NULL, 0);
+ ARRAY_SIZE(rx51_peripherals_i2c_board_info_1));
+ omap_register_i2c_bus(2, 100, rx51_peripherals_i2c_board_info_2,
+ ARRAY_SIZE(rx51_peripherals_i2c_board_info_2));
omap_register_i2c_bus(3, 400, NULL, 0);
return 0;
}
diff --git a/arch/arm/mach-omap2/board-rx51-video.c b/arch/arm/mach-omap2/board-rx51-video.c
new file mode 100644
index 000000000000..b743a4f42649
--- /dev/null
+++ b/arch/arm/mach-omap2/board-rx51-video.c
@@ -0,0 +1,109 @@
+/*
+ * linux/arch/arm/mach-omap2/board-rx51-video.c
+ *
+ * Copyright (C) 2010 Nokia
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/spi/spi.h>
+#include <linux/mm.h>
+
+#include <asm/mach-types.h>
+#include <plat/mux.h>
+#include <plat/display.h>
+#include <plat/vram.h>
+#include <plat/mcspi.h>
+
+#include "mux.h"
+
+#define RX51_LCD_RESET_GPIO 90
+
+#if defined(CONFIG_FB_OMAP2) || defined(CONFIG_FB_OMAP2_MODULE)
+
+static int rx51_lcd_enable(struct omap_dss_device *dssdev)
+{
+ gpio_set_value(dssdev->reset_gpio, 1);
+ return 0;
+}
+
+static void rx51_lcd_disable(struct omap_dss_device *dssdev)
+{
+ gpio_set_value(dssdev->reset_gpio, 0);
+}
+
+static struct omap_dss_device rx51_lcd_device = {
+ .name = "lcd",
+ .driver_name = "panel-acx565akm",
+ .type = OMAP_DISPLAY_TYPE_SDI,
+ .phy.sdi.datapairs = 2,
+ .reset_gpio = RX51_LCD_RESET_GPIO,
+ .platform_enable = rx51_lcd_enable,
+ .platform_disable = rx51_lcd_disable,
+};
+
+static struct omap_dss_device *rx51_dss_devices[] = {
+ &rx51_lcd_device,
+};
+
+static struct omap_dss_board_info rx51_dss_board_info = {
+ .num_devices = ARRAY_SIZE(rx51_dss_devices),
+ .devices = rx51_dss_devices,
+ .default_device = &rx51_lcd_device,
+};
+
+struct platform_device rx51_display_device = {
+ .name = "omapdss",
+ .id = -1,
+ .dev = {
+ .platform_data = &rx51_dss_board_info,
+ },
+};
+
+static struct platform_device *rx51_video_devices[] __initdata = {
+ &rx51_display_device,
+};
+
+static int __init rx51_video_init(void)
+{
+ if (!machine_is_nokia_rx51())
+ return 0;
+
+ if (omap_mux_init_gpio(RX51_LCD_RESET_GPIO, OMAP_PIN_OUTPUT)) {
+ pr_err("%s cannot configure MUX for LCD RESET\n", __func__);
+ return 0;
+ }
+
+ if (gpio_request(RX51_LCD_RESET_GPIO, "LCD ACX565AKM reset")) {
+ pr_err("%s failed to get LCD Reset GPIO\n", __func__);
+ return 0;
+ }
+
+ gpio_direction_output(RX51_LCD_RESET_GPIO, 1);
+
+ platform_add_devices(rx51_video_devices,
+ ARRAY_SIZE(rx51_video_devices));
+ return 0;
+}
+
+subsys_initcall(rx51_video_init);
+
+void __init rx51_video_mem_init(void)
+{
+ /*
+ * GFX 864x480x32bpp
+ * VID1/2 1280x720x32bpp double buffered
+ */
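+	/*
+	 * With 4 KiB pages both terms below are already page-aligned:
+	 * 864 * 480 * 4 = 1,658,880 bytes for the graphics plane, plus
+	 * 2 * (1280 * 720 * 4 * 2) = 14,745,600 bytes for the two
+	 * double-buffered video planes, about 15.6 MiB of VRAM in total.
+	 */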
+ omap_vram_set_sdram_vram(PAGE_ALIGN(864 * 480 * 4) +
+ 2 * PAGE_ALIGN(1280 * 720 * 4 * 2), 0);
+}
+
+#else
+void __init rx51_video_mem_init(void) { }
+#endif /* defined(CONFIG_FB_OMAP2) || defined(CONFIG_FB_OMAP2_MODULE) */
diff --git a/arch/arm/mach-omap2/board-rx51.c b/arch/arm/mach-omap2/board-rx51.c
index b155c366c650..1b86b5bb87a2 100644
--- a/arch/arm/mach-omap2/board-rx51.c
+++ b/arch/arm/mach-omap2/board-rx51.c
@@ -36,6 +36,7 @@
#define RX51_GPIO_SLEEP_IND 162
struct omap_sdrc_params *rx51_get_sdram_timings(void);
+extern void rx51_video_mem_init(void);
static struct gpio_led gpio_leds[] = {
{
@@ -143,6 +144,7 @@ static void __init rx51_init(void)
static void __init rx51_map_io(void)
{
omap2_set_globals_343x();
+ rx51_video_mem_init();
omap34xx_map_common_io();
}
diff --git a/arch/arm/mach-omap2/board-zoom-debugboard.c b/arch/arm/mach-omap2/board-zoom-debugboard.c
index e15d2e87cfc1..1d7f827b0408 100644
--- a/arch/arm/mach-omap2/board-zoom-debugboard.c
+++ b/arch/arm/mach-omap2/board-zoom-debugboard.c
@@ -82,7 +82,7 @@ static inline void __init zoom_init_smsc911x(void)
static struct plat_serial8250_port serial_platform_data[] = {
{
- .mapbase = 0x10000000,
+ .mapbase = ZOOM_UART_BASE,
.irq = OMAP_GPIO_IRQ(102),
.flags = UPF_BOOT_AUTOCONF|UPF_IOREMAP|UPF_SHARE_IRQ,
.irqflags = IRQF_SHARED | IRQF_TRIGGER_RISING,
diff --git a/arch/arm/mach-omap2/board-zoom2.c b/arch/arm/mach-omap2/board-zoom2.c
index 9a26f84b1141..803ef14cbf2d 100644
--- a/arch/arm/mach-omap2/board-zoom2.c
+++ b/arch/arm/mach-omap2/board-zoom2.c
@@ -91,8 +91,8 @@ static void __init omap_zoom2_map_io(void)
}
MACHINE_START(OMAP_ZOOM2, "OMAP Zoom2 board")
- .phys_io = 0x48000000,
- .io_pg_offst = ((0xfa000000) >> 18) & 0xfffc,
+ .phys_io = ZOOM_UART_BASE,
+ .io_pg_offst = (ZOOM_UART_VIRT >> 18) & 0xfffc,
.boot_params = 0x80000100,
.map_io = omap_zoom2_map_io,
.init_irq = omap_zoom2_init_irq,
diff --git a/arch/arm/mach-omap2/board-zoom3.c b/arch/arm/mach-omap2/board-zoom3.c
index cd3e40cf3ac1..33147042485f 100644
--- a/arch/arm/mach-omap2/board-zoom3.c
+++ b/arch/arm/mach-omap2/board-zoom3.c
@@ -73,8 +73,8 @@ static void __init omap_zoom_init(void)
}
MACHINE_START(OMAP_ZOOM3, "OMAP Zoom3 board")
- .phys_io = 0x48000000,
- .io_pg_offst = ((0xfa000000) >> 18) & 0xfffc,
+ .phys_io = ZOOM_UART_BASE,
+ .io_pg_offst = (ZOOM_UART_VIRT >> 18) & 0xfffc,
.boot_params = 0x80000100,
.map_io = omap_zoom_map_io,
.init_irq = omap_zoom_init_irq,
diff --git a/arch/arm/mach-omap2/clkt2xxx_apll.c b/arch/arm/mach-omap2/clkt2xxx_apll.c
index 43d7246ce335..66e01acfd585 100644
--- a/arch/arm/mach-omap2/clkt2xxx_apll.c
+++ b/arch/arm/mach-omap2/clkt2xxx_apll.c
@@ -70,12 +70,12 @@ static int omap2_clk_apll_enable(struct clk *clk, u32 status_mask)
static int omap2_clk_apll96_enable(struct clk *clk)
{
- return omap2_clk_apll_enable(clk, OMAP24XX_ST_96M_APLL);
+ return omap2_clk_apll_enable(clk, OMAP24XX_ST_96M_APLL_MASK);
}
static int omap2_clk_apll54_enable(struct clk *clk)
{
- return omap2_clk_apll_enable(clk, OMAP24XX_ST_54M_APLL);
+ return omap2_clk_apll_enable(clk, OMAP24XX_ST_54M_APLL_MASK);
}
/* Stop APLL */
diff --git a/arch/arm/mach-omap2/clkt2xxx_virt_prcm_set.c b/arch/arm/mach-omap2/clkt2xxx_virt_prcm_set.c
index e60ca4e47bbd..aef62918aaf0 100644
--- a/arch/arm/mach-omap2/clkt2xxx_virt_prcm_set.c
+++ b/arch/arm/mach-omap2/clkt2xxx_virt_prcm_set.c
@@ -68,16 +68,13 @@ long omap2_round_to_table_rate(struct clk *clk, unsigned long rate)
{
const struct prcm_config *ptr;
long highest_rate;
- long sys_ck_rate;
-
- sys_ck_rate = clk_get_rate(sclk);
highest_rate = -EINVAL;
for (ptr = rate_table; ptr->mpu_speed; ptr++) {
if (!(ptr->flags & cpu_mask))
continue;
- if (ptr->xtal_speed != sys_ck_rate)
+ if (ptr->xtal_speed != sclk->rate)
continue;
highest_rate = ptr->mpu_speed;
@@ -96,15 +93,12 @@ int omap2_select_table_rate(struct clk *clk, unsigned long rate)
const struct prcm_config *prcm;
unsigned long found_speed = 0;
unsigned long flags;
- long sys_ck_rate;
-
- sys_ck_rate = clk_get_rate(sclk);
for (prcm = rate_table; prcm->mpu_speed; prcm++) {
if (!(prcm->flags & cpu_mask))
continue;
- if (prcm->xtal_speed != sys_ck_rate)
+ if (prcm->xtal_speed != sclk->rate)
continue;
if (prcm->mpu_speed <= rate) {
@@ -181,19 +175,16 @@ static struct cpufreq_frequency_table *freq_table;
void omap2_clk_init_cpufreq_table(struct cpufreq_frequency_table **table)
{
const struct prcm_config *prcm;
- long sys_ck_rate;
int i = 0;
int tbl_sz = 0;
if (!cpu_is_omap24xx())
return;
- sys_ck_rate = clk_get_rate(sclk);
-
for (prcm = rate_table; prcm->mpu_speed; prcm++) {
if (!(prcm->flags & cpu_mask))
continue;
- if (prcm->xtal_speed != sys_ck_rate)
+ if (prcm->xtal_speed != sclk->rate)
continue;
/* don't put bypass rates in table */
@@ -226,7 +217,7 @@ void omap2_clk_init_cpufreq_table(struct cpufreq_frequency_table **table)
for (prcm = rate_table; prcm->mpu_speed; prcm++) {
if (!(prcm->flags & cpu_mask))
continue;
- if (prcm->xtal_speed != sys_ck_rate)
+ if (prcm->xtal_speed != sclk->rate)
continue;
/* don't put bypass rates in table */
diff --git a/arch/arm/mach-omap2/clkt_clksel.c b/arch/arm/mach-omap2/clkt_clksel.c
index e50812dd03fd..a781cd6795a4 100644
--- a/arch/arm/mach-omap2/clkt_clksel.c
+++ b/arch/arm/mach-omap2/clkt_clksel.c
@@ -12,8 +12,26 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
- * XXX At some point these clksel clocks should be split into
- * "divider" clocks and "mux" clocks to better match the hardware.
+ *
+ * clksel clocks are clocks that do not have a fixed parent, or that
+ * can divide their parent's rate, or possibly both at the same time, based
+ * on the contents of a hardware register bitfield.
+ *
+ * All of the various mux and divider settings can be encoded into
+ * struct clksel* data structures, and then these can be autogenerated
+ * from some hardware database for each new chip generation. This
+ * should avoid the need to write, review, and validate a lot of new
+ * clock code for each new chip, since it can be exported from the SoC
+ * design flow. This is now done on OMAP4.
+ *
+ * The fusion of mux and divider clocks is a software creation. In
+ * hardware reality, the multiplexer (parent selection) and the
+ * divider exist separately. XXX At some point these clksel clocks
+ * should be split into "divider" clocks and "mux" clocks to better
+ * match the hardware.
+ *
+ * (The name "clksel" comes from the name of the corresponding
+ * register field in the OMAP2/3 family of SoCs.)
*
* XXX Currently these clocks are only used in the OMAP2/3/4 code, but
* many of the OMAP1 clocks should be convertible to use this
@@ -29,14 +47,11 @@
#include <plat/clock.h>
#include "clock.h"
-#include "cm.h"
-#include "cm-regbits-24xx.h"
-#include "cm-regbits-34xx.h"
/* Private functions */
/**
- * _omap2_get_clksel_by_parent - return clksel struct for a given clk & parent
+ * _get_clksel_by_parent() - return clksel struct for a given clk & parent
* @clk: OMAP struct clk ptr to inspect
* @src_clk: OMAP struct clk ptr of the parent clk to search for
*
@@ -44,141 +59,217 @@
* the element associated with the supplied parent clock address.
* Returns a pointer to the struct clksel on success or NULL on error.
*/
-static const struct clksel *_omap2_get_clksel_by_parent(struct clk *clk,
- struct clk *src_clk)
+static const struct clksel *_get_clksel_by_parent(struct clk *clk,
+ struct clk *src_clk)
{
const struct clksel *clks;
- if (!clk->clksel)
- return NULL;
-
- for (clks = clk->clksel; clks->parent; clks++) {
+ for (clks = clk->clksel; clks->parent; clks++)
if (clks->parent == src_clk)
break; /* Found the requested parent */
- }
if (!clks->parent) {
- printk(KERN_ERR "clock: Could not find parent clock %s in "
- "clksel array of clock %s\n", src_clk->name,
- clk->name);
+ /* This indicates a data problem */
+ WARN(1, "clock: Could not find parent clock %s in clksel array "
+ "of clock %s\n", src_clk->name, clk->name);
return NULL;
}
return clks;
}
-/*
- * Converts encoded control register address into a full address
- * On error, the return value (parent_div) will be 0.
+/**
+ * _get_div_and_fieldval() - find the new clksel divisor and field value to use
+ * @src_clk: planned new parent struct clk *
+ * @clk: struct clk * that is being reparented
+ * @field_val: pointer to a u32 to contain the register data for the divisor
+ *
+ * Given an intended new parent struct clk * @src_clk, and the struct
+ * clk * @clk to the clock that is being reparented, find the
+ * appropriate rate divisor for the new clock (returned as the return
+ * value), and the corresponding register bitfield data to program to
+ * reach that divisor (returned in the u32 pointed to by @field_val).
+ * Returns 0 on error, or returns the newly-selected divisor upon
+ * success (in this latter case, the corresponding register bitfield
+ * value is passed back in the variable pointed to by @field_val)
*/
-static u32 _omap2_clksel_get_src_field(struct clk *src_clk, struct clk *clk,
- u32 *field_val)
+static u8 _get_div_and_fieldval(struct clk *src_clk, struct clk *clk,
+ u32 *field_val)
{
const struct clksel *clks;
- const struct clksel_rate *clkr;
+ const struct clksel_rate *clkr, *max_clkr;
+ u8 max_div = 0;
- clks = _omap2_get_clksel_by_parent(clk, src_clk);
+ clks = _get_clksel_by_parent(clk, src_clk);
if (!clks)
return 0;
+ /*
+	 * Find the highest divisor (i.e., the one resulting in the
+ * lowest rate) to use as the default. This should avoid
+ * clock rates that are too high for the device. XXX A better
+ * solution here would be to try to determine if there is a
+ * divisor matching the original clock rate before the parent
+ * switch, and if it cannot be found, to fall back to the
+ * highest divisor.
+ */
for (clkr = clks->rates; clkr->div; clkr++) {
- if (clkr->flags & cpu_mask && clkr->flags & DEFAULT_RATE)
- break; /* Found the default rate for this platform */
+ if (!(clkr->flags & cpu_mask))
+ continue;
+
+ if (clkr->div > max_div) {
+ max_div = clkr->div;
+ max_clkr = clkr;
+ }
}
- if (!clkr->div) {
- printk(KERN_ERR "clock: Could not find default rate for "
- "clock %s parent %s\n", clk->name,
- src_clk->parent->name);
+ if (max_div == 0) {
+ /* This indicates an error in the clksel data */
+ WARN(1, "clock: Could not find divisor for clock %s parent %s"
+ "\n", clk->name, src_clk->parent->name);
return 0;
}
- /* Should never happen. Add a clksel mask to the struct clk. */
- WARN_ON(clk->clksel_mask == 0);
+ *field_val = max_clkr->val;
- *field_val = clkr->val;
-
- return clkr->div;
+ return max_div;
}
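+
+/*
+ * Hypothetical example: if the clksel_rate table valid for the running
+ * SoC lists divisors 1, 2 and 4, _get_div_and_fieldval() returns 4 and
+ * stores the .val encoding of the /4 entry in *field_val.
+ */
+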
+/**
+ * _write_clksel_reg() - program a clock's clksel register in hardware
+ * @clk: struct clk * to program
+ * @v: clksel bitfield value to program (with LSB at bit 0)
+ *
+ * Shift the clksel register bitfield value @v to its appropriate
+ * location in the clksel register and write it in. This function
+ * will ensure that the write to the clksel_reg reaches its
+ * destination before returning -- important since PRM and CM register
+ * accesses can be quite slow compared to ARM cycles -- but does not
+ * take into account any time the hardware might take to switch the
+ * clock source.
+ */
+static void _write_clksel_reg(struct clk *clk, u32 field_val)
+{
+ u32 v;
-/* Public functions */
+ v = __raw_readl(clk->clksel_reg);
+ v &= ~clk->clksel_mask;
+ v |= field_val << __ffs(clk->clksel_mask);
+ __raw_writel(v, clk->clksel_reg);
+
+ v = __raw_readl(clk->clksel_reg); /* OCP barrier */
+}
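+
+/*
+ * Worked example with hypothetical values: for a clksel_mask of 0x300
+ * (bits 9:8), __ffs() returns 8, so a field_val of 2 is written as 0x200
+ * into bits 9:8 while all other bits of the register are preserved.
+ */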
/**
- * omap2_init_clksel_parent - set a clksel clk's parent field from the hardware
- * @clk: OMAP clock struct ptr to use
+ * _clksel_to_divisor() - turn clksel field value into integer divider
+ * @clk: OMAP struct clk to use
+ * @field_val: register field value to find
*
- * Given a pointer to a source-selectable struct clk, read the hardware
- * register and determine what its parent is currently set to. Update the
- * clk->parent field with the appropriate clk ptr.
+ * Given a struct clk of a rate-selectable clksel clock, and a register field
+ * value to search for, find the corresponding clock divisor. The register
+ * field value should be pre-masked and shifted down so the LSB is at bit 0
+ * before calling. Returns 0 on error or returns the actual integer divisor
+ * upon success.
*/
-void omap2_init_clksel_parent(struct clk *clk)
+static u32 _clksel_to_divisor(struct clk *clk, u32 field_val)
{
const struct clksel *clks;
const struct clksel_rate *clkr;
- u32 r, found = 0;
- if (!clk->clksel)
- return;
+ clks = _get_clksel_by_parent(clk, clk->parent);
+ if (!clks)
+ return 0;
- r = __raw_readl(clk->clksel_reg) & clk->clksel_mask;
- r >>= __ffs(clk->clksel_mask);
+ for (clkr = clks->rates; clkr->div; clkr++) {
+ if (!(clkr->flags & cpu_mask))
+ continue;
- for (clks = clk->clksel; clks->parent && !found; clks++) {
- for (clkr = clks->rates; clkr->div && !found; clkr++) {
- if ((clkr->flags & cpu_mask) && (clkr->val == r)) {
- if (clk->parent != clks->parent) {
- pr_debug("clock: inited %s parent "
- "to %s (was %s)\n",
- clk->name, clks->parent->name,
- ((clk->parent) ?
- clk->parent->name : "NULL"));
- clk_reparent(clk, clks->parent);
- };
- found = 1;
- }
- }
+ if (clkr->val == field_val)
+ break;
}
- if (!found)
- printk(KERN_ERR "clock: init parent: could not find "
- "regval %0x for clock %s\n", r, clk->name);
+ if (!clkr->div) {
+ /* This indicates a data error */
+ WARN(1, "clock: Could not find fieldval %d for clock %s parent "
+ "%s\n", field_val, clk->name, clk->parent->name);
+ return 0;
+ }
- return;
+ return clkr->div;
}
-/*
- * Used for clocks that are part of CLKSEL_xyz governed clocks.
- * REVISIT: Maybe change to use clk->enable() functions like on omap1?
+/**
+ * _divisor_to_clksel() - turn clksel integer divisor into a field value
+ * @clk: OMAP struct clk to use
+ * @div: integer divisor to search for
+ *
+ * Given a struct clk of a rate-selectable clksel clock, and a clock
+ * divisor, find the corresponding register field value. Returns the
+ * register field value _before_ left-shifting (i.e., LSB is at bit
+ * 0); or returns 0xFFFFFFFF (~0) upon error.
*/
-unsigned long omap2_clksel_recalc(struct clk *clk)
+static u32 _divisor_to_clksel(struct clk *clk, u32 div)
{
- unsigned long rate;
- u32 div = 0;
+ const struct clksel *clks;
+ const struct clksel_rate *clkr;
- pr_debug("clock: recalc'ing clksel clk %s\n", clk->name);
+ /* should never happen */
+ WARN_ON(div == 0);
- div = omap2_clksel_get_divisor(clk);
- if (div == 0)
- return clk->rate;
+ clks = _get_clksel_by_parent(clk, clk->parent);
+ if (!clks)
+ return ~0;
- rate = clk->parent->rate / div;
+ for (clkr = clks->rates; clkr->div; clkr++) {
+ if (!(clkr->flags & cpu_mask))
+ continue;
- pr_debug("clock: new clock rate is %ld (div %d)\n", rate, div);
+ if (clkr->div == div)
+ break;
+ }
- return rate;
+ if (!clkr->div) {
+ pr_err("clock: Could not find divisor %d for clock %s parent "
+ "%s\n", div, clk->name, clk->parent->name);
+ return ~0;
+ }
+
+ return clkr->val;
+}
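+
+/*
+ * Hypothetical example: given a clksel_rate entry of { .div = 2, .val = 1 }
+ * valid for the running SoC, _clksel_to_divisor(clk, 1) returns 2 and
+ * _divisor_to_clksel(clk, 2) returns 1; the two helpers are inverses over
+ * the valid table entries.
+ */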
+
+/**
+ * _read_divisor() - get current divisor applied to parent clock (from hdwr)
+ * @clk: OMAP struct clk to use.
+ *
+ * Read the current divisor register value for @clk that is programmed
+ * into the hardware, convert it into the actual divisor value, and
+ * return it; or return 0 on error.
+ */
+static u32 _read_divisor(struct clk *clk)
+{
+ u32 v;
+
+ if (!clk->clksel || !clk->clksel_mask)
+ return 0;
+
+ v = __raw_readl(clk->clksel_reg);
+ v &= clk->clksel_mask;
+ v >>= __ffs(clk->clksel_mask);
+
+ return _clksel_to_divisor(clk, v);
}
+/* Public functions */
+
/**
- * omap2_clksel_round_rate_div - find divisor for the given clock and rate
+ * omap2_clksel_round_rate_div() - find divisor for the given clock and rate
* @clk: OMAP struct clk to use
* @target_rate: desired clock rate
* @new_div: ptr to where we should store the divisor
*
* Finds 'best' divider value in an array based on the source and target
* rates. The divider array must be sorted with smallest divider first.
- * Note that this will not work for clocks which are part of CONFIG_PARTICIPANT,
- * they are only settable as part of virtual_prcm set.
+ * This function is also used by the DPLL3 M2 divider code.
*
* Returns the rounded clock rate or returns 0xffffffff on error.
*/
@@ -190,12 +281,15 @@ u32 omap2_clksel_round_rate_div(struct clk *clk, unsigned long target_rate,
const struct clksel_rate *clkr;
u32 last_div = 0;
+ if (!clk->clksel || !clk->clksel_mask)
+ return ~0;
+
pr_debug("clock: clksel_round_rate_div: %s target_rate %ld\n",
clk->name, target_rate);
*new_div = 1;
- clks = _omap2_get_clksel_by_parent(clk, clk->parent);
+ clks = _get_clksel_by_parent(clk, clk->parent);
if (!clks)
return ~0;
@@ -231,168 +325,174 @@ u32 omap2_clksel_round_rate_div(struct clk *clk, unsigned long target_rate,
return clk->parent->rate / clkr->div;
}
-/**
- * omap2_clksel_round_rate - find rounded rate for the given clock and rate
- * @clk: OMAP struct clk to use
- * @target_rate: desired clock rate
- *
- * Compatibility wrapper for OMAP clock framework
- * Finds best target rate based on the source clock and possible dividers.
- * rates. The divider array must be sorted with smallest divider first.
- * Note that this will not work for clocks which are part of CONFIG_PARTICIPANT,
- * they are only settable as part of virtual_prcm set.
- *
- * Returns the rounded clock rate or returns 0xffffffff on error.
+/*
+ * Clocktype interface functions to the OMAP clock code
+ * (i.e., those used in struct clk field function pointers, etc.)
*/
-long omap2_clksel_round_rate(struct clk *clk, unsigned long target_rate)
-{
- u32 new_div;
-
- return omap2_clksel_round_rate_div(clk, target_rate, &new_div);
-}
-
-
-/* Given a clock and a rate apply a clock specific rounding function */
-long omap2_clk_round_rate(struct clk *clk, unsigned long rate)
-{
- if (clk->round_rate)
- return clk->round_rate(clk, rate);
-
- return clk->rate;
-}
/**
- * omap2_clksel_to_divisor() - turn clksel field value into integer divider
- * @clk: OMAP struct clk to use
- * @field_val: register field value to find
+ * omap2_init_clksel_parent() - set a clksel clk's parent field from the hdwr
+ * @clk: OMAP clock struct ptr to use
*
- * Given a struct clk of a rate-selectable clksel clock, and a register field
- * value to search for, find the corresponding clock divisor. The register
- * field value should be pre-masked and shifted down so the LSB is at bit 0
- * before calling. Returns 0 on error
+ * Given a pointer @clk to a source-selectable struct clk, read the
+ * hardware register and determine what its parent is currently set
+ * to. Update @clk's .parent field with the appropriate clk ptr. No
+ * return value.
*/
-u32 omap2_clksel_to_divisor(struct clk *clk, u32 field_val)
+void omap2_init_clksel_parent(struct clk *clk)
{
const struct clksel *clks;
const struct clksel_rate *clkr;
+ u32 r, found = 0;
- clks = _omap2_get_clksel_by_parent(clk, clk->parent);
- if (!clks)
- return 0;
+ if (!clk->clksel || !clk->clksel_mask)
+ return;
- for (clkr = clks->rates; clkr->div; clkr++) {
- if ((clkr->flags & cpu_mask) && (clkr->val == field_val))
- break;
- }
+ r = __raw_readl(clk->clksel_reg) & clk->clksel_mask;
+ r >>= __ffs(clk->clksel_mask);
- if (!clkr->div) {
- printk(KERN_ERR "clock: Could not find fieldval %d for "
- "clock %s parent %s\n", field_val, clk->name,
- clk->parent->name);
- return 0;
+ for (clks = clk->clksel; clks->parent && !found; clks++) {
+ for (clkr = clks->rates; clkr->div && !found; clkr++) {
+ if (!(clkr->flags & cpu_mask))
+ continue;
+
+ if (clkr->val == r) {
+ if (clk->parent != clks->parent) {
+ pr_debug("clock: inited %s parent "
+ "to %s (was %s)\n",
+ clk->name, clks->parent->name,
+ ((clk->parent) ?
+ clk->parent->name : "NULL"));
+ clk_reparent(clk, clks->parent);
+ };
+ found = 1;
+ }
+ }
}
- return clkr->div;
+ /* This indicates a data error */
+ WARN(!found, "clock: %s: init parent: could not find regval %0x\n",
+ clk->name, r);
+
+ return;
}
/**
- * omap2_divisor_to_clksel() - turn clksel integer divisor into a field value
- * @clk: OMAP struct clk to use
- * @div: integer divisor to search for
+ * omap2_clksel_recalc() - function ptr to pass via struct clk .recalc field
+ * @clk: struct clk *
*
- * Given a struct clk of a rate-selectable clksel clock, and a clock divisor,
- * find the corresponding register field value. The return register value is
- * the value before left-shifting. Returns ~0 on error
+ * This function is intended to be called only by the clock framework.
+ * Each clksel clock should have its struct clk .recalc field set to this
+ * function. Returns the clock's current rate, based on its parent's rate
+ * and its current divisor setting in the hardware.
*/
-u32 omap2_divisor_to_clksel(struct clk *clk, u32 div)
+unsigned long omap2_clksel_recalc(struct clk *clk)
{
- const struct clksel *clks;
- const struct clksel_rate *clkr;
-
- /* should never happen */
- WARN_ON(div == 0);
+ unsigned long rate;
+ u32 div = 0;
- clks = _omap2_get_clksel_by_parent(clk, clk->parent);
- if (!clks)
- return ~0;
+ div = _read_divisor(clk);
+ if (div == 0)
+ return clk->rate;
- for (clkr = clks->rates; clkr->div; clkr++) {
- if ((clkr->flags & cpu_mask) && (clkr->div == div))
- break;
- }
+ rate = clk->parent->rate / div;
- if (!clkr->div) {
- printk(KERN_ERR "clock: Could not find divisor %d for "
- "clock %s parent %s\n", div, clk->name,
- clk->parent->name);
- return ~0;
- }
+ pr_debug("clock: %s: recalc'd rate is %ld (div %d)\n", clk->name,
+ rate, div);
- return clkr->val;
+ return rate;
}
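+
+/*
+ * For example, with a hypothetical parent rate of 96 MHz and a divisor of
+ * 2 read back from the clksel field, omap2_clksel_recalc() returns
+ * 48000000.
+ */
+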
/**
- * omap2_clksel_get_divisor - get current divider applied to parent clock.
- * @clk: OMAP struct clk to use.
+ * omap2_clksel_round_rate() - find rounded rate for the given clock and rate
+ * @clk: OMAP struct clk to use
+ * @target_rate: desired clock rate
+ *
+ * This function is intended to be called only by the clock framework.
+ * Finds the best target rate based on the source clock and the possible
+ * dividers. The divider array must be sorted with the smallest divider first.
*
- * Returns the integer divisor upon success or 0 on error.
+ * Returns the rounded clock rate or returns 0xffffffff on error.
*/
-u32 omap2_clksel_get_divisor(struct clk *clk)
+long omap2_clksel_round_rate(struct clk *clk, unsigned long target_rate)
{
- u32 v;
-
- if (!clk->clksel_mask)
- return 0;
-
- v = __raw_readl(clk->clksel_reg) & clk->clksel_mask;
- v >>= __ffs(clk->clksel_mask);
+ u32 new_div;
- return omap2_clksel_to_divisor(clk, v);
+ return omap2_clksel_round_rate_div(clk, target_rate, &new_div);
}
+/**
+ * omap2_clksel_set_rate() - program clock rate in hardware
+ * @clk: struct clk * to program rate
+ * @rate: target rate to program
+ *
+ * This function is intended to be called only by the clock framework.
+ * Program @clk's rate to @rate in the hardware. The clock can be
+ * either enabled or disabled when this happens, although if the clock
+ * is enabled, some downstream devices may glitch or behave
+ * unpredictably when the clock rate is changed - this depends on the
+ * hardware. This function does not currently check the usecount of
+ * the clock, so if multiple drivers are using the clock, and the rate
+ * is changed, they will all be affected without any notification.
+ * Returns -EINVAL upon error, or 0 upon success.
+ */
int omap2_clksel_set_rate(struct clk *clk, unsigned long rate)
{
- u32 v, field_val, validrate, new_div = 0;
+ u32 field_val, validrate, new_div = 0;
- if (!clk->clksel_mask)
+ if (!clk->clksel || !clk->clksel_mask)
return -EINVAL;
validrate = omap2_clksel_round_rate_div(clk, rate, &new_div);
if (validrate != rate)
return -EINVAL;
- field_val = omap2_divisor_to_clksel(clk, new_div);
+ field_val = _divisor_to_clksel(clk, new_div);
if (field_val == ~0)
return -EINVAL;
- v = __raw_readl(clk->clksel_reg);
- v &= ~clk->clksel_mask;
- v |= field_val << __ffs(clk->clksel_mask);
- __raw_writel(v, clk->clksel_reg);
- v = __raw_readl(clk->clksel_reg); /* OCP barrier */
+ _write_clksel_reg(clk, field_val);
clk->rate = clk->parent->rate / new_div;
+ pr_debug("clock: %s: set rate to %ld\n", clk->name, clk->rate);
+
return 0;
}
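+
+/*
+ * For example, asking for 48 MHz on a clock whose parent runs at 96 MHz
+ * succeeds only if the clksel table contains a /2 entry; if the rounded
+ * rate differs from the requested rate, -EINVAL is returned and the
+ * hardware is left untouched.
+ */
+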
+/*
+ * Clksel parent setting function - not passed in struct clk function
+ * pointer - instead, the OMAP clock code currently assumes that any
+ * parent-setting clock is a clksel clock, and calls
+ * omap2_clksel_set_parent() by default
+ */
+
+/**
+ * omap2_clksel_set_parent() - change a clock's parent clock
+ * @clk: struct clk * of the child clock
+ * @new_parent: struct clk * of the new parent clock
+ *
+ * This function is intended to be called only by the clock framework.
+ * Change the parent clock of clock @clk to @new_parent. This is
+ * intended to be used while @clk is disabled. This function does not
+ * currently check the usecount of the clock, so if multiple drivers
+ * are using the clock, and the parent is changed, they will all be
+ * affected without any notification. Returns -EINVAL upon error, or
+ * 0 upon success.
+ */
int omap2_clksel_set_parent(struct clk *clk, struct clk *new_parent)
{
- u32 field_val, v, parent_div;
+ u32 field_val = 0;
+ u32 parent_div;
- if (!clk->clksel)
+ if (!clk->clksel || !clk->clksel_mask)
return -EINVAL;
- parent_div = _omap2_clksel_get_src_field(new_parent, clk, &field_val);
+ parent_div = _get_div_and_fieldval(new_parent, clk, &field_val);
if (!parent_div)
return -EINVAL;
- /* Set new source value (previous dividers if any in effect) */
- v = __raw_readl(clk->clksel_reg);
- v &= ~clk->clksel_mask;
- v |= field_val << __ffs(clk->clksel_mask);
- __raw_writel(v, clk->clksel_reg);
- v = __raw_readl(clk->clksel_reg); /* OCP barrier */
+ _write_clksel_reg(clk, field_val);
clk_reparent(clk, new_parent);
@@ -402,7 +502,7 @@ int omap2_clksel_set_parent(struct clk *clk, struct clk *new_parent)
if (parent_div > 0)
clk->rate /= parent_div;
- pr_debug("clock: set parent of %s to %s (new rate %ld)\n",
+ pr_debug("clock: %s: set parent to %s (new rate %ld)\n",
clk->name, clk->parent->name, clk->rate);
return 0;
diff --git a/arch/arm/mach-omap2/clock.c b/arch/arm/mach-omap2/clock.c
index a6d0b34b7990..605f531783a8 100644
--- a/arch/arm/mach-omap2/clock.c
+++ b/arch/arm/mach-omap2/clock.c
@@ -334,6 +334,15 @@ oce_err1:
return ret;
}
+/* Given a clock and a rate apply a clock specific rounding function */
+long omap2_clk_round_rate(struct clk *clk, unsigned long rate)
+{
+ if (clk->round_rate)
+ return clk->round_rate(clk, rate);
+
+ return clk->rate;
+}
+
/* Set the clock rate for a clock source */
int omap2_clk_set_rate(struct clk *clk, unsigned long rate)
{
diff --git a/arch/arm/mach-omap2/clock.h b/arch/arm/mach-omap2/clock.h
index ad8a1f7c1afc..a535c7a2a62a 100644
--- a/arch/arm/mach-omap2/clock.h
+++ b/arch/arm/mach-omap2/clock.h
@@ -73,19 +73,20 @@ void omap2_clk_disable_unused(struct clk *clk);
#define omap2_clk_disable_unused NULL
#endif
-unsigned long omap2_clksel_recalc(struct clk *clk);
void omap2_init_clk_clkdm(struct clk *clk);
-void omap2_init_clksel_parent(struct clk *clk);
-u32 omap2_clksel_get_divisor(struct clk *clk);
+
+/* clkt_clksel.c public functions */
u32 omap2_clksel_round_rate_div(struct clk *clk, unsigned long target_rate,
u32 *new_div);
-u32 omap2_clksel_to_divisor(struct clk *clk, u32 field_val);
-u32 omap2_divisor_to_clksel(struct clk *clk, u32 div);
+void omap2_init_clksel_parent(struct clk *clk);
+unsigned long omap2_clksel_recalc(struct clk *clk);
long omap2_clksel_round_rate(struct clk *clk, unsigned long target_rate);
int omap2_clksel_set_rate(struct clk *clk, unsigned long rate);
int omap2_clksel_set_parent(struct clk *clk, struct clk *new_parent);
+
u32 omap2_get_dpll_rate(struct clk *clk);
void omap2_init_dpll_parent(struct clk *clk);
+
int omap2_wait_clock_ready(void __iomem *reg, u32 cval, const char *name);
diff --git a/arch/arm/mach-omap2/clock2420_data.c b/arch/arm/mach-omap2/clock2420_data.c
index d932b142d0b6..37d65d62ed8f 100644
--- a/arch/arm/mach-omap2/clock2420_data.c
+++ b/arch/arm/mach-omap2/clock2420_data.c
@@ -155,12 +155,12 @@ static struct clk apll54_ck = {
/* func_54m_ck */
static const struct clksel_rate func_54m_apll54_rates[] = {
- { .div = 1, .val = 0, .flags = RATE_IN_24XX | DEFAULT_RATE },
+ { .div = 1, .val = 0, .flags = RATE_IN_24XX },
{ .div = 0 },
};
static const struct clksel_rate func_54m_alt_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_24XX | DEFAULT_RATE },
+ { .div = 1, .val = 1, .flags = RATE_IN_24XX },
{ .div = 0 },
};
@@ -177,7 +177,7 @@ static struct clk func_54m_ck = {
.clkdm_name = "wkup_clkdm",
.init = &omap2_init_clksel_parent,
.clksel_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1),
- .clksel_mask = OMAP24XX_54M_SOURCE,
+ .clksel_mask = OMAP24XX_54M_SOURCE_MASK,
.clksel = func_54m_clksel,
.recalc = &omap2_clksel_recalc,
};
@@ -201,12 +201,12 @@ static struct clk func_96m_ck = {
/* func_48m_ck */
static const struct clksel_rate func_48m_apll96_rates[] = {
- { .div = 2, .val = 0, .flags = RATE_IN_24XX | DEFAULT_RATE },
+ { .div = 2, .val = 0, .flags = RATE_IN_24XX },
{ .div = 0 },
};
static const struct clksel_rate func_48m_alt_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_24XX | DEFAULT_RATE },
+ { .div = 1, .val = 1, .flags = RATE_IN_24XX },
{ .div = 0 },
};
@@ -223,7 +223,7 @@ static struct clk func_48m_ck = {
.clkdm_name = "wkup_clkdm",
.init = &omap2_init_clksel_parent,
.clksel_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1),
- .clksel_mask = OMAP24XX_48M_SOURCE,
+ .clksel_mask = OMAP24XX_48M_SOURCE_MASK,
.clksel = func_48m_clksel,
.recalc = &omap2_clksel_recalc,
.round_rate = &omap2_clksel_round_rate,
@@ -256,22 +256,22 @@ static struct clk wdt1_osc_ck = {
* flags fields, which mark them as 2420-only.
*/
static const struct clksel_rate common_clkout_src_core_rates[] = {
- { .div = 1, .val = 0, .flags = RATE_IN_24XX | DEFAULT_RATE },
+ { .div = 1, .val = 0, .flags = RATE_IN_24XX },
{ .div = 0 }
};
static const struct clksel_rate common_clkout_src_sys_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_24XX | DEFAULT_RATE },
+ { .div = 1, .val = 1, .flags = RATE_IN_24XX },
{ .div = 0 }
};
static const struct clksel_rate common_clkout_src_96m_rates[] = {
- { .div = 1, .val = 2, .flags = RATE_IN_24XX | DEFAULT_RATE },
+ { .div = 1, .val = 2, .flags = RATE_IN_24XX },
{ .div = 0 }
};
static const struct clksel_rate common_clkout_src_54m_rates[] = {
- { .div = 1, .val = 3, .flags = RATE_IN_24XX | DEFAULT_RATE },
+ { .div = 1, .val = 3, .flags = RATE_IN_24XX },
{ .div = 0 }
};
@@ -300,7 +300,7 @@ static struct clk sys_clkout_src = {
};
static const struct clksel_rate common_clkout_rates[] = {
- { .div = 1, .val = 0, .flags = RATE_IN_24XX | DEFAULT_RATE },
+ { .div = 1, .val = 0, .flags = RATE_IN_24XX },
{ .div = 2, .val = 1, .flags = RATE_IN_24XX },
{ .div = 4, .val = 2, .flags = RATE_IN_24XX },
{ .div = 8, .val = 3, .flags = RATE_IN_24XX },
@@ -384,7 +384,7 @@ static struct clk emul_ck = {
*
*/
static const struct clksel_rate mpu_core_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_24XX | DEFAULT_RATE },
+ { .div = 1, .val = 1, .flags = RATE_IN_24XX },
{ .div = 2, .val = 2, .flags = RATE_IN_24XX },
{ .div = 4, .val = 4, .flags = RATE_IN_242X },
{ .div = 6, .val = 6, .flags = RATE_IN_242X },
@@ -420,7 +420,7 @@ static struct clk mpu_ck = { /* Control cpu */
* routed into a synchronizer and out of clocks abc.
*/
static const struct clksel_rate dsp_fck_core_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_24XX | DEFAULT_RATE },
+ { .div = 1, .val = 1, .flags = RATE_IN_24XX },
{ .div = 2, .val = 2, .flags = RATE_IN_24XX },
{ .div = 3, .val = 3, .flags = RATE_IN_24XX },
{ .div = 4, .val = 4, .flags = RATE_IN_24XX },
@@ -450,7 +450,7 @@ static struct clk dsp_fck = {
/* DSP interface clock */
static const struct clksel_rate dsp_irate_ick_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_24XX | DEFAULT_RATE },
+ { .div = 1, .val = 1, .flags = RATE_IN_24XX },
{ .div = 2, .val = 2, .flags = RATE_IN_24XX },
{ .div = 0 },
};
@@ -532,7 +532,7 @@ static struct clk iva1_mpu_int_ifck = {
static const struct clksel_rate core_l3_core_rates[] = {
{ .div = 1, .val = 1, .flags = RATE_IN_24XX },
{ .div = 2, .val = 2, .flags = RATE_IN_242X },
- { .div = 4, .val = 4, .flags = RATE_IN_24XX | DEFAULT_RATE },
+ { .div = 4, .val = 4, .flags = RATE_IN_24XX },
{ .div = 6, .val = 6, .flags = RATE_IN_24XX },
{ .div = 8, .val = 8, .flags = RATE_IN_242X },
{ .div = 12, .val = 12, .flags = RATE_IN_242X },
@@ -559,7 +559,7 @@ static struct clk core_l3_ck = { /* Used for ick and fck, interconnect */
/* usb_l4_ick */
static const struct clksel_rate usb_l4_ick_core_l3_rates[] = {
{ .div = 1, .val = 1, .flags = RATE_IN_24XX },
- { .div = 2, .val = 2, .flags = RATE_IN_24XX | DEFAULT_RATE },
+ { .div = 2, .val = 2, .flags = RATE_IN_24XX },
{ .div = 4, .val = 4, .flags = RATE_IN_24XX },
{ .div = 0 }
};
@@ -591,7 +591,7 @@ static struct clk usb_l4_ick = { /* FS-USB interface clock */
* this domain.
*/
static const struct clksel_rate l4_core_l3_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_24XX | DEFAULT_RATE },
+ { .div = 1, .val = 1, .flags = RATE_IN_24XX },
{ .div = 2, .val = 2, .flags = RATE_IN_24XX },
{ .div = 0 }
};
@@ -622,7 +622,7 @@ static struct clk l4_ck = { /* used both as an ick and fck */
*/
static const struct clksel_rate ssi_ssr_sst_fck_core_rates[] = {
{ .div = 1, .val = 1, .flags = RATE_IN_24XX },
- { .div = 2, .val = 2, .flags = RATE_IN_24XX | DEFAULT_RATE },
+ { .div = 2, .val = 2, .flags = RATE_IN_24XX },
{ .div = 3, .val = 3, .flags = RATE_IN_24XX },
{ .div = 4, .val = 4, .flags = RATE_IN_24XX },
{ .div = 6, .val = 6, .flags = RATE_IN_242X },
@@ -730,7 +730,7 @@ static struct clk gfx_ick = {
/* XXX Add RATE_NOT_VALIDATED */
static const struct clksel_rate dss1_fck_sys_rates[] = {
- { .div = 1, .val = 0, .flags = RATE_IN_24XX | DEFAULT_RATE },
+ { .div = 1, .val = 0, .flags = RATE_IN_24XX },
{ .div = 0 }
};
@@ -744,7 +744,7 @@ static const struct clksel_rate dss1_fck_core_rates[] = {
{ .div = 8, .val = 8, .flags = RATE_IN_24XX },
{ .div = 9, .val = 9, .flags = RATE_IN_24XX },
{ .div = 12, .val = 12, .flags = RATE_IN_24XX },
- { .div = 16, .val = 16, .flags = RATE_IN_24XX | DEFAULT_RATE },
+ { .div = 16, .val = 16, .flags = RATE_IN_24XX },
{ .div = 0 }
};
@@ -779,12 +779,12 @@ static struct clk dss1_fck = {
};
static const struct clksel_rate dss2_fck_sys_rates[] = {
- { .div = 1, .val = 0, .flags = RATE_IN_24XX | DEFAULT_RATE },
+ { .div = 1, .val = 0, .flags = RATE_IN_24XX },
{ .div = 0 }
};
static const struct clksel_rate dss2_fck_48m_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_24XX | DEFAULT_RATE },
+ { .div = 1, .val = 1, .flags = RATE_IN_24XX },
{ .div = 0 }
};
@@ -825,7 +825,7 @@ static struct clk dss_54m_fck = { /* Alt clk used in power management */
* functional clock parents.
*/
static const struct clksel_rate gpt_alt_rates[] = {
- { .div = 1, .val = 2, .flags = RATE_IN_24XX | DEFAULT_RATE },
+ { .div = 1, .val = 2, .flags = RATE_IN_24XX },
{ .div = 0 }
};
@@ -1588,7 +1588,7 @@ static struct clk vlynq_ick = {
};
static const struct clksel_rate vlynq_fck_96m_rates[] = {
- { .div = 1, .val = 0, .flags = RATE_IN_242X | DEFAULT_RATE },
+ { .div = 1, .val = 0, .flags = RATE_IN_242X },
{ .div = 0 }
};
@@ -1601,7 +1601,7 @@ static const struct clksel_rate vlynq_fck_core_rates[] = {
{ .div = 8, .val = 8, .flags = RATE_IN_242X },
{ .div = 9, .val = 9, .flags = RATE_IN_242X },
{ .div = 12, .val = 12, .flags = RATE_IN_242X },
- { .div = 16, .val = 16, .flags = RATE_IN_242X | DEFAULT_RATE },
+ { .div = 16, .val = 16, .flags = RATE_IN_242X },
{ .div = 18, .val = 18, .flags = RATE_IN_242X },
{ .div = 0 }
};
@@ -1836,7 +1836,7 @@ static struct omap_clk omap2420_clks[] = {
CLK(NULL, "vlynq_ick", &vlynq_ick, CK_242X),
CLK(NULL, "vlynq_fck", &vlynq_fck, CK_242X),
CLK(NULL, "des_ick", &des_ick, CK_242X),
- CLK(NULL, "sha_ick", &sha_ick, CK_242X),
+ CLK("omap-sham", "ick", &sha_ick, CK_242X),
CLK("omap_rng", "ick", &rng_ick, CK_242X),
CLK(NULL, "aes_ick", &aes_ick, CK_242X),
CLK(NULL, "pka_ick", &pka_ick, CK_242X),
diff --git a/arch/arm/mach-omap2/clock2430_data.c b/arch/arm/mach-omap2/clock2430_data.c
index 0438b6e4f51a..b33118fb6a87 100644
--- a/arch/arm/mach-omap2/clock2430_data.c
+++ b/arch/arm/mach-omap2/clock2430_data.c
@@ -155,12 +155,12 @@ static struct clk apll54_ck = {
/* func_54m_ck */
static const struct clksel_rate func_54m_apll54_rates[] = {
- { .div = 1, .val = 0, .flags = RATE_IN_24XX | DEFAULT_RATE },
+ { .div = 1, .val = 0, .flags = RATE_IN_24XX },
{ .div = 0 },
};
static const struct clksel_rate func_54m_alt_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_24XX | DEFAULT_RATE },
+ { .div = 1, .val = 1, .flags = RATE_IN_24XX },
{ .div = 0 },
};
@@ -177,7 +177,7 @@ static struct clk func_54m_ck = {
.clkdm_name = "wkup_clkdm",
.init = &omap2_init_clksel_parent,
.clksel_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1),
- .clksel_mask = OMAP24XX_54M_SOURCE,
+ .clksel_mask = OMAP24XX_54M_SOURCE_MASK,
.clksel = func_54m_clksel,
.recalc = &omap2_clksel_recalc,
};
@@ -192,12 +192,12 @@ static struct clk core_ck = {
/* func_96m_ck */
static const struct clksel_rate func_96m_apll96_rates[] = {
- { .div = 1, .val = 0, .flags = RATE_IN_24XX | DEFAULT_RATE },
+ { .div = 1, .val = 0, .flags = RATE_IN_24XX },
{ .div = 0 },
};
static const struct clksel_rate func_96m_alt_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_243X | DEFAULT_RATE },
+ { .div = 1, .val = 1, .flags = RATE_IN_243X },
{ .div = 0 },
};
@@ -214,7 +214,7 @@ static struct clk func_96m_ck = {
.clkdm_name = "wkup_clkdm",
.init = &omap2_init_clksel_parent,
.clksel_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1),
- .clksel_mask = OMAP2430_96M_SOURCE,
+ .clksel_mask = OMAP2430_96M_SOURCE_MASK,
.clksel = func_96m_clksel,
.recalc = &omap2_clksel_recalc,
};
@@ -222,12 +222,12 @@ static struct clk func_96m_ck = {
/* func_48m_ck */
static const struct clksel_rate func_48m_apll96_rates[] = {
- { .div = 2, .val = 0, .flags = RATE_IN_24XX | DEFAULT_RATE },
+ { .div = 2, .val = 0, .flags = RATE_IN_24XX },
{ .div = 0 },
};
static const struct clksel_rate func_48m_alt_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_24XX | DEFAULT_RATE },
+ { .div = 1, .val = 1, .flags = RATE_IN_24XX },
{ .div = 0 },
};
@@ -244,7 +244,7 @@ static struct clk func_48m_ck = {
.clkdm_name = "wkup_clkdm",
.init = &omap2_init_clksel_parent,
.clksel_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1),
- .clksel_mask = OMAP24XX_48M_SOURCE,
+ .clksel_mask = OMAP24XX_48M_SOURCE_MASK,
.clksel = func_48m_clksel,
.recalc = &omap2_clksel_recalc,
.round_rate = &omap2_clksel_round_rate,
@@ -277,22 +277,22 @@ static struct clk wdt1_osc_ck = {
* flags fields, which mark them as 2420-only.
*/
static const struct clksel_rate common_clkout_src_core_rates[] = {
- { .div = 1, .val = 0, .flags = RATE_IN_24XX | DEFAULT_RATE },
+ { .div = 1, .val = 0, .flags = RATE_IN_24XX },
{ .div = 0 }
};
static const struct clksel_rate common_clkout_src_sys_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_24XX | DEFAULT_RATE },
+ { .div = 1, .val = 1, .flags = RATE_IN_24XX },
{ .div = 0 }
};
static const struct clksel_rate common_clkout_src_96m_rates[] = {
- { .div = 1, .val = 2, .flags = RATE_IN_24XX | DEFAULT_RATE },
+ { .div = 1, .val = 2, .flags = RATE_IN_24XX },
{ .div = 0 }
};
static const struct clksel_rate common_clkout_src_54m_rates[] = {
- { .div = 1, .val = 3, .flags = RATE_IN_24XX | DEFAULT_RATE },
+ { .div = 1, .val = 3, .flags = RATE_IN_24XX },
{ .div = 0 }
};
@@ -321,7 +321,7 @@ static struct clk sys_clkout_src = {
};
static const struct clksel_rate common_clkout_rates[] = {
- { .div = 1, .val = 0, .flags = RATE_IN_24XX | DEFAULT_RATE },
+ { .div = 1, .val = 0, .flags = RATE_IN_24XX },
{ .div = 2, .val = 1, .flags = RATE_IN_24XX },
{ .div = 4, .val = 2, .flags = RATE_IN_24XX },
{ .div = 8, .val = 3, .flags = RATE_IN_24XX },
@@ -369,7 +369,7 @@ static struct clk emul_ck = {
*
*/
static const struct clksel_rate mpu_core_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_24XX | DEFAULT_RATE },
+ { .div = 1, .val = 1, .flags = RATE_IN_24XX },
{ .div = 2, .val = 2, .flags = RATE_IN_24XX },
{ .div = 0 },
};
@@ -402,7 +402,7 @@ static struct clk mpu_ck = { /* Control cpu */
* routed into a synchronizer and out of clocks abc.
*/
static const struct clksel_rate dsp_fck_core_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_24XX | DEFAULT_RATE },
+ { .div = 1, .val = 1, .flags = RATE_IN_24XX },
{ .div = 2, .val = 2, .flags = RATE_IN_24XX },
{ .div = 3, .val = 3, .flags = RATE_IN_24XX },
{ .div = 4, .val = 4, .flags = RATE_IN_24XX },
@@ -429,7 +429,7 @@ static struct clk dsp_fck = {
/* DSP interface clock */
static const struct clksel_rate dsp_irate_ick_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_24XX | DEFAULT_RATE },
+ { .div = 1, .val = 1, .flags = RATE_IN_24XX },
{ .div = 2, .val = 2, .flags = RATE_IN_24XX },
{ .div = 3, .val = 3, .flags = RATE_IN_243X },
{ .div = 0 },
@@ -481,7 +481,7 @@ static struct clk iva2_1_ick = {
*/
static const struct clksel_rate core_l3_core_rates[] = {
{ .div = 1, .val = 1, .flags = RATE_IN_24XX },
- { .div = 4, .val = 4, .flags = RATE_IN_24XX | DEFAULT_RATE },
+ { .div = 4, .val = 4, .flags = RATE_IN_24XX },
{ .div = 6, .val = 6, .flags = RATE_IN_24XX },
{ .div = 0 }
};
@@ -505,7 +505,7 @@ static struct clk core_l3_ck = { /* Used for ick and fck, interconnect */
/* usb_l4_ick */
static const struct clksel_rate usb_l4_ick_core_l3_rates[] = {
{ .div = 1, .val = 1, .flags = RATE_IN_24XX },
- { .div = 2, .val = 2, .flags = RATE_IN_24XX | DEFAULT_RATE },
+ { .div = 2, .val = 2, .flags = RATE_IN_24XX },
{ .div = 4, .val = 4, .flags = RATE_IN_24XX },
{ .div = 0 }
};
@@ -537,7 +537,7 @@ static struct clk usb_l4_ick = { /* FS-USB interface clock */
* this domain.
*/
static const struct clksel_rate l4_core_l3_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_24XX | DEFAULT_RATE },
+ { .div = 1, .val = 1, .flags = RATE_IN_24XX },
{ .div = 2, .val = 2, .flags = RATE_IN_24XX },
{ .div = 0 }
};
@@ -568,7 +568,7 @@ static struct clk l4_ck = { /* used both as an ick and fck */
*/
static const struct clksel_rate ssi_ssr_sst_fck_core_rates[] = {
{ .div = 1, .val = 1, .flags = RATE_IN_24XX },
- { .div = 2, .val = 2, .flags = RATE_IN_24XX | DEFAULT_RATE },
+ { .div = 2, .val = 2, .flags = RATE_IN_24XX },
{ .div = 3, .val = 3, .flags = RATE_IN_24XX },
{ .div = 4, .val = 4, .flags = RATE_IN_24XX },
{ .div = 5, .val = 5, .flags = RATE_IN_243X },
@@ -673,7 +673,7 @@ static struct clk gfx_ick = {
*/
static const struct clksel_rate mdm_ick_core_rates[] = {
{ .div = 1, .val = 1, .flags = RATE_IN_243X },
- { .div = 4, .val = 4, .flags = RATE_IN_243X | DEFAULT_RATE },
+ { .div = 4, .val = 4, .flags = RATE_IN_243X },
{ .div = 6, .val = 6, .flags = RATE_IN_243X },
{ .div = 9, .val = 9, .flags = RATE_IN_243X },
{ .div = 0 }
@@ -718,7 +718,7 @@ static struct clk mdm_osc_ck = {
/* XXX Add RATE_NOT_VALIDATED */
static const struct clksel_rate dss1_fck_sys_rates[] = {
- { .div = 1, .val = 0, .flags = RATE_IN_24XX | DEFAULT_RATE },
+ { .div = 1, .val = 0, .flags = RATE_IN_24XX },
{ .div = 0 }
};
@@ -732,7 +732,7 @@ static const struct clksel_rate dss1_fck_core_rates[] = {
{ .div = 8, .val = 8, .flags = RATE_IN_24XX },
{ .div = 9, .val = 9, .flags = RATE_IN_24XX },
{ .div = 12, .val = 12, .flags = RATE_IN_24XX },
- { .div = 16, .val = 16, .flags = RATE_IN_24XX | DEFAULT_RATE },
+ { .div = 16, .val = 16, .flags = RATE_IN_24XX },
{ .div = 0 }
};
@@ -767,12 +767,12 @@ static struct clk dss1_fck = {
};
static const struct clksel_rate dss2_fck_sys_rates[] = {
- { .div = 1, .val = 0, .flags = RATE_IN_24XX | DEFAULT_RATE },
+ { .div = 1, .val = 0, .flags = RATE_IN_24XX },
{ .div = 0 }
};
static const struct clksel_rate dss2_fck_48m_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_24XX | DEFAULT_RATE },
+ { .div = 1, .val = 1, .flags = RATE_IN_24XX },
{ .div = 0 }
};
@@ -813,7 +813,7 @@ static struct clk dss_54m_fck = { /* Alt clk used in power management */
* functional clock parents.
*/
static const struct clksel_rate gpt_alt_rates[] = {
- { .div = 1, .val = 2, .flags = RATE_IN_24XX | DEFAULT_RATE },
+ { .div = 1, .val = 2, .flags = RATE_IN_24XX },
{ .div = 0 }
};
@@ -1924,7 +1924,7 @@ static struct omap_clk omap2430_clks[] = {
CLK(NULL, "sdma_ick", &sdma_ick, CK_243X),
CLK(NULL, "sdrc_ick", &sdrc_ick, CK_243X),
CLK(NULL, "des_ick", &des_ick, CK_243X),
- CLK(NULL, "sha_ick", &sha_ick, CK_243X),
+ CLK("omap-sham", "ick", &sha_ick, CK_243X),
CLK("omap_rng", "ick", &rng_ick, CK_243X),
CLK(NULL, "aes_ick", &aes_ick, CK_243X),
CLK(NULL, "pka_ick", &pka_ick, CK_243X),
diff --git a/arch/arm/mach-omap2/clock3xxx_data.c b/arch/arm/mach-omap2/clock3xxx_data.c
index 9cba5560519b..41b155acfca7 100644
--- a/arch/arm/mach-omap2/clock3xxx_data.c
+++ b/arch/arm/mach-omap2/clock3xxx_data.c
@@ -110,32 +110,32 @@ static struct clk virt_38_4m_ck = {
};
static const struct clksel_rate osc_sys_12m_rates[] = {
- { .div = 1, .val = 0, .flags = RATE_IN_343X | DEFAULT_RATE },
+ { .div = 1, .val = 0, .flags = RATE_IN_3XXX },
{ .div = 0 }
};
static const struct clksel_rate osc_sys_13m_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_343X | DEFAULT_RATE },
+ { .div = 1, .val = 1, .flags = RATE_IN_3XXX },
{ .div = 0 }
};
static const struct clksel_rate osc_sys_16_8m_rates[] = {
- { .div = 1, .val = 5, .flags = RATE_IN_3430ES2 | DEFAULT_RATE },
+ { .div = 1, .val = 5, .flags = RATE_IN_3430ES2PLUS },
{ .div = 0 }
};
static const struct clksel_rate osc_sys_19_2m_rates[] = {
- { .div = 1, .val = 2, .flags = RATE_IN_343X | DEFAULT_RATE },
+ { .div = 1, .val = 2, .flags = RATE_IN_3XXX },
{ .div = 0 }
};
static const struct clksel_rate osc_sys_26m_rates[] = {
- { .div = 1, .val = 3, .flags = RATE_IN_343X | DEFAULT_RATE },
+ { .div = 1, .val = 3, .flags = RATE_IN_3XXX },
{ .div = 0 }
};
static const struct clksel_rate osc_sys_38_4m_rates[] = {
- { .div = 1, .val = 4, .flags = RATE_IN_343X | DEFAULT_RATE },
+ { .div = 1, .val = 4, .flags = RATE_IN_3XXX },
{ .div = 0 }
};
@@ -163,8 +163,8 @@ static struct clk osc_sys_ck = {
};
static const struct clksel_rate div2_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_343X | DEFAULT_RATE },
- { .div = 2, .val = 2, .flags = RATE_IN_343X },
+ { .div = 1, .val = 1, .flags = RATE_IN_3XXX },
+ { .div = 2, .val = 2, .flags = RATE_IN_3XXX },
{ .div = 0 }
};
@@ -213,42 +213,42 @@ static struct clk sys_clkout1 = {
/* CM CLOCKS */
static const struct clksel_rate div16_dpll_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_343X | DEFAULT_RATE },
- { .div = 2, .val = 2, .flags = RATE_IN_343X },
- { .div = 3, .val = 3, .flags = RATE_IN_343X },
- { .div = 4, .val = 4, .flags = RATE_IN_343X },
- { .div = 5, .val = 5, .flags = RATE_IN_343X },
- { .div = 6, .val = 6, .flags = RATE_IN_343X },
- { .div = 7, .val = 7, .flags = RATE_IN_343X },
- { .div = 8, .val = 8, .flags = RATE_IN_343X },
- { .div = 9, .val = 9, .flags = RATE_IN_343X },
- { .div = 10, .val = 10, .flags = RATE_IN_343X },
- { .div = 11, .val = 11, .flags = RATE_IN_343X },
- { .div = 12, .val = 12, .flags = RATE_IN_343X },
- { .div = 13, .val = 13, .flags = RATE_IN_343X },
- { .div = 14, .val = 14, .flags = RATE_IN_343X },
- { .div = 15, .val = 15, .flags = RATE_IN_343X },
- { .div = 16, .val = 16, .flags = RATE_IN_343X },
+ { .div = 1, .val = 1, .flags = RATE_IN_3XXX },
+ { .div = 2, .val = 2, .flags = RATE_IN_3XXX },
+ { .div = 3, .val = 3, .flags = RATE_IN_3XXX },
+ { .div = 4, .val = 4, .flags = RATE_IN_3XXX },
+ { .div = 5, .val = 5, .flags = RATE_IN_3XXX },
+ { .div = 6, .val = 6, .flags = RATE_IN_3XXX },
+ { .div = 7, .val = 7, .flags = RATE_IN_3XXX },
+ { .div = 8, .val = 8, .flags = RATE_IN_3XXX },
+ { .div = 9, .val = 9, .flags = RATE_IN_3XXX },
+ { .div = 10, .val = 10, .flags = RATE_IN_3XXX },
+ { .div = 11, .val = 11, .flags = RATE_IN_3XXX },
+ { .div = 12, .val = 12, .flags = RATE_IN_3XXX },
+ { .div = 13, .val = 13, .flags = RATE_IN_3XXX },
+ { .div = 14, .val = 14, .flags = RATE_IN_3XXX },
+ { .div = 15, .val = 15, .flags = RATE_IN_3XXX },
+ { .div = 16, .val = 16, .flags = RATE_IN_3XXX },
{ .div = 0 }
};
-static const struct clksel_rate div32_dpll4_rates_3630[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_36XX | DEFAULT_RATE },
- { .div = 2, .val = 2, .flags = RATE_IN_36XX },
- { .div = 3, .val = 3, .flags = RATE_IN_36XX },
- { .div = 4, .val = 4, .flags = RATE_IN_36XX },
- { .div = 5, .val = 5, .flags = RATE_IN_36XX },
- { .div = 6, .val = 6, .flags = RATE_IN_36XX },
- { .div = 7, .val = 7, .flags = RATE_IN_36XX },
- { .div = 8, .val = 8, .flags = RATE_IN_36XX },
- { .div = 9, .val = 9, .flags = RATE_IN_36XX },
- { .div = 10, .val = 10, .flags = RATE_IN_36XX },
- { .div = 11, .val = 11, .flags = RATE_IN_36XX },
- { .div = 12, .val = 12, .flags = RATE_IN_36XX },
- { .div = 13, .val = 13, .flags = RATE_IN_36XX },
- { .div = 14, .val = 14, .flags = RATE_IN_36XX },
- { .div = 15, .val = 15, .flags = RATE_IN_36XX },
- { .div = 16, .val = 16, .flags = RATE_IN_36XX },
+static const struct clksel_rate dpll4_rates[] = {
+ { .div = 1, .val = 1, .flags = RATE_IN_3XXX },
+ { .div = 2, .val = 2, .flags = RATE_IN_3XXX },
+ { .div = 3, .val = 3, .flags = RATE_IN_3XXX },
+ { .div = 4, .val = 4, .flags = RATE_IN_3XXX },
+ { .div = 5, .val = 5, .flags = RATE_IN_3XXX },
+ { .div = 6, .val = 6, .flags = RATE_IN_3XXX },
+ { .div = 7, .val = 7, .flags = RATE_IN_3XXX },
+ { .div = 8, .val = 8, .flags = RATE_IN_3XXX },
+ { .div = 9, .val = 9, .flags = RATE_IN_3XXX },
+ { .div = 10, .val = 10, .flags = RATE_IN_3XXX },
+ { .div = 11, .val = 11, .flags = RATE_IN_3XXX },
+ { .div = 12, .val = 12, .flags = RATE_IN_3XXX },
+ { .div = 13, .val = 13, .flags = RATE_IN_3XXX },
+ { .div = 14, .val = 14, .flags = RATE_IN_3XXX },
+ { .div = 15, .val = 15, .flags = RATE_IN_3XXX },
+ { .div = 16, .val = 16, .flags = RATE_IN_3XXX },
{ .div = 17, .val = 17, .flags = RATE_IN_36XX },
{ .div = 18, .val = 18, .flags = RATE_IN_36XX },
{ .div = 19, .val = 19, .flags = RATE_IN_36XX },
@@ -450,37 +450,37 @@ static struct clk dpll3_x2_ck = {
};
static const struct clksel_rate div31_dpll3_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_343X | DEFAULT_RATE },
- { .div = 2, .val = 2, .flags = RATE_IN_343X },
- { .div = 3, .val = 3, .flags = RATE_IN_3430ES2 },
- { .div = 4, .val = 4, .flags = RATE_IN_3430ES2 },
- { .div = 5, .val = 5, .flags = RATE_IN_3430ES2 },
- { .div = 6, .val = 6, .flags = RATE_IN_3430ES2 },
- { .div = 7, .val = 7, .flags = RATE_IN_3430ES2 },
- { .div = 8, .val = 8, .flags = RATE_IN_3430ES2 },
- { .div = 9, .val = 9, .flags = RATE_IN_3430ES2 },
- { .div = 10, .val = 10, .flags = RATE_IN_3430ES2 },
- { .div = 11, .val = 11, .flags = RATE_IN_3430ES2 },
- { .div = 12, .val = 12, .flags = RATE_IN_3430ES2 },
- { .div = 13, .val = 13, .flags = RATE_IN_3430ES2 },
- { .div = 14, .val = 14, .flags = RATE_IN_3430ES2 },
- { .div = 15, .val = 15, .flags = RATE_IN_3430ES2 },
- { .div = 16, .val = 16, .flags = RATE_IN_3430ES2 },
- { .div = 17, .val = 17, .flags = RATE_IN_3430ES2 },
- { .div = 18, .val = 18, .flags = RATE_IN_3430ES2 },
- { .div = 19, .val = 19, .flags = RATE_IN_3430ES2 },
- { .div = 20, .val = 20, .flags = RATE_IN_3430ES2 },
- { .div = 21, .val = 21, .flags = RATE_IN_3430ES2 },
- { .div = 22, .val = 22, .flags = RATE_IN_3430ES2 },
- { .div = 23, .val = 23, .flags = RATE_IN_3430ES2 },
- { .div = 24, .val = 24, .flags = RATE_IN_3430ES2 },
- { .div = 25, .val = 25, .flags = RATE_IN_3430ES2 },
- { .div = 26, .val = 26, .flags = RATE_IN_3430ES2 },
- { .div = 27, .val = 27, .flags = RATE_IN_3430ES2 },
- { .div = 28, .val = 28, .flags = RATE_IN_3430ES2 },
- { .div = 29, .val = 29, .flags = RATE_IN_3430ES2 },
- { .div = 30, .val = 30, .flags = RATE_IN_3430ES2 },
- { .div = 31, .val = 31, .flags = RATE_IN_3430ES2 },
+ { .div = 1, .val = 1, .flags = RATE_IN_3XXX },
+ { .div = 2, .val = 2, .flags = RATE_IN_3XXX },
+ { .div = 3, .val = 3, .flags = RATE_IN_3430ES2PLUS },
+ { .div = 4, .val = 4, .flags = RATE_IN_3430ES2PLUS },
+ { .div = 5, .val = 5, .flags = RATE_IN_3430ES2PLUS },
+ { .div = 6, .val = 6, .flags = RATE_IN_3430ES2PLUS },
+ { .div = 7, .val = 7, .flags = RATE_IN_3430ES2PLUS },
+ { .div = 8, .val = 8, .flags = RATE_IN_3430ES2PLUS },
+ { .div = 9, .val = 9, .flags = RATE_IN_3430ES2PLUS },
+ { .div = 10, .val = 10, .flags = RATE_IN_3430ES2PLUS },
+ { .div = 11, .val = 11, .flags = RATE_IN_3430ES2PLUS },
+ { .div = 12, .val = 12, .flags = RATE_IN_3430ES2PLUS },
+ { .div = 13, .val = 13, .flags = RATE_IN_3430ES2PLUS },
+ { .div = 14, .val = 14, .flags = RATE_IN_3430ES2PLUS },
+ { .div = 15, .val = 15, .flags = RATE_IN_3430ES2PLUS },
+ { .div = 16, .val = 16, .flags = RATE_IN_3430ES2PLUS },
+ { .div = 17, .val = 17, .flags = RATE_IN_3430ES2PLUS },
+ { .div = 18, .val = 18, .flags = RATE_IN_3430ES2PLUS },
+ { .div = 19, .val = 19, .flags = RATE_IN_3430ES2PLUS },
+ { .div = 20, .val = 20, .flags = RATE_IN_3430ES2PLUS },
+ { .div = 21, .val = 21, .flags = RATE_IN_3430ES2PLUS },
+ { .div = 22, .val = 22, .flags = RATE_IN_3430ES2PLUS },
+ { .div = 23, .val = 23, .flags = RATE_IN_3430ES2PLUS },
+ { .div = 24, .val = 24, .flags = RATE_IN_3430ES2PLUS },
+ { .div = 25, .val = 25, .flags = RATE_IN_3430ES2PLUS },
+ { .div = 26, .val = 26, .flags = RATE_IN_3430ES2PLUS },
+ { .div = 27, .val = 27, .flags = RATE_IN_3430ES2PLUS },
+ { .div = 28, .val = 28, .flags = RATE_IN_3430ES2PLUS },
+ { .div = 29, .val = 29, .flags = RATE_IN_3430ES2PLUS },
+ { .div = 30, .val = 30, .flags = RATE_IN_3430ES2PLUS },
+ { .div = 31, .val = 31, .flags = RATE_IN_3430ES2PLUS },
{ .div = 0 },
};
@@ -562,6 +562,7 @@ static struct clk emu_core_alwon_ck = {
/* Supplies 96MHz, 54Mhz TV DAC, DSS fclk, CAM sensor clock, emul trace clk */
/* Type: DPLL */
static struct dpll_data dpll4_dd;
+
static struct dpll_data dpll4_dd_34xx __initdata = {
.mult_div1_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL2),
.mult_mask = OMAP3430_PERIPH_DPLL_MULT_MASK,
@@ -632,39 +633,20 @@ static struct clk dpll4_x2_ck = {
.recalc = &omap3_clkoutx2_recalc,
};
-static const struct clksel div16_dpll4_clksel[] = {
- { .parent = &dpll4_ck, .rates = div16_dpll_rates },
- { .parent = NULL }
-};
-
-static const struct clksel div32_dpll4_clksel[] = {
- { .parent = &dpll4_ck, .rates = div32_dpll4_rates_3630 },
+static const struct clksel dpll4_clksel[] = {
+ { .parent = &dpll4_ck, .rates = dpll4_rates },
{ .parent = NULL }
};
/* This virtual clock is the source for dpll4_m2x2_ck */
-static struct clk dpll4_m2_ck;
-
-static struct clk dpll4_m2_ck_34xx __initdata = {
- .name = "dpll4_m2_ck",
- .ops = &clkops_null,
- .parent = &dpll4_ck,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(PLL_MOD, OMAP3430_CM_CLKSEL3),
- .clksel_mask = OMAP3430_DIV_96M_MASK,
- .clksel = div16_dpll4_clksel,
- .clkdm_name = "dpll4_clkdm",
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk dpll4_m2_ck_3630 __initdata = {
+static struct clk dpll4_m2_ck = {
.name = "dpll4_m2_ck",
.ops = &clkops_null,
.parent = &dpll4_ck,
.init = &omap2_init_clksel_parent,
.clksel_reg = OMAP_CM_REGADDR(PLL_MOD, OMAP3430_CM_CLKSEL3),
.clksel_mask = OMAP3630_DIV_96M_MASK,
- .clksel = div32_dpll4_clksel,
+ .clksel = dpll4_clksel,
.clkdm_name = "dpll4_clkdm",
.recalc = &omap2_clksel_recalc,
};
@@ -698,7 +680,7 @@ static struct clk omap_192m_alwon_fck = {
static const struct clksel_rate omap_96m_alwon_fck_rates[] = {
{ .div = 1, .val = 1, .flags = RATE_IN_36XX },
- { .div = 2, .val = 2, .flags = RATE_IN_36XX | DEFAULT_RATE },
+ { .div = 2, .val = 2, .flags = RATE_IN_36XX },
{ .div = 0 }
};
@@ -708,12 +690,12 @@ static const struct clksel omap_96m_alwon_fck_clksel[] = {
};
static const struct clksel_rate omap_96m_dpll_rates[] = {
- { .div = 1, .val = 0, .flags = RATE_IN_343X | DEFAULT_RATE },
+ { .div = 1, .val = 0, .flags = RATE_IN_3XXX },
{ .div = 0 }
};
static const struct clksel_rate omap_96m_sys_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_343X | DEFAULT_RATE },
+ { .div = 1, .val = 1, .flags = RATE_IN_3XXX },
{ .div = 0 }
};
@@ -760,28 +742,14 @@ static struct clk omap_96m_fck = {
};
/* This virtual clock is the source for dpll4_m3x2_ck */
-static struct clk dpll4_m3_ck;
-
-static struct clk dpll4_m3_ck_34xx __initdata = {
+static struct clk dpll4_m3_ck = {
.name = "dpll4_m3_ck",
.ops = &clkops_null,
.parent = &dpll4_ck,
.init = &omap2_init_clksel_parent,
.clksel_reg = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_CLKSEL),
.clksel_mask = OMAP3430_CLKSEL_TV_MASK,
- .clksel = div16_dpll4_clksel,
- .clkdm_name = "dpll4_clkdm",
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk dpll4_m3_ck_3630 __initdata = {
- .name = "dpll4_m3_ck",
- .ops = &clkops_null,
- .parent = &dpll4_ck,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_CLKSEL),
- .clksel_mask = OMAP3630_CLKSEL_TV_MASK,
- .clksel = div32_dpll4_clksel,
+ .clksel = dpll4_clksel,
.clkdm_name = "dpll4_clkdm",
.recalc = &omap2_clksel_recalc,
};
@@ -799,12 +767,12 @@ static struct clk dpll4_m3x2_ck = {
};
static const struct clksel_rate omap_54m_d4m3x2_rates[] = {
- { .div = 1, .val = 0, .flags = RATE_IN_343X | DEFAULT_RATE },
+ { .div = 1, .val = 0, .flags = RATE_IN_3XXX },
{ .div = 0 }
};
static const struct clksel_rate omap_54m_alt_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_343X | DEFAULT_RATE },
+ { .div = 1, .val = 1, .flags = RATE_IN_3XXX },
{ .div = 0 }
};
@@ -825,12 +793,12 @@ static struct clk omap_54m_fck = {
};
static const struct clksel_rate omap_48m_cm96m_rates[] = {
- { .div = 2, .val = 0, .flags = RATE_IN_343X | DEFAULT_RATE },
+ { .div = 2, .val = 0, .flags = RATE_IN_3XXX },
{ .div = 0 }
};
static const struct clksel_rate omap_48m_alt_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_343X | DEFAULT_RATE },
+ { .div = 1, .val = 1, .flags = RATE_IN_3XXX },
{ .div = 0 }
};
@@ -858,31 +826,15 @@ static struct clk omap_12m_fck = {
.recalc = &omap_fixed_divisor_recalc,
};
-/* This virstual clock is the source for dpll4_m4x2_ck */
-static struct clk dpll4_m4_ck;
-
-static struct clk dpll4_m4_ck_34xx __initdata = {
+/* This virtual clock is the source for dpll4_m4x2_ck */
+static struct clk dpll4_m4_ck = {
.name = "dpll4_m4_ck",
.ops = &clkops_null,
.parent = &dpll4_ck,
.init = &omap2_init_clksel_parent,
.clksel_reg = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_CLKSEL),
.clksel_mask = OMAP3430_CLKSEL_DSS1_MASK,
- .clksel = div16_dpll4_clksel,
- .clkdm_name = "dpll4_clkdm",
- .recalc = &omap2_clksel_recalc,
- .set_rate = &omap2_clksel_set_rate,
- .round_rate = &omap2_clksel_round_rate,
-};
-
-static struct clk dpll4_m4_ck_3630 __initdata = {
- .name = "dpll4_m4_ck",
- .ops = &clkops_null,
- .parent = &dpll4_ck,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_CLKSEL),
- .clksel_mask = OMAP3630_CLKSEL_DSS1_MASK,
- .clksel = div32_dpll4_clksel,
+ .clksel = dpll4_clksel,
.clkdm_name = "dpll4_clkdm",
.recalc = &omap2_clksel_recalc,
.set_rate = &omap2_clksel_set_rate,
@@ -902,30 +854,14 @@ static struct clk dpll4_m4x2_ck = {
};
/* This virtual clock is the source for dpll4_m5x2_ck */
-static struct clk dpll4_m5_ck;
-
-static struct clk dpll4_m5_ck_34xx __initdata = {
+static struct clk dpll4_m5_ck = {
.name = "dpll4_m5_ck",
.ops = &clkops_null,
.parent = &dpll4_ck,
.init = &omap2_init_clksel_parent,
.clksel_reg = OMAP_CM_REGADDR(OMAP3430_CAM_MOD, CM_CLKSEL),
.clksel_mask = OMAP3430_CLKSEL_CAM_MASK,
- .clksel = div16_dpll4_clksel,
- .clkdm_name = "dpll4_clkdm",
- .set_rate = &omap2_clksel_set_rate,
- .round_rate = &omap2_clksel_round_rate,
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk dpll4_m5_ck_3630 __initdata = {
- .name = "dpll4_m5_ck",
- .ops = &clkops_null,
- .parent = &dpll4_ck,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(OMAP3430_CAM_MOD, CM_CLKSEL),
- .clksel_mask = OMAP3630_CLKSEL_CAM_MASK,
- .clksel = div32_dpll4_clksel,
+ .clksel = dpll4_clksel,
.clkdm_name = "dpll4_clkdm",
.set_rate = &omap2_clksel_set_rate,
.round_rate = &omap2_clksel_round_rate,
@@ -945,28 +881,14 @@ static struct clk dpll4_m5x2_ck = {
};
/* This virtual clock is the source for dpll4_m6x2_ck */
-static struct clk dpll4_m6_ck;
-
-static struct clk dpll4_m6_ck_34xx __initdata = {
+static struct clk dpll4_m6_ck = {
.name = "dpll4_m6_ck",
.ops = &clkops_null,
.parent = &dpll4_ck,
.init = &omap2_init_clksel_parent,
.clksel_reg = OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1),
.clksel_mask = OMAP3430_DIV_DPLL4_MASK,
- .clksel = div16_dpll4_clksel,
- .clkdm_name = "dpll4_clkdm",
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk dpll4_m6_ck_3630 __initdata = {
- .name = "dpll4_m6_ck",
- .ops = &clkops_null,
- .parent = &dpll4_ck,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1),
- .clksel_mask = OMAP3630_DIV_DPLL4_MASK,
- .clksel = div32_dpll4_clksel,
+ .clksel = dpll4_clksel,
.clkdm_name = "dpll4_clkdm",
.recalc = &omap2_clksel_recalc,
};
@@ -1049,22 +971,22 @@ static struct clk dpll5_m2_ck = {
/* CM EXTERNAL CLOCK OUTPUTS */
static const struct clksel_rate clkout2_src_core_rates[] = {
- { .div = 1, .val = 0, .flags = RATE_IN_343X | DEFAULT_RATE },
+ { .div = 1, .val = 0, .flags = RATE_IN_3XXX },
{ .div = 0 }
};
static const struct clksel_rate clkout2_src_sys_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_343X | DEFAULT_RATE },
+ { .div = 1, .val = 1, .flags = RATE_IN_3XXX },
{ .div = 0 }
};
static const struct clksel_rate clkout2_src_96m_rates[] = {
- { .div = 1, .val = 2, .flags = RATE_IN_343X | DEFAULT_RATE },
+ { .div = 1, .val = 2, .flags = RATE_IN_3XXX },
{ .div = 0 }
};
static const struct clksel_rate clkout2_src_54m_rates[] = {
- { .div = 1, .val = 3, .flags = RATE_IN_343X | DEFAULT_RATE },
+ { .div = 1, .val = 3, .flags = RATE_IN_3XXX },
{ .div = 0 }
};
@@ -1090,11 +1012,11 @@ static struct clk clkout2_src_ck = {
};
static const struct clksel_rate sys_clkout2_rates[] = {
- { .div = 1, .val = 0, .flags = RATE_IN_343X | DEFAULT_RATE },
- { .div = 2, .val = 1, .flags = RATE_IN_343X },
- { .div = 4, .val = 2, .flags = RATE_IN_343X },
- { .div = 8, .val = 3, .flags = RATE_IN_343X },
- { .div = 16, .val = 4, .flags = RATE_IN_343X },
+ { .div = 1, .val = 0, .flags = RATE_IN_3XXX },
+ { .div = 2, .val = 1, .flags = RATE_IN_3XXX },
+ { .div = 4, .val = 2, .flags = RATE_IN_3XXX },
+ { .div = 8, .val = 3, .flags = RATE_IN_3XXX },
+ { .div = 16, .val = 4, .flags = RATE_IN_3XXX },
{ .div = 0 },
};
@@ -1111,6 +1033,8 @@ static struct clk sys_clkout2 = {
.clksel_mask = OMAP3430_CLKOUT2_DIV_MASK,
.clksel = sys_clkout2_clksel,
.recalc = &omap2_clksel_recalc,
+ .round_rate = &omap2_clksel_round_rate,
+ .set_rate = &omap2_clksel_set_rate
};
/* CM OUTPUT CLOCKS */
@@ -1125,9 +1049,9 @@ static struct clk corex2_fck = {
/* DPLL power domain clock controls */
static const struct clksel_rate div4_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_343X | DEFAULT_RATE },
- { .div = 2, .val = 2, .flags = RATE_IN_343X },
- { .div = 4, .val = 4, .flags = RATE_IN_343X },
+ { .div = 1, .val = 1, .flags = RATE_IN_3XXX },
+ { .div = 2, .val = 2, .flags = RATE_IN_3XXX },
+ { .div = 4, .val = 4, .flags = RATE_IN_3XXX },
{ .div = 0 }
};
@@ -1161,8 +1085,8 @@ static struct clk mpu_ck = {
/* arm_fck is divided by two when DPLL1 locked; otherwise, passthrough mpu_ck */
static const struct clksel_rate arm_fck_rates[] = {
- { .div = 1, .val = 0, .flags = RATE_IN_343X | DEFAULT_RATE },
- { .div = 2, .val = 1, .flags = RATE_IN_343X },
+ { .div = 1, .val = 0, .flags = RATE_IN_3XXX },
+ { .div = 2, .val = 1, .flags = RATE_IN_3XXX },
{ .div = 0 },
};
@@ -1333,25 +1257,25 @@ static struct clk gfx_cg2_ck = {
static const struct clksel_rate sgx_core_rates[] = {
{ .div = 2, .val = 5, .flags = RATE_IN_36XX },
- { .div = 3, .val = 0, .flags = RATE_IN_343X | DEFAULT_RATE },
- { .div = 4, .val = 1, .flags = RATE_IN_343X },
- { .div = 6, .val = 2, .flags = RATE_IN_343X },
+ { .div = 3, .val = 0, .flags = RATE_IN_3XXX },
+ { .div = 4, .val = 1, .flags = RATE_IN_3XXX },
+ { .div = 6, .val = 2, .flags = RATE_IN_3XXX },
{ .div = 0 },
};
static const struct clksel_rate sgx_192m_rates[] = {
- { .div = 1, .val = 4, .flags = RATE_IN_36XX | DEFAULT_RATE },
+ { .div = 1, .val = 4, .flags = RATE_IN_36XX },
{ .div = 0 },
};
static const struct clksel_rate sgx_corex2_rates[] = {
- { .div = 3, .val = 6, .flags = RATE_IN_36XX | DEFAULT_RATE },
+ { .div = 3, .val = 6, .flags = RATE_IN_36XX },
{ .div = 5, .val = 7, .flags = RATE_IN_36XX },
{ .div = 0 },
};
static const struct clksel_rate sgx_96m_rates[] = {
- { .div = 1, .val = 3, .flags = RATE_IN_343X | DEFAULT_RATE },
+ { .div = 1, .val = 3, .flags = RATE_IN_3XXX },
{ .div = 0 },
};
@@ -1576,12 +1500,12 @@ static struct clk i2c1_fck = {
* MCBSP 2, 3, 4 get their 96MHz clock from per_96m_fck.
*/
static const struct clksel_rate common_mcbsp_96m_rates[] = {
- { .div = 1, .val = 0, .flags = RATE_IN_343X | DEFAULT_RATE },
+ { .div = 1, .val = 0, .flags = RATE_IN_3XXX },
{ .div = 0 }
};
static const struct clksel_rate common_mcbsp_mcbsp_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_343X | DEFAULT_RATE },
+ { .div = 1, .val = 1, .flags = RATE_IN_3XXX },
{ .div = 0 }
};
@@ -1714,12 +1638,12 @@ static struct clk hdq_fck = {
/* DPLL3-derived clock */
static const struct clksel_rate ssi_ssr_corex2_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_343X | DEFAULT_RATE },
- { .div = 2, .val = 2, .flags = RATE_IN_343X },
- { .div = 3, .val = 3, .flags = RATE_IN_343X },
- { .div = 4, .val = 4, .flags = RATE_IN_343X },
- { .div = 6, .val = 6, .flags = RATE_IN_343X },
- { .div = 8, .val = 8, .flags = RATE_IN_343X },
+ { .div = 1, .val = 1, .flags = RATE_IN_3XXX },
+ { .div = 2, .val = 2, .flags = RATE_IN_3XXX },
+ { .div = 3, .val = 3, .flags = RATE_IN_3XXX },
+ { .div = 4, .val = 4, .flags = RATE_IN_3XXX },
+ { .div = 6, .val = 6, .flags = RATE_IN_3XXX },
+ { .div = 8, .val = 8, .flags = RATE_IN_3XXX },
{ .div = 0 }
};
@@ -2353,18 +2277,18 @@ static struct clk usbhost_ick = {
/* WKUP */
static const struct clksel_rate usim_96m_rates[] = {
- { .div = 2, .val = 3, .flags = RATE_IN_343X | DEFAULT_RATE },
- { .div = 4, .val = 4, .flags = RATE_IN_343X },
- { .div = 8, .val = 5, .flags = RATE_IN_343X },
- { .div = 10, .val = 6, .flags = RATE_IN_343X },
+ { .div = 2, .val = 3, .flags = RATE_IN_3XXX },
+ { .div = 4, .val = 4, .flags = RATE_IN_3XXX },
+ { .div = 8, .val = 5, .flags = RATE_IN_3XXX },
+ { .div = 10, .val = 6, .flags = RATE_IN_3XXX },
{ .div = 0 },
};
static const struct clksel_rate usim_120m_rates[] = {
- { .div = 4, .val = 7, .flags = RATE_IN_343X | DEFAULT_RATE },
- { .div = 8, .val = 8, .flags = RATE_IN_343X },
- { .div = 16, .val = 9, .flags = RATE_IN_343X },
- { .div = 20, .val = 10, .flags = RATE_IN_343X },
+ { .div = 4, .val = 7, .flags = RATE_IN_3XXX },
+ { .div = 8, .val = 8, .flags = RATE_IN_3XXX },
+ { .div = 16, .val = 9, .flags = RATE_IN_3XXX },
+ { .div = 20, .val = 10, .flags = RATE_IN_3XXX },
{ .div = 0 },
};
@@ -2951,22 +2875,22 @@ static struct clk mcbsp4_fck = {
/* More information: ARM Cortex-A8 Technical Reference Manual, sect 10.1 */
static const struct clksel_rate emu_src_sys_rates[] = {
- { .div = 1, .val = 0, .flags = RATE_IN_343X | DEFAULT_RATE },
+ { .div = 1, .val = 0, .flags = RATE_IN_3XXX },
{ .div = 0 },
};
static const struct clksel_rate emu_src_core_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_343X | DEFAULT_RATE },
+ { .div = 1, .val = 1, .flags = RATE_IN_3XXX },
{ .div = 0 },
};
static const struct clksel_rate emu_src_per_rates[] = {
- { .div = 1, .val = 2, .flags = RATE_IN_343X | DEFAULT_RATE },
+ { .div = 1, .val = 2, .flags = RATE_IN_3XXX },
{ .div = 0 },
};
static const struct clksel_rate emu_src_mpu_rates[] = {
- { .div = 1, .val = 3, .flags = RATE_IN_343X | DEFAULT_RATE },
+ { .div = 1, .val = 3, .flags = RATE_IN_3XXX },
{ .div = 0 },
};
@@ -2995,10 +2919,10 @@ static struct clk emu_src_ck = {
};
static const struct clksel_rate pclk_emu_rates[] = {
- { .div = 2, .val = 2, .flags = RATE_IN_343X | DEFAULT_RATE },
- { .div = 3, .val = 3, .flags = RATE_IN_343X },
- { .div = 4, .val = 4, .flags = RATE_IN_343X },
- { .div = 6, .val = 6, .flags = RATE_IN_343X },
+ { .div = 2, .val = 2, .flags = RATE_IN_3XXX },
+ { .div = 3, .val = 3, .flags = RATE_IN_3XXX },
+ { .div = 4, .val = 4, .flags = RATE_IN_3XXX },
+ { .div = 6, .val = 6, .flags = RATE_IN_3XXX },
{ .div = 0 },
};
@@ -3019,9 +2943,9 @@ static struct clk pclk_fck = {
};
static const struct clksel_rate pclkx2_emu_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_343X | DEFAULT_RATE },
- { .div = 2, .val = 2, .flags = RATE_IN_343X },
- { .div = 3, .val = 3, .flags = RATE_IN_343X },
+ { .div = 1, .val = 1, .flags = RATE_IN_3XXX },
+ { .div = 2, .val = 2, .flags = RATE_IN_3XXX },
+ { .div = 3, .val = 3, .flags = RATE_IN_3XXX },
{ .div = 0 },
};
@@ -3069,9 +2993,9 @@ static struct clk traceclk_src_fck = {
};
static const struct clksel_rate traceclk_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_343X | DEFAULT_RATE },
- { .div = 2, .val = 2, .flags = RATE_IN_343X },
- { .div = 4, .val = 4, .flags = RATE_IN_343X },
+ { .div = 1, .val = 1, .flags = RATE_IN_3XXX },
+ { .div = 2, .val = 2, .flags = RATE_IN_3XXX },
+ { .div = 4, .val = 4, .flags = RATE_IN_3XXX },
{ .div = 0 },
};
@@ -3360,7 +3284,7 @@ static struct omap_clk omap3xxx_clks[] = {
CLK("mmci-omap-hs.2", "ick", &mmchs3_ick, CK_3430ES2 | CK_AM35XX),
CLK(NULL, "icr_ick", &icr_ick, CK_343X),
CLK(NULL, "aes2_ick", &aes2_ick, CK_343X),
- CLK(NULL, "sha12_ick", &sha12_ick, CK_343X),
+ CLK("omap-sham", "ick", &sha12_ick, CK_343X),
CLK(NULL, "des2_ick", &des2_ick, CK_343X),
CLK("mmci-omap-hs.1", "ick", &mmchs2_ick, CK_3XXX),
CLK("mmci-omap-hs.0", "ick", &mmchs1_ick, CK_3XXX),
@@ -3472,8 +3396,8 @@ static struct omap_clk omap3xxx_clks[] = {
CLK(NULL, "ipss_ick", &ipss_ick, CK_AM35XX),
CLK(NULL, "rmii_ck", &rmii_ck, CK_AM35XX),
CLK(NULL, "pclk_ck", &pclk_ck, CK_AM35XX),
- CLK("davinci_emac", "ick", &emac_ick, CK_AM35XX),
- CLK("davinci_emac", "fck", &emac_fck, CK_AM35XX),
+ CLK("davinci_emac", "emac_clk", &emac_ick, CK_AM35XX),
+ CLK("davinci_emac", "phy_clk", &emac_fck, CK_AM35XX),
CLK("vpfe-capture", "master", &vpfe_ick, CK_AM35XX),
CLK("vpfe-capture", "slave", &vpfe_fck, CK_AM35XX),
CLK("musb_hdrc", "ick", &hsotgusb_ick_am35xx, CK_AM35XX),
@@ -3488,14 +3412,8 @@ int __init omap3xxx_clk_init(void)
struct omap_clk *c;
u32 cpu_clkflg = CK_3XXX;
- if (cpu_is_omap3517()) {
- cpu_mask = RATE_IN_343X | RATE_IN_3430ES2;
- cpu_clkflg |= CK_3517;
- } else if (cpu_is_omap3505()) {
- cpu_mask = RATE_IN_343X | RATE_IN_3430ES2;
- cpu_clkflg |= CK_3505;
- } else if (cpu_is_omap34xx()) {
- cpu_mask = RATE_IN_343X;
+ if (cpu_is_omap34xx()) {
+ cpu_mask = RATE_IN_3XXX;
cpu_clkflg |= CK_343X;
/*
@@ -3506,10 +3424,17 @@ int __init omap3xxx_clk_init(void)
/* No 3430ES1-only rates exist, so no RATE_IN_3430ES1 */
cpu_clkflg |= CK_3430ES1;
} else {
- cpu_mask |= RATE_IN_3430ES2;
+ cpu_mask |= RATE_IN_3430ES2PLUS;
cpu_clkflg |= CK_3430ES2;
}
+ } else if (cpu_is_omap3517()) {
+ cpu_mask = RATE_IN_3XXX | RATE_IN_3430ES2PLUS;
+ cpu_clkflg |= CK_3517;
+ } else if (cpu_is_omap3505()) {
+ cpu_mask = RATE_IN_3XXX | RATE_IN_3430ES2PLUS;
+ cpu_clkflg |= CK_3505;
}
+
if (omap3_has_192mhz_clk())
omap_96m_alwon_fck = omap_96m_alwon_fck_3630;
@@ -3520,14 +3445,7 @@ int __init omap3xxx_clk_init(void)
/*
* XXX This type of dynamic rewriting of the clock tree is
* deprecated and should be revised soon.
- */
- dpll4_m2_ck = dpll4_m2_ck_3630;
- dpll4_m3_ck = dpll4_m3_ck_3630;
- dpll4_m4_ck = dpll4_m4_ck_3630;
- dpll4_m5_ck = dpll4_m5_ck_3630;
- dpll4_m6_ck = dpll4_m6_ck_3630;
-
- /*
+ *
* For 3630: override clkops_omap2_dflt_wait for the
* clocks affected from PWRDN reset Limitation
*/
@@ -3543,18 +3461,12 @@ int __init omap3xxx_clk_init(void)
&clkops_omap36xx_pwrdn_with_hsdiv_wait_restore;
dpll4_m6x2_ck.ops =
&clkops_omap36xx_pwrdn_with_hsdiv_wait_restore;
- } else {
- /*
- * XXX This type of dynamic rewriting of the clock tree is
- * deprecated and should be revised soon.
- */
- dpll4_m2_ck = dpll4_m2_ck_34xx;
- dpll4_m3_ck = dpll4_m3_ck_34xx;
- dpll4_m4_ck = dpll4_m4_ck_34xx;
- dpll4_m5_ck = dpll4_m5_ck_34xx;
- dpll4_m6_ck = dpll4_m6_ck_34xx;
}
+ /*
+ * XXX This type of dynamic rewriting of the clock tree is
+ * deprecated and should be revised soon.
+ */
if (cpu_is_omap3630())
dpll4_dd = dpll4_dd_3630;
else
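Before the next file, a minimal sketch of how the RATE_IN_* flags used throughout clock3xxx_data.c are meant to be consumed: omap3xxx_clk_init() builds a cpu_mask (RATE_IN_3XXX, plus RATE_IN_3430ES2PLUS or RATE_IN_36XX where applicable), and the clksel helpers skip any rate-table entry whose flags do not intersect that mask, so the first matching entry serves as the default in place of the removed DEFAULT_RATE marker. The flag values and the first_valid() helper below are illustrative assumptions, not the kernel's actual helpers.

#include <stdio.h>

/* Flag values are illustrative placeholders, not the kernel's. */
#define RATE_IN_3XXX		(1 << 0)
#define RATE_IN_3430ES2PLUS	(1 << 1)
#define RATE_IN_36XX		(1 << 2)

struct clksel_rate {
	unsigned char div;
	unsigned char val;
	unsigned short flags;
};

/* Mirrors the shape of div31_dpll3_rates[] above (trimmed). */
static const struct clksel_rate div_rates[] = {
	{ .div = 1, .val = 1, .flags = RATE_IN_3XXX },
	{ .div = 2, .val = 2, .flags = RATE_IN_3XXX },
	{ .div = 3, .val = 3, .flags = RATE_IN_3430ES2PLUS },
	{ .div = 0 },			/* terminator, as in the tables above */
};

/* First table entry valid on this chip; a stand-in for the real helpers. */
static const struct clksel_rate *first_valid(const struct clksel_rate *r,
					     unsigned short cpu_mask)
{
	for (; r->div; r++)
		if (r->flags & cpu_mask)
			return r;
	return NULL;
}

int main(void)
{
	/* As in omap3xxx_clk_init(): 3430 ES2+ enables the extra rates. */
	unsigned short cpu_mask = RATE_IN_3XXX | RATE_IN_3430ES2PLUS;
	const struct clksel_rate *r = first_valid(div_rates, cpu_mask);

	if (r)
		printf("div=%d val=%d\n", r->div, r->val);
	return 0;
}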
diff --git a/arch/arm/mach-omap2/clock44xx_data.c b/arch/arm/mach-omap2/clock44xx_data.c
index a5c0c9c8e496..02804224517b 100644
--- a/arch/arm/mach-omap2/clock44xx_data.c
+++ b/arch/arm/mach-omap2/clock44xx_data.c
@@ -2675,6 +2675,11 @@ static struct omap_clk omap44xx_clks[] = {
CLK("omap2_mcspi.2", "ick", &dummy_ck, CK_443X),
CLK("omap2_mcspi.3", "ick", &dummy_ck, CK_443X),
CLK("omap2_mcspi.4", "ick", &dummy_ck, CK_443X),
+ CLK("mmci-omap-hs.0", "ick", &dummy_ck, CK_443X),
+ CLK("mmci-omap-hs.1", "ick", &dummy_ck, CK_443X),
+ CLK("mmci-omap-hs.2", "ick", &dummy_ck, CK_443X),
+ CLK("mmci-omap-hs.3", "ick", &dummy_ck, CK_443X),
+ CLK("mmci-omap-hs.4", "ick", &dummy_ck, CK_443X),
CLK(NULL, "uart1_ick", &dummy_ck, CK_443X),
CLK(NULL, "uart2_ick", &dummy_ck, CK_443X),
CLK(NULL, "uart3_ick", &dummy_ck, CK_443X),
diff --git a/arch/arm/mach-omap2/clock_common_data.c b/arch/arm/mach-omap2/clock_common_data.c
index f69096b88cdb..1cf8131205fa 100644
--- a/arch/arm/mach-omap2/clock_common_data.c
+++ b/arch/arm/mach-omap2/clock_common_data.c
@@ -20,20 +20,20 @@
/* clksel_rate data common to 24xx/343x */
const struct clksel_rate gpt_32k_rates[] = {
- { .div = 1, .val = 0, .flags = RATE_IN_24XX | RATE_IN_343X | DEFAULT_RATE },
+ { .div = 1, .val = 0, .flags = RATE_IN_24XX | RATE_IN_3XXX },
{ .div = 0 }
};
const struct clksel_rate gpt_sys_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_24XX | RATE_IN_343X | DEFAULT_RATE },
+ { .div = 1, .val = 1, .flags = RATE_IN_24XX | RATE_IN_3XXX },
{ .div = 0 }
};
const struct clksel_rate gfx_l3_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_24XX | RATE_IN_343X },
- { .div = 2, .val = 2, .flags = RATE_IN_24XX | RATE_IN_343X | DEFAULT_RATE },
- { .div = 3, .val = 3, .flags = RATE_IN_243X | RATE_IN_343X },
- { .div = 4, .val = 4, .flags = RATE_IN_243X | RATE_IN_343X },
+ { .div = 1, .val = 1, .flags = RATE_IN_24XX | RATE_IN_3XXX },
+ { .div = 2, .val = 2, .flags = RATE_IN_24XX | RATE_IN_3XXX },
+ { .div = 3, .val = 3, .flags = RATE_IN_243X | RATE_IN_3XXX },
+ { .div = 4, .val = 4, .flags = RATE_IN_243X | RATE_IN_3XXX },
{ .div = 0 }
};
diff --git a/arch/arm/mach-omap2/clockdomain.c b/arch/arm/mach-omap2/clockdomain.c
index 6e568ec995ee..5d80cb897489 100644
--- a/arch/arm/mach-omap2/clockdomain.c
+++ b/arch/arm/mach-omap2/clockdomain.c
@@ -809,7 +809,7 @@ int omap2_clkdm_sleep(struct clockdomain *clkdm)
if (cpu_is_omap24xx()) {
- cm_set_mod_reg_bits(OMAP24XX_FORCESTATE,
+ cm_set_mod_reg_bits(OMAP24XX_FORCESTATE_MASK,
clkdm->pwrdm.ptr->prcm_offs, OMAP2_PM_PWSTCTRL);
} else if (cpu_is_omap34xx() || cpu_is_omap44xx()) {
@@ -853,7 +853,7 @@ int omap2_clkdm_wakeup(struct clockdomain *clkdm)
if (cpu_is_omap24xx()) {
- cm_clear_mod_reg_bits(OMAP24XX_FORCESTATE,
+ cm_clear_mod_reg_bits(OMAP24XX_FORCESTATE_MASK,
clkdm->pwrdm.ptr->prcm_offs, OMAP2_PM_PWSTCTRL);
} else if (cpu_is_omap34xx() || cpu_is_omap44xx()) {
diff --git a/arch/arm/mach-omap2/clockdomains44xx.h b/arch/arm/mach-omap2/clockdomains44xx.h
index 438aaee2e392..7e5ba0f67925 100644
--- a/arch/arm/mach-omap2/clockdomains44xx.h
+++ b/arch/arm/mach-omap2/clockdomains44xx.h
@@ -131,7 +131,7 @@ static struct clockdomain mpuss_44xx_clkdm = {
static struct clockdomain mpu0_44xx_clkdm = {
.name = "mpu0_clkdm",
.pwrdm = { .name = "cpu0_pwrdm" },
- .clkstctrl_reg = OMAP4430_CM_PDA_CPU0_CLKSTCTRL,
+ .clkstctrl_reg = OMAP4430_CM_CPU0_CLKSTCTRL,
.clktrctrl_mask = OMAP4430_CLKTRCTRL_MASK,
.flags = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP,
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
@@ -140,7 +140,7 @@ static struct clockdomain mpu0_44xx_clkdm = {
static struct clockdomain mpu1_44xx_clkdm = {
.name = "mpu1_clkdm",
.pwrdm = { .name = "cpu1_pwrdm" },
- .clkstctrl_reg = OMAP4430_CM_PDA_CPU1_CLKSTCTRL,
+ .clkstctrl_reg = OMAP4430_CM_CPU1_CLKSTCTRL,
.clktrctrl_mask = OMAP4430_CLKTRCTRL_MASK,
.flags = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP,
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
diff --git a/arch/arm/mach-omap2/cm-regbits-24xx.h b/arch/arm/mach-omap2/cm-regbits-24xx.h
index 297a2fe634ea..da51cc3ed7eb 100644
--- a/arch/arm/mach-omap2/cm-regbits-24xx.h
+++ b/arch/arm/mach-omap2/cm-regbits-24xx.h
@@ -20,43 +20,43 @@
/* CM_FCLKEN1_CORE and CM_ICLKEN1_CORE shared bits */
#define OMAP24XX_EN_CAM_SHIFT 31
-#define OMAP24XX_EN_CAM (1 << 31)
+#define OMAP24XX_EN_CAM_MASK (1 << 31)
#define OMAP24XX_EN_WDT4_SHIFT 29
-#define OMAP24XX_EN_WDT4 (1 << 29)
+#define OMAP24XX_EN_WDT4_MASK (1 << 29)
#define OMAP2420_EN_WDT3_SHIFT 28
-#define OMAP2420_EN_WDT3 (1 << 28)
+#define OMAP2420_EN_WDT3_MASK (1 << 28)
#define OMAP24XX_EN_MSPRO_SHIFT 27
-#define OMAP24XX_EN_MSPRO (1 << 27)
+#define OMAP24XX_EN_MSPRO_MASK (1 << 27)
#define OMAP24XX_EN_FAC_SHIFT 25
-#define OMAP24XX_EN_FAC (1 << 25)
+#define OMAP24XX_EN_FAC_MASK (1 << 25)
#define OMAP2420_EN_EAC_SHIFT 24
-#define OMAP2420_EN_EAC (1 << 24)
+#define OMAP2420_EN_EAC_MASK (1 << 24)
#define OMAP24XX_EN_HDQ_SHIFT 23
-#define OMAP24XX_EN_HDQ (1 << 23)
+#define OMAP24XX_EN_HDQ_MASK (1 << 23)
#define OMAP2420_EN_I2C2_SHIFT 20
-#define OMAP2420_EN_I2C2 (1 << 20)
+#define OMAP2420_EN_I2C2_MASK (1 << 20)
#define OMAP2420_EN_I2C1_SHIFT 19
-#define OMAP2420_EN_I2C1 (1 << 19)
+#define OMAP2420_EN_I2C1_MASK (1 << 19)
/* CM_FCLKEN2_CORE and CM_ICLKEN2_CORE shared bits */
#define OMAP2430_EN_MCBSP5_SHIFT 5
-#define OMAP2430_EN_MCBSP5 (1 << 5)
+#define OMAP2430_EN_MCBSP5_MASK (1 << 5)
#define OMAP2430_EN_MCBSP4_SHIFT 4
-#define OMAP2430_EN_MCBSP4 (1 << 4)
+#define OMAP2430_EN_MCBSP4_MASK (1 << 4)
#define OMAP2430_EN_MCBSP3_SHIFT 3
-#define OMAP2430_EN_MCBSP3 (1 << 3)
+#define OMAP2430_EN_MCBSP3_MASK (1 << 3)
#define OMAP24XX_EN_SSI_SHIFT 1
-#define OMAP24XX_EN_SSI (1 << 1)
+#define OMAP24XX_EN_SSI_MASK (1 << 1)
/* CM_FCLKEN_WKUP and CM_ICLKEN_WKUP shared bits */
#define OMAP24XX_EN_MPU_WDT_SHIFT 3
-#define OMAP24XX_EN_MPU_WDT (1 << 3)
+#define OMAP24XX_EN_MPU_WDT_MASK (1 << 3)
/* Bits specific to each register */
/* CM_IDLEST_MPU */
/* 2430 only */
-#define OMAP2430_ST_MPU (1 << 0)
+#define OMAP2430_ST_MPU_MASK (1 << 0)
/* CM_CLKSEL_MPU */
#define OMAP24XX_CLKSEL_MPU_SHIFT 0
@@ -68,46 +68,46 @@
/* CM_FCLKEN1_CORE specific bits*/
#define OMAP24XX_EN_TV_SHIFT 2
-#define OMAP24XX_EN_TV (1 << 2)
+#define OMAP24XX_EN_TV_MASK (1 << 2)
#define OMAP24XX_EN_DSS2_SHIFT 1
-#define OMAP24XX_EN_DSS2 (1 << 1)
+#define OMAP24XX_EN_DSS2_MASK (1 << 1)
#define OMAP24XX_EN_DSS1_SHIFT 0
-#define OMAP24XX_EN_DSS1 (1 << 0)
+#define OMAP24XX_EN_DSS1_MASK (1 << 0)
/* CM_FCLKEN2_CORE specific bits */
#define OMAP2430_EN_I2CHS2_SHIFT 20
-#define OMAP2430_EN_I2CHS2 (1 << 20)
+#define OMAP2430_EN_I2CHS2_MASK (1 << 20)
#define OMAP2430_EN_I2CHS1_SHIFT 19
-#define OMAP2430_EN_I2CHS1 (1 << 19)
+#define OMAP2430_EN_I2CHS1_MASK (1 << 19)
#define OMAP2430_EN_MMCHSDB2_SHIFT 17
-#define OMAP2430_EN_MMCHSDB2 (1 << 17)
+#define OMAP2430_EN_MMCHSDB2_MASK (1 << 17)
#define OMAP2430_EN_MMCHSDB1_SHIFT 16
-#define OMAP2430_EN_MMCHSDB1 (1 << 16)
+#define OMAP2430_EN_MMCHSDB1_MASK (1 << 16)
/* CM_ICLKEN1_CORE specific bits */
#define OMAP24XX_EN_MAILBOXES_SHIFT 30
-#define OMAP24XX_EN_MAILBOXES (1 << 30)
+#define OMAP24XX_EN_MAILBOXES_MASK (1 << 30)
#define OMAP24XX_EN_DSS_SHIFT 0
-#define OMAP24XX_EN_DSS (1 << 0)
+#define OMAP24XX_EN_DSS_MASK (1 << 0)
/* CM_ICLKEN2_CORE specific bits */
/* CM_ICLKEN3_CORE */
/* 2430 only */
#define OMAP2430_EN_SDRC_SHIFT 2
-#define OMAP2430_EN_SDRC (1 << 2)
+#define OMAP2430_EN_SDRC_MASK (1 << 2)
/* CM_ICLKEN4_CORE */
#define OMAP24XX_EN_PKA_SHIFT 4
-#define OMAP24XX_EN_PKA (1 << 4)
+#define OMAP24XX_EN_PKA_MASK (1 << 4)
#define OMAP24XX_EN_AES_SHIFT 3
-#define OMAP24XX_EN_AES (1 << 3)
+#define OMAP24XX_EN_AES_MASK (1 << 3)
#define OMAP24XX_EN_RNG_SHIFT 2
-#define OMAP24XX_EN_RNG (1 << 2)
+#define OMAP24XX_EN_RNG_MASK (1 << 2)
#define OMAP24XX_EN_SHA_SHIFT 1
-#define OMAP24XX_EN_SHA (1 << 1)
+#define OMAP24XX_EN_SHA_MASK (1 << 1)
#define OMAP24XX_EN_DES_SHIFT 0
-#define OMAP24XX_EN_DES (1 << 0)
+#define OMAP24XX_EN_DES_MASK (1 << 0)
/* CM_IDLEST1_CORE specific bits */
#define OMAP24XX_ST_MAILBOXES_SHIFT 30
@@ -138,9 +138,9 @@
/* CM_IDLEST2_CORE */
#define OMAP2430_ST_MCBSP5_SHIFT 5
#define OMAP2430_ST_MCBSP5_MASK (1 << 5)
-#define OMAP2430_ST_MCBSP4_SHIFT 4
+#define OMAP2430_ST_MCBSP4_SHIFT 4
#define OMAP2430_ST_MCBSP4_MASK (1 << 4)
-#define OMAP2430_ST_MCBSP3_SHIFT 3
+#define OMAP2430_ST_MCBSP3_SHIFT 3
#define OMAP2430_ST_MCBSP3_MASK (1 << 3)
#define OMAP24XX_ST_SSI_SHIFT 1
#define OMAP24XX_ST_SSI_MASK (1 << 1)
@@ -162,62 +162,62 @@
#define OMAP24XX_ST_DES_MASK (1 << 0)
/* CM_AUTOIDLE1_CORE */
-#define OMAP24XX_AUTO_CAM (1 << 31)
-#define OMAP24XX_AUTO_MAILBOXES (1 << 30)
-#define OMAP24XX_AUTO_WDT4 (1 << 29)
-#define OMAP2420_AUTO_WDT3 (1 << 28)
-#define OMAP24XX_AUTO_MSPRO (1 << 27)
-#define OMAP2420_AUTO_MMC (1 << 26)
-#define OMAP24XX_AUTO_FAC (1 << 25)
-#define OMAP2420_AUTO_EAC (1 << 24)
-#define OMAP24XX_AUTO_HDQ (1 << 23)
-#define OMAP24XX_AUTO_UART2 (1 << 22)
-#define OMAP24XX_AUTO_UART1 (1 << 21)
-#define OMAP24XX_AUTO_I2C2 (1 << 20)
-#define OMAP24XX_AUTO_I2C1 (1 << 19)
-#define OMAP24XX_AUTO_MCSPI2 (1 << 18)
-#define OMAP24XX_AUTO_MCSPI1 (1 << 17)
-#define OMAP24XX_AUTO_MCBSP2 (1 << 16)
-#define OMAP24XX_AUTO_MCBSP1 (1 << 15)
-#define OMAP24XX_AUTO_GPT12 (1 << 14)
-#define OMAP24XX_AUTO_GPT11 (1 << 13)
-#define OMAP24XX_AUTO_GPT10 (1 << 12)
-#define OMAP24XX_AUTO_GPT9 (1 << 11)
-#define OMAP24XX_AUTO_GPT8 (1 << 10)
-#define OMAP24XX_AUTO_GPT7 (1 << 9)
-#define OMAP24XX_AUTO_GPT6 (1 << 8)
-#define OMAP24XX_AUTO_GPT5 (1 << 7)
-#define OMAP24XX_AUTO_GPT4 (1 << 6)
-#define OMAP24XX_AUTO_GPT3 (1 << 5)
-#define OMAP24XX_AUTO_GPT2 (1 << 4)
-#define OMAP2420_AUTO_VLYNQ (1 << 3)
-#define OMAP24XX_AUTO_DSS (1 << 0)
+#define OMAP24XX_AUTO_CAM_MASK (1 << 31)
+#define OMAP24XX_AUTO_MAILBOXES_MASK (1 << 30)
+#define OMAP24XX_AUTO_WDT4_MASK (1 << 29)
+#define OMAP2420_AUTO_WDT3_MASK (1 << 28)
+#define OMAP24XX_AUTO_MSPRO_MASK (1 << 27)
+#define OMAP2420_AUTO_MMC_MASK (1 << 26)
+#define OMAP24XX_AUTO_FAC_MASK (1 << 25)
+#define OMAP2420_AUTO_EAC_MASK (1 << 24)
+#define OMAP24XX_AUTO_HDQ_MASK (1 << 23)
+#define OMAP24XX_AUTO_UART2_MASK (1 << 22)
+#define OMAP24XX_AUTO_UART1_MASK (1 << 21)
+#define OMAP24XX_AUTO_I2C2_MASK (1 << 20)
+#define OMAP24XX_AUTO_I2C1_MASK (1 << 19)
+#define OMAP24XX_AUTO_MCSPI2_MASK (1 << 18)
+#define OMAP24XX_AUTO_MCSPI1_MASK (1 << 17)
+#define OMAP24XX_AUTO_MCBSP2_MASK (1 << 16)
+#define OMAP24XX_AUTO_MCBSP1_MASK (1 << 15)
+#define OMAP24XX_AUTO_GPT12_MASK (1 << 14)
+#define OMAP24XX_AUTO_GPT11_MASK (1 << 13)
+#define OMAP24XX_AUTO_GPT10_MASK (1 << 12)
+#define OMAP24XX_AUTO_GPT9_MASK (1 << 11)
+#define OMAP24XX_AUTO_GPT8_MASK (1 << 10)
+#define OMAP24XX_AUTO_GPT7_MASK (1 << 9)
+#define OMAP24XX_AUTO_GPT6_MASK (1 << 8)
+#define OMAP24XX_AUTO_GPT5_MASK (1 << 7)
+#define OMAP24XX_AUTO_GPT4_MASK (1 << 6)
+#define OMAP24XX_AUTO_GPT3_MASK (1 << 5)
+#define OMAP24XX_AUTO_GPT2_MASK (1 << 4)
+#define OMAP2420_AUTO_VLYNQ_MASK (1 << 3)
+#define OMAP24XX_AUTO_DSS_MASK (1 << 0)
/* CM_AUTOIDLE2_CORE */
-#define OMAP2430_AUTO_MDM_INTC (1 << 11)
-#define OMAP2430_AUTO_GPIO5 (1 << 10)
-#define OMAP2430_AUTO_MCSPI3 (1 << 9)
-#define OMAP2430_AUTO_MMCHS2 (1 << 8)
-#define OMAP2430_AUTO_MMCHS1 (1 << 7)
-#define OMAP2430_AUTO_USBHS (1 << 6)
-#define OMAP2430_AUTO_MCBSP5 (1 << 5)
-#define OMAP2430_AUTO_MCBSP4 (1 << 4)
-#define OMAP2430_AUTO_MCBSP3 (1 << 3)
-#define OMAP24XX_AUTO_UART3 (1 << 2)
-#define OMAP24XX_AUTO_SSI (1 << 1)
-#define OMAP24XX_AUTO_USB (1 << 0)
+#define OMAP2430_AUTO_MDM_INTC_MASK (1 << 11)
+#define OMAP2430_AUTO_GPIO5_MASK (1 << 10)
+#define OMAP2430_AUTO_MCSPI3_MASK (1 << 9)
+#define OMAP2430_AUTO_MMCHS2_MASK (1 << 8)
+#define OMAP2430_AUTO_MMCHS1_MASK (1 << 7)
+#define OMAP2430_AUTO_USBHS_MASK (1 << 6)
+#define OMAP2430_AUTO_MCBSP5_MASK (1 << 5)
+#define OMAP2430_AUTO_MCBSP4_MASK (1 << 4)
+#define OMAP2430_AUTO_MCBSP3_MASK (1 << 3)
+#define OMAP24XX_AUTO_UART3_MASK (1 << 2)
+#define OMAP24XX_AUTO_SSI_MASK (1 << 1)
+#define OMAP24XX_AUTO_USB_MASK (1 << 0)
/* CM_AUTOIDLE3_CORE */
-#define OMAP24XX_AUTO_SDRC (1 << 2)
-#define OMAP24XX_AUTO_GPMC (1 << 1)
-#define OMAP24XX_AUTO_SDMA (1 << 0)
+#define OMAP24XX_AUTO_SDRC_MASK (1 << 2)
+#define OMAP24XX_AUTO_GPMC_MASK (1 << 1)
+#define OMAP24XX_AUTO_SDMA_MASK (1 << 0)
/* CM_AUTOIDLE4_CORE */
-#define OMAP24XX_AUTO_PKA (1 << 4)
-#define OMAP24XX_AUTO_AES (1 << 3)
-#define OMAP24XX_AUTO_RNG (1 << 2)
-#define OMAP24XX_AUTO_SHA (1 << 1)
-#define OMAP24XX_AUTO_DES (1 << 0)
+#define OMAP24XX_AUTO_PKA_MASK (1 << 4)
+#define OMAP24XX_AUTO_AES_MASK (1 << 3)
+#define OMAP24XX_AUTO_RNG_MASK (1 << 2)
+#define OMAP24XX_AUTO_SHA_MASK (1 << 1)
+#define OMAP24XX_AUTO_DES_MASK (1 << 0)
/* CM_CLKSEL1_CORE */
#define OMAP24XX_CLKSEL_USB_SHIFT 25
@@ -269,9 +269,9 @@
/* CM_FCLKEN_GFX */
#define OMAP24XX_EN_3D_SHIFT 2
-#define OMAP24XX_EN_3D (1 << 2)
+#define OMAP24XX_EN_3D_MASK (1 << 2)
#define OMAP24XX_EN_2D_SHIFT 1
-#define OMAP24XX_EN_2D (1 << 1)
+#define OMAP24XX_EN_2D_MASK (1 << 1)
/* CM_ICLKEN_GFX specific bits */
@@ -287,13 +287,13 @@
/* CM_ICLKEN_WKUP specific bits */
#define OMAP2430_EN_ICR_SHIFT 6
-#define OMAP2430_EN_ICR (1 << 6)
+#define OMAP2430_EN_ICR_MASK (1 << 6)
#define OMAP24XX_EN_OMAPCTRL_SHIFT 5
-#define OMAP24XX_EN_OMAPCTRL (1 << 5)
+#define OMAP24XX_EN_OMAPCTRL_MASK (1 << 5)
#define OMAP24XX_EN_WDT1_SHIFT 4
-#define OMAP24XX_EN_WDT1 (1 << 4)
+#define OMAP24XX_EN_WDT1_MASK (1 << 4)
#define OMAP24XX_EN_32KSYNC_SHIFT 1
-#define OMAP24XX_EN_32KSYNC (1 << 1)
+#define OMAP24XX_EN_32KSYNC_MASK (1 << 1)
/* CM_IDLEST_WKUP specific bits */
#define OMAP2430_ST_ICR_SHIFT 6
@@ -308,12 +308,12 @@
#define OMAP24XX_ST_32KSYNC_MASK (1 << 1)
/* CM_AUTOIDLE_WKUP */
-#define OMAP24XX_AUTO_OMAPCTRL (1 << 5)
-#define OMAP24XX_AUTO_WDT1 (1 << 4)
-#define OMAP24XX_AUTO_MPU_WDT (1 << 3)
-#define OMAP24XX_AUTO_GPIOS (1 << 2)
-#define OMAP24XX_AUTO_32KSYNC (1 << 1)
-#define OMAP24XX_AUTO_GPT1 (1 << 0)
+#define OMAP24XX_AUTO_OMAPCTRL_MASK (1 << 5)
+#define OMAP24XX_AUTO_WDT1_MASK (1 << 4)
+#define OMAP24XX_AUTO_MPU_WDT_MASK (1 << 3)
+#define OMAP24XX_AUTO_GPIOS_MASK (1 << 2)
+#define OMAP24XX_AUTO_32KSYNC_MASK (1 << 1)
+#define OMAP24XX_AUTO_GPT1_MASK (1 << 0)
/* CM_CLKSEL_WKUP */
#define OMAP24XX_CLKSEL_GPT1_SHIFT 0
@@ -328,12 +328,12 @@
#define OMAP24XX_EN_DPLL_MASK (0x3 << 0)
/* CM_IDLEST_CKGEN */
-#define OMAP24XX_ST_54M_APLL (1 << 9)
-#define OMAP24XX_ST_96M_APLL (1 << 8)
-#define OMAP24XX_ST_54M_CLK (1 << 6)
-#define OMAP24XX_ST_12M_CLK (1 << 5)
-#define OMAP24XX_ST_48M_CLK (1 << 4)
-#define OMAP24XX_ST_96M_CLK (1 << 2)
+#define OMAP24XX_ST_54M_APLL_MASK (1 << 9)
+#define OMAP24XX_ST_96M_APLL_MASK (1 << 8)
+#define OMAP24XX_ST_54M_CLK_MASK (1 << 6)
+#define OMAP24XX_ST_12M_CLK_MASK (1 << 5)
+#define OMAP24XX_ST_48M_CLK_MASK (1 << 4)
+#define OMAP24XX_ST_96M_CLK_MASK (1 << 2)
#define OMAP24XX_ST_CORE_CLK_SHIFT 0
#define OMAP24XX_ST_CORE_CLK_MASK (0x3 << 0)
@@ -355,11 +355,11 @@
#define OMAP24XX_DPLL_DIV_SHIFT 8
#define OMAP24XX_DPLL_DIV_MASK (0xf << 8)
#define OMAP24XX_54M_SOURCE_SHIFT 5
-#define OMAP24XX_54M_SOURCE (1 << 5)
+#define OMAP24XX_54M_SOURCE_MASK (1 << 5)
#define OMAP2430_96M_SOURCE_SHIFT 4
-#define OMAP2430_96M_SOURCE (1 << 4)
+#define OMAP2430_96M_SOURCE_MASK (1 << 4)
#define OMAP24XX_48M_SOURCE_SHIFT 3
-#define OMAP24XX_48M_SOURCE (1 << 3)
+#define OMAP24XX_48M_SOURCE_MASK (1 << 3)
#define OMAP2430_ALTCLK_SOURCE_SHIFT 0
#define OMAP2430_ALTCLK_SOURCE_MASK (0x7 << 0)
@@ -369,29 +369,29 @@
/* CM_FCLKEN_DSP */
#define OMAP2420_EN_IVA_COP_SHIFT 10
-#define OMAP2420_EN_IVA_COP (1 << 10)
+#define OMAP2420_EN_IVA_COP_MASK (1 << 10)
#define OMAP2420_EN_IVA_MPU_SHIFT 8
-#define OMAP2420_EN_IVA_MPU (1 << 8)
+#define OMAP2420_EN_IVA_MPU_MASK (1 << 8)
#define OMAP24XX_CM_FCLKEN_DSP_EN_DSP_SHIFT 0
-#define OMAP24XX_CM_FCLKEN_DSP_EN_DSP (1 << 0)
+#define OMAP24XX_CM_FCLKEN_DSP_EN_DSP_MASK (1 << 0)
/* CM_ICLKEN_DSP */
#define OMAP2420_EN_DSP_IPI_SHIFT 1
-#define OMAP2420_EN_DSP_IPI (1 << 1)
+#define OMAP2420_EN_DSP_IPI_MASK (1 << 1)
/* CM_IDLEST_DSP */
-#define OMAP2420_ST_IVA (1 << 8)
-#define OMAP2420_ST_IPI (1 << 1)
-#define OMAP24XX_ST_DSP (1 << 0)
+#define OMAP2420_ST_IVA_MASK (1 << 8)
+#define OMAP2420_ST_IPI_MASK (1 << 1)
+#define OMAP24XX_ST_DSP_MASK (1 << 0)
/* CM_AUTOIDLE_DSP */
-#define OMAP2420_AUTO_DSP_IPI (1 << 1)
+#define OMAP2420_AUTO_DSP_IPI_MASK (1 << 1)
/* CM_CLKSEL_DSP */
-#define OMAP2420_SYNC_IVA (1 << 13)
+#define OMAP2420_SYNC_IVA_MASK (1 << 13)
#define OMAP2420_CLKSEL_IVA_SHIFT 8
#define OMAP2420_CLKSEL_IVA_MASK (0x1f << 8)
-#define OMAP24XX_SYNC_DSP (1 << 7)
+#define OMAP24XX_SYNC_DSP_MASK (1 << 7)
#define OMAP24XX_CLKSEL_DSP_IF_SHIFT 5
#define OMAP24XX_CLKSEL_DSP_IF_MASK (0x3 << 5)
#define OMAP24XX_CLKSEL_DSP_SHIFT 0
@@ -406,24 +406,24 @@
/* CM_FCLKEN_MDM */
/* 2430 only */
#define OMAP2430_EN_OSC_SHIFT 1
-#define OMAP2430_EN_OSC (1 << 1)
+#define OMAP2430_EN_OSC_MASK (1 << 1)
/* CM_ICLKEN_MDM */
/* 2430 only */
#define OMAP2430_CM_ICLKEN_MDM_EN_MDM_SHIFT 0
-#define OMAP2430_CM_ICLKEN_MDM_EN_MDM (1 << 0)
+#define OMAP2430_CM_ICLKEN_MDM_EN_MDM_MASK (1 << 0)
/* CM_IDLEST_MDM specific bits */
/* 2430 only */
/* CM_AUTOIDLE_MDM */
/* 2430 only */
-#define OMAP2430_AUTO_OSC (1 << 1)
-#define OMAP2430_AUTO_MDM (1 << 0)
+#define OMAP2430_AUTO_OSC_MASK (1 << 1)
+#define OMAP2430_AUTO_MDM_MASK (1 << 0)
/* CM_CLKSEL_MDM */
/* 2430 only */
-#define OMAP2430_SYNC_MDM (1 << 4)
+#define OMAP2430_SYNC_MDM_MASK (1 << 4)
#define OMAP2430_CLKSEL_MDM_SHIFT 0
#define OMAP2430_CLKSEL_MDM_MASK (0xf << 0)
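A minimal sketch of the read-modify-write pattern the renamed *_MASK/_SHIFT pairs above support. The register is simulated with a plain variable and the value written is only an example; the macro values are copied from the definitions in this file.

#include <stdio.h>
#include <stdint.h>

#define OMAP24XX_48M_SOURCE_SHIFT	3
#define OMAP24XX_48M_SOURCE_MASK	(1 << 3)

int main(void)
{
	uint32_t clksel1 = 0;	/* stand-in for CM_CLKSEL1 register contents */

	/* Select the alternate 48 MHz source: clear the field, then set it. */
	clksel1 &= ~OMAP24XX_48M_SOURCE_MASK;
	clksel1 |= 1 << OMAP24XX_48M_SOURCE_SHIFT;

	/* Read it back: mask first, then shift down. */
	uint32_t src = (clksel1 & OMAP24XX_48M_SOURCE_MASK) >>
		       OMAP24XX_48M_SOURCE_SHIFT;

	printf("48M source = %u\n", (unsigned)src);
	return 0;
}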
diff --git a/arch/arm/mach-omap2/cm-regbits-34xx.h b/arch/arm/mach-omap2/cm-regbits-34xx.h
index a3a3ca07e383..fe82b79d5f3b 100644
--- a/arch/arm/mach-omap2/cm-regbits-34xx.h
+++ b/arch/arm/mach-omap2/cm-regbits-34xx.h
@@ -21,15 +21,15 @@
/* CM_FCLKEN1_CORE and CM_ICLKEN1_CORE shared bits */
#define OMAP3430ES2_EN_MMC3_MASK (1 << 30)
#define OMAP3430ES2_EN_MMC3_SHIFT 30
-#define OMAP3430_EN_MSPRO (1 << 23)
+#define OMAP3430_EN_MSPRO_MASK (1 << 23)
#define OMAP3430_EN_MSPRO_SHIFT 23
-#define OMAP3430_EN_HDQ (1 << 22)
+#define OMAP3430_EN_HDQ_MASK (1 << 22)
#define OMAP3430_EN_HDQ_SHIFT 22
-#define OMAP3430ES1_EN_FSHOSTUSB (1 << 5)
+#define OMAP3430ES1_EN_FSHOSTUSB_MASK (1 << 5)
#define OMAP3430ES1_EN_FSHOSTUSB_SHIFT 5
-#define OMAP3430ES1_EN_D2D (1 << 3)
+#define OMAP3430ES1_EN_D2D_MASK (1 << 3)
#define OMAP3430ES1_EN_D2D_SHIFT 3
-#define OMAP3430_EN_SSI (1 << 0)
+#define OMAP3430_EN_SSI_MASK (1 << 0)
#define OMAP3430_EN_SSI_SHIFT 0
/* CM_FCLKEN3_CORE and CM_ICLKEN3_CORE shared bits */
@@ -37,19 +37,19 @@
#define OMAP3430ES2_EN_USBTLL_MASK (1 << 2)
/* CM_FCLKEN_WKUP and CM_ICLKEN_WKUP shared bits */
-#define OMAP3430_EN_WDT2 (1 << 5)
+#define OMAP3430_EN_WDT2_MASK (1 << 5)
#define OMAP3430_EN_WDT2_SHIFT 5
/* CM_ICLKEN_CAM, CM_FCLKEN_CAM shared bits */
-#define OMAP3430_EN_CAM (1 << 0)
+#define OMAP3430_EN_CAM_MASK (1 << 0)
#define OMAP3430_EN_CAM_SHIFT 0
/* CM_FCLKEN_PER, CM_ICLKEN_PER shared bits */
-#define OMAP3430_EN_WDT3 (1 << 12)
+#define OMAP3430_EN_WDT3_MASK (1 << 12)
#define OMAP3430_EN_WDT3_SHIFT 12
/* CM_CLKSEL2_EMU, CM_CLKSEL3_EMU shared bits */
-#define OMAP3430_OVERRIDE_ENABLE (1 << 19)
+#define OMAP3430_OVERRIDE_ENABLE_MASK (1 << 19)
/* Bits specific to each register */
@@ -69,7 +69,7 @@
#define OMAP3430_EN_IVA2_DPLL_MASK (0x7 << 0)
/* CM_IDLEST_IVA2 */
-#define OMAP3430_ST_IVA2 (1 << 0)
+#define OMAP3430_ST_IVA2_MASK (1 << 0)
/* CM_IDLEST_PLL_IVA2 */
#define OMAP3430_ST_IVA2_CLK_SHIFT 0
@@ -114,7 +114,7 @@
#define OMAP3430_EN_MPU_DPLL_MASK (0x7 << 0)
/* CM_IDLEST_MPU */
-#define OMAP3430_ST_MPU (1 << 0)
+#define OMAP3430_ST_MPU_MASK (1 << 0)
/* CM_IDLEST_PLL_MPU */
#define OMAP3430_ST_MPU_CLK_SHIFT 0
@@ -145,50 +145,50 @@
#define OMAP3430_CLKACTIVITY_MPU_MASK (1 << 0)
/* CM_FCLKEN1_CORE specific bits */
-#define OMAP3430_EN_MODEM (1 << 31)
+#define OMAP3430_EN_MODEM_MASK (1 << 31)
#define OMAP3430_EN_MODEM_SHIFT 31
/* CM_ICLKEN1_CORE specific bits */
-#define OMAP3430_EN_ICR (1 << 29)
+#define OMAP3430_EN_ICR_MASK (1 << 29)
#define OMAP3430_EN_ICR_SHIFT 29
-#define OMAP3430_EN_AES2 (1 << 28)
+#define OMAP3430_EN_AES2_MASK (1 << 28)
#define OMAP3430_EN_AES2_SHIFT 28
-#define OMAP3430_EN_SHA12 (1 << 27)
+#define OMAP3430_EN_SHA12_MASK (1 << 27)
#define OMAP3430_EN_SHA12_SHIFT 27
-#define OMAP3430_EN_DES2 (1 << 26)
+#define OMAP3430_EN_DES2_MASK (1 << 26)
#define OMAP3430_EN_DES2_SHIFT 26
-#define OMAP3430ES1_EN_FAC (1 << 8)
+#define OMAP3430ES1_EN_FAC_MASK (1 << 8)
#define OMAP3430ES1_EN_FAC_SHIFT 8
-#define OMAP3430_EN_MAILBOXES (1 << 7)
+#define OMAP3430_EN_MAILBOXES_MASK (1 << 7)
#define OMAP3430_EN_MAILBOXES_SHIFT 7
-#define OMAP3430_EN_OMAPCTRL (1 << 6)
+#define OMAP3430_EN_OMAPCTRL_MASK (1 << 6)
#define OMAP3430_EN_OMAPCTRL_SHIFT 6
-#define OMAP3430_EN_SAD2D (1 << 3)
+#define OMAP3430_EN_SAD2D_MASK (1 << 3)
#define OMAP3430_EN_SAD2D_SHIFT 3
-#define OMAP3430_EN_SDRC (1 << 1)
+#define OMAP3430_EN_SDRC_MASK (1 << 1)
#define OMAP3430_EN_SDRC_SHIFT 1
/* AM35XX specific CM_ICLKEN1_CORE bits */
#define AM35XX_EN_IPSS_MASK (1 << 4)
#define AM35XX_EN_IPSS_SHIFT 4
-#define AM35XX_EN_UART4_MASK (1 << 23)
+#define AM35XX_EN_UART4_MASK (1 << 23)
#define AM35XX_EN_UART4_SHIFT 23
/* CM_ICLKEN2_CORE */
-#define OMAP3430_EN_PKA (1 << 4)
+#define OMAP3430_EN_PKA_MASK (1 << 4)
#define OMAP3430_EN_PKA_SHIFT 4
-#define OMAP3430_EN_AES1 (1 << 3)
+#define OMAP3430_EN_AES1_MASK (1 << 3)
#define OMAP3430_EN_AES1_SHIFT 3
-#define OMAP3430_EN_RNG (1 << 2)
+#define OMAP3430_EN_RNG_MASK (1 << 2)
#define OMAP3430_EN_RNG_SHIFT 2
-#define OMAP3430_EN_SHA11 (1 << 1)
+#define OMAP3430_EN_SHA11_MASK (1 << 1)
#define OMAP3430_EN_SHA11_SHIFT 1
-#define OMAP3430_EN_DES1 (1 << 0)
+#define OMAP3430_EN_DES1_MASK (1 << 0)
#define OMAP3430_EN_DES1_SHIFT 0
/* CM_ICLKEN3_CORE */
#define OMAP3430_EN_MAD2D_SHIFT 3
-#define OMAP3430_EN_MAD2D (1 << 3)
+#define OMAP3430_EN_MAD2D_MASK (1 << 3)
/* CM_FCLKEN3_CORE specific bits */
#define OMAP3430ES2_EN_TS_SHIFT 1
@@ -249,79 +249,79 @@
#define OMAP3430ES2_ST_CPEFUSE_MASK (1 << 0)
/* CM_AUTOIDLE1_CORE */
-#define OMAP3430_AUTO_MODEM (1 << 31)
+#define OMAP3430_AUTO_MODEM_MASK (1 << 31)
#define OMAP3430_AUTO_MODEM_SHIFT 31
-#define OMAP3430ES2_AUTO_MMC3 (1 << 30)
+#define OMAP3430ES2_AUTO_MMC3_MASK (1 << 30)
#define OMAP3430ES2_AUTO_MMC3_SHIFT 30
-#define OMAP3430ES2_AUTO_ICR (1 << 29)
+#define OMAP3430ES2_AUTO_ICR_MASK (1 << 29)
#define OMAP3430ES2_AUTO_ICR_SHIFT 29
-#define OMAP3430_AUTO_AES2 (1 << 28)
+#define OMAP3430_AUTO_AES2_MASK (1 << 28)
#define OMAP3430_AUTO_AES2_SHIFT 28
-#define OMAP3430_AUTO_SHA12 (1 << 27)
+#define OMAP3430_AUTO_SHA12_MASK (1 << 27)
#define OMAP3430_AUTO_SHA12_SHIFT 27
-#define OMAP3430_AUTO_DES2 (1 << 26)
+#define OMAP3430_AUTO_DES2_MASK (1 << 26)
#define OMAP3430_AUTO_DES2_SHIFT 26
-#define OMAP3430_AUTO_MMC2 (1 << 25)
+#define OMAP3430_AUTO_MMC2_MASK (1 << 25)
#define OMAP3430_AUTO_MMC2_SHIFT 25
-#define OMAP3430_AUTO_MMC1 (1 << 24)
+#define OMAP3430_AUTO_MMC1_MASK (1 << 24)
#define OMAP3430_AUTO_MMC1_SHIFT 24
-#define OMAP3430_AUTO_MSPRO (1 << 23)
+#define OMAP3430_AUTO_MSPRO_MASK (1 << 23)
#define OMAP3430_AUTO_MSPRO_SHIFT 23
-#define OMAP3430_AUTO_HDQ (1 << 22)
+#define OMAP3430_AUTO_HDQ_MASK (1 << 22)
#define OMAP3430_AUTO_HDQ_SHIFT 22
-#define OMAP3430_AUTO_MCSPI4 (1 << 21)
+#define OMAP3430_AUTO_MCSPI4_MASK (1 << 21)
#define OMAP3430_AUTO_MCSPI4_SHIFT 21
-#define OMAP3430_AUTO_MCSPI3 (1 << 20)
+#define OMAP3430_AUTO_MCSPI3_MASK (1 << 20)
#define OMAP3430_AUTO_MCSPI3_SHIFT 20
-#define OMAP3430_AUTO_MCSPI2 (1 << 19)
+#define OMAP3430_AUTO_MCSPI2_MASK (1 << 19)
#define OMAP3430_AUTO_MCSPI2_SHIFT 19
-#define OMAP3430_AUTO_MCSPI1 (1 << 18)
+#define OMAP3430_AUTO_MCSPI1_MASK (1 << 18)
#define OMAP3430_AUTO_MCSPI1_SHIFT 18
-#define OMAP3430_AUTO_I2C3 (1 << 17)
+#define OMAP3430_AUTO_I2C3_MASK (1 << 17)
#define OMAP3430_AUTO_I2C3_SHIFT 17
-#define OMAP3430_AUTO_I2C2 (1 << 16)
+#define OMAP3430_AUTO_I2C2_MASK (1 << 16)
#define OMAP3430_AUTO_I2C2_SHIFT 16
-#define OMAP3430_AUTO_I2C1 (1 << 15)
+#define OMAP3430_AUTO_I2C1_MASK (1 << 15)
#define OMAP3430_AUTO_I2C1_SHIFT 15
-#define OMAP3430_AUTO_UART2 (1 << 14)
+#define OMAP3430_AUTO_UART2_MASK (1 << 14)
#define OMAP3430_AUTO_UART2_SHIFT 14
-#define OMAP3430_AUTO_UART1 (1 << 13)
+#define OMAP3430_AUTO_UART1_MASK (1 << 13)
#define OMAP3430_AUTO_UART1_SHIFT 13
-#define OMAP3430_AUTO_GPT11 (1 << 12)
+#define OMAP3430_AUTO_GPT11_MASK (1 << 12)
#define OMAP3430_AUTO_GPT11_SHIFT 12
-#define OMAP3430_AUTO_GPT10 (1 << 11)
+#define OMAP3430_AUTO_GPT10_MASK (1 << 11)
#define OMAP3430_AUTO_GPT10_SHIFT 11
-#define OMAP3430_AUTO_MCBSP5 (1 << 10)
+#define OMAP3430_AUTO_MCBSP5_MASK (1 << 10)
#define OMAP3430_AUTO_MCBSP5_SHIFT 10
-#define OMAP3430_AUTO_MCBSP1 (1 << 9)
+#define OMAP3430_AUTO_MCBSP1_MASK (1 << 9)
#define OMAP3430_AUTO_MCBSP1_SHIFT 9
-#define OMAP3430ES1_AUTO_FAC (1 << 8)
+#define OMAP3430ES1_AUTO_FAC_MASK (1 << 8)
#define OMAP3430ES1_AUTO_FAC_SHIFT 8
-#define OMAP3430_AUTO_MAILBOXES (1 << 7)
+#define OMAP3430_AUTO_MAILBOXES_MASK (1 << 7)
#define OMAP3430_AUTO_MAILBOXES_SHIFT 7
-#define OMAP3430_AUTO_OMAPCTRL (1 << 6)
+#define OMAP3430_AUTO_OMAPCTRL_MASK (1 << 6)
#define OMAP3430_AUTO_OMAPCTRL_SHIFT 6
-#define OMAP3430ES1_AUTO_FSHOSTUSB (1 << 5)
+#define OMAP3430ES1_AUTO_FSHOSTUSB_MASK (1 << 5)
#define OMAP3430ES1_AUTO_FSHOSTUSB_SHIFT 5
-#define OMAP3430_AUTO_HSOTGUSB (1 << 4)
+#define OMAP3430_AUTO_HSOTGUSB_MASK (1 << 4)
#define OMAP3430_AUTO_HSOTGUSB_SHIFT 4
-#define OMAP3430ES1_AUTO_D2D (1 << 3)
+#define OMAP3430ES1_AUTO_D2D_MASK (1 << 3)
#define OMAP3430ES1_AUTO_D2D_SHIFT 3
-#define OMAP3430_AUTO_SAD2D (1 << 3)
+#define OMAP3430_AUTO_SAD2D_MASK (1 << 3)
#define OMAP3430_AUTO_SAD2D_SHIFT 3
-#define OMAP3430_AUTO_SSI (1 << 0)
+#define OMAP3430_AUTO_SSI_MASK (1 << 0)
#define OMAP3430_AUTO_SSI_SHIFT 0
/* CM_AUTOIDLE2_CORE */
-#define OMAP3430_AUTO_PKA (1 << 4)
+#define OMAP3430_AUTO_PKA_MASK (1 << 4)
#define OMAP3430_AUTO_PKA_SHIFT 4
-#define OMAP3430_AUTO_AES1 (1 << 3)
+#define OMAP3430_AUTO_AES1_MASK (1 << 3)
#define OMAP3430_AUTO_AES1_SHIFT 3
-#define OMAP3430_AUTO_RNG (1 << 2)
+#define OMAP3430_AUTO_RNG_MASK (1 << 2)
#define OMAP3430_AUTO_RNG_SHIFT 2
-#define OMAP3430_AUTO_SHA11 (1 << 1)
+#define OMAP3430_AUTO_SHA11_MASK (1 << 1)
#define OMAP3430_AUTO_SHA11_SHIFT 1
-#define OMAP3430_AUTO_DES1 (1 << 0)
+#define OMAP3430_AUTO_DES1_MASK (1 << 0)
#define OMAP3430_AUTO_DES1_SHIFT 0
/* CM_AUTOIDLE3_CORE */
@@ -331,7 +331,7 @@
#define OMAP3430ES2_AUTO_USBTLL_SHIFT 2
#define OMAP3430ES2_AUTO_USBTLL_MASK (1 << 2)
#define OMAP3430_AUTO_MAD2D_SHIFT 3
-#define OMAP3430_AUTO_MAD2D (1 << 3)
+#define OMAP3430_AUTO_MAD2D_MASK (1 << 3)
/* CM_CLKSEL_CORE */
#define OMAP3430_CLKSEL_SSI_SHIFT 8
@@ -366,9 +366,9 @@
#define OMAP3430_CLKACTIVITY_L3_MASK (1 << 0)
/* CM_FCLKEN_GFX */
-#define OMAP3430ES1_EN_3D (1 << 2)
+#define OMAP3430ES1_EN_3D_MASK (1 << 2)
#define OMAP3430ES1_EN_3D_SHIFT 2
-#define OMAP3430ES1_EN_2D (1 << 1)
+#define OMAP3430ES1_EN_2D_MASK (1 << 1)
#define OMAP3430ES1_EN_2D_SHIFT 1
/* CM_ICLKEN_GFX specific bits */
@@ -416,9 +416,9 @@
#define OMAP3430ES2_EN_USIMOCP_MASK (1 << 9)
/* CM_ICLKEN_WKUP specific bits */
-#define OMAP3430_EN_WDT1 (1 << 4)
+#define OMAP3430_EN_WDT1_MASK (1 << 4)
#define OMAP3430_EN_WDT1_SHIFT 4
-#define OMAP3430_EN_32KSYNC (1 << 2)
+#define OMAP3430_EN_32KSYNC_MASK (1 << 2)
#define OMAP3430_EN_32KSYNC_SHIFT 2
/* CM_IDLEST_WKUP specific bits */
@@ -432,19 +432,19 @@
#define OMAP3430_ST_32KSYNC_MASK (1 << 2)
/* CM_AUTOIDLE_WKUP */
-#define OMAP3430ES2_AUTO_USIMOCP (1 << 9)
+#define OMAP3430ES2_AUTO_USIMOCP_MASK (1 << 9)
#define OMAP3430ES2_AUTO_USIMOCP_SHIFT 9
-#define OMAP3430_AUTO_WDT2 (1 << 5)
+#define OMAP3430_AUTO_WDT2_MASK (1 << 5)
#define OMAP3430_AUTO_WDT2_SHIFT 5
-#define OMAP3430_AUTO_WDT1 (1 << 4)
+#define OMAP3430_AUTO_WDT1_MASK (1 << 4)
#define OMAP3430_AUTO_WDT1_SHIFT 4
-#define OMAP3430_AUTO_GPIO1 (1 << 3)
+#define OMAP3430_AUTO_GPIO1_MASK (1 << 3)
#define OMAP3430_AUTO_GPIO1_SHIFT 3
-#define OMAP3430_AUTO_32KSYNC (1 << 2)
+#define OMAP3430_AUTO_32KSYNC_MASK (1 << 2)
#define OMAP3430_AUTO_32KSYNC_SHIFT 2
-#define OMAP3430_AUTO_GPT12 (1 << 1)
+#define OMAP3430_AUTO_GPT12_MASK (1 << 1)
#define OMAP3430_AUTO_GPT12_SHIFT 1
-#define OMAP3430_AUTO_GPT1 (1 << 0)
+#define OMAP3430_AUTO_GPT1_MASK (1 << 0)
#define OMAP3430_AUTO_GPT1_SHIFT 0
/* CM_CLKSEL_WKUP */
@@ -479,7 +479,7 @@
#define OMAP3430_EN_CORE_DPLL_MASK (0x7 << 0)
/* CM_CLKEN2_PLL */
-#define OMAP3430ES2_EN_PERIPH2_DPLL_LPMODE_SHIFT 10
+#define OMAP3430ES2_EN_PERIPH2_DPLL_LPMODE_SHIFT 10
#define OMAP3430ES2_PERIPH2_DPLL_RAMPTIME_MASK (0x3 << 8)
#define OMAP3430ES2_PERIPH2_DPLL_FREQSEL_SHIFT 4
#define OMAP3430ES2_PERIPH2_DPLL_FREQSEL_MASK (0xf << 4)
@@ -488,10 +488,10 @@
#define OMAP3430ES2_EN_PERIPH2_DPLL_MASK (0x7 << 0)
/* CM_IDLEST_CKGEN */
-#define OMAP3430_ST_54M_CLK (1 << 5)
-#define OMAP3430_ST_12M_CLK (1 << 4)
-#define OMAP3430_ST_48M_CLK (1 << 3)
-#define OMAP3430_ST_96M_CLK (1 << 2)
+#define OMAP3430_ST_54M_CLK_MASK (1 << 5)
+#define OMAP3430_ST_12M_CLK_MASK (1 << 4)
+#define OMAP3430_ST_48M_CLK_MASK (1 << 3)
+#define OMAP3430_ST_96M_CLK_MASK (1 << 2)
#define OMAP3430_ST_PERIPH_CLK_SHIFT 1
#define OMAP3430_ST_PERIPH_CLK_MASK (1 << 1)
#define OMAP3430_ST_CORE_CLK_SHIFT 0
@@ -558,22 +558,22 @@
/* CM_CLKOUT_CTRL */
#define OMAP3430_CLKOUT2_EN_SHIFT 7
-#define OMAP3430_CLKOUT2_EN (1 << 7)
+#define OMAP3430_CLKOUT2_EN_MASK (1 << 7)
#define OMAP3430_CLKOUT2_DIV_SHIFT 3
#define OMAP3430_CLKOUT2_DIV_MASK (0x7 << 3)
#define OMAP3430_CLKOUT2SOURCE_SHIFT 0
#define OMAP3430_CLKOUT2SOURCE_MASK (0x3 << 0)
/* CM_FCLKEN_DSS */
-#define OMAP3430_EN_TV (1 << 2)
+#define OMAP3430_EN_TV_MASK (1 << 2)
#define OMAP3430_EN_TV_SHIFT 2
-#define OMAP3430_EN_DSS2 (1 << 1)
+#define OMAP3430_EN_DSS2_MASK (1 << 1)
#define OMAP3430_EN_DSS2_SHIFT 1
-#define OMAP3430_EN_DSS1 (1 << 0)
+#define OMAP3430_EN_DSS1_MASK (1 << 0)
#define OMAP3430_EN_DSS1_SHIFT 0
/* CM_ICLKEN_DSS */
-#define OMAP3430_CM_ICLKEN_DSS_EN_DSS (1 << 0)
+#define OMAP3430_CM_ICLKEN_DSS_EN_DSS_MASK (1 << 0)
#define OMAP3430_CM_ICLKEN_DSS_EN_DSS_SHIFT 0
/* CM_IDLEST_DSS */
@@ -585,7 +585,7 @@
#define OMAP3430ES1_ST_DSS_MASK (1 << 0)
/* CM_AUTOIDLE_DSS */
-#define OMAP3430_AUTO_DSS (1 << 0)
+#define OMAP3430_AUTO_DSS_MASK (1 << 0)
#define OMAP3430_AUTO_DSS_SHIFT 0
/* CM_CLKSEL_DSS */
@@ -607,16 +607,16 @@
#define OMAP3430_CLKACTIVITY_DSS_MASK (1 << 0)
/* CM_FCLKEN_CAM specific bits */
-#define OMAP3430_EN_CSI2 (1 << 1)
+#define OMAP3430_EN_CSI2_MASK (1 << 1)
#define OMAP3430_EN_CSI2_SHIFT 1
/* CM_ICLKEN_CAM specific bits */
/* CM_IDLEST_CAM */
-#define OMAP3430_ST_CAM (1 << 0)
+#define OMAP3430_ST_CAM_MASK (1 << 0)
/* CM_AUTOIDLE_CAM */
-#define OMAP3430_AUTO_CAM (1 << 0)
+#define OMAP3430_AUTO_CAM_MASK (1 << 0)
#define OMAP3430_AUTO_CAM_SHIFT 0
/* CM_CLKSEL_CAM */
@@ -649,41 +649,41 @@
#define OMAP3430_ST_MCBSP2_MASK (1 << 0)
/* CM_AUTOIDLE_PER */
-#define OMAP3430_AUTO_GPIO6 (1 << 17)
+#define OMAP3430_AUTO_GPIO6_MASK (1 << 17)
#define OMAP3430_AUTO_GPIO6_SHIFT 17
-#define OMAP3430_AUTO_GPIO5 (1 << 16)
+#define OMAP3430_AUTO_GPIO5_MASK (1 << 16)
#define OMAP3430_AUTO_GPIO5_SHIFT 16
-#define OMAP3430_AUTO_GPIO4 (1 << 15)
+#define OMAP3430_AUTO_GPIO4_MASK (1 << 15)
#define OMAP3430_AUTO_GPIO4_SHIFT 15
-#define OMAP3430_AUTO_GPIO3 (1 << 14)
+#define OMAP3430_AUTO_GPIO3_MASK (1 << 14)
#define OMAP3430_AUTO_GPIO3_SHIFT 14
-#define OMAP3430_AUTO_GPIO2 (1 << 13)
+#define OMAP3430_AUTO_GPIO2_MASK (1 << 13)
#define OMAP3430_AUTO_GPIO2_SHIFT 13
-#define OMAP3430_AUTO_WDT3 (1 << 12)
+#define OMAP3430_AUTO_WDT3_MASK (1 << 12)
#define OMAP3430_AUTO_WDT3_SHIFT 12
-#define OMAP3430_AUTO_UART3 (1 << 11)
+#define OMAP3430_AUTO_UART3_MASK (1 << 11)
#define OMAP3430_AUTO_UART3_SHIFT 11
-#define OMAP3430_AUTO_GPT9 (1 << 10)
+#define OMAP3430_AUTO_GPT9_MASK (1 << 10)
#define OMAP3430_AUTO_GPT9_SHIFT 10
-#define OMAP3430_AUTO_GPT8 (1 << 9)
+#define OMAP3430_AUTO_GPT8_MASK (1 << 9)
#define OMAP3430_AUTO_GPT8_SHIFT 9
-#define OMAP3430_AUTO_GPT7 (1 << 8)
+#define OMAP3430_AUTO_GPT7_MASK (1 << 8)
#define OMAP3430_AUTO_GPT7_SHIFT 8
-#define OMAP3430_AUTO_GPT6 (1 << 7)
+#define OMAP3430_AUTO_GPT6_MASK (1 << 7)
#define OMAP3430_AUTO_GPT6_SHIFT 7
-#define OMAP3430_AUTO_GPT5 (1 << 6)
+#define OMAP3430_AUTO_GPT5_MASK (1 << 6)
#define OMAP3430_AUTO_GPT5_SHIFT 6
-#define OMAP3430_AUTO_GPT4 (1 << 5)
+#define OMAP3430_AUTO_GPT4_MASK (1 << 5)
#define OMAP3430_AUTO_GPT4_SHIFT 5
-#define OMAP3430_AUTO_GPT3 (1 << 4)
+#define OMAP3430_AUTO_GPT3_MASK (1 << 4)
#define OMAP3430_AUTO_GPT3_SHIFT 4
-#define OMAP3430_AUTO_GPT2 (1 << 3)
+#define OMAP3430_AUTO_GPT2_MASK (1 << 3)
#define OMAP3430_AUTO_GPT2_SHIFT 3
-#define OMAP3430_AUTO_MCBSP4 (1 << 2)
+#define OMAP3430_AUTO_MCBSP4_MASK (1 << 2)
#define OMAP3430_AUTO_MCBSP4_SHIFT 2
-#define OMAP3430_AUTO_MCBSP3 (1 << 1)
+#define OMAP3430_AUTO_MCBSP3_MASK (1 << 1)
#define OMAP3430_AUTO_MCBSP3_SHIFT 1
-#define OMAP3430_AUTO_MCBSP2 (1 << 0)
+#define OMAP3430_AUTO_MCBSP2_MASK (1 << 0)
#define OMAP3430_AUTO_MCBSP2_SHIFT 0
/* CM_CLKSEL_PER */
@@ -705,7 +705,7 @@
#define OMAP3430_CLKSEL_GPT2_SHIFT 0
/* CM_SLEEPDEP_PER specific bits */
-#define OMAP3430_CM_SLEEPDEP_PER_EN_IVA2 (1 << 2)
+#define OMAP3430_CM_SLEEPDEP_PER_EN_IVA2_MASK (1 << 2)
/* CM_CLKSTCTRL_PER */
#define OMAP3430_CLKTRCTRL_PER_SHIFT 0
@@ -755,10 +755,10 @@
#define OMAP3430_PERIPH_DPLL_EMU_DIV_MASK (0x7f << 0)
/* CM_POLCTRL */
-#define OMAP3430_CLKOUT2_POL (1 << 0)
+#define OMAP3430_CLKOUT2_POL_MASK (1 << 0)
/* CM_IDLEST_NEON */
-#define OMAP3430_ST_NEON (1 << 0)
+#define OMAP3430_ST_NEON_MASK (1 << 0)
/* CM_CLKSTCTRL_NEON */
#define OMAP3430_CLKTRCTRL_NEON_SHIFT 0
diff --git a/arch/arm/mach-omap2/cm.c b/arch/arm/mach-omap2/cm.c
index 58e4a1c557d8..2d83565d2be2 100644
--- a/arch/arm/mach-omap2/cm.c
+++ b/arch/arm/mach-omap2/cm.c
@@ -27,9 +27,6 @@
#include "cm-regbits-24xx.h"
#include "cm-regbits-34xx.h"
-/* MAX_MODULE_READY_TIME: max milliseconds for module to leave idle */
-#define MAX_MODULE_READY_TIME 20000
-
static const u8 cm_idlest_offs[] = {
CM_IDLEST1, CM_IDLEST2, OMAP2430_CM_IDLEST3
};
diff --git a/arch/arm/mach-omap2/cm.h b/arch/arm/mach-omap2/cm.h
index 94728b1ee3c4..a02ca30423dc 100644
--- a/arch/arm/mach-omap2/cm.h
+++ b/arch/arm/mach-omap2/cm.h
@@ -112,7 +112,7 @@ extern u32 cm_rmw_mod_reg_bits(u32 mask, u32 bits, s16 module, s16 idx);
extern int omap2_cm_wait_module_ready(s16 prcm_mod, u8 idlest_id,
u8 idlest_shift);
-extern int omap4_cm_wait_module_ready(u32 prcm_mod, u8 prcm_dev_offs);
+extern int omap4_cm_wait_module_ready(void __iomem *clkctrl_reg);
static inline u32 cm_set_mod_reg_bits(u32 bits, s16 module, s16 idx)
{
@@ -134,13 +134,23 @@ static inline u32 cm_clear_mod_reg_bits(u32 bits, s16 module, s16 idx)
/* CM_ICLKEN_GFX */
#define OMAP_EN_GFX_SHIFT 0
-#define OMAP_EN_GFX (1 << 0)
+#define OMAP_EN_GFX_MASK (1 << 0)
/* CM_IDLEST_GFX */
-#define OMAP_ST_GFX (1 << 0)
+#define OMAP_ST_GFX_MASK (1 << 0)
+
/* CM_IDLEST indicator */
#define OMAP24XX_CM_IDLEST_VAL 0
#define OMAP34XX_CM_IDLEST_VAL 1
+/*
+ * MAX_MODULE_READY_TIME: maximum duration, in microseconds, to wait for
+ * the PRCM to request that a module exit the inactive state, in the case
+ * of OMAP2 & 3.
+ * In the case of OMAP4 this is the maximum duration, in microseconds, for
+ * the module to reach the functional state from an inactive state.
+ */
+#define MAX_MODULE_READY_TIME 2000
+
#endif
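
For clarity, here is a minimal, self-contained sketch of the polling pattern this timeout bounds. The cm4xxx.c hunk below uses omap_test_timeout() from <plat/common.h>; the helper name and the 1 µs udelay() step here are illustrative assumptions, not the kernel macro itself.

#include <linux/types.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/errno.h>

#define MAX_MODULE_READY_TIME	2000	/* microseconds */

/* Illustrative helper: poll @reg until (reg & mask) reads zero or the timeout expires */
static int wait_bits_clear(void __iomem *reg, u32 mask, unsigned int timeout_us)
{
	unsigned int i;

	for (i = 0; i < timeout_us; i++) {
		if (!(__raw_readl(reg) & mask))
			return 0;	/* module reached the functional state */
		udelay(1);		/* one poll per microsecond */
	}
	return -EBUSY;			/* still not ready after ~2 ms */
}
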
diff --git a/arch/arm/mach-omap2/cm44xx.h b/arch/arm/mach-omap2/cm44xx.h
index c575b9b0c041..336d94889e5b 100644
--- a/arch/arm/mach-omap2/cm44xx.h
+++ b/arch/arm/mach-omap2/cm44xx.h
@@ -1,8 +1,8 @@
/*
* OMAP44xx CM1 & CM2 instance offset macros
*
- * Copyright (C) 2009 Texas Instruments, Inc.
- * Copyright (C) 2009 Nokia Corporation
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ * Copyright (C) 2009-2010 Nokia Corporation
*
* Paul Walmsley (paul@pwsan.com)
* Rajendra Nayak (rnayak@ti.com)
@@ -25,334 +25,557 @@
/* CM1 */
-
/* CM1.OCP_SOCKET_CM1 register offsets */
+#define OMAP4_REVISION_CM1_OFFSET 0x0000
#define OMAP4430_REVISION_CM1 OMAP44XX_CM1_REGADDR(OMAP4430_CM1_OCP_SOCKET_MOD, 0x0000)
+#define OMAP4_CM_CM1_PROFILING_CLKCTRL_OFFSET 0x0040
#define OMAP4430_CM_CM1_PROFILING_CLKCTRL OMAP44XX_CM1_REGADDR(OMAP4430_CM1_OCP_SOCKET_MOD, 0x0040)
/* CM1.CKGEN_CM1 register offsets */
+#define OMAP4_CM_CLKSEL_CORE_OFFSET 0x0000
#define OMAP4430_CM_CLKSEL_CORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0000)
+#define OMAP4_CM_CLKSEL_ABE_OFFSET 0x0008
#define OMAP4430_CM_CLKSEL_ABE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0008)
+#define OMAP4_CM_DLL_CTRL_OFFSET 0x0010
#define OMAP4430_CM_DLL_CTRL OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0010)
+#define OMAP4_CM_CLKMODE_DPLL_CORE_OFFSET 0x0020
#define OMAP4430_CM_CLKMODE_DPLL_CORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0020)
+#define OMAP4_CM_IDLEST_DPLL_CORE_OFFSET 0x0024
#define OMAP4430_CM_IDLEST_DPLL_CORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0024)
+#define OMAP4_CM_AUTOIDLE_DPLL_CORE_OFFSET 0x0028
#define OMAP4430_CM_AUTOIDLE_DPLL_CORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0028)
+#define OMAP4_CM_CLKSEL_DPLL_CORE_OFFSET 0x002c
#define OMAP4430_CM_CLKSEL_DPLL_CORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x002c)
+#define OMAP4_CM_DIV_M2_DPLL_CORE_OFFSET 0x0030
#define OMAP4430_CM_DIV_M2_DPLL_CORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0030)
+#define OMAP4_CM_DIV_M3_DPLL_CORE_OFFSET 0x0034
#define OMAP4430_CM_DIV_M3_DPLL_CORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0034)
+#define OMAP4_CM_DIV_M4_DPLL_CORE_OFFSET 0x0038
#define OMAP4430_CM_DIV_M4_DPLL_CORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0038)
+#define OMAP4_CM_DIV_M5_DPLL_CORE_OFFSET 0x003c
#define OMAP4430_CM_DIV_M5_DPLL_CORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x003c)
+#define OMAP4_CM_DIV_M6_DPLL_CORE_OFFSET 0x0040
#define OMAP4430_CM_DIV_M6_DPLL_CORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0040)
+#define OMAP4_CM_DIV_M7_DPLL_CORE_OFFSET 0x0044
#define OMAP4430_CM_DIV_M7_DPLL_CORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0044)
+#define OMAP4_CM_SSC_DELTAMSTEP_DPLL_CORE_OFFSET 0x0048
#define OMAP4430_CM_SSC_DELTAMSTEP_DPLL_CORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0048)
+#define OMAP4_CM_SSC_MODFREQDIV_DPLL_CORE_OFFSET 0x004c
#define OMAP4430_CM_SSC_MODFREQDIV_DPLL_CORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x004c)
+#define OMAP4_CM_EMU_OVERRIDE_DPLL_CORE_OFFSET 0x0050
#define OMAP4430_CM_EMU_OVERRIDE_DPLL_CORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0050)
+#define OMAP4_CM_CLKMODE_DPLL_MPU_OFFSET 0x0060
#define OMAP4430_CM_CLKMODE_DPLL_MPU OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0060)
+#define OMAP4_CM_IDLEST_DPLL_MPU_OFFSET 0x0064
#define OMAP4430_CM_IDLEST_DPLL_MPU OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0064)
+#define OMAP4_CM_AUTOIDLE_DPLL_MPU_OFFSET 0x0068
#define OMAP4430_CM_AUTOIDLE_DPLL_MPU OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0068)
+#define OMAP4_CM_CLKSEL_DPLL_MPU_OFFSET 0x006c
#define OMAP4430_CM_CLKSEL_DPLL_MPU OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x006c)
+#define OMAP4_CM_DIV_M2_DPLL_MPU_OFFSET 0x0070
#define OMAP4430_CM_DIV_M2_DPLL_MPU OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0070)
+#define OMAP4_CM_SSC_DELTAMSTEP_DPLL_MPU_OFFSET 0x0088
#define OMAP4430_CM_SSC_DELTAMSTEP_DPLL_MPU OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0088)
+#define OMAP4_CM_SSC_MODFREQDIV_DPLL_MPU_OFFSET 0x008c
#define OMAP4430_CM_SSC_MODFREQDIV_DPLL_MPU OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x008c)
+#define OMAP4_CM_BYPCLK_DPLL_MPU_OFFSET 0x009c
#define OMAP4430_CM_BYPCLK_DPLL_MPU OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x009c)
+#define OMAP4_CM_CLKMODE_DPLL_IVA_OFFSET 0x00a0
#define OMAP4430_CM_CLKMODE_DPLL_IVA OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x00a0)
+#define OMAP4_CM_IDLEST_DPLL_IVA_OFFSET 0x00a4
#define OMAP4430_CM_IDLEST_DPLL_IVA OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x00a4)
+#define OMAP4_CM_AUTOIDLE_DPLL_IVA_OFFSET 0x00a8
#define OMAP4430_CM_AUTOIDLE_DPLL_IVA OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x00a8)
+#define OMAP4_CM_CLKSEL_DPLL_IVA_OFFSET 0x00ac
#define OMAP4430_CM_CLKSEL_DPLL_IVA OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x00ac)
+#define OMAP4_CM_DIV_M4_DPLL_IVA_OFFSET 0x00b8
#define OMAP4430_CM_DIV_M4_DPLL_IVA OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x00b8)
+#define OMAP4_CM_DIV_M5_DPLL_IVA_OFFSET 0x00bc
#define OMAP4430_CM_DIV_M5_DPLL_IVA OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x00bc)
+#define OMAP4_CM_SSC_DELTAMSTEP_DPLL_IVA_OFFSET 0x00c8
#define OMAP4430_CM_SSC_DELTAMSTEP_DPLL_IVA OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x00c8)
+#define OMAP4_CM_SSC_MODFREQDIV_DPLL_IVA_OFFSET 0x00cc
#define OMAP4430_CM_SSC_MODFREQDIV_DPLL_IVA OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x00cc)
+#define OMAP4_CM_BYPCLK_DPLL_IVA_OFFSET 0x00dc
#define OMAP4430_CM_BYPCLK_DPLL_IVA OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x00dc)
+#define OMAP4_CM_CLKMODE_DPLL_ABE_OFFSET 0x00e0
#define OMAP4430_CM_CLKMODE_DPLL_ABE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x00e0)
+#define OMAP4_CM_IDLEST_DPLL_ABE_OFFSET 0x00e4
#define OMAP4430_CM_IDLEST_DPLL_ABE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x00e4)
+#define OMAP4_CM_AUTOIDLE_DPLL_ABE_OFFSET 0x00e8
#define OMAP4430_CM_AUTOIDLE_DPLL_ABE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x00e8)
+#define OMAP4_CM_CLKSEL_DPLL_ABE_OFFSET 0x00ec
#define OMAP4430_CM_CLKSEL_DPLL_ABE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x00ec)
+#define OMAP4_CM_DIV_M2_DPLL_ABE_OFFSET 0x00f0
#define OMAP4430_CM_DIV_M2_DPLL_ABE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x00f0)
+#define OMAP4_CM_DIV_M3_DPLL_ABE_OFFSET 0x00f4
#define OMAP4430_CM_DIV_M3_DPLL_ABE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x00f4)
+#define OMAP4_CM_SSC_DELTAMSTEP_DPLL_ABE_OFFSET 0x0108
#define OMAP4430_CM_SSC_DELTAMSTEP_DPLL_ABE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0108)
+#define OMAP4_CM_SSC_MODFREQDIV_DPLL_ABE_OFFSET 0x010c
#define OMAP4430_CM_SSC_MODFREQDIV_DPLL_ABE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x010c)
+#define OMAP4_CM_CLKMODE_DPLL_DDRPHY_OFFSET 0x0120
#define OMAP4430_CM_CLKMODE_DPLL_DDRPHY OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0120)
+#define OMAP4_CM_IDLEST_DPLL_DDRPHY_OFFSET 0x0124
#define OMAP4430_CM_IDLEST_DPLL_DDRPHY OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0124)
+#define OMAP4_CM_AUTOIDLE_DPLL_DDRPHY_OFFSET 0x0128
#define OMAP4430_CM_AUTOIDLE_DPLL_DDRPHY OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0128)
+#define OMAP4_CM_CLKSEL_DPLL_DDRPHY_OFFSET 0x012c
#define OMAP4430_CM_CLKSEL_DPLL_DDRPHY OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x012c)
+#define OMAP4_CM_DIV_M2_DPLL_DDRPHY_OFFSET 0x0130
#define OMAP4430_CM_DIV_M2_DPLL_DDRPHY OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0130)
+#define OMAP4_CM_DIV_M4_DPLL_DDRPHY_OFFSET 0x0138
#define OMAP4430_CM_DIV_M4_DPLL_DDRPHY OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0138)
+#define OMAP4_CM_DIV_M5_DPLL_DDRPHY_OFFSET 0x013c
#define OMAP4430_CM_DIV_M5_DPLL_DDRPHY OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x013c)
+#define OMAP4_CM_DIV_M6_DPLL_DDRPHY_OFFSET 0x0140
#define OMAP4430_CM_DIV_M6_DPLL_DDRPHY OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0140)
+#define OMAP4_CM_SSC_DELTAMSTEP_DPLL_DDRPHY_OFFSET 0x0148
#define OMAP4430_CM_SSC_DELTAMSTEP_DPLL_DDRPHY OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0148)
+#define OMAP4_CM_SSC_MODFREQDIV_DPLL_DDRPHY_OFFSET 0x014c
#define OMAP4430_CM_SSC_MODFREQDIV_DPLL_DDRPHY OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x014c)
+#define OMAP4_CM_SHADOW_FREQ_CONFIG1_OFFSET 0x0160
#define OMAP4430_CM_SHADOW_FREQ_CONFIG1 OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0160)
+#define OMAP4_CM_SHADOW_FREQ_CONFIG2_OFFSET 0x0164
#define OMAP4430_CM_SHADOW_FREQ_CONFIG2 OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0164)
+#define OMAP4_CM_DYN_DEP_PRESCAL_OFFSET 0x0170
#define OMAP4430_CM_DYN_DEP_PRESCAL OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0170)
+#define OMAP4_CM_RESTORE_ST_OFFSET 0x0180
#define OMAP4430_CM_RESTORE_ST OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0180)
/* CM1.MPU_CM1 register offsets */
+#define OMAP4_CM_MPU_CLKSTCTRL_OFFSET 0x0000
#define OMAP4430_CM_MPU_CLKSTCTRL OMAP44XX_CM1_REGADDR(OMAP4430_CM1_MPU_MOD, 0x0000)
+#define OMAP4_CM_MPU_STATICDEP_OFFSET 0x0004
#define OMAP4430_CM_MPU_STATICDEP OMAP44XX_CM1_REGADDR(OMAP4430_CM1_MPU_MOD, 0x0004)
+#define OMAP4_CM_MPU_DYNAMICDEP_OFFSET 0x0008
#define OMAP4430_CM_MPU_DYNAMICDEP OMAP44XX_CM1_REGADDR(OMAP4430_CM1_MPU_MOD, 0x0008)
+#define OMAP4_CM_MPU_MPU_CLKCTRL_OFFSET 0x0020
#define OMAP4430_CM_MPU_MPU_CLKCTRL OMAP44XX_CM1_REGADDR(OMAP4430_CM1_MPU_MOD, 0x0020)
/* CM1.TESLA_CM1 register offsets */
+#define OMAP4_CM_TESLA_CLKSTCTRL_OFFSET 0x0000
#define OMAP4430_CM_TESLA_CLKSTCTRL OMAP44XX_CM1_REGADDR(OMAP4430_CM1_TESLA_MOD, 0x0000)
+#define OMAP4_CM_TESLA_STATICDEP_OFFSET 0x0004
#define OMAP4430_CM_TESLA_STATICDEP OMAP44XX_CM1_REGADDR(OMAP4430_CM1_TESLA_MOD, 0x0004)
+#define OMAP4_CM_TESLA_DYNAMICDEP_OFFSET 0x0008
#define OMAP4430_CM_TESLA_DYNAMICDEP OMAP44XX_CM1_REGADDR(OMAP4430_CM1_TESLA_MOD, 0x0008)
+#define OMAP4_CM_TESLA_TESLA_CLKCTRL_OFFSET 0x0020
#define OMAP4430_CM_TESLA_TESLA_CLKCTRL OMAP44XX_CM1_REGADDR(OMAP4430_CM1_TESLA_MOD, 0x0020)
/* CM1.ABE_CM1 register offsets */
+#define OMAP4_CM1_ABE_CLKSTCTRL_OFFSET 0x0000
#define OMAP4430_CM1_ABE_CLKSTCTRL OMAP44XX_CM1_REGADDR(OMAP4430_CM1_ABE_MOD, 0x0000)
+#define OMAP4_CM1_ABE_L4ABE_CLKCTRL_OFFSET 0x0020
#define OMAP4430_CM1_ABE_L4ABE_CLKCTRL OMAP44XX_CM1_REGADDR(OMAP4430_CM1_ABE_MOD, 0x0020)
+#define OMAP4_CM1_ABE_AESS_CLKCTRL_OFFSET 0x0028
#define OMAP4430_CM1_ABE_AESS_CLKCTRL OMAP44XX_CM1_REGADDR(OMAP4430_CM1_ABE_MOD, 0x0028)
+#define OMAP4_CM1_ABE_PDM_CLKCTRL_OFFSET 0x0030
#define OMAP4430_CM1_ABE_PDM_CLKCTRL OMAP44XX_CM1_REGADDR(OMAP4430_CM1_ABE_MOD, 0x0030)
+#define OMAP4_CM1_ABE_DMIC_CLKCTRL_OFFSET 0x0038
#define OMAP4430_CM1_ABE_DMIC_CLKCTRL OMAP44XX_CM1_REGADDR(OMAP4430_CM1_ABE_MOD, 0x0038)
+#define OMAP4_CM1_ABE_MCASP_CLKCTRL_OFFSET 0x0040
#define OMAP4430_CM1_ABE_MCASP_CLKCTRL OMAP44XX_CM1_REGADDR(OMAP4430_CM1_ABE_MOD, 0x0040)
+#define OMAP4_CM1_ABE_MCBSP1_CLKCTRL_OFFSET 0x0048
#define OMAP4430_CM1_ABE_MCBSP1_CLKCTRL OMAP44XX_CM1_REGADDR(OMAP4430_CM1_ABE_MOD, 0x0048)
+#define OMAP4_CM1_ABE_MCBSP2_CLKCTRL_OFFSET 0x0050
#define OMAP4430_CM1_ABE_MCBSP2_CLKCTRL OMAP44XX_CM1_REGADDR(OMAP4430_CM1_ABE_MOD, 0x0050)
+#define OMAP4_CM1_ABE_MCBSP3_CLKCTRL_OFFSET 0x0058
#define OMAP4430_CM1_ABE_MCBSP3_CLKCTRL OMAP44XX_CM1_REGADDR(OMAP4430_CM1_ABE_MOD, 0x0058)
+#define OMAP4_CM1_ABE_SLIMBUS_CLKCTRL_OFFSET 0x0060
#define OMAP4430_CM1_ABE_SLIMBUS_CLKCTRL OMAP44XX_CM1_REGADDR(OMAP4430_CM1_ABE_MOD, 0x0060)
+#define OMAP4_CM1_ABE_TIMER5_CLKCTRL_OFFSET 0x0068
#define OMAP4430_CM1_ABE_TIMER5_CLKCTRL OMAP44XX_CM1_REGADDR(OMAP4430_CM1_ABE_MOD, 0x0068)
+#define OMAP4_CM1_ABE_TIMER6_CLKCTRL_OFFSET 0x0070
#define OMAP4430_CM1_ABE_TIMER6_CLKCTRL OMAP44XX_CM1_REGADDR(OMAP4430_CM1_ABE_MOD, 0x0070)
+#define OMAP4_CM1_ABE_TIMER7_CLKCTRL_OFFSET 0x0078
#define OMAP4430_CM1_ABE_TIMER7_CLKCTRL OMAP44XX_CM1_REGADDR(OMAP4430_CM1_ABE_MOD, 0x0078)
+#define OMAP4_CM1_ABE_TIMER8_CLKCTRL_OFFSET 0x0080
#define OMAP4430_CM1_ABE_TIMER8_CLKCTRL OMAP44XX_CM1_REGADDR(OMAP4430_CM1_ABE_MOD, 0x0080)
+#define OMAP4_CM1_ABE_WDT3_CLKCTRL_OFFSET 0x0088
#define OMAP4430_CM1_ABE_WDT3_CLKCTRL OMAP44XX_CM1_REGADDR(OMAP4430_CM1_ABE_MOD, 0x0088)
-/* CM1.RESTORE_CM1 register offsets */
-#define OMAP4430_CM_CLKSEL_CORE_RESTORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_MOD, 0x0000)
-#define OMAP4430_CM_DIV_M2_DPLL_CORE_RESTORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_MOD, 0x0004)
-#define OMAP4430_CM_DIV_M3_DPLL_CORE_RESTORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_MOD, 0x0008)
-#define OMAP4430_CM_DIV_M4_DPLL_CORE_RESTORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_MOD, 0x000c)
-#define OMAP4430_CM_DIV_M5_DPLL_CORE_RESTORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_MOD, 0x0010)
-#define OMAP4430_CM_DIV_M6_DPLL_CORE_RESTORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_MOD, 0x0014)
-#define OMAP4430_CM_DIV_M7_DPLL_CORE_RESTORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_MOD, 0x0018)
-#define OMAP4430_CM_CLKSEL_DPLL_CORE_RESTORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_MOD, 0x001c)
-#define OMAP4430_CM_SSC_DELTAMSTEP_DPLL_CORE_RESTORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_MOD, 0x0020)
-#define OMAP4430_CM_SSC_MODFREQDIV_DPLL_CORE_RESTORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_MOD, 0x0024)
-#define OMAP4430_CM_CLKMODE_DPLL_CORE_RESTORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_MOD, 0x0028)
-#define OMAP4430_CM_SHADOW_FREQ_CONFIG1_RESTORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_MOD, 0x002c)
-#define OMAP4430_CM_AUTOIDLE_DPLL_CORE_RESTORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_MOD, 0x0030)
-#define OMAP4430_CM_MPU_CLKSTCTRL_RESTORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_MOD, 0x0034)
-
/* CM2 */
-
/* CM2.OCP_SOCKET_CM2 register offsets */
+#define OMAP4_REVISION_CM2_OFFSET 0x0000
#define OMAP4430_REVISION_CM2 OMAP44XX_CM2_REGADDR(OMAP4430_CM2_OCP_SOCKET_MOD, 0x0000)
+#define OMAP4_CM_CM2_PROFILING_CLKCTRL_OFFSET 0x0040
#define OMAP4430_CM_CM2_PROFILING_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_OCP_SOCKET_MOD, 0x0040)
/* CM2.CKGEN_CM2 register offsets */
+#define OMAP4_CM_CLKSEL_DUCATI_ISS_ROOT_OFFSET 0x0000
#define OMAP4430_CM_CLKSEL_DUCATI_ISS_ROOT OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0000)
+#define OMAP4_CM_CLKSEL_USB_60MHZ_OFFSET 0x0004
#define OMAP4430_CM_CLKSEL_USB_60MHZ OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0004)
+#define OMAP4_CM_SCALE_FCLK_OFFSET 0x0008
#define OMAP4430_CM_SCALE_FCLK OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0008)
+#define OMAP4_CM_CORE_DVFS_PERF1_OFFSET 0x0010
#define OMAP4430_CM_CORE_DVFS_PERF1 OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0010)
+#define OMAP4_CM_CORE_DVFS_PERF2_OFFSET 0x0014
#define OMAP4430_CM_CORE_DVFS_PERF2 OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0014)
+#define OMAP4_CM_CORE_DVFS_PERF3_OFFSET 0x0018
#define OMAP4430_CM_CORE_DVFS_PERF3 OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0018)
+#define OMAP4_CM_CORE_DVFS_PERF4_OFFSET 0x001c
#define OMAP4430_CM_CORE_DVFS_PERF4 OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x001c)
+#define OMAP4_CM_CORE_DVFS_CURRENT_OFFSET 0x0024
#define OMAP4430_CM_CORE_DVFS_CURRENT OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0024)
+#define OMAP4_CM_IVA_DVFS_PERF_TESLA_OFFSET 0x0028
#define OMAP4430_CM_IVA_DVFS_PERF_TESLA OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0028)
+#define OMAP4_CM_IVA_DVFS_PERF_IVAHD_OFFSET 0x002c
#define OMAP4430_CM_IVA_DVFS_PERF_IVAHD OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x002c)
+#define OMAP4_CM_IVA_DVFS_PERF_ABE_OFFSET 0x0030
#define OMAP4430_CM_IVA_DVFS_PERF_ABE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0030)
+#define OMAP4_CM_IVA_DVFS_CURRENT_OFFSET 0x0038
#define OMAP4430_CM_IVA_DVFS_CURRENT OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0038)
+#define OMAP4_CM_CLKMODE_DPLL_PER_OFFSET 0x0040
#define OMAP4430_CM_CLKMODE_DPLL_PER OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0040)
+#define OMAP4_CM_IDLEST_DPLL_PER_OFFSET 0x0044
#define OMAP4430_CM_IDLEST_DPLL_PER OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0044)
+#define OMAP4_CM_AUTOIDLE_DPLL_PER_OFFSET 0x0048
#define OMAP4430_CM_AUTOIDLE_DPLL_PER OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0048)
+#define OMAP4_CM_CLKSEL_DPLL_PER_OFFSET 0x004c
#define OMAP4430_CM_CLKSEL_DPLL_PER OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x004c)
+#define OMAP4_CM_DIV_M2_DPLL_PER_OFFSET 0x0050
#define OMAP4430_CM_DIV_M2_DPLL_PER OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0050)
+#define OMAP4_CM_DIV_M3_DPLL_PER_OFFSET 0x0054
#define OMAP4430_CM_DIV_M3_DPLL_PER OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0054)
+#define OMAP4_CM_DIV_M4_DPLL_PER_OFFSET 0x0058
#define OMAP4430_CM_DIV_M4_DPLL_PER OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0058)
+#define OMAP4_CM_DIV_M5_DPLL_PER_OFFSET 0x005c
#define OMAP4430_CM_DIV_M5_DPLL_PER OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x005c)
+#define OMAP4_CM_DIV_M6_DPLL_PER_OFFSET 0x0060
#define OMAP4430_CM_DIV_M6_DPLL_PER OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0060)
+#define OMAP4_CM_DIV_M7_DPLL_PER_OFFSET 0x0064
#define OMAP4430_CM_DIV_M7_DPLL_PER OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0064)
+#define OMAP4_CM_SSC_DELTAMSTEP_DPLL_PER_OFFSET 0x0068
#define OMAP4430_CM_SSC_DELTAMSTEP_DPLL_PER OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0068)
+#define OMAP4_CM_SSC_MODFREQDIV_DPLL_PER_OFFSET 0x006c
#define OMAP4430_CM_SSC_MODFREQDIV_DPLL_PER OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x006c)
+#define OMAP4_CM_EMU_OVERRIDE_DPLL_PER_OFFSET 0x0070
#define OMAP4430_CM_EMU_OVERRIDE_DPLL_PER OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0070)
+#define OMAP4_CM_CLKMODE_DPLL_USB_OFFSET 0x0080
#define OMAP4430_CM_CLKMODE_DPLL_USB OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0080)
+#define OMAP4_CM_IDLEST_DPLL_USB_OFFSET 0x0084
#define OMAP4430_CM_IDLEST_DPLL_USB OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0084)
+#define OMAP4_CM_AUTOIDLE_DPLL_USB_OFFSET 0x0088
#define OMAP4430_CM_AUTOIDLE_DPLL_USB OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0088)
+#define OMAP4_CM_CLKSEL_DPLL_USB_OFFSET 0x008c
#define OMAP4430_CM_CLKSEL_DPLL_USB OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x008c)
+#define OMAP4_CM_DIV_M2_DPLL_USB_OFFSET 0x0090
#define OMAP4430_CM_DIV_M2_DPLL_USB OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0090)
+#define OMAP4_CM_SSC_DELTAMSTEP_DPLL_USB_OFFSET 0x00a8
#define OMAP4430_CM_SSC_DELTAMSTEP_DPLL_USB OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x00a8)
+#define OMAP4_CM_SSC_MODFREQDIV_DPLL_USB_OFFSET 0x00ac
#define OMAP4430_CM_SSC_MODFREQDIV_DPLL_USB OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x00ac)
+#define OMAP4_CM_CLKDCOLDO_DPLL_USB_OFFSET 0x00b4
#define OMAP4430_CM_CLKDCOLDO_DPLL_USB OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x00b4)
+#define OMAP4_CM_CLKMODE_DPLL_UNIPRO_OFFSET 0x00c0
#define OMAP4430_CM_CLKMODE_DPLL_UNIPRO OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x00c0)
+#define OMAP4_CM_IDLEST_DPLL_UNIPRO_OFFSET 0x00c4
#define OMAP4430_CM_IDLEST_DPLL_UNIPRO OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x00c4)
+#define OMAP4_CM_AUTOIDLE_DPLL_UNIPRO_OFFSET 0x00c8
#define OMAP4430_CM_AUTOIDLE_DPLL_UNIPRO OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x00c8)
+#define OMAP4_CM_CLKSEL_DPLL_UNIPRO_OFFSET 0x00cc
#define OMAP4430_CM_CLKSEL_DPLL_UNIPRO OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x00cc)
+#define OMAP4_CM_DIV_M2_DPLL_UNIPRO_OFFSET 0x00d0
#define OMAP4430_CM_DIV_M2_DPLL_UNIPRO OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x00d0)
+#define OMAP4_CM_SSC_DELTAMSTEP_DPLL_UNIPRO_OFFSET 0x00e8
#define OMAP4430_CM_SSC_DELTAMSTEP_DPLL_UNIPRO OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x00e8)
+#define OMAP4_CM_SSC_MODFREQDIV_DPLL_UNIPRO_OFFSET 0x00ec
#define OMAP4430_CM_SSC_MODFREQDIV_DPLL_UNIPRO OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x00ec)
/* CM2.ALWAYS_ON_CM2 register offsets */
+#define OMAP4_CM_ALWON_CLKSTCTRL_OFFSET 0x0000
#define OMAP4430_CM_ALWON_CLKSTCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_ALWAYS_ON_MOD, 0x0000)
+#define OMAP4_CM_ALWON_MDMINTC_CLKCTRL_OFFSET 0x0020
#define OMAP4430_CM_ALWON_MDMINTC_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_ALWAYS_ON_MOD, 0x0020)
+#define OMAP4_CM_ALWON_SR_MPU_CLKCTRL_OFFSET 0x0028
#define OMAP4430_CM_ALWON_SR_MPU_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_ALWAYS_ON_MOD, 0x0028)
+#define OMAP4_CM_ALWON_SR_IVA_CLKCTRL_OFFSET 0x0030
#define OMAP4430_CM_ALWON_SR_IVA_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_ALWAYS_ON_MOD, 0x0030)
+#define OMAP4_CM_ALWON_SR_CORE_CLKCTRL_OFFSET 0x0038
#define OMAP4430_CM_ALWON_SR_CORE_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_ALWAYS_ON_MOD, 0x0038)
/* CM2.CORE_CM2 register offsets */
+#define OMAP4_CM_L3_1_CLKSTCTRL_OFFSET 0x0000
#define OMAP4430_CM_L3_1_CLKSTCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0000)
+#define OMAP4_CM_L3_1_DYNAMICDEP_OFFSET 0x0008
#define OMAP4430_CM_L3_1_DYNAMICDEP OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0008)
+#define OMAP4_CM_L3_1_L3_1_CLKCTRL_OFFSET 0x0020
#define OMAP4430_CM_L3_1_L3_1_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0020)
+#define OMAP4_CM_L3_2_CLKSTCTRL_OFFSET 0x0100
#define OMAP4430_CM_L3_2_CLKSTCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0100)
+#define OMAP4_CM_L3_2_DYNAMICDEP_OFFSET 0x0108
#define OMAP4430_CM_L3_2_DYNAMICDEP OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0108)
+#define OMAP4_CM_L3_2_L3_2_CLKCTRL_OFFSET 0x0120
#define OMAP4430_CM_L3_2_L3_2_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0120)
+#define OMAP4_CM_L3_2_GPMC_CLKCTRL_OFFSET 0x0128
#define OMAP4430_CM_L3_2_GPMC_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0128)
+#define OMAP4_CM_L3_2_OCMC_RAM_CLKCTRL_OFFSET 0x0130
#define OMAP4430_CM_L3_2_OCMC_RAM_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0130)
+#define OMAP4_CM_DUCATI_CLKSTCTRL_OFFSET 0x0200
#define OMAP4430_CM_DUCATI_CLKSTCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0200)
+#define OMAP4_CM_DUCATI_STATICDEP_OFFSET 0x0204
#define OMAP4430_CM_DUCATI_STATICDEP OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0204)
+#define OMAP4_CM_DUCATI_DYNAMICDEP_OFFSET 0x0208
#define OMAP4430_CM_DUCATI_DYNAMICDEP OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0208)
+#define OMAP4_CM_DUCATI_DUCATI_CLKCTRL_OFFSET 0x0220
#define OMAP4430_CM_DUCATI_DUCATI_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0220)
+#define OMAP4_CM_SDMA_CLKSTCTRL_OFFSET 0x0300
#define OMAP4430_CM_SDMA_CLKSTCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0300)
+#define OMAP4_CM_SDMA_STATICDEP_OFFSET 0x0304
#define OMAP4430_CM_SDMA_STATICDEP OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0304)
+#define OMAP4_CM_SDMA_DYNAMICDEP_OFFSET 0x0308
#define OMAP4430_CM_SDMA_DYNAMICDEP OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0308)
+#define OMAP4_CM_SDMA_SDMA_CLKCTRL_OFFSET 0x0320
#define OMAP4430_CM_SDMA_SDMA_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0320)
+#define OMAP4_CM_MEMIF_CLKSTCTRL_OFFSET 0x0400
#define OMAP4430_CM_MEMIF_CLKSTCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0400)
+#define OMAP4_CM_MEMIF_DMM_CLKCTRL_OFFSET 0x0420
#define OMAP4430_CM_MEMIF_DMM_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0420)
+#define OMAP4_CM_MEMIF_EMIF_FW_CLKCTRL_OFFSET 0x0428
#define OMAP4430_CM_MEMIF_EMIF_FW_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0428)
+#define OMAP4_CM_MEMIF_EMIF_1_CLKCTRL_OFFSET 0x0430
#define OMAP4430_CM_MEMIF_EMIF_1_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0430)
+#define OMAP4_CM_MEMIF_EMIF_2_CLKCTRL_OFFSET 0x0438
#define OMAP4430_CM_MEMIF_EMIF_2_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0438)
+#define OMAP4_CM_MEMIF_DLL_CLKCTRL_OFFSET 0x0440
#define OMAP4430_CM_MEMIF_DLL_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0440)
+#define OMAP4_CM_MEMIF_EMIF_H1_CLKCTRL_OFFSET 0x0450
#define OMAP4430_CM_MEMIF_EMIF_H1_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0450)
+#define OMAP4_CM_MEMIF_EMIF_H2_CLKCTRL_OFFSET 0x0458
#define OMAP4430_CM_MEMIF_EMIF_H2_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0458)
+#define OMAP4_CM_MEMIF_DLL_H_CLKCTRL_OFFSET 0x0460
#define OMAP4430_CM_MEMIF_DLL_H_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0460)
+#define OMAP4_CM_D2D_CLKSTCTRL_OFFSET 0x0500
#define OMAP4430_CM_D2D_CLKSTCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0500)
+#define OMAP4_CM_D2D_STATICDEP_OFFSET 0x0504
#define OMAP4430_CM_D2D_STATICDEP OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0504)
+#define OMAP4_CM_D2D_DYNAMICDEP_OFFSET 0x0508
#define OMAP4430_CM_D2D_DYNAMICDEP OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0508)
+#define OMAP4_CM_D2D_SAD2D_CLKCTRL_OFFSET 0x0520
#define OMAP4430_CM_D2D_SAD2D_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0520)
+#define OMAP4_CM_D2D_MODEM_ICR_CLKCTRL_OFFSET 0x0528
#define OMAP4430_CM_D2D_MODEM_ICR_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0528)
+#define OMAP4_CM_D2D_SAD2D_FW_CLKCTRL_OFFSET 0x0530
#define OMAP4430_CM_D2D_SAD2D_FW_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0530)
+#define OMAP4_CM_L4CFG_CLKSTCTRL_OFFSET 0x0600
#define OMAP4430_CM_L4CFG_CLKSTCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0600)
+#define OMAP4_CM_L4CFG_DYNAMICDEP_OFFSET 0x0608
#define OMAP4430_CM_L4CFG_DYNAMICDEP OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0608)
+#define OMAP4_CM_L4CFG_L4_CFG_CLKCTRL_OFFSET 0x0620
#define OMAP4430_CM_L4CFG_L4_CFG_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0620)
+#define OMAP4_CM_L4CFG_HW_SEM_CLKCTRL_OFFSET 0x0628
#define OMAP4430_CM_L4CFG_HW_SEM_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0628)
+#define OMAP4_CM_L4CFG_MAILBOX_CLKCTRL_OFFSET 0x0630
#define OMAP4430_CM_L4CFG_MAILBOX_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0630)
+#define OMAP4_CM_L4CFG_SAR_ROM_CLKCTRL_OFFSET 0x0638
#define OMAP4430_CM_L4CFG_SAR_ROM_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0638)
+#define OMAP4_CM_L3INSTR_CLKSTCTRL_OFFSET 0x0700
#define OMAP4430_CM_L3INSTR_CLKSTCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0700)
+#define OMAP4_CM_L3INSTR_L3_3_CLKCTRL_OFFSET 0x0720
#define OMAP4430_CM_L3INSTR_L3_3_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0720)
+#define OMAP4_CM_L3INSTR_L3_INSTR_CLKCTRL_OFFSET 0x0728
#define OMAP4430_CM_L3INSTR_L3_INSTR_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0728)
+#define OMAP4_CM_L3INSTR_OCP_WP1_CLKCTRL_OFFSET 0x0740
#define OMAP4430_CM_L3INSTR_OCP_WP1_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0740)
/* CM2.IVAHD_CM2 register offsets */
+#define OMAP4_CM_IVAHD_CLKSTCTRL_OFFSET 0x0000
#define OMAP4430_CM_IVAHD_CLKSTCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_IVAHD_MOD, 0x0000)
+#define OMAP4_CM_IVAHD_STATICDEP_OFFSET 0x0004
#define OMAP4430_CM_IVAHD_STATICDEP OMAP44XX_CM2_REGADDR(OMAP4430_CM2_IVAHD_MOD, 0x0004)
+#define OMAP4_CM_IVAHD_DYNAMICDEP_OFFSET 0x0008
#define OMAP4430_CM_IVAHD_DYNAMICDEP OMAP44XX_CM2_REGADDR(OMAP4430_CM2_IVAHD_MOD, 0x0008)
+#define OMAP4_CM_IVAHD_IVAHD_CLKCTRL_OFFSET 0x0020
#define OMAP4430_CM_IVAHD_IVAHD_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_IVAHD_MOD, 0x0020)
+#define OMAP4_CM_IVAHD_SL2_CLKCTRL_OFFSET 0x0028
#define OMAP4430_CM_IVAHD_SL2_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_IVAHD_MOD, 0x0028)
/* CM2.CAM_CM2 register offsets */
+#define OMAP4_CM_CAM_CLKSTCTRL_OFFSET 0x0000
#define OMAP4430_CM_CAM_CLKSTCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CAM_MOD, 0x0000)
+#define OMAP4_CM_CAM_STATICDEP_OFFSET 0x0004
#define OMAP4430_CM_CAM_STATICDEP OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CAM_MOD, 0x0004)
+#define OMAP4_CM_CAM_DYNAMICDEP_OFFSET 0x0008
#define OMAP4430_CM_CAM_DYNAMICDEP OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CAM_MOD, 0x0008)
+#define OMAP4_CM_CAM_ISS_CLKCTRL_OFFSET 0x0020
#define OMAP4430_CM_CAM_ISS_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CAM_MOD, 0x0020)
+#define OMAP4_CM_CAM_FDIF_CLKCTRL_OFFSET 0x0028
#define OMAP4430_CM_CAM_FDIF_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CAM_MOD, 0x0028)
/* CM2.DSS_CM2 register offsets */
+#define OMAP4_CM_DSS_CLKSTCTRL_OFFSET 0x0000
#define OMAP4430_CM_DSS_CLKSTCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_DSS_MOD, 0x0000)
+#define OMAP4_CM_DSS_STATICDEP_OFFSET 0x0004
#define OMAP4430_CM_DSS_STATICDEP OMAP44XX_CM2_REGADDR(OMAP4430_CM2_DSS_MOD, 0x0004)
+#define OMAP4_CM_DSS_DYNAMICDEP_OFFSET 0x0008
#define OMAP4430_CM_DSS_DYNAMICDEP OMAP44XX_CM2_REGADDR(OMAP4430_CM2_DSS_MOD, 0x0008)
+#define OMAP4_CM_DSS_DSS_CLKCTRL_OFFSET 0x0020
#define OMAP4430_CM_DSS_DSS_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_DSS_MOD, 0x0020)
+#define OMAP4_CM_DSS_DEISS_CLKCTRL_OFFSET 0x0028
#define OMAP4430_CM_DSS_DEISS_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_DSS_MOD, 0x0028)
/* CM2.GFX_CM2 register offsets */
+#define OMAP4_CM_GFX_CLKSTCTRL_OFFSET 0x0000
#define OMAP4430_CM_GFX_CLKSTCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_GFX_MOD, 0x0000)
+#define OMAP4_CM_GFX_STATICDEP_OFFSET 0x0004
#define OMAP4430_CM_GFX_STATICDEP OMAP44XX_CM2_REGADDR(OMAP4430_CM2_GFX_MOD, 0x0004)
+#define OMAP4_CM_GFX_DYNAMICDEP_OFFSET 0x0008
#define OMAP4430_CM_GFX_DYNAMICDEP OMAP44XX_CM2_REGADDR(OMAP4430_CM2_GFX_MOD, 0x0008)
+#define OMAP4_CM_GFX_GFX_CLKCTRL_OFFSET 0x0020
#define OMAP4430_CM_GFX_GFX_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_GFX_MOD, 0x0020)
/* CM2.L3INIT_CM2 register offsets */
+#define OMAP4_CM_L3INIT_CLKSTCTRL_OFFSET 0x0000
#define OMAP4430_CM_L3INIT_CLKSTCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x0000)
+#define OMAP4_CM_L3INIT_STATICDEP_OFFSET 0x0004
#define OMAP4430_CM_L3INIT_STATICDEP OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x0004)
+#define OMAP4_CM_L3INIT_DYNAMICDEP_OFFSET 0x0008
#define OMAP4430_CM_L3INIT_DYNAMICDEP OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x0008)
+#define OMAP4_CM_L3INIT_MMC1_CLKCTRL_OFFSET 0x0028
#define OMAP4430_CM_L3INIT_MMC1_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x0028)
+#define OMAP4_CM_L3INIT_MMC2_CLKCTRL_OFFSET 0x0030
#define OMAP4430_CM_L3INIT_MMC2_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x0030)
+#define OMAP4_CM_L3INIT_HSI_CLKCTRL_OFFSET 0x0038
#define OMAP4430_CM_L3INIT_HSI_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x0038)
+#define OMAP4_CM_L3INIT_UNIPRO1_CLKCTRL_OFFSET 0x0040
#define OMAP4430_CM_L3INIT_UNIPRO1_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x0040)
+#define OMAP4_CM_L3INIT_USB_HOST_CLKCTRL_OFFSET 0x0058
#define OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x0058)
+#define OMAP4_CM_L3INIT_USB_OTG_CLKCTRL_OFFSET 0x0060
#define OMAP4430_CM_L3INIT_USB_OTG_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x0060)
+#define OMAP4_CM_L3INIT_USB_TLL_CLKCTRL_OFFSET 0x0068
#define OMAP4430_CM_L3INIT_USB_TLL_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x0068)
+#define OMAP4_CM_L3INIT_P1500_CLKCTRL_OFFSET 0x0078
#define OMAP4430_CM_L3INIT_P1500_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x0078)
+#define OMAP4_CM_L3INIT_EMAC_CLKCTRL_OFFSET 0x0080
#define OMAP4430_CM_L3INIT_EMAC_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x0080)
+#define OMAP4_CM_L3INIT_SATA_CLKCTRL_OFFSET 0x0088
#define OMAP4430_CM_L3INIT_SATA_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x0088)
+#define OMAP4_CM_L3INIT_TPPSS_CLKCTRL_OFFSET 0x0090
#define OMAP4430_CM_L3INIT_TPPSS_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x0090)
+#define OMAP4_CM_L3INIT_PCIESS_CLKCTRL_OFFSET 0x0098
#define OMAP4430_CM_L3INIT_PCIESS_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x0098)
+#define OMAP4_CM_L3INIT_CCPTX_CLKCTRL_OFFSET 0x00a8
#define OMAP4430_CM_L3INIT_CCPTX_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x00a8)
+#define OMAP4_CM_L3INIT_XHPI_CLKCTRL_OFFSET 0x00c0
#define OMAP4430_CM_L3INIT_XHPI_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x00c0)
+#define OMAP4_CM_L3INIT_MMC6_CLKCTRL_OFFSET 0x00c8
#define OMAP4430_CM_L3INIT_MMC6_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x00c8)
+#define OMAP4_CM_L3INIT_USB_HOST_FS_CLKCTRL_OFFSET 0x00d0
#define OMAP4430_CM_L3INIT_USB_HOST_FS_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x00d0)
+#define OMAP4_CM_L3INIT_USBPHYOCP2SCP_CLKCTRL_OFFSET 0x00e0
#define OMAP4430_CM_L3INIT_USBPHYOCP2SCP_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x00e0)
/* CM2.L4PER_CM2 register offsets */
+#define OMAP4_CM_L4PER_CLKSTCTRL_OFFSET 0x0000
#define OMAP4430_CM_L4PER_CLKSTCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0000)
+#define OMAP4_CM_L4PER_DYNAMICDEP_OFFSET 0x0008
#define OMAP4430_CM_L4PER_DYNAMICDEP OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0008)
+#define OMAP4_CM_L4PER_ADC_CLKCTRL_OFFSET 0x0020
#define OMAP4430_CM_L4PER_ADC_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0020)
+#define OMAP4_CM_L4PER_DMTIMER10_CLKCTRL_OFFSET 0x0028
#define OMAP4430_CM_L4PER_DMTIMER10_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0028)
+#define OMAP4_CM_L4PER_DMTIMER11_CLKCTRL_OFFSET 0x0030
#define OMAP4430_CM_L4PER_DMTIMER11_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0030)
+#define OMAP4_CM_L4PER_DMTIMER2_CLKCTRL_OFFSET 0x0038
#define OMAP4430_CM_L4PER_DMTIMER2_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0038)
+#define OMAP4_CM_L4PER_DMTIMER3_CLKCTRL_OFFSET 0x0040
#define OMAP4430_CM_L4PER_DMTIMER3_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0040)
+#define OMAP4_CM_L4PER_DMTIMER4_CLKCTRL_OFFSET 0x0048
#define OMAP4430_CM_L4PER_DMTIMER4_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0048)
+#define OMAP4_CM_L4PER_DMTIMER9_CLKCTRL_OFFSET 0x0050
#define OMAP4430_CM_L4PER_DMTIMER9_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0050)
+#define OMAP4_CM_L4PER_ELM_CLKCTRL_OFFSET 0x0058
#define OMAP4430_CM_L4PER_ELM_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0058)
+#define OMAP4_CM_L4PER_GPIO2_CLKCTRL_OFFSET 0x0060
#define OMAP4430_CM_L4PER_GPIO2_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0060)
+#define OMAP4_CM_L4PER_GPIO3_CLKCTRL_OFFSET 0x0068
#define OMAP4430_CM_L4PER_GPIO3_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0068)
+#define OMAP4_CM_L4PER_GPIO4_CLKCTRL_OFFSET 0x0070
#define OMAP4430_CM_L4PER_GPIO4_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0070)
+#define OMAP4_CM_L4PER_GPIO5_CLKCTRL_OFFSET 0x0078
#define OMAP4430_CM_L4PER_GPIO5_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0078)
+#define OMAP4_CM_L4PER_GPIO6_CLKCTRL_OFFSET 0x0080
#define OMAP4430_CM_L4PER_GPIO6_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0080)
+#define OMAP4_CM_L4PER_HDQ1W_CLKCTRL_OFFSET 0x0088
#define OMAP4430_CM_L4PER_HDQ1W_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0088)
+#define OMAP4_CM_L4PER_HECC1_CLKCTRL_OFFSET 0x0090
#define OMAP4430_CM_L4PER_HECC1_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0090)
+#define OMAP4_CM_L4PER_HECC2_CLKCTRL_OFFSET 0x0098
#define OMAP4430_CM_L4PER_HECC2_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0098)
+#define OMAP4_CM_L4PER_I2C1_CLKCTRL_OFFSET 0x00a0
#define OMAP4430_CM_L4PER_I2C1_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x00a0)
+#define OMAP4_CM_L4PER_I2C2_CLKCTRL_OFFSET 0x00a8
#define OMAP4430_CM_L4PER_I2C2_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x00a8)
+#define OMAP4_CM_L4PER_I2C3_CLKCTRL_OFFSET 0x00b0
#define OMAP4430_CM_L4PER_I2C3_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x00b0)
+#define OMAP4_CM_L4PER_I2C4_CLKCTRL_OFFSET 0x00b8
#define OMAP4430_CM_L4PER_I2C4_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x00b8)
+#define OMAP4_CM_L4PER_L4PER_CLKCTRL_OFFSET 0x00c0
#define OMAP4430_CM_L4PER_L4PER_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x00c0)
+#define OMAP4_CM_L4PER_MCASP2_CLKCTRL_OFFSET 0x00d0
#define OMAP4430_CM_L4PER_MCASP2_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x00d0)
+#define OMAP4_CM_L4PER_MCASP3_CLKCTRL_OFFSET 0x00d8
#define OMAP4430_CM_L4PER_MCASP3_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x00d8)
+#define OMAP4_CM_L4PER_MCBSP4_CLKCTRL_OFFSET 0x00e0
#define OMAP4430_CM_L4PER_MCBSP4_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x00e0)
+#define OMAP4_CM_L4PER_MGATE_CLKCTRL_OFFSET 0x00e8
#define OMAP4430_CM_L4PER_MGATE_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x00e8)
+#define OMAP4_CM_L4PER_MCSPI1_CLKCTRL_OFFSET 0x00f0
#define OMAP4430_CM_L4PER_MCSPI1_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x00f0)
+#define OMAP4_CM_L4PER_MCSPI2_CLKCTRL_OFFSET 0x00f8
#define OMAP4430_CM_L4PER_MCSPI2_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x00f8)
+#define OMAP4_CM_L4PER_MCSPI3_CLKCTRL_OFFSET 0x0100
#define OMAP4430_CM_L4PER_MCSPI3_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0100)
+#define OMAP4_CM_L4PER_MCSPI4_CLKCTRL_OFFSET 0x0108
#define OMAP4430_CM_L4PER_MCSPI4_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0108)
+#define OMAP4_CM_L4PER_MMCSD3_CLKCTRL_OFFSET 0x0120
#define OMAP4430_CM_L4PER_MMCSD3_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0120)
+#define OMAP4_CM_L4PER_MMCSD4_CLKCTRL_OFFSET 0x0128
#define OMAP4430_CM_L4PER_MMCSD4_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0128)
+#define OMAP4_CM_L4PER_MSPROHG_CLKCTRL_OFFSET 0x0130
#define OMAP4430_CM_L4PER_MSPROHG_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0130)
+#define OMAP4_CM_L4PER_SLIMBUS2_CLKCTRL_OFFSET 0x0138
#define OMAP4430_CM_L4PER_SLIMBUS2_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0138)
+#define OMAP4_CM_L4PER_UART1_CLKCTRL_OFFSET 0x0140
#define OMAP4430_CM_L4PER_UART1_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0140)
+#define OMAP4_CM_L4PER_UART2_CLKCTRL_OFFSET 0x0148
#define OMAP4430_CM_L4PER_UART2_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0148)
+#define OMAP4_CM_L4PER_UART3_CLKCTRL_OFFSET 0x0150
#define OMAP4430_CM_L4PER_UART3_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0150)
+#define OMAP4_CM_L4PER_UART4_CLKCTRL_OFFSET 0x0158
#define OMAP4430_CM_L4PER_UART4_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0158)
+#define OMAP4_CM_L4PER_MMCSD5_CLKCTRL_OFFSET 0x0160
#define OMAP4430_CM_L4PER_MMCSD5_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0160)
+#define OMAP4_CM_L4PER_I2C5_CLKCTRL_OFFSET 0x0168
#define OMAP4430_CM_L4PER_I2C5_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0168)
+#define OMAP4_CM_L4SEC_CLKSTCTRL_OFFSET 0x0180
#define OMAP4430_CM_L4SEC_CLKSTCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0180)
+#define OMAP4_CM_L4SEC_STATICDEP_OFFSET 0x0184
#define OMAP4430_CM_L4SEC_STATICDEP OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0184)
+#define OMAP4_CM_L4SEC_DYNAMICDEP_OFFSET 0x0188
#define OMAP4430_CM_L4SEC_DYNAMICDEP OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0188)
+#define OMAP4_CM_L4SEC_AES1_CLKCTRL_OFFSET 0x01a0
#define OMAP4430_CM_L4SEC_AES1_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x01a0)
+#define OMAP4_CM_L4SEC_AES2_CLKCTRL_OFFSET 0x01a8
#define OMAP4430_CM_L4SEC_AES2_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x01a8)
+#define OMAP4_CM_L4SEC_DES3DES_CLKCTRL_OFFSET 0x01b0
#define OMAP4430_CM_L4SEC_DES3DES_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x01b0)
+#define OMAP4_CM_L4SEC_PKAEIP29_CLKCTRL_OFFSET 0x01b8
#define OMAP4430_CM_L4SEC_PKAEIP29_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x01b8)
+#define OMAP4_CM_L4SEC_RNG_CLKCTRL_OFFSET 0x01c0
#define OMAP4430_CM_L4SEC_RNG_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x01c0)
+#define OMAP4_CM_L4SEC_SHA2MD51_CLKCTRL_OFFSET 0x01c8
#define OMAP4430_CM_L4SEC_SHA2MD51_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x01c8)
+#define OMAP4_CM_L4SEC_CRYPTODMA_CLKCTRL_OFFSET 0x01d8
#define OMAP4430_CM_L4SEC_CRYPTODMA_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x01d8)
/* CM2.CEFUSE_CM2 register offsets */
+#define OMAP4_CM_CEFUSE_CLKSTCTRL_OFFSET 0x0000
#define OMAP4430_CM_CEFUSE_CLKSTCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CEFUSE_MOD, 0x0000)
+#define OMAP4_CM_CEFUSE_CEFUSE_CLKCTRL_OFFSET 0x0020
#define OMAP4430_CM_CEFUSE_CEFUSE_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CEFUSE_MOD, 0x0020)
-
-/* CM2.RESTORE_CM2 register offsets */
-#define OMAP4430_CM_L3_1_CLKSTCTRL_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x0000)
-#define OMAP4430_CM_L3_2_CLKSTCTRL_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x0004)
-#define OMAP4430_CM_L4CFG_CLKSTCTRL_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x0008)
-#define OMAP4430_CM_MEMIF_CLKSTCTRL_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x000c)
-#define OMAP4430_CM_L4PER_CLKSTCTRL_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x0010)
-#define OMAP4430_CM_L3INIT_CLKSTCTRL_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x0014)
-#define OMAP4430_CM_L3INSTR_L3_3_CLKCTRL_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x0018)
-#define OMAP4430_CM_L3INSTR_L3_INSTR_CLKCTRL_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x001c)
-#define OMAP4430_CM_L3INSTR_OCP_WP1_CLKCTRL_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x0020)
-#define OMAP4430_CM_L4PER_GPIO2_CLKCTRL_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x0024)
-#define OMAP4430_CM_L4PER_GPIO3_CLKCTRL_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x0028)
-#define OMAP4430_CM_L4PER_GPIO4_CLKCTRL_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x002c)
-#define OMAP4430_CM_L4PER_GPIO5_CLKCTRL_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x0030)
-#define OMAP4430_CM_L4PER_GPIO6_CLKCTRL_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x0034)
-#define OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x0038)
-#define OMAP4430_CM_L3INIT_USB_TLL_CLKCTRL_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x003c)
-#define OMAP4430_CM_SDMA_STATICDEP_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x0040)
#endif
diff --git a/arch/arm/mach-omap2/cm4xxx.c b/arch/arm/mach-omap2/cm4xxx.c
index 4af76bb1003a..b101091e95d6 100644
--- a/arch/arm/mach-omap2/cm4xxx.c
+++ b/arch/arm/mach-omap2/cm4xxx.c
@@ -21,35 +21,41 @@
#include <asm/atomic.h>
-#include "cm.h"
-
-/* XXX move this to cm.h */
-/* MAX_MODULE_READY_TIME: max milliseconds for module to leave idle */
-#define MAX_MODULE_READY_TIME 20000
+#include <plat/common.h>
-/*
- * OMAP4_PRCM_CM_CLKCTRL_IDLEST_MASK: isolates the IDLEST field in the
- * CM_CLKCTRL register.
- */
-#define OMAP4_PRCM_CM_CLKCTRL_IDLEST_MASK (0x2 << 16)
-
-/*
- * OMAP4 prcm_mod u32 fields contain packed data: the CM ID in bit 16 and
- * the PRCM module offset address (from the CM module base) in bits 15-0.
- */
-#define OMAP4_PRCM_MOD_CM_ID_SHIFT 16
-#define OMAP4_PRCM_MOD_OFFS_MASK 0xffff
+#include "cm.h"
+#include "cm-regbits-44xx.h"
/**
- * omap4_cm_wait_idlest_ready - wait for a module to leave idle or standby
- * @prcm_mod: PRCM module offset (XXX example)
- * @prcm_dev_offs: PRCM device offset (e.g. MCASP XXX example)
+ * omap4_cm_wait_module_ready - wait for a module to be in 'func' state
+ * @clkctrl_reg: CLKCTRL module address
+ *
+ * Wait for the module's IDLEST field to indicate the functional state. While
+ * the module is in any of the non-functional states (trans, idle or
+ * disabled), the module and thus its SYSCONFIG register cannot be accessed;
+ * an access will most likely cause an "imprecise external abort".
+ *
+ * Module idle states:
+ *   0x0 func: Module is fully functional, including OCP
+ *   0x1 trans: Module is performing a transition (wakeup, sleep, or sleep
+ *                 abort)
+ *   0x2 idle: Module is in idle mode (OCP part only). It is still functional
+ *                 if it uses a separate functional clock
+ *   0x3 disabled: Module is disabled and cannot be accessed
*
- * XXX document
+ * TODO: Need to handle modules that remain accessible in the idle state
*/
-int omap4_cm_wait_idlest_ready(u32 prcm_mod, u8 prcm_dev_offs)
+int omap4_cm_wait_module_ready(void __iomem *clkctrl_reg)
{
- /* FIXME: Add clock manager related code */
- return 0;
+ int i = 0;
+
+ if (!clkctrl_reg)
+ return 0;
+
+ omap_test_timeout(((__raw_readl(clkctrl_reg) &
+ OMAP4430_IDLEST_MASK) == 0),
+ MAX_MODULE_READY_TIME, i);
+
+ return (i < MAX_MODULE_READY_TIME) ? 0 : -EBUSY;
}
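
With the new prototype, callers pass the module's CLKCTRL register address directly instead of a packed (prcm_mod, prcm_dev_offs) pair. Below is a hedged sketch of a caller, assuming the OMAP44XX_CM2_REGADDR()-based defines in cm44xx.h above yield void __iomem pointers; the surrounding function and its use of MMC1 are purely illustrative and not part of this patch.

/* Illustrative caller only */
static int example_wait_for_mmc1(void)
{
	int ret;

	/* New-style call: hand over the CLKCTRL virtual address directly */
	ret = omap4_cm_wait_module_ready(OMAP4430_CM_L3INIT_MMC1_CLKCTRL);
	if (ret)
		pr_err("MMC1 did not reach the functional state (%d)\n", ret);

	return ret;
}
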
diff --git a/arch/arm/mach-omap2/control.c b/arch/arm/mach-omap2/control.c
index 43f8a33655d4..a8d20eef2306 100644
--- a/arch/arm/mach-omap2/control.c
+++ b/arch/arm/mach-omap2/control.c
@@ -194,11 +194,12 @@ void omap3_clear_scratchpad_contents(void)
u32 offset = 0;
v_addr = OMAP2_L4_IO_ADDRESS(OMAP343X_SCRATCHPAD_ROM);
if (prm_read_mod_reg(OMAP3430_GR_MOD, OMAP3_PRM_RSTST_OFFSET) &
- OMAP3430_GLOBAL_COLD_RST) {
+ OMAP3430_GLOBAL_COLD_RST_MASK) {
for ( ; offset <= max_offset; offset += 0x4)
__raw_writel(0x0, (v_addr + offset));
- prm_set_mod_reg_bits(OMAP3430_GLOBAL_COLD_RST, OMAP3430_GR_MOD,
- OMAP3_PRM_RSTST_OFFSET);
+ prm_set_mod_reg_bits(OMAP3430_GLOBAL_COLD_RST_MASK,
+ OMAP3430_GR_MOD,
+ OMAP3_PRM_RSTST_OFFSET);
}
}
diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c
index 2271b9bd1f50..03e6c9ed82a4 100644
--- a/arch/arm/mach-omap2/devices.c
+++ b/arch/arm/mach-omap2/devices.c
@@ -17,8 +17,10 @@
#include <linux/clk.h>
#include <mach/hardware.h>
+#include <mach/irqs.h>
#include <asm/mach-types.h>
#include <asm/mach/map.h>
+#include <asm/pmu.h>
#include <plat/control.h>
#include <plat/tc.h>
@@ -26,6 +28,7 @@
#include <plat/mux.h>
#include <mach/gpio.h>
#include <plat/mmc.h>
+#include <plat/dma.h>
#include "mux.h"
@@ -453,8 +456,41 @@ static void omap_init_mcspi(void)
static inline void omap_init_mcspi(void) {}
#endif
-#ifdef CONFIG_OMAP_SHA1_MD5
-static struct resource sha1_md5_resources[] = {
+static struct resource omap2_pmu_resource = {
+ .start = 3,
+ .end = 3,
+ .flags = IORESOURCE_IRQ,
+};
+
+static struct resource omap3_pmu_resource = {
+ .start = INT_34XX_BENCH_MPU_EMUL,
+ .end = INT_34XX_BENCH_MPU_EMUL,
+ .flags = IORESOURCE_IRQ,
+};
+
+static struct platform_device omap_pmu_device = {
+ .name = "arm-pmu",
+ .id = ARM_PMU_DEVICE_CPU,
+ .num_resources = 1,
+};
+
+static void omap_init_pmu(void)
+{
+ if (cpu_is_omap24xx())
+ omap_pmu_device.resource = &omap2_pmu_resource;
+ else if (cpu_is_omap34xx())
+ omap_pmu_device.resource = &omap3_pmu_resource;
+ else
+ return;
+
+ platform_device_register(&omap_pmu_device);
+}
+
+
+#if defined(CONFIG_CRYPTO_DEV_OMAP_SHAM) || defined(CONFIG_CRYPTO_DEV_OMAP_SHAM_MODULE)
+
+#ifdef CONFIG_ARCH_OMAP2
+static struct resource omap2_sham_resources[] = {
{
.start = OMAP24XX_SEC_SHA1MD5_BASE,
.end = OMAP24XX_SEC_SHA1MD5_BASE + 0x64,
@@ -465,20 +501,55 @@ static struct resource sha1_md5_resources[] = {
.flags = IORESOURCE_IRQ,
}
};
+static int omap2_sham_resources_sz = ARRAY_SIZE(omap2_sham_resources);
+#else
+#define omap2_sham_resources NULL
+#define omap2_sham_resources_sz 0
+#endif
-static struct platform_device sha1_md5_device = {
- .name = "OMAP SHA1/MD5",
+#ifdef CONFIG_ARCH_OMAP3
+static struct resource omap3_sham_resources[] = {
+ {
+ .start = OMAP34XX_SEC_SHA1MD5_BASE,
+ .end = OMAP34XX_SEC_SHA1MD5_BASE + 0x64,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = INT_34XX_SHA1MD52_IRQ,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .start = OMAP34XX_DMA_SHA1MD5_RX,
+ .flags = IORESOURCE_DMA,
+ }
+};
+static int omap3_sham_resources_sz = ARRAY_SIZE(omap3_sham_resources);
+#else
+#define omap3_sham_resources NULL
+#define omap3_sham_resources_sz 0
+#endif
+
+static struct platform_device sham_device = {
+ .name = "omap-sham",
.id = -1,
- .num_resources = ARRAY_SIZE(sha1_md5_resources),
- .resource = sha1_md5_resources,
};
-static void omap_init_sha1_md5(void)
+static void omap_init_sham(void)
{
- platform_device_register(&sha1_md5_device);
+ if (cpu_is_omap24xx()) {
+ sham_device.resource = omap2_sham_resources;
+ sham_device.num_resources = omap2_sham_resources_sz;
+ } else if (cpu_is_omap34xx()) {
+ sham_device.resource = omap3_sham_resources;
+ sham_device.num_resources = omap3_sham_resources_sz;
+ } else {
+ pr_err("%s: platform not supported\n", __func__);
+ return;
+ }
+ platform_device_register(&sham_device);
}
#else
-static inline void omap_init_sha1_md5(void) { }
+static inline void omap_init_sham(void) { }
#endif
/*-------------------------------------------------------------------------*/
@@ -591,6 +662,15 @@ static inline void omap_hsmmc_reset(void) {}
static inline void omap2_mmc_mux(struct omap_mmc_platform_data *mmc_controller,
int controller_nr)
{
+ if ((mmc_controller->slots[0].switch_pin > 0) && \
+ (mmc_controller->slots[0].switch_pin < OMAP_MAX_GPIO_LINES))
+ omap_mux_init_gpio(mmc_controller->slots[0].switch_pin,
+ OMAP_PIN_INPUT_PULLUP);
+ if ((mmc_controller->slots[0].gpio_wp > 0) && \
+ (mmc_controller->slots[0].gpio_wp < OMAP_MAX_GPIO_LINES))
+ omap_mux_init_gpio(mmc_controller->slots[0].gpio_wp,
+ OMAP_PIN_INPUT_PULLUP);
+
if (cpu_is_omap2420() && controller_nr == 0) {
omap_cfg_reg(H18_24XX_MMC_CMD);
omap_cfg_reg(H15_24XX_MMC_CLKI);
@@ -786,6 +866,33 @@ static inline void omap_hdq_init(void)
static inline void omap_hdq_init(void) {}
#endif
+/*---------------------------------------------------------------------------*/
+
+#if defined(CONFIG_VIDEO_OMAP2_VOUT) || \
+ defined(CONFIG_VIDEO_OMAP2_VOUT_MODULE)
+#if defined(CONFIG_FB_OMAP2) || defined(CONFIG_FB_OMAP2_MODULE)
+static struct resource omap_vout_resource[3 - CONFIG_FB_OMAP2_NUM_FBS] = {
+};
+#else
+static struct resource omap_vout_resource[2] = {
+};
+#endif
+
+static struct platform_device omap_vout_device = {
+ .name = "omap_vout",
+ .num_resources = ARRAY_SIZE(omap_vout_resource),
+ .resource = &omap_vout_resource[0],
+ .id = -1,
+};
+static void omap_init_vout(void)
+{
+ if (platform_device_register(&omap_vout_device) < 0)
+ printk(KERN_ERR "Unable to register OMAP-VOUT device\n");
+}
+#else
+static inline void omap_init_vout(void) {}
+#endif
+
/*-------------------------------------------------------------------------*/
static int __init omap2_init_devices(void)
@@ -797,9 +904,11 @@ static int __init omap2_init_devices(void)
omap_init_camera();
omap_init_mbox();
omap_init_mcspi();
+ omap_init_pmu();
omap_hdq_init();
omap_init_sti();
- omap_init_sha1_md5();
+ omap_init_sham();
+ omap_init_vout();
return 0;
}
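The device-init helpers added above (omap_init_pmu(), omap_init_sham()) share one pattern: keep a per-SoC resource table, point a single shared platform_device at the table that matches the running chip via cpu_is_omap*(), and only then call platform_device_register(). A minimal sketch of that pattern follows; the "example-dev" name and IRQ numbers are made up purely for illustration and are not part of the patch.
static struct resource example_omap2_res = {
	.start	= 10,			/* illustrative IRQ, not a real OMAP2 line */
	.end	= 10,
	.flags	= IORESOURCE_IRQ,
};
static struct resource example_omap3_res = {
	.start	= 11,			/* illustrative IRQ, not a real OMAP3 line */
	.end	= 11,
	.flags	= IORESOURCE_IRQ,
};
static struct platform_device example_device = {
	.name		= "example-dev",	/* hypothetical driver name */
	.id		= -1,
	.num_resources	= 1,
};
static void __init omap_init_example(void)
{
	if (cpu_is_omap24xx())
		example_device.resource = &example_omap2_res;
	else if (cpu_is_omap34xx())
		example_device.resource = &example_omap3_res;
	else
		return;			/* unsupported SoC: register nothing */
	platform_device_register(&example_device);
}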
diff --git a/arch/arm/mach-omap2/hsmmc.c b/arch/arm/mach-omap2/hsmmc.c
index 9ad229594b46..1ef54b036103 100644
--- a/arch/arm/mach-omap2/hsmmc.c
+++ b/arch/arm/mach-omap2/hsmmc.c
@@ -24,6 +24,7 @@
static u16 control_pbias_offset;
static u16 control_devconf1_offset;
+static u16 control_mmc1;
#define HSMMC_NAME_LEN 9
@@ -42,7 +43,7 @@ static int hsmmc_get_context_loss(struct device *dev)
#define hsmmc_get_context_loss NULL
#endif
-static void hsmmc1_before_set_reg(struct device *dev, int slot,
+static void omap_hsmmc1_before_set_reg(struct device *dev, int slot,
int power_on, int vdd)
{
u32 reg, prog_io;
@@ -95,7 +96,7 @@ static void hsmmc1_before_set_reg(struct device *dev, int slot,
}
}
-static void hsmmc1_after_set_reg(struct device *dev, int slot,
+static void omap_hsmmc1_after_set_reg(struct device *dev, int slot,
int power_on, int vdd)
{
u32 reg;
@@ -119,6 +120,60 @@ static void hsmmc1_after_set_reg(struct device *dev, int slot,
}
}
+static void omap4_hsmmc1_before_set_reg(struct device *dev, int slot,
+ int power_on, int vdd)
+{
+ u32 reg;
+
+ /*
+ * Assume we power both OMAP VMMC1 (for CMD, CLK, DAT0..3) and the
+ * card with Vcc regulator (from twl4030 or whatever). OMAP has both
+ * 1.8V and 3.0V modes, controlled by the PBIAS register.
+ *
+ * In 8-bit modes, OMAP VMMC1A (for DAT4..7) needs a supply, which
+ * is most naturally TWL VSIM; those pins also use PBIAS.
+ *
+ * FIXME handle VMMC1A as needed ...
+ */
+ reg = omap_ctrl_readl(control_pbias_offset);
+ reg &= ~(OMAP4_MMC1_PBIASLITE_PWRDNZ | OMAP4_MMC1_PWRDNZ |
+ OMAP4_USBC1_ICUSB_PWRDNZ);
+ omap_ctrl_writel(reg, control_pbias_offset);
+}
+
+static void omap4_hsmmc1_after_set_reg(struct device *dev, int slot,
+ int power_on, int vdd)
+{
+ u32 reg;
+
+ if (power_on) {
+ reg = omap_ctrl_readl(control_pbias_offset);
+ reg |= OMAP4_MMC1_PBIASLITE_PWRDNZ;
+ if ((1 << vdd) <= MMC_VDD_165_195)
+ reg &= ~OMAP4_MMC1_PBIASLITE_VMODE;
+ else
+ reg |= OMAP4_MMC1_PBIASLITE_VMODE;
+ reg |= (OMAP4_MMC1_PBIASLITE_PWRDNZ | OMAP4_MMC1_PWRDNZ |
+ OMAP4_USBC1_ICUSB_PWRDNZ);
+ omap_ctrl_writel(reg, control_pbias_offset);
+ /* 4 microsec delay for comparator to generate an error*/
+ udelay(4);
+ reg = omap_ctrl_readl(control_pbias_offset);
+ if (reg & OMAP4_MMC1_PBIASLITE_VMODE_ERROR) {
+ pr_err("Pbias Voltage is not same as LDO\n");
+ /* Caution : On VMODE_ERROR Power Down MMC IO */
+ reg &= ~(OMAP4_MMC1_PWRDNZ | OMAP4_USBC1_ICUSB_PWRDNZ);
+ omap_ctrl_writel(reg, control_pbias_offset);
+ }
+ } else {
+ reg = omap_ctrl_readl(control_pbias_offset);
+ reg |= (OMAP4_MMC1_PBIASLITE_PWRDNZ |
+ OMAP4_MMC1_PBIASLITE_VMODE | OMAP4_MMC1_PWRDNZ |
+ OMAP4_USBC1_ICUSB_PWRDNZ);
+ omap_ctrl_writel(reg, control_pbias_offset);
+ }
+}
+
static void hsmmc23_before_set_reg(struct device *dev, int slot,
int power_on, int vdd)
{
@@ -139,6 +194,12 @@ static void hsmmc23_before_set_reg(struct device *dev, int slot,
}
}
+static int nop_mmc_set_power(struct device *dev, int slot, int power_on,
+ int vdd)
+{
+ return 0;
+}
+
static struct omap_mmc_platform_data *hsmmc_data[OMAP34XX_NR_MMC] __initdata;
void __init omap2_hsmmc_init(struct omap2_hsmmc_info *controllers)
@@ -146,13 +207,28 @@ void __init omap2_hsmmc_init(struct omap2_hsmmc_info *controllers)
struct omap2_hsmmc_info *c;
int nr_hsmmc = ARRAY_SIZE(hsmmc_data);
int i;
+ u32 reg;
- if (cpu_is_omap2430()) {
- control_pbias_offset = OMAP243X_CONTROL_PBIAS_LITE;
- control_devconf1_offset = OMAP243X_CONTROL_DEVCONF1;
+ if (!cpu_is_omap44xx()) {
+ if (cpu_is_omap2430()) {
+ control_pbias_offset = OMAP243X_CONTROL_PBIAS_LITE;
+ control_devconf1_offset = OMAP243X_CONTROL_DEVCONF1;
+ } else {
+ control_pbias_offset = OMAP343X_CONTROL_PBIAS_LITE;
+ control_devconf1_offset = OMAP343X_CONTROL_DEVCONF1;
+ }
} else {
- control_pbias_offset = OMAP343X_CONTROL_PBIAS_LITE;
- control_devconf1_offset = OMAP343X_CONTROL_DEVCONF1;
+ control_pbias_offset = OMAP44XX_CONTROL_PBIAS_LITE;
+ control_mmc1 = OMAP44XX_CONTROL_MMC1;
+ reg = omap_ctrl_readl(control_mmc1);
+ reg |= (OMAP4_CONTROL_SDMMC1_PUSTRENGTHGRP0 |
+ OMAP4_CONTROL_SDMMC1_PUSTRENGTHGRP1);
+ reg &= ~(OMAP4_CONTROL_SDMMC1_PUSTRENGTHGRP2 |
+ OMAP4_CONTROL_SDMMC1_PUSTRENGTHGRP3);
+ reg |= (OMAP4_CONTROL_SDMMC1_DR0_SPEEDCTRL |
+ OMAP4_CONTROL_SDMMC1_DR1_SPEEDCTRL |
+ OMAP4_CONTROL_SDMMC1_DR2_SPEEDCTRL);
+ omap_ctrl_writel(reg, control_mmc1);
}
for (c = controllers; c->mmc; c++) {
@@ -216,11 +292,27 @@ void __init omap2_hsmmc_init(struct omap2_hsmmc_info *controllers)
*/
mmc->slots[0].ocr_mask = c->ocr_mask;
+ if (cpu_is_omap3517() || cpu_is_omap3505())
+ mmc->slots[0].set_power = nop_mmc_set_power;
+ else
+ mmc->slots[0].features |= HSMMC_HAS_PBIAS;
+
switch (c->mmc) {
case 1:
- /* on-chip level shifting via PBIAS0/PBIAS1 */
- mmc->slots[0].before_set_reg = hsmmc1_before_set_reg;
- mmc->slots[0].after_set_reg = hsmmc1_after_set_reg;
+ if (mmc->slots[0].features & HSMMC_HAS_PBIAS) {
+ /* on-chip level shifting via PBIAS0/PBIAS1 */
+ if (cpu_is_omap44xx()) {
+ mmc->slots[0].before_set_reg =
+ omap4_hsmmc1_before_set_reg;
+ mmc->slots[0].after_set_reg =
+ omap4_hsmmc1_after_set_reg;
+ } else {
+ mmc->slots[0].before_set_reg =
+ omap_hsmmc1_before_set_reg;
+ mmc->slots[0].after_set_reg =
+ omap_hsmmc1_after_set_reg;
+ }
+ }
/* Omap3630 HSMMC1 supports only 4-bit */
if (cpu_is_omap3630() && c->wires > 4) {
@@ -235,9 +327,11 @@ void __init omap2_hsmmc_init(struct omap2_hsmmc_info *controllers)
c->wires = 4;
/* FALLTHROUGH */
case 3:
- /* off-chip level shifting, or none */
- mmc->slots[0].before_set_reg = hsmmc23_before_set_reg;
- mmc->slots[0].after_set_reg = NULL;
+ if (mmc->slots[0].features & HSMMC_HAS_PBIAS) {
+ /* off-chip level shifting, or none */
+ mmc->slots[0].before_set_reg = hsmmc23_before_set_reg;
+ mmc->slots[0].after_set_reg = NULL;
+ }
break;
default:
pr_err("MMC%d configuration not supported!\n", c->mmc);
diff --git a/arch/arm/mach-omap2/include/mach/am35xx.h b/arch/arm/mach-omap2/include/mach/am35xx.h
index a705f946fc46..f1e13d1ca5e7 100644
--- a/arch/arm/mach-omap2/include/mach/am35xx.h
+++ b/arch/arm/mach-omap2/include/mach/am35xx.h
@@ -23,4 +23,22 @@
#define AM35XX_IPSS_HECC_BASE 0x5C050000
#define AM35XX_IPSS_VPFE_BASE 0x5C060000
-#endif /* __ASM_ARCH_AM35XX_H */
+
+/* HECC module specifc offset definitions */
+#define AM35XX_HECC_SCC_HECC_OFFSET (0x0)
+#define AM35XX_HECC_SCC_RAM_OFFSET (0x3000)
+#define AM35XX_HECC_RAM_OFFSET (0x3000)
+#define AM35XX_HECC_MBOX_OFFSET (0x2000)
+#define AM35XX_HECC_INT_LINE (0x0)
+#define AM35XX_HECC_VERSION (0x1)
+
+#define AM35XX_EMAC_CNTRL_OFFSET (0x10000)
+#define AM35XX_EMAC_CNTRL_MOD_OFFSET (0x0)
+#define AM35XX_EMAC_CNTRL_RAM_OFFSET (0x20000)
+#define AM35XX_EMAC_MDIO_OFFSET (0x30000)
+#define AM35XX_EMAC_CNTRL_RAM_SIZE (0x2000)
+#define AM35XX_EMAC_RAM_ADDR (AM3517_EMAC_BASE + \
+ AM3517_EMAC_CNTRL_RAM_OFFSET)
+#define AM35XX_EMAC_HW_RAM_ADDR (0x01E20000)
+
+#endif /* __ASM_ARCH_AM35XX_H */
diff --git a/arch/arm/mach-omap2/include/mach/debug-macro.S b/arch/arm/mach-omap2/include/mach/debug-macro.S
index 4a63a2ea484d..35b24409a0c8 100644
--- a/arch/arm/mach-omap2/include/mach/debug-macro.S
+++ b/arch/arm/mach-omap2/include/mach/debug-macro.S
@@ -13,6 +13,8 @@
#include <linux/serial_reg.h>
+#include <asm/memory.h>
+
#include <plat/serial.h>
#define UART_OFFSET(addr) ((addr) & 0x00ffffff)
@@ -40,13 +42,12 @@ omap_uart_lsr: .word 0
cmp \rx, #0 @ is port configured?
bne 99f @ already configured
- /* Check UART1 scratchpad register for uart to use */
+ /* Check the debug UART configuration set in uncompress.h */
mrc p15, 0, \rx, c1, c0
tst \rx, #1 @ MMU enabled?
- moveq \rx, #0x48000000 @ physical base address
- movne \rx, #0xfa000000 @ virtual base
- orr \rx, \rx, #0x0006a000 @ uart1 on omap2/3/4
- ldrb \rx, [\rx, #(UART_SCR << OMAP_PORT_SHIFT)] @ scratchpad
+ ldreq \rx, =OMAP_UART_INFO
+ ldrne \rx, =__phys_to_virt(OMAP_UART_INFO)
+ ldr \rx, [\rx, #0]
/* Select the UART to use based on the UART1 scratchpad value */
cmp \rx, #0 @ no port configured?
@@ -87,10 +88,10 @@ omap_uart_lsr: .word 0
b 98f
44: mov \rx, #UART_OFFSET(OMAP4_UART4_BASE)
b 98f
-95: mov \rx, #ZOOM_UART_BASE
+95: ldr \rx, =ZOOM_UART_BASE
ldr \tmp, =omap_uart_phys
str \rx, [\tmp, #0]
- mov \rx, #ZOOM_UART_VIRT
+ ldr \rx, =ZOOM_UART_VIRT
ldr \tmp, =omap_uart_virt
str \rx, [\tmp, #0]
mov \rx, #(UART_LSR << ZOOM_PORT_SHIFT)
diff --git a/arch/arm/mach-omap2/include/mach/omap4-common.h b/arch/arm/mach-omap2/include/mach/omap4-common.h
new file mode 100644
index 000000000000..423af3a6dd31
--- /dev/null
+++ b/arch/arm/mach-omap2/include/mach/omap4-common.h
@@ -0,0 +1,26 @@
+/*
+ * omap4-common.h: OMAP4 specific common header file
+ *
+ * Copyright (C) 2010 Texas Instruments, Inc.
+ *
+ * Author:
+ * Santosh Shilimkar <santosh.shilimkar@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef OMAP_ARCH_OMAP4_COMMON_H
+#define OMAP_ARCH_OMAP4_COMMON_H
+
+#ifdef CONFIG_CACHE_L2X0
+extern void __iomem *l2cache_base;
+#endif
+
+extern void __iomem *gic_cpu_base_addr;
+extern void __iomem *gic_dist_base_addr;
+
+extern void __init gic_init_irq(void);
+extern void omap_smc1(u32 fn, u32 arg);
+
+#endif
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
index 87f676acf61d..3cfb425ea67e 100644
--- a/arch/arm/mach-omap2/io.c
+++ b/arch/arm/mach-omap2/io.c
@@ -166,6 +166,15 @@ static struct map_desc omap34xx_io_desc[] __initdata = {
.length = L4_EMU_34XX_SIZE,
.type = MT_DEVICE
},
+#if defined(CONFIG_DEBUG_LL) && \
+ (defined(CONFIG_MACH_OMAP_ZOOM2) || defined(CONFIG_MACH_OMAP_ZOOM3))
+ {
+ .virtual = ZOOM_UART_VIRT,
+ .pfn = __phys_to_pfn(ZOOM_UART_BASE),
+ .length = SZ_1M,
+ .type = MT_DEVICE
+ },
+#endif
};
#endif
#ifdef CONFIG_ARCH_OMAP4
diff --git a/arch/arm/mach-omap2/iommu2.c b/arch/arm/mach-omap2/iommu2.c
index 4f63dc6859a4..e82da680d908 100644
--- a/arch/arm/mach-omap2/iommu2.c
+++ b/arch/arm/mach-omap2/iommu2.c
@@ -147,6 +147,7 @@ static u32 omap2_iommu_fault_isr(struct iommu *obj, u32 *ra)
printk("\n");
iommu_write_reg(obj, stat, MMU_IRQSTATUS);
+ omap2_iommu_disable(obj);
return stat;
}
@@ -184,7 +185,7 @@ static struct cr_regs *omap2_alloc_cr(struct iommu *obj, struct iotlb_entry *e)
if (!cr)
return ERR_PTR(-ENOMEM);
- cr->cam = (e->da & MMU_CAM_VATAG_MASK) | e->prsvd | e->pgsz;
+ cr->cam = (e->da & MMU_CAM_VATAG_MASK) | e->prsvd | e->pgsz | e->valid;
cr->ram = e->pa | e->endian | e->elsz | e->mixed;
return cr;
@@ -212,7 +213,8 @@ static ssize_t omap2_dump_cr(struct iommu *obj, struct cr_regs *cr, char *buf)
char *p = buf;
/* FIXME: Need more detail analysis of cam/ram */
- p += sprintf(p, "%08x %08x\n", cr->cam, cr->ram);
+ p += sprintf(p, "%08x %08x %01x\n", cr->cam, cr->ram,
+ (cr->cam & MMU_CAM_P) ? 1 : 0);
return p - buf;
}
diff --git a/arch/arm/mach-omap2/mcbsp.c b/arch/arm/mach-omap2/mcbsp.c
index 2f3cad6f9402..c29337074ad3 100644
--- a/arch/arm/mach-omap2/mcbsp.c
+++ b/arch/arm/mach-omap2/mcbsp.c
@@ -187,32 +187,28 @@ static struct omap_mcbsp_platform_data omap44xx_mcbsp_pdata[] = {
.phys_base = OMAP44XX_MCBSP1_BASE,
.dma_rx_sync = OMAP44XX_DMA_MCBSP1_RX,
.dma_tx_sync = OMAP44XX_DMA_MCBSP1_TX,
- .rx_irq = INT_24XX_MCBSP1_IRQ_RX,
- .tx_irq = INT_24XX_MCBSP1_IRQ_TX,
+ .tx_irq = OMAP44XX_IRQ_MCBSP1,
.ops = &omap2_mcbsp_ops,
},
{
.phys_base = OMAP44XX_MCBSP2_BASE,
.dma_rx_sync = OMAP44XX_DMA_MCBSP2_RX,
.dma_tx_sync = OMAP44XX_DMA_MCBSP2_TX,
- .rx_irq = INT_24XX_MCBSP2_IRQ_RX,
- .tx_irq = INT_24XX_MCBSP2_IRQ_TX,
+ .tx_irq = OMAP44XX_IRQ_MCBSP2,
.ops = &omap2_mcbsp_ops,
},
{
.phys_base = OMAP44XX_MCBSP3_BASE,
.dma_rx_sync = OMAP44XX_DMA_MCBSP3_RX,
.dma_tx_sync = OMAP44XX_DMA_MCBSP3_TX,
- .rx_irq = INT_24XX_MCBSP3_IRQ_RX,
- .tx_irq = INT_24XX_MCBSP3_IRQ_TX,
+ .tx_irq = OMAP44XX_IRQ_MCBSP3,
.ops = &omap2_mcbsp_ops,
},
{
.phys_base = OMAP44XX_MCBSP4_BASE,
.dma_rx_sync = OMAP44XX_DMA_MCBSP4_RX,
.dma_tx_sync = OMAP44XX_DMA_MCBSP4_TX,
- .rx_irq = INT_24XX_MCBSP4_IRQ_RX,
- .tx_irq = INT_24XX_MCBSP4_IRQ_TX,
+ .tx_irq = OMAP44XX_IRQ_MCBSP4,
.ops = &omap2_mcbsp_ops,
},
};
diff --git a/arch/arm/mach-omap2/mux34xx.c b/arch/arm/mach-omap2/mux34xx.c
index 07aa7b3c95f7..2ff4dce95ee8 100644
--- a/arch/arm/mach-omap2/mux34xx.c
+++ b/arch/arm/mach-omap2/mux34xx.c
@@ -1901,26 +1901,15 @@ struct omap_ball __initdata omap36xx_cbp_ball[] = {
_OMAP3_BALLENTRY(GPMC_A8, "m3", "ab18"),
_OMAP3_BALLENTRY(GPMC_A9, "l3", "ac19"),
_OMAP3_BALLENTRY(GPMC_CLK, "t4", "w2"),
- _OMAP3_BALLENTRY(GPMC_D0, "k1", "m2"),
- _OMAP3_BALLENTRY(GPMC_D1, "l1", "m1"),
_OMAP3_BALLENTRY(GPMC_D10, "p1", "ab4"),
_OMAP3_BALLENTRY(GPMC_D11, "r1", "ac4"),
_OMAP3_BALLENTRY(GPMC_D12, "r2", "ab6"),
_OMAP3_BALLENTRY(GPMC_D13, "t2", "ac6"),
_OMAP3_BALLENTRY(GPMC_D14, "w1", "ab7"),
_OMAP3_BALLENTRY(GPMC_D15, "y1", "ac7"),
- _OMAP3_BALLENTRY(GPMC_D2, "l2", "n2"),
- _OMAP3_BALLENTRY(GPMC_D3, "p2", "n1"),
- _OMAP3_BALLENTRY(GPMC_D4, "t1", "r2"),
- _OMAP3_BALLENTRY(GPMC_D5, "v1", "r1"),
- _OMAP3_BALLENTRY(GPMC_D6, "v2", "t2"),
- _OMAP3_BALLENTRY(GPMC_D7, "w2", "t1"),
- _OMAP3_BALLENTRY(GPMC_D8, "h2", "ab3"),
_OMAP3_BALLENTRY(GPMC_D9, "k2", "ac3"),
- _OMAP3_BALLENTRY(GPMC_NADV_ALE, "f3", "w1"),
_OMAP3_BALLENTRY(GPMC_NBE0_CLE, "g3", "ac12"),
_OMAP3_BALLENTRY(GPMC_NBE1, "u3", NULL),
- _OMAP3_BALLENTRY(GPMC_NCS0, "g4", "y2"),
_OMAP3_BALLENTRY(GPMC_NCS1, "h3", "y1"),
_OMAP3_BALLENTRY(GPMC_NCS2, "v8", NULL),
_OMAP3_BALLENTRY(GPMC_NCS3, "u8", NULL),
@@ -1928,10 +1917,7 @@ struct omap_ball __initdata omap36xx_cbp_ball[] = {
_OMAP3_BALLENTRY(GPMC_NCS5, "r8", NULL),
_OMAP3_BALLENTRY(GPMC_NCS6, "p8", NULL),
_OMAP3_BALLENTRY(GPMC_NCS7, "n8", NULL),
- _OMAP3_BALLENTRY(GPMC_NOE, "g2", "v2"),
- _OMAP3_BALLENTRY(GPMC_NWE, "f4", "v1"),
_OMAP3_BALLENTRY(GPMC_NWP, "h1", "ab10"),
- _OMAP3_BALLENTRY(GPMC_WAIT0, "m8", "ab12"),
_OMAP3_BALLENTRY(GPMC_WAIT1, "l8", "ac10"),
_OMAP3_BALLENTRY(GPMC_WAIT2, "k8", NULL),
_OMAP3_BALLENTRY(GPMC_WAIT3, "j8", NULL),
@@ -1948,8 +1934,6 @@ struct omap_ball __initdata omap36xx_cbp_ball[] = {
_OMAP3_BALLENTRY(HSUSB0_DIR, "r28", NULL),
_OMAP3_BALLENTRY(HSUSB0_NXT, "t26", NULL),
_OMAP3_BALLENTRY(HSUSB0_STP, "t25", NULL),
- _OMAP3_BALLENTRY(I2C1_SCL, "k21", NULL),
- _OMAP3_BALLENTRY(I2C1_SDA, "j21", NULL),
_OMAP3_BALLENTRY(I2C2_SCL, "af15", NULL),
_OMAP3_BALLENTRY(I2C2_SDA, "ae15", NULL),
_OMAP3_BALLENTRY(I2C3_SCL, "af14", NULL),
@@ -1958,11 +1942,6 @@ struct omap_ball __initdata omap36xx_cbp_ball[] = {
_OMAP3_BALLENTRY(I2C4_SDA, "ae26", NULL),
_OMAP3_BALLENTRY(JTAG_EMU0, "aa11", NULL),
_OMAP3_BALLENTRY(JTAG_EMU1, "aa10", NULL),
- _OMAP3_BALLENTRY(JTAG_RTCK, "aa12", NULL),
- _OMAP3_BALLENTRY(JTAG_TCK, "aa13", NULL),
- _OMAP3_BALLENTRY(JTAG_TDI, "aa20", NULL),
- _OMAP3_BALLENTRY(JTAG_TDO, "aa19", NULL),
- _OMAP3_BALLENTRY(JTAG_TMS_TMSC, "aa18", NULL),
_OMAP3_BALLENTRY(MCBSP1_CLKR, "y21", NULL),
_OMAP3_BALLENTRY(MCBSP1_CLKX, "w21", NULL),
_OMAP3_BALLENTRY(MCBSP1_DR, "u21", NULL),
@@ -2010,77 +1989,12 @@ struct omap_ball __initdata omap36xx_cbp_ball[] = {
_OMAP3_BALLENTRY(SDMMC2_DAT5, "ah3", NULL),
_OMAP3_BALLENTRY(SDMMC2_DAT6, "af3", NULL),
_OMAP3_BALLENTRY(SDMMC2_DAT7, "ae3", NULL),
- _OMAP3_BALLENTRY(SDRC_A0, NULL, "n22"),
- _OMAP3_BALLENTRY(SDRC_A1, NULL, "n23"),
- _OMAP3_BALLENTRY(SDRC_A10, NULL, "v22"),
- _OMAP3_BALLENTRY(SDRC_A11, NULL, "v23"),
- _OMAP3_BALLENTRY(SDRC_A12, NULL, "w22"),
- _OMAP3_BALLENTRY(SDRC_A13, NULL, "w23"),
- _OMAP3_BALLENTRY(SDRC_A14, NULL, "y22"),
- _OMAP3_BALLENTRY(SDRC_A2, NULL, "p22"),
- _OMAP3_BALLENTRY(SDRC_A3, NULL, "p23"),
- _OMAP3_BALLENTRY(SDRC_A4, NULL, "r22"),
- _OMAP3_BALLENTRY(SDRC_A5, NULL, "r23"),
- _OMAP3_BALLENTRY(SDRC_A6, NULL, "t22"),
- _OMAP3_BALLENTRY(SDRC_A7, NULL, "t23"),
- _OMAP3_BALLENTRY(SDRC_A8, NULL, "u22"),
- _OMAP3_BALLENTRY(SDRC_A9, NULL, "u23"),
- _OMAP3_BALLENTRY(SDRC_BA0, "h9", "ab21"),
- _OMAP3_BALLENTRY(SDRC_BA1, "h10", "ac21"),
_OMAP3_BALLENTRY(SDRC_CKE0, "h16", "j22"),
_OMAP3_BALLENTRY(SDRC_CKE1, "h17", "j23"),
- _OMAP3_BALLENTRY(SDRC_CLK, "a13", "a11"),
- _OMAP3_BALLENTRY(SDRC_D0, NULL, "j2"),
- _OMAP3_BALLENTRY(SDRC_D1, NULL, "j1"),
- _OMAP3_BALLENTRY(SDRC_D10, "c15", "b14"),
- _OMAP3_BALLENTRY(SDRC_D11, "b16", "a14"),
- _OMAP3_BALLENTRY(SDRC_D12, "d17", "b16"),
- _OMAP3_BALLENTRY(SDRC_D13, "c17", "a16"),
- _OMAP3_BALLENTRY(SDRC_D14, "b17", "b19"),
- _OMAP3_BALLENTRY(SDRC_D15, "d18", "a19"),
- _OMAP3_BALLENTRY(SDRC_D16, NULL, "b3"),
- _OMAP3_BALLENTRY(SDRC_D17, NULL, "a3"),
- _OMAP3_BALLENTRY(SDRC_D18, NULL, "b5"),
- _OMAP3_BALLENTRY(SDRC_D19, NULL, "a5"),
- _OMAP3_BALLENTRY(SDRC_D2, NULL, "g2"),
- _OMAP3_BALLENTRY(SDRC_D20, NULL, "b8"),
- _OMAP3_BALLENTRY(SDRC_D21, NULL, "a8"),
- _OMAP3_BALLENTRY(SDRC_D22, NULL, "b9"),
- _OMAP3_BALLENTRY(SDRC_D23, NULL, "a9"),
- _OMAP3_BALLENTRY(SDRC_D24, NULL, "b21"),
- _OMAP3_BALLENTRY(SDRC_D25, NULL, "a21"),
- _OMAP3_BALLENTRY(SDRC_D26, NULL, "d22"),
- _OMAP3_BALLENTRY(SDRC_D27, NULL, "d23"),
- _OMAP3_BALLENTRY(SDRC_D28, NULL, "e22"),
- _OMAP3_BALLENTRY(SDRC_D29, NULL, "e23"),
- _OMAP3_BALLENTRY(SDRC_D3, NULL, "g1"),
- _OMAP3_BALLENTRY(SDRC_D30, NULL, "g22"),
- _OMAP3_BALLENTRY(SDRC_D31, NULL, "g23"),
- _OMAP3_BALLENTRY(SDRC_D4, NULL, "f2"),
- _OMAP3_BALLENTRY(SDRC_D5, NULL, "f1"),
- _OMAP3_BALLENTRY(SDRC_D6, NULL, "d2"),
- _OMAP3_BALLENTRY(SDRC_D7, NULL, "d1"),
- _OMAP3_BALLENTRY(SDRC_D8, "c14", "b13"),
- _OMAP3_BALLENTRY(SDRC_D9, "b14", "a13"),
- _OMAP3_BALLENTRY(SDRC_DM0, NULL, "c1"),
- _OMAP3_BALLENTRY(SDRC_DM1, "a16", "a17"),
- _OMAP3_BALLENTRY(SDRC_DM2, NULL, "a6"),
- _OMAP3_BALLENTRY(SDRC_DM3, NULL, "a20"),
- _OMAP3_BALLENTRY(SDRC_DQS0, NULL, "c2"),
- _OMAP3_BALLENTRY(SDRC_DQS1, "a17", "b17"),
- _OMAP3_BALLENTRY(SDRC_DQS2, NULL, "b6"),
- _OMAP3_BALLENTRY(SDRC_DQS3, NULL, "b20"),
- _OMAP3_BALLENTRY(SDRC_NCAS, "h13", "l22"),
- _OMAP3_BALLENTRY(SDRC_NCLK, "a14", "b11"),
- _OMAP3_BALLENTRY(SDRC_NCS0, "h11", "m22"),
- _OMAP3_BALLENTRY(SDRC_NCS1, "h12", "m23"),
- _OMAP3_BALLENTRY(SDRC_NRAS, "h14", "l23"),
- _OMAP3_BALLENTRY(SDRC_NWE, "h15", "k23"),
_OMAP3_BALLENTRY(SIM_CLK, "p26", NULL),
_OMAP3_BALLENTRY(SIM_IO, "p27", NULL),
_OMAP3_BALLENTRY(SIM_PWRCTRL, "r27", NULL),
_OMAP3_BALLENTRY(SIM_RST, "r25", NULL),
- _OMAP3_BALLENTRY(SYS_32K, "ae25", NULL),
_OMAP3_BALLENTRY(SYS_BOOT0, "ah26", NULL),
_OMAP3_BALLENTRY(SYS_BOOT1, "ag26", NULL),
_OMAP3_BALLENTRY(SYS_BOOT2, "ae14", NULL),
diff --git a/arch/arm/mach-omap2/omap-iommu.c b/arch/arm/mach-omap2/omap-iommu.c
new file mode 100644
index 000000000000..eb9bee73e0cb
--- /dev/null
+++ b/arch/arm/mach-omap2/omap-iommu.c
@@ -0,0 +1,157 @@
+/*
+ * omap iommu: omap device registration
+ *
+ * Copyright (C) 2008-2009 Nokia Corporation
+ *
+ * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/platform_device.h>
+
+#include <plat/iommu.h>
+#include <plat/irqs.h>
+
+struct iommu_device {
+ resource_size_t base;
+ int irq;
+ struct iommu_platform_data pdata;
+ struct resource res[2];
+};
+static struct iommu_device *devices;
+static int num_iommu_devices;
+
+#ifdef CONFIG_ARCH_OMAP3
+static struct iommu_device omap3_devices[] = {
+ {
+ .base = 0x480bd400,
+ .irq = 24,
+ .pdata = {
+ .name = "isp",
+ .nr_tlb_entries = 8,
+ .clk_name = "cam_ick",
+ },
+ },
+#if defined(CONFIG_MPU_BRIDGE_IOMMU)
+ {
+ .base = 0x5d000000,
+ .irq = 28,
+ .pdata = {
+ .name = "iva2",
+ .nr_tlb_entries = 32,
+ .clk_name = "iva2_ck",
+ },
+ },
+#endif
+};
+#define NR_OMAP3_IOMMU_DEVICES ARRAY_SIZE(omap3_devices)
+static struct platform_device *omap3_iommu_pdev[NR_OMAP3_IOMMU_DEVICES];
+#else
+#define omap3_devices NULL
+#define NR_OMAP3_IOMMU_DEVICES 0
+#define omap3_iommu_pdev NULL
+#endif
+
+#ifdef CONFIG_ARCH_OMAP4
+static struct iommu_device omap4_devices[] = {
+ {
+ .base = OMAP4_MMU1_BASE,
+ .irq = INT_44XX_DUCATI_MMU_IRQ,
+ .pdata = {
+ .name = "ducati",
+ .nr_tlb_entries = 32,
+ .clk_name = "ducati_ick",
+ },
+ },
+#if defined(CONFIG_MPU_TESLA_IOMMU)
+ {
+ .base = OMAP4_MMU2_BASE,
+ .irq = INT_44XX_DSP_MMU,
+ .pdata = {
+ .name = "tesla",
+ .nr_tlb_entries = 32,
+ .clk_name = "tesla_ick",
+ },
+ },
+#endif
+};
+#define NR_OMAP4_IOMMU_DEVICES ARRAY_SIZE(omap4_devices)
+static struct platform_device *omap4_iommu_pdev[NR_OMAP4_IOMMU_DEVICES];
+#else
+#define omap4_devices NULL
+#define NR_OMAP4_IOMMU_DEVICES 0
+#define omap4_iommu_pdev NULL
+#endif
+
+static struct platform_device **omap_iommu_pdev;
+
+static int __init omap_iommu_init(void)
+{
+ int i, err;
+ struct resource res[] = {
+ { .flags = IORESOURCE_MEM },
+ { .flags = IORESOURCE_IRQ },
+ };
+
+ if (cpu_is_omap34xx()) {
+ devices = omap3_devices;
+ omap_iommu_pdev = omap3_iommu_pdev;
+ num_iommu_devices = NR_OMAP3_IOMMU_DEVICES;
+ } else if (cpu_is_omap44xx()) {
+ devices = omap4_devices;
+ omap_iommu_pdev = omap4_iommu_pdev;
+ num_iommu_devices = NR_OMAP4_IOMMU_DEVICES;
+ } else
+ return -ENODEV;
+
+ for (i = 0; i < num_iommu_devices; i++) {
+ struct platform_device *pdev;
+ const struct iommu_device *d = &devices[i];
+
+ pdev = platform_device_alloc("omap-iommu", i);
+ if (!pdev) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ res[0].start = d->base;
+ res[0].end = d->base + MMU_REG_SIZE - 1;
+ res[1].start = res[1].end = d->irq;
+
+ err = platform_device_add_resources(pdev, res,
+ ARRAY_SIZE(res));
+ if (err)
+ goto err_out;
+ err = platform_device_add_data(pdev, &d->pdata,
+ sizeof(d->pdata));
+ if (err)
+ goto err_out;
+ err = platform_device_add(pdev);
+ if (err)
+ goto err_out;
+ omap_iommu_pdev[i] = pdev;
+ }
+ return 0;
+
+err_out:
+ while (i--)
+ platform_device_put(omap_iommu_pdev[i]);
+ return err;
+}
+module_init(omap_iommu_init);
+
+static void __exit omap_iommu_exit(void)
+{
+ int i;
+
+ for (i = 0; i < num_iommu_devices; i++)
+ platform_device_unregister(omap_iommu_pdev[i]);
+}
+module_exit(omap_iommu_exit);
+
+MODULE_AUTHOR("Hiroshi DOYU");
+MODULE_DESCRIPTION("omap iommu: omap device registration");
+MODULE_LICENSE("GPL v2");
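On the consumer side, the "omap-iommu" driver that binds to these devices can pull the register window, fault IRQ, and the copied iommu_platform_data back out of the platform_device. The probe below is only a sketch of that lookup; the real driver is not part of this file and its probe may differ.
static int omap_iommu_probe_sketch(struct platform_device *pdev)
{
	struct iommu_platform_data *pdata = pdev->dev.platform_data;
	struct resource *mem;
	int irq;
	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);	/* res[0] registered above */
	irq = platform_get_irq(pdev, 0);			/* res[1] registered above */
	if (!pdata || !mem || irq < 0)
		return -ENODEV;
	pr_info("%s: regs at 0x%08llx, irq %d, clk %s\n", pdata->name,
		(unsigned long long)mem->start, irq, pdata->clk_name);
	return 0;
}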
diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
index 38153e5fbca0..1cf52313759e 100644
--- a/arch/arm/mach-omap2/omap-smp.c
+++ b/arch/arm/mach-omap2/omap-smp.c
@@ -24,7 +24,7 @@
#include <asm/localtimer.h>
#include <asm/smp_scu.h>
#include <mach/hardware.h>
-#include <plat/common.h>
+#include <mach/omap4-common.h>
/* SCU base address */
static void __iomem *scu_base;
diff --git a/arch/arm/mach-omap2/omap3-iommu.c b/arch/arm/mach-omap2/omap3-iommu.c
deleted file mode 100644
index fbbcb5c83367..000000000000
--- a/arch/arm/mach-omap2/omap3-iommu.c
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * omap iommu: omap3 device registration
- *
- * Copyright (C) 2008-2009 Nokia Corporation
- *
- * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/platform_device.h>
-
-#include <plat/iommu.h>
-
-struct iommu_device {
- resource_size_t base;
- int irq;
- struct iommu_platform_data pdata;
- struct resource res[2];
-};
-
-static struct iommu_device devices[] = {
- {
- .base = 0x480bd400,
- .irq = 24,
- .pdata = {
- .name = "isp",
- .nr_tlb_entries = 8,
- .clk_name = "cam_ick",
- },
- },
-#if defined(CONFIG_MPU_BRIDGE_IOMMU)
- {
- .base = 0x5d000000,
- .irq = 28,
- .pdata = {
- .name = "iva2",
- .nr_tlb_entries = 32,
- .clk_name = "iva2_ck",
- },
- },
-#endif
-};
-#define NR_IOMMU_DEVICES ARRAY_SIZE(devices)
-
-static struct platform_device *omap3_iommu_pdev[NR_IOMMU_DEVICES];
-
-static int __init omap3_iommu_init(void)
-{
- int i, err;
- struct resource res[] = {
- { .flags = IORESOURCE_MEM },
- { .flags = IORESOURCE_IRQ },
- };
-
- for (i = 0; i < NR_IOMMU_DEVICES; i++) {
- struct platform_device *pdev;
- const struct iommu_device *d = &devices[i];
-
- pdev = platform_device_alloc("omap-iommu", i);
- if (!pdev) {
- err = -ENOMEM;
- goto err_out;
- }
-
- res[0].start = d->base;
- res[0].end = d->base + MMU_REG_SIZE - 1;
- res[1].start = res[1].end = d->irq;
-
- err = platform_device_add_resources(pdev, res,
- ARRAY_SIZE(res));
- if (err)
- goto err_out;
- err = platform_device_add_data(pdev, &d->pdata,
- sizeof(d->pdata));
- if (err)
- goto err_out;
- err = platform_device_add(pdev);
- if (err)
- goto err_out;
- omap3_iommu_pdev[i] = pdev;
- }
- return 0;
-
-err_out:
- while (i--)
- platform_device_put(omap3_iommu_pdev[i]);
- return err;
-}
-module_init(omap3_iommu_init);
-
-static void __exit omap3_iommu_exit(void)
-{
- int i;
-
- for (i = 0; i < NR_IOMMU_DEVICES; i++)
- platform_device_unregister(omap3_iommu_pdev[i]);
-}
-module_exit(omap3_iommu_exit);
-
-MODULE_AUTHOR("Hiroshi DOYU");
-MODULE_DESCRIPTION("omap iommu: omap3 device registration");
-MODULE_LICENSE("GPL v2");
diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c
new file mode 100644
index 000000000000..13dc9794dcc2
--- /dev/null
+++ b/arch/arm/mach-omap2/omap4-common.c
@@ -0,0 +1,72 @@
+/*
+ * OMAP4 specific common source file.
+ *
+ * Copyright (C) 2010 Texas Instruments, Inc.
+ * Author:
+ * Santosh Shilimkar <santosh.shilimkar@ti.com>
+ *
+ *
+ * This program is free software,you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+
+#include <asm/hardware/gic.h>
+#include <asm/hardware/cache-l2x0.h>
+
+#include <mach/hardware.h>
+#include <mach/omap4-common.h>
+
+#ifdef CONFIG_CACHE_L2X0
+void __iomem *l2cache_base;
+#endif
+
+void __iomem *gic_cpu_base_addr;
+void __iomem *gic_dist_base_addr;
+
+
+void __init gic_init_irq(void)
+{
+ /* Static mapping, never released */
+ gic_dist_base_addr = ioremap(OMAP44XX_GIC_DIST_BASE, SZ_4K);
+ BUG_ON(!gic_dist_base_addr);
+ gic_dist_init(0, gic_dist_base_addr, 29);
+
+ /* Static mapping, never released */
+ gic_cpu_base_addr = ioremap(OMAP44XX_GIC_CPU_BASE, SZ_512);
+ BUG_ON(!gic_cpu_base_addr);
+ gic_cpu_init(0, gic_cpu_base_addr);
+}
+
+#ifdef CONFIG_CACHE_L2X0
+static int __init omap_l2_cache_init(void)
+{
+ /*
+ * To avoid code running on other OMAPs in
+ * multi-omap builds
+ */
+ if (!cpu_is_omap44xx())
+ return -ENODEV;
+
+ /* Static mapping, never released */
+ l2cache_base = ioremap(OMAP44XX_L2CACHE_BASE, SZ_4K);
+ BUG_ON(!l2cache_base);
+
+ /* Enable PL310 L2 Cache controller */
+ omap_smc1(0x102, 0x1);
+
+ /*
+ * 32KB way size, 16-way associativity,
+ * parity disabled
+ */
+ l2x0_init(l2cache_base, 0x0e050000, 0xc0000fff);
+
+ return 0;
+}
+early_initcall(omap_l2_cache_init);
+#endif
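The two magic numbers passed to l2x0_init() above are an (aux_val, aux_mask) pair: the generic l2x0 code (not shown here) effectively computes (current & aux_mask) | aux_val for the PL310 auxiliary control register, so 0xc0000fff preserves bits 31:30 and 11:0 of the reset value while 0x0e050000 supplies the configuration the comment describes (32KB way size, 16-way associativity, parity disabled). A standalone sketch of that computation, with an illustrative reset value only:
#include <stdio.h>
int main(void)
{
	unsigned int current_aux = 0x02020fff;	/* illustrative power-on value, not measured */
	unsigned int aux_val = 0x0e050000, aux_mask = 0xc0000fff;
	/* bits cleared by the mask are then supplied by aux_val */
	printf("AUX_CTRL <- 0x%08x\n", (current_aux & aux_mask) | aux_val);
	return 0;
}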
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index e436dcb19795..95c9a5f774e1 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -2,12 +2,12 @@
* omap_hwmod implementation for OMAP2/3/4
*
* Copyright (C) 2009 Nokia Corporation
- * Paul Walmsley
- * With fixes and testing from Kevin Hilman
*
- * Created in collaboration with (alphabetical order): Benoit Cousson,
- * Kevin Hilman, Tony Lindgren, Rajendra Nayak, Vikram Pandita, Sakari
- * Poussa, Anand Sawant, Santosh Shilimkar, Richard Woodruff
+ * Paul Walmsley, Benoît Cousson, Kevin Hilman
+ *
+ * Created in collaboration with (alphabetical order): Thara Gopinath,
+ * Tony Lindgren, Rajendra Nayak, Vikram Pandita, Sakari Poussa, Anand
+ * Sawant, Santosh Shilimkar, Richard Woodruff
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -43,7 +43,6 @@
#include <linux/err.h>
#include <linux/list.h>
#include <linux/mutex.h>
-#include <linux/bootmem.h>
#include <plat/common.h>
#include <plat/cpu.h>
@@ -58,7 +57,7 @@
#define MAX_MODULE_RESET_WAIT 10000
/* Name of the OMAP hwmod for the MPU */
-#define MPU_INITIATOR_NAME "mpu_hwmod"
+#define MPU_INITIATOR_NAME "mpu"
/* omap_hwmod_list contains all registered struct omap_hwmods */
static LIST_HEAD(omap_hwmod_list);
@@ -404,21 +403,20 @@ static int _del_initiator_dep(struct omap_hwmod *oh, struct omap_hwmod *init_oh)
*/
static int _init_main_clk(struct omap_hwmod *oh)
{
- struct clk *c;
int ret = 0;
if (!oh->main_clk)
return 0;
- c = omap_clk_get_by_name(oh->main_clk);
- WARN(IS_ERR(c), "omap_hwmod: %s: cannot clk_get main_clk %s\n",
- oh->name, oh->main_clk);
- if (IS_ERR(c))
- ret = -EINVAL;
- oh->_clk = c;
+ oh->_clk = omap_clk_get_by_name(oh->main_clk);
+ if (!oh->_clk)
+ pr_warning("omap_hwmod: %s: cannot clk_get main_clk %s\n",
+ oh->name, oh->main_clk);
+ return -EINVAL;
- WARN(!c->clkdm, "omap_hwmod: %s: missing clockdomain for %s.\n",
- oh->main_clk, c->name);
+ if (!oh->_clk->clkdm)
+ pr_warning("omap_hwmod: %s: missing clockdomain for %s.\n",
+ oh->main_clk, oh->_clk->name);
return ret;
}
@@ -432,7 +430,6 @@ static int _init_main_clk(struct omap_hwmod *oh)
*/
static int _init_interface_clks(struct omap_hwmod *oh)
{
- struct omap_hwmod_ocp_if *os;
struct clk *c;
int i;
int ret = 0;
@@ -440,14 +437,16 @@ static int _init_interface_clks(struct omap_hwmod *oh)
if (oh->slaves_cnt == 0)
return 0;
- for (i = 0, os = *oh->slaves; i < oh->slaves_cnt; i++, os++) {
+ for (i = 0; i < oh->slaves_cnt; i++) {
+ struct omap_hwmod_ocp_if *os = oh->slaves[i];
+
if (!os->clk)
continue;
c = omap_clk_get_by_name(os->clk);
- WARN(IS_ERR(c), "omap_hwmod: %s: cannot clk_get "
- "interface_clk %s\n", oh->name, os->clk);
- if (IS_ERR(c))
+ if (!c)
+ pr_warning("omap_hwmod: %s: cannot clk_get interface_clk %s\n",
+ oh->name, os->clk);
ret = -EINVAL;
os->_clk = c;
}
@@ -471,9 +470,9 @@ static int _init_opt_clks(struct omap_hwmod *oh)
for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++) {
c = omap_clk_get_by_name(oc->clk);
- WARN(IS_ERR(c), "omap_hwmod: %s: cannot clk_get opt_clk "
- "%s\n", oh->name, oc->clk);
- if (IS_ERR(c))
+ if (!c)
+ pr_warning("omap_hwmod: %s: cannot clk_get opt_clk %s\n",
+ oh->name, oc->clk);
ret = -EINVAL;
oc->_clk = c;
}
@@ -490,19 +489,19 @@ static int _init_opt_clks(struct omap_hwmod *oh)
*/
static int _enable_clocks(struct omap_hwmod *oh)
{
- struct omap_hwmod_ocp_if *os;
int i;
pr_debug("omap_hwmod: %s: enabling clocks\n", oh->name);
- if (oh->_clk && !IS_ERR(oh->_clk))
+ if (oh->_clk)
clk_enable(oh->_clk);
if (oh->slaves_cnt > 0) {
- for (i = 0, os = *oh->slaves; i < oh->slaves_cnt; i++, os++) {
+ for (i = 0; i < oh->slaves_cnt; i++) {
+ struct omap_hwmod_ocp_if *os = oh->slaves[i];
struct clk *c = os->_clk;
- if (c && !IS_ERR(c) && (os->flags & OCPIF_SWSUP_IDLE))
+ if (c && (os->flags & OCPIF_SWSUP_IDLE))
clk_enable(c);
}
}
@@ -520,19 +519,19 @@ static int _enable_clocks(struct omap_hwmod *oh)
*/
static int _disable_clocks(struct omap_hwmod *oh)
{
- struct omap_hwmod_ocp_if *os;
int i;
pr_debug("omap_hwmod: %s: disabling clocks\n", oh->name);
- if (oh->_clk && !IS_ERR(oh->_clk))
+ if (oh->_clk)
clk_disable(oh->_clk);
if (oh->slaves_cnt > 0) {
- for (i = 0, os = *oh->slaves; i < oh->slaves_cnt; i++, os++) {
+ for (i = 0; i < oh->slaves_cnt; i++) {
+ struct omap_hwmod_ocp_if *os = oh->slaves[i];
struct clk *c = os->_clk;
- if (c && !IS_ERR(c) && (os->flags & OCPIF_SWSUP_IDLE))
+ if (c && (os->flags & OCPIF_SWSUP_IDLE))
clk_disable(c);
}
}
@@ -551,14 +550,15 @@ static int _disable_clocks(struct omap_hwmod *oh)
*/
static int _find_mpu_port_index(struct omap_hwmod *oh)
{
- struct omap_hwmod_ocp_if *os;
int i;
int found = 0;
if (!oh || oh->slaves_cnt == 0)
return -EINVAL;
- for (i = 0, os = *oh->slaves; i < oh->slaves_cnt; i++, os++) {
+ for (i = 0; i < oh->slaves_cnt; i++) {
+ struct omap_hwmod_ocp_if *os = oh->slaves[i];
+
if (os->user & OCP_USER_MPU) {
found = 1;
break;
@@ -593,7 +593,7 @@ static void __iomem *_find_mpu_rt_base(struct omap_hwmod *oh, u8 index)
if (!oh || oh->slaves_cnt == 0)
return NULL;
- os = *oh->slaves + index;
+ os = oh->slaves[index];
for (i = 0, mem = os->addr; i < os->addr_cnt; i++, mem++) {
if (mem->flags & ADDR_TYPE_RT) {
@@ -781,9 +781,10 @@ static int _init_clocks(struct omap_hwmod *oh)
ret |= _init_interface_clks(oh);
ret |= _init_opt_clks(oh);
- oh->_state = _HWMOD_STATE_CLKS_INITED;
+ if (!ret)
+ oh->_state = _HWMOD_STATE_CLKS_INITED;
- return ret;
+ return 0;
}
/**
@@ -806,9 +807,9 @@ static int _wait_target_ready(struct omap_hwmod *oh)
if (oh->_int_flags & _HWMOD_NO_MPU_PORT)
return 0;
- os = *oh->slaves + oh->_mpu_port_index;
+ os = oh->slaves[oh->_mpu_port_index];
- if (!(os->flags & OCPIF_HAS_IDLEST))
+ if (oh->flags & HWMOD_NO_IDLEST)
return 0;
/* XXX check module SIDLEMODE */
@@ -819,11 +820,8 @@ static int _wait_target_ready(struct omap_hwmod *oh)
ret = omap2_cm_wait_module_ready(oh->prcm.omap2.module_offs,
oh->prcm.omap2.idlest_reg_id,
oh->prcm.omap2.idlest_idle_bit);
-#if 0
} else if (cpu_is_omap44xx()) {
- ret = omap4_cm_wait_module_ready(oh->prcm.omap4.module_offs,
- oh->prcm.omap4.device_offs);
-#endif
+ ret = omap4_cm_wait_module_ready(oh->prcm.omap4.clkctrl_reg);
} else {
BUG();
};
@@ -912,16 +910,21 @@ static int _enable(struct omap_hwmod *oh)
_add_initiator_dep(oh, mpu_oh);
_enable_clocks(oh);
- if (oh->class->sysc) {
- if (!(oh->_int_flags & _HWMOD_SYSCONFIG_LOADED))
- _update_sysc_cache(oh);
- _sysc_enable(oh);
- }
-
r = _wait_target_ready(oh);
- if (!r)
+ if (!r) {
oh->_state = _HWMOD_STATE_ENABLED;
+ /* Access the sysconfig only if the target is ready */
+ if (oh->class->sysc) {
+ if (!(oh->_int_flags & _HWMOD_SYSCONFIG_LOADED))
+ _update_sysc_cache(oh);
+ _sysc_enable(oh);
+ }
+ } else {
+ pr_debug("omap_hwmod: %s: _wait_target_ready: %d\n",
+ oh->name, r);
+ }
+
return r;
}
@@ -998,18 +1001,18 @@ static int _shutdown(struct omap_hwmod *oh)
*/
static int _setup(struct omap_hwmod *oh)
{
- struct omap_hwmod_ocp_if *os;
- int i;
+ int i, r;
if (!oh)
return -EINVAL;
/* Set iclk autoidle mode */
if (oh->slaves_cnt > 0) {
- for (i = 0, os = *oh->slaves; i < oh->slaves_cnt; i++, os++) {
+ for (i = 0; i < oh->slaves_cnt; i++) {
+ struct omap_hwmod_ocp_if *os = oh->slaves[i];
struct clk *c = os->_clk;
- if (!c || IS_ERR(c))
+ if (!c)
continue;
if (os->flags & OCPIF_SWSUP_IDLE) {
@@ -1023,7 +1026,12 @@ static int _setup(struct omap_hwmod *oh)
oh->_state = _HWMOD_STATE_INITIALIZED;
- _enable(oh);
+ r = _enable(oh);
+ if (r) {
+ pr_warning("omap_hwmod: %s: cannot be enabled (%d)\n",
+ oh->name, oh->_state);
+ return 0;
+ }
if (!(oh->flags & HWMOD_INIT_NO_RESET)) {
/*
@@ -1431,7 +1439,7 @@ int omap_hwmod_count_resources(struct omap_hwmod *oh)
ret = oh->mpu_irqs_cnt + oh->sdma_chs_cnt;
for (i = 0; i < oh->slaves_cnt; i++)
- ret += (*oh->slaves + i)->addr_cnt;
+ ret += oh->slaves[i]->addr_cnt;
return ret;
}
@@ -1472,7 +1480,7 @@ int omap_hwmod_fill_resources(struct omap_hwmod *oh, struct resource *res)
for (i = 0; i < oh->slaves_cnt; i++) {
struct omap_hwmod_ocp_if *os;
- os = *oh->slaves + i;
+ os = oh->slaves[i];
for (j = 0; j < os->addr_cnt; j++) {
(res + r)->start = (os->addr + j)->pa_start;
diff --git a/arch/arm/mach-omap2/omap_hwmod_2420_data.c b/arch/arm/mach-omap2/omap_hwmod_2420_data.c
index eb7ee2453b24..e5530c51f77d 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2420_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2420_data.c
@@ -125,7 +125,7 @@ static struct omap_hwmod_ocp_if *omap2420_mpu_masters[] = {
/* MPU */
static struct omap_hwmod omap2420_mpu_hwmod = {
- .name = "mpu_hwmod",
+ .name = "mpu",
.class = &mpu_hwmod_class,
.main_clk = "mpu_ck",
.masters = omap2420_mpu_masters,
diff --git a/arch/arm/mach-omap2/omap_hwmod_2430_data.c b/arch/arm/mach-omap2/omap_hwmod_2430_data.c
index 241bd8230729..0852d954da40 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2430_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2430_data.c
@@ -127,7 +127,7 @@ static struct omap_hwmod_ocp_if *omap2430_mpu_masters[] = {
/* MPU */
static struct omap_hwmod omap2430_mpu_hwmod = {
- .name = "mpu_hwmod",
+ .name = "mpu",
.class = &mpu_hwmod_class,
.main_clk = "mpu_ck",
.masters = omap2430_mpu_masters,
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index ed6084004260..39b0c0eaa37d 100644
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -156,7 +156,7 @@ static struct omap_hwmod_ocp_if *omap3xxx_mpu_masters[] = {
/* MPU */
static struct omap_hwmod omap3xxx_mpu_hwmod = {
- .name = "mpu_hwmod",
+ .name = "mpu",
.class = &mpu_hwmod_class,
.main_clk = "arm_fck",
.masters = omap3xxx_mpu_masters,
diff --git a/arch/arm/mach-omap2/pm-debug.c b/arch/arm/mach-omap2/pm-debug.c
index 6cac9817c243..723b44e252fd 100644
--- a/arch/arm/mach-omap2/pm-debug.c
+++ b/arch/arm/mach-omap2/pm-debug.c
@@ -548,6 +548,9 @@ static int option_set(void *data, u64 val)
{
u32 *option = data;
+ if (option == &wakeup_timer_milliseconds && val >= 1000)
+ return -EINVAL;
+
*option = val;
if (option == &enable_off_mode)
diff --git a/arch/arm/mach-omap2/pm.h b/arch/arm/mach-omap2/pm.h
index bd6466a2b039..3de6ece23fc8 100644
--- a/arch/arm/mach-omap2/pm.h
+++ b/arch/arm/mach-omap2/pm.h
@@ -43,6 +43,7 @@ extern int omap3_pm_get_suspend_state(struct powerdomain *pwrdm);
extern int omap3_pm_set_suspend_state(struct powerdomain *pwrdm, int state);
extern u32 wakeup_timer_seconds;
+extern u32 wakeup_timer_milliseconds;
extern struct omap_dm_timer *gptimer_wakeup;
#ifdef CONFIG_PM_DEBUG
diff --git a/arch/arm/mach-omap2/pm24xx.c b/arch/arm/mach-omap2/pm24xx.c
index 374299ea7ade..e321281ab6e1 100644
--- a/arch/arm/mach-omap2/pm24xx.c
+++ b/arch/arm/mach-omap2/pm24xx.c
@@ -70,8 +70,8 @@ static int omap2_fclks_active(void)
f2 = cm_read_mod_reg(CORE_MOD, OMAP24XX_CM_FCLKEN2);
/* Ignore UART clocks. These are handled by UART core (serial.c) */
- f1 &= ~(OMAP24XX_EN_UART1 | OMAP24XX_EN_UART2);
- f2 &= ~OMAP24XX_EN_UART3;
+ f1 &= ~(OMAP24XX_EN_UART1_MASK | OMAP24XX_EN_UART2_MASK);
+ f2 &= ~OMAP24XX_EN_UART3_MASK;
if (f1 | f2)
return 1;
@@ -107,7 +107,7 @@ static void omap2_enter_full_retention(void)
l = omap_ctrl_readl(OMAP2_CONTROL_DEVCONF0) | OMAP24XX_USBSTANDBYCTRL;
omap_ctrl_writel(l, OMAP2_CONTROL_DEVCONF0);
- omap2_gpio_prepare_for_retention();
+ omap2_gpio_prepare_for_idle(PWRDM_POWER_RET);
if (omap2_pm_debug) {
omap2_pm_dump(0, 0, 0);
@@ -141,7 +141,7 @@ no_sleep:
tmp = timespec_to_ns(&ts_idle) * NSEC_PER_USEC;
omap2_pm_dump(0, 1, tmp);
}
- omap2_gpio_resume_after_retention();
+ omap2_gpio_resume_after_idle();
clk_enable(osc_ck);
@@ -170,7 +170,7 @@ static int omap2_i2c_active(void)
u32 l;
l = cm_read_mod_reg(CORE_MOD, CM_FCLKEN1);
- return l & (OMAP2420_EN_I2C2 | OMAP2420_EN_I2C1);
+ return l & (OMAP2420_EN_I2C2_MASK | OMAP2420_EN_I2C1_MASK);
}
static int sti_console_enabled;
@@ -181,13 +181,13 @@ static int omap2_allow_mpu_retention(void)
/* Check for MMC, UART2, UART1, McSPI2, McSPI1 and DSS1. */
l = cm_read_mod_reg(CORE_MOD, CM_FCLKEN1);
- if (l & (OMAP2420_EN_MMC | OMAP24XX_EN_UART2 |
- OMAP24XX_EN_UART1 | OMAP24XX_EN_MCSPI2 |
- OMAP24XX_EN_MCSPI1 | OMAP24XX_EN_DSS1))
+ if (l & (OMAP2420_EN_MMC_MASK | OMAP24XX_EN_UART2_MASK |
+ OMAP24XX_EN_UART1_MASK | OMAP24XX_EN_MCSPI2_MASK |
+ OMAP24XX_EN_MCSPI1_MASK | OMAP24XX_EN_DSS1_MASK))
return 0;
/* Check for UART3. */
l = cm_read_mod_reg(CORE_MOD, OMAP24XX_CM_FCLKEN2);
- if (l & OMAP24XX_EN_UART3)
+ if (l & OMAP24XX_EN_UART3_MASK)
return 0;
if (sti_console_enabled)
return 0;
@@ -215,12 +215,12 @@ static void omap2_enter_mpu_retention(void)
/* Try to enter MPU retention */
prm_write_mod_reg((0x01 << OMAP_POWERSTATE_SHIFT) |
- OMAP_LOGICRETSTATE,
+ OMAP_LOGICRETSTATE_MASK,
MPU_MOD, OMAP2_PM_PWSTCTRL);
} else {
/* Block MPU retention */
- prm_write_mod_reg(OMAP_LOGICRETSTATE, MPU_MOD,
+ prm_write_mod_reg(OMAP_LOGICRETSTATE_MASK, MPU_MOD,
OMAP2_PM_PWSTCTRL);
only_idle = 1;
}
@@ -288,7 +288,8 @@ static int omap2_pm_suspend(void)
u32 wken_wkup, mir1;
wken_wkup = prm_read_mod_reg(WKUP_MOD, PM_WKEN);
- prm_write_mod_reg(wken_wkup & ~OMAP24XX_EN_GPT1, WKUP_MOD, PM_WKEN);
+ wken_wkup &= ~OMAP24XX_EN_GPT1_MASK;
+ prm_write_mod_reg(wken_wkup, WKUP_MOD, PM_WKEN);
/* Mask GPT1 */
mir1 = omap_readl(0x480fe0a4);
@@ -351,7 +352,7 @@ static void __init prcm_setup_regs(void)
struct powerdomain *pwrdm;
/* Enable autoidle */
- prm_write_mod_reg(OMAP24XX_AUTOIDLE, OCP_MOD,
+ prm_write_mod_reg(OMAP24XX_AUTOIDLE_MASK, OCP_MOD,
OMAP2_PRCM_SYSCONFIG_OFFSET);
/*
@@ -390,53 +391,54 @@ static void __init prcm_setup_regs(void)
clkdm_add_wkdep(mpu_clkdm, wkup_clkdm);
/* Enable clock autoidle for all domains */
- cm_write_mod_reg(OMAP24XX_AUTO_CAM |
- OMAP24XX_AUTO_MAILBOXES |
- OMAP24XX_AUTO_WDT4 |
- OMAP2420_AUTO_WDT3 |
- OMAP24XX_AUTO_MSPRO |
- OMAP2420_AUTO_MMC |
- OMAP24XX_AUTO_FAC |
- OMAP2420_AUTO_EAC |
- OMAP24XX_AUTO_HDQ |
- OMAP24XX_AUTO_UART2 |
- OMAP24XX_AUTO_UART1 |
- OMAP24XX_AUTO_I2C2 |
- OMAP24XX_AUTO_I2C1 |
- OMAP24XX_AUTO_MCSPI2 |
- OMAP24XX_AUTO_MCSPI1 |
- OMAP24XX_AUTO_MCBSP2 |
- OMAP24XX_AUTO_MCBSP1 |
- OMAP24XX_AUTO_GPT12 |
- OMAP24XX_AUTO_GPT11 |
- OMAP24XX_AUTO_GPT10 |
- OMAP24XX_AUTO_GPT9 |
- OMAP24XX_AUTO_GPT8 |
- OMAP24XX_AUTO_GPT7 |
- OMAP24XX_AUTO_GPT6 |
- OMAP24XX_AUTO_GPT5 |
- OMAP24XX_AUTO_GPT4 |
- OMAP24XX_AUTO_GPT3 |
- OMAP24XX_AUTO_GPT2 |
- OMAP2420_AUTO_VLYNQ |
- OMAP24XX_AUTO_DSS,
+ cm_write_mod_reg(OMAP24XX_AUTO_CAM_MASK |
+ OMAP24XX_AUTO_MAILBOXES_MASK |
+ OMAP24XX_AUTO_WDT4_MASK |
+ OMAP2420_AUTO_WDT3_MASK |
+ OMAP24XX_AUTO_MSPRO_MASK |
+ OMAP2420_AUTO_MMC_MASK |
+ OMAP24XX_AUTO_FAC_MASK |
+ OMAP2420_AUTO_EAC_MASK |
+ OMAP24XX_AUTO_HDQ_MASK |
+ OMAP24XX_AUTO_UART2_MASK |
+ OMAP24XX_AUTO_UART1_MASK |
+ OMAP24XX_AUTO_I2C2_MASK |
+ OMAP24XX_AUTO_I2C1_MASK |
+ OMAP24XX_AUTO_MCSPI2_MASK |
+ OMAP24XX_AUTO_MCSPI1_MASK |
+ OMAP24XX_AUTO_MCBSP2_MASK |
+ OMAP24XX_AUTO_MCBSP1_MASK |
+ OMAP24XX_AUTO_GPT12_MASK |
+ OMAP24XX_AUTO_GPT11_MASK |
+ OMAP24XX_AUTO_GPT10_MASK |
+ OMAP24XX_AUTO_GPT9_MASK |
+ OMAP24XX_AUTO_GPT8_MASK |
+ OMAP24XX_AUTO_GPT7_MASK |
+ OMAP24XX_AUTO_GPT6_MASK |
+ OMAP24XX_AUTO_GPT5_MASK |
+ OMAP24XX_AUTO_GPT4_MASK |
+ OMAP24XX_AUTO_GPT3_MASK |
+ OMAP24XX_AUTO_GPT2_MASK |
+ OMAP2420_AUTO_VLYNQ_MASK |
+ OMAP24XX_AUTO_DSS_MASK,
CORE_MOD, CM_AUTOIDLE1);
- cm_write_mod_reg(OMAP24XX_AUTO_UART3 |
- OMAP24XX_AUTO_SSI |
- OMAP24XX_AUTO_USB,
+ cm_write_mod_reg(OMAP24XX_AUTO_UART3_MASK |
+ OMAP24XX_AUTO_SSI_MASK |
+ OMAP24XX_AUTO_USB_MASK,
CORE_MOD, CM_AUTOIDLE2);
- cm_write_mod_reg(OMAP24XX_AUTO_SDRC |
- OMAP24XX_AUTO_GPMC |
- OMAP24XX_AUTO_SDMA,
+ cm_write_mod_reg(OMAP24XX_AUTO_SDRC_MASK |
+ OMAP24XX_AUTO_GPMC_MASK |
+ OMAP24XX_AUTO_SDMA_MASK,
CORE_MOD, CM_AUTOIDLE3);
- cm_write_mod_reg(OMAP24XX_AUTO_PKA |
- OMAP24XX_AUTO_AES |
- OMAP24XX_AUTO_RNG |
- OMAP24XX_AUTO_SHA |
- OMAP24XX_AUTO_DES,
+ cm_write_mod_reg(OMAP24XX_AUTO_PKA_MASK |
+ OMAP24XX_AUTO_AES_MASK |
+ OMAP24XX_AUTO_RNG_MASK |
+ OMAP24XX_AUTO_SHA_MASK |
+ OMAP24XX_AUTO_DES_MASK,
CORE_MOD, OMAP24XX_CM_AUTOIDLE4);
- cm_write_mod_reg(OMAP2420_AUTO_DSP_IPI, OMAP24XX_DSP_MOD, CM_AUTOIDLE);
+ cm_write_mod_reg(OMAP2420_AUTO_DSP_IPI_MASK, OMAP24XX_DSP_MOD,
+ CM_AUTOIDLE);
/* Put DPLL and both APLLs into autoidle mode */
cm_write_mod_reg((0x03 << OMAP24XX_AUTO_DPLL_SHIFT) |
@@ -444,12 +446,12 @@ static void __init prcm_setup_regs(void)
(0x03 << OMAP24XX_AUTO_54M_SHIFT),
PLL_MOD, CM_AUTOIDLE);
- cm_write_mod_reg(OMAP24XX_AUTO_OMAPCTRL |
- OMAP24XX_AUTO_WDT1 |
- OMAP24XX_AUTO_MPU_WDT |
- OMAP24XX_AUTO_GPIOS |
- OMAP24XX_AUTO_32KSYNC |
- OMAP24XX_AUTO_GPT1,
+ cm_write_mod_reg(OMAP24XX_AUTO_OMAPCTRL_MASK |
+ OMAP24XX_AUTO_WDT1_MASK |
+ OMAP24XX_AUTO_MPU_WDT_MASK |
+ OMAP24XX_AUTO_GPIOS_MASK |
+ OMAP24XX_AUTO_32KSYNC_MASK |
+ OMAP24XX_AUTO_GPT1_MASK,
WKUP_MOD, CM_AUTOIDLE);
/* REVISIT: Configure number of 32 kHz clock cycles for sys_clk
@@ -460,15 +462,15 @@ static void __init prcm_setup_regs(void)
/* Configure automatic voltage transition */
prm_write_mod_reg(2 << OMAP_SETUP_TIME_SHIFT, OMAP24XX_GR_MOD,
OMAP2_PRCM_VOLTSETUP_OFFSET);
- prm_write_mod_reg(OMAP24XX_AUTO_EXTVOLT |
+ prm_write_mod_reg(OMAP24XX_AUTO_EXTVOLT_MASK |
(0x1 << OMAP24XX_SETOFF_LEVEL_SHIFT) |
- OMAP24XX_MEMRETCTRL |
+ OMAP24XX_MEMRETCTRL_MASK |
(0x1 << OMAP24XX_SETRET_LEVEL_SHIFT) |
(0x0 << OMAP24XX_VOLT_LEVEL_SHIFT),
OMAP24XX_GR_MOD, OMAP2_PRCM_VOLTCTRL_OFFSET);
/* Enable wake-up events */
- prm_write_mod_reg(OMAP24XX_EN_GPIOS | OMAP24XX_EN_GPT1,
+ prm_write_mod_reg(OMAP24XX_EN_GPIOS_MASK | OMAP24XX_EN_GPT1_MASK,
WKUP_MOD, PM_WKEN);
}
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index ea0000bc5358..2e967716cc3f 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -58,6 +58,7 @@
u32 enable_off_mode;
u32 sleep_while_idle;
u32 wakeup_timer_seconds;
+u32 wakeup_timer_milliseconds;
struct power_state {
struct powerdomain *pwrdm;
@@ -93,19 +94,20 @@ static void omap3_enable_io_chain(void)
int timeout = 0;
if (omap_rev() >= OMAP3430_REV_ES3_1) {
- prm_set_mod_reg_bits(OMAP3430_EN_IO_CHAIN, WKUP_MOD, PM_WKEN);
+ prm_set_mod_reg_bits(OMAP3430_EN_IO_CHAIN_MASK, WKUP_MOD,
+ PM_WKEN);
/* Do a readback to assure write has been done */
prm_read_mod_reg(WKUP_MOD, PM_WKEN);
while (!(prm_read_mod_reg(WKUP_MOD, PM_WKST) &
- OMAP3430_ST_IO_CHAIN)) {
+ OMAP3430_ST_IO_CHAIN_MASK)) {
timeout++;
if (timeout > 1000) {
printk(KERN_ERR "Wake up daisy chain "
"activation failed.\n");
return;
}
- prm_set_mod_reg_bits(OMAP3430_ST_IO_CHAIN,
+ prm_set_mod_reg_bits(OMAP3430_ST_IO_CHAIN_MASK,
WKUP_MOD, PM_WKST);
}
}
@@ -114,7 +116,8 @@ static void omap3_enable_io_chain(void)
static void omap3_disable_io_chain(void)
{
if (omap_rev() >= OMAP3430_REV_ES3_1)
- prm_clear_mod_reg_bits(OMAP3430_EN_IO_CHAIN, WKUP_MOD, PM_WKEN);
+ prm_clear_mod_reg_bits(OMAP3430_EN_IO_CHAIN_MASK, WKUP_MOD,
+ PM_WKEN);
}
static void omap3_core_save_context(void)
@@ -267,14 +270,18 @@ static int _prcm_int_handle_wakeup(void)
*/
static irqreturn_t prcm_interrupt_handler (int irq, void *dev_id)
{
- u32 irqstatus_mpu;
+ u32 irqenable_mpu, irqstatus_mpu;
int c = 0;
- do {
- irqstatus_mpu = prm_read_mod_reg(OCP_MOD,
- OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
+ irqenable_mpu = prm_read_mod_reg(OCP_MOD,
+ OMAP3_PRM_IRQENABLE_MPU_OFFSET);
+ irqstatus_mpu = prm_read_mod_reg(OCP_MOD,
+ OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
+ irqstatus_mpu &= irqenable_mpu;
- if (irqstatus_mpu & (OMAP3430_WKUP_ST | OMAP3430_IO_ST)) {
+ do {
+ if (irqstatus_mpu & (OMAP3430_WKUP_ST_MASK |
+ OMAP3430_IO_ST_MASK)) {
c = _prcm_int_handle_wakeup();
/*
@@ -292,7 +299,11 @@ static irqreturn_t prcm_interrupt_handler (int irq, void *dev_id)
prm_write_mod_reg(irqstatus_mpu, OCP_MOD,
OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
- } while (prm_read_mod_reg(OCP_MOD, OMAP3_PRM_IRQSTATUS_MPU_OFFSET));
+ irqstatus_mpu = prm_read_mod_reg(OCP_MOD,
+ OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
+ irqstatus_mpu &= irqenable_mpu;
+
+ } while (irqstatus_mpu);
return IRQ_HANDLED;
}
@@ -371,12 +382,19 @@ void omap_sram_idle(void)
if (pwrdm_read_pwrst(neon_pwrdm) == PWRDM_POWER_ON)
pwrdm_set_next_pwrst(neon_pwrdm, mpu_next_state);
- /* PER */
+ /* Enable IO-PAD and IO-CHAIN wakeups */
per_next_state = pwrdm_read_next_pwrst(per_pwrdm);
core_next_state = pwrdm_read_next_pwrst(core_pwrdm);
+ if (per_next_state < PWRDM_POWER_ON ||
+ core_next_state < PWRDM_POWER_ON) {
+ prm_set_mod_reg_bits(OMAP3430_EN_IO_MASK, WKUP_MOD, PM_WKEN);
+ omap3_enable_io_chain();
+ }
+
+ /* PER */
if (per_next_state < PWRDM_POWER_ON) {
omap_uart_prepare_idle(2);
- omap2_gpio_prepare_for_retention();
+ omap2_gpio_prepare_for_idle(per_next_state);
if (per_next_state == PWRDM_POWER_OFF) {
if (core_next_state == PWRDM_POWER_ON) {
per_next_state = PWRDM_POWER_RET;
@@ -398,10 +416,8 @@ void omap_sram_idle(void)
omap3_core_save_context();
omap3_prcm_save_context();
}
- /* Enable IO-PAD and IO-CHAIN wakeups */
- prm_set_mod_reg_bits(OMAP3430_EN_IO, WKUP_MOD, PM_WKEN);
- omap3_enable_io_chain();
}
+
omap3_intc_prepare_idle();
/*
@@ -445,7 +461,7 @@ void omap_sram_idle(void)
omap_uart_resume_idle(0);
omap_uart_resume_idle(1);
if (core_next_state == PWRDM_POWER_OFF)
- prm_clear_mod_reg_bits(OMAP3430_AUTO_OFF,
+ prm_clear_mod_reg_bits(OMAP3430_AUTO_OFF_MASK,
OMAP3430_GR_MOD,
OMAP3_PRM_VOLTCTRL_OFFSET);
}
@@ -454,9 +470,9 @@ void omap_sram_idle(void)
/* PER */
if (per_next_state < PWRDM_POWER_ON) {
per_prev_state = pwrdm_read_prev_pwrst(per_pwrdm);
+ omap2_gpio_resume_after_idle();
if (per_prev_state == PWRDM_POWER_OFF)
omap3_per_restore_context();
- omap2_gpio_resume_after_retention();
omap_uart_resume_idle(2);
if (per_state_modified)
pwrdm_set_next_pwrst(per_pwrdm, PWRDM_POWER_OFF);
@@ -464,7 +480,7 @@ void omap_sram_idle(void)
/* Disable IO-PAD and IO-CHAIN wakeup */
if (core_next_state < PWRDM_POWER_ON) {
- prm_clear_mod_reg_bits(OMAP3430_EN_IO, WKUP_MOD, PM_WKEN);
+ prm_clear_mod_reg_bits(OMAP3430_EN_IO_MASK, WKUP_MOD, PM_WKEN);
omap3_disable_io_chain();
}
@@ -548,20 +564,21 @@ out:
#ifdef CONFIG_SUSPEND
static suspend_state_t suspend_state;
-static void omap2_pm_wakeup_on_timer(u32 seconds)
+static void omap2_pm_wakeup_on_timer(u32 seconds, u32 milliseconds)
{
u32 tick_rate, cycles;
- if (!seconds)
+ if (!seconds && !milliseconds)
return;
tick_rate = clk_get_rate(omap_dm_timer_get_fclk(gptimer_wakeup));
- cycles = tick_rate * seconds;
+ cycles = tick_rate * seconds + tick_rate * milliseconds / 1000;
omap_dm_timer_stop(gptimer_wakeup);
omap_dm_timer_set_load_start(gptimer_wakeup, 0, 0xffffffff - cycles);
- pr_info("PM: Resume timer in %d secs (%d ticks at %d ticks/sec.)\n",
- seconds, cycles, tick_rate);
+ pr_info("PM: Resume timer in %u.%03u secs"
+ " (%d ticks at %d ticks/sec.)\n",
+ seconds, milliseconds, cycles, tick_rate);
}
static int omap3_pm_prepare(void)
@@ -575,8 +592,9 @@ static int omap3_pm_suspend(void)
struct power_state *pwrst;
int state, ret = 0;
- if (wakeup_timer_seconds)
- omap2_pm_wakeup_on_timer(wakeup_timer_seconds);
+ if (wakeup_timer_seconds || wakeup_timer_milliseconds)
+ omap2_pm_wakeup_on_timer(wakeup_timer_seconds,
+ wakeup_timer_milliseconds);
/* Read current next_pwrsts */
list_for_each_entry(pwrst, &pwrst_list, node)
@@ -683,9 +701,9 @@ static void __init omap3_iva_idle(void)
return;
/* Reset IVA2 */
- prm_write_mod_reg(OMAP3430_RST1_IVA2 |
- OMAP3430_RST2_IVA2 |
- OMAP3430_RST3_IVA2,
+ prm_write_mod_reg(OMAP3430_RST1_IVA2_MASK |
+ OMAP3430_RST2_IVA2_MASK |
+ OMAP3430_RST3_IVA2_MASK,
OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
/* Enable IVA2 clock */
@@ -703,9 +721,9 @@ static void __init omap3_iva_idle(void)
cm_write_mod_reg(0, OMAP3430_IVA2_MOD, CM_FCLKEN);
/* Reset IVA2 */
- prm_write_mod_reg(OMAP3430_RST1_IVA2 |
- OMAP3430_RST2_IVA2 |
- OMAP3430_RST3_IVA2,
+ prm_write_mod_reg(OMAP3430_RST1_IVA2_MASK |
+ OMAP3430_RST2_IVA2_MASK |
+ OMAP3430_RST3_IVA2_MASK,
OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
}
@@ -727,8 +745,8 @@ static void __init omap3_d2d_idle(void)
omap_ctrl_writew(padconf, OMAP3_PADCONF_SAD2D_IDLEACK);
/* reset modem */
- prm_write_mod_reg(OMAP3430_RM_RSTCTRL_CORE_MODEM_SW_RSTPWRON |
- OMAP3430_RM_RSTCTRL_CORE_MODEM_SW_RST,
+ prm_write_mod_reg(OMAP3430_RM_RSTCTRL_CORE_MODEM_SW_RSTPWRON_MASK |
+ OMAP3430_RM_RSTCTRL_CORE_MODEM_SW_RST_MASK,
CORE_MOD, OMAP2_RM_RSTCTRL);
prm_write_mod_reg(0, CORE_MOD, OMAP2_RM_RSTCTRL);
}
@@ -754,102 +772,102 @@ static void __init prcm_setup_regs(void)
* Note that in the long run this should be done by clockfw
*/
cm_write_mod_reg(
- OMAP3430_AUTO_MODEM |
- OMAP3430ES2_AUTO_MMC3 |
- OMAP3430ES2_AUTO_ICR |
- OMAP3430_AUTO_AES2 |
- OMAP3430_AUTO_SHA12 |
- OMAP3430_AUTO_DES2 |
- OMAP3430_AUTO_MMC2 |
- OMAP3430_AUTO_MMC1 |
- OMAP3430_AUTO_MSPRO |
- OMAP3430_AUTO_HDQ |
- OMAP3430_AUTO_MCSPI4 |
- OMAP3430_AUTO_MCSPI3 |
- OMAP3430_AUTO_MCSPI2 |
- OMAP3430_AUTO_MCSPI1 |
- OMAP3430_AUTO_I2C3 |
- OMAP3430_AUTO_I2C2 |
- OMAP3430_AUTO_I2C1 |
- OMAP3430_AUTO_UART2 |
- OMAP3430_AUTO_UART1 |
- OMAP3430_AUTO_GPT11 |
- OMAP3430_AUTO_GPT10 |
- OMAP3430_AUTO_MCBSP5 |
- OMAP3430_AUTO_MCBSP1 |
- OMAP3430ES1_AUTO_FAC | /* This is es1 only */
- OMAP3430_AUTO_MAILBOXES |
- OMAP3430_AUTO_OMAPCTRL |
- OMAP3430ES1_AUTO_FSHOSTUSB |
- OMAP3430_AUTO_HSOTGUSB |
- OMAP3430_AUTO_SAD2D |
- OMAP3430_AUTO_SSI,
+ OMAP3430_AUTO_MODEM_MASK |
+ OMAP3430ES2_AUTO_MMC3_MASK |
+ OMAP3430ES2_AUTO_ICR_MASK |
+ OMAP3430_AUTO_AES2_MASK |
+ OMAP3430_AUTO_SHA12_MASK |
+ OMAP3430_AUTO_DES2_MASK |
+ OMAP3430_AUTO_MMC2_MASK |
+ OMAP3430_AUTO_MMC1_MASK |
+ OMAP3430_AUTO_MSPRO_MASK |
+ OMAP3430_AUTO_HDQ_MASK |
+ OMAP3430_AUTO_MCSPI4_MASK |
+ OMAP3430_AUTO_MCSPI3_MASK |
+ OMAP3430_AUTO_MCSPI2_MASK |
+ OMAP3430_AUTO_MCSPI1_MASK |
+ OMAP3430_AUTO_I2C3_MASK |
+ OMAP3430_AUTO_I2C2_MASK |
+ OMAP3430_AUTO_I2C1_MASK |
+ OMAP3430_AUTO_UART2_MASK |
+ OMAP3430_AUTO_UART1_MASK |
+ OMAP3430_AUTO_GPT11_MASK |
+ OMAP3430_AUTO_GPT10_MASK |
+ OMAP3430_AUTO_MCBSP5_MASK |
+ OMAP3430_AUTO_MCBSP1_MASK |
+ OMAP3430ES1_AUTO_FAC_MASK | /* This is es1 only */
+ OMAP3430_AUTO_MAILBOXES_MASK |
+ OMAP3430_AUTO_OMAPCTRL_MASK |
+ OMAP3430ES1_AUTO_FSHOSTUSB_MASK |
+ OMAP3430_AUTO_HSOTGUSB_MASK |
+ OMAP3430_AUTO_SAD2D_MASK |
+ OMAP3430_AUTO_SSI_MASK,
CORE_MOD, CM_AUTOIDLE1);
cm_write_mod_reg(
- OMAP3430_AUTO_PKA |
- OMAP3430_AUTO_AES1 |
- OMAP3430_AUTO_RNG |
- OMAP3430_AUTO_SHA11 |
- OMAP3430_AUTO_DES1,
+ OMAP3430_AUTO_PKA_MASK |
+ OMAP3430_AUTO_AES1_MASK |
+ OMAP3430_AUTO_RNG_MASK |
+ OMAP3430_AUTO_SHA11_MASK |
+ OMAP3430_AUTO_DES1_MASK,
CORE_MOD, CM_AUTOIDLE2);
if (omap_rev() > OMAP3430_REV_ES1_0) {
cm_write_mod_reg(
- OMAP3430_AUTO_MAD2D |
- OMAP3430ES2_AUTO_USBTLL,
+ OMAP3430_AUTO_MAD2D_MASK |
+ OMAP3430ES2_AUTO_USBTLL_MASK,
CORE_MOD, CM_AUTOIDLE3);
}
cm_write_mod_reg(
- OMAP3430_AUTO_WDT2 |
- OMAP3430_AUTO_WDT1 |
- OMAP3430_AUTO_GPIO1 |
- OMAP3430_AUTO_32KSYNC |
- OMAP3430_AUTO_GPT12 |
- OMAP3430_AUTO_GPT1 ,
+ OMAP3430_AUTO_WDT2_MASK |
+ OMAP3430_AUTO_WDT1_MASK |
+ OMAP3430_AUTO_GPIO1_MASK |
+ OMAP3430_AUTO_32KSYNC_MASK |
+ OMAP3430_AUTO_GPT12_MASK |
+ OMAP3430_AUTO_GPT1_MASK,
WKUP_MOD, CM_AUTOIDLE);
cm_write_mod_reg(
- OMAP3430_AUTO_DSS,
+ OMAP3430_AUTO_DSS_MASK,
OMAP3430_DSS_MOD,
CM_AUTOIDLE);
cm_write_mod_reg(
- OMAP3430_AUTO_CAM,
+ OMAP3430_AUTO_CAM_MASK,
OMAP3430_CAM_MOD,
CM_AUTOIDLE);
cm_write_mod_reg(
- OMAP3430_AUTO_GPIO6 |
- OMAP3430_AUTO_GPIO5 |
- OMAP3430_AUTO_GPIO4 |
- OMAP3430_AUTO_GPIO3 |
- OMAP3430_AUTO_GPIO2 |
- OMAP3430_AUTO_WDT3 |
- OMAP3430_AUTO_UART3 |
- OMAP3430_AUTO_GPT9 |
- OMAP3430_AUTO_GPT8 |
- OMAP3430_AUTO_GPT7 |
- OMAP3430_AUTO_GPT6 |
- OMAP3430_AUTO_GPT5 |
- OMAP3430_AUTO_GPT4 |
- OMAP3430_AUTO_GPT3 |
- OMAP3430_AUTO_GPT2 |
- OMAP3430_AUTO_MCBSP4 |
- OMAP3430_AUTO_MCBSP3 |
- OMAP3430_AUTO_MCBSP2,
+ OMAP3430_AUTO_GPIO6_MASK |
+ OMAP3430_AUTO_GPIO5_MASK |
+ OMAP3430_AUTO_GPIO4_MASK |
+ OMAP3430_AUTO_GPIO3_MASK |
+ OMAP3430_AUTO_GPIO2_MASK |
+ OMAP3430_AUTO_WDT3_MASK |
+ OMAP3430_AUTO_UART3_MASK |
+ OMAP3430_AUTO_GPT9_MASK |
+ OMAP3430_AUTO_GPT8_MASK |
+ OMAP3430_AUTO_GPT7_MASK |
+ OMAP3430_AUTO_GPT6_MASK |
+ OMAP3430_AUTO_GPT5_MASK |
+ OMAP3430_AUTO_GPT4_MASK |
+ OMAP3430_AUTO_GPT3_MASK |
+ OMAP3430_AUTO_GPT2_MASK |
+ OMAP3430_AUTO_MCBSP4_MASK |
+ OMAP3430_AUTO_MCBSP3_MASK |
+ OMAP3430_AUTO_MCBSP2_MASK,
OMAP3430_PER_MOD,
CM_AUTOIDLE);
if (omap_rev() > OMAP3430_REV_ES1_0) {
cm_write_mod_reg(
- OMAP3430ES2_AUTO_USBHOST,
+ OMAP3430ES2_AUTO_USBHOST_MASK,
OMAP3430ES2_USBHOST_MOD,
CM_AUTOIDLE);
}
- omap_ctrl_writel(OMAP3430_AUTOIDLE, OMAP2_CONTROL_SYSCONFIG);
+ omap_ctrl_writel(OMAP3430_AUTOIDLE_MASK, OMAP2_CONTROL_SYSCONFIG);
/*
* Set all plls to autoidle. This is needed until autoidle is
@@ -879,35 +897,40 @@ static void __init prcm_setup_regs(void)
OMAP3_PRM_CLKSRC_CTRL_OFFSET);
	/* setup wakeup source */
- prm_write_mod_reg(OMAP3430_EN_IO | OMAP3430_EN_GPIO1 |
- OMAP3430_EN_GPT1 | OMAP3430_EN_GPT12,
+ prm_write_mod_reg(OMAP3430_EN_IO_MASK | OMAP3430_EN_GPIO1_MASK |
+ OMAP3430_EN_GPT1_MASK | OMAP3430_EN_GPT12_MASK,
WKUP_MOD, PM_WKEN);
/* No need to write EN_IO, that is always enabled */
- prm_write_mod_reg(OMAP3430_EN_GPIO1 | OMAP3430_EN_GPT1 |
- OMAP3430_EN_GPT12,
+ prm_write_mod_reg(OMAP3430_GRPSEL_GPIO1_MASK |
+ OMAP3430_GRPSEL_GPT1_MASK |
+ OMAP3430_GRPSEL_GPT12_MASK,
WKUP_MOD, OMAP3430_PM_MPUGRPSEL);
/* For some reason IO doesn't generate wakeup event even if
	 * it is selected to mpu wakeup group */
- prm_write_mod_reg(OMAP3430_IO_EN | OMAP3430_WKUP_EN,
+ prm_write_mod_reg(OMAP3430_IO_EN_MASK | OMAP3430_WKUP_EN_MASK,
OCP_MOD, OMAP3_PRM_IRQENABLE_MPU_OFFSET);
/* Enable PM_WKEN to support DSS LPR */
- prm_write_mod_reg(OMAP3430_PM_WKEN_DSS_EN_DSS,
+ prm_write_mod_reg(OMAP3430_PM_WKEN_DSS_EN_DSS_MASK,
OMAP3430_DSS_MOD, PM_WKEN);
/* Enable wakeups in PER */
- prm_write_mod_reg(OMAP3430_EN_GPIO2 | OMAP3430_EN_GPIO3 |
- OMAP3430_EN_GPIO4 | OMAP3430_EN_GPIO5 |
- OMAP3430_EN_GPIO6 | OMAP3430_EN_UART3 |
- OMAP3430_EN_MCBSP2 | OMAP3430_EN_MCBSP3 |
- OMAP3430_EN_MCBSP4,
+ prm_write_mod_reg(OMAP3430_EN_GPIO2_MASK | OMAP3430_EN_GPIO3_MASK |
+ OMAP3430_EN_GPIO4_MASK | OMAP3430_EN_GPIO5_MASK |
+ OMAP3430_EN_GPIO6_MASK | OMAP3430_EN_UART3_MASK |
+ OMAP3430_EN_MCBSP2_MASK | OMAP3430_EN_MCBSP3_MASK |
+ OMAP3430_EN_MCBSP4_MASK,
OMAP3430_PER_MOD, PM_WKEN);
/* and allow them to wake up MPU */
- prm_write_mod_reg(OMAP3430_GRPSEL_GPIO2 | OMAP3430_EN_GPIO3 |
- OMAP3430_GRPSEL_GPIO4 | OMAP3430_EN_GPIO5 |
- OMAP3430_GRPSEL_GPIO6 | OMAP3430_EN_UART3 |
- OMAP3430_EN_MCBSP2 | OMAP3430_EN_MCBSP3 |
- OMAP3430_EN_MCBSP4,
+ prm_write_mod_reg(OMAP3430_GRPSEL_GPIO2_MASK |
+ OMAP3430_GRPSEL_GPIO3_MASK |
+ OMAP3430_GRPSEL_GPIO4_MASK |
+ OMAP3430_GRPSEL_GPIO5_MASK |
+ OMAP3430_GRPSEL_GPIO6_MASK |
+ OMAP3430_GRPSEL_UART3_MASK |
+ OMAP3430_GRPSEL_MCBSP2_MASK |
+ OMAP3430_GRPSEL_MCBSP3_MASK |
+ OMAP3430_GRPSEL_MCBSP4_MASK,
OMAP3430_PER_MOD, OMAP3430_PM_MPUGRPSEL);
/* Don't attach IVA interrupts */
@@ -1080,14 +1103,6 @@ static int __init omap3_pm_init(void)
omap3_idle_init();
clkdm_add_wkdep(neon_clkdm, mpu_clkdm);
- /*
- * REVISIT: This wkdep is only necessary when GPIO2-6 are enabled for
- * IO-pad wakeup. Otherwise it will unnecessarily waste power
- * waking up PER with every CORE wakeup - see
- * http://marc.info/?l=linux-omap&m=121852150710062&w=2
- */
- clkdm_add_wkdep(per_clkdm, core_clkdm);
-
if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
omap3_secure_ram_storage =
kmalloc(0x803F, GFP_KERNEL);
diff --git a/arch/arm/mach-omap2/powerdomain.c b/arch/arm/mach-omap2/powerdomain.c
index ebfce7d1a5d3..a2904aa7065e 100644
--- a/arch/arm/mach-omap2/powerdomain.c
+++ b/arch/arm/mach-omap2/powerdomain.c
@@ -5,8 +5,8 @@
* Copyright (C) 2007-2009 Nokia Corporation
*
* Written by Paul Walmsley
- *
* Added OMAP4 specific support by Abhijit Pagare <abhijitpagare@ti.com>
+ * State counting code by Tero Kristo <tero.kristo@nokia.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -64,10 +64,10 @@ static u16 pwrstst_reg_offs;
#define OMAP_MEM4_ONSTATE_MASK OMAP4430_OCP_NRET_BANK_ONSTATE_MASK
/* OMAP3 and OMAP4 Memory Retstate Masks (common across all power domains) */
-#define OMAP_MEM0_RETSTATE_MASK OMAP3430_SHAREDL1CACHEFLATRETSTATE
-#define OMAP_MEM1_RETSTATE_MASK OMAP3430_L1FLATMEMRETSTATE
-#define OMAP_MEM2_RETSTATE_MASK OMAP3430_SHAREDL2CACHEFLATRETSTATE
-#define OMAP_MEM3_RETSTATE_MASK OMAP3430_L2FLATMEMRETSTATE
+#define OMAP_MEM0_RETSTATE_MASK OMAP3430_SHAREDL1CACHEFLATRETSTATE_MASK
+#define OMAP_MEM1_RETSTATE_MASK OMAP3430_L1FLATMEMRETSTATE_MASK
+#define OMAP_MEM2_RETSTATE_MASK OMAP3430_SHAREDL2CACHEFLATRETSTATE_MASK
+#define OMAP_MEM3_RETSTATE_MASK OMAP3430_L2FLATMEMRETSTATE_MASK
#define OMAP_MEM4_RETSTATE_MASK OMAP4430_OCP_NRET_BANK_RETSTATE_MASK
/* OMAP3 and OMAP4 Memory Status bits */
@@ -511,6 +511,8 @@ int pwrdm_read_prev_pwrst(struct powerdomain *pwrdm)
*/
int pwrdm_set_logic_retst(struct powerdomain *pwrdm, u8 pwrst)
{
+ u32 v;
+
if (!pwrdm)
return -EINVAL;
@@ -526,9 +528,9 @@ int pwrdm_set_logic_retst(struct powerdomain *pwrdm, u8 pwrst)
* but the type of value returned is the same for each
* powerdomain.
*/
- prm_rmw_mod_reg_bits(OMAP3430_LOGICL1CACHERETSTATE,
- (pwrst << __ffs(OMAP3430_LOGICL1CACHERETSTATE)),
- pwrdm->prcm_offs, pwrstctrl_reg_offs);
+ v = pwrst << __ffs(OMAP3430_LOGICL1CACHERETSTATE_MASK);
+ prm_rmw_mod_reg_bits(OMAP3430_LOGICL1CACHERETSTATE_MASK, v,
+ pwrdm->prcm_offs, pwrstctrl_reg_offs);
return 0;
}
@@ -676,8 +678,8 @@ int pwrdm_read_logic_pwrst(struct powerdomain *pwrdm)
if (!pwrdm)
return -EINVAL;
- return prm_read_mod_bits_shift(pwrdm->prcm_offs,
- pwrstst_reg_offs, OMAP3430_LOGICSTATEST);
+ return prm_read_mod_bits_shift(pwrdm->prcm_offs, pwrstst_reg_offs,
+ OMAP3430_LOGICSTATEST_MASK);
}
/**
@@ -700,7 +702,7 @@ int pwrdm_read_prev_logic_pwrst(struct powerdomain *pwrdm)
* powerdomain.
*/
return prm_read_mod_bits_shift(pwrdm->prcm_offs, OMAP3430_PM_PREPWSTST,
- OMAP3430_LASTLOGICSTATEENTERED);
+ OMAP3430_LASTLOGICSTATEENTERED_MASK);
}
/**
@@ -723,7 +725,7 @@ int pwrdm_read_logic_retst(struct powerdomain *pwrdm)
* powerdomain.
*/
return prm_read_mod_bits_shift(pwrdm->prcm_offs, pwrstctrl_reg_offs,
- OMAP3430_LOGICSTATEST);
+ OMAP3430_LOGICSTATEST_MASK);
}
/**
@@ -978,6 +980,34 @@ bool pwrdm_has_hdwr_sar(struct powerdomain *pwrdm)
}
/**
+ * pwrdm_set_lowpwrstchange - Request a low power state change
+ * @pwrdm: struct powerdomain *
+ *
+ * Allows a powerdomain to transition to a lower power sleep state
+ * from an existing sleep state without waking up the powerdomain.
+ * Returns -EINVAL if the powerdomain pointer is null or if the
+ * powerdomain does not support LOWPOWERSTATECHANGE, or returns 0
+ * upon success.
+ */
+int pwrdm_set_lowpwrstchange(struct powerdomain *pwrdm)
+{
+ if (!pwrdm)
+ return -EINVAL;
+
+ if (!(pwrdm->flags & PWRDM_HAS_LOWPOWERSTATECHANGE))
+ return -EINVAL;
+
+ pr_debug("powerdomain: %s: setting LOWPOWERSTATECHANGE bit\n",
+ pwrdm->name);
+
+ prm_rmw_mod_reg_bits(OMAP4430_LOWPOWERSTATECHANGE_MASK,
+ (1 << OMAP4430_LOWPOWERSTATECHANGE_SHIFT),
+ pwrdm->prcm_offs, pwrstctrl_reg_offs);
+
+ return 0;
+}
+
+/**
* pwrdm_wait_transition - wait for powerdomain power transition to finish
* @pwrdm: struct powerdomain * to wait for
*
@@ -1002,7 +1032,7 @@ int pwrdm_wait_transition(struct powerdomain *pwrdm)
/* XXX Is this udelay() value meaningful? */
while ((prm_read_mod_reg(pwrdm->prcm_offs, pwrstst_reg_offs) &
- OMAP_INTRANSITION) &&
+ OMAP_INTRANSITION_MASK) &&
(c++ < PWRDM_TRANSITION_BAILOUT))
udelay(1);
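
[Editorial illustration, not part of the patch] The new pwrdm_set_lowpwrstchange() helper added above is intended to be called from OMAP PM code after a deeper target state has been programmed, so the domain can drop to that state without first waking up. A minimal usage sketch, assuming the existing powerdomain API (pwrdm_lookup(), pwrdm_set_next_pwrst()) and that the helper gets a declaration in the powerdomain header as part of this series; the function name below is made up for illustration:

/*
 * Hypothetical usage sketch -- not from this patch.  Uses only the
 * powerdomain API that already exists in mach-omap2/powerdomain.c,
 * plus the new pwrdm_set_lowpwrstchange() added above.
 */
#include <linux/errno.h>
#include <plat/powerdomain.h>	/* powerdomain declarations at this point in the tree */

static int example_request_deeper_sleep(const char *pwrdm_name)
{
	struct powerdomain *pwrdm = pwrdm_lookup(pwrdm_name);

	if (!pwrdm)
		return -ENODEV;

	/* Program the deeper target state first... */
	pwrdm_set_next_pwrst(pwrdm, PWRDM_POWER_RET);

	/*
	 * ...then request the low-power state change; this returns
	 * -EINVAL if the domain lacks PWRDM_HAS_LOWPOWERSTATECHANGE.
	 */
	return pwrdm_set_lowpwrstchange(pwrdm);
}
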
diff --git a/arch/arm/mach-omap2/powerdomains44xx.h b/arch/arm/mach-omap2/powerdomains44xx.h
index c1015147d579..c7219513472a 100644
--- a/arch/arm/mach-omap2/powerdomains44xx.h
+++ b/arch/arm/mach-omap2/powerdomains44xx.h
@@ -1,12 +1,12 @@
/*
* OMAP4 Power domains framework
*
- * Copyright (C) 2009 Texas Instruments, Inc.
- * Copyright (C) 2009 Nokia Corporation
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ * Copyright (C) 2009-2010 Nokia Corporation
*
* Abhijit Pagare (abhijitpagare@ti.com)
* Benoit Cousson (b-cousson@ti.com)
- * Paul Walmsley
+ * Paul Walmsley (paul@pwsan.com)
*
* This file is automatically generated from the OMAP hardware databases.
* We respectfully ask that any modifications to this file be coordinated
@@ -54,6 +54,7 @@ static struct powerdomain core_44xx_pwrdm = {
[3] = PWRDM_POWER_ON, /* ducati_l2ram */
[4] = PWRDM_POWER_ON, /* ducati_unicache */
},
+ .flags = PWRDM_HAS_LOWPOWERSTATECHANGE,
};
/* gfx_44xx_pwrdm: 3D accelerator power domain */
@@ -69,6 +70,7 @@ static struct powerdomain gfx_44xx_pwrdm = {
.pwrsts_mem_on = {
[0] = PWRDM_POWER_ON, /* gfx_mem */
},
+ .flags = PWRDM_HAS_LOWPOWERSTATECHANGE,
};
/* abe_44xx_pwrdm: Audio back end power domain */
@@ -87,6 +89,7 @@ static struct powerdomain abe_44xx_pwrdm = {
[0] = PWRDM_POWER_ON, /* aessmem */
[1] = PWRDM_POWER_ON, /* periphmem */
},
+ .flags = PWRDM_HAS_LOWPOWERSTATECHANGE,
};
/* dss_44xx_pwrdm: Display subsystem power domain */
@@ -103,6 +106,7 @@ static struct powerdomain dss_44xx_pwrdm = {
.pwrsts_mem_on = {
[0] = PWRDM_POWER_ON, /* dss_mem */
},
+ .flags = PWRDM_HAS_LOWPOWERSTATECHANGE,
};
/* tesla_44xx_pwrdm: Tesla processor power domain */
@@ -123,6 +127,7 @@ static struct powerdomain tesla_44xx_pwrdm = {
[1] = PWRDM_POWER_ON, /* tesla_l1 */
[2] = PWRDM_POWER_ON, /* tesla_l2 */
},
+ .flags = PWRDM_HAS_LOWPOWERSTATECHANGE,
};
/* wkup_44xx_pwrdm: Wake-up power domain */
@@ -130,7 +135,7 @@ static struct powerdomain wkup_44xx_pwrdm = {
.name = "wkup_pwrdm",
.prcm_offs = OMAP4430_PRM_WKUP_MOD,
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
- .pwrsts = PWRDM_POWER_ON,
+ .pwrsts = PWRSTS_ON,
.banks = 1,
.pwrsts_mem_ret = {
[0] = PWRDM_POWER_OFF, /* wkup_bank */
@@ -143,7 +148,7 @@ static struct powerdomain wkup_44xx_pwrdm = {
/* cpu0_44xx_pwrdm: MPU0 processor and Neon coprocessor power domain */
static struct powerdomain cpu0_44xx_pwrdm = {
.name = "cpu0_pwrdm",
- .prcm_offs = OMAP4430_CHIRONSS_CHIRONSS_CPU0_MOD,
+ .prcm_offs = OMAP4430_PRCM_MPU_CPU0_MOD,
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
.pwrsts = PWRSTS_OFF_RET_ON,
.pwrsts_logic_ret = PWRSTS_OFF_RET,
@@ -159,7 +164,7 @@ static struct powerdomain cpu0_44xx_pwrdm = {
/* cpu1_44xx_pwrdm: MPU1 processor and Neon coprocessor power domain */
static struct powerdomain cpu1_44xx_pwrdm = {
.name = "cpu1_pwrdm",
- .prcm_offs = OMAP4430_CHIRONSS_CHIRONSS_CPU1_MOD,
+ .prcm_offs = OMAP4430_PRCM_MPU_CPU1_MOD,
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
.pwrsts = PWRSTS_OFF_RET_ON,
.pwrsts_logic_ret = PWRSTS_OFF_RET,
@@ -227,6 +232,7 @@ static struct powerdomain ivahd_44xx_pwrdm = {
[2] = PWRDM_POWER_ON, /* tcm1_mem */
[3] = PWRDM_POWER_ON, /* tcm2_mem */
},
+ .flags = PWRDM_HAS_LOWPOWERSTATECHANGE,
};
/* cam_44xx_pwrdm: Camera subsystem power domain */
@@ -242,6 +248,7 @@ static struct powerdomain cam_44xx_pwrdm = {
.pwrsts_mem_on = {
[0] = PWRDM_POWER_ON, /* cam_mem */
},
+ .flags = PWRDM_HAS_LOWPOWERSTATECHANGE,
};
/* l3init_44xx_pwrdm: L3 initiator peripherals power domain */
@@ -258,6 +265,7 @@ static struct powerdomain l3init_44xx_pwrdm = {
.pwrsts_mem_on = {
[0] = PWRDM_POWER_ON, /* l3init_bank1 */
},
+ .flags = PWRDM_HAS_LOWPOWERSTATECHANGE,
};
/* l4per_44xx_pwrdm: Target peripherals power domain */
@@ -276,6 +284,7 @@ static struct powerdomain l4per_44xx_pwrdm = {
[0] = PWRDM_POWER_ON, /* nonretained_bank */
[1] = PWRDM_POWER_ON, /* retained_bank */
},
+ .flags = PWRDM_HAS_LOWPOWERSTATECHANGE,
};
/*
@@ -286,7 +295,7 @@ static struct powerdomain always_on_core_44xx_pwrdm = {
.name = "always_on_core_pwrdm",
.prcm_offs = OMAP4430_PRM_ALWAYS_ON_MOD,
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
- .pwrsts = PWRDM_POWER_ON,
+ .pwrsts = PWRSTS_ON,
};
/* cefuse_44xx_pwrdm: Customer efuse controller power domain */
diff --git a/arch/arm/mach-omap2/prcm-common.h b/arch/arm/mach-omap2/prcm-common.h
index 90f603d434c6..995b7edbf18d 100644
--- a/arch/arm/mach-omap2/prcm-common.h
+++ b/arch/arm/mach-omap2/prcm-common.h
@@ -112,83 +112,75 @@
#define OMAP4430_SCRM_SCRM_MOD 0x0000
-/* CHIRONSS instances */
+/* PRCM_MPU instances */
-#define OMAP4430_CHIRONSS_CHIRONSS_OCP_SOCKET_PRCM_MOD 0x0000
-#define OMAP4430_CHIRONSS_CHIRONSS_DEVICE_PRM_MOD 0x0200
-#define OMAP4430_CHIRONSS_CHIRONSS_CPU0_MOD 0x0400
-#define OMAP4430_CHIRONSS_CHIRONSS_CPU1_MOD 0x0800
-
-/* Base Addresses for the OMAP4 */
-
-#define OMAP4430_CM1_BASE 0x4a004000
-#define OMAP4430_CM2_BASE 0x4a008000
-#define OMAP4430_PRM_BASE 0x4a306000
-#define OMAP4430_SCRM_BASE 0x4a30a000
-#define OMAP4430_CHIRONSS_BASE 0x48243000
+#define OMAP4430_PRCM_MPU_OCP_SOCKET_PRCM_MOD 0x0000
+#define OMAP4430_PRCM_MPU_DEVICE_PRM_MOD 0x0200
+#define OMAP4430_PRCM_MPU_CPU0_MOD 0x0400
+#define OMAP4430_PRCM_MPU_CPU1_MOD 0x0800
/* 24XX register bits shared between CM & PRM registers */
/* CM_FCLKEN1_CORE, CM_ICLKEN1_CORE, PM_WKEN1_CORE shared bits */
#define OMAP2420_EN_MMC_SHIFT 26
-#define OMAP2420_EN_MMC (1 << 26)
+#define OMAP2420_EN_MMC_MASK (1 << 26)
#define OMAP24XX_EN_UART2_SHIFT 22
-#define OMAP24XX_EN_UART2 (1 << 22)
+#define OMAP24XX_EN_UART2_MASK (1 << 22)
#define OMAP24XX_EN_UART1_SHIFT 21
-#define OMAP24XX_EN_UART1 (1 << 21)
+#define OMAP24XX_EN_UART1_MASK (1 << 21)
#define OMAP24XX_EN_MCSPI2_SHIFT 18
-#define OMAP24XX_EN_MCSPI2 (1 << 18)
+#define OMAP24XX_EN_MCSPI2_MASK (1 << 18)
#define OMAP24XX_EN_MCSPI1_SHIFT 17
-#define OMAP24XX_EN_MCSPI1 (1 << 17)
+#define OMAP24XX_EN_MCSPI1_MASK (1 << 17)
#define OMAP24XX_EN_MCBSP2_SHIFT 16
-#define OMAP24XX_EN_MCBSP2 (1 << 16)
+#define OMAP24XX_EN_MCBSP2_MASK (1 << 16)
#define OMAP24XX_EN_MCBSP1_SHIFT 15
-#define OMAP24XX_EN_MCBSP1 (1 << 15)
+#define OMAP24XX_EN_MCBSP1_MASK (1 << 15)
#define OMAP24XX_EN_GPT12_SHIFT 14
-#define OMAP24XX_EN_GPT12 (1 << 14)
+#define OMAP24XX_EN_GPT12_MASK (1 << 14)
#define OMAP24XX_EN_GPT11_SHIFT 13
-#define OMAP24XX_EN_GPT11 (1 << 13)
+#define OMAP24XX_EN_GPT11_MASK (1 << 13)
#define OMAP24XX_EN_GPT10_SHIFT 12
-#define OMAP24XX_EN_GPT10 (1 << 12)
+#define OMAP24XX_EN_GPT10_MASK (1 << 12)
#define OMAP24XX_EN_GPT9_SHIFT 11
-#define OMAP24XX_EN_GPT9 (1 << 11)
+#define OMAP24XX_EN_GPT9_MASK (1 << 11)
#define OMAP24XX_EN_GPT8_SHIFT 10
-#define OMAP24XX_EN_GPT8 (1 << 10)
+#define OMAP24XX_EN_GPT8_MASK (1 << 10)
#define OMAP24XX_EN_GPT7_SHIFT 9
-#define OMAP24XX_EN_GPT7 (1 << 9)
+#define OMAP24XX_EN_GPT7_MASK (1 << 9)
#define OMAP24XX_EN_GPT6_SHIFT 8
-#define OMAP24XX_EN_GPT6 (1 << 8)
+#define OMAP24XX_EN_GPT6_MASK (1 << 8)
#define OMAP24XX_EN_GPT5_SHIFT 7
-#define OMAP24XX_EN_GPT5 (1 << 7)
+#define OMAP24XX_EN_GPT5_MASK (1 << 7)
#define OMAP24XX_EN_GPT4_SHIFT 6
-#define OMAP24XX_EN_GPT4 (1 << 6)
+#define OMAP24XX_EN_GPT4_MASK (1 << 6)
#define OMAP24XX_EN_GPT3_SHIFT 5
-#define OMAP24XX_EN_GPT3 (1 << 5)
+#define OMAP24XX_EN_GPT3_MASK (1 << 5)
#define OMAP24XX_EN_GPT2_SHIFT 4
-#define OMAP24XX_EN_GPT2 (1 << 4)
+#define OMAP24XX_EN_GPT2_MASK (1 << 4)
#define OMAP2420_EN_VLYNQ_SHIFT 3
-#define OMAP2420_EN_VLYNQ (1 << 3)
+#define OMAP2420_EN_VLYNQ_MASK (1 << 3)
/* CM_FCLKEN2_CORE, CM_ICLKEN2_CORE, PM_WKEN2_CORE shared bits */
#define OMAP2430_EN_GPIO5_SHIFT 10
-#define OMAP2430_EN_GPIO5 (1 << 10)
+#define OMAP2430_EN_GPIO5_MASK (1 << 10)
#define OMAP2430_EN_MCSPI3_SHIFT 9
-#define OMAP2430_EN_MCSPI3 (1 << 9)
+#define OMAP2430_EN_MCSPI3_MASK (1 << 9)
#define OMAP2430_EN_MMCHS2_SHIFT 8
-#define OMAP2430_EN_MMCHS2 (1 << 8)
+#define OMAP2430_EN_MMCHS2_MASK (1 << 8)
#define OMAP2430_EN_MMCHS1_SHIFT 7
-#define OMAP2430_EN_MMCHS1 (1 << 7)
+#define OMAP2430_EN_MMCHS1_MASK (1 << 7)
#define OMAP24XX_EN_UART3_SHIFT 2
-#define OMAP24XX_EN_UART3 (1 << 2)
+#define OMAP24XX_EN_UART3_MASK (1 << 2)
#define OMAP24XX_EN_USB_SHIFT 0
-#define OMAP24XX_EN_USB (1 << 0)
+#define OMAP24XX_EN_USB_MASK (1 << 0)
/* CM_ICLKEN2_CORE, PM_WKEN2_CORE shared bits */
#define OMAP2430_EN_MDM_INTC_SHIFT 11
-#define OMAP2430_EN_MDM_INTC (1 << 11)
+#define OMAP2430_EN_MDM_INTC_MASK (1 << 11)
#define OMAP2430_EN_USBHS_SHIFT 6
-#define OMAP2430_EN_USBHS (1 << 6)
+#define OMAP2430_EN_USBHS_MASK (1 << 6)
/* CM_IDLEST1_CORE, PM_WKST1_CORE shared bits */
#define OMAP2420_ST_MMC_SHIFT 26
@@ -246,9 +238,9 @@
/* CM_FCLKEN_WKUP, CM_ICLKEN_WKUP, PM_WKEN_WKUP shared bits */
#define OMAP24XX_EN_GPIOS_SHIFT 2
-#define OMAP24XX_EN_GPIOS (1 << 2)
+#define OMAP24XX_EN_GPIOS_MASK (1 << 2)
#define OMAP24XX_EN_GPT1_SHIFT 0
-#define OMAP24XX_EN_GPT1 (1 << 0)
+#define OMAP24XX_EN_GPT1_MASK (1 << 0)
/* PM_WKST_WKUP, CM_IDLEST_WKUP shared bits */
#define OMAP24XX_ST_GPIOS_SHIFT (1 << 2)
@@ -267,47 +259,47 @@
#define OMAP3430_REV_MASK (0xff << 0)
/* CM_SYSCONFIG, PRM_SYSCONFIG shared bits */
-#define OMAP3430_AUTOIDLE (1 << 0)
+#define OMAP3430_AUTOIDLE_MASK (1 << 0)
/* CM_FCLKEN1_CORE, CM_ICLKEN1_CORE, PM_WKEN1_CORE shared bits */
-#define OMAP3430_EN_MMC2 (1 << 25)
+#define OMAP3430_EN_MMC2_MASK (1 << 25)
#define OMAP3430_EN_MMC2_SHIFT 25
-#define OMAP3430_EN_MMC1 (1 << 24)
+#define OMAP3430_EN_MMC1_MASK (1 << 24)
#define OMAP3430_EN_MMC1_SHIFT 24
-#define OMAP3430_EN_MCSPI4 (1 << 21)
+#define OMAP3430_EN_MCSPI4_MASK (1 << 21)
#define OMAP3430_EN_MCSPI4_SHIFT 21
-#define OMAP3430_EN_MCSPI3 (1 << 20)
+#define OMAP3430_EN_MCSPI3_MASK (1 << 20)
#define OMAP3430_EN_MCSPI3_SHIFT 20
-#define OMAP3430_EN_MCSPI2 (1 << 19)
+#define OMAP3430_EN_MCSPI2_MASK (1 << 19)
#define OMAP3430_EN_MCSPI2_SHIFT 19
-#define OMAP3430_EN_MCSPI1 (1 << 18)
+#define OMAP3430_EN_MCSPI1_MASK (1 << 18)
#define OMAP3430_EN_MCSPI1_SHIFT 18
-#define OMAP3430_EN_I2C3 (1 << 17)
+#define OMAP3430_EN_I2C3_MASK (1 << 17)
#define OMAP3430_EN_I2C3_SHIFT 17
-#define OMAP3430_EN_I2C2 (1 << 16)
+#define OMAP3430_EN_I2C2_MASK (1 << 16)
#define OMAP3430_EN_I2C2_SHIFT 16
-#define OMAP3430_EN_I2C1 (1 << 15)
+#define OMAP3430_EN_I2C1_MASK (1 << 15)
#define OMAP3430_EN_I2C1_SHIFT 15
-#define OMAP3430_EN_UART2 (1 << 14)
+#define OMAP3430_EN_UART2_MASK (1 << 14)
#define OMAP3430_EN_UART2_SHIFT 14
-#define OMAP3430_EN_UART1 (1 << 13)
+#define OMAP3430_EN_UART1_MASK (1 << 13)
#define OMAP3430_EN_UART1_SHIFT 13
-#define OMAP3430_EN_GPT11 (1 << 12)
+#define OMAP3430_EN_GPT11_MASK (1 << 12)
#define OMAP3430_EN_GPT11_SHIFT 12
-#define OMAP3430_EN_GPT10 (1 << 11)
+#define OMAP3430_EN_GPT10_MASK (1 << 11)
#define OMAP3430_EN_GPT10_SHIFT 11
-#define OMAP3430_EN_MCBSP5 (1 << 10)
+#define OMAP3430_EN_MCBSP5_MASK (1 << 10)
#define OMAP3430_EN_MCBSP5_SHIFT 10
-#define OMAP3430_EN_MCBSP1 (1 << 9)
+#define OMAP3430_EN_MCBSP1_MASK (1 << 9)
#define OMAP3430_EN_MCBSP1_SHIFT 9
-#define OMAP3430_EN_FSHOSTUSB (1 << 5)
+#define OMAP3430_EN_FSHOSTUSB_MASK (1 << 5)
#define OMAP3430_EN_FSHOSTUSB_SHIFT 5
-#define OMAP3430_EN_D2D (1 << 3)
+#define OMAP3430_EN_D2D_MASK (1 << 3)
#define OMAP3430_EN_D2D_SHIFT 3
/* CM_ICLKEN1_CORE, PM_WKEN1_CORE shared bits */
-#define OMAP3430_EN_HSOTGUSB (1 << 4)
-#define OMAP3430_EN_HSOTGUSB_SHIFT 4
+#define OMAP3430_EN_HSOTGUSB_MASK (1 << 4)
+#define OMAP3430_EN_HSOTGUSB_SHIFT 4
/* PM_WKST1_CORE, CM_IDLEST1_CORE shared bits */
#define OMAP3430_ST_MMC2_SHIFT 25
@@ -352,21 +344,21 @@
#define OMAP3430_ST_D2D_MASK (1 << 3)
/* CM_FCLKEN_WKUP, CM_ICLKEN_WKUP, PM_WKEN_WKUP shared bits */
-#define OMAP3430_EN_GPIO1 (1 << 3)
+#define OMAP3430_EN_GPIO1_MASK (1 << 3)
#define OMAP3430_EN_GPIO1_SHIFT 3
-#define OMAP3430_EN_GPT12 (1 << 1)
+#define OMAP3430_EN_GPT12_MASK (1 << 1)
#define OMAP3430_EN_GPT12_SHIFT 1
-#define OMAP3430_EN_GPT1 (1 << 0)
+#define OMAP3430_EN_GPT1_MASK (1 << 0)
#define OMAP3430_EN_GPT1_SHIFT 0
/* CM_FCLKEN_WKUP, PM_WKEN_WKUP shared bits */
-#define OMAP3430_EN_SR2 (1 << 7)
+#define OMAP3430_EN_SR2_MASK (1 << 7)
#define OMAP3430_EN_SR2_SHIFT 7
-#define OMAP3430_EN_SR1 (1 << 6)
+#define OMAP3430_EN_SR1_MASK (1 << 6)
#define OMAP3430_EN_SR1_SHIFT 6
/* CM_ICLKEN_WKUP, PM_WKEN_WKUP shared bits */
-#define OMAP3430_EN_GPT12 (1 << 1)
+#define OMAP3430_EN_GPT12_MASK (1 << 1)
#define OMAP3430_EN_GPT12_SHIFT 1
/* CM_IDLEST_WKUP, PM_WKST_WKUP shared bits */
@@ -386,47 +378,47 @@
* CM_SLEEPDEP_PER, PM_WKDEP_IVA2, PM_WKDEP_GFX,
* PM_WKDEP_DSS, PM_WKDEP_CAM, PM_WKDEP_PER, PM_WKDEP_NEON shared bits
*/
-#define OMAP3430_EN_MPU (1 << 1)
+#define OMAP3430_EN_MPU_MASK (1 << 1)
#define OMAP3430_EN_MPU_SHIFT 1
/* CM_FCLKEN_PER, CM_ICLKEN_PER, PM_WKEN_PER shared bits */
-#define OMAP3430_EN_GPIO6 (1 << 17)
+#define OMAP3430_EN_GPIO6_MASK (1 << 17)
#define OMAP3430_EN_GPIO6_SHIFT 17
-#define OMAP3430_EN_GPIO5 (1 << 16)
+#define OMAP3430_EN_GPIO5_MASK (1 << 16)
#define OMAP3430_EN_GPIO5_SHIFT 16
-#define OMAP3430_EN_GPIO4 (1 << 15)
+#define OMAP3430_EN_GPIO4_MASK (1 << 15)
#define OMAP3430_EN_GPIO4_SHIFT 15
-#define OMAP3430_EN_GPIO3 (1 << 14)
+#define OMAP3430_EN_GPIO3_MASK (1 << 14)
#define OMAP3430_EN_GPIO3_SHIFT 14
-#define OMAP3430_EN_GPIO2 (1 << 13)
+#define OMAP3430_EN_GPIO2_MASK (1 << 13)
#define OMAP3430_EN_GPIO2_SHIFT 13
-#define OMAP3430_EN_UART3 (1 << 11)
+#define OMAP3430_EN_UART3_MASK (1 << 11)
#define OMAP3430_EN_UART3_SHIFT 11
-#define OMAP3430_EN_GPT9 (1 << 10)
+#define OMAP3430_EN_GPT9_MASK (1 << 10)
#define OMAP3430_EN_GPT9_SHIFT 10
-#define OMAP3430_EN_GPT8 (1 << 9)
+#define OMAP3430_EN_GPT8_MASK (1 << 9)
#define OMAP3430_EN_GPT8_SHIFT 9
-#define OMAP3430_EN_GPT7 (1 << 8)
+#define OMAP3430_EN_GPT7_MASK (1 << 8)
#define OMAP3430_EN_GPT7_SHIFT 8
-#define OMAP3430_EN_GPT6 (1 << 7)
+#define OMAP3430_EN_GPT6_MASK (1 << 7)
#define OMAP3430_EN_GPT6_SHIFT 7
-#define OMAP3430_EN_GPT5 (1 << 6)
+#define OMAP3430_EN_GPT5_MASK (1 << 6)
#define OMAP3430_EN_GPT5_SHIFT 6
-#define OMAP3430_EN_GPT4 (1 << 5)
+#define OMAP3430_EN_GPT4_MASK (1 << 5)
#define OMAP3430_EN_GPT4_SHIFT 5
-#define OMAP3430_EN_GPT3 (1 << 4)
+#define OMAP3430_EN_GPT3_MASK (1 << 4)
#define OMAP3430_EN_GPT3_SHIFT 4
-#define OMAP3430_EN_GPT2 (1 << 3)
+#define OMAP3430_EN_GPT2_MASK (1 << 3)
#define OMAP3430_EN_GPT2_SHIFT 3
/* CM_FCLKEN_PER, CM_ICLKEN_PER, PM_WKEN_PER, PM_WKST_PER shared bits */
/* XXX Possible TI documentation bug: should the PM_WKST_PER EN_* bits
* be ST_* bits instead? */
-#define OMAP3430_EN_MCBSP4 (1 << 2)
+#define OMAP3430_EN_MCBSP4_MASK (1 << 2)
#define OMAP3430_EN_MCBSP4_SHIFT 2
-#define OMAP3430_EN_MCBSP3 (1 << 1)
+#define OMAP3430_EN_MCBSP3_MASK (1 << 1)
#define OMAP3430_EN_MCBSP3_SHIFT 1
-#define OMAP3430_EN_MCBSP2 (1 << 0)
+#define OMAP3430_EN_MCBSP2_MASK (1 << 0)
#define OMAP3430_EN_MCBSP2_SHIFT 0
/* CM_IDLEST_PER, PM_WKST_PER shared bits */
diff --git a/arch/arm/mach-omap2/prcm.c b/arch/arm/mach-omap2/prcm.c
index 07a60f1204ca..c20137497c92 100644
--- a/arch/arm/mach-omap2/prcm.c
+++ b/arch/arm/mach-omap2/prcm.c
@@ -158,10 +158,10 @@ void omap_prcm_arch_reset(char mode, const char *cmd)
WARN_ON(1);
if (cpu_is_omap24xx() || cpu_is_omap34xx())
- prm_set_mod_reg_bits(OMAP_RST_DPLL3, prcm_offs,
+ prm_set_mod_reg_bits(OMAP_RST_DPLL3_MASK, prcm_offs,
OMAP2_RM_RSTCTRL);
if (cpu_is_omap44xx())
- prm_set_mod_reg_bits(OMAP_RST_DPLL3, prcm_offs,
+ prm_set_mod_reg_bits(OMAP_RST_DPLL3_MASK, prcm_offs,
OMAP4_RM_RSTCTRL);
}
diff --git a/arch/arm/mach-omap2/prm-regbits-24xx.h b/arch/arm/mach-omap2/prm-regbits-24xx.h
index 4002051c20b9..0b188ffa710e 100644
--- a/arch/arm/mach-omap2/prm-regbits-24xx.h
+++ b/arch/arm/mach-omap2/prm-regbits-24xx.h
@@ -19,14 +19,14 @@
/* Bits shared between registers */
/* PRCM_IRQSTATUS_MPU, PM_IRQSTATUS_DSP, PRCM_IRQSTATUS_IVA shared bits */
-#define OMAP24XX_VOLTTRANS_ST (1 << 2)
-#define OMAP24XX_WKUP2_ST (1 << 1)
-#define OMAP24XX_WKUP1_ST (1 << 0)
+#define OMAP24XX_VOLTTRANS_ST_MASK (1 << 2)
+#define OMAP24XX_WKUP2_ST_MASK (1 << 1)
+#define OMAP24XX_WKUP1_ST_MASK (1 << 0)
/* PRCM_IRQENABLE_MPU, PM_IRQENABLE_DSP, PRCM_IRQENABLE_IVA shared bits */
-#define OMAP24XX_VOLTTRANS_EN (1 << 2)
-#define OMAP24XX_WKUP2_EN (1 << 1)
-#define OMAP24XX_WKUP1_EN (1 << 0)
+#define OMAP24XX_VOLTTRANS_EN_MASK (1 << 2)
+#define OMAP24XX_WKUP2_EN_MASK (1 << 1)
+#define OMAP24XX_WKUP1_EN_MASK (1 << 0)
/* PM_WKDEP_GFX, PM_WKDEP_MPU, PM_WKDEP_DSP, PM_WKDEP_MDM shared bits */
#define OMAP24XX_EN_MPU_SHIFT 1
@@ -40,16 +40,16 @@
*/
#define OMAP24XX_MEMONSTATE_SHIFT 10
#define OMAP24XX_MEMONSTATE_MASK (0x3 << 10)
-#define OMAP24XX_MEMRETSTATE (1 << 3)
+#define OMAP24XX_MEMRETSTATE_MASK (1 << 3)
/* PM_PWSTCTRL_GFX, PM_PWSTCTRL_DSP, PM_PWSTCTRL_MDM shared bits */
-#define OMAP24XX_FORCESTATE (1 << 18)
+#define OMAP24XX_FORCESTATE_MASK (1 << 18)
/*
* PM_PWSTST_CORE, PM_PWSTST_GFX, PM_PWSTST_MPU, PM_PWSTST_DSP,
* PM_PWSTST_MDM shared bits
*/
-#define OMAP24XX_CLKACTIVITY (1 << 19)
+#define OMAP24XX_CLKACTIVITY_MASK (1 << 19)
/* PM_PWSTST_MPU, PM_PWSTST_CORE, PM_PWSTST_DSP shared bits */
#define OMAP24XX_LASTSTATEENTERED_SHIFT 4
@@ -71,26 +71,26 @@
#define OMAP24XX_REV_MASK (0xff << 0)
/* PRCM_SYSCONFIG */
-#define OMAP24XX_AUTOIDLE (1 << 0)
+#define OMAP24XX_AUTOIDLE_MASK (1 << 0)
/* PRCM_IRQSTATUS_MPU specific bits */
-#define OMAP2430_DPLL_RECAL_ST (1 << 6)
-#define OMAP24XX_TRANSITION_ST (1 << 5)
-#define OMAP24XX_EVGENOFF_ST (1 << 4)
-#define OMAP24XX_EVGENON_ST (1 << 3)
+#define OMAP2430_DPLL_RECAL_ST_MASK (1 << 6)
+#define OMAP24XX_TRANSITION_ST_MASK (1 << 5)
+#define OMAP24XX_EVGENOFF_ST_MASK (1 << 4)
+#define OMAP24XX_EVGENON_ST_MASK (1 << 3)
/* PRCM_IRQENABLE_MPU specific bits */
-#define OMAP2430_DPLL_RECAL_EN (1 << 6)
-#define OMAP24XX_TRANSITION_EN (1 << 5)
-#define OMAP24XX_EVGENOFF_EN (1 << 4)
-#define OMAP24XX_EVGENON_EN (1 << 3)
+#define OMAP2430_DPLL_RECAL_EN_MASK (1 << 6)
+#define OMAP24XX_TRANSITION_EN_MASK (1 << 5)
+#define OMAP24XX_EVGENOFF_EN_MASK (1 << 4)
+#define OMAP24XX_EVGENON_EN_MASK (1 << 3)
/* PRCM_VOLTCTRL */
-#define OMAP24XX_AUTO_EXTVOLT (1 << 15)
-#define OMAP24XX_FORCE_EXTVOLT (1 << 14)
+#define OMAP24XX_AUTO_EXTVOLT_MASK (1 << 15)
+#define OMAP24XX_FORCE_EXTVOLT_MASK (1 << 14)
#define OMAP24XX_SETOFF_LEVEL_SHIFT 12
#define OMAP24XX_SETOFF_LEVEL_MASK (0x3 << 12)
-#define OMAP24XX_MEMRETCTRL (1 << 8)
+#define OMAP24XX_MEMRETCTRL_MASK (1 << 8)
#define OMAP24XX_SETRET_LEVEL_SHIFT 6
#define OMAP24XX_SETRET_LEVEL_MASK (0x3 << 6)
#define OMAP24XX_VOLT_LEVEL_SHIFT 0
@@ -104,13 +104,13 @@
/* PRCM_CLKOUT_CTRL */
#define OMAP2420_CLKOUT2_EN_SHIFT 15
-#define OMAP2420_CLKOUT2_EN (1 << 15)
+#define OMAP2420_CLKOUT2_EN_MASK (1 << 15)
#define OMAP2420_CLKOUT2_DIV_SHIFT 11
#define OMAP2420_CLKOUT2_DIV_MASK (0x7 << 11)
#define OMAP2420_CLKOUT2_SOURCE_SHIFT 8
#define OMAP2420_CLKOUT2_SOURCE_MASK (0x3 << 8)
#define OMAP24XX_CLKOUT_EN_SHIFT 7
-#define OMAP24XX_CLKOUT_EN (1 << 7)
+#define OMAP24XX_CLKOUT_EN_MASK (1 << 7)
#define OMAP24XX_CLKOUT_DIV_SHIFT 3
#define OMAP24XX_CLKOUT_DIV_MASK (0x7 << 3)
#define OMAP24XX_CLKOUT_SOURCE_SHIFT 0
@@ -118,25 +118,25 @@
/* PRCM_CLKEMUL_CTRL */
#define OMAP24XX_EMULATION_EN_SHIFT 0
-#define OMAP24XX_EMULATION_EN (1 << 0)
+#define OMAP24XX_EMULATION_EN_MASK (1 << 0)
/* PRCM_CLKCFG_CTRL */
-#define OMAP24XX_VALID_CONFIG (1 << 0)
+#define OMAP24XX_VALID_CONFIG_MASK (1 << 0)
/* PRCM_CLKCFG_STATUS */
-#define OMAP24XX_CONFIG_STATUS (1 << 0)
+#define OMAP24XX_CONFIG_STATUS_MASK (1 << 0)
/* PRCM_VOLTSETUP specific bits */
/* PRCM_CLKSSETUP specific bits */
/* PRCM_POLCTRL */
-#define OMAP2420_CLKOUT2_POL (1 << 10)
-#define OMAP24XX_CLKOUT_POL (1 << 9)
-#define OMAP24XX_CLKREQ_POL (1 << 8)
-#define OMAP2430_USE_POWEROK (1 << 2)
-#define OMAP2430_POWEROK_POL (1 << 1)
-#define OMAP24XX_EXTVOL_POL (1 << 0)
+#define OMAP2420_CLKOUT2_POL_MASK (1 << 10)
+#define OMAP24XX_CLKOUT_POL_MASK (1 << 9)
+#define OMAP24XX_CLKREQ_POL_MASK (1 << 8)
+#define OMAP2430_USE_POWEROK_MASK (1 << 2)
+#define OMAP2430_POWEROK_POL_MASK (1 << 1)
+#define OMAP24XX_EXTVOL_POL_MASK (1 << 0)
/* RM_RSTST_MPU specific bits */
/* 2430 calls GLOBALWMPU_RST "GLOBALWARM_RST" instead */
@@ -154,7 +154,7 @@
/* PM_EVEGENOFFTIM_MPU specific bits */
/* PM_PWSTCTRL_MPU specific bits */
-#define OMAP2430_FORCESTATE (1 << 18)
+#define OMAP2430_FORCESTATE_MASK (1 << 18)
/* PM_PWSTST_MPU specific bits */
/* INTRANSITION, CLKACTIVITY, POWERSTATE, MEMSTATEST are 2430 only */
@@ -168,21 +168,21 @@
/* PM_WKST2_CORE specific bits */
/* PM_WKDEP_CORE specific bits*/
-#define OMAP2430_PM_WKDEP_CORE_EN_MDM (1 << 5)
-#define OMAP24XX_PM_WKDEP_CORE_EN_GFX (1 << 3)
-#define OMAP24XX_PM_WKDEP_CORE_EN_DSP (1 << 2)
+#define OMAP2430_PM_WKDEP_CORE_EN_MDM_MASK (1 << 5)
+#define OMAP24XX_PM_WKDEP_CORE_EN_GFX_MASK (1 << 3)
+#define OMAP24XX_PM_WKDEP_CORE_EN_DSP_MASK (1 << 2)
/* PM_PWSTCTRL_CORE specific bits */
-#define OMAP24XX_MEMORYCHANGE (1 << 20)
+#define OMAP24XX_MEMORYCHANGE_MASK (1 << 20)
#define OMAP24XX_MEM3ONSTATE_SHIFT 14
#define OMAP24XX_MEM3ONSTATE_MASK (0x3 << 14)
#define OMAP24XX_MEM2ONSTATE_SHIFT 12
#define OMAP24XX_MEM2ONSTATE_MASK (0x3 << 12)
#define OMAP24XX_MEM1ONSTATE_SHIFT 10
#define OMAP24XX_MEM1ONSTATE_MASK (0x3 << 10)
-#define OMAP24XX_MEM3RETSTATE (1 << 5)
-#define OMAP24XX_MEM2RETSTATE (1 << 4)
-#define OMAP24XX_MEM1RETSTATE (1 << 3)
+#define OMAP24XX_MEM3RETSTATE_MASK (1 << 5)
+#define OMAP24XX_MEM2RETSTATE_MASK (1 << 4)
+#define OMAP24XX_MEM1RETSTATE_MASK (1 << 3)
/* PM_PWSTST_CORE specific bits */
#define OMAP24XX_MEM3STATEST_SHIFT 14
@@ -193,10 +193,10 @@
#define OMAP24XX_MEM1STATEST_MASK (0x3 << 10)
/* RM_RSTCTRL_GFX */
-#define OMAP24XX_GFX_RST (1 << 0)
+#define OMAP24XX_GFX_RST_MASK (1 << 0)
/* RM_RSTST_GFX specific bits */
-#define OMAP24XX_GFX_SW_RST (1 << 4)
+#define OMAP24XX_GFX_SW_RST_MASK (1 << 4)
/* PM_PWSTCTRL_GFX specific bits */
@@ -209,25 +209,25 @@
/* RM_RSTST_WKUP specific bits */
/* 2430 calls EXTWMPU_RST "EXTWARM_RST" and GLOBALWMPU_RST "GLOBALWARM_RST" */
-#define OMAP24XX_EXTWMPU_RST (1 << 6)
-#define OMAP24XX_SECU_WD_RST (1 << 5)
-#define OMAP24XX_MPU_WD_RST (1 << 4)
-#define OMAP24XX_SECU_VIOL_RST (1 << 3)
+#define OMAP24XX_EXTWMPU_RST_MASK (1 << 6)
+#define OMAP24XX_SECU_WD_RST_MASK (1 << 5)
+#define OMAP24XX_MPU_WD_RST_MASK (1 << 4)
+#define OMAP24XX_SECU_VIOL_RST_MASK (1 << 3)
/* PM_WKEN_WKUP specific bits */
/* PM_WKST_WKUP specific bits */
/* RM_RSTCTRL_DSP */
-#define OMAP2420_RST_IVA (1 << 8)
-#define OMAP24XX_RST2_DSP (1 << 1)
-#define OMAP24XX_RST1_DSP (1 << 0)
+#define OMAP2420_RST_IVA_MASK (1 << 8)
+#define OMAP24XX_RST2_DSP_MASK (1 << 1)
+#define OMAP24XX_RST1_DSP_MASK (1 << 0)
/* RM_RSTST_DSP specific bits */
/* 2430 calls GLOBALWMPU_RST "GLOBALWARM_RST" */
-#define OMAP2420_IVA_SW_RST (1 << 8)
-#define OMAP24XX_DSP_SW_RST2 (1 << 5)
-#define OMAP24XX_DSP_SW_RST1 (1 << 4)
+#define OMAP2420_IVA_SW_RST_MASK (1 << 8)
+#define OMAP24XX_DSP_SW_RST2_MASK (1 << 5)
+#define OMAP24XX_DSP_SW_RST1_MASK (1 << 4)
/* PM_WKDEP_DSP specific bits */
@@ -235,7 +235,7 @@
/* 2430 only: MEMONSTATE, MEMRETSTATE */
#define OMAP2420_MEMIONSTATE_SHIFT 12
#define OMAP2420_MEMIONSTATE_MASK (0x3 << 12)
-#define OMAP2420_MEMIRETSTATE (1 << 4)
+#define OMAP2420_MEMIRETSTATE_MASK (1 << 4)
/* PM_PWSTST_DSP specific bits */
/* MEMSTATEST is 2430 only */
@@ -248,18 +248,18 @@
/* RM_RSTCTRL_MDM */
/* 2430 only */
-#define OMAP2430_PWRON1_MDM (1 << 1)
-#define OMAP2430_RST1_MDM (1 << 0)
+#define OMAP2430_PWRON1_MDM_MASK (1 << 1)
+#define OMAP2430_RST1_MDM_MASK (1 << 0)
/* RM_RSTST_MDM specific bits */
/* 2430 only */
-#define OMAP2430_MDM_SECU_VIOL (1 << 6)
-#define OMAP2430_MDM_SW_PWRON1 (1 << 5)
-#define OMAP2430_MDM_SW_RST1 (1 << 4)
+#define OMAP2430_MDM_SECU_VIOL_MASK (1 << 6)
+#define OMAP2430_MDM_SW_PWRON1_MASK (1 << 5)
+#define OMAP2430_MDM_SW_RST1_MASK (1 << 4)
/* PM_WKEN_MDM */
/* 2430 only */
-#define OMAP2430_PM_WKEN_MDM_EN_MDM (1 << 0)
+#define OMAP2430_PM_WKEN_MDM_EN_MDM_MASK (1 << 0)
/* PM_WKST_MDM specific bits */
/* 2430 only */
@@ -269,7 +269,7 @@
/* PM_PWSTCTRL_MDM specific bits */
/* 2430 only */
-#define OMAP2430_KILLDOMAINWKUP (1 << 19)
+#define OMAP2430_KILLDOMAINWKUP_MASK (1 << 19)
/* PM_PWSTST_MDM specific bits */
/* 2430 only */
diff --git a/arch/arm/mach-omap2/prm-regbits-34xx.h b/arch/arm/mach-omap2/prm-regbits-34xx.h
index 8f21bae6dc1c..7fd6023edf96 100644
--- a/arch/arm/mach-omap2/prm-regbits-34xx.h
+++ b/arch/arm/mach-omap2/prm-regbits-34xx.h
@@ -35,10 +35,10 @@
#define OMAP3430_ERRORGAIN_MASK (0xff << 16)
#define OMAP3430_INITVOLTAGE_SHIFT 8
#define OMAP3430_INITVOLTAGE_MASK (0xff << 8)
-#define OMAP3430_TIMEOUTEN (1 << 3)
-#define OMAP3430_INITVDD (1 << 2)
-#define OMAP3430_FORCEUPDATE (1 << 1)
-#define OMAP3430_VPENABLE (1 << 0)
+#define OMAP3430_TIMEOUTEN_MASK (1 << 3)
+#define OMAP3430_INITVDD_MASK (1 << 2)
+#define OMAP3430_FORCEUPDATE_MASK (1 << 1)
+#define OMAP3430_VPENABLE_MASK (1 << 0)
/* PRM_VP1_VSTEPMIN, PRM_VP2_VSTEPMIN shared bits */
#define OMAP3430_SMPSWAITTIMEMIN_SHIFT 8
@@ -65,53 +65,53 @@
#define OMAP3430_VPVOLTAGE_MASK (0xff << 0)
/* PRM_VP1_STATUS, PRM_VP2_STATUS shared bits */
-#define OMAP3430_VPINIDLE (1 << 0)
+#define OMAP3430_VPINIDLE_MASK (1 << 0)
/* PM_WKDEP_IVA2, PM_WKDEP_MPU shared bits */
#define OMAP3430_EN_PER_SHIFT 7
#define OMAP3430_EN_PER_MASK (1 << 7)
/* PM_PWSTCTRL_IVA2, PM_PWSTCTRL_MPU, PM_PWSTCTRL_CORE shared bits */
-#define OMAP3430_MEMORYCHANGE (1 << 3)
+#define OMAP3430_MEMORYCHANGE_MASK (1 << 3)
/* PM_PWSTST_IVA2, PM_PWSTST_CORE shared bits */
-#define OMAP3430_LOGICSTATEST (1 << 2)
+#define OMAP3430_LOGICSTATEST_MASK (1 << 2)
/* PM_PREPWSTST_IVA2, PM_PREPWSTST_CORE shared bits */
-#define OMAP3430_LASTLOGICSTATEENTERED (1 << 2)
+#define OMAP3430_LASTLOGICSTATEENTERED_MASK (1 << 2)
/*
* PM_PREPWSTST_IVA2, PM_PREPWSTST_MPU, PM_PREPWSTST_CORE,
* PM_PREPWSTST_GFX, PM_PREPWSTST_DSS, PM_PREPWSTST_CAM,
* PM_PREPWSTST_PER, PM_PREPWSTST_NEON shared bits
*/
-#define OMAP3430_LASTPOWERSTATEENTERED_SHIFT 0
-#define OMAP3430_LASTPOWERSTATEENTERED_MASK (0x3 << 0)
+#define OMAP3430_LASTPOWERSTATEENTERED_SHIFT 0
+#define OMAP3430_LASTPOWERSTATEENTERED_MASK (0x3 << 0)
/* PRM_IRQSTATUS_IVA2, PRM_IRQSTATUS_MPU shared bits */
-#define OMAP3430_WKUP_ST (1 << 0)
+#define OMAP3430_WKUP_ST_MASK (1 << 0)
/* PRM_IRQENABLE_IVA2, PRM_IRQENABLE_MPU shared bits */
-#define OMAP3430_WKUP_EN (1 << 0)
+#define OMAP3430_WKUP_EN_MASK (1 << 0)
/* PM_MPUGRPSEL1_CORE, PM_IVA2GRPSEL1_CORE shared bits */
-#define OMAP3430_GRPSEL_MMC2 (1 << 25)
-#define OMAP3430_GRPSEL_MMC1 (1 << 24)
-#define OMAP3430_GRPSEL_MCSPI4 (1 << 21)
-#define OMAP3430_GRPSEL_MCSPI3 (1 << 20)
-#define OMAP3430_GRPSEL_MCSPI2 (1 << 19)
-#define OMAP3430_GRPSEL_MCSPI1 (1 << 18)
-#define OMAP3430_GRPSEL_I2C3 (1 << 17)
-#define OMAP3430_GRPSEL_I2C2 (1 << 16)
-#define OMAP3430_GRPSEL_I2C1 (1 << 15)
-#define OMAP3430_GRPSEL_UART2 (1 << 14)
-#define OMAP3430_GRPSEL_UART1 (1 << 13)
-#define OMAP3430_GRPSEL_GPT11 (1 << 12)
-#define OMAP3430_GRPSEL_GPT10 (1 << 11)
-#define OMAP3430_GRPSEL_MCBSP5 (1 << 10)
-#define OMAP3430_GRPSEL_MCBSP1 (1 << 9)
-#define OMAP3430_GRPSEL_HSOTGUSB (1 << 4)
-#define OMAP3430_GRPSEL_D2D (1 << 3)
+#define OMAP3430_GRPSEL_MMC2_MASK (1 << 25)
+#define OMAP3430_GRPSEL_MMC1_MASK (1 << 24)
+#define OMAP3430_GRPSEL_MCSPI4_MASK (1 << 21)
+#define OMAP3430_GRPSEL_MCSPI3_MASK (1 << 20)
+#define OMAP3430_GRPSEL_MCSPI2_MASK (1 << 19)
+#define OMAP3430_GRPSEL_MCSPI1_MASK (1 << 18)
+#define OMAP3430_GRPSEL_I2C3_MASK (1 << 17)
+#define OMAP3430_GRPSEL_I2C2_MASK (1 << 16)
+#define OMAP3430_GRPSEL_I2C1_MASK (1 << 15)
+#define OMAP3430_GRPSEL_UART2_MASK (1 << 14)
+#define OMAP3430_GRPSEL_UART1_MASK (1 << 13)
+#define OMAP3430_GRPSEL_GPT11_MASK (1 << 12)
+#define OMAP3430_GRPSEL_GPT10_MASK (1 << 11)
+#define OMAP3430_GRPSEL_MCBSP5_MASK (1 << 10)
+#define OMAP3430_GRPSEL_MCBSP1_MASK (1 << 9)
+#define OMAP3430_GRPSEL_HSOTGUSB_MASK (1 << 4)
+#define OMAP3430_GRPSEL_D2D_MASK (1 << 3)
/*
* PM_PWSTCTRL_GFX, PM_PWSTCTRL_DSS, PM_PWSTCTRL_CAM,
@@ -119,49 +119,49 @@
*/
#define OMAP3430_MEMONSTATE_SHIFT 16
#define OMAP3430_MEMONSTATE_MASK (0x3 << 16)
-#define OMAP3430_MEMRETSTATE (1 << 8)
+#define OMAP3430_MEMRETSTATE_MASK (1 << 8)
/* PM_MPUGRPSEL_PER, PM_IVA2GRPSEL_PER shared bits */
-#define OMAP3430_GRPSEL_GPIO6 (1 << 17)
-#define OMAP3430_GRPSEL_GPIO5 (1 << 16)
-#define OMAP3430_GRPSEL_GPIO4 (1 << 15)
-#define OMAP3430_GRPSEL_GPIO3 (1 << 14)
-#define OMAP3430_GRPSEL_GPIO2 (1 << 13)
-#define OMAP3430_GRPSEL_UART3 (1 << 11)
-#define OMAP3430_GRPSEL_GPT9 (1 << 10)
-#define OMAP3430_GRPSEL_GPT8 (1 << 9)
-#define OMAP3430_GRPSEL_GPT7 (1 << 8)
-#define OMAP3430_GRPSEL_GPT6 (1 << 7)
-#define OMAP3430_GRPSEL_GPT5 (1 << 6)
-#define OMAP3430_GRPSEL_GPT4 (1 << 5)
-#define OMAP3430_GRPSEL_GPT3 (1 << 4)
-#define OMAP3430_GRPSEL_GPT2 (1 << 3)
-#define OMAP3430_GRPSEL_MCBSP4 (1 << 2)
-#define OMAP3430_GRPSEL_MCBSP3 (1 << 1)
-#define OMAP3430_GRPSEL_MCBSP2 (1 << 0)
+#define OMAP3430_GRPSEL_GPIO6_MASK (1 << 17)
+#define OMAP3430_GRPSEL_GPIO5_MASK (1 << 16)
+#define OMAP3430_GRPSEL_GPIO4_MASK (1 << 15)
+#define OMAP3430_GRPSEL_GPIO3_MASK (1 << 14)
+#define OMAP3430_GRPSEL_GPIO2_MASK (1 << 13)
+#define OMAP3430_GRPSEL_UART3_MASK (1 << 11)
+#define OMAP3430_GRPSEL_GPT9_MASK (1 << 10)
+#define OMAP3430_GRPSEL_GPT8_MASK (1 << 9)
+#define OMAP3430_GRPSEL_GPT7_MASK (1 << 8)
+#define OMAP3430_GRPSEL_GPT6_MASK (1 << 7)
+#define OMAP3430_GRPSEL_GPT5_MASK (1 << 6)
+#define OMAP3430_GRPSEL_GPT4_MASK (1 << 5)
+#define OMAP3430_GRPSEL_GPT3_MASK (1 << 4)
+#define OMAP3430_GRPSEL_GPT2_MASK (1 << 3)
+#define OMAP3430_GRPSEL_MCBSP4_MASK (1 << 2)
+#define OMAP3430_GRPSEL_MCBSP3_MASK (1 << 1)
+#define OMAP3430_GRPSEL_MCBSP2_MASK (1 << 0)
/* PM_MPUGRPSEL_WKUP, PM_IVA2GRPSEL_WKUP shared bits */
-#define OMAP3430_GRPSEL_IO (1 << 8)
-#define OMAP3430_GRPSEL_SR2 (1 << 7)
-#define OMAP3430_GRPSEL_SR1 (1 << 6)
-#define OMAP3430_GRPSEL_GPIO1 (1 << 3)
-#define OMAP3430_GRPSEL_GPT12 (1 << 1)
-#define OMAP3430_GRPSEL_GPT1 (1 << 0)
+#define OMAP3430_GRPSEL_IO_MASK (1 << 8)
+#define OMAP3430_GRPSEL_SR2_MASK (1 << 7)
+#define OMAP3430_GRPSEL_SR1_MASK (1 << 6)
+#define OMAP3430_GRPSEL_GPIO1_MASK (1 << 3)
+#define OMAP3430_GRPSEL_GPT12_MASK (1 << 1)
+#define OMAP3430_GRPSEL_GPT1_MASK (1 << 0)
/* Bits specific to each register */
/* RM_RSTCTRL_IVA2 */
-#define OMAP3430_RST3_IVA2 (1 << 2)
-#define OMAP3430_RST2_IVA2 (1 << 1)
-#define OMAP3430_RST1_IVA2 (1 << 0)
+#define OMAP3430_RST3_IVA2_MASK (1 << 2)
+#define OMAP3430_RST2_IVA2_MASK (1 << 1)
+#define OMAP3430_RST1_IVA2_MASK (1 << 0)
/* RM_RSTST_IVA2 specific bits */
-#define OMAP3430_EMULATION_VSEQ_RST (1 << 13)
-#define OMAP3430_EMULATION_VHWA_RST (1 << 12)
-#define OMAP3430_EMULATION_IVA2_RST (1 << 11)
-#define OMAP3430_IVA2_SW_RST3 (1 << 10)
-#define OMAP3430_IVA2_SW_RST2 (1 << 9)
-#define OMAP3430_IVA2_SW_RST1 (1 << 8)
+#define OMAP3430_EMULATION_VSEQ_RST_MASK (1 << 13)
+#define OMAP3430_EMULATION_VHWA_RST_MASK (1 << 12)
+#define OMAP3430_EMULATION_IVA2_RST_MASK (1 << 11)
+#define OMAP3430_IVA2_SW_RST3_MASK (1 << 10)
+#define OMAP3430_IVA2_SW_RST2_MASK (1 << 9)
+#define OMAP3430_IVA2_SW_RST1_MASK (1 << 8)
/* PM_WKDEP_IVA2 specific bits */
@@ -174,10 +174,10 @@
#define OMAP3430_L1FLATMEMONSTATE_MASK (0x3 << 18)
#define OMAP3430_SHAREDL1CACHEFLATONSTATE_SHIFT 16
#define OMAP3430_SHAREDL1CACHEFLATONSTATE_MASK (0x3 << 16)
-#define OMAP3430_L2FLATMEMRETSTATE (1 << 11)
-#define OMAP3430_SHAREDL2CACHEFLATRETSTATE (1 << 10)
-#define OMAP3430_L1FLATMEMRETSTATE (1 << 9)
-#define OMAP3430_SHAREDL1CACHEFLATRETSTATE (1 << 8)
+#define OMAP3430_L2FLATMEMRETSTATE_MASK (1 << 11)
+#define OMAP3430_SHAREDL2CACHEFLATRETSTATE_MASK (1 << 10)
+#define OMAP3430_L1FLATMEMRETSTATE_MASK (1 << 9)
+#define OMAP3430_SHAREDL1CACHEFLATRETSTATE_MASK (1 << 8)
/* PM_PWSTST_IVA2 specific bits */
#define OMAP3430_L2FLATMEMSTATEST_SHIFT 10
@@ -200,12 +200,12 @@
#define OMAP3430_LASTSHAREDL1CACHEFLATSTATEENTERED_MASK (0x3 << 4)
/* PRM_IRQSTATUS_IVA2 specific bits */
-#define OMAP3430_PRM_IRQSTATUS_IVA2_IVA2_DPLL_ST (1 << 2)
-#define OMAP3430_FORCEWKUP_ST (1 << 1)
+#define OMAP3430_PRM_IRQSTATUS_IVA2_IVA2_DPLL_ST_MASK (1 << 2)
+#define OMAP3430_FORCEWKUP_ST_MASK (1 << 1)
/* PRM_IRQENABLE_IVA2 specific bits */
-#define OMAP3430_PRM_IRQENABLE_IVA2_IVA2_DPLL_RECAL_EN (1 << 2)
-#define OMAP3430_FORCEWKUP_EN (1 << 1)
+#define OMAP3430_PRM_IRQENABLE_IVA2_IVA2_DPLL_RECAL_EN_MASK (1 << 2)
+#define OMAP3430_FORCEWKUP_EN_MASK (1 << 1)
/* PRM_REVISION specific bits */
@@ -213,70 +213,70 @@
/* PRM_IRQSTATUS_MPU specific bits */
#define OMAP3430ES2_SND_PERIPH_DPLL_ST_SHIFT 25
-#define OMAP3430ES2_SND_PERIPH_DPLL_ST (1 << 25)
-#define OMAP3430_VC_TIMEOUTERR_ST (1 << 24)
-#define OMAP3430_VC_RAERR_ST (1 << 23)
-#define OMAP3430_VC_SAERR_ST (1 << 22)
-#define OMAP3430_VP2_TRANXDONE_ST (1 << 21)
-#define OMAP3430_VP2_EQVALUE_ST (1 << 20)
-#define OMAP3430_VP2_NOSMPSACK_ST (1 << 19)
-#define OMAP3430_VP2_MAXVDD_ST (1 << 18)
-#define OMAP3430_VP2_MINVDD_ST (1 << 17)
-#define OMAP3430_VP2_OPPCHANGEDONE_ST (1 << 16)
-#define OMAP3430_VP1_TRANXDONE_ST (1 << 15)
-#define OMAP3430_VP1_EQVALUE_ST (1 << 14)
-#define OMAP3430_VP1_NOSMPSACK_ST (1 << 13)
-#define OMAP3430_VP1_MAXVDD_ST (1 << 12)
-#define OMAP3430_VP1_MINVDD_ST (1 << 11)
-#define OMAP3430_VP1_OPPCHANGEDONE_ST (1 << 10)
-#define OMAP3430_IO_ST (1 << 9)
-#define OMAP3430_PRM_IRQSTATUS_MPU_IVA2_DPLL_ST (1 << 8)
+#define OMAP3430ES2_SND_PERIPH_DPLL_ST_MASK (1 << 25)
+#define OMAP3430_VC_TIMEOUTERR_ST_MASK (1 << 24)
+#define OMAP3430_VC_RAERR_ST_MASK (1 << 23)
+#define OMAP3430_VC_SAERR_ST_MASK (1 << 22)
+#define OMAP3430_VP2_TRANXDONE_ST_MASK (1 << 21)
+#define OMAP3430_VP2_EQVALUE_ST_MASK (1 << 20)
+#define OMAP3430_VP2_NOSMPSACK_ST_MASK (1 << 19)
+#define OMAP3430_VP2_MAXVDD_ST_MASK (1 << 18)
+#define OMAP3430_VP2_MINVDD_ST_MASK (1 << 17)
+#define OMAP3430_VP2_OPPCHANGEDONE_ST_MASK (1 << 16)
+#define OMAP3430_VP1_TRANXDONE_ST_MASK (1 << 15)
+#define OMAP3430_VP1_EQVALUE_ST_MASK (1 << 14)
+#define OMAP3430_VP1_NOSMPSACK_ST_MASK (1 << 13)
+#define OMAP3430_VP1_MAXVDD_ST_MASK (1 << 12)
+#define OMAP3430_VP1_MINVDD_ST_MASK (1 << 11)
+#define OMAP3430_VP1_OPPCHANGEDONE_ST_MASK (1 << 10)
+#define OMAP3430_IO_ST_MASK (1 << 9)
+#define OMAP3430_PRM_IRQSTATUS_MPU_IVA2_DPLL_ST_MASK (1 << 8)
#define OMAP3430_PRM_IRQSTATUS_MPU_IVA2_DPLL_ST_SHIFT 8
-#define OMAP3430_MPU_DPLL_ST (1 << 7)
+#define OMAP3430_MPU_DPLL_ST_MASK (1 << 7)
#define OMAP3430_MPU_DPLL_ST_SHIFT 7
-#define OMAP3430_PERIPH_DPLL_ST (1 << 6)
+#define OMAP3430_PERIPH_DPLL_ST_MASK (1 << 6)
#define OMAP3430_PERIPH_DPLL_ST_SHIFT 6
-#define OMAP3430_CORE_DPLL_ST (1 << 5)
+#define OMAP3430_CORE_DPLL_ST_MASK (1 << 5)
#define OMAP3430_CORE_DPLL_ST_SHIFT 5
-#define OMAP3430_TRANSITION_ST (1 << 4)
-#define OMAP3430_EVGENOFF_ST (1 << 3)
-#define OMAP3430_EVGENON_ST (1 << 2)
-#define OMAP3430_FS_USB_WKUP_ST (1 << 1)
+#define OMAP3430_TRANSITION_ST_MASK (1 << 4)
+#define OMAP3430_EVGENOFF_ST_MASK (1 << 3)
+#define OMAP3430_EVGENON_ST_MASK (1 << 2)
+#define OMAP3430_FS_USB_WKUP_ST_MASK (1 << 1)
/* PRM_IRQENABLE_MPU specific bits */
#define OMAP3430ES2_SND_PERIPH_DPLL_RECAL_EN_SHIFT 25
-#define OMAP3430ES2_SND_PERIPH_DPLL_RECAL_EN (1 << 25)
-#define OMAP3430_VC_TIMEOUTERR_EN (1 << 24)
-#define OMAP3430_VC_RAERR_EN (1 << 23)
-#define OMAP3430_VC_SAERR_EN (1 << 22)
-#define OMAP3430_VP2_TRANXDONE_EN (1 << 21)
-#define OMAP3430_VP2_EQVALUE_EN (1 << 20)
-#define OMAP3430_VP2_NOSMPSACK_EN (1 << 19)
-#define OMAP3430_VP2_MAXVDD_EN (1 << 18)
-#define OMAP3430_VP2_MINVDD_EN (1 << 17)
-#define OMAP3430_VP2_OPPCHANGEDONE_EN (1 << 16)
-#define OMAP3430_VP1_TRANXDONE_EN (1 << 15)
-#define OMAP3430_VP1_EQVALUE_EN (1 << 14)
-#define OMAP3430_VP1_NOSMPSACK_EN (1 << 13)
-#define OMAP3430_VP1_MAXVDD_EN (1 << 12)
-#define OMAP3430_VP1_MINVDD_EN (1 << 11)
-#define OMAP3430_VP1_OPPCHANGEDONE_EN (1 << 10)
-#define OMAP3430_IO_EN (1 << 9)
-#define OMAP3430_PRM_IRQENABLE_MPU_IVA2_DPLL_RECAL_EN (1 << 8)
+#define OMAP3430ES2_SND_PERIPH_DPLL_RECAL_EN_MASK (1 << 25)
+#define OMAP3430_VC_TIMEOUTERR_EN_MASK (1 << 24)
+#define OMAP3430_VC_RAERR_EN_MASK (1 << 23)
+#define OMAP3430_VC_SAERR_EN_MASK (1 << 22)
+#define OMAP3430_VP2_TRANXDONE_EN_MASK (1 << 21)
+#define OMAP3430_VP2_EQVALUE_EN_MASK (1 << 20)
+#define OMAP3430_VP2_NOSMPSACK_EN_MASK (1 << 19)
+#define OMAP3430_VP2_MAXVDD_EN_MASK (1 << 18)
+#define OMAP3430_VP2_MINVDD_EN_MASK (1 << 17)
+#define OMAP3430_VP2_OPPCHANGEDONE_EN_MASK (1 << 16)
+#define OMAP3430_VP1_TRANXDONE_EN_MASK (1 << 15)
+#define OMAP3430_VP1_EQVALUE_EN_MASK (1 << 14)
+#define OMAP3430_VP1_NOSMPSACK_EN_MASK (1 << 13)
+#define OMAP3430_VP1_MAXVDD_EN_MASK (1 << 12)
+#define OMAP3430_VP1_MINVDD_EN_MASK (1 << 11)
+#define OMAP3430_VP1_OPPCHANGEDONE_EN_MASK (1 << 10)
+#define OMAP3430_IO_EN_MASK (1 << 9)
+#define OMAP3430_PRM_IRQENABLE_MPU_IVA2_DPLL_RECAL_EN_MASK (1 << 8)
#define OMAP3430_PRM_IRQENABLE_MPU_IVA2_DPLL_RECAL_EN_SHIFT 8
-#define OMAP3430_MPU_DPLL_RECAL_EN (1 << 7)
+#define OMAP3430_MPU_DPLL_RECAL_EN_MASK (1 << 7)
#define OMAP3430_MPU_DPLL_RECAL_EN_SHIFT 7
-#define OMAP3430_PERIPH_DPLL_RECAL_EN (1 << 6)
+#define OMAP3430_PERIPH_DPLL_RECAL_EN_MASK (1 << 6)
#define OMAP3430_PERIPH_DPLL_RECAL_EN_SHIFT 6
-#define OMAP3430_CORE_DPLL_RECAL_EN (1 << 5)
+#define OMAP3430_CORE_DPLL_RECAL_EN_MASK (1 << 5)
#define OMAP3430_CORE_DPLL_RECAL_EN_SHIFT 5
-#define OMAP3430_TRANSITION_EN (1 << 4)
-#define OMAP3430_EVGENOFF_EN (1 << 3)
-#define OMAP3430_EVGENON_EN (1 << 2)
-#define OMAP3430_FS_USB_WKUP_EN (1 << 1)
+#define OMAP3430_TRANSITION_EN_MASK (1 << 4)
+#define OMAP3430_EVGENOFF_EN_MASK (1 << 3)
+#define OMAP3430_EVGENON_EN_MASK (1 << 2)
+#define OMAP3430_FS_USB_WKUP_EN_MASK (1 << 1)
/* RM_RSTST_MPU specific bits */
-#define OMAP3430_EMULATION_MPU_RST (1 << 11)
+#define OMAP3430_EMULATION_MPU_RST_MASK (1 << 11)
/* PM_WKDEP_MPU specific bits */
#define OMAP3430_PM_WKDEP_MPU_EN_DSS_SHIFT 5
@@ -289,7 +289,7 @@
#define OMAP3430_OFFLOADMODE_MASK (0x3 << 3)
#define OMAP3430_ONLOADMODE_SHIFT 1
#define OMAP3430_ONLOADMODE_MASK (0x3 << 1)
-#define OMAP3430_ENABLE (1 << 0)
+#define OMAP3430_ENABLE_MASK (1 << 0)
/* PM_EVGENONTIM_MPU */
#define OMAP3430_ONTIMEVAL_SHIFT 0
@@ -302,32 +302,32 @@
/* PM_PWSTCTRL_MPU specific bits */
#define OMAP3430_L2CACHEONSTATE_SHIFT 16
#define OMAP3430_L2CACHEONSTATE_MASK (0x3 << 16)
-#define OMAP3430_L2CACHERETSTATE (1 << 8)
-#define OMAP3430_LOGICL1CACHERETSTATE (1 << 2)
+#define OMAP3430_L2CACHERETSTATE_MASK (1 << 8)
+#define OMAP3430_LOGICL1CACHERETSTATE_MASK (1 << 2)
/* PM_PWSTST_MPU specific bits */
#define OMAP3430_L2CACHESTATEST_SHIFT 6
#define OMAP3430_L2CACHESTATEST_MASK (0x3 << 6)
-#define OMAP3430_LOGICL1CACHESTATEST (1 << 2)
+#define OMAP3430_LOGICL1CACHESTATEST_MASK (1 << 2)
/* PM_PREPWSTST_MPU specific bits */
#define OMAP3430_LASTL2CACHESTATEENTERED_SHIFT 6
#define OMAP3430_LASTL2CACHESTATEENTERED_MASK (0x3 << 6)
-#define OMAP3430_LASTLOGICL1CACHESTATEENTERED (1 << 2)
+#define OMAP3430_LASTLOGICL1CACHESTATEENTERED_MASK (1 << 2)
/* RM_RSTCTRL_CORE */
-#define OMAP3430_RM_RSTCTRL_CORE_MODEM_SW_RSTPWRON (1 << 1)
-#define OMAP3430_RM_RSTCTRL_CORE_MODEM_SW_RST (1 << 0)
+#define OMAP3430_RM_RSTCTRL_CORE_MODEM_SW_RSTPWRON_MASK (1 << 1)
+#define OMAP3430_RM_RSTCTRL_CORE_MODEM_SW_RST_MASK (1 << 0)
/* RM_RSTST_CORE specific bits */
-#define OMAP3430_MODEM_SECURITY_VIOL_RST (1 << 10)
-#define OMAP3430_RM_RSTST_CORE_MODEM_SW_RSTPWRON (1 << 9)
-#define OMAP3430_RM_RSTST_CORE_MODEM_SW_RST (1 << 8)
+#define OMAP3430_MODEM_SECURITY_VIOL_RST_MASK (1 << 10)
+#define OMAP3430_RM_RSTST_CORE_MODEM_SW_RSTPWRON_MASK (1 << 9)
+#define OMAP3430_RM_RSTST_CORE_MODEM_SW_RST_MASK (1 << 8)
/* PM_WKEN1_CORE specific bits */
/* PM_MPUGRPSEL1_CORE specific bits */
-#define OMAP3430_GRPSEL_FSHOSTUSB (1 << 5)
+#define OMAP3430_GRPSEL_FSHOSTUSB_MASK (1 << 5)
/* PM_IVA2GRPSEL1_CORE specific bits */
@@ -338,8 +338,8 @@
#define OMAP3430_MEM2ONSTATE_MASK (0x3 << 18)
#define OMAP3430_MEM1ONSTATE_SHIFT 16
#define OMAP3430_MEM1ONSTATE_MASK (0x3 << 16)
-#define OMAP3430_MEM2RETSTATE (1 << 9)
-#define OMAP3430_MEM1RETSTATE (1 << 8)
+#define OMAP3430_MEM2RETSTATE_MASK (1 << 9)
+#define OMAP3430_MEM1RETSTATE_MASK (1 << 8)
/* PM_PWSTST_CORE specific bits */
#define OMAP3430_MEM2STATEST_SHIFT 6
@@ -356,7 +356,7 @@
/* RM_RSTST_GFX specific bits */
/* PM_WKDEP_GFX specific bits */
-#define OMAP3430_PM_WKDEP_GFX_EN_IVA2 (1 << 2)
+#define OMAP3430_PM_WKDEP_GFX_EN_IVA2_MASK (1 << 2)
/* PM_PWSTCTRL_GFX specific bits */
@@ -365,33 +365,33 @@
/* PM_PREPWSTST_GFX specific bits */
/* PM_WKEN_WKUP specific bits */
-#define OMAP3430_EN_IO_CHAIN (1 << 16)
-#define OMAP3430_EN_IO (1 << 8)
-#define OMAP3430_EN_GPIO1 (1 << 3)
+#define OMAP3430_EN_IO_CHAIN_MASK (1 << 16)
+#define OMAP3430_EN_IO_MASK (1 << 8)
+#define OMAP3430_EN_GPIO1_MASK (1 << 3)
/* PM_MPUGRPSEL_WKUP specific bits */
/* PM_IVA2GRPSEL_WKUP specific bits */
/* PM_WKST_WKUP specific bits */
-#define OMAP3430_ST_IO_CHAIN (1 << 16)
-#define OMAP3430_ST_IO (1 << 8)
+#define OMAP3430_ST_IO_CHAIN_MASK (1 << 16)
+#define OMAP3430_ST_IO_MASK (1 << 8)
/* PRM_CLKSEL */
#define OMAP3430_SYS_CLKIN_SEL_SHIFT 0
#define OMAP3430_SYS_CLKIN_SEL_MASK (0x7 << 0)
/* PRM_CLKOUT_CTRL */
-#define OMAP3430_CLKOUT_EN (1 << 7)
+#define OMAP3430_CLKOUT_EN_MASK (1 << 7)
#define OMAP3430_CLKOUT_EN_SHIFT 7
/* RM_RSTST_DSS specific bits */
/* PM_WKEN_DSS */
-#define OMAP3430_PM_WKEN_DSS_EN_DSS (1 << 0)
+#define OMAP3430_PM_WKEN_DSS_EN_DSS_MASK (1 << 0)
/* PM_WKDEP_DSS specific bits */
-#define OMAP3430_PM_WKDEP_DSS_EN_IVA2 (1 << 2)
+#define OMAP3430_PM_WKDEP_DSS_EN_IVA2_MASK (1 << 2)
/* PM_PWSTCTRL_DSS specific bits */
@@ -402,7 +402,7 @@
/* RM_RSTST_CAM specific bits */
/* PM_WKDEP_CAM specific bits */
-#define OMAP3430_PM_WKDEP_CAM_EN_IVA2 (1 << 2)
+#define OMAP3430_PM_WKDEP_CAM_EN_IVA2_MASK (1 << 2)
/* PM_PWSTCTRL_CAM specific bits */
@@ -424,7 +424,7 @@
/* PM_WKST_PER specific bits */
/* PM_WKDEP_PER specific bits */
-#define OMAP3430_PM_WKDEP_PER_EN_IVA2 (1 << 2)
+#define OMAP3430_PM_WKDEP_PER_EN_IVA2_MASK (1 << 2)
/* PM_PWSTCTRL_PER specific bits */
@@ -467,26 +467,26 @@
/* PRM_VC_CMD_VAL_1 specific bits */
/* PRM_VC_CH_CONF */
-#define OMAP3430_CMD1 (1 << 20)
-#define OMAP3430_RACEN1 (1 << 19)
-#define OMAP3430_RAC1 (1 << 18)
-#define OMAP3430_RAV1 (1 << 17)
-#define OMAP3430_PRM_VC_CH_CONF_SA1 (1 << 16)
-#define OMAP3430_CMD0 (1 << 4)
-#define OMAP3430_RACEN0 (1 << 3)
-#define OMAP3430_RAC0 (1 << 2)
-#define OMAP3430_RAV0 (1 << 1)
-#define OMAP3430_PRM_VC_CH_CONF_SA0 (1 << 0)
+#define OMAP3430_CMD1_MASK (1 << 20)
+#define OMAP3430_RACEN1_MASK (1 << 19)
+#define OMAP3430_RAC1_MASK (1 << 18)
+#define OMAP3430_RAV1_MASK (1 << 17)
+#define OMAP3430_PRM_VC_CH_CONF_SA1_MASK (1 << 16)
+#define OMAP3430_CMD0_MASK (1 << 4)
+#define OMAP3430_RACEN0_MASK (1 << 3)
+#define OMAP3430_RAC0_MASK (1 << 2)
+#define OMAP3430_RAV0_MASK (1 << 1)
+#define OMAP3430_PRM_VC_CH_CONF_SA0_MASK (1 << 0)
/* PRM_VC_I2C_CFG */
-#define OMAP3430_HSMASTER (1 << 5)
-#define OMAP3430_SREN (1 << 4)
-#define OMAP3430_HSEN (1 << 3)
+#define OMAP3430_HSMASTER_MASK (1 << 5)
+#define OMAP3430_SREN_MASK (1 << 4)
+#define OMAP3430_HSEN_MASK (1 << 3)
#define OMAP3430_MCODE_SHIFT 0
#define OMAP3430_MCODE_MASK (0x7 << 0)
/* PRM_VC_BYPASS_VAL */
-#define OMAP3430_VALID (1 << 24)
+#define OMAP3430_VALID_MASK (1 << 24)
#define OMAP3430_DATA_SHIFT 16
#define OMAP3430_DATA_MASK (0xff << 16)
#define OMAP3430_REGADDR_SHIFT 8
@@ -495,8 +495,8 @@
#define OMAP3430_SLAVEADDR_MASK (0x7f << 0)
/* PRM_RSTCTRL */
-#define OMAP3430_RST_DPLL3 (1 << 2)
-#define OMAP3430_RST_GS (1 << 1)
+#define OMAP3430_RST_DPLL3_MASK (1 << 2)
+#define OMAP3430_RST_GS_MASK (1 << 1)
/* PRM_RSTTIME */
#define OMAP3430_RSTTIME2_SHIFT 8
@@ -505,23 +505,23 @@
#define OMAP3430_RSTTIME1_MASK (0xff << 0)
/* PRM_RSTST */
-#define OMAP3430_ICECRUSHER_RST (1 << 10)
-#define OMAP3430_ICEPICK_RST (1 << 9)
-#define OMAP3430_VDD2_VOLTAGE_MANAGER_RST (1 << 8)
-#define OMAP3430_VDD1_VOLTAGE_MANAGER_RST (1 << 7)
-#define OMAP3430_EXTERNAL_WARM_RST (1 << 6)
-#define OMAP3430_SECURE_WD_RST (1 << 5)
-#define OMAP3430_MPU_WD_RST (1 << 4)
-#define OMAP3430_SECURITY_VIOL_RST (1 << 3)
-#define OMAP3430_GLOBAL_SW_RST (1 << 1)
-#define OMAP3430_GLOBAL_COLD_RST (1 << 0)
+#define OMAP3430_ICECRUSHER_RST_MASK (1 << 10)
+#define OMAP3430_ICEPICK_RST_MASK (1 << 9)
+#define OMAP3430_VDD2_VOLTAGE_MANAGER_RST_MASK (1 << 8)
+#define OMAP3430_VDD1_VOLTAGE_MANAGER_RST_MASK (1 << 7)
+#define OMAP3430_EXTERNAL_WARM_RST_MASK (1 << 6)
+#define OMAP3430_SECURE_WD_RST_MASK (1 << 5)
+#define OMAP3430_MPU_WD_RST_MASK (1 << 4)
+#define OMAP3430_SECURITY_VIOL_RST_MASK (1 << 3)
+#define OMAP3430_GLOBAL_SW_RST_MASK (1 << 1)
+#define OMAP3430_GLOBAL_COLD_RST_MASK (1 << 0)
/* PRM_VOLTCTRL */
-#define OMAP3430_SEL_VMODE (1 << 4)
-#define OMAP3430_SEL_OFF (1 << 3)
-#define OMAP3430_AUTO_OFF (1 << 2)
-#define OMAP3430_AUTO_RET (1 << 1)
-#define OMAP3430_AUTO_SLEEP (1 << 0)
+#define OMAP3430_SEL_VMODE_MASK (1 << 4)
+#define OMAP3430_SEL_OFF_MASK (1 << 3)
+#define OMAP3430_AUTO_OFF_MASK (1 << 2)
+#define OMAP3430_AUTO_RET_MASK (1 << 1)
+#define OMAP3430_AUTO_SLEEP_MASK (1 << 0)
/* PRM_SRAM_PCHARGE */
#define OMAP3430_PCHARGE_TIME_SHIFT 0
@@ -550,10 +550,10 @@
#define OMAP3430_SETUP_TIME_MASK (0xffff << 0)
/* PRM_POLCTRL */
-#define OMAP3430_OFFMODE_POL (1 << 3)
-#define OMAP3430_CLKOUT_POL (1 << 2)
-#define OMAP3430_CLKREQ_POL (1 << 1)
-#define OMAP3430_EXTVOL_POL (1 << 0)
+#define OMAP3430_OFFMODE_POL_MASK (1 << 3)
+#define OMAP3430_CLKOUT_POL_MASK (1 << 2)
+#define OMAP3430_CLKREQ_POL_MASK (1 << 1)
+#define OMAP3430_EXTVOL_POL_MASK (1 << 0)
/* PRM_VOLTSETUP2 */
#define OMAP3430_OFFMODESETUPTIME_SHIFT 0
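
[Editorial illustration, not part of the patch] After the rename, every single-bit field in prm-regbits-34xx.h carries the same _MASK suffix as the multi-bit fields, alongside its _SHIFT where one exists. A minimal sketch of the resulting idiom, using the PRM accessors visible elsewhere in this diff (prm_read_mod_reg(), prm_rmw_mod_reg_bits()); the example function names are made up for illustration:

/* Illustrative only: the _SHIFT/_MASK idiom with the renamed macros. */
static u8 example_read_initvoltage(s16 module, u16 reg)
{
	u32 v = prm_read_mod_reg(module, reg);

	/* Extract a multi-bit field with its mask, then shift it down. */
	return (v & OMAP3430_INITVOLTAGE_MASK) >> OMAP3430_INITVOLTAGE_SHIFT;
}

static void example_enable_vp_timeout(s16 module, u16 reg)
{
	/* Single-bit fields use the same read-modify-write helper. */
	prm_rmw_mod_reg_bits(OMAP3430_TIMEOUTEN_MASK, OMAP3430_TIMEOUTEN_MASK,
			     module, reg);
}
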
diff --git a/arch/arm/mach-omap2/prm.h b/arch/arm/mach-omap2/prm.h
index 5fba2aa8932c..588873b9303a 100644
--- a/arch/arm/mach-omap2/prm.h
+++ b/arch/arm/mach-omap2/prm.h
@@ -24,8 +24,8 @@
OMAP2_L4_IO_ADDRESS(OMAP3430_PRM_BASE + (module) + (reg))
#define OMAP44XX_PRM_REGADDR(module, reg) \
OMAP2_L4_IO_ADDRESS(OMAP4430_PRM_BASE + (module) + (reg))
-#define OMAP44XX_CHIRONSS_REGADDR(module, reg) \
- OMAP2_L4_IO_ADDRESS(OMAP4430_CHIRONSS_BASE + (module) + (reg))
+#define OMAP44XX_PRCM_MPU_REGADDR(module, reg) \
+ OMAP2_L4_IO_ADDRESS(OMAP4430_PRCM_MPU_BASE + (module) + (reg))
#include "prm44xx.h"
@@ -284,7 +284,7 @@ static inline u32 prm_clear_mod_reg_bits(u32 bits, s16 module, s16 idx)
#define OMAP_OFFLOADMODE_MASK (0x3 << 3)
#define OMAP_ONLOADMODE_SHIFT 1
#define OMAP_ONLOADMODE_MASK (0x3 << 1)
-#define OMAP_ENABLE (1 << 0)
+#define OMAP_ENABLE_MASK (1 << 0)
/* PRM_RSTTIME */
/* Named RM_RSTTIME_WKUP on the 24xx */
@@ -296,8 +296,8 @@ static inline u32 prm_clear_mod_reg_bits(u32 bits, s16 module, s16 idx)
/* PRM_RSTCTRL */
/* Named RM_RSTCTRL_WKUP on the 24xx */
/* 2420 calls RST_DPLL3 'RST_DPLL' */
-#define OMAP_RST_DPLL3 (1 << 2)
-#define OMAP_RST_GS (1 << 1)
+#define OMAP_RST_DPLL3_MASK (1 << 2)
+#define OMAP_RST_GS_MASK (1 << 1)
/*
@@ -316,7 +316,7 @@ static inline u32 prm_clear_mod_reg_bits(u32 bits, s16 module, s16 idx)
* PM_PWSTST_DSS, PM_PWSTST_CAM, PM_PWSTST_PER, PM_PWSTST_EMU,
* PM_PWSTST_NEON
*/
-#define OMAP_INTRANSITION (1 << 20)
+#define OMAP_INTRANSITION_MASK (1 << 20)
/*
@@ -338,7 +338,7 @@ static inline u32 prm_clear_mod_reg_bits(u32 bits, s16 module, s16 idx)
* 3430: RM_RSTST_IVA2, RM_RSTST_MPU, RM_RSTST_GFX, RM_RSTST_DSS,
* RM_RSTST_CAM, RM_RSTST_PER, RM_RSTST_NEON
*/
-#define OMAP_COREDOMAINWKUP_RST (1 << 3)
+#define OMAP_COREDOMAINWKUP_RST_MASK (1 << 3)
/*
* 24XX: RM_RSTST_MPU, RM_RSTST_GFX, RM_RSTST_DSP
@@ -347,7 +347,7 @@ static inline u32 prm_clear_mod_reg_bits(u32 bits, s16 module, s16 idx)
*
* 3430: RM_RSTST_CORE, RM_RSTST_EMU
*/
-#define OMAP_DOMAINWKUP_RST (1 << 2)
+#define OMAP_DOMAINWKUP_RST_MASK (1 << 2)
/*
* 24XX: RM_RSTST_MPU, RM_RSTST_WKUP, RM_RSTST_DSP
@@ -357,8 +357,8 @@ static inline u32 prm_clear_mod_reg_bits(u32 bits, s16 module, s16 idx)
*
* 3430: RM_RSTST_CORE, RM_RSTST_EMU
*/
-#define OMAP_GLOBALWARM_RST (1 << 1)
-#define OMAP_GLOBALCOLD_RST (1 << 0)
+#define OMAP_GLOBALWARM_RST_MASK (1 << 1)
+#define OMAP_GLOBALCOLD_RST_MASK (1 << 0)
/*
* 24XX: PM_WKDEP_GFX, PM_WKDEP_MPU, PM_WKDEP_CORE, PM_WKDEP_DSP
@@ -382,7 +382,7 @@ static inline u32 prm_clear_mod_reg_bits(u32 bits, s16 module, s16 idx)
* PM_PWSTCTRL_DSS, PM_PWSTCTRL_CAM, PM_PWSTCTRL_PER,
* PM_PWSTCTRL_NEON
*/
-#define OMAP_LOGICRETSTATE (1 << 2)
+#define OMAP_LOGICRETSTATE_MASK (1 << 2)
/*
* 24XX: PM_PWSTCTRL_MPU, PM_PWSTCTRL_CORE, PM_PWSTCTRL_GFX,
diff --git a/arch/arm/mach-omap2/prm44xx.h b/arch/arm/mach-omap2/prm44xx.h
index adb2558bb121..fe8ef26431e5 100644
--- a/arch/arm/mach-omap2/prm44xx.h
+++ b/arch/arm/mach-omap2/prm44xx.h
@@ -1,8 +1,8 @@
/*
* OMAP44xx PRM instance offset macros
*
- * Copyright (C) 2009 Texas Instruments, Inc.
- * Copyright (C) 2009 Nokia Corporation
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ * Copyright (C) 2009-2010 Nokia Corporation
*
* Paul Walmsley (paul@pwsan.com)
* Rajendra Nayak (rnayak@ti.com)
@@ -25,387 +25,726 @@
/* PRM */
-
/* PRM.OCP_SOCKET_PRM register offsets */
+#define OMAP4_REVISION_PRM_OFFSET 0x0000
#define OMAP4430_REVISION_PRM OMAP44XX_PRM_REGADDR(OMAP4430_PRM_OCP_SOCKET_MOD, 0x0000)
+#define OMAP4_PRM_IRQSTATUS_MPU_OFFSET 0x0010
#define OMAP4430_PRM_IRQSTATUS_MPU OMAP44XX_PRM_REGADDR(OMAP4430_PRM_OCP_SOCKET_MOD, 0x0010)
+#define OMAP4_PRM_IRQSTATUS_MPU_2_OFFSET 0x0014
#define OMAP4430_PRM_IRQSTATUS_MPU_2 OMAP44XX_PRM_REGADDR(OMAP4430_PRM_OCP_SOCKET_MOD, 0x0014)
+#define OMAP4_PRM_IRQENABLE_MPU_OFFSET 0x0018
#define OMAP4430_PRM_IRQENABLE_MPU OMAP44XX_PRM_REGADDR(OMAP4430_PRM_OCP_SOCKET_MOD, 0x0018)
+#define OMAP4_PRM_IRQENABLE_MPU_2_OFFSET 0x001c
#define OMAP4430_PRM_IRQENABLE_MPU_2 OMAP44XX_PRM_REGADDR(OMAP4430_PRM_OCP_SOCKET_MOD, 0x001c)
+#define OMAP4_PRM_IRQSTATUS_DUCATI_OFFSET 0x0020
#define OMAP4430_PRM_IRQSTATUS_DUCATI OMAP44XX_PRM_REGADDR(OMAP4430_PRM_OCP_SOCKET_MOD, 0x0020)
+#define OMAP4_PRM_IRQENABLE_DUCATI_OFFSET 0x0028
#define OMAP4430_PRM_IRQENABLE_DUCATI OMAP44XX_PRM_REGADDR(OMAP4430_PRM_OCP_SOCKET_MOD, 0x0028)
+#define OMAP4_PRM_IRQSTATUS_TESLA_OFFSET 0x0030
#define OMAP4430_PRM_IRQSTATUS_TESLA OMAP44XX_PRM_REGADDR(OMAP4430_PRM_OCP_SOCKET_MOD, 0x0030)
+#define OMAP4_PRM_IRQENABLE_TESLA_OFFSET 0x0038
#define OMAP4430_PRM_IRQENABLE_TESLA OMAP44XX_PRM_REGADDR(OMAP4430_PRM_OCP_SOCKET_MOD, 0x0038)
+#define OMAP4_PRM_PRM_PROFILING_CLKCTRL_OFFSET 0x0040
#define OMAP4430_PRM_PRM_PROFILING_CLKCTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_OCP_SOCKET_MOD, 0x0040)
/* PRM.CKGEN_PRM register offsets */
+#define OMAP4_CM_ABE_DSS_SYS_CLKSEL_OFFSET 0x0000
#define OMAP4430_CM_ABE_DSS_SYS_CLKSEL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CKGEN_MOD, 0x0000)
+#define OMAP4_CM_DPLL_SYS_REF_CLKSEL_OFFSET 0x0004
#define OMAP4430_CM_DPLL_SYS_REF_CLKSEL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CKGEN_MOD, 0x0004)
+#define OMAP4_CM_L4_WKUP_CLKSEL_OFFSET 0x0008
#define OMAP4430_CM_L4_WKUP_CLKSEL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CKGEN_MOD, 0x0008)
+#define OMAP4_CM_ABE_PLL_REF_CLKSEL_OFFSET 0x000c
#define OMAP4430_CM_ABE_PLL_REF_CLKSEL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CKGEN_MOD, 0x000c)
+#define OMAP4_CM_SYS_CLKSEL_OFFSET 0x0010
#define OMAP4430_CM_SYS_CLKSEL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CKGEN_MOD, 0x0010)
/* PRM.MPU_PRM register offsets */
+#define OMAP4_PM_MPU_PWRSTCTRL_OFFSET 0x0000
#define OMAP4430_PM_MPU_PWRSTCTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_MPU_MOD, 0x0000)
+#define OMAP4_PM_MPU_PWRSTST_OFFSET 0x0004
#define OMAP4430_PM_MPU_PWRSTST OMAP44XX_PRM_REGADDR(OMAP4430_PRM_MPU_MOD, 0x0004)
+#define OMAP4_RM_MPU_RSTST_OFFSET 0x0014
#define OMAP4430_RM_MPU_RSTST OMAP44XX_PRM_REGADDR(OMAP4430_PRM_MPU_MOD, 0x0014)
+#define OMAP4_RM_MPU_MPU_CONTEXT_OFFSET 0x0024
#define OMAP4430_RM_MPU_MPU_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_MPU_MOD, 0x0024)
/* PRM.TESLA_PRM register offsets */
+#define OMAP4_PM_TESLA_PWRSTCTRL_OFFSET 0x0000
#define OMAP4430_PM_TESLA_PWRSTCTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_TESLA_MOD, 0x0000)
+#define OMAP4_PM_TESLA_PWRSTST_OFFSET 0x0004
#define OMAP4430_PM_TESLA_PWRSTST OMAP44XX_PRM_REGADDR(OMAP4430_PRM_TESLA_MOD, 0x0004)
+#define OMAP4_RM_TESLA_RSTCTRL_OFFSET 0x0010
#define OMAP4430_RM_TESLA_RSTCTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_TESLA_MOD, 0x0010)
+#define OMAP4_RM_TESLA_RSTST_OFFSET 0x0014
#define OMAP4430_RM_TESLA_RSTST OMAP44XX_PRM_REGADDR(OMAP4430_PRM_TESLA_MOD, 0x0014)
+#define OMAP4_RM_TESLA_TESLA_CONTEXT_OFFSET 0x0024
#define OMAP4430_RM_TESLA_TESLA_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_TESLA_MOD, 0x0024)
/* PRM.ABE_PRM register offsets */
+#define OMAP4_PM_ABE_PWRSTCTRL_OFFSET 0x0000
#define OMAP4430_PM_ABE_PWRSTCTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0000)
+#define OMAP4_PM_ABE_PWRSTST_OFFSET 0x0004
#define OMAP4430_PM_ABE_PWRSTST OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0004)
+#define OMAP4_RM_ABE_AESS_CONTEXT_OFFSET 0x002c
#define OMAP4430_RM_ABE_AESS_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x002c)
+#define OMAP4_PM_ABE_PDM_WKDEP_OFFSET 0x0030
#define OMAP4430_PM_ABE_PDM_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0030)
+#define OMAP4_RM_ABE_PDM_CONTEXT_OFFSET 0x0034
#define OMAP4430_RM_ABE_PDM_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0034)
+#define OMAP4_PM_ABE_DMIC_WKDEP_OFFSET 0x0038
#define OMAP4430_PM_ABE_DMIC_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0038)
+#define OMAP4_RM_ABE_DMIC_CONTEXT_OFFSET 0x003c
#define OMAP4430_RM_ABE_DMIC_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x003c)
+#define OMAP4_PM_ABE_MCASP_WKDEP_OFFSET 0x0040
#define OMAP4430_PM_ABE_MCASP_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0040)
+#define OMAP4_RM_ABE_MCASP_CONTEXT_OFFSET 0x0044
#define OMAP4430_RM_ABE_MCASP_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0044)
+#define OMAP4_PM_ABE_MCBSP1_WKDEP_OFFSET 0x0048
#define OMAP4430_PM_ABE_MCBSP1_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0048)
+#define OMAP4_RM_ABE_MCBSP1_CONTEXT_OFFSET 0x004c
#define OMAP4430_RM_ABE_MCBSP1_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x004c)
+#define OMAP4_PM_ABE_MCBSP2_WKDEP_OFFSET 0x0050
#define OMAP4430_PM_ABE_MCBSP2_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0050)
+#define OMAP4_RM_ABE_MCBSP2_CONTEXT_OFFSET 0x0054
#define OMAP4430_RM_ABE_MCBSP2_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0054)
+#define OMAP4_PM_ABE_MCBSP3_WKDEP_OFFSET 0x0058
#define OMAP4430_PM_ABE_MCBSP3_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0058)
+#define OMAP4_RM_ABE_MCBSP3_CONTEXT_OFFSET 0x005c
#define OMAP4430_RM_ABE_MCBSP3_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x005c)
+#define OMAP4_PM_ABE_SLIMBUS_WKDEP_OFFSET 0x0060
#define OMAP4430_PM_ABE_SLIMBUS_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0060)
+#define OMAP4_RM_ABE_SLIMBUS_CONTEXT_OFFSET 0x0064
#define OMAP4430_RM_ABE_SLIMBUS_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0064)
+#define OMAP4_PM_ABE_TIMER5_WKDEP_OFFSET 0x0068
#define OMAP4430_PM_ABE_TIMER5_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0068)
+#define OMAP4_RM_ABE_TIMER5_CONTEXT_OFFSET 0x006c
#define OMAP4430_RM_ABE_TIMER5_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x006c)
+#define OMAP4_PM_ABE_TIMER6_WKDEP_OFFSET 0x0070
#define OMAP4430_PM_ABE_TIMER6_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0070)
+#define OMAP4_RM_ABE_TIMER6_CONTEXT_OFFSET 0x0074
#define OMAP4430_RM_ABE_TIMER6_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0074)
+#define OMAP4_PM_ABE_TIMER7_WKDEP_OFFSET 0x0078
#define OMAP4430_PM_ABE_TIMER7_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0078)
+#define OMAP4_RM_ABE_TIMER7_CONTEXT_OFFSET 0x007c
#define OMAP4430_RM_ABE_TIMER7_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x007c)
+#define OMAP4_PM_ABE_TIMER8_WKDEP_OFFSET 0x0080
#define OMAP4430_PM_ABE_TIMER8_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0080)
+#define OMAP4_RM_ABE_TIMER8_CONTEXT_OFFSET 0x0084
#define OMAP4430_RM_ABE_TIMER8_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0084)
+#define OMAP4_PM_ABE_WDT3_WKDEP_OFFSET 0x0088
#define OMAP4430_PM_ABE_WDT3_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0088)
+#define OMAP4_RM_ABE_WDT3_CONTEXT_OFFSET 0x008c
#define OMAP4430_RM_ABE_WDT3_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x008c)
/* PRM.ALWAYS_ON_PRM register offsets */
+#define OMAP4_RM_ALWON_MDMINTC_CONTEXT_OFFSET 0x0024
#define OMAP4430_RM_ALWON_MDMINTC_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ALWAYS_ON_MOD, 0x0024)
+#define OMAP4_PM_ALWON_SR_MPU_WKDEP_OFFSET 0x0028
#define OMAP4430_PM_ALWON_SR_MPU_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ALWAYS_ON_MOD, 0x0028)
+#define OMAP4_RM_ALWON_SR_MPU_CONTEXT_OFFSET 0x002c
#define OMAP4430_RM_ALWON_SR_MPU_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ALWAYS_ON_MOD, 0x002c)
+#define OMAP4_PM_ALWON_SR_IVA_WKDEP_OFFSET 0x0030
#define OMAP4430_PM_ALWON_SR_IVA_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ALWAYS_ON_MOD, 0x0030)
+#define OMAP4_RM_ALWON_SR_IVA_CONTEXT_OFFSET 0x0034
#define OMAP4430_RM_ALWON_SR_IVA_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ALWAYS_ON_MOD, 0x0034)
+#define OMAP4_PM_ALWON_SR_CORE_WKDEP_OFFSET 0x0038
#define OMAP4430_PM_ALWON_SR_CORE_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ALWAYS_ON_MOD, 0x0038)
+#define OMAP4_RM_ALWON_SR_CORE_CONTEXT_OFFSET 0x003c
#define OMAP4430_RM_ALWON_SR_CORE_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ALWAYS_ON_MOD, 0x003c)
/* PRM.CORE_PRM register offsets */
+#define OMAP4_PM_CORE_PWRSTCTRL_OFFSET 0x0000
#define OMAP4430_PM_CORE_PWRSTCTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0000)
+#define OMAP4_PM_CORE_PWRSTST_OFFSET 0x0004
#define OMAP4430_PM_CORE_PWRSTST OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0004)
+#define OMAP4_RM_L3_1_L3_1_CONTEXT_OFFSET 0x0024
#define OMAP4430_RM_L3_1_L3_1_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0024)
+#define OMAP4_RM_L3_2_L3_2_CONTEXT_OFFSET 0x0124
#define OMAP4430_RM_L3_2_L3_2_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0124)
+#define OMAP4_RM_L3_2_GPMC_CONTEXT_OFFSET 0x012c
#define OMAP4430_RM_L3_2_GPMC_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x012c)
+#define OMAP4_RM_L3_2_OCMC_RAM_CONTEXT_OFFSET 0x0134
#define OMAP4430_RM_L3_2_OCMC_RAM_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0134)
+#define OMAP4_RM_DUCATI_RSTCTRL_OFFSET 0x0210
#define OMAP4430_RM_DUCATI_RSTCTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0210)
+#define OMAP4_RM_DUCATI_RSTST_OFFSET 0x0214
#define OMAP4430_RM_DUCATI_RSTST OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0214)
+#define OMAP4_RM_DUCATI_DUCATI_CONTEXT_OFFSET 0x0224
#define OMAP4430_RM_DUCATI_DUCATI_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0224)
+#define OMAP4_RM_SDMA_SDMA_CONTEXT_OFFSET 0x0324
#define OMAP4430_RM_SDMA_SDMA_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0324)
+#define OMAP4_RM_MEMIF_DMM_CONTEXT_OFFSET 0x0424
#define OMAP4430_RM_MEMIF_DMM_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0424)
+#define OMAP4_RM_MEMIF_EMIF_FW_CONTEXT_OFFSET 0x042c
#define OMAP4430_RM_MEMIF_EMIF_FW_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x042c)
+#define OMAP4_RM_MEMIF_EMIF_1_CONTEXT_OFFSET 0x0434
#define OMAP4430_RM_MEMIF_EMIF_1_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0434)
+#define OMAP4_RM_MEMIF_EMIF_2_CONTEXT_OFFSET 0x043c
#define OMAP4430_RM_MEMIF_EMIF_2_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x043c)
+#define OMAP4_RM_MEMIF_DLL_CONTEXT_OFFSET 0x0444
#define OMAP4430_RM_MEMIF_DLL_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0444)
+#define OMAP4_RM_MEMIF_EMIF_H1_CONTEXT_OFFSET 0x0454
#define OMAP4430_RM_MEMIF_EMIF_H1_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0454)
+#define OMAP4_RM_MEMIF_EMIF_H2_CONTEXT_OFFSET 0x045c
#define OMAP4430_RM_MEMIF_EMIF_H2_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x045c)
+#define OMAP4_RM_MEMIF_DLL_H_CONTEXT_OFFSET 0x0464
#define OMAP4430_RM_MEMIF_DLL_H_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0464)
+#define OMAP4_RM_D2D_SAD2D_CONTEXT_OFFSET 0x0524
#define OMAP4430_RM_D2D_SAD2D_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0524)
+#define OMAP4_RM_D2D_MODEM_ICR_CONTEXT_OFFSET 0x052c
#define OMAP4430_RM_D2D_MODEM_ICR_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x052c)
+#define OMAP4_RM_D2D_SAD2D_FW_CONTEXT_OFFSET 0x0534
#define OMAP4430_RM_D2D_SAD2D_FW_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0534)
+#define OMAP4_RM_L4CFG_L4_CFG_CONTEXT_OFFSET 0x0624
#define OMAP4430_RM_L4CFG_L4_CFG_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0624)
+#define OMAP4_RM_L4CFG_HW_SEM_CONTEXT_OFFSET 0x062c
#define OMAP4430_RM_L4CFG_HW_SEM_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x062c)
+#define OMAP4_RM_L4CFG_MAILBOX_CONTEXT_OFFSET 0x0634
#define OMAP4430_RM_L4CFG_MAILBOX_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0634)
+#define OMAP4_RM_L4CFG_SAR_ROM_CONTEXT_OFFSET 0x063c
#define OMAP4430_RM_L4CFG_SAR_ROM_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x063c)
+#define OMAP4_RM_L3INSTR_L3_3_CONTEXT_OFFSET 0x0724
#define OMAP4430_RM_L3INSTR_L3_3_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0724)
+#define OMAP4_RM_L3INSTR_L3_INSTR_CONTEXT_OFFSET 0x072c
#define OMAP4430_RM_L3INSTR_L3_INSTR_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x072c)
+#define OMAP4_RM_L3INSTR_OCP_WP1_CONTEXT_OFFSET 0x0744
#define OMAP4430_RM_L3INSTR_OCP_WP1_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0744)
/* PRM.IVAHD_PRM register offsets */
+#define OMAP4_PM_IVAHD_PWRSTCTRL_OFFSET 0x0000
#define OMAP4430_PM_IVAHD_PWRSTCTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_IVAHD_MOD, 0x0000)
+#define OMAP4_PM_IVAHD_PWRSTST_OFFSET 0x0004
#define OMAP4430_PM_IVAHD_PWRSTST OMAP44XX_PRM_REGADDR(OMAP4430_PRM_IVAHD_MOD, 0x0004)
+#define OMAP4_RM_IVAHD_RSTCTRL_OFFSET 0x0010
#define OMAP4430_RM_IVAHD_RSTCTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_IVAHD_MOD, 0x0010)
+#define OMAP4_RM_IVAHD_RSTST_OFFSET 0x0014
#define OMAP4430_RM_IVAHD_RSTST OMAP44XX_PRM_REGADDR(OMAP4430_PRM_IVAHD_MOD, 0x0014)
+#define OMAP4_RM_IVAHD_IVAHD_CONTEXT_OFFSET 0x0024
#define OMAP4430_RM_IVAHD_IVAHD_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_IVAHD_MOD, 0x0024)
+#define OMAP4_RM_IVAHD_SL2_CONTEXT_OFFSET 0x002c
#define OMAP4430_RM_IVAHD_SL2_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_IVAHD_MOD, 0x002c)
/* PRM.CAM_PRM register offsets */
+#define OMAP4_PM_CAM_PWRSTCTRL_OFFSET 0x0000
#define OMAP4430_PM_CAM_PWRSTCTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CAM_MOD, 0x0000)
+#define OMAP4_PM_CAM_PWRSTST_OFFSET 0x0004
#define OMAP4430_PM_CAM_PWRSTST OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CAM_MOD, 0x0004)
+#define OMAP4_RM_CAM_ISS_CONTEXT_OFFSET 0x0024
#define OMAP4430_RM_CAM_ISS_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CAM_MOD, 0x0024)
+#define OMAP4_RM_CAM_FDIF_CONTEXT_OFFSET 0x002c
#define OMAP4430_RM_CAM_FDIF_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CAM_MOD, 0x002c)
/* PRM.DSS_PRM register offsets */
+#define OMAP4_PM_DSS_PWRSTCTRL_OFFSET 0x0000
#define OMAP4430_PM_DSS_PWRSTCTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DSS_MOD, 0x0000)
+#define OMAP4_PM_DSS_PWRSTST_OFFSET 0x0004
#define OMAP4430_PM_DSS_PWRSTST OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DSS_MOD, 0x0004)
+#define OMAP4_PM_DSS_DSS_WKDEP_OFFSET 0x0020
#define OMAP4430_PM_DSS_DSS_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DSS_MOD, 0x0020)
+#define OMAP4_RM_DSS_DSS_CONTEXT_OFFSET 0x0024
#define OMAP4430_RM_DSS_DSS_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DSS_MOD, 0x0024)
+#define OMAP4_RM_DSS_DEISS_CONTEXT_OFFSET 0x002c
#define OMAP4430_RM_DSS_DEISS_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DSS_MOD, 0x002c)
/* PRM.GFX_PRM register offsets */
+#define OMAP4_PM_GFX_PWRSTCTRL_OFFSET 0x0000
#define OMAP4430_PM_GFX_PWRSTCTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_GFX_MOD, 0x0000)
+#define OMAP4_PM_GFX_PWRSTST_OFFSET 0x0004
#define OMAP4430_PM_GFX_PWRSTST OMAP44XX_PRM_REGADDR(OMAP4430_PRM_GFX_MOD, 0x0004)
+#define OMAP4_RM_GFX_GFX_CONTEXT_OFFSET 0x0024
#define OMAP4430_RM_GFX_GFX_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_GFX_MOD, 0x0024)
/* PRM.L3INIT_PRM register offsets */
+#define OMAP4_PM_L3INIT_PWRSTCTRL_OFFSET 0x0000
#define OMAP4430_PM_L3INIT_PWRSTCTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x0000)
+#define OMAP4_PM_L3INIT_PWRSTST_OFFSET 0x0004
#define OMAP4430_PM_L3INIT_PWRSTST OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x0004)
+#define OMAP4_PM_L3INIT_MMC1_WKDEP_OFFSET 0x0028
#define OMAP4430_PM_L3INIT_MMC1_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x0028)
+#define OMAP4_RM_L3INIT_MMC1_CONTEXT_OFFSET 0x002c
#define OMAP4430_RM_L3INIT_MMC1_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x002c)
+#define OMAP4_PM_L3INIT_MMC2_WKDEP_OFFSET 0x0030
#define OMAP4430_PM_L3INIT_MMC2_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x0030)
+#define OMAP4_RM_L3INIT_MMC2_CONTEXT_OFFSET 0x0034
#define OMAP4430_RM_L3INIT_MMC2_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x0034)
+#define OMAP4_PM_L3INIT_HSI_WKDEP_OFFSET 0x0038
#define OMAP4430_PM_L3INIT_HSI_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x0038)
+#define OMAP4_RM_L3INIT_HSI_CONTEXT_OFFSET 0x003c
#define OMAP4430_RM_L3INIT_HSI_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x003c)
+#define OMAP4_PM_L3INIT_UNIPRO1_WKDEP_OFFSET 0x0040
#define OMAP4430_PM_L3INIT_UNIPRO1_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x0040)
+#define OMAP4_RM_L3INIT_UNIPRO1_CONTEXT_OFFSET 0x0044
#define OMAP4430_RM_L3INIT_UNIPRO1_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x0044)
+#define OMAP4_PM_L3INIT_USB_HOST_WKDEP_OFFSET 0x0058
#define OMAP4430_PM_L3INIT_USB_HOST_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x0058)
+#define OMAP4_RM_L3INIT_USB_HOST_CONTEXT_OFFSET 0x005c
#define OMAP4430_RM_L3INIT_USB_HOST_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x005c)
+#define OMAP4_PM_L3INIT_USB_OTG_WKDEP_OFFSET 0x0060
#define OMAP4430_PM_L3INIT_USB_OTG_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x0060)
+#define OMAP4_RM_L3INIT_USB_OTG_CONTEXT_OFFSET 0x0064
#define OMAP4430_RM_L3INIT_USB_OTG_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x0064)
+#define OMAP4_PM_L3INIT_USB_TLL_WKDEP_OFFSET 0x0068
#define OMAP4430_PM_L3INIT_USB_TLL_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x0068)
+#define OMAP4_RM_L3INIT_USB_TLL_CONTEXT_OFFSET 0x006c
#define OMAP4430_RM_L3INIT_USB_TLL_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x006c)
+#define OMAP4_RM_L3INIT_P1500_CONTEXT_OFFSET 0x007c
#define OMAP4430_RM_L3INIT_P1500_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x007c)
+#define OMAP4_RM_L3INIT_EMAC_CONTEXT_OFFSET 0x0084
#define OMAP4430_RM_L3INIT_EMAC_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x0084)
+#define OMAP4_PM_L3INIT_SATA_WKDEP_OFFSET 0x0088
#define OMAP4430_PM_L3INIT_SATA_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x0088)
+#define OMAP4_RM_L3INIT_SATA_CONTEXT_OFFSET 0x008c
#define OMAP4430_RM_L3INIT_SATA_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x008c)
+#define OMAP4_RM_L3INIT_TPPSS_CONTEXT_OFFSET 0x0094
#define OMAP4430_RM_L3INIT_TPPSS_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x0094)
+#define OMAP4_PM_L3INIT_PCIESS_WKDEP_OFFSET 0x0098
#define OMAP4430_PM_L3INIT_PCIESS_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x0098)
+#define OMAP4_RM_L3INIT_PCIESS_CONTEXT_OFFSET 0x009c
#define OMAP4430_RM_L3INIT_PCIESS_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x009c)
+#define OMAP4_RM_L3INIT_CCPTX_CONTEXT_OFFSET 0x00ac
#define OMAP4430_RM_L3INIT_CCPTX_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x00ac)
+#define OMAP4_PM_L3INIT_XHPI_WKDEP_OFFSET 0x00c0
#define OMAP4430_PM_L3INIT_XHPI_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x00c0)
+#define OMAP4_RM_L3INIT_XHPI_CONTEXT_OFFSET 0x00c4
#define OMAP4430_RM_L3INIT_XHPI_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x00c4)
+#define OMAP4_PM_L3INIT_MMC6_WKDEP_OFFSET 0x00c8
#define OMAP4430_PM_L3INIT_MMC6_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x00c8)
+#define OMAP4_RM_L3INIT_MMC6_CONTEXT_OFFSET 0x00cc
#define OMAP4430_RM_L3INIT_MMC6_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x00cc)
+#define OMAP4_PM_L3INIT_USB_HOST_FS_WKDEP_OFFSET 0x00d0
#define OMAP4430_PM_L3INIT_USB_HOST_FS_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x00d0)
+#define OMAP4_RM_L3INIT_USB_HOST_FS_CONTEXT_OFFSET 0x00d4
#define OMAP4430_RM_L3INIT_USB_HOST_FS_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x00d4)
+#define OMAP4_RM_L3INIT_USBPHYOCP2SCP_CONTEXT_OFFSET 0x00e4
#define OMAP4430_RM_L3INIT_USBPHYOCP2SCP_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x00e4)
/* PRM.L4PER_PRM register offsets */
+#define OMAP4_PM_L4PER_PWRSTCTRL_OFFSET 0x0000
#define OMAP4430_PM_L4PER_PWRSTCTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0000)
+#define OMAP4_PM_L4PER_PWRSTST_OFFSET 0x0004
#define OMAP4430_PM_L4PER_PWRSTST OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0004)
+#define OMAP4_RM_L4PER_ADC_CONTEXT_OFFSET 0x0024
#define OMAP4430_RM_L4PER_ADC_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0024)
+#define OMAP4_PM_L4PER_DMTIMER10_WKDEP_OFFSET 0x0028
#define OMAP4430_PM_L4PER_DMTIMER10_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0028)
+#define OMAP4_RM_L4PER_DMTIMER10_CONTEXT_OFFSET 0x002c
#define OMAP4430_RM_L4PER_DMTIMER10_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x002c)
+#define OMAP4_PM_L4PER_DMTIMER11_WKDEP_OFFSET 0x0030
#define OMAP4430_PM_L4PER_DMTIMER11_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0030)
+#define OMAP4_RM_L4PER_DMTIMER11_CONTEXT_OFFSET 0x0034
#define OMAP4430_RM_L4PER_DMTIMER11_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0034)
+#define OMAP4_PM_L4PER_DMTIMER2_WKDEP_OFFSET 0x0038
#define OMAP4430_PM_L4PER_DMTIMER2_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0038)
+#define OMAP4_RM_L4PER_DMTIMER2_CONTEXT_OFFSET 0x003c
#define OMAP4430_RM_L4PER_DMTIMER2_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x003c)
+#define OMAP4_PM_L4PER_DMTIMER3_WKDEP_OFFSET 0x0040
#define OMAP4430_PM_L4PER_DMTIMER3_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0040)
+#define OMAP4_RM_L4PER_DMTIMER3_CONTEXT_OFFSET 0x0044
#define OMAP4430_RM_L4PER_DMTIMER3_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0044)
+#define OMAP4_PM_L4PER_DMTIMER4_WKDEP_OFFSET 0x0048
#define OMAP4430_PM_L4PER_DMTIMER4_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0048)
+#define OMAP4_RM_L4PER_DMTIMER4_CONTEXT_OFFSET 0x004c
#define OMAP4430_RM_L4PER_DMTIMER4_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x004c)
+#define OMAP4_PM_L4PER_DMTIMER9_WKDEP_OFFSET 0x0050
#define OMAP4430_PM_L4PER_DMTIMER9_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0050)
+#define OMAP4_RM_L4PER_DMTIMER9_CONTEXT_OFFSET 0x0054
#define OMAP4430_RM_L4PER_DMTIMER9_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0054)
+#define OMAP4_RM_L4PER_ELM_CONTEXT_OFFSET 0x005c
#define OMAP4430_RM_L4PER_ELM_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x005c)
+#define OMAP4_PM_L4PER_GPIO2_WKDEP_OFFSET 0x0060
#define OMAP4430_PM_L4PER_GPIO2_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0060)
+#define OMAP4_RM_L4PER_GPIO2_CONTEXT_OFFSET 0x0064
#define OMAP4430_RM_L4PER_GPIO2_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0064)
+#define OMAP4_PM_L4PER_GPIO3_WKDEP_OFFSET 0x0068
#define OMAP4430_PM_L4PER_GPIO3_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0068)
+#define OMAP4_RM_L4PER_GPIO3_CONTEXT_OFFSET 0x006c
#define OMAP4430_RM_L4PER_GPIO3_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x006c)
+#define OMAP4_PM_L4PER_GPIO4_WKDEP_OFFSET 0x0070
#define OMAP4430_PM_L4PER_GPIO4_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0070)
+#define OMAP4_RM_L4PER_GPIO4_CONTEXT_OFFSET 0x0074
#define OMAP4430_RM_L4PER_GPIO4_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0074)
+#define OMAP4_PM_L4PER_GPIO5_WKDEP_OFFSET 0x0078
#define OMAP4430_PM_L4PER_GPIO5_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0078)
+#define OMAP4_RM_L4PER_GPIO5_CONTEXT_OFFSET 0x007c
#define OMAP4430_RM_L4PER_GPIO5_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x007c)
+#define OMAP4_PM_L4PER_GPIO6_WKDEP_OFFSET 0x0080
#define OMAP4430_PM_L4PER_GPIO6_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0080)
+#define OMAP4_RM_L4PER_GPIO6_CONTEXT_OFFSET 0x0084
#define OMAP4430_RM_L4PER_GPIO6_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0084)
+#define OMAP4_RM_L4PER_HDQ1W_CONTEXT_OFFSET 0x008c
#define OMAP4430_RM_L4PER_HDQ1W_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x008c)
+#define OMAP4_PM_L4PER_HECC1_WKDEP_OFFSET 0x0090
#define OMAP4430_PM_L4PER_HECC1_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0090)
+#define OMAP4_RM_L4PER_HECC1_CONTEXT_OFFSET 0x0094
#define OMAP4430_RM_L4PER_HECC1_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0094)
+#define OMAP4_PM_L4PER_HECC2_WKDEP_OFFSET 0x0098
#define OMAP4430_PM_L4PER_HECC2_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0098)
+#define OMAP4_RM_L4PER_HECC2_CONTEXT_OFFSET 0x009c
#define OMAP4430_RM_L4PER_HECC2_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x009c)
+#define OMAP4_PM_L4PER_I2C1_WKDEP_OFFSET 0x00a0
#define OMAP4430_PM_L4PER_I2C1_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00a0)
+#define OMAP4_RM_L4PER_I2C1_CONTEXT_OFFSET 0x00a4
#define OMAP4430_RM_L4PER_I2C1_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00a4)
+#define OMAP4_PM_L4PER_I2C2_WKDEP_OFFSET 0x00a8
#define OMAP4430_PM_L4PER_I2C2_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00a8)
+#define OMAP4_RM_L4PER_I2C2_CONTEXT_OFFSET 0x00ac
#define OMAP4430_RM_L4PER_I2C2_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00ac)
+#define OMAP4_PM_L4PER_I2C3_WKDEP_OFFSET 0x00b0
#define OMAP4430_PM_L4PER_I2C3_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00b0)
+#define OMAP4_RM_L4PER_I2C3_CONTEXT_OFFSET 0x00b4
#define OMAP4430_RM_L4PER_I2C3_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00b4)
+#define OMAP4_PM_L4PER_I2C4_WKDEP_OFFSET 0x00b8
#define OMAP4430_PM_L4PER_I2C4_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00b8)
+#define OMAP4_RM_L4PER_I2C4_CONTEXT_OFFSET 0x00bc
#define OMAP4430_RM_L4PER_I2C4_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00bc)
+#define OMAP4_RM_L4PER_L4_PER_CONTEXT_OFFSET 0x00c0
#define OMAP4430_RM_L4PER_L4_PER_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00c0)
+#define OMAP4_PM_L4PER_MCASP2_WKDEP_OFFSET 0x00d0
#define OMAP4430_PM_L4PER_MCASP2_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00d0)
+#define OMAP4_RM_L4PER_MCASP2_CONTEXT_OFFSET 0x00d4
#define OMAP4430_RM_L4PER_MCASP2_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00d4)
+#define OMAP4_PM_L4PER_MCASP3_WKDEP_OFFSET 0x00d8
#define OMAP4430_PM_L4PER_MCASP3_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00d8)
+#define OMAP4_RM_L4PER_MCASP3_CONTEXT_OFFSET 0x00dc
#define OMAP4430_RM_L4PER_MCASP3_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00dc)
+#define OMAP4_PM_L4PER_MCBSP4_WKDEP_OFFSET 0x00e0
#define OMAP4430_PM_L4PER_MCBSP4_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00e0)
+#define OMAP4_RM_L4PER_MCBSP4_CONTEXT_OFFSET 0x00e4
#define OMAP4430_RM_L4PER_MCBSP4_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00e4)
+#define OMAP4_RM_L4PER_MGATE_CONTEXT_OFFSET 0x00ec
#define OMAP4430_RM_L4PER_MGATE_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00ec)
+#define OMAP4_PM_L4PER_MCSPI1_WKDEP_OFFSET 0x00f0
#define OMAP4430_PM_L4PER_MCSPI1_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00f0)
+#define OMAP4_RM_L4PER_MCSPI1_CONTEXT_OFFSET 0x00f4
#define OMAP4430_RM_L4PER_MCSPI1_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00f4)
+#define OMAP4_PM_L4PER_MCSPI2_WKDEP_OFFSET 0x00f8
#define OMAP4430_PM_L4PER_MCSPI2_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00f8)
+#define OMAP4_RM_L4PER_MCSPI2_CONTEXT_OFFSET 0x00fc
#define OMAP4430_RM_L4PER_MCSPI2_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00fc)
+#define OMAP4_PM_L4PER_MCSPI3_WKDEP_OFFSET 0x0100
#define OMAP4430_PM_L4PER_MCSPI3_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0100)
+#define OMAP4_RM_L4PER_MCSPI3_CONTEXT_OFFSET 0x0104
#define OMAP4430_RM_L4PER_MCSPI3_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0104)
+#define OMAP4_PM_L4PER_MCSPI4_WKDEP_OFFSET 0x0108
#define OMAP4430_PM_L4PER_MCSPI4_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0108)
+#define OMAP4_RM_L4PER_MCSPI4_CONTEXT_OFFSET 0x010c
#define OMAP4430_RM_L4PER_MCSPI4_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x010c)
+#define OMAP4_PM_L4PER_MMCSD3_WKDEP_OFFSET 0x0120
#define OMAP4430_PM_L4PER_MMCSD3_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0120)
+#define OMAP4_RM_L4PER_MMCSD3_CONTEXT_OFFSET 0x0124
#define OMAP4430_RM_L4PER_MMCSD3_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0124)
+#define OMAP4_PM_L4PER_MMCSD4_WKDEP_OFFSET 0x0128
#define OMAP4430_PM_L4PER_MMCSD4_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0128)
+#define OMAP4_RM_L4PER_MMCSD4_CONTEXT_OFFSET 0x012c
#define OMAP4430_RM_L4PER_MMCSD4_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x012c)
+#define OMAP4_RM_L4PER_MSPROHG_CONTEXT_OFFSET 0x0134
#define OMAP4430_RM_L4PER_MSPROHG_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0134)
+#define OMAP4_PM_L4PER_SLIMBUS2_WKDEP_OFFSET 0x0138
#define OMAP4430_PM_L4PER_SLIMBUS2_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0138)
+#define OMAP4_RM_L4PER_SLIMBUS2_CONTEXT_OFFSET 0x013c
#define OMAP4430_RM_L4PER_SLIMBUS2_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x013c)
+#define OMAP4_PM_L4PER_UART1_WKDEP_OFFSET 0x0140
#define OMAP4430_PM_L4PER_UART1_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0140)
+#define OMAP4_RM_L4PER_UART1_CONTEXT_OFFSET 0x0144
#define OMAP4430_RM_L4PER_UART1_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0144)
+#define OMAP4_PM_L4PER_UART2_WKDEP_OFFSET 0x0148
#define OMAP4430_PM_L4PER_UART2_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0148)
+#define OMAP4_RM_L4PER_UART2_CONTEXT_OFFSET 0x014c
#define OMAP4430_RM_L4PER_UART2_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x014c)
+#define OMAP4_PM_L4PER_UART3_WKDEP_OFFSET 0x0150
#define OMAP4430_PM_L4PER_UART3_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0150)
+#define OMAP4_RM_L4PER_UART3_CONTEXT_OFFSET 0x0154
#define OMAP4430_RM_L4PER_UART3_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0154)
+#define OMAP4_PM_L4PER_UART4_WKDEP_OFFSET 0x0158
#define OMAP4430_PM_L4PER_UART4_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0158)
+#define OMAP4_RM_L4PER_UART4_CONTEXT_OFFSET 0x015c
#define OMAP4430_RM_L4PER_UART4_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x015c)
+#define OMAP4_PM_L4PER_MMCSD5_WKDEP_OFFSET 0x0160
#define OMAP4430_PM_L4PER_MMCSD5_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0160)
+#define OMAP4_RM_L4PER_MMCSD5_CONTEXT_OFFSET 0x0164
#define OMAP4430_RM_L4PER_MMCSD5_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0164)
+#define OMAP4_PM_L4PER_I2C5_WKDEP_OFFSET 0x0168
#define OMAP4430_PM_L4PER_I2C5_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0168)
+#define OMAP4_RM_L4PER_I2C5_CONTEXT_OFFSET 0x016c
#define OMAP4430_RM_L4PER_I2C5_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x016c)
+#define OMAP4_RM_L4SEC_AES1_CONTEXT_OFFSET 0x01a4
#define OMAP4430_RM_L4SEC_AES1_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x01a4)
+#define OMAP4_RM_L4SEC_AES2_CONTEXT_OFFSET 0x01ac
#define OMAP4430_RM_L4SEC_AES2_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x01ac)
+#define OMAP4_RM_L4SEC_DES3DES_CONTEXT_OFFSET 0x01b4
#define OMAP4430_RM_L4SEC_DES3DES_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x01b4)
+#define OMAP4_RM_L4SEC_PKAEIP29_CONTEXT_OFFSET 0x01bc
#define OMAP4430_RM_L4SEC_PKAEIP29_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x01bc)
+#define OMAP4_RM_L4SEC_RNG_CONTEXT_OFFSET 0x01c4
#define OMAP4430_RM_L4SEC_RNG_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x01c4)
+#define OMAP4_RM_L4SEC_SHA2MD51_CONTEXT_OFFSET 0x01cc
#define OMAP4430_RM_L4SEC_SHA2MD51_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x01cc)
+#define OMAP4_RM_L4SEC_CRYPTODMA_CONTEXT_OFFSET 0x01dc
#define OMAP4430_RM_L4SEC_CRYPTODMA_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x01dc)
/* PRM.CEFUSE_PRM register offsets */
+#define OMAP4_PM_CEFUSE_PWRSTCTRL_OFFSET 0x0000
#define OMAP4430_PM_CEFUSE_PWRSTCTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CEFUSE_MOD, 0x0000)
+#define OMAP4_PM_CEFUSE_PWRSTST_OFFSET 0x0004
#define OMAP4430_PM_CEFUSE_PWRSTST OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CEFUSE_MOD, 0x0004)
+#define OMAP4_RM_CEFUSE_CEFUSE_CONTEXT_OFFSET 0x0024
#define OMAP4430_RM_CEFUSE_CEFUSE_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CEFUSE_MOD, 0x0024)
/* PRM.WKUP_PRM register offsets */
+#define OMAP4_RM_WKUP_L4WKUP_CONTEXT_OFFSET 0x0024
#define OMAP4430_RM_WKUP_L4WKUP_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_MOD, 0x0024)
+#define OMAP4_RM_WKUP_WDT1_CONTEXT_OFFSET 0x002c
#define OMAP4430_RM_WKUP_WDT1_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_MOD, 0x002c)
+#define OMAP4_PM_WKUP_WDT2_WKDEP_OFFSET 0x0030
#define OMAP4430_PM_WKUP_WDT2_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_MOD, 0x0030)
+#define OMAP4_RM_WKUP_WDT2_CONTEXT_OFFSET 0x0034
#define OMAP4430_RM_WKUP_WDT2_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_MOD, 0x0034)
+#define OMAP4_PM_WKUP_GPIO1_WKDEP_OFFSET 0x0038
#define OMAP4430_PM_WKUP_GPIO1_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_MOD, 0x0038)
+#define OMAP4_RM_WKUP_GPIO1_CONTEXT_OFFSET 0x003c
#define OMAP4430_RM_WKUP_GPIO1_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_MOD, 0x003c)
+#define OMAP4_PM_WKUP_TIMER1_WKDEP_OFFSET 0x0040
#define OMAP4430_PM_WKUP_TIMER1_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_MOD, 0x0040)
+#define OMAP4_RM_WKUP_TIMER1_CONTEXT_OFFSET 0x0044
#define OMAP4430_RM_WKUP_TIMER1_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_MOD, 0x0044)
+#define OMAP4_PM_WKUP_TIMER12_WKDEP_OFFSET 0x0048
#define OMAP4430_PM_WKUP_TIMER12_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_MOD, 0x0048)
+#define OMAP4_RM_WKUP_TIMER12_CONTEXT_OFFSET 0x004c
#define OMAP4430_RM_WKUP_TIMER12_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_MOD, 0x004c)
+#define OMAP4_RM_WKUP_SYNCTIMER_CONTEXT_OFFSET 0x0054
#define OMAP4430_RM_WKUP_SYNCTIMER_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_MOD, 0x0054)
+#define OMAP4_PM_WKUP_USIM_WKDEP_OFFSET 0x0058
#define OMAP4430_PM_WKUP_USIM_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_MOD, 0x0058)
+#define OMAP4_RM_WKUP_USIM_CONTEXT_OFFSET 0x005c
#define OMAP4430_RM_WKUP_USIM_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_MOD, 0x005c)
+#define OMAP4_RM_WKUP_SARRAM_CONTEXT_OFFSET 0x0064
#define OMAP4430_RM_WKUP_SARRAM_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_MOD, 0x0064)
+#define OMAP4_PM_WKUP_KEYBOARD_WKDEP_OFFSET 0x0078
#define OMAP4430_PM_WKUP_KEYBOARD_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_MOD, 0x0078)
+#define OMAP4_RM_WKUP_KEYBOARD_CONTEXT_OFFSET 0x007c
#define OMAP4430_RM_WKUP_KEYBOARD_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_MOD, 0x007c)
+#define OMAP4_PM_WKUP_RTC_WKDEP_OFFSET 0x0080
#define OMAP4430_PM_WKUP_RTC_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_MOD, 0x0080)
+#define OMAP4_RM_WKUP_RTC_CONTEXT_OFFSET 0x0084
#define OMAP4430_RM_WKUP_RTC_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_MOD, 0x0084)
/* PRM.WKUP_CM register offsets */
+#define OMAP4_CM_WKUP_CLKSTCTRL_OFFSET 0x0000
#define OMAP4430_CM_WKUP_CLKSTCTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_CM_MOD, 0x0000)
+#define OMAP4_CM_WKUP_L4WKUP_CLKCTRL_OFFSET 0x0020
#define OMAP4430_CM_WKUP_L4WKUP_CLKCTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_CM_MOD, 0x0020)
+#define OMAP4_CM_WKUP_WDT1_CLKCTRL_OFFSET 0x0028
#define OMAP4430_CM_WKUP_WDT1_CLKCTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_CM_MOD, 0x0028)
+#define OMAP4_CM_WKUP_WDT2_CLKCTRL_OFFSET 0x0030
#define OMAP4430_CM_WKUP_WDT2_CLKCTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_CM_MOD, 0x0030)
+#define OMAP4_CM_WKUP_GPIO1_CLKCTRL_OFFSET 0x0038
#define OMAP4430_CM_WKUP_GPIO1_CLKCTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_CM_MOD, 0x0038)
+#define OMAP4_CM_WKUP_TIMER1_CLKCTRL_OFFSET 0x0040
#define OMAP4430_CM_WKUP_TIMER1_CLKCTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_CM_MOD, 0x0040)
+#define OMAP4_CM_WKUP_TIMER12_CLKCTRL_OFFSET 0x0048
#define OMAP4430_CM_WKUP_TIMER12_CLKCTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_CM_MOD, 0x0048)
+#define OMAP4_CM_WKUP_SYNCTIMER_CLKCTRL_OFFSET 0x0050
#define OMAP4430_CM_WKUP_SYNCTIMER_CLKCTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_CM_MOD, 0x0050)
+#define OMAP4_CM_WKUP_USIM_CLKCTRL_OFFSET 0x0058
#define OMAP4430_CM_WKUP_USIM_CLKCTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_CM_MOD, 0x0058)
+#define OMAP4_CM_WKUP_SARRAM_CLKCTRL_OFFSET 0x0060
#define OMAP4430_CM_WKUP_SARRAM_CLKCTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_CM_MOD, 0x0060)
+#define OMAP4_CM_WKUP_KEYBOARD_CLKCTRL_OFFSET 0x0078
#define OMAP4430_CM_WKUP_KEYBOARD_CLKCTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_CM_MOD, 0x0078)
+#define OMAP4_CM_WKUP_RTC_CLKCTRL_OFFSET 0x0080
#define OMAP4430_CM_WKUP_RTC_CLKCTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_CM_MOD, 0x0080)
+#define OMAP4_CM_WKUP_BANDGAP_CLKCTRL_OFFSET 0x0088
#define OMAP4430_CM_WKUP_BANDGAP_CLKCTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_CM_MOD, 0x0088)
/* PRM.EMU_PRM register offsets */
+#define OMAP4_PM_EMU_PWRSTCTRL_OFFSET 0x0000
#define OMAP4430_PM_EMU_PWRSTCTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_EMU_MOD, 0x0000)
+#define OMAP4_PM_EMU_PWRSTST_OFFSET 0x0004
#define OMAP4430_PM_EMU_PWRSTST OMAP44XX_PRM_REGADDR(OMAP4430_PRM_EMU_MOD, 0x0004)
+#define OMAP4_RM_EMU_DEBUGSS_CONTEXT_OFFSET 0x0024
#define OMAP4430_RM_EMU_DEBUGSS_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_EMU_MOD, 0x0024)
/* PRM.EMU_CM register offsets */
+#define OMAP4_CM_EMU_CLKSTCTRL_OFFSET 0x0000
#define OMAP4430_CM_EMU_CLKSTCTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_EMU_CM_MOD, 0x0000)
+#define OMAP4_CM_EMU_DYNAMICDEP_OFFSET 0x0008
#define OMAP4430_CM_EMU_DYNAMICDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_EMU_CM_MOD, 0x0008)
+#define OMAP4_CM_EMU_DEBUGSS_CLKCTRL_OFFSET 0x0020
#define OMAP4430_CM_EMU_DEBUGSS_CLKCTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_EMU_CM_MOD, 0x0020)
/* PRM.DEVICE_PRM register offsets */
+#define OMAP4_PRM_RSTCTRL_OFFSET 0x0000
#define OMAP4430_PRM_RSTCTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0000)
+#define OMAP4_PRM_RSTST_OFFSET 0x0004
#define OMAP4430_PRM_RSTST OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0004)
+#define OMAP4_PRM_RSTTIME_OFFSET 0x0008
#define OMAP4430_PRM_RSTTIME OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0008)
+#define OMAP4_PRM_CLKREQCTRL_OFFSET 0x000c
#define OMAP4430_PRM_CLKREQCTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x000c)
+#define OMAP4_PRM_VOLTCTRL_OFFSET 0x0010
#define OMAP4430_PRM_VOLTCTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0010)
+#define OMAP4_PRM_PWRREQCTRL_OFFSET 0x0014
#define OMAP4430_PRM_PWRREQCTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0014)
+#define OMAP4_PRM_PSCON_COUNT_OFFSET 0x0018
#define OMAP4430_PRM_PSCON_COUNT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0018)
+#define OMAP4_PRM_IO_COUNT_OFFSET 0x001c
#define OMAP4430_PRM_IO_COUNT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x001c)
+#define OMAP4_PRM_IO_PMCTRL_OFFSET 0x0020
#define OMAP4430_PRM_IO_PMCTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0020)
+#define OMAP4_PRM_VOLTSETUP_WARMRESET_OFFSET 0x0024
#define OMAP4430_PRM_VOLTSETUP_WARMRESET OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0024)
+#define OMAP4_PRM_VOLTSETUP_CORE_OFF_OFFSET 0x0028
#define OMAP4430_PRM_VOLTSETUP_CORE_OFF OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0028)
+#define OMAP4_PRM_VOLTSETUP_MPU_OFF_OFFSET 0x002c
#define OMAP4430_PRM_VOLTSETUP_MPU_OFF OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x002c)
+#define OMAP4_PRM_VOLTSETUP_IVA_OFF_OFFSET 0x0030
#define OMAP4430_PRM_VOLTSETUP_IVA_OFF OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0030)
+#define OMAP4_PRM_VOLTSETUP_CORE_RET_SLEEP_OFFSET 0x0034
#define OMAP4430_PRM_VOLTSETUP_CORE_RET_SLEEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0034)
+#define OMAP4_PRM_VOLTSETUP_MPU_RET_SLEEP_OFFSET 0x0038
#define OMAP4430_PRM_VOLTSETUP_MPU_RET_SLEEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0038)
+#define OMAP4_PRM_VOLTSETUP_IVA_RET_SLEEP_OFFSET 0x003c
#define OMAP4430_PRM_VOLTSETUP_IVA_RET_SLEEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x003c)
+#define OMAP4_PRM_VP_CORE_CONFIG_OFFSET 0x0040
#define OMAP4430_PRM_VP_CORE_CONFIG OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0040)
+#define OMAP4_PRM_VP_CORE_STATUS_OFFSET 0x0044
#define OMAP4430_PRM_VP_CORE_STATUS OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0044)
+#define OMAP4_PRM_VP_CORE_VLIMITTO_OFFSET 0x0048
#define OMAP4430_PRM_VP_CORE_VLIMITTO OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0048)
+#define OMAP4_PRM_VP_CORE_VOLTAGE_OFFSET 0x004c
#define OMAP4430_PRM_VP_CORE_VOLTAGE OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x004c)
+#define OMAP4_PRM_VP_CORE_VSTEPMAX_OFFSET 0x0050
#define OMAP4430_PRM_VP_CORE_VSTEPMAX OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0050)
+#define OMAP4_PRM_VP_CORE_VSTEPMIN_OFFSET 0x0054
#define OMAP4430_PRM_VP_CORE_VSTEPMIN OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0054)
+#define OMAP4_PRM_VP_MPU_CONFIG_OFFSET 0x0058
#define OMAP4430_PRM_VP_MPU_CONFIG OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0058)
+#define OMAP4_PRM_VP_MPU_STATUS_OFFSET 0x005c
#define OMAP4430_PRM_VP_MPU_STATUS OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x005c)
+#define OMAP4_PRM_VP_MPU_VLIMITTO_OFFSET 0x0060
#define OMAP4430_PRM_VP_MPU_VLIMITTO OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0060)
+#define OMAP4_PRM_VP_MPU_VOLTAGE_OFFSET 0x0064
#define OMAP4430_PRM_VP_MPU_VOLTAGE OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0064)
+#define OMAP4_PRM_VP_MPU_VSTEPMAX_OFFSET 0x0068
#define OMAP4430_PRM_VP_MPU_VSTEPMAX OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0068)
+#define OMAP4_PRM_VP_MPU_VSTEPMIN_OFFSET 0x006c
#define OMAP4430_PRM_VP_MPU_VSTEPMIN OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x006c)
+#define OMAP4_PRM_VP_IVA_CONFIG_OFFSET 0x0070
#define OMAP4430_PRM_VP_IVA_CONFIG OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0070)
+#define OMAP4_PRM_VP_IVA_STATUS_OFFSET 0x0074
#define OMAP4430_PRM_VP_IVA_STATUS OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0074)
+#define OMAP4_PRM_VP_IVA_VLIMITTO_OFFSET 0x0078
#define OMAP4430_PRM_VP_IVA_VLIMITTO OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0078)
+#define OMAP4_PRM_VP_IVA_VOLTAGE_OFFSET 0x007c
#define OMAP4430_PRM_VP_IVA_VOLTAGE OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x007c)
+#define OMAP4_PRM_VP_IVA_VSTEPMAX_OFFSET 0x0080
#define OMAP4430_PRM_VP_IVA_VSTEPMAX OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0080)
+#define OMAP4_PRM_VP_IVA_VSTEPMIN_OFFSET 0x0084
#define OMAP4430_PRM_VP_IVA_VSTEPMIN OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0084)
+#define OMAP4_PRM_VC_SMPS_SA_OFFSET 0x0088
#define OMAP4430_PRM_VC_SMPS_SA OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0088)
+#define OMAP4_PRM_VC_VAL_SMPS_RA_VOL_OFFSET 0x008c
#define OMAP4430_PRM_VC_VAL_SMPS_RA_VOL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x008c)
+#define OMAP4_PRM_VC_VAL_SMPS_RA_CMD_OFFSET 0x0090
#define OMAP4430_PRM_VC_VAL_SMPS_RA_CMD OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0090)
+#define OMAP4_PRM_VC_VAL_CMD_VDD_CORE_L_OFFSET 0x0094
#define OMAP4430_PRM_VC_VAL_CMD_VDD_CORE_L OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0094)
+#define OMAP4_PRM_VC_VAL_CMD_VDD_MPU_L_OFFSET 0x0098
#define OMAP4430_PRM_VC_VAL_CMD_VDD_MPU_L OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0098)
+#define OMAP4_PRM_VC_VAL_CMD_VDD_IVA_L_OFFSET 0x009c
#define OMAP4430_PRM_VC_VAL_CMD_VDD_IVA_L OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x009c)
+#define OMAP4_PRM_VC_VAL_BYPASS_OFFSET 0x00a0
#define OMAP4430_PRM_VC_VAL_BYPASS OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00a0)
+#define OMAP4_PRM_VC_CFG_CHANNEL_OFFSET 0x00a4
#define OMAP4430_PRM_VC_CFG_CHANNEL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00a4)
+#define OMAP4_PRM_VC_CFG_I2C_MODE_OFFSET 0x00a8
#define OMAP4430_PRM_VC_CFG_I2C_MODE OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00a8)
+#define OMAP4_PRM_VC_CFG_I2C_CLK_OFFSET 0x00ac
#define OMAP4430_PRM_VC_CFG_I2C_CLK OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00ac)
+#define OMAP4_PRM_SRAM_COUNT_OFFSET 0x00b0
#define OMAP4430_PRM_SRAM_COUNT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00b0)
+#define OMAP4_PRM_SRAM_WKUP_SETUP_OFFSET 0x00b4
#define OMAP4430_PRM_SRAM_WKUP_SETUP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00b4)
+#define OMAP4_PRM_LDO_SRAM_CORE_SETUP_OFFSET 0x00b8
#define OMAP4430_PRM_LDO_SRAM_CORE_SETUP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00b8)
+#define OMAP4_PRM_LDO_SRAM_CORE_CTRL_OFFSET 0x00bc
#define OMAP4430_PRM_LDO_SRAM_CORE_CTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00bc)
+#define OMAP4_PRM_LDO_SRAM_MPU_SETUP_OFFSET 0x00c0
#define OMAP4430_PRM_LDO_SRAM_MPU_SETUP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00c0)
+#define OMAP4_PRM_LDO_SRAM_MPU_CTRL_OFFSET 0x00c4
#define OMAP4430_PRM_LDO_SRAM_MPU_CTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00c4)
+#define OMAP4_PRM_LDO_SRAM_IVA_SETUP_OFFSET 0x00c8
#define OMAP4430_PRM_LDO_SRAM_IVA_SETUP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00c8)
+#define OMAP4_PRM_LDO_SRAM_IVA_CTRL_OFFSET 0x00cc
#define OMAP4430_PRM_LDO_SRAM_IVA_CTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00cc)
+#define OMAP4_PRM_LDO_ABB_MPU_SETUP_OFFSET 0x00d0
#define OMAP4430_PRM_LDO_ABB_MPU_SETUP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00d0)
+#define OMAP4_PRM_LDO_ABB_MPU_CTRL_OFFSET 0x00d4
#define OMAP4430_PRM_LDO_ABB_MPU_CTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00d4)
+#define OMAP4_PRM_LDO_ABB_IVA_SETUP_OFFSET 0x00d8
#define OMAP4430_PRM_LDO_ABB_IVA_SETUP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00d8)
+#define OMAP4_PRM_LDO_ABB_IVA_CTRL_OFFSET 0x00dc
#define OMAP4430_PRM_LDO_ABB_IVA_CTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00dc)
+#define OMAP4_PRM_LDO_BANDGAP_CTRL_OFFSET 0x00e0
#define OMAP4430_PRM_LDO_BANDGAP_CTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00e0)
+#define OMAP4_PRM_DEVICE_OFF_CTRL_OFFSET 0x00e4
#define OMAP4430_PRM_DEVICE_OFF_CTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00e4)
+#define OMAP4_PRM_PHASE1_CNDP_OFFSET 0x00e8
#define OMAP4430_PRM_PHASE1_CNDP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00e8)
+#define OMAP4_PRM_PHASE2A_CNDP_OFFSET 0x00ec
#define OMAP4430_PRM_PHASE2A_CNDP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00ec)
+#define OMAP4_PRM_PHASE2B_CNDP_OFFSET 0x00f0
#define OMAP4430_PRM_PHASE2B_CNDP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00f0)
+#define OMAP4_PRM_MODEM_IF_CTRL_OFFSET 0x00f4
#define OMAP4430_PRM_MODEM_IF_CTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00f4)
-/* CHIRON_PRCM */
-
+/*
+ * PRCM_MPU
+ *
+ * The PRCM_MPU is a local PRCM embedded inside the MPU subsystem. From the
+ * global PRCM's point of view, the PRCM_MPU is a single entity. It shares
+ * the same programming model as the global PRCM and can thus be treated as
+ * two additional MODs inside the PRCM.
+ */
-/* CHIRON_PRCM.CHIRONSS_OCP_SOCKET_PRCM register offsets */
-#define OMAP4430_REVISION_PRCM OMAP44XX_CHIRONSS_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_OCP_SOCKET_PRCM_MOD, 0x0000)
+/* PRCM_MPU.OCP_SOCKET_PRCM register offsets */
+#define OMAP4_REVISION_PRCM_OFFSET 0x0000
+#define OMAP4430_REVISION_PRCM OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_OCP_SOCKET_PRCM_MOD, 0x0000)
-/* CHIRON_PRCM.CHIRONSS_DEVICE_PRM register offsets */
-#define OMAP4430_CHIRON_PRCM_PRM_RSTST OMAP44XX_CHIRONSS_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_DEVICE_PRM_MOD, 0x0000)
+/* PRCM_MPU.DEVICE_PRM register offsets */
+#define OMAP4_PRCM_MPU_PRM_RSTST_OFFSET 0x0000
+#define OMAP4430_PRCM_MPU_PRM_RSTST OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_DEVICE_PRM_MOD, 0x0000)
-/* CHIRON_PRCM.CHIRONSS_CPU0 register offsets */
-#define OMAP4430_PM_PDA_CPU0_PWRSTCTRL OMAP44XX_CHIRONSS_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU0_MOD, 0x0000)
-#define OMAP4430_PM_PDA_CPU0_PWRSTST OMAP44XX_CHIRONSS_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU0_MOD, 0x0004)
-#define OMAP4430_RM_PDA_CPU0_CPU0_CONTEXT OMAP44XX_CHIRONSS_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU0_MOD, 0x0008)
-#define OMAP4430_RM_PDA_CPU0_CPU0_RSTCTRL OMAP44XX_CHIRONSS_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU0_MOD, 0x000c)
-#define OMAP4430_RM_PDA_CPU0_CPU0_RSTST OMAP44XX_CHIRONSS_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU0_MOD, 0x0010)
-#define OMAP4430_CM_PDA_CPU0_CPU0_CLKCTRL OMAP44XX_CHIRONSS_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU0_MOD, 0x0014)
-#define OMAP4430_CM_PDA_CPU0_CLKSTCTRL OMAP44XX_CHIRONSS_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU0_MOD, 0x0018)
+/* PRCM_MPU.CPU0 register offsets */
+#define OMAP4_PM_CPU0_PWRSTCTRL_OFFSET 0x0000
+#define OMAP4430_PM_CPU0_PWRSTCTRL OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU0_MOD, 0x0000)
+#define OMAP4_PM_CPU0_PWRSTST_OFFSET 0x0004
+#define OMAP4430_PM_CPU0_PWRSTST OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU0_MOD, 0x0004)
+#define OMAP4_RM_CPU0_CPU0_CONTEXT_OFFSET 0x0008
+#define OMAP4430_RM_CPU0_CPU0_CONTEXT OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU0_MOD, 0x0008)
+#define OMAP4_RM_CPU0_CPU0_RSTCTRL_OFFSET 0x000c
+#define OMAP4430_RM_CPU0_CPU0_RSTCTRL OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU0_MOD, 0x000c)
+#define OMAP4_RM_CPU0_CPU0_RSTST_OFFSET 0x0010
+#define OMAP4430_RM_CPU0_CPU0_RSTST OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU0_MOD, 0x0010)
+#define OMAP4_CM_CPU0_CPU0_CLKCTRL_OFFSET 0x0014
+#define OMAP4430_CM_CPU0_CPU0_CLKCTRL OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU0_MOD, 0x0014)
+#define OMAP4_CM_CPU0_CLKSTCTRL_OFFSET 0x0018
+#define OMAP4430_CM_CPU0_CLKSTCTRL OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU0_MOD, 0x0018)
-/* CHIRON_PRCM.CHIRONSS_CPU1 register offsets */
-#define OMAP4430_PM_PDA_CPU1_PWRSTCTRL OMAP44XX_CHIRONSS_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU1_MOD, 0x0000)
-#define OMAP4430_PM_PDA_CPU1_PWRSTST OMAP44XX_CHIRONSS_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU1_MOD, 0x0004)
-#define OMAP4430_RM_PDA_CPU1_CPU1_CONTEXT OMAP44XX_CHIRONSS_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU1_MOD, 0x0008)
-#define OMAP4430_RM_PDA_CPU1_CPU1_RSTCTRL OMAP44XX_CHIRONSS_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU1_MOD, 0x000c)
-#define OMAP4430_RM_PDA_CPU1_CPU1_RSTST OMAP44XX_CHIRONSS_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU1_MOD, 0x0010)
-#define OMAP4430_CM_PDA_CPU1_CPU1_CLKCTRL OMAP44XX_CHIRONSS_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU1_MOD, 0x0014)
-#define OMAP4430_CM_PDA_CPU1_CLKSTCTRL OMAP44XX_CHIRONSS_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU1_MOD, 0x0018)
+/* PRCM_MPU.CPU1 register offsets */
+#define OMAP4_PM_CPU1_PWRSTCTRL_OFFSET 0x0000
+#define OMAP4430_PM_CPU1_PWRSTCTRL OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU1_MOD, 0x0000)
+#define OMAP4_PM_CPU1_PWRSTST_OFFSET 0x0004
+#define OMAP4430_PM_CPU1_PWRSTST OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU1_MOD, 0x0004)
+#define OMAP4_RM_CPU1_CPU1_CONTEXT_OFFSET 0x0008
+#define OMAP4430_RM_CPU1_CPU1_CONTEXT OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU1_MOD, 0x0008)
+#define OMAP4_RM_CPU1_CPU1_RSTCTRL_OFFSET 0x000c
+#define OMAP4430_RM_CPU1_CPU1_RSTCTRL OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU1_MOD, 0x000c)
+#define OMAP4_RM_CPU1_CPU1_RSTST_OFFSET 0x0010
+#define OMAP4430_RM_CPU1_CPU1_RSTST OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU1_MOD, 0x0010)
+#define OMAP4_CM_CPU1_CPU1_CLKCTRL_OFFSET 0x0014
+#define OMAP4430_CM_CPU1_CPU1_CLKCTRL OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU1_MOD, 0x0014)
+#define OMAP4_CM_CPU1_CLKSTCTRL_OFFSET 0x0018
+#define OMAP4430_CM_CPU1_CLKSTCTRL OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU1_MOD, 0x0018)
#endif
diff --git a/arch/arm/mach-omap2/usb-ehci.c b/arch/arm/mach-omap2/usb-ehci.c
index ee9f548d5d81..c68f799e83c5 100644
--- a/arch/arm/mach-omap2/usb-ehci.c
+++ b/arch/arm/mach-omap2/usb-ehci.c
@@ -236,3 +236,158 @@ void __init usb_ehci_init(const struct ehci_hcd_omap_platform_data *pdata)
#endif /* CONFIG_USB_EHCI_HCD */
+#if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
+
+static struct resource ohci_resources[] = {
+ {
+ .start = OMAP34XX_OHCI_BASE,
+ .end = OMAP34XX_OHCI_BASE + SZ_1K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = OMAP34XX_UHH_CONFIG_BASE,
+ .end = OMAP34XX_UHH_CONFIG_BASE + SZ_1K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = OMAP34XX_USBTLL_BASE,
+ .end = OMAP34XX_USBTLL_BASE + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ { /* general IRQ */
+ .start = INT_34XX_OHCI_IRQ,
+ .flags = IORESOURCE_IRQ,
+ }
+};
+
+static u64 ohci_dmamask = DMA_BIT_MASK(32);
+
+static struct platform_device ohci_device = {
+ .name = "ohci-omap3",
+ .id = 0,
+ .dev = {
+ .dma_mask = &ohci_dmamask,
+ .coherent_dma_mask = 0xffffffff,
+ },
+ .num_resources = ARRAY_SIZE(ohci_resources),
+ .resource = ohci_resources,
+};
+
+static void setup_ohci_io_mux(const enum ohci_omap3_port_mode *port_mode)
+{
+ switch (port_mode[0]) {
+ case OMAP_OHCI_PORT_MODE_PHY_6PIN_DATSE0:
+ case OMAP_OHCI_PORT_MODE_PHY_6PIN_DPDM:
+ case OMAP_OHCI_PORT_MODE_TLL_6PIN_DATSE0:
+ case OMAP_OHCI_PORT_MODE_TLL_6PIN_DPDM:
+ omap_mux_init_signal("mm1_rxdp",
+ OMAP_PIN_INPUT_PULLDOWN);
+ omap_mux_init_signal("mm1_rxdm",
+ OMAP_PIN_INPUT_PULLDOWN);
+ /* FALLTHROUGH */
+ case OMAP_OHCI_PORT_MODE_PHY_4PIN_DPDM:
+ case OMAP_OHCI_PORT_MODE_TLL_4PIN_DPDM:
+ omap_mux_init_signal("mm1_rxrcv",
+ OMAP_PIN_INPUT_PULLDOWN);
+ /* FALLTHROUGH */
+ case OMAP_OHCI_PORT_MODE_PHY_3PIN_DATSE0:
+ case OMAP_OHCI_PORT_MODE_TLL_3PIN_DATSE0:
+ omap_mux_init_signal("mm1_txen_n", OMAP_PIN_OUTPUT);
+ /* FALLTHROUGH */
+ case OMAP_OHCI_PORT_MODE_TLL_2PIN_DATSE0:
+ case OMAP_OHCI_PORT_MODE_TLL_2PIN_DPDM:
+ omap_mux_init_signal("mm1_txse0",
+ OMAP_PIN_INPUT_PULLDOWN);
+ omap_mux_init_signal("mm1_txdat",
+ OMAP_PIN_INPUT_PULLDOWN);
+ break;
+ case OMAP_OHCI_PORT_MODE_UNUSED:
+ /* FALLTHROUGH */
+ default:
+ break;
+ }
+ switch (port_mode[1]) {
+ case OMAP_OHCI_PORT_MODE_PHY_6PIN_DATSE0:
+ case OMAP_OHCI_PORT_MODE_PHY_6PIN_DPDM:
+ case OMAP_OHCI_PORT_MODE_TLL_6PIN_DATSE0:
+ case OMAP_OHCI_PORT_MODE_TLL_6PIN_DPDM:
+ omap_mux_init_signal("mm2_rxdp",
+ OMAP_PIN_INPUT_PULLDOWN);
+ omap_mux_init_signal("mm2_rxdm",
+ OMAP_PIN_INPUT_PULLDOWN);
+ /* FALLTHROUGH */
+ case OMAP_OHCI_PORT_MODE_PHY_4PIN_DPDM:
+ case OMAP_OHCI_PORT_MODE_TLL_4PIN_DPDM:
+ omap_mux_init_signal("mm2_rxrcv",
+ OMAP_PIN_INPUT_PULLDOWN);
+ /* FALLTHROUGH */
+ case OMAP_OHCI_PORT_MODE_PHY_3PIN_DATSE0:
+ case OMAP_OHCI_PORT_MODE_TLL_3PIN_DATSE0:
+ omap_mux_init_signal("mm2_txen_n", OMAP_PIN_OUTPUT);
+ /* FALLTHROUGH */
+ case OMAP_OHCI_PORT_MODE_TLL_2PIN_DATSE0:
+ case OMAP_OHCI_PORT_MODE_TLL_2PIN_DPDM:
+ omap_mux_init_signal("mm2_txse0",
+ OMAP_PIN_INPUT_PULLDOWN);
+ omap_mux_init_signal("mm2_txdat",
+ OMAP_PIN_INPUT_PULLDOWN);
+ break;
+ case OMAP_OHCI_PORT_MODE_UNUSED:
+ /* FALLTHROUGH */
+ default:
+ break;
+ }
+ switch (port_mode[2]) {
+ case OMAP_OHCI_PORT_MODE_PHY_6PIN_DATSE0:
+ case OMAP_OHCI_PORT_MODE_PHY_6PIN_DPDM:
+ case OMAP_OHCI_PORT_MODE_TLL_6PIN_DATSE0:
+ case OMAP_OHCI_PORT_MODE_TLL_6PIN_DPDM:
+ omap_mux_init_signal("mm3_rxdp",
+ OMAP_PIN_INPUT_PULLDOWN);
+ omap_mux_init_signal("mm3_rxdm",
+ OMAP_PIN_INPUT_PULLDOWN);
+ /* FALLTHROUGH */
+ case OMAP_OHCI_PORT_MODE_PHY_4PIN_DPDM:
+ case OMAP_OHCI_PORT_MODE_TLL_4PIN_DPDM:
+ omap_mux_init_signal("mm3_rxrcv",
+ OMAP_PIN_INPUT_PULLDOWN);
+ /* FALLTHROUGH */
+ case OMAP_OHCI_PORT_MODE_PHY_3PIN_DATSE0:
+ case OMAP_OHCI_PORT_MODE_TLL_3PIN_DATSE0:
+ omap_mux_init_signal("mm3_txen_n", OMAP_PIN_OUTPUT);
+ /* FALLTHROUGH */
+ case OMAP_OHCI_PORT_MODE_TLL_2PIN_DATSE0:
+ case OMAP_OHCI_PORT_MODE_TLL_2PIN_DPDM:
+ omap_mux_init_signal("mm3_txse0",
+ OMAP_PIN_INPUT_PULLDOWN);
+ omap_mux_init_signal("mm3_txdat",
+ OMAP_PIN_INPUT_PULLDOWN);
+ break;
+ case OMAP_OHCI_PORT_MODE_UNUSED:
+ /* FALLTHROUGH */
+ default:
+ break;
+ }
+}
+
+void __init usb_ohci_init(const struct ohci_hcd_omap_platform_data *pdata)
+{
+ platform_device_add_data(&ohci_device, pdata, sizeof(*pdata));
+
+ /* Setup Pin IO MUX for OHCI */
+ if (cpu_is_omap34xx())
+ setup_ohci_io_mux(pdata->port_mode);
+
+ if (platform_device_register(&ohci_device) < 0) {
+ pr_err("Unable to register FS-USB (OHCI) device\n");
+ return;
+ }
+}
+
+#else
+
+void __init usb_ohci_init(const struct ohci_hcd_omap_platform_data *pdata)
+{
+}
+
+#endif /* CONFIG_USB_OHCI_HCD */
diff --git a/arch/arm/mach-omap2/usb-musb.c b/arch/arm/mach-omap2/usb-musb.c
index 6d41fa7b2ce8..96f6787e00b2 100644
--- a/arch/arm/mach-omap2/usb-musb.c
+++ b/arch/arm/mach-omap2/usb-musb.c
@@ -107,6 +107,7 @@ void __init usb_musb_init(struct omap_musb_board_data *board_data)
musb_plat.board_data = board_data;
musb_plat.power = board_data->power >> 1;
musb_plat.mode = board_data->mode;
+ musb_plat.extvbus = board_data->extvbus;
if (platform_device_register(&musb_device) < 0)
printk(KERN_ERR "Unable to register HS-USB (MUSB) device\n");
diff --git a/arch/arm/mach-orion5x/dns323-setup.c b/arch/arm/mach-orion5x/dns323-setup.c
index 421b82f7c63d..fe0de1698edc 100644
--- a/arch/arm/mach-orion5x/dns323-setup.c
+++ b/arch/arm/mach-orion5x/dns323-setup.c
@@ -240,22 +240,23 @@ error_fail:
#define ORION_BLINK_HALF_PERIOD 100 /* ms */
-static int dns323_gpio_blink_set(unsigned gpio,
+static int dns323_gpio_blink_set(unsigned gpio, int state,
unsigned long *delay_on, unsigned long *delay_off)
{
- static int value = 0;
- if (!*delay_on && !*delay_off)
+ if (delay_on && delay_off && !*delay_on && !*delay_off)
*delay_on = *delay_off = ORION_BLINK_HALF_PERIOD;
- if (ORION_BLINK_HALF_PERIOD == *delay_on
- && ORION_BLINK_HALF_PERIOD == *delay_off) {
- value = !value;
- orion_gpio_set_blink(gpio, value);
- return 0;
+	switch (state) {
+ case GPIO_LED_NO_BLINK_LOW:
+ case GPIO_LED_NO_BLINK_HIGH:
+ orion_gpio_set_blink(gpio, 0);
+ gpio_set_value(gpio, state);
+ break;
+ case GPIO_LED_BLINK:
+ orion_gpio_set_blink(gpio, 1);
}
-
- return -EINVAL;
+ return 0;
}
static struct gpio_led dns323_leds[] = {
@@ -263,6 +264,7 @@ static struct gpio_led dns323_leds[] = {
.name = "power:blue",
.gpio = DNS323_GPIO_LED_POWER2,
.default_trigger = "timer",
+ .active_low = 1,
}, {
.name = "right:amber",
.gpio = DNS323_GPIO_LED_RIGHT_AMBER,
@@ -439,6 +441,7 @@ static void __init dns323_init(void)
*/
if (dns323_dev_id() == MV88F5181_DEV_ID) {
dns323_leds[0].active_low = 1;
+ gpio_request(DNS323_GPIO_LED_POWER1, "Power Led Enable");
gpio_direction_output(DNS323_GPIO_LED_POWER1, 0);
}
diff --git a/arch/arm/mach-pxa/Kconfig b/arch/arm/mach-pxa/Kconfig
index 5b6ee46fa7f6..3b51741a4810 100644
--- a/arch/arm/mach-pxa/Kconfig
+++ b/arch/arm/mach-pxa/Kconfig
@@ -19,7 +19,6 @@ config MACH_MAINSTONE
config MACH_ZYLONITE
bool
select PXA3xx
- select PXA_SSP
select HAVE_PWM
select PXA_HAVE_BOARD_IRQS
@@ -39,7 +38,6 @@ config MACH_LITTLETON
select PXA3xx
select CPU_PXA300
select CPU_PXA310
- select PXA_SSP
config MACH_TAVOREVB
bool "PXA930 Evaluation Board (aka TavorEVB)"
@@ -98,7 +96,6 @@ config MACH_ARMCORE
select PXA27x
select IWMMXT
select PXA25x
- select PXA_SSP
config MACH_EM_X270
bool "CompuLab EM-x270 platform"
@@ -161,7 +158,6 @@ config MACH_XCEP
select MTD_CFI
select MTD_CHAR
select SMC91X
- select PXA_SSP
help
PXA255 based Single Board Computer with SMC 91C111 ethernet chip and 64 MB of flash.
Tuned for usage in Libera instruments for particle accelerators.
@@ -180,7 +176,6 @@ config MACH_TRIZEPS4WL
depends on TRIZEPS_PXA
select TRIZEPS_PCMCIA
select PXA27x
- select PXA_SSP
choice
prompt "Select base board for Trizeps module"
@@ -217,7 +212,6 @@ config MACH_PCM027
bool "Phytec phyCORE-PXA270 CPU module (PCM-027)"
select PXA27x
select IWMMXT
- select PXA_SSP
select PXA_HAVE_BOARD_IRQS
config MACH_PCM990_BASEBOARD
@@ -255,13 +249,19 @@ config MACH_COLIBRI320
select PXA3xx
select CPU_PXA320
+config MACH_VPAC270
+ bool "Voipac PXA270"
+ select PXA27x
+ select HAVE_PATA_PLATFORM
+ help
+ PXA270 based Single Board Computer.
+
comment "End-user Products (sorted by vendor name)"
config MACH_H4700
bool "HP iPAQ hx4700"
select PXA27x
select IWMMXT
- select PXA_SSP
select HAVE_PWM
select PXA_HAVE_BOARD_IRQS
@@ -277,7 +277,6 @@ config MACH_MAGICIAN
bool "Enable HTC Magician Support"
select PXA27x
select IWMMXT
- select PXA_SSP
select HAVE_PWM
select PXA_HAVE_BOARD_IRQS
@@ -431,13 +430,11 @@ config MACH_RAUMFELD_CONNECTOR
bool "Raumfeld Connector"
select PXA3xx
select CPU_PXA300
- select PXA_SSP
config MACH_RAUMFELD_SPEAKER
bool "Raumfeld Speaker"
select PXA3xx
select CPU_PXA300
- select PXA_SSP
config PXA_SHARPSL
bool "SHARP Zaurus SL-5600, SL-C7xx and SL-Cxx00 Models"
@@ -461,21 +458,11 @@ config SHARPSL_PM_MAX1111
select HWMON
select SENSORS_MAX1111
-config CORGI_SSP_DEPRECATED
- bool
- select PXA_SSP
- select PXA_SSP_LEGACY
- help
- This option will include corgi_ssp.c and corgi_lcd.c
- that corgi_ts.c and other legacy drivers (corgi_bl.c
- and sharpsl_pm.c) may depend on.
-
config MACH_POODLE
bool "Enable Sharp SL-5600 (Poodle) Support"
depends on PXA_SHARPSL
select PXA25x
select SHARP_LOCOMO
- select PXA_SSP
select PXA_HAVE_BOARD_IRQS
config MACH_CORGI
@@ -581,6 +568,12 @@ config MACH_E800
Say Y here if you intend to run this kernel on a Toshiba
e800 family PDA.
+config MACH_ZIPIT2
+ bool "Zipit Z2 Handheld"
+ select PXA27x
+ select HAVE_PWM
+ select PXA_HAVE_BOARD_IRQS
+
endmenu
config PXA25x
@@ -645,28 +638,16 @@ config CPU_PXA950
config PXA_SHARP_C7xx
bool
- select PXA_SSP
select SHARPSL_PM
help
Enable support for all Sharp C7xx models
config PXA_SHARP_Cxx00
bool
- select PXA_SSP
select SHARPSL_PM
help
Enable common support for Sharp Cxx00 models
-config PXA_SSP
- tristate
- help
- Enable support for PXA2xx SSP ports
-
-config PXA_SSP_LEGACY
- bool
- help
- Support of legacy SSP API
-
config TOSA_BT
tristate "Control the state of built-in bluetooth chip on Sharp SL-6000"
depends on MACH_TOSA
@@ -675,6 +656,18 @@ config TOSA_BT
This is a simple driver that is able to control
the state of built in bluetooth chip on tosa.
+config TOSA_USE_EXT_KEYCODES
+ bool "Tosa keyboard: use extended keycodes"
+ depends on MACH_TOSA
+ default n
+ help
+ Say Y here to enable the tosa keyboard driver to generate extended
+ (>= 127) keycodes. Be aware that they can't be correctly interpreted
+ by either the console keyboard driver or the Kdrive keyboard driver.
+
+ Say Y only if you know what you are doing!
+
+
config PXA_HAVE_BOARD_IRQS
bool
diff --git a/arch/arm/mach-pxa/Makefile b/arch/arm/mach-pxa/Makefile
index 86bc87b7f2dd..b8f1f4bc7ca7 100644
--- a/arch/arm/mach-pxa/Makefile
+++ b/arch/arm/mach-pxa/Makefile
@@ -14,7 +14,6 @@ obj-$(CONFIG_PXA3xx) += cpufreq-pxa3xx.o
endif
# Generic drivers that other drivers may depend upon
-obj-$(CONFIG_PXA_SSP) += ssp.o
# SoC-specific code
obj-$(CONFIG_PXA25x) += mfp-pxa2xx.o pxa2xx.o pxa25x.o
@@ -62,6 +61,7 @@ obj-$(CONFIG_MACH_PCM990_BASEBOARD) += pcm990-baseboard.o
obj-$(CONFIG_MACH_COLIBRI) += colibri-pxa270.o
obj-$(CONFIG_MACH_COLIBRI300) += colibri-pxa3xx.o colibri-pxa300.o
obj-$(CONFIG_MACH_COLIBRI320) += colibri-pxa3xx.o colibri-pxa320.o
+obj-$(CONFIG_MACH_VPAC270) += vpac270.o
# End-user Products
obj-$(CONFIG_MACH_H4700) += hx4700.o
@@ -80,7 +80,6 @@ obj-$(CONFIG_MACH_PALMLD) += palmld.o
obj-$(CONFIG_PALM_TREO) += palmtreo.o
obj-$(CONFIG_PXA_SHARP_C7xx) += corgi.o sharpsl_pm.o corgi_pm.o
obj-$(CONFIG_PXA_SHARP_Cxx00) += spitz.o sharpsl_pm.o spitz_pm.o
-obj-$(CONFIG_CORGI_SSP_DEPRECATED) += corgi_ssp.o corgi_lcd.o
obj-$(CONFIG_MACH_POODLE) += poodle.o
obj-$(CONFIG_MACH_TOSA) += tosa.o
obj-$(CONFIG_MACH_ICONTROL) += icontrol.o mxm8x10.o
@@ -94,6 +93,7 @@ obj-$(CONFIG_MACH_E800) += e800.o
obj-$(CONFIG_MACH_RAUMFELD_RC) += raumfeld.o
obj-$(CONFIG_MACH_RAUMFELD_CONNECTOR) += raumfeld.o
obj-$(CONFIG_MACH_RAUMFELD_SPEAKER) += raumfeld.o
+obj-$(CONFIG_MACH_ZIPIT2) += z2.o
# Support for blinky lights
led-y := leds.o
diff --git a/arch/arm/mach-pxa/cm-x300.c b/arch/arm/mach-pxa/cm-x300.c
index d37cfa132a65..fdda6be6c391 100644
--- a/arch/arm/mach-pxa/cm-x300.c
+++ b/arch/arm/mach-pxa/cm-x300.c
@@ -30,6 +30,9 @@
#include <linux/i2c/pca953x.h>
#include <linux/mfd/da903x.h>
+#include <linux/regulator/machine.h>
+#include <linux/power_supply.h>
+#include <linux/apm-emulation.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_gpio.h>
@@ -430,7 +433,7 @@ static inline void cm_x300_init_nand(void) {}
#if defined(CONFIG_MMC) || defined(CONFIG_MMC_MODULE)
static struct pxamci_platform_data cm_x300_mci_platform_data = {
- .detect_delay = 20,
+ .detect_delay_ms = 200,
.ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34,
.gpio_card_detect = GPIO82_MMC_IRQ,
.gpio_card_ro = GPIO85_MMC_WP,
@@ -451,7 +454,7 @@ static void cm_x300_mci2_exit(struct device *dev, void *data)
}
static struct pxamci_platform_data cm_x300_mci2_platform_data = {
- .detect_delay = 20,
+ .detect_delay_ms = 200,
.ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34,
.init = cm_x300_mci2_init,
.exit = cm_x300_mci2_exit,
@@ -584,12 +587,87 @@ static void __init cm_x300_init_rtc(void)
static inline void cm_x300_init_rtc(void) {}
#endif
+/* Battery */
+struct power_supply_info cm_x300_psy_info = {
+ .name = "battery",
+ .technology = POWER_SUPPLY_TECHNOLOGY_LIPO,
+ .voltage_max_design = 4200000,
+ .voltage_min_design = 3000000,
+ .use_for_apm = 1,
+};
+
+static void cm_x300_battery_low(void)
+{
+#if defined(CONFIG_APM_EMULATION)
+ apm_queue_event(APM_LOW_BATTERY);
+#endif
+}
+
+static void cm_x300_battery_critical(void)
+{
+#if defined(CONFIG_APM_EMULATION)
+ apm_queue_event(APM_CRITICAL_SUSPEND);
+#endif
+}
+
+struct da9030_battery_info cm_x300_battery_info = {
+ .battery_info = &cm_x300_psy_info,
+
+ .charge_milliamp = 1000,
+ .charge_millivolt = 4200,
+
+ .vbat_low = 3600,
+ .vbat_crit = 3400,
+ .vbat_charge_start = 4100,
+ .vbat_charge_stop = 4200,
+ .vbat_charge_restart = 4000,
+
+ .vcharge_min = 3200,
+ .vcharge_max = 5500,
+
+ .tbat_low = 197,
+ .tbat_high = 78,
+ .tbat_restart = 100,
+
+ .batmon_interval = 0,
+
+ .battery_low = cm_x300_battery_low,
+ .battery_critical = cm_x300_battery_critical,
+};
+
+static struct regulator_consumer_supply buck2_consumers[] = {
+ {
+ .dev = NULL,
+ .supply = "vcc_core",
+ },
+};
+
+static struct regulator_init_data buck2_data = {
+ .constraints = {
+ .min_uV = 1375000,
+ .max_uV = 1375000,
+ .state_mem = {
+ .enabled = 0,
+ },
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE,
+ .apply_uV = 1,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(buck2_consumers),
+ .consumer_supplies = buck2_consumers,
+};
+
/* DA9030 */
struct da903x_subdev_info cm_x300_da9030_subdevs[] = {
{
- .name = "da903x-backlight",
- .id = DA9030_ID_WLED,
- }
+ .name = "da903x-battery",
+ .id = DA9030_ID_BAT,
+ .platform_data = &cm_x300_battery_info,
+ },
+ {
+ .name = "da903x-regulator",
+ .id = DA9030_ID_BUCK2,
+ .platform_data = &buck2_data,
+ },
};
static struct da903x_platform_data cm_x300_da9030_info = {
@@ -599,7 +677,7 @@ static struct da903x_platform_data cm_x300_da9030_info = {
static struct i2c_board_info cm_x300_pmic_info = {
I2C_BOARD_INFO("da9030", 0x49),
- .irq = IRQ_GPIO(0),
+ .irq = IRQ_WAKEUP0,
.platform_data = &cm_x300_da9030_info,
};
@@ -689,13 +767,13 @@ static void __init cm_x300_init(void)
static void __init cm_x300_fixup(struct machine_desc *mdesc, struct tag *tags,
char **cmdline, struct meminfo *mi)
{
- mi->nr_banks = 2;
- mi->bank[0].start = 0xa0000000;
- mi->bank[0].node = 0;
- mi->bank[0].size = (64*1024*1024);
- mi->bank[1].start = 0xc0000000;
- mi->bank[1].node = 0;
- mi->bank[1].size = (64*1024*1024);
+ /* Make sure that mi->bank[0].start = PHYS_ADDR */
+ for (; tags->hdr.size; tags = tag_next(tags))
+ if (tags->hdr.tag == ATAG_MEM &&
+ tags->u.mem.start == 0x80000000) {
+ tags->u.mem.start = 0xa0000000;
+ break;
+ }
}
MACHINE_START(CM_X300, "CM-X300 module")
diff --git a/arch/arm/mach-pxa/colibri-pxa3xx.c b/arch/arm/mach-pxa/colibri-pxa3xx.c
index e6c0a2287eb8..199afa2ae303 100644
--- a/arch/arm/mach-pxa/colibri-pxa3xx.c
+++ b/arch/arm/mach-pxa/colibri-pxa3xx.c
@@ -96,7 +96,7 @@ static void colibri_pxa3xx_mci_exit(struct device *dev, void *data)
}
static struct pxamci_platform_data colibri_pxa3xx_mci_platform_data = {
- .detect_delay = 20,
+ .detect_delay_ms = 200,
.ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
.init = colibri_pxa3xx_mci_init,
.exit = colibri_pxa3xx_mci_exit,
diff --git a/arch/arm/mach-pxa/corgi.c b/arch/arm/mach-pxa/corgi.c
index da3156d8690b..3d1dcb9ac08f 100644
--- a/arch/arm/mach-pxa/corgi.c
+++ b/arch/arm/mach-pxa/corgi.c
@@ -106,18 +106,18 @@ static unsigned long corgi_pin_config[] __initdata = {
GPIO8_MMC_CS0,
/* GPIO Matrix Keypad */
- GPIO66_GPIO, /* column 0 */
- GPIO67_GPIO, /* column 1 */
- GPIO68_GPIO, /* column 2 */
- GPIO69_GPIO, /* column 3 */
- GPIO70_GPIO, /* column 4 */
- GPIO71_GPIO, /* column 5 */
- GPIO72_GPIO, /* column 6 */
- GPIO73_GPIO, /* column 7 */
- GPIO74_GPIO, /* column 8 */
- GPIO75_GPIO, /* column 9 */
- GPIO76_GPIO, /* column 10 */
- GPIO77_GPIO, /* column 11 */
+ GPIO66_GPIO | MFP_LPM_DRIVE_HIGH, /* column 0 */
+ GPIO67_GPIO | MFP_LPM_DRIVE_HIGH, /* column 1 */
+ GPIO68_GPIO | MFP_LPM_DRIVE_HIGH, /* column 2 */
+ GPIO69_GPIO | MFP_LPM_DRIVE_HIGH, /* column 3 */
+ GPIO70_GPIO | MFP_LPM_DRIVE_HIGH, /* column 4 */
+ GPIO71_GPIO | MFP_LPM_DRIVE_HIGH, /* column 5 */
+ GPIO72_GPIO | MFP_LPM_DRIVE_HIGH, /* column 6 */
+ GPIO73_GPIO | MFP_LPM_DRIVE_HIGH, /* column 7 */
+ GPIO74_GPIO | MFP_LPM_DRIVE_HIGH, /* column 8 */
+ GPIO75_GPIO | MFP_LPM_DRIVE_HIGH, /* column 9 */
+ GPIO76_GPIO | MFP_LPM_DRIVE_HIGH, /* column 10 */
+ GPIO77_GPIO | MFP_LPM_DRIVE_HIGH, /* column 11 */
GPIO58_GPIO, /* row 0 */
GPIO59_GPIO, /* row 1 */
GPIO60_GPIO, /* row 2 */
@@ -128,13 +128,20 @@ static unsigned long corgi_pin_config[] __initdata = {
GPIO65_GPIO, /* row 7 */
/* GPIO */
- GPIO9_GPIO, /* CORGI_GPIO_nSD_DETECT */
- GPIO7_GPIO, /* CORGI_GPIO_nSD_WP */
- GPIO33_GPIO, /* CORGI_GPIO_SD_PWR */
- GPIO22_GPIO, /* CORGI_GPIO_IR_ON */
- GPIO44_GPIO, /* CORGI_GPIO_HSYNC */
-
- GPIO1_GPIO | WAKEUP_ON_EDGE_RISE,
+ GPIO9_GPIO, /* CORGI_GPIO_nSD_DETECT */
+ GPIO7_GPIO, /* CORGI_GPIO_nSD_WP */
+ GPIO11_GPIO | WAKEUP_ON_EDGE_BOTH, /* CORGI_GPIO_MAIN_BAT_{LOW,COVER} */
+ GPIO13_GPIO | MFP_LPM_KEEP_OUTPUT, /* CORGI_GPIO_LED_ORANGE */
+ GPIO21_GPIO, /* CORGI_GPIO_ADC_TEMP */
+ GPIO22_GPIO, /* CORGI_GPIO_IR_ON */
+ GPIO33_GPIO, /* CORGI_GPIO_SD_PWR */
+ GPIO38_GPIO | MFP_LPM_KEEP_OUTPUT, /* CORGI_GPIO_CHRG_ON */
+ GPIO43_GPIO | MFP_LPM_KEEP_OUTPUT, /* CORGI_GPIO_CHRG_UKN */
+ GPIO44_GPIO, /* CORGI_GPIO_HSYNC */
+
+ GPIO0_GPIO | WAKEUP_ON_EDGE_BOTH, /* CORGI_GPIO_KEY_INT */
+ GPIO1_GPIO | WAKEUP_ON_EDGE_RISE, /* CORGI_GPIO_AC_IN */
+ GPIO3_GPIO | WAKEUP_ON_EDGE_BOTH, /* CORGI_GPIO_WAKEUP */
};
/*
@@ -437,6 +444,7 @@ static struct platform_device corgiled_device = {
* to give the card a chance to fully insert/eject.
*/
static struct pxamci_platform_data corgi_mci_platform_data = {
+ .detect_delay_ms = 250,
.ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34,
.gpio_card_detect = -1,
.gpio_card_ro = CORGI_GPIO_nSD_WP,
@@ -672,6 +680,15 @@ static void __init corgi_init(void)
pxa2xx_mfp_config(ARRAY_AND_SIZE(corgi_pin_config));
+ /* allow wakeup from various GPIOs */
+ gpio_set_wake(CORGI_GPIO_KEY_INT, 1);
+ gpio_set_wake(CORGI_GPIO_WAKEUP, 1);
+ gpio_set_wake(CORGI_GPIO_AC_IN, 1);
+ gpio_set_wake(CORGI_GPIO_CHRG_FULL, 1);
+
+ if (!machine_is_corgi())
+ gpio_set_wake(CORGI_GPIO_MAIN_BAT_LOW, 1);
+
pxa_set_ffuart_info(NULL);
pxa_set_btuart_info(NULL);
pxa_set_stuart_info(NULL);
@@ -679,7 +696,6 @@ static void __init corgi_init(void)
corgi_init_spi();
pxa_set_udc_info(&udc_info);
- corgi_mci_platform_data.detect_delay = msecs_to_jiffies(250);
pxa_set_mci_info(&corgi_mci_platform_data);
pxa_set_ficp_info(&corgi_ficp_platform_data);
pxa_set_i2c_info(NULL);
diff --git a/arch/arm/mach-pxa/corgi_lcd.c b/arch/arm/mach-pxa/corgi_lcd.c
deleted file mode 100644
index d9b96319d498..000000000000
--- a/arch/arm/mach-pxa/corgi_lcd.c
+++ /dev/null
@@ -1,288 +0,0 @@
-/*
- * linux/arch/arm/mach-pxa/corgi_lcd.c
- *
- * Corgi/Spitz LCD Specific Code
- *
- * Copyright (C) 2005 Richard Purdie
- *
- * Connectivity:
- * Corgi - LCD to ATI Imageon w100 (Wallaby)
- * Spitz - LCD to PXA Framebuffer
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/delay.h>
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
-#include <linux/module.h>
-#include <linux/string.h>
-#include <mach/corgi.h>
-#include <mach/hardware.h>
-#include <mach/sharpsl.h>
-#include <mach/spitz.h>
-#include <asm/hardware/scoop.h>
-#include <asm/mach/sharpsl_param.h>
-#include "generic.h"
-
-/* Register Addresses */
-#define RESCTL_ADRS 0x00
-#define PHACTRL_ADRS 0x01
-#define DUTYCTRL_ADRS 0x02
-#define POWERREG0_ADRS 0x03
-#define POWERREG1_ADRS 0x04
-#define GPOR3_ADRS 0x05
-#define PICTRL_ADRS 0x06
-#define POLCTRL_ADRS 0x07
-
-/* Register Bit Definitions */
-#define RESCTL_QVGA 0x01
-#define RESCTL_VGA 0x00
-
-#define POWER1_VW_ON 0x01 /* VW Supply FET ON */
-#define POWER1_GVSS_ON 0x02 /* GVSS(-8V) Power Supply ON */
-#define POWER1_VDD_ON 0x04 /* VDD(8V),SVSS(-4V) Power Supply ON */
-
-#define POWER1_VW_OFF 0x00 /* VW Supply FET OFF */
-#define POWER1_GVSS_OFF 0x00 /* GVSS(-8V) Power Supply OFF */
-#define POWER1_VDD_OFF 0x00 /* VDD(8V),SVSS(-4V) Power Supply OFF */
-
-#define POWER0_COM_DCLK 0x01 /* COM Voltage DC Bias DAC Serial Data Clock */
-#define POWER0_COM_DOUT 0x02 /* COM Voltage DC Bias DAC Serial Data Out */
-#define POWER0_DAC_ON 0x04 /* DAC Power Supply ON */
-#define POWER0_COM_ON 0x08 /* COM Power Supply ON */
-#define POWER0_VCC5_ON 0x10 /* VCC5 Power Supply ON */
-
-#define POWER0_DAC_OFF 0x00 /* DAC Power Supply OFF */
-#define POWER0_COM_OFF 0x00 /* COM Power Supply OFF */
-#define POWER0_VCC5_OFF 0x00 /* VCC5 Power Supply OFF */
-
-#define PICTRL_INIT_STATE 0x01
-#define PICTRL_INIOFF 0x02
-#define PICTRL_POWER_DOWN 0x04
-#define PICTRL_COM_SIGNAL_OFF 0x08
-#define PICTRL_DAC_SIGNAL_OFF 0x10
-
-#define POLCTRL_SYNC_POL_FALL 0x01
-#define POLCTRL_EN_POL_FALL 0x02
-#define POLCTRL_DATA_POL_FALL 0x04
-#define POLCTRL_SYNC_ACT_H 0x08
-#define POLCTRL_EN_ACT_L 0x10
-
-#define POLCTRL_SYNC_POL_RISE 0x00
-#define POLCTRL_EN_POL_RISE 0x00
-#define POLCTRL_DATA_POL_RISE 0x00
-#define POLCTRL_SYNC_ACT_L 0x00
-#define POLCTRL_EN_ACT_H 0x00
-
-#define PHACTRL_PHASE_MANUAL 0x01
-#define DEFAULT_PHAD_QVGA (9)
-#define DEFAULT_COMADJ (125)
-
-/*
- * This is only a psuedo I2C interface. We can't use the standard kernel
- * routines as the interface is write only. We just assume the data is acked...
- */
-static void lcdtg_ssp_i2c_send(u8 data)
-{
- corgi_ssp_lcdtg_send(POWERREG0_ADRS, data);
- udelay(10);
-}
-
-static void lcdtg_i2c_send_bit(u8 data)
-{
- lcdtg_ssp_i2c_send(data);
- lcdtg_ssp_i2c_send(data | POWER0_COM_DCLK);
- lcdtg_ssp_i2c_send(data);
-}
-
-static void lcdtg_i2c_send_start(u8 base)
-{
- lcdtg_ssp_i2c_send(base | POWER0_COM_DCLK | POWER0_COM_DOUT);
- lcdtg_ssp_i2c_send(base | POWER0_COM_DCLK);
- lcdtg_ssp_i2c_send(base);
-}
-
-static void lcdtg_i2c_send_stop(u8 base)
-{
- lcdtg_ssp_i2c_send(base);
- lcdtg_ssp_i2c_send(base | POWER0_COM_DCLK);
- lcdtg_ssp_i2c_send(base | POWER0_COM_DCLK | POWER0_COM_DOUT);
-}
-
-static void lcdtg_i2c_send_byte(u8 base, u8 data)
-{
- int i;
- for (i = 0; i < 8; i++) {
- if (data & 0x80)
- lcdtg_i2c_send_bit(base | POWER0_COM_DOUT);
- else
- lcdtg_i2c_send_bit(base);
- data <<= 1;
- }
-}
-
-static void lcdtg_i2c_wait_ack(u8 base)
-{
- lcdtg_i2c_send_bit(base);
-}
-
-static void lcdtg_set_common_voltage(u8 base_data, u8 data)
-{
- /* Set Common Voltage to M62332FP via I2C */
- lcdtg_i2c_send_start(base_data);
- lcdtg_i2c_send_byte(base_data, 0x9c);
- lcdtg_i2c_wait_ack(base_data);
- lcdtg_i2c_send_byte(base_data, 0x00);
- lcdtg_i2c_wait_ack(base_data);
- lcdtg_i2c_send_byte(base_data, data);
- lcdtg_i2c_wait_ack(base_data);
- lcdtg_i2c_send_stop(base_data);
-}
-
-/* Set Phase Adjust */
-static void lcdtg_set_phadadj(int mode)
-{
- int adj;
- switch(mode) {
- case 480:
- case 640:
- /* Setting for VGA */
- adj = sharpsl_param.phadadj;
- if (adj < 0) {
- adj = PHACTRL_PHASE_MANUAL;
- } else {
- adj = ((adj & 0x0f) << 1) | PHACTRL_PHASE_MANUAL;
- }
- break;
- case 240:
- case 320:
- default:
- /* Setting for QVGA */
- adj = (DEFAULT_PHAD_QVGA << 1) | PHACTRL_PHASE_MANUAL;
- break;
- }
-
- corgi_ssp_lcdtg_send(PHACTRL_ADRS, adj);
-}
-
-static int lcd_inited;
-
-void corgi_lcdtg_hw_init(int mode)
-{
- if (!lcd_inited) {
- int comadj;
-
- /* Initialize Internal Logic & Port */
- corgi_ssp_lcdtg_send(PICTRL_ADRS, PICTRL_POWER_DOWN | PICTRL_INIOFF | PICTRL_INIT_STATE
- | PICTRL_COM_SIGNAL_OFF | PICTRL_DAC_SIGNAL_OFF);
-
- corgi_ssp_lcdtg_send(POWERREG0_ADRS, POWER0_COM_DCLK | POWER0_COM_DOUT | POWER0_DAC_OFF
- | POWER0_COM_OFF | POWER0_VCC5_OFF);
-
- corgi_ssp_lcdtg_send(POWERREG1_ADRS, POWER1_VW_OFF | POWER1_GVSS_OFF | POWER1_VDD_OFF);
-
- /* VDD(+8V), SVSS(-4V) ON */
- corgi_ssp_lcdtg_send(POWERREG1_ADRS, POWER1_VW_OFF | POWER1_GVSS_OFF | POWER1_VDD_ON);
- mdelay(3);
-
- /* DAC ON */
- corgi_ssp_lcdtg_send(POWERREG0_ADRS, POWER0_COM_DCLK | POWER0_COM_DOUT | POWER0_DAC_ON
- | POWER0_COM_OFF | POWER0_VCC5_OFF);
-
- /* INIB = H, INI = L */
- /* PICTL[0] = H , PICTL[1] = PICTL[2] = PICTL[4] = L */
- corgi_ssp_lcdtg_send(PICTRL_ADRS, PICTRL_INIT_STATE | PICTRL_COM_SIGNAL_OFF);
-
- /* Set Common Voltage */
- comadj = sharpsl_param.comadj;
- if (comadj < 0)
- comadj = DEFAULT_COMADJ;
- lcdtg_set_common_voltage((POWER0_DAC_ON | POWER0_COM_OFF | POWER0_VCC5_OFF), comadj);
-
- /* VCC5 ON, DAC ON */
- corgi_ssp_lcdtg_send(POWERREG0_ADRS, POWER0_COM_DCLK | POWER0_COM_DOUT | POWER0_DAC_ON |
- POWER0_COM_OFF | POWER0_VCC5_ON);
-
- /* GVSS(-8V) ON, VDD ON */
- corgi_ssp_lcdtg_send(POWERREG1_ADRS, POWER1_VW_OFF | POWER1_GVSS_ON | POWER1_VDD_ON);
- mdelay(2);
-
- /* COM SIGNAL ON (PICTL[3] = L) */
- corgi_ssp_lcdtg_send(PICTRL_ADRS, PICTRL_INIT_STATE);
-
- /* COM ON, DAC ON, VCC5_ON */
- corgi_ssp_lcdtg_send(POWERREG0_ADRS, POWER0_COM_DCLK | POWER0_COM_DOUT | POWER0_DAC_ON
- | POWER0_COM_ON | POWER0_VCC5_ON);
-
- /* VW ON, GVSS ON, VDD ON */
- corgi_ssp_lcdtg_send(POWERREG1_ADRS, POWER1_VW_ON | POWER1_GVSS_ON | POWER1_VDD_ON);
-
- /* Signals output enable */
- corgi_ssp_lcdtg_send(PICTRL_ADRS, 0);
-
- /* Set Phase Adjust */
- lcdtg_set_phadadj(mode);
-
- /* Initialize for Input Signals from ATI */
- corgi_ssp_lcdtg_send(POLCTRL_ADRS, POLCTRL_SYNC_POL_RISE | POLCTRL_EN_POL_RISE
- | POLCTRL_DATA_POL_RISE | POLCTRL_SYNC_ACT_L | POLCTRL_EN_ACT_H);
- udelay(1000);
-
- lcd_inited=1;
- } else {
- lcdtg_set_phadadj(mode);
- }
-
- switch(mode) {
- case 480:
- case 640:
- /* Set Lcd Resolution (VGA) */
- corgi_ssp_lcdtg_send(RESCTL_ADRS, RESCTL_VGA);
- break;
- case 240:
- case 320:
- default:
- /* Set Lcd Resolution (QVGA) */
- corgi_ssp_lcdtg_send(RESCTL_ADRS, RESCTL_QVGA);
- break;
- }
-}
-
-void corgi_lcdtg_suspend(void)
-{
- /* 60Hz x 2 frame = 16.7msec x 2 = 33.4 msec */
- mdelay(34);
-
- /* (1)VW OFF */
- corgi_ssp_lcdtg_send(POWERREG1_ADRS, POWER1_VW_OFF | POWER1_GVSS_ON | POWER1_VDD_ON);
-
- /* (2)COM OFF */
- corgi_ssp_lcdtg_send(PICTRL_ADRS, PICTRL_COM_SIGNAL_OFF);
- corgi_ssp_lcdtg_send(POWERREG0_ADRS, POWER0_DAC_ON | POWER0_COM_OFF | POWER0_VCC5_ON);
-
- /* (3)Set Common Voltage Bias 0V */
- lcdtg_set_common_voltage(POWER0_DAC_ON | POWER0_COM_OFF | POWER0_VCC5_ON, 0);
-
- /* (4)GVSS OFF */
- corgi_ssp_lcdtg_send(POWERREG1_ADRS, POWER1_VW_OFF | POWER1_GVSS_OFF | POWER1_VDD_ON);
-
- /* (5)VCC5 OFF */
- corgi_ssp_lcdtg_send(POWERREG0_ADRS, POWER0_DAC_ON | POWER0_COM_OFF | POWER0_VCC5_OFF);
-
- /* (6)Set PDWN, INIOFF, DACOFF */
- corgi_ssp_lcdtg_send(PICTRL_ADRS, PICTRL_INIOFF | PICTRL_DAC_SIGNAL_OFF |
- PICTRL_POWER_DOWN | PICTRL_COM_SIGNAL_OFF);
-
- /* (7)DAC OFF */
- corgi_ssp_lcdtg_send(POWERREG0_ADRS, POWER0_DAC_OFF | POWER0_COM_OFF | POWER0_VCC5_OFF);
-
- /* (8)VDD OFF */
- corgi_ssp_lcdtg_send(POWERREG1_ADRS, POWER1_VW_OFF | POWER1_GVSS_OFF | POWER1_VDD_OFF);
-
- lcd_inited = 0;
-}
-
diff --git a/arch/arm/mach-pxa/corgi_pm.c b/arch/arm/mach-pxa/corgi_pm.c
index d4a0733e905b..3f1dc74ac048 100644
--- a/arch/arm/mach-pxa/corgi_pm.c
+++ b/arch/arm/mach-pxa/corgi_pm.c
@@ -14,6 +14,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/delay.h>
+#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/apm-emulation.h>
@@ -25,7 +26,8 @@
#include <mach/sharpsl.h>
#include <mach/corgi.h>
#include <mach/pxa2xx-regs.h>
-#include <mach/pxa2xx-gpio.h>
+
+#include "generic.h"
#include "sharpsl.h"
#define SHARPSL_CHARGE_ON_VOLT 0x99 /* 2.9V */
@@ -35,87 +37,46 @@
#define SHARPSL_FATAL_ACIN_VOLT 182 /* 3.45V */
#define SHARPSL_FATAL_NOACIN_VOLT 170 /* 3.40V */
+static struct gpio charger_gpios[] = {
+ { CORGI_GPIO_ADC_TEMP_ON, GPIOF_OUT_INIT_LOW, "ADC Temp On" },
+ { CORGI_GPIO_CHRG_ON, GPIOF_OUT_INIT_LOW, "Charger On" },
+ { CORGI_GPIO_CHRG_UKN, GPIOF_OUT_INIT_LOW, "Charger Unknown" },
+ { CORGI_GPIO_KEY_INT, GPIOF_IN, "Key Interrupt" },
+};
+
static void corgi_charger_init(void)
{
- pxa_gpio_mode(CORGI_GPIO_ADC_TEMP_ON | GPIO_OUT);
- pxa_gpio_mode(CORGI_GPIO_CHRG_ON | GPIO_OUT);
- pxa_gpio_mode(CORGI_GPIO_CHRG_UKN | GPIO_OUT);
- pxa_gpio_mode(CORGI_GPIO_KEY_INT | GPIO_IN);
+ gpio_request_array(ARRAY_AND_SIZE(charger_gpios));
}
static void corgi_measure_temp(int on)
{
- if (on)
- GPSR(CORGI_GPIO_ADC_TEMP_ON) = GPIO_bit(CORGI_GPIO_ADC_TEMP_ON);
- else
- GPCR(CORGI_GPIO_ADC_TEMP_ON) = GPIO_bit(CORGI_GPIO_ADC_TEMP_ON);
+ gpio_set_value(CORGI_GPIO_ADC_TEMP_ON, on);
}
static void corgi_charge(int on)
{
if (on) {
if (machine_is_corgi() && (sharpsl_pm.flags & SHARPSL_SUSPENDED)) {
- GPCR(CORGI_GPIO_CHRG_ON) = GPIO_bit(CORGI_GPIO_CHRG_ON);
- GPSR(CORGI_GPIO_CHRG_UKN) = GPIO_bit(CORGI_GPIO_CHRG_UKN);
+ gpio_set_value(CORGI_GPIO_CHRG_ON, 0);
+ gpio_set_value(CORGI_GPIO_CHRG_UKN, 1);
} else {
- GPSR(CORGI_GPIO_CHRG_ON) = GPIO_bit(CORGI_GPIO_CHRG_ON);
- GPCR(CORGI_GPIO_CHRG_UKN) = GPIO_bit(CORGI_GPIO_CHRG_UKN);
+ gpio_set_value(CORGI_GPIO_CHRG_ON, 1);
+ gpio_set_value(CORGI_GPIO_CHRG_UKN, 0);
}
} else {
- GPCR(CORGI_GPIO_CHRG_ON) = GPIO_bit(CORGI_GPIO_CHRG_ON);
- GPCR(CORGI_GPIO_CHRG_UKN) = GPIO_bit(CORGI_GPIO_CHRG_UKN);
+ gpio_set_value(CORGI_GPIO_CHRG_ON, 0);
+ gpio_set_value(CORGI_GPIO_CHRG_UKN, 0);
}
}
static void corgi_discharge(int on)
{
- if (on)
- GPSR(CORGI_GPIO_DISCHARGE_ON) = GPIO_bit(CORGI_GPIO_DISCHARGE_ON);
- else
- GPCR(CORGI_GPIO_DISCHARGE_ON) = GPIO_bit(CORGI_GPIO_DISCHARGE_ON);
+ gpio_set_value(CORGI_GPIO_DISCHARGE_ON, on);
}
static void corgi_presuspend(void)
{
- int i;
- unsigned long wakeup_mask;
-
- /* charging , so CHARGE_ON bit is HIGH during OFF. */
- if (READ_GPIO_BIT(CORGI_GPIO_CHRG_ON))
- PGSR1 |= GPIO_bit(CORGI_GPIO_CHRG_ON);
- else
- PGSR1 &= ~GPIO_bit(CORGI_GPIO_CHRG_ON);
-
- if (READ_GPIO_BIT(CORGI_GPIO_LED_ORANGE))
- PGSR0 |= GPIO_bit(CORGI_GPIO_LED_ORANGE);
- else
- PGSR0 &= ~GPIO_bit(CORGI_GPIO_LED_ORANGE);
-
- if (READ_GPIO_BIT(CORGI_GPIO_CHRG_UKN))
- PGSR1 |= GPIO_bit(CORGI_GPIO_CHRG_UKN);
- else
- PGSR1 &= ~GPIO_bit(CORGI_GPIO_CHRG_UKN);
-
- /* Resume on keyboard power key */
- PGSR2 = (PGSR2 & ~CORGI_GPIO_ALL_STROBE_BIT) | CORGI_GPIO_STROBE_BIT(0);
-
- wakeup_mask = GPIO_bit(CORGI_GPIO_KEY_INT) | GPIO_bit(CORGI_GPIO_WAKEUP) | GPIO_bit(CORGI_GPIO_AC_IN) | GPIO_bit(CORGI_GPIO_CHRG_FULL);
-
- if (!machine_is_corgi())
- wakeup_mask |= GPIO_bit(CORGI_GPIO_MAIN_BAT_LOW);
-
- PWER = wakeup_mask | PWER_RTC;
- PRER = wakeup_mask;
- PFER = wakeup_mask;
-
- for (i = 0; i <=15; i++) {
- if (PRER & PFER & GPIO_bit(i)) {
- if (GPLR0 & GPIO_bit(i) )
- PRER &= ~GPIO_bit(i);
- else
- PFER &= ~GPIO_bit(i);
- }
- }
}
static void corgi_postsuspend(void)
diff --git a/arch/arm/mach-pxa/corgi_ssp.c b/arch/arm/mach-pxa/corgi_ssp.c
deleted file mode 100644
index 9347254f8bcf..000000000000
--- a/arch/arm/mach-pxa/corgi_ssp.c
+++ /dev/null
@@ -1,274 +0,0 @@
-/*
- * SSP control code for Sharp Corgi devices
- *
- * Copyright (c) 2004-2005 Richard Purdie
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/delay.h>
-#include <linux/platform_device.h>
-#include <mach/hardware.h>
-#include <asm/mach-types.h>
-
-#include <mach/ssp.h>
-#include <mach/pxa2xx-gpio.h>
-#include <mach/regs-ssp.h>
-#include "sharpsl.h"
-
-static DEFINE_SPINLOCK(corgi_ssp_lock);
-static struct ssp_dev corgi_ssp_dev;
-static struct ssp_state corgi_ssp_state;
-static struct corgissp_machinfo *ssp_machinfo;
-
-/*
- * There are three devices connected to the SSP interface:
- * 1. A touchscreen controller (TI ADS7846 compatible)
- * 2. An LCD controller (with some Backlight functionality)
- * 3. A battery monitoring IC (Maxim MAX1111)
- *
- * Each device uses a different speed/mode of communication.
- *
- * The touchscreen is very sensitive and the most frequently used
- * so the port is left configured for this.
- *
- * Devices are selected using Chip Selects on GPIOs.
- */
-
-/*
- * ADS7846 Routines
- */
-unsigned long corgi_ssp_ads7846_putget(ulong data)
-{
- unsigned long flag;
- u32 ret = 0;
-
- spin_lock_irqsave(&corgi_ssp_lock, flag);
- if (ssp_machinfo->cs_ads7846 >= 0)
- GPCR(ssp_machinfo->cs_ads7846) = GPIO_bit(ssp_machinfo->cs_ads7846);
-
- ssp_write_word(&corgi_ssp_dev,data);
- ssp_read_word(&corgi_ssp_dev, &ret);
-
- if (ssp_machinfo->cs_ads7846 >= 0)
- GPSR(ssp_machinfo->cs_ads7846) = GPIO_bit(ssp_machinfo->cs_ads7846);
- spin_unlock_irqrestore(&corgi_ssp_lock, flag);
-
- return ret;
-}
-
-/*
- * NOTE: These functions should always be called in interrupt context
- * and use the _lock and _unlock functions. They are very time sensitive.
- */
-void corgi_ssp_ads7846_lock(void)
-{
- spin_lock(&corgi_ssp_lock);
- if (ssp_machinfo->cs_ads7846 >= 0)
- GPCR(ssp_machinfo->cs_ads7846) = GPIO_bit(ssp_machinfo->cs_ads7846);
-}
-
-void corgi_ssp_ads7846_unlock(void)
-{
- if (ssp_machinfo->cs_ads7846 >= 0)
- GPSR(ssp_machinfo->cs_ads7846) = GPIO_bit(ssp_machinfo->cs_ads7846);
- spin_unlock(&corgi_ssp_lock);
-}
-
-void corgi_ssp_ads7846_put(ulong data)
-{
- ssp_write_word(&corgi_ssp_dev,data);
-}
-
-unsigned long corgi_ssp_ads7846_get(void)
-{
- u32 ret = 0;
- ssp_read_word(&corgi_ssp_dev, &ret);
- return ret;
-}
-
-EXPORT_SYMBOL(corgi_ssp_ads7846_putget);
-EXPORT_SYMBOL(corgi_ssp_ads7846_lock);
-EXPORT_SYMBOL(corgi_ssp_ads7846_unlock);
-EXPORT_SYMBOL(corgi_ssp_ads7846_put);
-EXPORT_SYMBOL(corgi_ssp_ads7846_get);
-
-
-/*
- * LCD/Backlight Routines
- */
-unsigned long corgi_ssp_dac_put(ulong data)
-{
- unsigned long flag, sscr1 = SSCR1_SPH;
- u32 tmp;
-
- spin_lock_irqsave(&corgi_ssp_lock, flag);
-
- if (machine_is_spitz() || machine_is_akita() || machine_is_borzoi())
- sscr1 = 0;
-
- ssp_disable(&corgi_ssp_dev);
- ssp_config(&corgi_ssp_dev, (SSCR0_Motorola | (SSCR0_DSS & 0x07 )), sscr1, 0, SSCR0_SerClkDiv(ssp_machinfo->clk_lcdcon));
- ssp_enable(&corgi_ssp_dev);
-
- if (ssp_machinfo->cs_lcdcon >= 0)
- GPCR(ssp_machinfo->cs_lcdcon) = GPIO_bit(ssp_machinfo->cs_lcdcon);
- ssp_write_word(&corgi_ssp_dev,data);
- /* Read null data back from device to prevent SSP overflow */
- ssp_read_word(&corgi_ssp_dev, &tmp);
- if (ssp_machinfo->cs_lcdcon >= 0)
- GPSR(ssp_machinfo->cs_lcdcon) = GPIO_bit(ssp_machinfo->cs_lcdcon);
-
- ssp_disable(&corgi_ssp_dev);
- ssp_config(&corgi_ssp_dev, (SSCR0_National | (SSCR0_DSS & 0x0b )), 0, 0, SSCR0_SerClkDiv(ssp_machinfo->clk_ads7846));
- ssp_enable(&corgi_ssp_dev);
-
- spin_unlock_irqrestore(&corgi_ssp_lock, flag);
-
- return 0;
-}
-
-void corgi_ssp_lcdtg_send(u8 adrs, u8 data)
-{
- corgi_ssp_dac_put(((adrs & 0x07) << 5) | (data & 0x1f));
-}
-
-void corgi_ssp_blduty_set(int duty)
-{
- corgi_ssp_lcdtg_send(0x02,duty);
-}
-
-EXPORT_SYMBOL(corgi_ssp_lcdtg_send);
-EXPORT_SYMBOL(corgi_ssp_blduty_set);
-
-/*
- * Max1111 Routines
- */
-int corgi_ssp_max1111_get(ulong data)
-{
- unsigned long flag;
- long voltage = 0, voltage1 = 0, voltage2 = 0;
-
- spin_lock_irqsave(&corgi_ssp_lock, flag);
- if (ssp_machinfo->cs_max1111 >= 0)
- GPCR(ssp_machinfo->cs_max1111) = GPIO_bit(ssp_machinfo->cs_max1111);
- ssp_disable(&corgi_ssp_dev);
- ssp_config(&corgi_ssp_dev, (SSCR0_Motorola | (SSCR0_DSS & 0x07 )), 0, 0, SSCR0_SerClkDiv(ssp_machinfo->clk_max1111));
- ssp_enable(&corgi_ssp_dev);
-
- udelay(1);
-
- /* TB1/RB1 */
- ssp_write_word(&corgi_ssp_dev,data);
- ssp_read_word(&corgi_ssp_dev, (u32*)&voltage1); /* null read */
-
- /* TB12/RB2 */
- ssp_write_word(&corgi_ssp_dev,0);
- ssp_read_word(&corgi_ssp_dev, (u32*)&voltage1);
-
- /* TB13/RB3*/
- ssp_write_word(&corgi_ssp_dev,0);
- ssp_read_word(&corgi_ssp_dev, (u32*)&voltage2);
-
- ssp_disable(&corgi_ssp_dev);
- ssp_config(&corgi_ssp_dev, (SSCR0_National | (SSCR0_DSS & 0x0b )), 0, 0, SSCR0_SerClkDiv(ssp_machinfo->clk_ads7846));
- ssp_enable(&corgi_ssp_dev);
- if (ssp_machinfo->cs_max1111 >= 0)
- GPSR(ssp_machinfo->cs_max1111) = GPIO_bit(ssp_machinfo->cs_max1111);
- spin_unlock_irqrestore(&corgi_ssp_lock, flag);
-
- if (voltage1 & 0xc0 || voltage2 & 0x3f)
- voltage = -1;
- else
- voltage = ((voltage1 << 2) & 0xfc) | ((voltage2 >> 6) & 0x03);
-
- return voltage;
-}
-
-EXPORT_SYMBOL(corgi_ssp_max1111_get);
-
-/*
- * Support Routines
- */
-
-void __init corgi_ssp_set_machinfo(struct corgissp_machinfo *machinfo)
-{
- ssp_machinfo = machinfo;
-}
-
-static int __devinit corgi_ssp_probe(struct platform_device *dev)
-{
- int ret;
-
- /* Chip Select - Disable All */
- if (ssp_machinfo->cs_lcdcon >= 0)
- pxa_gpio_mode(ssp_machinfo->cs_lcdcon | GPIO_OUT | GPIO_DFLT_HIGH);
- if (ssp_machinfo->cs_max1111 >= 0)
- pxa_gpio_mode(ssp_machinfo->cs_max1111 | GPIO_OUT | GPIO_DFLT_HIGH);
- if (ssp_machinfo->cs_ads7846 >= 0)
- pxa_gpio_mode(ssp_machinfo->cs_ads7846 | GPIO_OUT | GPIO_DFLT_HIGH);
-
- ret = ssp_init(&corgi_ssp_dev, ssp_machinfo->port, 0);
-
- if (ret)
- printk(KERN_ERR "Unable to register SSP handler!\n");
- else {
- ssp_disable(&corgi_ssp_dev);
- ssp_config(&corgi_ssp_dev, (SSCR0_National | (SSCR0_DSS & 0x0b )), 0, 0, SSCR0_SerClkDiv(ssp_machinfo->clk_ads7846));
- ssp_enable(&corgi_ssp_dev);
- }
-
- return ret;
-}
-
-static int corgi_ssp_remove(struct platform_device *dev)
-{
- ssp_exit(&corgi_ssp_dev);
- return 0;
-}
-
-static int corgi_ssp_suspend(struct platform_device *dev, pm_message_t state)
-{
- ssp_flush(&corgi_ssp_dev);
- ssp_save_state(&corgi_ssp_dev,&corgi_ssp_state);
-
- return 0;
-}
-
-static int corgi_ssp_resume(struct platform_device *dev)
-{
- if (ssp_machinfo->cs_lcdcon >= 0)
- GPSR(ssp_machinfo->cs_lcdcon) = GPIO_bit(ssp_machinfo->cs_lcdcon); /* High - Disable LCD Control/Timing Gen */
- if (ssp_machinfo->cs_max1111 >= 0)
- GPSR(ssp_machinfo->cs_max1111) = GPIO_bit(ssp_machinfo->cs_max1111); /* High - Disable MAX1111*/
- if (ssp_machinfo->cs_ads7846 >= 0)
- GPSR(ssp_machinfo->cs_ads7846) = GPIO_bit(ssp_machinfo->cs_ads7846); /* High - Disable ADS7846*/
- ssp_restore_state(&corgi_ssp_dev,&corgi_ssp_state);
- ssp_enable(&corgi_ssp_dev);
-
- return 0;
-}
-
-static struct platform_driver corgissp_driver = {
- .probe = corgi_ssp_probe,
- .remove = corgi_ssp_remove,
- .suspend = corgi_ssp_suspend,
- .resume = corgi_ssp_resume,
- .driver = {
- .name = "corgi-ssp",
- },
-};
-
-int __init corgi_ssp_init(void)
-{
- return platform_driver_register(&corgissp_driver);
-}
-
-arch_initcall(corgi_ssp_init);
diff --git a/arch/arm/mach-pxa/csb726.c b/arch/arm/mach-pxa/csb726.c
index 88575b87bd33..91fd4fea6a54 100644
--- a/arch/arm/mach-pxa/csb726.c
+++ b/arch/arm/mach-pxa/csb726.c
@@ -125,18 +125,9 @@ static unsigned long csb726_pin_config[] = {
GPIO118_I2C_SDA,
};
-static struct pxamci_platform_data csb726_mci_data;
-
-static int csb726_mci_init(struct device *dev,
- irq_handler_t detect, void *data)
-{
- csb726_mci_data.detect_delay = msecs_to_jiffies(500);
- return 0;
-}
-
static struct pxamci_platform_data csb726_mci = {
+ .detect_delay_ms = 500,
.ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34,
- .init = csb726_mci_init,
/* FIXME setpower */
.gpio_card_detect = CSB726_GPIO_MMC_DETECT,
.gpio_card_ro = CSB726_GPIO_MMC_RO,
diff --git a/arch/arm/mach-pxa/em-x270.c b/arch/arm/mach-pxa/em-x270.c
index aab04f33e49b..0517c17978f3 100644
--- a/arch/arm/mach-pxa/em-x270.c
+++ b/arch/arm/mach-pxa/em-x270.c
@@ -626,6 +626,7 @@ static int em_x270_mci_get_ro(struct device *dev)
}
static struct pxamci_platform_data em_x270_mci_platform_data = {
+ .detect_delay_ms = 250,
.ocr_mask = MMC_VDD_20_21|MMC_VDD_21_22|MMC_VDD_22_23|
MMC_VDD_24_25|MMC_VDD_25_26|MMC_VDD_26_27|
MMC_VDD_27_28|MMC_VDD_28_29|MMC_VDD_29_30|
@@ -643,7 +644,6 @@ static void __init em_x270_init_mmc(void)
if (machine_is_em_x270())
em_x270_mci_platform_data.get_ro = em_x270_mci_get_ro;
- em_x270_mci_platform_data.detect_delay = msecs_to_jiffies(250);
pxa_set_mci_info(&em_x270_mci_platform_data);
}
#else
diff --git a/arch/arm/mach-pxa/generic.c b/arch/arm/mach-pxa/generic.c
index 3126a35aa002..baabb3ce088e 100644
--- a/arch/arm/mach-pxa/generic.c
+++ b/arch/arm/mach-pxa/generic.c
@@ -28,7 +28,6 @@
#include <mach/reset.h>
#include <mach/gpio.h>
-#include <mach/pxa2xx-gpio.h>
#include "generic.h"
@@ -128,33 +127,3 @@ void __init pxa_map_io(void)
iotable_init(standard_io_desc, ARRAY_SIZE(standard_io_desc));
get_clk_frequency_khz(1);
}
-
-/*
- * Configure pins for GPIO or other functions
- */
-int pxa_gpio_mode(int gpio_mode)
-{
- unsigned long flags;
- int gpio = gpio_mode & GPIO_MD_MASK_NR;
- int fn = (gpio_mode & GPIO_MD_MASK_FN) >> 8;
- int gafr;
-
- if (gpio > pxa_last_gpio)
- return -EINVAL;
-
- local_irq_save(flags);
- if (gpio_mode & GPIO_DFLT_LOW)
- GPCR(gpio) = GPIO_bit(gpio);
- else if (gpio_mode & GPIO_DFLT_HIGH)
- GPSR(gpio) = GPIO_bit(gpio);
- if (gpio_mode & GPIO_MD_MASK_DIR)
- GPDR(gpio) |= GPIO_bit(gpio);
- else
- GPDR(gpio) &= ~GPIO_bit(gpio);
- gafr = GAFR(gpio) & ~(0x3 << (((gpio) & 0xf)*2));
- GAFR(gpio) = gafr | (fn << (((gpio) & 0xf)*2));
- local_irq_restore(flags);
-
- return 0;
-}
-EXPORT_SYMBOL(pxa_gpio_mode);
diff --git a/arch/arm/mach-pxa/icontrol.c b/arch/arm/mach-pxa/icontrol.c
index 771137fc1a82..5ccb0ceff6c4 100644
--- a/arch/arm/mach-pxa/icontrol.c
+++ b/arch/arm/mach-pxa/icontrol.c
@@ -73,7 +73,6 @@ static struct pxa2xx_spi_chip mcp251x_chip_info4 = {
static struct mcp251x_platform_data mcp251x_info = {
.oscillator_frequency = 16E6,
- .model = CAN_MCP251X_MCP2515,
.board_specific_setup = NULL,
.power_enable = NULL,
.transceiver_enable = NULL
@@ -81,7 +80,7 @@ static struct mcp251x_platform_data mcp251x_info = {
static struct spi_board_info mcp251x_board_info[] = {
{
- .modalias = "mcp251x",
+ .modalias = "mcp2515",
.max_speed_hz = 6500000,
.bus_num = 3,
.chip_select = 0,
@@ -90,7 +89,7 @@ static struct spi_board_info mcp251x_board_info[] = {
.irq = gpio_to_irq(ICONTROL_MCP251x_nIRQ1)
},
{
- .modalias = "mcp251x",
+ .modalias = "mcp2515",
.max_speed_hz = 6500000,
.bus_num = 3,
.chip_select = 1,
@@ -99,7 +98,7 @@ static struct spi_board_info mcp251x_board_info[] = {
.irq = gpio_to_irq(ICONTROL_MCP251x_nIRQ2)
},
{
- .modalias = "mcp251x",
+ .modalias = "mcp2515",
.max_speed_hz = 6500000,
.bus_num = 4,
.chip_select = 0,
@@ -108,7 +107,7 @@ static struct spi_board_info mcp251x_board_info[] = {
.irq = gpio_to_irq(ICONTROL_MCP251x_nIRQ3)
},
{
- .modalias = "mcp251x",
+ .modalias = "mcp2515",
.max_speed_hz = 6500000,
.bus_num = 4,
.chip_select = 1,
diff --git a/arch/arm/mach-pxa/include/mach/corgi.h b/arch/arm/mach-pxa/include/mach/corgi.h
index 7239281788de..585970ef08ce 100644
--- a/arch/arm/mach-pxa/include/mach/corgi.h
+++ b/arch/arm/mach-pxa/include/mach/corgi.h
@@ -113,7 +113,6 @@
* Shared data structures
*/
extern struct platform_device corgiscoop_device;
-extern struct platform_device corgissp_device;
#endif /* __ASM_ARCH_CORGI_H */
diff --git a/arch/arm/mach-pxa/include/mach/mfp-pxa2xx.h b/arch/arm/mach-pxa/include/mach/mfp-pxa2xx.h
index 658b28ed129b..c54cef25895c 100644
--- a/arch/arm/mach-pxa/include/mach/mfp-pxa2xx.h
+++ b/arch/arm/mach-pxa/include/mach/mfp-pxa2xx.h
@@ -25,6 +25,8 @@
#define MFP_DIR(x) (((x) >> 23) & 0x1)
#define MFP_LPM_CAN_WAKEUP (0x1 << 24)
+#define MFP_LPM_KEEP_OUTPUT (0x1 << 25)
+
#define WAKEUP_ON_EDGE_RISE (MFP_LPM_CAN_WAKEUP | MFP_LPM_EDGE_RISE)
#define WAKEUP_ON_EDGE_FALL (MFP_LPM_CAN_WAKEUP | MFP_LPM_EDGE_FALL)
#define WAKEUP_ON_EDGE_BOTH (MFP_LPM_CAN_WAKEUP | MFP_LPM_EDGE_BOTH)
diff --git a/arch/arm/mach-pxa/include/mach/mmc.h b/arch/arm/mach-pxa/include/mach/mmc.h
index 02a69dc2ee63..9eb515bb799d 100644
--- a/arch/arm/mach-pxa/include/mach/mmc.h
+++ b/arch/arm/mach-pxa/include/mach/mmc.h
@@ -9,7 +9,7 @@ struct mmc_host;
struct pxamci_platform_data {
unsigned int ocr_mask; /* available voltages */
- unsigned long detect_delay; /* delay in jiffies before detecting cards after interrupt */
+ unsigned long detect_delay_ms; /* delay in milliseconds before detecting cards after interrupt */
int (*init)(struct device *, irq_handler_t , void *);
int (*get_ro)(struct device *);
void (*setpower)(struct device *, unsigned int);
diff --git a/arch/arm/mach-pxa/include/mach/pxa2xx-gpio.h b/arch/arm/mach-pxa/include/mach/pxa2xx-gpio.h
deleted file mode 100644
index 1209c44aa6f1..000000000000
--- a/arch/arm/mach-pxa/include/mach/pxa2xx-gpio.h
+++ /dev/null
@@ -1,375 +0,0 @@
-#ifndef __ASM_ARCH_PXA2XX_GPIO_H
-#define __ASM_ARCH_PXA2XX_GPIO_H
-
-#warning Please use mfp-pxa2[57]x.h instead of pxa2xx-gpio.h
-
-#include <mach/gpio.h>
-
-/* GPIO alternate function assignments */
-
-#define GPIO1_RST 1 /* reset */
-#define GPIO6_MMCCLK 6 /* MMC Clock */
-#define GPIO7_48MHz 7 /* 48 MHz clock output */
-#define GPIO8_MMCCS0 8 /* MMC Chip Select 0 */
-#define GPIO9_MMCCS1 9 /* MMC Chip Select 1 */
-#define GPIO10_RTCCLK 10 /* real time clock (1 Hz) */
-#define GPIO11_3_6MHz 11 /* 3.6 MHz oscillator out */
-#define GPIO12_32KHz 12 /* 32 kHz out */
-#define GPIO12_CIF_DD_7 12 /* Camera data pin 7 */
-#define GPIO13_MBGNT 13 /* memory controller grant */
-#define GPIO14_MBREQ 14 /* alternate bus master request */
-#define GPIO15_nCS_1 15 /* chip select 1 */
-#define GPIO16_PWM0 16 /* PWM0 output */
-#define GPIO17_PWM1 17 /* PWM1 output */
-#define GPIO17_CIF_DD_6 17 /* Camera data pin 6 */
-#define GPIO18_RDY 18 /* Ext. Bus Ready */
-#define GPIO19_DREQ1 19 /* External DMA Request */
-#define GPIO20_DREQ0 20 /* External DMA Request */
-#define GPIO23_SCLK 23 /* SSP clock */
-#define GPIO23_CIF_MCLK 23 /* Camera Master Clock */
-#define GPIO24_SFRM 24 /* SSP Frame */
-#define GPIO24_CIF_FV 24 /* Camera frame start signal */
-#define GPIO25_STXD 25 /* SSP transmit */
-#define GPIO25_CIF_LV 25 /* Camera line start signal */
-#define GPIO26_SRXD 26 /* SSP receive */
-#define GPIO26_CIF_PCLK 26 /* Camera Pixel Clock */
-#define GPIO27_SEXTCLK 27 /* SSP ext_clk */
-#define GPIO27_CIF_DD_0 27 /* Camera data pin 0 */
-#define GPIO28_BITCLK 28 /* AC97/I2S bit_clk */
-#define GPIO29_SDATA_IN 29 /* AC97 Sdata_in0 / I2S Sdata_in */
-#define GPIO30_SDATA_OUT 30 /* AC97/I2S Sdata_out */
-#define GPIO31_SYNC 31 /* AC97/I2S sync */
-#define GPIO32_SDATA_IN1 32 /* AC97 Sdata_in1 */
-#define GPIO32_SYSCLK 32 /* I2S System Clock */
-#define GPIO32_MMCCLK 32 /* MMC Clock (PXA270) */
-#define GPIO33_nCS_5 33 /* chip select 5 */
-#define GPIO34_FFRXD 34 /* FFUART receive */
-#define GPIO34_MMCCS0 34 /* MMC Chip Select 0 */
-#define GPIO35_FFCTS 35 /* FFUART Clear to send */
-#define GPIO36_FFDCD 36 /* FFUART Data carrier detect */
-#define GPIO37_FFDSR 37 /* FFUART data set ready */
-#define GPIO38_FFRI 38 /* FFUART Ring Indicator */
-#define GPIO39_MMCCS1 39 /* MMC Chip Select 1 */
-#define GPIO39_FFTXD 39 /* FFUART transmit data */
-#define GPIO40_FFDTR 40 /* FFUART data terminal Ready */
-#define GPIO41_FFRTS 41 /* FFUART request to send */
-#define GPIO42_BTRXD 42 /* BTUART receive data */
-#define GPIO42_HWRXD 42 /* HWUART receive data */
-#define GPIO42_CIF_MCLK 42 /* Camera Master Clock */
-#define GPIO43_BTTXD 43 /* BTUART transmit data */
-#define GPIO43_HWTXD 43 /* HWUART transmit data */
-#define GPIO43_CIF_FV 43 /* Camera frame start signal */
-#define GPIO44_BTCTS 44 /* BTUART clear to send */
-#define GPIO44_HWCTS 44 /* HWUART clear to send */
-#define GPIO44_CIF_LV 44 /* Camera line start signal */
-#define GPIO45_BTRTS 45 /* BTUART request to send */
-#define GPIO45_HWRTS 45 /* HWUART request to send */
-#define GPIO45_AC97_SYSCLK 45 /* AC97 System Clock */
-#define GPIO45_CIF_PCLK 45 /* Camera Pixel Clock */
-#define GPIO46_ICPRXD 46 /* ICP receive data */
-#define GPIO46_STRXD 46 /* STD_UART receive data */
-#define GPIO47_ICPTXD 47 /* ICP transmit data */
-#define GPIO47_STTXD 47 /* STD_UART transmit data */
-#define GPIO47_CIF_DD_0 47 /* Camera data pin 0 */
-#define GPIO48_nPOE 48 /* Output Enable for Card Space */
-#define GPIO48_CIF_DD_5 48 /* Camera data pin 5 */
-#define GPIO49_nPWE 49 /* Write Enable for Card Space */
-#define GPIO50_nPIOR 50 /* I/O Read for Card Space */
-#define GPIO50_CIF_DD_3 50 /* Camera data pin 3 */
-#define GPIO51_nPIOW 51 /* I/O Write for Card Space */
-#define GPIO51_CIF_DD_2 51 /* Camera data pin 2 */
-#define GPIO52_nPCE_1 52 /* Card Enable for Card Space */
-#define GPIO52_CIF_DD_4 52 /* Camera data pin 4 */
-#define GPIO53_nPCE_2 53 /* Card Enable for Card Space */
-#define GPIO53_MMCCLK 53 /* MMC Clock */
-#define GPIO53_CIF_MCLK 53 /* Camera Master Clock */
-#define GPIO54_MMCCLK 54 /* MMC Clock */
-#define GPIO54_pSKTSEL 54 /* Socket Select for Card Space */
-#define GPIO54_nPCE_2 54 /* Card Enable for Card Space (PXA27x) */
-#define GPIO54_CIF_PCLK 54 /* Camera Pixel Clock */
-#define GPIO55_nPREG 55 /* Card Address bit 26 */
-#define GPIO55_CIF_DD_1 55 /* Camera data pin 1 */
-#define GPIO56_nPWAIT 56 /* Wait signal for Card Space */
-#define GPIO57_nIOIS16 57 /* Bus Width select for I/O Card Space */
-#define GPIO58_LDD_0 58 /* LCD data pin 0 */
-#define GPIO59_LDD_1 59 /* LCD data pin 1 */
-#define GPIO60_LDD_2 60 /* LCD data pin 2 */
-#define GPIO61_LDD_3 61 /* LCD data pin 3 */
-#define GPIO62_LDD_4 62 /* LCD data pin 4 */
-#define GPIO63_LDD_5 63 /* LCD data pin 5 */
-#define GPIO64_LDD_6 64 /* LCD data pin 6 */
-#define GPIO65_LDD_7 65 /* LCD data pin 7 */
-#define GPIO66_LDD_8 66 /* LCD data pin 8 */
-#define GPIO66_MBREQ 66 /* alternate bus master req */
-#define GPIO67_LDD_9 67 /* LCD data pin 9 */
-#define GPIO67_MMCCS0 67 /* MMC Chip Select 0 */
-#define GPIO68_LDD_10 68 /* LCD data pin 10 */
-#define GPIO68_MMCCS1 68 /* MMC Chip Select 1 */
-#define GPIO69_LDD_11 69 /* LCD data pin 11 */
-#define GPIO69_MMCCLK 69 /* MMC_CLK */
-#define GPIO70_LDD_12 70 /* LCD data pin 12 */
-#define GPIO70_RTCCLK 70 /* Real Time clock (1 Hz) */
-#define GPIO71_LDD_13 71 /* LCD data pin 13 */
-#define GPIO71_3_6MHz 71 /* 3.6 MHz Oscillator clock */
-#define GPIO72_LDD_14 72 /* LCD data pin 14 */
-#define GPIO72_32kHz 72 /* 32 kHz clock */
-#define GPIO73_LDD_15 73 /* LCD data pin 15 */
-#define GPIO73_MBGNT 73 /* Memory controller grant */
-#define GPIO74_LCD_FCLK 74 /* LCD Frame clock */
-#define GPIO75_LCD_LCLK 75 /* LCD line clock */
-#define GPIO76_LCD_PCLK 76 /* LCD Pixel clock */
-#define GPIO77_LCD_ACBIAS 77 /* LCD AC Bias */
-#define GPIO78_nCS_2 78 /* chip select 2 */
-#define GPIO79_nCS_3 79 /* chip select 3 */
-#define GPIO80_nCS_4 80 /* chip select 4 */
-#define GPIO81_NSCLK 81 /* NSSP clock */
-#define GPIO81_CIF_DD_0 81 /* Camera data pin 0 */
-#define GPIO82_NSFRM 82 /* NSSP Frame */
-#define GPIO82_CIF_DD_5 82 /* Camera data pin 5 */
-#define GPIO83_NSTXD 83 /* NSSP transmit */
-#define GPIO83_CIF_DD_4 83 /* Camera data pin 4 */
-#define GPIO84_NSRXD 84 /* NSSP receive */
-#define GPIO84_CIF_FV 84 /* Camera frame start signal */
-#define GPIO85_nPCE_1 85 /* Card Enable for Card Space (PXA27x) */
-#define GPIO85_CIF_LV 85 /* Camera line start signal */
-#define GPIO90_CIF_DD_4 90 /* Camera data pin 4 */
-#define GPIO91_CIF_DD_5 91 /* Camera data pin 5 */
-#define GPIO92_MMCDAT0 92 /* MMC DAT0 (PXA27x) */
-#define GPIO93_CIF_DD_6 93 /* Camera data pin 6 */
-#define GPIO94_CIF_DD_5 94 /* Camera data pin 5 */
-#define GPIO95_CIF_DD_4 95 /* Camera data pin 4 */
-#define GPIO96_FFRXD 96 /* FFUART recieve */
-#define GPIO98_FFRTS 98 /* FFUART request to send */
-#define GPIO98_CIF_DD_0 98 /* Camera data pin 0 */
-#define GPIO99_FFTXD 99 /* FFUART transmit data */
-#define GPIO100_FFCTS 100 /* FFUART Clear to send */
-#define GPIO102_nPCE_1 102 /* PCMCIA (PXA27x) */
-#define GPIO103_CIF_DD_3 103 /* Camera data pin 3 */
-#define GPIO104_CIF_DD_2 104 /* Camera data pin 2 */
-#define GPIO105_CIF_DD_1 105 /* Camera data pin 1 */
-#define GPIO106_CIF_DD_9 106 /* Camera data pin 9 */
-#define GPIO107_CIF_DD_8 107 /* Camera data pin 8 */
-#define GPIO108_CIF_DD_7 108 /* Camera data pin 7 */
-#define GPIO109_MMCDAT1 109 /* MMC DAT1 (PXA27x) */
-#define GPIO110_MMCDAT2 110 /* MMC DAT2 (PXA27x) */
-#define GPIO110_MMCCS0 110 /* MMC Chip Select 0 (PXA27x) */
-#define GPIO111_MMCDAT3 111 /* MMC DAT3 (PXA27x) */
-#define GPIO111_MMCCS1 111 /* MMC Chip Select 1 (PXA27x) */
-#define GPIO112_MMCCMD 112 /* MMC CMD (PXA27x) */
-#define GPIO113_I2S_SYSCLK 113 /* I2S System Clock (PXA27x) */
-#define GPIO113_AC97_RESET_N 113 /* AC97 NRESET on (PXA27x) */
-#define GPIO114_CIF_DD_1 114 /* Camera data pin 1 */
-#define GPIO115_CIF_DD_3 115 /* Camera data pin 3 */
-#define GPIO116_CIF_DD_2 116 /* Camera data pin 2 */
-
-/* GPIO alternate function mode & direction */
-
-#define GPIO_IN 0x000
-#define GPIO_OUT 0x080
-#define GPIO_ALT_FN_1_IN 0x100
-#define GPIO_ALT_FN_1_OUT 0x180
-#define GPIO_ALT_FN_2_IN 0x200
-#define GPIO_ALT_FN_2_OUT 0x280
-#define GPIO_ALT_FN_3_IN 0x300
-#define GPIO_ALT_FN_3_OUT 0x380
-#define GPIO_MD_MASK_NR 0x07f
-#define GPIO_MD_MASK_DIR 0x080
-#define GPIO_MD_MASK_FN 0x300
-#define GPIO_DFLT_LOW 0x400
-#define GPIO_DFLT_HIGH 0x800
-
-#define GPIO1_RTS_MD ( 1 | GPIO_ALT_FN_1_IN)
-#define GPIO6_MMCCLK_MD ( 6 | GPIO_ALT_FN_1_OUT)
-#define GPIO7_48MHz_MD ( 7 | GPIO_ALT_FN_1_OUT)
-#define GPIO8_MMCCS0_MD ( 8 | GPIO_ALT_FN_1_OUT)
-#define GPIO9_MMCCS1_MD ( 9 | GPIO_ALT_FN_1_OUT)
-#define GPIO10_RTCCLK_MD (10 | GPIO_ALT_FN_1_OUT)
-#define GPIO11_3_6MHz_MD (11 | GPIO_ALT_FN_1_OUT)
-#define GPIO12_32KHz_MD (12 | GPIO_ALT_FN_1_OUT)
-#define GPIO12_CIF_DD_7_MD (12 | GPIO_ALT_FN_2_IN)
-#define GPIO13_MBGNT_MD (13 | GPIO_ALT_FN_2_OUT)
-#define GPIO14_MBREQ_MD (14 | GPIO_ALT_FN_1_IN)
-#define GPIO15_nCS_1_MD (15 | GPIO_ALT_FN_2_OUT)
-#define GPIO16_PWM0_MD (16 | GPIO_ALT_FN_2_OUT)
-#define GPIO17_PWM1_MD (17 | GPIO_ALT_FN_2_OUT)
-#define GPIO17_CIF_DD_6_MD (17 | GPIO_ALT_FN_2_IN)
-#define GPIO18_RDY_MD (18 | GPIO_ALT_FN_1_IN)
-#define GPIO19_DREQ1_MD (19 | GPIO_ALT_FN_1_IN)
-#define GPIO20_DREQ0_MD (20 | GPIO_ALT_FN_1_IN)
-#define GPIO23_CIF_MCLK_MD (23 | GPIO_ALT_FN_1_OUT)
-#define GPIO23_SCLK_MD (23 | GPIO_ALT_FN_2_OUT)
-#define GPIO24_CIF_FV_MD (24 | GPIO_ALT_FN_1_OUT)
-#define GPIO24_SFRM_MD (24 | GPIO_ALT_FN_2_OUT)
-#define GPIO25_CIF_LV_MD (25 | GPIO_ALT_FN_1_OUT)
-#define GPIO25_STXD_MD (25 | GPIO_ALT_FN_2_OUT)
-#define GPIO26_SRXD_MD (26 | GPIO_ALT_FN_1_IN)
-#define GPIO26_CIF_PCLK_MD (26 | GPIO_ALT_FN_2_IN)
-#define GPIO27_SEXTCLK_MD (27 | GPIO_ALT_FN_1_IN)
-#define GPIO27_CIF_DD_0_MD (27 | GPIO_ALT_FN_3_IN)
-#define GPIO28_BITCLK_AC97_MD (28 | GPIO_ALT_FN_1_IN)
-#define GPIO28_BITCLK_IN_I2S_MD (28 | GPIO_ALT_FN_2_IN)
-#define GPIO28_BITCLK_OUT_I2S_MD (28 | GPIO_ALT_FN_1_OUT)
-#define GPIO29_SDATA_IN_AC97_MD (29 | GPIO_ALT_FN_1_IN)
-#define GPIO29_SDATA_IN_I2S_MD (29 | GPIO_ALT_FN_2_IN)
-#define GPIO30_SDATA_OUT_AC97_MD (30 | GPIO_ALT_FN_2_OUT)
-#define GPIO30_SDATA_OUT_I2S_MD (30 | GPIO_ALT_FN_1_OUT)
-#define GPIO31_SYNC_I2S_MD (31 | GPIO_ALT_FN_1_OUT)
-#define GPIO31_SYNC_AC97_MD (31 | GPIO_ALT_FN_2_OUT)
-#define GPIO32_SDATA_IN1_AC97_MD (32 | GPIO_ALT_FN_1_IN)
-#define GPIO32_SYSCLK_I2S_MD (32 | GPIO_ALT_FN_1_OUT)
-#define GPIO32_MMCCLK_MD (32 | GPIO_ALT_FN_2_OUT)
-#define GPIO33_nCS_5_MD (33 | GPIO_ALT_FN_2_OUT)
-#define GPIO34_FFRXD_MD (34 | GPIO_ALT_FN_1_IN)
-#define GPIO34_MMCCS0_MD (34 | GPIO_ALT_FN_2_OUT)
-#define GPIO35_FFCTS_MD (35 | GPIO_ALT_FN_1_IN)
-#define GPIO35_KP_MKOUT6_MD (35 | GPIO_ALT_FN_2_OUT)
-#define GPIO36_FFDCD_MD (36 | GPIO_ALT_FN_1_IN)
-#define GPIO37_FFDSR_MD (37 | GPIO_ALT_FN_1_IN)
-#define GPIO38_FFRI_MD (38 | GPIO_ALT_FN_1_IN)
-#define GPIO39_MMCCS1_MD (39 | GPIO_ALT_FN_1_OUT)
-#define GPIO39_FFTXD_MD (39 | GPIO_ALT_FN_2_OUT)
-#define GPIO40_FFDTR_MD (40 | GPIO_ALT_FN_2_OUT)
-#define GPIO41_FFRTS_MD (41 | GPIO_ALT_FN_2_OUT)
-#define GPIO41_KP_MKOUT7_MD (41 | GPIO_ALT_FN_1_OUT)
-#define GPIO42_BTRXD_MD (42 | GPIO_ALT_FN_1_IN)
-#define GPIO42_HWRXD_MD (42 | GPIO_ALT_FN_3_IN)
-#define GPIO42_CIF_MCLK_MD (42 | GPIO_ALT_FN_3_OUT)
-#define GPIO43_BTTXD_MD (43 | GPIO_ALT_FN_2_OUT)
-#define GPIO43_HWTXD_MD (43 | GPIO_ALT_FN_3_OUT)
-#define GPIO43_CIF_FV_MD (43 | GPIO_ALT_FN_3_OUT)
-#define GPIO44_BTCTS_MD (44 | GPIO_ALT_FN_1_IN)
-#define GPIO44_HWCTS_MD (44 | GPIO_ALT_FN_3_IN)
-#define GPIO44_CIF_LV_MD (44 | GPIO_ALT_FN_3_OUT)
-#define GPIO45_CIF_PCLK_MD (45 | GPIO_ALT_FN_3_IN)
-#define GPIO45_BTRTS_MD (45 | GPIO_ALT_FN_2_OUT)
-#define GPIO45_HWRTS_MD (45 | GPIO_ALT_FN_3_OUT)
-#define GPIO45_SYSCLK_AC97_MD (45 | GPIO_ALT_FN_1_OUT)
-#define GPIO46_ICPRXD_MD (46 | GPIO_ALT_FN_1_IN)
-#define GPIO46_STRXD_MD (46 | GPIO_ALT_FN_2_IN)
-#define GPIO47_CIF_DD_0_MD (47 | GPIO_ALT_FN_1_IN)
-#define GPIO47_ICPTXD_MD (47 | GPIO_ALT_FN_2_OUT)
-#define GPIO47_STTXD_MD (47 | GPIO_ALT_FN_1_OUT)
-#define GPIO48_CIF_DD_5_MD (48 | GPIO_ALT_FN_1_IN)
-#define GPIO48_nPOE_MD (48 | GPIO_ALT_FN_2_OUT)
-#define GPIO48_HWTXD_MD (48 | GPIO_ALT_FN_1_OUT)
-#define GPIO48_nPOE_MD (48 | GPIO_ALT_FN_2_OUT)
-#define GPIO49_HWRXD_MD (49 | GPIO_ALT_FN_1_IN)
-#define GPIO49_nPWE_MD (49 | GPIO_ALT_FN_2_OUT)
-#define GPIO50_CIF_DD_3_MD (50 | GPIO_ALT_FN_1_IN)
-#define GPIO50_nPIOR_MD (50 | GPIO_ALT_FN_2_OUT)
-#define GPIO50_HWCTS_MD (50 | GPIO_ALT_FN_1_IN)
-#define GPIO50_CIF_DD_3_MD (50 | GPIO_ALT_FN_1_IN)
-#define GPIO51_CIF_DD_2_MD (51 | GPIO_ALT_FN_1_IN)
-#define GPIO51_nPIOW_MD (51 | GPIO_ALT_FN_2_OUT)
-#define GPIO51_HWRTS_MD (51 | GPIO_ALT_FN_1_OUT)
-#define GPIO51_CIF_DD_2_MD (51 | GPIO_ALT_FN_1_IN)
-#define GPIO52_nPCE_1_MD (52 | GPIO_ALT_FN_2_OUT)
-#define GPIO52_CIF_DD_4_MD (52 | GPIO_ALT_FN_1_IN)
-#define GPIO53_nPCE_2_MD (53 | GPIO_ALT_FN_2_OUT)
-#define GPIO53_MMCCLK_MD (53 | GPIO_ALT_FN_1_OUT)
-#define GPIO53_CIF_MCLK_MD (53 | GPIO_ALT_FN_2_OUT)
-#define GPIO54_MMCCLK_MD (54 | GPIO_ALT_FN_1_OUT)
-#define GPIO54_nPCE_2_MD (54 | GPIO_ALT_FN_2_OUT)
-#define GPIO54_pSKTSEL_MD (54 | GPIO_ALT_FN_2_OUT)
-#define GPIO54_CIF_PCLK_MD (54 | GPIO_ALT_FN_3_IN)
-#define GPIO55_nPREG_MD (55 | GPIO_ALT_FN_2_OUT)
-#define GPIO55_CIF_DD_1_MD (55 | GPIO_ALT_FN_1_IN)
-#define GPIO56_nPWAIT_MD (56 | GPIO_ALT_FN_1_IN)
-#define GPIO57_nIOIS16_MD (57 | GPIO_ALT_FN_1_IN)
-#define GPIO58_LDD_0_MD (58 | GPIO_ALT_FN_2_OUT)
-#define GPIO59_LDD_1_MD (59 | GPIO_ALT_FN_2_OUT)
-#define GPIO60_LDD_2_MD (60 | GPIO_ALT_FN_2_OUT)
-#define GPIO61_LDD_3_MD (61 | GPIO_ALT_FN_2_OUT)
-#define GPIO62_LDD_4_MD (62 | GPIO_ALT_FN_2_OUT)
-#define GPIO63_LDD_5_MD (63 | GPIO_ALT_FN_2_OUT)
-#define GPIO64_LDD_6_MD (64 | GPIO_ALT_FN_2_OUT)
-#define GPIO65_LDD_7_MD (65 | GPIO_ALT_FN_2_OUT)
-#define GPIO66_LDD_8_MD (66 | GPIO_ALT_FN_2_OUT)
-#define GPIO66_MBREQ_MD (66 | GPIO_ALT_FN_1_IN)
-#define GPIO67_LDD_9_MD (67 | GPIO_ALT_FN_2_OUT)
-#define GPIO67_MMCCS0_MD (67 | GPIO_ALT_FN_1_OUT)
-#define GPIO68_LDD_10_MD (68 | GPIO_ALT_FN_2_OUT)
-#define GPIO68_MMCCS1_MD (68 | GPIO_ALT_FN_1_OUT)
-#define GPIO69_LDD_11_MD (69 | GPIO_ALT_FN_2_OUT)
-#define GPIO69_MMCCLK_MD (69 | GPIO_ALT_FN_1_OUT)
-#define GPIO70_LDD_12_MD (70 | GPIO_ALT_FN_2_OUT)
-#define GPIO70_RTCCLK_MD (70 | GPIO_ALT_FN_1_OUT)
-#define GPIO71_LDD_13_MD (71 | GPIO_ALT_FN_2_OUT)
-#define GPIO71_3_6MHz_MD (71 | GPIO_ALT_FN_1_OUT)
-#define GPIO72_LDD_14_MD (72 | GPIO_ALT_FN_2_OUT)
-#define GPIO72_32kHz_MD (72 | GPIO_ALT_FN_1_OUT)
-#define GPIO73_LDD_15_MD (73 | GPIO_ALT_FN_2_OUT)
-#define GPIO73_MBGNT_MD (73 | GPIO_ALT_FN_1_OUT)
-#define GPIO74_LCD_FCLK_MD (74 | GPIO_ALT_FN_2_OUT)
-#define GPIO75_LCD_LCLK_MD (75 | GPIO_ALT_FN_2_OUT)
-#define GPIO76_LCD_PCLK_MD (76 | GPIO_ALT_FN_2_OUT)
-#define GPIO77_LCD_ACBIAS_MD (77 | GPIO_ALT_FN_2_OUT)
-#define GPIO78_nCS_2_MD (78 | GPIO_ALT_FN_2_OUT)
-#define GPIO78_nPCE_2_MD (78 | GPIO_ALT_FN_1_OUT)
-#define GPIO79_nCS_3_MD (79 | GPIO_ALT_FN_2_OUT)
-#define GPIO79_pSKTSEL_MD (79 | GPIO_ALT_FN_1_OUT)
-#define GPIO80_nCS_4_MD (80 | GPIO_ALT_FN_2_OUT)
-#define GPIO81_NSSP_CLK_OUT (81 | GPIO_ALT_FN_1_OUT)
-#define GPIO81_NSSP_CLK_IN (81 | GPIO_ALT_FN_1_IN)
-#define GPIO81_CIF_DD_0_MD (81 | GPIO_ALT_FN_2_IN)
-#define GPIO82_NSSP_FRM_OUT (82 | GPIO_ALT_FN_1_OUT)
-#define GPIO82_NSSP_FRM_IN (82 | GPIO_ALT_FN_1_IN)
-#define GPIO82_CIF_DD_5_MD (82 | GPIO_ALT_FN_3_IN)
-#define GPIO83_NSSP_TX (83 | GPIO_ALT_FN_1_OUT)
-#define GPIO83_NSSP_RX (83 | GPIO_ALT_FN_2_IN)
-#define GPIO83_CIF_DD_4_MD (83 | GPIO_ALT_FN_3_IN)
-#define GPIO84_NSSP_TX (84 | GPIO_ALT_FN_1_OUT)
-#define GPIO84_NSSP_RX (84 | GPIO_ALT_FN_2_IN)
-#define GPIO84_CIF_FV_MD (84 | GPIO_ALT_FN_3_IN)
-#define GPIO85_nPCE_1_MD (85 | GPIO_ALT_FN_1_OUT)
-#define GPIO85_CIF_LV_MD (85 | GPIO_ALT_FN_3_IN)
-#define GPIO86_nPCE_1_MD (86 | GPIO_ALT_FN_1_OUT)
-#define GPIO88_USBH1_PWR_MD (88 | GPIO_ALT_FN_1_IN)
-#define GPIO89_USBH1_PEN_MD (89 | GPIO_ALT_FN_2_OUT)
-#define GPIO90_CIF_DD_4_MD (90 | GPIO_ALT_FN_3_IN)
-#define GPIO91_CIF_DD_5_MD (91 | GPIO_ALT_FN_3_IN)
-#define GPIO92_MMCDAT0_MD (92 | GPIO_ALT_FN_1_OUT)
-#define GPIO93_CIF_DD_6_MD (93 | GPIO_ALT_FN_2_IN)
-#define GPIO94_CIF_DD_5_MD (94 | GPIO_ALT_FN_2_IN)
-#define GPIO95_CIF_DD_4_MD (95 | GPIO_ALT_FN_2_IN)
-#define GPIO95_KP_MKIN6_MD (95 | GPIO_ALT_FN_3_IN)
-#define GPIO96_KP_DKIN3_MD (96 | GPIO_ALT_FN_1_IN)
-#define GPIO96_FFRXD_MD (96 | GPIO_ALT_FN_3_IN)
-#define GPIO97_KP_MKIN3_MD (97 | GPIO_ALT_FN_3_IN)
-#define GPIO98_CIF_DD_0_MD (98 | GPIO_ALT_FN_2_IN)
-#define GPIO98_FFRTS_MD (98 | GPIO_ALT_FN_3_OUT)
-#define GPIO99_FFTXD_MD (99 | GPIO_ALT_FN_3_OUT)
-#define GPIO100_KP_MKIN0_MD (100 | GPIO_ALT_FN_1_IN)
-#define GPIO101_KP_MKIN1_MD (101 | GPIO_ALT_FN_1_IN)
-#define GPIO102_nPCE_1_MD (102 | GPIO_ALT_FN_1_OUT)
-#define GPIO102_KP_MKIN2_MD (102 | GPIO_ALT_FN_1_IN)
-#define GPIO103_CIF_DD_3_MD (103 | GPIO_ALT_FN_1_IN)
-#define GPIO103_KP_MKOUT0_MD (103 | GPIO_ALT_FN_2_OUT)
-#define GPIO104_CIF_DD_2_MD (104 | GPIO_ALT_FN_1_IN)
-#define GPIO104_pSKTSEL_MD (104 | GPIO_ALT_FN_1_OUT)
-#define GPIO104_KP_MKOUT1_MD (104 | GPIO_ALT_FN_2_OUT)
-#define GPIO105_CIF_DD_1_MD (105 | GPIO_ALT_FN_1_IN)
-#define GPIO105_KP_MKOUT2_MD (105 | GPIO_ALT_FN_2_OUT)
-#define GPIO106_CIF_DD_9_MD (106 | GPIO_ALT_FN_1_IN)
-#define GPIO106_KP_MKOUT3_MD (106 | GPIO_ALT_FN_2_OUT)
-#define GPIO107_CIF_DD_8_MD (107 | GPIO_ALT_FN_1_IN)
-#define GPIO107_KP_MKOUT4_MD (107 | GPIO_ALT_FN_2_OUT)
-#define GPIO108_CIF_DD_7_MD (108 | GPIO_ALT_FN_1_IN)
-#define GPIO108_KP_MKOUT5_MD (108 | GPIO_ALT_FN_2_OUT)
-#define GPIO109_MMCDAT1_MD (109 | GPIO_ALT_FN_1_OUT)
-#define GPIO110_MMCDAT2_MD (110 | GPIO_ALT_FN_1_OUT)
-#define GPIO110_MMCCS0_MD (110 | GPIO_ALT_FN_1_OUT)
-#define GPIO111_MMCDAT3_MD (111 | GPIO_ALT_FN_1_OUT)
-#define GPIO110_MMCCS1_MD (111 | GPIO_ALT_FN_1_OUT)
-#define GPIO112_MMCCMD_MD (112 | GPIO_ALT_FN_1_OUT)
-#define GPIO113_I2S_SYSCLK_MD (113 | GPIO_ALT_FN_1_OUT)
-#define GPIO113_AC97_RESET_N_MD (113 | GPIO_ALT_FN_2_OUT)
-#define GPIO117_I2CSCL_MD (117 | GPIO_ALT_FN_1_IN)
-#define GPIO118_I2CSDA_MD (118 | GPIO_ALT_FN_1_IN)
-
-/*
- * Handy routine to set GPIO alternate functions
- */
-extern int pxa_gpio_mode( int gpio_mode );
-
-#endif /* __ASM_ARCH_PXA2XX_GPIO_H */
diff --git a/arch/arm/mach-pxa/include/mach/ssp.h b/arch/arm/mach-pxa/include/mach/ssp.h
deleted file mode 100644
index be1be5b6db51..000000000000
--- a/arch/arm/mach-pxa/include/mach/ssp.h
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * ssp.h
- *
- * Copyright (C) 2003 Russell King, All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This driver supports the following PXA CPU/SSP ports:-
- *
- * PXA250 SSP
- * PXA255 SSP, NSSP
- * PXA26x SSP, NSSP, ASSP
- * PXA27x SSP1, SSP2, SSP3
- * PXA3xx SSP1, SSP2, SSP3, SSP4
- */
-
-#ifndef __ASM_ARCH_SSP_H
-#define __ASM_ARCH_SSP_H
-
-#include <linux/list.h>
-#include <linux/io.h>
-
-enum pxa_ssp_type {
- SSP_UNDEFINED = 0,
- PXA25x_SSP, /* pxa 210, 250, 255, 26x */
- PXA25x_NSSP, /* pxa 255, 26x (including ASSP) */
- PXA27x_SSP,
-};
-
-struct ssp_device {
- struct platform_device *pdev;
- struct list_head node;
-
- struct clk *clk;
- void __iomem *mmio_base;
- unsigned long phys_base;
-
- const char *label;
- int port_id;
- int type;
- int use_count;
- int irq;
- int drcmr_rx;
- int drcmr_tx;
-};
-
-#ifdef CONFIG_PXA_SSP_LEGACY
-/*
- * SSP initialisation flags
- */
-#define SSP_NO_IRQ 0x1 /* don't register an irq handler in SSP driver */
-
-struct ssp_state {
- u32 cr0;
- u32 cr1;
- u32 to;
- u32 psp;
-};
-
-struct ssp_dev {
- struct ssp_device *ssp;
- u32 port;
- u32 mode;
- u32 flags;
- u32 psp_flags;
- u32 speed;
- int irq;
-};
-
-int ssp_write_word(struct ssp_dev *dev, u32 data);
-int ssp_read_word(struct ssp_dev *dev, u32 *data);
-int ssp_flush(struct ssp_dev *dev);
-void ssp_enable(struct ssp_dev *dev);
-void ssp_disable(struct ssp_dev *dev);
-void ssp_save_state(struct ssp_dev *dev, struct ssp_state *ssp);
-void ssp_restore_state(struct ssp_dev *dev, struct ssp_state *ssp);
-int ssp_init(struct ssp_dev *dev, u32 port, u32 init_flags);
-int ssp_config(struct ssp_dev *dev, u32 mode, u32 flags, u32 psp_flags, u32 speed);
-void ssp_exit(struct ssp_dev *dev);
-#endif /* CONFIG_PXA_SSP_LEGACY */
-
-/**
- * ssp_write_reg - Write to a SSP register
- *
- * @dev: SSP device to access
- * @reg: Register to write to
- * @val: Value to be written.
- */
-static inline void ssp_write_reg(struct ssp_device *dev, u32 reg, u32 val)
-{
- __raw_writel(val, dev->mmio_base + reg);
-}
-
-/**
- * ssp_read_reg - Read from a SSP register
- *
- * @dev: SSP device to access
- * @reg: Register to read from
- */
-static inline u32 ssp_read_reg(struct ssp_device *dev, u32 reg)
-{
- return __raw_readl(dev->mmio_base + reg);
-}
-
-struct ssp_device *ssp_request(int port, const char *label);
-void ssp_free(struct ssp_device *);
-#endif /* __ASM_ARCH_SSP_H */
diff --git a/arch/arm/mach-pxa/include/mach/tosa.h b/arch/arm/mach-pxa/include/mach/tosa.h
index 4df2d38507dc..1bbd1f2e4beb 100644
--- a/arch/arm/mach-pxa/include/mach/tosa.h
+++ b/arch/arm/mach-pxa/include/mach/tosa.h
@@ -167,7 +167,7 @@
#define TOSA_KEY_SYNC KEY_102ND /* ??? */
-#ifndef CONFIG_KEYBOARD_TOSA_USE_EXT_KEYCODES
+#ifndef CONFIG_TOSA_USE_EXT_KEYCODES
#define TOSA_KEY_RECORD KEY_YEN
#define TOSA_KEY_ADDRESSBOOK KEY_KATAKANA
#define TOSA_KEY_CANCEL KEY_ESC
diff --git a/arch/arm/mach-pxa/include/mach/vpac270.h b/arch/arm/mach-pxa/include/mach/vpac270.h
new file mode 100644
index 000000000000..7bfa3dd0fd5e
--- /dev/null
+++ b/arch/arm/mach-pxa/include/mach/vpac270.h
@@ -0,0 +1,42 @@
+/*
+ * GPIOs and interrupts for Voipac PXA270
+ *
+ * Copyright (C) 2010
+ * Marek Vasut <marek.vasut@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef _INCLUDE_VPAC270_H_
+#define _INCLUDE_VPAC270_H_
+
+#define GPIO1_VPAC270_USER_BTN 1
+
+#define GPIO15_VPAC270_LED_ORANGE 15
+
+#define GPIO81_VPAC270_BKL_ON 81
+#define GPIO83_VPAC270_NL_ON 83
+
+#define GPIO52_VPAC270_SD_READONLY 52
+#define GPIO53_VPAC270_SD_DETECT_N 53
+
+#define GPIO84_VPAC270_PCMCIA_CD 84
+#define GPIO35_VPAC270_PCMCIA_RDY 35
+#define GPIO107_VPAC270_PCMCIA_PPEN 107
+#define GPIO11_VPAC270_PCMCIA_RESET 11
+#define GPIO17_VPAC270_CF_CD 17
+#define GPIO12_VPAC270_CF_RDY 12
+#define GPIO16_VPAC270_CF_RESET 16
+
+#define GPIO41_VPAC270_UDC_DETECT 41
+
+#define GPIO114_VPAC270_ETH_IRQ 114
+
+#define GPIO36_VPAC270_IDE_IRQ 36
+
+#define GPIO113_VPAC270_TS_IRQ 113
+
+#endif
diff --git a/arch/arm/mach-pxa/include/mach/z2.h b/arch/arm/mach-pxa/include/mach/z2.h
new file mode 100644
index 000000000000..8835c16bc82f
--- /dev/null
+++ b/arch/arm/mach-pxa/include/mach/z2.h
@@ -0,0 +1,41 @@
+/*
+ * arch/arm/mach-pxa/include/mach/z2.h
+ *
+ * Author: Ken McGuire
+ * Created: Feb 6, 2009
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef ASM_ARCH_ZIPIT2_H
+#define ASM_ARCH_ZIPIT2_H
+
+/* LEDs */
+#define GPIO10_ZIPITZ2_LED_WIFI 10
+#define GPIO85_ZIPITZ2_LED_CHARGED 85
+#define GPIO83_ZIPITZ2_LED_CHARGING 83
+
+/* SD/MMC */
+#define GPIO96_ZIPITZ2_SD_DETECT 96
+
+/* GPIO Buttons */
+#define GPIO1_ZIPITZ2_POWER_BUTTON 1
+#define GPIO98_ZIPITZ2_LID_BUTTON 98
+
+/* Libertas GSPI8686 WiFi */
+#define GPIO14_ZIPITZ2_WIFI_RESET 14
+#define GPIO15_ZIPITZ2_WIFI_POWER 15
+#define GPIO24_ZIPITZ2_WIFI_CS 24
+#define GPIO36_ZIPITZ2_WIFI_IRQ 36
+
+/* LCD */
+#define GPIO19_ZIPITZ2_LCD_RESET 19
+#define GPIO88_ZIPITZ2_LCD_CS 88
+
+/* MISC GPIOs */
+#define GPIO0_ZIPITZ2_AC_DETECT 0
+#define GPIO37_ZIPITZ2_HEADSET_DETECT 37
+
+#endif
diff --git a/arch/arm/mach-pxa/littleton.c b/arch/arm/mach-pxa/littleton.c
index fa527b258d61..9b9046185b00 100644
--- a/arch/arm/mach-pxa/littleton.c
+++ b/arch/arm/mach-pxa/littleton.c
@@ -41,7 +41,6 @@
#include <mach/pxa300.h>
#include <mach/pxafb.h>
-#include <mach/ssp.h>
#include <mach/mmc.h>
#include <mach/pxa2xx_spi.h>
#include <mach/pxa27x_keypad.h>
@@ -272,7 +271,7 @@ static inline void littleton_init_keypad(void) {}
#if defined(CONFIG_MMC_PXA) || defined(CONFIG_MMC_PXA_MODULE)
static struct pxamci_platform_data littleton_mci_platform_data = {
- .detect_delay = 20,
+ .detect_delay_ms = 200,
.ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
.gpio_card_detect = GPIO_MMC1_CARD_DETECT,
.gpio_card_ro = -1,
diff --git a/arch/arm/mach-pxa/lubbock.c b/arch/arm/mach-pxa/lubbock.c
index 63d65a2a0387..330c3282856e 100644
--- a/arch/arm/mach-pxa/lubbock.c
+++ b/arch/arm/mach-pxa/lubbock.c
@@ -478,7 +478,7 @@ static void lubbock_mci_exit(struct device *dev, void *data)
static struct pxamci_platform_data lubbock_mci_platform_data = {
.ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34,
- .detect_delay = 1,
+ .detect_delay_ms = 10,
.init = lubbock_mci_init,
.get_ro = lubbock_mci_get_ro,
.exit = lubbock_mci_exit,
diff --git a/arch/arm/mach-pxa/mfp-pxa2xx.c b/arch/arm/mach-pxa/mfp-pxa2xx.c
index cf6b720c055f..1d1419b73457 100644
--- a/arch/arm/mach-pxa/mfp-pxa2xx.c
+++ b/arch/arm/mach-pxa/mfp-pxa2xx.c
@@ -81,6 +81,7 @@ static int __mfp_config_gpio(unsigned gpio, unsigned long c)
PGSR(bank) &= ~mask;
is_out = 1;
break;
+ case MFP_LPM_INPUT:
case MFP_LPM_DEFAULT:
break;
default:
@@ -178,8 +179,17 @@ int gpio_set_wake(unsigned int gpio, unsigned int on)
if (!d->valid)
return -EINVAL;
- if (d->keypad_gpio)
- return -EINVAL;
+ /* Allow keypad GPIOs to wakeup system when
+ * configured as generic GPIOs.
+ */
+ if (d->keypad_gpio && (MFP_AF(d->config) == 0) &&
+ (d->config & MFP_LPM_CAN_WAKEUP)) {
+ if (on)
+ PKWR |= d->mask;
+ else
+ PKWR &= ~d->mask;
+ return 0;
+ }
mux_taken = (PWER & d->mux_mask) & (~d->mask);
if (on && mux_taken)
@@ -239,21 +249,25 @@ static int pxa27x_pkwr_gpio[] = {
int keypad_set_wake(unsigned int on)
{
unsigned int i, gpio, mask = 0;
-
- if (!on) {
- PKWR = 0;
- return 0;
- }
+ struct gpio_desc *d;
for (i = 0; i < ARRAY_SIZE(pxa27x_pkwr_gpio); i++) {
gpio = pxa27x_pkwr_gpio[i];
+ d = &gpio_desc[gpio];
- if (gpio_desc[gpio].config & MFP_LPM_CAN_WAKEUP)
+ /* skip if configured as generic GPIO */
+ if (MFP_AF(d->config) == 0)
+ continue;
+
+ if (d->config & MFP_LPM_CAN_WAKEUP)
mask |= gpio_desc[gpio].mask;
}
- PKWR = mask;
+ if (on)
+ PKWR |= mask;
+ else
+ PKWR &= ~mask;
return 0;
}
@@ -328,6 +342,17 @@ static int pxa2xx_mfp_suspend(struct sys_device *d, pm_message_t state)
{
int i;
+ /* set corresponding PGSR bit of those marked MFP_LPM_KEEP_OUTPUT */
+ for (i = 0; i < pxa_last_gpio; i++) {
+ if ((gpio_desc[i].config & MFP_LPM_KEEP_OUTPUT) &&
+ (GPDR(i) & GPIO_bit(i))) {
+ if (GPLR(i) & GPIO_bit(i))
+ PGSR(i) |= GPIO_bit(i);
+ else
+ PGSR(i) &= ~GPIO_bit(i);
+ }
+ }
+
for (i = 0; i <= gpio_to_bank(pxa_last_gpio); i++) {
saved_gafr[0][i] = GAFR_L(i);
diff --git a/arch/arm/mach-pxa/mioa701.c b/arch/arm/mach-pxa/mioa701.c
index 7a50ed8fce94..d60db87dde08 100644
--- a/arch/arm/mach-pxa/mioa701.c
+++ b/arch/arm/mach-pxa/mioa701.c
@@ -426,6 +426,7 @@ struct gpio_vbus_mach_info gpio_vbus_data = {
* to give the card a chance to fully insert/eject.
*/
static struct pxamci_platform_data mioa701_mci_info = {
+ .detect_delay_ms = 250,
.ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
.gpio_card_detect = GPIO15_SDIO_INSERT,
.gpio_card_ro = GPIO78_SDIO_RO,
@@ -791,7 +792,6 @@ static void __init mioa701_machine_init(void)
mio_gpio_request(ARRAY_AND_SIZE(global_gpios));
bootstrap_init();
set_pxa_fb_info(&mioa701_pxafb_info);
- mioa701_mci_info.detect_delay = msecs_to_jiffies(250);
pxa_set_mci_info(&mioa701_mci_info);
pxa_set_keypad_info(&mioa701_keypad_info);
wm97xx_bat_set_pdata(&mioa701_battery_data);
diff --git a/arch/arm/mach-pxa/mxm8x10.c b/arch/arm/mach-pxa/mxm8x10.c
index 8c9c6f0d56bb..462167ac05f9 100644
--- a/arch/arm/mach-pxa/mxm8x10.c
+++ b/arch/arm/mach-pxa/mxm8x10.c
@@ -325,7 +325,7 @@ static mfp_cfg_t mfp_cfg[] __initdata = {
#if defined(CONFIG_MMC)
static struct pxamci_platform_data mxm_8x10_mci_platform_data = {
.ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
- .detect_delay = 1,
+ .detect_delay_ms = 10,
.gpio_card_detect = MXM_8X10_SD_nCD,
.gpio_card_ro = MXM_8X10_SD_WP,
.gpio_power = -1
diff --git a/arch/arm/mach-pxa/palmld.c b/arch/arm/mach-pxa/palmld.c
index f70c75b38769..1963819dba98 100644
--- a/arch/arm/mach-pxa/palmld.c
+++ b/arch/arm/mach-pxa/palmld.c
@@ -168,7 +168,7 @@ static struct pxamci_platform_data palmld_mci_platform_data = {
.gpio_card_detect = GPIO_NR_PALMLD_SD_DETECT_N,
.gpio_card_ro = GPIO_NR_PALMLD_SD_READONLY,
.gpio_power = GPIO_NR_PALMLD_SD_POWER,
- .detect_delay = 20,
+ .detect_delay_ms = 200,
};
/******************************************************************************
diff --git a/arch/arm/mach-pxa/palmt5.c b/arch/arm/mach-pxa/palmt5.c
index d902a813aae3..5305a3993e69 100644
--- a/arch/arm/mach-pxa/palmt5.c
+++ b/arch/arm/mach-pxa/palmt5.c
@@ -110,7 +110,7 @@ static struct pxamci_platform_data palmt5_mci_platform_data = {
.gpio_card_detect = GPIO_NR_PALMT5_SD_DETECT_N,
.gpio_card_ro = GPIO_NR_PALMT5_SD_READONLY,
.gpio_power = GPIO_NR_PALMT5_SD_POWER,
- .detect_delay = 20,
+ .detect_delay_ms = 200,
};
/******************************************************************************
diff --git a/arch/arm/mach-pxa/palmtc.c b/arch/arm/mach-pxa/palmtc.c
index 717d7a638675..033b567e50bb 100644
--- a/arch/arm/mach-pxa/palmtc.c
+++ b/arch/arm/mach-pxa/palmtc.c
@@ -121,7 +121,7 @@ static struct pxamci_platform_data palmtc_mci_platform_data = {
.gpio_power = GPIO_NR_PALMTC_SD_POWER,
.gpio_card_ro = GPIO_NR_PALMTC_SD_READONLY,
.gpio_card_detect = GPIO_NR_PALMTC_SD_DETECT_N,
- .detect_delay = 20,
+ .detect_delay_ms = 200,
};
/******************************************************************************
diff --git a/arch/arm/mach-pxa/palmtx.c b/arch/arm/mach-pxa/palmtx.c
index 007b58c11f8d..ecc1a401598e 100644
--- a/arch/arm/mach-pxa/palmtx.c
+++ b/arch/arm/mach-pxa/palmtx.c
@@ -170,7 +170,7 @@ static struct pxamci_platform_data palmtx_mci_platform_data = {
.gpio_card_detect = GPIO_NR_PALMTX_SD_DETECT_N,
.gpio_card_ro = GPIO_NR_PALMTX_SD_READONLY,
.gpio_power = GPIO_NR_PALMTX_SD_POWER,
- .detect_delay = 20,
+ .detect_delay_ms = 200,
};
/******************************************************************************
diff --git a/arch/arm/mach-pxa/pcm990-baseboard.c b/arch/arm/mach-pxa/pcm990-baseboard.c
index 9d0ecea1760c..f56ae1008759 100644
--- a/arch/arm/mach-pxa/pcm990-baseboard.c
+++ b/arch/arm/mach-pxa/pcm990-baseboard.c
@@ -326,7 +326,7 @@ static void pcm990_mci_exit(struct device *dev, void *data)
#define MSECS_PER_JIFFY (1000/HZ)
static struct pxamci_platform_data pcm990_mci_platform_data = {
- .detect_delay = 250 / MSECS_PER_JIFFY,
+ .detect_delay_ms = 250,
.ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
.init = pcm990_mci_init,
.setpower = pcm990_mci_setpower,
diff --git a/arch/arm/mach-pxa/poodle.c b/arch/arm/mach-pxa/poodle.c
index d58a52415d75..f4abdaafdac4 100644
--- a/arch/arm/mach-pxa/poodle.c
+++ b/arch/arm/mach-pxa/poodle.c
@@ -40,13 +40,12 @@
#include <mach/pxa25x.h>
#include <mach/mmc.h>
#include <mach/udc.h>
-#include <plat/i2c.h>
#include <mach/irda.h>
#include <mach/poodle.h>
#include <mach/pxafb.h>
#include <mach/sharpsl.h>
-#include <mach/ssp.h>
#include <mach/pxa2xx_spi.h>
+#include <plat/i2c.h>
#include <asm/hardware/scoop.h>
#include <asm/hardware/locomo.h>
@@ -277,6 +276,7 @@ static void poodle_mci_exit(struct device *dev, void *data)
}
static struct pxamci_platform_data poodle_mci_platform_data = {
+ .detect_delay_ms = 250,
.ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34,
.init = poodle_mci_init,
.setpower = poodle_mci_setpower,
@@ -450,7 +450,6 @@ static void __init poodle_init(void)
set_pxa_fb_parent(&poodle_locomo_device.dev);
set_pxa_fb_info(&poodle_fb_info);
pxa_set_udc_info(&udc_info);
- poodle_mci_platform_data.detect_delay = msecs_to_jiffies(250);
pxa_set_mci_info(&poodle_mci_platform_data);
pxa_set_ficp_info(&poodle_ficp_platform_data);
pxa_set_i2c_info(NULL);
diff --git a/arch/arm/mach-pxa/pxa3xx.c b/arch/arm/mach-pxa/pxa3xx.c
index 4d7c03e72504..f544e58e1536 100644
--- a/arch/arm/mach-pxa/pxa3xx.c
+++ b/arch/arm/mach-pxa/pxa3xx.c
@@ -29,7 +29,6 @@
#include <mach/ohci.h>
#include <mach/pm.h>
#include <mach/dma.h>
-#include <mach/ssp.h>
#include <mach/regs-intc.h>
#include <plat/i2c.h>
diff --git a/arch/arm/mach-pxa/raumfeld.c b/arch/arm/mach-pxa/raumfeld.c
index d12667bd9ebe..d4b61b3f08f3 100644
--- a/arch/arm/mach-pxa/raumfeld.c
+++ b/arch/arm/mach-pxa/raumfeld.c
@@ -714,7 +714,7 @@ static void raumfeld_mci_exit(struct device *dev, void *data)
static struct pxamci_platform_data raumfeld_mci_platform_data = {
.init = raumfeld_mci_init,
.exit = raumfeld_mci_exit,
- .detect_delay = 20,
+ .detect_delay_ms = 200,
.gpio_card_detect = -1,
.gpio_card_ro = -1,
.gpio_power = -1,
diff --git a/arch/arm/mach-pxa/sharpsl.h b/arch/arm/mach-pxa/sharpsl.h
index 1439785d3979..0cc1203c5bef 100644
--- a/arch/arm/mach-pxa/sharpsl.h
+++ b/arch/arm/mach-pxa/sharpsl.h
@@ -10,29 +10,6 @@
#include <mach/sharpsl_pm.h>
/*
- * SharpSL SSP Driver
- */
-struct corgissp_machinfo {
- int port;
- int cs_lcdcon;
- int cs_ads7846;
- int cs_max1111;
- int clk_lcdcon;
- int clk_ads7846;
- int clk_max1111;
-};
-
-void corgi_ssp_set_machinfo(struct corgissp_machinfo *machinfo);
-
-
-/*
- * SharpSL/Corgi LCD Driver
- */
-void corgi_lcdtg_suspend(void);
-void corgi_lcdtg_hw_init(int mode);
-
-
-/*
* SharpSL Battery/PM Driver
*/
#define READ_GPIO_BIT(x) (GPLR(x) & GPIO_bit(x))
diff --git a/arch/arm/mach-pxa/sharpsl_pm.c b/arch/arm/mach-pxa/sharpsl_pm.c
index 463d874bb867..cb4767251f3c 100644
--- a/arch/arm/mach-pxa/sharpsl_pm.c
+++ b/arch/arm/mach-pxa/sharpsl_pm.c
@@ -28,7 +28,6 @@
#include <asm/mach-types.h>
#include <mach/pm.h>
#include <mach/pxa2xx-regs.h>
-#include <mach/pxa2xx-gpio.h>
#include <mach/regs-rtc.h>
#include <mach/sharpsl.h>
#include <mach/sharpsl_pm.h>
diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c
index 01bdd7500df4..4d2413ed0ffa 100644
--- a/arch/arm/mach-pxa/spitz.c
+++ b/arch/arm/mach-pxa/spitz.c
@@ -86,6 +86,7 @@ static unsigned long spitz_pin_config[] __initdata = {
/* GPIOs */
GPIO9_GPIO, /* SPITZ_GPIO_nSD_DETECT */
+ GPIO16_GPIO, /* SPITZ_GPIO_SYNC */
GPIO81_GPIO, /* SPITZ_GPIO_nSD_WP */
GPIO41_GPIO, /* SPITZ_GPIO_USB_CONNECT */
GPIO37_GPIO, /* SPITZ_GPIO_USB_HOST */
@@ -119,7 +120,8 @@ static unsigned long spitz_pin_config[] __initdata = {
GPIO117_I2C_SCL,
GPIO118_I2C_SDA,
- GPIO1_GPIO | WAKEUP_ON_EDGE_RISE,
+ GPIO0_GPIO | WAKEUP_ON_EDGE_RISE, /* SPITZ_GPIO_KEY_INT */
+ GPIO1_GPIO | WAKEUP_ON_EDGE_FALL, /* SPITZ_GPIO_RESET */
};
/*
@@ -537,6 +539,7 @@ static void spitz_mci_setpower(struct device *dev, unsigned int vdd)
}
static struct pxamci_platform_data spitz_mci_platform_data = {
+ .detect_delay_ms = 250,
.ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34,
.setpower = spitz_mci_setpower,
.gpio_card_detect = SPITZ_GPIO_nSD_DETECT,
@@ -757,7 +760,6 @@ static void __init common_init(void)
spitz_init_spi();
platform_add_devices(devices, ARRAY_SIZE(devices));
- spitz_mci_platform_data.detect_delay = msecs_to_jiffies(250);
pxa_set_mci_info(&spitz_mci_platform_data);
pxa_set_ohci_info(&spitz_ohci_platform_data);
pxa_set_ficp_info(&spitz_ficp_platform_data);
diff --git a/arch/arm/mach-pxa/spitz_pm.c b/arch/arm/mach-pxa/spitz_pm.c
index fc5a70c40358..4209ddf6da61 100644
--- a/arch/arm/mach-pxa/spitz_pm.c
+++ b/arch/arm/mach-pxa/spitz_pm.c
@@ -24,9 +24,10 @@
#include <mach/sharpsl.h>
#include <mach/spitz.h>
-#include <mach/pxa2xx-regs.h>
-#include <mach/pxa2xx-gpio.h>
+#include <mach/pxa27x.h>
+
#include "sharpsl.h"
+#include "generic.h"
#define SHARPSL_CHARGE_ON_VOLT 0x99 /* 2.9V */
#define SHARPSL_CHARGE_ON_TEMP 0xe0 /* 2.9V */
@@ -37,10 +38,17 @@
static int spitz_last_ac_status;
+static struct gpio spitz_charger_gpios[] = {
+ { SPITZ_GPIO_KEY_INT, GPIOF_IN, "Keyboard Interrupt" },
+ { SPITZ_GPIO_SYNC, GPIOF_IN, "Sync" },
+ { SPITZ_GPIO_ADC_TEMP_ON, GPIOF_OUT_INIT_LOW, "ADC Temp On" },
+ { SPITZ_GPIO_JK_B, GPIOF_OUT_INIT_LOW, "JK B" },
+ { SPITZ_GPIO_CHRG_ON, GPIOF_OUT_INIT_LOW, "Charger On" },
+};
+
static void spitz_charger_init(void)
{
- pxa_gpio_mode(SPITZ_GPIO_KEY_INT | GPIO_IN);
- pxa_gpio_mode(SPITZ_GPIO_SYNC | GPIO_IN);
+ gpio_request_array(ARRAY_AND_SIZE(spitz_charger_gpios));
}
static void spitz_measure_temp(int on)
@@ -76,6 +84,11 @@ static void spitz_discharge1(int on)
gpio_set_value(SPITZ_GPIO_LED_GREEN, on);
}
+static unsigned long gpio18_config[] = {
+ GPIO18_RDY,
+ GPIO18_GPIO,
+};
+
static void spitz_presuspend(void)
{
spitz_last_ac_status = sharpsl_pm.machinfo->read_devdata(SHARPSL_STATUS_ACIN);
@@ -97,7 +110,9 @@ static void spitz_presuspend(void)
PGSR3 &= ~SPITZ_GPIO_G3_STROBE_BIT;
PGSR2 |= GPIO_bit(SPITZ_GPIO_KEY_STROBE0);
- pxa_gpio_mode(GPIO18_RDY|GPIO_OUT | GPIO_DFLT_HIGH);
+ pxa2xx_mfp_config(&gpio18_config[0], 1);
+ gpio_request_one(18, GPIOF_OUT_INIT_HIGH, "Unknown");
+ gpio_free(18);
PRER = GPIO_bit(SPITZ_GPIO_KEY_INT);
PFER = GPIO_bit(SPITZ_GPIO_KEY_INT) | GPIO_bit(SPITZ_GPIO_RESET);
@@ -114,8 +129,7 @@ static void spitz_presuspend(void)
static void spitz_postsuspend(void)
{
- pxa_gpio_mode(GPIO18_RDY_MD);
- pxa_gpio_mode(10 | GPIO_IN);
+ pxa2xx_mfp_config(&gpio18_config[1], 1);
}
static int spitz_should_wakeup(unsigned int resume_on_alarm)
diff --git a/arch/arm/mach-pxa/ssp.c b/arch/arm/mach-pxa/ssp.c
deleted file mode 100644
index a81d6dbf662d..000000000000
--- a/arch/arm/mach-pxa/ssp.c
+++ /dev/null
@@ -1,510 +0,0 @@
-/*
- * linux/arch/arm/mach-pxa/ssp.c
- *
- * based on linux/arch/arm/mach-sa1100/ssp.c by Russell King
- *
- * Copyright (C) 2003 Russell King.
- * Copyright (C) 2003 Wolfson Microelectronics PLC
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * PXA2xx SSP driver. This provides the generic core for simple
- * IO-based SSP applications and allows easy port setup for DMA access.
- *
- * Author: Liam Girdwood <liam.girdwood@wolfsonmicro.com>
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/errno.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/init.h>
-#include <linux/mutex.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-
-#include <asm/irq.h>
-#include <mach/hardware.h>
-#include <mach/ssp.h>
-#include <mach/regs-ssp.h>
-
-#ifdef CONFIG_PXA_SSP_LEGACY
-
-#define TIMEOUT 100000
-
-static irqreturn_t ssp_interrupt(int irq, void *dev_id)
-{
- struct ssp_dev *dev = dev_id;
- struct ssp_device *ssp = dev->ssp;
- unsigned int status;
-
- status = __raw_readl(ssp->mmio_base + SSSR);
- __raw_writel(status, ssp->mmio_base + SSSR);
-
- if (status & SSSR_ROR)
- printk(KERN_WARNING "SSP(%d): receiver overrun\n", dev->port);
-
- if (status & SSSR_TUR)
- printk(KERN_WARNING "SSP(%d): transmitter underrun\n", dev->port);
-
- if (status & SSSR_BCE)
- printk(KERN_WARNING "SSP(%d): bit count error\n", dev->port);
-
- return IRQ_HANDLED;
-}
-
-/**
- * ssp_write_word - write a word to the SSP port
- * @data: 32-bit, MSB justified data to write.
- *
- * Wait for a free entry in the SSP transmit FIFO, and write a data
- * word to the SSP port.
- *
- * The caller is expected to perform the necessary locking.
- *
- * Returns:
- * %-ETIMEDOUT timeout occurred
- * 0 success
- */
-int ssp_write_word(struct ssp_dev *dev, u32 data)
-{
- struct ssp_device *ssp = dev->ssp;
- int timeout = TIMEOUT;
-
- while (!(__raw_readl(ssp->mmio_base + SSSR) & SSSR_TNF)) {
- if (!--timeout)
- return -ETIMEDOUT;
- cpu_relax();
- }
-
- __raw_writel(data, ssp->mmio_base + SSDR);
-
- return 0;
-}
-
-/**
- * ssp_read_word - read a word from the SSP port
- *
- * Wait for a data word in the SSP receive FIFO, and return the
- * received data. Data is LSB justified.
- *
- * Note: Currently, if data is not expected to be received, this
- * function will wait for ever.
- *
- * The caller is expected to perform the necessary locking.
- *
- * Returns:
- * %-ETIMEDOUT timeout occurred
- * 32-bit data success
- */
-int ssp_read_word(struct ssp_dev *dev, u32 *data)
-{
- struct ssp_device *ssp = dev->ssp;
- int timeout = TIMEOUT;
-
- while (!(__raw_readl(ssp->mmio_base + SSSR) & SSSR_RNE)) {
- if (!--timeout)
- return -ETIMEDOUT;
- cpu_relax();
- }
-
- *data = __raw_readl(ssp->mmio_base + SSDR);
- return 0;
-}
-
-/**
- * ssp_flush - flush the transmit and receive FIFOs
- *
- * Wait for the SSP to idle, and ensure that the receive FIFO
- * is empty.
- *
- * The caller is expected to perform the necessary locking.
- */
-int ssp_flush(struct ssp_dev *dev)
-{
- struct ssp_device *ssp = dev->ssp;
- int timeout = TIMEOUT * 2;
-
- /* ensure TX FIFO is empty instead of not full */
- if (cpu_is_pxa3xx()) {
- while (__raw_readl(ssp->mmio_base + SSSR) & 0xf00) {
- if (!--timeout)
- return -ETIMEDOUT;
- cpu_relax();
- }
- timeout = TIMEOUT * 2;
- }
-
- do {
- while (__raw_readl(ssp->mmio_base + SSSR) & SSSR_RNE) {
- if (!--timeout)
- return -ETIMEDOUT;
- (void)__raw_readl(ssp->mmio_base + SSDR);
- }
- if (!--timeout)
- return -ETIMEDOUT;
- } while (__raw_readl(ssp->mmio_base + SSSR) & SSSR_BSY);
-
- return 0;
-}
-
-/**
- * ssp_enable - enable the SSP port
- *
- * Turn on the SSP port.
- */
-void ssp_enable(struct ssp_dev *dev)
-{
- struct ssp_device *ssp = dev->ssp;
- uint32_t sscr0;
-
- sscr0 = __raw_readl(ssp->mmio_base + SSCR0);
- sscr0 |= SSCR0_SSE;
- __raw_writel(sscr0, ssp->mmio_base + SSCR0);
-}
-
-/**
- * ssp_disable - shut down the SSP port
- *
- * Turn off the SSP port, optionally powering it down.
- */
-void ssp_disable(struct ssp_dev *dev)
-{
- struct ssp_device *ssp = dev->ssp;
- uint32_t sscr0;
-
- sscr0 = __raw_readl(ssp->mmio_base + SSCR0);
- sscr0 &= ~SSCR0_SSE;
- __raw_writel(sscr0, ssp->mmio_base + SSCR0);
-}
-
-/**
- * ssp_save_state - save the SSP configuration
- * @ssp: pointer to structure to save SSP configuration
- *
- * Save the configured SSP state for suspend.
- */
-void ssp_save_state(struct ssp_dev *dev, struct ssp_state *state)
-{
- struct ssp_device *ssp = dev->ssp;
-
- state->cr0 = __raw_readl(ssp->mmio_base + SSCR0);
- state->cr1 = __raw_readl(ssp->mmio_base + SSCR1);
- state->to = __raw_readl(ssp->mmio_base + SSTO);
- state->psp = __raw_readl(ssp->mmio_base + SSPSP);
-
- ssp_disable(dev);
-}
-
-/**
- * ssp_restore_state - restore a previously saved SSP configuration
- * @ssp: pointer to configuration saved by ssp_save_state
- *
- * Restore the SSP configuration saved previously by ssp_save_state.
- */
-void ssp_restore_state(struct ssp_dev *dev, struct ssp_state *state)
-{
- struct ssp_device *ssp = dev->ssp;
- uint32_t sssr = SSSR_ROR | SSSR_TUR | SSSR_BCE;
-
- __raw_writel(sssr, ssp->mmio_base + SSSR);
-
- __raw_writel(state->cr0 & ~SSCR0_SSE, ssp->mmio_base + SSCR0);
- __raw_writel(state->cr1, ssp->mmio_base + SSCR1);
- __raw_writel(state->to, ssp->mmio_base + SSTO);
- __raw_writel(state->psp, ssp->mmio_base + SSPSP);
- __raw_writel(state->cr0, ssp->mmio_base + SSCR0);
-}
-
-/**
- * ssp_config - configure SSP port settings
- * @mode: port operating mode
- * @flags: port config flags
- * @psp_flags: port PSP config flags
- * @speed: port speed
- *
- * Port MUST be disabled by ssp_disable before making any config changes.
- */
-int ssp_config(struct ssp_dev *dev, u32 mode, u32 flags, u32 psp_flags, u32 speed)
-{
- struct ssp_device *ssp = dev->ssp;
-
- dev->mode = mode;
- dev->flags = flags;
- dev->psp_flags = psp_flags;
- dev->speed = speed;
-
- /* set up port type, speed, port settings */
- __raw_writel((dev->speed | dev->mode), ssp->mmio_base + SSCR0);
- __raw_writel(dev->flags, ssp->mmio_base + SSCR1);
- __raw_writel(dev->psp_flags, ssp->mmio_base + SSPSP);
-
- return 0;
-}
-
-/**
- * ssp_init - setup the SSP port
- *
- * initialise and claim resources for the SSP port.
- *
- * Returns:
- * %-ENODEV if the SSP port is unavailable
- * %-EBUSY if the resources are already in use
- * %0 on success
- */
-int ssp_init(struct ssp_dev *dev, u32 port, u32 init_flags)
-{
- struct ssp_device *ssp;
- int ret;
-
- ssp = ssp_request(port, "SSP");
- if (ssp == NULL)
- return -ENODEV;
-
- dev->ssp = ssp;
- dev->port = port;
-
- /* do we need to get irq */
- if (!(init_flags & SSP_NO_IRQ)) {
- ret = request_irq(ssp->irq, ssp_interrupt,
- 0, "SSP", dev);
- if (ret)
- goto out_region;
- dev->irq = ssp->irq;
- } else
- dev->irq = NO_IRQ;
-
- /* turn on SSP port clock */
- clk_enable(ssp->clk);
- return 0;
-
-out_region:
- ssp_free(ssp);
- return ret;
-}
-
-/**
- * ssp_exit - undo the effects of ssp_init
- *
- * release and free resources for the SSP port.
- */
-void ssp_exit(struct ssp_dev *dev)
-{
- struct ssp_device *ssp = dev->ssp;
-
- ssp_disable(dev);
- if (dev->irq != NO_IRQ)
- free_irq(dev->irq, dev);
- clk_disable(ssp->clk);
- ssp_free(ssp);
-}
-#endif /* CONFIG_PXA_SSP_LEGACY */
-
-static DEFINE_MUTEX(ssp_lock);
-static LIST_HEAD(ssp_list);
-
-struct ssp_device *ssp_request(int port, const char *label)
-{
- struct ssp_device *ssp = NULL;
-
- mutex_lock(&ssp_lock);
-
- list_for_each_entry(ssp, &ssp_list, node) {
- if (ssp->port_id == port && ssp->use_count == 0) {
- ssp->use_count++;
- ssp->label = label;
- break;
- }
- }
-
- mutex_unlock(&ssp_lock);
-
- if (&ssp->node == &ssp_list)
- return NULL;
-
- return ssp;
-}
-EXPORT_SYMBOL(ssp_request);
-
-void ssp_free(struct ssp_device *ssp)
-{
- mutex_lock(&ssp_lock);
- if (ssp->use_count) {
- ssp->use_count--;
- ssp->label = NULL;
- } else
- dev_err(&ssp->pdev->dev, "device already free\n");
- mutex_unlock(&ssp_lock);
-}
-EXPORT_SYMBOL(ssp_free);
-
-static int __devinit ssp_probe(struct platform_device *pdev)
-{
- const struct platform_device_id *id = platform_get_device_id(pdev);
- struct resource *res;
- struct ssp_device *ssp;
- int ret = 0;
-
- ssp = kzalloc(sizeof(struct ssp_device), GFP_KERNEL);
- if (ssp == NULL) {
- dev_err(&pdev->dev, "failed to allocate memory");
- return -ENOMEM;
- }
- ssp->pdev = pdev;
-
- ssp->clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR(ssp->clk)) {
- ret = PTR_ERR(ssp->clk);
- goto err_free;
- }
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res == NULL) {
- dev_err(&pdev->dev, "no memory resource defined\n");
- ret = -ENODEV;
- goto err_free_clk;
- }
-
- res = request_mem_region(res->start, res->end - res->start + 1,
- pdev->name);
- if (res == NULL) {
- dev_err(&pdev->dev, "failed to request memory resource\n");
- ret = -EBUSY;
- goto err_free_clk;
- }
-
- ssp->phys_base = res->start;
-
- ssp->mmio_base = ioremap(res->start, res->end - res->start + 1);
- if (ssp->mmio_base == NULL) {
- dev_err(&pdev->dev, "failed to ioremap() registers\n");
- ret = -ENODEV;
- goto err_free_mem;
- }
-
- ssp->irq = platform_get_irq(pdev, 0);
- if (ssp->irq < 0) {
- dev_err(&pdev->dev, "no IRQ resource defined\n");
- ret = -ENODEV;
- goto err_free_io;
- }
-
- res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
- if (res == NULL) {
- dev_err(&pdev->dev, "no SSP RX DRCMR defined\n");
- ret = -ENODEV;
- goto err_free_io;
- }
- ssp->drcmr_rx = res->start;
-
- res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
- if (res == NULL) {
- dev_err(&pdev->dev, "no SSP TX DRCMR defined\n");
- ret = -ENODEV;
- goto err_free_io;
- }
- ssp->drcmr_tx = res->start;
-
- /* PXA2xx/3xx SSP ports starts from 1 and the internal pdev->id
- * starts from 0, do a translation here
- */
- ssp->port_id = pdev->id + 1;
- ssp->use_count = 0;
- ssp->type = (int)id->driver_data;
-
- mutex_lock(&ssp_lock);
- list_add(&ssp->node, &ssp_list);
- mutex_unlock(&ssp_lock);
-
- platform_set_drvdata(pdev, ssp);
- return 0;
-
-err_free_io:
- iounmap(ssp->mmio_base);
-err_free_mem:
- release_mem_region(res->start, res->end - res->start + 1);
-err_free_clk:
- clk_put(ssp->clk);
-err_free:
- kfree(ssp);
- return ret;
-}
-
-static int __devexit ssp_remove(struct platform_device *pdev)
-{
- struct resource *res;
- struct ssp_device *ssp;
-
- ssp = platform_get_drvdata(pdev);
- if (ssp == NULL)
- return -ENODEV;
-
- iounmap(ssp->mmio_base);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, res->end - res->start + 1);
-
- clk_put(ssp->clk);
-
- mutex_lock(&ssp_lock);
- list_del(&ssp->node);
- mutex_unlock(&ssp_lock);
-
- kfree(ssp);
- return 0;
-}
-
-static const struct platform_device_id ssp_id_table[] = {
- { "pxa25x-ssp", PXA25x_SSP },
- { "pxa25x-nssp", PXA25x_NSSP },
- { "pxa27x-ssp", PXA27x_SSP },
- { },
-};
-
-static struct platform_driver ssp_driver = {
- .probe = ssp_probe,
- .remove = __devexit_p(ssp_remove),
- .driver = {
- .owner = THIS_MODULE,
- .name = "pxa2xx-ssp",
- },
- .id_table = ssp_id_table,
-};
-
-static int __init pxa_ssp_init(void)
-{
- return platform_driver_register(&ssp_driver);
-}
-
-static void __exit pxa_ssp_exit(void)
-{
- platform_driver_unregister(&ssp_driver);
-}
-
-arch_initcall(pxa_ssp_init);
-module_exit(pxa_ssp_exit);
-
-#ifdef CONFIG_PXA_SSP_LEGACY
-EXPORT_SYMBOL(ssp_write_word);
-EXPORT_SYMBOL(ssp_read_word);
-EXPORT_SYMBOL(ssp_flush);
-EXPORT_SYMBOL(ssp_enable);
-EXPORT_SYMBOL(ssp_disable);
-EXPORT_SYMBOL(ssp_save_state);
-EXPORT_SYMBOL(ssp_restore_state);
-EXPORT_SYMBOL(ssp_init);
-EXPORT_SYMBOL(ssp_exit);
-EXPORT_SYMBOL(ssp_config);
-#endif
-
-MODULE_DESCRIPTION("PXA SSP driver");
-MODULE_AUTHOR("Liam Girdwood");
-MODULE_LICENSE("GPL");
-
diff --git a/arch/arm/mach-pxa/stargate2.c b/arch/arm/mach-pxa/stargate2.c
index 2041eb1d90ba..af40d2a12d37 100644
--- a/arch/arm/mach-pxa/stargate2.c
+++ b/arch/arm/mach-pxa/stargate2.c
@@ -464,8 +464,6 @@ static struct platform_device smc91x_device = {
-static struct pxamci_platform_data stargate2_mci_platform_data;
-
/*
* The card detect interrupt isn't debounced so we delay it by 250ms
* to give the card a chance to fully insert / eject.
@@ -489,8 +487,6 @@ static int stargate2_mci_init(struct device *dev,
goto free_power_en;
}
gpio_direction_input(SG2_GPIO_nSD_DETECT);
- /* Delay to allow for full insertion */
- stargate2_mci_platform_data.detect_delay = msecs_to_jiffies(250);
err = request_irq(IRQ_GPIO(SG2_GPIO_nSD_DETECT),
stargate2_detect_int,
@@ -529,6 +525,7 @@ static void stargate2_mci_exit(struct device *dev, void *data)
}
static struct pxamci_platform_data stargate2_mci_platform_data = {
+ .detect_delay_ms = 250,
.ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
.init = stargate2_mci_init,
.setpower = stargate2_mci_setpower,
diff --git a/arch/arm/mach-pxa/tosa.c b/arch/arm/mach-pxa/tosa.c
index ad552791c4ce..7512b822c6ca 100644
--- a/arch/arm/mach-pxa/tosa.c
+++ b/arch/arm/mach-pxa/tosa.c
@@ -275,6 +275,7 @@ static void tosa_mci_exit(struct device *dev, void *data)
}
static struct pxamci_platform_data tosa_mci_platform_data = {
+ .detect_delay_ms = 250,
.ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34,
.init = tosa_mci_init,
.exit = tosa_mci_exit,
@@ -926,7 +927,6 @@ static void __init tosa_init(void)
dummy = gpiochip_reserve(TOSA_SCOOP_JC_GPIO_BASE, 12);
dummy = gpiochip_reserve(TOSA_TC6393XB_GPIO_BASE, 16);
- tosa_mci_platform_data.detect_delay = msecs_to_jiffies(250);
pxa_set_mci_info(&tosa_mci_platform_data);
pxa_set_udc_info(&udc_info);
pxa_set_ficp_info(&tosa_ficp_platform_data);
diff --git a/arch/arm/mach-pxa/trizeps4.c b/arch/arm/mach-pxa/trizeps4.c
index 797f2544d0ce..69689112eae7 100644
--- a/arch/arm/mach-pxa/trizeps4.c
+++ b/arch/arm/mach-pxa/trizeps4.c
@@ -349,7 +349,7 @@ static void trizeps4_mci_exit(struct device *dev, void *data)
static struct pxamci_platform_data trizeps4_mci_platform_data = {
.ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34,
- .detect_delay = 1,
+ .detect_delay_ms= 10,
.init = trizeps4_mci_init,
.exit = trizeps4_mci_exit,
.get_ro = NULL, /* write-protection not supported */
diff --git a/arch/arm/mach-pxa/vpac270.c b/arch/arm/mach-pxa/vpac270.c
new file mode 100644
index 000000000000..9884fa978f16
--- /dev/null
+++ b/arch/arm/mach-pxa/vpac270.c
@@ -0,0 +1,615 @@
+/*
+ * Hardware definitions for Voipac PXA270
+ *
+ * Copyright (C) 2010
+ * Marek Vasut <marek.vasut@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/gpio_keys.h>
+#include <linux/input.h>
+#include <linux/gpio.h>
+#include <linux/sysdev.h>
+#include <linux/usb/gpio_vbus.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/physmap.h>
+#include <linux/mtd/onenand.h>
+#include <linux/dm9000.h>
+#include <linux/ucb1400.h>
+#include <linux/ata_platform.h>
+
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+
+#include <mach/pxa27x.h>
+#include <mach/audio.h>
+#include <mach/vpac270.h>
+#include <mach/mmc.h>
+#include <mach/pxafb.h>
+#include <mach/ohci.h>
+#include <mach/pxa27x-udc.h>
+#include <mach/udc.h>
+
+#include <plat/i2c.h>
+
+#include "generic.h"
+#include "devices.h"
+
+/******************************************************************************
+ * Pin configuration
+ ******************************************************************************/
+static unsigned long vpac270_pin_config[] __initdata = {
+ /* MMC */
+ GPIO32_MMC_CLK,
+ GPIO92_MMC_DAT_0,
+ GPIO109_MMC_DAT_1,
+ GPIO110_MMC_DAT_2,
+ GPIO111_MMC_DAT_3,
+ GPIO112_MMC_CMD,
+ GPIO53_GPIO, /* SD detect */
+ GPIO52_GPIO, /* SD r/o switch */
+
+ /* GPIO KEYS */
+ GPIO1_GPIO, /* USER BTN */
+
+ /* LEDs */
+ GPIO15_GPIO, /* orange led */
+
+ /* FFUART */
+ GPIO34_FFUART_RXD,
+ GPIO39_FFUART_TXD,
+ GPIO27_FFUART_RTS,
+ GPIO100_FFUART_CTS,
+ GPIO33_FFUART_DSR,
+ GPIO40_FFUART_DTR,
+ GPIO10_FFUART_DCD,
+ GPIO38_FFUART_RI,
+
+ /* LCD */
+ GPIO58_LCD_LDD_0,
+ GPIO59_LCD_LDD_1,
+ GPIO60_LCD_LDD_2,
+ GPIO61_LCD_LDD_3,
+ GPIO62_LCD_LDD_4,
+ GPIO63_LCD_LDD_5,
+ GPIO64_LCD_LDD_6,
+ GPIO65_LCD_LDD_7,
+ GPIO66_LCD_LDD_8,
+ GPIO67_LCD_LDD_9,
+ GPIO68_LCD_LDD_10,
+ GPIO69_LCD_LDD_11,
+ GPIO70_LCD_LDD_12,
+ GPIO71_LCD_LDD_13,
+ GPIO72_LCD_LDD_14,
+ GPIO73_LCD_LDD_15,
+ GPIO86_LCD_LDD_16,
+ GPIO87_LCD_LDD_17,
+ GPIO74_LCD_FCLK,
+ GPIO75_LCD_LCLK,
+ GPIO76_LCD_PCLK,
+ GPIO77_LCD_BIAS,
+
+ /* PCMCIA */
+ GPIO48_nPOE,
+ GPIO49_nPWE,
+ GPIO50_nPIOR,
+ GPIO51_nPIOW,
+ GPIO85_nPCE_1,
+ GPIO54_nPCE_2,
+ GPIO55_nPREG,
+ GPIO57_nIOIS16,
+ GPIO56_nPWAIT,
+ GPIO104_PSKTSEL,
+ GPIO84_GPIO, /* PCMCIA CD */
+ GPIO35_GPIO, /* PCMCIA RDY */
+ GPIO107_GPIO, /* PCMCIA PPEN */
+ GPIO11_GPIO, /* PCMCIA RESET */
+ GPIO17_GPIO, /* CF CD */
+ GPIO12_GPIO, /* CF RDY */
+ GPIO16_GPIO, /* CF RESET */
+
+ /* UHC */
+ GPIO88_USBH1_PWR,
+ GPIO89_USBH1_PEN,
+ GPIO119_USBH2_PWR,
+ GPIO120_USBH2_PEN,
+
+ /* UDC */
+ GPIO41_GPIO,
+
+ /* Ethernet */
+ GPIO114_GPIO, /* IRQ */
+
+ /* AC97 */
+ GPIO28_AC97_BITCLK,
+ GPIO29_AC97_SDATA_IN_0,
+ GPIO30_AC97_SDATA_OUT,
+ GPIO31_AC97_SYNC,
+ GPIO95_AC97_nRESET,
+ GPIO98_AC97_SYSCLK,
+ GPIO113_GPIO, /* TS IRQ */
+
+ /* I2C */
+ GPIO117_I2C_SCL,
+ GPIO118_I2C_SDA,
+
+ /* IDE */
+ GPIO36_GPIO, /* IDE IRQ */
+ GPIO80_DREQ_1,
+};
+
+/******************************************************************************
+ * NOR Flash
+ ******************************************************************************/
+#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE)
+static struct mtd_partition vpac270_nor_partitions[] = {
+ {
+ .name = "Flash",
+ .offset = 0x00000000,
+ .size = MTDPART_SIZ_FULL,
+ }
+};
+
+static struct physmap_flash_data vpac270_flash_data[] = {
+ {
+ .width = 2, /* bankwidth in bytes */
+ .parts = vpac270_nor_partitions,
+ .nr_parts = ARRAY_SIZE(vpac270_nor_partitions)
+ }
+};
+
+static struct resource vpac270_flash_resource = {
+ .start = PXA_CS0_PHYS,
+ .end = PXA_CS0_PHYS + SZ_64M - 1,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct platform_device vpac270_flash = {
+ .name = "physmap-flash",
+ .id = 0,
+ .resource = &vpac270_flash_resource,
+ .num_resources = 1,
+ .dev = {
+ .platform_data = vpac270_flash_data,
+ },
+};
+static void __init vpac270_nor_init(void)
+{
+ platform_device_register(&vpac270_flash);
+}
+#else
+static inline void vpac270_nor_init(void) {}
+#endif
+
+/******************************************************************************
+ * OneNAND Flash
+ ******************************************************************************/
+#if defined(CONFIG_MTD_ONENAND) || defined(CONFIG_MTD_ONENAND_MODULE)
+static struct mtd_partition vpac270_onenand_partitions[] = {
+ {
+ .name = "Flash",
+ .offset = 0x00000000,
+ .size = MTDPART_SIZ_FULL,
+ }
+};
+
+static struct onenand_platform_data vpac270_onenand_info = {
+ .parts = vpac270_onenand_partitions,
+ .nr_parts = ARRAY_SIZE(vpac270_onenand_partitions),
+};
+
+static struct resource vpac270_onenand_resources[] = {
+ [0] = {
+ .start = PXA_CS0_PHYS,
+ .end = PXA_CS0_PHYS + SZ_1M,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device vpac270_onenand = {
+ .name = "onenand-flash",
+ .id = -1,
+ .resource = vpac270_onenand_resources,
+ .num_resources = ARRAY_SIZE(vpac270_onenand_resources),
+ .dev = {
+ .platform_data = &vpac270_onenand_info,
+ },
+};
+
+static void __init vpac270_onenand_init(void)
+{
+ platform_device_register(&vpac270_onenand);
+}
+#else
+static void __init vpac270_onenand_init(void) {}
+#endif
+
+/******************************************************************************
+ * SD/MMC card controller
+ ******************************************************************************/
+#if defined(CONFIG_MMC_PXA) || defined(CONFIG_MMC_PXA_MODULE)
+static struct pxamci_platform_data vpac270_mci_platform_data = {
+ .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
+ .gpio_card_detect = GPIO53_VPAC270_SD_DETECT_N,
+ .gpio_card_ro = GPIO52_VPAC270_SD_READONLY,
+ .detect_delay_ms = 200,
+};
+
+static void __init vpac270_mmc_init(void)
+{
+ pxa_set_mci_info(&vpac270_mci_platform_data);
+}
+#else
+static inline void vpac270_mmc_init(void) {}
+#endif
+
+/******************************************************************************
+ * GPIO keys
+ ******************************************************************************/
+#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
+static struct gpio_keys_button vpac270_pxa_buttons[] = {
+ {KEY_POWER, GPIO1_VPAC270_USER_BTN, 0, "USER BTN"},
+};
+
+static struct gpio_keys_platform_data vpac270_pxa_keys_data = {
+ .buttons = vpac270_pxa_buttons,
+ .nbuttons = ARRAY_SIZE(vpac270_pxa_buttons),
+};
+
+static struct platform_device vpac270_pxa_keys = {
+ .name = "gpio-keys",
+ .id = -1,
+ .dev = {
+ .platform_data = &vpac270_pxa_keys_data,
+ },
+};
+
+static void __init vpac270_keys_init(void)
+{
+ platform_device_register(&vpac270_pxa_keys);
+}
+#else
+static inline void vpac270_keys_init(void) {}
+#endif
+
+/******************************************************************************
+ * LED
+ ******************************************************************************/
+#if defined(CONFIG_LEDS_GPIO) || defined(CONFIG_LEDS_GPIO_MODULE)
+struct gpio_led vpac270_gpio_leds[] = {
+{
+ .name = "vpac270:orange:user",
+ .default_trigger = "none",
+ .gpio = GPIO15_VPAC270_LED_ORANGE,
+ .active_low = 1,
+}
+};
+
+static struct gpio_led_platform_data vpac270_gpio_led_info = {
+ .leds = vpac270_gpio_leds,
+ .num_leds = ARRAY_SIZE(vpac270_gpio_leds),
+};
+
+static struct platform_device vpac270_leds = {
+ .name = "leds-gpio",
+ .id = -1,
+ .dev = {
+ .platform_data = &vpac270_gpio_led_info,
+ }
+};
+
+static void __init vpac270_leds_init(void)
+{
+ platform_device_register(&vpac270_leds);
+}
+#else
+static inline void vpac270_leds_init(void) {}
+#endif
+
+/******************************************************************************
+ * USB Host
+ ******************************************************************************/
+#if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
+static int vpac270_ohci_init(struct device *dev)
+{
+ UP2OCR = UP2OCR_HXS | UP2OCR_HXOE | UP2OCR_DPPDE | UP2OCR_DMPDE;
+ return 0;
+}
+
+static struct pxaohci_platform_data vpac270_ohci_info = {
+ .port_mode = PMM_PERPORT_MODE,
+ .flags = ENABLE_PORT1 | ENABLE_PORT2 |
+ POWER_CONTROL_LOW | POWER_SENSE_LOW,
+ .init = vpac270_ohci_init,
+};
+
+static void __init vpac270_uhc_init(void)
+{
+ pxa_set_ohci_info(&vpac270_ohci_info);
+}
+#else
+static inline void vpac270_uhc_init(void) {}
+#endif
+
+/******************************************************************************
+ * USB Gadget
+ ******************************************************************************/
+#if defined(CONFIG_USB_GADGET_PXA27X)||defined(CONFIG_USB_GADGET_PXA27X_MODULE)
+static struct gpio_vbus_mach_info vpac270_gpio_vbus_info = {
+ .gpio_vbus = GPIO41_VPAC270_UDC_DETECT,
+ .gpio_pullup = -1,
+};
+
+static struct platform_device vpac270_gpio_vbus = {
+ .name = "gpio-vbus",
+ .id = -1,
+ .dev = {
+ .platform_data = &vpac270_gpio_vbus_info,
+ },
+};
+
+static void vpac270_udc_command(int cmd)
+{
+ if (cmd == PXA2XX_UDC_CMD_CONNECT)
+ UP2OCR = UP2OCR_HXOE | UP2OCR_DPPUE;
+ else if (cmd == PXA2XX_UDC_CMD_DISCONNECT)
+ UP2OCR = UP2OCR_HXOE;
+}
+
+static struct pxa2xx_udc_mach_info vpac270_udc_info __initdata = {
+ .udc_command = vpac270_udc_command,
+ .gpio_pullup = -1,
+};
+
+static void __init vpac270_udc_init(void)
+{
+ pxa_set_udc_info(&vpac270_udc_info);
+ platform_device_register(&vpac270_gpio_vbus);
+}
+#else
+static inline void vpac270_udc_init(void) {}
+#endif
+
+/******************************************************************************
+ * Ethernet
+ ******************************************************************************/
+#if defined(CONFIG_DM9000) || defined(CONFIG_DM9000_MODULE)
+static struct resource vpac270_dm9000_resources[] = {
+ [0] = {
+ .start = PXA_CS2_PHYS + 0x300,
+ .end = PXA_CS2_PHYS + 0x303,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = PXA_CS2_PHYS + 0x304,
+ .end = PXA_CS2_PHYS + 0x343,
+ .flags = IORESOURCE_MEM,
+ },
+ [2] = {
+ .start = IRQ_GPIO(GPIO114_VPAC270_ETH_IRQ),
+ .end = IRQ_GPIO(GPIO114_VPAC270_ETH_IRQ),
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
+ },
+};
+
+static struct dm9000_plat_data vpac270_dm9000_platdata = {
+ .flags = DM9000_PLATF_32BITONLY,
+};
+
+static struct platform_device vpac270_dm9000_device = {
+ .name = "dm9000",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(vpac270_dm9000_resources),
+ .resource = vpac270_dm9000_resources,
+ .dev = {
+ .platform_data = &vpac270_dm9000_platdata,
+ }
+};
+
+static void __init vpac270_eth_init(void)
+{
+ platform_device_register(&vpac270_dm9000_device);
+}
+#else
+static inline void vpac270_eth_init(void) {}
+#endif
+
+/******************************************************************************
+ * Audio and Touchscreen
+ ******************************************************************************/
+#if defined(CONFIG_TOUCHSCREEN_UCB1400) || \
+ defined(CONFIG_TOUCHSCREEN_UCB1400_MODULE)
+static pxa2xx_audio_ops_t vpac270_ac97_pdata = {
+ .reset_gpio = 95,
+};
+
+static struct ucb1400_pdata vpac270_ucb1400_pdata = {
+ .irq = IRQ_GPIO(GPIO113_VPAC270_TS_IRQ),
+};
+
+static struct platform_device vpac270_ucb1400_device = {
+ .name = "ucb1400_core",
+ .id = -1,
+ .dev = {
+ .platform_data = &vpac270_ucb1400_pdata,
+ },
+};
+
+static void __init vpac270_ts_init(void)
+{
+ pxa_set_ac97_info(&vpac270_ac97_pdata);
+ platform_device_register(&vpac270_ucb1400_device);
+}
+#else
+static inline void vpac270_ts_init(void) {}
+#endif
+
+/******************************************************************************
+ * RTC
+ ******************************************************************************/
+#if defined(CONFIG_RTC_DRV_DS1307) || defined(CONFIG_RTC_DRV_DS1307_MODULE)
+static struct i2c_board_info __initdata vpac270_i2c_devs[] = {
+ {
+ I2C_BOARD_INFO("ds1339", 0x68),
+ },
+};
+
+static void __init vpac270_rtc_init(void)
+{
+ pxa_set_i2c_info(NULL);
+ i2c_register_board_info(0, ARRAY_AND_SIZE(vpac270_i2c_devs));
+}
+#else
+static inline void vpac270_rtc_init(void) {}
+#endif
+
+/******************************************************************************
+ * Framebuffer
+ ******************************************************************************/
+#if defined(CONFIG_FB_PXA) || defined(CONFIG_FB_PXA_MODULE)
+static struct pxafb_mode_info vpac270_lcd_modes[] = {
+{
+ .pixclock = 57692,
+ .xres = 640,
+ .yres = 480,
+ .bpp = 32,
+ .depth = 18,
+
+ .left_margin = 144,
+ .right_margin = 32,
+ .upper_margin = 13,
+ .lower_margin = 30,
+
+ .hsync_len = 32,
+ .vsync_len = 2,
+
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+},
+};
+
+static struct pxafb_mach_info vpac270_lcd_screen = {
+ .modes = vpac270_lcd_modes,
+ .num_modes = ARRAY_SIZE(vpac270_lcd_modes),
+ .lcd_conn = LCD_COLOR_TFT_18BPP,
+};
+
+static void vpac270_lcd_power(int on, struct fb_var_screeninfo *info)
+{
+ gpio_set_value(GPIO81_VPAC270_BKL_ON, on);
+}
+
+static void __init vpac270_lcd_init(void)
+{
+ int ret;
+
+ ret = gpio_request(GPIO81_VPAC270_BKL_ON, "BKL-ON");
+ if (ret) {
+ pr_err("Requesting BKL-ON GPIO failed!\n");
+ goto err;
+ }
+
+ ret = gpio_direction_output(GPIO81_VPAC270_BKL_ON, 1);
+ if (ret) {
+ pr_err("Setting BKL-ON GPIO direction failed!\n");
+ goto err2;
+ }
+
+ vpac270_lcd_screen.pxafb_lcd_power = vpac270_lcd_power;
+ set_pxa_fb_info(&vpac270_lcd_screen);
+ return;
+
+err2:
+ gpio_free(GPIO81_VPAC270_BKL_ON);
+err:
+ return;
+}
+#else
+static inline void vpac270_lcd_init(void) {}
+#endif
+
+/******************************************************************************
+ * PATA IDE
+ ******************************************************************************/
+#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE)
+static struct pata_platform_info vpac270_pata_pdata = {
+ .ioport_shift = 1,
+ .irq_flags = IRQF_TRIGGER_RISING,
+};
+
+static struct resource vpac270_ide_resources[] = {
+ [0] = { /* I/O Base address */
+ .start = PXA_CS3_PHYS + 0x120,
+ .end = PXA_CS3_PHYS + 0x13f,
+ .flags = IORESOURCE_MEM
+ },
+ [1] = { /* CTL Base address */
+ .start = PXA_CS3_PHYS + 0x15c,
+ .end = PXA_CS3_PHYS + 0x15f,
+ .flags = IORESOURCE_MEM
+ },
+ [2] = { /* IDE IRQ pin */
+ .start = gpio_to_irq(GPIO36_VPAC270_IDE_IRQ),
+ .end = gpio_to_irq(GPIO36_VPAC270_IDE_IRQ),
+ .flags = IORESOURCE_IRQ
+ }
+};
+
+static struct platform_device vpac270_ide_device = {
+ .name = "pata_platform",
+ .num_resources = ARRAY_SIZE(vpac270_ide_resources),
+ .resource = vpac270_ide_resources,
+ .dev = {
+ .platform_data = &vpac270_pata_pdata,
+ }
+};
+
+static void __init vpac270_ide_init(void)
+{
+ platform_device_register(&vpac270_ide_device);
+}
+#else
+static inline void vpac270_ide_init(void) {}
+#endif
+
+/******************************************************************************
+ * Machine init
+ ******************************************************************************/
+static void __init vpac270_init(void)
+{
+ pxa2xx_mfp_config(ARRAY_AND_SIZE(vpac270_pin_config));
+
+ pxa_set_ffuart_info(NULL);
+ pxa_set_btuart_info(NULL);
+ pxa_set_stuart_info(NULL);
+
+ vpac270_lcd_init();
+ vpac270_mmc_init();
+ vpac270_nor_init();
+ vpac270_onenand_init();
+ vpac270_leds_init();
+ vpac270_keys_init();
+ vpac270_uhc_init();
+ vpac270_udc_init();
+ vpac270_eth_init();
+ vpac270_ts_init();
+ vpac270_rtc_init();
+ vpac270_ide_init();
+}
+
+MACHINE_START(VPAC270, "Voipac PXA270")
+ .phys_io = 0x40000000,
+ .io_pg_offst = (io_p2v(0x40000000) >> 18) & 0xfffc,
+ .boot_params = 0xa0000100,
+ .map_io = pxa_map_io,
+ .init_irq = pxa27x_init_irq,
+ .timer = &pxa_timer,
+ .init_machine = vpac270_init
+MACHINE_END
diff --git a/arch/arm/mach-pxa/z2.c b/arch/arm/mach-pxa/z2.c
new file mode 100644
index 000000000000..f5d1ae3db3a4
--- /dev/null
+++ b/arch/arm/mach-pxa/z2.c
@@ -0,0 +1,609 @@
+/*
+ * linux/arch/arm/mach-pxa/z2.c
+ *
+ * Support for the Zipit Z2 Handheld device.
+ *
+ * Author: Ken McGuire
+ * Created: Jan 25, 2009
+ * Based on mainstone.c as modified for the Zipit Z2.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/pwm_backlight.h>
+#include <linux/dma-mapping.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/libertas_spi.h>
+#include <linux/spi/lms283gf05.h>
+#include <linux/power_supply.h>
+#include <linux/mtd/physmap.h>
+#include <linux/gpio.h>
+#include <linux/gpio_keys.h>
+#include <linux/delay.h>
+
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+
+#include <mach/pxa27x.h>
+#include <mach/mfp-pxa27x.h>
+#include <mach/z2.h>
+#include <mach/pxafb.h>
+#include <mach/mmc.h>
+#include <mach/pxa27x_keypad.h>
+#include <mach/pxa2xx_spi.h>
+
+#include <plat/i2c.h>
+
+#include "generic.h"
+#include "devices.h"
+
+/******************************************************************************
+ * Pin configuration
+ ******************************************************************************/
+static unsigned long z2_pin_config[] = {
+
+ /* LCD - 16bpp Active TFT */
+ GPIO58_LCD_LDD_0,
+ GPIO59_LCD_LDD_1,
+ GPIO60_LCD_LDD_2,
+ GPIO61_LCD_LDD_3,
+ GPIO62_LCD_LDD_4,
+ GPIO63_LCD_LDD_5,
+ GPIO64_LCD_LDD_6,
+ GPIO65_LCD_LDD_7,
+ GPIO66_LCD_LDD_8,
+ GPIO67_LCD_LDD_9,
+ GPIO68_LCD_LDD_10,
+ GPIO69_LCD_LDD_11,
+ GPIO70_LCD_LDD_12,
+ GPIO71_LCD_LDD_13,
+ GPIO72_LCD_LDD_14,
+ GPIO73_LCD_LDD_15,
+ GPIO74_LCD_FCLK,
+ GPIO75_LCD_LCLK,
+ GPIO76_LCD_PCLK,
+ GPIO77_LCD_BIAS,
+ GPIO19_GPIO, /* LCD reset */
+ GPIO88_GPIO, /* LCD chipselect */
+
+ /* PWM */
+ GPIO115_PWM1_OUT, /* Keypad Backlight */
+ GPIO11_PWM2_OUT, /* LCD Backlight */
+
+ /* MMC */
+ GPIO32_MMC_CLK,
+ GPIO112_MMC_CMD,
+ GPIO92_MMC_DAT_0,
+ GPIO109_MMC_DAT_1,
+ GPIO110_MMC_DAT_2,
+ GPIO111_MMC_DAT_3,
+ GPIO96_GPIO, /* SD detect */
+
+ /* STUART */
+ GPIO46_STUART_RXD,
+ GPIO47_STUART_TXD,
+
+ /* Keypad */
+ GPIO100_KP_MKIN_0 | WAKEUP_ON_LEVEL_HIGH,
+ GPIO101_KP_MKIN_1 | WAKEUP_ON_LEVEL_HIGH,
+ GPIO102_KP_MKIN_2 | WAKEUP_ON_LEVEL_HIGH,
+ GPIO34_KP_MKIN_3 | WAKEUP_ON_LEVEL_HIGH,
+ GPIO38_KP_MKIN_4 | WAKEUP_ON_LEVEL_HIGH,
+ GPIO16_KP_MKIN_5 | WAKEUP_ON_LEVEL_HIGH,
+ GPIO17_KP_MKIN_6 | WAKEUP_ON_LEVEL_HIGH,
+ GPIO103_KP_MKOUT_0,
+ GPIO104_KP_MKOUT_1,
+ GPIO105_KP_MKOUT_2,
+ GPIO106_KP_MKOUT_3,
+ GPIO107_KP_MKOUT_4,
+ GPIO108_KP_MKOUT_5,
+ GPIO35_KP_MKOUT_6,
+ GPIO41_KP_MKOUT_7,
+
+ /* I2C */
+ GPIO117_I2C_SCL,
+ GPIO118_I2C_SDA,
+
+ /* SSP1 */
+ GPIO23_SSP1_SCLK, /* SSP1_SCK */
+ GPIO25_SSP1_TXD, /* SSP1_TXD */
+ GPIO26_SSP1_RXD, /* SSP1_RXD */
+
+ /* SSP2 */
+ GPIO22_SSP2_SCLK, /* SSP2_SCK */
+ GPIO13_SSP2_TXD, /* SSP2_TXD */
+ GPIO40_SSP2_RXD, /* SSP2_RXD */
+
+ /* LEDs */
+ GPIO10_GPIO, /* WiFi LED */
+ GPIO83_GPIO, /* Charging LED */
+ GPIO85_GPIO, /* Charged LED */
+
+ /* I2S */
+ GPIO28_I2S_BITCLK_OUT,
+ GPIO29_I2S_SDATA_IN,
+ GPIO30_I2S_SDATA_OUT,
+ GPIO31_I2S_SYNC,
+ GPIO113_I2S_SYSCLK,
+
+ /* MISC */
+ GPIO0_GPIO, /* AC power detect */
+ GPIO1_GPIO, /* Power button */
+ GPIO37_GPIO, /* Headphone detect */
+ GPIO98_GPIO, /* Lid switch */
+ GPIO14_GPIO, /* WiFi Reset */
+ GPIO15_GPIO, /* WiFi Power */
+ GPIO24_GPIO, /* WiFi CS */
+ GPIO36_GPIO, /* WiFi IRQ */
+ GPIO88_GPIO, /* LCD CS */
+};
+
+/******************************************************************************
+ * NOR Flash
+ ******************************************************************************/
+#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE)
+static struct resource z2_flash_resource = {
+ .start = PXA_CS0_PHYS,
+ .end = PXA_CS0_PHYS + SZ_8M - 1,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct mtd_partition z2_flash_parts[] = {
+ {
+ .name = "U-Boot Bootloader",
+ .offset = 0x0,
+ .size = 0x20000,
+ },
+ {
+ .name = "Linux Kernel",
+ .offset = 0x20000,
+ .size = 0x220000,
+ },
+ {
+ .name = "Filesystem",
+ .offset = 0x240000,
+ .size = 0x5b0000,
+ },
+ {
+ .name = "U-Boot Environment",
+ .offset = 0x7f0000,
+ .size = MTDPART_SIZ_FULL,
+ },
+};
+
+static struct physmap_flash_data z2_flash_data = {
+ .width = 2,
+ .parts = z2_flash_parts,
+ .nr_parts = ARRAY_SIZE(z2_flash_parts),
+};
+
+static struct platform_device z2_flash = {
+ .name = "physmap-flash",
+ .id = -1,
+ .resource = &z2_flash_resource,
+ .num_resources = 1,
+ .dev = {
+ .platform_data = &z2_flash_data,
+ },
+};
+
+static void __init z2_nor_init(void)
+{
+ platform_device_register(&z2_flash);
+}
+#else
+static inline void z2_nor_init(void) {}
+#endif
+
+/******************************************************************************
+ * Backlight
+ ******************************************************************************/
+#if defined(CONFIG_BACKLIGHT_PWM) || defined(CONFIG_BACKLIGHT_PWM_MODULE)
+static struct platform_pwm_backlight_data z2_backlight_data[] = {
+ [0] = {
+ /* Keypad Backlight */
+ .pwm_id = 1,
+ .max_brightness = 1023,
+ .dft_brightness = 512,
+ .pwm_period_ns = 1260320,
+ },
+ [1] = {
+ /* LCD Backlight */
+ .pwm_id = 2,
+ .max_brightness = 1023,
+ .dft_brightness = 512,
+ .pwm_period_ns = 1260320,
+ },
+};
+
+static struct platform_device z2_backlight_devices[2] = {
+ {
+ .name = "pwm-backlight",
+ .id = 0,
+ .dev = {
+ .platform_data = &z2_backlight_data[1],
+ },
+ },
+ {
+ .name = "pwm-backlight",
+ .id = 1,
+ .dev = {
+ .platform_data = &z2_backlight_data[0],
+ },
+ },
+};
+static void __init z2_pwm_init(void)
+{
+ platform_device_register(&z2_backlight_devices[0]);
+ platform_device_register(&z2_backlight_devices[1]);
+}
+#else
+static inline void z2_pwm_init(void) {}
+#endif
+
+/******************************************************************************
+ * Framebuffer
+ ******************************************************************************/
+#if defined(CONFIG_FB_PXA) || defined(CONFIG_FB_PXA_MODULE)
+static struct pxafb_mode_info z2_lcd_modes[] = {
+{
+ .pixclock = 192000,
+ .xres = 240,
+ .yres = 320,
+ .bpp = 16,
+
+ .left_margin = 4,
+ .right_margin = 8,
+ .upper_margin = 4,
+ .lower_margin = 8,
+
+ .hsync_len = 4,
+ .vsync_len = 4,
+},
+};
+
+static struct pxafb_mach_info z2_lcd_screen = {
+ .modes = z2_lcd_modes,
+ .num_modes = ARRAY_SIZE(z2_lcd_modes),
+ .lcd_conn = LCD_COLOR_TFT_16BPP | LCD_BIAS_ACTIVE_LOW |
+ LCD_ALTERNATE_MAPPING,
+};
+
+static void __init z2_lcd_init(void)
+{
+ set_pxa_fb_info(&z2_lcd_screen);
+}
+#else
+static inline void z2_lcd_init(void) {}
+#endif
+
+/******************************************************************************
+ * SD/MMC card controller
+ ******************************************************************************/
+#if defined(CONFIG_MMC_PXA) || defined(CONFIG_MMC_PXA_MODULE)
+static struct pxamci_platform_data z2_mci_platform_data = {
+ .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
+ .gpio_card_detect = GPIO96_ZIPITZ2_SD_DETECT,
+ .gpio_power = -1,
+ .gpio_card_ro = -1,
+ .detect_delay_ms = 200,
+};
+
+static void __init z2_mmc_init(void)
+{
+ pxa_set_mci_info(&z2_mci_platform_data);
+}
+#else
+static inline void z2_mmc_init(void) {}
+#endif
+
+/******************************************************************************
+ * LEDs
+ ******************************************************************************/
+#if defined(CONFIG_LEDS_GPIO) || defined(CONFIG_LEDS_GPIO_MODULE)
+struct gpio_led z2_gpio_leds[] = {
+{
+ .name = "z2:green:wifi",
+ .default_trigger = "none",
+ .gpio = GPIO10_ZIPITZ2_LED_WIFI,
+ .active_low = 1,
+}, {
+ .name = "z2:green:charged",
+ .default_trigger = "none",
+ .gpio = GPIO85_ZIPITZ2_LED_CHARGED,
+ .active_low = 1,
+}, {
+ .name = "z2:amber:charging",
+ .default_trigger = "none",
+ .gpio = GPIO83_ZIPITZ2_LED_CHARGING,
+ .active_low = 1,
+},
+};
+
+static struct gpio_led_platform_data z2_gpio_led_info = {
+ .leds = z2_gpio_leds,
+ .num_leds = ARRAY_SIZE(z2_gpio_leds),
+};
+
+static struct platform_device z2_leds = {
+ .name = "leds-gpio",
+ .id = -1,
+ .dev = {
+ .platform_data = &z2_gpio_led_info,
+ }
+};
+
+static void __init z2_leds_init(void)
+{
+ platform_device_register(&z2_leds);
+}
+#else
+static inline void z2_leds_init(void) {}
+#endif
+
+/******************************************************************************
+ * GPIO keyboard
+ ******************************************************************************/
+#if defined(CONFIG_KEYBOARD_PXA27x) || defined(CONFIG_KEYBOARD_PXA27x_MODULE)
+static unsigned int z2_matrix_keys[] = {
+ KEY(0, 0, KEY_OPTION),
+ KEY(1, 0, KEY_UP),
+ KEY(2, 0, KEY_DOWN),
+ KEY(3, 0, KEY_LEFT),
+ KEY(4, 0, KEY_RIGHT),
+ KEY(5, 0, KEY_END),
+ KEY(6, 0, KEY_KPPLUS),
+
+ KEY(0, 1, KEY_HOME),
+ KEY(1, 1, KEY_Q),
+ KEY(2, 1, KEY_I),
+ KEY(3, 1, KEY_G),
+ KEY(4, 1, KEY_X),
+ KEY(5, 1, KEY_ENTER),
+ KEY(6, 1, KEY_KPMINUS),
+
+ KEY(0, 2, KEY_PAGEUP),
+ KEY(1, 2, KEY_W),
+ KEY(2, 2, KEY_O),
+ KEY(3, 2, KEY_H),
+ KEY(4, 2, KEY_C),
+ KEY(5, 2, KEY_LEFTALT),
+
+ KEY(0, 3, KEY_PAGEDOWN),
+ KEY(1, 3, KEY_E),
+ KEY(2, 3, KEY_P),
+ KEY(3, 3, KEY_J),
+ KEY(4, 3, KEY_V),
+ KEY(5, 3, KEY_LEFTSHIFT),
+
+ KEY(0, 4, KEY_ESC),
+ KEY(1, 4, KEY_R),
+ KEY(2, 4, KEY_A),
+ KEY(3, 4, KEY_K),
+ KEY(4, 4, KEY_B),
+ KEY(5, 4, KEY_LEFTCTRL),
+
+ KEY(0, 5, KEY_TAB),
+ KEY(1, 5, KEY_T),
+ KEY(2, 5, KEY_S),
+ KEY(3, 5, KEY_L),
+ KEY(4, 5, KEY_N),
+ KEY(5, 5, KEY_SPACE),
+
+ KEY(0, 6, KEY_STOPCD),
+ KEY(1, 6, KEY_Y),
+ KEY(2, 6, KEY_D),
+ KEY(3, 6, KEY_BACKSPACE),
+ KEY(4, 6, KEY_M),
+ KEY(5, 6, KEY_COMMA),
+
+ KEY(0, 7, KEY_PLAYCD),
+ KEY(1, 7, KEY_U),
+ KEY(2, 7, KEY_F),
+ KEY(3, 7, KEY_Z),
+ KEY(4, 7, KEY_SEMICOLON),
+ KEY(5, 7, KEY_DOT),
+};
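+
+/*
+ * Each entry above packs (row, column, keycode) via the KEY() macro;
+ * rows run 0-6 and columns 0-7, matching the 7x8 matrix described by
+ * z2_keypad_platform_data below.
+ */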
+
+static struct pxa27x_keypad_platform_data z2_keypad_platform_data = {
+ .matrix_key_rows = 7,
+ .matrix_key_cols = 8,
+ .matrix_key_map = z2_matrix_keys,
+ .matrix_key_map_size = ARRAY_SIZE(z2_matrix_keys),
+
+ .debounce_interval = 30,
+};
+
+static void __init z2_mkp_init(void)
+{
+ pxa_set_keypad_info(&z2_keypad_platform_data);
+}
+#else
+static inline void z2_mkp_init(void) {}
+#endif
+
+/******************************************************************************
+ * GPIO keys
+ ******************************************************************************/
+#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
+static struct gpio_keys_button z2_pxa_buttons[] = {
+ {KEY_POWER, GPIO1_ZIPITZ2_POWER_BUTTON, 0, "Power Button" },
+ {KEY_CLOSE, GPIO98_ZIPITZ2_LID_BUTTON, 0, "Lid Button" },
+};
+
+static struct gpio_keys_platform_data z2_pxa_keys_data = {
+ .buttons = z2_pxa_buttons,
+ .nbuttons = ARRAY_SIZE(z2_pxa_buttons),
+};
+
+static struct platform_device z2_pxa_keys = {
+ .name = "gpio-keys",
+ .id = -1,
+ .dev = {
+ .platform_data = &z2_pxa_keys_data,
+ },
+};
+
+static void __init z2_keys_init(void)
+{
+ platform_device_register(&z2_pxa_keys);
+}
+#else
+static inline void z2_keys_init(void) {}
+#endif
+
+/******************************************************************************
+ * SSP Devices - WiFi and LCD control
+ ******************************************************************************/
+#if defined(CONFIG_SPI_PXA2XX) || defined(CONFIG_SPI_PXA2XX_MODULE)
+/* WiFi */
+static int z2_lbs_spi_setup(struct spi_device *spi)
+{
+ int ret = 0;
+
+ ret = gpio_request(GPIO15_ZIPITZ2_WIFI_POWER, "WiFi Power");
+ if (ret)
+ goto err;
+
+ ret = gpio_direction_output(GPIO15_ZIPITZ2_WIFI_POWER, 1);
+ if (ret)
+ goto err2;
+
+ ret = gpio_request(GPIO14_ZIPITZ2_WIFI_RESET, "WiFi Reset");
+ if (ret)
+ goto err2;
+
+ ret = gpio_direction_output(GPIO14_ZIPITZ2_WIFI_RESET, 0);
+ if (ret)
+ goto err3;
+
+ /* Reset the card */
+ mdelay(180);
+ gpio_set_value(GPIO14_ZIPITZ2_WIFI_RESET, 1);
+ mdelay(20);
+
+ spi->bits_per_word = 16;
+ spi->mode = SPI_MODE_2;
+
+ spi_setup(spi);
+
+ return 0;
+
+err3:
+ gpio_free(GPIO14_ZIPITZ2_WIFI_RESET);
+err2:
+ gpio_free(GPIO15_ZIPITZ2_WIFI_POWER);
+err:
+ return ret;
+}
+
+static int z2_lbs_spi_teardown(struct spi_device *spi)
+{
+ gpio_set_value(GPIO14_ZIPITZ2_WIFI_RESET, 0);
+ gpio_set_value(GPIO15_ZIPITZ2_WIFI_POWER, 0);
+ gpio_free(GPIO14_ZIPITZ2_WIFI_RESET);
+ gpio_free(GPIO15_ZIPITZ2_WIFI_POWER);
+ return 0;
+}
+
+static struct pxa2xx_spi_chip z2_lbs_chip_info = {
+ .rx_threshold = 8,
+ .tx_threshold = 8,
+ .timeout = 1000,
+ .gpio_cs = GPIO24_ZIPITZ2_WIFI_CS,
+};
+
+static struct libertas_spi_platform_data z2_lbs_pdata = {
+ .use_dummy_writes = 1,
+ .setup = z2_lbs_spi_setup,
+ .teardown = z2_lbs_spi_teardown,
+};
+
+/* LCD */
+static struct pxa2xx_spi_chip lms283_chip_info = {
+ .rx_threshold = 1,
+ .tx_threshold = 1,
+ .timeout = 64,
+ .gpio_cs = GPIO88_ZIPITZ2_LCD_CS,
+};
+
+static const struct lms283gf05_pdata lms283_pdata = {
+ .reset_gpio = GPIO19_ZIPITZ2_LCD_RESET,
+};
+
+static struct spi_board_info spi_board_info[] __initdata = {
+{
+ .modalias = "libertas_spi",
+ .platform_data = &z2_lbs_pdata,
+ .controller_data = &z2_lbs_chip_info,
+ .irq = gpio_to_irq(GPIO36_ZIPITZ2_WIFI_IRQ),
+ .max_speed_hz = 13000000,
+ .bus_num = 1,
+ .chip_select = 0,
+},
+{
+ .modalias = "lms283gf05",
+ .controller_data = &lms283_chip_info,
+ .platform_data = &lms283_pdata,
+ .max_speed_hz = 400000,
+ .bus_num = 2,
+ .chip_select = 0,
+},
+};
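+
+/*
+ * The bus numbers above match the SSP masters registered in
+ * z2_spi_init(): SSP1 (bus 1) drives the libertas WiFi module and
+ * SSP2 (bus 2) drives the LMS283GF05 LCD controller.
+ */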
+
+static struct pxa2xx_spi_master pxa_ssp1_master_info = {
+ .clock_enable = CKEN_SSP,
+ .num_chipselect = 1,
+ .enable_dma = 1,
+};
+
+static struct pxa2xx_spi_master pxa_ssp2_master_info = {
+ .clock_enable = CKEN_SSP2,
+ .num_chipselect = 1,
+};
+
+static void __init z2_spi_init(void)
+{
+ pxa2xx_set_spi_info(1, &pxa_ssp1_master_info);
+ pxa2xx_set_spi_info(2, &pxa_ssp2_master_info);
+ spi_register_board_info(spi_board_info, ARRAY_SIZE(spi_board_info));
+}
+#else
+static inline void z2_spi_init(void) {}
+#endif
+
+/******************************************************************************
+ * Machine init
+ ******************************************************************************/
+static void __init z2_init(void)
+{
+ pxa2xx_mfp_config(ARRAY_AND_SIZE(z2_pin_config));
+
+ z2_lcd_init();
+ z2_mmc_init();
+ z2_mkp_init();
+
+ pxa_set_i2c_info(NULL);
+
+ z2_spi_init();
+ z2_nor_init();
+ z2_pwm_init();
+ z2_leds_init();
+ z2_keys_init();
+}
+
+MACHINE_START(ZIPIT2, "Zipit Z2")
+ .phys_io = 0x40000000,
+ .boot_params = 0xa0000100,
+ .io_pg_offst = (io_p2v(0x40000000) >> 18) & 0xfffc,
+ .map_io = pxa_map_io,
+ .init_irq = pxa27x_init_irq,
+ .timer = &pxa_timer,
+ .init_machine = z2_init,
+MACHINE_END
diff --git a/arch/arm/mach-pxa/zeus.c b/arch/arm/mach-pxa/zeus.c
index 39896d883584..03b9cb910e08 100644
--- a/arch/arm/mach-pxa/zeus.c
+++ b/arch/arm/mach-pxa/zeus.c
@@ -414,15 +414,13 @@ static int zeus_mcp2515_transceiver_enable(int enable)
static struct mcp251x_platform_data zeus_mcp2515_pdata = {
.oscillator_frequency = 16*1000*1000,
- .model = CAN_MCP251X_MCP2515,
.board_specific_setup = zeus_mcp2515_setup,
- .transceiver_enable = zeus_mcp2515_transceiver_enable,
.power_enable = zeus_mcp2515_transceiver_enable,
};
static struct spi_board_info zeus_spi_board_info[] = {
[0] = {
- .modalias = "mcp251x",
+ .modalias = "mcp2515",
.platform_data = &zeus_mcp2515_pdata,
.irq = gpio_to_irq(ZEUS_CAN_GPIO),
.max_speed_hz = 1*1000*1000,
@@ -644,7 +642,7 @@ static struct pxafb_mach_info zeus_fb_info = {
static struct pxamci_platform_data zeus_mci_platform_data = {
.ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34,
- .detect_delay = HZ/4,
+ .detect_delay_ms = 250,
.gpio_card_detect = ZEUS_MMC_CD_GPIO,
.gpio_card_ro = ZEUS_MMC_WP_GPIO,
.gpio_card_ro_invert = 1,
diff --git a/arch/arm/mach-pxa/zylonite.c b/arch/arm/mach-pxa/zylonite.c
index 2b4043c04d0c..c479cbecf784 100644
--- a/arch/arm/mach-pxa/zylonite.c
+++ b/arch/arm/mach-pxa/zylonite.c
@@ -218,7 +218,7 @@ static inline void zylonite_init_lcd(void) {}
#if defined(CONFIG_MMC)
static struct pxamci_platform_data zylonite_mci_platform_data = {
- .detect_delay = 20,
+ .detect_delay_ms = 200,
.ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34,
.gpio_card_detect = EXT_GPIO(0),
.gpio_card_ro = EXT_GPIO(2),
@@ -226,7 +226,7 @@ static struct pxamci_platform_data zylonite_mci_platform_data = {
};
static struct pxamci_platform_data zylonite_mci2_platform_data = {
- .detect_delay = 20,
+ .detect_delay_ms = 200,
.ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34,
.gpio_card_detect = EXT_GPIO(1),
.gpio_card_ro = EXT_GPIO(3),
@@ -234,7 +234,7 @@ static struct pxamci_platform_data zylonite_mci2_platform_data = {
};
static struct pxamci_platform_data zylonite_mci3_platform_data = {
- .detect_delay = 20,
+ .detect_delay_ms = 200,
.ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34,
.gpio_card_detect = EXT_GPIO(30),
.gpio_card_ro = EXT_GPIO(31),
diff --git a/arch/arm/mach-realview/Makefile b/arch/arm/mach-realview/Makefile
index e704edb733c0..a01b76b7c956 100644
--- a/arch/arm/mach-realview/Makefile
+++ b/arch/arm/mach-realview/Makefile
@@ -2,7 +2,7 @@
# Makefile for the linux kernel.
#
-obj-y := core.o clock.o
+obj-y := core.o
obj-$(CONFIG_MACH_REALVIEW_EB) += realview_eb.o
obj-$(CONFIG_MACH_REALVIEW_PB11MP) += realview_pb11mp.o
obj-$(CONFIG_MACH_REALVIEW_PB1176) += realview_pb1176.o
diff --git a/arch/arm/mach-realview/clock.c b/arch/arm/mach-realview/clock.c
deleted file mode 100644
index a7043115de72..000000000000
--- a/arch/arm/mach-realview/clock.c
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * linux/arch/arm/mach-realview/clock.c
- *
- * Copyright (C) 2004 ARM Limited.
- * Written by Deep Blue Solutions Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/device.h>
-#include <linux/list.h>
-#include <linux/errno.h>
-#include <linux/err.h>
-#include <linux/string.h>
-#include <linux/clk.h>
-#include <linux/mutex.h>
-
-#include <asm/hardware/icst307.h>
-
-#include "clock.h"
-
-int clk_enable(struct clk *clk)
-{
- return 0;
-}
-EXPORT_SYMBOL(clk_enable);
-
-void clk_disable(struct clk *clk)
-{
-}
-EXPORT_SYMBOL(clk_disable);
-
-unsigned long clk_get_rate(struct clk *clk)
-{
- return clk->rate;
-}
-EXPORT_SYMBOL(clk_get_rate);
-
-long clk_round_rate(struct clk *clk, unsigned long rate)
-{
- struct icst307_vco vco;
- vco = icst307_khz_to_vco(clk->params, rate / 1000);
- return icst307_khz(clk->params, vco) * 1000;
-}
-EXPORT_SYMBOL(clk_round_rate);
-
-int clk_set_rate(struct clk *clk, unsigned long rate)
-{
- int ret = -EIO;
-
- if (clk->setvco) {
- struct icst307_vco vco;
-
- vco = icst307_khz_to_vco(clk->params, rate / 1000);
- clk->rate = icst307_khz(clk->params, vco) * 1000;
- clk->setvco(clk, vco);
- ret = 0;
- }
- return ret;
-}
-EXPORT_SYMBOL(clk_set_rate);
diff --git a/arch/arm/mach-realview/clock.h b/arch/arm/mach-realview/clock.h
deleted file mode 100644
index ebbb0f06b600..000000000000
--- a/arch/arm/mach-realview/clock.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * linux/arch/arm/mach-realview/clock.h
- *
- * Copyright (C) 2004 ARM Limited.
- * Written by Deep Blue Solutions Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-struct module;
-struct icst307_params;
-
-struct clk {
- unsigned long rate;
- const struct icst307_params *params;
- void *data;
- void (*setvco)(struct clk *, struct icst307_vco vco);
-};
diff --git a/arch/arm/mach-realview/core.c b/arch/arm/mach-realview/core.c
index d5a95738f85b..595be19f8ad5 100644
--- a/arch/arm/mach-realview/core.c
+++ b/arch/arm/mach-realview/core.c
@@ -25,8 +25,6 @@
#include <linux/interrupt.h>
#include <linux/amba/bus.h>
#include <linux/amba/clcd.h>
-#include <linux/clocksource.h>
-#include <linux/clockchips.h>
#include <linux/io.h>
#include <linux/smsc911x.h>
#include <linux/ata_platform.h>
@@ -40,7 +38,7 @@
#include <asm/leds.h>
#include <asm/mach-types.h>
#include <asm/hardware/arm_timer.h>
-#include <asm/hardware/icst307.h>
+#include <asm/hardware/icst.h>
#include <asm/mach/arch.h>
#include <asm/mach/flash.h>
@@ -49,13 +47,12 @@
#include <asm/hardware/gic.h>
+#include <mach/clkdev.h>
#include <mach/platform.h>
#include <mach/irqs.h>
+#include <plat/timer-sp.h>
#include "core.h"
-#include "clock.h"
-
-#define REALVIEW_REFCOUNTER (__io_address(REALVIEW_SYS_BASE) + REALVIEW_SYS_24MHz_OFFSET)
/* used by entry-macro.S and platsmp.c */
void __iomem *gic_cpu_base_addr;
@@ -79,20 +76,6 @@ void __init realview_adjust_zones(int node, unsigned long *size,
}
#endif
-/*
- * This is the RealView sched_clock implementation. This has
- * a resolution of 41.7ns, and a maximum value of about 179s.
- */
-unsigned long long sched_clock(void)
-{
- unsigned long long v;
-
- v = (unsigned long long)readl(REALVIEW_REFCOUNTER) * 125;
- do_div(v, 3);
-
- return v;
-}
-
#define REALVIEW_FLASHCTRL (__io_address(REALVIEW_SYS_BASE) + REALVIEW_SYS_FLASH_OFFSET)
@@ -274,37 +257,40 @@ struct mmci_platform_data realview_mmc1_plat_data = {
/*
* Clock handling
*/
-static const struct icst307_params realview_oscvco_params = {
- .ref = 24000,
- .vco_max = 200000,
+static const struct icst_params realview_oscvco_params = {
+ .ref = 24000000,
+ .vco_max = ICST307_VCO_MAX,
+ .vco_min = ICST307_VCO_MIN,
.vd_min = 4 + 8,
.vd_max = 511 + 8,
.rd_min = 1 + 2,
.rd_max = 127 + 2,
+ .s2div = icst307_s2div,
+ .idx2s = icst307_idx2s,
};
-static void realview_oscvco_set(struct clk *clk, struct icst307_vco vco)
+static void realview_oscvco_set(struct clk *clk, struct icst_vco vco)
{
void __iomem *sys_lock = __io_address(REALVIEW_SYS_BASE) + REALVIEW_SYS_LOCK_OFFSET;
- void __iomem *sys_osc;
u32 val;
- if (machine_is_realview_pb1176())
- sys_osc = __io_address(REALVIEW_SYS_BASE) + REALVIEW_SYS_OSC0_OFFSET;
- else
- sys_osc = __io_address(REALVIEW_SYS_BASE) + REALVIEW_SYS_OSC4_OFFSET;
-
- val = readl(sys_osc) & ~0x7ffff;
+ val = readl(clk->vcoreg) & ~0x7ffff;
val |= vco.v | (vco.r << 9) | (vco.s << 16);
writel(0xa05f, sys_lock);
- writel(val, sys_osc);
+ writel(val, clk->vcoreg);
writel(0, sys_lock);
}
+static const struct clk_ops oscvco_clk_ops = {
+ .round = icst_clk_round,
+ .set = icst_clk_set,
+ .setvco = realview_oscvco_set,
+};
+
static struct clk oscvco_clk = {
+ .ops = &oscvco_clk_ops,
.params = &realview_oscvco_params,
- .setvco = realview_oscvco_set,
};
/*
@@ -347,7 +333,13 @@ static struct clk_lookup lookups[] = {
static int __init clk_init(void)
{
+ if (machine_is_realview_pb1176())
+ oscvco_clk.vcoreg = __io_address(REALVIEW_SYS_BASE) + REALVIEW_SYS_OSC0_OFFSET;
+ else
+ oscvco_clk.vcoreg = __io_address(REALVIEW_SYS_BASE) + REALVIEW_SYS_OSC4_OFFSET;
+
clkdev_add_table(lookups, ARRAY_SIZE(lookups));
+
return 0;
}
arch_initcall(clk_init);
@@ -644,133 +636,6 @@ void __iomem *timer2_va_base;
void __iomem *timer3_va_base;
/*
- * How long is the timer interval?
- */
-#define TIMER_INTERVAL (TICKS_PER_uSEC * mSEC_10)
-#if TIMER_INTERVAL >= 0x100000
-#define TIMER_RELOAD (TIMER_INTERVAL >> 8)
-#define TIMER_DIVISOR (TIMER_CTRL_DIV256)
-#define TICKS2USECS(x) (256 * (x) / TICKS_PER_uSEC)
-#elif TIMER_INTERVAL >= 0x10000
-#define TIMER_RELOAD (TIMER_INTERVAL >> 4) /* Divide by 16 */
-#define TIMER_DIVISOR (TIMER_CTRL_DIV16)
-#define TICKS2USECS(x) (16 * (x) / TICKS_PER_uSEC)
-#else
-#define TIMER_RELOAD (TIMER_INTERVAL)
-#define TIMER_DIVISOR (TIMER_CTRL_DIV1)
-#define TICKS2USECS(x) ((x) / TICKS_PER_uSEC)
-#endif
-
-static void timer_set_mode(enum clock_event_mode mode,
- struct clock_event_device *clk)
-{
- unsigned long ctrl;
-
- switch(mode) {
- case CLOCK_EVT_MODE_PERIODIC:
- writel(TIMER_RELOAD, timer0_va_base + TIMER_LOAD);
-
- ctrl = TIMER_CTRL_PERIODIC;
- ctrl |= TIMER_CTRL_32BIT | TIMER_CTRL_IE | TIMER_CTRL_ENABLE;
- break;
- case CLOCK_EVT_MODE_ONESHOT:
- /* period set, and timer enabled in 'next_event' hook */
- ctrl = TIMER_CTRL_ONESHOT;
- ctrl |= TIMER_CTRL_32BIT | TIMER_CTRL_IE;
- break;
- case CLOCK_EVT_MODE_UNUSED:
- case CLOCK_EVT_MODE_SHUTDOWN:
- default:
- ctrl = 0;
- }
-
- writel(ctrl, timer0_va_base + TIMER_CTRL);
-}
-
-static int timer_set_next_event(unsigned long evt,
- struct clock_event_device *unused)
-{
- unsigned long ctrl = readl(timer0_va_base + TIMER_CTRL);
-
- writel(evt, timer0_va_base + TIMER_LOAD);
- writel(ctrl | TIMER_CTRL_ENABLE, timer0_va_base + TIMER_CTRL);
-
- return 0;
-}
-
-static struct clock_event_device timer0_clockevent = {
- .name = "timer0",
- .shift = 32,
- .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
- .set_mode = timer_set_mode,
- .set_next_event = timer_set_next_event,
- .rating = 300,
- .cpumask = cpu_all_mask,
-};
-
-static void __init realview_clockevents_init(unsigned int timer_irq)
-{
- timer0_clockevent.irq = timer_irq;
- timer0_clockevent.mult =
- div_sc(1000000, NSEC_PER_SEC, timer0_clockevent.shift);
- timer0_clockevent.max_delta_ns =
- clockevent_delta2ns(0xffffffff, &timer0_clockevent);
- timer0_clockevent.min_delta_ns =
- clockevent_delta2ns(0xf, &timer0_clockevent);
-
- clockevents_register_device(&timer0_clockevent);
-}
-
-/*
- * IRQ handler for the timer
- */
-static irqreturn_t realview_timer_interrupt(int irq, void *dev_id)
-{
- struct clock_event_device *evt = &timer0_clockevent;
-
- /* clear the interrupt */
- writel(1, timer0_va_base + TIMER_INTCLR);
-
- evt->event_handler(evt);
-
- return IRQ_HANDLED;
-}
-
-static struct irqaction realview_timer_irq = {
- .name = "RealView Timer Tick",
- .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
- .handler = realview_timer_interrupt,
-};
-
-static cycle_t realview_get_cycles(struct clocksource *cs)
-{
- return ~readl(timer3_va_base + TIMER_VALUE);
-}
-
-static struct clocksource clocksource_realview = {
- .name = "timer3",
- .rating = 200,
- .read = realview_get_cycles,
- .mask = CLOCKSOURCE_MASK(32),
- .shift = 20,
- .flags = CLOCK_SOURCE_IS_CONTINUOUS,
-};
-
-static void __init realview_clocksource_init(void)
-{
- /* setup timer 0 as free-running clocksource */
- writel(0, timer3_va_base + TIMER_CTRL);
- writel(0xffffffff, timer3_va_base + TIMER_LOAD);
- writel(0xffffffff, timer3_va_base + TIMER_VALUE);
- writel(TIMER_CTRL_32BIT | TIMER_CTRL_ENABLE | TIMER_CTRL_PERIODIC,
- timer3_va_base + TIMER_CTRL);
-
- clocksource_realview.mult =
- clocksource_khz2mult(1000, clocksource_realview.shift);
- clocksource_register(&clocksource_realview);
-}
-
-/*
* Set up the clock source and clock events devices
*/
void __init realview_timer_init(unsigned int timer_irq)
@@ -797,13 +662,8 @@ void __init realview_timer_init(unsigned int timer_irq)
writel(0, timer2_va_base + TIMER_CTRL);
writel(0, timer3_va_base + TIMER_CTRL);
- /*
- * Make irqs happen for the system timer
- */
- setup_irq(timer_irq, &realview_timer_irq);
-
- realview_clocksource_init();
- realview_clockevents_init(timer_irq);
+ sp804_clocksource_init(timer3_va_base);
+ sp804_clockevents_init(timer0_va_base, timer_irq);
}
/*
diff --git a/arch/arm/mach-realview/hotplug.c b/arch/arm/mach-realview/hotplug.c
index be048e3e8799..f95521a5e5ce 100644
--- a/arch/arm/mach-realview/hotplug.c
+++ b/arch/arm/mach-realview/hotplug.c
@@ -131,7 +131,7 @@ void platform_cpu_die(unsigned int cpu)
cpu_leave_lowpower();
}
-int mach_cpu_disable(unsigned int cpu)
+int platform_cpu_disable(unsigned int cpu)
{
/*
* we don't allow CPU 0 to be shutdown (it is still too special
diff --git a/arch/arm/mach-realview/include/mach/clkdev.h b/arch/arm/mach-realview/include/mach/clkdev.h
index 04b37a89801c..e58d0771b64e 100644
--- a/arch/arm/mach-realview/include/mach/clkdev.h
+++ b/arch/arm/mach-realview/include/mach/clkdev.h
@@ -1,6 +1,15 @@
#ifndef __ASM_MACH_CLKDEV_H
#define __ASM_MACH_CLKDEV_H
+#include <plat/clock.h>
+
+struct clk {
+ unsigned long rate;
+ const struct clk_ops *ops;
+ const struct icst_params *params;
+ void __iomem *vcoreg;
+};
+
#define __clk_get(clk) ({ 1; })
#define __clk_put(clk) do { } while (0)
diff --git a/arch/arm/mach-realview/include/mach/irqs-pb1176.h b/arch/arm/mach-realview/include/mach/irqs-pb1176.h
index 2410d4f8ddd3..830055bb8628 100644
--- a/arch/arm/mach-realview/include/mach/irqs-pb1176.h
+++ b/arch/arm/mach-realview/include/mach/irqs-pb1176.h
@@ -31,6 +31,7 @@
#define IRQ_DC1176_SOFTINT (IRQ_DC1176_GIC_START + 1) /* Software interrupt */
#define IRQ_DC1176_COMMRx (IRQ_DC1176_GIC_START + 2) /* Debug Comm Rx interrupt */
#define IRQ_DC1176_COMMTx (IRQ_DC1176_GIC_START + 3) /* Debug Comm Tx interrupt */
+#define IRQ_DC1176_CORE_PMU (IRQ_DC1176_GIC_START + 7) /* Core PMU interrupt */
#define IRQ_DC1176_TIMER0 (IRQ_DC1176_GIC_START + 8) /* Timer 0 */
#define IRQ_DC1176_TIMER1 (IRQ_DC1176_GIC_START + 9) /* Timer 1 */
#define IRQ_DC1176_TIMER2 (IRQ_DC1176_GIC_START + 10) /* Timer 2 */
diff --git a/arch/arm/mach-realview/include/mach/irqs-pba8.h b/arch/arm/mach-realview/include/mach/irqs-pba8.h
index 86792a9f2ab6..4a88a4edb651 100644
--- a/arch/arm/mach-realview/include/mach/irqs-pba8.h
+++ b/arch/arm/mach-realview/include/mach/irqs-pba8.h
@@ -23,12 +23,6 @@
#define IRQ_PBA8_GIC_START 32
-/* L220
-#define IRQ_PBA8_L220_EVENT (IRQ_PBA8_GIC_START + 29)
-#define IRQ_PBA8_L220_SLAVE (IRQ_PBA8_GIC_START + 30)
-#define IRQ_PBA8_L220_DECODE (IRQ_PBA8_GIC_START + 31)
-*/
-
/*
* PB-A8 on-board gic irq sources
*/
@@ -65,6 +59,8 @@
#define IRQ_PBA8_TSPEN (IRQ_PBA8_GIC_START + 30) /* Touchscreen pen */
#define IRQ_PBA8_TSKPAD (IRQ_PBA8_GIC_START + 31) /* Touchscreen keypad */
+#define IRQ_PBA8_PMU (IRQ_PBA8_GIC_START + 47) /* Cortex-A8 PMU */
+
/* ... */
#define IRQ_PBA8_PCI0 (IRQ_PBA8_GIC_START + 50)
#define IRQ_PBA8_PCI1 (IRQ_PBA8_GIC_START + 51)
diff --git a/arch/arm/mach-realview/include/mach/irqs-pbx.h b/arch/arm/mach-realview/include/mach/irqs-pbx.h
index deaad4302b17..206a3001f46b 100644
--- a/arch/arm/mach-realview/include/mach/irqs-pbx.h
+++ b/arch/arm/mach-realview/include/mach/irqs-pbx.h
@@ -22,12 +22,6 @@
#define IRQ_PBX_GIC_START 32
-/* L220
-#define IRQ_PBX_L220_EVENT (IRQ_PBX_GIC_START + 29)
-#define IRQ_PBX_L220_SLAVE (IRQ_PBX_GIC_START + 30)
-#define IRQ_PBX_L220_DECODE (IRQ_PBX_GIC_START + 31)
-*/
-
/*
* PBX on-board gic irq sources
*/
@@ -77,10 +71,10 @@
#define IRQ_PBX_TIMER4_5 (IRQ_PBX_GIC_START + 41) /* Timer 0/1 (default timer) */
#define IRQ_PBX_TIMER6_7 (IRQ_PBX_GIC_START + 42) /* Timer 2/3 */
/* ... */
-#define IRQ_PBX_PMU_CPU3 (IRQ_PBX_GIC_START + 44) /* CPU PMU Interrupts */
-#define IRQ_PBX_PMU_CPU2 (IRQ_PBX_GIC_START + 45)
-#define IRQ_PBX_PMU_CPU1 (IRQ_PBX_GIC_START + 46)
-#define IRQ_PBX_PMU_CPU0 (IRQ_PBX_GIC_START + 47)
+#define IRQ_PBX_PMU_CPU0 (IRQ_PBX_GIC_START + 44) /* CPU PMU Interrupts */
+#define IRQ_PBX_PMU_CPU1 (IRQ_PBX_GIC_START + 45)
+#define IRQ_PBX_PMU_CPU2 (IRQ_PBX_GIC_START + 46)
+#define IRQ_PBX_PMU_CPU3 (IRQ_PBX_GIC_START + 47)
/* ... */
#define IRQ_PBX_PCI0 (IRQ_PBX_GIC_START + 50)
diff --git a/arch/arm/mach-realview/include/mach/platform.h b/arch/arm/mach-realview/include/mach/platform.h
index 86c0c4435a46..1b77a27badaf 100644
--- a/arch/arm/mach-realview/include/mach/platform.h
+++ b/arch/arm/mach-realview/include/mach/platform.h
@@ -231,12 +231,6 @@
#define REALVIEW_INTREG_OFFSET 0x8 /* Interrupt control */
#define REALVIEW_DECODE_OFFSET 0xC /* Fitted logic modules */
-/*
- * Clean base - dummy
- *
- */
-#define CLEAN_BASE REALVIEW_BOOT_ROM_HI
-
/*
* System controller bit assignment
*/
@@ -249,20 +243,6 @@
#define REALVIEW_TIMER4_EnSel 21
-#define MAX_TIMER 2
-#define MAX_PERIOD 699050
-#define TICKS_PER_uSEC 1
-
-/*
- * These are useconds NOT ticks.
- *
- */
-#define mSEC_1 1000
-#define mSEC_5 (mSEC_1 * 5)
-#define mSEC_10 (mSEC_1 * 10)
-#define mSEC_25 (mSEC_1 * 25)
-#define SEC_1 (mSEC_1 * 1000)
-
#define REALVIEW_CSR_BASE 0x10000000
#define REALVIEW_CSR_SIZE 0x10000000
diff --git a/arch/arm/mach-realview/realview_eb.c b/arch/arm/mach-realview/realview_eb.c
index 7d857d300558..422ccd70d5f5 100644
--- a/arch/arm/mach-realview/realview_eb.c
+++ b/arch/arm/mach-realview/realview_eb.c
@@ -31,8 +31,8 @@
#include <asm/irq.h>
#include <asm/leds.h>
#include <asm/mach-types.h>
+#include <asm/pmu.h>
#include <asm/hardware/gic.h>
-#include <asm/hardware/icst307.h>
#include <asm/hardware/cache-l2x0.h>
#include <asm/localtimer.h>
@@ -44,7 +44,6 @@
#include <mach/irqs.h>
#include "core.h"
-#include "clock.h"
static struct map_desc realview_eb_io_desc[] __initdata = {
{
@@ -294,6 +293,36 @@ static struct resource realview_eb_isp1761_resources[] = {
},
};
+static struct resource pmu_resources[] = {
+ [0] = {
+ .start = IRQ_EB11MP_PMU_CPU0,
+ .end = IRQ_EB11MP_PMU_CPU0,
+ .flags = IORESOURCE_IRQ,
+ },
+ [1] = {
+ .start = IRQ_EB11MP_PMU_CPU1,
+ .end = IRQ_EB11MP_PMU_CPU1,
+ .flags = IORESOURCE_IRQ,
+ },
+ [2] = {
+ .start = IRQ_EB11MP_PMU_CPU2,
+ .end = IRQ_EB11MP_PMU_CPU2,
+ .flags = IORESOURCE_IRQ,
+ },
+ [3] = {
+ .start = IRQ_EB11MP_PMU_CPU3,
+ .end = IRQ_EB11MP_PMU_CPU3,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device pmu_device = {
+ .name = "arm-pmu",
+ .id = ARM_PMU_DEVICE_CPU,
+ .num_resources = ARRAY_SIZE(pmu_resources),
+ .resource = pmu_resources,
+};
+
static void __init gic_init_irq(void)
{
if (core_tile_eb11mp() || core_tile_a9mp()) {
@@ -407,6 +436,7 @@ static void __init realview_eb_init(void)
* Bits: .... ...0 0111 1001 0000 .... .... .... */
l2x0_init(__io_address(REALVIEW_EB11MP_L220_BASE), 0x00790000, 0xfe000fff);
#endif
+ platform_device_register(&pmu_device);
}
realview_flash_register(&realview_eb_flash_resource, 1);
diff --git a/arch/arm/mach-realview/realview_pb1176.c b/arch/arm/mach-realview/realview_pb1176.c
index 44392e51dd50..96568ebfa2bb 100644
--- a/arch/arm/mach-realview/realview_pb1176.c
+++ b/arch/arm/mach-realview/realview_pb1176.c
@@ -31,8 +31,8 @@
#include <asm/irq.h>
#include <asm/leds.h>
#include <asm/mach-types.h>
+#include <asm/pmu.h>
#include <asm/hardware/gic.h>
-#include <asm/hardware/icst307.h>
#include <asm/hardware/cache-l2x0.h>
#include <asm/mach/arch.h>
@@ -44,7 +44,6 @@
#include <mach/irqs.h>
#include "core.h"
-#include "clock.h"
static struct map_desc realview_pb1176_io_desc[] __initdata = {
{
@@ -263,6 +262,19 @@ static struct resource realview_pb1176_isp1761_resources[] = {
},
};
+static struct resource pmu_resource = {
+ .start = IRQ_DC1176_CORE_PMU,
+ .end = IRQ_DC1176_CORE_PMU,
+ .flags = IORESOURCE_IRQ,
+};
+
+static struct platform_device pmu_device = {
+ .name = "arm-pmu",
+ .id = ARM_PMU_DEVICE_CPU,
+ .num_resources = 1,
+ .resource = &pmu_resource,
+};
+
static void __init gic_init_irq(void)
{
/* ARM1176 DevChip GIC, primary */
@@ -324,6 +336,7 @@ static void __init realview_pb1176_init(void)
realview_eth_register(NULL, realview_pb1176_smsc911x_resources);
platform_device_register(&realview_i2c_device);
realview_usb_register(realview_pb1176_isp1761_resources);
+ platform_device_register(&pmu_device);
for (i = 0; i < ARRAY_SIZE(amba_devs); i++) {
struct amba_device *d = amba_devs[i];
diff --git a/arch/arm/mach-realview/realview_pb11mp.c b/arch/arm/mach-realview/realview_pb11mp.c
index 3e02731af959..7fbefbbebaf0 100644
--- a/arch/arm/mach-realview/realview_pb11mp.c
+++ b/arch/arm/mach-realview/realview_pb11mp.c
@@ -31,8 +31,8 @@
#include <asm/irq.h>
#include <asm/leds.h>
#include <asm/mach-types.h>
+#include <asm/pmu.h>
#include <asm/hardware/gic.h>
-#include <asm/hardware/icst307.h>
#include <asm/hardware/cache-l2x0.h>
#include <asm/localtimer.h>
@@ -45,7 +45,6 @@
#include <mach/irqs.h>
#include "core.h"
-#include "clock.h"
static struct map_desc realview_pb11mp_io_desc[] __initdata = {
{
@@ -260,6 +259,36 @@ static struct resource realview_pb11mp_isp1761_resources[] = {
},
};
+static struct resource pmu_resources[] = {
+ [0] = {
+ .start = IRQ_TC11MP_PMU_CPU0,
+ .end = IRQ_TC11MP_PMU_CPU0,
+ .flags = IORESOURCE_IRQ,
+ },
+ [1] = {
+ .start = IRQ_TC11MP_PMU_CPU1,
+ .end = IRQ_TC11MP_PMU_CPU1,
+ .flags = IORESOURCE_IRQ,
+ },
+ [2] = {
+ .start = IRQ_TC11MP_PMU_CPU2,
+ .end = IRQ_TC11MP_PMU_CPU2,
+ .flags = IORESOURCE_IRQ,
+ },
+ [3] = {
+ .start = IRQ_TC11MP_PMU_CPU3,
+ .end = IRQ_TC11MP_PMU_CPU3,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device pmu_device = {
+ .name = "arm-pmu",
+ .id = ARM_PMU_DEVICE_CPU,
+ .num_resources = ARRAY_SIZE(pmu_resources),
+ .resource = pmu_resources,
+};
+
static void __init gic_init_irq(void)
{
unsigned int pldctrl;
@@ -329,6 +358,7 @@ static void __init realview_pb11mp_init(void)
platform_device_register(&realview_i2c_device);
platform_device_register(&realview_cf_device);
realview_usb_register(realview_pb11mp_isp1761_resources);
+ platform_device_register(&pmu_device);
for (i = 0; i < ARRAY_SIZE(amba_devs); i++) {
struct amba_device *d = amba_devs[i];
diff --git a/arch/arm/mach-realview/realview_pba8.c b/arch/arm/mach-realview/realview_pba8.c
index fe4e25c4201a..d3c113b3dfce 100644
--- a/arch/arm/mach-realview/realview_pba8.c
+++ b/arch/arm/mach-realview/realview_pba8.c
@@ -30,8 +30,8 @@
#include <asm/irq.h>
#include <asm/leds.h>
#include <asm/mach-types.h>
+#include <asm/pmu.h>
#include <asm/hardware/gic.h>
-#include <asm/hardware/icst307.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
@@ -42,7 +42,6 @@
#include <mach/irqs.h>
#include "core.h"
-#include "clock.h"
static struct map_desc realview_pba8_io_desc[] __initdata = {
{
@@ -250,6 +249,19 @@ static struct resource realview_pba8_isp1761_resources[] = {
},
};
+static struct resource pmu_resource = {
+ .start = IRQ_PBA8_PMU,
+ .end = IRQ_PBA8_PMU,
+ .flags = IORESOURCE_IRQ,
+};
+
+static struct platform_device pmu_device = {
+ .name = "arm-pmu",
+ .id = ARM_PMU_DEVICE_CPU,
+ .num_resources = 1,
+ .resource = &pmu_resource,
+};
+
static void __init gic_init_irq(void)
{
/* ARM PB-A8 on-board GIC */
@@ -296,6 +308,7 @@ static void __init realview_pba8_init(void)
platform_device_register(&realview_i2c_device);
platform_device_register(&realview_cf_device);
realview_usb_register(realview_pba8_isp1761_resources);
+ platform_device_register(&pmu_device);
for (i = 0; i < ARRAY_SIZE(amba_devs); i++) {
struct amba_device *d = amba_devs[i];
diff --git a/arch/arm/mach-realview/realview_pbx.c b/arch/arm/mach-realview/realview_pbx.c
index d94857eb0690..a235ba30996b 100644
--- a/arch/arm/mach-realview/realview_pbx.c
+++ b/arch/arm/mach-realview/realview_pbx.c
@@ -29,6 +29,7 @@
#include <asm/irq.h>
#include <asm/leds.h>
#include <asm/mach-types.h>
+#include <asm/pmu.h>
#include <asm/smp_twd.h>
#include <asm/hardware/gic.h>
#include <asm/hardware/cache-l2x0.h>
@@ -270,6 +271,36 @@ static struct resource realview_pbx_isp1761_resources[] = {
},
};
+static struct resource pmu_resources[] = {
+ [0] = {
+ .start = IRQ_PBX_PMU_CPU0,
+ .end = IRQ_PBX_PMU_CPU0,
+ .flags = IORESOURCE_IRQ,
+ },
+ [1] = {
+ .start = IRQ_PBX_PMU_CPU1,
+ .end = IRQ_PBX_PMU_CPU1,
+ .flags = IORESOURCE_IRQ,
+ },
+ [2] = {
+ .start = IRQ_PBX_PMU_CPU2,
+ .end = IRQ_PBX_PMU_CPU2,
+ .flags = IORESOURCE_IRQ,
+ },
+ [3] = {
+ .start = IRQ_PBX_PMU_CPU3,
+ .end = IRQ_PBX_PMU_CPU3,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device pmu_device = {
+ .name = "arm-pmu",
+ .id = ARM_PMU_DEVICE_CPU,
+ .num_resources = ARRAY_SIZE(pmu_resources),
+ .resource = pmu_resources,
+};
+
static void __init gic_init_irq(void)
{
/* ARM PBX on-board GIC */
@@ -354,6 +385,7 @@ static void __init realview_pbx_init(void)
/* 16KB way size, 8-way associativity, parity disabled
* Bits: .. 0 0 0 0 1 00 1 0 1 001 0 000 0 .... .... .... */
l2x0_init(l2x0_base, 0x02520000, 0xc0000fff);
+ platform_device_register(&pmu_device);
}
#endif
diff --git a/arch/arm/mach-s3c2410/Kconfig b/arch/arm/mach-s3c2410/Kconfig
index 554731868b07..7245a55795dc 100644
--- a/arch/arm/mach-s3c2410/Kconfig
+++ b/arch/arm/mach-s3c2410/Kconfig
@@ -6,6 +6,7 @@ config CPU_S3C2410
bool
depends on ARCH_S3C2410
select CPU_ARM920T
+ select S3C_GPIO_PULL_UP
select S3C2410_CLOCK
select S3C2410_GPIO
select CPU_LLSERIAL_S3C2410
@@ -76,6 +77,7 @@ config ARCH_H1940
select PM_H1940 if PM
select S3C_DEV_USB_HOST
select S3C_DEV_NAND
+ select S3C2410_SETUP_TS
help
Say Y here if you are using the HP IPAQ H1940
@@ -95,12 +97,19 @@ config PM_H1940
config MACH_N30
bool "Acer N30 family"
select CPU_S3C2410
+ select MACH_N35
select S3C_DEV_USB_HOST
select S3C_DEV_NAND
help
Say Y here if you want support for the Acer N30, Acer N35,
Navman PiN570, Yakumo AlphaX or Airis NC05 PDAs.
+config MACH_N35
+ bool
+ help
+ Internal node in order to enable support for Acer N35 if Acer N30 is
+ selected.
+
config ARCH_BAST
bool "Simtec Electronics BAST (EB2410ITX)"
select CPU_S3C2410
@@ -110,6 +119,7 @@ config ARCH_BAST
select MACH_BAST_IDE
select S3C24XX_DCLK
select ISA
+ select S3C_DEV_HWMON
select S3C_DEV_USB_HOST
select S3C_DEV_NAND
help
diff --git a/arch/arm/mach-s3c2410/Makefile.boot b/arch/arm/mach-s3c2410/Makefile.boot
index 7dab2a0325b5..58c1dd7f8e1d 100644
--- a/arch/arm/mach-s3c2410/Makefile.boot
+++ b/arch/arm/mach-s3c2410/Makefile.boot
@@ -1,3 +1,7 @@
- zreladdr-y := 0x30008000
-params_phys-y := 0x30000100
-
+ifeq ($(CONFIG_PM_H1940),y)
+ zreladdr-y := 0x30108000
+ params_phys-y := 0x30100100
+else
+ zreladdr-y := 0x30008000
+ params_phys-y := 0x30000100
+endif
diff --git a/arch/arm/mach-s3c2410/h1940-bluetooth.c b/arch/arm/mach-s3c2410/h1940-bluetooth.c
index a3f3c7b1ca38..8cdeb14af592 100644
--- a/arch/arm/mach-s3c2410/h1940-bluetooth.c
+++ b/arch/arm/mach-s3c2410/h1940-bluetooth.c
@@ -33,14 +33,15 @@ static void h1940bt_enable(int on)
h1940_latch_control(0, H1940_LATCH_BLUETOOTH_POWER);
/* Reset the chip */
mdelay(10);
- s3c2410_gpio_setpin(S3C2410_GPH(1), 1);
+
+ gpio_set_value(S3C2410_GPH(1), 1);
mdelay(10);
- s3c2410_gpio_setpin(S3C2410_GPH(1), 0);
+ gpio_set_value(S3C2410_GPH(1), 0);
}
else {
- s3c2410_gpio_setpin(S3C2410_GPH(1), 1);
+ gpio_set_value(S3C2410_GPH(1), 1);
mdelay(10);
- s3c2410_gpio_setpin(S3C2410_GPH(1), 0);
+ gpio_set_value(S3C2410_GPH(1), 0);
mdelay(10);
h1940_latch_control(H1940_LATCH_BLUETOOTH_POWER, 0);
}
@@ -61,15 +62,21 @@ static int __devinit h1940bt_probe(struct platform_device *pdev)
struct rfkill *rfk;
int ret = 0;
+ ret = gpio_request(S3C2410_GPH(1), dev_name(&pdev->dev));
+ if (ret) {
+ dev_err(&pdev->dev, "could not get GPH1\n");
+ return ret;
+ }
+
/* Configures BT serial port GPIOs */
- s3c2410_gpio_cfgpin(S3C2410_GPH(0), S3C2410_GPH0_nCTS0);
- s3c2410_gpio_pullup(S3C2410_GPH(0), 1);
- s3c2410_gpio_cfgpin(S3C2410_GPH(1), S3C2410_GPIO_OUTPUT);
- s3c2410_gpio_pullup(S3C2410_GPH(1), 1);
- s3c2410_gpio_cfgpin(S3C2410_GPH(2), S3C2410_GPH2_TXD0);
- s3c2410_gpio_pullup(S3C2410_GPH(2), 1);
- s3c2410_gpio_cfgpin(S3C2410_GPH(3), S3C2410_GPH3_RXD0);
- s3c2410_gpio_pullup(S3C2410_GPH(3), 1);
+ s3c_gpio_cfgpin(S3C2410_GPH(0), S3C2410_GPH0_nCTS0);
+ s3c_gpio_cfgpull(S3C2410_GPH(0), S3C_GPIO_PULL_NONE);
+ s3c_gpio_cfgpin(S3C2410_GPH(1), S3C2410_GPIO_OUTPUT);
+ s3c_gpio_cfgpull(S3C2410_GPH(1), S3C_GPIO_PULL_NONE);
+ s3c_gpio_cfgpin(S3C2410_GPH(2), S3C2410_GPH2_TXD0);
+ s3c_gpio_cfgpull(S3C2410_GPH(2), S3C_GPIO_PULL_NONE);
+ s3c_gpio_cfgpin(S3C2410_GPH(3), S3C2410_GPH3_RXD0);
+ s3c_gpio_cfgpull(S3C2410_GPH(3), S3C_GPIO_PULL_NONE);
rfk = rfkill_alloc(DRV_NAME, &pdev->dev, RFKILL_TYPE_BLUETOOTH,
@@ -100,6 +107,7 @@ static int h1940bt_remove(struct platform_device *pdev)
struct rfkill *rfk = platform_get_drvdata(pdev);
platform_set_drvdata(pdev, NULL);
+ gpio_free(S3C2410_GPH(1));
if (rfk) {
rfkill_unregister(rfk);
diff --git a/arch/arm/mach-s3c2410/include/mach/dma.h b/arch/arm/mach-s3c2410/include/mach/dma.h
index 08ac5f96c012..cf68136cc668 100644
--- a/arch/arm/mach-s3c2410/include/mach/dma.h
+++ b/arch/arm/mach-s3c2410/include/mach/dma.h
@@ -54,7 +54,7 @@ enum dma_ch {
#define DMACH_LOW_LEVEL (1<<28) /* use this to specify hardware ch no */
/* we have 4 dma channels */
-#ifndef CONFIG_CPU_S3C2443
+#if !defined(CONFIG_CPU_S3C2443) && !defined(CONFIG_CPU_S3C2416)
#define S3C_DMA_CHANNELS (4)
#else
#define S3C_DMA_CHANNELS (6)
diff --git a/arch/arm/mach-s3c2410/include/mach/gpio-fns.h b/arch/arm/mach-s3c2410/include/mach/gpio-fns.h
index 035a493952db..f453c4f2cb8e 100644
--- a/arch/arm/mach-s3c2410/include/mach/gpio-fns.h
+++ b/arch/arm/mach-s3c2410/include/mach/gpio-fns.h
@@ -10,14 +10,28 @@
* published by the Free Software Foundation.
*/
+#ifndef __MACH_GPIO_FNS_H
+#define __MACH_GPIO_FNS_H __FILE__
+
/* These functions are in the to-be-removed category and it is strongly
* encouraged not to use these in new code. They will be marked deprecated
* very soon.
*
* Most of the functionality can be either replaced by the gpiocfg calls
* for the s3c platform or by the generic GPIOlib API.
+ *
+ * As of 2.6.35-rc, these will be removed, with the few drivers using them
+ * either replaced or given a wrapper until the calls can be removed.
*/
+#include <plat/gpio-cfg.h>
+
+static inline void s3c2410_gpio_cfgpin(unsigned int pin, unsigned int cfg)
+{
+ /* 1:1 mapping between cfgpin and setcfg calls at the moment */
+ s3c_gpio_cfgpin(pin, cfg);
+}
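+
+/*
+ * A comparable wrapper for s3c2410_gpio_pullup() is not added by this
+ * patch; as a sketch (assuming s3c_gpio_setpull() from <plat/gpio-cfg.h>
+ * and the "to = 1 => disable the pull-up" convention documented later in
+ * this header), the old on/off argument would map onto the new pull
+ * configuration like so:
+ *
+ *	static inline void s3c2410_gpio_pullup(unsigned int pin, unsigned int to)
+ *	{
+ *		s3c_gpio_setpull(pin, to ? S3C_GPIO_PULL_NONE : S3C_GPIO_PULL_UP);
+ *	}
+ */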
+
/* external functions for GPIO support
*
* These allow various different clients to access the same GPIO
@@ -25,17 +39,6 @@
* GPIO register, then it is safe to ioremap/__raw_{read|write} to it.
*/
-/* s3c2410_gpio_cfgpin
- *
- * set the configuration of the given pin to the value passed.
- *
- * eg:
- * s3c2410_gpio_cfgpin(S3C2410_GPA(0), S3C2410_GPA0_ADDR0);
- * s3c2410_gpio_cfgpin(S3C2410_GPE(8), S3C2410_GPE8_SDDAT1);
-*/
-
-extern void s3c2410_gpio_cfgpin(unsigned int pin, unsigned int function);
-
extern unsigned int s3c2410_gpio_getcfg(unsigned int pin);
/* s3c2410_gpio_getirq
@@ -73,6 +76,14 @@ extern int s3c2410_gpio_irqfilter(unsigned int pin, unsigned int on,
/* s3c2410_gpio_pullup
*
+ * This call should be replaced with s3c_gpio_setpull().
+ *
+ * As a note, the s3c24xx series devices currently make no distinction
+ * between pull-up and pull-down; only an on/off configuration is available.
+ */
+
+/* s3c2410_gpio_pullup
+ *
* configure the pull-up control on the given pin
*
* to = 1 => disable the pull-up
@@ -86,18 +97,8 @@ extern int s3c2410_gpio_irqfilter(unsigned int pin, unsigned int on,
extern void s3c2410_gpio_pullup(unsigned int pin, unsigned int to);
-/* s3c2410_gpio_getpull
- *
- * Read the state of the pull-up on a given pin
- *
- * return:
- * < 0 => error code
- * 0 => enabled
- * 1 => disabled
-*/
-
-extern int s3c2410_gpio_getpull(unsigned int pin);
-
extern void s3c2410_gpio_setpin(unsigned int pin, unsigned int to);
extern unsigned int s3c2410_gpio_getpin(unsigned int pin);
+
+#endif /* __MACH_GPIO_FNS_H */
diff --git a/arch/arm/mach-s3c2410/include/mach/gpio-nrs.h b/arch/arm/mach-s3c2410/include/mach/gpio-nrs.h
index 2edbb9c88ab3..4f7bf3272e87 100644
--- a/arch/arm/mach-s3c2410/include/mach/gpio-nrs.h
+++ b/arch/arm/mach-s3c2410/include/mach/gpio-nrs.h
@@ -16,15 +16,28 @@
#define S3C2410_GPIONO(bank,offset) ((bank) + (offset))
-#define S3C2410_GPIO_BANKA (32*0)
-#define S3C2410_GPIO_BANKB (32*1)
-#define S3C2410_GPIO_BANKC (32*2)
-#define S3C2410_GPIO_BANKD (32*3)
-#define S3C2410_GPIO_BANKE (32*4)
-#define S3C2410_GPIO_BANKF (32*5)
#define S3C2410_GPIO_BANKG (32*6)
#define S3C2410_GPIO_BANKH (32*7)
+/* GPIO sizes for various SoCs:
+ *
+ * 2442
+ * 2410 2412 2440 2443 2416
+ * ---- ---- ---- ---- ----
+ * A 23 22 25 16 25
+ * B 11 11 11 11 9
+ * C 16 15 16 16 16
+ * D 16 16 16 16 16
+ * E 16 16 16 16 16
+ * F 8 8 8 8 8
+ * G 16 16 16 16 8
+ * H 11 11 9 15 15
+ * J -- -- 13 16 --
+ * K -- -- -- -- 16
+ * L -- -- -- 15 7
+ * M -- -- -- 2 2
+ */
+
/* GPIO bank sizes */
#define S3C2410_GPIO_A_NR (32)
#define S3C2410_GPIO_B_NR (32)
@@ -34,6 +47,10 @@
#define S3C2410_GPIO_F_NR (32)
#define S3C2410_GPIO_G_NR (32)
#define S3C2410_GPIO_H_NR (32)
+#define S3C2410_GPIO_J_NR (32) /* technically 16. */
+#define S3C2410_GPIO_K_NR (32) /* technically 16. */
+#define S3C2410_GPIO_L_NR (32) /* technically 15. */
+#define S3C2410_GPIO_M_NR (32) /* technically 2. */
#if CONFIG_S3C_GPIO_SPACE != 0
#error CONFIG_S3C_GPIO_SPACE cannot be zero at the moment
@@ -53,6 +70,10 @@ enum s3c_gpio_number {
S3C2410_GPIO_F_START = S3C2410_GPIO_NEXT(S3C2410_GPIO_E),
S3C2410_GPIO_G_START = S3C2410_GPIO_NEXT(S3C2410_GPIO_F),
S3C2410_GPIO_H_START = S3C2410_GPIO_NEXT(S3C2410_GPIO_G),
+ S3C2410_GPIO_J_START = S3C2410_GPIO_NEXT(S3C2410_GPIO_H),
+ S3C2410_GPIO_K_START = S3C2410_GPIO_NEXT(S3C2410_GPIO_J),
+ S3C2410_GPIO_L_START = S3C2410_GPIO_NEXT(S3C2410_GPIO_K),
+ S3C2410_GPIO_M_START = S3C2410_GPIO_NEXT(S3C2410_GPIO_L),
};
#endif /* __ASSEMBLY__ */
@@ -67,6 +88,10 @@ enum s3c_gpio_number {
#define S3C2410_GPF(_nr) (S3C2410_GPIO_F_START + (_nr))
#define S3C2410_GPG(_nr) (S3C2410_GPIO_G_START + (_nr))
#define S3C2410_GPH(_nr) (S3C2410_GPIO_H_START + (_nr))
+#define S3C2410_GPJ(_nr) (S3C2410_GPIO_J_START + (_nr))
+#define S3C2410_GPK(_nr) (S3C2410_GPIO_K_START + (_nr))
+#define S3C2410_GPL(_nr) (S3C2410_GPIO_L_START + (_nr))
+#define S3C2410_GPM(_nr) (S3C2410_GPIO_M_START + (_nr))
/* compatibility until drivers can be modified */
diff --git a/arch/arm/mach-s3c2410/include/mach/gpio-track.h b/arch/arm/mach-s3c2410/include/mach/gpio-track.h
index acb259103808..d67819dde42a 100644
--- a/arch/arm/mach-s3c2410/include/mach/gpio-track.h
+++ b/arch/arm/mach-s3c2410/include/mach/gpio-track.h
@@ -23,11 +23,11 @@ static inline struct s3c_gpio_chip *s3c_gpiolib_getchip(unsigned int pin)
{
struct s3c_gpio_chip *chip;
- if (pin > S3C2410_GPG(10))
+ if (pin > S3C_GPIO_END)
return NULL;
chip = &s3c24xx_gpios[pin/32];
- return (S3C2410_GPIO_OFFSET(pin) < chip->chip.ngpio) ? chip : NULL;
+ return ((pin - chip->chip.base) < chip->chip.ngpio) ? chip : NULL;
}
#endif /* __ASM_ARCH_GPIO_CORE_H */
diff --git a/arch/arm/mach-s3c2410/include/mach/gpio.h b/arch/arm/mach-s3c2410/include/mach/gpio.h
index 15f0b3e7ce69..b649bf2ccd5c 100644
--- a/arch/arm/mach-s3c2410/include/mach/gpio.h
+++ b/arch/arm/mach-s3c2410/include/mach/gpio.h
@@ -20,10 +20,18 @@
* devices that need GPIO.
*/
+#ifdef CONFIG_CPU_S3C244X
+#define ARCH_NR_GPIOS (32 * 9 + CONFIG_S3C24XX_GPIO_EXTRA)
+#else
#define ARCH_NR_GPIOS (256 + CONFIG_S3C24XX_GPIO_EXTRA)
+#endif
#include <asm-generic/gpio.h>
#include <mach/gpio-nrs.h>
#include <mach/gpio-fns.h>
+#ifdef CONFIG_CPU_S3C24XX
+#define S3C_GPIO_END (S3C2410_GPIO_BANKJ + 32)
+#else
#define S3C_GPIO_END (S3C2410_GPIO_BANKH + 32)
+#endif
diff --git a/arch/arm/mach-s3c2410/include/mach/irqs.h b/arch/arm/mach-s3c2410/include/mach/irqs.h
index 6c12c6312ad8..11bb0f08fe6a 100644
--- a/arch/arm/mach-s3c2410/include/mach/irqs.h
+++ b/arch/arm/mach-s3c2410/include/mach/irqs.h
@@ -115,6 +115,26 @@
#define IRQ_S3C2412_SDI S3C2410_IRQSUB(13)
#define IRQ_S3C2412_CF S3C2410_IRQSUB(14)
+
+#define IRQ_S3C2416_EINT8t15 S3C2410_IRQ(5)
+#define IRQ_S3C2416_DMA S3C2410_IRQ(17)
+#define IRQ_S3C2416_UART3 S3C2410_IRQ(18)
+#define IRQ_S3C2416_SDI1 S3C2410_IRQ(20)
+#define IRQ_S3C2416_SDI0 S3C2410_IRQ(21)
+
+#define IRQ_S3C2416_LCD2 S3C2410_IRQSUB(15)
+#define IRQ_S3C2416_LCD3 S3C2410_IRQSUB(16)
+#define IRQ_S3C2416_LCD4 S3C2410_IRQSUB(17)
+#define IRQ_S3C2416_DMA0 S3C2410_IRQSUB(18)
+#define IRQ_S3C2416_DMA1 S3C2410_IRQSUB(19)
+#define IRQ_S3C2416_DMA2 S3C2410_IRQSUB(20)
+#define IRQ_S3C2416_DMA3 S3C2410_IRQSUB(21)
+#define IRQ_S3C2416_DMA4 S3C2410_IRQSUB(22)
+#define IRQ_S3C2416_DMA5 S3C2410_IRQSUB(23)
+#define IRQ_S3C2416_WDT S3C2410_IRQSUB(27)
+#define IRQ_S3C2416_AC97 S3C2410_IRQSUB(28)
+
+
/* extra irqs for s3c2440 */
#define IRQ_S3C2440_CAM_C S3C2410_IRQSUB(11) /* S3C2443 too */
@@ -130,7 +150,10 @@
#define IRQ_S3C2443_HSMMC S3C2410_IRQ(20) /* IRQ_SDI */
#define IRQ_S3C2443_NAND S3C2410_IRQ(24) /* reserved */
+#define IRQ_S3C2416_HSMMC0 S3C2410_IRQ(21) /* S3C2416/S3C2450 */
+
#define IRQ_HSMMC0 IRQ_S3C2443_HSMMC
+#define IRQ_HSMMC1 IRQ_S3C2416_HSMMC0
#define IRQ_S3C2443_LCD1 S3C2410_IRQSUB(14)
#define IRQ_S3C2443_LCD2 S3C2410_IRQSUB(15)
@@ -152,7 +175,7 @@
#define IRQ_S3C2443_WDT S3C2410_IRQSUB(27)
#define IRQ_S3C2443_AC97 S3C2410_IRQSUB(28)
-#ifdef CONFIG_CPU_S3C2443
+#if defined(CONFIG_CPU_S3C2443) || defined(CONFIG_CPU_S3C2416)
#define NR_IRQS (IRQ_S3C2443_AC97+1)
#else
#define NR_IRQS (IRQ_S3C2440_AC97+1)
@@ -164,6 +187,9 @@
#define IRQ_S3CUART_TX3 IRQ_S3C2443_TX3
#define IRQ_S3CUART_ERR3 IRQ_S3C2443_ERR3
+#define IRQ_LCD_VSYNC IRQ_S3C2443_LCD3
+#define IRQ_LCD_SYSTEM IRQ_S3C2443_LCD2
+
#ifdef CONFIG_CPU_S3C2440
#define IRQ_S3C244x_AC97 IRQ_S3C2440_AC97
#else
diff --git a/arch/arm/mach-s3c2410/include/mach/map.h b/arch/arm/mach-s3c2410/include/mach/map.h
index b049e61460b6..cd3983ad4160 100644
--- a/arch/arm/mach-s3c2410/include/mach/map.h
+++ b/arch/arm/mach-s3c2410/include/mach/map.h
@@ -63,9 +63,11 @@
#define S3C2440_PA_AC97 (0x5B000000)
#define S3C2440_SZ_AC97 SZ_1M
-/* S3C2443 High-speed SD/MMC */
+/* S3C2443/S3C2416 High-speed SD/MMC */
#define S3C2443_PA_HSMMC (0x4A800000)
-#define S3C2443_SZ_HSMMC (256)
+#define S3C2416_PA_HSMMC0 (0x4AC00000)
+
+#define S3C2443_PA_FB (0x4C800000)
/* S3C2412 memory and IO controls */
#define S3C2412_PA_SSMC (0x4F000000)
@@ -106,10 +108,13 @@
#define S3C24XX_PA_SDI S3C2410_PA_SDI
#define S3C24XX_PA_NAND S3C2410_PA_NAND
+#define S3C_PA_FB S3C2443_PA_FB
#define S3C_PA_IIC S3C2410_PA_IIC
#define S3C_PA_UART S3C24XX_PA_UART
#define S3C_PA_USBHOST S3C2410_PA_USBHOST
#define S3C_PA_HSMMC0 S3C2443_PA_HSMMC
+#define S3C_PA_HSMMC1 S3C2416_PA_HSMMC0
+#define S3C_PA_WDT S3C2410_PA_WATCHDOG
#define S3C_PA_NAND S3C24XX_PA_NAND
#endif /* __ASM_ARCH_MAP_H */
diff --git a/arch/arm/mach-s3c2410/include/mach/regs-clock.h b/arch/arm/mach-s3c2410/include/mach/regs-clock.h
index 9a0d169be137..3415b60082d7 100644
--- a/arch/arm/mach-s3c2410/include/mach/regs-clock.h
+++ b/arch/arm/mach-s3c2410/include/mach/regs-clock.h
@@ -161,4 +161,6 @@
#endif /* CONFIG_CPU_S3C2412 | CONFIG_CPU_S3C2413 */
+#define S3C2416_CLKDIV2 S3C2410_CLKREG(0x28)
+
#endif /* __ASM_ARM_REGS_CLOCK */
diff --git a/arch/arm/mach-s3c2410/include/mach/regs-dsc.h b/arch/arm/mach-s3c2410/include/mach/regs-dsc.h
index 3c3853cd3cf7..98fd4a05587c 100644
--- a/arch/arm/mach-s3c2410/include/mach/regs-dsc.h
+++ b/arch/arm/mach-s3c2410/include/mach/regs-dsc.h
@@ -19,6 +19,42 @@
#define S3C2412_DSC1 S3C2410_GPIOREG(0xe0)
#endif
+#if defined(CONFIG_CPU_S3C2416)
+#define S3C2416_DSC0 S3C2410_GPIOREG(0xc0)
+#define S3C2416_DSC1 S3C2410_GPIOREG(0xc4)
+#define S3C2416_DSC2 S3C2410_GPIOREG(0xc8)
+#define S3C2416_DSC3 S3C2410_GPIOREG(0x110)
+
+#define S3C2416_SELECT_DSC0 (0 << 30)
+#define S3C2416_SELECT_DSC1 (1 << 30)
+#define S3C2416_SELECT_DSC2 (2 << 30)
+#define S3C2416_SELECT_DSC3 (3 << 30)
+
+#define S3C2416_DSC_GETSHIFT(x) (x & 30)
+
+#define S3C2416_DSC0_CF (S3C2416_SELECT_DSC0 | 28)
+#define S3C2416_DSC0_CF_5mA (0 << 28)
+#define S3C2416_DSC0_CF_10mA (1 << 28)
+#define S3C2416_DSC0_CF_15mA (2 << 28)
+#define S3C2416_DSC0_CF_21mA (3 << 28)
+#define S3C2416_DSC0_CF_MASK (3 << 28)
+
+#define S3C2416_DSC0_nRBE (S3C2416_SELECT_DSC0 | 26)
+#define S3C2416_DSC0_nRBE_5mA (0 << 26)
+#define S3C2416_DSC0_nRBE_10mA (1 << 26)
+#define S3C2416_DSC0_nRBE_15mA (2 << 26)
+#define S3C2416_DSC0_nRBE_21mA (3 << 26)
+#define S3C2416_DSC0_nRBE_MASK (3 << 26)
+
+#define S3C2416_DSC0_nROE (S3C2416_SELECT_DSC0 | 24)
+#define S3C2416_DSC0_nROE_5mA (0 << 24)
+#define S3C2416_DSC0_nROE_10mA (1 << 24)
+#define S3C2416_DSC0_nROE_15mA (2 << 24)
+#define S3C2416_DSC0_nROE_21mA (3 << 24)
+#define S3C2416_DSC0_nROE_MASK (3 << 24)
+
+#endif
+
#if defined(CONFIG_CPU_S3C244X)
#define S3C2440_DSC0 S3C2410_GPIOREG(0xc4)
diff --git a/arch/arm/mach-s3c2410/include/mach/regs-gpio.h b/arch/arm/mach-s3c2410/include/mach/regs-gpio.h
index fd672f330bf2..a0a89d429296 100644
--- a/arch/arm/mach-s3c2410/include/mach/regs-gpio.h
+++ b/arch/arm/mach-s3c2410/include/mach/regs-gpio.h
@@ -17,29 +17,11 @@
#include <mach/gpio-nrs.h>
#ifdef CONFIG_CPU_S3C2400
-#define S3C24XX_GPIO_BASE(x) S3C2400_GPIO_BASE(x)
-#define S3C24XX_MISCCR S3C2400_MISCCR
+#define S3C24XX_MISCCR S3C2400_MISCCR
#else
-#define S3C24XX_GPIO_BASE(x) S3C2410_GPIO_BASE(x)
-#define S3C24XX_MISCCR S3C24XX_GPIOREG2(0x80)
+#define S3C24XX_MISCCR S3C24XX_GPIOREG2(0x80)
#endif /* CONFIG_CPU_S3C2400 */
-
-/* S3C2400 doesn't have a 1:1 mapping to S3C2410 gpio base pins */
-
-#define S3C2400_BANKNUM(pin) (((pin) & ~31) / 32)
-#define S3C2400_BASEA2B(pin) ((((pin) & ~31) >> 2))
-#define S3C2400_BASEC2H(pin) ((S3C2400_BANKNUM(pin) * 10) + \
- (2 * (S3C2400_BANKNUM(pin)-2)))
-
-#define S3C2400_GPIO_BASE(pin) (pin < S3C2410_GPIO_BANKC ? \
- S3C2400_BASEA2B(pin)+S3C24XX_VA_GPIO : \
- S3C2400_BASEC2H(pin)+S3C24XX_VA_GPIO)
-
-
-#define S3C2410_GPIO_BASE(pin) ((((pin) & ~31) >> 1) + S3C24XX_VA_GPIO)
-#define S3C2410_GPIO_OFFSET(pin) ((pin) & 31)
-
/* general configuration options */
#define S3C2410_GPIO_LEAVE (0xFFFFFFFF)
@@ -610,35 +592,73 @@
#define S3C2410_GPHUP S3C2410_GPIOREG(0x78)
#define S3C2410_GPH0_nCTS0 (0x02 << 0)
+#define S3C2416_GPH0_TXD0 (0x02 << 0)
#define S3C2410_GPH1_nRTS0 (0x02 << 2)
+#define S3C2416_GPH1_RXD0 (0x02 << 2)
#define S3C2410_GPH2_TXD0 (0x02 << 4)
+#define S3C2416_GPH2_TXD1 (0x02 << 4)
#define S3C2410_GPH3_RXD0 (0x02 << 6)
+#define S3C2416_GPH3_RXD1 (0x02 << 6)
#define S3C2410_GPH4_TXD1 (0x02 << 8)
+#define S3C2416_GPH4_TXD2 (0x02 << 8)
#define S3C2410_GPH5_RXD1 (0x02 << 10)
+#define S3C2416_GPH5_RXD2 (0x02 << 10)
#define S3C2410_GPH6_TXD2 (0x02 << 12)
+#define S3C2416_GPH6_TXD3 (0x02 << 12)
#define S3C2410_GPH6_nRTS1 (0x03 << 12)
+#define S3C2416_GPH6_nRTS2 (0x03 << 12)
#define S3C2410_GPH7_RXD2 (0x02 << 14)
+#define S3C2416_GPH7_RXD3 (0x02 << 14)
#define S3C2410_GPH7_nCTS1 (0x03 << 14)
+#define S3C2416_GPH7_nCTS2 (0x03 << 14)
#define S3C2410_GPH8_UCLK (0x02 << 16)
+#define S3C2416_GPH8_nCTS0 (0x02 << 16)
#define S3C2410_GPH9_CLKOUT0 (0x02 << 18)
#define S3C2442_GPH9_nSPICS0 (0x03 << 18)
+#define S3C2416_GPH9_nRTS0 (0x02 << 18)
#define S3C2410_GPH10_CLKOUT1 (0x02 << 20)
+#define S3C2416_GPH10_nCTS1 (0x02 << 20)
+
+#define S3C2416_GPH11_nRTS1 (0x02 << 22)
+
+#define S3C2416_GPH12_EXTUARTCLK (0x02 << 24)
+
+#define S3C2416_GPH13_CLKOUT0 (0x02 << 26)
+
+#define S3C2416_GPH14_CLKOUT1 (0x02 << 28)
/* The S3C2412 and S3C2413 move the GPJ register set to after
* GPH, which means all registers after 0x80 are now offset by 0x10
* for the 2412/2413 from the 2410/2440/2442
*/
+/* S3C2443 and above */
+#define S3C2440_GPJCON S3C2410_GPIOREG(0xD0)
+#define S3C2440_GPJDAT S3C2410_GPIOREG(0xD4)
+#define S3C2440_GPJUP S3C2410_GPIOREG(0xD8)
+
+#define S3C2443_GPKCON S3C2410_GPIOREG(0xE0)
+#define S3C2443_GPKDAT S3C2410_GPIOREG(0xE4)
+#define S3C2443_GPKUP S3C2410_GPIOREG(0xE8)
+
+#define S3C2443_GPLCON S3C2410_GPIOREG(0xF0)
+#define S3C2443_GPLDAT S3C2410_GPIOREG(0xF4)
+#define S3C2443_GPLUP S3C2410_GPIOREG(0xF8)
+
+#define S3C2443_GPMCON S3C2410_GPIOREG(0x100)
+#define S3C2443_GPMDAT S3C2410_GPIOREG(0x104)
+#define S3C2443_GPMUP S3C2410_GPIOREG(0x108)
+
/* miscellaneous control */
#define S3C2400_MISCCR S3C2410_GPIOREG(0x54)
#define S3C2410_MISCCR S3C2410_GPIOREG(0x80)
@@ -686,6 +706,7 @@
#define S3C2412_MISCCR_CLK1_CLKsrc (0<<8)
#define S3C2410_MISCCR_USBSUSPND0 (1<<12)
+#define S3C2416_MISCCR_SEL_SUSPND (1<<12)
#define S3C2410_MISCCR_USBSUSPND1 (1<<13)
#define S3C2410_MISCCR_nRSTCON (1<<16)
@@ -695,6 +716,9 @@
#define S3C2410_MISCCR_nEN_SCLKE (1<<19) /* not 2412 */
#define S3C2410_MISCCR_SDSLEEP (7<<17)
+#define S3C2416_MISCCR_FLT_I2C (1<<24)
+#define S3C2416_MISCCR_HSSPI_EN2 (1<<31)
+
/* external interrupt control... */
/* S3C2410_EXTINT0 -> irq sense control for EINT0..EINT7
* S3C2410_EXTINT1 -> irq sense control for EINT8..EINT15
@@ -762,8 +786,11 @@
#define S3C2410_GSTATUS1_IDMASK (0xffff0000)
#define S3C2410_GSTATUS1_2410 (0x32410000)
#define S3C2410_GSTATUS1_2412 (0x32412001)
+#define S3C2410_GSTATUS1_2416 (0x32416003)
#define S3C2410_GSTATUS1_2440 (0x32440000)
#define S3C2410_GSTATUS1_2442 (0x32440aaa)
+/* some 2416 CPUs report this value also */
+#define S3C2410_GSTATUS1_2450 (0x32450003)
#define S3C2410_GSTATUS2_WTRESET (1<<2)
#define S3C2410_GSTATUS2_OFFRESET (1<<1)
diff --git a/arch/arm/mach-s3c2410/include/mach/regs-gpioj.h b/arch/arm/mach-s3c2410/include/mach/regs-gpioj.h
index 1202ca5e99f6..19575e061114 100644
--- a/arch/arm/mach-s3c2410/include/mach/regs-gpioj.h
+++ b/arch/arm/mach-s3c2410/include/mach/regs-gpioj.h
@@ -22,85 +22,49 @@
* pull up works like all other ports.
*/
-#define S3C2440_GPIO_BANKJ (416)
-
-#define S3C2440_GPJCON S3C2410_GPIOREG(0xd0)
-#define S3C2440_GPJDAT S3C2410_GPIOREG(0xd4)
-#define S3C2440_GPJUP S3C2410_GPIOREG(0xd8)
-
#define S3C2413_GPJCON S3C2410_GPIOREG(0x80)
#define S3C2413_GPJDAT S3C2410_GPIOREG(0x84)
#define S3C2413_GPJUP S3C2410_GPIOREG(0x88)
#define S3C2413_GPJSLPCON S3C2410_GPIOREG(0x8C)
-#define S3C2440_GPJ0 S3C2410_GPIONO(S3C2440_GPIO_BANKJ, 0)
-#define S3C2440_GPJ0_INP (0x00 << 0)
#define S3C2440_GPJ0_OUTP (0x01 << 0)
#define S3C2440_GPJ0_CAMDATA0 (0x02 << 0)
-#define S3C2440_GPJ1 S3C2410_GPIONO(S3C2440_GPIO_BANKJ, 1)
-#define S3C2440_GPJ1_INP (0x00 << 2)
#define S3C2440_GPJ1_OUTP (0x01 << 2)
#define S3C2440_GPJ1_CAMDATA1 (0x02 << 2)
-#define S3C2440_GPJ2 S3C2410_GPIONO(S3C2440_GPIO_BANKJ, 2)
-#define S3C2440_GPJ2_INP (0x00 << 4)
#define S3C2440_GPJ2_OUTP (0x01 << 4)
#define S3C2440_GPJ2_CAMDATA2 (0x02 << 4)
-#define S3C2440_GPJ3 S3C2410_GPIONO(S3C2440_GPIO_BANKJ, 3)
-#define S3C2440_GPJ3_INP (0x00 << 6)
#define S3C2440_GPJ3_OUTP (0x01 << 6)
#define S3C2440_GPJ3_CAMDATA3 (0x02 << 6)
-#define S3C2440_GPJ4 S3C2410_GPIONO(S3C2440_GPIO_BANKJ, 4)
-#define S3C2440_GPJ4_INP (0x00 << 8)
#define S3C2440_GPJ4_OUTP (0x01 << 8)
#define S3C2440_GPJ4_CAMDATA4 (0x02 << 8)
-#define S3C2440_GPJ5 S3C2410_GPIONO(S3C2440_GPIO_BANKJ, 5)
-#define S3C2440_GPJ5_INP (0x00 << 10)
#define S3C2440_GPJ5_OUTP (0x01 << 10)
#define S3C2440_GPJ5_CAMDATA5 (0x02 << 10)
-#define S3C2440_GPJ6 S3C2410_GPIONO(S3C2440_GPIO_BANKJ, 6)
-#define S3C2440_GPJ6_INP (0x00 << 12)
#define S3C2440_GPJ6_OUTP (0x01 << 12)
#define S3C2440_GPJ6_CAMDATA6 (0x02 << 12)
-#define S3C2440_GPJ7 S3C2410_GPIONO(S3C2440_GPIO_BANKJ, 7)
-#define S3C2440_GPJ7_INP (0x00 << 14)
#define S3C2440_GPJ7_OUTP (0x01 << 14)
#define S3C2440_GPJ7_CAMDATA7 (0x02 << 14)
-#define S3C2440_GPJ8 S3C2410_GPIONO(S3C2440_GPIO_BANKJ, 8)
-#define S3C2440_GPJ8_INP (0x00 << 16)
#define S3C2440_GPJ8_OUTP (0x01 << 16)
#define S3C2440_GPJ8_CAMPCLK (0x02 << 16)
-#define S3C2440_GPJ9 S3C2410_GPIONO(S3C2440_GPIO_BANKJ, 9)
-#define S3C2440_GPJ9_INP (0x00 << 18)
#define S3C2440_GPJ9_OUTP (0x01 << 18)
#define S3C2440_GPJ9_CAMVSYNC (0x02 << 18)
-#define S3C2440_GPJ10 S3C2410_GPIONO(S3C2440_GPIO_BANKJ, 10)
-#define S3C2440_GPJ10_INP (0x00 << 20)
#define S3C2440_GPJ10_OUTP (0x01 << 20)
#define S3C2440_GPJ10_CAMHREF (0x02 << 20)
-#define S3C2440_GPJ11 S3C2410_GPIONO(S3C2440_GPIO_BANKJ, 11)
-#define S3C2440_GPJ11_INP (0x00 << 22)
#define S3C2440_GPJ11_OUTP (0x01 << 22)
#define S3C2440_GPJ11_CAMCLKOUT (0x02 << 22)
-#define S3C2440_GPJ12 S3C2410_GPIONO(S3C2440_GPIO_BANKJ, 12)
-#define S3C2440_GPJ12_INP (0x00 << 24)
#define S3C2440_GPJ12_OUTP (0x01 << 24)
#define S3C2440_GPJ12_CAMRESET (0x02 << 24)
-#define S3C2443_GPJ13 S3C2410_GPIONO(S3C2440_GPIO_BANKJ, 13)
-#define S3C2443_GPJ14 S3C2410_GPIONO(S3C2440_GPIO_BANKJ, 14)
-#define S3C2443_GPJ15 S3C2410_GPIONO(S3C2440_GPIO_BANKJ, 15)
-
#endif /* __ASM_ARCH_REGS_GPIOJ_H */
diff --git a/arch/arm/mach-s3c2410/include/mach/regs-irq.h b/arch/arm/mach-s3c2410/include/mach/regs-irq.h
index de86ee8812bd..0f07ba30b1fb 100644
--- a/arch/arm/mach-s3c2410/include/mach/regs-irq.h
+++ b/arch/arm/mach-s3c2410/include/mach/regs-irq.h
@@ -27,6 +27,16 @@
#define S3C2410_SUBSRCPND S3C2410_IRQREG(0x018)
#define S3C2410_INTSUBMSK S3C2410_IRQREG(0x01C)
+#define S3C2416_PRIORITY_MODE1 S3C2410_IRQREG(0x030)
+#define S3C2416_PRIORITY_UPDATE1 S3C2410_IRQREG(0x034)
+#define S3C2416_SRCPND2 S3C2410_IRQREG(0x040)
+#define S3C2416_INTMOD2 S3C2410_IRQREG(0x044)
+#define S3C2416_INTMSK2 S3C2410_IRQREG(0x048)
+#define S3C2416_INTPND2 S3C2410_IRQREG(0x050)
+#define S3C2416_INTOFFSET2 S3C2410_IRQREG(0x054)
+#define S3C2416_PRIORITY_MODE2 S3C2410_IRQREG(0x070)
+#define S3C2416_PRIORITY_UPDATE2 S3C2410_IRQREG(0x074)
+
/* mask: 0=enable, 1=disable
* 1 bit EINT, 4=EINT4, 23=EINT23
* EINT0,1,2,3 are not handled here.
diff --git a/arch/arm/mach-s3c2410/include/mach/regs-s3c2416-mem.h b/arch/arm/mach-s3c2410/include/mach/regs-s3c2416-mem.h
new file mode 100644
index 000000000000..2f31b74974af
--- /dev/null
+++ b/arch/arm/mach-s3c2410/include/mach/regs-s3c2416-mem.h
@@ -0,0 +1,30 @@
+/* arch/arm/mach-s3c2410/include/mach/regs-s3c2416-mem.h
+ *
+ * Copyright (c) 2009 Yauhen Kharuzhy <jekhor@gmail.com>,
+ * as part of OpenInkpot project
+ * Copyright (c) 2009 Promwad Innovation Company
+ * Yauhen Kharuzhy <yauhen.kharuzhy@promwad.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * S3C2416 memory register definitions
+*/
+
+#ifndef __ASM_ARM_REGS_S3C2416_MEM
+#define __ASM_ARM_REGS_S3C2416_MEM
+
+#ifndef S3C2416_MEMREG
+#define S3C2416_MEMREG(x) (S3C24XX_VA_MEMCTRL + (x))
+#endif
+
+#define S3C2416_BANKCFG S3C2416_MEMREG(0x00)
+#define S3C2416_BANKCON1 S3C2416_MEMREG(0x04)
+#define S3C2416_BANKCON2 S3C2416_MEMREG(0x08)
+#define S3C2416_BANKCON3 S3C2416_MEMREG(0x0C)
+
+#define S3C2416_REFRESH S3C2416_MEMREG(0x10)
+#define S3C2416_TIMEOUT S3C2416_MEMREG(0x14)
+
+#endif /* __ASM_ARM_REGS_S3C2416_MEM */
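As a quick illustration for the reviewer (a sketch, not part of the patch): S3C2416_MEMREG() resolves to an offset inside the statically mapped memory-controller region, so the new definitions are consumed with the usual __raw_readl()/__raw_writel() accessors. The helper name below is invented for illustration.

/* Hedged sketch, assuming S3C24XX_VA_MEMCTRL is already mapped by the
 * platform iotable: read the bank configuration register through the
 * new S3C2416 definitions. s3c2416_read_bankcfg() is a made-up name.
 */
#include <linux/io.h>
#include <mach/regs-s3c2416-mem.h>

static unsigned long s3c2416_read_bankcfg(void)
{
	return __raw_readl(S3C2416_BANKCFG);	/* S3C24XX_VA_MEMCTRL + 0x00 */
}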
diff --git a/arch/arm/mach-s3c2410/include/mach/regs-s3c2416.h b/arch/arm/mach-s3c2410/include/mach/regs-s3c2416.h
new file mode 100644
index 000000000000..e443167efb87
--- /dev/null
+++ b/arch/arm/mach-s3c2410/include/mach/regs-s3c2416.h
@@ -0,0 +1,24 @@
+/* arch/arm/mach-s3c2410/include/mach/regs-s3c2416.h
+ *
+ * Copyright (c) 2009 Yauhen Kharuzhy <jekhor@gmail.com>,
+ * as part of OpenInkpot project
+ * Copyright (c) 2009 Promwad Innovation Company
+ * Yauhen Kharuzhy <yauhen.kharuzhy@promwad.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * S3C2416 specific register definitions
+*/
+
+#ifndef __ASM_ARCH_REGS_S3C2416_H
+#define __ASM_ARCH_REGS_S3C2416_H "s3c2416"
+
+#define S3C2416_SWRST (S3C24XX_VA_CLKPWR + 0x44)
+#define S3C2416_SWRST_RESET (0x533C2416)
+
+/* see regs-power.h for the other registers in the power block. */
+
+#endif /* __ASM_ARCH_REGS_S3C2416_H */
+
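The SWRST definition mirrors the S3C2443 one used later in this series (s3c2416_hard_reset() in mach-s3c2416/s3c2416.c writes S3C2443_SWRST_RESET to S3C2443_SWRST). A minimal sketch of the same reset expressed with the new names, purely for illustration:

/* Hedged sketch: trigger a software reset by writing the magic value
 * 0x533C2416 to the SWRST register, analogous to s3c2416_hard_reset()
 * further down in this series.
 */
#include <linux/io.h>
#include <mach/regs-s3c2416.h>

static void s3c2416_swrst_sketch(void)
{
	__raw_writel(S3C2416_SWRST_RESET, S3C2416_SWRST);
}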
diff --git a/arch/arm/mach-s3c2410/include/mach/regs-s3c2443-clock.h b/arch/arm/mach-s3c2410/include/mach/regs-s3c2443-clock.h
index d87ebe0cb625..08ab9dfb6ae6 100644
--- a/arch/arm/mach-s3c2410/include/mach/regs-s3c2443-clock.h
+++ b/arch/arm/mach-s3c2410/include/mach/regs-s3c2443-clock.h
@@ -83,8 +83,7 @@
#define S3C2443_HCLKCON_DMA4 (1<<4)
#define S3C2443_HCLKCON_DMA5 (1<<5)
#define S3C2443_HCLKCON_CAMIF (1<<8)
-#define S3C2443_HCLKCON_DISP (1<<9)
-#define S3C2443_HCLKCON_LCDC (1<<10)
+#define S3C2443_HCLKCON_LCDC (1<<9)
#define S3C2443_HCLKCON_USBH (1<<11)
#define S3C2443_HCLKCON_USBD (1<<12)
#define S3C2443_HCLKCON_HSMMC (1<<16)
diff --git a/arch/arm/mach-s3c2410/include/mach/uncompress.h b/arch/arm/mach-s3c2410/include/mach/uncompress.h
index 72f756c5e504..8b283f847daa 100644
--- a/arch/arm/mach-s3c2410/include/mach/uncompress.h
+++ b/arch/arm/mach-s3c2410/include/mach/uncompress.h
@@ -40,7 +40,9 @@ static void arch_detect_cpu(void)
cpuid &= S3C2410_GSTATUS1_IDMASK;
if (is_arm926() || cpuid == S3C2410_GSTATUS1_2440 ||
- cpuid == S3C2410_GSTATUS1_2442) {
+ cpuid == S3C2410_GSTATUS1_2442 ||
+ cpuid == S3C2410_GSTATUS1_2416 ||
+ cpuid == S3C2410_GSTATUS1_2450) {
fifo_mask = S3C2440_UFSTAT_TXMASK;
fifo_max = 63 << S3C2440_UFSTAT_TXSHIFT;
} else {
diff --git a/arch/arm/mach-s3c2410/mach-amlm5900.c b/arch/arm/mach-s3c2410/mach-amlm5900.c
index 7047317ed7f4..34fc05a4244b 100644
--- a/arch/arm/mach-s3c2410/mach-amlm5900.c
+++ b/arch/arm/mach-s3c2410/mach-amlm5900.c
@@ -56,6 +56,7 @@
#include <plat/iic.h>
#include <plat/devs.h>
#include <plat/cpu.h>
+#include <plat/gpio-cfg.h>
#ifdef CONFIG_MTD_PARTITIONS
@@ -225,8 +226,8 @@ static void amlm5900_init_pm(void)
} else {
enable_irq_wake(IRQ_EINT9);
/* configure the suspend/resume status pin */
- s3c2410_gpio_cfgpin(S3C2410_GPF(2), S3C2410_GPIO_OUTPUT);
- s3c2410_gpio_pullup(S3C2410_GPF(2), 0);
+ s3c_gpio_cfgpin(S3C2410_GPF(2), S3C2410_GPIO_OUTPUT);
+ s3c_gpio_setpull(S3C2410_GPF(2), S3C_GPIO_PULL_UP);
}
}
static void __init amlm5900_init(void)
diff --git a/arch/arm/mach-s3c2410/mach-bast.c b/arch/arm/mach-s3c2410/mach-bast.c
index 02b1b6220cba..c1f90f6fab42 100644
--- a/arch/arm/mach-s3c2410/mach-bast.c
+++ b/arch/arm/mach-s3c2410/mach-bast.c
@@ -61,6 +61,7 @@
#include <plat/devs.h>
#include <plat/cpu.h>
#include <plat/cpu-freq.h>
+#include <plat/gpio-cfg.h>
#include <plat/audio-simtec.h>
#include "usb-simtec.h"
@@ -216,15 +217,13 @@ static struct s3c2410_uartcfg bast_uartcfgs[] __initdata = {
static int bast_pm_suspend(struct sys_device *sd, pm_message_t state)
{
/* ensure that an nRESET is not generated on resume. */
- s3c2410_gpio_setpin(S3C2410_GPA(21), 1);
- s3c2410_gpio_cfgpin(S3C2410_GPA(21), S3C2410_GPIO_OUTPUT);
-
+ gpio_direction_output(S3C2410_GPA(21), 1);
return 0;
}
static int bast_pm_resume(struct sys_device *sd)
{
- s3c2410_gpio_cfgpin(S3C2410_GPA(21), S3C2410_GPA21_nRSTOUT);
+ s3c_gpio_cfgpin(S3C2410_GPA(21), S3C2410_GPA21_nRSTOUT);
return 0;
}
@@ -634,7 +633,7 @@ static void __init bast_map_io(void)
s3c24xx_register_clocks(bast_clocks, ARRAY_SIZE(bast_clocks));
- s3c_device_hwmon.dev.platform_data = &bast_hwmon_info;
+ s3c_hwmon_set_platdata(&bast_hwmon_info);
s3c24xx_init_io(bast_iodesc, ARRAY_SIZE(bast_iodesc));
s3c24xx_init_clocks(0);
@@ -658,6 +657,8 @@ static void __init bast_init(void)
nor_simtec_init();
simtec_audio_add(NULL, true, &bast_audio);
+ WARN_ON(gpio_request(S3C2410_GPA(21), "bast nreset"));
+
s3c_cpufreq_setboard(&bast_cpufreq);
}
diff --git a/arch/arm/mach-s3c2410/mach-h1940.c b/arch/arm/mach-s3c2410/mach-h1940.c
index fbedd0760941..779b45b3f80f 100644
--- a/arch/arm/mach-s3c2410/mach-h1940.c
+++ b/arch/arm/mach-s3c2410/mach-h1940.c
@@ -46,16 +46,17 @@
#include <mach/h1940.h>
#include <mach/h1940-latch.h>
#include <mach/fb.h>
-#include <mach/ts.h>
#include <plat/udc.h>
#include <plat/iic.h>
+#include <plat/gpio-cfg.h>
#include <plat/clock.h>
#include <plat/devs.h>
#include <plat/cpu.h>
#include <plat/pll.h>
#include <plat/pm.h>
#include <plat/mci.h>
+#include <plat/ts.h>
static struct map_desc h1940_iodesc[] __initdata = {
[0] = {
@@ -145,6 +146,7 @@ static struct s3c2410_ts_mach_info h1940_ts_cfg __initdata = {
.delay = 10000,
.presc = 49,
.oversampling_shift = 2,
+ .cfg_gpio = s3c24xx_ts_cfg_gpio,
};
/**
@@ -162,8 +164,8 @@ static struct s3c2410fb_display h1940_lcd __initdata = {
.xres = 240,
.yres = 320,
.bpp = 16,
- .left_margin = 20,
- .right_margin = 8,
+ .left_margin = 8,
+ .right_margin = 20,
.hsync_len = 4,
.upper_margin = 8,
.lower_margin = 7,
@@ -207,16 +209,16 @@ static int h1940_backlight_init(struct device *dev)
{
gpio_request(S3C2410_GPB(0), "Backlight");
- s3c2410_gpio_setpin(S3C2410_GPB(0), 0);
- s3c2410_gpio_pullup(S3C2410_GPB(0), 0);
- s3c2410_gpio_cfgpin(S3C2410_GPB(0), S3C2410_GPB0_TOUT0);
+ gpio_direction_output(S3C2410_GPB(0), 0);
+ s3c_gpio_setpull(S3C2410_GPB(0), S3C_GPIO_PULL_NONE);
+ s3c_gpio_cfgpin(S3C2410_GPB(0), S3C2410_GPB0_TOUT0);
return 0;
}
static void h1940_backlight_exit(struct device *dev)
{
- s3c2410_gpio_cfgpin(S3C2410_GPB(0), 1/*S3C2410_GPB0_OUTP*/);
+ gpio_direction_output(S3C2410_GPB(0), 1);
}
static struct platform_pwm_backlight_data backlight_data = {
@@ -245,18 +247,18 @@ static void h1940_lcd_power_set(struct plat_lcd_data *pd,
if (!power) {
/* set to 3ec */
- s3c2410_gpio_setpin(S3C2410_GPC(0), 0);
+ gpio_direction_output(S3C2410_GPC(0), 0);
/* wait for 3ac */
do {
- value = s3c2410_gpio_getpin(S3C2410_GPC(6));
+ value = gpio_get_value(S3C2410_GPC(6));
} while (value);
/* set to 38c */
- s3c2410_gpio_setpin(S3C2410_GPC(5), 0);
+ gpio_direction_output(S3C2410_GPC(5), 0);
} else {
/* Set to 3ac */
- s3c2410_gpio_setpin(S3C2410_GPC(5), 1);
+ gpio_direction_output(S3C2410_GPC(5), 1);
/* Set to 3ad */
- s3c2410_gpio_setpin(S3C2410_GPC(0), 1);
+ gpio_direction_output(S3C2410_GPC(0), 1);
}
}
@@ -271,7 +273,6 @@ static struct platform_device h1940_lcd_powerdev = {
};
static struct platform_device *h1940_devices[] __initdata = {
- &s3c_device_ts,
&s3c_device_ohci,
&s3c_device_lcd,
&s3c_device_wdt,
@@ -285,6 +286,8 @@ static struct platform_device *h1940_devices[] __initdata = {
&s3c_device_timer[0],
&h1940_backlight,
&h1940_lcd_powerdev,
+ &s3c_device_adc,
+ &s3c_device_ts,
};
static void __init h1940_map_io(void)
@@ -332,12 +335,13 @@ static void __init h1940_init(void)
gpio_request(S3C2410_GPC(5), "LCD power");
gpio_request(S3C2410_GPC(6), "LCD power");
+ gpio_direction_input(S3C2410_GPC(6));
platform_add_devices(h1940_devices, ARRAY_SIZE(h1940_devices));
}
MACHINE_START(H1940, "IPAQ-H1940")
- /* Maintainer: Ben Dooks <ben@fluff.org> */
+ /* Maintainer: Ben Dooks <ben-linux@fluff.org> */
.phys_io = S3C2410_PA_UART,
.io_pg_offst = (((u32)S3C24XX_VA_UART) >> 18) & 0xfffc,
.boot_params = S3C2410_SDRAM_PA + 0x100,
diff --git a/arch/arm/mach-s3c2410/mach-n30.c b/arch/arm/mach-s3c2410/mach-n30.c
index 684710f88142..41f299d983eb 100644
--- a/arch/arm/mach-s3c2410/mach-n30.c
+++ b/arch/arm/mach-s3c2410/mach-n30.c
@@ -26,6 +26,7 @@
#include <linux/serial_core.h>
#include <linux/timer.h>
#include <linux/io.h>
+#include <linux/mmc/host.h>
#include <mach/hardware.h>
#include <asm/irq.h>
@@ -46,6 +47,7 @@
#include <plat/clock.h>
#include <plat/cpu.h>
#include <plat/devs.h>
+#include <plat/mci.h>
#include <plat/s3c2410.h>
#include <plat/udc.h>
@@ -86,10 +88,10 @@ static void n30_udc_pullup(enum s3c2410_udc_cmd_e cmd)
{
switch (cmd) {
case S3C2410_UDC_P_ENABLE :
- s3c2410_gpio_setpin(S3C2410_GPB(3), 1);
+ gpio_set_value(S3C2410_GPB(3), 1);
break;
case S3C2410_UDC_P_DISABLE :
- s3c2410_gpio_setpin(S3C2410_GPB(3), 0);
+ gpio_set_value(S3C2410_GPB(3), 0);
break;
case S3C2410_UDC_P_RESET :
break;
@@ -172,8 +174,10 @@ static struct gpio_keys_button n35_buttons[] = {
{
.gpio = S3C2410_GPF(0),
.code = KEY_POWER,
+ .type = EV_PWR,
.desc = "Power",
.active_low = 0,
+ .wakeup = 1,
},
{
.gpio = S3C2410_GPG(9),
@@ -264,6 +268,14 @@ static struct s3c24xx_led_platdata n30_blue_led_pdata = {
.def_trigger = "",
};
+/* This is the blue LED on the device. Originally used to indicate GPS activity
+ * by flashing. */
+static struct s3c24xx_led_platdata n35_blue_led_pdata = {
+ .name = "blue_led",
+ .gpio = S3C2410_GPD(8),
+ .def_trigger = "",
+};
+
/* This LED is driven by the battery microcontroller, and is blinking
* red, blinking green or solid green when the battery is low,
* charging or full respectively. By driving GPD9 low, it's possible
@@ -275,6 +287,13 @@ static struct s3c24xx_led_platdata n30_warning_led_pdata = {
.def_trigger = "",
};
+static struct s3c24xx_led_platdata n35_warning_led_pdata = {
+ .name = "warning_led",
+ .flags = S3C24XX_LEDF_ACTLOW | S3C24XX_LEDF_TRISTATE,
+ .gpio = S3C2410_GPD(9),
+ .def_trigger = "",
+};
+
static struct platform_device n30_blue_led = {
.name = "s3c24xx_led",
.id = 1,
@@ -283,6 +302,14 @@ static struct platform_device n30_blue_led = {
},
};
+static struct platform_device n35_blue_led = {
+ .name = "s3c24xx_led",
+ .id = 1,
+ .dev = {
+ .platform_data = &n35_blue_led_pdata,
+ },
+};
+
static struct platform_device n30_warning_led = {
.name = "s3c24xx_led",
.id = 2,
@@ -291,6 +318,14 @@ static struct platform_device n30_warning_led = {
},
};
+static struct platform_device n35_warning_led = {
+ .name = "s3c24xx_led",
+ .id = 2,
+ .dev = {
+ .platform_data = &n35_warning_led_pdata,
+ },
+};
+
static struct s3c2410fb_display n30_display __initdata = {
.type = S3C2410_LCDCON1_TFT,
.width = 240,
@@ -317,13 +352,36 @@ static struct s3c2410fb_mach_info n30_fb_info __initdata = {
.lpcsel = 0x06,
};
+static void n30_sdi_set_power(unsigned char power_mode, unsigned short vdd)
+{
+ switch (power_mode) {
+ case MMC_POWER_ON:
+ case MMC_POWER_UP:
+ gpio_set_value(S3C2410_GPG(4), 1);
+ break;
+ case MMC_POWER_OFF:
+ default:
+ gpio_set_value(S3C2410_GPG(4), 0);
+ break;
+ }
+}
+
+static struct s3c24xx_mci_pdata n30_mci_cfg __initdata = {
+ .gpio_detect = S3C2410_GPF(1),
+ .gpio_wprotect = S3C2410_GPG(10),
+ .ocr_avail = MMC_VDD_32_33,
+ .set_power = n30_sdi_set_power,
+};
+
static struct platform_device *n30_devices[] __initdata = {
&s3c_device_lcd,
&s3c_device_wdt,
&s3c_device_i2c0,
&s3c_device_iis,
&s3c_device_ohci,
+ &s3c_device_rtc,
&s3c_device_usbgadget,
+ &s3c_device_sdi,
&n30_button_device,
&n30_blue_led,
&n30_warning_led,
@@ -334,8 +392,12 @@ static struct platform_device *n35_devices[] __initdata = {
&s3c_device_wdt,
&s3c_device_i2c0,
&s3c_device_iis,
+ &s3c_device_rtc,
&s3c_device_usbgadget,
+ &s3c_device_sdi,
&n35_button_device,
+ &n35_blue_led,
+ &n35_warning_led,
};
static struct s3c2410_platform_i2c __initdata n30_i2ccfg = {
@@ -490,17 +552,15 @@ static void __init n30_map_io(void)
s3c24xx_init_uarts(n30_uartcfgs, ARRAY_SIZE(n30_uartcfgs));
}
-static void __init n30_init_irq(void)
-{
- s3c24xx_init_irq();
-}
-
/* GPB3 is the line that controls the pull-up for the USB D+ line */
static void __init n30_init(void)
{
+ WARN_ON(gpio_request(S3C2410_GPG(4), "mmc power"));
+
s3c24xx_fb_set_platdata(&n30_fb_info);
s3c24xx_udc_set_platdata(&n30_udc_cfg);
+ s3c24xx_mci_set_platdata(&n30_mci_cfg);
s3c_i2c0_set_platdata(&n30_i2ccfg);
/* Turn off suspend on both USB ports, and switch the
@@ -532,10 +592,13 @@ static void __init n30_init(void)
s3c2410_modify_misccr(S3C2410_MISCCR_USBHOST |
S3C2410_MISCCR_USBSUSPND0 |
S3C2410_MISCCR_USBSUSPND1,
- S3C2410_MISCCR_USBSUSPND1);
+ S3C2410_MISCCR_USBSUSPND0);
platform_add_devices(n35_devices, ARRAY_SIZE(n35_devices));
}
+
+ WARN_ON(gpio_request(S3C2410_GPB(3), "udc pup"));
+ gpio_direction_output(S3C2410_GPB(3), 0);
}
MACHINE_START(N30, "Acer-N30")
@@ -547,7 +610,7 @@ MACHINE_START(N30, "Acer-N30")
.boot_params = S3C2410_SDRAM_PA + 0x100,
.timer = &s3c24xx_timer,
.init_machine = n30_init,
- .init_irq = n30_init_irq,
+ .init_irq = s3c24xx_init_irq,
.map_io = n30_map_io,
MACHINE_END
@@ -559,6 +622,6 @@ MACHINE_START(N35, "Acer-N35")
.boot_params = S3C2410_SDRAM_PA + 0x100,
.timer = &s3c24xx_timer,
.init_machine = n30_init,
- .init_irq = n30_init_irq,
+ .init_irq = s3c24xx_init_irq,
.map_io = n30_map_io,
MACHINE_END
diff --git a/arch/arm/mach-s3c2410/mach-qt2410.c b/arch/arm/mach-s3c2410/mach-qt2410.c
index 92a4ec375d82..d0e87b6e2e0f 100644
--- a/arch/arm/mach-s3c2410/mach-qt2410.c
+++ b/arch/arm/mach-s3c2410/mach-qt2410.c
@@ -58,6 +58,7 @@
#include <plat/iic.h>
#include <plat/common-smdk.h>
+#include <plat/gpio-cfg.h>
#include <plat/devs.h>
#include <plat/cpu.h>
#include <plat/pm.h>
@@ -219,10 +220,10 @@ static void spi_gpio_cs(struct s3c2410_spigpio_info *spi, int cs)
{
switch (cs) {
case BITBANG_CS_ACTIVE:
- s3c2410_gpio_setpin(S3C2410_GPB(5), 0);
+ gpio_set_value(S3C2410_GPB(5), 0);
break;
case BITBANG_CS_INACTIVE:
- s3c2410_gpio_setpin(S3C2410_GPB(5), 1);
+ gpio_set_value(S3C2410_GPB(5), 1);
break;
}
}
@@ -347,13 +348,14 @@ static void __init qt2410_machine_init(void)
}
s3c24xx_fb_set_platdata(&qt2410_fb_info);
- s3c2410_gpio_cfgpin(S3C2410_GPB(0), S3C2410_GPIO_OUTPUT);
+ s3c_gpio_cfgpin(S3C2410_GPB(0), S3C2410_GPIO_OUTPUT);
s3c2410_gpio_setpin(S3C2410_GPB(0), 1);
s3c24xx_udc_set_platdata(&qt2410_udc_cfg);
s3c_i2c0_set_platdata(NULL);
- s3c2410_gpio_cfgpin(S3C2410_GPB(5), S3C2410_GPIO_OUTPUT);
+ WARN_ON(gpio_request(S3C2410_GPB(5), "spi cs"));
+ gpio_direction_output(S3C2410_GPB(5), 1);
platform_add_devices(qt2410_devices, ARRAY_SIZE(qt2410_devices));
s3c_pm_init();
diff --git a/arch/arm/mach-s3c2410/mach-vr1000.c b/arch/arm/mach-s3c2410/mach-vr1000.c
index 9051f0d31123..d540d79dd264 100644
--- a/arch/arm/mach-s3c2410/mach-vr1000.c
+++ b/arch/arm/mach-s3c2410/mach-vr1000.c
@@ -357,8 +357,7 @@ static struct clk *vr1000_clocks[] __initdata = {
static void vr1000_power_off(void)
{
- s3c2410_gpio_cfgpin(S3C2410_GPB(9), S3C2410_GPIO_OUTPUT);
- s3c2410_gpio_setpin(S3C2410_GPB(9), 1);
+ gpio_direction_output(S3C2410_GPB(9), 1);
}
static void __init vr1000_map_io(void)
@@ -395,6 +394,8 @@ static void __init vr1000_init(void)
nor_simtec_init();
simtec_audio_add(NULL, true, NULL);
+
+ WARN_ON(gpio_request(S3C2410_GPB(9), "power off"));
}
MACHINE_START(VR1000, "Thorcom-VR1000")
diff --git a/arch/arm/mach-s3c2410/pm.c b/arch/arm/mach-s3c2410/pm.c
index 966119c8efee..725636fc4dc3 100644
--- a/arch/arm/mach-s3c2410/pm.c
+++ b/arch/arm/mach-s3c2410/pm.c
@@ -60,10 +60,10 @@ static void s3c2410_pm_prepare(void)
__raw_writel(calc, phys_to_virt(H1940_SUSPEND_CHECKSUM));
}
- /* the RX3715 uses similar code and the same H1940 and the
+ /* The RX3715 and RX1950 use code similar to the H1940's and the
* same offsets for resume and checksum pointers */
- if (machine_is_rx3715()) {
+ if (machine_is_rx3715() || machine_is_rx1950()) {
void *base = phys_to_virt(H1940_SUSPEND_CHECK);
unsigned long ptr;
unsigned long calc = 0;
@@ -79,6 +79,17 @@ static void s3c2410_pm_prepare(void)
if ( machine_is_aml_m5900() )
s3c2410_gpio_setpin(S3C2410_GPF(2), 1);
+ if (machine_is_rx1950()) {
+ /* According to the S3C2442 user's manual, page 7-17,
+ * when the system is operating in NAND boot mode,
+ * the hardware pin configuration (EINT[23:21])
+ * must be set as input for starting up after
+ * wakeup from sleep mode.
+ */
+ s3c_gpio_cfgpin(S3C2410_GPG(13), S3C2410_GPIO_INPUT);
+ s3c_gpio_cfgpin(S3C2410_GPG(14), S3C2410_GPIO_INPUT);
+ s3c_gpio_cfgpin(S3C2410_GPG(15), S3C2410_GPIO_INPUT);
+ }
}
static int s3c2410_pm_resume(struct sys_device *dev)
diff --git a/arch/arm/mach-s3c2410/s3c2410.c b/arch/arm/mach-s3c2410/s3c2410.c
index 91ba42f688ac..adc90a3c5890 100644
--- a/arch/arm/mach-s3c2410/s3c2410.c
+++ b/arch/arm/mach-s3c2410/s3c2410.c
@@ -16,6 +16,7 @@
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/init.h>
+#include <linux/gpio.h>
#include <linux/clk.h>
#include <linux/sysdev.h>
#include <linux/serial_core.h>
@@ -40,6 +41,10 @@
#include <plat/clock.h>
#include <plat/pll.h>
+#include <plat/gpio-core.h>
+#include <plat/gpio-cfg.h>
+#include <plat/gpio-cfg-helpers.h>
+
/* Initial IO mappings */
static struct map_desc s3c2410_iodesc[] __initdata = {
@@ -65,6 +70,9 @@ void __init s3c2410_init_uarts(struct s3c2410_uartcfg *cfg, int no)
void __init s3c2410_map_io(void)
{
+ s3c24xx_gpiocfg_default.set_pull = s3c_gpio_setpull_1up;
+ s3c24xx_gpiocfg_default.get_pull = s3c_gpio_getpull_1up;
+
iotable_init(s3c2410_iodesc, ARRAY_SIZE(s3c2410_iodesc));
}
diff --git a/arch/arm/mach-s3c2412/Kconfig b/arch/arm/mach-s3c2412/Kconfig
index 9a8c0657ae50..cef6a65637bd 100644
--- a/arch/arm/mach-s3c2412/Kconfig
+++ b/arch/arm/mach-s3c2412/Kconfig
@@ -16,7 +16,8 @@ config CPU_S3C2412
config CPU_S3C2412_ONLY
bool
depends on ARCH_S3C2410 && !CPU_S3C2400 && !CPU_S3C2410 && \
- !CPU_S3C2440 && !CPU_S3C2442 && !CPU_S3C2443 && CPU_S3C2412
+ !CPU_S3C2416 && !CPU_S3C2440 && !CPU_S3C2442 && \
+ !CPU_S3C2443 && CPU_S3C2412
default y if CPU_S3C2412
config S3C2412_DMA
diff --git a/arch/arm/mach-s3c2412/dma.c b/arch/arm/mach-s3c2412/dma.c
index e880524904eb..7abecfca0b7e 100644
--- a/arch/arm/mach-s3c2412/dma.c
+++ b/arch/arm/mach-s3c2412/dma.c
@@ -30,7 +30,6 @@
#include <mach/regs-mem.h>
#include <mach/regs-lcd.h>
#include <mach/regs-sdi.h>
-#include <plat/regs-s3c2412-iis.h>
#include <plat/regs-iis.h>
#include <plat/regs-spi.h>
@@ -119,13 +118,11 @@ static struct s3c24xx_dma_map __initdata s3c2412_dma_mappings[] = {
.name = "i2s-sdi",
.channels = MAP(S3C2412_DMAREQSEL_I2SRX),
.channels_rx = MAP(S3C2412_DMAREQSEL_I2SRX),
- .hw_addr.from = S3C2410_PA_IIS + S3C2412_IISRXD,
},
[DMACH_I2S_OUT] = {
.name = "i2s-sdo",
.channels = MAP(S3C2412_DMAREQSEL_I2STX),
.channels_rx = MAP(S3C2412_DMAREQSEL_I2STX),
- .hw_addr.to = S3C2410_PA_IIS + S3C2412_IISTXD,
},
[DMACH_USB_EP1] = {
.name = "usb-ep1",
diff --git a/arch/arm/mach-s3c2412/gpio.c b/arch/arm/mach-s3c2412/gpio.c
index f7afece7fc38..3404a876b33e 100644
--- a/arch/arm/mach-s3c2412/gpio.c
+++ b/arch/arm/mach-s3c2412/gpio.c
@@ -16,41 +16,43 @@
#include <linux/types.h>
#include <linux/module.h>
#include <linux/interrupt.h>
+#include <linux/gpio.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <mach/regs-gpio.h>
-
#include <mach/hardware.h>
+#include <plat/gpio-core.h>
+
int s3c2412_gpio_set_sleepcfg(unsigned int pin, unsigned int state)
{
- void __iomem *base = S3C24XX_GPIO_BASE(pin);
- unsigned long offs = S3C2410_GPIO_OFFSET(pin);
+ struct s3c_gpio_chip *chip = s3c_gpiolib_getchip(pin);
+ unsigned long offs = pin - chip->chip.base;
unsigned long flags;
unsigned long slpcon;
offs *= 2;
- if (pin < S3C2410_GPIO_BANKB)
+ if (pin < S3C2410_GPB(0))
return -EINVAL;
- if (pin >= S3C2410_GPIO_BANKF &&
- pin <= S3C2410_GPIO_BANKG)
+ if (pin >= S3C2410_GPF(0) &&
+ pin <= S3C2410_GPG(16))
return -EINVAL;
- if (pin > (S3C2410_GPIO_BANKH + 32))
+ if (pin > S3C2410_GPH(16))
return -EINVAL;
local_irq_save(flags);
- slpcon = __raw_readl(base + 0x0C);
+ slpcon = __raw_readl(chip->base + 0x0C);
slpcon &= ~(3 << offs);
slpcon |= state << offs;
- __raw_writel(slpcon, base + 0x0C);
+ __raw_writel(slpcon, chip->base + 0x0C);
local_irq_restore(flags);
diff --git a/arch/arm/mach-s3c2412/mach-jive.c b/arch/arm/mach-s3c2412/mach-jive.c
index 14f4798291aa..478f4b4606c2 100644
--- a/arch/arm/mach-s3c2412/mach-jive.c
+++ b/arch/arm/mach-s3c2412/mach-jive.c
@@ -48,6 +48,7 @@
#include <linux/mtd/nand_ecc.h>
#include <linux/mtd/partitions.h>
+#include <plat/gpio-cfg.h>
#include <plat/clock.h>
#include <plat/devs.h>
#include <plat/cpu.h>
@@ -357,8 +358,7 @@ static void jive_lcm_reset(unsigned int set)
{
printk(KERN_DEBUG "%s(%d)\n", __func__, set);
- s3c2410_gpio_setpin(S3C2410_GPG(13), set);
- s3c2410_gpio_cfgpin(S3C2410_GPG(13), S3C2410_GPIO_OUTPUT);
+ gpio_set_value(S3C2410_GPG(13), set);
}
#undef LCD_UPPER_MARGIN
@@ -391,7 +391,7 @@ static struct ili9320_platdata jive_lcm_config = {
static void jive_lcd_spi_chipselect(struct s3c2410_spigpio_info *spi, int cs)
{
- s3c2410_gpio_setpin(S3C2410_GPB(7), cs ? 0 : 1);
+ gpio_set_value(S3C2410_GPB(7), cs ? 0 : 1);
}
static struct s3c2410_spigpio_info jive_lcd_spi = {
@@ -413,7 +413,7 @@ static struct platform_device jive_device_lcdspi = {
static void jive_wm8750_chipselect(struct s3c2410_spigpio_info *spi, int cs)
{
- s3c2410_gpio_setpin(S3C2410_GPH(10), cs ? 0 : 1);
+ gpio_set_value(S3C2410_GPH(10), cs ? 0 : 1);
}
static struct s3c2410_spigpio_info jive_wm8750_spi = {
@@ -531,7 +531,7 @@ static void jive_power_off(void)
printk(KERN_INFO "powering system down...\n");
s3c2410_gpio_setpin(S3C2410_GPC(5), 1);
- s3c2410_gpio_cfgpin(S3C2410_GPC(5), S3C2410_GPIO_OUTPUT);
+ s3c_gpio_cfgpin(S3C2410_GPC(5), S3C2410_GPIO_OUTPUT);
}
static void __init jive_machine_init(void)
@@ -636,22 +636,22 @@ static void __init jive_machine_init(void)
/* initialise the spi */
- s3c2410_gpio_setpin(S3C2410_GPG(13), 0);
- s3c2410_gpio_cfgpin(S3C2410_GPG(13), S3C2410_GPIO_OUTPUT);
+ gpio_request(S3C2410_GPG(13), "lcm reset");
+ gpio_direction_output(S3C2410_GPG(13), 0);
- s3c2410_gpio_setpin(S3C2410_GPB(7), 1);
- s3c2410_gpio_cfgpin(S3C2410_GPB(7), S3C2410_GPIO_OUTPUT);
+ gpio_request(S3C2410_GPB(7), "jive spi");
+ gpio_direction_output(S3C2410_GPB(7), 1);
s3c2410_gpio_setpin(S3C2410_GPB(6), 0);
- s3c2410_gpio_cfgpin(S3C2410_GPB(6), S3C2410_GPIO_OUTPUT);
+ s3c_gpio_cfgpin(S3C2410_GPB(6), S3C2410_GPIO_OUTPUT);
s3c2410_gpio_setpin(S3C2410_GPG(8), 1);
- s3c2410_gpio_cfgpin(S3C2410_GPG(8), S3C2410_GPIO_OUTPUT);
+ s3c_gpio_cfgpin(S3C2410_GPG(8), S3C2410_GPIO_OUTPUT);
/* initialise the WM8750 spi */
- s3c2410_gpio_setpin(S3C2410_GPH(10), 1);
- s3c2410_gpio_cfgpin(S3C2410_GPH(10), S3C2410_GPIO_OUTPUT);
+ gpio_request(S3C2410_GPH(10), "jive wm8750 spi");
+ gpio_direction_output(S3C2410_GPH(10), 1);
/* Turn off suspend on both USB ports, and switch the
* selectable USB port to USB device mode. */
@@ -674,7 +674,7 @@ static void __init jive_machine_init(void)
}
MACHINE_START(JIVE, "JIVE")
- /* Maintainer: Ben Dooks <ben@fluff.org> */
+ /* Maintainer: Ben Dooks <ben-linux@fluff.org> */
.phys_io = S3C2410_PA_UART,
.io_pg_offst = (((u32)S3C24XX_VA_UART) >> 18) & 0xfffc,
.boot_params = S3C2410_SDRAM_PA + 0x100,
diff --git a/arch/arm/mach-s3c2412/mach-smdk2413.c b/arch/arm/mach-s3c2412/mach-smdk2413.c
index 0392065af1af..ba93a356a839 100644
--- a/arch/arm/mach-s3c2412/mach-smdk2413.c
+++ b/arch/arm/mach-s3c2412/mach-smdk2413.c
@@ -85,10 +85,10 @@ static void smdk2413_udc_pullup(enum s3c2410_udc_cmd_e cmd)
switch (cmd)
{
case S3C2410_UDC_P_ENABLE :
- s3c2410_gpio_setpin(S3C2410_GPF(2), 1);
+ gpio_set_value(S3C2410_GPF(2), 1);
break;
case S3C2410_UDC_P_DISABLE :
- s3c2410_gpio_setpin(S3C2410_GPF(2), 0);
+ gpio_set_value(S3C2410_GPF(2), 0);
break;
case S3C2410_UDC_P_RESET :
break;
@@ -134,8 +134,8 @@ static void __init smdk2413_machine_init(void)
{ /* Turn off suspend on both USB ports, and switch the
* selectable USB port to USB device mode. */
- s3c2410_gpio_setpin(S3C2410_GPF(2), 0);
- s3c2410_gpio_cfgpin(S3C2410_GPF(2), S3C2410_GPIO_OUTPUT);
+ WARN_ON(gpio_request(S3C2410_GPF(2), "udc pull"));
+ gpio_direction_output(S3C2410_GPF(2), 0);
s3c2410_modify_misccr(S3C2410_MISCCR_USBHOST |
S3C2410_MISCCR_USBSUSPND0 |
@@ -150,7 +150,7 @@ static void __init smdk2413_machine_init(void)
}
MACHINE_START(S3C2413, "S3C2413")
- /* Maintainer: Ben Dooks <ben@fluff.org> */
+ /* Maintainer: Ben Dooks <ben-linux@fluff.org> */
.phys_io = S3C2410_PA_UART,
.io_pg_offst = (((u32)S3C24XX_VA_UART) >> 18) & 0xfffc,
.boot_params = S3C2410_SDRAM_PA + 0x100,
@@ -163,7 +163,7 @@ MACHINE_START(S3C2413, "S3C2413")
MACHINE_END
MACHINE_START(SMDK2412, "SMDK2412")
- /* Maintainer: Ben Dooks <ben@fluff.org> */
+ /* Maintainer: Ben Dooks <ben-linux@fluff.org> */
.phys_io = S3C2410_PA_UART,
.io_pg_offst = (((u32)S3C24XX_VA_UART) >> 18) & 0xfffc,
.boot_params = S3C2410_SDRAM_PA + 0x100,
@@ -176,7 +176,7 @@ MACHINE_START(SMDK2412, "SMDK2412")
MACHINE_END
MACHINE_START(SMDK2413, "SMDK2413")
- /* Maintainer: Ben Dooks <ben@fluff.org> */
+ /* Maintainer: Ben Dooks <ben-linux@fluff.org> */
.phys_io = S3C2410_PA_UART,
.io_pg_offst = (((u32)S3C24XX_VA_UART) >> 18) & 0xfffc,
.boot_params = S3C2410_SDRAM_PA + 0x100,
diff --git a/arch/arm/mach-s3c2416/Kconfig b/arch/arm/mach-s3c2416/Kconfig
new file mode 100644
index 000000000000..657e4fe17f39
--- /dev/null
+++ b/arch/arm/mach-s3c2416/Kconfig
@@ -0,0 +1,39 @@
+# arch/arm/mach-s3c2416/Kconfig
+#
+# Copyright 2009 Yauhen Kharuzhy <jekhor@gmail.com>
+#
+# Licensed under GPLv2
+
+# note, this also supports the S3C2450 which is so similar it has the same
+# ID code as the S3C2416.
+
+config CPU_S3C2416
+ bool
+ depends on ARCH_S3C2410
+ select CPU_ARM926T
+ select S3C2416_DMA if S3C2410_DMA
+ select CPU_LLSERIAL_S3C2440
+ select S3C_GPIO_PULL_UPDOWN
+ select SAMSUNG_CLKSRC
+ select S3C2443_CLOCK
+ help
+ Support for the S3C2416 SoC from the S3C24XX line
+
+config S3C2416_DMA
+ bool
+ depends on CPU_S3C2416
+ help
+ Internal config node for S3C2416 DMA support
+
+menu "S3C2416 Machines"
+
+config MACH_SMDK2416
+ bool "SMDK2416"
+ select CPU_S3C2416
+ select S3C_DEV_FB
+ select S3C_DEV_HSMMC
+ select S3C_DEV_HSMMC1
+ help
+ Say Y here if you are using an SMDK2416
+
+endmenu
diff --git a/arch/arm/mach-s3c2416/Makefile b/arch/arm/mach-s3c2416/Makefile
new file mode 100644
index 000000000000..6c12c7bf40ad
--- /dev/null
+++ b/arch/arm/mach-s3c2416/Makefile
@@ -0,0 +1,19 @@
+# arch/arm/mach-s3c2416/Makefile
+#
+# Copyright 2009 Yauhen Kharuzhy <jekhor@gmail.com>
+#
+# Licensed under GPLv2
+
+obj-y :=
+obj-m :=
+obj-n :=
+obj- :=
+
+obj-$(CONFIG_CPU_S3C2416) += s3c2416.o clock.o
+obj-$(CONFIG_CPU_S3C2416) += irq.o
+
+#obj-$(CONFIG_S3C2416_DMA) += dma.o
+
+# Machine support
+
+obj-$(CONFIG_MACH_SMDK2416) += mach-smdk2416.o
diff --git a/arch/arm/mach-s3c2416/clock.c b/arch/arm/mach-s3c2416/clock.c
new file mode 100644
index 000000000000..7ccf5a2a2bfc
--- /dev/null
+++ b/arch/arm/mach-s3c2416/clock.c
@@ -0,0 +1,135 @@
+/* linux/arch/arm/mach-s3c2416/clock.c
+ *
+ * Copyright (c) 2010 Simtec Electronics
+ * Copyright (c) 2010 Ben Dooks <ben-linux@fluff.org>
+ *
+ * S3C2416 Clock control support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/clk.h>
+
+#include <plat/s3c2416.h>
+#include <plat/s3c2443.h>
+#include <plat/clock.h>
+#include <plat/clock-clksrc.h>
+#include <plat/cpu.h>
+
+#include <plat/cpu-freq.h>
+#include <plat/pll6553x.h>
+#include <plat/pll.h>
+
+#include <asm/mach/map.h>
+
+#include <mach/regs-clock.h>
+#include <mach/regs-s3c2443-clock.h>
+
+static unsigned int armdiv[8] = {
+ [0] = 1,
+ [1] = 2,
+ [2] = 3,
+ [3] = 4,
+ [5] = 6,
+ [7] = 8,
+};
+
+/* ID to hardware numbering, 0 is HSMMC1, 1 is HSMMC0 */
+static struct clksrc_clk hsmmc_div[] = {
+ [0] = {
+ .clk = {
+ .name = "hsmmc-div",
+ .id = 1,
+ .parent = &clk_esysclk.clk,
+ },
+ .reg_div = { .reg = S3C2416_CLKDIV2, .size = 2, .shift = 6 },
+ },
+ [1] = {
+ .clk = {
+ .name = "hsmmc-div",
+ .id = 0,
+ .parent = &clk_esysclk.clk,
+ },
+ .reg_div = { .reg = S3C2443_CLKDIV1, .size = 2, .shift = 6 },
+ },
+};
+
+static struct clksrc_clk hsmmc_mux[] = {
+ [0] = {
+ .clk = {
+ .id = 1,
+ .name = "hsmmc-if",
+ .ctrlbit = (1 << 6),
+ .enable = s3c2443_clkcon_enable_s,
+ },
+ .sources = &(struct clksrc_sources) {
+ .nr_sources = 2,
+ .sources = (struct clk *[]) {
+ [0] = &hsmmc_div[0].clk,
+ [1] = NULL, /* to fix */
+ },
+ },
+ .reg_src = { .reg = S3C2443_CLKSRC, .size = 1, .shift = 16 },
+ },
+ [1] = {
+ .clk = {
+ .id = 0,
+ .name = "hsmmc-if",
+ .ctrlbit = (1 << 12),
+ .enable = s3c2443_clkcon_enable_s,
+ },
+ .sources = &(struct clksrc_sources) {
+ .nr_sources = 2,
+ .sources = (struct clk *[]) {
+ [0] = &hsmmc_div[1].clk,
+ [1] = NULL, /* to fix */
+ },
+ },
+ .reg_src = { .reg = S3C2443_CLKSRC, .size = 1, .shift = 17 },
+ },
+};
+
+
+static inline unsigned int s3c2416_fclk_div(unsigned long clkcon0)
+{
+ clkcon0 &= 7 << S3C2443_CLKDIV0_ARMDIV_SHIFT;
+
+ return armdiv[clkcon0 >> S3C2443_CLKDIV0_ARMDIV_SHIFT];
+}
+
+void __init_or_cpufreq s3c2416_setup_clocks(void)
+{
+ s3c2443_common_setup_clocks(s3c2416_get_pll, s3c2416_fclk_div);
+}
+
+
+static struct clksrc_clk *clksrcs[] __initdata = {
+ &hsmmc_div[0],
+ &hsmmc_div[1],
+ &hsmmc_mux[0],
+ &hsmmc_mux[1],
+};
+
+void __init s3c2416_init_clocks(int xtal)
+{
+ u32 epllcon = __raw_readl(S3C2443_EPLLCON);
+ u32 epllcon1 = __raw_readl(S3C2443_EPLLCON+4);
+ int ptr;
+
+ /* s3c2416 EPLL compatible with s3c64xx */
+ clk_epll.rate = s3c_get_pll6553x(xtal, epllcon, epllcon1);
+
+ clk_epll.parent = &clk_epllref.clk;
+
+ s3c2443_common_init_clocks(xtal, s3c2416_get_pll, s3c2416_fclk_div);
+
+ for (ptr = 0; ptr < ARRAY_SIZE(clksrcs); ptr++)
+ s3c_register_clksrc(clksrcs[ptr], 1);
+
+ s3c_pwmclk_init();
+
+}
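Two notes on the clock code above, as reviewer aids rather than patch content. First, the armdiv[] table maps the three ARMDIV bits of CLKDIV0 to a divisor, so an ARMDIV field of 3, for example, gives a divide-by-4 FCLK. Second, the "hsmmc-if" clocks registered here are picked up through the ordinary clk API; a hedged sketch follows, in which the device pointer and the 25 MHz target rate are illustrative only.

/* Hedged sketch: how an HSMMC driver of this era might consume the
 * "hsmmc-if" clock registered above. Error handling is minimal and
 * the target rate is an example value.
 */
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int hsmmc_clk_sketch(struct device *dev)
{
	struct clk *clk = clk_get(dev, "hsmmc-if");

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	clk_set_rate(clk, 25 * 1000 * 1000);	/* divided down from esysclk */
	clk_enable(clk);
	return 0;
}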
diff --git a/arch/arm/mach-s3c2416/irq.c b/arch/arm/mach-s3c2416/irq.c
new file mode 100644
index 000000000000..89f521d59d06
--- /dev/null
+++ b/arch/arm/mach-s3c2416/irq.c
@@ -0,0 +1,254 @@
+/* linux/arch/arm/mach-s3c2416/irq.c
+ *
+ * Copyright (c) 2009 Yauhen Kharuzhy <jekhor@gmail.com>,
+ * as part of OpenInkpot project
+ * Copyright (c) 2009 Promwad Innovation Company
+ * Yauhen Kharuzhy <yauhen.kharuzhy@promwad.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+*/
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/sysdev.h>
+#include <linux/io.h>
+
+#include <mach/hardware.h>
+#include <asm/irq.h>
+
+#include <asm/mach/irq.h>
+
+#include <mach/regs-irq.h>
+#include <mach/regs-gpio.h>
+
+#include <plat/cpu.h>
+#include <plat/pm.h>
+#include <plat/irq.h>
+
+#define INTMSK(start, end) ((1 << ((end) + 1 - (start))) - 1)
+
+static inline void s3c2416_irq_demux(unsigned int irq, unsigned int len)
+{
+ unsigned int subsrc, submsk;
+ unsigned int end;
+
+ /* read the current pending sub-interrupts, and the mask
+ * of sources that are currently disabled */
+
+ subsrc = __raw_readl(S3C2410_SUBSRCPND);
+ submsk = __raw_readl(S3C2410_INTSUBMSK);
+
+ subsrc &= ~submsk;
+ subsrc >>= (irq - S3C2410_IRQSUB(0));
+ subsrc &= (1 << len)-1;
+
+ end = len + irq;
+
+ for (; irq < end && subsrc; irq++) {
+ if (subsrc & 1)
+ generic_handle_irq(irq);
+
+ subsrc >>= 1;
+ }
+}
+
+/* WDT/AC97 sub interrupts */
+
+static void s3c2416_irq_demux_wdtac97(unsigned int irq, struct irq_desc *desc)
+{
+ s3c2416_irq_demux(IRQ_S3C2443_WDT, 4);
+}
+
+#define INTMSK_WDTAC97 (1UL << (IRQ_WDT - IRQ_EINT0))
+#define SUBMSK_WDTAC97 INTMSK(IRQ_S3C2443_WDT, IRQ_S3C2443_AC97)
+
+static void s3c2416_irq_wdtac97_mask(unsigned int irqno)
+{
+ s3c_irqsub_mask(irqno, INTMSK_WDTAC97, SUBMSK_WDTAC97);
+}
+
+static void s3c2416_irq_wdtac97_unmask(unsigned int irqno)
+{
+ s3c_irqsub_unmask(irqno, INTMSK_WDTAC97);
+}
+
+static void s3c2416_irq_wdtac97_ack(unsigned int irqno)
+{
+ s3c_irqsub_maskack(irqno, INTMSK_WDTAC97, SUBMSK_WDTAC97);
+}
+
+static struct irq_chip s3c2416_irq_wdtac97 = {
+ .mask = s3c2416_irq_wdtac97_mask,
+ .unmask = s3c2416_irq_wdtac97_unmask,
+ .ack = s3c2416_irq_wdtac97_ack,
+};
+
+
+/* LCD sub interrupts */
+
+static void s3c2416_irq_demux_lcd(unsigned int irq, struct irq_desc *desc)
+{
+ s3c2416_irq_demux(IRQ_S3C2443_LCD1, 4);
+}
+
+#define INTMSK_LCD (1UL << (IRQ_LCD - IRQ_EINT0))
+#define SUBMSK_LCD INTMSK(IRQ_S3C2443_LCD1, IRQ_S3C2443_LCD4)
+
+static void s3c2416_irq_lcd_mask(unsigned int irqno)
+{
+ s3c_irqsub_mask(irqno, INTMSK_LCD, SUBMSK_LCD);
+}
+
+static void s3c2416_irq_lcd_unmask(unsigned int irqno)
+{
+ s3c_irqsub_unmask(irqno, INTMSK_LCD);
+}
+
+static void s3c2416_irq_lcd_ack(unsigned int irqno)
+{
+ s3c_irqsub_maskack(irqno, INTMSK_LCD, SUBMSK_LCD);
+}
+
+static struct irq_chip s3c2416_irq_lcd = {
+ .mask = s3c2416_irq_lcd_mask,
+ .unmask = s3c2416_irq_lcd_unmask,
+ .ack = s3c2416_irq_lcd_ack,
+};
+
+
+/* DMA sub interrupts */
+
+static void s3c2416_irq_demux_dma(unsigned int irq, struct irq_desc *desc)
+{
+ s3c2416_irq_demux(IRQ_S3C2443_DMA0, 6);
+}
+
+#define INTMSK_DMA (1UL << (IRQ_S3C2443_DMA - IRQ_EINT0))
+#define SUBMSK_DMA INTMSK(IRQ_S3C2443_DMA0, IRQ_S3C2443_DMA5)
+
+
+static void s3c2416_irq_dma_mask(unsigned int irqno)
+{
+ s3c_irqsub_mask(irqno, INTMSK_DMA, SUBMSK_DMA);
+}
+
+static void s3c2416_irq_dma_unmask(unsigned int irqno)
+{
+ s3c_irqsub_unmask(irqno, INTMSK_DMA);
+}
+
+static void s3c2416_irq_dma_ack(unsigned int irqno)
+{
+ s3c_irqsub_maskack(irqno, INTMSK_DMA, SUBMSK_DMA);
+}
+
+static struct irq_chip s3c2416_irq_dma = {
+ .mask = s3c2416_irq_dma_mask,
+ .unmask = s3c2416_irq_dma_unmask,
+ .ack = s3c2416_irq_dma_ack,
+};
+
+
+/* UART3 sub interrupts */
+
+static void s3c2416_irq_demux_uart3(unsigned int irq, struct irq_desc *desc)
+{
+ s3c2416_irq_demux(IRQ_S3C2443_UART3, 3);
+}
+
+#define INTMSK_UART3 (1UL << (IRQ_S3C2443_UART3 - IRQ_EINT0))
+#define SUBMSK_UART3 (0xf << (IRQ_S3C2443_RX3 - S3C2410_IRQSUB(0)))
+
+
+static void s3c2416_irq_uart3_mask(unsigned int irqno)
+{
+ s3c_irqsub_mask(irqno, INTMSK_UART3, SUBMSK_UART3);
+}
+
+static void s3c2416_irq_uart3_unmask(unsigned int irqno)
+{
+ s3c_irqsub_unmask(irqno, INTMSK_UART3);
+}
+
+static void s3c2416_irq_uart3_ack(unsigned int irqno)
+{
+ s3c_irqsub_maskack(irqno, INTMSK_UART3, SUBMSK_UART3);
+}
+
+static struct irq_chip s3c2416_irq_uart3 = {
+ .mask = s3c2416_irq_uart3_mask,
+ .unmask = s3c2416_irq_uart3_unmask,
+ .ack = s3c2416_irq_uart3_ack,
+};
+
+
+/* IRQ initialisation code */
+
+static int __init s3c2416_add_sub(unsigned int base,
+ void (*demux)(unsigned int,
+ struct irq_desc *),
+ struct irq_chip *chip,
+ unsigned int start, unsigned int end)
+{
+ unsigned int irqno;
+
+ set_irq_chip(base, &s3c_irq_level_chip);
+ set_irq_handler(base, handle_level_irq);
+ set_irq_chained_handler(base, demux);
+
+ for (irqno = start; irqno <= end; irqno++) {
+ set_irq_chip(irqno, chip);
+ set_irq_handler(irqno, handle_level_irq);
+ set_irq_flags(irqno, IRQF_VALID);
+ }
+
+ return 0;
+}
+
+static int __init s3c2416_irq_add(struct sys_device *sysdev)
+{
+ printk(KERN_INFO "S3C2416: IRQ Support\n");
+
+ s3c2416_add_sub(IRQ_LCD, s3c2416_irq_demux_lcd, &s3c2416_irq_lcd,
+ IRQ_S3C2443_LCD2, IRQ_S3C2443_LCD4);
+
+ s3c2416_add_sub(IRQ_S3C2443_DMA, s3c2416_irq_demux_dma,
+ &s3c2416_irq_dma, IRQ_S3C2443_DMA0, IRQ_S3C2443_DMA5);
+
+ s3c2416_add_sub(IRQ_S3C2443_UART3, s3c2416_irq_demux_uart3,
+ &s3c2416_irq_uart3,
+ IRQ_S3C2443_RX3, IRQ_S3C2443_ERR3);
+
+ s3c2416_add_sub(IRQ_WDT, s3c2416_irq_demux_wdtac97,
+ &s3c2416_irq_wdtac97,
+ IRQ_S3C2443_WDT, IRQ_S3C2443_AC97);
+
+ return 0;
+}
+
+static struct sysdev_driver s3c2416_irq_driver = {
+ .add = s3c2416_irq_add,
+};
+
+static int __init s3c2416_irq_init(void)
+{
+ return sysdev_driver_register(&s3c2416_sysclass, &s3c2416_irq_driver);
+}
+
+arch_initcall(s3c2416_irq_init);
+
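A worked example of the mask macros above may help when reviewing the demux wiring: INTMSK(start, end) expands to a contiguous bitmask of (end + 1 - start) bits, so SUBMSK_LCD evaluates to 0xf, one bit per LCD sub-interrupt, and s3c2416_irq_demux(IRQ_S3C2443_LCD1, 4) walks exactly those four pending bits. The sketch below repeats the pattern for a hypothetical two-interrupt block; the IRQ_EXAMPLE_* names are invented.

/* Hedged sketch of the sub-IRQ demux pattern used in this file, for a
 * hypothetical block with two consecutive sub-interrupts. The names
 * IRQ_EXAMPLE_A/B do not exist on real hardware.
 */
#define SUBMSK_EXAMPLE	INTMSK(IRQ_EXAMPLE_A, IRQ_EXAMPLE_B)	/* two bits set */

static void example_irq_demux(unsigned int irq, struct irq_desc *desc)
{
	/* walk the two pending sub-interrupt bits starting at IRQ_EXAMPLE_A */
	s3c2416_irq_demux(IRQ_EXAMPLE_A, 2);
}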
diff --git a/arch/arm/mach-s3c2416/mach-smdk2416.c b/arch/arm/mach-s3c2416/mach-smdk2416.c
new file mode 100644
index 000000000000..5fc3f67ef265
--- /dev/null
+++ b/arch/arm/mach-s3c2416/mach-smdk2416.c
@@ -0,0 +1,206 @@
+/* linux/arch/arm/mach-s3c2416/mach-hanlin_v3c.c
+ *
+ * Copyright (c) 2009 Yauhen Kharuzhy <jekhor@gmail.com>,
+ * as part of OpenInkpot project
+ * Copyright (c) 2009 Promwad Innovation Company
+ * Yauhen Kharuzhy <yauhen.kharuzhy@promwad.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+*/
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/timer.h>
+#include <linux/init.h>
+#include <linux/serial_core.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/mtd/partitions.h>
+#include <linux/gpio.h>
+#include <linux/fb.h>
+
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <asm/mach/irq.h>
+
+#include <mach/hardware.h>
+#include <asm/irq.h>
+#include <asm/mach-types.h>
+
+#include <plat/regs-serial.h>
+#include <mach/regs-gpio.h>
+#include <mach/regs-lcd.h>
+
+#include <mach/idle.h>
+#include <mach/leds-gpio.h>
+#include <plat/iic.h>
+
+#include <plat/s3c2416.h>
+#include <plat/gpio-cfg.h>
+#include <plat/clock.h>
+#include <plat/devs.h>
+#include <plat/cpu.h>
+#include <plat/nand.h>
+
+#include <plat/regs-fb-v4.h>
+#include <plat/fb.h>
+
+#include <plat/common-smdk.h>
+
+static struct map_desc smdk2416_iodesc[] __initdata = {
+ /* ISA IO Space map (memory space selected by A24) */
+
+ {
+ .virtual = (u32)S3C24XX_VA_ISA_WORD,
+ .pfn = __phys_to_pfn(S3C2410_CS2),
+ .length = 0x10000,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = (u32)S3C24XX_VA_ISA_WORD + 0x10000,
+ .pfn = __phys_to_pfn(S3C2410_CS2 + (1<<24)),
+ .length = SZ_4M,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = (u32)S3C24XX_VA_ISA_BYTE,
+ .pfn = __phys_to_pfn(S3C2410_CS2),
+ .length = 0x10000,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = (u32)S3C24XX_VA_ISA_BYTE + 0x10000,
+ .pfn = __phys_to_pfn(S3C2410_CS2 + (1<<24)),
+ .length = SZ_4M,
+ .type = MT_DEVICE,
+ }
+};
+
+#define UCON (S3C2410_UCON_DEFAULT | \
+ S3C2440_UCON_PCLK | \
+ S3C2443_UCON_RXERR_IRQEN)
+
+#define ULCON (S3C2410_LCON_CS8 | S3C2410_LCON_PNONE)
+
+#define UFCON (S3C2410_UFCON_RXTRIG8 | \
+ S3C2410_UFCON_FIFOMODE | \
+ S3C2440_UFCON_TXTRIG16)
+
+static struct s3c2410_uartcfg smdk2416_uartcfgs[] __initdata = {
+ [0] = {
+ .hwport = 0,
+ .flags = 0,
+ .ucon = UCON,
+ .ulcon = ULCON,
+ .ufcon = UFCON,
+ },
+ [1] = {
+ .hwport = 1,
+ .flags = 0,
+ .ucon = UCON,
+ .ulcon = ULCON,
+ .ufcon = UFCON,
+ },
+ /* IR port */
+ [2] = {
+ .hwport = 2,
+ .flags = 0,
+ .ucon = UCON,
+ .ulcon = ULCON | 0x50,
+ .ufcon = UFCON,
+ }
+};
+
+struct s3c_fb_pd_win smdk2416_fb_win[] = {
+ [0] = {
+ /* think this is the same as the smdk6410 */
+ .win_mode = {
+ .pixclock = 41094,
+ .left_margin = 8,
+ .right_margin = 13,
+ .upper_margin = 7,
+ .lower_margin = 5,
+ .hsync_len = 3,
+ .vsync_len = 1,
+ .xres = 800,
+ .yres = 480,
+ },
+ .default_bpp = 16,
+ .max_bpp = 32,
+ },
+};
+
+static void s3c2416_fb_gpio_setup_24bpp(void)
+{
+ unsigned int gpio;
+
+ for (gpio = S3C2410_GPC(1); gpio <= S3C2410_GPC(4); gpio++) {
+ s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(2));
+ s3c_gpio_setpull(gpio, S3C_GPIO_PULL_NONE);
+ }
+
+ for (gpio = S3C2410_GPC(8); gpio <= S3C2410_GPC(15); gpio++) {
+ s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(2));
+ s3c_gpio_setpull(gpio, S3C_GPIO_PULL_NONE);
+ }
+
+ for (gpio = S3C2410_GPD(0); gpio <= S3C2410_GPD(15); gpio++) {
+ s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(2));
+ s3c_gpio_setpull(gpio, S3C_GPIO_PULL_NONE);
+ }
+}
+
+static struct s3c_fb_platdata smdk2416_fb_platdata = {
+ .win[0] = &smdk2416_fb_win[0],
+ .setup_gpio = s3c2416_fb_gpio_setup_24bpp,
+ .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
+ .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
+};
+
+static struct platform_device *smdk2416_devices[] __initdata = {
+ &s3c_device_fb,
+ &s3c_device_wdt,
+ &s3c_device_ohci,
+ &s3c_device_i2c0,
+ &s3c_device_hsmmc0,
+ &s3c_device_hsmmc1,
+};
+
+static void __init smdk2416_map_io(void)
+{
+ s3c24xx_init_io(smdk2416_iodesc, ARRAY_SIZE(smdk2416_iodesc));
+ s3c24xx_init_clocks(12000000);
+ s3c24xx_init_uarts(smdk2416_uartcfgs, ARRAY_SIZE(smdk2416_uartcfgs));
+}
+
+static void __init smdk2416_machine_init(void)
+{
+ s3c_i2c0_set_platdata(NULL);
+ s3c_fb_set_platdata(&smdk2416_fb_platdata);
+
+ gpio_request(S3C2410_GPB(4), "USBHost Power");
+ gpio_direction_output(S3C2410_GPB(4), 1);
+
+ gpio_request(S3C2410_GPB(3), "Display Power");
+ gpio_direction_output(S3C2410_GPB(3), 1);
+
+ gpio_request(S3C2410_GPB(1), "Display Reset");
+ gpio_direction_output(S3C2410_GPB(1), 1);
+
+ platform_add_devices(smdk2416_devices, ARRAY_SIZE(smdk2416_devices));
+ smdk_machine_init();
+}
+
+MACHINE_START(SMDK2416, "SMDK2416")
+ /* Maintainer: Yauhen Kharuzhy <jekhor@gmail.com> */
+ .phys_io = S3C2410_PA_UART,
+ .io_pg_offst = (((u32)S3C24XX_VA_UART) >> 18) & 0xfffc,
+ .boot_params = S3C2410_SDRAM_PA + 0x100,
+
+ .init_irq = s3c24xx_init_irq,
+ .map_io = smdk2416_map_io,
+ .init_machine = smdk2416_machine_init,
+ .timer = &s3c24xx_timer,
+MACHINE_END
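A quick sanity check of the framebuffer timings defined above (not something the patch itself computes): pixclock is given in picoseconds, so 41094 ps is roughly 24.3 MHz; the total line is 800 + 8 + 13 + 3 = 824 clocks and the total frame is 480 + 7 + 5 + 1 = 493 lines, which gives about 24.3 MHz / (824 * 493), or close to 60 Hz refresh, consistent with a standard WVGA panel.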
diff --git a/arch/arm/mach-s3c2416/s3c2416.c b/arch/arm/mach-s3c2416/s3c2416.c
new file mode 100644
index 000000000000..35dabccd0ac2
--- /dev/null
+++ b/arch/arm/mach-s3c2416/s3c2416.c
@@ -0,0 +1,130 @@
+/* linux/arch/arm/mach-s3c2416/s3c2416.c
+ *
+ * Copyright (c) 2009 Yauhen Kharuzhy <jekhor@gmail.com>,
+ * as part of OpenInkpot project
+ * Copyright (c) 2009 Promwad Innovation Company
+ * Yauhen Kharuzhy <yauhen.kharuzhy@promwad.com>
+ *
+ * Samsung S3C2416 Mobile CPU support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/timer.h>
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <linux/serial_core.h>
+#include <linux/sysdev.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <asm/mach/irq.h>
+
+#include <mach/hardware.h>
+#include <asm/proc-fns.h>
+#include <asm/irq.h>
+
+#include <mach/reset.h>
+#include <mach/idle.h>
+#include <mach/regs-s3c2443-clock.h>
+
+#include <plat/gpio-core.h>
+#include <plat/gpio-cfg.h>
+#include <plat/gpio-cfg-helpers.h>
+#include <plat/s3c2416.h>
+#include <plat/devs.h>
+#include <plat/cpu.h>
+
+#include <plat/iic-core.h>
+
+static struct map_desc s3c2416_iodesc[] __initdata = {
+ IODESC_ENT(WATCHDOG),
+ IODESC_ENT(CLKPWR),
+ IODESC_ENT(TIMER),
+};
+
+struct sysdev_class s3c2416_sysclass = {
+ .name = "s3c2416-core",
+};
+
+static struct sys_device s3c2416_sysdev = {
+ .cls = &s3c2416_sysclass,
+};
+
+static void s3c2416_hard_reset(void)
+{
+ __raw_writel(S3C2443_SWRST_RESET, S3C2443_SWRST);
+}
+
+int __init s3c2416_init(void)
+{
+ printk(KERN_INFO "S3C2416: Initializing architecture\n");
+
+ s3c24xx_reset_hook = s3c2416_hard_reset;
+ /* s3c24xx_idle = s3c2416_idle; */
+
+ /* change WDT IRQ number */
+ s3c_device_wdt.resource[1].start = IRQ_S3C2443_WDT;
+ s3c_device_wdt.resource[1].end = IRQ_S3C2443_WDT;
+
+ /* the i2c devices are directly compatible with s3c2440 */
+ s3c_i2c0_setname("s3c2440-i2c");
+ s3c_i2c1_setname("s3c2440-i2c");
+
+ s3c_device_fb.name = "s3c2443-fb";
+
+ return sysdev_register(&s3c2416_sysdev);
+}
+
+void __init s3c2416_init_uarts(struct s3c2410_uartcfg *cfg, int no)
+{
+ s3c24xx_init_uartdevs("s3c2440-uart", s3c2410_uart_resources, cfg, no);
+
+ s3c_device_nand.name = "s3c2416-nand";
+}
+
+/* s3c2416_map_io
+ *
+ * register the standard cpu IO areas, and any passed in from the
+ * machine specific initialisation.
+ */
+
+void __init s3c2416_map_io(void)
+{
+ s3c24xx_gpiocfg_default.set_pull = s3c_gpio_setpull_updown;
+ s3c24xx_gpiocfg_default.get_pull = s3c_gpio_getpull_updown;
+
+ iotable_init(s3c2416_iodesc, ARRAY_SIZE(s3c2416_iodesc));
+}
+
+/* need to register the class before we actually register the device, and
+ * we also need to ensure that it has been initialised before any of the
+ * drivers even try to use it (even if not on an s3c2416 based system),
+ * as a driver which may support both the 2443 and the 2440 may try to use it.
+*/
+
+static int __init s3c2416_core_init(void)
+{
+ return sysdev_class_register(&s3c2416_sysclass);
+}
+
+core_initcall(s3c2416_core_init);
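The ordering comment above relies on initcall levels: core_initcall() runs before arch_initcall(), so the sysdev class is available by the time the interrupt code in mach-s3c2416/irq.c registers its driver. A hedged sketch of that consumer side, mirroring s3c2416_irq_init() earlier in this series (the example_* names are placeholders):

/* Hedged sketch: a sysdev driver binding to the s3c2416 class that was
 * registered at core_initcall time above. Mirrors the real registration
 * done by s3c2416_irq_init(); example_* are made-up names.
 */
static int __init example_add(struct sys_device *sysdev)
{
	/* called once s3c2416_init() has registered the sysdev */
	return 0;
}

static struct sysdev_driver example_driver = {
	.add = example_add,
};

static int __init example_init(void)
{
	return sysdev_driver_register(&s3c2416_sysclass, &example_driver);
}
arch_initcall(example_init);	/* runs after the core_initcall above */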
diff --git a/arch/arm/mach-s3c2440/Kconfig b/arch/arm/mach-s3c2440/Kconfig
index 7f465265cf04..cd8e7de388f0 100644
--- a/arch/arm/mach-s3c2440/Kconfig
+++ b/arch/arm/mach-s3c2440/Kconfig
@@ -6,6 +6,7 @@ config CPU_S3C2440
bool
depends on ARCH_S3C2410
select CPU_ARM920T
+ select S3C_GPIO_PULL_UP
select S3C2410_CLOCK
select S3C2410_PM if PM
select S3C2410_GPIO
@@ -187,4 +188,17 @@ config MACH_MINI2440
Say Y here to select support for the MINI2440. It is a 10cm x 10cm board
available via various sources. It can come with a 3.5" or 7" touch LCD.
+config MACH_RX1950
+ bool "HP iPAQ rx1950"
+ select CPU_S3C2442
+ select S3C24XX_DCLK
+ select PM_H1940 if PM
+ select I2C
+ select S3C2410_PWM
+ select S3C_DEV_NAND
+ select S3C2410_IOTIMING if S3C2440_CPUFREQ
+ select S3C2440_XTAL_16934400
+ help
+ Say Y here if you're using an HP iPAQ rx1950
+
endmenu
diff --git a/arch/arm/mach-s3c2440/Makefile b/arch/arm/mach-s3c2440/Makefile
index c85ba32d8956..d5440fa34b04 100644
--- a/arch/arm/mach-s3c2440/Makefile
+++ b/arch/arm/mach-s3c2440/Makefile
@@ -34,6 +34,7 @@ obj-$(CONFIG_MACH_NEXCODER_2440) += mach-nexcoder.o
obj-$(CONFIG_MACH_AT2440EVB) += mach-at2440evb.o
obj-$(CONFIG_MACH_MINI2440) += mach-mini2440.o
obj-$(CONFIG_MACH_NEO1973_GTA02) += mach-gta02.o
+obj-$(CONFIG_MACH_RX1950) += mach-rx1950.o
# extra machine support
diff --git a/arch/arm/mach-s3c2440/mach-gta02.c b/arch/arm/mach-s3c2440/mach-gta02.c
index 45799c608d8f..9e39faa283b9 100644
--- a/arch/arm/mach-s3c2440/mach-gta02.c
+++ b/arch/arm/mach-s3c2440/mach-gta02.c
@@ -49,7 +49,6 @@
#include <linux/io.h>
#include <linux/i2c.h>
-#include <linux/backlight.h>
#include <linux/regulator/machine.h>
#include <linux/mfd/pcf50633/core.h>
@@ -57,6 +56,7 @@
#include <linux/mfd/pcf50633/adc.h>
#include <linux/mfd/pcf50633/gpio.h>
#include <linux/mfd/pcf50633/pmic.h>
+#include <linux/mfd/pcf50633/backlight.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
@@ -254,6 +254,12 @@ static char *gta02_batteries[] = {
"battery",
};
+static struct pcf50633_bl_platform_data gta02_backlight_data = {
+ .default_brightness = 0x3f,
+ .default_brightness_limit = 0,
+ .ramp_time = 5,
+};
+
struct pcf50633_platform_data gta02_pcf_pdata = {
.resumers = {
[0] = PCF50633_INT1_USBINS |
@@ -271,6 +277,8 @@ struct pcf50633_platform_data gta02_pcf_pdata = {
.charger_reference_current_ma = 1000,
+ .backlight_data = &gta02_backlight_data,
+
.reg_init_data = {
[PCF50633_REGULATOR_AUTO] = {
.constraints = {
@@ -478,71 +486,6 @@ static struct s3c2410_udc_mach_info gta02_udc_cfg = {
};
-
-
-static void gta02_bl_set_intensity(int intensity)
-{
- struct pcf50633 *pcf = gta02_pcf;
- int old_intensity = pcf50633_reg_read(pcf, PCF50633_REG_LEDOUT);
-
- /* We map 8-bit intensity to 6-bit intensity in hardware. */
- intensity >>= 2;
-
- /*
- * This can happen during, eg, print of panic on blanked console,
- * but we can't service i2c without interrupts active, so abort.
- */
- if (in_atomic()) {
- printk(KERN_ERR "gta02_bl_set_intensity called while atomic\n");
- return;
- }
-
- old_intensity = pcf50633_reg_read(pcf, PCF50633_REG_LEDOUT);
- if (intensity == old_intensity)
- return;
-
- /* We can't do this anywhere else. */
- pcf50633_reg_write(pcf, PCF50633_REG_LEDDIM, 5);
-
- if (!(pcf50633_reg_read(pcf, PCF50633_REG_LEDENA) & 3))
- old_intensity = 0;
-
- /*
- * The PCF50633 cannot handle LEDOUT = 0 (datasheet p60)
- * if seen, you have to re-enable the LED unit.
- */
- if (!intensity || !old_intensity)
- pcf50633_reg_write(pcf, PCF50633_REG_LEDENA, 0);
-
- /* Illegal to set LEDOUT to 0. */
- if (!intensity)
- pcf50633_reg_set_bit_mask(pcf, PCF50633_REG_LEDOUT, 0x3f, 2);
- else
- pcf50633_reg_set_bit_mask(pcf, PCF50633_REG_LEDOUT, 0x3f,
- intensity);
-
- if (intensity)
- pcf50633_reg_write(pcf, PCF50633_REG_LEDENA, 2);
-
-}
-
-static struct generic_bl_info gta02_bl_info = {
- .name = "gta02-bl",
- .max_intensity = 0xff,
- .default_intensity = 0xff,
- .set_bl_intensity = gta02_bl_set_intensity,
-};
-
-static struct platform_device gta02_bl_dev = {
- .name = "generic-bl",
- .id = 1,
- .dev = {
- .platform_data = &gta02_bl_info,
- },
-};
-
-
-
/* USB */
static struct s3c2410_hcd_info gta02_usb_info __initdata = {
.port[0] = {
@@ -579,7 +522,6 @@ static struct platform_device *gta02_devices[] __initdata = {
/* These guys DO need to be children of PMU. */
static struct platform_device *gta02_devices_pmu_children[] = {
- &gta02_bl_dev,
};
diff --git a/arch/arm/mach-s3c2440/mach-mini2440.c b/arch/arm/mach-s3c2440/mach-mini2440.c
index 571b17683d96..a76bcda210ad 100644
--- a/arch/arm/mach-s3c2440/mach-mini2440.c
+++ b/arch/arm/mach-s3c2440/mach-mini2440.c
@@ -53,6 +53,7 @@
#include <linux/mtd/nand_ecc.h>
#include <linux/mtd/partitions.h>
+#include <plat/gpio-cfg.h>
#include <plat/clock.h>
#include <plat/devs.h>
#include <plat/cpu.h>
@@ -102,10 +103,10 @@ static void mini2440_udc_pullup(enum s3c2410_udc_cmd_e cmd)
switch (cmd) {
case S3C2410_UDC_P_ENABLE :
- s3c2410_gpio_setpin(S3C2410_GPC(5), 1);
+ gpio_set_value(S3C2410_GPC(5), 1);
break;
case S3C2410_UDC_P_DISABLE :
- s3c2410_gpio_setpin(S3C2410_GPC(5), 0);
+ gpio_set_value(S3C2410_GPC(5), 0);
break;
case S3C2410_UDC_P_RESET :
break;
@@ -632,25 +633,25 @@ static void __init mini2440_init(void)
mini2440_parse_features(&features, mini2440_features_str);
/* turn LCD on */
- s3c2410_gpio_cfgpin(S3C2410_GPC(0), S3C2410_GPC0_LEND);
+ s3c_gpio_cfgpin(S3C2410_GPC(0), S3C2410_GPC0_LEND);
/* Turn the backlight early on */
- s3c2410_gpio_setpin(S3C2410_GPG(4), 1);
- s3c2410_gpio_cfgpin(S3C2410_GPG(4), S3C2410_GPIO_OUTPUT);
+ WARN_ON(gpio_request(S3C2410_GPG(4), "backlight"));
+ gpio_direction_output(S3C2410_GPG(4), 1);
/* remove pullup on optional PWM backlight -- unused on 3.5 and 7"s */
- s3c2410_gpio_pullup(S3C2410_GPB(1), 0);
+ s3c_gpio_setpull(S3C2410_GPB(1), S3C_GPIO_PULL_UP);
s3c2410_gpio_setpin(S3C2410_GPB(1), 0);
- s3c2410_gpio_cfgpin(S3C2410_GPB(1), S3C2410_GPIO_INPUT);
+ s3c_gpio_cfgpin(S3C2410_GPB(1), S3C2410_GPIO_INPUT);
/* Make sure the D+ pullup pin is output */
- s3c2410_gpio_cfgpin(S3C2410_GPC(5), S3C2410_GPIO_OUTPUT);
+ WARN_ON(gpio_request(S3C2410_GPC(5), "udc pup"));
+ gpio_direction_output(S3C2410_GPC(5), 0);
/* mark the key as input, without pullups (there is one on the board) */
for (i = 0; i < ARRAY_SIZE(mini2440_buttons); i++) {
- s3c2410_gpio_pullup(mini2440_buttons[i].gpio, 0);
- s3c2410_gpio_cfgpin(mini2440_buttons[i].gpio,
- S3C2410_GPIO_INPUT);
+ s3c_gpio_setpull(mini2440_buttons[i].gpio, S3C_GPIO_PULL_UP);
+ s3c_gpio_cfgpin(mini2440_buttons[i].gpio, S3C2410_GPIO_INPUT);
}
if (features.lcd_index != -1) {
int li;
diff --git a/arch/arm/mach-s3c2440/mach-nexcoder.c b/arch/arm/mach-s3c2440/mach-nexcoder.c
index 342041593f22..3ff62de45fde 100644
--- a/arch/arm/mach-s3c2440/mach-nexcoder.c
+++ b/arch/arm/mach-s3c2440/mach-nexcoder.c
@@ -40,6 +40,7 @@
#include <plat/regs-serial.h>
#include <plat/iic.h>
+#include <plat/gpio-cfg.h>
#include <plat/s3c2410.h>
#include <plat/s3c244x.h>
#include <plat/clock.h>
@@ -122,15 +123,15 @@ static void __init nexcoder_sensorboard_init(void)
{
// Initialize SCCB bus
s3c2410_gpio_setpin(S3C2410_GPE(14), 1); // IICSCL
- s3c2410_gpio_cfgpin(S3C2410_GPE(14), S3C2410_GPIO_OUTPUT);
+ s3c_gpio_cfgpin(S3C2410_GPE(14), S3C2410_GPIO_OUTPUT);
s3c2410_gpio_setpin(S3C2410_GPE(15), 1); // IICSDA
- s3c2410_gpio_cfgpin(S3C2410_GPE(15), S3C2410_GPIO_OUTPUT);
+ s3c_gpio_cfgpin(S3C2410_GPE(15), S3C2410_GPIO_OUTPUT);
// Power up the sensor board
s3c2410_gpio_setpin(S3C2410_GPF(1), 1);
- s3c2410_gpio_cfgpin(S3C2410_GPF(1), S3C2410_GPIO_OUTPUT); // CAM_GPIO7 => nLDO_PWRDN
+ s3c_gpio_cfgpin(S3C2410_GPF(1), S3C2410_GPIO_OUTPUT); // CAM_GPIO7 => nLDO_PWRDN
s3c2410_gpio_setpin(S3C2410_GPF(2), 0);
- s3c2410_gpio_cfgpin(S3C2410_GPF(2), S3C2410_GPIO_OUTPUT); // CAM_GPIO6 => CAM_PWRDN
+ s3c_gpio_cfgpin(S3C2410_GPF(2), S3C2410_GPIO_OUTPUT); // CAM_GPIO6 => CAM_PWRDN
}
static void __init nexcoder_map_io(void)
diff --git a/arch/arm/mach-s3c2440/mach-osiris.c b/arch/arm/mach-s3c2440/mach-osiris.c
index f35371db33f5..319458da71a0 100644
--- a/arch/arm/mach-s3c2440/mach-osiris.c
+++ b/arch/arm/mach-s3c2440/mach-osiris.c
@@ -49,6 +49,7 @@
#include <linux/mtd/nand_ecc.h>
#include <linux/mtd/partitions.h>
+#include <plat/gpio-cfg.h>
#include <plat/clock.h>
#include <plat/devs.h>
#include <plat/cpu.h>
@@ -298,7 +299,7 @@ static int osiris_pm_suspend(struct sys_device *sd, pm_message_t state)
/* ensure that an nRESET is not generated on resume. */
s3c2410_gpio_setpin(S3C2410_GPA(21), 1);
- s3c2410_gpio_cfgpin(S3C2410_GPA(21), S3C2410_GPIO_OUTPUT);
+ s3c_gpio_cfgpin(S3C2410_GPA(21), S3C2410_GPIO_OUTPUT);
return 0;
}
@@ -310,7 +311,7 @@ static int osiris_pm_resume(struct sys_device *sd)
__raw_writeb(pm_osiris_ctrl0, OSIRIS_VA_CTRL0);
- s3c2410_gpio_cfgpin(S3C2410_GPA(21), S3C2410_GPA21_nRSTOUT);
+ s3c_gpio_cfgpin(S3C2410_GPA(21), S3C2410_GPA21_nRSTOUT);
return 0;
}
diff --git a/arch/arm/mach-s3c2440/mach-rx1950.c b/arch/arm/mach-s3c2440/mach-rx1950.c
new file mode 100644
index 000000000000..8603b577a24b
--- /dev/null
+++ b/arch/arm/mach-s3c2440/mach-rx1950.c
@@ -0,0 +1,582 @@
+/* linux/arch/arm/mach-s3c2440/mach-rx1950.c
+ *
+ * Copyright (c) 2006-2009 Victor Chukhantsev, Denis Grigoriev,
+ * Copyright (c) 2007-2010 Vasily Khoruzhick
+ *
+ * based on smdk2440 written by Ben Dooks
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+*/
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <linux/serial_core.h>
+#include <linux/input.h>
+#include <linux/gpio_keys.h>
+#include <linux/sysdev.h>
+#include <linux/pwm_backlight.h>
+#include <linux/pwm.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+
+#include <linux/mmc/host.h>
+
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <asm/mach-types.h>
+
+#include <mach/regs-gpio.h>
+#include <mach/regs-gpioj.h>
+#include <mach/h1940.h>
+#include <mach/fb.h>
+
+#include <plat/clock.h>
+#include <plat/regs-serial.h>
+#include <plat/regs-iic.h>
+#include <plat/mci.h>
+#include <plat/udc.h>
+#include <plat/nand.h>
+#include <plat/iic.h>
+#include <plat/devs.h>
+#include <plat/cpu.h>
+#include <plat/pm.h>
+#include <plat/irq.h>
+#include <plat/ts.h>
+
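+/* LCD power PWM (channel 1) timings: period and duty cycle, in nanoseconds */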
+#define LCD_PWM_PERIOD 192960
+#define LCD_PWM_DUTY 127353
+
+static struct map_desc rx1950_iodesc[] __initdata = {
+};
+
+static struct s3c24xx_uart_clksrc rx1950_serial_clocks[] = {
+ [0] = {
+ .name = "fclk",
+ .divisor = 0x0a,
+ .min_baud = 0,
+ .max_baud = 0,
+ },
+};
+
+static struct s3c2410_uartcfg rx1950_uartcfgs[] __initdata = {
+ [0] = {
+ .hwport = 0,
+ .flags = 0,
+ .ucon = 0x3c5,
+ .ulcon = 0x03,
+ .ufcon = 0x51,
+ .clocks = rx1950_serial_clocks,
+ .clocks_size = ARRAY_SIZE(rx1950_serial_clocks),
+ },
+ [1] = {
+ .hwport = 1,
+ .flags = 0,
+ .ucon = 0x3c5,
+ .ulcon = 0x03,
+ .ufcon = 0x51,
+ .clocks = rx1950_serial_clocks,
+ .clocks_size = ARRAY_SIZE(rx1950_serial_clocks),
+ },
+ /* IR port */
+ [2] = {
+ .hwport = 2,
+ .flags = 0,
+ .ucon = 0x3c5,
+ .ulcon = 0x43,
+ .ufcon = 0xf1,
+ .clocks = rx1950_serial_clocks,
+ .clocks_size = ARRAY_SIZE(rx1950_serial_clocks),
+ },
+};
+
+static struct s3c2410fb_display rx1950_display = {
+ .type = S3C2410_LCDCON1_TFT,
+ .width = 240,
+ .height = 320,
+ .xres = 240,
+ .yres = 320,
+ .bpp = 16,
+
+ .pixclock = 260000,
+ .left_margin = 10,
+ .right_margin = 20,
+ .hsync_len = 10,
+ .upper_margin = 2,
+ .lower_margin = 2,
+ .vsync_len = 2,
+
+ .lcdcon5 = S3C2410_LCDCON5_FRM565 |
+ S3C2410_LCDCON5_INVVCLK |
+ S3C2410_LCDCON5_INVVLINE |
+ S3C2410_LCDCON5_INVVFRAME |
+ S3C2410_LCDCON5_HWSWP |
+ (0x02 << 13) |
+ (0x02 << 15),
+
+};
+
+static struct s3c2410fb_mach_info rx1950_lcd_cfg = {
+ .displays = &rx1950_display,
+ .num_displays = 1,
+ .default_display = 0,
+
+ .lpcsel = 0x02,
+ .gpccon = 0xaa9556a9,
+ .gpccon_mask = 0xffc003fc,
+ .gpcup = 0x0000ffff,
+ .gpcup_mask = 0xffffffff,
+
+ .gpdcon = 0xaa90aaa1,
+ .gpdcon_mask = 0xffc0fff0,
+ .gpdup = 0x0000fcfd,
+ .gpdup_mask = 0xffffffff,
+
+};
+
+static struct pwm_device *lcd_pwm;
+
+void rx1950_lcd_power(int enable)
+{
+ int i;
+ static int enabled;
+ if (enabled == enable)
+ return;
+ if (!enable) {
+
+ /* GPC11-GPC15->OUTPUT */
+ for (i = 11; i < 16; i++)
+ gpio_direction_output(S3C2410_GPC(i), 1);
+
+ /* Wait a bit here... */
+ mdelay(100);
+
+ /* GPD2-GPD7->OUTPUT */
+ /* GPD11-GPD15->OUTPUT */
+ /* GPD2-GPD7->1, GPD11-GPD15->1 */
+ for (i = 2; i < 8; i++)
+ gpio_direction_output(S3C2410_GPD(i), 1);
+ for (i = 11; i < 16; i++)
+ gpio_direction_output(S3C2410_GPD(i), 1);
+
+ /* Wait a bit here...*/
+ mdelay(100);
+
+ /* GPB0->OUTPUT, GPB0->0 */
+ gpio_direction_output(S3C2410_GPB(0), 0);
+
+ /* GPC1-GPC4->OUTPUT, GPC1-4->0 */
+ for (i = 1; i < 5; i++)
+ gpio_direction_output(S3C2410_GPC(i), 0);
+
+ /* GPC15-GPC11->0 */
+ for (i = 11; i < 16; i++)
+ gpio_direction_output(S3C2410_GPC(i), 0);
+
+	/* GPD15-GPD11->0, GPD2-GPD7->0 */
+ for (i = 11; i < 16; i++)
+ gpio_direction_output(S3C2410_GPD(i), 0);
+
+ for (i = 2; i < 8; i++)
+ gpio_direction_output(S3C2410_GPD(i), 0);
+
+ /* GPC6->0, GPC7->0, GPC5->0 */
+ gpio_direction_output(S3C2410_GPC(6), 0);
+ gpio_direction_output(S3C2410_GPC(7), 0);
+ gpio_direction_output(S3C2410_GPC(5), 0);
+
+ /* GPB1->OUTPUT, GPB1->0 */
+ gpio_direction_output(S3C2410_GPB(1), 0);
+ pwm_config(lcd_pwm, 0, LCD_PWM_PERIOD);
+ pwm_disable(lcd_pwm);
+
+ /* GPC0->0, GPC10->0 */
+ gpio_direction_output(S3C2410_GPC(0), 0);
+ gpio_direction_output(S3C2410_GPC(10), 0);
+ } else {
+ pwm_config(lcd_pwm, LCD_PWM_DUTY, LCD_PWM_PERIOD);
+ pwm_enable(lcd_pwm);
+
+ gpio_direction_output(S3C2410_GPC(0), 1);
+ gpio_direction_output(S3C2410_GPC(5), 1);
+
+ s3c_gpio_cfgpin(S3C2410_GPB(1), S3C2410_GPB1_TOUT1);
+ gpio_direction_output(S3C2410_GPC(7), 1);
+
+ for (i = 1; i < 5; i++)
+ s3c_gpio_cfgpin(S3C2410_GPC(i), S3C_GPIO_SFN(2));
+
+ for (i = 11; i < 16; i++)
+ s3c_gpio_cfgpin(S3C2410_GPC(i), S3C_GPIO_SFN(2));
+
+ for (i = 2; i < 8; i++)
+ s3c_gpio_cfgpin(S3C2410_GPD(i), S3C_GPIO_SFN(2));
+
+ for (i = 11; i < 16; i++)
+ s3c_gpio_cfgpin(S3C2410_GPD(i), S3C_GPIO_SFN(2));
+
+ gpio_direction_output(S3C2410_GPC(10), 1);
+ gpio_direction_output(S3C2410_GPC(6), 1);
+ }
+ enabled = enable;
+}
+
+static void rx1950_bl_power(int enable)
+{
+ static int enabled;
+ if (enabled == enable)
+ return;
+ if (!enable) {
+ gpio_direction_output(S3C2410_GPB(0), 0);
+ } else {
+		/* The LED driver needs a "push" to power on */
+ gpio_direction_output(S3C2410_GPB(0), 1);
+		/* Warm up the backlight for one PWM period.
+		 * Without this trick it's almost impossible to
+		 * enable the backlight with a low brightness value.
+ */
+ ndelay(48000);
+ s3c_gpio_cfgpin(S3C2410_GPB(0), S3C2410_GPB0_TOUT0);
+ }
+ enabled = enable;
+}
+
+static int rx1950_backlight_init(struct device *dev)
+{
+ WARN_ON(gpio_request(S3C2410_GPB(0), "Backlight"));
+ lcd_pwm = pwm_request(1, "RX1950 LCD");
+ if (IS_ERR(lcd_pwm)) {
+ dev_err(dev, "Unable to request PWM for LCD power!\n");
+ return PTR_ERR(lcd_pwm);
+ }
+
+ rx1950_lcd_power(1);
+ rx1950_bl_power(1);
+
+ return 0;
+}
+
+static void rx1950_backlight_exit(struct device *dev)
+{
+ rx1950_bl_power(0);
+ rx1950_lcd_power(0);
+
+ pwm_free(lcd_pwm);
+ gpio_free(S3C2410_GPB(0));
+}
+
+
+static int rx1950_backlight_notify(struct device *dev, int brightness)
+{
+ if (!brightness) {
+ rx1950_bl_power(0);
+ rx1950_lcd_power(0);
+ } else {
+ rx1950_lcd_power(1);
+ rx1950_bl_power(1);
+ }
+ return brightness;
+}
+
+static struct platform_pwm_backlight_data rx1950_backlight_data = {
+ .pwm_id = 0,
+ .max_brightness = 24,
+ .dft_brightness = 4,
+ .pwm_period_ns = 48000,
+ .init = rx1950_backlight_init,
+ .notify = rx1950_backlight_notify,
+ .exit = rx1950_backlight_exit,
+};
+
+static struct platform_device rx1950_backlight = {
+ .name = "pwm-backlight",
+ .dev = {
+ .parent = &s3c_device_timer[0].dev,
+ .platform_data = &rx1950_backlight_data,
+ },
+};
+
+static void rx1950_set_mmc_power(unsigned char power_mode, unsigned short vdd)
+{
+ switch (power_mode) {
+ case MMC_POWER_OFF:
+ gpio_direction_output(S3C2410_GPJ(1), 0);
+ break;
+ case MMC_POWER_UP:
+ case MMC_POWER_ON:
+ gpio_direction_output(S3C2410_GPJ(1), 1);
+ break;
+ default:
+ break;
+ }
+}
+
+static struct s3c24xx_mci_pdata rx1950_mmc_cfg __initdata = {
+ .gpio_detect = S3C2410_GPF(5),
+ .gpio_wprotect = S3C2410_GPH(8),
+ .set_power = rx1950_set_mmc_power,
+ .ocr_avail = MMC_VDD_32_33,
+};
+
+static struct mtd_partition rx1950_nand_part[] = {
+ [0] = {
+ .name = "Boot0",
+ .offset = 0,
+ .size = 0x4000,
+ .mask_flags = MTD_WRITEABLE,
+ },
+ [1] = {
+ .name = "Boot1",
+ .offset = MTDPART_OFS_APPEND,
+ .size = 0x40000,
+ .mask_flags = MTD_WRITEABLE,
+ },
+ [2] = {
+ .name = "Kernel",
+ .offset = MTDPART_OFS_APPEND,
+ .size = 0x300000,
+ .mask_flags = 0,
+ },
+ [3] = {
+ .name = "Filesystem",
+ .offset = MTDPART_OFS_APPEND,
+ .size = MTDPART_SIZ_FULL,
+ .mask_flags = 0,
+ },
+};
+
+static struct s3c2410_nand_set rx1950_nand_sets[] = {
+ [0] = {
+ .name = "Internal",
+ .nr_chips = 1,
+ .nr_partitions = ARRAY_SIZE(rx1950_nand_part),
+ .partitions = rx1950_nand_part,
+ },
+};
+
+static struct s3c2410_platform_nand rx1950_nand_info = {
+ .tacls = 25,
+ .twrph0 = 50,
+ .twrph1 = 15,
+ .nr_sets = ARRAY_SIZE(rx1950_nand_sets),
+ .sets = rx1950_nand_sets,
+};
+
+static void rx1950_udc_pullup(enum s3c2410_udc_cmd_e cmd)
+{
+ switch (cmd) {
+ case S3C2410_UDC_P_ENABLE:
+ gpio_direction_output(S3C2410_GPJ(5), 1);
+ break;
+ case S3C2410_UDC_P_DISABLE:
+ gpio_direction_output(S3C2410_GPJ(5), 0);
+ break;
+ case S3C2410_UDC_P_RESET:
+ break;
+ default:
+ break;
+ }
+}
+
+static struct s3c2410_udc_mach_info rx1950_udc_cfg __initdata = {
+ .udc_command = rx1950_udc_pullup,
+ .vbus_pin = S3C2410_GPG(5),
+ .vbus_pin_inverted = 1,
+};
+
+static struct s3c2410_ts_mach_info rx1950_ts_cfg __initdata = {
+ .delay = 10000,
+ .presc = 49,
+ .oversampling_shift = 3,
+};
+
+static struct gpio_keys_button rx1950_gpio_keys_table[] = {
+ {
+ .code = KEY_POWER,
+ .gpio = S3C2410_GPF(0),
+ .active_low = 1,
+ .desc = "Power button",
+ .wakeup = 1,
+ },
+ {
+ .code = KEY_F5,
+ .gpio = S3C2410_GPF(7),
+ .active_low = 1,
+ .desc = "Record button",
+ },
+ {
+ .code = KEY_F1,
+ .gpio = S3C2410_GPG(0),
+ .active_low = 1,
+ .desc = "Calendar button",
+ },
+ {
+ .code = KEY_F2,
+ .gpio = S3C2410_GPG(2),
+ .active_low = 1,
+ .desc = "Contacts button",
+ },
+ {
+ .code = KEY_F3,
+ .gpio = S3C2410_GPG(3),
+ .active_low = 1,
+ .desc = "Mail button",
+ },
+ {
+ .code = KEY_F4,
+ .gpio = S3C2410_GPG(7),
+ .active_low = 1,
+ .desc = "WLAN button",
+ },
+ {
+ .code = KEY_LEFT,
+ .gpio = S3C2410_GPG(10),
+ .active_low = 1,
+ .desc = "Left button",
+ },
+ {
+ .code = KEY_RIGHT,
+ .gpio = S3C2410_GPG(11),
+ .active_low = 1,
+ .desc = "Right button",
+ },
+ {
+ .code = KEY_UP,
+ .gpio = S3C2410_GPG(4),
+ .active_low = 1,
+ .desc = "Up button",
+ },
+ {
+ .code = KEY_DOWN,
+ .gpio = S3C2410_GPG(6),
+ .active_low = 1,
+ .desc = "Down button",
+ },
+ {
+ .code = KEY_ENTER,
+ .gpio = S3C2410_GPG(9),
+ .active_low = 1,
+ .desc = "Ok button"
+ },
+};
+
+static struct gpio_keys_platform_data rx1950_gpio_keys_data = {
+ .buttons = rx1950_gpio_keys_table,
+ .nbuttons = ARRAY_SIZE(rx1950_gpio_keys_table),
+};
+
+static struct platform_device rx1950_device_gpiokeys = {
+ .name = "gpio-keys",
+ .dev.platform_data = &rx1950_gpio_keys_data,
+};
+
+static struct s3c2410_platform_i2c rx1950_i2c_data = {
+ .flags = 0,
+ .slave_addr = 0x42,
+ .frequency = 400 * 1000,
+ .sda_delay = S3C2410_IICLC_SDA_DELAY5 | S3C2410_IICLC_FILTER_ON,
+};
+
+static struct platform_device *rx1950_devices[] __initdata = {
+ &s3c_device_lcd,
+ &s3c_device_wdt,
+ &s3c_device_i2c0,
+ &s3c_device_iis,
+ &s3c_device_usbgadget,
+ &s3c_device_rtc,
+ &s3c_device_nand,
+ &s3c_device_sdi,
+ &s3c_device_adc,
+ &s3c_device_ts,
+ &s3c_device_timer[0],
+ &s3c_device_timer[1],
+ &rx1950_backlight,
+ &rx1950_device_gpiokeys,
+};
+
+static struct clk *rx1950_clocks[] __initdata = {
+ &s3c24xx_clkout0,
+ &s3c24xx_clkout1,
+};
+
+static void __init rx1950_map_io(void)
+{
+ s3c24xx_clkout0.parent = &clk_h;
+ s3c24xx_clkout1.parent = &clk_f;
+
+ s3c24xx_register_clocks(rx1950_clocks, ARRAY_SIZE(rx1950_clocks));
+
+ s3c24xx_init_io(rx1950_iodesc, ARRAY_SIZE(rx1950_iodesc));
+ s3c24xx_init_clocks(16934000);
+ s3c24xx_init_uarts(rx1950_uartcfgs, ARRAY_SIZE(rx1950_uartcfgs));
+
+ /* setup PM */
+
+#ifdef CONFIG_PM_H1940
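+	/* copy the resume stub to the address the H1940-compatible bootloader jumps to on wakeup */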
+ memcpy(phys_to_virt(H1940_SUSPEND_RESUMEAT), h1940_pm_return, 8);
+#endif
+
+ s3c_pm_init();
+}
+
+static void __init rx1950_init_machine(void)
+{
+ int i;
+
+ s3c24xx_fb_set_platdata(&rx1950_lcd_cfg);
+ s3c24xx_udc_set_platdata(&rx1950_udc_cfg);
+ s3c24xx_ts_set_platdata(&rx1950_ts_cfg);
+ s3c24xx_mci_set_platdata(&rx1950_mmc_cfg);
+ s3c_i2c0_set_platdata(&rx1950_i2c_data);
+ s3c_nand_set_platdata(&rx1950_nand_info);
+
+ /* Turn off suspend on both USB ports, and switch the
+ * selectable USB port to USB device mode. */
+ s3c2410_modify_misccr(S3C2410_MISCCR_USBHOST |
+ S3C2410_MISCCR_USBSUSPND0 |
+ S3C2410_MISCCR_USBSUSPND1, 0x0);
+
+ WARN_ON(gpio_request(S3C2410_GPJ(5), "UDC pullup"));
+ gpio_direction_output(S3C2410_GPJ(5), 0);
+
+ /* mmc power is disabled by default */
+ WARN_ON(gpio_request(S3C2410_GPJ(1), "MMC power"));
+ gpio_direction_output(S3C2410_GPJ(1), 0);
+
+ for (i = 0; i < 8; i++)
+ WARN_ON(gpio_request(S3C2410_GPC(i), "LCD power"));
+
+ for (i = 10; i < 16; i++)
+ WARN_ON(gpio_request(S3C2410_GPC(i), "LCD power"));
+
+ for (i = 2; i < 8; i++)
+ WARN_ON(gpio_request(S3C2410_GPD(i), "LCD power"));
+
+ for (i = 11; i < 16; i++)
+ WARN_ON(gpio_request(S3C2410_GPD(i), "LCD power"));
+
+ WARN_ON(gpio_request(S3C2410_GPB(1), "LCD power"));
+
+ platform_add_devices(rx1950_devices, ARRAY_SIZE(rx1950_devices));
+}
+
+MACHINE_START(RX1950, "HP iPAQ RX1950")
+ /* Maintainers: Vasily Khoruzhick */
+ .phys_io = S3C2410_PA_UART,
+ .io_pg_offst = (((u32) S3C24XX_VA_UART) >> 18) & 0xfffc,
+ .boot_params = S3C2410_SDRAM_PA + 0x100,
+ .map_io = rx1950_map_io,
+ .init_irq = s3c24xx_init_irq,
+ .init_machine = rx1950_init_machine,
+ .timer = &s3c24xx_timer,
+MACHINE_END
diff --git a/arch/arm/mach-s3c2440/mach-rx3715.c b/arch/arm/mach-s3c2440/mach-rx3715.c
index 1e836e506f8b..d2946de3f365 100644
--- a/arch/arm/mach-s3c2440/mach-rx3715.c
+++ b/arch/arm/mach-s3c2440/mach-rx3715.c
@@ -209,7 +209,7 @@ static void __init rx3715_init_machine(void)
}
MACHINE_START(RX3715, "IPAQ-RX3715")
- /* Maintainer: Ben Dooks <ben@fluff.org> */
+ /* Maintainer: Ben Dooks <ben-linux@fluff.org> */
.phys_io = S3C2410_PA_UART,
.io_pg_offst = (((u32)S3C24XX_VA_UART) >> 18) & 0xfffc,
.boot_params = S3C2410_SDRAM_PA + 0x100,
diff --git a/arch/arm/mach-s3c2440/mach-smdk2440.c b/arch/arm/mach-s3c2440/mach-smdk2440.c
index 3ac3d636d615..df83276d85ae 100644
--- a/arch/arm/mach-s3c2440/mach-smdk2440.c
+++ b/arch/arm/mach-s3c2440/mach-smdk2440.c
@@ -174,7 +174,7 @@ static void __init smdk2440_machine_init(void)
}
MACHINE_START(S3C2440, "SMDK2440")
- /* Maintainer: Ben Dooks <ben@fluff.org> */
+ /* Maintainer: Ben Dooks <ben-linux@fluff.org> */
.phys_io = S3C2410_PA_UART,
.io_pg_offst = (((u32)S3C24XX_VA_UART) >> 18) & 0xfffc,
.boot_params = S3C2410_SDRAM_PA + 0x100,
diff --git a/arch/arm/mach-s3c2440/s3c2440.c b/arch/arm/mach-s3c2440/s3c2440.c
index 2b68f7ea45ae..d50f3ae6173d 100644
--- a/arch/arm/mach-s3c2440/s3c2440.c
+++ b/arch/arm/mach-s3c2440/s3c2440.c
@@ -19,6 +19,7 @@
#include <linux/platform_device.h>
#include <linux/serial_core.h>
#include <linux/sysdev.h>
+#include <linux/gpio.h>
#include <linux/clk.h>
#include <linux/io.h>
@@ -33,6 +34,10 @@
#include <plat/cpu.h>
#include <plat/s3c244x.h>
+#include <plat/gpio-core.h>
+#include <plat/gpio-cfg.h>
+#include <plat/gpio-cfg-helpers.h>
+
static struct sys_device s3c2440_sysdev = {
.cls = &s3c2440_sysclass,
};
@@ -41,6 +46,9 @@ int __init s3c2440_init(void)
{
printk("S3C2440: Initialising architecture\n");
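+	/* the S3C2440's GPIO banks only have pull-up resistors, so use the single pull-up ("1up") helpers */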
+ s3c24xx_gpiocfg_default.set_pull = s3c_gpio_setpull_1up;
+ s3c24xx_gpiocfg_default.get_pull = s3c_gpio_getpull_1up;
+
/* change irq for watchdog */
s3c_device_wdt.resource[1].start = IRQ_S3C2440_WDT;
diff --git a/arch/arm/mach-s3c2443/Kconfig b/arch/arm/mach-s3c2443/Kconfig
index 698140af247c..4fef723126fa 100644
--- a/arch/arm/mach-s3c2443/Kconfig
+++ b/arch/arm/mach-s3c2443/Kconfig
@@ -8,6 +8,7 @@ config CPU_S3C2443
select S3C2443_DMA if S3C2410_DMA
select CPU_LLSERIAL_S3C2440
select SAMSUNG_CLKSRC
+ select S3C2443_CLOCK
help
Support for the S3C2443 SoC from the S3C24XX line
diff --git a/arch/arm/mach-s3c2443/clock.c b/arch/arm/mach-s3c2443/clock.c
index 62cd4eaee01b..0c3c0c884cd3 100644
--- a/arch/arm/mach-s3c2443/clock.c
+++ b/arch/arm/mach-s3c2443/clock.c
@@ -21,6 +21,7 @@
*/
#include <linux/init.h>
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/list.h>
@@ -54,111 +55,13 @@
* set the correct muxing at initialisation
*/
-static int s3c2443_gate(void __iomem *reg, struct clk *clk, int enable)
-{
- u32 ctrlbit = clk->ctrlbit;
- u32 con = __raw_readl(reg);
-
- if (enable)
- con |= ctrlbit;
- else
- con &= ~ctrlbit;
-
- __raw_writel(con, reg);
- return 0;
-}
-
-static int s3c2443_clkcon_enable_h(struct clk *clk, int enable)
-{
- return s3c2443_gate(S3C2443_HCLKCON, clk, enable);
-}
-
-static int s3c2443_clkcon_enable_p(struct clk *clk, int enable)
-{
- return s3c2443_gate(S3C2443_PCLKCON, clk, enable);
-}
-
-static int s3c2443_clkcon_enable_s(struct clk *clk, int enable)
-{
- return s3c2443_gate(S3C2443_SCLKCON, clk, enable);
-}
-
/* clock selections */
-/* mpllref is a direct descendant of clk_xtal by default, but it is not
- * elided as the EPLL can be either sourced by the XTAL or EXTCLK and as
- * such directly equating the two source clocks is impossible.
- */
-static struct clk clk_mpllref = {
- .name = "mpllref",
- .parent = &clk_xtal,
- .id = -1,
-};
-
static struct clk clk_i2s_ext = {
.name = "i2s-ext",
.id = -1,
};
-static struct clk *clk_epllref_sources[] = {
- [0] = &clk_mpllref,
- [1] = &clk_mpllref,
- [2] = &clk_xtal,
- [3] = &clk_ext,
-};
-
-static struct clksrc_clk clk_epllref = {
- .clk = {
- .name = "epllref",
- .id = -1,
- },
- .sources = &(struct clksrc_sources) {
- .sources = clk_epllref_sources,
- .nr_sources = ARRAY_SIZE(clk_epllref_sources),
- },
- .reg_src = { .reg = S3C2443_CLKSRC, .size = 2, .shift = 7 },
-};
-
-static unsigned long s3c2443_getrate_mdivclk(struct clk *clk)
-{
- unsigned long parent_rate = clk_get_rate(clk->parent);
- unsigned long div = __raw_readl(S3C2443_CLKDIV0);
-
- div &= S3C2443_CLKDIV0_EXTDIV_MASK;
- div >>= (S3C2443_CLKDIV0_EXTDIV_SHIFT-1); /* x2 */
-
- return parent_rate / (div + 1);
-}
-
-static struct clk clk_mdivclk = {
- .name = "mdivclk",
- .parent = &clk_mpllref,
- .id = -1,
- .ops = &(struct clk_ops) {
- .get_rate = s3c2443_getrate_mdivclk,
- },
-};
-
-static struct clk *clk_msysclk_sources[] = {
- [0] = &clk_mpllref,
- [1] = &clk_mpll,
- [2] = &clk_mdivclk,
- [3] = &clk_mpllref,
-};
-
-static struct clksrc_clk clk_msysclk = {
- .clk = {
- .name = "msysclk",
- .parent = &clk_xtal,
- .id = -1,
- },
- .sources = &(struct clksrc_sources) {
- .sources = clk_msysclk_sources,
- .nr_sources = ARRAY_SIZE(clk_msysclk_sources),
- },
- .reg_src = { .reg = S3C2443_CLKSRC, .size = 2, .shift = 3 },
-};
-
/* armdiv
*
* this clock is sourced from msysclk and can have a number of
@@ -266,44 +169,6 @@ static struct clksrc_clk clk_arm = {
.reg_src = { .reg = S3C2443_CLKDIV0, .size = 1, .shift = 13 },
};
-/* esysclk
- *
- * this is sourced from either the EPLL or the EPLLref clock
-*/
-
-static struct clk *clk_sysclk_sources[] = {
- [0] = &clk_epllref.clk,
- [1] = &clk_epll,
-};
-
-static struct clksrc_clk clk_esysclk = {
- .clk = {
- .name = "esysclk",
- .parent = &clk_epll,
- .id = -1,
- },
- .sources = &(struct clksrc_sources) {
- .sources = clk_sysclk_sources,
- .nr_sources = ARRAY_SIZE(clk_sysclk_sources),
- },
- .reg_src = { .reg = S3C2443_CLKSRC, .size = 1, .shift = 6 },
-};
-
-/* uartclk
- *
- * UART baud-rate clock sourced from esysclk via a divisor
-*/
-
-static struct clksrc_clk clk_uart = {
- .clk = {
- .name = "uartclk",
- .id = -1,
- .parent = &clk_esysclk.clk,
- },
- .reg_div = { .reg = S3C2443_CLKDIV1, .size = 4, .shift = 8 },
-};
-
-
/* hsspi
*
* high-speed spi clock, sourced from esysclk
@@ -320,21 +185,6 @@ static struct clksrc_clk clk_hsspi = {
.reg_div = { .reg = S3C2443_CLKDIV1, .size = 2, .shift = 4 },
};
-/* usbhost
- *
- * usb host bus-clock, usually 48MHz to provide USB bus clock timing
-*/
-
-static struct clksrc_clk clk_usb_bus_host = {
- .clk = {
- .name = "usb-bus-host-parent",
- .id = -1,
- .parent = &clk_esysclk.clk,
- .ctrlbit = S3C2443_SCLKCON_USBHOST,
- .enable = s3c2443_clkcon_enable_s,
- },
- .reg_div = { .reg = S3C2443_CLKDIV1, .size = 2, .shift = 4 },
-};
/* clk_hsmcc_div
*
@@ -391,7 +241,7 @@ static struct clk clk_hsmmc = {
/* i2s_eplldiv
*
- * This clock is the output from the I2S divisor of ESYSCLK, and is seperate
+ * This clock is the output from the I2S divisor of ESYSCLK, and is separate
* from the mux that comes after it (cannot merge into one single clock)
*/
@@ -433,89 +283,16 @@ static struct clksrc_clk clk_i2s = {
.reg_src = { .reg = S3C2443_CLKSRC, .size = 2, .shift = 14 },
};
-/* cam-if
- *
- * camera interface bus-clock, divided down from esysclk
-*/
-
-static struct clksrc_clk clk_cam = {
- .clk = {
- .name = "camif-upll", /* same as 2440 name */
- .id = -1,
- .parent = &clk_esysclk.clk,
- .ctrlbit = S3C2443_SCLKCON_CAMCLK,
- .enable = s3c2443_clkcon_enable_s,
- },
- .reg_div = { .reg = S3C2443_CLKDIV1, .size = 4, .shift = 26 },
-};
-
-/* display-if
- *
- * display interface clock, divided from esysclk
-*/
-
-static struct clksrc_clk clk_display = {
- .clk = {
- .name = "display-if",
- .id = -1,
- .parent = &clk_esysclk.clk,
- .ctrlbit = S3C2443_SCLKCON_DISPCLK,
- .enable = s3c2443_clkcon_enable_s,
- },
- .reg_div = { .reg = S3C2443_CLKDIV1, .size = 8, .shift = 16 },
-};
-
-/* prediv
- *
- * this divides the msysclk down to pass to h/p/etc.
- */
-
-static unsigned long s3c2443_prediv_getrate(struct clk *clk)
-{
- unsigned long rate = clk_get_rate(clk->parent);
- unsigned long clkdiv0 = __raw_readl(S3C2443_CLKDIV0);
-
- clkdiv0 &= S3C2443_CLKDIV0_PREDIV_MASK;
- clkdiv0 >>= S3C2443_CLKDIV0_PREDIV_SHIFT;
-
- return rate / (clkdiv0 + 1);
-}
-
-static struct clk clk_prediv = {
- .name = "prediv",
- .id = -1,
- .parent = &clk_msysclk.clk,
- .ops = &(struct clk_ops) {
- .get_rate = s3c2443_prediv_getrate,
- },
-};
-
/* standard clock definitions */
-static struct clk init_clocks_disable[] = {
+static struct clk init_clocks_off[] = {
{
- .name = "nand",
- .id = -1,
- .parent = &clk_h,
- }, {
.name = "sdi",
.id = -1,
.parent = &clk_p,
.enable = s3c2443_clkcon_enable_p,
.ctrlbit = S3C2443_PCLKCON_SDI,
}, {
- .name = "adc",
- .id = -1,
- .parent = &clk_p,
- .enable = s3c2443_clkcon_enable_p,
- .ctrlbit = S3C2443_PCLKCON_ADC,
- }, {
- .name = "i2c",
- .id = -1,
- .parent = &clk_p,
- .enable = s3c2443_clkcon_enable_p,
- .ctrlbit = S3C2443_PCLKCON_IIC,
- }, {
.name = "iis",
.id = -1,
.parent = &clk_p,
@@ -537,179 +314,12 @@ static struct clk init_clocks_disable[] = {
};
static struct clk init_clocks[] = {
- {
- .name = "dma",
- .id = 0,
- .parent = &clk_h,
- .enable = s3c2443_clkcon_enable_h,
- .ctrlbit = S3C2443_HCLKCON_DMA0,
- }, {
- .name = "dma",
- .id = 1,
- .parent = &clk_h,
- .enable = s3c2443_clkcon_enable_h,
- .ctrlbit = S3C2443_HCLKCON_DMA1,
- }, {
- .name = "dma",
- .id = 2,
- .parent = &clk_h,
- .enable = s3c2443_clkcon_enable_h,
- .ctrlbit = S3C2443_HCLKCON_DMA2,
- }, {
- .name = "dma",
- .id = 3,
- .parent = &clk_h,
- .enable = s3c2443_clkcon_enable_h,
- .ctrlbit = S3C2443_HCLKCON_DMA3,
- }, {
- .name = "dma",
- .id = 4,
- .parent = &clk_h,
- .enable = s3c2443_clkcon_enable_h,
- .ctrlbit = S3C2443_HCLKCON_DMA4,
- }, {
- .name = "dma",
- .id = 5,
- .parent = &clk_h,
- .enable = s3c2443_clkcon_enable_h,
- .ctrlbit = S3C2443_HCLKCON_DMA5,
- }, {
- .name = "lcd",
- .id = -1,
- .parent = &clk_h,
- .enable = s3c2443_clkcon_enable_h,
- .ctrlbit = S3C2443_HCLKCON_LCDC,
- }, {
- .name = "gpio",
- .id = -1,
- .parent = &clk_p,
- .enable = s3c2443_clkcon_enable_p,
- .ctrlbit = S3C2443_PCLKCON_GPIO,
- }, {
- .name = "usb-host",
- .id = -1,
- .parent = &clk_h,
- .enable = s3c2443_clkcon_enable_h,
- .ctrlbit = S3C2443_HCLKCON_USBH,
- }, {
- .name = "usb-device",
- .id = -1,
- .parent = &clk_h,
- .enable = s3c2443_clkcon_enable_h,
- .ctrlbit = S3C2443_HCLKCON_USBD,
- }, {
- .name = "hsmmc",
- .id = -1,
- .parent = &clk_h,
- .enable = s3c2443_clkcon_enable_h,
- .ctrlbit = S3C2443_HCLKCON_HSMMC,
- }, {
- .name = "cfc",
- .id = -1,
- .parent = &clk_h,
- .enable = s3c2443_clkcon_enable_h,
- .ctrlbit = S3C2443_HCLKCON_CFC,
- }, {
- .name = "ssmc",
- .id = -1,
- .parent = &clk_h,
- .enable = s3c2443_clkcon_enable_h,
- .ctrlbit = S3C2443_HCLKCON_SSMC,
- }, {
- .name = "timers",
- .id = -1,
- .parent = &clk_p,
- .enable = s3c2443_clkcon_enable_p,
- .ctrlbit = S3C2443_PCLKCON_PWMT,
- }, {
- .name = "uart",
- .id = 0,
- .parent = &clk_p,
- .enable = s3c2443_clkcon_enable_p,
- .ctrlbit = S3C2443_PCLKCON_UART0,
- }, {
- .name = "uart",
- .id = 1,
- .parent = &clk_p,
- .enable = s3c2443_clkcon_enable_p,
- .ctrlbit = S3C2443_PCLKCON_UART1,
- }, {
- .name = "uart",
- .id = 2,
- .parent = &clk_p,
- .enable = s3c2443_clkcon_enable_p,
- .ctrlbit = S3C2443_PCLKCON_UART2,
- }, {
- .name = "uart",
- .id = 3,
- .parent = &clk_p,
- .enable = s3c2443_clkcon_enable_p,
- .ctrlbit = S3C2443_PCLKCON_UART3,
- }, {
- .name = "rtc",
- .id = -1,
- .parent = &clk_p,
- .enable = s3c2443_clkcon_enable_p,
- .ctrlbit = S3C2443_PCLKCON_RTC,
- }, {
- .name = "watchdog",
- .id = -1,
- .parent = &clk_p,
- .ctrlbit = S3C2443_PCLKCON_WDT,
- }, {
- .name = "usb-bus-host",
- .id = -1,
- .parent = &clk_usb_bus_host.clk,
- }, {
- .name = "ac97",
- .id = -1,
- .parent = &clk_p,
- .ctrlbit = S3C2443_PCLKCON_AC97,
- }
-};
-
-/* clocks to add where we need to check their parentage */
-
-static struct clksrc_clk __initdata *init_list[] = {
- &clk_epllref, /* should be first */
- &clk_esysclk,
- &clk_msysclk,
- &clk_arm,
- &clk_i2s_eplldiv,
- &clk_i2s,
- &clk_cam,
- &clk_uart,
- &clk_display,
- &clk_hsmmc_div,
- &clk_usb_bus_host,
};
-static void __init s3c2443_clk_initparents(void)
-{
- int ptr;
-
- for (ptr = 0; ptr < ARRAY_SIZE(init_list); ptr++)
- s3c_set_clksrc(init_list[ptr], true);
-}
-
-static inline unsigned long s3c2443_get_hdiv(unsigned long clkcon0)
-{
- clkcon0 &= S3C2443_CLKDIV0_HCLKDIV_MASK;
-
- return clkcon0 + 1;
-}
-
/* clocks to add straight away */
static struct clksrc_clk *clksrcs[] __initdata = {
- &clk_usb_bus_host,
- &clk_epllref,
- &clk_esysclk,
- &clk_msysclk,
&clk_arm,
- &clk_uart,
- &clk_display,
- &clk_cam,
&clk_i2s_eplldiv,
&clk_i2s,
&clk_hsspi,
@@ -717,92 +327,32 @@ static struct clksrc_clk *clksrcs[] __initdata = {
};
static struct clk *clks[] __initdata = {
- &clk_ext,
- &clk_epll,
- &clk_usb_bus,
- &clk_mpllref,
&clk_hsmmc,
&clk_armdiv,
- &clk_prediv,
};
void __init_or_cpufreq s3c2443_setup_clocks(void)
{
- unsigned long mpllcon = __raw_readl(S3C2443_MPLLCON);
- unsigned long clkdiv0 = __raw_readl(S3C2443_CLKDIV0);
- struct clk *xtal_clk;
- unsigned long xtal;
- unsigned long pll;
- unsigned long fclk;
- unsigned long hclk;
- unsigned long pclk;
-
- xtal_clk = clk_get(NULL, "xtal");
- xtal = clk_get_rate(xtal_clk);
- clk_put(xtal_clk);
-
- pll = s3c2443_get_mpll(mpllcon, xtal);
- clk_msysclk.clk.rate = pll;
-
- fclk = pll / s3c2443_fclk_div(clkdiv0);
- hclk = s3c2443_prediv_getrate(&clk_prediv);
- hclk /= s3c2443_get_hdiv(clkdiv0);
- pclk = hclk / ((clkdiv0 & S3C2443_CLKDIV0_HALF_PCLK) ? 2 : 1);
-
- s3c24xx_setup_clocks(fclk, hclk, pclk);
-
- printk("S3C2443: mpll %s %ld.%03ld MHz, cpu %ld.%03ld MHz, mem %ld.%03ld MHz, pclk %ld.%03ld MHz\n",
- (mpllcon & S3C2443_PLLCON_OFF) ? "off":"on",
- print_mhz(pll), print_mhz(fclk),
- print_mhz(hclk), print_mhz(pclk));
-
- s3c24xx_setup_clocks(fclk, hclk, pclk);
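+	/* PLL readback and rate setup is now handled by the common S3C2443-family clock code */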
+ s3c2443_common_setup_clocks(s3c2443_get_mpll, s3c2443_fclk_div);
}
void __init s3c2443_init_clocks(int xtal)
{
- struct clk *clkp;
unsigned long epllcon = __raw_readl(S3C2443_EPLLCON);
- int ret;
int ptr;
- /* s3c2443 parents h and p clocks from prediv */
- clk_h.parent = &clk_prediv;
- clk_p.parent = &clk_prediv;
+ clk_epll.rate = s3c2443_get_epll(epllcon, xtal);
+ clk_epll.parent = &clk_epllref.clk;
+
+ s3c2443_common_init_clocks(xtal, s3c2443_get_mpll, s3c2443_fclk_div);
- s3c24xx_register_baseclocks(xtal);
s3c2443_setup_clocks();
- s3c2443_clk_initparents();
-
- for (ptr = 0; ptr < ARRAY_SIZE(clks); ptr++) {
- clkp = clks[ptr];
- ret = s3c24xx_register_clock(clkp);
- if (ret < 0) {
- printk(KERN_ERR "Failed to register clock %s (%d)\n",
- clkp->name, ret);
- }
- }
+ s3c24xx_register_clocks(clks, ARRAY_SIZE(clks));
for (ptr = 0; ptr < ARRAY_SIZE(clksrcs); ptr++)
s3c_register_clksrc(clksrcs[ptr], 1);
- clk_epll.rate = s3c2443_get_epll(epllcon, xtal);
- clk_epll.parent = &clk_epllref.clk;
- clk_usb_bus.parent = &clk_usb_bus_host.clk;
-
- /* ensure usb bus clock is within correct rate of 48MHz */
-
- if (clk_get_rate(&clk_usb_bus_host.clk) != (48 * 1000 * 1000)) {
- printk(KERN_INFO "Warning: USB host bus not at 48MHz\n");
- clk_set_rate(&clk_usb_bus_host.clk, 48*1000*1000);
- }
-
- printk("S3C2443: epll %s %ld.%03ld MHz, usb-bus %ld.%03ld MHz\n",
- (epllcon & S3C2443_PLLCON_OFF) ? "off":"on",
- print_mhz(clk_get_rate(&clk_epll)),
- print_mhz(clk_get_rate(&clk_usb_bus)));
-
/* register clocks from clock array */
s3c_register_clocks(init_clocks, ARRAY_SIZE(init_clocks));
@@ -819,17 +369,8 @@ void __init s3c2443_init_clocks(int xtal)
/* install (and disable) the clocks we do not need immediately */
- clkp = init_clocks_disable;
- for (ptr = 0; ptr < ARRAY_SIZE(init_clocks_disable); ptr++, clkp++) {
-
- ret = s3c24xx_register_clock(clkp);
- if (ret < 0) {
- printk(KERN_ERR "Failed to register clock %s (%d)\n",
- clkp->name, ret);
- }
-
- (clkp->enable)(clkp, 0);
- }
+ s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
+ s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
s3c_pwmclk_init();
}
diff --git a/arch/arm/mach-s3c2443/mach-smdk2443.c b/arch/arm/mach-s3c2443/mach-smdk2443.c
index e2e362bda9b7..4c863d3a52f4 100644
--- a/arch/arm/mach-s3c2443/mach-smdk2443.c
+++ b/arch/arm/mach-s3c2443/mach-smdk2443.c
@@ -131,7 +131,7 @@ static void __init smdk2443_machine_init(void)
}
MACHINE_START(SMDK2443, "SMDK2443")
- /* Maintainer: Ben Dooks <ben@fluff.org> */
+ /* Maintainer: Ben Dooks <ben-linux@fluff.org> */
.phys_io = S3C2410_PA_UART,
.io_pg_offst = (((u32)S3C24XX_VA_UART) >> 18) & 0xfffc,
.boot_params = S3C2410_SDRAM_PA + 0x100,
diff --git a/arch/arm/mach-s3c64xx/Kconfig b/arch/arm/mach-s3c64xx/Kconfig
index 959df3840de5..f5a59727949f 100644
--- a/arch/arm/mach-s3c64xx/Kconfig
+++ b/arch/arm/mach-s3c64xx/Kconfig
@@ -7,6 +7,7 @@
config PLAT_S3C64XX
bool
depends on ARCH_S3C64XX
+ select SAMSUNG_WAKEMASK
default y
help
Base platform code for any Samsung S3C64XX device
@@ -35,6 +36,11 @@ config S3C64XX_SETUP_SDHCI
Internal configuration for default SDHCI setup for S3C6400 and
S3C6410 SoCs.
+config S3C64XX_DEV_ONENAND1
+ bool
+ help
+ Compile in platform device definition for OneNAND1 controller
+
# platform specific device setup
config S3C64XX_SETUP_I2C0
@@ -85,12 +91,16 @@ config MACH_ANW6410
config MACH_SMDK6410
bool "SMDK6410"
select CPU_S3C6410
+ select SAMSUNG_DEV_ADC
select S3C_DEV_HSMMC
select S3C_DEV_HSMMC1
select S3C_DEV_I2C1
select S3C_DEV_FB
+ select SAMSUNG_DEV_TS
select S3C_DEV_USB_HOST
select S3C_DEV_USB_HSOTG
+ select S3C_DEV_WDT
+ select HAVE_S3C2410_WATCHDOG
select S3C64XX_SETUP_SDHCI
select S3C64XX_SETUP_I2C1
select S3C64XX_SETUP_FB_24BPP
@@ -178,3 +188,34 @@ config MACH_HMT
select HAVE_PWM
help
Machine support for the Airgoo HMT
+
+config MACH_SMARTQ
+ bool
+ select CPU_S3C6410
+ select S3C_DEV_HSMMC
+ select S3C_DEV_HSMMC1
+ select S3C_DEV_HSMMC2
+ select S3C_DEV_FB
+ select S3C_DEV_HWMON
+ select S3C_DEV_RTC
+ select S3C_DEV_USB_HSOTG
+ select S3C_DEV_USB_HOST
+ select S3C64XX_SETUP_SDHCI
+ select S3C64XX_SETUP_FB_24BPP
+ select SAMSUNG_DEV_ADC
+ select SAMSUNG_DEV_TS
+ select HAVE_PWM
+ help
+ Shared machine support for SmartQ 5/7
+
+config MACH_SMARTQ5
+ bool "SmartQ 5"
+ select MACH_SMARTQ
+ help
+ Machine support for the SmartQ 5
+
+config MACH_SMARTQ7
+ bool "SmartQ 7"
+ select MACH_SMARTQ
+ help
+ Machine support for the SmartQ 7
diff --git a/arch/arm/mach-s3c64xx/Makefile b/arch/arm/mach-s3c64xx/Makefile
index 3758e15086be..9d1006938f5c 100644
--- a/arch/arm/mach-s3c64xx/Makefile
+++ b/arch/arm/mach-s3c64xx/Makefile
@@ -52,11 +52,14 @@ obj-$(CONFIG_MACH_SMDK6400) += mach-smdk6400.o
obj-$(CONFIG_MACH_SMDK6410) += mach-smdk6410.o
obj-$(CONFIG_MACH_NCP) += mach-ncp.o
obj-$(CONFIG_MACH_HMT) += mach-hmt.o
+obj-$(CONFIG_MACH_SMARTQ) += mach-smartq.o
+obj-$(CONFIG_MACH_SMARTQ5) += mach-smartq5.o
+obj-$(CONFIG_MACH_SMARTQ7) += mach-smartq7.o
# device support
obj-y += dev-uart.o
-obj-y += dev-rtc.o
obj-y += dev-audio.o
-obj-$(CONFIG_S3C_ADC) += dev-adc.o
obj-$(CONFIG_S3C64XX_DEV_SPI) += dev-spi.o
+obj-$(CONFIG_S3C64XX_DEV_TS) += dev-ts.o
+obj-$(CONFIG_S3C64XX_DEV_ONENAND1) += dev-onenand1.o
diff --git a/arch/arm/mach-s3c64xx/clock.c b/arch/arm/mach-s3c64xx/clock.c
index 2ac2e7d73e53..fbd85a9b7bbf 100644
--- a/arch/arm/mach-s3c64xx/clock.c
+++ b/arch/arm/mach-s3c64xx/clock.c
@@ -88,6 +88,12 @@ struct clk clk_48m = {
.enable = clk_48m_ctrl,
};
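+/* reference clock for the USB blocks (XUSBXTI); boards such as the SmartQ override the 48 MHz default */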
+struct clk clk_xusbxti = {
+ .name = "xusbxti",
+ .id = -1,
+ .rate = 48000000,
+};
+
static int inline s3c64xx_gate(void __iomem *reg,
struct clk *clk,
int enable)
@@ -253,6 +259,12 @@ static struct clk init_clocks[] = {
.enable = s3c64xx_hclk_ctrl,
.ctrlbit = S3C_CLKCON_HCLK_HSMMC2,
}, {
+ .name = "otg",
+ .id = -1,
+ .parent = &clk_h,
+ .enable = s3c64xx_hclk_ctrl,
+ .ctrlbit = S3C_CLKCON_HCLK_USB,
+ }, {
.name = "timers",
.id = -1,
.parent = &clk_p,
@@ -518,6 +530,11 @@ static struct clk clk_iis_cd1 = {
.id = -1,
};
+static struct clk clk_iisv4_cd = {
+ .name = "iis_cdclk_v4",
+ .id = -1,
+};
+
static struct clk clk_pcm_cd = {
.name = "pcm_cdclk",
.id = -1,
@@ -549,6 +566,19 @@ static struct clksrc_sources clkset_audio1 = {
.nr_sources = ARRAY_SIZE(clkset_audio1_list),
};
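+/* clock source candidates for the IISv4 audio-bus divider */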
+static struct clk *clkset_audio2_list[] = {
+ [0] = &clk_mout_epll.clk,
+ [1] = &clk_dout_mpll,
+ [2] = &clk_fin_epll,
+ [3] = &clk_iisv4_cd,
+ [4] = &clk_pcm_cd,
+};
+
+static struct clksrc_sources clkset_audio2 = {
+ .sources = clkset_audio2_list,
+ .nr_sources = ARRAY_SIZE(clkset_audio2_list),
+};
+
static struct clk *clkset_camif_list[] = {
&clk_h2,
};
@@ -652,6 +682,16 @@ static struct clksrc_clk clksrcs[] = {
.sources = &clkset_audio1,
}, {
.clk = {
+ .name = "audio-bus",
+ .id = -1, /* There's only one IISv4 port */
+ .ctrlbit = S3C6410_CLKCON_SCLK_AUDIO2,
+ .enable = s3c64xx_sclk_ctrl,
+ },
+ .reg_src = { .reg = S3C6410_CLK_SRC2, .shift = 0, .size = 3 },
+ .reg_div = { .reg = S3C_CLK_DIV2, .shift = 24, .size = 4 },
+ .sources = &clkset_audio2,
+ }, {
+ .clk = {
.name = "irda-bus",
.id = 0,
.ctrlbit = S3C_CLKCON_SCLK_IRDA,
@@ -749,6 +789,7 @@ static struct clk *clks1[] __initdata = {
&clk_ext_xtal_mux,
&clk_iis_cd0,
&clk_iis_cd1,
+ &clk_iisv4_cd,
&clk_pcm_cd,
&clk_mout_epll.clk,
&clk_mout_mpll.clk,
@@ -762,6 +803,7 @@ static struct clk *clks[] __initdata = {
&clk_27m,
&clk_48m,
&clk_h2,
+ &clk_xusbxti,
};
/**
diff --git a/arch/arm/mach-s3c64xx/dev-onenand1.c b/arch/arm/mach-s3c64xx/dev-onenand1.c
new file mode 100644
index 000000000000..92ffd5bac104
--- /dev/null
+++ b/arch/arm/mach-s3c64xx/dev-onenand1.c
@@ -0,0 +1,55 @@
+/*
+ * linux/arch/arm/mach-s3c64xx/dev-onenand1.c
+ *
+ * Copyright (c) 2008-2010 Samsung Electronics
+ * Kyungmin Park <kyungmin.park@samsung.com>
+ *
+ * S3C64XX series device definition for OneNAND devices
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/onenand.h>
+
+#include <mach/irqs.h>
+#include <mach/map.h>
+
+static struct resource s3c64xx_onenand1_resources[] = {
+ [0] = {
+ .start = S3C64XX_PA_ONENAND1,
+ .end = S3C64XX_PA_ONENAND1 + 0x400 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = S3C64XX_PA_ONENAND1_BUF,
+ .end = S3C64XX_PA_ONENAND1_BUF + S3C64XX_SZ_ONENAND1_BUF - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [2] = {
+ .start = IRQ_ONENAND1,
+ .end = IRQ_ONENAND1,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+struct platform_device s3c64xx_device_onenand1 = {
+ .name = "samsung-onenand",
+ .id = 1,
+ .num_resources = ARRAY_SIZE(s3c64xx_onenand1_resources),
+ .resource = s3c64xx_onenand1_resources,
+};
+
+void s3c64xx_onenand1_set_platdata(struct onenand_platform_data *pdata)
+{
+ struct onenand_platform_data *pd;
+
+ pd = kmemdup(pdata, sizeof(struct onenand_platform_data), GFP_KERNEL);
+ if (!pd)
+ printk(KERN_ERR "%s: no memory for platform data\n", __func__);
+ s3c64xx_device_onenand1.dev.platform_data = pd;
+}
diff --git a/arch/arm/mach-s3c64xx/dma.c b/arch/arm/mach-s3c64xx/dma.c
index 33ccf7bf766a..5567e037b0d1 100644
--- a/arch/arm/mach-s3c64xx/dma.c
+++ b/arch/arm/mach-s3c64xx/dma.c
@@ -414,7 +414,7 @@ err_buff:
EXPORT_SYMBOL(s3c2410_dma_enqueue);
-int s3c2410_dma_devconfig(int channel,
+int s3c2410_dma_devconfig(unsigned int channel,
enum s3c2410_dmasrc source,
unsigned long devaddr)
{
diff --git a/arch/arm/mach-s3c64xx/gpiolib.c b/arch/arm/mach-s3c64xx/gpiolib.c
index 66e6794481d2..60c929a3cab6 100644
--- a/arch/arm/mach-s3c64xx/gpiolib.c
+++ b/arch/arm/mach-s3c64xx/gpiolib.c
@@ -51,6 +51,7 @@
static struct s3c_gpio_cfg gpio_4bit_cfg_noint = {
.set_config = s3c_gpio_setcfg_s3c64xx_4bit,
+ .get_config = s3c_gpio_getcfg_s3c64xx_4bit,
.set_pull = s3c_gpio_setpull_updown,
.get_pull = s3c_gpio_getpull_updown,
};
@@ -58,12 +59,14 @@ static struct s3c_gpio_cfg gpio_4bit_cfg_noint = {
static struct s3c_gpio_cfg gpio_4bit_cfg_eint0111 = {
.cfg_eint = 7,
.set_config = s3c_gpio_setcfg_s3c64xx_4bit,
+ .get_config = s3c_gpio_getcfg_s3c64xx_4bit,
.set_pull = s3c_gpio_setpull_updown,
.get_pull = s3c_gpio_getpull_updown,
};
static struct s3c_gpio_cfg gpio_4bit_cfg_eint0011 = {
.cfg_eint = 3,
+ .get_config = s3c_gpio_getcfg_s3c64xx_4bit,
.set_config = s3c_gpio_setcfg_s3c64xx_4bit,
.set_pull = s3c_gpio_setpull_updown,
.get_pull = s3c_gpio_getpull_updown,
@@ -171,6 +174,7 @@ static struct s3c_gpio_chip gpio_4bit2[] = {
static struct s3c_gpio_cfg gpio_2bit_cfg_noint = {
.set_config = s3c_gpio_setcfg_s3c24xx,
+ .get_config = s3c_gpio_getcfg_s3c24xx,
.set_pull = s3c_gpio_setpull_updown,
.get_pull = s3c_gpio_getpull_updown,
};
@@ -178,6 +182,7 @@ static struct s3c_gpio_cfg gpio_2bit_cfg_noint = {
static struct s3c_gpio_cfg gpio_2bit_cfg_eint10 = {
.cfg_eint = 2,
.set_config = s3c_gpio_setcfg_s3c24xx,
+ .get_config = s3c_gpio_getcfg_s3c24xx,
.set_pull = s3c_gpio_setpull_updown,
.get_pull = s3c_gpio_getpull_updown,
};
@@ -185,6 +190,7 @@ static struct s3c_gpio_cfg gpio_2bit_cfg_eint10 = {
static struct s3c_gpio_cfg gpio_2bit_cfg_eint11 = {
.cfg_eint = 3,
.set_config = s3c_gpio_setcfg_s3c24xx,
+ .get_config = s3c_gpio_getcfg_s3c24xx,
.set_pull = s3c_gpio_setpull_updown,
.get_pull = s3c_gpio_getpull_updown,
};
diff --git a/arch/arm/mach-s3c64xx/include/mach/irqs.h b/arch/arm/mach-s3c64xx/include/mach/irqs.h
index e9ab4ac0b9a8..8e2df26cf14a 100644
--- a/arch/arm/mach-s3c64xx/include/mach/irqs.h
+++ b/arch/arm/mach-s3c64xx/include/mach/irqs.h
@@ -212,5 +212,9 @@
#define NR_IRQS (IRQ_BOARD_END + 1)
+/* Compatibility */
+
+#define IRQ_ONENAND IRQ_ONENAND0
+
#endif /* __ASM_MACH_S3C64XX_IRQS_H */
diff --git a/arch/arm/mach-s3c64xx/include/mach/map.h b/arch/arm/mach-s3c64xx/include/mach/map.h
index 801c1c0f3a95..e1eab3c94aea 100644
--- a/arch/arm/mach-s3c64xx/include/mach/map.h
+++ b/arch/arm/mach-s3c64xx/include/mach/map.h
@@ -52,6 +52,16 @@
#define S3C64XX_PA_SROM (0x70000000)
+#define S3C64XX_PA_ONENAND0 (0x70100000)
+#define S3C64XX_PA_ONENAND0_BUF (0x20000000)
+#define S3C64XX_SZ_ONENAND0_BUF (SZ_64M)
+
+/* NAND and OneNAND1 controllers occupy the same register region
+ (depending on SoC POP version) */
+#define S3C64XX_PA_ONENAND1 (0x70200000)
+#define S3C64XX_PA_ONENAND1_BUF (0x28000000)
+#define S3C64XX_SZ_ONENAND1_BUF (SZ_64M)
+
#define S3C64XX_PA_NAND (0x70200000)
#define S3C64XX_PA_FB (0x77100000)
#define S3C64XX_PA_USB_HSOTG (0x7C000000)
@@ -99,9 +109,16 @@
#define S3C_PA_IIC S3C64XX_PA_IIC0
#define S3C_PA_IIC1 S3C64XX_PA_IIC1
#define S3C_PA_NAND S3C64XX_PA_NAND
+#define S3C_PA_ONENAND S3C64XX_PA_ONENAND0
+#define S3C_PA_ONENAND_BUF S3C64XX_PA_ONENAND0_BUF
+#define S3C_SZ_ONENAND_BUF S3C64XX_SZ_ONENAND0_BUF
#define S3C_PA_FB S3C64XX_PA_FB
#define S3C_PA_USBHOST S3C64XX_PA_USBHOST
#define S3C_PA_USB_HSOTG S3C64XX_PA_USB_HSOTG
#define S3C_VA_USB_HSPHY S3C64XX_VA_USB_HSPHY
+#define S3C_PA_RTC S3C64XX_PA_RTC
+#define S3C_PA_WDT S3C64XX_PA_WATCHDOG
+
+#define SAMSUNG_PA_ADC S3C64XX_PA_ADC
#endif /* __ASM_ARCH_6400_MAP_H */
diff --git a/arch/arm/mach-s3c64xx/include/mach/pll.h b/arch/arm/mach-s3c64xx/include/mach/pll.h
index 90bbd72fdc4e..5ef0bb698ee0 100644
--- a/arch/arm/mach-s3c64xx/include/mach/pll.h
+++ b/arch/arm/mach-s3c64xx/include/mach/pll.h
@@ -20,6 +20,7 @@
#define S3C6400_PLL_SDIV_SHIFT (0)
#include <asm/div64.h>
+#include <plat/pll6553x.h>
static inline unsigned long s3c6400_get_pll(unsigned long baseclk,
u32 pllcon)
@@ -37,38 +38,8 @@ static inline unsigned long s3c6400_get_pll(unsigned long baseclk,
return (unsigned long)fvco;
}
-#define S3C6400_EPLL_MDIV_MASK ((1 << (23-16)) - 1)
-#define S3C6400_EPLL_PDIV_MASK ((1 << (13-8)) - 1)
-#define S3C6400_EPLL_SDIV_MASK ((1 << (2-0)) - 1)
-#define S3C6400_EPLL_MDIV_SHIFT (16)
-#define S3C6400_EPLL_PDIV_SHIFT (8)
-#define S3C6400_EPLL_SDIV_SHIFT (0)
-#define S3C6400_EPLL_KDIV_MASK (0xffff)
-
static inline unsigned long s3c6400_get_epll(unsigned long baseclk)
{
- unsigned long result;
- u32 epll0 = __raw_readl(S3C_EPLL_CON0);
- u32 epll1 = __raw_readl(S3C_EPLL_CON1);
- u32 mdiv, pdiv, sdiv, kdiv;
- u64 tmp;
-
- mdiv = (epll0 >> S3C6400_EPLL_MDIV_SHIFT) & S3C6400_EPLL_MDIV_MASK;
- pdiv = (epll0 >> S3C6400_EPLL_PDIV_SHIFT) & S3C6400_EPLL_PDIV_MASK;
- sdiv = (epll0 >> S3C6400_EPLL_SDIV_SHIFT) & S3C6400_EPLL_SDIV_MASK;
- kdiv = epll1 & S3C6400_EPLL_KDIV_MASK;
-
- /* We need to multiple baseclk by mdiv (the integer part) and kdiv
- * which is in 2^16ths, so shift mdiv up (does not overflow) and
- * add kdiv before multiplying. The use of tmp is to avoid any
- * overflows before shifting bac down into result when multipling
- * by the mdiv and kdiv pair.
- */
-
- tmp = baseclk;
- tmp *= (mdiv << 16) + kdiv;
- do_div(tmp, (pdiv << sdiv));
- result = tmp >> 16;
-
- return result;
+ return s3c_get_pll6553x(baseclk, __raw_readl(S3C_EPLL_CON0),
+ __raw_readl(S3C_EPLL_CON1));
}
diff --git a/arch/arm/mach-s3c64xx/include/mach/regs-clock.h b/arch/arm/mach-s3c64xx/include/mach/regs-clock.h
index 3ef62741e5d1..0114eb0c1fe7 100644
--- a/arch/arm/mach-s3c64xx/include/mach/regs-clock.h
+++ b/arch/arm/mach-s3c64xx/include/mach/regs-clock.h
@@ -33,6 +33,7 @@
#define S3C_PCLK_GATE S3C_CLKREG(0x34)
#define S3C_SCLK_GATE S3C_CLKREG(0x38)
#define S3C_MEM0_GATE S3C_CLKREG(0x3C)
+#define S3C6410_CLK_SRC2 S3C_CLKREG(0x10C)
/* CLKDIV0 */
#define S3C6400_CLKDIV0_PCLK_MASK (0xf << 12)
diff --git a/arch/arm/mach-s3c64xx/mach-smartq.c b/arch/arm/mach-s3c64xx/mach-smartq.c
new file mode 100644
index 000000000000..028d080dcd35
--- /dev/null
+++ b/arch/arm/mach-s3c64xx/mach-smartq.c
@@ -0,0 +1,363 @@
+/*
+ * linux/arch/arm/mach-s3c64xx/mach-smartq.c
+ *
+ * Copyright (C) 2010 Maurus Cuelenaere
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/fb.h>
+#include <linux/gpio.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/pwm_backlight.h>
+#include <linux/serial_core.h>
+#include <linux/usb/gpio_vbus.h>
+
+#include <asm/mach-types.h>
+#include <asm/mach/map.h>
+
+#include <mach/map.h>
+#include <mach/regs-gpio.h>
+#include <mach/regs-modem.h>
+
+#include <plat/clock.h>
+#include <plat/cpu.h>
+#include <plat/devs.h>
+#include <plat/iic.h>
+#include <plat/gpio-cfg.h>
+#include <plat/hwmon.h>
+#include <plat/regs-serial.h>
+#include <plat/udc-hs.h>
+#include <plat/usb-control.h>
+#include <plat/sdhci.h>
+#include <plat/ts.h>
+
+#include <video/platform_lcd.h>
+
+#define UCON S3C2410_UCON_DEFAULT
+#define ULCON (S3C2410_LCON_CS8 | S3C2410_LCON_PNONE)
+#define UFCON (S3C2410_UFCON_RXTRIG8 | S3C2410_UFCON_FIFOMODE)
+
+static struct s3c2410_uartcfg smartq_uartcfgs[] __initdata = {
+ [0] = {
+ .hwport = 0,
+ .flags = 0,
+ .ucon = UCON,
+ .ulcon = ULCON,
+ .ufcon = UFCON,
+ },
+ [1] = {
+ .hwport = 1,
+ .flags = 0,
+ .ucon = UCON,
+ .ulcon = ULCON,
+ .ufcon = UFCON,
+ },
+ [2] = {
+ .hwport = 2,
+ .flags = 0,
+ .ucon = UCON,
+ .ulcon = ULCON,
+ .ufcon = UFCON,
+ },
+};
+
+static void smartq_usb_host_powercontrol(int port, int to)
+{
+ pr_debug("%s(%d, %d)\n", __func__, port, to);
+
+ if (port == 0) {
+ gpio_set_value(S3C64XX_GPL(0), to);
+ gpio_set_value(S3C64XX_GPL(1), to);
+ }
+}
+
+static irqreturn_t smartq_usb_host_ocirq(int irq, void *pw)
+{
+ struct s3c2410_hcd_info *info = pw;
+
+ if (gpio_get_value(S3C64XX_GPL(10)) == 0) {
+ pr_debug("%s: over-current irq (oc detected)\n", __func__);
+ s3c2410_usb_report_oc(info, 3);
+ } else {
+ pr_debug("%s: over-current irq (oc cleared)\n", __func__);
+ s3c2410_usb_report_oc(info, 0);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void smartq_usb_host_enableoc(struct s3c2410_hcd_info *info, int on)
+{
+ int ret;
+
+ /* This isn't present on a SmartQ 5 board */
+ if (machine_is_smartq5())
+ return;
+
+ if (on) {
+ ret = request_irq(gpio_to_irq(S3C64XX_GPL(10)),
+ smartq_usb_host_ocirq, IRQF_DISABLED |
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ "USB host overcurrent", info);
+ if (ret != 0)
+ pr_err("failed to request usb oc irq: %d\n", ret);
+ } else {
+ free_irq(gpio_to_irq(S3C64XX_GPL(10)), info);
+ }
+}
+
+static struct s3c2410_hcd_info smartq_usb_host_info = {
+ .port[0] = {
+ .flags = S3C_HCDFLG_USED
+ },
+ .port[1] = {
+ .flags = 0
+ },
+
+ .power_control = smartq_usb_host_powercontrol,
+ .enable_oc = smartq_usb_host_enableoc,
+};
+
+static struct gpio_vbus_mach_info smartq_usb_otg_vbus_pdata = {
+ .gpio_vbus = S3C64XX_GPL(9),
+ .gpio_pullup = -1,
+ .gpio_vbus_inverted = true,
+};
+
+static struct platform_device smartq_usb_otg_vbus_dev = {
+ .name = "gpio-vbus",
+ .dev.platform_data = &smartq_usb_otg_vbus_pdata,
+};
+
+static int __init smartq_bl_init(struct device *dev)
+{
+ s3c_gpio_cfgpin(S3C64XX_GPF(15), S3C_GPIO_SFN(2));
+
+ return 0;
+}
+
+static struct platform_pwm_backlight_data smartq_backlight_data = {
+ .pwm_id = 1,
+ .max_brightness = 1000,
+ .dft_brightness = 600,
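+	/* 1000000000 / 20000 = 50000 ns period, i.e. a 20 kHz backlight PWM */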
+ .pwm_period_ns = 1000000000 / (1000 * 20),
+ .init = smartq_bl_init,
+};
+
+static struct platform_device smartq_backlight_device = {
+ .name = "pwm-backlight",
+ .dev = {
+ .parent = &s3c_device_timer[1].dev,
+ .platform_data = &smartq_backlight_data,
+ },
+};
+
+static struct s3c2410_ts_mach_info smartq_touchscreen_pdata __initdata = {
+ .delay = 65535,
+ .presc = 99,
+ .oversampling_shift = 4,
+};
+
+static struct s3c_sdhci_platdata smartq_internal_hsmmc_pdata = {
+ .max_width = 4,
+ /*.broken_card_detection = true,*/
+};
+
+static struct s3c_hwmon_pdata smartq_hwmon_pdata __initdata = {
+ /* Battery voltage (?-4.2V) */
+ .in[0] = &(struct s3c_hwmon_chcfg) {
+ .name = "smartq:battery-voltage",
+ .mult = 3300,
+ .div = 2048,
+ },
+ /* Reference voltage (1.2V) */
+ .in[1] = &(struct s3c_hwmon_chcfg) {
+ .name = "smartq:reference-voltage",
+ .mult = 3300,
+ .div = 4096,
+ },
+};
+
+static void smartq_lcd_power_set(struct plat_lcd_data *pd, unsigned int power)
+{
+ gpio_direction_output(S3C64XX_GPM(3), power);
+}
+
+static struct plat_lcd_data smartq_lcd_power_data = {
+ .set_power = smartq_lcd_power_set,
+};
+
+static struct platform_device smartq_lcd_power_device = {
+ .name = "platform-lcd",
+ .dev.parent = &s3c_device_fb.dev,
+ .dev.platform_data = &smartq_lcd_power_data,
+};
+
+
+static struct platform_device *smartq_devices[] __initdata = {
+ &s3c_device_hsmmc1, /* Init iNAND first, ... */
+ &s3c_device_hsmmc0, /* ... then the external SD card */
+ &s3c_device_hsmmc2,
+ &s3c_device_adc,
+ &s3c_device_fb,
+ &s3c_device_hwmon,
+ &s3c_device_i2c0,
+ &s3c_device_ohci,
+ &s3c_device_rtc,
+ &s3c_device_timer[1],
+ &s3c_device_ts,
+ &s3c_device_usb_hsotg,
+ &smartq_backlight_device,
+ &smartq_lcd_power_device,
+ &smartq_usb_otg_vbus_dev,
+};
+
+static void __init smartq_lcd_mode_set(void)
+{
+ u32 tmp;
+
+ /* set the LCD type */
+ tmp = __raw_readl(S3C64XX_SPCON);
+ tmp &= ~S3C64XX_SPCON_LCD_SEL_MASK;
+ tmp |= S3C64XX_SPCON_LCD_SEL_RGB;
+ __raw_writel(tmp, S3C64XX_SPCON);
+
+ /* remove the LCD bypass */
+ tmp = __raw_readl(S3C64XX_MODEM_MIFPCON);
+ tmp &= ~MIFPCON_LCD_BYPASS;
+ __raw_writel(tmp, S3C64XX_MODEM_MIFPCON);
+}
+
+static void smartq_power_off(void)
+{
+ gpio_direction_output(S3C64XX_GPK(15), 1);
+}
+
+static int __init smartq_power_off_init(void)
+{
+ int ret;
+
+ ret = gpio_request(S3C64XX_GPK(15), "Power control");
+ if (ret < 0) {
+ pr_err("%s: failed to get GPK15\n", __func__);
+ return ret;
+ }
+
+ /* leave power on */
+ gpio_direction_output(S3C64XX_GPK(15), 0);
+
+
+ pm_power_off = smartq_power_off;
+
+ return ret;
+}
+
+static int __init smartq_usb_host_init(void)
+{
+ int ret;
+
+ ret = gpio_request(S3C64XX_GPL(0), "USB power control");
+ if (ret < 0) {
+ pr_err("%s: failed to get GPL0\n", __func__);
+ return ret;
+ }
+
+ ret = gpio_request(S3C64XX_GPL(1), "USB host power control");
+ if (ret < 0) {
+ pr_err("%s: failed to get GPL1\n", __func__);
+ goto err;
+ }
+
+ if (!machine_is_smartq5()) {
+ /* This isn't present on a SmartQ 5 board */
+ ret = gpio_request(S3C64XX_GPL(10), "USB host overcurrent");
+ if (ret < 0) {
+ pr_err("%s: failed to get GPL10\n", __func__);
+ goto err2;
+ }
+ }
+
+ /* turn power off */
+ gpio_direction_output(S3C64XX_GPL(0), 0);
+ gpio_direction_output(S3C64XX_GPL(1), 0);
+ if (!machine_is_smartq5())
+ gpio_direction_input(S3C64XX_GPL(10));
+
+ s3c_device_ohci.dev.platform_data = &smartq_usb_host_info;
+
+ return 0;
+
+err2:
+ gpio_free(S3C64XX_GPL(1));
+err:
+ gpio_free(S3C64XX_GPL(0));
+ return ret;
+}
+
+static int __init smartq_usb_otg_init(void)
+{
+ clk_xusbxti.rate = 12000000;
+
+ return 0;
+}
+
+static int __init smartq_wifi_init(void)
+{
+ int ret;
+
+ ret = gpio_request(S3C64XX_GPK(1), "wifi control");
+ if (ret < 0) {
+ pr_err("%s: failed to get GPK1\n", __func__);
+ return ret;
+ }
+
+ ret = gpio_request(S3C64XX_GPK(2), "wifi reset");
+ if (ret < 0) {
+ pr_err("%s: failed to get GPK2\n", __func__);
+ gpio_free(S3C64XX_GPK(1));
+ return ret;
+ }
+
+ /* turn power on */
+ gpio_direction_output(S3C64XX_GPK(1), 1);
+
+ /* reset device */
+ gpio_direction_output(S3C64XX_GPK(2), 0);
+ mdelay(100);
+ gpio_set_value(S3C64XX_GPK(2), 1);
+ gpio_direction_input(S3C64XX_GPK(2));
+
+ return 0;
+}
+
+static struct map_desc smartq_iodesc[] __initdata = {};
+void __init smartq_map_io(void)
+{
+ s3c64xx_init_io(smartq_iodesc, ARRAY_SIZE(smartq_iodesc));
+ s3c24xx_init_clocks(12000000);
+ s3c24xx_init_uarts(smartq_uartcfgs, ARRAY_SIZE(smartq_uartcfgs));
+
+ smartq_lcd_mode_set();
+}
+
+void __init smartq_machine_init(void)
+{
+ s3c_i2c0_set_platdata(NULL);
+ s3c_hwmon_set_platdata(&smartq_hwmon_pdata);
+ s3c_sdhci1_set_platdata(&smartq_internal_hsmmc_pdata);
+ s3c_sdhci2_set_platdata(&smartq_internal_hsmmc_pdata);
+ s3c24xx_ts_set_platdata(&smartq_touchscreen_pdata);
+
+ WARN_ON(smartq_power_off_init());
+ WARN_ON(smartq_usb_host_init());
+ WARN_ON(smartq_usb_otg_init());
+ WARN_ON(smartq_wifi_init());
+
+ platform_add_devices(smartq_devices, ARRAY_SIZE(smartq_devices));
+}
diff --git a/arch/arm/mach-s3c64xx/mach-smartq.h b/arch/arm/mach-s3c64xx/mach-smartq.h
new file mode 100644
index 000000000000..8e8b693db3af
--- /dev/null
+++ b/arch/arm/mach-s3c64xx/mach-smartq.h
@@ -0,0 +1,20 @@
+/*
+ * linux/arch/arm/mach-s3c64xx/mach-smartq.h
+ *
+ * Copyright (C) 2010 Maurus Cuelenaere
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __MACH_SMARTQ_H
+#define __MACH_SMARTQ_H __FILE__
+
+#include <linux/init.h>
+
+extern void __init smartq_map_io(void);
+extern void __init smartq_machine_init(void);
+
+#endif /* __MACH_SMARTQ_H */
diff --git a/arch/arm/mach-s3c64xx/mach-smartq5.c b/arch/arm/mach-s3c64xx/mach-smartq5.c
new file mode 100644
index 000000000000..1d0326ead90f
--- /dev/null
+++ b/arch/arm/mach-s3c64xx/mach-smartq5.c
@@ -0,0 +1,185 @@
+/*
+ * linux/arch/arm/mach-s3c64xx/mach-smartq5.c
+ *
+ * Copyright (C) 2010 Maurus Cuelenaere
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/fb.h>
+#include <linux/gpio.h>
+#include <linux/gpio_keys.h>
+#include <linux/i2c-gpio.h>
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/leds.h>
+#include <linux/platform_device.h>
+
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+
+#include <mach/map.h>
+#include <mach/regs-fb.h>
+#include <mach/regs-gpio.h>
+#include <mach/s3c6410.h>
+
+#include <plat/cpu.h>
+#include <plat/devs.h>
+#include <plat/fb.h>
+#include <plat/gpio-cfg.h>
+
+#include "mach-smartq.h"
+
+static void __init smartq5_lcd_setup_gpio(void)
+{
+ gpio_request(S3C64XX_GPM(0), "LCD SCEN pin");
+ gpio_request(S3C64XX_GPM(1), "LCD SCL pin");
+ gpio_request(S3C64XX_GPM(2), "LCD SDA pin");
+ gpio_request(S3C64XX_GPM(3), "LCD power");
+
+ /* turn power off */
+ gpio_direction_output(S3C64XX_GPM(0), 1);
+ gpio_direction_input(S3C64XX_GPM(1));
+ gpio_direction_input(S3C64XX_GPM(2));
+ gpio_direction_output(S3C64XX_GPM(3), 0);
+}
+
+static struct i2c_gpio_platform_data smartq5_lcd_control = {
+ .sda_pin = S3C64XX_GPM(2),
+ .scl_pin = S3C64XX_GPM(1),
+};
+
+static struct platform_device smartq5_lcd_control_device = {
+ .name = "i2c-gpio",
+ .id = 1,
+ .dev.platform_data = &smartq5_lcd_control,
+};
+
+static struct gpio_led smartq5_leds[] __initdata = {
+ {
+ .name = "smartq5:green",
+ .active_low = 1,
+ .gpio = S3C64XX_GPN(8),
+ },
+ {
+ .name = "smartq5:red",
+ .active_low = 1,
+ .gpio = S3C64XX_GPN(9),
+ },
+};
+
+static struct gpio_led_platform_data smartq5_led_data = {
+ .num_leds = ARRAY_SIZE(smartq5_leds),
+ .leds = smartq5_leds,
+};
+
+static struct platform_device smartq5_leds_device = {
+ .name = "leds-gpio",
+ .id = -1,
+ .dev.platform_data = &smartq5_led_data,
+};
+
+/* Labels according to the SmartQ manual */
+static struct gpio_keys_button smartq5_buttons[] = {
+ {
+ .gpio = S3C64XX_GPL(14),
+ .code = KEY_POWER,
+ .desc = "Power",
+ .active_low = 1,
+ .debounce_interval = 5,
+ .type = EV_KEY,
+ },
+ {
+ .gpio = S3C64XX_GPN(2),
+ .code = KEY_KPMINUS,
+ .desc = "Minus",
+ .active_low = 1,
+ .debounce_interval = 5,
+ .type = EV_KEY,
+ },
+ {
+ .gpio = S3C64XX_GPN(12),
+ .code = KEY_KPPLUS,
+ .desc = "Plus",
+ .active_low = 1,
+ .debounce_interval = 5,
+ .type = EV_KEY,
+ },
+ {
+ .gpio = S3C64XX_GPN(15),
+ .code = KEY_ENTER,
+ .desc = "Move",
+ .active_low = 1,
+ .debounce_interval = 5,
+ .type = EV_KEY,
+ },
+};
+
+static struct gpio_keys_platform_data smartq5_buttons_data = {
+ .buttons = smartq5_buttons,
+ .nbuttons = ARRAY_SIZE(smartq5_buttons),
+};
+
+static struct platform_device smartq5_buttons_device = {
+ .name = "gpio-keys",
+ .id = 0,
+ .num_resources = 0,
+ .dev = {
+ .platform_data = &smartq5_buttons_data,
+ }
+};
+
+static struct s3c_fb_pd_win smartq5_fb_win0 = {
+ .win_mode = {
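+		/* pixel clock period in picoseconds: 10^12 / (htotal * vtotal * refresh), here ~80 Hz */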
+ .pixclock = 1000000000000ULL /
+ ((40+1+216+800)*(10+1+35+480)*80),
+ .left_margin = 40,
+ .right_margin = 216,
+ .upper_margin = 10,
+ .lower_margin = 35,
+ .hsync_len = 1,
+ .vsync_len = 1,
+ .xres = 800,
+ .yres = 480,
+ },
+ .max_bpp = 32,
+ .default_bpp = 16,
+};
+
+static struct s3c_fb_platdata smartq5_lcd_pdata __initdata = {
+ .setup_gpio = s3c64xx_fb_gpio_setup_24bpp,
+ .win[0] = &smartq5_fb_win0,
+ .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
+ .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC |
+ VIDCON1_INV_VDEN,
+};
+
+static struct platform_device *smartq5_devices[] __initdata = {
+ &smartq5_leds_device,
+ &smartq5_buttons_device,
+ &smartq5_lcd_control_device,
+};
+
+static void __init smartq5_machine_init(void)
+{
+ s3c_fb_set_platdata(&smartq5_lcd_pdata);
+
+ smartq_machine_init();
+ smartq5_lcd_setup_gpio();
+
+ platform_add_devices(smartq5_devices, ARRAY_SIZE(smartq5_devices));
+}
+
+MACHINE_START(SMARTQ5, "SmartQ 5")
+ /* Maintainer: Maurus Cuelenaere <mcuelenaere AT gmail DOT com> */
+ .phys_io = S3C_PA_UART & 0xfff00000,
+ .io_pg_offst = (((u32)S3C_VA_UART) >> 18) & 0xfffc,
+ .boot_params = S3C64XX_PA_SDRAM + 0x100,
+ .init_irq = s3c6410_init_irq,
+ .map_io = smartq_map_io,
+ .init_machine = smartq5_machine_init,
+ .timer = &s3c24xx_timer,
+MACHINE_END
diff --git a/arch/arm/mach-s3c64xx/mach-smartq7.c b/arch/arm/mach-s3c64xx/mach-smartq7.c
new file mode 100644
index 000000000000..e0bc78ecb156
--- /dev/null
+++ b/arch/arm/mach-s3c64xx/mach-smartq7.c
@@ -0,0 +1,201 @@
+/*
+ * linux/arch/arm/mach-s3c64xx/mach-smartq7.c
+ *
+ * Copyright (C) 2010 Maurus Cuelenaere
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/fb.h>
+#include <linux/gpio.h>
+#include <linux/gpio_keys.h>
+#include <linux/i2c-gpio.h>
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/leds.h>
+#include <linux/platform_device.h>
+
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+
+#include <mach/map.h>
+#include <mach/regs-fb.h>
+#include <mach/regs-gpio.h>
+#include <mach/s3c6410.h>
+
+#include <plat/cpu.h>
+#include <plat/devs.h>
+#include <plat/fb.h>
+#include <plat/gpio-cfg.h>
+
+#include "mach-smartq.h"
+
+static void __init smartq7_lcd_setup_gpio(void)
+{
+ gpio_request(S3C64XX_GPM(0), "LCD CSB pin");
+ gpio_request(S3C64XX_GPM(3), "LCD power");
+ gpio_request(S3C64XX_GPM(4), "LCD power status");
+
+ /* turn power off */
+ gpio_direction_output(S3C64XX_GPM(0), 1);
+ gpio_direction_output(S3C64XX_GPM(3), 0);
+ gpio_direction_input(S3C64XX_GPM(4));
+}
+
+static struct i2c_gpio_platform_data smartq7_lcd_control = {
+ .sda_pin = S3C64XX_GPM(2),
+ .scl_pin = S3C64XX_GPM(1),
+ .sda_is_open_drain = 1,
+ .scl_is_open_drain = 1,
+};
+
+static struct platform_device smartq7_lcd_control_device = {
+ .name = "i2c-gpio",
+ .id = 1,
+ .dev.platform_data = &smartq7_lcd_control,
+};
+
+static struct gpio_led smartq7_leds[] __initdata = {
+ {
+ .name = "smartq7:red",
+ .active_low = 1,
+ .gpio = S3C64XX_GPN(8),
+ },
+ {
+ .name = "smartq7:green",
+ .active_low = 1,
+ .gpio = S3C64XX_GPN(9),
+ },
+};
+
+static struct gpio_led_platform_data smartq7_led_data = {
+ .num_leds = ARRAY_SIZE(smartq7_leds),
+ .leds = smartq7_leds,
+};
+
+static struct platform_device smartq7_leds_device = {
+ .name = "leds-gpio",
+ .id = -1,
+ .dev.platform_data = &smartq7_led_data,
+};
+
+/* Labels according to the SmartQ manual */
+static struct gpio_keys_button smartq7_buttons[] = {
+ {
+ .gpio = S3C64XX_GPL(14),
+ .code = KEY_POWER,
+ .desc = "Power",
+ .active_low = 1,
+ .debounce_interval = 5,
+ .type = EV_KEY,
+ },
+ {
+ .gpio = S3C64XX_GPN(2),
+ .code = KEY_FN,
+ .desc = "Function",
+ .active_low = 1,
+ .debounce_interval = 5,
+ .type = EV_KEY,
+ },
+ {
+ .gpio = S3C64XX_GPN(3),
+ .code = KEY_KPMINUS,
+ .desc = "Minus",
+ .active_low = 1,
+ .debounce_interval = 5,
+ .type = EV_KEY,
+ },
+ {
+ .gpio = S3C64XX_GPN(4),
+ .code = KEY_KPPLUS,
+ .desc = "Plus",
+ .active_low = 1,
+ .debounce_interval = 5,
+ .type = EV_KEY,
+ },
+ {
+ .gpio = S3C64XX_GPN(12),
+ .code = KEY_ENTER,
+ .desc = "Enter",
+ .active_low = 1,
+ .debounce_interval = 5,
+ .type = EV_KEY,
+ },
+ {
+ .gpio = S3C64XX_GPN(15),
+ .code = KEY_ESC,
+ .desc = "Cancel",
+ .active_low = 1,
+ .debounce_interval = 5,
+ .type = EV_KEY,
+ },
+};
+
+static struct gpio_keys_platform_data smartq7_buttons_data = {
+ .buttons = smartq7_buttons,
+ .nbuttons = ARRAY_SIZE(smartq7_buttons),
+};
+
+static struct platform_device smartq7_buttons_device = {
+ .name = "gpio-keys",
+ .id = 0,
+ .num_resources = 0,
+ .dev = {
+ .platform_data = &smartq7_buttons_data,
+ }
+};
+
+static struct s3c_fb_pd_win smartq7_fb_win0 = {
+ .win_mode = {
+ .pixclock = 1000000000000ULL /
+ ((3+10+5+800)*(1+3+20+480)*80),
+ .left_margin = 3,
+ .right_margin = 5,
+ .upper_margin = 1,
+ .lower_margin = 20,
+ .hsync_len = 10,
+ .vsync_len = 3,
+ .xres = 800,
+ .yres = 480,
+ },
+ .max_bpp = 32,
+ .default_bpp = 16,
+};
+
+static struct s3c_fb_platdata smartq7_lcd_pdata __initdata = {
+ .setup_gpio = s3c64xx_fb_gpio_setup_24bpp,
+ .win[0] = &smartq7_fb_win0,
+ .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
+ .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC |
+ VIDCON1_INV_VCLK,
+};
+
+static struct platform_device *smartq7_devices[] __initdata = {
+ &smartq7_leds_device,
+ &smartq7_buttons_device,
+ &smartq7_lcd_control_device,
+};
+
+static void __init smartq7_machine_init(void)
+{
+ s3c_fb_set_platdata(&smartq7_lcd_pdata);
+
+ smartq_machine_init();
+ smartq7_lcd_setup_gpio();
+
+ platform_add_devices(smartq7_devices, ARRAY_SIZE(smartq7_devices));
+}
+
+MACHINE_START(SMARTQ7, "SmartQ 7")
+ /* Maintainer: Maurus Cuelenaere <mcuelenaere AT gmail DOT com> */
+ .phys_io = S3C_PA_UART & 0xfff00000,
+ .io_pg_offst = (((u32)S3C_VA_UART) >> 18) & 0xfffc,
+ .boot_params = S3C64XX_PA_SDRAM + 0x100,
+ .init_irq = s3c6410_init_irq,
+ .map_io = smartq_map_io,
+ .init_machine = smartq7_machine_init,
+ .timer = &s3c24xx_timer,
+MACHINE_END
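
Both SmartQ variants register an "i2c-gpio" platform device with id 1, bit-banging SDA on GPM(2) and SCL on GPM(1) purely for LCD control; nothing in this patch attaches a client to that bus. If a panel or backlight controller were hooked up later, a hypothetical board addition (device name and address invented for illustration, not part of this patch) could look like this:

/* Hypothetical example: attach a client to the bit-banged LCD-control
 * bus (i2c bus 1) registered by the SmartQ boards above.
 */
static struct i2c_board_info smartq_lcd_i2c_devs[] __initdata = {
	{ I2C_BOARD_INFO("example-lcd-ctrl", 0x38), },
};

static void __init smartq_lcd_i2c_init(void)
{
	i2c_register_board_info(1, smartq_lcd_i2c_devs,
				ARRAY_SIZE(smartq_lcd_i2c_devs));
}
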
diff --git a/arch/arm/mach-s3c64xx/mach-smdk6400.c b/arch/arm/mach-s3c64xx/mach-smdk6400.c
index f7b18983950c..59916676d8d2 100644
--- a/arch/arm/mach-s3c64xx/mach-smdk6400.c
+++ b/arch/arm/mach-s3c64xx/mach-smdk6400.c
@@ -84,7 +84,7 @@ static void __init smdk6400_machine_init(void)
}
MACHINE_START(SMDK6400, "SMDK6400")
- /* Maintainer: Ben Dooks <ben@fluff.org> */
+ /* Maintainer: Ben Dooks <ben-linux@fluff.org> */
.phys_io = S3C_PA_UART & 0xfff00000,
.io_pg_offst = (((u32)S3C_VA_UART) >> 18) & 0xfffc,
.boot_params = S3C64XX_PA_SDRAM + 0x100,
diff --git a/arch/arm/mach-s3c64xx/mach-smdk6410.c b/arch/arm/mach-s3c64xx/mach-smdk6410.c
index 2d5afd221d77..d9a03555f88b 100644
--- a/arch/arm/mach-s3c64xx/mach-smdk6410.c
+++ b/arch/arm/mach-s3c64xx/mach-smdk6410.c
@@ -64,6 +64,8 @@
#include <plat/clock.h>
#include <plat/devs.h>
#include <plat/cpu.h>
+#include <plat/adc.h>
+#include <plat/ts.h>
#define UCON S3C2410_UCON_DEFAULT | S3C2410_UCON_UCLK
#define ULCON S3C2410_LCON_CS8 | S3C2410_LCON_PNONE | S3C2410_LCON_STOPB
@@ -262,6 +264,9 @@ static struct platform_device *smdk6410_devices[] __initdata = {
&smdk6410_lcd_powerdev,
&smdk6410_smsc911x,
+ &s3c_device_adc,
+ &s3c_device_ts,
+ &s3c_device_wdt,
};
#ifdef CONFIG_REGULATOR
@@ -596,6 +601,12 @@ static struct i2c_board_info i2c_devs1[] __initdata = {
{ I2C_BOARD_INFO("24c128", 0x57), }, /* Samsung S524AD0XD1 */
};
+static struct s3c2410_ts_mach_info s3c_ts_platform __initdata = {
+ .delay = 10000,
+ .presc = 49,
+ .oversampling_shift = 2,
+};
+
static void __init smdk6410_map_io(void)
{
u32 tmp;
@@ -625,6 +636,8 @@ static void __init smdk6410_machine_init(void)
s3c_i2c1_set_platdata(NULL);
s3c_fb_set_platdata(&smdk6410_lcd_pdata);
+ s3c24xx_ts_set_platdata(&s3c_ts_platform);
+
/* configure nCS1 width to 16 bits */
cs1 = __raw_readl(S3C64XX_SROM_BW) &
@@ -656,7 +669,7 @@ static void __init smdk6410_machine_init(void)
}
MACHINE_START(SMDK6410, "SMDK6410")
- /* Maintainer: Ben Dooks <ben@fluff.org> */
+ /* Maintainer: Ben Dooks <ben-linux@fluff.org> */
.phys_io = S3C_PA_UART & 0xfff00000,
.io_pg_offst = (((u32)S3C_VA_UART) >> 18) & 0xfffc,
.boot_params = S3C64XX_PA_SDRAM + 0x100,
diff --git a/arch/arm/mach-s3c64xx/pm.c b/arch/arm/mach-s3c64xx/pm.c
index b8ac4597fad7..79412f735a8d 100644
--- a/arch/arm/mach-s3c64xx/pm.c
+++ b/arch/arm/mach-s3c64xx/pm.c
@@ -18,8 +18,11 @@
#include <linux/io.h>
#include <mach/map.h>
+#include <mach/irqs.h>
#include <plat/pm.h>
+#include <plat/wakeup-mask.h>
+
#include <mach/regs-sys.h>
#include <mach/regs-gpio.h>
#include <mach/regs-clock.h>
@@ -153,8 +156,25 @@ static void s3c64xx_cpu_suspend(void)
panic("sleep resumed to originator?");
}
+/* mapping of interrupts to parts of the wakeup mask */
+static struct samsung_wakeup_mask wake_irqs[] = {
+ { .irq = IRQ_RTC_ALARM, .bit = S3C64XX_PWRCFG_RTC_ALARM_DISABLE, },
+ { .irq = IRQ_RTC_TIC, .bit = S3C64XX_PWRCFG_RTC_TICK_DISABLE, },
+ { .irq = IRQ_PENDN, .bit = S3C64XX_PWRCFG_TS_DISABLE, },
+ { .irq = IRQ_HSMMC0, .bit = S3C64XX_PWRCFG_MMC0_DISABLE, },
+ { .irq = IRQ_HSMMC1, .bit = S3C64XX_PWRCFG_MMC1_DISABLE, },
+ { .irq = IRQ_HSMMC2, .bit = S3C64XX_PWRCFG_MMC2_DISABLE, },
+ { .irq = NO_WAKEUP_IRQ, .bit = S3C64XX_PWRCFG_BATF_DISABLE},
+ { .irq = NO_WAKEUP_IRQ, .bit = S3C64XX_PWRCFG_MSM_DISABLE },
+ { .irq = NO_WAKEUP_IRQ, .bit = S3C64XX_PWRCFG_HSI_DISABLE },
+ { .irq = NO_WAKEUP_IRQ, .bit = S3C64XX_PWRCFG_MSM_DISABLE },
+};
+
static void s3c64xx_pm_prepare(void)
{
+ samsung_sync_wakemask(S3C64XX_PWR_CFG,
+ wake_irqs, ARRAY_SIZE(wake_irqs));
+
/* store address of resume. */
__raw_writel(virt_to_phys(s3c_cpu_resume), S3C64XX_INFORM0);
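
The wake_irqs[] table and the samsung_sync_wakemask() call above pair each wakeup-capable interrupt with its PWRCFG "disable" bit, so that only sources the kernel has actually armed for wakeup (RTC alarm/tick, touchscreen pen-down, the three HSMMC controllers) stay unmasked across suspend; the NO_WAKEUP_IRQ rows keep BATF/MSM/HSI permanently masked (the repeated MSM row is redundant but harmless, since it sets the same bit twice). A self-contained sketch of that folding logic, assuming this is what the helper does (wake_enabled() stands in for the real IRQ wakeup-state lookup; this is not the plat-samsung implementation):

#include <stdbool.h>
#include <stdint.h>

struct wakeup_mask_entry {		/* mirrors samsung_wakeup_mask */
	int irq;			/* source IRQ, or -1 for "never wakes" */
	uint32_t bit;			/* PWRCFG_*_DISABLE bit */
};

static uint32_t fold_wakemask(uint32_t pwrcfg,
			      const struct wakeup_mask_entry *mask, int n,
			      bool (*wake_enabled)(int irq))
{
	for (; n > 0; n--, mask++) {
		if (mask->irq < 0 || !wake_enabled(mask->irq))
			pwrcfg |= mask->bit;	/* source may not wake the system */
		else
			pwrcfg &= ~mask->bit;	/* source allowed to wake */
	}
	return pwrcfg;
}
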
diff --git a/arch/arm/mach-s3c64xx/s3c6400.c b/arch/arm/mach-s3c64xx/s3c6400.c
index 707e34e3afd1..5e93fe3f3f40 100644
--- a/arch/arm/mach-s3c64xx/s3c6400.c
+++ b/arch/arm/mach-s3c64xx/s3c6400.c
@@ -37,6 +37,7 @@
#include <plat/clock.h>
#include <plat/sdhci.h>
#include <plat/iic-core.h>
+#include <plat/onenand-core.h>
#include <mach/s3c6400.h>
void __init s3c6400_map_io(void)
@@ -51,6 +52,9 @@ void __init s3c6400_map_io(void)
s3c_i2c0_setname("s3c2440-i2c");
s3c_device_nand.name = "s3c6400-nand";
+
+ s3c_onenand_setname("s3c6400-onenand");
+ s3c64xx_onenand1_setname("s3c6400-onenand");
}
void __init s3c6400_init_clocks(int xtal)
diff --git a/arch/arm/mach-s3c64xx/s3c6410.c b/arch/arm/mach-s3c64xx/s3c6410.c
index 59635d19466a..014401c39f36 100644
--- a/arch/arm/mach-s3c64xx/s3c6410.c
+++ b/arch/arm/mach-s3c64xx/s3c6410.c
@@ -38,6 +38,8 @@
#include <plat/clock.h>
#include <plat/sdhci.h>
#include <plat/iic-core.h>
+#include <plat/adc.h>
+#include <plat/onenand-core.h>
#include <mach/s3c6400.h>
#include <mach/s3c6410.h>
@@ -52,7 +54,10 @@ void __init s3c6410_map_io(void)
s3c_i2c0_setname("s3c2440-i2c");
s3c_i2c1_setname("s3c2440-i2c");
+ s3c_device_adc.name = "s3c64xx-adc";
s3c_device_nand.name = "s3c6400-nand";
+ s3c_onenand_setname("s3c6410-onenand");
+ s3c64xx_onenand1_setname("s3c6410-onenand");
}
void __init s3c6410_init_clocks(int xtal)
diff --git a/arch/arm/mach-s5p6440/Kconfig b/arch/arm/mach-s5p6440/Kconfig
index 4c29ff8b07de..f066fae07c57 100644
--- a/arch/arm/mach-s5p6440/Kconfig
+++ b/arch/arm/mach-s5p6440/Kconfig
@@ -9,12 +9,17 @@ if ARCH_S5P6440
config CPU_S5P6440
bool
+ select S3C_PL330_DMA
help
Enable S5P6440 CPU support
config MACH_SMDK6440
bool "SMDK6440"
select CPU_S5P6440
+ select SAMSUNG_DEV_TS
+ select SAMSUNG_DEV_ADC
+ select S3C_DEV_WDT
+ select HAVE_S3C2410_WATCHDOG
help
Machine support for the Samsung SMDK6440
diff --git a/arch/arm/mach-s5p6440/Makefile b/arch/arm/mach-s5p6440/Makefile
index 1ad894b1d3ab..be3c53aab23f 100644
--- a/arch/arm/mach-s5p6440/Makefile
+++ b/arch/arm/mach-s5p6440/Makefile
@@ -12,8 +12,13 @@ obj- :=
# Core support for S5P6440 system
-obj-$(CONFIG_CPU_S5P6440) += cpu.o init.o clock.o gpio.o
+obj-$(CONFIG_CPU_S5P6440) += cpu.o init.o clock.o gpio.o dma.o
+obj-$(CONFIG_CPU_S5P6440) += setup-i2c0.o
# machine support
obj-$(CONFIG_MACH_SMDK6440) += mach-smdk6440.o
+
+# device support
+obj-y += dev-audio.o
+obj-$(CONFIG_S3C64XX_DEV_SPI) += dev-spi.o
diff --git a/arch/arm/mach-s5p6440/clock.c b/arch/arm/mach-s5p6440/clock.c
index b2672e16e7aa..ca6e48dce777 100644
--- a/arch/arm/mach-s5p6440/clock.c
+++ b/arch/arm/mach-s5p6440/clock.c
@@ -134,24 +134,6 @@ static struct clksrc_clk clk_mout_mpll = {
.reg_src = { .reg = S5P_CLK_SRC0, .shift = 1, .size = 1 },
};
-static struct clk clk_h_low = {
- .name = "hclk_low",
- .id = -1,
- .rate = 0,
- .parent = NULL,
- .ctrlbit = 0,
- .ops = &clk_ops_def_setrate,
-};
-
-static struct clk clk_p_low = {
- .name = "pclk_low",
- .id = -1,
- .rate = 0,
- .parent = NULL,
- .ctrlbit = 0,
- .ops = &clk_ops_def_setrate,
-};
-
enum perf_level {
L0 = 532*1000,
L1 = 266*1000,
@@ -247,23 +229,70 @@ static struct clk_ops s5p6440_clkarm_ops = {
.round_rate = s5p6440_armclk_round_rate,
};
-static unsigned long s5p6440_clk_doutmpll_get_rate(struct clk *clk)
-{
- unsigned long rate = clk_get_rate(clk->parent);
+static struct clksrc_clk clk_armclk = {
+ .clk = {
+ .name = "armclk",
+ .id = 1,
+ .parent = &clk_mout_apll.clk,
+ .ops = &s5p6440_clkarm_ops,
+ },
+ .reg_div = { .reg = S5P_CLK_DIV0, .shift = 0, .size = 4 },
+};
- if (__raw_readl(S5P_CLK_DIV0) & S5P_CLKDIV0_MPLL_MASK)
- rate /= 2;
+static struct clksrc_clk clk_dout_mpll = {
+ .clk = {
+ .name = "dout_mpll",
+ .id = -1,
+ .parent = &clk_mout_mpll.clk,
+ },
+ .reg_div = { .reg = S5P_CLK_DIV0, .shift = 4, .size = 1 },
+};
- return rate;
-}
+static struct clksrc_clk clk_hclk = {
+ .clk = {
+ .name = "clk_hclk",
+ .id = -1,
+ .parent = &clk_armclk.clk,
+ },
+ .reg_div = { .reg = S5P_CLK_DIV0, .shift = 8, .size = 4 },
+};
-static struct clk clk_dout_mpll = {
- .name = "dout_mpll",
- .id = -1,
- .parent = &clk_mout_mpll.clk,
- .ops = &(struct clk_ops) {
- .get_rate = s5p6440_clk_doutmpll_get_rate,
+static struct clksrc_clk clk_pclk = {
+ .clk = {
+ .name = "clk_pclk",
+ .id = -1,
+ .parent = &clk_hclk.clk,
},
+ .reg_div = { .reg = S5P_CLK_DIV0, .shift = 12, .size = 4 },
+};
+
+static struct clk *clkset_hclklow_list[] = {
+ &clk_mout_apll.clk,
+ &clk_mout_mpll.clk,
+};
+
+static struct clksrc_sources clkset_hclklow = {
+ .sources = clkset_hclklow_list,
+ .nr_sources = ARRAY_SIZE(clkset_hclklow_list),
+};
+
+static struct clksrc_clk clk_hclk_low = {
+ .clk = {
+ .name = "hclk_low",
+ .id = -1,
+ },
+ .sources = &clkset_hclklow,
+ .reg_src = { .reg = S5P_SYS_OTHERS, .shift = 6, .size = 1 },
+ .reg_div = { .reg = S5P_CLK_DIV3, .shift = 8, .size = 4 },
+};
+
+static struct clksrc_clk clk_pclk_low = {
+ .clk = {
+ .name = "pclk_low",
+ .id = -1,
+ .parent = &clk_hclk_low.clk,
+ },
+ .reg_div = { .reg = S5P_CLK_DIV3, .shift = 12, .size = 4 },
};
int s5p6440_clk48m_ctrl(struct clk *clk, int enable)
@@ -307,6 +336,11 @@ static int s5p6440_sclk_ctrl(struct clk *clk, int enable)
return s5p_gatectrl(S5P_CLK_GATE_SCLK0, clk, enable);
}
+static int s5p6440_sclk1_ctrl(struct clk *clk, int enable)
+{
+ return s5p_gatectrl(S5P_CLK_GATE_SCLK1, clk, enable);
+}
+
static int s5p6440_mem_ctrl(struct clk *clk, int enable)
{
return s5p_gatectrl(S5P_CLK_GATE_MEM0, clk, enable);
@@ -321,37 +355,37 @@ static struct clk init_clocks_disable[] = {
{
.name = "nand",
.id = -1,
- .parent = &clk_h,
+ .parent = &clk_hclk.clk,
.enable = s5p6440_mem_ctrl,
.ctrlbit = S5P_CLKCON_MEM0_HCLK_NFCON,
}, {
.name = "adc",
.id = -1,
- .parent = &clk_p_low,
+ .parent = &clk_pclk_low.clk,
.enable = s5p6440_pclk_ctrl,
.ctrlbit = S5P_CLKCON_PCLK_TSADC,
}, {
.name = "i2c",
.id = -1,
- .parent = &clk_p_low,
+ .parent = &clk_pclk_low.clk,
.enable = s5p6440_pclk_ctrl,
.ctrlbit = S5P_CLKCON_PCLK_IIC0,
}, {
.name = "i2s_v40",
.id = 0,
- .parent = &clk_p_low,
+ .parent = &clk_pclk_low.clk,
.enable = s5p6440_pclk_ctrl,
.ctrlbit = S5P_CLKCON_PCLK_IIS2,
}, {
.name = "spi",
.id = 0,
- .parent = &clk_p_low,
+ .parent = &clk_pclk_low.clk,
.enable = s5p6440_pclk_ctrl,
.ctrlbit = S5P_CLKCON_PCLK_SPI0,
}, {
.name = "spi",
.id = 1,
- .parent = &clk_p_low,
+ .parent = &clk_pclk_low.clk,
.enable = s5p6440_pclk_ctrl,
.ctrlbit = S5P_CLKCON_PCLK_SPI1,
}, {
@@ -387,58 +421,124 @@ static struct clk init_clocks_disable[] = {
}, {
.name = "otg",
.id = -1,
- .parent = &clk_h_low,
+ .parent = &clk_hclk_low.clk,
.enable = s5p6440_hclk0_ctrl,
.ctrlbit = S5P_CLKCON_HCLK0_USB
}, {
.name = "post",
.id = -1,
- .parent = &clk_h_low,
+ .parent = &clk_hclk_low.clk,
.enable = s5p6440_hclk0_ctrl,
.ctrlbit = S5P_CLKCON_HCLK0_POST0
}, {
.name = "lcd",
.id = -1,
- .parent = &clk_h_low,
+ .parent = &clk_hclk_low.clk,
.enable = s5p6440_hclk1_ctrl,
.ctrlbit = S5P_CLKCON_HCLK1_DISPCON,
}, {
.name = "hsmmc",
.id = 0,
- .parent = &clk_h_low,
+ .parent = &clk_hclk_low.clk,
.enable = s5p6440_hclk0_ctrl,
.ctrlbit = S5P_CLKCON_HCLK0_HSMMC0,
}, {
.name = "hsmmc",
.id = 1,
- .parent = &clk_h_low,
+ .parent = &clk_hclk_low.clk,
.enable = s5p6440_hclk0_ctrl,
.ctrlbit = S5P_CLKCON_HCLK0_HSMMC1,
}, {
.name = "hsmmc",
.id = 2,
- .parent = &clk_h_low,
+ .parent = &clk_hclk_low.clk,
.enable = s5p6440_hclk0_ctrl,
.ctrlbit = S5P_CLKCON_HCLK0_HSMMC2,
}, {
.name = "rtc",
.id = -1,
- .parent = &clk_p_low,
+ .parent = &clk_pclk_low.clk,
.enable = s5p6440_pclk_ctrl,
.ctrlbit = S5P_CLKCON_PCLK_RTC,
}, {
.name = "watchdog",
.id = -1,
- .parent = &clk_p_low,
+ .parent = &clk_pclk_low.clk,
.enable = s5p6440_pclk_ctrl,
.ctrlbit = S5P_CLKCON_PCLK_WDT,
}, {
.name = "timers",
.id = -1,
- .parent = &clk_p_low,
+ .parent = &clk_pclk_low.clk,
.enable = s5p6440_pclk_ctrl,
.ctrlbit = S5P_CLKCON_PCLK_PWM,
- }
+ }, {
+ .name = "hclk_fimgvg",
+ .id = -1,
+ .parent = &clk_hclk.clk,
+ .enable = s5p6440_hclk1_ctrl,
+ .ctrlbit = (1 << 2),
+ }, {
+ .name = "tsi",
+ .id = -1,
+ .parent = &clk_hclk_low.clk,
+ .enable = s5p6440_hclk1_ctrl,
+ .ctrlbit = (1 << 0),
+ }, {
+ .name = "pclk_fimgvg",
+ .id = -1,
+ .parent = &clk_pclk.clk,
+ .enable = s5p6440_pclk_ctrl,
+ .ctrlbit = (1 << 31),
+ }, {
+ .name = "dmc0",
+ .id = -1,
+ .parent = &clk_pclk.clk,
+ .enable = s5p6440_pclk_ctrl,
+ .ctrlbit = (1 << 30),
+ }, {
+ .name = "etm",
+ .id = -1,
+ .parent = &clk_pclk.clk,
+ .enable = s5p6440_pclk_ctrl,
+ .ctrlbit = (1 << 29),
+ }, {
+ .name = "dsim",
+ .id = -1,
+ .parent = &clk_pclk_low.clk,
+ .enable = s5p6440_pclk_ctrl,
+ .ctrlbit = (1 << 28),
+ }, {
+ .name = "gps",
+ .id = -1,
+ .parent = &clk_pclk_low.clk,
+ .enable = s5p6440_pclk_ctrl,
+ .ctrlbit = (1 << 25),
+ }, {
+ .name = "pcm",
+ .id = -1,
+ .parent = &clk_pclk_low.clk,
+ .enable = s5p6440_pclk_ctrl,
+ .ctrlbit = (1 << 8),
+ }, {
+ .name = "irom",
+ .id = -1,
+ .parent = &clk_hclk.clk,
+ .enable = s5p6440_hclk0_ctrl,
+ .ctrlbit = (1 << 25),
+ }, {
+ .name = "dma",
+ .id = -1,
+ .parent = &clk_hclk_low.clk,
+ .enable = s5p6440_hclk0_ctrl,
+ .ctrlbit = (1 << 12),
+ }, {
+ .name = "2d",
+ .id = -1,
+ .parent = &clk_hclk.clk,
+ .enable = s5p6440_hclk0_ctrl,
+ .ctrlbit = (1 << 8),
+ },
};
/*
@@ -448,34 +548,46 @@ static struct clk init_clocks[] = {
{
.name = "gpio",
.id = -1,
- .parent = &clk_p_low,
+ .parent = &clk_pclk_low.clk,
.enable = s5p6440_pclk_ctrl,
.ctrlbit = S5P_CLKCON_PCLK_GPIO,
}, {
.name = "uart",
.id = 0,
- .parent = &clk_p_low,
+ .parent = &clk_pclk_low.clk,
.enable = s5p6440_pclk_ctrl,
.ctrlbit = S5P_CLKCON_PCLK_UART0,
}, {
.name = "uart",
.id = 1,
- .parent = &clk_p_low,
+ .parent = &clk_pclk_low.clk,
.enable = s5p6440_pclk_ctrl,
.ctrlbit = S5P_CLKCON_PCLK_UART1,
}, {
.name = "uart",
.id = 2,
- .parent = &clk_p_low,
+ .parent = &clk_pclk_low.clk,
.enable = s5p6440_pclk_ctrl,
.ctrlbit = S5P_CLKCON_PCLK_UART2,
}, {
.name = "uart",
.id = 3,
- .parent = &clk_p_low,
+ .parent = &clk_pclk_low.clk,
.enable = s5p6440_pclk_ctrl,
.ctrlbit = S5P_CLKCON_PCLK_UART3,
- }
+ }, {
+ .name = "mem",
+ .id = -1,
+ .parent = &clk_hclk.clk,
+ .enable = s5p6440_hclk0_ctrl,
+ .ctrlbit = (1 << 21),
+ }, {
+ .name = "intc",
+ .id = -1,
+ .parent = &clk_hclk.clk,
+ .enable = s5p6440_hclk0_ctrl,
+ .ctrlbit = (1 << 1),
+ },
};
static struct clk clk_iis_cd_v40 = {
@@ -488,20 +600,20 @@ static struct clk clk_pcm_cd = {
.id = -1,
};
-static struct clk *clkset_spi_mmc_list[] = {
+static struct clk *clkset_group1_list[] = {
&clk_mout_epll.clk,
- &clk_dout_mpll,
+ &clk_dout_mpll.clk,
&clk_fin_epll,
};
-static struct clksrc_sources clkset_spi_mmc = {
- .sources = clkset_spi_mmc_list,
- .nr_sources = ARRAY_SIZE(clkset_spi_mmc_list),
+static struct clksrc_sources clkset_group1 = {
+ .sources = clkset_group1_list,
+ .nr_sources = ARRAY_SIZE(clkset_group1_list),
};
static struct clk *clkset_uart_list[] = {
&clk_mout_epll.clk,
- &clk_dout_mpll
+ &clk_dout_mpll.clk,
};
static struct clksrc_sources clkset_uart = {
@@ -509,6 +621,19 @@ static struct clksrc_sources clkset_uart = {
.nr_sources = ARRAY_SIZE(clkset_uart_list),
};
+static struct clk *clkset_audio_list[] = {
+ &clk_mout_epll.clk,
+ &clk_dout_mpll.clk,
+ &clk_fin_epll,
+ &clk_iis_cd_v40,
+ &clk_pcm_cd,
+};
+
+static struct clksrc_sources clkset_audio = {
+ .sources = clkset_audio_list,
+ .nr_sources = ARRAY_SIZE(clkset_audio_list),
+};
+
static struct clksrc_clk clksrcs[] = {
{
.clk = {
@@ -517,7 +642,7 @@ static struct clksrc_clk clksrcs[] = {
.ctrlbit = S5P_CLKCON_SCLK0_MMC0,
.enable = s5p6440_sclk_ctrl,
},
- .sources = &clkset_spi_mmc,
+ .sources = &clkset_group1,
.reg_src = { .reg = S5P_CLK_SRC0, .shift = 18, .size = 2 },
.reg_div = { .reg = S5P_CLK_DIV1, .shift = 0, .size = 4 },
}, {
@@ -527,7 +652,7 @@ static struct clksrc_clk clksrcs[] = {
.ctrlbit = S5P_CLKCON_SCLK0_MMC1,
.enable = s5p6440_sclk_ctrl,
},
- .sources = &clkset_spi_mmc,
+ .sources = &clkset_group1,
.reg_src = { .reg = S5P_CLK_SRC0, .shift = 20, .size = 2 },
.reg_div = { .reg = S5P_CLK_DIV1, .shift = 4, .size = 4 },
}, {
@@ -537,7 +662,7 @@ static struct clksrc_clk clksrcs[] = {
.ctrlbit = S5P_CLKCON_SCLK0_MMC2,
.enable = s5p6440_sclk_ctrl,
},
- .sources = &clkset_spi_mmc,
+ .sources = &clkset_group1,
.reg_src = { .reg = S5P_CLK_SRC0, .shift = 22, .size = 2 },
.reg_div = { .reg = S5P_CLK_DIV1, .shift = 8, .size = 4 },
}, {
@@ -557,7 +682,7 @@ static struct clksrc_clk clksrcs[] = {
.ctrlbit = S5P_CLKCON_SCLK0_SPI0,
.enable = s5p6440_sclk_ctrl,
},
- .sources = &clkset_spi_mmc,
+ .sources = &clkset_group1,
.reg_src = { .reg = S5P_CLK_SRC0, .shift = 14, .size = 2 },
.reg_div = { .reg = S5P_CLK_DIV2, .shift = 0, .size = 4 },
}, {
@@ -567,17 +692,63 @@ static struct clksrc_clk clksrcs[] = {
.ctrlbit = S5P_CLKCON_SCLK0_SPI1,
.enable = s5p6440_sclk_ctrl,
},
- .sources = &clkset_spi_mmc,
+ .sources = &clkset_group1,
.reg_src = { .reg = S5P_CLK_SRC0, .shift = 16, .size = 2 },
.reg_div = { .reg = S5P_CLK_DIV2, .shift = 4, .size = 4 },
- }
+ }, {
+ .clk = {
+ .name = "sclk_post",
+ .id = -1,
+ .ctrlbit = (1 << 10),
+ .enable = s5p6440_sclk_ctrl,
+ },
+ .sources = &clkset_group1,
+ .reg_src = { .reg = S5P_CLK_SRC0, .shift = 26, .size = 2 },
+ .reg_div = { .reg = S5P_CLK_DIV1, .shift = 12, .size = 4 },
+ }, {
+ .clk = {
+ .name = "sclk_dispcon",
+ .id = -1,
+ .ctrlbit = (1 << 1),
+ .enable = s5p6440_sclk1_ctrl,
+ },
+ .sources = &clkset_group1,
+ .reg_src = { .reg = S5P_CLK_SRC1, .shift = 4, .size = 2 },
+ .reg_div = { .reg = S5P_CLK_DIV3, .shift = 0, .size = 4 },
+ }, {
+ .clk = {
+ .name = "sclk_fimgvg",
+ .id = -1,
+ .ctrlbit = (1 << 2),
+ .enable = s5p6440_sclk1_ctrl,
+ },
+ .sources = &clkset_group1,
+ .reg_src = { .reg = S5P_CLK_SRC1, .shift = 8, .size = 2 },
+ .reg_div = { .reg = S5P_CLK_DIV3, .shift = 4, .size = 4 },
+ }, {
+ .clk = {
+ .name = "sclk_audio2",
+ .id = -1,
+ .ctrlbit = (1 << 11),
+ .enable = s5p6440_sclk_ctrl,
+ },
+ .sources = &clkset_audio,
+ .reg_src = { .reg = S5P_CLK_SRC1, .shift = 0, .size = 3 },
+ .reg_div = { .reg = S5P_CLK_DIV2, .shift = 24, .size = 4 },
+ },
};
/* Clock initialisation code */
-static struct clksrc_clk *init_parents[] = {
+static struct clksrc_clk *sysclks[] = {
&clk_mout_apll,
&clk_mout_epll,
&clk_mout_mpll,
+ &clk_dout_mpll,
+ &clk_armclk,
+ &clk_hclk,
+ &clk_pclk,
+ &clk_hclk_low,
+ &clk_pclk_low,
};
void __init_or_cpufreq s5p6440_setup_clocks(void)
@@ -593,21 +764,13 @@ void __init_or_cpufreq s5p6440_setup_clocks(void)
unsigned long apll;
unsigned long mpll;
unsigned int ptr;
- u32 clkdiv0;
- u32 clkdiv3;
/* Set S5P6440 functions for clk_fout_epll */
clk_fout_epll.enable = s5p6440_epll_enable;
clk_fout_epll.ops = &s5p6440_epll_ops;
- /* Set S5P6440 functions for arm clock */
- clk_arm.parent = &clk_mout_apll.clk;
- clk_arm.ops = &s5p6440_clkarm_ops;
clk_48m.enable = s5p6440_clk48m_ctrl;
- clkdiv0 = __raw_readl(S5P_CLK_DIV0);
- clkdiv3 = __raw_readl(S5P_CLK_DIV3);
-
xtal_clk = clk_get(NULL, "ext_xtal");
BUG_ON(IS_ERR(xtal_clk));
@@ -619,41 +782,28 @@ void __init_or_cpufreq s5p6440_setup_clocks(void)
mpll = s5p_get_pll45xx(xtal, __raw_readl(S5P_MPLL_CON), pll_4502);
apll = s5p_get_pll45xx(xtal, __raw_readl(S5P_APLL_CON), pll_4502);
+ clk_fout_mpll.rate = mpll;
+ clk_fout_epll.rate = epll;
+ clk_fout_apll.rate = apll;
+
printk(KERN_INFO "S5P6440: PLL settings, A=%ld.%ldMHz, M=%ld.%ldMHz," \
" E=%ld.%ldMHz\n",
print_mhz(apll), print_mhz(mpll), print_mhz(epll));
- fclk = apll / GET_DIV(clkdiv0, S5P_CLKDIV0_ARM);
- hclk = fclk / GET_DIV(clkdiv0, S5P_CLKDIV0_HCLK);
- pclk = hclk / GET_DIV(clkdiv0, S5P_CLKDIV0_PCLK);
-
- if (__raw_readl(S5P_OTHERS) & S5P_OTHERS_HCLK_LOW_SEL_MPLL) {
- /* Asynchronous mode */
- hclk_low = mpll / GET_DIV(clkdiv3, S5P_CLKDIV3_HCLK_LOW);
- } else {
- /* Synchronous mode */
- hclk_low = apll / GET_DIV(clkdiv3, S5P_CLKDIV3_HCLK_LOW);
- }
-
- pclk_low = hclk_low / GET_DIV(clkdiv3, S5P_CLKDIV3_PCLK_LOW);
+ fclk = clk_get_rate(&clk_armclk.clk);
+ hclk = clk_get_rate(&clk_hclk.clk);
+ pclk = clk_get_rate(&clk_pclk.clk);
+ hclk_low = clk_get_rate(&clk_hclk_low.clk);
+ pclk_low = clk_get_rate(&clk_pclk_low.clk);
printk(KERN_INFO "S5P6440: HCLK=%ld.%ldMHz, HCLK_LOW=%ld.%ldMHz," \
" PCLK=%ld.%ldMHz, PCLK_LOW=%ld.%ldMHz\n",
print_mhz(hclk), print_mhz(hclk_low),
print_mhz(pclk), print_mhz(pclk_low));
- clk_fout_mpll.rate = mpll;
- clk_fout_epll.rate = epll;
- clk_fout_apll.rate = apll;
-
clk_f.rate = fclk;
clk_h.rate = hclk;
clk_p.rate = pclk;
- clk_h_low.rate = hclk_low;
- clk_p_low.rate = pclk_low;
-
- for (ptr = 0; ptr < ARRAY_SIZE(init_parents); ptr++)
- s3c_set_clksrc(init_parents[ptr], true);
for (ptr = 0; ptr < ARRAY_SIZE(clksrcs); ptr++)
s3c_set_clksrc(&clksrcs[ptr], true);
@@ -661,13 +811,8 @@ void __init_or_cpufreq s5p6440_setup_clocks(void)
static struct clk *clks[] __initdata = {
&clk_ext,
- &clk_mout_epll.clk,
- &clk_mout_mpll.clk,
- &clk_dout_mpll,
&clk_iis_cd_v40,
&clk_pcm_cd,
- &clk_p_low,
- &clk_h_low,
};
void __init s5p6440_register_clocks(void)
@@ -680,6 +825,9 @@ void __init s5p6440_register_clocks(void)
if (ret > 0)
printk(KERN_ERR "Failed to register %u clocks\n", ret);
+ for (ptr = 0; ptr < ARRAY_SIZE(sysclks); ptr++)
+ s3c_register_clksrc(sysclks[ptr], 1);
+
s3c_register_clksrc(clksrcs, ARRAY_SIZE(clksrcs));
s3c_register_clocks(init_clocks, ARRAY_SIZE(init_clocks));
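
The rewrite above replaces the hand-computed hclk_low/pclk_low rates with proper clksrc_clk instances, so the whole ARMCLK -> HCLK -> PCLK (and HCLK_LOW -> PCLK_LOW) chain is now derived from the divider fields in CLK_DIV0/CLK_DIV3 rather than recalculated in s5p6440_setup_clocks(). A rough, self-contained sketch of how such a clock is expected to resolve its rate, assuming the usual plat-samsung convention that a divider field value of N means "divide by N + 1" (this is not the framework code itself):

#include <stdint.h>

static unsigned long clksrc_rate_sketch(unsigned long parent_rate,
					uint32_t div_reg, int shift, int size)
{
	uint32_t div = (div_reg >> shift) & ((1u << size) - 1);	/* field value */

	return parent_rate / (div + 1);				/* divide-by-(N+1) */
}

/* Example: with ARMCLK at 532 MHz and CLK_DIV0 bits [11:8] equal to 3,
 * clksrc_rate_sketch(532000000, div0, 8, 4) yields 133 MHz for clk_hclk. */
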
diff --git a/arch/arm/mach-s5p6440/cpu.c b/arch/arm/mach-s5p6440/cpu.c
index 1794131aeacb..b2fe6a58155a 100644
--- a/arch/arm/mach-s5p6440/cpu.c
+++ b/arch/arm/mach-s5p6440/cpu.c
@@ -61,6 +61,7 @@ static void s5p6440_idle(void)
void __init s5p6440_map_io(void)
{
/* initialize any device information early */
+ s3c_device_adc.name = "s3c64xx-adc";
}
void __init s5p6440_init_clocks(int xtal)
@@ -88,7 +89,7 @@ void __init s5p6440_init_irq(void)
s5p_init_irq(vic, ARRAY_SIZE(vic));
}
-static struct sysdev_class s5p6440_sysclass = {
+struct sysdev_class s5p6440_sysclass = {
.name = "s5p6440-core",
};
diff --git a/arch/arm/mach-s5p6440/dev-audio.c b/arch/arm/mach-s5p6440/dev-audio.c
new file mode 100644
index 000000000000..0c5367962830
--- /dev/null
+++ b/arch/arm/mach-s5p6440/dev-audio.c
@@ -0,0 +1,127 @@
+/* linux/arch/arm/mach-s5p6440/dev-audio.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co. Ltd
+ * Jaswinder Singh <jassi.brar@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+
+#include <plat/gpio-cfg.h>
+#include <plat/audio.h>
+
+#include <mach/gpio.h>
+#include <mach/map.h>
+#include <mach/dma.h>
+#include <mach/irqs.h>
+
+static int s5p6440_cfg_i2s(struct platform_device *pdev)
+{
+ /* configure GPIO for i2s port */
+ switch (pdev->id) {
+ case -1:
+ s3c_gpio_cfgpin(S5P6440_GPR(4), S3C_GPIO_SFN(5));
+ s3c_gpio_cfgpin(S5P6440_GPR(5), S3C_GPIO_SFN(5));
+ s3c_gpio_cfgpin(S5P6440_GPR(6), S3C_GPIO_SFN(5));
+ s3c_gpio_cfgpin(S5P6440_GPR(7), S3C_GPIO_SFN(5));
+ s3c_gpio_cfgpin(S5P6440_GPR(8), S3C_GPIO_SFN(5));
+ s3c_gpio_cfgpin(S5P6440_GPR(13), S3C_GPIO_SFN(5));
+ s3c_gpio_cfgpin(S5P6440_GPR(14), S3C_GPIO_SFN(5));
+ break;
+
+ default:
+ printk(KERN_ERR "Invalid Device %d\n", pdev->id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static struct s3c_audio_pdata s3c_i2s_pdata = {
+ .cfg_gpio = s5p6440_cfg_i2s,
+};
+
+static struct resource s5p6440_iis0_resource[] = {
+ [0] = {
+ .start = S5P6440_PA_I2S,
+ .end = S5P6440_PA_I2S + 0x100 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = DMACH_I2S0_TX,
+ .end = DMACH_I2S0_TX,
+ .flags = IORESOURCE_DMA,
+ },
+ [2] = {
+ .start = DMACH_I2S0_RX,
+ .end = DMACH_I2S0_RX,
+ .flags = IORESOURCE_DMA,
+ },
+};
+
+struct platform_device s5p6440_device_iis = {
+ .name = "s3c64xx-iis-v4",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(s5p6440_iis0_resource),
+ .resource = s5p6440_iis0_resource,
+ .dev = {
+ .platform_data = &s3c_i2s_pdata,
+ },
+};
+
+/* PCM Controller platform_devices */
+
+static int s5p6440_pcm_cfg_gpio(struct platform_device *pdev)
+{
+ switch (pdev->id) {
+ case 0:
+ s3c_gpio_cfgpin(S5P6440_GPR(7), S3C_GPIO_SFN(2));
+ s3c_gpio_cfgpin(S5P6440_GPR(13), S3C_GPIO_SFN(2));
+ s3c_gpio_cfgpin(S5P6440_GPR(14), S3C_GPIO_SFN(2));
+ s3c_gpio_cfgpin(S5P6440_GPR(8), S3C_GPIO_SFN(2));
+ s3c_gpio_cfgpin(S5P6440_GPR(6), S3C_GPIO_SFN(2));
+ break;
+
+ default:
+ printk(KERN_DEBUG "Invalid PCM Controller number!");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static struct s3c_audio_pdata s3c_pcm_pdata = {
+ .cfg_gpio = s5p6440_pcm_cfg_gpio,
+};
+
+static struct resource s5p6440_pcm0_resource[] = {
+ [0] = {
+ .start = S5P6440_PA_PCM,
+ .end = S5P6440_PA_PCM + 0x100 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = DMACH_PCM0_TX,
+ .end = DMACH_PCM0_TX,
+ .flags = IORESOURCE_DMA,
+ },
+ [2] = {
+ .start = DMACH_PCM0_RX,
+ .end = DMACH_PCM0_RX,
+ .flags = IORESOURCE_DMA,
+ },
+};
+
+struct platform_device s5p6440_device_pcm = {
+ .name = "samsung-pcm",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(s5p6440_pcm0_resource),
+ .resource = s5p6440_pcm0_resource,
+ .dev = {
+ .platform_data = &s3c_pcm_pdata,
+ },
+};
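
Both the I2S and PCM devices above follow the same pattern: the memory resource carries the register block, the two DMA resources carry the TX/RX request-channel ids from mach/dma.h, and the s3c_audio_pdata only supplies a cfg_gpio() hook that the driver calls at probe time to mux the pins. A sketch of how a driver is expected to consume that platform data (illustrative only; the real "s3c64xx-iis-v4" and "samsung-pcm" drivers differ in detail):

static int example_audio_probe(struct platform_device *pdev)
{
	struct s3c_audio_pdata *pdata = pdev->dev.platform_data;

	/* let the machine code mux the I2S/PCM pins for this controller */
	if (pdata && pdata->cfg_gpio && pdata->cfg_gpio(pdev))
		return -EINVAL;

	/* resource 0 (IORESOURCE_MEM): register block to ioremap;
	 * resources 1 and 2 (IORESOURCE_DMA): TX and RX DMA channel ids. */
	return 0;
}
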
diff --git a/arch/arm/mach-s5p6440/dev-spi.c b/arch/arm/mach-s5p6440/dev-spi.c
new file mode 100644
index 000000000000..0a30280019c0
--- /dev/null
+++ b/arch/arm/mach-s5p6440/dev-spi.c
@@ -0,0 +1,176 @@
+/* linux/arch/arm/mach-s5p6440/dev-spi.c
+ *
+ * Copyright (C) 2010 Samsung Electronics Co. Ltd.
+ * Jaswinder Singh <jassi.brar@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+
+#include <mach/dma.h>
+#include <mach/map.h>
+#include <mach/irqs.h>
+#include <mach/gpio.h>
+#include <mach/spi-clocks.h>
+
+#include <plat/s3c64xx-spi.h>
+#include <plat/gpio-cfg.h>
+
+static char *spi_src_clks[] = {
+ [S5P6440_SPI_SRCCLK_PCLK] = "pclk",
+ [S5P6440_SPI_SRCCLK_SCLK] = "spi_epll",
+};
+
+/* SPI Controller platform_devices */
+
+/* Since we emulate multi-cs capability, we do not touch the CS.
+ * The emulated CS is toggled by a board-specific mechanism, as it can
+ * be either an immediate GPIO or a signal from some other chip in
+ * between, or yet another way.
+ * We simply do not assume anything about CS.
+ */
+static int s5p6440_spi_cfg_gpio(struct platform_device *pdev)
+{
+ switch (pdev->id) {
+ case 0:
+ s3c_gpio_cfgpin(S5P6440_GPC(0), S3C_GPIO_SFN(2));
+ s3c_gpio_cfgpin(S5P6440_GPC(1), S3C_GPIO_SFN(2));
+ s3c_gpio_cfgpin(S5P6440_GPC(2), S3C_GPIO_SFN(2));
+ s3c_gpio_setpull(S5P6440_GPC(0), S3C_GPIO_PULL_UP);
+ s3c_gpio_setpull(S5P6440_GPC(1), S3C_GPIO_PULL_UP);
+ s3c_gpio_setpull(S5P6440_GPC(2), S3C_GPIO_PULL_UP);
+ break;
+
+ case 1:
+ s3c_gpio_cfgpin(S5P6440_GPC(4), S3C_GPIO_SFN(2));
+ s3c_gpio_cfgpin(S5P6440_GPC(5), S3C_GPIO_SFN(2));
+ s3c_gpio_cfgpin(S5P6440_GPC(6), S3C_GPIO_SFN(2));
+ s3c_gpio_setpull(S5P6440_GPC(4), S3C_GPIO_PULL_UP);
+ s3c_gpio_setpull(S5P6440_GPC(5), S3C_GPIO_PULL_UP);
+ s3c_gpio_setpull(S5P6440_GPC(6), S3C_GPIO_PULL_UP);
+ break;
+
+ default:
+ dev_err(&pdev->dev, "Invalid SPI Controller number!");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static struct resource s5p6440_spi0_resource[] = {
+ [0] = {
+ .start = S5P6440_PA_SPI0,
+ .end = S5P6440_PA_SPI0 + 0x100 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = DMACH_SPI0_TX,
+ .end = DMACH_SPI0_TX,
+ .flags = IORESOURCE_DMA,
+ },
+ [2] = {
+ .start = DMACH_SPI0_RX,
+ .end = DMACH_SPI0_RX,
+ .flags = IORESOURCE_DMA,
+ },
+ [3] = {
+ .start = IRQ_SPI0,
+ .end = IRQ_SPI0,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct s3c64xx_spi_info s5p6440_spi0_pdata = {
+ .cfg_gpio = s5p6440_spi_cfg_gpio,
+ .fifo_lvl_mask = 0x1ff,
+ .rx_lvl_offset = 15,
+};
+
+static u64 spi_dmamask = DMA_BIT_MASK(32);
+
+struct platform_device s5p6440_device_spi0 = {
+ .name = "s3c64xx-spi",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(s5p6440_spi0_resource),
+ .resource = s5p6440_spi0_resource,
+ .dev = {
+ .dma_mask = &spi_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = &s5p6440_spi0_pdata,
+ },
+};
+
+static struct resource s5p6440_spi1_resource[] = {
+ [0] = {
+ .start = S5P6440_PA_SPI1,
+ .end = S5P6440_PA_SPI1 + 0x100 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = DMACH_SPI1_TX,
+ .end = DMACH_SPI1_TX,
+ .flags = IORESOURCE_DMA,
+ },
+ [2] = {
+ .start = DMACH_SPI1_RX,
+ .end = DMACH_SPI1_RX,
+ .flags = IORESOURCE_DMA,
+ },
+ [3] = {
+ .start = IRQ_SPI1,
+ .end = IRQ_SPI1,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct s3c64xx_spi_info s5p6440_spi1_pdata = {
+ .cfg_gpio = s5p6440_spi_cfg_gpio,
+ .fifo_lvl_mask = 0x7f,
+ .rx_lvl_offset = 15,
+};
+
+struct platform_device s5p6440_device_spi1 = {
+ .name = "s3c64xx-spi",
+ .id = 1,
+ .num_resources = ARRAY_SIZE(s5p6440_spi1_resource),
+ .resource = s5p6440_spi1_resource,
+ .dev = {
+ .dma_mask = &spi_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = &s5p6440_spi1_pdata,
+ },
+};
+
+void __init s5p6440_spi_set_info(int cntrlr, int src_clk_nr, int num_cs)
+{
+ struct s3c64xx_spi_info *pd;
+
+ /* Reject invalid configuration */
+ if (!num_cs || src_clk_nr < 0
+ || src_clk_nr > S5P6440_SPI_SRCCLK_SCLK) {
+ printk(KERN_ERR "%s: Invalid SPI configuration\n", __func__);
+ return;
+ }
+
+ switch (cntrlr) {
+ case 0:
+ pd = &s5p6440_spi0_pdata;
+ break;
+ case 1:
+ pd = &s5p6440_spi1_pdata;
+ break;
+ default:
+ printk(KERN_ERR "%s: Invalid SPI controller(%d)\n",
+ __func__, cntrlr);
+ return;
+ }
+
+ pd->num_cs = num_cs;
+ pd->src_clk_nr = src_clk_nr;
+ pd->src_clk_name = spi_src_clks[src_clk_nr];
+}
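
s5p6440_spi_set_info() is the hook a board file uses to finish the static platform data above: it fills in the chip-select count and resolves the source-clock index to a clock name before the device is registered. A hypothetical caller (board name and chip-select count invented for illustration):

static void __init myboard_spi_init(void)
{
	/* controller 0, SCLK ("spi_epll") as source, two emulated chip selects */
	s5p6440_spi_set_info(0, S5P6440_SPI_SRCCLK_SCLK, 2);
	platform_device_register(&s5p6440_device_spi0);
}
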
diff --git a/arch/arm/mach-s5p6440/dma.c b/arch/arm/mach-s5p6440/dma.c
new file mode 100644
index 000000000000..07606ad57519
--- /dev/null
+++ b/arch/arm/mach-s5p6440/dma.c
@@ -0,0 +1,105 @@
+/*
+ * Copyright (C) 2010 Samsung Electronics Co. Ltd.
+ * Jaswinder Singh <jassi.brar@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+
+#include <plat/devs.h>
+#include <plat/irqs.h>
+
+#include <mach/map.h>
+#include <mach/irqs.h>
+
+#include <plat/s3c-pl330-pdata.h>
+
+static u64 dma_dmamask = DMA_BIT_MASK(32);
+
+static struct resource s5p6440_pdma_resource[] = {
+ [0] = {
+ .start = S5P6440_PA_PDMA,
+ .end = S5P6440_PA_PDMA + SZ_4K,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = IRQ_DMA0,
+ .end = IRQ_DMA0,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct s3c_pl330_platdata s5p6440_pdma_pdata = {
+ .peri = {
+ [0] = DMACH_UART0_RX,
+ [1] = DMACH_UART0_TX,
+ [2] = DMACH_UART1_RX,
+ [3] = DMACH_UART1_TX,
+ [4] = DMACH_UART2_RX,
+ [5] = DMACH_UART2_TX,
+ [6] = DMACH_UART3_RX,
+ [7] = DMACH_UART3_TX,
+ [8] = DMACH_MAX,
+ [9] = DMACH_MAX,
+ [10] = DMACH_PCM0_TX,
+ [11] = DMACH_PCM0_RX,
+ [12] = DMACH_I2S0_TX,
+ [13] = DMACH_I2S0_RX,
+ [14] = DMACH_SPI0_TX,
+ [15] = DMACH_SPI0_RX,
+ [16] = DMACH_MAX,
+ [17] = DMACH_MAX,
+ [18] = DMACH_MAX,
+ [19] = DMACH_MAX,
+ [20] = DMACH_SPI1_TX,
+ [21] = DMACH_SPI1_RX,
+ [22] = DMACH_MAX,
+ [23] = DMACH_MAX,
+ [24] = DMACH_MAX,
+ [25] = DMACH_MAX,
+ [26] = DMACH_MAX,
+ [27] = DMACH_MAX,
+ [28] = DMACH_MAX,
+ [29] = DMACH_PWM,
+ [30] = DMACH_MAX,
+ [31] = DMACH_MAX,
+ },
+};
+
+static struct platform_device s5p6440_device_pdma = {
+ .name = "s3c-pl330",
+ .id = 1,
+ .num_resources = ARRAY_SIZE(s5p6440_pdma_resource),
+ .resource = s5p6440_pdma_resource,
+ .dev = {
+ .dma_mask = &dma_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = &s5p6440_pdma_pdata,
+ },
+};
+
+static struct platform_device *s5p6440_dmacs[] __initdata = {
+ &s5p6440_device_pdma,
+};
+
+static int __init s5p6440_dma_init(void)
+{
+ platform_add_devices(s5p6440_dmacs, ARRAY_SIZE(s5p6440_dmacs));
+
+ return 0;
+}
+arch_initcall(s5p6440_dma_init);
diff --git a/arch/arm/mach-s5p6440/gpio.c b/arch/arm/mach-s5p6440/gpio.c
index b0ea741177ad..92efc05b1ba2 100644
--- a/arch/arm/mach-s5p6440/gpio.c
+++ b/arch/arm/mach-s5p6440/gpio.c
@@ -46,6 +46,7 @@ static int s5p6440_gpiolib_rbank_4bit2_input(struct gpio_chip *chip,
void __iomem *base = ourchip->base;
void __iomem *regcon = base;
unsigned long con;
+ unsigned long flags;
switch (offset) {
case 6:
@@ -63,10 +64,14 @@ static int s5p6440_gpiolib_rbank_4bit2_input(struct gpio_chip *chip,
break;
}
+ s3c_gpio_lock(ourchip, flags);
+
con = __raw_readl(regcon);
con &= ~(0xf << con_4bit_shift(offset));
__raw_writel(con, regcon);
+ s3c_gpio_unlock(ourchip, flags);
+
return 0;
}
@@ -78,6 +83,7 @@ static int s5p6440_gpiolib_rbank_4bit2_output(struct gpio_chip *chip,
void __iomem *regcon = base;
unsigned long con;
unsigned long dat;
+ unsigned long flags;
unsigned con_offset = offset;
switch (con_offset) {
@@ -96,6 +102,8 @@ static int s5p6440_gpiolib_rbank_4bit2_output(struct gpio_chip *chip,
break;
}
+ s3c_gpio_lock(ourchip, flags);
+
con = __raw_readl(regcon);
con &= ~(0xf << con_4bit_shift(con_offset));
con |= 0x1 << con_4bit_shift(con_offset);
@@ -109,6 +117,8 @@ static int s5p6440_gpiolib_rbank_4bit2_output(struct gpio_chip *chip,
__raw_writel(con, regcon);
__raw_writel(dat, base + GPIODAT_OFF);
+ s3c_gpio_unlock(ourchip, flags);
+
return 0;
}
@@ -117,6 +127,7 @@ int s5p6440_gpio_setcfg_4bit_rbank(struct s3c_gpio_chip *chip,
{
void __iomem *reg = chip->base;
unsigned int shift;
+ unsigned long flags;
u32 con;
switch (off) {
@@ -142,11 +153,15 @@ int s5p6440_gpio_setcfg_4bit_rbank(struct s3c_gpio_chip *chip,
cfg <<= shift;
}
+ s3c_gpio_lock(chip, flags);
+
con = __raw_readl(reg);
con &= ~(0xf << shift);
con |= cfg;
__raw_writel(con, reg);
+ s3c_gpio_unlock(chip, flags);
+
return 0;
}
@@ -161,12 +176,15 @@ static struct s3c_gpio_cfg s5p6440_gpio_cfgs[] = {
}, {
.cfg_eint = 0,
.set_config = s3c_gpio_setcfg_s3c24xx,
+ .get_config = s3c_gpio_getcfg_s3c24xx,
}, {
.cfg_eint = 2,
.set_config = s3c_gpio_setcfg_s3c24xx,
+ .get_config = s3c_gpio_getcfg_s3c24xx,
}, {
.cfg_eint = 3,
.set_config = s3c_gpio_setcfg_s3c24xx,
+ .get_config = s3c_gpio_getcfg_s3c24xx,
},
};
@@ -279,6 +297,8 @@ void __init s5p6440_gpiolib_set_cfg(struct s3c_gpio_cfg *chipcfg, int nr_chips)
for (; nr_chips > 0; nr_chips--, chipcfg++) {
if (!chipcfg->set_config)
chipcfg->set_config = s3c_gpio_setcfg_s3c64xx_4bit;
+ if (!chipcfg->get_config)
+ chipcfg->get_config = s3c_gpio_getcfg_s3c64xx_4bit;
if (!chipcfg->set_pull)
chipcfg->set_pull = s3c_gpio_setpull_updown;
if (!chipcfg->get_pull)
diff --git a/arch/arm/mach-s5p6440/include/mach/dma.h b/arch/arm/mach-s5p6440/include/mach/dma.h
new file mode 100644
index 000000000000..81209eb1409b
--- /dev/null
+++ b/arch/arm/mach-s5p6440/include/mach/dma.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2010 Samsung Electronics Co. Ltd.
+ * Jaswinder Singh <jassi.brar@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __MACH_DMA_H
+#define __MACH_DMA_H
+
+/* This platform uses the common S3C DMA API driver for PL330 */
+#include <plat/s3c-dma-pl330.h>
+
+#endif /* __MACH_DMA_H */
diff --git a/arch/arm/mach-s5p6440/include/mach/irqs.h b/arch/arm/mach-s5p6440/include/mach/irqs.h
index a4b9b40d18f2..911854d9ad42 100644
--- a/arch/arm/mach-s5p6440/include/mach/irqs.h
+++ b/arch/arm/mach-s5p6440/include/mach/irqs.h
@@ -72,7 +72,14 @@
#define S5P_IRQ_EINT_BASE (S5P_IRQ_VIC1(31) + 6)
#define S5P_EINT(x) ((x) + S5P_IRQ_EINT_BASE)
-#define IRQ_EINT(x) S5P_EINT(x)
+
+#define S5P_EINT_BASE1 (S5P_IRQ_EINT_BASE)
+/*
+ * S5P6440 has 0-15 external interrupts in group 0. Only these can be used
+ * to wake up from sleep. If request is beyond this range, by mistake, a large
+ * return value for an irq number should be indication of something amiss.
+ */
+#define S5P_EINT_BASE2 (0xf0000000)
/*
* Next the external interrupt groups. These are similar to the IRQ_EINT(x)
diff --git a/arch/arm/mach-s5p6440/include/mach/map.h b/arch/arm/mach-s5p6440/include/mach/map.h
index 8924e5a4d6a6..44011b91fbd1 100644
--- a/arch/arm/mach-s5p6440/include/mach/map.h
+++ b/arch/arm/mach-s5p6440/include/mach/map.h
@@ -29,6 +29,8 @@
#define S5P6440_PA_VIC0 (0xE4000000)
#define S5P_PA_VIC0 S5P6440_PA_VIC0
+#define S5P6440_PA_PDMA 0xE9000000
+
#define S5P6440_PA_VIC1 (0xE4100000)
#define S5P_PA_VIC1 S5P6440_PA_VIC1
@@ -52,6 +54,9 @@
#define S5P6440_PA_IIC0 (0xEC104000)
+#define S5P6440_PA_SPI0 0xEC400000
+#define S5P6440_PA_SPI1 0xEC500000
+
#define S5P6440_PA_HSOTG (0xED100000)
#define S5P6440_PA_HSMMC0 (0xED800000)
@@ -61,8 +66,19 @@
#define S5P6440_PA_SDRAM (0x20000000)
#define S5P_PA_SDRAM S5P6440_PA_SDRAM
+/* I2S */
+#define S5P6440_PA_I2S 0xF2000000
+
+/* PCM */
+#define S5P6440_PA_PCM 0xF2100000
+
+#define S5P6440_PA_ADC (0xF3000000)
+
/* compatibility defines. */
#define S3C_PA_UART S5P6440_PA_UART
#define S3C_PA_IIC S5P6440_PA_IIC0
+#define S3C_PA_WDT S5P6440_PA_WDT
+
+#define SAMSUNG_PA_ADC S5P6440_PA_ADC
#endif /* __ASM_ARCH_MAP_H */
diff --git a/arch/arm/mach-s5p6440/include/mach/pwm-clock.h b/arch/arm/mach-s5p6440/include/mach/pwm-clock.h
index c4bb7c555477..6a2a02fdf12a 100644
--- a/arch/arm/mach-s5p6440/include/mach/pwm-clock.h
+++ b/arch/arm/mach-s5p6440/include/mach/pwm-clock.h
@@ -1,11 +1,14 @@
/* linux/arch/arm/mach-s5p6440/include/mach/pwm-clock.h
*
+ * Copyright (c) 2009 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Copyright 2008 Openmoko, Inc.
* Copyright 2008 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
* http://armlinux.simtec.co.uk/
*
- * Copyright 2009 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
+ * Based on arch/arm/mach-s3c64xx/include/mach/pwm-clock.h
*
* S5P6440 - pwm clock and timer support
*
@@ -14,16 +17,19 @@
* published by the Free Software Foundation.
*/
+#ifndef __ASM_ARCH_PWMCLK_H
+#define __ASM_ARCH_PWMCLK_H __FILE__
+
/**
* pwm_cfg_src_is_tclk() - return whether the given mux config is a tclk
- * @cfg: The timer TCFG1 register bits shifted down to 0.
+ * @tcfg: The timer TCFG1 register bits shifted down to 0.
*
 * Return true if the given configuration from TCFG1 is a TCLK instead of
 * any of the TDIV clocks.
*/
static inline int pwm_cfg_src_is_tclk(unsigned long tcfg)
{
- return tcfg == S3C2410_TCFG1_MUX_TCLK;
+ return 0;
}
/**
@@ -35,7 +41,7 @@ static inline int pwm_cfg_src_is_tclk(unsigned long tcfg)
*/
static inline unsigned long tcfg_to_divisor(unsigned long tcfg1)
{
- return 1 << (1 + tcfg1);
+ return 1 << tcfg1;
}
/**
@@ -45,7 +51,7 @@ static inline unsigned long tcfg_to_divisor(unsigned long tcfg1)
*/
static inline unsigned int pwm_tdiv_has_div1(void)
{
- return 0;
+ return 1;
}
/**
@@ -56,7 +62,9 @@ static inline unsigned int pwm_tdiv_has_div1(void)
*/
static inline unsigned long pwm_tdiv_div_bits(unsigned int div)
{
- return ilog2(div) - 1;
+ return ilog2(div);
}
-#define S3C_TCFG1_MUX_TCLK S3C2410_TCFG1_MUX_TCLK
+#define S3C_TCFG1_MUX_TCLK 0
+
+#endif /* __ASM_ARCH_PWMCLK_H */
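
The S5P6440 overrides above reflect a PWM prescaler layout that differs from S3C24xx: there is no external TCLK mux input (pwm_cfg_src_is_tclk() is always false and S3C_TCFG1_MUX_TCLK is 0), a divide-by-1 setting exists, and a TCFG1 field value of N simply means divide by 2^N, so tcfg_to_divisor() and pwm_tdiv_div_bits() are exact inverses. A standalone userspace check of that round trip:

#include <stdio.h>

static unsigned long tcfg_to_divisor(unsigned long tcfg1)
{
	return 1UL << tcfg1;			/* field N -> divide by 2^N */
}

static unsigned long pwm_tdiv_div_bits(unsigned int div)
{
	unsigned long bits = 0;			/* open-coded ilog2(div) */

	while (div > 1) {
		div >>= 1;
		bits++;
	}
	return bits;
}

int main(void)
{
	unsigned int field;

	for (field = 0; field <= 4; field++)
		printf("TCFG1 field %u -> /%lu -> bits %lu\n", field,
		       tcfg_to_divisor(field),
		       pwm_tdiv_div_bits(tcfg_to_divisor(field)));
	return 0;
}
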
diff --git a/arch/arm/mach-s5p6440/include/mach/spi-clocks.h b/arch/arm/mach-s5p6440/include/mach/spi-clocks.h
new file mode 100644
index 000000000000..5fbca50d1cfb
--- /dev/null
+++ b/arch/arm/mach-s5p6440/include/mach/spi-clocks.h
@@ -0,0 +1,17 @@
+/* linux/arch/arm/mach-s5p6440/include/mach/spi-clocks.h
+ *
+ * Copyright (C) 2010 Samsung Electronics Co. Ltd.
+ * Jaswinder Singh <jassi.brar@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __S5P6440_PLAT_SPI_CLKS_H
+#define __S5P6440_PLAT_SPI_CLKS_H __FILE__
+
+#define S5P6440_SPI_SRCCLK_PCLK 0
+#define S5P6440_SPI_SRCCLK_SCLK 1
+
+#endif /* __S5P6440_PLAT_SPI_CLKS_H */
diff --git a/arch/arm/mach-s5p6440/mach-smdk6440.c b/arch/arm/mach-s5p6440/mach-smdk6440.c
index 3ae88f2c7c77..8291fecc701a 100644
--- a/arch/arm/mach-s5p6440/mach-smdk6440.c
+++ b/arch/arm/mach-s5p6440/mach-smdk6440.c
@@ -38,6 +38,8 @@
#include <plat/devs.h>
#include <plat/cpu.h>
#include <plat/pll.h>
+#include <plat/adc.h>
+#include <plat/ts.h>
#define S5P6440_UCON_DEFAULT (S3C2410_UCON_TXILEVEL | \
S3C2410_UCON_RXILEVEL | \
@@ -84,6 +86,16 @@ static struct s3c2410_uartcfg smdk6440_uartcfgs[] __initdata = {
};
static struct platform_device *smdk6440_devices[] __initdata = {
+ &s5p6440_device_iis,
+ &s3c_device_adc,
+ &s3c_device_ts,
+ &s3c_device_wdt,
+};
+
+static struct s3c2410_ts_mach_info s3c_ts_platform __initdata = {
+ .delay = 10000,
+ .presc = 49,
+ .oversampling_shift = 2,
};
static void __init smdk6440_map_io(void)
@@ -95,6 +107,8 @@ static void __init smdk6440_map_io(void)
static void __init smdk6440_machine_init(void)
{
+ s3c24xx_ts_set_platdata(&s3c_ts_platform);
+
platform_add_devices(smdk6440_devices, ARRAY_SIZE(smdk6440_devices));
}
diff --git a/arch/arm/plat-s5p/setup-i2c0.c b/arch/arm/mach-s5p6440/setup-i2c0.c
index 67a66e02a97a..69e8a664aedb 100644
--- a/arch/arm/plat-s5p/setup-i2c0.c
+++ b/arch/arm/mach-s5p6440/setup-i2c0.c
@@ -1,4 +1,4 @@
-/* linux/arch/arm/plat-s5p/setup-i2c0.c
+/* linux/arch/arm/mach-s5p6440/setup-i2c0.c
*
* Copyright (c) 2009 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
diff --git a/arch/arm/mach-s5p6442/Kconfig b/arch/arm/mach-s5p6442/Kconfig
index 4f3f6de6a013..0fd41b447915 100644
--- a/arch/arm/mach-s5p6442/Kconfig
+++ b/arch/arm/mach-s5p6442/Kconfig
@@ -12,6 +12,7 @@ if ARCH_S5P6442
config CPU_S5P6442
bool
select PLAT_S5P
+ select S3C_PL330_DMA
help
Enable S5P6442 CPU support
diff --git a/arch/arm/mach-s5p6442/Makefile b/arch/arm/mach-s5p6442/Makefile
index dde39a6ce6bc..90a3d8373416 100644
--- a/arch/arm/mach-s5p6442/Makefile
+++ b/arch/arm/mach-s5p6442/Makefile
@@ -12,8 +12,13 @@ obj- :=
# Core support for S5P6442 system
-obj-$(CONFIG_CPU_S5P6442) += cpu.o init.o clock.o
+obj-$(CONFIG_CPU_S5P6442) += cpu.o init.o clock.o dma.o
+obj-$(CONFIG_CPU_S5P6442) += setup-i2c0.o
# machine support
obj-$(CONFIG_MACH_SMDK6442) += mach-smdk6442.o
+
+# device support
+obj-y += dev-audio.o
+obj-$(CONFIG_S3C64XX_DEV_SPI) += dev-spi.o
diff --git a/arch/arm/mach-s5p6442/cpu.c b/arch/arm/mach-s5p6442/cpu.c
index bc2524df89b3..a48fb553fd01 100644
--- a/arch/arm/mach-s5p6442/cpu.c
+++ b/arch/arm/mach-s5p6442/cpu.c
@@ -95,7 +95,7 @@ void __init s5p6442_init_irq(void)
s5p_init_irq(vic, ARRAY_SIZE(vic));
}
-static struct sysdev_class s5p6442_sysclass = {
+struct sysdev_class s5p6442_sysclass = {
.name = "s5p6442-core",
};
diff --git a/arch/arm/mach-s5p6442/dev-audio.c b/arch/arm/mach-s5p6442/dev-audio.c
new file mode 100644
index 000000000000..cb801e1f5e23
--- /dev/null
+++ b/arch/arm/mach-s5p6442/dev-audio.c
@@ -0,0 +1,197 @@
+/* linux/arch/arm/mach-s5p6442/dev-audio.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co. Ltd
+ * Jaswinder Singh <jassi.brar@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+
+#include <plat/gpio-cfg.h>
+#include <plat/audio.h>
+
+#include <mach/gpio.h>
+#include <mach/map.h>
+#include <mach/dma.h>
+#include <mach/irqs.h>
+
+static int s5p6442_cfg_i2s(struct platform_device *pdev)
+{
+ /* configure GPIO for i2s port */
+ switch (pdev->id) {
+ case 1:
+ s3c_gpio_cfgpin(S5P6442_GPC1(0), S3C_GPIO_SFN(2));
+ s3c_gpio_cfgpin(S5P6442_GPC1(1), S3C_GPIO_SFN(2));
+ s3c_gpio_cfgpin(S5P6442_GPC1(2), S3C_GPIO_SFN(2));
+ s3c_gpio_cfgpin(S5P6442_GPC1(3), S3C_GPIO_SFN(2));
+ s3c_gpio_cfgpin(S5P6442_GPC1(4), S3C_GPIO_SFN(2));
+ break;
+
+ case -1:
+ s3c_gpio_cfgpin(S5P6442_GPC0(0), S3C_GPIO_SFN(2));
+ s3c_gpio_cfgpin(S5P6442_GPC0(1), S3C_GPIO_SFN(2));
+ s3c_gpio_cfgpin(S5P6442_GPC0(2), S3C_GPIO_SFN(2));
+ s3c_gpio_cfgpin(S5P6442_GPC0(3), S3C_GPIO_SFN(2));
+ s3c_gpio_cfgpin(S5P6442_GPC0(4), S3C_GPIO_SFN(2));
+ break;
+
+ default:
+ printk(KERN_ERR "Invalid Device %d\n", pdev->id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static struct s3c_audio_pdata s3c_i2s_pdata = {
+ .cfg_gpio = s5p6442_cfg_i2s,
+};
+
+static struct resource s5p6442_iis0_resource[] = {
+ [0] = {
+ .start = S5P6442_PA_I2S0,
+ .end = S5P6442_PA_I2S0 + 0x100 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = DMACH_I2S0_TX,
+ .end = DMACH_I2S0_TX,
+ .flags = IORESOURCE_DMA,
+ },
+ [2] = {
+ .start = DMACH_I2S0_RX,
+ .end = DMACH_I2S0_RX,
+ .flags = IORESOURCE_DMA,
+ },
+};
+
+struct platform_device s5p6442_device_iis0 = {
+ .name = "s3c64xx-iis-v4",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(s5p6442_iis0_resource),
+ .resource = s5p6442_iis0_resource,
+ .dev = {
+ .platform_data = &s3c_i2s_pdata,
+ },
+};
+
+static struct resource s5p6442_iis1_resource[] = {
+ [0] = {
+ .start = S5P6442_PA_I2S1,
+ .end = S5P6442_PA_I2S1 + 0x100 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = DMACH_I2S1_TX,
+ .end = DMACH_I2S1_TX,
+ .flags = IORESOURCE_DMA,
+ },
+ [2] = {
+ .start = DMACH_I2S1_RX,
+ .end = DMACH_I2S1_RX,
+ .flags = IORESOURCE_DMA,
+ },
+};
+
+struct platform_device s5p6442_device_iis1 = {
+ .name = "s3c64xx-iis",
+ .id = 1,
+ .num_resources = ARRAY_SIZE(s5p6442_iis1_resource),
+ .resource = s5p6442_iis1_resource,
+ .dev = {
+ .platform_data = &s3c_i2s_pdata,
+ },
+};
+
+/* PCM Controller platform_devices */
+
+static int s5p6442_pcm_cfg_gpio(struct platform_device *pdev)
+{
+ switch (pdev->id) {
+ case 0:
+ s3c_gpio_cfgpin(S5P6442_GPC0(0), S3C_GPIO_SFN(3));
+ s3c_gpio_cfgpin(S5P6442_GPC0(1), S3C_GPIO_SFN(3));
+ s3c_gpio_cfgpin(S5P6442_GPC0(2), S3C_GPIO_SFN(3));
+ s3c_gpio_cfgpin(S5P6442_GPC0(3), S3C_GPIO_SFN(3));
+ s3c_gpio_cfgpin(S5P6442_GPC0(4), S3C_GPIO_SFN(3));
+ break;
+
+ case 1:
+ s3c_gpio_cfgpin(S5P6442_GPC1(0), S3C_GPIO_SFN(3));
+ s3c_gpio_cfgpin(S5P6442_GPC1(1), S3C_GPIO_SFN(3));
+ s3c_gpio_cfgpin(S5P6442_GPC1(2), S3C_GPIO_SFN(3));
+ s3c_gpio_cfgpin(S5P6442_GPC1(3), S3C_GPIO_SFN(3));
+ s3c_gpio_cfgpin(S5P6442_GPC1(4), S3C_GPIO_SFN(3));
+ break;
+
+ default:
+ printk(KERN_DEBUG "Invalid PCM Controller number!");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static struct s3c_audio_pdata s3c_pcm_pdata = {
+ .cfg_gpio = s5p6442_pcm_cfg_gpio,
+};
+
+static struct resource s5p6442_pcm0_resource[] = {
+ [0] = {
+ .start = S5P6442_PA_PCM0,
+ .end = S5P6442_PA_PCM0 + 0x100 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = DMACH_PCM0_TX,
+ .end = DMACH_PCM0_TX,
+ .flags = IORESOURCE_DMA,
+ },
+ [2] = {
+ .start = DMACH_PCM0_RX,
+ .end = DMACH_PCM0_RX,
+ .flags = IORESOURCE_DMA,
+ },
+};
+
+struct platform_device s5p6442_device_pcm0 = {
+ .name = "samsung-pcm",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(s5p6442_pcm0_resource),
+ .resource = s5p6442_pcm0_resource,
+ .dev = {
+ .platform_data = &s3c_pcm_pdata,
+ },
+};
+
+static struct resource s5p6442_pcm1_resource[] = {
+ [0] = {
+ .start = S5P6442_PA_PCM1,
+ .end = S5P6442_PA_PCM1 + 0x100 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = DMACH_PCM1_TX,
+ .end = DMACH_PCM1_TX,
+ .flags = IORESOURCE_DMA,
+ },
+ [2] = {
+ .start = DMACH_PCM1_RX,
+ .end = DMACH_PCM1_RX,
+ .flags = IORESOURCE_DMA,
+ },
+};
+
+struct platform_device s5p6442_device_pcm1 = {
+ .name = "samsung-pcm",
+ .id = 1,
+ .num_resources = ARRAY_SIZE(s5p6442_pcm1_resource),
+ .resource = s5p6442_pcm1_resource,
+ .dev = {
+ .platform_data = &s3c_pcm_pdata,
+ },
+};
diff --git a/arch/arm/mach-s5p6442/dev-spi.c b/arch/arm/mach-s5p6442/dev-spi.c
new file mode 100644
index 000000000000..30199525daca
--- /dev/null
+++ b/arch/arm/mach-s5p6442/dev-spi.c
@@ -0,0 +1,123 @@
+/* linux/arch/arm/mach-s5p6442/dev-spi.c
+ *
+ * Copyright (C) 2010 Samsung Electronics Co. Ltd.
+ * Jaswinder Singh <jassi.brar@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+
+#include <mach/dma.h>
+#include <mach/map.h>
+#include <mach/irqs.h>
+#include <mach/gpio.h>
+#include <mach/spi-clocks.h>
+
+#include <plat/s3c64xx-spi.h>
+#include <plat/gpio-cfg.h>
+
+static char *spi_src_clks[] = {
+ [S5P6442_SPI_SRCCLK_PCLK] = "pclk",
+ [S5P6442_SPI_SRCCLK_SCLK] = "spi_epll",
+};
+
+/* SPI Controller platform_devices */
+
+/* Since we emulate multi-cs capability, we do not touch the CS.
+ * The emulated CS is toggled by a board-specific mechanism, as it can
+ * be either an immediate GPIO or a signal from some other chip in
+ * between, or yet another way.
+ * We simply do not assume anything about CS.
+ */
+static int s5p6442_spi_cfg_gpio(struct platform_device *pdev)
+{
+ switch (pdev->id) {
+ case 0:
+ s3c_gpio_cfgpin(S5P6442_GPB(0), S3C_GPIO_SFN(2));
+ s3c_gpio_cfgpin(S5P6442_GPB(2), S3C_GPIO_SFN(2));
+ s3c_gpio_cfgpin(S5P6442_GPB(3), S3C_GPIO_SFN(2));
+ s3c_gpio_setpull(S5P6442_GPB(0), S3C_GPIO_PULL_UP);
+ s3c_gpio_setpull(S5P6442_GPB(2), S3C_GPIO_PULL_UP);
+ s3c_gpio_setpull(S5P6442_GPB(3), S3C_GPIO_PULL_UP);
+ break;
+
+ default:
+ dev_err(&pdev->dev, "Invalid SPI Controller number!");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static struct resource s5p6442_spi0_resource[] = {
+ [0] = {
+ .start = S5P6442_PA_SPI,
+ .end = S5P6442_PA_SPI + 0x100 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = DMACH_SPI0_TX,
+ .end = DMACH_SPI0_TX,
+ .flags = IORESOURCE_DMA,
+ },
+ [2] = {
+ .start = DMACH_SPI0_RX,
+ .end = DMACH_SPI0_RX,
+ .flags = IORESOURCE_DMA,
+ },
+ [3] = {
+ .start = IRQ_SPI0,
+ .end = IRQ_SPI0,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct s3c64xx_spi_info s5p6442_spi0_pdata = {
+ .cfg_gpio = s5p6442_spi_cfg_gpio,
+ .fifo_lvl_mask = 0x1ff,
+ .rx_lvl_offset = 15,
+};
+
+static u64 spi_dmamask = DMA_BIT_MASK(32);
+
+struct platform_device s5p6442_device_spi = {
+ .name = "s3c64xx-spi",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(s5p6442_spi0_resource),
+ .resource = s5p6442_spi0_resource,
+ .dev = {
+ .dma_mask = &spi_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = &s5p6442_spi0_pdata,
+ },
+};
+
+void __init s5p6442_spi_set_info(int cntrlr, int src_clk_nr, int num_cs)
+{
+ struct s3c64xx_spi_info *pd;
+
+ /* Reject invalid configuration */
+ if (!num_cs || src_clk_nr < 0
+ || src_clk_nr > S5P6442_SPI_SRCCLK_SCLK) {
+ printk(KERN_ERR "%s: Invalid SPI configuration\n", __func__);
+ return;
+ }
+
+ switch (cntrlr) {
+ case 0:
+ pd = &s5p6442_spi0_pdata;
+ break;
+ default:
+ printk(KERN_ERR "%s: Invalid SPI controller(%d)\n",
+ __func__, cntrlr);
+ return;
+ }
+
+ pd->num_cs = num_cs;
+ pd->src_clk_nr = src_clk_nr;
+ pd->src_clk_name = spi_src_clks[src_clk_nr];
+}
diff --git a/arch/arm/mach-s5p6442/dma.c b/arch/arm/mach-s5p6442/dma.c
new file mode 100644
index 000000000000..ad4f8704b93d
--- /dev/null
+++ b/arch/arm/mach-s5p6442/dma.c
@@ -0,0 +1,105 @@
+/*
+ * Copyright (C) 2010 Samsung Electronics Co. Ltd.
+ * Jaswinder Singh <jassi.brar@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+
+#include <plat/devs.h>
+#include <plat/irqs.h>
+
+#include <mach/map.h>
+#include <mach/irqs.h>
+
+#include <plat/s3c-pl330-pdata.h>
+
+static u64 dma_dmamask = DMA_BIT_MASK(32);
+
+static struct resource s5p6442_pdma_resource[] = {
+ [0] = {
+ .start = S5P6442_PA_PDMA,
+ .end = S5P6442_PA_PDMA + SZ_4K,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = IRQ_PDMA,
+ .end = IRQ_PDMA,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct s3c_pl330_platdata s5p6442_pdma_pdata = {
+ .peri = {
+ [0] = DMACH_UART0_RX,
+ [1] = DMACH_UART0_TX,
+ [2] = DMACH_UART1_RX,
+ [3] = DMACH_UART1_TX,
+ [4] = DMACH_UART2_RX,
+ [5] = DMACH_UART2_TX,
+ [6] = DMACH_MAX,
+ [7] = DMACH_MAX,
+ [8] = DMACH_MAX,
+ [9] = DMACH_I2S0_RX,
+ [10] = DMACH_I2S0_TX,
+ [11] = DMACH_I2S0S_TX,
+ [12] = DMACH_I2S1_RX,
+ [13] = DMACH_I2S1_TX,
+ [14] = DMACH_MAX,
+ [15] = DMACH_MAX,
+ [16] = DMACH_SPI0_RX,
+ [17] = DMACH_SPI0_TX,
+ [18] = DMACH_MAX,
+ [19] = DMACH_MAX,
+ [20] = DMACH_PCM0_RX,
+ [21] = DMACH_PCM0_TX,
+ [22] = DMACH_PCM1_RX,
+ [23] = DMACH_PCM1_TX,
+ [24] = DMACH_MAX,
+ [25] = DMACH_MAX,
+ [26] = DMACH_MAX,
+ [27] = DMACH_MSM_REQ0,
+ [28] = DMACH_MSM_REQ1,
+ [29] = DMACH_MSM_REQ2,
+ [30] = DMACH_MSM_REQ3,
+ [31] = DMACH_MAX,
+ },
+};
+
+static struct platform_device s5p6442_device_pdma = {
+ .name = "s3c-pl330",
+ .id = 1,
+ .num_resources = ARRAY_SIZE(s5p6442_pdma_resource),
+ .resource = s5p6442_pdma_resource,
+ .dev = {
+ .dma_mask = &dma_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = &s5p6442_pdma_pdata,
+ },
+};
+
+static struct platform_device *s5p6442_dmacs[] __initdata = {
+ &s5p6442_device_pdma,
+};
+
+static int __init s5p6442_dma_init(void)
+{
+ platform_add_devices(s5p6442_dmacs, ARRAY_SIZE(s5p6442_dmacs));
+
+ return 0;
+}
+arch_initcall(s5p6442_dma_init);
diff --git a/arch/arm/mach-s5p6442/include/mach/dma.h b/arch/arm/mach-s5p6442/include/mach/dma.h
new file mode 100644
index 000000000000..81209eb1409b
--- /dev/null
+++ b/arch/arm/mach-s5p6442/include/mach/dma.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2010 Samsung Electronics Co. Ltd.
+ * Jaswinder Singh <jassi.brar@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __MACH_DMA_H
+#define __MACH_DMA_H
+
+/* This platform uses the common S3C DMA API driver for PL330 */
+#include <plat/s3c-dma-pl330.h>
+
+#endif /* __MACH_DMA_H */
diff --git a/arch/arm/mach-s5p6442/include/mach/irqs.h b/arch/arm/mach-s5p6442/include/mach/irqs.h
index da665809f6e4..02c23749c023 100644
--- a/arch/arm/mach-s5p6442/include/mach/irqs.h
+++ b/arch/arm/mach-s5p6442/include/mach/irqs.h
@@ -77,8 +77,9 @@
#define S5P_IRQ_EINT_BASE (IRQ_VIC_END + 1)
-#define IRQ_EINT(x) ((x) < 16 ? S5P_IRQ_VIC0(x) : \
- (S5P_IRQ_EINT_BASE + (x)-16))
+#define S5P_EINT_BASE1 (S5P_IRQ_VIC0(0))
+#define S5P_EINT_BASE2 (S5P_IRQ_EINT_BASE)
+
/* Set the default NR_IRQS */
#define NR_IRQS (IRQ_EINT(31) + 1)
diff --git a/arch/arm/mach-s5p6442/include/mach/map.h b/arch/arm/mach-s5p6442/include/mach/map.h
index 685277d792fb..32ca424ef7f9 100644
--- a/arch/arm/mach-s5p6442/include/mach/map.h
+++ b/arch/arm/mach-s5p6442/include/mach/map.h
@@ -34,6 +34,9 @@
#define S5P6442_PA_VIC2 (0xE4200000)
#define S5P_PA_VIC2 S5P6442_PA_VIC2
+#define S5P6442_PA_MDMA 0xE8000000
+#define S5P6442_PA_PDMA 0xE9000000
+
#define S5P6442_PA_TIMER (0xEA000000)
#define S5P_PA_TIMER S5P6442_PA_TIMER
@@ -51,6 +54,16 @@
#define S5P6442_PA_SDRAM (0x20000000)
#define S5P_PA_SDRAM S5P6442_PA_SDRAM
+#define S5P6442_PA_SPI 0xEC300000
+
+/* I2S */
+#define S5P6442_PA_I2S0 0xC0B00000
+#define S5P6442_PA_I2S1 0xF2200000
+
+/* PCM */
+#define S5P6442_PA_PCM0 0xF2400000
+#define S5P6442_PA_PCM1 0xF2500000
+
 /* compatibility defines. */
#define S3C_PA_UART S5P6442_PA_UART
#define S3C_PA_IIC S5P6442_PA_IIC0
diff --git a/arch/arm/mach-s5p6442/include/mach/pwm-clock.h b/arch/arm/mach-s5p6442/include/mach/pwm-clock.h
index 15e8525da0f1..2724b37def31 100644
--- a/arch/arm/mach-s5p6442/include/mach/pwm-clock.h
+++ b/arch/arm/mach-s5p6442/include/mach/pwm-clock.h
@@ -1,13 +1,14 @@
/* linux/arch/arm/mach-s5p6442/include/mach/pwm-clock.h
*
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Copyright 2008 Openmoko, Inc.
* Copyright 2008 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
* http://armlinux.simtec.co.uk/
*
- * Copyright 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * Based on arch/arm/plat-s3c24xx/include/mach/pwm-clock.h
+ * Based on arch/arm/mach-s3c64xx/include/mach/pwm-clock.h
*
* S5P6442 - pwm clock and timer support
*
@@ -21,14 +22,14 @@
/**
* pwm_cfg_src_is_tclk() - return whether the given mux config is a tclk
- * @cfg: The timer TCFG1 register bits shifted down to 0.
+ * @tcfg: The timer TCFG1 register bits shifted down to 0.
*
 * Return true if the given configuration from TCFG1 is a TCLK instead of
 * any of the TDIV clocks.
*/
static inline int pwm_cfg_src_is_tclk(unsigned long tcfg)
{
- return tcfg == S3C2410_TCFG1_MUX_TCLK;
+ return tcfg == S3C64XX_TCFG1_MUX_TCLK;
}
/**
@@ -40,7 +41,7 @@ static inline int pwm_cfg_src_is_tclk(unsigned long tcfg)
*/
static inline unsigned long tcfg_to_divisor(unsigned long tcfg1)
{
- return 1 << (1 + tcfg1);
+ return 1 << tcfg1;
}
/**
@@ -50,7 +51,7 @@ static inline unsigned long tcfg_to_divisor(unsigned long tcfg1)
*/
static inline unsigned int pwm_tdiv_has_div1(void)
{
- return 0;
+ return 1;
}
/**
@@ -61,9 +62,9 @@ static inline unsigned int pwm_tdiv_has_div1(void)
*/
static inline unsigned long pwm_tdiv_div_bits(unsigned int div)
{
- return ilog2(div) - 1;
+ return ilog2(div);
}
-#define S3C_TCFG1_MUX_TCLK S3C2410_TCFG1_MUX_TCLK
+#define S3C_TCFG1_MUX_TCLK S3C64XX_TCFG1_MUX_TCLK
#endif /* __ASM_ARCH_PWMCLK_H */
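Taken together, these changes switch the S5P6442 PWM timer helpers to the S3C64XX-style divider encoding, where the TCFG1 field is a straight power-of-two exponent and divide-by-1 becomes selectable. A quick worked mapping, derived from the new helpers (old values from the removed lines shown for contrast):

	tcfg1 field 0: new divisor = 1 << 0 = 1    (old code: 1 << (1 + 0) = 2)
	tcfg1 field 3: new divisor = 1 << 3 = 8    (old code: 1 << (1 + 3) = 16)
	pwm_tdiv_div_bits(8) = ilog2(8) = 3, matching the field value that selects /8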
diff --git a/arch/arm/mach-s5p6442/include/mach/spi-clocks.h b/arch/arm/mach-s5p6442/include/mach/spi-clocks.h
new file mode 100644
index 000000000000..7fd88205a97c
--- /dev/null
+++ b/arch/arm/mach-s5p6442/include/mach/spi-clocks.h
@@ -0,0 +1,17 @@
+/* linux/arch/arm/mach-s5p6442/include/mach/spi-clocks.h
+ *
+ * Copyright (C) 2010 Samsung Electronics Co. Ltd.
+ * Jaswinder Singh <jassi.brar@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __S5P6442_PLAT_SPI_CLKS_H
+#define __S5P6442_PLAT_SPI_CLKS_H __FILE__
+
+#define S5P6442_SPI_SRCCLK_PCLK 0
+#define S5P6442_SPI_SRCCLK_SCLK 1
+
+#endif /* __S5P6442_PLAT_SPI_CLKS_H */
diff --git a/arch/arm/mach-s5p6442/mach-smdk6442.c b/arch/arm/mach-s5p6442/mach-smdk6442.c
index 0d63371ce07c..ebcf99777259 100644
--- a/arch/arm/mach-s5p6442/mach-smdk6442.c
+++ b/arch/arm/mach-s5p6442/mach-smdk6442.c
@@ -65,6 +65,7 @@ static struct s3c2410_uartcfg smdk6442_uartcfgs[] __initdata = {
};
static struct platform_device *smdk6442_devices[] __initdata = {
+ &s5p6442_device_iis0,
};
static void __init smdk6442_map_io(void)
diff --git a/arch/arm/mach-s5p6442/setup-i2c0.c b/arch/arm/mach-s5p6442/setup-i2c0.c
new file mode 100644
index 000000000000..662695dd7761
--- /dev/null
+++ b/arch/arm/mach-s5p6442/setup-i2c0.c
@@ -0,0 +1,25 @@
+/* linux/arch/arm/mach-s5p6442/setup-i2c0.c
+ *
+ * Copyright (c) 2009 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * I2C0 GPIO configuration.
+ *
+ * Based on plat-s3c64xx/setup-i2c0.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+struct platform_device; /* don't need the contents */
+
+#include <plat/iic.h>
+
+void s3c_i2c0_cfg_gpio(struct platform_device *dev)
+{
+ /* Will be populated later */
+}
diff --git a/arch/arm/mach-s5pc100/Kconfig b/arch/arm/mach-s5pc100/Kconfig
index 27ec167d2808..b2a11dfa3399 100644
--- a/arch/arm/mach-s5pc100/Kconfig
+++ b/arch/arm/mach-s5pc100/Kconfig
@@ -5,18 +5,36 @@
# Configuration options for the S5PC100 CPU
+if ARCH_S5PC100
+
config CPU_S5PC100
bool
- select CPU_S5PC100_INIT
- select CPU_S5PC100_CLOCK
+ select PLAT_S5P
+ select S5P_EXT_INT
+ select S3C_PL330_DMA
help
Enable S5PC100 CPU support
+config S5PC100_SETUP_FB_24BPP
+ bool
+ help
+ Common setup code for S5PC1XX with a 24bpp RGB display helper.
+
+config S5PC100_SETUP_I2C1
+ bool
+ help
+ Common setup code for i2c bus 1.
+
config S5PC100_SETUP_SDHCI
- bool
- select S5PC1XX_SETUP_SDHCI_GPIO
- help
- Internal helper functions for S5PC100 based SDHCI systems
+ bool
+ select S5PC100_SETUP_SDHCI_GPIO
+ help
+ Internal helper functions for S5PC100 based SDHCI systems
+
+config S5PC100_SETUP_SDHCI_GPIO
+ bool
+ help
+ Common setup code for SDHCI gpio.
config MACH_SMDKC100
bool "SMDKC100"
@@ -26,9 +44,10 @@ config MACH_SMDKC100
select S3C_DEV_HSMMC
select S3C_DEV_HSMMC1
select S3C_DEV_HSMMC2
- select S5PC1XX_SETUP_I2C0
- select S5PC1XX_SETUP_I2C1
- select S5PC1XX_SETUP_FB_24BPP
+ select S5PC100_SETUP_FB_24BPP
+ select S5PC100_SETUP_I2C1
select S5PC100_SETUP_SDHCI
help
Machine support for the Samsung SMDKC100
+
+endif
diff --git a/arch/arm/mach-s5pc100/Makefile b/arch/arm/mach-s5pc100/Makefile
index 809ff10f768f..543f3de5131e 100644
--- a/arch/arm/mach-s5pc100/Makefile
+++ b/arch/arm/mach-s5pc100/Makefile
@@ -11,11 +11,24 @@ obj- :=
# Core support for S5PC100 system
-obj-$(CONFIG_CPU_S5PC100) += cpu.o
+obj-$(CONFIG_CPU_S5PC100) += cpu.o init.o clock.o gpiolib.o irq-gpio.o
+obj-$(CONFIG_CPU_S5PC100) += setup-i2c0.o
+obj-$(CONFIG_CPU_S5PC100) += dma.o
# Helper and device support
-obj-$(CONFIG_S5PC100_SETUP_SDHCI) += setup-sdhci.o
+obj-$(CONFIG_S5PC100_SETUP_FB_24BPP) += setup-fb-24bpp.o
+obj-$(CONFIG_S5PC100_SETUP_I2C1) += setup-i2c1.o
+obj-$(CONFIG_S5PC100_SETUP_SDHCI) += setup-sdhci.o
+obj-$(CONFIG_S5PC100_SETUP_SDHCI_GPIO) += setup-sdhci-gpio.o
+
+# device support
+obj-y += dev-audio.o
+obj-$(CONFIG_S3C64XX_DEV_SPI) += dev-spi.o
# machine support
+
obj-$(CONFIG_MACH_SMDKC100) += mach-smdkc100.o
diff --git a/arch/arm/mach-s5pc100/clock.c b/arch/arm/mach-s5pc100/clock.c
new file mode 100644
index 000000000000..e3fed4cfe7ad
--- /dev/null
+++ b/arch/arm/mach-s5pc100/clock.c
@@ -0,0 +1,1358 @@
+/* linux/arch/arm/mach-s5pc100/clock.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * S5PC100 - Clock support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+
+#include <mach/map.h>
+
+#include <plat/cpu-freq.h>
+#include <mach/regs-clock.h>
+#include <plat/clock.h>
+#include <plat/cpu.h>
+#include <plat/pll.h>
+#include <plat/s5p-clock.h>
+#include <plat/clock-clksrc.h>
+#include <plat/s5pc100.h>
+
+static struct clk s5p_clk_otgphy = {
+ .name = "otg_phy",
+ .id = -1,
+};
+
+static struct clk *clk_src_mout_href_list[] = {
+ [0] = &s5p_clk_27m,
+ [1] = &clk_fin_hpll,
+};
+
+static struct clksrc_sources clk_src_mout_href = {
+ .sources = clk_src_mout_href_list,
+ .nr_sources = ARRAY_SIZE(clk_src_mout_href_list),
+};
+
+static struct clksrc_clk clk_mout_href = {
+ .clk = {
+ .name = "mout_href",
+ .id = -1,
+ },
+ .sources = &clk_src_mout_href,
+ .reg_src = { .reg = S5P_CLK_SRC0, .shift = 20, .size = 1 },
+};
+
+static struct clk *clk_src_mout_48m_list[] = {
+ [0] = &clk_xusbxti,
+ [1] = &s5p_clk_otgphy,
+};
+
+static struct clksrc_sources clk_src_mout_48m = {
+ .sources = clk_src_mout_48m_list,
+ .nr_sources = ARRAY_SIZE(clk_src_mout_48m_list),
+};
+
+static struct clksrc_clk clk_mout_48m = {
+ .clk = {
+ .name = "mout_48m",
+ .id = -1,
+ },
+ .sources = &clk_src_mout_48m,
+ .reg_src = { .reg = S5P_CLK_SRC1, .shift = 24, .size = 1 },
+};
+
+static struct clksrc_clk clk_mout_mpll = {
+ .clk = {
+ .name = "mout_mpll",
+ .id = -1,
+ },
+ .sources = &clk_src_mpll,
+ .reg_src = { .reg = S5P_CLK_SRC0, .shift = 4, .size = 1 },
+};
+
+
+static struct clksrc_clk clk_mout_apll = {
+ .clk = {
+ .name = "mout_apll",
+ .id = -1,
+ },
+ .sources = &clk_src_apll,
+ .reg_src = { .reg = S5P_CLK_SRC0, .shift = 0, .size = 1 },
+};
+
+static struct clksrc_clk clk_mout_epll = {
+ .clk = {
+ .name = "mout_epll",
+ .id = -1,
+ },
+ .sources = &clk_src_epll,
+ .reg_src = { .reg = S5P_CLK_SRC0, .shift = 8, .size = 1 },
+};
+
+static struct clk *clk_src_mout_hpll_list[] = {
+ [0] = &s5p_clk_27m,
+};
+
+static struct clksrc_sources clk_src_mout_hpll = {
+ .sources = clk_src_mout_hpll_list,
+ .nr_sources = ARRAY_SIZE(clk_src_mout_hpll_list),
+};
+
+static struct clksrc_clk clk_mout_hpll = {
+ .clk = {
+ .name = "mout_hpll",
+ .id = -1,
+ },
+ .sources = &clk_src_mout_hpll,
+ .reg_src = { .reg = S5P_CLK_SRC0, .shift = 12, .size = 1 },
+};
+
+static struct clksrc_clk clk_div_apll = {
+ .clk = {
+ .name = "div_apll",
+ .id = -1,
+ .parent = &clk_mout_apll.clk,
+ },
+ .reg_div = { .reg = S5P_CLK_DIV0, .shift = 0, .size = 1 },
+};
+
+static struct clksrc_clk clk_div_arm = {
+ .clk = {
+ .name = "div_arm",
+ .id = -1,
+ .parent = &clk_div_apll.clk,
+ },
+ .reg_div = { .reg = S5P_CLK_DIV0, .shift = 4, .size = 3 },
+};
+
+static struct clksrc_clk clk_div_d0_bus = {
+ .clk = {
+ .name = "div_d0_bus",
+ .id = -1,
+ .parent = &clk_div_arm.clk,
+ },
+ .reg_div = { .reg = S5P_CLK_DIV0, .shift = 8, .size = 3 },
+};
+
+static struct clksrc_clk clk_div_pclkd0 = {
+ .clk = {
+ .name = "div_pclkd0",
+ .id = -1,
+ .parent = &clk_div_d0_bus.clk,
+ },
+ .reg_div = { .reg = S5P_CLK_DIV0, .shift = 12, .size = 3 },
+};
+
+static struct clksrc_clk clk_div_secss = {
+ .clk = {
+ .name = "div_secss",
+ .id = -1,
+ .parent = &clk_div_d0_bus.clk,
+ },
+ .reg_div = { .reg = S5P_CLK_DIV0, .shift = 16, .size = 3 },
+};
+
+static struct clksrc_clk clk_div_apll2 = {
+ .clk = {
+ .name = "div_apll2",
+ .id = -1,
+ .parent = &clk_mout_apll.clk,
+ },
+ .reg_div = { .reg = S5P_CLK_DIV1, .shift = 0, .size = 3 },
+};
+
+static struct clk *clk_src_mout_am_list[] = {
+ [0] = &clk_mout_mpll.clk,
+ [1] = &clk_div_apll2.clk,
+};
+
+struct clksrc_sources clk_src_mout_am = {
+ .sources = clk_src_mout_am_list,
+ .nr_sources = ARRAY_SIZE(clk_src_mout_am_list),
+};
+
+static struct clksrc_clk clk_mout_am = {
+ .clk = {
+ .name = "mout_am",
+ .id = -1,
+ },
+ .sources = &clk_src_mout_am,
+ .reg_src = { .reg = S5P_CLK_SRC0, .shift = 16, .size = 1 },
+};
+
+static struct clksrc_clk clk_div_d1_bus = {
+ .clk = {
+ .name = "div_d1_bus",
+ .id = -1,
+ .parent = &clk_mout_am.clk,
+ },
+ .reg_div = { .reg = S5P_CLK_DIV1, .shift = 12, .size = 3 },
+};
+
+static struct clksrc_clk clk_div_mpll2 = {
+ .clk = {
+ .name = "div_mpll2",
+ .id = -1,
+ .parent = &clk_mout_am.clk,
+ },
+ .reg_div = { .reg = S5P_CLK_DIV1, .shift = 8, .size = 1 },
+};
+
+static struct clksrc_clk clk_div_mpll = {
+ .clk = {
+ .name = "div_mpll",
+ .id = -1,
+ .parent = &clk_mout_am.clk,
+ },
+ .reg_div = { .reg = S5P_CLK_DIV1, .shift = 4, .size = 2 },
+};
+
+static struct clk *clk_src_mout_onenand_list[] = {
+ [0] = &clk_div_d0_bus.clk,
+ [1] = &clk_div_d1_bus.clk,
+};
+
+struct clksrc_sources clk_src_mout_onenand = {
+ .sources = clk_src_mout_onenand_list,
+ .nr_sources = ARRAY_SIZE(clk_src_mout_onenand_list),
+};
+
+static struct clksrc_clk clk_mout_onenand = {
+ .clk = {
+ .name = "mout_onenand",
+ .id = -1,
+ },
+ .sources = &clk_src_mout_onenand,
+ .reg_src = { .reg = S5P_CLK_SRC0, .shift = 24, .size = 1 },
+};
+
+static struct clksrc_clk clk_div_onenand = {
+ .clk = {
+ .name = "div_onenand",
+ .id = -1,
+ .parent = &clk_mout_onenand.clk,
+ },
+ .reg_div = { .reg = S5P_CLK_DIV1, .shift = 20, .size = 2 },
+};
+
+static struct clksrc_clk clk_div_pclkd1 = {
+ .clk = {
+ .name = "div_pclkd1",
+ .id = -1,
+ .parent = &clk_div_d1_bus.clk,
+ },
+ .reg_div = { .reg = S5P_CLK_DIV1, .shift = 16, .size = 3 },
+};
+
+static struct clksrc_clk clk_div_cam = {
+ .clk = {
+ .name = "div_cam",
+ .id = -1,
+ .parent = &clk_div_mpll2.clk,
+ },
+ .reg_div = { .reg = S5P_CLK_DIV1, .shift = 24, .size = 5 },
+};
+
+static struct clksrc_clk clk_div_hdmi = {
+ .clk = {
+ .name = "div_hdmi",
+ .id = -1,
+ .parent = &clk_mout_hpll.clk,
+ },
+ .reg_div = { .reg = S5P_CLK_DIV3, .shift = 28, .size = 4 },
+};
+
+static int s5pc100_epll_enable(struct clk *clk, int enable)
+{
+ unsigned int ctrlbit = clk->ctrlbit;
+ unsigned int epll_con = __raw_readl(S5P_EPLL_CON) & ~ctrlbit;
+
+ if (enable)
+ __raw_writel(epll_con | ctrlbit, S5P_EPLL_CON);
+ else
+ __raw_writel(epll_con, S5P_EPLL_CON);
+
+ return 0;
+}
+
+static unsigned long s5pc100_epll_get_rate(struct clk *clk)
+{
+ return clk->rate;
+}
+
+static u32 epll_div[][4] = {
+ { 32750000, 131, 3, 4 },
+ { 32768000, 131, 3, 4 },
+ { 36000000, 72, 3, 3 },
+ { 45000000, 90, 3, 3 },
+ { 45158000, 90, 3, 3 },
+ { 45158400, 90, 3, 3 },
+ { 48000000, 96, 3, 3 },
+ { 49125000, 131, 4, 3 },
+ { 49152000, 131, 4, 3 },
+ { 60000000, 120, 3, 3 },
+ { 67737600, 226, 5, 3 },
+ { 67738000, 226, 5, 3 },
+ { 73800000, 246, 5, 3 },
+ { 73728000, 246, 5, 3 },
+ { 72000000, 144, 3, 3 },
+ { 84000000, 168, 3, 3 },
+ { 96000000, 96, 3, 2 },
+ { 144000000, 144, 3, 2 },
+ { 192000000, 96, 3, 1 }
+};
+
+static int s5pc100_epll_set_rate(struct clk *clk, unsigned long rate)
+{
+ unsigned int epll_con;
+ unsigned int i;
+
+ if (clk->rate == rate) /* Return if nothing changed */
+ return 0;
+
+ epll_con = __raw_readl(S5P_EPLL_CON);
+
+ epll_con &= ~(PLL65XX_MDIV_MASK | PLL65XX_PDIV_MASK | PLL65XX_SDIV_MASK);
+
+ for (i = 0; i < ARRAY_SIZE(epll_div); i++) {
+ if (epll_div[i][0] == rate) {
+ epll_con |= (epll_div[i][1] << PLL65XX_MDIV_SHIFT) |
+ (epll_div[i][2] << PLL65XX_PDIV_SHIFT) |
+ (epll_div[i][3] << PLL65XX_SDIV_SHIFT);
+ break;
+ }
+ }
+
+ if (i == ARRAY_SIZE(epll_div)) {
+ printk(KERN_ERR "%s: Invalid Clock EPLL Frequency\n", __func__);
+ return -EINVAL;
+ }
+
+ __raw_writel(epll_con, S5P_EPLL_CON);
+
+ clk->rate = rate;
+
+ return 0;
+}
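For illustration, each epll_div row above is { target rate, MDIV, PDIV, SDIV }, and the rates are consistent with the usual PLL65xx relation Fout = (MDIV x Fin) / (PDIV x 2^SDIV) with a 12 MHz reference clock; treat the formula and the 12 MHz value as assumptions here, since neither is spelled out in this file:

	{ 96000000, 96, 3, 2 }:   96 * 12 MHz / (3 * 2^2) = 1152 / 12 = 96 MHz
	{ 49125000, 131, 4, 3 }:  131 * 12 MHz / (4 * 2^3) = 1572 / 32 = 49.125 MHz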
+
+static struct clk_ops s5pc100_epll_ops = {
+ .get_rate = s5pc100_epll_get_rate,
+ .set_rate = s5pc100_epll_set_rate,
+};
+
+static int s5pc100_d0_0_ctrl(struct clk *clk, int enable)
+{
+ return s5p_gatectrl(S5P_CLKGATE_D00, clk, enable);
+}
+
+static int s5pc100_d0_1_ctrl(struct clk *clk, int enable)
+{
+ return s5p_gatectrl(S5P_CLKGATE_D01, clk, enable);
+}
+
+static int s5pc100_d0_2_ctrl(struct clk *clk, int enable)
+{
+ return s5p_gatectrl(S5P_CLKGATE_D02, clk, enable);
+}
+
+static int s5pc100_d1_0_ctrl(struct clk *clk, int enable)
+{
+ return s5p_gatectrl(S5P_CLKGATE_D10, clk, enable);
+}
+
+static int s5pc100_d1_1_ctrl(struct clk *clk, int enable)
+{
+ return s5p_gatectrl(S5P_CLKGATE_D11, clk, enable);
+}
+
+static int s5pc100_d1_2_ctrl(struct clk *clk, int enable)
+{
+ return s5p_gatectrl(S5P_CLKGATE_D12, clk, enable);
+}
+
+static int s5pc100_d1_3_ctrl(struct clk *clk, int enable)
+{
+ return s5p_gatectrl(S5P_CLKGATE_D13, clk, enable);
+}
+
+static int s5pc100_d1_4_ctrl(struct clk *clk, int enable)
+{
+ return s5p_gatectrl(S5P_CLKGATE_D14, clk, enable);
+}
+
+static int s5pc100_d1_5_ctrl(struct clk *clk, int enable)
+{
+ return s5p_gatectrl(S5P_CLKGATE_D15, clk, enable);
+}
+
+static int s5pc100_sclk0_ctrl(struct clk *clk, int enable)
+{
+ return s5p_gatectrl(S5P_CLKGATE_SCLK0, clk, enable);
+}
+
+static int s5pc100_sclk1_ctrl(struct clk *clk, int enable)
+{
+ return s5p_gatectrl(S5P_CLKGATE_SCLK1, clk, enable);
+}
+
+/*
+ * The following clocks will be disabled during clock initialization. They
+ * should stay disabled until the driver that owns each clock explicitly
+ * requests that it be enabled.
+ */
+static struct clk init_clocks_disable[] = {
+ {
+ .name = "cssys",
+ .id = -1,
+ .parent = &clk_div_d0_bus.clk,
+ .enable = s5pc100_d0_0_ctrl,
+ .ctrlbit = (1 << 6),
+ }, {
+ .name = "secss",
+ .id = -1,
+ .parent = &clk_div_d0_bus.clk,
+ .enable = s5pc100_d0_0_ctrl,
+ .ctrlbit = (1 << 5),
+ }, {
+ .name = "g2d",
+ .id = -1,
+ .parent = &clk_div_d0_bus.clk,
+ .enable = s5pc100_d0_0_ctrl,
+ .ctrlbit = (1 << 4),
+ }, {
+ .name = "mdma",
+ .id = -1,
+ .parent = &clk_div_d0_bus.clk,
+ .enable = s5pc100_d0_0_ctrl,
+ .ctrlbit = (1 << 3),
+ }, {
+ .name = "cfcon",
+ .id = -1,
+ .parent = &clk_div_d0_bus.clk,
+ .enable = s5pc100_d0_0_ctrl,
+ .ctrlbit = (1 << 2),
+ }, {
+ .name = "nfcon",
+ .id = -1,
+ .parent = &clk_div_d0_bus.clk,
+ .enable = s5pc100_d0_1_ctrl,
+ .ctrlbit = (1 << 3),
+ }, {
+ .name = "onenandc",
+ .id = -1,
+ .parent = &clk_div_d0_bus.clk,
+ .enable = s5pc100_d0_1_ctrl,
+ .ctrlbit = (1 << 2),
+ }, {
+ .name = "sdm",
+ .id = -1,
+ .parent = &clk_div_d0_bus.clk,
+ .enable = s5pc100_d0_2_ctrl,
+ .ctrlbit = (1 << 2),
+ }, {
+ .name = "seckey",
+ .id = -1,
+ .parent = &clk_div_d0_bus.clk,
+ .enable = s5pc100_d0_2_ctrl,
+ .ctrlbit = (1 << 1),
+ }, {
+ .name = "hsmmc",
+ .id = 2,
+ .parent = &clk_div_d1_bus.clk,
+ .enable = s5pc100_d1_0_ctrl,
+ .ctrlbit = (1 << 7),
+ }, {
+ .name = "hsmmc",
+ .id = 1,
+ .parent = &clk_div_d1_bus.clk,
+ .enable = s5pc100_d1_0_ctrl,
+ .ctrlbit = (1 << 6),
+ }, {
+ .name = "hsmmc",
+ .id = 0,
+ .parent = &clk_div_d1_bus.clk,
+ .enable = s5pc100_d1_0_ctrl,
+ .ctrlbit = (1 << 5),
+ }, {
+ .name = "modemif",
+ .id = -1,
+ .parent = &clk_div_d1_bus.clk,
+ .enable = s5pc100_d1_0_ctrl,
+ .ctrlbit = (1 << 4),
+ }, {
+ .name = "otg",
+ .id = -1,
+ .parent = &clk_div_d1_bus.clk,
+ .enable = s5pc100_d1_0_ctrl,
+ .ctrlbit = (1 << 3),
+ }, {
+ .name = "usbhost",
+ .id = -1,
+ .parent = &clk_div_d1_bus.clk,
+ .enable = s5pc100_d1_0_ctrl,
+ .ctrlbit = (1 << 2),
+ }, {
+ .name = "pdma",
+ .id = 1,
+ .parent = &clk_div_d1_bus.clk,
+ .enable = s5pc100_d1_0_ctrl,
+ .ctrlbit = (1 << 1),
+ }, {
+ .name = "pdma",
+ .id = 0,
+ .parent = &clk_div_d1_bus.clk,
+ .enable = s5pc100_d1_0_ctrl,
+ .ctrlbit = (1 << 0),
+ }, {
+ .name = "lcd",
+ .id = -1,
+ .parent = &clk_div_d1_bus.clk,
+ .enable = s5pc100_d1_1_ctrl,
+ .ctrlbit = (1 << 0),
+ }, {
+ .name = "rotator",
+ .id = -1,
+ .parent = &clk_div_d1_bus.clk,
+ .enable = s5pc100_d1_1_ctrl,
+ .ctrlbit = (1 << 1),
+ }, {
+ .name = "fimc",
+ .id = 0,
+ .parent = &clk_div_d1_bus.clk,
+ .enable = s5pc100_d1_1_ctrl,
+ .ctrlbit = (1 << 2),
+ }, {
+ .name = "fimc",
+ .id = 1,
+ .parent = &clk_div_d1_bus.clk,
+ .enable = s5pc100_d1_1_ctrl,
+ .ctrlbit = (1 << 3),
+ }, {
+ .name = "fimc",
+ .id = 2,
+ .parent = &clk_div_d1_bus.clk,
+ .enable = s5pc100_d1_1_ctrl,
+ .ctrlbit = (1 << 4),
+ }, {
+ .name = "jpeg",
+ .id = -1,
+ .parent = &clk_div_d1_bus.clk,
+ .enable = s5pc100_d1_1_ctrl,
+ .ctrlbit = (1 << 5),
+ }, {
+ .name = "mipi-dsim",
+ .id = -1,
+ .parent = &clk_div_d1_bus.clk,
+ .enable = s5pc100_d1_1_ctrl,
+ .ctrlbit = (1 << 6),
+ }, {
+ .name = "mipi-csis",
+ .id = -1,
+ .parent = &clk_div_d1_bus.clk,
+ .enable = s5pc100_d1_1_ctrl,
+ .ctrlbit = (1 << 7),
+ }, {
+ .name = "g3d",
+ .id = 0,
+ .parent = &clk_div_d1_bus.clk,
+ .enable = s5pc100_d1_0_ctrl,
+ .ctrlbit = (1 << 8),
+ }, {
+ .name = "tv",
+ .id = -1,
+ .parent = &clk_div_d1_bus.clk,
+ .enable = s5pc100_d1_2_ctrl,
+ .ctrlbit = (1 << 0),
+ }, {
+ .name = "vp",
+ .id = -1,
+ .parent = &clk_div_d1_bus.clk,
+ .enable = s5pc100_d1_2_ctrl,
+ .ctrlbit = (1 << 1),
+ }, {
+ .name = "mixer",
+ .id = -1,
+ .parent = &clk_div_d1_bus.clk,
+ .enable = s5pc100_d1_2_ctrl,
+ .ctrlbit = (1 << 2),
+ }, {
+ .name = "hdmi",
+ .id = -1,
+ .parent = &clk_div_d1_bus.clk,
+ .enable = s5pc100_d1_2_ctrl,
+ .ctrlbit = (1 << 3),
+ }, {
+ .name = "mfc",
+ .id = -1,
+ .parent = &clk_div_d1_bus.clk,
+ .enable = s5pc100_d1_2_ctrl,
+ .ctrlbit = (1 << 4),
+ }, {
+ .name = "apc",
+ .id = -1,
+ .parent = &clk_div_d1_bus.clk,
+ .enable = s5pc100_d1_3_ctrl,
+ .ctrlbit = (1 << 2),
+ }, {
+ .name = "iec",
+ .id = -1,
+ .parent = &clk_div_d1_bus.clk,
+ .enable = s5pc100_d1_3_ctrl,
+ .ctrlbit = (1 << 3),
+ }, {
+ .name = "systimer",
+ .id = -1,
+ .parent = &clk_div_d1_bus.clk,
+ .enable = s5pc100_d1_3_ctrl,
+ .ctrlbit = (1 << 7),
+ }, {
+ .name = "watchdog",
+ .id = -1,
+ .parent = &clk_div_d1_bus.clk,
+ .enable = s5pc100_d1_3_ctrl,
+ .ctrlbit = (1 << 8),
+ }, {
+ .name = "rtc",
+ .id = -1,
+ .parent = &clk_div_d1_bus.clk,
+ .enable = s5pc100_d1_3_ctrl,
+ .ctrlbit = (1 << 9),
+ }, {
+ .name = "i2c",
+ .id = 0,
+ .parent = &clk_div_d1_bus.clk,
+ .enable = s5pc100_d1_4_ctrl,
+ .ctrlbit = (1 << 4),
+ }, {
+ .name = "i2c",
+ .id = 1,
+ .parent = &clk_div_d1_bus.clk,
+ .enable = s5pc100_d1_4_ctrl,
+ .ctrlbit = (1 << 5),
+ }, {
+ .name = "spi",
+ .id = 0,
+ .parent = &clk_div_d1_bus.clk,
+ .enable = s5pc100_d1_4_ctrl,
+ .ctrlbit = (1 << 6),
+ }, {
+ .name = "spi",
+ .id = 1,
+ .parent = &clk_div_d1_bus.clk,
+ .enable = s5pc100_d1_4_ctrl,
+ .ctrlbit = (1 << 7),
+ }, {
+ .name = "spi",
+ .id = 2,
+ .parent = &clk_div_d1_bus.clk,
+ .enable = s5pc100_d1_4_ctrl,
+ .ctrlbit = (1 << 8),
+ }, {
+ .name = "irda",
+ .id = -1,
+ .parent = &clk_div_d1_bus.clk,
+ .enable = s5pc100_d1_4_ctrl,
+ .ctrlbit = (1 << 9),
+ }, {
+ .name = "ccan",
+ .id = 0,
+ .parent = &clk_div_d1_bus.clk,
+ .enable = s5pc100_d1_4_ctrl,
+ .ctrlbit = (1 << 10),
+ }, {
+ .name = "ccan",
+ .id = 1,
+ .parent = &clk_div_d1_bus.clk,
+ .enable = s5pc100_d1_4_ctrl,
+ .ctrlbit = (1 << 11),
+ }, {
+ .name = "hsitx",
+ .id = -1,
+ .parent = &clk_div_d1_bus.clk,
+ .enable = s5pc100_d1_4_ctrl,
+ .ctrlbit = (1 << 12),
+ }, {
+ .name = "hsirx",
+ .id = -1,
+ .parent = &clk_div_d1_bus.clk,
+ .enable = s5pc100_d1_4_ctrl,
+ .ctrlbit = (1 << 13),
+ }, {
+ .name = "iis",
+ .id = 0,
+ .parent = &clk_div_d1_bus.clk,
+ .enable = s5pc100_d1_5_ctrl,
+ .ctrlbit = (1 << 0),
+ }, {
+ .name = "iis",
+ .id = 1,
+ .parent = &clk_div_d1_bus.clk,
+ .enable = s5pc100_d1_5_ctrl,
+ .ctrlbit = (1 << 1),
+ }, {
+ .name = "iis",
+ .id = 2,
+ .parent = &clk_div_d1_bus.clk,
+ .enable = s5pc100_d1_5_ctrl,
+ .ctrlbit = (1 << 2),
+ }, {
+ .name = "ac97",
+ .id = -1,
+ .parent = &clk_div_d1_bus.clk,
+ .enable = s5pc100_d1_5_ctrl,
+ .ctrlbit = (1 << 3),
+ }, {
+ .name = "pcm",
+ .id = 0,
+ .parent = &clk_div_d1_bus.clk,
+ .enable = s5pc100_d1_5_ctrl,
+ .ctrlbit = (1 << 4),
+ }, {
+ .name = "pcm",
+ .id = 1,
+ .parent = &clk_div_d1_bus.clk,
+ .enable = s5pc100_d1_5_ctrl,
+ .ctrlbit = (1 << 5),
+ }, {
+ .name = "spdif",
+ .id = -1,
+ .parent = &clk_div_d1_bus.clk,
+ .enable = s5pc100_d1_5_ctrl,
+ .ctrlbit = (1 << 6),
+ }, {
+ .name = "adc",
+ .id = -1,
+ .parent = &clk_div_d1_bus.clk,
+ .enable = s5pc100_d1_5_ctrl,
+ .ctrlbit = (1 << 7),
+ }, {
+ .name = "keyif",
+ .id = -1,
+ .parent = &clk_div_d1_bus.clk,
+ .enable = s5pc100_d1_5_ctrl,
+ .ctrlbit = (1 << 8),
+ }, {
+ .name = "spi_48m",
+ .id = 0,
+ .parent = &clk_mout_48m.clk,
+ .enable = s5pc100_sclk0_ctrl,
+ .ctrlbit = (1 << 7),
+ }, {
+ .name = "spi_48m",
+ .id = 1,
+ .parent = &clk_mout_48m.clk,
+ .enable = s5pc100_sclk0_ctrl,
+ .ctrlbit = (1 << 8),
+ }, {
+ .name = "spi_48m",
+ .id = 2,
+ .parent = &clk_mout_48m.clk,
+ .enable = s5pc100_sclk0_ctrl,
+ .ctrlbit = (1 << 9),
+ }, {
+ .name = "mmc_48m",
+ .id = 0,
+ .parent = &clk_mout_48m.clk,
+ .enable = s5pc100_sclk0_ctrl,
+ .ctrlbit = (1 << 15),
+ }, {
+ .name = "mmc_48m",
+ .id = 1,
+ .parent = &clk_mout_48m.clk,
+ .enable = s5pc100_sclk0_ctrl,
+ .ctrlbit = (1 << 16),
+ }, {
+ .name = "mmc_48m",
+ .id = 2,
+ .parent = &clk_mout_48m.clk,
+ .enable = s5pc100_sclk0_ctrl,
+ .ctrlbit = (1 << 17),
+ },
+};
+
+static struct clk clk_vclk54m = {
+ .name = "vclk_54m",
+ .id = -1,
+ .rate = 54000000,
+};
+
+static struct clk clk_i2scdclk0 = {
+ .name = "i2s_cdclk0",
+ .id = -1,
+};
+
+static struct clk clk_i2scdclk1 = {
+ .name = "i2s_cdclk1",
+ .id = -1,
+};
+
+static struct clk clk_i2scdclk2 = {
+ .name = "i2s_cdclk2",
+ .id = -1,
+};
+
+static struct clk clk_pcmcdclk0 = {
+ .name = "pcm_cdclk0",
+ .id = -1,
+};
+
+static struct clk clk_pcmcdclk1 = {
+ .name = "pcm_cdclk1",
+ .id = -1,
+};
+
+static struct clk *clk_src_group1_list[] = {
+ [0] = &clk_mout_epll.clk,
+ [1] = &clk_div_mpll2.clk,
+ [2] = &clk_fin_epll,
+ [3] = &clk_mout_hpll.clk,
+};
+
+struct clksrc_sources clk_src_group1 = {
+ .sources = clk_src_group1_list,
+ .nr_sources = ARRAY_SIZE(clk_src_group1_list),
+};
+
+static struct clk *clk_src_group2_list[] = {
+ [0] = &clk_mout_epll.clk,
+ [1] = &clk_div_mpll.clk,
+};
+
+struct clksrc_sources clk_src_group2 = {
+ .sources = clk_src_group2_list,
+ .nr_sources = ARRAY_SIZE(clk_src_group2_list),
+};
+
+static struct clk *clk_src_group3_list[] = {
+ [0] = &clk_mout_epll.clk,
+ [1] = &clk_div_mpll.clk,
+ [2] = &clk_fin_epll,
+ [3] = &clk_i2scdclk0,
+ [4] = &clk_pcmcdclk0,
+ [5] = &clk_mout_hpll.clk,
+};
+
+struct clksrc_sources clk_src_group3 = {
+ .sources = clk_src_group3_list,
+ .nr_sources = ARRAY_SIZE(clk_src_group3_list),
+};
+
+static struct clk *clk_src_group4_list[] = {
+ [0] = &clk_mout_epll.clk,
+ [1] = &clk_div_mpll.clk,
+ [2] = &clk_fin_epll,
+ [3] = &clk_i2scdclk1,
+ [4] = &clk_pcmcdclk1,
+ [5] = &clk_mout_hpll.clk,
+};
+
+struct clksrc_sources clk_src_group4 = {
+ .sources = clk_src_group4_list,
+ .nr_sources = ARRAY_SIZE(clk_src_group4_list),
+};
+
+static struct clk *clk_src_group5_list[] = {
+ [0] = &clk_mout_epll.clk,
+ [1] = &clk_div_mpll.clk,
+ [2] = &clk_fin_epll,
+ [3] = &clk_i2scdclk2,
+ [4] = &clk_mout_hpll.clk,
+};
+
+struct clksrc_sources clk_src_group5 = {
+ .sources = clk_src_group5_list,
+ .nr_sources = ARRAY_SIZE(clk_src_group5_list),
+};
+
+static struct clk *clk_src_group6_list[] = {
+ [0] = &s5p_clk_27m,
+ [1] = &clk_vclk54m,
+ [2] = &clk_div_hdmi.clk,
+};
+
+struct clksrc_sources clk_src_group6 = {
+ .sources = clk_src_group6_list,
+ .nr_sources = ARRAY_SIZE(clk_src_group6_list),
+};
+
+static struct clk *clk_src_group7_list[] = {
+ [0] = &clk_mout_epll.clk,
+ [1] = &clk_div_mpll.clk,
+ [2] = &clk_mout_hpll.clk,
+ [3] = &clk_vclk54m,
+};
+
+struct clksrc_sources clk_src_group7 = {
+ .sources = clk_src_group7_list,
+ .nr_sources = ARRAY_SIZE(clk_src_group7_list),
+};
+
+static struct clk *clk_src_mmc0_list[] = {
+ [0] = &clk_mout_epll.clk,
+ [1] = &clk_div_mpll.clk,
+ [2] = &clk_fin_epll,
+};
+
+struct clksrc_sources clk_src_mmc0 = {
+ .sources = clk_src_mmc0_list,
+ .nr_sources = ARRAY_SIZE(clk_src_mmc0_list),
+};
+
+static struct clk *clk_src_mmc12_list[] = {
+ [0] = &clk_mout_epll.clk,
+ [1] = &clk_div_mpll.clk,
+ [2] = &clk_fin_epll,
+ [3] = &clk_mout_hpll.clk,
+};
+
+struct clksrc_sources clk_src_mmc12 = {
+ .sources = clk_src_mmc12_list,
+ .nr_sources = ARRAY_SIZE(clk_src_mmc12_list),
+};
+
+static struct clk *clk_src_irda_usb_list[] = {
+ [0] = &clk_mout_epll.clk,
+ [1] = &clk_div_mpll.clk,
+ [2] = &clk_fin_epll,
+ [3] = &clk_mout_hpll.clk,
+};
+
+struct clksrc_sources clk_src_irda_usb = {
+ .sources = clk_src_irda_usb_list,
+ .nr_sources = ARRAY_SIZE(clk_src_irda_usb_list),
+};
+
+static struct clk *clk_src_pwi_list[] = {
+ [0] = &clk_fin_epll,
+ [1] = &clk_mout_epll.clk,
+ [2] = &clk_div_mpll.clk,
+};
+
+struct clksrc_sources clk_src_pwi = {
+ .sources = clk_src_pwi_list,
+ .nr_sources = ARRAY_SIZE(clk_src_pwi_list),
+};
+
+static struct clksrc_clk clksrcs[] = {
+ {
+ .clk = {
+ .name = "sclk_spi",
+ .id = 0,
+ .ctrlbit = (1 << 4),
+ .enable = s5pc100_sclk0_ctrl,
+
+ },
+ .sources = &clk_src_group1,
+ .reg_src = { .reg = S5P_CLK_SRC1, .shift = 4, .size = 2 },
+ .reg_div = { .reg = S5P_CLK_DIV2, .shift = 4, .size = 4 },
+ }, {
+ .clk = {
+ .name = "sclk_spi",
+ .id = 1,
+ .ctrlbit = (1 << 5),
+ .enable = s5pc100_sclk0_ctrl,
+
+ },
+ .sources = &clk_src_group1,
+ .reg_src = { .reg = S5P_CLK_SRC1, .shift = 8, .size = 2 },
+ .reg_div = { .reg = S5P_CLK_DIV2, .shift = 8, .size = 4 },
+ }, {
+ .clk = {
+ .name = "sclk_spi",
+ .id = 2,
+ .ctrlbit = (1 << 6),
+ .enable = s5pc100_sclk0_ctrl,
+
+ },
+ .sources = &clk_src_group1,
+ .reg_src = { .reg = S5P_CLK_SRC1, .shift = 12, .size = 2 },
+ .reg_div = { .reg = S5P_CLK_DIV2, .shift = 12, .size = 4 },
+ }, {
+ .clk = {
+ .name = "uclk1",
+ .id = -1,
+ .ctrlbit = (1 << 3),
+ .enable = s5pc100_sclk0_ctrl,
+
+ },
+ .sources = &clk_src_group2,
+ .reg_src = { .reg = S5P_CLK_SRC1, .shift = 0, .size = 1 },
+ .reg_div = { .reg = S5P_CLK_DIV2, .shift = 0, .size = 4 },
+ }, {
+ .clk = {
+ .name = "sclk_mixer",
+ .id = -1,
+ .ctrlbit = (1 << 6),
+ .enable = s5pc100_sclk0_ctrl,
+
+ },
+ .sources = &clk_src_group6,
+ .reg_src = { .reg = S5P_CLK_SRC2, .shift = 28, .size = 2 },
+ }, {
+ .clk = {
+ .name = "sclk_audio",
+ .id = 0,
+ .ctrlbit = (1 << 8),
+ .enable = s5pc100_sclk1_ctrl,
+
+ },
+ .sources = &clk_src_group3,
+ .reg_src = { .reg = S5P_CLK_SRC3, .shift = 12, .size = 3 },
+ .reg_div = { .reg = S5P_CLK_DIV4, .shift = 12, .size = 4 },
+ }, {
+ .clk = {
+ .name = "sclk_audio",
+ .id = 1,
+ .ctrlbit = (1 << 9),
+ .enable = s5pc100_sclk1_ctrl,
+
+ },
+ .sources = &clk_src_group4,
+ .reg_src = { .reg = S5P_CLK_SRC3, .shift = 16, .size = 3 },
+ .reg_div = { .reg = S5P_CLK_DIV4, .shift = 16, .size = 4 },
+ }, {
+ .clk = {
+ .name = "sclk_audio",
+ .id = 2,
+ .ctrlbit = (1 << 10),
+ .enable = s5pc100_sclk1_ctrl,
+
+ },
+ .sources = &clk_src_group5,
+ .reg_src = { .reg = S5P_CLK_SRC3, .shift = 20, .size = 3 },
+ .reg_div = { .reg = S5P_CLK_DIV4, .shift = 20, .size = 4 },
+ }, {
+ .clk = {
+ .name = "sclk_lcd",
+ .id = -1,
+ .ctrlbit = (1 << 0),
+ .enable = s5pc100_sclk1_ctrl,
+
+ },
+ .sources = &clk_src_group7,
+ .reg_src = { .reg = S5P_CLK_SRC2, .shift = 12, .size = 2 },
+ .reg_div = { .reg = S5P_CLK_DIV3, .shift = 12, .size = 4 },
+ }, {
+ .clk = {
+ .name = "sclk_fimc",
+ .id = 0,
+ .ctrlbit = (1 << 1),
+ .enable = s5pc100_sclk1_ctrl,
+
+ },
+ .sources = &clk_src_group7,
+ .reg_src = { .reg = S5P_CLK_SRC2, .shift = 16, .size = 2 },
+ .reg_div = { .reg = S5P_CLK_DIV3, .shift = 16, .size = 4 },
+ }, {
+ .clk = {
+ .name = "sclk_fimc",
+ .id = 1,
+ .ctrlbit = (1 << 2),
+ .enable = s5pc100_sclk1_ctrl,
+
+ },
+ .sources = &clk_src_group7,
+ .reg_src = { .reg = S5P_CLK_SRC2, .shift = 20, .size = 2 },
+ .reg_div = { .reg = S5P_CLK_DIV3, .shift = 20, .size = 4 },
+ }, {
+ .clk = {
+ .name = "sclk_fimc",
+ .id = 2,
+ .ctrlbit = (1 << 3),
+ .enable = s5pc100_sclk1_ctrl,
+
+ },
+ .sources = &clk_src_group7,
+ .reg_src = { .reg = S5P_CLK_SRC2, .shift = 24, .size = 2 },
+ .reg_div = { .reg = S5P_CLK_DIV3, .shift = 24, .size = 4 },
+ }, {
+ .clk = {
+ .name = "mmc_bus",
+ .id = 0,
+ .ctrlbit = (1 << 12),
+ .enable = s5pc100_sclk1_ctrl,
+
+ },
+ .sources = &clk_src_mmc0,
+ .reg_src = { .reg = S5P_CLK_SRC2, .shift = 0, .size = 2 },
+ .reg_div = { .reg = S5P_CLK_DIV3, .shift = 0, .size = 4 },
+ }, {
+ .clk = {
+ .name = "mmc_bus",
+ .id = 1,
+ .ctrlbit = (1 << 13),
+ .enable = s5pc100_sclk1_ctrl,
+
+ },
+ .sources = &clk_src_mmc12,
+ .reg_src = { .reg = S5P_CLK_SRC2, .shift = 4, .size = 2 },
+ .reg_div = { .reg = S5P_CLK_DIV3, .shift = 4, .size = 4 },
+ }, {
+ .clk = {
+ .name = "mmc_bus",
+ .id = 2,
+ .ctrlbit = (1 << 14),
+ .enable = s5pc100_sclk1_ctrl,
+
+ },
+ .sources = &clk_src_mmc12,
+ .reg_src = { .reg = S5P_CLK_SRC2, .shift = 8, .size = 2 },
+ .reg_div = { .reg = S5P_CLK_DIV3, .shift = 8, .size = 4 },
+ }, {
+ .clk = {
+ .name = "sclk_irda",
+ .id = 2,
+ .ctrlbit = (1 << 10),
+ .enable = s5pc100_sclk0_ctrl,
+
+ },
+ .sources = &clk_src_irda_usb,
+ .reg_src = { .reg = S5P_CLK_SRC2, .shift = 8, .size = 2 },
+ .reg_div = { .reg = S5P_CLK_DIV3, .shift = 8, .size = 4 },
+ }, {
+ .clk = {
+ .name = "sclk_irda",
+ .id = -1,
+ .ctrlbit = (1 << 10),
+ .enable = s5pc100_sclk0_ctrl,
+
+ },
+ .sources = &clk_src_mmc12,
+ .reg_src = { .reg = S5P_CLK_SRC1, .shift = 16, .size = 2 },
+ .reg_div = { .reg = S5P_CLK_DIV2, .shift = 16, .size = 4 },
+ }, {
+ .clk = {
+ .name = "sclk_pwi",
+ .id = -1,
+ .ctrlbit = (1 << 1),
+ .enable = s5pc100_sclk0_ctrl,
+
+ },
+ .sources = &clk_src_pwi,
+ .reg_src = { .reg = S5P_CLK_SRC3, .shift = 0, .size = 2 },
+ .reg_div = { .reg = S5P_CLK_DIV4, .shift = 0, .size = 3 },
+ }, {
+ .clk = {
+ .name = "sclk_uhost",
+ .id = -1,
+ .ctrlbit = (1 << 11),
+ .enable = s5pc100_sclk0_ctrl,
+
+ },
+ .sources = &clk_src_irda_usb,
+ .reg_src = { .reg = S5P_CLK_SRC1, .shift = 20, .size = 2 },
+ .reg_div = { .reg = S5P_CLK_DIV2, .shift = 20, .size = 4 },
+ },
+};
+
+/* Clock initialisation code */
+static struct clksrc_clk *sysclks[] = {
+ &clk_mout_apll,
+ &clk_mout_epll,
+ &clk_mout_mpll,
+ &clk_mout_hpll,
+ &clk_mout_href,
+ &clk_mout_48m,
+ &clk_div_apll,
+ &clk_div_arm,
+ &clk_div_d0_bus,
+ &clk_div_pclkd0,
+ &clk_div_secss,
+ &clk_div_apll2,
+ &clk_mout_am,
+ &clk_div_d1_bus,
+ &clk_div_mpll2,
+ &clk_div_mpll,
+ &clk_mout_onenand,
+ &clk_div_onenand,
+ &clk_div_pclkd1,
+ &clk_div_cam,
+ &clk_div_hdmi,
+};
+
+void __init_or_cpufreq s5pc100_setup_clocks(void)
+{
+ unsigned long xtal;
+ unsigned long arm;
+ unsigned long hclkd0;
+ unsigned long hclkd1;
+ unsigned long pclkd0;
+ unsigned long pclkd1;
+ unsigned long apll;
+ unsigned long mpll;
+ unsigned long epll;
+ unsigned long hpll;
+ unsigned int ptr;
+
+ /* Set S5PC100 functions for clk_fout_epll */
+ clk_fout_epll.enable = s5pc100_epll_enable;
+ clk_fout_epll.ops = &s5pc100_epll_ops;
+
+ printk(KERN_DEBUG "%s: registering clocks\n", __func__);
+
+ xtal = clk_get_rate(&clk_xtal);
+
+ printk(KERN_DEBUG "%s: xtal is %ld\n", __func__, xtal);
+
+ apll = s5p_get_pll65xx(xtal, __raw_readl(S5P_APLL_CON));
+ mpll = s5p_get_pll65xx(xtal, __raw_readl(S5P_MPLL_CON));
+ epll = s5p_get_pll65xx(xtal, __raw_readl(S5P_EPLL_CON));
+ hpll = s5p_get_pll65xx(xtal, __raw_readl(S5P_HPLL_CON));
+
+ printk(KERN_INFO "S5PC100: PLL settings, A=%ld.%ldMHz, M=%ld.%ldMHz, E=%ld.%ldMHz, H=%ld.%ldMHz\n",
+ print_mhz(apll), print_mhz(mpll), print_mhz(epll), print_mhz(hpll));
+
+ clk_fout_apll.rate = apll;
+ clk_fout_mpll.rate = mpll;
+ clk_fout_epll.rate = epll;
+ clk_mout_hpll.clk.rate = hpll;
+
+ for (ptr = 0; ptr < ARRAY_SIZE(clksrcs); ptr++)
+ s3c_set_clksrc(&clksrcs[ptr], true);
+
+ arm = clk_get_rate(&clk_div_arm.clk);
+ hclkd0 = clk_get_rate(&clk_div_d0_bus.clk);
+ pclkd0 = clk_get_rate(&clk_div_pclkd0.clk);
+ hclkd1 = clk_get_rate(&clk_div_d1_bus.clk);
+ pclkd1 = clk_get_rate(&clk_div_pclkd1.clk);
+
+ printk(KERN_INFO "S5PC100: HCLKD0=%ld.%ldMHz, HCLKD1=%ld.%ldMHz, PCLKD0=%ld.%ldMHz, PCLKD1=%ld.%ldMHz\n",
+ print_mhz(hclkd0), print_mhz(hclkd1), print_mhz(pclkd0), print_mhz(pclkd1));
+
+ clk_f.rate = arm;
+ clk_h.rate = hclkd1;
+ clk_p.rate = pclkd1;
+}
+
+/*
+ * The following clocks will be enabled during clock initialization.
+ */
+static struct clk init_clocks[] = {
+ {
+ .name = "tzic",
+ .id = -1,
+ .parent = &clk_div_d0_bus.clk,
+ .enable = s5pc100_d0_0_ctrl,
+ .ctrlbit = (1 << 1),
+ }, {
+ .name = "intc",
+ .id = -1,
+ .parent = &clk_div_d0_bus.clk,
+ .enable = s5pc100_d0_0_ctrl,
+ .ctrlbit = (1 << 0),
+ }, {
+ .name = "ebi",
+ .id = -1,
+ .parent = &clk_div_d0_bus.clk,
+ .enable = s5pc100_d0_1_ctrl,
+ .ctrlbit = (1 << 5),
+ }, {
+ .name = "intmem",
+ .id = -1,
+ .parent = &clk_div_d0_bus.clk,
+ .enable = s5pc100_d0_1_ctrl,
+ .ctrlbit = (1 << 4),
+ }, {
+ .name = "sromc",
+ .id = -1,
+ .parent = &clk_div_d0_bus.clk,
+ .enable = s5pc100_d0_1_ctrl,
+ .ctrlbit = (1 << 1),
+ }, {
+ .name = "dmc",
+ .id = -1,
+ .parent = &clk_div_d0_bus.clk,
+ .enable = s5pc100_d0_1_ctrl,
+ .ctrlbit = (1 << 0),
+ }, {
+ .name = "chipid",
+ .id = -1,
+ .parent = &clk_div_d0_bus.clk,
+ .enable = s5pc100_d0_1_ctrl,
+ .ctrlbit = (1 << 0),
+ }, {
+ .name = "gpio",
+ .id = -1,
+ .parent = &clk_div_d1_bus.clk,
+ .enable = s5pc100_d1_3_ctrl,
+ .ctrlbit = (1 << 1),
+ }, {
+ .name = "uart",
+ .id = 0,
+ .parent = &clk_div_d1_bus.clk,
+ .enable = s5pc100_d1_4_ctrl,
+ .ctrlbit = (1 << 0),
+ }, {
+ .name = "uart",
+ .id = 1,
+ .parent = &clk_div_d1_bus.clk,
+ .enable = s5pc100_d1_4_ctrl,
+ .ctrlbit = (1 << 1),
+ }, {
+ .name = "uart",
+ .id = 2,
+ .parent = &clk_div_d1_bus.clk,
+ .enable = s5pc100_d1_4_ctrl,
+ .ctrlbit = (1 << 2),
+ }, {
+ .name = "uart",
+ .id = 3,
+ .parent = &clk_div_d1_bus.clk,
+ .enable = s5pc100_d1_4_ctrl,
+ .ctrlbit = (1 << 3),
+ }, {
+ .name = "timers",
+ .id = -1,
+ .parent = &clk_div_d1_bus.clk,
+ .enable = s5pc100_d1_3_ctrl,
+ .ctrlbit = (1 << 6),
+ },
+};
+
+static struct clk *clks[] __initdata = {
+ &clk_ext,
+ &clk_i2scdclk0,
+ &clk_i2scdclk1,
+ &clk_i2scdclk2,
+ &clk_pcmcdclk0,
+ &clk_pcmcdclk1,
+};
+
+void __init s5pc100_register_clocks(void)
+{
+ struct clk *clkp;
+ int ret;
+ int ptr;
+
+ s3c24xx_register_clocks(clks, ARRAY_SIZE(clks));
+
+ for (ptr = 0; ptr < ARRAY_SIZE(sysclks); ptr++)
+ s3c_register_clksrc(sysclks[ptr], 1);
+
+ s3c_register_clksrc(clksrcs, ARRAY_SIZE(clksrcs));
+ s3c_register_clocks(init_clocks, ARRAY_SIZE(init_clocks));
+
+ clkp = init_clocks_disable;
+ for (ptr = 0; ptr < ARRAY_SIZE(init_clocks_disable); ptr++, clkp++) {
+
+ ret = s3c24xx_register_clock(clkp);
+ if (ret < 0) {
+ printk(KERN_ERR "Failed to register clock %s (%d)\n",
+ clkp->name, ret);
+ }
+ (clkp->enable)(clkp, 0);
+ }
+
+ s3c_pwmclk_init();
+}
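As a hedged usage sketch: once s5pc100_register_clocks() has run, a driver picks up one of the gate clocks above through the standard clk API; the "spi" clock with id 0 sits in init_clocks_disable, so it stays off until a driver enables it. The function name and error handling below are illustrative only, not part of this patch.

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_enable_spi0_clock(struct device *dev)
{
	/* Looked up by clock name and device id in the Samsung clock framework */
	struct clk *clk = clk_get(dev, "spi");

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	clk_enable(clk);	/* sets bit 6 of S5P_CLKGATE_D14, per the table above */
	dev_info(dev, "spi clock rate: %lu Hz\n", clk_get_rate(clk));

	return 0;
}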
diff --git a/arch/arm/mach-s5pc100/cpu.c b/arch/arm/mach-s5pc100/cpu.c
index d79e7574a852..7b5bdbc9a5df 100644
--- a/arch/arm/mach-s5pc100/cpu.c
+++ b/arch/arm/mach-s5pc100/cpu.c
@@ -22,47 +22,55 @@
#include <linux/serial_core.h>
#include <linux/platform_device.h>
-#include <asm/proc-fns.h>
-
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/irq.h>
+#include <asm/proc-fns.h>
+
#include <mach/hardware.h>
#include <mach/map.h>
#include <asm/irq.h>
-#include <plat/cpu-freq.h>
#include <plat/regs-serial.h>
-#include <plat/regs-power.h>
+#include <mach/regs-clock.h>
#include <plat/cpu.h>
#include <plat/devs.h>
#include <plat/clock.h>
-#include <plat/sdhci.h>
#include <plat/iic-core.h>
+#include <plat/sdhci.h>
+#include <plat/onenand-core.h>
+
#include <plat/s5pc100.h>
/* Initial IO mappings */
static struct map_desc s5pc100_iodesc[] __initdata = {
+ {
+ .virtual = (unsigned long)S5P_VA_SYSTIMER,
+ .pfn = __phys_to_pfn(S5PC100_PA_SYSTIMER),
+ .length = SZ_16K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = (unsigned long)VA_VIC2,
+ .pfn = __phys_to_pfn(S5P_PA_VIC2),
+ .length = SZ_16K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = (unsigned long)S5PC100_VA_OTHERS,
+ .pfn = __phys_to_pfn(S5PC100_PA_OTHERS),
+ .length = SZ_4K,
+ .type = MT_DEVICE,
+ }
};
static void s5pc100_idle(void)
{
- unsigned long tmp;
-
- tmp = __raw_readl(S5PC100_PWR_CFG);
- tmp &= ~S5PC100_PWRCFG_CFG_DEEP_IDLE;
- tmp &= ~S5PC100_PWRCFG_CFG_WFI_MASK;
- tmp |= S5PC100_PWRCFG_CFG_WFI_DEEP_IDLE;
- __raw_writel(tmp, S5PC100_PWR_CFG);
-
- tmp = __raw_readl(S5PC100_OTHERS);
- tmp |= S5PC100_PMU_INT_DISABLE;
- __raw_writel(tmp, S5PC100_OTHERS);
+ if (!need_resched())
+ cpu_do_idle();
- cpu_do_idle();
+ local_irq_enable();
}
/* s5pc100_map_io
@@ -82,26 +90,29 @@ void __init s5pc100_map_io(void)
/* the i2c devices are directly compatible with s3c2440 */
s3c_i2c0_setname("s3c2440-i2c");
s3c_i2c1_setname("s3c2440-i2c");
+
+ s3c_onenand_setname("s5pc100-onenand");
}
void __init s5pc100_init_clocks(int xtal)
{
- printk(KERN_DEBUG "%s: initialising clocks\n", __func__);
+ printk(KERN_DEBUG "%s: initializing clocks\n", __func__);
+
s3c24xx_register_baseclocks(xtal);
- s5pc1xx_register_clocks();
+ s5p_register_clocks(xtal);
s5pc100_register_clocks();
s5pc100_setup_clocks();
}
void __init s5pc100_init_irq(void)
{
- u32 vic_valid[] = {~0, ~0, ~0};
+ u32 vic[] = {~0, ~0, ~0};
/* VIC0, VIC1, and VIC2 are fully populated. */
- s5pc1xx_init_irq(vic_valid, ARRAY_SIZE(vic_valid));
+ s5p_init_irq(vic, ARRAY_SIZE(vic));
}
-struct sysdev_class s5pc100_sysclass = {
+static struct sysdev_class s5pc100_sysclass = {
.name = "s5pc100-core",
};
@@ -118,9 +129,10 @@ core_initcall(s5pc100_core_init);
int __init s5pc100_init(void)
{
- printk(KERN_DEBUG "S5PC100: Initialising architecture\n");
+ printk(KERN_INFO "S5PC100: Initializing architecture\n");
- s5pc1xx_idle = s5pc100_idle;
+ /* set idle function */
+ pm_idle = s5pc100_idle;
return sysdev_register(&s5pc100_sysdev);
}
diff --git a/arch/arm/mach-s5pc100/dev-audio.c b/arch/arm/mach-s5pc100/dev-audio.c
new file mode 100644
index 000000000000..18cfe9ae1936
--- /dev/null
+++ b/arch/arm/mach-s5pc100/dev-audio.c
@@ -0,0 +1,287 @@
+/* linux/arch/arm/mach-s5pc100/dev-audio.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co. Ltd
+ * Jaswinder Singh <jassi.brar@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+
+#include <plat/gpio-cfg.h>
+#include <plat/audio.h>
+
+#include <mach/gpio.h>
+#include <mach/map.h>
+#include <mach/dma.h>
+#include <mach/irqs.h>
+
+static int s5pc100_cfg_i2s(struct platform_device *pdev)
+{
+ /* configure GPIO for i2s port */
+ switch (pdev->id) {
+ case 1:
+ s3c_gpio_cfgpin(S5PC100_GPC(0), S3C_GPIO_SFN(2));
+ s3c_gpio_cfgpin(S5PC100_GPC(1), S3C_GPIO_SFN(2));
+ s3c_gpio_cfgpin(S5PC100_GPC(2), S3C_GPIO_SFN(2));
+ s3c_gpio_cfgpin(S5PC100_GPC(3), S3C_GPIO_SFN(2));
+ s3c_gpio_cfgpin(S5PC100_GPC(4), S3C_GPIO_SFN(2));
+ break;
+
+ case 2:
+ s3c_gpio_cfgpin(S5PC100_GPG3(0), S3C_GPIO_SFN(4));
+ s3c_gpio_cfgpin(S5PC100_GPG3(1), S3C_GPIO_SFN(4));
+ s3c_gpio_cfgpin(S5PC100_GPG3(2), S3C_GPIO_SFN(4));
+ s3c_gpio_cfgpin(S5PC100_GPG3(3), S3C_GPIO_SFN(4));
+ s3c_gpio_cfgpin(S5PC100_GPG3(4), S3C_GPIO_SFN(4));
+ break;
+
+ case -1: /* Dedicated pins */
+ break;
+
+ default:
+ printk(KERN_ERR "Invalid Device %d\n", pdev->id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static struct s3c_audio_pdata s3c_i2s_pdata = {
+ .cfg_gpio = s5pc100_cfg_i2s,
+};
+
+static struct resource s5pc100_iis0_resource[] = {
+ [0] = {
+ .start = S5PC100_PA_I2S0,
+ .end = S5PC100_PA_I2S0 + 0x100 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = DMACH_I2S0_TX,
+ .end = DMACH_I2S0_TX,
+ .flags = IORESOURCE_DMA,
+ },
+ [2] = {
+ .start = DMACH_I2S0_RX,
+ .end = DMACH_I2S0_RX,
+ .flags = IORESOURCE_DMA,
+ },
+};
+
+struct platform_device s5pc100_device_iis0 = {
+ .name = "s3c64xx-iis-v4",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(s5pc100_iis0_resource),
+ .resource = s5pc100_iis0_resource,
+ .dev = {
+ .platform_data = &s3c_i2s_pdata,
+ },
+};
+
+static struct resource s5pc100_iis1_resource[] = {
+ [0] = {
+ .start = S5PC100_PA_I2S1,
+ .end = S5PC100_PA_I2S1 + 0x100 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = DMACH_I2S1_TX,
+ .end = DMACH_I2S1_TX,
+ .flags = IORESOURCE_DMA,
+ },
+ [2] = {
+ .start = DMACH_I2S1_RX,
+ .end = DMACH_I2S1_RX,
+ .flags = IORESOURCE_DMA,
+ },
+};
+
+struct platform_device s5pc100_device_iis1 = {
+ .name = "s3c64xx-iis",
+ .id = 1,
+ .num_resources = ARRAY_SIZE(s5pc100_iis1_resource),
+ .resource = s5pc100_iis1_resource,
+ .dev = {
+ .platform_data = &s3c_i2s_pdata,
+ },
+};
+
+static struct resource s5pc100_iis2_resource[] = {
+ [0] = {
+ .start = S5PC100_PA_I2S2,
+ .end = S5PC100_PA_I2S2 + 0x100 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = DMACH_I2S2_TX,
+ .end = DMACH_I2S2_TX,
+ .flags = IORESOURCE_DMA,
+ },
+ [2] = {
+ .start = DMACH_I2S2_RX,
+ .end = DMACH_I2S2_RX,
+ .flags = IORESOURCE_DMA,
+ },
+};
+
+struct platform_device s5pc100_device_iis2 = {
+ .name = "s3c64xx-iis",
+ .id = 2,
+ .num_resources = ARRAY_SIZE(s5pc100_iis2_resource),
+ .resource = s5pc100_iis2_resource,
+ .dev = {
+ .platform_data = &s3c_i2s_pdata,
+ },
+};
+
+/* PCM Controller platform_devices */
+
+static int s5pc100_pcm_cfg_gpio(struct platform_device *pdev)
+{
+ switch (pdev->id) {
+ case 0:
+ s3c_gpio_cfgpin(S5PC100_GPG3(0), S3C_GPIO_SFN(5));
+ s3c_gpio_cfgpin(S5PC100_GPG3(1), S3C_GPIO_SFN(5));
+ s3c_gpio_cfgpin(S5PC100_GPG3(2), S3C_GPIO_SFN(5));
+ s3c_gpio_cfgpin(S5PC100_GPG3(3), S3C_GPIO_SFN(5));
+ s3c_gpio_cfgpin(S5PC100_GPG3(4), S3C_GPIO_SFN(5));
+ break;
+
+ case 1:
+ s3c_gpio_cfgpin(S5PC100_GPC(0), S3C_GPIO_SFN(3));
+ s3c_gpio_cfgpin(S5PC100_GPC(1), S3C_GPIO_SFN(3));
+ s3c_gpio_cfgpin(S5PC100_GPC(2), S3C_GPIO_SFN(3));
+ s3c_gpio_cfgpin(S5PC100_GPC(3), S3C_GPIO_SFN(3));
+ s3c_gpio_cfgpin(S5PC100_GPC(4), S3C_GPIO_SFN(3));
+ break;
+
+ default:
+ printk(KERN_DEBUG "Invalid PCM Controller number!");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static struct s3c_audio_pdata s3c_pcm_pdata = {
+ .cfg_gpio = s5pc100_pcm_cfg_gpio,
+};
+
+static struct resource s5pc100_pcm0_resource[] = {
+ [0] = {
+ .start = S5PC100_PA_PCM0,
+ .end = S5PC100_PA_PCM0 + 0x100 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = DMACH_PCM0_TX,
+ .end = DMACH_PCM0_TX,
+ .flags = IORESOURCE_DMA,
+ },
+ [2] = {
+ .start = DMACH_PCM0_RX,
+ .end = DMACH_PCM0_RX,
+ .flags = IORESOURCE_DMA,
+ },
+};
+
+struct platform_device s5pc100_device_pcm0 = {
+ .name = "samsung-pcm",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(s5pc100_pcm0_resource),
+ .resource = s5pc100_pcm0_resource,
+ .dev = {
+ .platform_data = &s3c_pcm_pdata,
+ },
+};
+
+static struct resource s5pc100_pcm1_resource[] = {
+ [0] = {
+ .start = S5PC100_PA_PCM1,
+ .end = S5PC100_PA_PCM1 + 0x100 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = DMACH_PCM1_TX,
+ .end = DMACH_PCM1_TX,
+ .flags = IORESOURCE_DMA,
+ },
+ [2] = {
+ .start = DMACH_PCM1_RX,
+ .end = DMACH_PCM1_RX,
+ .flags = IORESOURCE_DMA,
+ },
+};
+
+struct platform_device s5pc100_device_pcm1 = {
+ .name = "samsung-pcm",
+ .id = 1,
+ .num_resources = ARRAY_SIZE(s5pc100_pcm1_resource),
+ .resource = s5pc100_pcm1_resource,
+ .dev = {
+ .platform_data = &s3c_pcm_pdata,
+ },
+};
+
+/* AC97 Controller platform devices */
+
+static int s5pc100_ac97_cfg_gpio(struct platform_device *pdev)
+{
+ s3c_gpio_cfgpin(S5PC100_GPC(0), S3C_GPIO_SFN(4));
+ s3c_gpio_cfgpin(S5PC100_GPC(1), S3C_GPIO_SFN(4));
+ s3c_gpio_cfgpin(S5PC100_GPC(2), S3C_GPIO_SFN(4));
+ s3c_gpio_cfgpin(S5PC100_GPC(3), S3C_GPIO_SFN(4));
+ s3c_gpio_cfgpin(S5PC100_GPC(4), S3C_GPIO_SFN(4));
+
+ return 0;
+}
+
+static struct resource s5pc100_ac97_resource[] = {
+ [0] = {
+ .start = S5PC100_PA_AC97,
+ .end = S5PC100_PA_AC97 + 0x100 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = DMACH_AC97_PCMOUT,
+ .end = DMACH_AC97_PCMOUT,
+ .flags = IORESOURCE_DMA,
+ },
+ [2] = {
+ .start = DMACH_AC97_PCMIN,
+ .end = DMACH_AC97_PCMIN,
+ .flags = IORESOURCE_DMA,
+ },
+ [3] = {
+ .start = DMACH_AC97_MICIN,
+ .end = DMACH_AC97_MICIN,
+ .flags = IORESOURCE_DMA,
+ },
+ [4] = {
+ .start = IRQ_AC97,
+ .end = IRQ_AC97,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct s3c_audio_pdata s3c_ac97_pdata = {
+ .cfg_gpio = s5pc100_ac97_cfg_gpio,
+};
+
+static u64 s5pc100_ac97_dmamask = DMA_BIT_MASK(32);
+
+struct platform_device s5pc100_device_ac97 = {
+ .name = "s3c-ac97",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(s5pc100_ac97_resource),
+ .resource = s5pc100_ac97_resource,
+ .dev = {
+ .platform_data = &s3c_ac97_pdata,
+ .dma_mask = &s5pc100_ac97_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+};
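A brief sketch of the consumer side (hypothetical driver code, assuming only the resource layout defined above): each audio device publishes its memory region first and then its playback and capture DMA channels, so a sound driver's probe can read the DMACH_* ids back with platform_get_resource().

#include <linux/errno.h>
#include <linux/platform_device.h>

static int example_audio_probe(struct platform_device *pdev)
{
	/* First and second IORESOURCE_DMA entries are TX (playback) and RX (capture) */
	struct resource *tx = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	struct resource *rx = platform_get_resource(pdev, IORESOURCE_DMA, 1);

	if (!tx || !rx)
		return -ENXIO;

	/* tx->start and rx->start carry DMACH_* channel ids, e.g. DMACH_I2S0_TX */
	return 0;
}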
diff --git a/arch/arm/mach-s5pc100/dev-spi.c b/arch/arm/mach-s5pc100/dev-spi.c
new file mode 100644
index 000000000000..14618c346057
--- /dev/null
+++ b/arch/arm/mach-s5pc100/dev-spi.c
@@ -0,0 +1,233 @@
+/* linux/arch/arm/mach-s5pc100/dev-spi.c
+ *
+ * Copyright (C) 2010 Samsung Electronics Co. Ltd.
+ * Jaswinder Singh <jassi.brar@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+
+#include <mach/dma.h>
+#include <mach/map.h>
+#include <mach/gpio.h>
+#include <mach/spi-clocks.h>
+
+#include <plat/s3c64xx-spi.h>
+#include <plat/gpio-cfg.h>
+#include <plat/irqs.h>
+
+static char *spi_src_clks[] = {
+ [S5PC100_SPI_SRCCLK_PCLK] = "pclk",
+ [S5PC100_SPI_SRCCLK_48M] = "spi_48m",
+ [S5PC100_SPI_SRCCLK_SPIBUS] = "spi_bus",
+};
+
+/* SPI Controller platform_devices */
+
+/* Since we emulate multi-cs capability, we do not touch the CS.
+ * The emulated CS is toggled by a board-specific mechanism: it may be
+ * an immediate GPIO, a signal routed through some other chip in
+ * between, or something else entirely.
+ * We simply make no assumptions about the CS line here.
+ */
+static int s5pc100_spi_cfg_gpio(struct platform_device *pdev)
+{
+ switch (pdev->id) {
+ case 0:
+ s3c_gpio_cfgpin(S5PC100_GPB(0), S3C_GPIO_SFN(2));
+ s3c_gpio_cfgpin(S5PC100_GPB(1), S3C_GPIO_SFN(2));
+ s3c_gpio_cfgpin(S5PC100_GPB(2), S3C_GPIO_SFN(2));
+ s3c_gpio_setpull(S5PC100_GPB(0), S3C_GPIO_PULL_UP);
+ s3c_gpio_setpull(S5PC100_GPB(1), S3C_GPIO_PULL_UP);
+ s3c_gpio_setpull(S5PC100_GPB(2), S3C_GPIO_PULL_UP);
+ break;
+
+ case 1:
+ s3c_gpio_cfgpin(S5PC100_GPB(4), S3C_GPIO_SFN(2));
+ s3c_gpio_cfgpin(S5PC100_GPB(5), S3C_GPIO_SFN(2));
+ s3c_gpio_cfgpin(S5PC100_GPB(6), S3C_GPIO_SFN(2));
+ s3c_gpio_setpull(S5PC100_GPB(4), S3C_GPIO_PULL_UP);
+ s3c_gpio_setpull(S5PC100_GPB(5), S3C_GPIO_PULL_UP);
+ s3c_gpio_setpull(S5PC100_GPB(6), S3C_GPIO_PULL_UP);
+ break;
+
+ case 2:
+ s3c_gpio_cfgpin(S5PC100_GPG3(0), S3C_GPIO_SFN(3));
+ s3c_gpio_cfgpin(S5PC100_GPG3(2), S3C_GPIO_SFN(3));
+ s3c_gpio_cfgpin(S5PC100_GPG3(3), S3C_GPIO_SFN(3));
+ s3c_gpio_setpull(S5PC100_GPG3(0), S3C_GPIO_PULL_UP);
+ s3c_gpio_setpull(S5PC100_GPG3(2), S3C_GPIO_PULL_UP);
+ s3c_gpio_setpull(S5PC100_GPG3(3), S3C_GPIO_PULL_UP);
+ break;
+
+ default:
+ dev_err(&pdev->dev, "Invalid SPI Controller number!");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static struct resource s5pc100_spi0_resource[] = {
+ [0] = {
+ .start = S5PC100_PA_SPI0,
+ .end = S5PC100_PA_SPI0 + 0x100 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = DMACH_SPI0_TX,
+ .end = DMACH_SPI0_TX,
+ .flags = IORESOURCE_DMA,
+ },
+ [2] = {
+ .start = DMACH_SPI0_RX,
+ .end = DMACH_SPI0_RX,
+ .flags = IORESOURCE_DMA,
+ },
+ [3] = {
+ .start = IRQ_SPI0,
+ .end = IRQ_SPI0,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct s3c64xx_spi_info s5pc100_spi0_pdata = {
+ .cfg_gpio = s5pc100_spi_cfg_gpio,
+ .fifo_lvl_mask = 0x7f,
+ .rx_lvl_offset = 13,
+ .high_speed = 1,
+};
+
+static u64 spi_dmamask = DMA_BIT_MASK(32);
+
+struct platform_device s5pc100_device_spi0 = {
+ .name = "s3c64xx-spi",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(s5pc100_spi0_resource),
+ .resource = s5pc100_spi0_resource,
+ .dev = {
+ .dma_mask = &spi_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = &s5pc100_spi0_pdata,
+ },
+};
+
+static struct resource s5pc100_spi1_resource[] = {
+ [0] = {
+ .start = S5PC100_PA_SPI1,
+ .end = S5PC100_PA_SPI1 + 0x100 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = DMACH_SPI1_TX,
+ .end = DMACH_SPI1_TX,
+ .flags = IORESOURCE_DMA,
+ },
+ [2] = {
+ .start = DMACH_SPI1_RX,
+ .end = DMACH_SPI1_RX,
+ .flags = IORESOURCE_DMA,
+ },
+ [3] = {
+ .start = IRQ_SPI1,
+ .end = IRQ_SPI1,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct s3c64xx_spi_info s5pc100_spi1_pdata = {
+ .cfg_gpio = s5pc100_spi_cfg_gpio,
+ .fifo_lvl_mask = 0x7f,
+ .rx_lvl_offset = 13,
+ .high_speed = 1,
+};
+
+struct platform_device s5pc100_device_spi1 = {
+ .name = "s3c64xx-spi",
+ .id = 1,
+ .num_resources = ARRAY_SIZE(s5pc100_spi1_resource),
+ .resource = s5pc100_spi1_resource,
+ .dev = {
+ .dma_mask = &spi_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = &s5pc100_spi1_pdata,
+ },
+};
+
+static struct resource s5pc100_spi2_resource[] = {
+ [0] = {
+ .start = S5PC100_PA_SPI2,
+ .end = S5PC100_PA_SPI2 + 0x100 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = DMACH_SPI2_TX,
+ .end = DMACH_SPI2_TX,
+ .flags = IORESOURCE_DMA,
+ },
+ [2] = {
+ .start = DMACH_SPI2_RX,
+ .end = DMACH_SPI2_RX,
+ .flags = IORESOURCE_DMA,
+ },
+ [3] = {
+ .start = IRQ_SPI2,
+ .end = IRQ_SPI2,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct s3c64xx_spi_info s5pc100_spi2_pdata = {
+ .cfg_gpio = s5pc100_spi_cfg_gpio,
+ .fifo_lvl_mask = 0x7f,
+ .rx_lvl_offset = 13,
+ .high_speed = 1,
+};
+
+struct platform_device s5pc100_device_spi2 = {
+ .name = "s3c64xx-spi",
+ .id = 2,
+ .num_resources = ARRAY_SIZE(s5pc100_spi2_resource),
+ .resource = s5pc100_spi2_resource,
+ .dev = {
+ .dma_mask = &spi_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = &s5pc100_spi2_pdata,
+ },
+};
+
+void __init s5pc100_spi_set_info(int cntrlr, int src_clk_nr, int num_cs)
+{
+ struct s3c64xx_spi_info *pd;
+
+ /* Reject invalid configuration */
+ if (!num_cs || src_clk_nr < 0
+ || src_clk_nr > S5PC100_SPI_SRCCLK_SPIBUS) {
+ printk(KERN_ERR "%s: Invalid SPI configuration\n", __func__);
+ return;
+ }
+
+ switch (cntrlr) {
+ case 0:
+ pd = &s5pc100_spi0_pdata;
+ break;
+ case 1:
+ pd = &s5pc100_spi1_pdata;
+ break;
+ case 2:
+ pd = &s5pc100_spi2_pdata;
+ break;
+ default:
+ printk(KERN_ERR "%s: Invalid SPI controller(%d)\n",
+ __func__, cntrlr);
+ return;
+ }
+
+ pd->num_cs = num_cs;
+ pd->src_clk_nr = src_clk_nr;
+ pd->src_clk_name = spi_src_clks[src_clk_nr];
+}
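A minimal board-level sketch tying the pieces together; the controller choice, clock source and single chip select are assumptions, not taken from this patch, and chip-select toggling itself stays board specific, as the comment at the top of this file notes.

/* Hypothetical SMDKC100 board code; assumes s5pc100_device_spi1 is
 * exported via <plat/devs.h> and <mach/spi-clocks.h> is included. */
static void __init smdkc100_spi_init(void)
{
	/* SPI controller 1, clocked from the gated 48 MHz source, one chip select */
	s5pc100_spi_set_info(1, S5PC100_SPI_SRCCLK_48M, 1);
	platform_device_register(&s5pc100_device_spi1);
}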
diff --git a/arch/arm/mach-s5pc100/dma.c b/arch/arm/mach-s5pc100/dma.c
new file mode 100644
index 000000000000..0f5517571e2c
--- /dev/null
+++ b/arch/arm/mach-s5pc100/dma.c
@@ -0,0 +1,167 @@
+/*
+ * Copyright (C) 2010 Samsung Electronics Co. Ltd.
+ * Jaswinder Singh <jassi.brar@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+
+#include <plat/devs.h>
+
+#include <mach/map.h>
+#include <mach/irqs.h>
+
+#include <plat/s3c-pl330-pdata.h>
+
+static u64 dma_dmamask = DMA_BIT_MASK(32);
+
+static struct resource s5pc100_pdma0_resource[] = {
+ [0] = {
+ .start = S5PC100_PA_PDMA0,
+		.end	= S5PC100_PA_PDMA0 + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = IRQ_PDMA0,
+ .end = IRQ_PDMA0,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct s3c_pl330_platdata s5pc100_pdma0_pdata = {
+ .peri = {
+ [0] = DMACH_UART0_RX,
+ [1] = DMACH_UART0_TX,
+ [2] = DMACH_UART1_RX,
+ [3] = DMACH_UART1_TX,
+ [4] = DMACH_UART2_RX,
+ [5] = DMACH_UART2_TX,
+ [6] = DMACH_UART3_RX,
+ [7] = DMACH_UART3_TX,
+ [8] = DMACH_IRDA,
+ [9] = DMACH_I2S0_RX,
+ [10] = DMACH_I2S0_TX,
+ [11] = DMACH_I2S0S_TX,
+ [12] = DMACH_I2S1_RX,
+ [13] = DMACH_I2S1_TX,
+ [14] = DMACH_I2S2_RX,
+ [15] = DMACH_I2S2_TX,
+ [16] = DMACH_SPI0_RX,
+ [17] = DMACH_SPI0_TX,
+ [18] = DMACH_SPI1_RX,
+ [19] = DMACH_SPI1_TX,
+ [20] = DMACH_SPI2_RX,
+ [21] = DMACH_SPI2_TX,
+ [22] = DMACH_AC97_MICIN,
+ [23] = DMACH_AC97_PCMIN,
+ [24] = DMACH_AC97_PCMOUT,
+ [25] = DMACH_EXTERNAL,
+ [26] = DMACH_PWM,
+ [27] = DMACH_SPDIF,
+ [28] = DMACH_HSI_RX,
+ [29] = DMACH_HSI_TX,
+ [30] = DMACH_MAX,
+ [31] = DMACH_MAX,
+ },
+};
+
+static struct platform_device s5pc100_device_pdma0 = {
+ .name = "s3c-pl330",
+ .id = 1,
+ .num_resources = ARRAY_SIZE(s5pc100_pdma0_resource),
+ .resource = s5pc100_pdma0_resource,
+ .dev = {
+ .dma_mask = &dma_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = &s5pc100_pdma0_pdata,
+ },
+};
+
+static struct resource s5pc100_pdma1_resource[] = {
+ [0] = {
+ .start = S5PC100_PA_PDMA1,
+		.end	= S5PC100_PA_PDMA1 + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = IRQ_PDMA1,
+ .end = IRQ_PDMA1,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct s3c_pl330_platdata s5pc100_pdma1_pdata = {
+ .peri = {
+ [0] = DMACH_UART0_RX,
+ [1] = DMACH_UART0_TX,
+ [2] = DMACH_UART1_RX,
+ [3] = DMACH_UART1_TX,
+ [4] = DMACH_UART2_RX,
+ [5] = DMACH_UART2_TX,
+ [6] = DMACH_UART3_RX,
+ [7] = DMACH_UART3_TX,
+ [8] = DMACH_IRDA,
+ [9] = DMACH_I2S0_RX,
+ [10] = DMACH_I2S0_TX,
+ [11] = DMACH_I2S0S_TX,
+ [12] = DMACH_I2S1_RX,
+ [13] = DMACH_I2S1_TX,
+ [14] = DMACH_I2S2_RX,
+ [15] = DMACH_I2S2_TX,
+ [16] = DMACH_SPI0_RX,
+ [17] = DMACH_SPI0_TX,
+ [18] = DMACH_SPI1_RX,
+ [19] = DMACH_SPI1_TX,
+ [20] = DMACH_SPI2_RX,
+ [21] = DMACH_SPI2_TX,
+ [22] = DMACH_PCM0_RX,
+ [23] = DMACH_PCM0_TX,
+ [24] = DMACH_PCM1_RX,
+ [25] = DMACH_PCM1_TX,
+ [26] = DMACH_MSM_REQ0,
+ [27] = DMACH_MSM_REQ1,
+ [28] = DMACH_MSM_REQ2,
+ [29] = DMACH_MSM_REQ3,
+ [30] = DMACH_MAX,
+ [31] = DMACH_MAX,
+ },
+};
+
+static struct platform_device s5pc100_device_pdma1 = {
+ .name = "s3c-pl330",
+ .id = 2,
+ .num_resources = ARRAY_SIZE(s5pc100_pdma1_resource),
+ .resource = s5pc100_pdma1_resource,
+ .dev = {
+ .dma_mask = &dma_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = &s5pc100_pdma1_pdata,
+ },
+};
+
+static struct platform_device *s5pc100_dmacs[] __initdata = {
+ &s5pc100_device_pdma0,
+ &s5pc100_device_pdma1,
+};
+
+static int __init s5pc100_dma_init(void)
+{
+ platform_add_devices(s5pc100_dmacs, ARRAY_SIZE(s5pc100_dmacs));
+
+ return 0;
+}
+arch_initcall(s5pc100_dma_init);
diff --git a/arch/arm/plat-s5pc1xx/gpiolib.c b/arch/arm/mach-s5pc100/gpiolib.c
index 1ffc57ac293d..0fab7f2cd8bf 100644
--- a/arch/arm/plat-s5pc1xx/gpiolib.c
+++ b/arch/arm/mach-s5pc100/gpiolib.c
@@ -1,10 +1,10 @@
/*
- * arch/arm/plat-s5pc1xx/gpiolib.c
+ * arch/arm/mach-s5pc100/gpiolib.c
*
* Copyright 2009 Samsung Electronics Co
* Kyungmin Park <kyungmin.park@samsung.com>
*
- * S5PC1XX - GPIOlib support
+ * S5PC100 - GPIOlib support
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -17,11 +17,11 @@
#include <linux/gpio.h>
#include <mach/map.h>
+#include <mach/regs-gpio.h>
#include <plat/gpio-core.h>
#include <plat/gpio-cfg.h>
#include <plat/gpio-cfg-helpers.h>
-#include <plat/regs-gpio.h>
/* S5PC100 GPIO bank summary:
*
@@ -61,80 +61,12 @@
* L3 8 4Bit None
*/
-#define OFF_GPCON (0x00)
-#define OFF_GPDAT (0x04)
-
-#define con_4bit_shift(__off) ((__off) * 4)
-
-#if 1
-#define gpio_dbg(x...) do { } while (0)
-#else
-#define gpio_dbg(x...) printk(KERN_DEBUG x)
-#endif
-
-/* The s5pc1xx_gpiolib routines are to control the gpio banks where
- * the gpio configuration register (GPxCON) has 4 bits per GPIO, as the
- * following example:
- *
- * base + 0x00: Control register, 4 bits per gpio
- * gpio n: 4 bits starting at (4*n)
- * 0000 = input, 0001 = output, others mean special-function
- * base + 0x04: Data register, 1 bit per gpio
- * bit n: data bit n
- *
- * Note, since the data register is one bit per gpio and is at base + 0x4
- * we can use s3c_gpiolib_get and s3c_gpiolib_set to change the state of
- * the output.
- */
-
-static int s5pc1xx_gpiolib_input(struct gpio_chip *chip, unsigned offset)
-{
- struct s3c_gpio_chip *ourchip = to_s3c_gpio(chip);
- void __iomem *base = ourchip->base;
- unsigned long con;
-
- con = __raw_readl(base + OFF_GPCON);
- con &= ~(0xf << con_4bit_shift(offset));
- __raw_writel(con, base + OFF_GPCON);
-
- gpio_dbg("%s: %p: CON now %08lx\n", __func__, base, con);
-
- return 0;
-}
-
-static int s5pc1xx_gpiolib_output(struct gpio_chip *chip,
- unsigned offset, int value)
-{
- struct s3c_gpio_chip *ourchip = to_s3c_gpio(chip);
- void __iomem *base = ourchip->base;
- unsigned long con;
- unsigned long dat;
-
- con = __raw_readl(base + OFF_GPCON);
- con &= ~(0xf << con_4bit_shift(offset));
- con |= 0x1 << con_4bit_shift(offset);
-
- dat = __raw_readl(base + OFF_GPDAT);
- if (value)
- dat |= 1 << offset;
- else
- dat &= ~(1 << offset);
-
- __raw_writel(dat, base + OFF_GPDAT);
- __raw_writel(con, base + OFF_GPCON);
- __raw_writel(dat, base + OFF_GPDAT);
-
- gpio_dbg("%s: %p: CON %08lx, DAT %08lx\n", __func__, base, con, dat);
-
- return 0;
-}
-
-static int s5pc1xx_gpiolib_to_irq(struct gpio_chip *chip, unsigned int offset)
+static int s5pc100_gpiolib_to_irq(struct gpio_chip *chip, unsigned int offset)
{
return S3C_IRQ_GPIO(chip->base + offset);
}
-static int s5pc1xx_gpiolib_to_eint(struct gpio_chip *chip, unsigned int offset)
+static int s5pc100_gpiolib_to_eint(struct gpio_chip *chip, unsigned int offset)
{
int base;
@@ -449,55 +381,46 @@ static struct s3c_gpio_chip s5pc100_gpio_chips[] = {
};
/* FIXME move from irq-gpio.c */
-extern struct irq_chip s5pc1xx_gpioint;
-extern void s5pc1xx_irq_gpioint_handler(unsigned int irq, struct irq_desc *desc);
+extern struct irq_chip s5pc100_gpioint;
+extern void s5pc100_irq_gpioint_handler(unsigned int irq, struct irq_desc *desc);
-static __init void s5pc1xx_gpiolib_link(struct s3c_gpio_chip *chip)
+static __init void s5pc100_gpiolib_link(struct s3c_gpio_chip *chip)
{
- chip->chip.direction_input = s5pc1xx_gpiolib_input;
- chip->chip.direction_output = s5pc1xx_gpiolib_output;
- chip->pm = __gpio_pm(&s3c_gpio_pm_4bit);
-
/* Interrupt */
if (chip->config == &gpio_cfg) {
int i, irq;
- chip->chip.to_irq = s5pc1xx_gpiolib_to_irq;
+ chip->chip.to_irq = s5pc100_gpiolib_to_irq;
for (i = 0; i < chip->chip.ngpio; i++) {
irq = S3C_IRQ_GPIO_BASE + chip->chip.base + i;
- set_irq_chip(irq, &s5pc1xx_gpioint);
+ set_irq_chip(irq, &s5pc100_gpioint);
set_irq_data(irq, &chip->chip);
set_irq_handler(irq, handle_level_irq);
set_irq_flags(irq, IRQF_VALID);
}
- } else if (chip->config == &gpio_cfg_eint)
- chip->chip.to_irq = s5pc1xx_gpiolib_to_eint;
-}
-
-static __init void s5pc1xx_gpiolib_add(struct s3c_gpio_chip *chips,
- int nr_chips,
- void (*fn)(struct s3c_gpio_chip *))
-{
- for (; nr_chips > 0; nr_chips--, chips++) {
- if (fn)
- (fn)(chips);
- s3c_gpiolib_add(chips);
+ } else if (chip->config == &gpio_cfg_eint) {
+ chip->chip.to_irq = s5pc100_gpiolib_to_eint;
}
}
-static __init int s5pc1xx_gpiolib_init(void)
+static __init int s5pc100_gpiolib_init(void)
{
- struct s3c_gpio_chip *chips;
+ struct s3c_gpio_chip *chip;
int nr_chips;
- chips = s5pc100_gpio_chips;
- nr_chips = ARRAY_SIZE(s5pc100_gpio_chips);
+ chip = s5pc100_gpio_chips;
+ nr_chips = ARRAY_SIZE(s5pc100_gpio_chips);
+
+ for (; nr_chips > 0; nr_chips--, chip++)
+ s5pc100_gpiolib_link(chip);
+
+ samsung_gpiolib_add_4bit_chips(s5pc100_gpio_chips,
+ ARRAY_SIZE(s5pc100_gpio_chips));
- s5pc1xx_gpiolib_add(chips, nr_chips, s5pc1xx_gpiolib_link);
/* Interrupt */
- set_irq_chained_handler(IRQ_GPIOINT, s5pc1xx_irq_gpioint_handler);
+ set_irq_chained_handler(IRQ_GPIOINT, s5pc100_irq_gpioint_handler);
return 0;
}
-core_initcall(s5pc1xx_gpiolib_init);
+core_initcall(s5pc100_gpiolib_init);
diff --git a/arch/arm/mach-s5pc100/include/mach/debug-macro.S b/arch/arm/mach-s5pc100/include/mach/debug-macro.S
index e181f5789482..70e02e91ee3c 100644
--- a/arch/arm/mach-s5pc100/include/mach/debug-macro.S
+++ b/arch/arm/mach-s5pc100/include/mach/debug-macro.S
@@ -22,12 +22,14 @@
* aligned and add in the offset when we load the value here.
*/
- .macro addruart, rx, tmp
+ .macro addruart, rx, rtmp
mrc p15, 0, \rx, c1, c0
tst \rx, #1
ldreq \rx, = S3C_PA_UART
- ldrne \rx, = (S3C_VA_UART + S3C_PA_UART & 0xfffff)
+ ldrne \rx, = S3C_VA_UART
+#if CONFIG_DEBUG_S3C_UART != 0
add \rx, \rx, #(0x400 * CONFIG_DEBUG_S3C_UART)
+#endif
.endm
 /* include the rest of the code which will do the work, we're only
diff --git a/arch/arm/mach-s5pc100/include/mach/dma.h b/arch/arm/mach-s5pc100/include/mach/dma.h
new file mode 100644
index 000000000000..81209eb1409b
--- /dev/null
+++ b/arch/arm/mach-s5pc100/include/mach/dma.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2010 Samsung Electronics Co. Ltd.
+ * Jaswinder Singh <jassi.brar@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __MACH_DMA_H
+#define __MACH_DMA_H
+
+/* This platform uses the common S3C DMA API driver for PL330 */
+#include <plat/s3c-dma-pl330.h>
+
+#endif /* __MACH_DMA_H */
diff --git a/arch/arm/mach-s5pc100/include/mach/entry-macro.S b/arch/arm/mach-s5pc100/include/mach/entry-macro.S
index 67131939e626..ba76af052c81 100644
--- a/arch/arm/mach-s5pc100/include/mach/entry-macro.S
+++ b/arch/arm/mach-s5pc100/include/mach/entry-macro.S
@@ -20,7 +20,7 @@
.endm
.macro get_irqnr_preamble, base, tmp
- ldr \base, =S3C_VA_VIC0
+ ldr \base, =VA_VIC0
.endm
.macro arch_ret_to_user, tmp1, tmp2
@@ -29,18 +29,18 @@
.macro get_irqnr_and_base, irqnr, irqstat, base, tmp
@ check the vic0
- mov \irqnr, # S3C_IRQ_OFFSET + 31
+ mov \irqnr, # S5P_IRQ_OFFSET + 31
ldr \irqstat, [ \base, # VIC_IRQ_STATUS ]
teq \irqstat, #0
@ otherwise try vic1
- addeq \tmp, \base, #(S3C_VA_VIC1 - S3C_VA_VIC0)
+ addeq \tmp, \base, #(VA_VIC1 - VA_VIC0)
addeq \irqnr, \irqnr, #32
ldreq \irqstat, [ \tmp, # VIC_IRQ_STATUS ]
teqeq \irqstat, #0
@ otherwise try vic2
- addeq \tmp, \base, #(S3C_VA_VIC2 - S3C_VA_VIC0)
+ addeq \tmp, \base, #(VA_VIC2 - VA_VIC0)
addeq \irqnr, \irqnr, #32
ldreq \irqstat, [ \tmp, # VIC_IRQ_STATUS ]
teqeq \irqstat, #0
diff --git a/arch/arm/mach-s5pc100/include/mach/gpio.h b/arch/arm/mach-s5pc100/include/mach/gpio.h
index 2c4cbe8ee6b7..71ae1f52df1d 100644
--- a/arch/arm/mach-s5pc100/include/mach/gpio.h
+++ b/arch/arm/mach-s5pc100/include/mach/gpio.h
@@ -12,6 +12,9 @@
* published by the Free Software Foundation.
*/
+#ifndef __ASM_ARCH_GPIO_H
+#define __ASM_ARCH_GPIO_H __FILE__
+
#define gpio_get_value __gpio_get_value
#define gpio_set_value __gpio_set_value
#define gpio_cansleep __gpio_cansleep
@@ -52,11 +55,6 @@
#define S5PC100_GPIO_L2_NR (8)
#define S5PC100_GPIO_L3_NR (8)
#define S5PC100_GPIO_L4_NR (8)
-#define S5PC100_GPIO_MP00_NR (8)
-#define S5PC100_GPIO_MP01_NR (8)
-#define S5PC100_GPIO_MP02_NR (8)
-#define S5PC100_GPIO_MP03_NR (8)
-#define S5PC100_GPIO_MP04_NR (5)
 /* GPIO bank numbers */
@@ -65,50 +63,45 @@
* change from one gpio bank to another can be caught.
*/
-#define S5PC1XX_GPIO_NEXT(__gpio) \
+#define S5PC100_GPIO_NEXT(__gpio) \
((__gpio##_START) + (__gpio##_NR) + CONFIG_S3C_GPIO_SPACE + 1)
-enum s3c_gpio_number {
+enum s5p_gpio_number {
S5PC100_GPIO_A0_START = 0,
- S5PC100_GPIO_A1_START = S5PC1XX_GPIO_NEXT(S5PC100_GPIO_A0),
- S5PC100_GPIO_B_START = S5PC1XX_GPIO_NEXT(S5PC100_GPIO_A1),
- S5PC100_GPIO_C_START = S5PC1XX_GPIO_NEXT(S5PC100_GPIO_B),
- S5PC100_GPIO_D_START = S5PC1XX_GPIO_NEXT(S5PC100_GPIO_C),
- S5PC100_GPIO_E0_START = S5PC1XX_GPIO_NEXT(S5PC100_GPIO_D),
- S5PC100_GPIO_E1_START = S5PC1XX_GPIO_NEXT(S5PC100_GPIO_E0),
- S5PC100_GPIO_F0_START = S5PC1XX_GPIO_NEXT(S5PC100_GPIO_E1),
- S5PC100_GPIO_F1_START = S5PC1XX_GPIO_NEXT(S5PC100_GPIO_F0),
- S5PC100_GPIO_F2_START = S5PC1XX_GPIO_NEXT(S5PC100_GPIO_F1),
- S5PC100_GPIO_F3_START = S5PC1XX_GPIO_NEXT(S5PC100_GPIO_F2),
- S5PC100_GPIO_G0_START = S5PC1XX_GPIO_NEXT(S5PC100_GPIO_F3),
- S5PC100_GPIO_G1_START = S5PC1XX_GPIO_NEXT(S5PC100_GPIO_G0),
- S5PC100_GPIO_G2_START = S5PC1XX_GPIO_NEXT(S5PC100_GPIO_G1),
- S5PC100_GPIO_G3_START = S5PC1XX_GPIO_NEXT(S5PC100_GPIO_G2),
- S5PC100_GPIO_H0_START = S5PC1XX_GPIO_NEXT(S5PC100_GPIO_G3),
- S5PC100_GPIO_H1_START = S5PC1XX_GPIO_NEXT(S5PC100_GPIO_H0),
- S5PC100_GPIO_H2_START = S5PC1XX_GPIO_NEXT(S5PC100_GPIO_H1),
- S5PC100_GPIO_H3_START = S5PC1XX_GPIO_NEXT(S5PC100_GPIO_H2),
- S5PC100_GPIO_I_START = S5PC1XX_GPIO_NEXT(S5PC100_GPIO_H3),
- S5PC100_GPIO_J0_START = S5PC1XX_GPIO_NEXT(S5PC100_GPIO_I),
- S5PC100_GPIO_J1_START = S5PC1XX_GPIO_NEXT(S5PC100_GPIO_J0),
- S5PC100_GPIO_J2_START = S5PC1XX_GPIO_NEXT(S5PC100_GPIO_J1),
- S5PC100_GPIO_J3_START = S5PC1XX_GPIO_NEXT(S5PC100_GPIO_J2),
- S5PC100_GPIO_J4_START = S5PC1XX_GPIO_NEXT(S5PC100_GPIO_J3),
- S5PC100_GPIO_K0_START = S5PC1XX_GPIO_NEXT(S5PC100_GPIO_J4),
- S5PC100_GPIO_K1_START = S5PC1XX_GPIO_NEXT(S5PC100_GPIO_K0),
- S5PC100_GPIO_K2_START = S5PC1XX_GPIO_NEXT(S5PC100_GPIO_K1),
- S5PC100_GPIO_K3_START = S5PC1XX_GPIO_NEXT(S5PC100_GPIO_K2),
- S5PC100_GPIO_L0_START = S5PC1XX_GPIO_NEXT(S5PC100_GPIO_K3),
- S5PC100_GPIO_L1_START = S5PC1XX_GPIO_NEXT(S5PC100_GPIO_L0),
- S5PC100_GPIO_L2_START = S5PC1XX_GPIO_NEXT(S5PC100_GPIO_L1),
- S5PC100_GPIO_L3_START = S5PC1XX_GPIO_NEXT(S5PC100_GPIO_L2),
- S5PC100_GPIO_L4_START = S5PC1XX_GPIO_NEXT(S5PC100_GPIO_L3),
- S5PC100_GPIO_MP00_START = S5PC1XX_GPIO_NEXT(S5PC100_GPIO_L4),
- S5PC100_GPIO_MP01_START = S5PC1XX_GPIO_NEXT(S5PC100_GPIO_MP00),
- S5PC100_GPIO_MP02_START = S5PC1XX_GPIO_NEXT(S5PC100_GPIO_MP01),
- S5PC100_GPIO_MP03_START = S5PC1XX_GPIO_NEXT(S5PC100_GPIO_MP02),
- S5PC100_GPIO_MP04_START = S5PC1XX_GPIO_NEXT(S5PC100_GPIO_MP03),
- S5PC100_GPIO_END = S5PC1XX_GPIO_NEXT(S5PC100_GPIO_MP04),
+ S5PC100_GPIO_A1_START = S5PC100_GPIO_NEXT(S5PC100_GPIO_A0),
+ S5PC100_GPIO_B_START = S5PC100_GPIO_NEXT(S5PC100_GPIO_A1),
+ S5PC100_GPIO_C_START = S5PC100_GPIO_NEXT(S5PC100_GPIO_B),
+ S5PC100_GPIO_D_START = S5PC100_GPIO_NEXT(S5PC100_GPIO_C),
+ S5PC100_GPIO_E0_START = S5PC100_GPIO_NEXT(S5PC100_GPIO_D),
+ S5PC100_GPIO_E1_START = S5PC100_GPIO_NEXT(S5PC100_GPIO_E0),
+ S5PC100_GPIO_F0_START = S5PC100_GPIO_NEXT(S5PC100_GPIO_E1),
+ S5PC100_GPIO_F1_START = S5PC100_GPIO_NEXT(S5PC100_GPIO_F0),
+ S5PC100_GPIO_F2_START = S5PC100_GPIO_NEXT(S5PC100_GPIO_F1),
+ S5PC100_GPIO_F3_START = S5PC100_GPIO_NEXT(S5PC100_GPIO_F2),
+ S5PC100_GPIO_G0_START = S5PC100_GPIO_NEXT(S5PC100_GPIO_F3),
+ S5PC100_GPIO_G1_START = S5PC100_GPIO_NEXT(S5PC100_GPIO_G0),
+ S5PC100_GPIO_G2_START = S5PC100_GPIO_NEXT(S5PC100_GPIO_G1),
+ S5PC100_GPIO_G3_START = S5PC100_GPIO_NEXT(S5PC100_GPIO_G2),
+ S5PC100_GPIO_H0_START = S5PC100_GPIO_NEXT(S5PC100_GPIO_G3),
+ S5PC100_GPIO_H1_START = S5PC100_GPIO_NEXT(S5PC100_GPIO_H0),
+ S5PC100_GPIO_H2_START = S5PC100_GPIO_NEXT(S5PC100_GPIO_H1),
+ S5PC100_GPIO_H3_START = S5PC100_GPIO_NEXT(S5PC100_GPIO_H2),
+ S5PC100_GPIO_I_START = S5PC100_GPIO_NEXT(S5PC100_GPIO_H3),
+ S5PC100_GPIO_J0_START = S5PC100_GPIO_NEXT(S5PC100_GPIO_I),
+ S5PC100_GPIO_J1_START = S5PC100_GPIO_NEXT(S5PC100_GPIO_J0),
+ S5PC100_GPIO_J2_START = S5PC100_GPIO_NEXT(S5PC100_GPIO_J1),
+ S5PC100_GPIO_J3_START = S5PC100_GPIO_NEXT(S5PC100_GPIO_J2),
+ S5PC100_GPIO_J4_START = S5PC100_GPIO_NEXT(S5PC100_GPIO_J3),
+ S5PC100_GPIO_K0_START = S5PC100_GPIO_NEXT(S5PC100_GPIO_J4),
+ S5PC100_GPIO_K1_START = S5PC100_GPIO_NEXT(S5PC100_GPIO_K0),
+ S5PC100_GPIO_K2_START = S5PC100_GPIO_NEXT(S5PC100_GPIO_K1),
+ S5PC100_GPIO_K3_START = S5PC100_GPIO_NEXT(S5PC100_GPIO_K2),
+ S5PC100_GPIO_L0_START = S5PC100_GPIO_NEXT(S5PC100_GPIO_K3),
+ S5PC100_GPIO_L1_START = S5PC100_GPIO_NEXT(S5PC100_GPIO_L0),
+ S5PC100_GPIO_L2_START = S5PC100_GPIO_NEXT(S5PC100_GPIO_L1),
+ S5PC100_GPIO_L3_START = S5PC100_GPIO_NEXT(S5PC100_GPIO_L2),
+ S5PC100_GPIO_L4_START = S5PC100_GPIO_NEXT(S5PC100_GPIO_L3),
+ S5PC100_GPIO_END = S5PC100_GPIO_NEXT(S5PC100_GPIO_L4),
};
/* S5PC100 GPIO number definitions. */
@@ -146,17 +139,20 @@ enum s3c_gpio_number {
#define S5PC100_GPL2(_nr) (S5PC100_GPIO_L2_START + (_nr))
#define S5PC100_GPL3(_nr) (S5PC100_GPIO_L3_START + (_nr))
#define S5PC100_GPL4(_nr) (S5PC100_GPIO_L4_START + (_nr))
-#define S5PC100_MP00(_nr) (S5PC100_GPIO_MP00_START + (_nr))
-#define S5PC100_MP01(_nr) (S5PC100_GPIO_MP01_START + (_nr))
-#define S5PC100_MP02(_nr) (S5PC100_GPIO_MP02_START + (_nr))
-#define S5PC100_MP03(_nr) (S5PC100_GPIO_MP03_START + (_nr))
-#define S5PC100_MP04(_nr) (S5PC100_GPIO_MP04_START + (_nr))
-#define S5PC100_MP05(_nr) (S5PC100_GPIO_MP05_START + (_nr))
-/* It used the end of the S5PC1XX gpios */
+/* S3C_GPIO_END is taken from the end of the S5PC100 gpios */
#define S3C_GPIO_END S5PC100_GPIO_END
/* define the number of gpios we need to the one after the MP04() range */
#define ARCH_NR_GPIOS (S5PC100_GPIO_END + 1)
+#define EINT_MODE S3C_GPIO_SFN(0x2)
+
+#define EINT_GPIO_0(x) S5PC100_GPH0(x)
+#define EINT_GPIO_1(x) S5PC100_GPH1(x)
+#define EINT_GPIO_2(x) S5PC100_GPH2(x)
+#define EINT_GPIO_3(x) S5PC100_GPH3(x)
+
#include <asm-generic/gpio.h>
+
+#endif /* __ASM_ARCH_GPIO_H */
diff --git a/arch/arm/mach-s5pc100/include/mach/irqs.h b/arch/arm/mach-s5pc100/include/mach/irqs.h
index b53fa48a52c6..28aa551dc3a8 100644
--- a/arch/arm/mach-s5pc100/include/mach/irqs.h
+++ b/arch/arm/mach-s5pc100/include/mach/irqs.h
@@ -11,9 +11,104 @@
#include <plat/irqs.h>
-/* LCD */
+/* VIC0: system, DMA, timer */
+#define IRQ_EINT16_31 S5P_IRQ_VIC0(16)
+#define IRQ_BATF S5P_IRQ_VIC0(17)
+#define IRQ_MDMA S5P_IRQ_VIC0(18)
+#define IRQ_PDMA0 S5P_IRQ_VIC0(19)
+#define IRQ_PDMA1 S5P_IRQ_VIC0(20)
+#define IRQ_TIMER0_VIC S5P_IRQ_VIC0(21)
+#define IRQ_TIMER1_VIC S5P_IRQ_VIC0(22)
+#define IRQ_TIMER2_VIC S5P_IRQ_VIC0(23)
+#define IRQ_TIMER3_VIC S5P_IRQ_VIC0(24)
+#define IRQ_TIMER4_VIC S5P_IRQ_VIC0(25)
+#define IRQ_SYSTIMER S5P_IRQ_VIC0(26)
+#define IRQ_WDT S5P_IRQ_VIC0(27)
+#define IRQ_RTC_ALARM S5P_IRQ_VIC0(28)
+#define IRQ_RTC_TIC S5P_IRQ_VIC0(29)
+#define IRQ_GPIOINT S5P_IRQ_VIC0(30)
+
+/* VIC1: ARM, power, memory, connectivity */
+#define IRQ_CORTEX0 S5P_IRQ_VIC1(0)
+#define IRQ_CORTEX1 S5P_IRQ_VIC1(1)
+#define IRQ_CORTEX2 S5P_IRQ_VIC1(2)
+#define IRQ_CORTEX3 S5P_IRQ_VIC1(3)
+#define IRQ_CORTEX4 S5P_IRQ_VIC1(4)
+#define IRQ_IEMAPC S5P_IRQ_VIC1(5)
+#define IRQ_IEMIEC S5P_IRQ_VIC1(6)
+#define IRQ_ONENAND S5P_IRQ_VIC1(7)
+#define IRQ_NFC S5P_IRQ_VIC1(8)
+#define IRQ_CFC S5P_IRQ_VIC1(9)
+#define IRQ_UART0 S5P_IRQ_VIC1(10)
+#define IRQ_UART1 S5P_IRQ_VIC1(11)
+#define IRQ_UART2 S5P_IRQ_VIC1(12)
+#define IRQ_UART3 S5P_IRQ_VIC1(13)
+#define IRQ_IIC S5P_IRQ_VIC1(14)
+#define IRQ_SPI0 S5P_IRQ_VIC1(15)
+#define IRQ_SPI1 S5P_IRQ_VIC1(16)
+#define IRQ_SPI2 S5P_IRQ_VIC1(17)
+#define IRQ_IRDA S5P_IRQ_VIC1(18)
+#define IRQ_CAN0 S5P_IRQ_VIC1(19)
+#define IRQ_CAN1 S5P_IRQ_VIC1(20)
+#define IRQ_HSIRX S5P_IRQ_VIC1(21)
+#define IRQ_HSITX S5P_IRQ_VIC1(22)
+#define IRQ_UHOST S5P_IRQ_VIC1(23)
+#define IRQ_OTG S5P_IRQ_VIC1(24)
+#define IRQ_MSM S5P_IRQ_VIC1(25)
+#define IRQ_HSMMC0 S5P_IRQ_VIC1(26)
+#define IRQ_HSMMC1 S5P_IRQ_VIC1(27)
+#define IRQ_HSMMC2 S5P_IRQ_VIC1(28)
+#define IRQ_MIPICSI S5P_IRQ_VIC1(29)
+#define IRQ_MIPIDSI S5P_IRQ_VIC1(30)
+
+/* VIC2: multimedia, audio, security */
+#define IRQ_LCD0 S5P_IRQ_VIC2(0)
+#define IRQ_LCD1 S5P_IRQ_VIC2(1)
+#define IRQ_LCD2 S5P_IRQ_VIC2(2)
+#define IRQ_LCD3 S5P_IRQ_VIC2(3)
+#define IRQ_ROTATOR S5P_IRQ_VIC2(4)
+#define IRQ_FIMC0 S5P_IRQ_VIC2(5)
+#define IRQ_FIMC1 S5P_IRQ_VIC2(6)
+#define IRQ_FIMC2 S5P_IRQ_VIC2(7)
+#define IRQ_JPEG S5P_IRQ_VIC2(8)
+#define IRQ_2D S5P_IRQ_VIC2(9)
+#define IRQ_3D S5P_IRQ_VIC2(10)
+#define IRQ_MIXER S5P_IRQ_VIC2(11)
+#define IRQ_HDMI S5P_IRQ_VIC2(12)
+#define IRQ_IIC1 S5P_IRQ_VIC2(13)
+#define IRQ_MFC S5P_IRQ_VIC2(14)
+#define IRQ_TVENC S5P_IRQ_VIC2(15)
+#define IRQ_I2S0 S5P_IRQ_VIC2(16)
+#define IRQ_I2S1 S5P_IRQ_VIC2(17)
+#define IRQ_I2S2 S5P_IRQ_VIC2(18)
+#define IRQ_AC97 S5P_IRQ_VIC2(19)
+#define IRQ_PCM0 S5P_IRQ_VIC2(20)
+#define IRQ_PCM1 S5P_IRQ_VIC2(21)
+#define IRQ_SPDIF S5P_IRQ_VIC2(22)
+#define IRQ_ADC S5P_IRQ_VIC2(23)
+#define IRQ_PENDN S5P_IRQ_VIC2(24)
+#define IRQ_TC IRQ_PENDN
+#define IRQ_KEYPAD S5P_IRQ_VIC2(25)
+#define IRQ_CG S5P_IRQ_VIC2(26)
+#define IRQ_SEC S5P_IRQ_VIC2(27)
+#define IRQ_SECRX S5P_IRQ_VIC2(28)
+#define IRQ_SECTX S5P_IRQ_VIC2(29)
+#define IRQ_SDMIRQ S5P_IRQ_VIC2(30)
+#define IRQ_SDMFIQ S5P_IRQ_VIC2(31)
+#define IRQ_VIC_END S5P_IRQ_VIC2(31)
+
+#define S5P_EINT_BASE1 (S5P_IRQ_VIC0(0))
+#define S5P_EINT_BASE2 (IRQ_VIC_END + 1)
+
+#define S3C_IRQ_GPIO_BASE (IRQ_EINT(31) + 1)
+#define S3C_IRQ_GPIO(x) (S3C_IRQ_GPIO_BASE + (x))
+
+/* Up to the MP04 groups: 40 (exactly 39) groups * 8 ~= 320 GPIOs */
+#define NR_IRQS (S3C_IRQ_GPIO(320) + 1)
+
+/* Compatibility */
#define IRQ_LCD_FIFO IRQ_LCD0
#define IRQ_LCD_VSYNC IRQ_LCD1
#define IRQ_LCD_SYSTEM IRQ_LCD2
-#endif /* __ASM_ARCH_IRQ_H */
+#endif /* __ASM_ARCH_IRQS_H */
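
The GPIO interrupts added here start at S3C_IRQ_GPIO_BASE, directly above the 32 external interrupts, and the gpiolib to_irq hook maps a gpio number straight into that space. As a rough illustration only (the pin choice is arbitrary, and the bank must be one handled by the GPIO interrupt code):

	/* Sketch only: same computation the gpiolib to_irq hook performs. */
	unsigned int irq = S3C_IRQ_GPIO(S5PC100_GPL3(0));
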
diff --git a/arch/arm/mach-s5pc100/include/mach/map.h b/arch/arm/mach-s5pc100/include/mach/map.h
index 4681ebe8bef6..cadae4305688 100644
--- a/arch/arm/mach-s5pc100/include/mach/map.h
+++ b/arch/arm/mach-s5pc100/include/mach/map.h
@@ -3,9 +3,7 @@
* Copyright 2009 Samsung Electronics Co.
* Byungho Min <bhmin@samsung.com>
*
- * Based on mach-s3c6400/include/mach/map.h
- *
- * S5PC1XX - Memory map definitions
+ * S5PC100 - Memory map definitions
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -16,6 +14,7 @@
#define __ASM_ARCH_MAP_H __FILE__
#include <plat/map-base.h>
+#include <plat/map-s5p.h>
/*
* map-base.h has already defined virtual memory address
@@ -31,25 +30,21 @@
*
*/
+#define S5PC100_PA_ONENAND_BUF (0xB0000000)
+#define S5PC100_SZ_ONENAND_BUF (SZ_256M - SZ_32M)
+
/* Chip ID */
+
#define S5PC100_PA_CHIPID (0xE0000000)
-#define S5PC1XX_PA_CHIPID S5PC100_PA_CHIPID
-#define S5PC1XX_VA_CHIPID S3C_VA_SYS
-
-/* System */
-#define S5PC100_PA_CLK (0xE0100000)
-#define S5PC100_PA_CLK_OTHER (0xE0200000)
-#define S5PC100_PA_PWR (0xE0108000)
-#define S5PC1XX_PA_CLK S5PC100_PA_CLK
-#define S5PC1XX_PA_PWR S5PC100_PA_PWR
-#define S5PC1XX_PA_CLK_OTHER S5PC100_PA_CLK_OTHER
-#define S5PC1XX_VA_CLK (S3C_VA_SYS + 0x10000)
-#define S5PC1XX_VA_PWR (S3C_VA_SYS + 0x20000)
-#define S5PC1XX_VA_CLK_OTHER (S3C_VA_SYS + 0x30000)
-
-/* GPIO */
-#define S5PC100_PA_GPIO (0xE0300000)
-#define S5PC1XX_PA_GPIO S5PC100_PA_GPIO
+#define S5P_PA_CHIPID S5PC100_PA_CHIPID
+
+#define S5PC100_PA_SYSCON (0xE0100000)
+#define S5P_PA_SYSCON S5PC100_PA_SYSCON
+
+#define S5PC100_PA_OTHERS (0xE0200000)
+#define S5PC100_VA_OTHERS (S3C_VA_SYS + 0x10000)
+
+#define S5P_PA_GPIO (0xE0300000)
#define S5PC1XX_VA_GPIO S3C_ADDR(0x00500000)
/* Interrupt */
@@ -59,6 +54,12 @@
#define S5PC100_VA_VIC_OFFSET 0x10000
#define S5PC1XX_PA_VIC(x) (S5PC100_PA_VIC + ((x) * S5PC100_PA_VIC_OFFSET))
#define S5PC1XX_VA_VIC(x) (S5PC100_VA_VIC + ((x) * S5PC100_VA_VIC_OFFSET))
+#define S5P_PA_VIC0 S5PC1XX_PA_VIC(0)
+#define S5P_PA_VIC1 S5PC1XX_PA_VIC(1)
+#define S5P_PA_VIC2 S5PC1XX_PA_VIC(2)
+
+
+#define S5PC100_PA_ONENAND (0xE7100000)
/* DMA */
#define S5PC100_PA_MDMA (0xE8100000)
@@ -67,84 +68,71 @@
/* Timer */
#define S5PC100_PA_TIMER (0xEA000000)
-#define S5PC1XX_PA_TIMER S5PC100_PA_TIMER
-#define S5PC1XX_VA_TIMER S3C_VA_TIMER
+#define S5P_PA_TIMER S5PC100_PA_TIMER
-/* RTC */
-#define S5PC100_PA_RTC (0xEA300000)
+#define S5PC100_PA_SYSTIMER (0xEA100000)
-/* UART */
#define S5PC100_PA_UART (0xEC000000)
-#define S5PC1XX_PA_UART S5PC100_PA_UART
-#define S5PC1XX_VA_UART S3C_VA_UART
-/* I2C */
-#define S5PC100_PA_I2C (0xEC100000)
-#define S5PC100_PA_I2C1 (0xEC200000)
+#define S5P_PA_UART0 (S5PC100_PA_UART + 0x0)
+#define S5P_PA_UART1 (S5PC100_PA_UART + 0x400)
+#define S5P_PA_UART2 (S5PC100_PA_UART + 0x800)
+#define S5P_PA_UART3 (S5PC100_PA_UART + 0xC00)
+#define S5P_SZ_UART SZ_256
+
+#define S5PC100_PA_IIC0 (0xEC100000)
+#define S5PC100_PA_IIC1 (0xEC200000)
+
+/* SPI */
+#define S5PC100_PA_SPI0 0xEC300000
+#define S5PC100_PA_SPI1 0xEC400000
+#define S5PC100_PA_SPI2 0xEC500000
/* USB HS OTG */
#define S5PC100_PA_USB_HSOTG (0xED200000)
#define S5PC100_PA_USB_HSPHY (0xED300000)
-/* SD/MMC */
-#define S5PC100_PA_HSMMC(x) (0xED800000 + ((x) * 0x100000))
-#define S5PC100_PA_HSMMC0 S5PC100_PA_HSMMC(0)
-#define S5PC100_PA_HSMMC1 S5PC100_PA_HSMMC(1)
-#define S5PC100_PA_HSMMC2 S5PC100_PA_HSMMC(2)
-
-/* LCD */
#define S5PC100_PA_FB (0xEE000000)
-/* Multimedia */
-#define S5PC100_PA_G2D (0xEE800000)
-#define S5PC100_PA_JPEG (0xEE500000)
-#define S5PC100_PA_ROTATOR (0xEE100000)
-#define S5PC100_PA_G3D (0xEF000000)
-
-/* I2S */
#define S5PC100_PA_I2S0 (0xF2000000)
#define S5PC100_PA_I2S1 (0xF2100000)
#define S5PC100_PA_I2S2 (0xF2200000)
+#define S5PC100_PA_AC97 0xF2300000
+
+/* PCM */
+#define S5PC100_PA_PCM0 0xF2400000
+#define S5PC100_PA_PCM1 0xF2500000
+
/* KEYPAD */
#define S5PC100_PA_KEYPAD (0xF3100000)
-/* ADC & TouchScreen */
-#define S5PC100_PA_TSADC (0xF3000000)
+#define S5PC100_PA_HSMMC(x) (0xED800000 + ((x) * 0x100000))
-/* ETC */
#define S5PC100_PA_SDRAM (0x20000000)
-#define S5PC1XX_PA_SDRAM S5PC100_PA_SDRAM
+#define S5P_PA_SDRAM S5PC100_PA_SDRAM
-/* compatibility defines. */
-#define S3C_PA_RTC S5PC100_PA_RTC
+/* compatibility defines. */
#define S3C_PA_UART S5PC100_PA_UART
-#define S3C_PA_UART0 (S5PC100_PA_UART + 0x0)
-#define S3C_PA_UART1 (S5PC100_PA_UART + 0x400)
-#define S3C_PA_UART2 (S5PC100_PA_UART + 0x800)
-#define S3C_PA_UART3 (S5PC100_PA_UART + 0xC00)
-#define S3C_VA_UART0 (S3C_VA_UART + 0x0)
-#define S3C_VA_UART1 (S3C_VA_UART + 0x400)
-#define S3C_VA_UART2 (S3C_VA_UART + 0x800)
-#define S3C_VA_UART3 (S3C_VA_UART + 0xC00)
-#define S3C_UART_OFFSET 0x400
-#define S3C_VA_UARTx(x) (S3C_VA_UART + ((x) * S3C_UART_OFFSET))
+#define S3C_PA_IIC S5PC100_PA_IIC0
+#define S3C_PA_IIC1 S5PC100_PA_IIC1
#define S3C_PA_FB S5PC100_PA_FB
#define S3C_PA_G2D S5PC100_PA_G2D
#define S3C_PA_G3D S5PC100_PA_G3D
#define S3C_PA_JPEG S5PC100_PA_JPEG
#define S3C_PA_ROTATOR S5PC100_PA_ROTATOR
-#define S3C_VA_VIC0 (S3C_VA_IRQ + 0x0)
-#define S3C_VA_VIC1 (S3C_VA_IRQ + 0x10000)
-#define S3C_VA_VIC2 (S3C_VA_IRQ + 0x20000)
-#define S3C_PA_IIC S5PC100_PA_I2C
-#define S3C_PA_IIC1 S5PC100_PA_I2C1
+#define S5P_VA_VIC0 S5PC1XX_VA_VIC(0)
+#define S5P_VA_VIC1 S5PC1XX_VA_VIC(1)
+#define S5P_VA_VIC2 S5PC1XX_VA_VIC(2)
#define S3C_PA_USB_HSOTG S5PC100_PA_USB_HSOTG
#define S3C_PA_USB_HSPHY S5PC100_PA_USB_HSPHY
-#define S3C_PA_HSMMC0 S5PC100_PA_HSMMC0
-#define S3C_PA_HSMMC1 S5PC100_PA_HSMMC1
-#define S3C_PA_HSMMC2 S5PC100_PA_HSMMC2
+#define S3C_PA_HSMMC0 S5PC100_PA_HSMMC(0)
+#define S3C_PA_HSMMC1 S5PC100_PA_HSMMC(1)
+#define S3C_PA_HSMMC2 S5PC100_PA_HSMMC(2)
#define S3C_PA_KEYPAD S5PC100_PA_KEYPAD
#define S3C_PA_TSADC S5PC100_PA_TSADC
+#define S3C_PA_ONENAND S5PC100_PA_ONENAND
+#define S3C_PA_ONENAND_BUF S5PC100_PA_ONENAND_BUF
+#define S3C_SZ_ONENAND_BUF S5PC100_SZ_ONENAND_BUF
#endif /* __ASM_ARCH_C100_MAP_H */
diff --git a/arch/arm/mach-s5pc100/include/mach/regs-clock.h b/arch/arm/mach-s5pc100/include/mach/regs-clock.h
new file mode 100644
index 000000000000..5d27d286d504
--- /dev/null
+++ b/arch/arm/mach-s5pc100/include/mach/regs-clock.h
@@ -0,0 +1,77 @@
+/* linux/arch/arm/mach-s5pc100/include/mach/regs-clock.h
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * S5PC100 - Clock register definitions
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __ASM_ARCH_REGS_CLOCK_H
+#define __ASM_ARCH_REGS_CLOCK_H __FILE__
+
+#include <mach/map.h>
+
+#define S5P_CLKREG(x) (S3C_VA_SYS + (x))
+
+#define S5PC100_REG_OTHERS(x) (S5PC100_VA_OTHERS + (x))
+
+#define S5P_APLL_LOCK S5P_CLKREG(0x00)
+#define S5P_MPLL_LOCK S5P_CLKREG(0x04)
+#define S5P_EPLL_LOCK S5P_CLKREG(0x08)
+#define S5P_HPLL_LOCK S5P_CLKREG(0x0C)
+
+#define S5P_APLL_CON S5P_CLKREG(0x100)
+#define S5P_MPLL_CON S5P_CLKREG(0x104)
+#define S5P_EPLL_CON S5P_CLKREG(0x108)
+#define S5P_HPLL_CON S5P_CLKREG(0x10C)
+
+#define S5P_CLK_SRC0 S5P_CLKREG(0x200)
+#define S5P_CLK_SRC1 S5P_CLKREG(0x204)
+#define S5P_CLK_SRC2 S5P_CLKREG(0x208)
+#define S5P_CLK_SRC3 S5P_CLKREG(0x20C)
+
+#define S5P_CLK_DIV0 S5P_CLKREG(0x300)
+#define S5P_CLK_DIV1 S5P_CLKREG(0x304)
+#define S5P_CLK_DIV2 S5P_CLKREG(0x308)
+#define S5P_CLK_DIV3 S5P_CLKREG(0x30C)
+#define S5P_CLK_DIV4 S5P_CLKREG(0x310)
+
+#define S5P_CLK_OUT S5P_CLKREG(0x400)
+
+#define S5P_CLKGATE_D00 S5P_CLKREG(0x500)
+#define S5P_CLKGATE_D01 S5P_CLKREG(0x504)
+#define S5P_CLKGATE_D02 S5P_CLKREG(0x508)
+
+#define S5P_CLKGATE_D10 S5P_CLKREG(0x520)
+#define S5P_CLKGATE_D11 S5P_CLKREG(0x524)
+#define S5P_CLKGATE_D12 S5P_CLKREG(0x528)
+#define S5P_CLKGATE_D13 S5P_CLKREG(0x52C)
+#define S5P_CLKGATE_D14 S5P_CLKREG(0x530)
+#define S5P_CLKGATE_D15 S5P_CLKREG(0x534)
+
+#define S5P_CLKGATE_D20 S5P_CLKREG(0x540)
+
+#define S5P_CLKGATE_SCLK0 S5P_CLKREG(0x560)
+#define S5P_CLKGATE_SCLK1 S5P_CLKREG(0x564)
+
+/* CLKDIV0 */
+#define S5P_CLKDIV0_D0_MASK (0x7<<8)
+#define S5P_CLKDIV0_D0_SHIFT (8)
+#define S5P_CLKDIV0_PCLKD0_MASK (0x7<<12)
+#define S5P_CLKDIV0_PCLKD0_SHIFT (12)
+
+/* CLKDIV1 */
+#define S5P_CLKDIV1_D1_MASK (0x7<<12)
+#define S5P_CLKDIV1_D1_SHIFT (12)
+#define S5P_CLKDIV1_PCLKD1_MASK (0x7<<16)
+#define S5P_CLKDIV1_PCLKD1_SHIFT (16)
+
+#define S5PC100_SWRESET S5PC100_REG_OTHERS(0x000)
+
+#define S5PC100_SWRESET_RESETVAL 0xc100
+
+#endif /* __ASM_ARCH_REGS_CLOCK_H */
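
The *_MASK/*_SHIFT pairs above are meant for the usual read-modify-write sequence on the divider registers. A minimal sketch, assuming the caller has already computed the raw field value div_val and the code sits in a file that includes this header; the helper name is hypothetical.

	/* Sketch only: program the D0 divider field of CLKDIV0. */
	static void s5pc100_set_clkdiv0_d0(u32 div_val)
	{
		u32 reg = __raw_readl(S5P_CLK_DIV0);

		reg &= ~S5P_CLKDIV0_D0_MASK;
		reg |= (div_val << S5P_CLKDIV0_D0_SHIFT) & S5P_CLKDIV0_D0_MASK;
		__raw_writel(reg, S5P_CLK_DIV0);
	}
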
diff --git a/arch/arm/plat-s5pc1xx/include/plat/regs-gpio.h b/arch/arm/mach-s5pc100/include/mach/regs-gpio.h
index 43c7bc8bf784..dd6295e1251d 100644
--- a/arch/arm/plat-s5pc1xx/include/plat/regs-gpio.h
+++ b/arch/arm/mach-s5pc100/include/mach/regs-gpio.h
@@ -1,18 +1,18 @@
-/* linux/arch/arm/plat-s5pc1xx/include/plat/regs-gpio.h
+/* linux/arch/arm/mach-s5pc100/include/mach/regs-gpio.h
*
* Copyright 2009 Samsung Electronics Co.
* Byungho Min <bhmin@samsung.com>
*
- * S5PC1XX - GPIO register definitions
+ * S5PC100 - GPIO register definitions
*/
-#ifndef __ASM_PLAT_S5PC1XX_REGS_GPIO_H
-#define __ASM_PLAT_S5PC1XX_REGS_GPIO_H __FILE__
+#ifndef __ASM_MACH_S5PC100_REGS_GPIO_H
+#define __ASM_MACH_S5PC100_REGS_GPIO_H __FILE__
#include <mach/map.h>
/* S5PC100 */
-#define S5PC100_GPIO_BASE S5PC1XX_VA_GPIO
+#define S5PC100_GPIO_BASE S5P_VA_GPIO
#define S5PC100_GPA0_BASE (S5PC100_GPIO_BASE + 0x0000)
#define S5PC100_GPA1_BASE (S5PC100_GPIO_BASE + 0x0020)
#define S5PC100_GPB_BASE (S5PC100_GPIO_BASE + 0x0040)
@@ -47,24 +47,29 @@
#define S5PC100_GPL2_BASE (S5PC100_GPIO_BASE + 0x0360)
#define S5PC100_GPL3_BASE (S5PC100_GPIO_BASE + 0x0380)
#define S5PC100_GPL4_BASE (S5PC100_GPIO_BASE + 0x03A0)
-#define S5PC100_EINT_BASE (S5PC100_GPIO_BASE + 0x0E00)
-#define S5PC100_UHOST (S5PC100_GPIO_BASE + 0x0B68)
-#define S5PC100_PDNEN (S5PC100_GPIO_BASE + 0x0F80)
+#define S5PC100EINT30CON (S5P_VA_GPIO + 0xE00)
+#define S5P_EINT_CON(x) (S5PC100EINT30CON + ((x) * 0x4))
-/* PDNEN */
-#define S5PC100_PDNEN_CFG_PDNEN (1 << 1)
-#define S5PC100_PDNEN_CFG_AUTO (0 << 1)
-#define S5PC100_PDNEN_POWERDOWN (1 << 0)
-#define S5PC100_PDNEN_NORMAL (0 << 0)
+#define S5PC100EINT30FLTCON0 (S5P_VA_GPIO + 0xE80)
+#define S5P_EINT_FLTCON(x) (S5PC100EINT30FLTCON0 + ((x) * 0x4))
-/* Common part */
-/* External interrupt base is same at both s5pc100 and s5pc110 */
-#define S5PC1XX_EINT_BASE (S5PC100_EINT_BASE)
+#define S5PC100EINT30MASK (S5P_VA_GPIO + 0xF00)
+#define S5P_EINT_MASK(x) (S5PC100EINT30MASK + ((x) * 0x4))
-#define S5PC100_GPx_INPUT(__gpio) (0x0 << ((__gpio) * 4))
-#define S5PC100_GPx_OUTPUT(__gpio) (0x1 << ((__gpio) * 4))
-#define S5PC100_GPx_CONMASK(__gpio) (0xf << ((__gpio) * 4))
+#define S5PC100EINT30PEND (S5P_VA_GPIO + 0xF40)
+#define S5P_EINT_PEND(x) (S5PC100EINT30PEND + ((x) * 0x4))
-#endif /* __ASM_PLAT_S5PC1XX_REGS_GPIO_H */
+#define EINT_REG_NR(x) (EINT_OFFSET(x) >> 3)
+
+#define eint_irq_to_bit(irq) (1 << (EINT_OFFSET(irq) & 0x7))
+
+/* values for S5P_EXTINT0 */
+#define S5P_EXTINT_LOWLEV (0x00)
+#define S5P_EXTINT_HILEV (0x01)
+#define S5P_EXTINT_FALLEDGE (0x02)
+#define S5P_EXTINT_RISEEDGE (0x03)
+#define S5P_EXTINT_BOTHEDGE (0x04)
+
+#endif /* __ASM_MACH_S5PC100_REGS_GPIO_H */
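
EINT_REG_NR() selects the per-8-interrupt register index and eint_irq_to_bit() the bit within it. A minimal sketch of masking one external interrupt, assuming eint is an IRQ number understood by EINT_OFFSET() (from plat/irqs.h); the helper name is hypothetical.

	/* Sketch only: set the mask bit for a single external interrupt. */
	static void s5pc100_eint_mask(unsigned int eint)
	{
		u32 mask = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(eint)));

		mask |= eint_irq_to_bit(eint);
		__raw_writel(mask, S5P_EINT_MASK(EINT_REG_NR(eint)));
	}
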
diff --git a/arch/arm/mach-s5pc100/include/mach/regs-irq.h b/arch/arm/mach-s5pc100/include/mach/regs-irq.h
index 751ac15438c8..4d9036d0f288 100644
--- a/arch/arm/mach-s5pc100/include/mach/regs-irq.h
+++ b/arch/arm/mach-s5pc100/include/mach/regs-irq.h
@@ -3,7 +3,7 @@
* Copyright 2009 Samsung Electronics Co.
* Byungho Min <bhmin@samsung.com>
*
- * S5PC1XX - IRQ register definitions
+ * S5PC100 - IRQ register definitions
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -16,9 +16,4 @@
#include <mach/map.h>
#include <asm/hardware/vic.h>
-/* interrupt controller */
-#define S5PC1XX_VIC0REG(x) ((x) + S5PC1XX_VA_VIC(0))
-#define S5PC1XX_VIC1REG(x) ((x) + S5PC1XX_VA_VIC(1))
-#define S5PC1XX_VIC2REG(x) ((x) + S5PC1XX_VA_VIC(2))
-
#endif /* __ASM_ARCH_REGS_IRQ_H */
diff --git a/arch/arm/mach-s5pc100/include/mach/spi-clocks.h b/arch/arm/mach-s5pc100/include/mach/spi-clocks.h
new file mode 100644
index 000000000000..65e426370bb2
--- /dev/null
+++ b/arch/arm/mach-s5pc100/include/mach/spi-clocks.h
@@ -0,0 +1,18 @@
+/* linux/arch/arm/mach-s5pc100/include/mach/spi-clocks.h
+ *
+ * Copyright (C) 2010 Samsung Electronics Co. Ltd.
+ * Jaswinder Singh <jassi.brar@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __S5PC100_PLAT_SPI_CLKS_H
+#define __S5PC100_PLAT_SPI_CLKS_H __FILE__
+
+#define S5PC100_SPI_SRCCLK_PCLK 0
+#define S5PC100_SPI_SRCCLK_48M 1
+#define S5PC100_SPI_SRCCLK_SPIBUS 2
+
+#endif /* __S5PC100_PLAT_SPI_CLKS_H */
diff --git a/arch/arm/mach-s5pc100/include/mach/system.h b/arch/arm/mach-s5pc100/include/mach/system.h
index f0d31a2a598c..681f626a9ae1 100644
--- a/arch/arm/mach-s5pc100/include/mach/system.h
+++ b/arch/arm/mach-s5pc100/include/mach/system.h
@@ -3,7 +3,7 @@
* Copyright 2009 Samsung Electronics Co.
* Byungho Min <bhmin@samsung.com>
*
- * S5PC1XX - system implementation
+ * S5PC100 - system implementation
*
* Based on mach-s3c6400/include/mach/system.h
*/
@@ -13,14 +13,11 @@
#include <linux/io.h>
#include <mach/map.h>
-#include <plat/regs-clock.h>
-
-void (*s5pc1xx_idle)(void);
+#include <mach/regs-clock.h>
static void arch_idle(void)
{
- if (s5pc1xx_idle)
- s5pc1xx_idle();
+ /* nothing here yet */
}
static void arch_reset(char mode, const char *cmd)
diff --git a/arch/arm/mach-s5pc100/include/mach/tick.h b/arch/arm/mach-s5pc100/include/mach/tick.h
index f338c9eec717..20f68730ed18 100644
--- a/arch/arm/mach-s5pc100/include/mach/tick.h
+++ b/arch/arm/mach-s5pc100/include/mach/tick.h
@@ -20,8 +20,8 @@
*/
static inline u32 s3c24xx_ostimer_pending(void)
{
- u32 pend = __raw_readl(S3C_VA_VIC0 + VIC_RAW_STATUS);
- return pend & 1 << (IRQ_TIMER4_VIC - S5PC1XX_IRQ_VIC0(0));
+ u32 pend = __raw_readl(VA_VIC0 + VIC_RAW_STATUS);
+ return pend & (1 << (IRQ_TIMER4_VIC - S5P_IRQ_VIC0(0)));
}
#define TICK_MAX (0xffffffff)
diff --git a/arch/arm/plat-s5pc1xx/s5pc100-init.c b/arch/arm/mach-s5pc100/init.c
index c58710884ceb..19d7b523c137 100644
--- a/arch/arm/plat-s5pc1xx/s5pc100-init.c
+++ b/arch/arm/mach-s5pc100/init.c
@@ -1,9 +1,8 @@
-/* linux/arch/arm/plat-s5pc1xx/s5pc100-init.c
+/* linux/arch/arm/mach-s5pc100/init.c
*
* Copyright 2009 Samsung Electronics Co.
* Byungho Min <bhmin@samsung.com>
*
- * S5PC100 - CPU initialisation (common with other S5PC1XX chips)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -19,9 +18,7 @@
#include <plat/s5pc100.h>
/* uart registration process */
-
void __init s5pc100_common_init_uarts(struct s3c2410_uartcfg *cfg, int no)
{
- /* The driver name is s3c6400-uart to reuse s3c6400_serial_drv */
- s3c24xx_init_uartdevs("s3c6400-uart", s5pc1xx_uart_resources, cfg, no);
+ s3c24xx_init_uartdevs("s3c6400-uart", s5p_uart_resources, cfg, no);
}
diff --git a/arch/arm/plat-s5pc1xx/irq-gpio.c b/arch/arm/mach-s5pc100/irq-gpio.c
index fecca7a679b0..2bf86c18bc73 100644
--- a/arch/arm/plat-s5pc1xx/irq-gpio.c
+++ b/arch/arm/mach-s5pc100/irq-gpio.c
@@ -1,9 +1,9 @@
/*
- * arch/arm/plat-s5pc1xx/irq-gpio.c
+ * arch/arm/mach-s5pc100/irq-gpio.c
*
* Copyright (C) 2009 Samsung Electronics
*
- * S5PC1XX - Interrupt handling for IRQ_GPIO${group}(x)
+ * S5PC100 - Interrupt handling for IRQ_GPIO${group}(x)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -19,7 +19,7 @@
#include <mach/map.h>
#include <plat/gpio-cfg.h>
-#define S5PC1XX_GPIOREG(x) (S5PC1XX_VA_GPIO + (x))
+#define S5P_GPIOREG(x) (S5P_VA_GPIO + (x))
#define CON_OFFSET 0x700
#define MASK_OFFSET 0x900
@@ -49,7 +49,7 @@ static int group_to_pend_offset(int group)
return group << 2;
}
-static int s5pc1xx_get_start(unsigned int group)
+static int s5pc100_get_start(unsigned int group)
{
switch (group) {
case 0: return S5PC100_GPIO_A0_START;
@@ -80,7 +80,7 @@ static int s5pc1xx_get_start(unsigned int group)
return -EINVAL;
}
-static int s5pc1xx_get_group(unsigned int irq)
+static int s5pc100_get_group(unsigned int irq)
{
irq -= S3C_IRQ_GPIO(0);
@@ -134,67 +134,67 @@ static int s5pc1xx_get_group(unsigned int irq)
return -EINVAL;
}
-static int s5pc1xx_get_offset(unsigned int irq)
+static int s5pc100_get_offset(unsigned int irq)
{
struct gpio_chip *chip = get_irq_data(irq);
return irq - S3C_IRQ_GPIO(chip->base);
}
-static void s5pc1xx_gpioint_ack(unsigned int irq)
+static void s5pc100_gpioint_ack(unsigned int irq)
{
int group, offset, pend_offset;
unsigned int value;
- group = s5pc1xx_get_group(irq);
- offset = s5pc1xx_get_offset(irq);
+ group = s5pc100_get_group(irq);
+ offset = s5pc100_get_offset(irq);
pend_offset = group_to_pend_offset(group);
- value = __raw_readl(S5PC1XX_GPIOREG(PEND_OFFSET) + pend_offset);
+ value = __raw_readl(S5P_GPIOREG(PEND_OFFSET) + pend_offset);
value |= 1 << offset;
- __raw_writel(value, S5PC1XX_GPIOREG(PEND_OFFSET) + pend_offset);
+ __raw_writel(value, S5P_GPIOREG(PEND_OFFSET) + pend_offset);
}
-static void s5pc1xx_gpioint_mask(unsigned int irq)
+static void s5pc100_gpioint_mask(unsigned int irq)
{
int group, offset, mask_offset;
unsigned int value;
- group = s5pc1xx_get_group(irq);
- offset = s5pc1xx_get_offset(irq);
+ group = s5pc100_get_group(irq);
+ offset = s5pc100_get_offset(irq);
mask_offset = group_to_mask_offset(group);
- value = __raw_readl(S5PC1XX_GPIOREG(MASK_OFFSET) + mask_offset);
+ value = __raw_readl(S5P_GPIOREG(MASK_OFFSET) + mask_offset);
value |= 1 << offset;
- __raw_writel(value, S5PC1XX_GPIOREG(MASK_OFFSET) + mask_offset);
+ __raw_writel(value, S5P_GPIOREG(MASK_OFFSET) + mask_offset);
}
-static void s5pc1xx_gpioint_unmask(unsigned int irq)
+static void s5pc100_gpioint_unmask(unsigned int irq)
{
int group, offset, mask_offset;
unsigned int value;
- group = s5pc1xx_get_group(irq);
- offset = s5pc1xx_get_offset(irq);
+ group = s5pc100_get_group(irq);
+ offset = s5pc100_get_offset(irq);
mask_offset = group_to_mask_offset(group);
- value = __raw_readl(S5PC1XX_GPIOREG(MASK_OFFSET) + mask_offset);
+ value = __raw_readl(S5P_GPIOREG(MASK_OFFSET) + mask_offset);
value &= ~(1 << offset);
- __raw_writel(value, S5PC1XX_GPIOREG(MASK_OFFSET) + mask_offset);
+ __raw_writel(value, S5P_GPIOREG(MASK_OFFSET) + mask_offset);
}
-static void s5pc1xx_gpioint_mask_ack(unsigned int irq)
+static void s5pc100_gpioint_mask_ack(unsigned int irq)
{
- s5pc1xx_gpioint_mask(irq);
- s5pc1xx_gpioint_ack(irq);
+ s5pc100_gpioint_mask(irq);
+ s5pc100_gpioint_ack(irq);
}
-static int s5pc1xx_gpioint_set_type(unsigned int irq, unsigned int type)
+static int s5pc100_gpioint_set_type(unsigned int irq, unsigned int type)
{
int group, offset, con_offset;
unsigned int value;
- group = s5pc1xx_get_group(irq);
- offset = s5pc1xx_get_offset(irq);
+ group = s5pc100_get_group(irq);
+ offset = s5pc100_get_offset(irq);
con_offset = group_to_con_offset(group);
switch (type) {
@@ -221,24 +221,24 @@ static int s5pc1xx_gpioint_set_type(unsigned int irq, unsigned int type)
}
- value = __raw_readl(S5PC1XX_GPIOREG(CON_OFFSET) + con_offset);
+ value = __raw_readl(S5P_GPIOREG(CON_OFFSET) + con_offset);
value &= ~(0xf << (offset * 0x4));
value |= (type << (offset * 0x4));
- __raw_writel(value, S5PC1XX_GPIOREG(CON_OFFSET) + con_offset);
+ __raw_writel(value, S5P_GPIOREG(CON_OFFSET) + con_offset);
return 0;
}
-struct irq_chip s5pc1xx_gpioint = {
+struct irq_chip s5pc100_gpioint = {
.name = "GPIO",
- .ack = s5pc1xx_gpioint_ack,
- .mask = s5pc1xx_gpioint_mask,
- .mask_ack = s5pc1xx_gpioint_mask_ack,
- .unmask = s5pc1xx_gpioint_unmask,
- .set_type = s5pc1xx_gpioint_set_type,
+ .ack = s5pc100_gpioint_ack,
+ .mask = s5pc100_gpioint_mask,
+ .mask_ack = s5pc100_gpioint_mask_ack,
+ .unmask = s5pc100_gpioint_unmask,
+ .set_type = s5pc100_gpioint_set_type,
};
-void s5pc1xx_irq_gpioint_handler(unsigned int irq, struct irq_desc *desc)
+void s5pc100_irq_gpioint_handler(unsigned int irq, struct irq_desc *desc)
{
int group, offset, pend_offset, mask_offset;
int real_irq, group_end;
@@ -248,17 +248,17 @@ void s5pc1xx_irq_gpioint_handler(unsigned int irq, struct irq_desc *desc)
for (group = 0; group < group_end; group++) {
pend_offset = group_to_pend_offset(group);
- pend = __raw_readl(S5PC1XX_GPIOREG(PEND_OFFSET) + pend_offset);
+ pend = __raw_readl(S5P_GPIOREG(PEND_OFFSET) + pend_offset);
if (!pend)
continue;
mask_offset = group_to_mask_offset(group);
- mask = __raw_readl(S5PC1XX_GPIOREG(MASK_OFFSET) + mask_offset);
+ mask = __raw_readl(S5P_GPIOREG(MASK_OFFSET) + mask_offset);
pend &= ~mask;
for (offset = 0; offset < 8; offset++) {
if (pend & (1 << offset)) {
- real_irq = s5pc1xx_get_start(group) + offset;
+ real_irq = s5pc100_get_start(group) + offset;
generic_handle_irq(S3C_IRQ_GPIO(real_irq));
}
}
diff --git a/arch/arm/mach-s5pc100/mach-smdkc100.c b/arch/arm/mach-s5pc100/mach-smdkc100.c
index ae3c52cd0ebb..af22f8202a07 100644
--- a/arch/arm/mach-s5pc100/mach-smdkc100.c
+++ b/arch/arm/mach-s5pc100/mach-smdkc100.c
@@ -35,7 +35,6 @@
#include <plat/regs-serial.h>
#include <plat/gpio-cfg.h>
-#include <plat/regs-gpio.h>
#include <plat/clock.h>
#include <plat/devs.h>
@@ -44,38 +43,48 @@
#include <plat/fb.h>
#include <plat/iic.h>
-#define UCON (S3C2410_UCON_DEFAULT | S3C2410_UCON_UCLK)
-#define ULCON (S3C2410_LCON_CS8 | S3C2410_LCON_PNONE | S3C2410_LCON_STOPB)
-#define UFCON (S3C2410_UFCON_RXTRIG8 | S3C2410_UFCON_FIFOMODE)
+/* Following are default values for UCON, ULCON and UFCON UART registers */
+#define S5PC100_UCON_DEFAULT (S3C2410_UCON_TXILEVEL | \
+ S3C2410_UCON_RXILEVEL | \
+ S3C2410_UCON_TXIRQMODE | \
+ S3C2410_UCON_RXIRQMODE | \
+ S3C2410_UCON_RXFIFO_TOI | \
+ S3C2443_UCON_RXERR_IRQEN)
+
+#define S5PC100_ULCON_DEFAULT S3C2410_LCON_CS8
+
+#define S5PC100_UFCON_DEFAULT (S3C2410_UFCON_FIFOMODE | \
+ S3C2440_UFCON_RXTRIG8 | \
+ S3C2440_UFCON_TXTRIG16)
static struct s3c2410_uartcfg smdkc100_uartcfgs[] __initdata = {
[0] = {
.hwport = 0,
.flags = 0,
- .ucon = 0x3c5,
- .ulcon = 0x03,
- .ufcon = 0x51,
+ .ucon = S5PC100_UCON_DEFAULT,
+ .ulcon = S5PC100_ULCON_DEFAULT,
+ .ufcon = S5PC100_UFCON_DEFAULT,
},
[1] = {
.hwport = 1,
.flags = 0,
- .ucon = 0x3c5,
- .ulcon = 0x03,
- .ufcon = 0x51,
+ .ucon = S5PC100_UCON_DEFAULT,
+ .ulcon = S5PC100_ULCON_DEFAULT,
+ .ufcon = S5PC100_UFCON_DEFAULT,
},
[2] = {
.hwport = 2,
.flags = 0,
- .ucon = 0x3c5,
- .ulcon = 0x03,
- .ufcon = 0x51,
+ .ucon = S5PC100_UCON_DEFAULT,
+ .ulcon = S5PC100_ULCON_DEFAULT,
+ .ufcon = S5PC100_UFCON_DEFAULT,
},
[3] = {
.hwport = 3,
.flags = 0,
- .ucon = 0x3c5,
- .ulcon = 0x03,
- .ufcon = 0x51,
+ .ucon = S5PC100_UCON_DEFAULT,
+ .ulcon = S5PC100_ULCON_DEFAULT,
+ .ufcon = S5PC100_UFCON_DEFAULT,
},
};
@@ -119,8 +128,7 @@ static struct platform_device smdkc100_lcd_powerdev = {
static struct s3c_fb_pd_win smdkc100_fb_win0 = {
/* this is to ensure we use win0 */
.win_mode = {
- .refresh = 70,
- .pixclock = (8+13+3+800)*(7+5+1+480),
+ .pixclock = 1000000000000ULL / ((8+13+3+800)*(7+5+1+480)*80),
.left_margin = 8,
.right_margin = 13,
.upper_margin = 7,
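
The new pixclock value encodes the pixel period in picoseconds rather than a refresh rate: pixclock = 10^12 / (htotal * vtotal * refresh). With htotal = 8 + 13 + 3 + 800 = 824, vtotal = 7 + 5 + 1 + 480 = 493 and the 80 Hz refresh implied by the divisor, that is 10^12 / 32,498,560, roughly 30,770 ps per pixel.
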
@@ -141,8 +149,6 @@ static struct s3c_fb_platdata smdkc100_lcd_pdata __initdata = {
.setup_gpio = s5pc100_fb_gpio_setup_24bpp,
};
-static struct map_desc smdkc100_iodesc[] = {};
-
static struct platform_device *smdkc100_devices[] __initdata = {
&s3c_device_i2c0,
&s3c_device_i2c1,
@@ -151,11 +157,13 @@ static struct platform_device *smdkc100_devices[] __initdata = {
&s3c_device_hsmmc1,
&s3c_device_hsmmc2,
&smdkc100_lcd_powerdev,
+ &s5pc100_device_iis0,
+ &s5pc100_device_ac97,
};
static void __init smdkc100_map_io(void)
{
- s5pc1xx_init_io(smdkc100_iodesc, ARRAY_SIZE(smdkc100_iodesc));
+ s5p_init_io(NULL, 0, S5P_VA_CHIPID);
s3c24xx_init_clocks(12000000);
s3c24xx_init_uarts(smdkc100_uartcfgs, ARRAY_SIZE(smdkc100_uartcfgs));
}
@@ -179,10 +187,9 @@ static void __init smdkc100_machine_init(void)
MACHINE_START(SMDKC100, "SMDKC100")
/* Maintainer: Byungho Min <bhmin@samsung.com> */
- .phys_io = S5PC100_PA_UART & 0xfff00000,
- .io_pg_offst = (((u32)S5PC1XX_VA_UART) >> 18) & 0xfffc,
- .boot_params = S5PC100_PA_SDRAM + 0x100,
-
+ .phys_io = S3C_PA_UART & 0xfff00000,
+ .io_pg_offst = (((u32)S3C_VA_UART) >> 18) & 0xfffc,
+ .boot_params = S5P_PA_SDRAM + 0x100,
.init_irq = s5pc100_init_irq,
.map_io = smdkc100_map_io,
.init_machine = smdkc100_machine_init,
diff --git a/arch/arm/plat-s5pc1xx/setup-fb-24bpp.c b/arch/arm/mach-s5pc100/setup-fb-24bpp.c
index 1a63768a9a2e..6eba6cb8e2f4 100644
--- a/arch/arm/plat-s5pc1xx/setup-fb-24bpp.c
+++ b/arch/arm/mach-s5pc100/setup-fb-24bpp.c
@@ -1,9 +1,9 @@
/*
- * linux/arch/arm/plat-s5pc100/setup-fb-24bpp.c
+ * linux/arch/arm/mach-s5pc100/setup-fb-24bpp.c
*
* Copyright 2009 Samsung Electronics
*
- * Base S5PC1XX setup information for 24bpp LCD framebuffer
+ * Base S5PC100 setup information for 24bpp LCD framebuffer
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -19,7 +19,6 @@
#include <mach/map.h>
#include <plat/fb.h>
#include <plat/gpio-cfg.h>
-#include <plat/gpio-cfg-s5pc1xx.h>
#define DISR_OFFSET 0x7008
diff --git a/arch/arm/plat-s5pc1xx/setup-i2c0.c b/arch/arm/mach-s5pc100/setup-i2c0.c
index 5e4a7c3a231e..dd3174e6ecc5 100644
--- a/arch/arm/plat-s5pc1xx/setup-i2c0.c
+++ b/arch/arm/mach-s5pc100/setup-i2c0.c
@@ -1,9 +1,9 @@
-/* linux/arch/arm/plat-s5pc1xx/setup-i2c0.c
+/* linux/arch/arm/mach-s5pc100/setup-i2c0.c
*
* Copyright 2009 Samsung Electronics Co.
* Byungho Min <bhmin@samsung.com>
*
- * Base S5PC1XX I2C bus 0 gpio configuration
+ * Base S5PC100 I2C bus 0 gpio configuration
*
* Based on plat-s3c64xx/setup-i2c0.c
*
diff --git a/arch/arm/plat-s5pc1xx/setup-i2c1.c b/arch/arm/mach-s5pc100/setup-i2c1.c
index a0a8b4ae6ad8..d1fec26b69ee 100644
--- a/arch/arm/plat-s5pc1xx/setup-i2c1.c
+++ b/arch/arm/mach-s5pc100/setup-i2c1.c
@@ -1,9 +1,9 @@
-/* linux/arch/arm/plat-s3c64xx/setup-i2c1.c
+/* linux/arch/arm/mach-s5pc100/setup-i2c1.c
*
* Copyright 2009 Samsung Electronics Co.
* Byungho Min <bhmin@samsung.com>
*
- * Base S5PC1XX I2C bus 1 gpio configuration
+ * Base S5PC100 I2C bus 1 gpio configuration
*
* Based on plat-s3c64xx/setup-i2c1.c
*
diff --git a/arch/arm/plat-s5pc1xx/setup-sdhci-gpio.c b/arch/arm/mach-s5pc100/setup-sdhci-gpio.c
index 185c8941e644..7769c760c9ef 100644
--- a/arch/arm/plat-s5pc1xx/setup-sdhci-gpio.c
+++ b/arch/arm/mach-s5pc100/setup-sdhci-gpio.c
@@ -1,8 +1,8 @@
-/* linux/arch/arm/plat-s5pc1xx/setup-sdhci-gpio.c
+/* linux/arch/arm/mach-s5pc100/setup-sdhci-gpio.c
*
 * Copyright 2009 Samsung Electronics
*
- * S5PC1XX - Helper functions for setting up SDHCI device(s) GPIO (HSMMC)
+ * S5PC100 - Helper functions for setting up SDHCI device(s) GPIO (HSMMC)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
diff --git a/arch/arm/mach-s5pv210/Kconfig b/arch/arm/mach-s5pv210/Kconfig
index af33a1a89b72..0761eac9aaea 100644
--- a/arch/arm/mach-s5pv210/Kconfig
+++ b/arch/arm/mach-s5pv210/Kconfig
@@ -12,18 +12,69 @@ if ARCH_S5PV210
config CPU_S5PV210
bool
select PLAT_S5P
+ select S3C_PL330_DMA
+ select S5P_EXT_INT
help
Enable S5PV210 CPU support
-choice
- prompt "Select machine type"
- depends on ARCH_S5PV210
- default MACH_SMDKV210
+config S5PV210_SETUP_I2C1
+ bool
+ help
+ Common setup code for i2c bus 1.
+
+config S5PV210_SETUP_I2C2
+ bool
+ help
+ Common setup code for i2c bus 2.
+
+config S5PV210_SETUP_FB_24BPP
+ bool
+ help
+	  Common setup code for S5PV210 with a 24bpp RGB display helper.
+
+config S5PV210_SETUP_SDHCI
+ bool
+ select S5PV210_SETUP_SDHCI_GPIO
+ help
+ Internal helper functions for S5PV210 based SDHCI systems
+
+config S5PV210_SETUP_SDHCI_GPIO
+ bool
+ help
+ Common setup code for SDHCI gpio.
+
+# machine support
+
+config MACH_AQUILA
+ bool "Samsung Aquila"
+ select CPU_S5PV210
+ select ARCH_SPARSEMEM_ENABLE
+ select S5PV210_SETUP_FB_24BPP
+ select S3C_DEV_FB
+ help
+ Machine support for the Samsung Aquila target based on S5PC110 SoC
+
+config MACH_GONI
+ bool "GONI"
+ select CPU_S5PV210
+ select ARCH_SPARSEMEM_ENABLE
+ help
+ Machine support for Samsung GONI board
+	  S5PC110(MCP) is one of the package options of S5PV210
+
+config S5PC110_DEV_ONENAND
+ bool
+ help
+ Compile in platform device definition for OneNAND1 controller
config MACH_SMDKV210
bool "SMDKV210"
select CPU_S5PV210
select ARCH_SPARSEMEM_ENABLE
+ select SAMSUNG_DEV_ADC
+ select SAMSUNG_DEV_TS
+ select S3C_DEV_WDT
+ select HAVE_S3C2410_WATCHDOG
help
Machine support for Samsung SMDKV210
@@ -31,10 +82,10 @@ config MACH_SMDKC110
bool "SMDKC110"
select CPU_S5PV210
select ARCH_SPARSEMEM_ENABLE
+ select S3C_DEV_WDT
+ select HAVE_S3C2410_WATCHDOG
help
Machine support for Samsung SMDKC110
	  S5PC110(MCP) is one of the package options of S5PV210
-endchoice
-
endif
diff --git a/arch/arm/mach-s5pv210/Makefile b/arch/arm/mach-s5pv210/Makefile
index 8ebf51c52a01..30be9a6a4620 100644
--- a/arch/arm/mach-s5pv210/Makefile
+++ b/arch/arm/mach-s5pv210/Makefile
@@ -12,9 +12,24 @@ obj- :=
# Core support for S5PV210 system
-obj-$(CONFIG_CPU_S5PV210) += cpu.o init.o clock.o
+obj-$(CONFIG_CPU_S5PV210) += cpu.o init.o clock.o dma.o gpiolib.o
+obj-$(CONFIG_CPU_S5PV210) += setup-i2c0.o
# machine support
+obj-$(CONFIG_MACH_AQUILA) += mach-aquila.o
obj-$(CONFIG_MACH_SMDKV210) += mach-smdkv210.o
obj-$(CONFIG_MACH_SMDKC110) += mach-smdkc110.o
+obj-$(CONFIG_MACH_GONI) += mach-goni.o
+
+# device support
+
+obj-y += dev-audio.o
+obj-$(CONFIG_S3C64XX_DEV_SPI) += dev-spi.o
+obj-$(CONFIG_S5PC110_DEV_ONENAND) += dev-onenand.o
+
+obj-$(CONFIG_S5PV210_SETUP_FB_24BPP) += setup-fb-24bpp.o
+obj-$(CONFIG_S5PV210_SETUP_I2C1) += setup-i2c1.o
+obj-$(CONFIG_S5PV210_SETUP_I2C2) += setup-i2c2.o
+obj-$(CONFIG_S5PV210_SETUP_SDHCI) += setup-sdhci.o
+obj-$(CONFIG_S5PV210_SETUP_SDHCI_GPIO) += setup-sdhci-gpio.o
diff --git a/arch/arm/mach-s5pv210/clock.c b/arch/arm/mach-s5pv210/clock.c
index ccccae262351..154bca4abc09 100644
--- a/arch/arm/mach-s5pv210/clock.c
+++ b/arch/arm/mach-s5pv210/clock.c
@@ -31,6 +31,128 @@
#include <plat/clock-clksrc.h>
#include <plat/s5pv210.h>
+static struct clksrc_clk clk_mout_apll = {
+ .clk = {
+ .name = "mout_apll",
+ .id = -1,
+ },
+ .sources = &clk_src_apll,
+ .reg_src = { .reg = S5P_CLK_SRC0, .shift = 0, .size = 1 },
+};
+
+static struct clksrc_clk clk_mout_epll = {
+ .clk = {
+ .name = "mout_epll",
+ .id = -1,
+ },
+ .sources = &clk_src_epll,
+ .reg_src = { .reg = S5P_CLK_SRC0, .shift = 8, .size = 1 },
+};
+
+static struct clksrc_clk clk_mout_mpll = {
+ .clk = {
+ .name = "mout_mpll",
+ .id = -1,
+ },
+ .sources = &clk_src_mpll,
+ .reg_src = { .reg = S5P_CLK_SRC0, .shift = 4, .size = 1 },
+};
+
+static struct clk *clkset_armclk_list[] = {
+ [0] = &clk_mout_apll.clk,
+ [1] = &clk_mout_mpll.clk,
+};
+
+static struct clksrc_sources clkset_armclk = {
+ .sources = clkset_armclk_list,
+ .nr_sources = ARRAY_SIZE(clkset_armclk_list),
+};
+
+static struct clksrc_clk clk_armclk = {
+ .clk = {
+ .name = "armclk",
+ .id = -1,
+ },
+ .sources = &clkset_armclk,
+ .reg_src = { .reg = S5P_CLK_SRC0, .shift = 16, .size = 1 },
+ .reg_div = { .reg = S5P_CLK_DIV0, .shift = 0, .size = 3 },
+};
+
+static struct clksrc_clk clk_hclk_msys = {
+ .clk = {
+ .name = "hclk_msys",
+ .id = -1,
+ .parent = &clk_armclk.clk,
+ },
+ .reg_div = { .reg = S5P_CLK_DIV0, .shift = 8, .size = 3 },
+};
+
+static struct clksrc_clk clk_pclk_msys = {
+ .clk = {
+ .name = "pclk_msys",
+ .id = -1,
+ .parent = &clk_hclk_msys.clk,
+ },
+ .reg_div = { .reg = S5P_CLK_DIV0, .shift = 12, .size = 3 },
+};
+
+static struct clksrc_clk clk_sclk_a2m = {
+ .clk = {
+ .name = "sclk_a2m",
+ .id = -1,
+ .parent = &clk_mout_apll.clk,
+ },
+ .reg_div = { .reg = S5P_CLK_DIV0, .shift = 4, .size = 3 },
+};
+
+static struct clk *clkset_hclk_sys_list[] = {
+ [0] = &clk_mout_mpll.clk,
+ [1] = &clk_sclk_a2m.clk,
+};
+
+static struct clksrc_sources clkset_hclk_sys = {
+ .sources = clkset_hclk_sys_list,
+ .nr_sources = ARRAY_SIZE(clkset_hclk_sys_list),
+};
+
+static struct clksrc_clk clk_hclk_dsys = {
+ .clk = {
+ .name = "hclk_dsys",
+ .id = -1,
+ },
+ .sources = &clkset_hclk_sys,
+ .reg_src = { .reg = S5P_CLK_SRC0, .shift = 20, .size = 1 },
+ .reg_div = { .reg = S5P_CLK_DIV0, .shift = 16, .size = 4 },
+};
+
+static struct clksrc_clk clk_pclk_dsys = {
+ .clk = {
+ .name = "pclk_dsys",
+ .id = -1,
+ .parent = &clk_hclk_dsys.clk,
+ },
+ .reg_div = { .reg = S5P_CLK_DIV0, .shift = 20, .size = 3 },
+};
+
+static struct clksrc_clk clk_hclk_psys = {
+ .clk = {
+ .name = "hclk_psys",
+ .id = -1,
+ },
+ .sources = &clkset_hclk_sys,
+ .reg_src = { .reg = S5P_CLK_SRC0, .shift = 24, .size = 1 },
+ .reg_div = { .reg = S5P_CLK_DIV0, .shift = 24, .size = 4 },
+};
+
+static struct clksrc_clk clk_pclk_psys = {
+ .clk = {
+ .name = "pclk_psys",
+ .id = -1,
+ .parent = &clk_hclk_psys.clk,
+ },
+ .reg_div = { .reg = S5P_CLK_DIV0, .shift = 28, .size = 3 },
+};
+
static int s5pv210_clk_ip0_ctrl(struct clk *clk, int enable)
{
return s5p_gatectrl(S5P_CLKGATE_IP0, clk, enable);
@@ -51,176 +173,226 @@ static int s5pv210_clk_ip3_ctrl(struct clk *clk, int enable)
return s5p_gatectrl(S5P_CLKGATE_IP3, clk, enable);
}
-static struct clk clk_h200 = {
- .name = "hclk200",
+static int s5pv210_clk_ip4_ctrl(struct clk *clk, int enable)
+{
+ return s5p_gatectrl(S5P_CLKGATE_IP4, clk, enable);
+}
+
+static int s5pv210_clk_mask0_ctrl(struct clk *clk, int enable)
+{
+ return s5p_gatectrl(S5P_CLK_SRC_MASK0, clk, enable);
+}
+
+static struct clk clk_sclk_hdmi27m = {
+ .name = "sclk_hdmi27m",
.id = -1,
+ .rate = 27000000,
};
-static struct clk clk_h100 = {
- .name = "hclk100",
+static struct clk clk_sclk_hdmiphy = {
+ .name = "sclk_hdmiphy",
.id = -1,
};
-static struct clk clk_h166 = {
- .name = "hclk166",
+static struct clk clk_sclk_usbphy0 = {
+ .name = "sclk_usbphy0",
.id = -1,
};
-static struct clk clk_h133 = {
- .name = "hclk133",
+static struct clk clk_sclk_usbphy1 = {
+ .name = "sclk_usbphy1",
.id = -1,
};
-static struct clk clk_p100 = {
- .name = "pclk100",
+static struct clk clk_pcmcdclk0 = {
+ .name = "pcmcdclk",
.id = -1,
};
-static struct clk clk_p83 = {
- .name = "pclk83",
+static struct clk clk_pcmcdclk1 = {
+ .name = "pcmcdclk",
.id = -1,
};
-static struct clk clk_p66 = {
- .name = "pclk66",
+static struct clk clk_pcmcdclk2 = {
+ .name = "pcmcdclk",
.id = -1,
};
-static struct clk *sys_clks[] = {
- &clk_h200,
- &clk_h100,
- &clk_h166,
- &clk_h133,
- &clk_p100,
- &clk_p83,
- &clk_p66
+static struct clk *clkset_vpllsrc_list[] = {
+ [0] = &clk_fin_vpll,
+ [1] = &clk_sclk_hdmi27m,
+};
+
+static struct clksrc_sources clkset_vpllsrc = {
+ .sources = clkset_vpllsrc_list,
+ .nr_sources = ARRAY_SIZE(clkset_vpllsrc_list),
+};
+
+static struct clksrc_clk clk_vpllsrc = {
+ .clk = {
+ .name = "vpll_src",
+ .id = -1,
+ .enable = s5pv210_clk_mask0_ctrl,
+ .ctrlbit = (1 << 7),
+ },
+ .sources = &clkset_vpllsrc,
+ .reg_src = { .reg = S5P_CLK_SRC1, .shift = 28, .size = 1 },
+};
+
+static struct clk *clkset_sclk_vpll_list[] = {
+ [0] = &clk_vpllsrc.clk,
+ [1] = &clk_fout_vpll,
+};
+
+static struct clksrc_sources clkset_sclk_vpll = {
+ .sources = clkset_sclk_vpll_list,
+ .nr_sources = ARRAY_SIZE(clkset_sclk_vpll_list),
+};
+
+static struct clksrc_clk clk_sclk_vpll = {
+ .clk = {
+ .name = "sclk_vpll",
+ .id = -1,
+ },
+ .sources = &clkset_sclk_vpll,
+ .reg_src = { .reg = S5P_CLK_SRC0, .shift = 12, .size = 1 },
+};
+
+static unsigned long s5pv210_clk_imem_get_rate(struct clk *clk)
+{
+ return clk_get_rate(clk->parent) / 2;
+}
+
+static struct clk_ops clk_hclk_imem_ops = {
+ .get_rate = s5pv210_clk_imem_get_rate,
};
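+
+/*
+ * hclk_imem has no divider field of its own: the get_rate op above just
+ * reports half the parent rate, so with hclk_msys at, say, 200 MHz a
+ * clk_get_rate() on "hclk_imem" would return 100 MHz.
+ */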
static struct clk init_clocks_disable[] = {
{
.name = "rot",
.id = -1,
- .parent = &clk_h166,
+ .parent = &clk_hclk_dsys.clk,
.enable = s5pv210_clk_ip0_ctrl,
.ctrlbit = (1<<29),
}, {
.name = "otg",
.id = -1,
- .parent = &clk_h133,
+ .parent = &clk_hclk_psys.clk,
.enable = s5pv210_clk_ip1_ctrl,
.ctrlbit = (1<<16),
}, {
.name = "usb-host",
.id = -1,
- .parent = &clk_h133,
+ .parent = &clk_hclk_psys.clk,
.enable = s5pv210_clk_ip1_ctrl,
.ctrlbit = (1<<17),
}, {
.name = "lcd",
.id = -1,
- .parent = &clk_h166,
+ .parent = &clk_hclk_dsys.clk,
.enable = s5pv210_clk_ip1_ctrl,
.ctrlbit = (1<<0),
}, {
.name = "cfcon",
.id = 0,
- .parent = &clk_h133,
+ .parent = &clk_hclk_psys.clk,
.enable = s5pv210_clk_ip1_ctrl,
.ctrlbit = (1<<25),
}, {
.name = "hsmmc",
.id = 0,
- .parent = &clk_h133,
+ .parent = &clk_hclk_psys.clk,
.enable = s5pv210_clk_ip2_ctrl,
.ctrlbit = (1<<16),
}, {
.name = "hsmmc",
.id = 1,
- .parent = &clk_h133,
+ .parent = &clk_hclk_psys.clk,
.enable = s5pv210_clk_ip2_ctrl,
.ctrlbit = (1<<17),
}, {
.name = "hsmmc",
.id = 2,
- .parent = &clk_h133,
+ .parent = &clk_hclk_psys.clk,
.enable = s5pv210_clk_ip2_ctrl,
.ctrlbit = (1<<18),
}, {
.name = "hsmmc",
.id = 3,
- .parent = &clk_h133,
+ .parent = &clk_hclk_psys.clk,
.enable = s5pv210_clk_ip2_ctrl,
.ctrlbit = (1<<19),
}, {
.name = "systimer",
.id = -1,
- .parent = &clk_p66,
+ .parent = &clk_pclk_psys.clk,
.enable = s5pv210_clk_ip3_ctrl,
.ctrlbit = (1<<16),
}, {
.name = "watchdog",
.id = -1,
- .parent = &clk_p66,
+ .parent = &clk_pclk_psys.clk,
.enable = s5pv210_clk_ip3_ctrl,
.ctrlbit = (1<<22),
}, {
.name = "rtc",
.id = -1,
- .parent = &clk_p66,
+ .parent = &clk_pclk_psys.clk,
.enable = s5pv210_clk_ip3_ctrl,
.ctrlbit = (1<<15),
}, {
.name = "i2c",
.id = 0,
- .parent = &clk_p66,
+ .parent = &clk_pclk_psys.clk,
.enable = s5pv210_clk_ip3_ctrl,
.ctrlbit = (1<<7),
}, {
.name = "i2c",
.id = 1,
- .parent = &clk_p66,
+ .parent = &clk_pclk_psys.clk,
.enable = s5pv210_clk_ip3_ctrl,
.ctrlbit = (1<<8),
}, {
.name = "i2c",
.id = 2,
- .parent = &clk_p66,
+ .parent = &clk_pclk_psys.clk,
.enable = s5pv210_clk_ip3_ctrl,
.ctrlbit = (1<<9),
}, {
.name = "spi",
.id = 0,
- .parent = &clk_p66,
+ .parent = &clk_pclk_psys.clk,
.enable = s5pv210_clk_ip3_ctrl,
.ctrlbit = (1<<12),
}, {
.name = "spi",
.id = 1,
- .parent = &clk_p66,
+ .parent = &clk_pclk_psys.clk,
.enable = s5pv210_clk_ip3_ctrl,
.ctrlbit = (1<<13),
}, {
.name = "spi",
.id = 2,
- .parent = &clk_p66,
+ .parent = &clk_pclk_psys.clk,
.enable = s5pv210_clk_ip3_ctrl,
.ctrlbit = (1<<14),
}, {
.name = "timers",
.id = -1,
- .parent = &clk_p66,
+ .parent = &clk_pclk_psys.clk,
.enable = s5pv210_clk_ip3_ctrl,
.ctrlbit = (1<<23),
}, {
.name = "adc",
.id = -1,
- .parent = &clk_p66,
+ .parent = &clk_pclk_psys.clk,
.enable = s5pv210_clk_ip3_ctrl,
.ctrlbit = (1<<24),
}, {
.name = "keypad",
.id = -1,
- .parent = &clk_p66,
+ .parent = &clk_pclk_psys.clk,
.enable = s5pv210_clk_ip3_ctrl,
.ctrlbit = (1<<21),
}, {
@@ -246,106 +418,537 @@ static struct clk init_clocks_disable[] = {
static struct clk init_clocks[] = {
{
+ .name = "hclk_imem",
+ .id = -1,
+ .parent = &clk_hclk_msys.clk,
+ .ctrlbit = (1 << 5),
+ .enable = s5pv210_clk_ip0_ctrl,
+ .ops = &clk_hclk_imem_ops,
+ }, {
.name = "uart",
.id = 0,
- .parent = &clk_p66,
+ .parent = &clk_pclk_psys.clk,
.enable = s5pv210_clk_ip3_ctrl,
.ctrlbit = (1<<7),
}, {
.name = "uart",
.id = 1,
- .parent = &clk_p66,
+ .parent = &clk_pclk_psys.clk,
.enable = s5pv210_clk_ip3_ctrl,
.ctrlbit = (1<<8),
}, {
.name = "uart",
.id = 2,
- .parent = &clk_p66,
+ .parent = &clk_pclk_psys.clk,
.enable = s5pv210_clk_ip3_ctrl,
.ctrlbit = (1<<9),
}, {
.name = "uart",
.id = 3,
- .parent = &clk_p66,
+ .parent = &clk_pclk_psys.clk,
.enable = s5pv210_clk_ip3_ctrl,
.ctrlbit = (1<<10),
},
};
-static struct clksrc_clk clk_mout_apll = {
- .clk = {
- .name = "mout_apll",
+static struct clk *clkset_uart_list[] = {
+ [6] = &clk_mout_mpll.clk,
+ [7] = &clk_mout_epll.clk,
+};
+
+static struct clksrc_sources clkset_uart = {
+ .sources = clkset_uart_list,
+ .nr_sources = ARRAY_SIZE(clkset_uart_list),
+};
+
+static struct clk *clkset_group1_list[] = {
+ [0] = &clk_sclk_a2m.clk,
+ [1] = &clk_mout_mpll.clk,
+ [2] = &clk_mout_epll.clk,
+ [3] = &clk_sclk_vpll.clk,
+};
+
+static struct clksrc_sources clkset_group1 = {
+ .sources = clkset_group1_list,
+ .nr_sources = ARRAY_SIZE(clkset_group1_list),
+};
+
+static struct clk *clkset_sclk_onenand_list[] = {
+ [0] = &clk_hclk_psys.clk,
+ [1] = &clk_hclk_dsys.clk,
+};
+
+static struct clksrc_sources clkset_sclk_onenand = {
+ .sources = clkset_sclk_onenand_list,
+ .nr_sources = ARRAY_SIZE(clkset_sclk_onenand_list),
+};
+
+static struct clk *clkset_sclk_dac_list[] = {
+ [0] = &clk_sclk_vpll.clk,
+ [1] = &clk_sclk_hdmiphy,
+};
+
+static struct clksrc_sources clkset_sclk_dac = {
+ .sources = clkset_sclk_dac_list,
+ .nr_sources = ARRAY_SIZE(clkset_sclk_dac_list),
+};
+
+static struct clksrc_clk clk_sclk_dac = {
+ .clk = {
+ .name = "sclk_dac",
.id = -1,
+ .ctrlbit = (1 << 10),
+ .enable = s5pv210_clk_ip1_ctrl,
},
- .sources = &clk_src_apll,
- .reg_src = { .reg = S5P_CLK_SRC0, .shift = 0, .size = 1 },
+ .sources = &clkset_sclk_dac,
+ .reg_src = { .reg = S5P_CLK_SRC1, .shift = 8, .size = 1 },
};
-static struct clksrc_clk clk_mout_epll = {
- .clk = {
- .name = "mout_epll",
+static struct clksrc_clk clk_sclk_pixel = {
+ .clk = {
+ .name = "sclk_pixel",
.id = -1,
+ .parent = &clk_sclk_vpll.clk,
},
- .sources = &clk_src_epll,
- .reg_src = { .reg = S5P_CLK_SRC0, .shift = 8, .size = 1 },
+ .reg_div = { .reg = S5P_CLK_DIV1, .shift = 0, .size = 4},
};
-static struct clksrc_clk clk_mout_mpll = {
- .clk = {
- .name = "mout_mpll",
+static struct clk *clkset_sclk_hdmi_list[] = {
+ [0] = &clk_sclk_pixel.clk,
+ [1] = &clk_sclk_hdmiphy,
+};
+
+static struct clksrc_sources clkset_sclk_hdmi = {
+ .sources = clkset_sclk_hdmi_list,
+ .nr_sources = ARRAY_SIZE(clkset_sclk_hdmi_list),
+};
+
+static struct clksrc_clk clk_sclk_hdmi = {
+ .clk = {
+ .name = "sclk_hdmi",
.id = -1,
+ .enable = s5pv210_clk_ip1_ctrl,
+ .ctrlbit = (1 << 11),
},
- .sources = &clk_src_mpll,
- .reg_src = { .reg = S5P_CLK_SRC0, .shift = 4, .size = 1 },
+ .sources = &clkset_sclk_hdmi,
+ .reg_src = { .reg = S5P_CLK_SRC1, .shift = 0, .size = 1 },
};
-static struct clk *clkset_uart_list[] = {
+static struct clk *clkset_sclk_mixer_list[] = {
+ [0] = &clk_sclk_dac.clk,
+ [1] = &clk_sclk_hdmi.clk,
+};
+
+static struct clksrc_sources clkset_sclk_mixer = {
+ .sources = clkset_sclk_mixer_list,
+ .nr_sources = ARRAY_SIZE(clkset_sclk_mixer_list),
+};
+
+static struct clk *clkset_sclk_audio0_list[] = {
+ [0] = &clk_ext_xtal_mux,
+ [1] = &clk_pcmcdclk0,
+ [2] = &clk_sclk_hdmi27m,
+ [3] = &clk_sclk_usbphy0,
+ [4] = &clk_sclk_usbphy1,
+ [5] = &clk_sclk_hdmiphy,
[6] = &clk_mout_mpll.clk,
[7] = &clk_mout_epll.clk,
+ [8] = &clk_sclk_vpll.clk,
};
-static struct clksrc_sources clkset_uart = {
- .sources = clkset_uart_list,
- .nr_sources = ARRAY_SIZE(clkset_uart_list),
+static struct clksrc_sources clkset_sclk_audio0 = {
+ .sources = clkset_sclk_audio0_list,
+ .nr_sources = ARRAY_SIZE(clkset_sclk_audio0_list),
+};
+
+static struct clksrc_clk clk_sclk_audio0 = {
+ .clk = {
+ .name = "sclk_audio",
+ .id = 0,
+ .enable = s5pv210_clk_ip3_ctrl,
+ .ctrlbit = (1 << 4),
+ },
+ .sources = &clkset_sclk_audio0,
+ .reg_src = { .reg = S5P_CLK_SRC6, .shift = 0, .size = 4 },
+ .reg_div = { .reg = S5P_CLK_DIV6, .shift = 0, .size = 4 },
+};
+
+static struct clk *clkset_sclk_audio1_list[] = {
+ [0] = &clk_ext_xtal_mux,
+ [1] = &clk_pcmcdclk1,
+ [2] = &clk_sclk_hdmi27m,
+ [3] = &clk_sclk_usbphy0,
+ [4] = &clk_sclk_usbphy1,
+ [5] = &clk_sclk_hdmiphy,
+ [6] = &clk_mout_mpll.clk,
+ [7] = &clk_mout_epll.clk,
+ [8] = &clk_sclk_vpll.clk,
+};
+
+static struct clksrc_sources clkset_sclk_audio1 = {
+ .sources = clkset_sclk_audio1_list,
+ .nr_sources = ARRAY_SIZE(clkset_sclk_audio1_list),
+};
+
+static struct clksrc_clk clk_sclk_audio1 = {
+ .clk = {
+ .name = "sclk_audio",
+ .id = 1,
+ .enable = s5pv210_clk_ip3_ctrl,
+ .ctrlbit = (1 << 5),
+ },
+ .sources = &clkset_sclk_audio1,
+ .reg_src = { .reg = S5P_CLK_SRC6, .shift = 4, .size = 4 },
+ .reg_div = { .reg = S5P_CLK_DIV6, .shift = 4, .size = 4 },
+};
+
+static struct clk *clkset_sclk_audio2_list[] = {
+ [0] = &clk_ext_xtal_mux,
+ [1] = &clk_pcmcdclk0,
+ [2] = &clk_sclk_hdmi27m,
+ [3] = &clk_sclk_usbphy0,
+ [4] = &clk_sclk_usbphy1,
+ [5] = &clk_sclk_hdmiphy,
+ [6] = &clk_mout_mpll.clk,
+ [7] = &clk_mout_epll.clk,
+ [8] = &clk_sclk_vpll.clk,
+};
+
+static struct clksrc_sources clkset_sclk_audio2 = {
+ .sources = clkset_sclk_audio2_list,
+ .nr_sources = ARRAY_SIZE(clkset_sclk_audio2_list),
+};
+
+static struct clksrc_clk clk_sclk_audio2 = {
+ .clk = {
+ .name = "sclk_audio",
+ .id = 2,
+ .enable = s5pv210_clk_ip3_ctrl,
+ .ctrlbit = (1 << 6),
+ },
+ .sources = &clkset_sclk_audio2,
+ .reg_src = { .reg = S5P_CLK_SRC6, .shift = 8, .size = 4 },
+ .reg_div = { .reg = S5P_CLK_DIV6, .shift = 8, .size = 4 },
+};
+
+static struct clk *clkset_sclk_spdif_list[] = {
+ [0] = &clk_sclk_audio0.clk,
+ [1] = &clk_sclk_audio1.clk,
+ [2] = &clk_sclk_audio2.clk,
+};
+
+static struct clksrc_sources clkset_sclk_spdif = {
+ .sources = clkset_sclk_spdif_list,
+ .nr_sources = ARRAY_SIZE(clkset_sclk_spdif_list),
+};
+
+static struct clk *clkset_group2_list[] = {
+ [0] = &clk_ext_xtal_mux,
+ [1] = &clk_xusbxti,
+ [2] = &clk_sclk_hdmi27m,
+ [3] = &clk_sclk_usbphy0,
+ [4] = &clk_sclk_usbphy1,
+ [5] = &clk_sclk_hdmiphy,
+ [6] = &clk_mout_mpll.clk,
+ [7] = &clk_mout_epll.clk,
+ [8] = &clk_sclk_vpll.clk,
+};
+
+static struct clksrc_sources clkset_group2 = {
+ .sources = clkset_group2_list,
+ .nr_sources = ARRAY_SIZE(clkset_group2_list),
};
static struct clksrc_clk clksrcs[] = {
{
.clk = {
- .name = "uclk1",
+ .name = "sclk_dmc",
.id = -1,
+ },
+ .sources = &clkset_group1,
+ .reg_src = { .reg = S5P_CLK_SRC6, .shift = 24, .size = 2 },
+ .reg_div = { .reg = S5P_CLK_DIV6, .shift = 28, .size = 4 },
+ }, {
+ .clk = {
+ .name = "sclk_onenand",
+ .id = -1,
+ },
+ .sources = &clkset_sclk_onenand,
+ .reg_src = { .reg = S5P_CLK_SRC0, .shift = 28, .size = 1 },
+ .reg_div = { .reg = S5P_CLK_DIV6, .shift = 12, .size = 3 },
+ }, {
+ .clk = {
+ .name = "uclk1",
+ .id = 0,
.ctrlbit = (1<<17),
.enable = s5pv210_clk_ip3_ctrl,
},
.sources = &clkset_uart,
.reg_src = { .reg = S5P_CLK_SRC4, .shift = 16, .size = 4 },
.reg_div = { .reg = S5P_CLK_DIV4, .shift = 16, .size = 4 },
- }
+ }, {
+ .clk = {
+ .name = "uclk1",
+ .id = 1,
+ .enable = s5pv210_clk_ip3_ctrl,
+ .ctrlbit = (1 << 18),
+ },
+ .sources = &clkset_uart,
+ .reg_src = { .reg = S5P_CLK_SRC4, .shift = 20, .size = 4 },
+ .reg_div = { .reg = S5P_CLK_DIV4, .shift = 20, .size = 4 },
+ }, {
+ .clk = {
+ .name = "uclk1",
+ .id = 2,
+ .enable = s5pv210_clk_ip3_ctrl,
+ .ctrlbit = (1 << 19),
+ },
+ .sources = &clkset_uart,
+ .reg_src = { .reg = S5P_CLK_SRC4, .shift = 24, .size = 4 },
+ .reg_div = { .reg = S5P_CLK_DIV4, .shift = 24, .size = 4 },
+ }, {
+ .clk = {
+ .name = "uclk1",
+ .id = 3,
+ .enable = s5pv210_clk_ip3_ctrl,
+ .ctrlbit = (1 << 20),
+ },
+ .sources = &clkset_uart,
+ .reg_src = { .reg = S5P_CLK_SRC4, .shift = 28, .size = 4 },
+ .reg_div = { .reg = S5P_CLK_DIV4, .shift = 28, .size = 4 },
+ }, {
+ .clk = {
+ .name = "sclk_mixer",
+ .id = -1,
+ .enable = s5pv210_clk_ip1_ctrl,
+ .ctrlbit = (1 << 9),
+ },
+ .sources = &clkset_sclk_mixer,
+ .reg_src = { .reg = S5P_CLK_SRC1, .shift = 4, .size = 1 },
+ }, {
+ .clk = {
+ .name = "sclk_spdif",
+ .id = -1,
+ .enable = s5pv210_clk_mask0_ctrl,
+ .ctrlbit = (1 << 27),
+ },
+ .sources = &clkset_sclk_spdif,
+ .reg_src = { .reg = S5P_CLK_SRC6, .shift = 12, .size = 2 },
+ }, {
+ .clk = {
+ .name = "sclk_fimc",
+ .id = 0,
+ .enable = s5pv210_clk_ip0_ctrl,
+ .ctrlbit = (1 << 24),
+ },
+ .sources = &clkset_group2,
+ .reg_src = { .reg = S5P_CLK_SRC3, .shift = 12, .size = 4 },
+ .reg_div = { .reg = S5P_CLK_DIV3, .shift = 12, .size = 4 },
+ }, {
+ .clk = {
+ .name = "sclk_fimc",
+ .id = 1,
+ .enable = s5pv210_clk_ip0_ctrl,
+ .ctrlbit = (1 << 25),
+ },
+ .sources = &clkset_group2,
+ .reg_src = { .reg = S5P_CLK_SRC3, .shift = 16, .size = 4 },
+ .reg_div = { .reg = S5P_CLK_DIV3, .shift = 16, .size = 4 },
+ }, {
+ .clk = {
+ .name = "sclk_fimc",
+ .id = 2,
+ .enable = s5pv210_clk_ip0_ctrl,
+ .ctrlbit = (1 << 26),
+ },
+ .sources = &clkset_group2,
+ .reg_src = { .reg = S5P_CLK_SRC3, .shift = 20, .size = 4 },
+ .reg_div = { .reg = S5P_CLK_DIV3, .shift = 20, .size = 4 },
+ }, {
+ .clk = {
+ .name = "sclk_cam",
+ .id = 0,
+ },
+ .sources = &clkset_group2,
+ .reg_src = { .reg = S5P_CLK_SRC1, .shift = 12, .size = 4 },
+ .reg_div = { .reg = S5P_CLK_DIV1, .shift = 12, .size = 4 },
+ }, {
+ .clk = {
+ .name = "sclk_cam",
+ .id = 1,
+ },
+ .sources = &clkset_group2,
+ .reg_src = { .reg = S5P_CLK_SRC1, .shift = 16, .size = 4 },
+ .reg_div = { .reg = S5P_CLK_DIV1, .shift = 16, .size = 4 },
+ }, {
+ .clk = {
+ .name = "sclk_fimd",
+ .id = -1,
+ .enable = s5pv210_clk_ip1_ctrl,
+ .ctrlbit = (1 << 0),
+ },
+ .sources = &clkset_group2,
+ .reg_src = { .reg = S5P_CLK_SRC1, .shift = 20, .size = 4 },
+ .reg_div = { .reg = S5P_CLK_DIV1, .shift = 20, .size = 4 },
+ }, {
+ .clk = {
+ .name = "sclk_mmc",
+ .id = 0,
+ .enable = s5pv210_clk_ip2_ctrl,
+ .ctrlbit = (1 << 16),
+ },
+ .sources = &clkset_group2,
+ .reg_src = { .reg = S5P_CLK_SRC4, .shift = 0, .size = 4 },
+ .reg_div = { .reg = S5P_CLK_DIV4, .shift = 0, .size = 4 },
+ }, {
+ .clk = {
+ .name = "sclk_mmc",
+ .id = 1,
+ .enable = s5pv210_clk_ip2_ctrl,
+ .ctrlbit = (1 << 17),
+ },
+ .sources = &clkset_group2,
+ .reg_src = { .reg = S5P_CLK_SRC4, .shift = 4, .size = 4 },
+ .reg_div = { .reg = S5P_CLK_DIV4, .shift = 4, .size = 4 },
+ }, {
+ .clk = {
+ .name = "sclk_mmc",
+ .id = 2,
+ .enable = s5pv210_clk_ip2_ctrl,
+ .ctrlbit = (1 << 18),
+ },
+ .sources = &clkset_group2,
+ .reg_src = { .reg = S5P_CLK_SRC4, .shift = 8, .size = 4 },
+ .reg_div = { .reg = S5P_CLK_DIV4, .shift = 8, .size = 4 },
+ }, {
+ .clk = {
+ .name = "sclk_mmc",
+ .id = 3,
+ .enable = s5pv210_clk_ip2_ctrl,
+ .ctrlbit = (1 << 19),
+ },
+ .sources = &clkset_group2,
+ .reg_src = { .reg = S5P_CLK_SRC4, .shift = 12, .size = 4 },
+ .reg_div = { .reg = S5P_CLK_DIV4, .shift = 12, .size = 4 },
+ }, {
+ .clk = {
+ .name = "sclk_mfc",
+ .id = -1,
+ .enable = s5pv210_clk_ip0_ctrl,
+ .ctrlbit = (1 << 16),
+ },
+ .sources = &clkset_group1,
+ .reg_src = { .reg = S5P_CLK_SRC2, .shift = 4, .size = 2 },
+ .reg_div = { .reg = S5P_CLK_DIV2, .shift = 4, .size = 4 },
+ }, {
+ .clk = {
+ .name = "sclk_g2d",
+ .id = -1,
+ .enable = s5pv210_clk_ip0_ctrl,
+ .ctrlbit = (1 << 12),
+ },
+ .sources = &clkset_group1,
+ .reg_src = { .reg = S5P_CLK_SRC2, .shift = 8, .size = 2 },
+ .reg_div = { .reg = S5P_CLK_DIV2, .shift = 8, .size = 4 },
+ }, {
+ .clk = {
+ .name = "sclk_g3d",
+ .id = -1,
+ .enable = s5pv210_clk_ip0_ctrl,
+ .ctrlbit = (1 << 8),
+ },
+ .sources = &clkset_group1,
+ .reg_src = { .reg = S5P_CLK_SRC2, .shift = 0, .size = 2 },
+ .reg_div = { .reg = S5P_CLK_DIV2, .shift = 0, .size = 4 },
+ }, {
+ .clk = {
+ .name = "sclk_csis",
+ .id = -1,
+ .enable = s5pv210_clk_ip0_ctrl,
+ .ctrlbit = (1 << 31),
+ },
+ .sources = &clkset_group2,
+ .reg_src = { .reg = S5P_CLK_SRC1, .shift = 24, .size = 4 },
+ .reg_div = { .reg = S5P_CLK_DIV1, .shift = 28, .size = 4 },
+ }, {
+ .clk = {
+ .name = "sclk_spi",
+ .id = 0,
+ .enable = s5pv210_clk_ip3_ctrl,
+ .ctrlbit = (1 << 12),
+ },
+ .sources = &clkset_group2,
+ .reg_src = { .reg = S5P_CLK_SRC5, .shift = 0, .size = 4 },
+ .reg_div = { .reg = S5P_CLK_DIV5, .shift = 0, .size = 4 },
+ }, {
+ .clk = {
+ .name = "sclk_spi",
+ .id = 1,
+ .enable = s5pv210_clk_ip3_ctrl,
+ .ctrlbit = (1 << 13),
+ },
+ .sources = &clkset_group2,
+ .reg_src = { .reg = S5P_CLK_SRC5, .shift = 4, .size = 4 },
+ .reg_div = { .reg = S5P_CLK_DIV5, .shift = 4, .size = 4 },
+ }, {
+ .clk = {
+ .name = "sclk_pwi",
+ .id = -1,
+ .enable = s5pv210_clk_ip4_ctrl,
+ .ctrlbit = (1 << 2),
+ },
+ .sources = &clkset_group2,
+ .reg_src = { .reg = S5P_CLK_SRC6, .shift = 20, .size = 4 },
+ .reg_div = { .reg = S5P_CLK_DIV6, .shift = 24, .size = 4 },
+ }, {
+ .clk = {
+ .name = "sclk_pwm",
+ .id = -1,
+ .enable = s5pv210_clk_ip3_ctrl,
+ .ctrlbit = (1 << 23),
+ },
+ .sources = &clkset_group2,
+ .reg_src = { .reg = S5P_CLK_SRC5, .shift = 12, .size = 4 },
+ .reg_div = { .reg = S5P_CLK_DIV5, .shift = 12, .size = 4 },
+ },
};
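+
+/*
+ * A consumer picks these clocks up through the generic clk API; for
+ * example a board or driver could (illustratively, names assumed) do:
+ *
+ *	struct clk *sclk = clk_get(&pdev->dev, "sclk_mmc");
+ *	if (!IS_ERR(sclk)) {
+ *		clk_set_rate(sclk, 52 * 1000 * 1000);
+ *		clk_enable(sclk);
+ *	}
+ *
+ * The mux parent and divider are then programmed through the reg_src
+ * and reg_div fields declared above.
+ */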
/* Clock initialisation code */
-static struct clksrc_clk *init_parents[] = {
+static struct clksrc_clk *sysclks[] = {
&clk_mout_apll,
&clk_mout_epll,
&clk_mout_mpll,
+ &clk_armclk,
+ &clk_hclk_msys,
+ &clk_sclk_a2m,
+ &clk_hclk_dsys,
+ &clk_hclk_psys,
+ &clk_pclk_msys,
+ &clk_pclk_dsys,
+ &clk_pclk_psys,
+ &clk_vpllsrc,
+ &clk_sclk_vpll,
+ &clk_sclk_dac,
+ &clk_sclk_pixel,
+ &clk_sclk_hdmi,
};
-#define GET_DIV(clk, field) ((((clk) & field##_MASK) >> field##_SHIFT) + 1)
-
void __init_or_cpufreq s5pv210_setup_clocks(void)
{
struct clk *xtal_clk;
unsigned long xtal;
+ unsigned long vpllsrc;
unsigned long armclk;
- unsigned long hclk200;
- unsigned long hclk166;
- unsigned long hclk133;
- unsigned long pclk100;
- unsigned long pclk83;
- unsigned long pclk66;
+ unsigned long hclk_msys;
+ unsigned long hclk_dsys;
+ unsigned long hclk_psys;
+ unsigned long pclk_msys;
+ unsigned long pclk_dsys;
+ unsigned long pclk_psys;
unsigned long apll;
unsigned long mpll;
unsigned long epll;
+ unsigned long vpll;
unsigned int ptr;
u32 clkdiv0, clkdiv1;
@@ -368,59 +971,46 @@ void __init_or_cpufreq s5pv210_setup_clocks(void)
apll = s5p_get_pll45xx(xtal, __raw_readl(S5P_APLL_CON), pll_4508);
mpll = s5p_get_pll45xx(xtal, __raw_readl(S5P_MPLL_CON), pll_4502);
epll = s5p_get_pll45xx(xtal, __raw_readl(S5P_EPLL_CON), pll_4500);
-
- printk(KERN_INFO "S5PV210: PLL settings, A=%ld, M=%ld, E=%ld",
- apll, mpll, epll);
-
- armclk = apll / GET_DIV(clkdiv0, S5P_CLKDIV0_APLL);
- if (__raw_readl(S5P_CLK_SRC0) & S5P_CLKSRC0_MUX200_MASK)
- hclk200 = mpll / GET_DIV(clkdiv0, S5P_CLKDIV0_HCLK200);
- else
- hclk200 = armclk / GET_DIV(clkdiv0, S5P_CLKDIV0_HCLK200);
-
- if (__raw_readl(S5P_CLK_SRC0) & S5P_CLKSRC0_MUX166_MASK) {
- hclk166 = apll / GET_DIV(clkdiv0, S5P_CLKDIV0_A2M);
- hclk166 = hclk166 / GET_DIV(clkdiv0, S5P_CLKDIV0_HCLK166);
- } else
- hclk166 = mpll / GET_DIV(clkdiv0, S5P_CLKDIV0_HCLK166);
-
- if (__raw_readl(S5P_CLK_SRC0) & S5P_CLKSRC0_MUX133_MASK) {
- hclk133 = apll / GET_DIV(clkdiv0, S5P_CLKDIV0_A2M);
- hclk133 = hclk133 / GET_DIV(clkdiv0, S5P_CLKDIV0_HCLK133);
- } else
- hclk133 = mpll / GET_DIV(clkdiv0, S5P_CLKDIV0_HCLK133);
-
- pclk100 = hclk200 / GET_DIV(clkdiv0, S5P_CLKDIV0_PCLK100);
- pclk83 = hclk166 / GET_DIV(clkdiv0, S5P_CLKDIV0_PCLK83);
- pclk66 = hclk133 / GET_DIV(clkdiv0, S5P_CLKDIV0_PCLK66);
-
- printk(KERN_INFO "S5PV210: ARMCLK=%ld, HCLKM=%ld, HCLKD=%ld, \
- HCLKP=%ld, PCLKM=%ld, PCLKD=%ld, PCLKP=%ld\n",
- armclk, hclk200, hclk166, hclk133, pclk100, pclk83, pclk66);
+ vpllsrc = clk_get_rate(&clk_vpllsrc.clk);
+ vpll = s5p_get_pll45xx(vpllsrc, __raw_readl(S5P_VPLL_CON), pll_4502);
clk_fout_apll.rate = apll;
clk_fout_mpll.rate = mpll;
clk_fout_epll.rate = epll;
+ clk_fout_vpll.rate = vpll;
- clk_f.rate = armclk;
- clk_h.rate = hclk133;
- clk_p.rate = pclk66;
- clk_p66.rate = pclk66;
- clk_p83.rate = pclk83;
- clk_h133.rate = hclk133;
- clk_h166.rate = hclk166;
- clk_h200.rate = hclk200;
+ printk(KERN_INFO "S5PV210: PLL settings, A=%ld, M=%ld, E=%ld, V=%ld\n",
+ apll, mpll, epll, vpll);
+
+ armclk = clk_get_rate(&clk_armclk.clk);
+ hclk_msys = clk_get_rate(&clk_hclk_msys.clk);
+ hclk_dsys = clk_get_rate(&clk_hclk_dsys.clk);
+ hclk_psys = clk_get_rate(&clk_hclk_psys.clk);
+ pclk_msys = clk_get_rate(&clk_pclk_msys.clk);
+ pclk_dsys = clk_get_rate(&clk_pclk_dsys.clk);
+ pclk_psys = clk_get_rate(&clk_pclk_psys.clk);
+
+ printk(KERN_INFO "S5PV210: ARMCLK=%ld, HCLKM=%ld, HCLKD=%ld\n"
+ "HCLKP=%ld, PCLKM=%ld, PCLKD=%ld, PCLKP=%ld\n",
+ armclk, hclk_msys, hclk_dsys, hclk_psys,
+ pclk_msys, pclk_dsys, pclk_psys);
- for (ptr = 0; ptr < ARRAY_SIZE(init_parents); ptr++)
- s3c_set_clksrc(init_parents[ptr], true);
+ clk_f.rate = armclk;
+ clk_h.rate = hclk_psys;
+ clk_p.rate = pclk_psys;
for (ptr = 0; ptr < ARRAY_SIZE(clksrcs); ptr++)
s3c_set_clksrc(&clksrcs[ptr], true);
}
static struct clk *clks[] __initdata = {
- &clk_mout_epll.clk,
- &clk_mout_mpll.clk,
+ &clk_sclk_hdmi27m,
+ &clk_sclk_hdmiphy,
+ &clk_sclk_usbphy0,
+ &clk_sclk_usbphy1,
+ &clk_pcmcdclk0,
+ &clk_pcmcdclk1,
+ &clk_pcmcdclk2,
};
void __init s5pv210_register_clocks(void)
@@ -433,13 +1023,12 @@ void __init s5pv210_register_clocks(void)
if (ret > 0)
printk(KERN_ERR "Failed to register %u clocks\n", ret);
+ for (ptr = 0; ptr < ARRAY_SIZE(sysclks); ptr++)
+ s3c_register_clksrc(sysclks[ptr], 1);
+
s3c_register_clksrc(clksrcs, ARRAY_SIZE(clksrcs));
s3c_register_clocks(init_clocks, ARRAY_SIZE(init_clocks));
- ret = s3c24xx_register_clocks(sys_clks, ARRAY_SIZE(sys_clks));
- if (ret > 0)
- printk(KERN_ERR "Failed to register system clocks\n");
-
clkp = init_clocks_disable;
for (ptr = 0; ptr < ARRAY_SIZE(init_clocks_disable); ptr++, clkp++) {
ret = s3c24xx_register_clock(clkp);
diff --git a/arch/arm/mach-s5pv210/cpu.c b/arch/arm/mach-s5pv210/cpu.c
index 0e0f8fde2aa6..411a4a9cbfc7 100644
--- a/arch/arm/mach-s5pv210/cpu.c
+++ b/arch/arm/mach-s5pv210/cpu.c
@@ -32,6 +32,8 @@
#include <plat/devs.h>
#include <plat/clock.h>
#include <plat/s5pv210.h>
+#include <plat/iic-core.h>
+#include <plat/sdhci.h>
/* Initial IO mappings */
@@ -74,7 +76,21 @@ static void s5pv210_idle(void)
void __init s5pv210_map_io(void)
{
+#ifdef CONFIG_S3C_DEV_ADC
+ s3c_device_adc.name = "s3c64xx-adc";
+#endif
+
iotable_init(s5pv210_iodesc, ARRAY_SIZE(s5pv210_iodesc));
+
+ /* initialise device information early */
+ s5pv210_default_sdhci0();
+ s5pv210_default_sdhci1();
+ s5pv210_default_sdhci2();
+
+ /* the i2c devices are directly compatible with s3c2440 */
+ s3c_i2c0_setname("s3c2440-i2c");
+ s3c_i2c1_setname("s3c2440-i2c");
+ s3c_i2c2_setname("s3c2440-i2c");
}
void __init s5pv210_init_clocks(int xtal)
@@ -100,7 +116,7 @@ void __init s5pv210_init_irq(void)
s5p_init_irq(vic, ARRAY_SIZE(vic));
}
-static struct sysdev_class s5pv210_sysclass = {
+struct sysdev_class s5pv210_sysclass = {
.name = "s5pv210-core",
};
diff --git a/arch/arm/mach-s5pv210/dev-audio.c b/arch/arm/mach-s5pv210/dev-audio.c
new file mode 100644
index 000000000000..6e215330a1be
--- /dev/null
+++ b/arch/arm/mach-s5pv210/dev-audio.c
@@ -0,0 +1,327 @@
+/* linux/arch/arm/mach-s5pv210/dev-audio.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co. Ltd
+ * Jaswinder Singh <jassi.brar@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+
+#include <plat/gpio-cfg.h>
+#include <plat/audio.h>
+
+#include <mach/gpio.h>
+#include <mach/map.h>
+#include <mach/dma.h>
+#include <mach/irqs.h>
+
+static int s5pv210_cfg_i2s(struct platform_device *pdev)
+{
+ /* configure GPIO for i2s port */
+ switch (pdev->id) {
+ case 1:
+ s3c_gpio_cfgpin(S5PV210_GPC0(0), S3C_GPIO_SFN(2));
+ s3c_gpio_cfgpin(S5PV210_GPC0(1), S3C_GPIO_SFN(2));
+ s3c_gpio_cfgpin(S5PV210_GPC0(2), S3C_GPIO_SFN(2));
+ s3c_gpio_cfgpin(S5PV210_GPC0(3), S3C_GPIO_SFN(2));
+ s3c_gpio_cfgpin(S5PV210_GPC0(4), S3C_GPIO_SFN(2));
+ break;
+
+ case 2:
+ s3c_gpio_cfgpin(S5PV210_GPC1(0), S3C_GPIO_SFN(4));
+ s3c_gpio_cfgpin(S5PV210_GPC1(1), S3C_GPIO_SFN(4));
+ s3c_gpio_cfgpin(S5PV210_GPC1(2), S3C_GPIO_SFN(4));
+ s3c_gpio_cfgpin(S5PV210_GPC1(3), S3C_GPIO_SFN(4));
+ s3c_gpio_cfgpin(S5PV210_GPC1(4), S3C_GPIO_SFN(4));
+ break;
+
+ case -1:
+ s3c_gpio_cfgpin(S5PV210_GPI(0), S3C_GPIO_SFN(2));
+ s3c_gpio_cfgpin(S5PV210_GPI(1), S3C_GPIO_SFN(2));
+ s3c_gpio_cfgpin(S5PV210_GPI(2), S3C_GPIO_SFN(2));
+ s3c_gpio_cfgpin(S5PV210_GPI(3), S3C_GPIO_SFN(2));
+ s3c_gpio_cfgpin(S5PV210_GPI(4), S3C_GPIO_SFN(2));
+ s3c_gpio_cfgpin(S5PV210_GPI(5), S3C_GPIO_SFN(2));
+ s3c_gpio_cfgpin(S5PV210_GPI(6), S3C_GPIO_SFN(2));
+ break;
+
+ default:
+ printk(KERN_ERR "Invalid Device %d\n", pdev->id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
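+
+/*
+ * The GPC0/GPC1/GPI pads are shared between the I2S, PCM and AC97
+ * controllers; the S3C_GPIO_SFN() value chosen above is what selects
+ * which controller drives them (compare the PCM and AC97 cfg_gpio
+ * helpers further down in this file).
+ */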
+
+static struct s3c_audio_pdata s3c_i2s_pdata = {
+ .cfg_gpio = s5pv210_cfg_i2s,
+};
+
+static struct resource s5pv210_iis0_resource[] = {
+ [0] = {
+ .start = S5PV210_PA_IIS0,
+ .end = S5PV210_PA_IIS0 + 0x100 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = DMACH_I2S0_TX,
+ .end = DMACH_I2S0_TX,
+ .flags = IORESOURCE_DMA,
+ },
+ [2] = {
+ .start = DMACH_I2S0_RX,
+ .end = DMACH_I2S0_RX,
+ .flags = IORESOURCE_DMA,
+ },
+};
+
+struct platform_device s5pv210_device_iis0 = {
+ .name = "s3c64xx-iis-v4",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(s5pv210_iis0_resource),
+ .resource = s5pv210_iis0_resource,
+ .dev = {
+ .platform_data = &s3c_i2s_pdata,
+ },
+};
+
+static struct resource s5pv210_iis1_resource[] = {
+ [0] = {
+ .start = S5PV210_PA_IIS1,
+ .end = S5PV210_PA_IIS1 + 0x100 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = DMACH_I2S1_TX,
+ .end = DMACH_I2S1_TX,
+ .flags = IORESOURCE_DMA,
+ },
+ [2] = {
+ .start = DMACH_I2S1_RX,
+ .end = DMACH_I2S1_RX,
+ .flags = IORESOURCE_DMA,
+ },
+};
+
+struct platform_device s5pv210_device_iis1 = {
+ .name = "s3c64xx-iis",
+ .id = 1,
+ .num_resources = ARRAY_SIZE(s5pv210_iis1_resource),
+ .resource = s5pv210_iis1_resource,
+ .dev = {
+ .platform_data = &s3c_i2s_pdata,
+ },
+};
+
+static struct resource s5pv210_iis2_resource[] = {
+ [0] = {
+ .start = S5PV210_PA_IIS2,
+ .end = S5PV210_PA_IIS2 + 0x100 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = DMACH_I2S2_TX,
+ .end = DMACH_I2S2_TX,
+ .flags = IORESOURCE_DMA,
+ },
+ [2] = {
+ .start = DMACH_I2S2_RX,
+ .end = DMACH_I2S2_RX,
+ .flags = IORESOURCE_DMA,
+ },
+};
+
+struct platform_device s5pv210_device_iis2 = {
+ .name = "s3c64xx-iis",
+ .id = 2,
+ .num_resources = ARRAY_SIZE(s5pv210_iis2_resource),
+ .resource = s5pv210_iis2_resource,
+ .dev = {
+ .platform_data = &s3c_i2s_pdata,
+ },
+};
+
+/* PCM Controller platform_devices */
+
+static int s5pv210_pcm_cfg_gpio(struct platform_device *pdev)
+{
+ switch (pdev->id) {
+ case 0:
+ s3c_gpio_cfgpin(S5PV210_GPI(0), S3C_GPIO_SFN(3));
+ s3c_gpio_cfgpin(S5PV210_GPI(1), S3C_GPIO_SFN(3));
+ s3c_gpio_cfgpin(S5PV210_GPI(2), S3C_GPIO_SFN(3));
+ s3c_gpio_cfgpin(S5PV210_GPI(3), S3C_GPIO_SFN(3));
+ s3c_gpio_cfgpin(S5PV210_GPI(4), S3C_GPIO_SFN(3));
+ break;
+ case 1:
+ s3c_gpio_cfgpin(S5PV210_GPC0(0), S3C_GPIO_SFN(3));
+ s3c_gpio_cfgpin(S5PV210_GPC0(1), S3C_GPIO_SFN(3));
+ s3c_gpio_cfgpin(S5PV210_GPC0(2), S3C_GPIO_SFN(3));
+ s3c_gpio_cfgpin(S5PV210_GPC0(3), S3C_GPIO_SFN(3));
+ s3c_gpio_cfgpin(S5PV210_GPC0(4), S3C_GPIO_SFN(3));
+ break;
+ case 2:
+ s3c_gpio_cfgpin(S5PV210_GPC1(0), S3C_GPIO_SFN(2));
+ s3c_gpio_cfgpin(S5PV210_GPC1(1), S3C_GPIO_SFN(2));
+ s3c_gpio_cfgpin(S5PV210_GPC1(2), S3C_GPIO_SFN(2));
+ s3c_gpio_cfgpin(S5PV210_GPC1(3), S3C_GPIO_SFN(2));
+ s3c_gpio_cfgpin(S5PV210_GPC1(4), S3C_GPIO_SFN(2));
+ break;
+ default:
+ printk(KERN_DEBUG "Invalid PCM Controller number!\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static struct s3c_audio_pdata s3c_pcm_pdata = {
+ .cfg_gpio = s5pv210_pcm_cfg_gpio,
+};
+
+static struct resource s5pv210_pcm0_resource[] = {
+ [0] = {
+ .start = S5PV210_PA_PCM0,
+ .end = S5PV210_PA_PCM0 + 0x100 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = DMACH_PCM0_TX,
+ .end = DMACH_PCM0_TX,
+ .flags = IORESOURCE_DMA,
+ },
+ [2] = {
+ .start = DMACH_PCM0_RX,
+ .end = DMACH_PCM0_RX,
+ .flags = IORESOURCE_DMA,
+ },
+};
+
+struct platform_device s5pv210_device_pcm0 = {
+ .name = "samsung-pcm",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(s5pv210_pcm0_resource),
+ .resource = s5pv210_pcm0_resource,
+ .dev = {
+ .platform_data = &s3c_pcm_pdata,
+ },
+};
+
+static struct resource s5pv210_pcm1_resource[] = {
+ [0] = {
+ .start = S5PV210_PA_PCM1,
+ .end = S5PV210_PA_PCM1 + 0x100 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = DMACH_PCM1_TX,
+ .end = DMACH_PCM1_TX,
+ .flags = IORESOURCE_DMA,
+ },
+ [2] = {
+ .start = DMACH_PCM1_RX,
+ .end = DMACH_PCM1_RX,
+ .flags = IORESOURCE_DMA,
+ },
+};
+
+struct platform_device s5pv210_device_pcm1 = {
+ .name = "samsung-pcm",
+ .id = 1,
+ .num_resources = ARRAY_SIZE(s5pv210_pcm1_resource),
+ .resource = s5pv210_pcm1_resource,
+ .dev = {
+ .platform_data = &s3c_pcm_pdata,
+ },
+};
+
+static struct resource s5pv210_pcm2_resource[] = {
+ [0] = {
+ .start = S5PV210_PA_PCM2,
+ .end = S5PV210_PA_PCM2 + 0x100 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = DMACH_PCM2_TX,
+ .end = DMACH_PCM2_TX,
+ .flags = IORESOURCE_DMA,
+ },
+ [2] = {
+ .start = DMACH_PCM2_RX,
+ .end = DMACH_PCM2_RX,
+ .flags = IORESOURCE_DMA,
+ },
+};
+
+struct platform_device s5pv210_device_pcm2 = {
+ .name = "samsung-pcm",
+ .id = 2,
+ .num_resources = ARRAY_SIZE(s5pv210_pcm2_resource),
+ .resource = s5pv210_pcm2_resource,
+ .dev = {
+ .platform_data = &s3c_pcm_pdata,
+ },
+};
+
+/* AC97 Controller platform devices */
+
+static int s5pv210_ac97_cfg_gpio(struct platform_device *pdev)
+{
+ s3c_gpio_cfgpin(S5PV210_GPC0(0), S3C_GPIO_SFN(4));
+ s3c_gpio_cfgpin(S5PV210_GPC0(1), S3C_GPIO_SFN(4));
+ s3c_gpio_cfgpin(S5PV210_GPC0(2), S3C_GPIO_SFN(4));
+ s3c_gpio_cfgpin(S5PV210_GPC0(3), S3C_GPIO_SFN(4));
+ s3c_gpio_cfgpin(S5PV210_GPC0(4), S3C_GPIO_SFN(4));
+
+ return 0;
+}
+
+static struct resource s5pv210_ac97_resource[] = {
+ [0] = {
+ .start = S5PV210_PA_AC97,
+ .end = S5PV210_PA_AC97 + 0x100 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = DMACH_AC97_PCMOUT,
+ .end = DMACH_AC97_PCMOUT,
+ .flags = IORESOURCE_DMA,
+ },
+ [2] = {
+ .start = DMACH_AC97_PCMIN,
+ .end = DMACH_AC97_PCMIN,
+ .flags = IORESOURCE_DMA,
+ },
+ [3] = {
+ .start = DMACH_AC97_MICIN,
+ .end = DMACH_AC97_MICIN,
+ .flags = IORESOURCE_DMA,
+ },
+ [4] = {
+ .start = IRQ_AC97,
+ .end = IRQ_AC97,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct s3c_audio_pdata s3c_ac97_pdata = {
+ .cfg_gpio = s5pv210_ac97_cfg_gpio,
+};
+
+static u64 s5pv210_ac97_dmamask = DMA_BIT_MASK(32);
+
+struct platform_device s5pv210_device_ac97 = {
+ .name = "s3c-ac97",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(s5pv210_ac97_resource),
+ .resource = s5pv210_ac97_resource,
+ .dev = {
+ .platform_data = &s3c_ac97_pdata,
+ .dma_mask = &s5pv210_ac97_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+};
diff --git a/arch/arm/mach-s5pv210/dev-onenand.c b/arch/arm/mach-s5pv210/dev-onenand.c
new file mode 100644
index 000000000000..34997b752f93
--- /dev/null
+++ b/arch/arm/mach-s5pv210/dev-onenand.c
@@ -0,0 +1,50 @@
+/*
+ * linux/arch/arm/mach-s5pv210/dev-onenand.c
+ *
+ * Copyright (c) 2008-2010 Samsung Electronics
+ * Kyungmin Park <kyungmin.park@samsung.com>
+ *
+ * S5PC110 series device definition for OneNAND devices
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/onenand.h>
+
+#include <mach/irqs.h>
+#include <mach/map.h>
+
+static struct resource s5pc110_onenand_resources[] = {
+ [0] = {
+ .start = S5PC110_PA_ONENAND,
+ .end = S5PC110_PA_ONENAND + SZ_128K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = S5PC110_PA_ONENAND_DMA,
+ .end = S5PC110_PA_ONENAND_DMA + SZ_2K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+struct platform_device s5pc110_device_onenand = {
+ .name = "s5pc110-onenand",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(s5pc110_onenand_resources),
+ .resource = s5pc110_onenand_resources,
+};
+
+void s5pc110_onenand_set_platdata(struct onenand_platform_data *pdata)
+{
+ struct onenand_platform_data *pd;
+
+ pd = kmemdup(pdata, sizeof(struct onenand_platform_data), GFP_KERNEL);
+ if (!pd)
+ printk(KERN_ERR "%s: no memory for platform data\n", __func__);
+ s5pc110_device_onenand.dev.platform_data = pd;
+}
diff --git a/arch/arm/mach-s5pv210/dev-spi.c b/arch/arm/mach-s5pv210/dev-spi.c
new file mode 100644
index 000000000000..337a62b57a0b
--- /dev/null
+++ b/arch/arm/mach-s5pv210/dev-spi.c
@@ -0,0 +1,178 @@
+/* linux/arch/arm/mach-s5pv210/dev-spi.c
+ *
+ * Copyright (C) 2010 Samsung Electronics Co. Ltd.
+ * Jaswinder Singh <jassi.brar@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+
+#include <mach/dma.h>
+#include <mach/map.h>
+#include <mach/irqs.h>
+#include <mach/gpio.h>
+#include <mach/spi-clocks.h>
+
+#include <plat/s3c64xx-spi.h>
+#include <plat/gpio-cfg.h>
+
+static char *spi_src_clks[] = {
+ [S5PV210_SPI_SRCCLK_PCLK] = "pclk",
+ [S5PV210_SPI_SRCCLK_SCLK] = "sclk_spi",
+};
+
+/* SPI Controller platform_devices */
+
+/* Since we emulate multi-cs capability, we do not touch the CS.
+ * The emulated CS is toggled by a board-specific mechanism: it may be
+ * a plain GPIO, a signal routed through some other chip in between,
+ * or something else entirely.
+ * We simply do not assume anything about the CS here.
+ */
+static int s5pv210_spi_cfg_gpio(struct platform_device *pdev)
+{
+ switch (pdev->id) {
+ case 0:
+ s3c_gpio_cfgpin(S5PV210_GPB(0), S3C_GPIO_SFN(2));
+ s3c_gpio_cfgpin(S5PV210_GPB(1), S3C_GPIO_SFN(2));
+ s3c_gpio_cfgpin(S5PV210_GPB(2), S3C_GPIO_SFN(2));
+ s3c_gpio_setpull(S5PV210_GPB(0), S3C_GPIO_PULL_UP);
+ s3c_gpio_setpull(S5PV210_GPB(1), S3C_GPIO_PULL_UP);
+ s3c_gpio_setpull(S5PV210_GPB(2), S3C_GPIO_PULL_UP);
+ break;
+
+ case 1:
+ s3c_gpio_cfgpin(S5PV210_GPB(4), S3C_GPIO_SFN(2));
+ s3c_gpio_cfgpin(S5PV210_GPB(5), S3C_GPIO_SFN(2));
+ s3c_gpio_cfgpin(S5PV210_GPB(6), S3C_GPIO_SFN(2));
+ s3c_gpio_setpull(S5PV210_GPB(4), S3C_GPIO_PULL_UP);
+ s3c_gpio_setpull(S5PV210_GPB(5), S3C_GPIO_PULL_UP);
+ s3c_gpio_setpull(S5PV210_GPB(6), S3C_GPIO_PULL_UP);
+ break;
+
+ default:
+ dev_err(&pdev->dev, "Invalid SPI Controller number!");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static struct resource s5pv210_spi0_resource[] = {
+ [0] = {
+ .start = S5PV210_PA_SPI0,
+ .end = S5PV210_PA_SPI0 + 0x100 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = DMACH_SPI0_TX,
+ .end = DMACH_SPI0_TX,
+ .flags = IORESOURCE_DMA,
+ },
+ [2] = {
+ .start = DMACH_SPI0_RX,
+ .end = DMACH_SPI0_RX,
+ .flags = IORESOURCE_DMA,
+ },
+ [3] = {
+ .start = IRQ_SPI0,
+ .end = IRQ_SPI0,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct s3c64xx_spi_info s5pv210_spi0_pdata = {
+ .cfg_gpio = s5pv210_spi_cfg_gpio,
+ .fifo_lvl_mask = 0x1ff,
+ .rx_lvl_offset = 15,
+ .high_speed = 1,
+};
+
+static u64 spi_dmamask = DMA_BIT_MASK(32);
+
+struct platform_device s5pv210_device_spi0 = {
+ .name = "s3c64xx-spi",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(s5pv210_spi0_resource),
+ .resource = s5pv210_spi0_resource,
+ .dev = {
+ .dma_mask = &spi_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = &s5pv210_spi0_pdata,
+ },
+};
+
+static struct resource s5pv210_spi1_resource[] = {
+ [0] = {
+ .start = S5PV210_PA_SPI1,
+ .end = S5PV210_PA_SPI1 + 0x100 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = DMACH_SPI1_TX,
+ .end = DMACH_SPI1_TX,
+ .flags = IORESOURCE_DMA,
+ },
+ [2] = {
+ .start = DMACH_SPI1_RX,
+ .end = DMACH_SPI1_RX,
+ .flags = IORESOURCE_DMA,
+ },
+ [3] = {
+ .start = IRQ_SPI1,
+ .end = IRQ_SPI1,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct s3c64xx_spi_info s5pv210_spi1_pdata = {
+ .cfg_gpio = s5pv210_spi_cfg_gpio,
+ .fifo_lvl_mask = 0x7f,
+ .rx_lvl_offset = 15,
+ .high_speed = 1,
+};
+
+struct platform_device s5pv210_device_spi1 = {
+ .name = "s3c64xx-spi",
+ .id = 1,
+ .num_resources = ARRAY_SIZE(s5pv210_spi1_resource),
+ .resource = s5pv210_spi1_resource,
+ .dev = {
+ .dma_mask = &spi_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = &s5pv210_spi1_pdata,
+ },
+};
+
+void __init s5pv210_spi_set_info(int cntrlr, int src_clk_nr, int num_cs)
+{
+ struct s3c64xx_spi_info *pd;
+
+ /* Reject invalid configuration */
+ if (!num_cs || src_clk_nr < 0
+ || src_clk_nr > S5PV210_SPI_SRCCLK_SCLK) {
+ printk(KERN_ERR "%s: Invalid SPI configuration\n", __func__);
+ return;
+ }
+
+ switch (cntrlr) {
+ case 0:
+ pd = &s5pv210_spi0_pdata;
+ break;
+ case 1:
+ pd = &s5pv210_spi1_pdata;
+ break;
+ default:
+ printk(KERN_ERR "%s: Invalid SPI controller(%d)\n",
+ __func__, cntrlr);
+ return;
+ }
+
+ pd->num_cs = num_cs;
+ pd->src_clk_nr = src_clk_nr;
+ pd->src_clk_name = spi_src_clks[src_clk_nr];
+}
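+
+/*
+ * Board code is expected to call this before registering the SPI device,
+ * e.g. (illustrative only):
+ *
+ *	s5pv210_spi_set_info(0, S5PV210_SPI_SRCCLK_PCLK, 1);
+ *	platform_device_register(&s5pv210_device_spi0);
+ */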
diff --git a/arch/arm/mach-s5pv210/dma.c b/arch/arm/mach-s5pv210/dma.c
new file mode 100644
index 000000000000..778ad5fe231a
--- /dev/null
+++ b/arch/arm/mach-s5pv210/dma.c
@@ -0,0 +1,168 @@
+/*
+ * Copyright (C) 2010 Samsung Electronics Co. Ltd.
+ * Jaswinder Singh <jassi.brar@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+
+#include <plat/devs.h>
+#include <plat/irqs.h>
+
+#include <mach/map.h>
+#include <mach/irqs.h>
+
+#include <plat/s3c-pl330-pdata.h>
+
+static u64 dma_dmamask = DMA_BIT_MASK(32);
+
+static struct resource s5pv210_pdma0_resource[] = {
+ [0] = {
+ .start = S5PV210_PA_PDMA0,
+ .end = S5PV210_PA_PDMA0 + SZ_4K,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = IRQ_PDMA0,
+ .end = IRQ_PDMA0,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct s3c_pl330_platdata s5pv210_pdma0_pdata = {
+ .peri = {
+ [0] = DMACH_UART0_RX,
+ [1] = DMACH_UART0_TX,
+ [2] = DMACH_UART1_RX,
+ [3] = DMACH_UART1_TX,
+ [4] = DMACH_UART2_RX,
+ [5] = DMACH_UART2_TX,
+ [6] = DMACH_UART3_RX,
+ [7] = DMACH_UART3_TX,
+ [8] = DMACH_MAX,
+ [9] = DMACH_I2S0_RX,
+ [10] = DMACH_I2S0_TX,
+ [11] = DMACH_I2S0S_TX,
+ [12] = DMACH_I2S1_RX,
+ [13] = DMACH_I2S1_TX,
+ [14] = DMACH_MAX,
+ [15] = DMACH_MAX,
+ [16] = DMACH_SPI0_RX,
+ [17] = DMACH_SPI0_TX,
+ [18] = DMACH_SPI1_RX,
+ [19] = DMACH_SPI1_TX,
+ [20] = DMACH_MAX,
+ [21] = DMACH_MAX,
+ [22] = DMACH_AC97_MICIN,
+ [23] = DMACH_AC97_PCMIN,
+ [24] = DMACH_AC97_PCMOUT,
+ [25] = DMACH_MAX,
+ [26] = DMACH_PWM,
+ [27] = DMACH_SPDIF,
+ [28] = DMACH_MAX,
+ [29] = DMACH_MAX,
+ [30] = DMACH_MAX,
+ [31] = DMACH_MAX,
+ },
+};
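+
+/*
+ * Each peri[] slot above is one PL330 peripheral request line of this
+ * DMAC; the value is the DMACH_* id clients request, and DMACH_MAX marks
+ * request lines that are not wired up on PDMA0.
+ */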
+
+static struct platform_device s5pv210_device_pdma0 = {
+ .name = "s3c-pl330",
+ .id = 1,
+ .num_resources = ARRAY_SIZE(s5pv210_pdma0_resource),
+ .resource = s5pv210_pdma0_resource,
+ .dev = {
+ .dma_mask = &dma_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = &s5pv210_pdma0_pdata,
+ },
+};
+
+static struct resource s5pv210_pdma1_resource[] = {
+ [0] = {
+ .start = S5PV210_PA_PDMA1,
+ .end = S5PV210_PA_PDMA1 + SZ_4K,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = IRQ_PDMA1,
+ .end = IRQ_PDMA1,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct s3c_pl330_platdata s5pv210_pdma1_pdata = {
+ .peri = {
+ [0] = DMACH_UART0_RX,
+ [1] = DMACH_UART0_TX,
+ [2] = DMACH_UART1_RX,
+ [3] = DMACH_UART1_TX,
+ [4] = DMACH_UART2_RX,
+ [5] = DMACH_UART2_TX,
+ [6] = DMACH_UART3_RX,
+ [7] = DMACH_UART3_TX,
+ [8] = DMACH_MAX,
+ [9] = DMACH_I2S0_RX,
+ [10] = DMACH_I2S0_TX,
+ [11] = DMACH_I2S0S_TX,
+ [12] = DMACH_I2S1_RX,
+ [13] = DMACH_I2S1_TX,
+ [14] = DMACH_I2S2_RX,
+ [15] = DMACH_I2S2_TX,
+ [16] = DMACH_SPI0_RX,
+ [17] = DMACH_SPI0_TX,
+ [18] = DMACH_SPI1_RX,
+ [19] = DMACH_SPI1_TX,
+ [20] = DMACH_MAX,
+ [21] = DMACH_MAX,
+ [22] = DMACH_PCM0_RX,
+ [23] = DMACH_PCM0_TX,
+ [24] = DMACH_PCM1_RX,
+ [25] = DMACH_PCM1_TX,
+ [26] = DMACH_MSM_REQ0,
+ [27] = DMACH_MSM_REQ1,
+ [28] = DMACH_MSM_REQ2,
+ [29] = DMACH_MSM_REQ3,
+ [30] = DMACH_PCM2_RX,
+ [31] = DMACH_PCM2_TX,
+ },
+};
+
+static struct platform_device s5pv210_device_pdma1 = {
+ .name = "s3c-pl330",
+ .id = 2,
+ .num_resources = ARRAY_SIZE(s5pv210_pdma1_resource),
+ .resource = s5pv210_pdma1_resource,
+ .dev = {
+ .dma_mask = &dma_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = &s5pv210_pdma1_pdata,
+ },
+};
+
+static struct platform_device *s5pv210_dmacs[] __initdata = {
+ &s5pv210_device_pdma0,
+ &s5pv210_device_pdma1,
+};
+
+static int __init s5pv210_dma_init(void)
+{
+ platform_add_devices(s5pv210_dmacs, ARRAY_SIZE(s5pv210_dmacs));
+
+ return 0;
+}
+arch_initcall(s5pv210_dma_init);
diff --git a/arch/arm/mach-s5pv210/gpiolib.c b/arch/arm/mach-s5pv210/gpiolib.c
new file mode 100644
index 000000000000..9ea8972e023d
--- /dev/null
+++ b/arch/arm/mach-s5pv210/gpiolib.c
@@ -0,0 +1,261 @@
+/* linux/arch/arm/mach-s5pv210/gpiolib.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * S5PV210 - GPIOlib support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <plat/gpio-core.h>
+#include <plat/gpio-cfg.h>
+#include <plat/gpio-cfg-helpers.h>
+#include <mach/map.h>
+
+static struct s3c_gpio_cfg gpio_cfg = {
+ .set_config = s3c_gpio_setcfg_s3c64xx_4bit,
+ .set_pull = s3c_gpio_setpull_updown,
+ .get_pull = s3c_gpio_getpull_updown,
+};
+
+static struct s3c_gpio_cfg gpio_cfg_noint = {
+ .set_config = s3c_gpio_setcfg_s3c64xx_4bit,
+ .set_pull = s3c_gpio_setpull_updown,
+ .get_pull = s3c_gpio_getpull_updown,
+};
+
+/* GPIO bank's base address given the index of the bank in the
+ * list of all gpio banks.
+ */
+#define S5PV210_BANK_BASE(bank_nr) (S5P_VA_GPIO + ((bank_nr) * 0x20))
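+/*
+ * So, for example, the fourth bank in the table below (GPC0, index 3)
+ * ends up with its registers at S5P_VA_GPIO + 0x60.
+ */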
+
+/*
+ * Following are the gpio banks in v210.
+ *
+ * The 'config' member, when left NULL, is initialized to the default
+ * gpio_cfg structure in the init function below.
+ *
+ * The 'base' member is also initialized in the init function below.
+ * Note: The initialization of 'base' member of s3c_gpio_chip structure
+ * uses the above macro and depends on the banks being listed in order here.
+ */
+static struct s3c_gpio_chip s5pv210_gpio_4bit[] = {
+ {
+ .chip = {
+ .base = S5PV210_GPA0(0),
+ .ngpio = S5PV210_GPIO_A0_NR,
+ .label = "GPA0",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPA1(0),
+ .ngpio = S5PV210_GPIO_A1_NR,
+ .label = "GPA1",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPB(0),
+ .ngpio = S5PV210_GPIO_B_NR,
+ .label = "GPB",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPC0(0),
+ .ngpio = S5PV210_GPIO_C0_NR,
+ .label = "GPC0",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPC1(0),
+ .ngpio = S5PV210_GPIO_C1_NR,
+ .label = "GPC1",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPD0(0),
+ .ngpio = S5PV210_GPIO_D0_NR,
+ .label = "GPD0",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPD1(0),
+ .ngpio = S5PV210_GPIO_D1_NR,
+ .label = "GPD1",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPE0(0),
+ .ngpio = S5PV210_GPIO_E0_NR,
+ .label = "GPE0",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPE1(0),
+ .ngpio = S5PV210_GPIO_E1_NR,
+ .label = "GPE1",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPF0(0),
+ .ngpio = S5PV210_GPIO_F0_NR,
+ .label = "GPF0",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPF1(0),
+ .ngpio = S5PV210_GPIO_F1_NR,
+ .label = "GPF1",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPF2(0),
+ .ngpio = S5PV210_GPIO_F2_NR,
+ .label = "GPF2",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPF3(0),
+ .ngpio = S5PV210_GPIO_F3_NR,
+ .label = "GPF3",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPG0(0),
+ .ngpio = S5PV210_GPIO_G0_NR,
+ .label = "GPG0",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPG1(0),
+ .ngpio = S5PV210_GPIO_G1_NR,
+ .label = "GPG1",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPG2(0),
+ .ngpio = S5PV210_GPIO_G2_NR,
+ .label = "GPG2",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPG3(0),
+ .ngpio = S5PV210_GPIO_G3_NR,
+ .label = "GPG3",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPI(0),
+ .ngpio = S5PV210_GPIO_I_NR,
+ .label = "GPI",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPJ0(0),
+ .ngpio = S5PV210_GPIO_J0_NR,
+ .label = "GPJ0",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPJ1(0),
+ .ngpio = S5PV210_GPIO_J1_NR,
+ .label = "GPJ1",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPJ2(0),
+ .ngpio = S5PV210_GPIO_J2_NR,
+ .label = "GPJ2",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPJ3(0),
+ .ngpio = S5PV210_GPIO_J3_NR,
+ .label = "GPJ3",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPJ4(0),
+ .ngpio = S5PV210_GPIO_J4_NR,
+ .label = "GPJ4",
+ },
+ }, {
+ .config = &gpio_cfg_noint,
+ .chip = {
+ .base = S5PV210_MP01(0),
+ .ngpio = S5PV210_GPIO_MP01_NR,
+ .label = "MP01",
+ },
+ }, {
+ .config = &gpio_cfg_noint,
+ .chip = {
+ .base = S5PV210_MP02(0),
+ .ngpio = S5PV210_GPIO_MP02_NR,
+ .label = "MP02",
+ },
+ }, {
+ .config = &gpio_cfg_noint,
+ .chip = {
+ .base = S5PV210_MP03(0),
+ .ngpio = S5PV210_GPIO_MP03_NR,
+ .label = "MP03",
+ },
+ }, {
+ .base = (S5P_VA_GPIO + 0xC00),
+ .config = &gpio_cfg_noint,
+ .chip = {
+ .base = S5PV210_GPH0(0),
+ .ngpio = S5PV210_GPIO_H0_NR,
+ .label = "GPH0",
+ },
+ }, {
+ .base = (S5P_VA_GPIO + 0xC20),
+ .config = &gpio_cfg_noint,
+ .chip = {
+ .base = S5PV210_GPH1(0),
+ .ngpio = S5PV210_GPIO_H1_NR,
+ .label = "GPH1",
+ },
+ }, {
+ .base = (S5P_VA_GPIO + 0xC40),
+ .config = &gpio_cfg_noint,
+ .chip = {
+ .base = S5PV210_GPH2(0),
+ .ngpio = S5PV210_GPIO_H2_NR,
+ .label = "GPH2",
+ },
+ }, {
+ .base = (S5P_VA_GPIO + 0xC60),
+ .config = &gpio_cfg_noint,
+ .chip = {
+ .base = S5PV210_GPH3(0),
+ .ngpio = S5PV210_GPIO_H3_NR,
+ .label = "GPH3",
+ },
+ },
+};
+
+static __init int s5pv210_gpiolib_init(void)
+{
+ struct s3c_gpio_chip *chip = s5pv210_gpio_4bit;
+ int nr_chips = ARRAY_SIZE(s5pv210_gpio_4bit);
+ int i = 0;
+
+ for (i = 0; i < nr_chips; i++, chip++) {
+ if (chip->config == NULL)
+ chip->config = &gpio_cfg;
+ if (chip->base == NULL)
+ chip->base = S5PV210_BANK_BASE(i);
+ }
+
+ samsung_gpiolib_add_4bit_chips(s5pv210_gpio_4bit, nr_chips);
+
+ return 0;
+}
+core_initcall(s5pv210_gpiolib_init);
diff --git a/arch/arm/mach-s5pv210/include/mach/dma.h b/arch/arm/mach-s5pv210/include/mach/dma.h
new file mode 100644
index 000000000000..81209eb1409b
--- /dev/null
+++ b/arch/arm/mach-s5pv210/include/mach/dma.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2010 Samsung Electronics Co. Ltd.
+ * Jaswinder Singh <jassi.brar@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __MACH_DMA_H
+#define __MACH_DMA_H
+
+/* This platform uses the common S3C DMA API driver for PL330 */
+#include <plat/s3c-dma-pl330.h>
+
+#endif /* __MACH_DMA_H */
diff --git a/arch/arm/mach-s5pv210/include/mach/gpio.h b/arch/arm/mach-s5pv210/include/mach/gpio.h
index 533b020e21e9..d6461ba2b71d 100644
--- a/arch/arm/mach-s5pv210/include/mach/gpio.h
+++ b/arch/arm/mach-s5pv210/include/mach/gpio.h
@@ -18,6 +18,8 @@
#define gpio_cansleep __gpio_cansleep
#define gpio_to_irq __gpio_to_irq
+/* In practice, the GPIO banks up to MP03 are the configurable GPIO banks */
+
/* GPIO bank sizes */
#define S5PV210_GPIO_A0_NR (8)
#define S5PV210_GPIO_A1_NR (4)
@@ -47,6 +49,10 @@
#define S5PV210_GPIO_J3_NR (8)
#define S5PV210_GPIO_J4_NR (5)
+#define S5PV210_GPIO_MP01_NR (8)
+#define S5PV210_GPIO_MP02_NR (4)
+#define S5PV210_GPIO_MP03_NR (8)
+
/* GPIO bank numbers */
/* CONFIG_S3C_GPIO_SPACE allows the user to select extra
@@ -85,6 +91,9 @@ enum s5p_gpio_number {
S5PV210_GPIO_J2_START = S5PV210_GPIO_NEXT(S5PV210_GPIO_J1),
S5PV210_GPIO_J3_START = S5PV210_GPIO_NEXT(S5PV210_GPIO_J2),
S5PV210_GPIO_J4_START = S5PV210_GPIO_NEXT(S5PV210_GPIO_J3),
+ S5PV210_GPIO_MP01_START = S5PV210_GPIO_NEXT(S5PV210_GPIO_J4),
+ S5PV210_GPIO_MP02_START = S5PV210_GPIO_NEXT(S5PV210_GPIO_MP01),
+ S5PV210_GPIO_MP03_START = S5PV210_GPIO_NEXT(S5PV210_GPIO_MP02),
};
/* S5PV210 GPIO number definitions */
@@ -115,13 +124,16 @@ enum s5p_gpio_number {
#define S5PV210_GPJ2(_nr) (S5PV210_GPIO_J2_START + (_nr))
#define S5PV210_GPJ3(_nr) (S5PV210_GPIO_J3_START + (_nr))
#define S5PV210_GPJ4(_nr) (S5PV210_GPIO_J4_START + (_nr))
+#define S5PV210_MP01(_nr) (S5PV210_GPIO_MP01_START + (_nr))
+#define S5PV210_MP02(_nr) (S5PV210_GPIO_MP02_START + (_nr))
+#define S5PV210_MP03(_nr) (S5PV210_GPIO_MP03_START + (_nr))
/* the end of the S5PV210 specific gpios */
-#define S5PV210_GPIO_END (S5PV210_GPJ4(S5PV210_GPIO_J4_NR) + 1)
+#define S5PV210_GPIO_END (S5PV210_MP03(S5PV210_GPIO_MP03_NR) + 1)
#define S3C_GPIO_END S5PV210_GPIO_END
-/* define the number of gpios we need to the one after the GPJ4() range */
-#define ARCH_NR_GPIOS (S5PV210_GPJ4(S5PV210_GPIO_J4_NR) + \
+/* define the number of gpios we need to the one after the MP03() range */
+#define ARCH_NR_GPIOS (S5PV210_MP03(S5PV210_GPIO_MP03_NR) + \
CONFIG_SAMSUNG_GPIO_EXTRA + 1)
#include <asm-generic/gpio.h>
diff --git a/arch/arm/mach-s5pv210/include/mach/irqs.h b/arch/arm/mach-s5pv210/include/mach/irqs.h
index 62c5175ef291..96895378ea27 100644
--- a/arch/arm/mach-s5pv210/include/mach/irqs.h
+++ b/arch/arm/mach-s5pv210/include/mach/irqs.h
@@ -17,22 +17,6 @@
/* VIC0: System, DMA, Timer */
-#define IRQ_EINT0 S5P_IRQ_VIC0(0)
-#define IRQ_EINT1 S5P_IRQ_VIC0(1)
-#define IRQ_EINT2 S5P_IRQ_VIC0(2)
-#define IRQ_EINT3 S5P_IRQ_VIC0(3)
-#define IRQ_EINT4 S5P_IRQ_VIC0(4)
-#define IRQ_EINT5 S5P_IRQ_VIC0(5)
-#define IRQ_EINT6 S5P_IRQ_VIC0(6)
-#define IRQ_EINT7 S5P_IRQ_VIC0(7)
-#define IRQ_EINT8 S5P_IRQ_VIC0(8)
-#define IRQ_EINT9 S5P_IRQ_VIC0(9)
-#define IRQ_EINT10 S5P_IRQ_VIC0(10)
-#define IRQ_EINT11 S5P_IRQ_VIC0(11)
-#define IRQ_EINT12 S5P_IRQ_VIC0(12)
-#define IRQ_EINT13 S5P_IRQ_VIC0(13)
-#define IRQ_EINT14 S5P_IRQ_VIC0(14)
-#define IRQ_EINT15 S5P_IRQ_VIC0(15)
#define IRQ_EINT16_31 S5P_IRQ_VIC0(16)
#define IRQ_BATF S5P_IRQ_VIC0(17)
#define IRQ_MDMA S5P_IRQ_VIC0(18)
@@ -134,13 +118,15 @@
#define IRQ_MDNIE3 S5P_IRQ_VIC3(8)
#define IRQ_VIC_END S5P_IRQ_VIC3(31)
-#define S5P_IRQ_EINT_BASE (IRQ_VIC_END + 1)
-
-#define S5P_EINT(x) ((x) + S5P_IRQ_EINT_BASE)
-#define IRQ_EINT(x) S5P_EINT(x)
+#define S5P_EINT_BASE1 (S5P_IRQ_VIC0(0))
+#define S5P_EINT_BASE2 (IRQ_VIC_END + 1)
/* Set the default NR_IRQS */
+#define NR_IRQS (IRQ_EINT(31) + 1)
-#define NR_IRQS (IRQ_EINT(31) + 1)
+/* Compatibility */
+#define IRQ_LCD_FIFO IRQ_LCD0
+#define IRQ_LCD_VSYNC IRQ_LCD1
+#define IRQ_LCD_SYSTEM IRQ_LCD2
#endif /* ASM_ARCH_IRQS_H */
diff --git a/arch/arm/mach-s5pv210/include/mach/map.h b/arch/arm/mach-s5pv210/include/mach/map.h
index c22694c8231f..34eb168ec950 100644
--- a/arch/arm/mach-s5pv210/include/mach/map.h
+++ b/arch/arm/mach-s5pv210/include/mach/map.h
@@ -16,6 +16,9 @@
#include <plat/map-base.h>
#include <plat/map-s5p.h>
+#define S5PC110_PA_ONENAND (0xB0000000)
+#define S5PC110_PA_ONENAND_DMA (0xB0600000)
+
#define S5PV210_PA_CHIPID (0xE0000000)
#define S5P_PA_CHIPID S5PV210_PA_CHIPID
@@ -25,13 +28,21 @@
#define S5PV210_PA_GPIO (0xE0200000)
#define S5P_PA_GPIO S5PV210_PA_GPIO
+/* SPI */
+#define S5PV210_PA_SPI0 0xE1300000
+#define S5PV210_PA_SPI1 0xE1400000
+
#define S5PV210_PA_IIC0 (0xE1800000)
+#define S5PV210_PA_IIC1 (0xFAB00000)
+#define S5PV210_PA_IIC2 (0xE1A00000)
#define S5PV210_PA_TIMER (0xE2500000)
#define S5P_PA_TIMER S5PV210_PA_TIMER
#define S5PV210_PA_SYSTIMER (0xE2600000)
+#define S5PV210_PA_WATCHDOG (0xE2700000)
+
#define S5PV210_PA_UART (0xE2900000)
#define S5P_PA_UART0 (S5PV210_PA_UART + 0x0)
@@ -43,6 +54,14 @@
#define S5PV210_PA_SROMC (0xE8000000)
+#define S5PV210_PA_MDMA 0xFA200000
+#define S5PV210_PA_PDMA0 0xE0900000
+#define S5PV210_PA_PDMA1 0xE0A00000
+
+#define S5PV210_PA_FB (0xF8000000)
+
+#define S5PV210_PA_HSMMC(x) (0xEB000000 + ((x) * 0x100000))
+
#define S5PV210_PA_VIC0 (0xF2000000)
#define S5P_PA_VIC0 S5PV210_PA_VIC0
@@ -58,8 +77,32 @@
#define S5PV210_PA_SDRAM (0x20000000)
#define S5P_PA_SDRAM S5PV210_PA_SDRAM
+/* I2S */
+#define S5PV210_PA_IIS0 0xEEE30000
+#define S5PV210_PA_IIS1 0xE2100000
+#define S5PV210_PA_IIS2 0xE2A00000
+
+/* PCM */
+#define S5PV210_PA_PCM0 0xE2300000
+#define S5PV210_PA_PCM1 0xE1200000
+#define S5PV210_PA_PCM2 0xE2B00000
+
+/* AC97 */
+#define S5PV210_PA_AC97 0xE2200000
+
+#define S5PV210_PA_ADC (0xE1700000)
+
/* compatibility defines. */
#define S3C_PA_UART S5PV210_PA_UART
+#define S3C_PA_HSMMC0 S5PV210_PA_HSMMC(0)
+#define S3C_PA_HSMMC1 S5PV210_PA_HSMMC(1)
+#define S3C_PA_HSMMC2 S5PV210_PA_HSMMC(2)
#define S3C_PA_IIC S5PV210_PA_IIC0
+#define S3C_PA_IIC1 S5PV210_PA_IIC1
+#define S3C_PA_IIC2 S5PV210_PA_IIC2
+#define S3C_PA_FB S5PV210_PA_FB
+#define S3C_PA_WDT S5PV210_PA_WATCHDOG
+
+#define SAMSUNG_PA_ADC S5PV210_PA_ADC
#endif /* __ASM_ARCH_MAP_H */
diff --git a/arch/arm/mach-s5pv210/include/mach/pwm-clock.h b/arch/arm/mach-s5pv210/include/mach/pwm-clock.h
index 69027fea987a..f8a9f1b330e0 100644
--- a/arch/arm/mach-s5pv210/include/mach/pwm-clock.h
+++ b/arch/arm/mach-s5pv210/include/mach/pwm-clock.h
@@ -1,13 +1,14 @@
/* linux/arch/arm/mach-s5pv210/include/mach/pwm-clock.h
*
+ * Copyright (c) 2009 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Copyright 2008 Openmoko, Inc.
* Copyright 2008 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
* http://armlinux.simtec.co.uk/
*
- * Copyright (c) 2009 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * Based on arch/arm/plat-s3c24xx/include/mach/pwm-clock.h
+ * Based on arch/arm/mach-s3c64xx/include/mach/pwm-clock.h
*
* S5PV210 - pwm clock and timer support
*
@@ -21,14 +22,14 @@
/**
* pwm_cfg_src_is_tclk() - return whether the given mux config is a tclk
- * @cfg: The timer TCFG1 register bits shifted down to 0.
+ * @tcfg: The timer TCFG1 register bits shifted down to 0.
*
* Return true if the given configuration from TCFG1 is a TCLK instead of
* any of the TDIV clocks.
*/
static inline int pwm_cfg_src_is_tclk(unsigned long tcfg)
{
- return tcfg == S3C2410_TCFG1_MUX_TCLK;
+ return tcfg == S3C64XX_TCFG1_MUX_TCLK;
}
/**
@@ -40,7 +41,7 @@ static inline int pwm_cfg_src_is_tclk(unsigned long tcfg)
*/
static inline unsigned long tcfg_to_divisor(unsigned long tcfg1)
{
- return 1 << (1 + tcfg1);
+ return 1 << tcfg1;
}
/**
@@ -50,7 +51,7 @@ static inline unsigned long tcfg_to_divisor(unsigned long tcfg1)
*/
static inline unsigned int pwm_tdiv_has_div1(void)
{
- return 0;
+ return 1;
}
/**
@@ -61,9 +62,9 @@ static inline unsigned int pwm_tdiv_has_div1(void)
*/
static inline unsigned long pwm_tdiv_div_bits(unsigned int div)
{
- return ilog2(div) - 1;
+ return ilog2(div);
}
-#define S3C_TCFG1_MUX_TCLK S3C2410_TCFG1_MUX_TCLK
+#define S3C_TCFG1_MUX_TCLK S3C64XX_TCFG1_MUX_TCLK
#endif /* __ASM_ARCH_PWMCLK_H */
diff --git a/arch/arm/mach-s5pv210/include/mach/regs-clock.h b/arch/arm/mach-s5pv210/include/mach/regs-clock.h
index e56e0e4673ed..2a25ab40c863 100644
--- a/arch/arm/mach-s5pv210/include/mach/regs-clock.h
+++ b/arch/arm/mach-s5pv210/include/mach/regs-clock.h
@@ -126,6 +126,7 @@
#define S5P_RST_STAT S5P_CLKREG(0xA000)
#define S5P_OSC_CON S5P_CLKREG(0x8000)
+#define S5P_MDNIE_SEL S5P_CLKREG(0x7008)
#define S5P_MIPI_PHY_CON0 S5P_CLKREG(0x7200)
#define S5P_MIPI_PHY_CON1 S5P_CLKREG(0x7204)
#define S5P_MIPI_CONTROL S5P_CLKREG(0xE814)
diff --git a/arch/arm/mach-s5pv210/include/mach/regs-fb.h b/arch/arm/mach-s5pv210/include/mach/regs-fb.h
new file mode 100644
index 000000000000..60d992989bdc
--- /dev/null
+++ b/arch/arm/mach-s5pv210/include/mach/regs-fb.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright 2010 Ben Dooks <ben-linux@fluff.org>
+ *
+ * Dummy framebuffer to allow build for the moment.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __ASM_ARCH_MACH_REGS_FB_H
+#define __ASM_ARCH_MACH_REGS_FB_H __FILE__
+
+#include <plat/regs-fb-v4.h>
+
+static inline unsigned int s3c_fb_pal_reg(unsigned int window, int reg)
+{
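+	/* each window has a 256-entry palette, 4 bytes per entry, starting at 0x2400 */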
+	return 0x2400 + (window * 256 * 4) + reg;
+}
+
+#endif /* __ASM_ARCH_MACH_REGS_FB_H */
diff --git a/arch/arm/mach-s5pv210/include/mach/regs-gpio.h b/arch/arm/mach-s5pv210/include/mach/regs-gpio.h
new file mode 100644
index 000000000000..49e029b4978a
--- /dev/null
+++ b/arch/arm/mach-s5pv210/include/mach/regs-gpio.h
@@ -0,0 +1,48 @@
+/* linux/arch/arm/mach-s5pv210/include/mach/regs-gpio.h
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * S5PV210 - GPIO (including EINT) register definitions
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __ASM_ARCH_REGS_GPIO_H
+#define __ASM_ARCH_REGS_GPIO_H __FILE__
+
+#include <mach/map.h>
+
+#define S5PV210_EINT30CON (S5P_VA_GPIO + 0xE00)
+#define S5P_EINT_CON(x) (S5PV210_EINT30CON + ((x) * 0x4))
+
+#define S5PV210_EINT30FLTCON0 (S5P_VA_GPIO + 0xE80)
+#define S5P_EINT_FLTCON(x) (S5PV210_EINT30FLTCON0 + ((x) * 0x4))
+
+#define S5PV210_EINT30MASK (S5P_VA_GPIO + 0xF00)
+#define S5P_EINT_MASK(x) (S5PV210_EINT30MASK + ((x) * 0x4))
+
+#define S5PV210_EINT30PEND (S5P_VA_GPIO + 0xF40)
+#define S5P_EINT_PEND(x) (S5PV210_EINT30PEND + ((x) * 0x4))
+
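+/* EINTs are handled in groups of eight: EINT_REG_NR selects the group, eint_irq_to_bit the bit within it */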
+#define EINT_REG_NR(x) (EINT_OFFSET(x) >> 3)
+
+#define eint_irq_to_bit(irq) (1 << (EINT_OFFSET(irq) & 0x7))
+
+/* values for S5P_EXTINT0 */
+#define S5P_EXTINT_LOWLEV (0x00)
+#define S5P_EXTINT_HILEV (0x01)
+#define S5P_EXTINT_FALLEDGE (0x02)
+#define S5P_EXTINT_RISEEDGE (0x03)
+#define S5P_EXTINT_BOTHEDGE (0x04)
+
+#define EINT_MODE S3C_GPIO_SFN(0xf)
+
+#define EINT_GPIO_0(x) S5PV210_GPH0(x)
+#define EINT_GPIO_1(x) S5PV210_GPH1(x)
+#define EINT_GPIO_2(x) S5PV210_GPH2(x)
+#define EINT_GPIO_3(x) S5PV210_GPH3(x)
+
+#endif /* __ASM_ARCH_REGS_GPIO_H */
diff --git a/arch/arm/mach-s5pv210/include/mach/spi-clocks.h b/arch/arm/mach-s5pv210/include/mach/spi-clocks.h
new file mode 100644
index 000000000000..02acded5f73d
--- /dev/null
+++ b/arch/arm/mach-s5pv210/include/mach/spi-clocks.h
@@ -0,0 +1,17 @@
+/* linux/arch/arm/mach-s5pv210/include/mach/spi-clocks.h
+ *
+ * Copyright (C) 2010 Samsung Electronics Co. Ltd.
+ * Jaswinder Singh <jassi.brar@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __S5PV210_PLAT_SPI_CLKS_H
+#define __S5PV210_PLAT_SPI_CLKS_H __FILE__
+
+#define S5PV210_SPI_SRCCLK_PCLK 0
+#define S5PV210_SPI_SRCCLK_SCLK 1
+
+#endif /* __S5PV210_PLAT_SPI_CLKS_H */
diff --git a/arch/arm/mach-s5pv210/mach-aquila.c b/arch/arm/mach-s5pv210/mach-aquila.c
new file mode 100644
index 000000000000..10bc76ec4025
--- /dev/null
+++ b/arch/arm/mach-s5pv210/mach-aquila.c
@@ -0,0 +1,149 @@
+/* linux/arch/arm/mach-s5pv210/mach-aquila.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/serial_core.h>
+#include <linux/fb.h>
+
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <asm/setup.h>
+#include <asm/mach-types.h>
+
+#include <mach/map.h>
+#include <mach/regs-clock.h>
+#include <mach/regs-fb.h>
+
+#include <plat/regs-serial.h>
+#include <plat/s5pv210.h>
+#include <plat/devs.h>
+#include <plat/cpu.h>
+#include <plat/fb.h>
+
+/* Following are default values for UCON, ULCON and UFCON UART registers */
+#define S5PV210_UCON_DEFAULT (S3C2410_UCON_TXILEVEL | \
+ S3C2410_UCON_RXILEVEL | \
+ S3C2410_UCON_TXIRQMODE | \
+ S3C2410_UCON_RXIRQMODE | \
+ S3C2410_UCON_RXFIFO_TOI | \
+ S3C2443_UCON_RXERR_IRQEN)
+
+#define S5PV210_ULCON_DEFAULT S3C2410_LCON_CS8
+
+#define S5PV210_UFCON_DEFAULT (S3C2410_UFCON_FIFOMODE | \
+ S5PV210_UFCON_TXTRIG4 | \
+ S5PV210_UFCON_RXTRIG4)
+
+static struct s3c2410_uartcfg smdkv210_uartcfgs[] __initdata = {
+ [0] = {
+ .hwport = 0,
+ .flags = 0,
+ .ucon = S5PV210_UCON_DEFAULT,
+ .ulcon = S5PV210_ULCON_DEFAULT,
+ .ufcon = S5PV210_UFCON_DEFAULT,
+ },
+ [1] = {
+ .hwport = 1,
+ .flags = 0,
+ .ucon = S5PV210_UCON_DEFAULT,
+ .ulcon = S5PV210_ULCON_DEFAULT,
+ .ufcon = S5PV210_UFCON_DEFAULT,
+ },
+ [2] = {
+ .hwport = 2,
+ .flags = 0,
+ .ucon = S5PV210_UCON_DEFAULT,
+ .ulcon = S5PV210_ULCON_DEFAULT,
+ .ufcon = S5PV210_UFCON_DEFAULT,
+ },
+ [3] = {
+ .hwport = 3,
+ .flags = 0,
+ .ucon = S5PV210_UCON_DEFAULT,
+ .ulcon = S5PV210_ULCON_DEFAULT,
+ .ufcon = S5PV210_UFCON_DEFAULT,
+ },
+};
+
+/* Frame Buffer */
+static struct s3c_fb_pd_win aquila_fb_win0 = {
+ .win_mode = {
+ .pixclock = 1000000000000ULL / ((16+16+2+480)*(28+3+2+800)*60),
+ .left_margin = 16,
+ .right_margin = 16,
+ .upper_margin = 3,
+ .lower_margin = 28,
+ .hsync_len = 2,
+ .vsync_len = 2,
+ .xres = 480,
+ .yres = 800,
+ },
+ .max_bpp = 32,
+ .default_bpp = 16,
+};
+
+static struct s3c_fb_pd_win aquila_fb_win1 = {
+ .win_mode = {
+ .pixclock = 1000000000000ULL / ((16+16+2+480)*(28+3+2+800)*60),
+ .left_margin = 16,
+ .right_margin = 16,
+ .upper_margin = 3,
+ .lower_margin = 28,
+ .hsync_len = 2,
+ .vsync_len = 2,
+ .xres = 480,
+ .yres = 800,
+ },
+ .max_bpp = 32,
+ .default_bpp = 16,
+};
+
+static struct s3c_fb_platdata aquila_lcd_pdata __initdata = {
+ .win[0] = &aquila_fb_win0,
+ .win[1] = &aquila_fb_win1,
+ .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
+ .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC |
+ VIDCON1_INV_VCLK | VIDCON1_INV_VDEN,
+ .setup_gpio = s5pv210_fb_gpio_setup_24bpp,
+};
+
+static struct platform_device *aquila_devices[] __initdata = {
+ &s3c_device_fb,
+};
+
+static void __init aquila_map_io(void)
+{
+ s5p_init_io(NULL, 0, S5P_VA_CHIPID);
+ s3c24xx_init_clocks(24000000);
+ s3c24xx_init_uarts(smdkv210_uartcfgs, ARRAY_SIZE(smdkv210_uartcfgs));
+}
+
+static void __init aquila_machine_init(void)
+{
+ /* FB */
+ s3c_fb_set_platdata(&aquila_lcd_pdata);
+
+ platform_add_devices(aquila_devices, ARRAY_SIZE(aquila_devices));
+}
+
+MACHINE_START(AQUILA, "Aquila")
+ /* Maintainers:
+ Marek Szyprowski <m.szyprowski@samsung.com>
+ Kyungmin Park <kyungmin.park@samsung.com> */
+ .phys_io = S3C_PA_UART & 0xfff00000,
+ .io_pg_offst = (((u32)S3C_VA_UART) >> 18) & 0xfffc,
+ .boot_params = S5P_PA_SDRAM + 0x100,
+ .init_irq = s5pv210_init_irq,
+ .map_io = aquila_map_io,
+ .init_machine = aquila_machine_init,
+ .timer = &s3c24xx_timer,
+MACHINE_END
diff --git a/arch/arm/mach-s5pv210/mach-goni.c b/arch/arm/mach-s5pv210/mach-goni.c
new file mode 100644
index 000000000000..4863b13824e4
--- /dev/null
+++ b/arch/arm/mach-s5pv210/mach-goni.c
@@ -0,0 +1,98 @@
+/* linux/arch/arm/mach-s5pv210/mach-goni.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/serial_core.h>
+
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <asm/setup.h>
+#include <asm/mach-types.h>
+
+#include <mach/map.h>
+#include <mach/regs-clock.h>
+
+#include <plat/regs-serial.h>
+#include <plat/s5pv210.h>
+#include <plat/devs.h>
+#include <plat/cpu.h>
+
+/* Following are default values for UCON, ULCON and UFCON UART registers */
+#define S5PV210_UCON_DEFAULT (S3C2410_UCON_TXILEVEL | \
+ S3C2410_UCON_RXILEVEL | \
+ S3C2410_UCON_TXIRQMODE | \
+ S3C2410_UCON_RXIRQMODE | \
+ S3C2410_UCON_RXFIFO_TOI | \
+ S3C2443_UCON_RXERR_IRQEN)
+
+#define S5PV210_ULCON_DEFAULT S3C2410_LCON_CS8
+
+#define S5PV210_UFCON_DEFAULT (S3C2410_UFCON_FIFOMODE | \
+ S5PV210_UFCON_TXTRIG4 | \
+ S5PV210_UFCON_RXTRIG4)
+
+static struct s3c2410_uartcfg goni_uartcfgs[] __initdata = {
+ [0] = {
+ .hwport = 0,
+ .flags = 0,
+ .ucon = S5PV210_UCON_DEFAULT,
+ .ulcon = S5PV210_ULCON_DEFAULT,
+ .ufcon = S5PV210_UFCON_DEFAULT,
+ },
+ [1] = {
+ .hwport = 1,
+ .flags = 0,
+ .ucon = S5PV210_UCON_DEFAULT,
+ .ulcon = S5PV210_ULCON_DEFAULT,
+ .ufcon = S5PV210_UFCON_DEFAULT,
+ },
+ [2] = {
+ .hwport = 2,
+ .flags = 0,
+ .ucon = S5PV210_UCON_DEFAULT,
+ .ulcon = S5PV210_ULCON_DEFAULT,
+ .ufcon = S5PV210_UFCON_DEFAULT,
+ },
+ [3] = {
+ .hwport = 3,
+ .flags = 0,
+ .ucon = S5PV210_UCON_DEFAULT,
+ .ulcon = S5PV210_ULCON_DEFAULT,
+ .ufcon = S5PV210_UFCON_DEFAULT,
+ },
+};
+
+static struct platform_device *goni_devices[] __initdata = {
+};
+
+static void __init goni_map_io(void)
+{
+ s5p_init_io(NULL, 0, S5P_VA_CHIPID);
+ s3c24xx_init_clocks(24000000);
+ s3c24xx_init_uarts(goni_uartcfgs, ARRAY_SIZE(goni_uartcfgs));
+}
+
+static void __init goni_machine_init(void)
+{
+ platform_add_devices(goni_devices, ARRAY_SIZE(goni_devices));
+}
+
+MACHINE_START(GONI, "GONI")
+ /* Maintainers: Kyungmin Park <kyungmin.park@samsung.com> */
+ .phys_io = S3C_PA_UART & 0xfff00000,
+ .io_pg_offst = (((u32)S3C_VA_UART) >> 18) & 0xfffc,
+ .boot_params = S5P_PA_SDRAM + 0x100,
+ .init_irq = s5pv210_init_irq,
+ .map_io = goni_map_io,
+ .init_machine = goni_machine_init,
+ .timer = &s3c24xx_timer,
+MACHINE_END
diff --git a/arch/arm/mach-s5pv210/mach-smdkc110.c b/arch/arm/mach-s5pv210/mach-smdkc110.c
index ab4869df30c0..4c8903c6d104 100644
--- a/arch/arm/mach-s5pv210/mach-smdkc110.c
+++ b/arch/arm/mach-s5pv210/mach-smdkc110.c
@@ -72,6 +72,9 @@ static struct s3c2410_uartcfg smdkv210_uartcfgs[] __initdata = {
};
static struct platform_device *smdkc110_devices[] __initdata = {
+ &s5pv210_device_iis0,
+ &s5pv210_device_ac97,
+ &s3c_device_wdt,
};
static void __init smdkc110_map_io(void)
diff --git a/arch/arm/mach-s5pv210/mach-smdkv210.c b/arch/arm/mach-s5pv210/mach-smdkv210.c
index a27883253204..0d4627948040 100644
--- a/arch/arm/mach-s5pv210/mach-smdkv210.c
+++ b/arch/arm/mach-s5pv210/mach-smdkv210.c
@@ -25,6 +25,8 @@
#include <plat/s5pv210.h>
#include <plat/devs.h>
#include <plat/cpu.h>
+#include <plat/adc.h>
+#include <plat/ts.h>
/* Following are default values for UCON, ULCON and UFCON UART registers */
#define S5PV210_UCON_DEFAULT (S3C2410_UCON_TXILEVEL | \
@@ -72,6 +74,17 @@ static struct s3c2410_uartcfg smdkv210_uartcfgs[] __initdata = {
};
static struct platform_device *smdkv210_devices[] __initdata = {
+ &s5pv210_device_iis0,
+ &s5pv210_device_ac97,
+ &s3c_device_adc,
+ &s3c_device_ts,
+ &s3c_device_wdt,
+};
+
+static struct s3c2410_ts_mach_info s3c_ts_platform __initdata = {
+ .delay = 10000,
+ .presc = 49,
+ .oversampling_shift = 2,
};
static void __init smdkv210_map_io(void)
@@ -83,6 +96,7 @@ static void __init smdkv210_map_io(void)
static void __init smdkv210_machine_init(void)
{
+ s3c24xx_ts_set_platdata(&s3c_ts_platform);
platform_add_devices(smdkv210_devices, ARRAY_SIZE(smdkv210_devices));
}
diff --git a/arch/arm/mach-s5pv210/setup-fb-24bpp.c b/arch/arm/mach-s5pv210/setup-fb-24bpp.c
new file mode 100644
index 000000000000..a50cbac8720d
--- /dev/null
+++ b/arch/arm/mach-s5pv210/setup-fb-24bpp.c
@@ -0,0 +1,62 @@
+/* linux/arch/arm/mach-s5pv210/setup-fb-24bpp.c
+ *
+ * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Base s5pv210 setup information for 24bpp LCD framebuffer
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/fb.h>
+
+#include <mach/regs-fb.h>
+#include <mach/gpio.h>
+#include <mach/map.h>
+#include <plat/fb.h>
+#include <mach/regs-clock.h>
+#include <plat/gpio-cfg.h>
+
+void s5pv210_fb_gpio_setup_24bpp(void)
+{
+ unsigned int gpio = 0;
+
+ for (gpio = S5PV210_GPF0(0); gpio <= S5PV210_GPF0(7); gpio++) {
+ s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(2));
+ s3c_gpio_setpull(gpio, S3C_GPIO_PULL_NONE);
+ s5p_gpio_set_drvstr(gpio, S5P_GPIO_DRVSTR_LV4);
+ }
+
+ for (gpio = S5PV210_GPF1(0); gpio <= S5PV210_GPF1(7); gpio++) {
+ s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(2));
+ s3c_gpio_setpull(gpio, S3C_GPIO_PULL_NONE);
+ s5p_gpio_set_drvstr(gpio, S5P_GPIO_DRVSTR_LV4);
+ }
+
+ for (gpio = S5PV210_GPF2(0); gpio <= S5PV210_GPF2(7); gpio++) {
+ s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(2));
+ s3c_gpio_setpull(gpio, S3C_GPIO_PULL_NONE);
+ s5p_gpio_set_drvstr(gpio, S5P_GPIO_DRVSTR_LV4);
+ }
+
+ for (gpio = S5PV210_GPF3(0); gpio <= S5PV210_GPF3(3); gpio++) {
+ s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(2));
+ s3c_gpio_setpull(gpio, S3C_GPIO_PULL_NONE);
+ s5p_gpio_set_drvstr(gpio, S5P_GPIO_DRVSTR_LV4);
+ }
+
+ /* Set DISPLAY_CONTROL register for Display path selection.
+ *
+	 * output  |   RGB   |   I80   |   ITU
+ * -----------------------------------
+ * 00 | MIE | FIMD | FIMD
+ * 01 | MDNIE | MDNIE | FIMD
+ * 10 | FIMD | FIMD | FIMD
+ * 11 | FIMD | FIMD | FIMD
+ */
+ writel(0x2, S5P_MDNIE_SEL);
+}
diff --git a/arch/arm/mach-s5pv210/setup-i2c0.c b/arch/arm/mach-s5pv210/setup-i2c0.c
new file mode 100644
index 000000000000..c718253c70b8
--- /dev/null
+++ b/arch/arm/mach-s5pv210/setup-i2c0.c
@@ -0,0 +1,30 @@
+/* linux/arch/arm/mach-s5pv210/setup-i2c0.c
+ *
+ * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * I2C0 GPIO configuration.
+ *
+ * Based on plat-s3c64xx/setup-i2c0.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+struct platform_device; /* don't need the contents */
+
+#include <mach/gpio.h>
+#include <plat/iic.h>
+#include <plat/gpio-cfg.h>
+
+void s3c_i2c0_cfg_gpio(struct platform_device *dev)
+{
+ s3c_gpio_cfgpin(S5PV210_GPD1(0), S3C_GPIO_SFN(2));
+ s3c_gpio_setpull(S5PV210_GPD1(0), S3C_GPIO_PULL_UP);
+ s3c_gpio_cfgpin(S5PV210_GPD1(1), S3C_GPIO_SFN(2));
+ s3c_gpio_setpull(S5PV210_GPD1(1), S3C_GPIO_PULL_UP);
+}
diff --git a/arch/arm/mach-s5pv210/setup-i2c1.c b/arch/arm/mach-s5pv210/setup-i2c1.c
new file mode 100644
index 000000000000..45e0e6ed2ed0
--- /dev/null
+++ b/arch/arm/mach-s5pv210/setup-i2c1.c
@@ -0,0 +1,30 @@
+/* linux/arch/arm/mach-s5pv210/setup-i2c1.c
+ *
+ * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * I2C1 GPIO configuration.
+ *
+ * Based on plat-s3c64xx/setup-i2c1.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+struct platform_device; /* don't need the contents */
+
+#include <mach/gpio.h>
+#include <plat/iic.h>
+#include <plat/gpio-cfg.h>
+
+void s3c_i2c1_cfg_gpio(struct platform_device *dev)
+{
+ s3c_gpio_cfgpin(S5PV210_GPD1(2), S3C_GPIO_SFN(2));
+ s3c_gpio_setpull(S5PV210_GPD1(2), S3C_GPIO_PULL_UP);
+ s3c_gpio_cfgpin(S5PV210_GPD1(3), S3C_GPIO_SFN(2));
+ s3c_gpio_setpull(S5PV210_GPD1(3), S3C_GPIO_PULL_UP);
+}
diff --git a/arch/arm/mach-s5pv210/setup-i2c2.c b/arch/arm/mach-s5pv210/setup-i2c2.c
new file mode 100644
index 000000000000..b11b4bff69ac
--- /dev/null
+++ b/arch/arm/mach-s5pv210/setup-i2c2.c
@@ -0,0 +1,30 @@
+/* linux/arch/arm/mach-s5pv210/setup-i2c2.c
+ *
+ * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * I2C2 GPIO configuration.
+ *
+ * Based on plat-s3c64xx/setup-i2c0.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+struct platform_device; /* don't need the contents */
+
+#include <mach/gpio.h>
+#include <plat/iic.h>
+#include <plat/gpio-cfg.h>
+
+void s3c_i2c2_cfg_gpio(struct platform_device *dev)
+{
+ s3c_gpio_cfgpin(S5PV210_GPD1(4), S3C_GPIO_SFN(2));
+ s3c_gpio_setpull(S5PV210_GPD1(4), S3C_GPIO_PULL_UP);
+ s3c_gpio_cfgpin(S5PV210_GPD1(5), S3C_GPIO_SFN(2));
+ s3c_gpio_setpull(S5PV210_GPD1(5), S3C_GPIO_PULL_UP);
+}
diff --git a/arch/arm/mach-s5pv210/setup-sdhci-gpio.c b/arch/arm/mach-s5pv210/setup-sdhci-gpio.c
new file mode 100644
index 000000000000..fe7d86dad14c
--- /dev/null
+++ b/arch/arm/mach-s5pv210/setup-sdhci-gpio.c
@@ -0,0 +1,104 @@
+/* linux/arch/arm/mach-s5pv210/setup-sdhci-gpio.c
+ *
+ * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * S5PV210 - Helper functions for setting up SDHCI device(s) GPIO (HSMMC)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+
+#include <mach/gpio.h>
+#include <plat/gpio-cfg.h>
+#include <plat/regs-sdhci.h>
+
+void s5pv210_setup_sdhci0_cfg_gpio(struct platform_device *dev, int width)
+{
+ unsigned int gpio;
+
+	/* Set all the necessary GPG0[0:1] pins to special-function 2 */
+ for (gpio = S5PV210_GPG0(0); gpio < S5PV210_GPG0(2); gpio++) {
+ s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(2));
+ s3c_gpio_setpull(gpio, S3C_GPIO_PULL_NONE);
+ }
+ switch (width) {
+ case 8:
+		/* GPG1[3:6] special-function 3 */
+ for (gpio = S5PV210_GPG1(3); gpio <= S5PV210_GPG1(6); gpio++) {
+ s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(3));
+ s3c_gpio_setpull(gpio, S3C_GPIO_PULL_NONE);
+ }
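+		/* fall through - 8-bit width also needs the 4-bit data lines */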
+ case 4:
+		/* GPG0[3:6] special-function 2 */
+ for (gpio = S5PV210_GPG0(3); gpio <= S5PV210_GPG0(6); gpio++) {
+ s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(2));
+ s3c_gpio_setpull(gpio, S3C_GPIO_PULL_NONE);
+ }
+ default:
+ break;
+ }
+
+ s3c_gpio_setpull(S5PV210_GPG0(2), S3C_GPIO_PULL_UP);
+ s3c_gpio_cfgpin(S5PV210_GPG0(2), S3C_GPIO_SFN(2));
+}
+
+void s5pv210_setup_sdhci1_cfg_gpio(struct platform_device *dev, int width)
+{
+ unsigned int gpio;
+
+ /* Set all the necessary GPG1[0:1] pins to special-function 2 */
+ for (gpio = S5PV210_GPG1(0); gpio < S5PV210_GPG1(2); gpio++) {
+ s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(2));
+ s3c_gpio_setpull(gpio, S3C_GPIO_PULL_NONE);
+ }
+
+ /* Data pin GPG1[3:6] to special-function 2 */
+ for (gpio = S5PV210_GPG1(3); gpio <= S5PV210_GPG1(6); gpio++) {
+ s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(2));
+ s3c_gpio_setpull(gpio, S3C_GPIO_PULL_NONE);
+ }
+
+ s3c_gpio_setpull(S5PV210_GPG1(2), S3C_GPIO_PULL_UP);
+ s3c_gpio_cfgpin(S5PV210_GPG1(2), S3C_GPIO_SFN(2));
+}
+
+void s5pv210_setup_sdhci2_cfg_gpio(struct platform_device *dev, int width)
+{
+ unsigned int gpio;
+
+ /* Set all the necessary GPG2[0:1] pins to special-function 2 */
+ for (gpio = S5PV210_GPG2(0); gpio < S5PV210_GPG2(2); gpio++) {
+ s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(2));
+ s3c_gpio_setpull(gpio, S3C_GPIO_PULL_NONE);
+ }
+
+ switch (width) {
+ case 8:
+ /* Data pin GPG3[3:6] to special-function 3 */
+ for (gpio = S5PV210_GPG3(3); gpio <= S5PV210_GPG3(6); gpio++) {
+ s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(3));
+ s3c_gpio_setpull(gpio, S3C_GPIO_PULL_NONE);
+ }
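+		/* fall through - 8-bit width also needs the 4-bit data lines */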
+ case 4:
+ /* Data pin GPG2[3:6] to special-function 2 */
+ for (gpio = S5PV210_GPG2(3); gpio <= S5PV210_GPG2(6); gpio++) {
+ s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(2));
+ s3c_gpio_setpull(gpio, S3C_GPIO_PULL_NONE);
+ }
+ default:
+ break;
+ }
+
+ s3c_gpio_setpull(S5PV210_GPG2(2), S3C_GPIO_PULL_UP);
+ s3c_gpio_cfgpin(S5PV210_GPG2(2), S3C_GPIO_SFN(2));
+}
diff --git a/arch/arm/mach-s5pv210/setup-sdhci.c b/arch/arm/mach-s5pv210/setup-sdhci.c
new file mode 100644
index 000000000000..51815ec60c2a
--- /dev/null
+++ b/arch/arm/mach-s5pv210/setup-sdhci.c
@@ -0,0 +1,63 @@
+/* linux/arch/arm/mach-s5pv210/setup-sdhci.c
+ *
+ * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * S5PV210 - Helper functions for setting up SDHCI device(s) (HSMMC)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
+
+#include <plat/regs-sdhci.h>
+#include <plat/sdhci.h>
+
+/* clock sources for the mmc bus clock, order as for the ctrl2[5..4] */
+
+char *s5pv210_hsmmc_clksrcs[4] = {
+ [0] = "hsmmc", /* HCLK */
+ [1] = "hsmmc", /* HCLK */
+ [2] = "sclk_mmc", /* mmc_bus */
+	/* [3] = reserved */
+};
+
+void s5pv210_setup_sdhci_cfg_card(struct platform_device *dev,
+ void __iomem *r,
+ struct mmc_ios *ios,
+ struct mmc_card *card)
+{
+ u32 ctrl2, ctrl3;
+
+	/* don't need to alter anything according to card-type */
+
+ writel(S3C64XX_SDHCI_CONTROL4_DRIVE_9mA, r + S3C64XX_SDHCI_CONTROL4);
+
+ ctrl2 = readl(r + S3C_SDHCI_CONTROL2);
+ ctrl2 &= S3C_SDHCI_CTRL2_SELBASECLK_MASK;
+ ctrl2 |= (S3C64XX_SDHCI_CTRL2_ENSTAASYNCCLR |
+ S3C64XX_SDHCI_CTRL2_ENCMDCNFMSK |
+ S3C_SDHCI_CTRL2_ENFBCLKRX |
+ S3C_SDHCI_CTRL2_DFCNT_NONE |
+ S3C_SDHCI_CTRL2_ENCLKOUTHOLD);
+
+ if (ios->clock < 25 * 1000000)
+ ctrl3 = (S3C_SDHCI_CTRL3_FCSEL3 |
+ S3C_SDHCI_CTRL3_FCSEL2 |
+ S3C_SDHCI_CTRL3_FCSEL1 |
+ S3C_SDHCI_CTRL3_FCSEL0);
+ else
+ ctrl3 = (S3C_SDHCI_CTRL3_FCSEL1 | S3C_SDHCI_CTRL3_FCSEL0);
+
+ writel(ctrl2, r + S3C_SDHCI_CONTROL2);
+ writel(ctrl3, r + S3C_SDHCI_CONTROL3);
+}
diff --git a/arch/arm/mach-sa1100/leds.c b/arch/arm/mach-sa1100/leds.c
index 4cf7c565aaed..bbfe197fb4d6 100644
--- a/arch/arm/mach-sa1100/leds.c
+++ b/arch/arm/mach-sa1100/leds.c
@@ -2,7 +2,7 @@
* linux/arch/arm/mach-sa1100/leds.c
*
* SA1100 LEDs dispatcher
- *
+ *
* Copyright (C) 2001 Nicolas Pitre
*/
#include <linux/compiler.h>
@@ -18,10 +18,10 @@ sa1100_leds_init(void)
{
if (machine_is_assabet())
leds_event = assabet_leds_event;
- if (machine_is_consus())
- leds_event = consus_leds_event;
+ if (machine_is_consus())
+ leds_event = consus_leds_event;
if (machine_is_badge4())
- leds_event = badge4_leds_event;
+ leds_event = badge4_leds_event;
if (machine_is_brutus())
leds_event = brutus_leds_event;
if (machine_is_cerf())
diff --git a/arch/arm/mach-shark/pci.c b/arch/arm/mach-shark/pci.c
index 37a7112d4117..89d175ce74d2 100644
--- a/arch/arm/mach-shark/pci.c
+++ b/arch/arm/mach-shark/pci.c
@@ -16,16 +16,19 @@
static int __init shark_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
{
if (dev->bus->number == 0)
- if (dev->devfn == 0) return 255;
- else return 11;
- else return 255;
+ if (dev->devfn == 0)
+ return 255;
+ else
+ return 11;
+ else
+ return 255;
}
extern void __init via82c505_preinit(void);
static struct hw_pci shark_pci __initdata = {
.setup = via82c505_setup,
- .swizzle = pci_std_swizzle,
+ .swizzle = pci_std_swizzle,
.map_irq = shark_map_irq,
.nr_controllers = 1,
.scan = via82c505_scan_bus,
diff --git a/arch/arm/mach-shmobile/Kconfig b/arch/arm/mach-shmobile/Kconfig
index aeceb9b92aeb..f2b88c5fe142 100644
--- a/arch/arm/mach-shmobile/Kconfig
+++ b/arch/arm/mach-shmobile/Kconfig
@@ -7,7 +7,6 @@ config ARCH_SH7367
select CPU_V6
select HAVE_CLK
select COMMON_CLKDEV
- select GENERIC_TIME
select GENERIC_CLOCKEVENTS
config ARCH_SH7377
@@ -15,7 +14,6 @@ config ARCH_SH7377
select CPU_V7
select HAVE_CLK
select COMMON_CLKDEV
- select GENERIC_TIME
select GENERIC_CLOCKEVENTS
config ARCH_SH7372
@@ -23,7 +21,6 @@ config ARCH_SH7372
select CPU_V7
select HAVE_CLK
select COMMON_CLKDEV
- select GENERIC_TIME
select GENERIC_CLOCKEVENTS
comment "SH-Mobile Board Type"
diff --git a/arch/arm/mach-spear3xx/Kconfig b/arch/arm/mach-spear3xx/Kconfig
new file mode 100644
index 000000000000..20d1317cc486
--- /dev/null
+++ b/arch/arm/mach-spear3xx/Kconfig
@@ -0,0 +1,33 @@
+#
+# SPEAr3XX Machine configuration file
+#
+
+if ARCH_SPEAR3XX
+
+choice
+ prompt "SPEAr3XX Family"
+ default MACH_SPEAR300
+
+config MACH_SPEAR300
+ bool "SPEAr300"
+ help
+ Supports ST SPEAr300 Machine
+
+config MACH_SPEAR310
+ bool "SPEAr310"
+ help
+ Supports ST SPEAr310 Machine
+
+config MACH_SPEAR320
+ bool "SPEAr320"
+ help
+ Supports ST SPEAr320 Machine
+
+endchoice
+
+# Adding SPEAr3XX machine specific configuration files
+source "arch/arm/mach-spear3xx/Kconfig300"
+source "arch/arm/mach-spear3xx/Kconfig310"
+source "arch/arm/mach-spear3xx/Kconfig320"
+
+endif #ARCH_SPEAR3XX
diff --git a/arch/arm/mach-spear3xx/Kconfig300 b/arch/arm/mach-spear3xx/Kconfig300
new file mode 100644
index 000000000000..c519a05b4ab4
--- /dev/null
+++ b/arch/arm/mach-spear3xx/Kconfig300
@@ -0,0 +1,17 @@
+#
+# SPEAr300 machine configuration file
+#
+
+if MACH_SPEAR300
+
+choice
+ prompt "SPEAr300 Boards"
+ default BOARD_SPEAR300_EVB
+
+config BOARD_SPEAR300_EVB
+ bool "SPEAr300 Evaluation Board"
+ help
+ Supports ST SPEAr300 Evaluation Board
+endchoice
+
+endif #MACH_SPEAR300
diff --git a/arch/arm/mach-spear3xx/Kconfig310 b/arch/arm/mach-spear3xx/Kconfig310
new file mode 100644
index 000000000000..60e7442d75bd
--- /dev/null
+++ b/arch/arm/mach-spear3xx/Kconfig310
@@ -0,0 +1,17 @@
+#
+# SPEAr310 machine configuration file
+#
+
+if MACH_SPEAR310
+
+choice
+ prompt "SPEAr310 Boards"
+ default BOARD_SPEAR310_EVB
+
+config BOARD_SPEAR310_EVB
+ bool "SPEAr310 Evaluation Board"
+ help
+ Supports ST SPEAr310 Evaluation Board
+endchoice
+
+endif #MACH_SPEAR310
diff --git a/arch/arm/mach-spear3xx/Kconfig320 b/arch/arm/mach-spear3xx/Kconfig320
new file mode 100644
index 000000000000..1c1d438399b8
--- /dev/null
+++ b/arch/arm/mach-spear3xx/Kconfig320
@@ -0,0 +1,17 @@
+#
+# SPEAr320 machine configuration file
+#
+
+if MACH_SPEAR320
+
+choice
+ prompt "SPEAr320 Boards"
+ default BOARD_SPEAR320_EVB
+
+config BOARD_SPEAR320_EVB
+ bool "SPEAr320 Evaluation Board"
+ help
+ Supports ST SPEAr320 Evaluation Board
+endchoice
+
+endif #MACH_SPEAR320
diff --git a/arch/arm/mach-spear3xx/Makefile b/arch/arm/mach-spear3xx/Makefile
new file mode 100644
index 000000000000..b24862489704
--- /dev/null
+++ b/arch/arm/mach-spear3xx/Makefile
@@ -0,0 +1,26 @@
+#
+# Makefile for SPEAr3XX machine series
+#
+
+# common files
+obj-y += spear3xx.o clock.o
+
+# spear300 specific files
+obj-$(CONFIG_MACH_SPEAR300) += spear300.o
+
+# spear300 boards files
+obj-$(CONFIG_BOARD_SPEAR300_EVB) += spear300_evb.o
+
+
+# spear310 specific files
+obj-$(CONFIG_MACH_SPEAR310) += spear310.o
+
+# spear310 boards files
+obj-$(CONFIG_BOARD_SPEAR310_EVB) += spear310_evb.o
+
+
+# spear320 specific files
+obj-$(CONFIG_MACH_SPEAR320) += spear320.o
+
+# spear320 boards files
+obj-$(CONFIG_BOARD_SPEAR320_EVB) += spear320_evb.o
diff --git a/arch/arm/mach-spear3xx/Makefile.boot b/arch/arm/mach-spear3xx/Makefile.boot
new file mode 100644
index 000000000000..7a1f3c0eadb8
--- /dev/null
+++ b/arch/arm/mach-spear3xx/Makefile.boot
@@ -0,0 +1,3 @@
+zreladdr-y := 0x00008000
+params_phys-y := 0x00000100
+initrd_phys-y := 0x00800000
diff --git a/arch/arm/mach-spear3xx/clock.c b/arch/arm/mach-spear3xx/clock.c
new file mode 100644
index 000000000000..39f6ccf22294
--- /dev/null
+++ b/arch/arm/mach-spear3xx/clock.c
@@ -0,0 +1,389 @@
+/*
+ * arch/arm/mach-spear3xx/clock.c
+ *
+ * SPEAr3xx machines clock framework source file
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <mach/misc_regs.h>
+#include <plat/clock.h>
+
+/* root clks */
+/* 32 KHz oscillator clock */
+static struct clk osc_32k_clk = {
+ .flags = ALWAYS_ENABLED,
+ .rate = 32000,
+};
+
+/* 24 MHz oscillator clock */
+static struct clk osc_24m_clk = {
+ .flags = ALWAYS_ENABLED,
+ .rate = 24000000,
+};
+
+/* clock derived from 32 KHz osc clk */
+/* rtc clock */
+static struct clk rtc_clk = {
+ .pclk = &osc_32k_clk,
+ .en_reg = PERIP1_CLK_ENB,
+ .en_reg_bit = RTC_CLK_ENB,
+ .recalc = &follow_parent,
+};
+
+/* clock derived from 24 MHz osc clk */
+/* pll1 configuration structure */
+static struct pll_clk_config pll1_config = {
+ .mode_reg = PLL1_CTR,
+ .cfg_reg = PLL1_FRQ,
+};
+
+/* PLL1 clock */
+static struct clk pll1_clk = {
+ .pclk = &osc_24m_clk,
+ .en_reg = PLL1_CTR,
+ .en_reg_bit = PLL_ENABLE,
+ .recalc = &pll1_clk_recalc,
+ .private_data = &pll1_config,
+};
+
+/* PLL3 48 MHz clock */
+static struct clk pll3_48m_clk = {
+ .flags = ALWAYS_ENABLED,
+ .pclk = &osc_24m_clk,
+ .rate = 48000000,
+};
+
+/* watch dog timer clock */
+static struct clk wdt_clk = {
+ .flags = ALWAYS_ENABLED,
+ .pclk = &osc_24m_clk,
+ .recalc = &follow_parent,
+};
+
+/* clock derived from pll1 clk */
+/* cpu clock */
+static struct clk cpu_clk = {
+ .flags = ALWAYS_ENABLED,
+ .pclk = &pll1_clk,
+ .recalc = &follow_parent,
+};
+
+/* ahb configuration structure */
+static struct bus_clk_config ahb_config = {
+ .reg = CORE_CLK_CFG,
+ .mask = PLL_HCLK_RATIO_MASK,
+ .shift = PLL_HCLK_RATIO_SHIFT,
+};
+
+/* ahb clock */
+static struct clk ahb_clk = {
+ .flags = ALWAYS_ENABLED,
+ .pclk = &pll1_clk,
+ .recalc = &bus_clk_recalc,
+ .private_data = &ahb_config,
+};
+
+/* uart configurations */
+static struct aux_clk_config uart_config = {
+ .synth_reg = UART_CLK_SYNT,
+};
+
+/* uart parents */
+static struct pclk_info uart_pclk_info[] = {
+ {
+ .pclk = &pll1_clk,
+ .pclk_mask = AUX_CLK_PLL1_MASK,
+ .scalable = 1,
+ }, {
+ .pclk = &pll3_48m_clk,
+ .pclk_mask = AUX_CLK_PLL3_MASK,
+ .scalable = 0,
+ },
+};
+
+/* uart parent select structure */
+static struct pclk_sel uart_pclk_sel = {
+ .pclk_info = uart_pclk_info,
+ .pclk_count = ARRAY_SIZE(uart_pclk_info),
+ .pclk_sel_reg = PERIP_CLK_CFG,
+ .pclk_sel_mask = UART_CLK_MASK,
+};
+
+/* uart clock */
+static struct clk uart_clk = {
+ .en_reg = PERIP1_CLK_ENB,
+ .en_reg_bit = UART_CLK_ENB,
+ .pclk_sel = &uart_pclk_sel,
+ .pclk_sel_shift = UART_CLK_SHIFT,
+ .recalc = &aux_clk_recalc,
+ .private_data = &uart_config,
+};
+
+/* firda configurations */
+static struct aux_clk_config firda_config = {
+ .synth_reg = FIRDA_CLK_SYNT,
+};
+
+/* firda parents */
+static struct pclk_info firda_pclk_info[] = {
+ {
+ .pclk = &pll1_clk,
+ .pclk_mask = AUX_CLK_PLL1_MASK,
+ .scalable = 1,
+ }, {
+ .pclk = &pll3_48m_clk,
+ .pclk_mask = AUX_CLK_PLL3_MASK,
+ .scalable = 0,
+ },
+};
+
+/* firda parent select structure */
+static struct pclk_sel firda_pclk_sel = {
+ .pclk_info = firda_pclk_info,
+ .pclk_count = ARRAY_SIZE(firda_pclk_info),
+ .pclk_sel_reg = PERIP_CLK_CFG,
+ .pclk_sel_mask = FIRDA_CLK_MASK,
+};
+
+/* firda clock */
+static struct clk firda_clk = {
+ .en_reg = PERIP1_CLK_ENB,
+ .en_reg_bit = FIRDA_CLK_ENB,
+ .pclk_sel = &firda_pclk_sel,
+ .pclk_sel_shift = FIRDA_CLK_SHIFT,
+ .recalc = &aux_clk_recalc,
+ .private_data = &firda_config,
+};
+
+/* gpt parents */
+static struct pclk_info gpt_pclk_info[] = {
+ {
+ .pclk = &pll1_clk,
+ .pclk_mask = AUX_CLK_PLL1_MASK,
+ .scalable = 1,
+ }, {
+ .pclk = &pll3_48m_clk,
+ .pclk_mask = AUX_CLK_PLL3_MASK,
+ .scalable = 0,
+ },
+};
+
+/* gpt parent select structure */
+static struct pclk_sel gpt_pclk_sel = {
+ .pclk_info = gpt_pclk_info,
+ .pclk_count = ARRAY_SIZE(gpt_pclk_info),
+ .pclk_sel_reg = PERIP_CLK_CFG,
+ .pclk_sel_mask = GPT_CLK_MASK,
+};
+
+/* gpt0 configurations */
+static struct aux_clk_config gpt0_config = {
+ .synth_reg = PRSC1_CLK_CFG,
+};
+
+/* gpt0 timer clock */
+static struct clk gpt0_clk = {
+ .flags = ALWAYS_ENABLED,
+ .pclk_sel = &gpt_pclk_sel,
+ .pclk_sel_shift = GPT0_CLK_SHIFT,
+ .recalc = &gpt_clk_recalc,
+ .private_data = &gpt0_config,
+};
+
+/* gpt1 configurations */
+static struct aux_clk_config gpt1_config = {
+ .synth_reg = PRSC2_CLK_CFG,
+};
+
+/* gpt1 timer clock */
+static struct clk gpt1_clk = {
+ .en_reg = PERIP1_CLK_ENB,
+ .en_reg_bit = GPT1_CLK_ENB,
+ .pclk_sel = &gpt_pclk_sel,
+ .pclk_sel_shift = GPT1_CLK_SHIFT,
+ .recalc = &gpt_clk_recalc,
+ .private_data = &gpt1_config,
+};
+
+/* gpt2 configurations */
+static struct aux_clk_config gpt2_config = {
+ .synth_reg = PRSC3_CLK_CFG,
+};
+
+/* gpt2 timer clock */
+static struct clk gpt2_clk = {
+ .en_reg = PERIP1_CLK_ENB,
+ .en_reg_bit = GPT2_CLK_ENB,
+ .pclk_sel = &gpt_pclk_sel,
+ .pclk_sel_shift = GPT2_CLK_SHIFT,
+ .recalc = &gpt_clk_recalc,
+ .private_data = &gpt2_config,
+};
+
+/* clock derived from pll3 clk */
+/* usbh clock */
+static struct clk usbh_clk = {
+ .pclk = &pll3_48m_clk,
+ .en_reg = PERIP1_CLK_ENB,
+ .en_reg_bit = USBH_CLK_ENB,
+ .recalc = &follow_parent,
+};
+
+/* usbd clock */
+static struct clk usbd_clk = {
+ .pclk = &pll3_48m_clk,
+ .en_reg = PERIP1_CLK_ENB,
+ .en_reg_bit = USBD_CLK_ENB,
+ .recalc = &follow_parent,
+};
+
+/* clcd clock */
+static struct clk clcd_clk = {
+ .flags = ALWAYS_ENABLED,
+ .pclk = &pll3_48m_clk,
+ .recalc = &follow_parent,
+};
+
+/* clock derived from ahb clk */
+/* apb configuration structure */
+static struct bus_clk_config apb_config = {
+ .reg = CORE_CLK_CFG,
+ .mask = HCLK_PCLK_RATIO_MASK,
+ .shift = HCLK_PCLK_RATIO_SHIFT,
+};
+
+/* apb clock */
+static struct clk apb_clk = {
+ .flags = ALWAYS_ENABLED,
+ .pclk = &ahb_clk,
+ .recalc = &bus_clk_recalc,
+ .private_data = &apb_config,
+};
+
+/* i2c clock */
+static struct clk i2c_clk = {
+ .pclk = &ahb_clk,
+ .en_reg = PERIP1_CLK_ENB,
+ .en_reg_bit = I2C_CLK_ENB,
+ .recalc = &follow_parent,
+};
+
+/* dma clock */
+static struct clk dma_clk = {
+ .pclk = &ahb_clk,
+ .en_reg = PERIP1_CLK_ENB,
+ .en_reg_bit = DMA_CLK_ENB,
+ .recalc = &follow_parent,
+};
+
+/* jpeg clock */
+static struct clk jpeg_clk = {
+ .pclk = &ahb_clk,
+ .en_reg = PERIP1_CLK_ENB,
+ .en_reg_bit = JPEG_CLK_ENB,
+ .recalc = &follow_parent,
+};
+
+/* gmac clock */
+static struct clk gmac_clk = {
+ .pclk = &ahb_clk,
+ .en_reg = PERIP1_CLK_ENB,
+ .en_reg_bit = GMAC_CLK_ENB,
+ .recalc = &follow_parent,
+};
+
+/* smi clock */
+static struct clk smi_clk = {
+ .pclk = &ahb_clk,
+ .en_reg = PERIP1_CLK_ENB,
+ .en_reg_bit = SMI_CLK_ENB,
+ .recalc = &follow_parent,
+};
+
+/* c3 clock */
+static struct clk c3_clk = {
+ .pclk = &ahb_clk,
+ .en_reg = PERIP1_CLK_ENB,
+ .en_reg_bit = C3_CLK_ENB,
+ .recalc = &follow_parent,
+};
+
+/* clock derived from apb clk */
+/* adc clock */
+static struct clk adc_clk = {
+ .pclk = &apb_clk,
+ .en_reg = PERIP1_CLK_ENB,
+ .en_reg_bit = ADC_CLK_ENB,
+ .recalc = &follow_parent,
+};
+
+/* ssp clock */
+static struct clk ssp_clk = {
+ .pclk = &apb_clk,
+ .en_reg = PERIP1_CLK_ENB,
+ .en_reg_bit = SSP_CLK_ENB,
+ .recalc = &follow_parent,
+};
+
+/* gpio clock */
+static struct clk gpio_clk = {
+ .pclk = &apb_clk,
+ .en_reg = PERIP1_CLK_ENB,
+ .en_reg_bit = GPIO_CLK_ENB,
+ .recalc = &follow_parent,
+};
+
+/* array of all spear 3xx clock lookups */
+static struct clk_lookup spear_clk_lookups[] = {
+ /* root clks */
+ { .con_id = "osc_32k_clk", .clk = &osc_32k_clk},
+ { .con_id = "osc_24m_clk", .clk = &osc_24m_clk},
+ /* clock derived from 32 KHz osc clk */
+ { .dev_id = "rtc", .clk = &rtc_clk},
+ /* clock derived from 24 MHz osc clk */
+ { .con_id = "pll1_clk", .clk = &pll1_clk},
+ { .con_id = "pll3_48m_clk", .clk = &pll3_48m_clk},
+ { .dev_id = "wdt", .clk = &wdt_clk},
+ /* clock derived from pll1 clk */
+ { .con_id = "cpu_clk", .clk = &cpu_clk},
+ { .con_id = "ahb_clk", .clk = &ahb_clk},
+ { .dev_id = "uart", .clk = &uart_clk},
+ { .dev_id = "firda", .clk = &firda_clk},
+ { .dev_id = "gpt0", .clk = &gpt0_clk},
+ { .dev_id = "gpt1", .clk = &gpt1_clk},
+ { .dev_id = "gpt2", .clk = &gpt2_clk},
+ /* clock derived from pll3 clk */
+ { .dev_id = "usbh", .clk = &usbh_clk},
+ { .dev_id = "usbd", .clk = &usbd_clk},
+ { .dev_id = "clcd", .clk = &clcd_clk},
+ /* clock derived from ahb clk */
+ { .con_id = "apb_clk", .clk = &apb_clk},
+ { .dev_id = "i2c", .clk = &i2c_clk},
+ { .dev_id = "dma", .clk = &dma_clk},
+ { .dev_id = "jpeg", .clk = &jpeg_clk},
+ { .dev_id = "gmac", .clk = &gmac_clk},
+ { .dev_id = "smi", .clk = &smi_clk},
+ { .dev_id = "c3", .clk = &c3_clk},
+ /* clock derived from apb clk */
+ { .dev_id = "adc", .clk = &adc_clk},
+ { .dev_id = "ssp", .clk = &ssp_clk},
+ { .dev_id = "gpio", .clk = &gpio_clk},
+};
+
+void __init clk_init(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(spear_clk_lookups); i++)
+ clk_register(&spear_clk_lookups[i]);
+
+ recalc_root_clocks();
+}
diff --git a/arch/arm/mach-spear3xx/include/mach/clkdev.h b/arch/arm/mach-spear3xx/include/mach/clkdev.h
new file mode 100644
index 000000000000..a3d07339d9f1
--- /dev/null
+++ b/arch/arm/mach-spear3xx/include/mach/clkdev.h
@@ -0,0 +1,19 @@
+/*
+ * arch/arm/mach-spear3xx/include/mach/clkdev.h
+ *
+ * Clock Dev framework definitions for SPEAr3xx machine family
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_CLKDEV_H
+#define __MACH_CLKDEV_H
+
+#include <plat/clkdev.h>
+
+#endif /* __MACH_CLKDEV_H */
diff --git a/arch/arm/mach-spear3xx/include/mach/debug-macro.S b/arch/arm/mach-spear3xx/include/mach/debug-macro.S
new file mode 100644
index 000000000000..590519f10d6e
--- /dev/null
+++ b/arch/arm/mach-spear3xx/include/mach/debug-macro.S
@@ -0,0 +1,14 @@
+/*
+ * arch/arm/mach-spear3xx/include/mach/debug-macro.S
+ *
+ * Debugging macro include header spear3xx machine family
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <plat/debug-macro.S>
diff --git a/arch/arm/mach-spear3xx/include/mach/entry-macro.S b/arch/arm/mach-spear3xx/include/mach/entry-macro.S
new file mode 100644
index 000000000000..947625d6b48d
--- /dev/null
+++ b/arch/arm/mach-spear3xx/include/mach/entry-macro.S
@@ -0,0 +1,46 @@
+/*
+ * arch/arm/mach-spear3xx/include/mach/entry-macro.S
+ *
+ * Low-level IRQ helper macros for SPEAr3xx machine family
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <mach/hardware.h>
+#include <mach/spear.h>
+#include <asm/hardware/vic.h>
+
+ .macro disable_fiq
+ .endm
+
+ .macro get_irqnr_preamble, base, tmp
+ .endm
+
+ .macro arch_ret_to_user, tmp1, tmp2
+ .endm
+
+ .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
+ ldr \base, =VA_SPEAR3XX_ML1_VIC_BASE
+ ldr \irqstat, [\base, #VIC_IRQ_STATUS] @ get status
+ teq \irqstat, #0
+ beq 1001f @ this will set/reset
+ @ zero register
+ /*
+	 * The following code finds the bit position of the least significant
+	 * bit set in irqstat, using the identity:
+	 * least significant bit set in n = (n & ~(n-1))
+ */
+ sub \tmp, \irqstat, #1 @ tmp = irqstat - 1
+ mvn \tmp, \tmp @ tmp = ~tmp
+ and \irqstat, \irqstat, \tmp @ irqstat &= tmp
+	/* Now irqstat holds only the lowest set bit of the VIC IRQ status */
+ clz \tmp, \irqstat @ tmp = leading zeros
+ rsb \irqnr, \tmp, #0x1F @ irqnr = 32 - tmp - 1
+
+1001: /* EQ will be set if no irqs pending */
+ .endm
diff --git a/arch/arm/mach-spear3xx/include/mach/generic.h b/arch/arm/mach-spear3xx/include/mach/generic.h
new file mode 100644
index 000000000000..af7e02c909a3
--- /dev/null
+++ b/arch/arm/mach-spear3xx/include/mach/generic.h
@@ -0,0 +1,205 @@
+/*
+ * arch/arm/mach-spear3xx/include/mach/generic.h
+ *
+ * SPEAr3XX machine family generic header file
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_GENERIC_H
+#define __MACH_GENERIC_H
+
+#include <asm/mach/time.h>
+#include <asm/mach/map.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/amba/bus.h>
+#include <plat/padmux.h>
+
+/* spear3xx declarations */
+/*
+ * Each GPT has 2 timer channels
+ * Following GPT channels will be used as clock source and clockevent
+ */
+#define SPEAR_GPT0_BASE SPEAR3XX_ML1_TMR_BASE
+#define SPEAR_GPT0_CHAN0_IRQ IRQ_CPU_GPT1_1
+#define SPEAR_GPT0_CHAN1_IRQ IRQ_CPU_GPT1_2
+
+/* Add spear3xx family device structure declarations here */
+extern struct amba_device gpio_device;
+extern struct amba_device uart_device;
+extern struct sys_timer spear_sys_timer;
+
+/* Add spear3xx family function declarations here */
+void __init clk_init(void);
+void __init spear3xx_map_io(void);
+void __init spear3xx_init_irq(void);
+void __init spear3xx_init(void);
+void spear_pmx_init(struct pmx_driver *pmx_driver, uint base, uint size);
+
+/* pad mux declarations */
+#define PMX_FIRDA_MASK (1 << 14)
+#define PMX_I2C_MASK (1 << 13)
+#define PMX_SSP_CS_MASK (1 << 12)
+#define PMX_SSP_MASK (1 << 11)
+#define PMX_MII_MASK (1 << 10)
+#define PMX_GPIO_PIN0_MASK (1 << 9)
+#define PMX_GPIO_PIN1_MASK (1 << 8)
+#define PMX_GPIO_PIN2_MASK (1 << 7)
+#define PMX_GPIO_PIN3_MASK (1 << 6)
+#define PMX_GPIO_PIN4_MASK (1 << 5)
+#define PMX_GPIO_PIN5_MASK (1 << 4)
+#define PMX_UART0_MODEM_MASK (1 << 3)
+#define PMX_UART0_MASK (1 << 2)
+#define PMX_TIMER_3_4_MASK (1 << 1)
+#define PMX_TIMER_1_2_MASK (1 << 0)
+
+/* pad mux devices */
+extern struct pmx_dev pmx_firda;
+extern struct pmx_dev pmx_i2c;
+extern struct pmx_dev pmx_ssp_cs;
+extern struct pmx_dev pmx_ssp;
+extern struct pmx_dev pmx_mii;
+extern struct pmx_dev pmx_gpio_pin0;
+extern struct pmx_dev pmx_gpio_pin1;
+extern struct pmx_dev pmx_gpio_pin2;
+extern struct pmx_dev pmx_gpio_pin3;
+extern struct pmx_dev pmx_gpio_pin4;
+extern struct pmx_dev pmx_gpio_pin5;
+extern struct pmx_dev pmx_uart0_modem;
+extern struct pmx_dev pmx_uart0;
+extern struct pmx_dev pmx_timer_3_4;
+extern struct pmx_dev pmx_timer_1_2;
+
+#if defined(CONFIG_MACH_SPEAR310) || defined(CONFIG_MACH_SPEAR320)
+/* padmux plgpio devices */
+extern struct pmx_dev pmx_plgpio_0_1;
+extern struct pmx_dev pmx_plgpio_2_3;
+extern struct pmx_dev pmx_plgpio_4_5;
+extern struct pmx_dev pmx_plgpio_6_9;
+extern struct pmx_dev pmx_plgpio_10_27;
+extern struct pmx_dev pmx_plgpio_28;
+extern struct pmx_dev pmx_plgpio_29;
+extern struct pmx_dev pmx_plgpio_30;
+extern struct pmx_dev pmx_plgpio_31;
+extern struct pmx_dev pmx_plgpio_32;
+extern struct pmx_dev pmx_plgpio_33;
+extern struct pmx_dev pmx_plgpio_34_36;
+extern struct pmx_dev pmx_plgpio_37_42;
+extern struct pmx_dev pmx_plgpio_43_44_47_48;
+extern struct pmx_dev pmx_plgpio_45_46_49_50;
+#endif
+
+extern struct pmx_driver pmx_driver;
+
+/* spear300 declarations */
+#ifdef CONFIG_MACH_SPEAR300
+/* Add spear300 machine device structure declarations here */
+extern struct amba_device gpio1_device;
+
+/* pad mux modes */
+extern struct pmx_mode nand_mode;
+extern struct pmx_mode nor_mode;
+extern struct pmx_mode photo_frame_mode;
+extern struct pmx_mode lend_ip_phone_mode;
+extern struct pmx_mode hend_ip_phone_mode;
+extern struct pmx_mode lend_wifi_phone_mode;
+extern struct pmx_mode hend_wifi_phone_mode;
+extern struct pmx_mode ata_pabx_wi2s_mode;
+extern struct pmx_mode ata_pabx_i2s_mode;
+extern struct pmx_mode caml_lcdw_mode;
+extern struct pmx_mode camu_lcd_mode;
+extern struct pmx_mode camu_wlcd_mode;
+extern struct pmx_mode caml_lcd_mode;
+
+/* pad mux devices */
+extern struct pmx_dev pmx_fsmc_2_chips;
+extern struct pmx_dev pmx_fsmc_4_chips;
+extern struct pmx_dev pmx_keyboard;
+extern struct pmx_dev pmx_clcd;
+extern struct pmx_dev pmx_telecom_gpio;
+extern struct pmx_dev pmx_telecom_tdm;
+extern struct pmx_dev pmx_telecom_spi_cs_i2c_clk;
+extern struct pmx_dev pmx_telecom_camera;
+extern struct pmx_dev pmx_telecom_dac;
+extern struct pmx_dev pmx_telecom_i2s;
+extern struct pmx_dev pmx_telecom_boot_pins;
+extern struct pmx_dev pmx_telecom_sdio_4bit;
+extern struct pmx_dev pmx_telecom_sdio_8bit;
+extern struct pmx_dev pmx_gpio1;
+
+void spear300_pmx_init(void);
+
+/* Add spear300 machine function declarations here */
+void __init spear300_init(void);
+
+#endif /* CONFIG_MACH_SPEAR300 */
+
+/* spear310 declarations */
+#ifdef CONFIG_MACH_SPEAR310
+/* Add spear310 machine device structure declarations here */
+
+/* pad mux devices */
+extern struct pmx_dev pmx_emi_cs_0_1_4_5;
+extern struct pmx_dev pmx_emi_cs_2_3;
+extern struct pmx_dev pmx_uart1;
+extern struct pmx_dev pmx_uart2;
+extern struct pmx_dev pmx_uart3_4_5;
+extern struct pmx_dev pmx_fsmc;
+extern struct pmx_dev pmx_rs485_0_1;
+extern struct pmx_dev pmx_tdm0;
+
+void spear310_pmx_init(void);
+
+/* Add spear310 machine function declarations here */
+void __init spear310_init(void);
+
+#endif /* CONFIG_MACH_SPEAR310 */
+
+/* spear320 declarations */
+#ifdef CONFIG_MACH_SPEAR320
+/* Add spear320 machine device structure declarations here */
+
+/* pad mux modes */
+extern struct pmx_mode auto_net_smii_mode;
+extern struct pmx_mode auto_net_mii_mode;
+extern struct pmx_mode auto_exp_mode;
+extern struct pmx_mode small_printers_mode;
+
+/* pad mux devices */
+extern struct pmx_dev pmx_clcd;
+extern struct pmx_dev pmx_emi;
+extern struct pmx_dev pmx_fsmc;
+extern struct pmx_dev pmx_spp;
+extern struct pmx_dev pmx_sdio;
+extern struct pmx_dev pmx_i2s;
+extern struct pmx_dev pmx_uart1;
+extern struct pmx_dev pmx_uart1_modem;
+extern struct pmx_dev pmx_uart2;
+extern struct pmx_dev pmx_touchscreen;
+extern struct pmx_dev pmx_can;
+extern struct pmx_dev pmx_sdio_led;
+extern struct pmx_dev pmx_pwm0;
+extern struct pmx_dev pmx_pwm1;
+extern struct pmx_dev pmx_pwm2;
+extern struct pmx_dev pmx_pwm3;
+extern struct pmx_dev pmx_ssp1;
+extern struct pmx_dev pmx_ssp2;
+extern struct pmx_dev pmx_mii1;
+extern struct pmx_dev pmx_smii0;
+extern struct pmx_dev pmx_smii1;
+extern struct pmx_dev pmx_i2c1;
+
+void spear320_pmx_init(void);
+
+/* Add spear320 machine function declarations here */
+void __init spear320_init(void);
+
+#endif /* CONFIG_MACH_SPEAR320 */
+
+#endif /* __MACH_GENERIC_H */
diff --git a/arch/arm/mach-spear3xx/include/mach/gpio.h b/arch/arm/mach-spear3xx/include/mach/gpio.h
new file mode 100644
index 000000000000..451b2081bfc9
--- /dev/null
+++ b/arch/arm/mach-spear3xx/include/mach/gpio.h
@@ -0,0 +1,19 @@
+/*
+ * arch/arm/mach-spear3xx/include/mach/gpio.h
+ *
+ * GPIO macros for SPEAr3xx machine family
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_GPIO_H
+#define __MACH_GPIO_H
+
+#include <plat/gpio.h>
+
+#endif /* __MACH_GPIO_H */
diff --git a/arch/arm/mach-spear3xx/include/mach/hardware.h b/arch/arm/mach-spear3xx/include/mach/hardware.h
new file mode 100644
index 000000000000..4a86e6a3c444
--- /dev/null
+++ b/arch/arm/mach-spear3xx/include/mach/hardware.h
@@ -0,0 +1,20 @@
+/*
+ * arch/arm/mach-spear3xx/include/mach/hardware.h
+ *
+ * Hardware definitions for SPEAr3xx machine family
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_HARDWARE_H
+#define __MACH_HARDWARE_H
+
+/* Physical to virtual translation of statically mapped space */
+#define IO_ADDRESS(x)		((x) | 0xF0000000)
+
+#endif /* __MACH_HARDWARE_H */
diff --git a/arch/arm/mach-spear3xx/include/mach/io.h b/arch/arm/mach-spear3xx/include/mach/io.h
new file mode 100644
index 000000000000..30cff8a1f6b5
--- /dev/null
+++ b/arch/arm/mach-spear3xx/include/mach/io.h
@@ -0,0 +1,19 @@
+/*
+ * arch/arm/mach-spear3xx/include/mach/io.h
+ *
+ * IO definitions for SPEAr3xx machine family
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_IO_H
+#define __MACH_IO_H
+
+#include <plat/io.h>
+
+#endif /* __MACH_IO_H */
diff --git a/arch/arm/mach-spear3xx/include/mach/irqs.h b/arch/arm/mach-spear3xx/include/mach/irqs.h
new file mode 100644
index 000000000000..7f940b818473
--- /dev/null
+++ b/arch/arm/mach-spear3xx/include/mach/irqs.h
@@ -0,0 +1,152 @@
+/*
+ * arch/arm/mach-spear3xx/include/mach/irqs.h
+ *
+ * IRQ helper macros for SPEAr3xx machine family
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_IRQS_H
+#define __MACH_IRQS_H
+
+/* SPEAr3xx IRQ definitions */
+#define IRQ_HW_ACCEL_MOD_0 0
+#define IRQ_INTRCOMM_RAS_ARM 1
+#define IRQ_CPU_GPT1_1 2
+#define IRQ_CPU_GPT1_2 3
+#define IRQ_BASIC_GPT1_1 4
+#define IRQ_BASIC_GPT1_2 5
+#define IRQ_BASIC_GPT2_1 6
+#define IRQ_BASIC_GPT2_2 7
+#define IRQ_BASIC_DMA 8
+#define IRQ_BASIC_SMI 9
+#define IRQ_BASIC_RTC 10
+#define IRQ_BASIC_GPIO 11
+#define IRQ_BASIC_WDT 12
+#define IRQ_DDR_CONTROLLER 13
+#define IRQ_SYS_ERROR 14
+#define IRQ_WAKEUP_RCV 15
+#define IRQ_JPEG 16
+#define IRQ_IRDA 17
+#define IRQ_ADC 18
+#define IRQ_UART 19
+#define IRQ_SSP 20
+#define IRQ_I2C 21
+#define IRQ_MAC_1 22
+#define IRQ_MAC_2 23
+#define IRQ_USB_DEV 24
+#define IRQ_USB_H_OHCI_0 25
+#define IRQ_USB_H_EHCI_0 26
+#define IRQ_USB_H_EHCI_1 IRQ_USB_H_EHCI_0
+#define IRQ_USB_H_OHCI_1 27
+#define IRQ_GEN_RAS_1 28
+#define IRQ_GEN_RAS_2 29
+#define IRQ_GEN_RAS_3 30
+#define IRQ_HW_ACCEL_MOD_1 31
+#define IRQ_VIC_END 32
+
+#define VIRQ_START IRQ_VIC_END
+
+/* SPEAr300 Virtual irq definitions */
+#ifdef CONFIG_MACH_SPEAR300
+/* IRQs sharing IRQ_GEN_RAS_1 */
+#define VIRQ_IT_PERS_S (VIRQ_START + 0)
+#define VIRQ_IT_CHANGE_S (VIRQ_START + 1)
+#define VIRQ_I2S (VIRQ_START + 2)
+#define VIRQ_TDM (VIRQ_START + 3)
+#define VIRQ_CAMERA_L (VIRQ_START + 4)
+#define VIRQ_CAMERA_F (VIRQ_START + 5)
+#define VIRQ_CAMERA_V (VIRQ_START + 6)
+#define VIRQ_KEYBOARD (VIRQ_START + 7)
+#define VIRQ_GPIO1 (VIRQ_START + 8)
+
+/* IRQs sharing IRQ_GEN_RAS_3 */
+#define IRQ_CLCD IRQ_GEN_RAS_3
+
+/* IRQs sharing IRQ_INTRCOMM_RAS_ARM */
+#define IRQ_SDIO IRQ_INTRCOMM_RAS_ARM
+
+/* GPIO pins virtual irqs */
+#define SPEAR_GPIO_INT_BASE (VIRQ_START + 9)
+#define SPEAR_GPIO1_INT_BASE (SPEAR_GPIO_INT_BASE + 8)
+#define SPEAR_GPIO_INT_END (SPEAR_GPIO1_INT_BASE + 8)
+
+/* SPEAr310 Virtual irq definitions */
+#elif defined(CONFIG_MACH_SPEAR310)
+/* IRQs sharing IRQ_GEN_RAS_1 */
+#define VIRQ_SMII0 (VIRQ_START + 0)
+#define VIRQ_SMII1 (VIRQ_START + 1)
+#define VIRQ_SMII2 (VIRQ_START + 2)
+#define VIRQ_SMII3 (VIRQ_START + 3)
+#define VIRQ_WAKEUP_SMII0 (VIRQ_START + 4)
+#define VIRQ_WAKEUP_SMII1 (VIRQ_START + 5)
+#define VIRQ_WAKEUP_SMII2 (VIRQ_START + 6)
+#define VIRQ_WAKEUP_SMII3 (VIRQ_START + 7)
+
+/* IRQs sharing IRQ_GEN_RAS_2 */
+#define VIRQ_UART1 (VIRQ_START + 8)
+#define VIRQ_UART2 (VIRQ_START + 9)
+#define VIRQ_UART3 (VIRQ_START + 10)
+#define VIRQ_UART4 (VIRQ_START + 11)
+#define VIRQ_UART5 (VIRQ_START + 12)
+
+/* IRQs sharing IRQ_GEN_RAS_3 */
+#define VIRQ_EMI (VIRQ_START + 13)
+#define VIRQ_PLGPIO (VIRQ_START + 14)
+
+/* IRQs sharing IRQ_INTRCOMM_RAS_ARM */
+#define VIRQ_TDM_HDLC (VIRQ_START + 15)
+#define VIRQ_RS485_0 (VIRQ_START + 16)
+#define VIRQ_RS485_1 (VIRQ_START + 17)
+
+/* GPIO pins virtual irqs */
+#define SPEAR_GPIO_INT_BASE (VIRQ_START + 18)
+
+/* SPEAr320 Virtual irq definitions */
+#else
+/* IRQs sharing IRQ_GEN_RAS_1 */
+#define VIRQ_EMI (VIRQ_START + 0)
+#define VIRQ_CLCD (VIRQ_START + 1)
+#define VIRQ_SPP (VIRQ_START + 2)
+
+/* IRQs sharing IRQ_GEN_RAS_2 */
+#define IRQ_SDIO IRQ_GEN_RAS_2
+
+/* IRQs sharing IRQ_GEN_RAS_3 */
+#define VIRQ_PLGPIO (VIRQ_START + 3)
+#define VIRQ_I2S_PLAY (VIRQ_START + 4)
+#define VIRQ_I2S_REC (VIRQ_START + 5)
+
+/* IRQs sharing IRQ_INTRCOMM_RAS_ARM */
+#define VIRQ_CANU (VIRQ_START + 6)
+#define VIRQ_CANL (VIRQ_START + 7)
+#define VIRQ_UART1 (VIRQ_START + 8)
+#define VIRQ_UART2 (VIRQ_START + 9)
+#define VIRQ_SSP1 (VIRQ_START + 10)
+#define VIRQ_SSP2 (VIRQ_START + 11)
+#define VIRQ_SMII0 (VIRQ_START + 12)
+#define VIRQ_MII1_SMII1 (VIRQ_START + 13)
+#define VIRQ_WAKEUP_SMII0 (VIRQ_START + 14)
+#define VIRQ_WAKEUP_MII1_SMII1 (VIRQ_START + 15)
+#define VIRQ_I2C (VIRQ_START + 16)
+
+/* GPIO pins virtual irqs */
+#define SPEAR_GPIO_INT_BASE (VIRQ_START + 17)
+
+#endif
+
+/* PLGPIO Virtual IRQs */
+#if defined(CONFIG_MACH_SPEAR310) || defined(CONFIG_MACH_SPEAR320)
+#define SPEAR_PLGPIO_INT_BASE (SPEAR_GPIO_INT_BASE + 8)
+#define SPEAR_GPIO_INT_END (SPEAR_PLGPIO_INT_BASE + 102)
+#endif
+
+#define VIRQ_END SPEAR_GPIO_INT_END
+#define NR_IRQS VIRQ_END
+
+#endif /* __MACH_IRQS_H */
diff --git a/arch/arm/mach-spear3xx/include/mach/memory.h b/arch/arm/mach-spear3xx/include/mach/memory.h
new file mode 100644
index 000000000000..51735221ea19
--- /dev/null
+++ b/arch/arm/mach-spear3xx/include/mach/memory.h
@@ -0,0 +1,19 @@
+/*
+ * arch/arm/mach-spear3xx/include/mach/memory.h
+ *
+ * Memory map for SPEAr3xx machine family
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_MEMORY_H
+#define __MACH_MEMORY_H
+
+#include <plat/memory.h>
+
+#endif /* __MACH_MEMORY_H */
diff --git a/arch/arm/mach-spear3xx/include/mach/misc_regs.h b/arch/arm/mach-spear3xx/include/mach/misc_regs.h
new file mode 100644
index 000000000000..38d767a1aba0
--- /dev/null
+++ b/arch/arm/mach-spear3xx/include/mach/misc_regs.h
@@ -0,0 +1,163 @@
+/*
+ * arch/arm/mach-spear3xx/include/mach/misc_regs.h
+ *
+ * Miscellaneous registers definitions for SPEAr3xx machine family
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_MISC_REGS_H
+#define __MACH_MISC_REGS_H
+
+#include <mach/spear.h>
+
+#define MISC_BASE VA_SPEAR3XX_ICM3_MISC_REG_BASE
+
+#define SOC_CFG_CTR ((unsigned int *)(MISC_BASE + 0x000))
+#define DIAG_CFG_CTR ((unsigned int *)(MISC_BASE + 0x004))
+#define PLL1_CTR ((unsigned int *)(MISC_BASE + 0x008))
+#define PLL1_FRQ ((unsigned int *)(MISC_BASE + 0x00C))
+#define PLL1_MOD ((unsigned int *)(MISC_BASE + 0x010))
+#define PLL2_CTR ((unsigned int *)(MISC_BASE + 0x014))
+/* PLL_CTR register masks */
+#define PLL_ENABLE 2
+#define PLL_MODE_SHIFT 4
+#define PLL_MODE_MASK 0x3
+#define PLL_MODE_NORMAL 0
+#define PLL_MODE_FRACTION 1
+#define PLL_MODE_DITH_DSB 2
+#define PLL_MODE_DITH_SSB 3
+
+#define PLL2_FRQ ((unsigned int *)(MISC_BASE + 0x018))
+/* PLL FRQ register masks */
+#define PLL_DIV_N_SHIFT 0
+#define PLL_DIV_N_MASK 0xFF
+#define PLL_DIV_P_SHIFT 8
+#define PLL_DIV_P_MASK 0x7
+#define PLL_NORM_FDBK_M_SHIFT 24
+#define PLL_NORM_FDBK_M_MASK 0xFF
+#define PLL_DITH_FDBK_M_SHIFT 16
+#define PLL_DITH_FDBK_M_MASK 0xFFFF
+
+#define PLL2_MOD ((unsigned int *)(MISC_BASE + 0x01C))
+#define PLL_CLK_CFG ((unsigned int *)(MISC_BASE + 0x020))
+#define CORE_CLK_CFG ((unsigned int *)(MISC_BASE + 0x024))
+/* CORE CLK CFG register masks */
+#define PLL_HCLK_RATIO_SHIFT 10
+#define PLL_HCLK_RATIO_MASK 0x3
+#define HCLK_PCLK_RATIO_SHIFT 8
+#define HCLK_PCLK_RATIO_MASK 0x3
+
+#define PERIP_CLK_CFG ((unsigned int *)(MISC_BASE + 0x028))
+/* PERIP_CLK_CFG register masks */
+#define UART_CLK_SHIFT 4
+#define UART_CLK_MASK 0x1
+#define FIRDA_CLK_SHIFT 5
+#define FIRDA_CLK_MASK 0x3
+#define GPT0_CLK_SHIFT 8
+#define GPT1_CLK_SHIFT 11
+#define GPT2_CLK_SHIFT 12
+#define GPT_CLK_MASK 0x1
+#define AUX_CLK_PLL3_MASK 0
+#define AUX_CLK_PLL1_MASK 1
+
+#define PERIP1_CLK_ENB ((unsigned int *)(MISC_BASE + 0x02C))
+/* PERIP1_CLK_ENB register masks */
+#define UART_CLK_ENB 3
+#define SSP_CLK_ENB 5
+#define I2C_CLK_ENB 7
+#define JPEG_CLK_ENB 8
+#define FIRDA_CLK_ENB 10
+#define GPT1_CLK_ENB 11
+#define GPT2_CLK_ENB 12
+#define ADC_CLK_ENB 15
+#define RTC_CLK_ENB 17
+#define GPIO_CLK_ENB 18
+#define DMA_CLK_ENB 19
+#define SMI_CLK_ENB 21
+#define GMAC_CLK_ENB 23
+#define USBD_CLK_ENB 24
+#define USBH_CLK_ENB 25
+#define C3_CLK_ENB 31
+
+#define SOC_CORE_ID ((unsigned int *)(MISC_BASE + 0x030))
+#define RAS_CLK_ENB ((unsigned int *)(MISC_BASE + 0x034))
+#define PERIP1_SOF_RST ((unsigned int *)(MISC_BASE + 0x038))
+/* PERIP1_SOF_RST register masks */
+#define JPEG_SOF_RST 8
+
+#define SOC_USER_ID ((unsigned int *)(MISC_BASE + 0x03C))
+#define RAS_SOF_RST ((unsigned int *)(MISC_BASE + 0x040))
+#define PRSC1_CLK_CFG ((unsigned int *)(MISC_BASE + 0x044))
+#define PRSC2_CLK_CFG ((unsigned int *)(MISC_BASE + 0x048))
+#define PRSC3_CLK_CFG ((unsigned int *)(MISC_BASE + 0x04C))
+/* gpt synthesizer register masks */
+#define GPT_MSCALE_SHIFT 0
+#define GPT_MSCALE_MASK 0xFFF
+#define GPT_NSCALE_SHIFT 12
+#define GPT_NSCALE_MASK 0xF
+
+#define AMEM_CLK_CFG ((unsigned int *)(MISC_BASE + 0x050))
+#define EXPI_CLK_CFG ((unsigned int *)(MISC_BASE + 0x054))
+#define CLCD_CLK_SYNT ((unsigned int *)(MISC_BASE + 0x05C))
+#define FIRDA_CLK_SYNT ((unsigned int *)(MISC_BASE + 0x060))
+#define UART_CLK_SYNT ((unsigned int *)(MISC_BASE + 0x064))
+#define GMAC_CLK_SYNT ((unsigned int *)(MISC_BASE + 0x068))
+#define RAS1_CLK_SYNT ((unsigned int *)(MISC_BASE + 0x06C))
+#define RAS2_CLK_SYNT ((unsigned int *)(MISC_BASE + 0x070))
+#define RAS3_CLK_SYNT ((unsigned int *)(MISC_BASE + 0x074))
+#define RAS4_CLK_SYNT ((unsigned int *)(MISC_BASE + 0x078))
+/* aux clk synthesizer register masks for irda to ras4 */
+#define AUX_EQ_SEL_SHIFT 30
+#define AUX_EQ_SEL_MASK 1
+#define AUX_EQ1_SEL 0
+#define AUX_EQ2_SEL 1
+#define AUX_XSCALE_SHIFT 16
+#define AUX_XSCALE_MASK 0xFFF
+#define AUX_YSCALE_SHIFT 0
+#define AUX_YSCALE_MASK 0xFFF
+
+#define ICM1_ARB_CFG ((unsigned int *)(MISC_BASE + 0x07C))
+#define ICM2_ARB_CFG ((unsigned int *)(MISC_BASE + 0x080))
+#define ICM3_ARB_CFG ((unsigned int *)(MISC_BASE + 0x084))
+#define ICM4_ARB_CFG ((unsigned int *)(MISC_BASE + 0x088))
+#define ICM5_ARB_CFG ((unsigned int *)(MISC_BASE + 0x08C))
+#define ICM6_ARB_CFG ((unsigned int *)(MISC_BASE + 0x090))
+#define ICM7_ARB_CFG ((unsigned int *)(MISC_BASE + 0x094))
+#define ICM8_ARB_CFG ((unsigned int *)(MISC_BASE + 0x098))
+#define ICM9_ARB_CFG ((unsigned int *)(MISC_BASE + 0x09C))
+#define DMA_CHN_CFG ((unsigned int *)(MISC_BASE + 0x0A0))
+#define USB2_PHY_CFG ((unsigned int *)(MISC_BASE + 0x0A4))
+#define GMAC_CFG_CTR ((unsigned int *)(MISC_BASE + 0x0A8))
+#define EXPI_CFG_CTR ((unsigned int *)(MISC_BASE + 0x0AC))
+#define PRC1_LOCK_CTR ((unsigned int *)(MISC_BASE + 0x0C0))
+#define PRC2_LOCK_CTR ((unsigned int *)(MISC_BASE + 0x0C4))
+#define PRC3_LOCK_CTR ((unsigned int *)(MISC_BASE + 0x0C8))
+#define PRC4_LOCK_CTR ((unsigned int *)(MISC_BASE + 0x0CC))
+#define PRC1_IRQ_CTR ((unsigned int *)(MISC_BASE + 0x0D0))
+#define PRC2_IRQ_CTR ((unsigned int *)(MISC_BASE + 0x0D4))
+#define PRC3_IRQ_CTR ((unsigned int *)(MISC_BASE + 0x0D8))
+#define PRC4_IRQ_CTR ((unsigned int *)(MISC_BASE + 0x0DC))
+#define PWRDOWN_CFG_CTR ((unsigned int *)(MISC_BASE + 0x0E0))
+#define COMPSSTL_1V8_CFG ((unsigned int *)(MISC_BASE + 0x0E4))
+#define COMPSSTL_2V5_CFG ((unsigned int *)(MISC_BASE + 0x0E8))
+#define COMPCOR_3V3_CFG ((unsigned int *)(MISC_BASE + 0x0EC))
+#define SSTLPAD_CFG_CTR ((unsigned int *)(MISC_BASE + 0x0F0))
+#define BIST1_CFG_CTR ((unsigned int *)(MISC_BASE + 0x0F4))
+#define BIST2_CFG_CTR ((unsigned int *)(MISC_BASE + 0x0F8))
+#define BIST3_CFG_CTR ((unsigned int *)(MISC_BASE + 0x0FC))
+#define BIST4_CFG_CTR ((unsigned int *)(MISC_BASE + 0x100))
+#define BIST5_CFG_CTR ((unsigned int *)(MISC_BASE + 0x104))
+#define BIST1_STS_RES ((unsigned int *)(MISC_BASE + 0x108))
+#define BIST2_STS_RES ((unsigned int *)(MISC_BASE + 0x10C))
+#define BIST3_STS_RES ((unsigned int *)(MISC_BASE + 0x110))
+#define BIST4_STS_RES ((unsigned int *)(MISC_BASE + 0x114))
+#define BIST5_STS_RES ((unsigned int *)(MISC_BASE + 0x118))
+#define SYSERR_CFG_CTR ((unsigned int *)(MISC_BASE + 0x11C))
+
+#endif /* __MACH_MISC_REGS_H */
diff --git a/arch/arm/mach-spear3xx/include/mach/spear.h b/arch/arm/mach-spear3xx/include/mach/spear.h
new file mode 100644
index 000000000000..dcca8568a486
--- /dev/null
+++ b/arch/arm/mach-spear3xx/include/mach/spear.h
@@ -0,0 +1,144 @@
+/*
+ * arch/arm/mach-spear3xx/include/mach/spear.h
+ *
+ * SPEAr3xx Machine family specific definition
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_SPEAR3XX_H
+#define __MACH_SPEAR3XX_H
+
+#include <mach/hardware.h>
+#include <mach/spear300.h>
+#include <mach/spear310.h>
+#include <mach/spear320.h>
+
+#define SPEAR3XX_ML_SDRAM_BASE 0x00000000
+#define SPEAR3XX_ML_SDRAM_SIZE 0x40000000
+
+#define SPEAR3XX_ICM9_BASE 0xC0000000
+#define SPEAR3XX_ICM9_SIZE 0x10000000
+
+/* ICM1 - Low speed connection */
+#define SPEAR3XX_ICM1_2_BASE 0xD0000000
+#define SPEAR3XX_ICM1_2_SIZE 0x10000000
+
+#define SPEAR3XX_ICM1_UART_BASE 0xD0000000
+#define VA_SPEAR3XX_ICM1_UART_BASE IO_ADDRESS(SPEAR3XX_ICM1_UART_BASE)
+#define SPEAR3XX_ICM1_UART_SIZE 0x00080000
+
+#define SPEAR3XX_ICM1_ADC_BASE 0xD0080000
+#define SPEAR3XX_ICM1_ADC_SIZE 0x00080000
+
+#define SPEAR3XX_ICM1_SSP_BASE 0xD0100000
+#define SPEAR3XX_ICM1_SSP_SIZE 0x00080000
+
+#define SPEAR3XX_ICM1_I2C_BASE 0xD0180000
+#define SPEAR3XX_ICM1_I2C_SIZE 0x00080000
+
+#define SPEAR3XX_ICM1_JPEG_BASE 0xD0800000
+#define SPEAR3XX_ICM1_JPEG_SIZE 0x00800000
+
+#define SPEAR3XX_ICM1_IRDA_BASE 0xD1000000
+#define SPEAR3XX_ICM1_IRDA_SIZE 0x00080000
+
+#define SPEAR3XX_ICM1_SRAM_BASE 0xD2800000
+#define SPEAR3XX_ICM1_SRAM_SIZE 0x05800000
+
+/* ICM2 - Application Subsystem */
+#define SPEAR3XX_ICM2_HWACCEL0_BASE 0xD8800000
+#define SPEAR3XX_ICM2_HWACCEL0_SIZE 0x00800000
+
+#define SPEAR3XX_ICM2_HWACCEL1_BASE 0xD9000000
+#define SPEAR3XX_ICM2_HWACCEL1_SIZE 0x00800000
+
+/* ICM4 - High Speed Connection */
+#define SPEAR3XX_ICM4_BASE 0xE0000000
+#define SPEAR3XX_ICM4_SIZE 0x08000000
+
+#define SPEAR3XX_ICM4_MII_BASE 0xE0800000
+#define SPEAR3XX_ICM4_MII_SIZE 0x00800000
+
+#define SPEAR3XX_ICM4_USBD_FIFO_BASE 0xE1000000
+#define SPEAR3XX_ICM4_USBD_FIFO_SIZE 0x00100000
+
+#define SPEAR3XX_ICM4_USBD_CSR_BASE 0xE1100000
+#define SPEAR3XX_ICM4_USBD_CSR_SIZE 0x00100000
+
+#define SPEAR3XX_ICM4_USBD_PLDT_BASE 0xE1200000
+#define SPEAR3XX_ICM4_USBD_PLDT_SIZE 0x00100000
+
+#define SPEAR3XX_ICM4_USB_EHCI0_1_BASE 0xE1800000
+#define SPEAR3XX_ICM4_USB_EHCI0_1_SIZE 0x00100000
+
+#define SPEAR3XX_ICM4_USB_OHCI0_BASE 0xE1900000
+#define SPEAR3XX_ICM4_USB_OHCI0_SIZE 0x00100000
+
+#define SPEAR3XX_ICM4_USB_OHCI1_BASE 0xE2100000
+#define SPEAR3XX_ICM4_USB_OHCI1_SIZE 0x00100000
+
+#define SPEAR3XX_ICM4_USB_ARB_BASE 0xE2800000
+#define SPEAR3XX_ICM4_USB_ARB_SIZE 0x00010000
+
+/* ML1 - Multi Layer CPU Subsystem */
+#define SPEAR3XX_ICM3_ML1_2_BASE 0xF0000000
+#define SPEAR3XX_ICM3_ML1_2_SIZE 0x0F000000
+
+#define SPEAR3XX_ML1_TMR_BASE 0xF0000000
+#define SPEAR3XX_ML1_TMR_SIZE 0x00100000
+
+#define SPEAR3XX_ML1_VIC_BASE 0xF1100000
+#define VA_SPEAR3XX_ML1_VIC_BASE IO_ADDRESS(SPEAR3XX_ML1_VIC_BASE)
+#define SPEAR3XX_ML1_VIC_SIZE 0x00100000
+
+/* ICM3 - Basic Subsystem */
+#define SPEAR3XX_ICM3_SMEM_BASE 0xF8000000
+#define SPEAR3XX_ICM3_SMEM_SIZE 0x04000000
+
+#define SPEAR3XX_ICM3_SMI_CTRL_BASE 0xFC000000
+#define SPEAR3XX_ICM3_SMI_CTRL_SIZE 0x00200000
+
+#define SPEAR3XX_ICM3_DMA_BASE 0xFC400000
+#define SPEAR3XX_ICM3_DMA_SIZE 0x00200000
+
+#define SPEAR3XX_ICM3_SDRAM_CTRL_BASE 0xFC600000
+#define SPEAR3XX_ICM3_SDRAM_CTRL_SIZE 0x00200000
+
+#define SPEAR3XX_ICM3_TMR0_BASE 0xFC800000
+#define SPEAR3XX_ICM3_TMR0_SIZE 0x00080000
+
+#define SPEAR3XX_ICM3_WDT_BASE 0xFC880000
+#define SPEAR3XX_ICM3_WDT_SIZE 0x00080000
+
+#define SPEAR3XX_ICM3_RTC_BASE 0xFC900000
+#define SPEAR3XX_ICM3_RTC_SIZE 0x00080000
+
+#define SPEAR3XX_ICM3_GPIO_BASE 0xFC980000
+#define SPEAR3XX_ICM3_GPIO_SIZE 0x00080000
+
+#define SPEAR3XX_ICM3_SYS_CTRL_BASE 0xFCA00000
+#define VA_SPEAR3XX_ICM3_SYS_CTRL_BASE IO_ADDRESS(SPEAR3XX_ICM3_SYS_CTRL_BASE)
+#define SPEAR3XX_ICM3_SYS_CTRL_SIZE 0x00080000
+
+#define SPEAR3XX_ICM3_MISC_REG_BASE 0xFCA80000
+#define VA_SPEAR3XX_ICM3_MISC_REG_BASE IO_ADDRESS(SPEAR3XX_ICM3_MISC_REG_BASE)
+#define SPEAR3XX_ICM3_MISC_REG_SIZE 0x00080000
+
+#define SPEAR3XX_ICM3_TMR1_BASE 0xFCB00000
+#define SPEAR3XX_ICM3_TMR1_SIZE 0x00080000
+
+/* Debug uart for linux, will be used for debug and uncompress messages */
+#define SPEAR_DBG_UART_BASE SPEAR3XX_ICM1_UART_BASE
+#define VA_SPEAR_DBG_UART_BASE VA_SPEAR3XX_ICM1_UART_BASE
+
+/* Sysctl base for spear platform */
+#define SPEAR_SYS_CTRL_BASE SPEAR3XX_ICM3_SYS_CTRL_BASE
+#define VA_SPEAR_SYS_CTRL_BASE VA_SPEAR3XX_ICM3_SYS_CTRL_BASE
+
+#endif /* __MACH_SPEAR3XX_H */
diff --git a/arch/arm/mach-spear3xx/include/mach/spear300.h b/arch/arm/mach-spear3xx/include/mach/spear300.h
new file mode 100644
index 000000000000..ccaa76522ee2
--- /dev/null
+++ b/arch/arm/mach-spear3xx/include/mach/spear300.h
@@ -0,0 +1,83 @@
+/*
+ * arch/arm/mach-spear3xx/include/mach/spear300.h
+ *
+ * SPEAr300 Machine specific definition
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifdef CONFIG_MACH_SPEAR300
+
+#ifndef __MACH_SPEAR300_H
+#define __MACH_SPEAR300_H
+
+/* Base address of various IPs */
+#define SPEAR300_TELECOM_BASE 0x50000000
+#define SPEAR300_TELECOM_SIZE 0x10000000
+
+/* Interrupt registers offsets and masks */
+#define SPEAR300_TELECOM_REG_SIZE 0x00010000
+#define INT_ENB_MASK_REG 0x54
+#define INT_STS_MASK_REG 0x58
+#define IT_PERS_S_IRQ_MASK (1 << 0)
+#define IT_CHANGE_S_IRQ_MASK (1 << 1)
+#define I2S_IRQ_MASK (1 << 2)
+#define TDM_IRQ_MASK (1 << 3)
+#define CAMERA_L_IRQ_MASK (1 << 4)
+#define CAMERA_F_IRQ_MASK (1 << 5)
+#define CAMERA_V_IRQ_MASK (1 << 6)
+#define KEYBOARD_IRQ_MASK (1 << 7)
+#define GPIO1_IRQ_MASK (1 << 8)
+
+#define SHIRQ_RAS1_MASK 0x1FF
+
+#define SPEAR300_CLCD_BASE 0x60000000
+#define SPEAR300_CLCD_SIZE 0x10000000
+
+#define SPEAR300_SDIO_BASE 0x70000000
+#define SPEAR300_SDIO_SIZE 0x10000000
+
+#define SPEAR300_NAND_0_BASE 0x80000000
+#define SPEAR300_NAND_0_SIZE 0x04000000
+
+#define SPEAR300_NAND_1_BASE 0x84000000
+#define SPEAR300_NAND_1_SIZE 0x04000000
+
+#define SPEAR300_NAND_2_BASE 0x88000000
+#define SPEAR300_NAND_2_SIZE 0x04000000
+
+#define SPEAR300_NAND_3_BASE 0x8c000000
+#define SPEAR300_NAND_3_SIZE 0x04000000
+
+#define SPEAR300_NOR_0_BASE 0x90000000
+#define SPEAR300_NOR_0_SIZE 0x01000000
+
+#define SPEAR300_NOR_1_BASE 0x91000000
+#define SPEAR300_NOR_1_SIZE 0x01000000
+
+#define SPEAR300_NOR_2_BASE 0x92000000
+#define SPEAR300_NOR_2_SIZE 0x01000000
+
+#define SPEAR300_NOR_3_BASE 0x93000000
+#define SPEAR300_NOR_3_SIZE 0x01000000
+
+#define SPEAR300_FSMC_BASE 0x94000000
+#define SPEAR300_FSMC_SIZE 0x05000000
+
+#define SPEAR300_SOC_CONFIG_BASE 0x99000000
+#define SPEAR300_SOC_CONFIG_SIZE 0x00000008
+
+#define SPEAR300_KEYBOARD_BASE 0xA0000000
+#define SPEAR300_KEYBOARD_SIZE 0x09000000
+
+#define SPEAR300_GPIO_BASE 0xA9000000
+#define SPEAR300_GPIO_SIZE 0x07000000
+
+#endif /* __MACH_SPEAR300_H */
+
+#endif /* CONFIG_MACH_SPEAR300 */
diff --git a/arch/arm/mach-spear3xx/include/mach/spear310.h b/arch/arm/mach-spear3xx/include/mach/spear310.h
new file mode 100644
index 000000000000..b27bb8af3309
--- /dev/null
+++ b/arch/arm/mach-spear3xx/include/mach/spear310.h
@@ -0,0 +1,70 @@
+/*
+ * arch/arm/mach-spear3xx/include/mach/spear310.h
+ *
+ * SPEAr310 Machine specific definition
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifdef CONFIG_MACH_SPEAR310
+
+#ifndef __MACH_SPEAR310_H
+#define __MACH_SPEAR310_H
+
+#define SPEAR310_NAND_BASE 0x40000000
+#define SPEAR310_NAND_SIZE 0x04000000
+
+#define SPEAR310_FSMC_BASE 0x44000000
+#define SPEAR310_FSMC_SIZE 0x01000000
+
+#define SPEAR310_UART1_BASE 0xB2000000
+#define SPEAR310_UART2_BASE 0xB2080000
+#define SPEAR310_UART3_BASE 0xB2100000
+#define SPEAR310_UART4_BASE 0xB2180000
+#define SPEAR310_UART5_BASE 0xB2200000
+#define SPEAR310_UART_SIZE 0x00080000
+
+#define SPEAR310_HDLC_BASE 0xB2800000
+#define SPEAR310_HDLC_SIZE 0x00800000
+
+#define SPEAR310_RS485_0_BASE 0xB3000000
+#define SPEAR310_RS485_0_SIZE 0x00800000
+
+#define SPEAR310_RS485_1_BASE 0xB3800000
+#define SPEAR310_RS485_1_SIZE 0x00800000
+
+#define SPEAR310_SOC_CONFIG_BASE 0xB4000000
+#define SPEAR310_SOC_CONFIG_SIZE 0x00000070
+/* Interrupt registers offsets and masks */
+#define INT_STS_MASK_REG 0x04
+#define SMII0_IRQ_MASK (1 << 0)
+#define SMII1_IRQ_MASK (1 << 1)
+#define SMII2_IRQ_MASK (1 << 2)
+#define SMII3_IRQ_MASK (1 << 3)
+#define WAKEUP_SMII0_IRQ_MASK (1 << 4)
+#define WAKEUP_SMII1_IRQ_MASK (1 << 5)
+#define WAKEUP_SMII2_IRQ_MASK (1 << 6)
+#define WAKEUP_SMII3_IRQ_MASK (1 << 7)
+#define UART1_IRQ_MASK (1 << 8)
+#define UART2_IRQ_MASK (1 << 9)
+#define UART3_IRQ_MASK (1 << 10)
+#define UART4_IRQ_MASK (1 << 11)
+#define UART5_IRQ_MASK (1 << 12)
+#define EMI_IRQ_MASK (1 << 13)
+#define TDM_HDLC_IRQ_MASK (1 << 14)
+#define RS485_0_IRQ_MASK (1 << 15)
+#define RS485_1_IRQ_MASK (1 << 16)
+
+#define SHIRQ_RAS1_MASK 0x000FF
+#define SHIRQ_RAS2_MASK 0x01F00
+#define SHIRQ_RAS3_MASK 0x02000
+#define SHIRQ_INTRCOMM_RAS_MASK 0x1C000
+
+#endif /* __MACH_SPEAR310_H */
+
+#endif /* CONFIG_MACH_SPEAR310 */
diff --git a/arch/arm/mach-spear3xx/include/mach/spear320.h b/arch/arm/mach-spear3xx/include/mach/spear320.h
new file mode 100644
index 000000000000..cacf17a958cd
--- /dev/null
+++ b/arch/arm/mach-spear3xx/include/mach/spear320.h
@@ -0,0 +1,96 @@
+/*
+ * arch/arm/mach-spear3xx/include/mach/spear320.h
+ *
+ * SPEAr320 Machine specific definition
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifdef CONFIG_MACH_SPEAR320
+
+#ifndef __MACH_SPEAR320_H
+#define __MACH_SPEAR320_H
+
+#define SPEAR320_EMI_CTRL_BASE 0x40000000
+#define SPEAR320_EMI_CTRL_SIZE 0x08000000
+
+#define SPEAR320_FSMC_BASE 0x4C000000
+#define SPEAR320_FSMC_SIZE 0x01000000
+
+#define SPEAR320_I2S_BASE 0x60000000
+#define SPEAR320_I2S_SIZE 0x10000000
+
+#define SPEAR320_SDIO_BASE 0x70000000
+#define SPEAR320_SDIO_SIZE 0x10000000
+
+#define SPEAR320_CLCD_BASE 0x90000000
+#define SPEAR320_CLCD_SIZE 0x10000000
+
+#define SPEAR320_PAR_PORT_BASE 0xA0000000
+#define SPEAR320_PAR_PORT_SIZE 0x01000000
+
+#define SPEAR320_CAN0_BASE 0xA1000000
+#define SPEAR320_CAN0_SIZE 0x01000000
+
+#define SPEAR320_CAN1_BASE 0xA2000000
+#define SPEAR320_CAN1_SIZE 0x01000000
+
+#define SPEAR320_UART1_BASE 0xA3000000
+#define SPEAR320_UART2_BASE 0xA4000000
+#define SPEAR320_UART_SIZE 0x01000000
+
+#define SPEAR320_SSP0_BASE 0xA5000000
+#define SPEAR320_SSP0_SIZE 0x01000000
+
+#define SPEAR320_SSP1_BASE 0xA6000000
+#define SPEAR320_SSP1_SIZE 0x01000000
+
+#define SPEAR320_I2C_BASE 0xA7000000
+#define SPEAR320_I2C_SIZE 0x01000000
+
+#define SPEAR320_PWM_BASE 0xA8000000
+#define SPEAR320_PWM_SIZE 0x01000000
+
+#define SPEAR320_SMII0_BASE 0xAA000000
+#define SPEAR320_SMII0_SIZE 0x01000000
+
+#define SPEAR320_SMII1_BASE 0xAB000000
+#define SPEAR320_SMII1_SIZE 0x01000000
+
+#define SPEAR320_SOC_CONFIG_BASE 0xB4000000
+#define SPEAR320_SOC_CONFIG_SIZE 0x00000070
+/* Interrupt registers offsets and masks */
+#define INT_STS_MASK_REG 0x04
+#define INT_CLR_MASK_REG 0x04
+#define INT_ENB_MASK_REG 0x08
+#define GPIO_IRQ_MASK (1 << 0)
+#define I2S_PLAY_IRQ_MASK (1 << 1)
+#define I2S_REC_IRQ_MASK (1 << 2)
+#define EMI_IRQ_MASK (1 << 7)
+#define CLCD_IRQ_MASK (1 << 8)
+#define SPP_IRQ_MASK (1 << 9)
+#define SDIO_IRQ_MASK (1 << 10)
+#define CAN_U_IRQ_MASK (1 << 11)
+#define CAN_L_IRQ_MASK (1 << 12)
+#define UART1_IRQ_MASK (1 << 13)
+#define UART2_IRQ_MASK (1 << 14)
+#define SSP1_IRQ_MASK (1 << 15)
+#define SSP2_IRQ_MASK (1 << 16)
+#define SMII0_IRQ_MASK (1 << 17)
+#define MII1_SMII1_IRQ_MASK (1 << 18)
+#define WAKEUP_SMII0_IRQ_MASK (1 << 19)
+#define WAKEUP_MII1_SMII1_IRQ_MASK (1 << 20)
+#define I2C1_IRQ_MASK (1 << 21)
+
+#define SHIRQ_RAS1_MASK 0x000380
+#define SHIRQ_RAS3_MASK 0x000007
+#define SHIRQ_INTRCOMM_RAS_MASK 0x3FF800
+
+#endif /* __MACH_SPEAR320_H */
+
+#endif /* CONFIG_MACH_SPEAR320 */
diff --git a/arch/arm/mach-spear3xx/include/mach/system.h b/arch/arm/mach-spear3xx/include/mach/system.h
new file mode 100644
index 000000000000..92cee6335c90
--- /dev/null
+++ b/arch/arm/mach-spear3xx/include/mach/system.h
@@ -0,0 +1,19 @@
+/*
+ * arch/arm/mach-spear3xx/include/mach/system.h
+ *
+ * SPEAr3xx Machine family specific architecture functions
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_SYSTEM_H
+#define __MACH_SYSTEM_H
+
+#include <plat/system.h>
+
+#endif /* __MACH_SYSTEM_H */
diff --git a/arch/arm/mach-spear3xx/include/mach/timex.h b/arch/arm/mach-spear3xx/include/mach/timex.h
new file mode 100644
index 000000000000..a38cc9de876f
--- /dev/null
+++ b/arch/arm/mach-spear3xx/include/mach/timex.h
@@ -0,0 +1,19 @@
+/*
+ * arch/arm/mach-spear3xx/include/mach/timex.h
+ *
+ * SPEAr3XX machine family specific timex definitions
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_TIMEX_H
+#define __MACH_TIMEX_H
+
+#include <plat/timex.h>
+
+#endif /* __MACH_TIMEX_H */
diff --git a/arch/arm/mach-spear3xx/include/mach/uncompress.h b/arch/arm/mach-spear3xx/include/mach/uncompress.h
new file mode 100644
index 000000000000..53ba8bbc0dfa
--- /dev/null
+++ b/arch/arm/mach-spear3xx/include/mach/uncompress.h
@@ -0,0 +1,19 @@
+/*
+ * arch/arm/mach-spear3xx/include/mach/uncompress.h
+ *
+ * Serial port stubs for kernel decompress status messages
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_UNCOMPRESS_H
+#define __MACH_UNCOMPRESS_H
+
+#include <plat/uncompress.h>
+
+#endif /* __MACH_UNCOMPRESS_H */
diff --git a/arch/arm/mach-spear3xx/include/mach/vmalloc.h b/arch/arm/mach-spear3xx/include/mach/vmalloc.h
new file mode 100644
index 000000000000..df977b3c9a63
--- /dev/null
+++ b/arch/arm/mach-spear3xx/include/mach/vmalloc.h
@@ -0,0 +1,19 @@
+/*
+ * arch/arm/mach-spear3xx/include/mach/vmalloc.h
+ *
+ * Defining Vmalloc area for SPEAr3xx machine family
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_VMALLOC_H
+#define __MACH_VMALLOC_H
+
+#include <plat/vmalloc.h>
+
+#endif /* __MACH_VMALLOC_H */
diff --git a/arch/arm/mach-spear3xx/spear300.c b/arch/arm/mach-spear3xx/spear300.c
new file mode 100644
index 000000000000..3560f8c1e723
--- /dev/null
+++ b/arch/arm/mach-spear3xx/spear300.c
@@ -0,0 +1,468 @@
+/*
+ * arch/arm/mach-spear3xx/spear300.c
+ *
+ * SPEAr300 machine source file
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/types.h>
+#include <linux/amba/pl061.h>
+#include <linux/ptrace.h>
+#include <asm/irq.h>
+#include <mach/generic.h>
+#include <mach/spear.h>
+#include <plat/shirq.h>
+
+/* pad multiplexing support */
+/* muxing registers */
+#define PAD_MUX_CONFIG_REG 0x00
+#define MODE_CONFIG_REG 0x04
+
+/* modes */
+#define NAND_MODE (1 << 0)
+#define NOR_MODE (1 << 1)
+#define PHOTO_FRAME_MODE (1 << 2)
+#define LEND_IP_PHONE_MODE (1 << 3)
+#define HEND_IP_PHONE_MODE (1 << 4)
+#define LEND_WIFI_PHONE_MODE (1 << 5)
+#define HEND_WIFI_PHONE_MODE (1 << 6)
+#define ATA_PABX_WI2S_MODE (1 << 7)
+#define ATA_PABX_I2S_MODE (1 << 8)
+#define CAML_LCDW_MODE (1 << 9)
+#define CAMU_LCD_MODE (1 << 10)
+#define CAMU_WLCD_MODE (1 << 11)
+#define CAML_LCD_MODE (1 << 12)
+#define ALL_MODES 0x1FFF
+
+struct pmx_mode nand_mode = {
+ .id = NAND_MODE,
+ .name = "nand mode",
+ .mask = 0x00,
+};
+
+struct pmx_mode nor_mode = {
+ .id = NOR_MODE,
+ .name = "nor mode",
+ .mask = 0x01,
+};
+
+struct pmx_mode photo_frame_mode = {
+ .id = PHOTO_FRAME_MODE,
+ .name = "photo frame mode",
+ .mask = 0x02,
+};
+
+struct pmx_mode lend_ip_phone_mode = {
+ .id = LEND_IP_PHONE_MODE,
+ .name = "lend ip phone mode",
+ .mask = 0x03,
+};
+
+struct pmx_mode hend_ip_phone_mode = {
+ .id = HEND_IP_PHONE_MODE,
+ .name = "hend ip phone mode",
+ .mask = 0x04,
+};
+
+struct pmx_mode lend_wifi_phone_mode = {
+ .id = LEND_WIFI_PHONE_MODE,
+ .name = "lend wifi phone mode",
+ .mask = 0x05,
+};
+
+struct pmx_mode hend_wifi_phone_mode = {
+ .id = HEND_WIFI_PHONE_MODE,
+ .name = "hend wifi phone mode",
+ .mask = 0x06,
+};
+
+struct pmx_mode ata_pabx_wi2s_mode = {
+ .id = ATA_PABX_WI2S_MODE,
+ .name = "ata pabx wi2s mode",
+ .mask = 0x07,
+};
+
+struct pmx_mode ata_pabx_i2s_mode = {
+ .id = ATA_PABX_I2S_MODE,
+ .name = "ata pabx i2s mode",
+ .mask = 0x08,
+};
+
+struct pmx_mode caml_lcdw_mode = {
+ .id = CAML_LCDW_MODE,
+ .name = "caml lcdw mode",
+ .mask = 0x0C,
+};
+
+struct pmx_mode camu_lcd_mode = {
+ .id = CAMU_LCD_MODE,
+ .name = "camu lcd mode",
+ .mask = 0x0D,
+};
+
+struct pmx_mode camu_wlcd_mode = {
+ .id = CAMU_WLCD_MODE,
+ .name = "camu wlcd mode",
+ .mask = 0x0E,
+};
+
+struct pmx_mode caml_lcd_mode = {
+ .id = CAML_LCD_MODE,
+ .name = "caml lcd mode",
+ .mask = 0x0F,
+};
+
+/* devices */
+struct pmx_dev_mode pmx_fsmc_2_chips_modes[] = {
+ {
+ .ids = NAND_MODE | NOR_MODE | PHOTO_FRAME_MODE |
+ ATA_PABX_WI2S_MODE | ATA_PABX_I2S_MODE,
+ .mask = PMX_FIRDA_MASK,
+ },
+};
+
+struct pmx_dev pmx_fsmc_2_chips = {
+ .name = "fsmc_2_chips",
+ .modes = pmx_fsmc_2_chips_modes,
+ .mode_count = ARRAY_SIZE(pmx_fsmc_2_chips_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_fsmc_4_chips_modes[] = {
+ {
+ .ids = NAND_MODE | NOR_MODE | PHOTO_FRAME_MODE |
+ ATA_PABX_WI2S_MODE | ATA_PABX_I2S_MODE,
+ .mask = PMX_FIRDA_MASK | PMX_UART0_MASK,
+ },
+};
+
+struct pmx_dev pmx_fsmc_4_chips = {
+ .name = "fsmc_4_chips",
+ .modes = pmx_fsmc_4_chips_modes,
+ .mode_count = ARRAY_SIZE(pmx_fsmc_4_chips_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_keyboard_modes[] = {
+ {
+ .ids = LEND_IP_PHONE_MODE | HEND_IP_PHONE_MODE |
+ LEND_WIFI_PHONE_MODE | HEND_WIFI_PHONE_MODE |
+ CAML_LCDW_MODE | CAMU_LCD_MODE | CAMU_WLCD_MODE |
+ CAML_LCD_MODE,
+ .mask = 0x0,
+ },
+};
+
+struct pmx_dev pmx_keyboard = {
+ .name = "keyboard",
+ .modes = pmx_keyboard_modes,
+ .mode_count = ARRAY_SIZE(pmx_keyboard_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_clcd_modes[] = {
+ {
+ .ids = PHOTO_FRAME_MODE,
+		.mask = PMX_TIMER_1_2_MASK | PMX_TIMER_3_4_MASK,
+ }, {
+ .ids = HEND_IP_PHONE_MODE | HEND_WIFI_PHONE_MODE |
+ CAMU_LCD_MODE | CAML_LCD_MODE,
+ .mask = PMX_TIMER_3_4_MASK,
+ },
+};
+
+struct pmx_dev pmx_clcd = {
+ .name = "clcd",
+ .modes = pmx_clcd_modes,
+ .mode_count = ARRAY_SIZE(pmx_clcd_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_telecom_gpio_modes[] = {
+ {
+ .ids = PHOTO_FRAME_MODE | CAMU_LCD_MODE | CAML_LCD_MODE,
+ .mask = PMX_MII_MASK,
+ }, {
+ .ids = LEND_IP_PHONE_MODE | LEND_WIFI_PHONE_MODE,
+ .mask = PMX_MII_MASK | PMX_TIMER_1_2_MASK | PMX_TIMER_3_4_MASK,
+ }, {
+ .ids = ATA_PABX_I2S_MODE | CAML_LCDW_MODE | CAMU_WLCD_MODE,
+ .mask = PMX_MII_MASK | PMX_TIMER_3_4_MASK,
+ }, {
+ .ids = HEND_IP_PHONE_MODE | HEND_WIFI_PHONE_MODE,
+ .mask = PMX_MII_MASK | PMX_TIMER_1_2_MASK,
+ }, {
+ .ids = ATA_PABX_WI2S_MODE,
+ .mask = PMX_MII_MASK | PMX_TIMER_1_2_MASK | PMX_TIMER_3_4_MASK
+ | PMX_UART0_MODEM_MASK,
+ },
+};
+
+struct pmx_dev pmx_telecom_gpio = {
+ .name = "telecom_gpio",
+ .modes = pmx_telecom_gpio_modes,
+ .mode_count = ARRAY_SIZE(pmx_telecom_gpio_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_telecom_tdm_modes[] = {
+ {
+ .ids = PHOTO_FRAME_MODE | LEND_IP_PHONE_MODE |
+ HEND_IP_PHONE_MODE | LEND_WIFI_PHONE_MODE
+ | HEND_WIFI_PHONE_MODE | ATA_PABX_WI2S_MODE
+ | ATA_PABX_I2S_MODE | CAML_LCDW_MODE | CAMU_LCD_MODE
+ | CAMU_WLCD_MODE | CAML_LCD_MODE,
+ .mask = PMX_UART0_MODEM_MASK | PMX_SSP_CS_MASK,
+ },
+};
+
+struct pmx_dev pmx_telecom_tdm = {
+ .name = "telecom_tdm",
+ .modes = pmx_telecom_tdm_modes,
+ .mode_count = ARRAY_SIZE(pmx_telecom_tdm_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_telecom_spi_cs_i2c_clk_modes[] = {
+ {
+ .ids = LEND_IP_PHONE_MODE | HEND_IP_PHONE_MODE |
+ LEND_WIFI_PHONE_MODE | HEND_WIFI_PHONE_MODE
+ | ATA_PABX_WI2S_MODE | ATA_PABX_I2S_MODE |
+ CAML_LCDW_MODE | CAML_LCD_MODE,
+ .mask = PMX_TIMER_1_2_MASK | PMX_TIMER_3_4_MASK,
+ },
+};
+
+struct pmx_dev pmx_telecom_spi_cs_i2c_clk = {
+ .name = "telecom_spi_cs_i2c_clk",
+ .modes = pmx_telecom_spi_cs_i2c_clk_modes,
+ .mode_count = ARRAY_SIZE(pmx_telecom_spi_cs_i2c_clk_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_telecom_camera_modes[] = {
+ {
+ .ids = CAML_LCDW_MODE | CAML_LCD_MODE,
+ .mask = PMX_MII_MASK,
+ }, {
+ .ids = CAMU_LCD_MODE | CAMU_WLCD_MODE,
+ .mask = PMX_TIMER_1_2_MASK | PMX_TIMER_3_4_MASK | PMX_MII_MASK,
+ },
+};
+
+struct pmx_dev pmx_telecom_camera = {
+ .name = "telecom_camera",
+ .modes = pmx_telecom_camera_modes,
+ .mode_count = ARRAY_SIZE(pmx_telecom_camera_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_telecom_dac_modes[] = {
+ {
+ .ids = ATA_PABX_I2S_MODE | CAML_LCDW_MODE | CAMU_LCD_MODE
+ | CAMU_WLCD_MODE | CAML_LCD_MODE,
+ .mask = PMX_TIMER_1_2_MASK,
+ },
+};
+
+struct pmx_dev pmx_telecom_dac = {
+ .name = "telecom_dac",
+ .modes = pmx_telecom_dac_modes,
+ .mode_count = ARRAY_SIZE(pmx_telecom_dac_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_telecom_i2s_modes[] = {
+ {
+ .ids = LEND_IP_PHONE_MODE | HEND_IP_PHONE_MODE
+ | LEND_WIFI_PHONE_MODE | HEND_WIFI_PHONE_MODE |
+ ATA_PABX_I2S_MODE | CAML_LCDW_MODE | CAMU_LCD_MODE
+ | CAMU_WLCD_MODE | CAML_LCD_MODE,
+ .mask = PMX_UART0_MODEM_MASK,
+ },
+};
+
+struct pmx_dev pmx_telecom_i2s = {
+ .name = "telecom_i2s",
+ .modes = pmx_telecom_i2s_modes,
+ .mode_count = ARRAY_SIZE(pmx_telecom_i2s_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_telecom_boot_pins_modes[] = {
+ {
+ .ids = NAND_MODE | NOR_MODE,
+ .mask = PMX_UART0_MODEM_MASK | PMX_TIMER_1_2_MASK |
+ PMX_TIMER_3_4_MASK,
+ },
+};
+
+struct pmx_dev pmx_telecom_boot_pins = {
+ .name = "telecom_boot_pins",
+ .modes = pmx_telecom_boot_pins_modes,
+ .mode_count = ARRAY_SIZE(pmx_telecom_boot_pins_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_telecom_sdio_4bit_modes[] = {
+ {
+ .ids = PHOTO_FRAME_MODE | LEND_IP_PHONE_MODE |
+ HEND_IP_PHONE_MODE | LEND_WIFI_PHONE_MODE |
+ HEND_WIFI_PHONE_MODE | CAML_LCDW_MODE | CAMU_LCD_MODE |
+ CAMU_WLCD_MODE | CAML_LCD_MODE | ATA_PABX_WI2S_MODE |
+ ATA_PABX_I2S_MODE,
+ .mask = PMX_GPIO_PIN0_MASK | PMX_GPIO_PIN1_MASK |
+ PMX_GPIO_PIN2_MASK | PMX_GPIO_PIN3_MASK |
+ PMX_GPIO_PIN4_MASK | PMX_GPIO_PIN5_MASK,
+ },
+};
+
+struct pmx_dev pmx_telecom_sdio_4bit = {
+ .name = "telecom_sdio_4bit",
+ .modes = pmx_telecom_sdio_4bit_modes,
+ .mode_count = ARRAY_SIZE(pmx_telecom_sdio_4bit_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_telecom_sdio_8bit_modes[] = {
+ {
+ .ids = PHOTO_FRAME_MODE | LEND_IP_PHONE_MODE |
+ HEND_IP_PHONE_MODE | LEND_WIFI_PHONE_MODE |
+ HEND_WIFI_PHONE_MODE | CAML_LCDW_MODE | CAMU_LCD_MODE |
+ CAMU_WLCD_MODE | CAML_LCD_MODE,
+ .mask = PMX_GPIO_PIN0_MASK | PMX_GPIO_PIN1_MASK |
+ PMX_GPIO_PIN2_MASK | PMX_GPIO_PIN3_MASK |
+ PMX_GPIO_PIN4_MASK | PMX_GPIO_PIN5_MASK | PMX_MII_MASK,
+ },
+};
+
+struct pmx_dev pmx_telecom_sdio_8bit = {
+ .name = "telecom_sdio_8bit",
+ .modes = pmx_telecom_sdio_8bit_modes,
+ .mode_count = ARRAY_SIZE(pmx_telecom_sdio_8bit_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_gpio1_modes[] = {
+ {
+ .ids = PHOTO_FRAME_MODE,
+ .mask = PMX_UART0_MODEM_MASK | PMX_TIMER_1_2_MASK |
+ PMX_TIMER_3_4_MASK,
+ },
+};
+
+struct pmx_dev pmx_gpio1 = {
+ .name = "arm gpio1",
+ .modes = pmx_gpio1_modes,
+ .mode_count = ARRAY_SIZE(pmx_gpio1_modes),
+ .enb_on_reset = 1,
+};
+
+/* pmx driver structure */
+struct pmx_driver pmx_driver = {
+ .mode_reg = {.offset = MODE_CONFIG_REG, .mask = 0x0000000f},
+ .mux_reg = {.offset = PAD_MUX_CONFIG_REG, .mask = 0x00007fff},
+};
+
+/* Add spear300 specific devices here */
+/* arm gpio1 device registration */
+static struct pl061_platform_data gpio1_plat_data = {
+ .gpio_base = 8,
+ .irq_base = SPEAR_GPIO1_INT_BASE,
+};
+
+struct amba_device gpio1_device = {
+ .dev = {
+ .init_name = "gpio1",
+ .platform_data = &gpio1_plat_data,
+ },
+ .res = {
+ .start = SPEAR300_GPIO_BASE,
+ .end = SPEAR300_GPIO_BASE + SPEAR300_GPIO_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ .irq = {VIRQ_GPIO1, NO_IRQ},
+};
+
+/* spear3xx shared irq */
+struct shirq_dev_config shirq_ras1_config[] = {
+ {
+ .virq = VIRQ_IT_PERS_S,
+ .enb_mask = IT_PERS_S_IRQ_MASK,
+ .status_mask = IT_PERS_S_IRQ_MASK,
+ }, {
+ .virq = VIRQ_IT_CHANGE_S,
+ .enb_mask = IT_CHANGE_S_IRQ_MASK,
+ .status_mask = IT_CHANGE_S_IRQ_MASK,
+ }, {
+ .virq = VIRQ_I2S,
+ .enb_mask = I2S_IRQ_MASK,
+ .status_mask = I2S_IRQ_MASK,
+ }, {
+ .virq = VIRQ_TDM,
+ .enb_mask = TDM_IRQ_MASK,
+ .status_mask = TDM_IRQ_MASK,
+ }, {
+ .virq = VIRQ_CAMERA_L,
+ .enb_mask = CAMERA_L_IRQ_MASK,
+ .status_mask = CAMERA_L_IRQ_MASK,
+ }, {
+ .virq = VIRQ_CAMERA_F,
+ .enb_mask = CAMERA_F_IRQ_MASK,
+ .status_mask = CAMERA_F_IRQ_MASK,
+ }, {
+ .virq = VIRQ_CAMERA_V,
+ .enb_mask = CAMERA_V_IRQ_MASK,
+ .status_mask = CAMERA_V_IRQ_MASK,
+ }, {
+ .virq = VIRQ_KEYBOARD,
+ .enb_mask = KEYBOARD_IRQ_MASK,
+ .status_mask = KEYBOARD_IRQ_MASK,
+ }, {
+ .virq = VIRQ_GPIO1,
+ .enb_mask = GPIO1_IRQ_MASK,
+ .status_mask = GPIO1_IRQ_MASK,
+ },
+};
+
+struct spear_shirq shirq_ras1 = {
+ .irq = IRQ_GEN_RAS_1,
+ .dev_config = shirq_ras1_config,
+ .dev_count = ARRAY_SIZE(shirq_ras1_config),
+ .regs = {
+ .enb_reg = INT_ENB_MASK_REG,
+ .status_reg = INT_STS_MASK_REG,
+ .status_reg_mask = SHIRQ_RAS1_MASK,
+ .clear_reg = -1,
+ },
+};
+
+/* spear300 routines */
+void __init spear300_init(void)
+{
+ int ret = 0;
+
+ /* call spear3xx family common init function */
+ spear3xx_init();
+
+	/* shared irq registration */
+ shirq_ras1.regs.base =
+ ioremap(SPEAR300_TELECOM_BASE, SPEAR300_TELECOM_REG_SIZE);
+ if (shirq_ras1.regs.base) {
+ ret = spear_shirq_register(&shirq_ras1);
+ if (ret)
+ printk(KERN_ERR "Error registering Shared IRQ\n");
+ }
+}
+
+void spear300_pmx_init(void)
+{
+ spear_pmx_init(&pmx_driver, SPEAR300_SOC_CONFIG_BASE,
+ SPEAR300_SOC_CONFIG_SIZE);
+}
diff --git a/arch/arm/mach-spear3xx/spear300_evb.c b/arch/arm/mach-spear3xx/spear300_evb.c
new file mode 100644
index 000000000000..bb21db152a23
--- /dev/null
+++ b/arch/arm/mach-spear3xx/spear300_evb.c
@@ -0,0 +1,77 @@
+/*
+ * arch/arm/mach-spear3xx/spear300_evb.c
+ *
+ * SPEAr300 evaluation board source file
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <asm/mach/arch.h>
+#include <asm/mach-types.h>
+#include <mach/generic.h>
+#include <mach/spear.h>
+
+/* padmux devices to enable */
+static struct pmx_dev *pmx_devs[] = {
+ /* spear3xx specific devices */
+ &pmx_i2c,
+ &pmx_ssp_cs,
+ &pmx_ssp,
+ &pmx_mii,
+ &pmx_uart0,
+
+ /* spear300 specific devices */
+ &pmx_fsmc_2_chips,
+ &pmx_clcd,
+ &pmx_telecom_sdio_4bit,
+ &pmx_gpio1,
+};
+
+static struct amba_device *amba_devs[] __initdata = {
+ /* spear3xx specific devices */
+ &gpio_device,
+ &uart_device,
+
+ /* spear300 specific devices */
+ &gpio1_device,
+};
+
+static struct platform_device *plat_devs[] __initdata = {
+ /* spear3xx specific devices */
+
+ /* spear300 specific devices */
+};
+
+static void __init spear300_evb_init(void)
+{
+ unsigned int i;
+
+ /* call spear300 machine init function */
+ spear300_init();
+
+ /* padmux initialization */
+ pmx_driver.mode = &photo_frame_mode;
+ pmx_driver.devs = pmx_devs;
+ pmx_driver.devs_count = ARRAY_SIZE(pmx_devs);
+ spear300_pmx_init();
+
+ /* Add Platform Devices */
+ platform_add_devices(plat_devs, ARRAY_SIZE(plat_devs));
+
+ /* Add Amba Devices */
+ for (i = 0; i < ARRAY_SIZE(amba_devs); i++)
+ amba_device_register(amba_devs[i], &iomem_resource);
+}
+
+MACHINE_START(SPEAR300, "ST-SPEAR300-EVB")
+ .boot_params = 0x00000100,
+ .map_io = spear3xx_map_io,
+ .init_irq = spear3xx_init_irq,
+ .timer = &spear_sys_timer,
+ .init_machine = spear300_evb_init,
+MACHINE_END
diff --git a/arch/arm/mach-spear3xx/spear310.c b/arch/arm/mach-spear3xx/spear310.c
new file mode 100644
index 000000000000..96a1ab824bac
--- /dev/null
+++ b/arch/arm/mach-spear3xx/spear310.c
@@ -0,0 +1,302 @@
+/*
+ * arch/arm/mach-spear3xx/spear310.c
+ *
+ * SPEAr310 machine source file
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/ptrace.h>
+#include <asm/irq.h>
+#include <mach/generic.h>
+#include <mach/spear.h>
+#include <plat/shirq.h>
+
+/* pad multiplexing support */
+/* muxing registers */
+#define PAD_MUX_CONFIG_REG 0x08
+
+/* devices */
+struct pmx_dev_mode pmx_emi_cs_0_1_4_5_modes[] = {
+ {
+ .ids = 0x00,
+ .mask = PMX_TIMER_3_4_MASK,
+ },
+};
+
+struct pmx_dev pmx_emi_cs_0_1_4_5 = {
+ .name = "emi_cs_0_1_4_5",
+ .modes = pmx_emi_cs_0_1_4_5_modes,
+ .mode_count = ARRAY_SIZE(pmx_emi_cs_0_1_4_5_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_emi_cs_2_3_modes[] = {
+ {
+ .ids = 0x00,
+ .mask = PMX_TIMER_1_2_MASK,
+ },
+};
+
+struct pmx_dev pmx_emi_cs_2_3 = {
+ .name = "emi_cs_2_3",
+ .modes = pmx_emi_cs_2_3_modes,
+ .mode_count = ARRAY_SIZE(pmx_emi_cs_2_3_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_uart1_modes[] = {
+ {
+ .ids = 0x00,
+ .mask = PMX_FIRDA_MASK,
+ },
+};
+
+struct pmx_dev pmx_uart1 = {
+ .name = "uart1",
+ .modes = pmx_uart1_modes,
+ .mode_count = ARRAY_SIZE(pmx_uart1_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_uart2_modes[] = {
+ {
+ .ids = 0x00,
+ .mask = PMX_TIMER_1_2_MASK,
+ },
+};
+
+struct pmx_dev pmx_uart2 = {
+ .name = "uart2",
+ .modes = pmx_uart2_modes,
+ .mode_count = ARRAY_SIZE(pmx_uart2_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_uart3_4_5_modes[] = {
+ {
+ .ids = 0x00,
+ .mask = PMX_UART0_MODEM_MASK,
+ },
+};
+
+struct pmx_dev pmx_uart3_4_5 = {
+ .name = "uart3_4_5",
+ .modes = pmx_uart3_4_5_modes,
+ .mode_count = ARRAY_SIZE(pmx_uart3_4_5_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_fsmc_modes[] = {
+ {
+ .ids = 0x00,
+ .mask = PMX_SSP_CS_MASK,
+ },
+};
+
+struct pmx_dev pmx_fsmc = {
+ .name = "fsmc",
+ .modes = pmx_fsmc_modes,
+ .mode_count = ARRAY_SIZE(pmx_fsmc_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_rs485_0_1_modes[] = {
+ {
+ .ids = 0x00,
+ .mask = PMX_MII_MASK,
+ },
+};
+
+struct pmx_dev pmx_rs485_0_1 = {
+ .name = "rs485_0_1",
+ .modes = pmx_rs485_0_1_modes,
+ .mode_count = ARRAY_SIZE(pmx_rs485_0_1_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_tdm0_modes[] = {
+ {
+ .ids = 0x00,
+ .mask = PMX_MII_MASK,
+ },
+};
+
+struct pmx_dev pmx_tdm0 = {
+ .name = "tdm0",
+ .modes = pmx_tdm0_modes,
+ .mode_count = ARRAY_SIZE(pmx_tdm0_modes),
+ .enb_on_reset = 1,
+};
+
+/* pmx driver structure */
+struct pmx_driver pmx_driver = {
+ .mux_reg = {.offset = PAD_MUX_CONFIG_REG, .mask = 0x00007fff},
+};
+
+/* Add spear310 specific devices here */
+
+/* spear3xx shared irq */
+struct shirq_dev_config shirq_ras1_config[] = {
+ {
+ .virq = VIRQ_SMII0,
+ .status_mask = SMII0_IRQ_MASK,
+ }, {
+ .virq = VIRQ_SMII1,
+ .status_mask = SMII1_IRQ_MASK,
+ }, {
+ .virq = VIRQ_SMII2,
+ .status_mask = SMII2_IRQ_MASK,
+ }, {
+ .virq = VIRQ_SMII3,
+ .status_mask = SMII3_IRQ_MASK,
+ }, {
+ .virq = VIRQ_WAKEUP_SMII0,
+ .status_mask = WAKEUP_SMII0_IRQ_MASK,
+ }, {
+ .virq = VIRQ_WAKEUP_SMII1,
+ .status_mask = WAKEUP_SMII1_IRQ_MASK,
+ }, {
+ .virq = VIRQ_WAKEUP_SMII2,
+ .status_mask = WAKEUP_SMII2_IRQ_MASK,
+ }, {
+ .virq = VIRQ_WAKEUP_SMII3,
+ .status_mask = WAKEUP_SMII3_IRQ_MASK,
+ },
+};
+
+struct spear_shirq shirq_ras1 = {
+ .irq = IRQ_GEN_RAS_1,
+ .dev_config = shirq_ras1_config,
+ .dev_count = ARRAY_SIZE(shirq_ras1_config),
+ .regs = {
+ .enb_reg = -1,
+ .status_reg = INT_STS_MASK_REG,
+ .status_reg_mask = SHIRQ_RAS1_MASK,
+ .clear_reg = -1,
+ },
+};
+
+struct shirq_dev_config shirq_ras2_config[] = {
+ {
+ .virq = VIRQ_UART1,
+ .status_mask = UART1_IRQ_MASK,
+ }, {
+ .virq = VIRQ_UART2,
+ .status_mask = UART2_IRQ_MASK,
+ }, {
+ .virq = VIRQ_UART3,
+ .status_mask = UART3_IRQ_MASK,
+ }, {
+ .virq = VIRQ_UART4,
+ .status_mask = UART4_IRQ_MASK,
+ }, {
+ .virq = VIRQ_UART5,
+ .status_mask = UART5_IRQ_MASK,
+ },
+};
+
+struct spear_shirq shirq_ras2 = {
+ .irq = IRQ_GEN_RAS_2,
+ .dev_config = shirq_ras2_config,
+ .dev_count = ARRAY_SIZE(shirq_ras2_config),
+ .regs = {
+ .enb_reg = -1,
+ .status_reg = INT_STS_MASK_REG,
+ .status_reg_mask = SHIRQ_RAS2_MASK,
+ .clear_reg = -1,
+ },
+};
+
+struct shirq_dev_config shirq_ras3_config[] = {
+ {
+ .virq = VIRQ_EMI,
+ .status_mask = EMI_IRQ_MASK,
+ },
+};
+
+struct spear_shirq shirq_ras3 = {
+ .irq = IRQ_GEN_RAS_3,
+ .dev_config = shirq_ras3_config,
+ .dev_count = ARRAY_SIZE(shirq_ras3_config),
+ .regs = {
+ .enb_reg = -1,
+ .status_reg = INT_STS_MASK_REG,
+ .status_reg_mask = SHIRQ_RAS3_MASK,
+ .clear_reg = -1,
+ },
+};
+
+struct shirq_dev_config shirq_intrcomm_ras_config[] = {
+ {
+ .virq = VIRQ_TDM_HDLC,
+ .status_mask = TDM_HDLC_IRQ_MASK,
+ }, {
+ .virq = VIRQ_RS485_0,
+ .status_mask = RS485_0_IRQ_MASK,
+ }, {
+ .virq = VIRQ_RS485_1,
+ .status_mask = RS485_1_IRQ_MASK,
+ },
+};
+
+struct spear_shirq shirq_intrcomm_ras = {
+ .irq = IRQ_INTRCOMM_RAS_ARM,
+ .dev_config = shirq_intrcomm_ras_config,
+ .dev_count = ARRAY_SIZE(shirq_intrcomm_ras_config),
+ .regs = {
+ .enb_reg = -1,
+ .status_reg = INT_STS_MASK_REG,
+ .status_reg_mask = SHIRQ_INTRCOMM_RAS_MASK,
+ .clear_reg = -1,
+ },
+};
+
+/* spear310 routines */
+void __init spear310_init(void)
+{
+ void __iomem *base;
+ int ret = 0;
+
+ /* call spear3xx family common init function */
+ spear3xx_init();
+
+	/* shared irq registration */
+ base = ioremap(SPEAR310_SOC_CONFIG_BASE, SPEAR310_SOC_CONFIG_SIZE);
+ if (base) {
+ /* shirq 1 */
+ shirq_ras1.regs.base = base;
+ ret = spear_shirq_register(&shirq_ras1);
+ if (ret)
+ printk(KERN_ERR "Error registering Shared IRQ 1\n");
+
+ /* shirq 2 */
+ shirq_ras2.regs.base = base;
+ ret = spear_shirq_register(&shirq_ras2);
+ if (ret)
+ printk(KERN_ERR "Error registering Shared IRQ 2\n");
+
+ /* shirq 3 */
+ shirq_ras3.regs.base = base;
+ ret = spear_shirq_register(&shirq_ras3);
+ if (ret)
+ printk(KERN_ERR "Error registering Shared IRQ 3\n");
+
+ /* shirq 4 */
+ shirq_intrcomm_ras.regs.base = base;
+ ret = spear_shirq_register(&shirq_intrcomm_ras);
+ if (ret)
+ printk(KERN_ERR "Error registering Shared IRQ 4\n");
+ }
+}
+
+void spear310_pmx_init(void)
+{
+ spear_pmx_init(&pmx_driver, SPEAR310_SOC_CONFIG_BASE,
+ SPEAR310_SOC_CONFIG_SIZE);
+}
diff --git a/arch/arm/mach-spear3xx/spear310_evb.c b/arch/arm/mach-spear3xx/spear310_evb.c
new file mode 100644
index 000000000000..7facf6643199
--- /dev/null
+++ b/arch/arm/mach-spear3xx/spear310_evb.c
@@ -0,0 +1,84 @@
+/*
+ * arch/arm/mach-spear3xx/spear310_evb.c
+ *
+ * SPEAr310 evaluation board source file
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <asm/mach/arch.h>
+#include <asm/mach-types.h>
+#include <mach/generic.h>
+#include <mach/spear.h>
+
+/* padmux devices to enable */
+static struct pmx_dev *pmx_devs[] = {
+ /* spear3xx specific devices */
+ &pmx_i2c,
+ &pmx_ssp,
+ &pmx_gpio_pin0,
+ &pmx_gpio_pin1,
+ &pmx_gpio_pin2,
+ &pmx_gpio_pin3,
+ &pmx_gpio_pin4,
+ &pmx_gpio_pin5,
+ &pmx_uart0,
+
+ /* spear310 specific devices */
+ &pmx_emi_cs_0_1_4_5,
+ &pmx_emi_cs_2_3,
+ &pmx_uart1,
+ &pmx_uart2,
+ &pmx_uart3_4_5,
+ &pmx_fsmc,
+ &pmx_rs485_0_1,
+ &pmx_tdm0,
+};
+
+static struct amba_device *amba_devs[] __initdata = {
+ /* spear3xx specific devices */
+ &gpio_device,
+ &uart_device,
+
+ /* spear310 specific devices */
+};
+
+static struct platform_device *plat_devs[] __initdata = {
+ /* spear3xx specific devices */
+
+ /* spear310 specific devices */
+};
+
+static void __init spear310_evb_init(void)
+{
+ unsigned int i;
+
+ /* call spear310 machine init function */
+ spear310_init();
+
+ /* padmux initialization */
+ pmx_driver.mode = NULL;
+ pmx_driver.devs = pmx_devs;
+ pmx_driver.devs_count = ARRAY_SIZE(pmx_devs);
+ spear310_pmx_init();
+
+ /* Add Platform Devices */
+ platform_add_devices(plat_devs, ARRAY_SIZE(plat_devs));
+
+ /* Add Amba Devices */
+ for (i = 0; i < ARRAY_SIZE(amba_devs); i++)
+ amba_device_register(amba_devs[i], &iomem_resource);
+}
+
+MACHINE_START(SPEAR310, "ST-SPEAR310-EVB")
+ .boot_params = 0x00000100,
+ .map_io = spear3xx_map_io,
+ .init_irq = spear3xx_init_irq,
+ .timer = &spear_sys_timer,
+ .init_machine = spear310_evb_init,
+MACHINE_END
diff --git a/arch/arm/mach-spear3xx/spear320.c b/arch/arm/mach-spear3xx/spear320.c
new file mode 100644
index 000000000000..6a1219549369
--- /dev/null
+++ b/arch/arm/mach-spear3xx/spear320.c
@@ -0,0 +1,549 @@
+/*
+ * arch/arm/mach-spear3xx/spear320.c
+ *
+ * SPEAr320 machine source file
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/ptrace.h>
+#include <asm/irq.h>
+#include <mach/generic.h>
+#include <mach/spear.h>
+#include <plat/shirq.h>
+
+/* pad multiplexing support */
+/* muxing registers */
+#define PAD_MUX_CONFIG_REG 0x0C
+#define MODE_CONFIG_REG 0x10
+
+/* modes */
+#define AUTO_NET_SMII_MODE (1 << 0)
+#define AUTO_NET_MII_MODE (1 << 1)
+#define AUTO_EXP_MODE (1 << 2)
+#define SMALL_PRINTERS_MODE (1 << 3)
+#define ALL_MODES 0xF
+
+struct pmx_mode auto_net_smii_mode = {
+ .id = AUTO_NET_SMII_MODE,
+ .name = "Automation Networking SMII Mode",
+ .mask = 0x00,
+};
+
+struct pmx_mode auto_net_mii_mode = {
+ .id = AUTO_NET_MII_MODE,
+ .name = "Automation Networking MII Mode",
+ .mask = 0x01,
+};
+
+struct pmx_mode auto_exp_mode = {
+ .id = AUTO_EXP_MODE,
+ .name = "Automation Expanded Mode",
+ .mask = 0x02,
+};
+
+struct pmx_mode small_printers_mode = {
+ .id = SMALL_PRINTERS_MODE,
+ .name = "Small Printers Mode",
+ .mask = 0x03,
+};
+
+/* devices */
+struct pmx_dev_mode pmx_clcd_modes[] = {
+ {
+ .ids = AUTO_NET_SMII_MODE,
+ .mask = 0x0,
+ },
+};
+
+struct pmx_dev pmx_clcd = {
+ .name = "clcd",
+ .modes = pmx_clcd_modes,
+ .mode_count = ARRAY_SIZE(pmx_clcd_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_emi_modes[] = {
+ {
+ .ids = AUTO_EXP_MODE,
+ .mask = PMX_TIMER_1_2_MASK | PMX_TIMER_3_4_MASK,
+ },
+};
+
+struct pmx_dev pmx_emi = {
+ .name = "emi",
+ .modes = pmx_emi_modes,
+ .mode_count = ARRAY_SIZE(pmx_emi_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_fsmc_modes[] = {
+ {
+ .ids = ALL_MODES,
+ .mask = 0x0,
+ },
+};
+
+struct pmx_dev pmx_fsmc = {
+ .name = "fsmc",
+ .modes = pmx_fsmc_modes,
+ .mode_count = ARRAY_SIZE(pmx_fsmc_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_spp_modes[] = {
+ {
+ .ids = SMALL_PRINTERS_MODE,
+ .mask = 0x0,
+ },
+};
+
+struct pmx_dev pmx_spp = {
+ .name = "spp",
+ .modes = pmx_spp_modes,
+ .mode_count = ARRAY_SIZE(pmx_spp_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_sdio_modes[] = {
+ {
+ .ids = AUTO_NET_SMII_MODE | AUTO_NET_MII_MODE |
+ SMALL_PRINTERS_MODE,
+ .mask = PMX_TIMER_1_2_MASK | PMX_TIMER_3_4_MASK,
+ },
+};
+
+struct pmx_dev pmx_sdio = {
+ .name = "sdio",
+ .modes = pmx_sdio_modes,
+ .mode_count = ARRAY_SIZE(pmx_sdio_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_i2s_modes[] = {
+ {
+ .ids = AUTO_NET_SMII_MODE | AUTO_NET_MII_MODE,
+ .mask = PMX_UART0_MODEM_MASK,
+ },
+};
+
+struct pmx_dev pmx_i2s = {
+ .name = "i2s",
+ .modes = pmx_i2s_modes,
+ .mode_count = ARRAY_SIZE(pmx_i2s_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_uart1_modes[] = {
+ {
+ .ids = ALL_MODES,
+ .mask = PMX_GPIO_PIN0_MASK | PMX_GPIO_PIN1_MASK,
+ },
+};
+
+struct pmx_dev pmx_uart1 = {
+ .name = "uart1",
+ .modes = pmx_uart1_modes,
+ .mode_count = ARRAY_SIZE(pmx_uart1_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_uart1_modem_modes[] = {
+ {
+ .ids = AUTO_EXP_MODE,
+ .mask = PMX_TIMER_1_2_MASK | PMX_TIMER_3_4_MASK |
+ PMX_SSP_CS_MASK,
+ }, {
+ .ids = SMALL_PRINTERS_MODE,
+ .mask = PMX_GPIO_PIN3_MASK | PMX_GPIO_PIN4_MASK |
+ PMX_GPIO_PIN5_MASK | PMX_SSP_CS_MASK,
+ },
+};
+
+struct pmx_dev pmx_uart1_modem = {
+ .name = "uart1_modem",
+ .modes = pmx_uart1_modem_modes,
+ .mode_count = ARRAY_SIZE(pmx_uart1_modem_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_uart2_modes[] = {
+ {
+ .ids = ALL_MODES,
+ .mask = PMX_FIRDA_MASK,
+ },
+};
+
+struct pmx_dev pmx_uart2 = {
+ .name = "uart2",
+ .modes = pmx_uart2_modes,
+ .mode_count = ARRAY_SIZE(pmx_uart2_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_touchscreen_modes[] = {
+ {
+ .ids = AUTO_NET_SMII_MODE,
+ .mask = PMX_SSP_CS_MASK,
+ },
+};
+
+struct pmx_dev pmx_touchscreen = {
+ .name = "touchscreen",
+ .modes = pmx_touchscreen_modes,
+ .mode_count = ARRAY_SIZE(pmx_touchscreen_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_can_modes[] = {
+ {
+ .ids = AUTO_NET_SMII_MODE | AUTO_NET_MII_MODE | AUTO_EXP_MODE,
+ .mask = PMX_GPIO_PIN2_MASK | PMX_GPIO_PIN3_MASK |
+ PMX_GPIO_PIN4_MASK | PMX_GPIO_PIN5_MASK,
+ },
+};
+
+struct pmx_dev pmx_can = {
+ .name = "can",
+ .modes = pmx_can_modes,
+ .mode_count = ARRAY_SIZE(pmx_can_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_sdio_led_modes[] = {
+ {
+ .ids = AUTO_NET_SMII_MODE | AUTO_NET_MII_MODE,
+ .mask = PMX_SSP_CS_MASK,
+ },
+};
+
+struct pmx_dev pmx_sdio_led = {
+ .name = "sdio_led",
+ .modes = pmx_sdio_led_modes,
+ .mode_count = ARRAY_SIZE(pmx_sdio_led_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_pwm0_modes[] = {
+ {
+ .ids = AUTO_NET_SMII_MODE | AUTO_NET_MII_MODE,
+ .mask = PMX_UART0_MODEM_MASK,
+ }, {
+ .ids = AUTO_EXP_MODE | SMALL_PRINTERS_MODE,
+ .mask = PMX_MII_MASK,
+ },
+};
+
+struct pmx_dev pmx_pwm0 = {
+ .name = "pwm0",
+ .modes = pmx_pwm0_modes,
+ .mode_count = ARRAY_SIZE(pmx_pwm0_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_pwm1_modes[] = {
+ {
+ .ids = AUTO_NET_SMII_MODE | AUTO_NET_MII_MODE,
+ .mask = PMX_UART0_MODEM_MASK,
+ }, {
+ .ids = AUTO_EXP_MODE | SMALL_PRINTERS_MODE,
+ .mask = PMX_MII_MASK,
+ },
+};
+
+struct pmx_dev pmx_pwm1 = {
+ .name = "pwm1",
+ .modes = pmx_pwm1_modes,
+ .mode_count = ARRAY_SIZE(pmx_pwm1_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_pwm2_modes[] = {
+ {
+ .ids = AUTO_NET_SMII_MODE | AUTO_NET_MII_MODE,
+ .mask = PMX_SSP_CS_MASK,
+ }, {
+ .ids = AUTO_EXP_MODE | SMALL_PRINTERS_MODE,
+ .mask = PMX_MII_MASK,
+ },
+};
+
+struct pmx_dev pmx_pwm2 = {
+ .name = "pwm2",
+ .modes = pmx_pwm2_modes,
+ .mode_count = ARRAY_SIZE(pmx_pwm2_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_pwm3_modes[] = {
+ {
+ .ids = AUTO_EXP_MODE | SMALL_PRINTERS_MODE | AUTO_NET_SMII_MODE,
+ .mask = PMX_MII_MASK,
+ },
+};
+
+struct pmx_dev pmx_pwm3 = {
+ .name = "pwm3",
+ .modes = pmx_pwm3_modes,
+ .mode_count = ARRAY_SIZE(pmx_pwm3_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_ssp1_modes[] = {
+ {
+ .ids = SMALL_PRINTERS_MODE | AUTO_NET_SMII_MODE,
+ .mask = PMX_MII_MASK,
+ },
+};
+
+struct pmx_dev pmx_ssp1 = {
+ .name = "ssp1",
+ .modes = pmx_ssp1_modes,
+ .mode_count = ARRAY_SIZE(pmx_ssp1_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_ssp2_modes[] = {
+ {
+ .ids = AUTO_NET_SMII_MODE,
+ .mask = PMX_MII_MASK,
+ },
+};
+
+struct pmx_dev pmx_ssp2 = {
+ .name = "ssp2",
+ .modes = pmx_ssp2_modes,
+ .mode_count = ARRAY_SIZE(pmx_ssp2_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_mii1_modes[] = {
+ {
+ .ids = AUTO_NET_MII_MODE,
+ .mask = 0x0,
+ },
+};
+
+struct pmx_dev pmx_mii1 = {
+ .name = "mii1",
+ .modes = pmx_mii1_modes,
+ .mode_count = ARRAY_SIZE(pmx_mii1_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_smii0_modes[] = {
+ {
+ .ids = AUTO_NET_SMII_MODE | AUTO_EXP_MODE | SMALL_PRINTERS_MODE,
+ .mask = PMX_MII_MASK,
+ },
+};
+
+struct pmx_dev pmx_smii0 = {
+ .name = "smii0",
+ .modes = pmx_smii0_modes,
+ .mode_count = ARRAY_SIZE(pmx_smii0_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_smii1_modes[] = {
+ {
+ .ids = AUTO_NET_SMII_MODE | SMALL_PRINTERS_MODE,
+ .mask = PMX_MII_MASK,
+ },
+};
+
+struct pmx_dev pmx_smii1 = {
+ .name = "smii1",
+ .modes = pmx_smii1_modes,
+ .mode_count = ARRAY_SIZE(pmx_smii1_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_i2c1_modes[] = {
+ {
+ .ids = AUTO_EXP_MODE,
+ .mask = 0x0,
+ },
+};
+
+struct pmx_dev pmx_i2c1 = {
+ .name = "i2c1",
+ .modes = pmx_i2c1_modes,
+ .mode_count = ARRAY_SIZE(pmx_i2c1_modes),
+ .enb_on_reset = 1,
+};
+
+/* pmx driver structure */
+struct pmx_driver pmx_driver = {
+ .mode_reg = {.offset = MODE_CONFIG_REG, .mask = 0x00000007},
+ .mux_reg = {.offset = PAD_MUX_CONFIG_REG, .mask = 0x00007fff},
+};
+
+/* Add spear320 specific devices here */
+
+/* spear3xx shared irq */
+struct shirq_dev_config shirq_ras1_config[] = {
+ {
+ .virq = VIRQ_EMI,
+ .status_mask = EMI_IRQ_MASK,
+ .clear_mask = EMI_IRQ_MASK,
+ }, {
+ .virq = VIRQ_CLCD,
+ .status_mask = CLCD_IRQ_MASK,
+ .clear_mask = CLCD_IRQ_MASK,
+ }, {
+ .virq = VIRQ_SPP,
+ .status_mask = SPP_IRQ_MASK,
+ .clear_mask = SPP_IRQ_MASK,
+ },
+};
+
+struct spear_shirq shirq_ras1 = {
+ .irq = IRQ_GEN_RAS_1,
+ .dev_config = shirq_ras1_config,
+ .dev_count = ARRAY_SIZE(shirq_ras1_config),
+ .regs = {
+ .enb_reg = -1,
+ .status_reg = INT_STS_MASK_REG,
+ .status_reg_mask = SHIRQ_RAS1_MASK,
+ .clear_reg = INT_CLR_MASK_REG,
+ .reset_to_clear = 1,
+ },
+};
+
+struct shirq_dev_config shirq_ras3_config[] = {
+ {
+ .virq = VIRQ_PLGPIO,
+ .enb_mask = GPIO_IRQ_MASK,
+ .status_mask = GPIO_IRQ_MASK,
+ .clear_mask = GPIO_IRQ_MASK,
+ }, {
+ .virq = VIRQ_I2S_PLAY,
+ .enb_mask = I2S_PLAY_IRQ_MASK,
+ .status_mask = I2S_PLAY_IRQ_MASK,
+ .clear_mask = I2S_PLAY_IRQ_MASK,
+ }, {
+ .virq = VIRQ_I2S_REC,
+ .enb_mask = I2S_REC_IRQ_MASK,
+ .status_mask = I2S_REC_IRQ_MASK,
+ .clear_mask = I2S_REC_IRQ_MASK,
+ },
+};
+
+struct spear_shirq shirq_ras3 = {
+ .irq = IRQ_GEN_RAS_3,
+ .dev_config = shirq_ras3_config,
+ .dev_count = ARRAY_SIZE(shirq_ras3_config),
+ .regs = {
+ .enb_reg = INT_ENB_MASK_REG,
+ .reset_to_enb = 1,
+ .status_reg = INT_STS_MASK_REG,
+ .status_reg_mask = SHIRQ_RAS3_MASK,
+ .clear_reg = INT_CLR_MASK_REG,
+ .reset_to_clear = 1,
+ },
+};
+
+struct shirq_dev_config shirq_intrcomm_ras_config[] = {
+ {
+ .virq = VIRQ_CANU,
+ .status_mask = CAN_U_IRQ_MASK,
+ .clear_mask = CAN_U_IRQ_MASK,
+ }, {
+ .virq = VIRQ_CANL,
+ .status_mask = CAN_L_IRQ_MASK,
+ .clear_mask = CAN_L_IRQ_MASK,
+ }, {
+ .virq = VIRQ_UART1,
+ .status_mask = UART1_IRQ_MASK,
+ .clear_mask = UART1_IRQ_MASK,
+ }, {
+ .virq = VIRQ_UART2,
+ .status_mask = UART2_IRQ_MASK,
+ .clear_mask = UART2_IRQ_MASK,
+ }, {
+ .virq = VIRQ_SSP1,
+ .status_mask = SSP1_IRQ_MASK,
+ .clear_mask = SSP1_IRQ_MASK,
+ }, {
+ .virq = VIRQ_SSP2,
+ .status_mask = SSP2_IRQ_MASK,
+ .clear_mask = SSP2_IRQ_MASK,
+ }, {
+ .virq = VIRQ_SMII0,
+ .status_mask = SMII0_IRQ_MASK,
+ .clear_mask = SMII0_IRQ_MASK,
+ }, {
+ .virq = VIRQ_MII1_SMII1,
+ .status_mask = MII1_SMII1_IRQ_MASK,
+ .clear_mask = MII1_SMII1_IRQ_MASK,
+ }, {
+ .virq = VIRQ_WAKEUP_SMII0,
+ .status_mask = WAKEUP_SMII0_IRQ_MASK,
+ .clear_mask = WAKEUP_SMII0_IRQ_MASK,
+ }, {
+ .virq = VIRQ_WAKEUP_MII1_SMII1,
+ .status_mask = WAKEUP_MII1_SMII1_IRQ_MASK,
+ .clear_mask = WAKEUP_MII1_SMII1_IRQ_MASK,
+ }, {
+ .virq = VIRQ_I2C,
+ .status_mask = I2C1_IRQ_MASK,
+ .clear_mask = I2C1_IRQ_MASK,
+ },
+};
+
+struct spear_shirq shirq_intrcomm_ras = {
+ .irq = IRQ_INTRCOMM_RAS_ARM,
+ .dev_config = shirq_intrcomm_ras_config,
+ .dev_count = ARRAY_SIZE(shirq_intrcomm_ras_config),
+ .regs = {
+ .enb_reg = -1,
+ .status_reg = INT_STS_MASK_REG,
+ .status_reg_mask = SHIRQ_INTRCOMM_RAS_MASK,
+ .clear_reg = INT_CLR_MASK_REG,
+ .reset_to_clear = 1,
+ },
+};
+
+/* spear320 routines */
+void __init spear320_init(void)
+{
+ void __iomem *base;
+ int ret = 0;
+
+ /* call spear3xx family common init function */
+ spear3xx_init();
+
+	/* shared irq registration */
+ base = ioremap(SPEAR320_SOC_CONFIG_BASE, SPEAR320_SOC_CONFIG_SIZE);
+ if (base) {
+ /* shirq 1 */
+ shirq_ras1.regs.base = base;
+ ret = spear_shirq_register(&shirq_ras1);
+ if (ret)
+ printk(KERN_ERR "Error registering Shared IRQ 1\n");
+
+ /* shirq 3 */
+ shirq_ras3.regs.base = base;
+ ret = spear_shirq_register(&shirq_ras3);
+ if (ret)
+ printk(KERN_ERR "Error registering Shared IRQ 3\n");
+
+ /* shirq 4 */
+ shirq_intrcomm_ras.regs.base = base;
+ ret = spear_shirq_register(&shirq_intrcomm_ras);
+ if (ret)
+ printk(KERN_ERR "Error registering Shared IRQ 4\n");
+ }
+}
+
+void spear320_pmx_init(void)
+{
+ spear_pmx_init(&pmx_driver, SPEAR320_SOC_CONFIG_BASE,
+ SPEAR320_SOC_CONFIG_SIZE);
+}
diff --git a/arch/arm/mach-spear3xx/spear320_evb.c b/arch/arm/mach-spear3xx/spear320_evb.c
new file mode 100644
index 000000000000..62ac685a4135
--- /dev/null
+++ b/arch/arm/mach-spear3xx/spear320_evb.c
@@ -0,0 +1,81 @@
+/*
+ * arch/arm/mach-spear3xx/spear320_evb.c
+ *
+ * SPEAr320 evaluation board source file
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <asm/mach/arch.h>
+#include <asm/mach-types.h>
+#include <mach/generic.h>
+#include <mach/spear.h>
+
+/* padmux devices to enable */
+static struct pmx_dev *pmx_devs[] = {
+ /* spear3xx specific devices */
+ &pmx_i2c,
+ &pmx_ssp,
+ &pmx_mii,
+ &pmx_uart0,
+
+ /* spear320 specific devices */
+ &pmx_fsmc,
+ &pmx_sdio,
+ &pmx_i2s,
+ &pmx_uart1,
+ &pmx_uart2,
+ &pmx_can,
+ &pmx_pwm0,
+ &pmx_pwm1,
+ &pmx_pwm2,
+ &pmx_mii1,
+};
+
+static struct amba_device *amba_devs[] __initdata = {
+ /* spear3xx specific devices */
+ &gpio_device,
+ &uart_device,
+
+ /* spear320 specific devices */
+};
+
+static struct platform_device *plat_devs[] __initdata = {
+ /* spear3xx specific devices */
+
+ /* spear320 specific devices */
+};
+
+static void __init spear320_evb_init(void)
+{
+ unsigned int i;
+
+ /* call spear320 machine init function */
+ spear320_init();
+
+ /* padmux initialization */
+ pmx_driver.mode = &auto_net_mii_mode;
+ pmx_driver.devs = pmx_devs;
+ pmx_driver.devs_count = ARRAY_SIZE(pmx_devs);
+ spear320_pmx_init();
+
+ /* Add Platform Devices */
+ platform_add_devices(plat_devs, ARRAY_SIZE(plat_devs));
+
+ /* Add Amba Devices */
+ for (i = 0; i < ARRAY_SIZE(amba_devs); i++)
+ amba_device_register(amba_devs[i], &iomem_resource);
+}
+
+MACHINE_START(SPEAR320, "ST-SPEAR320-EVB")
+ .boot_params = 0x00000100,
+ .map_io = spear3xx_map_io,
+ .init_irq = spear3xx_init_irq,
+ .timer = &spear_sys_timer,
+ .init_machine = spear320_evb_init,
+MACHINE_END
diff --git a/arch/arm/mach-spear3xx/spear3xx.c b/arch/arm/mach-spear3xx/spear3xx.c
new file mode 100644
index 000000000000..e87313aeae20
--- /dev/null
+++ b/arch/arm/mach-spear3xx/spear3xx.c
@@ -0,0 +1,548 @@
+/*
+ * arch/arm/mach-spear3xx/spear3xx.c
+ *
+ * SPEAr3XX machines common source file
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/types.h>
+#include <linux/amba/pl061.h>
+#include <linux/ptrace.h>
+#include <linux/io.h>
+#include <asm/hardware/vic.h>
+#include <asm/irq.h>
+#include <asm/mach/arch.h>
+#include <mach/generic.h>
+#include <mach/spear.h>
+
+/* Add spear3xx machines common devices here */
+/* gpio device registration */
+static struct pl061_platform_data gpio_plat_data = {
+ .gpio_base = 0,
+ .irq_base = SPEAR_GPIO_INT_BASE,
+};
+
+struct amba_device gpio_device = {
+ .dev = {
+ .init_name = "gpio",
+ .platform_data = &gpio_plat_data,
+ },
+ .res = {
+ .start = SPEAR3XX_ICM3_GPIO_BASE,
+ .end = SPEAR3XX_ICM3_GPIO_BASE + SPEAR3XX_ICM3_GPIO_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ .irq = {IRQ_BASIC_GPIO, NO_IRQ},
+};
+
+/* uart device registration */
+struct amba_device uart_device = {
+ .dev = {
+ .init_name = "uart",
+ },
+ .res = {
+ .start = SPEAR3XX_ICM1_UART_BASE,
+ .end = SPEAR3XX_ICM1_UART_BASE + SPEAR3XX_ICM1_UART_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ .irq = {IRQ_UART, NO_IRQ},
+};
+
+/* Do spear3xx family common initialization here */
+void __init spear3xx_init(void)
+{
+ /* nothing to do for now */
+}
+
+/* This will initialize vic */
+void __init spear3xx_init_irq(void)
+{
+ vic_init((void __iomem *)VA_SPEAR3XX_ML1_VIC_BASE, 0, ~0, 0);
+}
+
+/* Following will create static virtual/physical mappings */
+struct map_desc spear3xx_io_desc[] __initdata = {
+ {
+ .virtual = VA_SPEAR3XX_ICM1_UART_BASE,
+ .pfn = __phys_to_pfn(SPEAR3XX_ICM1_UART_BASE),
+ .length = SPEAR3XX_ICM1_UART_SIZE,
+ .type = MT_DEVICE
+ }, {
+ .virtual = VA_SPEAR3XX_ML1_VIC_BASE,
+ .pfn = __phys_to_pfn(SPEAR3XX_ML1_VIC_BASE),
+ .length = SPEAR3XX_ML1_VIC_SIZE,
+ .type = MT_DEVICE
+ }, {
+ .virtual = VA_SPEAR3XX_ICM3_SYS_CTRL_BASE,
+ .pfn = __phys_to_pfn(SPEAR3XX_ICM3_SYS_CTRL_BASE),
+ .length = SPEAR3XX_ICM3_SYS_CTRL_SIZE,
+ .type = MT_DEVICE
+ }, {
+ .virtual = VA_SPEAR3XX_ICM3_MISC_REG_BASE,
+ .pfn = __phys_to_pfn(SPEAR3XX_ICM3_MISC_REG_BASE),
+ .length = SPEAR3XX_ICM3_MISC_REG_SIZE,
+ .type = MT_DEVICE
+ },
+};
+
+/* This will create static memory mapping for selected devices */
+void __init spear3xx_map_io(void)
+{
+ iotable_init(spear3xx_io_desc, ARRAY_SIZE(spear3xx_io_desc));
+
+ /* This will initialize clock framework */
+ clk_init();
+}
+
+/* pad multiplexing support */
+/* devices */
+struct pmx_dev_mode pmx_firda_modes[] = {
+ {
+ .ids = 0xffffffff,
+ .mask = PMX_FIRDA_MASK,
+ },
+};
+
+struct pmx_dev pmx_firda = {
+ .name = "firda",
+ .modes = pmx_firda_modes,
+ .mode_count = ARRAY_SIZE(pmx_firda_modes),
+ .enb_on_reset = 0,
+};
+
+struct pmx_dev_mode pmx_i2c_modes[] = {
+ {
+ .ids = 0xffffffff,
+ .mask = PMX_I2C_MASK,
+ },
+};
+
+struct pmx_dev pmx_i2c = {
+ .name = "i2c",
+ .modes = pmx_i2c_modes,
+ .mode_count = ARRAY_SIZE(pmx_i2c_modes),
+ .enb_on_reset = 0,
+};
+
+struct pmx_dev_mode pmx_ssp_cs_modes[] = {
+ {
+ .ids = 0xffffffff,
+ .mask = PMX_SSP_CS_MASK,
+ },
+};
+
+struct pmx_dev pmx_ssp_cs = {
+ .name = "ssp_chip_selects",
+ .modes = pmx_ssp_cs_modes,
+ .mode_count = ARRAY_SIZE(pmx_ssp_cs_modes),
+ .enb_on_reset = 0,
+};
+
+struct pmx_dev_mode pmx_ssp_modes[] = {
+ {
+ .ids = 0xffffffff,
+ .mask = PMX_SSP_MASK,
+ },
+};
+
+struct pmx_dev pmx_ssp = {
+ .name = "ssp",
+ .modes = pmx_ssp_modes,
+ .mode_count = ARRAY_SIZE(pmx_ssp_modes),
+ .enb_on_reset = 0,
+};
+
+struct pmx_dev_mode pmx_mii_modes[] = {
+ {
+ .ids = 0xffffffff,
+ .mask = PMX_MII_MASK,
+ },
+};
+
+struct pmx_dev pmx_mii = {
+ .name = "mii",
+ .modes = pmx_mii_modes,
+ .mode_count = ARRAY_SIZE(pmx_mii_modes),
+ .enb_on_reset = 0,
+};
+
+struct pmx_dev_mode pmx_gpio_pin0_modes[] = {
+ {
+ .ids = 0xffffffff,
+ .mask = PMX_GPIO_PIN0_MASK,
+ },
+};
+
+struct pmx_dev pmx_gpio_pin0 = {
+ .name = "gpio_pin0",
+ .modes = pmx_gpio_pin0_modes,
+ .mode_count = ARRAY_SIZE(pmx_gpio_pin0_modes),
+ .enb_on_reset = 0,
+};
+
+struct pmx_dev_mode pmx_gpio_pin1_modes[] = {
+ {
+ .ids = 0xffffffff,
+ .mask = PMX_GPIO_PIN1_MASK,
+ },
+};
+
+struct pmx_dev pmx_gpio_pin1 = {
+ .name = "gpio_pin1",
+ .modes = pmx_gpio_pin1_modes,
+ .mode_count = ARRAY_SIZE(pmx_gpio_pin1_modes),
+ .enb_on_reset = 0,
+};
+
+struct pmx_dev_mode pmx_gpio_pin2_modes[] = {
+ {
+ .ids = 0xffffffff,
+ .mask = PMX_GPIO_PIN2_MASK,
+ },
+};
+
+struct pmx_dev pmx_gpio_pin2 = {
+ .name = "gpio_pin2",
+ .modes = pmx_gpio_pin2_modes,
+ .mode_count = ARRAY_SIZE(pmx_gpio_pin2_modes),
+ .enb_on_reset = 0,
+};
+
+struct pmx_dev_mode pmx_gpio_pin3_modes[] = {
+ {
+ .ids = 0xffffffff,
+ .mask = PMX_GPIO_PIN3_MASK,
+ },
+};
+
+struct pmx_dev pmx_gpio_pin3 = {
+ .name = "gpio_pin3",
+ .modes = pmx_gpio_pin3_modes,
+ .mode_count = ARRAY_SIZE(pmx_gpio_pin3_modes),
+ .enb_on_reset = 0,
+};
+
+struct pmx_dev_mode pmx_gpio_pin4_modes[] = {
+ {
+ .ids = 0xffffffff,
+ .mask = PMX_GPIO_PIN4_MASK,
+ },
+};
+
+struct pmx_dev pmx_gpio_pin4 = {
+ .name = "gpio_pin4",
+ .modes = pmx_gpio_pin4_modes,
+ .mode_count = ARRAY_SIZE(pmx_gpio_pin4_modes),
+ .enb_on_reset = 0,
+};
+
+struct pmx_dev_mode pmx_gpio_pin5_modes[] = {
+ {
+ .ids = 0xffffffff,
+ .mask = PMX_GPIO_PIN5_MASK,
+ },
+};
+
+struct pmx_dev pmx_gpio_pin5 = {
+ .name = "gpio_pin5",
+ .modes = pmx_gpio_pin5_modes,
+ .mode_count = ARRAY_SIZE(pmx_gpio_pin5_modes),
+ .enb_on_reset = 0,
+};
+
+struct pmx_dev_mode pmx_uart0_modem_modes[] = {
+ {
+ .ids = 0xffffffff,
+ .mask = PMX_UART0_MODEM_MASK,
+ },
+};
+
+struct pmx_dev pmx_uart0_modem = {
+ .name = "uart0_modem",
+ .modes = pmx_uart0_modem_modes,
+ .mode_count = ARRAY_SIZE(pmx_uart0_modem_modes),
+ .enb_on_reset = 0,
+};
+
+struct pmx_dev_mode pmx_uart0_modes[] = {
+ {
+ .ids = 0xffffffff,
+ .mask = PMX_UART0_MASK,
+ },
+};
+
+struct pmx_dev pmx_uart0 = {
+ .name = "uart0",
+ .modes = pmx_uart0_modes,
+ .mode_count = ARRAY_SIZE(pmx_uart0_modes),
+ .enb_on_reset = 0,
+};
+
+struct pmx_dev_mode pmx_timer_3_4_modes[] = {
+ {
+ .ids = 0xffffffff,
+ .mask = PMX_TIMER_3_4_MASK,
+ },
+};
+
+struct pmx_dev pmx_timer_3_4 = {
+ .name = "timer_3_4",
+ .modes = pmx_timer_3_4_modes,
+ .mode_count = ARRAY_SIZE(pmx_timer_3_4_modes),
+ .enb_on_reset = 0,
+};
+
+struct pmx_dev_mode pmx_timer_1_2_modes[] = {
+ {
+ .ids = 0xffffffff,
+ .mask = PMX_TIMER_1_2_MASK,
+ },
+};
+
+struct pmx_dev pmx_timer_1_2 = {
+ .name = "timer_1_2",
+ .modes = pmx_timer_1_2_modes,
+ .mode_count = ARRAY_SIZE(pmx_timer_1_2_modes),
+ .enb_on_reset = 0,
+};
+
+#if defined(CONFIG_MACH_SPEAR310) || defined(CONFIG_MACH_SPEAR320)
+/* plgpios devices */
+struct pmx_dev_mode pmx_plgpio_0_1_modes[] = {
+ {
+ .ids = 0x00,
+ .mask = PMX_FIRDA_MASK,
+ },
+};
+
+struct pmx_dev pmx_plgpio_0_1 = {
+ .name = "plgpio 0 and 1",
+ .modes = pmx_plgpio_0_1_modes,
+ .mode_count = ARRAY_SIZE(pmx_plgpio_0_1_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_plgpio_2_3_modes[] = {
+ {
+ .ids = 0x00,
+ .mask = PMX_UART0_MASK,
+ },
+};
+
+struct pmx_dev pmx_plgpio_2_3 = {
+ .name = "plgpio 2 and 3",
+ .modes = pmx_plgpio_2_3_modes,
+ .mode_count = ARRAY_SIZE(pmx_plgpio_2_3_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_plgpio_4_5_modes[] = {
+ {
+ .ids = 0x00,
+ .mask = PMX_I2C_MASK,
+ },
+};
+
+struct pmx_dev pmx_plgpio_4_5 = {
+ .name = "plgpio 4 and 5",
+ .modes = pmx_plgpio_4_5_modes,
+ .mode_count = ARRAY_SIZE(pmx_plgpio_4_5_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_plgpio_6_9_modes[] = {
+ {
+ .ids = 0x00,
+ .mask = PMX_SSP_MASK,
+ },
+};
+
+struct pmx_dev pmx_plgpio_6_9 = {
+ .name = "plgpio 6 to 9",
+ .modes = pmx_plgpio_6_9_modes,
+ .mode_count = ARRAY_SIZE(pmx_plgpio_6_9_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_plgpio_10_27_modes[] = {
+ {
+ .ids = 0x00,
+ .mask = PMX_MII_MASK,
+ },
+};
+
+struct pmx_dev pmx_plgpio_10_27 = {
+ .name = "plgpio 10 to 27",
+ .modes = pmx_plgpio_10_27_modes,
+ .mode_count = ARRAY_SIZE(pmx_plgpio_10_27_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_plgpio_28_modes[] = {
+ {
+ .ids = 0x00,
+ .mask = PMX_GPIO_PIN0_MASK,
+ },
+};
+
+struct pmx_dev pmx_plgpio_28 = {
+ .name = "plgpio 28",
+ .modes = pmx_plgpio_28_modes,
+ .mode_count = ARRAY_SIZE(pmx_plgpio_28_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_plgpio_29_modes[] = {
+ {
+ .ids = 0x00,
+ .mask = PMX_GPIO_PIN1_MASK,
+ },
+};
+
+struct pmx_dev pmx_plgpio_29 = {
+ .name = "plgpio 29",
+ .modes = pmx_plgpio_29_modes,
+ .mode_count = ARRAY_SIZE(pmx_plgpio_29_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_plgpio_30_modes[] = {
+ {
+ .ids = 0x00,
+ .mask = PMX_GPIO_PIN2_MASK,
+ },
+};
+
+struct pmx_dev pmx_plgpio_30 = {
+ .name = "plgpio 30",
+ .modes = pmx_plgpio_30_modes,
+ .mode_count = ARRAY_SIZE(pmx_plgpio_30_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_plgpio_31_modes[] = {
+ {
+ .ids = 0x00,
+ .mask = PMX_GPIO_PIN3_MASK,
+ },
+};
+
+struct pmx_dev pmx_plgpio_31 = {
+ .name = "plgpio 31",
+ .modes = pmx_plgpio_31_modes,
+ .mode_count = ARRAY_SIZE(pmx_plgpio_31_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_plgpio_32_modes[] = {
+ {
+ .ids = 0x00,
+ .mask = PMX_GPIO_PIN4_MASK,
+ },
+};
+
+struct pmx_dev pmx_plgpio_32 = {
+ .name = "plgpio 32",
+ .modes = pmx_plgpio_32_modes,
+ .mode_count = ARRAY_SIZE(pmx_plgpio_32_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_plgpio_33_modes[] = {
+ {
+ .ids = 0x00,
+ .mask = PMX_GPIO_PIN5_MASK,
+ },
+};
+
+struct pmx_dev pmx_plgpio_33 = {
+ .name = "plgpio 33",
+ .modes = pmx_plgpio_33_modes,
+ .mode_count = ARRAY_SIZE(pmx_plgpio_33_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_plgpio_34_36_modes[] = {
+ {
+ .ids = 0x00,
+ .mask = PMX_SSP_CS_MASK,
+ },
+};
+
+struct pmx_dev pmx_plgpio_34_36 = {
+ .name = "plgpio 34 to 36",
+ .modes = pmx_plgpio_34_36_modes,
+ .mode_count = ARRAY_SIZE(pmx_plgpio_34_36_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_plgpio_37_42_modes[] = {
+ {
+ .ids = 0x00,
+ .mask = PMX_UART0_MODEM_MASK,
+ },
+};
+
+struct pmx_dev pmx_plgpio_37_42 = {
+ .name = "plgpio 37 to 42",
+ .modes = pmx_plgpio_37_42_modes,
+ .mode_count = ARRAY_SIZE(pmx_plgpio_37_42_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_plgpio_43_44_47_48_modes[] = {
+ {
+ .ids = 0x00,
+ .mask = PMX_TIMER_1_2_MASK,
+ },
+};
+
+struct pmx_dev pmx_plgpio_43_44_47_48 = {
+ .name = "plgpio 43, 44, 47 and 48",
+ .modes = pmx_plgpio_43_44_47_48_modes,
+ .mode_count = ARRAY_SIZE(pmx_plgpio_43_44_47_48_modes),
+ .enb_on_reset = 1,
+};
+
+struct pmx_dev_mode pmx_plgpio_45_46_49_50_modes[] = {
+ {
+ .ids = 0x00,
+ .mask = PMX_TIMER_3_4_MASK,
+ },
+};
+
+struct pmx_dev pmx_plgpio_45_46_49_50 = {
+ .name = "plgpio 45, 46, 49 and 50",
+ .modes = pmx_plgpio_45_46_49_50_modes,
+ .mode_count = ARRAY_SIZE(pmx_plgpio_45_46_49_50_modes),
+ .enb_on_reset = 1,
+};
+
+#endif
+
+/* spear padmux initialization function */
+void spear_pmx_init(struct pmx_driver *pmx_driver, uint base, uint size)
+{
+ int ret = 0;
+
+ /* pad mux initialization */
+ pmx_driver->base = ioremap(base, size);
+ if (!pmx_driver->base) {
+ ret = -ENOMEM;
+ goto pmx_fail;
+ }
+
+ ret = pmx_register(pmx_driver);
+ iounmap(pmx_driver->base);
+
+pmx_fail:
+ if (ret)
+ printk(KERN_ERR "padmux: registration failed. err no: %d\n",
+ ret);
+}
diff --git a/arch/arm/mach-spear6xx/Kconfig b/arch/arm/mach-spear6xx/Kconfig
new file mode 100644
index 000000000000..bddba034f862
--- /dev/null
+++ b/arch/arm/mach-spear6xx/Kconfig
@@ -0,0 +1,20 @@
+#
+# SPEAr6XX Machine configuration file
+#
+
+if ARCH_SPEAR6XX
+
+choice
+ prompt "SPEAr6XX Family"
+ default MACH_SPEAR600
+
+config MACH_SPEAR600
+ bool "SPEAr600"
+ help
+ Supports ST SPEAr600 Machine
+endchoice
+
+# Adding SPEAr6XX machine specific configuration files
+source "arch/arm/mach-spear6xx/Kconfig600"
+
+endif #ARCH_SPEAR6XX
diff --git a/arch/arm/mach-spear6xx/Kconfig600 b/arch/arm/mach-spear6xx/Kconfig600
new file mode 100644
index 000000000000..9e19f65eb78e
--- /dev/null
+++ b/arch/arm/mach-spear6xx/Kconfig600
@@ -0,0 +1,17 @@
+#
+# SPEAr600 machine configuration file
+#
+
+if MACH_SPEAR600
+
+choice
+ prompt "SPEAr600 Boards"
+ default BOARD_SPEAR600_EVB
+
+config BOARD_SPEAR600_EVB
+ bool "SPEAr600 Evaluation Board"
+ help
+ Supports ST SPEAr600 Evaluation Board
+endchoice
+
+endif #MACH_SPEAR600
diff --git a/arch/arm/mach-spear6xx/Makefile b/arch/arm/mach-spear6xx/Makefile
new file mode 100644
index 000000000000..cc1a4d82d459
--- /dev/null
+++ b/arch/arm/mach-spear6xx/Makefile
@@ -0,0 +1,12 @@
+#
+# Makefile for SPEAr6XX machine series
+#
+
+# common files
+obj-y += clock.o spear6xx.o
+
+# spear600 specific files
+obj-$(CONFIG_MACH_SPEAR600) += spear600.o
+
+# spear600 boards files
+obj-$(CONFIG_BOARD_SPEAR600_EVB) += spear600_evb.o
diff --git a/arch/arm/mach-spear6xx/Makefile.boot b/arch/arm/mach-spear6xx/Makefile.boot
new file mode 100644
index 000000000000..7a1f3c0eadb8
--- /dev/null
+++ b/arch/arm/mach-spear6xx/Makefile.boot
@@ -0,0 +1,3 @@
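+# zreladdr-y:    physical address the decompressed kernel is executed from
+# params_phys-y: physical address where the boot parameters (ATAGs) are placed
+# initrd_phys-y: default physical load address of the initial ramdisk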
+zreladdr-y := 0x00008000
+params_phys-y := 0x00000100
+initrd_phys-y := 0x00800000
diff --git a/arch/arm/mach-spear6xx/clock.c b/arch/arm/mach-spear6xx/clock.c
new file mode 100644
index 000000000000..13e27c769685
--- /dev/null
+++ b/arch/arm/mach-spear6xx/clock.c
@@ -0,0 +1,483 @@
+/*
+ * arch/arm/mach-spear6xx/clock.c
+ *
+ * SPEAr6xx machines clock framework source file
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <mach/misc_regs.h>
+#include <plat/clock.h>
+
+/* root clks */
+/* 32 KHz oscillator clock */
+static struct clk osc_32k_clk = {
+ .flags = ALWAYS_ENABLED,
+ .rate = 32000,
+};
+
+/* 30 MHz oscillator clock */
+static struct clk osc_30m_clk = {
+ .flags = ALWAYS_ENABLED,
+ .rate = 30000000,
+};
+
+/* clock derived from 32 KHz osc clk */
+/* rtc clock */
+static struct clk rtc_clk = {
+ .pclk = &osc_32k_clk,
+ .en_reg = PERIP1_CLK_ENB,
+ .en_reg_bit = RTC_CLK_ENB,
+ .recalc = &follow_parent,
+};
+
+/* clock derived from 30 MHz osc clk */
+/* pll1 configuration structure */
+static struct pll_clk_config pll1_config = {
+ .mode_reg = PLL1_CTR,
+ .cfg_reg = PLL1_FRQ,
+};
+
+/* PLL1 clock */
+static struct clk pll1_clk = {
+ .pclk = &osc_30m_clk,
+ .en_reg = PLL1_CTR,
+ .en_reg_bit = PLL_ENABLE,
+ .recalc = &pll1_clk_recalc,
+ .private_data = &pll1_config,
+};
+
+/* PLL3 48 MHz clock */
+static struct clk pll3_48m_clk = {
+ .flags = ALWAYS_ENABLED,
+ .pclk = &osc_30m_clk,
+ .rate = 48000000,
+};
+
+/* watch dog timer clock */
+static struct clk wdt_clk = {
+ .flags = ALWAYS_ENABLED,
+ .pclk = &osc_30m_clk,
+ .recalc = &follow_parent,
+};
+
+/* clock derived from pll1 clk */
+/* cpu clock */
+static struct clk cpu_clk = {
+ .flags = ALWAYS_ENABLED,
+ .pclk = &pll1_clk,
+ .recalc = &follow_parent,
+};
+
+/* ahb configuration structure */
+static struct bus_clk_config ahb_config = {
+ .reg = CORE_CLK_CFG,
+ .mask = PLL_HCLK_RATIO_MASK,
+ .shift = PLL_HCLK_RATIO_SHIFT,
+};
+
+/* ahb clock */
+static struct clk ahb_clk = {
+ .flags = ALWAYS_ENABLED,
+ .pclk = &pll1_clk,
+ .recalc = &bus_clk_recalc,
+ .private_data = &ahb_config,
+};
+
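+/*
+ * Auxiliary peripheral clocks (uart, firda, clcd, gpt) can be parented
+ * either to pll1 (scalable through a clock synthesizer) or to the fixed
+ * 48 MHz pll3 output; the *_CLK_MASK fields in PERIP_CLK_CFG select the
+ * parent for each of them.
+ */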
+/* uart parents */
+static struct pclk_info uart_pclk_info[] = {
+ {
+ .pclk = &pll1_clk,
+ .pclk_mask = AUX_CLK_PLL1_MASK,
+ .scalable = 1,
+ }, {
+ .pclk = &pll3_48m_clk,
+ .pclk_mask = AUX_CLK_PLL3_MASK,
+ .scalable = 0,
+ },
+};
+
+/* uart parent select structure */
+static struct pclk_sel uart_pclk_sel = {
+ .pclk_info = uart_pclk_info,
+ .pclk_count = ARRAY_SIZE(uart_pclk_info),
+ .pclk_sel_reg = PERIP_CLK_CFG,
+ .pclk_sel_mask = UART_CLK_MASK,
+};
+
+/* uart configurations */
+static struct aux_clk_config uart_config = {
+ .synth_reg = UART_CLK_SYNT,
+};
+
+/* uart0 clock */
+static struct clk uart0_clk = {
+ .en_reg = PERIP1_CLK_ENB,
+ .en_reg_bit = UART0_CLK_ENB,
+ .pclk_sel = &uart_pclk_sel,
+ .pclk_sel_shift = UART_CLK_SHIFT,
+ .recalc = &aux_clk_recalc,
+ .private_data = &uart_config,
+};
+
+/* uart1 clock */
+static struct clk uart1_clk = {
+ .en_reg = PERIP1_CLK_ENB,
+ .en_reg_bit = UART1_CLK_ENB,
+ .pclk_sel = &uart_pclk_sel,
+ .pclk_sel_shift = UART_CLK_SHIFT,
+ .recalc = &aux_clk_recalc,
+ .private_data = &uart_config,
+};
+
+/* firda configurations */
+static struct aux_clk_config firda_config = {
+ .synth_reg = FIRDA_CLK_SYNT,
+};
+
+/* firda parents */
+static struct pclk_info firda_pclk_info[] = {
+ {
+ .pclk = &pll1_clk,
+ .pclk_mask = AUX_CLK_PLL1_MASK,
+ .scalable = 1,
+ }, {
+ .pclk = &pll3_48m_clk,
+ .pclk_mask = AUX_CLK_PLL3_MASK,
+ .scalable = 0,
+ },
+};
+
+/* firda parent select structure */
+static struct pclk_sel firda_pclk_sel = {
+ .pclk_info = firda_pclk_info,
+ .pclk_count = ARRAY_SIZE(firda_pclk_info),
+ .pclk_sel_reg = PERIP_CLK_CFG,
+ .pclk_sel_mask = FIRDA_CLK_MASK,
+};
+
+/* firda clock */
+static struct clk firda_clk = {
+ .en_reg = PERIP1_CLK_ENB,
+ .en_reg_bit = FIRDA_CLK_ENB,
+ .pclk_sel = &firda_pclk_sel,
+ .pclk_sel_shift = FIRDA_CLK_SHIFT,
+ .recalc = &aux_clk_recalc,
+ .private_data = &firda_config,
+};
+
+/* clcd configurations */
+static struct aux_clk_config clcd_config = {
+ .synth_reg = CLCD_CLK_SYNT,
+};
+
+/* clcd parents */
+static struct pclk_info clcd_pclk_info[] = {
+ {
+ .pclk = &pll1_clk,
+ .pclk_mask = AUX_CLK_PLL1_MASK,
+ .scalable = 1,
+ }, {
+ .pclk = &pll3_48m_clk,
+ .pclk_mask = AUX_CLK_PLL3_MASK,
+ .scalable = 0,
+ },
+};
+
+/* clcd parent select structure */
+static struct pclk_sel clcd_pclk_sel = {
+ .pclk_info = clcd_pclk_info,
+ .pclk_count = ARRAY_SIZE(clcd_pclk_info),
+ .pclk_sel_reg = PERIP_CLK_CFG,
+ .pclk_sel_mask = CLCD_CLK_MASK,
+};
+
+/* clcd clock */
+static struct clk clcd_clk = {
+ .en_reg = PERIP1_CLK_ENB,
+ .en_reg_bit = CLCD_CLK_ENB,
+ .pclk_sel = &clcd_pclk_sel,
+ .pclk_sel_shift = CLCD_CLK_SHIFT,
+ .recalc = &aux_clk_recalc,
+ .private_data = &clcd_config,
+};
+
+/* gpt parents */
+static struct pclk_info gpt_pclk_info[] = {
+ {
+ .pclk = &pll1_clk,
+ .pclk_mask = AUX_CLK_PLL1_MASK,
+ .scalable = 1,
+ }, {
+ .pclk = &pll3_48m_clk,
+ .pclk_mask = AUX_CLK_PLL3_MASK,
+ .scalable = 0,
+ },
+};
+
+/* gpt parent select structure */
+static struct pclk_sel gpt_pclk_sel = {
+ .pclk_info = gpt_pclk_info,
+ .pclk_count = ARRAY_SIZE(gpt_pclk_info),
+ .pclk_sel_reg = PERIP_CLK_CFG,
+ .pclk_sel_mask = GPT_CLK_MASK,
+};
+
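+/*
+ * gpt0 and gpt1 share one clock synthesizer register (PRSC1_CLK_CFG), so
+ * the same aux_clk_config is reused for both timer clocks.
+ */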
+/* gpt0_1 configurations */
+static struct aux_clk_config gpt0_1_config = {
+ .synth_reg = PRSC1_CLK_CFG,
+};
+
+/* gpt0 ARM1 subsystem timer clock */
+static struct clk gpt0_clk = {
+ .flags = ALWAYS_ENABLED,
+ .pclk_sel = &gpt_pclk_sel,
+ .pclk_sel_shift = GPT0_CLK_SHIFT,
+ .recalc = &gpt_clk_recalc,
+ .private_data = &gpt0_1_config,
+};
+
+/* gpt1 timer clock */
+static struct clk gpt1_clk = {
+ .flags = ALWAYS_ENABLED,
+ .pclk_sel = &gpt_pclk_sel,
+ .pclk_sel_shift = GPT1_CLK_SHIFT,
+ .recalc = &gpt_clk_recalc,
+ .private_data = &gpt0_1_config,
+};
+
+/* gpt2 configurations */
+static struct aux_clk_config gpt2_config = {
+ .synth_reg = PRSC2_CLK_CFG,
+};
+
+/* gpt2 timer clock */
+static struct clk gpt2_clk = {
+ .en_reg = PERIP1_CLK_ENB,
+ .en_reg_bit = GPT2_CLK_ENB,
+ .pclk_sel = &gpt_pclk_sel,
+ .pclk_sel_shift = GPT2_CLK_SHIFT,
+ .recalc = &gpt_clk_recalc,
+ .private_data = &gpt2_config,
+};
+
+/* gpt3 configurations */
+static struct aux_clk_config gpt3_config = {
+ .synth_reg = PRSC3_CLK_CFG,
+};
+
+/* gpt3 timer clock */
+static struct clk gpt3_clk = {
+ .en_reg = PERIP1_CLK_ENB,
+ .en_reg_bit = GPT3_CLK_ENB,
+ .pclk_sel = &gpt_pclk_sel,
+ .pclk_sel_shift = GPT3_CLK_SHIFT,
+ .recalc = &gpt_clk_recalc,
+ .private_data = &gpt3_config,
+};
+
+/* clock derived from pll3 clk */
+/* usbh0 clock */
+static struct clk usbh0_clk = {
+ .pclk = &pll3_48m_clk,
+ .en_reg = PERIP1_CLK_ENB,
+ .en_reg_bit = USBH0_CLK_ENB,
+ .recalc = &follow_parent,
+};
+
+/* usbh1 clock */
+static struct clk usbh1_clk = {
+ .pclk = &pll3_48m_clk,
+ .en_reg = PERIP1_CLK_ENB,
+ .en_reg_bit = USBH1_CLK_ENB,
+ .recalc = &follow_parent,
+};
+
+/* usbd clock */
+static struct clk usbd_clk = {
+ .pclk = &pll3_48m_clk,
+ .en_reg = PERIP1_CLK_ENB,
+ .en_reg_bit = USBD_CLK_ENB,
+ .recalc = &follow_parent,
+};
+
+/* clock derived from ahb clk */
+/* apb configuration structure */
+static struct bus_clk_config apb_config = {
+ .reg = CORE_CLK_CFG,
+ .mask = HCLK_PCLK_RATIO_MASK,
+ .shift = HCLK_PCLK_RATIO_SHIFT,
+};
+
+/* apb clock */
+static struct clk apb_clk = {
+ .flags = ALWAYS_ENABLED,
+ .pclk = &ahb_clk,
+ .recalc = &bus_clk_recalc,
+ .private_data = &apb_config,
+};
+
+/* i2c clock */
+static struct clk i2c_clk = {
+ .pclk = &ahb_clk,
+ .en_reg = PERIP1_CLK_ENB,
+ .en_reg_bit = I2C_CLK_ENB,
+ .recalc = &follow_parent,
+};
+
+/* dma clock */
+static struct clk dma_clk = {
+ .pclk = &ahb_clk,
+ .en_reg = PERIP1_CLK_ENB,
+ .en_reg_bit = DMA_CLK_ENB,
+ .recalc = &follow_parent,
+};
+
+/* jpeg clock */
+static struct clk jpeg_clk = {
+ .pclk = &ahb_clk,
+ .en_reg = PERIP1_CLK_ENB,
+ .en_reg_bit = JPEG_CLK_ENB,
+ .recalc = &follow_parent,
+};
+
+/* gmac clock */
+static struct clk gmac_clk = {
+ .pclk = &ahb_clk,
+ .en_reg = PERIP1_CLK_ENB,
+ .en_reg_bit = GMAC_CLK_ENB,
+ .recalc = &follow_parent,
+};
+
+/* smi clock */
+static struct clk smi_clk = {
+ .pclk = &ahb_clk,
+ .en_reg = PERIP1_CLK_ENB,
+ .en_reg_bit = SMI_CLK_ENB,
+ .recalc = &follow_parent,
+};
+
+/* fsmc clock */
+static struct clk fsmc_clk = {
+ .pclk = &ahb_clk,
+ .en_reg = PERIP1_CLK_ENB,
+ .en_reg_bit = FSMC_CLK_ENB,
+ .recalc = &follow_parent,
+};
+
+/* clock derived from apb clk */
+/* adc clock */
+static struct clk adc_clk = {
+ .pclk = &apb_clk,
+ .en_reg = PERIP1_CLK_ENB,
+ .en_reg_bit = ADC_CLK_ENB,
+ .recalc = &follow_parent,
+};
+
+/* ssp0 clock */
+static struct clk ssp0_clk = {
+ .pclk = &apb_clk,
+ .en_reg = PERIP1_CLK_ENB,
+ .en_reg_bit = SSP0_CLK_ENB,
+ .recalc = &follow_parent,
+};
+
+/* ssp1 clock */
+static struct clk ssp1_clk = {
+ .pclk = &apb_clk,
+ .en_reg = PERIP1_CLK_ENB,
+ .en_reg_bit = SSP1_CLK_ENB,
+ .recalc = &follow_parent,
+};
+
+/* ssp2 clock */
+static struct clk ssp2_clk = {
+ .pclk = &apb_clk,
+ .en_reg = PERIP1_CLK_ENB,
+ .en_reg_bit = SSP2_CLK_ENB,
+ .recalc = &follow_parent,
+};
+
+/* gpio0 ARM subsystem clock */
+static struct clk gpio0_clk = {
+ .flags = ALWAYS_ENABLED,
+ .pclk = &apb_clk,
+ .recalc = &follow_parent,
+};
+
+/* gpio1 clock */
+static struct clk gpio1_clk = {
+ .pclk = &apb_clk,
+ .en_reg = PERIP1_CLK_ENB,
+ .en_reg_bit = GPIO1_CLK_ENB,
+ .recalc = &follow_parent,
+};
+
+/* gpio2 clock */
+static struct clk gpio2_clk = {
+ .pclk = &apb_clk,
+ .en_reg = PERIP1_CLK_ENB,
+ .en_reg_bit = GPIO2_CLK_ENB,
+ .recalc = &follow_parent,
+};
+
+/* array of all spear 6xx clock lookups */
+static struct clk_lookup spear_clk_lookups[] = {
+ /* root clks */
+ { .con_id = "osc_32k_clk", .clk = &osc_32k_clk},
+ { .con_id = "osc_30m_clk", .clk = &osc_30m_clk},
+ /* clock derived from 32 KHz osc clk */
+ { .dev_id = "rtc", .clk = &rtc_clk},
+ /* clock derived from 30 MHz osc clk */
+ { .con_id = "pll1_clk", .clk = &pll1_clk},
+ { .con_id = "pll3_48m_clk", .clk = &pll3_48m_clk},
+ { .dev_id = "wdt", .clk = &wdt_clk},
+ /* clock derived from pll1 clk */
+ { .con_id = "cpu_clk", .clk = &cpu_clk},
+ { .con_id = "ahb_clk", .clk = &ahb_clk},
+ { .dev_id = "uart0", .clk = &uart0_clk},
+ { .dev_id = "uart1", .clk = &uart1_clk},
+ { .dev_id = "firda", .clk = &firda_clk},
+ { .dev_id = "clcd", .clk = &clcd_clk},
+ { .dev_id = "gpt0", .clk = &gpt0_clk},
+ { .dev_id = "gpt1", .clk = &gpt1_clk},
+ { .dev_id = "gpt2", .clk = &gpt2_clk},
+ { .dev_id = "gpt3", .clk = &gpt3_clk},
+ /* clock derived from pll3 clk */
+ { .dev_id = "usbh0", .clk = &usbh0_clk},
+ { .dev_id = "usbh1", .clk = &usbh1_clk},
+ { .dev_id = "usbd", .clk = &usbd_clk},
+ /* clock derived from ahb clk */
+ { .con_id = "apb_clk", .clk = &apb_clk},
+ { .dev_id = "i2c", .clk = &i2c_clk},
+ { .dev_id = "dma", .clk = &dma_clk},
+ { .dev_id = "jpeg", .clk = &jpeg_clk},
+ { .dev_id = "gmac", .clk = &gmac_clk},
+ { .dev_id = "smi", .clk = &smi_clk},
+ { .dev_id = "fsmc", .clk = &fsmc_clk},
+ /* clock derived from apb clk */
+ { .dev_id = "adc", .clk = &adc_clk},
+ { .dev_id = "ssp0", .clk = &ssp0_clk},
+ { .dev_id = "ssp1", .clk = &ssp1_clk},
+ { .dev_id = "ssp2", .clk = &ssp2_clk},
+ { .dev_id = "gpio0", .clk = &gpio0_clk},
+ { .dev_id = "gpio1", .clk = &gpio1_clk},
+ { .dev_id = "gpio2", .clk = &gpio2_clk},
+};
+
+void __init clk_init(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(spear_clk_lookups); i++)
+ clk_register(&spear_clk_lookups[i]);
+
+ recalc_root_clocks();
+}
diff --git a/arch/arm/mach-spear6xx/include/mach/clkdev.h b/arch/arm/mach-spear6xx/include/mach/clkdev.h
new file mode 100644
index 000000000000..05676bf440d3
--- /dev/null
+++ b/arch/arm/mach-spear6xx/include/mach/clkdev.h
@@ -0,0 +1,19 @@
+/*
+ * arch/arm/mach-spear6xx/include/mach/clkdev.h
+ *
+ * Clock Dev framework definitions for SPEAr6xx machine family
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_CLKDEV_H
+#define __MACH_CLKDEV_H
+
+#include <plat/clkdev.h>
+
+#endif /* __MACH_CLKDEV_H */
diff --git a/arch/arm/mach-spear6xx/include/mach/debug-macro.S b/arch/arm/mach-spear6xx/include/mach/debug-macro.S
new file mode 100644
index 000000000000..0f3ea39edd96
--- /dev/null
+++ b/arch/arm/mach-spear6xx/include/mach/debug-macro.S
@@ -0,0 +1,14 @@
+/*
+ * arch/arm/mach-spear6xx/include/mach/debug-macro.S
+ *
+ * Debugging macro include header for SPEAr6xx machine family
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Rajeev Kumar<rajeev-dlh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <plat/debug-macro.S>
diff --git a/arch/arm/mach-spear6xx/include/mach/entry-macro.S b/arch/arm/mach-spear6xx/include/mach/entry-macro.S
new file mode 100644
index 000000000000..9eaecaeafcf0
--- /dev/null
+++ b/arch/arm/mach-spear6xx/include/mach/entry-macro.S
@@ -0,0 +1,55 @@
+/*
+ * arch/arm/mach-spear6xx/include/mach/entry-macro.S
+ *
+ * Low-level IRQ helper macros for SPEAr6xx machine family
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Rajeev Kumar<rajeev-dlh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <mach/hardware.h>
+#include <mach/spear.h>
+#include <asm/hardware/vic.h>
+
+ .macro disable_fiq
+ .endm
+
+ .macro get_irqnr_preamble, base, tmp
+ .endm
+
+ .macro arch_ret_to_user, tmp1, tmp2
+ .endm
+
+ .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
+ ldr \base, =VA_SPEAR6XX_CPU_VIC_PRI_BASE
+ ldr \irqstat, [\base, #VIC_IRQ_STATUS] @ get status
+ mov \irqnr, #0
+ teq \irqstat, #0
+ bne 1001f
+ ldr \base, =VA_SPEAR6XX_CPU_VIC_SEC_BASE
+ ldr \irqstat, [\base, #VIC_IRQ_STATUS] @ get status
+ teq \irqstat, #0
+ beq 1002f @ Z flag still set: no irq
+ @ pending on either vic
+ mov \irqnr, #32
+1001:
+ /*
+ * The following code finds the bit position of the least significant
+ * bit set in irqstat, using the identity:
+ * least significant set bit of n = (n & ~(n-1))
+ */
+ sub \tmp, \irqstat, #1 @ tmp = irqstat - 1
+ mvn \tmp, \tmp @ tmp = ~tmp
+ and \irqstat, \irqstat, \tmp @ irqstat &= tmp
+ /* Now irqstat holds only the least significant set bit of the vic status */
+ clz \tmp, \irqstat @ tmp = leading zeros
+
+ rsb \tmp, \tmp, #0x1F @ tmp = 32 - tmp - 1
+ add \irqnr, \irqnr, \tmp
+
+1002: /* EQ will be set if no irqs pending */
+ .endm
diff --git a/arch/arm/mach-spear6xx/include/mach/generic.h b/arch/arm/mach-spear6xx/include/mach/generic.h
new file mode 100644
index 000000000000..16205a538756
--- /dev/null
+++ b/arch/arm/mach-spear6xx/include/mach/generic.h
@@ -0,0 +1,45 @@
+/*
+ * arch/arm/mach-spear6xx/include/mach/generic.h
+ *
+ * SPEAr6XX machine family specific generic header file
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Rajeev Kumar<rajeev-dlh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_GENERIC_H
+#define __MACH_GENERIC_H
+
+#include <asm/mach/time.h>
+#include <asm/mach/map.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/amba/bus.h>
+
+/*
+ * Each GPT has 2 timer channels
+ * Following GPT channels will be used as clock source and clockevent
+ */
+#define SPEAR_GPT0_BASE SPEAR6XX_CPU_TMR_BASE
+#define SPEAR_GPT0_CHAN0_IRQ IRQ_CPU_GPT1_1
+#define SPEAR_GPT0_CHAN1_IRQ IRQ_CPU_GPT1_2
+
+/* Add spear6xx family device structure declarations here */
+extern struct amba_device gpio_device[];
+extern struct amba_device uart_device[];
+extern struct sys_timer spear_sys_timer;
+
+/* Add spear6xx family function declarations here */
+void __init spear6xx_map_io(void);
+void __init spear6xx_init_irq(void);
+void __init spear6xx_init(void);
+void __init spear600_init(void);
+void __init clk_init(void);
+
+/* Add spear600 machine device structure declarations here */
+
+#endif /* __MACH_GENERIC_H */
diff --git a/arch/arm/mach-spear6xx/include/mach/gpio.h b/arch/arm/mach-spear6xx/include/mach/gpio.h
new file mode 100644
index 000000000000..3a789dbb69f7
--- /dev/null
+++ b/arch/arm/mach-spear6xx/include/mach/gpio.h
@@ -0,0 +1,19 @@
+/*
+ * arch/arm/mach-spear6xx/include/mach/gpio.h
+ *
+ * GPIO macros for SPEAr6xx machine family
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_GPIO_H
+#define __MACH_GPIO_H
+
+#include <plat/gpio.h>
+
+#endif /* __MACH_GPIO_H */
diff --git a/arch/arm/mach-spear6xx/include/mach/hardware.h b/arch/arm/mach-spear6xx/include/mach/hardware.h
new file mode 100644
index 000000000000..7545116deca9
--- /dev/null
+++ b/arch/arm/mach-spear6xx/include/mach/hardware.h
@@ -0,0 +1,21 @@
+/*
+ * arch/arm/mach-spear6xx/include/mach/hardware.h
+ *
+ * Hardware definitions for SPEAr6xx machine family
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Rajeev Kumar<rajeev-dlh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_HARDWARE_H
+#define __MACH_HARDWARE_H
+
+/* Physical to virtual translation of statically mapped space */
+#define IO_ADDRESS(x) (x | 0xF0000000)
+
+#endif /* __MACH_HARDWARE_H */
+
diff --git a/arch/arm/mach-spear6xx/include/mach/io.h b/arch/arm/mach-spear6xx/include/mach/io.h
new file mode 100644
index 000000000000..fb7c106cea94
--- /dev/null
+++ b/arch/arm/mach-spear6xx/include/mach/io.h
@@ -0,0 +1,20 @@
+/*
+ * arch/arm/mach-spear6xx/include/mach/io.h
+ *
+ * IO definitions for SPEAr6xx machine family
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Rajeev Kumar Kumar<rajeev-dlh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_IO_H
+#define __MACH_IO_H
+
+#include <plat/io.h>
+
+#endif /* __MACH_IO_H */
+
diff --git a/arch/arm/mach-spear6xx/include/mach/irqs.h b/arch/arm/mach-spear6xx/include/mach/irqs.h
new file mode 100644
index 000000000000..8f214b03d75d
--- /dev/null
+++ b/arch/arm/mach-spear6xx/include/mach/irqs.h
@@ -0,0 +1,97 @@
+/*
+ * arch/arm/mach-spear6xx/include/mach/irqs.h
+ *
+ * IRQ helper macros for SPEAr6xx machine family
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Rajeev Kumar<rajeev-dlh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_IRQS_H
+#define __MACH_IRQS_H
+
+/* IRQ definitions */
+/* VIC 1 */
+#define IRQ_INTRCOMM_SW_IRQ 0
+#define IRQ_INTRCOMM_CPU_1 1
+#define IRQ_INTRCOMM_CPU_2 2
+#define IRQ_INTRCOMM_RAS2A11_1 3
+#define IRQ_INTRCOMM_RAS2A11_2 4
+#define IRQ_INTRCOMM_RAS2A12_1 5
+#define IRQ_INTRCOMM_RAS2A12_2 6
+#define IRQ_GEN_RAS_0 7
+#define IRQ_GEN_RAS_1 8
+#define IRQ_GEN_RAS_2 9
+#define IRQ_GEN_RAS_3 10
+#define IRQ_GEN_RAS_4 11
+#define IRQ_GEN_RAS_5 12
+#define IRQ_GEN_RAS_6 13
+#define IRQ_GEN_RAS_7 14
+#define IRQ_GEN_RAS_8 15
+#define IRQ_CPU_GPT1_1 16
+#define IRQ_CPU_GPT1_2 17
+#define IRQ_LOCAL_GPIO 18
+#define IRQ_PLL_UNLOCK 19
+#define IRQ_JPEG 20
+#define IRQ_FSMC 21
+#define IRQ_IRDA 22
+#define IRQ_RESERVED 23
+#define IRQ_UART_0 24
+#define IRQ_UART_1 25
+#define IRQ_SSP_1 26
+#define IRQ_SSP_2 27
+#define IRQ_I2C 28
+#define IRQ_GEN_RAS_9 29
+#define IRQ_GEN_RAS_10 30
+#define IRQ_GEN_RAS_11 31
+
+/* VIC 2 */
+#define IRQ_APPL_GPT1_1 32
+#define IRQ_APPL_GPT1_2 33
+#define IRQ_APPL_GPT2_1 34
+#define IRQ_APPL_GPT2_2 35
+#define IRQ_APPL_GPIO 36
+#define IRQ_APPL_SSP 37
+#define IRQ_APPL_ADC 38
+#define IRQ_APPL_RESERVED 39
+#define IRQ_AHB_EXP_MASTER 40
+#define IRQ_DDR_CONTROLLER 41
+#define IRQ_BASIC_DMA 42
+#define IRQ_BASIC_RESERVED1 43
+#define IRQ_BASIC_SMI 44
+#define IRQ_BASIC_CLCD 45
+#define IRQ_EXP_AHB_1 46
+#define IRQ_EXP_AHB_2 47
+#define IRQ_BASIC_GPT1_1 48
+#define IRQ_BASIC_GPT1_2 49
+#define IRQ_BASIC_RTC 50
+#define IRQ_BASIC_GPIO 51
+#define IRQ_BASIC_WDT 52
+#define IRQ_BASIC_RESERVED 53
+#define IRQ_AHB_EXP_SLAVE 54
+#define IRQ_GMAC_1 55
+#define IRQ_GMAC_2 56
+#define IRQ_USB_DEV 57
+#define IRQ_USB_H_OHCI_0 58
+#define IRQ_USB_H_EHCI_0 59
+#define IRQ_USB_H_OHCI_1 60
+#define IRQ_USB_H_EHCI_1 61
+#define IRQ_EXP_AHB_3 62
+#define IRQ_EXP_AHB_4 63
+
+#define IRQ_VIC_END 64
+
+/* GPIO pin virtual irqs: 8 per controller, allocated after the VIC range */
+#define SPEAR_GPIO_INT_BASE IRQ_VIC_END
+#define SPEAR_GPIO0_INT_BASE SPEAR_GPIO_INT_BASE
+#define SPEAR_GPIO1_INT_BASE (SPEAR_GPIO0_INT_BASE + 8)
+#define SPEAR_GPIO2_INT_BASE (SPEAR_GPIO1_INT_BASE + 8)
+#define SPEAR_GPIO_INT_END (SPEAR_GPIO2_INT_BASE + 8)
+#define VIRTUAL_IRQS (SPEAR_GPIO_INT_END - IRQ_VIC_END)
+#define NR_IRQS (IRQ_VIC_END + VIRTUAL_IRQS)
+
+#endif /* __MACH_IRQS_H */
diff --git a/arch/arm/mach-spear6xx/include/mach/memory.h b/arch/arm/mach-spear6xx/include/mach/memory.h
new file mode 100644
index 000000000000..781f088fc228
--- /dev/null
+++ b/arch/arm/mach-spear6xx/include/mach/memory.h
@@ -0,0 +1,19 @@
+/*
+ * arch/arm/mach-spear6xx/include/mach/memory.h
+ *
+ * Memory map for SPEAr6xx machine family
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Rajeev Kumar<rajeev-dlh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_MEMORY_H
+#define __MACH_MEMORY_H
+
+#include <plat/memory.h>
+
+#endif /* __MACH_MEMORY_H */
diff --git a/arch/arm/mach-spear6xx/include/mach/misc_regs.h b/arch/arm/mach-spear6xx/include/mach/misc_regs.h
new file mode 100644
index 000000000000..03908036b0d0
--- /dev/null
+++ b/arch/arm/mach-spear6xx/include/mach/misc_regs.h
@@ -0,0 +1,173 @@
+/*
+ * arch/arm/mach-spear6xx/include/mach/misc_regs.h
+ *
+ * Miscellaneous registers definitions for SPEAr6xx machine family
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_MISC_REGS_H
+#define __MACH_MISC_REGS_H
+
+#include <mach/spear.h>
+
+#define MISC_BASE VA_SPEAR6XX_ICM3_MISC_REG_BASE
+
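+/*
+ * All register addresses below are virtual: they are offsets from the
+ * statically mapped misc register block (VA_SPEAR6XX_ICM3_MISC_REG_BASE).
+ */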
+#define SOC_CFG_CTR ((unsigned int *)(MISC_BASE + 0x000))
+#define DIAG_CFG_CTR ((unsigned int *)(MISC_BASE + 0x004))
+#define PLL1_CTR ((unsigned int *)(MISC_BASE + 0x008))
+#define PLL1_FRQ ((unsigned int *)(MISC_BASE + 0x00C))
+#define PLL1_MOD ((unsigned int *)(MISC_BASE + 0x010))
+#define PLL2_CTR ((unsigned int *)(MISC_BASE + 0x014))
+/* PLL_CTR register masks */
+#define PLL_ENABLE 2
+#define PLL_MODE_SHIFT 4
+#define PLL_MODE_MASK 0x3
+#define PLL_MODE_NORMAL 0
+#define PLL_MODE_FRACTION 1
+#define PLL_MODE_DITH_DSB 2
+#define PLL_MODE_DITH_SSB 3
+
+#define PLL2_FRQ ((unsigned int *)(MISC_BASE + 0x018))
+/* PLL FRQ register masks */
+#define PLL_DIV_N_SHIFT 0
+#define PLL_DIV_N_MASK 0xFF
+#define PLL_DIV_P_SHIFT 8
+#define PLL_DIV_P_MASK 0x7
+#define PLL_NORM_FDBK_M_SHIFT 24
+#define PLL_NORM_FDBK_M_MASK 0xFF
+#define PLL_DITH_FDBK_M_SHIFT 16
+#define PLL_DITH_FDBK_M_MASK 0xFFFF
+
+#define PLL2_MOD ((unsigned int *)(MISC_BASE + 0x01C))
+#define PLL_CLK_CFG ((unsigned int *)(MISC_BASE + 0x020))
+#define CORE_CLK_CFG ((unsigned int *)(MISC_BASE + 0x024))
+/* CORE CLK CFG register masks */
+#define PLL_HCLK_RATIO_SHIFT 10
+#define PLL_HCLK_RATIO_MASK 0x3
+#define HCLK_PCLK_RATIO_SHIFT 8
+#define HCLK_PCLK_RATIO_MASK 0x3
+
+#define PERIP_CLK_CFG ((unsigned int *)(MISC_BASE + 0x028))
+/* PERIP_CLK_CFG register masks */
+#define CLCD_CLK_SHIFT 2
+#define CLCD_CLK_MASK 0x3
+#define UART_CLK_SHIFT 4
+#define UART_CLK_MASK 0x1
+#define FIRDA_CLK_SHIFT 5
+#define FIRDA_CLK_MASK 0x3
+#define GPT0_CLK_SHIFT 8
+#define GPT1_CLK_SHIFT 10
+#define GPT2_CLK_SHIFT 11
+#define GPT3_CLK_SHIFT 12
+#define GPT_CLK_MASK 0x1
+#define AUX_CLK_PLL3_MASK 0
+#define AUX_CLK_PLL1_MASK 1
+
+#define PERIP1_CLK_ENB ((unsigned int *)(MISC_BASE + 0x02C))
+/* PERIP1_CLK_ENB register masks */
+#define UART0_CLK_ENB 3
+#define UART1_CLK_ENB 4
+#define SSP0_CLK_ENB 5
+#define SSP1_CLK_ENB 6
+#define I2C_CLK_ENB 7
+#define JPEG_CLK_ENB 8
+#define FSMC_CLK_ENB 9
+#define FIRDA_CLK_ENB 10
+#define GPT2_CLK_ENB 11
+#define GPT3_CLK_ENB 12
+#define GPIO2_CLK_ENB 13
+#define SSP2_CLK_ENB 14
+#define ADC_CLK_ENB 15
+#define GPT1_CLK_ENB 11
+#define RTC_CLK_ENB 17
+#define GPIO1_CLK_ENB 18
+#define DMA_CLK_ENB 19
+#define SMI_CLK_ENB 21
+#define CLCD_CLK_ENB 22
+#define GMAC_CLK_ENB 23
+#define USBD_CLK_ENB 24
+#define USBH0_CLK_ENB 25
+#define USBH1_CLK_ENB 26
+
+#define SOC_CORE_ID ((unsigned int *)(MISC_BASE + 0x030))
+#define RAS_CLK_ENB ((unsigned int *)(MISC_BASE + 0x034))
+#define PERIP1_SOF_RST ((unsigned int *)(MISC_BASE + 0x038))
+/* PERIP1_SOF_RST register masks */
+#define JPEG_SOF_RST 8
+
+#define SOC_USER_ID ((unsigned int *)(MISC_BASE + 0x03C))
+#define RAS_SOF_RST ((unsigned int *)(MISC_BASE + 0x040))
+#define PRSC1_CLK_CFG ((unsigned int *)(MISC_BASE + 0x044))
+#define PRSC2_CLK_CFG ((unsigned int *)(MISC_BASE + 0x048))
+#define PRSC3_CLK_CFG ((unsigned int *)(MISC_BASE + 0x04C))
+/* gpt synthesizer register masks */
+#define GPT_MSCALE_SHIFT 0
+#define GPT_MSCALE_MASK 0xFFF
+#define GPT_NSCALE_SHIFT 12
+#define GPT_NSCALE_MASK 0xF
+
+#define AMEM_CLK_CFG ((unsigned int *)(MISC_BASE + 0x050))
+#define EXPI_CLK_CFG ((unsigned int *)(MISC_BASE + 0x054))
+#define CLCD_CLK_SYNT ((unsigned int *)(MISC_BASE + 0x05C))
+#define FIRDA_CLK_SYNT ((unsigned int *)(MISC_BASE + 0x060))
+#define UART_CLK_SYNT ((unsigned int *)(MISC_BASE + 0x064))
+#define GMAC_CLK_SYNT ((unsigned int *)(MISC_BASE + 0x068))
+#define RAS1_CLK_SYNT ((unsigned int *)(MISC_BASE + 0x06C))
+#define RAS2_CLK_SYNT ((unsigned int *)(MISC_BASE + 0x070))
+#define RAS3_CLK_SYNT ((unsigned int *)(MISC_BASE + 0x074))
+#define RAS4_CLK_SYNT ((unsigned int *)(MISC_BASE + 0x078))
+/* aux clk synthesiser register masks for irda to ras4 */
+#define AUX_EQ_SEL_SHIFT 30
+#define AUX_EQ_SEL_MASK 1
+#define AUX_EQ1_SEL 0
+#define AUX_EQ2_SEL 1
+#define AUX_XSCALE_SHIFT 16
+#define AUX_XSCALE_MASK 0xFFF
+#define AUX_YSCALE_SHIFT 0
+#define AUX_YSCALE_MASK 0xFFF
+
+#define ICM1_ARB_CFG ((unsigned int *)(MISC_BASE + 0x07C))
+#define ICM2_ARB_CFG ((unsigned int *)(MISC_BASE + 0x080))
+#define ICM3_ARB_CFG ((unsigned int *)(MISC_BASE + 0x084))
+#define ICM4_ARB_CFG ((unsigned int *)(MISC_BASE + 0x088))
+#define ICM5_ARB_CFG ((unsigned int *)(MISC_BASE + 0x08C))
+#define ICM6_ARB_CFG ((unsigned int *)(MISC_BASE + 0x090))
+#define ICM7_ARB_CFG ((unsigned int *)(MISC_BASE + 0x094))
+#define ICM8_ARB_CFG ((unsigned int *)(MISC_BASE + 0x098))
+#define ICM9_ARB_CFG ((unsigned int *)(MISC_BASE + 0x09C))
+#define DMA_CHN_CFG ((unsigned int *)(MISC_BASE + 0x0A0))
+#define USB2_PHY_CFG ((unsigned int *)(MISC_BASE + 0x0A4))
+#define GMAC_CFG_CTR ((unsigned int *)(MISC_BASE + 0x0A8))
+#define EXPI_CFG_CTR ((unsigned int *)(MISC_BASE + 0x0AC))
+#define PRC1_LOCK_CTR ((unsigned int *)(MISC_BASE + 0x0C0))
+#define PRC2_LOCK_CTR ((unsigned int *)(MISC_BASE + 0x0C4))
+#define PRC3_LOCK_CTR ((unsigned int *)(MISC_BASE + 0x0C8))
+#define PRC4_LOCK_CTR ((unsigned int *)(MISC_BASE + 0x0CC))
+#define PRC1_IRQ_CTR ((unsigned int *)(MISC_BASE + 0x0D0))
+#define PRC2_IRQ_CTR ((unsigned int *)(MISC_BASE + 0x0D4))
+#define PRC3_IRQ_CTR ((unsigned int *)(MISC_BASE + 0x0D8))
+#define PRC4_IRQ_CTR ((unsigned int *)(MISC_BASE + 0x0DC))
+#define PWRDOWN_CFG_CTR ((unsigned int *)(MISC_BASE + 0x0E0))
+#define COMPSSTL_1V8_CFG ((unsigned int *)(MISC_BASE + 0x0E4))
+#define COMPSSTL_2V5_CFG ((unsigned int *)(MISC_BASE + 0x0E8))
+#define COMPCOR_3V3_CFG ((unsigned int *)(MISC_BASE + 0x0EC))
+#define SSTLPAD_CFG_CTR ((unsigned int *)(MISC_BASE + 0x0F0))
+#define BIST1_CFG_CTR ((unsigned int *)(MISC_BASE + 0x0F4))
+#define BIST2_CFG_CTR ((unsigned int *)(MISC_BASE + 0x0F8))
+#define BIST3_CFG_CTR ((unsigned int *)(MISC_BASE + 0x0FC))
+#define BIST4_CFG_CTR ((unsigned int *)(MISC_BASE + 0x100))
+#define BIST5_CFG_CTR ((unsigned int *)(MISC_BASE + 0x104))
+#define BIST1_STS_RES ((unsigned int *)(MISC_BASE + 0x108))
+#define BIST2_STS_RES ((unsigned int *)(MISC_BASE + 0x10C))
+#define BIST3_STS_RES ((unsigned int *)(MISC_BASE + 0x110))
+#define BIST4_STS_RES ((unsigned int *)(MISC_BASE + 0x114))
+#define BIST5_STS_RES ((unsigned int *)(MISC_BASE + 0x118))
+#define SYSERR_CFG_CTR ((unsigned int *)(MISC_BASE + 0x11C))
+
+#endif /* __MACH_MISC_REGS_H */
diff --git a/arch/arm/mach-spear6xx/include/mach/spear.h b/arch/arm/mach-spear6xx/include/mach/spear.h
new file mode 100644
index 000000000000..a835f5b6b182
--- /dev/null
+++ b/arch/arm/mach-spear6xx/include/mach/spear.h
@@ -0,0 +1,173 @@
+/*
+ * arch/arm/mach-spear6xx/include/mach/spear.h
+ *
+ * SPEAr6xx Machine family specific definition
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Rajeev Kumar<rajeev-dlh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_SPEAR6XX_H
+#define __MACH_SPEAR6XX_H
+
+#include <mach/hardware.h>
+#include <mach/spear600.h>
+
+#define SPEAR6XX_ML_SDRAM_BASE 0x00000000
+#define SPEAR6XX_ML_SDRAM_SIZE 0x40000000
+
+/* ICM1 - Low speed connection */
+#define SPEAR6XX_ICM1_BASE 0xD0000000
+#define SPEAR6XX_ICM1_SIZE 0x08000000
+
+#define SPEAR6XX_ICM1_UART0_BASE 0xD0000000
+#define VA_SPEAR6XX_ICM1_UART0_BASE IO_ADDRESS(SPEAR6XX_ICM1_UART0_BASE)
+#define SPEAR6XX_ICM1_UART0_SIZE 0x00080000
+
+#define SPEAR6XX_ICM1_UART1_BASE 0xD0080000
+#define SPEAR6XX_ICM1_UART1_SIZE 0x00080000
+
+#define SPEAR6XX_ICM1_SSP0_BASE 0xD0100000
+#define SPEAR6XX_ICM1_SSP0_SIZE 0x00080000
+
+#define SPEAR6XX_ICM1_SSP1_BASE 0xD0180000
+#define SPEAR6XX_ICM1_SSP1_SIZE 0x00080000
+
+#define SPEAR6XX_ICM1_I2C_BASE 0xD0200000
+#define SPEAR6XX_ICM1_I2C_SIZE 0x00080000
+
+#define SPEAR6XX_ICM1_JPEG_BASE 0xD0800000
+#define SPEAR6XX_ICM1_JPEG_SIZE 0x00800000
+
+#define SPEAR6XX_ICM1_IRDA_BASE 0xD1000000
+#define SPEAR6XX_ICM1_IRDA_SIZE 0x00800000
+
+#define SPEAR6XX_ICM1_FSMC_BASE 0xD1800000
+#define SPEAR6XX_ICM1_FSMC_SIZE 0x00800000
+
+#define SPEAR6XX_ICM1_NAND_BASE 0xD2000000
+#define SPEAR6XX_ICM1_NAND_SIZE 0x00800000
+
+#define SPEAR6XX_ICM1_SRAM_BASE 0xD2800000
+#define SPEAR6XX_ICM1_SRAM_SIZE 0x00800000
+
+/* ICM2 - Application Subsystem */
+#define SPEAR6XX_ICM2_BASE 0xD8000000
+#define SPEAR6XX_ICM2_SIZE 0x08000000
+
+#define SPEAR6XX_ICM2_TMR0_BASE 0xD8000000
+#define SPEAR6XX_ICM2_TMR0_SIZE 0x00080000
+
+#define SPEAR6XX_ICM2_TMR1_BASE 0xD8080000
+#define SPEAR6XX_ICM2_TMR1_SIZE 0x00080000
+
+#define SPEAR6XX_ICM2_GPIO_BASE 0xD8100000
+#define SPEAR6XX_ICM2_GPIO_SIZE 0x00080000
+
+#define SPEAR6XX_ICM2_SPI2_BASE 0xD8180000
+#define SPEAR6XX_ICM2_SPI2_SIZE 0x00080000
+
+#define SPEAR6XX_ICM2_ADC_BASE 0xD8200000
+#define SPEAR6XX_ICM2_ADC_SIZE 0x00080000
+
+/* ML-1, 2 - Multi Layer CPU Subsystem */
+#define SPEAR6XX_ML_CPU_BASE 0xF0000000
+#define SPEAR6XX_ML_CPU_SIZE 0x08000000
+
+#define SPEAR6XX_CPU_TMR_BASE 0xF0000000
+#define SPEAR6XX_CPU_TMR_SIZE 0x00100000
+
+#define SPEAR6XX_CPU_GPIO_BASE 0xF0100000
+#define SPEAR6XX_CPU_GPIO_SIZE 0x00100000
+
+#define SPEAR6XX_CPU_VIC_SEC_BASE 0xF1000000
+#define VA_SPEAR6XX_CPU_VIC_SEC_BASE IO_ADDRESS(SPEAR6XX_CPU_VIC_SEC_BASE)
+#define SPEAR6XX_CPU_VIC_SEC_SIZE 0x00100000
+
+#define SPEAR6XX_CPU_VIC_PRI_BASE 0xF1100000
+#define VA_SPEAR6XX_CPU_VIC_PRI_BASE IO_ADDRESS(SPEAR6XX_CPU_VIC_PRI_BASE)
+#define SPEAR6XX_CPU_VIC_PRI_SIZE 0x00100000
+
+/* ICM3 - Basic Subsystem */
+#define SPEAR6XX_ICM3_BASE 0xF8000000
+#define SPEAR6XX_ICM3_SIZE 0x08000000
+
+#define SPEAR6XX_ICM3_SMEM_BASE 0xF8000000
+#define SPEAR6XX_ICM3_SMEM_SIZE 0x04000000
+
+#define SPEAR6XX_ICM3_SMI_CTRL_BASE 0xFC000000
+#define SPEAR6XX_ICM3_SMI_CTRL_SIZE 0x00200000
+
+#define SPEAR6XX_ICM3_CLCD_BASE 0xFC200000
+#define SPEAR6XX_ICM3_CLCD_SIZE 0x00200000
+
+#define SPEAR6XX_ICM3_DMA_BASE 0xFC400000
+#define SPEAR6XX_ICM3_DMA_SIZE 0x00200000
+
+#define SPEAR6XX_ICM3_SDRAM_CTRL_BASE 0xFC600000
+#define SPEAR6XX_ICM3_SDRAM_CTRL_SIZE 0x00200000
+
+#define SPEAR6XX_ICM3_TMR_BASE 0xFC800000
+#define SPEAR6XX_ICM3_TMR_SIZE 0x00080000
+
+#define SPEAR6XX_ICM3_WDT_BASE 0xFC880000
+#define SPEAR6XX_ICM3_WDT_SIZE 0x00080000
+
+#define SPEAR6XX_ICM3_RTC_BASE 0xFC900000
+#define SPEAR6XX_ICM3_RTC_SIZE 0x00080000
+
+#define SPEAR6XX_ICM3_GPIO_BASE 0xFC980000
+#define SPEAR6XX_ICM3_GPIO_SIZE 0x00080000
+
+#define SPEAR6XX_ICM3_SYS_CTRL_BASE 0xFCA00000
+#define VA_SPEAR6XX_ICM3_SYS_CTRL_BASE IO_ADDRESS(SPEAR6XX_ICM3_SYS_CTRL_BASE)
+#define SPEAR6XX_ICM3_SYS_CTRL_SIZE 0x00080000
+
+#define SPEAR6XX_ICM3_MISC_REG_BASE 0xFCA80000
+#define VA_SPEAR6XX_ICM3_MISC_REG_BASE IO_ADDRESS(SPEAR6XX_ICM3_MISC_REG_BASE)
+#define SPEAR6XX_ICM3_MISC_REG_SIZE 0x00080000
+
+/* ICM4 - High Speed Connection */
+#define SPEAR6XX_ICM4_BASE 0xE0000000
+#define SPEAR6XX_ICM4_SIZE 0x08000000
+
+#define SPEAR6XX_ICM4_GMAC_BASE 0xE0800000
+#define SPEAR6XX_ICM4_GMAC_SIZE 0x00800000
+
+#define SPEAR6XX_ICM4_USBD_FIFO_BASE 0xE1000000
+#define SPEAR6XX_ICM4_USBD_FIFO_SIZE 0x00100000
+
+#define SPEAR6XX_ICM4_USBD_CSR_BASE 0xE1100000
+#define SPEAR6XX_ICM4_USBD_CSR_SIZE 0x00100000
+
+#define SPEAR6XX_ICM4_USBD_PLDT_BASE 0xE1200000
+#define SPEAR6XX_ICM4_USBD_PLDT_SIZE 0x00100000
+
+#define SPEAR6XX_ICM4_USB_EHCI0_BASE 0xE1800000
+#define SPEAR6XX_ICM4_USB_EHCI0_SIZE 0x00100000
+
+#define SPEAR6XX_ICM4_USB_OHCI0_BASE 0xE1900000
+#define SPEAR6XX_ICM4_USB_OHCI0_SIZE 0x00100000
+
+#define SPEAR6XX_ICM4_USB_EHCI1_BASE 0xE2000000
+#define SPEAR6XX_ICM4_USB_EHCI1_SIZE 0x00100000
+
+#define SPEAR6XX_ICM4_USB_OHCI1_BASE 0xE2100000
+#define SPEAR6XX_ICM4_USB_OHCI1_SIZE 0x00100000
+
+#define SPEAR6XX_ICM4_USB_ARB_BASE 0xE2800000
+#define SPEAR6XX_ICM4_USB_ARB_SIZE 0x00010000
+
+/* Debug uart for linux, will be used for debug and uncompress messages */
+#define SPEAR_DBG_UART_BASE SPEAR6XX_ICM1_UART0_BASE
+#define VA_SPEAR_DBG_UART_BASE VA_SPEAR6XX_ICM1_UART0_BASE
+
+/* Sysctl base for spear platform */
+#define SPEAR_SYS_CTRL_BASE SPEAR6XX_ICM3_SYS_CTRL_BASE
+#define VA_SPEAR_SYS_CTRL_BASE VA_SPEAR6XX_ICM3_SYS_CTRL_BASE
+
+#endif /* __MACH_SPEAR6XX_H */
diff --git a/arch/arm/mach-spear6xx/include/mach/spear600.h b/arch/arm/mach-spear6xx/include/mach/spear600.h
new file mode 100644
index 000000000000..c068cc50b0fb
--- /dev/null
+++ b/arch/arm/mach-spear6xx/include/mach/spear600.h
@@ -0,0 +1,21 @@
+/*
+ * arch/arm/mach-spear6xx/include/mach/spear600.h
+ *
+ * SPEAr600 Machine specific definition
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifdef CONFIG_MACH_SPEAR600
+
+#ifndef __MACH_SPEAR600_H
+#define __MACH_SPEAR600_H
+
+#endif /* __MACH_SPEAR600_H */
+
+#endif /* CONFIG_MACH_SPEAR600 */
diff --git a/arch/arm/mach-spear6xx/include/mach/system.h b/arch/arm/mach-spear6xx/include/mach/system.h
new file mode 100644
index 000000000000..0b1d2be81cfb
--- /dev/null
+++ b/arch/arm/mach-spear6xx/include/mach/system.h
@@ -0,0 +1,19 @@
+/*
+ * arch/arm/mach-spear6xx/include/mach/system.h
+ *
+ * SPEAr6xx Machine family specific architecture functions
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Rajeev Kumar<rajeev-dlh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_SYSTEM_H
+#define __MACH_SYSTEM_H
+
+#include <plat/system.h>
+
+#endif /* __MACH_SYSTEM_H */
diff --git a/arch/arm/mach-spear6xx/include/mach/timex.h b/arch/arm/mach-spear6xx/include/mach/timex.h
new file mode 100644
index 000000000000..ac1c5b005695
--- /dev/null
+++ b/arch/arm/mach-spear6xx/include/mach/timex.h
@@ -0,0 +1,19 @@
+/*
+ * arch/arm/mach-spear6xx/include/mach/timex.h
+ *
+ * SPEAr6XX machine family specific timex definitions
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Rajeev Kumar<rajeev-dlh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_TIMEX_H
+#define __MACH_TIMEX_H
+
+#include <plat/timex.h>
+
+#endif /* __MACH_TIMEX_H */
diff --git a/arch/arm/mach-spear6xx/include/mach/uncompress.h b/arch/arm/mach-spear6xx/include/mach/uncompress.h
new file mode 100644
index 000000000000..77f0765e21e1
--- /dev/null
+++ b/arch/arm/mach-spear6xx/include/mach/uncompress.h
@@ -0,0 +1,19 @@
+/*
+ * arch/arm/mach-spear6xx/include/mach/uncompress.h
+ *
+ * Serial port stubs for kernel decompress status messages
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Rajeev Kumar<rajeev-dlh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_UNCOMPRESS_H
+#define __MACH_UNCOMPRESS_H
+
+#include <plat/uncompress.h>
+
+#endif /* __MACH_UNCOMPRESS_H */
diff --git a/arch/arm/mach-spear6xx/include/mach/vmalloc.h b/arch/arm/mach-spear6xx/include/mach/vmalloc.h
new file mode 100644
index 000000000000..4a0b56cb2a91
--- /dev/null
+++ b/arch/arm/mach-spear6xx/include/mach/vmalloc.h
@@ -0,0 +1,19 @@
+/*
+ * arch/arm/mach-spear6xx/include/mach/vmalloc.h
+ *
+ * Defining Vmalloc area for SPEAr6xx machine family
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Rajeev Kumar<rajeev-dlh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_VMALLOC_H
+#define __MACH_VMALLOC_H
+
+#include <plat/vmalloc.h>
+
+#endif /* __MACH_VMALLOC_H */
diff --git a/arch/arm/mach-spear6xx/spear600.c b/arch/arm/mach-spear6xx/spear600.c
new file mode 100644
index 000000000000..5c484c433dc1
--- /dev/null
+++ b/arch/arm/mach-spear6xx/spear600.c
@@ -0,0 +1,25 @@
+/*
+ * arch/arm/mach-spear6xx/spear600.c
+ *
+ * SPEAr600 machine source file
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Rajeev Kumar<rajeev-dlh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/ptrace.h>
+#include <asm/irq.h>
+#include <mach/generic.h>
+#include <mach/spear.h>
+
+/* Add spear600 specific devices here */
+
+void __init spear600_init(void)
+{
+ /* call spear6xx family common init function */
+ spear6xx_init();
+}
diff --git a/arch/arm/mach-spear6xx/spear600_evb.c b/arch/arm/mach-spear6xx/spear600_evb.c
new file mode 100644
index 000000000000..daff8d04f7b6
--- /dev/null
+++ b/arch/arm/mach-spear6xx/spear600_evb.c
@@ -0,0 +1,51 @@
+/*
+ * arch/arm/mach-spear6xx/spear600_evb.c
+ *
+ * SPEAr600 evaluation board source file
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <asm/mach/arch.h>
+#include <asm/mach-types.h>
+#include <mach/generic.h>
+#include <mach/spear.h>
+
+static struct amba_device *amba_devs[] __initdata = {
+ &gpio_device[0],
+ &gpio_device[1],
+ &gpio_device[2],
+ &uart_device[0],
+ &uart_device[1],
+};
+
+static struct platform_device *plat_devs[] __initdata = {
+};
+
+static void __init spear600_evb_init(void)
+{
+ unsigned int i;
+
+ /* call spear600 machine init function */
+ spear600_init();
+
+ /* Add Platform Devices */
+ platform_add_devices(plat_devs, ARRAY_SIZE(plat_devs));
+
+ /* Add Amba Devices */
+ for (i = 0; i < ARRAY_SIZE(amba_devs); i++)
+ amba_device_register(amba_devs[i], &iomem_resource);
+}
+
+MACHINE_START(SPEAR600, "ST-SPEAR600-EVB")
+ .boot_params = 0x00000100,
+ .map_io = spear6xx_map_io,
+ .init_irq = spear6xx_init_irq,
+ .timer = &spear_sys_timer,
+ .init_machine = spear600_evb_init,
+MACHINE_END
diff --git a/arch/arm/mach-spear6xx/spear6xx.c b/arch/arm/mach-spear6xx/spear6xx.c
new file mode 100644
index 000000000000..baf6bcc3169c
--- /dev/null
+++ b/arch/arm/mach-spear6xx/spear6xx.c
@@ -0,0 +1,157 @@
+/*
+ * arch/arm/mach-spear6xx/spear6xx.c
+ *
+ * SPEAr6XX machines common source file
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Rajeev Kumar<rajeev-dlh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/types.h>
+#include <linux/amba/pl061.h>
+#include <linux/ptrace.h>
+#include <linux/io.h>
+#include <asm/hardware/vic.h>
+#include <asm/irq.h>
+#include <asm/mach/arch.h>
+#include <mach/irqs.h>
+#include <mach/generic.h>
+#include <mach/spear.h>
+
+/* Add spear6xx machines common devices here */
+/* uart device registration */
+struct amba_device uart_device[] = {
+ {
+ .dev = {
+ .init_name = "uart0",
+ },
+ .res = {
+ .start = SPEAR6XX_ICM1_UART0_BASE,
+ .end = SPEAR6XX_ICM1_UART0_BASE +
+ SPEAR6XX_ICM1_UART0_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ .irq = {IRQ_UART_0, NO_IRQ},
+ }, {
+ .dev = {
+ .init_name = "uart1",
+ },
+ .res = {
+ .start = SPEAR6XX_ICM1_UART1_BASE,
+ .end = SPEAR6XX_ICM1_UART1_BASE +
+ SPEAR6XX_ICM1_UART1_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ .irq = {IRQ_UART_1, NO_IRQ},
+ }
+};
+
+/* gpio device registration */
+static struct pl061_platform_data gpio_plat_data[] = {
+ {
+ .gpio_base = 0,
+ .irq_base = SPEAR_GPIO0_INT_BASE,
+ }, {
+ .gpio_base = 8,
+ .irq_base = SPEAR_GPIO1_INT_BASE,
+ }, {
+ .gpio_base = 16,
+ .irq_base = SPEAR_GPIO2_INT_BASE,
+ },
+};
+
+struct amba_device gpio_device[] = {
+ {
+ .dev = {
+ .init_name = "gpio0",
+ .platform_data = &gpio_plat_data[0],
+ },
+ .res = {
+ .start = SPEAR6XX_CPU_GPIO_BASE,
+ .end = SPEAR6XX_CPU_GPIO_BASE +
+ SPEAR6XX_CPU_GPIO_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ .irq = {IRQ_LOCAL_GPIO, NO_IRQ},
+ }, {
+ .dev = {
+ .init_name = "gpio1",
+ .platform_data = &gpio_plat_data[1],
+ },
+ .res = {
+ .start = SPEAR6XX_ICM3_GPIO_BASE,
+ .end = SPEAR6XX_ICM3_GPIO_BASE +
+ SPEAR6XX_ICM3_GPIO_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ .irq = {IRQ_BASIC_GPIO, NO_IRQ},
+ }, {
+ .dev = {
+ .init_name = "gpio2",
+ .platform_data = &gpio_plat_data[2],
+ },
+ .res = {
+ .start = SPEAR6XX_ICM2_GPIO_BASE,
+ .end = SPEAR6XX_ICM2_GPIO_BASE +
+ SPEAR6XX_ICM2_GPIO_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ .irq = {IRQ_APPL_GPIO, NO_IRQ},
+ }
+};
+
+/* This will add devices and do machine-specific tasks */
+void __init spear6xx_init(void)
+{
+ /* nothing to do for now */
+}
+
+/* This will initialize vic */
+void __init spear6xx_init_irq(void)
+{
+ vic_init((void __iomem *)VA_SPEAR6XX_CPU_VIC_PRI_BASE, 0, ~0, 0);
+ vic_init((void __iomem *)VA_SPEAR6XX_CPU_VIC_SEC_BASE, 32, ~0, 0);
+}
+
+/* Following will create static virtual/physical mappings */
+static struct map_desc spear6xx_io_desc[] __initdata = {
+ {
+ .virtual = VA_SPEAR6XX_ICM1_UART0_BASE,
+ .pfn = __phys_to_pfn(SPEAR6XX_ICM1_UART0_BASE),
+ .length = SPEAR6XX_ICM1_UART0_SIZE,
+ .type = MT_DEVICE
+ }, {
+ .virtual = VA_SPEAR6XX_CPU_VIC_PRI_BASE,
+ .pfn = __phys_to_pfn(SPEAR6XX_CPU_VIC_PRI_BASE),
+ .length = SPEAR6XX_CPU_VIC_PRI_SIZE,
+ .type = MT_DEVICE
+ }, {
+ .virtual = VA_SPEAR6XX_CPU_VIC_SEC_BASE,
+ .pfn = __phys_to_pfn(SPEAR6XX_CPU_VIC_SEC_BASE),
+ .length = SPEAR6XX_CPU_VIC_SEC_SIZE,
+ .type = MT_DEVICE
+ }, {
+ .virtual = VA_SPEAR6XX_ICM3_SYS_CTRL_BASE,
+ .pfn = __phys_to_pfn(SPEAR6XX_ICM3_SYS_CTRL_BASE),
+ .length = SPEAR6XX_ICM3_SYS_CTRL_SIZE,
+ .type = MT_DEVICE
+ }, {
+ .virtual = VA_SPEAR6XX_ICM3_MISC_REG_BASE,
+ .pfn = __phys_to_pfn(SPEAR6XX_ICM3_MISC_REG_BASE),
+ .length = SPEAR6XX_ICM3_MISC_REG_SIZE,
+ .type = MT_DEVICE
+ },
+};
+
+/* This will create static memory mapping for selected devices */
+void __init spear6xx_map_io(void)
+{
+ iotable_init(spear6xx_io_desc, ARRAY_SIZE(spear6xx_io_desc));
+
+ /* This will initialize clock framework */
+ clk_init();
+}
diff --git a/arch/arm/mach-u300/i2c.c b/arch/arm/mach-u300/i2c.c
index c73ed06b6065..f0394baa11fa 100644
--- a/arch/arm/mach-u300/i2c.c
+++ b/arch/arm/mach-u300/i2c.c
@@ -9,7 +9,7 @@
*/
#include <linux/kernel.h>
#include <linux/i2c.h>
-#include <linux/mfd/ab3100.h>
+#include <linux/mfd/abx500.h>
#include <linux/regulator/machine.h>
#include <linux/amba/bus.h>
#include <mach/irqs.h>
@@ -46,6 +46,7 @@
/* BUCK SLEEP 0xAC: 1.05V, Not used, SLEEP_A and B, Not used */
#define BUCK_SLEEP_SETTING 0xAC
+#ifdef CONFIG_AB3100_CORE
static struct regulator_consumer_supply supply_ldo_c[] = {
{
.dev_name = "ab3100-codec",
@@ -253,14 +254,68 @@ static struct ab3100_platform_data ab3100_plf_data = {
LDO_D_SETTING,
},
};
+#endif
+
+#ifdef CONFIG_AB3550_CORE
+static struct abx500_init_settings ab3550_init_settings[] = {
+ {
+ .bank = 0,
+ .reg = AB3550_IMR1,
+ .setting = 0xff
+ },
+ {
+ .bank = 0,
+ .reg = AB3550_IMR2,
+ .setting = 0xff
+ },
+ {
+ .bank = 0,
+ .reg = AB3550_IMR3,
+ .setting = 0xff
+ },
+ {
+ .bank = 0,
+ .reg = AB3550_IMR4,
+ .setting = 0xff
+ },
+ {
+ .bank = 0,
+ .reg = AB3550_IMR5,
+ /* The two most significant bits are not used */
+ .setting = 0x3f
+ },
+};
+
+static struct ab3550_platform_data ab3550_plf_data = {
+ .irq = {
+ .base = IRQ_AB3550_BASE,
+ .count = (IRQ_AB3550_END - IRQ_AB3550_BASE + 1),
+ },
+ .dev_data = {
+ },
+ .init_settings = ab3550_init_settings,
+ .init_settings_sz = ARRAY_SIZE(ab3550_init_settings),
+};
+#endif
static struct i2c_board_info __initdata bus0_i2c_board_info[] = {
+#if defined(CONFIG_AB3550_CORE)
+ {
+ .type = "ab3550",
+ .addr = 0x4A,
+ .irq = IRQ_U300_IRQ0_EXT,
+ .platform_data = &ab3550_plf_data,
+ },
+#elif defined(CONFIG_AB3100_CORE)
{
.type = "ab3100",
.addr = 0x48,
.irq = IRQ_U300_IRQ0_EXT,
.platform_data = &ab3100_plf_data,
},
+#else
+ { },
+#endif
};
static struct i2c_board_info __initdata bus1_i2c_board_info[] = {
diff --git a/arch/arm/mach-u300/include/mach/coh901318.h b/arch/arm/mach-u300/include/mach/coh901318.h
index b8155b4e5ffa..193da2df732c 100644
--- a/arch/arm/mach-u300/include/mach/coh901318.h
+++ b/arch/arm/mach-u300/include/mach/coh901318.h
@@ -103,27 +103,6 @@ struct coh901318_platform {
};
/**
- * coh901318_get_bytes_left() - Get number of bytes left on a current transfer
- * @chan: dma channel handle
- * return number of bytes left, or negative on error
- */
-u32 coh901318_get_bytes_left(struct dma_chan *chan);
-
-/**
- * coh901318_stop() - Stops dma transfer
- * @chan: dma channel handle
- * return 0 on success otherwise negative value
- */
-void coh901318_stop(struct dma_chan *chan);
-
-/**
- * coh901318_continue() - Resumes a stopped dma transfer
- * @chan: dma channel handle
- * return 0 on success otherwise negative value
- */
-void coh901318_continue(struct dma_chan *chan);
-
-/**
* coh901318_filter_id() - DMA channel filter function
* @chan: dma channel handle
* @chan_id: id of dma channel to be filter out
diff --git a/arch/arm/mach-u300/include/mach/irqs.h b/arch/arm/mach-u300/include/mach/irqs.h
index a6867b12773e..09b1b28fa8fd 100644
--- a/arch/arm/mach-u300/include/mach/irqs.h
+++ b/arch/arm/mach-u300/include/mach/irqs.h
@@ -109,6 +109,13 @@
#define U300_NR_IRQS 48
#endif
+#ifdef CONFIG_AB3550_CORE
+#define IRQ_AB3550_BASE (U300_NR_IRQS)
+#define IRQ_AB3550_END (IRQ_AB3550_BASE + 37)
+
+#define NR_IRQS (IRQ_AB3550_END + 1)
+#else
#define NR_IRQS U300_NR_IRQS
+#endif
#endif
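Worked out with the U300_NR_IRQS value of 48 shown in the surrounding context, IRQ_AB3550_BASE is 48, IRQ_AB3550_END is 48 + 37 = 85 (38 interrupt lines reserved for the AB3550 expander), and NR_IRQS grows to 86.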
diff --git a/arch/arm/mach-u300/mmc.c b/arch/arm/mach-u300/mmc.c
index 77fbb1e0e528..88506d030596 100644
--- a/arch/arm/mach-u300/mmc.c
+++ b/arch/arm/mach-u300/mmc.c
@@ -102,11 +102,12 @@ int __devinit mmc_init(struct amba_device *adev)
* we have a regulator we can control instead.
*/
/* Nominally 2.85V on our platform */
+ mmci_card->mmc0_plat_data.f_max = 24000000;
mmci_card->mmc0_plat_data.status = mmc_status;
mmci_card->mmc0_plat_data.gpio_wp = -1;
mmci_card->mmc0_plat_data.gpio_cd = -1;
mmci_card->mmc0_plat_data.capabilities = MMC_CAP_MMC_HIGHSPEED |
- MMC_CAP_SD_HIGHSPEED | MMC_CAP_4_BIT_DATA;
+ MMC_CAP_SD_HIGHSPEED | MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA;
mmcsd_device->platform_data = (void *) &mmci_card->mmc0_plat_data;
diff --git a/arch/arm/mach-ux500/Kconfig b/arch/arm/mach-ux500/Kconfig
index 03625d744857..6625e5bbf4d6 100644
--- a/arch/arm/mach-ux500/Kconfig
+++ b/arch/arm/mach-ux500/Kconfig
@@ -1,15 +1,42 @@
-menu "ST-Ericsson platform type"
- depends on ARCH_U8500
+if ARCH_U8500
-comment "ST-Ericsson Multicore Mobile Platforms"
-
-config MACH_U8500_MOP
- bool "U8500 Early Development platform"
+config UX500_SOC_COMMON
+ bool
default y
select ARM_GIC
select HAS_MTU
+ select NOMADIK_GPIO
+
+config UX500_SOC_DB8500
+ bool
+
+config UX500_SOC_DB5500
+ bool
+
+choice
+ prompt "Ux500 target platform"
+ default MACH_U8500_MOP
+
+config MACH_U8500_MOP
+ bool "U8500 Development platform"
+ select UX500_SOC_DB8500
help
Include support for mop500 development platform
based on U8500 architecture. The platform is based
on early drop silicon version of 8500.
-endmenu
+
+config MACH_U5500
+ bool "U5500 Development platform"
+ select UX500_SOC_DB5500
+ help
+ Include support for the U5500 development platform.
+endchoice
+
+config UX500_DEBUG_UART
+ int "Ux500 UART to use for low-level debug"
+ default 2
+ help
+ Choose the UART on which kernel low-level debug messages should be
+ output.
+
+endif
diff --git a/arch/arm/mach-ux500/Makefile b/arch/arm/mach-ux500/Makefile
index 95e6e24c0042..c7bc4199e3a8 100644
--- a/arch/arm/mach-ux500/Makefile
+++ b/arch/arm/mach-ux500/Makefile
@@ -2,7 +2,9 @@
# Makefile for the linux kernel, U8500 machine.
#
-obj-y := clock.o
-obj-$(CONFIG_ARCH_U8500) += cpu-u8500.o
+obj-y := clock.o cpu.o devices.o
+obj-$(CONFIG_UX500_SOC_DB5500) += cpu-db5500.o devices-db5500.o
+obj-$(CONFIG_UX500_SOC_DB8500) += cpu-db8500.o devices-db8500.o
obj-$(CONFIG_MACH_U8500_MOP) += board-mop500.o
+obj-$(CONFIG_MACH_U5500) += board-u5500.o
obj-$(CONFIG_SMP) += platsmp.o headsmp.o localtimer.o
diff --git a/arch/arm/mach-ux500/board-mop500.c b/arch/arm/mach-ux500/board-mop500.c
index 803aec1d6728..bb8d7b771817 100644
--- a/arch/arm/mach-ux500/board-mop500.c
+++ b/arch/arm/mach-ux500/board-mop500.c
@@ -17,37 +17,14 @@
#include <linux/amba/pl022.h>
#include <linux/spi/spi.h>
-#include <asm/localtimer.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
-#include <plat/mtu.h>
#include <plat/i2c.h>
#include <mach/hardware.h>
#include <mach/setup.h>
-
-#define __MEM_4K_RESOURCE(x) \
- .res = {.start = (x), .end = (x) + SZ_4K - 1, .flags = IORESOURCE_MEM}
-
-/* These are active devices on this board */
-static struct amba_device uart0_device = {
- .dev = { .init_name = "uart0" },
- __MEM_4K_RESOURCE(U8500_UART0_BASE),
- .irq = {IRQ_UART0, NO_IRQ},
-};
-
-static struct amba_device uart1_device = {
- .dev = { .init_name = "uart1" },
- __MEM_4K_RESOURCE(U8500_UART1_BASE),
- .irq = {IRQ_UART1, NO_IRQ},
-};
-
-static struct amba_device uart2_device = {
- .dev = { .init_name = "uart2" },
- __MEM_4K_RESOURCE(U8500_UART2_BASE),
- .irq = {IRQ_UART2, NO_IRQ},
-};
+#include <mach/devices.h>
static void ab4500_spi_cs_control(u32 command)
{
@@ -73,7 +50,7 @@ struct pl022_config_chip ab4500_chip_info = {
static struct spi_board_info u8500_spi_devices[] = {
{
- .modalias = "ab4500",
+ .modalias = "ab8500",
.controller_data = &ab4500_chip_info,
.max_speed_hz = 12000000,
.bus_num = 0,
@@ -93,55 +70,8 @@ static struct pl022_ssp_controller ssp0_platform_data = {
.num_chipselect = 5,
};
-static struct amba_device pl022_device = {
- .dev = {
- .coherent_dma_mask = ~0,
- .init_name = "pl022",
- .platform_data = &ssp0_platform_data,
- },
- .res = {
- .start = U8500_SSP0_BASE,
- .end = U8500_SSP0_BASE + SZ_4K - 1,
- .flags = IORESOURCE_MEM,
- },
- .irq = {IRQ_SSP0, NO_IRQ },
- /* ST-Ericsson modified id */
- .periphid = SSP_PER_ID,
-};
-
-static struct amba_device pl031_device = {
- .dev = {
- .init_name = "pl031",
- },
- .res = {
- .start = U8500_RTC_BASE,
- .end = U8500_RTC_BASE + SZ_4K - 1,
- .flags = IORESOURCE_MEM,
- },
- .irq = {IRQ_RTC_RTT, NO_IRQ},
-};
-
-#define U8500_I2C_RESOURCES(id, size) \
-static struct resource u8500_i2c_resources_##id[] = { \
- [0] = { \
- .start = U8500_I2C##id##_BASE, \
- .end = U8500_I2C##id##_BASE + size - 1, \
- .flags = IORESOURCE_MEM, \
- }, \
- [1] = { \
- .start = IRQ_I2C##id, \
- .end = IRQ_I2C##id, \
- .flags = IORESOURCE_IRQ \
- } \
-}
-
-U8500_I2C_RESOURCES(0, SZ_4K);
-U8500_I2C_RESOURCES(1, SZ_4K);
-U8500_I2C_RESOURCES(2, SZ_4K);
-U8500_I2C_RESOURCES(3, SZ_4K);
-
#define U8500_I2C_CONTROLLER(id, _slsu, _tft, _rft, clk, _sm) \
-static struct nmk_i2c_controller u8500_i2c_##id = { \
+static struct nmk_i2c_controller u8500_i2c##id##_data = { \
/* \
* slave data setup time, which is \
* 250 ns,100ns,10ns which is 14,6,2 \
@@ -169,58 +99,32 @@ U8500_I2C_CONTROLLER(1, 0xe, 1, 1, 100000, I2C_FREQ_MODE_STANDARD);
U8500_I2C_CONTROLLER(2, 0xe, 1, 1, 100000, I2C_FREQ_MODE_STANDARD);
U8500_I2C_CONTROLLER(3, 0xe, 1, 1, 100000, I2C_FREQ_MODE_STANDARD);
-#define U8500_I2C_PDEVICE(cid) \
-static struct platform_device i2c_controller##cid = { \
- .name = "nmk-i2c", \
- .id = cid, \
- .num_resources = 2, \
- .resource = u8500_i2c_resources_##cid, \
- .dev = { \
- .platform_data = &u8500_i2c_##cid \
- } \
-}
-
-U8500_I2C_PDEVICE(0);
-U8500_I2C_PDEVICE(1);
-U8500_I2C_PDEVICE(2);
-U8500_I2C_PDEVICE(3);
-
static struct amba_device *amba_devs[] __initdata = {
- &uart0_device,
- &uart1_device,
- &uart2_device,
- &pl022_device,
- &pl031_device,
+ &ux500_uart0_device,
+ &ux500_uart1_device,
+ &ux500_uart2_device,
+ &u8500_ssp0_device,
};
/* add any platform devices here - TODO */
static struct platform_device *platform_devs[] __initdata = {
- &i2c_controller0,
- &i2c_controller1,
- &i2c_controller2,
- &i2c_controller3,
-};
-
-static void __init u8500_timer_init(void)
-{
-#ifdef CONFIG_LOCAL_TIMERS
- /* Setup the local timer base */
- twd_base = __io_address(U8500_TWD_BASE);
-#endif
- /* Setup the MTU base */
- mtu_base = __io_address(U8500_MTU0_BASE);
-
- nmdk_timer_init();
-}
-
-static struct sys_timer u8500_timer = {
- .init = u8500_timer_init,
+ &u8500_i2c0_device,
+ &ux500_i2c1_device,
+ &ux500_i2c2_device,
+ &ux500_i2c3_device,
};
static void __init u8500_init_machine(void)
{
int i;
+ u8500_i2c0_device.dev.platform_data = &u8500_i2c0_data;
+ ux500_i2c1_device.dev.platform_data = &u8500_i2c1_data;
+ ux500_i2c2_device.dev.platform_data = &u8500_i2c2_data;
+ ux500_i2c3_device.dev.platform_data = &u8500_i2c3_data;
+
+ u8500_ssp0_device.dev.platform_data = &ssp0_platform_data;
+
/* Register the active AMBA devices on this board */
for (i = 0; i < ARRAY_SIZE(amba_devs); i++)
amba_device_register(amba_devs[i], &iomem_resource);
@@ -239,8 +143,8 @@ MACHINE_START(U8500, "ST-Ericsson MOP500 platform")
.io_pg_offst = (IO_ADDRESS(U8500_UART2_BASE) >> 18) & 0xfffc,
.boot_params = 0x100,
.map_io = u8500_map_io,
- .init_irq = u8500_init_irq,
+ .init_irq = ux500_init_irq,
/* we re-use nomadik timer here */
- .timer = &u8500_timer,
+ .timer = &ux500_timer,
.init_machine = u8500_init_machine,
MACHINE_END
diff --git a/arch/arm/mach-ux500/board-u5500.c b/arch/arm/mach-ux500/board-u5500.c
new file mode 100644
index 000000000000..4430e69cf538
--- /dev/null
+++ b/arch/arm/mach-ux500/board-u5500.c
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/amba/bus.h>
+#include <linux/gpio.h>
+
+#include <asm/mach/arch.h>
+#include <asm/mach-types.h>
+
+#include <mach/hardware.h>
+#include <mach/devices.h>
+#include <mach/setup.h>
+
+static struct amba_device *amba_board_devs[] __initdata = {
+ &ux500_uart0_device,
+ &ux500_uart1_device,
+ &ux500_uart2_device,
+};
+
+static void __init u5500_init_machine(void)
+{
+ u5500_init_devices();
+
+ amba_add_devices(amba_board_devs, ARRAY_SIZE(amba_board_devs));
+}
+
+MACHINE_START(U8500, "ST-Ericsson U5500 Platform")
+ .phys_io = UX500_UART0_BASE,
+ .io_pg_offst = (IO_ADDRESS(UX500_UART0_BASE) >> 18) & 0xfffc,
+ .boot_params = 0x00000100,
+ .map_io = u5500_map_io,
+ .init_irq = ux500_init_irq,
+ .timer = &ux500_timer,
+ .init_machine = u5500_init_machine,
+MACHINE_END
diff --git a/arch/arm/mach-ux500/clock.c b/arch/arm/mach-ux500/clock.c
index 8359a73d0041..6544855af2f1 100644
--- a/arch/arm/mach-ux500/clock.c
+++ b/arch/arm/mach-ux500/clock.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2009 ST-Ericsson
- * heavily based on realview platform
+ * Copyright (C) 2009 STMicroelectronics
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -12,33 +12,130 @@
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/clk.h>
-#include <linux/mutex.h>
+#include <linux/io.h>
#include <asm/clkdev.h>
-/* currently the clk structure
- * just supports rate. This would
- * be extended as and when new devices are
- * added - TODO
- */
-struct clk {
- unsigned long rate;
-};
+#include <mach/hardware.h>
+#include "clock.h"
+
+#define PRCC_PCKEN 0x00
+#define PRCC_PCKDIS 0x04
+#define PRCC_KCKEN 0x08
+#define PRCC_KCKDIS 0x0C
+
+#define PRCM_YYCLKEN0_MGT_SET 0x510
+#define PRCM_YYCLKEN1_MGT_SET 0x514
+#define PRCM_YYCLKEN0_MGT_CLR 0x518
+#define PRCM_YYCLKEN1_MGT_CLR 0x51C
+#define PRCM_YYCLKEN0_MGT_VAL 0x520
+#define PRCM_YYCLKEN1_MGT_VAL 0x524
+
+#define PRCM_SVAMMDSPCLK_MGT 0x008
+#define PRCM_SIAMMDSPCLK_MGT 0x00C
+#define PRCM_SGACLK_MGT 0x014
+#define PRCM_UARTCLK_MGT 0x018
+#define PRCM_MSP02CLK_MGT 0x01C
+#define PRCM_MSP1CLK_MGT 0x288
+#define PRCM_I2CCLK_MGT 0x020
+#define PRCM_SDMMCCLK_MGT 0x024
+#define PRCM_SLIMCLK_MGT 0x028
+#define PRCM_PER1CLK_MGT 0x02C
+#define PRCM_PER2CLK_MGT 0x030
+#define PRCM_PER3CLK_MGT 0x034
+#define PRCM_PER5CLK_MGT 0x038
+#define PRCM_PER6CLK_MGT 0x03C
+#define PRCM_PER7CLK_MGT 0x040
+#define PRCM_LCDCLK_MGT 0x044
+#define PRCM_BMLCLK_MGT 0x04C
+#define PRCM_HSITXCLK_MGT 0x050
+#define PRCM_HSIRXCLK_MGT 0x054
+#define PRCM_HDMICLK_MGT 0x058
+#define PRCM_APEATCLK_MGT 0x05C
+#define PRCM_APETRACECLK_MGT 0x060
+#define PRCM_MCDECLK_MGT 0x064
+#define PRCM_IPI2CCLK_MGT 0x068
+#define PRCM_DSIALTCLK_MGT 0x06C
+#define PRCM_DMACLK_MGT 0x074
+#define PRCM_B2R2CLK_MGT 0x078
+#define PRCM_TVCLK_MGT 0x07C
+#define PRCM_UNIPROCLK_MGT 0x278
+#define PRCM_SSPCLK_MGT 0x280
+#define PRCM_RNGCLK_MGT 0x284
+#define PRCM_UICCCLK_MGT 0x27C
+
+#define PRCM_MGT_ENABLE (1 << 8)
+
+static DEFINE_SPINLOCK(clocks_lock);
+
+static void __clk_enable(struct clk *clk)
+{
+ if (clk->enabled++ == 0) {
+ if (clk->parent_cluster)
+ __clk_enable(clk->parent_cluster);
+
+ if (clk->parent_periph)
+ __clk_enable(clk->parent_periph);
+
+ if (clk->ops && clk->ops->enable)
+ clk->ops->enable(clk);
+ }
+}
int clk_enable(struct clk *clk)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&clocks_lock, flags);
+ __clk_enable(clk);
+ spin_unlock_irqrestore(&clocks_lock, flags);
+
return 0;
}
EXPORT_SYMBOL(clk_enable);
+static void __clk_disable(struct clk *clk)
+{
+ if (--clk->enabled == 0) {
+ if (clk->ops && clk->ops->disable)
+ clk->ops->disable(clk);
+
+ if (clk->parent_periph)
+ __clk_disable(clk->parent_periph);
+
+ if (clk->parent_cluster)
+ __clk_disable(clk->parent_cluster);
+ }
+}
+
void clk_disable(struct clk *clk)
{
+ unsigned long flags;
+
+ WARN_ON(!clk->enabled);
+
+ spin_lock_irqsave(&clocks_lock, flags);
+ __clk_disable(clk);
+ spin_unlock_irqrestore(&clocks_lock, flags);
}
EXPORT_SYMBOL(clk_disable);
unsigned long clk_get_rate(struct clk *clk)
{
- return clk->rate;
+ unsigned long rate;
+
+ if (clk->ops && clk->ops->get_rate)
+ return clk->ops->get_rate(clk);
+
+ rate = clk->rate;
+ if (!rate) {
+ if (clk->parent_periph)
+ rate = clk_get_rate(clk->parent_periph);
+ else if (clk->parent_cluster)
+ rate = clk_get_rate(clk->parent_cluster);
+ }
+
+ return rate;
}
EXPORT_SYMBOL(clk_get_rate);
@@ -56,37 +153,373 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
}
EXPORT_SYMBOL(clk_set_rate);
-/* ssp clock */
-static struct clk ssp_clk = {
- .rate = 48000000,
+static void clk_prcmu_enable(struct clk *clk)
+{
+ void __iomem *cg_set_reg = __io_address(U8500_PRCMU_BASE)
+ + PRCM_YYCLKEN0_MGT_SET + clk->prcmu_cg_off;
+
+ writel(1 << clk->prcmu_cg_bit, cg_set_reg);
+}
+
+static void clk_prcmu_disable(struct clk *clk)
+{
+ void __iomem *cg_clr_reg = __io_address(U8500_PRCMU_BASE)
+ + PRCM_YYCLKEN0_MGT_CLR + clk->prcmu_cg_off;
+
+ writel(1 << clk->prcmu_cg_bit, cg_clr_reg);
+}
+
+/* ED doesn't have the combined set/clr registers */
+static void clk_prcmu_ed_enable(struct clk *clk)
+{
+ void __iomem *addr = __io_address(U8500_PRCMU_BASE)
+ + clk->prcmu_cg_mgt;
+
+ writel(readl(addr) | PRCM_MGT_ENABLE, addr);
+}
+
+static void clk_prcmu_ed_disable(struct clk *clk)
+{
+ void __iomem *addr = __io_address(U8500_PRCMU_BASE)
+ + clk->prcmu_cg_mgt;
+
+ writel(readl(addr) & ~PRCM_MGT_ENABLE, addr);
+}
+
+static struct clkops clk_prcmu_ops = {
+ .enable = clk_prcmu_enable,
+ .disable = clk_prcmu_disable,
};
-/* fixed clock */
-static struct clk f38_clk = {
- .rate = 38400000,
+static unsigned int clkrst_base[] = {
+ [1] = U8500_CLKRST1_BASE,
+ [2] = U8500_CLKRST2_BASE,
+ [3] = U8500_CLKRST3_BASE,
+ [5] = U8500_CLKRST5_BASE,
+ [6] = U8500_CLKRST6_BASE,
+ [7] = U8500_CLKRST7_BASE_ED,
};
-static struct clk_lookup lookups[] = {
- {
- /* UART0 */
- .dev_id = "uart0",
- .clk = &f38_clk,
- }, { /* UART1 */
- .dev_id = "uart1",
- .clk = &f38_clk,
- }, { /* UART2 */
- .dev_id = "uart2",
- .clk = &f38_clk,
- }, { /* SSP */
- .dev_id = "pl022",
- .clk = &ssp_clk,
- }
+static void clk_prcc_enable(struct clk *clk)
+{
+ void __iomem *addr = __io_address(clkrst_base[clk->cluster]);
+
+ if (clk->prcc_kernel != -1)
+ writel(1 << clk->prcc_kernel, addr + PRCC_KCKEN);
+
+ if (clk->prcc_bus != -1)
+ writel(1 << clk->prcc_bus, addr + PRCC_PCKEN);
+}
+
+static void clk_prcc_disable(struct clk *clk)
+{
+ void __iomem *addr = __io_address(clkrst_base[clk->cluster]);
+
+ if (clk->prcc_bus != -1)
+ writel(1 << clk->prcc_bus, addr + PRCC_PCKDIS);
+
+ if (clk->prcc_kernel != -1)
+ writel(1 << clk->prcc_kernel, addr + PRCC_KCKDIS);
+}
+
+static struct clkops clk_prcc_ops = {
+ .enable = clk_prcc_enable,
+ .disable = clk_prcc_disable,
+};
+
+static struct clk clk_32khz = {
+ .rate = 32000,
+};
+
+/*
+ * PRCMU level clock gating
+ */
+
+/* Bank 0 */
+static DEFINE_PRCMU_CLK(svaclk, 0x0, 2, SVAMMDSPCLK);
+static DEFINE_PRCMU_CLK(siaclk, 0x0, 3, SIAMMDSPCLK);
+static DEFINE_PRCMU_CLK(sgaclk, 0x0, 4, SGACLK);
+static DEFINE_PRCMU_CLK_RATE(uartclk, 0x0, 5, UARTCLK, 38400000);
+static DEFINE_PRCMU_CLK(msp02clk, 0x0, 6, MSP02CLK);
+static DEFINE_PRCMU_CLK(msp1clk, 0x0, 7, MSP1CLK); /* v1 */
+static DEFINE_PRCMU_CLK_RATE(i2cclk, 0x0, 8, I2CCLK, 48000000);
+static DEFINE_PRCMU_CLK_RATE(sdmmcclk, 0x0, 9, SDMMCCLK, 50000000);
+static DEFINE_PRCMU_CLK(slimclk, 0x0, 10, SLIMCLK);
+static DEFINE_PRCMU_CLK(per1clk, 0x0, 11, PER1CLK);
+static DEFINE_PRCMU_CLK(per2clk, 0x0, 12, PER2CLK);
+static DEFINE_PRCMU_CLK(per3clk, 0x0, 13, PER3CLK);
+static DEFINE_PRCMU_CLK(per5clk, 0x0, 14, PER5CLK);
+static DEFINE_PRCMU_CLK_RATE(per6clk, 0x0, 15, PER6CLK, 133330000);
+static DEFINE_PRCMU_CLK_RATE(per7clk, 0x0, 16, PER7CLK, 100000000);
+static DEFINE_PRCMU_CLK(lcdclk, 0x0, 17, LCDCLK);
+static DEFINE_PRCMU_CLK(bmlclk, 0x0, 18, BMLCLK);
+static DEFINE_PRCMU_CLK(hsitxclk, 0x0, 19, HSITXCLK);
+static DEFINE_PRCMU_CLK(hsirxclk, 0x0, 20, HSIRXCLK);
+static DEFINE_PRCMU_CLK(hdmiclk, 0x0, 21, HDMICLK);
+static DEFINE_PRCMU_CLK(apeatclk, 0x0, 22, APEATCLK);
+static DEFINE_PRCMU_CLK(apetraceclk, 0x0, 23, APETRACECLK);
+static DEFINE_PRCMU_CLK(mcdeclk, 0x0, 24, MCDECLK);
+static DEFINE_PRCMU_CLK(ipi2clk, 0x0, 25, IPI2CCLK);
+static DEFINE_PRCMU_CLK(dsialtclk, 0x0, 26, DSIALTCLK); /* v1 */
+static DEFINE_PRCMU_CLK(dmaclk, 0x0, 27, DMACLK);
+static DEFINE_PRCMU_CLK(b2r2clk, 0x0, 28, B2R2CLK);
+static DEFINE_PRCMU_CLK(tvclk, 0x0, 29, TVCLK);
+static DEFINE_PRCMU_CLK(uniproclk, 0x0, 30, UNIPROCLK); /* v1 */
+static DEFINE_PRCMU_CLK_RATE(sspclk, 0x0, 31, SSPCLK, 48000000); /* v1 */
+
+/* Bank 1 */
+static DEFINE_PRCMU_CLK(rngclk, 0x4, 0, RNGCLK); /* v1 */
+static DEFINE_PRCMU_CLK(uiccclk, 0x4, 1, UICCCLK); /* v1 */
+
+/*
+ * PRCC level clock gating
+ * Format: per#, clk, PCKEN bit, KCKEN bit, parent
+ */
+
+/* Peripheral Cluster #1 */
+static DEFINE_PRCC_CLK(1, i2c4, 10, 9, &clk_i2cclk);
+static DEFINE_PRCC_CLK(1, gpio0, 9, -1, NULL);
+static DEFINE_PRCC_CLK(1, slimbus0, 8, 8, &clk_slimclk);
+static DEFINE_PRCC_CLK(1, spi3_ed, 7, 7, NULL);
+static DEFINE_PRCC_CLK(1, spi3_v1, 7, -1, NULL);
+static DEFINE_PRCC_CLK(1, i2c2, 6, 6, &clk_i2cclk);
+static DEFINE_PRCC_CLK(1, sdi0, 5, 5, &clk_sdmmcclk);
+static DEFINE_PRCC_CLK(1, msp1_ed, 4, 4, &clk_msp02clk);
+static DEFINE_PRCC_CLK(1, msp1_v1, 4, 4, &clk_msp1clk);
+static DEFINE_PRCC_CLK(1, msp0, 3, 3, &clk_msp02clk);
+static DEFINE_PRCC_CLK(1, i2c1, 2, 2, &clk_i2cclk);
+static DEFINE_PRCC_CLK(1, uart1, 1, 1, &clk_uartclk);
+static DEFINE_PRCC_CLK(1, uart0, 0, 0, &clk_uartclk);
+
+/* Peripheral Cluster #2 */
+
+static DEFINE_PRCC_CLK(2, gpio1_ed, 12, -1, NULL);
+static DEFINE_PRCC_CLK(2, ssitx_ed, 11, -1, NULL);
+static DEFINE_PRCC_CLK(2, ssirx_ed, 10, -1, NULL);
+static DEFINE_PRCC_CLK(2, spi0_ed, 9, -1, NULL);
+static DEFINE_PRCC_CLK(2, sdi3_ed, 8, 6, &clk_sdmmcclk);
+static DEFINE_PRCC_CLK(2, sdi1_ed, 7, 5, &clk_sdmmcclk);
+static DEFINE_PRCC_CLK(2, msp2_ed, 6, 4, &clk_msp02clk);
+static DEFINE_PRCC_CLK(2, sdi4_ed, 4, 2, &clk_sdmmcclk);
+static DEFINE_PRCC_CLK(2, pwl_ed, 3, 1, NULL);
+static DEFINE_PRCC_CLK(2, spi1_ed, 2, -1, NULL);
+static DEFINE_PRCC_CLK(2, spi2_ed, 1, -1, NULL);
+static DEFINE_PRCC_CLK(2, i2c3_ed, 0, 0, &clk_i2cclk);
+
+static DEFINE_PRCC_CLK(2, gpio1_v1, 11, -1, NULL);
+static DEFINE_PRCC_CLK(2, ssitx_v1, 10, 7, NULL);
+static DEFINE_PRCC_CLK(2, ssirx_v1, 9, 6, NULL);
+static DEFINE_PRCC_CLK(2, spi0_v1, 8, -1, NULL);
+static DEFINE_PRCC_CLK(2, sdi3_v1, 7, 5, &clk_sdmmcclk);
+static DEFINE_PRCC_CLK(2, sdi1_v1, 6, 4, &clk_sdmmcclk);
+static DEFINE_PRCC_CLK(2, msp2_v1, 5, 3, &clk_msp02clk);
+static DEFINE_PRCC_CLK(2, sdi4_v1, 4, 2, &clk_sdmmcclk);
+static DEFINE_PRCC_CLK(2, pwl_v1, 3, 1, NULL);
+static DEFINE_PRCC_CLK(2, spi1_v1, 2, -1, NULL);
+static DEFINE_PRCC_CLK(2, spi2_v1, 1, -1, NULL);
+static DEFINE_PRCC_CLK(2, i2c3_v1, 0, 0, &clk_i2cclk);
+
+/* Peripheral Cluster #3 */
+static DEFINE_PRCC_CLK(3, gpio2, 8, -1, NULL);
+static DEFINE_PRCC_CLK(3, sdi5, 7, 7, &clk_sdmmcclk);
+static DEFINE_PRCC_CLK(3, uart2, 6, 6, &clk_uartclk);
+static DEFINE_PRCC_CLK(3, ske, 5, 5, &clk_32khz);
+static DEFINE_PRCC_CLK(3, sdi2, 4, 4, &clk_sdmmcclk);
+static DEFINE_PRCC_CLK(3, i2c0, 3, 3, &clk_i2cclk);
+static DEFINE_PRCC_CLK(3, ssp1_ed, 2, 2, &clk_i2cclk);
+static DEFINE_PRCC_CLK(3, ssp0_ed, 1, 1, &clk_i2cclk);
+static DEFINE_PRCC_CLK(3, ssp1_v1, 2, 2, &clk_sspclk);
+static DEFINE_PRCC_CLK(3, ssp0_v1, 1, 1, &clk_sspclk);
+static DEFINE_PRCC_CLK(3, fsmc, 0, -1, NULL);
+
+/* Peripheral Cluster #4 is in the always on domain */
+
+/* Peripheral Cluster #5 */
+static DEFINE_PRCC_CLK(5, gpio3, 1, -1, NULL);
+static DEFINE_PRCC_CLK(5, usb_ed, 0, 0, &clk_i2cclk);
+static DEFINE_PRCC_CLK(5, usb_v1, 0, 0, NULL);
+
+/* Peripheral Cluster #6 */
+
+static DEFINE_PRCC_CLK(6, mtu1_v1, 8, -1, NULL);
+static DEFINE_PRCC_CLK(6, mtu0_v1, 7, -1, NULL);
+static DEFINE_PRCC_CLK(6, cfgreg_v1, 6, 6, NULL);
+static DEFINE_PRCC_CLK(6, dmc_ed, 6, 6, NULL);
+static DEFINE_PRCC_CLK(6, hash1, 5, -1, NULL);
+static DEFINE_PRCC_CLK(6, unipro_v1, 4, 1, &clk_uniproclk);
+static DEFINE_PRCC_CLK(6, cryp1_ed, 4, -1, NULL);
+static DEFINE_PRCC_CLK(6, pka, 3, -1, NULL);
+static DEFINE_PRCC_CLK(6, hash0, 2, -1, NULL);
+static DEFINE_PRCC_CLK(6, cryp0, 1, -1, NULL);
+static DEFINE_PRCC_CLK(6, rng_ed, 0, 0, &clk_i2cclk);
+static DEFINE_PRCC_CLK(6, rng_v1, 0, 0, &clk_rngclk);
+
+/* Peripheral Cluster #7 */
+
+static DEFINE_PRCC_CLK(7, tzpc0_ed, 4, -1, NULL);
+static DEFINE_PRCC_CLK(7, mtu1_ed, 3, -1, NULL);
+static DEFINE_PRCC_CLK(7, mtu0_ed, 2, -1, NULL);
+static DEFINE_PRCC_CLK(7, wdg_ed, 1, -1, NULL);
+static DEFINE_PRCC_CLK(7, cfgreg_ed, 0, -1, NULL);
+
+static struct clk_lookup u8500_common_clks[] = {
+ /* Peripheral Cluster #1 */
+ CLK(gpio0, "gpio.0", NULL),
+ CLK(gpio0, "gpio.1", NULL),
+ CLK(slimbus0, "slimbus0", NULL),
+ CLK(i2c2, "nmk-i2c.2", NULL),
+ CLK(sdi0, "sdi0", NULL),
+ CLK(msp0, "msp0", NULL),
+ CLK(i2c1, "nmk-i2c.1", NULL),
+ CLK(uart1, "uart1", NULL),
+ CLK(uart0, "uart0", NULL),
+
+ /* Peripheral Cluster #3 */
+ CLK(gpio2, "gpio.2", NULL),
+ CLK(gpio2, "gpio.3", NULL),
+ CLK(gpio2, "gpio.4", NULL),
+ CLK(gpio2, "gpio.5", NULL),
+ CLK(sdi5, "sdi5", NULL),
+ CLK(uart2, "uart2", NULL),
+ CLK(ske, "ske", NULL),
+ CLK(sdi2, "sdi2", NULL),
+ CLK(i2c0, "nmk-i2c.0", NULL),
+ CLK(fsmc, "fsmc", NULL),
+
+ /* Peripheral Cluster #5 */
+ CLK(gpio3, "gpio.8", NULL),
+
+ /* Peripheral Cluster #6 */
+ CLK(hash1, "hash1", NULL),
+ CLK(pka, "pka", NULL),
+ CLK(hash0, "hash0", NULL),
+ CLK(cryp0, "cryp0", NULL),
+
+ /* PRCMU level clock gating */
+
+ /* Bank 0 */
+ CLK(svaclk, "sva", NULL),
+ CLK(siaclk, "sia", NULL),
+ CLK(sgaclk, "sga", NULL),
+ CLK(slimclk, "slim", NULL),
+ CLK(lcdclk, "lcd", NULL),
+ CLK(bmlclk, "bml", NULL),
+ CLK(hsitxclk, "stm-hsi.0", NULL),
+ CLK(hsirxclk, "stm-hsi.1", NULL),
+ CLK(hdmiclk, "hdmi", NULL),
+ CLK(apeatclk, "apeat", NULL),
+ CLK(apetraceclk, "apetrace", NULL),
+ CLK(mcdeclk, "mcde", NULL),
+ CLK(ipi2clk, "ipi2", NULL),
+ CLK(dmaclk, "dma40.0", NULL),
+ CLK(b2r2clk, "b2r2", NULL),
+ CLK(tvclk, "tv", NULL),
+};
+
+static struct clk_lookup u8500_ed_clks[] = {
+ /* Peripheral Cluster #1 */
+ CLK(spi3_ed, "spi3", NULL),
+ CLK(msp1_ed, "msp1", NULL),
+
+ /* Peripheral Cluster #2 */
+ CLK(gpio1_ed, "gpio.6", NULL),
+ CLK(gpio1_ed, "gpio.7", NULL),
+ CLK(ssitx_ed, "ssitx", NULL),
+ CLK(ssirx_ed, "ssirx", NULL),
+ CLK(spi0_ed, "spi0", NULL),
+ CLK(sdi3_ed, "sdi3", NULL),
+ CLK(sdi1_ed, "sdi1", NULL),
+ CLK(msp2_ed, "msp2", NULL),
+ CLK(sdi4_ed, "sdi4", NULL),
+ CLK(pwl_ed, "pwl", NULL),
+ CLK(spi1_ed, "spi1", NULL),
+ CLK(spi2_ed, "spi2", NULL),
+ CLK(i2c3_ed, "nmk-i2c.3", NULL),
+
+ /* Peripheral Cluster #3 */
+ CLK(ssp1_ed, "ssp1", NULL),
+ CLK(ssp0_ed, "ssp0", NULL),
+
+ /* Peripheral Cluster #5 */
+ CLK(usb_ed, "musb_hdrc.0", "usb"),
+
+ /* Peripheral Cluster #6 */
+ CLK(dmc_ed, "dmc", NULL),
+ CLK(cryp1_ed, "cryp1", NULL),
+ CLK(rng_ed, "rng", NULL),
+
+ /* Peripheral Cluster #7 */
+ CLK(tzpc0_ed, "tzpc0", NULL),
+ CLK(mtu1_ed, "mtu1", NULL),
+ CLK(mtu0_ed, "mtu0", NULL),
+ CLK(wdg_ed, "wdg", NULL),
+ CLK(cfgreg_ed, "cfgreg", NULL),
+};
+
+static struct clk_lookup u8500_v1_clks[] = {
+ /* Peripheral Cluster #1 */
+ CLK(i2c4, "nmk-i2c.4", NULL),
+ CLK(spi3_v1, "spi3", NULL),
+ CLK(msp1_v1, "msp1", NULL),
+
+ /* Peripheral Cluster #2 */
+ CLK(gpio1_v1, "gpio.6", NULL),
+ CLK(gpio1_v1, "gpio.7", NULL),
+ CLK(ssitx_v1, "ssitx", NULL),
+ CLK(ssirx_v1, "ssirx", NULL),
+ CLK(spi0_v1, "spi0", NULL),
+ CLK(sdi3_v1, "sdi3", NULL),
+ CLK(sdi1_v1, "sdi1", NULL),
+ CLK(msp2_v1, "msp2", NULL),
+ CLK(sdi4_v1, "sdi4", NULL),
+ CLK(pwl_v1, "pwl", NULL),
+ CLK(spi1_v1, "spi1", NULL),
+ CLK(spi2_v1, "spi2", NULL),
+ CLK(i2c3_v1, "nmk-i2c.3", NULL),
+
+ /* Peripheral Cluster #3 */
+ CLK(ssp1_v1, "ssp1", NULL),
+ CLK(ssp0_v1, "ssp0", NULL),
+
+ /* Peripheral Cluster #5 */
+ CLK(usb_v1, "musb_hdrc.0", "usb"),
+
+ /* Peripheral Cluster #6 */
+ CLK(mtu1_v1, "mtu1", NULL),
+ CLK(mtu0_v1, "mtu0", NULL),
+ CLK(cfgreg_v1, "cfgreg", NULL),
+ CLK(hash1, "hash1", NULL),
+ CLK(unipro_v1, "unipro", NULL),
+ CLK(rng_v1, "rng", NULL),
+
+ /* PRCMU level clock gating */
+
+ /* Bank 0 */
+ CLK(uniproclk, "uniproclk", NULL),
+ CLK(dsialtclk, "dsialt", NULL),
+
+ /* Bank 1 */
+ CLK(rngclk, "rng", NULL),
+ CLK(uiccclk, "uicc", NULL),
};
static int __init clk_init(void)
{
- /* register the clock lookups */
- clkdev_add_table(lookups, ARRAY_SIZE(lookups));
+ if (cpu_is_u8500ed()) {
+ clk_prcmu_ops.enable = clk_prcmu_ed_enable;
+ clk_prcmu_ops.disable = clk_prcmu_ed_disable;
+ } else if (cpu_is_u5500()) {
+ /* Clock tree for U5500 not implemented yet */
+ clk_prcc_ops.enable = clk_prcc_ops.disable = NULL;
+ clk_prcmu_ops.enable = clk_prcmu_ops.disable = NULL;
+ }
+
+ clkdev_add_table(u8500_common_clks, ARRAY_SIZE(u8500_common_clks));
+ if (cpu_is_u8500ed())
+ clkdev_add_table(u8500_ed_clks, ARRAY_SIZE(u8500_ed_clks));
+ else
+ clkdev_add_table(u8500_v1_clks, ARRAY_SIZE(u8500_v1_clks));
+
return 0;
}
arch_initcall(clk_init);
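A minimal usage sketch of the refcounted clock operations above, assuming a consumer built on <linux/clk.h> and <linux/err.h> whose struct device is named "uart1", so that clk_get() resolves it through the "uart1" dev_id registered in u8500_common_clks; the function name is illustrative only:

static int example_uart1_clk_setup(struct device *dev)
{
	struct clk *clk;

	/* clkdev matches dev_name(dev), here "uart1", against the dev_id */
	clk = clk_get(dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/*
	 * First enable: __clk_enable() walks up to clk_per1clk (PRCMU PER1CLK
	 * gate) and clk_uartclk (PRCMU UARTCLK gate) before setting uart1's
	 * own PRCC bus/kernel enable bits.
	 */
	clk_enable(clk);

	/* ... peripheral in use ... */

	clk_disable(clk);	/* refcounted: only the last call gates the clock */
	clk_put(clk);
	return 0;
}

Because the refcount is kept per struct clk, nested enables only touch the hardware on the 0-to-1 and 1-to-0 transitions.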
diff --git a/arch/arm/mach-ux500/clock.h b/arch/arm/mach-ux500/clock.h
new file mode 100644
index 000000000000..e4f99b65026f
--- /dev/null
+++ b/arch/arm/mach-ux500/clock.h
@@ -0,0 +1,125 @@
+/*
+ * Copyright (C) 2010 ST-Ericsson
+ * Copyright (C) 2009 STMicroelectronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/**
+ * struct clkops - ux500 clock operations
+ * @enable: function to enable the clock
+ * @disable: function to disable the clock
+ * @get_rate: function to get the current clock rate
+ *
+ * This structure contains function pointers to functions that will be used to
+ * control the clock. All of these functions are optional. If get_rate is
+ * NULL, the rate in the struct clk will be used.
+ */
+struct clkops {
+ void (*enable) (struct clk *);
+ void (*disable) (struct clk *);
+ unsigned long (*get_rate) (struct clk *);
+};
+
+/**
+ * struct clk - ux500 clock structure
+ * @ops: pointer to clkops struct used to control this clock
+ * @name: name, for debugging
+ * @enabled: refcount. positive if enabled, zero if disabled
+ * @rate: fixed rate for clocks which don't implement
+ * ops->get_rate
+ * @prcmu_cg_off: address offset of the combined enable/disable register
+ * (used on u8500v1)
+ * @prcmu_cg_bit: bit in the combined enable/disable register (used on
+ * u8500v1)
+ * @prcmu_cg_mgt: address of the enable/disable register (used on
+ * u8500ed)
+ * @cluster: peripheral cluster number
+ * @prcc_bus: bit for the bus clock in the peripheral's CLKRST
+ * @prcc_kernel: bit for the kernel clock in the peripheral's CLKRST.
+ * -1 if no kernel clock exists.
+ * @parent_cluster: pointer to parent's cluster clk struct
+ * @parent_periph: pointer to parent's peripheral clk struct
+ *
+ * Peripherals are organised into clusters, and each cluster has an associated
+ * bus clock. Some peripherals also have a parent peripheral clock.
+ *
+ * In order to enable a clock for a peripheral, we need to enable:
+ * (1) the parent cluster (bus) clock at the PRCMU level
+ * (2) the parent peripheral clock (if any) at the PRCMU level
+ * (3) the peripheral's bus & kernel clock at the PRCC level
+ *
+ * (1) and (2) are handled by defining clk structs (DEFINE_PRCMU_CLK) for each
+ * of the cluster and peripheral clocks, and hooking these as the parents of
+ * the individual peripheral clocks.
+ *
+ * (3) is handled by specifying the bits in the PRCC control registers required
+ * to enable these clocks and modifying them in the ->enable and
+ * ->disable callbacks of the peripheral clocks (DEFINE_PRCC_CLK).
+ *
+ * This structure describes both the PRCMU-level clocks and PRCC-level clocks.
+ * The prcmu_* fields are only used for the PRCMU clocks, and the cluster,
+ * prcc, and parent pointers are only used for the PRCC-level clocks.
+ */
+struct clk {
+ const struct clkops *ops;
+ const char *name;
+ unsigned int enabled;
+
+ unsigned long rate;
+ struct list_head list;
+
+ /* These three are only for PRCMU clks */
+
+ unsigned int prcmu_cg_off;
+ unsigned int prcmu_cg_bit;
+ unsigned int prcmu_cg_mgt;
+
+ /* The rest are only for PRCC clks */
+
+ int cluster;
+ unsigned int prcc_bus;
+ unsigned int prcc_kernel;
+
+ struct clk *parent_cluster;
+ struct clk *parent_periph;
+};
+
+#define DEFINE_PRCMU_CLK(_name, _cg_off, _cg_bit, _reg) \
+struct clk clk_##_name = { \
+ .name = #_name, \
+ .ops = &clk_prcmu_ops, \
+ .prcmu_cg_off = _cg_off, \
+ .prcmu_cg_bit = _cg_bit, \
+ .prcmu_cg_mgt = PRCM_##_reg##_MGT \
+ }
+
+#define DEFINE_PRCMU_CLK_RATE(_name, _cg_off, _cg_bit, _reg, _rate) \
+struct clk clk_##_name = { \
+ .name = #_name, \
+ .ops = &clk_prcmu_ops, \
+ .prcmu_cg_off = _cg_off, \
+ .prcmu_cg_bit = _cg_bit, \
+ .rate = _rate, \
+ .prcmu_cg_mgt = PRCM_##_reg##_MGT \
+ }
+
+#define DEFINE_PRCC_CLK(_pclust, _name, _bus_en, _kernel_en, _kernclk) \
+struct clk clk_##_name = { \
+ .name = #_name, \
+ .ops = &clk_prcc_ops, \
+ .cluster = _pclust, \
+ .prcc_bus = _bus_en, \
+ .prcc_kernel = _kernel_en, \
+ .parent_cluster = &clk_per##_pclust##clk, \
+ .parent_periph = _kernclk \
+ }
+
+#define CLK(_clk, _devname, _conname) \
+ { \
+ .clk = &clk_##_clk, \
+ .dev_id = _devname, \
+ .con_id = _conname, \
+ }
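For reference, a sketch of what one instance from clock.c, DEFINE_PRCC_CLK(1, uart0, 0, 0, &clk_uartclk), expands to under the macro above (initializer layout approximate):

struct clk clk_uart0 = {
	.name		= "uart0",
	.ops		= &clk_prcc_ops,
	.cluster	= 1,			/* CLKRST1 block */
	.prcc_bus	= 0,			/* PCKEN/PCKDIS bit 0 */
	.prcc_kernel	= 0,			/* KCKEN/KCKDIS bit 0 */
	.parent_cluster	= &clk_per1clk,		/* PRCMU PER1CLK gate */
	.parent_periph	= &clk_uartclk,		/* PRCMU UARTCLK gate, 38.4 MHz */
};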
diff --git a/arch/arm/mach-ux500/cpu-db5500.c b/arch/arm/mach-ux500/cpu-db5500.c
new file mode 100644
index 000000000000..6a3ac4539f16
--- /dev/null
+++ b/arch/arm/mach-ux500/cpu-db5500.c
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/platform_device.h>
+#include <linux/amba/bus.h>
+#include <linux/io.h>
+
+#include <asm/mach/map.h>
+
+#include <mach/hardware.h>
+#include <mach/devices.h>
+#include <mach/setup.h>
+
+static struct map_desc u5500_io_desc[] __initdata = {
+ __IO_DEV_DESC(U5500_GPIO0_BASE, SZ_4K),
+ __IO_DEV_DESC(U5500_GPIO1_BASE, SZ_4K),
+ __IO_DEV_DESC(U5500_GPIO2_BASE, SZ_4K),
+ __IO_DEV_DESC(U5500_GPIO3_BASE, SZ_4K),
+ __IO_DEV_DESC(U5500_GPIO4_BASE, SZ_4K),
+};
+
+static struct platform_device *u5500_platform_devs[] __initdata = {
+ &u5500_gpio_devs[0],
+ &u5500_gpio_devs[1],
+ &u5500_gpio_devs[2],
+ &u5500_gpio_devs[3],
+ &u5500_gpio_devs[4],
+ &u5500_gpio_devs[5],
+ &u5500_gpio_devs[6],
+ &u5500_gpio_devs[7],
+};
+
+void __init u5500_map_io(void)
+{
+ ux500_map_io();
+
+ iotable_init(u5500_io_desc, ARRAY_SIZE(u5500_io_desc));
+}
+
+void __init u5500_init_devices(void)
+{
+ ux500_init_devices();
+
+ platform_add_devices(u5500_platform_devs,
+ ARRAY_SIZE(u5500_platform_devs));
+}
diff --git a/arch/arm/mach-ux500/cpu-u8500.c b/arch/arm/mach-ux500/cpu-db8500.c
index 397bc1f9ed94..f21c444edd99 100644
--- a/arch/arm/mach-ux500/cpu-u8500.c
+++ b/arch/arm/mach-ux500/cpu-db8500.c
@@ -13,44 +13,56 @@
#include <linux/device.h>
#include <linux/amba/bus.h>
#include <linux/irq.h>
+#include <linux/gpio.h>
#include <linux/platform_device.h>
+#include <linux/io.h>
-#include <asm/hardware/gic.h>
#include <asm/mach/map.h>
#include <mach/hardware.h>
+#include <mach/setup.h>
+#include <mach/devices.h>
-/* add any platform devices here - TODO */
static struct platform_device *platform_devs[] __initdata = {
- /* yet to be added, add i2c0, gpio.. */
+ &u8500_gpio_devs[0],
+ &u8500_gpio_devs[1],
+ &u8500_gpio_devs[2],
+ &u8500_gpio_devs[3],
+ &u8500_gpio_devs[4],
+ &u8500_gpio_devs[5],
+ &u8500_gpio_devs[6],
+ &u8500_gpio_devs[7],
+ &u8500_gpio_devs[8],
+ &u8500_dma40_device,
};
-#define __IO_DEV_DESC(x, sz) { \
- .virtual = IO_ADDRESS(x), \
- .pfn = __phys_to_pfn(x), \
- .length = sz, \
- .type = MT_DEVICE, \
-}
-
/* minimum static i/o mapping required to boot U8500 platforms */
static struct map_desc u8500_io_desc[] __initdata = {
- __IO_DEV_DESC(U8500_UART2_BASE, SZ_4K),
- __IO_DEV_DESC(U8500_GIC_CPU_BASE, SZ_4K),
- __IO_DEV_DESC(U8500_GIC_DIST_BASE, SZ_4K),
+ __IO_DEV_DESC(U8500_PRCMU_BASE, SZ_4K),
+ __IO_DEV_DESC(U8500_GPIO0_BASE, SZ_4K),
+ __IO_DEV_DESC(U8500_GPIO1_BASE, SZ_4K),
+ __IO_DEV_DESC(U8500_GPIO2_BASE, SZ_4K),
+ __IO_DEV_DESC(U8500_GPIO3_BASE, SZ_4K),
+};
+
+static struct map_desc u8500ed_io_desc[] __initdata = {
+ __IO_DEV_DESC(U8500_MTU0_BASE_ED, SZ_4K),
+ __IO_DEV_DESC(U8500_CLKRST7_BASE_ED, SZ_8K),
+};
+
+static struct map_desc u8500v1_io_desc[] __initdata = {
__IO_DEV_DESC(U8500_MTU0_BASE, SZ_4K),
- __IO_DEV_DESC(U8500_TWD_BASE, SZ_4K),
- __IO_DEV_DESC(U8500_SCU_BASE, SZ_4K),
- __IO_DEV_DESC(U8500_BACKUPRAM0_BASE, SZ_8K),
};
void __init u8500_map_io(void)
{
+ ux500_map_io();
+
iotable_init(u8500_io_desc, ARRAY_SIZE(u8500_io_desc));
-}
-void __init u8500_init_irq(void)
-{
- gic_dist_init(0, __io_address(U8500_GIC_DIST_BASE), 29);
- gic_cpu_init(0, __io_address(U8500_GIC_CPU_BASE));
+ if (cpu_is_u8500ed())
+ iotable_init(u8500ed_io_desc, ARRAY_SIZE(u8500ed_io_desc));
+ else
+ iotable_init(u8500v1_io_desc, ARRAY_SIZE(u8500v1_io_desc));
}
/*
@@ -58,6 +70,11 @@ void __init u8500_init_irq(void)
*/
void __init u8500_init_devices(void)
{
+ ux500_init_devices();
+
+ if (cpu_is_u8500ed())
+ dma40_u8500ed_fixup();
+
/* Register the platform devices */
platform_add_devices(platform_devs, ARRAY_SIZE(platform_devs));
diff --git a/arch/arm/mach-ux500/cpu.c b/arch/arm/mach-ux500/cpu.c
new file mode 100644
index 000000000000..d81ad023963c
--- /dev/null
+++ b/arch/arm/mach-ux500/cpu.c
@@ -0,0 +1,99 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/platform_device.h>
+#include <linux/amba/bus.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+
+#include <asm/hardware/cache-l2x0.h>
+#include <asm/hardware/gic.h>
+#include <asm/mach/map.h>
+#include <asm/localtimer.h>
+
+#include <plat/mtu.h>
+#include <mach/hardware.h>
+#include <mach/setup.h>
+#include <mach/devices.h>
+
+#include "clock.h"
+
+static struct map_desc ux500_io_desc[] __initdata = {
+ __IO_DEV_DESC(UX500_UART0_BASE, SZ_4K),
+ __IO_DEV_DESC(UX500_UART2_BASE, SZ_4K),
+
+ __IO_DEV_DESC(UX500_GIC_CPU_BASE, SZ_4K),
+ __IO_DEV_DESC(UX500_GIC_DIST_BASE, SZ_4K),
+ __IO_DEV_DESC(UX500_L2CC_BASE, SZ_4K),
+ __IO_DEV_DESC(UX500_TWD_BASE, SZ_4K),
+ __IO_DEV_DESC(UX500_SCU_BASE, SZ_4K),
+
+ __IO_DEV_DESC(UX500_CLKRST1_BASE, SZ_4K),
+ __IO_DEV_DESC(UX500_CLKRST2_BASE, SZ_4K),
+ __IO_DEV_DESC(UX500_CLKRST3_BASE, SZ_4K),
+ __IO_DEV_DESC(UX500_CLKRST5_BASE, SZ_4K),
+ __IO_DEV_DESC(UX500_CLKRST6_BASE, SZ_4K),
+
+ __IO_DEV_DESC(UX500_MTU0_BASE, SZ_4K),
+ __IO_DEV_DESC(UX500_MTU1_BASE, SZ_4K),
+
+ __IO_DEV_DESC(UX500_BACKUPRAM0_BASE, SZ_8K),
+};
+
+static struct amba_device *ux500_amba_devs[] __initdata = {
+ &ux500_pl031_device,
+};
+
+void __init ux500_map_io(void)
+{
+ iotable_init(ux500_io_desc, ARRAY_SIZE(ux500_io_desc));
+}
+
+void __init ux500_init_devices(void)
+{
+ amba_add_devices(ux500_amba_devs, ARRAY_SIZE(ux500_amba_devs));
+}
+
+void __init ux500_init_irq(void)
+{
+ gic_dist_init(0, __io_address(UX500_GIC_DIST_BASE), 29);
+ gic_cpu_init(0, __io_address(UX500_GIC_CPU_BASE));
+}
+
+#ifdef CONFIG_CACHE_L2X0
+static int ux500_l2x0_init(void)
+{
+ void __iomem *l2x0_base;
+
+ l2x0_base = __io_address(UX500_L2CC_BASE);
+
+ /* 64KB way size, 8 way associativity, force WA */
+ l2x0_init(l2x0_base, 0x3e060000, 0xc0000fff);
+
+ return 0;
+}
+early_initcall(ux500_l2x0_init);
+#endif
+
+static void __init ux500_timer_init(void)
+{
+#ifdef CONFIG_LOCAL_TIMERS
+ /* Setup the local timer base */
+ twd_base = __io_address(UX500_TWD_BASE);
+#endif
+ /* Setup the MTU base */
+ if (cpu_is_u8500ed())
+ mtu_base = __io_address(U8500_MTU0_BASE_ED);
+ else
+ mtu_base = __io_address(UX500_MTU0_BASE);
+
+ nmdk_timer_init();
+}
+
+struct sys_timer ux500_timer = {
+ .init = ux500_timer_init,
+};
diff --git a/arch/arm/mach-ux500/devices-db5500.c b/arch/arm/mach-ux500/devices-db5500.c
new file mode 100644
index 000000000000..33e5b56bebb6
--- /dev/null
+++ b/arch/arm/mach-ux500/devices-db5500.c
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+
+#include <mach/hardware.h>
+#include <mach/devices.h>
+
+static struct nmk_gpio_platform_data u5500_gpio_data[] = {
+ GPIO_DATA("GPIO-0-31", 0),
+ GPIO_DATA("GPIO-32-63", 32), /* 36..63 not routed to pin */
+ GPIO_DATA("GPIO-64-95", 64), /* 83..95 not routed to pin */
+ GPIO_DATA("GPIO-96-127", 96), /* 102..127 not routed to pin */
+ GPIO_DATA("GPIO-128-159", 128), /* 149..159 not routed to pin */
+ GPIO_DATA("GPIO-160-191", 160),
+ GPIO_DATA("GPIO-192-223", 192),
+ GPIO_DATA("GPIO-224-255", 224), /* 228..255 not routed to pin */
+};
+
+static struct resource u5500_gpio_resources[] = {
+ GPIO_RESOURCE(0),
+ GPIO_RESOURCE(1),
+ GPIO_RESOURCE(2),
+ GPIO_RESOURCE(3),
+ GPIO_RESOURCE(4),
+ GPIO_RESOURCE(5),
+ GPIO_RESOURCE(6),
+ GPIO_RESOURCE(7),
+};
+
+struct platform_device u5500_gpio_devs[] = {
+ GPIO_DEVICE(0),
+ GPIO_DEVICE(1),
+ GPIO_DEVICE(2),
+ GPIO_DEVICE(3),
+ GPIO_DEVICE(4),
+ GPIO_DEVICE(5),
+ GPIO_DEVICE(6),
+ GPIO_DEVICE(7),
+};
diff --git a/arch/arm/mach-ux500/devices-db8500.c b/arch/arm/mach-ux500/devices-db8500.c
new file mode 100644
index 000000000000..822903421943
--- /dev/null
+++ b/arch/arm/mach-ux500/devices-db8500.c
@@ -0,0 +1,216 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/amba/bus.h>
+
+#include <plat/ste_dma40.h>
+
+#include <mach/hardware.h>
+#include <mach/setup.h>
+
+#include "ste-dma40-db8500.h"
+
+static struct nmk_gpio_platform_data u8500_gpio_data[] = {
+ GPIO_DATA("GPIO-0-31", 0),
+ GPIO_DATA("GPIO-32-63", 32), /* 37..63 not routed to pin */
+ GPIO_DATA("GPIO-64-95", 64),
+ GPIO_DATA("GPIO-96-127", 96), /* 98..127 not routed to pin */
+ GPIO_DATA("GPIO-128-159", 128),
+ GPIO_DATA("GPIO-160-191", 160), /* 172..191 not routed to pin */
+ GPIO_DATA("GPIO-192-223", 192),
+ GPIO_DATA("GPIO-224-255", 224), /* 231..255 not routed to pin */
+ GPIO_DATA("GPIO-256-288", 256), /* 268..288 not routed to pin */
+};
+
+static struct resource u8500_gpio_resources[] = {
+ GPIO_RESOURCE(0),
+ GPIO_RESOURCE(1),
+ GPIO_RESOURCE(2),
+ GPIO_RESOURCE(3),
+ GPIO_RESOURCE(4),
+ GPIO_RESOURCE(5),
+ GPIO_RESOURCE(6),
+ GPIO_RESOURCE(7),
+ GPIO_RESOURCE(8),
+};
+
+struct platform_device u8500_gpio_devs[] = {
+ GPIO_DEVICE(0),
+ GPIO_DEVICE(1),
+ GPIO_DEVICE(2),
+ GPIO_DEVICE(3),
+ GPIO_DEVICE(4),
+ GPIO_DEVICE(5),
+ GPIO_DEVICE(6),
+ GPIO_DEVICE(7),
+ GPIO_DEVICE(8),
+};
+
+struct amba_device u8500_ssp0_device = {
+ .dev = {
+ .coherent_dma_mask = ~0,
+ .init_name = "ssp0",
+ },
+ .res = {
+ .start = U8500_SSP0_BASE,
+ .end = U8500_SSP0_BASE + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ .irq = {IRQ_SSP0, NO_IRQ },
+ /* ST-Ericsson modified id */
+ .periphid = SSP_PER_ID,
+};
+
+static struct resource u8500_i2c0_resources[] = {
+ [0] = {
+ .start = U8500_I2C0_BASE,
+ .end = U8500_I2C0_BASE + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = IRQ_I2C0,
+ .end = IRQ_I2C0,
+ .flags = IORESOURCE_IRQ,
+ }
+};
+
+struct platform_device u8500_i2c0_device = {
+ .name = "nmk-i2c",
+ .id = 0,
+ .resource = u8500_i2c0_resources,
+ .num_resources = ARRAY_SIZE(u8500_i2c0_resources),
+};
+
+static struct resource u8500_i2c4_resources[] = {
+ [0] = {
+ .start = U8500_I2C4_BASE,
+ .end = U8500_I2C4_BASE + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = IRQ_I2C4,
+ .end = IRQ_I2C4,
+ .flags = IORESOURCE_IRQ,
+ }
+};
+
+struct platform_device u8500_i2c4_device = {
+ .name = "nmk-i2c",
+ .id = 4,
+ .resource = u8500_i2c4_resources,
+ .num_resources = ARRAY_SIZE(u8500_i2c4_resources),
+};
+
+static struct resource dma40_resources[] = {
+ [0] = {
+ .start = U8500_DMA_BASE,
+ .end = U8500_DMA_BASE + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ .name = "base",
+ },
+ [1] = {
+ .start = U8500_DMA_LCPA_BASE,
+ .end = U8500_DMA_LCPA_BASE + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ .name = "lcpa",
+ },
+ [2] = {
+ .start = U8500_DMA_LCLA_BASE,
+ .end = U8500_DMA_LCLA_BASE + 16 * 1024 - 1,
+ .flags = IORESOURCE_MEM,
+ .name = "lcla",
+ },
+ [3] = {
+ .start = IRQ_DMA,
+ .end = IRQ_DMA,
+ .flags = IORESOURCE_IRQ}
+};
+
+/* Default configuration for physical memcpy */
+struct stedma40_chan_cfg dma40_memcpy_conf_phy = {
+ .channel_type = (STEDMA40_CHANNEL_IN_PHY_MODE |
+ STEDMA40_LOW_PRIORITY_CHANNEL |
+ STEDMA40_PCHAN_BASIC_MODE),
+ .dir = STEDMA40_MEM_TO_MEM,
+
+ .src_info.endianess = STEDMA40_LITTLE_ENDIAN,
+ .src_info.data_width = STEDMA40_BYTE_WIDTH,
+ .src_info.psize = STEDMA40_PSIZE_PHY_1,
+
+ .dst_info.endianess = STEDMA40_LITTLE_ENDIAN,
+ .dst_info.data_width = STEDMA40_BYTE_WIDTH,
+ .dst_info.psize = STEDMA40_PSIZE_PHY_1,
+
+};
+/* Default configuration for logical memcpy */
+struct stedma40_chan_cfg dma40_memcpy_conf_log = {
+ .channel_type = (STEDMA40_CHANNEL_IN_LOG_MODE |
+ STEDMA40_LOW_PRIORITY_CHANNEL |
+ STEDMA40_LCHAN_SRC_LOG_DST_LOG |
+ STEDMA40_NO_TIM_FOR_LINK),
+ .dir = STEDMA40_MEM_TO_MEM,
+
+ .src_info.endianess = STEDMA40_LITTLE_ENDIAN,
+ .src_info.data_width = STEDMA40_BYTE_WIDTH,
+ .src_info.psize = STEDMA40_PSIZE_LOG_1,
+
+ .dst_info.endianess = STEDMA40_LITTLE_ENDIAN,
+ .dst_info.data_width = STEDMA40_BYTE_WIDTH,
+ .dst_info.psize = STEDMA40_PSIZE_LOG_1,
+
+};
+
+/*
+ * Mapping between destination event lines and physical device address.
+ * The event line is tied to a device and therefore the address is constant.
+ */
+static const dma_addr_t dma40_tx_map[STEDMA40_NR_DEV];
+
+/* Mapping between source event lines and physical device address */
+static const dma_addr_t dma40_rx_map[STEDMA40_NR_DEV];
+
+/* Reserved event lines for memcpy only */
+static int dma40_memcpy_event[] = {
+ STEDMA40_MEMCPY_TX_1,
+ STEDMA40_MEMCPY_TX_2,
+ STEDMA40_MEMCPY_TX_3,
+ STEDMA40_MEMCPY_TX_4,
+};
+
+static struct stedma40_platform_data dma40_plat_data = {
+ .dev_len = STEDMA40_NR_DEV,
+ .dev_rx = dma40_rx_map,
+ .dev_tx = dma40_tx_map,
+ .memcpy = dma40_memcpy_event,
+ .memcpy_len = ARRAY_SIZE(dma40_memcpy_event),
+ .memcpy_conf_phy = &dma40_memcpy_conf_phy,
+ .memcpy_conf_log = &dma40_memcpy_conf_log,
+ .llis_per_log = 8,
+};
+
+struct platform_device u8500_dma40_device = {
+ .dev = {
+ .platform_data = &dma40_plat_data,
+ },
+ .name = "dma40",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(dma40_resources),
+ .resource = dma40_resources
+};
+
+void dma40_u8500ed_fixup(void)
+{
+ dma40_plat_data.memcpy = NULL;
+ dma40_plat_data.memcpy_len = 0;
+ dma40_resources[0].start = U8500_DMA_BASE_ED;
+ dma40_resources[0].end = U8500_DMA_BASE_ED + SZ_4K - 1;
+}
diff --git a/arch/arm/mach-ux500/devices.c b/arch/arm/mach-ux500/devices.c
new file mode 100644
index 000000000000..8a268893cb7f
--- /dev/null
+++ b/arch/arm/mach-ux500/devices.c
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/amba/bus.h>
+
+#include <mach/hardware.h>
+#include <mach/setup.h>
+
+#define __MEM_4K_RESOURCE(x) \
+ .res = {.start = (x), .end = (x) + SZ_4K - 1, .flags = IORESOURCE_MEM}
+
+struct amba_device ux500_pl031_device = {
+ .dev = {
+ .init_name = "pl031",
+ },
+ .res = {
+ .start = UX500_RTC_BASE,
+ .end = UX500_RTC_BASE + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ .irq = {IRQ_RTC_RTT, NO_IRQ},
+};
+
+struct amba_device ux500_uart0_device = {
+ .dev = { .init_name = "uart0" },
+ __MEM_4K_RESOURCE(UX500_UART0_BASE),
+ .irq = {IRQ_UART0, NO_IRQ},
+};
+
+struct amba_device ux500_uart1_device = {
+ .dev = { .init_name = "uart1" },
+ __MEM_4K_RESOURCE(UX500_UART1_BASE),
+ .irq = {IRQ_UART1, NO_IRQ},
+};
+
+struct amba_device ux500_uart2_device = {
+ .dev = { .init_name = "uart2" },
+ __MEM_4K_RESOURCE(UX500_UART2_BASE),
+ .irq = {IRQ_UART2, NO_IRQ},
+};
+
+#define UX500_I2C_RESOURCES(id, size) \
+static struct resource ux500_i2c##id##_resources[] = { \
+ [0] = { \
+ .start = UX500_I2C##id##_BASE, \
+ .end = UX500_I2C##id##_BASE + size - 1, \
+ .flags = IORESOURCE_MEM, \
+ }, \
+ [1] = { \
+ .start = IRQ_I2C##id, \
+ .end = IRQ_I2C##id, \
+ .flags = IORESOURCE_IRQ \
+ } \
+}
+
+UX500_I2C_RESOURCES(1, SZ_4K);
+UX500_I2C_RESOURCES(2, SZ_4K);
+UX500_I2C_RESOURCES(3, SZ_4K);
+
+#define UX500_I2C_PDEVICE(cid) \
+struct platform_device ux500_i2c##cid##_device = { \
+ .name = "nmk-i2c", \
+ .id = cid, \
+ .num_resources = 2, \
+ .resource = ux500_i2c##cid##_resources, \
+}
+
+UX500_I2C_PDEVICE(1);
+UX500_I2C_PDEVICE(2);
+UX500_I2C_PDEVICE(3);
+
+void __init amba_add_devices(struct amba_device *devs[], int num)
+{
+ int i;
+
+ for (i = 0; i < num; i++) {
+ struct amba_device *d = devs[i];
+ amba_device_register(d, &iomem_resource);
+ }
+}
diff --git a/arch/arm/mach-ux500/include/mach/db5500-regs.h b/arch/arm/mach-ux500/include/mach/db5500-regs.h
new file mode 100644
index 000000000000..545c80fc8024
--- /dev/null
+++ b/arch/arm/mach-ux500/include/mach/db5500-regs.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef __MACH_DB5500_REGS_H
+#define __MACH_DB5500_REGS_H
+
+#define U5500_PER1_BASE 0xA0020000
+#define U5500_PER2_BASE 0xA0010000
+#define U5500_PER3_BASE 0x80140000
+#define U5500_PER4_BASE 0x80150000
+#define U5500_PER5_BASE 0x80100000
+#define U5500_PER6_BASE 0x80120000
+
+#define U5500_GIC_DIST_BASE 0xA0411000
+#define U5500_GIC_CPU_BASE 0xA0410100
+#define U5500_DMA_BASE 0x90030000
+#define U5500_MCDE_BASE 0xA0400000
+#define U5500_MODEM_BASE 0xB0000000
+#define U5500_L2CC_BASE 0xA0412000
+#define U5500_SCU_BASE 0xA0410000
+#define U5500_DSI1_BASE 0xA0401000
+#define U5500_DSI2_BASE 0xA0402000
+#define U5500_SIA_BASE 0xA0100000
+#define U5500_SVA_BASE 0x80200000
+#define U5500_HSEM_BASE 0xA0000000
+#define U5500_NAND0_BASE 0x60000000
+#define U5500_NAND1_BASE 0x70000000
+#define U5500_TWD_BASE 0xa0410600
+#define U5500_B2R2_BASE 0xa0200000
+
+#define U5500_FSMC_BASE (U5500_PER1_BASE + 0x0000)
+#define U5500_SDI0_BASE (U5500_PER1_BASE + 0x1000)
+#define U5500_SDI2_BASE (U5500_PER1_BASE + 0x2000)
+#define U5500_UART0_BASE (U5500_PER1_BASE + 0x3000)
+#define U5500_I2C1_BASE (U5500_PER1_BASE + 0x4000)
+#define U5500_MSP0_BASE (U5500_PER1_BASE + 0x5000)
+#define U5500_GPIO0_BASE (U5500_PER1_BASE + 0xE000)
+#define U5500_CLKRST1_BASE (U5500_PER1_BASE + 0xF000)
+
+#define U5500_USBOTG_BASE (U5500_PER2_BASE + 0x0000)
+#define U5500_GPIO1_BASE (U5500_PER2_BASE + 0xE000)
+#define U5500_CLKRST2_BASE (U5500_PER2_BASE + 0xF000)
+
+#define U5500_KEYPAD_BASE (U5500_PER3_BASE + 0x0000)
+#define U5500_PWM_BASE (U5500_PER3_BASE + 0x1000)
+#define U5500_GPIO3_BASE (U5500_PER3_BASE + 0xE000)
+#define U5500_CLKRST3_BASE (U5500_PER3_BASE + 0xF000)
+
+#define U5500_BACKUPRAM0_BASE (U5500_PER4_BASE + 0x0000)
+#define U5500_BACKUPRAM1_BASE (U5500_PER4_BASE + 0x1000)
+#define U5500_RTT0_BASE (U5500_PER4_BASE + 0x2000)
+#define U5500_RTT1_BASE (U5500_PER4_BASE + 0x3000)
+#define U5500_RTC_BASE (U5500_PER4_BASE + 0x4000)
+#define U5500_SCR_BASE (U5500_PER4_BASE + 0x5000)
+#define U5500_DMC_BASE (U5500_PER4_BASE + 0x6000)
+#define U5500_PRCMU_BASE (U5500_PER4_BASE + 0x7000)
+#define U5500_MSP1_BASE (U5500_PER4_BASE + 0x9000)
+#define U5500_GPIO2_BASE (U5500_PER4_BASE + 0xA000)
+#define U5500_CDETECT_BASE (U5500_PER4_BASE + 0xF000)
+
+#define U5500_SPI0_BASE (U5500_PER5_BASE + 0x0000)
+#define U5500_SPI1_BASE (U5500_PER5_BASE + 0x1000)
+#define U5500_SPI2_BASE (U5500_PER5_BASE + 0x2000)
+#define U5500_SPI3_BASE (U5500_PER5_BASE + 0x3000)
+#define U5500_UART1_BASE (U5500_PER5_BASE + 0x4000)
+#define U5500_UART2_BASE (U5500_PER5_BASE + 0x5000)
+#define U5500_UART3_BASE (U5500_PER5_BASE + 0x6000)
+#define U5500_SDI1_BASE (U5500_PER5_BASE + 0x7000)
+#define U5500_SDI3_BASE (U5500_PER5_BASE + 0x8000)
+#define U5500_SDI4_BASE (U5500_PER5_BASE + 0x9000)
+#define U5500_I2C2_BASE (U5500_PER5_BASE + 0xA000)
+#define U5500_I2C3_BASE (U5500_PER5_BASE + 0xB000)
+#define U5500_MSP2_BASE (U5500_PER5_BASE + 0xC000)
+#define U5500_IRDA_BASE (U5500_PER5_BASE + 0xD000)
+#define U5500_IRRC_BASE (U5500_PER5_BASE + 0x10000)
+#define U5500_GPIO4_BASE (U5500_PER5_BASE + 0x1E000)
+#define U5500_CLKRST5_BASE (U5500_PER5_BASE + 0x1F000)
+
+#define U5500_RNG_BASE (U5500_PER6_BASE + 0x0000)
+#define U5500_HASH0_BASE (U5500_PER6_BASE + 0x1000)
+#define U5500_HASH1_BASE (U5500_PER6_BASE + 0x2000)
+#define U5500_PKA_BASE (U5500_PER6_BASE + 0x4000)
+#define U5500_PKAM_BASE (U5500_PER6_BASE + 0x5000)
+#define U5500_MTU0_BASE (U5500_PER6_BASE + 0x6000)
+#define U5500_MTU1_BASE (U5500_PER6_BASE + 0x7000)
+#define U5500_CR_BASE (U5500_PER6_BASE + 0x8000)
+#define U5500_CRYP0_BASE (U5500_PER6_BASE + 0xA000)
+#define U5500_CRYP1_BASE (U5500_PER6_BASE + 0xB000)
+#define U5500_CLKRST6_BASE (U5500_PER6_BASE + 0xF000)
+
+#define U5500_GPIOBANK0_BASE U5500_GPIO0_BASE
+#define U5500_GPIOBANK1_BASE (U5500_GPIO0_BASE + 0x80)
+#define U5500_GPIOBANK2_BASE U5500_GPIO1_BASE
+#define U5500_GPIOBANK3_BASE U5500_GPIO2_BASE
+#define U5500_GPIOBANK4_BASE U5500_GPIO3_BASE
+#define U5500_GPIOBANK5_BASE U5500_GPIO4_BASE
+#define U5500_GPIOBANK6_BASE (U5500_GPIO4_BASE + 0x80)
+#define U5500_GPIOBANK7_BASE (U5500_GPIO4_BASE + 0x100)
+
+#endif
diff --git a/arch/arm/mach-ux500/include/mach/db8500-regs.h b/arch/arm/mach-ux500/include/mach/db8500-regs.h
new file mode 100644
index 000000000000..85fc6a80b386
--- /dev/null
+++ b/arch/arm/mach-ux500/include/mach/db8500-regs.h
@@ -0,0 +1,147 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef __MACH_DB8500_REGS_H
+#define __MACH_DB8500_REGS_H
+
+/* Base address and bank offsets for ESRAM */
+#define U8500_ESRAM_BASE 0x40000000
+#define U8500_ESRAM_BANK_SIZE 0x00020000
+#define U8500_ESRAM_BANK0 U8500_ESRAM_BASE
+#define U8500_ESRAM_BANK1 (U8500_ESRAM_BASE + U8500_ESRAM_BANK_SIZE)
+#define U8500_ESRAM_BANK2 (U8500_ESRAM_BANK1 + U8500_ESRAM_BANK_SIZE)
+#define U8500_ESRAM_BANK3 (U8500_ESRAM_BANK2 + U8500_ESRAM_BANK_SIZE)
+#define U8500_ESRAM_BANK4 (U8500_ESRAM_BANK3 + U8500_ESRAM_BANK_SIZE)
+/* Use bank 4 for DMA LCLA and LCPA */
+#define U8500_DMA_LCLA_BASE U8500_ESRAM_BANK4
+#define U8500_DMA_LCPA_BASE (U8500_ESRAM_BANK4 + 0x4000)
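+/*
+ * LCLA and LCPA are understood to be the DMA40 logical channel link and
+ * parameter areas; keeping them in on-chip ESRAM keeps the controller's
+ * housekeeping accesses off the external memory bus.
+ */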
+
+#define U8500_PER3_BASE 0x80000000
+#define U8500_STM_BASE 0x80100000
+#define U8500_STM_REG_BASE (U8500_STM_BASE + 0xF000)
+#define U8500_PER2_BASE 0x80110000
+#define U8500_PER1_BASE 0x80120000
+#define U8500_B2R2_BASE 0x80130000
+#define U8500_HSEM_BASE 0x80140000
+#define U8500_PER4_BASE 0x80150000
+#define U8500_ICN_BASE 0x81000000
+
+#define U8500_BOOT_ROM_BASE 0x90000000
+/* ASIC ID is at 0xff4 offset within this region */
+#define U8500_ASIC_ID_BASE 0x9001F000
+
+#define U8500_PER6_BASE 0xa03c0000
+#define U8500_PER5_BASE 0xa03e0000
+#define U8500_PER7_BASE_ED 0xa03d0000
+
+#define U8500_SVA_BASE 0xa0100000
+#define U8500_SIA_BASE 0xa0200000
+
+#define U8500_SGA_BASE 0xa0300000
+#define U8500_MCDE_BASE 0xa0350000
+#define U8500_DMA_BASE_ED 0xa0362000
+#define U8500_DMA_BASE 0x801C0000 /* v1 */
+
+#define U8500_SBAG_BASE 0xa0390000
+
+#define U8500_SCU_BASE 0xa0410000
+#define U8500_GIC_CPU_BASE 0xa0410100
+#define U8500_TWD_BASE 0xa0410600
+#define U8500_GIC_DIST_BASE 0xa0411000
+#define U8500_L2CC_BASE 0xa0412000
+
+#define U8500_MODEM_I2C 0xb7e02000
+
+#define U8500_GPIO0_BASE (U8500_PER1_BASE + 0xE000)
+#define U8500_GPIO1_BASE (U8500_PER3_BASE + 0xE000)
+#define U8500_GPIO2_BASE (U8500_PER2_BASE + 0xE000)
+#define U8500_GPIO3_BASE (U8500_PER5_BASE + 0x1E000)
+
+/* per7 base addresses */
+#define U8500_CR_BASE_ED (U8500_PER7_BASE_ED + 0x8000)
+#define U8500_MTU0_BASE_ED (U8500_PER7_BASE_ED + 0xa000)
+#define U8500_MTU1_BASE_ED (U8500_PER7_BASE_ED + 0xb000)
+#define U8500_TZPC0_BASE_ED (U8500_PER7_BASE_ED + 0xc000)
+#define U8500_CLKRST7_BASE_ED (U8500_PER7_BASE_ED + 0xf000)
+
+#define U8500_UART0_BASE (U8500_PER1_BASE + 0x0000)
+#define U8500_UART1_BASE (U8500_PER1_BASE + 0x1000)
+
+/* per6 base addresses */
+#define U8500_RNG_BASE (U8500_PER6_BASE + 0x0000)
+#define U8500_PKA_BASE (U8500_PER6_BASE + 0x1000)
+#define U8500_PKAM_BASE (U8500_PER6_BASE + 0x2000)
+#define U8500_MTU0_BASE (U8500_PER6_BASE + 0x6000) /* v1 */
+#define U8500_MTU1_BASE (U8500_PER6_BASE + 0x7000) /* v1 */
+#define U8500_CR_BASE (U8500_PER6_BASE + 0x8000) /* v1 */
+#define U8500_CRYPTO0_BASE (U8500_PER6_BASE + 0xa000)
+#define U8500_CRYPTO1_BASE (U8500_PER6_BASE + 0xb000)
+#define U8500_CLKRST6_BASE (U8500_PER6_BASE + 0xf000)
+
+/* per5 base addresses */
+#define U8500_USBOTG_BASE (U8500_PER5_BASE + 0x00000)
+#define U8500_CLKRST5_BASE (U8500_PER5_BASE + 0x1f000)
+
+/* per4 base addresses */
+#define U8500_BACKUPRAM0_BASE (U8500_PER4_BASE + 0x00000)
+#define U8500_BACKUPRAM1_BASE (U8500_PER4_BASE + 0x01000)
+#define U8500_RTT0_BASE (U8500_PER4_BASE + 0x02000)
+#define U8500_RTT1_BASE (U8500_PER4_BASE + 0x03000)
+#define U8500_RTC_BASE (U8500_PER4_BASE + 0x04000)
+#define U8500_SCR_BASE (U8500_PER4_BASE + 0x05000)
+#define U8500_DMC_BASE (U8500_PER4_BASE + 0x06000)
+#define U8500_PRCMU_BASE (U8500_PER4_BASE + 0x07000)
+#define U8500_PRCMU_TCDM_BASE (U8500_PER4_BASE + 0x0f000)
+
+/* per3 base addresses */
+#define U8500_FSMC_BASE (U8500_PER3_BASE + 0x0000)
+#define U8500_SSP0_BASE (U8500_PER3_BASE + 0x2000)
+#define U8500_SSP1_BASE (U8500_PER3_BASE + 0x3000)
+#define U8500_I2C0_BASE (U8500_PER3_BASE + 0x4000)
+#define U8500_SDI2_BASE (U8500_PER3_BASE + 0x5000)
+#define U8500_SKE_BASE (U8500_PER3_BASE + 0x6000)
+#define U8500_UART2_BASE (U8500_PER3_BASE + 0x7000)
+#define U8500_SDI5_BASE (U8500_PER3_BASE + 0x8000)
+#define U8500_CLKRST3_BASE (U8500_PER3_BASE + 0xf000)
+
+/* per2 base addresses */
+#define U8500_I2C3_BASE (U8500_PER2_BASE + 0x0000)
+#define U8500_SPI2_BASE (U8500_PER2_BASE + 0x1000)
+#define U8500_SPI1_BASE (U8500_PER2_BASE + 0x2000)
+#define U8500_PWL_BASE (U8500_PER2_BASE + 0x3000)
+#define U8500_SDI4_BASE (U8500_PER2_BASE + 0x4000)
+#define U8500_MSP2_BASE (U8500_PER2_BASE + 0x7000)
+#define U8500_SDI1_BASE (U8500_PER2_BASE + 0x8000)
+#define U8500_SDI3_BASE (U8500_PER2_BASE + 0x9000)
+#define U8500_SPI0_BASE (U8500_PER2_BASE + 0xa000)
+#define U8500_HSIR_BASE (U8500_PER2_BASE + 0xb000)
+#define U8500_HSIT_BASE (U8500_PER2_BASE + 0xc000)
+#define U8500_CLKRST2_BASE (U8500_PER2_BASE + 0xf000)
+
+/* per1 base addresses */
+#define U8500_I2C1_BASE (U8500_PER1_BASE + 0x2000)
+#define U8500_MSP0_BASE (U8500_PER1_BASE + 0x3000)
+#define U8500_MSP1_BASE (U8500_PER1_BASE + 0x4000)
+#define U8500_SDI0_BASE (U8500_PER1_BASE + 0x6000)
+#define U8500_I2C2_BASE (U8500_PER1_BASE + 0x8000)
+#define U8500_SPI3_BASE (U8500_PER1_BASE + 0x9000)
+#define U8500_I2C4_BASE (U8500_PER1_BASE + 0xa000)
+#define U8500_SLIM0_BASE (U8500_PER1_BASE + 0xb000)
+#define U8500_CLKRST1_BASE (U8500_PER1_BASE + 0xf000)
+
+#define U8500_SHRM_GOP_INTERRUPT_BASE 0xB7C00040
+
+#define U8500_GPIOBANK0_BASE U8500_GPIO0_BASE
+#define U8500_GPIOBANK1_BASE (U8500_GPIO0_BASE + 0x80)
+#define U8500_GPIOBANK2_BASE U8500_GPIO1_BASE
+#define U8500_GPIOBANK3_BASE (U8500_GPIO1_BASE + 0x80)
+#define U8500_GPIOBANK4_BASE (U8500_GPIO1_BASE + 0x100)
+#define U8500_GPIOBANK5_BASE (U8500_GPIO1_BASE + 0x180)
+#define U8500_GPIOBANK6_BASE U8500_GPIO2_BASE
+#define U8500_GPIOBANK7_BASE (U8500_GPIO2_BASE + 0x80)
+#define U8500_GPIOBANK8_BASE U8500_GPIO3_BASE
+
+#endif
diff --git a/arch/arm/mach-ux500/include/mach/debug-macro.S b/arch/arm/mach-ux500/include/mach/debug-macro.S
index 09cbfda8aee5..c5203b7ea552 100644
--- a/arch/arm/mach-ux500/include/mach/debug-macro.S
+++ b/arch/arm/mach-ux500/include/mach/debug-macro.S
@@ -10,11 +10,19 @@
*/
#include <mach/hardware.h>
+#if CONFIG_UX500_DEBUG_UART > 2
+#error Invalid Ux500 debug UART
+#endif
+
+#define __UX500_UART(n) UX500_UART##n##_BASE
+#define UX500_UART(n) __UX500_UART(n)
+#define UART_BASE UX500_UART(CONFIG_UX500_DEBUG_UART)
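+/*
+ * The extra __UX500_UART() level of indirection lets the preprocessor expand
+ * CONFIG_UX500_DEBUG_UART to its numeric value before the ## paste, so with
+ * CONFIG_UX500_DEBUG_UART=2 UART_BASE becomes UX500_UART2_BASE.
+ */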
+
.macro addruart, rx, tmp
mrc p15, 0, \rx, c1, c0
tst \rx, #1 @ MMU enabled?
- ldreq \rx, =U8500_UART2_BASE @ no, physical address
- ldrne \rx, =IO_ADDRESS(U8500_UART2_BASE) @ yes, virtual address
+ ldreq \rx, =UART_BASE @ no, physical address
+ ldrne \rx, =IO_ADDRESS(UART_BASE) @ yes, virtual address
.endm
#include <asm/hardware/debug-pl01x.S>
diff --git a/arch/arm/mach-ux500/include/mach/devices.h b/arch/arm/mach-ux500/include/mach/devices.h
new file mode 100644
index 000000000000..c2b2f2574947
--- /dev/null
+++ b/arch/arm/mach-ux500/include/mach/devices.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef __ASM_ARCH_DEVICES_H__
+#define __ASM_ARCH_DEVICES_H__
+
+struct platform_device;
+struct amba_device;
+
+extern struct platform_device u5500_gpio_devs[];
+extern struct platform_device u8500_gpio_devs[];
+
+extern struct amba_device ux500_pl031_device;
+extern struct amba_device u8500_ssp0_device;
+extern struct amba_device ux500_uart0_device;
+extern struct amba_device ux500_uart1_device;
+extern struct amba_device ux500_uart2_device;
+
+extern struct platform_device ux500_i2c1_device;
+extern struct platform_device ux500_i2c2_device;
+extern struct platform_device ux500_i2c3_device;
+
+extern struct platform_device u8500_i2c0_device;
+extern struct platform_device u8500_i2c4_device;
+extern struct platform_device u8500_dma40_device;
+
+void dma40_u8500ed_fixup(void);
+
+#endif
diff --git a/arch/arm/mach-ux500/include/mach/entry-macro.S b/arch/arm/mach-ux500/include/mach/entry-macro.S
index eece3301fef7..60ea88db8283 100644
--- a/arch/arm/mach-ux500/include/mach/entry-macro.S
+++ b/arch/arm/mach-ux500/include/mach/entry-macro.S
@@ -17,7 +17,7 @@
.endm
.macro get_irqnr_preamble, base, tmp
- ldr \base, =IO_ADDRESS(U8500_GIC_CPU_BASE)
+ ldr \base, =IO_ADDRESS(UX500_GIC_CPU_BASE)
.endm
.macro arch_ret_to_user, tmp1, tmp2
diff --git a/arch/arm/mach-ux500/include/mach/gpio.h b/arch/arm/mach-ux500/include/mach/gpio.h
new file mode 100644
index 000000000000..d548a622e7d2
--- /dev/null
+++ b/arch/arm/mach-ux500/include/mach/gpio.h
@@ -0,0 +1,50 @@
+#ifndef __ASM_ARCH_GPIO_H
+#define __ASM_ARCH_GPIO_H
+
+/*
+ * 288 (#267 is the highest one actually hooked up) onchip GPIOs, plus enough
+ * room for a couple of GPIO expanders.
+ */
+#define ARCH_NR_GPIOS 350
+
+#include <plat/gpio.h>
+
+#define __GPIO_RESOURCE(soc, block) \
+ { \
+ .start = soc##_GPIOBANK##block##_BASE, \
+ .end = soc##_GPIOBANK##block##_BASE + 127, \
+ .flags = IORESOURCE_MEM, \
+ }, \
+ { \
+ .start = IRQ_GPIO##block, \
+ .end = IRQ_GPIO##block, \
+ .flags = IORESOURCE_IRQ, \
+ }
+
+#define __GPIO_DEVICE(soc, block) \
+ { \
+ .name = "gpio", \
+ .id = block, \
+ .num_resources = 2, \
+ .resource = &soc##_gpio_resources[block * 2], \
+ .dev = { \
+ .platform_data = &soc##_gpio_data[block], \
+ }, \
+ }
+
+#define GPIO_DATA(_name, first) \
+ { \
+ .name = _name, \
+ .first_gpio = first, \
+ .first_irq = NOMADIK_GPIO_TO_IRQ(first), \
+ }
+
+#ifdef CONFIG_UX500_SOC_DB8500
+#define GPIO_RESOURCE(block) __GPIO_RESOURCE(U8500, block)
+#define GPIO_DEVICE(block) __GPIO_DEVICE(u8500, block)
+#elif defined(CONFIG_UX500_SOC_DB5500)
+#define GPIO_RESOURCE(block) __GPIO_RESOURCE(U5500, block)
+#define GPIO_DEVICE(block) __GPIO_DEVICE(u5500, block)
+#endif
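+
+/*
+ * Illustrative sketch, not part of this header: a SoC devices file is
+ * expected to build its bank tables from these helpers, roughly as
+ *
+ *	static struct resource u8500_gpio_resources[] = {
+ *		GPIO_RESOURCE(0), GPIO_RESOURCE(1), ...
+ *	};
+ *	struct platform_device u8500_gpio_devs[] = {
+ *		GPIO_DEVICE(0), GPIO_DEVICE(1), ...
+ *	};
+ *
+ * together with a matching u8500_gpio_data[] array of the platform-data
+ * type declared in <plat/gpio.h>.
+ */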
+
+#endif /* __ASM_ARCH_GPIO_H */
diff --git a/arch/arm/mach-ux500/include/mach/hardware.h b/arch/arm/mach-ux500/include/mach/hardware.h
index 04ea836969b3..8656379a8309 100644
--- a/arch/arm/mach-ux500/include/mach/hardware.h
+++ b/arch/arm/mach-ux500/include/mach/hardware.h
@@ -23,109 +23,106 @@
/* typesafe io address */
#define __io_address(n) __io(IO_ADDRESS(n))
+/* used by some plat-nomadik code */
+#define io_p2v(n) __io_address(n)
-/*
- * Base address definitions for U8500 Onchip IPs. All the
- * peripherals are contained in a single 1 Mbyte region, with
- * AHB peripherals at the bottom and APB peripherals at the
- * top of the region. PER stands for PERIPHERAL region which
- * itself divided into sub regions.
- */
-#define U8500_PER3_BASE 0x80000000
-#define U8500_PER2_BASE 0x80110000
-#define U8500_PER1_BASE 0x80120000
-#define U8500_PER4_BASE 0x80150000
-
-#define U8500_PER6_BASE 0xa03c0000
-#define U8500_PER5_BASE 0xa03e0000
-#define U8500_PER7_BASE 0xa03d0000
-
-#define U8500_SVA_BASE 0xa0100000
-#define U8500_SIA_BASE 0xa0200000
-
-#define U8500_SGA_BASE 0xa0300000
-#define U8500_MCDE_BASE 0xa0350000
-#define U8500_DMA_BASE 0xa0362000
-
-#define U8500_SCU_BASE 0xa0410000
-#define U8500_GIC_CPU_BASE 0xa0410100
-#define U8500_TWD_BASE 0xa0410600
-#define U8500_GIC_DIST_BASE 0xa0411000
-#define U8500_L2CC_BASE 0xa0412000
-
-#define U8500_TWD_SIZE 0x100
-
-/* per7 base addressess */
-#define U8500_CR_BASE (U8500_PER7_BASE + 0x8000)
-#define U8500_MTU0_BASE (U8500_PER7_BASE + 0xa000)
-#define U8500_MTU1_BASE (U8500_PER7_BASE + 0xb000)
-#define U8500_TZPC0_BASE (U8500_PER7_BASE + 0xc000)
-#define U8500_CLKRST7_BASE (U8500_PER7_BASE + 0xf000)
-
-/* per6 base addressess */
-#define U8500_RNG_BASE (U8500_PER6_BASE + 0x0000)
-#define U8500_PKA_BASE (U8500_PER6_BASE + 0x1000)
-#define U8500_PKAM_BASE (U8500_PER6_BASE + 0x2000)
-#define U8500_CRYPTO0_BASE (U8500_PER6_BASE + 0xa000)
-#define U8500_CRYPTO1_BASE (U8500_PER6_BASE + 0xb000)
-#define U8500_CLKRST6_BASE (U8500_PER6_BASE + 0xf000)
-
-/* per5 base addressess */
-#define U8500_USBOTG_BASE (U8500_PER5_BASE + 0x00000)
-#define U8500_GPIO5_BASE (U8500_PER5_BASE + 0x1e000)
-#define U8500_CLKRST5_BASE (U8500_PER5_BASE + 0x1f000)
-
-/* per4 base addressess */
-#define U8500_BACKUPRAM0_BASE (U8500_PER4_BASE + 0x0000)
-#define U8500_BACKUPRAM1_BASE (U8500_PER4_BASE + 0x1000)
-#define U8500_RTT0_BASE (U8500_PER4_BASE + 0x2000)
-#define U8500_RTT1_BASE (U8500_PER4_BASE + 0x3000)
-#define U8500_RTC_BASE (U8500_PER4_BASE + 0x4000)
-#define U8500_SCR_BASE (U8500_PER4_BASE + 0x5000)
-#define U8500_DMC_BASE (U8500_PER4_BASE + 0x6000)
-#define U8500_PRCMU_BASE (U8500_PER4_BASE + 0x7000)
-
-/* per3 base addressess */
-#define U8500_FSMC_BASE (U8500_PER3_BASE + 0x0000)
-#define U8500_SSP0_BASE (U8500_PER3_BASE + 0x2000)
-#define U8500_SSP1_BASE (U8500_PER3_BASE + 0x3000)
-#define U8500_I2C0_BASE (U8500_PER3_BASE + 0x4000)
-#define U8500_SDI2_BASE (U8500_PER3_BASE + 0x5000)
-#define U8500_SKE_BASE (U8500_PER3_BASE + 0x6000)
-#define U8500_UART2_BASE (U8500_PER3_BASE + 0x7000)
-#define U8500_SDI5_BASE (U8500_PER3_BASE + 0x8000)
-#define U8500_GPIO3_BASE (U8500_PER3_BASE + 0xe000)
-#define U8500_CLKRST3_BASE (U8500_PER3_BASE + 0xf000)
-
-/* per2 base addressess */
-#define U8500_I2C3_BASE (U8500_PER2_BASE + 0x0000)
-#define U8500_SPI2_BASE (U8500_PER2_BASE + 0x1000)
-#define U8500_SPI1_BASE (U8500_PER2_BASE + 0x2000)
-#define U8500_PWL_BASE (U8500_PER2_BASE + 0x3000)
-#define U8500_SDI4_BASE (U8500_PER2_BASE + 0x4000)
-#define U8500_MSP2_BASE (U8500_PER2_BASE + 0x7000)
-#define U8500_SDI1_BASE (U8500_PER2_BASE + 0x8000)
-#define U8500_SDI3_BASE (U8500_PER2_BASE + 0x9000)
-#define U8500_SPI0_BASE (U8500_PER2_BASE + 0xa000)
-#define U8500_HSIR_BASE (U8500_PER2_BASE + 0xb000)
-#define U8500_HSIT_BASE (U8500_PER2_BASE + 0xc000)
-#define U8500_GPIO2_BASE (U8500_PER2_BASE + 0xe000)
-#define U8500_CLKRST2_BASE (U8500_PER2_BASE + 0xf000)
-
-/* per1 base addresses */
-#define U8500_UART0_BASE (U8500_PER1_BASE + 0x0000)
-#define U8500_UART1_BASE (U8500_PER1_BASE + 0x1000)
-#define U8500_I2C1_BASE (U8500_PER1_BASE + 0x2000)
-#define U8500_MSP0_BASE (U8500_PER1_BASE + 0x3000)
-#define U8500_MSP1_BASE (U8500_PER1_BASE + 0x4000)
-#define U8500_SDI0_BASE (U8500_PER1_BASE + 0x6000)
-#define U8500_I2C2_BASE (U8500_PER1_BASE + 0x8000)
-#define U8500_SPI3_BASE (U8500_PER1_BASE + 0x9000)
-#define U8500_SLIM0_BASE (U8500_PER1_BASE + 0xa000)
-#define U8500_GPIO1_BASE (U8500_PER1_BASE + 0xe000)
-#define U8500_CLKRST1_BASE (U8500_PER1_BASE + 0xf000)
+#include <mach/db8500-regs.h>
+#include <mach/db5500-regs.h>
+
+#ifdef CONFIG_UX500_SOC_DB8500
+#define UX500(periph) U8500_##periph##_BASE
+#elif defined(CONFIG_UX500_SOC_DB5500)
+#define UX500(periph) U5500_##periph##_BASE
+#endif
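+
+/*
+ * With CONFIG_UX500_SOC_DB8500 selected, UX500(UART2) expands to
+ * U8500_UART2_BASE, so the UX500_*_BASE aliases below resolve to the DB8500
+ * addresses at compile time; with CONFIG_UX500_SOC_DB5500 they resolve to
+ * the U5500 ones.
+ */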
+
+#define UX500_BACKUPRAM0_BASE UX500(BACKUPRAM0)
+#define UX500_BACKUPRAM1_BASE UX500(BACKUPRAM1)
+#define UX500_B2R2_BASE UX500(B2R2)
+
+#define UX500_CLKRST1_BASE UX500(CLKRST1)
+#define UX500_CLKRST2_BASE UX500(CLKRST2)
+#define UX500_CLKRST3_BASE UX500(CLKRST3)
+#define UX500_CLKRST5_BASE UX500(CLKRST5)
+#define UX500_CLKRST6_BASE UX500(CLKRST6)
+
+#define UX500_DMA_BASE UX500(DMA)
+#define UX500_FSMC_BASE UX500(FSMC)
+
+#define UX500_GIC_CPU_BASE UX500(GIC_CPU)
+#define UX500_GIC_DIST_BASE UX500(GIC_DIST)
+
+#define UX500_I2C1_BASE UX500(I2C1)
+#define UX500_I2C2_BASE UX500(I2C2)
+#define UX500_I2C3_BASE UX500(I2C3)
+
+#define UX500_L2CC_BASE UX500(L2CC)
+#define UX500_MCDE_BASE UX500(MCDE)
+#define UX500_MTU0_BASE UX500(MTU0)
+#define UX500_MTU1_BASE UX500(MTU1)
+#define UX500_PRCMU_BASE UX500(PRCMU)
+
+#define UX500_RNG_BASE UX500(RNG)
+#define UX500_RTC_BASE UX500(RTC)
+
+#define UX500_SCU_BASE UX500(SCU)
+
+#define UX500_SDI0_BASE UX500(SDI0)
+#define UX500_SDI1_BASE UX500(SDI1)
+#define UX500_SDI2_BASE UX500(SDI2)
+#define UX500_SDI3_BASE UX500(SDI3)
+#define UX500_SDI4_BASE UX500(SDI4)
+
+#define UX500_SPI0_BASE UX500(SPI0)
+#define UX500_SPI1_BASE UX500(SPI1)
+#define UX500_SPI2_BASE UX500(SPI2)
+#define UX500_SPI3_BASE UX500(SPI3)
+
+#define UX500_SIA_BASE UX500(SIA)
+#define UX500_SVA_BASE UX500(SVA)
+
+#define UX500_TWD_BASE UX500(TWD)
+
+#define UX500_UART0_BASE UX500(UART0)
+#define UX500_UART1_BASE UX500(UART1)
+#define UX500_UART2_BASE UX500(UART2)
+
+#define UX500_USBOTG_BASE UX500(USBOTG)
/* ST-Ericsson modified pl022 id */
#define SSP_PER_ID 0x01080022
+#ifndef __ASSEMBLY__
+
+#include <asm/cputype.h>
+
+static inline bool cpu_is_u8500(void)
+{
+#ifdef CONFIG_UX500_SOC_DB8500
+ return 1;
+#else
+ return 0;
+#endif
+}
+
+static inline bool cpu_is_u8500ed(void)
+{
+ return cpu_is_u8500() && (read_cpuid_id() & 15) == 0;
+}
+
+static inline bool cpu_is_u8500v1(void)
+{
+ return cpu_is_u8500() && (read_cpuid_id() & 15) == 1;
+}
+
+static inline bool cpu_is_u5500(void)
+{
+#ifdef CONFIG_UX500_SOC_DB5500
+ return 1;
+#else
+ return 0;
+#endif
+}
+
+#endif
+
#endif /* __MACH_HARDWARE_H */
diff --git a/arch/arm/mach-ux500/include/mach/irqs.h b/arch/arm/mach-ux500/include/mach/irqs.h
index 394b5dd2200f..7970684b1d09 100644
--- a/arch/arm/mach-ux500/include/mach/irqs.h
+++ b/arch/arm/mach-ux500/include/mach/irqs.h
@@ -42,6 +42,7 @@
#define IRQ_AB4500 (IRQ_SHPI_START + 40)
#define IRQ_DISP (IRQ_SHPI_START + 48)
#define IRQ_SiPI3 (IRQ_SHPI_START + 49)
+#define IRQ_I2C4 (IRQ_SHPI_START + 51)
#define IRQ_SSP1 (IRQ_SHPI_START + 52)
#define IRQ_I2C2 (IRQ_SHPI_START + 55)
#define IRQ_SDMMC0 (IRQ_SHPI_START + 60)
@@ -66,6 +67,12 @@
/* There are 128 shared peripheral interrupts assigned to
* INTID[160:32]. The first 32 interrupts are reserved.
*/
-#define NR_IRQS 161
+#define U8500_SOC_NR_IRQS 161
+
+/* After chip-specific IRQ numbers we have the GPIO ones */
+#define NOMADIK_NR_GPIO 288
+#define NOMADIK_GPIO_TO_IRQ(gpio) ((gpio) + U8500_SOC_NR_IRQS)
+#define NOMADIK_IRQ_TO_GPIO(irq) ((irq) - U8500_SOC_NR_IRQS)
+#define NR_IRQS NOMADIK_GPIO_TO_IRQ(NOMADIK_NR_GPIO)
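+
+/*
+ * Worked example: GPIO 0 maps to IRQ 161 (U8500_SOC_NR_IRQS), GPIO 287 to
+ * IRQ 448, and NR_IRQS therefore ends up as 161 + 288 = 449.
+ */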
#endif /*ASM_ARCH_IRQS_H*/
diff --git a/arch/arm/mach-ux500/include/mach/setup.h b/arch/arm/mach-ux500/include/mach/setup.h
index cf0ce1687f24..e978dbd9e210 100644
--- a/arch/arm/mach-ux500/include/mach/setup.h
+++ b/arch/arm/mach-ux500/include/mach/setup.h
@@ -14,10 +14,28 @@
#include <asm/mach/time.h>
#include <linux/init.h>
-extern void u8500_map_io(void);
-extern void u8500_init_devices(void);
-extern void u8500_init_irq(void);
+extern void __init ux500_map_io(void);
+extern void __init u5500_map_io(void);
+extern void __init u8500_map_io(void);
+
+extern void __init ux500_init_devices(void);
+extern void __init u5500_init_devices(void);
+extern void __init u8500_init_devices(void);
+
+extern void __init ux500_init_irq(void);
/* We re-use nomadik_timer for this platform */
extern void nmdk_timer_init(void);
+extern void __init amba_add_devices(struct amba_device *devs[], int num);
+
+struct sys_timer;
+extern struct sys_timer ux500_timer;
+
+#define __IO_DEV_DESC(x, sz) { \
+ .virtual = IO_ADDRESS(x), \
+ .pfn = __phys_to_pfn(x), \
+ .length = sz, \
+ .type = MT_DEVICE, \
+}
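+
+/*
+ * Illustrative use (names assumed, not from this patch): entries built this
+ * way go straight into the map_desc table handed to iotable_init(), e.g.
+ *
+ *	static struct map_desc ux500_io_desc[] __initdata = {
+ *		__IO_DEV_DESC(UX500_UART2_BASE, SZ_4K),
+ *		__IO_DEV_DESC(UX500_GIC_DIST_BASE, SZ_4K),
+ *	};
+ *	iotable_init(ux500_io_desc, ARRAY_SIZE(ux500_io_desc));
+ */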
+
#endif /* __ASM_ARCH_SETUP_H */
diff --git a/arch/arm/mach-ux500/platsmp.c b/arch/arm/mach-ux500/platsmp.c
index 8dfe7ca245d8..438ef16aec90 100644
--- a/arch/arm/mach-ux500/platsmp.c
+++ b/arch/arm/mach-ux500/platsmp.c
@@ -30,7 +30,7 @@ volatile int __cpuinitdata pen_release = -1;
static unsigned int __init get_core_count(void)
{
- return scu_get_core_count(__io_address(U8500_SCU_BASE));
+ return scu_get_core_count(__io_address(UX500_SCU_BASE));
}
static DEFINE_SPINLOCK(boot_lock);
@@ -44,7 +44,7 @@ void __cpuinit platform_secondary_init(unsigned int cpu)
* core (e.g. timer irq), then they will not have been enabled
* for us: do so
*/
- gic_cpu_init(0, __io_address(U8500_GIC_CPU_BASE));
+ gic_cpu_init(0, __io_address(UX500_GIC_CPU_BASE));
/*
* let the primary processor know we're out of the
@@ -75,7 +75,8 @@ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
* that it has been released by resetting pen_release.
*/
pen_release = cpu;
- flush_cache_all();
+ __cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release));
+ outer_clean_range(__pa(&pen_release), __pa(&pen_release) + 1);
timeout = jiffies + (1 * HZ);
while (time_before(jiffies, timeout)) {
@@ -105,12 +106,12 @@ static void __init wakeup_secondary(void)
*/
#define U8500_CPU1_JUMPADDR_OFFSET 0x1FF4
__raw_writel(virt_to_phys(u8500_secondary_startup),
- (void __iomem *)IO_ADDRESS(U8500_BACKUPRAM0_BASE) +
+ __io_address(UX500_BACKUPRAM0_BASE) +
U8500_CPU1_JUMPADDR_OFFSET);
#define U8500_CPU1_WAKEMAGIC_OFFSET 0x1FF0
__raw_writel(0xA1FEED01,
- (void __iomem *)IO_ADDRESS(U8500_BACKUPRAM0_BASE) +
+ __io_address(UX500_BACKUPRAM0_BASE) +
U8500_CPU1_WAKEMAGIC_OFFSET);
/* make sure write buffer is drained */
@@ -171,7 +172,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
* boot CPU, but only if we have more than one CPU.
*/
percpu_timer_setup();
- scu_enable(__io_address(U8500_SCU_BASE));
+ scu_enable(__io_address(UX500_SCU_BASE));
wakeup_secondary();
}
}
diff --git a/arch/arm/mach-ux500/ste-dma40-db8500.h b/arch/arm/mach-ux500/ste-dma40-db8500.h
new file mode 100644
index 000000000000..e7016278dfa9
--- /dev/null
+++ b/arch/arm/mach-ux500/ste-dma40-db8500.h
@@ -0,0 +1,154 @@
+/*
+ * arch/arm/mach-ux500/ste-dma40-db8500.h
+ * DB8500-SoC-specific configuration for DMA40
+ *
+ * Copyright (C) ST-Ericsson 2007-2010
+ * License terms: GNU General Public License (GPL) version 2
+ * Author: Per Friden <per.friden@stericsson.com>
+ * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
+ */
+#ifndef STE_DMA40_DB8500_H
+#define STE_DMA40_DB8500_H
+
+#define STEDMA40_NR_DEV 64
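+
+/*
+ * Note that the 64 source (RX) and 64 destination (TX) event lines are
+ * numbered independently, so the same index may legitimately appear once in
+ * each of the two enums below.
+ */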
+
+enum dma_src_dev_type {
+ STEDMA40_DEV_SPI0_RX = 0,
+ STEDMA40_DEV_SD_MMC0_RX = 1,
+ STEDMA40_DEV_SD_MMC1_RX = 2,
+ STEDMA40_DEV_SD_MMC2_RX = 3,
+ STEDMA40_DEV_I2C1_RX = 4,
+ STEDMA40_DEV_I2C3_RX = 5,
+ STEDMA40_DEV_I2C2_RX = 6,
+ STEDMA40_DEV_I2C4_RX = 7, /* Only on V1 */
+ STEDMA40_DEV_SSP0_RX = 8,
+ STEDMA40_DEV_SSP1_RX = 9,
+ STEDMA40_DEV_MCDE_RX = 10,
+ STEDMA40_DEV_UART2_RX = 11,
+ STEDMA40_DEV_UART1_RX = 12,
+ STEDMA40_DEV_UART0_RX = 13,
+ STEDMA40_DEV_MSP2_RX = 14,
+ STEDMA40_DEV_I2C0_RX = 15,
+ STEDMA40_DEV_USB_OTG_IEP_8 = 16,
+ STEDMA40_DEV_USB_OTG_IEP_1_9 = 17,
+ STEDMA40_DEV_USB_OTG_IEP_2_10 = 18,
+ STEDMA40_DEV_USB_OTG_IEP_3_11 = 19,
+ STEDMA40_DEV_SLIM0_CH0_RX_HSI_RX_CH0 = 20,
+ STEDMA40_DEV_SLIM0_CH1_RX_HSI_RX_CH1 = 21,
+ STEDMA40_DEV_SLIM0_CH2_RX_HSI_RX_CH2 = 22,
+ STEDMA40_DEV_SLIM0_CH3_RX_HSI_RX_CH3 = 23,
+ STEDMA40_DEV_SRC_SXA0_RX_TX = 24,
+ STEDMA40_DEV_SRC_SXA1_RX_TX = 25,
+ STEDMA40_DEV_SRC_SXA2_RX_TX = 26,
+ STEDMA40_DEV_SRC_SXA3_RX_TX = 27,
+ STEDMA40_DEV_SD_MM2_RX = 28,
+ STEDMA40_DEV_SD_MM0_RX = 29,
+ STEDMA40_DEV_MSP1_RX = 30,
+ /*
+ * This channel is either SlimBus or MSP,
+ * never both at the same time.
+ */
+ STEDMA40_SLIM0_CH0_RX = 31,
+ STEDMA40_DEV_MSP0_RX = 31,
+ STEDMA40_DEV_SD_MM1_RX = 32,
+ STEDMA40_DEV_SPI2_RX = 33,
+ STEDMA40_DEV_I2C3_RX2 = 34,
+ STEDMA40_DEV_SPI1_RX = 35,
+ STEDMA40_DEV_USB_OTG_IEP_4_12 = 36,
+ STEDMA40_DEV_USB_OTG_IEP_5_13 = 37,
+ STEDMA40_DEV_USB_OTG_IEP_6_14 = 38,
+ STEDMA40_DEV_USB_OTG_IEP_7_15 = 39,
+ STEDMA40_DEV_SPI3_RX = 40,
+ STEDMA40_DEV_SD_MM3_RX = 41,
+ STEDMA40_DEV_SD_MM4_RX = 42,
+ STEDMA40_DEV_SD_MM5_RX = 43,
+ STEDMA40_DEV_SRC_SXA4_RX_TX = 44,
+ STEDMA40_DEV_SRC_SXA5_RX_TX = 45,
+ STEDMA40_DEV_SRC_SXA6_RX_TX = 46,
+ STEDMA40_DEV_SRC_SXA7_RX_TX = 47,
+ STEDMA40_DEV_CAC1_RX = 48,
+ /* RX channels 49 and 50 are unused */
+ STEDMA40_DEV_MSHC_RX = 51,
+ STEDMA40_DEV_SLIM1_CH0_RX_HSI_RX_CH4 = 52,
+ STEDMA40_DEV_SLIM1_CH1_RX_HSI_RX_CH5 = 53,
+ STEDMA40_DEV_SLIM1_CH2_RX_HSI_RX_CH6 = 54,
+ STEDMA40_DEV_SLIM1_CH3_RX_HSI_RX_CH7 = 55,
+ /* RX channels 56 thru 60 are unused */
+ STEDMA40_DEV_CAC0_RX = 61,
+ /* RX channels 62 and 63 are unused */
+};
+
+enum dma_dest_dev_type {
+ STEDMA40_DEV_SPI0_TX = 0,
+ STEDMA40_DEV_SD_MMC0_TX = 1,
+ STEDMA40_DEV_SD_MMC1_TX = 2,
+ STEDMA40_DEV_SD_MMC2_TX = 3,
+ STEDMA40_DEV_I2C1_TX = 4,
+ STEDMA40_DEV_I2C3_TX = 5,
+ STEDMA40_DEV_I2C2_TX = 6,
+	STEDMA40_DEV_I2C4_TX = 7, /* Only on V1 */
+ STEDMA40_DEV_SSP0_TX = 8,
+ STEDMA40_DEV_SSP1_TX = 9,
+ /* TX channel 10 is unused */
+ STEDMA40_DEV_UART2_TX = 11,
+ STEDMA40_DEV_UART1_TX = 12,
+	STEDMA40_DEV_UART0_TX = 13,
+ STEDMA40_DEV_MSP2_TX = 14,
+ STEDMA40_DEV_I2C0_TX = 15,
+ STEDMA40_DEV_USB_OTG_OEP_8 = 16,
+ STEDMA40_DEV_USB_OTG_OEP_1_9 = 17,
+	STEDMA40_DEV_USB_OTG_OEP_2_10 = 18,
+ STEDMA40_DEV_USB_OTG_OEP_3_11 = 19,
+ STEDMA40_DEV_SLIM0_CH0_TX_HSI_TX_CH0 = 20,
+ STEDMA40_DEV_SLIM0_CH1_TX_HSI_TX_CH1 = 21,
+ STEDMA40_DEV_SLIM0_CH2_TX_HSI_TX_CH2 = 22,
+ STEDMA40_DEV_SLIM0_CH3_TX_HSI_TX_CH3 = 23,
+ STEDMA40_DEV_DST_SXA0_RX_TX = 24,
+ STEDMA40_DEV_DST_SXA1_RX_TX = 25,
+ STEDMA40_DEV_DST_SXA2_RX_TX = 26,
+ STEDMA40_DEV_DST_SXA3_RX_TX = 27,
+ STEDMA40_DEV_SD_MM2_TX = 28,
+ STEDMA40_DEV_SD_MM0_TX = 29,
+ STEDMA40_DEV_MSP1_TX = 30,
+ /*
+ * This channel is either SlimBus or MSP,
+ * never both at the same time.
+ */
+ STEDMA40_SLIM0_CH0_TX = 31,
+ STEDMA40_DEV_MSP0_TX = 31,
+ STEDMA40_DEV_SD_MM1_TX = 32,
+ STEDMA40_DEV_SPI2_TX = 33,
+ /* Secondary I2C3 channel */
+ STEDMA40_DEV_I2C3_TX2 = 34,
+ STEDMA40_DEV_SPI1_TX = 35,
+ STEDMA40_DEV_USB_OTG_OEP_4_12 = 36,
+ STEDMA40_DEV_USB_OTG_OEP_5_13 = 37,
+ STEDMA40_DEV_USB_OTG_OEP_6_14 = 38,
+ STEDMA40_DEV_USB_OTG_OEP_7_15 = 39,
+ STEDMA40_DEV_SPI3_TX = 40,
+ STEDMA40_DEV_SD_MM3_TX = 41,
+ STEDMA40_DEV_SD_MM4_TX = 42,
+ STEDMA40_DEV_SD_MM5_TX = 43,
+ STEDMA40_DEV_DST_SXA4_RX_TX = 44,
+ STEDMA40_DEV_DST_SXA5_RX_TX = 45,
+ STEDMA40_DEV_DST_SXA6_RX_TX = 46,
+ STEDMA40_DEV_DST_SXA7_RX_TX = 47,
+ STEDMA40_DEV_CAC1_TX = 48,
+ STEDMA40_DEV_CAC1_TX_HAC1_TX = 49,
+ STEDMA40_DEV_HAC1_TX = 50,
+ STEDMA40_MEMXCPY_TX_0 = 51,
+ STEDMA40_DEV_SLIM1_CH0_TX_HSI_TX_CH4 = 52,
+ STEDMA40_DEV_SLIM1_CH1_TX_HSI_TX_CH5 = 53,
+ STEDMA40_DEV_SLIM1_CH2_TX_HSI_TX_CH6 = 54,
+ STEDMA40_DEV_SLIM1_CH3_TX_HSI_TX_CH7 = 55,
+ STEDMA40_MEMCPY_TX_1 = 56,
+ STEDMA40_MEMCPY_TX_2 = 57,
+ STEDMA40_MEMCPY_TX_3 = 58,
+ STEDMA40_MEMCPY_TX_4 = 59,
+ STEDMA40_MEMCPY_TX_5 = 60,
+ STEDMA40_DEV_CAC0_TX = 61,
+ STEDMA40_DEV_CAC0_TX_HAC0_TX = 62,
+ STEDMA40_DEV_HAC0_TX = 63,
+};
+
+#endif
diff --git a/arch/arm/mach-versatile/Makefile b/arch/arm/mach-versatile/Makefile
index ba81e70ed813..97cf4d831b0c 100644
--- a/arch/arm/mach-versatile/Makefile
+++ b/arch/arm/mach-versatile/Makefile
@@ -2,7 +2,7 @@
# Makefile for the linux kernel.
#
-obj-y := core.o clock.o
+obj-y := core.o
obj-$(CONFIG_ARCH_VERSATILE_PB) += versatile_pb.o
obj-$(CONFIG_MACH_VERSATILE_AB) += versatile_ab.o
obj-$(CONFIG_PCI) += pci.o
diff --git a/arch/arm/mach-versatile/clock.c b/arch/arm/mach-versatile/clock.c
deleted file mode 100644
index c50a44ea7ee6..000000000000
--- a/arch/arm/mach-versatile/clock.c
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * linux/arch/arm/mach-versatile/clock.c
- *
- * Copyright (C) 2004 ARM Limited.
- * Written by Deep Blue Solutions Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/device.h>
-#include <linux/list.h>
-#include <linux/errno.h>
-#include <linux/err.h>
-#include <linux/string.h>
-#include <linux/clk.h>
-#include <linux/mutex.h>
-
-#include <asm/clkdev.h>
-#include <asm/hardware/icst307.h>
-
-#include "clock.h"
-
-int clk_enable(struct clk *clk)
-{
- return 0;
-}
-EXPORT_SYMBOL(clk_enable);
-
-void clk_disable(struct clk *clk)
-{
-}
-EXPORT_SYMBOL(clk_disable);
-
-unsigned long clk_get_rate(struct clk *clk)
-{
- return clk->rate;
-}
-EXPORT_SYMBOL(clk_get_rate);
-
-long clk_round_rate(struct clk *clk, unsigned long rate)
-{
- struct icst307_vco vco;
- vco = icst307_khz_to_vco(clk->params, rate / 1000);
- return icst307_khz(clk->params, vco) * 1000;
-}
-EXPORT_SYMBOL(clk_round_rate);
-
-int clk_set_rate(struct clk *clk, unsigned long rate)
-{
- int ret = -EIO;
-
- if (clk->setvco) {
- struct icst307_vco vco;
-
- vco = icst307_khz_to_vco(clk->params, rate / 1000);
- clk->rate = icst307_khz(clk->params, vco) * 1000;
- clk->setvco(clk, vco);
- ret = 0;
- }
- return ret;
-}
-EXPORT_SYMBOL(clk_set_rate);
diff --git a/arch/arm/mach-versatile/clock.h b/arch/arm/mach-versatile/clock.h
deleted file mode 100644
index 03468fdc3e58..000000000000
--- a/arch/arm/mach-versatile/clock.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * linux/arch/arm/mach-versatile/clock.h
- *
- * Copyright (C) 2004 ARM Limited.
- * Written by Deep Blue Solutions Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-struct module;
-struct icst307_params;
-
-struct clk {
- unsigned long rate;
- const struct icst307_params *params;
- u32 oscoff;
- void *data;
- void (*setvco)(struct clk *, struct icst307_vco vco);
-};
diff --git a/arch/arm/mach-versatile/core.c b/arch/arm/mach-versatile/core.c
index 3b1a4ee01815..3dff8641b03f 100644
--- a/arch/arm/mach-versatile/core.c
+++ b/arch/arm/mach-versatile/core.c
@@ -28,19 +28,15 @@
#include <linux/amba/clcd.h>
#include <linux/amba/pl061.h>
#include <linux/amba/mmci.h>
-#include <linux/clocksource.h>
-#include <linux/clockchips.h>
-#include <linux/cnt32_to_63.h>
#include <linux/io.h>
#include <linux/gfp.h>
#include <asm/clkdev.h>
#include <asm/system.h>
-#include <mach/hardware.h>
#include <asm/irq.h>
#include <asm/leds.h>
#include <asm/hardware/arm_timer.h>
-#include <asm/hardware/icst307.h>
+#include <asm/hardware/icst.h>
#include <asm/hardware/vic.h>
#include <asm/mach-types.h>
@@ -49,9 +45,12 @@
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/mach/map.h>
+#include <mach/clkdev.h>
+#include <mach/hardware.h>
+#include <mach/platform.h>
+#include <plat/timer-sp.h>
#include "core.h"
-#include "clock.h"
/*
* All IO addresses are mapped onto VA 0xFFFx.xxxx, where x.xxxx
@@ -59,7 +58,6 @@
*
* Setup a VA for the Versatile Vectored Interrupt Controller.
*/
-#define __io_address(n) __io(IO_ADDRESS(n))
#define VA_VIC_BASE __io_address(VERSATILE_VIC_BASE)
#define VA_SIC_BASE __io_address(VERSATILE_SIC_BASE)
@@ -229,27 +227,6 @@ void __init versatile_map_io(void)
iotable_init(versatile_io_desc, ARRAY_SIZE(versatile_io_desc));
}
-#define VERSATILE_REFCOUNTER (__io_address(VERSATILE_SYS_BASE) + VERSATILE_SYS_24MHz_OFFSET)
-
-/*
- * This is the Versatile sched_clock implementation. This has
- * a resolution of 41.7ns, and a maximum value of about 35583 days.
- *
- * The return value is guaranteed to be monotonic in that range as
- * long as there is always less than 89 seconds between successive
- * calls to this function.
- */
-unsigned long long sched_clock(void)
-{
- unsigned long long v = cnt32_to_63(readl(VERSATILE_REFCOUNTER));
-
- /* the <<1 gets rid of the cnt_32_to_63 top bit saving on a bic insn */
- v *= 125<<1;
- do_div(v, 3<<1);
-
- return v;
-}
-
#define VERSATILE_FLASHCTRL (__io_address(VERSATILE_SYS_BASE) + VERSATILE_SYS_FLASH_OFFSET)
@@ -380,33 +357,40 @@ static struct mmci_platform_data mmc0_plat_data = {
/*
* Clock handling
*/
-static const struct icst307_params versatile_oscvco_params = {
- .ref = 24000,
- .vco_max = 200000,
+static const struct icst_params versatile_oscvco_params = {
+ .ref = 24000000,
+ .vco_max = ICST307_VCO_MAX,
+ .vco_min = ICST307_VCO_MIN,
.vd_min = 4 + 8,
.vd_max = 511 + 8,
.rd_min = 1 + 2,
.rd_max = 127 + 2,
+ .s2div = icst307_s2div,
+ .idx2s = icst307_idx2s,
};
-static void versatile_oscvco_set(struct clk *clk, struct icst307_vco vco)
+static void versatile_oscvco_set(struct clk *clk, struct icst_vco vco)
{
- void __iomem *sys = __io_address(VERSATILE_SYS_BASE);
- void __iomem *sys_lock = sys + VERSATILE_SYS_LOCK_OFFSET;
+ void __iomem *sys_lock = __io_address(VERSATILE_SYS_BASE) + VERSATILE_SYS_LOCK_OFFSET;
u32 val;
- val = readl(sys + clk->oscoff) & ~0x7ffff;
+ val = readl(clk->vcoreg) & ~0x7ffff;
val |= vco.v | (vco.r << 9) | (vco.s << 16);
writel(0xa05f, sys_lock);
- writel(val, sys + clk->oscoff);
+ writel(val, clk->vcoreg);
writel(0, sys_lock);
}
+static const struct clk_ops osc4_clk_ops = {
+ .round = icst_clk_round,
+ .set = icst_clk_set,
+ .setvco = versatile_oscvco_set,
+};
+
static struct clk osc4_clk = {
+ .ops = &osc4_clk_ops,
.params = &versatile_oscvco_params,
- .oscoff = VERSATILE_SYS_OSCCLCD_OFFSET,
- .setvco = versatile_oscvco_set,
};
/*
@@ -852,6 +836,8 @@ void __init versatile_init(void)
{
int i;
+ osc4_clk.vcoreg = __io_address(VERSATILE_SYS_BASE) + VERSATILE_SYS_OSCCLCD_OFFSET;
+
clkdev_add_table(lookups, ARRAY_SIZE(lookups));
platform_device_register(&versatile_flash_device);
@@ -875,120 +861,6 @@ void __init versatile_init(void)
#define TIMER1_VA_BASE (__io_address(VERSATILE_TIMER0_1_BASE) + 0x20)
#define TIMER2_VA_BASE __io_address(VERSATILE_TIMER2_3_BASE)
#define TIMER3_VA_BASE (__io_address(VERSATILE_TIMER2_3_BASE) + 0x20)
-#define VA_IC_BASE __io_address(VERSATILE_VIC_BASE)
-
-/*
- * How long is the timer interval?
- */
-#define TIMER_INTERVAL (TICKS_PER_uSEC * mSEC_10)
-#if TIMER_INTERVAL >= 0x100000
-#define TIMER_RELOAD (TIMER_INTERVAL >> 8)
-#define TIMER_DIVISOR (TIMER_CTRL_DIV256)
-#define TICKS2USECS(x) (256 * (x) / TICKS_PER_uSEC)
-#elif TIMER_INTERVAL >= 0x10000
-#define TIMER_RELOAD (TIMER_INTERVAL >> 4) /* Divide by 16 */
-#define TIMER_DIVISOR (TIMER_CTRL_DIV16)
-#define TICKS2USECS(x) (16 * (x) / TICKS_PER_uSEC)
-#else
-#define TIMER_RELOAD (TIMER_INTERVAL)
-#define TIMER_DIVISOR (TIMER_CTRL_DIV1)
-#define TICKS2USECS(x) ((x) / TICKS_PER_uSEC)
-#endif
-
-static void timer_set_mode(enum clock_event_mode mode,
- struct clock_event_device *clk)
-{
- unsigned long ctrl;
-
- switch(mode) {
- case CLOCK_EVT_MODE_PERIODIC:
- writel(TIMER_RELOAD, TIMER0_VA_BASE + TIMER_LOAD);
-
- ctrl = TIMER_CTRL_PERIODIC;
- ctrl |= TIMER_CTRL_32BIT | TIMER_CTRL_IE | TIMER_CTRL_ENABLE;
- break;
- case CLOCK_EVT_MODE_ONESHOT:
- /* period set, and timer enabled in 'next_event' hook */
- ctrl = TIMER_CTRL_ONESHOT;
- ctrl |= TIMER_CTRL_32BIT | TIMER_CTRL_IE;
- break;
- case CLOCK_EVT_MODE_UNUSED:
- case CLOCK_EVT_MODE_SHUTDOWN:
- default:
- ctrl = 0;
- }
-
- writel(ctrl, TIMER0_VA_BASE + TIMER_CTRL);
-}
-
-static int timer_set_next_event(unsigned long evt,
- struct clock_event_device *unused)
-{
- unsigned long ctrl = readl(TIMER0_VA_BASE + TIMER_CTRL);
-
- writel(evt, TIMER0_VA_BASE + TIMER_LOAD);
- writel(ctrl | TIMER_CTRL_ENABLE, TIMER0_VA_BASE + TIMER_CTRL);
-
- return 0;
-}
-
-static struct clock_event_device timer0_clockevent = {
- .name = "timer0",
- .shift = 32,
- .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
- .set_mode = timer_set_mode,
- .set_next_event = timer_set_next_event,
-};
-
-/*
- * IRQ handler for the timer
- */
-static irqreturn_t versatile_timer_interrupt(int irq, void *dev_id)
-{
- struct clock_event_device *evt = &timer0_clockevent;
-
- writel(1, TIMER0_VA_BASE + TIMER_INTCLR);
-
- evt->event_handler(evt);
-
- return IRQ_HANDLED;
-}
-
-static struct irqaction versatile_timer_irq = {
- .name = "Versatile Timer Tick",
- .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
- .handler = versatile_timer_interrupt,
-};
-
-static cycle_t versatile_get_cycles(struct clocksource *cs)
-{
- return ~readl(TIMER3_VA_BASE + TIMER_VALUE);
-}
-
-static struct clocksource clocksource_versatile = {
- .name = "timer3",
- .rating = 200,
- .read = versatile_get_cycles,
- .mask = CLOCKSOURCE_MASK(32),
- .shift = 20,
- .flags = CLOCK_SOURCE_IS_CONTINUOUS,
-};
-
-static int __init versatile_clocksource_init(void)
-{
- /* setup timer3 as free-running clocksource */
- writel(0, TIMER3_VA_BASE + TIMER_CTRL);
- writel(0xffffffff, TIMER3_VA_BASE + TIMER_LOAD);
- writel(0xffffffff, TIMER3_VA_BASE + TIMER_VALUE);
- writel(TIMER_CTRL_32BIT | TIMER_CTRL_ENABLE | TIMER_CTRL_PERIODIC,
- TIMER3_VA_BASE + TIMER_CTRL);
-
- clocksource_versatile.mult =
- clocksource_khz2mult(1000, clocksource_versatile.shift);
- clocksource_register(&clocksource_versatile);
-
- return 0;
-}
/*
* Set up timer interrupt, and return the current time in seconds.
@@ -1017,22 +889,8 @@ static void __init versatile_timer_init(void)
writel(0, TIMER2_VA_BASE + TIMER_CTRL);
writel(0, TIMER3_VA_BASE + TIMER_CTRL);
- /*
- * Make irqs happen for the system timer
- */
- setup_irq(IRQ_TIMERINT0_1, &versatile_timer_irq);
-
- versatile_clocksource_init();
-
- timer0_clockevent.mult =
- div_sc(1000000, NSEC_PER_SEC, timer0_clockevent.shift);
- timer0_clockevent.max_delta_ns =
- clockevent_delta2ns(0xffffffff, &timer0_clockevent);
- timer0_clockevent.min_delta_ns =
- clockevent_delta2ns(0xf, &timer0_clockevent);
-
- timer0_clockevent.cpumask = cpumask_of(0);
- clockevents_register_device(&timer0_clockevent);
+ sp804_clocksource_init(TIMER3_VA_BASE);
+ sp804_clockevents_init(TIMER0_VA_BASE, IRQ_TIMERINT0_1);
}
struct sys_timer versatile_timer = {
diff --git a/arch/arm/mach-versatile/include/mach/clkdev.h b/arch/arm/mach-versatile/include/mach/clkdev.h
index 04b37a89801c..e58d0771b64e 100644
--- a/arch/arm/mach-versatile/include/mach/clkdev.h
+++ b/arch/arm/mach-versatile/include/mach/clkdev.h
@@ -1,6 +1,15 @@
#ifndef __ASM_MACH_CLKDEV_H
#define __ASM_MACH_CLKDEV_H
+#include <plat/clock.h>
+
+struct clk {
+ unsigned long rate;
+ const struct clk_ops *ops;
+ const struct icst_params *params;
+ void __iomem *vcoreg;
+};
+
#define __clk_get(clk) ({ 1; })
#define __clk_put(clk) do { } while (0)
diff --git a/arch/arm/mach-versatile/include/mach/entry-macro.S b/arch/arm/mach-versatile/include/mach/entry-macro.S
index 8c8020980585..e6f7c1663160 100644
--- a/arch/arm/mach-versatile/include/mach/entry-macro.S
+++ b/arch/arm/mach-versatile/include/mach/entry-macro.S
@@ -8,6 +8,7 @@
* warranty of any kind, whether express or implied.
*/
#include <mach/hardware.h>
+#include <mach/platform.h>
#include <asm/hardware/vic.h>
.macro disable_fiq
diff --git a/arch/arm/mach-versatile/include/mach/hardware.h b/arch/arm/mach-versatile/include/mach/hardware.h
index 7aa906c93154..4f8f99aac938 100644
--- a/arch/arm/mach-versatile/include/mach/hardware.h
+++ b/arch/arm/mach-versatile/include/mach/hardware.h
@@ -23,7 +23,6 @@
#define __ASM_ARCH_HARDWARE_H
#include <asm/sizes.h>
-#include <mach/platform.h>
/*
* PCI space virtual addresses
@@ -49,4 +48,6 @@
/* macro to get at IO space when running virtually */
#define IO_ADDRESS(x) (((x) & 0x0fffffff) + (((x) >> 4) & 0x0f000000) + 0xf0000000)
+#define __io_address(n) __io(IO_ADDRESS(n))
+
#endif
diff --git a/arch/arm/mach-versatile/include/mach/platform.h b/arch/arm/mach-versatile/include/mach/platform.h
index 83207395191a..ec087407b163 100644
--- a/arch/arm/mach-versatile/include/mach/platform.h
+++ b/arch/arm/mach-versatile/include/mach/platform.h
@@ -205,7 +205,7 @@
#define VERSATILE_CLCD_BASE 0x10120000 /* CLCD */
#define VERSATILE_DMAC_BASE 0x10130000 /* DMA controller */
#define VERSATILE_VIC_BASE 0x10140000 /* Vectored interrupt controller */
-#define VERSATILE_PERIPH_BASE 0x10150000 /* off-chip peripherals alias from */
+#define VERSATILE_PERIPH_BASE 0x10150000 /* off-chip peripherals alias from */
/* 0x10000000 - 0x100FFFFF */
#define VERSATILE_AHBM_BASE 0x101D0000 /* AHB monitor */
#define VERSATILE_SCTL_BASE 0x101E0000 /* System controller */
@@ -213,7 +213,7 @@
#define VERSATILE_TIMER0_1_BASE 0x101E2000 /* Timer 0 and 1 */
#define VERSATILE_TIMER2_3_BASE 0x101E3000 /* Timer 2 and 3 */
#define VERSATILE_GPIO0_BASE 0x101E4000 /* GPIO port 0 */
-#define VERSATILE_GPIO1_BASE 0x101E5000 /* GPIO port 1 */
+#define VERSATILE_GPIO1_BASE 0x101E5000 /* GPIO port 1 */
#define VERSATILE_GPIO2_BASE 0x101E6000 /* GPIO port 2 */
#define VERSATILE_GPIO3_BASE 0x101E7000 /* GPIO port 3 */
#define VERSATILE_RTC_BASE 0x101E8000 /* Real Time Clock */
@@ -379,12 +379,6 @@
#define SIC_INT_PCI3 30
-/*
- * Clean base - dummy
- *
- */
-#define CLEAN_BASE VERSATILE_BOOT_ROM_HI
-
/*
* System controller bit assignment
*/
@@ -397,20 +391,6 @@
#define VERSATILE_TIMER4_EnSel 21
-#define MAX_TIMER 2
-#define MAX_PERIOD 699050
-#define TICKS_PER_uSEC 1
-
-/*
- * These are useconds NOT ticks.
- *
- */
-#define mSEC_1 1000
-#define mSEC_5 (mSEC_1 * 5)
-#define mSEC_10 (mSEC_1 * 10)
-#define mSEC_25 (mSEC_1 * 25)
-#define SEC_1 (mSEC_1 * 1000)
-
#define VERSATILE_CSR_BASE 0x10000000
#define VERSATILE_CSR_SIZE 0x10000000
@@ -432,5 +412,3 @@
#endif
#endif
-
-/* END */
diff --git a/arch/arm/mach-vexpress/Kconfig b/arch/arm/mach-vexpress/Kconfig
new file mode 100644
index 000000000000..3f19b660a165
--- /dev/null
+++ b/arch/arm/mach-vexpress/Kconfig
@@ -0,0 +1,9 @@
+menu "Versatile Express platform type"
+ depends on ARCH_VEXPRESS
+
+config ARCH_VEXPRESS_CA9X4
+ bool "Versatile Express Cortex-A9x4 tile"
+ select CPU_V7
+ select ARM_GIC
+
+endmenu
diff --git a/arch/arm/mach-vexpress/Makefile b/arch/arm/mach-vexpress/Makefile
new file mode 100644
index 000000000000..1b71b77ade22
--- /dev/null
+++ b/arch/arm/mach-vexpress/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the linux kernel.
+#
+
+obj-y := v2m.o
+obj-$(CONFIG_ARCH_VEXPRESS_CA9X4) += ct-ca9x4.o
+obj-$(CONFIG_SMP) += platsmp.o headsmp.o
+obj-$(CONFIG_LOCAL_TIMERS) += localtimer.o
diff --git a/arch/arm/mach-vexpress/Makefile.boot b/arch/arm/mach-vexpress/Makefile.boot
new file mode 100644
index 000000000000..07c2d9c457ec
--- /dev/null
+++ b/arch/arm/mach-vexpress/Makefile.boot
@@ -0,0 +1,3 @@
+ zreladdr-y := 0x60008000
+params_phys-y := 0x60000100
+initrd_phys-y := 0x60800000
diff --git a/arch/arm/mach-vexpress/core.h b/arch/arm/mach-vexpress/core.h
new file mode 100644
index 000000000000..57dd95ce41f9
--- /dev/null
+++ b/arch/arm/mach-vexpress/core.h
@@ -0,0 +1,26 @@
+#define __MMIO_P2V(x) (((x) & 0xfffff) | (((x) & 0x0f000000) >> 4) | 0xf8000000)
+#define MMIO_P2V(x) ((void __iomem *)__MMIO_P2V(x))
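+
+/*
+ * The static mapping folds the scattered physical windows into one virtual
+ * region above 0xf8000000: for example CT_CA9X4_MPIC (0x1e000000) maps to
+ * 0xf8e00000 and V2M_UART0 (0x10009000) to 0xf8009000.
+ */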
+
+#define AMBA_DEVICE(name,busid,base,plat) \
+struct amba_device name##_device = { \
+ .dev = { \
+ .coherent_dma_mask = ~0UL, \
+ .init_name = busid, \
+ .platform_data = plat, \
+ }, \
+ .res = { \
+ .start = base, \
+ .end = base + SZ_4K - 1, \
+ .flags = IORESOURCE_MEM, \
+ }, \
+ .dma_mask = ~0UL, \
+ .irq = IRQ_##base, \
+ /* .dma = DMA_##base,*/ \
+}
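+
+/*
+ * Note that .irq is formed by pasting IRQ_ onto the base argument, so callers
+ * must pass the bare address macro name, e.g. AMBA_DEVICE(clcd, "ct:clcd",
+ * CT_CA9X4_CLCDC, ...) picks up IRQ_CT_CA9X4_CLCDC from ct-ca9x4.h.
+ */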
+
+struct map_desc;
+
+void v2m_map_io(struct map_desc *tile, size_t num);
+extern struct sys_timer v2m_timer;
+
+extern void __iomem *gic_cpu_base_addr;
diff --git a/arch/arm/mach-vexpress/ct-ca9x4.c b/arch/arm/mach-vexpress/ct-ca9x4.c
new file mode 100644
index 000000000000..e6f73030d5f0
--- /dev/null
+++ b/arch/arm/mach-vexpress/ct-ca9x4.c
@@ -0,0 +1,249 @@
+/*
+ * Versatile Express Core Tile Cortex A9x4 Support
+ */
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/amba/bus.h>
+#include <linux/amba/clcd.h>
+
+#include <asm/clkdev.h>
+#include <asm/hardware/arm_timer.h>
+#include <asm/hardware/cache-l2x0.h>
+#include <asm/hardware/gic.h>
+#include <asm/mach-types.h>
+#include <asm/pmu.h>
+
+#include <mach/clkdev.h>
+#include <mach/ct-ca9x4.h>
+
+#include <plat/timer-sp.h>
+
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <asm/mach/time.h>
+
+#include "core.h"
+
+#include <mach/motherboard.h>
+
+#define V2M_PA_CS7 0x10000000
+
+static struct map_desc ct_ca9x4_io_desc[] __initdata = {
+ {
+ .virtual = __MMIO_P2V(CT_CA9X4_MPIC),
+ .pfn = __phys_to_pfn(CT_CA9X4_MPIC),
+ .length = SZ_16K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = __MMIO_P2V(CT_CA9X4_SP804_TIMER),
+ .pfn = __phys_to_pfn(CT_CA9X4_SP804_TIMER),
+ .length = SZ_4K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = __MMIO_P2V(CT_CA9X4_L2CC),
+ .pfn = __phys_to_pfn(CT_CA9X4_L2CC),
+ .length = SZ_4K,
+ .type = MT_DEVICE,
+ },
+};
+
+static void __init ct_ca9x4_map_io(void)
+{
+ v2m_map_io(ct_ca9x4_io_desc, ARRAY_SIZE(ct_ca9x4_io_desc));
+}
+
+void __iomem *gic_cpu_base_addr;
+
+static void __init ct_ca9x4_init_irq(void)
+{
+ gic_cpu_base_addr = MMIO_P2V(A9_MPCORE_GIC_CPU);
+ gic_dist_init(0, MMIO_P2V(A9_MPCORE_GIC_DIST), 29);
+ gic_cpu_init(0, gic_cpu_base_addr);
+}
+
+#if 0
+static void ct_ca9x4_timer_init(void)
+{
+ writel(0, MMIO_P2V(CT_CA9X4_TIMER0) + TIMER_CTRL);
+ writel(0, MMIO_P2V(CT_CA9X4_TIMER1) + TIMER_CTRL);
+
+ sp804_clocksource_init(MMIO_P2V(CT_CA9X4_TIMER1));
+ sp804_clockevents_init(MMIO_P2V(CT_CA9X4_TIMER0), IRQ_CT_CA9X4_TIMER0);
+}
+
+static struct sys_timer ct_ca9x4_timer = {
+ .init = ct_ca9x4_timer_init,
+};
+#endif
+
+static struct clcd_panel xvga_panel = {
+ .mode = {
+ .name = "XVGA",
+ .refresh = 60,
+ .xres = 1024,
+ .yres = 768,
+ .pixclock = 15384,
+ .left_margin = 168,
+ .right_margin = 8,
+ .upper_margin = 29,
+ .lower_margin = 3,
+ .hsync_len = 144,
+ .vsync_len = 6,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED,
+ },
+ .width = -1,
+ .height = -1,
+ .tim2 = TIM2_BCD | TIM2_IPC,
+ .cntl = CNTL_LCDTFT | CNTL_BGR | CNTL_LCDVCOMP(1),
+ .bpp = 16,
+};
+
+static void ct_ca9x4_clcd_enable(struct clcd_fb *fb)
+{
+ v2m_cfg_write(SYS_CFG_MUXFPGA | SYS_CFG_SITE_DB1, 0);
+ v2m_cfg_write(SYS_CFG_DVIMODE | SYS_CFG_SITE_DB1, 2);
+}
+
+static int ct_ca9x4_clcd_setup(struct clcd_fb *fb)
+{
+ unsigned long framesize = 1024 * 768 * 2;
+ dma_addr_t dma;
+
+ fb->panel = &xvga_panel;
+
+ fb->fb.screen_base = dma_alloc_writecombine(&fb->dev->dev, framesize,
+ &dma, GFP_KERNEL);
+ if (!fb->fb.screen_base) {
+ printk(KERN_ERR "CLCD: unable to map frame buffer\n");
+ return -ENOMEM;
+ }
+ fb->fb.fix.smem_start = dma;
+ fb->fb.fix.smem_len = framesize;
+
+ return 0;
+}
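+
+/*
+ * framesize above is 1024 x 768 x 2 bytes (16 bpp XVGA) = 1.5 MiB of
+ * write-combining DMA memory, exposed to userspace via the mmap hook below.
+ */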
+
+static int ct_ca9x4_clcd_mmap(struct clcd_fb *fb, struct vm_area_struct *vma)
+{
+ return dma_mmap_writecombine(&fb->dev->dev, vma, fb->fb.screen_base,
+ fb->fb.fix.smem_start, fb->fb.fix.smem_len);
+}
+
+static void ct_ca9x4_clcd_remove(struct clcd_fb *fb)
+{
+ dma_free_writecombine(&fb->dev->dev, fb->fb.fix.smem_len,
+ fb->fb.screen_base, fb->fb.fix.smem_start);
+}
+
+static struct clcd_board ct_ca9x4_clcd_data = {
+ .name = "CT-CA9X4",
+ .check = clcdfb_check,
+ .decode = clcdfb_decode,
+ .enable = ct_ca9x4_clcd_enable,
+ .setup = ct_ca9x4_clcd_setup,
+ .mmap = ct_ca9x4_clcd_mmap,
+ .remove = ct_ca9x4_clcd_remove,
+};
+
+static AMBA_DEVICE(clcd, "ct:clcd", CT_CA9X4_CLCDC, &ct_ca9x4_clcd_data);
+static AMBA_DEVICE(dmc, "ct:dmc", CT_CA9X4_DMC, NULL);
+static AMBA_DEVICE(smc, "ct:smc", CT_CA9X4_SMC, NULL);
+static AMBA_DEVICE(gpio, "ct:gpio", CT_CA9X4_GPIO, NULL);
+
+static struct amba_device *ct_ca9x4_amba_devs[] __initdata = {
+ &clcd_device,
+ &dmc_device,
+ &smc_device,
+ &gpio_device,
+};
+
+
+static long ct_round(struct clk *clk, unsigned long rate)
+{
+ return rate;
+}
+
+static int ct_set(struct clk *clk, unsigned long rate)
+{
+ return v2m_cfg_write(SYS_CFG_OSC | SYS_CFG_SITE_DB1 | 1, rate);
+}
+
+static const struct clk_ops osc1_clk_ops = {
+ .round = ct_round,
+ .set = ct_set,
+};
+
+static struct clk osc1_clk = {
+ .ops = &osc1_clk_ops,
+ .rate = 24000000,
+};
+
+static struct clk_lookup lookups[] = {
+ { /* CLCD */
+ .dev_id = "ct:clcd",
+ .clk = &osc1_clk,
+ },
+};
+
+static struct resource pmu_resources[] = {
+ [0] = {
+ .start = IRQ_CT_CA9X4_PMU_CPU0,
+ .end = IRQ_CT_CA9X4_PMU_CPU0,
+ .flags = IORESOURCE_IRQ,
+ },
+ [1] = {
+ .start = IRQ_CT_CA9X4_PMU_CPU1,
+ .end = IRQ_CT_CA9X4_PMU_CPU1,
+ .flags = IORESOURCE_IRQ,
+ },
+ [2] = {
+ .start = IRQ_CT_CA9X4_PMU_CPU2,
+ .end = IRQ_CT_CA9X4_PMU_CPU2,
+ .flags = IORESOURCE_IRQ,
+ },
+ [3] = {
+ .start = IRQ_CT_CA9X4_PMU_CPU3,
+ .end = IRQ_CT_CA9X4_PMU_CPU3,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device pmu_device = {
+ .name = "arm-pmu",
+ .id = ARM_PMU_DEVICE_CPU,
+ .num_resources = ARRAY_SIZE(pmu_resources),
+ .resource = pmu_resources,
+};
+
+static void ct_ca9x4_init(void)
+{
+ int i;
+
+#ifdef CONFIG_CACHE_L2X0
+ l2x0_init(MMIO_P2V(CT_CA9X4_L2CC), 0x00000000, 0xfe0fffff);
+#endif
+
+ clkdev_add_table(lookups, ARRAY_SIZE(lookups));
+
+ for (i = 0; i < ARRAY_SIZE(ct_ca9x4_amba_devs); i++)
+ amba_device_register(ct_ca9x4_amba_devs[i], &iomem_resource);
+
+ platform_device_register(&pmu_device);
+}
+
+MACHINE_START(VEXPRESS, "ARM-Versatile Express CA9x4")
+ .phys_io = V2M_UART0,
+ .io_pg_offst = (__MMIO_P2V(V2M_UART0) >> 18) & 0xfffc,
+ .boot_params = PHYS_OFFSET + 0x00000100,
+ .map_io = ct_ca9x4_map_io,
+ .init_irq = ct_ca9x4_init_irq,
+#if 0
+ .timer = &ct_ca9x4_timer,
+#else
+ .timer = &v2m_timer,
+#endif
+ .init_machine = ct_ca9x4_init,
+MACHINE_END
diff --git a/arch/arm/mach-vexpress/headsmp.S b/arch/arm/mach-vexpress/headsmp.S
new file mode 100644
index 000000000000..8a78ff68e1ee
--- /dev/null
+++ b/arch/arm/mach-vexpress/headsmp.S
@@ -0,0 +1,39 @@
+/*
+ * linux/arch/arm/mach-vexpress/headsmp.S
+ *
+ * Copyright (c) 2003 ARM Limited
+ * All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/linkage.h>
+#include <linux/init.h>
+
+ __INIT
+
+/*
+ * Versatile Express specific entry point for secondary CPUs. This
+ * provides a "holding pen" into which all secondary cores are held
+ * until we're ready for them to initialise.
+ */
+ENTRY(vexpress_secondary_startup)
+ mrc p15, 0, r0, c0, c0, 5
+ and r0, r0, #15
+ adr r4, 1f
+ ldmia r4, {r5, r6}
+ sub r4, r4, r5
+ add r6, r6, r4
+pen: ldr r7, [r6]
+ cmp r7, r0
+ bne pen
+
+ /*
+ * we've been released from the holding pen: secondary_stack
+ * should now contain the SVC stack for this core
+ */
+ b secondary_startup
+
+1: .long .
+ .long pen_release
diff --git a/arch/arm/mach-vexpress/include/mach/clkdev.h b/arch/arm/mach-vexpress/include/mach/clkdev.h
new file mode 100644
index 000000000000..3f8307d73cad
--- /dev/null
+++ b/arch/arm/mach-vexpress/include/mach/clkdev.h
@@ -0,0 +1,15 @@
+#ifndef __ASM_MACH_CLKDEV_H
+#define __ASM_MACH_CLKDEV_H
+
+#include <plat/clock.h>
+
+struct clk {
+ const struct clk_ops *ops;
+ unsigned long rate;
+ const struct icst_params *params;
+};
+
+#define __clk_get(clk) ({ 1; })
+#define __clk_put(clk) do { } while (0)
+
+#endif
diff --git a/arch/arm/mach-vexpress/include/mach/ct-ca9x4.h b/arch/arm/mach-vexpress/include/mach/ct-ca9x4.h
new file mode 100644
index 000000000000..8650f04136ef
--- /dev/null
+++ b/arch/arm/mach-vexpress/include/mach/ct-ca9x4.h
@@ -0,0 +1,47 @@
+#ifndef __MACH_CT_CA9X4_H
+#define __MACH_CT_CA9X4_H
+
+/*
+ * Physical base addresses
+ */
+#define CT_CA9X4_CLCDC (0x10020000)
+#define CT_CA9X4_AXIRAM (0x10060000)
+#define CT_CA9X4_DMC (0x100e0000)
+#define CT_CA9X4_SMC (0x100e1000)
+#define CT_CA9X4_SCC (0x100e2000)
+#define CT_CA9X4_SP804_TIMER (0x100e4000)
+#define CT_CA9X4_SP805_WDT (0x100e5000)
+#define CT_CA9X4_TZPC (0x100e6000)
+#define CT_CA9X4_GPIO (0x100e8000)
+#define CT_CA9X4_FASTAXI (0x100e9000)
+#define CT_CA9X4_SLOWAXI (0x100ea000)
+#define CT_CA9X4_TZASC (0x100ec000)
+#define CT_CA9X4_CORESIGHT (0x10200000)
+#define CT_CA9X4_MPIC (0x1e000000)
+#define CT_CA9X4_SYSTIMER (0x1e004000)
+#define CT_CA9X4_SYSWDT (0x1e007000)
+#define CT_CA9X4_L2CC (0x1e00a000)
+
+#define CT_CA9X4_TIMER0 (CT_CA9X4_SP804_TIMER + 0x000)
+#define CT_CA9X4_TIMER1 (CT_CA9X4_SP804_TIMER + 0x020)
+
+#define A9_MPCORE_SCU (CT_CA9X4_MPIC + 0x0000)
+#define A9_MPCORE_GIC_CPU (CT_CA9X4_MPIC + 0x0100)
+#define A9_MPCORE_GIT (CT_CA9X4_MPIC + 0x0200)
+#define A9_MPCORE_GIC_DIST (CT_CA9X4_MPIC + 0x1000)
+
+/*
+ * Interrupts. Those in {} are for AMBA devices
+ */
+#define IRQ_CT_CA9X4_CLCDC { 76 }
+#define IRQ_CT_CA9X4_DMC { -1 }
+#define IRQ_CT_CA9X4_SMC { 77, 78 }
+#define IRQ_CT_CA9X4_TIMER0 80
+#define IRQ_CT_CA9X4_TIMER1 81
+#define IRQ_CT_CA9X4_GPIO { 82 }
+#define IRQ_CT_CA9X4_PMU_CPU0 92
+#define IRQ_CT_CA9X4_PMU_CPU1 93
+#define IRQ_CT_CA9X4_PMU_CPU2 94
+#define IRQ_CT_CA9X4_PMU_CPU3 95
+
+#endif
diff --git a/arch/arm/mach-vexpress/include/mach/debug-macro.S b/arch/arm/mach-vexpress/include/mach/debug-macro.S
new file mode 100644
index 000000000000..5167e2aceeba
--- /dev/null
+++ b/arch/arm/mach-vexpress/include/mach/debug-macro.S
@@ -0,0 +1,23 @@
+/* arch/arm/mach-vexpress/include/mach/debug-macro.S
+ *
+ * Debugging macro include header
+ *
+ * Copyright (C) 1994-1999 Russell King
+ * Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define DEBUG_LL_UART_OFFSET 0x00009000
+
+ .macro addruart,rx,tmp
+ mrc p15, 0, \rx, c1, c0
+ tst \rx, #1 @ MMU enabled?
+ moveq \rx, #0x10000000
+ movne \rx, #0xf8000000 @ virtual base
+ orr \rx, \rx, #DEBUG_LL_UART_OFFSET
+ .endm
+
+#include <asm/hardware/debug-pl01x.S>
diff --git a/arch/arm/mach-vexpress/include/mach/entry-macro.S b/arch/arm/mach-vexpress/include/mach/entry-macro.S
new file mode 100644
index 000000000000..20e9fb514f0a
--- /dev/null
+++ b/arch/arm/mach-vexpress/include/mach/entry-macro.S
@@ -0,0 +1,67 @@
+#include <asm/hardware/gic.h>
+
+ .macro disable_fiq
+ .endm
+
+ .macro get_irqnr_preamble, base, tmp
+ ldr \base, =gic_cpu_base_addr
+ ldr \base, [\base]
+ .endm
+
+ .macro arch_ret_to_user, tmp1, tmp2
+ .endm
+
+ /*
+ * The interrupt numbering scheme is defined in the
+ * interrupt controller spec. To wit:
+ *
+ * Interrupts 0-15 are IPI
+ * 16-28 are reserved
+ * 29-31 are local. We allow 30 to be used for the watchdog.
+ * 32-1020 are global
+ * 1021-1022 are reserved
+ * 1023 is "spurious" (no interrupt)
+ *
+ * For now, we ignore all local interrupts so only return an interrupt if it's
+ * between 30 and 1020. The test_for_ipi routine below will pick up on IPIs.
+ *
+ * A simple read from the controller will tell us the number of the highest
+ * priority enabled interrupt. We then just need to check whether it is in the
+ * valid range for an IRQ (30-1020 inclusive).
+ */
+
+ .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
+ ldr \irqstat, [\base, #GIC_CPU_INTACK] /* bits 12-10 = src CPU, 9-0 = int # */
+ ldr \tmp, =1021
+ bic \irqnr, \irqstat, #0x1c00
+ cmp \irqnr, #29
+ cmpcc \irqnr, \irqnr
+ cmpne \irqnr, \tmp
+ cmpcs \irqnr, \irqnr
+ .endm
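+
+/*
+ * The compare sequence above leaves the Z flag clear only when \irqnr holds
+ * an interrupt in the valid 30-1020 range; IPIs, the local IDs below 30 and
+ * the 1021-1023 special values exit with Z set, so the caller falls through
+ * to test_for_ipi/test_for_ltirq below.
+ */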
+
+ /* We assume that irqstat (the raw value of the IRQ acknowledge
+ * register) is preserved from the macro above.
+ * If there is an IPI, we immediately signal end of interrupt on the
+ * controller, since this requires the original irqstat value which
+ * we won't easily be able to recreate later.
+ */
+
+ .macro test_for_ipi, irqnr, irqstat, base, tmp
+ bic \irqnr, \irqstat, #0x1c00
+ cmp \irqnr, #16
+ strcc \irqstat, [\base, #GIC_CPU_EOI]
+ cmpcs \irqnr, \irqnr
+ .endm
+
+ /* As above, this assumes that irqstat and base are preserved.. */
+
+ .macro test_for_ltirq, irqnr, irqstat, base, tmp
+ bic \irqnr, \irqstat, #0x1c00
+ mov \tmp, #0
+ cmp \irqnr, #29
+ moveq \tmp, #1
+ streq \irqstat, [\base, #GIC_CPU_EOI]
+ cmp \tmp, #0
+ .endm
+
diff --git a/arch/arm/mach-vexpress/include/mach/hardware.h b/arch/arm/mach-vexpress/include/mach/hardware.h
new file mode 100644
index 000000000000..40a8c178f10d
--- /dev/null
+++ b/arch/arm/mach-vexpress/include/mach/hardware.h
@@ -0,0 +1 @@
+/* empty */
diff --git a/arch/arm/mach-vexpress/include/mach/io.h b/arch/arm/mach-vexpress/include/mach/io.h
new file mode 100644
index 000000000000..748bb524ee71
--- /dev/null
+++ b/arch/arm/mach-vexpress/include/mach/io.h
@@ -0,0 +1,28 @@
+/*
+ * arch/arm/mach-vexpress/include/mach/io.h
+ *
+ * Copyright (C) 2003 ARM Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef __ASM_ARM_ARCH_IO_H
+#define __ASM_ARM_ARCH_IO_H
+
+#define IO_SPACE_LIMIT 0xffffffff
+
+#define __io(a) __typesafe_io(a)
+#define __mem_pci(a) (a)
+
+#endif
diff --git a/arch/arm/mach-vexpress/include/mach/irqs.h b/arch/arm/mach-vexpress/include/mach/irqs.h
new file mode 100644
index 000000000000..7054cbfc9de5
--- /dev/null
+++ b/arch/arm/mach-vexpress/include/mach/irqs.h
@@ -0,0 +1,4 @@
+#define IRQ_LOCALTIMER 29
+#define IRQ_LOCALWDOG 30
+
+#define NR_IRQS 128
diff --git a/arch/arm/mach-vexpress/include/mach/memory.h b/arch/arm/mach-vexpress/include/mach/memory.h
new file mode 100644
index 000000000000..be28232ae639
--- /dev/null
+++ b/arch/arm/mach-vexpress/include/mach/memory.h
@@ -0,0 +1,25 @@
+/*
+ * arch/arm/mach-vexpress/include/mach/memory.h
+ *
+ * Copyright (C) 2003 ARM Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef __ASM_ARCH_MEMORY_H
+#define __ASM_ARCH_MEMORY_H
+
+#define PHYS_OFFSET UL(0x60000000)
+
+#endif
diff --git a/arch/arm/mach-vexpress/include/mach/motherboard.h b/arch/arm/mach-vexpress/include/mach/motherboard.h
new file mode 100644
index 000000000000..98a8ded055bf
--- /dev/null
+++ b/arch/arm/mach-vexpress/include/mach/motherboard.h
@@ -0,0 +1,121 @@
+#ifndef __MACH_MOTHERBOARD_H
+#define __MACH_MOTHERBOARD_H
+
+/*
+ * Physical addresses, offset from V2M_PA_CS0-3
+ */
+#define V2M_NOR0 (V2M_PA_CS0)
+#define V2M_NOR1 (V2M_PA_CS1)
+#define V2M_SRAM (V2M_PA_CS2)
+#define V2M_VIDEO_SRAM (V2M_PA_CS3 + 0x00000000)
+#define V2M_LAN9118 (V2M_PA_CS3 + 0x02000000)
+#define V2M_ISP1761 (V2M_PA_CS3 + 0x03000000)
+
+/*
+ * Physical addresses, offset from V2M_PA_CS7
+ */
+#define V2M_SYSREGS (V2M_PA_CS7 + 0x00000000)
+#define V2M_SYSCTL (V2M_PA_CS7 + 0x00001000)
+#define V2M_SERIAL_BUS_PCI (V2M_PA_CS7 + 0x00002000)
+
+#define V2M_AACI (V2M_PA_CS7 + 0x00004000)
+#define V2M_MMCI (V2M_PA_CS7 + 0x00005000)
+#define V2M_KMI0 (V2M_PA_CS7 + 0x00006000)
+#define V2M_KMI1 (V2M_PA_CS7 + 0x00007000)
+
+#define V2M_UART0 (V2M_PA_CS7 + 0x00009000)
+#define V2M_UART1 (V2M_PA_CS7 + 0x0000a000)
+#define V2M_UART2 (V2M_PA_CS7 + 0x0000b000)
+#define V2M_UART3 (V2M_PA_CS7 + 0x0000c000)
+
+#define V2M_WDT (V2M_PA_CS7 + 0x0000f000)
+
+#define V2M_TIMER01 (V2M_PA_CS7 + 0x00011000)
+#define V2M_TIMER23 (V2M_PA_CS7 + 0x00012000)
+
+#define V2M_SERIAL_BUS_DVI (V2M_PA_CS7 + 0x00016000)
+#define V2M_RTC (V2M_PA_CS7 + 0x00017000)
+
+#define V2M_CF (V2M_PA_CS7 + 0x0001a000)
+#define V2M_CLCD (V2M_PA_CS7 + 0x0001f000)
+
+#define V2M_SYS_ID (V2M_SYSREGS + 0x000)
+#define V2M_SYS_SW (V2M_SYSREGS + 0x004)
+#define V2M_SYS_LED (V2M_SYSREGS + 0x008)
+#define V2M_SYS_100HZ (V2M_SYSREGS + 0x024)
+#define V2M_SYS_FLAGS (V2M_SYSREGS + 0x030)
+#define V2M_SYS_FLAGSSET (V2M_SYSREGS + 0x030)
+#define V2M_SYS_FLAGSCLR (V2M_SYSREGS + 0x034)
+#define V2M_SYS_NVFLAGS (V2M_SYSREGS + 0x038)
+#define V2M_SYS_NVFLAGSSET (V2M_SYSREGS + 0x038)
+#define V2M_SYS_NVFLAGSCLR (V2M_SYSREGS + 0x03c)
+#define V2M_SYS_MCI (V2M_SYSREGS + 0x048)
+#define V2M_SYS_FLASH (V2M_SYSREGS + 0x03c)
+#define V2M_SYS_CFGSW (V2M_SYSREGS + 0x058)
+#define V2M_SYS_24MHZ (V2M_SYSREGS + 0x05c)
+#define V2M_SYS_MISC (V2M_SYSREGS + 0x060)
+#define V2M_SYS_DMA (V2M_SYSREGS + 0x064)
+#define V2M_SYS_PROCID0 (V2M_SYSREGS + 0x084)
+#define V2M_SYS_PROCID1 (V2M_SYSREGS + 0x088)
+#define V2M_SYS_CFGDATA (V2M_SYSREGS + 0x0a0)
+#define V2M_SYS_CFGCTRL (V2M_SYSREGS + 0x0a4)
+#define V2M_SYS_CFGSTAT (V2M_SYSREGS + 0x0a8)
+
+#define V2M_TIMER0 (V2M_TIMER01 + 0x000)
+#define V2M_TIMER1 (V2M_TIMER01 + 0x020)
+
+#define V2M_TIMER2 (V2M_TIMER23 + 0x000)
+#define V2M_TIMER3 (V2M_TIMER23 + 0x020)
+
+
+/*
+ * Interrupts. Those in {} are for AMBA devices
+ */
+#define IRQ_V2M_WDT { (32 + 0) }
+#define IRQ_V2M_TIMER0 (32 + 2)
+#define IRQ_V2M_TIMER1 (32 + 2)
+#define IRQ_V2M_TIMER2 (32 + 3)
+#define IRQ_V2M_TIMER3 (32 + 3)
+#define IRQ_V2M_RTC { (32 + 4) }
+#define IRQ_V2M_UART0 { (32 + 5) }
+#define IRQ_V2M_UART1 { (32 + 6) }
+#define IRQ_V2M_UART2 { (32 + 7) }
+#define IRQ_V2M_UART3 { (32 + 8) }
+#define IRQ_V2M_MMCI { (32 + 9), (32 + 10) }
+#define IRQ_V2M_AACI { (32 + 11) }
+#define IRQ_V2M_KMI0 { (32 + 12) }
+#define IRQ_V2M_KMI1 { (32 + 13) }
+#define IRQ_V2M_CLCD { (32 + 14) }
+#define IRQ_V2M_LAN9118 (32 + 15)
+#define IRQ_V2M_ISP1761 (32 + 16)
+#define IRQ_V2M_PCIE (32 + 17)
+
+
+/*
+ * Configuration
+ */
+#define SYS_CFG_START (1 << 31)
+#define SYS_CFG_WRITE (1 << 30)
+#define SYS_CFG_OSC (1 << 20)
+#define SYS_CFG_VOLT (2 << 20)
+#define SYS_CFG_AMP (3 << 20)
+#define SYS_CFG_TEMP (4 << 20)
+#define SYS_CFG_RESET (5 << 20)
+#define SYS_CFG_SCC (6 << 20)
+#define SYS_CFG_MUXFPGA (7 << 20)
+#define SYS_CFG_SHUTDOWN (8 << 20)
+#define SYS_CFG_REBOOT (9 << 20)
+#define SYS_CFG_DVIMODE (11 << 20)
+#define SYS_CFG_POWER (12 << 20)
+#define SYS_CFG_SITE_MB (0 << 16)
+#define SYS_CFG_SITE_DB1 (1 << 16)
+#define SYS_CFG_SITE_DB2 (2 << 16)
+#define SYS_CFG_STACK(n) ((n) << 12)
+
+#define SYS_CFG_ERR (1 << 1)
+#define SYS_CFG_COMPLETE (1 << 0)
+
+int v2m_cfg_write(u32 devfn, u32 data);
+int v2m_cfg_read(u32 devfn, u32 *data);
+
+#endif
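As a usage sketch (not part of this patch): the SYS_CFG_* values above encode a motherboard configuration request, with the function number in bits 20 and up, the target site in bits 16 and up, and the low bits selecting the device instance; v2m_cfg_write()/v2m_cfg_read() add SYS_CFG_START (and SYS_CFG_WRITE) themselves. A minimal, illustrative composition of such a request, assuming motherboard oscillator 1 and a made-up rate:

	/* Illustrative only -- not part of this patch. */
	u32 rate;

	if (v2m_cfg_write(SYS_CFG_OSC | SYS_CFG_SITE_MB | 1, 24000000))
		pr_err("v2m: failed to set OSC1 rate\n");

	if (v2m_cfg_read(SYS_CFG_OSC | SYS_CFG_SITE_MB | 1, &rate) == 0)
		pr_info("v2m: OSC1 now %u Hz\n", rate);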
diff --git a/arch/arm/mach-vexpress/include/mach/smp.h b/arch/arm/mach-vexpress/include/mach/smp.h
new file mode 100644
index 000000000000..72a9621ed087
--- /dev/null
+++ b/arch/arm/mach-vexpress/include/mach/smp.h
@@ -0,0 +1,21 @@
+#ifndef __MACH_SMP_H
+#define __MACH_SMP_H
+
+#include <asm/hardware/gic.h>
+
+#define hard_smp_processor_id() \
+ ({ \
+ unsigned int cpunum; \
+ __asm__("mrc p15, 0, %0, c0, c0, 5" \
+ : "=r" (cpunum)); \
+ cpunum &= 0x0F; \
+ })
+
+/*
+ * We use IRQ1 as the IPI
+ */
+static inline void smp_cross_call(const struct cpumask *mask)
+{
+ gic_raise_softirq(mask, 1);
+}
+#endif
diff --git a/arch/arm/mach-vexpress/include/mach/system.h b/arch/arm/mach-vexpress/include/mach/system.h
new file mode 100644
index 000000000000..899a4e628a4c
--- /dev/null
+++ b/arch/arm/mach-vexpress/include/mach/system.h
@@ -0,0 +1,37 @@
+/*
+ * arch/arm/mach-vexpress/include/mach/system.h
+ *
+ * Copyright (C) 2003 ARM Limited
+ * Copyright (C) 2000 Deep Blue Solutions Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef __ASM_ARCH_SYSTEM_H
+#define __ASM_ARCH_SYSTEM_H
+
+static inline void arch_idle(void)
+{
+ /*
+ * This should do all the clock switching
+ * and wait for interrupt tricks
+ */
+ cpu_do_idle();
+}
+
+static inline void arch_reset(char mode, const char *cmd)
+{
+}
+
+#endif
diff --git a/arch/arm/mach-vexpress/include/mach/timex.h b/arch/arm/mach-vexpress/include/mach/timex.h
new file mode 100644
index 000000000000..00029bacd43c
--- /dev/null
+++ b/arch/arm/mach-vexpress/include/mach/timex.h
@@ -0,0 +1,23 @@
+/*
+ * arch/arm/mach-vexpress/include/mach/timex.h
+ *
+ * RealView architecture timex specifications
+ *
+ * Copyright (C) 2003 ARM Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#define CLOCK_TICK_RATE (50000000 / 16)
diff --git a/arch/arm/mach-vexpress/include/mach/uncompress.h b/arch/arm/mach-vexpress/include/mach/uncompress.h
new file mode 100644
index 000000000000..7972c5748d0e
--- /dev/null
+++ b/arch/arm/mach-vexpress/include/mach/uncompress.h
@@ -0,0 +1,52 @@
+/*
+ * arch/arm/mach-vexpress/include/mach/uncompress.h
+ *
+ * Copyright (C) 2003 ARM Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#define AMBA_UART_DR(base) (*(volatile unsigned char *)((base) + 0x00))
+#define AMBA_UART_LCRH(base) (*(volatile unsigned char *)((base) + 0x2c))
+#define AMBA_UART_CR(base) (*(volatile unsigned char *)((base) + 0x30))
+#define AMBA_UART_FR(base) (*(volatile unsigned char *)((base) + 0x18))
+
+#define get_uart_base() (0x10000000 + 0x00009000)
+
+/*
+ * This does not append a newline
+ */
+static inline void putc(int c)
+{
+ unsigned long base = get_uart_base();
+
+ while (AMBA_UART_FR(base) & (1 << 5))
+ barrier();
+
+ AMBA_UART_DR(base) = c;
+}
+
+static inline void flush(void)
+{
+ unsigned long base = get_uart_base();
+
+ while (AMBA_UART_FR(base) & (1 << 3))
+ barrier();
+}
+
+/*
+ * nothing to do
+ */
+#define arch_decomp_setup()
+#define arch_decomp_wdog()
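The putc()/flush() pair above is consumed by the generic decompressor's string output routine (which lives in arch/arm/boot/compressed/misc.c, not in this patch). Roughly, and only as an illustrative sketch of that caller, strings are emitted as:

	/* Rough sketch of the decompressor-side caller (illustrative). */
	static void putstr(const char *ptr)
	{
		char c;

		while ((c = *ptr++) != '\0') {
			if (c == '\n')
				putc('\r');	/* emit CR before LF for the UART */
			putc(c);
		}

		flush();
	}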
diff --git a/arch/arm/mach-vexpress/include/mach/vmalloc.h b/arch/arm/mach-vexpress/include/mach/vmalloc.h
new file mode 100644
index 000000000000..f43a36ef678b
--- /dev/null
+++ b/arch/arm/mach-vexpress/include/mach/vmalloc.h
@@ -0,0 +1,21 @@
+/*
+ * arch/arm/mach-vexpress/include/mach/vmalloc.h
+ *
+ * Copyright (C) 2003 ARM Limited
+ * Copyright (C) 2000 Russell King.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#define VMALLOC_END 0xf8000000UL
diff --git a/arch/arm/mach-vexpress/localtimer.c b/arch/arm/mach-vexpress/localtimer.c
new file mode 100644
index 000000000000..c0e3a59a0bfc
--- /dev/null
+++ b/arch/arm/mach-vexpress/localtimer.c
@@ -0,0 +1,26 @@
+/*
+ * linux/arch/arm/mach-vexpress/localtimer.c
+ *
+ * Copyright (C) 2002 ARM Ltd.
+ * All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/clockchips.h>
+
+#include <asm/smp_twd.h>
+#include <asm/localtimer.h>
+#include <mach/irqs.h>
+
+/*
+ * Setup the local clock events for a CPU.
+ */
+void __cpuinit local_timer_setup(struct clock_event_device *evt)
+{
+ evt->irq = IRQ_LOCALTIMER;
+ twd_timer_setup(evt);
+}
diff --git a/arch/arm/mach-vexpress/platsmp.c b/arch/arm/mach-vexpress/platsmp.c
new file mode 100644
index 000000000000..670970699ba9
--- /dev/null
+++ b/arch/arm/mach-vexpress/platsmp.c
@@ -0,0 +1,190 @@
+/*
+ * linux/arch/arm/mach-vexpress/platsmp.c
+ *
+ * Copyright (C) 2002 ARM Ltd.
+ * All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/jiffies.h>
+#include <linux/smp.h>
+#include <linux/io.h>
+
+#include <asm/cacheflush.h>
+#include <asm/localtimer.h>
+#include <asm/smp_scu.h>
+#include <asm/unified.h>
+
+#include <mach/ct-ca9x4.h>
+#include <mach/motherboard.h>
+#define V2M_PA_CS7 0x10000000
+
+#include "core.h"
+
+extern void vexpress_secondary_startup(void);
+
+/*
+ * control for which core is the next to come out of the secondary
+ * boot "holding pen"
+ */
+volatile int __cpuinitdata pen_release = -1;
+
+static void __iomem *scu_base_addr(void)
+{
+ return MMIO_P2V(A9_MPCORE_SCU);
+}
+
+static DEFINE_SPINLOCK(boot_lock);
+
+void __cpuinit platform_secondary_init(unsigned int cpu)
+{
+ trace_hardirqs_off();
+
+ /*
+ * if any interrupts are already enabled for the primary
+ * core (e.g. timer irq), then they will not have been enabled
+ * for us: do so
+ */
+ gic_cpu_init(0, gic_cpu_base_addr);
+
+ /*
+ * let the primary processor know we're out of the
+ * pen, then head off into the C entry point
+ */
+ pen_release = -1;
+ smp_wmb();
+
+ /*
+ * Synchronise with the boot thread.
+ */
+ spin_lock(&boot_lock);
+ spin_unlock(&boot_lock);
+}
+
+int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
+{
+ unsigned long timeout;
+
+ /*
+ * Set synchronisation state between this boot processor
+ * and the secondary one
+ */
+ spin_lock(&boot_lock);
+
+ /*
+ * This is really belt and braces; we hold unintended secondary
+ * CPUs in the holding pen until we're ready for them. However,
+ * since we haven't sent them a soft interrupt, they shouldn't
+ * be there.
+ */
+ pen_release = cpu;
+ __cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release));
+ outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));
+
+ /*
+ * Send the secondary CPU a soft interrupt, thereby causing
+ * the boot monitor to read the system wide flags register,
+ * and branch to the address found there.
+ */
+ smp_cross_call(cpumask_of(cpu));
+
+ timeout = jiffies + (1 * HZ);
+ while (time_before(jiffies, timeout)) {
+ smp_rmb();
+ if (pen_release == -1)
+ break;
+
+ udelay(10);
+ }
+
+ /*
+ * now the secondary core is starting up, let it run its
+ * calibrations, then wait for it to finish
+ */
+ spin_unlock(&boot_lock);
+
+ return pen_release != -1 ? -ENOSYS : 0;
+}
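For context, the handshake above has a secondary-CPU side in vexpress_secondary_startup (assembly, not shown in this hunk). As a rough C rendering under that assumption -- illustrative only, not the actual code -- each secondary spins until pen_release names it, then continues into platform_secondary_init(), which writes -1 back:

	/* Illustrative C rendering of the holding-pen wait (the real code is asm). */
	static void secondary_pen_wait(unsigned int cpu)
	{
		while (pen_release != cpu)
			cpu_relax();	/* boot CPU writes our number in boot_secondary() */
	}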
+
+/*
+ * Initialise the CPU possible map early - this describes the CPUs
+ * which may be present or become present in the system.
+ */
+void __init smp_init_cpus(void)
+{
+ void __iomem *scu_base = scu_base_addr();
+ unsigned int i, ncores;
+
+ ncores = scu_base ? scu_get_core_count(scu_base) : 1;
+
+ /* sanity check */
+ if (ncores == 0) {
+ printk(KERN_ERR
+ "vexpress: strange CM count of 0? Default to 1\n");
+
+ ncores = 1;
+ }
+
+ if (ncores > NR_CPUS) {
+ printk(KERN_WARNING
+ "vexpress: no. of cores (%d) greater than configured "
+ "maximum of %d - clipping\n",
+ ncores, NR_CPUS);
+ ncores = NR_CPUS;
+ }
+
+ for (i = 0; i < ncores; i++)
+ set_cpu_possible(i, true);
+}
+
+void __init smp_prepare_cpus(unsigned int max_cpus)
+{
+ unsigned int ncores = num_possible_cpus();
+ unsigned int cpu = smp_processor_id();
+ int i;
+
+ smp_store_cpu_info(cpu);
+
+ /*
+ * are we trying to boot more cores than exist?
+ */
+ if (max_cpus > ncores)
+ max_cpus = ncores;
+
+ /*
+ * Initialise the present map, which describes the set of CPUs
+ * actually populated at the present time.
+ */
+ for (i = 0; i < max_cpus; i++)
+ set_cpu_present(i, true);
+
+ /*
+ * Initialise the SCU if there is more than one CPU and let
+ * them know where to start.
+ */
+ if (max_cpus > 1) {
+ /*
+ * Enable the local timer or broadcast device for the
+ * boot CPU, but only if we have more than one CPU.
+ */
+ percpu_timer_setup();
+
+ scu_enable(scu_base_addr());
+
+ /*
+ * Write the address of secondary startup into the
+ * system-wide flags register. The boot monitor waits
+ * until it receives a soft interrupt, and then the
+ * secondary CPU branches to this address.
+ */
+ writel(~0, MMIO_P2V(V2M_SYS_FLAGSCLR));
+ writel(BSYM(virt_to_phys(vexpress_secondary_startup)),
+ MMIO_P2V(V2M_SYS_FLAGSSET));
+ }
+}
diff --git a/arch/arm/mach-vexpress/v2m.c b/arch/arm/mach-vexpress/v2m.c
new file mode 100644
index 000000000000..d250711b8c7a
--- /dev/null
+++ b/arch/arm/mach-vexpress/v2m.c
@@ -0,0 +1,361 @@
+/*
+ * Versatile Express V2M Motherboard Support
+ */
+#include <linux/device.h>
+#include <linux/amba/bus.h>
+#include <linux/amba/mmci.h>
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/smsc911x.h>
+#include <linux/spinlock.h>
+#include <linux/sysdev.h>
+#include <linux/usb/isp1760.h>
+
+#include <asm/clkdev.h>
+#include <asm/sizes.h>
+#include <asm/mach/flash.h>
+#include <asm/mach/map.h>
+#include <asm/mach/time.h>
+#include <asm/hardware/arm_timer.h>
+
+#include <mach/clkdev.h>
+#include <mach/motherboard.h>
+
+#include <plat/timer-sp.h>
+
+#include "core.h"
+
+#define V2M_PA_CS0 0x40000000
+#define V2M_PA_CS1 0x44000000
+#define V2M_PA_CS2 0x48000000
+#define V2M_PA_CS3 0x4c000000
+#define V2M_PA_CS7 0x10000000
+
+static struct map_desc v2m_io_desc[] __initdata = {
+ {
+ .virtual = __MMIO_P2V(V2M_PA_CS7),
+ .pfn = __phys_to_pfn(V2M_PA_CS7),
+ .length = SZ_128K,
+ .type = MT_DEVICE,
+ },
+};
+
+void __init v2m_map_io(struct map_desc *tile, size_t num)
+{
+ iotable_init(v2m_io_desc, ARRAY_SIZE(v2m_io_desc));
+ iotable_init(tile, num);
+}
+
+
+static void v2m_timer_init(void)
+{
+ writel(0, MMIO_P2V(V2M_TIMER0) + TIMER_CTRL);
+ writel(0, MMIO_P2V(V2M_TIMER1) + TIMER_CTRL);
+
+ sp804_clocksource_init(MMIO_P2V(V2M_TIMER1));
+ sp804_clockevents_init(MMIO_P2V(V2M_TIMER0), IRQ_V2M_TIMER0);
+}
+
+struct sys_timer v2m_timer = {
+ .init = v2m_timer_init,
+};
+
+
+static DEFINE_SPINLOCK(v2m_cfg_lock);
+
+int v2m_cfg_write(u32 devfn, u32 data)
+{
+ /* Configuration interface broken? */
+ u32 val;
+
+ printk("%s: writing %08x to %08x\n", __func__, data, devfn);
+
+ devfn |= SYS_CFG_START | SYS_CFG_WRITE;
+
+ spin_lock(&v2m_cfg_lock);
+ val = readl(MMIO_P2V(V2M_SYS_CFGSTAT));
+ writel(val & ~SYS_CFG_COMPLETE, MMIO_P2V(V2M_SYS_CFGSTAT));
+
+ writel(data, MMIO_P2V(V2M_SYS_CFGDATA));
+ writel(devfn, MMIO_P2V(V2M_SYS_CFGCTRL));
+
+ do {
+ val = readl(MMIO_P2V(V2M_SYS_CFGSTAT));
+ } while (val == 0);
+ spin_unlock(&v2m_cfg_lock);
+
+ return !!(val & SYS_CFG_ERR);
+}
+
+int v2m_cfg_read(u32 devfn, u32 *data)
+{
+ u32 val;
+
+ devfn |= SYS_CFG_START;
+
+ spin_lock(&v2m_cfg_lock);
+ writel(0, MMIO_P2V(V2M_SYS_CFGSTAT));
+ writel(devfn, MMIO_P2V(V2M_SYS_CFGCTRL));
+
+ mb();
+
+ do {
+ cpu_relax();
+ val = readl(MMIO_P2V(V2M_SYS_CFGSTAT));
+ } while (val == 0);
+
+ *data = readl(MMIO_P2V(V2M_SYS_CFGDATA));
+ spin_unlock(&v2m_cfg_lock);
+
+ return !!(val & SYS_CFG_ERR);
+}
+
+
+static struct resource v2m_pcie_i2c_resource = {
+ .start = V2M_SERIAL_BUS_PCI,
+ .end = V2M_SERIAL_BUS_PCI + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct platform_device v2m_pcie_i2c_device = {
+ .name = "versatile-i2c",
+ .id = 0,
+ .num_resources = 1,
+ .resource = &v2m_pcie_i2c_resource,
+};
+
+static struct resource v2m_ddc_i2c_resource = {
+ .start = V2M_SERIAL_BUS_DVI,
+ .end = V2M_SERIAL_BUS_DVI + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct platform_device v2m_ddc_i2c_device = {
+ .name = "versatile-i2c",
+ .id = 1,
+ .num_resources = 1,
+ .resource = &v2m_ddc_i2c_resource,
+};
+
+static struct resource v2m_eth_resources[] = {
+ {
+ .start = V2M_LAN9118,
+ .end = V2M_LAN9118 + SZ_64K - 1,
+ .flags = IORESOURCE_MEM,
+ }, {
+ .start = IRQ_V2M_LAN9118,
+ .end = IRQ_V2M_LAN9118,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct smsc911x_platform_config v2m_eth_config = {
+ .flags = SMSC911X_USE_32BIT,
+ .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_HIGH,
+ .irq_type = SMSC911X_IRQ_TYPE_PUSH_PULL,
+ .phy_interface = PHY_INTERFACE_MODE_MII,
+};
+
+static struct platform_device v2m_eth_device = {
+ .name = "smsc911x",
+ .id = -1,
+ .resource = v2m_eth_resources,
+ .num_resources = ARRAY_SIZE(v2m_eth_resources),
+ .dev.platform_data = &v2m_eth_config,
+};
+
+static struct resource v2m_usb_resources[] = {
+ {
+ .start = V2M_ISP1761,
+ .end = V2M_ISP1761 + SZ_128K - 1,
+ .flags = IORESOURCE_MEM,
+ }, {
+ .start = IRQ_V2M_ISP1761,
+ .end = IRQ_V2M_ISP1761,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct isp1760_platform_data v2m_usb_config = {
+ .is_isp1761 = true,
+ .bus_width_16 = false,
+ .port1_otg = true,
+ .analog_oc = false,
+ .dack_polarity_high = false,
+ .dreq_polarity_high = false,
+};
+
+static struct platform_device v2m_usb_device = {
+ .name = "isp1760",
+ .id = -1,
+ .resource = v2m_usb_resources,
+ .num_resources = ARRAY_SIZE(v2m_usb_resources),
+ .dev.platform_data = &v2m_usb_config,
+};
+
+static int v2m_flash_init(void)
+{
+ writel(0, MMIO_P2V(V2M_SYS_FLASH));
+ return 0;
+}
+
+static void v2m_flash_exit(void)
+{
+ writel(0, MMIO_P2V(V2M_SYS_FLASH));
+}
+
+static void v2m_flash_set_vpp(int on)
+{
+ writel(on != 0, MMIO_P2V(V2M_SYS_FLASH));
+}
+
+static struct flash_platform_data v2m_flash_data = {
+ .map_name = "cfi_probe",
+ .width = 4,
+ .init = v2m_flash_init,
+ .exit = v2m_flash_exit,
+ .set_vpp = v2m_flash_set_vpp,
+};
+
+static struct resource v2m_flash_resources[] = {
+ {
+ .start = V2M_NOR0,
+ .end = V2M_NOR0 + SZ_64M - 1,
+ .flags = IORESOURCE_MEM,
+ }, {
+ .start = V2M_NOR1,
+ .end = V2M_NOR1 + SZ_64M - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device v2m_flash_device = {
+ .name = "armflash",
+ .id = -1,
+ .resource = v2m_flash_resources,
+ .num_resources = ARRAY_SIZE(v2m_flash_resources),
+ .dev.platform_data = &v2m_flash_data,
+};
+
+
+static unsigned int v2m_mmci_status(struct device *dev)
+{
+ return !(readl(MMIO_P2V(V2M_SYS_MCI)) & (1 << 0));
+}
+
+static struct mmci_platform_data v2m_mmci_data = {
+ .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34,
+ .status = v2m_mmci_status,
+};
+
+static AMBA_DEVICE(aaci, "mb:aaci", V2M_AACI, NULL);
+static AMBA_DEVICE(mmci, "mb:mmci", V2M_MMCI, &v2m_mmci_data);
+static AMBA_DEVICE(kmi0, "mb:kmi0", V2M_KMI0, NULL);
+static AMBA_DEVICE(kmi1, "mb:kmi1", V2M_KMI1, NULL);
+static AMBA_DEVICE(uart0, "mb:uart0", V2M_UART0, NULL);
+static AMBA_DEVICE(uart1, "mb:uart1", V2M_UART1, NULL);
+static AMBA_DEVICE(uart2, "mb:uart2", V2M_UART2, NULL);
+static AMBA_DEVICE(uart3, "mb:uart3", V2M_UART3, NULL);
+static AMBA_DEVICE(wdt, "mb:wdt", V2M_WDT, NULL);
+static AMBA_DEVICE(rtc, "mb:rtc", V2M_RTC, NULL);
+
+static struct amba_device *v2m_amba_devs[] __initdata = {
+ &aaci_device,
+ &mmci_device,
+ &kmi0_device,
+ &kmi1_device,
+ &uart0_device,
+ &uart1_device,
+ &uart2_device,
+ &uart3_device,
+ &wdt_device,
+ &rtc_device,
+};
+
+
+static long v2m_osc_round(struct clk *clk, unsigned long rate)
+{
+ return rate;
+}
+
+static int v2m_osc1_set(struct clk *clk, unsigned long rate)
+{
+ return v2m_cfg_write(SYS_CFG_OSC | SYS_CFG_SITE_MB | 1, rate);
+}
+
+static const struct clk_ops osc1_clk_ops = {
+ .round = v2m_osc_round,
+ .set = v2m_osc1_set,
+};
+
+static struct clk osc1_clk = {
+ .ops = &osc1_clk_ops,
+ .rate = 24000000,
+};
+
+static struct clk osc2_clk = {
+ .rate = 24000000,
+};
+
+static struct clk_lookup v2m_lookups[] = {
+ { /* UART0 */
+ .dev_id = "mb:uart0",
+ .clk = &osc2_clk,
+ }, { /* UART1 */
+ .dev_id = "mb:uart1",
+ .clk = &osc2_clk,
+ }, { /* UART2 */
+ .dev_id = "mb:uart2",
+ .clk = &osc2_clk,
+ }, { /* UART3 */
+ .dev_id = "mb:uart3",
+ .clk = &osc2_clk,
+ }, { /* KMI0 */
+ .dev_id = "mb:kmi0",
+ .clk = &osc2_clk,
+ }, { /* KMI1 */
+ .dev_id = "mb:kmi1",
+ .clk = &osc2_clk,
+ }, { /* MMC0 */
+ .dev_id = "mb:mmci",
+ .clk = &osc2_clk,
+ }, { /* CLCD */
+ .dev_id = "mb:clcd",
+ .clk = &osc1_clk,
+ },
+};
+
+static void v2m_power_off(void)
+{
+ if (v2m_cfg_write(SYS_CFG_SHUTDOWN | SYS_CFG_SITE_MB, 0))
+ printk(KERN_EMERG "Unable to shutdown\n");
+}
+
+static void v2m_restart(char str, const char *cmd)
+{
+ if (v2m_cfg_write(SYS_CFG_REBOOT | SYS_CFG_SITE_MB, 0))
+ printk(KERN_EMERG "Unable to reboot\n");
+}
+
+static int __init v2m_init(void)
+{
+ int i;
+
+ clkdev_add_table(v2m_lookups, ARRAY_SIZE(v2m_lookups));
+
+ platform_device_register(&v2m_pcie_i2c_device);
+ platform_device_register(&v2m_ddc_i2c_device);
+ platform_device_register(&v2m_flash_device);
+ platform_device_register(&v2m_eth_device);
+ platform_device_register(&v2m_usb_device);
+
+ for (i = 0; i < ARRAY_SIZE(v2m_amba_devs); i++)
+ amba_device_register(v2m_amba_devs[i], &iomem_resource);
+
+ pm_power_off = v2m_power_off;
+ arm_pm_restart = v2m_restart;
+
+ return 0;
+}
+arch_initcall(v2m_init);
diff --git a/arch/arm/mach-w90x900/dev.c b/arch/arm/mach-w90x900/dev.c
index e2958eb567f9..b2eda4dc1c34 100644
--- a/arch/arm/mach-w90x900/dev.c
+++ b/arch/arm/mach-w90x900/dev.c
@@ -423,6 +423,33 @@ void nuc900_fb_set_platdata(struct nuc900fb_mach_info *pd)
}
#endif
+/* AUDIO controller */
+static u64 nuc900_device_audio_dmamask = -1;
+static struct resource nuc900_ac97_resource[] = {
+ [0] = {
+ .start = W90X900_PA_ACTL,
+ .end = W90X900_PA_ACTL + W90X900_SZ_ACTL - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = IRQ_ACTL,
+ .end = IRQ_ACTL,
+ .flags = IORESOURCE_IRQ,
+ }
+
+};
+
+struct platform_device nuc900_device_audio = {
+ .name = "nuc900-audio",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(nuc900_ac97_resource),
+ .resource = nuc900_ac97_resource,
+ .dev = {
+ .dma_mask = &nuc900_device_audio_dmamask,
+ .coherent_dma_mask = -1,
+ }
+};
+
/*Here should be your evb resourse,such as LCD*/
static struct platform_device *nuc900_public_dev[] __initdata = {
@@ -434,6 +461,7 @@ static struct platform_device *nuc900_public_dev[] __initdata = {
&nuc900_device_emc,
&nuc900_device_spi,
&nuc900_device_wdt,
+ &nuc900_device_audio,
};
/* Provide adding specific CPU platform devices API */
diff --git a/arch/arm/mach-w90x900/include/mach/mfp.h b/arch/arm/mach-w90x900/include/mach/mfp.h
new file mode 100644
index 000000000000..94c0e71617c6
--- /dev/null
+++ b/arch/arm/mach-w90x900/include/mach/mfp.h
@@ -0,0 +1,24 @@
+/*
+ * arch/arm/mach-w90x900/include/mach/mfp.h
+ *
+ * Copyright (c) 2010 Nuvoton technology corporation.
+ *
+ * Wan ZongShun <mcuos.com@gmail.com>
+ *
+ * Based on arch/arm/mach-s3c2410/include/mach/map.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation;version 2 of the License.
+ *
+ */
+
+#ifndef __ASM_ARCH_MFP_H
+#define __ASM_ARCH_MFP_H
+
+extern void mfp_set_groupf(struct device *dev);
+extern void mfp_set_groupc(struct device *dev);
+extern void mfp_set_groupi(struct device *dev);
+extern void mfp_set_groupg(struct device *dev);
+
+#endif /* __ASM_ARCH_MFP_H */
diff --git a/arch/arm/mach-w90x900/mfp.c b/arch/arm/mach-w90x900/mfp.c
index a47dc9a708ee..fb7fb627b1a5 100644
--- a/arch/arm/mach-w90x900/mfp.c
+++ b/arch/arm/mach-w90x900/mfp.c
@@ -36,9 +36,12 @@
#define GPIOG0TO1 (0x03 << 14)
#define GPIOG2TO3 (0x03 << 16)
+#define GPIOG22TO23 (0x03 << 22)
+
#define ENSPI (0x0a << 14)
#define ENI2C0 (0x01 << 14)
#define ENI2C1 (0x01 << 16)
+#define ENAC97 (0x02 << 22)
static DEFINE_MUTEX(mfp_mutex);
@@ -146,6 +149,9 @@ void mfp_set_groupg(struct device *dev)
} else if (strcmp(dev_id, "nuc900-i2c1") == 0) {
mfpen &= ~(GPIOG2TO3);
mfpen |= ENI2C1;/*enable i2c1*/
+ } else if (strcmp(dev_id, "nuc900-audio") == 0) {
+ mfpen &= ~(GPIOG22TO23);
+ mfpen |= ENAC97;/*enable AC97*/
} else {
mfpen &= ~(GPIOG0TO1 | GPIOG2TO3);/*GPIOG[3:0]*/
}
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 5bd7c89a6045..346ae14824a5 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -572,6 +572,8 @@ config CPU_TLB_V6
config CPU_TLB_V7
bool
+config VERIFY_PERMISSION_FAULT
+ bool
endif
config CPU_HAS_ASID
@@ -760,7 +762,8 @@ config CACHE_FEROCEON_L2_WRITETHROUGH
config CACHE_L2X0
bool "Enable the L2x0 outer cache controller"
depends on REALVIEW_EB_ARM11MP || MACH_REALVIEW_PB11MP || MACH_REALVIEW_PB1176 || \
- REALVIEW_EB_A9MP || ARCH_MX35 || ARCH_MX31 || MACH_REALVIEW_PBX || ARCH_NOMADIK || ARCH_OMAP4
+ REALVIEW_EB_A9MP || ARCH_MX35 || ARCH_MX31 || MACH_REALVIEW_PBX || \
+ ARCH_NOMADIK || ARCH_OMAP4 || ARCH_U8500 || ARCH_VEXPRESS_CA9X4
default y
select OUTER_CACHE
select OUTER_CACHE_SYNC
@@ -769,7 +772,7 @@ config CACHE_L2X0
config CACHE_TAUROS2
bool "Enable the Tauros2 L2 cache controller"
- depends on ARCH_DOVE
+ depends on (ARCH_DOVE || ARCH_MMP)
default y
select OUTER_CACHE
help
@@ -789,6 +792,25 @@ config ARM_L1_CACHE_SHIFT
default 6 if ARM_L1_CACHE_SHIFT_6
default 5
+config ARM_DMA_MEM_BUFFERABLE
+ bool "Use non-cacheable memory for DMA" if CPU_V6 && !CPU_V7
+ default y if CPU_V6 || CPU_V7
+ help
+ Historically, the kernel has used strongly ordered mappings to
+ provide DMA coherent memory. With the advent of ARMv7, mapping
+ memory with differing types results in unpredictable behaviour,
+ so on these CPUs, this option is forced on.
+
+	  Multiple mappings with differing attributes are also unpredictable
+ on ARMv6 CPUs, but since they do not have aggressive speculative
+ prefetch, no harm appears to occur.
+
+ However, drivers may be missing the necessary barriers for ARMv6,
+ and therefore turning this on may result in unpredictable driver
+ behaviour. Therefore, we offer this as an option.
+
+	  You are recommended to say 'Y' here and debug any affected drivers.
+
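The practical upshot for ARMv6 drivers is that writes to a coherent (now merely bufferable) DMA buffer are no longer implicitly ordered against a later MMIO doorbell, so an explicit barrier is needed. A minimal sketch of the pattern this help text alludes to; the descriptor fields and doorbell register names are invented for illustration:

	/* desc points into a dma_alloc_coherent() buffer (bufferable memory). */
	desc->buf   = cpu_to_le32(buf_dma);
	desc->len   = cpu_to_le32(len);
	desc->flags = cpu_to_le32(DESC_OWN);	/* hand the descriptor to the device */

	wmb();	/* order the descriptor writes before ringing the doorbell */
	writel(DMA_KICK, ioaddr + TX_DOORBELL);	/* DMA_KICK/TX_DOORBELL are illustrative */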
config ARCH_HAS_BARRIERS
bool
help
diff --git a/arch/arm/mm/abort-ev7.S b/arch/arm/mm/abort-ev7.S
index 2e6dc040c654..ec88b157d3bb 100644
--- a/arch/arm/mm/abort-ev7.S
+++ b/arch/arm/mm/abort-ev7.S
@@ -29,5 +29,26 @@ ENTRY(v7_early_abort)
* V6 code adjusts the returned DFSR.
* New designs should not need to patch up faults.
*/
+
+#if defined(CONFIG_VERIFY_PERMISSION_FAULT)
+ /*
+ * Detect erroneous permission failures and fix
+ */
+ ldr r3, =0x40d @ On permission fault
+ and r3, r1, r3
+ cmp r3, #0x0d
+ movne pc, lr
+
+ mcr p15, 0, r0, c7, c8, 0 @ Retranslate FAR
+ isb
+ mrc p15, 0, r2, c7, c4, 0 @ Read the PAR
+ and r3, r2, #0x7b @ On translation fault
+ cmp r3, #0x0b
+ movne pc, lr
+ bic r1, r1, #0xf @ Fix up FSR FS[5:0]
+ and r2, r2, #0x7e
+ orr r1, r1, r2, LSR #1
+#endif
+
mov pc, lr
ENDPROC(v7_early_abort)
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index a2ab51fa73e2..6f98c358989a 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -17,6 +17,7 @@
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
@@ -94,36 +95,29 @@ static const char *usermode_action[] = {
"signal+warn"
};
-static int
-proc_alignment_read(char *page, char **start, off_t off, int count, int *eof,
- void *data)
+static int alignment_proc_show(struct seq_file *m, void *v)
{
- char *p = page;
- int len;
-
- p += sprintf(p, "User:\t\t%lu\n", ai_user);
- p += sprintf(p, "System:\t\t%lu\n", ai_sys);
- p += sprintf(p, "Skipped:\t%lu\n", ai_skipped);
- p += sprintf(p, "Half:\t\t%lu\n", ai_half);
- p += sprintf(p, "Word:\t\t%lu\n", ai_word);
+ seq_printf(m, "User:\t\t%lu\n", ai_user);
+ seq_printf(m, "System:\t\t%lu\n", ai_sys);
+ seq_printf(m, "Skipped:\t%lu\n", ai_skipped);
+ seq_printf(m, "Half:\t\t%lu\n", ai_half);
+ seq_printf(m, "Word:\t\t%lu\n", ai_word);
if (cpu_architecture() >= CPU_ARCH_ARMv5TE)
- p += sprintf(p, "DWord:\t\t%lu\n", ai_dword);
- p += sprintf(p, "Multi:\t\t%lu\n", ai_multi);
- p += sprintf(p, "User faults:\t%i (%s)\n", ai_usermode,
+ seq_printf(m, "DWord:\t\t%lu\n", ai_dword);
+ seq_printf(m, "Multi:\t\t%lu\n", ai_multi);
+ seq_printf(m, "User faults:\t%i (%s)\n", ai_usermode,
usermode_action[ai_usermode]);
- len = (p - page) - off;
- if (len < 0)
- len = 0;
-
- *eof = (len <= count) ? 1 : 0;
- *start = page + off;
+ return 0;
+}
- return len;
+static int alignment_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, alignment_proc_show, NULL);
}
-static int proc_alignment_write(struct file *file, const char __user *buffer,
- unsigned long count, void *data)
+static ssize_t alignment_proc_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *pos)
{
char mode;
@@ -136,6 +130,13 @@ static int proc_alignment_write(struct file *file, const char __user *buffer,
return count;
}
+static const struct file_operations alignment_proc_fops = {
+ .open = alignment_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = alignment_proc_write,
+};
#endif /* CONFIG_PROC_FS */
union offset_union {
@@ -901,12 +902,10 @@ static int __init alignment_init(void)
#ifdef CONFIG_PROC_FS
struct proc_dir_entry *res;
- res = create_proc_entry("cpu/alignment", S_IWUSR | S_IRUGO, NULL);
+ res = proc_create("cpu/alignment", S_IWUSR | S_IRUGO, NULL,
+ &alignment_proc_fops);
if (!res)
return -ENOMEM;
-
- res->read_proc = proc_alignment_read;
- res->write_proc = proc_alignment_write;
#endif
/*
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 21ad68ba22ba..9819869d2bc9 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -27,6 +27,7 @@
static void __iomem *l2x0_base;
static DEFINE_SPINLOCK(l2x0_lock);
+static uint32_t l2x0_way_mask; /* Bitmask of active ways */
static inline void cache_wait(void __iomem *reg, unsigned long mask)
{
@@ -108,8 +109,8 @@ static inline void l2x0_inv_all(void)
/* invalidate all ways */
spin_lock_irqsave(&l2x0_lock, flags);
- writel(0xff, l2x0_base + L2X0_INV_WAY);
- cache_wait(l2x0_base + L2X0_INV_WAY, 0xff);
+ writel(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
+ cache_wait(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
cache_sync();
spin_unlock_irqrestore(&l2x0_lock, flags);
}
@@ -208,9 +209,37 @@ static void l2x0_flush_range(unsigned long start, unsigned long end)
void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
{
__u32 aux;
+ __u32 cache_id;
+ int ways;
+ const char *type;
l2x0_base = base;
+ cache_id = readl(l2x0_base + L2X0_CACHE_ID);
+ aux = readl(l2x0_base + L2X0_AUX_CTRL);
+
+ /* Determine the number of ways */
+ switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
+ case L2X0_CACHE_ID_PART_L310:
+ if (aux & (1 << 16))
+ ways = 16;
+ else
+ ways = 8;
+ type = "L310";
+ break;
+ case L2X0_CACHE_ID_PART_L210:
+ ways = (aux >> 13) & 0xf;
+ type = "L210";
+ break;
+ default:
+ /* Assume unknown chips have 8 ways */
+ ways = 8;
+ type = "L2x0 series";
+ break;
+ }
+
+ l2x0_way_mask = (1 << ways) - 1;
+
/*
* Check if l2x0 controller is already enabled.
* If you are booting from non-secure mode
@@ -219,8 +248,6 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
if (!(readl(l2x0_base + L2X0_CTRL) & 1)) {
/* l2x0 controller is disabled */
-
- aux = readl(l2x0_base + L2X0_AUX_CTRL);
aux &= aux_mask;
aux |= aux_val;
writel(aux, l2x0_base + L2X0_AUX_CTRL);
@@ -236,5 +263,7 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
outer_cache.flush_range = l2x0_flush_range;
outer_cache.sync = l2x0_cache_sync;
- printk(KERN_INFO "L2X0 cache controller enabled\n");
+ printk(KERN_INFO "%s cache controller enabled\n", type);
+ printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n",
+ ways, cache_id, aux);
}
diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
index 9d89c67a1cc3..e46ecd847138 100644
--- a/arch/arm/mm/cache-v6.S
+++ b/arch/arm/mm/cache-v6.S
@@ -211,6 +211,9 @@ v6_dma_inv_range:
mcrne p15, 0, r1, c7, c15, 1 @ clean & invalidate unified line
#endif
1:
+#ifdef CONFIG_SMP
+ str r0, [r0] @ write for ownership
+#endif
#ifdef HARVARD_CACHE
mcr p15, 0, r0, c7, c6, 1 @ invalidate D line
#else
@@ -231,6 +234,9 @@ v6_dma_inv_range:
v6_dma_clean_range:
bic r0, r0, #D_CACHE_LINE_SIZE - 1
1:
+#ifdef CONFIG_SMP
+ ldr r2, [r0] @ read for ownership
+#endif
#ifdef HARVARD_CACHE
mcr p15, 0, r0, c7, c10, 1 @ clean D line
#else
@@ -251,6 +257,10 @@ v6_dma_clean_range:
ENTRY(v6_dma_flush_range)
bic r0, r0, #D_CACHE_LINE_SIZE - 1
1:
+#ifdef CONFIG_SMP
+ ldr r2, [r0] @ read for ownership
+ str r2, [r0] @ write for ownership
+#endif
#ifdef HARVARD_CACHE
mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line
#else
@@ -273,7 +283,9 @@ ENTRY(v6_dma_map_area)
add r1, r1, r0
teq r2, #DMA_FROM_DEVICE
beq v6_dma_inv_range
- b v6_dma_clean_range
+ teq r2, #DMA_TO_DEVICE
+ beq v6_dma_clean_range
+ b v6_dma_flush_range
ENDPROC(v6_dma_map_area)
/*
@@ -283,9 +295,6 @@ ENDPROC(v6_dma_map_area)
* - dir - DMA direction
*/
ENTRY(v6_dma_unmap_area)
- add r1, r1, r0
- teq r2, #DMA_TO_DEVICE
- bne v6_dma_inv_range
mov pc, lr
ENDPROC(v6_dma_unmap_area)
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index bcd64f265870..37c8157e116e 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -91,7 +91,11 @@ ENTRY(v7_flush_kern_cache_all)
THUMB( stmfd sp!, {r4-r7, r9-r11, lr} )
bl v7_flush_dcache_all
mov r0, #0
+#ifdef CONFIG_SMP
+ mcr p15, 0, r0, c7, c1, 0 @ invalidate I-cache inner shareable
+#else
mcr p15, 0, r0, c7, c5, 0 @ I+BTB cache invalidate
+#endif
ARM( ldmfd sp!, {r4-r5, r7, r9-r11, lr} )
THUMB( ldmfd sp!, {r4-r7, r9-r11, lr} )
mov pc, lr
@@ -167,7 +171,11 @@ ENTRY(v7_coherent_user_range)
cmp r0, r1
blo 1b
mov r0, #0
+#ifdef CONFIG_SMP
+ mcr p15, 0, r0, c7, c1, 6 @ invalidate BTB Inner Shareable
+#else
mcr p15, 0, r0, c7, c5, 6 @ invalidate BTB
+#endif
dsb
isb
mov pc, lr
diff --git a/arch/arm/mm/copypage-fa.c b/arch/arm/mm/copypage-fa.c
index b2a6008b0111..d2852e1635b1 100644
--- a/arch/arm/mm/copypage-fa.c
+++ b/arch/arm/mm/copypage-fa.c
@@ -40,7 +40,7 @@ fa_copy_user_page(void *kto, const void *kfrom)
}
void fa_copy_user_highpage(struct page *to, struct page *from,
- unsigned long vaddr)
+ unsigned long vaddr, struct vm_area_struct *vma)
{
void *kto, *kfrom;
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index 0d414c28eb2c..9b906dec1ca1 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -134,8 +134,6 @@ make_coherent(struct address_space *mapping, struct vm_area_struct *vma,
flush_dcache_mmap_unlock(mapping);
if (aliases)
do_adjust_pte(vma, addr, pfn, ptep);
- else
- flush_cache_page(vma, addr, pfn);
}
/*
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 9d40c341e07e..92f5801f99c1 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -463,7 +463,12 @@ static struct fsr_info {
{ do_bad, SIGILL, BUS_ADRALN, "alignment exception" },
{ do_bad, SIGKILL, 0, "terminal exception" },
{ do_bad, SIGILL, BUS_ADRALN, "alignment exception" },
+/* Do we need runtime check ? */
+#if __LINUX_ARM_ARCH__ < 6
{ do_bad, SIGBUS, 0, "external abort on linefetch" },
+#else
+ { do_translation_fault, SIGSEGV, SEGV_MAPERR, "I-cache maintenance fault" },
+#endif
{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "section translation fault" },
{ do_bad, SIGBUS, 0, "external abort on linefetch" },
{ do_page_fault, SIGSEGV, SEGV_MAPERR, "page translation fault" },
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 0ed29bfeba1c..1ba6cf5a2c02 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -15,7 +15,6 @@
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
-#include <linux/sort.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
@@ -224,20 +223,6 @@ static int __init check_initrd(struct meminfo *mi)
return initrd_node;
}
-static inline void map_memory_bank(struct membank *bank)
-{
-#ifdef CONFIG_MMU
- struct map_desc map;
-
- map.pfn = bank_pfn_start(bank);
- map.virtual = __phys_to_virt(bank_phys_start(bank));
- map.length = bank_phys_size(bank);
- map.type = MT_MEMORY;
-
- create_mapping(&map);
-#endif
-}
-
static void __init bootmem_init_node(int node, struct meminfo *mi,
unsigned long start_pfn, unsigned long end_pfn)
{
@@ -247,16 +232,6 @@ static void __init bootmem_init_node(int node, struct meminfo *mi,
int i;
/*
- * Map the memory banks for this node.
- */
- for_each_nodebank(i, mi, node) {
- struct membank *bank = &mi->bank[i];
-
- if (!bank->highmem)
- map_memory_bank(bank);
- }
-
- /*
* Allocate the bootmem bitmap page.
*/
boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
@@ -385,21 +360,12 @@ static void arm_memory_present(struct meminfo *mi, int node)
}
#endif
-static int __init meminfo_cmp(const void *_a, const void *_b)
-{
- const struct membank *a = _a, *b = _b;
- long cmp = bank_pfn_start(a) - bank_pfn_start(b);
- return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
-}
-
void __init bootmem_init(void)
{
struct meminfo *mi = &meminfo;
unsigned long min, max_low, max_high;
int node, initrd_node;
- sort(&mi->bank, mi->nr_banks, sizeof(mi->bank[0]), meminfo_cmp, NULL);
-
/*
* Locate which node contains the ramdisk image, if any.
*/
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index a888363398f8..815d08eecbb0 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -28,10 +28,7 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page
#endif
-struct map_desc;
-struct meminfo;
struct pglist_data;
-void __init create_mapping(struct map_desc *md);
void __init bootmem_init(void);
void reserve_node_zero(struct pglist_data *pgdat);
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 241c24a1c18f..285894171186 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -14,6 +14,7 @@
#include <linux/bootmem.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
+#include <linux/sort.h>
#include <asm/cputype.h>
#include <asm/mach-types.h>
@@ -603,7 +604,7 @@ static void __init create_36bit_mapping(struct map_desc *md,
* offsets, and we take full advantage of sections and
* supersections.
*/
-void __init create_mapping(struct map_desc *md)
+static void __init create_mapping(struct map_desc *md)
{
unsigned long phys, addr, length, end;
const struct mem_type *type;
@@ -869,9 +870,10 @@ void __init reserve_node_zero(pg_data_t *pgdat)
if (machine_is_p720t())
res_size = 0x00014000;
- /* H1940 and RX3715 need to reserve this for suspend */
+ /* H1940, RX3715 and RX1950 need to reserve this for suspend */
- if (machine_is_h1940() || machine_is_rx3715()) {
+ if (machine_is_h1940() || machine_is_rx3715()
+ || machine_is_rx1950()) {
reserve_bootmem_node(pgdat, 0x30003000, 0x1000,
BOOTMEM_DEFAULT);
reserve_bootmem_node(pgdat, 0x30081000, 0x1000,
@@ -1017,6 +1019,39 @@ static void __init kmap_init(void)
#endif
}
+static inline void map_memory_bank(struct membank *bank)
+{
+ struct map_desc map;
+
+ map.pfn = bank_pfn_start(bank);
+ map.virtual = __phys_to_virt(bank_phys_start(bank));
+ map.length = bank_phys_size(bank);
+ map.type = MT_MEMORY;
+
+ create_mapping(&map);
+}
+
+static void __init map_lowmem(void)
+{
+ struct meminfo *mi = &meminfo;
+ int i;
+
+ /* Map all the lowmem memory banks. */
+ for (i = 0; i < mi->nr_banks; i++) {
+ struct membank *bank = &mi->bank[i];
+
+ if (!bank->highmem)
+ map_memory_bank(bank);
+ }
+}
+
+static int __init meminfo_cmp(const void *_a, const void *_b)
+{
+ const struct membank *a = _a, *b = _b;
+ long cmp = bank_pfn_start(a) - bank_pfn_start(b);
+ return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
+}
+
/*
* paging_init() sets up the page tables, initialises the zone memory
* maps, and sets up the zero page, bad page and bad page tables.
@@ -1025,9 +1060,12 @@ void __init paging_init(struct machine_desc *mdesc)
{
void *zero_page;
+ sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
+
build_mem_type_table();
sanity_check_meminfo();
prepare_page_table();
+ map_lowmem();
bootmem_init();
devicemaps_init(mdesc);
kmap_init();
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 9bfeb6b9509a..33b327379f07 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -65,6 +65,15 @@ void flush_dcache_page(struct page *page)
}
EXPORT_SYMBOL(flush_dcache_page);
+void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
+ unsigned long uaddr, void *dst, const void *src,
+ unsigned long len)
+{
+ memcpy(dst, src, len);
+ if (vma->vm_flags & VM_EXEC)
+ __cpuc_coherent_user_range(uaddr, uaddr + len);
+}
+
void __iomem *__arm_ioremap_pfn(unsigned long pfn, unsigned long offset,
size_t size, unsigned int mtype)
{
@@ -87,8 +96,8 @@ void __iomem *__arm_ioremap(unsigned long phys_addr, size_t size,
}
EXPORT_SYMBOL(__arm_ioremap);
-void __iomem *__arm_ioremap(unsigned long phys_addr, size_t size,
- unsigned int mtype, void *caller)
+void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size,
+ unsigned int mtype, void *caller)
{
return __arm_ioremap(phys_addr, size, mtype);
}
diff --git a/arch/arm/mm/tlb-v7.S b/arch/arm/mm/tlb-v7.S
index 0cb1848bd876..f3f288a9546d 100644
--- a/arch/arm/mm/tlb-v7.S
+++ b/arch/arm/mm/tlb-v7.S
@@ -50,7 +50,11 @@ ENTRY(v7wbi_flush_user_tlb_range)
cmp r0, r1
blo 1b
mov ip, #0
+#ifdef CONFIG_SMP
+ mcr p15, 0, ip, c7, c1, 6 @ flush BTAC/BTB Inner Shareable
+#else
mcr p15, 0, ip, c7, c5, 6 @ flush BTAC/BTB
+#endif
dsb
mov pc, lr
ENDPROC(v7wbi_flush_user_tlb_range)
@@ -79,7 +83,11 @@ ENTRY(v7wbi_flush_kern_tlb_range)
cmp r0, r1
blo 1b
mov r2, #0
+#ifdef CONFIG_SMP
+ mcr p15, 0, r2, c7, c1, 6 @ flush BTAC/BTB Inner Shareable
+#else
mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB
+#endif
dsb
isb
mov pc, lr
diff --git a/arch/arm/nwfpe/ChangeLog b/arch/arm/nwfpe/ChangeLog
index eeb5a7c5ff09..fa8028b1e1cf 100644
--- a/arch/arm/nwfpe/ChangeLog
+++ b/arch/arm/nwfpe/ChangeLog
@@ -72,7 +72,7 @@
1998-11-23 Scott Bambrough <scottb@netwinder.org>
* README.FPE - fix typo in description of lfm/sfm instructions
- * NOTES - Added file to describe known bugs/problems
+ * NOTES - Added file to describe known bugs/problems
* fpmodule.c - Changed version number to 0.94
1998-11-20 Scott Bambrough <scottb@netwinder.org>
diff --git a/arch/arm/nwfpe/fpmodule.c b/arch/arm/nwfpe/fpmodule.c
index 4c0ab50f399a..cb7658e8acc5 100644
--- a/arch/arm/nwfpe/fpmodule.c
+++ b/arch/arm/nwfpe/fpmodule.c
@@ -24,6 +24,7 @@
#include "fpa11.h"
#include <linux/module.h>
+#include <linux/moduleparam.h>
/* XXX */
#include <linux/errno.h>
@@ -134,13 +135,17 @@ a SIGFPE exception if necessary. If not the relevant bits in the
cumulative exceptions flag byte are set and we return.
*/
+#ifdef CONFIG_DEBUG_USER
+/* By default, ignore inexact errors as there are far too many of them to log */
+static int debug = ~BIT_IXC;
+#endif
+
void float_raise(signed char flags)
{
register unsigned int fpsr, cumulativeTraps;
#ifdef CONFIG_DEBUG_USER
- /* Ignore inexact errors as there are far too many of them to log */
- if (flags & ~BIT_IXC)
+ if (flags & debug)
printk(KERN_DEBUG
"NWFPE: %s[%d] takes exception %08x at %p from %08lx\n",
current->comm, current->pid, flags,
@@ -179,3 +184,7 @@ module_exit(fpe_exit);
MODULE_AUTHOR("Scott Bambrough <scottb@rebel.com>");
MODULE_DESCRIPTION("NWFPE floating point emulator (" NWFPE_BITS " precision)");
MODULE_LICENSE("GPL");
+
+#ifdef CONFIG_DEBUG_USER
+module_param(debug, int, 0644);
+#endif
diff --git a/arch/arm/nwfpe/fpsr.h b/arch/arm/nwfpe/fpsr.h
index 859b300d89fd..bd425dc13b61 100644
--- a/arch/arm/nwfpe/fpsr.h
+++ b/arch/arm/nwfpe/fpsr.h
@@ -30,7 +30,7 @@ one byte.
EXCEPTION TRAP ENABLE BYTE
SYSTEM CONTROL BYTE
CUMULATIVE EXCEPTION FLAGS BYTE
-
+
The FPCR is a 32 bit register consisting of bit flags.
*/
diff --git a/arch/arm/oprofile/Makefile b/arch/arm/oprofile/Makefile
index 88e31f549f50..e666eafed152 100644
--- a/arch/arm/oprofile/Makefile
+++ b/arch/arm/oprofile/Makefile
@@ -6,9 +6,4 @@ DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
oprofilefs.o oprofile_stats.o \
timer_int.o )
-oprofile-y := $(DRIVER_OBJS) common.o backtrace.o
-oprofile-$(CONFIG_CPU_XSCALE) += op_model_xscale.o
-oprofile-$(CONFIG_OPROFILE_ARM11_CORE) += op_model_arm11_core.o
-oprofile-$(CONFIG_OPROFILE_ARMV6) += op_model_v6.o
-oprofile-$(CONFIG_OPROFILE_MPCORE) += op_model_mpcore.o
-oprofile-$(CONFIG_OPROFILE_ARMV7) += op_model_v7.o
+oprofile-y := $(DRIVER_OBJS) common.o
diff --git a/arch/arm/oprofile/backtrace.c b/arch/arm/oprofile/backtrace.c
deleted file mode 100644
index d805a52b5032..000000000000
--- a/arch/arm/oprofile/backtrace.c
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Arm specific backtracing code for oprofile
- *
- * Copyright 2005 Openedhand Ltd.
- *
- * Author: Richard Purdie <rpurdie@openedhand.com>
- *
- * Based on i386 oprofile backtrace code by John Levon, David Smith
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/oprofile.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/uaccess.h>
-#include <asm/ptrace.h>
-#include <asm/stacktrace.h>
-
-static int report_trace(struct stackframe *frame, void *d)
-{
- unsigned int *depth = d;
-
- if (*depth) {
- oprofile_add_trace(frame->pc);
- (*depth)--;
- }
-
- return *depth == 0;
-}
-
-/*
- * The registers we're interested in are at the end of the variable
- * length saved register structure. The fp points at the end of this
- * structure so the address of this struct is:
- * (struct frame_tail *)(xxx->fp)-1
- */
-struct frame_tail {
- struct frame_tail *fp;
- unsigned long sp;
- unsigned long lr;
-} __attribute__((packed));
-
-static struct frame_tail* user_backtrace(struct frame_tail *tail)
-{
- struct frame_tail buftail[2];
-
- /* Also check accessibility of one struct frame_tail beyond */
- if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
- return NULL;
- if (__copy_from_user_inatomic(buftail, tail, sizeof(buftail)))
- return NULL;
-
- oprofile_add_trace(buftail[0].lr);
-
- /* frame pointers should strictly progress back up the stack
- * (towards higher addresses) */
- if (tail >= buftail[0].fp)
- return NULL;
-
- return buftail[0].fp-1;
-}
-
-void arm_backtrace(struct pt_regs * const regs, unsigned int depth)
-{
- struct frame_tail *tail = ((struct frame_tail *) regs->ARM_fp) - 1;
-
- if (!user_mode(regs)) {
- struct stackframe frame;
- frame.fp = regs->ARM_fp;
- frame.sp = regs->ARM_sp;
- frame.lr = regs->ARM_lr;
- frame.pc = regs->ARM_pc;
- walk_stackframe(&frame, report_trace, &depth);
- return;
- }
-
- while (depth-- && tail && !((unsigned long) tail & 3))
- tail = user_backtrace(tail);
-}
diff --git a/arch/arm/oprofile/common.c b/arch/arm/oprofile/common.c
index 3fcd752d6146..0691176899ff 100644
--- a/arch/arm/oprofile/common.c
+++ b/arch/arm/oprofile/common.c
@@ -2,32 +2,184 @@
* @file common.c
*
* @remark Copyright 2004 Oprofile Authors
+ * @remark Copyright 2010 ARM Ltd.
* @remark Read the file COPYING
*
* @author Zwane Mwaikambo
+ * @author Will Deacon [move to perf]
*/
+#include <linux/cpumask.h>
+#include <linux/err.h>
+#include <linux/errno.h>
#include <linux/init.h>
+#include <linux/mutex.h>
#include <linux/oprofile.h>
-#include <linux/errno.h>
+#include <linux/perf_event.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
-#include <linux/sysdev.h>
-#include <linux/mutex.h>
+#include <asm/stacktrace.h>
+#include <linux/uaccess.h>
-#include "op_counter.h"
-#include "op_arm_model.h"
+#include <asm/perf_event.h>
+#include <asm/ptrace.h>
+
+#ifdef CONFIG_HW_PERF_EVENTS
+/*
+ * Per performance monitor configuration as set via oprofilefs.
+ */
+struct op_counter_config {
+ unsigned long count;
+ unsigned long enabled;
+ unsigned long event;
+ unsigned long unit_mask;
+ unsigned long kernel;
+ unsigned long user;
+ struct perf_event_attr attr;
+};
-static struct op_arm_model_spec *op_arm_model;
static int op_arm_enabled;
static DEFINE_MUTEX(op_arm_mutex);
-struct op_counter_config *counter_config;
+static struct op_counter_config *counter_config;
+static struct perf_event **perf_events[nr_cpumask_bits];
+static int perf_num_counters;
+
+/*
+ * Overflow callback for oprofile.
+ */
+static void op_overflow_handler(struct perf_event *event, int unused,
+ struct perf_sample_data *data, struct pt_regs *regs)
+{
+ int id;
+ u32 cpu = smp_processor_id();
+
+ for (id = 0; id < perf_num_counters; ++id)
+ if (perf_events[cpu][id] == event)
+ break;
+
+ if (id != perf_num_counters)
+ oprofile_add_sample(regs, id);
+ else
+ pr_warning("oprofile: ignoring spurious overflow "
+ "on cpu %u\n", cpu);
+}
+
+/*
+ * Called by op_arm_setup to create perf attributes to mirror the oprofile
+ * settings in counter_config. Attributes are created as `pinned' events and
+ * so are permanently scheduled on the PMU.
+ */
+static void op_perf_setup(void)
+{
+ int i;
+ u32 size = sizeof(struct perf_event_attr);
+ struct perf_event_attr *attr;
+
+ for (i = 0; i < perf_num_counters; ++i) {
+ attr = &counter_config[i].attr;
+ memset(attr, 0, size);
+ attr->type = PERF_TYPE_RAW;
+ attr->size = size;
+ attr->config = counter_config[i].event;
+ attr->sample_period = counter_config[i].count;
+ attr->pinned = 1;
+ }
+}
+
+static int op_create_counter(int cpu, int event)
+{
+ int ret = 0;
+ struct perf_event *pevent;
+
+ if (!counter_config[event].enabled || (perf_events[cpu][event] != NULL))
+ return ret;
+
+ pevent = perf_event_create_kernel_counter(&counter_config[event].attr,
+ cpu, -1,
+ op_overflow_handler);
+
+ if (IS_ERR(pevent)) {
+ ret = PTR_ERR(pevent);
+ } else if (pevent->state != PERF_EVENT_STATE_ACTIVE) {
+ pr_warning("oprofile: failed to enable event %d "
+ "on CPU %d\n", event, cpu);
+ ret = -EBUSY;
+ } else {
+ perf_events[cpu][event] = pevent;
+ }
+
+ return ret;
+}
+
+static void op_destroy_counter(int cpu, int event)
+{
+ struct perf_event *pevent = perf_events[cpu][event];
+
+ if (pevent) {
+ perf_event_release_kernel(pevent);
+ perf_events[cpu][event] = NULL;
+ }
+}
+
+/*
+ * Called by op_arm_start to create active perf events based on the
+ * previously configured attributes.
+ */
+static int op_perf_start(void)
+{
+ int cpu, event, ret = 0;
+
+ for_each_online_cpu(cpu) {
+ for (event = 0; event < perf_num_counters; ++event) {
+ ret = op_create_counter(cpu, event);
+ if (ret)
+ goto out;
+ }
+ }
+
+out:
+ return ret;
+}
+
+/*
+ * Called by op_arm_stop at the end of a profiling run.
+ */
+static void op_perf_stop(void)
+{
+ int cpu, event;
+
+ for_each_online_cpu(cpu)
+ for (event = 0; event < perf_num_counters; ++event)
+ op_destroy_counter(cpu, event);
+}
+
+
+static char *op_name_from_perf_id(enum arm_perf_pmu_ids id)
+{
+ switch (id) {
+ case ARM_PERF_PMU_ID_XSCALE1:
+ return "arm/xscale1";
+ case ARM_PERF_PMU_ID_XSCALE2:
+ return "arm/xscale2";
+ case ARM_PERF_PMU_ID_V6:
+ return "arm/armv6";
+ case ARM_PERF_PMU_ID_V6MP:
+ return "arm/mpcore";
+ case ARM_PERF_PMU_ID_CA8:
+ return "arm/armv7";
+ case ARM_PERF_PMU_ID_CA9:
+ return "arm/armv7-ca9";
+ default:
+ return NULL;
+ }
+}
static int op_arm_create_files(struct super_block *sb, struct dentry *root)
{
unsigned int i;
- for (i = 0; i < op_arm_model->num_counters; i++) {
+ for (i = 0; i < perf_num_counters; i++) {
struct dentry *dir;
char buf[4];
@@ -46,12 +198,10 @@ static int op_arm_create_files(struct super_block *sb, struct dentry *root)
static int op_arm_setup(void)
{
- int ret;
-
spin_lock(&oprofilefs_lock);
- ret = op_arm_model->setup_ctrs();
+ op_perf_setup();
spin_unlock(&oprofilefs_lock);
- return ret;
+ return 0;
}
static int op_arm_start(void)
@@ -60,8 +210,9 @@ static int op_arm_start(void)
mutex_lock(&op_arm_mutex);
if (!op_arm_enabled) {
- ret = op_arm_model->start();
- op_arm_enabled = !ret;
+ ret = 0;
+ op_perf_start();
+ op_arm_enabled = 1;
}
mutex_unlock(&op_arm_mutex);
return ret;
@@ -71,113 +222,205 @@ static void op_arm_stop(void)
{
mutex_lock(&op_arm_mutex);
if (op_arm_enabled)
- op_arm_model->stop();
+ op_perf_stop();
op_arm_enabled = 0;
mutex_unlock(&op_arm_mutex);
}
#ifdef CONFIG_PM
-static int op_arm_suspend(struct sys_device *dev, pm_message_t state)
+static int op_arm_suspend(struct platform_device *dev, pm_message_t state)
{
mutex_lock(&op_arm_mutex);
if (op_arm_enabled)
- op_arm_model->stop();
+ op_perf_stop();
mutex_unlock(&op_arm_mutex);
return 0;
}
-static int op_arm_resume(struct sys_device *dev)
+static int op_arm_resume(struct platform_device *dev)
{
mutex_lock(&op_arm_mutex);
- if (op_arm_enabled && op_arm_model->start())
+ if (op_arm_enabled && op_perf_start())
op_arm_enabled = 0;
mutex_unlock(&op_arm_mutex);
return 0;
}
-static struct sysdev_class oprofile_sysclass = {
- .name = "oprofile",
+static struct platform_driver oprofile_driver = {
+ .driver = {
+ .name = "arm-oprofile",
+ },
.resume = op_arm_resume,
.suspend = op_arm_suspend,
};
-static struct sys_device device_oprofile = {
- .id = 0,
- .cls = &oprofile_sysclass,
-};
+static struct platform_device *oprofile_pdev;
static int __init init_driverfs(void)
{
int ret;
- if (!(ret = sysdev_class_register(&oprofile_sysclass)))
- ret = sysdev_register(&device_oprofile);
+ ret = platform_driver_register(&oprofile_driver);
+ if (ret)
+ goto out;
+ oprofile_pdev = platform_device_register_simple(
+ oprofile_driver.driver.name, 0, NULL, 0);
+ if (IS_ERR(oprofile_pdev)) {
+ ret = PTR_ERR(oprofile_pdev);
+ platform_driver_unregister(&oprofile_driver);
+ }
+
+out:
return ret;
}
static void exit_driverfs(void)
{
- sysdev_unregister(&device_oprofile);
- sysdev_class_unregister(&oprofile_sysclass);
+ platform_device_unregister(oprofile_pdev);
+ platform_driver_unregister(&oprofile_driver);
}
#else
-#define init_driverfs() do { } while (0)
+static int __init init_driverfs(void) { return 0; }
#define exit_driverfs() do { } while (0)
#endif /* CONFIG_PM */
-int __init oprofile_arch_init(struct oprofile_operations *ops)
+static int report_trace(struct stackframe *frame, void *d)
{
- struct op_arm_model_spec *spec = NULL;
- int ret = -ENODEV;
+ unsigned int *depth = d;
- ops->backtrace = arm_backtrace;
+ if (*depth) {
+ oprofile_add_trace(frame->pc);
+ (*depth)--;
+ }
-#ifdef CONFIG_CPU_XSCALE
- spec = &op_xscale_spec;
-#endif
+ return *depth == 0;
+}
-#ifdef CONFIG_OPROFILE_ARMV6
- spec = &op_armv6_spec;
-#endif
+/*
+ * The registers we're interested in are at the end of the variable
+ * length saved register structure. The fp points at the end of this
+ * structure so the address of this struct is:
+ * (struct frame_tail *)(xxx->fp)-1
+ */
+struct frame_tail {
+ struct frame_tail *fp;
+ unsigned long sp;
+ unsigned long lr;
+} __attribute__((packed));
-#ifdef CONFIG_OPROFILE_MPCORE
- spec = &op_mpcore_spec;
-#endif
+static struct frame_tail* user_backtrace(struct frame_tail *tail)
+{
+ struct frame_tail buftail[2];
-#ifdef CONFIG_OPROFILE_ARMV7
- spec = &op_armv7_spec;
-#endif
+ /* Also check accessibility of one struct frame_tail beyond */
+ if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
+ return NULL;
+ if (__copy_from_user_inatomic(buftail, tail, sizeof(buftail)))
+ return NULL;
- if (spec) {
- ret = spec->init();
- if (ret < 0)
- return ret;
+ oprofile_add_trace(buftail[0].lr);
- counter_config = kcalloc(spec->num_counters, sizeof(struct op_counter_config),
- GFP_KERNEL);
- if (!counter_config)
- return -ENOMEM;
+ /* frame pointers should strictly progress back up the stack
+ * (towards higher addresses) */
+ if (tail >= buftail[0].fp)
+ return NULL;
- op_arm_model = spec;
- init_driverfs();
- ops->create_files = op_arm_create_files;
- ops->setup = op_arm_setup;
- ops->shutdown = op_arm_stop;
- ops->start = op_arm_start;
- ops->stop = op_arm_stop;
- ops->cpu_type = op_arm_model->name;
- printk(KERN_INFO "oprofile: using %s\n", spec->name);
+ return buftail[0].fp-1;
+}
+
+static void arm_backtrace(struct pt_regs * const regs, unsigned int depth)
+{
+ struct frame_tail *tail = ((struct frame_tail *) regs->ARM_fp) - 1;
+
+ if (!user_mode(regs)) {
+ struct stackframe frame;
+ frame.fp = regs->ARM_fp;
+ frame.sp = regs->ARM_sp;
+ frame.lr = regs->ARM_lr;
+ frame.pc = regs->ARM_pc;
+ walk_stackframe(&frame, report_trace, &depth);
+ return;
}
+ while (depth-- && tail && !((unsigned long) tail & 3))
+ tail = user_backtrace(tail);
+}
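/*
 * Editorial sketch, not part of the patch: with APCS-style frame pointers
 * the saved registers sit just below the address held in fp, so the user
 * stack layout assumed by the walk above is:
 *
 *	higher addresses
 *	  ...              <- regs->ARM_fp points here (end of the struct)
 *	  saved lr
 *	  saved sp
 *	  saved fp         <- (struct frame_tail *)regs->ARM_fp - 1
 *	lower addresses
 *
 * Each iteration reports tail->lr via oprofile_add_trace() and follows
 * tail->fp, which must strictly increase, to the caller's frame;
 * user_backtrace() wraps those accesses in access_ok() and
 * __copy_from_user_inatomic() because they touch user memory.
 */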
+
+int __init oprofile_arch_init(struct oprofile_operations *ops)
+{
+ int cpu, ret = 0;
+
+ perf_num_counters = armpmu_get_max_events();
+
+ counter_config = kcalloc(perf_num_counters,
+ sizeof(struct op_counter_config), GFP_KERNEL);
+
+ if (!counter_config) {
+ pr_info("oprofile: failed to allocate %d "
+ "counters\n", perf_num_counters);
+ return -ENOMEM;
+ }
+
+ ret = init_driverfs();
+ if (ret) {
+ kfree(counter_config);
+ return ret;
+ }
+
+ for_each_possible_cpu(cpu) {
+ perf_events[cpu] = kcalloc(perf_num_counters,
+ sizeof(struct perf_event *), GFP_KERNEL);
+ if (!perf_events[cpu]) {
+ pr_info("oprofile: failed to allocate %d perf events "
+ "for cpu %d\n", perf_num_counters, cpu);
+ while (--cpu >= 0)
+ kfree(perf_events[cpu]);
+ return -ENOMEM;
+ }
+ }
+
+ ops->backtrace = arm_backtrace;
+ ops->create_files = op_arm_create_files;
+ ops->setup = op_arm_setup;
+ ops->start = op_arm_start;
+ ops->stop = op_arm_stop;
+ ops->shutdown = op_arm_stop;
+ ops->cpu_type = op_name_from_perf_id(armpmu_get_pmu_id());
+
+ if (!ops->cpu_type)
+ ret = -ENODEV;
+ else
+ pr_info("oprofile: using %s\n", ops->cpu_type);
+
return ret;
}
void oprofile_arch_exit(void)
{
- if (op_arm_model) {
+ int cpu, id;
+ struct perf_event *event;
+
+ if (*perf_events) {
exit_driverfs();
- op_arm_model = NULL;
+ for_each_possible_cpu(cpu) {
+ for (id = 0; id < perf_num_counters; ++id) {
+ event = perf_events[cpu][id];
+ if (event != NULL)
+ perf_event_release_kernel(event);
+ }
+ kfree(perf_events[cpu]);
+ }
}
- kfree(counter_config);
+
+ kfree(counter_config);
+}
+#else
+int __init oprofile_arch_init(struct oprofile_operations *ops)
+{
+ pr_info("oprofile: hardware counters not available\n");
+ return -ENODEV;
}
+void oprofile_arch_exit(void) {}
+#endif /* CONFIG_HW_PERF_EVENTS */
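/*
 * Editorial sketch, not part of the patch: op_perf_setup()/op_create_counter()
 * are defined earlier in this file and are not shown in this hunk, so the
 * following is only an assumption about their shape.  Each enabled oprofilefs
 * counter is presumably translated into a pinned, per-CPU kernel counter
 * along these lines:
 *
 *	struct perf_event_attr *attr = &counter_config[event].attr;
 *
 *	memset(attr, 0, sizeof(*attr));
 *	attr->type          = PERF_TYPE_RAW;
 *	attr->config        = counter_config[event].event;
 *	attr->sample_period = counter_config[event].count;
 *	attr->pinned        = 1;
 *
 * with the resulting event created on each online CPU and stored in
 * perf_events[cpu][event] so that op_perf_stop() and oprofile_arch_exit()
 * can release it again.
 */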
diff --git a/arch/arm/oprofile/op_arm_model.h b/arch/arm/oprofile/op_arm_model.h
deleted file mode 100644
index 8c4e4f6a1de3..000000000000
--- a/arch/arm/oprofile/op_arm_model.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/**
- * @file op_arm_model.h
- * interface to ARM machine specific operations
- *
- * @remark Copyright 2004 Oprofile Authors
- * @remark Read the file COPYING
- *
- * @author Zwane Mwaikambo
- */
-
-#ifndef OP_ARM_MODEL_H
-#define OP_ARM_MODEL_H
-
-struct op_arm_model_spec {
- int (*init)(void);
- unsigned int num_counters;
- int (*setup_ctrs)(void);
- int (*start)(void);
- void (*stop)(void);
- char *name;
-};
-
-#ifdef CONFIG_CPU_XSCALE
-extern struct op_arm_model_spec op_xscale_spec;
-#endif
-
-extern struct op_arm_model_spec op_armv6_spec;
-extern struct op_arm_model_spec op_mpcore_spec;
-extern struct op_arm_model_spec op_armv7_spec;
-
-extern void arm_backtrace(struct pt_regs * const regs, unsigned int depth);
-
-extern int __init op_arm_init(struct oprofile_operations *ops, struct op_arm_model_spec *spec);
-extern void op_arm_exit(void);
-#endif /* OP_ARM_MODEL_H */
diff --git a/arch/arm/oprofile/op_counter.h b/arch/arm/oprofile/op_counter.h
deleted file mode 100644
index ca942a63b52f..000000000000
--- a/arch/arm/oprofile/op_counter.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/**
- * @file op_counter.h
- *
- * @remark Copyright 2004 Oprofile Authors
- * @remark Read the file COPYING
- *
- * @author Zwane Mwaikambo
- */
-
-#ifndef OP_COUNTER_H
-#define OP_COUNTER_H
-
-/* Per performance monitor configuration as set via
- * oprofilefs.
- */
-struct op_counter_config {
- unsigned long count;
- unsigned long enabled;
- unsigned long event;
- unsigned long unit_mask;
- unsigned long kernel;
- unsigned long user;
-};
-
-extern struct op_counter_config *counter_config;
-
-#endif /* OP_COUNTER_H */
diff --git a/arch/arm/oprofile/op_model_arm11_core.c b/arch/arm/oprofile/op_model_arm11_core.c
deleted file mode 100644
index ef3e2653b90c..000000000000
--- a/arch/arm/oprofile/op_model_arm11_core.c
+++ /dev/null
@@ -1,162 +0,0 @@
-/**
- * @file op_model_arm11_core.c
- * ARM11 Event Monitor Driver
- * @remark Copyright 2004 ARM SMP Development Team
- */
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/oprofile.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/smp.h>
-
-#include "op_counter.h"
-#include "op_arm_model.h"
-#include "op_model_arm11_core.h"
-
-/*
- * ARM11 PMU support
- */
-static inline void arm11_write_pmnc(u32 val)
-{
- /* upper 4 bits and bits 7, 11 are write-as-0 */
- val &= 0x0ffff77f;
- asm volatile("mcr p15, 0, %0, c15, c12, 0" : : "r" (val));
-}
-
-static inline u32 arm11_read_pmnc(void)
-{
- u32 val;
- asm volatile("mrc p15, 0, %0, c15, c12, 0" : "=r" (val));
- return val;
-}
-
-static void arm11_reset_counter(unsigned int cnt)
-{
- u32 val = -(u32)counter_config[CPU_COUNTER(smp_processor_id(), cnt)].count;
- switch (cnt) {
- case CCNT:
- asm volatile("mcr p15, 0, %0, c15, c12, 1" : : "r" (val));
- break;
-
- case PMN0:
- asm volatile("mcr p15, 0, %0, c15, c12, 2" : : "r" (val));
- break;
-
- case PMN1:
- asm volatile("mcr p15, 0, %0, c15, c12, 3" : : "r" (val));
- break;
- }
-}
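/*
 * Editorial note, not part of the patch: preloading the counter with
 * -(u32)count is what turns the PMU overflow interrupt into a "sample
 * every count events" mechanism.  For example, with count = 100000:
 *
 *	val = -(u32)100000 = 0xfffe7960
 *
 * so the counter wraps past 0xffffffff, and raises the overflow interrupt,
 * after exactly 100000 further events; the handler then records a sample
 * and reloads the same value.  The SCU, ARMv7 and XScale drivers below use
 * the same trick.
 */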
-
-int arm11_setup_pmu(void)
-{
- unsigned int cnt;
- u32 pmnc;
-
- if (arm11_read_pmnc() & PMCR_E) {
- printk(KERN_ERR "oprofile: CPU%u PMU still enabled when setup new event counter.\n", smp_processor_id());
- return -EBUSY;
- }
-
- /* initialize PMNC, reset overflow, D bit, C bit and P bit. */
- arm11_write_pmnc(PMCR_OFL_PMN0 | PMCR_OFL_PMN1 | PMCR_OFL_CCNT |
- PMCR_C | PMCR_P);
-
- for (pmnc = 0, cnt = PMN0; cnt <= CCNT; cnt++) {
- unsigned long event;
-
- if (!counter_config[CPU_COUNTER(smp_processor_id(), cnt)].enabled)
- continue;
-
- event = counter_config[CPU_COUNTER(smp_processor_id(), cnt)].event & 255;
-
- /*
- * Set event (if destined for PMNx counters)
- */
- if (cnt == PMN0) {
- pmnc |= event << 20;
- } else if (cnt == PMN1) {
- pmnc |= event << 12;
- }
-
- /*
- * We don't need to set the event if it's a cycle count
- * Enable interrupt for this counter
- */
- pmnc |= PMCR_IEN_PMN0 << cnt;
- arm11_reset_counter(cnt);
- }
- arm11_write_pmnc(pmnc);
-
- return 0;
-}
-
-int arm11_start_pmu(void)
-{
- arm11_write_pmnc(arm11_read_pmnc() | PMCR_E);
- return 0;
-}
-
-int arm11_stop_pmu(void)
-{
- unsigned int cnt;
-
- arm11_write_pmnc(arm11_read_pmnc() & ~PMCR_E);
-
- for (cnt = PMN0; cnt <= CCNT; cnt++)
- arm11_reset_counter(cnt);
-
- return 0;
-}
-
-/*
- * CPU counters' IRQ handler (one IRQ per CPU)
- */
-static irqreturn_t arm11_pmu_interrupt(int irq, void *arg)
-{
- struct pt_regs *regs = get_irq_regs();
- unsigned int cnt;
- u32 pmnc;
-
- pmnc = arm11_read_pmnc();
-
- for (cnt = PMN0; cnt <= CCNT; cnt++) {
- if ((pmnc & (PMCR_OFL_PMN0 << cnt)) && (pmnc & (PMCR_IEN_PMN0 << cnt))) {
- arm11_reset_counter(cnt);
- oprofile_add_sample(regs, CPU_COUNTER(smp_processor_id(), cnt));
- }
- }
- /* Clear counter flag(s) */
- arm11_write_pmnc(pmnc);
- return IRQ_HANDLED;
-}
-
-int arm11_request_interrupts(const int *irqs, int nr)
-{
- unsigned int i;
- int ret = 0;
-
- for(i = 0; i < nr; i++) {
- ret = request_irq(irqs[i], arm11_pmu_interrupt, IRQF_DISABLED, "CP15 PMU", NULL);
- if (ret != 0) {
- printk(KERN_ERR "oprofile: unable to request IRQ%u for MPCORE-EM\n",
- irqs[i]);
- break;
- }
- }
-
- if (i != nr)
- while (i-- != 0)
- free_irq(irqs[i], NULL);
-
- return ret;
-}
-
-void arm11_release_interrupts(const int *irqs, int nr)
-{
- unsigned int i;
-
- for (i = 0; i < nr; i++)
- free_irq(irqs[i], NULL);
-}
diff --git a/arch/arm/oprofile/op_model_arm11_core.h b/arch/arm/oprofile/op_model_arm11_core.h
deleted file mode 100644
index 1902b99d9dfd..000000000000
--- a/arch/arm/oprofile/op_model_arm11_core.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/**
- * @file op_model_arm11_core.h
- * ARM11 Event Monitor Driver
- * @remark Copyright 2004 ARM SMP Development Team
- * @remark Copyright 2000-2004 Deepak Saxena <dsaxena@mvista.com>
- * @remark Copyright 2000-2004 MontaVista Software Inc
- * @remark Copyright 2004 Dave Jiang <dave.jiang@intel.com>
- * @remark Copyright 2004 Intel Corporation
- * @remark Copyright 2004 Zwane Mwaikambo <zwane@arm.linux.org.uk>
- * @remark Copyright 2004 Oprofile Authors
- *
- * @remark Read the file COPYING
- *
- * @author Zwane Mwaikambo
- */
-#ifndef OP_MODEL_ARM11_CORE_H
-#define OP_MODEL_ARM11_CORE_H
-
-/*
- * Per-CPU PMCR
- */
-#define PMCR_E (1 << 0) /* Enable */
-#define PMCR_P (1 << 1) /* Count reset */
-#define PMCR_C (1 << 2) /* Cycle counter reset */
-#define PMCR_D (1 << 3) /* Cycle counter counts every 64th cpu cycle */
-#define PMCR_IEN_PMN0 (1 << 4) /* Interrupt enable count reg 0 */
-#define PMCR_IEN_PMN1 (1 << 5) /* Interrupt enable count reg 1 */
-#define PMCR_IEN_CCNT (1 << 6) /* Interrupt enable cycle counter */
-#define PMCR_OFL_PMN0 (1 << 8) /* Count reg 0 overflow */
-#define PMCR_OFL_PMN1 (1 << 9) /* Count reg 1 overflow */
-#define PMCR_OFL_CCNT (1 << 10) /* Cycle counter overflow */
-
-#define PMN0 0
-#define PMN1 1
-#define CCNT 2
-
-#define CPU_COUNTER(cpu, counter) ((cpu) * 3 + (counter))
-
-int arm11_setup_pmu(void);
-int arm11_start_pmu(void);
-int arm11_stop_pmu(void);
-int arm11_request_interrupts(const int *, int);
-void arm11_release_interrupts(const int *, int);
-
-#endif
diff --git a/arch/arm/oprofile/op_model_mpcore.c b/arch/arm/oprofile/op_model_mpcore.c
deleted file mode 100644
index f73ce875a395..000000000000
--- a/arch/arm/oprofile/op_model_mpcore.c
+++ /dev/null
@@ -1,306 +0,0 @@
-/**
- * @file op_model_mpcore.c
- * MPCORE Event Monitor Driver
- * @remark Copyright 2004 ARM SMP Development Team
- * @remark Copyright 2000-2004 Deepak Saxena <dsaxena@mvista.com>
- * @remark Copyright 2000-2004 MontaVista Software Inc
- * @remark Copyright 2004 Dave Jiang <dave.jiang@intel.com>
- * @remark Copyright 2004 Intel Corporation
- * @remark Copyright 2004 Zwane Mwaikambo <zwane@arm.linux.org.uk>
- * @remark Copyright 2004 Oprofile Authors
- *
- * @remark Read the file COPYING
- *
- * @author Zwane Mwaikambo
- *
- * Counters:
- * 0: PMN0 on CPU0, per-cpu configurable event counter
- * 1: PMN1 on CPU0, per-cpu configurable event counter
- * 2: CCNT on CPU0
- * 3: PMN0 on CPU1
- * 4: PMN1 on CPU1
- * 5: CCNT on CPU1
- * 6: PMN0 on CPU2
- * 7: PMN1 on CPU2
- * 8: CCNT on CPU2
- * 9: PMN0 on CPU3
- * 10: PMN1 on CPU3
- * 11: CCNT on CPU3
- * 12-19: configurable SCU event counters
- */
-
-/* #define DEBUG */
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/err.h>
-#include <linux/sched.h>
-#include <linux/oprofile.h>
-#include <linux/interrupt.h>
-#include <linux/smp.h>
-#include <linux/io.h>
-
-#include <asm/irq.h>
-#include <asm/mach/irq.h>
-#include <mach/hardware.h>
-#include <mach/board-eb.h>
-#include <asm/system.h>
-#include <asm/pmu.h>
-
-#include "op_counter.h"
-#include "op_arm_model.h"
-#include "op_model_arm11_core.h"
-#include "op_model_mpcore.h"
-
-/*
- * MPCore SCU event monitor support
- */
-#define SCU_EVENTMONITORS_VA_BASE __io_address(REALVIEW_EB11MP_SCU_BASE + 0x10)
-
-/*
- * Bitmask of used SCU counters
- */
-static unsigned int scu_em_used;
-static const struct pmu_irqs *pmu_irqs;
-
-/*
- * 2 helper fns take a counter number from 0-7 (not the userspace-visible counter number)
- */
-static inline void scu_reset_counter(struct eventmonitor __iomem *emc, unsigned int n)
-{
- writel(-(u32)counter_config[SCU_COUNTER(n)].count, &emc->MC[n]);
-}
-
-static inline void scu_set_event(struct eventmonitor __iomem *emc, unsigned int n, u32 event)
-{
- event &= 0xff;
- writeb(event, &emc->MCEB[n]);
-}
-
-/*
- * SCU counters' IRQ handler (one IRQ per counter => 2 IRQs per CPU)
- */
-static irqreturn_t scu_em_interrupt(int irq, void *arg)
-{
- struct eventmonitor __iomem *emc = SCU_EVENTMONITORS_VA_BASE;
- unsigned int cnt;
-
- cnt = irq - IRQ_EB11MP_PMU_SCU0;
- oprofile_add_sample(get_irq_regs(), SCU_COUNTER(cnt));
- scu_reset_counter(emc, cnt);
-
- /* Clear overflow flag for this counter */
- writel(1 << (cnt + 16), &emc->PMCR);
-
- return IRQ_HANDLED;
-}
-
-/* Configure just the SCU counters that the user has requested */
-static void scu_setup(void)
-{
- struct eventmonitor __iomem *emc = SCU_EVENTMONITORS_VA_BASE;
- unsigned int i;
-
- scu_em_used = 0;
-
- for (i = 0; i < NUM_SCU_COUNTERS; i++) {
- if (counter_config[SCU_COUNTER(i)].enabled &&
- counter_config[SCU_COUNTER(i)].event) {
- scu_set_event(emc, i, 0); /* disable counter for now */
- scu_em_used |= 1 << i;
- }
- }
-}
-
-static int scu_start(void)
-{
- struct eventmonitor __iomem *emc = SCU_EVENTMONITORS_VA_BASE;
- unsigned int temp, i;
- unsigned long event;
- int ret = 0;
-
- /*
- * request the SCU counter interrupts that we need
- */
- for (i = 0; i < NUM_SCU_COUNTERS; i++) {
- if (scu_em_used & (1 << i)) {
- ret = request_irq(IRQ_EB11MP_PMU_SCU0 + i, scu_em_interrupt, IRQF_DISABLED, "SCU PMU", NULL);
- if (ret) {
- printk(KERN_ERR "oprofile: unable to request IRQ%u for SCU Event Monitor\n",
- IRQ_EB11MP_PMU_SCU0 + i);
- goto err_free_scu;
- }
- }
- }
-
- /*
- * clear overflow and enable interrupt for all used counters
- */
- temp = readl(&emc->PMCR);
- for (i = 0; i < NUM_SCU_COUNTERS; i++) {
- if (scu_em_used & (1 << i)) {
- scu_reset_counter(emc, i);
- event = counter_config[SCU_COUNTER(i)].event;
- scu_set_event(emc, i, event);
-
- /* clear overflow/interrupt */
- temp |= 1 << (i + 16);
- /* enable interrupt*/
- temp |= 1 << (i + 8);
- }
- }
-
- /* Enable all 8 counters */
- temp |= PMCR_E;
- writel(temp, &emc->PMCR);
-
- return 0;
-
- err_free_scu:
- while (i--)
- free_irq(IRQ_EB11MP_PMU_SCU0 + i, NULL);
- return ret;
-}
-
-static void scu_stop(void)
-{
- struct eventmonitor __iomem *emc = SCU_EVENTMONITORS_VA_BASE;
- unsigned int temp, i;
-
- /* Disable counter interrupts */
- /* Don't disable all 8 counters (with the E bit) as they may be in use */
- temp = readl(&emc->PMCR);
- for (i = 0; i < NUM_SCU_COUNTERS; i++) {
- if (scu_em_used & (1 << i))
- temp &= ~(1 << (i + 8));
- }
- writel(temp, &emc->PMCR);
-
- /* Free counter interrupts and reset counters */
- for (i = 0; i < NUM_SCU_COUNTERS; i++) {
- if (scu_em_used & (1 << i)) {
- scu_reset_counter(emc, i);
- free_irq(IRQ_EB11MP_PMU_SCU0 + i, NULL);
- }
- }
-}
-
-struct em_function_data {
- int (*fn)(void);
- int ret;
-};
-
-static void em_func(void *data)
-{
- struct em_function_data *d = data;
- int ret = d->fn();
- if (ret)
- d->ret = ret;
-}
-
-static int em_call_function(int (*fn)(void))
-{
- struct em_function_data data;
-
- data.fn = fn;
- data.ret = 0;
-
- preempt_disable();
- smp_call_function(em_func, &data, 1);
- em_func(&data);
- preempt_enable();
-
- return data.ret;
-}
-
-/*
- * Glue to stick the individual ARM11 PMUs and the SCU
- * into the oprofile framework.
- */
-static int em_setup_ctrs(void)
-{
- int ret;
-
- /* Configure CPU counters by cross-calling to the other CPUs */
- ret = em_call_function(arm11_setup_pmu);
- if (ret == 0)
- scu_setup();
-
- return 0;
-}
-
-static int em_start(void)
-{
- int ret;
-
- pmu_irqs = reserve_pmu();
- if (IS_ERR(pmu_irqs)) {
- ret = PTR_ERR(pmu_irqs);
- goto out;
- }
-
- ret = arm11_request_interrupts(pmu_irqs->irqs, pmu_irqs->num_irqs);
- if (ret == 0) {
- em_call_function(arm11_start_pmu);
-
- ret = scu_start();
- if (ret) {
- arm11_release_interrupts(pmu_irqs->irqs,
- pmu_irqs->num_irqs);
- } else {
- release_pmu(pmu_irqs);
- pmu_irqs = NULL;
- }
- }
-
-out:
- return ret;
-}
-
-static void em_stop(void)
-{
- em_call_function(arm11_stop_pmu);
- arm11_release_interrupts(pmu_irqs->irqs, pmu_irqs->num_irqs);
- scu_stop();
- release_pmu(pmu_irqs);
-}
-
-/*
- * Why isn't there a function to route an IRQ to a specific CPU in
- * genirq?
- */
-static void em_route_irq(int irq, unsigned int cpu)
-{
- struct irq_desc *desc = irq_desc + irq;
- const struct cpumask *mask = cpumask_of(cpu);
-
- spin_lock_irq(&desc->lock);
- cpumask_copy(desc->affinity, mask);
- desc->chip->set_affinity(irq, mask);
- spin_unlock_irq(&desc->lock);
-}
-
-static int em_setup(void)
-{
- /*
- * Send SCU PMU interrupts to the "owner" CPU.
- */
- em_route_irq(IRQ_EB11MP_PMU_SCU0, 0);
- em_route_irq(IRQ_EB11MP_PMU_SCU1, 0);
- em_route_irq(IRQ_EB11MP_PMU_SCU2, 1);
- em_route_irq(IRQ_EB11MP_PMU_SCU3, 1);
- em_route_irq(IRQ_EB11MP_PMU_SCU4, 2);
- em_route_irq(IRQ_EB11MP_PMU_SCU5, 2);
- em_route_irq(IRQ_EB11MP_PMU_SCU6, 3);
- em_route_irq(IRQ_EB11MP_PMU_SCU7, 3);
-
- return init_pmu();
-}
-
-struct op_arm_model_spec op_mpcore_spec = {
- .init = em_setup,
- .num_counters = MPCORE_NUM_COUNTERS,
- .setup_ctrs = em_setup_ctrs,
- .start = em_start,
- .stop = em_stop,
- .name = "arm/mpcore",
-};
diff --git a/arch/arm/oprofile/op_model_mpcore.h b/arch/arm/oprofile/op_model_mpcore.h
deleted file mode 100644
index 73d811023688..000000000000
--- a/arch/arm/oprofile/op_model_mpcore.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/**
- * @file op_model_mpcore.h
- * MPCORE Event Monitor Driver
- * @remark Copyright 2004 ARM SMP Development Team
- * @remark Copyright 2000-2004 Deepak Saxena <dsaxena@mvista.com>
- * @remark Copyright 2000-2004 MontaVista Software Inc
- * @remark Copyright 2004 Dave Jiang <dave.jiang@intel.com>
- * @remark Copyright 2004 Intel Corporation
- * @remark Copyright 2004 Zwane Mwaikambo <zwane@arm.linux.org.uk>
- * @remark Copyright 2004 Oprofile Authors
- *
- * @remark Read the file COPYING
- *
- * @author Zwane Mwaikambo
- */
-#ifndef OP_MODEL_MPCORE_H
-#define OP_MODEL_MPCORE_H
-
-struct eventmonitor {
- unsigned long PMCR;
- unsigned char MCEB[8];
- unsigned long MC[8];
-};
-
-/*
- * List of userspace counter numbers: note that the structure is important.
- * The code relies on CPUn's counters being CPU0's counters + 3n
- * and on CPU0's counters starting at 0
- */
-
-#define COUNTER_CPU0_PMN0 0
-#define COUNTER_CPU0_PMN1 1
-#define COUNTER_CPU0_CCNT 2
-
-#define COUNTER_CPU1_PMN0 3
-#define COUNTER_CPU1_PMN1 4
-#define COUNTER_CPU1_CCNT 5
-
-#define COUNTER_CPU2_PMN0 6
-#define COUNTER_CPU2_PMN1 7
-#define COUNTER_CPU2_CCNT 8
-
-#define COUNTER_CPU3_PMN0 9
-#define COUNTER_CPU3_PMN1 10
-#define COUNTER_CPU3_CCNT 11
-
-#define COUNTER_SCU_MN0 12
-#define COUNTER_SCU_MN1 13
-#define COUNTER_SCU_MN2 14
-#define COUNTER_SCU_MN3 15
-#define COUNTER_SCU_MN4 16
-#define COUNTER_SCU_MN5 17
-#define COUNTER_SCU_MN6 18
-#define COUNTER_SCU_MN7 19
-#define NUM_SCU_COUNTERS 8
-
-#define SCU_COUNTER(number) ((number) + COUNTER_SCU_MN0)
-
-#define MPCORE_NUM_COUNTERS SCU_COUNTER(NUM_SCU_COUNTERS)
-
-#endif
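/*
 * Editorial note, not part of the patch: combined with
 * CPU_COUNTER(cpu, counter) == (cpu) * 3 + (counter) from
 * op_model_arm11_core.h, the numbering above works out as, e.g.:
 *
 *	CPU_COUNTER(2, CCNT) = 2 * 3 + 2 = 8   == COUNTER_CPU2_CCNT
 *	SCU_COUNTER(0)       = 0 + 12          == COUNTER_SCU_MN0
 *	MPCORE_NUM_COUNTERS  = SCU_COUNTER(8)  == 20
 *
 * i.e. 12 per-CPU counters followed by 8 SCU counters, 20 userspace-visible
 * counters in total.
 */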
diff --git a/arch/arm/oprofile/op_model_v6.c b/arch/arm/oprofile/op_model_v6.c
deleted file mode 100644
index a22357a2fd08..000000000000
--- a/arch/arm/oprofile/op_model_v6.c
+++ /dev/null
@@ -1,78 +0,0 @@
-/**
- * @file op_model_v6.c
- * ARM11 Performance Monitor Driver
- *
- * Based on op_model_xscale.c
- *
- * @remark Copyright 2000-2004 Deepak Saxena <dsaxena@mvista.com>
- * @remark Copyright 2000-2004 MontaVista Software Inc
- * @remark Copyright 2004 Dave Jiang <dave.jiang@intel.com>
- * @remark Copyright 2004 Intel Corporation
- * @remark Copyright 2004 Zwane Mwaikambo <zwane@arm.linux.org.uk>
- * @remark Copyright 2004 OProfile Authors
- *
- * @remark Read the file COPYING
- *
- * @author Tony Lindgren <tony@atomide.com>
- */
-
-/* #define DEBUG */
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/err.h>
-#include <linux/sched.h>
-#include <linux/oprofile.h>
-#include <linux/interrupt.h>
-#include <asm/irq.h>
-#include <asm/system.h>
-#include <asm/pmu.h>
-
-#include "op_counter.h"
-#include "op_arm_model.h"
-#include "op_model_arm11_core.h"
-
-static const struct pmu_irqs *pmu_irqs;
-
-static void armv6_pmu_stop(void)
-{
- arm11_stop_pmu();
- arm11_release_interrupts(pmu_irqs->irqs, pmu_irqs->num_irqs);
- release_pmu(pmu_irqs);
- pmu_irqs = NULL;
-}
-
-static int armv6_pmu_start(void)
-{
- int ret;
-
- pmu_irqs = reserve_pmu();
- if (IS_ERR(pmu_irqs)) {
- ret = PTR_ERR(pmu_irqs);
- goto out;
- }
-
- ret = arm11_request_interrupts(pmu_irqs->irqs, pmu_irqs->num_irqs);
- if (ret >= 0) {
- ret = arm11_start_pmu();
- } else {
- release_pmu(pmu_irqs);
- pmu_irqs = NULL;
- }
-
-out:
- return ret;
-}
-
-static int armv6_detect_pmu(void)
-{
- return 0;
-}
-
-struct op_arm_model_spec op_armv6_spec = {
- .init = armv6_detect_pmu,
- .num_counters = 3,
- .setup_ctrs = arm11_setup_pmu,
- .start = armv6_pmu_start,
- .stop = armv6_pmu_stop,
- .name = "arm/armv6",
-};
diff --git a/arch/arm/oprofile/op_model_v7.c b/arch/arm/oprofile/op_model_v7.c
deleted file mode 100644
index 8642d0891ae1..000000000000
--- a/arch/arm/oprofile/op_model_v7.c
+++ /dev/null
@@ -1,415 +0,0 @@
-/**
- * op_model_v7.c
- * ARM V7 (Cortex A8) Event Monitor Driver
- *
- * Copyright 2008 Jean Pihet <jpihet@mvista.com>
- * Copyright 2004 ARM SMP Development Team
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/err.h>
-#include <linux/oprofile.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/smp.h>
-
-#include <asm/pmu.h>
-
-#include "op_counter.h"
-#include "op_arm_model.h"
-#include "op_model_v7.h"
-
-/* #define DEBUG */
-
-
-/*
- * ARM V7 PMNC support
- */
-
-static u32 cnt_en[CNTMAX];
-
-static inline void armv7_pmnc_write(u32 val)
-{
- val &= PMNC_MASK;
- asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r" (val));
-}
-
-static inline u32 armv7_pmnc_read(void)
-{
- u32 val;
-
- asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
- return val;
-}
-
-static inline u32 armv7_pmnc_enable_counter(unsigned int cnt)
-{
- u32 val;
-
- if (cnt >= CNTMAX) {
- printk(KERN_ERR "oprofile: CPU%u enabling wrong PMNC counter"
- " %d\n", smp_processor_id(), cnt);
- return -1;
- }
-
- if (cnt == CCNT)
- val = CNTENS_C;
- else
- val = (1 << (cnt - CNT0));
-
- val &= CNTENS_MASK;
- asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (val));
-
- return cnt;
-}
-
-static inline u32 armv7_pmnc_disable_counter(unsigned int cnt)
-{
- u32 val;
-
- if (cnt >= CNTMAX) {
- printk(KERN_ERR "oprofile: CPU%u disabling wrong PMNC counter"
- " %d\n", smp_processor_id(), cnt);
- return -1;
- }
-
- if (cnt == CCNT)
- val = CNTENC_C;
- else
- val = (1 << (cnt - CNT0));
-
- val &= CNTENC_MASK;
- asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (val));
-
- return cnt;
-}
-
-static inline u32 armv7_pmnc_enable_intens(unsigned int cnt)
-{
- u32 val;
-
- if (cnt >= CNTMAX) {
- printk(KERN_ERR "oprofile: CPU%u enabling wrong PMNC counter"
- " interrupt enable %d\n", smp_processor_id(), cnt);
- return -1;
- }
-
- if (cnt == CCNT)
- val = INTENS_C;
- else
- val = (1 << (cnt - CNT0));
-
- val &= INTENS_MASK;
- asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (val));
-
- return cnt;
-}
-
-static inline u32 armv7_pmnc_getreset_flags(void)
-{
- u32 val;
-
- /* Read */
- asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
-
- /* Write to clear flags */
- val &= FLAG_MASK;
- asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val));
-
- return val;
-}
-
-static inline int armv7_pmnc_select_counter(unsigned int cnt)
-{
- u32 val;
-
- if ((cnt == CCNT) || (cnt >= CNTMAX)) {
- printk(KERN_ERR "oprofile: CPU%u selecting wrong PMNC counteri"
- " %d\n", smp_processor_id(), cnt);
- return -1;
- }
-
- val = (cnt - CNT0) & SELECT_MASK;
- asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (val));
-
- return cnt;
-}
-
-static inline void armv7_pmnc_write_evtsel(unsigned int cnt, u32 val)
-{
- if (armv7_pmnc_select_counter(cnt) == cnt) {
- val &= EVTSEL_MASK;
- asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
- }
-}
-
-static void armv7_pmnc_reset_counter(unsigned int cnt)
-{
- u32 cpu_cnt = CPU_COUNTER(smp_processor_id(), cnt);
- u32 val = -(u32)counter_config[cpu_cnt].count;
-
- switch (cnt) {
- case CCNT:
- armv7_pmnc_disable_counter(cnt);
-
- asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (val));
-
- if (cnt_en[cnt] != 0)
- armv7_pmnc_enable_counter(cnt);
-
- break;
-
- case CNT0:
- case CNT1:
- case CNT2:
- case CNT3:
- armv7_pmnc_disable_counter(cnt);
-
- if (armv7_pmnc_select_counter(cnt) == cnt)
- asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (val));
-
- if (cnt_en[cnt] != 0)
- armv7_pmnc_enable_counter(cnt);
-
- break;
-
- default:
- printk(KERN_ERR "oprofile: CPU%u resetting wrong PMNC counter"
- " %d\n", smp_processor_id(), cnt);
- break;
- }
-}
-
-int armv7_setup_pmnc(void)
-{
- unsigned int cnt;
-
- if (armv7_pmnc_read() & PMNC_E) {
- printk(KERN_ERR "oprofile: CPU%u PMNC still enabled when setup"
- " new event counter.\n", smp_processor_id());
- return -EBUSY;
- }
-
- /* Initialize & Reset PMNC: C bit and P bit */
- armv7_pmnc_write(PMNC_P | PMNC_C);
-
-
- for (cnt = CCNT; cnt < CNTMAX; cnt++) {
- unsigned long event;
- u32 cpu_cnt = CPU_COUNTER(smp_processor_id(), cnt);
-
- /*
- * Disable counter
- */
- armv7_pmnc_disable_counter(cnt);
- cnt_en[cnt] = 0;
-
- if (!counter_config[cpu_cnt].enabled)
- continue;
-
- event = counter_config[cpu_cnt].event & 255;
-
- /*
- * Set event (if destined for PMNx counters)
- * We don't need to set the event if it's a cycle count
- */
- if (cnt != CCNT)
- armv7_pmnc_write_evtsel(cnt, event);
-
- /*
- * Enable interrupt for this counter
- */
- armv7_pmnc_enable_intens(cnt);
-
- /*
- * Reset counter
- */
- armv7_pmnc_reset_counter(cnt);
-
- /*
- * Enable counter
- */
- armv7_pmnc_enable_counter(cnt);
- cnt_en[cnt] = 1;
- }
-
- return 0;
-}
-
-static inline void armv7_start_pmnc(void)
-{
- armv7_pmnc_write(armv7_pmnc_read() | PMNC_E);
-}
-
-static inline void armv7_stop_pmnc(void)
-{
- armv7_pmnc_write(armv7_pmnc_read() & ~PMNC_E);
-}
-
-/*
- * CPU counters' IRQ handler (one IRQ per CPU)
- */
-static irqreturn_t armv7_pmnc_interrupt(int irq, void *arg)
-{
- struct pt_regs *regs = get_irq_regs();
- unsigned int cnt;
- u32 flags;
-
-
- /*
- * Stop IRQ generation
- */
- armv7_stop_pmnc();
-
- /*
- * Get and reset overflow status flags
- */
- flags = armv7_pmnc_getreset_flags();
-
- /*
- * Cycle counter
- */
- if (flags & FLAG_C) {
- u32 cpu_cnt = CPU_COUNTER(smp_processor_id(), CCNT);
- armv7_pmnc_reset_counter(CCNT);
- oprofile_add_sample(regs, cpu_cnt);
- }
-
- /*
- * PMNC counters 0:3
- */
- for (cnt = CNT0; cnt < CNTMAX; cnt++) {
- if (flags & (1 << (cnt - CNT0))) {
- u32 cpu_cnt = CPU_COUNTER(smp_processor_id(), cnt);
- armv7_pmnc_reset_counter(cnt);
- oprofile_add_sample(regs, cpu_cnt);
- }
- }
-
- /*
- * Allow IRQ generation
- */
- armv7_start_pmnc();
-
- return IRQ_HANDLED;
-}
-
-int armv7_request_interrupts(const int *irqs, int nr)
-{
- unsigned int i;
- int ret = 0;
-
- for (i = 0; i < nr; i++) {
- ret = request_irq(irqs[i], armv7_pmnc_interrupt,
- IRQF_DISABLED, "CP15 PMNC", NULL);
- if (ret != 0) {
- printk(KERN_ERR "oprofile: unable to request IRQ%u"
- " for ARMv7\n",
- irqs[i]);
- break;
- }
- }
-
- if (i != nr)
- while (i-- != 0)
- free_irq(irqs[i], NULL);
-
- return ret;
-}
-
-void armv7_release_interrupts(const int *irqs, int nr)
-{
- unsigned int i;
-
- for (i = 0; i < nr; i++)
- free_irq(irqs[i], NULL);
-}
-
-#ifdef DEBUG
-static void armv7_pmnc_dump_regs(void)
-{
- u32 val;
- unsigned int cnt;
-
- printk(KERN_INFO "PMNC registers dump:\n");
-
- asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
- printk(KERN_INFO "PMNC =0x%08x\n", val);
-
- asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val));
- printk(KERN_INFO "CNTENS=0x%08x\n", val);
-
- asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val));
- printk(KERN_INFO "INTENS=0x%08x\n", val);
-
- asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
- printk(KERN_INFO "FLAGS =0x%08x\n", val);
-
- asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val));
- printk(KERN_INFO "SELECT=0x%08x\n", val);
-
- asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
- printk(KERN_INFO "CCNT =0x%08x\n", val);
-
- for (cnt = CNT0; cnt < CNTMAX; cnt++) {
- armv7_pmnc_select_counter(cnt);
- asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
- printk(KERN_INFO "CNT[%d] count =0x%08x\n", cnt-CNT0, val);
- asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
- printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n", cnt-CNT0, val);
- }
-}
-#endif
-
-static const struct pmu_irqs *pmu_irqs;
-
-static void armv7_pmnc_stop(void)
-{
-#ifdef DEBUG
- armv7_pmnc_dump_regs();
-#endif
- armv7_stop_pmnc();
- armv7_release_interrupts(pmu_irqs->irqs, pmu_irqs->num_irqs);
- release_pmu(pmu_irqs);
- pmu_irqs = NULL;
-}
-
-static int armv7_pmnc_start(void)
-{
- int ret;
-
- pmu_irqs = reserve_pmu();
- if (IS_ERR(pmu_irqs))
- return PTR_ERR(pmu_irqs);
-
-#ifdef DEBUG
- armv7_pmnc_dump_regs();
-#endif
- ret = armv7_request_interrupts(pmu_irqs->irqs, pmu_irqs->num_irqs);
- if (ret >= 0) {
- armv7_start_pmnc();
- } else {
- release_pmu(pmu_irqs);
- pmu_irqs = NULL;
- }
-
- return ret;
-}
-
-static int armv7_detect_pmnc(void)
-{
- return 0;
-}
-
-struct op_arm_model_spec op_armv7_spec = {
- .init = armv7_detect_pmnc,
- .num_counters = 5,
- .setup_ctrs = armv7_setup_pmnc,
- .start = armv7_pmnc_start,
- .stop = armv7_pmnc_stop,
- .name = "arm/armv7",
-};
diff --git a/arch/arm/oprofile/op_model_v7.h b/arch/arm/oprofile/op_model_v7.h
deleted file mode 100644
index 9ca334b39c75..000000000000
--- a/arch/arm/oprofile/op_model_v7.h
+++ /dev/null
@@ -1,103 +0,0 @@
-/**
- * op_model_v7.h
- * ARM v7 (Cortex A8) Event Monitor Driver
- *
- * Copyright 2008 Jean Pihet <jpihet@mvista.com>
- * Copyright 2004 ARM SMP Development Team
- * Copyright 2000-2004 Deepak Saxena <dsaxena@mvista.com>
- * Copyright 2000-2004 MontaVista Software Inc
- * Copyright 2004 Dave Jiang <dave.jiang@intel.com>
- * Copyright 2004 Intel Corporation
- * Copyright 2004 Zwane Mwaikambo <zwane@arm.linux.org.uk>
- * Copyright 2004 Oprofile Authors
- *
- * Read the file COPYING
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#ifndef OP_MODEL_V7_H
-#define OP_MODEL_V7_H
-
-/*
- * Per-CPU PMNC: config reg
- */
-#define PMNC_E (1 << 0) /* Enable all counters */
-#define PMNC_P (1 << 1) /* Reset all counters */
-#define PMNC_C (1 << 2) /* Cycle counter reset */
-#define PMNC_D (1 << 3) /* CCNT counts every 64th cpu cycle */
-#define PMNC_X (1 << 4) /* Export to ETM */
-#define PMNC_DP (1 << 5) /* Disable CCNT if non-invasive debug*/
-#define PMNC_MASK 0x3f /* Mask for writable bits */
-
-/*
- * Available counters
- */
-#define CCNT 0
-#define CNT0 1
-#define CNT1 2
-#define CNT2 3
-#define CNT3 4
-#define CNTMAX 5
-
-#define CPU_COUNTER(cpu, counter) ((cpu) * CNTMAX + (counter))
-
-/*
- * CNTENS: counters enable reg
- */
-#define CNTENS_P0 (1 << 0)
-#define CNTENS_P1 (1 << 1)
-#define CNTENS_P2 (1 << 2)
-#define CNTENS_P3 (1 << 3)
-#define CNTENS_C (1 << 31)
-#define CNTENS_MASK 0x8000000f /* Mask for writable bits */
-
-/*
- * CNTENC: counters disable reg
- */
-#define CNTENC_P0 (1 << 0)
-#define CNTENC_P1 (1 << 1)
-#define CNTENC_P2 (1 << 2)
-#define CNTENC_P3 (1 << 3)
-#define CNTENC_C (1 << 31)
-#define CNTENC_MASK 0x8000000f /* Mask for writable bits */
-
-/*
- * INTENS: counters overflow interrupt enable reg
- */
-#define INTENS_P0 (1 << 0)
-#define INTENS_P1 (1 << 1)
-#define INTENS_P2 (1 << 2)
-#define INTENS_P3 (1 << 3)
-#define INTENS_C (1 << 31)
-#define INTENS_MASK 0x8000000f /* Mask for writable bits */
-
-/*
- * EVTSEL: Event selection reg
- */
-#define EVTSEL_MASK 0x7f /* Mask for writable bits */
-
-/*
- * SELECT: Counter selection reg
- */
-#define SELECT_MASK 0x1f /* Mask for writable bits */
-
-/*
- * FLAG: counters overflow flag status reg
- */
-#define FLAG_P0 (1 << 0)
-#define FLAG_P1 (1 << 1)
-#define FLAG_P2 (1 << 2)
-#define FLAG_P3 (1 << 3)
-#define FLAG_C (1 << 31)
-#define FLAG_MASK 0x8000000f /* Mask for writable bits */
-
-
-int armv7_setup_pmu(void);
-int armv7_start_pmu(void);
-int armv7_stop_pmu(void);
-int armv7_request_interrupts(const int *, int);
-void armv7_release_interrupts(const int *, int);
-
-#endif
diff --git a/arch/arm/oprofile/op_model_xscale.c b/arch/arm/oprofile/op_model_xscale.c
deleted file mode 100644
index 1d34a02048bd..000000000000
--- a/arch/arm/oprofile/op_model_xscale.c
+++ /dev/null
@@ -1,444 +0,0 @@
-/**
- * @file op_model_xscale.c
- * XScale Performance Monitor Driver
- *
- * @remark Copyright 2000-2004 Deepak Saxena <dsaxena@mvista.com>
- * @remark Copyright 2000-2004 MontaVista Software Inc
- * @remark Copyright 2004 Dave Jiang <dave.jiang@intel.com>
- * @remark Copyright 2004 Intel Corporation
- * @remark Copyright 2004 Zwane Mwaikambo <zwane@arm.linux.org.uk>
- * @remark Copyright 2004 OProfile Authors
- *
- * @remark Read the file COPYING
- *
- * @author Zwane Mwaikambo
- */
-
-/* #define DEBUG */
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/err.h>
-#include <linux/sched.h>
-#include <linux/oprofile.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-
-#include <asm/cputype.h>
-#include <asm/pmu.h>
-
-#include "op_counter.h"
-#include "op_arm_model.h"
-
-#define PMU_ENABLE 0x001 /* Enable counters */
-#define PMN_RESET 0x002 /* Reset event counters */
-#define CCNT_RESET 0x004 /* Reset clock counter */
-#define PMU_RESET (CCNT_RESET | PMN_RESET)
-#define PMU_CNT64 0x008 /* Make CCNT count every 64th cycle */
-
-/*
- * Different types of events that can be counted by the XScale PMU
- * as used by Oprofile userspace. Here primarily for documentation
- * purposes.
- */
-
-#define EVT_ICACHE_MISS 0x00
-#define EVT_ICACHE_NO_DELIVER 0x01
-#define EVT_DATA_STALL 0x02
-#define EVT_ITLB_MISS 0x03
-#define EVT_DTLB_MISS 0x04
-#define EVT_BRANCH 0x05
-#define EVT_BRANCH_MISS 0x06
-#define EVT_INSTRUCTION 0x07
-#define EVT_DCACHE_FULL_STALL 0x08
-#define EVT_DCACHE_FULL_STALL_CONTIG 0x09
-#define EVT_DCACHE_ACCESS 0x0A
-#define EVT_DCACHE_MISS 0x0B
-#define EVT_DCACE_WRITE_BACK 0x0C
-#define EVT_PC_CHANGED 0x0D
-#define EVT_BCU_REQUEST 0x10
-#define EVT_BCU_FULL 0x11
-#define EVT_BCU_DRAIN 0x12
-#define EVT_BCU_ECC_NO_ELOG 0x14
-#define EVT_BCU_1_BIT_ERR 0x15
-#define EVT_RMW 0x16
-/* EVT_CCNT is not hardware defined */
-#define EVT_CCNT 0xFE
-#define EVT_UNUSED 0xFF
-
-struct pmu_counter {
- volatile unsigned long ovf;
- unsigned long reset_counter;
-};
-
-enum { CCNT, PMN0, PMN1, PMN2, PMN3, MAX_COUNTERS };
-
-static struct pmu_counter results[MAX_COUNTERS];
-
-/*
- * There are two versions of the PMU in current XScale processors
- * with differing register layouts and number of performance counters.
- * e.g. IOP32x is xsc1 whilst IOP33x is xsc2.
- * We detect which register layout to use in xscale_detect_pmu()
- */
-enum { PMU_XSC1, PMU_XSC2 };
-
-struct pmu_type {
- int id;
- char *name;
- int num_counters;
- unsigned int int_enable;
- unsigned int cnt_ovf[MAX_COUNTERS];
- unsigned int int_mask[MAX_COUNTERS];
-};
-
-static struct pmu_type pmu_parms[] = {
- {
- .id = PMU_XSC1,
- .name = "arm/xscale1",
- .num_counters = 3,
- .int_mask = { [PMN0] = 0x10, [PMN1] = 0x20,
- [CCNT] = 0x40 },
- .cnt_ovf = { [CCNT] = 0x400, [PMN0] = 0x100,
- [PMN1] = 0x200},
- },
- {
- .id = PMU_XSC2,
- .name = "arm/xscale2",
- .num_counters = 5,
- .int_mask = { [CCNT] = 0x01, [PMN0] = 0x02,
- [PMN1] = 0x04, [PMN2] = 0x08,
- [PMN3] = 0x10 },
- .cnt_ovf = { [CCNT] = 0x01, [PMN0] = 0x02,
- [PMN1] = 0x04, [PMN2] = 0x08,
- [PMN3] = 0x10 },
- },
-};
-
-static struct pmu_type *pmu;
-
-static void write_pmnc(u32 val)
-{
- if (pmu->id == PMU_XSC1) {
- /* upper 4 bits and bits 7, 11 are write-as-0 */
- val &= 0xffff77f;
- __asm__ __volatile__ ("mcr p14, 0, %0, c0, c0, 0" : : "r" (val));
- } else {
- /* bits 4-23 are write-as-0, 24-31 are write ignored */
- val &= 0xf;
- __asm__ __volatile__ ("mcr p14, 0, %0, c0, c1, 0" : : "r" (val));
- }
-}
-
-static u32 read_pmnc(void)
-{
- u32 val;
-
- if (pmu->id == PMU_XSC1)
- __asm__ __volatile__ ("mrc p14, 0, %0, c0, c0, 0" : "=r" (val));
- else {
- __asm__ __volatile__ ("mrc p14, 0, %0, c0, c1, 0" : "=r" (val));
- /* bits 1-2 and 4-23 are read-unpredictable */
- val &= 0xff000009;
- }
-
- return val;
-}
-
-static u32 __xsc1_read_counter(int counter)
-{
- u32 val = 0;
-
- switch (counter) {
- case CCNT:
- __asm__ __volatile__ ("mrc p14, 0, %0, c1, c0, 0" : "=r" (val));
- break;
- case PMN0:
- __asm__ __volatile__ ("mrc p14, 0, %0, c2, c0, 0" : "=r" (val));
- break;
- case PMN1:
- __asm__ __volatile__ ("mrc p14, 0, %0, c3, c0, 0" : "=r" (val));
- break;
- }
- return val;
-}
-
-static u32 __xsc2_read_counter(int counter)
-{
- u32 val = 0;
-
- switch (counter) {
- case CCNT:
- __asm__ __volatile__ ("mrc p14, 0, %0, c1, c1, 0" : "=r" (val));
- break;
- case PMN0:
- __asm__ __volatile__ ("mrc p14, 0, %0, c0, c2, 0" : "=r" (val));
- break;
- case PMN1:
- __asm__ __volatile__ ("mrc p14, 0, %0, c1, c2, 0" : "=r" (val));
- break;
- case PMN2:
- __asm__ __volatile__ ("mrc p14, 0, %0, c2, c2, 0" : "=r" (val));
- break;
- case PMN3:
- __asm__ __volatile__ ("mrc p14, 0, %0, c3, c2, 0" : "=r" (val));
- break;
- }
- return val;
-}
-
-static u32 read_counter(int counter)
-{
- u32 val;
-
- if (pmu->id == PMU_XSC1)
- val = __xsc1_read_counter(counter);
- else
- val = __xsc2_read_counter(counter);
-
- return val;
-}
-
-static void __xsc1_write_counter(int counter, u32 val)
-{
- switch (counter) {
- case CCNT:
- __asm__ __volatile__ ("mcr p14, 0, %0, c1, c0, 0" : : "r" (val));
- break;
- case PMN0:
- __asm__ __volatile__ ("mcr p14, 0, %0, c2, c0, 0" : : "r" (val));
- break;
- case PMN1:
- __asm__ __volatile__ ("mcr p14, 0, %0, c3, c0, 0" : : "r" (val));
- break;
- }
-}
-
-static void __xsc2_write_counter(int counter, u32 val)
-{
- switch (counter) {
- case CCNT:
- __asm__ __volatile__ ("mcr p14, 0, %0, c1, c1, 0" : : "r" (val));
- break;
- case PMN0:
- __asm__ __volatile__ ("mcr p14, 0, %0, c0, c2, 0" : : "r" (val));
- break;
- case PMN1:
- __asm__ __volatile__ ("mcr p14, 0, %0, c1, c2, 0" : : "r" (val));
- break;
- case PMN2:
- __asm__ __volatile__ ("mcr p14, 0, %0, c2, c2, 0" : : "r" (val));
- break;
- case PMN3:
- __asm__ __volatile__ ("mcr p14, 0, %0, c3, c2, 0" : : "r" (val));
- break;
- }
-}
-
-static void write_counter(int counter, u32 val)
-{
- if (pmu->id == PMU_XSC1)
- __xsc1_write_counter(counter, val);
- else
- __xsc2_write_counter(counter, val);
-}
-
-static int xscale_setup_ctrs(void)
-{
- u32 evtsel, pmnc;
- int i;
-
- for (i = CCNT; i < MAX_COUNTERS; i++) {
- if (counter_config[i].enabled)
- continue;
-
- counter_config[i].event = EVT_UNUSED;
- }
-
- switch (pmu->id) {
- case PMU_XSC1:
- pmnc = (counter_config[PMN1].event << 20) | (counter_config[PMN0].event << 12);
- pr_debug("xscale_setup_ctrs: pmnc: %#08x\n", pmnc);
- write_pmnc(pmnc);
- break;
-
- case PMU_XSC2:
- evtsel = counter_config[PMN0].event | (counter_config[PMN1].event << 8) |
- (counter_config[PMN2].event << 16) | (counter_config[PMN3].event << 24);
-
- pr_debug("xscale_setup_ctrs: evtsel %#08x\n", evtsel);
- __asm__ __volatile__ ("mcr p14, 0, %0, c8, c1, 0" : : "r" (evtsel));
- break;
- }
-
- for (i = CCNT; i < MAX_COUNTERS; i++) {
- if (counter_config[i].event == EVT_UNUSED) {
- counter_config[i].event = 0;
- pmu->int_enable &= ~pmu->int_mask[i];
- continue;
- }
-
- results[i].reset_counter = counter_config[i].count;
- write_counter(i, -(u32)counter_config[i].count);
- pmu->int_enable |= pmu->int_mask[i];
- pr_debug("xscale_setup_ctrs: counter%d %#08x from %#08lx\n", i,
- read_counter(i), counter_config[i].count);
- }
-
- return 0;
-}
-
-static inline void __xsc1_check_ctrs(void)
-{
- int i;
- u32 pmnc = read_pmnc();
-
- /* NOTE: there's an A stepping errata that states if an overflow */
- /* bit already exists and another occurs, the previous */
- /* Overflow bit gets cleared. There's no workaround. */
- /* Fixed in B stepping or later */
-
- /* Write the value back to clear the overflow flags. Overflow */
- /* flags remain in pmnc for use below */
- write_pmnc(pmnc & ~PMU_ENABLE);
-
- for (i = CCNT; i <= PMN1; i++) {
- if (!(pmu->int_mask[i] & pmu->int_enable))
- continue;
-
- if (pmnc & pmu->cnt_ovf[i])
- results[i].ovf++;
- }
-}
-
-static inline void __xsc2_check_ctrs(void)
-{
- int i;
- u32 flag = 0, pmnc = read_pmnc();
-
- pmnc &= ~PMU_ENABLE;
- write_pmnc(pmnc);
-
- /* read overflow flag register */
- __asm__ __volatile__ ("mrc p14, 0, %0, c5, c1, 0" : "=r" (flag));
-
- for (i = CCNT; i <= PMN3; i++) {
- if (!(pmu->int_mask[i] & pmu->int_enable))
- continue;
-
- if (flag & pmu->cnt_ovf[i])
- results[i].ovf++;
- }
-
- /* writeback clears overflow bits */
- __asm__ __volatile__ ("mcr p14, 0, %0, c5, c1, 0" : : "r" (flag));
-}
-
-static irqreturn_t xscale_pmu_interrupt(int irq, void *arg)
-{
- int i;
- u32 pmnc;
-
- if (pmu->id == PMU_XSC1)
- __xsc1_check_ctrs();
- else
- __xsc2_check_ctrs();
-
- for (i = CCNT; i < MAX_COUNTERS; i++) {
- if (!results[i].ovf)
- continue;
-
- write_counter(i, -(u32)results[i].reset_counter);
- oprofile_add_sample(get_irq_regs(), i);
- results[i].ovf--;
- }
-
- pmnc = read_pmnc() | PMU_ENABLE;
- write_pmnc(pmnc);
-
- return IRQ_HANDLED;
-}
-
-static const struct pmu_irqs *pmu_irqs;
-
-static void xscale_pmu_stop(void)
-{
- u32 pmnc = read_pmnc();
-
- pmnc &= ~PMU_ENABLE;
- write_pmnc(pmnc);
-
- free_irq(pmu_irqs->irqs[0], results);
- release_pmu(pmu_irqs);
- pmu_irqs = NULL;
-}
-
-static int xscale_pmu_start(void)
-{
- int ret;
- u32 pmnc;
-
- pmu_irqs = reserve_pmu();
- if (IS_ERR(pmu_irqs))
- return PTR_ERR(pmu_irqs);
-
- pmnc = read_pmnc();
-
- ret = request_irq(pmu_irqs->irqs[0], xscale_pmu_interrupt,
- IRQF_DISABLED, "XScale PMU", (void *)results);
-
- if (ret < 0) {
- printk(KERN_ERR "oprofile: unable to request IRQ%d for XScale PMU\n",
- pmu_irqs->irqs[0]);
- release_pmu(pmu_irqs);
- pmu_irqs = NULL;
- return ret;
- }
-
- if (pmu->id == PMU_XSC1)
- pmnc |= pmu->int_enable;
- else {
- __asm__ __volatile__ ("mcr p14, 0, %0, c4, c1, 0" : : "r" (pmu->int_enable));
- pmnc &= ~PMU_CNT64;
- }
-
- pmnc |= PMU_ENABLE;
- write_pmnc(pmnc);
- pr_debug("xscale_pmu_start: pmnc: %#08x mask: %08x\n", pmnc, pmu->int_enable);
- return 0;
-}
-
-static int xscale_detect_pmu(void)
-{
- int ret = 0;
- u32 id;
-
- id = (read_cpuid(CPUID_ID) >> 13) & 0x7;
-
- switch (id) {
- case 1:
- pmu = &pmu_parms[PMU_XSC1];
- break;
- case 2:
- pmu = &pmu_parms[PMU_XSC2];
- break;
- default:
- ret = -ENODEV;
- break;
- }
-
- if (!ret) {
- op_xscale_spec.name = pmu->name;
- op_xscale_spec.num_counters = pmu->num_counters;
- pr_debug("xscale_detect_pmu: detected %s PMU\n", pmu->name);
- }
-
- return ret;
-}
-
-struct op_arm_model_spec op_xscale_spec = {
- .init = xscale_detect_pmu,
- .setup_ctrs = xscale_setup_ctrs,
- .start = xscale_pmu_start,
- .stop = xscale_pmu_stop,
-};
-
diff --git a/arch/arm/plat-iop/Makefile b/arch/arm/plat-iop/Makefile
index 36bff0325959..69b09c1cec8b 100644
--- a/arch/arm/plat-iop/Makefile
+++ b/arch/arm/plat-iop/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_ARCH_IOP32X) += time.o
obj-$(CONFIG_ARCH_IOP32X) += io.o
obj-$(CONFIG_ARCH_IOP32X) += cp6.o
obj-$(CONFIG_ARCH_IOP32X) += adma.o
+obj-$(CONFIG_ARCH_IOP32X) += pmu.o
# IOP33X
obj-$(CONFIG_ARCH_IOP33X) += gpio.o
@@ -23,6 +24,7 @@ obj-$(CONFIG_ARCH_IOP33X) += time.o
obj-$(CONFIG_ARCH_IOP33X) += io.o
obj-$(CONFIG_ARCH_IOP33X) += cp6.o
obj-$(CONFIG_ARCH_IOP33X) += adma.o
+obj-$(CONFIG_ARCH_IOP33X) += pmu.o
# IOP13XX
obj-$(CONFIG_ARCH_IOP13XX) += cp6.o
diff --git a/arch/arm/plat-iop/pmu.c b/arch/arm/plat-iop/pmu.c
new file mode 100644
index 000000000000..a2024b8685a1
--- /dev/null
+++ b/arch/arm/plat-iop/pmu.c
@@ -0,0 +1,40 @@
+/*
+ * PMU IRQ registration for the iop3xx xscale PMU families.
+ * Copyright (C) 2010 Will Deacon, ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/platform_device.h>
+#include <asm/pmu.h>
+#include <mach/irqs.h>
+
+static struct resource pmu_resource = {
+#ifdef CONFIG_ARCH_IOP32X
+ .start = IRQ_IOP32X_CORE_PMU,
+ .end = IRQ_IOP32X_CORE_PMU,
+#endif
+#ifdef CONFIG_ARCH_IOP33X
+ .start = IRQ_IOP33X_CORE_PMU,
+ .end = IRQ_IOP33X_CORE_PMU,
+#endif
+ .flags = IORESOURCE_IRQ,
+};
+
+static struct platform_device pmu_device = {
+ .name = "arm-pmu",
+ .id = ARM_PMU_DEVICE_CPU,
+ .resource = &pmu_resource,
+ .num_resources = 1,
+};
+
+static int __init iop3xx_pmu_init(void)
+{
+ platform_device_register(&pmu_device);
+ return 0;
+}
+
+arch_initcall(iop3xx_pmu_init);
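/*
 * Editorial sketch, not part of the patch: the "arm-pmu" name and the
 * ARM_PMU_DEVICE_CPU id are presumably what the generic ARM PMU code keys
 * off when looking up the counter interrupt, so another platform exposing
 * its core PMU to perf/oprofile would follow the same pattern, e.g.
 * (hypothetical IRQ number):
 *
 *	static struct resource foo_pmu_resource = {
 *		.start	= IRQ_FOO_CORE_PMU,
 *		.end	= IRQ_FOO_CORE_PMU,
 *		.flags	= IORESOURCE_IRQ,
 *	};
 *
 *	static struct platform_device foo_pmu_device = {
 *		.name		= "arm-pmu",
 *		.id		= ARM_PMU_DEVICE_CPU,
 *		.resource	= &foo_pmu_resource,
 *		.num_resources	= 1,
 *	};
 *
 *	platform_device_register(&foo_pmu_device);
 */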
diff --git a/arch/arm/plat-mxc/ehci.c b/arch/arm/plat-mxc/ehci.c
index cb0b63874482..2a8646173c2f 100644
--- a/arch/arm/plat-mxc/ehci.c
+++ b/arch/arm/plat-mxc/ehci.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2009 Daniel Mack <daniel@caiaq.de>
+ * Copyright (C) 2010 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -50,7 +51,26 @@
#define MX35_H1_TLL_BIT (1 << 5)
#define MX35_H1_USBTE_BIT (1 << 4)
-int mxc_set_usbcontrol(int port, unsigned int flags)
+#define MXC_OTG_OFFSET 0
+#define MXC_H1_OFFSET 0x200
+
+/* USB_CTRL */
+#define MXC_OTG_UCTRL_OWIE_BIT (1 << 27) /* OTG wakeup intr enable */
+#define MXC_OTG_UCTRL_OPM_BIT (1 << 24) /* OTG power mask */
+#define MXC_H1_UCTRL_H1UIE_BIT (1 << 12) /* Host1 ULPI interrupt enable */
+#define MXC_H1_UCTRL_H1WIE_BIT (1 << 11) /* HOST1 wakeup intr enable */
+#define MXC_H1_UCTRL_H1PM_BIT (1 << 8) /* HOST1 power mask */
+
+/* USB_PHY_CTRL_FUNC */
+#define MXC_OTG_PHYCTRL_OC_DIS_BIT (1 << 8) /* OTG Disable Overcurrent Event */
+#define MXC_H1_OC_DIS_BIT (1 << 5) /* UH1 Disable Overcurrent Event */
+
+#define MXC_USBCMD_OFFSET 0x140
+
+/* USBCMD */
+#define MXC_UCMD_ITC_NO_THRESHOLD_MASK (~(0xff << 16)) /* Interrupt Threshold Control */
+
+int mxc_initialize_usb_hw(int port, unsigned int flags)
{
unsigned int v;
#ifdef CONFIG_ARCH_MX3
@@ -186,9 +206,85 @@ int mxc_set_usbcontrol(int port, unsigned int flags)
return 0;
}
#endif /* CONFIG_MACH_MX27 */
+#ifdef CONFIG_ARCH_MX51
+ if (cpu_is_mx51()) {
+ void __iomem *usb_base;
+ void __iomem *usbotg_base;
+ void __iomem *usbother_base;
+ int ret = 0;
+
+ usb_base = ioremap(MX51_OTG_BASE_ADDR, SZ_4K);
+
+ switch (port) {
+ case 0: /* OTG port */
+ usbotg_base = usb_base + MXC_OTG_OFFSET;
+ break;
+ case 1: /* Host 1 port */
+ usbotg_base = usb_base + MXC_H1_OFFSET;
+ break;
+ default:
+ printk(KERN_ERR"%s no such port %d\n", __func__, port);
+ ret = -ENOENT;
+ goto error;
+ }
+ usbother_base = usb_base + MX5_USBOTHER_REGS_OFFSET;
+
+ switch (port) {
+ case 0: /*OTG port */
+ if (flags & MXC_EHCI_INTERNAL_PHY) {
+ v = __raw_readl(usbother_base + MXC_USB_PHY_CTR_FUNC_OFFSET);
+
+ if (flags & MXC_EHCI_POWER_PINS_ENABLED)
+ v |= (MXC_OTG_PHYCTRL_OC_DIS_BIT | MXC_OTG_UCTRL_OPM_BIT); /* OC/USBPWR is not used */
+ else
+ v &= ~(MXC_OTG_PHYCTRL_OC_DIS_BIT | MXC_OTG_UCTRL_OPM_BIT); /* OC/USBPWR is used */
+ __raw_writel(v, usbother_base + MXC_USB_PHY_CTR_FUNC_OFFSET);
+
+ v = __raw_readl(usbother_base + MXC_USBCTRL_OFFSET);
+ if (flags & MXC_EHCI_WAKEUP_ENABLED)
+ v |= MXC_OTG_UCTRL_OWIE_BIT;/* OTG wakeup enable */
+ else
+ v &= ~MXC_OTG_UCTRL_OWIE_BIT;/* OTG wakeup disable */
+ __raw_writel(v, usbother_base + MXC_USBCTRL_OFFSET);
+ }
+ break;
+ case 1: /* Host 1 */
+ /*Host ULPI */
+ v = __raw_readl(usbother_base + MXC_USBCTRL_OFFSET);
+ if (flags & MXC_EHCI_WAKEUP_ENABLED)
+ v &= ~(MXC_H1_UCTRL_H1WIE_BIT | MXC_H1_UCTRL_H1UIE_BIT);/* HOST1 wakeup/ULPI intr disable */
+ else
+ v &= ~(MXC_H1_UCTRL_H1WIE_BIT | MXC_H1_UCTRL_H1UIE_BIT);/* HOST1 wakeup/ULPI intr disable */
+
+ if (flags & MXC_EHCI_POWER_PINS_ENABLED)
+ v &= ~MXC_H1_UCTRL_H1PM_BIT; /* HOST1 power mask used*/
+ else
+ v |= MXC_H1_UCTRL_H1PM_BIT; /* HOST1 power mask used*/
+ __raw_writel(v, usbother_base + MXC_USBCTRL_OFFSET);
+
+ v = __raw_readl(usbother_base + MXC_USB_PHY_CTR_FUNC_OFFSET);
+ if (flags & MXC_EHCI_POWER_PINS_ENABLED)
+ v &= ~MXC_H1_OC_DIS_BIT; /* OC is used */
+ else
+ v |= MXC_H1_OC_DIS_BIT; /* OC is not used */
+ __raw_writel(v, usbother_base + MXC_USB_PHY_CTR_FUNC_OFFSET);
+
+ v = __raw_readl(usbotg_base + MXC_USBCMD_OFFSET);
+ if (flags & MXC_EHCI_ITC_NO_THRESHOLD)
+ /* Interrupt Threshold Control:Immediate (no threshold) */
+ v &= MXC_UCMD_ITC_NO_THRESHOLD_MASK;
+ __raw_writel(v, usbotg_base + MXC_USBCMD_OFFSET);
+ break;
+ }
+
+error:
+ iounmap(usb_base);
+ return ret;
+ }
+#endif
printk(KERN_WARNING
"%s() unable to setup USBCONTROL for this CPU\n", __func__);
return -EINVAL;
}
-EXPORT_SYMBOL(mxc_set_usbcontrol);
+EXPORT_SYMBOL(mxc_initialize_usb_hw);
diff --git a/arch/arm/plat-mxc/gpio.c b/arch/arm/plat-mxc/gpio.c
index 70b23893f094..71437c61cfd7 100644
--- a/arch/arm/plat-mxc/gpio.c
+++ b/arch/arm/plat-mxc/gpio.c
@@ -3,7 +3,7 @@
* Copyright 2008 Juergen Beisert, kernel@pengutronix.de
*
* Based on code from Freescale,
- * Copyright 2004-2006 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Copyright (C) 2004-2010 Freescale Semiconductor, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -38,7 +38,6 @@ static int gpio_table_size;
#define GPIO_ICR2 (cpu_is_mx1_mx2() ? 0x2C : 0x10)
#define GPIO_IMR (cpu_is_mx1_mx2() ? 0x30 : 0x14)
#define GPIO_ISR (cpu_is_mx1_mx2() ? 0x34 : 0x18)
-#define GPIO_ISR (cpu_is_mx1_mx2() ? 0x34 : 0x18)
#define GPIO_INT_LOW_LEV (cpu_is_mx1_mx2() ? 0x3 : 0x0)
#define GPIO_INT_HIGH_LEV (cpu_is_mx1_mx2() ? 0x2 : 0x1)
@@ -289,7 +288,7 @@ int __init mxc_gpio_init(struct mxc_gpio_port *port, int cnt)
/* its a serious configuration bug when it fails */
BUG_ON( gpiochip_add(&port[i].chip) < 0 );
- if (cpu_is_mx1() || cpu_is_mx3() || cpu_is_mx25()) {
+ if (cpu_is_mx1() || cpu_is_mx3() || cpu_is_mx25() || cpu_is_mx51()) {
/* setup one handler for each entry */
set_irq_chained_handler(port[i].irq, mx3_gpio_irq_handler);
set_irq_data(port[i].irq, &port[i]);
diff --git a/arch/arm/plat-mxc/include/mach/board-mx31moboard.h b/arch/arm/plat-mxc/include/mach/board-mx31moboard.h
index fc5fec9b55f0..36ff3cedee1a 100644
--- a/arch/arm/plat-mxc/include/mach/board-mx31moboard.h
+++ b/arch/arm/plat-mxc/include/mach/board-mx31moboard.h
@@ -26,6 +26,7 @@ enum mx31moboard_boards {
MX31DEVBOARD = 1,
MX31MARXBOT = 2,
MX31SMARTBOT = 3,
+ MX31EYEBOT = 4,
};
/*
@@ -35,7 +36,7 @@ enum mx31moboard_boards {
extern void mx31moboard_devboard_init(void);
extern void mx31moboard_marxbot_init(void);
-extern void mx31moboard_smartbot_init(void);
+extern void mx31moboard_smartbot_init(int board);
#endif
diff --git a/arch/arm/plat-mxc/include/mach/iomux-mx3.h b/arch/arm/plat-mxc/include/mach/iomux-mx3.h
index e51465d7b224..cbaed295a2bf 100644
--- a/arch/arm/plat-mxc/include/mach/iomux-mx3.h
+++ b/arch/arm/plat-mxc/include/mach/iomux-mx3.h
@@ -719,6 +719,23 @@ enum iomux_pins {
#define MX31_PIN_SRXD5__SRXD5 IOMUX_MODE(MX31_PIN_SRXD5, IOMUX_CONFIG_FUNC)
#define MX31_PIN_SCK5__SCK5 IOMUX_MODE(MX31_PIN_SCK5, IOMUX_CONFIG_FUNC)
#define MX31_PIN_SFS5__SFS5 IOMUX_MODE(MX31_PIN_SFS5, IOMUX_CONFIG_FUNC)
+#define MX31_PIN_KEY_ROW0_KEY_ROW0 IOMUX_MODE(MX31_PIN_KEY_ROW0, IOMUX_CONFIG_FUNC)
+#define MX31_PIN_KEY_ROW1_KEY_ROW1 IOMUX_MODE(MX31_PIN_KEY_ROW1, IOMUX_CONFIG_FUNC)
+#define MX31_PIN_KEY_ROW2_KEY_ROW2 IOMUX_MODE(MX31_PIN_KEY_ROW2, IOMUX_CONFIG_FUNC)
+#define MX31_PIN_KEY_ROW3_KEY_ROW3 IOMUX_MODE(MX31_PIN_KEY_ROW3, IOMUX_CONFIG_FUNC)
+#define MX31_PIN_KEY_ROW4_KEY_ROW4 IOMUX_MODE(MX31_PIN_KEY_ROW4, IOMUX_CONFIG_FUNC)
+#define MX31_PIN_KEY_ROW5_KEY_ROW5 IOMUX_MODE(MX31_PIN_KEY_ROW5, IOMUX_CONFIG_FUNC)
+#define MX31_PIN_KEY_ROW6_KEY_ROW6 IOMUX_MODE(MX31_PIN_KEY_ROW6, IOMUX_CONFIG_FUNC)
+#define MX31_PIN_KEY_ROW7_KEY_ROW7 IOMUX_MODE(MX31_PIN_KEY_ROW7, IOMUX_CONFIG_FUNC)
+#define MX31_PIN_KEY_COL0_KEY_COL0 IOMUX_MODE(MX31_PIN_KEY_COL0, IOMUX_CONFIG_FUNC)
+#define MX31_PIN_KEY_COL1_KEY_COL1 IOMUX_MODE(MX31_PIN_KEY_COL1, IOMUX_CONFIG_FUNC)
+#define MX31_PIN_KEY_COL2_KEY_COL2 IOMUX_MODE(MX31_PIN_KEY_COL2, IOMUX_CONFIG_FUNC)
+#define MX31_PIN_KEY_COL3_KEY_COL3 IOMUX_MODE(MX31_PIN_KEY_COL3, IOMUX_CONFIG_FUNC)
+#define MX31_PIN_KEY_COL4_KEY_COL4 IOMUX_MODE(MX31_PIN_KEY_COL4, IOMUX_CONFIG_FUNC)
+#define MX31_PIN_KEY_COL5_KEY_COL5 IOMUX_MODE(MX31_PIN_KEY_COL5, IOMUX_CONFIG_FUNC)
+#define MX31_PIN_KEY_COL6_KEY_COL6 IOMUX_MODE(MX31_PIN_KEY_COL6, IOMUX_CONFIG_FUNC)
+#define MX31_PIN_KEY_COL7_KEY_COL7 IOMUX_MODE(MX31_PIN_KEY_COL7, IOMUX_CONFIG_FUNC)
+
/*
* XXX: The SS0, SS1, SS2, SS3 lines of spi3 are multiplexed with cspi2_ss0,
diff --git a/arch/arm/plat-mxc/include/mach/iomux-mx51.h b/arch/arm/plat-mxc/include/mach/iomux-mx51.h
index b4f975e6a665..ab0f95d953d0 100644
--- a/arch/arm/plat-mxc/include/mach/iomux-mx51.h
+++ b/arch/arm/plat-mxc/include/mach/iomux-mx51.h
@@ -1,5 +1,6 @@
/*
* Copyright (C) 2009-2010 Amit Kucheria <amit.kucheria@canonical.com>
+ * Copyright (C) 2010 Freescale Semiconductor, Inc.
*
* The code contained herein is licensed under the GNU General Public
* License. You may obtain a copy of the GNU General Public License
@@ -37,6 +38,11 @@ typedef enum iomux_config {
PAD_CTL_SRE_FAST)
#define MX51_UART3_PAD_CTRL (PAD_CTL_PKE | PAD_CTL_DSE_HIGH | \
PAD_CTL_SRE_FAST)
+#define MX51_USBH1_PAD_CTRL (PAD_CTL_SRE_FAST | PAD_CTL_DSE_HIGH | \
+ PAD_CTL_PUS_100K_UP | PAD_CTL_PUE | \
+ PAD_CTL_PKE | PAD_CTL_HYS)
+#define MX51_GPIO_PAD_CTRL (PAD_CTL_DSE_HIGH | PAD_CTL_PKE | \
+ PAD_CTL_SRE_FAST)
/*
* The naming convention for the pad modes is MX51_PAD_<padname>__<padmode>
@@ -57,6 +63,7 @@ typedef enum iomux_config {
#define MX51_PAD_GPIO_2_3__EIM_D19 IOMUX_PAD(0x3fc, 0x068, 1, 0x0, 0, NO_PAD_CTRL)
#define MX51_PAD_GPIO_2_4__EIM_D20 IOMUX_PAD(0x400, 0x06c, 1, 0x0, 0, NO_PAD_CTRL)
#define MX51_PAD_GPIO_2_5__EIM_D21 IOMUX_PAD(0x404, 0x070, 1, 0x0, 0, NO_PAD_CTRL)
+#define MX51_PAD_EIM_D21__GPIO_2_5 IOMUX_PAD(0x404, 0x070, IOMUX_CONFIG_ALT1, 0x0, 0, MX51_GPIO_PAD_CTRL)
#define MX51_PAD_GPIO_2_6__EIM_D22 IOMUX_PAD(0x408, 0x074, 1, 0x0, 0, NO_PAD_CTRL)
#define MX51_PAD_GPIO_2_7__EIM_D23 IOMUX_PAD(0x40c, 0x078, 1, 0x0, 0, NO_PAD_CTRL)
@@ -208,18 +215,19 @@ typedef enum iomux_config {
#define MX51_PAD_KEY_COL3__KEY_COL3 IOMUX_PAD(0x658, 0x268, 0, 0x0, 0, NO_PAD_CTRL)
#define MX51_PAD_KEY_COL4__KEY_COL4 IOMUX_PAD(0x65C, 0x26C, 0, 0x0, 0, NO_PAD_CTRL)
#define MX51_PAD_KEY_COL5__KEY_COL5 IOMUX_PAD(0x660, 0x270, 0, 0x0, 0, NO_PAD_CTRL)
-#define MX51_PAD_GPIO_1_25__USBH1_CLK IOMUX_PAD(0x678, 0x278, 2, 0x0, 0, NO_PAD_CTRL)
-#define MX51_PAD_GPIO_1_26__USBH1_DIR IOMUX_PAD(0x67C, 0x27C, 2, 0x0, 0, NO_PAD_CTRL)
-#define MX51_PAD_GPIO_1_27__USBH1_STP IOMUX_PAD(0x680, 0x280, 2, 0x0, 0, NO_PAD_CTRL)
-#define MX51_PAD_GPIO_1_28__USBH1_NXT IOMUX_PAD(0x684, 0x284, 2, 0x0, 0, NO_PAD_CTRL)
-#define MX51_PAD_GPIO_1_11__USBH1_DATA0 IOMUX_PAD(0x688, 0x288, 2, 0x0, 0, NO_PAD_CTRL)
-#define MX51_PAD_GPIO_1_12__USBH1_DATA1 IOMUX_PAD(0x68C, 0x28C, 2, 0x0, 0, NO_PAD_CTRL)
-#define MX51_PAD_GPIO_1_13__USBH1_DATA2 IOMUX_PAD(0x690, 0x290, 2, 0x0, 0, NO_PAD_CTRL)
-#define MX51_PAD_GPIO_1_14__USBH1_DATA3 IOMUX_PAD(0x694, 0x294, 2, 0x0, 0, NO_PAD_CTRL)
-#define MX51_PAD_GPIO_1_15__USBH1_DATA4 IOMUX_PAD(0x698, 0x298, 2, 0x0, 0, NO_PAD_CTRL)
-#define MX51_PAD_GPIO_1_16__USBH1_DATA5 IOMUX_PAD(0x69C, 0x29C, 2, 0x0, 0, NO_PAD_CTRL)
-#define MX51_PAD_GPIO_1_17__USBH1_DATA6 IOMUX_PAD(0x6A0, 0x2A0, 2, 0x0, 0, NO_PAD_CTRL)
-#define MX51_PAD_GPIO_1_18__USBH1_DATA7 IOMUX_PAD(0x6A4, 0x2A4, 2, 0x0, 0, NO_PAD_CTRL)
+#define MX51_PAD_USBH1_CLK__USBH1_CLK IOMUX_PAD(0x678, 0x278, IOMUX_CONFIG_ALT0, 0x0, 0, MX51_USBH1_PAD_CTRL)
+#define MX51_PAD_USBH1_DIR__USBH1_DIR IOMUX_PAD(0x67C, 0x27C, IOMUX_CONFIG_ALT0, 0x0, 0, MX51_USBH1_PAD_CTRL)
+#define MX51_PAD_USBH1_STP__USBH1_STP IOMUX_PAD(0x680, 0x280, IOMUX_CONFIG_ALT0, 0x0, 0, MX51_USBH1_PAD_CTRL)
+#define MX51_PAD_USBH1_STP__GPIO_1_27 IOMUX_PAD(0x680, 0x280, IOMUX_CONFIG_GPIO, 0x0, 0, MX51_USBH1_PAD_CTRL)
+#define MX51_PAD_USBH1_NXT__USBH1_NXT IOMUX_PAD(0x684, 0x284, IOMUX_CONFIG_ALT0, 0x0, 0, MX51_USBH1_PAD_CTRL)
+#define MX51_PAD_USBH1_DATA0__USBH1_DATA0 IOMUX_PAD(0x688, 0x288, IOMUX_CONFIG_ALT0, 0x0, 0, MX51_USBH1_PAD_CTRL)
+#define MX51_PAD_USBH1_DATA1__USBH1_DATA1 IOMUX_PAD(0x68C, 0x28C, IOMUX_CONFIG_ALT0, 0x0, 0, MX51_USBH1_PAD_CTRL)
+#define MX51_PAD_USBH1_DATA2__USBH1_DATA2 IOMUX_PAD(0x690, 0x290, IOMUX_CONFIG_ALT0, 0x0, 0, MX51_USBH1_PAD_CTRL)
+#define MX51_PAD_USBH1_DATA3__USBH1_DATA3 IOMUX_PAD(0x694, 0x294, IOMUX_CONFIG_ALT0, 0x0, 0, MX51_USBH1_PAD_CTRL)
+#define MX51_PAD_USBH1_DATA4__USBH1_DATA4 IOMUX_PAD(0x698, 0x298, IOMUX_CONFIG_ALT0, 0x0, 0, MX51_USBH1_PAD_CTRL)
+#define MX51_PAD_USBH1_DATA5__USBH1_DATA5 IOMUX_PAD(0x69C, 0x29C, IOMUX_CONFIG_ALT0, 0x0, 0, MX51_USBH1_PAD_CTRL)
+#define MX51_PAD_USBH1_DATA6__USBH1_DATA6 IOMUX_PAD(0x6A0, 0x2A0, IOMUX_CONFIG_ALT0, 0x0, 0, MX51_USBH1_PAD_CTRL)
+#define MX51_PAD_USBH1_DATA7__USBH1_DATA7 IOMUX_PAD(0x6A4, 0x2A4, IOMUX_CONFIG_ALT0, 0x0, 0, MX51_USBH1_PAD_CTRL)
#define MX51_PAD_GPIO_3_0__DI1_PIN11 IOMUX_PAD(0x6A8, 0x2A8, 4, 0x0, 0, NO_PAD_CTRL)
#define MX51_PAD_GPIO_3_1__DI1_PIN12 IOMUX_PAD(0x6AC, 0x2AC, 4, 0x0, 0, NO_PAD_CTRL)
#define MX51_PAD_GPIO_3_2__DI1_PIN13 IOMUX_PAD(0x6B0, 0x2B0, 4, 0x0, 0, NO_PAD_CTRL)
@@ -299,7 +307,7 @@ typedef enum iomux_config {
#define MX51_PAD_GPIO_1_4__GPIO1_4 IOMUX_PAD(0x804, 0x3D8, 0, 0x0, 0, NO_PAD_CTRL)
#define MX51_PAD_GPIO_1_5__GPIO1_5 IOMUX_PAD(0x808, 0x3DC, 0, 0x0, 0, NO_PAD_CTRL)
#define MX51_PAD_GPIO_1_6__GPIO1_6 IOMUX_PAD(0x80C, 0x3E0, 0, 0x0, 0, NO_PAD_CTRL)
-#define MX51_PAD_GPIO_1_7__GPIO1_7 IOMUX_PAD(0x810, 0x3E4, 0, 0x0, 0, NO_PAD_CTRL)
+#define MX51_PAD_GPIO_1_7__GPIO1_7 IOMUX_PAD(0x810, 0x3E4, 0, 0x0, 0, MX51_GPIO_PAD_CTRL)
#define MX51_PAD_GPIO_1_8__GPIO1_8 IOMUX_PAD(0x814, 0x3E8, 0, 0x0, 1, \
(PAD_CTL_SRE_SLOW | PAD_CTL_DSE_MED | PAD_CTL_PUS_100K_UP | PAD_CTL_HYS))
#define MX51_PAD_GPIO_1_9__GPIO1_9 IOMUX_PAD(0x818, 0x3EC, 0, 0x0, 0, NO_PAD_CTRL)
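Board code consumes the renamed MX51_PAD_<padname>__<padmode> entries above through the iomux-v3 helpers. The sketch below is illustrative only: the struct pad_desc array form and the mxc_iomux_v3_setup_multiple_pads() helper are assumptions about the iomux-v3 API of this era, not part of this patch.

/* Hypothetical board-level pad setup for the USBH1 port. */
static struct pad_desc mx51_board_usbh1_pads[] = {
	MX51_PAD_USBH1_CLK__USBH1_CLK,
	MX51_PAD_USBH1_DIR__USBH1_DIR,
	MX51_PAD_USBH1_NXT__USBH1_NXT,
	MX51_PAD_USBH1_DATA0__USBH1_DATA0,
	/* ... DATA1 through DATA7 pads elided ... */
	MX51_PAD_USBH1_STP__USBH1_STP,
};

static void __init mx51_board_setup_usbh1_pads(void)
{
	/* Helper name/signature assumed from the iomux-v3 interface. */
	mxc_iomux_v3_setup_multiple_pads(mx51_board_usbh1_pads,
					 ARRAY_SIZE(mx51_board_usbh1_pads));
}
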
diff --git a/arch/arm/plat-mxc/include/mach/mxc_ehci.h b/arch/arm/plat-mxc/include/mach/mxc_ehci.h
index 4b9b8368c0c0..7fc5f9946199 100644
--- a/arch/arm/plat-mxc/include/mach/mxc_ehci.h
+++ b/arch/arm/plat-mxc/include/mach/mxc_ehci.h
@@ -25,6 +25,18 @@
#define MXC_EHCI_INTERNAL_PHY (1 << 7)
#define MXC_EHCI_IPPUE_DOWN (1 << 8)
#define MXC_EHCI_IPPUE_UP (1 << 9)
+#define MXC_EHCI_WAKEUP_ENABLED (1 << 10)
+#define MXC_EHCI_ITC_NO_THRESHOLD (1 << 11)
+
+#define MXC_USBCTRL_OFFSET 0
+#define MXC_USB_PHY_CTR_FUNC_OFFSET 0x8
+#define MXC_USB_PHY_CTR_FUNC2_OFFSET 0xc
+
+#define MX5_USBOTHER_REGS_OFFSET 0x800
+
+/* USB_PHY_CTRL_FUNC2 */
+#define MX5_USB_UTMI_PHYCTRL1_PLLDIV_MASK 0x3
+#define MX5_USB_UTMI_PHYCTRL1_PLLDIV_SHIFT 0
struct mxc_usbh_platform_data {
int (*init)(struct platform_device *pdev);
@@ -35,7 +47,7 @@ struct mxc_usbh_platform_data {
struct otg_transceiver *otg;
};
-int mxc_set_usbcontrol(int port, unsigned int flags);
+int mxc_initialize_usb_hw(int port, unsigned int flags);
#endif /* __INCLUDE_ASM_ARCH_MXC_EHCI_H */
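A board would typically feed the renamed mxc_initialize_usb_hw() helper and the new flag bits from its platform-data init hook. A minimal sketch, using only flags visible in this header; the flag combination a given board actually needs is hypothetical.

static int board_usbh1_init(struct platform_device *pdev)
{
	/* Flags shown here are examples, not a recommended configuration. */
	return mxc_initialize_usb_hw(pdev->id, MXC_EHCI_INTERNAL_PHY |
				     MXC_EHCI_WAKEUP_ENABLED |
				     MXC_EHCI_ITC_NO_THRESHOLD);
}

static struct mxc_usbh_platform_data board_usbh1_pdata = {
	.init = board_usbh1_init,
};
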
diff --git a/arch/arm/plat-mxc/time.c b/arch/arm/plat-mxc/time.c
index c1ce51abdba6..f9a1b059a76c 100644
--- a/arch/arm/plat-mxc/time.c
+++ b/arch/arm/plat-mxc/time.c
@@ -54,14 +54,14 @@
#define MX2_TSTAT_COMP (1 << 0)
/* MX31, MX35, MX25, MXC91231, MX5 */
-#define MX3_TCTL_WAITEN (1 << 3) /* Wait enable mode */
-#define MX3_TCTL_CLK_IPG (1 << 6)
-#define MX3_TCTL_FRR (1 << 9)
-#define MX3_IR 0x0c
-#define MX3_TSTAT 0x08
-#define MX3_TSTAT_OF1 (1 << 0)
-#define MX3_TCN 0x24
-#define MX3_TCMP 0x10
+#define V2_TCTL_WAITEN (1 << 3) /* Wait enable mode */
+#define V2_TCTL_CLK_IPG (1 << 6)
+#define V2_TCTL_FRR (1 << 9)
+#define V2_IR 0x0c
+#define V2_TSTAT 0x08
+#define V2_TSTAT_OF1 (1 << 0)
+#define V2_TCN 0x24
+#define V2_TCMP 0x10
#define timer_is_v1() (cpu_is_mx1() || cpu_is_mx21() || cpu_is_mx27())
#define timer_is_v2() (!timer_is_v1())
@@ -76,7 +76,7 @@ static inline void gpt_irq_disable(void)
unsigned int tmp;
if (timer_is_v2())
- __raw_writel(0, timer_base + MX3_IR);
+ __raw_writel(0, timer_base + V2_IR);
else {
tmp = __raw_readl(timer_base + MXC_TCTL);
__raw_writel(tmp & ~MX1_2_TCTL_IRQEN, timer_base + MXC_TCTL);
@@ -86,7 +86,7 @@ static inline void gpt_irq_disable(void)
static inline void gpt_irq_enable(void)
{
if (timer_is_v2())
- __raw_writel(1<<0, timer_base + MX3_IR);
+ __raw_writel(1<<0, timer_base + V2_IR);
else {
__raw_writel(__raw_readl(timer_base + MXC_TCTL) | MX1_2_TCTL_IRQEN,
timer_base + MXC_TCTL);
@@ -102,7 +102,7 @@ static void gpt_irq_acknowledge(void)
__raw_writel(MX2_TSTAT_CAPT | MX2_TSTAT_COMP,
timer_base + MX1_2_TSTAT);
} else if (timer_is_v2())
- __raw_writel(MX3_TSTAT_OF1, timer_base + MX3_TSTAT);
+ __raw_writel(V2_TSTAT_OF1, timer_base + V2_TSTAT);
}
static cycle_t mx1_2_get_cycles(struct clocksource *cs)
@@ -110,9 +110,9 @@ static cycle_t mx1_2_get_cycles(struct clocksource *cs)
return __raw_readl(timer_base + MX1_2_TCN);
}
-static cycle_t mx3_get_cycles(struct clocksource *cs)
+static cycle_t v2_get_cycles(struct clocksource *cs)
{
- return __raw_readl(timer_base + MX3_TCN);
+ return __raw_readl(timer_base + V2_TCN);
}
static struct clocksource clocksource_mxc = {
@@ -129,7 +129,7 @@ static int __init mxc_clocksource_init(struct clk *timer_clk)
unsigned int c = clk_get_rate(timer_clk);
if (timer_is_v2())
- clocksource_mxc.read = mx3_get_cycles;
+ clocksource_mxc.read = v2_get_cycles;
clocksource_mxc.mult = clocksource_hz2mult(c,
clocksource_mxc.shift);
@@ -153,16 +153,16 @@ static int mx1_2_set_next_event(unsigned long evt,
-ETIME : 0;
}
-static int mx3_set_next_event(unsigned long evt,
+static int v2_set_next_event(unsigned long evt,
struct clock_event_device *unused)
{
unsigned long tcmp;
- tcmp = __raw_readl(timer_base + MX3_TCN) + evt;
+ tcmp = __raw_readl(timer_base + V2_TCN) + evt;
- __raw_writel(tcmp, timer_base + MX3_TCMP);
+ __raw_writel(tcmp, timer_base + V2_TCMP);
- return (int)(tcmp - __raw_readl(timer_base + MX3_TCN)) < 0 ?
+ return (int)(tcmp - __raw_readl(timer_base + V2_TCN)) < 0 ?
-ETIME : 0;
}
@@ -192,8 +192,8 @@ static void mxc_set_mode(enum clock_event_mode mode,
if (mode != clockevent_mode) {
/* Set event time into far-far future */
if (timer_is_v2())
- __raw_writel(__raw_readl(timer_base + MX3_TCN) - 3,
- timer_base + MX3_TCMP);
+ __raw_writel(__raw_readl(timer_base + V2_TCN) - 3,
+ timer_base + V2_TCMP);
else
__raw_writel(__raw_readl(timer_base + MX1_2_TCN) - 3,
timer_base + MX1_2_TCMP);
@@ -245,7 +245,7 @@ static irqreturn_t mxc_timer_interrupt(int irq, void *dev_id)
uint32_t tstat;
if (timer_is_v2())
- tstat = __raw_readl(timer_base + MX3_TSTAT);
+ tstat = __raw_readl(timer_base + V2_TSTAT);
else
tstat = __raw_readl(timer_base + MX1_2_TSTAT);
@@ -276,7 +276,7 @@ static int __init mxc_clockevent_init(struct clk *timer_clk)
unsigned int c = clk_get_rate(timer_clk);
if (timer_is_v2())
- clockevent_mxc.set_next_event = mx3_set_next_event;
+ clockevent_mxc.set_next_event = v2_set_next_event;
clockevent_mxc.mult = div_sc(c, NSEC_PER_SEC,
clockevent_mxc.shift);
@@ -308,7 +308,7 @@ void __init mxc_timer_init(struct clk *timer_clk, void __iomem *base, int irq)
__raw_writel(0, timer_base + MXC_TPRER); /* see datasheet note */
if (timer_is_v2())
- tctl_val = MX3_TCTL_CLK_IPG | MX3_TCTL_FRR | MX3_TCTL_WAITEN | MXC_TCTL_TEN;
+ tctl_val = V2_TCTL_CLK_IPG | V2_TCTL_FRR | V2_TCTL_WAITEN | MXC_TCTL_TEN;
else
tctl_val = MX1_2_TCTL_FRR | MX1_2_TCTL_CLK_PCLK1 | MXC_TCTL_TEN;
diff --git a/arch/arm/plat-mxc/tzic.c b/arch/arm/plat-mxc/tzic.c
index afa6709db0b3..9b86d2a60d43 100644
--- a/arch/arm/plat-mxc/tzic.c
+++ b/arch/arm/plat-mxc/tzic.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2004-2009 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Copyright (C)2004-2010 Freescale Semiconductor, Inc. All Rights Reserved.
*
* The code contained herein is licensed under the GNU General Public
* License. You may obtain a copy of the GNU General Public License
@@ -19,6 +19,7 @@
#include <asm/mach/irq.h>
#include <mach/hardware.h>
+#include <mach/common.h>
/*
*****************************************
@@ -144,6 +145,7 @@ void __init tzic_init_irq(void __iomem *irqbase)
set_irq_handler(i, handle_level_irq);
set_irq_flags(i, IRQF_VALID);
}
+ mxc_register_gpios();
pr_info("TrustZone Interrupt Controller (TZIC) initialized\n");
}
diff --git a/arch/arm/plat-nomadik/Kconfig b/arch/arm/plat-nomadik/Kconfig
index 159daf583f85..5da3f97c537b 100644
--- a/arch/arm/plat-nomadik/Kconfig
+++ b/arch/arm/plat-nomadik/Kconfig
@@ -19,4 +19,9 @@ config HAS_MTU
to multiple interrupt generating programmable
32-bit free running decrementing counters.
+config NOMADIK_GPIO
+ bool
+ help
+ Support for the Nomadik GPIO controller.
+
endif
diff --git a/arch/arm/plat-nomadik/Makefile b/arch/arm/plat-nomadik/Makefile
index 37c7cdd0f8f0..c33547361bd7 100644
--- a/arch/arm/plat-nomadik/Makefile
+++ b/arch/arm/plat-nomadik/Makefile
@@ -3,3 +3,4 @@
# Licensed under GPLv2
obj-$(CONFIG_HAS_MTU) += timer.o
+obj-$(CONFIG_NOMADIK_GPIO) += gpio.o
diff --git a/arch/arm/mach-nomadik/gpio.c b/arch/arm/plat-nomadik/gpio.c
index 66b1c91ccc74..5a6ef252c38b 100644
--- a/arch/arm/mach-nomadik/gpio.c
+++ b/arch/arm/plat-nomadik/gpio.c
@@ -13,8 +13,10 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
-#include <linux/amba/bus.h>
+#include <linux/platform_device.h>
#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
@@ -36,8 +38,9 @@
struct nmk_gpio_chip {
struct gpio_chip chip;
void __iomem *addr;
+ struct clk *clk;
unsigned int parent_irq;
- spinlock_t *lock;
+ spinlock_t lock;
/* Keep track of configured edges */
u32 edge_rising;
u32 edge_falling;
@@ -108,40 +111,37 @@ static void nmk_gpio_irq_ack(unsigned int irq)
writel(nmk_gpio_get_bitmask(gpio), nmk_chip->addr + NMK_GPIO_IC);
}
-static void nmk_gpio_irq_mask(unsigned int irq)
+static void __nmk_gpio_irq_modify(struct nmk_gpio_chip *nmk_chip,
+ int gpio, bool enable)
{
- int gpio;
- struct nmk_gpio_chip *nmk_chip;
- unsigned long flags;
- u32 bitmask, reg;
-
- gpio = NOMADIK_IRQ_TO_GPIO(irq);
- nmk_chip = get_irq_chip_data(irq);
- bitmask = nmk_gpio_get_bitmask(gpio);
- if (!nmk_chip)
- return;
+ u32 bitmask = nmk_gpio_get_bitmask(gpio);
+ u32 reg;
- /* we must individually clear the two edges */
- spin_lock_irqsave(&nmk_chip->lock, flags);
+ /* we must individually set/clear the two edges */
if (nmk_chip->edge_rising & bitmask) {
- reg = readl(nmk_chip->addr + NMK_GPIO_RWIMSC);
- reg &= ~bitmask;
- writel(reg, nmk_chip->addr + NMK_GPIO_RWIMSC);
+ reg = readl(nmk_chip->addr + NMK_GPIO_RIMSC);
+ if (enable)
+ reg |= bitmask;
+ else
+ reg &= ~bitmask;
+ writel(reg, nmk_chip->addr + NMK_GPIO_RIMSC);
}
if (nmk_chip->edge_falling & bitmask) {
- reg = readl(nmk_chip->addr + NMK_GPIO_FWIMSC);
- reg &= ~bitmask;
- writel(reg, nmk_chip->addr + NMK_GPIO_FWIMSC);
+ reg = readl(nmk_chip->addr + NMK_GPIO_FIMSC);
+ if (enable)
+ reg |= bitmask;
+ else
+ reg &= ~bitmask;
+ writel(reg, nmk_chip->addr + NMK_GPIO_FIMSC);
}
- spin_unlock_irqrestore(&nmk_chip->lock, flags);
-};
+}
-static void nmk_gpio_irq_unmask(unsigned int irq)
+static void nmk_gpio_irq_modify(unsigned int irq, bool enable)
{
int gpio;
struct nmk_gpio_chip *nmk_chip;
unsigned long flags;
- u32 bitmask, reg;
+ u32 bitmask;
gpio = NOMADIK_IRQ_TO_GPIO(irq);
nmk_chip = get_irq_chip_data(irq);
@@ -149,23 +149,24 @@ static void nmk_gpio_irq_unmask(unsigned int irq)
if (!nmk_chip)
return;
- /* we must individually set the two edges */
spin_lock_irqsave(&nmk_chip->lock, flags);
- if (nmk_chip->edge_rising & bitmask) {
- reg = readl(nmk_chip->addr + NMK_GPIO_RWIMSC);
- reg |= bitmask;
- writel(reg, nmk_chip->addr + NMK_GPIO_RWIMSC);
- }
- if (nmk_chip->edge_falling & bitmask) {
- reg = readl(nmk_chip->addr + NMK_GPIO_FWIMSC);
- reg |= bitmask;
- writel(reg, nmk_chip->addr + NMK_GPIO_FWIMSC);
- }
+ __nmk_gpio_irq_modify(nmk_chip, gpio, enable);
spin_unlock_irqrestore(&nmk_chip->lock, flags);
}
+static void nmk_gpio_irq_mask(unsigned int irq)
+{
+ nmk_gpio_irq_modify(irq, false);
+};
+
+static void nmk_gpio_irq_unmask(unsigned int irq)
+{
+ nmk_gpio_irq_modify(irq, true);
+}
+
static int nmk_gpio_irq_set_type(unsigned int irq, unsigned int type)
{
+ bool enabled = !(irq_to_desc(irq)->status & IRQ_DISABLED);
int gpio;
struct nmk_gpio_chip *nmk_chip;
unsigned long flags;
@@ -184,19 +185,21 @@ static int nmk_gpio_irq_set_type(unsigned int irq, unsigned int type)
spin_lock_irqsave(&nmk_chip->lock, flags);
+ if (enabled)
+ __nmk_gpio_irq_modify(nmk_chip, gpio, false);
+
nmk_chip->edge_rising &= ~bitmask;
if (type & IRQ_TYPE_EDGE_RISING)
nmk_chip->edge_rising |= bitmask;
- writel(nmk_chip->edge_rising, nmk_chip->addr + NMK_GPIO_RIMSC);
nmk_chip->edge_falling &= ~bitmask;
if (type & IRQ_TYPE_EDGE_FALLING)
nmk_chip->edge_falling |= bitmask;
- writel(nmk_chip->edge_falling, nmk_chip->addr + NMK_GPIO_FIMSC);
- spin_unlock_irqrestore(&nmk_chip->lock, flags);
+ if (enabled)
+ __nmk_gpio_irq_modify(nmk_chip, gpio, true);
- nmk_gpio_irq_unmask(irq);
+ spin_unlock_irqrestore(&nmk_chip->lock, flags);
return 0;
}
@@ -212,21 +215,27 @@ static struct irq_chip nmk_gpio_irq_chip = {
static void nmk_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
{
struct nmk_gpio_chip *nmk_chip;
- struct irq_chip *host_chip;
+ struct irq_chip *host_chip = get_irq_chip(irq);
unsigned int gpio_irq;
u32 pending;
unsigned int first_irq;
+ if (host_chip->mask_ack)
+ host_chip->mask_ack(irq);
+ else {
+ host_chip->mask(irq);
+ if (host_chip->ack)
+ host_chip->ack(irq);
+ }
+
nmk_chip = get_irq_data(irq);
first_irq = NOMADIK_GPIO_TO_IRQ(nmk_chip->chip.base);
while ( (pending = readl(nmk_chip->addr + NMK_GPIO_IS)) ) {
gpio_irq = first_irq + __ffs(pending);
generic_handle_irq(gpio_irq);
}
- if (0) {/* don't ack parent irq, as ack == disable */
- host_chip = get_irq_chip(irq);
- host_chip->ack(irq);
- }
+
+ host_chip->unmask(irq);
}
static int nmk_gpio_init_irq(struct nmk_gpio_chip *nmk_chip)
@@ -240,6 +249,7 @@ static int nmk_gpio_init_irq(struct nmk_gpio_chip *nmk_chip)
set_irq_handler(i, handle_edge_irq);
set_irq_flags(i, IRQF_VALID);
set_irq_chip_data(i, nmk_chip);
+ set_irq_type(i, IRQ_TYPE_EDGE_FALLING);
}
set_irq_chained_handler(nmk_chip->parent_irq, nmk_gpio_irq_handler);
set_irq_data(nmk_chip->parent_irq, nmk_chip);
@@ -298,30 +308,59 @@ static struct gpio_chip nmk_gpio_template = {
.can_sleep = 0,
};
-static int __init nmk_gpio_probe(struct amba_device *dev, struct amba_id *id)
+static int __init nmk_gpio_probe(struct platform_device *dev)
{
- struct nmk_gpio_platform_data *pdata;
+ struct nmk_gpio_platform_data *pdata = dev->dev.platform_data;
struct nmk_gpio_chip *nmk_chip;
struct gpio_chip *chip;
+ struct resource *res;
+ struct clk *clk;
+ int irq;
int ret;
- pdata = dev->dev.platform_data;
- ret = amba_request_regions(dev, pdata->name);
- if (ret)
- return ret;
+ if (!pdata)
+ return -ENODEV;
+
+ res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ if (!res) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ irq = platform_get_irq(dev, 0);
+ if (irq < 0) {
+ ret = irq;
+ goto out;
+ }
+
+ if (request_mem_region(res->start, resource_size(res),
+ dev_name(&dev->dev)) == NULL) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ clk = clk_get(&dev->dev, NULL);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ goto out_release;
+ }
+
+ clk_enable(clk);
nmk_chip = kzalloc(sizeof(*nmk_chip), GFP_KERNEL);
if (!nmk_chip) {
ret = -ENOMEM;
- goto out_amba;
+ goto out_clk;
}
/*
* The virt address in nmk_chip->addr is in the nomadik register space,
* so we can simply convert the resource address, without remapping
*/
- nmk_chip->addr = io_p2v(dev->res.start);
+ nmk_chip->clk = clk;
+ nmk_chip->addr = io_p2v(res->start);
nmk_chip->chip = nmk_gpio_template;
- nmk_chip->parent_irq = pdata->parent_irq;
+ nmk_chip->parent_irq = irq;
+ spin_lock_init(&nmk_chip->lock);
chip = &nmk_chip->chip;
chip->base = pdata->first_gpio;
@@ -333,7 +372,7 @@ static int __init nmk_gpio_probe(struct amba_device *dev, struct amba_id *id)
if (ret)
goto out_free;
- amba_set_drvdata(dev, nmk_chip);
+ platform_set_drvdata(dev, nmk_chip);
nmk_gpio_init_irq(nmk_chip);
@@ -341,51 +380,50 @@ static int __init nmk_gpio_probe(struct amba_device *dev, struct amba_id *id)
nmk_chip->chip.base, nmk_chip->chip.base+31, nmk_chip->addr);
return 0;
- out_free:
+out_free:
kfree(nmk_chip);
- out_amba:
- amba_release_regions(dev);
+out_clk:
+ clk_disable(clk);
+ clk_put(clk);
+out_release:
+ release_mem_region(res->start, resource_size(res));
+out:
dev_err(&dev->dev, "Failure %i for GPIO %i-%i\n", ret,
pdata->first_gpio, pdata->first_gpio+31);
return ret;
}
-static int nmk_gpio_remove(struct amba_device *dev)
+static int __exit nmk_gpio_remove(struct platform_device *dev)
{
struct nmk_gpio_chip *nmk_chip;
+ struct resource *res;
- nmk_chip = amba_get_drvdata(dev);
+ res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+
+ nmk_chip = platform_get_drvdata(dev);
gpiochip_remove(&nmk_chip->chip);
+ clk_disable(nmk_chip->clk);
+ clk_put(nmk_chip->clk);
kfree(nmk_chip);
- amba_release_regions(dev);
+ release_mem_region(res->start, resource_size(res));
return 0;
}
-/* We have 0x1f080060 and 0x1f180060, accept both using the mask */
-static struct amba_id nmk_gpio_ids[] = {
- {
- .id = 0x1f080060,
- .mask = 0xffefffff,
- },
- {0, 0},
-};
-
-static struct amba_driver nmk_gpio_driver = {
- .drv = {
+static struct platform_driver nmk_gpio_driver = {
+ .driver = {
.owner = THIS_MODULE,
.name = "gpio",
},
.probe = nmk_gpio_probe,
- .remove = nmk_gpio_remove,
+ .remove = __exit_p(nmk_gpio_remove),
.suspend = NULL, /* to be done */
.resume = NULL,
- .id_table = nmk_gpio_ids,
};
static int __init nmk_gpio_init(void)
{
- return amba_driver_register(&nmk_gpio_driver);
+ return platform_driver_register(&nmk_gpio_driver);
}
arch_initcall(nmk_gpio_init);
diff --git a/arch/arm/plat-nomadik/include/plat/gpio.h b/arch/arm/plat-nomadik/include/plat/gpio.h
new file mode 100644
index 000000000000..4200811249ca
--- /dev/null
+++ b/arch/arm/plat-nomadik/include/plat/gpio.h
@@ -0,0 +1,70 @@
+/*
+ * Structures and registers for GPIO access in the Nomadik SoC
+ *
+ * Copyright (C) 2008 STMicroelectronics
+ * Author: Prafulla WADASKAR <prafulla.wadaskar@st.com>
+ * Copyright (C) 2009 Alessandro Rubini <rubini@unipv.it>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_PLAT_GPIO_H
+#define __ASM_PLAT_GPIO_H
+
+#include <asm-generic/gpio.h>
+
+/*
+ * These currently cause a function call to happen, they may be optimized
+ * if needed by adding cpu-specific defines to identify blocks
+ * (see mach-pxa/include/mach/gpio.h as an example using GPLR etc)
+ */
+#define gpio_get_value __gpio_get_value
+#define gpio_set_value __gpio_set_value
+#define gpio_cansleep __gpio_cansleep
+#define gpio_to_irq __gpio_to_irq
+
+/*
+ * "nmk_gpio" and "NMK_GPIO" stand for "Nomadik GPIO", leaving
+ * the "gpio" namespace for generic and cross-machine functions
+ */
+
+/* Register in the logic block */
+#define NMK_GPIO_DAT 0x00
+#define NMK_GPIO_DATS 0x04
+#define NMK_GPIO_DATC 0x08
+#define NMK_GPIO_PDIS 0x0c
+#define NMK_GPIO_DIR 0x10
+#define NMK_GPIO_DIRS 0x14
+#define NMK_GPIO_DIRC 0x18
+#define NMK_GPIO_SLPC 0x1c
+#define NMK_GPIO_AFSLA 0x20
+#define NMK_GPIO_AFSLB 0x24
+
+#define NMK_GPIO_RIMSC 0x40
+#define NMK_GPIO_FIMSC 0x44
+#define NMK_GPIO_IS 0x48
+#define NMK_GPIO_IC 0x4c
+#define NMK_GPIO_RWIMSC 0x50
+#define NMK_GPIO_FWIMSC 0x54
+#define NMK_GPIO_WKS 0x58
+
+/* Alternate functions: function C is set in hw by setting both A and B */
+#define NMK_GPIO_ALT_GPIO 0
+#define NMK_GPIO_ALT_A 1
+#define NMK_GPIO_ALT_B 2
+#define NMK_GPIO_ALT_C (NMK_GPIO_ALT_A | NMK_GPIO_ALT_B)
+
+extern int nmk_gpio_set_mode(int gpio, int gpio_mode);
+extern int nmk_gpio_get_mode(int gpio);
+
+/*
+ * Platform data to register a block: only the initial gpio/irq number.
+ */
+struct nmk_gpio_platform_data {
+ char *name;
+ int first_gpio;
+ int first_irq;
+};
+
+#endif /* __ASM_PLAT_GPIO_H */
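With the conversion from an AMBA driver to a platform driver, each GPIO block is now registered by machine code as a "gpio" platform device carrying one MEM resource, one IRQ and a struct nmk_gpio_platform_data. A sketch under stated assumptions: the base address and IRQ number below are placeholders, not real Nomadik values.

static struct resource board_gpio0_resources[] = {
	{
		.start	= 0x101e4000,		/* placeholder block base */
		.end	= 0x101e4000 + SZ_4K - 1,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= 6,			/* placeholder parent IRQ */
		.end	= 6,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct nmk_gpio_platform_data board_gpio0_pdata = {
	.name		= "GPIO-0-31",
	.first_gpio	= 0,
	.first_irq	= NOMADIK_GPIO_TO_IRQ(0),
};

static struct platform_device board_gpio0_device = {
	.name		= "gpio",	/* matches nmk_gpio_driver above */
	.id		= 0,
	.resource	= board_gpio0_resources,
	.num_resources	= ARRAY_SIZE(board_gpio0_resources),
	.dev = {
		.platform_data = &board_gpio0_pdata,
	},
};
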
diff --git a/arch/arm/plat-nomadik/include/plat/ste_dma40.h b/arch/arm/plat-nomadik/include/plat/ste_dma40.h
new file mode 100644
index 000000000000..4d12ea4ca361
--- /dev/null
+++ b/arch/arm/plat-nomadik/include/plat/ste_dma40.h
@@ -0,0 +1,239 @@
+/*
+ * arch/arm/plat-nomadik/include/plat/ste_dma40.h
+ *
+ * Copyright (C) ST-Ericsson 2007-2010
+ * License terms: GNU General Public License (GPL) version 2
+ * Author: Per Friden <per.friden@stericsson.com>
+ * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
+ */
+
+
+#ifndef STE_DMA40_H
+#define STE_DMA40_H
+
+#include <linux/dmaengine.h>
+#include <linux/workqueue.h>
+#include <linux/interrupt.h>
+#include <linux/dmaengine.h>
+
+/* dev types for memcpy */
+#define STEDMA40_DEV_DST_MEMORY (-1)
+#define STEDMA40_DEV_SRC_MEMORY (-1)
+
+/*
+ * Description of bitfields of channel_type variable is available in
+ * the info structure.
+ */
+
+/* Priority */
+#define STEDMA40_INFO_PRIO_TYPE_POS 2
+#define STEDMA40_HIGH_PRIORITY_CHANNEL (0x1 << STEDMA40_INFO_PRIO_TYPE_POS)
+#define STEDMA40_LOW_PRIORITY_CHANNEL (0x2 << STEDMA40_INFO_PRIO_TYPE_POS)
+
+/* Mode */
+#define STEDMA40_INFO_CH_MODE_TYPE_POS 6
+#define STEDMA40_CHANNEL_IN_PHY_MODE (0x1 << STEDMA40_INFO_CH_MODE_TYPE_POS)
+#define STEDMA40_CHANNEL_IN_LOG_MODE (0x2 << STEDMA40_INFO_CH_MODE_TYPE_POS)
+#define STEDMA40_CHANNEL_IN_OPER_MODE (0x3 << STEDMA40_INFO_CH_MODE_TYPE_POS)
+
+/* Mode options */
+#define STEDMA40_INFO_CH_MODE_OPT_POS 8
+#define STEDMA40_PCHAN_BASIC_MODE (0x1 << STEDMA40_INFO_CH_MODE_OPT_POS)
+#define STEDMA40_PCHAN_MODULO_MODE (0x2 << STEDMA40_INFO_CH_MODE_OPT_POS)
+#define STEDMA40_PCHAN_DOUBLE_DST_MODE (0x3 << STEDMA40_INFO_CH_MODE_OPT_POS)
+#define STEDMA40_LCHAN_SRC_PHY_DST_LOG (0x1 << STEDMA40_INFO_CH_MODE_OPT_POS)
+#define STEDMA40_LCHAN_SRC_LOG_DST_PHS (0x2 << STEDMA40_INFO_CH_MODE_OPT_POS)
+#define STEDMA40_LCHAN_SRC_LOG_DST_LOG (0x3 << STEDMA40_INFO_CH_MODE_OPT_POS)
+
+/* Interrupt */
+#define STEDMA40_INFO_TIM_POS 10
+#define STEDMA40_NO_TIM_FOR_LINK (0x0 << STEDMA40_INFO_TIM_POS)
+#define STEDMA40_TIM_FOR_LINK (0x1 << STEDMA40_INFO_TIM_POS)
+
+/* End of channel_type configuration */
+
+#define STEDMA40_ESIZE_8_BIT 0x0
+#define STEDMA40_ESIZE_16_BIT 0x1
+#define STEDMA40_ESIZE_32_BIT 0x2
+#define STEDMA40_ESIZE_64_BIT 0x3
+
+/* The value 4 indicates that PEN-reg shall be set to 0 */
+#define STEDMA40_PSIZE_PHY_1 0x4
+#define STEDMA40_PSIZE_PHY_2 0x0
+#define STEDMA40_PSIZE_PHY_4 0x1
+#define STEDMA40_PSIZE_PHY_8 0x2
+#define STEDMA40_PSIZE_PHY_16 0x3
+
+/*
+ * The number of elements differs in logical and
+ * physical mode
+ */
+#define STEDMA40_PSIZE_LOG_1 STEDMA40_PSIZE_PHY_2
+#define STEDMA40_PSIZE_LOG_4 STEDMA40_PSIZE_PHY_4
+#define STEDMA40_PSIZE_LOG_8 STEDMA40_PSIZE_PHY_8
+#define STEDMA40_PSIZE_LOG_16 STEDMA40_PSIZE_PHY_16
+
+enum stedma40_flow_ctrl {
+ STEDMA40_NO_FLOW_CTRL,
+ STEDMA40_FLOW_CTRL,
+};
+
+enum stedma40_endianess {
+ STEDMA40_LITTLE_ENDIAN,
+ STEDMA40_BIG_ENDIAN
+};
+
+enum stedma40_periph_data_width {
+ STEDMA40_BYTE_WIDTH = STEDMA40_ESIZE_8_BIT,
+ STEDMA40_HALFWORD_WIDTH = STEDMA40_ESIZE_16_BIT,
+ STEDMA40_WORD_WIDTH = STEDMA40_ESIZE_32_BIT,
+ STEDMA40_DOUBLEWORD_WIDTH = STEDMA40_ESIZE_64_BIT
+};
+
+struct stedma40_half_channel_info {
+ enum stedma40_endianess endianess;
+ enum stedma40_periph_data_width data_width;
+ int psize;
+ enum stedma40_flow_ctrl flow_ctrl;
+};
+
+enum stedma40_xfer_dir {
+ STEDMA40_MEM_TO_MEM,
+ STEDMA40_MEM_TO_PERIPH,
+ STEDMA40_PERIPH_TO_MEM,
+ STEDMA40_PERIPH_TO_PERIPH
+};
+
+
+/**
+ * struct stedma40_chan_cfg - Structure to be filled by client drivers.
+ *
+ * @dir: MEM 2 MEM, PERIPH 2 MEM, MEM 2 PERIPH, PERIPH 2 PERIPH
+ * @channel_type: priority, mode, mode options and interrupt configuration.
+ * @src_dev_type: Src device type
+ * @dst_dev_type: Dst device type
+ * @src_info: Parameters for the src half channel
+ * @dst_info: Parameters for the dst half channel
+ * @pre_transfer_data: Data to be passed on to the pre_transfer() function.
+ * @pre_transfer: Callback used if needed before preparation of the transfer.
+ * Only called if a device is set. The size argument is the number of bytes
+ * to transfer (for a multi-element transfer, the size of the first element).
+ *
+ *
+ * This structure has to be filled in by the client drivers.
+ * It is recommended to do all DMA configuration for clients in the machine (board) code.
+ *
+ */
+struct stedma40_chan_cfg {
+ enum stedma40_xfer_dir dir;
+ unsigned int channel_type;
+ int src_dev_type;
+ int dst_dev_type;
+ struct stedma40_half_channel_info src_info;
+ struct stedma40_half_channel_info dst_info;
+ void *pre_transfer_data;
+ int (*pre_transfer) (struct dma_chan *chan,
+ void *data,
+ int size);
+};
+
+/**
+ * struct stedma40_platform_data - Configuration struct for the dma device.
+ *
+ * @dev_len: length of dev_tx and dev_rx
+ * @dev_tx: mapping between destination event line and io address
+ * @dev_rx: mapping between source event line and io address
+ * @memcpy: list of memcpy event lines
+ * @memcpy_len: length of memcpy
+ * @memcpy_conf_phy: default configuration of physical channel memcpy
+ * @memcpy_conf_log: default configuration of logical channel memcpy
+ * @llis_per_log: number of max linked list items per logical channel
+ *
+ */
+struct stedma40_platform_data {
+ u32 dev_len;
+ const dma_addr_t *dev_tx;
+ const dma_addr_t *dev_rx;
+ int *memcpy;
+ u32 memcpy_len;
+ struct stedma40_chan_cfg *memcpy_conf_phy;
+ struct stedma40_chan_cfg *memcpy_conf_log;
+ unsigned int llis_per_log;
+};
+
+/**
+ * stedma40_set_psize() - Used for changing the package size of an
+ * already configured dma channel.
+ *
+ * @chan: dmaengine handle
+ * @src_psize: new package size for src. (STEDMA40_PSIZE*)
+ * @dst_psize: new package size for dst. (STEDMA40_PSIZE*)
+ *
+ * Returns 0 on success, otherwise a negative error number.
+ */
+int stedma40_set_psize(struct dma_chan *chan,
+ int src_psize,
+ int dst_psize);
+
+/**
+ * stedma40_filter() - Provides a stedma40_chan_cfg to the
+ * ste_dma40 dma driver via the dmaengine framework and
+ * does some checking of what is provided.
+ *
+ * Never called directly by a client; it is used by the dmaengine framework.
+ * @chan: dmaengine handle.
+ * @data: Must be of type struct stedma40_chan_cfg and is
+ * the channel configuration passed through the framework.
+ *
+ *
+ */
+
+bool stedma40_filter(struct dma_chan *chan, void *data);
+
+/**
+ * stedma40_memcpy_sg() - extension of the dma framework, memcpy to/from
+ * scatter-gather lists.
+ *
+ * @chan: dmaengine handle
+ * @sgl_dst: Destination scatter list
+ * @sgl_src: Source scatter list
+ * @sgl_len: The length of each scatterlist. Both lists must be of equal length
+ * and each element must match the corresponding element in the other scatter
+ * list.
+ * @flags: is actually enum dma_ctrl_flags. See dmaengine.h
+ */
+
+struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
+ struct scatterlist *sgl_dst,
+ struct scatterlist *sgl_src,
+ unsigned int sgl_len,
+ unsigned long flags);
+
+/**
+ * stedma40_slave_mem() - Transfers a raw data buffer to or from a slave
+ * (=device)
+ *
+ * @chan: dmaengine handle
+ * @addr: source or destination physical address.
+ * @size: bytes to transfer
+ * @direction: direction of transfer
+ * @flags: is actually enum dma_ctrl_flags. See dmaengine.h
+ */
+
+static inline struct
+dma_async_tx_descriptor *stedma40_slave_mem(struct dma_chan *chan,
+ dma_addr_t addr,
+ unsigned int size,
+ enum dma_data_direction direction,
+ unsigned long flags)
+{
+ struct scatterlist sg;
+ sg_init_table(&sg, 1);
+ sg.dma_address = addr;
+ sg.length = size;
+
+ return chan->device->device_prep_slave_sg(chan, &sg, 1,
+ direction, flags);
+}
+
+#endif
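Per the kernel-doc above, a client fills a stedma40_chan_cfg and hands it to the dmaengine core, which passes it to stedma40_filter() during channel allocation. A minimal sketch using the standard dma_request_channel() path; the source device type is a placeholder, since real event lines come from the platform's dev_tx/dev_rx tables.

static struct dma_chan *board_request_rx_channel(void)
{
	static struct stedma40_chan_cfg rx_cfg = {
		.dir		= STEDMA40_PERIPH_TO_MEM,
		.channel_type	= STEDMA40_CHANNEL_IN_LOG_MODE |
				  STEDMA40_LOW_PRIORITY_CHANNEL,
		.src_dev_type	= 1,	/* placeholder event line */
		.dst_dev_type	= STEDMA40_DEV_DST_MEMORY,
		.src_info = {
			.data_width	= STEDMA40_BYTE_WIDTH,
			.psize		= STEDMA40_PSIZE_LOG_1,
		},
		.dst_info = {
			.data_width	= STEDMA40_BYTE_WIDTH,
			.psize		= STEDMA40_PSIZE_LOG_1,
		},
	};
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* The dmaengine core calls stedma40_filter(chan, &rx_cfg) for us. */
	return dma_request_channel(mask, stedma40_filter, &rx_cfg);
}
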
diff --git a/arch/arm/plat-nomadik/timer.c b/arch/arm/plat-nomadik/timer.c
index fa7cb3a57cbf..0ff3798769ab 100644
--- a/arch/arm/plat-nomadik/timer.c
+++ b/arch/arm/plat-nomadik/timer.c
@@ -2,7 +2,7 @@
* linux/arch/arm/mach-nomadik/timer.c
*
* Copyright (C) 2008 STMicroelectronics
- * Copyright (C) 2009 Alessandro Rubini, somewhat based on at91sam926x
+ * Copyright (C) 2010 Alessandro Rubini
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2, as
@@ -18,123 +18,150 @@
#include <plat/mtu.h>
-static u32 nmdk_count; /* accumulated count */
-static u32 nmdk_cycle; /* write-once */
-
-/* setup by the platform code */
-void __iomem *mtu_base;
+void __iomem *mtu_base; /* assigned by machine code */
/*
- * clocksource: the MTU device is a decrementing counters, so we negate
- * the value being read.
+ * The kernel assumes that sched_clock() can be called early,
+ * but the MTU may not yet be initialized.
*/
-static cycle_t nmdk_read_timer(struct clocksource *cs)
+static cycle_t nmdk_read_timer_dummy(struct clocksource *cs)
{
- u32 count = readl(mtu_base + MTU_VAL(0));
- return nmdk_count + nmdk_cycle - count;
+ return 0;
+}
+/* clocksource: MTU decrements, so we negate the value being read. */
+static cycle_t nmdk_read_timer(struct clocksource *cs)
+{
+ return -readl(mtu_base + MTU_VAL(0));
}
static struct clocksource nmdk_clksrc = {
.name = "mtu_0",
- .rating = 120,
- .read = nmdk_read_timer,
+ .rating = 200,
+ .read = nmdk_read_timer_dummy,
+ .mask = CLOCKSOURCE_MASK(32),
.shift = 20,
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
/*
- * Clockevent device: currently only periodic mode is supported
+ * Override the global weak sched_clock symbol with this
+ * local implementation which uses the clocksource to get some
+ * better resolution when scheduling the kernel. We accept that
+ * this wraps around for now, since it is just a relative time
+ * stamp. (Inspired by OMAP implementation.)
*/
+unsigned long long notrace sched_clock(void)
+{
+ return clocksource_cyc2ns(nmdk_clksrc.read(
+ &nmdk_clksrc),
+ nmdk_clksrc.mult,
+ nmdk_clksrc.shift);
+}
+
+/* Clockevent device: use one-shot mode */
static void nmdk_clkevt_mode(enum clock_event_mode mode,
struct clock_event_device *dev)
{
+ u32 cr;
+
switch (mode) {
case CLOCK_EVT_MODE_PERIODIC:
- /* count current value? */
- writel(readl(mtu_base + MTU_IMSC) | 1, mtu_base + MTU_IMSC);
+ pr_err("%s: periodic mode not supported\n", __func__);
break;
case CLOCK_EVT_MODE_ONESHOT:
- BUG(); /* Not supported, yet */
- /* FALLTHROUGH */
+ /* Load highest value, enable device, enable interrupts */
+ cr = readl(mtu_base + MTU_CR(1));
+ writel(0, mtu_base + MTU_LR(1));
+ writel(cr | MTU_CRn_ENA, mtu_base + MTU_CR(1));
+ writel(0x2, mtu_base + MTU_IMSC);
+ break;
case CLOCK_EVT_MODE_SHUTDOWN:
case CLOCK_EVT_MODE_UNUSED:
- writel(readl(mtu_base + MTU_IMSC) & ~1, mtu_base + MTU_IMSC);
+ /* disable irq */
+ writel(0, mtu_base + MTU_IMSC);
break;
case CLOCK_EVT_MODE_RESUME:
break;
}
}
+static int nmdk_clkevt_next(unsigned long evt, struct clock_event_device *ev)
+{
+ /* writing the value has immediate effect */
+ writel(evt, mtu_base + MTU_LR(1));
+ return 0;
+}
+
static struct clock_event_device nmdk_clkevt = {
- .name = "mtu_0",
- .features = CLOCK_EVT_FEAT_PERIODIC,
+ .name = "mtu_1",
+ .features = CLOCK_EVT_FEAT_ONESHOT,
.shift = 32,
- .rating = 100,
+ .rating = 200,
.set_mode = nmdk_clkevt_mode,
+ .set_next_event = nmdk_clkevt_next,
};
/*
- * IRQ Handler for the timer 0 of the MTU block. The irq is not shared
- * as we are the only users of mtu0 by now.
+ * IRQ Handler for timer 1 of the MTU block.
*/
static irqreturn_t nmdk_timer_interrupt(int irq, void *dev_id)
{
- /* ack: "interrupt clear register" */
- writel(1 << 0, mtu_base + MTU_ICR);
-
- /* we can't count lost ticks, unfortunately */
- nmdk_count += nmdk_cycle;
- nmdk_clkevt.event_handler(&nmdk_clkevt);
+ struct clock_event_device *evdev = dev_id;
+ writel(1 << 1, mtu_base + MTU_ICR); /* Interrupt clear reg */
+ evdev->event_handler(evdev);
return IRQ_HANDLED;
}
-/*
- * Set up timer interrupt, and return the current time in seconds.
- */
static struct irqaction nmdk_timer_irq = {
.name = "Nomadik Timer Tick",
.flags = IRQF_DISABLED | IRQF_TIMER,
.handler = nmdk_timer_interrupt,
+ .dev_id = &nmdk_clkevt,
};
-static void nmdk_timer_reset(void)
-{
- u32 cr;
-
- writel(0, mtu_base + MTU_CR(0)); /* off */
-
- /* configure load and background-load, and fire it up */
- writel(nmdk_cycle, mtu_base + MTU_LR(0));
- writel(nmdk_cycle, mtu_base + MTU_BGLR(0));
- cr = MTU_CRn_PERIODIC | MTU_CRn_PRESCALE_1 | MTU_CRn_32BITS;
- writel(cr, mtu_base + MTU_CR(0));
- writel(cr | MTU_CRn_ENA, mtu_base + MTU_CR(0));
-}
-
void __init nmdk_timer_init(void)
{
unsigned long rate;
- int bits;
-
- rate = CLOCK_TICK_RATE; /* 2.4MHz */
- nmdk_cycle = (rate + HZ/2) / HZ;
+ u32 cr = MTU_CRn_32BITS;
+
+ /*
+ * Tick rate is 2.4MHz for Nomadik and 110MHz for ux500:
+ * use a divide-by-16 counter if it's more than 16MHz
+ */
+ rate = CLOCK_TICK_RATE;
+ if (rate > 16 << 20) {
+ rate /= 16;
+ cr |= MTU_CRn_PRESCALE_16;
+ } else {
+ cr |= MTU_CRn_PRESCALE_1;
+ }
- /* Init the timer and register clocksource */
- nmdk_timer_reset();
+ /* Timer 0 is the free running clocksource */
+ writel(cr, mtu_base + MTU_CR(0));
+ writel(0, mtu_base + MTU_LR(0));
+ writel(0, mtu_base + MTU_BGLR(0));
+ writel(cr | MTU_CRn_ENA, mtu_base + MTU_CR(0));
nmdk_clksrc.mult = clocksource_hz2mult(rate, nmdk_clksrc.shift);
- bits = 8*sizeof(nmdk_count);
- nmdk_clksrc.mask = CLOCKSOURCE_MASK(bits);
+ /* Now the scheduling clock is ready */
+ nmdk_clksrc.read = nmdk_read_timer;
if (clocksource_register(&nmdk_clksrc))
- printk(KERN_ERR "timer: failed to initialize clock "
- "source %s\n", nmdk_clksrc.name);
+ pr_err("timer: failed to initialize clock source %s\n",
+ nmdk_clksrc.name);
+
+ /* Timer 1 is used for events, fix according to rate */
+ writel(cr | MTU_CRn_ONESHOT, mtu_base + MTU_CR(1)); /* off, currently */
+ nmdk_clkevt.mult = div_sc(rate, NSEC_PER_SEC, nmdk_clkevt.shift);
+ nmdk_clkevt.max_delta_ns =
+ clockevent_delta2ns(0xffffffff, &nmdk_clkevt);
+ nmdk_clkevt.min_delta_ns =
+ clockevent_delta2ns(0x00000002, &nmdk_clkevt);
+ nmdk_clkevt.cpumask = cpumask_of(0);
/* Register irq and clockevents */
setup_irq(IRQ_MTU0, &nmdk_timer_irq);
- nmdk_clkevt.mult = div_sc(rate, NSEC_PER_SEC, nmdk_clkevt.shift);
- nmdk_clkevt.cpumask = cpumask_of(0);
clockevents_register_device(&nmdk_clkevt);
}
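The new sched_clock() above is just the clocksource cycle count pushed through clocksource_cyc2ns(), i.e. (cyc * mult) >> shift, with mult chosen by clocksource_hz2mult() for the prescaled tick rate. A stand-alone sanity check of that arithmetic for the ux500 case (110 MHz / 16 = 6.875 MHz, shift = 20); the mult value is recomputed here rather than taken from the kernel, so treat it as an approximation.

#include <stdint.h>

static uint64_t cyc2ns(uint32_t cyc, uint32_t mult, uint32_t shift)
{
	/* Same form as clocksource_cyc2ns(): scale cycles to nanoseconds. */
	return ((uint64_t)cyc * mult) >> shift;
}

int main(void)
{
	uint32_t shift = 20;
	uint32_t mult = (uint32_t)((1000000000ULL << shift) / 6875000);

	/* One tick at 6.875 MHz is ~145 ns; 6875000 ticks are ~1 second. */
	return cyc2ns(6875000, mult, shift) >= 999000000 ? 0 : 1;
}
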
diff --git a/arch/arm/plat-omap/Kconfig b/arch/arm/plat-omap/Kconfig
index 6da796ef82bd..78b49a626d06 100644
--- a/arch/arm/plat-omap/Kconfig
+++ b/arch/arm/plat-omap/Kconfig
@@ -110,8 +110,13 @@ config OMAP_IOMMU
tristate
config OMAP_IOMMU_DEBUG
- depends on OMAP_IOMMU
- tristate
+ tristate "Export OMAP IOMMU internals in DebugFS"
+ depends on OMAP_IOMMU && DEBUG_FS
+ help
+ Select this to see extensive information about
+ the internal state of OMAP IOMMU in debugfs.
+
+ Say N unless you know you need this.
choice
prompt "System timer"
diff --git a/arch/arm/plat-omap/clock.c b/arch/arm/plat-omap/clock.c
index 5261a0923691..7190cbd92620 100644
--- a/arch/arm/plat-omap/clock.c
+++ b/arch/arm/plat-omap/clock.c
@@ -12,14 +12,12 @@
*/
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/module.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/clk.h>
#include <linux/mutex.h>
-#include <linux/platform_device.h>
#include <linux/cpufreq.h>
#include <linux/debugfs.h>
#include <linux/io.h>
@@ -32,9 +30,9 @@ static DEFINE_SPINLOCK(clockfw_lock);
static struct clk_functions *arch_clock;
-/*-------------------------------------------------------------------------
+/*
* Standard clock functions defined in include/linux/clk.h
- *-------------------------------------------------------------------------*/
+ */
int clk_enable(struct clk *clk)
{
@@ -92,9 +90,9 @@ unsigned long clk_get_rate(struct clk *clk)
}
EXPORT_SYMBOL(clk_get_rate);
-/*-------------------------------------------------------------------------
+/*
* Optional clock functions defined in include/linux/clk.h
- *-------------------------------------------------------------------------*/
+ */
long clk_round_rate(struct clk *clk, unsigned long rate)
{
@@ -140,9 +138,6 @@ int clk_set_parent(struct clk *clk, struct clk *parent)
unsigned long flags;
int ret = -EINVAL;
- if (cpu_is_omap44xx())
- /* OMAP4 clk framework not supported yet */
- return 0;
if (clk == NULL || IS_ERR(clk) || parent == NULL || IS_ERR(parent))
return ret;
@@ -169,9 +164,9 @@ struct clk *clk_get_parent(struct clk *clk)
}
EXPORT_SYMBOL(clk_get_parent);
-/*-------------------------------------------------------------------------
+/*
* OMAP specific clock functions shared between omap1 and omap2
- *-------------------------------------------------------------------------*/
+ */
int __initdata mpurate;
@@ -222,7 +217,7 @@ void clk_reparent(struct clk *child, struct clk *parent)
}
/* Propagate rate to children */
-void propagate_rate(struct clk * tclk)
+void propagate_rate(struct clk *tclk)
{
struct clk *clkp;
@@ -389,7 +384,9 @@ void clk_exit_cpufreq_table(struct cpufreq_frequency_table **table)
}
#endif
-/*-------------------------------------------------------------------------*/
+/*
+ *
+ */
#ifdef CONFIG_OMAP_RESET_CLOCKS
/*
@@ -404,7 +401,7 @@ static int __init clk_disable_unused(void)
if (ck->ops == &clkops_null)
continue;
- if (ck->usecount > 0 || ck->enable_reg == 0)
+ if (ck->usecount > 0 || !ck->enable_reg)
continue;
spin_lock_irqsave(&clockfw_lock, flags);
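The functions being tidied in clock.c implement the standard consumer clk API from include/linux/clk.h. For context, a consumer-side sketch of how they are driven; the device pointer, connection id "fck" and the 48 MHz rate are hypothetical.

static int example_enable_clock(struct device *dev)
{
	struct clk *ck;
	int ret;

	ck = clk_get(dev, "fck");		/* "fck" is a placeholder con_id */
	if (IS_ERR(ck))
		return PTR_ERR(ck);

	ret = clk_enable(ck);
	if (ret) {
		clk_put(ck);
		return ret;
	}

	/* Round first, then set, as the API recommends. */
	clk_set_rate(ck, clk_round_rate(ck, 48000000));
	return 0;
}
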
diff --git a/arch/arm/plat-omap/common.c b/arch/arm/plat-omap/common.c
index f12f0e39ddf2..219c01e82bc5 100644
--- a/arch/arm/plat-omap/common.c
+++ b/arch/arm/plat-omap/common.c
@@ -47,9 +47,6 @@
struct omap_board_config_kernel *omap_board_config;
int omap_board_config_size;
-/* used by omap-smp.c and board-4430sdp.c */
-void __iomem *gic_cpu_base_addr;
-
static const void *get_config(u16 tag, size_t len, int skip, size_t *len_out)
{
struct omap_board_config_kernel *kinfo = NULL;
diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c
index 1d959965ff52..f7f571e7987e 100644
--- a/arch/arm/plat-omap/dma.c
+++ b/arch/arm/plat-omap/dma.c
@@ -501,7 +501,8 @@ void omap_set_dma_src_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
burst = 0x2;
break;
}
- /* not supported by current hardware on OMAP1
+ /*
+ * not supported by current hardware on OMAP1
* w |= (0x03 << 7);
* fall through
*/
@@ -510,7 +511,8 @@ void omap_set_dma_src_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
burst = 0x3;
break;
}
- /* OMAP1 don't support burst 16
+ /*
+ * OMAP1 doesn't support burst 16
* fall through
*/
default:
@@ -604,7 +606,8 @@ void omap_set_dma_dest_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
burst = 0x3;
break;
}
- /* OMAP1 don't support burst 16
+ /*
+ * OMAP1 doesn't support burst 16
* fall through
*/
default:
@@ -709,6 +712,21 @@ static inline void omap2_enable_irq_lch(int lch)
spin_unlock_irqrestore(&dma_chan_lock, flags);
}
+static inline void omap2_disable_irq_lch(int lch)
+{
+ u32 val;
+ unsigned long flags;
+
+ if (!cpu_class_is_omap2())
+ return;
+
+ spin_lock_irqsave(&dma_chan_lock, flags);
+ val = dma_read(IRQENABLE_L0);
+ val &= ~(1 << lch);
+ dma_write(val, IRQENABLE_L0);
+ spin_unlock_irqrestore(&dma_chan_lock, flags);
+}
+
int omap_request_dma(int dev_id, const char *dev_name,
void (*callback)(int lch, u16 ch_status, void *data),
void *data, int *dma_ch_out)
@@ -807,14 +825,7 @@ void omap_free_dma(int lch)
}
if (cpu_class_is_omap2()) {
- u32 val;
-
- spin_lock_irqsave(&dma_chan_lock, flags);
- /* Disable interrupts */
- val = dma_read(IRQENABLE_L0);
- val &= ~(1 << lch);
- dma_write(val, IRQENABLE_L0);
- spin_unlock_irqrestore(&dma_chan_lock, flags);
+ omap2_disable_irq_lch(lch);
/* Clear the CSR register and IRQ status register */
dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR(lch));
@@ -1277,8 +1288,10 @@ int omap_request_dma_chain(int dev_id, const char *dev_name,
return -EINVAL;
}
- /* Allocate a queue to maintain the status of the channels
- * in the chain */
+ /*
+ * Allocate a queue to maintain the status of the channels
+ * in the chain
+ */
channels = kmalloc(sizeof(*channels) * no_of_chans, GFP_KERNEL);
if (channels == NULL) {
printk(KERN_ERR "omap_dma: No memory for channel queue\n");
@@ -1907,7 +1920,8 @@ static int omap2_dma_handle_ch(int ch)
printk(KERN_INFO "DMA transaction error with device %d\n",
dma_chan[ch].dev_id);
if (cpu_class_is_omap2()) {
- /* Errata: sDMA Channel is not disabled
+ /*
+ * Errata: sDMA Channel is not disabled
* after a transaction error. So we explicitely
* disable the channel
*/
@@ -2107,6 +2121,9 @@ static int __init omap_init_dma(void)
for (ch = 0; ch < dma_chan_count; ch++) {
omap_clear_dma(ch);
+ if (cpu_class_is_omap2())
+ omap2_disable_irq_lch(ch);
+
dma_chan[ch].dev_id = -1;
dma_chan[ch].next_lch = -1;
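For reference, the omap_request_dma()/omap_free_dma() pair whose per-channel IRQ masking is factored into omap2_disable_irq_lch() above is driven by client drivers roughly as below. A sketch only: the device id and the callback body are placeholders.

static void example_dma_callback(int lch, u16 ch_status, void *data)
{
	/* Handle end-of-transfer or error status for channel lch here. */
}

static int example_grab_channel(void)
{
	int lch;
	int ret;

	ret = omap_request_dma(0 /* placeholder dev_id */, "example",
			       example_dma_callback, NULL, &lch);
	if (ret)
		return ret;

	/* ... program source/destination parameters and start the transfer ... */

	omap_free_dma(lch);
	return 0;
}
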
diff --git a/arch/arm/plat-omap/dmtimer.c b/arch/arm/plat-omap/dmtimer.c
index 4d99dfbc8bef..c64875f11fac 100644
--- a/arch/arm/plat-omap/dmtimer.c
+++ b/arch/arm/plat-omap/dmtimer.c
@@ -264,8 +264,8 @@ static struct omap_dm_timer omap4_dm_timers[] = {
{ .phys_base = 0x4a320000, .irq = OMAP44XX_IRQ_GPT12 },
};
static const char *omap4_dm_source_names[] __initdata = {
- "sys_ck",
- "omap_32k_fck",
+ "sys_clkin_ck",
+ "sys_32k_ck",
NULL
};
static struct clk *omap4_dm_source_clocks[2];
diff --git a/arch/arm/plat-omap/gpio.c b/arch/arm/plat-omap/gpio.c
index 45a225d09125..393e9219a5b6 100644
--- a/arch/arm/plat-omap/gpio.c
+++ b/arch/arm/plat-omap/gpio.c
@@ -27,6 +27,7 @@
#include <mach/irqs.h>
#include <mach/gpio.h>
#include <asm/mach/irq.h>
+#include <plat/powerdomain.h>
/*
* OMAP1510 GPIO registers
@@ -137,7 +138,11 @@
#define OMAP4_GPIO_IRQSTATUSCLR1 0x0040
#define OMAP4_GPIO_IRQWAKEN0 0x0044
#define OMAP4_GPIO_IRQWAKEN1 0x0048
-#define OMAP4_GPIO_SYSSTATUS 0x0104
+#define OMAP4_GPIO_SYSSTATUS 0x0114
+#define OMAP4_GPIO_IRQENABLE1 0x011c
+#define OMAP4_GPIO_WAKE_EN 0x0120
+#define OMAP4_GPIO_IRQSTATUS2 0x0128
+#define OMAP4_GPIO_IRQENABLE2 0x012c
#define OMAP4_GPIO_CTRL 0x0130
#define OMAP4_GPIO_OE 0x0134
#define OMAP4_GPIO_DATAIN 0x0138
@@ -148,6 +153,10 @@
#define OMAP4_GPIO_FALLINGDETECT 0x014c
#define OMAP4_GPIO_DEBOUNCENABLE 0x0150
#define OMAP4_GPIO_DEBOUNCINGTIME 0x0154
+#define OMAP4_GPIO_CLEARIRQENABLE1 0x0160
+#define OMAP4_GPIO_SETIRQENABLE1 0x0164
+#define OMAP4_GPIO_CLEARWKUENA 0x0180
+#define OMAP4_GPIO_SETWKUENA 0x0184
#define OMAP4_GPIO_CLEARDATAOUT 0x0190
#define OMAP4_GPIO_SETDATAOUT 0x0194
/*
@@ -195,6 +204,7 @@ struct gpio_bank {
struct gpio_chip chip;
struct clk *dbck;
u32 mod_usage;
+ u32 dbck_enable_mask;
};
#define METHOD_MPUIO 0
@@ -303,8 +313,6 @@ struct omap3_gpio_regs {
u32 risingdetect;
u32 fallingdetect;
u32 dataout;
- u32 setwkuena;
- u32 setdataout;
};
static struct omap3_gpio_regs gpio_context[OMAP34XX_NR_GPIOS];
@@ -591,12 +599,16 @@ static int _get_gpio_dataout(struct gpio_bank *bank, int gpio)
reg += OMAP7XX_GPIO_DATA_OUTPUT;
break;
#endif
-#ifdef CONFIG_ARCH_OMAP2PLUS
+#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
case METHOD_GPIO_24XX:
- case METHOD_GPIO_44XX:
reg += OMAP24XX_GPIO_DATAOUT;
break;
#endif
+#ifdef CONFIG_ARCH_OMAP4
+ case METHOD_GPIO_44XX:
+ reg += OMAP4_GPIO_DATAOUT;
+ break;
+#endif
default:
return -EINVAL;
}
@@ -612,78 +624,58 @@ do { \
__raw_writel(l, base + reg); \
} while(0)
-void omap_set_gpio_debounce(int gpio, int enable)
+/**
+ * _set_gpio_debounce - low level gpio debounce time
+ * @bank: the gpio bank we're acting upon
+ * @gpio: the gpio number on this @bank
+ * @debounce: debounce time to use
+ *
+ * OMAP's debounce time is in 31us steps so we need
+ * to convert and round up to the closest unit.
+ */
+static void _set_gpio_debounce(struct gpio_bank *bank, unsigned gpio,
+ unsigned debounce)
{
- struct gpio_bank *bank;
- void __iomem *reg;
- unsigned long flags;
- u32 val, l = 1 << get_gpio_index(gpio);
+ void __iomem *reg = bank->base;
+ u32 val;
+ u32 l;
+
+ if (debounce < 32)
+ debounce = 0x01;
+ else if (debounce > 7936)
+ debounce = 0xff;
+ else
+ debounce = (debounce / 0x1f) - 1;
- if (cpu_class_is_omap1())
- return;
+ l = 1 << get_gpio_index(gpio);
- bank = get_gpio_bank(gpio);
- reg = bank->base;
+ if (cpu_is_omap44xx())
+ reg += OMAP4_GPIO_DEBOUNCINGTIME;
+ else
+ reg += OMAP24XX_GPIO_DEBOUNCE_VAL;
+
+ __raw_writel(debounce, reg);
+ reg = bank->base;
if (cpu_is_omap44xx())
reg += OMAP4_GPIO_DEBOUNCENABLE;
else
reg += OMAP24XX_GPIO_DEBOUNCE_EN;
- if (!(bank->mod_usage & l)) {
- printk(KERN_ERR "GPIO %d not requested\n", gpio);
- return;
- }
-
- spin_lock_irqsave(&bank->lock, flags);
val = __raw_readl(reg);
- if (enable && !(val & l))
+ if (debounce) {
val |= l;
- else if (!enable && (val & l))
- val &= ~l;
- else
- goto done;
-
- if (cpu_is_omap34xx() || cpu_is_omap44xx()) {
- if (enable)
+ if (cpu_is_omap34xx() || cpu_is_omap44xx())
clk_enable(bank->dbck);
- else
+ } else {
+ val &= ~l;
+ if (cpu_is_omap34xx() || cpu_is_omap44xx())
clk_disable(bank->dbck);
}
__raw_writel(val, reg);
-done:
- spin_unlock_irqrestore(&bank->lock, flags);
}
-EXPORT_SYMBOL(omap_set_gpio_debounce);
-
-void omap_set_gpio_debounce_time(int gpio, int enc_time)
-{
- struct gpio_bank *bank;
- void __iomem *reg;
-
- if (cpu_class_is_omap1())
- return;
-
- bank = get_gpio_bank(gpio);
- reg = bank->base;
-
- if (!bank->mod_usage) {
- printk(KERN_ERR "GPIO not requested\n");
- return;
- }
-
- enc_time &= 0xff;
-
- if (cpu_is_omap44xx())
- reg += OMAP4_GPIO_DEBOUNCINGTIME;
- else
- reg += OMAP24XX_GPIO_DEBOUNCE_VAL;
-
- __raw_writel(enc_time, reg);
-}
-EXPORT_SYMBOL(omap_set_gpio_debounce_time);
#ifdef CONFIG_ARCH_OMAP2PLUS
static inline void set_24xx_gpio_triggering(struct gpio_bank *bank, int gpio,
@@ -724,15 +716,27 @@ static inline void set_24xx_gpio_triggering(struct gpio_bank *bank, int gpio,
OMAP4_GPIO_IRQWAKEN0);
}
} else {
- if (trigger != 0)
+ /*
+ * GPIO wakeup request can only be generated on edge
+ * transitions
+ */
+ if (trigger & IRQ_TYPE_EDGE_BOTH)
__raw_writel(1 << gpio, bank->base
+ OMAP24XX_GPIO_SETWKUENA);
else
__raw_writel(1 << gpio, bank->base
+ OMAP24XX_GPIO_CLEARWKUENA);
}
- } else {
- if (trigger != 0)
+ }
+ /* This part needs to be executed always for OMAP34xx */
+ if (cpu_is_omap34xx() || (bank->non_wakeup_gpios & gpio_bit)) {
+ /*
+ * Log the edge gpio and manually trigger the IRQ
+ * after resume if the input level changes
+ * to avoid a lost IRQ during PER RET/OFF mode
+ * Applies for omap2 non-wakeup gpio and all omap3 gpios
+ */
+ if (trigger & IRQ_TYPE_EDGE_BOTH)
bank->enabled_non_wakeup_gpios |= gpio_bit;
else
bank->enabled_non_wakeup_gpios &= ~gpio_bit;
@@ -1200,11 +1204,17 @@ static int omap_gpio_request(struct gpio_chip *chip, unsigned offset)
#endif
if (!cpu_class_is_omap1()) {
if (!bank->mod_usage) {
+ void __iomem *reg = bank->base;
u32 ctrl;
- ctrl = __raw_readl(bank->base + OMAP24XX_GPIO_CTRL);
- ctrl &= 0xFFFFFFFE;
+
+ if (cpu_is_omap24xx() || cpu_is_omap34xx())
+ reg += OMAP24XX_GPIO_CTRL;
+ else if (cpu_is_omap44xx())
+ reg += OMAP4_GPIO_CTRL;
+ ctrl = __raw_readl(reg);
/* Module is enabled, clocks are not gated */
- __raw_writel(ctrl, bank->base + OMAP24XX_GPIO_CTRL);
+ ctrl &= 0xFFFFFFFE;
+ __raw_writel(ctrl, reg);
}
bank->mod_usage |= 1 << offset;
}
@@ -1226,22 +1236,34 @@ static void omap_gpio_free(struct gpio_chip *chip, unsigned offset)
__raw_writel(1 << offset, reg);
}
#endif
-#ifdef CONFIG_ARCH_OMAP2PLUS
- if ((bank->method == METHOD_GPIO_24XX) ||
- (bank->method == METHOD_GPIO_44XX)) {
+#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
+ if (bank->method == METHOD_GPIO_24XX) {
/* Disable wake-up during idle for dynamic tick */
void __iomem *reg = bank->base + OMAP24XX_GPIO_CLEARWKUENA;
__raw_writel(1 << offset, reg);
}
#endif
+#ifdef CONFIG_ARCH_OMAP4
+ if (bank->method == METHOD_GPIO_44XX) {
+ /* Disable wake-up during idle for dynamic tick */
+ void __iomem *reg = bank->base + OMAP4_GPIO_IRQWAKEN0;
+ __raw_writel(1 << offset, reg);
+ }
+#endif
if (!cpu_class_is_omap1()) {
bank->mod_usage &= ~(1 << offset);
if (!bank->mod_usage) {
+ void __iomem *reg = bank->base;
u32 ctrl;
- ctrl = __raw_readl(bank->base + OMAP24XX_GPIO_CTRL);
+
+ if (cpu_is_omap24xx() || cpu_is_omap34xx())
+ reg += OMAP24XX_GPIO_CTRL;
+ else if (cpu_is_omap44xx())
+ reg += OMAP4_GPIO_CTRL;
+ ctrl = __raw_readl(reg);
/* Module is disabled, clocks are gated */
ctrl |= 1;
- __raw_writel(ctrl, bank->base + OMAP24XX_GPIO_CTRL);
+ __raw_writel(ctrl, reg);
}
}
_reset_gpio(bank, bank->chip.base + offset);
@@ -1570,9 +1592,14 @@ static int gpio_is_input(struct gpio_bank *bank, int mask)
reg += OMAP7XX_GPIO_DIR_CONTROL;
break;
case METHOD_GPIO_24XX:
- case METHOD_GPIO_44XX:
reg += OMAP24XX_GPIO_OE;
break;
+ case METHOD_GPIO_44XX:
+ reg += OMAP4_GPIO_OE;
+ break;
+ default:
+ WARN_ONCE(1, "gpio_is_input: incorrect OMAP GPIO method");
+ return -EINVAL;
}
return __raw_readl(reg) & mask;
}
@@ -1608,6 +1635,20 @@ static int gpio_output(struct gpio_chip *chip, unsigned offset, int value)
return 0;
}
+static int gpio_debounce(struct gpio_chip *chip, unsigned offset,
+ unsigned debounce)
+{
+ struct gpio_bank *bank;
+ unsigned long flags;
+
+ bank = container_of(chip, struct gpio_bank, chip);
+ spin_lock_irqsave(&bank->lock, flags);
+ _set_gpio_debounce(bank, offset, debounce);
+ spin_unlock_irqrestore(&bank->lock, flags);
+
+ return 0;
+}
+
static void gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
struct gpio_bank *bank;
@@ -1845,7 +1886,8 @@ static int __init _omap_gpio_init(void)
__raw_writel(0, bank->base +
OMAP24XX_GPIO_CTRL);
}
- if (i < ARRAY_SIZE(non_wakeup_gpios))
+ if (cpu_is_omap24xx() &&
+ i < ARRAY_SIZE(non_wakeup_gpios))
bank->non_wakeup_gpios = non_wakeup_gpios[i];
gpio_count = 32;
}
@@ -1860,6 +1902,7 @@ static int __init _omap_gpio_init(void)
bank->chip.direction_input = gpio_input;
bank->chip.get = gpio_get;
bank->chip.direction_output = gpio_output;
+ bank->chip.set_debounce = gpio_debounce;
bank->chip.set = gpio_set;
bank->chip.to_irq = gpio_2irq;
if (bank_is_mpuio(bank)) {
@@ -2028,16 +2071,27 @@ static struct sys_device omap_gpio_device = {
static int workaround_enabled;
-void omap2_gpio_prepare_for_retention(void)
+void omap2_gpio_prepare_for_idle(int power_state)
{
int i, c = 0;
+ int min = 0;
- /* Remove triggering for all non-wakeup GPIOs. Otherwise spurious
- * IRQs will be generated. See OMAP2420 Errata item 1.101. */
- for (i = 0; i < gpio_bank_count; i++) {
+ if (cpu_is_omap34xx())
+ min = 1;
+
+ for (i = min; i < gpio_bank_count; i++) {
struct gpio_bank *bank = &gpio_bank[i];
u32 l1, l2;
+ if (bank->dbck_enable_mask)
+ clk_disable(bank->dbck);
+
+ if (power_state > PWRDM_POWER_OFF)
+ continue;
+
+ /* If going to OFF, remove triggering for all
+ * non-wakeup GPIOs. Otherwise spurious IRQs will be
+ * generated. See OMAP2420 Errata item 1.101. */
if (!(bank->enabled_non_wakeup_gpios))
continue;
@@ -2085,16 +2139,23 @@ void omap2_gpio_prepare_for_retention(void)
workaround_enabled = 1;
}
-void omap2_gpio_resume_after_retention(void)
+void omap2_gpio_resume_after_idle(void)
{
int i;
+ int min = 0;
- if (!workaround_enabled)
- return;
- for (i = 0; i < gpio_bank_count; i++) {
+ if (cpu_is_omap34xx())
+ min = 1;
+ for (i = min; i < gpio_bank_count; i++) {
struct gpio_bank *bank = &gpio_bank[i];
u32 l, gen, gen0, gen1;
+ if (bank->dbck_enable_mask)
+ clk_enable(bank->dbck);
+
+ if (!workaround_enabled)
+ continue;
+
if (!(bank->enabled_non_wakeup_gpios))
continue;
@@ -2119,7 +2180,7 @@ void omap2_gpio_resume_after_retention(void)
* horribly racy, but it's the best we can do to work around
* this silicon bug. */
l ^= bank->saved_datain;
- l &= bank->non_wakeup_gpios;
+ l &= bank->enabled_non_wakeup_gpios;
/*
* No need to generate IRQs for the rising edge for gpio IRQs
@@ -2207,10 +2268,6 @@ void omap_gpio_save_context(void)
__raw_readl(bank->base + OMAP24XX_GPIO_FALLINGDETECT);
gpio_context[i].dataout =
__raw_readl(bank->base + OMAP24XX_GPIO_DATAOUT);
- gpio_context[i].setwkuena =
- __raw_readl(bank->base + OMAP24XX_GPIO_SETWKUENA);
- gpio_context[i].setdataout =
- __raw_readl(bank->base + OMAP24XX_GPIO_SETDATAOUT);
}
}
@@ -2243,10 +2300,6 @@ void omap_gpio_restore_context(void)
bank->base + OMAP24XX_GPIO_FALLINGDETECT);
__raw_writel(gpio_context[i].dataout,
bank->base + OMAP24XX_GPIO_DATAOUT);
- __raw_writel(gpio_context[i].setwkuena,
- bank->base + OMAP24XX_GPIO_SETWKUENA);
- __raw_writel(gpio_context[i].setdataout,
- bank->base + OMAP24XX_GPIO_SETDATAOUT);
}
}
#endif
@@ -2286,110 +2339,3 @@ static int __init omap_gpio_sysinit(void)
}
arch_initcall(omap_gpio_sysinit);
-
-
-#ifdef CONFIG_DEBUG_FS
-
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-
-static int dbg_gpio_show(struct seq_file *s, void *unused)
-{
- unsigned i, j, gpio;
-
- for (i = 0, gpio = 0; i < gpio_bank_count; i++) {
- struct gpio_bank *bank = gpio_bank + i;
- unsigned bankwidth = 16;
- u32 mask = 1;
-
- if (bank_is_mpuio(bank))
- gpio = OMAP_MPUIO(0);
- else if (cpu_class_is_omap2() || cpu_is_omap7xx())
- bankwidth = 32;
-
- for (j = 0; j < bankwidth; j++, gpio++, mask <<= 1) {
- unsigned irq, value, is_in, irqstat;
- const char *label;
-
- label = gpiochip_is_requested(&bank->chip, j);
- if (!label)
- continue;
-
- irq = bank->virtual_irq_start + j;
- value = gpio_get_value(gpio);
- is_in = gpio_is_input(bank, mask);
-
- if (bank_is_mpuio(bank))
- seq_printf(s, "MPUIO %2d ", j);
- else
- seq_printf(s, "GPIO %3d ", gpio);
- seq_printf(s, "(%-20.20s): %s %s",
- label,
- is_in ? "in " : "out",
- value ? "hi" : "lo");
-
-/* FIXME for at least omap2, show pullup/pulldown state */
-
- irqstat = irq_desc[irq].status;
-#if defined(CONFIG_ARCH_OMAP16XX) || defined(CONFIG_ARCH_OMAP2PLUS)
- if (is_in && ((bank->suspend_wakeup & mask)
- || irqstat & IRQ_TYPE_SENSE_MASK)) {
- char *trigger = NULL;
-
- switch (irqstat & IRQ_TYPE_SENSE_MASK) {
- case IRQ_TYPE_EDGE_FALLING:
- trigger = "falling";
- break;
- case IRQ_TYPE_EDGE_RISING:
- trigger = "rising";
- break;
- case IRQ_TYPE_EDGE_BOTH:
- trigger = "bothedge";
- break;
- case IRQ_TYPE_LEVEL_LOW:
- trigger = "low";
- break;
- case IRQ_TYPE_LEVEL_HIGH:
- trigger = "high";
- break;
- case IRQ_TYPE_NONE:
- trigger = "(?)";
- break;
- }
- seq_printf(s, ", irq-%d %-8s%s",
- irq, trigger,
- (bank->suspend_wakeup & mask)
- ? " wakeup" : "");
- }
-#endif
- seq_printf(s, "\n");
- }
-
- if (bank_is_mpuio(bank)) {
- seq_printf(s, "\n");
- gpio = 0;
- }
- }
- return 0;
-}
-
-static int dbg_gpio_open(struct inode *inode, struct file *file)
-{
- return single_open(file, dbg_gpio_show, &inode->i_private);
-}
-
-static const struct file_operations debug_fops = {
- .open = dbg_gpio_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int __init omap_gpio_debuginit(void)
-{
- (void) debugfs_create_file("omap_gpio", S_IRUGO,
- NULL, NULL, &debug_fops);
- return 0;
-}
-late_initcall(omap_gpio_debuginit);
-#endif
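With the set_debounce hook wired into the gpio_chip above, drivers can request debouncing through gpiolib instead of the removed omap_set_gpio_debounce*() calls. A sketch assuming the generic gpio_set_debounce() entry point; 7936 us is the largest value the 31 us/step hardware field encodes (0xff steps), per the conversion in _set_gpio_debounce().

static int example_setup_button(unsigned gpio)
{
	int ret;

	ret = gpio_request(gpio, "button");
	if (ret)
		return ret;

	gpio_direction_input(gpio);
	/* Debounce time in microseconds; clamped to 31 us..7936 us in hardware. */
	return gpio_set_debounce(gpio, 7936);
}
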
diff --git a/arch/arm/plat-omap/i2c.c b/arch/arm/plat-omap/i2c.c
index 624e26298faa..eec2b4993c69 100644
--- a/arch/arm/plat-omap/i2c.c
+++ b/arch/arm/plat-omap/i2c.c
@@ -26,15 +26,19 @@
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/i2c.h>
+#include <linux/i2c-omap.h>
+
#include <mach/irqs.h>
#include <plat/mux.h>
#include <plat/i2c.h>
+#include <plat/omap-pm.h>
#define OMAP_I2C_SIZE 0x3f
#define OMAP1_I2C_BASE 0xfffb3800
#define OMAP2_I2C_BASE1 0x48070000
#define OMAP2_I2C_BASE2 0x48072000
#define OMAP2_I2C_BASE3 0x48060000
+#define OMAP4_I2C_BASE4 0x48350000
static const char name[] = "i2c_omap";
@@ -51,11 +55,14 @@ static const char name[] = "i2c_omap";
static struct resource i2c_resources[][2] = {
{ I2C_RESOURCE_BUILDER(0, 0) },
-#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
- { I2C_RESOURCE_BUILDER(OMAP2_I2C_BASE2, INT_24XX_I2C2_IRQ) },
+#if defined(CONFIG_ARCH_OMAP2PLUS)
+ { I2C_RESOURCE_BUILDER(OMAP2_I2C_BASE2, 0) },
+#endif
+#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4)
+ { I2C_RESOURCE_BUILDER(OMAP2_I2C_BASE3, 0) },
#endif
-#if defined(CONFIG_ARCH_OMAP3)
- { I2C_RESOURCE_BUILDER(OMAP2_I2C_BASE3, INT_34XX_I2C3_IRQ) },
+#if defined(CONFIG_ARCH_OMAP4)
+ { I2C_RESOURCE_BUILDER(OMAP4_I2C_BASE4, 0) },
#endif
};
@@ -70,14 +77,17 @@ static struct resource i2c_resources[][2] = {
}, \
}
-static u32 i2c_rate[ARRAY_SIZE(i2c_resources)];
+static struct omap_i2c_bus_platform_data i2c_pdata[ARRAY_SIZE(i2c_resources)];
static struct platform_device omap_i2c_devices[] = {
- I2C_DEV_BUILDER(1, i2c_resources[0], &i2c_rate[0]),
-#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
- I2C_DEV_BUILDER(2, i2c_resources[1], &i2c_rate[1]),
+ I2C_DEV_BUILDER(1, i2c_resources[0], &i2c_pdata[0]),
+#if defined(CONFIG_ARCH_OMAP2PLUS)
+ I2C_DEV_BUILDER(2, i2c_resources[1], &i2c_pdata[1]),
+#endif
+#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4)
+ I2C_DEV_BUILDER(3, i2c_resources[2], &i2c_pdata[2]),
#endif
-#if defined(CONFIG_ARCH_OMAP3)
- I2C_DEV_BUILDER(3, i2c_resources[2], &i2c_rate[2]),
+#if defined(CONFIG_ARCH_OMAP4)
+ I2C_DEV_BUILDER(4, i2c_resources[3], &i2c_pdata[3]),
#endif
};
@@ -93,39 +103,89 @@ static int __init omap_i2c_nr_ports(void)
ports = 2;
else if (cpu_is_omap34xx())
ports = 3;
+ else if (cpu_is_omap44xx())
+ ports = 4;
return ports;
}
-static int __init omap_i2c_add_bus(int bus_id)
+/* Shared between omap2 and 3 */
+static resource_size_t omap2_i2c_irq[3] __initdata = {
+ INT_24XX_I2C1_IRQ,
+ INT_24XX_I2C2_IRQ,
+ INT_34XX_I2C3_IRQ,
+};
+
+static resource_size_t omap4_i2c_irq[4] __initdata = {
+ OMAP44XX_IRQ_I2C1,
+ OMAP44XX_IRQ_I2C2,
+ OMAP44XX_IRQ_I2C3,
+ OMAP44XX_IRQ_I2C4,
+};
+
+static inline int omap1_i2c_add_bus(struct platform_device *pdev, int bus_id)
{
- struct platform_device *pdev;
+ struct omap_i2c_bus_platform_data *pd;
struct resource *res;
- resource_size_t base, irq;
- pdev = &omap_i2c_devices[bus_id - 1];
+ pd = pdev->dev.platform_data;
+ res = pdev->resource;
+ res[0].start = OMAP1_I2C_BASE;
+ res[0].end = res[0].start + OMAP_I2C_SIZE;
+ res[1].start = INT_I2C;
+ omap1_i2c_mux_pins(bus_id);
+
+ return platform_device_register(pdev);
+}
+
+static inline int omap2_i2c_add_bus(struct platform_device *pdev, int bus_id)
+{
+ struct resource *res;
+ resource_size_t *irq;
+
+ res = pdev->resource;
+
+ if (!cpu_is_omap44xx())
+ irq = omap2_i2c_irq;
+ else
+ irq = omap4_i2c_irq;
+
if (bus_id == 1) {
- res = pdev->resource;
- if (cpu_class_is_omap1()) {
- base = OMAP1_I2C_BASE;
- irq = INT_I2C;
- } else {
- base = OMAP2_I2C_BASE1;
- irq = INT_24XX_I2C1_IRQ;
- }
- res[0].start = base;
- res[0].end = base + OMAP_I2C_SIZE;
- res[1].start = irq;
+ res[0].start = OMAP2_I2C_BASE1;
+ res[0].end = res[0].start + OMAP_I2C_SIZE;
}
- if (cpu_class_is_omap1())
- omap1_i2c_mux_pins(bus_id);
- if (cpu_class_is_omap2())
- omap2_i2c_mux_pins(bus_id);
+ res[1].start = irq[bus_id - 1];
+ omap2_i2c_mux_pins(bus_id);
+
+ /*
+	 * When waiting for completion of an i2c transfer, we need to
+	 * set a wake-up latency constraint for the MPU. This is to
+	 * ensure a quick enough wakeup from idle when the transfer
+	 * completes.
+ */
+ if (cpu_is_omap34xx()) {
+ struct omap_i2c_bus_platform_data *pd;
+
+ pd = pdev->dev.platform_data;
+ pd->set_mpu_wkup_lat = omap_pm_set_max_mpu_wakeup_lat;
+ }
return platform_device_register(pdev);
}
+static int __init omap_i2c_add_bus(int bus_id)
+{
+ struct platform_device *pdev;
+
+ pdev = &omap_i2c_devices[bus_id - 1];
+
+ if (cpu_class_is_omap1())
+ return omap1_i2c_add_bus(pdev, bus_id);
+ else
+ return omap2_i2c_add_bus(pdev, bus_id);
+}
+
/**
* omap_i2c_bus_setup - Process command line options for the I2C bus speed
* @str: String of options
@@ -146,8 +206,8 @@ static int __init omap_i2c_bus_setup(char *str)
get_options(str, 3, ints);
if (ints[0] < 2 || ints[1] < 1 || ints[1] > ports)
return 0;
- i2c_rate[ints[1] - 1] = ints[2];
- i2c_rate[ints[1] - 1] |= OMAP_I2C_CMDLINE_SETUP;
+ i2c_pdata[ints[1] - 1].clkrate = ints[2];
+ i2c_pdata[ints[1] - 1].clkrate |= OMAP_I2C_CMDLINE_SETUP;
return 1;
}
@@ -161,9 +221,9 @@ static int __init omap_register_i2c_bus_cmdline(void)
{
int i, err = 0;
- for (i = 0; i < ARRAY_SIZE(i2c_rate); i++)
- if (i2c_rate[i] & OMAP_I2C_CMDLINE_SETUP) {
- i2c_rate[i] &= ~OMAP_I2C_CMDLINE_SETUP;
+ for (i = 0; i < ARRAY_SIZE(i2c_pdata); i++)
+ if (i2c_pdata[i].clkrate & OMAP_I2C_CMDLINE_SETUP) {
+ i2c_pdata[i].clkrate &= ~OMAP_I2C_CMDLINE_SETUP;
err = omap_i2c_add_bus(i + 1);
if (err)
goto out;
@@ -197,9 +257,10 @@ int __init omap_register_i2c_bus(int bus_id, u32 clkrate,
return err;
}
- if (!i2c_rate[bus_id - 1])
- i2c_rate[bus_id - 1] = clkrate;
- i2c_rate[bus_id - 1] &= ~OMAP_I2C_CMDLINE_SETUP;
+ if (!i2c_pdata[bus_id - 1].clkrate)
+ i2c_pdata[bus_id - 1].clkrate = clkrate;
+
+ i2c_pdata[bus_id - 1].clkrate &= ~OMAP_I2C_CMDLINE_SETUP;
return omap_i2c_add_bus(bus_id);
}
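The clkrate bookkeeping above also feeds the command-line path: omap_i2c_bus_setup() parses a "<bus>,<rate>" pair with get_options(), stores the rate in i2c_pdata[] and tags it with OMAP_I2C_CMDLINE_SETUP until the bus is registered. A minimal board-file sketch against this interface follows; only the first two parameters of omap_register_i2c_bus() appear in the hunk above, so the trailing board-info arguments and the slave entry are assumptions for illustration.

#include <linux/i2c.h>
#include <linux/init.h>
#include <plat/i2c.h>

/* Hypothetical slave on bus 1; chip name and address are illustrative only. */
static struct i2c_board_info __initdata example_i2c1_boardinfo[] = {
	{ I2C_BOARD_INFO("twl4030", 0x48), },
};

static int __init example_i2c_init(void)
{
	/* bus 1 at 2600 kHz with one slave, bus 2 at 400 kHz with none */
	omap_register_i2c_bus(1, 2600, example_i2c1_boardinfo,
			      ARRAY_SIZE(example_i2c1_boardinfo));
	omap_register_i2c_bus(2, 400, NULL, 0);
	return 0;
}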
diff --git a/arch/arm/plat-omap/include/plat/clock.h b/arch/arm/plat-omap/include/plat/clock.h
index 34f7fa9ad4c0..dfc472ca0cc4 100644
--- a/arch/arm/plat-omap/include/plat/clock.h
+++ b/arch/arm/plat-omap/include/plat/clock.h
@@ -196,15 +196,15 @@ extern struct clk dummy_ck;
#define INVERT_ENABLE (1 << 4) /* 0 enables, 1 disables */
/* Clksel_rate flags */
-#define DEFAULT_RATE (1 << 0)
-#define RATE_IN_242X (1 << 1)
-#define RATE_IN_243X (1 << 2)
-#define RATE_IN_343X (1 << 3) /* rates common to all 343X */
-#define RATE_IN_3430ES2 (1 << 4) /* 3430ES2 rates only */
-#define RATE_IN_36XX (1 << 5)
-#define RATE_IN_4430 (1 << 6)
+#define RATE_IN_242X (1 << 0)
+#define RATE_IN_243X (1 << 1)
+#define RATE_IN_3XXX (1 << 2) /* rates common to all OMAP3 */
+#define RATE_IN_3430ES2 (1 << 3) /* 3430ES2 rates only */
+#define RATE_IN_36XX (1 << 4)
+#define RATE_IN_4430 (1 << 5)
#define RATE_IN_24XX (RATE_IN_242X | RATE_IN_243X)
+#define RATE_IN_3430ES2PLUS (RATE_IN_3430ES2 | RATE_IN_36XX)
#endif
diff --git a/arch/arm/plat-omap/include/plat/common.h b/arch/arm/plat-omap/include/plat/common.h
index 7556e271942e..d265018f5e6b 100644
--- a/arch/arm/plat-omap/include/plat/common.h
+++ b/arch/arm/plat-omap/include/plat/common.h
@@ -31,9 +31,6 @@
struct sys_timer;
-/* used by omap-smp.c and board-4430sdp.c */
-extern void __iomem *gic_cpu_base_addr;
-
extern void omap_map_common_io(void);
extern struct sys_timer omap_timer;
diff --git a/arch/arm/plat-omap/include/plat/control.h b/arch/arm/plat-omap/include/plat/control.h
index a56deee97676..131bf405c2f6 100644
--- a/arch/arm/plat-omap/include/plat/control.h
+++ b/arch/arm/plat-omap/include/plat/control.h
@@ -207,6 +207,9 @@
/* 44xx control status register offset */
#define OMAP44XX_CONTROL_STATUS 0x2c4
+/* 44xx-only CONTROL_GENERAL register offsets */
+#define OMAP44XX_CONTROL_MMC1 0x628
+#define OMAP44XX_CONTROL_PBIAS_LITE 0x600
/*
* REVISIT: This list of registers is not comprehensive - there are more
* that should be added.
@@ -252,6 +255,23 @@
#define OMAP2_PBIASLITEPWRDNZ0 (1 << 1)
#define OMAP2_PBIASLITEVMODE0 (1 << 0)
+/* CONTROL_PBIAS_LITE bits for OMAP4 */
+#define OMAP4_MMC1_PWRDNZ (1 << 26)
+#define OMAP4_MMC1_PBIASLITE_HIZ_MODE (1 << 25)
+#define OMAP4_MMC1_PBIASLITE_SUPPLY_HI_OUT (1 << 24)
+#define OMAP4_MMC1_PBIASLITE_VMODE_ERROR (1 << 23)
+#define OMAP4_MMC1_PBIASLITE_PWRDNZ (1 << 22)
+#define OMAP4_MMC1_PBIASLITE_VMODE (1 << 21)
+#define OMAP4_USBC1_ICUSB_PWRDNZ (1 << 20)
+
+#define OMAP4_CONTROL_SDMMC1_PUSTRENGTHGRP0 (1 << 31)
+#define OMAP4_CONTROL_SDMMC1_PUSTRENGTHGRP1 (1 << 30)
+#define OMAP4_CONTROL_SDMMC1_PUSTRENGTHGRP2 (1 << 29)
+#define OMAP4_CONTROL_SDMMC1_PUSTRENGTHGRP3 (1 << 28)
+#define OMAP4_CONTROL_SDMMC1_DR0_SPEEDCTRL (1 << 27)
+#define OMAP4_CONTROL_SDMMC1_DR1_SPEEDCTRL (1 << 26)
+#define OMAP4_CONTROL_SDMMC1_DR2_SPEEDCTRL (1 << 25)
+
/* CONTROL_PROG_IO1 bits */
#define OMAP3630_PRG_SDMMC1_SPEEDCTRL (1 << 20)
diff --git a/arch/arm/plat-omap/include/plat/gpio.h b/arch/arm/plat-omap/include/plat/gpio.h
index de7c54731cbe..de1c604962eb 100644
--- a/arch/arm/plat-omap/include/plat/gpio.h
+++ b/arch/arm/plat-omap/include/plat/gpio.h
@@ -72,8 +72,8 @@
IH_GPIO_BASE + (nr))
extern int omap_gpio_init(void); /* Call from board init only */
-extern void omap2_gpio_prepare_for_retention(void);
-extern void omap2_gpio_resume_after_retention(void);
+extern void omap2_gpio_prepare_for_idle(int power_state);
+extern void omap2_gpio_resume_after_idle(void);
extern void omap_set_gpio_debounce(int gpio, int enable);
extern void omap_set_gpio_debounce_time(int gpio, int enable);
extern void omap_gpio_save_context(void);
diff --git a/arch/arm/plat-omap/include/plat/irqs.h b/arch/arm/plat-omap/include/plat/irqs.h
index 401701977dbb..c01d9f08a198 100644
--- a/arch/arm/plat-omap/include/plat/irqs.h
+++ b/arch/arm/plat-omap/include/plat/irqs.h
@@ -428,4 +428,8 @@ void omap3_intc_resume_idle(void);
#include <mach/hardware.h>
+#ifdef CONFIG_FIQ
+#define FIQ_START 1024
+#endif
+
#endif
diff --git a/arch/arm/plat-omap/include/plat/mcbsp.h b/arch/arm/plat-omap/include/plat/mcbsp.h
index 7de903d7c1ce..975744f10a58 100644
--- a/arch/arm/plat-omap/include/plat/mcbsp.h
+++ b/arch/arm/plat-omap/include/plat/mcbsp.h
@@ -149,6 +149,8 @@
#define OMAP_MCBSP_REG_WAKEUPEN 0xA8
#define OMAP_MCBSP_REG_XCCR 0xAC
#define OMAP_MCBSP_REG_RCCR 0xB0
+#define OMAP_MCBSP_REG_XBUFFSTAT 0xB4
+#define OMAP_MCBSP_REG_RBUFFSTAT 0xB8
#define OMAP_MCBSP_REG_SSELCR 0xBC
#define OMAP_ST_REG_REV 0x00
@@ -471,6 +473,8 @@ void omap_mcbsp_set_tx_threshold(unsigned int id, u16 threshold);
void omap_mcbsp_set_rx_threshold(unsigned int id, u16 threshold);
u16 omap_mcbsp_get_max_tx_threshold(unsigned int id);
u16 omap_mcbsp_get_max_rx_threshold(unsigned int id);
+u16 omap_mcbsp_get_tx_delay(unsigned int id);
+u16 omap_mcbsp_get_rx_delay(unsigned int id);
int omap_mcbsp_get_dma_op_mode(unsigned int id);
#else
static inline void omap_mcbsp_set_tx_threshold(unsigned int id, u16 threshold)
@@ -479,6 +483,8 @@ static inline void omap_mcbsp_set_rx_threshold(unsigned int id, u16 threshold)
{ }
static inline u16 omap_mcbsp_get_max_tx_threshold(unsigned int id) { return 0; }
static inline u16 omap_mcbsp_get_max_rx_threshold(unsigned int id) { return 0; }
+static inline u16 omap_mcbsp_get_tx_delay(unsigned int id) { return 0; }
+static inline u16 omap_mcbsp_get_rx_delay(unsigned int id) { return 0; }
static inline int omap_mcbsp_get_dma_op_mode(unsigned int id) { return 0; }
#endif
int omap_mcbsp_request(unsigned int id);
diff --git a/arch/arm/plat-omap/include/plat/mmc.h b/arch/arm/plat-omap/include/plat/mmc.h
index a1bac07c89eb..c835f1e994c6 100644
--- a/arch/arm/plat-omap/include/plat/mmc.h
+++ b/arch/arm/plat-omap/include/plat/mmc.h
@@ -102,6 +102,10 @@ struct omap_mmc_platform_data {
/* Regulator off remapped to sleep */
unsigned vcc_aux_disable_is_sleep:1;
+ /* we can put the features above into this variable */
+#define HSMMC_HAS_PBIAS (1 << 0)
+ unsigned features;
+
int switch_pin; /* gpio (card detect) */
int gpio_wp; /* gpio (write protect) */
diff --git a/arch/arm/plat-omap/include/plat/multi.h b/arch/arm/plat-omap/include/plat/multi.h
index f235d32cd942..ffd909fa5287 100644
--- a/arch/arm/plat-omap/include/plat/multi.h
+++ b/arch/arm/plat-omap/include/plat/multi.h
@@ -61,9 +61,9 @@
# define OMAP_NAME omap16xx
# endif
#endif
-#if (defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3))
+#ifdef CONFIG_ARCH_OMAP2PLUS
# if (defined(OMAP_NAME) || defined(MULTI_OMAP1))
-# error "OMAP1 and OMAP2 can't be selected at the same time"
+# error "OMAP1 and OMAP2PLUS can't be selected at the same time"
# endif
#endif
#ifdef CONFIG_ARCH_OMAP2420
@@ -82,12 +82,20 @@
# define OMAP_NAME omap2430
# endif
#endif
-#ifdef CONFIG_ARCH_OMAP3430
+#ifdef CONFIG_ARCH_OMAP3
# ifdef OMAP_NAME
# undef MULTI_OMAP2
# define MULTI_OMAP2
# else
-# define OMAP_NAME omap3430
+# define OMAP_NAME omap3
+# endif
+#endif
+#ifdef CONFIG_ARCH_OMAP4
+# ifdef OMAP_NAME
+# undef MULTI_OMAP2
+# define MULTI_OMAP2
+# else
+# define OMAP_NAME omap4
# endif
#endif
diff --git a/arch/arm/plat-omap/include/plat/omap34xx.h b/arch/arm/plat-omap/include/plat/omap34xx.h
index 2845fdc658b0..98fc8b4a4cc4 100644
--- a/arch/arm/plat-omap/include/plat/omap34xx.h
+++ b/arch/arm/plat-omap/include/plat/omap34xx.h
@@ -82,5 +82,10 @@
#define OMAP34XX_MAILBOX_BASE (L4_34XX_BASE + 0x94000)
+/* Security */
+#define OMAP34XX_SEC_BASE (L4_34XX_BASE + 0xA0000)
+#define OMAP34XX_SEC_SHA1MD5_BASE (OMAP34XX_SEC_BASE + 0x23000)
+#define OMAP34XX_SEC_AES_BASE (OMAP34XX_SEC_BASE + 0x25000)
+
#endif /* __ASM_ARCH_OMAP3_H */
diff --git a/arch/arm/plat-omap/include/plat/omap44xx.h b/arch/arm/plat-omap/include/plat/omap44xx.h
index b3ef1a7f53cc..8b3f12ff5cbc 100644
--- a/arch/arm/plat-omap/include/plat/omap44xx.h
+++ b/arch/arm/plat-omap/include/plat/omap44xx.h
@@ -30,6 +30,7 @@
#define OMAP4430_CM_BASE OMAP4430_CM1_BASE
#define OMAP4430_CM2_BASE 0x4a008000
#define OMAP4430_PRM_BASE 0x4a306000
+#define OMAP4430_PRCM_MPU_BASE 0x48243000
#define OMAP44XX_GPMC_BASE 0x50000000
#define OMAP443X_SCM_BASE 0x4a002000
#define OMAP443X_CTRL_BASE 0x4a100000
@@ -48,5 +49,8 @@
#define OMAP44XX_MAILBOX_BASE (L4_44XX_BASE + 0xF4000)
#define OMAP44XX_HSUSB_OTG_BASE (L4_44XX_BASE + 0xAB000)
+#define OMAP4_MMU1_BASE 0x55082000
+#define OMAP4_MMU2_BASE 0x4A066000
+
#endif /* __ASM_ARCH_OMAP44XX_H */
diff --git a/arch/arm/plat-omap/include/plat/omap_hwmod.h b/arch/arm/plat-omap/include/plat/omap_hwmod.h
index 36d6ea56ab51..0eccc09ac4a9 100644
--- a/arch/arm/plat-omap/include/plat/omap_hwmod.h
+++ b/arch/arm/plat-omap/include/plat/omap_hwmod.h
@@ -176,9 +176,8 @@ struct omap_hwmod_addr_space {
#define OCP_USER_SDMA (1 << 1)
/* omap_hwmod_ocp_if.flags bits */
-#define OCPIF_HAS_IDLEST (1 << 0)
-#define OCPIF_SWSUP_IDLE (1 << 1)
-#define OCPIF_CAN_BURST (1 << 2)
+#define OCPIF_SWSUP_IDLE (1 << 0)
+#define OCPIF_CAN_BURST (1 << 1)
/**
* struct omap_hwmod_ocp_if - OCP interface data
@@ -327,14 +326,12 @@ struct omap_hwmod_omap2_prcm {
/**
* struct omap_hwmod_omap4_prcm - OMAP4-specific PRCM data
- * @module_offs: PRCM submodule offset from the start of the PRM/CM1/CM2
- * @device_offs: device register offset from @module_offs
+ * @clkctrl_reg: PRCM address of the clock control register
* @submodule_wkdep_bit: bit shift of the WKDEP range
*/
struct omap_hwmod_omap4_prcm {
- u32 module_offs;
- u16 device_offs;
- u8 submodule_wkdep_bit;
+ void __iomem *clkctrl_reg;
+ u8 submodule_wkdep_bit;
};
@@ -353,6 +350,8 @@ struct omap_hwmod_omap4_prcm {
* when module is enabled, rather than the default, which is to
* enable autoidle
* HWMOD_SET_DEFAULT_CLOCKACT: program CLOCKACTIVITY bits at startup
+ * HWMOD_NO_IDLEST: this module does not have idle status - this is the case
+ *     only for a few initiator modules on OMAP2 & 3.
*/
#define HWMOD_SWSUP_SIDLE (1 << 0)
#define HWMOD_SWSUP_MSTANDBY (1 << 1)
@@ -360,6 +359,7 @@ struct omap_hwmod_omap4_prcm {
#define HWMOD_INIT_NO_IDLE (1 << 3)
#define HWMOD_NO_OCP_AUTOIDLE (1 << 4)
#define HWMOD_SET_DEFAULT_CLOCKACT (1 << 5)
+#define HWMOD_NO_IDLEST (1 << 6)
/*
* omap_hwmod._int_flags definitions
diff --git a/arch/arm/plat-omap/include/plat/powerdomain.h b/arch/arm/plat-omap/include/plat/powerdomain.h
index d82b2c00d4f1..fb6ec74fe39e 100644
--- a/arch/arm/plat-omap/include/plat/powerdomain.h
+++ b/arch/arm/plat-omap/include/plat/powerdomain.h
@@ -31,6 +31,7 @@
#define PWRDM_MAX_PWRSTS 4
/* Powerdomain allowable state bitfields */
+#define PWRSTS_ON (1 << PWRDM_POWER_ON)
#define PWRSTS_OFF_ON ((1 << PWRDM_POWER_OFF) | \
(1 << PWRDM_POWER_ON))
@@ -49,6 +50,12 @@
* in MEM bank 1 position. This is
* true for OMAP3430
*/
+#define PWRDM_HAS_LOWPOWERSTATECHANGE (1 << 2) /*
+						 * support for transitioning from a
+						 * sleep state to a lower sleep
+						 * state without waking up the
+						 * powerdomain
+ */
/*
* Number of memory banks that are power-controllable. On OMAP4430, the
diff --git a/arch/arm/plat-omap/include/plat/serial.h b/arch/arm/plat-omap/include/plat/serial.h
index 83dce4c4f7e6..19145f5c32ba 100644
--- a/arch/arm/plat-omap/include/plat/serial.h
+++ b/arch/arm/plat-omap/include/plat/serial.h
@@ -15,6 +15,20 @@
#include <linux/init.h>
+/*
+ * Memory entry used for the DEBUG_LL UART configuration. See also
+ * uncompress.h and debug-macro.S.
+ *
+ * Note that using a memory location for storing the UART configuration
+ * has at least two limitations:
+ *
+ * 1. Kernel uncompress code cannot overlap OMAP_UART_INFO as the
+ * uncompress code could then partially overwrite itself
+ * 2. We assume printascii is called at least once before paging_init,
+ * and addruart has a chance to read OMAP_UART_INFO
+ */
+#define OMAP_UART_INFO (PHYS_OFFSET + 0x3ffc)
+
/* OMAP1 serial ports */
#define OMAP1_UART1_BASE 0xfffb0000
#define OMAP1_UART2_BASE 0xfffb0800
@@ -39,7 +53,7 @@
/* External port on Zoom2/3 */
#define ZOOM_UART_BASE 0x10000000
-#define ZOOM_UART_VIRT 0xfb000000
+#define ZOOM_UART_VIRT 0xfa400000
#define OMAP_PORT_SHIFT 2
#define OMAP7XX_PORT_SHIFT 0
diff --git a/arch/arm/plat-omap/include/plat/uncompress.h b/arch/arm/plat-omap/include/plat/uncompress.h
index 81d9ec540fcf..bbedd71943f6 100644
--- a/arch/arm/plat-omap/include/plat/uncompress.h
+++ b/arch/arm/plat-omap/include/plat/uncompress.h
@@ -20,27 +20,21 @@
#include <linux/types.h>
#include <linux/serial_reg.h>
+#include <asm/memory.h>
#include <asm/mach-types.h>
#include <plat/serial.h>
-static volatile u8 *uart1_base;
-static int uart1_shift;
-
static volatile u8 *uart_base;
static int uart_shift;
/*
- * Store the DEBUG_LL uart number into UART1 scratchpad register.
+ * Store the DEBUG_LL uart number into memory.
* See also debug-macro.S, and serial.c for related code.
- *
- * Please note that we currently assume that:
- * - UART1 clocks are enabled for register access
- * - UART1 scratchpad register can be used
*/
-static void set_uart1_scratchpad(unsigned char port)
+static void set_omap_uart_info(unsigned char port)
{
- uart1_base[UART_SCR << uart1_shift] = port;
+ *(volatile u32 *)OMAP_UART_INFO = port;
}
static void putc(int c)
@@ -60,42 +54,38 @@ static inline void flush(void)
/*
* Macros to configure UART1 and debug UART
*/
-#define _DEBUG_LL_ENTRY(mach, uart1_phys, uart1_shft, \
- dbg_uart, dbg_shft, dbg_id) \
+#define _DEBUG_LL_ENTRY(mach, dbg_uart, dbg_shft, dbg_id) \
if (machine_is_##mach()) { \
- uart1_base = (volatile u8 *)(uart1_phys); \
- uart1_shift = (uart1_shft); \
uart_base = (volatile u8 *)(dbg_uart); \
uart_shift = (dbg_shft); \
port = (dbg_id); \
- set_uart1_scratchpad(port); \
+ set_omap_uart_info(port); \
break; \
}
#define DEBUG_LL_OMAP7XX(p, mach) \
- _DEBUG_LL_ENTRY(mach, OMAP1_UART1_BASE, OMAP7XX_PORT_SHIFT, \
- OMAP1_UART##p##_BASE, OMAP7XX_PORT_SHIFT, OMAP1UART##p)
+ _DEBUG_LL_ENTRY(mach, OMAP1_UART##p##_BASE, OMAP7XX_PORT_SHIFT, \
+ OMAP1UART##p)
#define DEBUG_LL_OMAP1(p, mach) \
- _DEBUG_LL_ENTRY(mach, OMAP1_UART1_BASE, OMAP_PORT_SHIFT, \
- OMAP1_UART##p##_BASE, OMAP_PORT_SHIFT, OMAP1UART##p)
+ _DEBUG_LL_ENTRY(mach, OMAP1_UART##p##_BASE, OMAP_PORT_SHIFT, \
+ OMAP1UART##p)
#define DEBUG_LL_OMAP2(p, mach) \
- _DEBUG_LL_ENTRY(mach, OMAP2_UART1_BASE, OMAP_PORT_SHIFT, \
- OMAP2_UART##p##_BASE, OMAP_PORT_SHIFT, OMAP2UART##p)
+ _DEBUG_LL_ENTRY(mach, OMAP2_UART##p##_BASE, OMAP_PORT_SHIFT, \
+ OMAP2UART##p)
#define DEBUG_LL_OMAP3(p, mach) \
- _DEBUG_LL_ENTRY(mach, OMAP3_UART1_BASE, OMAP_PORT_SHIFT, \
- OMAP3_UART##p##_BASE, OMAP_PORT_SHIFT, OMAP3UART##p)
+ _DEBUG_LL_ENTRY(mach, OMAP3_UART##p##_BASE, OMAP_PORT_SHIFT, \
+ OMAP3UART##p)
#define DEBUG_LL_OMAP4(p, mach) \
- _DEBUG_LL_ENTRY(mach, OMAP4_UART1_BASE, OMAP_PORT_SHIFT, \
- OMAP4_UART##p##_BASE, OMAP_PORT_SHIFT, OMAP4UART##p)
+ _DEBUG_LL_ENTRY(mach, OMAP4_UART##p##_BASE, OMAP_PORT_SHIFT, \
+ OMAP4UART##p)
/* Zoom2/3 shift is different for UART1 and external port */
#define DEBUG_LL_ZOOM(mach) \
- _DEBUG_LL_ENTRY(mach, OMAP2_UART1_BASE, OMAP_PORT_SHIFT, \
- ZOOM_UART_BASE, ZOOM_PORT_SHIFT, ZOOM_UART)
+ _DEBUG_LL_ENTRY(mach, ZOOM_UART_BASE, ZOOM_PORT_SHIFT, ZOOM_UART)
static inline void __arch_decomp_setup(unsigned long arch_id)
{
diff --git a/arch/arm/plat-omap/include/plat/usb.h b/arch/arm/plat-omap/include/plat/usb.h
index 876ca8d5e927..98eef5360e6d 100644
--- a/arch/arm/plat-omap/include/plat/usb.h
+++ b/arch/arm/plat-omap/include/plat/usb.h
@@ -13,6 +13,20 @@ enum ehci_hcd_omap_mode {
EHCI_HCD_OMAP_MODE_TLL,
};
+enum ohci_omap3_port_mode {
+ OMAP_OHCI_PORT_MODE_UNUSED,
+ OMAP_OHCI_PORT_MODE_PHY_6PIN_DATSE0,
+ OMAP_OHCI_PORT_MODE_PHY_6PIN_DPDM,
+ OMAP_OHCI_PORT_MODE_PHY_3PIN_DATSE0,
+ OMAP_OHCI_PORT_MODE_PHY_4PIN_DPDM,
+ OMAP_OHCI_PORT_MODE_TLL_6PIN_DATSE0,
+ OMAP_OHCI_PORT_MODE_TLL_6PIN_DPDM,
+ OMAP_OHCI_PORT_MODE_TLL_3PIN_DATSE0,
+ OMAP_OHCI_PORT_MODE_TLL_4PIN_DPDM,
+ OMAP_OHCI_PORT_MODE_TLL_2PIN_DATSE0,
+ OMAP_OHCI_PORT_MODE_TLL_2PIN_DPDM,
+};
+
struct ehci_hcd_omap_platform_data {
enum ehci_hcd_omap_mode port_mode[OMAP3_HS_USB_PORTS];
unsigned phy_reset:1;
@@ -21,6 +35,13 @@ struct ehci_hcd_omap_platform_data {
int reset_gpio_port[OMAP3_HS_USB_PORTS];
};
+struct ohci_hcd_omap_platform_data {
+ enum ohci_omap3_port_mode port_mode[OMAP3_HS_USB_PORTS];
+
+ /* Set this to true for ES2.x silicon */
+ unsigned es2_compatibility:1;
+};
+
/*-------------------------------------------------------------------------*/
#define OMAP1_OTG_BASE 0xfffb0400
@@ -47,6 +68,7 @@ struct omap_musb_board_data {
u8 interface_type;
u8 mode;
u16 power;
+ unsigned extvbus:1;
};
enum musb_interface {MUSB_INTERFACE_ULPI, MUSB_INTERFACE_UTMI};
@@ -55,6 +77,8 @@ extern void usb_musb_init(struct omap_musb_board_data *board_data);
extern void usb_ehci_init(const struct ehci_hcd_omap_platform_data *pdata);
+extern void usb_ohci_init(const struct ohci_hcd_omap_platform_data *pdata);
+
#endif
void omap_usb_init(struct omap_usb_config *pdata);
diff --git a/arch/arm/plat-omap/iommu.c b/arch/arm/plat-omap/iommu.c
index 0e137663349c..bc094dbacee6 100644
--- a/arch/arm/plat-omap/iommu.c
+++ b/arch/arm/plat-omap/iommu.c
@@ -25,6 +25,11 @@
#include "iopgtable.h"
+#define for_each_iotlb_cr(obj, n, __i, cr) \
+ for (__i = 0; \
+ (__i < (n)) && (cr = __iotlb_read_cr((obj), __i), true); \
+ __i++)
+
/* accommodate the difference between omap1 and omap2/3 */
static const struct iommu_functions *arch_iommu;
@@ -172,15 +177,12 @@ static void iotlb_lock_get(struct iommu *obj, struct iotlb_lock *l)
l->base = MMU_LOCK_BASE(val);
l->vict = MMU_LOCK_VICT(val);
- BUG_ON(l->base != 0); /* Currently no preservation is used */
}
static void iotlb_lock_set(struct iommu *obj, struct iotlb_lock *l)
{
u32 val;
- BUG_ON(l->base != 0); /* Currently no preservation is used */
-
val = (l->base << MMU_LOCK_BASE_SHIFT);
val |= (l->vict << MMU_LOCK_VICT_SHIFT);
@@ -214,6 +216,20 @@ static inline ssize_t iotlb_dump_cr(struct iommu *obj, struct cr_regs *cr,
return arch_iommu->dump_cr(obj, cr, buf);
}
+/* only used in iotlb iteration for-loop */
+static struct cr_regs __iotlb_read_cr(struct iommu *obj, int n)
+{
+ struct cr_regs cr;
+ struct iotlb_lock l;
+
+ iotlb_lock_get(obj, &l);
+ l.vict = n;
+ iotlb_lock_set(obj, &l);
+ iotlb_read_cr(obj, &cr);
+
+ return cr;
+}
+
/**
* load_iotlb_entry - Set an iommu tlb entry
* @obj: target iommu
@@ -221,7 +237,6 @@ static inline ssize_t iotlb_dump_cr(struct iommu *obj, struct cr_regs *cr,
**/
int load_iotlb_entry(struct iommu *obj, struct iotlb_entry *e)
{
- int i;
int err = 0;
struct iotlb_lock l;
struct cr_regs *cr;
@@ -231,21 +246,30 @@ int load_iotlb_entry(struct iommu *obj, struct iotlb_entry *e)
clk_enable(obj->clk);
- for (i = 0; i < obj->nr_tlb_entries; i++) {
+ iotlb_lock_get(obj, &l);
+ if (l.base == obj->nr_tlb_entries) {
+ dev_warn(obj->dev, "%s: preserve entries full\n", __func__);
+ err = -EBUSY;
+ goto out;
+ }
+ if (!e->prsvd) {
+ int i;
struct cr_regs tmp;
+ for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, tmp)
+ if (!iotlb_cr_valid(&tmp))
+ break;
+
+ if (i == obj->nr_tlb_entries) {
+ dev_dbg(obj->dev, "%s: full: no entry\n", __func__);
+ err = -EBUSY;
+ goto out;
+ }
+
iotlb_lock_get(obj, &l);
- l.vict = i;
+ } else {
+ l.vict = l.base;
iotlb_lock_set(obj, &l);
- iotlb_read_cr(obj, &tmp);
- if (!iotlb_cr_valid(&tmp))
- break;
- }
-
- if (i == obj->nr_tlb_entries) {
- dev_dbg(obj->dev, "%s: full: no entry\n", __func__);
- err = -EBUSY;
- goto out;
}
cr = iotlb_alloc_cr(obj, e);
@@ -257,9 +281,11 @@ int load_iotlb_entry(struct iommu *obj, struct iotlb_entry *e)
iotlb_load_cr(obj, cr);
kfree(cr);
+ if (e->prsvd)
+ l.base++;
/* increment victim for next tlb load */
if (++l.vict == obj->nr_tlb_entries)
- l.vict = 0;
+ l.vict = l.base;
iotlb_lock_set(obj, &l);
out:
clk_disable(obj->clk);
@@ -276,20 +302,15 @@ EXPORT_SYMBOL_GPL(load_iotlb_entry);
**/
void flush_iotlb_page(struct iommu *obj, u32 da)
{
- struct iotlb_lock l;
int i;
+ struct cr_regs cr;
clk_enable(obj->clk);
- for (i = 0; i < obj->nr_tlb_entries; i++) {
- struct cr_regs cr;
+ for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) {
u32 start;
size_t bytes;
- iotlb_lock_get(obj, &l);
- l.vict = i;
- iotlb_lock_set(obj, &l);
- iotlb_read_cr(obj, &cr);
if (!iotlb_cr_valid(&cr))
continue;
@@ -299,7 +320,6 @@ void flush_iotlb_page(struct iommu *obj, u32 da)
if ((start <= da) && (da < start + bytes)) {
dev_dbg(obj->dev, "%s: %08x<=%08x(%x)\n",
__func__, start, da, bytes);
- iotlb_load_cr(obj, &cr);
iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
}
}
@@ -370,26 +390,19 @@ EXPORT_SYMBOL_GPL(iommu_dump_ctx);
static int __dump_tlb_entries(struct iommu *obj, struct cr_regs *crs, int num)
{
int i;
- struct iotlb_lock saved, l;
+ struct iotlb_lock saved;
+ struct cr_regs tmp;
struct cr_regs *p = crs;
clk_enable(obj->clk);
-
iotlb_lock_get(obj, &saved);
- memcpy(&l, &saved, sizeof(saved));
- for (i = 0; i < num; i++) {
- struct cr_regs tmp;
-
- iotlb_lock_get(obj, &l);
- l.vict = i;
- iotlb_lock_set(obj, &l);
- iotlb_read_cr(obj, &tmp);
+ for_each_iotlb_cr(obj, num, i, tmp) {
if (!iotlb_cr_valid(&tmp))
continue;
-
*p++ = tmp;
}
+
iotlb_lock_set(obj, &saved);
clk_disable(obj->clk);
@@ -503,6 +516,12 @@ static int iopgd_alloc_section(struct iommu *obj, u32 da, u32 pa, u32 prot)
{
u32 *iopgd = iopgd_offset(obj, da);
+ if ((da | pa) & ~IOSECTION_MASK) {
+		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
+ __func__, da, pa, IOSECTION_SIZE);
+ return -EINVAL;
+ }
+
*iopgd = (pa & IOSECTION_MASK) | prot | IOPGD_SECTION;
flush_iopgd_range(iopgd, iopgd);
return 0;
@@ -513,6 +532,12 @@ static int iopgd_alloc_super(struct iommu *obj, u32 da, u32 pa, u32 prot)
u32 *iopgd = iopgd_offset(obj, da);
int i;
+ if ((da | pa) & ~IOSUPER_MASK) {
+		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
+ __func__, da, pa, IOSUPER_SIZE);
+ return -EINVAL;
+ }
+
for (i = 0; i < 16; i++)
*(iopgd + i) = (pa & IOSUPER_MASK) | prot | IOPGD_SUPER;
flush_iopgd_range(iopgd, iopgd + 15);
@@ -542,6 +567,12 @@ static int iopte_alloc_large(struct iommu *obj, u32 da, u32 pa, u32 prot)
u32 *iopte = iopte_alloc(obj, iopgd, da);
int i;
+ if ((da | pa) & ~IOLARGE_MASK) {
+		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
+ __func__, da, pa, IOLARGE_SIZE);
+ return -EINVAL;
+ }
+
if (IS_ERR(iopte))
return PTR_ERR(iopte);
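The for_each_iotlb_cr() iterator added at the top of this file folds the iotlb_lock_get()/iotlb_lock_set()/iotlb_read_cr() sequence into a single loop, which is what lets load_iotlb_entry(), flush_iotlb_page() and __dump_tlb_entries() shrink. A minimal sketch of another walker built on the same pattern; the function is hypothetical and would have to live alongside the code above, since __iotlb_read_cr() is file-local:

/* Count the TLB entries that are currently valid on this IOMMU. */
static int count_valid_iotlb_entries(struct iommu *obj)
{
	struct cr_regs cr;
	int i, valid = 0;

	clk_enable(obj->clk);
	for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr)
		if (iotlb_cr_valid(&cr))
			valid++;
	clk_disable(obj->clk);

	return valid;
}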
diff --git a/arch/arm/plat-omap/iovmm.c b/arch/arm/plat-omap/iovmm.c
index 65c6d1ff7237..e43983ba59c5 100644
--- a/arch/arm/plat-omap/iovmm.c
+++ b/arch/arm/plat-omap/iovmm.c
@@ -287,16 +287,19 @@ static struct iovm_struct *alloc_iovm_area(struct iommu *obj, u32 da,
prev_end = 0;
list_for_each_entry(tmp, &obj->mmap, list) {
- if ((prev_end <= start) && (start + bytes < tmp->da_start))
+ if (prev_end >= start)
+ break;
+
+ if (start + bytes < tmp->da_start)
goto found;
if (flags & IOVMF_DA_ANON)
- start = roundup(tmp->da_end, alignement);
+ start = roundup(tmp->da_end + 1, alignement);
prev_end = tmp->da_end;
}
- if ((start >= prev_end) && (ULONG_MAX - start >= bytes))
+ if ((start > prev_end) && (ULONG_MAX - start >= bytes))
goto found;
dev_dbg(obj->dev, "%s: no space to fit %08x(%x) flags: %08x\n",
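The two hunks above fix an off-by-one in the free-area search. Assuming da_end is the last address covered by an area (which is what the +1 implies), consider an existing mapping whose da_end is already aligned, say 0x21000 with a 0x1000-byte alignment: the old roundup(tmp->da_end, alignement) yields 0x21000, so the next IOVMF_DA_ANON candidate would start on the final address of its predecessor, and the old start >= prev_end test likewise accepted start == prev_end. With roundup(tmp->da_end + 1, alignement) the candidate becomes 0x22000, and start > prev_end rejects the overlapping case.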
diff --git a/arch/arm/plat-omap/mcbsp.c b/arch/arm/plat-omap/mcbsp.c
index e1d0440fd4a8..7e669c9744d8 100644
--- a/arch/arm/plat-omap/mcbsp.c
+++ b/arch/arm/plat-omap/mcbsp.c
@@ -489,7 +489,7 @@ void omap_mcbsp_set_tx_threshold(unsigned int id, u16 threshold)
{
struct omap_mcbsp *mcbsp;
- if (!cpu_is_omap34xx())
+ if (!cpu_is_omap34xx() && !cpu_is_omap44xx())
return;
if (!omap_mcbsp_check_valid_id(id)) {
@@ -511,7 +511,7 @@ void omap_mcbsp_set_rx_threshold(unsigned int id, u16 threshold)
{
struct omap_mcbsp *mcbsp;
- if (!cpu_is_omap34xx())
+ if (!cpu_is_omap34xx() && !cpu_is_omap44xx())
return;
if (!omap_mcbsp_check_valid_id(id)) {
@@ -560,6 +560,61 @@ u16 omap_mcbsp_get_max_rx_threshold(unsigned int id)
}
EXPORT_SYMBOL(omap_mcbsp_get_max_rx_threshold);
+#define MCBSP2_FIFO_SIZE 0x500 /* 1024 + 256 locations */
+#define MCBSP1345_FIFO_SIZE 0x80 /* 128 locations */
+/*
+ * omap_mcbsp_get_tx_delay returns the number of used slots in the McBSP FIFO
+ */
+u16 omap_mcbsp_get_tx_delay(unsigned int id)
+{
+ struct omap_mcbsp *mcbsp;
+ u16 buffstat;
+
+ if (!omap_mcbsp_check_valid_id(id)) {
+ printk(KERN_ERR "%s: Invalid id (%d)\n", __func__, id + 1);
+ return -ENODEV;
+ }
+ mcbsp = id_to_mcbsp_ptr(id);
+
+ /* Returns the number of free locations in the buffer */
+ buffstat = MCBSP_READ(mcbsp, XBUFFSTAT);
+
+	/* The number of FIFO locations differs between McBSP ports */
+ if (mcbsp->id == 2)
+ return MCBSP2_FIFO_SIZE - buffstat;
+ else
+ return MCBSP1345_FIFO_SIZE - buffstat;
+}
+EXPORT_SYMBOL(omap_mcbsp_get_tx_delay);
+
+/*
+ * omap_mcbsp_get_rx_delay returns the number of FIFO locations still to be
+ * filled before the threshold is reached (when the DMA will be triggered to read it)
+ */
+u16 omap_mcbsp_get_rx_delay(unsigned int id)
+{
+ struct omap_mcbsp *mcbsp;
+ u16 buffstat, threshold;
+
+ if (!omap_mcbsp_check_valid_id(id)) {
+ printk(KERN_ERR "%s: Invalid id (%d)\n", __func__, id + 1);
+ return -ENODEV;
+ }
+ mcbsp = id_to_mcbsp_ptr(id);
+
+ /* Returns the number of used locations in the buffer */
+ buffstat = MCBSP_READ(mcbsp, RBUFFSTAT);
+ /* RX threshold */
+ threshold = MCBSP_READ(mcbsp, THRSH1);
+
+	/* Return the number of locations left until the threshold is reached */
+ if (threshold <= buffstat)
+ return 0;
+ else
+ return threshold - buffstat;
+}
+EXPORT_SYMBOL(omap_mcbsp_get_rx_delay);
+
/*
* omap_mcbsp_get_dma_op_mode just return the current configured
* operating mode for the mcbsp channel
@@ -587,7 +642,7 @@ static inline void omap34xx_mcbsp_request(struct omap_mcbsp *mcbsp)
* Enable wakup behavior, smart idle and all wakeups
* REVISIT: some wakeups may be unnecessary
*/
- if (cpu_is_omap34xx()) {
+ if (cpu_is_omap34xx() || cpu_is_omap44xx()) {
u16 syscon;
syscon = MCBSP_READ(mcbsp, SYSCON);
@@ -610,7 +665,7 @@ static inline void omap34xx_mcbsp_free(struct omap_mcbsp *mcbsp)
/*
* Disable wakup behavior, smart idle and all wakeups
*/
- if (cpu_is_omap34xx()) {
+ if (cpu_is_omap34xx() || cpu_is_omap44xx()) {
u16 syscon;
syscon = MCBSP_READ(mcbsp, SYSCON);
@@ -724,14 +779,17 @@ int omap_mcbsp_request(unsigned int id)
goto err_clk_disable;
}
- init_completion(&mcbsp->rx_irq_completion);
- err = request_irq(mcbsp->rx_irq, omap_mcbsp_rx_irq_handler,
+ if (mcbsp->rx_irq) {
+ init_completion(&mcbsp->rx_irq_completion);
+ err = request_irq(mcbsp->rx_irq,
+ omap_mcbsp_rx_irq_handler,
0, "McBSP", (void *)mcbsp);
- if (err != 0) {
- dev_err(mcbsp->dev, "Unable to request RX IRQ %d "
- "for McBSP%d\n", mcbsp->rx_irq,
- mcbsp->id);
- goto err_free_irq;
+ if (err != 0) {
+ dev_err(mcbsp->dev, "Unable to request RX IRQ %d "
+ "for McBSP%d\n", mcbsp->rx_irq,
+ mcbsp->id);
+ goto err_free_irq;
+ }
}
}
@@ -781,7 +839,8 @@ void omap_mcbsp_free(unsigned int id)
if (mcbsp->io_type == OMAP_MCBSP_IRQ_IO) {
/* Free IRQs */
- free_irq(mcbsp->rx_irq, (void *)mcbsp);
+ if (mcbsp->rx_irq)
+ free_irq(mcbsp->rx_irq, (void *)mcbsp);
free_irq(mcbsp->tx_irq, (void *)mcbsp);
}
@@ -855,7 +914,7 @@ void omap_mcbsp_start(unsigned int id, int tx, int rx)
MCBSP_WRITE(mcbsp, SPCR2, w | (1 << 7));
}
- if (cpu_is_omap2430() || cpu_is_omap34xx()) {
+ if (cpu_is_omap2430() || cpu_is_omap34xx() || cpu_is_omap44xx()) {
/* Release the transmitter and receiver */
w = MCBSP_READ_CACHE(mcbsp, XCCR);
w &= ~(tx ? XDISABLE : 0);
@@ -885,7 +944,7 @@ void omap_mcbsp_stop(unsigned int id, int tx, int rx)
/* Reset transmitter */
tx &= 1;
- if (cpu_is_omap2430() || cpu_is_omap34xx()) {
+ if (cpu_is_omap2430() || cpu_is_omap34xx() || cpu_is_omap44xx()) {
w = MCBSP_READ_CACHE(mcbsp, XCCR);
w |= (tx ? XDISABLE : 0);
MCBSP_WRITE(mcbsp, XCCR, w);
@@ -895,7 +954,7 @@ void omap_mcbsp_stop(unsigned int id, int tx, int rx)
/* Reset receiver */
rx &= 1;
- if (cpu_is_omap2430() || cpu_is_omap34xx()) {
+ if (cpu_is_omap2430() || cpu_is_omap34xx() || cpu_is_omap44xx()) {
w = MCBSP_READ_CACHE(mcbsp, RCCR);
w |= (rx ? RDISABLE : 0);
MCBSP_WRITE(mcbsp, RCCR, w);
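The delay helpers added earlier in this file are plain FIFO arithmetic: XBUFFSTAT reports free transmit locations, RBUFFSTAT reports used receive locations, and THRSH1 holds the receive threshold. For McBSP2, whose FIFO is 0x500 (1280) locations, an XBUFFSTAT reading of 1000 free locations makes omap_mcbsp_get_tx_delay() return 1280 - 1000 = 280 words still queued; on the receive side, a threshold of 64 with RBUFFSTAT reporting 20 words makes omap_mcbsp_get_rx_delay() return 64 - 20 = 44 words still to arrive before the DMA request fires. The readings are illustrative, not taken from the patch.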
diff --git a/arch/arm/plat-omap/omap_device.c b/arch/arm/plat-omap/omap_device.c
index 0f5197479513..f899603051ac 100644
--- a/arch/arm/plat-omap/omap_device.c
+++ b/arch/arm/plat-omap/omap_device.c
@@ -2,10 +2,10 @@
* omap_device implementation
*
* Copyright (C) 2009 Nokia Corporation
- * Paul Walmsley
+ * Paul Walmsley, Kevin Hilman
*
* Developed in collaboration with (alphabetical order): Benoit
- * Cousson, Kevin Hilman, Tony Lindgren, Rajendra Nayak, Vikram
+ * Cousson, Thara Gopinath, Tony Lindgren, Rajendra Nayak, Vikram
* Pandita, Sakari Poussa, Anand Sawant, Santosh Shilimkar, Richard
* Woodruff
*
diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
index 51f4dfb82e2b..226b2e858d6c 100644
--- a/arch/arm/plat-omap/sram.c
+++ b/arch/arm/plat-omap/sram.c
@@ -437,6 +437,20 @@ static inline int omap34xx_sram_init(void)
}
#endif
+#ifdef CONFIG_ARCH_OMAP4
+int __init omap44xx_sram_init(void)
+{
+ printk(KERN_ERR "FIXME: %s not implemented\n", __func__);
+
+ return -ENODEV;
+}
+#else
+static inline int omap44xx_sram_init(void)
+{
+ return 0;
+}
+#endif
+
int __init omap_sram_init(void)
{
omap_detect_sram();
@@ -451,7 +465,7 @@ int __init omap_sram_init(void)
else if (cpu_is_omap34xx())
omap34xx_sram_init();
else if (cpu_is_omap44xx())
- omap34xx_sram_init(); /* FIXME: */
+ omap44xx_sram_init();
return 0;
}
diff --git a/arch/arm/plat-orion/include/plat/orion_nand.h b/arch/arm/plat-orion/include/plat/orion_nand.h
index d6a4cfa37785..9f3c180834d1 100644
--- a/arch/arm/plat-orion/include/plat/orion_nand.h
+++ b/arch/arm/plat-orion/include/plat/orion_nand.h
@@ -14,6 +14,7 @@
*/
struct orion_nand_data {
struct mtd_partition *parts;
+ int (*dev_ready)(struct mtd_info *mtd);
u32 nr_parts;
u8 ale; /* address line number connected to ALE */
u8 cle; /* address line number connected to CLE */
diff --git a/arch/arm/plat-pxa/Kconfig b/arch/arm/plat-pxa/Kconfig
index b158e98038ed..da53395a17c6 100644
--- a/arch/arm/plat-pxa/Kconfig
+++ b/arch/arm/plat-pxa/Kconfig
@@ -1,3 +1,8 @@
if PLAT_PXA
+config PXA_SSP
+ tristate
+ help
+ Enable support for PXA2xx SSP ports
+
endif
diff --git a/arch/arm/plat-pxa/Makefile b/arch/arm/plat-pxa/Makefile
index 0264bfb0ca4f..6187edfbcb77 100644
--- a/arch/arm/plat-pxa/Makefile
+++ b/arch/arm/plat-pxa/Makefile
@@ -2,10 +2,11 @@
# Makefile for code common across different PXA processor families
#
-obj-y := dma.o
+obj-y := dma.o pmu.o
obj-$(CONFIG_GENERIC_GPIO) += gpio.o
obj-$(CONFIG_PXA3xx) += mfp.o
obj-$(CONFIG_ARCH_MMP) += mfp.o
obj-$(CONFIG_HAVE_PWM) += pwm.o
+obj-$(CONFIG_PXA_SSP) += ssp.o
diff --git a/arch/arm/plat-pxa/include/plat/mfp.h b/arch/arm/plat-pxa/include/plat/mfp.h
index 857a6839071c..9e604c80618f 100644
--- a/arch/arm/plat-pxa/include/plat/mfp.h
+++ b/arch/arm/plat-pxa/include/plat/mfp.h
@@ -316,6 +316,13 @@ enum {
MFP_PIN_PMIC_INT,
MFP_PIN_RDY,
+ /* additional pins on MMP2 */
+ MFP_PIN_TWSI1_SCL,
+ MFP_PIN_TWSI1_SDA,
+ MFP_PIN_TWSI4_SCL,
+ MFP_PIN_TWSI4_SDA,
+ MFP_PIN_CLK_REQ,
+
MFP_PIN_MAX,
};
diff --git a/arch/arm/mach-pxa/include/mach/regs-ssp.h b/arch/arm/plat-pxa/include/plat/ssp.h
index 6a2ed35acd59..fe43150690ed 100644
--- a/arch/arm/mach-pxa/include/mach/regs-ssp.h
+++ b/arch/arm/plat-pxa/include/plat/ssp.h
@@ -1,5 +1,26 @@
-#ifndef __ASM_ARCH_REGS_SSP_H
-#define __ASM_ARCH_REGS_SSP_H
+/*
+ * ssp.h
+ *
+ * Copyright (C) 2003 Russell King, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This driver supports the following PXA CPU/SSP ports:-
+ *
+ * PXA250 SSP
+ * PXA255 SSP, NSSP
+ * PXA26x SSP, NSSP, ASSP
+ * PXA27x SSP1, SSP2, SSP3
+ * PXA3xx SSP1, SSP2, SSP3, SSP4
+ */
+
+#ifndef __ASM_ARCH_SSP_H
+#define __ASM_ARCH_SSP_H
+
+#include <linux/list.h>
+#include <linux/io.h>
/*
* SSP Serial Port Registers
@@ -19,10 +40,7 @@
#define SSRSA (0x34) /* SSP Rx Timeslot Active */
#define SSTSS (0x38) /* SSP Timeslot Status */
#define SSACD (0x3C) /* SSP Audio Clock Divider */
-
-#if defined(CONFIG_PXA3xx)
#define SSACDD (0x40) /* SSP Audio Clock Dither Divider */
-#endif
/* Common PXA2xx bits first */
#define SSCR0_DSS (0x0000000f) /* Data Size Select (mask) */
@@ -33,29 +51,19 @@
#define SSCR0_National (0x2 << 4) /* National Microwire */
#define SSCR0_ECS (1 << 6) /* External clock select */
#define SSCR0_SSE (1 << 7) /* Synchronous Serial Port Enable */
+#define SSCR0_SCR(x) ((x) << 8) /* Serial Clock Rate (mask) */
-#if defined(CONFIG_PXA25x)
-#define SSCR0_SCR (0x0000ff00) /* Serial Clock Rate (mask) */
-#define SSCR0_SerClkDiv(x) ((((x) - 2)/2) << 8) /* Divisor [2..512] */
-#elif defined(CONFIG_PXA27x) || defined(CONFIG_PXA3xx)
-#define SSCR0_SCR (0x000fff00) /* Serial Clock Rate (mask) */
-#define SSCR0_SerClkDiv(x) (((x) - 1) << 8) /* Divisor [1..4096] */
-#endif
-
-#if defined(CONFIG_PXA27x) || defined(CONFIG_PXA3xx)
+/* PXA27x, PXA3xx */
#define SSCR0_EDSS (1 << 20) /* Extended data size select */
#define SSCR0_NCS (1 << 21) /* Network clock select */
+#define SSCR0_RIM	(1 << 22)	/* Receive FIFO overrun interrupt mask */
#define SSCR0_TUM (1 << 23) /* Transmit FIFO underrun interrupt mask */
#define SSCR0_FRDC (0x07000000) /* Frame rate divider control (mask) */
#define SSCR0_SlotsPerFrm(x) (((x) - 1) << 24) /* Time slots per frame [1..8] */
+#define SSCR0_FPCKE (1 << 29) /* FIFO packing enable */
#define SSCR0_ACS (1 << 30) /* Audio clock select */
#define SSCR0_MOD (1 << 31) /* Mode (normal or network) */
-#endif
-#if defined(CONFIG_PXA3xx)
-#define SSCR0_FPCKE (1 << 29) /* FIFO packing enable */
-#endif
#define SSCR1_RIE (1 << 0) /* Receive FIFO Interrupt Enable */
#define SSCR1_TIE (1 << 1) /* Transmit FIFO Interrupt Enable */
@@ -75,10 +83,6 @@
#define SSSR_RFS (1 << 6) /* Receive FIFO Service Request */
#define SSSR_ROR (1 << 7) /* Receive FIFO Overrun */
-#define SSCR0_TIM (1 << 23) /* Transmit FIFO Under Run Interrupt Mask */
-#define SSCR0_RIM (1 << 22) /* Receive FIFO Over Run interrupt Mask */
-#define SSCR0_NCS (1 << 21) /* Network Clock Select */
-#define SSCR0_EDSS (1 << 20) /* Extended Data Size Select */
/* extra bits in PXA255, PXA26x and PXA27x SSP ports */
#define SSCR0_TISSP (1 << 4) /* TI Sync Serial Protocol */
@@ -108,27 +112,75 @@
#define SSSR_TINT (1 << 19) /* Receiver Time-out Interrupt */
#define SSSR_PINT (1 << 18) /* Peripheral Trailing Byte Interrupt */
-#if defined(CONFIG_PXA3xx)
-#define SSPSP_EDMYSTOP(x) ((x) << 28) /* Extended Dummy Stop */
-#define SSPSP_EDMYSTRT(x) ((x) << 26) /* Extended Dummy Start */
-#endif
-#define SSPSP_FSRT (1 << 25) /* Frame Sync Relative Timing */
-#define SSPSP_DMYSTOP(x) ((x) << 23) /* Dummy Stop */
-#define SSPSP_SFRMWDTH(x) ((x) << 16) /* Serial Frame Width */
-#define SSPSP_SFRMDLY(x) ((x) << 9) /* Serial Frame Delay */
-#define SSPSP_DMYSTRT(x) ((x) << 7) /* Dummy Start */
-#define SSPSP_STRTDLY(x) ((x) << 4) /* Start Delay */
-#define SSPSP_ETDS (1 << 3) /* End of Transfer data State */
-#define SSPSP_SFRMP (1 << 2) /* Serial Frame Polarity */
#define SSPSP_SCMODE(x) ((x) << 0) /* Serial Bit Rate Clock Mode */
+#define SSPSP_SFRMP (1 << 2) /* Serial Frame Polarity */
+#define SSPSP_ETDS (1 << 3) /* End of Transfer data State */
+#define SSPSP_STRTDLY(x) ((x) << 4) /* Start Delay */
+#define SSPSP_DMYSTRT(x) ((x) << 7) /* Dummy Start */
+#define SSPSP_SFRMDLY(x) ((x) << 9) /* Serial Frame Delay */
+#define SSPSP_SFRMWDTH(x) ((x) << 16) /* Serial Frame Width */
+#define SSPSP_DMYSTOP(x) ((x) << 23) /* Dummy Stop */
+#define SSPSP_FSRT (1 << 25) /* Frame Sync Relative Timing */
+
+/* PXA3xx */
+#define SSPSP_EDMYSTRT(x) ((x) << 26) /* Extended Dummy Start */
+#define SSPSP_EDMYSTOP(x) ((x) << 28) /* Extended Dummy Stop */
+#define SSPSP_TIMING_MASK (0x7f8001f0)
#define SSACD_SCDB (1 << 3) /* SSPSYSCLK Divider Bypass */
#define SSACD_ACPS(x) ((x) << 4) /* Audio clock PLL select */
#define SSACD_ACDS(x) ((x) << 0) /* Audio clock divider select */
-#if defined(CONFIG_PXA3xx)
#define SSACD_SCDX8 (1 << 7) /* SYSCLK division ratio select */
-#endif
-
-#endif /* __ASM_ARCH_REGS_SSP_H */
+enum pxa_ssp_type {
+ SSP_UNDEFINED = 0,
+ PXA25x_SSP, /* pxa 210, 250, 255, 26x */
+ PXA25x_NSSP, /* pxa 255, 26x (including ASSP) */
+ PXA27x_SSP,
+ PXA168_SSP,
+};
+
+struct ssp_device {
+ struct platform_device *pdev;
+ struct list_head node;
+
+ struct clk *clk;
+ void __iomem *mmio_base;
+ unsigned long phys_base;
+
+ const char *label;
+ int port_id;
+ int type;
+ int use_count;
+ int irq;
+ int drcmr_rx;
+ int drcmr_tx;
+};
+
+/**
+ * pxa_ssp_write_reg - Write to a SSP register
+ *
+ * @dev: SSP device to access
+ * @reg: Register to write to
+ * @val: Value to be written.
+ */
+static inline void pxa_ssp_write_reg(struct ssp_device *dev, u32 reg, u32 val)
+{
+ __raw_writel(val, dev->mmio_base + reg);
+}
+
+/**
+ * pxa_ssp_read_reg - Read from a SSP register
+ *
+ * @dev: SSP device to access
+ * @reg: Register to read from
+ */
+static inline u32 pxa_ssp_read_reg(struct ssp_device *dev, u32 reg)
+{
+ return __raw_readl(dev->mmio_base + reg);
+}
+
+struct ssp_device *pxa_ssp_request(int port, const char *label);
+void pxa_ssp_free(struct ssp_device *);
+#endif /* __ASM_ARCH_SSP_H */
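The header above gives client drivers a small allocate/access API: pxa_ssp_request() hands out an unused port by id, and the inline accessors wrap __raw_readl()/__raw_writel() on mmio_base. A minimal client sketch follows; the port number, label and SSACD value are arbitrary illustrations, and the clock is enabled around the register access on the assumption that the SSP unit needs its clock running for MMIO.

#include <linux/clk.h>
#include <linux/kernel.h>
#include <plat/ssp.h>

static int example_touch_ssp(void)
{
	struct ssp_device *ssp;
	u32 val;

	ssp = pxa_ssp_request(1, "example-client");	/* SSP1, arbitrary label */
	if (!ssp)
		return -EBUSY;		/* port missing or already in use */

	clk_enable(ssp->clk);
	/* program the audio clock divider; the value is purely illustrative */
	pxa_ssp_write_reg(ssp, SSACD, SSACD_ACPS(2) | SSACD_ACDS(1));
	val = pxa_ssp_read_reg(ssp, SSACD);
	clk_disable(ssp->clk);

	pr_info("SSP%d SSACD = 0x%08x\n", ssp->port_id, val);

	pxa_ssp_free(ssp);
	return 0;
}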
diff --git a/arch/arm/plat-pxa/mfp.c b/arch/arm/plat-pxa/mfp.c
index be58f9fe65b0..b77e018d36c1 100644
--- a/arch/arm/plat-pxa/mfp.c
+++ b/arch/arm/plat-pxa/mfp.c
@@ -110,6 +110,7 @@ static const unsigned long mfpr_lpm[] = {
MFPR_LPM_PULL_LOW,
MFPR_LPM_PULL_HIGH,
MFPR_LPM_FLOAT,
+ MFPR_LPM_INPUT,
};
/* mapping of MFP_PULL_* definitions to MFPR_PULL_* register bits */
diff --git a/arch/arm/plat-pxa/pmu.c b/arch/arm/plat-pxa/pmu.c
new file mode 100644
index 000000000000..267ceb6feb2f
--- /dev/null
+++ b/arch/arm/plat-pxa/pmu.c
@@ -0,0 +1,33 @@
+/*
+ * PMU IRQ registration for the PXA xscale PMU families.
+ * Copyright (C) 2010 Will Deacon, ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/platform_device.h>
+#include <asm/pmu.h>
+#include <mach/irqs.h>
+
+static struct resource pmu_resource = {
+ .start = IRQ_PMU,
+ .end = IRQ_PMU,
+ .flags = IORESOURCE_IRQ,
+};
+
+static struct platform_device pmu_device = {
+ .name = "arm-pmu",
+ .id = ARM_PMU_DEVICE_CPU,
+ .resource = &pmu_resource,
+ .num_resources = 1,
+};
+
+static int __init pxa_pmu_init(void)
+{
+ platform_device_register(&pmu_device);
+ return 0;
+}
+arch_initcall(pxa_pmu_init);
diff --git a/arch/arm/plat-pxa/ssp.c b/arch/arm/plat-pxa/ssp.c
new file mode 100644
index 000000000000..c6357e554aba
--- /dev/null
+++ b/arch/arm/plat-pxa/ssp.c
@@ -0,0 +1,224 @@
+/*
+ * linux/arch/arm/mach-pxa/ssp.c
+ *
+ * based on linux/arch/arm/mach-sa1100/ssp.c by Russell King
+ *
+ * Copyright (C) 2003 Russell King.
+ * Copyright (C) 2003 Wolfson Microelectronics PLC
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * PXA2xx SSP driver. This provides the generic core for simple
+ * IO-based SSP applications and allows easy port setup for DMA access.
+ *
+ * Author: Liam Girdwood <liam.girdwood@wolfsonmicro.com>
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/init.h>
+#include <linux/mutex.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+#include <asm/irq.h>
+#include <mach/hardware.h>
+#include <plat/ssp.h>
+
+static DEFINE_MUTEX(ssp_lock);
+static LIST_HEAD(ssp_list);
+
+struct ssp_device *pxa_ssp_request(int port, const char *label)
+{
+ struct ssp_device *ssp = NULL;
+
+ mutex_lock(&ssp_lock);
+
+ list_for_each_entry(ssp, &ssp_list, node) {
+ if (ssp->port_id == port && ssp->use_count == 0) {
+ ssp->use_count++;
+ ssp->label = label;
+ break;
+ }
+ }
+
+ mutex_unlock(&ssp_lock);
+
+ if (&ssp->node == &ssp_list)
+ return NULL;
+
+ return ssp;
+}
+EXPORT_SYMBOL(pxa_ssp_request);
+
+void pxa_ssp_free(struct ssp_device *ssp)
+{
+ mutex_lock(&ssp_lock);
+ if (ssp->use_count) {
+ ssp->use_count--;
+ ssp->label = NULL;
+ } else
+ dev_err(&ssp->pdev->dev, "device already free\n");
+ mutex_unlock(&ssp_lock);
+}
+EXPORT_SYMBOL(pxa_ssp_free);
+
+static int __devinit pxa_ssp_probe(struct platform_device *pdev)
+{
+ const struct platform_device_id *id = platform_get_device_id(pdev);
+ struct resource *res;
+ struct ssp_device *ssp;
+ int ret = 0;
+
+ ssp = kzalloc(sizeof(struct ssp_device), GFP_KERNEL);
+ if (ssp == NULL) {
+		dev_err(&pdev->dev, "failed to allocate memory\n");
+ return -ENOMEM;
+ }
+ ssp->pdev = pdev;
+
+ ssp->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(ssp->clk)) {
+ ret = PTR_ERR(ssp->clk);
+ goto err_free;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "no SSP RX DRCMR defined\n");
+ ret = -ENODEV;
+ goto err_free_clk;
+ }
+ ssp->drcmr_rx = res->start;
+
+ res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "no SSP TX DRCMR defined\n");
+ ret = -ENODEV;
+ goto err_free_clk;
+ }
+ ssp->drcmr_tx = res->start;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "no memory resource defined\n");
+ ret = -ENODEV;
+ goto err_free_clk;
+ }
+
+ res = request_mem_region(res->start, resource_size(res),
+ pdev->name);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "failed to request memory resource\n");
+ ret = -EBUSY;
+ goto err_free_clk;
+ }
+
+ ssp->phys_base = res->start;
+
+ ssp->mmio_base = ioremap(res->start, resource_size(res));
+ if (ssp->mmio_base == NULL) {
+ dev_err(&pdev->dev, "failed to ioremap() registers\n");
+ ret = -ENODEV;
+ goto err_free_mem;
+ }
+
+ ssp->irq = platform_get_irq(pdev, 0);
+ if (ssp->irq < 0) {
+ dev_err(&pdev->dev, "no IRQ resource defined\n");
+ ret = -ENODEV;
+ goto err_free_io;
+ }
+
+	/* PXA2xx/3xx SSP ports start from 1 while the internal pdev->id
+	 * starts from 0, so do the translation here
+ */
+ ssp->port_id = pdev->id + 1;
+ ssp->use_count = 0;
+ ssp->type = (int)id->driver_data;
+
+ mutex_lock(&ssp_lock);
+ list_add(&ssp->node, &ssp_list);
+ mutex_unlock(&ssp_lock);
+
+ platform_set_drvdata(pdev, ssp);
+ return 0;
+
+err_free_io:
+ iounmap(ssp->mmio_base);
+err_free_mem:
+ release_mem_region(res->start, resource_size(res));
+err_free_clk:
+ clk_put(ssp->clk);
+err_free:
+ kfree(ssp);
+ return ret;
+}
+
+static int __devexit pxa_ssp_remove(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct ssp_device *ssp;
+
+ ssp = platform_get_drvdata(pdev);
+ if (ssp == NULL)
+ return -ENODEV;
+
+ iounmap(ssp->mmio_base);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(res->start, resource_size(res));
+
+ clk_put(ssp->clk);
+
+ mutex_lock(&ssp_lock);
+ list_del(&ssp->node);
+ mutex_unlock(&ssp_lock);
+
+ kfree(ssp);
+ return 0;
+}
+
+static const struct platform_device_id ssp_id_table[] = {
+ { "pxa25x-ssp", PXA25x_SSP },
+ { "pxa25x-nssp", PXA25x_NSSP },
+ { "pxa27x-ssp", PXA27x_SSP },
+ { "pxa168-ssp", PXA168_SSP },
+ { },
+};
+
+static struct platform_driver pxa_ssp_driver = {
+ .probe = pxa_ssp_probe,
+ .remove = __devexit_p(pxa_ssp_remove),
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "pxa2xx-ssp",
+ },
+ .id_table = ssp_id_table,
+};
+
+static int __init pxa_ssp_init(void)
+{
+ return platform_driver_register(&pxa_ssp_driver);
+}
+
+static void __exit pxa_ssp_exit(void)
+{
+ platform_driver_unregister(&pxa_ssp_driver);
+}
+
+arch_initcall(pxa_ssp_init);
+module_exit(pxa_ssp_exit);
+
+MODULE_DESCRIPTION("PXA SSP driver");
+MODULE_AUTHOR("Liam Girdwood");
+MODULE_LICENSE("GPL");
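pxa_ssp_probe() above looks up, in order, two DMA resources (RX then TX DRCMR), one memory region and one IRQ, takes the port type from the id_table entry, and derives port_id as pdev->id + 1; it also calls clk_get(&pdev->dev, NULL), so a matching clock must be registered for the device. A board-side sketch with entirely made-up addresses and channel numbers:

#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>

static struct resource example_ssp1_resources[] = {
	{ .start = 13, .end = 13, .flags = IORESOURCE_DMA },	/* RX DRCMR, made up */
	{ .start = 14, .end = 14, .flags = IORESOURCE_DMA },	/* TX DRCMR, made up */
	{ .start = 0x41000000, .end = 0x41000fff, .flags = IORESOURCE_MEM },	/* made up */
	{ .start = 8, .end = 8, .flags = IORESOURCE_IRQ },	/* made up */
};

static struct platform_device example_ssp1_device = {
	.name		= "pxa27x-ssp",		/* matched through ssp_id_table */
	.id		= 0,			/* probe maps this to port_id 1 */
	.resource	= example_ssp1_resources,
	.num_resources	= ARRAY_SIZE(example_ssp1_resources),
};

/* registered from board init code with platform_device_register(&example_ssp1_device) */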
diff --git a/arch/arm/plat-s3c24xx/Kconfig b/arch/arm/plat-s3c24xx/Kconfig
index 6e93ef8f3d43..984bf66826d2 100644
--- a/arch/arm/plat-s3c24xx/Kconfig
+++ b/arch/arm/plat-s3c24xx/Kconfig
@@ -9,6 +9,7 @@ config PLAT_S3C24XX
select NO_IOPORT
select ARCH_REQUIRE_GPIOLIB
select S3C_DEVICE_NAND
+ select S3C_GPIO_CFG_S3C24XX
help
Base platform code for any Samsung S3C24XX device
@@ -44,6 +45,12 @@ config S3C2410_CLOCK
Clock code for the S3C2410, and similar processors which
is currently includes the S3C2410, S3C2440, S3C2442.
+config S3C2443_CLOCK
+ bool
+ help
+ Clock code for the S3C2443 and similar processors, which includes
+ the S3C2416 and S3C2450.
+
config S3C24XX_DCLK
bool
help
@@ -157,4 +164,9 @@ config S3C24XX_SIMTEC_AUDIO
help
Add audio devices for common Simtec S3C24XX boards
+config S3C2410_SETUP_TS
+ bool
+ help
+ Compile in platform device definition for Samsung TouchScreen.
+
endif
diff --git a/arch/arm/plat-s3c24xx/Makefile b/arch/arm/plat-s3c24xx/Makefile
index c2237c41141f..c2064c308719 100644
--- a/arch/arm/plat-s3c24xx/Makefile
+++ b/arch/arm/plat-s3c24xx/Makefile
@@ -30,6 +30,7 @@ obj-$(CONFIG_PM) += pm.o
obj-$(CONFIG_PM) += irq-pm.o
obj-$(CONFIG_PM) += sleep.o
obj-$(CONFIG_S3C2410_CLOCK) += s3c2410-clock.o
+obj-$(CONFIG_S3C2443_CLOCK) += s3c2443-clock.o
obj-$(CONFIG_S3C2410_DMA) += dma.o
obj-$(CONFIG_S3C2410_IOTIMING) += s3c2410-iotiming.o
obj-$(CONFIG_S3C2412_IOTIMING) += s3c2412-iotiming.o
@@ -37,6 +38,7 @@ obj-$(CONFIG_S3C2410_CPUFREQ_UTILS) += s3c2410-cpufreq-utils.o
# device specific setup and/or initialisation
obj-$(CONFIG_ARCH_S3C2410) += setup-i2c.o
+obj-$(CONFIG_S3C2410_SETUP_TS) += setup-ts.o
# SPI gpio central GPIO functions
diff --git a/arch/arm/plat-s3c24xx/common-smdk.c b/arch/arm/plat-s3c24xx/common-smdk.c
index 9e0e20ad2e46..7b44d0c592b5 100644
--- a/arch/arm/plat-s3c24xx/common-smdk.c
+++ b/arch/arm/plat-s3c24xx/common-smdk.c
@@ -42,6 +42,7 @@
#include <plat/nand.h>
#include <plat/common-smdk.h>
+#include <plat/gpio-cfg.h>
#include <plat/devs.h>
#include <plat/pm.h>
@@ -185,10 +186,10 @@ void __init smdk_machine_init(void)
{
/* Configure the LEDs (even if we have no LED support)*/
- s3c2410_gpio_cfgpin(S3C2410_GPF(4), S3C2410_GPIO_OUTPUT);
- s3c2410_gpio_cfgpin(S3C2410_GPF(5), S3C2410_GPIO_OUTPUT);
- s3c2410_gpio_cfgpin(S3C2410_GPF(6), S3C2410_GPIO_OUTPUT);
- s3c2410_gpio_cfgpin(S3C2410_GPF(7), S3C2410_GPIO_OUTPUT);
+ s3c_gpio_cfgpin(S3C2410_GPF(4), S3C2410_GPIO_OUTPUT);
+ s3c_gpio_cfgpin(S3C2410_GPF(5), S3C2410_GPIO_OUTPUT);
+ s3c_gpio_cfgpin(S3C2410_GPF(6), S3C2410_GPIO_OUTPUT);
+ s3c_gpio_cfgpin(S3C2410_GPF(7), S3C2410_GPIO_OUTPUT);
s3c2410_gpio_setpin(S3C2410_GPF(4), 1);
s3c2410_gpio_setpin(S3C2410_GPF(5), 1);
diff --git a/arch/arm/plat-s3c24xx/cpu.c b/arch/arm/plat-s3c24xx/cpu.c
index 9ca64df35bf6..76d0858c3cbb 100644
--- a/arch/arm/plat-s3c24xx/cpu.c
+++ b/arch/arm/plat-s3c24xx/cpu.c
@@ -49,6 +49,7 @@
#include <plat/s3c2400.h>
#include <plat/s3c2410.h>
#include <plat/s3c2412.h>
+#include <plat/s3c2416.h>
#include <plat/s3c244x.h>
#include <plat/s3c2443.h>
@@ -57,6 +58,7 @@
static const char name_s3c2400[] = "S3C2400";
static const char name_s3c2410[] = "S3C2410";
static const char name_s3c2412[] = "S3C2412";
+static const char name_s3c2416[] = "S3C2416/S3C2450";
static const char name_s3c2440[] = "S3C2440";
static const char name_s3c2442[] = "S3C2442";
static const char name_s3c2442b[] = "S3C2442B";
@@ -137,6 +139,15 @@ static struct cpu_table cpu_ids[] __initdata = {
.init = s3c2412_init,
.name = name_s3c2412,
},
+ { /* a strange version of the s3c2416 */
+ .idcode = 0x32450003,
+ .idmask = 0xffffffff,
+ .map_io = s3c2416_map_io,
+ .init_clocks = s3c2416_init_clocks,
+ .init_uarts = s3c2416_init_uarts,
+ .init = s3c2416_init,
+ .name = name_s3c2416,
+ },
{
.idcode = 0x32443001,
.idmask = 0xffffffff,
@@ -170,6 +181,16 @@ static struct map_desc s3c_iodesc[] __initdata = {
static unsigned long s3c24xx_read_idcode_v5(void)
{
+#if defined(CONFIG_CPU_S3C2416)
+ /* s3c2416 is v5, with S3C24XX_GSTATUS1 instead of S3C2412_GSTATUS1 */
+
+ u32 gs = __raw_readl(S3C24XX_GSTATUS1);
+
+ /* test for s3c2416 or similar device */
+ if ((gs >> 16) == 0x3245)
+ return gs;
+#endif
+
#if defined(CONFIG_CPU_S3C2412) || defined(CONFIG_CPU_S3C2413)
return __raw_readl(S3C2412_GSTATUS1);
#else
diff --git a/arch/arm/plat-s3c24xx/devs.c b/arch/arm/plat-s3c24xx/devs.c
index 9265f09bfa58..452e18438b41 100644
--- a/arch/arm/plat-s3c24xx/devs.c
+++ b/arch/arm/plat-s3c24xx/devs.c
@@ -21,6 +21,7 @@
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/slab.h>
+#include <linux/string.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
@@ -38,8 +39,7 @@
#include <plat/devs.h>
#include <plat/cpu.h>
#include <plat/regs-spi.h>
-
-#include <mach/ts.h>
+#include <plat/ts.h>
/* Serial port registrations */
@@ -149,10 +149,14 @@ void __init s3c24xx_fb_set_platdata(struct s3c2410fb_mach_info *pd)
{
struct s3c2410fb_mach_info *npd;
- npd = kmalloc(sizeof(*npd), GFP_KERNEL);
+ npd = kmemdup(pd, sizeof(*npd), GFP_KERNEL);
if (npd) {
- memcpy(npd, pd, sizeof(*npd));
s3c_device_lcd.dev.platform_data = npd;
+ npd->displays = kmemdup(pd->displays,
+ sizeof(struct s3c2410fb_display) * npd->num_displays,
+ GFP_KERNEL);
+ if (!npd->displays)
+ printk(KERN_ERR "no memory for LCD display data\n");
} else {
printk(KERN_ERR "no memory for LCD platform data\n");
}
@@ -230,32 +234,6 @@ void __init s3c24xx_udc_set_platdata(struct s3c2410_udc_mach_info *pd)
}
}
-
-/* Watchdog */
-
-static struct resource s3c_wdt_resource[] = {
- [0] = {
- .start = S3C24XX_PA_WATCHDOG,
- .end = S3C24XX_PA_WATCHDOG + S3C24XX_SZ_WATCHDOG - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = IRQ_WDT,
- .end = IRQ_WDT,
- .flags = IORESOURCE_IRQ,
- }
-
-};
-
-struct platform_device s3c_device_wdt = {
- .name = "s3c2410-wdt",
- .id = -1,
- .num_resources = ARRAY_SIZE(s3c_wdt_resource),
- .resource = s3c_wdt_resource,
-};
-
-EXPORT_SYMBOL(s3c_device_wdt);
-
/* IIS */
static struct resource s3c_iis_resource[] = {
@@ -338,14 +316,6 @@ struct platform_device s3c_device_adc = {
.resource = s3c_adc_resource,
};
-/* HWMON */
-
-struct platform_device s3c_device_hwmon = {
- .name = "s3c-hwmon",
- .id = -1,
- .dev.parent = &s3c_device_adc.dev,
-};
-
/* SDI */
static struct resource s3c_sdi_resource[] = {
@@ -371,7 +341,7 @@ struct platform_device s3c_device_sdi = {
EXPORT_SYMBOL(s3c_device_sdi);
-void s3c24xx_mci_set_platdata(struct s3c24xx_mci_pdata *pdata)
+void __init s3c24xx_mci_set_platdata(struct s3c24xx_mci_pdata *pdata)
{
struct s3c24xx_mci_pdata *npd;
diff --git a/arch/arm/plat-s3c24xx/dma.c b/arch/arm/plat-s3c24xx/dma.c
index 93827b3d4e84..6ad274e7593d 100644
--- a/arch/arm/plat-s3c24xx/dma.c
+++ b/arch/arm/plat-s3c24xx/dma.c
@@ -1104,7 +1104,7 @@ EXPORT_SYMBOL(s3c2410_dma_config);
* devaddr: physical address of the source
*/
-int s3c2410_dma_devconfig(int channel,
+int s3c2410_dma_devconfig(unsigned int channel,
enum s3c2410_dmasrc source,
unsigned long devaddr)
{
diff --git a/arch/arm/plat-s3c24xx/gpio.c b/arch/arm/plat-s3c24xx/gpio.c
index 5467470badfd..2f3d7c089dfa 100644
--- a/arch/arm/plat-s3c24xx/gpio.c
+++ b/arch/arm/plat-s3c24xx/gpio.c
@@ -1,6 +1,6 @@
/* linux/arch/arm/plat-s3c24xx/gpio.c
*
- * Copyright (c) 2004-2005 Simtec Electronics
+ * Copyright (c) 2004-2010 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
*
* S3C24XX GPIO support
@@ -20,12 +20,12 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
+#include <linux/gpio.h>
#include <linux/io.h>
#include <mach/hardware.h>
@@ -34,133 +34,46 @@
#include <mach/regs-gpio.h>
-void s3c2410_gpio_cfgpin(unsigned int pin, unsigned int function)
-{
- void __iomem *base = S3C24XX_GPIO_BASE(pin);
- unsigned long mask;
- unsigned long con;
- unsigned long flags;
+#include <plat/gpio-core.h>
- if (pin < S3C2410_GPIO_BANKB) {
- mask = 1 << S3C2410_GPIO_OFFSET(pin);
- } else {
- mask = 3 << S3C2410_GPIO_OFFSET(pin)*2;
- }
-
- switch (function) {
- case S3C2410_GPIO_LEAVE:
- mask = 0;
- function = 0;
- break;
-
- case S3C2410_GPIO_INPUT:
- case S3C2410_GPIO_OUTPUT:
- case S3C2410_GPIO_SFN2:
- case S3C2410_GPIO_SFN3:
- if (pin < S3C2410_GPIO_BANKB) {
- function -= 1;
- function &= 1;
- function <<= S3C2410_GPIO_OFFSET(pin);
- } else {
- function &= 3;
- function <<= S3C2410_GPIO_OFFSET(pin)*2;
- }
- }
-
- /* modify the specified register wwith IRQs off */
-
- local_irq_save(flags);
-
- con = __raw_readl(base + 0x00);
- con &= ~mask;
- con |= function;
-
- __raw_writel(con, base + 0x00);
-
- local_irq_restore(flags);
-}
-
-EXPORT_SYMBOL(s3c2410_gpio_cfgpin);
-
-unsigned int s3c2410_gpio_getcfg(unsigned int pin)
-{
- void __iomem *base = S3C24XX_GPIO_BASE(pin);
- unsigned long val = __raw_readl(base);
-
- if (pin < S3C2410_GPIO_BANKB) {
- val >>= S3C2410_GPIO_OFFSET(pin);
- val &= 1;
- val += 1;
- } else {
- val >>= S3C2410_GPIO_OFFSET(pin)*2;
- val &= 3;
- }
-
- return val | S3C2410_GPIO_INPUT;
-}
-
-EXPORT_SYMBOL(s3c2410_gpio_getcfg);
+/* gpiolib wrappers until these are totally eliminated */
void s3c2410_gpio_pullup(unsigned int pin, unsigned int to)
{
- void __iomem *base = S3C24XX_GPIO_BASE(pin);
- unsigned long offs = S3C2410_GPIO_OFFSET(pin);
- unsigned long flags;
- unsigned long up;
+ int ret;
- if (pin < S3C2410_GPIO_BANKB)
- return;
+ WARN_ON(to); /* should be none of these left */
- local_irq_save(flags);
-
- up = __raw_readl(base + 0x08);
- up &= ~(1L << offs);
- up |= to << offs;
- __raw_writel(up, base + 0x08);
+ if (!to) {
+ /* if pull is enabled, try first with up, and if that
+ * fails, try using down */
- local_irq_restore(flags);
+ ret = s3c_gpio_setpull(pin, S3C_GPIO_PULL_UP);
+ if (ret)
+ s3c_gpio_setpull(pin, S3C_GPIO_PULL_DOWN);
+ } else {
+ s3c_gpio_setpull(pin, S3C_GPIO_PULL_NONE);
+ }
}
-
EXPORT_SYMBOL(s3c2410_gpio_pullup);
-int s3c2410_gpio_getpull(unsigned int pin)
-{
- void __iomem *base = S3C24XX_GPIO_BASE(pin);
- unsigned long offs = S3C2410_GPIO_OFFSET(pin);
-
- if (pin < S3C2410_GPIO_BANKB)
- return -EINVAL;
-
- return (__raw_readl(base + 0x08) & (1L << offs)) ? 1 : 0;
-}
-
-EXPORT_SYMBOL(s3c2410_gpio_getpull);
-
void s3c2410_gpio_setpin(unsigned int pin, unsigned int to)
{
- void __iomem *base = S3C24XX_GPIO_BASE(pin);
- unsigned long offs = S3C2410_GPIO_OFFSET(pin);
- unsigned long flags;
- unsigned long dat;
+ /* do this via gpiolib until all users removed */
- local_irq_save(flags);
-
- dat = __raw_readl(base + 0x04);
- dat &= ~(1 << offs);
- dat |= to << offs;
- __raw_writel(dat, base + 0x04);
-
- local_irq_restore(flags);
+ gpio_request(pin, "temporary");
+ gpio_set_value(pin, to);
+ gpio_free(pin);
}
EXPORT_SYMBOL(s3c2410_gpio_setpin);
unsigned int s3c2410_gpio_getpin(unsigned int pin)
{
- void __iomem *base = S3C24XX_GPIO_BASE(pin);
- unsigned long offs = S3C2410_GPIO_OFFSET(pin);
+ struct s3c_gpio_chip *chip = s3c_gpiolib_getchip(pin);
+ unsigned long offs = pin - chip->chip.base;
- return __raw_readl(base + 0x04) & (1<< offs);
+ return __raw_readl(chip->base + 0x04) & (1<< offs);
}
EXPORT_SYMBOL(s3c2410_gpio_getpin);
@@ -181,22 +94,3 @@ unsigned int s3c2410_modify_misccr(unsigned int clear, unsigned int change)
}
EXPORT_SYMBOL(s3c2410_modify_misccr);
-
-int s3c2410_gpio_getirq(unsigned int pin)
-{
- if (pin < S3C2410_GPF(0) || pin > S3C2410_GPG(15))
- return -EINVAL; /* not valid interrupts */
-
- if (pin < S3C2410_GPG(0) && pin > S3C2410_GPF(7))
- return -EINVAL; /* not valid pin */
-
- if (pin < S3C2410_GPF(4))
- return (pin - S3C2410_GPF(0)) + IRQ_EINT0;
-
- if (pin < S3C2410_GPG(0))
- return (pin - S3C2410_GPF(4)) + IRQ_EINT4;
-
- return (pin - S3C2410_GPG(0)) + IRQ_EINT8;
-}
-
-EXPORT_SYMBOL(s3c2410_gpio_getirq);
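
With s3c2410_gpio_cfgpin/getcfg/getpull/getirq removed, callers are expected to go through the generic gpiolib and s3c_gpio_* interfaces instead. A hedged sketch of the equivalent operations via gpiolib; the pin number and label are purely illustrative:

#include <linux/gpio.h>

static int example_gpio_usage(unsigned int pin)
{
	int irq, ret;

	ret = gpio_request(pin, "example");	/* claim the pin through gpiolib */
	if (ret)
		return ret;

	gpio_direction_output(pin, 1);		/* replaces s3c2410_gpio_cfgpin() + setpin() */
	irq = gpio_to_irq(pin);			/* replaces s3c2410_gpio_getirq() */

	gpio_free(pin);
	return irq;
}
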
diff --git a/arch/arm/plat-s3c24xx/gpiolib.c b/arch/arm/plat-s3c24xx/gpiolib.c
index 4f0f11a6a677..4c0896f2572d 100644
--- a/arch/arm/plat-s3c24xx/gpiolib.c
+++ b/arch/arm/plat-s3c24xx/gpiolib.c
@@ -1,6 +1,6 @@
/* linux/arch/arm/plat-s3c24xx/gpiolib.c
*
- * Copyright (c) 2008 Simtec Electronics
+ * Copyright (c) 2008-2010 Simtec Electronics
* http://armlinux.simtec.co.uk/
* Ben Dooks <ben@simtec.co.uk>
*
@@ -21,6 +21,8 @@
#include <linux/gpio.h>
#include <plat/gpio-core.h>
+#include <plat/gpio-cfg.h>
+#include <plat/gpio-cfg-helpers.h>
#include <mach/hardware.h>
#include <asm/irq.h>
#include <plat/pm.h>
@@ -77,10 +79,21 @@ static int s3c24xx_gpiolib_bankg_toirq(struct gpio_chip *chip, unsigned offset)
return IRQ_EINT8 + offset;
}
+static struct s3c_gpio_cfg s3c24xx_gpiocfg_banka = {
+ .set_config = s3c_gpio_setcfg_s3c24xx_a,
+ .get_config = s3c_gpio_getcfg_s3c24xx_a,
+};
+
+struct s3c_gpio_cfg s3c24xx_gpiocfg_default = {
+ .set_config = s3c_gpio_setcfg_s3c24xx,
+ .get_config = s3c_gpio_getcfg_s3c24xx,
+};
+
struct s3c_gpio_chip s3c24xx_gpios[] = {
[0] = {
.base = S3C2410_GPACON,
.pm = __gpio_pm(&s3c_gpio_pm_1bit),
+ .config = &s3c24xx_gpiocfg_banka,
.chip = {
.base = S3C2410_GPA(0),
.owner = THIS_MODULE,
@@ -161,15 +174,58 @@ struct s3c_gpio_chip s3c24xx_gpios[] = {
.ngpio = 11,
},
},
+	/* GPIOs for the S3C2443 and later devices. */
+ {
+ .base = S3C2440_GPJCON,
+ .pm = __gpio_pm(&s3c_gpio_pm_2bit),
+ .chip = {
+ .base = S3C2410_GPJ(0),
+ .owner = THIS_MODULE,
+ .label = "GPIOJ",
+ .ngpio = 16,
+ },
+ }, {
+ .base = S3C2443_GPKCON,
+ .pm = __gpio_pm(&s3c_gpio_pm_2bit),
+ .chip = {
+ .base = S3C2410_GPK(0),
+ .owner = THIS_MODULE,
+ .label = "GPIOK",
+ .ngpio = 16,
+ },
+ }, {
+ .base = S3C2443_GPLCON,
+ .pm = __gpio_pm(&s3c_gpio_pm_2bit),
+ .chip = {
+ .base = S3C2410_GPL(0),
+ .owner = THIS_MODULE,
+ .label = "GPIOL",
+ .ngpio = 15,
+ },
+ }, {
+ .base = S3C2443_GPMCON,
+ .pm = __gpio_pm(&s3c_gpio_pm_2bit),
+ .chip = {
+ .base = S3C2410_GPM(0),
+ .owner = THIS_MODULE,
+ .label = "GPIOM",
+ .ngpio = 2,
+ },
+ },
};
+
static __init int s3c24xx_gpiolib_init(void)
{
struct s3c_gpio_chip *chip = s3c24xx_gpios;
int gpn;
- for (gpn = 0; gpn < ARRAY_SIZE(s3c24xx_gpios); gpn++, chip++)
+ for (gpn = 0; gpn < ARRAY_SIZE(s3c24xx_gpios); gpn++, chip++) {
+ if (!chip->config)
+ chip->config = &s3c24xx_gpiocfg_default;
+
s3c_gpiolib_add(chip);
+ }
return 0;
}
diff --git a/arch/arm/plat-s3c24xx/include/plat/pll.h b/arch/arm/plat-s3c24xx/include/plat/pll.h
index 7ea8bffa7a9c..005729a1077a 100644
--- a/arch/arm/plat-s3c24xx/include/plat/pll.h
+++ b/arch/arm/plat-s3c24xx/include/plat/pll.h
@@ -35,3 +35,28 @@ s3c24xx_get_pll(unsigned int pllval, unsigned int baseclk)
return (unsigned int)fvco;
}
+
+#define S3C2416_PLL_M_SHIFT (14)
+#define S3C2416_PLL_P_SHIFT (5)
+#define S3C2416_PLL_S_MASK (7)
+#define S3C2416_PLL_M_MASK ((1 << 10) - 1)
+#define S3C2416_PLL_P_MASK (63)
+
+static inline unsigned int
+s3c2416_get_pll(unsigned int pllval, unsigned int baseclk)
+{
+ unsigned int m, p, s;
+ uint64_t fvco;
+
+ m = pllval >> S3C2416_PLL_M_SHIFT;
+ p = pllval >> S3C2416_PLL_P_SHIFT;
+
+ s = pllval & S3C2416_PLL_S_MASK;
+ m &= S3C2416_PLL_M_MASK;
+ p &= S3C2416_PLL_P_MASK;
+
+ fvco = (uint64_t)baseclk * m;
+ do_div(fvco, (p << s));
+
+ return (unsigned int)fvco;
+}
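
s3c2416_get_pll() decodes the MDIV/PDIV/SDIV fields of PLLCON and computes Fout = (M x Fref) / (P << S), using a 64-bit intermediate so the multiply cannot overflow. A standalone userspace sketch of the same arithmetic; the register value and crystal rate below are made-up examples:

#include <stdint.h>
#include <stdio.h>

#define PLL_M_SHIFT 14
#define PLL_P_SHIFT 5
#define PLL_S_MASK  7
#define PLL_M_MASK  ((1 << 10) - 1)
#define PLL_P_MASK  63

static unsigned int get_pll(unsigned int pllval, unsigned int baseclk)
{
	unsigned int m = (pllval >> PLL_M_SHIFT) & PLL_M_MASK;
	unsigned int p = (pllval >> PLL_P_SHIFT) & PLL_P_MASK;
	unsigned int s = pllval & PLL_S_MASK;

	/* 64-bit product avoids overflow before the divide (do_div() in the kernel) */
	return (unsigned int)(((uint64_t)baseclk * m) / (p << s));
}

int main(void)
{
	/* example only: M=400, P=3, S=1 with a 12MHz crystal -> 800MHz output */
	unsigned int pllval = (400u << PLL_M_SHIFT) | (3u << PLL_P_SHIFT) | 1u;

	printf("%u Hz\n", get_pll(pllval, 12000000));
	return 0;
}
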
diff --git a/arch/arm/plat-s3c24xx/include/plat/s3c2416.h b/arch/arm/plat-s3c24xx/include/plat/s3c2416.h
new file mode 100644
index 000000000000..dc3c0907d221
--- /dev/null
+++ b/arch/arm/plat-s3c24xx/include/plat/s3c2416.h
@@ -0,0 +1,31 @@
+/* linux/arch/arm/plat-s3c24xx/include/plat/s3c2416.h
+ *
+ * Copyright (c) 2009 Yauhen Kharuzhy <jekhor@gmail.com>
+ *
+ * Header file for s3c2416 cpu support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifdef CONFIG_CPU_S3C2416
+
+struct s3c2410_uartcfg;
+
+extern int s3c2416_init(void);
+
+extern void s3c2416_map_io(void);
+
+extern void s3c2416_init_uarts(struct s3c2410_uartcfg *cfg, int no);
+
+extern void s3c2416_init_clocks(int xtal);
+
+extern int s3c2416_baseclk_add(void);
+
+#else
+#define s3c2416_init_clocks NULL
+#define s3c2416_init_uarts NULL
+#define s3c2416_map_io NULL
+#define s3c2416_init NULL
+#endif
diff --git a/arch/arm/plat-s3c24xx/include/plat/s3c2443.h b/arch/arm/plat-s3c24xx/include/plat/s3c2443.h
index 815b107ed890..a19715feb798 100644
--- a/arch/arm/plat-s3c24xx/include/plat/s3c2443.h
+++ b/arch/arm/plat-s3c24xx/include/plat/s3c2443.h
@@ -30,3 +30,22 @@ extern int s3c2443_baseclk_add(void);
#define s3c2443_map_io NULL
#define s3c2443_init NULL
#endif
+
+/* common code used by s3c2443 and others.
+ * note, not to be used outside of arch/arm/mach-s3c* */
+
+struct clk; /* some files don't need clk.h otherwise */
+
+typedef unsigned int (*pll_fn)(unsigned int reg, unsigned int base);
+typedef unsigned int (*fdiv_fn)(unsigned long clkcon0);
+
+extern void s3c2443_common_setup_clocks(pll_fn get_mpll, fdiv_fn fdiv);
+extern void s3c2443_common_init_clocks(int xtal, pll_fn get_mpll, fdiv_fn fdiv);
+
+extern int s3c2443_clkcon_enable_h(struct clk *clk, int enable);
+extern int s3c2443_clkcon_enable_p(struct clk *clk, int enable);
+extern int s3c2443_clkcon_enable_s(struct clk *clk, int enable);
+
+extern struct clksrc_clk clk_epllref;
+extern struct clksrc_clk clk_esysclk;
+extern struct clksrc_clk clk_msysclk;
diff --git a/arch/arm/plat-s3c24xx/pm.c b/arch/arm/plat-s3c24xx/pm.c
index 3620dd299095..60627e63a254 100644
--- a/arch/arm/plat-s3c24xx/pm.c
+++ b/arch/arm/plat-s3c24xx/pm.c
@@ -43,6 +43,7 @@
#include <asm/mach/time.h>
+#include <plat/gpio-cfg.h>
#include <plat/pm.h>
#define PFX "s3c24xx-pm: "
@@ -90,22 +91,22 @@ static void s3c_pm_check_resume_pin(unsigned int pin, unsigned int irqoffs)
{
unsigned long irqstate;
unsigned long pinstate;
- int irq = s3c2410_gpio_getirq(pin);
+ int irq = gpio_to_irq(pin);
if (irqoffs < 4)
irqstate = s3c_irqwake_intmask & (1L<<irqoffs);
else
irqstate = s3c_irqwake_eintmask & (1L<<irqoffs);
- pinstate = s3c2410_gpio_getcfg(pin);
+ pinstate = s3c_gpio_getcfg(pin);
if (!irqstate) {
if (pinstate == S3C2410_GPIO_IRQ)
- S3C_PMDBG("Leaving IRQ %d (pin %d) enabled\n", irq, pin);
+ S3C_PMDBG("Leaving IRQ %d (pin %d) as is\n", irq, pin);
} else {
if (pinstate == S3C2410_GPIO_IRQ) {
S3C_PMDBG("Disabling IRQ %d (pin %d)\n", irq, pin);
- s3c2410_gpio_cfgpin(pin, S3C2410_GPIO_INPUT);
+ s3c_gpio_cfgpin(pin, S3C2410_GPIO_INPUT);
}
}
}
diff --git a/arch/arm/plat-s3c24xx/s3c2410-clock.c b/arch/arm/plat-s3c24xx/s3c2410-clock.c
index b61bdb793734..9ecc5d913679 100644
--- a/arch/arm/plat-s3c24xx/s3c2410-clock.c
+++ b/arch/arm/plat-s3c24xx/s3c2410-clock.c
@@ -87,7 +87,7 @@ static int s3c2410_upll_enable(struct clk *clk, int enable)
/* standard clock definitions */
-static struct clk init_clocks_disable[] = {
+static struct clk init_clocks_off[] = {
{
.name = "nand",
.id = -1,
@@ -249,17 +249,8 @@ int __init s3c2410_baseclk_add(void)
/* install (and disable) the clocks we do not need immediately */
- clkp = init_clocks_disable;
- for (ptr = 0; ptr < ARRAY_SIZE(init_clocks_disable); ptr++, clkp++) {
-
- ret = s3c24xx_register_clock(clkp);
- if (ret < 0) {
- printk(KERN_ERR "Failed to register clock %s (%d)\n",
- clkp->name, ret);
- }
-
- s3c2410_clkcon_enable(clkp, 0);
- }
+ s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
+ s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
/* show the clock-slow value */
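
The removed register-then-disable loop is folded into the shared s3c_register_clocks()/s3c_disable_clocks() helpers. The sketch below only mirrors what the removed loop did, using toy types so it builds on its own; it is not the plat-samsung implementation:

#include <stdio.h>

/* toy stand-ins for struct clk and the registration helpers */
struct clk {
	const char *name;
	int enabled;
};

static int register_clock(struct clk *clk)
{
	printf("registered %s\n", clk->name);
	return 0;
}

static void register_clocks(struct clk *clkp, int nr)
{
	for (; nr > 0; nr--, clkp++)
		if (register_clock(clkp) < 0)
			printf("Failed to register clock %s\n", clkp->name);
}

static void disable_clocks(struct clk *clkp, int nr)
{
	for (; nr > 0; nr--, clkp++)
		clkp->enabled = 0;	/* the real helper calls the clock's enable op with 0 */
}

int main(void)
{
	struct clk init_clocks_off[] = { { "nand", 1 }, { "i2c", 1 } };

	register_clocks(init_clocks_off, 2);
	disable_clocks(init_clocks_off, 2);
	return 0;
}
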
diff --git a/arch/arm/plat-s3c24xx/s3c2443-clock.c b/arch/arm/plat-s3c24xx/s3c2443-clock.c
new file mode 100644
index 000000000000..461f070eb62d
--- /dev/null
+++ b/arch/arm/plat-s3c24xx/s3c2443-clock.c
@@ -0,0 +1,472 @@
+/* linux/arch/arm/plat-s3c24xx/s3c2443-clock.c
+ *
+ * Copyright (c) 2007, 2010 Simtec Electronics
+ * Ben Dooks <ben@simtec.co.uk>
+ *
+ * S3C2443 Clock control support - common code
+ */
+
+#include <linux/init.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+
+#include <mach/regs-s3c2443-clock.h>
+
+#include <plat/s3c2443.h>
+#include <plat/clock.h>
+#include <plat/clock-clksrc.h>
+#include <plat/cpu.h>
+
+#include <plat/cpu-freq.h>
+
+
+static int s3c2443_gate(void __iomem *reg, struct clk *clk, int enable)
+{
+ u32 ctrlbit = clk->ctrlbit;
+ u32 con = __raw_readl(reg);
+
+ if (enable)
+ con |= ctrlbit;
+ else
+ con &= ~ctrlbit;
+
+ __raw_writel(con, reg);
+ return 0;
+}
+
+int s3c2443_clkcon_enable_h(struct clk *clk, int enable)
+{
+ return s3c2443_gate(S3C2443_HCLKCON, clk, enable);
+}
+
+int s3c2443_clkcon_enable_p(struct clk *clk, int enable)
+{
+ return s3c2443_gate(S3C2443_PCLKCON, clk, enable);
+}
+
+int s3c2443_clkcon_enable_s(struct clk *clk, int enable)
+{
+ return s3c2443_gate(S3C2443_SCLKCON, clk, enable);
+}
+
+/* mpllref is a direct descendant of clk_xtal by default, but it is not
+ * elided as the EPLL can be either sourced by the XTAL or EXTCLK and as
+ * such directly equating the two source clocks is impossible.
+ */
+struct clk clk_mpllref = {
+ .name = "mpllref",
+ .parent = &clk_xtal,
+ .id = -1,
+};
+
+static struct clk *clk_epllref_sources[] = {
+ [0] = &clk_mpllref,
+ [1] = &clk_mpllref,
+ [2] = &clk_xtal,
+ [3] = &clk_ext,
+};
+
+struct clksrc_clk clk_epllref = {
+ .clk = {
+ .name = "epllref",
+ .id = -1,
+ },
+ .sources = &(struct clksrc_sources) {
+ .sources = clk_epllref_sources,
+ .nr_sources = ARRAY_SIZE(clk_epllref_sources),
+ },
+ .reg_src = { .reg = S3C2443_CLKSRC, .size = 2, .shift = 7 },
+};
+
+/* esysclk
+ *
+ * this is sourced from either the EPLL or the EPLLref clock
+*/
+
+static struct clk *clk_sysclk_sources[] = {
+ [0] = &clk_epllref.clk,
+ [1] = &clk_epll,
+};
+
+struct clksrc_clk clk_esysclk = {
+ .clk = {
+ .name = "esysclk",
+ .parent = &clk_epll,
+ .id = -1,
+ },
+ .sources = &(struct clksrc_sources) {
+ .sources = clk_sysclk_sources,
+ .nr_sources = ARRAY_SIZE(clk_sysclk_sources),
+ },
+ .reg_src = { .reg = S3C2443_CLKSRC, .size = 1, .shift = 6 },
+};
+
+static unsigned long s3c2443_getrate_mdivclk(struct clk *clk)
+{
+ unsigned long parent_rate = clk_get_rate(clk->parent);
+ unsigned long div = __raw_readl(S3C2443_CLKDIV0);
+
+ div &= S3C2443_CLKDIV0_EXTDIV_MASK;
+ div >>= (S3C2443_CLKDIV0_EXTDIV_SHIFT-1); /* x2 */
+
+ return parent_rate / (div + 1);
+}
+
+static struct clk clk_mdivclk = {
+ .name = "mdivclk",
+ .parent = &clk_mpllref,
+ .id = -1,
+ .ops = &(struct clk_ops) {
+ .get_rate = s3c2443_getrate_mdivclk,
+ },
+};
+
+static struct clk *clk_msysclk_sources[] = {
+ [0] = &clk_mpllref,
+ [1] = &clk_mpll,
+ [2] = &clk_mdivclk,
+ [3] = &clk_mpllref,
+};
+
+struct clksrc_clk clk_msysclk = {
+ .clk = {
+ .name = "msysclk",
+ .parent = &clk_xtal,
+ .id = -1,
+ },
+ .sources = &(struct clksrc_sources) {
+ .sources = clk_msysclk_sources,
+ .nr_sources = ARRAY_SIZE(clk_msysclk_sources),
+ },
+ .reg_src = { .reg = S3C2443_CLKSRC, .size = 2, .shift = 3 },
+};
+
+/* prediv
+ *
+ * this divides the msysclk down to pass to h/p/etc.
+ */
+
+static unsigned long s3c2443_prediv_getrate(struct clk *clk)
+{
+ unsigned long rate = clk_get_rate(clk->parent);
+ unsigned long clkdiv0 = __raw_readl(S3C2443_CLKDIV0);
+
+ clkdiv0 &= S3C2443_CLKDIV0_PREDIV_MASK;
+ clkdiv0 >>= S3C2443_CLKDIV0_PREDIV_SHIFT;
+
+ return rate / (clkdiv0 + 1);
+}
+
+static struct clk clk_prediv = {
+ .name = "prediv",
+ .id = -1,
+ .parent = &clk_msysclk.clk,
+ .ops = &(struct clk_ops) {
+ .get_rate = s3c2443_prediv_getrate,
+ },
+};
+
+/* usbhost
+ *
+ * usb host bus-clock, usually 48MHz to provide USB bus clock timing
+*/
+
+static struct clksrc_clk clk_usb_bus_host = {
+ .clk = {
+ .name = "usb-bus-host-parent",
+ .id = -1,
+ .parent = &clk_esysclk.clk,
+ .ctrlbit = S3C2443_SCLKCON_USBHOST,
+ .enable = s3c2443_clkcon_enable_s,
+ },
+ .reg_div = { .reg = S3C2443_CLKDIV1, .size = 2, .shift = 4 },
+};
+
+/* common clksrc clocks */
+
+static struct clksrc_clk clksrc_clks[] = {
+ {
+		/* UART baud-rate clock sourced from esysclk via a divisor */
+ .clk = {
+ .name = "uartclk",
+ .id = -1,
+ .parent = &clk_esysclk.clk,
+ },
+ .reg_div = { .reg = S3C2443_CLKDIV1, .size = 4, .shift = 8 },
+ }, {
+ /* camera interface bus-clock, divided down from esysclk */
+ .clk = {
+ .name = "camif-upll", /* same as 2440 name */
+ .id = -1,
+ .parent = &clk_esysclk.clk,
+ .ctrlbit = S3C2443_SCLKCON_CAMCLK,
+ .enable = s3c2443_clkcon_enable_s,
+ },
+ .reg_div = { .reg = S3C2443_CLKDIV1, .size = 4, .shift = 26 },
+ }, {
+ .clk = {
+ .name = "display-if",
+ .id = -1,
+ .parent = &clk_esysclk.clk,
+ .ctrlbit = S3C2443_SCLKCON_DISPCLK,
+ .enable = s3c2443_clkcon_enable_s,
+ },
+ .reg_div = { .reg = S3C2443_CLKDIV1, .size = 8, .shift = 16 },
+ },
+};
+
+
+static struct clk init_clocks_off[] = {
+ {
+ .name = "adc",
+ .id = -1,
+ .parent = &clk_p,
+ .enable = s3c2443_clkcon_enable_p,
+ .ctrlbit = S3C2443_PCLKCON_ADC,
+ }, {
+ .name = "i2c",
+ .id = -1,
+ .parent = &clk_p,
+ .enable = s3c2443_clkcon_enable_p,
+ .ctrlbit = S3C2443_PCLKCON_IIC,
+ }
+};
+
+static struct clk init_clocks[] = {
+ {
+ .name = "dma",
+ .id = 0,
+ .parent = &clk_h,
+ .enable = s3c2443_clkcon_enable_h,
+ .ctrlbit = S3C2443_HCLKCON_DMA0,
+ }, {
+ .name = "dma",
+ .id = 1,
+ .parent = &clk_h,
+ .enable = s3c2443_clkcon_enable_h,
+ .ctrlbit = S3C2443_HCLKCON_DMA1,
+ }, {
+ .name = "dma",
+ .id = 2,
+ .parent = &clk_h,
+ .enable = s3c2443_clkcon_enable_h,
+ .ctrlbit = S3C2443_HCLKCON_DMA2,
+ }, {
+ .name = "dma",
+ .id = 3,
+ .parent = &clk_h,
+ .enable = s3c2443_clkcon_enable_h,
+ .ctrlbit = S3C2443_HCLKCON_DMA3,
+ }, {
+ .name = "dma",
+ .id = 4,
+ .parent = &clk_h,
+ .enable = s3c2443_clkcon_enable_h,
+ .ctrlbit = S3C2443_HCLKCON_DMA4,
+ }, {
+ .name = "dma",
+ .id = 5,
+ .parent = &clk_h,
+ .enable = s3c2443_clkcon_enable_h,
+ .ctrlbit = S3C2443_HCLKCON_DMA5,
+ }, {
+ .name = "hsmmc",
+ .id = 0,
+ .parent = &clk_h,
+ .enable = s3c2443_clkcon_enable_h,
+ .ctrlbit = S3C2443_HCLKCON_HSMMC,
+ }, {
+ .name = "gpio",
+ .id = -1,
+ .parent = &clk_p,
+ .enable = s3c2443_clkcon_enable_p,
+ .ctrlbit = S3C2443_PCLKCON_GPIO,
+ }, {
+ .name = "usb-host",
+ .id = -1,
+ .parent = &clk_h,
+ .enable = s3c2443_clkcon_enable_h,
+ .ctrlbit = S3C2443_HCLKCON_USBH,
+ }, {
+ .name = "usb-device",
+ .id = -1,
+ .parent = &clk_h,
+ .enable = s3c2443_clkcon_enable_h,
+ .ctrlbit = S3C2443_HCLKCON_USBD,
+ }, {
+ .name = "lcd",
+ .id = -1,
+ .parent = &clk_h,
+ .enable = s3c2443_clkcon_enable_h,
+ .ctrlbit = S3C2443_HCLKCON_LCDC,
+
+ }, {
+ .name = "timers",
+ .id = -1,
+ .parent = &clk_p,
+ .enable = s3c2443_clkcon_enable_p,
+ .ctrlbit = S3C2443_PCLKCON_PWMT,
+ }, {
+ .name = "cfc",
+ .id = -1,
+ .parent = &clk_h,
+ .enable = s3c2443_clkcon_enable_h,
+ .ctrlbit = S3C2443_HCLKCON_CFC,
+ }, {
+ .name = "ssmc",
+ .id = -1,
+ .parent = &clk_h,
+ .enable = s3c2443_clkcon_enable_h,
+ .ctrlbit = S3C2443_HCLKCON_SSMC,
+ }, {
+ .name = "uart",
+ .id = 0,
+ .parent = &clk_p,
+ .enable = s3c2443_clkcon_enable_p,
+ .ctrlbit = S3C2443_PCLKCON_UART0,
+ }, {
+ .name = "uart",
+ .id = 1,
+ .parent = &clk_p,
+ .enable = s3c2443_clkcon_enable_p,
+ .ctrlbit = S3C2443_PCLKCON_UART1,
+ }, {
+ .name = "uart",
+ .id = 2,
+ .parent = &clk_p,
+ .enable = s3c2443_clkcon_enable_p,
+ .ctrlbit = S3C2443_PCLKCON_UART2,
+ }, {
+ .name = "uart",
+ .id = 3,
+ .parent = &clk_p,
+ .enable = s3c2443_clkcon_enable_p,
+ .ctrlbit = S3C2443_PCLKCON_UART3,
+ }, {
+ .name = "rtc",
+ .id = -1,
+ .parent = &clk_p,
+ .enable = s3c2443_clkcon_enable_p,
+ .ctrlbit = S3C2443_PCLKCON_RTC,
+ }, {
+ .name = "watchdog",
+ .id = -1,
+ .parent = &clk_p,
+ .ctrlbit = S3C2443_PCLKCON_WDT,
+ }, {
+ .name = "ac97",
+ .id = -1,
+ .parent = &clk_p,
+ .ctrlbit = S3C2443_PCLKCON_AC97,
+ }, {
+ .name = "nand",
+ .id = -1,
+ .parent = &clk_h,
+ }, {
+ .name = "usb-bus-host",
+ .id = -1,
+ .parent = &clk_usb_bus_host.clk,
+ }
+};
+
+static inline unsigned long s3c2443_get_hdiv(unsigned long clkcon0)
+{
+ clkcon0 &= S3C2443_CLKDIV0_HCLKDIV_MASK;
+
+ return clkcon0 + 1;
+}
+
+/* EPLLCON compatible enough to get on/off information */
+
+void __init_or_cpufreq s3c2443_common_setup_clocks(pll_fn get_mpll,
+ fdiv_fn get_fdiv)
+{
+ unsigned long epllcon = __raw_readl(S3C2443_EPLLCON);
+ unsigned long mpllcon = __raw_readl(S3C2443_MPLLCON);
+ unsigned long clkdiv0 = __raw_readl(S3C2443_CLKDIV0);
+ struct clk *xtal_clk;
+ unsigned long xtal;
+ unsigned long pll;
+ unsigned long fclk;
+ unsigned long hclk;
+ unsigned long pclk;
+ int ptr;
+
+ xtal_clk = clk_get(NULL, "xtal");
+ xtal = clk_get_rate(xtal_clk);
+ clk_put(xtal_clk);
+
+ pll = get_mpll(mpllcon, xtal);
+ clk_msysclk.clk.rate = pll;
+
+ fclk = pll / get_fdiv(clkdiv0);
+ hclk = s3c2443_prediv_getrate(&clk_prediv);
+ hclk /= s3c2443_get_hdiv(clkdiv0);
+ pclk = hclk / ((clkdiv0 & S3C2443_CLKDIV0_HALF_PCLK) ? 2 : 1);
+
+ s3c24xx_setup_clocks(fclk, hclk, pclk);
+
+ printk("CPU: MPLL %s %ld.%03ld MHz, cpu %ld.%03ld MHz, mem %ld.%03ld MHz, pclk %ld.%03ld MHz\n",
+ (mpllcon & S3C2443_PLLCON_OFF) ? "off":"on",
+ print_mhz(pll), print_mhz(fclk),
+ print_mhz(hclk), print_mhz(pclk));
+
+ for (ptr = 0; ptr < ARRAY_SIZE(clksrc_clks); ptr++)
+ s3c_set_clksrc(&clksrc_clks[ptr], true);
+
+ /* ensure usb bus clock is within correct rate of 48MHz */
+
+ if (clk_get_rate(&clk_usb_bus_host.clk) != (48 * 1000 * 1000)) {
+ printk(KERN_INFO "Warning: USB host bus not at 48MHz\n");
+ clk_set_rate(&clk_usb_bus_host.clk, 48*1000*1000);
+ }
+
+ printk("CPU: EPLL %s %ld.%03ld MHz, usb-bus %ld.%03ld MHz\n",
+ (epllcon & S3C2443_PLLCON_OFF) ? "off":"on",
+ print_mhz(clk_get_rate(&clk_epll)),
+ print_mhz(clk_get_rate(&clk_usb_bus)));
+}
+
+static struct clk *clks[] __initdata = {
+ &clk_prediv,
+ &clk_mpllref,
+ &clk_mdivclk,
+ &clk_ext,
+ &clk_epll,
+ &clk_usb_bus,
+};
+
+static struct clksrc_clk *clksrcs[] __initdata = {
+ &clk_usb_bus_host,
+ &clk_epllref,
+ &clk_esysclk,
+ &clk_msysclk,
+};
+
+void __init s3c2443_common_init_clocks(int xtal, pll_fn get_mpll,
+ fdiv_fn get_fdiv)
+{
+ int ptr;
+
+ /* s3c2443 parents h and p clocks from prediv */
+ clk_h.parent = &clk_prediv;
+ clk_p.parent = &clk_prediv;
+
+ clk_usb_bus.parent = &clk_usb_bus_host.clk;
+ clk_epll.parent = &clk_epllref.clk;
+
+ s3c24xx_register_baseclocks(xtal);
+ s3c24xx_register_clocks(clks, ARRAY_SIZE(clks));
+
+ for (ptr = 0; ptr < ARRAY_SIZE(clksrcs); ptr++)
+ s3c_register_clksrc(clksrcs[ptr], 1);
+
+ s3c_register_clksrc(clksrc_clks, ARRAY_SIZE(clksrc_clks));
+ s3c_register_clocks(init_clocks, ARRAY_SIZE(init_clocks));
+
+ /* See s3c2443/etc notes on disabling clocks at init time */
+ s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
+ s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
+
+ s3c2443_common_setup_clocks(get_mpll, get_fdiv);
+}
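
The whole gating model in this file is s3c2443_gate(): one read-modify-write of the clock's ctrlbit in the selected CLKCON register, wrapped once each for the HCLK/PCLK/SCLK banks. A compact standalone illustration of that read-modify-write, with the MMIO register modelled as a plain variable:

#include <stdio.h>

static unsigned int fake_clkcon;	/* stand-in for a memory-mapped CLKCON register */

static void gate(unsigned int ctrlbit, int enable)
{
	unsigned int con = fake_clkcon;	/* __raw_readl() in the real code */

	if (enable)
		con |= ctrlbit;
	else
		con &= ~ctrlbit;

	fake_clkcon = con;		/* __raw_writel() in the real code */
}

int main(void)
{
	gate(1u << 6, 1);		/* set one example gate bit */
	printf("CLKCON = %#x\n", fake_clkcon);
	gate(1u << 6, 0);		/* and clear it again */
	printf("CLKCON = %#x\n", fake_clkcon);
	return 0;
}
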
diff --git a/arch/arm/plat-s3c24xx/setup-i2c.c b/arch/arm/plat-s3c24xx/setup-i2c.c
index 71a6accf114e..9e90a7cbd1d6 100644
--- a/arch/arm/plat-s3c24xx/setup-i2c.c
+++ b/arch/arm/plat-s3c24xx/setup-i2c.c
@@ -15,12 +15,13 @@
struct platform_device;
+#include <plat/gpio-cfg.h>
#include <plat/iic.h>
#include <mach/hardware.h>
#include <mach/regs-gpio.h>
void s3c_i2c0_cfg_gpio(struct platform_device *dev)
{
- s3c2410_gpio_cfgpin(S3C2410_GPE(15), S3C2410_GPE15_IICSDA);
- s3c2410_gpio_cfgpin(S3C2410_GPE(14), S3C2410_GPE14_IICSCL);
+ s3c_gpio_cfgpin(S3C2410_GPE(15), S3C2410_GPE15_IICSDA);
+ s3c_gpio_cfgpin(S3C2410_GPE(14), S3C2410_GPE14_IICSCL);
}
diff --git a/arch/arm/plat-s3c24xx/setup-ts.c b/arch/arm/plat-s3c24xx/setup-ts.c
new file mode 100644
index 000000000000..ed2638663675
--- /dev/null
+++ b/arch/arm/plat-s3c24xx/setup-ts.c
@@ -0,0 +1,34 @@
+/* linux/arch/arm/plat-s3c24xx/setup-ts.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Based on S3C24XX setup for i2c device
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+
+struct platform_device; /* don't need the contents */
+
+#include <mach/hardware.h>
+#include <mach/regs-gpio.h>
+
+/**
+ * s3c24xx_ts_cfg_gpio - configure gpio for s3c2410 systems
+ *
+ * Configure the GPIO for the S3C2410 system, where we have external FETs
+ * connected to the device (later systems such as the S3C2440 integrate
+ * these into the device).
+ */
+void s3c24xx_ts_cfg_gpio(struct platform_device *dev)
+{
+ s3c2410_gpio_cfgpin(S3C2410_GPG(12), S3C2410_GPG12_XMON);
+ s3c2410_gpio_cfgpin(S3C2410_GPG(13), S3C2410_GPG13_nXPON);
+ s3c2410_gpio_cfgpin(S3C2410_GPG(14), S3C2410_GPG14_YMON);
+ s3c2410_gpio_cfgpin(S3C2410_GPG(15), S3C2410_GPG15_nYPON);
+}
diff --git a/arch/arm/plat-s3c24xx/spi-bus0-gpe11_12_13.c b/arch/arm/plat-s3c24xx/spi-bus0-gpe11_12_13.c
index da7a61728c18..9793544a6ace 100644
--- a/arch/arm/plat-s3c24xx/spi-bus0-gpe11_12_13.c
+++ b/arch/arm/plat-s3c24xx/spi-bus0-gpe11_12_13.c
@@ -21,16 +21,16 @@ void s3c24xx_spi_gpiocfg_bus0_gpe11_12_13(struct s3c2410_spi_info *spi,
int enable)
{
if (enable) {
- s3c2410_gpio_cfgpin(S3C2410_GPE(13), S3C2410_GPE13_SPICLK0);
- s3c2410_gpio_cfgpin(S3C2410_GPE(12), S3C2410_GPE12_SPIMOSI0);
- s3c2410_gpio_cfgpin(S3C2410_GPE(11), S3C2410_GPE11_SPIMISO0);
+ s3c_gpio_cfgpin(S3C2410_GPE(13), S3C2410_GPE13_SPICLK0);
+ s3c_gpio_cfgpin(S3C2410_GPE(12), S3C2410_GPE12_SPIMOSI0);
+ s3c_gpio_cfgpin(S3C2410_GPE(11), S3C2410_GPE11_SPIMISO0);
s3c2410_gpio_pullup(S3C2410_GPE(11), 0);
s3c2410_gpio_pullup(S3C2410_GPE(13), 0);
} else {
- s3c2410_gpio_cfgpin(S3C2410_GPE(13), S3C2410_GPIO_INPUT);
- s3c2410_gpio_cfgpin(S3C2410_GPE(11), S3C2410_GPIO_INPUT);
- s3c2410_gpio_pullup(S3C2410_GPE(11), 1);
- s3c2410_gpio_pullup(S3C2410_GPE(12), 1);
- s3c2410_gpio_pullup(S3C2410_GPE(13), 1);
+ s3c_gpio_cfgpin(S3C2410_GPE(13), S3C2410_GPIO_INPUT);
+ s3c_gpio_cfgpin(S3C2410_GPE(11), S3C2410_GPIO_INPUT);
+ s3c_gpio_cfgpull(S3C2410_GPE(11), S3C_GPIO_PULL_NONE);
+ s3c_gpio_cfgpull(S3C2410_GPE(12), S3C_GPIO_PULL_NONE);
+ s3c_gpio_cfgpull(S3C2410_GPE(13), S3C_GPIO_PULL_NONE);
}
}
diff --git a/arch/arm/plat-s3c24xx/spi-bus1-gpd8_9_10.c b/arch/arm/plat-s3c24xx/spi-bus1-gpd8_9_10.c
index 89fcf5308cf6..db9e9e477ec1 100644
--- a/arch/arm/plat-s3c24xx/spi-bus1-gpd8_9_10.c
+++ b/arch/arm/plat-s3c24xx/spi-bus1-gpd8_9_10.c
@@ -23,16 +23,16 @@ void s3c24xx_spi_gpiocfg_bus1_gpd8_9_10(struct s3c2410_spi_info *spi,
printk(KERN_INFO "%s(%d)\n", __func__, enable);
if (enable) {
- s3c2410_gpio_cfgpin(S3C2410_GPD(10), S3C2440_GPD10_SPICLK1);
- s3c2410_gpio_cfgpin(S3C2410_GPD(9), S3C2440_GPD9_SPIMOSI1);
- s3c2410_gpio_cfgpin(S3C2410_GPD(8), S3C2440_GPD8_SPIMISO1);
+ s3c_gpio_cfgpin(S3C2410_GPD(10), S3C2440_GPD10_SPICLK1);
+ s3c_gpio_cfgpin(S3C2410_GPD(9), S3C2440_GPD9_SPIMOSI1);
+ s3c_gpio_cfgpin(S3C2410_GPD(8), S3C2440_GPD8_SPIMISO1);
s3c2410_gpio_pullup(S3C2410_GPD(10), 0);
s3c2410_gpio_pullup(S3C2410_GPD(9), 0);
} else {
- s3c2410_gpio_cfgpin(S3C2410_GPD(8), S3C2410_GPIO_INPUT);
- s3c2410_gpio_cfgpin(S3C2410_GPD(9), S3C2410_GPIO_INPUT);
- s3c2410_gpio_pullup(S3C2410_GPD(10), 1);
- s3c2410_gpio_pullup(S3C2410_GPD(9), 1);
- s3c2410_gpio_pullup(S3C2410_GPD(8), 1);
+ s3c_gpio_cfgpin(S3C2410_GPD(8), S3C2410_GPIO_INPUT);
+ s3c_gpio_cfgpin(S3C2410_GPD(9), S3C2410_GPIO_INPUT);
+ s3c_gpio_cfgpull(S3C2410_GPD(10), S3C_GPIO_PULL_NONE);
+ s3c_gpio_cfgpull(S3C2410_GPD(9), S3C_GPIO_PULL_NONE);
+ s3c_gpio_cfgpull(S3C2410_GPD(8), S3C_GPIO_PULL_NONE);
}
}
diff --git a/arch/arm/plat-s3c24xx/spi-bus1-gpg5_6_7.c b/arch/arm/plat-s3c24xx/spi-bus1-gpg5_6_7.c
index 86b9edc67413..8ea663a438bb 100644
--- a/arch/arm/plat-s3c24xx/spi-bus1-gpg5_6_7.c
+++ b/arch/arm/plat-s3c24xx/spi-bus1-gpg5_6_7.c
@@ -21,16 +21,16 @@ void s3c24xx_spi_gpiocfg_bus1_gpg5_6_7(struct s3c2410_spi_info *spi,
int enable)
{
if (enable) {
- s3c2410_gpio_cfgpin(S3C2410_GPG(7), S3C2410_GPG7_SPICLK1);
- s3c2410_gpio_cfgpin(S3C2410_GPG(6), S3C2410_GPG6_SPIMOSI1);
- s3c2410_gpio_cfgpin(S3C2410_GPG(5), S3C2410_GPG5_SPIMISO1);
+ s3c_gpio_cfgpin(S3C2410_GPG(7), S3C2410_GPG7_SPICLK1);
+ s3c_gpio_cfgpin(S3C2410_GPG(6), S3C2410_GPG6_SPIMOSI1);
+ s3c_gpio_cfgpin(S3C2410_GPG(5), S3C2410_GPG5_SPIMISO1);
s3c2410_gpio_pullup(S3C2410_GPG(5), 0);
s3c2410_gpio_pullup(S3C2410_GPG(6), 0);
} else {
- s3c2410_gpio_cfgpin(S3C2410_GPG(7), S3C2410_GPIO_INPUT);
- s3c2410_gpio_cfgpin(S3C2410_GPG(5), S3C2410_GPIO_INPUT);
- s3c2410_gpio_pullup(S3C2410_GPG(5), 1);
- s3c2410_gpio_pullup(S3C2410_GPG(6), 1);
- s3c2410_gpio_pullup(S3C2410_GPG(7), 1);
+ s3c_gpio_cfgpin(S3C2410_GPG(7), S3C2410_GPIO_INPUT);
+ s3c_gpio_cfgpin(S3C2410_GPG(5), S3C2410_GPIO_INPUT);
+ s3c_gpio_cfgpull(S3C2410_GPG(5), S3C_GPIO_PULL_NONE);
+ s3c_gpio_cfgpull(S3C2410_GPG(6), S3C_GPIO_PULL_NONE);
+ s3c_gpio_cfgpull(S3C2410_GPG(7), S3C_GPIO_PULL_NONE);
}
}
diff --git a/arch/arm/plat-s5p/Kconfig b/arch/arm/plat-s5p/Kconfig
index d400a6a20fe4..11d6a1bbd90d 100644
--- a/arch/arm/plat-s5p/Kconfig
+++ b/arch/arm/plat-s5p/Kconfig
@@ -7,12 +7,13 @@
config PLAT_S5P
bool
- depends on (ARCH_S5P6440 || ARCH_S5P6442 || ARCH_S5PV210)
+ depends on (ARCH_S5P6440 || ARCH_S5P6442 || ARCH_S5PC100 || ARCH_S5PV210)
default y
select ARM_VIC
select NO_IOPORT
select ARCH_REQUIRE_GPIOLIB
select S3C_GPIO_TRACK
+ select S5P_GPIO_DRVSTR
select SAMSUNG_GPIOLIB_4BIT
select S3C_GPIO_CFG_S3C64XX
select S3C_GPIO_PULL_UPDOWN
@@ -23,3 +24,9 @@ config PLAT_S5P
select SAMSUNG_IRQ_UART
help
Base platform code for Samsung's S5P series SoC.
+
+config S5P_EXT_INT
+ bool
+ help
+	  Use the external interrupts (other than GPIO interrupts).
+ Note: Do not choose this for S5P6440.
diff --git a/arch/arm/plat-s5p/Makefile b/arch/arm/plat-s5p/Makefile
index a7c54b332d27..39c242bb9d58 100644
--- a/arch/arm/plat-s5p/Makefile
+++ b/arch/arm/plat-s5p/Makefile
@@ -16,4 +16,5 @@ obj-y += dev-uart.o
obj-y += cpu.o
obj-y += clock.o
obj-y += irq.o
-obj-y += setup-i2c0.o
+obj-$(CONFIG_S5P_EXT_INT) += irq-eint.o
+
diff --git a/arch/arm/plat-s5p/clock.c b/arch/arm/plat-s5p/clock.c
index aa96e335073b..b5e255265f20 100644
--- a/arch/arm/plat-s5p/clock.c
+++ b/arch/arm/plat-s5p/clock.c
@@ -33,7 +33,12 @@ struct clk clk_ext_xtal_mux = {
.id = -1,
};
-static struct clk s5p_clk_27m = {
+struct clk clk_xusbxti = {
+ .name = "xusbxti",
+ .id = -1,
+};
+
+struct clk s5p_clk_27m = {
.name = "clk_27m",
.id = -1,
.rate = 27000000,
@@ -69,6 +74,13 @@ struct clk clk_fout_epll = {
.ctrlbit = (1 << 31),
};
+/* VPLL clock output */
+struct clk clk_fout_vpll = {
+ .name = "fout_vpll",
+ .id = -1,
+ .ctrlbit = (1 << 31),
+};
+
/* ARM clock */
struct clk clk_arm = {
.name = "armclk",
@@ -133,8 +145,10 @@ static struct clk *s5p_clks[] __initdata = {
&clk_fout_apll,
&clk_fout_mpll,
&clk_fout_epll,
+ &clk_fout_vpll,
&clk_arm,
&clk_vpll,
+ &clk_xusbxti,
};
void __init s5p_register_clocks(unsigned long xtal_freq)
diff --git a/arch/arm/plat-s5p/cpu.c b/arch/arm/plat-s5p/cpu.c
index f92e5de3a755..75cb8c37ca2c 100644
--- a/arch/arm/plat-s5p/cpu.c
+++ b/arch/arm/plat-s5p/cpu.c
@@ -19,12 +19,14 @@
#include <plat/cpu.h>
#include <plat/s5p6440.h>
#include <plat/s5p6442.h>
+#include <plat/s5pc100.h>
#include <plat/s5pv210.h>
/* table of supported CPUs */
static const char name_s5p6440[] = "S5P6440";
static const char name_s5p6442[] = "S5P6442";
+static const char name_s5pc100[] = "S5PC100";
static const char name_s5pv210[] = "S5PV210/S5PC110";
static struct cpu_table cpu_ids[] __initdata = {
@@ -45,6 +47,14 @@ static struct cpu_table cpu_ids[] __initdata = {
.init = s5p6442_init,
.name = name_s5p6442,
}, {
+ .idcode = 0x43100000,
+ .idmask = 0xfffff000,
+ .map_io = s5pc100_map_io,
+ .init_clocks = s5pc100_init_clocks,
+ .init_uarts = s5pc100_init_uarts,
+ .init = s5pc100_init,
+ .name = name_s5pc100,
+ }, {
.idcode = 0x43110000,
.idmask = 0xfffff000,
.map_io = s5pv210_map_io,
diff --git a/arch/arm/plat-s5p/include/plat/irqs.h b/arch/arm/plat-s5p/include/plat/irqs.h
index 42e757f2e40c..3fb3a3a17465 100644
--- a/arch/arm/plat-s5p/include/plat/irqs.h
+++ b/arch/arm/plat-s5p/include/plat/irqs.h
@@ -79,7 +79,7 @@
#define S5P_IRQ_VIC2(x) (S5P_VIC2_BASE + (x))
#define S5P_IRQ_VIC3(x) (S5P_VIC3_BASE + (x))
-#define S5P_TIMER_IRQ(x) S5P_IRQ(11 + (x))
+#define S5P_TIMER_IRQ(x) (11 + (x))
#define IRQ_TIMER0 S5P_TIMER_IRQ(0)
#define IRQ_TIMER1 S5P_TIMER_IRQ(1)
@@ -87,4 +87,11 @@
#define IRQ_TIMER3 S5P_TIMER_IRQ(3)
#define IRQ_TIMER4 S5P_TIMER_IRQ(4)
+#define IRQ_EINT(x) ((x) < 16 ? ((x) + S5P_EINT_BASE1) \
+ : ((x) - 16 + S5P_EINT_BASE2))
+
+#define EINT_OFFSET(irq) ((irq) < S5P_EINT_BASE2 ? \
+ ((irq) - S5P_EINT_BASE1) : \
+ ((irq) + 16 - S5P_EINT_BASE2))
+
#endif /* __ASM_PLAT_S5P_IRQS_H */
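
IRQ_EINT() maps external-interrupt numbers 0..15 onto the range starting at S5P_EINT_BASE1 and 16..31 onto the range starting at S5P_EINT_BASE2; EINT_OFFSET() inverts that mapping. A small userspace round-trip check of the two macros; the base values below are examples only, each S5P SoC defines its own:

#include <assert.h>
#include <stdio.h>

/* example bases only; the real values come from each SoC's irqs.h */
#define S5P_EINT_BASE1	96
#define S5P_EINT_BASE2	128

#define IRQ_EINT(x)	((x) < 16 ? ((x) + S5P_EINT_BASE1) \
				  : ((x) - 16 + S5P_EINT_BASE2))

#define EINT_OFFSET(irq)	((irq) < S5P_EINT_BASE2 ? \
				((irq) - S5P_EINT_BASE1) : \
				((irq) + 16 - S5P_EINT_BASE2))

int main(void)
{
	int x;

	for (x = 0; x < 32; x++)
		assert(EINT_OFFSET(IRQ_EINT(x)) == x);

	printf("round-trip OK\n");
	return 0;
}
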
diff --git a/arch/arm/plat-s5p/include/plat/pll.h b/arch/arm/plat-s5p/include/plat/pll.h
index d48325bb29e2..7db322726bc2 100644
--- a/arch/arm/plat-s5p/include/plat/pll.h
+++ b/arch/arm/plat-s5p/include/plat/pll.h
@@ -81,3 +81,25 @@ static inline unsigned long s5p_get_pll90xx(unsigned long baseclk,
return result;
}
+
+#define PLL65XX_MDIV_MASK (0x3FF)
+#define PLL65XX_PDIV_MASK (0x3F)
+#define PLL65XX_SDIV_MASK (0x7)
+#define PLL65XX_MDIV_SHIFT (16)
+#define PLL65XX_PDIV_SHIFT (8)
+#define PLL65XX_SDIV_SHIFT (0)
+
+static inline unsigned long s5p_get_pll65xx(unsigned long baseclk, u32 pll_con)
+{
+ u32 mdiv, pdiv, sdiv;
+ u64 fvco = baseclk;
+
+ mdiv = (pll_con >> PLL65XX_MDIV_SHIFT) & PLL65XX_MDIV_MASK;
+ pdiv = (pll_con >> PLL65XX_PDIV_SHIFT) & PLL65XX_PDIV_MASK;
+ sdiv = (pll_con >> PLL65XX_SDIV_SHIFT) & PLL65XX_SDIV_MASK;
+
+ fvco *= mdiv;
+ do_div(fvco, (pdiv << sdiv));
+
+ return (unsigned long)fvco;
+}
diff --git a/arch/arm/plat-s5p/include/plat/s5p-clock.h b/arch/arm/plat-s5p/include/plat/s5p-clock.h
index 56fb8b414d41..09418b1101fe 100644
--- a/arch/arm/plat-s5p/include/plat/s5p-clock.h
+++ b/arch/arm/plat-s5p/include/plat/s5p-clock.h
@@ -21,12 +21,16 @@
#define clk_fin_mpll clk_ext_xtal_mux
#define clk_fin_epll clk_ext_xtal_mux
#define clk_fin_vpll clk_ext_xtal_mux
+#define clk_fin_hpll clk_ext_xtal_mux
extern struct clk clk_ext_xtal_mux;
+extern struct clk clk_xusbxti;
extern struct clk clk_48m;
+extern struct clk s5p_clk_27m;
extern struct clk clk_fout_apll;
extern struct clk clk_fout_mpll;
extern struct clk clk_fout_epll;
+extern struct clk clk_fout_vpll;
extern struct clk clk_arm;
extern struct clk clk_vpll;
diff --git a/arch/arm/plat-s5p/include/plat/s5pc100.h b/arch/arm/plat-s5p/include/plat/s5pc100.h
new file mode 100644
index 000000000000..5f6099dd7cad
--- /dev/null
+++ b/arch/arm/plat-s5p/include/plat/s5pc100.h
@@ -0,0 +1,33 @@
+/* arch/arm/plat-s5p/include/plat/s5pc100.h
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Header file for s5pc100 cpu support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+/* Common init code for S5PC100 related SoCs */
+
+extern void s5pc100_common_init_uarts(struct s3c2410_uartcfg *cfg, int no);
+extern void s5pc100_register_clocks(void);
+extern void s5pc100_setup_clocks(void);
+
+#ifdef CONFIG_CPU_S5PC100
+
+extern int s5pc100_init(void);
+extern void s5pc100_init_irq(void);
+extern void s5pc100_map_io(void);
+extern void s5pc100_init_clocks(int xtal);
+
+#define s5pc100_init_uarts s5pc100_common_init_uarts
+
+#else
+#define s5pc100_init_clocks NULL
+#define s5pc100_init_uarts NULL
+#define s5pc100_map_io NULL
+#define s5pc100_init NULL
+#endif
diff --git a/arch/arm/plat-s5p/irq-eint.c b/arch/arm/plat-s5p/irq-eint.c
new file mode 100644
index 000000000000..e56c8075df97
--- /dev/null
+++ b/arch/arm/plat-s5p/irq-eint.c
@@ -0,0 +1,218 @@
+/* linux/arch/arm/plat-s5p/irq-eint.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * S5P - IRQ EINT support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/sysdev.h>
+#include <linux/gpio.h>
+
+#include <asm/hardware/vic.h>
+
+#include <plat/regs-irqtype.h>
+
+#include <mach/map.h>
+#include <plat/cpu.h>
+#include <plat/pm.h>
+
+#include <plat/gpio-cfg.h>
+#include <mach/regs-gpio.h>
+
+static inline void s5p_irq_eint_mask(unsigned int irq)
+{
+ u32 mask;
+
+ mask = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(irq)));
+ mask |= eint_irq_to_bit(irq);
+ __raw_writel(mask, S5P_EINT_MASK(EINT_REG_NR(irq)));
+}
+
+static void s5p_irq_eint_unmask(unsigned int irq)
+{
+ u32 mask;
+
+ mask = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(irq)));
+ mask &= ~(eint_irq_to_bit(irq));
+ __raw_writel(mask, S5P_EINT_MASK(EINT_REG_NR(irq)));
+}
+
+static inline void s5p_irq_eint_ack(unsigned int irq)
+{
+ __raw_writel(eint_irq_to_bit(irq), S5P_EINT_PEND(EINT_REG_NR(irq)));
+}
+
+static void s5p_irq_eint_maskack(unsigned int irq)
+{
+ /* compiler should in-line these */
+ s5p_irq_eint_mask(irq);
+ s5p_irq_eint_ack(irq);
+}
+
+static int s5p_irq_eint_set_type(unsigned int irq, unsigned int type)
+{
+ int offs = EINT_OFFSET(irq);
+ int shift;
+ u32 ctrl, mask;
+ u32 newvalue = 0;
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_RISING:
+ newvalue = S5P_EXTINT_RISEEDGE;
+ break;
+
+ case IRQ_TYPE_EDGE_FALLING:
+ newvalue = S5P_EXTINT_RISEEDGE;
+ break;
+
+ case IRQ_TYPE_EDGE_BOTH:
+ newvalue = S5P_EXTINT_BOTHEDGE;
+ break;
+
+ case IRQ_TYPE_LEVEL_LOW:
+ newvalue = S5P_EXTINT_LOWLEV;
+ break;
+
+ case IRQ_TYPE_LEVEL_HIGH:
+ newvalue = S5P_EXTINT_HILEV;
+ break;
+
+ default:
+		printk(KERN_ERR "No such irq type %d\n", type);
+ return -EINVAL;
+ }
+
+ shift = (offs & 0x7) * 4;
+ mask = 0x7 << shift;
+
+ ctrl = __raw_readl(S5P_EINT_CON(EINT_REG_NR(irq)));
+ ctrl &= ~mask;
+ ctrl |= newvalue << shift;
+ __raw_writel(ctrl, S5P_EINT_CON(EINT_REG_NR(irq)));
+
+ if ((0 <= offs) && (offs < 8))
+ s3c_gpio_cfgpin(EINT_GPIO_0(offs & 0x7), EINT_MODE);
+
+ else if ((8 <= offs) && (offs < 16))
+ s3c_gpio_cfgpin(EINT_GPIO_1(offs & 0x7), EINT_MODE);
+
+ else if ((16 <= offs) && (offs < 24))
+ s3c_gpio_cfgpin(EINT_GPIO_2(offs & 0x7), EINT_MODE);
+
+ else if ((24 <= offs) && (offs < 32))
+ s3c_gpio_cfgpin(EINT_GPIO_3(offs & 0x7), EINT_MODE);
+
+ else
+		printk(KERN_ERR "No such irq number %d\n", offs);
+
+ return 0;
+}
+
+static struct irq_chip s5p_irq_eint = {
+ .name = "s5p-eint",
+ .mask = s5p_irq_eint_mask,
+ .unmask = s5p_irq_eint_unmask,
+ .mask_ack = s5p_irq_eint_maskack,
+ .ack = s5p_irq_eint_ack,
+ .set_type = s5p_irq_eint_set_type,
+#ifdef CONFIG_PM
+ .set_wake = s3c_irqext_wake,
+#endif
+};
+
+/* s5p_irq_demux_eint
+ *
+ * This function demuxes the IRQ from the group0 external interrupts,
+ * from EINTs 16 to 31. It is designed to be inlined into the specific
+ * handler s5p_irq_demux_eintX_Y.
+ *
+ * Each EINT pend/mask registers handle eight of them.
+ */
+static inline void s5p_irq_demux_eint(unsigned int start)
+{
+ u32 status = __raw_readl(S5P_EINT_PEND(EINT_REG_NR(start)));
+ u32 mask = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(start)));
+ unsigned int irq;
+
+ status &= ~mask;
+ status &= 0xff;
+
+ while (status) {
+ irq = fls(status) - 1;
+ generic_handle_irq(irq + start);
+ status &= ~(1 << irq);
+ }
+}
+
+static void s5p_irq_demux_eint16_31(unsigned int irq, struct irq_desc *desc)
+{
+ s5p_irq_demux_eint(IRQ_EINT(16));
+ s5p_irq_demux_eint(IRQ_EINT(24));
+}
+
+static inline void s5p_irq_vic_eint_mask(unsigned int irq)
+{
+ void __iomem *base = get_irq_chip_data(irq);
+
+ s5p_irq_eint_mask(irq);
+ writel(1 << EINT_OFFSET(irq), base + VIC_INT_ENABLE_CLEAR);
+}
+
+static void s5p_irq_vic_eint_unmask(unsigned int irq)
+{
+ void __iomem *base = get_irq_chip_data(irq);
+
+ s5p_irq_eint_unmask(irq);
+ writel(1 << EINT_OFFSET(irq), base + VIC_INT_ENABLE);
+}
+
+static inline void s5p_irq_vic_eint_ack(unsigned int irq)
+{
+ __raw_writel(eint_irq_to_bit(irq), S5P_EINT_PEND(EINT_REG_NR(irq)));
+}
+
+static void s5p_irq_vic_eint_maskack(unsigned int irq)
+{
+ s5p_irq_vic_eint_mask(irq);
+ s5p_irq_vic_eint_ack(irq);
+}
+
+static struct irq_chip s5p_irq_vic_eint = {
+ .name = "s5p_vic_eint",
+ .mask = s5p_irq_vic_eint_mask,
+ .unmask = s5p_irq_vic_eint_unmask,
+ .mask_ack = s5p_irq_vic_eint_maskack,
+ .ack = s5p_irq_vic_eint_ack,
+ .set_type = s5p_irq_eint_set_type,
+#ifdef CONFIG_PM
+ .set_wake = s3c_irqext_wake,
+#endif
+};
+
+int __init s5p_init_irq_eint(void)
+{
+ int irq;
+
+ for (irq = IRQ_EINT(0); irq <= IRQ_EINT(15); irq++)
+ set_irq_chip(irq, &s5p_irq_vic_eint);
+
+ for (irq = IRQ_EINT(16); irq <= IRQ_EINT(31); irq++) {
+ set_irq_chip(irq, &s5p_irq_eint);
+ set_irq_handler(irq, handle_level_irq);
+ set_irq_flags(irq, IRQF_VALID);
+ }
+
+ set_irq_chained_handler(IRQ_EINT16_31, s5p_irq_demux_eint16_31);
+ return 0;
+}
+
+arch_initcall(s5p_init_irq_eint);
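
s5p_irq_demux_eint() walks the unmasked pending bits of one eight-interrupt group with fls() and hands each one to generic_handle_irq(). The bit scan itself is ordinary C; a standalone sketch where handle() stands in for generic_handle_irq():

#include <stdio.h>

static unsigned int highest_bit(unsigned int x)
{
	unsigned int bit = 0;

	while (x >>= 1)		/* equivalent to fls(x) - 1 for non-zero x */
		bit++;
	return bit;
}

static void handle(unsigned int eint) { printf("handle EINT%u\n", eint); }

static void demux(unsigned int pend, unsigned int mask, unsigned int start)
{
	unsigned int status = pend & ~mask & 0xff;	/* pending, unmasked, 8 per group */

	while (status) {
		unsigned int bit = highest_bit(status);

		handle(start + bit);
		status &= ~(1u << bit);
	}
}

int main(void)
{
	/* bits 0, 2, 4 and 7 pending; bit 4 masked; group starts at EINT16 */
	demux(0x95, 0x10, 16);
	return 0;
}
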
diff --git a/arch/arm/plat-s5pc1xx/Kconfig b/arch/arm/plat-s5pc1xx/Kconfig
deleted file mode 100644
index c7ccdf22eefa..000000000000
--- a/arch/arm/plat-s5pc1xx/Kconfig
+++ /dev/null
@@ -1,65 +0,0 @@
-# Copyright 2009 Samsung Electronics Co.
-# Byungho Min <bhmin@samsung.com>
-#
-# Licensed under GPLv2
-
-config PLAT_S5PC1XX
- bool
- depends on ARCH_S5PC1XX
- default y
- select PLAT_S3C
- select ARM_VIC
- select NO_IOPORT
- select ARCH_REQUIRE_GPIOLIB
- select SAMSUNG_CLKSRC
- select SAMSUNG_IRQ_UART
- select SAMSUNG_IRQ_VIC_TIMER
- select S3C_GPIO_TRACK
- select S3C_GPIO_PULL_UPDOWN
- select S3C_GPIO_CFG_S3C24XX
- select S3C_GPIO_CFG_S3C64XX
- select S5P_GPIO_CFG_S5PC1XX
- help
- Base platform code for any Samsung S5PC1XX device
-
-if PLAT_S5PC1XX
-
-# Configuration options shared by all S3C64XX implementations
-
-config CPU_S5PC100_INIT
- bool
- help
- Common initialisation code for the S5PC1XX
-
-config CPU_S5PC100_CLOCK
- bool
- help
- Common clock support code for the S5PC1XX
-
-# platform specific device setup
-
-config S5PC1XX_SETUP_FB_24BPP
- bool
- help
- Common setup code for S5PC1XX with an 24bpp RGB display helper.
-
-config S5PC1XX_SETUP_I2C0
- bool
- default y
- help
- Common setup code for i2c bus 0.
-
- Note, currently since i2c0 is always compiled, this setup helper
- is always compiled with it.
-
-config S5PC1XX_SETUP_I2C1
- bool
- help
- Common setup code for i2c bus 1.
-
-config S5PC1XX_SETUP_SDHCI_GPIO
- bool
- help
- Common setup code for SDHCI gpio.
-
-endif
diff --git a/arch/arm/plat-s5pc1xx/Makefile b/arch/arm/plat-s5pc1xx/Makefile
deleted file mode 100644
index 278f26806089..000000000000
--- a/arch/arm/plat-s5pc1xx/Makefile
+++ /dev/null
@@ -1,31 +0,0 @@
-# arch/arm/plat-s5pc1xx/Makefile
-#
-# Copyright 2009 Samsung Electronics Co.
-#
-# Licensed under GPLv2
-
-obj-y :=
-obj-m :=
-obj-n := dummy.o
-obj- :=
-
-# Core files
-
-obj-y += dev-uart.o
-obj-y += cpu.o
-obj-y += irq.o irq-gpio.o irq-eint.o
-obj-y += clock.o
-obj-y += gpiolib.o
-
-# CPU support
-
-obj-$(CONFIG_CPU_S5PC100_INIT) += s5pc100-init.o
-obj-$(CONFIG_CPU_S5PC100_CLOCK) += s5pc100-clock.o
-
-# Device setup
-
-obj-$(CONFIG_S5P_GPIO_CFG_S5PC1XX) += gpio-config.o
-obj-$(CONFIG_S5PC1XX_SETUP_FB_24BPP) += setup-fb-24bpp.o
-obj-$(CONFIG_S5PC1XX_SETUP_I2C0) += setup-i2c0.o
-obj-$(CONFIG_S5PC1XX_SETUP_I2C1) += setup-i2c1.o
-obj-$(CONFIG_S5PC1XX_SETUP_SDHCI_GPIO) += setup-sdhci-gpio.o
diff --git a/arch/arm/plat-s5pc1xx/clock.c b/arch/arm/plat-s5pc1xx/clock.c
deleted file mode 100644
index 387f23190c3c..000000000000
--- a/arch/arm/plat-s5pc1xx/clock.c
+++ /dev/null
@@ -1,709 +0,0 @@
-/* linux/arch/arm/plat-s5pc1xx/clock.c
- *
- * Copyright 2009 Samsung Electronics Co.
- *
- * S5PC1XX Base clock support
- *
- * Based on plat-s3c64xx/clock.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-
-#include <mach/hardware.h>
-#include <mach/map.h>
-
-#include <plat/regs-clock.h>
-#include <plat/devs.h>
-#include <plat/clock.h>
-
-struct clk clk_27m = {
- .name = "clk_27m",
- .id = -1,
- .rate = 27000000,
-};
-
-static int clk_48m_ctrl(struct clk *clk, int enable)
-{
- unsigned long flags;
- u32 val;
-
- /* can't rely on clock lock, this register has other usages */
- local_irq_save(flags);
-
- val = __raw_readl(S5PC100_CLKSRC1);
- if (enable)
- val |= S5PC100_CLKSRC1_CLK48M_MASK;
- else
- val &= ~S5PC100_CLKSRC1_CLK48M_MASK;
-
- __raw_writel(val, S5PC100_CLKSRC1);
- local_irq_restore(flags);
-
- return 0;
-}
-
-struct clk clk_48m = {
- .name = "clk_48m",
- .id = -1,
- .rate = 48000000,
- .enable = clk_48m_ctrl,
-};
-
-struct clk clk_54m = {
- .name = "clk_54m",
- .id = -1,
- .rate = 54000000,
-};
-
-struct clk clk_hd0 = {
- .name = "hclkd0",
- .id = -1,
- .rate = 0,
- .parent = NULL,
- .ctrlbit = 0,
- .ops = &clk_ops_def_setrate,
-};
-
-struct clk clk_pd0 = {
- .name = "pclkd0",
- .id = -1,
- .rate = 0,
- .parent = NULL,
- .ctrlbit = 0,
- .ops = &clk_ops_def_setrate,
-};
-
-static int s5pc1xx_clk_gate(void __iomem *reg, struct clk *clk, int enable)
-{
- unsigned int ctrlbit = clk->ctrlbit;
- u32 con;
-
- con = __raw_readl(reg);
- if (enable)
- con |= ctrlbit;
- else
- con &= ~ctrlbit;
- __raw_writel(con, reg);
-
- return 0;
-}
-
-static int s5pc100_clk_d00_ctrl(struct clk *clk, int enable)
-{
- return s5pc1xx_clk_gate(S5PC100_CLKGATE_D00, clk, enable);
-}
-
-static int s5pc100_clk_d01_ctrl(struct clk *clk, int enable)
-{
- return s5pc1xx_clk_gate(S5PC100_CLKGATE_D01, clk, enable);
-}
-
-static int s5pc100_clk_d02_ctrl(struct clk *clk, int enable)
-{
- return s5pc1xx_clk_gate(S5PC100_CLKGATE_D02, clk, enable);
-}
-
-static int s5pc100_clk_d10_ctrl(struct clk *clk, int enable)
-{
- return s5pc1xx_clk_gate(S5PC100_CLKGATE_D10, clk, enable);
-}
-
-static int s5pc100_clk_d11_ctrl(struct clk *clk, int enable)
-{
- return s5pc1xx_clk_gate(S5PC100_CLKGATE_D11, clk, enable);
-}
-
-static int s5pc100_clk_d12_ctrl(struct clk *clk, int enable)
-{
- return s5pc1xx_clk_gate(S5PC100_CLKGATE_D12, clk, enable);
-}
-
-static int s5pc100_clk_d13_ctrl(struct clk *clk, int enable)
-{
- return s5pc1xx_clk_gate(S5PC100_CLKGATE_D13, clk, enable);
-}
-
-static int s5pc100_clk_d14_ctrl(struct clk *clk, int enable)
-{
- return s5pc1xx_clk_gate(S5PC100_CLKGATE_D14, clk, enable);
-}
-
-static int s5pc100_clk_d15_ctrl(struct clk *clk, int enable)
-{
- return s5pc1xx_clk_gate(S5PC100_CLKGATE_D15, clk, enable);
-}
-
-static int s5pc100_clk_d20_ctrl(struct clk *clk, int enable)
-{
- return s5pc1xx_clk_gate(S5PC100_CLKGATE_D20, clk, enable);
-}
-
-int s5pc100_sclk0_ctrl(struct clk *clk, int enable)
-{
- return s5pc1xx_clk_gate(S5PC100_SCLKGATE0, clk, enable);
-}
-
-int s5pc100_sclk1_ctrl(struct clk *clk, int enable)
-{
- return s5pc1xx_clk_gate(S5PC100_SCLKGATE1, clk, enable);
-}
-
-static struct clk s5pc100_init_clocks_disable[] = {
- {
- .name = "dsi",
- .id = -1,
- .parent = &clk_p,
- .enable = s5pc100_clk_d11_ctrl,
- .ctrlbit = S5PC100_CLKGATE_D11_DSI,
- }, {
- .name = "csi",
- .id = -1,
- .parent = &clk_h,
- .enable = s5pc100_clk_d11_ctrl,
- .ctrlbit = S5PC100_CLKGATE_D11_CSI,
- }, {
- .name = "ccan",
- .id = 0,
- .parent = &clk_p,
- .enable = s5pc100_clk_d14_ctrl,
- .ctrlbit = S5PC100_CLKGATE_D14_CCAN0,
- }, {
- .name = "ccan",
- .id = 1,
- .parent = &clk_p,
- .enable = s5pc100_clk_d14_ctrl,
- .ctrlbit = S5PC100_CLKGATE_D14_CCAN1,
- }, {
- .name = "keypad",
- .id = -1,
- .parent = &clk_p,
- .enable = s5pc100_clk_d15_ctrl,
- .ctrlbit = S5PC100_CLKGATE_D15_KEYIF,
- }, {
- .name = "hclkd2",
- .id = -1,
- .parent = NULL,
- .enable = s5pc100_clk_d20_ctrl,
- .ctrlbit = S5PC100_CLKGATE_D20_HCLKD2,
- }, {
- .name = "iis-d2",
- .id = -1,
- .parent = NULL,
- .enable = s5pc100_clk_d20_ctrl,
- .ctrlbit = S5PC100_CLKGATE_D20_I2SD2,
- },
-};
-
-static struct clk s5pc100_init_clocks[] = {
- /* System1 (D0_0) devices */
- {
- .name = "intc",
- .id = -1,
- .parent = &clk_hd0,
- .enable = s5pc100_clk_d00_ctrl,
- .ctrlbit = S5PC100_CLKGATE_D00_INTC,
- }, {
- .name = "tzic",
- .id = -1,
- .parent = &clk_hd0,
- .enable = s5pc100_clk_d00_ctrl,
- .ctrlbit = S5PC100_CLKGATE_D00_TZIC,
- }, {
- .name = "cf-ata",
- .id = -1,
- .parent = &clk_hd0,
- .enable = s5pc100_clk_d00_ctrl,
- .ctrlbit = S5PC100_CLKGATE_D00_CFCON,
- }, {
- .name = "mdma",
- .id = -1,
- .parent = &clk_hd0,
- .enable = s5pc100_clk_d00_ctrl,
- .ctrlbit = S5PC100_CLKGATE_D00_MDMA,
- }, {
- .name = "g2d",
- .id = -1,
- .parent = &clk_hd0,
- .enable = s5pc100_clk_d00_ctrl,
- .ctrlbit = S5PC100_CLKGATE_D00_G2D,
- }, {
- .name = "secss",
- .id = -1,
- .parent = &clk_hd0,
- .enable = s5pc100_clk_d00_ctrl,
- .ctrlbit = S5PC100_CLKGATE_D00_SECSS,
- }, {
- .name = "cssys",
- .id = -1,
- .parent = &clk_hd0,
- .enable = s5pc100_clk_d00_ctrl,
- .ctrlbit = S5PC100_CLKGATE_D00_CSSYS,
- },
-
- /* Memory (D0_1) devices */
- {
- .name = "dmc",
- .id = -1,
- .parent = &clk_hd0,
- .enable = s5pc100_clk_d01_ctrl,
- .ctrlbit = S5PC100_CLKGATE_D01_DMC,
- }, {
- .name = "sromc",
- .id = -1,
- .parent = &clk_hd0,
- .enable = s5pc100_clk_d01_ctrl,
- .ctrlbit = S5PC100_CLKGATE_D01_SROMC,
- }, {
- .name = "onenand",
- .id = -1,
- .parent = &clk_hd0,
- .enable = s5pc100_clk_d01_ctrl,
- .ctrlbit = S5PC100_CLKGATE_D01_ONENAND,
- }, {
- .name = "nand",
- .id = -1,
- .parent = &clk_hd0,
- .enable = s5pc100_clk_d01_ctrl,
- .ctrlbit = S5PC100_CLKGATE_D01_NFCON,
- }, {
- .name = "intmem",
- .id = -1,
- .parent = &clk_hd0,
- .enable = s5pc100_clk_d01_ctrl,
- .ctrlbit = S5PC100_CLKGATE_D01_INTMEM,
- }, {
- .name = "ebi",
- .id = -1,
- .parent = &clk_hd0,
- .enable = s5pc100_clk_d01_ctrl,
- .ctrlbit = S5PC100_CLKGATE_D01_EBI,
- },
-
- /* System2 (D0_2) devices */
- {
- .name = "seckey",
- .id = -1,
- .parent = &clk_pd0,
- .enable = s5pc100_clk_d02_ctrl,
- .ctrlbit = S5PC100_CLKGATE_D02_SECKEY,
- }, {
- .name = "sdm",
- .id = -1,
- .parent = &clk_hd0,
- .enable = s5pc100_clk_d02_ctrl,
- .ctrlbit = S5PC100_CLKGATE_D02_SDM,
- },
-
- /* File (D1_0) devices */
- {
- .name = "pdma",
- .id = 0,
- .parent = &clk_h,
- .enable = s5pc100_clk_d10_ctrl,
- .ctrlbit = S5PC100_CLKGATE_D10_PDMA0,
- }, {
- .name = "pdma",
- .id = 1,
- .parent = &clk_h,
- .enable = s5pc100_clk_d10_ctrl,
- .ctrlbit = S5PC100_CLKGATE_D10_PDMA1,
- }, {
- .name = "usb-host",
- .id = -1,
- .parent = &clk_h,
- .enable = s5pc100_clk_d10_ctrl,
- .ctrlbit = S5PC100_CLKGATE_D10_USBHOST,
- }, {
- .name = "otg",
- .id = -1,
- .parent = &clk_h,
- .enable = s5pc100_clk_d10_ctrl,
- .ctrlbit = S5PC100_CLKGATE_D10_USBOTG,
- }, {
- .name = "modem",
- .id = -1,
- .parent = &clk_h,
- .enable = s5pc100_clk_d10_ctrl,
- .ctrlbit = S5PC100_CLKGATE_D10_MODEMIF,
- }, {
- .name = "hsmmc",
- .id = 0,
- .parent = &clk_48m,
- .enable = s5pc100_clk_d10_ctrl,
- .ctrlbit = S5PC100_CLKGATE_D10_HSMMC0,
- }, {
- .name = "hsmmc",
- .id = 1,
- .parent = &clk_48m,
- .enable = s5pc100_clk_d10_ctrl,
- .ctrlbit = S5PC100_CLKGATE_D10_HSMMC1,
- }, {
- .name = "hsmmc",
- .id = 2,
- .parent = &clk_48m,
- .enable = s5pc100_clk_d10_ctrl,
- .ctrlbit = S5PC100_CLKGATE_D10_HSMMC2,
- },
-
- /* Multimedia1 (D1_1) devices */
- {
- .name = "lcd",
- .id = -1,
- .parent = &clk_p,
- .enable = s5pc100_clk_d11_ctrl,
- .ctrlbit = S5PC100_CLKGATE_D11_LCD,
- }, {
- .name = "rotator",
- .id = -1,
- .parent = &clk_p,
- .enable = s5pc100_clk_d11_ctrl,
- .ctrlbit = S5PC100_CLKGATE_D11_ROTATOR,
- }, {
- .name = "fimc",
- .id = -1,
- .parent = &clk_p,
- .enable = s5pc100_clk_d11_ctrl,
- .ctrlbit = S5PC100_CLKGATE_D11_FIMC0,
- }, {
- .name = "fimc",
- .id = -1,
- .parent = &clk_p,
- .enable = s5pc100_clk_d11_ctrl,
- .ctrlbit = S5PC100_CLKGATE_D11_FIMC1,
- }, {
- .name = "fimc",
- .id = -1,
- .parent = &clk_p,
- .enable = s5pc100_clk_d11_ctrl,
- .ctrlbit = S5PC100_CLKGATE_D11_FIMC2,
- }, {
- .name = "jpeg",
- .id = -1,
- .parent = &clk_p,
- .enable = s5pc100_clk_d11_ctrl,
- .ctrlbit = S5PC100_CLKGATE_D11_JPEG,
- }, {
- .name = "g3d",
- .id = -1,
- .parent = &clk_p,
- .enable = s5pc100_clk_d11_ctrl,
- .ctrlbit = S5PC100_CLKGATE_D11_G3D,
- },
-
- /* Multimedia2 (D1_2) devices */
- {
- .name = "tv",
- .id = -1,
- .parent = &clk_p,
- .enable = s5pc100_clk_d12_ctrl,
- .ctrlbit = S5PC100_CLKGATE_D12_TV,
- }, {
- .name = "vp",
- .id = -1,
- .parent = &clk_p,
- .enable = s5pc100_clk_d12_ctrl,
- .ctrlbit = S5PC100_CLKGATE_D12_VP,
- }, {
- .name = "mixer",
- .id = -1,
- .parent = &clk_p,
- .enable = s5pc100_clk_d12_ctrl,
- .ctrlbit = S5PC100_CLKGATE_D12_MIXER,
- }, {
- .name = "hdmi",
- .id = -1,
- .parent = &clk_p,
- .enable = s5pc100_clk_d12_ctrl,
- .ctrlbit = S5PC100_CLKGATE_D12_HDMI,
- }, {
- .name = "mfc",
- .id = -1,
- .parent = &clk_p,
- .enable = s5pc100_clk_d12_ctrl,
- .ctrlbit = S5PC100_CLKGATE_D12_MFC,
- },
-
- /* System (D1_3) devices */
- {
- .name = "chipid",
- .id = -1,
- .parent = &clk_p,
- .enable = s5pc100_clk_d13_ctrl,
- .ctrlbit = S5PC100_CLKGATE_D13_CHIPID,
- }, {
- .name = "gpio",
- .id = -1,
- .parent = &clk_p,
- .enable = s5pc100_clk_d13_ctrl,
- .ctrlbit = S5PC100_CLKGATE_D13_GPIO,
- }, {
- .name = "apc",
- .id = -1,
- .parent = &clk_p,
- .enable = s5pc100_clk_d13_ctrl,
- .ctrlbit = S5PC100_CLKGATE_D13_APC,
- }, {
- .name = "iec",
- .id = -1,
- .parent = &clk_p,
- .enable = s5pc100_clk_d13_ctrl,
- .ctrlbit = S5PC100_CLKGATE_D13_IEC,
- }, {
- .name = "timers",
- .id = -1,
- .parent = &clk_p,
- .enable = s5pc100_clk_d13_ctrl,
- .ctrlbit = S5PC100_CLKGATE_D13_PWM,
- }, {
- .name = "systimer",
- .id = -1,
- .parent = &clk_p,
- .enable = s5pc100_clk_d13_ctrl,
- .ctrlbit = S5PC100_CLKGATE_D13_SYSTIMER,
- }, {
- .name = "watchdog",
- .id = -1,
- .parent = &clk_p,
- .enable = s5pc100_clk_d13_ctrl,
- .ctrlbit = S5PC100_CLKGATE_D13_WDT,
- }, {
- .name = "rtc",
- .id = -1,
- .parent = &clk_p,
- .enable = s5pc100_clk_d13_ctrl,
- .ctrlbit = S5PC100_CLKGATE_D13_RTC,
- },
-
- /* Connectivity (D1_4) devices */
- {
- .name = "uart",
- .id = 0,
- .parent = &clk_p,
- .enable = s5pc100_clk_d14_ctrl,
- .ctrlbit = S5PC100_CLKGATE_D14_UART0,
- }, {
- .name = "uart",
- .id = 1,
- .parent = &clk_p,
- .enable = s5pc100_clk_d14_ctrl,
- .ctrlbit = S5PC100_CLKGATE_D14_UART1,
- }, {
- .name = "uart",
- .id = 2,
- .parent = &clk_p,
- .enable = s5pc100_clk_d14_ctrl,
- .ctrlbit = S5PC100_CLKGATE_D14_UART2,
- }, {
- .name = "uart",
- .id = 3,
- .parent = &clk_p,
- .enable = s5pc100_clk_d14_ctrl,
- .ctrlbit = S5PC100_CLKGATE_D14_UART3,
- }, {
- .name = "i2c",
- .id = -1,
- .parent = &clk_p,
- .enable = s5pc100_clk_d14_ctrl,
- .ctrlbit = S5PC100_CLKGATE_D14_IIC,
- }, {
- .name = "hdmi-i2c",
- .id = -1,
- .parent = &clk_p,
- .enable = s5pc100_clk_d14_ctrl,
- .ctrlbit = S5PC100_CLKGATE_D14_HDMI_IIC,
- }, {
- .name = "spi",
- .id = 0,
- .parent = &clk_p,
- .enable = s5pc100_clk_d14_ctrl,
- .ctrlbit = S5PC100_CLKGATE_D14_SPI0,
- }, {
- .name = "spi",
- .id = 1,
- .parent = &clk_p,
- .enable = s5pc100_clk_d14_ctrl,
- .ctrlbit = S5PC100_CLKGATE_D14_SPI1,
- }, {
- .name = "spi",
- .id = 2,
- .parent = &clk_p,
- .enable = s5pc100_clk_d14_ctrl,
- .ctrlbit = S5PC100_CLKGATE_D14_SPI2,
- }, {
- .name = "irda",
- .id = -1,
- .parent = &clk_p,
- .enable = s5pc100_clk_d14_ctrl,
- .ctrlbit = S5PC100_CLKGATE_D14_IRDA,
- }, {
- .name = "hsitx",
- .id = -1,
- .parent = &clk_p,
- .enable = s5pc100_clk_d14_ctrl,
- .ctrlbit = S5PC100_CLKGATE_D14_HSITX,
- }, {
- .name = "hsirx",
- .id = -1,
- .parent = &clk_p,
- .enable = s5pc100_clk_d14_ctrl,
- .ctrlbit = S5PC100_CLKGATE_D14_HSIRX,
- },
-
- /* Audio (D1_5) devices */
- {
- .name = "iis",
- .id = 0,
- .parent = &clk_p,
- .enable = s5pc100_clk_d15_ctrl,
- .ctrlbit = S5PC100_CLKGATE_D15_IIS0,
- }, {
- .name = "iis",
- .id = 1,
- .parent = &clk_p,
- .enable = s5pc100_clk_d15_ctrl,
- .ctrlbit = S5PC100_CLKGATE_D15_IIS1,
- }, {
- .name = "iis",
- .id = 2,
- .parent = &clk_p,
- .enable = s5pc100_clk_d15_ctrl,
- .ctrlbit = S5PC100_CLKGATE_D15_IIS2,
- }, {
- .name = "ac97",
- .id = -1,
- .parent = &clk_p,
- .enable = s5pc100_clk_d15_ctrl,
- .ctrlbit = S5PC100_CLKGATE_D15_AC97,
- }, {
- .name = "pcm",
- .id = 0,
- .parent = &clk_p,
- .enable = s5pc100_clk_d15_ctrl,
- .ctrlbit = S5PC100_CLKGATE_D15_PCM0,
- }, {
- .name = "pcm",
- .id = 1,
- .parent = &clk_p,
- .enable = s5pc100_clk_d15_ctrl,
- .ctrlbit = S5PC100_CLKGATE_D15_PCM1,
- }, {
- .name = "spdif",
- .id = -1,
- .parent = &clk_p,
- .enable = s5pc100_clk_d15_ctrl,
- .ctrlbit = S5PC100_CLKGATE_D15_SPDIF,
- }, {
- .name = "adc",
- .id = -1,
- .parent = &clk_p,
- .enable = s5pc100_clk_d15_ctrl,
- .ctrlbit = S5PC100_CLKGATE_D15_TSADC,
- }, {
- .name = "cg",
- .id = -1,
- .parent = &clk_p,
- .enable = s5pc100_clk_d15_ctrl,
- .ctrlbit = S5PC100_CLKGATE_D15_CG,
- },
-
- /* Audio (D2_0) devices: all disabled */
-
- /* Special Clocks 0 */
- {
- .name = "sclk_hpm",
- .id = -1,
- .parent = NULL,
- .enable = s5pc100_sclk0_ctrl,
- .ctrlbit = S5PC100_CLKGATE_SCLK0_HPM,
- }, {
- .name = "sclk_onenand",
- .id = -1,
- .parent = NULL,
- .enable = s5pc100_sclk0_ctrl,
- .ctrlbit = S5PC100_CLKGATE_SCLK0_ONENAND,
- }, {
- .name = "spi_48",
- .id = 0,
- .parent = &clk_48m,
- .enable = s5pc100_sclk0_ctrl,
- .ctrlbit = S5PC100_CLKGATE_SCLK0_SPI0_48,
- }, {
- .name = "spi_48",
- .id = 1,
- .parent = &clk_48m,
- .enable = s5pc100_sclk0_ctrl,
- .ctrlbit = S5PC100_CLKGATE_SCLK0_SPI1_48,
- }, {
- .name = "spi_48",
- .id = 2,
- .parent = &clk_48m,
- .enable = s5pc100_sclk0_ctrl,
- .ctrlbit = S5PC100_CLKGATE_SCLK0_SPI2_48,
- }, {
- .name = "mmc_48",
- .id = 0,
- .parent = &clk_48m,
- .enable = s5pc100_sclk0_ctrl,
- .ctrlbit = S5PC100_CLKGATE_SCLK0_MMC0_48,
- }, {
- .name = "mmc_48",
- .id = 1,
- .parent = &clk_48m,
- .enable = s5pc100_sclk0_ctrl,
- .ctrlbit = S5PC100_CLKGATE_SCLK0_MMC1_48,
- }, {
- .name = "mmc_48",
- .id = 2,
- .parent = &clk_48m,
- .enable = s5pc100_sclk0_ctrl,
- .ctrlbit = S5PC100_CLKGATE_SCLK0_MMC2_48,
- },
- /* Special Clocks 1 */
-};
-
-static struct clk *clks[] __initdata = {
- &clk_ext,
- &clk_epll,
- &clk_pd0,
- &clk_hd0,
- &clk_27m,
- &clk_48m,
- &clk_54m,
-};
-
-void __init s5pc1xx_register_clocks(void)
-{
- struct clk *clkp;
- int ret;
- int ptr;
- int size;
-
- s3c24xx_register_clocks(clks, ARRAY_SIZE(clks));
-
- s3c_register_clocks(s5pc100_init_clocks,
- ARRAY_SIZE(s5pc100_init_clocks));
-
- clkp = s5pc100_init_clocks_disable;
- size = ARRAY_SIZE(s5pc100_init_clocks_disable);
-
- for (ptr = 0; ptr < size; ptr++, clkp++) {
- ret = s3c24xx_register_clock(clkp);
- if (ret < 0) {
- printk(KERN_ERR "Failed to register clock %s (%d)\n",
- clkp->name, ret);
- }
-
- (clkp->enable)(clkp, 0);
- }
-
- s3c_pwmclk_init();
-}
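
For reference, the loop above follows the usual Samsung plat- pattern: every clock on the disable list is registered and then immediately gated off via its enable callback, so nothing stays running until a driver claims it. A minimal, self-contained sketch of that register-then-disable pattern is shown below; struct clk and the two fake_* helpers here are hypothetical stand-ins for the s3c clock API, not real kernel symbols.

    #include <stdio.h>
    #include <stddef.h>

    /* Hypothetical stand-ins for struct clk and the s3c registration
     * call, only to illustrate the register-then-disable loop above. */
    struct clk {
            const char *name;
            int (*enable)(struct clk *clk, int enable);
    };

    static int fake_register_clock(struct clk *clk)
    {
            printf("registered %s\n", clk->name);
            return 0;
    }

    static int fake_gate_ctrl(struct clk *clk, int enable)
    {
            printf("%s %s\n", clk->name, enable ? "enabled" : "gated off");
            return 0;
    }

    int main(void)
    {
            struct clk clks[] = {
                    { .name = "uart", .enable = fake_gate_ctrl },
                    { .name = "spi",  .enable = fake_gate_ctrl },
            };
            size_t i;

            /* same shape as s5pc1xx_register_clocks(): register each
             * clock, report failures, then gate it off by default */
            for (i = 0; i < sizeof(clks) / sizeof(clks[0]); i++) {
                    if (fake_register_clock(&clks[i]) < 0)
                            fprintf(stderr, "failed to register %s\n",
                                    clks[i].name);
                    clks[i].enable(&clks[i], 0);
            }
            return 0;
    }
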
diff --git a/arch/arm/plat-s5pc1xx/cpu.c b/arch/arm/plat-s5pc1xx/cpu.c
deleted file mode 100644
index 02baeaa2a121..000000000000
--- a/arch/arm/plat-s5pc1xx/cpu.c
+++ /dev/null
@@ -1,122 +0,0 @@
-/* linux/arch/arm/plat-s5pc1xx/cpu.c
- *
- * Copyright 2009 Samsung Electronics Co.
- * Byungho Min <bhmin@samsung.com>
- *
- * S5PC1XX CPU Support
- *
- * Based on plat-s3c64xx/cpu.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/serial_core.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-
-#include <mach/hardware.h>
-#include <mach/map.h>
-
-#include <asm/mach/map.h>
-
-#include <plat/regs-serial.h>
-
-#include <plat/cpu.h>
-#include <plat/devs.h>
-#include <plat/clock.h>
-
-#include <plat/s5pc100.h>
-
-/* table of supported CPUs */
-
-static const char name_s5pc100[] = "S5PC100";
-
-static struct cpu_table cpu_ids[] __initdata = {
- {
- .idcode = 0x43100000,
- .idmask = 0xfffff000,
- .map_io = s5pc100_map_io,
- .init_clocks = s5pc100_init_clocks,
- .init_uarts = s5pc100_init_uarts,
- .init = s5pc100_init,
- .name = name_s5pc100,
- },
-};
-/* minimal IO mapping */
-
-/* see notes on uart map in arch/arm/mach-s5pc100/include/mach/debug-macro.S */
-#define UART_OFFS (S3C_PA_UART & 0xffff)
-
-static struct map_desc s5pc1xx_iodesc[] __initdata = {
- {
- .virtual = (unsigned long)S5PC1XX_VA_CLK_OTHER,
- .pfn = __phys_to_pfn(S5PC1XX_PA_CLK_OTHER),
- .length = SZ_4K,
- .type = MT_DEVICE,
- }, {
- .virtual = (unsigned long)S5PC1XX_VA_GPIO,
- .pfn = __phys_to_pfn(S5PC100_PA_GPIO),
- .length = SZ_4K,
- .type = MT_DEVICE,
- }, {
- .virtual = (unsigned long)S5PC1XX_VA_CHIPID,
- .pfn = __phys_to_pfn(S5PC1XX_PA_CHIPID),
- .length = SZ_16,
- .type = MT_DEVICE,
- }, {
- .virtual = (unsigned long)S5PC1XX_VA_CLK,
- .pfn = __phys_to_pfn(S5PC1XX_PA_CLK),
- .length = SZ_4K,
- .type = MT_DEVICE,
- }, {
- .virtual = (unsigned long)S5PC1XX_VA_PWR,
- .pfn = __phys_to_pfn(S5PC1XX_PA_PWR),
- .length = SZ_4K,
- .type = MT_DEVICE,
- }, {
- .virtual = (unsigned long)(S5PC1XX_VA_UART),
- .pfn = __phys_to_pfn(S5PC1XX_PA_UART),
- .length = SZ_4K,
- .type = MT_DEVICE,
- }, {
- .virtual = (unsigned long)S5PC1XX_VA_VIC(0),
- .pfn = __phys_to_pfn(S5PC1XX_PA_VIC(0)),
- .length = SZ_4K,
- .type = MT_DEVICE,
- }, {
- .virtual = (unsigned long)S5PC1XX_VA_VIC(1),
- .pfn = __phys_to_pfn(S5PC1XX_PA_VIC(1)),
- .length = SZ_4K,
- .type = MT_DEVICE,
- }, {
- .virtual = (unsigned long)S5PC1XX_VA_VIC(2),
- .pfn = __phys_to_pfn(S5PC1XX_PA_VIC(2)),
- .length = SZ_4K,
- .type = MT_DEVICE,
- }, {
- .virtual = (unsigned long)S5PC1XX_VA_TIMER,
- .pfn = __phys_to_pfn(S5PC1XX_PA_TIMER),
- .length = SZ_256,
- .type = MT_DEVICE,
- },
-};
-
-/* read cpu identification code */
-
-void __init s5pc1xx_init_io(struct map_desc *mach_desc, int size)
-{
- unsigned long idcode;
-
- /* initialise the io descriptors we need for initialisation */
- iotable_init(s5pc1xx_iodesc, ARRAY_SIZE(s5pc1xx_iodesc));
- iotable_init(mach_desc, size);
-
- idcode = __raw_readl(S5PC1XX_VA_CHIPID);
- s3c_init_cpu(idcode, cpu_ids, ARRAY_SIZE(cpu_ids));
-}
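
The chip identification above relies on s3c_init_cpu() matching the ID register value against each cpu_table entry with (idcode & idmask) == idcode. A small user-space sketch of that matching loop, with the table entry reduced to id/mask/name, might look like this:

    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    /* Reduced cpu_table: match (idcode & idmask) == idcode, as
     * s3c_init_cpu() does with the entry defined above. */
    struct cpu_entry {
            uint32_t idcode;
            uint32_t idmask;
            const char *name;
    };

    static const struct cpu_entry table[] = {
            { 0x43100000, 0xfffff000, "S5PC100" },
    };

    static const char *identify(uint32_t idcode)
    {
            size_t i;

            for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
                    if ((idcode & table[i].idmask) == table[i].idcode)
                            return table[i].name;
            return "unknown";
    }

    int main(void)
    {
            /* the low 12 bits carry revision info and are masked off */
            printf("0x43100123 -> %s\n", identify(0x43100123));
            return 0;
    }
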
diff --git a/arch/arm/plat-s5pc1xx/dev-uart.c b/arch/arm/plat-s5pc1xx/dev-uart.c
deleted file mode 100644
index 586c95c60bfe..000000000000
--- a/arch/arm/plat-s5pc1xx/dev-uart.c
+++ /dev/null
@@ -1,145 +0,0 @@
-/* linux/arch/arm/plat-s5pc1xx/dev-uart.c
- *
- * Copyright 2009 Samsung Electronics Co.
- * Byungho Min <bhmin@samsung.com>
- *
- * Based on plat-s3c64xx/dev-uart.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
-*/
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/interrupt.h>
-#include <linux/list.h>
-#include <linux/platform_device.h>
-
-#include <asm/mach/arch.h>
-#include <asm/mach/irq.h>
-#include <mach/hardware.h>
-#include <mach/map.h>
-
-#include <plat/devs.h>
-
-/* Serial port registrations */
-
-/* 64xx uarts are closer together */
-
-static struct resource s5pc1xx_uart0_resource[] = {
- [0] = {
- .start = S3C_PA_UART0,
- .end = S3C_PA_UART0 + 0x100,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = IRQ_S3CUART_RX0,
- .end = IRQ_S3CUART_RX0,
- .flags = IORESOURCE_IRQ,
- },
- [2] = {
- .start = IRQ_S3CUART_TX0,
- .end = IRQ_S3CUART_TX0,
- .flags = IORESOURCE_IRQ,
-
- },
- [3] = {
- .start = IRQ_S3CUART_ERR0,
- .end = IRQ_S3CUART_ERR0,
- .flags = IORESOURCE_IRQ,
- }
-};
-
-static struct resource s5pc1xx_uart1_resource[] = {
- [0] = {
- .start = S3C_PA_UART1,
- .end = S3C_PA_UART1 + 0x100,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = IRQ_S3CUART_RX1,
- .end = IRQ_S3CUART_RX1,
- .flags = IORESOURCE_IRQ,
- },
- [2] = {
- .start = IRQ_S3CUART_TX1,
- .end = IRQ_S3CUART_TX1,
- .flags = IORESOURCE_IRQ,
-
- },
- [3] = {
- .start = IRQ_S3CUART_ERR1,
- .end = IRQ_S3CUART_ERR1,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct resource s5pc1xx_uart2_resource[] = {
- [0] = {
- .start = S3C_PA_UART2,
- .end = S3C_PA_UART2 + 0x100,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = IRQ_S3CUART_RX2,
- .end = IRQ_S3CUART_RX2,
- .flags = IORESOURCE_IRQ,
- },
- [2] = {
- .start = IRQ_S3CUART_TX2,
- .end = IRQ_S3CUART_TX2,
- .flags = IORESOURCE_IRQ,
-
- },
- [3] = {
- .start = IRQ_S3CUART_ERR2,
- .end = IRQ_S3CUART_ERR2,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct resource s5pc1xx_uart3_resource[] = {
- [0] = {
- .start = S3C_PA_UART3,
- .end = S3C_PA_UART3 + 0x100,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = IRQ_S3CUART_RX3,
- .end = IRQ_S3CUART_RX3,
- .flags = IORESOURCE_IRQ,
- },
- [2] = {
- .start = IRQ_S3CUART_TX3,
- .end = IRQ_S3CUART_TX3,
- .flags = IORESOURCE_IRQ,
-
- },
- [3] = {
- .start = IRQ_S3CUART_ERR3,
- .end = IRQ_S3CUART_ERR3,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-
-struct s3c24xx_uart_resources s5pc1xx_uart_resources[] __initdata = {
- [0] = {
- .resources = s5pc1xx_uart0_resource,
- .nr_resources = ARRAY_SIZE(s5pc1xx_uart0_resource),
- },
- [1] = {
- .resources = s5pc1xx_uart1_resource,
- .nr_resources = ARRAY_SIZE(s5pc1xx_uart1_resource),
- },
- [2] = {
- .resources = s5pc1xx_uart2_resource,
- .nr_resources = ARRAY_SIZE(s5pc1xx_uart2_resource),
- },
- [3] = {
- .resources = s5pc1xx_uart3_resource,
- .nr_resources = ARRAY_SIZE(s5pc1xx_uart3_resource),
- },
-};
diff --git a/arch/arm/plat-s5pc1xx/gpio-config.c b/arch/arm/plat-s5pc1xx/gpio-config.c
deleted file mode 100644
index a4f67e80a150..000000000000
--- a/arch/arm/plat-s5pc1xx/gpio-config.c
+++ /dev/null
@@ -1,62 +0,0 @@
-/* linux/arch/arm/plat-s5pc1xx/gpio-config.c
- *
- * Copyright 2009 Samsung Electronics
- *
- * S5PC1XX GPIO Configuration.
- *
- * Based on plat-s3c64xx/gpio-config.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/gpio.h>
-#include <linux/io.h>
-
-#include <plat/gpio-core.h>
-#include <plat/gpio-cfg-s5pc1xx.h>
-
-s5p_gpio_drvstr_t s5p_gpio_get_drvstr(unsigned int pin, unsigned int off)
-{
- struct s3c_gpio_chip *chip = s3c_gpiolib_getchip(pin);
- void __iomem *reg;
- int shift = off * 2;
- u32 drvstr;
-
- if (!chip)
- return -EINVAL;
-
- reg = chip->base + 0x0C;
-
- drvstr = __raw_readl(reg);
-	drvstr &= (0x3 << shift);
- drvstr = drvstr >> shift;
-
- return (__force s5p_gpio_drvstr_t)drvstr;
-}
-EXPORT_SYMBOL(s5p_gpio_get_drvstr);
-
-int s5p_gpio_set_drvstr(unsigned int pin, unsigned int off,
- s5p_gpio_drvstr_t drvstr)
-{
- struct s3c_gpio_chip *chip = s3c_gpiolib_getchip(pin);
- void __iomem *reg;
- int shift = off * 2;
- u32 tmp;
-
- if (!chip)
- return -EINVAL;
-
- reg = chip->base + 0x0C;
-
- tmp = __raw_readl(reg);
-	tmp &= ~(0x3 << shift);
-	tmp |= drvstr << shift;
-
- __raw_writel(tmp, reg);
-
- return 0;
-}
-EXPORT_SYMBOL(s5p_gpio_set_drvstr);
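
Both accessors above treat the bank's DRV register at offset 0x0C as an array of 2-bit fields, so pin offset N lives in bits [2N+1:2N]. A stand-alone sketch of just that field packing, with a plain variable standing in for the __raw_readl/__raw_writel access to chip->base, is:

    #include <stdio.h>
    #include <stdint.h>

    /* Pack/extract a 2-bit drive-strength field for pin 'off' in a
     * 32-bit DRV register image, mirroring s5p_gpio_{get,set}_drvstr(). */
    static uint32_t drvstr_get(uint32_t reg, unsigned int off)
    {
            return (reg >> (off * 2)) & 0x3;
    }

    static uint32_t drvstr_set(uint32_t reg, unsigned int off, uint32_t level)
    {
            unsigned int shift = off * 2;

            reg &= ~(0x3u << shift);        /* clear the old 2-bit field */
            reg |= (level & 0x3) << shift;  /* insert the new level */
            return reg;
    }

    int main(void)
    {
            uint32_t reg = 0;

            reg = drvstr_set(reg, 3, 0x2);  /* pin 3 -> level 2 */
            printf("reg=0x%08x pin3=%u\n", reg, drvstr_get(reg, 3));
            return 0;
    }
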
diff --git a/arch/arm/plat-s5pc1xx/include/plat/gpio-cfg-s5pc1xx.h b/arch/arm/plat-s5pc1xx/include/plat/gpio-cfg-s5pc1xx.h
deleted file mode 100644
index 72ad59f61efc..000000000000
--- a/arch/arm/plat-s5pc1xx/include/plat/gpio-cfg-s5pc1xx.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* linux/arch/arm/plat-s5pc1xx/include/plat/gpio-cfg-s5pc1xx.h
- *
- * Copyright 2009 Samsung Electronic
- *
- * S5PC1XX Platform - GPIO pin configuration
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-/* This file contains the extra S5PC1XX gpio pin configuration
- * definitions, currently the per-pin drive strength levels and the
- * accessors used to read and change them.
- */
-
-#ifndef __GPIO_CFG_S5PC1XX_H
-#define __GPIO_CFG_S5PC1XX_H __FILE__
-
-typedef unsigned int __bitwise__ s5p_gpio_drvstr_t;
-
-#define S5P_GPIO_DRVSTR_LV1 0x00
-#define S5P_GPIO_DRVSTR_LV2 0x01
-#define S5P_GPIO_DRVSTR_LV3 0x10
-#define S5P_GPIO_DRVSTR_LV4 0x11
-
-extern s5p_gpio_drvstr_t s5p_gpio_get_drvstr(unsigned int pin, unsigned int off);
-
-extern int s5p_gpio_set_drvstr(unsigned int pin, unsigned int off,
- s5p_gpio_drvstr_t drvstr);
-
-#endif /* __GPIO_CFG_S5PC1XX_H */
diff --git a/arch/arm/plat-s5pc1xx/include/plat/gpio-ext.h b/arch/arm/plat-s5pc1xx/include/plat/gpio-ext.h
deleted file mode 100644
index 33ad267e8477..000000000000
--- a/arch/arm/plat-s5pc1xx/include/plat/gpio-ext.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/* linux/arch/arm/plat-s5pc1xx/include/plat/gpio-ext.h
- *
- * Copyright 2009 Samsung Electronics Co.
- *
- * External Interrupt (GPH0 ~ GPH3) control register definitions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#define S5PC1XX_WKUP_INT_CON0_7 (S5PC1XX_EINT_BASE + 0x0)
-#define S5PC1XX_WKUP_INT_CON8_15 (S5PC1XX_EINT_BASE + 0x4)
-#define S5PC1XX_WKUP_INT_CON16_23 (S5PC1XX_EINT_BASE + 0x8)
-#define S5PC1XX_WKUP_INT_CON24_31 (S5PC1XX_EINT_BASE + 0xC)
-#define S5PC1XX_WKUP_INT_CON(x) (S5PC1XX_WKUP_INT_CON0_7 + (x * 0x4))
-
-#define S5PC1XX_WKUP_INT_FLTCON0_3 (S5PC1XX_EINT_BASE + 0x80)
-#define S5PC1XX_WKUP_INT_FLTCON4_7 (S5PC1XX_EINT_BASE + 0x84)
-#define S5PC1XX_WKUP_INT_FLTCON8_11 (S5PC1XX_EINT_BASE + 0x88)
-#define S5PC1XX_WKUP_INT_FLTCON12_15 (S5PC1XX_EINT_BASE + 0x8C)
-#define S5PC1XX_WKUP_INT_FLTCON16_19 (S5PC1XX_EINT_BASE + 0x90)
-#define S5PC1XX_WKUP_INT_FLTCON20_23 (S5PC1XX_EINT_BASE + 0x94)
-#define S5PC1XX_WKUP_INT_FLTCON24_27 (S5PC1XX_EINT_BASE + 0x98)
-#define S5PC1XX_WKUP_INT_FLTCON28_31 (S5PC1XX_EINT_BASE + 0x9C)
-#define S5PC1XX_WKUP_INT_FLTCON(x) (S5PC1XX_WKUP_INT_FLTCON0_3 + (x * 0x4))
-
-#define S5PC1XX_WKUP_INT_MASK0_7 (S5PC1XX_EINT_BASE + 0x100)
-#define S5PC1XX_WKUP_INT_MASK8_15 (S5PC1XX_EINT_BASE + 0x104)
-#define S5PC1XX_WKUP_INT_MASK16_23 (S5PC1XX_EINT_BASE + 0x108)
-#define S5PC1XX_WKUP_INT_MASK24_31 (S5PC1XX_EINT_BASE + 0x10C)
-#define S5PC1XX_WKUP_INT_MASK(x) (S5PC1XX_WKUP_INT_MASK0_7 + (x * 0x4))
-
-#define S5PC1XX_WKUP_INT_PEND0_7 (S5PC1XX_EINT_BASE + 0x140)
-#define S5PC1XX_WKUP_INT_PEND8_15 (S5PC1XX_EINT_BASE + 0x144)
-#define S5PC1XX_WKUP_INT_PEND16_23 (S5PC1XX_EINT_BASE + 0x148)
-#define S5PC1XX_WKUP_INT_PEND24_31 (S5PC1XX_EINT_BASE + 0x14C)
-#define S5PC1XX_WKUP_INT_PEND(x) (S5PC1XX_WKUP_INT_PEND0_7 + (x * 0x4))
-
-#define S5PC1XX_WKUP_INT_LOWLEV (0x00)
-#define S5PC1XX_WKUP_INT_HILEV (0x01)
-#define S5PC1XX_WKUP_INT_FALLEDGE (0x02)
-#define S5PC1XX_WKUP_INT_RISEEDGE (0x03)
-#define S5PC1XX_WKUP_INT_BOTHEDGE (0x04)
diff --git a/arch/arm/plat-s5pc1xx/include/plat/irqs.h b/arch/arm/plat-s5pc1xx/include/plat/irqs.h
deleted file mode 100644
index 409c804315e8..000000000000
--- a/arch/arm/plat-s5pc1xx/include/plat/irqs.h
+++ /dev/null
@@ -1,198 +0,0 @@
-/* linux/arch/arm/plat-s5pc1xx/include/plat/irqs.h
- *
- * Copyright 2009 Samsung Electronics Co.
- * Byungho Min <bhmin@samsung.com>
- *
- * S5PC1XX - Common IRQ support
- *
- * Based on plat-s3c64xx/include/plat/irqs.h
- */
-
-#ifndef __ASM_PLAT_S5PC1XX_IRQS_H
-#define __ASM_PLAT_S5PC1XX_IRQS_H __FILE__
-
-/* we keep the first set of CPU IRQs out of the range of
- * the ISA space, so that the PC104 has them to itself
- * and we don't end up having to do horrible things to the
- * standard ISA drivers....
- *
- * note, since we're using the VICs, our start must be a
- * multiple of 32 to allow the common code to work
- */
-
-#define S3C_IRQ_OFFSET (32)
-
-#define S3C_IRQ(x) ((x) + S3C_IRQ_OFFSET)
-
-#define S3C_VIC0_BASE S3C_IRQ(0)
-#define S3C_VIC1_BASE S3C_IRQ(32)
-#define S3C_VIC2_BASE S3C_IRQ(64)
-
-/* UART interrupts, each UART has 4 interrupts per channel so
- * use the space between the ISA and S3C main interrupts. Note, these
- * are not in the same order as the S3C24XX series! */
-
-#define IRQ_S3CUART_BASE0 (16)
-#define IRQ_S3CUART_BASE1 (20)
-#define IRQ_S3CUART_BASE2 (24)
-#define IRQ_S3CUART_BASE3 (28)
-
-#define UART_IRQ_RXD (0)
-#define UART_IRQ_ERR (1)
-#define UART_IRQ_TXD (2)
-#define UART_IRQ_MODEM (3)
-
-#define IRQ_S3CUART_RX0 (IRQ_S3CUART_BASE0 + UART_IRQ_RXD)
-#define IRQ_S3CUART_TX0 (IRQ_S3CUART_BASE0 + UART_IRQ_TXD)
-#define IRQ_S3CUART_ERR0 (IRQ_S3CUART_BASE0 + UART_IRQ_ERR)
-
-#define IRQ_S3CUART_RX1 (IRQ_S3CUART_BASE1 + UART_IRQ_RXD)
-#define IRQ_S3CUART_TX1 (IRQ_S3CUART_BASE1 + UART_IRQ_TXD)
-#define IRQ_S3CUART_ERR1 (IRQ_S3CUART_BASE1 + UART_IRQ_ERR)
-
-#define IRQ_S3CUART_RX2 (IRQ_S3CUART_BASE2 + UART_IRQ_RXD)
-#define IRQ_S3CUART_TX2 (IRQ_S3CUART_BASE2 + UART_IRQ_TXD)
-#define IRQ_S3CUART_ERR2 (IRQ_S3CUART_BASE2 + UART_IRQ_ERR)
-
-#define IRQ_S3CUART_RX3 (IRQ_S3CUART_BASE3 + UART_IRQ_RXD)
-#define IRQ_S3CUART_TX3 (IRQ_S3CUART_BASE3 + UART_IRQ_TXD)
-#define IRQ_S3CUART_ERR3 (IRQ_S3CUART_BASE3 + UART_IRQ_ERR)
-
-/* VIC based IRQs */
-
-#define S5PC1XX_IRQ_VIC0(x) (S3C_VIC0_BASE + (x))
-#define S5PC1XX_IRQ_VIC1(x) (S3C_VIC1_BASE + (x))
-#define S5PC1XX_IRQ_VIC2(x) (S3C_VIC2_BASE + (x))
-
-/*
- * VIC0: system, DMA, timer
- */
-#define IRQ_EINT0 S5PC1XX_IRQ_VIC0(0)
-#define IRQ_EINT1 S5PC1XX_IRQ_VIC0(1)
-#define IRQ_EINT2 S5PC1XX_IRQ_VIC0(2)
-#define IRQ_EINT3 S5PC1XX_IRQ_VIC0(3)
-#define IRQ_EINT4 S5PC1XX_IRQ_VIC0(4)
-#define IRQ_EINT5 S5PC1XX_IRQ_VIC0(5)
-#define IRQ_EINT6 S5PC1XX_IRQ_VIC0(6)
-#define IRQ_EINT7 S5PC1XX_IRQ_VIC0(7)
-#define IRQ_EINT8 S5PC1XX_IRQ_VIC0(8)
-#define IRQ_EINT9 S5PC1XX_IRQ_VIC0(9)
-#define IRQ_EINT10 S5PC1XX_IRQ_VIC0(10)
-#define IRQ_EINT11 S5PC1XX_IRQ_VIC0(11)
-#define IRQ_EINT12 S5PC1XX_IRQ_VIC0(12)
-#define IRQ_EINT13 S5PC1XX_IRQ_VIC0(13)
-#define IRQ_EINT14 S5PC1XX_IRQ_VIC0(14)
-#define IRQ_EINT15 S5PC1XX_IRQ_VIC0(15)
-#define IRQ_EINT16_31 S5PC1XX_IRQ_VIC0(16)
-#define IRQ_BATF S5PC1XX_IRQ_VIC0(17)
-#define IRQ_MDMA S5PC1XX_IRQ_VIC0(18)
-#define IRQ_PDMA0 S5PC1XX_IRQ_VIC0(19)
-#define IRQ_PDMA1 S5PC1XX_IRQ_VIC0(20)
-#define IRQ_TIMER0_VIC S5PC1XX_IRQ_VIC0(21)
-#define IRQ_TIMER1_VIC S5PC1XX_IRQ_VIC0(22)
-#define IRQ_TIMER2_VIC S5PC1XX_IRQ_VIC0(23)
-#define IRQ_TIMER3_VIC S5PC1XX_IRQ_VIC0(24)
-#define IRQ_TIMER4_VIC S5PC1XX_IRQ_VIC0(25)
-#define IRQ_SYSTIMER S5PC1XX_IRQ_VIC0(26)
-#define IRQ_WDT S5PC1XX_IRQ_VIC0(27)
-#define IRQ_RTC_ALARM S5PC1XX_IRQ_VIC0(28)
-#define IRQ_RTC_TIC S5PC1XX_IRQ_VIC0(29)
-#define IRQ_GPIOINT S5PC1XX_IRQ_VIC0(30)
-
-/*
- * VIC1: ARM, power, memory, connectivity
- */
-#define IRQ_CORTEX0 S5PC1XX_IRQ_VIC1(0)
-#define IRQ_CORTEX1 S5PC1XX_IRQ_VIC1(1)
-#define IRQ_CORTEX2 S5PC1XX_IRQ_VIC1(2)
-#define IRQ_CORTEX3 S5PC1XX_IRQ_VIC1(3)
-#define IRQ_CORTEX4 S5PC1XX_IRQ_VIC1(4)
-#define IRQ_IEMAPC S5PC1XX_IRQ_VIC1(5)
-#define IRQ_IEMIEC S5PC1XX_IRQ_VIC1(6)
-#define IRQ_ONENAND S5PC1XX_IRQ_VIC1(7)
-#define IRQ_NFC S5PC1XX_IRQ_VIC1(8)
-#define IRQ_CFC S5PC1XX_IRQ_VIC1(9)
-#define IRQ_UART0 S5PC1XX_IRQ_VIC1(10)
-#define IRQ_UART1 S5PC1XX_IRQ_VIC1(11)
-#define IRQ_UART2 S5PC1XX_IRQ_VIC1(12)
-#define IRQ_UART3 S5PC1XX_IRQ_VIC1(13)
-#define IRQ_IIC S5PC1XX_IRQ_VIC1(14)
-#define IRQ_SPI0 S5PC1XX_IRQ_VIC1(15)
-#define IRQ_SPI1 S5PC1XX_IRQ_VIC1(16)
-#define IRQ_SPI2 S5PC1XX_IRQ_VIC1(17)
-#define IRQ_IRDA S5PC1XX_IRQ_VIC1(18)
-#define IRQ_CAN0 S5PC1XX_IRQ_VIC1(19)
-#define IRQ_CAN1 S5PC1XX_IRQ_VIC1(20)
-#define IRQ_HSIRX S5PC1XX_IRQ_VIC1(21)
-#define IRQ_HSITX S5PC1XX_IRQ_VIC1(22)
-#define IRQ_UHOST S5PC1XX_IRQ_VIC1(23)
-#define IRQ_OTG S5PC1XX_IRQ_VIC1(24)
-#define IRQ_MSM S5PC1XX_IRQ_VIC1(25)
-#define IRQ_HSMMC0 S5PC1XX_IRQ_VIC1(26)
-#define IRQ_HSMMC1 S5PC1XX_IRQ_VIC1(27)
-#define IRQ_HSMMC2 S5PC1XX_IRQ_VIC1(28)
-#define IRQ_MIPICSI S5PC1XX_IRQ_VIC1(29)
-#define IRQ_MIPIDSI S5PC1XX_IRQ_VIC1(30)
-
-/*
- * VIC2: multimedia, audio, security
- */
-#define IRQ_LCD0 S5PC1XX_IRQ_VIC2(0)
-#define IRQ_LCD1 S5PC1XX_IRQ_VIC2(1)
-#define IRQ_LCD2 S5PC1XX_IRQ_VIC2(2)
-#define IRQ_LCD3 S5PC1XX_IRQ_VIC2(3)
-#define IRQ_ROTATOR S5PC1XX_IRQ_VIC2(4)
-#define IRQ_FIMC0 S5PC1XX_IRQ_VIC2(5)
-#define IRQ_FIMC1 S5PC1XX_IRQ_VIC2(6)
-#define IRQ_FIMC2 S5PC1XX_IRQ_VIC2(7)
-#define IRQ_JPEG S5PC1XX_IRQ_VIC2(8)
-#define IRQ_2D S5PC1XX_IRQ_VIC2(9)
-#define IRQ_3D S5PC1XX_IRQ_VIC2(10)
-#define IRQ_MIXER S5PC1XX_IRQ_VIC2(11)
-#define IRQ_HDMI S5PC1XX_IRQ_VIC2(12)
-#define IRQ_IIC1 S5PC1XX_IRQ_VIC2(13)
-#define IRQ_MFC S5PC1XX_IRQ_VIC2(14)
-#define IRQ_TVENC S5PC1XX_IRQ_VIC2(15)
-#define IRQ_I2S0 S5PC1XX_IRQ_VIC2(16)
-#define IRQ_I2S1 S5PC1XX_IRQ_VIC2(17)
-#define IRQ_I2S2 S5PC1XX_IRQ_VIC2(18)
-#define IRQ_AC97 S5PC1XX_IRQ_VIC2(19)
-#define IRQ_PCM0 S5PC1XX_IRQ_VIC2(20)
-#define IRQ_PCM1 S5PC1XX_IRQ_VIC2(21)
-#define IRQ_SPDIF S5PC1XX_IRQ_VIC2(22)
-#define IRQ_ADC S5PC1XX_IRQ_VIC2(23)
-#define IRQ_PENDN S5PC1XX_IRQ_VIC2(24)
-#define IRQ_TC IRQ_PENDN
-#define IRQ_KEYPAD S5PC1XX_IRQ_VIC2(25)
-#define IRQ_CG S5PC1XX_IRQ_VIC2(26)
-#define IRQ_SEC S5PC1XX_IRQ_VIC2(27)
-#define IRQ_SECRX S5PC1XX_IRQ_VIC2(28)
-#define IRQ_SECTX S5PC1XX_IRQ_VIC2(29)
-#define IRQ_SDMIRQ S5PC1XX_IRQ_VIC2(30)
-#define IRQ_SDMFIQ S5PC1XX_IRQ_VIC2(31)
-
-#define IRQ_TIMER(x) (IRQ_SDMFIQ + 1 + (x))
-#define IRQ_TIMER0 IRQ_TIMER(0)
-#define IRQ_TIMER1 IRQ_TIMER(1)
-#define IRQ_TIMER2 IRQ_TIMER(2)
-#define IRQ_TIMER3 IRQ_TIMER(3)
-#define IRQ_TIMER4 IRQ_TIMER(4)
-
-/* External interrupt */
-#define S3C_IRQ_EINT_BASE (IRQ_SDMFIQ + 6)
-
-#define S3C_EINT(x) (S3C_IRQ_EINT_BASE + (x - 16))
-#define IRQ_EINT(x) (x < 16 ? IRQ_EINT0 + x : S3C_EINT(x))
-#define IRQ_EINT_BIT(x) (x < IRQ_EINT16_31 ? x - IRQ_EINT0 : x - S3C_EINT(0))
-
-/* GPIO interrupt */
-#define S3C_IRQ_GPIO_BASE (IRQ_EINT(31) + 1)
-#define S3C_IRQ_GPIO(x) (S3C_IRQ_GPIO_BASE + (x))
-
-/*
- * Up to the MP04 bank there are 40 (really 39) gpio groups of
- * 8 pins each, i.e. roughly 320 gpio interrupts.
- */
-#define NR_IRQS (S3C_IRQ_GPIO(320) + 1)
-
-#endif /* __ASM_PLAT_S5PC1XX_IRQS_H */
-
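
As a worked example of the macros above: IRQ_EINT0 resolves to 32 (VIC0 entry 0) and S3C_IRQ_EINT_BASE to IRQ_SDMFIQ + 6, which works out to 133, so IRQ_EINT(x) maps EINT0..15 directly onto VIC0 entries and EINT16..31 onto a linear range above the VIC space. A minimal sketch of that split, with the two bases written out as constants, is:

    #include <stdio.h>

    /* Worked-out values of IRQ_EINT0 and S3C_IRQ_EINT_BASE from the
     * header above; only meant to illustrate the two-range mapping. */
    #define EXAMPLE_EINT0_BASE      32      /* EINT0..15 sit directly in VIC0 */
    #define EXAMPLE_EXT_BASE        133     /* EINT16..31 live past the VIC range */

    static int example_irq_eint(int x)
    {
            return x < 16 ? EXAMPLE_EINT0_BASE + x
                          : EXAMPLE_EXT_BASE + (x - 16);
    }

    int main(void)
    {
            printf("EINT5  -> irq %d\n", example_irq_eint(5));   /* 37 */
            printf("EINT20 -> irq %d\n", example_irq_eint(20));  /* 137 */
            return 0;
    }
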
diff --git a/arch/arm/plat-s5pc1xx/include/plat/pll.h b/arch/arm/plat-s5pc1xx/include/plat/pll.h
deleted file mode 100644
index 21afef1573e7..000000000000
--- a/arch/arm/plat-s5pc1xx/include/plat/pll.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/* arch/arm/plat-s5pc1xx/include/plat/pll.h
- *
- * Copyright 2009 Samsung Electronics Co.
- * Byungho Min <bhmin@samsung.com>
- *
- * S5PC1XX PLL code
- *
- * Based on plat-s3c64xx/include/plat/pll.h
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#define S5P_PLL_MDIV_MASK ((1 << (25-16+1)) - 1)
-#define S5P_PLL_PDIV_MASK ((1 << (13-8+1)) - 1)
-#define S5P_PLL_SDIV_MASK ((1 << (2-0+1)) - 1)
-#define S5P_PLL_MDIV_SHIFT (16)
-#define S5P_PLL_PDIV_SHIFT (8)
-#define S5P_PLL_SDIV_SHIFT (0)
-
-#include <asm/div64.h>
-
-static inline unsigned long s5pc1xx_get_pll(unsigned long baseclk,
- u32 pllcon)
-{
- u32 mdiv, pdiv, sdiv;
- u64 fvco = baseclk;
-
- mdiv = (pllcon >> S5P_PLL_MDIV_SHIFT) & S5P_PLL_MDIV_MASK;
- pdiv = (pllcon >> S5P_PLL_PDIV_SHIFT) & S5P_PLL_PDIV_MASK;
- sdiv = (pllcon >> S5P_PLL_SDIV_SHIFT) & S5P_PLL_SDIV_MASK;
-
- fvco *= mdiv;
- do_div(fvco, (pdiv << sdiv));
-
- return (unsigned long)fvco;
-}
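
The helper above computes FOUT = FIN * MDIV / (PDIV << SDIV) from the xPLL_CON divider fields. A stand-alone sketch of the same arithmetic, using an assumed 12 MHz reference and made-up divider values rather than a real register snapshot, is:

    #include <stdio.h>
    #include <stdint.h>

    #define MDIV_SHIFT 16
    #define PDIV_SHIFT 8
    #define SDIV_SHIFT 0
    #define MDIV_MASK  0x3ff
    #define PDIV_MASK  0x3f
    #define SDIV_MASK  0x7

    /* Same formula as s5pc1xx_get_pll(): FOUT = FIN * m / (p << s) */
    static unsigned long pll_rate(unsigned long fin, uint32_t pllcon)
    {
            uint32_t m = (pllcon >> MDIV_SHIFT) & MDIV_MASK;
            uint32_t p = (pllcon >> PDIV_SHIFT) & PDIV_MASK;
            uint32_t s = (pllcon >> SDIV_SHIFT) & SDIV_MASK;

            return (unsigned long)(((uint64_t)fin * m) / (p << s));
    }

    int main(void)
    {
            /* made-up example: m=400, p=6, s=1 with a 12 MHz crystal */
            uint32_t pllcon = (400u << MDIV_SHIFT) | (6u << PDIV_SHIFT) | 1u;

            printf("FOUT = %lu Hz\n", pll_rate(12000000UL, pllcon));  /* 400 MHz */
            return 0;
    }
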
diff --git a/arch/arm/plat-s5pc1xx/include/plat/regs-clock.h b/arch/arm/plat-s5pc1xx/include/plat/regs-clock.h
deleted file mode 100644
index 24dec4e52538..000000000000
--- a/arch/arm/plat-s5pc1xx/include/plat/regs-clock.h
+++ /dev/null
@@ -1,252 +0,0 @@
-/* arch/arm/plat-s5pc1xx/include/plat/regs-clock.h
- *
- * Copyright 2009 Samsung Electronics Co.
- * Byungho Min <bhmin@samsung.com>
- *
- * S5PC1XX clock register definitions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __PLAT_REGS_CLOCK_H
-#define __PLAT_REGS_CLOCK_H __FILE__
-
-#define S5PC100_CLKREG(x) (S5PC1XX_VA_CLK + (x))
-#define S5PC100_CLKREG_OTHER(x) (S5PC1XX_VA_CLK_OTHER + (x))
-
-/* s5pc100 register for clock */
-#define S5PC100_APLL_LOCK S5PC100_CLKREG(0x00)
-#define S5PC100_MPLL_LOCK S5PC100_CLKREG(0x04)
-#define S5PC100_EPLL_LOCK S5PC100_CLKREG(0x08)
-#define S5PC100_HPLL_LOCK S5PC100_CLKREG(0x0C)
-
-#define S5PC100_APLL_CON S5PC100_CLKREG(0x100)
-#define S5PC100_MPLL_CON S5PC100_CLKREG(0x104)
-#define S5PC100_EPLL_CON S5PC100_CLKREG(0x108)
-#define S5PC100_HPLL_CON S5PC100_CLKREG(0x10C)
-
-#define S5PC100_CLKSRC0 S5PC100_CLKREG(0x200)
-#define S5PC100_CLKSRC1 S5PC100_CLKREG(0x204)
-#define S5PC100_CLKSRC2 S5PC100_CLKREG(0x208)
-#define S5PC100_CLKSRC3 S5PC100_CLKREG(0x20C)
-
-#define S5PC100_CLKDIV0 S5PC100_CLKREG(0x300)
-#define S5PC100_CLKDIV1 S5PC100_CLKREG(0x304)
-#define S5PC100_CLKDIV2 S5PC100_CLKREG(0x308)
-#define S5PC100_CLKDIV3 S5PC100_CLKREG(0x30C)
-#define S5PC100_CLKDIV4 S5PC100_CLKREG(0x310)
-
-#define S5PC100_CLK_OUT S5PC100_CLKREG(0x400)
-
-#define S5PC100_CLKGATE_D00 S5PC100_CLKREG(0x500)
-#define S5PC100_CLKGATE_D01 S5PC100_CLKREG(0x504)
-#define S5PC100_CLKGATE_D02 S5PC100_CLKREG(0x508)
-
-#define S5PC100_CLKGATE_D10 S5PC100_CLKREG(0x520)
-#define S5PC100_CLKGATE_D11 S5PC100_CLKREG(0x524)
-#define S5PC100_CLKGATE_D12 S5PC100_CLKREG(0x528)
-#define S5PC100_CLKGATE_D13 S5PC100_CLKREG(0x52C)
-#define S5PC100_CLKGATE_D14 S5PC100_CLKREG(0x530)
-#define S5PC100_CLKGATE_D15 S5PC100_CLKREG(0x534)
-
-#define S5PC100_CLKGATE_D20 S5PC100_CLKREG(0x540)
-
-#define S5PC100_SCLKGATE0 S5PC100_CLKREG(0x560)
-#define S5PC100_SCLKGATE1 S5PC100_CLKREG(0x564)
-
-/* EPLL_CON */
-#define S5PC100_EPLL_EN (1<<31)
-#define S5PC100_EPLL_MASK 0xffffffff
-#define S5PC100_EPLLVAL(_m, _p, _s) ((_m) << 16 | ((_p) << 8) | ((_s)))
-
-/* CLKSRC0..CLKSRC3 -> mostly removed due to clksrc updates */
-#define S5PC100_CLKSRC1_CLK48M_MASK (0x1<<24)
-#define S5PC100_CLKSRC1_CLK48M_SHIFT (24)
-
-/* CLKDIV0 */
-#define S5PC100_CLKDIV0_APLL_MASK (0x1<<0)
-#define S5PC100_CLKDIV0_APLL_SHIFT (0)
-#define S5PC100_CLKDIV0_ARM_MASK (0x7<<4)
-#define S5PC100_CLKDIV0_ARM_SHIFT (4)
-#define S5PC100_CLKDIV0_D0_MASK (0x7<<8)
-#define S5PC100_CLKDIV0_D0_SHIFT (8)
-#define S5PC100_CLKDIV0_PCLKD0_MASK (0x7<<12)
-#define S5PC100_CLKDIV0_PCLKD0_SHIFT (12)
-#define S5PC100_CLKDIV0_SECSS_MASK (0x7<<16)
-#define S5PC100_CLKDIV0_SECSS_SHIFT (16)
-
-/* CLKDIV1 (OneNAND clock only used in one place, removed) */
-#define S5PC100_CLKDIV1_APLL2_MASK (0x7<<0)
-#define S5PC100_CLKDIV1_APLL2_SHIFT (0)
-#define S5PC100_CLKDIV1_MPLL_MASK (0x3<<4)
-#define S5PC100_CLKDIV1_MPLL_SHIFT (4)
-#define S5PC100_CLKDIV1_MPLL2_MASK (0x1<<8)
-#define S5PC100_CLKDIV1_MPLL2_SHIFT (8)
-#define S5PC100_CLKDIV1_D1_MASK (0x7<<12)
-#define S5PC100_CLKDIV1_D1_SHIFT (12)
-#define S5PC100_CLKDIV1_PCLKD1_MASK (0x7<<16)
-#define S5PC100_CLKDIV1_PCLKD1_SHIFT (16)
-#define S5PC100_CLKDIV1_CAM_MASK (0x1F<<24)
-#define S5PC100_CLKDIV1_CAM_SHIFT (24)
-
-/* CLKDIV2 => removed in clksrc update */
-/* CLKDIV3 => removed in clksrc update, or not needed */
-/* CLKDIV4 => removed in clksrc update, or not needed */
-
-/* HCLKD0/PCLKD0 Clock Gate 0 Registers */
-#define S5PC100_CLKGATE_D00_INTC (1<<0)
-#define S5PC100_CLKGATE_D00_TZIC (1<<1)
-#define S5PC100_CLKGATE_D00_CFCON (1<<2)
-#define S5PC100_CLKGATE_D00_MDMA (1<<3)
-#define S5PC100_CLKGATE_D00_G2D (1<<4)
-#define S5PC100_CLKGATE_D00_SECSS (1<<5)
-#define S5PC100_CLKGATE_D00_CSSYS (1<<6)
-
-/* HCLKD0/PCLKD0 Clock Gate 1 Registers */
-#define S5PC100_CLKGATE_D01_DMC (1<<0)
-#define S5PC100_CLKGATE_D01_SROMC (1<<1)
-#define S5PC100_CLKGATE_D01_ONENAND (1<<2)
-#define S5PC100_CLKGATE_D01_NFCON (1<<3)
-#define S5PC100_CLKGATE_D01_INTMEM (1<<4)
-#define S5PC100_CLKGATE_D01_EBI (1<<5)
-
-/* PCLKD0 Clock Gate 2 Registers */
-#define S5PC100_CLKGATE_D02_SECKEY (1<<1)
-#define S5PC100_CLKGATE_D02_SDM (1<<2)
-
-/* HCLKD1/PCLKD1 Clock Gate 0 Registers */
-#define S5PC100_CLKGATE_D10_PDMA0 (1<<0)
-#define S5PC100_CLKGATE_D10_PDMA1 (1<<1)
-#define S5PC100_CLKGATE_D10_USBHOST (1<<2)
-#define S5PC100_CLKGATE_D10_USBOTG (1<<3)
-#define S5PC100_CLKGATE_D10_MODEMIF (1<<4)
-#define S5PC100_CLKGATE_D10_HSMMC0 (1<<5)
-#define S5PC100_CLKGATE_D10_HSMMC1 (1<<6)
-#define S5PC100_CLKGATE_D10_HSMMC2 (1<<7)
-
-/* HCLKD1/PCLKD1 Clock Gate 1 Registers */
-#define S5PC100_CLKGATE_D11_LCD (1<<0)
-#define S5PC100_CLKGATE_D11_ROTATOR (1<<1)
-#define S5PC100_CLKGATE_D11_FIMC0 (1<<2)
-#define S5PC100_CLKGATE_D11_FIMC1 (1<<3)
-#define S5PC100_CLKGATE_D11_FIMC2 (1<<4)
-#define S5PC100_CLKGATE_D11_JPEG (1<<5)
-#define S5PC100_CLKGATE_D11_DSI (1<<6)
-#define S5PC100_CLKGATE_D11_CSI (1<<7)
-#define S5PC100_CLKGATE_D11_G3D (1<<8)
-
-/* HCLKD1/PCLKD1 Clock Gate 2 Registers */
-#define S5PC100_CLKGATE_D12_TV (1<<0)
-#define S5PC100_CLKGATE_D12_VP (1<<1)
-#define S5PC100_CLKGATE_D12_MIXER (1<<2)
-#define S5PC100_CLKGATE_D12_HDMI (1<<3)
-#define S5PC100_CLKGATE_D12_MFC (1<<4)
-
-/* HCLKD1/PCLKD1 Clock Gate 3 Registers */
-#define S5PC100_CLKGATE_D13_CHIPID (1<<0)
-#define S5PC100_CLKGATE_D13_GPIO (1<<1)
-#define S5PC100_CLKGATE_D13_APC (1<<2)
-#define S5PC100_CLKGATE_D13_IEC (1<<3)
-#define S5PC100_CLKGATE_D13_PWM (1<<6)
-#define S5PC100_CLKGATE_D13_SYSTIMER (1<<7)
-#define S5PC100_CLKGATE_D13_WDT (1<<8)
-#define S5PC100_CLKGATE_D13_RTC (1<<9)
-
-/* HCLKD1/PCLKD1 Clock Gate 4 Registers */
-#define S5PC100_CLKGATE_D14_UART0 (1<<0)
-#define S5PC100_CLKGATE_D14_UART1 (1<<1)
-#define S5PC100_CLKGATE_D14_UART2 (1<<2)
-#define S5PC100_CLKGATE_D14_UART3 (1<<3)
-#define S5PC100_CLKGATE_D14_IIC (1<<4)
-#define S5PC100_CLKGATE_D14_HDMI_IIC (1<<5)
-#define S5PC100_CLKGATE_D14_SPI0 (1<<6)
-#define S5PC100_CLKGATE_D14_SPI1 (1<<7)
-#define S5PC100_CLKGATE_D14_SPI2 (1<<8)
-#define S5PC100_CLKGATE_D14_IRDA (1<<9)
-#define S5PC100_CLKGATE_D14_CCAN0 (1<<10)
-#define S5PC100_CLKGATE_D14_CCAN1 (1<<11)
-#define S5PC100_CLKGATE_D14_HSITX (1<<12)
-#define S5PC100_CLKGATE_D14_HSIRX (1<<13)
-
-/* HCLKD1/PCLKD1 Clock Gate 5 Registers */
-#define S5PC100_CLKGATE_D15_IIS0 (1<<0)
-#define S5PC100_CLKGATE_D15_IIS1 (1<<1)
-#define S5PC100_CLKGATE_D15_IIS2 (1<<2)
-#define S5PC100_CLKGATE_D15_AC97 (1<<3)
-#define S5PC100_CLKGATE_D15_PCM0 (1<<4)
-#define S5PC100_CLKGATE_D15_PCM1 (1<<5)
-#define S5PC100_CLKGATE_D15_SPDIF (1<<6)
-#define S5PC100_CLKGATE_D15_TSADC (1<<7)
-#define S5PC100_CLKGATE_D15_KEYIF (1<<8)
-#define S5PC100_CLKGATE_D15_CG (1<<9)
-
-/* HCLKD2 Clock Gate 0 Registers */
-#define S5PC100_CLKGATE_D20_HCLKD2 (1<<0)
-#define S5PC100_CLKGATE_D20_I2SD2 (1<<1)
-
-/* Special Clock Gate 0 Registers */
-#define S5PC100_CLKGATE_SCLK0_HPM (1<<0)
-#define S5PC100_CLKGATE_SCLK0_PWI (1<<1)
-#define S5PC100_CLKGATE_SCLK0_ONENAND (1<<2)
-#define S5PC100_CLKGATE_SCLK0_UART (1<<3)
-#define S5PC100_CLKGATE_SCLK0_SPI0 (1<<4)
-#define S5PC100_CLKGATE_SCLK0_SPI1 (1<<5)
-#define S5PC100_CLKGATE_SCLK0_SPI2 (1<<6)
-#define S5PC100_CLKGATE_SCLK0_SPI0_48 (1<<7)
-#define S5PC100_CLKGATE_SCLK0_SPI1_48 (1<<8)
-#define S5PC100_CLKGATE_SCLK0_SPI2_48 (1<<9)
-#define S5PC100_CLKGATE_SCLK0_IRDA (1<<10)
-#define S5PC100_CLKGATE_SCLK0_USBHOST (1<<11)
-#define S5PC100_CLKGATE_SCLK0_MMC0 (1<<12)
-#define S5PC100_CLKGATE_SCLK0_MMC1 (1<<13)
-#define S5PC100_CLKGATE_SCLK0_MMC2 (1<<14)
-#define S5PC100_CLKGATE_SCLK0_MMC0_48 (1<<15)
-#define S5PC100_CLKGATE_SCLK0_MMC1_48 (1<<16)
-#define S5PC100_CLKGATE_SCLK0_MMC2_48 (1<<17)
-
-/* Special Clock Gate 1 Registers */
-#define S5PC100_CLKGATE_SCLK1_LCD (1<<0)
-#define S5PC100_CLKGATE_SCLK1_FIMC0 (1<<1)
-#define S5PC100_CLKGATE_SCLK1_FIMC1 (1<<2)
-#define S5PC100_CLKGATE_SCLK1_FIMC2 (1<<3)
-#define S5PC100_CLKGATE_SCLK1_TV54 (1<<4)
-#define S5PC100_CLKGATE_SCLK1_VDAC54 (1<<5)
-#define S5PC100_CLKGATE_SCLK1_MIXER (1<<6)
-#define S5PC100_CLKGATE_SCLK1_HDMI (1<<7)
-#define S5PC100_CLKGATE_SCLK1_AUDIO0 (1<<8)
-#define S5PC100_CLKGATE_SCLK1_AUDIO1 (1<<9)
-#define S5PC100_CLKGATE_SCLK1_AUDIO2 (1<<10)
-#define S5PC100_CLKGATE_SCLK1_SPDIF (1<<11)
-#define S5PC100_CLKGATE_SCLK1_CAM (1<<12)
-
-#define S5PC100_SWRESET S5PC100_CLKREG_OTHER(0x000)
-#define S5PC100_OND_SWRESET S5PC100_CLKREG_OTHER(0x008)
-#define S5PC100_GEN_CTRL S5PC100_CLKREG_OTHER(0x100)
-#define S5PC100_GEN_STATUS S5PC100_CLKREG_OTHER(0x104)
-#define S5PC100_MEM_SYS_CFG S5PC100_CLKREG_OTHER(0x200)
-#define S5PC100_CAM_MUX_SEL S5PC100_CLKREG_OTHER(0x300)
-#define S5PC100_MIXER_OUT_SEL S5PC100_CLKREG_OTHER(0x304)
-#define S5PC100_LPMP_MODE_SEL S5PC100_CLKREG_OTHER(0x308)
-#define S5PC100_MIPI_PHY_CON0 S5PC100_CLKREG_OTHER(0x400)
-#define S5PC100_MIPI_PHY_CON1 S5PC100_CLKREG_OTHER(0x414)
-#define S5PC100_HDMI_PHY_CON0 S5PC100_CLKREG_OTHER(0x420)
-
-#define S5PC100_SWRESET_RESETVAL 0xc100
-#define S5PC100_OTHER_SYS_INT 24
-#define S5PC100_OTHER_STA_TYPE 23
-#define STA_TYPE_EXPON 0
-#define STA_TYPE_SFR 1
-
-#define S5PC100_SLEEP_CFG_OSC_EN 0
-
-/* OTHERS Register */
-#define S5PC100_OTHERS_USB_SIG_MASK (1 << 16)
-#define S5PC100_OTHERS_MIPI_DPHY_EN (1 << 28)
-
-/* MIPI D-PHY Control Register 0 */
-#define S5PC100_MIPI_PHY_CON0_M_RESETN (1 << 1)
-#define S5PC100_MIPI_PHY_CON0_S_RESETN (1 << 0)
-
-#endif /* __PLAT_REGS_CLOCK_H */
diff --git a/arch/arm/plat-s5pc1xx/include/plat/regs-power.h b/arch/arm/plat-s5pc1xx/include/plat/regs-power.h
deleted file mode 100644
index 02ffa491b53a..000000000000
--- a/arch/arm/plat-s5pc1xx/include/plat/regs-power.h
+++ /dev/null
@@ -1,84 +0,0 @@
-/* arch/arm/plat-s5pc1xx/include/plat/regs-power.h
- *
- * Copyright 2009 Samsung Electronics Co.
- * Jongse Won <jongse.won@samsung.com>
- *
- * S5PC1XX power management register definitions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __ASM_ARM_REGS_PWR
-#define __ASM_ARM_REGS_PWR __FILE__
-
-#define S5PC1XX_PWRREG(x) (S5PC1XX_VA_PWR + (x))
-
-/* s5pc100 (0xE0108000) register for power management */
-#define S5PC100_PWR_CFG S5PC1XX_PWRREG(0x0)
-#define S5PC100_EINT_WAKEUP_MASK S5PC1XX_PWRREG(0x4)
-#define S5PC100_NORMAL_CFG S5PC1XX_PWRREG(0x10)
-#define S5PC100_STOP_CFG S5PC1XX_PWRREG(0x14)
-#define S5PC100_SLEEP_CFG S5PC1XX_PWRREG(0x18)
-#define S5PC100_STOP_MEM_CFG S5PC1XX_PWRREG(0x1C)
-#define S5PC100_OSC_FREQ S5PC1XX_PWRREG(0x100)
-#define S5PC100_OSC_STABLE S5PC1XX_PWRREG(0x104)
-#define S5PC100_PWR_STABLE S5PC1XX_PWRREG(0x108)
-#define S5PC100_MTC_STABLE S5PC1XX_PWRREG(0x110)
-#define S5PC100_CLAMP_STABLE S5PC1XX_PWRREG(0x114)
-#define S5PC100_OTHERS S5PC1XX_PWRREG(0x200)
-#define S5PC100_RST_STAT S5PC1XX_PWRREG(0x300)
-#define S5PC100_WAKEUP_STAT S5PC1XX_PWRREG(0x304)
-#define S5PC100_BLK_PWR_STAT S5PC1XX_PWRREG(0x308)
-#define S5PC100_INFORM0 S5PC1XX_PWRREG(0x400)
-#define S5PC100_INFORM1 S5PC1XX_PWRREG(0x404)
-#define S5PC100_INFORM2 S5PC1XX_PWRREG(0x408)
-#define S5PC100_INFORM3 S5PC1XX_PWRREG(0x40C)
-#define S5PC100_INFORM4 S5PC1XX_PWRREG(0x410)
-#define S5PC100_INFORM5 S5PC1XX_PWRREG(0x414)
-#define S5PC100_INFORM6 S5PC1XX_PWRREG(0x418)
-#define S5PC100_INFORM7 S5PC1XX_PWRREG(0x41C)
-#define S5PC100_DCGIDX_MAP0 S5PC1XX_PWRREG(0x500)
-#define S5PC100_DCGIDX_MAP1 S5PC1XX_PWRREG(0x504)
-#define S5PC100_DCGIDX_MAP2 S5PC1XX_PWRREG(0x508)
-#define S5PC100_DCGPERF_MAP0 S5PC1XX_PWRREG(0x50C)
-#define S5PC100_DCGPERF_MAP1 S5PC1XX_PWRREG(0x510)
-#define S5PC100_DVCIDX_MAP S5PC1XX_PWRREG(0x514)
-#define S5PC100_FREQ_CPU S5PC1XX_PWRREG(0x518)
-#define S5PC100_FREQ_DPM S5PC1XX_PWRREG(0x51C)
-#define S5PC100_DVSEMCLK_EN S5PC1XX_PWRREG(0x520)
-#define S5PC100_APLL_CON_L8 S5PC1XX_PWRREG(0x600)
-#define S5PC100_APLL_CON_L7 S5PC1XX_PWRREG(0x604)
-#define S5PC100_APLL_CON_L6 S5PC1XX_PWRREG(0x608)
-#define S5PC100_APLL_CON_L5 S5PC1XX_PWRREG(0x60C)
-#define S5PC100_APLL_CON_L4 S5PC1XX_PWRREG(0x610)
-#define S5PC100_APLL_CON_L3 S5PC1XX_PWRREG(0x614)
-#define S5PC100_APLL_CON_L2 S5PC1XX_PWRREG(0x618)
-#define S5PC100_APLL_CON_L1 S5PC1XX_PWRREG(0x61C)
-#define S5PC100_IEM_CONTROL S5PC1XX_PWRREG(0x620)
-#define S5PC100_CLKDIV_IEM_L8 S5PC1XX_PWRREG(0x700)
-#define S5PC100_CLKDIV_IEM_L7 S5PC1XX_PWRREG(0x704)
-#define S5PC100_CLKDIV_IEM_L6 S5PC1XX_PWRREG(0x708)
-#define S5PC100_CLKDIV_IEM_L5 S5PC1XX_PWRREG(0x70C)
-#define S5PC100_CLKDIV_IEM_L4 S5PC1XX_PWRREG(0x710)
-#define S5PC100_CLKDIV_IEM_L3 S5PC1XX_PWRREG(0x714)
-#define S5PC100_CLKDIV_IEM_L2 S5PC1XX_PWRREG(0x718)
-#define S5PC100_CLKDIV_IEM_L1 S5PC1XX_PWRREG(0x71C)
-#define S5PC100_IEM_HPMCLK_DIV S5PC1XX_PWRREG(0x724)
-
-/* PWR_CFG */
-#define S5PC100_PWRCFG_CFG_DEEP_IDLE (1 << 31)
-#define S5PC100_PWRCFG_CFG_WFI_MASK (3 << 5)
-#define S5PC100_PWRCFG_CFG_WFI_IDLE (0 << 5)
-#define S5PC100_PWRCFG_CFG_WFI_DEEP_IDLE (1 << 5)
-#define S5PC100_PWRCFG_CFG_WFI_STOP (2 << 5)
-#define S5PC100_PWRCFG_CFG_WFI_SLEEP (3 << 5)
-
-/* SLEEP_CFG */
-#define S5PC100_SLEEP_OSC_EN_SLEEP (1 << 0)
-
-/* OTHERS */
-#define S5PC100_PMU_INT_DISABLE (1 << 24)
-
-#endif /* __ASM_ARM_REGS_PWR */
diff --git a/arch/arm/plat-s5pc1xx/include/plat/s5pc100.h b/arch/arm/plat-s5pc1xx/include/plat/s5pc100.h
deleted file mode 100644
index 2531f34a56f3..000000000000
--- a/arch/arm/plat-s5pc1xx/include/plat/s5pc100.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/* arch/arm/plat-s5pc1xx/include/plat/s5pc100.h
- *
- * Copyright 2009 Samsung Electronics Co.
- * Byungho Min <bhmin@samsung.com>
- *
- * Header file for s5pc100 cpu support
- *
- * Based on plat-s3c64xx/include/plat/s3c6400.h
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-/* Common init code for S5PC100 related SoCs */
-extern int s5pc100_init(void);
-extern void s5pc100_map_io(void);
-extern void s5pc100_init_clocks(int xtal);
-extern int s5pc100_register_baseclocks(unsigned long xtal);
-extern void s5pc100_init_irq(void);
-extern void s5pc100_init_io(struct map_desc *mach_desc, int size);
-extern void s5pc100_common_init_uarts(struct s3c2410_uartcfg *cfg, int no);
-extern void s5pc100_register_clocks(void);
-extern void s5pc100_setup_clocks(void);
-extern struct sysdev_class s5pc100_sysclass;
-
-#define s5pc100_init_uarts s5pc100_common_init_uarts
-
-/* Some day, the declarations below will be moved to plat-s5pc/include/plat/cpu.h */
-extern void s5pc1xx_init_irq(u32 *vic_valid, int num);
-extern void s5pc1xx_init_io(struct map_desc *mach_desc, int size);
-
-/* Some day, the declarations below will be moved to plat-s5pc/include/plat/clock.h */
-extern struct clk clk_hpll;
-extern struct clk clk_hd0;
-extern struct clk clk_pd0;
-extern struct clk clk_54m;
-extern void s5pc1xx_register_clocks(void);
-extern int s5pc100_sclk0_ctrl(struct clk *clk, int enable);
-extern int s5pc100_sclk1_ctrl(struct clk *clk, int enable);
-
-/* Some day, the declarations below will be moved to plat-s5pc/include/plat/devs.h */
-extern struct s3c24xx_uart_resources s5pc1xx_uart_resources[];
-extern struct platform_device s3c_device_g2d;
-extern struct platform_device s3c_device_g3d;
-extern struct platform_device s3c_device_vpp;
-extern struct platform_device s3c_device_tvenc;
-extern struct platform_device s3c_device_tvscaler;
-extern struct platform_device s3c_device_rotator;
-extern struct platform_device s3c_device_jpeg;
-extern struct platform_device s3c_device_onenand;
-extern struct platform_device s3c_device_usb_otghcd;
-extern struct platform_device s3c_device_keypad;
-extern struct platform_device s3c_device_ts;
-extern struct platform_device s3c_device_g3d;
-extern struct platform_device s3c_device_smc911x;
-extern struct platform_device s3c_device_fimc0;
-extern struct platform_device s3c_device_fimc1;
-extern struct platform_device s3c_device_mfc;
-extern struct platform_device s3c_device_ac97;
-extern struct platform_device s3c_device_fimc0;
-extern struct platform_device s3c_device_fimc1;
-extern struct platform_device s3c_device_fimc2;
-
diff --git a/arch/arm/plat-s5pc1xx/irq-eint.c b/arch/arm/plat-s5pc1xx/irq-eint.c
deleted file mode 100644
index 373122f57d56..000000000000
--- a/arch/arm/plat-s5pc1xx/irq-eint.c
+++ /dev/null
@@ -1,281 +0,0 @@
-/*
- * linux/arch/arm/plat-s5pc1xx/irq-eint.c
- *
- * Copyright 2009 Samsung Electronics Co.
- * Byungho Min <bhmin@samsung.com>
- * Kyungin Park <kyungmin.park@samsung.com>
- *
- * Based on plat-s3c64xx/irq-eint.c
- *
- * S5PC1XX - Interrupt handling for IRQ_EINT(x)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/io.h>
-#include <linux/sysdev.h>
-#include <linux/pm.h>
-#include <linux/gpio.h>
-
-#include <asm/hardware/vic.h>
-
-#include <mach/map.h>
-
-#include <plat/gpio-cfg.h>
-#include <plat/gpio-ext.h>
-#include <plat/pm.h>
-#include <plat/regs-gpio.h>
-#include <plat/regs-irqtype.h>
-
-/*
- * bank is a group of external interrupt
- * bank0 means EINT0 ... EINT7
- * bank1 means EINT8 ... EINT15
- * bank2 means EINT16 ... EINT23
- * bank3 means EINT24 ... EINT31
- */
-
-static inline int s3c_get_eint(unsigned int irq)
-{
- int real;
-
- if (irq < IRQ_EINT16_31)
- real = (irq - IRQ_EINT0);
- else
- real = (irq - S3C_IRQ_EINT_BASE) + IRQ_EINT16_31 - IRQ_EINT0;
-
- return real;
-}
-
-static inline int s3c_get_bank(unsigned int irq)
-{
- return s3c_get_eint(irq) >> 3;
-}
-
-static inline int s3c_eint_to_bit(unsigned int irq)
-{
- int real, bit;
-
- real = s3c_get_eint(irq);
- bit = 1 << (real & (8 - 1));
-
- return bit;
-}
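
The three helpers above rely on each bank holding eight EINT lines, so an EINT number decomposes into bank = eint >> 3 and mask bit = 1 << (eint & 7). A tiny stand-alone sketch of that decomposition is:

    #include <stdio.h>

    /* Decompose an external-interrupt number into its bank (8 lines
     * per bank) and bit position, as the helpers above do. */
    int main(void)
    {
            unsigned int eint;

            for (eint = 0; eint < 32; eint += 7) {
                    unsigned int bank = eint >> 3;
                    unsigned int bit  = 1u << (eint & 7);

                    printf("EINT%-2u -> bank %u, mask bit 0x%02x\n",
                           eint, bank, bit);
            }
            return 0;
    }
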
-
-static inline void s3c_irq_eint_mask(unsigned int irq)
-{
- u32 mask;
- u32 bank = s3c_get_bank(irq);
-
- mask = __raw_readl(S5PC1XX_WKUP_INT_MASK(bank));
- mask |= s3c_eint_to_bit(irq);
- __raw_writel(mask, S5PC1XX_WKUP_INT_MASK(bank));
-}
-
-static void s3c_irq_eint_unmask(unsigned int irq)
-{
- u32 mask;
- u32 bank = s3c_get_bank(irq);
-
- mask = __raw_readl(S5PC1XX_WKUP_INT_MASK(bank));
- mask &= ~(s3c_eint_to_bit(irq));
- __raw_writel(mask, S5PC1XX_WKUP_INT_MASK(bank));
-}
-
-static inline void s3c_irq_eint_ack(unsigned int irq)
-{
- u32 bank = s3c_get_bank(irq);
-
- __raw_writel(s3c_eint_to_bit(irq), S5PC1XX_WKUP_INT_PEND(bank));
-}
-
-static void s3c_irq_eint_maskack(unsigned int irq)
-{
- /* compiler should in-line these */
- s3c_irq_eint_mask(irq);
- s3c_irq_eint_ack(irq);
-}
-
-static int s3c_irq_eint_set_type(unsigned int irq, unsigned int type)
-{
- u32 bank = s3c_get_bank(irq);
- int real = s3c_get_eint(irq);
- int gpio, shift, sfn;
- u32 ctrl, con = 0;
-
- switch (type) {
- case IRQ_TYPE_NONE:
- printk(KERN_WARNING "No edge setting!\n");
- break;
-
- case IRQ_TYPE_EDGE_RISING:
- con = S5PC1XX_WKUP_INT_RISEEDGE;
- break;
-
- case IRQ_TYPE_EDGE_FALLING:
- con = S5PC1XX_WKUP_INT_FALLEDGE;
- break;
-
- case IRQ_TYPE_EDGE_BOTH:
- con = S5PC1XX_WKUP_INT_BOTHEDGE;
- break;
-
- case IRQ_TYPE_LEVEL_LOW:
- con = S5PC1XX_WKUP_INT_LOWLEV;
- break;
-
- case IRQ_TYPE_LEVEL_HIGH:
- con = S5PC1XX_WKUP_INT_HILEV;
- break;
-
- default:
- printk(KERN_ERR "No such irq type %d", type);
- return -EINVAL;
- }
-
- gpio = real & (8 - 1);
- shift = gpio << 2;
-
- ctrl = __raw_readl(S5PC1XX_WKUP_INT_CON(bank));
- ctrl &= ~(0x7 << shift);
- ctrl |= con << shift;
- __raw_writel(ctrl, S5PC1XX_WKUP_INT_CON(bank));
-
- switch (real) {
- case 0 ... 7:
- gpio = S5PC100_GPH0(gpio);
- break;
- case 8 ... 15:
- gpio = S5PC100_GPH1(gpio);
- break;
- case 16 ... 23:
- gpio = S5PC100_GPH2(gpio);
- break;
- case 24 ... 31:
- gpio = S5PC100_GPH3(gpio);
- break;
- default:
- return -EINVAL;
- }
-
- sfn = S3C_GPIO_SFN(0x2);
- s3c_gpio_cfgpin(gpio, sfn);
-
- return 0;
-}
-
-static struct irq_chip s3c_irq_eint = {
- .name = "EINT",
- .mask = s3c_irq_eint_mask,
- .unmask = s3c_irq_eint_unmask,
- .mask_ack = s3c_irq_eint_maskack,
- .ack = s3c_irq_eint_ack,
- .set_type = s3c_irq_eint_set_type,
- .set_wake = s3c_irqext_wake,
-};
-
-/* s3c_irq_demux_eint
- *
- * This function demuxes the IRQ from external interrupts,
- * from IRQ_EINT(16) to IRQ_EINT(31). It is designed to be inlined into
- * the specific handlers s3c_irq_demux_eintX_Y.
- */
-static inline void s3c_irq_demux_eint(unsigned int start, unsigned int end)
-{
- u32 status = __raw_readl(S5PC1XX_WKUP_INT_PEND((start >> 3)));
- u32 mask = __raw_readl(S5PC1XX_WKUP_INT_MASK((start >> 3)));
- unsigned int irq;
-
- status &= ~mask;
- status &= (1 << (end - start + 1)) - 1;
-
- for (irq = IRQ_EINT(start); irq <= IRQ_EINT(end); irq++) {
- if (status & 1)
- generic_handle_irq(irq);
-
- status >>= 1;
- }
-}
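
The demux above keeps only the unmasked pending bits for the requested range and walks them LSB-first, dispatching each set bit. A host-side sketch of the same scan, with plain variables standing in for the S5PC1XX_WKUP_INT_{PEND,MASK} reads and a printf standing in for generic_handle_irq(), is:

    #include <stdio.h>
    #include <stdint.h>

    /* Walk the unmasked pending bits for EINT lines [start, end],
     * mirroring the scan in s3c_irq_demux_eint(). */
    static void demux(uint32_t pend, uint32_t mask,
                      unsigned int start, unsigned int end)
    {
            uint32_t status = pend & ~mask;
            unsigned int line;

            status &= (1u << (end - start + 1)) - 1;

            for (line = start; line <= end; line++) {
                    if (status & 1)
                            printf("dispatch EINT%u\n", line);
                    status >>= 1;
            }
    }

    int main(void)
    {
            /* bits 1 and 5 pending, bit 5 masked out -> only EINT17 fires */
            demux(0x22, 0x20, 16, 23);
            return 0;
    }
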
-
-static void s3c_irq_demux_eint16_31(unsigned int irq, struct irq_desc *desc)
-{
- s3c_irq_demux_eint(16, 23);
- s3c_irq_demux_eint(24, 31);
-}
-
-/*
- * Handle EINT0 ... EINT15 at VIC directly
- */
-static void s3c_irq_vic_eint_mask(unsigned int irq)
-{
- void __iomem *base = get_irq_chip_data(irq);
- unsigned int real;
-
- s3c_irq_eint_mask(irq);
- real = s3c_get_eint(irq);
- writel(1 << real, base + VIC_INT_ENABLE_CLEAR);
-}
-
-static void s3c_irq_vic_eint_unmask(unsigned int irq)
-{
- void __iomem *base = get_irq_chip_data(irq);
- unsigned int real;
-
- s3c_irq_eint_unmask(irq);
- real = s3c_get_eint(irq);
- writel(1 << real, base + VIC_INT_ENABLE);
-}
-
-static inline void s3c_irq_vic_eint_ack(unsigned int irq)
-{
- u32 bit;
- u32 bank = s3c_get_bank(irq);
-
- bit = s3c_eint_to_bit(irq);
- __raw_writel(bit, S5PC1XX_WKUP_INT_PEND(bank));
-}
-
-static void s3c_irq_vic_eint_maskack(unsigned int irq)
-{
- /* compiler should in-line these */
- s3c_irq_vic_eint_mask(irq);
- s3c_irq_vic_eint_ack(irq);
-}
-
-static struct irq_chip s3c_irq_vic_eint = {
- .name = "EINT",
- .mask = s3c_irq_vic_eint_mask,
- .unmask = s3c_irq_vic_eint_unmask,
- .mask_ack = s3c_irq_vic_eint_maskack,
- .ack = s3c_irq_vic_eint_ack,
- .set_type = s3c_irq_eint_set_type,
- .set_wake = s3c_irqext_wake,
-};
-
-static int __init s5pc1xx_init_irq_eint(void)
-{
- int irq;
-
- for (irq = IRQ_EINT0; irq <= IRQ_EINT15; irq++) {
- set_irq_chip(irq, &s3c_irq_vic_eint);
- set_irq_handler(irq, handle_level_irq);
- set_irq_flags(irq, IRQF_VALID);
- }
-
- for (irq = IRQ_EINT(16); irq <= IRQ_EINT(31); irq++) {
- set_irq_chip(irq, &s3c_irq_eint);
- set_irq_handler(irq, handle_level_irq);
- set_irq_flags(irq, IRQF_VALID);
- }
-
- set_irq_chained_handler(IRQ_EINT16_31, s3c_irq_demux_eint16_31);
-
- return 0;
-}
-
-arch_initcall(s5pc1xx_init_irq_eint);
diff --git a/arch/arm/plat-s5pc1xx/irq.c b/arch/arm/plat-s5pc1xx/irq.c
deleted file mode 100644
index bfc524827819..000000000000
--- a/arch/arm/plat-s5pc1xx/irq.c
+++ /dev/null
@@ -1,75 +0,0 @@
-/* arch/arm/plat-s5pc1xx/irq.c
- *
- * Copyright 2009 Samsung Electronics Co.
- * Byungho Min <bhmin@samsung.com>
- *
- * S5PC1XX - Interrupt handling
- *
- * Based on plat-s3c64xx/irq.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/io.h>
-
-#include <asm/hardware/vic.h>
-
-#include <mach/map.h>
-#include <plat/irq-vic-timer.h>
-#include <plat/irq-uart.h>
-#include <plat/cpu.h>
-
-/* Note, we make use of the fact that the parent IRQs, IRQ_UART[0..3]
- * are consecutive when looking up the interrupt in the demux routines.
- */
-static struct s3c_uart_irq uart_irqs[] = {
- [0] = {
- .regs = (void *)S3C_VA_UART0,
- .base_irq = IRQ_S3CUART_BASE0,
- .parent_irq = IRQ_UART0,
- },
- [1] = {
- .regs = (void *)S3C_VA_UART1,
- .base_irq = IRQ_S3CUART_BASE1,
- .parent_irq = IRQ_UART1,
- },
- [2] = {
- .regs = (void *)S3C_VA_UART2,
- .base_irq = IRQ_S3CUART_BASE2,
- .parent_irq = IRQ_UART2,
- },
- [3] = {
- .regs = (void *)S3C_VA_UART3,
- .base_irq = IRQ_S3CUART_BASE3,
- .parent_irq = IRQ_UART3,
- },
-};
-
-void __init s5pc1xx_init_irq(u32 *vic_valid, int num)
-{
- int i;
-
- printk(KERN_DEBUG "%s: initialising interrupts\n", __func__);
-
- /* initialise the pair of VICs */
- for (i = 0; i < num; i++)
- vic_init((void *)S5PC1XX_VA_VIC(i), S3C_IRQ(i * S3C_IRQ_OFFSET),
- vic_valid[i], 0);
-
- /* add the timer sub-irqs */
-
- s3c_init_vic_timer_irq(IRQ_TIMER0_VIC, IRQ_TIMER0);
- s3c_init_vic_timer_irq(IRQ_TIMER1_VIC, IRQ_TIMER1);
- s3c_init_vic_timer_irq(IRQ_TIMER2_VIC, IRQ_TIMER2);
- s3c_init_vic_timer_irq(IRQ_TIMER3_VIC, IRQ_TIMER3);
- s3c_init_vic_timer_irq(IRQ_TIMER4_VIC, IRQ_TIMER4);
-
- s3c_init_uart_irqs(uart_irqs, ARRAY_SIZE(uart_irqs));
-}
-
-
diff --git a/arch/arm/plat-s5pc1xx/s5pc100-clock.c b/arch/arm/plat-s5pc1xx/s5pc100-clock.c
deleted file mode 100644
index 2bf6c57a96a2..000000000000
--- a/arch/arm/plat-s5pc1xx/s5pc100-clock.c
+++ /dev/null
@@ -1,876 +0,0 @@
-/* linux/arch/arm/plat-s5pc1xx/s5pc100-clock.c
- *
- * Copyright 2009 Samsung Electronics, Co.
- * Byungho Min <bhmin@samsung.com>
- *
- * S5PC100 based common clock support
- *
- * Based on plat-s3c64xx/s3c6400-clock.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/errno.h>
-#include <linux/err.h>
-#include <linux/clk.h>
-#include <linux/sysdev.h>
-#include <linux/io.h>
-
-#include <mach/hardware.h>
-#include <mach/map.h>
-
-#include <plat/cpu-freq.h>
-
-#include <plat/regs-clock.h>
-#include <plat/clock.h>
-#include <plat/clock-clksrc.h>
-#include <plat/cpu.h>
-#include <plat/pll.h>
-#include <plat/devs.h>
-#include <plat/s5pc100.h>
-
-/* fin_apll, fin_mpll and fin_epll are all the same clock, which we call
- * ext_xtal_mux for want of an actual name from the manual.
-*/
-
-static struct clk clk_ext_xtal_mux = {
- .name = "ext_xtal",
- .id = -1,
-};
-
-#define clk_fin_apll clk_ext_xtal_mux
-#define clk_fin_mpll clk_ext_xtal_mux
-#define clk_fin_epll clk_ext_xtal_mux
-#define clk_fin_hpll clk_ext_xtal_mux
-
-#define clk_fout_mpll clk_mpll
-#define clk_vclk_54m clk_54m
-
-/* APLL */
-static struct clk clk_fout_apll = {
- .name = "fout_apll",
- .id = -1,
- .rate = 27000000,
-};
-
-static struct clk *clk_src_apll_list[] = {
- [0] = &clk_fin_apll,
- [1] = &clk_fout_apll,
-};
-
-static struct clksrc_sources clk_src_apll = {
- .sources = clk_src_apll_list,
- .nr_sources = ARRAY_SIZE(clk_src_apll_list),
-};
-
-static struct clksrc_clk clk_mout_apll = {
- .clk = {
- .name = "mout_apll",
- .id = -1,
- },
- .sources = &clk_src_apll,
- .reg_src = { .reg = S5PC100_CLKSRC0, .shift = 0, .size = 1, },
-};
-
-static unsigned long s5pc100_clk_dout_apll_get_rate(struct clk *clk)
-{
- unsigned long rate = clk_get_rate(clk->parent);
- unsigned int ratio;
-
- ratio = __raw_readl(S5PC100_CLKDIV0) & S5PC100_CLKDIV0_APLL_MASK;
- ratio >>= S5PC100_CLKDIV0_APLL_SHIFT;
-
- return rate / (ratio + 1);
-}
-
-static struct clk clk_dout_apll = {
- .name = "dout_apll",
- .id = -1,
- .parent = &clk_mout_apll.clk,
- .ops = &(struct clk_ops) {
- .get_rate = s5pc100_clk_dout_apll_get_rate,
- },
-};
-
-static unsigned long s5pc100_clk_arm_get_rate(struct clk *clk)
-{
- unsigned long rate = clk_get_rate(clk->parent);
- unsigned int ratio;
-
- ratio = __raw_readl(S5PC100_CLKDIV0) & S5PC100_CLKDIV0_ARM_MASK;
- ratio >>= S5PC100_CLKDIV0_ARM_SHIFT;
-
- return rate / (ratio + 1);
-}
-
-static unsigned long s5pc100_clk_arm_round_rate(struct clk *clk,
- unsigned long rate)
-{
- unsigned long parent = clk_get_rate(clk->parent);
- u32 div;
-
- if (parent < rate)
- return rate;
-
- div = (parent / rate) - 1;
- if (div > S5PC100_CLKDIV0_ARM_MASK)
- div = S5PC100_CLKDIV0_ARM_MASK;
-
- return parent / (div + 1);
-}
-
-static int s5pc100_clk_arm_set_rate(struct clk *clk, unsigned long rate)
-{
- unsigned long parent = clk_get_rate(clk->parent);
- u32 div;
- u32 val;
-
- if (rate < parent / (S5PC100_CLKDIV0_ARM_MASK + 1))
- return -EINVAL;
-
- rate = clk_round_rate(clk, rate);
- div = clk_get_rate(clk->parent) / rate;
-
- val = __raw_readl(S5PC100_CLKDIV0);
-	val &= ~S5PC100_CLKDIV0_ARM_MASK;
-	val |= (div - 1) << S5PC100_CLKDIV0_ARM_SHIFT;
- __raw_writel(val, S5PC100_CLKDIV0);
-
- return 0;
-}
-
-static struct clk clk_arm = {
- .name = "armclk",
- .id = -1,
- .parent = &clk_dout_apll,
- .ops = &(struct clk_ops) {
- .get_rate = s5pc100_clk_arm_get_rate,
- .set_rate = s5pc100_clk_arm_set_rate,
- .round_rate = s5pc100_clk_arm_round_rate,
- },
-};
-
-static unsigned long s5pc100_clk_dout_d0_bus_get_rate(struct clk *clk)
-{
- unsigned long rate = clk_get_rate(clk->parent);
- unsigned int ratio;
-
- ratio = __raw_readl(S5PC100_CLKDIV0) & S5PC100_CLKDIV0_D0_MASK;
- ratio >>= S5PC100_CLKDIV0_D0_SHIFT;
-
- return rate / (ratio + 1);
-}
-
-static struct clk clk_dout_d0_bus = {
- .name = "dout_d0_bus",
- .id = -1,
- .parent = &clk_arm,
- .ops = &(struct clk_ops) {
- .get_rate = s5pc100_clk_dout_d0_bus_get_rate,
- },
-};
-
-static unsigned long s5pc100_clk_dout_pclkd0_get_rate(struct clk *clk)
-{
- unsigned long rate = clk_get_rate(clk->parent);
- unsigned int ratio;
-
- ratio = __raw_readl(S5PC100_CLKDIV0) & S5PC100_CLKDIV0_PCLKD0_MASK;
- ratio >>= S5PC100_CLKDIV0_PCLKD0_SHIFT;
-
- return rate / (ratio + 1);
-}
-
-static struct clk clk_dout_pclkd0 = {
- .name = "dout_pclkd0",
- .id = -1,
- .parent = &clk_dout_d0_bus,
- .ops = &(struct clk_ops) {
- .get_rate = s5pc100_clk_dout_pclkd0_get_rate,
- },
-};
-
-static unsigned long s5pc100_clk_dout_apll2_get_rate(struct clk *clk)
-{
- unsigned long rate = clk_get_rate(clk->parent);
- unsigned int ratio;
-
- ratio = __raw_readl(S5PC100_CLKDIV1) & S5PC100_CLKDIV1_APLL2_MASK;
- ratio >>= S5PC100_CLKDIV1_APLL2_SHIFT;
-
- return rate / (ratio + 1);
-}
-
-static struct clk clk_dout_apll2 = {
- .name = "dout_apll2",
- .id = -1,
- .parent = &clk_mout_apll.clk,
- .ops = &(struct clk_ops) {
- .get_rate = s5pc100_clk_dout_apll2_get_rate,
- },
-};
-
-/* MPLL */
-static struct clk *clk_src_mpll_list[] = {
- [0] = &clk_fin_mpll,
- [1] = &clk_fout_mpll,
-};
-
-static struct clksrc_sources clk_src_mpll = {
- .sources = clk_src_mpll_list,
- .nr_sources = ARRAY_SIZE(clk_src_mpll_list),
-};
-
-static struct clksrc_clk clk_mout_mpll = {
- .clk = {
- .name = "mout_mpll",
- .id = -1,
- },
- .sources = &clk_src_mpll,
- .reg_src = { .reg = S5PC100_CLKSRC0, .shift = 4, .size = 1, },
-};
-
-static struct clk *clkset_am_list[] = {
- [0] = &clk_mout_mpll.clk,
- [1] = &clk_dout_apll2,
-};
-
-static struct clksrc_sources clk_src_am = {
- .sources = clkset_am_list,
- .nr_sources = ARRAY_SIZE(clkset_am_list),
-};
-
-static struct clksrc_clk clk_mout_am = {
- .clk = {
- .name = "mout_am",
- .id = -1,
- },
- .sources = &clk_src_am,
- .reg_src = { .reg = S5PC100_CLKSRC0, .shift = 16, .size = 1, },
-};
-
-static unsigned long s5pc100_clk_dout_d1_bus_get_rate(struct clk *clk)
-{
- unsigned long rate = clk_get_rate(clk->parent);
- unsigned int ratio;
-
- printk(KERN_DEBUG "%s: parent is %ld\n", __func__, rate);
-
- ratio = __raw_readl(S5PC100_CLKDIV1) & S5PC100_CLKDIV1_D1_MASK;
- ratio >>= S5PC100_CLKDIV1_D1_SHIFT;
-
- return rate / (ratio + 1);
-}
-
-static struct clk clk_dout_d1_bus = {
- .name = "dout_d1_bus",
- .id = -1,
- .parent = &clk_mout_am.clk,
- .ops = &(struct clk_ops) {
- .get_rate = s5pc100_clk_dout_d1_bus_get_rate,
- },
-};
-
-static struct clk *clkset_onenand_list[] = {
- [0] = &clk_dout_d0_bus,
- [1] = &clk_dout_d1_bus,
-};
-
-static struct clksrc_sources clk_src_onenand = {
- .sources = clkset_onenand_list,
- .nr_sources = ARRAY_SIZE(clkset_onenand_list),
-};
-
-static struct clksrc_clk clk_mout_onenand = {
- .clk = {
- .name = "mout_onenand",
- .id = -1,
- },
- .sources = &clk_src_onenand,
- .reg_src = { .reg = S5PC100_CLKSRC0, .shift = 24, .size = 1, },
-};
-
-static unsigned long s5pc100_clk_dout_pclkd1_get_rate(struct clk *clk)
-{
- unsigned long rate = clk_get_rate(clk->parent);
- unsigned int ratio;
-
- printk(KERN_DEBUG "%s: parent is %ld\n", __func__, rate);
-
- ratio = __raw_readl(S5PC100_CLKDIV1) & S5PC100_CLKDIV1_PCLKD1_MASK;
- ratio >>= S5PC100_CLKDIV1_PCLKD1_SHIFT;
-
- return rate / (ratio + 1);
-}
-
-static struct clk clk_dout_pclkd1 = {
- .name = "dout_pclkd1",
- .id = -1,
- .parent = &clk_dout_d1_bus,
- .ops = &(struct clk_ops) {
- .get_rate = s5pc100_clk_dout_pclkd1_get_rate,
- },
-};
-
-static unsigned long s5pc100_clk_dout_mpll2_get_rate(struct clk *clk)
-{
- unsigned long rate = clk_get_rate(clk->parent);
- unsigned int ratio;
-
- printk(KERN_DEBUG "%s: parent is %ld\n", __func__, rate);
-
- ratio = __raw_readl(S5PC100_CLKDIV1) & S5PC100_CLKDIV1_MPLL2_MASK;
- ratio >>= S5PC100_CLKDIV1_MPLL2_SHIFT;
-
- return rate / (ratio + 1);
-}
-
-static struct clk clk_dout_mpll2 = {
- .name = "dout_mpll2",
- .id = -1,
- .parent = &clk_mout_am.clk,
- .ops = &(struct clk_ops) {
- .get_rate = s5pc100_clk_dout_mpll2_get_rate,
- },
-};
-
-static unsigned long s5pc100_clk_dout_cam_get_rate(struct clk *clk)
-{
- unsigned long rate = clk_get_rate(clk->parent);
- unsigned int ratio;
-
- printk(KERN_DEBUG "%s: parent is %ld\n", __func__, rate);
-
- ratio = __raw_readl(S5PC100_CLKDIV1) & S5PC100_CLKDIV1_CAM_MASK;
- ratio >>= S5PC100_CLKDIV1_CAM_SHIFT;
-
- return rate / (ratio + 1);
-}
-
-static struct clk clk_dout_cam = {
- .name = "dout_cam",
- .id = -1,
- .parent = &clk_dout_mpll2,
- .ops = &(struct clk_ops) {
- .get_rate = s5pc100_clk_dout_cam_get_rate,
- },
-};
-
-static unsigned long s5pc100_clk_dout_mpll_get_rate(struct clk *clk)
-{
- unsigned long rate = clk_get_rate(clk->parent);
- unsigned int ratio;
-
- printk(KERN_DEBUG "%s: parent is %ld\n", __func__, rate);
-
- ratio = __raw_readl(S5PC100_CLKDIV1) & S5PC100_CLKDIV1_MPLL_MASK;
- ratio >>= S5PC100_CLKDIV1_MPLL_SHIFT;
-
- return rate / (ratio + 1);
-}
-
-static struct clk clk_dout_mpll = {
- .name = "dout_mpll",
- .id = -1,
- .parent = &clk_mout_am.clk,
- .ops = &(struct clk_ops) {
- .get_rate = s5pc100_clk_dout_mpll_get_rate,
- },
-};
-
-/* EPLL */
-static struct clk clk_fout_epll = {
- .name = "fout_epll",
- .id = -1,
-};
-
-static struct clk *clk_src_epll_list[] = {
- [0] = &clk_fin_epll,
- [1] = &clk_fout_epll,
-};
-
-static struct clksrc_sources clk_src_epll = {
- .sources = clk_src_epll_list,
- .nr_sources = ARRAY_SIZE(clk_src_epll_list),
-};
-
-static struct clksrc_clk clk_mout_epll = {
- .clk = {
- .name = "mout_epll",
- .id = -1,
- },
- .sources = &clk_src_epll,
- .reg_src = { .reg = S5PC100_CLKSRC0, .shift = 8, .size = 1, },
-};
-
-/* HPLL */
-static struct clk clk_fout_hpll = {
- .name = "fout_hpll",
- .id = -1,
-};
-
-static struct clk *clk_src_hpll_list[] = {
- [0] = &clk_27m,
- [1] = &clk_fout_hpll,
-};
-
-static struct clksrc_sources clk_src_hpll = {
- .sources = clk_src_hpll_list,
- .nr_sources = ARRAY_SIZE(clk_src_hpll_list),
-};
-
-static struct clksrc_clk clk_mout_hpll = {
- .clk = {
- .name = "mout_hpll",
- .id = -1,
- },
- .sources = &clk_src_hpll,
- .reg_src = { .reg = S5PC100_CLKSRC0, .shift = 12, .size = 1, },
-};
-
-/* Peripherals */
-/*
- * The peripheral clocks are all controlled via clocksource followed
- * by an optional divider and gate stage. We currently roll this into
- * one clock which hides the intermediate clock from the mux.
- *
- * Note, the JPEG clock can only be an even divider...
- *
- * The scaler and LCD clocks depend on the S5PC100 version, and also
- * have a common parent divisor so are not included here.
- */
-
-static struct clk clk_iis_cd0 = {
- .name = "iis_cdclk0",
- .id = -1,
-};
-
-static struct clk clk_iis_cd1 = {
- .name = "iis_cdclk1",
- .id = -1,
-};
-
-static struct clk clk_iis_cd2 = {
- .name = "iis_cdclk2",
- .id = -1,
-};
-
-static struct clk clk_pcm_cd0 = {
- .name = "pcm_cdclk0",
- .id = -1,
-};
-
-static struct clk clk_pcm_cd1 = {
- .name = "pcm_cdclk1",
- .id = -1,
-};
-
-static struct clk *clkset_audio0_list[] = {
- &clk_mout_epll.clk,
- &clk_dout_mpll,
- &clk_fin_epll,
- &clk_iis_cd0,
- &clk_pcm_cd0,
- &clk_mout_hpll.clk,
-};
-
-static struct clksrc_sources clkset_audio0 = {
- .sources = clkset_audio0_list,
- .nr_sources = ARRAY_SIZE(clkset_audio0_list),
-};
-
-static struct clk *clkset_spi_list[] = {
- &clk_mout_epll.clk,
- &clk_dout_mpll2,
- &clk_fin_epll,
- &clk_mout_hpll.clk,
-};
-
-static struct clksrc_sources clkset_spi = {
- .sources = clkset_spi_list,
- .nr_sources = ARRAY_SIZE(clkset_spi_list),
-};
-
-static struct clk *clkset_uart_list[] = {
- &clk_mout_epll.clk,
- &clk_dout_mpll,
-};
-
-static struct clksrc_sources clkset_uart = {
- .sources = clkset_uart_list,
- .nr_sources = ARRAY_SIZE(clkset_uart_list),
-};
-
-static struct clk *clkset_audio1_list[] = {
- &clk_mout_epll.clk,
- &clk_dout_mpll,
- &clk_fin_epll,
- &clk_iis_cd1,
- &clk_pcm_cd1,
- &clk_mout_hpll.clk,
-};
-
-static struct clksrc_sources clkset_audio1 = {
- .sources = clkset_audio1_list,
- .nr_sources = ARRAY_SIZE(clkset_audio1_list),
-};
-
-static struct clk *clkset_audio2_list[] = {
- &clk_mout_epll.clk,
- &clk_dout_mpll,
- &clk_fin_epll,
- &clk_iis_cd2,
- &clk_mout_hpll.clk,
-};
-
-static struct clksrc_sources clkset_audio2 = {
- .sources = clkset_audio2_list,
- .nr_sources = ARRAY_SIZE(clkset_audio2_list),
-};
-
-static struct clksrc_clk clksrc_audio[] = {
- {
- .clk = {
- .name = "audio-bus",
- .id = 0,
- .ctrlbit = S5PC100_CLKGATE_SCLK1_AUDIO0,
- .enable = s5pc100_sclk1_ctrl,
- },
- .sources = &clkset_audio0,
- .reg_div = { .reg = S5PC100_CLKDIV4, .shift = 12, .size = 4, },
- .reg_src = { .reg = S5PC100_CLKSRC3, .shift = 12, .size = 3, },
- }, {
- .clk = {
- .name = "audio-bus",
- .id = 1,
- .ctrlbit = S5PC100_CLKGATE_SCLK1_AUDIO1,
- .enable = s5pc100_sclk1_ctrl,
- },
- .sources = &clkset_audio1,
- .reg_div = { .reg = S5PC100_CLKDIV4, .shift = 16, .size = 4, },
- .reg_src = { .reg = S5PC100_CLKSRC3, .shift = 16, .size = 3, },
- }, {
- .clk = {
- .name = "audio-bus",
- .id = 2,
- .ctrlbit = S5PC100_CLKGATE_SCLK1_AUDIO2,
- .enable = s5pc100_sclk1_ctrl,
- },
- .sources = &clkset_audio2,
- .reg_div = { .reg = S5PC100_CLKDIV4, .shift = 20, .size = 4, },
- .reg_src = { .reg = S5PC100_CLKSRC3, .shift = 20, .size = 3, },
- },
-};
-
-static struct clk *clkset_spdif_list[] = {
- &clksrc_audio[0].clk,
- &clksrc_audio[1].clk,
- &clksrc_audio[2].clk,
-};
-
-static struct clksrc_sources clkset_spdif = {
- .sources = clkset_spdif_list,
- .nr_sources = ARRAY_SIZE(clkset_spdif_list),
-};
-
-static struct clk *clkset_lcd_fimc_list[] = {
- &clk_mout_epll.clk,
- &clk_dout_mpll,
- &clk_mout_hpll.clk,
- &clk_vclk_54m,
-};
-
-static struct clksrc_sources clkset_lcd_fimc = {
- .sources = clkset_lcd_fimc_list,
- .nr_sources = ARRAY_SIZE(clkset_lcd_fimc_list),
-};
-
-static struct clk *clkset_mmc_list[] = {
- &clk_mout_epll.clk,
- &clk_dout_mpll,
- &clk_fin_epll,
- &clk_mout_hpll.clk ,
-};
-
-static struct clksrc_sources clkset_mmc = {
- .sources = clkset_mmc_list,
- .nr_sources = ARRAY_SIZE(clkset_mmc_list),
-};
-
-static struct clk *clkset_usbhost_list[] = {
- &clk_mout_epll.clk,
- &clk_dout_mpll,
- &clk_mout_hpll.clk,
- &clk_48m,
-};
-
-static struct clksrc_sources clkset_usbhost = {
- .sources = clkset_usbhost_list,
- .nr_sources = ARRAY_SIZE(clkset_usbhost_list),
-};
-
-static struct clksrc_clk clksrc_clks[] = {
- {
- .clk = {
- .name = "spi_bus",
- .id = 0,
- .ctrlbit = S5PC100_CLKGATE_SCLK0_SPI0,
- .enable = s5pc100_sclk0_ctrl,
-
- },
- .sources = &clkset_spi,
- .reg_div = { .reg = S5PC100_CLKDIV2, .shift = 4, .size = 4, },
- .reg_src = { .reg = S5PC100_CLKSRC1, .shift = 4, .size = 2, },
- }, {
- .clk = {
- .name = "spi_bus",
- .id = 1,
- .ctrlbit = S5PC100_CLKGATE_SCLK0_SPI1,
- .enable = s5pc100_sclk0_ctrl,
- },
- .sources = &clkset_spi,
- .reg_div = { .reg = S5PC100_CLKDIV2, .shift = 8, .size = 4, },
- .reg_src = { .reg = S5PC100_CLKSRC1, .shift = 8, .size = 2, },
- }, {
- .clk = {
- .name = "spi_bus",
- .id = 2,
- .ctrlbit = S5PC100_CLKGATE_SCLK0_SPI2,
- .enable = s5pc100_sclk0_ctrl,
- },
- .sources = &clkset_spi,
- .reg_div = { .reg = S5PC100_CLKDIV2, .shift = 12, .size = 4, },
- .reg_src = { .reg = S5PC100_CLKSRC1, .shift = 12, .size = 2, },
- }, {
- .clk = {
- .name = "uclk1",
- .id = -1,
- .ctrlbit = S5PC100_CLKGATE_SCLK0_UART,
- .enable = s5pc100_sclk0_ctrl,
- },
- .sources = &clkset_uart,
- .reg_div = { .reg = S5PC100_CLKDIV2, .shift = 0, .size = 3, },
- .reg_src = { .reg = S5PC100_CLKSRC1, .shift = 0, .size = 1, },
- }, {
- .clk = {
- .name = "spdif",
- .id = -1,
- },
- .sources = &clkset_spdif,
- .reg_src = { .reg = S5PC100_CLKSRC3, .shift = 24, .size = 2, },
- }, {
- .clk = {
- .name = "lcd",
- .id = -1,
- .ctrlbit = S5PC100_CLKGATE_SCLK1_LCD,
- .enable = s5pc100_sclk1_ctrl,
- },
- .sources = &clkset_lcd_fimc,
- .reg_div = { .reg = S5PC100_CLKDIV3, .shift = 12, .size = 4, },
- .reg_src = { .reg = S5PC100_CLKSRC2, .shift = 12, .size = 2, },
- }, {
- .clk = {
- .name = "fimc",
- .id = 0,
- .ctrlbit = S5PC100_CLKGATE_SCLK1_FIMC0,
- .enable = s5pc100_sclk1_ctrl,
- },
- .sources = &clkset_lcd_fimc,
- .reg_div = { .reg = S5PC100_CLKDIV3, .shift = 16, .size = 4, },
- .reg_src = { .reg = S5PC100_CLKSRC2, .shift = 16, .size = 2, },
- }, {
- .clk = {
- .name = "fimc",
- .id = 1,
- .ctrlbit = S5PC100_CLKGATE_SCLK1_FIMC1,
- .enable = s5pc100_sclk1_ctrl,
- },
- .sources = &clkset_lcd_fimc,
- .reg_div = { .reg = S5PC100_CLKDIV3, .shift = 20, .size = 4, },
- .reg_src = { .reg = S5PC100_CLKSRC2, .shift = 20, .size = 2, },
- }, {
- .clk = {
- .name = "fimc",
- .id = 2,
- .ctrlbit = S5PC100_CLKGATE_SCLK1_FIMC2,
- .enable = s5pc100_sclk1_ctrl,
- },
- .sources = &clkset_lcd_fimc,
- .reg_div = { .reg = S5PC100_CLKDIV3, .shift = 24, .size = 4, },
- .reg_src = { .reg = S5PC100_CLKSRC2, .shift = 24, .size = 2, },
- }, {
- .clk = {
- .name = "mmc_bus",
- .id = 0,
- .ctrlbit = S5PC100_CLKGATE_SCLK0_MMC0,
- .enable = s5pc100_sclk0_ctrl,
- },
- .sources = &clkset_mmc,
- .reg_div = { .reg = S5PC100_CLKDIV3, .shift = 0, .size = 4, },
- .reg_src = { .reg = S5PC100_CLKSRC2, .shift = 0, .size = 2, },
- }, {
- .clk = {
- .name = "mmc_bus",
- .id = 1,
- .ctrlbit = S5PC100_CLKGATE_SCLK0_MMC1,
- .enable = s5pc100_sclk0_ctrl,
- },
- .sources = &clkset_mmc,
- .reg_div = { .reg = S5PC100_CLKDIV3, .shift = 4, .size = 4, },
- .reg_src = { .reg = S5PC100_CLKSRC2, .shift = 4, .size = 2, },
- }, {
- .clk = {
- .name = "mmc_bus",
- .id = 2,
- .ctrlbit = S5PC100_CLKGATE_SCLK0_MMC2,
- .enable = s5pc100_sclk0_ctrl,
- },
- .sources = &clkset_mmc,
- .reg_div = { .reg = S5PC100_CLKDIV3, .shift = 8, .size = 4, },
- .reg_src = { .reg = S5PC100_CLKSRC2, .shift = 8, .size = 2, },
- }, {
- .clk = {
- .name = "usbhost",
- .id = -1,
- .ctrlbit = S5PC100_CLKGATE_SCLK0_USBHOST,
- .enable = s5pc100_sclk0_ctrl,
- },
- .sources = &clkset_usbhost,
- .reg_div = { .reg = S5PC100_CLKDIV2, .shift = 20, .size = 4, },
- .reg_src = { .reg = S5PC100_CLKSRC1, .shift = 20, .size = 2, },
- }
-};
-
-/* Clock initialisation code */
-
-static struct clksrc_clk *init_parents[] = {
- &clk_mout_apll,
- &clk_mout_mpll,
- &clk_mout_am,
- &clk_mout_onenand,
- &clk_mout_epll,
- &clk_mout_hpll,
-};
-
-#define GET_DIV(clk, field) ((((clk) & field##_MASK) >> field##_SHIFT) + 1)
-
-void __init_or_cpufreq s5pc100_setup_clocks(void)
-{
- struct clk *xtal_clk;
- unsigned long xtal;
- unsigned long armclk;
- unsigned long hclkd0;
- unsigned long hclk;
- unsigned long pclkd0;
- unsigned long pclk;
- unsigned long apll, mpll, epll, hpll;
- unsigned int ptr;
- u32 clkdiv0, clkdiv1;
-
- printk(KERN_DEBUG "%s: registering clocks\n", __func__);
-
- clkdiv0 = __raw_readl(S5PC100_CLKDIV0);
- clkdiv1 = __raw_readl(S5PC100_CLKDIV1);
-
- printk(KERN_DEBUG "%s: clkdiv0 = %08x, clkdiv1 = %08x\n", __func__, clkdiv0, clkdiv1);
-
- xtal_clk = clk_get(NULL, "xtal");
- BUG_ON(IS_ERR(xtal_clk));
-
- xtal = clk_get_rate(xtal_clk);
- clk_put(xtal_clk);
-
- printk(KERN_DEBUG "%s: xtal is %ld\n", __func__, xtal);
-
- apll = s5pc1xx_get_pll(xtal, __raw_readl(S5PC100_APLL_CON));
- mpll = s5pc1xx_get_pll(xtal, __raw_readl(S5PC100_MPLL_CON));
- epll = s5pc1xx_get_pll(xtal, __raw_readl(S5PC100_EPLL_CON));
- hpll = s5pc1xx_get_pll(xtal, __raw_readl(S5PC100_HPLL_CON));
-
- printk(KERN_INFO "S5PC100: Apll=%ld.%03ld Mhz, Mpll=%ld.%03ld Mhz"
- ", Epll=%ld.%03ld Mhz, Hpll=%ld.%03ld Mhz\n",
- print_mhz(apll), print_mhz(mpll),
- print_mhz(epll), print_mhz(hpll));
-
- armclk = apll / GET_DIV(clkdiv0, S5PC100_CLKDIV0_APLL);
- armclk = armclk / GET_DIV(clkdiv0, S5PC100_CLKDIV0_ARM);
- hclkd0 = armclk / GET_DIV(clkdiv0, S5PC100_CLKDIV0_D0);
- pclkd0 = hclkd0 / GET_DIV(clkdiv0, S5PC100_CLKDIV0_PCLKD0);
- hclk = mpll / GET_DIV(clkdiv1, S5PC100_CLKDIV1_D1);
- pclk = hclk / GET_DIV(clkdiv1, S5PC100_CLKDIV1_PCLKD1);
-
- printk(KERN_INFO "S5PC100: ARMCLK=%ld.%03ld MHz, HCLKD0=%ld.%03ld MHz,"
- " PCLKD0=%ld.%03ld MHz\n, HCLK=%ld.%03ld MHz,"
- " PCLK=%ld.%03ld MHz\n",
- print_mhz(armclk), print_mhz(hclkd0),
- print_mhz(pclkd0), print_mhz(hclk), print_mhz(pclk));
-
- clk_fout_apll.rate = apll;
- clk_fout_mpll.rate = mpll;
- clk_fout_epll.rate = epll;
- clk_fout_hpll.rate = hpll;
-
- clk_h.rate = hclk;
- clk_p.rate = pclk;
- clk_f.rate = armclk;
-
- for (ptr = 0; ptr < ARRAY_SIZE(init_parents); ptr++)
- s3c_set_clksrc(init_parents[ptr], true);
-
- for (ptr = 0; ptr < ARRAY_SIZE(clksrc_audio); ptr++)
- s3c_set_clksrc(clksrc_audio + ptr, true);
-
- for (ptr = 0; ptr < ARRAY_SIZE(clksrc_clks); ptr++)
- s3c_set_clksrc(clksrc_clks + ptr, true);
-}
-
-static struct clk *clks[] __initdata = {
- &clk_ext_xtal_mux,
- &clk_dout_apll,
- &clk_dout_d0_bus,
- &clk_dout_pclkd0,
- &clk_dout_apll2,
- &clk_mout_apll.clk,
- &clk_mout_mpll.clk,
- &clk_mout_epll.clk,
- &clk_mout_hpll.clk,
- &clk_mout_am.clk,
- &clk_dout_d1_bus,
- &clk_mout_onenand.clk,
- &clk_dout_pclkd1,
- &clk_dout_mpll2,
- &clk_dout_cam,
- &clk_dout_mpll,
- &clk_fout_epll,
- &clk_iis_cd0,
- &clk_iis_cd1,
- &clk_iis_cd2,
- &clk_pcm_cd0,
- &clk_pcm_cd1,
- &clk_arm,
-};
-
-void __init s5pc100_register_clocks(void)
-{
- struct clk *clkp;
- int ret;
- int ptr;
-
- for (ptr = 0; ptr < ARRAY_SIZE(clks); ptr++) {
- clkp = clks[ptr];
- ret = s3c24xx_register_clock(clkp);
- if (ret < 0) {
- printk(KERN_ERR "Failed to register clock %s (%d)\n",
- clkp->name, ret);
- }
- }
-
- s3c_register_clksrc(clksrc_audio, ARRAY_SIZE(clksrc_audio));
- s3c_register_clksrc(clksrc_clks, ARRAY_SIZE(clksrc_clks));
-}
diff --git a/arch/arm/plat-samsung/Kconfig b/arch/arm/plat-samsung/Kconfig
index d552c65fa1b0..2753fb3e4f73 100644
--- a/arch/arm/plat-samsung/Kconfig
+++ b/arch/arm/plat-samsung/Kconfig
@@ -6,7 +6,7 @@
config PLAT_SAMSUNG
bool
- depends on ARCH_S3C2410 || ARCH_S3C24A0 || ARCH_S3C64XX || ARCH_S5PC1XX
+ depends on ARCH_S3C2410 || ARCH_S3C24A0 || ARCH_S3C64XX
select NO_IOPORT
default y
help
@@ -90,12 +90,6 @@ config S3C_GPIO_CFG_S3C64XX
Internal configuration to enable S3C64XX style GPIO configuration
functions.
-config S5P_GPIO_CFG_S5PC1XX
- bool
- help
- Internal configuration to enable S5PC1XX style GPIO configuration
- functions.
-
config S3C_GPIO_PULL_UPDOWN
bool
help
@@ -111,6 +105,12 @@ config S3C_GPIO_PULL_UP
help
Internal configuration to enable the correct GPIO pull helper
+config S5P_GPIO_DRVSTR
+ bool
+ help
+ Internal configuration to get and set correct GPIO driver strength
+ helper
+
config SAMSUNG_GPIO_EXTRA
int "Number of additional GPIO pins"
default 0
@@ -160,11 +160,21 @@ config S3C_DEV_HSMMC2
help
Compile in platform device definitions for HSMMC channel 2
+config S3C_DEV_HWMON
+ bool
+ help
+ Compile in platform device definitions for HWMON
+
config S3C_DEV_I2C1
bool
help
Compile in platform device definitions for I2C channel 1
+config S3C_DEV_I2C2
+ bool
+ help
+ Compile in platform device definitions for I2C channel 2
+
config S3C_DEV_FB
bool
help
@@ -180,17 +190,43 @@ config S3C_DEV_USB_HSOTG
help
Compile in platform device definition for USB high-speed OtG
+config S3C_DEV_WDT
+ bool
+ default y if ARCH_S3C2410
+ help
+ Compile in platform device definition for Watchdog Timer
+
config S3C_DEV_NAND
bool
help
Compile in platform device definition for NAND controller
+config S3C_DEV_ONENAND
+ bool
+ help
+ Compile in platform device definition for OneNAND controller
+
+config S3C_DEV_RTC
+ bool
+ help
+ Compile in platform device definition for RTC
+
+config SAMSUNG_DEV_ADC
+ bool
+ help
+ Compile in platform device definition for ADC controller
+
config S3C64XX_DEV_SPI
bool
help
Compile in platform device definitions for S3C64XX's type
SPI controllers.
+config SAMSUNG_DEV_TS
+ bool
+ help
+ Compile in platform device definitions for touchscreen device
+
# DMA
config S3C_DMA
@@ -198,6 +234,12 @@ config S3C_DMA
help
Internal configuration for S3C DMA core
+config S3C_PL330_DMA
+ bool
+ select PL330
+ help
+ S3C DMA API Driver for PL330 DMAC.
+
comment "Power management"
config SAMSUNG_PM_DEBUG
@@ -243,4 +285,12 @@ config SAMSUNG_PM_CHECK_CHUNKSIZE
See <file:Documentation/arm/Samsung-S3C24XX/Suspend.txt>
+config SAMSUNG_WAKEMASK
+ bool
+ depends on PM
+ help
+ Compile support for wakeup-mask controls found on the S3C6400
+ and above. This code allows a set of interrupt-to-wakeup-mask
+ mappings. See <plat/wakeup-mask.h>
+
endif
diff --git a/arch/arm/plat-samsung/Makefile b/arch/arm/plat-samsung/Makefile
index 22c89d08f6e5..b1d82cc5e716 100644
--- a/arch/arm/plat-samsung/Makefile
+++ b/arch/arm/plat-samsung/Makefile
@@ -33,24 +33,36 @@ obj-$(CONFIG_S3C_ADC) += adc.o
obj-$(CONFIG_S3C_DEV_HSMMC) += dev-hsmmc.o
obj-$(CONFIG_S3C_DEV_HSMMC1) += dev-hsmmc1.o
obj-$(CONFIG_S3C_DEV_HSMMC2) += dev-hsmmc2.o
+obj-$(CONFIG_S3C_DEV_HWMON) += dev-hwmon.o
obj-y += dev-i2c0.o
obj-$(CONFIG_S3C_DEV_I2C1) += dev-i2c1.o
+obj-$(CONFIG_S3C_DEV_I2C2) += dev-i2c2.o
obj-$(CONFIG_S3C_DEV_FB) += dev-fb.o
obj-y += dev-uart.o
obj-$(CONFIG_S3C_DEV_USB_HOST) += dev-usb.o
obj-$(CONFIG_S3C_DEV_USB_HSOTG) += dev-usb-hsotg.o
+obj-$(CONFIG_S3C_DEV_WDT) += dev-wdt.o
obj-$(CONFIG_S3C_DEV_NAND) += dev-nand.o
+obj-$(CONFIG_S3C_DEV_ONENAND) += dev-onenand.o
+obj-$(CONFIG_S3C_DEV_RTC) += dev-rtc.o
+
+obj-$(CONFIG_SAMSUNG_DEV_ADC) += dev-adc.o
+obj-$(CONFIG_SAMSUNG_DEV_TS) += dev-ts.o
# DMA support
obj-$(CONFIG_S3C_DMA) += dma.o
+obj-$(CONFIG_S3C_PL330_DMA) += s3c-pl330.o
+
# PM support
obj-$(CONFIG_PM) += pm.o
obj-$(CONFIG_PM) += pm-gpio.o
obj-$(CONFIG_SAMSUNG_PM_CHECK) += pm-check.o
+obj-$(CONFIG_SAMSUNG_WAKEMASK) += wakeup-mask.o
+
# PWM support
obj-$(CONFIG_HAVE_PWM) += pwm.o
diff --git a/arch/arm/plat-samsung/adc.c b/arch/arm/plat-samsung/adc.c
index 210030d5cfe1..04d9521ddc9f 100644
--- a/arch/arm/plat-samsung/adc.c
+++ b/arch/arm/plat-samsung/adc.c
@@ -66,6 +66,7 @@ struct adc_device {
struct s3c_adc_client *cur;
struct s3c_adc_client *ts_pend;
void __iomem *regs;
+ spinlock_t lock;
unsigned int prescale;
@@ -74,7 +75,7 @@ struct adc_device {
static struct adc_device *adc_dev;
-static LIST_HEAD(adc_pending);
+static LIST_HEAD(adc_pending); /* protected by adc_device.lock */
#define adc_dbg(_adc, msg...) dev_dbg(&(_adc)->pdev->dev, msg)
@@ -145,7 +146,7 @@ int s3c_adc_start(struct s3c_adc_client *client,
if (client->is_ts && adc->ts_pend)
return -EAGAIN;
- local_irq_save(flags);
+ spin_lock_irqsave(&adc->lock, flags);
client->channel = channel;
client->nr_samples = nr_samples;
@@ -157,7 +158,8 @@ int s3c_adc_start(struct s3c_adc_client *client,
if (!adc->cur)
s3c_adc_try(adc);
- local_irq_restore(flags);
+
+ spin_unlock_irqrestore(&adc->lock, flags);
return 0;
}
@@ -237,6 +239,10 @@ EXPORT_SYMBOL_GPL(s3c_adc_register);
void s3c_adc_release(struct s3c_adc_client *client)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&adc_dev->lock, flags);
+
/* We should really check that nothing is in progress. */
if (adc_dev->cur == client)
adc_dev->cur = NULL;
@@ -255,6 +261,8 @@ void s3c_adc_release(struct s3c_adc_client *client)
if (adc_dev->cur == NULL)
s3c_adc_try(adc_dev);
+
+ spin_unlock_irqrestore(&adc_dev->lock, flags);
kfree(client);
}
EXPORT_SYMBOL_GPL(s3c_adc_release);
@@ -264,7 +272,6 @@ static irqreturn_t s3c_adc_irq(int irq, void *pw)
struct adc_device *adc = pw;
struct s3c_adc_client *client = adc->cur;
enum s3c_cpu_type cpu = platform_get_device_id(adc->pdev)->driver_data;
- unsigned long flags;
unsigned data0, data1;
if (!client) {
@@ -296,12 +303,12 @@ static irqreturn_t s3c_adc_irq(int irq, void *pw)
client->select_cb(client, 1);
s3c_adc_convert(adc);
} else {
- local_irq_save(flags);
+ spin_lock(&adc->lock);
(client->select_cb)(client, 0);
adc->cur = NULL;
s3c_adc_try(adc);
- local_irq_restore(flags);
+ spin_unlock(&adc->lock);
}
exit:
@@ -326,6 +333,8 @@ static int s3c_adc_probe(struct platform_device *pdev)
return -ENOMEM;
}
+ spin_lock_init(&adc->lock);
+
adc->pdev = pdev;
adc->prescale = S3C2410_ADCCON_PRSCVL(49);
@@ -407,13 +416,17 @@ static int __devexit s3c_adc_remove(struct platform_device *pdev)
static int s3c_adc_suspend(struct platform_device *pdev, pm_message_t state)
{
struct adc_device *adc = platform_get_drvdata(pdev);
+ unsigned long flags;
u32 con;
+ spin_lock_irqsave(&adc->lock, flags);
+
con = readl(adc->regs + S3C2410_ADCCON);
con |= S3C2410_ADCCON_STDBM;
writel(con, adc->regs + S3C2410_ADCCON);
disable_irq(adc->irq);
+ spin_unlock_irqrestore(&adc->lock, flags);
clk_disable(adc->clk);
return 0;
@@ -422,6 +435,7 @@ static int s3c_adc_suspend(struct platform_device *pdev, pm_message_t state)
static int s3c_adc_resume(struct platform_device *pdev)
{
struct adc_device *adc = platform_get_drvdata(pdev);
+ unsigned long flags;
clk_enable(adc->clk);
enable_irq(adc->irq);
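The adc.c hunks above replace the driver's local_irq_save()/local_irq_restore() pairs with a per-device spinlock, so the shared request state (adc->cur and the pending list) is protected against other CPUs as well as against the interrupt handler. A minimal, self-contained sketch of that locking pattern follows; the demo_* names are illustrative and not part of this patch:

#include <linux/spinlock.h>
#include <linux/interrupt.h>

/* Hypothetical device state mirroring the lock added to struct adc_device. */
struct demo_dev {
	spinlock_t lock;
	int cur;			/* stands in for adc->cur */
};

/* Process context: take the lock with interrupts disabled. */
static void demo_start(struct demo_dev *dev, int channel)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	if (!dev->cur)
		dev->cur = channel;
	spin_unlock_irqrestore(&dev->lock, flags);
}

/* Hard-IRQ context: interrupts are already off, plain spin_lock() suffices. */
static irqreturn_t demo_irq(int irq, void *pw)
{
	struct demo_dev *dev = pw;

	spin_lock(&dev->lock);
	dev->cur = 0;
	spin_unlock(&dev->lock);

	return IRQ_HANDLED;
}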
diff --git a/arch/arm/plat-samsung/clock.c b/arch/arm/plat-samsung/clock.c
index 1b25c9d8c403..8bf79f3efdfb 100644
--- a/arch/arm/plat-samsung/clock.c
+++ b/arch/arm/plat-samsung/clock.c
@@ -376,6 +376,21 @@ void __init s3c_register_clocks(struct clk *clkp, int nr_clks)
}
}
+/**
+ * s3c_disable_clocks() - disable an array of clocks
+ * @clkp: Pointer to the first clock in the array.
+ * @nr_clks: Number of clocks to disable.
+ *
+ * For internal use only at initialisation time. Disables the clocks in the
+ * @clkp array.
+ */
+
+void __init s3c_disable_clocks(struct clk *clkp, int nr_clks)
+{
+ for (; nr_clks > 0; nr_clks--, clkp++)
+ (clkp->enable)(clkp, 0);
+}
+
/* initialise all the clocks */
int __init s3c24xx_register_baseclocks(unsigned long xtal)
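s3c_disable_clocks() is intended to be called straight after s3c_register_clocks() on the set of gate clocks that should start the boot disabled. A hedged sketch of how SoC init code might use the pair; the clock entry and its enable callback are assumptions, not taken from this patch:

/* Hypothetical list of clocks that default to off at boot. */
static struct clk init_clocks_off[] = {
	{
		.name		= "example-gate",	/* illustrative name */
		.id		= -1,
		.parent		= &clk_p,
		.enable		= example_gate_ctrl,	/* assumed gate helper */
		.ctrlbit	= (1 << 2),
	},
};

static void __init example_register_clocks(void)
{
	s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
	s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
}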
diff --git a/arch/arm/mach-s3c64xx/dev-adc.c b/arch/arm/plat-samsung/dev-adc.c
index fafef9b6bcfa..9d903d4095ed 100644
--- a/arch/arm/mach-s3c64xx/dev-adc.c
+++ b/arch/arm/plat-samsung/dev-adc.c
@@ -1,4 +1,4 @@
-/* linux/arch/arm/plat-s3c64xx/dev-adc.c
+/* linux/arch/arm/plat-samsung/dev-adc.c
*
* Copyright 2010 Maurus Cuelenaere
*
@@ -22,8 +22,8 @@
static struct resource s3c_adc_resource[] = {
[0] = {
- .start = S3C64XX_PA_ADC,
- .end = S3C64XX_PA_ADC + SZ_256 - 1,
+ .start = SAMSUNG_PA_ADC,
+ .end = SAMSUNG_PA_ADC + SZ_256 - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
@@ -39,7 +39,7 @@ static struct resource s3c_adc_resource[] = {
};
struct platform_device s3c_device_adc = {
- .name = "s3c64xx-adc",
+ .name = "samsung-adc",
.id = -1,
.num_resources = ARRAY_SIZE(s3c_adc_resource),
.resource = s3c_adc_resource,
diff --git a/arch/arm/plat-samsung/dev-fb.c b/arch/arm/plat-samsung/dev-fb.c
index 002a15f313f3..bf60204c6297 100644
--- a/arch/arm/plat-samsung/dev-fb.c
+++ b/arch/arm/plat-samsung/dev-fb.c
@@ -19,7 +19,6 @@
#include <mach/irqs.h>
#include <mach/map.h>
-#include <mach/regs-fb.h>
#include <plat/fb.h>
#include <plat/devs.h>
diff --git a/arch/arm/plat-samsung/dev-hwmon.c b/arch/arm/plat-samsung/dev-hwmon.c
new file mode 100644
index 000000000000..b3ffb9587250
--- /dev/null
+++ b/arch/arm/plat-samsung/dev-hwmon.c
@@ -0,0 +1,42 @@
+/* linux/arch/arm/plat-samsung/dev-hwmon.c
+ *
+ * Copyright 2008 Simtec Electronics
+ * Ben Dooks <ben@simtec.co.uk>
+ * http://armlinux.simtec.co.uk/
+ *
+ * Adapted for HWMON by Maurus Cuelenaere
+ *
+ * Samsung series device definition for HWMON
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+
+#include <plat/devs.h>
+#include <plat/hwmon.h>
+
+struct platform_device s3c_device_hwmon = {
+ .name = "s3c-hwmon",
+ .id = -1,
+ .dev.parent = &s3c_device_adc.dev,
+};
+
+void __init s3c_hwmon_set_platdata(struct s3c_hwmon_pdata *pd)
+{
+ struct s3c_hwmon_pdata *npd;
+
+ if (!pd) {
+ printk(KERN_ERR "%s: no platform data\n", __func__);
+ return;
+ }
+
+ npd = kmemdup(pd, sizeof(struct s3c_hwmon_pdata), GFP_KERNEL);
+ if (!npd)
+ printk(KERN_ERR "%s: no memory for platform data\n", __func__);
+
+ s3c_device_hwmon.dev.platform_data = npd;
+}
diff --git a/arch/arm/plat-samsung/dev-i2c2.c b/arch/arm/plat-samsung/dev-i2c2.c
new file mode 100644
index 000000000000..07036dee09e7
--- /dev/null
+++ b/arch/arm/plat-samsung/dev-i2c2.c
@@ -0,0 +1,70 @@
+/* linux/arch/arm/plat-s3c/dev-i2c2.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * S3C series device definition for i2c device 2
+ *
+ * Based on plat-samsung/dev-i2c0.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/platform_device.h>
+
+#include <mach/irqs.h>
+#include <mach/map.h>
+
+#include <plat/regs-iic.h>
+#include <plat/iic.h>
+#include <plat/devs.h>
+#include <plat/cpu.h>
+
+static struct resource s3c_i2c_resource[] = {
+ [0] = {
+ .start = S3C_PA_IIC2,
+ .end = S3C_PA_IIC2 + SZ_4K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = IRQ_CAN0,
+ .end = IRQ_CAN0,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+struct platform_device s3c_device_i2c2 = {
+ .name = "s3c2410-i2c",
+ .id = 2,
+ .num_resources = ARRAY_SIZE(s3c_i2c_resource),
+ .resource = s3c_i2c_resource,
+};
+
+static struct s3c2410_platform_i2c default_i2c_data2 __initdata = {
+ .flags = 0,
+ .bus_num = 2,
+ .slave_addr = 0x10,
+ .frequency = 100*1000,
+ .sda_delay = 100,
+};
+
+void __init s3c_i2c2_set_platdata(struct s3c2410_platform_i2c *pd)
+{
+ struct s3c2410_platform_i2c *npd;
+
+ if (!pd)
+ pd = &default_i2c_data2;
+
+ npd = kmemdup(pd, sizeof(struct s3c2410_platform_i2c), GFP_KERNEL);
+ if (!npd)
+ printk(KERN_ERR "%s: no memory for platform data\n", __func__);
+ else if (!npd->cfg_gpio)
+ npd->cfg_gpio = s3c_i2c2_cfg_gpio;
+
+ s3c_device_i2c2.dev.platform_data = npd;
+}
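Board code is expected to supply (or default) the platform data and then register the new device. A usage sketch, with the codec address and the device-table comment purely illustrative:

static struct i2c_board_info demo_i2c2_devs[] __initdata = {
	{ I2C_BOARD_INFO("wm8580", 0x1b), },	/* example slave */
};

static void __init demo_machine_init(void)
{
	/* NULL falls back to default_i2c_data2 defined above */
	s3c_i2c2_set_platdata(NULL);
	i2c_register_board_info(2, demo_i2c2_devs, ARRAY_SIZE(demo_i2c2_devs));

	/* ... the board's device table would also include &s3c_device_i2c2 */
}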
diff --git a/arch/arm/plat-samsung/dev-onenand.c b/arch/arm/plat-samsung/dev-onenand.c
new file mode 100644
index 000000000000..45ec73287d8c
--- /dev/null
+++ b/arch/arm/plat-samsung/dev-onenand.c
@@ -0,0 +1,55 @@
+/*
+ * linux/arch/arm/plat-samsung/dev-onenand.c
+ *
+ * Copyright (c) 2008-2010 Samsung Electronics
+ * Kyungmin Park <kyungmin.park@samsung.com>
+ *
+ * S3C64XX/S5PC100 series device definition for OneNAND devices
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/onenand.h>
+
+#include <mach/irqs.h>
+#include <mach/map.h>
+
+static struct resource s3c_onenand_resources[] = {
+ [0] = {
+ .start = S3C_PA_ONENAND,
+ .end = S3C_PA_ONENAND + 0x400 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = S3C_PA_ONENAND_BUF,
+ .end = S3C_PA_ONENAND_BUF + S3C_SZ_ONENAND_BUF - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [2] = {
+ .start = IRQ_ONENAND,
+ .end = IRQ_ONENAND,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+struct platform_device s3c_device_onenand = {
+ .name = "samsung-onenand",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(s3c_onenand_resources),
+ .resource = s3c_onenand_resources,
+};
+
+void s3c_onenand_set_platdata(struct onenand_platform_data *pdata)
+{
+ struct onenand_platform_data *pd;
+
+ pd = kmemdup(pdata, sizeof(struct onenand_platform_data), GFP_KERNEL);
+ if (!pd)
+ printk(KERN_ERR "%s: no memory for platform data\n", __func__);
+ s3c_device_onenand.dev.platform_data = pd;
+}
diff --git a/arch/arm/mach-s3c64xx/dev-rtc.c b/arch/arm/plat-samsung/dev-rtc.c
index b9e7a05f0129..bf4e2267333c 100644
--- a/arch/arm/mach-s3c64xx/dev-rtc.c
+++ b/arch/arm/plat-samsung/dev-rtc.c
@@ -1,4 +1,4 @@
-/* linux/arch/arm/plat-s3c64xx/dev-rtc.c
+/* linux/arch/arm/plat-samsung/dev-rtc.c
*
* Copyright 2009 by Maurus Cuelenaere <mcuelenaere@gmail.com>
*
@@ -18,26 +18,26 @@
static struct resource s3c_rtc_resource[] = {
[0] = {
- .start = S3C64XX_PA_RTC,
- .end = S3C64XX_PA_RTC + 0xff,
- .flags = IORESOURCE_MEM,
+ .start = S3C_PA_RTC,
+ .end = S3C_PA_RTC + 0xff,
+ .flags = IORESOURCE_MEM,
},
[1] = {
- .start = IRQ_RTC_ALARM,
- .end = IRQ_RTC_ALARM,
- .flags = IORESOURCE_IRQ,
+ .start = IRQ_RTC_ALARM,
+ .end = IRQ_RTC_ALARM,
+ .flags = IORESOURCE_IRQ,
},
[2] = {
- .start = IRQ_RTC_TIC,
- .end = IRQ_RTC_TIC,
- .flags = IORESOURCE_IRQ
+ .start = IRQ_RTC_TIC,
+ .end = IRQ_RTC_TIC,
+ .flags = IORESOURCE_IRQ
}
};
struct platform_device s3c_device_rtc = {
- .name = "s3c64xx-rtc",
- .id = -1,
- .num_resources = ARRAY_SIZE(s3c_rtc_resource),
- .resource = s3c_rtc_resource,
+ .name = "s3c64xx-rtc",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(s3c_rtc_resource),
+ .resource = s3c_rtc_resource,
};
EXPORT_SYMBOL(s3c_device_rtc);
diff --git a/arch/arm/plat-samsung/dev-ts.c b/arch/arm/plat-samsung/dev-ts.c
new file mode 100644
index 000000000000..236ef8427d7d
--- /dev/null
+++ b/arch/arm/plat-samsung/dev-ts.c
@@ -0,0 +1,61 @@
+/* linux/arch/arm/mach-s3c64xx/dev-ts.c
+ *
+ * Copyright (c) 2008 Simtec Electronics
+ * http://armlinux.simtec.co.uk/
+ * Ben Dooks <ben@simtec.co.uk>, <ben-linux@fluff.org>
+ *
+ * Adapted by Maurus Cuelenaere for s3c64xx
+ *
+ * S3C64XX series device definition for touchscreen device
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/platform_device.h>
+
+#include <mach/irqs.h>
+#include <mach/map.h>
+
+#include <plat/devs.h>
+#include <plat/ts.h>
+
+static struct resource s3c_ts_resource[] = {
+ [0] = {
+ .start = SAMSUNG_PA_ADC,
+ .end = SAMSUNG_PA_ADC + SZ_256 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = IRQ_TC,
+ .end = IRQ_TC,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+struct platform_device s3c_device_ts = {
+ .name = "s3c64xx-ts",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(s3c_ts_resource),
+ .resource = s3c_ts_resource,
+};
+
+void __init s3c24xx_ts_set_platdata(struct s3c2410_ts_mach_info *pd)
+{
+ struct s3c2410_ts_mach_info *npd;
+
+ if (!pd) {
+ printk(KERN_ERR "%s: no platform data\n", __func__);
+ return;
+ }
+
+ npd = kmemdup(pd, sizeof(struct s3c2410_ts_mach_info), GFP_KERNEL);
+ if (!npd)
+ printk(KERN_ERR "%s: no memory for platform data\n", __func__);
+
+ s3c_device_ts.dev.platform_data = npd;
+}
+EXPORT_SYMBOL(s3c24xx_ts_set_platdata);
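A hedged example of board code feeding the touchscreen platform data; the field values are illustrative and assume the usual s3c2410_ts_mach_info layout (delay, prescaler, oversampling):

static struct s3c2410_ts_mach_info demo_ts_cfg __initdata = {
	.delay			= 10000,
	.presc			= 49,
	.oversampling_shift	= 2,
};

static void __init demo_machine_init(void)
{
	s3c24xx_ts_set_platdata(&demo_ts_cfg);
	/* ... and register &s3c_device_adc and &s3c_device_ts with the board */
}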
diff --git a/arch/arm/plat-samsung/dev-wdt.c b/arch/arm/plat-samsung/dev-wdt.c
new file mode 100644
index 000000000000..5efca87cddbd
--- /dev/null
+++ b/arch/arm/plat-samsung/dev-wdt.c
@@ -0,0 +1,40 @@
+/* linux/arch/arm/plat-samsung/dev-wdt.c
+ *
+ * Copyright (c) 2004 Simtec Electronics
+ * Ben Dooks <ben@simtec.co.uk>
+ *
+ * S3C series device definition for the watchdog timer
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+
+#include <mach/irqs.h>
+#include <mach/map.h>
+
+#include <plat/devs.h>
+
+static struct resource s3c_wdt_resource[] = {
+ [0] = {
+ .start = S3C_PA_WDT,
+ .end = S3C_PA_WDT + SZ_1M - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = IRQ_WDT,
+ .end = IRQ_WDT,
+ .flags = IORESOURCE_IRQ,
+ }
+};
+
+struct platform_device s3c_device_wdt = {
+ .name = "s3c2410-wdt",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(s3c_wdt_resource),
+ .resource = s3c_wdt_resource,
+};
+EXPORT_SYMBOL(s3c_device_wdt);
diff --git a/arch/arm/plat-samsung/gpio-config.c b/arch/arm/plat-samsung/gpio-config.c
index 44a84e896546..57b68a50f45e 100644
--- a/arch/arm/plat-samsung/gpio-config.c
+++ b/arch/arm/plat-samsung/gpio-config.c
@@ -1,7 +1,7 @@
/* linux/arch/arm/plat-s3c/gpio-config.c
*
* Copyright 2008 Openmoko, Inc.
- * Copyright 2008 Simtec Electronics
+ * Copyright 2008-2010 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
* http://armlinux.simtec.co.uk/
*
@@ -33,14 +33,34 @@ int s3c_gpio_cfgpin(unsigned int pin, unsigned int config)
offset = pin - chip->chip.base;
- local_irq_save(flags);
+ s3c_gpio_lock(chip, flags);
ret = s3c_gpio_do_setcfg(chip, offset, config);
- local_irq_restore(flags);
+ s3c_gpio_unlock(chip, flags);
return ret;
}
EXPORT_SYMBOL(s3c_gpio_cfgpin);
+unsigned s3c_gpio_getcfg(unsigned int pin)
+{
+ struct s3c_gpio_chip *chip = s3c_gpiolib_getchip(pin);
+ unsigned long flags;
+ unsigned ret = 0;
+ int offset;
+
+ if (chip) {
+ offset = pin - chip->chip.base;
+
+ s3c_gpio_lock(chip, flags);
+ ret = s3c_gpio_do_getcfg(chip, offset);
+ s3c_gpio_unlock(chip, flags);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(s3c_gpio_getcfg);
+
+
int s3c_gpio_setpull(unsigned int pin, s3c_gpio_pull_t pull)
{
struct s3c_gpio_chip *chip = s3c_gpiolib_getchip(pin);
@@ -52,17 +72,17 @@ int s3c_gpio_setpull(unsigned int pin, s3c_gpio_pull_t pull)
offset = pin - chip->chip.base;
- local_irq_save(flags);
+ s3c_gpio_lock(chip, flags);
ret = s3c_gpio_do_setpull(chip, offset, pull);
- local_irq_restore(flags);
+ s3c_gpio_unlock(chip, flags);
return ret;
}
EXPORT_SYMBOL(s3c_gpio_setpull);
#ifdef CONFIG_S3C_GPIO_CFG_S3C24XX
-int s3c_gpio_setcfg_s3c24xx_banka(struct s3c_gpio_chip *chip,
- unsigned int off, unsigned int cfg)
+int s3c_gpio_setcfg_s3c24xx_a(struct s3c_gpio_chip *chip,
+ unsigned int off, unsigned int cfg)
{
void __iomem *reg = chip->base;
unsigned int shift = off;
@@ -87,6 +107,19 @@ int s3c_gpio_setcfg_s3c24xx_banka(struct s3c_gpio_chip *chip,
return 0;
}
+unsigned s3c_gpio_getcfg_s3c24xx_a(struct s3c_gpio_chip *chip,
+ unsigned int off)
+{
+ u32 con;
+
+ con = __raw_readl(chip->base);
+ con >>= off;
+ con &= 1;
+ con++;
+
+ return S3C_GPIO_SFN(con);
+}
+
int s3c_gpio_setcfg_s3c24xx(struct s3c_gpio_chip *chip,
unsigned int off, unsigned int cfg)
{
@@ -109,6 +142,19 @@ int s3c_gpio_setcfg_s3c24xx(struct s3c_gpio_chip *chip,
return 0;
}
+
+unsigned int s3c_gpio_getcfg_s3c24xx(struct s3c_gpio_chip *chip,
+ unsigned int off)
+{
+ u32 con;
+
+ con = __raw_readl(chip->base);
+ con >>= off * 2;
+ con &= 3;
+
+ /* this conversion works for IN and OUT as well as special mode */
+ return S3C_GPIO_SPECIAL(con);
+}
#endif
#ifdef CONFIG_S3C_GPIO_CFG_S3C64XX
@@ -134,6 +180,25 @@ int s3c_gpio_setcfg_s3c64xx_4bit(struct s3c_gpio_chip *chip,
return 0;
}
+
+unsigned s3c_gpio_getcfg_s3c64xx_4bit(struct s3c_gpio_chip *chip,
+ unsigned int off)
+{
+ void __iomem *reg = chip->base;
+ unsigned int shift = (off & 7) * 4;
+ u32 con;
+
+ if (off < 8 && chip->chip.ngpio > 8)
+ reg -= 4;
+
+ con = __raw_readl(reg);
+ con >>= shift;
+ con &= 0xf;
+
+ /* this conversion works for IN and OUT as well as special mode */
+ return S3C_GPIO_SPECIAL(con);
+}
+
#endif /* CONFIG_S3C_GPIO_CFG_S3C64XX */
#ifdef CONFIG_S3C_GPIO_PULL_UPDOWN
@@ -164,3 +229,83 @@ s3c_gpio_pull_t s3c_gpio_getpull_updown(struct s3c_gpio_chip *chip,
return (__force s3c_gpio_pull_t)pup;
}
#endif
+
+#ifdef CONFIG_S3C_GPIO_PULL_UP
+int s3c_gpio_setpull_1up(struct s3c_gpio_chip *chip,
+ unsigned int off, s3c_gpio_pull_t pull)
+{
+ void __iomem *reg = chip->base + 0x08;
+ u32 pup = __raw_readl(reg);
+
+ if (pull == S3C_GPIO_PULL_UP)
+ pup &= ~(1 << off);
+ else if (pull == S3C_GPIO_PULL_NONE)
+ pup |= (1 << off);
+ else
+ return -EINVAL;
+
+ __raw_writel(pup, reg);
+ return 0;
+}
+
+s3c_gpio_pull_t s3c_gpio_getpull_1up(struct s3c_gpio_chip *chip,
+ unsigned int off)
+{
+ void __iomem *reg = chip->base + 0x08;
+ u32 pup = __raw_readl(reg);
+
+ pup &= (1 << off);
+ return pup ? S3C_GPIO_PULL_NONE : S3C_GPIO_PULL_UP;
+}
+#endif /* CONFIG_S3C_GPIO_PULL_UP */
+
+#ifdef CONFIG_S5P_GPIO_DRVSTR
+s5p_gpio_drvstr_t s5p_gpio_get_drvstr(unsigned int pin)
+{
+ struct s3c_gpio_chip *chip = s3c_gpiolib_getchip(pin);
+ unsigned int off;
+ void __iomem *reg;
+ int shift;
+ u32 drvstr;
+
+ if (!chip)
+ return -EINVAL;
+
+ off = pin - chip->chip.base;
+ shift = off * 2;
+ reg = chip->base + 0x0C;
+
+ drvstr = __raw_readl(reg);
+ drvstr = drvstr & (0x3 << shift);
+ drvstr = drvstr >> shift;
+
+ return (__force s5p_gpio_drvstr_t)drvstr;
+}
+EXPORT_SYMBOL(s5p_gpio_get_drvstr);
+
+int s5p_gpio_set_drvstr(unsigned int pin, s5p_gpio_drvstr_t drvstr)
+{
+ struct s3c_gpio_chip *chip = s3c_gpiolib_getchip(pin);
+ unsigned int off;
+ void __iomem *reg;
+ int shift;
+ u32 tmp;
+
+ if (!chip)
+ return -EINVAL;
+
+ off = pin - chip->chip.base;
+ shift = off * 2;
+ reg = chip->base + 0x0C;
+
+ tmp = __raw_readl(reg);
+ tmp &= ~(0x3 << shift);
+ tmp |= drvstr << shift;
+
+ __raw_writel(tmp, reg);
+
+ return 0;
+}
+EXPORT_SYMBOL(s5p_gpio_set_drvstr);
+#endif /* CONFIG_S5P_GPIO_DRVSTR */
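The new read-back helpers make it possible to save a pin's function and restore it later, for example around a board-specific suspend path. A small sketch; the helper names are illustrative only:

static unsigned int demo_saved_cfg;

static void demo_pin_suspend(unsigned int gpio)
{
	demo_saved_cfg = s3c_gpio_getcfg(gpio);

	s3c_gpio_cfgpin(gpio, S3C_GPIO_INPUT);
	s3c_gpio_setpull(gpio, S3C_GPIO_PULL_NONE);
}

static void demo_pin_resume(unsigned int gpio)
{
	s3c_gpio_cfgpin(gpio, demo_saved_cfg);
}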
diff --git a/arch/arm/plat-samsung/gpio.c b/arch/arm/plat-samsung/gpio.c
index 28d2ab8a08db..b83a83351cea 100644
--- a/arch/arm/plat-samsung/gpio.c
+++ b/arch/arm/plat-samsung/gpio.c
@@ -15,6 +15,7 @@
#include <linux/init.h>
#include <linux/io.h>
#include <linux/gpio.h>
+#include <linux/spinlock.h>
#include <plat/gpio-core.h>
@@ -52,14 +53,14 @@ static int s3c_gpiolib_input(struct gpio_chip *chip, unsigned offset)
unsigned long flags;
unsigned long con;
- local_irq_save(flags);
+ s3c_gpio_lock(ourchip, flags);
con = __raw_readl(base + 0x00);
con &= ~(3 << (offset * 2));
__raw_writel(con, base + 0x00);
- local_irq_restore(flags);
+ s3c_gpio_unlock(ourchip, flags);
return 0;
}
@@ -72,7 +73,7 @@ static int s3c_gpiolib_output(struct gpio_chip *chip,
unsigned long dat;
unsigned long con;
- local_irq_save(flags);
+ s3c_gpio_lock(ourchip, flags);
dat = __raw_readl(base + 0x04);
dat &= ~(1 << offset);
@@ -87,7 +88,7 @@ static int s3c_gpiolib_output(struct gpio_chip *chip,
__raw_writel(con, base + 0x00);
__raw_writel(dat, base + 0x04);
- local_irq_restore(flags);
+ s3c_gpio_unlock(ourchip, flags);
return 0;
}
@@ -99,7 +100,7 @@ static void s3c_gpiolib_set(struct gpio_chip *chip,
unsigned long flags;
unsigned long dat;
- local_irq_save(flags);
+ s3c_gpio_lock(ourchip, flags);
dat = __raw_readl(base + 0x04);
dat &= ~(1 << offset);
@@ -107,7 +108,7 @@ static void s3c_gpiolib_set(struct gpio_chip *chip,
dat |= 1 << offset;
__raw_writel(dat, base + 0x04);
- local_irq_restore(flags);
+ s3c_gpio_unlock(ourchip, flags);
}
static int s3c_gpiolib_get(struct gpio_chip *chip, unsigned offset)
@@ -131,6 +132,8 @@ __init void s3c_gpiolib_add(struct s3c_gpio_chip *chip)
BUG_ON(!gc->label);
BUG_ON(!gc->ngpio);
+ spin_lock_init(&chip->lock);
+
if (!gc->direction_input)
gc->direction_input = s3c_gpiolib_input;
if (!gc->direction_output)
diff --git a/arch/arm/plat-samsung/include/plat/clock.h b/arch/arm/plat-samsung/include/plat/clock.h
index 60b62692ac7a..0fbcd0effd8e 100644
--- a/arch/arm/plat-samsung/include/plat/clock.h
+++ b/arch/arm/plat-samsung/include/plat/clock.h
@@ -74,6 +74,7 @@ extern struct clk clk_ext;
extern struct clk clk_h2;
extern struct clk clk_27m;
extern struct clk clk_48m;
+extern struct clk clk_xusbxti;
extern int clk_default_setrate(struct clk *clk, unsigned long rate);
extern struct clk_ops clk_ops_def_setrate;
@@ -91,6 +92,7 @@ extern int s3c24xx_register_clock(struct clk *clk);
extern int s3c24xx_register_clocks(struct clk **clk, int nr_clks);
extern void s3c_register_clocks(struct clk *clk, int nr_clks);
+extern void s3c_disable_clocks(struct clk *clkp, int nr_clks);
extern int s3c24xx_register_baseclocks(unsigned long xtal);
diff --git a/arch/arm/plat-samsung/include/plat/cpu.h b/arch/arm/plat-samsung/include/plat/cpu.h
index d316b4a579f4..6412933d6fbb 100644
--- a/arch/arm/plat-samsung/include/plat/cpu.h
+++ b/arch/arm/plat-samsung/include/plat/cpu.h
@@ -73,11 +73,15 @@ extern struct sys_timer s3c24xx_timer;
extern struct sysdev_class s3c2410_sysclass;
extern struct sysdev_class s3c2410a_sysclass;
extern struct sysdev_class s3c2412_sysclass;
+extern struct sysdev_class s3c2416_sysclass;
extern struct sysdev_class s3c2440_sysclass;
extern struct sysdev_class s3c2442_sysclass;
extern struct sysdev_class s3c2443_sysclass;
extern struct sysdev_class s3c6410_sysclass;
extern struct sysdev_class s3c64xx_sysclass;
+extern struct sysdev_class s5p6440_sysclass;
+extern struct sysdev_class s5p6442_sysclass;
+extern struct sysdev_class s5pv210_sysclass;
extern void (*s5pc1xx_idle)(void);
diff --git a/arch/arm/plat-samsung/include/plat/devs.h b/arch/arm/plat-samsung/include/plat/devs.h
index 796d24258313..e6144e4b9118 100644
--- a/arch/arm/plat-samsung/include/plat/devs.h
+++ b/arch/arm/plat-samsung/include/plat/devs.h
@@ -45,6 +45,7 @@ extern struct platform_device s3c_device_lcd;
extern struct platform_device s3c_device_wdt;
extern struct platform_device s3c_device_i2c0;
extern struct platform_device s3c_device_i2c1;
+extern struct platform_device s3c_device_i2c2;
extern struct platform_device s3c_device_rtc;
extern struct platform_device s3c_device_adc;
extern struct platform_device s3c_device_sdi;
@@ -57,13 +58,48 @@ extern struct platform_device s3c_device_hsmmc2;
extern struct platform_device s3c_device_spi0;
extern struct platform_device s3c_device_spi1;
+extern struct platform_device s5pc100_device_spi0;
+extern struct platform_device s5pc100_device_spi1;
+extern struct platform_device s5pc100_device_spi2;
+extern struct platform_device s5pv210_device_spi0;
+extern struct platform_device s5pv210_device_spi1;
+extern struct platform_device s5p6440_device_spi0;
+extern struct platform_device s5p6440_device_spi1;
+
extern struct platform_device s3c_device_hwmon;
extern struct platform_device s3c_device_nand;
+extern struct platform_device s3c_device_onenand;
+extern struct platform_device s3c64xx_device_onenand1;
+extern struct platform_device s5pc110_device_onenand;
extern struct platform_device s3c_device_usbgadget;
extern struct platform_device s3c_device_usb_hsotg;
+extern struct platform_device s5pv210_device_ac97;
+extern struct platform_device s5pv210_device_pcm0;
+extern struct platform_device s5pv210_device_pcm1;
+extern struct platform_device s5pv210_device_pcm2;
+extern struct platform_device s5pv210_device_iis0;
+extern struct platform_device s5pv210_device_iis1;
+extern struct platform_device s5pv210_device_iis2;
+
+extern struct platform_device s5p6442_device_pcm0;
+extern struct platform_device s5p6442_device_pcm1;
+extern struct platform_device s5p6442_device_iis0;
+extern struct platform_device s5p6442_device_iis1;
+extern struct platform_device s5p6442_device_spi;
+
+extern struct platform_device s5p6440_device_pcm;
+extern struct platform_device s5p6440_device_iis;
+
+extern struct platform_device s5pc100_device_ac97;
+extern struct platform_device s5pc100_device_pcm0;
+extern struct platform_device s5pc100_device_pcm1;
+extern struct platform_device s5pc100_device_iis0;
+extern struct platform_device s5pc100_device_iis1;
+extern struct platform_device s5pc100_device_iis2;
+
/* s3c2440 specific devices */
#ifdef CONFIG_CPU_S3C2440
diff --git a/arch/arm/plat-samsung/include/plat/dma.h b/arch/arm/plat-samsung/include/plat/dma.h
index 7584d751ed51..2e8f8c6560d7 100644
--- a/arch/arm/plat-samsung/include/plat/dma.h
+++ b/arch/arm/plat-samsung/include/plat/dma.h
@@ -110,8 +110,8 @@ extern int s3c2410_dma_config(unsigned int channel, int xferunit);
* configure the device we're talking to
*/
-extern int s3c2410_dma_devconfig(int channel, enum s3c2410_dmasrc source,
- unsigned long devaddr);
+extern int s3c2410_dma_devconfig(unsigned int channel,
+ enum s3c2410_dmasrc source, unsigned long devaddr);
/* s3c2410_dma_getposition
*
diff --git a/arch/arm/plat-samsung/include/plat/fb.h b/arch/arm/plat-samsung/include/plat/fb.h
index ffc01a76b7ce..27d3b497b55b 100644
--- a/arch/arm/plat-samsung/include/plat/fb.h
+++ b/arch/arm/plat-samsung/include/plat/fb.h
@@ -15,6 +15,13 @@
#ifndef __PLAT_S3C_FB_H
#define __PLAT_S3C_FB_H __FILE__
+/* S3C_FB_MAX_WIN
+ * Set to the maximum number of windows that any of the supported hardware
+ * can use. Since the platform data uses this for an array size, having it
+ * set to the maximum of any version of the hardware can do is safe.
+ */
+#define S3C_FB_MAX_WIN (5)
+
/**
* struct s3c_fb_pd_win - per window setup data
* @win_mode: The display parameters to initialise (not for window 0)
@@ -77,4 +84,11 @@ extern void s3c64xx_fb_gpio_setup_24bpp(void);
*/
extern void s5pc100_fb_gpio_setup_24bpp(void);
+/**
+ * s5pv210_fb_gpio_setup_24bpp() - S5PV210/S5PC110 setup function for 24bpp LCD
+ *
+ * Initialise the GPIO for an 24bpp LCD display on the RGB interface.
+ */
+extern void s5pv210_fb_gpio_setup_24bpp(void);
+
#endif /* __PLAT_S3C_FB_H */
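S3C_FB_MAX_WIN bounds the per-window array in the s3c-fb platform data, so boards only fill in the windows they actually use. A hedged sketch, assuming the platform data carries a win[] pointer array and a setup_gpio callback:

static struct s3c_fb_pd_win demo_fb_win0 = {
	/* display timings and window size for window 0 would go here */
};

static struct s3c_fb_platdata demo_fb_pdata __initdata = {
	.win[0]		= &demo_fb_win0,	/* windows 1..S3C_FB_MAX_WIN-1 unused */
	.setup_gpio	= s5pc100_fb_gpio_setup_24bpp,
};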
diff --git a/arch/arm/plat-samsung/include/plat/gpio-cfg-helpers.h b/arch/arm/plat-samsung/include/plat/gpio-cfg-helpers.h
index dda19da037ad..3e21c75feefa 100644
--- a/arch/arm/plat-samsung/include/plat/gpio-cfg-helpers.h
+++ b/arch/arm/plat-samsung/include/plat/gpio-cfg-helpers.h
@@ -30,6 +30,12 @@ static inline int s3c_gpio_do_setcfg(struct s3c_gpio_chip *chip,
return (chip->config->set_config)(chip, off, config);
}
+static inline unsigned s3c_gpio_do_getcfg(struct s3c_gpio_chip *chip,
+ unsigned int off)
+{
+ return (chip->config->get_config)(chip, off);
+}
+
static inline int s3c_gpio_do_setpull(struct s3c_gpio_chip *chip,
unsigned int off, s3c_gpio_pull_t pull)
{
@@ -53,6 +59,18 @@ extern int s3c_gpio_setcfg_s3c24xx(struct s3c_gpio_chip *chip,
unsigned int off, unsigned int cfg);
/**
+ * s3c_gpio_getcfg_s3c24xx - S3C24XX style GPIO configuration read.
+ * @chip: The gpio chip that is being configured.
+ * @off: The offset for the GPIO being configured.
+ *
+ * The reverse of s3c_gpio_setcfg_s3c24xx(). Will return a value which
+ * could be directly passed back to s3c_gpio_setcfg_s3c24xx(), built with
+ * the S3C_GPIO_SPECIAL() macro.
+ */
+unsigned int s3c_gpio_getcfg_s3c24xx(struct s3c_gpio_chip *chip,
+ unsigned int off);
+
+/**
* s3c_gpio_setcfg_s3c24xx_a - S3C24XX style GPIO configuration (Bank A)
* @chip: The gpio chip that is being configured.
* @off: The offset for the GPIO being configured.
@@ -65,6 +83,21 @@ extern int s3c_gpio_setcfg_s3c24xx(struct s3c_gpio_chip *chip,
extern int s3c_gpio_setcfg_s3c24xx_a(struct s3c_gpio_chip *chip,
unsigned int off, unsigned int cfg);
+
+/**
+ * s3c_gpio_getcfg_s3c24xx_a - S3C24XX style GPIO configuration read (Bank A)
+ * @chip: The gpio chip that is being configured.
+ * @off: The offset for the GPIO being configured.
+ *
+ * The reverse of s3c_gpio_setcfg_s3c24xx_a(), turning a GPIO register
+ * setting into a usable GPIO configuration value.
+ *
+ * @sa s3c_gpio_getcfg_s3c24xx
+ * @sa s3c_gpio_getcfg_s3c64xx_4bit
+ */
+extern unsigned s3c_gpio_getcfg_s3c24xx_a(struct s3c_gpio_chip *chip,
+ unsigned int off);
+
/**
* s3c_gpio_setcfg_s3c64xx_4bit - S3C64XX 4bit single register GPIO config.
* @chip: The gpio chip that is being configured.
@@ -85,6 +118,20 @@ extern int s3c_gpio_setcfg_s3c64xx_4bit(struct s3c_gpio_chip *chip,
unsigned int off, unsigned int cfg);
+/**
+ * s3c_gpio_getcfg_s3c64xx_4bit - S3C64XX 4bit single register GPIO config read.
+ * @chip: The gpio chip that is being configured.
+ * @off: The offset for the GPIO being configured.
+ *
+ * The reverse of s3c_gpio_setcfg_s3c64xx_4bit(), turning a gpio configuration
+ * register setting into a value the software can use, such as could be passed
+ * to s3c_gpio_setcfg_s3c64xx_4bit().
+ *
+ * @sa s3c_gpio_getcfg_s3c24xx
+ */
+extern unsigned s3c_gpio_getcfg_s3c64xx_4bit(struct s3c_gpio_chip *chip,
+ unsigned int off);
+
/* Pull-{up,down} resistor controls.
*
* S3C2410,S3C2440,S3C24A0 = Pull-UP,
@@ -146,6 +193,17 @@ extern s3c_gpio_pull_t s3c_gpio_getpull_updown(struct s3c_gpio_chip *chip,
unsigned int off);
/**
+ * s3c_gpio_getpull_1up() - Get configuration for choice of up or none
+ * @chip: The gpio chip that the GPIO pin belongs to
+ * @off: The offset to the pin to get the configuration of.
+ *
+ * This helper function reads the state of the pull-up resistor for the
+ * given GPIO in the same case as s3c_gpio_setpull_1up.
+*/
+extern s3c_gpio_pull_t s3c_gpio_getpull_1up(struct s3c_gpio_chip *chip,
+ unsigned int off);
+
+/**
* s3c_gpio_setpull_s3c2443() - Pull configuration for s3c2443.
* @chip: The gpio chip that is being configured.
* @off: The offset for the GPIO being configured.
diff --git a/arch/arm/plat-samsung/include/plat/gpio-cfg.h b/arch/arm/plat-samsung/include/plat/gpio-cfg.h
index 29cd6a86cade..db4112c6f2be 100644
--- a/arch/arm/plat-samsung/include/plat/gpio-cfg.h
+++ b/arch/arm/plat-samsung/include/plat/gpio-cfg.h
@@ -25,6 +25,7 @@
#define __PLAT_GPIO_CFG_H __FILE__
typedef unsigned int __bitwise__ s3c_gpio_pull_t;
+typedef unsigned int __bitwise__ s5p_gpio_drvstr_t;
/* forward declaration if gpio-core.h hasn't been included */
struct s3c_gpio_chip;
@@ -42,6 +43,11 @@ struct s3c_gpio_chip;
* layouts. Provide a point to vector control routine and provide any
* per-bank configuration information that other systems such as the
* external interrupt code will need.
+ *
+ * @sa s3c_gpio_cfgpin
+ * @sa s3c_gpio_getcfg
+ * @sa s3c_gpio_setpull
+ * @sa s3c_gpio_getpull
*/
struct s3c_gpio_cfg {
unsigned int cfg_eint;
@@ -69,14 +75,39 @@ struct s3c_gpio_cfg {
/**
* s3c_gpio_cfgpin() - Change the GPIO function of a pin.
* @pin pin The pin number to configure.
- * @pin to The configuration for the pin's function.
+ * @to to The configuration for the pin's function.
*
* Configure which function is actually connected to the external
* pin, such as a gpio input, output or some form of special function
* connected to an internal peripheral block.
+ *
+ * The @to parameter can be one of the generic S3C_GPIO_INPUT, S3C_GPIO_OUTPUT
+ * or S3C_GPIO_SFN() to indicate one of the possible special-function values;
+ * the helper will then generate the correct bit mask and shift for the
+ * configuration.
+ *
+ * If a bank of GPIOs all needs to be set to special-function 2, then
+ * the following code will work:
+ *
+ * for (gpio = start; gpio < end; gpio++)
+ * s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(2));
+ *
+ * The @to parameter can also be a specific value already shifted to the
+ * correct position in the control register, although these are discouraged
+ * in newer kernels and are only being kept for compatibility.
*/
extern int s3c_gpio_cfgpin(unsigned int pin, unsigned int to);
+/**
+ * s3c_gpio_getcfg - Read the current function for a GPIO pin
+ * @pin: The pin to read the configuration value for.
+ *
+ * Read the configuration state of the given @pin, returning a value that
+ * could be passed back to s3c_gpio_cfgpin().
+ *
+ * @sa s3c_gpio_cfgpin
+ */
+extern unsigned s3c_gpio_getcfg(unsigned int pin);
+
/* Define values for the pull-{up,down} available for each gpio pin.
*
* These values control the state of the weak pull-{up,down} resistors
@@ -96,6 +127,8 @@ extern int s3c_gpio_cfgpin(unsigned int pin, unsigned int to);
* This function sets the state of the pull-{up,down} resistor for the
* specified pin. It will return 0 if successful, or a negative error
* code if the pin cannot support the requested pull setting.
+ *
+ * @pull is one of S3C_GPIO_PULL_NONE, S3C_GPIO_PULL_DOWN or S3C_GPIO_PULL_UP.
*/
extern int s3c_gpio_setpull(unsigned int pin, s3c_gpio_pull_t pull);
@@ -107,4 +140,33 @@ extern int s3c_gpio_setpull(unsigned int pin, s3c_gpio_pull_t pull);
*/
extern s3c_gpio_pull_t s3c_gpio_getpull(unsigned int pin);
+/* Define values for the drvstr available for each gpio pin.
+ *
+ * These values control the value of the output signal driver strength,
+ * configurable on most pins on the S5P series.
+ */
+#define S5P_GPIO_DRVSTR_LV1 ((__force s5p_gpio_drvstr_t)0x00)
+#define S5P_GPIO_DRVSTR_LV2 ((__force s5p_gpio_drvstr_t)0x01)
+#define S5P_GPIO_DRVSTR_LV3 ((__force s5p_gpio_drvstr_t)0x10)
+#define S5P_GPIO_DRVSTR_LV4 ((__force s5p_gpio_drvstr_t)0x11)
+
+/**
+ * s5p_gpio_get_drvstr() - get the driver strength value of a gpio pin
+ * @pin: The pin number to get the settings for
+ *
+ * Read the driver strength value for the specified pin.
+*/
+extern s5p_gpio_drvstr_t s5p_gpio_get_drvstr(unsigned int pin);
+
+/**
+ * s5p_gpio_set_drvstr() - set the driver strength value of a gpio pin
+ * @pin: The pin number to configure the driver strength value
+ * @drvstr: The new value of the driver strength
+ *
+ * This function sets the driver strength value for the specified pin.
+ * It will return 0 if successful, or a negative error code if the pin
+ * cannot support the requested setting.
+*/
+extern int s5p_gpio_set_drvstr(unsigned int pin, s5p_gpio_drvstr_t drvstr);
+
#endif /* __PLAT_GPIO_CFG_H */
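A short usage sketch for the new driver-strength helpers; the gpio argument and the chosen level are illustrative only:

static void __init demo_setup_sd_clk(unsigned int gpio)
{
	s5p_gpio_drvstr_t cur = s5p_gpio_get_drvstr(gpio);

	if (cur != S5P_GPIO_DRVSTR_LV4)
		s5p_gpio_set_drvstr(gpio, S5P_GPIO_DRVSTR_LV4);
}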
diff --git a/arch/arm/plat-samsung/include/plat/gpio-core.h b/arch/arm/plat-samsung/include/plat/gpio-core.h
index 49ff406a7066..e358c7da8480 100644
--- a/arch/arm/plat-samsung/include/plat/gpio-core.h
+++ b/arch/arm/plat-samsung/include/plat/gpio-core.h
@@ -44,16 +44,26 @@ struct s3c_gpio_cfg;
* @chip: The chip structure to be exported via gpiolib.
* @base: The base pointer to the gpio configuration registers.
* @config: special function and pull-resistor control information.
+ * @lock: Lock for exclusive access to this gpio bank.
* @pm_save: Save information for suspend/resume support.
*
* This wrapper provides the necessary information for the Samsung
* specific gpios being registered with gpiolib.
+ *
+ * The lock protects each gpio bank from concurrent access to the shared
+ * configuration registers, and from reading data whilst another thread
+ * is writing to the register set.
+ *
+ * Each chip has its own lock to avoid any contention between different
+ * CPU cores trying to get one lock for different GPIO banks, where each
+ * bank of GPIO has its own register space and configuration registers.
*/
struct s3c_gpio_chip {
struct gpio_chip chip;
struct s3c_gpio_cfg *config;
struct s3c_gpio_pm *pm;
void __iomem *base;
+ spinlock_t lock;
#ifdef CONFIG_PM
u32 pm_save[4];
#endif
@@ -97,7 +107,7 @@ extern void s3c_gpiolib_add(struct s3c_gpio_chip *chip);
* others = Special functions (dependent on bank)
*
* Note, since the code to deal with the case where there are two control
- * registers instead of one, we do not have a seperate set of function
+ * registers instead of one, we do not have a separate set of functions
* (samsung_gpiolib_add_4bit2_chips) for each case.
*/
extern void samsung_gpiolib_add_4bit_chips(struct s3c_gpio_chip *chip,
@@ -108,6 +118,9 @@ extern void samsung_gpiolib_add_4bit2_chips(struct s3c_gpio_chip *chip,
extern void samsung_gpiolib_add_4bit(struct s3c_gpio_chip *chip);
extern void samsung_gpiolib_add_4bit2(struct s3c_gpio_chip *chip);
+/* exported for core SoC support to change */
+extern struct s3c_gpio_cfg s3c24xx_gpiocfg_default;
+
#ifdef CONFIG_S3C_GPIO_TRACK
extern struct s3c_gpio_chip *s3c_gpios[S3C_GPIO_END];
@@ -135,3 +148,7 @@ extern struct s3c_gpio_pm s3c_gpio_pm_4bit;
#define __gpio_pm(x) NULL
#endif /* CONFIG_PM */
+
+/* locking wrappers to deal with multiple access to the same gpio bank */
+#define s3c_gpio_lock(_oc, _fl) spin_lock_irqsave(&(_oc)->lock, _fl)
+#define s3c_gpio_unlock(_oc, _fl) spin_unlock_irqrestore(&(_oc)->lock, _fl)
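The wrappers keep the per-bank locking pattern uniform across the configuration helpers. A sketch of a helper built on them; illustrative only, not part of the patch:

static int demo_toggle_output(struct s3c_gpio_chip *chip, unsigned int offset)
{
	unsigned long flags;
	unsigned long dat;

	s3c_gpio_lock(chip, flags);

	dat = __raw_readl(chip->base + 0x04);	/* data register, as in gpio.c */
	dat ^= 1 << offset;
	__raw_writel(dat, chip->base + 0x04);

	s3c_gpio_unlock(chip, flags);

	return 0;
}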
diff --git a/arch/arm/plat-samsung/include/plat/hwmon.h b/arch/arm/plat-samsung/include/plat/hwmon.h
index 1ba88ea0aa31..c167e4429bc7 100644
--- a/arch/arm/plat-samsung/include/plat/hwmon.h
+++ b/arch/arm/plat-samsung/include/plat/hwmon.h
@@ -37,5 +37,15 @@ struct s3c_hwmon_pdata {
struct s3c_hwmon_chcfg *in[8];
};
+/**
+ * s3c_hwmon_set_platdata - Set platform data for S3C HWMON device
+ * @pd: Platform data to register to device.
+ *
+ * Register the given platform data for use with the S3C HWMON device.
+ * The call will copy the platform data, so the board definitions can
+ * make the structure itself __initdata.
+ */
+extern void __init s3c_hwmon_set_platdata(struct s3c_hwmon_pdata *pd);
+
#endif /* __ASM_ARCH_ADC_HWMON_H */
diff --git a/arch/arm/plat-samsung/include/plat/iic-core.h b/arch/arm/plat-samsung/include/plat/iic-core.h
index 36397ca20962..f182669b8e8e 100644
--- a/arch/arm/plat-samsung/include/plat/iic-core.h
+++ b/arch/arm/plat-samsung/include/plat/iic-core.h
@@ -32,4 +32,11 @@ static inline void s3c_i2c1_setname(char *name)
#endif
}
+static inline void s3c_i2c2_setname(char *name)
+{
+#ifdef CONFIG_S3C_DEV_I2C2
+ s3c_device_i2c2.name = name;
+#endif
+}
+
#endif /* __ASM_ARCH_IIC_H */
diff --git a/arch/arm/plat-samsung/include/plat/iic.h b/arch/arm/plat-samsung/include/plat/iic.h
index 3083df00dee6..133308bf595d 100644
--- a/arch/arm/plat-samsung/include/plat/iic.h
+++ b/arch/arm/plat-samsung/include/plat/iic.h
@@ -54,9 +54,11 @@ struct s3c2410_platform_i2c {
*/
extern void s3c_i2c0_set_platdata(struct s3c2410_platform_i2c *i2c);
extern void s3c_i2c1_set_platdata(struct s3c2410_platform_i2c *i2c);
+extern void s3c_i2c2_set_platdata(struct s3c2410_platform_i2c *i2c);
/* defined by architecture to configure gpio */
extern void s3c_i2c0_cfg_gpio(struct platform_device *dev);
extern void s3c_i2c1_cfg_gpio(struct platform_device *dev);
+extern void s3c_i2c2_cfg_gpio(struct platform_device *dev);
#endif /* __ASM_ARCH_IIC_H */
diff --git a/arch/arm/plat-samsung/include/plat/onenand-core.h b/arch/arm/plat-samsung/include/plat/onenand-core.h
new file mode 100644
index 000000000000..7701cb7020c8
--- /dev/null
+++ b/arch/arm/plat-samsung/include/plat/onenand-core.h
@@ -0,0 +1,37 @@
+/*
+ * linux/arch/arm/plat-samsung/onenand-core.h
+ *
+ * Copyright (c) 2010 Samsung Electronics
+ * Kyungmin Park <kyungmin.park@samsung.com>
+ * Marek Szyprowski <m.szyprowski@samsung.com>
+ *
+ * Samsung OneNAND Controller core functions
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __ASM_ARCH_ONENAND_CORE_H
+#define __ASM_ARCH_ONENAND_CORE_H __FILE__
+
+/* These functions are only for use with the core support code, such as
+ * the cpu specific initialisation code
+ */
+
+/* re-define device name depending on support. */
+static inline void s3c_onenand_setname(char *name)
+{
+#ifdef CONFIG_S3C_DEV_ONENAND
+ s3c_device_onenand.name = name;
+#endif
+}
+
+static inline void s3c64xx_onenand1_setname(char *name)
+{
+#ifdef CONFIG_S3C64XX_DEV_ONENAND1
+ s3c64xx_device_onenand1.name = name;
+#endif
+}
+
+#endif /* __ASM_ARCH_ONENAND_CORE_H */
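The setname helpers are meant for cpu init code to rebind the device to the SoC-specific controller driver before registration. A sketch; the driver name shown is an assumption:

static void __init demo_cpu_map_io(void)
{
	s3c_onenand_setname("s5pc100-onenand");	/* assumed driver id */
}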
diff --git a/arch/arm/plat-samsung/include/plat/pll6553x.h b/arch/arm/plat-samsung/include/plat/pll6553x.h
new file mode 100644
index 000000000000..b8b7e1d884f8
--- /dev/null
+++ b/arch/arm/plat-samsung/include/plat/pll6553x.h
@@ -0,0 +1,51 @@
+/* arch/arm/plat-samsung/include/plat/pll6553x.h
+ * partially from arch/arm/mach-s3c64xx/include/mach/pll.h
+ *
+ * Copyright 2008 Openmoko, Inc.
+ * Copyright 2008 Simtec Electronics
+ * Ben Dooks <ben@simtec.co.uk>
+ * http://armlinux.simtec.co.uk/
+ *
+ * Samsung PLL6553x PLL code
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+/* S3C6400 and compatible (S3C2416, etc.) EPLL code */
+
+#define PLL6553X_MDIV_MASK ((1 << (23-16)) - 1)
+#define PLL6553X_PDIV_MASK ((1 << (13-8)) - 1)
+#define PLL6553X_SDIV_MASK ((1 << (2-0)) - 1)
+#define PLL6553X_MDIV_SHIFT (16)
+#define PLL6553X_PDIV_SHIFT (8)
+#define PLL6553X_SDIV_SHIFT (0)
+#define PLL6553X_KDIV_MASK (0xffff)
+
+static inline unsigned long s3c_get_pll6553x(unsigned long baseclk,
+ u32 pll0, u32 pll1)
+{
+ unsigned long result;
+ u32 mdiv, pdiv, sdiv, kdiv;
+ u64 tmp;
+
+ mdiv = (pll0 >> PLL6553X_MDIV_SHIFT) & PLL6553X_MDIV_MASK;
+ pdiv = (pll0 >> PLL6553X_PDIV_SHIFT) & PLL6553X_PDIV_MASK;
+ sdiv = (pll0 >> PLL6553X_SDIV_SHIFT) & PLL6553X_SDIV_MASK;
+ kdiv = pll1 & PLL6553X_KDIV_MASK;
+
+ /* We need to multiply baseclk by mdiv (the integer part) and kdiv
+ * which is in 2^16ths, so shift mdiv up (does not overflow) and
+ * add kdiv before multiplying. The use of tmp is to avoid any
+ * overflows before shifting back down into result when multiplying
+ * by the mdiv and kdiv pair.
+ */
+
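+ /* Worked example, with assumed values: baseclk = 12 MHz, mdiv = 32,
+ * pdiv = 1, sdiv = 0 and kdiv = 0 give tmp = 12000000 * (32 << 16),
+ * do_div() by (1 << 0) and the final shift yields 384000000 (384 MHz);
+ * a non-zero kdiv adds kdiv/65536 of the p/s-divided base clock.
+ */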
+ tmp = baseclk;
+ tmp *= (mdiv << 16) + kdiv;
+ do_div(tmp, (pdiv << sdiv));
+ result = tmp >> 16;
+
+ return result;
+}
diff --git a/arch/arm/plat-samsung/include/plat/regs-onenand.h b/arch/arm/plat-samsung/include/plat/regs-onenand.h
new file mode 100644
index 000000000000..930ea8b88ed3
--- /dev/null
+++ b/arch/arm/plat-samsung/include/plat/regs-onenand.h
@@ -0,0 +1,63 @@
+/*
+ * linux/arch/arm/plat-s3c/include/plat/regs-onenand.h
+ *
+ * Copyright (C) 2008-2010 Samsung Electronics
+ * Kyungmin Park <kyungmin.park@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __SAMSUNG_ONENAND_H__
+#define __SAMSUNG_ONENAND_H__
+
+#include <mach/hardware.h>
+
+/*
+ * OneNAND Controller
+ */
+#define MEM_CFG_OFFSET 0x0000
+#define BURST_LEN_OFFSET 0x0010
+#define MEM_RESET_OFFSET 0x0020
+#define INT_ERR_STAT_OFFSET 0x0030
+#define INT_ERR_MASK_OFFSET 0x0040
+#define INT_ERR_ACK_OFFSET 0x0050
+#define ECC_ERR_STAT_OFFSET 0x0060
+#define MANUFACT_ID_OFFSET 0x0070
+#define DEVICE_ID_OFFSET 0x0080
+#define DATA_BUF_SIZE_OFFSET 0x0090
+#define BOOT_BUF_SIZE_OFFSET 0x00A0
+#define BUF_AMOUNT_OFFSET 0x00B0
+#define TECH_OFFSET 0x00C0
+#define FBA_WIDTH_OFFSET 0x00D0
+#define FPA_WIDTH_OFFSET 0x00E0
+#define FSA_WIDTH_OFFSET 0x00F0
+#define TRANS_SPARE_OFFSET 0x0140
+#define DBS_DFS_WIDTH_OFFSET 0x0160
+#define INT_PIN_ENABLE_OFFSET 0x01A0
+#define ACC_CLOCK_OFFSET 0x01C0
+#define FLASH_VER_ID_OFFSET 0x01F0
+#define FLASH_AUX_CNTRL_OFFSET 0x0300 /* s3c64xx only */
+
+#define ONENAND_MEM_RESET_HOT 0x3
+#define ONENAND_MEM_RESET_COLD 0x2
+#define ONENAND_MEM_RESET_WARM 0x1
+
+#define CACHE_OP_ERR (1 << 13)
+#define RST_CMP (1 << 12)
+#define RDY_ACT (1 << 11)
+#define INT_ACT (1 << 10)
+#define UNSUP_CMD (1 << 9)
+#define LOCKED_BLK (1 << 8)
+#define BLK_RW_CMP (1 << 7)
+#define ERS_CMP (1 << 6)
+#define PGM_CMP (1 << 5)
+#define LOAD_CMP (1 << 4)
+#define ERS_FAIL (1 << 3)
+#define PGM_FAIL (1 << 2)
+#define INT_TO (1 << 1)
+#define LD_FAIL_ECC_ERR (1 << 0)
+
+#define TSRF (1 << 0)
+
+#endif
diff --git a/arch/arm/plat-samsung/include/plat/regs-rtc.h b/arch/arm/plat-samsung/include/plat/regs-rtc.h
index d5837cf8e402..65c190d142dd 100644
--- a/arch/arm/plat-samsung/include/plat/regs-rtc.h
+++ b/arch/arm/plat-samsung/include/plat/regs-rtc.h
@@ -20,6 +20,10 @@
#define S3C2410_RTCCON_CLKSEL (1<<1)
#define S3C2410_RTCCON_CNTSEL (1<<2)
#define S3C2410_RTCCON_CLKRST (1<<3)
+#define S3C64XX_RTCCON_TICEN (1<<8)
+
+#define S3C64XX_RTCCON_TICMSK (0xF<<7)
+#define S3C64XX_RTCCON_TICSHT (7)
#define S3C2410_TICNT S3C2410_RTCREG(0x44)
#define S3C2410_TICNT_ENABLE (1<<7)
diff --git a/arch/arm/plat-samsung/include/plat/s3c-dma-pl330.h b/arch/arm/plat-samsung/include/plat/s3c-dma-pl330.h
new file mode 100644
index 000000000000..5fe6721b57f7
--- /dev/null
+++ b/arch/arm/plat-samsung/include/plat/s3c-dma-pl330.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2010 Samsung Electronics Co. Ltd.
+ * Jaswinder Singh <jassi.brar@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __S3C_DMA_PL330_H_
+#define __S3C_DMA_PL330_H_
+
+#define S3C2410_DMAF_AUTOSTART (1 << 0)
+#define S3C2410_DMAF_CIRCULAR (1 << 1)
+
+/*
+ * PL330 can assign any channel to communicate with
+ * any of the peripherals attached to the DMAC.
+ * For the sake of consistency across client drivers,
+ * we keep the channel names unchanged and only add
+ * the missing peripherals.
+ * Order is not important since the S3C PL330 API driver
+ * uses these just as IDs.
+ */
+enum dma_ch {
+ DMACH_UART0_RX,
+ DMACH_UART0_TX,
+ DMACH_UART1_RX,
+ DMACH_UART1_TX,
+ DMACH_UART2_RX,
+ DMACH_UART2_TX,
+ DMACH_UART3_RX,
+ DMACH_UART3_TX,
+ DMACH_IRDA,
+ DMACH_I2S0_RX,
+ DMACH_I2S0_TX,
+ DMACH_I2S0S_TX,
+ DMACH_I2S1_RX,
+ DMACH_I2S1_TX,
+ DMACH_I2S2_RX,
+ DMACH_I2S2_TX,
+ DMACH_SPI0_RX,
+ DMACH_SPI0_TX,
+ DMACH_SPI1_RX,
+ DMACH_SPI1_TX,
+ DMACH_SPI2_RX,
+ DMACH_SPI2_TX,
+ DMACH_AC97_MICIN,
+ DMACH_AC97_PCMIN,
+ DMACH_AC97_PCMOUT,
+ DMACH_EXTERNAL,
+ DMACH_PWM,
+ DMACH_SPDIF,
+ DMACH_HSI_RX,
+ DMACH_HSI_TX,
+ DMACH_PCM0_TX,
+ DMACH_PCM0_RX,
+ DMACH_PCM1_TX,
+ DMACH_PCM1_RX,
+ DMACH_PCM2_TX,
+ DMACH_PCM2_RX,
+ DMACH_MSM_REQ3,
+ DMACH_MSM_REQ2,
+ DMACH_MSM_REQ1,
+ DMACH_MSM_REQ0,
+ /* END Marker, also used to denote a reserved channel */
+ DMACH_MAX,
+};
+
+static inline bool s3c_dma_has_circular(void)
+{
+ return true;
+}
+
+#include <plat/dma.h>
+
+#endif /* __S3C_DMA_PL330_H_ */
diff --git a/arch/arm/plat-samsung/include/plat/s3c-pl330-pdata.h b/arch/arm/plat-samsung/include/plat/s3c-pl330-pdata.h
new file mode 100644
index 000000000000..bf5e2a9d408d
--- /dev/null
+++ b/arch/arm/plat-samsung/include/plat/s3c-pl330-pdata.h
@@ -0,0 +1,32 @@
+/* linux/arch/arm/plat-samsung/include/plat/s3c-pl330-pdata.h
+ *
+ * Copyright (C) 2010 Samsung Electronics Co. Ltd.
+ * Jaswinder Singh <jassi.brar@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __S3C_PL330_PDATA_H
+#define __S3C_PL330_PDATA_H
+
+#include <plat/s3c-dma-pl330.h>
+
+/*
+ * Every PL330 DMAC has max 32 peripheral interfaces,
+ * some of which may not actually be used in your
+ * DMAC's configuration.
+ * Populate this array of 32 peri i/fs with the relevant
+ * channel IDs for the peri i/fs that are used, and with
+ * DMACH_MAX for those that are unused.
+ *
+ * The platforms just need to provide this info
+ * to the S3C DMA API driver for PL330.
+ */
+struct s3c_pl330_platdata {
+ enum dma_ch peri[32];
+};
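+
+/*
+ * Illustrative (hypothetical) population from board/SoC code; the actual
+ * peripheral layout depends on the particular DMAC instance:
+ *
+ *	static struct s3c_pl330_platdata pdma0_pd = {
+ *		.peri = {
+ *			[0] = DMACH_UART0_RX,
+ *			[1] = DMACH_UART0_TX,
+ *			[2] = DMACH_I2S0_RX,
+ *			[3] = DMACH_I2S0_TX,
+ *			[4 ... 31] = DMACH_MAX,
+ *		},
+ *	};
+ */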
+
+#endif /* __S3C_PL330_PDATA_H */
diff --git a/arch/arm/plat-samsung/include/plat/s3c64xx-spi.h b/arch/arm/plat-samsung/include/plat/s3c64xx-spi.h
index d17724149315..e5aba8f95b79 100644
--- a/arch/arm/plat-samsung/include/plat/s3c64xx-spi.h
+++ b/arch/arm/plat-samsung/include/plat/s3c64xx-spi.h
@@ -63,5 +63,9 @@ struct s3c64xx_spi_info {
* has some chips attached to it.
*/
extern void s3c64xx_spi_set_info(int cntrlr, int src_clk_nr, int num_cs);
+extern void s5pc100_spi_set_info(int cntrlr, int src_clk_nr, int num_cs);
+extern void s5pv210_spi_set_info(int cntrlr, int src_clk_nr, int num_cs);
+extern void s5p6440_spi_set_info(int cntrlr, int src_clk_nr, int num_cs);
+extern void s5p6442_spi_set_info(int cntrlr, int src_clk_nr, int num_cs);
#endif /* __S3C64XX_PLAT_SPI_H */
diff --git a/arch/arm/plat-samsung/include/plat/sdhci.h b/arch/arm/plat-samsung/include/plat/sdhci.h
index 7d07cd7aa4f2..13f9fb20900a 100644
--- a/arch/arm/plat-samsung/include/plat/sdhci.h
+++ b/arch/arm/plat-samsung/include/plat/sdhci.h
@@ -75,6 +75,9 @@ extern void s5pc100_setup_sdhci0_cfg_gpio(struct platform_device *, int w);
extern void s5pc100_setup_sdhci1_cfg_gpio(struct platform_device *, int w);
extern void s5pc100_setup_sdhci2_cfg_gpio(struct platform_device *, int w);
extern void s3c64xx_setup_sdhci2_cfg_gpio(struct platform_device *, int w);
+extern void s5pv210_setup_sdhci0_cfg_gpio(struct platform_device *, int w);
+extern void s5pv210_setup_sdhci1_cfg_gpio(struct platform_device *, int w);
+extern void s5pv210_setup_sdhci2_cfg_gpio(struct platform_device *, int w);
/* S3C6400 SDHCI setup */
@@ -218,4 +221,56 @@ static inline void s5pc100_default_sdhci1(void) { }
static inline void s5pc100_default_sdhci2(void) { }
#endif /* CONFIG_S5PC100_SETUP_SDHCI */
+
+/* S5PV210 SDHCI setup */
+#ifdef CONFIG_S5PV210_SETUP_SDHCI
+extern char *s5pv210_hsmmc_clksrcs[4];
+
+extern void s5pv210_setup_sdhci_cfg_card(struct platform_device *dev,
+ void __iomem *r,
+ struct mmc_ios *ios,
+ struct mmc_card *card);
+
+#ifdef CONFIG_S3C_DEV_HSMMC
+static inline void s5pv210_default_sdhci0(void)
+{
+ s3c_hsmmc0_def_platdata.clocks = s5pv210_hsmmc_clksrcs;
+ s3c_hsmmc0_def_platdata.cfg_gpio = s5pv210_setup_sdhci0_cfg_gpio;
+ s3c_hsmmc0_def_platdata.cfg_card = s5pv210_setup_sdhci_cfg_card;
+}
+#else
+static inline void s5pv210_default_sdhci0(void) { }
+#endif /* CONFIG_S3C_DEV_HSMMC */
+
+#ifdef CONFIG_S3C_DEV_HSMMC1
+static inline void s5pv210_default_sdhci1(void)
+{
+ s3c_hsmmc1_def_platdata.clocks = s5pv210_hsmmc_clksrcs;
+ s3c_hsmmc1_def_platdata.cfg_gpio = s5pv210_setup_sdhci1_cfg_gpio;
+ s3c_hsmmc1_def_platdata.cfg_card = s5pv210_setup_sdhci_cfg_card;
+}
+#else
+static inline void s5pv210_default_sdhci1(void) { }
+#endif /* CONFIG_S3C_DEV_HSMMC1 */
+
+#ifdef CONFIG_S3C_DEV_HSMMC2
+static inline void s5pv210_default_sdhci2(void)
+{
+ s3c_hsmmc2_def_platdata.clocks = s5pv210_hsmmc_clksrcs;
+ s3c_hsmmc2_def_platdata.cfg_gpio = s5pv210_setup_sdhci2_cfg_gpio;
+ s3c_hsmmc2_def_platdata.cfg_card = s5pv210_setup_sdhci_cfg_card;
+}
+#else
+static inline void s5pv210_default_sdhci2(void) { }
+#endif /* CONFIG_S3C_DEV_HSMMC2 */
+
+#else
+static inline void s5pv210_default_sdhci0(void) { }
+static inline void s5pv210_default_sdhci1(void) { }
+static inline void s5pv210_default_sdhci2(void) { }
+#endif /* CONFIG_S5PV210_SETUP_SDHCI */
+
+
+
+
#endif /* __PLAT_S3C_SDHCI_H */
diff --git a/arch/arm/mach-s3c2410/include/mach/ts.h b/arch/arm/plat-samsung/include/plat/ts.h
index dc361700d695..26fdb22e0fc2 100644
--- a/arch/arm/mach-s3c2410/include/mach/ts.h
+++ b/arch/arm/plat-samsung/include/plat/ts.h
@@ -1,4 +1,4 @@
-/* linux/include/asm/arch-s3c2410/ts.h
+/* arch/arm/plat-samsung/include/plat/ts.h
*
* Copyright (c) 2005 Arnaud Patard <arnaud.patard@rtp-net.org>
*
@@ -14,8 +14,12 @@ struct s3c2410_ts_mach_info {
int delay;
int presc;
int oversampling_shift;
+ void (*cfg_gpio)(struct platform_device *dev);
};
extern void s3c24xx_ts_set_platdata(struct s3c2410_ts_mach_info *);
+/* defined by architecture to configure gpio */
+extern void s3c24xx_ts_cfg_gpio(struct platform_device *dev);
+
#endif /* __ASM_ARM_TS_H */
diff --git a/arch/arm/plat-samsung/include/plat/wakeup-mask.h b/arch/arm/plat-samsung/include/plat/wakeup-mask.h
new file mode 100644
index 000000000000..43e4acd2e1c6
--- /dev/null
+++ b/arch/arm/plat-samsung/include/plat/wakeup-mask.h
@@ -0,0 +1,44 @@
+/* arch/arm/plat-samsung/include/plat/wakeup-mask.h
+ *
+ * Copyright 2010 Ben Dooks <ben-linux@fluff.org>
+ *
+ * Support for wakeup mask interrupts on newer SoCs
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+*/
+
+#ifndef __PLAT_WAKEUP_MASK_H
+#define __PLAT_WAKEUP_MASK_H __FILE__
+
+/* if no irq yet defined, but still want to mask */
+#define NO_WAKEUP_IRQ (0x90000000)
+
+/**
+ * struct samsung_wakeup_mask - wakeup mask information
+ * @irq: The interrupt associated with this wakeup.
+ * @bit: The bit, as a (1 << bitno) controlling this source.
+ */
+struct samsung_wakeup_mask {
+ unsigned int irq;
+ u32 bit;
+};
+
+/**
+ * samsung_sync_wakemask - sync wakeup mask information for pm
+ * @reg: The register that is used.
+ * @masks: The list of masks to use.
+ * @nr_masks: The number of entries pointed to by @masks.
+ *
+ * Synchronise the wakeup mask information at suspend time from the list
+ * of interrupts and control bits in @masks. We do this at suspend time
+ * as overriding the relevant irq chips is harder and the register is only
+ * required to be correct before we enter sleep.
+ */
+extern void samsung_sync_wakemask(void __iomem *reg,
+ struct samsung_wakeup_mask *masks,
+ int nr_masks);
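+
+/*
+ * Sketch of typical use from machine PM code (the register and IRQ names
+ * here are placeholders, not part of this patch):
+ *
+ *	static struct samsung_wakeup_mask wake_irqs[] = {
+ *		{ .irq = IRQ_RTC_ALARM,	.bit = (1 << 1) },
+ *		{ .irq = NO_WAKEUP_IRQ,	.bit = (1 << 2) },
+ *	};
+ *
+ *	samsung_sync_wakemask(WAKEUP_MASK_REG, wake_irqs,
+ *			      ARRAY_SIZE(wake_irqs));
+ */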
+
+#endif /* __PLAT_WAKEUP_MASK_H */
diff --git a/arch/arm/plat-samsung/pm-gpio.c b/arch/arm/plat-samsung/pm-gpio.c
index 69a4c7f02e25..7df03f87fbfa 100644
--- a/arch/arm/plat-samsung/pm-gpio.c
+++ b/arch/arm/plat-samsung/pm-gpio.c
@@ -329,10 +329,12 @@ void s3c_pm_save_gpios(void)
struct s3c_gpio_chip *ourchip;
unsigned int gpio_nr;
- for (gpio_nr = 0; gpio_nr < S3C_GPIO_END; gpio_nr++) {
+ for (gpio_nr = 0; gpio_nr < S3C_GPIO_END;) {
ourchip = s3c_gpiolib_getchip(gpio_nr);
- if (!ourchip)
+ if (!ourchip) {
+ gpio_nr++;
continue;
+ }
s3c_pm_save_gpio(ourchip);
@@ -367,10 +369,12 @@ void s3c_pm_restore_gpios(void)
struct s3c_gpio_chip *ourchip;
unsigned int gpio_nr;
- for (gpio_nr = 0; gpio_nr < S3C_GPIO_END; gpio_nr++) {
+ for (gpio_nr = 0; gpio_nr < S3C_GPIO_END;) {
ourchip = s3c_gpiolib_getchip(gpio_nr);
- if (!ourchip)
+ if (!ourchip) {
+ gpio_nr++;
continue;
+ }
s3c_pm_resume_gpio(ourchip);
diff --git a/arch/arm/plat-samsung/s3c-pl330.c b/arch/arm/plat-samsung/s3c-pl330.c
new file mode 100644
index 000000000000..a91305a60aed
--- /dev/null
+++ b/arch/arm/plat-samsung/s3c-pl330.c
@@ -0,0 +1,1224 @@
+/* linux/arch/arm/plat-samsung/s3c-pl330.c
+ *
+ * Copyright (C) 2010 Samsung Electronics Co. Ltd.
+ * Jaswinder Singh <jassi.brar@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+
+#include <asm/hardware/pl330.h>
+
+#include <plat/s3c-pl330-pdata.h>
+
+/**
+ * struct s3c_pl330_dmac - Logical representation of a PL330 DMAC.
+ * @busy_chan: Number of channels currently busy.
+ * @peri: List of IDs of peripherals this DMAC can work with.
+ * @node: To attach to the global list of DMACs.
+ * @pi: PL330 configuration info for the DMAC.
+ * @kmcache: Pool to quickly allocate xfers for all channels in the dmac.
+ */
+struct s3c_pl330_dmac {
+ unsigned busy_chan;
+ enum dma_ch *peri;
+ struct list_head node;
+ struct pl330_info *pi;
+ struct kmem_cache *kmcache;
+};
+
+/**
+ * struct s3c_pl330_xfer - A request submitted by S3C DMA clients.
+ * @token: Xfer ID provided by the client.
+ * @node: To attach to the list of xfers on a channel.
+ * @px: Xfer for PL330 core.
+ * @chan: Owner channel of this xfer.
+ */
+struct s3c_pl330_xfer {
+ void *token;
+ struct list_head node;
+ struct pl330_xfer px;
+ struct s3c_pl330_chan *chan;
+};
+
+/**
+ * struct s3c_pl330_chan - Logical channel to communicate with
+ * a Physical peripheral.
+ * @pl330_chan_id: Token of a hardware channel thread of PL330 DMAC.
+ * NULL if the channel is available to be acquired.
+ * @id: ID of the peripheral that this channel can communicate with.
+ * @options: Options specified by the client.
+ * @sdaddr: Address provided via s3c2410_dma_devconfig.
+ * @node: To attach to the global list of channels.
+ * @lrq: Pointer to the last submitted pl330_req to PL330 core.
+ * @xfer_list: To manage list of xfers enqueued.
+ * @req: Two requests to communicate with the PL330 engine.
+ * @callback_fn: Callback function to the client.
+ * @rqcfg: Channel configuration for the xfers.
+ * @xfer_head: Pointer to the xfer to be executed next.
+ * @dmac: Pointer to the DMAC that manages this channel, NULL if the
+ * channel is available to be acquired.
+ * @client: Client of this channel. NULL if the
+ * channel is available to be acquired.
+ */
+struct s3c_pl330_chan {
+ void *pl330_chan_id;
+ enum dma_ch id;
+ unsigned int options;
+ unsigned long sdaddr;
+ struct list_head node;
+ struct pl330_req *lrq;
+ struct list_head xfer_list;
+ struct pl330_req req[2];
+ s3c2410_dma_cbfn_t callback_fn;
+ struct pl330_reqcfg rqcfg;
+ struct s3c_pl330_xfer *xfer_head;
+ struct s3c_pl330_dmac *dmac;
+ struct s3c2410_dma_client *client;
+};
+
+/* All DMACs in the platform */
+static LIST_HEAD(dmac_list);
+
+/* All channels to peripherals in the platform */
+static LIST_HEAD(chan_list);
+
+/*
+ * Since we add resources (DMACs and channels) to the global pool,
+ * we need to guard access to the resources using a global lock
+ */
+static DEFINE_SPINLOCK(res_lock);
+
+/* Returns the channel with ID 'id' in the chan_list */
+static struct s3c_pl330_chan *id_to_chan(const enum dma_ch id)
+{
+ struct s3c_pl330_chan *ch;
+
+ list_for_each_entry(ch, &chan_list, node)
+ if (ch->id == id)
+ return ch;
+
+ return NULL;
+}
+
+/* Allocate a new channel with ID 'id' and add to chan_list */
+static void chan_add(const enum dma_ch id)
+{
+ struct s3c_pl330_chan *ch = id_to_chan(id);
+
+ /* Return if the channel already exists */
+ if (ch)
+ return;
+
+ ch = kmalloc(sizeof(*ch), GFP_KERNEL);
+ /* Return silently to work with other channels */
+ if (!ch)
+ return;
+
+ ch->id = id;
+ ch->dmac = NULL;
+
+ list_add_tail(&ch->node, &chan_list);
+}
+
+/* If the channel is not yet acquired by any client */
+static bool chan_free(struct s3c_pl330_chan *ch)
+{
+ if (!ch)
+ return false;
+
+ /* Channel points to some DMAC only when it's acquired */
+ return ch->dmac ? false : true;
+}
+
+/*
+ * Returns 0 if the peripheral i/f is invalid or not present on the dmac.
+ * Otherwise, returns index + 1.
+ */
+static unsigned iface_of_dmac(struct s3c_pl330_dmac *dmac, enum dma_ch ch_id)
+{
+ enum dma_ch *id = dmac->peri;
+ int i;
+
+ /* Discount invalid markers */
+ if (ch_id == DMACH_MAX)
+ return 0;
+
+ for (i = 0; i < PL330_MAX_PERI; i++)
+ if (id[i] == ch_id)
+ return i + 1;
+
+ return 0;
+}
+
+/* If all channel threads of the DMAC are busy */
+static inline bool dmac_busy(struct s3c_pl330_dmac *dmac)
+{
+ struct pl330_info *pi = dmac->pi;
+
+ return (dmac->busy_chan < pi->pcfg.num_chan) ? false : true;
+}
+
+/*
+ * Returns the number of free channels that
+ * can be handled by this dmac only.
+ */
+static unsigned ch_onlyby_dmac(struct s3c_pl330_dmac *dmac)
+{
+ enum dma_ch *id = dmac->peri;
+ struct s3c_pl330_dmac *d;
+ struct s3c_pl330_chan *ch;
+ unsigned found, count = 0;
+ enum dma_ch p;
+ int i;
+
+ for (i = 0; i < PL330_MAX_PERI; i++) {
+ p = id[i];
+ ch = id_to_chan(p);
+
+ if (p == DMACH_MAX || !chan_free(ch))
+ continue;
+
+ found = 0;
+ list_for_each_entry(d, &dmac_list, node) {
+ if (d != dmac && iface_of_dmac(d, ch->id)) {
+ found = 1;
+ break;
+ }
+ }
+ if (!found)
+ count++;
+ }
+
+ return count;
+}
+
+/*
+ * Measure of suitability of 'dmac' handling 'ch'
+ *
+ * 0 indicates 'dmac' can not handle 'ch' either
+ * because it is not supported by the hardware or
+ * because all dmac channels are currently busy.
+ *
+ * A >0 value indicates 'dmac' has the capability.
+ * The bigger the value the more suitable the dmac.
+ */
+#define MAX_SUIT UINT_MAX
+#define MIN_SUIT 0
+
+static unsigned suitablility(struct s3c_pl330_dmac *dmac,
+ struct s3c_pl330_chan *ch)
+{
+ struct pl330_info *pi = dmac->pi;
+ enum dma_ch *id = dmac->peri;
+ struct s3c_pl330_dmac *d;
+ unsigned s;
+ int i;
+
+ s = MIN_SUIT;
+ /* If all the DMAC channel threads are busy */
+ if (dmac_busy(dmac))
+ return s;
+
+ for (i = 0; i < PL330_MAX_PERI; i++)
+ if (id[i] == ch->id)
+ break;
+
+ /* If the 'dmac' can't talk to 'ch' */
+ if (i == PL330_MAX_PERI)
+ return s;
+
+ s = MAX_SUIT;
+ list_for_each_entry(d, &dmac_list, node) {
+ /*
+ * If some other dmac can talk to this
+ * peri and has some channel free.
+ */
+ if (d != dmac && iface_of_dmac(d, ch->id) && !dmac_busy(d)) {
+ s = 0;
+ break;
+ }
+ }
+ if (s)
+ return s;
+
+ s = 100;
+
+ /* Good if free chans are more, bad otherwise */
+ s += (pi->pcfg.num_chan - dmac->busy_chan) - ch_onlyby_dmac(dmac);
+
+ return s;
+}
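+
+/*
+ * For illustration, with assumed figures: a DMAC with 8 channel threads,
+ * 3 of them busy, that is the only possible host for 2 of the still-free
+ * peripherals scores 100 + (8 - 3) - 2 = 103 for a peripheral it shares
+ * with another capable DMAC. A DMAC that cannot reach the peripheral, or
+ * whose channel threads are all busy, scores MIN_SUIT, and the only
+ * non-busy DMAC able to serve the peripheral scores MAX_SUIT.
+ */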
+
+/* More than one DMAC may have capability to transfer data with the
+ * peripheral. This function assigns the most suitable DMAC to manage the
+ * channel and hence communicate with the peripheral.
+ */
+static struct s3c_pl330_dmac *map_chan_to_dmac(struct s3c_pl330_chan *ch)
+{
+ struct s3c_pl330_dmac *d, *dmac = NULL;
+ unsigned sn, sl = MIN_SUIT;
+
+ list_for_each_entry(d, &dmac_list, node) {
+ sn = suitablility(d, ch);
+
+ if (sn == MAX_SUIT)
+ return d;
+
+ if (sn > sl) {
+ sl = sn;
+ dmac = d;
+ }
+ }
+
+ return dmac;
+}
+
+/* Acquire the channel for peripheral 'id' */
+static struct s3c_pl330_chan *chan_acquire(const enum dma_ch id)
+{
+ struct s3c_pl330_chan *ch = id_to_chan(id);
+ struct s3c_pl330_dmac *dmac;
+
+ /* If the channel doesn't exist or is already acquired */
+ if (!ch || !chan_free(ch)) {
+ ch = NULL;
+ goto acq_exit;
+ }
+
+ dmac = map_chan_to_dmac(ch);
+ /* If couldn't map */
+ if (!dmac) {
+ ch = NULL;
+ goto acq_exit;
+ }
+
+ dmac->busy_chan++;
+ ch->dmac = dmac;
+
+acq_exit:
+ return ch;
+}
+
+/* Delete xfer from the queue */
+static inline void del_from_queue(struct s3c_pl330_xfer *xfer)
+{
+ struct s3c_pl330_xfer *t;
+ struct s3c_pl330_chan *ch;
+ int found;
+
+ if (!xfer)
+ return;
+
+ ch = xfer->chan;
+
+ /* Make sure xfer is in the queue */
+ found = 0;
+ list_for_each_entry(t, &ch->xfer_list, node)
+ if (t == xfer) {
+ found = 1;
+ break;
+ }
+
+ if (!found)
+ return;
+
+ /* If xfer is last entry in the queue */
+ if (xfer->node.next == &ch->xfer_list)
+ t = list_entry(ch->xfer_list.next,
+ struct s3c_pl330_xfer, node);
+ else
+ t = list_entry(xfer->node.next,
+ struct s3c_pl330_xfer, node);
+
+ /* If there was only one node left */
+ if (t == xfer)
+ ch->xfer_head = NULL;
+ else if (ch->xfer_head == xfer)
+ ch->xfer_head = t;
+
+ list_del(&xfer->node);
+}
+
+/* Provides pointer to the next xfer in the queue.
+ * If CIRCULAR option is set, the list is left intact,
+ * otherwise the xfer is removed from the list.
+ * Forced delete 'pluck' can be set to override the CIRCULAR option.
+ */
+static struct s3c_pl330_xfer *get_from_queue(struct s3c_pl330_chan *ch,
+ int pluck)
+{
+ struct s3c_pl330_xfer *xfer = ch->xfer_head;
+
+ if (!xfer)
+ return NULL;
+
+ /* If xfer is last entry in the queue */
+ if (xfer->node.next == &ch->xfer_list)
+ ch->xfer_head = list_entry(ch->xfer_list.next,
+ struct s3c_pl330_xfer, node);
+ else
+ ch->xfer_head = list_entry(xfer->node.next,
+ struct s3c_pl330_xfer, node);
+
+ if (pluck || !(ch->options & S3C2410_DMAF_CIRCULAR))
+ del_from_queue(xfer);
+
+ return xfer;
+}
+
+static inline void add_to_queue(struct s3c_pl330_chan *ch,
+ struct s3c_pl330_xfer *xfer, int front)
+{
+ struct pl330_xfer *xt;
+
+ /* If queue empty */
+ if (ch->xfer_head == NULL)
+ ch->xfer_head = xfer;
+
+ xt = &ch->xfer_head->px;
+ /* If the head already submitted (CIRCULAR head) */
+ if (ch->options & S3C2410_DMAF_CIRCULAR &&
+ (xt == ch->req[0].x || xt == ch->req[1].x))
+ ch->xfer_head = xfer;
+
+ /* If this is a resubmission, it should go at the head */
+ if (front) {
+ ch->xfer_head = xfer;
+ list_add(&xfer->node, &ch->xfer_list);
+ } else {
+ list_add_tail(&xfer->node, &ch->xfer_list);
+ }
+}
+
+static inline void _finish_off(struct s3c_pl330_xfer *xfer,
+ enum s3c2410_dma_buffresult res, int ffree)
+{
+ struct s3c_pl330_chan *ch;
+
+ if (!xfer)
+ return;
+
+ ch = xfer->chan;
+
+ /* Do callback */
+ if (ch->callback_fn)
+ ch->callback_fn(NULL, xfer->token, xfer->px.bytes, res);
+
+ /* Force Free or if buffer is not needed anymore */
+ if (ffree || !(ch->options & S3C2410_DMAF_CIRCULAR))
+ kmem_cache_free(ch->dmac->kmcache, xfer);
+}
+
+static inline int s3c_pl330_submit(struct s3c_pl330_chan *ch,
+ struct pl330_req *r)
+{
+ struct s3c_pl330_xfer *xfer;
+ int ret = 0;
+
+ /* If already submitted */
+ if (r->x)
+ return 0;
+
+ xfer = get_from_queue(ch, 0);
+ if (xfer) {
+ r->x = &xfer->px;
+
+ /* Use max bandwidth for M<->M xfers */
+ if (r->rqtype == MEMTOMEM) {
+ struct pl330_info *pi = xfer->chan->dmac->pi;
+ int burst = 1 << ch->rqcfg.brst_size;
+ u32 bytes = r->x->bytes;
+ int bl;
+
+ bl = pi->pcfg.data_bus_width / 8;
+ bl *= pi->pcfg.data_buf_dep;
+ bl /= burst;
+
+ /* src/dst_burst_len can't be more than 16 */
+ if (bl > 16)
+ bl = 16;
+
+ while (bl > 1) {
+ if (!(bytes % (bl * burst)))
+ break;
+ bl--;
+ }
+
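+ /* For example, with an assumed 64-bit data bus, 16-deep data
+ * buffer and 4-byte bursts: bl = 8 * 16 / 4 = 32, capped to 16,
+ * then reduced until it evenly divides the transfer length.
+ */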
+ ch->rqcfg.brst_len = bl;
+ } else {
+ ch->rqcfg.brst_len = 1;
+ }
+
+ ret = pl330_submit_req(ch->pl330_chan_id, r);
+
+ /* If submission was successful */
+ if (!ret) {
+ ch->lrq = r; /* latest submitted req */
+ return 0;
+ }
+
+ r->x = NULL;
+
+ /* If both of the PL330 ping-pong buffers filled */
+ if (ret == -EAGAIN) {
+ dev_err(ch->dmac->pi->dev, "%s:%d!\n",
+ __func__, __LINE__);
+ /* Queue back again */
+ add_to_queue(ch, xfer, 1);
+ ret = 0;
+ } else {
+ dev_err(ch->dmac->pi->dev, "%s:%d!\n",
+ __func__, __LINE__);
+ _finish_off(xfer, S3C2410_RES_ERR, 0);
+ }
+ }
+
+ return ret;
+}
+
+static void s3c_pl330_rq(struct s3c_pl330_chan *ch,
+ struct pl330_req *r, enum pl330_op_err err)
+{
+ unsigned long flags;
+ struct s3c_pl330_xfer *xfer;
+ struct pl330_xfer *xl = r->x;
+ enum s3c2410_dma_buffresult res;
+
+ spin_lock_irqsave(&res_lock, flags);
+
+ r->x = NULL;
+
+ s3c_pl330_submit(ch, r);
+
+ spin_unlock_irqrestore(&res_lock, flags);
+
+ /* Map result to S3C DMA API */
+ if (err == PL330_ERR_NONE)
+ res = S3C2410_RES_OK;
+ else if (err == PL330_ERR_ABORT)
+ res = S3C2410_RES_ABORT;
+ else
+ res = S3C2410_RES_ERR;
+
+ /* If last request had some xfer */
+ if (xl) {
+ xfer = container_of(xl, struct s3c_pl330_xfer, px);
+ _finish_off(xfer, res, 0);
+ } else {
+ dev_info(ch->dmac->pi->dev, "%s:%d No Xfer?!\n",
+ __func__, __LINE__);
+ }
+}
+
+static void s3c_pl330_rq0(void *token, enum pl330_op_err err)
+{
+ struct pl330_req *r = token;
+ struct s3c_pl330_chan *ch = container_of(r,
+ struct s3c_pl330_chan, req[0]);
+ s3c_pl330_rq(ch, r, err);
+}
+
+static void s3c_pl330_rq1(void *token, enum pl330_op_err err)
+{
+ struct pl330_req *r = token;
+ struct s3c_pl330_chan *ch = container_of(r,
+ struct s3c_pl330_chan, req[1]);
+ s3c_pl330_rq(ch, r, err);
+}
+
+/* Release an acquired channel */
+static void chan_release(struct s3c_pl330_chan *ch)
+{
+ struct s3c_pl330_dmac *dmac;
+
+ if (chan_free(ch))
+ return;
+
+ dmac = ch->dmac;
+ ch->dmac = NULL;
+ dmac->busy_chan--;
+}
+
+int s3c2410_dma_ctrl(enum dma_ch id, enum s3c2410_chan_op op)
+{
+ struct s3c_pl330_xfer *xfer;
+ enum pl330_chan_op pl330op;
+ struct s3c_pl330_chan *ch;
+ unsigned long flags;
+ int idx, ret;
+
+ spin_lock_irqsave(&res_lock, flags);
+
+ ch = id_to_chan(id);
+
+ if (!ch || chan_free(ch)) {
+ ret = -EINVAL;
+ goto ctrl_exit;
+ }
+
+ switch (op) {
+ case S3C2410_DMAOP_START:
+ /* Make sure both reqs are enqueued */
+ idx = (ch->lrq == &ch->req[0]) ? 1 : 0;
+ s3c_pl330_submit(ch, &ch->req[idx]);
+ s3c_pl330_submit(ch, &ch->req[1 - idx]);
+ pl330op = PL330_OP_START;
+ break;
+
+ case S3C2410_DMAOP_STOP:
+ pl330op = PL330_OP_ABORT;
+ break;
+
+ case S3C2410_DMAOP_FLUSH:
+ pl330op = PL330_OP_FLUSH;
+ break;
+
+ case S3C2410_DMAOP_PAUSE:
+ case S3C2410_DMAOP_RESUME:
+ case S3C2410_DMAOP_TIMEOUT:
+ case S3C2410_DMAOP_STARTED:
+ spin_unlock_irqrestore(&res_lock, flags);
+ return 0;
+
+ default:
+ spin_unlock_irqrestore(&res_lock, flags);
+ return -EINVAL;
+ }
+
+ ret = pl330_chan_ctrl(ch->pl330_chan_id, pl330op);
+
+ if (pl330op == PL330_OP_START) {
+ spin_unlock_irqrestore(&res_lock, flags);
+ return ret;
+ }
+
+ idx = (ch->lrq == &ch->req[0]) ? 1 : 0;
+
+ /* Abort the current xfer */
+ if (ch->req[idx].x) {
+ xfer = container_of(ch->req[idx].x,
+ struct s3c_pl330_xfer, px);
+
+ /* Drop xfer during FLUSH */
+ if (pl330op == PL330_OP_FLUSH)
+ del_from_queue(xfer);
+
+ ch->req[idx].x = NULL;
+
+ spin_unlock_irqrestore(&res_lock, flags);
+ _finish_off(xfer, S3C2410_RES_ABORT,
+ pl330op == PL330_OP_FLUSH ? 1 : 0);
+ spin_lock_irqsave(&res_lock, flags);
+ }
+
+ /* Flush the whole queue */
+ if (pl330op == PL330_OP_FLUSH) {
+
+ if (ch->req[1 - idx].x) {
+ xfer = container_of(ch->req[1 - idx].x,
+ struct s3c_pl330_xfer, px);
+
+ del_from_queue(xfer);
+
+ ch->req[1 - idx].x = NULL;
+
+ spin_unlock_irqrestore(&res_lock, flags);
+ _finish_off(xfer, S3C2410_RES_ABORT, 1);
+ spin_lock_irqsave(&res_lock, flags);
+ }
+
+ /* Finish off the remaining in the queue */
+ xfer = ch->xfer_head;
+ while (xfer) {
+
+ del_from_queue(xfer);
+
+ spin_unlock_irqrestore(&res_lock, flags);
+ _finish_off(xfer, S3C2410_RES_ABORT, 1);
+ spin_lock_irqsave(&res_lock, flags);
+
+ xfer = ch->xfer_head;
+ }
+ }
+
+ctrl_exit:
+ spin_unlock_irqrestore(&res_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(s3c2410_dma_ctrl);
+
+int s3c2410_dma_enqueue(enum dma_ch id, void *token,
+ dma_addr_t addr, int size)
+{
+ struct s3c_pl330_chan *ch;
+ struct s3c_pl330_xfer *xfer;
+ unsigned long flags;
+ int idx, ret = 0;
+
+ spin_lock_irqsave(&res_lock, flags);
+
+ ch = id_to_chan(id);
+
+ /* Error if invalid or free channel */
+ if (!ch || chan_free(ch)) {
+ ret = -EINVAL;
+ goto enq_exit;
+ }
+
+ /* Error if size is unaligned */
+ if (ch->rqcfg.brst_size && size % (1 << ch->rqcfg.brst_size)) {
+ ret = -EINVAL;
+ goto enq_exit;
+ }
+
+ xfer = kmem_cache_alloc(ch->dmac->kmcache, GFP_ATOMIC);
+ if (!xfer) {
+ ret = -ENOMEM;
+ goto enq_exit;
+ }
+
+ xfer->token = token;
+ xfer->chan = ch;
+ xfer->px.bytes = size;
+ xfer->px.next = NULL; /* Single request */
+
+ /* For S3C DMA API, direction is always fixed for all xfers */
+ if (ch->req[0].rqtype == MEMTODEV) {
+ xfer->px.src_addr = addr;
+ xfer->px.dst_addr = ch->sdaddr;
+ } else {
+ xfer->px.src_addr = ch->sdaddr;
+ xfer->px.dst_addr = addr;
+ }
+
+ add_to_queue(ch, xfer, 0);
+
+ /* Try submitting on either request */
+ idx = (ch->lrq == &ch->req[0]) ? 1 : 0;
+
+ if (!ch->req[idx].x)
+ s3c_pl330_submit(ch, &ch->req[idx]);
+ else
+ s3c_pl330_submit(ch, &ch->req[1 - idx]);
+
+ spin_unlock_irqrestore(&res_lock, flags);
+
+ if (ch->options & S3C2410_DMAF_AUTOSTART)
+ s3c2410_dma_ctrl(id, S3C2410_DMAOP_START);
+
+ return 0;
+
+enq_exit:
+ spin_unlock_irqrestore(&res_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(s3c2410_dma_enqueue);
+
+int s3c2410_dma_request(enum dma_ch id,
+ struct s3c2410_dma_client *client,
+ void *dev)
+{
+ struct s3c_pl330_dmac *dmac;
+ struct s3c_pl330_chan *ch;
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&res_lock, flags);
+
+ ch = chan_acquire(id);
+ if (!ch) {
+ ret = -EBUSY;
+ goto req_exit;
+ }
+
+ dmac = ch->dmac;
+
+ ch->pl330_chan_id = pl330_request_channel(dmac->pi);
+ if (!ch->pl330_chan_id) {
+ chan_release(ch);
+ ret = -EBUSY;
+ goto req_exit;
+ }
+
+ ch->client = client;
+ ch->options = 0; /* Clear any option */
+ ch->callback_fn = NULL; /* Clear any callback */
+ ch->lrq = NULL;
+
+ ch->rqcfg.brst_size = 2; /* Default word size */
+ ch->rqcfg.swap = SWAP_NO;
+ ch->rqcfg.scctl = SCCTRL0; /* Noncacheable and nonbufferable */
+ ch->rqcfg.dcctl = DCCTRL0; /* Noncacheable and nonbufferable */
+ ch->rqcfg.privileged = 0;
+ ch->rqcfg.insnaccess = 0;
+
+ /* Set invalid direction */
+ ch->req[0].rqtype = DEVTODEV;
+ ch->req[1].rqtype = ch->req[0].rqtype;
+
+ ch->req[0].cfg = &ch->rqcfg;
+ ch->req[1].cfg = ch->req[0].cfg;
+
+ ch->req[0].peri = iface_of_dmac(dmac, id) - 1; /* Original index */
+ ch->req[1].peri = ch->req[0].peri;
+
+ ch->req[0].token = &ch->req[0];
+ ch->req[0].xfer_cb = s3c_pl330_rq0;
+ ch->req[1].token = &ch->req[1];
+ ch->req[1].xfer_cb = s3c_pl330_rq1;
+
+ ch->req[0].x = NULL;
+ ch->req[1].x = NULL;
+
+ /* Reset xfer list */
+ INIT_LIST_HEAD(&ch->xfer_list);
+ ch->xfer_head = NULL;
+
+req_exit:
+ spin_unlock_irqrestore(&res_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(s3c2410_dma_request);
+
+int s3c2410_dma_free(enum dma_ch id, struct s3c2410_dma_client *client)
+{
+ struct s3c_pl330_chan *ch;
+ struct s3c_pl330_xfer *xfer;
+ unsigned long flags;
+ int ret = 0;
+ unsigned idx;
+
+ spin_lock_irqsave(&res_lock, flags);
+
+ ch = id_to_chan(id);
+
+ if (!ch || chan_free(ch))
+ goto free_exit;
+
+ /* Refuse if someone else wanted to free the channel */
+ if (ch->client != client) {
+ ret = -EBUSY;
+ goto free_exit;
+ }
+
+ /* Stop any active xfer, flush the queue and do callbacks */
+ pl330_chan_ctrl(ch->pl330_chan_id, PL330_OP_FLUSH);
+
+ /* Abort the submitted requests */
+ idx = (ch->lrq == &ch->req[0]) ? 1 : 0;
+
+ if (ch->req[idx].x) {
+ xfer = container_of(ch->req[idx].x,
+ struct s3c_pl330_xfer, px);
+
+ ch->req[idx].x = NULL;
+ del_from_queue(xfer);
+
+ spin_unlock_irqrestore(&res_lock, flags);
+ _finish_off(xfer, S3C2410_RES_ABORT, 1);
+ spin_lock_irqsave(&res_lock, flags);
+ }
+
+ if (ch->req[1 - idx].x) {
+ xfer = container_of(ch->req[1 - idx].x,
+ struct s3c_pl330_xfer, px);
+
+ ch->req[1 - idx].x = NULL;
+ del_from_queue(xfer);
+
+ spin_unlock_irqrestore(&res_lock, flags);
+ _finish_off(xfer, S3C2410_RES_ABORT, 1);
+ spin_lock_irqsave(&res_lock, flags);
+ }
+
+ /* Pluck and Abort the queued requests in order */
+ do {
+ xfer = get_from_queue(ch, 1);
+
+ spin_unlock_irqrestore(&res_lock, flags);
+ _finish_off(xfer, S3C2410_RES_ABORT, 1);
+ spin_lock_irqsave(&res_lock, flags);
+ } while (xfer);
+
+ ch->client = NULL;
+
+ pl330_release_channel(ch->pl330_chan_id);
+
+ ch->pl330_chan_id = NULL;
+
+ chan_release(ch);
+
+free_exit:
+ spin_unlock_irqrestore(&res_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(s3c2410_dma_free);
+
+int s3c2410_dma_config(enum dma_ch id, int xferunit)
+{
+ struct s3c_pl330_chan *ch;
+ struct pl330_info *pi;
+ unsigned long flags;
+ int i, dbwidth, ret = 0;
+
+ spin_lock_irqsave(&res_lock, flags);
+
+ ch = id_to_chan(id);
+
+ if (!ch || chan_free(ch)) {
+ ret = -EINVAL;
+ goto cfg_exit;
+ }
+
+ pi = ch->dmac->pi;
+ dbwidth = pi->pcfg.data_bus_width / 8;
+
+ /* Max size of xfer can be pcfg.data_bus_width */
+ if (xferunit > dbwidth) {
+ ret = -EINVAL;
+ goto cfg_exit;
+ }
+
+ i = 0;
+ while (xferunit != (1 << i))
+ i++;
+
+ /* If valid value */
+ if (xferunit == (1 << i))
+ ch->rqcfg.brst_size = i;
+ else
+ ret = -EINVAL;
+
+cfg_exit:
+ spin_unlock_irqrestore(&res_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(s3c2410_dma_config);
+
+/* Options that are supported by this driver */
+#define S3C_PL330_FLAGS (S3C2410_DMAF_CIRCULAR | S3C2410_DMAF_AUTOSTART)
+
+int s3c2410_dma_setflags(enum dma_ch id, unsigned int options)
+{
+ struct s3c_pl330_chan *ch;
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&res_lock, flags);
+
+ ch = id_to_chan(id);
+
+ if (!ch || chan_free(ch) || options & ~(S3C_PL330_FLAGS))
+ ret = -EINVAL;
+ else
+ ch->options = options;
+
+ spin_unlock_irqrestore(&res_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(s3c2410_dma_setflags);
+
+int s3c2410_dma_set_buffdone_fn(enum dma_ch id, s3c2410_dma_cbfn_t rtn)
+{
+ struct s3c_pl330_chan *ch;
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&res_lock, flags);
+
+ ch = id_to_chan(id);
+
+ if (!ch || chan_free(ch))
+ ret = -EINVAL;
+ else
+ ch->callback_fn = rtn;
+
+ spin_unlock_irqrestore(&res_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(s3c2410_dma_set_buffdone_fn);
+
+int s3c2410_dma_devconfig(enum dma_ch id, enum s3c2410_dmasrc source,
+ unsigned long address)
+{
+ struct s3c_pl330_chan *ch;
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&res_lock, flags);
+
+ ch = id_to_chan(id);
+
+ if (!ch || chan_free(ch)) {
+ ret = -EINVAL;
+ goto devcfg_exit;
+ }
+
+ switch (source) {
+ case S3C2410_DMASRC_HW: /* P->M */
+ ch->req[0].rqtype = DEVTOMEM;
+ ch->req[1].rqtype = DEVTOMEM;
+ ch->rqcfg.src_inc = 0;
+ ch->rqcfg.dst_inc = 1;
+ break;
+ case S3C2410_DMASRC_MEM: /* M->P */
+ ch->req[0].rqtype = MEMTODEV;
+ ch->req[1].rqtype = MEMTODEV;
+ ch->rqcfg.src_inc = 1;
+ ch->rqcfg.dst_inc = 0;
+ break;
+ default:
+ ret = -EINVAL;
+ goto devcfg_exit;
+ }
+
+ ch->sdaddr = address;
+
+devcfg_exit:
+ spin_unlock_irqrestore(&res_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(s3c2410_dma_devconfig);
+
+int s3c2410_dma_getposition(enum dma_ch id, dma_addr_t *src, dma_addr_t *dst)
+{
+ struct s3c_pl330_chan *ch = id_to_chan(id);
+ struct pl330_chanstatus status;
+ int ret;
+
+ if (!ch || chan_free(ch))
+ return -EINVAL;
+
+ ret = pl330_chan_status(ch->pl330_chan_id, &status);
+ if (ret < 0)
+ return ret;
+
+ *src = status.src_addr;
+ *dst = status.dst_addr;
+
+ return 0;
+}
+EXPORT_SYMBOL(s3c2410_dma_getposition);
+
+static irqreturn_t pl330_irq_handler(int irq, void *data)
+{
+ if (pl330_update(data))
+ return IRQ_HANDLED;
+ else
+ return IRQ_NONE;
+}
+
+static int pl330_probe(struct platform_device *pdev)
+{
+ struct s3c_pl330_dmac *s3c_pl330_dmac;
+ struct s3c_pl330_platdata *pl330pd;
+ struct pl330_info *pl330_info;
+ struct resource *res;
+ int i, ret, irq;
+
+ pl330pd = pdev->dev.platform_data;
+
+ /* Can't do without the list of _32_ peripherals */
+ if (!pl330pd || !pl330pd->peri) {
+ dev_err(&pdev->dev, "platform data missing!\n");
+ return -ENODEV;
+ }
+
+ pl330_info = kzalloc(sizeof(*pl330_info), GFP_KERNEL);
+ if (!pl330_info)
+ return -ENOMEM;
+
+ pl330_info->pl330_data = NULL;
+ pl330_info->dev = &pdev->dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ ret = -ENODEV;
+ goto probe_err1;
+ }
+
+ request_mem_region(res->start, resource_size(res), pdev->name);
+
+ pl330_info->base = ioremap(res->start, resource_size(res));
+ if (!pl330_info->base) {
+ ret = -ENXIO;
+ goto probe_err2;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = irq;
+ goto probe_err3;
+ }
+
+ ret = request_irq(irq, pl330_irq_handler, 0,
+ dev_name(&pdev->dev), pl330_info);
+ if (ret)
+ goto probe_err4;
+
+ ret = pl330_add(pl330_info);
+ if (ret)
+ goto probe_err5;
+
+ /* Allocate a new DMAC */
+ s3c_pl330_dmac = kmalloc(sizeof(*s3c_pl330_dmac), GFP_KERNEL);
+ if (!s3c_pl330_dmac) {
+ ret = -ENOMEM;
+ goto probe_err6;
+ }
+
+ /* Hook the info */
+ s3c_pl330_dmac->pi = pl330_info;
+
+ /* No busy channels */
+ s3c_pl330_dmac->busy_chan = 0;
+
+ s3c_pl330_dmac->kmcache = kmem_cache_create(dev_name(&pdev->dev),
+ sizeof(struct s3c_pl330_xfer), 0, 0, NULL);
+
+ if (!s3c_pl330_dmac->kmcache) {
+ ret = -ENOMEM;
+ goto probe_err7;
+ }
+
+ /* Get the list of peripherals */
+ s3c_pl330_dmac->peri = pl330pd->peri;
+
+ /* Attach to the list of DMACs */
+ list_add_tail(&s3c_pl330_dmac->node, &dmac_list);
+
+ /* Create a channel for each peripheral in the DMAC
+ * that is, if it doesn't already exist
+ */
+ for (i = 0; i < PL330_MAX_PERI; i++)
+ if (s3c_pl330_dmac->peri[i] != DMACH_MAX)
+ chan_add(s3c_pl330_dmac->peri[i]);
+
+ printk(KERN_INFO
+ "Loaded driver for PL330 DMAC-%d %s\n", pdev->id, pdev->name);
+ printk(KERN_INFO
+ "\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n",
+ pl330_info->pcfg.data_buf_dep,
+ pl330_info->pcfg.data_bus_width / 8, pl330_info->pcfg.num_chan,
+ pl330_info->pcfg.num_peri, pl330_info->pcfg.num_events);
+
+ return 0;
+
+probe_err7:
+ kfree(s3c_pl330_dmac);
+probe_err6:
+ pl330_del(pl330_info);
+probe_err5:
+ free_irq(irq, pl330_info);
+probe_err4:
+probe_err3:
+ iounmap(pl330_info->base);
+probe_err2:
+ release_mem_region(res->start, resource_size(res));
+probe_err1:
+ kfree(pl330_info);
+
+ return ret;
+}
+
+static int pl330_remove(struct platform_device *pdev)
+{
+ struct s3c_pl330_dmac *dmac, *d;
+ struct s3c_pl330_chan *ch;
+ unsigned long flags;
+ int del, found;
+
+ if (!pdev->dev.platform_data)
+ return -EINVAL;
+
+ spin_lock_irqsave(&res_lock, flags);
+
+ found = 0;
+ list_for_each_entry(d, &dmac_list, node)
+ if (d->pi->dev == &pdev->dev) {
+ found = 1;
+ break;
+ }
+
+ if (!found) {
+ spin_unlock_irqrestore(&res_lock, flags);
+ return 0;
+ }
+
+ dmac = d;
+
+ /* Remove all Channels that are managed only by this DMAC */
+ list_for_each_entry(ch, &chan_list, node) {
+
+ /* Only channels that are handled by this DMAC */
+ if (iface_of_dmac(dmac, ch->id))
+ del = 1;
+ else
+ continue;
+
+ /* Don't remove if some other DMAC has it too */
+ list_for_each_entry(d, &dmac_list, node)
+ if (d != dmac && iface_of_dmac(d, ch->id)) {
+ del = 0;
+ break;
+ }
+
+ if (del) {
+ spin_unlock_irqrestore(&res_lock, flags);
+ s3c2410_dma_free(ch->id, ch->client);
+ spin_lock_irqsave(&res_lock, flags);
+ list_del(&ch->node);
+ kfree(ch);
+ }
+ }
+
+ /* Remove the DMAC */
+ list_del(&dmac->node);
+ kfree(dmac);
+
+ spin_unlock_irqrestore(&res_lock, flags);
+
+ return 0;
+}
+
+static struct platform_driver pl330_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "s3c-pl330",
+ },
+ .probe = pl330_probe,
+ .remove = pl330_remove,
+};
+
+static int __init pl330_init(void)
+{
+ return platform_driver_register(&pl330_driver);
+}
+module_init(pl330_init);
+
+static void __exit pl330_exit(void)
+{
+ platform_driver_unregister(&pl330_driver);
+ return;
+}
+module_exit(pl330_exit);
+
+MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
+MODULE_DESCRIPTION("Driver for PL330 DMA Controller");
+MODULE_LICENSE("GPL");
diff --git a/arch/arm/plat-samsung/wakeup-mask.c b/arch/arm/plat-samsung/wakeup-mask.c
new file mode 100644
index 000000000000..2e09b6ad84ca
--- /dev/null
+++ b/arch/arm/plat-samsung/wakeup-mask.c
@@ -0,0 +1,47 @@
+/* arch/arm/plat-samsung/wakeup-mask.c
+ *
+ * Copyright 2010 Ben Dooks <ben-linux@fluff.org>
+ *
+ * Support for wakeup mask interrupts on newer SoCs
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/sysdev.h>
+#include <linux/types.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+
+#include <plat/wakeup-mask.h>
+#include <plat/pm.h>
+
+void samsung_sync_wakemask(void __iomem *reg,
+ struct samsung_wakeup_mask *mask, int nr_mask)
+{
+ struct irq_desc *desc;
+ u32 val;
+
+ val = __raw_readl(reg);
+
+ for (; nr_mask > 0; nr_mask--, mask++) {
+ if (mask->irq == NO_WAKEUP_IRQ) {
+ val |= mask->bit;
+ continue;
+ }
+
+ desc = irq_to_desc(mask->irq);
+
+ /* bit of a liberty to read this directly from irq_desc. */
+ if (desc->wake_depth > 0)
+ val &= ~mask->bit;
+ else
+ val |= mask->bit;
+ }
+
+ printk(KERN_INFO "wakemask %08x => %08x\n", __raw_readl(reg), val);
+ __raw_writel(val, reg);
+}
diff --git a/arch/arm/plat-spear/Kconfig b/arch/arm/plat-spear/Kconfig
new file mode 100644
index 000000000000..1bb3dbce8810
--- /dev/null
+++ b/arch/arm/plat-spear/Kconfig
@@ -0,0 +1,31 @@
+#
+# SPEAr Platform configuration file
+#
+
+if PLAT_SPEAR
+
+choice
+ prompt "ST SPEAr Family"
+ default ARCH_SPEAR3XX
+
+config ARCH_SPEAR3XX
+ bool "SPEAr3XX"
+ select ARM_VIC
+ select CPU_ARM926T
+ help
+ Support for ST's SPEAr3XX family
+
+config ARCH_SPEAR6XX
+ bool "SPEAr6XX"
+ select ARM_VIC
+ select CPU_ARM926T
+ help
+ Support for ST's SPEAr6XX family
+
+endchoice
+
+# Adding SPEAr machine specific configuration files
+source "arch/arm/mach-spear3xx/Kconfig"
+source "arch/arm/mach-spear6xx/Kconfig"
+
+endif
diff --git a/arch/arm/plat-spear/Makefile b/arch/arm/plat-spear/Makefile
new file mode 100644
index 000000000000..eb89540aeda9
--- /dev/null
+++ b/arch/arm/plat-spear/Makefile
@@ -0,0 +1,8 @@
+#
+# SPEAr Platform specific Makefile
+#
+
+# Common support
+obj-y := clock.o padmux.o time.o
+
+obj-$(CONFIG_ARCH_SPEAR3XX) += shirq.o
diff --git a/arch/arm/plat-spear/clock.c b/arch/arm/plat-spear/clock.c
new file mode 100644
index 000000000000..ee4f90e534d8
--- /dev/null
+++ b/arch/arm/plat-spear/clock.c
@@ -0,0 +1,435 @@
+/*
+ * arch/arm/plat-spear/clock.c
+ *
+ * Clock framework for SPEAr platform
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/bug.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <mach/misc_regs.h>
+#include <plat/clock.h>
+
+static DEFINE_SPINLOCK(clocks_lock);
+static LIST_HEAD(root_clks);
+
+static void propagate_rate(struct list_head *);
+
+static int generic_clk_enable(struct clk *clk)
+{
+ unsigned int val;
+
+ if (!clk->en_reg)
+ return -EFAULT;
+
+ val = readl(clk->en_reg);
+ if (unlikely(clk->flags & RESET_TO_ENABLE))
+ val &= ~(1 << clk->en_reg_bit);
+ else
+ val |= 1 << clk->en_reg_bit;
+
+ writel(val, clk->en_reg);
+
+ return 0;
+}
+
+static void generic_clk_disable(struct clk *clk)
+{
+ unsigned int val;
+
+ if (!clk->en_reg)
+ return;
+
+ val = readl(clk->en_reg);
+ if (unlikely(clk->flags & RESET_TO_ENABLE))
+ val |= 1 << clk->en_reg_bit;
+ else
+ val &= ~(1 << clk->en_reg_bit);
+
+ writel(val, clk->en_reg);
+}
+
+/* generic clk ops */
+static struct clkops generic_clkops = {
+ .enable = generic_clk_enable,
+ .disable = generic_clk_disable,
+};
+
+/*
+ * clk_enable - inform the system when the clock source should be running.
+ * @clk: clock source
+ *
+ * If the clock can not be enabled/disabled, this should return success.
+ *
+ * Returns success (0) or negative errno.
+ */
+int clk_enable(struct clk *clk)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ if (!clk || IS_ERR(clk))
+ return -EFAULT;
+
+ spin_lock_irqsave(&clocks_lock, flags);
+ if (clk->usage_count == 0) {
+ if (clk->ops && clk->ops->enable)
+ ret = clk->ops->enable(clk);
+ }
+ clk->usage_count++;
+ spin_unlock_irqrestore(&clocks_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(clk_enable);
+
+/*
+ * clk_disable - inform the system when the clock source is no longer required.
+ * @clk: clock source
+ *
+ * Inform the system that a clock source is no longer required by
+ * a driver and may be shut down.
+ *
+ * Implementation detail: if the clock source is shared between
+ * multiple drivers, clk_enable() calls must be balanced by the
+ * same number of clk_disable() calls for the clock source to be
+ * disabled.
+ */
+void clk_disable(struct clk *clk)
+{
+ unsigned long flags;
+
+ if (!clk || IS_ERR(clk))
+ return;
+
+ WARN_ON(clk->usage_count == 0);
+
+ spin_lock_irqsave(&clocks_lock, flags);
+ clk->usage_count--;
+ if (clk->usage_count == 0) {
+ if (clk->ops && clk->ops->disable)
+ clk->ops->disable(clk);
+ }
+ spin_unlock_irqrestore(&clocks_lock, flags);
+}
+EXPORT_SYMBOL(clk_disable);
+
+/**
+ * clk_get_rate - obtain the current clock rate (in Hz) for a clock source.
+ * This is only valid once the clock source has been enabled.
+ * @clk: clock source
+ */
+unsigned long clk_get_rate(struct clk *clk)
+{
+ unsigned long flags, rate;
+
+ spin_lock_irqsave(&clocks_lock, flags);
+ rate = clk->rate;
+ spin_unlock_irqrestore(&clocks_lock, flags);
+
+ return rate;
+}
+EXPORT_SYMBOL(clk_get_rate);
+
+/**
+ * clk_set_parent - set the parent clock source for this clock
+ * @clk: clock source
+ * @parent: parent clock source
+ *
+ * Returns success (0) or negative errno.
+ */
+int clk_set_parent(struct clk *clk, struct clk *parent)
+{
+ int i, found = 0, val = 0;
+ unsigned long flags;
+
+ if (!clk || IS_ERR(clk) || !parent || IS_ERR(parent))
+ return -EFAULT;
+ if (clk->usage_count)
+ return -EBUSY;
+ if (!clk->pclk_sel)
+ return -EPERM;
+ if (clk->pclk == parent)
+ return 0;
+
+ for (i = 0; i < clk->pclk_sel->pclk_count; i++) {
+ if (clk->pclk_sel->pclk_info[i].pclk == parent) {
+ found = 1;
+ break;
+ }
+ }
+
+ if (!found)
+ return -EINVAL;
+
+ spin_lock_irqsave(&clocks_lock, flags);
+ /* reflect parent change in hardware */
+ val = readl(clk->pclk_sel->pclk_sel_reg);
+ val &= ~(clk->pclk_sel->pclk_sel_mask << clk->pclk_sel_shift);
+ val |= clk->pclk_sel->pclk_info[i].pclk_mask << clk->pclk_sel_shift;
+ writel(val, clk->pclk_sel->pclk_sel_reg);
+ spin_unlock_irqrestore(&clocks_lock, flags);
+
+ /* reflect parent change in software */
+ clk->recalc(clk);
+ propagate_rate(&clk->children);
+ return 0;
+}
+EXPORT_SYMBOL(clk_set_parent);
+
+/* registers clock in platform clock framework */
+void clk_register(struct clk_lookup *cl)
+{
+ struct clk *clk = cl->clk;
+ unsigned long flags;
+
+ if (!clk || IS_ERR(clk))
+ return;
+
+ spin_lock_irqsave(&clocks_lock, flags);
+
+ INIT_LIST_HEAD(&clk->children);
+ if (clk->flags & ALWAYS_ENABLED)
+ clk->ops = NULL;
+ else if (!clk->ops)
+ clk->ops = &generic_clkops;
+
+ /* root clocks don't have any parents */
+ if (!clk->pclk && !clk->pclk_sel) {
+ list_add(&clk->sibling, &root_clks);
+ /* add clocks with only one parent to parent's children list */
+ } else if (clk->pclk && !clk->pclk_sel) {
+ list_add(&clk->sibling, &clk->pclk->children);
+ } else {
+ /* add clocks with > 1 parent to 1st parent's children list */
+ list_add(&clk->sibling,
+ &clk->pclk_sel->pclk_info[0].pclk->children);
+ }
+ spin_unlock_irqrestore(&clocks_lock, flags);
+
+ /* add clock to arm clockdev framework */
+ clkdev_add(cl);
+}
+
+/**
+ * propagate_rate - recalculate and propagate all clocks in list head
+ *
+ * Recalculates every clock on the given list head and recurses into each
+ * clock's children, so rate changes propagate down the tree.
+ */
+static void propagate_rate(struct list_head *lhead)
+{
+ struct clk *clkp, *_temp;
+
+ list_for_each_entry_safe(clkp, _temp, lhead, sibling) {
+ if (clkp->recalc)
+ clkp->recalc(clkp);
+ propagate_rate(&clkp->children);
+ }
+}
+
+/* returns the pclk_info structure of the currently programmed parent clock */
+static struct pclk_info *pclk_info_get(struct clk *clk)
+{
+ unsigned int mask, i;
+ unsigned long flags;
+ struct pclk_info *info = NULL;
+
+ spin_lock_irqsave(&clocks_lock, flags);
+ mask = (readl(clk->pclk_sel->pclk_sel_reg) >> clk->pclk_sel_shift)
+ & clk->pclk_sel->pclk_sel_mask;
+
+ for (i = 0; i < clk->pclk_sel->pclk_count; i++) {
+ if (clk->pclk_sel->pclk_info[i].pclk_mask == mask)
+ info = &clk->pclk_sel->pclk_info[i];
+ }
+ spin_unlock_irqrestore(&clocks_lock, flags);
+
+ return info;
+}
+
+/*
+ * Set pclk as cclk's parent and move cclk's sibling node onto the new
+ * parent's children list
+ */
+static void change_parent(struct clk *cclk, struct clk *pclk)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&clocks_lock, flags);
+ list_del(&cclk->sibling);
+ list_add(&cclk->sibling, &pclk->children);
+
+ cclk->pclk = pclk;
+ spin_unlock_irqrestore(&clocks_lock, flags);
+}
+
+/*
+ * calculates current programmed rate of pll1
+ *
+ * In normal mode
+ * rate = (2 * M[15:8] * Fin)/(N * 2^P)
+ *
+ * In Dithered mode
+ * rate = (2 * M[15:0] * Fin)/(256 * N * 2^P)
+ */
+void pll1_clk_recalc(struct clk *clk)
+{
+ struct pll_clk_config *config = clk->private_data;
+ unsigned int num = 2, den = 0, val, mode = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&clocks_lock, flags);
+ mode = (readl(config->mode_reg) >> PLL_MODE_SHIFT) &
+ PLL_MODE_MASK;
+
+ val = readl(config->cfg_reg);
+ /* calculate denominator */
+ den = (val >> PLL_DIV_P_SHIFT) & PLL_DIV_P_MASK;
+ den = 1 << den;
+ den *= (val >> PLL_DIV_N_SHIFT) & PLL_DIV_N_MASK;
+
+ /* calculate numerator & denominator */
+ if (!mode) {
+ /* Normal mode */
+ num *= (val >> PLL_NORM_FDBK_M_SHIFT) & PLL_NORM_FDBK_M_MASK;
+ } else {
+ /* Dithered mode */
+ num *= (val >> PLL_DITH_FDBK_M_SHIFT) & PLL_DITH_FDBK_M_MASK;
+ den *= 256;
+ }
+
+ clk->rate = (((clk->pclk->rate/10000) * num) / den) * 10000;
+ spin_unlock_irqrestore(&clocks_lock, flags);
+}
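+
+/*
+ * Worked example with assumed register contents: in normal mode, with
+ * Fin = 24 MHz, M[15:8] = 125, N = 3 and P = 1, the rate evaluates to
+ * (2 * 125 * 24 MHz) / (3 * 2^1) = 1 GHz.
+ */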
+
+/* calculates current programmed rate of ahb or apb bus */
+void bus_clk_recalc(struct clk *clk)
+{
+ struct bus_clk_config *config = clk->private_data;
+ unsigned int div;
+ unsigned long flags;
+
+ spin_lock_irqsave(&clocks_lock, flags);
+ div = ((readl(config->reg) >> config->shift) & config->mask) + 1;
+ clk->rate = (unsigned long)clk->pclk->rate / div;
+ spin_unlock_irqrestore(&clocks_lock, flags);
+}
+
+/*
+ * calculates current programmed rate of auxiliary synthesizers
+ * used by: UART, FIRDA
+ *
+ * Fout from the synthesizer is given by one of two equations:
+ * Fout1 = (Fin * X/Y)/2
+ * Fout2 = Fin * X/Y
+ *
+ * Selection of eqn 1 or 2 is programmed in register
+ */
+void aux_clk_recalc(struct clk *clk)
+{
+ struct aux_clk_config *config = clk->private_data;
+ struct pclk_info *pclk_info = NULL;
+ unsigned int num = 1, den = 1, val, eqn;
+ unsigned long flags;
+
+ /* get current programmed parent */
+ pclk_info = pclk_info_get(clk);
+ if (!pclk_info) {
+ spin_lock_irqsave(&clocks_lock, flags);
+ clk->pclk = NULL;
+ clk->rate = 0;
+ spin_unlock_irqrestore(&clocks_lock, flags);
+ return;
+ }
+
+ change_parent(clk, pclk_info->pclk);
+
+ spin_lock_irqsave(&clocks_lock, flags);
+ if (pclk_info->scalable) {
+ val = readl(config->synth_reg);
+
+ eqn = (val >> AUX_EQ_SEL_SHIFT) & AUX_EQ_SEL_MASK;
+ if (eqn == AUX_EQ1_SEL)
+ den *= 2;
+
+ /* calculate numerator */
+ num = (val >> AUX_XSCALE_SHIFT) & AUX_XSCALE_MASK;
+
+ /* calculate denominator */
+ den *= (val >> AUX_YSCALE_SHIFT) & AUX_YSCALE_MASK;
+ val = (((clk->pclk->rate/10000) * num) / den) * 10000;
+ } else
+ val = clk->pclk->rate;
+
+ clk->rate = val;
+ spin_unlock_irqrestore(&clocks_lock, flags);
+}
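+
+/*
+ * Worked example with assumed synthesizer settings: for a 332 MHz parent
+ * with X = 1, Y = 2 and equation 1 selected, Fout = (332 MHz * 1 / 2) / 2
+ * = 83 MHz; with equation 2 the same X/Y pair gives 166 MHz.
+ */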
+
+/*
+ * calculates current programmed rate of gpt synthesizers
+ * Fout from the synthesizer is given by the equation below:
+ * Fout = Fin / ((2 ^ (N + 1)) * (M + 1))
+ */
+void gpt_clk_recalc(struct clk *clk)
+{
+ struct aux_clk_config *config = clk->private_data;
+ struct pclk_info *pclk_info = NULL;
+ unsigned int div = 1, val;
+ unsigned long flags;
+
+ pclk_info = pclk_info_get(clk);
+ if (!pclk_info) {
+ spin_lock_irqsave(&clocks_lock, flags);
+ clk->pclk = NULL;
+ clk->rate = 0;
+ spin_unlock_irqrestore(&clocks_lock, flags);
+ return;
+ }
+
+ change_parent(clk, pclk_info->pclk);
+
+ spin_lock_irqsave(&clocks_lock, flags);
+ if (pclk_info->scalable) {
+ val = readl(config->synth_reg);
+ div += (val >> GPT_MSCALE_SHIFT) & GPT_MSCALE_MASK;
+ div *= 1 << (((val >> GPT_NSCALE_SHIFT) & GPT_NSCALE_MASK) + 1);
+ }
+
+ clk->rate = (unsigned long)clk->pclk->rate / div;
+ spin_unlock_irqrestore(&clocks_lock, flags);
+}
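+
+/*
+ * Worked example with assumed values: Fin = 48 MHz, M = 2, N = 1 gives
+ * div = (2 + 1) * 2^(1 + 1) = 12, so Fout = 48 MHz / 12 = 4 MHz.
+ */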
+
+/*
+ * Used for clocks that always run at the same rate as their parent clock
+ */
+void follow_parent(struct clk *clk)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&clocks_lock, flags);
+ clk->rate = clk->pclk->rate;
+ spin_unlock_irqrestore(&clocks_lock, flags);
+}
+
+/**
+ * recalc_root_clocks - recalculate and propagate all root clocks
+ *
+ * Recalculates all root clocks (clocks with no parent), which if the
+ * clock's .recalc is set correctly, should also propagate their rates.
+ */
+void recalc_root_clocks(void)
+{
+ propagate_rate(&root_clks);
+}
diff --git a/arch/arm/plat-spear/include/plat/clkdev.h b/arch/arm/plat-spear/include/plat/clkdev.h
new file mode 100644
index 000000000000..a2d0112fcaf7
--- /dev/null
+++ b/arch/arm/plat-spear/include/plat/clkdev.h
@@ -0,0 +1,20 @@
+/*
+ * arch/arm/plat-spear/include/plat/clkdev.h
+ *
+ * Clock Dev framework definitions for SPEAr platform
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __PLAT_CLKDEV_H
+#define __PLAT_CLKDEV_H
+
+#define __clk_get(clk) ({ 1; })
+#define __clk_put(clk) do { } while (0)
+
+#endif /* __PLAT_CLKDEV_H */
diff --git a/arch/arm/plat-spear/include/plat/clock.h b/arch/arm/plat-spear/include/plat/clock.h
new file mode 100644
index 000000000000..298bafc0a52f
--- /dev/null
+++ b/arch/arm/plat-spear/include/plat/clock.h
@@ -0,0 +1,126 @@
+/*
+ * arch/arm/plat-spear/include/plat/clock.h
+ *
+ * Clock framework definitions for SPEAr platform
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __PLAT_CLOCK_H
+#define __PLAT_CLOCK_H
+
+#include <linux/list.h>
+#include <asm/clkdev.h>
+#include <linux/types.h>
+
+/* clk structure flags */
+#define ALWAYS_ENABLED (1 << 0) /* clock always enabled */
+#define RESET_TO_ENABLE (1 << 1) /* reset register bit to enable clk */
+
+/**
+ * struct clkops - clock operations
+ * @enable: pointer to clock enable function
+ * @disable: pointer to clock disable function
+ */
+struct clkops {
+ int (*enable) (struct clk *);
+ void (*disable) (struct clk *);
+};
+
+/**
+ * struct pclk_info - parents info
+ * @pclk: pointer to parent clk
+ * @pclk_mask: value to be written for selecting this parent
+ * @scalable: Is parent scalable (1 - YES, 0 - NO)
+ */
+struct pclk_info {
+ struct clk *pclk;
+ u8 pclk_mask;
+ u8 scalable;
+};
+
+/**
+ * struct pclk_sel - parents selection configuration
+ * @pclk_info: pointer to array of parent clock info
+ * @pclk_count: number of parents
+ * @pclk_sel_reg: register for selecting a parent
+ * @pclk_sel_mask: mask for selecting parent (can be used to clear bits also)
+ */
+struct pclk_sel {
+ struct pclk_info *pclk_info;
+ u8 pclk_count;
+ unsigned int *pclk_sel_reg;
+ unsigned int pclk_sel_mask;
+};
+
+/**
+ * struct clk - clock structure
+ * @usage_count: num of users who enabled this clock
+ * @flags: flags for clock properties
+ * @rate: programmed clock rate in Hz
+ * @en_reg: clk enable/disable reg
+ * @en_reg_bit: clk enable/disable bit
+ * @ops: clk enable/disable ops - generic_clkops selected if NULL
+ * @recalc: pointer to clock rate recalculate function
+ * @pclk: current parent clk
+ * @pclk_sel: pointer to parent selection structure
+ * @pclk_sel_shift: register shift for selecting parent of this clock
+ * @children: list of children of this clock
+ * @sibling: node for list of clocks having same parents
+ * @private_data: clock specific private data
+ */
+struct clk {
+ unsigned int usage_count;
+ unsigned int flags;
+ unsigned long rate;
+ unsigned int *en_reg;
+ u8 en_reg_bit;
+ const struct clkops *ops;
+ void (*recalc) (struct clk *);
+
+ struct clk *pclk;
+ struct pclk_sel *pclk_sel;
+ unsigned int pclk_sel_shift;
+
+ struct list_head children;
+ struct list_head sibling;
+ void *private_data;
+};
+
+/* pll configuration structure */
+struct pll_clk_config {
+ unsigned int *mode_reg;
+ unsigned int *cfg_reg;
+};
+
+/* ahb and apb bus configuration structure */
+struct bus_clk_config {
+ unsigned int *reg;
+ unsigned int mask;
+ unsigned int shift;
+};
+
+/*
+ * Aux clk configuration structure: applicable to GPT, UART and FIRDA
+ */
+struct aux_clk_config {
+ unsigned int *synth_reg;
+};
+
+/* platform specific clock functions */
+void clk_register(struct clk_lookup *cl);
+void recalc_root_clocks(void);
+
+/* clock recalc functions */
+void follow_parent(struct clk *clk);
+void pll1_clk_recalc(struct clk *clk);
+void bus_clk_recalc(struct clk *clk);
+void gpt_clk_recalc(struct clk *clk);
+void aux_clk_recalc(struct clk *clk);
+
+#endif /* __PLAT_CLOCK_H */
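
For orientation, a minimal usage sketch of these definitions (illustrative only, not part of this patch; the parent clock apb_clk, the device id "uart0" and board_clk_init are hypothetical): a child clock that simply follows its parent uses follow_parent as its .recalc hook and is registered through a clk_lookup entry.

#include <linux/init.h>
#include <plat/clock.h>

/* hypothetical parent, assumed to be defined in the SoC clock tree */
extern struct clk apb_clk;

static struct clk uart0_clk = {
	.flags = ALWAYS_ENABLED,
	.pclk = &apb_clk,
	.recalc = &follow_parent,
};

static struct clk_lookup uart0_clk_lookup = {
	.dev_id = "uart0",		/* hypothetical device name */
	.clk = &uart0_clk,
};

static void __init board_clk_init(void)
{
	clk_register(&uart0_clk_lookup);
	recalc_root_clocks();
}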
diff --git a/arch/arm/plat-spear/include/plat/debug-macro.S b/arch/arm/plat-spear/include/plat/debug-macro.S
new file mode 100644
index 000000000000..1670734b7e51
--- /dev/null
+++ b/arch/arm/plat-spear/include/plat/debug-macro.S
@@ -0,0 +1,38 @@
+/*
+ * arch/arm/plat-spear/include/plat/debug-macro.S
+ *
+ * Debugging macro include header for spear platform
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/amba/serial.h>
+#include <mach/spear.h>
+
+ .macro addruart, rx
+ mrc p15, 0, \rx, c1, c0
+ tst \rx, #1 @ MMU enabled?
+ moveq \rx, =SPEAR_DBG_UART_BASE @ Physical base
+ movne \rx, =VA_SPEAR_DBG_UART_BASE @ Virtual base
+ .endm
+
+ .macro senduart, rd, rx
+ strb \rd, [\rx, #UART01x_DR] @ ASC_TX_BUFFER
+ .endm
+
+ .macro waituart, rd, rx
+1001: ldr \rd, [\rx, #UART01x_FR] @ FLAG REGISTER
+ tst \rd, #UART01x_FR_TXFF @ TX_FULL
+ bne 1001b
+ .endm
+
+ .macro busyuart, rd, rx
+1002: ldr \rd, [\rx, #UART01x_FR] @ FLAG REGISTER
+ tst \rd, #UART011_FR_TXFE @ TX_EMPTY
+ beq 1002b
+ .endm
diff --git a/arch/arm/plat-spear/include/plat/gpio.h b/arch/arm/plat-spear/include/plat/gpio.h
new file mode 100644
index 000000000000..b857c91257dd
--- /dev/null
+++ b/arch/arm/plat-spear/include/plat/gpio.h
@@ -0,0 +1,24 @@
+/*
+ * arch/arm/plat-spear/include/plat/gpio.h
+ *
+ * GPIO macros for SPEAr platform
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __PLAT_GPIO_H
+#define __PLAT_GPIO_H
+
+#include <asm-generic/gpio.h>
+
+#define gpio_get_value __gpio_get_value
+#define gpio_set_value __gpio_set_value
+#define gpio_cansleep __gpio_cansleep
+#define gpio_to_irq __gpio_to_irq
+
+#endif /* __PLAT_GPIO_H */
diff --git a/arch/arm/plat-spear/include/plat/io.h b/arch/arm/plat-spear/include/plat/io.h
new file mode 100644
index 000000000000..4d4ba822b3eb
--- /dev/null
+++ b/arch/arm/plat-spear/include/plat/io.h
@@ -0,0 +1,22 @@
+/*
+ * arch/arm/plat-spear/include/plat/io.h
+ *
+ * IO definitions for SPEAr platform
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __PLAT_IO_H
+#define __PLAT_IO_H
+
+#define IO_SPACE_LIMIT 0xFFFFFFFF
+
+#define __io(a) __typesafe_io(a)
+#define __mem_pci(a) (a)
+
+#endif /* __PLAT_IO_H */
diff --git a/arch/arm/plat-spear/include/plat/memory.h b/arch/arm/plat-spear/include/plat/memory.h
new file mode 100644
index 000000000000..27a4aba77343
--- /dev/null
+++ b/arch/arm/plat-spear/include/plat/memory.h
@@ -0,0 +1,20 @@
+/*
+ * arch/arm/plat-spear/include/plat/memory.h
+ *
+ * Memory map for SPEAr platform
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __PLAT_MEMORY_H
+#define __PLAT_MEMORY_H
+
+/* Physical DRAM offset */
+#define PHYS_OFFSET UL(0x00000000)
+
+#endif /* __PLAT_MEMORY_H */
diff --git a/arch/arm/plat-spear/include/plat/padmux.h b/arch/arm/plat-spear/include/plat/padmux.h
new file mode 100644
index 000000000000..877f3adcf610
--- /dev/null
+++ b/arch/arm/plat-spear/include/plat/padmux.h
@@ -0,0 +1,92 @@
+/*
+ * arch/arm/plat-spear/include/plat/padmux.h
+ *
+ * SPEAr platform specific gpio pads muxing file
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __PLAT_PADMUX_H
+#define __PLAT_PADMUX_H
+
+#include <linux/types.h>
+
+/*
+ * struct pmx_reg: configuration structure for mode reg and mux reg
+ *
+ * offset: offset of mode reg
+ * mask: mask of mode reg
+ */
+struct pmx_reg {
+ u32 offset;
+ u32 mask;
+};
+
+/*
+ * struct pmx_dev_mode: configuration structure for every group of modes of a device
+ *
+ * ids: all modes for this configuration
+ * mask: mask for supported mode
+ */
+struct pmx_dev_mode {
+ u32 ids;
+ u32 mask;
+};
+
+/*
+ * struct pmx_mode: mode definition structure
+ *
+ * name: mode name
+ * mask: mode mask
+ */
+struct pmx_mode {
+ char *name;
+ u32 id;
+ u32 mask;
+};
+
+/*
+ * struct pmx_dev: device definition structure
+ *
+ * name: device name
+ * modes: device configuration array for different modes supported
+ * mode_count: size of modes array
+ * is_active: is peripheral active/enabled
+ * enb_on_reset: if 1, mask bits are cleared in the reg to enable, otherwise set
+ */
+struct pmx_dev {
+ char *name;
+ struct pmx_dev_mode *modes;
+ u8 mode_count;
+ bool is_active;
+ bool enb_on_reset;
+};
+
+/*
+ * struct pmx_driver: driver definition structure
+ *
+ * mode: mode to be set
+ * devs: array of pointer to pmx devices
+ * devs_count: ARRAY_SIZE of devs
+ * base: base address of soc config registers
+ * mode_reg: structure of mode config register
+ * mux_reg: structure of device mux config register
+ */
+struct pmx_driver {
+ struct pmx_mode *mode;
+ struct pmx_dev **devs;
+ u8 devs_count;
+ u32 *base;
+ struct pmx_reg mode_reg;
+ struct pmx_reg mux_reg;
+};
+
+/* pmx functions */
+int pmx_register(struct pmx_driver *driver);
+
+#endif /* __PLAT_PADMUX_H */
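
For orientation, a hedged sketch of how a board file might describe one muxable device and register it; every name, offset and mask below is invented for illustration and would come from the SoC's pad-mux register map in practice.

#include <linux/init.h>
#include <linux/kernel.h>
#include <plat/padmux.h>

static struct pmx_dev_mode uart1_modes[] = {
	{
		.ids = 0x01,	/* usable only in the mode with id 0x01 */
		.mask = 0x04,	/* mux register bits owned by this device */
	},
};

static struct pmx_dev pmx_uart1 = {
	.name = "uart1",
	.modes = uart1_modes,
	.mode_count = ARRAY_SIZE(uart1_modes),
	.enb_on_reset = 1,
};

static struct pmx_dev *board_pmx_devs[] = {
	&pmx_uart1,
};

static struct pmx_driver board_pmx_driver = {
	.devs = board_pmx_devs,
	.devs_count = ARRAY_SIZE(board_pmx_devs),
	/* .base, .mode, .mode_reg and .mux_reg taken from the SoC register map */
};

/* typically called once from board init code */
static int __init board_padmux_init(void)
{
	return pmx_register(&board_pmx_driver);
}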
diff --git a/arch/arm/plat-spear/include/plat/shirq.h b/arch/arm/plat-spear/include/plat/shirq.h
new file mode 100644
index 000000000000..03ed8b585dcf
--- /dev/null
+++ b/arch/arm/plat-spear/include/plat/shirq.h
@@ -0,0 +1,73 @@
+/*
+ * arch/arm/plat-spear/include/plat/shirq.h
+ *
+ * SPEAr platform shared irq layer header file
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __PLAT_SHIRQ_H
+#define __PLAT_SHIRQ_H
+
+#include <linux/irq.h>
+#include <linux/types.h>
+
+/*
+ * struct shirq_dev_config: shared irq device configuration
+ *
+ * virq: virtual irq number of device
+ * enb_mask: enable mask of device
+ * status_mask: status mask of device
+ * clear_mask: clear mask of device
+ */
+struct shirq_dev_config {
+ u32 virq;
+ u32 enb_mask;
+ u32 status_mask;
+ u32 clear_mask;
+};
+
+/*
+ * struct shirq_regs: shared irq register configuration
+ *
+ * base: base address of shared irq register
+ * enb_reg: enable register offset
+ * reset_to_enb: if 1, the bit must be cleared to enable the interrupt
+ * status_reg: status register offset
+ * status_reg_mask: status register valid mask
+ * clear_reg: clear register offset
+ * reset_to_clear: if 1, the bit must be cleared to clear the interrupt
+ */
+struct shirq_regs {
+ void __iomem *base;
+ u32 enb_reg;
+ u32 reset_to_enb;
+ u32 status_reg;
+ u32 status_reg_mask;
+ u32 clear_reg;
+ u32 reset_to_clear;
+};
+
+/*
+ * struct spear_shirq: shared irq structure
+ *
+ * irq: hardware irq number
+ * dev_config: array of device config structures which are using "irq" line
+ * dev_count: size of dev_config array
+ * regs: register configuration for shared irq block
+ */
+struct spear_shirq {
+ u32 irq;
+ struct shirq_dev_config *dev_config;
+ u32 dev_count;
+ struct shirq_regs regs;
+};
+
+int spear_shirq_register(struct spear_shirq *shirq);
+
+#endif /* __PLAT_SHIRQ_H */
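
A hedged sketch of registering one shared interrupt line with two demultiplexed devices; the irq numbers, base address and register layout here are made up for illustration.

#include <linux/init.h>
#include <linux/kernel.h>
#include <plat/shirq.h>

static struct shirq_dev_config shirq_ras_config[] = {
	{
		.virq = 100,		/* hypothetical virtual irq numbers */
		.status_mask = 0x1,
		.clear_mask = 0x1,
	}, {
		.virq = 101,
		.status_mask = 0x2,
		.clear_mask = 0x2,
	},
};

static struct spear_shirq shirq_ras = {
	.irq = 28,			/* hypothetical parent irq line */
	.dev_config = shirq_ras_config,
	.dev_count = ARRAY_SIZE(shirq_ras_config),
	.regs = {
		.base = (void __iomem *)0xFD000000, /* hypothetical virtual base */
		.enb_reg = -1,		/* this block has no enable register */
		.status_reg = 0x04,
		.status_reg_mask = 0x3,
		.clear_reg = 0x08,
		.reset_to_clear = 1,
	},
};

/* typically called from the machine's init_irq hook */
static void __init board_shirq_init(void)
{
	if (spear_shirq_register(&shirq_ras))
		pr_err("shirq: registration failed\n");
}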
diff --git a/arch/arm/plat-spear/include/plat/system.h b/arch/arm/plat-spear/include/plat/system.h
new file mode 100644
index 000000000000..55a4e405d578
--- /dev/null
+++ b/arch/arm/plat-spear/include/plat/system.h
@@ -0,0 +1,41 @@
+/*
+ * arch/arm/plat-spear/include/plat/system.h
+ *
+ * SPEAr platform specific architecture functions
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __PLAT_SYSTEM_H
+#define __PLAT_SYSTEM_H
+
+#include <asm/hardware/sp810.h>
+#include <linux/io.h>
+#include <mach/spear.h>
+
+static inline void arch_idle(void)
+{
+ /*
+ * This should do all the clock switching
+ * and wait for interrupt tricks
+ */
+ cpu_do_idle();
+}
+
+static inline void arch_reset(char mode, const char *cmd)
+{
+ if (mode == 's') {
+ /* software reset, Jump into ROM at address 0 */
+ cpu_reset(0);
+ } else {
+ /* hardware reset, Use on-chip reset capability */
+ sysctl_soft_reset((void __iomem *)VA_SPEAR_SYS_CTRL_BASE);
+ }
+}
+
+#endif /* __PLAT_SYSTEM_H */
diff --git a/arch/arm/plat-spear/include/plat/timex.h b/arch/arm/plat-spear/include/plat/timex.h
new file mode 100644
index 000000000000..914d09dd50fd
--- /dev/null
+++ b/arch/arm/plat-spear/include/plat/timex.h
@@ -0,0 +1,19 @@
+/*
+ * arch/arm/plat-spear/include/plat/timex.h
+ *
+ * SPEAr platform specific timex definitions
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __PLAT_TIMEX_H
+#define __PLAT_TIMEX_H
+
+#define CLOCK_TICK_RATE 48000000
+
+#endif /* __PLAT_TIMEX_H */
diff --git a/arch/arm/plat-spear/include/plat/uncompress.h b/arch/arm/plat-spear/include/plat/uncompress.h
new file mode 100644
index 000000000000..99ba6789cc97
--- /dev/null
+++ b/arch/arm/plat-spear/include/plat/uncompress.h
@@ -0,0 +1,43 @@
+/*
+ * arch/arm/plat-spear/include/plat/uncompress.h
+ *
+ * Serial port stubs for kernel decompress status messages
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/io.h>
+#include <linux/amba/serial.h>
+#include <mach/spear.h>
+
+#ifndef __PLAT_UNCOMPRESS_H
+#define __PLAT_UNCOMPRESS_H
+/*
+ * This does not append a newline
+ */
+static inline void putc(int c)
+{
+ void __iomem *base = (void __iomem *)SPEAR_DBG_UART_BASE;
+
+ while (readl(base + UART01x_FR) & UART01x_FR_TXFF)
+ barrier();
+
+ writel(c, base + UART01x_DR);
+}
+
+static inline void flush(void)
+{
+}
+
+/*
+ * nothing to do
+ */
+#define arch_decomp_setup()
+#define arch_decomp_wdog()
+
+#endif /* __PLAT_UNCOMPRESS_H */
diff --git a/arch/arm/plat-spear/include/plat/vmalloc.h b/arch/arm/plat-spear/include/plat/vmalloc.h
new file mode 100644
index 000000000000..09e9372aea21
--- /dev/null
+++ b/arch/arm/plat-spear/include/plat/vmalloc.h
@@ -0,0 +1,19 @@
+/*
+ * arch/arm/plat-spear/include/plat/vmalloc.h
+ *
+ * Defining Vmalloc area for SPEAr platform
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __PLAT_VMALLOC_H
+#define __PLAT_VMALLOC_H
+
+#define VMALLOC_END 0xF0000000
+
+#endif /* __PLAT_VMALLOC_H */
diff --git a/arch/arm/plat-spear/padmux.c b/arch/arm/plat-spear/padmux.c
new file mode 100644
index 000000000000..d2aab3adcdeb
--- /dev/null
+++ b/arch/arm/plat-spear/padmux.c
@@ -0,0 +1,164 @@
+/*
+ * arch/arm/plat-spear/padmux.c
+ *
+ * SPEAr platform specific gpio pads muxing source file
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <plat/padmux.h>
+
+/*
+ * struct pmx: pmx definition structure
+ *
+ * base: base address of configuration registers
+ * mode_reg: mode configurations
+ * mux_reg: muxing configurations
+ * active_mode: pointer to current active mode
+ */
+struct pmx {
+ u32 base;
+ struct pmx_reg mode_reg;
+ struct pmx_reg mux_reg;
+ struct pmx_mode *active_mode;
+};
+
+static struct pmx *pmx;
+
+/**
+ * pmx_mode_set - enables a multiplexing mode
+ * @mode - pointer to pmx mode
+ *
+ * It will set the mode of operation in hardware.
+ * Returns a negative error code on failure, 0 otherwise.
+ */
+static int pmx_mode_set(struct pmx_mode *mode)
+{
+ u32 val;
+
+ if (!mode->name)
+ return -EFAULT;
+
+ pmx->active_mode = mode;
+
+ val = readl(pmx->base + pmx->mode_reg.offset);
+ val &= ~pmx->mode_reg.mask;
+ val |= mode->mask & pmx->mode_reg.mask;
+ writel(val, pmx->base + pmx->mode_reg.offset);
+
+ return 0;
+}
+
+/**
+ * pmx_devs_enable - Enables list of devices
+ * @devs - pointer to pmx device array
+ * @count - number of devices to enable
+ *
+ * It will enable pads for all required peripherals once and only once.
+ * If a peripheral is not supported by the current mode, the request is
+ * rejected. Conflicts between peripherals are not handled: peripherals are
+ * enabled in the order in which they appear in the pmx_dev array, so in
+ * case of a conflict the last peripheral enabled wins.
+ * Returns a negative error code on failure, 0 otherwise.
+ */
+static int pmx_devs_enable(struct pmx_dev **devs, u8 count)
+{
+ u32 val, i, mask;
+
+ if (!count)
+ return -EINVAL;
+
+ val = readl(pmx->base + pmx->mux_reg.offset);
+ for (i = 0; i < count; i++) {
+ u8 j = 0;
+
+ if (!devs[i]->name || !devs[i]->modes) {
+ printk(KERN_ERR "padmux: dev name or modes is null\n");
+ continue;
+ }
+ /* check if peripheral exists in active mode */
+ if (pmx->active_mode) {
+ bool found = false;
+ for (j = 0; j < devs[i]->mode_count; j++) {
+ if (devs[i]->modes[j].ids &
+ pmx->active_mode->id) {
+ found = true;
+ break;
+ }
+ }
+ if (found == false) {
+				printk(KERN_ERR "%s device not available in %s mode\n",
+						devs[i]->name,
+						pmx->active_mode->name);
+ continue;
+ }
+ }
+
+ /* enable peripheral */
+ mask = devs[i]->modes[j].mask & pmx->mux_reg.mask;
+ if (devs[i]->enb_on_reset)
+ val &= ~mask;
+ else
+ val |= mask;
+
+ devs[i]->is_active = true;
+ }
+ writel(val, pmx->base + pmx->mux_reg.offset);
+ kfree(pmx);
+
+ /* this will ensure that multiplexing can't be changed now */
+ pmx = (struct pmx *)-1;
+
+ return 0;
+}
+
+/**
+ * pmx_register - registers a platform requesting pad mux feature
+ * @driver - pointer to driver structure containing driver specific parameters
+ *
+ * This must be called only once. It allocates memory for the pmx
+ * structure, then calls pmx_mode_set and pmx_devs_enable.
+ * Returns a negative error code on failure, 0 otherwise.
+ */
+int pmx_register(struct pmx_driver *driver)
+{
+ int ret = 0;
+
+ if (pmx)
+ return -EPERM;
+ if (!driver->base || !driver->devs)
+ return -EFAULT;
+
+ pmx = kzalloc(sizeof(*pmx), GFP_KERNEL);
+ if (!pmx)
+ return -ENOMEM;
+
+ pmx->base = (u32)driver->base;
+ pmx->mode_reg.offset = driver->mode_reg.offset;
+ pmx->mode_reg.mask = driver->mode_reg.mask;
+ pmx->mux_reg.offset = driver->mux_reg.offset;
+ pmx->mux_reg.mask = driver->mux_reg.mask;
+
+ /* choose mode to enable */
+ if (driver->mode) {
+ ret = pmx_mode_set(driver->mode);
+ if (ret)
+ goto pmx_fail;
+ }
+ ret = pmx_devs_enable(driver->devs, driver->devs_count);
+ if (ret)
+ goto pmx_fail;
+
+ return 0;
+
+pmx_fail:
+ return ret;
+}
diff --git a/arch/arm/plat-spear/shirq.c b/arch/arm/plat-spear/shirq.c
new file mode 100644
index 000000000000..2172d6946aea
--- /dev/null
+++ b/arch/arm/plat-spear/shirq.c
@@ -0,0 +1,118 @@
+/*
+ * arch/arm/plat-spear/shirq.c
+ *
+ * SPEAr platform shared irq layer source file
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar<viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/spinlock.h>
+#include <plat/shirq.h>
+
+struct spear_shirq *shirq;
+static DEFINE_SPINLOCK(lock);
+
+static void shirq_irq_mask(unsigned irq)
+{
+ struct spear_shirq *shirq = get_irq_chip_data(irq);
+ u32 val, id = irq - shirq->dev_config[0].virq;
+ unsigned long flags;
+
+ if ((shirq->regs.enb_reg == -1) || shirq->dev_config[id].enb_mask == -1)
+ return;
+
+ spin_lock_irqsave(&lock, flags);
+ val = readl(shirq->regs.base + shirq->regs.enb_reg);
+ if (shirq->regs.reset_to_enb)
+ val |= shirq->dev_config[id].enb_mask;
+ else
+ val &= ~(shirq->dev_config[id].enb_mask);
+ writel(val, shirq->regs.base + shirq->regs.enb_reg);
+ spin_unlock_irqrestore(&lock, flags);
+}
+
+static void shirq_irq_unmask(unsigned irq)
+{
+ struct spear_shirq *shirq = get_irq_chip_data(irq);
+ u32 val, id = irq - shirq->dev_config[0].virq;
+ unsigned long flags;
+
+ if ((shirq->regs.enb_reg == -1) || shirq->dev_config[id].enb_mask == -1)
+ return;
+
+ spin_lock_irqsave(&lock, flags);
+ val = readl(shirq->regs.base + shirq->regs.enb_reg);
+ if (shirq->regs.reset_to_enb)
+ val &= ~(shirq->dev_config[id].enb_mask);
+ else
+ val |= shirq->dev_config[id].enb_mask;
+ writel(val, shirq->regs.base + shirq->regs.enb_reg);
+ spin_unlock_irqrestore(&lock, flags);
+}
+
+static struct irq_chip shirq_chip = {
+ .name = "spear_shirq",
+ .ack = shirq_irq_mask,
+ .mask = shirq_irq_mask,
+ .unmask = shirq_irq_unmask,
+};
+
+static void shirq_handler(unsigned irq, struct irq_desc *desc)
+{
+ u32 i, val, mask;
+ struct spear_shirq *shirq = get_irq_data(irq);
+
+ desc->chip->ack(irq);
+ while ((val = readl(shirq->regs.base + shirq->regs.status_reg) &
+ shirq->regs.status_reg_mask)) {
+ for (i = 0; (i < shirq->dev_count) && val; i++) {
+ if (!(shirq->dev_config[i].status_mask & val))
+ continue;
+
+ generic_handle_irq(shirq->dev_config[i].virq);
+
+ /* clear interrupt */
+ val &= ~shirq->dev_config[i].status_mask;
+ if ((shirq->regs.clear_reg == -1) ||
+ shirq->dev_config[i].clear_mask == -1)
+ continue;
+ mask = readl(shirq->regs.base + shirq->regs.clear_reg);
+ if (shirq->regs.reset_to_clear)
+ mask &= ~shirq->dev_config[i].clear_mask;
+ else
+ mask |= shirq->dev_config[i].clear_mask;
+ writel(mask, shirq->regs.base + shirq->regs.clear_reg);
+ }
+ }
+ desc->chip->unmask(irq);
+}
+
+int spear_shirq_register(struct spear_shirq *shirq)
+{
+ int i;
+
+ if (!shirq || !shirq->dev_config || !shirq->regs.base)
+ return -EFAULT;
+
+ if (!shirq->dev_count)
+ return -EINVAL;
+
+ set_irq_chained_handler(shirq->irq, shirq_handler);
+ for (i = 0; i < shirq->dev_count; i++) {
+ set_irq_chip(shirq->dev_config[i].virq, &shirq_chip);
+ set_irq_handler(shirq->dev_config[i].virq, handle_simple_irq);
+ set_irq_flags(shirq->dev_config[i].virq, IRQF_VALID);
+ set_irq_chip_data(shirq->dev_config[i].virq, shirq);
+ }
+
+ set_irq_data(shirq->irq, shirq);
+ return 0;
+}
diff --git a/arch/arm/plat-spear/time.c b/arch/arm/plat-spear/time.c
new file mode 100644
index 000000000000..a1025d38f383
--- /dev/null
+++ b/arch/arm/plat-spear/time.c
@@ -0,0 +1,292 @@
+/*
+ * arch/arm/plat-spear/time.c
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Shiraz Hashim<shiraz.hashim@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/time.h>
+#include <linux/irq.h>
+#include <asm/mach/time.h>
+#include <mach/irqs.h>
+#include <mach/hardware.h>
+#include <mach/spear.h>
+#include <mach/generic.h>
+
+/*
+ * We use TIMER0 and TIMER1 as clockevent and clocksource.
+ * Timer0 and Timer1 both belong to the same gpt block in the cpu subsystem.
+ * Further, they share the same functional clock, so any change in one timer's
+ * functional clock also affects the other timer.
+ */
+
+#define CLKEVT 0 /* gpt0, channel0 as clockevent */
+#define CLKSRC 1 /* gpt0, channel1 as clocksource */
+
+/* Register offsets, x is channel number */
+#define CR(x) ((x) * 0x80 + 0x80)
+#define IR(x) ((x) * 0x80 + 0x84)
+#define LOAD(x) ((x) * 0x80 + 0x88)
+#define COUNT(x) ((x) * 0x80 + 0x8C)
+
+/* Reg bit definitions */
+#define CTRL_INT_ENABLE 0x0100
+#define CTRL_ENABLE 0x0020
+#define CTRL_ONE_SHOT 0x0010
+
+#define CTRL_PRESCALER1 0x0
+#define CTRL_PRESCALER2 0x1
+#define CTRL_PRESCALER4 0x2
+#define CTRL_PRESCALER8 0x3
+#define CTRL_PRESCALER16 0x4
+#define CTRL_PRESCALER32 0x5
+#define CTRL_PRESCALER64 0x6
+#define CTRL_PRESCALER128 0x7
+#define CTRL_PRESCALER256 0x8
+
+#define INT_STATUS 0x1
+
+static __iomem void *gpt_base;
+static struct clk *gpt_clk;
+
+static void clockevent_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *clk_event_dev);
+static int clockevent_next_event(unsigned long evt,
+ struct clock_event_device *clk_event_dev);
+
+/*
+ * Following clocksource_set_clock and clockevent_set_clock picked
+ * from arch/mips/kernel/time.c
+ */
+
+void __init clocksource_set_clock(struct clocksource *cs, unsigned int clock)
+{
+ u64 temp;
+ u32 shift;
+
+ /* Find a shift value */
+ for (shift = 32; shift > 0; shift--) {
+ temp = (u64) NSEC_PER_SEC << shift;
+ do_div(temp, clock);
+ if ((temp >> 32) == 0)
+ break;
+ }
+ cs->shift = shift;
+ cs->mult = (u32) temp;
+}
+
+void __init clockevent_set_clock(struct clock_event_device *cd,
+ unsigned int clock)
+{
+ u64 temp;
+ u32 shift;
+
+ /* Find a shift value */
+ for (shift = 32; shift > 0; shift--) {
+ temp = (u64) clock << shift;
+ do_div(temp, NSEC_PER_SEC);
+ if ((temp >> 32) == 0)
+ break;
+ }
+ cd->shift = shift;
+ cd->mult = (u32) temp;
+}
+
+static cycle_t clocksource_read_cycles(struct clocksource *cs)
+{
+ return (cycle_t) readw(gpt_base + COUNT(CLKSRC));
+}
+
+static struct clocksource clksrc = {
+ .name = "tmr1",
+	.rating = 200,		/* it's a pretty decent clock */
+ .read = clocksource_read_cycles,
+ .mask = 0xFFFF, /* 16 bits */
+ .mult = 0, /* to be computed */
+ .shift = 0, /* to be computed */
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static void spear_clocksource_init(void)
+{
+ u32 tick_rate;
+ u16 val;
+
+ /* program the prescaler (/256)*/
+ writew(CTRL_PRESCALER256, gpt_base + CR(CLKSRC));
+
+ /* find out actual clock driving Timer */
+ tick_rate = clk_get_rate(gpt_clk);
+ tick_rate >>= CTRL_PRESCALER256;
+
+ writew(0xFFFF, gpt_base + LOAD(CLKSRC));
+
+ val = readw(gpt_base + CR(CLKSRC));
+ val &= ~CTRL_ONE_SHOT; /* autoreload mode */
+	val |= CTRL_ENABLE;
+ writew(val, gpt_base + CR(CLKSRC));
+
+ clocksource_set_clock(&clksrc, tick_rate);
+
+ /* register the clocksource */
+ clocksource_register(&clksrc);
+}
+
+static struct clock_event_device clkevt = {
+ .name = "tmr0",
+ .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
+ .set_mode = clockevent_set_mode,
+ .set_next_event = clockevent_next_event,
+ .shift = 0, /* to be computed */
+};
+
+static void clockevent_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *clk_event_dev)
+{
+ u32 period;
+ u16 val;
+
+ /* stop the timer */
+ val = readw(gpt_base + CR(CLKEVT));
+ val &= ~CTRL_ENABLE;
+ writew(val, gpt_base + CR(CLKEVT));
+
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ period = clk_get_rate(gpt_clk) / HZ;
+ period >>= CTRL_PRESCALER16;
+ writew(period, gpt_base + LOAD(CLKEVT));
+
+ val = readw(gpt_base + CR(CLKEVT));
+ val &= ~CTRL_ONE_SHOT;
+ val |= CTRL_ENABLE | CTRL_INT_ENABLE;
+ writew(val, gpt_base + CR(CLKEVT));
+
+ break;
+ case CLOCK_EVT_MODE_ONESHOT:
+ val = readw(gpt_base + CR(CLKEVT));
+ val |= CTRL_ONE_SHOT;
+ writew(val, gpt_base + CR(CLKEVT));
+
+ break;
+ case CLOCK_EVT_MODE_UNUSED:
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ case CLOCK_EVT_MODE_RESUME:
+
+ break;
+ default:
+ pr_err("Invalid mode requested\n");
+ break;
+ }
+}
+
+static int clockevent_next_event(unsigned long cycles,
+ struct clock_event_device *clk_event_dev)
+{
+ u16 val;
+
+ writew(cycles, gpt_base + LOAD(CLKEVT));
+
+ val = readw(gpt_base + CR(CLKEVT));
+ val |= CTRL_ENABLE | CTRL_INT_ENABLE;
+ writew(val, gpt_base + CR(CLKEVT));
+
+ return 0;
+}
+
+static irqreturn_t spear_timer_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *evt = &clkevt;
+
+ writew(INT_STATUS, gpt_base + IR(CLKEVT));
+
+ evt->event_handler(evt);
+
+ return IRQ_HANDLED;
+}
+
+static struct irqaction spear_timer_irq = {
+ .name = "timer",
+ .flags = IRQF_DISABLED | IRQF_TIMER,
+ .handler = spear_timer_interrupt
+};
+
+static void __init spear_clockevent_init(void)
+{
+ u32 tick_rate;
+
+ /* program the prescaler */
+ writew(CTRL_PRESCALER16, gpt_base + CR(CLKEVT));
+
+ tick_rate = clk_get_rate(gpt_clk);
+ tick_rate >>= CTRL_PRESCALER16;
+
+ clockevent_set_clock(&clkevt, tick_rate);
+
+ clkevt.max_delta_ns = clockevent_delta2ns(0xfff0,
+ &clkevt);
+ clkevt.min_delta_ns = clockevent_delta2ns(3, &clkevt);
+
+ clkevt.cpumask = cpumask_of(0);
+
+ clockevents_register_device(&clkevt);
+
+ setup_irq(SPEAR_GPT0_CHAN0_IRQ, &spear_timer_irq);
+}
+
+void __init spear_setup_timer(void)
+{
+ struct clk *pll3_clk;
+
+ if (!request_mem_region(SPEAR_GPT0_BASE, SZ_1K, "gpt0")) {
+ pr_err("%s:cannot get IO addr\n", __func__);
+ return;
+ }
+
+ gpt_base = (void __iomem *)ioremap(SPEAR_GPT0_BASE, SZ_1K);
+ if (!gpt_base) {
+ pr_err("%s:ioremap failed for gpt\n", __func__);
+ goto err_mem;
+ }
+
+ gpt_clk = clk_get_sys("gpt0", NULL);
+	if (IS_ERR(gpt_clk)) {
+ pr_err("%s:couldn't get clk for gpt\n", __func__);
+ goto err_iomap;
+ }
+
+ pll3_clk = clk_get(NULL, "pll3_48m_clk");
+	if (IS_ERR(pll3_clk)) {
+ pr_err("%s:couldn't get PLL3 as parent for gpt\n", __func__);
+ goto err_iomap;
+ }
+
+ clk_set_parent(gpt_clk, pll3_clk);
+
+ spear_clockevent_init();
+ spear_clocksource_init();
+
+ return;
+
+err_iomap:
+ iounmap(gpt_base);
+
+err_mem:
+ release_mem_region(SPEAR_GPT0_BASE, SZ_1K);
+}
+
+struct sys_timer spear_sys_timer = {
+ .init = spear_setup_timer,
+};
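
A sketch of how a SPEAr board file might then pick this timer up via its machine descriptor; the machine type, board name and the map_io/init_irq/init helpers below are hypothetical, only the .timer field refers to the spear_sys_timer defined above.

#include <asm/mach/arch.h>
#include <mach/generic.h>	/* hypothetical home of the helpers below */

MACHINE_START(SPEAR_EVB, "ST-SPEAr-EVB")	/* machine type is hypothetical */
	.map_io		= spear_map_io,
	.init_irq	= spear_init_irq,
	.timer		= &spear_sys_timer,
	.init_machine	= spear_evb_init,
MACHINE_END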
diff --git a/arch/arm/plat-versatile/Makefile b/arch/arm/plat-versatile/Makefile
new file mode 100644
index 000000000000..9b1a66816aa6
--- /dev/null
+++ b/arch/arm/plat-versatile/Makefile
@@ -0,0 +1,4 @@
+obj-y := clock.o
+obj-$(CONFIG_ARM_TIMER_SP804) += timer-sp.o
+obj-$(CONFIG_ARCH_REALVIEW) += sched-clock.o
+obj-$(CONFIG_ARCH_VERSATILE) += sched-clock.o
diff --git a/arch/arm/mach-integrator/clock.c b/arch/arm/plat-versatile/clock.c
index 989ecf5f5c46..5c8b6564fdc2 100644
--- a/arch/arm/mach-integrator/clock.c
+++ b/arch/arm/plat-versatile/clock.c
@@ -1,5 +1,5 @@
/*
- * linux/arch/arm/mach-integrator/clock.c
+ * linux/arch/arm/plat-versatile/clock.c
*
* Copyright (C) 2004 ARM Limited.
* Written by Deep Blue Solutions Limited.
@@ -14,7 +14,8 @@
#include <linux/clk.h>
#include <linux/mutex.h>
-#include <asm/clkdev.h>
+#include <asm/hardware/icst.h>
+
#include <mach/clkdev.h>
int clk_enable(struct clk *clk)
@@ -36,24 +37,38 @@ EXPORT_SYMBOL(clk_get_rate);
long clk_round_rate(struct clk *clk, unsigned long rate)
{
- struct icst525_vco vco;
- vco = icst525_khz_to_vco(clk->params, rate / 1000);
- return icst525_khz(clk->params, vco) * 1000;
+ long ret = -EIO;
+ if (clk->ops && clk->ops->round)
+ ret = clk->ops->round(clk, rate);
+ return ret;
}
EXPORT_SYMBOL(clk_round_rate);
int clk_set_rate(struct clk *clk, unsigned long rate)
{
int ret = -EIO;
-
- if (clk->setvco) {
- struct icst525_vco vco;
-
- vco = icst525_khz_to_vco(clk->params, rate / 1000);
- clk->rate = icst525_khz(clk->params, vco) * 1000;
- clk->setvco(clk, vco);
- ret = 0;
- }
+ if (clk->ops && clk->ops->set)
+ ret = clk->ops->set(clk, rate);
return ret;
}
EXPORT_SYMBOL(clk_set_rate);
+
+long icst_clk_round(struct clk *clk, unsigned long rate)
+{
+ struct icst_vco vco;
+ vco = icst_hz_to_vco(clk->params, rate);
+ return icst_hz(clk->params, vco);
+}
+EXPORT_SYMBOL(icst_clk_round);
+
+int icst_clk_set(struct clk *clk, unsigned long rate)
+{
+ struct icst_vco vco;
+
+ vco = icst_hz_to_vco(clk->params, rate);
+ clk->rate = icst_hz(clk->params, vco);
+ clk->ops->setvco(clk, vco);
+
+ return 0;
+}
+EXPORT_SYMBOL(icst_clk_set);
diff --git a/arch/arm/plat-versatile/include/plat/clock.h b/arch/arm/plat-versatile/include/plat/clock.h
new file mode 100644
index 000000000000..3cfb024ccd70
--- /dev/null
+++ b/arch/arm/plat-versatile/include/plat/clock.h
@@ -0,0 +1,15 @@
+#ifndef PLAT_CLOCK_H
+#define PLAT_CLOCK_H
+
+#include <asm/hardware/icst.h>
+
+struct clk_ops {
+ long (*round)(struct clk *, unsigned long);
+ int (*set)(struct clk *, unsigned long);
+ void (*setvco)(struct clk *, struct icst_vco);
+};
+
+int icst_clk_set(struct clk *, unsigned long);
+long icst_clk_round(struct clk *, unsigned long);
+
+#endif
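
For reference, a minimal sketch of a clk_ops instance that plugs the generic ICST helpers in; the board_setvco callback and its register write are placeholders for whatever the board's oscillator control actually needs.

#include <plat/clock.h>

/* hypothetical: program vco.v, vco.r and vco.s into the board's oscillator register */
static void board_setvco(struct clk *clk, struct icst_vco vco)
{
	/* board specific register write goes here */
}

static const struct clk_ops board_icst_clk_ops = {
	.round	= icst_clk_round,
	.set	= icst_clk_set,
	.setvco	= board_setvco,
};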
diff --git a/arch/arm/plat-versatile/include/plat/timer-sp.h b/arch/arm/plat-versatile/include/plat/timer-sp.h
new file mode 100644
index 000000000000..21e75e30d497
--- /dev/null
+++ b/arch/arm/plat-versatile/include/plat/timer-sp.h
@@ -0,0 +1,2 @@
+void sp804_clocksource_init(void __iomem *);
+void sp804_clockevents_init(void __iomem *, unsigned int);
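
A sketch of a machine timer init wired up through these two helpers; the base addresses, irq number and sys_timer glue are hypothetical and would come from the board's memory map.

#include <asm/mach/time.h>
#include <plat/timer-sp.h>

static void __init board_timer_init(void)
{
	/* timer 3 as clocksource, timer 0 as clockevent (hypothetical mappings) */
	sp804_clocksource_init(TIMER3_VA_BASE);
	sp804_clockevents_init(TIMER0_VA_BASE, IRQ_TIMERINT0_1);
}

struct sys_timer board_timer = {
	.init = board_timer_init,
};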
diff --git a/arch/arm/plat-versatile/sched-clock.c b/arch/arm/plat-versatile/sched-clock.c
new file mode 100644
index 000000000000..9768cf7e83d7
--- /dev/null
+++ b/arch/arm/plat-versatile/sched-clock.c
@@ -0,0 +1,53 @@
+/*
+ * linux/arch/arm/plat-versatile/sched-clock.c
+ *
+ * Copyright (C) 1999 - 2003 ARM Limited
+ * Copyright (C) 2000 Deep Blue Solutions Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <linux/cnt32_to_63.h>
+#include <linux/io.h>
+#include <asm/div64.h>
+
+#include <mach/hardware.h>
+#include <mach/platform.h>
+
+#ifdef VERSATILE_SYS_BASE
+#define REFCOUNTER (__io_address(VERSATILE_SYS_BASE) + VERSATILE_SYS_24MHz_OFFSET)
+#endif
+
+#ifdef REALVIEW_SYS_BASE
+#define REFCOUNTER (__io_address(REALVIEW_SYS_BASE) + REALVIEW_SYS_24MHz_OFFSET)
+#endif
+
+/*
+ * This is the Realview and Versatile sched_clock implementation. This
+ * has a resolution of 41.7ns, and a maximum value of about 35583 days.
+ *
+ * The return value is guaranteed to be monotonic in that range as
+ * long as there is always less than 89 seconds between successive
+ * calls to this function.
+ */
+unsigned long long sched_clock(void)
+{
+ unsigned long long v = cnt32_to_63(readl(REFCOUNTER));
+
+ /* the <<1 gets rid of the cnt_32_to_63 top bit saving on a bic insn */
+ v *= 125<<1;
+ do_div(v, 3<<1);
+
+ return v;
+}
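
For reference, the arithmetic behind the comment above: the reference counter runs at 24 MHz, so one tick is 1000/24 ns = 125/3 ns, which is the quoted 41.7 ns resolution and the reason the tick count is multiplied by 125 and divided by 3 (both doubled so the cnt32_to_63 flag bit is shifted out rather than masked with an extra instruction). The roughly 35583-day limit is where the 64-bit product v * 250 would overflow: 2^64 / 250 ticks at 24 MHz is about 3.07e9 seconds. The 89-second constraint is half the 32-bit counter period, 2^31 / 24 MHz, which cnt32_to_63 needs in order to observe the counter wrap.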
diff --git a/arch/arm/plat-versatile/timer-sp.c b/arch/arm/plat-versatile/timer-sp.c
new file mode 100644
index 000000000000..fb0d1c299718
--- /dev/null
+++ b/arch/arm/plat-versatile/timer-sp.c
@@ -0,0 +1,156 @@
+/*
+ * linux/arch/arm/plat-versatile/timer-sp.c
+ *
+ * Copyright (C) 1999 - 2003 ARM Limited
+ * Copyright (C) 2000 Deep Blue Solutions Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+
+#include <asm/hardware/arm_timer.h>
+
+#include <plat/timer-sp.h>
+
+/*
+ * These timers are currently always setup to be clocked at 1MHz.
+ */
+#define TIMER_FREQ_KHZ (1000)
+#define TIMER_RELOAD (TIMER_FREQ_KHZ * 1000 / HZ)
+
+static void __iomem *clksrc_base;
+
+static cycle_t sp804_read(struct clocksource *cs)
+{
+ return ~readl(clksrc_base + TIMER_VALUE);
+}
+
+static struct clocksource clocksource_sp804 = {
+ .name = "timer3",
+ .rating = 200,
+ .read = sp804_read,
+ .mask = CLOCKSOURCE_MASK(32),
+ .shift = 20,
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+void __init sp804_clocksource_init(void __iomem *base)
+{
+ struct clocksource *cs = &clocksource_sp804;
+
+ clksrc_base = base;
+
+ /* setup timer 0 as free-running clocksource */
+ writel(0, clksrc_base + TIMER_CTRL);
+ writel(0xffffffff, clksrc_base + TIMER_LOAD);
+ writel(0xffffffff, clksrc_base + TIMER_VALUE);
+ writel(TIMER_CTRL_32BIT | TIMER_CTRL_ENABLE | TIMER_CTRL_PERIODIC,
+ clksrc_base + TIMER_CTRL);
+
+ cs->mult = clocksource_khz2mult(TIMER_FREQ_KHZ, cs->shift);
+ clocksource_register(cs);
+}
+
+
+static void __iomem *clkevt_base;
+
+/*
+ * IRQ handler for the timer
+ */
+static irqreturn_t sp804_timer_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *evt = dev_id;
+
+ /* clear the interrupt */
+ writel(1, clkevt_base + TIMER_INTCLR);
+
+ evt->event_handler(evt);
+
+ return IRQ_HANDLED;
+}
+
+static void sp804_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ unsigned long ctrl = TIMER_CTRL_32BIT | TIMER_CTRL_IE;
+
+ writel(ctrl, clkevt_base + TIMER_CTRL);
+
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ writel(TIMER_RELOAD, clkevt_base + TIMER_LOAD);
+ ctrl |= TIMER_CTRL_PERIODIC | TIMER_CTRL_ENABLE;
+ break;
+
+ case CLOCK_EVT_MODE_ONESHOT:
+ /* period set, and timer enabled in 'next_event' hook */
+ ctrl |= TIMER_CTRL_ONESHOT;
+ break;
+
+ case CLOCK_EVT_MODE_UNUSED:
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ default:
+ break;
+ }
+
+ writel(ctrl, clkevt_base + TIMER_CTRL);
+}
+
+static int sp804_set_next_event(unsigned long next,
+ struct clock_event_device *evt)
+{
+ unsigned long ctrl = readl(clkevt_base + TIMER_CTRL);
+
+ writel(next, clkevt_base + TIMER_LOAD);
+ writel(ctrl | TIMER_CTRL_ENABLE, clkevt_base + TIMER_CTRL);
+
+ return 0;
+}
+
+static struct clock_event_device sp804_clockevent = {
+ .name = "timer0",
+ .shift = 32,
+ .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
+ .set_mode = sp804_set_mode,
+ .set_next_event = sp804_set_next_event,
+ .rating = 300,
+ .cpumask = cpu_all_mask,
+};
+
+static struct irqaction sp804_timer_irq = {
+ .name = "timer",
+ .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+ .handler = sp804_timer_interrupt,
+ .dev_id = &sp804_clockevent,
+};
+
+void __init sp804_clockevents_init(void __iomem *base, unsigned int timer_irq)
+{
+ struct clock_event_device *evt = &sp804_clockevent;
+
+ clkevt_base = base;
+
+ evt->irq = timer_irq;
+ evt->mult = div_sc(TIMER_FREQ_KHZ, NSEC_PER_MSEC, evt->shift);
+ evt->max_delta_ns = clockevent_delta2ns(0xffffffff, evt);
+ evt->min_delta_ns = clockevent_delta2ns(0xf, evt);
+
+ setup_irq(timer_irq, &sp804_timer_irq);
+ clockevents_register_device(evt);
+}
diff --git a/arch/avr32/include/asm/atomic.h b/arch/avr32/include/asm/atomic.h
index b131c27ddf57..bbce6a1c6bb6 100644
--- a/arch/avr32/include/asm/atomic.h
+++ b/arch/avr32/include/asm/atomic.h
@@ -19,7 +19,7 @@
#define ATOMIC_INIT(i) { (i) }
-#define atomic_read(v) ((v)->counter)
+#define atomic_read(v) (*(volatile int *)&(v)->counter)
#define atomic_set(v, i) (((v)->counter) = i)
/*
diff --git a/arch/avr32/include/asm/scatterlist.h b/arch/avr32/include/asm/scatterlist.h
index 377320e3bd17..06394e5ead6c 100644
--- a/arch/avr32/include/asm/scatterlist.h
+++ b/arch/avr32/include/asm/scatterlist.h
@@ -1,25 +1,7 @@
#ifndef __ASM_AVR32_SCATTERLIST_H
#define __ASM_AVR32_SCATTERLIST_H
-#include <asm/types.h>
-
-struct scatterlist {
-#ifdef CONFIG_DEBUG_SG
- unsigned long sg_magic;
-#endif
- unsigned long page_link;
- unsigned int offset;
- dma_addr_t dma_address;
- unsigned int length;
-};
-
-/* These macros should be used after a pci_map_sg call has been done
- * to get bus addresses of each of the SG entries and their lengths.
- * You should only work with the number of sg entries pci_map_sg
- * returns.
- */
-#define sg_dma_address(sg) ((sg)->dma_address)
-#define sg_dma_len(sg) ((sg)->length)
+#include <asm-generic/scatterlist.h>
#define ISA_DMA_THRESHOLD (0xffffffff)
diff --git a/arch/avr32/include/asm/thread_info.h b/arch/avr32/include/asm/thread_info.h
index fd0c5d7e9337..7a9c03dcb0b6 100644
--- a/arch/avr32/include/asm/thread_info.h
+++ b/arch/avr32/include/asm/thread_info.h
@@ -81,7 +81,7 @@ static inline struct thread_info *current_thread_info(void)
TIF_NEED_RESCHED */
#define TIF_BREAKPOINT 4 /* enter monitor mode on return */
#define TIF_SINGLE_STEP 5 /* single step in progress */
-#define TIF_MEMDIE 6
+#define TIF_MEMDIE 6 /* is terminating due to OOM killer */
#define TIF_RESTORE_SIGMASK 7 /* restore signal mask in do_signal */
#define TIF_CPU_GOING_TO_SLEEP 8 /* CPU is entering sleep 0 mode */
#define TIF_NOTIFY_RESUME 9 /* callback before returning to user */
diff --git a/arch/avr32/kernel/time.c b/arch/avr32/kernel/time.c
index f27aa3b259fa..668ed2817e51 100644
--- a/arch/avr32/kernel/time.c
+++ b/arch/avr32/kernel/time.c
@@ -110,17 +110,17 @@ static struct clock_event_device comparator = {
.set_mode = comparator_mode,
};
+void read_persistent_clock(struct timespec *ts)
+{
+ ts->tv_sec = mktime(2007, 1, 1, 0, 0, 0);
+ ts->tv_nsec = 0;
+}
+
void __init time_init(void)
{
unsigned long counter_hz;
int ret;
- xtime.tv_sec = mktime(2007, 1, 1, 0, 0, 0);
- xtime.tv_nsec = 0;
-
- set_normalized_timespec(&wall_to_monotonic,
- -xtime.tv_sec, -xtime.tv_nsec);
-
/* figure rate for counter */
counter_hz = clk_get_rate(boot_cpu_data.clk);
counter.mult = clocksource_hz2mult(counter_hz, counter.shift);
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index c078849df7f9..f66294b4f9d2 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -348,7 +348,7 @@ config MEM_MT48LC16M16A2TG_75
config MEM_MT48LC32M8A2_75
bool
- depends on (BFIN537_STAMP || PNAV10 || BFIN538_EZKIT)
+ depends on (BFIN518F_EZBRD || BFIN537_STAMP || PNAV10 || BFIN538_EZKIT)
default y
config MEM_MT48LC8M32B2B5_7
@@ -361,11 +361,6 @@ config MEM_MT48LC32M16A2TG_75
depends on (BFIN527_EZKIT || BFIN527_EZKIT_V2 || BFIN532_IP0X || BLACKSTAMP)
default y
-config MEM_MT48LC32M8A2_75
- bool
- depends on (BFIN518F_EZBRD)
- default y
-
config MEM_MT48H32M16LFCJ_75
bool
depends on (BFIN526_EZBRD)
@@ -791,6 +786,34 @@ config MEMCPY_L1
If enabled, the memcpy function is linked
into L1 instruction memory. (less latency)
+config STRCMP_L1
+ bool "locate strcmp function in L1 Memory"
+ default y
+ help
+ If enabled, the strcmp function is linked
+ into L1 instruction memory (less latency).
+
+config STRNCMP_L1
+ bool "locate strncmp function in L1 Memory"
+ default y
+ help
+ If enabled, the strncmp function is linked
+ into L1 instruction memory (less latency).
+
+config STRCPY_L1
+ bool "locate strcpy function in L1 Memory"
+ default y
+ help
+ If enabled, the strcpy function is linked
+ into L1 instruction memory (less latency).
+
+config STRNCPY_L1
+ bool "locate strncpy function in L1 Memory"
+ default y
+ help
+ If enabled, the strncpy function is linked
+ into L1 instruction memory (less latency).
+
config SYS_BFIN_SPINLOCK_L1
bool "Locate sys_bfin_spinlock function in L1 Memory"
default y
@@ -1187,32 +1210,6 @@ config PM_BFIN_SLEEP
If unsure, select "Sleep Deeper".
endchoice
-config PM_WAKEUP_BY_GPIO
- bool "Allow Wakeup from Standby by GPIO"
- depends on PM && !BF54x
-
-config PM_WAKEUP_GPIO_NUMBER
- int "GPIO number"
- range 0 47
- depends on PM_WAKEUP_BY_GPIO
- default 2
-
-choice
- prompt "GPIO Polarity"
- depends on PM_WAKEUP_BY_GPIO
- default PM_WAKEUP_GPIO_POLAR_H
-config PM_WAKEUP_GPIO_POLAR_H
- bool "Active High"
-config PM_WAKEUP_GPIO_POLAR_L
- bool "Active Low"
-config PM_WAKEUP_GPIO_POLAR_EDGE_F
- bool "Falling EDGE"
-config PM_WAKEUP_GPIO_POLAR_EDGE_R
- bool "Rising EDGE"
-config PM_WAKEUP_GPIO_POLAR_EDGE_B
- bool "Both EDGE"
-endchoice
-
comment "Possible Suspend Mem / Hibernate Wake-Up Sources"
depends on PM
diff --git a/arch/blackfin/Kconfig.debug b/arch/blackfin/Kconfig.debug
index aec89a5280b2..d1825cb24768 100644
--- a/arch/blackfin/Kconfig.debug
+++ b/arch/blackfin/Kconfig.debug
@@ -238,7 +238,7 @@ config EARLY_PRINTK
config NMI_WATCHDOG
bool "Enable NMI watchdog to help debugging lockup on SMP"
default n
- depends on (SMP && !BFIN_SCRATCH_REG_RETN)
+ depends on SMP
help
 	  If any CPU in the system does not execute the periodic local timer
interrupt for more than 5 seconds, then the NMI handler dumps debug
@@ -264,4 +264,13 @@ config BFIN_ISRAM_SELF_TEST
help
Run some self tests of the isram driver code at boot.
+config BFIN_PSEUDODBG_INSNS
+ bool "Support pseudo debug instructions"
+ default n
+ help
+ This option allows the kernel to emulate some pseudo instructions which
+ allow simulator test cases to be run under Linux with no changes.
+
+ Most people should say N here.
+
endmenu
diff --git a/arch/blackfin/include/asm/bfin-global.h b/arch/blackfin/include/asm/bfin-global.h
index e6485c305ea6..121cc04d877d 100644
--- a/arch/blackfin/include/asm/bfin-global.h
+++ b/arch/blackfin/include/asm/bfin-global.h
@@ -39,9 +39,15 @@ extern unsigned long sclk_to_usecs(unsigned long sclk);
extern unsigned long usecs_to_sclk(unsigned long usecs);
struct pt_regs;
+#if defined(CONFIG_DEBUG_VERBOSE)
extern void dump_bfin_process(struct pt_regs *regs);
extern void dump_bfin_mem(struct pt_regs *regs);
extern void dump_bfin_trace_buffer(void);
+#else
+#define dump_bfin_process(regs)
+#define dump_bfin_mem(regs)
+#define dump_bfin_trace_buffer()
+#endif
/* init functions only */
extern int init_arch_irq(void);
diff --git a/arch/blackfin/include/asm/bug.h b/arch/blackfin/include/asm/bug.h
index 75f6dc336d46..8d9b1eba89c4 100644
--- a/arch/blackfin/include/asm/bug.h
+++ b/arch/blackfin/include/asm/bug.h
@@ -9,7 +9,12 @@
#ifdef CONFIG_BUG
-#define BFIN_BUG_OPCODE 0xefcd
+/*
+ * This can be any undefined 16-bit opcode, meaning
+ * ((opcode & 0xc000) != 0xc000)
+ * Anything from 0x0001 to 0x000A (inclusive) will work
+ */
+#define BFIN_BUG_OPCODE 0x0001
#ifdef CONFIG_DEBUG_BUGVERBOSE
diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
index 8542bc31f63c..93f6c634fdf4 100644
--- a/arch/blackfin/include/asm/cache.h
+++ b/arch/blackfin/include/asm/cache.h
@@ -15,6 +15,8 @@
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
#define SMP_CACHE_BYTES L1_CACHE_BYTES
+#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
+
#ifdef CONFIG_SMP
#define __cacheline_aligned
#else
diff --git a/arch/blackfin/include/asm/gpio.h b/arch/blackfin/include/asm/gpio.h
index 91bd2d7b9d55..01b19d0cf509 100644
--- a/arch/blackfin/include/asm/gpio.h
+++ b/arch/blackfin/include/asm/gpio.h
@@ -167,23 +167,23 @@ int bfin_special_gpio_request(unsigned gpio, const char *label);
#endif
#ifdef CONFIG_PM
+int bfin_pm_standby_ctrl(unsigned ctrl);
-unsigned int bfin_pm_standby_setup(void);
-void bfin_pm_standby_restore(void);
+static inline int bfin_pm_standby_setup(void)
+{
+ return bfin_pm_standby_ctrl(1);
+}
+
+static inline void bfin_pm_standby_restore(void)
+{
+ bfin_pm_standby_ctrl(0);
+}
void bfin_gpio_pm_hibernate_restore(void);
void bfin_gpio_pm_hibernate_suspend(void);
#ifndef CONFIG_BF54x
-#define PM_WAKE_RISING 0x1
-#define PM_WAKE_FALLING 0x2
-#define PM_WAKE_HIGH 0x4
-#define PM_WAKE_LOW 0x8
-#define PM_WAKE_BOTH_EDGES (PM_WAKE_RISING | PM_WAKE_FALLING)
-#define PM_WAKE_IGNORE 0xF0
-
-int gpio_pm_wakeup_request(unsigned gpio, unsigned char type);
-void gpio_pm_wakeup_free(unsigned gpio);
+int gpio_pm_wakeup_ctrl(unsigned gpio, unsigned ctrl);
struct gpio_port_s {
unsigned short data;
diff --git a/arch/blackfin/include/asm/pgtable.h b/arch/blackfin/include/asm/pgtable.h
index 821c699c2238..dcca3e6d6e80 100644
--- a/arch/blackfin/include/asm/pgtable.h
+++ b/arch/blackfin/include/asm/pgtable.h
@@ -80,7 +80,8 @@ PTE_BIT_FUNC(mkyoung, |= _PAGE_ACCESSED);
* ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
*/
-#define ZERO_PAGE(vaddr) (virt_to_page(0))
+#define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page)
+extern char empty_zero_page[];
extern unsigned int kobjsize(const void *objp);
diff --git a/arch/blackfin/include/asm/pseudo_instructions.h b/arch/blackfin/include/asm/pseudo_instructions.h
new file mode 100644
index 000000000000..b00adfa08169
--- /dev/null
+++ b/arch/blackfin/include/asm/pseudo_instructions.h
@@ -0,0 +1,18 @@
+/*
+ * header file for pseudo instructions
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef _BLACKFIN_PSEUDO_
+#define _BLACKFIN_PSEUDO_
+
+#include <linux/types.h>
+#include <asm/ptrace.h>
+
+extern bool execute_pseudodbg_assert(struct pt_regs *fp, unsigned int opcode);
+extern bool execute_pseudodbg(struct pt_regs *fp, unsigned int opcode);
+
+#endif
diff --git a/arch/blackfin/include/asm/scatterlist.h b/arch/blackfin/include/asm/scatterlist.h
index 04f448711cd0..64d41d34ab0b 100644
--- a/arch/blackfin/include/asm/scatterlist.h
+++ b/arch/blackfin/include/asm/scatterlist.h
@@ -1,27 +1,7 @@
#ifndef _BLACKFIN_SCATTERLIST_H
#define _BLACKFIN_SCATTERLIST_H
-#include <linux/mm.h>
-
-struct scatterlist {
-#ifdef CONFIG_DEBUG_SG
- unsigned long sg_magic;
-#endif
- unsigned long page_link;
- unsigned int offset;
- dma_addr_t dma_address;
- unsigned int length;
-};
-
-/*
- * These macros should be used after a pci_map_sg call has been done
- * to get bus addresses of each of the SG entries and their lengths.
- * You should only work with the number of sg entries pci_map_sg
- * returns, or alternatively stop on the first sg_dma_len(sg) which
- * is 0.
- */
-#define sg_dma_address(sg) ((sg)->dma_address)
-#define sg_dma_len(sg) ((sg)->length)
+#include <asm-generic/scatterlist.h>
#define ISA_DMA_THRESHOLD (0xffffffff)
diff --git a/arch/blackfin/include/asm/string.h b/arch/blackfin/include/asm/string.h
index d7f0ccb418c3..423c099aa988 100644
--- a/arch/blackfin/include/asm/string.h
+++ b/arch/blackfin/include/asm/string.h
@@ -12,121 +12,16 @@
#ifdef __KERNEL__ /* only set these up for kernel code */
#define __HAVE_ARCH_STRCPY
-extern inline char *strcpy(char *dest, const char *src)
-{
- char *xdest = dest;
- char temp = 0;
-
- __asm__ __volatile__ (
- "1:"
- "%2 = B [%1++] (Z);"
- "B [%0++] = %2;"
- "CC = %2;"
- "if cc jump 1b (bp);"
- : "+&a" (dest), "+&a" (src), "=&d" (temp)
- :
- : "memory", "CC");
-
- return xdest;
-}
+extern char *strcpy(char *dest, const char *src);
#define __HAVE_ARCH_STRNCPY
-extern inline char *strncpy(char *dest, const char *src, size_t n)
-{
- char *xdest = dest;
- char temp = 0;
-
- if (n == 0)
- return xdest;
-
- __asm__ __volatile__ (
- "1:"
- "%3 = B [%1++] (Z);"
- "B [%0++] = %3;"
- "CC = %3;"
- "if ! cc jump 2f;"
- "%2 += -1;"
- "CC = %2 == 0;"
- "if ! cc jump 1b (bp);"
- "jump 4f;"
- "2:"
- /* if src is shorter than n, we need to null pad bytes now */
- "%3 = 0;"
- "3:"
- "%2 += -1;"
- "CC = %2 == 0;"
- "if cc jump 4f;"
- "B [%0++] = %3;"
- "jump 3b;"
- "4:"
- : "+&a" (dest), "+&a" (src), "+&da" (n), "=&d" (temp)
- :
- : "memory", "CC");
-
- return xdest;
-}
+extern char *strncpy(char *dest, const char *src, size_t n);
#define __HAVE_ARCH_STRCMP
-extern inline int strcmp(const char *cs, const char *ct)
-{
- /* need to use int's here so the char's in the assembly don't get
- * sign extended incorrectly when we don't want them to be
- */
- int __res1, __res2;
-
- __asm__ __volatile__ (
- "1:"
- "%2 = B[%0++] (Z);" /* get *cs */
- "%3 = B[%1++] (Z);" /* get *ct */
- "CC = %2 == %3;" /* compare a byte */
- "if ! cc jump 2f;" /* not equal, break out */
- "CC = %2;" /* at end of cs? */
- "if cc jump 1b (bp);" /* no, keep going */
- "jump.s 3f;" /* strings are equal */
- "2:"
- "%2 = %2 - %3;" /* *cs - *ct */
- "3:"
- : "+&a" (cs), "+&a" (ct), "=&d" (__res1), "=&d" (__res2)
- :
- : "memory", "CC");
-
- return __res1;
-}
+extern int strcmp(const char *cs, const char *ct);
#define __HAVE_ARCH_STRNCMP
-extern inline int strncmp(const char *cs, const char *ct, size_t count)
-{
- /* need to use int's here so the char's in the assembly don't get
- * sign extended incorrectly when we don't want them to be
- */
- int __res1, __res2;
-
- if (!count)
- return 0;
-
- __asm__ __volatile__ (
- "1:"
- "%3 = B[%0++] (Z);" /* get *cs */
- "%4 = B[%1++] (Z);" /* get *ct */
- "CC = %3 == %4;" /* compare a byte */
- "if ! cc jump 3f;" /* not equal, break out */
- "CC = %3;" /* at end of cs? */
- "if ! cc jump 4f;" /* yes, all done */
- "%2 += -1;" /* no, adjust count */
- "CC = %2 == 0;"
- "if ! cc jump 1b;" /* more to do, keep going */
- "2:"
- "%3 = 0;" /* strings are equal */
- "jump.s 4f;"
- "3:"
- "%3 = %3 - %4;" /* *cs - *ct */
- "4:"
- : "+&a" (cs), "+&a" (ct), "+&da" (count), "=&d" (__res1), "=&d" (__res2)
- :
- : "memory", "CC");
-
- return __res1;
-}
+extern int strncmp(const char *cs, const char *ct, size_t count);
#define __HAVE_ARCH_MEMSET
extern void *memset(void *s, int c, size_t count);
diff --git a/arch/blackfin/include/asm/thread_info.h b/arch/blackfin/include/asm/thread_info.h
index e9a5614cdbb1..02560fd8a121 100644
--- a/arch/blackfin/include/asm/thread_info.h
+++ b/arch/blackfin/include/asm/thread_info.h
@@ -98,7 +98,7 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
#define TIF_POLLING_NRFLAG 3 /* true if poll_idle() is polling
TIF_NEED_RESCHED */
-#define TIF_MEMDIE 4
+#define TIF_MEMDIE 4 /* is terminating due to OOM killer */
#define TIF_RESTORE_SIGMASK 5 /* restore signal mask in do_signal() */
#define TIF_FREEZE 6 /* is freezing for suspend */
#define TIF_IRQ_SYNC 7 /* sync pipeline stage */
diff --git a/arch/blackfin/include/asm/tlbflush.h b/arch/blackfin/include/asm/tlbflush.h
index f1a06c006ed0..7c368682c0a3 100644
--- a/arch/blackfin/include/asm/tlbflush.h
+++ b/arch/blackfin/include/asm/tlbflush.h
@@ -1 +1,2 @@
#include <asm-generic/tlbflush.h>
+#define flush_tlb_kernel_range(s, e) do { } while (0)
diff --git a/arch/blackfin/include/asm/trace.h b/arch/blackfin/include/asm/trace.h
index dc0aa55ae773..33589a29b8d8 100644
--- a/arch/blackfin/include/asm/trace.h
+++ b/arch/blackfin/include/asm/trace.h
@@ -23,6 +23,13 @@
#ifndef __ASSEMBLY__
extern unsigned long trace_buff_offset;
extern unsigned long software_trace_buff[];
+#if defined(CONFIG_DEBUG_VERBOSE)
+extern void decode_address(char *buf, unsigned long address);
+extern bool get_instruction(unsigned int *val, unsigned short *address);
+#else
+static inline void decode_address(char *buf, unsigned long address) { }
+static inline bool get_instruction(unsigned int *val, unsigned short *address) { return false; }
+#endif
/* Trace Macros for C files */
diff --git a/arch/blackfin/kernel/Makefile b/arch/blackfin/kernel/Makefile
index 346a421f1562..30d0d1f01dc7 100644
--- a/arch/blackfin/kernel/Makefile
+++ b/arch/blackfin/kernel/Makefile
@@ -7,7 +7,8 @@ extra-y := init_task.o vmlinux.lds
obj-y := \
entry.o process.o bfin_ksyms.o ptrace.o setup.o signal.o \
sys_bfin.o traps.o irqchip.o dma-mapping.o flat.o \
- fixed_code.o reboot.o bfin_gpio.o bfin_dma_5xx.o
+ fixed_code.o reboot.o bfin_gpio.o bfin_dma_5xx.o \
+ exception.o dumpstack.o
ifeq ($(CONFIG_GENERIC_CLOCKEVENTS),y)
obj-y += time-ts.o
@@ -29,6 +30,8 @@ obj-$(CONFIG_NMI_WATCHDOG) += nmi.o
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-$(CONFIG_EARLY_PRINTK) += shadow_console.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
+obj-$(CONFIG_DEBUG_VERBOSE) += trace.o
+obj-$(CONFIG_BFIN_PSEUDODBG_INSNS) += pseudodbg.o
# the kgdb test puts code into L2 and without linker
# relaxation, we need to force long calls to/from it
diff --git a/arch/blackfin/kernel/bfin_gpio.c b/arch/blackfin/kernel/bfin_gpio.c
index e35e20f00d9b..42833ee2b308 100644
--- a/arch/blackfin/kernel/bfin_gpio.c
+++ b/arch/blackfin/kernel/bfin_gpio.c
@@ -475,9 +475,7 @@ GET_GPIO_P(maskb)
#ifdef CONFIG_PM
-
static unsigned short wakeup_map[GPIO_BANK_NUM];
-static unsigned char wakeup_flags_map[MAX_BLACKFIN_GPIOS];
static const unsigned int sic_iwr_irqs[] = {
#if defined(BF533_FAMILY)
@@ -514,112 +512,26 @@ static const unsigned int sic_iwr_irqs[] = {
*************************************************************
* MODIFICATION HISTORY :
**************************************************************/
-int gpio_pm_wakeup_request(unsigned gpio, unsigned char type)
-{
- unsigned long flags;
-
- if ((check_gpio(gpio) < 0) || !type)
- return -EINVAL;
-
- local_irq_save_hw(flags);
- wakeup_map[gpio_bank(gpio)] |= gpio_bit(gpio);
- wakeup_flags_map[gpio] = type;
- local_irq_restore_hw(flags);
-
- return 0;
-}
-EXPORT_SYMBOL(gpio_pm_wakeup_request);
-
-void gpio_pm_wakeup_free(unsigned gpio)
+int gpio_pm_wakeup_ctrl(unsigned gpio, unsigned ctrl)
{
unsigned long flags;
if (check_gpio(gpio) < 0)
- return;
+ return -EINVAL;
local_irq_save_hw(flags);
-
- wakeup_map[gpio_bank(gpio)] &= ~gpio_bit(gpio);
-
- local_irq_restore_hw(flags);
-}
-EXPORT_SYMBOL(gpio_pm_wakeup_free);
-
-static int bfin_gpio_wakeup_type(unsigned gpio, unsigned char type)
-{
- port_setup(gpio, GPIO_USAGE);
- set_gpio_dir(gpio, 0);
- set_gpio_inen(gpio, 1);
-
- if (type & (PM_WAKE_RISING | PM_WAKE_FALLING))
- set_gpio_edge(gpio, 1);
- else
- set_gpio_edge(gpio, 0);
-
- if ((type & (PM_WAKE_BOTH_EDGES)) == (PM_WAKE_BOTH_EDGES))
- set_gpio_both(gpio, 1);
+ if (ctrl)
+ wakeup_map[gpio_bank(gpio)] |= gpio_bit(gpio);
else
- set_gpio_both(gpio, 0);
-
- if ((type & (PM_WAKE_FALLING | PM_WAKE_LOW)))
- set_gpio_polar(gpio, 1);
- else
- set_gpio_polar(gpio, 0);
+ wakeup_map[gpio_bank(gpio)] &= ~gpio_bit(gpio);
- SSYNC();
-
- return 0;
-}
-
-u32 bfin_pm_standby_setup(void)
-{
- u16 bank, mask, i, gpio;
-
- for (i = 0; i < MAX_BLACKFIN_GPIOS; i += GPIO_BANKSIZE) {
- mask = wakeup_map[gpio_bank(i)];
- bank = gpio_bank(i);
-
- gpio_bank_saved[bank].maskb = gpio_array[bank]->maskb;
- gpio_array[bank]->maskb = 0;
-
- if (mask) {
-#if defined(CONFIG_BF52x) || defined(BF537_FAMILY) || defined(CONFIG_BF51x)
- gpio_bank_saved[bank].fer = *port_fer[bank];
-#endif
- gpio_bank_saved[bank].inen = gpio_array[bank]->inen;
- gpio_bank_saved[bank].polar = gpio_array[bank]->polar;
- gpio_bank_saved[bank].dir = gpio_array[bank]->dir;
- gpio_bank_saved[bank].edge = gpio_array[bank]->edge;
- gpio_bank_saved[bank].both = gpio_array[bank]->both;
- gpio_bank_saved[bank].reserved =
- reserved_gpio_map[bank];
-
- gpio = i;
-
- while (mask) {
- if ((mask & 1) && (wakeup_flags_map[gpio] !=
- PM_WAKE_IGNORE)) {
- reserved_gpio_map[gpio_bank(gpio)] |=
- gpio_bit(gpio);
- bfin_gpio_wakeup_type(gpio,
- wakeup_flags_map[gpio]);
- set_gpio_data(gpio, 0); /*Clear*/
- }
- gpio++;
- mask >>= 1;
- }
-
- bfin_internal_set_wake(sic_iwr_irqs[bank], 1);
- gpio_array[bank]->maskb_set = wakeup_map[gpio_bank(i)];
- }
- }
-
- AWA_DUMMY_READ(maskb_set);
+ set_gpio_maskb(gpio, ctrl);
+ local_irq_restore_hw(flags);
return 0;
}
-void bfin_pm_standby_restore(void)
+int bfin_pm_standby_ctrl(unsigned ctrl)
{
u16 bank, mask, i;
@@ -627,24 +539,10 @@ void bfin_pm_standby_restore(void)
mask = wakeup_map[gpio_bank(i)];
bank = gpio_bank(i);
- if (mask) {
-#if defined(CONFIG_BF52x) || defined(BF537_FAMILY) || defined(CONFIG_BF51x)
- *port_fer[bank] = gpio_bank_saved[bank].fer;
-#endif
- gpio_array[bank]->inen = gpio_bank_saved[bank].inen;
- gpio_array[bank]->dir = gpio_bank_saved[bank].dir;
- gpio_array[bank]->polar = gpio_bank_saved[bank].polar;
- gpio_array[bank]->edge = gpio_bank_saved[bank].edge;
- gpio_array[bank]->both = gpio_bank_saved[bank].both;
-
- reserved_gpio_map[bank] =
- gpio_bank_saved[bank].reserved;
- bfin_internal_set_wake(sic_iwr_irqs[bank], 0);
- }
-
- gpio_array[bank]->maskb = gpio_bank_saved[bank].maskb;
+ if (mask)
+ bfin_internal_set_wake(sic_iwr_irqs[bank], ctrl);
}
- AWA_DUMMY_READ(maskb);
+ return 0;
}
void bfin_gpio_pm_hibernate_suspend(void)
@@ -708,16 +606,11 @@ void bfin_gpio_pm_hibernate_restore(void)
#else /* CONFIG_BF54x */
#ifdef CONFIG_PM
-u32 bfin_pm_standby_setup(void)
+int bfin_pm_standby_ctrl(unsigned ctrl)
{
return 0;
}
-void bfin_pm_standby_restore(void)
-{
-
-}
-
void bfin_gpio_pm_hibernate_suspend(void)
{
int i, bank;
diff --git a/arch/blackfin/kernel/bfin_ksyms.c b/arch/blackfin/kernel/bfin_ksyms.c
index ed8392c117ea..2c264b51566a 100644
--- a/arch/blackfin/kernel/bfin_ksyms.c
+++ b/arch/blackfin/kernel/bfin_ksyms.c
@@ -33,6 +33,18 @@ EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(memchr);
/*
+ * Because the string functions are provided both as inlines and as exported
+ * functions, and arch/blackfin/lib is configured as a library path in the
+ * Makefile, symbols exported under lib/ are not linked into built-in.o but
+ * are only inlined. In order to export the string symbols to kernel modules
+ * properly, they have to be exported here.
+ */
+EXPORT_SYMBOL(strcpy);
+EXPORT_SYMBOL(strncpy);
+EXPORT_SYMBOL(strcmp);
+EXPORT_SYMBOL(strncmp);
+
+/*
* libgcc functions - functions that are used internally by the
* compiler... (prototypes are not correct though, but that
* doesn't really matter since they're not versioned).
diff --git a/arch/blackfin/kernel/dumpstack.c b/arch/blackfin/kernel/dumpstack.c
new file mode 100644
index 000000000000..5cfbaa298211
--- /dev/null
+++ b/arch/blackfin/kernel/dumpstack.c
@@ -0,0 +1,174 @@
+/* Provide basic stack dumping functions
+ *
+ * Copyright 2004-2009 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later
+ */
+
+#include <linux/kernel.h>
+#include <linux/thread_info.h>
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <asm/trace.h>
+
+/*
+ * Checks to see if the address pointed to is either a
+ * 16-bit CALL instruction, or a 32-bit CALL instruction
+ */
+static bool is_bfin_call(unsigned short *addr)
+{
+ unsigned int opcode;
+
+ if (!get_instruction(&opcode, addr))
+ return false;
+
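+	/* 0x0060-0x0067 is the 16-bit CALL (Pn), 0x0070-0x0077 is CALL (PC + Pn),
+	 * and the 0xE3xxxxxx range is the 32-bit CALL pcrel encoding.
+	 */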
+ if ((opcode >= 0x0060 && opcode <= 0x0067) ||
+ (opcode >= 0x0070 && opcode <= 0x0077) ||
+ (opcode >= 0xE3000000 && opcode <= 0xE3FFFFFF))
+ return true;
+
+ return false;
+
+}
+
+void show_stack(struct task_struct *task, unsigned long *stack)
+{
+#ifdef CONFIG_PRINTK
+ unsigned int *addr, *endstack, *fp = 0, *frame;
+ unsigned short *ins_addr;
+ char buf[150];
+ unsigned int i, j, ret_addr, frame_no = 0;
+
+ /*
+	 * If we have been passed a specific stack, use it; otherwise, if we have
+	 * been passed a task structure, use its stack; otherwise fall back to the
+	 * stack where the local variable "stack" lives.
+ */
+
+ if (stack == NULL) {
+ if (task) {
+ /* We know this is a kernel stack, so this is the start/end */
+ stack = (unsigned long *)task->thread.ksp;
+ endstack = (unsigned int *)(((unsigned int)(stack) & ~(THREAD_SIZE - 1)) + THREAD_SIZE);
+ } else {
+ /* print out the existing stack info */
+ stack = (unsigned long *)&stack;
+ endstack = (unsigned int *)PAGE_ALIGN((unsigned int)stack);
+ }
+ } else
+ endstack = (unsigned int *)PAGE_ALIGN((unsigned int)stack);
+
+ printk(KERN_NOTICE "Stack info:\n");
+ decode_address(buf, (unsigned int)stack);
+ printk(KERN_NOTICE " SP: [0x%p] %s\n", stack, buf);
+
+ if (!access_ok(VERIFY_READ, stack, (unsigned int)endstack - (unsigned int)stack)) {
+ printk(KERN_NOTICE "Invalid stack pointer\n");
+ return;
+ }
+
+ /* First thing is to look for a frame pointer */
+ for (addr = (unsigned int *)((unsigned int)stack & ~0xF); addr < endstack; addr++) {
+ if (*addr & 0x1)
+ continue;
+ ins_addr = (unsigned short *)*addr;
+ ins_addr--;
+ if (is_bfin_call(ins_addr))
+ fp = addr - 1;
+
+ if (fp) {
+ /* Let's check to see if it is a frame pointer */
+ while (fp >= (addr - 1) && fp < endstack
+ && fp && ((unsigned int) fp & 0x3) == 0)
+ fp = (unsigned int *)*fp;
+ if (fp == 0 || fp == endstack) {
+ fp = addr - 1;
+ break;
+ }
+ fp = 0;
+ }
+ }
+ if (fp) {
+ frame = fp;
+ printk(KERN_NOTICE " FP: (0x%p)\n", fp);
+ } else
+ frame = 0;
+
+ /*
+	 * Now that we think we know where things are, we walk the stack again,
+	 * this time printing things out. In case there is no frame pointer, we
+	 * still look for valid return addresses.
+ */
+
+ /* First time print out data, next time, print out symbols */
+ for (j = 0; j <= 1; j++) {
+ if (j)
+ printk(KERN_NOTICE "Return addresses in stack:\n");
+ else
+ printk(KERN_NOTICE " Memory from 0x%08lx to %p", ((long unsigned int)stack & ~0xF), endstack);
+
+ fp = frame;
+ frame_no = 0;
+
+ for (addr = (unsigned int *)((unsigned int)stack & ~0xF), i = 0;
+ addr < endstack; addr++, i++) {
+
+ ret_addr = 0;
+ if (!j && i % 8 == 0)
+ printk(KERN_NOTICE "%p:", addr);
+
+ /* if it is an odd address, or zero, just skip it */
+ if (*addr & 0x1 || !*addr)
+ goto print;
+
+ ins_addr = (unsigned short *)*addr;
+
+ /* Go back one instruction, and see if it is a CALL */
+ ins_addr--;
+ ret_addr = is_bfin_call(ins_addr);
+ print:
+ if (!j && stack == (unsigned long *)addr)
+ printk("[%08x]", *addr);
+ else if (ret_addr)
+ if (j) {
+ decode_address(buf, (unsigned int)*addr);
+ if (frame == addr) {
+ printk(KERN_NOTICE " frame %2i : %s\n", frame_no, buf);
+ continue;
+ }
+ printk(KERN_NOTICE " address : %s\n", buf);
+ } else
+ printk("<%08x>", *addr);
+ else if (fp == addr) {
+ if (j)
+ frame = addr+1;
+ else
+ printk("(%08x)", *addr);
+
+ fp = (unsigned int *)*addr;
+ frame_no++;
+
+ } else if (!j)
+ printk(" %08x ", *addr);
+ }
+ if (!j)
+ printk("\n");
+ }
+#endif
+}
+EXPORT_SYMBOL(show_stack);
+
+void dump_stack(void)
+{
+ unsigned long stack;
+#ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON
+ int tflags;
+#endif
+ trace_buffer_save(tflags);
+ dump_bfin_trace_buffer();
+ show_stack(current, &stack);
+ trace_buffer_restore(tflags);
+}
+EXPORT_SYMBOL(dump_stack);
diff --git a/arch/blackfin/kernel/exception.c b/arch/blackfin/kernel/exception.c
new file mode 100644
index 000000000000..9208b5fd5186
--- /dev/null
+++ b/arch/blackfin/kernel/exception.c
@@ -0,0 +1,45 @@
+/* Basic functions for adding/removing custom exception handlers
+ *
+ * Copyright 2004-2009 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later
+ */
+
+#include <linux/module.h>
+#include <asm/irq_handler.h>
+
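+/* Typical usage (hypothetical driver): install a handler on a free EVT slot
+ * with bfin_request_exception(excpt_nr, my_handler) and release it again with
+ * bfin_free_exception(excpt_nr, my_handler) on teardown; only slots still set
+ * to ex_replaceable can be claimed.
+ */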
+int bfin_request_exception(unsigned int exception, void (*handler)(void))
+{
+ void (*curr_handler)(void);
+
+ if (exception > 0x3F)
+ return -EINVAL;
+
+ curr_handler = ex_table[exception];
+
+ if (curr_handler != ex_replaceable)
+ return -EBUSY;
+
+ ex_table[exception] = handler;
+
+ return 0;
+}
+EXPORT_SYMBOL(bfin_request_exception);
+
+int bfin_free_exception(unsigned int exception, void (*handler)(void))
+{
+ void (*curr_handler)(void);
+
+ if (exception > 0x3F)
+ return -EINVAL;
+
+ curr_handler = ex_table[exception];
+
+ if (curr_handler != handler)
+ return -EBUSY;
+
+ ex_table[exception] = ex_replaceable;
+
+ return 0;
+}
+EXPORT_SYMBOL(bfin_free_exception);
diff --git a/arch/blackfin/kernel/kgdb.c b/arch/blackfin/kernel/kgdb.c
index 2c501ceb1e55..08bc44ea6883 100644
--- a/arch/blackfin/kernel/kgdb.c
+++ b/arch/blackfin/kernel/kgdb.c
@@ -66,7 +66,7 @@ void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
gdb_regs[BFIN_RETN] = regs->retn;
gdb_regs[BFIN_RETE] = regs->rete;
gdb_regs[BFIN_PC] = regs->pc;
- gdb_regs[BFIN_CC] = 0;
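+	/* the CC flag lives in bit 5 of ASTAT */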
+ gdb_regs[BFIN_CC] = (regs->astat >> 5) & 1;
gdb_regs[BFIN_EXTRA1] = 0;
gdb_regs[BFIN_EXTRA2] = 0;
gdb_regs[BFIN_EXTRA3] = 0;
@@ -439,6 +439,11 @@ int kgdb_validate_break_address(unsigned long addr)
return -EFAULT;
}
+void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
+{
+ regs->retx = ip;
+}
+
int kgdb_arch_init(void)
{
kgdb_single_step = 0;
diff --git a/arch/blackfin/kernel/pseudodbg.c b/arch/blackfin/kernel/pseudodbg.c
new file mode 100644
index 000000000000..db85bc94334e
--- /dev/null
+++ b/arch/blackfin/kernel/pseudodbg.c
@@ -0,0 +1,191 @@
+/* The fake debug assert instructions
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/ptrace.h>
+
+const char * const greg_names[] = {
+ "R0", "R1", "R2", "R3", "R4", "R5", "R6", "R7",
+ "P0", "P1", "P2", "P3", "P4", "P5", "SP", "FP",
+ "I0", "I1", "I2", "I3", "M0", "M1", "M2", "M3",
+ "B0", "B1", "B2", "B3", "L0", "L1", "L2", "L3",
+ "A0.X", "A0.W", "A1.X", "A1.W", "<res>", "<res>", "ASTAT", "RETS",
+ "<res>", "<res>", "<res>", "<res>", "<res>", "<res>", "<res>", "<res>",
+ "LC0", "LT0", "LB0", "LC1", "LT1", "LB1", "CYCLES", "CYCLES2",
+ "USP", "SEQSTAT", "SYSCFG", "RETI", "RETX", "RETN", "RETE", "EMUDAT",
+};
+
+static const char *get_allreg_name(int grp, int reg)
+{
+ return greg_names[(grp << 3) | reg];
+}
+
+/*
+ * Unfortunately, the pt_regs structure is not laid out the same way as the
+ * hardware register file, so we need to do some fix ups.
+ *
+ * CYCLES is not stored in the pt_regs structure - so, we just read it from
+ * the hardware.
+ *
+ * Don't support:
+ * - All reserved registers
+ * - All in group 7 are (supervisors only)
+ */
+
+static bool fix_up_reg(struct pt_regs *fp, long *value, int grp, int reg)
+{
+ long *val = &fp->r0;
+ unsigned long tmp;
+
+ /* Only do Dregs and Pregs for now */
+ if (grp == 5 ||
+ (grp == 4 && (reg == 4 || reg == 5)) ||
+ (grp == 7))
+ return false;
+
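+	/* The pt_regs save area stores these registers in the reverse order of
+	 * the hardware numbering, hence the backwards indexing from a base field.
+	 */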
+ if (grp == 0 || (grp == 1 && reg < 6))
+ val -= (reg + 8 * grp);
+ else if (grp == 1 && reg == 6)
+ val = &fp->usp;
+ else if (grp == 1 && reg == 7)
+ val = &fp->fp;
+ else if (grp == 2) {
+ val = &fp->i0;
+ val -= reg;
+ } else if (grp == 3 && reg >= 4) {
+ val = &fp->l0;
+ val -= (reg - 4);
+ } else if (grp == 3 && reg < 4) {
+ val = &fp->b0;
+ val -= reg;
+ } else if (grp == 4 && reg < 4) {
+ val = &fp->a0x;
+ val -= reg;
+ } else if (grp == 4 && reg == 6)
+ val = &fp->astat;
+ else if (grp == 4 && reg == 7)
+ val = &fp->rets;
+ else if (grp == 6 && reg < 6) {
+ val = &fp->lc0;
+ val -= reg;
+ } else if (grp == 6 && reg == 6) {
+ __asm__ __volatile__("%0 = cycles;\n" : "=d"(tmp));
+ val = &tmp;
+ } else if (grp == 6 && reg == 7) {
+ __asm__ __volatile__("%0 = cycles2;\n" : "=d"(tmp));
+ val = &tmp;
+ }
+
+ *value = *val;
+ return true;
+
+}
+
+#define PseudoDbg_Assert_opcode 0xf0000000
+#define PseudoDbg_Assert_expected_bits 0
+#define PseudoDbg_Assert_expected_mask 0xffff
+#define PseudoDbg_Assert_regtest_bits 16
+#define PseudoDbg_Assert_regtest_mask 0x7
+#define PseudoDbg_Assert_grp_bits 19
+#define PseudoDbg_Assert_grp_mask 0x7
+#define PseudoDbg_Assert_dbgop_bits 22
+#define PseudoDbg_Assert_dbgop_mask 0x3
+#define PseudoDbg_Assert_dontcare_bits 24
+#define PseudoDbg_Assert_dontcare_mask 0x7
+#define PseudoDbg_Assert_code_bits 27
+#define PseudoDbg_Assert_code_mask 0x1f
+
+/*
+ * DBGA - debug assert
+ */
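+/* Worked example, per the field layout above: opcode 0xF0001234 has grp 0,
+ * regtest 0, dbgop 0 and expected 0x1234, i.e. DBGA (R0.L, 0x1234).
+ */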
+bool execute_pseudodbg_assert(struct pt_regs *fp, unsigned int opcode)
+{
+ int expected = ((opcode >> PseudoDbg_Assert_expected_bits) & PseudoDbg_Assert_expected_mask);
+ int dbgop = ((opcode >> (PseudoDbg_Assert_dbgop_bits)) & PseudoDbg_Assert_dbgop_mask);
+ int grp = ((opcode >> (PseudoDbg_Assert_grp_bits)) & PseudoDbg_Assert_grp_mask);
+ int regtest = ((opcode >> (PseudoDbg_Assert_regtest_bits)) & PseudoDbg_Assert_regtest_mask);
+ long value;
+
+ if ((opcode & 0xFF000000) != PseudoDbg_Assert_opcode)
+ return false;
+
+ if (!fix_up_reg(fp, &value, grp, regtest))
+ return false;
+
+ if (dbgop == 0 || dbgop == 2) {
+ /* DBGA ( regs_lo , uimm16 ) */
+ /* DBGAL ( regs , uimm16 ) */
+ if (expected != (value & 0xFFFF)) {
+ pr_notice("DBGA (%s.L,0x%x) failure, got 0x%x\n",
+ get_allreg_name(grp, regtest),
+ expected, (unsigned int)(value & 0xFFFF));
+ return false;
+ }
+
+ } else if (dbgop == 1 || dbgop == 3) {
+ /* DBGA ( regs_hi , uimm16 ) */
+ /* DBGAH ( regs , uimm16 ) */
+ if (expected != ((value >> 16) & 0xFFFF)) {
+ pr_notice("DBGA (%s.H,0x%x) failure, got 0x%x\n",
+ get_allreg_name(grp, regtest),
+ expected, (unsigned int)((value >> 16) & 0xFFFF));
+ return false;
+ }
+ }
+
+ fp->pc += 4;
+ return true;
+}
+
+#define PseudoDbg_opcode 0xf8000000
+#define PseudoDbg_reg_bits 0
+#define PseudoDbg_reg_mask 0x7
+#define PseudoDbg_grp_bits 3
+#define PseudoDbg_grp_mask 0x7
+#define PseudoDbg_fn_bits 6
+#define PseudoDbg_fn_mask 0x3
+#define PseudoDbg_code_bits 8
+#define PseudoDbg_code_mask 0xff
+
+/*
+ * DBG - debug (dump a register value out)
+ */
+bool execute_pseudodbg(struct pt_regs *fp, unsigned int opcode)
+{
+ int grp, fn, reg;
+ long value, value1;
+
+ if ((opcode & 0xFF000000) != PseudoDbg_opcode)
+ return false;
+
+ opcode >>= 16;
+ grp = ((opcode >> PseudoDbg_grp_bits) & PseudoDbg_reg_mask);
+ fn = ((opcode >> PseudoDbg_fn_bits) & PseudoDbg_fn_mask);
+ reg = ((opcode >> PseudoDbg_reg_bits) & PseudoDbg_reg_mask);
+
+ if (fn == 3 && (reg == 0 || reg == 1)) {
+ if (!fix_up_reg(fp, &value, 4, 2 * reg))
+ return false;
+ if (!fix_up_reg(fp, &value1, 4, 2 * reg + 1))
+ return false;
+
+ pr_notice("DBG A%i = %02lx%08lx\n", reg, value & 0xFF, value1);
+ fp->pc += 2;
+ return true;
+
+ } else if (fn == 0) {
+ if (!fix_up_reg(fp, &value, grp, reg))
+ return false;
+
+ pr_notice("DBG %s = %08lx\n", get_allreg_name(grp, reg), value);
+ fp->pc += 2;
+ return true;
+ }
+
+ return false;
+}
diff --git a/arch/blackfin/kernel/ptrace.c b/arch/blackfin/kernel/ptrace.c
index 43eb969405d1..6ec77685df52 100644
--- a/arch/blackfin/kernel/ptrace.c
+++ b/arch/blackfin/kernel/ptrace.c
@@ -292,28 +292,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
break;
}
-#ifdef CONFIG_BINFMT_ELF_FDPIC
- case PTRACE_GETFDPIC: {
- unsigned long tmp = 0;
-
- switch (addr) {
- case_PTRACE_GETFDPIC_EXEC:
- case PTRACE_GETFDPIC_EXEC:
- tmp = child->mm->context.exec_fdpic_loadmap;
- break;
- case_PTRACE_GETFDPIC_INTERP:
- case PTRACE_GETFDPIC_INTERP:
- tmp = child->mm->context.interp_fdpic_loadmap;
- break;
- default:
- break;
- }
-
- ret = put_user(tmp, datap);
- break;
- }
-#endif
-
/* when I and D space are separate, this will have to be fixed. */
case PTRACE_POKEDATA:
pr_debug("ptrace: PTRACE_PEEKDATA\n");
@@ -357,8 +335,14 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
case PTRACE_PEEKUSR:
switch (addr) {
#ifdef CONFIG_BINFMT_ELF_FDPIC /* backwards compat */
- case PT_FDPIC_EXEC: goto case_PTRACE_GETFDPIC_EXEC;
- case PT_FDPIC_INTERP: goto case_PTRACE_GETFDPIC_INTERP;
+ case PT_FDPIC_EXEC:
+ request = PTRACE_GETFDPIC;
+ addr = PTRACE_GETFDPIC_EXEC;
+ goto case_default;
+ case PT_FDPIC_INTERP:
+ request = PTRACE_GETFDPIC;
+ addr = PTRACE_GETFDPIC_INTERP;
+ goto case_default;
#endif
default:
ret = get_reg(child, addr, datap);
@@ -385,6 +369,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
0, sizeof(struct pt_regs),
(const void __user *)data);
+ case_default:
default:
ret = ptrace_request(child, request, addr, data);
break;
diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c
index 8e2efceb364b..d37a397f43f5 100644
--- a/arch/blackfin/kernel/setup.c
+++ b/arch/blackfin/kernel/setup.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2004-2009 Analog Devices Inc.
+ * Copyright 2004-2010 Analog Devices Inc.
*
* Licensed under the GPL-2 or later.
*/
@@ -925,7 +925,7 @@ void __init setup_arch(char **cmdline_p)
else if (_bfin_swrst & RESET_SOFTWARE)
printk(KERN_NOTICE "Reset caused by Software reset\n");
- printk(KERN_INFO "Blackfin support (C) 2004-2009 Analog Devices, Inc.\n");
+ printk(KERN_INFO "Blackfin support (C) 2004-2010 Analog Devices, Inc.\n");
if (bfin_compiled_revid() == 0xffff)
printk(KERN_INFO "Compiled for ADSP-%s Rev any, running on 0.%d\n", CPU, bfin_revid());
else if (bfin_compiled_revid() == -1)
diff --git a/arch/blackfin/kernel/sys_bfin.c b/arch/blackfin/kernel/sys_bfin.c
index 2e7f8e10bf87..bdc1e2f0da32 100644
--- a/arch/blackfin/kernel/sys_bfin.c
+++ b/arch/blackfin/kernel/sys_bfin.c
@@ -47,3 +47,26 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr,
}
EXPORT_SYMBOL(get_fb_unmapped_area);
#endif
+
+/* Needed for legacy userspace atomic emulation */
+static DEFINE_SPINLOCK(bfin_spinlock_lock);
+
+#ifdef CONFIG_SYS_BFIN_SPINLOCK_L1
+__attribute__((l1_text))
+#endif
+asmlinkage int sys_bfin_spinlock(int *p)
+{
+ int ret, tmp = 0;
+
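+	/* Userspace test-and-set: return 0 if the word was clear (and set it),
+	 * 1 if it was already set, or the error from get_user() on a bad pointer.
+	 */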
+ spin_lock(&bfin_spinlock_lock); /* This would also hold kernel preemption. */
+ ret = get_user(tmp, p);
+ if (likely(ret == 0)) {
+ if (unlikely(tmp))
+ ret = 1;
+ else
+ put_user(1, p);
+ }
+ spin_unlock(&bfin_spinlock_lock);
+
+ return ret;
+}
diff --git a/arch/blackfin/kernel/time-ts.c b/arch/blackfin/kernel/time-ts.c
index cb7a01d4f009..8c9a43daf80f 100644
--- a/arch/blackfin/kernel/time-ts.c
+++ b/arch/blackfin/kernel/time-ts.c
@@ -353,9 +353,15 @@ void bfin_coretmr_clockevent_init(void)
#endif /* CONFIG_TICKSOURCE_CORETMR */
-void __init time_init(void)
+void read_persistent_clock(struct timespec *ts)
{
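+	/* 37 years plus 9 leap days after the Unix epoch */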
time_t secs_since_1970 = (365 * 37 + 9) * 24 * 60 * 60; /* 1 Jan 2007 */
+ ts->tv_sec = secs_since_1970;
+ ts->tv_nsec = 0;
+}
+
+void __init time_init(void)
+{
#ifdef CONFIG_RTC_DRV_BFIN
/* [#2663] hack to filter junk RTC values that would cause
@@ -368,11 +374,6 @@ void __init time_init(void)
}
#endif
- /* Initialize xtime. From now on, xtime is updated with timer interrupts */
- xtime.tv_sec = secs_since_1970;
- xtime.tv_nsec = 0;
- set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec);
-
bfin_cs_cycles_init();
bfin_cs_gptimer0_init();
diff --git a/arch/blackfin/kernel/time.c b/arch/blackfin/kernel/time.c
index 13c1ee3e6408..c9113619029f 100644
--- a/arch/blackfin/kernel/time.c
+++ b/arch/blackfin/kernel/time.c
@@ -112,11 +112,6 @@ u32 arch_gettimeoffset(void)
}
#endif
-static inline int set_rtc_mmss(unsigned long nowtime)
-{
- return 0;
-}
-
/*
* timer_interrupt() needs to keep up the real-time clock,
* as well as call the "do_timer()" routine every clocktick
@@ -126,29 +121,8 @@ __attribute__((l1_text))
#endif
irqreturn_t timer_interrupt(int irq, void *dummy)
{
- /* last time the cmos clock got updated */
- static long last_rtc_update;
-
write_seqlock(&xtime_lock);
do_timer(1);
-
- /*
- * If we have an externally synchronized Linux clock, then update
- * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
- * called as close as possible to 500 ms before the new second starts.
- */
- if (ntp_synced() &&
- xtime.tv_sec > last_rtc_update + 660 &&
- (xtime.tv_nsec / NSEC_PER_USEC) >=
- 500000 - ((unsigned)TICK_SIZE) / 2
- && (xtime.tv_nsec / NSEC_PER_USEC) <=
- 500000 + ((unsigned)TICK_SIZE) / 2) {
- if (set_rtc_mmss(xtime.tv_sec) == 0)
- last_rtc_update = xtime.tv_sec;
- else
- /* Do it again in 60s. */
- last_rtc_update = xtime.tv_sec - 600;
- }
write_sequnlock(&xtime_lock);
#ifdef CONFIG_IPIPE
@@ -161,10 +135,15 @@ irqreturn_t timer_interrupt(int irq, void *dummy)
return IRQ_HANDLED;
}
-void __init time_init(void)
+void read_persistent_clock(struct timespec *ts)
{
time_t secs_since_1970 = (365 * 37 + 9) * 24 * 60 * 60; /* 1 Jan 2007 */
+ ts->tv_sec = secs_since_1970;
+ ts->tv_nsec = 0;
+}
+
+void __init time_init(void)
+{
#ifdef CONFIG_RTC_DRV_BFIN
/* [#2663] hack to filter junk RTC values that would cause
* userspace to have to deal with time values greater than
@@ -176,11 +155,5 @@ void __init time_init(void)
}
#endif
- /* Initialize xtime. From now on, xtime is updated with timer interrupts */
- xtime.tv_sec = secs_since_1970;
- xtime.tv_nsec = 0;
-
- wall_to_monotonic.tv_sec = -xtime.tv_sec;
-
time_sched_init(timer_interrupt);
}
diff --git a/arch/blackfin/kernel/trace.c b/arch/blackfin/kernel/trace.c
new file mode 100644
index 000000000000..59fcdf6b0138
--- /dev/null
+++ b/arch/blackfin/kernel/trace.c
@@ -0,0 +1,981 @@
+/* Provide some functions which dump the trace buffer in a nice way for people
+ * to read, so they can understand what is going on
+ *
+ * Copyright 2004-2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later
+ */
+
+#include <linux/kernel.h>
+#include <linux/hardirq.h>
+#include <linux/thread_info.h>
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/kallsyms.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <asm/dma.h>
+#include <asm/trace.h>
+#include <asm/fixed_code.h>
+#include <asm/traps.h>
+#include <asm/irq_handler.h>
+
+void decode_address(char *buf, unsigned long address)
+{
+ struct task_struct *p;
+ struct mm_struct *mm;
+ unsigned long flags, offset;
+ unsigned char in_atomic = (bfin_read_IPEND() & 0x10) || in_atomic();
+ struct rb_node *n;
+
+#ifdef CONFIG_KALLSYMS
+ unsigned long symsize;
+ const char *symname;
+ char *modname;
+ char *delim = ":";
+ char namebuf[128];
+#endif
+
+ buf += sprintf(buf, "<0x%08lx> ", address);
+
+#ifdef CONFIG_KALLSYMS
+ /* look up the address and see if we are in kernel space */
+ symname = kallsyms_lookup(address, &symsize, &offset, &modname, namebuf);
+
+ if (symname) {
+ /* yeah! kernel space! */
+ if (!modname)
+ modname = delim = "";
+ sprintf(buf, "{ %s%s%s%s + 0x%lx }",
+ delim, modname, delim, symname,
+ (unsigned long)offset);
+ return;
+ }
+#endif
+
+ if (address >= FIXED_CODE_START && address < FIXED_CODE_END) {
+ /* Problem in fixed code section? */
+ strcat(buf, "/* Maybe fixed code section */");
+ return;
+
+ } else if (address < CONFIG_BOOT_LOAD) {
+ /* Problem somewhere before the kernel start address */
+ strcat(buf, "/* Maybe null pointer? */");
+ return;
+
+ } else if (address >= COREMMR_BASE) {
+ strcat(buf, "/* core mmrs */");
+ return;
+
+ } else if (address >= SYSMMR_BASE) {
+ strcat(buf, "/* system mmrs */");
+ return;
+
+ } else if (address >= L1_ROM_START && address < L1_ROM_START + L1_ROM_LENGTH) {
+ strcat(buf, "/* on-chip L1 ROM */");
+ return;
+
+ } else if (address >= L1_SCRATCH_START && address < L1_SCRATCH_START + L1_SCRATCH_LENGTH) {
+ strcat(buf, "/* on-chip scratchpad */");
+ return;
+
+ } else if (address >= physical_mem_end && address < ASYNC_BANK0_BASE) {
+ strcat(buf, "/* unconnected memory */");
+ return;
+
+ } else if (address >= ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE && address < BOOT_ROM_START) {
+ strcat(buf, "/* reserved memory */");
+ return;
+
+ } else if (address >= L1_DATA_A_START && address < L1_DATA_A_START + L1_DATA_A_LENGTH) {
+ strcat(buf, "/* on-chip Data Bank A */");
+ return;
+
+ } else if (address >= L1_DATA_B_START && address < L1_DATA_B_START + L1_DATA_B_LENGTH) {
+ strcat(buf, "/* on-chip Data Bank B */");
+ return;
+ }
+
+ /*
+ * Don't walk any of the vmas if we are oopsing, it has been known
+ * to cause problems - corrupt vmas (kernel crashes) cause double faults
+ */
+ if (oops_in_progress) {
+ strcat(buf, "/* kernel dynamic memory (maybe user-space) */");
+ return;
+ }
+
+ /* looks like we're off in user-land, so let's walk all the
+	 * mappings of all our processes and see if we can't be a wee
+	 * bit more specific
+ */
+ write_lock_irqsave(&tasklist_lock, flags);
+ for_each_process(p) {
+ mm = (in_atomic ? p->mm : get_task_mm(p));
+ if (!mm)
+ continue;
+
+ if (!down_read_trylock(&mm->mmap_sem)) {
+ if (!in_atomic)
+ mmput(mm);
+ continue;
+ }
+
+ for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) {
+ struct vm_area_struct *vma;
+
+ vma = rb_entry(n, struct vm_area_struct, vm_rb);
+
+ if (address >= vma->vm_start && address < vma->vm_end) {
+ char _tmpbuf[256];
+ char *name = p->comm;
+ struct file *file = vma->vm_file;
+
+ if (file) {
+ char *d_name = d_path(&file->f_path, _tmpbuf,
+ sizeof(_tmpbuf));
+ if (!IS_ERR(d_name))
+ name = d_name;
+ }
+
+ /* FLAT does not have its text aligned to the start of
+ * the map while FDPIC ELF does ...
+ */
+
+ /* before we can check flat/fdpic, we need to
+ * make sure current is valid
+ */
+ if ((unsigned long)current >= FIXED_CODE_START &&
+ !((unsigned long)current & 0x3)) {
+ if (current->mm &&
+ (address > current->mm->start_code) &&
+ (address < current->mm->end_code))
+ offset = address - current->mm->start_code;
+ else
+ offset = (address - vma->vm_start) +
+ (vma->vm_pgoff << PAGE_SHIFT);
+
+ sprintf(buf, "[ %s + 0x%lx ]", name, offset);
+ } else
+ sprintf(buf, "[ %s vma:0x%lx-0x%lx]",
+ name, vma->vm_start, vma->vm_end);
+
+ up_read(&mm->mmap_sem);
+ if (!in_atomic)
+ mmput(mm);
+
+ if (buf[0] == '\0')
+ sprintf(buf, "[ %s ] dynamic memory", name);
+
+ goto done;
+ }
+ }
+
+ up_read(&mm->mmap_sem);
+ if (!in_atomic)
+ mmput(mm);
+ }
+
+ /*
+ * we were unable to find this address anywhere,
+ * or some MMs were skipped because they were in use.
+ */
+ sprintf(buf, "/* kernel dynamic memory */");
+
+done:
+ write_unlock_irqrestore(&tasklist_lock, flags);
+}
+
+#define EXPAND_LEN ((1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN) * 256 - 1)
+
+/*
+ * Similar to get_user, do some address checking, then dereference
+ * Return true on success, false on bad address
+ */
+bool get_mem16(unsigned short *val, unsigned short *address)
+{
+ unsigned long addr = (unsigned long)address;
+
+ /* Check for odd addresses */
+ if (addr & 0x1)
+ return false;
+
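+	/* Some regions (e.g. L1 instruction SRAM) cannot be read with a plain
+	 * core access, so pick the access method the region requires.
+	 */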
+ switch (bfin_mem_access_type(addr, 2)) {
+ case BFIN_MEM_ACCESS_CORE:
+ case BFIN_MEM_ACCESS_CORE_ONLY:
+ *val = *address;
+ return true;
+ case BFIN_MEM_ACCESS_DMA:
+ dma_memcpy(val, address, 2);
+ return true;
+ case BFIN_MEM_ACCESS_ITEST:
+ isram_memcpy(val, address, 2);
+ return true;
+ default: /* invalid access */
+ return false;
+ }
+}
+
+bool get_instruction(unsigned int *val, unsigned short *address)
+{
+ unsigned long addr = (unsigned long)address;
+ unsigned short opcode0, opcode1;
+
+ /* Check for odd addresses */
+ if (addr & 0x1)
+ return false;
+
+ /* MMR region will never have instructions */
+ if (addr >= SYSMMR_BASE)
+ return false;
+
+ /* Scratchpad will never have instructions */
+ if (addr >= L1_SCRATCH_START && addr < L1_SCRATCH_START + L1_SCRATCH_LENGTH)
+ return false;
+
+ /* Data banks will never have instructions */
+ if (addr >= BOOT_ROM_START + BOOT_ROM_LENGTH && addr < L1_CODE_START)
+ return false;
+
+ if (!get_mem16(&opcode0, address))
+ return false;
+
+ /* was this a 32-bit instruction? If so, get the next 16 bits */
+ if ((opcode0 & 0xc000) == 0xc000) {
+ if (!get_mem16(&opcode1, address + 1))
+ return false;
+ *val = (opcode0 << 16) + opcode1;
+ } else
+ *val = opcode0;
+
+ return true;
+}
+
+#if defined(CONFIG_DEBUG_BFIN_HWTRACE_ON)
+/*
+ * Decode the instruction if we are printing out the trace, as it
+ * makes things easier to follow without running it through objdump.
+ * Decode the change-of-flow and the common load/store instructions,
+ * which are the main cause of faults and discontinuities in the trace
+ * buffer.
+ */
+
+#define ProgCtrl_opcode 0x0000
+#define ProgCtrl_poprnd_bits 0
+#define ProgCtrl_poprnd_mask 0xf
+#define ProgCtrl_prgfunc_bits 4
+#define ProgCtrl_prgfunc_mask 0xf
+#define ProgCtrl_code_bits 8
+#define ProgCtrl_code_mask 0xff
+
+static void decode_ProgCtrl_0(unsigned int opcode)
+{
+ int poprnd = ((opcode >> ProgCtrl_poprnd_bits) & ProgCtrl_poprnd_mask);
+ int prgfunc = ((opcode >> ProgCtrl_prgfunc_bits) & ProgCtrl_prgfunc_mask);
+
+ if (prgfunc == 0 && poprnd == 0)
+ pr_cont("NOP");
+ else if (prgfunc == 1 && poprnd == 0)
+ pr_cont("RTS");
+ else if (prgfunc == 1 && poprnd == 1)
+ pr_cont("RTI");
+ else if (prgfunc == 1 && poprnd == 2)
+ pr_cont("RTX");
+ else if (prgfunc == 1 && poprnd == 3)
+ pr_cont("RTN");
+ else if (prgfunc == 1 && poprnd == 4)
+ pr_cont("RTE");
+ else if (prgfunc == 2 && poprnd == 0)
+ pr_cont("IDLE");
+ else if (prgfunc == 2 && poprnd == 3)
+ pr_cont("CSYNC");
+ else if (prgfunc == 2 && poprnd == 4)
+ pr_cont("SSYNC");
+ else if (prgfunc == 2 && poprnd == 5)
+ pr_cont("EMUEXCPT");
+ else if (prgfunc == 3)
+ pr_cont("CLI R%i", poprnd);
+ else if (prgfunc == 4)
+ pr_cont("STI R%i", poprnd);
+ else if (prgfunc == 5)
+ pr_cont("JUMP (P%i)", poprnd);
+ else if (prgfunc == 6)
+ pr_cont("CALL (P%i)", poprnd);
+ else if (prgfunc == 7)
+ pr_cont("CALL (PC + P%i)", poprnd);
+ else if (prgfunc == 8)
+ pr_cont("JUMP (PC + P%i", poprnd);
+ else if (prgfunc == 9)
+ pr_cont("RAISE %i", poprnd);
+ else if (prgfunc == 10)
+ pr_cont("EXCPT %i", poprnd);
+ else
+ pr_cont("0x%04x", opcode);
+
+}
+
+#define BRCC_opcode 0x1000
+#define BRCC_offset_bits 0
+#define BRCC_offset_mask 0x3ff
+#define BRCC_B_bits 10
+#define BRCC_B_mask 0x1
+#define BRCC_T_bits 11
+#define BRCC_T_mask 0x1
+#define BRCC_code_bits 12
+#define BRCC_code_mask 0xf
+
+static void decode_BRCC_0(unsigned int opcode)
+{
+ int B = ((opcode >> BRCC_B_bits) & BRCC_B_mask);
+ int T = ((opcode >> BRCC_T_bits) & BRCC_T_mask);
+
+ pr_cont("IF %sCC JUMP pcrel %s", T ? "" : "!", B ? "(BP)" : "");
+}
+
+#define CALLa_opcode 0xe2000000
+#define CALLa_addr_bits 0
+#define CALLa_addr_mask 0xffffff
+#define CALLa_S_bits 24
+#define CALLa_S_mask 0x1
+#define CALLa_code_bits 25
+#define CALLa_code_mask 0x7f
+
+static void decode_CALLa_0(unsigned int opcode)
+{
+ int S = ((opcode >> (CALLa_S_bits - 16)) & CALLa_S_mask);
+
+ if (S)
+ pr_cont("CALL pcrel");
+ else
+ pr_cont("JUMP.L");
+}
+
+#define LoopSetup_opcode 0xe0800000
+#define LoopSetup_eoffset_bits 0
+#define LoopSetup_eoffset_mask 0x3ff
+#define LoopSetup_dontcare_bits 10
+#define LoopSetup_dontcare_mask 0x3
+#define LoopSetup_reg_bits 12
+#define LoopSetup_reg_mask 0xf
+#define LoopSetup_soffset_bits 16
+#define LoopSetup_soffset_mask 0xf
+#define LoopSetup_c_bits 20
+#define LoopSetup_c_mask 0x1
+#define LoopSetup_rop_bits 21
+#define LoopSetup_rop_mask 0x3
+#define LoopSetup_code_bits 23
+#define LoopSetup_code_mask 0x1ff
+
+static void decode_LoopSetup_0(unsigned int opcode)
+{
+ int c = ((opcode >> LoopSetup_c_bits) & LoopSetup_c_mask);
+ int reg = ((opcode >> LoopSetup_reg_bits) & LoopSetup_reg_mask);
+ int rop = ((opcode >> LoopSetup_rop_bits) & LoopSetup_rop_mask);
+
+ pr_cont("LSETUP <> LC%i", c);
+ if ((rop & 1) == 1)
+ pr_cont("= P%i", reg);
+ if ((rop & 2) == 2)
+ pr_cont(" >> 0x1");
+}
+
+#define DspLDST_opcode 0x9c00
+#define DspLDST_reg_bits 0
+#define DspLDST_reg_mask 0x7
+#define DspLDST_i_bits 3
+#define DspLDST_i_mask 0x3
+#define DspLDST_m_bits 5
+#define DspLDST_m_mask 0x3
+#define DspLDST_aop_bits 7
+#define DspLDST_aop_mask 0x3
+#define DspLDST_W_bits 9
+#define DspLDST_W_mask 0x1
+#define DspLDST_code_bits 10
+#define DspLDST_code_mask 0x3f
+
+static void decode_dspLDST_0(unsigned int opcode)
+{
+ int i = ((opcode >> DspLDST_i_bits) & DspLDST_i_mask);
+ int m = ((opcode >> DspLDST_m_bits) & DspLDST_m_mask);
+ int W = ((opcode >> DspLDST_W_bits) & DspLDST_W_mask);
+ int aop = ((opcode >> DspLDST_aop_bits) & DspLDST_aop_mask);
+ int reg = ((opcode >> DspLDST_reg_bits) & DspLDST_reg_mask);
+
+ if (W == 0) {
+ pr_cont("R%i", reg);
+ switch (m) {
+ case 0:
+ pr_cont(" = ");
+ break;
+ case 1:
+ pr_cont(".L = ");
+ break;
+ case 2:
+ pr_cont(".W = ");
+ break;
+ }
+ }
+
+ pr_cont("[ I%i", i);
+
+ switch (aop) {
+ case 0:
+ pr_cont("++ ]");
+ break;
+ case 1:
+ pr_cont("-- ]");
+ break;
+ }
+
+ if (W == 1) {
+ pr_cont(" = R%i", reg);
+ switch (m) {
+ case 1:
+ pr_cont(".L = ");
+ break;
+ case 2:
+ pr_cont(".W = ");
+ break;
+ }
+ }
+}
+
+#define LDST_opcode 0x9000
+#define LDST_reg_bits 0
+#define LDST_reg_mask 0x7
+#define LDST_ptr_bits 3
+#define LDST_ptr_mask 0x7
+#define LDST_Z_bits 6
+#define LDST_Z_mask 0x1
+#define LDST_aop_bits 7
+#define LDST_aop_mask 0x3
+#define LDST_W_bits 9
+#define LDST_W_mask 0x1
+#define LDST_sz_bits 10
+#define LDST_sz_mask 0x3
+#define LDST_code_bits 12
+#define LDST_code_mask 0xf
+
+static void decode_LDST_0(unsigned int opcode)
+{
+ int Z = ((opcode >> LDST_Z_bits) & LDST_Z_mask);
+ int W = ((opcode >> LDST_W_bits) & LDST_W_mask);
+ int sz = ((opcode >> LDST_sz_bits) & LDST_sz_mask);
+ int aop = ((opcode >> LDST_aop_bits) & LDST_aop_mask);
+ int reg = ((opcode >> LDST_reg_bits) & LDST_reg_mask);
+ int ptr = ((opcode >> LDST_ptr_bits) & LDST_ptr_mask);
+
+ if (W == 0)
+ pr_cont("%s%i = ", (sz == 0 && Z == 1) ? "P" : "R", reg);
+
+ switch (sz) {
+ case 1:
+ pr_cont("W");
+ break;
+ case 2:
+ pr_cont("B");
+ break;
+ }
+
+ pr_cont("[P%i", ptr);
+
+ switch (aop) {
+ case 0:
+ pr_cont("++");
+ break;
+ case 1:
+ pr_cont("--");
+ break;
+ }
+ pr_cont("]");
+
+ if (W == 1)
+ pr_cont(" = %s%i ", (sz == 0 && Z == 1) ? "P" : "R", reg);
+
+ if (sz) {
+ if (Z)
+ pr_cont(" (X)");
+ else
+ pr_cont(" (Z)");
+ }
+}
+
+#define LDSTii_opcode 0xa000
+#define LDSTii_reg_bit 0
+#define LDSTii_reg_mask 0x7
+#define LDSTii_ptr_bit 3
+#define LDSTii_ptr_mask 0x7
+#define LDSTii_offset_bit 6
+#define LDSTii_offset_mask 0xf
+#define LDSTii_op_bit 10
+#define LDSTii_op_mask 0x3
+#define LDSTii_W_bit 12
+#define LDSTii_W_mask 0x1
+#define LDSTii_code_bit 13
+#define LDSTii_code_mask 0x7
+
+static void decode_LDSTii_0(unsigned int opcode)
+{
+ int reg = ((opcode >> LDSTii_reg_bit) & LDSTii_reg_mask);
+ int ptr = ((opcode >> LDSTii_ptr_bit) & LDSTii_ptr_mask);
+ int offset = ((opcode >> LDSTii_offset_bit) & LDSTii_offset_mask);
+ int op = ((opcode >> LDSTii_op_bit) & LDSTii_op_mask);
+ int W = ((opcode >> LDSTii_W_bit) & LDSTii_W_mask);
+
+ if (W == 0) {
+ pr_cont("%s%i = %s[P%i + %i]", op == 3 ? "R" : "P", reg,
+ op == 1 || op == 2 ? "" : "W", ptr, offset);
+ if (op == 2)
+ pr_cont("(Z)");
+ if (op == 3)
+ pr_cont("(X)");
+ } else {
+ pr_cont("%s[P%i + %i] = %s%i", op == 0 ? "" : "W", ptr,
+ offset, op == 3 ? "P" : "R", reg);
+ }
+}
+
+#define LDSTidxI_opcode 0xe4000000
+#define LDSTidxI_offset_bits 0
+#define LDSTidxI_offset_mask 0xffff
+#define LDSTidxI_reg_bits 16
+#define LDSTidxI_reg_mask 0x7
+#define LDSTidxI_ptr_bits 19
+#define LDSTidxI_ptr_mask 0x7
+#define LDSTidxI_sz_bits 22
+#define LDSTidxI_sz_mask 0x3
+#define LDSTidxI_Z_bits 24
+#define LDSTidxI_Z_mask 0x1
+#define LDSTidxI_W_bits 25
+#define LDSTidxI_W_mask 0x1
+#define LDSTidxI_code_bits 26
+#define LDSTidxI_code_mask 0x3f
+
+static void decode_LDSTidxI_0(unsigned int opcode)
+{
+ int Z = ((opcode >> LDSTidxI_Z_bits) & LDSTidxI_Z_mask);
+ int W = ((opcode >> LDSTidxI_W_bits) & LDSTidxI_W_mask);
+ int sz = ((opcode >> LDSTidxI_sz_bits) & LDSTidxI_sz_mask);
+ int reg = ((opcode >> LDSTidxI_reg_bits) & LDSTidxI_reg_mask);
+ int ptr = ((opcode >> LDSTidxI_ptr_bits) & LDSTidxI_ptr_mask);
+ int offset = ((opcode >> LDSTidxI_offset_bits) & LDSTidxI_offset_mask);
+
+ if (W == 0)
+ pr_cont("%s%i = ", sz == 0 && Z == 1 ? "P" : "R", reg);
+
+ if (sz == 1)
+ pr_cont("W");
+ if (sz == 2)
+ pr_cont("B");
+
+ pr_cont("[P%i + %s0x%x]", ptr, offset & 0x20 ? "-" : "",
+ (offset & 0x1f) << 2);
+
+ if (W == 0 && sz != 0) {
+ if (Z)
+ pr_cont("(X)");
+ else
+ pr_cont("(Z)");
+ }
+
+ if (W == 1)
+ pr_cont("= %s%i", (sz == 0 && Z == 1) ? "P" : "R", reg);
+
+}
+
+static void decode_opcode(unsigned int opcode)
+{
+#ifdef CONFIG_BUG
+ if (opcode == BFIN_BUG_OPCODE)
+ pr_cont("BUG");
+ else
+#endif
+ if ((opcode & 0xffffff00) == ProgCtrl_opcode)
+ decode_ProgCtrl_0(opcode);
+ else if ((opcode & 0xfffff000) == BRCC_opcode)
+ decode_BRCC_0(opcode);
+ else if ((opcode & 0xfffff000) == 0x2000)
+ pr_cont("JUMP.S");
+ else if ((opcode & 0xfe000000) == CALLa_opcode)
+ decode_CALLa_0(opcode);
+ else if ((opcode & 0xff8000C0) == LoopSetup_opcode)
+ decode_LoopSetup_0(opcode);
+ else if ((opcode & 0xfffffc00) == DspLDST_opcode)
+ decode_dspLDST_0(opcode);
+ else if ((opcode & 0xfffff000) == LDST_opcode)
+ decode_LDST_0(opcode);
+ else if ((opcode & 0xffffe000) == LDSTii_opcode)
+ decode_LDSTii_0(opcode);
+ else if ((opcode & 0xfc000000) == LDSTidxI_opcode)
+ decode_LDSTidxI_0(opcode);
+ else if (opcode & 0xffff0000)
+ pr_cont("0x%08x", opcode);
+ else
+ pr_cont("0x%04x", opcode);
+}
+
+#define BIT_MULTI_INS 0x08000000
+static void decode_instruction(unsigned short *address)
+{
+ unsigned int opcode;
+
+ if (!get_instruction(&opcode, address))
+ return;
+
+ decode_opcode(opcode);
+
+	/* If this is a 32-bit instruction, it may be a multi-issue instruction
+	 * (one 32-bit plus two 16-bit instructions).
+	 * This test collides with the UNLINK instruction, so disallow that.
+ */
+ if ((opcode & 0xc0000000) == 0xc0000000 &&
+ (opcode & BIT_MULTI_INS) &&
+ (opcode & 0xe8000000) != 0xe8000000) {
+ pr_cont(" || ");
+ if (!get_instruction(&opcode, address + 2))
+ return;
+ decode_opcode(opcode);
+ pr_cont(" || ");
+ if (!get_instruction(&opcode, address + 3))
+ return;
+ decode_opcode(opcode);
+ }
+}
+#endif
+
+void dump_bfin_trace_buffer(void)
+{
+#ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON
+ int tflags, i = 0, fault = 0;
+ char buf[150];
+ unsigned short *addr;
+ unsigned int cpu = raw_smp_processor_id();
+#ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
+ int j, index;
+#endif
+
+ trace_buffer_save(tflags);
+
+ pr_notice("Hardware Trace:\n");
+
+#ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
+ pr_notice("WARNING: Expanded trace turned on - can not trace exceptions\n");
+#endif
+
+ if (likely(bfin_read_TBUFSTAT() & TBUFCNT)) {
+ for (; bfin_read_TBUFSTAT() & TBUFCNT; i++) {
+ addr = (unsigned short *)bfin_read_TBUF();
+ decode_address(buf, (unsigned long)addr);
+ pr_notice("%4i Target : %s\n", i, buf);
+ /* Normally, the faulting instruction doesn't go into
+			 * the trace buffer (since it doesn't commit), so
+ * we print out the fault address here
+ */
+ if (!fault && addr == ((unsigned short *)evt_ivhw)) {
+ addr = (unsigned short *)bfin_read_TBUF();
+ decode_address(buf, (unsigned long)addr);
+ pr_notice(" FAULT : %s ", buf);
+ decode_instruction(addr);
+ pr_cont("\n");
+ fault = 1;
+ continue;
+ }
+ if (!fault && addr == (unsigned short *)trap &&
+ (cpu_pda[cpu].seqstat & SEQSTAT_EXCAUSE) > VEC_EXCPT15) {
+ decode_address(buf, cpu_pda[cpu].icplb_fault_addr);
+ pr_notice(" FAULT : %s ", buf);
+ decode_instruction((unsigned short *)cpu_pda[cpu].icplb_fault_addr);
+ pr_cont("\n");
+ fault = 1;
+ }
+ addr = (unsigned short *)bfin_read_TBUF();
+ decode_address(buf, (unsigned long)addr);
+ pr_notice(" Source : %s ", buf);
+ decode_instruction(addr);
+ pr_cont("\n");
+ }
+ }
+
+#ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
+ if (trace_buff_offset)
+ index = trace_buff_offset / 4;
+ else
+ index = EXPAND_LEN;
+
+ j = (1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN) * 128;
+ while (j) {
+ decode_address(buf, software_trace_buff[index]);
+ pr_notice("%4i Target : %s\n", i, buf);
+ index -= 1;
+ if (index < 0)
+ index = EXPAND_LEN;
+ decode_address(buf, software_trace_buff[index]);
+ pr_notice(" Source : %s ", buf);
+ decode_instruction((unsigned short *)software_trace_buff[index]);
+ pr_cont("\n");
+ index -= 1;
+ if (index < 0)
+ index = EXPAND_LEN;
+ j--;
+ i++;
+ }
+#endif
+
+ trace_buffer_restore(tflags);
+#endif
+}
+EXPORT_SYMBOL(dump_bfin_trace_buffer);
+
+void dump_bfin_process(struct pt_regs *fp)
+{
+ /* We should be able to look at fp->ipend, but we don't push it on the
+ * stack all the time, so do this until we fix that */
+ unsigned int context = bfin_read_IPEND();
+
+ if (oops_in_progress)
+ pr_emerg("Kernel OOPS in progress\n");
+
+ if (context & 0x0020 && (fp->seqstat & SEQSTAT_EXCAUSE) == VEC_HWERR)
+ pr_notice("HW Error context\n");
+ else if (context & 0x0020)
+ pr_notice("Deferred Exception context\n");
+ else if (context & 0x3FC0)
+ pr_notice("Interrupt context\n");
+ else if (context & 0x4000)
+ pr_notice("Deferred Interrupt context\n");
+ else if (context & 0x8000)
+ pr_notice("Kernel process context\n");
+
+ /* Because we are crashing, and pointers could be bad, we check things
+ * pretty closely before we use them
+ */
+ if ((unsigned long)current >= FIXED_CODE_START &&
+ !((unsigned long)current & 0x3) && current->pid) {
+ pr_notice("CURRENT PROCESS:\n");
+ if (current->comm >= (char *)FIXED_CODE_START)
+ pr_notice("COMM=%s PID=%d",
+ current->comm, current->pid);
+ else
+ pr_notice("COMM= invalid");
+
+ pr_cont(" CPU=%d\n", current_thread_info()->cpu);
+ if (!((unsigned long)current->mm & 0x3) &&
+ (unsigned long)current->mm >= FIXED_CODE_START) {
+ pr_notice("TEXT = 0x%p-0x%p DATA = 0x%p-0x%p\n",
+ (void *)current->mm->start_code,
+ (void *)current->mm->end_code,
+ (void *)current->mm->start_data,
+ (void *)current->mm->end_data);
+ pr_notice(" BSS = 0x%p-0x%p USER-STACK = 0x%p\n\n",
+ (void *)current->mm->end_data,
+ (void *)current->mm->brk,
+ (void *)current->mm->start_stack);
+ } else
+ pr_notice("invalid mm\n");
+ } else
+ pr_notice("No Valid process in current context\n");
+}
+
+void dump_bfin_mem(struct pt_regs *fp)
+{
+ unsigned short *addr, *erraddr, val = 0, err = 0;
+ char sti = 0, buf[6];
+
+ erraddr = (void *)fp->pc;
+
+ pr_notice("return address: [0x%p]; contents of:", erraddr);
+
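+	/* dump the 16-bit words from 32 bytes before to 32 bytes after the
+	 * (aligned) return address
+	 */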
+ for (addr = (unsigned short *)((unsigned long)erraddr & ~0xF) - 0x10;
+ addr < (unsigned short *)((unsigned long)erraddr & ~0xF) + 0x10;
+ addr++) {
+ if (!((unsigned long)addr & 0xF))
+ pr_notice("0x%p: ", addr);
+
+ if (!get_mem16(&val, addr)) {
+ val = 0;
+ sprintf(buf, "????");
+ } else
+ sprintf(buf, "%04x", val);
+
+ if (addr == erraddr) {
+ pr_cont("[%s]", buf);
+ err = val;
+ } else
+ pr_cont(" %s ", buf);
+
+ /* Do any previous instructions turn on interrupts? */
+ if (addr <= erraddr && /* in the past */
+ ((val >= 0x0040 && val <= 0x0047) || /* STI instruction */
+ val == 0x017b)) /* [SP++] = RETI */
+ sti = 1;
+ }
+
+ pr_cont("\n");
+
+ /* Hardware error interrupts can be deferred */
+ if (unlikely(sti && (fp->seqstat & SEQSTAT_EXCAUSE) == VEC_HWERR &&
+ oops_in_progress)){
+ pr_notice("Looks like this was a deferred error - sorry\n");
+#ifndef CONFIG_DEBUG_HWERR
+ pr_notice("The remaining message may be meaningless\n");
+ pr_notice("You should enable CONFIG_DEBUG_HWERR to get a better idea where it came from\n");
+#else
+ /* If we are handling only one peripheral interrupt
+ * and current mm and pid are valid, and the last error
+ * was in that user space process's text area
+ * print it out - because that is where the problem exists
+ */
+ if ((!(((fp)->ipend & ~0x30) & (((fp)->ipend & ~0x30) - 1))) &&
+ (current->pid && current->mm)) {
+ /* And the last RETI points to the current userspace context */
+ if ((fp + 1)->pc >= current->mm->start_code &&
+ (fp + 1)->pc <= current->mm->end_code) {
+ pr_notice("It might be better to look around here :\n");
+ pr_notice("-------------------------------------------\n");
+ show_regs(fp + 1);
+ pr_notice("-------------------------------------------\n");
+ }
+ }
+#endif
+ }
+}
+
+void show_regs(struct pt_regs *fp)
+{
+ char buf[150];
+ struct irqaction *action;
+ unsigned int i;
+ unsigned long flags = 0;
+ unsigned int cpu = raw_smp_processor_id();
+ unsigned char in_atomic = (bfin_read_IPEND() & 0x10) || in_atomic();
+
+ pr_notice("\n");
+ if (CPUID != bfin_cpuid())
+ pr_notice("Compiled for cpu family 0x%04x (Rev %d), "
+ "but running on:0x%04x (Rev %d)\n",
+ CPUID, bfin_compiled_revid(), bfin_cpuid(), bfin_revid());
+
+ pr_notice("ADSP-%s-0.%d",
+ CPU, bfin_compiled_revid());
+
+ if (bfin_compiled_revid() != bfin_revid())
+ pr_cont("(Detected 0.%d)", bfin_revid());
+
+ pr_cont(" %lu(MHz CCLK) %lu(MHz SCLK) (%s)\n",
+ get_cclk()/1000000, get_sclk()/1000000,
+#ifdef CONFIG_MPU
+ "mpu on"
+#else
+ "mpu off"
+#endif
+ );
+
+ pr_notice("%s", linux_banner);
+
+ pr_notice("\nSEQUENCER STATUS:\t\t%s\n", print_tainted());
+ pr_notice(" SEQSTAT: %08lx IPEND: %04lx IMASK: %04lx SYSCFG: %04lx\n",
+ (long)fp->seqstat, fp->ipend, cpu_pda[raw_smp_processor_id()].ex_imask, fp->syscfg);
+ if (fp->ipend & EVT_IRPTEN)
+ pr_notice(" Global Interrupts Disabled (IPEND[4])\n");
+ if (!(cpu_pda[raw_smp_processor_id()].ex_imask & (EVT_IVG13 | EVT_IVG12 | EVT_IVG11 |
+ EVT_IVG10 | EVT_IVG9 | EVT_IVG8 | EVT_IVG7 | EVT_IVTMR)))
+ pr_notice(" Peripheral interrupts masked off\n");
+ if (!(cpu_pda[raw_smp_processor_id()].ex_imask & (EVT_IVG15 | EVT_IVG14)))
+ pr_notice(" Kernel interrupts masked off\n");
+ if ((fp->seqstat & SEQSTAT_EXCAUSE) == VEC_HWERR) {
+ pr_notice(" HWERRCAUSE: 0x%lx\n",
+ (fp->seqstat & SEQSTAT_HWERRCAUSE) >> 14);
+#ifdef EBIU_ERRMST
+ /* If the error was from the EBIU, print it out */
+ if (bfin_read_EBIU_ERRMST() & CORE_ERROR) {
+ pr_notice(" EBIU Error Reason : 0x%04x\n",
+ bfin_read_EBIU_ERRMST());
+ pr_notice(" EBIU Error Address : 0x%08x\n",
+ bfin_read_EBIU_ERRADD());
+ }
+#endif
+ }
+ pr_notice(" EXCAUSE : 0x%lx\n",
+ fp->seqstat & SEQSTAT_EXCAUSE);
+ for (i = 2; i <= 15 ; i++) {
+ if (fp->ipend & (1 << i)) {
+ if (i != 4) {
+ decode_address(buf, bfin_read32(EVT0 + 4*i));
+ pr_notice(" physical IVG%i asserted : %s\n", i, buf);
+ } else
+ pr_notice(" interrupts disabled\n");
+ }
+ }
+
+ /* if no interrupts are going off, don't print this out */
+ if (fp->ipend & ~0x3F) {
+ for (i = 0; i < (NR_IRQS - 1); i++) {
+ if (!in_atomic)
+ raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
+
+ action = irq_desc[i].action;
+ if (!action)
+ goto unlock;
+
+ decode_address(buf, (unsigned int)action->handler);
+ pr_notice(" logical irq %3d mapped : %s", i, buf);
+ for (action = action->next; action; action = action->next) {
+ decode_address(buf, (unsigned int)action->handler);
+ pr_cont(", %s", buf);
+ }
+ pr_cont("\n");
+unlock:
+ if (!in_atomic)
+ raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ }
+ }
+
+ decode_address(buf, fp->rete);
+ pr_notice(" RETE: %s\n", buf);
+ decode_address(buf, fp->retn);
+ pr_notice(" RETN: %s\n", buf);
+ decode_address(buf, fp->retx);
+ pr_notice(" RETX: %s\n", buf);
+ decode_address(buf, fp->rets);
+ pr_notice(" RETS: %s\n", buf);
+ decode_address(buf, fp->pc);
+ pr_notice(" PC : %s\n", buf);
+
+ if (((long)fp->seqstat & SEQSTAT_EXCAUSE) &&
+ (((long)fp->seqstat & SEQSTAT_EXCAUSE) != VEC_HWERR)) {
+ decode_address(buf, cpu_pda[cpu].dcplb_fault_addr);
+ pr_notice("DCPLB_FAULT_ADDR: %s\n", buf);
+ decode_address(buf, cpu_pda[cpu].icplb_fault_addr);
+ pr_notice("ICPLB_FAULT_ADDR: %s\n", buf);
+ }
+
+ pr_notice("PROCESSOR STATE:\n");
+ pr_notice(" R0 : %08lx R1 : %08lx R2 : %08lx R3 : %08lx\n",
+ fp->r0, fp->r1, fp->r2, fp->r3);
+ pr_notice(" R4 : %08lx R5 : %08lx R6 : %08lx R7 : %08lx\n",
+ fp->r4, fp->r5, fp->r6, fp->r7);
+ pr_notice(" P0 : %08lx P1 : %08lx P2 : %08lx P3 : %08lx\n",
+ fp->p0, fp->p1, fp->p2, fp->p3);
+ pr_notice(" P4 : %08lx P5 : %08lx FP : %08lx SP : %08lx\n",
+ fp->p4, fp->p5, fp->fp, (long)fp);
+ pr_notice(" LB0: %08lx LT0: %08lx LC0: %08lx\n",
+ fp->lb0, fp->lt0, fp->lc0);
+ pr_notice(" LB1: %08lx LT1: %08lx LC1: %08lx\n",
+ fp->lb1, fp->lt1, fp->lc1);
+ pr_notice(" B0 : %08lx L0 : %08lx M0 : %08lx I0 : %08lx\n",
+ fp->b0, fp->l0, fp->m0, fp->i0);
+ pr_notice(" B1 : %08lx L1 : %08lx M1 : %08lx I1 : %08lx\n",
+ fp->b1, fp->l1, fp->m1, fp->i1);
+ pr_notice(" B2 : %08lx L2 : %08lx M2 : %08lx I2 : %08lx\n",
+ fp->b2, fp->l2, fp->m2, fp->i2);
+ pr_notice(" B3 : %08lx L3 : %08lx M3 : %08lx I3 : %08lx\n",
+ fp->b3, fp->l3, fp->m3, fp->i3);
+ pr_notice("A0.w: %08lx A0.x: %08lx A1.w: %08lx A1.x: %08lx\n",
+ fp->a0w, fp->a0x, fp->a1w, fp->a1x);
+
+ pr_notice("USP : %08lx ASTAT: %08lx\n",
+ rdusp(), fp->astat);
+
+ pr_notice("\n");
+}
diff --git a/arch/blackfin/kernel/traps.c b/arch/blackfin/kernel/traps.c
index ba70c4bc2699..59c1df75e4de 100644
--- a/arch/blackfin/kernel/traps.c
+++ b/arch/blackfin/kernel/traps.c
@@ -1,25 +1,22 @@
/*
- * Copyright 2004-2009 Analog Devices Inc.
+ * Main exception handling logic.
+ *
+ * Copyright 2004-2010 Analog Devices Inc.
*
* Licensed under the GPL-2 or later
*/
#include <linux/bug.h>
#include <linux/uaccess.h>
-#include <linux/interrupt.h>
#include <linux/module.h>
-#include <linux/kallsyms.h>
-#include <linux/fs.h>
-#include <linux/rbtree.h>
#include <asm/traps.h>
-#include <asm/cacheflush.h>
#include <asm/cplb.h>
-#include <asm/dma.h>
#include <asm/blackfin.h>
#include <asm/irq_handler.h>
#include <linux/irq.h>
#include <asm/trace.h>
#include <asm/fixed_code.h>
+#include <asm/pseudo_instructions.h>
#ifdef CONFIG_KGDB
# include <linux/kgdb.h>
@@ -62,194 +59,6 @@ void __init trap_init(void)
CSYNC();
}
-static void decode_address(char *buf, unsigned long address)
-{
-#ifdef CONFIG_DEBUG_VERBOSE
- struct task_struct *p;
- struct mm_struct *mm;
- unsigned long flags, offset;
- unsigned char in_atomic = (bfin_read_IPEND() & 0x10) || in_atomic();
- struct rb_node *n;
-
-#ifdef CONFIG_KALLSYMS
- unsigned long symsize;
- const char *symname;
- char *modname;
- char *delim = ":";
- char namebuf[128];
-#endif
-
- buf += sprintf(buf, "<0x%08lx> ", address);
-
-#ifdef CONFIG_KALLSYMS
- /* look up the address and see if we are in kernel space */
- symname = kallsyms_lookup(address, &symsize, &offset, &modname, namebuf);
-
- if (symname) {
- /* yeah! kernel space! */
- if (!modname)
- modname = delim = "";
- sprintf(buf, "{ %s%s%s%s + 0x%lx }",
- delim, modname, delim, symname,
- (unsigned long)offset);
- return;
- }
-#endif
-
- if (address >= FIXED_CODE_START && address < FIXED_CODE_END) {
- /* Problem in fixed code section? */
- strcat(buf, "/* Maybe fixed code section */");
- return;
-
- } else if (address < CONFIG_BOOT_LOAD) {
- /* Problem somewhere before the kernel start address */
- strcat(buf, "/* Maybe null pointer? */");
- return;
-
- } else if (address >= COREMMR_BASE) {
- strcat(buf, "/* core mmrs */");
- return;
-
- } else if (address >= SYSMMR_BASE) {
- strcat(buf, "/* system mmrs */");
- return;
-
- } else if (address >= L1_ROM_START && address < L1_ROM_START + L1_ROM_LENGTH) {
- strcat(buf, "/* on-chip L1 ROM */");
- return;
- }
-
- /*
- * Don't walk any of the vmas if we are oopsing, it has been known
- * to cause problems - corrupt vmas (kernel crashes) cause double faults
- */
- if (oops_in_progress) {
- strcat(buf, "/* kernel dynamic memory (maybe user-space) */");
- return;
- }
-
- /* looks like we're off in user-land, so let's walk all the
- * mappings of all our processes and see if we can't be a whee
- * bit more specific
- */
- write_lock_irqsave(&tasklist_lock, flags);
- for_each_process(p) {
- mm = (in_atomic ? p->mm : get_task_mm(p));
- if (!mm)
- continue;
-
- if (!down_read_trylock(&mm->mmap_sem)) {
- if (!in_atomic)
- mmput(mm);
- continue;
- }
-
- for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) {
- struct vm_area_struct *vma;
-
- vma = rb_entry(n, struct vm_area_struct, vm_rb);
-
- if (address >= vma->vm_start && address < vma->vm_end) {
- char _tmpbuf[256];
- char *name = p->comm;
- struct file *file = vma->vm_file;
-
- if (file) {
- char *d_name = d_path(&file->f_path, _tmpbuf,
- sizeof(_tmpbuf));
- if (!IS_ERR(d_name))
- name = d_name;
- }
-
- /* FLAT does not have its text aligned to the start of
- * the map while FDPIC ELF does ...
- */
-
- /* before we can check flat/fdpic, we need to
- * make sure current is valid
- */
- if ((unsigned long)current >= FIXED_CODE_START &&
- !((unsigned long)current & 0x3)) {
- if (current->mm &&
- (address > current->mm->start_code) &&
- (address < current->mm->end_code))
- offset = address - current->mm->start_code;
- else
- offset = (address - vma->vm_start) +
- (vma->vm_pgoff << PAGE_SHIFT);
-
- sprintf(buf, "[ %s + 0x%lx ]", name, offset);
- } else
- sprintf(buf, "[ %s vma:0x%lx-0x%lx]",
- name, vma->vm_start, vma->vm_end);
-
- up_read(&mm->mmap_sem);
- if (!in_atomic)
- mmput(mm);
-
- if (buf[0] == '\0')
- sprintf(buf, "[ %s ] dynamic memory", name);
-
- goto done;
- }
- }
-
- up_read(&mm->mmap_sem);
- if (!in_atomic)
- mmput(mm);
- }
-
- /*
- * we were unable to find this address anywhere,
- * or some MMs were skipped because they were in use.
- */
- sprintf(buf, "/* kernel dynamic memory */");
-
-done:
- write_unlock_irqrestore(&tasklist_lock, flags);
-#else
- sprintf(buf, " ");
-#endif
-}
-
-asmlinkage void double_fault_c(struct pt_regs *fp)
-{
-#ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON
- int j;
- trace_buffer_save(j);
-#endif
-
- console_verbose();
- oops_in_progress = 1;
-#ifdef CONFIG_DEBUG_VERBOSE
- printk(KERN_EMERG "Double Fault\n");
-#ifdef CONFIG_DEBUG_DOUBLEFAULT_PRINT
- if (((long)fp->seqstat & SEQSTAT_EXCAUSE) == VEC_UNCOV) {
- unsigned int cpu = raw_smp_processor_id();
- char buf[150];
- decode_address(buf, cpu_pda[cpu].retx_doublefault);
- printk(KERN_EMERG "While handling exception (EXCAUSE = 0x%x) at %s:\n",
- (unsigned int)cpu_pda[cpu].seqstat_doublefault & SEQSTAT_EXCAUSE, buf);
- decode_address(buf, cpu_pda[cpu].dcplb_doublefault_addr);
- printk(KERN_NOTICE " DCPLB_FAULT_ADDR: %s\n", buf);
- decode_address(buf, cpu_pda[cpu].icplb_doublefault_addr);
- printk(KERN_NOTICE " ICPLB_FAULT_ADDR: %s\n", buf);
-
- decode_address(buf, fp->retx);
- printk(KERN_NOTICE "The instruction at %s caused a double exception\n", buf);
- } else
-#endif
- {
- dump_bfin_process(fp);
- dump_bfin_mem(fp);
- show_regs(fp);
- dump_bfin_trace_buffer();
- }
-#endif
- panic("Double Fault - unrecoverable event");
-
-}
-
static int kernel_mode_regs(struct pt_regs *regs)
{
return regs->ipend & 0xffc0;
@@ -260,6 +69,9 @@ asmlinkage notrace void trap_c(struct pt_regs *fp)
#ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON
int j;
#endif
+#ifdef CONFIG_BFIN_PSEUDODBG_INSNS
+ int opcode;
+#endif
unsigned int cpu = raw_smp_processor_id();
const char *strerror = NULL;
int sig = 0;
@@ -392,6 +204,19 @@ asmlinkage notrace void trap_c(struct pt_regs *fp)
}
}
#endif
+#ifdef CONFIG_BFIN_PSEUDODBG_INSNS
+ /*
+	 * Support for the fake debug instructions: if handling the instruction
+	 * fails, just fall through to the normal illegal-opcode failure.
+	 * These instructions are not supported inside the kernel.
+ */
+ if (!kernel_mode_regs(fp) && get_instruction(&opcode, (unsigned short *)fp->pc)) {
+ if (execute_pseudodbg_assert(fp, opcode))
+ goto traps_done;
+ if (execute_pseudodbg(fp, opcode))
+ goto traps_done;
+ }
+#endif
info.si_code = ILL_ILLOPC;
sig = SIGILL;
strerror = KERN_NOTICE EXC_0x21(KERN_NOTICE);
@@ -672,659 +497,44 @@ asmlinkage notrace void trap_c(struct pt_regs *fp)
trace_buffer_restore(j);
}
-/* Typical exception handling routines */
-
-#define EXPAND_LEN ((1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN) * 256 - 1)
-
-/*
- * Similar to get_user, do some address checking, then dereference
- * Return true on success, false on bad address
- */
-static bool get_instruction(unsigned short *val, unsigned short *address)
-{
- unsigned long addr = (unsigned long)address;
-
- /* Check for odd addresses */
- if (addr & 0x1)
- return false;
-
- /* MMR region will never have instructions */
- if (addr >= SYSMMR_BASE)
- return false;
-
- switch (bfin_mem_access_type(addr, 2)) {
- case BFIN_MEM_ACCESS_CORE:
- case BFIN_MEM_ACCESS_CORE_ONLY:
- *val = *address;
- return true;
- case BFIN_MEM_ACCESS_DMA:
- dma_memcpy(val, address, 2);
- return true;
- case BFIN_MEM_ACCESS_ITEST:
- isram_memcpy(val, address, 2);
- return true;
- default: /* invalid access */
- return false;
- }
-}
-
-/*
- * decode the instruction if we are printing out the trace, as it
- * makes things easier to follow, without running it through objdump
- * These are the normal instructions which cause change of flow, which
- * would be at the source of the trace buffer
- */
-#if defined(CONFIG_DEBUG_VERBOSE) && defined(CONFIG_DEBUG_BFIN_HWTRACE_ON)
-static void decode_instruction(unsigned short *address)
-{
- unsigned short opcode;
-
- if (get_instruction(&opcode, address)) {
- if (opcode == 0x0010)
- verbose_printk("RTS");
- else if (opcode == 0x0011)
- verbose_printk("RTI");
- else if (opcode == 0x0012)
- verbose_printk("RTX");
- else if (opcode == 0x0013)
- verbose_printk("RTN");
- else if (opcode == 0x0014)
- verbose_printk("RTE");
- else if (opcode == 0x0025)
- verbose_printk("EMUEXCPT");
- else if (opcode >= 0x0040 && opcode <= 0x0047)
- verbose_printk("STI R%i", opcode & 7);
- else if (opcode >= 0x0050 && opcode <= 0x0057)
- verbose_printk("JUMP (P%i)", opcode & 7);
- else if (opcode >= 0x0060 && opcode <= 0x0067)
- verbose_printk("CALL (P%i)", opcode & 7);
- else if (opcode >= 0x0070 && opcode <= 0x0077)
- verbose_printk("CALL (PC+P%i)", opcode & 7);
- else if (opcode >= 0x0080 && opcode <= 0x0087)
- verbose_printk("JUMP (PC+P%i)", opcode & 7);
- else if (opcode >= 0x0090 && opcode <= 0x009F)
- verbose_printk("RAISE 0x%x", opcode & 0xF);
- else if (opcode >= 0x00A0 && opcode <= 0x00AF)
- verbose_printk("EXCPT 0x%x", opcode & 0xF);
- else if ((opcode >= 0x1000 && opcode <= 0x13FF) || (opcode >= 0x1800 && opcode <= 0x1BFF))
- verbose_printk("IF !CC JUMP");
- else if ((opcode >= 0x1400 && opcode <= 0x17ff) || (opcode >= 0x1c00 && opcode <= 0x1fff))
- verbose_printk("IF CC JUMP");
- else if (opcode >= 0x2000 && opcode <= 0x2fff)
- verbose_printk("JUMP.S");
- else if (opcode >= 0xe080 && opcode <= 0xe0ff)
- verbose_printk("LSETUP");
- else if (opcode >= 0xe200 && opcode <= 0xe2ff)
- verbose_printk("JUMP.L");
- else if (opcode >= 0xe300 && opcode <= 0xe3ff)
- verbose_printk("CALL pcrel");
- else
- verbose_printk("0x%04x", opcode);
- }
-
-}
-#endif
-
-void dump_bfin_trace_buffer(void)
-{
-#ifdef CONFIG_DEBUG_VERBOSE
-#ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON
- int tflags, i = 0;
- char buf[150];
- unsigned short *addr;
-#ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
- int j, index;
-#endif
-
- trace_buffer_save(tflags);
-
- printk(KERN_NOTICE "Hardware Trace:\n");
-
-#ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
- printk(KERN_NOTICE "WARNING: Expanded trace turned on - can not trace exceptions\n");
-#endif
-
- if (likely(bfin_read_TBUFSTAT() & TBUFCNT)) {
- for (; bfin_read_TBUFSTAT() & TBUFCNT; i++) {
- decode_address(buf, (unsigned long)bfin_read_TBUF());
- printk(KERN_NOTICE "%4i Target : %s\n", i, buf);
- addr = (unsigned short *)bfin_read_TBUF();
- decode_address(buf, (unsigned long)addr);
- printk(KERN_NOTICE " Source : %s ", buf);
- decode_instruction(addr);
- printk("\n");
- }
- }
-
-#ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
- if (trace_buff_offset)
- index = trace_buff_offset / 4;
- else
- index = EXPAND_LEN;
-
- j = (1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN) * 128;
- while (j) {
- decode_address(buf, software_trace_buff[index]);
- printk(KERN_NOTICE "%4i Target : %s\n", i, buf);
- index -= 1;
- if (index < 0 )
- index = EXPAND_LEN;
- decode_address(buf, software_trace_buff[index]);
- printk(KERN_NOTICE " Source : %s ", buf);
- decode_instruction((unsigned short *)software_trace_buff[index]);
- printk("\n");
- index -= 1;
- if (index < 0)
- index = EXPAND_LEN;
- j--;
- i++;
- }
-#endif
-
- trace_buffer_restore(tflags);
-#endif
-#endif
-}
-EXPORT_SYMBOL(dump_bfin_trace_buffer);
-
-#ifdef CONFIG_BUG
-int is_valid_bugaddr(unsigned long addr)
-{
- unsigned short opcode;
-
- if (!get_instruction(&opcode, (unsigned short *)addr))
- return 0;
-
- return opcode == BFIN_BUG_OPCODE;
-}
-#endif
-
-/*
- * Checks to see if the address pointed to is either a
- * 16-bit CALL instruction, or a 32-bit CALL instruction
- */
-static bool is_bfin_call(unsigned short *addr)
-{
- unsigned short opcode = 0, *ins_addr;
- ins_addr = (unsigned short *)addr;
-
- if (!get_instruction(&opcode, ins_addr))
- return false;
-
- if ((opcode >= 0x0060 && opcode <= 0x0067) ||
- (opcode >= 0x0070 && opcode <= 0x0077))
- return true;
-
- ins_addr--;
- if (!get_instruction(&opcode, ins_addr))
- return false;
-
- if (opcode >= 0xE300 && opcode <= 0xE3FF)
- return true;
-
- return false;
-
-}
-
-void show_stack(struct task_struct *task, unsigned long *stack)
-{
-#ifdef CONFIG_PRINTK
- unsigned int *addr, *endstack, *fp = 0, *frame;
- unsigned short *ins_addr;
- char buf[150];
- unsigned int i, j, ret_addr, frame_no = 0;
-
- /*
- * If we have been passed a specific stack, use that one otherwise
- * if we have been passed a task structure, use that, otherwise
- * use the stack of where the variable "stack" exists
- */
-
- if (stack == NULL) {
- if (task) {
- /* We know this is a kernel stack, so this is the start/end */
- stack = (unsigned long *)task->thread.ksp;
- endstack = (unsigned int *)(((unsigned int)(stack) & ~(THREAD_SIZE - 1)) + THREAD_SIZE);
- } else {
- /* print out the existing stack info */
- stack = (unsigned long *)&stack;
- endstack = (unsigned int *)PAGE_ALIGN((unsigned int)stack);
- }
- } else
- endstack = (unsigned int *)PAGE_ALIGN((unsigned int)stack);
-
- printk(KERN_NOTICE "Stack info:\n");
- decode_address(buf, (unsigned int)stack);
- printk(KERN_NOTICE " SP: [0x%p] %s\n", stack, buf);
-
- if (!access_ok(VERIFY_READ, stack, (unsigned int)endstack - (unsigned int)stack)) {
- printk(KERN_NOTICE "Invalid stack pointer\n");
- return;
- }
-
- /* First thing is to look for a frame pointer */
- for (addr = (unsigned int *)((unsigned int)stack & ~0xF); addr < endstack; addr++) {
- if (*addr & 0x1)
- continue;
- ins_addr = (unsigned short *)*addr;
- ins_addr--;
- if (is_bfin_call(ins_addr))
- fp = addr - 1;
-
- if (fp) {
- /* Let's check to see if it is a frame pointer */
- while (fp >= (addr - 1) && fp < endstack
- && fp && ((unsigned int) fp & 0x3) == 0)
- fp = (unsigned int *)*fp;
- if (fp == 0 || fp == endstack) {
- fp = addr - 1;
- break;
- }
- fp = 0;
- }
- }
- if (fp) {
- frame = fp;
- printk(KERN_NOTICE " FP: (0x%p)\n", fp);
- } else
- frame = 0;
-
- /*
- * Now that we think we know where things are, we
- * walk the stack again, this time printing things out
- * incase there is no frame pointer, we still look for
- * valid return addresses
- */
-
- /* First time print out data, next time, print out symbols */
- for (j = 0; j <= 1; j++) {
- if (j)
- printk(KERN_NOTICE "Return addresses in stack:\n");
- else
- printk(KERN_NOTICE " Memory from 0x%08lx to %p", ((long unsigned int)stack & ~0xF), endstack);
-
- fp = frame;
- frame_no = 0;
-
- for (addr = (unsigned int *)((unsigned int)stack & ~0xF), i = 0;
- addr < endstack; addr++, i++) {
-
- ret_addr = 0;
- if (!j && i % 8 == 0)
- printk(KERN_NOTICE "%p:",addr);
-
- /* if it is an odd address, or zero, just skip it */
- if (*addr & 0x1 || !*addr)
- goto print;
-
- ins_addr = (unsigned short *)*addr;
-
- /* Go back one instruction, and see if it is a CALL */
- ins_addr--;
- ret_addr = is_bfin_call(ins_addr);
- print:
- if (!j && stack == (unsigned long *)addr)
- printk("[%08x]", *addr);
- else if (ret_addr)
- if (j) {
- decode_address(buf, (unsigned int)*addr);
- if (frame == addr) {
- printk(KERN_NOTICE " frame %2i : %s\n", frame_no, buf);
- continue;
- }
- printk(KERN_NOTICE " address : %s\n", buf);
- } else
- printk("<%08x>", *addr);
- else if (fp == addr) {
- if (j)
- frame = addr+1;
- else
- printk("(%08x)", *addr);
-
- fp = (unsigned int *)*addr;
- frame_no++;
-
- } else if (!j)
- printk(" %08x ", *addr);
- }
- if (!j)
- printk("\n");
- }
-#endif
-}
-EXPORT_SYMBOL(show_stack);
-
-void dump_stack(void)
+asmlinkage void double_fault_c(struct pt_regs *fp)
{
- unsigned long stack;
#ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON
- int tflags;
+ int j;
+ trace_buffer_save(j);
#endif
- trace_buffer_save(tflags);
- dump_bfin_trace_buffer();
- show_stack(current, &stack);
- trace_buffer_restore(tflags);
-}
-EXPORT_SYMBOL(dump_stack);
-void dump_bfin_process(struct pt_regs *fp)
-{
+ console_verbose();
+ oops_in_progress = 1;
#ifdef CONFIG_DEBUG_VERBOSE
- /* We should be able to look at fp->ipend, but we don't push it on the
- * stack all the time, so do this until we fix that */
- unsigned int context = bfin_read_IPEND();
-
- if (oops_in_progress)
- verbose_printk(KERN_EMERG "Kernel OOPS in progress\n");
-
- if (context & 0x0020 && (fp->seqstat & SEQSTAT_EXCAUSE) == VEC_HWERR)
- verbose_printk(KERN_NOTICE "HW Error context\n");
- else if (context & 0x0020)
- verbose_printk(KERN_NOTICE "Deferred Exception context\n");
- else if (context & 0x3FC0)
- verbose_printk(KERN_NOTICE "Interrupt context\n");
- else if (context & 0x4000)
- verbose_printk(KERN_NOTICE "Deferred Interrupt context\n");
- else if (context & 0x8000)
- verbose_printk(KERN_NOTICE "Kernel process context\n");
-
- /* Because we are crashing, and pointers could be bad, we check things
- * pretty closely before we use them
- */
- if ((unsigned long)current >= FIXED_CODE_START &&
- !((unsigned long)current & 0x3) && current->pid) {
- verbose_printk(KERN_NOTICE "CURRENT PROCESS:\n");
- if (current->comm >= (char *)FIXED_CODE_START)
- verbose_printk(KERN_NOTICE "COMM=%s PID=%d",
- current->comm, current->pid);
- else
- verbose_printk(KERN_NOTICE "COMM= invalid");
+ printk(KERN_EMERG "Double Fault\n");
+#ifdef CONFIG_DEBUG_DOUBLEFAULT_PRINT
+ if (((long)fp->seqstat & SEQSTAT_EXCAUSE) == VEC_UNCOV) {
+ unsigned int cpu = raw_smp_processor_id();
+ char buf[150];
+ decode_address(buf, cpu_pda[cpu].retx_doublefault);
+ printk(KERN_EMERG "While handling exception (EXCAUSE = 0x%x) at %s:\n",
+ (unsigned int)cpu_pda[cpu].seqstat_doublefault & SEQSTAT_EXCAUSE, buf);
+ decode_address(buf, cpu_pda[cpu].dcplb_doublefault_addr);
+ printk(KERN_NOTICE " DCPLB_FAULT_ADDR: %s\n", buf);
+ decode_address(buf, cpu_pda[cpu].icplb_doublefault_addr);
+ printk(KERN_NOTICE " ICPLB_FAULT_ADDR: %s\n", buf);
- printk(KERN_CONT " CPU=%d\n", current_thread_info()->cpu);
- if (!((unsigned long)current->mm & 0x3) && (unsigned long)current->mm >= FIXED_CODE_START)
- verbose_printk(KERN_NOTICE
- "TEXT = 0x%p-0x%p DATA = 0x%p-0x%p\n"
- " BSS = 0x%p-0x%p USER-STACK = 0x%p\n\n",
- (void *)current->mm->start_code,
- (void *)current->mm->end_code,
- (void *)current->mm->start_data,
- (void *)current->mm->end_data,
- (void *)current->mm->end_data,
- (void *)current->mm->brk,
- (void *)current->mm->start_stack);
- else
- verbose_printk(KERN_NOTICE "invalid mm\n");
+ decode_address(buf, fp->retx);
+ printk(KERN_NOTICE "The instruction at %s caused a double exception\n", buf);
} else
- verbose_printk(KERN_NOTICE
- "No Valid process in current context\n");
-#endif
-}
-
-void dump_bfin_mem(struct pt_regs *fp)
-{
-#ifdef CONFIG_DEBUG_VERBOSE
- unsigned short *addr, *erraddr, val = 0, err = 0;
- char sti = 0, buf[6];
-
- erraddr = (void *)fp->pc;
-
- verbose_printk(KERN_NOTICE "return address: [0x%p]; contents of:", erraddr);
-
- for (addr = (unsigned short *)((unsigned long)erraddr & ~0xF) - 0x10;
- addr < (unsigned short *)((unsigned long)erraddr & ~0xF) + 0x10;
- addr++) {
- if (!((unsigned long)addr & 0xF))
- verbose_printk(KERN_NOTICE "0x%p: ", addr);
-
- if (!get_instruction(&val, addr)) {
- val = 0;
- sprintf(buf, "????");
- } else
- sprintf(buf, "%04x", val);
-
- if (addr == erraddr) {
- verbose_printk("[%s]", buf);
- err = val;
- } else
- verbose_printk(" %s ", buf);
-
- /* Do any previous instructions turn on interrupts? */
- if (addr <= erraddr && /* in the past */
- ((val >= 0x0040 && val <= 0x0047) || /* STI instruction */
- val == 0x017b)) /* [SP++] = RETI */
- sti = 1;
- }
-
- verbose_printk("\n");
-
- /* Hardware error interrupts can be deferred */
- if (unlikely(sti && (fp->seqstat & SEQSTAT_EXCAUSE) == VEC_HWERR &&
- oops_in_progress)){
- verbose_printk(KERN_NOTICE "Looks like this was a deferred error - sorry\n");
-#ifndef CONFIG_DEBUG_HWERR
- verbose_printk(KERN_NOTICE
-"The remaining message may be meaningless\n"
-"You should enable CONFIG_DEBUG_HWERR to get a better idea where it came from\n");
-#else
- /* If we are handling only one peripheral interrupt
- * and current mm and pid are valid, and the last error
- * was in that user space process's text area
- * print it out - because that is where the problem exists
- */
- if ((!(((fp)->ipend & ~0x30) & (((fp)->ipend & ~0x30) - 1))) &&
- (current->pid && current->mm)) {
- /* And the last RETI points to the current userspace context */
- if ((fp + 1)->pc >= current->mm->start_code &&
- (fp + 1)->pc <= current->mm->end_code) {
- verbose_printk(KERN_NOTICE "It might be better to look around here :\n");
- verbose_printk(KERN_NOTICE "-------------------------------------------\n");
- show_regs(fp + 1);
- verbose_printk(KERN_NOTICE "-------------------------------------------\n");
- }
- }
-#endif
- }
-#endif
-}
-
-void show_regs(struct pt_regs *fp)
-{
-#ifdef CONFIG_DEBUG_VERBOSE
- char buf [150];
- struct irqaction *action;
- unsigned int i;
- unsigned long flags = 0;
- unsigned int cpu = raw_smp_processor_id();
- unsigned char in_atomic = (bfin_read_IPEND() & 0x10) || in_atomic();
-
- verbose_printk(KERN_NOTICE "\n");
- if (CPUID != bfin_cpuid())
- verbose_printk(KERN_NOTICE "Compiled for cpu family 0x%04x (Rev %d), "
- "but running on:0x%04x (Rev %d)\n",
- CPUID, bfin_compiled_revid(), bfin_cpuid(), bfin_revid());
-
- verbose_printk(KERN_NOTICE "ADSP-%s-0.%d",
- CPU, bfin_compiled_revid());
-
- if (bfin_compiled_revid() != bfin_revid())
- verbose_printk("(Detected 0.%d)", bfin_revid());
-
- verbose_printk(" %lu(MHz CCLK) %lu(MHz SCLK) (%s)\n",
- get_cclk()/1000000, get_sclk()/1000000,
-#ifdef CONFIG_MPU
- "mpu on"
-#else
- "mpu off"
-#endif
- );
-
- verbose_printk(KERN_NOTICE "%s", linux_banner);
-
- verbose_printk(KERN_NOTICE "\nSEQUENCER STATUS:\t\t%s\n", print_tainted());
- verbose_printk(KERN_NOTICE " SEQSTAT: %08lx IPEND: %04lx IMASK: %04lx SYSCFG: %04lx\n",
- (long)fp->seqstat, fp->ipend, cpu_pda[raw_smp_processor_id()].ex_imask, fp->syscfg);
- if (fp->ipend & EVT_IRPTEN)
- verbose_printk(KERN_NOTICE " Global Interrupts Disabled (IPEND[4])\n");
- if (!(cpu_pda[raw_smp_processor_id()].ex_imask & (EVT_IVG13 | EVT_IVG12 | EVT_IVG11 |
- EVT_IVG10 | EVT_IVG9 | EVT_IVG8 | EVT_IVG7 | EVT_IVTMR)))
- verbose_printk(KERN_NOTICE " Peripheral interrupts masked off\n");
- if (!(cpu_pda[raw_smp_processor_id()].ex_imask & (EVT_IVG15 | EVT_IVG14)))
- verbose_printk(KERN_NOTICE " Kernel interrupts masked off\n");
- if ((fp->seqstat & SEQSTAT_EXCAUSE) == VEC_HWERR) {
- verbose_printk(KERN_NOTICE " HWERRCAUSE: 0x%lx\n",
- (fp->seqstat & SEQSTAT_HWERRCAUSE) >> 14);
-#ifdef EBIU_ERRMST
- /* If the error was from the EBIU, print it out */
- if (bfin_read_EBIU_ERRMST() & CORE_ERROR) {
- verbose_printk(KERN_NOTICE " EBIU Error Reason : 0x%04x\n",
- bfin_read_EBIU_ERRMST());
- verbose_printk(KERN_NOTICE " EBIU Error Address : 0x%08x\n",
- bfin_read_EBIU_ERRADD());
- }
#endif
+ {
+ dump_bfin_process(fp);
+ dump_bfin_mem(fp);
+ show_regs(fp);
+ dump_bfin_trace_buffer();
}
- verbose_printk(KERN_NOTICE " EXCAUSE : 0x%lx\n",
- fp->seqstat & SEQSTAT_EXCAUSE);
- for (i = 2; i <= 15 ; i++) {
- if (fp->ipend & (1 << i)) {
- if (i != 4) {
- decode_address(buf, bfin_read32(EVT0 + 4*i));
- verbose_printk(KERN_NOTICE " physical IVG%i asserted : %s\n", i, buf);
- } else
- verbose_printk(KERN_NOTICE " interrupts disabled\n");
- }
- }
-
- /* if no interrupts are going off, don't print this out */
- if (fp->ipend & ~0x3F) {
- for (i = 0; i < (NR_IRQS - 1); i++) {
- if (!in_atomic)
- raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
-
- action = irq_desc[i].action;
- if (!action)
- goto unlock;
-
- decode_address(buf, (unsigned int)action->handler);
- verbose_printk(KERN_NOTICE " logical irq %3d mapped : %s", i, buf);
- for (action = action->next; action; action = action->next) {
- decode_address(buf, (unsigned int)action->handler);
- verbose_printk(", %s", buf);
- }
- verbose_printk("\n");
-unlock:
- if (!in_atomic)
- raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
- }
- }
-
- decode_address(buf, fp->rete);
- verbose_printk(KERN_NOTICE " RETE: %s\n", buf);
- decode_address(buf, fp->retn);
- verbose_printk(KERN_NOTICE " RETN: %s\n", buf);
- decode_address(buf, fp->retx);
- verbose_printk(KERN_NOTICE " RETX: %s\n", buf);
- decode_address(buf, fp->rets);
- verbose_printk(KERN_NOTICE " RETS: %s\n", buf);
- decode_address(buf, fp->pc);
- verbose_printk(KERN_NOTICE " PC : %s\n", buf);
-
- if (((long)fp->seqstat & SEQSTAT_EXCAUSE) &&
- (((long)fp->seqstat & SEQSTAT_EXCAUSE) != VEC_HWERR)) {
- decode_address(buf, cpu_pda[cpu].dcplb_fault_addr);
- verbose_printk(KERN_NOTICE "DCPLB_FAULT_ADDR: %s\n", buf);
- decode_address(buf, cpu_pda[cpu].icplb_fault_addr);
- verbose_printk(KERN_NOTICE "ICPLB_FAULT_ADDR: %s\n", buf);
- }
-
- verbose_printk(KERN_NOTICE "PROCESSOR STATE:\n");
- verbose_printk(KERN_NOTICE " R0 : %08lx R1 : %08lx R2 : %08lx R3 : %08lx\n",
- fp->r0, fp->r1, fp->r2, fp->r3);
- verbose_printk(KERN_NOTICE " R4 : %08lx R5 : %08lx R6 : %08lx R7 : %08lx\n",
- fp->r4, fp->r5, fp->r6, fp->r7);
- verbose_printk(KERN_NOTICE " P0 : %08lx P1 : %08lx P2 : %08lx P3 : %08lx\n",
- fp->p0, fp->p1, fp->p2, fp->p3);
- verbose_printk(KERN_NOTICE " P4 : %08lx P5 : %08lx FP : %08lx SP : %08lx\n",
- fp->p4, fp->p5, fp->fp, (long)fp);
- verbose_printk(KERN_NOTICE " LB0: %08lx LT0: %08lx LC0: %08lx\n",
- fp->lb0, fp->lt0, fp->lc0);
- verbose_printk(KERN_NOTICE " LB1: %08lx LT1: %08lx LC1: %08lx\n",
- fp->lb1, fp->lt1, fp->lc1);
- verbose_printk(KERN_NOTICE " B0 : %08lx L0 : %08lx M0 : %08lx I0 : %08lx\n",
- fp->b0, fp->l0, fp->m0, fp->i0);
- verbose_printk(KERN_NOTICE " B1 : %08lx L1 : %08lx M1 : %08lx I1 : %08lx\n",
- fp->b1, fp->l1, fp->m1, fp->i1);
- verbose_printk(KERN_NOTICE " B2 : %08lx L2 : %08lx M2 : %08lx I2 : %08lx\n",
- fp->b2, fp->l2, fp->m2, fp->i2);
- verbose_printk(KERN_NOTICE " B3 : %08lx L3 : %08lx M3 : %08lx I3 : %08lx\n",
- fp->b3, fp->l3, fp->m3, fp->i3);
- verbose_printk(KERN_NOTICE "A0.w: %08lx A0.x: %08lx A1.w: %08lx A1.x: %08lx\n",
- fp->a0w, fp->a0x, fp->a1w, fp->a1x);
-
- verbose_printk(KERN_NOTICE "USP : %08lx ASTAT: %08lx\n",
- rdusp(), fp->astat);
-
- verbose_printk(KERN_NOTICE "\n");
#endif
-}
-
-#ifdef CONFIG_SYS_BFIN_SPINLOCK_L1
-asmlinkage int sys_bfin_spinlock(int *spinlock)__attribute__((l1_text));
-#endif
-
-static DEFINE_SPINLOCK(bfin_spinlock_lock);
-
-asmlinkage int sys_bfin_spinlock(int *p)
-{
- int ret, tmp = 0;
-
- spin_lock(&bfin_spinlock_lock); /* This would also hold kernel preemption. */
- ret = get_user(tmp, p);
- if (likely(ret == 0)) {
- if (unlikely(tmp))
- ret = 1;
- else
- put_user(1, p);
- }
- spin_unlock(&bfin_spinlock_lock);
- return ret;
-}
-
-int bfin_request_exception(unsigned int exception, void (*handler)(void))
-{
- void (*curr_handler)(void);
-
- if (exception > 0x3F)
- return -EINVAL;
-
- curr_handler = ex_table[exception];
-
- if (curr_handler != ex_replaceable)
- return -EBUSY;
-
- ex_table[exception] = handler;
+ panic("Double Fault - unrecoverable event");
- return 0;
}
-EXPORT_SYMBOL(bfin_request_exception);
-
-int bfin_free_exception(unsigned int exception, void (*handler)(void))
-{
- void (*curr_handler)(void);
-
- if (exception > 0x3F)
- return -EINVAL;
-
- curr_handler = ex_table[exception];
- if (curr_handler != handler)
- return -EBUSY;
-
- ex_table[exception] = ex_replaceable;
-
- return 0;
-}
-EXPORT_SYMBOL(bfin_free_exception);
void panic_cplb_error(int cplb_panic, struct pt_regs *fp)
{
@@ -1349,3 +559,23 @@ void panic_cplb_error(int cplb_panic, struct pt_regs *fp)
dump_stack();
panic("Unrecoverable event");
}
+
+#ifdef CONFIG_BUG
+int is_valid_bugaddr(unsigned long addr)
+{
+ unsigned int opcode;
+
+ if (!get_instruction(&opcode, (unsigned short *)addr))
+ return 0;
+
+ return opcode == BFIN_BUG_OPCODE;
+}
+#endif
+
+/* stub this out */
+#ifndef CONFIG_DEBUG_VERBOSE
+void show_regs(struct pt_regs *fp)
+{
+
+}
+#endif
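
The pseudo-debug hook added to trap_c() above boils down to the following control flow. This is only an illustrative C sketch, not the literal kernel code: try_pseudodbg() is a made-up wrapper name, while get_instruction(), execute_pseudodbg_assert() and execute_pseudodbg() are the helpers this patch references (their bodies live outside this hunk).

/* Illustrative sketch of the dispatch added to the illegal-opcode path. */
static int try_pseudodbg(struct pt_regs *fp)
{
	unsigned int opcode;

	/* Never emulate pseudo instructions for kernel-mode faults. */
	if (kernel_mode_regs(fp))
		return 0;

	/* Read the faulting opcode; bail out if that fails. */
	if (!get_instruction(&opcode, (unsigned short *)fp->pc))
		return 0;

	/* Try the pseudo "assert" forms first, then the generic ones. */
	if (execute_pseudodbg_assert(fp, opcode))
		return 1;	/* handled -- skip the SIGILL */
	if (execute_pseudodbg(fp, opcode))
		return 1;	/* handled -- skip the SIGILL */

	return 0;		/* not a pseudo insn -- deliver SIGILL */
}
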
diff --git a/arch/blackfin/lib/memset.S b/arch/blackfin/lib/memset.S
index c30d99b10969..eab1bef3f5bf 100644
--- a/arch/blackfin/lib/memset.S
+++ b/arch/blackfin/lib/memset.S
@@ -20,6 +20,7 @@
* R1 = filler byte
* R2 = count
* Favours word aligned data.
+ * The strncpy implementation relies on I0 and I1 not being clobbered by this function.
*/
ENTRY(_memset)
diff --git a/arch/blackfin/lib/strcmp.S b/arch/blackfin/lib/strcmp.S
new file mode 100644
index 000000000000..d7c1d158973b
--- /dev/null
+++ b/arch/blackfin/lib/strcmp.S
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2005-2010 Analog Devices Inc.
+ *
+ * Licensed under the ADI BSD license or the GPL-2 (or later)
+ */
+
+#include <linux/linkage.h>
+
+/* int strcmp(const char *s1, const char *s2);
+ * R0 = address (s1)
+ * R1 = address (s2)
+ *
+ * Returns an integer less than, equal to, or greater than zero if s1
+ * is found, respectively, to be less than, to match, or be greater
+ * than s2.
+ */
+
+#ifdef CONFIG_STRCMP_L1
+.section .l1.text
+#else
+.text
+#endif
+
+.align 2
+
+ENTRY(_strcmp)
+ P0 = R0 ; /* s1 */
+ P1 = R1 ; /* s2 */
+
+1:
+ R0 = B[P0++] (Z); /* get *s1 */
+ R1 = B[P1++] (Z); /* get *s2 */
+ CC = R0 == R1; /* compare a byte */
+ if ! cc jump 2f; /* not equal, break out */
+ CC = R0; /* at end of s1? */
+ if cc jump 1b (bp); /* no, keep going */
+ jump.s 3f; /* strings are equal */
+2:
+ R0 = R0 - R1; /* *s1 - *s2 */
+3:
+ RTS;
+
+ENDPROC(_strcmp)
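
The assembly above takes over from the trivial C wrapper removed just below. As a reminder of the semantics it implements, a plain C equivalent would look like the sketch here (illustrative only -- strcmp_ref is not part of the patch). Note the zero-extending (Z) byte loads in the assembly: the comparison and the returned difference are computed on unsigned byte values.

/* Illustrative C equivalent of the byte-wise compare loop above. */
int strcmp_ref(const char *s1, const char *s2)
{
	const unsigned char *a = (const unsigned char *)s1;
	const unsigned char *b = (const unsigned char *)s2;

	/* Walk both strings while the bytes match and s1 has not ended. */
	while (*a && *a == *b) {
		a++;
		b++;
	}
	/* Difference of the first differing (or terminating) bytes. */
	return *a - *b;
}
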
diff --git a/arch/blackfin/lib/strcmp.c b/arch/blackfin/lib/strcmp.c
deleted file mode 100644
index fde39a1950ce..000000000000
--- a/arch/blackfin/lib/strcmp.c
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * Provide symbol in case str func is not inlined.
- *
- * Copyright (c) 2006-2007 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
-
-#define strcmp __inline_strcmp
-#include <asm/string.h>
-#undef strcmp
-
-#include <linux/module.h>
-
-int strcmp(const char *dest, const char *src)
-{
- return __inline_strcmp(dest, src);
-}
-EXPORT_SYMBOL(strcmp);
diff --git a/arch/blackfin/lib/strcpy.S b/arch/blackfin/lib/strcpy.S
new file mode 100644
index 000000000000..a6a0c6363806
--- /dev/null
+++ b/arch/blackfin/lib/strcpy.S
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2005-2010 Analog Devices Inc.
+ *
+ * Licensed under the ADI BSD license or the GPL-2 (or later)
+ */
+
+#include <linux/linkage.h>
+
+/* char *strcpy(char *dest, const char *src);
+ * R0 = address (dest)
+ * R1 = address (src)
+ *
+ * Returns a pointer to the destination string dest
+ */
+
+#ifdef CONFIG_STRCPY_L1
+.section .l1.text
+#else
+.text
+#endif
+
+.align 2
+
+ENTRY(_strcpy)
+ P0 = R0 ; /* dst*/
+ P1 = R1 ; /* src*/
+
+1:
+ R1 = B [P1++] (Z);
+ B [P0++] = R1;
+ CC = R1;
+ if cc jump 1b (bp);
+ RTS;
+
+ENDPROC(_strcpy)
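
For reference, an illustrative C equivalent of the copy loop above (strcpy_ref is not part of the patch). As in the assembly, the original destination pointer is the return value, which is why R0 is never modified there.

/* Illustrative C equivalent: copy bytes up to and including the NUL. */
char *strcpy_ref(char *dest, const char *src)
{
	char *d = dest;

	while ((*d++ = *src++) != '\0')
		;	/* copying the terminating NUL ends the loop */
	return dest;
}
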
diff --git a/arch/blackfin/lib/strcpy.c b/arch/blackfin/lib/strcpy.c
deleted file mode 100644
index 2a8836b1f4d3..000000000000
--- a/arch/blackfin/lib/strcpy.c
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * Provide symbol in case str func is not inlined.
- *
- * Copyright (c) 2006-2007 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
-
-#define strcpy __inline_strcpy
-#include <asm/string.h>
-#undef strcpy
-
-#include <linux/module.h>
-
-char *strcpy(char *dest, const char *src)
-{
- return __inline_strcpy(dest, src);
-}
-EXPORT_SYMBOL(strcpy);
diff --git a/arch/blackfin/lib/strncmp.S b/arch/blackfin/lib/strncmp.S
new file mode 100644
index 000000000000..6da37c34a847
--- /dev/null
+++ b/arch/blackfin/lib/strncmp.S
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2005-2010 Analog Devices Inc.
+ *
+ * Licensed under the ADI BSD license or the GPL-2 (or later)
+ */
+
+#include <linux/linkage.h>
+
+/* int strncmp(const char *s1, const char *s2, size_t n);
+ * R0 = address (s1)
+ * R1 = address (s2)
+ * R2 = size (n)
+ *
+ * Returns an integer less than, equal to, or greater than zero if the
+ * first n bytes of s1 are found, respectively, to be less than, to
+ * match, or be greater than the first n bytes of s2.
+ */
+
+#ifdef CONFIG_STRNCMP_L1
+.section .l1.text
+#else
+.text
+#endif
+
+.align 2
+
+ENTRY(_strncmp)
+ CC = R2 == 0;
+ if CC JUMP 5f;
+
+ P0 = R0 ; /* s1 */
+ P1 = R1 ; /* s2 */
+1:
+ R0 = B[P0++] (Z); /* get *s1 */
+ R1 = B[P1++] (Z); /* get *s2 */
+ CC = R0 == R1; /* compare a byte */
+ if ! cc jump 3f; /* not equal, break out */
+ CC = R0; /* at end of s1? */
+ if ! cc jump 4f; /* yes, all done */
+ R2 += -1; /* no, adjust count */
+ CC = R2 == 0;
+ if ! cc jump 1b (bp); /* more to do, keep going */
+2:
+ R0 = 0; /* strings are equal */
+ jump.s 4f;
+3:
+ R0 = R0 - R1; /* *s1 - *s2 */
+4:
+ RTS;
+
+5:
+ R0 = 0;
+ RTS;
+
+ENDPROC(_strncmp)
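
For reference, an illustrative C equivalent of the comparison loop above (strncmp_ref is not part of the patch). Like the assembly, it stops at the first mismatch, at the end of s1, or after n bytes, and computes the result on unsigned byte values.

#include <stddef.h>	/* size_t */

/* Illustrative C equivalent of the bounded compare loop above. */
int strncmp_ref(const char *s1, const char *s2, size_t n)
{
	const unsigned char *a = (const unsigned char *)s1;
	const unsigned char *b = (const unsigned char *)s2;

	while (n--) {
		if (*a != *b)
			return *a - *b;	/* first mismatching bytes */
		if (*a == '\0')
			return 0;	/* both strings ended -- equal */
		a++;
		b++;
	}
	return 0;			/* first n bytes are equal */
}
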
diff --git a/arch/blackfin/lib/strncmp.c b/arch/blackfin/lib/strncmp.c
deleted file mode 100644
index 46518b1d2983..000000000000
--- a/arch/blackfin/lib/strncmp.c
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * Provide symbol in case str func is not inlined.
- *
- * Copyright (c) 2006-2007 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
-
-#define strncmp __inline_strncmp
-#include <asm/string.h>
-#include <linux/module.h>
-#undef strncmp
-
-int strncmp(const char *cs, const char *ct, size_t count)
-{
- return __inline_strncmp(cs, ct, count);
-}
-EXPORT_SYMBOL(strncmp);
diff --git a/arch/blackfin/lib/strncpy.S b/arch/blackfin/lib/strncpy.S
new file mode 100644
index 000000000000..f3931d50b4a7
--- /dev/null
+++ b/arch/blackfin/lib/strncpy.S
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2005-2010 Analog Devices Inc.
+ *
+ * Licensed under the ADI BSD license or the GPL-2 (or later)
+ */
+
+#include <linux/linkage.h>
+#include <asm/context.S>
+
+/* char *strncpy(char *dest, const char *src, size_t n);
+ * R0 = address (dest)
+ * R1 = address (src)
+ * R2 = size
+ * Returns a pointer (R0) to the destination string dest;
+ * this is achieved by leaving R0 unchanged throughout.
+ */
+
+#ifdef CONFIG_STRNCPY_L1
+.section .l1.text
+#else
+.text
+#endif
+
+.align 2
+
+ENTRY(_strncpy)
+ CC = R2 == 0;
+ if CC JUMP 4f;
+
+ P2 = R2 ; /* size */
+ P0 = R0 ; /* dst*/
+ P1 = R1 ; /* src*/
+
+ LSETUP (1f, 2f) LC0 = P2;
+1:
+ R1 = B [P1++] (Z);
+ B [P0++] = R1;
+ CC = R1 == 0;
+2:
+ if CC jump 3f;
+
+ RTS;
+
+	/* If src is shorter than n, the remaining bytes in dest must be
+	 * null padded.  But we can also get here when the last byte copied
+	 * was the terminating zero, so check that anything is left to pad.
+ */
+3:
+ R2 = LC0;
+	CC = R2;
+ if ! CC jump 6f;
+
+	/* If the required null-padded portion is small, do it here rather
+	 * than paying the call overhead of memset (which pays off when the
+	 * region is large).
+ */
+ R3 = 0x20;
+ CC = R2 < R3;
+ IF CC jump 4f;
+
+ R2 += -1;
+
+ /* Set things up for memset
+ * R0 = address
+	 * R1 = filler byte (in this case it is zero, set above)
+ * R2 = count (set above)
+ */
+
+ I1 = R0;
+ R0 = RETS;
+ I0 = R0;
+ R0 = P0;
+ pseudo_long_call _memset, p0;
+ R0 = I0;
+ RETS = R0;
+ R0 = I1;
+ RTS;
+
+4:
+ LSETUP(5f, 5f) LC0;
+5:
+ B [P0++] = R1;
+6:
+ RTS;
+
+ENDPROC(_strncpy)
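
For reference, an illustrative C equivalent of the semantics implemented above (strncpy_ref is not part of the patch). It makes the null-padding step explicit -- the step the assembly hands off to _memset once the remainder is large enough to be worth the call overhead.

#include <stddef.h>	/* size_t */

/* Illustrative C equivalent: bounded copy plus NUL padding. */
char *strncpy_ref(char *dest, const char *src, size_t n)
{
	size_t i;

	/* Copy at most n bytes, stopping after the terminating NUL. */
	for (i = 0; i < n && src[i] != '\0'; i++)
		dest[i] = src[i];
	/* If src was shorter than n, pad the remainder with NUL bytes. */
	for (; i < n; i++)
		dest[i] = '\0';
	return dest;
}
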
diff --git a/arch/blackfin/lib/strncpy.c b/arch/blackfin/lib/strncpy.c
deleted file mode 100644
index ea1dc6bf2373..000000000000
--- a/arch/blackfin/lib/strncpy.c
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * Provide symbol in case str func is not inlined.
- *
- * Copyright (c) 2006-2007 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
-
-#define strncpy __inline_strncpy
-#include <asm/string.h>
-#undef strncpy
-
-#include <linux/module.h>
-
-char *strncpy(char *dest, const char *src, size_t n)
-{
- return __inline_strncpy(dest, src, n);
-}
-EXPORT_SYMBOL(strncpy);
diff --git a/arch/blackfin/mach-bf527/boards/cm_bf527.c b/arch/blackfin/mach-bf527/boards/cm_bf527.c
index ebe76d1e874a..f392af641657 100644
--- a/arch/blackfin/mach-bf527/boards/cm_bf527.c
+++ b/arch/blackfin/mach-bf527/boards/cm_bf527.c
@@ -98,6 +98,10 @@ static struct musb_hdrc_config musb_config = {
.num_eps = 8,
.dma_channels = 8,
.gpio_vrsel = GPIO_PF11,
+	/* Some custom boards need VRSEL to be active low; just set this
+	 * to "0" if that is the case.
+ */
+ .gpio_vrsel_active = 1,
};
static struct musb_hdrc_platform_data musb_plat = {
diff --git a/arch/blackfin/mach-bf527/boards/ezbrd.c b/arch/blackfin/mach-bf527/boards/ezbrd.c
index 55069af4f67d..606eb36b9d6e 100644
--- a/arch/blackfin/mach-bf527/boards/ezbrd.c
+++ b/arch/blackfin/mach-bf527/boards/ezbrd.c
@@ -62,6 +62,10 @@ static struct musb_hdrc_config musb_config = {
.num_eps = 8,
.dma_channels = 8,
.gpio_vrsel = GPIO_PG13,
+	/* Some custom boards need VRSEL to be active low; just set this
+	 * to "0" if that is the case.
+ */
+ .gpio_vrsel_active = 1,
};
static struct musb_hdrc_platform_data musb_plat = {
diff --git a/arch/blackfin/mach-bf527/boards/ezkit.c b/arch/blackfin/mach-bf527/boards/ezkit.c
index 923383386aa1..a05c967a24cf 100644
--- a/arch/blackfin/mach-bf527/boards/ezkit.c
+++ b/arch/blackfin/mach-bf527/boards/ezkit.c
@@ -102,6 +102,10 @@ static struct musb_hdrc_config musb_config = {
.num_eps = 8,
.dma_channels = 8,
.gpio_vrsel = GPIO_PG13,
+	/* Some custom boards need VRSEL to be active low; just set this
+	 * to "0" if that is the case.
+ */
+ .gpio_vrsel_active = 1,
};
static struct musb_hdrc_platform_data musb_plat = {
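
The boards touched here keep the default polarity by setting the new gpio_vrsel_active field to 1. Per the comments above, a board whose VRSEL line is wired active low would set it to 0 instead; a hypothetical board fragment (illustrative only, the GPIO choice is an assumption) might look like:

static struct musb_hdrc_config musb_config = {
	.num_eps	= 8,
	.dma_channels	= 8,
	.gpio_vrsel	= GPIO_PF11,	/* board-specific choice (assumption) */
	.gpio_vrsel_active = 0,		/* VRSEL is active low on this board */
};
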
diff --git a/arch/blackfin/mach-bf537/boards/minotaur.c b/arch/blackfin/mach-bf537/boards/minotaur.c
index c489d602c590..05d45994480e 100644
--- a/arch/blackfin/mach-bf537/boards/minotaur.c
+++ b/arch/blackfin/mach-bf537/boards/minotaur.c
@@ -23,12 +23,13 @@
#include <asm/dma.h>
#include <asm/bfin5xx_spi.h>
#include <asm/reboot.h>
+#include <asm/portmux.h>
#include <linux/spi/ad7877.h>
/*
* Name the Board for the /proc/cpuinfo
*/
-char *bfin_board_name = "CamSig Minotaur BF537";
+const char bfin_board_name[] = "CamSig Minotaur BF537";
#if defined(CONFIG_BFIN_CFPCMCIA) || defined(CONFIG_BFIN_CFPCMCIA_MODULE)
static struct resource bfin_pcmcia_cf_resources[] = {
diff --git a/arch/blackfin/mach-bf537/include/mach/defBF534.h b/arch/blackfin/mach-bf537/include/mach/defBF534.h
index 066d5c261f47..cf396ea40092 100644
--- a/arch/blackfin/mach-bf537/include/mach/defBF534.h
+++ b/arch/blackfin/mach-bf537/include/mach/defBF534.h
@@ -1702,628 +1702,6 @@
#define RCV_HALF 0x0004 /* Receive FIFO Has 1 Byte To Read */
#define RCV_FULL 0x000C /* Receive FIFO Full (2 Bytes To Read) */
-/* ************ CONTROLLER AREA NETWORK (CAN) MASKS ***************/
-/* CAN_CONTROL Masks */
-#define SRS 0x0001 /* Software Reset */
-#define DNM 0x0002 /* Device Net Mode */
-#define ABO 0x0004 /* Auto-Bus On Enable */
-#define TXPRIO 0x0008 /* TX Priority (Priority/Mailbox*) */
-#define WBA 0x0010 /* Wake-Up On CAN Bus Activity Enable */
-#define SMR 0x0020 /* Sleep Mode Request */
-#define CSR 0x0040 /* CAN Suspend Mode Request */
-#define CCR 0x0080 /* CAN Configuration Mode Request */
-
-/* CAN_STATUS Masks */
-#define WT 0x0001 /* TX Warning Flag */
-#define WR 0x0002 /* RX Warning Flag */
-#define EP 0x0004 /* Error Passive Mode */
-#define EBO 0x0008 /* Error Bus Off Mode */
-#define SMA 0x0020 /* Sleep Mode Acknowledge */
-#define CSA 0x0040 /* Suspend Mode Acknowledge */
-#define CCA 0x0080 /* Configuration Mode Acknowledge */
-#define MBPTR 0x1F00 /* Mailbox Pointer */
-#define TRM 0x4000 /* Transmit Mode */
-#define REC 0x8000 /* Receive Mode */
-
-/* CAN_CLOCK Masks */
-#define BRP 0x03FF /* Bit-Rate Pre-Scaler */
-
-/* CAN_TIMING Masks */
-#define TSEG1 0x000F /* Time Segment 1 */
-#define TSEG2 0x0070 /* Time Segment 2 */
-#define SAM 0x0080 /* Sampling */
-#define SJW 0x0300 /* Synchronization Jump Width */
-
-/* CAN_DEBUG Masks */
-#define DEC 0x0001 /* Disable CAN Error Counters */
-#define DRI 0x0002 /* Disable CAN RX Input */
-#define DTO 0x0004 /* Disable CAN TX Output */
-#define DIL 0x0008 /* Disable CAN Internal Loop */
-#define MAA 0x0010 /* Mode Auto-Acknowledge Enable */
-#define MRB 0x0020 /* Mode Read Back Enable */
-#define CDE 0x8000 /* CAN Debug Enable */
-
-/* CAN_CEC Masks */
-#define RXECNT 0x00FF /* Receive Error Counter */
-#define TXECNT 0xFF00 /* Transmit Error Counter */
-
-/* CAN_INTR Masks */
-#define MBRIRQ 0x0001 /* Mailbox Receive Interrupt */
-#define MBRIF MBRIRQ /* legacy */
-#define MBTIRQ 0x0002 /* Mailbox Transmit Interrupt */
-#define MBTIF MBTIRQ /* legacy */
-#define GIRQ 0x0004 /* Global Interrupt */
-#define SMACK 0x0008 /* Sleep Mode Acknowledge */
-#define CANTX 0x0040 /* CAN TX Bus Value */
-#define CANRX 0x0080 /* CAN RX Bus Value */
-
-/* CAN_MBxx_ID1 and CAN_MBxx_ID0 Masks */
-#define DFC 0xFFFF /* Data Filtering Code (If Enabled) (ID0) */
-#define EXTID_LO 0xFFFF /* Lower 16 Bits of Extended Identifier (ID0) */
-#define EXTID_HI 0x0003 /* Upper 2 Bits of Extended Identifier (ID1) */
-#define BASEID 0x1FFC /* Base Identifier */
-#define IDE 0x2000 /* Identifier Extension */
-#define RTR 0x4000 /* Remote Frame Transmission Request */
-#define AME 0x8000 /* Acceptance Mask Enable */
-
-/* CAN_MBxx_TIMESTAMP Masks */
-#define TSV 0xFFFF /* Timestamp */
-
-/* CAN_MBxx_LENGTH Masks */
-#define DLC 0x000F /* Data Length Code */
-
-/* CAN_AMxxH and CAN_AMxxL Masks */
-#define DFM 0xFFFF /* Data Field Mask (If Enabled) (CAN_AMxxL) */
-#define EXTID_LO 0xFFFF /* Lower 16 Bits of Extended Identifier (CAN_AMxxL) */
-#define EXTID_HI 0x0003 /* Upper 2 Bits of Extended Identifier (CAN_AMxxH) */
-#define BASEID 0x1FFC /* Base Identifier */
-#define AMIDE 0x2000 /* Acceptance Mask ID Extension Enable */
-#define FMD 0x4000 /* Full Mask Data Field Enable */
-#define FDF 0x8000 /* Filter On Data Field Enable */
-
-/* CAN_MC1 Masks */
-#define MC0 0x0001 /* Enable Mailbox 0 */
-#define MC1 0x0002 /* Enable Mailbox 1 */
-#define MC2 0x0004 /* Enable Mailbox 2 */
-#define MC3 0x0008 /* Enable Mailbox 3 */
-#define MC4 0x0010 /* Enable Mailbox 4 */
-#define MC5 0x0020 /* Enable Mailbox 5 */
-#define MC6 0x0040 /* Enable Mailbox 6 */
-#define MC7 0x0080 /* Enable Mailbox 7 */
-#define MC8 0x0100 /* Enable Mailbox 8 */
-#define MC9 0x0200 /* Enable Mailbox 9 */
-#define MC10 0x0400 /* Enable Mailbox 10 */
-#define MC11 0x0800 /* Enable Mailbox 11 */
-#define MC12 0x1000 /* Enable Mailbox 12 */
-#define MC13 0x2000 /* Enable Mailbox 13 */
-#define MC14 0x4000 /* Enable Mailbox 14 */
-#define MC15 0x8000 /* Enable Mailbox 15 */
-
-/* CAN_MC2 Masks */
-#define MC16 0x0001 /* Enable Mailbox 16 */
-#define MC17 0x0002 /* Enable Mailbox 17 */
-#define MC18 0x0004 /* Enable Mailbox 18 */
-#define MC19 0x0008 /* Enable Mailbox 19 */
-#define MC20 0x0010 /* Enable Mailbox 20 */
-#define MC21 0x0020 /* Enable Mailbox 21 */
-#define MC22 0x0040 /* Enable Mailbox 22 */
-#define MC23 0x0080 /* Enable Mailbox 23 */
-#define MC24 0x0100 /* Enable Mailbox 24 */
-#define MC25 0x0200 /* Enable Mailbox 25 */
-#define MC26 0x0400 /* Enable Mailbox 26 */
-#define MC27 0x0800 /* Enable Mailbox 27 */
-#define MC28 0x1000 /* Enable Mailbox 28 */
-#define MC29 0x2000 /* Enable Mailbox 29 */
-#define MC30 0x4000 /* Enable Mailbox 30 */
-#define MC31 0x8000 /* Enable Mailbox 31 */
-
-/* CAN_MD1 Masks */
-#define MD0 0x0001 /* Enable Mailbox 0 For Receive */
-#define MD1 0x0002 /* Enable Mailbox 1 For Receive */
-#define MD2 0x0004 /* Enable Mailbox 2 For Receive */
-#define MD3 0x0008 /* Enable Mailbox 3 For Receive */
-#define MD4 0x0010 /* Enable Mailbox 4 For Receive */
-#define MD5 0x0020 /* Enable Mailbox 5 For Receive */
-#define MD6 0x0040 /* Enable Mailbox 6 For Receive */
-#define MD7 0x0080 /* Enable Mailbox 7 For Receive */
-#define MD8 0x0100 /* Enable Mailbox 8 For Receive */
-#define MD9 0x0200 /* Enable Mailbox 9 For Receive */
-#define MD10 0x0400 /* Enable Mailbox 10 For Receive */
-#define MD11 0x0800 /* Enable Mailbox 11 For Receive */
-#define MD12 0x1000 /* Enable Mailbox 12 For Receive */
-#define MD13 0x2000 /* Enable Mailbox 13 For Receive */
-#define MD14 0x4000 /* Enable Mailbox 14 For Receive */
-#define MD15 0x8000 /* Enable Mailbox 15 For Receive */
-
-/* CAN_MD2 Masks */
-#define MD16 0x0001 /* Enable Mailbox 16 For Receive */
-#define MD17 0x0002 /* Enable Mailbox 17 For Receive */
-#define MD18 0x0004 /* Enable Mailbox 18 For Receive */
-#define MD19 0x0008 /* Enable Mailbox 19 For Receive */
-#define MD20 0x0010 /* Enable Mailbox 20 For Receive */
-#define MD21 0x0020 /* Enable Mailbox 21 For Receive */
-#define MD22 0x0040 /* Enable Mailbox 22 For Receive */
-#define MD23 0x0080 /* Enable Mailbox 23 For Receive */
-#define MD24 0x0100 /* Enable Mailbox 24 For Receive */
-#define MD25 0x0200 /* Enable Mailbox 25 For Receive */
-#define MD26 0x0400 /* Enable Mailbox 26 For Receive */
-#define MD27 0x0800 /* Enable Mailbox 27 For Receive */
-#define MD28 0x1000 /* Enable Mailbox 28 For Receive */
-#define MD29 0x2000 /* Enable Mailbox 29 For Receive */
-#define MD30 0x4000 /* Enable Mailbox 30 For Receive */
-#define MD31 0x8000 /* Enable Mailbox 31 For Receive */
-
-/* CAN_RMP1 Masks */
-#define RMP0 0x0001 /* RX Message Pending In Mailbox 0 */
-#define RMP1 0x0002 /* RX Message Pending In Mailbox 1 */
-#define RMP2 0x0004 /* RX Message Pending In Mailbox 2 */
-#define RMP3 0x0008 /* RX Message Pending In Mailbox 3 */
-#define RMP4 0x0010 /* RX Message Pending In Mailbox 4 */
-#define RMP5 0x0020 /* RX Message Pending In Mailbox 5 */
-#define RMP6 0x0040 /* RX Message Pending In Mailbox 6 */
-#define RMP7 0x0080 /* RX Message Pending In Mailbox 7 */
-#define RMP8 0x0100 /* RX Message Pending In Mailbox 8 */
-#define RMP9 0x0200 /* RX Message Pending In Mailbox 9 */
-#define RMP10 0x0400 /* RX Message Pending In Mailbox 10 */
-#define RMP11 0x0800 /* RX Message Pending In Mailbox 11 */
-#define RMP12 0x1000 /* RX Message Pending In Mailbox 12 */
-#define RMP13 0x2000 /* RX Message Pending In Mailbox 13 */
-#define RMP14 0x4000 /* RX Message Pending In Mailbox 14 */
-#define RMP15 0x8000 /* RX Message Pending In Mailbox 15 */
-
-/* CAN_RMP2 Masks */
-#define RMP16 0x0001 /* RX Message Pending In Mailbox 16 */
-#define RMP17 0x0002 /* RX Message Pending In Mailbox 17 */
-#define RMP18 0x0004 /* RX Message Pending In Mailbox 18 */
-#define RMP19 0x0008 /* RX Message Pending In Mailbox 19 */
-#define RMP20 0x0010 /* RX Message Pending In Mailbox 20 */
-#define RMP21 0x0020 /* RX Message Pending In Mailbox 21 */
-#define RMP22 0x0040 /* RX Message Pending In Mailbox 22 */
-#define RMP23 0x0080 /* RX Message Pending In Mailbox 23 */
-#define RMP24 0x0100 /* RX Message Pending In Mailbox 24 */
-#define RMP25 0x0200 /* RX Message Pending In Mailbox 25 */
-#define RMP26 0x0400 /* RX Message Pending In Mailbox 26 */
-#define RMP27 0x0800 /* RX Message Pending In Mailbox 27 */
-#define RMP28 0x1000 /* RX Message Pending In Mailbox 28 */
-#define RMP29 0x2000 /* RX Message Pending In Mailbox 29 */
-#define RMP30 0x4000 /* RX Message Pending In Mailbox 30 */
-#define RMP31 0x8000 /* RX Message Pending In Mailbox 31 */
-
-/* CAN_RML1 Masks */
-#define RML0 0x0001 /* RX Message Lost In Mailbox 0 */
-#define RML1 0x0002 /* RX Message Lost In Mailbox 1 */
-#define RML2 0x0004 /* RX Message Lost In Mailbox 2 */
-#define RML3 0x0008 /* RX Message Lost In Mailbox 3 */
-#define RML4 0x0010 /* RX Message Lost In Mailbox 4 */
-#define RML5 0x0020 /* RX Message Lost In Mailbox 5 */
-#define RML6 0x0040 /* RX Message Lost In Mailbox 6 */
-#define RML7 0x0080 /* RX Message Lost In Mailbox 7 */
-#define RML8 0x0100 /* RX Message Lost In Mailbox 8 */
-#define RML9 0x0200 /* RX Message Lost In Mailbox 9 */
-#define RML10 0x0400 /* RX Message Lost In Mailbox 10 */
-#define RML11 0x0800 /* RX Message Lost In Mailbox 11 */
-#define RML12 0x1000 /* RX Message Lost In Mailbox 12 */
-#define RML13 0x2000 /* RX Message Lost In Mailbox 13 */
-#define RML14 0x4000 /* RX Message Lost In Mailbox 14 */
-#define RML15 0x8000 /* RX Message Lost In Mailbox 15 */
-
-/* CAN_RML2 Masks */
-#define RML16 0x0001 /* RX Message Lost In Mailbox 16 */
-#define RML17 0x0002 /* RX Message Lost In Mailbox 17 */
-#define RML18 0x0004 /* RX Message Lost In Mailbox 18 */
-#define RML19 0x0008 /* RX Message Lost In Mailbox 19 */
-#define RML20 0x0010 /* RX Message Lost In Mailbox 20 */
-#define RML21 0x0020 /* RX Message Lost In Mailbox 21 */
-#define RML22 0x0040 /* RX Message Lost In Mailbox 22 */
-#define RML23 0x0080 /* RX Message Lost In Mailbox 23 */
-#define RML24 0x0100 /* RX Message Lost In Mailbox 24 */
-#define RML25 0x0200 /* RX Message Lost In Mailbox 25 */
-#define RML26 0x0400 /* RX Message Lost In Mailbox 26 */
-#define RML27 0x0800 /* RX Message Lost In Mailbox 27 */
-#define RML28 0x1000 /* RX Message Lost In Mailbox 28 */
-#define RML29 0x2000 /* RX Message Lost In Mailbox 29 */
-#define RML30 0x4000 /* RX Message Lost In Mailbox 30 */
-#define RML31 0x8000 /* RX Message Lost In Mailbox 31 */
-
-/* CAN_OPSS1 Masks */
-#define OPSS0 0x0001 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 0 */
-#define OPSS1 0x0002 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 1 */
-#define OPSS2 0x0004 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 2 */
-#define OPSS3 0x0008 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 3 */
-#define OPSS4 0x0010 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 4 */
-#define OPSS5 0x0020 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 5 */
-#define OPSS6 0x0040 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 6 */
-#define OPSS7 0x0080 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 7 */
-#define OPSS8 0x0100 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 8 */
-#define OPSS9 0x0200 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 9 */
-#define OPSS10 0x0400 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 10 */
-#define OPSS11 0x0800 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 11 */
-#define OPSS12 0x1000 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 12 */
-#define OPSS13 0x2000 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 13 */
-#define OPSS14 0x4000 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 14 */
-#define OPSS15 0x8000 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 15 */
-
-/* CAN_OPSS2 Masks */
-#define OPSS16 0x0001 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 16 */
-#define OPSS17 0x0002 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 17 */
-#define OPSS18 0x0004 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 18 */
-#define OPSS19 0x0008 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 19 */
-#define OPSS20 0x0010 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 20 */
-#define OPSS21 0x0020 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 21 */
-#define OPSS22 0x0040 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 22 */
-#define OPSS23 0x0080 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 23 */
-#define OPSS24 0x0100 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 24 */
-#define OPSS25 0x0200 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 25 */
-#define OPSS26 0x0400 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 26 */
-#define OPSS27 0x0800 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 27 */
-#define OPSS28 0x1000 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 28 */
-#define OPSS29 0x2000 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 29 */
-#define OPSS30 0x4000 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 30 */
-#define OPSS31 0x8000 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 31 */
-
-/* CAN_TRR1 Masks */
-#define TRR0 0x0001 /* Deny But Don't Lock Access To Mailbox 0 */
-#define TRR1 0x0002 /* Deny But Don't Lock Access To Mailbox 1 */
-#define TRR2 0x0004 /* Deny But Don't Lock Access To Mailbox 2 */
-#define TRR3 0x0008 /* Deny But Don't Lock Access To Mailbox 3 */
-#define TRR4 0x0010 /* Deny But Don't Lock Access To Mailbox 4 */
-#define TRR5 0x0020 /* Deny But Don't Lock Access To Mailbox 5 */
-#define TRR6 0x0040 /* Deny But Don't Lock Access To Mailbox 6 */
-#define TRR7 0x0080 /* Deny But Don't Lock Access To Mailbox 7 */
-#define TRR8 0x0100 /* Deny But Don't Lock Access To Mailbox 8 */
-#define TRR9 0x0200 /* Deny But Don't Lock Access To Mailbox 9 */
-#define TRR10 0x0400 /* Deny But Don't Lock Access To Mailbox 10 */
-#define TRR11 0x0800 /* Deny But Don't Lock Access To Mailbox 11 */
-#define TRR12 0x1000 /* Deny But Don't Lock Access To Mailbox 12 */
-#define TRR13 0x2000 /* Deny But Don't Lock Access To Mailbox 13 */
-#define TRR14 0x4000 /* Deny But Don't Lock Access To Mailbox 14 */
-#define TRR15 0x8000 /* Deny But Don't Lock Access To Mailbox 15 */
-
-/* CAN_TRR2 Masks */
-#define TRR16 0x0001 /* Deny But Don't Lock Access To Mailbox 16 */
-#define TRR17 0x0002 /* Deny But Don't Lock Access To Mailbox 17 */
-#define TRR18 0x0004 /* Deny But Don't Lock Access To Mailbox 18 */
-#define TRR19 0x0008 /* Deny But Don't Lock Access To Mailbox 19 */
-#define TRR20 0x0010 /* Deny But Don't Lock Access To Mailbox 20 */
-#define TRR21 0x0020 /* Deny But Don't Lock Access To Mailbox 21 */
-#define TRR22 0x0040 /* Deny But Don't Lock Access To Mailbox 22 */
-#define TRR23 0x0080 /* Deny But Don't Lock Access To Mailbox 23 */
-#define TRR24 0x0100 /* Deny But Don't Lock Access To Mailbox 24 */
-#define TRR25 0x0200 /* Deny But Don't Lock Access To Mailbox 25 */
-#define TRR26 0x0400 /* Deny But Don't Lock Access To Mailbox 26 */
-#define TRR27 0x0800 /* Deny But Don't Lock Access To Mailbox 27 */
-#define TRR28 0x1000 /* Deny But Don't Lock Access To Mailbox 28 */
-#define TRR29 0x2000 /* Deny But Don't Lock Access To Mailbox 29 */
-#define TRR30 0x4000 /* Deny But Don't Lock Access To Mailbox 30 */
-#define TRR31 0x8000 /* Deny But Don't Lock Access To Mailbox 31 */
-
-/* CAN_TRS1 Masks */
-#define TRS0 0x0001 /* Remote Frame Request For Mailbox 0 */
-#define TRS1 0x0002 /* Remote Frame Request For Mailbox 1 */
-#define TRS2 0x0004 /* Remote Frame Request For Mailbox 2 */
-#define TRS3 0x0008 /* Remote Frame Request For Mailbox 3 */
-#define TRS4 0x0010 /* Remote Frame Request For Mailbox 4 */
-#define TRS5 0x0020 /* Remote Frame Request For Mailbox 5 */
-#define TRS6 0x0040 /* Remote Frame Request For Mailbox 6 */
-#define TRS7 0x0080 /* Remote Frame Request For Mailbox 7 */
-#define TRS8 0x0100 /* Remote Frame Request For Mailbox 8 */
-#define TRS9 0x0200 /* Remote Frame Request For Mailbox 9 */
-#define TRS10 0x0400 /* Remote Frame Request For Mailbox 10 */
-#define TRS11 0x0800 /* Remote Frame Request For Mailbox 11 */
-#define TRS12 0x1000 /* Remote Frame Request For Mailbox 12 */
-#define TRS13 0x2000 /* Remote Frame Request For Mailbox 13 */
-#define TRS14 0x4000 /* Remote Frame Request For Mailbox 14 */
-#define TRS15 0x8000 /* Remote Frame Request For Mailbox 15 */
-
-/* CAN_TRS2 Masks */
-#define TRS16 0x0001 /* Remote Frame Request For Mailbox 16 */
-#define TRS17 0x0002 /* Remote Frame Request For Mailbox 17 */
-#define TRS18 0x0004 /* Remote Frame Request For Mailbox 18 */
-#define TRS19 0x0008 /* Remote Frame Request For Mailbox 19 */
-#define TRS20 0x0010 /* Remote Frame Request For Mailbox 20 */
-#define TRS21 0x0020 /* Remote Frame Request For Mailbox 21 */
-#define TRS22 0x0040 /* Remote Frame Request For Mailbox 22 */
-#define TRS23 0x0080 /* Remote Frame Request For Mailbox 23 */
-#define TRS24 0x0100 /* Remote Frame Request For Mailbox 24 */
-#define TRS25 0x0200 /* Remote Frame Request For Mailbox 25 */
-#define TRS26 0x0400 /* Remote Frame Request For Mailbox 26 */
-#define TRS27 0x0800 /* Remote Frame Request For Mailbox 27 */
-#define TRS28 0x1000 /* Remote Frame Request For Mailbox 28 */
-#define TRS29 0x2000 /* Remote Frame Request For Mailbox 29 */
-#define TRS30 0x4000 /* Remote Frame Request For Mailbox 30 */
-#define TRS31 0x8000 /* Remote Frame Request For Mailbox 31 */
-
-/* CAN_AA1 Masks */
-#define AA0 0x0001 /* Aborted Message In Mailbox 0 */
-#define AA1 0x0002 /* Aborted Message In Mailbox 1 */
-#define AA2 0x0004 /* Aborted Message In Mailbox 2 */
-#define AA3 0x0008 /* Aborted Message In Mailbox 3 */
-#define AA4 0x0010 /* Aborted Message In Mailbox 4 */
-#define AA5 0x0020 /* Aborted Message In Mailbox 5 */
-#define AA6 0x0040 /* Aborted Message In Mailbox 6 */
-#define AA7 0x0080 /* Aborted Message In Mailbox 7 */
-#define AA8 0x0100 /* Aborted Message In Mailbox 8 */
-#define AA9 0x0200 /* Aborted Message In Mailbox 9 */
-#define AA10 0x0400 /* Aborted Message In Mailbox 10 */
-#define AA11 0x0800 /* Aborted Message In Mailbox 11 */
-#define AA12 0x1000 /* Aborted Message In Mailbox 12 */
-#define AA13 0x2000 /* Aborted Message In Mailbox 13 */
-#define AA14 0x4000 /* Aborted Message In Mailbox 14 */
-#define AA15 0x8000 /* Aborted Message In Mailbox 15 */
-
-/* CAN_AA2 Masks */
-#define AA16 0x0001 /* Aborted Message In Mailbox 16 */
-#define AA17 0x0002 /* Aborted Message In Mailbox 17 */
-#define AA18 0x0004 /* Aborted Message In Mailbox 18 */
-#define AA19 0x0008 /* Aborted Message In Mailbox 19 */
-#define AA20 0x0010 /* Aborted Message In Mailbox 20 */
-#define AA21 0x0020 /* Aborted Message In Mailbox 21 */
-#define AA22 0x0040 /* Aborted Message In Mailbox 22 */
-#define AA23 0x0080 /* Aborted Message In Mailbox 23 */
-#define AA24 0x0100 /* Aborted Message In Mailbox 24 */
-#define AA25 0x0200 /* Aborted Message In Mailbox 25 */
-#define AA26 0x0400 /* Aborted Message In Mailbox 26 */
-#define AA27 0x0800 /* Aborted Message In Mailbox 27 */
-#define AA28 0x1000 /* Aborted Message In Mailbox 28 */
-#define AA29 0x2000 /* Aborted Message In Mailbox 29 */
-#define AA30 0x4000 /* Aborted Message In Mailbox 30 */
-#define AA31 0x8000 /* Aborted Message In Mailbox 31 */
-
-/* CAN_TA1 Masks */
-#define TA0 0x0001 /* Transmit Successful From Mailbox 0 */
-#define TA1 0x0002 /* Transmit Successful From Mailbox 1 */
-#define TA2 0x0004 /* Transmit Successful From Mailbox 2 */
-#define TA3 0x0008 /* Transmit Successful From Mailbox 3 */
-#define TA4 0x0010 /* Transmit Successful From Mailbox 4 */
-#define TA5 0x0020 /* Transmit Successful From Mailbox 5 */
-#define TA6 0x0040 /* Transmit Successful From Mailbox 6 */
-#define TA7 0x0080 /* Transmit Successful From Mailbox 7 */
-#define TA8 0x0100 /* Transmit Successful From Mailbox 8 */
-#define TA9 0x0200 /* Transmit Successful From Mailbox 9 */
-#define TA10 0x0400 /* Transmit Successful From Mailbox 10 */
-#define TA11 0x0800 /* Transmit Successful From Mailbox 11 */
-#define TA12 0x1000 /* Transmit Successful From Mailbox 12 */
-#define TA13 0x2000 /* Transmit Successful From Mailbox 13 */
-#define TA14 0x4000 /* Transmit Successful From Mailbox 14 */
-#define TA15 0x8000 /* Transmit Successful From Mailbox 15 */
-
-/* CAN_TA2 Masks */
-#define TA16 0x0001 /* Transmit Successful From Mailbox 16 */
-#define TA17 0x0002 /* Transmit Successful From Mailbox 17 */
-#define TA18 0x0004 /* Transmit Successful From Mailbox 18 */
-#define TA19 0x0008 /* Transmit Successful From Mailbox 19 */
-#define TA20 0x0010 /* Transmit Successful From Mailbox 20 */
-#define TA21 0x0020 /* Transmit Successful From Mailbox 21 */
-#define TA22 0x0040 /* Transmit Successful From Mailbox 22 */
-#define TA23 0x0080 /* Transmit Successful From Mailbox 23 */
-#define TA24 0x0100 /* Transmit Successful From Mailbox 24 */
-#define TA25 0x0200 /* Transmit Successful From Mailbox 25 */
-#define TA26 0x0400 /* Transmit Successful From Mailbox 26 */
-#define TA27 0x0800 /* Transmit Successful From Mailbox 27 */
-#define TA28 0x1000 /* Transmit Successful From Mailbox 28 */
-#define TA29 0x2000 /* Transmit Successful From Mailbox 29 */
-#define TA30 0x4000 /* Transmit Successful From Mailbox 30 */
-#define TA31 0x8000 /* Transmit Successful From Mailbox 31 */
-
-/* CAN_MBTD Masks */
-#define TDPTR 0x001F /* Mailbox To Temporarily Disable */
-#define TDA 0x0040 /* Temporary Disable Acknowledge */
-#define TDR 0x0080 /* Temporary Disable Request */
-
-/* CAN_RFH1 Masks */
-#define RFH0 0x0001 /* Enable Automatic Remote Frame Handling For Mailbox 0 */
-#define RFH1 0x0002 /* Enable Automatic Remote Frame Handling For Mailbox 1 */
-#define RFH2 0x0004 /* Enable Automatic Remote Frame Handling For Mailbox 2 */
-#define RFH3 0x0008 /* Enable Automatic Remote Frame Handling For Mailbox 3 */
-#define RFH4 0x0010 /* Enable Automatic Remote Frame Handling For Mailbox 4 */
-#define RFH5 0x0020 /* Enable Automatic Remote Frame Handling For Mailbox 5 */
-#define RFH6 0x0040 /* Enable Automatic Remote Frame Handling For Mailbox 6 */
-#define RFH7 0x0080 /* Enable Automatic Remote Frame Handling For Mailbox 7 */
-#define RFH8 0x0100 /* Enable Automatic Remote Frame Handling For Mailbox 8 */
-#define RFH9 0x0200 /* Enable Automatic Remote Frame Handling For Mailbox 9 */
-#define RFH10 0x0400 /* Enable Automatic Remote Frame Handling For Mailbox 10 */
-#define RFH11 0x0800 /* Enable Automatic Remote Frame Handling For Mailbox 11 */
-#define RFH12 0x1000 /* Enable Automatic Remote Frame Handling For Mailbox 12 */
-#define RFH13 0x2000 /* Enable Automatic Remote Frame Handling For Mailbox 13 */
-#define RFH14 0x4000 /* Enable Automatic Remote Frame Handling For Mailbox 14 */
-#define RFH15 0x8000 /* Enable Automatic Remote Frame Handling For Mailbox 15 */
-
-/* CAN_RFH2 Masks */
-#define RFH16 0x0001 /* Enable Automatic Remote Frame Handling For Mailbox 16 */
-#define RFH17 0x0002 /* Enable Automatic Remote Frame Handling For Mailbox 17 */
-#define RFH18 0x0004 /* Enable Automatic Remote Frame Handling For Mailbox 18 */
-#define RFH19 0x0008 /* Enable Automatic Remote Frame Handling For Mailbox 19 */
-#define RFH20 0x0010 /* Enable Automatic Remote Frame Handling For Mailbox 20 */
-#define RFH21 0x0020 /* Enable Automatic Remote Frame Handling For Mailbox 21 */
-#define RFH22 0x0040 /* Enable Automatic Remote Frame Handling For Mailbox 22 */
-#define RFH23 0x0080 /* Enable Automatic Remote Frame Handling For Mailbox 23 */
-#define RFH24 0x0100 /* Enable Automatic Remote Frame Handling For Mailbox 24 */
-#define RFH25 0x0200 /* Enable Automatic Remote Frame Handling For Mailbox 25 */
-#define RFH26 0x0400 /* Enable Automatic Remote Frame Handling For Mailbox 26 */
-#define RFH27 0x0800 /* Enable Automatic Remote Frame Handling For Mailbox 27 */
-#define RFH28 0x1000 /* Enable Automatic Remote Frame Handling For Mailbox 28 */
-#define RFH29 0x2000 /* Enable Automatic Remote Frame Handling For Mailbox 29 */
-#define RFH30 0x4000 /* Enable Automatic Remote Frame Handling For Mailbox 30 */
-#define RFH31 0x8000 /* Enable Automatic Remote Frame Handling For Mailbox 31 */
-
-/* CAN_MBTIF1 Masks */
-#define MBTIF0 0x0001 /* TX Interrupt Active In Mailbox 0 */
-#define MBTIF1 0x0002 /* TX Interrupt Active In Mailbox 1 */
-#define MBTIF2 0x0004 /* TX Interrupt Active In Mailbox 2 */
-#define MBTIF3 0x0008 /* TX Interrupt Active In Mailbox 3 */
-#define MBTIF4 0x0010 /* TX Interrupt Active In Mailbox 4 */
-#define MBTIF5 0x0020 /* TX Interrupt Active In Mailbox 5 */
-#define MBTIF6 0x0040 /* TX Interrupt Active In Mailbox 6 */
-#define MBTIF7 0x0080 /* TX Interrupt Active In Mailbox 7 */
-#define MBTIF8 0x0100 /* TX Interrupt Active In Mailbox 8 */
-#define MBTIF9 0x0200 /* TX Interrupt Active In Mailbox 9 */
-#define MBTIF10 0x0400 /* TX Interrupt Active In Mailbox 10 */
-#define MBTIF11 0x0800 /* TX Interrupt Active In Mailbox 11 */
-#define MBTIF12 0x1000 /* TX Interrupt Active In Mailbox 12 */
-#define MBTIF13 0x2000 /* TX Interrupt Active In Mailbox 13 */
-#define MBTIF14 0x4000 /* TX Interrupt Active In Mailbox 14 */
-#define MBTIF15 0x8000 /* TX Interrupt Active In Mailbox 15 */
-
-/* CAN_MBTIF2 Masks */
-#define MBTIF16 0x0001 /* TX Interrupt Active In Mailbox 16 */
-#define MBTIF17 0x0002 /* TX Interrupt Active In Mailbox 17 */
-#define MBTIF18 0x0004 /* TX Interrupt Active In Mailbox 18 */
-#define MBTIF19 0x0008 /* TX Interrupt Active In Mailbox 19 */
-#define MBTIF20 0x0010 /* TX Interrupt Active In Mailbox 20 */
-#define MBTIF21 0x0020 /* TX Interrupt Active In Mailbox 21 */
-#define MBTIF22 0x0040 /* TX Interrupt Active In Mailbox 22 */
-#define MBTIF23 0x0080 /* TX Interrupt Active In Mailbox 23 */
-#define MBTIF24 0x0100 /* TX Interrupt Active In Mailbox 24 */
-#define MBTIF25 0x0200 /* TX Interrupt Active In Mailbox 25 */
-#define MBTIF26 0x0400 /* TX Interrupt Active In Mailbox 26 */
-#define MBTIF27 0x0800 /* TX Interrupt Active In Mailbox 27 */
-#define MBTIF28 0x1000 /* TX Interrupt Active In Mailbox 28 */
-#define MBTIF29 0x2000 /* TX Interrupt Active In Mailbox 29 */
-#define MBTIF30 0x4000 /* TX Interrupt Active In Mailbox 30 */
-#define MBTIF31 0x8000 /* TX Interrupt Active In Mailbox 31 */
-
-/* CAN_MBRIF1 Masks */
-#define MBRIF0 0x0001 /* RX Interrupt Active In Mailbox 0 */
-#define MBRIF1 0x0002 /* RX Interrupt Active In Mailbox 1 */
-#define MBRIF2 0x0004 /* RX Interrupt Active In Mailbox 2 */
-#define MBRIF3 0x0008 /* RX Interrupt Active In Mailbox 3 */
-#define MBRIF4 0x0010 /* RX Interrupt Active In Mailbox 4 */
-#define MBRIF5 0x0020 /* RX Interrupt Active In Mailbox 5 */
-#define MBRIF6 0x0040 /* RX Interrupt Active In Mailbox 6 */
-#define MBRIF7 0x0080 /* RX Interrupt Active In Mailbox 7 */
-#define MBRIF8 0x0100 /* RX Interrupt Active In Mailbox 8 */
-#define MBRIF9 0x0200 /* RX Interrupt Active In Mailbox 9 */
-#define MBRIF10 0x0400 /* RX Interrupt Active In Mailbox 10 */
-#define MBRIF11 0x0800 /* RX Interrupt Active In Mailbox 11 */
-#define MBRIF12 0x1000 /* RX Interrupt Active In Mailbox 12 */
-#define MBRIF13 0x2000 /* RX Interrupt Active In Mailbox 13 */
-#define MBRIF14 0x4000 /* RX Interrupt Active In Mailbox 14 */
-#define MBRIF15 0x8000 /* RX Interrupt Active In Mailbox 15 */
-
-/* CAN_MBRIF2 Masks */
-#define MBRIF16 0x0001 /* RX Interrupt Active In Mailbox 16 */
-#define MBRIF17 0x0002 /* RX Interrupt Active In Mailbox 17 */
-#define MBRIF18 0x0004 /* RX Interrupt Active In Mailbox 18 */
-#define MBRIF19 0x0008 /* RX Interrupt Active In Mailbox 19 */
-#define MBRIF20 0x0010 /* RX Interrupt Active In Mailbox 20 */
-#define MBRIF21 0x0020 /* RX Interrupt Active In Mailbox 21 */
-#define MBRIF22 0x0040 /* RX Interrupt Active In Mailbox 22 */
-#define MBRIF23 0x0080 /* RX Interrupt Active In Mailbox 23 */
-#define MBRIF24 0x0100 /* RX Interrupt Active In Mailbox 24 */
-#define MBRIF25 0x0200 /* RX Interrupt Active In Mailbox 25 */
-#define MBRIF26 0x0400 /* RX Interrupt Active In Mailbox 26 */
-#define MBRIF27 0x0800 /* RX Interrupt Active In Mailbox 27 */
-#define MBRIF28 0x1000 /* RX Interrupt Active In Mailbox 28 */
-#define MBRIF29 0x2000 /* RX Interrupt Active In Mailbox 29 */
-#define MBRIF30 0x4000 /* RX Interrupt Active In Mailbox 30 */
-#define MBRIF31 0x8000 /* RX Interrupt Active In Mailbox 31 */
-
-/* CAN_MBIM1 Masks */
-#define MBIM0 0x0001 /* Enable Interrupt For Mailbox 0 */
-#define MBIM1 0x0002 /* Enable Interrupt For Mailbox 1 */
-#define MBIM2 0x0004 /* Enable Interrupt For Mailbox 2 */
-#define MBIM3 0x0008 /* Enable Interrupt For Mailbox 3 */
-#define MBIM4 0x0010 /* Enable Interrupt For Mailbox 4 */
-#define MBIM5 0x0020 /* Enable Interrupt For Mailbox 5 */
-#define MBIM6 0x0040 /* Enable Interrupt For Mailbox 6 */
-#define MBIM7 0x0080 /* Enable Interrupt For Mailbox 7 */
-#define MBIM8 0x0100 /* Enable Interrupt For Mailbox 8 */
-#define MBIM9 0x0200 /* Enable Interrupt For Mailbox 9 */
-#define MBIM10 0x0400 /* Enable Interrupt For Mailbox 10 */
-#define MBIM11 0x0800 /* Enable Interrupt For Mailbox 11 */
-#define MBIM12 0x1000 /* Enable Interrupt For Mailbox 12 */
-#define MBIM13 0x2000 /* Enable Interrupt For Mailbox 13 */
-#define MBIM14 0x4000 /* Enable Interrupt For Mailbox 14 */
-#define MBIM15 0x8000 /* Enable Interrupt For Mailbox 15 */
-
-/* CAN_MBIM2 Masks */
-#define MBIM16 0x0001 /* Enable Interrupt For Mailbox 16 */
-#define MBIM17 0x0002 /* Enable Interrupt For Mailbox 17 */
-#define MBIM18 0x0004 /* Enable Interrupt For Mailbox 18 */
-#define MBIM19 0x0008 /* Enable Interrupt For Mailbox 19 */
-#define MBIM20 0x0010 /* Enable Interrupt For Mailbox 20 */
-#define MBIM21 0x0020 /* Enable Interrupt For Mailbox 21 */
-#define MBIM22 0x0040 /* Enable Interrupt For Mailbox 22 */
-#define MBIM23 0x0080 /* Enable Interrupt For Mailbox 23 */
-#define MBIM24 0x0100 /* Enable Interrupt For Mailbox 24 */
-#define MBIM25 0x0200 /* Enable Interrupt For Mailbox 25 */
-#define MBIM26 0x0400 /* Enable Interrupt For Mailbox 26 */
-#define MBIM27 0x0800 /* Enable Interrupt For Mailbox 27 */
-#define MBIM28 0x1000 /* Enable Interrupt For Mailbox 28 */
-#define MBIM29 0x2000 /* Enable Interrupt For Mailbox 29 */
-#define MBIM30 0x4000 /* Enable Interrupt For Mailbox 30 */
-#define MBIM31 0x8000 /* Enable Interrupt For Mailbox 31 */
-
-/* CAN_GIM Masks */
-#define EWTIM 0x0001 /* Enable TX Error Count Interrupt */
-#define EWRIM 0x0002 /* Enable RX Error Count Interrupt */
-#define EPIM 0x0004 /* Enable Error-Passive Mode Interrupt */
-#define BOIM 0x0008 /* Enable Bus Off Interrupt */
-#define WUIM 0x0010 /* Enable Wake-Up Interrupt */
-#define UIAIM 0x0020 /* Enable Access To Unimplemented Address Interrupt */
-#define AAIM 0x0040 /* Enable Abort Acknowledge Interrupt */
-#define RMLIM 0x0080 /* Enable RX Message Lost Interrupt */
-#define UCEIM 0x0100 /* Enable Universal Counter Overflow Interrupt */
-#define EXTIM 0x0200 /* Enable External Trigger Output Interrupt */
-#define ADIM 0x0400 /* Enable Access Denied Interrupt */
-
-/* CAN_GIS Masks */
-#define EWTIS 0x0001 /* TX Error Count IRQ Status */
-#define EWRIS 0x0002 /* RX Error Count IRQ Status */
-#define EPIS 0x0004 /* Error-Passive Mode IRQ Status */
-#define BOIS 0x0008 /* Bus Off IRQ Status */
-#define WUIS 0x0010 /* Wake-Up IRQ Status */
-#define UIAIS 0x0020 /* Access To Unimplemented Address IRQ Status */
-#define AAIS 0x0040 /* Abort Acknowledge IRQ Status */
-#define RMLIS 0x0080 /* RX Message Lost IRQ Status */
-#define UCEIS 0x0100 /* Universal Counter Overflow IRQ Status */
-#define EXTIS 0x0200 /* External Trigger Output IRQ Status */
-#define ADIS 0x0400 /* Access Denied IRQ Status */
-
-/* CAN_GIF Masks */
-#define EWTIF 0x0001 /* TX Error Count IRQ Flag */
-#define EWRIF 0x0002 /* RX Error Count IRQ Flag */
-#define EPIF 0x0004 /* Error-Passive Mode IRQ Flag */
-#define BOIF 0x0008 /* Bus Off IRQ Flag */
-#define WUIF 0x0010 /* Wake-Up IRQ Flag */
-#define UIAIF 0x0020 /* Access To Unimplemented Address IRQ Flag */
-#define AAIF 0x0040 /* Abort Acknowledge IRQ Flag */
-#define RMLIF 0x0080 /* RX Message Lost IRQ Flag */
-#define UCEIF 0x0100 /* Universal Counter Overflow IRQ Flag */
-#define EXTIF 0x0200 /* External Trigger Output IRQ Flag */
-#define ADIF 0x0400 /* Access Denied IRQ Flag */
-
-/* CAN_UCCNF Masks */
-#define UCCNF 0x000F /* Universal Counter Mode */
-#define UC_STAMP 0x0001 /* Timestamp Mode */
-#define UC_WDOG 0x0002 /* Watchdog Mode */
-#define UC_AUTOTX 0x0003 /* Auto-Transmit Mode */
-#define UC_ERROR 0x0006 /* CAN Error Frame Count */
-#define UC_OVER 0x0007 /* CAN Overload Frame Count */
-#define UC_LOST 0x0008 /* Arbitration Lost During TX Count */
-#define UC_AA 0x0009 /* TX Abort Count */
-#define UC_TA 0x000A /* TX Successful Count */
-#define UC_REJECT 0x000B /* RX Message Rejected Count */
-#define UC_RML 0x000C /* RX Message Lost Count */
-#define UC_RX 0x000D /* Total Successful RX Messages Count */
-#define UC_RMP 0x000E /* Successful RX W/Matching ID Count */
-#define UC_ALL 0x000F /* Correct Message On CAN Bus Line Count */
-#define UCRC 0x0020 /* Universal Counter Reload/Clear */
-#define UCCT 0x0040 /* Universal Counter CAN Trigger */
-#define UCE 0x0080 /* Universal Counter Enable */
-
-/* CAN_ESR Masks */
-#define ACKE 0x0004 /* Acknowledge Error */
-#define SER 0x0008 /* Stuff Error */
-#define CRCE 0x0010 /* CRC Error */
-#define SA0 0x0020 /* Stuck At Dominant Error */
-#define BEF 0x0040 /* Bit Error Flag */
-#define FER 0x0080 /* Form Error Flag */
-
-/* CAN_EWR Masks */
-#define EWLREC 0x00FF /* RX Error Count Limit (For EWRIS) */
-#define EWLTEC 0xFF00 /* TX Error Count Limit (For EWTIS) */
-
/* ******************* PIN CONTROL REGISTER MASKS ************************/
/* PORT_MUX Masks */
#define PJSE 0x0001 /* Port J SPI/SPORT Enable */
diff --git a/arch/blackfin/mach-bf537/include/mach/irq.h b/arch/blackfin/mach-bf537/include/mach/irq.h
index 789a4f226f7b..1a6d617c5fcf 100644
--- a/arch/blackfin/mach-bf537/include/mach/irq.h
+++ b/arch/blackfin/mach-bf537/include/mach/irq.h
@@ -74,7 +74,7 @@
#define IRQ_PPI_ERROR 42 /*PPI Error Interrupt */
#define IRQ_CAN_ERROR 43 /*CAN Error Interrupt */
-#define IRQ_MAC_ERROR 44 /*PPI Error Interrupt */
+#define IRQ_MAC_ERROR 44 /*MAC Status/Error Interrupt */
#define IRQ_SPORT0_ERROR 45 /*SPORT0 Error Interrupt */
#define IRQ_SPORT1_ERROR 46 /*SPORT1 Error Interrupt */
#define IRQ_SPI_ERROR 47 /*SPI Error Interrupt */
diff --git a/arch/blackfin/mach-bf538/include/mach/defBF539.h b/arch/blackfin/mach-bf538/include/mach/defBF539.h
index fac563e6f62f..d7061d9f2a83 100644
--- a/arch/blackfin/mach-bf538/include/mach/defBF539.h
+++ b/arch/blackfin/mach-bf538/include/mach/defBF539.h
@@ -2418,625 +2418,4 @@
#define RCV_HALF 0x0004 /* Receive FIFO Has 1 Byte To Read */
#define RCV_FULL 0x000C /* Receive FIFO Full (2 Bytes To Read) */
-
-/* ************ CONTROLLER AREA NETWORK (CAN) MASKS ***************/
-/* CAN_CONTROL Masks */
-#define SRS 0x0001 /* Software Reset */
-#define DNM 0x0002 /* Device Net Mode */
-#define ABO 0x0004 /* Auto-Bus On Enable */
-#define WBA 0x0010 /* Wake-Up On CAN Bus Activity Enable */
-#define SMR 0x0020 /* Sleep Mode Request */
-#define CSR 0x0040 /* CAN Suspend Mode Request */
-#define CCR 0x0080 /* CAN Configuration Mode Request */
-
-/* CAN_STATUS Masks */
-#define WT 0x0001 /* TX Warning Flag */
-#define WR 0x0002 /* RX Warning Flag */
-#define EP 0x0004 /* Error Passive Mode */
-#define EBO 0x0008 /* Error Bus Off Mode */
-#define CSA 0x0040 /* Suspend Mode Acknowledge */
-#define CCA 0x0080 /* Configuration Mode Acknowledge */
-#define MBPTR 0x1F00 /* Mailbox Pointer */
-#define TRM 0x4000 /* Transmit Mode */
-#define REC 0x8000 /* Receive Mode */
-
-/* CAN_CLOCK Masks */
-#define BRP 0x03FF /* Bit-Rate Pre-Scaler */
-
-/* CAN_TIMING Masks */
-#define TSEG1 0x000F /* Time Segment 1 */
-#define TSEG2 0x0070 /* Time Segment 2 */
-#define SAM 0x0080 /* Sampling */
-#define SJW 0x0300 /* Synchronization Jump Width */
-
-/* CAN_DEBUG Masks */
-#define DEC 0x0001 /* Disable CAN Error Counters */
-#define DRI 0x0002 /* Disable CAN RX Input */
-#define DTO 0x0004 /* Disable CAN TX Output */
-#define DIL 0x0008 /* Disable CAN Internal Loop */
-#define MAA 0x0010 /* Mode Auto-Acknowledge Enable */
-#define MRB 0x0020 /* Mode Read Back Enable */
-#define CDE 0x8000 /* CAN Debug Enable */
-
-/* CAN_CEC Masks */
-#define RXECNT 0x00FF /* Receive Error Counter */
-#define TXECNT 0xFF00 /* Transmit Error Counter */
-
-/* CAN_INTR Masks */
-#define MBRIRQ 0x0001 /* Mailbox Receive Interrupt */
-#define MBRIF MBRIRQ /* legacy */
-#define MBTIRQ 0x0002 /* Mailbox Transmit Interrupt */
-#define MBTIF MBTIRQ /* legacy */
-#define GIRQ 0x0004 /* Global Interrupt */
-#define SMACK 0x0008 /* Sleep Mode Acknowledge */
-#define CANTX 0x0040 /* CAN TX Bus Value */
-#define CANRX 0x0080 /* CAN RX Bus Value */
-
-/* CAN_MBxx_ID1 and CAN_MBxx_ID0 Masks */
-#define DFC 0xFFFF /* Data Filtering Code (If Enabled) (ID0) */
-#define EXTID_LO 0xFFFF /* Lower 16 Bits of Extended Identifier (ID0) */
-#define EXTID_HI 0x0003 /* Upper 2 Bits of Extended Identifier (ID1) */
-#define BASEID 0x1FFC /* Base Identifier */
-#define IDE 0x2000 /* Identifier Extension */
-#define RTR 0x4000 /* Remote Frame Transmission Request */
-#define AME 0x8000 /* Acceptance Mask Enable */
-
-/* CAN_MBxx_TIMESTAMP Masks */
-#define TSV 0xFFFF /* Timestamp */
-
-/* CAN_MBxx_LENGTH Masks */
-#define DLC 0x000F /* Data Length Code */
-
-/* CAN_AMxxH and CAN_AMxxL Masks */
-#define DFM 0xFFFF /* Data Field Mask (If Enabled) (CAN_AMxxL) */
-#define EXTID_LO 0xFFFF /* Lower 16 Bits of Extended Identifier (CAN_AMxxL) */
-#define EXTID_HI 0x0003 /* Upper 2 Bits of Extended Identifier (CAN_AMxxH) */
-#define BASEID 0x1FFC /* Base Identifier */
-#define AMIDE 0x2000 /* Acceptance Mask ID Extension Enable */
-#define FMD 0x4000 /* Full Mask Data Field Enable */
-#define FDF 0x8000 /* Filter On Data Field Enable */
-
-/* CAN_MC1 Masks */
-#define MC0 0x0001 /* Enable Mailbox 0 */
-#define MC1 0x0002 /* Enable Mailbox 1 */
-#define MC2 0x0004 /* Enable Mailbox 2 */
-#define MC3 0x0008 /* Enable Mailbox 3 */
-#define MC4 0x0010 /* Enable Mailbox 4 */
-#define MC5 0x0020 /* Enable Mailbox 5 */
-#define MC6 0x0040 /* Enable Mailbox 6 */
-#define MC7 0x0080 /* Enable Mailbox 7 */
-#define MC8 0x0100 /* Enable Mailbox 8 */
-#define MC9 0x0200 /* Enable Mailbox 9 */
-#define MC10 0x0400 /* Enable Mailbox 10 */
-#define MC11 0x0800 /* Enable Mailbox 11 */
-#define MC12 0x1000 /* Enable Mailbox 12 */
-#define MC13 0x2000 /* Enable Mailbox 13 */
-#define MC14 0x4000 /* Enable Mailbox 14 */
-#define MC15 0x8000 /* Enable Mailbox 15 */
-
-/* CAN_MC2 Masks */
-#define MC16 0x0001 /* Enable Mailbox 16 */
-#define MC17 0x0002 /* Enable Mailbox 17 */
-#define MC18 0x0004 /* Enable Mailbox 18 */
-#define MC19 0x0008 /* Enable Mailbox 19 */
-#define MC20 0x0010 /* Enable Mailbox 20 */
-#define MC21 0x0020 /* Enable Mailbox 21 */
-#define MC22 0x0040 /* Enable Mailbox 22 */
-#define MC23 0x0080 /* Enable Mailbox 23 */
-#define MC24 0x0100 /* Enable Mailbox 24 */
-#define MC25 0x0200 /* Enable Mailbox 25 */
-#define MC26 0x0400 /* Enable Mailbox 26 */
-#define MC27 0x0800 /* Enable Mailbox 27 */
-#define MC28 0x1000 /* Enable Mailbox 28 */
-#define MC29 0x2000 /* Enable Mailbox 29 */
-#define MC30 0x4000 /* Enable Mailbox 30 */
-#define MC31 0x8000 /* Enable Mailbox 31 */
-
-/* CAN_MD1 Masks */
-#define MD0 0x0001 /* Enable Mailbox 0 For Receive */
-#define MD1 0x0002 /* Enable Mailbox 1 For Receive */
-#define MD2 0x0004 /* Enable Mailbox 2 For Receive */
-#define MD3 0x0008 /* Enable Mailbox 3 For Receive */
-#define MD4 0x0010 /* Enable Mailbox 4 For Receive */
-#define MD5 0x0020 /* Enable Mailbox 5 For Receive */
-#define MD6 0x0040 /* Enable Mailbox 6 For Receive */
-#define MD7 0x0080 /* Enable Mailbox 7 For Receive */
-#define MD8 0x0100 /* Enable Mailbox 8 For Receive */
-#define MD9 0x0200 /* Enable Mailbox 9 For Receive */
-#define MD10 0x0400 /* Enable Mailbox 10 For Receive */
-#define MD11 0x0800 /* Enable Mailbox 11 For Receive */
-#define MD12 0x1000 /* Enable Mailbox 12 For Receive */
-#define MD13 0x2000 /* Enable Mailbox 13 For Receive */
-#define MD14 0x4000 /* Enable Mailbox 14 For Receive */
-#define MD15 0x8000 /* Enable Mailbox 15 For Receive */
-
-/* CAN_MD2 Masks */
-#define MD16 0x0001 /* Enable Mailbox 16 For Receive */
-#define MD17 0x0002 /* Enable Mailbox 17 For Receive */
-#define MD18 0x0004 /* Enable Mailbox 18 For Receive */
-#define MD19 0x0008 /* Enable Mailbox 19 For Receive */
-#define MD20 0x0010 /* Enable Mailbox 20 For Receive */
-#define MD21 0x0020 /* Enable Mailbox 21 For Receive */
-#define MD22 0x0040 /* Enable Mailbox 22 For Receive */
-#define MD23 0x0080 /* Enable Mailbox 23 For Receive */
-#define MD24 0x0100 /* Enable Mailbox 24 For Receive */
-#define MD25 0x0200 /* Enable Mailbox 25 For Receive */
-#define MD26 0x0400 /* Enable Mailbox 26 For Receive */
-#define MD27 0x0800 /* Enable Mailbox 27 For Receive */
-#define MD28 0x1000 /* Enable Mailbox 28 For Receive */
-#define MD29 0x2000 /* Enable Mailbox 29 For Receive */
-#define MD30 0x4000 /* Enable Mailbox 30 For Receive */
-#define MD31 0x8000 /* Enable Mailbox 31 For Receive */
-
-/* CAN_RMP1 Masks */
-#define RMP0 0x0001 /* RX Message Pending In Mailbox 0 */
-#define RMP1 0x0002 /* RX Message Pending In Mailbox 1 */
-#define RMP2 0x0004 /* RX Message Pending In Mailbox 2 */
-#define RMP3 0x0008 /* RX Message Pending In Mailbox 3 */
-#define RMP4 0x0010 /* RX Message Pending In Mailbox 4 */
-#define RMP5 0x0020 /* RX Message Pending In Mailbox 5 */
-#define RMP6 0x0040 /* RX Message Pending In Mailbox 6 */
-#define RMP7 0x0080 /* RX Message Pending In Mailbox 7 */
-#define RMP8 0x0100 /* RX Message Pending In Mailbox 8 */
-#define RMP9 0x0200 /* RX Message Pending In Mailbox 9 */
-#define RMP10 0x0400 /* RX Message Pending In Mailbox 10 */
-#define RMP11 0x0800 /* RX Message Pending In Mailbox 11 */
-#define RMP12 0x1000 /* RX Message Pending In Mailbox 12 */
-#define RMP13 0x2000 /* RX Message Pending In Mailbox 13 */
-#define RMP14 0x4000 /* RX Message Pending In Mailbox 14 */
-#define RMP15 0x8000 /* RX Message Pending In Mailbox 15 */
-
-/* CAN_RMP2 Masks */
-#define RMP16 0x0001 /* RX Message Pending In Mailbox 16 */
-#define RMP17 0x0002 /* RX Message Pending In Mailbox 17 */
-#define RMP18 0x0004 /* RX Message Pending In Mailbox 18 */
-#define RMP19 0x0008 /* RX Message Pending In Mailbox 19 */
-#define RMP20 0x0010 /* RX Message Pending In Mailbox 20 */
-#define RMP21 0x0020 /* RX Message Pending In Mailbox 21 */
-#define RMP22 0x0040 /* RX Message Pending In Mailbox 22 */
-#define RMP23 0x0080 /* RX Message Pending In Mailbox 23 */
-#define RMP24 0x0100 /* RX Message Pending In Mailbox 24 */
-#define RMP25 0x0200 /* RX Message Pending In Mailbox 25 */
-#define RMP26 0x0400 /* RX Message Pending In Mailbox 26 */
-#define RMP27 0x0800 /* RX Message Pending In Mailbox 27 */
-#define RMP28 0x1000 /* RX Message Pending In Mailbox 28 */
-#define RMP29 0x2000 /* RX Message Pending In Mailbox 29 */
-#define RMP30 0x4000 /* RX Message Pending In Mailbox 30 */
-#define RMP31 0x8000 /* RX Message Pending In Mailbox 31 */
-
-/* CAN_RML1 Masks */
-#define RML0 0x0001 /* RX Message Lost In Mailbox 0 */
-#define RML1 0x0002 /* RX Message Lost In Mailbox 1 */
-#define RML2 0x0004 /* RX Message Lost In Mailbox 2 */
-#define RML3 0x0008 /* RX Message Lost In Mailbox 3 */
-#define RML4 0x0010 /* RX Message Lost In Mailbox 4 */
-#define RML5 0x0020 /* RX Message Lost In Mailbox 5 */
-#define RML6 0x0040 /* RX Message Lost In Mailbox 6 */
-#define RML7 0x0080 /* RX Message Lost In Mailbox 7 */
-#define RML8 0x0100 /* RX Message Lost In Mailbox 8 */
-#define RML9 0x0200 /* RX Message Lost In Mailbox 9 */
-#define RML10 0x0400 /* RX Message Lost In Mailbox 10 */
-#define RML11 0x0800 /* RX Message Lost In Mailbox 11 */
-#define RML12 0x1000 /* RX Message Lost In Mailbox 12 */
-#define RML13 0x2000 /* RX Message Lost In Mailbox 13 */
-#define RML14 0x4000 /* RX Message Lost In Mailbox 14 */
-#define RML15 0x8000 /* RX Message Lost In Mailbox 15 */
-
-/* CAN_RML2 Masks */
-#define RML16 0x0001 /* RX Message Lost In Mailbox 16 */
-#define RML17 0x0002 /* RX Message Lost In Mailbox 17 */
-#define RML18 0x0004 /* RX Message Lost In Mailbox 18 */
-#define RML19 0x0008 /* RX Message Lost In Mailbox 19 */
-#define RML20 0x0010 /* RX Message Lost In Mailbox 20 */
-#define RML21 0x0020 /* RX Message Lost In Mailbox 21 */
-#define RML22 0x0040 /* RX Message Lost In Mailbox 22 */
-#define RML23 0x0080 /* RX Message Lost In Mailbox 23 */
-#define RML24 0x0100 /* RX Message Lost In Mailbox 24 */
-#define RML25 0x0200 /* RX Message Lost In Mailbox 25 */
-#define RML26 0x0400 /* RX Message Lost In Mailbox 26 */
-#define RML27 0x0800 /* RX Message Lost In Mailbox 27 */
-#define RML28 0x1000 /* RX Message Lost In Mailbox 28 */
-#define RML29 0x2000 /* RX Message Lost In Mailbox 29 */
-#define RML30 0x4000 /* RX Message Lost In Mailbox 30 */
-#define RML31 0x8000 /* RX Message Lost In Mailbox 31 */
-
-/* CAN_OPSS1 Masks */
-#define OPSS0 0x0001 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 0 */
-#define OPSS1 0x0002 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 1 */
-#define OPSS2 0x0004 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 2 */
-#define OPSS3 0x0008 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 3 */
-#define OPSS4 0x0010 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 4 */
-#define OPSS5 0x0020 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 5 */
-#define OPSS6 0x0040 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 6 */
-#define OPSS7 0x0080 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 7 */
-#define OPSS8 0x0100 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 8 */
-#define OPSS9 0x0200 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 9 */
-#define OPSS10 0x0400 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 10 */
-#define OPSS11 0x0800 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 11 */
-#define OPSS12 0x1000 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 12 */
-#define OPSS13 0x2000 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 13 */
-#define OPSS14 0x4000 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 14 */
-#define OPSS15 0x8000 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 15 */
-
-/* CAN_OPSS2 Masks */
-#define OPSS16 0x0001 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 16 */
-#define OPSS17 0x0002 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 17 */
-#define OPSS18 0x0004 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 18 */
-#define OPSS19 0x0008 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 19 */
-#define OPSS20 0x0010 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 20 */
-#define OPSS21 0x0020 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 21 */
-#define OPSS22 0x0040 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 22 */
-#define OPSS23 0x0080 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 23 */
-#define OPSS24 0x0100 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 24 */
-#define OPSS25 0x0200 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 25 */
-#define OPSS26 0x0400 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 26 */
-#define OPSS27 0x0800 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 27 */
-#define OPSS28 0x1000 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 28 */
-#define OPSS29 0x2000 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 29 */
-#define OPSS30 0x4000 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 30 */
-#define OPSS31 0x8000 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 31 */
-
-/* CAN_TRR1 Masks */
-#define TRR0 0x0001 /* Deny But Don't Lock Access To Mailbox 0 */
-#define TRR1 0x0002 /* Deny But Don't Lock Access To Mailbox 1 */
-#define TRR2 0x0004 /* Deny But Don't Lock Access To Mailbox 2 */
-#define TRR3 0x0008 /* Deny But Don't Lock Access To Mailbox 3 */
-#define TRR4 0x0010 /* Deny But Don't Lock Access To Mailbox 4 */
-#define TRR5 0x0020 /* Deny But Don't Lock Access To Mailbox 5 */
-#define TRR6 0x0040 /* Deny But Don't Lock Access To Mailbox 6 */
-#define TRR7 0x0080 /* Deny But Don't Lock Access To Mailbox 7 */
-#define TRR8 0x0100 /* Deny But Don't Lock Access To Mailbox 8 */
-#define TRR9 0x0200 /* Deny But Don't Lock Access To Mailbox 9 */
-#define TRR10 0x0400 /* Deny But Don't Lock Access To Mailbox 10 */
-#define TRR11 0x0800 /* Deny But Don't Lock Access To Mailbox 11 */
-#define TRR12 0x1000 /* Deny But Don't Lock Access To Mailbox 12 */
-#define TRR13 0x2000 /* Deny But Don't Lock Access To Mailbox 13 */
-#define TRR14 0x4000 /* Deny But Don't Lock Access To Mailbox 14 */
-#define TRR15 0x8000 /* Deny But Don't Lock Access To Mailbox 15 */
-
-/* CAN_TRR2 Masks */
-#define TRR16 0x0001 /* Deny But Don't Lock Access To Mailbox 16 */
-#define TRR17 0x0002 /* Deny But Don't Lock Access To Mailbox 17 */
-#define TRR18 0x0004 /* Deny But Don't Lock Access To Mailbox 18 */
-#define TRR19 0x0008 /* Deny But Don't Lock Access To Mailbox 19 */
-#define TRR20 0x0010 /* Deny But Don't Lock Access To Mailbox 20 */
-#define TRR21 0x0020 /* Deny But Don't Lock Access To Mailbox 21 */
-#define TRR22 0x0040 /* Deny But Don't Lock Access To Mailbox 22 */
-#define TRR23 0x0080 /* Deny But Don't Lock Access To Mailbox 23 */
-#define TRR24 0x0100 /* Deny But Don't Lock Access To Mailbox 24 */
-#define TRR25 0x0200 /* Deny But Don't Lock Access To Mailbox 25 */
-#define TRR26 0x0400 /* Deny But Don't Lock Access To Mailbox 26 */
-#define TRR27 0x0800 /* Deny But Don't Lock Access To Mailbox 27 */
-#define TRR28 0x1000 /* Deny But Don't Lock Access To Mailbox 28 */
-#define TRR29 0x2000 /* Deny But Don't Lock Access To Mailbox 29 */
-#define TRR30 0x4000 /* Deny But Don't Lock Access To Mailbox 30 */
-#define TRR31 0x8000 /* Deny But Don't Lock Access To Mailbox 31 */
-
-/* CAN_TRS1 Masks */
-#define TRS0 0x0001 /* Remote Frame Request For Mailbox 0 */
-#define TRS1 0x0002 /* Remote Frame Request For Mailbox 1 */
-#define TRS2 0x0004 /* Remote Frame Request For Mailbox 2 */
-#define TRS3 0x0008 /* Remote Frame Request For Mailbox 3 */
-#define TRS4 0x0010 /* Remote Frame Request For Mailbox 4 */
-#define TRS5 0x0020 /* Remote Frame Request For Mailbox 5 */
-#define TRS6 0x0040 /* Remote Frame Request For Mailbox 6 */
-#define TRS7 0x0080 /* Remote Frame Request For Mailbox 7 */
-#define TRS8 0x0100 /* Remote Frame Request For Mailbox 8 */
-#define TRS9 0x0200 /* Remote Frame Request For Mailbox 9 */
-#define TRS10 0x0400 /* Remote Frame Request For Mailbox 10 */
-#define TRS11 0x0800 /* Remote Frame Request For Mailbox 11 */
-#define TRS12 0x1000 /* Remote Frame Request For Mailbox 12 */
-#define TRS13 0x2000 /* Remote Frame Request For Mailbox 13 */
-#define TRS14 0x4000 /* Remote Frame Request For Mailbox 14 */
-#define TRS15 0x8000 /* Remote Frame Request For Mailbox 15 */
-
-/* CAN_TRS2 Masks */
-#define TRS16 0x0001 /* Remote Frame Request For Mailbox 16 */
-#define TRS17 0x0002 /* Remote Frame Request For Mailbox 17 */
-#define TRS18 0x0004 /* Remote Frame Request For Mailbox 18 */
-#define TRS19 0x0008 /* Remote Frame Request For Mailbox 19 */
-#define TRS20 0x0010 /* Remote Frame Request For Mailbox 20 */
-#define TRS21 0x0020 /* Remote Frame Request For Mailbox 21 */
-#define TRS22 0x0040 /* Remote Frame Request For Mailbox 22 */
-#define TRS23 0x0080 /* Remote Frame Request For Mailbox 23 */
-#define TRS24 0x0100 /* Remote Frame Request For Mailbox 24 */
-#define TRS25 0x0200 /* Remote Frame Request For Mailbox 25 */
-#define TRS26 0x0400 /* Remote Frame Request For Mailbox 26 */
-#define TRS27 0x0800 /* Remote Frame Request For Mailbox 27 */
-#define TRS28 0x1000 /* Remote Frame Request For Mailbox 28 */
-#define TRS29 0x2000 /* Remote Frame Request For Mailbox 29 */
-#define TRS30 0x4000 /* Remote Frame Request For Mailbox 30 */
-#define TRS31 0x8000 /* Remote Frame Request For Mailbox 31 */
-
-/* CAN_AA1 Masks */
-#define AA0 0x0001 /* Aborted Message In Mailbox 0 */
-#define AA1 0x0002 /* Aborted Message In Mailbox 1 */
-#define AA2 0x0004 /* Aborted Message In Mailbox 2 */
-#define AA3 0x0008 /* Aborted Message In Mailbox 3 */
-#define AA4 0x0010 /* Aborted Message In Mailbox 4 */
-#define AA5 0x0020 /* Aborted Message In Mailbox 5 */
-#define AA6 0x0040 /* Aborted Message In Mailbox 6 */
-#define AA7 0x0080 /* Aborted Message In Mailbox 7 */
-#define AA8 0x0100 /* Aborted Message In Mailbox 8 */
-#define AA9 0x0200 /* Aborted Message In Mailbox 9 */
-#define AA10 0x0400 /* Aborted Message In Mailbox 10 */
-#define AA11 0x0800 /* Aborted Message In Mailbox 11 */
-#define AA12 0x1000 /* Aborted Message In Mailbox 12 */
-#define AA13 0x2000 /* Aborted Message In Mailbox 13 */
-#define AA14 0x4000 /* Aborted Message In Mailbox 14 */
-#define AA15 0x8000 /* Aborted Message In Mailbox 15 */
-
-/* CAN_AA2 Masks */
-#define AA16 0x0001 /* Aborted Message In Mailbox 16 */
-#define AA17 0x0002 /* Aborted Message In Mailbox 17 */
-#define AA18 0x0004 /* Aborted Message In Mailbox 18 */
-#define AA19 0x0008 /* Aborted Message In Mailbox 19 */
-#define AA20 0x0010 /* Aborted Message In Mailbox 20 */
-#define AA21 0x0020 /* Aborted Message In Mailbox 21 */
-#define AA22 0x0040 /* Aborted Message In Mailbox 22 */
-#define AA23 0x0080 /* Aborted Message In Mailbox 23 */
-#define AA24 0x0100 /* Aborted Message In Mailbox 24 */
-#define AA25 0x0200 /* Aborted Message In Mailbox 25 */
-#define AA26 0x0400 /* Aborted Message In Mailbox 26 */
-#define AA27 0x0800 /* Aborted Message In Mailbox 27 */
-#define AA28 0x1000 /* Aborted Message In Mailbox 28 */
-#define AA29 0x2000 /* Aborted Message In Mailbox 29 */
-#define AA30 0x4000 /* Aborted Message In Mailbox 30 */
-#define AA31 0x8000 /* Aborted Message In Mailbox 31 */
-
-/* CAN_TA1 Masks */
-#define TA0 0x0001 /* Transmit Successful From Mailbox 0 */
-#define TA1 0x0002 /* Transmit Successful From Mailbox 1 */
-#define TA2 0x0004 /* Transmit Successful From Mailbox 2 */
-#define TA3 0x0008 /* Transmit Successful From Mailbox 3 */
-#define TA4 0x0010 /* Transmit Successful From Mailbox 4 */
-#define TA5 0x0020 /* Transmit Successful From Mailbox 5 */
-#define TA6 0x0040 /* Transmit Successful From Mailbox 6 */
-#define TA7 0x0080 /* Transmit Successful From Mailbox 7 */
-#define TA8 0x0100 /* Transmit Successful From Mailbox 8 */
-#define TA9 0x0200 /* Transmit Successful From Mailbox 9 */
-#define TA10 0x0400 /* Transmit Successful From Mailbox 10 */
-#define TA11 0x0800 /* Transmit Successful From Mailbox 11 */
-#define TA12 0x1000 /* Transmit Successful From Mailbox 12 */
-#define TA13 0x2000 /* Transmit Successful From Mailbox 13 */
-#define TA14 0x4000 /* Transmit Successful From Mailbox 14 */
-#define TA15 0x8000 /* Transmit Successful From Mailbox 15 */
-
-/* CAN_TA2 Masks */
-#define TA16 0x0001 /* Transmit Successful From Mailbox 16 */
-#define TA17 0x0002 /* Transmit Successful From Mailbox 17 */
-#define TA18 0x0004 /* Transmit Successful From Mailbox 18 */
-#define TA19 0x0008 /* Transmit Successful From Mailbox 19 */
-#define TA20 0x0010 /* Transmit Successful From Mailbox 20 */
-#define TA21 0x0020 /* Transmit Successful From Mailbox 21 */
-#define TA22 0x0040 /* Transmit Successful From Mailbox 22 */
-#define TA23 0x0080 /* Transmit Successful From Mailbox 23 */
-#define TA24 0x0100 /* Transmit Successful From Mailbox 24 */
-#define TA25 0x0200 /* Transmit Successful From Mailbox 25 */
-#define TA26 0x0400 /* Transmit Successful From Mailbox 26 */
-#define TA27 0x0800 /* Transmit Successful From Mailbox 27 */
-#define TA28 0x1000 /* Transmit Successful From Mailbox 28 */
-#define TA29 0x2000 /* Transmit Successful From Mailbox 29 */
-#define TA30 0x4000 /* Transmit Successful From Mailbox 30 */
-#define TA31 0x8000 /* Transmit Successful From Mailbox 31 */
-
-/* CAN_MBTD Masks */
-#define TDPTR 0x001F /* Mailbox To Temporarily Disable */
-#define TDA 0x0040 /* Temporary Disable Acknowledge */
-#define TDR 0x0080 /* Temporary Disable Request */
-
-/* CAN_RFH1 Masks */
-#define RFH0 0x0001 /* Enable Automatic Remote Frame Handling For Mailbox 0 */
-#define RFH1 0x0002 /* Enable Automatic Remote Frame Handling For Mailbox 1 */
-#define RFH2 0x0004 /* Enable Automatic Remote Frame Handling For Mailbox 2 */
-#define RFH3 0x0008 /* Enable Automatic Remote Frame Handling For Mailbox 3 */
-#define RFH4 0x0010 /* Enable Automatic Remote Frame Handling For Mailbox 4 */
-#define RFH5 0x0020 /* Enable Automatic Remote Frame Handling For Mailbox 5 */
-#define RFH6 0x0040 /* Enable Automatic Remote Frame Handling For Mailbox 6 */
-#define RFH7 0x0080 /* Enable Automatic Remote Frame Handling For Mailbox 7 */
-#define RFH8 0x0100 /* Enable Automatic Remote Frame Handling For Mailbox 8 */
-#define RFH9 0x0200 /* Enable Automatic Remote Frame Handling For Mailbox 9 */
-#define RFH10 0x0400 /* Enable Automatic Remote Frame Handling For Mailbox 10 */
-#define RFH11 0x0800 /* Enable Automatic Remote Frame Handling For Mailbox 11 */
-#define RFH12 0x1000 /* Enable Automatic Remote Frame Handling For Mailbox 12 */
-#define RFH13 0x2000 /* Enable Automatic Remote Frame Handling For Mailbox 13 */
-#define RFH14 0x4000 /* Enable Automatic Remote Frame Handling For Mailbox 14 */
-#define RFH15 0x8000 /* Enable Automatic Remote Frame Handling For Mailbox 15 */
-
-/* CAN_RFH2 Masks */
-#define RFH16 0x0001 /* Enable Automatic Remote Frame Handling For Mailbox 16 */
-#define RFH17 0x0002 /* Enable Automatic Remote Frame Handling For Mailbox 17 */
-#define RFH18 0x0004 /* Enable Automatic Remote Frame Handling For Mailbox 18 */
-#define RFH19 0x0008 /* Enable Automatic Remote Frame Handling For Mailbox 19 */
-#define RFH20 0x0010 /* Enable Automatic Remote Frame Handling For Mailbox 20 */
-#define RFH21 0x0020 /* Enable Automatic Remote Frame Handling For Mailbox 21 */
-#define RFH22 0x0040 /* Enable Automatic Remote Frame Handling For Mailbox 22 */
-#define RFH23 0x0080 /* Enable Automatic Remote Frame Handling For Mailbox 23 */
-#define RFH24 0x0100 /* Enable Automatic Remote Frame Handling For Mailbox 24 */
-#define RFH25 0x0200 /* Enable Automatic Remote Frame Handling For Mailbox 25 */
-#define RFH26 0x0400 /* Enable Automatic Remote Frame Handling For Mailbox 26 */
-#define RFH27 0x0800 /* Enable Automatic Remote Frame Handling For Mailbox 27 */
-#define RFH28 0x1000 /* Enable Automatic Remote Frame Handling For Mailbox 28 */
-#define RFH29 0x2000 /* Enable Automatic Remote Frame Handling For Mailbox 29 */
-#define RFH30 0x4000 /* Enable Automatic Remote Frame Handling For Mailbox 30 */
-#define RFH31 0x8000 /* Enable Automatic Remote Frame Handling For Mailbox 31 */
-
-/* CAN_MBTIF1 Masks */
-#define MBTIF0 0x0001 /* TX Interrupt Active In Mailbox 0 */
-#define MBTIF1 0x0002 /* TX Interrupt Active In Mailbox 1 */
-#define MBTIF2 0x0004 /* TX Interrupt Active In Mailbox 2 */
-#define MBTIF3 0x0008 /* TX Interrupt Active In Mailbox 3 */
-#define MBTIF4 0x0010 /* TX Interrupt Active In Mailbox 4 */
-#define MBTIF5 0x0020 /* TX Interrupt Active In Mailbox 5 */
-#define MBTIF6 0x0040 /* TX Interrupt Active In Mailbox 6 */
-#define MBTIF7 0x0080 /* TX Interrupt Active In Mailbox 7 */
-#define MBTIF8 0x0100 /* TX Interrupt Active In Mailbox 8 */
-#define MBTIF9 0x0200 /* TX Interrupt Active In Mailbox 9 */
-#define MBTIF10 0x0400 /* TX Interrupt Active In Mailbox 10 */
-#define MBTIF11 0x0800 /* TX Interrupt Active In Mailbox 11 */
-#define MBTIF12 0x1000 /* TX Interrupt Active In Mailbox 12 */
-#define MBTIF13 0x2000 /* TX Interrupt Active In Mailbox 13 */
-#define MBTIF14 0x4000 /* TX Interrupt Active In Mailbox 14 */
-#define MBTIF15 0x8000 /* TX Interrupt Active In Mailbox 15 */
-
-/* CAN_MBTIF2 Masks */
-#define MBTIF16 0x0001 /* TX Interrupt Active In Mailbox 16 */
-#define MBTIF17 0x0002 /* TX Interrupt Active In Mailbox 17 */
-#define MBTIF18 0x0004 /* TX Interrupt Active In Mailbox 18 */
-#define MBTIF19 0x0008 /* TX Interrupt Active In Mailbox 19 */
-#define MBTIF20 0x0010 /* TX Interrupt Active In Mailbox 20 */
-#define MBTIF21 0x0020 /* TX Interrupt Active In Mailbox 21 */
-#define MBTIF22 0x0040 /* TX Interrupt Active In Mailbox 22 */
-#define MBTIF23 0x0080 /* TX Interrupt Active In Mailbox 23 */
-#define MBTIF24 0x0100 /* TX Interrupt Active In Mailbox 24 */
-#define MBTIF25 0x0200 /* TX Interrupt Active In Mailbox 25 */
-#define MBTIF26 0x0400 /* TX Interrupt Active In Mailbox 26 */
-#define MBTIF27 0x0800 /* TX Interrupt Active In Mailbox 27 */
-#define MBTIF28 0x1000 /* TX Interrupt Active In Mailbox 28 */
-#define MBTIF29 0x2000 /* TX Interrupt Active In Mailbox 29 */
-#define MBTIF30 0x4000 /* TX Interrupt Active In Mailbox 30 */
-#define MBTIF31 0x8000 /* TX Interrupt Active In Mailbox 31 */
-
-/* CAN_MBRIF1 Masks */
-#define MBRIF0 0x0001 /* RX Interrupt Active In Mailbox 0 */
-#define MBRIF1 0x0002 /* RX Interrupt Active In Mailbox 1 */
-#define MBRIF2 0x0004 /* RX Interrupt Active In Mailbox 2 */
-#define MBRIF3 0x0008 /* RX Interrupt Active In Mailbox 3 */
-#define MBRIF4 0x0010 /* RX Interrupt Active In Mailbox 4 */
-#define MBRIF5 0x0020 /* RX Interrupt Active In Mailbox 5 */
-#define MBRIF6 0x0040 /* RX Interrupt Active In Mailbox 6 */
-#define MBRIF7 0x0080 /* RX Interrupt Active In Mailbox 7 */
-#define MBRIF8 0x0100 /* RX Interrupt Active In Mailbox 8 */
-#define MBRIF9 0x0200 /* RX Interrupt Active In Mailbox 9 */
-#define MBRIF10 0x0400 /* RX Interrupt Active In Mailbox 10 */
-#define MBRIF11 0x0800 /* RX Interrupt Active In Mailbox 11 */
-#define MBRIF12 0x1000 /* RX Interrupt Active In Mailbox 12 */
-#define MBRIF13 0x2000 /* RX Interrupt Active In Mailbox 13 */
-#define MBRIF14 0x4000 /* RX Interrupt Active In Mailbox 14 */
-#define MBRIF15 0x8000 /* RX Interrupt Active In Mailbox 15 */
-
-/* CAN_MBRIF2 Masks */
-#define MBRIF16 0x0001 /* RX Interrupt Active In Mailbox 16 */
-#define MBRIF17 0x0002 /* RX Interrupt Active In Mailbox 17 */
-#define MBRIF18 0x0004 /* RX Interrupt Active In Mailbox 18 */
-#define MBRIF19 0x0008 /* RX Interrupt Active In Mailbox 19 */
-#define MBRIF20 0x0010 /* RX Interrupt Active In Mailbox 20 */
-#define MBRIF21 0x0020 /* RX Interrupt Active In Mailbox 21 */
-#define MBRIF22 0x0040 /* RX Interrupt Active In Mailbox 22 */
-#define MBRIF23 0x0080 /* RX Interrupt Active In Mailbox 23 */
-#define MBRIF24 0x0100 /* RX Interrupt Active In Mailbox 24 */
-#define MBRIF25 0x0200 /* RX Interrupt Active In Mailbox 25 */
-#define MBRIF26 0x0400 /* RX Interrupt Active In Mailbox 26 */
-#define MBRIF27 0x0800 /* RX Interrupt Active In Mailbox 27 */
-#define MBRIF28 0x1000 /* RX Interrupt Active In Mailbox 28 */
-#define MBRIF29 0x2000 /* RX Interrupt Active In Mailbox 29 */
-#define MBRIF30 0x4000 /* RX Interrupt Active In Mailbox 30 */
-#define MBRIF31 0x8000 /* RX Interrupt Active In Mailbox 31 */
-
-/* CAN_MBIM1 Masks */
-#define MBIM0 0x0001 /* Enable Interrupt For Mailbox 0 */
-#define MBIM1 0x0002 /* Enable Interrupt For Mailbox 1 */
-#define MBIM2 0x0004 /* Enable Interrupt For Mailbox 2 */
-#define MBIM3 0x0008 /* Enable Interrupt For Mailbox 3 */
-#define MBIM4 0x0010 /* Enable Interrupt For Mailbox 4 */
-#define MBIM5 0x0020 /* Enable Interrupt For Mailbox 5 */
-#define MBIM6 0x0040 /* Enable Interrupt For Mailbox 6 */
-#define MBIM7 0x0080 /* Enable Interrupt For Mailbox 7 */
-#define MBIM8 0x0100 /* Enable Interrupt For Mailbox 8 */
-#define MBIM9 0x0200 /* Enable Interrupt For Mailbox 9 */
-#define MBIM10 0x0400 /* Enable Interrupt For Mailbox 10 */
-#define MBIM11 0x0800 /* Enable Interrupt For Mailbox 11 */
-#define MBIM12 0x1000 /* Enable Interrupt For Mailbox 12 */
-#define MBIM13 0x2000 /* Enable Interrupt For Mailbox 13 */
-#define MBIM14 0x4000 /* Enable Interrupt For Mailbox 14 */
-#define MBIM15 0x8000 /* Enable Interrupt For Mailbox 15 */
-
-/* CAN_MBIM2 Masks */
-#define MBIM16 0x0001 /* Enable Interrupt For Mailbox 16 */
-#define MBIM17 0x0002 /* Enable Interrupt For Mailbox 17 */
-#define MBIM18 0x0004 /* Enable Interrupt For Mailbox 18 */
-#define MBIM19 0x0008 /* Enable Interrupt For Mailbox 19 */
-#define MBIM20 0x0010 /* Enable Interrupt For Mailbox 20 */
-#define MBIM21 0x0020 /* Enable Interrupt For Mailbox 21 */
-#define MBIM22 0x0040 /* Enable Interrupt For Mailbox 22 */
-#define MBIM23 0x0080 /* Enable Interrupt For Mailbox 23 */
-#define MBIM24 0x0100 /* Enable Interrupt For Mailbox 24 */
-#define MBIM25 0x0200 /* Enable Interrupt For Mailbox 25 */
-#define MBIM26 0x0400 /* Enable Interrupt For Mailbox 26 */
-#define MBIM27 0x0800 /* Enable Interrupt For Mailbox 27 */
-#define MBIM28 0x1000 /* Enable Interrupt For Mailbox 28 */
-#define MBIM29 0x2000 /* Enable Interrupt For Mailbox 29 */
-#define MBIM30 0x4000 /* Enable Interrupt For Mailbox 30 */
-#define MBIM31 0x8000 /* Enable Interrupt For Mailbox 31 */
-
-/* CAN_GIM Masks */
-#define EWTIM 0x0001 /* Enable TX Error Count Interrupt */
-#define EWRIM 0x0002 /* Enable RX Error Count Interrupt */
-#define EPIM 0x0004 /* Enable Error-Passive Mode Interrupt */
-#define BOIM 0x0008 /* Enable Bus Off Interrupt */
-#define WUIM 0x0010 /* Enable Wake-Up Interrupt */
-#define UIAIM 0x0020 /* Enable Access To Unimplemented Address Interrupt */
-#define AAIM 0x0040 /* Enable Abort Acknowledge Interrupt */
-#define RMLIM 0x0080 /* Enable RX Message Lost Interrupt */
-#define UCEIM 0x0100 /* Enable Universal Counter Overflow Interrupt */
-#define EXTIM 0x0200 /* Enable External Trigger Output Interrupt */
-#define ADIM 0x0400 /* Enable Access Denied Interrupt */
-
-/* CAN_GIS Masks */
-#define EWTIS 0x0001 /* TX Error Count IRQ Status */
-#define EWRIS 0x0002 /* RX Error Count IRQ Status */
-#define EPIS 0x0004 /* Error-Passive Mode IRQ Status */
-#define BOIS 0x0008 /* Bus Off IRQ Status */
-#define WUIS 0x0010 /* Wake-Up IRQ Status */
-#define UIAIS 0x0020 /* Access To Unimplemented Address IRQ Status */
-#define AAIS 0x0040 /* Abort Acknowledge IRQ Status */
-#define RMLIS 0x0080 /* RX Message Lost IRQ Status */
-#define UCEIS 0x0100 /* Universal Counter Overflow IRQ Status */
-#define EXTIS 0x0200 /* External Trigger Output IRQ Status */
-#define ADIS 0x0400 /* Access Denied IRQ Status */
-
-/* CAN_GIF Masks */
-#define EWTIF 0x0001 /* TX Error Count IRQ Flag */
-#define EWRIF 0x0002 /* RX Error Count IRQ Flag */
-#define EPIF 0x0004 /* Error-Passive Mode IRQ Flag */
-#define BOIF 0x0008 /* Bus Off IRQ Flag */
-#define WUIF 0x0010 /* Wake-Up IRQ Flag */
-#define UIAIF 0x0020 /* Access To Unimplemented Address IRQ Flag */
-#define AAIF 0x0040 /* Abort Acknowledge IRQ Flag */
-#define RMLIF 0x0080 /* RX Message Lost IRQ Flag */
-#define UCEIF 0x0100 /* Universal Counter Overflow IRQ Flag */
-#define EXTIF 0x0200 /* External Trigger Output IRQ Flag */
-#define ADIF 0x0400 /* Access Denied IRQ Flag */
-
-/* CAN_UCCNF Masks */
-#define UCCNF 0x000F /* Universal Counter Mode */
-#define UC_STAMP 0x0001 /* Timestamp Mode */
-#define UC_WDOG 0x0002 /* Watchdog Mode */
-#define UC_AUTOTX 0x0003 /* Auto-Transmit Mode */
-#define UC_ERROR 0x0006 /* CAN Error Frame Count */
-#define UC_OVER 0x0007 /* CAN Overload Frame Count */
-#define UC_LOST 0x0008 /* Arbitration Lost During TX Count */
-#define UC_AA 0x0009 /* TX Abort Count */
-#define UC_TA 0x000A /* TX Successful Count */
-#define UC_REJECT 0x000B /* RX Message Rejected Count */
-#define UC_RML 0x000C /* RX Message Lost Count */
-#define UC_RX 0x000D /* Total Successful RX Messages Count */
-#define UC_RMP 0x000E /* Successful RX W/Matching ID Count */
-#define UC_ALL 0x000F /* Correct Message On CAN Bus Line Count */
-#define UCRC 0x0020 /* Universal Counter Reload/Clear */
-#define UCCT 0x0040 /* Universal Counter CAN Trigger */
-#define UCE 0x0080 /* Universal Counter Enable */
-
-/* CAN_ESR Masks */
-#define ACKE 0x0004 /* Acknowledge Error */
-#define SER 0x0008 /* Stuff Error */
-#define CRCE 0x0010 /* CRC Error */
-#define SA0 0x0020 /* Stuck At Dominant Error */
-#define BEF 0x0040 /* Bit Error Flag */
-#define FER 0x0080 /* Form Error Flag */
-
-/* CAN_EWR Masks */
-#define EWLREC 0x00FF /* RX Error Count Limit (For EWRIS) */
-#define EWLTEC 0xFF00 /* TX Error Count Limit (For EWTIS) */
-
#endif /* _DEF_BF539_H */
diff --git a/arch/blackfin/mach-bf548/boards/cm_bf548.c b/arch/blackfin/mach-bf548/boards/cm_bf548.c
index f60c333fec66..dbb6b1d83f6d 100644
--- a/arch/blackfin/mach-bf548/boards/cm_bf548.c
+++ b/arch/blackfin/mach-bf548/boards/cm_bf548.c
@@ -498,6 +498,10 @@ static struct musb_hdrc_config musb_config = {
.num_eps = 8,
.dma_channels = 8,
.gpio_vrsel = GPIO_PH6,
+ /* Some custom boards need this signal to be active low; set it to "0"
+ * in that case.
+ */
+ .gpio_vrsel_active = 1,
};
static struct musb_hdrc_platform_data musb_plat = {
diff --git a/arch/blackfin/mach-bf548/boards/ezkit.c b/arch/blackfin/mach-bf548/boards/ezkit.c
index 06919db00a74..6fcfb9187c35 100644
--- a/arch/blackfin/mach-bf548/boards/ezkit.c
+++ b/arch/blackfin/mach-bf548/boards/ezkit.c
@@ -603,6 +603,10 @@ static struct musb_hdrc_config musb_config = {
.num_eps = 8,
.dma_channels = 8,
.gpio_vrsel = GPIO_PE7,
+ /* Some custom boards need this signal to be active low; set it to "0"
+ * in that case.
+ */
+ .gpio_vrsel_active = 1,
};
static struct musb_hdrc_platform_data musb_plat = {
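
Editorial note (not part of the patch): both hunks above leave gpio_vrsel_active set to 1; a board whose VRSEL line is wired active low would instead clear the new flag. The fragment below is a minimal sketch of such a board entry. The struct name and GPIO pin are illustrative only, and only the fields visible in the hunks above are filled in; struct musb_hdrc_config is assumed to come from <linux/usb/musb.h> as in the existing board files.

static struct musb_hdrc_config custom_musb_config = {
	.num_eps	= 8,			/* as in the ezkit/cm_bf548 entries */
	.dma_channels	= 8,
	.gpio_vrsel	= GPIO_PE7,		/* board-specific VRSEL GPIO */
	.gpio_vrsel_active = 0,			/* VRSEL is driven active low on this board */
};
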
diff --git a/arch/blackfin/mach-bf548/include/mach/defBF54x_base.h b/arch/blackfin/mach-bf548/include/mach/defBF54x_base.h
index ab04d137fd8b..0ed06c2366fe 100644
--- a/arch/blackfin/mach-bf548/include/mach/defBF54x_base.h
+++ b/arch/blackfin/mach-bf548/include/mach/defBF54x_base.h
@@ -2104,677 +2104,6 @@
#define ECCCNT 0x3ff /* Transfer Count */
-/* Bit masks for CAN0_CONTROL */
-
-#define SRS 0x1 /* Software Reset */
-#define DNM 0x2 /* DeviceNet Mode */
-#define ABO 0x4 /* Auto Bus On */
-#define WBA 0x10 /* Wakeup On CAN Bus Activity */
-#define SMR 0x20 /* Sleep Mode Request */
-#define CSR 0x40 /* CAN Suspend Mode Request */
-#define CCR 0x80 /* CAN Configuration Mode Request */
-
-/* Bit masks for CAN0_STATUS */
-
-#define WT 0x1 /* CAN Transmit Warning Flag */
-#define WR 0x2 /* CAN Receive Warning Flag */
-#define EP 0x4 /* CAN Error Passive Mode */
-#define EBO 0x8 /* CAN Error Bus Off Mode */
-#define CSA 0x40 /* CAN Suspend Mode Acknowledge */
-#define CCA 0x80 /* CAN Configuration Mode Acknowledge */
-#define MBPTR 0x1f00 /* Mailbox Pointer */
-#define TRM 0x4000 /* Transmit Mode Status */
-#define REC 0x8000 /* Receive Mode Status */
-
-/* Bit masks for CAN0_DEBUG */
-
-#define DEC 0x1 /* Disable Transmit/Receive Error Counters */
-#define DRI 0x2 /* Disable CANRX Input Pin */
-#define DTO 0x4 /* Disable CANTX Output Pin */
-#define DIL 0x8 /* Disable Internal Loop */
-#define MAA 0x10 /* Mode Auto-Acknowledge */
-#define MRB 0x20 /* Mode Read Back */
-#define CDE 0x8000 /* CAN Debug Mode Enable */
-
-/* Bit masks for CAN0_CLOCK */
-
-#define BRP 0x3ff /* CAN Bit Rate Prescaler */
-
-/* Bit masks for CAN0_TIMING */
-
-#define SJW 0x300 /* Synchronization Jump Width */
-#define SAM 0x80 /* Sampling */
-#define TSEG2 0x70 /* Time Segment 2 */
-#define TSEG1 0xf /* Time Segment 1 */
-
-/* Bit masks for CAN0_INTR */
-
-#define CANRX 0x80 /* Serial Input From Transceiver */
-#define CANTX 0x40 /* Serial Output To Transceiver */
-#define SMACK 0x8 /* Sleep Mode Acknowledge */
-#define GIRQ 0x4 /* Global Interrupt Request Status */
-#define MBTIRQ 0x2 /* Mailbox Transmit Interrupt Request */
-#define MBRIRQ 0x1 /* Mailbox Receive Interrupt Request */
-
-/* Bit masks for CAN0_GIM */
-
-#define EWTIM 0x1 /* Error Warning Transmit Interrupt Mask */
-#define EWRIM 0x2 /* Error Warning Receive Interrupt Mask */
-#define EPIM 0x4 /* Error Passive Interrupt Mask */
-#define BOIM 0x8 /* Bus Off Interrupt Mask */
-#define WUIM 0x10 /* Wakeup Interrupt Mask */
-#define UIAIM 0x20 /* Unimplemented Address Interrupt Mask */
-#define AAIM 0x40 /* Abort Acknowledge Interrupt Mask */
-#define RMLIM 0x80 /* Receive Message Lost Interrupt Mask */
-#define UCEIM 0x100 /* Universal Counter Exceeded Interrupt Mask */
-#define ADIM 0x400 /* Access Denied Interrupt Mask */
-
-/* Bit masks for CAN0_GIS */
-
-#define EWTIS 0x1 /* Error Warning Transmit Interrupt Status */
-#define EWRIS 0x2 /* Error Warning Receive Interrupt Status */
-#define EPIS 0x4 /* Error Passive Interrupt Status */
-#define BOIS 0x8 /* Bus Off Interrupt Status */
-#define WUIS 0x10 /* Wakeup Interrupt Status */
-#define UIAIS 0x20 /* Unimplemented Address Interrupt Status */
-#define AAIS 0x40 /* Abort Acknowledge Interrupt Status */
-#define RMLIS 0x80 /* Receive Message Lost Interrupt Status */
-#define UCEIS 0x100 /* Universal Counter Exceeded Interrupt Status */
-#define ADIS 0x400 /* Access Denied Interrupt Status */
-
-/* Bit masks for CAN0_GIF */
-
-#define EWTIF 0x1 /* Error Warning Transmit Interrupt Flag */
-#define EWRIF 0x2 /* Error Warning Receive Interrupt Flag */
-#define EPIF 0x4 /* Error Passive Interrupt Flag */
-#define BOIF 0x8 /* Bus Off Interrupt Flag */
-#define WUIF 0x10 /* Wakeup Interrupt Flag */
-#define UIAIF 0x20 /* Unimplemented Address Interrupt Flag */
-#define AAIF 0x40 /* Abort Acknowledge Interrupt Flag */
-#define RMLIF 0x80 /* Receive Message Lost Interrupt Flag */
-#define UCEIF 0x100 /* Universal Counter Exceeded Interrupt Flag */
-#define ADIF 0x400 /* Access Denied Interrupt Flag */
-
-/* Bit masks for CAN0_MBTD */
-
-#define TDR 0x80 /* Temporary Disable Request */
-#define TDA 0x40 /* Temporary Disable Acknowledge */
-#define TDPTR 0x1f /* Temporary Disable Pointer */
-
-/* Bit masks for CAN0_UCCNF */
-
-#define UCCNF 0xf /* Universal Counter Configuration */
-#define UCRC 0x20 /* Universal Counter Reload/Clear */
-#define UCCT 0x40 /* Universal Counter CAN Trigger */
-#define UCE 0x80 /* Universal Counter Enable */
-
-/* Bit masks for CAN0_CEC */
-
-#define RXECNT 0xff /* Receive Error Counter */
-#define TXECNT 0xff00 /* Transmit Error Counter */
-
-/* Bit masks for CAN0_ESR */
-
-#define FER 0x80 /* Form Error */
-#define BEF 0x40 /* Bit Error Flag */
-#define SA0 0x20 /* Stuck At Dominant */
-#define CRCE 0x10 /* CRC Error */
-#define SER 0x8 /* Stuff Bit Error */
-#define ACKE 0x4 /* Acknowledge Error */
-
-/* Bit masks for CAN0_EWR */
-
-#define EWLTEC 0xff00 /* Transmit Error Warning Limit */
-#define EWLREC 0xff /* Receive Error Warning Limit */
-
-/* Bit masks for CAN0_AMxx_H */
-
-#define FDF 0x8000 /* Filter On Data Field */
-#define FMD 0x4000 /* Full Mask Data */
-#define AMIDE 0x2000 /* Acceptance Mask Identifier Extension */
-#define BASEID 0x1ffc /* Base Identifier */
-#define EXTID_HI 0x3 /* Extended Identifier High Bits */
-
-/* Bit masks for CAN0_AMxx_L */
-
-#define EXTID_LO 0xffff /* Extended Identifier Low Bits */
-#define DFM 0xffff /* Data Field Mask */
-
-/* Bit masks for CAN0_MBxx_ID1 */
-
-#define AME 0x8000 /* Acceptance Mask Enable */
-#define RTR 0x4000 /* Remote Transmission Request */
-#define IDE 0x2000 /* Identifier Extension */
-#define BASEID 0x1ffc /* Base Identifier */
-#define EXTID_HI 0x3 /* Extended Identifier High Bits */
-
-/* Bit masks for CAN0_MBxx_ID0 */
-
-#define EXTID_LO 0xffff /* Extended Identifier Low Bits */
-#define DFM 0xffff /* Data Field Mask */
-
-/* Bit masks for CAN0_MBxx_TIMESTAMP */
-
-#define TSV 0xffff /* Time Stamp Value */
-
-/* Bit masks for CAN0_MBxx_LENGTH */
-
-#define DLC 0xf /* Data Length Code */
-
-/* Bit masks for CAN0_MBxx_DATA3 */
-
-#define CAN_BYTE0 0xff00 /* Data Field Byte 0 */
-#define CAN_BYTE1 0xff /* Data Field Byte 1 */
-
-/* Bit masks for CAN0_MBxx_DATA2 */
-
-#define CAN_BYTE2 0xff00 /* Data Field Byte 2 */
-#define CAN_BYTE3 0xff /* Data Field Byte 3 */
-
-/* Bit masks for CAN0_MBxx_DATA1 */
-
-#define CAN_BYTE4 0xff00 /* Data Field Byte 4 */
-#define CAN_BYTE5 0xff /* Data Field Byte 5 */
-
-/* Bit masks for CAN0_MBxx_DATA0 */
-
-#define CAN_BYTE6 0xff00 /* Data Field Byte 6 */
-#define CAN_BYTE7 0xff /* Data Field Byte 7 */
-
-/* Bit masks for CAN0_MC1 */
-
-#define MC0 0x1 /* Mailbox 0 Enable */
-#define MC1 0x2 /* Mailbox 1 Enable */
-#define MC2 0x4 /* Mailbox 2 Enable */
-#define MC3 0x8 /* Mailbox 3 Enable */
-#define MC4 0x10 /* Mailbox 4 Enable */
-#define MC5 0x20 /* Mailbox 5 Enable */
-#define MC6 0x40 /* Mailbox 6 Enable */
-#define MC7 0x80 /* Mailbox 7 Enable */
-#define MC8 0x100 /* Mailbox 8 Enable */
-#define MC9 0x200 /* Mailbox 9 Enable */
-#define MC10 0x400 /* Mailbox 10 Enable */
-#define MC11 0x800 /* Mailbox 11 Enable */
-#define MC12 0x1000 /* Mailbox 12 Enable */
-#define MC13 0x2000 /* Mailbox 13 Enable */
-#define MC14 0x4000 /* Mailbox 14 Enable */
-#define MC15 0x8000 /* Mailbox 15 Enable */
-
-/* Bit masks for CAN0_MC2 */
-
-#define MC16 0x1 /* Mailbox 16 Enable */
-#define MC17 0x2 /* Mailbox 17 Enable */
-#define MC18 0x4 /* Mailbox 18 Enable */
-#define MC19 0x8 /* Mailbox 19 Enable */
-#define MC20 0x10 /* Mailbox 20 Enable */
-#define MC21 0x20 /* Mailbox 21 Enable */
-#define MC22 0x40 /* Mailbox 22 Enable */
-#define MC23 0x80 /* Mailbox 23 Enable */
-#define MC24 0x100 /* Mailbox 24 Enable */
-#define MC25 0x200 /* Mailbox 25 Enable */
-#define MC26 0x400 /* Mailbox 26 Enable */
-#define MC27 0x800 /* Mailbox 27 Enable */
-#define MC28 0x1000 /* Mailbox 28 Enable */
-#define MC29 0x2000 /* Mailbox 29 Enable */
-#define MC30 0x4000 /* Mailbox 30 Enable */
-#define MC31 0x8000 /* Mailbox 31 Enable */
-
-/* Bit masks for CAN0_MD1 */
-
-#define MD0 0x1 /* Mailbox 0 Receive Enable */
-#define MD1 0x2 /* Mailbox 1 Receive Enable */
-#define MD2 0x4 /* Mailbox 2 Receive Enable */
-#define MD3 0x8 /* Mailbox 3 Receive Enable */
-#define MD4 0x10 /* Mailbox 4 Receive Enable */
-#define MD5 0x20 /* Mailbox 5 Receive Enable */
-#define MD6 0x40 /* Mailbox 6 Receive Enable */
-#define MD7 0x80 /* Mailbox 7 Receive Enable */
-#define MD8 0x100 /* Mailbox 8 Receive Enable */
-#define MD9 0x200 /* Mailbox 9 Receive Enable */
-#define MD10 0x400 /* Mailbox 10 Receive Enable */
-#define MD11 0x800 /* Mailbox 11 Receive Enable */
-#define MD12 0x1000 /* Mailbox 12 Receive Enable */
-#define MD13 0x2000 /* Mailbox 13 Receive Enable */
-#define MD14 0x4000 /* Mailbox 14 Receive Enable */
-#define MD15 0x8000 /* Mailbox 15 Receive Enable */
-
-/* Bit masks for CAN0_MD2 */
-
-#define MD16 0x1 /* Mailbox 16 Receive Enable */
-#define MD17 0x2 /* Mailbox 17 Receive Enable */
-#define MD18 0x4 /* Mailbox 18 Receive Enable */
-#define MD19 0x8 /* Mailbox 19 Receive Enable */
-#define MD20 0x10 /* Mailbox 20 Receive Enable */
-#define MD21 0x20 /* Mailbox 21 Receive Enable */
-#define MD22 0x40 /* Mailbox 22 Receive Enable */
-#define MD23 0x80 /* Mailbox 23 Receive Enable */
-#define MD24 0x100 /* Mailbox 24 Receive Enable */
-#define MD25 0x200 /* Mailbox 25 Receive Enable */
-#define MD26 0x400 /* Mailbox 26 Receive Enable */
-#define MD27 0x800 /* Mailbox 27 Receive Enable */
-#define MD28 0x1000 /* Mailbox 28 Receive Enable */
-#define MD29 0x2000 /* Mailbox 29 Receive Enable */
-#define MD30 0x4000 /* Mailbox 30 Receive Enable */
-#define MD31 0x8000 /* Mailbox 31 Receive Enable */
-
-/* Bit masks for CAN0_RMP1 */
-
-#define RMP0 0x1 /* Mailbox 0 Receive Message Pending */
-#define RMP1 0x2 /* Mailbox 1 Receive Message Pending */
-#define RMP2 0x4 /* Mailbox 2 Receive Message Pending */
-#define RMP3 0x8 /* Mailbox 3 Receive Message Pending */
-#define RMP4 0x10 /* Mailbox 4 Receive Message Pending */
-#define RMP5 0x20 /* Mailbox 5 Receive Message Pending */
-#define RMP6 0x40 /* Mailbox 6 Receive Message Pending */
-#define RMP7 0x80 /* Mailbox 7 Receive Message Pending */
-#define RMP8 0x100 /* Mailbox 8 Receive Message Pending */
-#define RMP9 0x200 /* Mailbox 9 Receive Message Pending */
-#define RMP10 0x400 /* Mailbox 10 Receive Message Pending */
-#define RMP11 0x800 /* Mailbox 11 Receive Message Pending */
-#define RMP12 0x1000 /* Mailbox 12 Receive Message Pending */
-#define RMP13 0x2000 /* Mailbox 13 Receive Message Pending */
-#define RMP14 0x4000 /* Mailbox 14 Receive Message Pending */
-#define RMP15 0x8000 /* Mailbox 15 Receive Message Pending */
-
-/* Bit masks for CAN0_RMP2 */
-
-#define RMP16 0x1 /* Mailbox 16 Receive Message Pending */
-#define RMP17 0x2 /* Mailbox 17 Receive Message Pending */
-#define RMP18 0x4 /* Mailbox 18 Receive Message Pending */
-#define RMP19 0x8 /* Mailbox 19 Receive Message Pending */
-#define RMP20 0x10 /* Mailbox 20 Receive Message Pending */
-#define RMP21 0x20 /* Mailbox 21 Receive Message Pending */
-#define RMP22 0x40 /* Mailbox 22 Receive Message Pending */
-#define RMP23 0x80 /* Mailbox 23 Receive Message Pending */
-#define RMP24 0x100 /* Mailbox 24 Receive Message Pending */
-#define RMP25 0x200 /* Mailbox 25 Receive Message Pending */
-#define RMP26 0x400 /* Mailbox 26 Receive Message Pending */
-#define RMP27 0x800 /* Mailbox 27 Receive Message Pending */
-#define RMP28 0x1000 /* Mailbox 28 Receive Message Pending */
-#define RMP29 0x2000 /* Mailbox 29 Receive Message Pending */
-#define RMP30 0x4000 /* Mailbox 30 Receive Message Pending */
-#define RMP31 0x8000 /* Mailbox 31 Receive Message Pending */
-
-/* Bit masks for CAN0_RML1 */
-
-#define RML0 0x1 /* Mailbox 0 Receive Message Lost */
-#define RML1 0x2 /* Mailbox 1 Receive Message Lost */
-#define RML2 0x4 /* Mailbox 2 Receive Message Lost */
-#define RML3 0x8 /* Mailbox 3 Receive Message Lost */
-#define RML4 0x10 /* Mailbox 4 Receive Message Lost */
-#define RML5 0x20 /* Mailbox 5 Receive Message Lost */
-#define RML6 0x40 /* Mailbox 6 Receive Message Lost */
-#define RML7 0x80 /* Mailbox 7 Receive Message Lost */
-#define RML8 0x100 /* Mailbox 8 Receive Message Lost */
-#define RML9 0x200 /* Mailbox 9 Receive Message Lost */
-#define RML10 0x400 /* Mailbox 10 Receive Message Lost */
-#define RML11 0x800 /* Mailbox 11 Receive Message Lost */
-#define RML12 0x1000 /* Mailbox 12 Receive Message Lost */
-#define RML13 0x2000 /* Mailbox 13 Receive Message Lost */
-#define RML14 0x4000 /* Mailbox 14 Receive Message Lost */
-#define RML15 0x8000 /* Mailbox 15 Receive Message Lost */
-
-/* Bit masks for CAN0_RML2 */
-
-#define RML16 0x1 /* Mailbox 16 Receive Message Lost */
-#define RML17 0x2 /* Mailbox 17 Receive Message Lost */
-#define RML18 0x4 /* Mailbox 18 Receive Message Lost */
-#define RML19 0x8 /* Mailbox 19 Receive Message Lost */
-#define RML20 0x10 /* Mailbox 20 Receive Message Lost */
-#define RML21 0x20 /* Mailbox 21 Receive Message Lost */
-#define RML22 0x40 /* Mailbox 22 Receive Message Lost */
-#define RML23 0x80 /* Mailbox 23 Receive Message Lost */
-#define RML24 0x100 /* Mailbox 24 Receive Message Lost */
-#define RML25 0x200 /* Mailbox 25 Receive Message Lost */
-#define RML26 0x400 /* Mailbox 26 Receive Message Lost */
-#define RML27 0x800 /* Mailbox 27 Receive Message Lost */
-#define RML28 0x1000 /* Mailbox 28 Receive Message Lost */
-#define RML29 0x2000 /* Mailbox 29 Receive Message Lost */
-#define RML30 0x4000 /* Mailbox 30 Receive Message Lost */
-#define RML31 0x8000 /* Mailbox 31 Receive Message Lost */
-
-/* Bit masks for CAN0_OPSS1 */
-
-#define OPSS0 0x1 /* Mailbox 0 Overwrite Protection/Single-Shot Transmission Enable */
-#define OPSS1 0x2 /* Mailbox 1 Overwrite Protection/Single-Shot Transmission Enable */
-#define OPSS2 0x4 /* Mailbox 2 Overwrite Protection/Single-Shot Transmission Enable */
-#define OPSS3 0x8 /* Mailbox 3 Overwrite Protection/Single-Shot Transmission Enable */
-#define OPSS4 0x10 /* Mailbox 4 Overwrite Protection/Single-Shot Transmission Enable */
-#define OPSS5 0x20 /* Mailbox 5 Overwrite Protection/Single-Shot Transmission Enable */
-#define OPSS6 0x40 /* Mailbox 6 Overwrite Protection/Single-Shot Transmission Enable */
-#define OPSS7 0x80 /* Mailbox 7 Overwrite Protection/Single-Shot Transmission Enable */
-#define OPSS8 0x100 /* Mailbox 8 Overwrite Protection/Single-Shot Transmission Enable */
-#define OPSS9 0x200 /* Mailbox 9 Overwrite Protection/Single-Shot Transmission Enable */
-#define OPSS10 0x400 /* Mailbox 10 Overwrite Protection/Single-Shot Transmission Enable */
-#define OPSS11 0x800 /* Mailbox 11 Overwrite Protection/Single-Shot Transmission Enable */
-#define OPSS12 0x1000 /* Mailbox 12 Overwrite Protection/Single-Shot Transmission Enable */
-#define OPSS13 0x2000 /* Mailbox 13 Overwrite Protection/Single-Shot Transmission Enable */
-#define OPSS14 0x4000 /* Mailbox 14 Overwrite Protection/Single-Shot Transmission Enable */
-#define OPSS15 0x8000 /* Mailbox 15 Overwrite Protection/Single-Shot Transmission Enable */
-
-/* Bit masks for CAN0_OPSS2 */
-
-#define OPSS16 0x1 /* Mailbox 16 Overwrite Protection/Single-Shot Transmission Enable */
-#define OPSS17 0x2 /* Mailbox 17 Overwrite Protection/Single-Shot Transmission Enable */
-#define OPSS18 0x4 /* Mailbox 18 Overwrite Protection/Single-Shot Transmission Enable */
-#define OPSS19 0x8 /* Mailbox 19 Overwrite Protection/Single-Shot Transmission Enable */
-#define OPSS20 0x10 /* Mailbox 20 Overwrite Protection/Single-Shot Transmission Enable */
-#define OPSS21 0x20 /* Mailbox 21 Overwrite Protection/Single-Shot Transmission Enable */
-#define OPSS22 0x40 /* Mailbox 22 Overwrite Protection/Single-Shot Transmission Enable */
-#define OPSS23 0x80 /* Mailbox 23 Overwrite Protection/Single-Shot Transmission Enable */
-#define OPSS24 0x100 /* Mailbox 24 Overwrite Protection/Single-Shot Transmission Enable */
-#define OPSS25 0x200 /* Mailbox 25 Overwrite Protection/Single-Shot Transmission Enable */
-#define OPSS26 0x400 /* Mailbox 26 Overwrite Protection/Single-Shot Transmission Enable */
-#define OPSS27 0x800 /* Mailbox 27 Overwrite Protection/Single-Shot Transmission Enable */
-#define OPSS28 0x1000 /* Mailbox 28 Overwrite Protection/Single-Shot Transmission Enable */
-#define OPSS29 0x2000 /* Mailbox 29 Overwrite Protection/Single-Shot Transmission Enable */
-#define OPSS30 0x4000 /* Mailbox 30 Overwrite Protection/Single-Shot Transmission Enable */
-#define OPSS31 0x8000 /* Mailbox 31 Overwrite Protection/Single-Shot Transmission Enable */
-
-/* Bit masks for CAN0_TRS1 */
-
-#define TRS0 0x1 /* Mailbox 0 Transmit Request Set */
-#define TRS1 0x2 /* Mailbox 1 Transmit Request Set */
-#define TRS2 0x4 /* Mailbox 2 Transmit Request Set */
-#define TRS3 0x8 /* Mailbox 3 Transmit Request Set */
-#define TRS4 0x10 /* Mailbox 4 Transmit Request Set */
-#define TRS5 0x20 /* Mailbox 5 Transmit Request Set */
-#define TRS6 0x40 /* Mailbox 6 Transmit Request Set */
-#define TRS7 0x80 /* Mailbox 7 Transmit Request Set */
-#define TRS8 0x100 /* Mailbox 8 Transmit Request Set */
-#define TRS9 0x200 /* Mailbox 9 Transmit Request Set */
-#define TRS10 0x400 /* Mailbox 10 Transmit Request Set */
-#define TRS11 0x800 /* Mailbox 11 Transmit Request Set */
-#define TRS12 0x1000 /* Mailbox 12 Transmit Request Set */
-#define TRS13 0x2000 /* Mailbox 13 Transmit Request Set */
-#define TRS14 0x4000 /* Mailbox 14 Transmit Request Set */
-#define TRS15 0x8000 /* Mailbox 15 Transmit Request Set */
-
-/* Bit masks for CAN0_TRS2 */
-
-#define TRS16 0x1 /* Mailbox 16 Transmit Request Set */
-#define TRS17 0x2 /* Mailbox 17 Transmit Request Set */
-#define TRS18 0x4 /* Mailbox 18 Transmit Request Set */
-#define TRS19 0x8 /* Mailbox 19 Transmit Request Set */
-#define TRS20 0x10 /* Mailbox 20 Transmit Request Set */
-#define TRS21 0x20 /* Mailbox 21 Transmit Request Set */
-#define TRS22 0x40 /* Mailbox 22 Transmit Request Set */
-#define TRS23 0x80 /* Mailbox 23 Transmit Request Set */
-#define TRS24 0x100 /* Mailbox 24 Transmit Request Set */
-#define TRS25 0x200 /* Mailbox 25 Transmit Request Set */
-#define TRS26 0x400 /* Mailbox 26 Transmit Request Set */
-#define TRS27 0x800 /* Mailbox 27 Transmit Request Set */
-#define TRS28 0x1000 /* Mailbox 28 Transmit Request Set */
-#define TRS29 0x2000 /* Mailbox 29 Transmit Request Set */
-#define TRS30 0x4000 /* Mailbox 30 Transmit Request Set */
-#define TRS31 0x8000 /* Mailbox 31 Transmit Request Set */
-
-/* Bit masks for CAN0_TRR1 */
-
-#define TRR0 0x1 /* Mailbox 0 Transmit Request Reset */
-#define TRR1 0x2 /* Mailbox 1 Transmit Request Reset */
-#define TRR2 0x4 /* Mailbox 2 Transmit Request Reset */
-#define TRR3 0x8 /* Mailbox 3 Transmit Request Reset */
-#define TRR4 0x10 /* Mailbox 4 Transmit Request Reset */
-#define TRR5 0x20 /* Mailbox 5 Transmit Request Reset */
-#define TRR6 0x40 /* Mailbox 6 Transmit Request Reset */
-#define TRR7 0x80 /* Mailbox 7 Transmit Request Reset */
-#define TRR8 0x100 /* Mailbox 8 Transmit Request Reset */
-#define TRR9 0x200 /* Mailbox 9 Transmit Request Reset */
-#define TRR10 0x400 /* Mailbox 10 Transmit Request Reset */
-#define TRR11 0x800 /* Mailbox 11 Transmit Request Reset */
-#define TRR12 0x1000 /* Mailbox 12 Transmit Request Reset */
-#define TRR13 0x2000 /* Mailbox 13 Transmit Request Reset */
-#define TRR14 0x4000 /* Mailbox 14 Transmit Request Reset */
-#define TRR15 0x8000 /* Mailbox 15 Transmit Request Reset */
-
-/* Bit masks for CAN0_TRR2 */
-
-#define TRR16 0x1 /* Mailbox 16 Transmit Request Reset */
-#define TRR17 0x2 /* Mailbox 17 Transmit Request Reset */
-#define TRR18 0x4 /* Mailbox 18 Transmit Request Reset */
-#define TRR19 0x8 /* Mailbox 19 Transmit Request Reset */
-#define TRR20 0x10 /* Mailbox 20 Transmit Request Reset */
-#define TRR21 0x20 /* Mailbox 21 Transmit Request Reset */
-#define TRR22 0x40 /* Mailbox 22 Transmit Request Reset */
-#define TRR23 0x80 /* Mailbox 23 Transmit Request Reset */
-#define TRR24 0x100 /* Mailbox 24 Transmit Request Reset */
-#define TRR25 0x200 /* Mailbox 25 Transmit Request Reset */
-#define TRR26 0x400 /* Mailbox 26 Transmit Request Reset */
-#define TRR27 0x800 /* Mailbox 27 Transmit Request Reset */
-#define TRR28 0x1000 /* Mailbox 28 Transmit Request Reset */
-#define TRR29 0x2000 /* Mailbox 29 Transmit Request Reset */
-#define TRR30 0x4000 /* Mailbox 30 Transmit Request Reset */
-#define TRR31 0x8000 /* Mailbox 31 Transmit Request Reset */
-
-/* Bit masks for CAN0_AA1 */
-
-#define AA0 0x1 /* Mailbox 0 Abort Acknowledge */
-#define AA1 0x2 /* Mailbox 1 Abort Acknowledge */
-#define AA2 0x4 /* Mailbox 2 Abort Acknowledge */
-#define AA3 0x8 /* Mailbox 3 Abort Acknowledge */
-#define AA4 0x10 /* Mailbox 4 Abort Acknowledge */
-#define AA5 0x20 /* Mailbox 5 Abort Acknowledge */
-#define AA6 0x40 /* Mailbox 6 Abort Acknowledge */
-#define AA7 0x80 /* Mailbox 7 Abort Acknowledge */
-#define AA8 0x100 /* Mailbox 8 Abort Acknowledge */
-#define AA9 0x200 /* Mailbox 9 Abort Acknowledge */
-#define AA10 0x400 /* Mailbox 10 Abort Acknowledge */
-#define AA11 0x800 /* Mailbox 11 Abort Acknowledge */
-#define AA12 0x1000 /* Mailbox 12 Abort Acknowledge */
-#define AA13 0x2000 /* Mailbox 13 Abort Acknowledge */
-#define AA14 0x4000 /* Mailbox 14 Abort Acknowledge */
-#define AA15 0x8000 /* Mailbox 15 Abort Acknowledge */
-
-/* Bit masks for CAN0_AA2 */
-
-#define AA16 0x1 /* Mailbox 16 Abort Acknowledge */
-#define AA17 0x2 /* Mailbox 17 Abort Acknowledge */
-#define AA18 0x4 /* Mailbox 18 Abort Acknowledge */
-#define AA19 0x8 /* Mailbox 19 Abort Acknowledge */
-#define AA20 0x10 /* Mailbox 20 Abort Acknowledge */
-#define AA21 0x20 /* Mailbox 21 Abort Acknowledge */
-#define AA22 0x40 /* Mailbox 22 Abort Acknowledge */
-#define AA23 0x80 /* Mailbox 23 Abort Acknowledge */
-#define AA24 0x100 /* Mailbox 24 Abort Acknowledge */
-#define AA25 0x200 /* Mailbox 25 Abort Acknowledge */
-#define AA26 0x400 /* Mailbox 26 Abort Acknowledge */
-#define AA27 0x800 /* Mailbox 27 Abort Acknowledge */
-#define AA28 0x1000 /* Mailbox 28 Abort Acknowledge */
-#define AA29 0x2000 /* Mailbox 29 Abort Acknowledge */
-#define AA30 0x4000 /* Mailbox 30 Abort Acknowledge */
-#define AA31 0x8000 /* Mailbox 31 Abort Acknowledge */
-
-/* Bit masks for CAN0_TA1 */
-
-#define TA0 0x1 /* Mailbox 0 Transmit Acknowledge */
-#define TA1 0x2 /* Mailbox 1 Transmit Acknowledge */
-#define TA2 0x4 /* Mailbox 2 Transmit Acknowledge */
-#define TA3 0x8 /* Mailbox 3 Transmit Acknowledge */
-#define TA4 0x10 /* Mailbox 4 Transmit Acknowledge */
-#define TA5 0x20 /* Mailbox 5 Transmit Acknowledge */
-#define TA6 0x40 /* Mailbox 6 Transmit Acknowledge */
-#define TA7 0x80 /* Mailbox 7 Transmit Acknowledge */
-#define TA8 0x100 /* Mailbox 8 Transmit Acknowledge */
-#define TA9 0x200 /* Mailbox 9 Transmit Acknowledge */
-#define TA10 0x400 /* Mailbox 10 Transmit Acknowledge */
-#define TA11 0x800 /* Mailbox 11 Transmit Acknowledge */
-#define TA12 0x1000 /* Mailbox 12 Transmit Acknowledge */
-#define TA13 0x2000 /* Mailbox 13 Transmit Acknowledge */
-#define TA14 0x4000 /* Mailbox 14 Transmit Acknowledge */
-#define TA15 0x8000 /* Mailbox 15 Transmit Acknowledge */
-
-/* Bit masks for CAN0_TA2 */
-
-#define TA16 0x1 /* Mailbox 16 Transmit Acknowledge */
-#define TA17 0x2 /* Mailbox 17 Transmit Acknowledge */
-#define TA18 0x4 /* Mailbox 18 Transmit Acknowledge */
-#define TA19 0x8 /* Mailbox 19 Transmit Acknowledge */
-#define TA20 0x10 /* Mailbox 20 Transmit Acknowledge */
-#define TA21 0x20 /* Mailbox 21 Transmit Acknowledge */
-#define TA22 0x40 /* Mailbox 22 Transmit Acknowledge */
-#define TA23 0x80 /* Mailbox 23 Transmit Acknowledge */
-#define TA24 0x100 /* Mailbox 24 Transmit Acknowledge */
-#define TA25 0x200 /* Mailbox 25 Transmit Acknowledge */
-#define TA26 0x400 /* Mailbox 26 Transmit Acknowledge */
-#define TA27 0x800 /* Mailbox 27 Transmit Acknowledge */
-#define TA28 0x1000 /* Mailbox 28 Transmit Acknowledge */
-#define TA29 0x2000 /* Mailbox 29 Transmit Acknowledge */
-#define TA30 0x4000 /* Mailbox 30 Transmit Acknowledge */
-#define TA31 0x8000 /* Mailbox 31 Transmit Acknowledge */
-
-/* Bit masks for CAN0_RFH1 */
-
-#define RFH0 0x1 /* Mailbox 0 Remote Frame Handling Enable */
-#define RFH1 0x2 /* Mailbox 1 Remote Frame Handling Enable */
-#define RFH2 0x4 /* Mailbox 2 Remote Frame Handling Enable */
-#define RFH3 0x8 /* Mailbox 3 Remote Frame Handling Enable */
-#define RFH4 0x10 /* Mailbox 4 Remote Frame Handling Enable */
-#define RFH5 0x20 /* Mailbox 5 Remote Frame Handling Enable */
-#define RFH6 0x40 /* Mailbox 6 Remote Frame Handling Enable */
-#define RFH7 0x80 /* Mailbox 7 Remote Frame Handling Enable */
-#define RFH8 0x100 /* Mailbox 8 Remote Frame Handling Enable */
-#define RFH9 0x200 /* Mailbox 9 Remote Frame Handling Enable */
-#define RFH10 0x400 /* Mailbox 10 Remote Frame Handling Enable */
-#define RFH11 0x800 /* Mailbox 11 Remote Frame Handling Enable */
-#define RFH12 0x1000 /* Mailbox 12 Remote Frame Handling Enable */
-#define RFH13 0x2000 /* Mailbox 13 Remote Frame Handling Enable */
-#define RFH14 0x4000 /* Mailbox 14 Remote Frame Handling Enable */
-#define RFH15 0x8000 /* Mailbox 15 Remote Frame Handling Enable */
-
-/* Bit masks for CAN0_RFH2 */
-
-#define RFH16 0x1 /* Mailbox 16 Remote Frame Handling Enable */
-#define RFH17 0x2 /* Mailbox 17 Remote Frame Handling Enable */
-#define RFH18 0x4 /* Mailbox 18 Remote Frame Handling Enable */
-#define RFH19 0x8 /* Mailbox 19 Remote Frame Handling Enable */
-#define RFH20 0x10 /* Mailbox 20 Remote Frame Handling Enable */
-#define RFH21 0x20 /* Mailbox 21 Remote Frame Handling Enable */
-#define RFH22 0x40 /* Mailbox 22 Remote Frame Handling Enable */
-#define RFH23 0x80 /* Mailbox 23 Remote Frame Handling Enable */
-#define RFH24 0x100 /* Mailbox 24 Remote Frame Handling Enable */
-#define RFH25 0x200 /* Mailbox 25 Remote Frame Handling Enable */
-#define RFH26 0x400 /* Mailbox 26 Remote Frame Handling Enable */
-#define RFH27 0x800 /* Mailbox 27 Remote Frame Handling Enable */
-#define RFH28 0x1000 /* Mailbox 28 Remote Frame Handling Enable */
-#define RFH29 0x2000 /* Mailbox 29 Remote Frame Handling Enable */
-#define RFH30 0x4000 /* Mailbox 30 Remote Frame Handling Enable */
-#define RFH31 0x8000 /* Mailbox 31 Remote Frame Handling Enable */
-
-/* Bit masks for CAN0_MBIM1 */
-
-#define MBIM0 0x1 /* Mailbox 0 Mailbox Interrupt Mask */
-#define MBIM1 0x2 /* Mailbox 1 Mailbox Interrupt Mask */
-#define MBIM2 0x4 /* Mailbox 2 Mailbox Interrupt Mask */
-#define MBIM3 0x8 /* Mailbox 3 Mailbox Interrupt Mask */
-#define MBIM4 0x10 /* Mailbox 4 Mailbox Interrupt Mask */
-#define MBIM5 0x20 /* Mailbox 5 Mailbox Interrupt Mask */
-#define MBIM6 0x40 /* Mailbox 6 Mailbox Interrupt Mask */
-#define MBIM7 0x80 /* Mailbox 7 Mailbox Interrupt Mask */
-#define MBIM8 0x100 /* Mailbox 8 Mailbox Interrupt Mask */
-#define MBIM9 0x200 /* Mailbox 9 Mailbox Interrupt Mask */
-#define MBIM10 0x400 /* Mailbox 10 Mailbox Interrupt Mask */
-#define MBIM11 0x800 /* Mailbox 11 Mailbox Interrupt Mask */
-#define MBIM12 0x1000 /* Mailbox 12 Mailbox Interrupt Mask */
-#define MBIM13 0x2000 /* Mailbox 13 Mailbox Interrupt Mask */
-#define MBIM14 0x4000 /* Mailbox 14 Mailbox Interrupt Mask */
-#define MBIM15 0x8000 /* Mailbox 15 Mailbox Interrupt Mask */
-
-/* Bit masks for CAN0_MBIM2 */
-
-#define MBIM16 0x1 /* Mailbox 16 Mailbox Interrupt Mask */
-#define MBIM17 0x2 /* Mailbox 17 Mailbox Interrupt Mask */
-#define MBIM18 0x4 /* Mailbox 18 Mailbox Interrupt Mask */
-#define MBIM19 0x8 /* Mailbox 19 Mailbox Interrupt Mask */
-#define MBIM20 0x10 /* Mailbox 20 Mailbox Interrupt Mask */
-#define MBIM21 0x20 /* Mailbox 21 Mailbox Interrupt Mask */
-#define MBIM22 0x40 /* Mailbox 22 Mailbox Interrupt Mask */
-#define MBIM23 0x80 /* Mailbox 23 Mailbox Interrupt Mask */
-#define MBIM24 0x100 /* Mailbox 24 Mailbox Interrupt Mask */
-#define MBIM25 0x200 /* Mailbox 25 Mailbox Interrupt Mask */
-#define MBIM26 0x400 /* Mailbox 26 Mailbox Interrupt Mask */
-#define MBIM27 0x800 /* Mailbox 27 Mailbox Interrupt Mask */
-#define MBIM28 0x1000 /* Mailbox 28 Mailbox Interrupt Mask */
-#define MBIM29 0x2000 /* Mailbox 29 Mailbox Interrupt Mask */
-#define MBIM30 0x4000 /* Mailbox 30 Mailbox Interrupt Mask */
-#define MBIM31 0x8000 /* Mailbox 31 Mailbox Interrupt Mask */
-
-/* Bit masks for CAN0_MBTIF1 */
-
-#define MBTIF0 0x1 /* Mailbox 0 Mailbox Transmit Interrupt Flag */
-#define MBTIF1 0x2 /* Mailbox 1 Mailbox Transmit Interrupt Flag */
-#define MBTIF2 0x4 /* Mailbox 2 Mailbox Transmit Interrupt Flag */
-#define MBTIF3 0x8 /* Mailbox 3 Mailbox Transmit Interrupt Flag */
-#define MBTIF4 0x10 /* Mailbox 4 Mailbox Transmit Interrupt Flag */
-#define MBTIF5 0x20 /* Mailbox 5 Mailbox Transmit Interrupt Flag */
-#define MBTIF6 0x40 /* Mailbox 6 Mailbox Transmit Interrupt Flag */
-#define MBTIF7 0x80 /* Mailbox 7 Mailbox Transmit Interrupt Flag */
-#define MBTIF8 0x100 /* Mailbox 8 Mailbox Transmit Interrupt Flag */
-#define MBTIF9 0x200 /* Mailbox 9 Mailbox Transmit Interrupt Flag */
-#define MBTIF10 0x400 /* Mailbox 10 Mailbox Transmit Interrupt Flag */
-#define MBTIF11 0x800 /* Mailbox 11 Mailbox Transmit Interrupt Flag */
-#define MBTIF12 0x1000 /* Mailbox 12 Mailbox Transmit Interrupt Flag */
-#define MBTIF13 0x2000 /* Mailbox 13 Mailbox Transmit Interrupt Flag */
-#define MBTIF14 0x4000 /* Mailbox 14 Mailbox Transmit Interrupt Flag */
-#define MBTIF15 0x8000 /* Mailbox 15 Mailbox Transmit Interrupt Flag */
-
-/* Bit masks for CAN0_MBTIF2 */
-
-#define MBTIF16 0x1 /* Mailbox 16 Mailbox Transmit Interrupt Flag */
-#define MBTIF17 0x2 /* Mailbox 17 Mailbox Transmit Interrupt Flag */
-#define MBTIF18 0x4 /* Mailbox 18 Mailbox Transmit Interrupt Flag */
-#define MBTIF19 0x8 /* Mailbox 19 Mailbox Transmit Interrupt Flag */
-#define MBTIF20 0x10 /* Mailbox 20 Mailbox Transmit Interrupt Flag */
-#define MBTIF21 0x20 /* Mailbox 21 Mailbox Transmit Interrupt Flag */
-#define MBTIF22 0x40 /* Mailbox 22 Mailbox Transmit Interrupt Flag */
-#define MBTIF23 0x80 /* Mailbox 23 Mailbox Transmit Interrupt Flag */
-#define MBTIF24 0x100 /* Mailbox 24 Mailbox Transmit Interrupt Flag */
-#define MBTIF25 0x200 /* Mailbox 25 Mailbox Transmit Interrupt Flag */
-#define MBTIF26 0x400 /* Mailbox 26 Mailbox Transmit Interrupt Flag */
-#define MBTIF27 0x800 /* Mailbox 27 Mailbox Transmit Interrupt Flag */
-#define MBTIF28 0x1000 /* Mailbox 28 Mailbox Transmit Interrupt Flag */
-#define MBTIF29 0x2000 /* Mailbox 29 Mailbox Transmit Interrupt Flag */
-#define MBTIF30 0x4000 /* Mailbox 30 Mailbox Transmit Interrupt Flag */
-#define MBTIF31 0x8000 /* Mailbox 31 Mailbox Transmit Interrupt Flag */
-
-/* Bit masks for CAN0_MBRIF1 */
-
-#define MBRIF0 0x1 /* Mailbox 0 Mailbox Receive Interrupt Flag */
-#define MBRIF1 0x2 /* Mailbox 1 Mailbox Receive Interrupt Flag */
-#define MBRIF2 0x4 /* Mailbox 2 Mailbox Receive Interrupt Flag */
-#define MBRIF3 0x8 /* Mailbox 3 Mailbox Receive Interrupt Flag */
-#define MBRIF4 0x10 /* Mailbox 4 Mailbox Receive Interrupt Flag */
-#define MBRIF5 0x20 /* Mailbox 5 Mailbox Receive Interrupt Flag */
-#define MBRIF6 0x40 /* Mailbox 6 Mailbox Receive Interrupt Flag */
-#define MBRIF7 0x80 /* Mailbox 7 Mailbox Receive Interrupt Flag */
-#define MBRIF8 0x100 /* Mailbox 8 Mailbox Receive Interrupt Flag */
-#define MBRIF9 0x200 /* Mailbox 9 Mailbox Receive Interrupt Flag */
-#define MBRIF10 0x400 /* Mailbox 10 Mailbox Receive Interrupt Flag */
-#define MBRIF11 0x800 /* Mailbox 11 Mailbox Receive Interrupt Flag */
-#define MBRIF12 0x1000 /* Mailbox 12 Mailbox Receive Interrupt Flag */
-#define MBRIF13 0x2000 /* Mailbox 13 Mailbox Receive Interrupt Flag */
-#define MBRIF14 0x4000 /* Mailbox 14 Mailbox Receive Interrupt Flag */
-#define MBRIF15 0x8000 /* Mailbox 15 Mailbox Receive Interrupt Flag */
-
-/* Bit masks for CAN0_MBRIF2 */
-
-#define MBRIF16 0x1 /* Mailbox 16 Mailbox Receive Interrupt Flag */
-#define MBRIF17 0x2 /* Mailbox 17 Mailbox Receive Interrupt Flag */
-#define MBRIF18 0x4 /* Mailbox 18 Mailbox Receive Interrupt Flag */
-#define MBRIF19 0x8 /* Mailbox 19 Mailbox Receive Interrupt Flag */
-#define MBRIF20 0x10 /* Mailbox 20 Mailbox Receive Interrupt Flag */
-#define MBRIF21 0x20 /* Mailbox 21 Mailbox Receive Interrupt Flag */
-#define MBRIF22 0x40 /* Mailbox 22 Mailbox Receive Interrupt Flag */
-#define MBRIF23 0x80 /* Mailbox 23 Mailbox Receive Interrupt Flag */
-#define MBRIF24 0x100 /* Mailbox 24 Mailbox Receive Interrupt Flag */
-#define MBRIF25 0x200 /* Mailbox 25 Mailbox Receive Interrupt Flag */
-#define MBRIF26 0x400 /* Mailbox 26 Mailbox Receive Interrupt Flag */
-#define MBRIF27 0x800 /* Mailbox 27 Mailbox Receive Interrupt Flag */
-#define MBRIF28 0x1000 /* Mailbox 28 Mailbox Receive Interrupt Flag */
-#define MBRIF29 0x2000 /* Mailbox 29 Mailbox Receive Interrupt Flag */
-#define MBRIF30 0x4000 /* Mailbox 30 Mailbox Receive Interrupt Flag */
-#define MBRIF31 0x8000 /* Mailbox 31 Mailbox Receive Interrupt Flag */
-
/* Bit masks for EPPIx_STATUS */
#define CFIFO_ERR 0x1 /* Chroma FIFO Error */
diff --git a/arch/blackfin/mach-bf561/boards/acvilon.c b/arch/blackfin/mach-bf561/boards/acvilon.c
index 5163e2c383c5..bfcfa86db2b5 100644
--- a/arch/blackfin/mach-bf561/boards/acvilon.c
+++ b/arch/blackfin/mach-bf561/boards/acvilon.c
@@ -44,6 +44,7 @@
#include <linux/spi/flash.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
+#include <linux/jiffies.h>
#include <linux/i2c-pca-platform.h>
#include <linux/delay.h>
#include <linux/io.h>
@@ -112,7 +113,7 @@ static struct resource bfin_i2c_pca_resources[] = {
struct i2c_pca9564_pf_platform_data pca9564_platform_data = {
.gpio = -1,
.i2c_clock_speed = 330000,
- .timeout = 10000
+ .timeout = HZ,
};
/* PCA9564 I2C Bus driver */
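A note on the hunk above: the switch from the bare constant 10000 to HZ (with <linux/jiffies.h> now included) implies the i2c-pca-platform driver interprets .timeout in jiffies, so the old value meant anywhere from ten to a hundred seconds depending on CONFIG_HZ, while HZ pins it to one second of wall-clock time. A minimal sketch of the same idea, with msecs_to_jiffies() shown for run-time derivation (illustrative, not part of the patch):

	#include <linux/jiffies.h>
	#include <linux/i2c-pca-platform.h>

	static struct i2c_pca9564_pf_platform_data example_pdata = {
		.gpio            = -1,
		.i2c_clock_speed = 330000,
		.timeout         = HZ,	/* one second, independent of CONFIG_HZ */
	};

	static void example_set_timeout_ms(unsigned int ms)
	{
		/* msecs_to_jiffies() is the usual helper when the value is computed */
		example_pdata.timeout = msecs_to_jiffies(ms);
	}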
diff --git a/arch/blackfin/mach-common/ints-priority.c b/arch/blackfin/mach-common/ints-priority.c
index 7ad8878bfa18..1c8c4c7245c3 100644
--- a/arch/blackfin/mach-common/ints-priority.c
+++ b/arch/blackfin/mach-common/ints-priority.c
@@ -92,26 +92,29 @@ static void __init search_IAR(void)
{
unsigned ivg, irq_pos = 0;
for (ivg = 0; ivg <= IVG13 - IVG7; ivg++) {
- int irqn;
+ int irqN;
ivg7_13[ivg].istop = ivg7_13[ivg].ifirst = &ivg_table[irq_pos];
- for (irqn = 0; irqn < NR_PERI_INTS; irqn++) {
- int iar_shift = (irqn & 7) * 4;
- if (ivg == (0xf &
-#if defined(CONFIG_BF52x) || defined(CONFIG_BF538) \
- || defined(CONFIG_BF539) || defined(CONFIG_BF51x)
- bfin_read32((unsigned long *)SIC_IAR0 +
- ((irqn % 32) >> 3) + ((irqn / 32) *
- ((SIC_IAR4 - SIC_IAR0) / 4))) >> iar_shift)) {
+ for (irqN = 0; irqN < NR_PERI_INTS; irqN += 4) {
+ int irqn;
+ u32 iar = bfin_read32((unsigned long *)SIC_IAR0 +
+#if defined(CONFIG_BF51x) || defined(CONFIG_BF52x) || \
+ defined(CONFIG_BF538) || defined(CONFIG_BF539)
+ ((irqN % 32) >> 3) + ((irqN / 32) * ((SIC_IAR4 - SIC_IAR0) / 4))
#else
- bfin_read32((unsigned long *)SIC_IAR0 +
- (irqn >> 3)) >> iar_shift)) {
+ (irqN >> 3)
#endif
- ivg_table[irq_pos].irqno = IVG7 + irqn;
- ivg_table[irq_pos].isrflag = 1 << (irqn % 32);
- ivg7_13[ivg].istop++;
- irq_pos++;
+ );
+
+ for (irqn = irqN; irqn < irqN + 4; ++irqn) {
+ int iar_shift = (irqn & 7) * 4;
+ if (ivg == (0xf & (iar >> iar_shift))) {
+ ivg_table[irq_pos].irqno = IVG7 + irqn;
+ ivg_table[irq_pos].isrflag = 1 << (irqn % 32);
+ ivg7_13[ivg].istop++;
+ irq_pos++;
+ }
}
}
}
@@ -662,14 +665,7 @@ static int bfin_gpio_irq_type(unsigned int irq, unsigned int type)
#ifdef CONFIG_PM
int bfin_gpio_set_wake(unsigned int irq, unsigned int state)
{
- unsigned gpio = irq_to_gpio(irq);
-
- if (state)
- gpio_pm_wakeup_request(gpio, PM_WAKE_IGNORE);
- else
- gpio_pm_wakeup_free(gpio);
-
- return 0;
+ return gpio_pm_wakeup_ctrl(irq_to_gpio(irq), state);
}
#endif
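The search_IAR() rewrite above stops re-reading the System Interrupt Assignment Registers for every peripheral interrupt: each 32-bit SIC_IAR word packs eight 4-bit IVG assignments, so the word is read once per small group of IRQs and the entries are decoded from the cached value. The decode itself, pulled out as a sketch (function name is illustrative):

	/* Extract the IVG assignment (offset from IVG7) for one peripheral IRQ
	 * from a 32-bit IAR word holding eight 4-bit entries.
	 */
	static unsigned int example_iar_to_ivg(u32 iar, unsigned int irqn)
	{
		unsigned int shift = (irqn & 7) * 4;	/* 4 bits per IRQ, 8 IRQs per word */

		return (iar >> shift) & 0xf;
	}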
diff --git a/arch/blackfin/mach-common/pm.c b/arch/blackfin/mach-common/pm.c
index c1f1ccc846f0..ea7f95f6bb4c 100644
--- a/arch/blackfin/mach-common/pm.c
+++ b/arch/blackfin/mach-common/pm.c
@@ -20,35 +20,11 @@
#include <asm/dma.h>
#include <asm/dpmc.h>
-#ifdef CONFIG_PM_WAKEUP_GPIO_POLAR_H
-#define WAKEUP_TYPE PM_WAKE_HIGH
-#endif
-
-#ifdef CONFIG_PM_WAKEUP_GPIO_POLAR_L
-#define WAKEUP_TYPE PM_WAKE_LOW
-#endif
-
-#ifdef CONFIG_PM_WAKEUP_GPIO_POLAR_EDGE_F
-#define WAKEUP_TYPE PM_WAKE_FALLING
-#endif
-
-#ifdef CONFIG_PM_WAKEUP_GPIO_POLAR_EDGE_R
-#define WAKEUP_TYPE PM_WAKE_RISING
-#endif
-
-#ifdef CONFIG_PM_WAKEUP_GPIO_POLAR_EDGE_B
-#define WAKEUP_TYPE PM_WAKE_BOTH_EDGES
-#endif
-
void bfin_pm_suspend_standby_enter(void)
{
unsigned long flags;
-#ifdef CONFIG_PM_WAKEUP_BY_GPIO
- gpio_pm_wakeup_request(CONFIG_PM_WAKEUP_GPIO_NUMBER, WAKEUP_TYPE);
-#endif
-
local_irq_save_hw(flags);
bfin_pm_standby_setup();
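With bfin_gpio_set_wake() now forwarding to gpio_pm_wakeup_ctrl() (see the ints-priority.c hunk above), the wakeup GPIO appears to be selected at run time through the IRQ wake machinery rather than the compile-time CONFIG_PM_WAKEUP_GPIO_* options, which is why the suspend entry path no longer issues a hard-coded gpio_pm_wakeup_request(). A hedged sketch of how a driver would arm such a wakeup, using the standard gpio_to_irq()/enable_irq_wake() calls (error handling trimmed):

	#include <linux/gpio.h>
	#include <linux/interrupt.h>

	static int example_arm_gpio_wakeup(unsigned int gpio)
	{
		int irq = gpio_to_irq(gpio);

		if (irq < 0)
			return irq;
		/* routed through the irq_chip's set_wake hook at suspend time */
		return enable_irq_wake(irq);
	}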
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
index 7cecbaf0358a..a17107a700d5 100644
--- a/arch/blackfin/mach-common/smp.c
+++ b/arch/blackfin/mach-common/smp.c
@@ -170,8 +170,8 @@ static irqreturn_t ipi_handler(int irq, void *dev_instance)
kfree(msg);
break;
default:
- printk(KERN_CRIT "CPU%u: Unknown IPI message \
- 0x%lx\n", cpu, msg->type);
+ printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%lx\n",
+ cpu, msg->type);
kfree(msg);
break;
}
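The ipi_handler() fix above replaces a line continuation inside a string literal with adjacent-literal concatenation. The distinction matters: a backslash-newline keeps the literal going, so the next line's leading indentation becomes part of the printed message, whereas adjacent literals are merged by the compiler with nothing in between. A standalone illustration (plain C, not kernel code):

	#include <stdio.h>

	int main(void)
	{
		printf("broken: hello \
	                world\n");	/* the indentation is printed too */
		printf("fixed: hello "
		       "world\n");	/* concatenated cleanly at compile time */
		return 0;
	}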
diff --git a/arch/blackfin/mm/init.c b/arch/blackfin/mm/init.c
index 355b87aa6b93..bb4e8fff4b55 100644
--- a/arch/blackfin/mm/init.c
+++ b/arch/blackfin/mm/init.c
@@ -15,23 +15,11 @@
#include "blackfin_sram.h"
/*
- * BAD_PAGE is the page that is used for page faults when linux
- * is out-of-memory. Older versions of linux just did a
- * do_exit(), but using this instead means there is less risk
- * for a process dying in kernel mode, possibly leaving a inode
- * unused etc..
- *
- * BAD_PAGETABLE is the accompanying page-table: it is initialized
- * to point to BAD_PAGE entries.
- *
- * ZERO_PAGE is a special page that is used for zero-initialized
- * data and COW.
+ * ZERO_PAGE is a special page that is used for zero-initialized data and COW.
+ * Let the bss do its zero-init magic so we don't have to do it ourselves.
*/
-static unsigned long empty_bad_page_table;
-
-static unsigned long empty_bad_page;
-
-static unsigned long empty_zero_page;
+char empty_zero_page[PAGE_SIZE] __attribute__((aligned(PAGE_SIZE)));
+EXPORT_SYMBOL(empty_zero_page);
#ifndef CONFIG_EXCEPTION_L1_SCRATCH
#if defined CONFIG_SYSCALL_TAB_L1
@@ -52,40 +40,26 @@ EXPORT_SYMBOL(cpu_pda);
void __init paging_init(void)
{
/*
- * make sure start_mem is page aligned, otherwise bootmem and
- * page_alloc get different views og the world
+ * make sure start_mem is page aligned, otherwise bootmem and
+ * page_alloc get different views of the world
*/
unsigned long end_mem = memory_end & PAGE_MASK;
- pr_debug("start_mem is %#lx virtual_end is %#lx\n", PAGE_ALIGN(memory_start), end_mem);
-
- /*
- * initialize the bad page table and bad page to point
- * to a couple of allocated pages
- */
- empty_bad_page_table = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
- empty_bad_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
- empty_zero_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
- memset((void *)empty_zero_page, 0, PAGE_SIZE);
+ unsigned long zones_size[MAX_NR_ZONES] = {
+ [0] = 0,
+ [ZONE_DMA] = (end_mem - PAGE_OFFSET) >> PAGE_SHIFT,
+ [ZONE_NORMAL] = 0,
+#ifdef CONFIG_HIGHMEM
+ [ZONE_HIGHMEM] = 0,
+#endif
+ };
- /*
- * Set up SFC/DFC registers (user data space)
- */
+ /* Set up SFC/DFC registers (user data space) */
set_fs(KERNEL_DS);
- pr_debug("free_area_init -> start_mem is %#lx virtual_end is %#lx\n",
+ pr_debug("free_area_init -> start_mem is %#lx virtual_end is %#lx\n",
PAGE_ALIGN(memory_start), end_mem);
-
- {
- unsigned long zones_size[MAX_NR_ZONES] = { 0, };
-
- zones_size[ZONE_DMA] = (end_mem - PAGE_OFFSET) >> PAGE_SHIFT;
- zones_size[ZONE_NORMAL] = 0;
-#ifdef CONFIG_HIGHMEM
- zones_size[ZONE_HIGHMEM] = 0;
-#endif
- free_area_init(zones_size);
- }
+ free_area_init(zones_size);
}
asmlinkage void __init init_pda(void)
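Two details in the mm/init.c hunks above: empty_zero_page becomes a page-aligned array in .bss, which boot code zeroes anyway, so the bootmem allocation and explicit memset() can go; and zones_size now uses designated initializers, so any zone not named defaults to zero. For orientation, a nommu port typically points ZERO_PAGE at such an array roughly as follows (illustrative sketch; the real definition lives in the arch's pgtable.h):

	/* COW and zero-fill reads can all map the one shared zero page */
	extern char empty_zero_page[PAGE_SIZE];

	#define ZERO_PAGE(vaddr)	virt_to_page(empty_zero_page)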
diff --git a/arch/blackfin/mm/isram-driver.c b/arch/blackfin/mm/isram-driver.c
index 39b058564f62..7e2e674ed444 100644
--- a/arch/blackfin/mm/isram-driver.c
+++ b/arch/blackfin/mm/isram-driver.c
@@ -43,13 +43,12 @@ static DEFINE_SPINLOCK(dtest_lock);
/* Takes a void pointer */
#define IADDR2DTEST(x) \
({ unsigned long __addr = (unsigned long)(x); \
- (__addr & 0x47F8) | /* address bits 14 & 10:3 */ \
- (__addr & 0x8000) << 23 | /* Bank A/B */ \
- (__addr & 0x0800) << 15 | /* address bit 11 */ \
- (__addr & 0x3000) << 4 | /* address bits 13:12 */ \
- (__addr & 0x8000) << 8 | /* address bit 15 */ \
- (0x1000000) | /* instruction access = 1 */ \
- (0x4); /* data array = 1 */ \
+ ((__addr & (1 << 11)) << (26 - 11)) | /* addr bit 11 (Way0/Way1) */ \
+ (1 << 24) | /* instruction access = 1 */ \
+ ((__addr & (1 << 15)) << (23 - 15)) | /* addr bit 15 (Data Bank) */ \
+ ((__addr & (3 << 12)) << (16 - 12)) | /* addr bits 13:12 (Subbank) */ \
+ (__addr & 0x47F8) | /* addr bits 14 & 10:3 */ \
+ (1 << 2); /* data array = 1 */ \
})
/* Takes a pointer, and returns the offset (in bits) which things should be shifted */
@@ -196,7 +195,7 @@ EXPORT_SYMBOL(isram_memcpy);
#ifdef CONFIG_BFIN_ISRAM_SELF_TEST
-#define TEST_LEN 0x100
+static int test_len = 0x20000;
static __init void hex_dump(unsigned char *buf, int len)
{
@@ -212,15 +211,15 @@ static __init int isram_read_test(char *sdram, void *l1inst)
pr_info("INFO: running isram_read tests\n");
/* setup some different data to play with */
- for (i = 0; i < TEST_LEN; ++i)
- sdram[i] = i;
- dma_memcpy(l1inst, sdram, TEST_LEN);
+ for (i = 0; i < test_len; ++i)
+ sdram[i] = i % 255;
+ dma_memcpy(l1inst, sdram, test_len);
/* make sure we can read the L1 inst */
- for (i = 0; i < TEST_LEN; i += sizeof(uint64_t)) {
+ for (i = 0; i < test_len; i += sizeof(uint64_t)) {
data1 = isram_read(l1inst + i);
memcpy(&data2, sdram + i, sizeof(data2));
- if (memcmp(&data1, &data2, sizeof(uint64_t))) {
+ if (data1 != data2) {
pr_err("FAIL: isram_read(%p) returned %#llx but wanted %#llx\n",
l1inst + i, data1, data2);
++ret;
@@ -238,25 +237,25 @@ static __init int isram_write_test(char *sdram, void *l1inst)
pr_info("INFO: running isram_write tests\n");
/* setup some different data to play with */
- memset(sdram, 0, TEST_LEN * 2);
- dma_memcpy(l1inst, sdram, TEST_LEN);
- for (i = 0; i < TEST_LEN; ++i)
- sdram[i] = i;
+ memset(sdram, 0, test_len * 2);
+ dma_memcpy(l1inst, sdram, test_len);
+ for (i = 0; i < test_len; ++i)
+ sdram[i] = i % 255;
/* make sure we can write the L1 inst */
- for (i = 0; i < TEST_LEN; i += sizeof(uint64_t)) {
+ for (i = 0; i < test_len; i += sizeof(uint64_t)) {
memcpy(&data1, sdram + i, sizeof(data1));
isram_write(l1inst + i, data1);
data2 = isram_read(l1inst + i);
- if (memcmp(&data1, &data2, sizeof(uint64_t))) {
+ if (data1 != data2) {
pr_err("FAIL: isram_write(%p, %#llx) != %#llx\n",
l1inst + i, data1, data2);
++ret;
}
}
- dma_memcpy(sdram + TEST_LEN, l1inst, TEST_LEN);
- if (memcmp(sdram, sdram + TEST_LEN, TEST_LEN)) {
+ dma_memcpy(sdram + test_len, l1inst, test_len);
+ if (memcmp(sdram, sdram + test_len, test_len)) {
pr_err("FAIL: isram_write() did not work properly\n");
++ret;
}
@@ -268,12 +267,12 @@ static __init int
_isram_memcpy_test(char pattern, void *sdram, void *l1inst, const char *smemcpy,
void *(*fmemcpy)(void *, const void *, size_t))
{
- memset(sdram, pattern, TEST_LEN);
- fmemcpy(l1inst, sdram, TEST_LEN);
- fmemcpy(sdram + TEST_LEN, l1inst, TEST_LEN);
- if (memcmp(sdram, sdram + TEST_LEN, TEST_LEN)) {
+ memset(sdram, pattern, test_len);
+ fmemcpy(l1inst, sdram, test_len);
+ fmemcpy(sdram + test_len, l1inst, test_len);
+ if (memcmp(sdram, sdram + test_len, test_len)) {
pr_err("FAIL: %s(%p <=> %p, %#x) failed (data is %#x)\n",
- smemcpy, l1inst, sdram, TEST_LEN, pattern);
+ smemcpy, l1inst, sdram, test_len, pattern);
return 1;
}
return 0;
@@ -292,12 +291,13 @@ static __init int isram_memcpy_test(char *sdram, void *l1inst)
/* check read of small, unaligned, and hardware 64bit limits */
pr_info("INFO: running isram_memcpy (read) tests\n");
- for (i = 0; i < TEST_LEN; ++i)
- sdram[i] = i;
- dma_memcpy(l1inst, sdram, TEST_LEN);
+ /* setup some different data to play with */
+ for (i = 0; i < test_len; ++i)
+ sdram[i] = i % 255;
+ dma_memcpy(l1inst, sdram, test_len);
thisret = 0;
- for (i = 0; i < TEST_LEN - 32; ++i) {
+ for (i = 0; i < test_len - 32; ++i) {
unsigned char cmp[32];
for (j = 1; j <= 32; ++j) {
memset(cmp, 0, sizeof(cmp));
@@ -310,7 +310,7 @@ static __init int isram_memcpy_test(char *sdram, void *l1inst)
pr_cont("\n");
if (++thisret > 20) {
pr_err("FAIL: skipping remaining series\n");
- i = TEST_LEN;
+ i = test_len;
break;
}
}
@@ -321,11 +321,11 @@ static __init int isram_memcpy_test(char *sdram, void *l1inst)
/* check write of small, unaligned, and hardware 64bit limits */
pr_info("INFO: running isram_memcpy (write) tests\n");
- memset(sdram + TEST_LEN, 0, TEST_LEN);
- dma_memcpy(l1inst, sdram + TEST_LEN, TEST_LEN);
+ memset(sdram + test_len, 0, test_len);
+ dma_memcpy(l1inst, sdram + test_len, test_len);
thisret = 0;
- for (i = 0; i < TEST_LEN - 32; ++i) {
+ for (i = 0; i < test_len - 32; ++i) {
unsigned char cmp[32];
for (j = 1; j <= 32; ++j) {
isram_memcpy(l1inst + i, sdram + i, j);
@@ -338,7 +338,7 @@ static __init int isram_memcpy_test(char *sdram, void *l1inst)
pr_cont("\n");
if (++thisret > 20) {
pr_err("FAIL: skipping remaining series\n");
- i = TEST_LEN;
+ i = test_len;
break;
}
}
@@ -355,22 +355,30 @@ static __init int isram_test_init(void)
char *sdram;
void *l1inst;
- sdram = kmalloc(TEST_LEN * 2, GFP_KERNEL);
- if (!sdram) {
- pr_warning("SKIP: could not allocate sdram\n");
- return 0;
+ /* Try to test as much of L1SRAM as possible */
+ while (test_len) {
+ test_len >>= 1;
+ l1inst = l1_inst_sram_alloc(test_len);
+ if (l1inst)
+ break;
}
-
- l1inst = l1_inst_sram_alloc(TEST_LEN);
if (!l1inst) {
- kfree(sdram);
pr_warning("SKIP: could not allocate L1 inst\n");
return 0;
}
+ pr_info("INFO: testing %#x bytes (%p - %p)\n",
+ test_len, l1inst, l1inst + test_len);
+
+ sdram = kmalloc(test_len * 2, GFP_KERNEL);
+ if (!sdram) {
+ sram_free(l1inst);
+ pr_warning("SKIP: could not allocate sdram\n");
+ return 0;
+ }
/* sanity check initial L1 inst state */
ret = 1;
- pr_info("INFO: running initial dma_memcpy checks\n");
+ pr_info("INFO: running initial dma_memcpy checks %p\n", sdram);
if (_isram_memcpy_test(0xa, sdram, l1inst, dma_memcpy))
goto abort;
if (_isram_memcpy_test(0x5, sdram, l1inst, dma_memcpy))
diff --git a/arch/blackfin/mm/sram-alloc.c b/arch/blackfin/mm/sram-alloc.c
index 49b2ff2c8b74..627e04b5ba9a 100644
--- a/arch/blackfin/mm/sram-alloc.c
+++ b/arch/blackfin/mm/sram-alloc.c
@@ -256,7 +256,8 @@ static void *_sram_alloc(size_t size, struct sram_piece *pfree_head,
plast->next = pslot->next;
pavail = pslot;
} else {
- pavail = kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
+ /* use atomic so our L1 allocator can be used atomically */
+ pavail = kmem_cache_alloc(sram_piece_cache, GFP_ATOMIC);
if (!pavail)
return NULL;
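The GFP_ATOMIC switch above matters because _sram_alloc() is reached with the allocator's spinlock held and, per the new comment, may now be called from atomic context; GFP_KERNEL allocations can sleep on reclaim, while GFP_ATOMIC ones may fail but never sleep. The shape of the rule as a sketch (illustrative names, not the real sram-alloc code):

	#include <linux/slab.h>
	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(example_lock);

	static struct sram_piece *example_alloc_piece(struct kmem_cache *cache)
	{
		struct sram_piece *piece;
		unsigned long flags;

		spin_lock_irqsave(&example_lock, flags);
		piece = kmem_cache_alloc(cache, GFP_ATOMIC);	/* may fail, never sleeps */
		spin_unlock_irqrestore(&example_lock, flags);
		return piece;
	}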
diff --git a/arch/cris/Kconfig b/arch/cris/Kconfig
index 059eac6abda1..e25bf4440b51 100644
--- a/arch/cris/Kconfig
+++ b/arch/cris/Kconfig
@@ -23,6 +23,9 @@ config RWSEM_XCHGADD_ALGORITHM
config GENERIC_TIME
def_bool y
+config GENERIC_CMOS_UPDATE
+ def_bool y
+
config ARCH_USES_GETTIMEOFFSET
def_bool y
diff --git a/arch/cris/arch-v10/drivers/eeprom.c b/arch/cris/arch-v10/drivers/eeprom.c
index 1f2ae909d3e6..c3405507a3d1 100644
--- a/arch/cris/arch-v10/drivers/eeprom.c
+++ b/arch/cris/arch-v10/drivers/eeprom.c
@@ -73,8 +73,7 @@ struct eeprom_type
int adapt_state; /* 1 = To high , 0 = Even, -1 = To low */
/* this one is to keep the read/write operations atomic */
- wait_queue_head_t wait_q;
- volatile int busy;
+ struct mutex lock;
int retry_cnt_addr; /* Used to keep track of number of retries for
adaptive timing adjustments */
int retry_cnt_read;
@@ -115,8 +114,7 @@ const struct file_operations eeprom_fops =
int __init eeprom_init(void)
{
- init_waitqueue_head(&eeprom.wait_q);
- eeprom.busy = 0;
+ mutex_init(&eeprom.lock);
#ifdef CONFIG_ETRAX_I2C_EEPROM_PROBE
#define EETEXT "Found"
@@ -439,10 +437,7 @@ static loff_t eeprom_lseek(struct file * file, loff_t offset, int orig)
static int eeprom_read_buf(loff_t addr, char * buf, int count)
{
- struct file f;
-
- f.f_pos = addr;
- return eeprom_read(&f, buf, count, &addr);
+ return eeprom_read(NULL, buf, count, &addr);
}
@@ -452,7 +447,7 @@ static int eeprom_read_buf(loff_t addr, char * buf, int count)
static ssize_t eeprom_read(struct file * file, char * buf, size_t count, loff_t *off)
{
int read=0;
- unsigned long p = file->f_pos;
+ unsigned long p = *off;
unsigned char page;
@@ -461,12 +456,9 @@ static ssize_t eeprom_read(struct file * file, char * buf, size_t count, loff_t
return -EFAULT;
}
- wait_event_interruptible(eeprom.wait_q, !eeprom.busy);
- if (signal_pending(current))
+ if (mutex_lock_interruptible(&eeprom.lock))
return -EINTR;
- eeprom.busy++;
-
page = (unsigned char) (p >> 8);
if(!eeprom_address(p))
@@ -476,8 +468,7 @@ static ssize_t eeprom_read(struct file * file, char * buf, size_t count, loff_t
i2c_stop();
/* don't forget to wake them up */
- eeprom.busy--;
- wake_up_interruptible(&eeprom.wait_q);
+ mutex_unlock(&eeprom.lock);
return -EFAULT;
}
@@ -501,11 +492,10 @@ static ssize_t eeprom_read(struct file * file, char * buf, size_t count, loff_t
if(read > 0)
{
- file->f_pos += read;
+ *off += read;
}
- eeprom.busy--;
- wake_up_interruptible(&eeprom.wait_q);
+ mutex_unlock(&eeprom.lock);
return read;
}
@@ -513,11 +503,7 @@ static ssize_t eeprom_read(struct file * file, char * buf, size_t count, loff_t
static int eeprom_write_buf(loff_t addr, const char * buf, int count)
{
- struct file f;
-
- f.f_pos = addr;
-
- return eeprom_write(&f, buf, count, &addr);
+ return eeprom_write(NULL, buf, count, &addr);
}
@@ -534,16 +520,14 @@ static ssize_t eeprom_write(struct file * file, const char * buf, size_t count,
return -EFAULT;
}
- wait_event_interruptible(eeprom.wait_q, !eeprom.busy);
/* bail out if we get interrupted */
- if (signal_pending(current))
+ if (mutex_lock_interruptible(&eeprom.lock))
return -EINTR;
- eeprom.busy++;
for(i = 0; (i < EEPROM_RETRIES) && (restart > 0); i++)
{
restart = 0;
written = 0;
- p = file->f_pos;
+ p = *off;
while( (written < count) && (p < eeprom.size))
@@ -556,8 +540,7 @@ static ssize_t eeprom_write(struct file * file, const char * buf, size_t count,
i2c_stop();
/* don't forget to wake them up */
- eeprom.busy--;
- wake_up_interruptible(&eeprom.wait_q);
+ mutex_unlock(&eeprom.lock);
return -EFAULT;
}
#ifdef EEPROM_ADAPTIVE_TIMING
@@ -669,12 +652,11 @@ static ssize_t eeprom_write(struct file * file, const char * buf, size_t count,
} /* while */
} /* for */
- eeprom.busy--;
- wake_up_interruptible(&eeprom.wait_q);
- if (written == 0 && file->f_pos >= eeprom.size){
+ mutex_unlock(&eeprom.lock);
+ if (written == 0 && p >= eeprom.size){
return -ENOSPC;
}
- file->f_pos += written;
+ *off = p;
return written;
}
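The eeprom.c conversion above replaces a hand-rolled exclusion scheme -- a wait queue plus a busy counter -- with a mutex; mutex_lock_interruptible() covers both the sleep and the signal check that wait_event_interruptible()/signal_pending() used to do by hand, and mutex_unlock() replaces the wake_up_interruptible() calls. The resulting pattern, as a minimal sketch:

	#include <linux/errno.h>
	#include <linux/mutex.h>

	static DEFINE_MUTEX(example_lock);

	static ssize_t example_read(char *buf, size_t count)
	{
		ssize_t read = 0;

		if (mutex_lock_interruptible(&example_lock))
			return -EINTR;		/* woken by a signal while waiting */

		/* ... perform the I2C transfer, updating 'read' ... */

		mutex_unlock(&example_lock);
		return read;
	}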
diff --git a/arch/cris/arch-v10/kernel/time.c b/arch/cris/arch-v10/kernel/time.c
index 31ca1418d5a7..30adae594aef 100644
--- a/arch/cris/arch-v10/kernel/time.c
+++ b/arch/cris/arch-v10/kernel/time.c
@@ -26,7 +26,6 @@
/* it will make jiffies at 96 hz instead of 100 hz though */
#undef USE_CASCADE_TIMERS
-extern void update_xtime_from_cmos(void);
extern int set_rtc_mmss(unsigned long nowtime);
extern int have_rtc;
@@ -188,8 +187,6 @@ stop_watchdog(void)
#endif
}
-/* last time the cmos clock got updated */
-static long last_rtc_update = 0;
/*
* timer_interrupt() needs to keep up the real-time clock,
@@ -232,24 +229,6 @@ timer_interrupt(int irq, void *dev_id)
do_timer(1);
cris_do_profile(regs); /* Save profiling information */
-
- /*
- * If we have an externally synchronized Linux clock, then update
- * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
- * called as close as possible to 500 ms before the new second starts.
- *
- * The division here is not time critical since it will run once in
- * 11 minutes
- */
- if (ntp_synced() &&
- xtime.tv_sec > last_rtc_update + 660 &&
- (xtime.tv_nsec / 1000) >= 500000 - (tick_nsec / 1000) / 2 &&
- (xtime.tv_nsec / 1000) <= 500000 + (tick_nsec / 1000) / 2) {
- if (set_rtc_mmss(xtime.tv_sec) == 0)
- last_rtc_update = xtime.tv_sec;
- else
- last_rtc_update = xtime.tv_sec - 600; /* do it again in 60 s */
- }
return IRQ_HANDLED;
}
@@ -274,22 +253,10 @@ time_init(void)
*/
loops_per_usec = 50;
- if(RTC_INIT() < 0) {
- /* no RTC, start at 1980 */
- xtime.tv_sec = 0;
- xtime.tv_nsec = 0;
+ if(RTC_INIT() < 0)
have_rtc = 0;
- } else {
- /* get the current time */
+ else
have_rtc = 1;
- update_xtime_from_cmos();
- }
-
- /*
- * Initialize wall_to_monotonic such that adding it to xtime will yield zero, the
- * tv_nsec field must be normalized (i.e., 0 <= nsec < NSEC_PER_SEC).
- */
- set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec);
/* Setup the etrax timers
* Base frequency is 25000 hz, divider 250 -> 100 HZ
diff --git a/arch/cris/arch-v32/kernel/time.c b/arch/cris/arch-v32/kernel/time.c
index b1920d8de403..1ee0e1010228 100644
--- a/arch/cris/arch-v32/kernel/time.c
+++ b/arch/cris/arch-v32/kernel/time.c
@@ -44,7 +44,6 @@ unsigned long timer_regs[NR_CPUS] =
#endif
};
-extern void update_xtime_from_cmos(void);
extern int set_rtc_mmss(unsigned long nowtime);
extern int have_rtc;
@@ -198,9 +197,6 @@ handle_watchdog_bite(struct pt_regs* regs)
#endif
}
-/* Last time the cmos clock got updated. */
-static long last_rtc_update = 0;
-
/*
* timer_interrupt() needs to keep up the real-time clock,
* as well as call the "do_timer()" routine every clocktick.
@@ -238,25 +234,6 @@ timer_interrupt(int irq, void *dev_id)
/* Call the real timer interrupt handler */
do_timer(1);
-
- /*
- * If we have an externally synchronized Linux clock, then update
- * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
- * called as close as possible to 500 ms before the new second starts.
- *
- * The division here is not time critical since it will run once in
- * 11 minutes
- */
- if ((time_status & STA_UNSYNC) == 0 &&
- xtime.tv_sec > last_rtc_update + 660 &&
- (xtime.tv_nsec / 1000) >= 500000 - (tick_nsec / 1000) / 2 &&
- (xtime.tv_nsec / 1000) <= 500000 + (tick_nsec / 1000) / 2) {
- if (set_rtc_mmss(xtime.tv_sec) == 0)
- last_rtc_update = xtime.tv_sec;
- else
- /* Do it again in 60 s */
- last_rtc_update = xtime.tv_sec - 600;
- }
return IRQ_HANDLED;
}
@@ -309,23 +286,10 @@ time_init(void)
*/
loops_per_usec = 50;
- if(RTC_INIT() < 0) {
- /* No RTC, start at 1980 */
- xtime.tv_sec = 0;
- xtime.tv_nsec = 0;
+ if(RTC_INIT() < 0)
have_rtc = 0;
- } else {
- /* Get the current time */
+ else
have_rtc = 1;
- update_xtime_from_cmos();
- }
-
- /*
- * Initialize wall_to_monotonic such that adding it to
- * xtime will yield zero, the tv_nsec field must be normalized
- * (i.e., 0 <= nsec < NSEC_PER_SEC).
- */
- set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec);
/* Start CPU local timer. */
cris_timer_init();
diff --git a/arch/cris/include/asm/atomic.h b/arch/cris/include/asm/atomic.h
index a6aca819e9f3..88dc9b9c4ba0 100644
--- a/arch/cris/include/asm/atomic.h
+++ b/arch/cris/include/asm/atomic.h
@@ -15,7 +15,7 @@
#define ATOMIC_INIT(i) { (i) }
-#define atomic_read(v) ((v)->counter)
+#define atomic_read(v) (*(volatile int *)&(v)->counter)
#define atomic_set(v,i) (((v)->counter) = (i))
/* These should be written in asm but we do it in C for now. */
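The atomic_read() change here (and the identical ones for frv, h8300 and ia64 further down) casts the counter through volatile so every call performs a fresh load from memory; a plain (v)->counter read lets the compiler keep the value in a register across a polling loop. A small sketch of the kind of loop this protects (the era-appropriate includes would be <asm/atomic.h> and <asm/processor.h>):

	static atomic_t example_stop = ATOMIC_INIT(0);

	static void example_wait_for_stop(void)
	{
		while (atomic_read(&example_stop) == 0)
			cpu_relax();	/* without the volatile cast this load could be hoisted */
	}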
diff --git a/arch/cris/include/asm/scatterlist.h b/arch/cris/include/asm/scatterlist.h
index faff53ad1f96..249a7842ff5f 100644
--- a/arch/cris/include/asm/scatterlist.h
+++ b/arch/cris/include/asm/scatterlist.h
@@ -1,22 +1,7 @@
#ifndef __ASM_CRIS_SCATTERLIST_H
#define __ASM_CRIS_SCATTERLIST_H
-struct scatterlist {
-#ifdef CONFIG_DEBUG_SG
- unsigned long sg_magic;
-#endif
- char * address; /* Location data is to be transferred to */
- unsigned int length;
-
- /* The following is i386 highmem junk - not used by us */
- unsigned long page_link;
- unsigned int offset;/* for highmem, page offset */
-
-};
-
-#define sg_dma_address(sg) ((sg)->address)
-#define sg_dma_len(sg) ((sg)->length)
-/* i386 junk */
+#include <asm-generic/scatterlist.h>
#define ISA_DMA_THRESHOLD (0x1fffffff)
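This header (and the frv and h8300 ones later in the series) now just pulls in the shared asm-generic definition, keeping only arch-specific constants such as ISA_DMA_THRESHOLD. For orientation, the generic header at this point in time looks approximately like the following (a from-memory sketch, so treat field order and ifdefs as indicative only):

	struct scatterlist {
	#ifdef CONFIG_DEBUG_SG
		unsigned long	sg_magic;
	#endif
		unsigned long	page_link;
		unsigned int	offset;
		unsigned int	length;
		dma_addr_t	dma_address;
	#ifdef CONFIG_NEED_SG_DMA_LENGTH
		unsigned int	dma_length;
	#endif
	};

	#define sg_dma_address(sg)	((sg)->dma_address)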
diff --git a/arch/cris/include/asm/thread_info.h b/arch/cris/include/asm/thread_info.h
index c3aade36c330..91776069ca80 100644
--- a/arch/cris/include/asm/thread_info.h
+++ b/arch/cris/include/asm/thread_info.h
@@ -85,7 +85,7 @@ struct thread_info {
#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
#define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */
#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */
-#define TIF_MEMDIE 17
+#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
#define TIF_FREEZE 18 /* is freezing for suspend */
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
diff --git a/arch/cris/kernel/time.c b/arch/cris/kernel/time.c
index a05dd31f3efb..c72730d20ef6 100644
--- a/arch/cris/kernel/time.c
+++ b/arch/cris/kernel/time.c
@@ -98,6 +98,8 @@ unsigned long
get_cmos_time(void)
{
unsigned int year, mon, day, hour, min, sec;
+ if(!have_rtc)
+ return 0;
sec = CMOS_READ(RTC_SECONDS);
min = CMOS_READ(RTC_MINUTES);
@@ -119,19 +121,19 @@ get_cmos_time(void)
return mktime(year, mon, day, hour, min, sec);
}
-/* update xtime from the CMOS settings. used when /dev/rtc gets a SET_TIME.
- * TODO: this doesn't reset the fancy NTP phase stuff as do_settimeofday does.
- */
-void
-update_xtime_from_cmos(void)
+int update_persistent_clock(struct timespec now)
{
- if(have_rtc) {
- xtime.tv_sec = get_cmos_time();
- xtime.tv_nsec = 0;
- }
+ return set_rtc_mmss(now.tv_sec);
}
+void read_persistent_clock(struct timespec *ts)
+{
+ ts->tv_sec = get_cmos_time();
+ ts->tv_nsec = 0;
+}
+
+
extern void cris_profile_sample(struct pt_regs* regs);
void
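With GENERIC_CMOS_UPDATE selected in the Kconfig hunk earlier, the generic timekeeping core takes over both directions of RTC synchronization: read_persistent_clock() seeds the system time at boot and update_persistent_clock() is called periodically (roughly every 11 minutes) while NTP is synced, which is why the open-coded last_rtc_update logic could be deleted from the timer interrupt handlers. A hedged sketch of the hook pair for a platform with no usable RTC:

	#include <linux/errno.h>
	#include <linux/time.h>

	void read_persistent_clock(struct timespec *ts)
	{
		ts->tv_sec = 0;		/* no RTC: boot at the epoch, NTP corrects later */
		ts->tv_nsec = 0;
	}

	int update_persistent_clock(struct timespec now)
	{
		return -ENODEV;		/* nothing to write back to */
	}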
diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
index 00a57af79afc..fae32c7fdcb6 100644
--- a/arch/frv/include/asm/atomic.h
+++ b/arch/frv/include/asm/atomic.h
@@ -36,7 +36,7 @@
#define smp_mb__after_atomic_inc() barrier()
#define ATOMIC_INIT(i) { (i) }
-#define atomic_read(v) ((v)->counter)
+#define atomic_read(v) (*(volatile int *)&(v)->counter)
#define atomic_set(v, i) (((v)->counter) = (i))
#ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
diff --git a/arch/frv/include/asm/gdb-stub.h b/arch/frv/include/asm/gdb-stub.h
index 2da716407ff2..e6bedd0cd9a5 100644
--- a/arch/frv/include/asm/gdb-stub.h
+++ b/arch/frv/include/asm/gdb-stub.h
@@ -12,6 +12,7 @@
#ifndef __ASM_GDB_STUB_H
#define __ASM_GDB_STUB_H
+#undef GDBSTUB_DEBUG_IO
#undef GDBSTUB_DEBUG_PROTOCOL
#include <asm/ptrace.h>
@@ -108,6 +109,12 @@ extern void gdbstub_printk(const char *fmt, ...);
extern void debug_to_serial(const char *p, int n);
extern void console_set_baud(unsigned baud);
+#ifdef GDBSTUB_DEBUG_IO
+#define gdbstub_io(FMT,...) gdbstub_printk(FMT, ##__VA_ARGS__)
+#else
+#define gdbstub_io(FMT,...) ({ 0; })
+#endif
+
#ifdef GDBSTUB_DEBUG_PROTOCOL
#define gdbstub_proto(FMT,...) gdbstub_printk(FMT,##__VA_ARGS__)
#else
diff --git a/arch/frv/include/asm/mem-layout.h b/arch/frv/include/asm/mem-layout.h
index 2947764fc0e0..ccae981876fa 100644
--- a/arch/frv/include/asm/mem-layout.h
+++ b/arch/frv/include/asm/mem-layout.h
@@ -35,8 +35,8 @@
* the slab must be aligned such that load- and store-double instructions don't
* fault if used
*/
-#define ARCH_KMALLOC_MINALIGN 8
-#define ARCH_SLAB_MINALIGN 8
+#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
+#define ARCH_SLAB_MINALIGN L1_CACHE_BYTES
/*****************************************************************************/
/*
diff --git a/arch/frv/include/asm/scatterlist.h b/arch/frv/include/asm/scatterlist.h
index 4bca8a28546c..1614bfd7e3a4 100644
--- a/arch/frv/include/asm/scatterlist.h
+++ b/arch/frv/include/asm/scatterlist.h
@@ -1,45 +1,7 @@
#ifndef _ASM_SCATTERLIST_H
#define _ASM_SCATTERLIST_H
-#include <asm/types.h>
-
-/*
- * Drivers must set either ->address or (preferred) page and ->offset
- * to indicate where data must be transferred to/from.
- *
- * Using page is recommended since it handles highmem data as well as
- * low mem. ->address is restricted to data which has a virtual mapping, and
- * it will go away in the future. Updating to page can be automated very
- * easily -- something like
- *
- * sg->address = some_ptr;
- *
- * can be rewritten as
- *
- * sg_set_buf(sg, some_ptr, length);
- *
- * and that's it. There's no excuse for not highmem enabling YOUR driver. /jens
- */
-struct scatterlist {
-#ifdef CONFIG_DEBUG_SG
- unsigned long sg_magic;
-#endif
- unsigned long page_link;
- unsigned int offset; /* for highmem, page offset */
-
- dma_addr_t dma_address;
- unsigned int length;
-};
-
-/*
- * These macros should be used after a pci_map_sg call has been done
- * to get bus addresses of each of the SG entries and their lengths.
- * You should only work with the number of sg entries pci_map_sg
- * returns, or alternatively stop on the first sg_dma_len(sg) which
- * is 0.
- */
-#define sg_dma_address(sg) ((sg)->dma_address)
-#define sg_dma_len(sg) ((sg)->length)
+#include <asm-generic/scatterlist.h>
#define ISA_DMA_THRESHOLD (0xffffffffUL)
diff --git a/arch/frv/include/asm/thread_info.h b/arch/frv/include/asm/thread_info.h
index e608e056bb53..11f33ead29bf 100644
--- a/arch/frv/include/asm/thread_info.h
+++ b/arch/frv/include/asm/thread_info.h
@@ -113,7 +113,7 @@ register struct thread_info *__current_thread_info asm("gr15");
#define TIF_SINGLESTEP 4 /* restore singlestep on return to user mode */
#define TIF_RESTORE_SIGMASK 5 /* restore signal mask in do_signal() */
#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */
-#define TIF_MEMDIE 17 /* OOM killer killed process */
+#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
#define TIF_FREEZE 18 /* freezing for suspend */
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
diff --git a/arch/frv/kernel/gdb-io.c b/arch/frv/kernel/gdb-io.c
index c997bccb9221..2ca641d199f8 100644
--- a/arch/frv/kernel/gdb-io.c
+++ b/arch/frv/kernel/gdb-io.c
@@ -171,11 +171,11 @@ int gdbstub_rx_char(unsigned char *_ch, int nonblock)
return -EINTR;
}
else if (st & (UART_LSR_FE|UART_LSR_OE|UART_LSR_PE)) {
- gdbstub_proto("### GDB Rx Error (st=%02x) ###\n",st);
+ gdbstub_io("### GDB Rx Error (st=%02x) ###\n",st);
return -EIO;
}
else {
- gdbstub_proto("### GDB Rx %02x (st=%02x) ###\n",ch,st);
+ gdbstub_io("### GDB Rx %02x (st=%02x) ###\n",ch,st);
*_ch = ch & 0x7f;
return 0;
}
diff --git a/arch/frv/kernel/gdb-stub.c b/arch/frv/kernel/gdb-stub.c
index 7ca8a6b19ac9..84d103c33c9c 100644
--- a/arch/frv/kernel/gdb-stub.c
+++ b/arch/frv/kernel/gdb-stub.c
@@ -1344,6 +1344,44 @@ void gdbstub_get_mmu_state(void)
} /* end gdbstub_get_mmu_state() */
+/*
+ * handle general query commands of the form 'qXXXXX'
+ */
+static void gdbstub_handle_query(void)
+{
+ if (strcmp(input_buffer, "qAttached") == 0) {
+ /* return current thread ID */
+ sprintf(output_buffer, "1");
+ return;
+ }
+
+ if (strcmp(input_buffer, "qC") == 0) {
+ /* return current thread ID */
+ sprintf(output_buffer, "QC 0");
+ return;
+ }
+
+ if (strcmp(input_buffer, "qOffsets") == 0) {
+ /* return relocation offset of text and data segments */
+ sprintf(output_buffer, "Text=0;Data=0;Bss=0");
+ return;
+ }
+
+ if (strcmp(input_buffer, "qSymbol::") == 0) {
+ sprintf(output_buffer, "OK");
+ return;
+ }
+
+ if (strcmp(input_buffer, "qSupported") == 0) {
+ /* query of supported features */
+ sprintf(output_buffer, "PacketSize=%u;ReverseContinue-;ReverseStep-",
+ sizeof(input_buffer));
+ return;
+ }
+
+ gdbstub_strcpy(output_buffer,"E01");
+}
+
/*****************************************************************************/
/*
* handle event interception and GDB remote protocol processing
@@ -1840,6 +1878,10 @@ void gdbstub(int sigval)
case 'k' :
goto done; /* just continue */
+ /* detach */
+ case 'D':
+ gdbstub_strcpy(output_buffer, "OK");
+ break;
/* reset the whole machine (FIXME: system dependent) */
case 'r':
@@ -1852,6 +1894,14 @@ void gdbstub(int sigval)
__debug_status.dcr |= DCR_SE;
goto done;
+ /* extended command */
+ case 'v':
+ if (strcmp(input_buffer, "vCont?") == 0) {
+ output_buffer[0] = 0;
+ break;
+ }
+ goto unsupported_cmd;
+
/* set baud rate (bBB) */
case 'b':
ptr = &input_buffer[1];
@@ -1923,8 +1973,19 @@ void gdbstub(int sigval)
gdbstub_strcpy(output_buffer,"OK");
break;
+ /* Thread-setting packet */
+ case 'H':
+ gdbstub_strcpy(output_buffer, "OK");
+ break;
+
+ case 'q':
+ gdbstub_handle_query();
+ break;
+
default:
+ unsupported_cmd:
gdbstub_proto("### GDB Unsupported Cmd '%s'\n",input_buffer);
+ gdbstub_strcpy(output_buffer,"E01");
break;
}
diff --git a/arch/frv/kernel/ptrace.c b/arch/frv/kernel/ptrace.c
index 60eeed3694c0..fac028936a04 100644
--- a/arch/frv/kernel/ptrace.c
+++ b/arch/frv/kernel/ptrace.c
@@ -344,26 +344,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
0, sizeof(child->thread.user->f),
(const void __user *)data);
- case PTRACE_GETFDPIC:
- tmp = 0;
- switch (addr) {
- case PTRACE_GETFDPIC_EXEC:
- tmp = child->mm->context.exec_fdpic_loadmap;
- break;
- case PTRACE_GETFDPIC_INTERP:
- tmp = child->mm->context.interp_fdpic_loadmap;
- break;
- default:
- break;
- }
-
- ret = 0;
- if (put_user(tmp, (unsigned long *) data)) {
- ret = -EFAULT;
- break;
- }
- break;
-
default:
ret = ptrace_request(child, request, addr, data);
break;
diff --git a/arch/frv/kernel/sysctl.c b/arch/frv/kernel/sysctl.c
index 71abd1510a59..6c155d69da29 100644
--- a/arch/frv/kernel/sysctl.c
+++ b/arch/frv/kernel/sysctl.c
@@ -46,8 +46,9 @@ static void frv_change_dcache_mode(unsigned long newmode)
/*
* handle requests to dynamically switch the write caching mode delivered by /proc
*/
-static int procctl_frv_cachemode(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+static int procctl_frv_cachemode(ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos)
{
unsigned long hsr0;
char buff[8];
@@ -84,7 +85,7 @@ static int procctl_frv_cachemode(ctl_table *table, int write, struct file *filp,
}
/* read the state */
- if (filp->f_pos > 0) {
+ if (*ppos > 0) {
*lenp = 0;
return 0;
}
@@ -110,7 +111,7 @@ static int procctl_frv_cachemode(ctl_table *table, int write, struct file *filp,
return -EFAULT;
*lenp = len;
- filp->f_pos = len;
+ *ppos = len;
return 0;
} /* end procctl_frv_cachemode() */
@@ -120,8 +121,9 @@ static int procctl_frv_cachemode(ctl_table *table, int write, struct file *filp,
* permit the mm_struct the nominated process is using have its MMU context ID pinned
*/
#ifdef CONFIG_MMU
-static int procctl_frv_pin_cxnr(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+static int procctl_frv_pin_cxnr(ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos)
{
pid_t pid;
char buff[16], *p;
@@ -150,7 +152,7 @@ static int procctl_frv_pin_cxnr(ctl_table *table, int write, struct file *filp,
}
/* read the currently pinned CXN */
- if (filp->f_pos > 0) {
+ if (*ppos > 0) {
*lenp = 0;
return 0;
}
@@ -163,7 +165,7 @@ static int procctl_frv_pin_cxnr(ctl_table *table, int write, struct file *filp,
return -EFAULT;
*lenp = len;
- filp->f_pos = len;
+ *ppos = len;
return 0;
} /* end procctl_frv_pin_cxnr() */
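Both handlers above are adjusted to the file-less proc_handler prototype: the position comes in through *ppos rather than filp->f_pos, and setting *lenp = 0 once *ppos is non-zero is what signals EOF on the second read. A hedged skeleton of a read-only handler in that style (illustrative only):

	#include <linux/errno.h>
	#include <linux/sysctl.h>
	#include <linux/uaccess.h>

	static int example_proc_handler(ctl_table *table, int write,
					void __user *buffer, size_t *lenp,
					loff_t *ppos)
	{
		char buf[16];
		size_t len;

		if (write)
			return -EPERM;		/* this example is read-only */

		if (*ppos > 0) {		/* data already returned: report EOF */
			*lenp = 0;
			return 0;
		}

		len = snprintf(buf, sizeof(buf), "%d\n", 42);
		if (len > *lenp)
			len = *lenp;
		if (copy_to_user(buffer, buf, len))
			return -EFAULT;

		*lenp = len;
		*ppos = len;
		return 0;
	}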
diff --git a/arch/frv/kernel/time.c b/arch/frv/kernel/time.c
index fb0ce7577225..0ddbbae83cb2 100644
--- a/arch/frv/kernel/time.c
+++ b/arch/frv/kernel/time.c
@@ -48,20 +48,12 @@ static struct irqaction timer_irq = {
.name = "timer",
};
-static inline int set_rtc_mmss(unsigned long nowtime)
-{
- return -1;
-}
-
/*
* timer_interrupt() needs to keep up the real-time clock,
* as well as call the "do_timer()" routine every clocktick
*/
static irqreturn_t timer_interrupt(int irq, void *dummy)
{
- /* last time the cmos clock got updated */
- static long last_rtc_update = 0;
-
profile_tick(CPU_PROFILING);
/*
* Here we are in the timer irq handler. We just have irqs locally
@@ -74,22 +66,6 @@ static irqreturn_t timer_interrupt(int irq, void *dummy)
do_timer(1);
- /*
- * If we have an externally synchronized Linux clock, then update
- * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
- * called as close as possible to 500 ms before the new second starts.
- */
- if (ntp_synced() &&
- xtime.tv_sec > last_rtc_update + 660 &&
- (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
- (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2
- ) {
- if (set_rtc_mmss(xtime.tv_sec) == 0)
- last_rtc_update = xtime.tv_sec;
- else
- last_rtc_update = xtime.tv_sec - 600; /* do it again in 60 s */
- }
-
#ifdef CONFIG_HEARTBEAT
static unsigned short n;
n++;
@@ -119,7 +95,8 @@ void time_divisor_init(void)
__set_TCSR_DATA(0, base >> 8);
}
-void time_init(void)
+
+void read_persistent_clock(struct timespec *ts)
{
unsigned int year, mon, day, hour, min, sec;
@@ -135,9 +112,12 @@ void time_init(void)
if ((year += 1900) < 1970)
year += 100;
- xtime.tv_sec = mktime(year, mon, day, hour, min, sec);
- xtime.tv_nsec = 0;
+ ts->tv_sec = mktime(year, mon, day, hour, min, sec);
+ ts->tv_nsec = 0;
+}
+void time_init(void)
+{
/* install scheduling interrupt handler */
setup_irq(IRQ_CPU_TIMER0, &timer_irq);
diff --git a/arch/h8300/include/asm/atomic.h b/arch/h8300/include/asm/atomic.h
index 33c8c0fa9583..e936804b7508 100644
--- a/arch/h8300/include/asm/atomic.h
+++ b/arch/h8300/include/asm/atomic.h
@@ -10,7 +10,7 @@
#define ATOMIC_INIT(i) { (i) }
-#define atomic_read(v) ((v)->counter)
+#define atomic_read(v) (*(volatile int *)&(v)->counter)
#define atomic_set(v, i) (((v)->counter) = i)
#include <asm/system.h>
diff --git a/arch/h8300/include/asm/scatterlist.h b/arch/h8300/include/asm/scatterlist.h
index d3ecdd87ac90..de08a4a2cc1c 100644
--- a/arch/h8300/include/asm/scatterlist.h
+++ b/arch/h8300/include/asm/scatterlist.h
@@ -1,17 +1,7 @@
#ifndef _H8300_SCATTERLIST_H
#define _H8300_SCATTERLIST_H
-#include <asm/types.h>
-
-struct scatterlist {
-#ifdef CONFIG_DEBUG_SG
- unsigned long sg_magic;
-#endif
- unsigned long page_link;
- unsigned int offset;
- dma_addr_t dma_address;
- unsigned int length;
-};
+#include <asm-generic/scatterlist.h>
#define ISA_DMA_THRESHOLD (0xffffffff)
diff --git a/arch/h8300/include/asm/thread_info.h b/arch/h8300/include/asm/thread_info.h
index 70e67e47d020..d6f1784bfdee 100644
--- a/arch/h8300/include/asm/thread_info.h
+++ b/arch/h8300/include/asm/thread_info.h
@@ -87,7 +87,7 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
#define TIF_POLLING_NRFLAG 3 /* true if poll_idle() is polling
TIF_NEED_RESCHED */
-#define TIF_MEMDIE 4
+#define TIF_MEMDIE 4 /* is terminating due to OOM killer */
#define TIF_RESTORE_SIGMASK 5 /* restore signal mask in do_signal() */
#define TIF_NOTIFY_RESUME 6 /* callback before returning to user */
#define TIF_FREEZE 16 /* is freezing for suspend */
diff --git a/arch/h8300/kernel/time.c b/arch/h8300/kernel/time.c
index 7f2d6cfbb4b6..165005aff9df 100644
--- a/arch/h8300/kernel/time.c
+++ b/arch/h8300/kernel/time.c
@@ -41,7 +41,7 @@ void h8300_timer_tick(void)
update_process_times(user_mode(get_irq_regs()));
}
-void __init time_init(void)
+void read_persistent_clock(struct timespec *ts)
{
unsigned int year, mon, day, hour, min, sec;
@@ -56,8 +56,12 @@ void __init time_init(void)
#endif
if ((year += 1900) < 1970)
year += 100;
- xtime.tv_sec = mktime(year, mon, day, hour, min, sec);
- xtime.tv_nsec = 0;
+ ts->tv_sec = mktime(year, mon, day, hour, min, sec);
+ ts->tv_nsec = 0;
+}
+
+void __init time_init(void)
+{
h8300_timer_setup();
}
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 4d4f4188cdf1..95610820041e 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -56,10 +56,10 @@ config MMU
config NEED_DMA_MAP_STATE
def_bool y
-config SWIOTLB
- bool
+config NEED_SG_DMA_LENGTH
+ def_bool y
-config IOMMU_HELPER
+config SWIOTLB
bool
config GENERIC_LOCKBREAK
@@ -498,6 +498,14 @@ config HAVE_ARCH_NODEDATA_EXTENSION
def_bool y
depends on NUMA
+config USE_PERCPU_NUMA_NODE_ID
+ def_bool y
+ depends on NUMA
+
+config HAVE_MEMORYLESS_NODES
+ def_bool y
+ depends on NUMA
+
config ARCH_PROC_KCORE_TEXT
def_bool y
depends on PROC_KCORE
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index e14c492a8a93..4ce8d1358fee 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -2046,13 +2046,13 @@ acpi_sba_ioc_add(struct acpi_device *device)
struct ioc *ioc;
acpi_status status;
u64 hpa, length;
- struct acpi_device_info *dev_info;
+ struct acpi_device_info *adi;
status = hp_acpi_csr_space(device->handle, &hpa, &length);
if (ACPI_FAILURE(status))
return 1;
- status = acpi_get_object_info(device->handle, &dev_info);
+ status = acpi_get_object_info(device->handle, &adi);
if (ACPI_FAILURE(status))
return 1;
@@ -2060,13 +2060,13 @@ acpi_sba_ioc_add(struct acpi_device *device)
* For HWP0001, only SBA appears in ACPI namespace. It encloses the PCI
* root bridges, and its CSR space includes the IOC function.
*/
- if (strncmp("HWP0001", dev_info->hardware_id.string, 7) == 0) {
+ if (strncmp("HWP0001", adi->hardware_id.string, 7) == 0) {
hpa += ZX1_IOC_OFFSET;
/* zx1 based systems default to kernel page size iommu pages */
if (!iovp_shift)
iovp_shift = min(PAGE_SHIFT, 16);
}
- kfree(dev_info);
+ kfree(adi);
/*
* default anything not caught above or specified on cmdline to 4k
diff --git a/arch/ia64/include/asm/acpi.h b/arch/ia64/include/asm/acpi.h
index 21adbd7f90f8..837dc82a013e 100644
--- a/arch/ia64/include/asm/acpi.h
+++ b/arch/ia64/include/asm/acpi.h
@@ -94,7 +94,6 @@ ia64_acpi_release_global_lock (unsigned int *lock)
#define acpi_noirq 0 /* ACPI always enabled on IA64 */
#define acpi_pci_disabled 0 /* ACPI PCI always enabled on IA64 */
#define acpi_strict 1 /* no ACPI spec workarounds on IA64 */
-#define acpi_ht 0 /* no HT-only mode on IA64 */
#endif
#define acpi_processor_cstate_check(x) (x) /* no idle limits on IA64 :) */
static inline void disable_acpi(void) { }
diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
index 88405cb0832a..4e1948447a00 100644
--- a/arch/ia64/include/asm/atomic.h
+++ b/arch/ia64/include/asm/atomic.h
@@ -21,8 +21,8 @@
#define ATOMIC_INIT(i) ((atomic_t) { (i) })
#define ATOMIC64_INIT(i) ((atomic64_t) { (i) })
-#define atomic_read(v) ((v)->counter)
-#define atomic64_read(v) ((v)->counter)
+#define atomic_read(v) (*(volatile int *)&(v)->counter)
+#define atomic64_read(v) (*(volatile long *)&(v)->counter)
#define atomic_set(v,i) (((v)->counter) = (i))
#define atomic64_set(v,i) (((v)->counter) = (i))
diff --git a/arch/ia64/include/asm/bitops.h b/arch/ia64/include/asm/bitops.h
index 6ebc229a1c51..9da3df6f1a52 100644
--- a/arch/ia64/include/asm/bitops.h
+++ b/arch/ia64/include/asm/bitops.h
@@ -437,17 +437,18 @@ __fls (unsigned long x)
* hweightN: returns the hamming weight (i.e. the number
* of bits set) of a N-bit word
*/
-static __inline__ unsigned long
-hweight64 (unsigned long x)
+static __inline__ unsigned long __arch_hweight64(unsigned long x)
{
unsigned long result;
result = ia64_popcnt(x);
return result;
}
-#define hweight32(x) (unsigned int) hweight64((x) & 0xfffffffful)
-#define hweight16(x) (unsigned int) hweight64((x) & 0xfffful)
-#define hweight8(x) (unsigned int) hweight64((x) & 0xfful)
+#define __arch_hweight32(x) ((unsigned int) __arch_hweight64((x) & 0xfffffffful))
+#define __arch_hweight16(x) ((unsigned int) __arch_hweight64((x) & 0xfffful))
+#define __arch_hweight8(x) ((unsigned int) __arch_hweight64((x) & 0xfful))
+
+#include <asm-generic/bitops/const_hweight.h>
#endif /* __KERNEL__ */
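The rename to __arch_hweight*() lets ia64 pull in <asm-generic/bitops/const_hweight.h>, whose hweightN() wrappers dispatch on __builtin_constant_p(): constant arguments can be folded at compile time while runtime values still reach the popcount-based routine above. A userspace illustration of that dispatch idea (not the header's exact text):

#include <stdio.h>

static unsigned long __arch_hweight64(unsigned long x)
{
	/* stand-in for the ia64_popcnt()-based implementation */
	return (unsigned long)__builtin_popcountl(x);
}

#define hweight64(w)                                                     \
	(__builtin_constant_p(w) ? (unsigned long)__builtin_popcountl(w) \
				 : __arch_hweight64(w))

int main(void)
{
	volatile unsigned long runtime_val = 0xf0f0UL;

	/* the first call can be folded by the compiler, the second cannot */
	printf("%lu %lu\n", hweight64(0xffUL), hweight64(runtime_val));
	return 0;
}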
diff --git a/arch/ia64/include/asm/mmzone.h b/arch/ia64/include/asm/mmzone.h
index f2ca32069b3f..e0de61709cf1 100644
--- a/arch/ia64/include/asm/mmzone.h
+++ b/arch/ia64/include/asm/mmzone.h
@@ -19,16 +19,12 @@
static inline int pfn_to_nid(unsigned long pfn)
{
-#ifdef CONFIG_NUMA
extern int paddr_to_nid(unsigned long);
int nid = paddr_to_nid(pfn << PAGE_SHIFT);
if (nid < 0)
return 0;
else
return nid;
-#else
- return 0;
-#endif
}
#ifdef CONFIG_IA64_DIG /* DIG systems are small */
diff --git a/arch/ia64/include/asm/percpu.h b/arch/ia64/include/asm/percpu.h
index f7c00a5e0e2b..1bd408265694 100644
--- a/arch/ia64/include/asm/percpu.h
+++ b/arch/ia64/include/asm/percpu.h
@@ -39,7 +39,10 @@ extern void *per_cpu_init(void);
* On the positive side, using __ia64_per_cpu_var() instead of __get_cpu_var() is slightly
* more efficient.
*/
-#define __ia64_per_cpu_var(var) var
+#define __ia64_per_cpu_var(var) (*({ \
+ __verify_pcpu_ptr(&(var)); \
+ ((typeof(var) __kernel __force *)&(var)); \
+}))
#include <asm-generic/percpu.h>
diff --git a/arch/ia64/include/asm/scatterlist.h b/arch/ia64/include/asm/scatterlist.h
index d8e98961dec7..f299a4fb25c8 100644
--- a/arch/ia64/include/asm/scatterlist.h
+++ b/arch/ia64/include/asm/scatterlist.h
@@ -1,6 +1,7 @@
#ifndef _ASM_IA64_SCATTERLIST_H
#define _ASM_IA64_SCATTERLIST_H
+#include <asm-generic/scatterlist.h>
/*
* It used to be that ISA_DMA_THRESHOLD had something to do with the
* DMA-limits of ISA-devices. Nowadays, its only remaining use (apart
@@ -10,7 +11,6 @@
* that's 4GB - 1.
*/
#define ISA_DMA_THRESHOLD 0xffffffff
-
-#include <asm-generic/scatterlist.h>
+#define ARCH_HAS_SG_CHAIN
#endif /* _ASM_IA64_SCATTERLIST_H */
diff --git a/arch/ia64/include/asm/thread_info.h b/arch/ia64/include/asm/thread_info.h
index 8ce2e388e37c..b6a5ba2aca34 100644
--- a/arch/ia64/include/asm/thread_info.h
+++ b/arch/ia64/include/asm/thread_info.h
@@ -102,7 +102,7 @@ struct thread_info {
#define TIF_SINGLESTEP 4 /* restore singlestep on return to user mode */
#define TIF_NOTIFY_RESUME 6 /* resumption notification requested */
#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */
-#define TIF_MEMDIE 17
+#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
#define TIF_MCA_INIT 18 /* this task is processing MCA or INIT */
#define TIF_DB_DISABLED 19 /* debug trap disabled for fsyscall */
#define TIF_FREEZE 20 /* is freezing for suspend */
diff --git a/arch/ia64/include/asm/topology.h b/arch/ia64/include/asm/topology.h
index d323071d0f91..09f646753d1a 100644
--- a/arch/ia64/include/asm/topology.h
+++ b/arch/ia64/include/asm/topology.h
@@ -26,11 +26,6 @@
#define RECLAIM_DISTANCE 15
/*
- * Returns the number of the node containing CPU 'cpu'
- */
-#define cpu_to_node(cpu) (int)(cpu_to_node_map[cpu])
-
-/*
* Returns a bitmask of CPUs on Node 'node'.
*/
#define cpumask_of_node(node) ((node) == -1 ? \
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 4d1a7e9314cf..c6c90f39f4d9 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -785,6 +785,14 @@ int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
return 0;
}
+int acpi_isa_irq_to_gsi(unsigned isa_irq, u32 *gsi)
+{
+ if (isa_irq >= 16)
+ return -1;
+ *gsi = isa_irq;
+ return 0;
+}
+
/*
* ACPI based hotplug CPU support
*/
diff --git a/arch/ia64/kernel/cpufreq/acpi-cpufreq.c b/arch/ia64/kernel/cpufreq/acpi-cpufreq.c
index b0b4e6e710f2..22f61526a8e1 100644
--- a/arch/ia64/kernel/cpufreq/acpi-cpufreq.c
+++ b/arch/ia64/kernel/cpufreq/acpi-cpufreq.c
@@ -113,7 +113,7 @@ processor_get_freq (
dprintk("processor_get_freq\n");
saved_mask = current->cpus_allowed;
- set_cpus_allowed(current, cpumask_of_cpu(cpu));
+ set_cpus_allowed_ptr(current, cpumask_of(cpu));
if (smp_processor_id() != cpu)
goto migrate_end;
@@ -121,7 +121,7 @@ processor_get_freq (
ret = processor_get_pstate(&value);
if (ret) {
- set_cpus_allowed(current, saved_mask);
+ set_cpus_allowed_ptr(current, &saved_mask);
printk(KERN_WARNING "get performance failed with error %d\n",
ret);
ret = 0;
@@ -131,7 +131,7 @@ processor_get_freq (
ret = (clock_freq*1000);
migrate_end:
- set_cpus_allowed(current, saved_mask);
+ set_cpus_allowed_ptr(current, &saved_mask);
return ret;
}
@@ -151,7 +151,7 @@ processor_set_freq (
dprintk("processor_set_freq\n");
saved_mask = current->cpus_allowed;
- set_cpus_allowed(current, cpumask_of_cpu(cpu));
+ set_cpus_allowed_ptr(current, cpumask_of(cpu));
if (smp_processor_id() != cpu) {
retval = -EAGAIN;
goto migrate_end;
@@ -208,7 +208,7 @@ processor_set_freq (
retval = 0;
migrate_end:
- set_cpus_allowed(current, saved_mask);
+ set_cpus_allowed_ptr(current, &saved_mask);
return (retval);
}
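The cpufreq hunks above — and the salinfo, topology and sn_hwperf hunks later in this diff — switch from set_cpus_allowed()/cpumask_of_cpu(), which copy a full cpumask by value, to set_cpus_allowed_ptr()/cpumask_of(), which pass a pointer to an existing mask. A minimal sketch of the shared save/migrate/restore pattern; sketch_do_work() is a made-up placeholder, not a kernel API:

#include <linux/sched.h>
#include <linux/cpumask.h>

static int run_on_cpu_sketch(int cpu)
{
	cpumask_t saved_mask = current->cpus_allowed;
	int ret;

	set_cpus_allowed_ptr(current, cpumask_of(cpu));	/* migrate to 'cpu' */
	ret = sketch_do_work(cpu);			/* hypothetical work */
	set_cpus_allowed_ptr(current, &saved_mask);	/* restore old mask */
	return ret;
}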
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index 640479304ac0..f14c35f9b03a 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -29,6 +29,7 @@
#include <linux/threads.h>
#include <linux/bitops.h>
#include <linux/irq.h>
+#include <linux/ratelimit.h>
#include <asm/delay.h>
#include <asm/intrinsics.h>
@@ -467,13 +468,9 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
sp = ia64_getreg(_IA64_REG_SP);
if ((sp - bsp) < 1024) {
- static unsigned char count;
- static long last_time;
+ static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
- if (time_after(jiffies, last_time + 5 * HZ))
- count = 0;
- if (++count < 5) {
- last_time = jiffies;
+ if (__ratelimit(&ratelimit)) {
printk("ia64_handle_irq: DANGER: less than "
"1KB of free stack space!!\n"
"(bsp=0x%lx, sp=%lx)\n", bsp, sp);
diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c
index 3095654f9ab3..d9485d952ed0 100644
--- a/arch/ia64/kernel/pci-swiotlb.c
+++ b/arch/ia64/kernel/pci-swiotlb.c
@@ -31,8 +31,6 @@ struct dma_map_ops swiotlb_dma_ops = {
.unmap_sg = swiotlb_unmap_sg_attrs,
.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
.sync_single_for_device = swiotlb_sync_single_for_device,
- .sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
- .sync_single_range_for_device = swiotlb_sync_single_range_for_device,
.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
.sync_sg_for_device = swiotlb_sync_sg_for_device,
.dma_supported = swiotlb_dma_supported,
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
index 0dec7f702448..7c7909f9bc93 100644
--- a/arch/ia64/kernel/ptrace.c
+++ b/arch/ia64/kernel/ptrace.c
@@ -638,7 +638,7 @@ ptrace_attach_sync_user_rbs (struct task_struct *child)
*/
read_lock(&tasklist_lock);
- if (child->signal) {
+ if (child->sighand) {
spin_lock_irq(&child->sighand->siglock);
if (child->state == TASK_STOPPED &&
!test_and_set_tsk_thread_flag(child, TIF_RESTORE_RSE)) {
@@ -662,7 +662,7 @@ ptrace_attach_sync_user_rbs (struct task_struct *child)
* job control stop, so that SIGCONT can be used to wake it up.
*/
read_lock(&tasklist_lock);
- if (child->signal) {
+ if (child->sighand) {
spin_lock_irq(&child->sighand->siglock);
if (child->state == TASK_TRACED &&
(child->signal->flags & SIGNAL_STOP_STOPPED)) {
diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
index e6676fca4828..aa8b5fa1a8de 100644
--- a/arch/ia64/kernel/salinfo.c
+++ b/arch/ia64/kernel/salinfo.c
@@ -404,10 +404,9 @@ static void
call_on_cpu(int cpu, void (*fn)(void *), void *arg)
{
cpumask_t save_cpus_allowed = current->cpus_allowed;
- cpumask_t new_cpus_allowed = cpumask_of_cpu(cpu);
- set_cpus_allowed(current, new_cpus_allowed);
+ set_cpus_allowed_ptr(current, cpumask_of(cpu));
(*fn)(arg);
- set_cpus_allowed(current, save_cpus_allowed);
+ set_cpus_allowed_ptr(current, &save_cpus_allowed);
}
static void
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index e5230b2ff2c5..6a1380e90f87 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -390,6 +390,14 @@ smp_callin (void)
fix_b0_for_bsp();
+#ifdef CONFIG_NUMA
+ /*
+ * numa_node_id() works after this.
+ */
+ set_numa_node(cpu_to_node_map[cpuid]);
+ set_numa_mem(local_memory_node(cpu_to_node_map[cpuid]));
+#endif
+
ipi_call_lock_irq();
spin_lock(&vector_lock);
/* Setup the per cpu irq handling data structures */
@@ -632,6 +640,9 @@ void __devinit smp_prepare_boot_cpu(void)
{
cpu_set(smp_processor_id(), cpu_online_map);
cpu_set(smp_processor_id(), cpu_callin_map);
+#ifdef CONFIG_NUMA
+ set_numa_node(cpu_to_node_map[smp_processor_id()]);
+#endif
per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
paravirt_post_smp_prepare_boot_cpu();
}
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 47a192781b0a..653b3c46ea82 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -430,18 +430,16 @@ static int __init rtc_init(void)
}
module_init(rtc_init);
+void read_persistent_clock(struct timespec *ts)
+{
+ efi_gettimeofday(ts);
+}
+
void __init
time_init (void)
{
register_percpu_irq(IA64_TIMER_VECTOR, &timer_irqaction);
- efi_gettimeofday(&xtime);
ia64_init_itm();
-
- /*
- * Initialize wall_to_monotonic such that adding it to xtime will yield zero, the
- * tv_nsec field must be normalized (i.e., 0 <= nsec < NSEC_PER_SEC).
- */
- set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec);
}
/*
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index 28f299de2903..0baa1bbb65fe 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -361,12 +361,12 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
return 0;
oldmask = current->cpus_allowed;
- retval = set_cpus_allowed(current, cpumask_of_cpu(cpu));
+ retval = set_cpus_allowed_ptr(current, cpumask_of(cpu));
if (unlikely(retval))
return retval;
retval = cpu_cache_sysfs_init(cpu);
- set_cpus_allowed(current, oldmask);
+ set_cpus_allowed_ptr(current, &oldmask);
if (unlikely(retval < 0))
return retval;
diff --git a/arch/ia64/kernel/unaligned.c b/arch/ia64/kernel/unaligned.c
index 776dd40397e2..622772b7fb6c 100644
--- a/arch/ia64/kernel/unaligned.c
+++ b/arch/ia64/kernel/unaligned.c
@@ -17,6 +17,7 @@
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/tty.h>
+#include <linux/ratelimit.h>
#include <asm/intrinsics.h>
#include <asm/processor.h>
@@ -1283,24 +1284,9 @@ emulate_store_float (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
/*
* Make sure we log the unaligned access, so that user/sysadmin can notice it and
* eventually fix the program. However, we don't want to do that for every access so we
- * pace it with jiffies. This isn't really MP-safe, but it doesn't really have to be
- * either...
+ * pace it with jiffies.
*/
-static int
-within_logging_rate_limit (void)
-{
- static unsigned long count, last_time;
-
- if (time_after(jiffies, last_time + 5 * HZ))
- count = 0;
- if (count < 5) {
- last_time = jiffies;
- count++;
- return 1;
- }
- return 0;
-
-}
+static DEFINE_RATELIMIT_STATE(logging_rate_limit, 5 * HZ, 5);
void
ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
@@ -1337,7 +1323,7 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
if (!no_unaligned_warning &&
!(current->thread.flags & IA64_THREAD_UAC_NOPRINT) &&
- within_logging_rate_limit())
+ __ratelimit(&logging_rate_limit))
{
char buf[200]; /* comm[] is at most 16 bytes... */
size_t len;
@@ -1370,7 +1356,7 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
}
}
} else {
- if (within_logging_rate_limit()) {
+ if (__ratelimit(&logging_rate_limit)) {
printk(KERN_WARNING "kernel unaligned access to 0x%016lx, ip=0x%016lx\n",
ifa, regs->cr_iip + ipsr->ri);
if (unaligned_dump_stack)
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 7f3c0a2e60cd..d5f4e9161201 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -979,11 +979,13 @@ long kvm_arch_vm_ioctl(struct file *filp,
r = -EFAULT;
if (copy_from_user(&irq_event, argp, sizeof irq_event))
goto out;
+ r = -ENXIO;
if (irqchip_in_kernel(kvm)) {
__s32 status;
status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
irq_event.irq, irq_event.level);
if (ioctl == KVM_IRQ_LINE_STATUS) {
+ r = -EFAULT;
irq_event.status = status;
if (copy_to_user(argp, &irq_event,
sizeof irq_event))
@@ -1379,7 +1381,7 @@ static void kvm_release_vm_pages(struct kvm *kvm)
int i, j;
unsigned long base_gfn;
- slots = rcu_dereference(kvm->memslots);
+ slots = kvm_memslots(kvm);
for (i = 0; i < slots->nmemslots; i++) {
memslot = &slots->memslots[i];
base_gfn = memslot->base_gfn;
@@ -1535,8 +1537,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
goto out;
if (copy_to_user(user_stack, stack,
- sizeof(struct kvm_ia64_vcpu_stack)))
+ sizeof(struct kvm_ia64_vcpu_stack))) {
+ r = -EFAULT;
goto out;
+ }
break;
}
diff --git a/arch/ia64/kvm/vmm.c b/arch/ia64/kvm/vmm.c
index 7a62f75778c5..f0b9cac82414 100644
--- a/arch/ia64/kvm/vmm.c
+++ b/arch/ia64/kvm/vmm.c
@@ -51,7 +51,7 @@ static int __init kvm_vmm_init(void)
vmm_fpswa_interface = fpswa_interface;
/*Register vmm data to kvm side*/
- return kvm_init(&vmm_info, 1024, THIS_MODULE);
+ return kvm_init(&vmm_info, 1024, 0, THIS_MODULE);
}
static void __exit kvm_vmm_exit(void)
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index 19261a99e623..0799fea4c588 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -148,7 +148,6 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
if ((vma->vm_flags & mask) != mask)
goto bad_area;
- survive:
/*
* If for any reason at all we couldn't handle the fault, make
* sure we exit gracefully rather than endlessly redo the
@@ -276,13 +275,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
out_of_memory:
up_read(&mm->mmap_sem);
- if (is_global_init(current)) {
- yield();
- down_read(&mm->mmap_sem);
- goto survive;
- }
- printk(KERN_CRIT "VM: killing process %s\n", current->comm);
- if (user_mode(regs))
- do_group_exit(SIGKILL);
- goto no_context;
+ if (!user_mode(regs))
+ goto no_context;
+ pagefault_out_of_memory();
}
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index 64aff520b899..aa2533ae7e9e 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -335,8 +335,11 @@ pcibios_setup_root_windows(struct pci_bus *bus, struct pci_controller *ctrl)
}
struct pci_bus * __devinit
-pci_acpi_scan_root(struct acpi_device *device, int domain, int bus)
+pci_acpi_scan_root(struct acpi_pci_root *root)
{
+ struct acpi_device *device = root->device;
+ int domain = root->segment;
+ int bus = root->secondary.start;
struct pci_controller *controller;
unsigned int windows = 0;
struct pci_bus *pbus;
diff --git a/arch/ia64/sn/kernel/sn2/sn_hwperf.c b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
index 55ac3c4e11d2..fa1eceed0d23 100644
--- a/arch/ia64/sn/kernel/sn2/sn_hwperf.c
+++ b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
@@ -30,7 +30,6 @@
#include <linux/miscdevice.h>
#include <linux/utsname.h>
#include <linux/cpumask.h>
-#include <linux/smp_lock.h>
#include <linux/nodemask.h>
#include <linux/smp.h>
#include <linux/mutex.h>
@@ -629,9 +628,9 @@ static int sn_hwperf_op_cpu(struct sn_hwperf_op_info *op_info)
else {
/* migrate the task before calling SAL */
save_allowed = current->cpus_allowed;
- set_cpus_allowed(current, cpumask_of_cpu(cpu));
+ set_cpus_allowed_ptr(current, cpumask_of(cpu));
sn_hwperf_call_sal(op_info);
- set_cpus_allowed(current, save_allowed);
+ set_cpus_allowed_ptr(current, &save_allowed);
}
}
r = op_info->ret;
@@ -682,8 +681,7 @@ static int sn_hwperf_map_err(int hwperf_err)
/*
* ioctl for "sn_hwperf" misc device
*/
-static int
-sn_hwperf_ioctl(struct inode *in, struct file *fp, u32 op, unsigned long arg)
+static long sn_hwperf_ioctl(struct file *fp, u32 op, unsigned long arg)
{
struct sn_hwperf_ioctl_args a;
struct cpuinfo_ia64 *cdata;
@@ -699,8 +697,6 @@ sn_hwperf_ioctl(struct inode *in, struct file *fp, u32 op, unsigned long arg)
int i;
int j;
- unlock_kernel();
-
/* only user requests are allowed here */
if ((op & SN_HWPERF_OP_MASK) < 10) {
r = -EINVAL;
@@ -859,12 +855,11 @@ sn_hwperf_ioctl(struct inode *in, struct file *fp, u32 op, unsigned long arg)
error:
vfree(p);
- lock_kernel();
return r;
}
static const struct file_operations sn_hwperf_fops = {
- .ioctl = sn_hwperf_ioctl,
+ .unlocked_ioctl = sn_hwperf_ioctl,
};
static struct miscdevice sn_hwperf_dev = {
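The sn_hwperf change above follows the standard BKL-removal recipe: the old .ioctl hook (called with the big kernel lock held, with the inode passed in and an int return) becomes .unlocked_ioctl (no BKL, no inode, long return), so the lock_kernel()/unlock_kernel() pair inside the handler can simply go away. A generic sketch of the resulting shape, with made-up names:

#include <linux/fs.h>

static long example_unlocked_ioctl(struct file *file, unsigned int cmd,
				   unsigned long arg)
{
	/* no BKL here: guard any shared state with the driver's own lock */
	switch (cmd) {
	default:
		return -ENOTTY;
	}
}

static const struct file_operations example_fops = {
	.unlocked_ioctl	= example_unlocked_ioctl,
};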
diff --git a/arch/m32r/include/asm/atomic.h b/arch/m32r/include/asm/atomic.h
index 63f0cf0f50dd..d44a51e5271b 100644
--- a/arch/m32r/include/asm/atomic.h
+++ b/arch/m32r/include/asm/atomic.h
@@ -26,7 +26,7 @@
*
* Atomically reads the value of @v.
*/
-#define atomic_read(v) ((v)->counter)
+#define atomic_read(v) (*(volatile int *)&(v)->counter)
/**
* atomic_set - set atomic variable
diff --git a/arch/m32r/include/asm/scatterlist.h b/arch/m32r/include/asm/scatterlist.h
index 1ed372c73d0b..aeeddd8dac17 100644
--- a/arch/m32r/include/asm/scatterlist.h
+++ b/arch/m32r/include/asm/scatterlist.h
@@ -1,20 +1,7 @@
#ifndef _ASM_M32R_SCATTERLIST_H
#define _ASM_M32R_SCATTERLIST_H
-#include <asm/types.h>
-
-struct scatterlist {
-#ifdef CONFIG_DEBUG_SG
- unsigned long sg_magic;
-#endif
- char * address; /* Location data is to be transferred to, NULL for
- * highmem page */
- unsigned long page_link;
- unsigned int offset;/* for highmem, page offset */
-
- dma_addr_t dma_address;
- unsigned int length;
-};
+#include <asm-generic/scatterlist.h>
#define ISA_DMA_THRESHOLD (0x1fffffff)
diff --git a/arch/m32r/include/asm/thread_info.h b/arch/m32r/include/asm/thread_info.h
index ed240b6e8e77..71faff5bcc27 100644
--- a/arch/m32r/include/asm/thread_info.h
+++ b/arch/m32r/include/asm/thread_info.h
@@ -142,7 +142,7 @@ static inline unsigned int get_thread_fault_code(void)
#define TIF_RESTORE_SIGMASK 8 /* restore signal mask in do_signal() */
#define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */
#define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */
-#define TIF_MEMDIE 18 /* OOM killer killed process */
+#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
#define TIF_FREEZE 19 /* is freezing for suspend */
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
diff --git a/arch/m32r/kernel/time.c b/arch/m32r/kernel/time.c
index 9cedcef11575..bda86820bffd 100644
--- a/arch/m32r/kernel/time.c
+++ b/arch/m32r/kernel/time.c
@@ -106,24 +106,6 @@ u32 arch_gettimeoffset(void)
}
/*
- * In order to set the CMOS clock precisely, set_rtc_mmss has to be
- * called 500 ms after the second nowtime has started, because when
- * nowtime is written into the registers of the CMOS clock, it will
- * jump to the next second precisely 500 ms later. Check the Motorola
- * MC146818A or Dallas DS12887 data sheet for details.
- *
- * BUG: This routine does not handle hour overflow properly; it just
- * sets the minutes. Usually you won't notice until after reboot!
- */
-static inline int set_rtc_mmss(unsigned long nowtime)
-{
- return 0;
-}
-
-/* last time the cmos clock got updated */
-static long last_rtc_update = 0;
-
-/*
* timer_interrupt() needs to keep up the real-time clock,
* as well as call the "do_timer()" routine every clocktick
*/
@@ -138,23 +120,6 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id)
#ifndef CONFIG_SMP
update_process_times(user_mode(get_irq_regs()));
#endif
- /*
- * If we have an externally synchronized Linux clock, then update
- * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
- * called as close as possible to 500 ms before the new second starts.
- */
- write_seqlock(&xtime_lock);
- if (ntp_synced()
- && xtime.tv_sec > last_rtc_update + 660
- && (xtime.tv_nsec / 1000) >= 500000 - ((unsigned)TICK_SIZE) / 2
- && (xtime.tv_nsec / 1000) <= 500000 + ((unsigned)TICK_SIZE) / 2)
- {
- if (set_rtc_mmss(xtime.tv_sec) == 0)
- last_rtc_update = xtime.tv_sec;
- else /* do it again in 60 s */
- last_rtc_update = xtime.tv_sec - 600;
- }
- write_sequnlock(&xtime_lock);
/* As we return to user mode fire off the other CPU schedulers..
this is basically because we don't yet share IRQ's around.
This message is rigged to be safe on the 386 - basically it's
@@ -174,7 +139,7 @@ static struct irqaction irq0 = {
.name = "MFT2",
};
-void __init time_init(void)
+void read_persistent_clock(struct timespec *ts)
{
unsigned int epoch, year, mon, day, hour, min, sec;
@@ -194,11 +159,13 @@ void __init time_init(void)
epoch = 1952;
year += epoch;
- xtime.tv_sec = mktime(year, mon, day, hour, min, sec);
- xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
- set_normalized_timespec(&wall_to_monotonic,
- -xtime.tv_sec, -xtime.tv_nsec);
+ ts->tv_sec = mktime(year, mon, day, hour, min, sec);
+ ts->tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
+}
+
+void __init time_init(void)
+{
#if defined(CONFIG_CHIP_M32102) || defined(CONFIG_CHIP_XNUX2) \
|| defined(CONFIG_CHIP_VDEC2) || defined(CONFIG_CHIP_M32700) \
|| defined(CONFIG_CHIP_OPSP) || defined(CONFIG_CHIP_M32104)
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index b5da298ba61d..2e3737b92ffc 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -7,6 +7,7 @@ config M68K
default y
select HAVE_AOUT
select HAVE_IDE
+ select GENERIC_ATOMIC64
config MMU
bool
diff --git a/arch/m68k/amiga/Makefile b/arch/m68k/amiga/Makefile
index 6a0d7650f980..11dd30b16b3b 100644
--- a/arch/m68k/amiga/Makefile
+++ b/arch/m68k/amiga/Makefile
@@ -2,6 +2,6 @@
# Makefile for Linux arch/m68k/amiga source directory
#
-obj-y := config.o amiints.o cia.o chipram.o amisound.o
+obj-y := config.o amiints.o cia.o chipram.o amisound.o platform.o
obj-$(CONFIG_AMIGA_PCMCIA) += pcmcia.o
diff --git a/arch/m68k/amiga/config.c b/arch/m68k/amiga/config.c
index d2cc35d98532..b1577f741fa8 100644
--- a/arch/m68k/amiga/config.c
+++ b/arch/m68k/amiga/config.c
@@ -97,10 +97,6 @@ static void amiga_get_model(char *model);
static void amiga_get_hardware_list(struct seq_file *m);
/* amiga specific timer functions */
static unsigned long amiga_gettimeoffset(void);
-static int a3000_hwclk(int, struct rtc_time *);
-static int a2000_hwclk(int, struct rtc_time *);
-static int amiga_set_clock_mmss(unsigned long);
-static unsigned int amiga_get_ss(void);
extern void amiga_mksound(unsigned int count, unsigned int ticks);
static void amiga_reset(void);
extern void amiga_init_sound(void);
@@ -138,10 +134,6 @@ static struct {
}
};
-static struct resource rtc_resource = {
- .start = 0x00dc0000, .end = 0x00dcffff
-};
-
static struct resource ram_resource[NUM_MEMINFO];
@@ -387,15 +379,6 @@ void __init config_amiga(void)
mach_get_model = amiga_get_model;
mach_get_hardware_list = amiga_get_hardware_list;
mach_gettimeoffset = amiga_gettimeoffset;
- if (AMIGAHW_PRESENT(A3000_CLK)) {
- mach_hwclk = a3000_hwclk;
- rtc_resource.name = "A3000 RTC";
- request_resource(&iomem_resource, &rtc_resource);
- } else /* if (AMIGAHW_PRESENT(A2000_CLK)) */ {
- mach_hwclk = a2000_hwclk;
- rtc_resource.name = "A2000 RTC";
- request_resource(&iomem_resource, &rtc_resource);
- }
/*
* default MAX_DMA=0xffffffff on all machines. If we don't do so, the SCSI
@@ -404,8 +387,6 @@ void __init config_amiga(void)
*/
mach_max_dma_address = 0xffffffff;
- mach_set_clock_mmss = amiga_set_clock_mmss;
- mach_get_ss = amiga_get_ss;
mach_reset = amiga_reset;
#if defined(CONFIG_INPUT_M68K_BEEP) || defined(CONFIG_INPUT_M68K_BEEP_MODULE)
mach_beep = amiga_mksound;
@@ -530,161 +511,6 @@ static unsigned long amiga_gettimeoffset(void)
return ticks + offset;
}
-static int a3000_hwclk(int op, struct rtc_time *t)
-{
- tod_3000.cntrl1 = TOD3000_CNTRL1_HOLD;
-
- if (!op) { /* read */
- t->tm_sec = tod_3000.second1 * 10 + tod_3000.second2;
- t->tm_min = tod_3000.minute1 * 10 + tod_3000.minute2;
- t->tm_hour = tod_3000.hour1 * 10 + tod_3000.hour2;
- t->tm_mday = tod_3000.day1 * 10 + tod_3000.day2;
- t->tm_wday = tod_3000.weekday;
- t->tm_mon = tod_3000.month1 * 10 + tod_3000.month2 - 1;
- t->tm_year = tod_3000.year1 * 10 + tod_3000.year2;
- if (t->tm_year <= 69)
- t->tm_year += 100;
- } else {
- tod_3000.second1 = t->tm_sec / 10;
- tod_3000.second2 = t->tm_sec % 10;
- tod_3000.minute1 = t->tm_min / 10;
- tod_3000.minute2 = t->tm_min % 10;
- tod_3000.hour1 = t->tm_hour / 10;
- tod_3000.hour2 = t->tm_hour % 10;
- tod_3000.day1 = t->tm_mday / 10;
- tod_3000.day2 = t->tm_mday % 10;
- if (t->tm_wday != -1)
- tod_3000.weekday = t->tm_wday;
- tod_3000.month1 = (t->tm_mon + 1) / 10;
- tod_3000.month2 = (t->tm_mon + 1) % 10;
- if (t->tm_year >= 100)
- t->tm_year -= 100;
- tod_3000.year1 = t->tm_year / 10;
- tod_3000.year2 = t->tm_year % 10;
- }
-
- tod_3000.cntrl1 = TOD3000_CNTRL1_FREE;
-
- return 0;
-}
-
-static int a2000_hwclk(int op, struct rtc_time *t)
-{
- int cnt = 5;
-
- tod_2000.cntrl1 = TOD2000_CNTRL1_HOLD;
-
- while ((tod_2000.cntrl1 & TOD2000_CNTRL1_BUSY) && cnt) {
- tod_2000.cntrl1 &= ~TOD2000_CNTRL1_HOLD;
- udelay(70);
- tod_2000.cntrl1 |= TOD2000_CNTRL1_HOLD;
- --cnt;
- }
-
- if (!cnt)
- printk(KERN_INFO "hwclk: timed out waiting for RTC (0x%x)\n",
- tod_2000.cntrl1);
-
- if (!op) { /* read */
- t->tm_sec = tod_2000.second1 * 10 + tod_2000.second2;
- t->tm_min = tod_2000.minute1 * 10 + tod_2000.minute2;
- t->tm_hour = (tod_2000.hour1 & 3) * 10 + tod_2000.hour2;
- t->tm_mday = tod_2000.day1 * 10 + tod_2000.day2;
- t->tm_wday = tod_2000.weekday;
- t->tm_mon = tod_2000.month1 * 10 + tod_2000.month2 - 1;
- t->tm_year = tod_2000.year1 * 10 + tod_2000.year2;
- if (t->tm_year <= 69)
- t->tm_year += 100;
-
- if (!(tod_2000.cntrl3 & TOD2000_CNTRL3_24HMODE)) {
- if (!(tod_2000.hour1 & TOD2000_HOUR1_PM) && t->tm_hour == 12)
- t->tm_hour = 0;
- else if ((tod_2000.hour1 & TOD2000_HOUR1_PM) && t->tm_hour != 12)
- t->tm_hour += 12;
- }
- } else {
- tod_2000.second1 = t->tm_sec / 10;
- tod_2000.second2 = t->tm_sec % 10;
- tod_2000.minute1 = t->tm_min / 10;
- tod_2000.minute2 = t->tm_min % 10;
- if (tod_2000.cntrl3 & TOD2000_CNTRL3_24HMODE)
- tod_2000.hour1 = t->tm_hour / 10;
- else if (t->tm_hour >= 12)
- tod_2000.hour1 = TOD2000_HOUR1_PM +
- (t->tm_hour - 12) / 10;
- else
- tod_2000.hour1 = t->tm_hour / 10;
- tod_2000.hour2 = t->tm_hour % 10;
- tod_2000.day1 = t->tm_mday / 10;
- tod_2000.day2 = t->tm_mday % 10;
- if (t->tm_wday != -1)
- tod_2000.weekday = t->tm_wday;
- tod_2000.month1 = (t->tm_mon + 1) / 10;
- tod_2000.month2 = (t->tm_mon + 1) % 10;
- if (t->tm_year >= 100)
- t->tm_year -= 100;
- tod_2000.year1 = t->tm_year / 10;
- tod_2000.year2 = t->tm_year % 10;
- }
-
- tod_2000.cntrl1 &= ~TOD2000_CNTRL1_HOLD;
-
- return 0;
-}
-
-static int amiga_set_clock_mmss(unsigned long nowtime)
-{
- short real_seconds = nowtime % 60, real_minutes = (nowtime / 60) % 60;
-
- if (AMIGAHW_PRESENT(A3000_CLK)) {
- tod_3000.cntrl1 = TOD3000_CNTRL1_HOLD;
-
- tod_3000.second1 = real_seconds / 10;
- tod_3000.second2 = real_seconds % 10;
- tod_3000.minute1 = real_minutes / 10;
- tod_3000.minute2 = real_minutes % 10;
-
- tod_3000.cntrl1 = TOD3000_CNTRL1_FREE;
- } else /* if (AMIGAHW_PRESENT(A2000_CLK)) */ {
- int cnt = 5;
-
- tod_2000.cntrl1 |= TOD2000_CNTRL1_HOLD;
-
- while ((tod_2000.cntrl1 & TOD2000_CNTRL1_BUSY) && cnt) {
- tod_2000.cntrl1 &= ~TOD2000_CNTRL1_HOLD;
- udelay(70);
- tod_2000.cntrl1 |= TOD2000_CNTRL1_HOLD;
- --cnt;
- }
-
- if (!cnt)
- printk(KERN_INFO "set_clock_mmss: timed out waiting for RTC (0x%x)\n", tod_2000.cntrl1);
-
- tod_2000.second1 = real_seconds / 10;
- tod_2000.second2 = real_seconds % 10;
- tod_2000.minute1 = real_minutes / 10;
- tod_2000.minute2 = real_minutes % 10;
-
- tod_2000.cntrl1 &= ~TOD2000_CNTRL1_HOLD;
- }
-
- return 0;
-}
-
-static unsigned int amiga_get_ss(void)
-{
- unsigned int s;
-
- if (AMIGAHW_PRESENT(A3000_CLK)) {
- tod_3000.cntrl1 = TOD3000_CNTRL1_HOLD;
- s = tod_3000.second1 * 10 + tod_3000.second2;
- tod_3000.cntrl1 = TOD3000_CNTRL1_FREE;
- } else /* if (AMIGAHW_PRESENT(A2000_CLK)) */ {
- s = tod_2000.second1 * 10 + tod_2000.second2;
- }
- return s;
-}
-
static NORET_TYPE void amiga_reset(void)
ATTRIB_NORET;
diff --git a/arch/m68k/amiga/platform.c b/arch/m68k/amiga/platform.c
new file mode 100644
index 000000000000..7fd8b41723ea
--- /dev/null
+++ b/arch/m68k/amiga/platform.c
@@ -0,0 +1,197 @@
+/*
+ * Copyright (C) 2007-2009 Geert Uytterhoeven
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/zorro.h>
+
+#include <asm/amigahw.h>
+#include <asm/amigayle.h>
+
+
+#ifdef CONFIG_ZORRO
+
+static const struct resource zorro_resources[] __initconst = {
+ /* Zorro II regions (on Zorro II/III) */
+ {
+ .name = "Zorro II exp",
+ .start = 0x00e80000,
+ .end = 0x00efffff,
+ .flags = IORESOURCE_MEM,
+ }, {
+ .name = "Zorro II mem",
+ .start = 0x00200000,
+ .end = 0x009fffff,
+ .flags = IORESOURCE_MEM,
+ },
+ /* Zorro III regions (on Zorro III only) */
+ {
+ .name = "Zorro III exp",
+ .start = 0xff000000,
+ .end = 0xffffffff,
+ .flags = IORESOURCE_MEM,
+ }, {
+ .name = "Zorro III cfg",
+ .start = 0x40000000,
+ .end = 0x7fffffff,
+ .flags = IORESOURCE_MEM,
+ }
+};
+
+
+static int __init amiga_init_bus(void)
+{
+ if (!MACH_IS_AMIGA || !AMIGAHW_PRESENT(ZORRO))
+ return -ENODEV;
+
+ platform_device_register_simple("amiga-zorro", -1, zorro_resources,
+ AMIGAHW_PRESENT(ZORRO3) ? 4 : 2);
+ return 0;
+}
+
+subsys_initcall(amiga_init_bus);
+
+
+static int z_dev_present(zorro_id id)
+{
+ unsigned int i;
+
+ for (i = 0; i < zorro_num_autocon; i++)
+ if (zorro_autocon[i].rom.er_Manufacturer == ZORRO_MANUF(id) &&
+ zorro_autocon[i].rom.er_Product == ZORRO_PROD(id))
+ return 1;
+
+ return 0;
+}
+
+#else /* !CONFIG_ZORRO */
+
+static inline int z_dev_present(zorro_id id) { return 0; }
+
+#endif /* !CONFIG_ZORRO */
+
+
+static const struct resource a3000_scsi_resource __initconst = {
+ .start = 0xdd0000,
+ .end = 0xdd00ff,
+ .flags = IORESOURCE_MEM,
+};
+
+
+static const struct resource a4000t_scsi_resource __initconst = {
+ .start = 0xdd0000,
+ .end = 0xdd0fff,
+ .flags = IORESOURCE_MEM,
+};
+
+
+static const struct resource a1200_ide_resource __initconst = {
+ .start = 0xda0000,
+ .end = 0xda1fff,
+ .flags = IORESOURCE_MEM,
+};
+
+static const struct gayle_ide_platform_data a1200_ide_pdata __initconst = {
+ .base = 0xda0000,
+ .irqport = 0xda9000,
+ .explicit_ack = 1,
+};
+
+
+static const struct resource a4000_ide_resource __initconst = {
+ .start = 0xdd2000,
+ .end = 0xdd3fff,
+ .flags = IORESOURCE_MEM,
+};
+
+static const struct gayle_ide_platform_data a4000_ide_pdata __initconst = {
+ .base = 0xdd2020,
+ .irqport = 0xdd3020,
+ .explicit_ack = 0,
+};
+
+
+static const struct resource amiga_rtc_resource __initconst = {
+ .start = 0x00dc0000,
+ .end = 0x00dcffff,
+ .flags = IORESOURCE_MEM,
+};
+
+
+static int __init amiga_init_devices(void)
+{
+ struct platform_device *pdev;
+
+ if (!MACH_IS_AMIGA)
+ return -ENODEV;
+
+ /* video hardware */
+ if (AMIGAHW_PRESENT(AMI_VIDEO))
+ platform_device_register_simple("amiga-video", -1, NULL, 0);
+
+
+ /* sound hardware */
+ if (AMIGAHW_PRESENT(AMI_AUDIO))
+ platform_device_register_simple("amiga-audio", -1, NULL, 0);
+
+
+ /* storage interfaces */
+ if (AMIGAHW_PRESENT(AMI_FLOPPY))
+ platform_device_register_simple("amiga-floppy", -1, NULL, 0);
+
+ if (AMIGAHW_PRESENT(A3000_SCSI))
+ platform_device_register_simple("amiga-a3000-scsi", -1,
+ &a3000_scsi_resource, 1);
+
+ if (AMIGAHW_PRESENT(A4000_SCSI))
+ platform_device_register_simple("amiga-a4000t-scsi", -1,
+ &a4000t_scsi_resource, 1);
+
+ if (AMIGAHW_PRESENT(A1200_IDE) ||
+ z_dev_present(ZORRO_PROD_MTEC_VIPER_MK_V_E_MATRIX_530_SCSI_IDE)) {
+ pdev = platform_device_register_simple("amiga-gayle-ide", -1,
+ &a1200_ide_resource, 1);
+ platform_device_add_data(pdev, &a1200_ide_pdata,
+ sizeof(a1200_ide_pdata));
+ }
+
+ if (AMIGAHW_PRESENT(A4000_IDE)) {
+ pdev = platform_device_register_simple("amiga-gayle-ide", -1,
+ &a4000_ide_resource, 1);
+ platform_device_add_data(pdev, &a4000_ide_pdata,
+ sizeof(a4000_ide_pdata));
+ }
+
+
+ /* other I/O hardware */
+ if (AMIGAHW_PRESENT(AMI_KEYBOARD))
+ platform_device_register_simple("amiga-keyboard", -1, NULL, 0);
+
+ if (AMIGAHW_PRESENT(AMI_MOUSE))
+ platform_device_register_simple("amiga-mouse", -1, NULL, 0);
+
+ if (AMIGAHW_PRESENT(AMI_SERIAL))
+ platform_device_register_simple("amiga-serial", -1, NULL, 0);
+
+ if (AMIGAHW_PRESENT(AMI_PARALLEL))
+ platform_device_register_simple("amiga-parallel", -1, NULL, 0);
+
+
+ /* real time clocks */
+ if (AMIGAHW_PRESENT(A2000_CLK))
+ platform_device_register_simple("rtc-msm6242", -1,
+ &amiga_rtc_resource, 1);
+
+ if (AMIGAHW_PRESENT(A3000_CLK))
+ platform_device_register_simple("rtc-rp5c01", -1,
+ &amiga_rtc_resource, 1);
+
+ return 0;
+}
+
+device_initcall(amiga_init_devices);
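platform.c above only registers the devices; the drivers that bind to them by name are expected to pull their I/O window and the gayle_ide_platform_data added in amigayle.h out of the platform device. A sketch of what that consuming side could look like — the probe function is hypothetical and not part of this diff:

#include <linux/platform_device.h>
#include <asm/amigayle.h>

static int __devinit gayle_ide_probe_sketch(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	struct gayle_ide_platform_data *pdata = pdev->dev.platform_data;

	if (!res || !pdata)
		return -ENODEV;

	/* pdata->base, pdata->irqport and pdata->explicit_ack come from the
	 * a1200/a4000 structures registered by platform.c above */
	return 0;
}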
diff --git a/arch/m68k/bvme6000/rtc.c b/arch/m68k/bvme6000/rtc.c
index b46ea1714a89..cb8617bb194b 100644
--- a/arch/m68k/bvme6000/rtc.c
+++ b/arch/m68k/bvme6000/rtc.c
@@ -9,7 +9,6 @@
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/miscdevice.h>
-#include <linux/smp_lock.h>
#include <linux/ioport.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
@@ -35,10 +34,9 @@
static unsigned char days_in_mo[] =
{0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};
-static char rtc_status;
+static atomic_t rtc_status = ATOMIC_INIT(1);
-static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
- unsigned long arg)
+static long rtc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
volatile RtcPtr_t rtc = (RtcPtr_t)BVME_RTC_BASE;
unsigned char msr;
@@ -132,29 +130,20 @@ static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
}
/*
- * We enforce only one user at a time here with the open/close.
- * Also clear the previous interrupt data on an open, and clean
- * up things on a close.
+ * We enforce only one user at a time here with the open/close.
*/
-
static int rtc_open(struct inode *inode, struct file *file)
{
- lock_kernel();
- if(rtc_status) {
- unlock_kernel();
+ if (!atomic_dec_and_test(&rtc_status)) {
+ atomic_inc(&rtc_status);
return -EBUSY;
}
-
- rtc_status = 1;
- unlock_kernel();
return 0;
}
static int rtc_release(struct inode *inode, struct file *file)
{
- lock_kernel();
- rtc_status = 0;
- unlock_kernel();
+ atomic_inc(&rtc_status);
return 0;
}
@@ -163,9 +152,9 @@ static int rtc_release(struct inode *inode, struct file *file)
*/
static const struct file_operations rtc_fops = {
- .ioctl = rtc_ioctl,
- .open = rtc_open,
- .release = rtc_release,
+ .unlocked_ioctl = rtc_ioctl,
+ .open = rtc_open,
+ .release = rtc_release,
};
static struct miscdevice rtc_dev = {
diff --git a/arch/m68k/hp300/time.h b/arch/m68k/hp300/time.h
index f5b3d098b0f5..7b98242960de 100644
--- a/arch/m68k/hp300/time.h
+++ b/arch/m68k/hp300/time.h
@@ -1,4 +1,2 @@
extern void hp300_sched_init(irq_handler_t vector);
-extern unsigned long hp300_gettimeoffset (void);
-
-
+extern unsigned long hp300_gettimeoffset(void);
diff --git a/arch/m68k/include/asm/amigayle.h b/arch/m68k/include/asm/amigayle.h
index bb5a6aa329f3..a01453d9c231 100644
--- a/arch/m68k/include/asm/amigayle.h
+++ b/arch/m68k/include/asm/amigayle.h
@@ -104,4 +104,10 @@ struct GAYLE {
#define GAYLE_CFG_250NS 0x00
#define GAYLE_CFG_720NS 0x0c
+struct gayle_ide_platform_data {
+ unsigned long base;
+ unsigned long irqport;
+ int explicit_ack; /* A1200 IDE needs explicit ack */
+};
+
#endif /* asm-m68k/amigayle.h */
diff --git a/arch/m68k/include/asm/atomic.h b/arch/m68k/include/asm/atomic.h
index 8d29145ebb27..eab36dcacf6c 100644
--- a/arch/m68k/include/asm/atomic.h
+++ b/arch/m68k/include/asm/atomic.h
@@ -3,3 +3,5 @@
#else
#include "atomic_mm.h"
#endif
+
+#include <asm-generic/atomic64.h>
diff --git a/arch/m68k/include/asm/atomic_mm.h b/arch/m68k/include/asm/atomic_mm.h
index d9d2ed647435..6a223b3f7e74 100644
--- a/arch/m68k/include/asm/atomic_mm.h
+++ b/arch/m68k/include/asm/atomic_mm.h
@@ -15,7 +15,7 @@
#define ATOMIC_INIT(i) { (i) }
-#define atomic_read(v) ((v)->counter)
+#define atomic_read(v) (*(volatile int *)&(v)->counter)
#define atomic_set(v, i) (((v)->counter) = i)
static inline void atomic_add(int i, atomic_t *v)
diff --git a/arch/m68k/include/asm/atomic_no.h b/arch/m68k/include/asm/atomic_no.h
index 5674cb9449bd..289310c63a8a 100644
--- a/arch/m68k/include/asm/atomic_no.h
+++ b/arch/m68k/include/asm/atomic_no.h
@@ -15,7 +15,7 @@
#define ATOMIC_INIT(i) { (i) }
-#define atomic_read(v) ((v)->counter)
+#define atomic_read(v) (*(volatile int *)&(v)->counter)
#define atomic_set(v, i) (((v)->counter) = i)
static __inline__ void atomic_add(int i, atomic_t *v)
diff --git a/arch/m68k/include/asm/bitops_mm.h b/arch/m68k/include/asm/bitops_mm.h
index 9bde784e7bad..b4ecdaada520 100644
--- a/arch/m68k/include/asm/bitops_mm.h
+++ b/arch/m68k/include/asm/bitops_mm.h
@@ -365,6 +365,10 @@ static inline int minix_test_bit(int nr, const void *vaddr)
#define ext2_set_bit_atomic(lock, nr, addr) test_and_set_bit((nr) ^ 24, (unsigned long *)(addr))
#define ext2_clear_bit(nr, addr) __test_and_clear_bit((nr) ^ 24, (unsigned long *)(addr))
#define ext2_clear_bit_atomic(lock, nr, addr) test_and_clear_bit((nr) ^ 24, (unsigned long *)(addr))
+#define ext2_find_next_zero_bit(addr, size, offset) \
+ generic_find_next_zero_le_bit((unsigned long *)addr, size, offset)
+#define ext2_find_next_bit(addr, size, offset) \
+ generic_find_next_le_bit((unsigned long *)addr, size, offset)
static inline int ext2_test_bit(int nr, const void *vaddr)
{
@@ -394,10 +398,9 @@ static inline int ext2_find_first_zero_bit(const void *vaddr, unsigned size)
return (p - addr) * 32 + res;
}
-static inline int ext2_find_next_zero_bit(const void *vaddr, unsigned size,
- unsigned offset)
+static inline unsigned long generic_find_next_zero_le_bit(const unsigned long *addr,
+ unsigned long size, unsigned long offset)
{
- const unsigned long *addr = vaddr;
const unsigned long *p = addr + (offset >> 5);
int bit = offset & 31UL, res;
@@ -437,10 +440,9 @@ static inline int ext2_find_first_bit(const void *vaddr, unsigned size)
return (p - addr) * 32 + res;
}
-static inline int ext2_find_next_bit(const void *vaddr, unsigned size,
- unsigned offset)
+static inline unsigned long generic_find_next_le_bit(const unsigned long *addr,
+ unsigned long size, unsigned long offset)
{
- const unsigned long *addr = vaddr;
const unsigned long *p = addr + (offset >> 5);
int bit = offset & 31UL, res;
diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
index fed3fd30de7e..ecafbe1718c3 100644
--- a/arch/m68k/include/asm/cache.h
+++ b/arch/m68k/include/asm/cache.h
@@ -8,4 +8,6 @@
#define L1_CACHE_SHIFT 4
#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
+#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
+
#endif
diff --git a/arch/m68k/include/asm/m520xsim.h b/arch/m68k/include/asm/m520xsim.h
index ed2b69b96805..db824a4b136e 100644
--- a/arch/m68k/include/asm/m520xsim.h
+++ b/arch/m68k/include/asm/m520xsim.h
@@ -113,6 +113,7 @@
#define MCF_GPIO_PAR_UART (0xA4036)
#define MCF_GPIO_PAR_FECI2C (0xA4033)
+#define MCF_GPIO_PAR_QSPI (0xA4034)
#define MCF_GPIO_PAR_FEC (0xA4038)
#define MCF_GPIO_PAR_UART_PAR_URXD0 (0x0001)
diff --git a/arch/m68k/include/asm/m523xsim.h b/arch/m68k/include/asm/m523xsim.h
index a34894cf8e6f..e8d06b24a48e 100644
--- a/arch/m68k/include/asm/m523xsim.h
+++ b/arch/m68k/include/asm/m523xsim.h
@@ -127,5 +127,10 @@
#define MCFGPIO_IRQ_MAX 8
#define MCFGPIO_IRQ_VECBASE MCFINT_VECBASE
+/*
+ * Pin Assignment
+*/
+#define MCFGPIO_PAR_QSPI (MCF_IPSBAR + 0x10004A)
+#define MCFGPIO_PAR_TIMER (MCF_IPSBAR + 0x10004C)
/****************************************************************************/
#endif /* m523xsim_h */
diff --git a/arch/m68k/include/asm/m5249sim.h b/arch/m68k/include/asm/m5249sim.h
index 14bce877ed88..79b7b402f3c9 100644
--- a/arch/m68k/include/asm/m5249sim.h
+++ b/arch/m68k/include/asm/m5249sim.h
@@ -69,10 +69,12 @@
#define MCFSIM_DMA1ICR MCFSIM_ICR7 /* DMA 1 ICR */
#define MCFSIM_DMA2ICR MCFSIM_ICR8 /* DMA 2 ICR */
#define MCFSIM_DMA3ICR MCFSIM_ICR9 /* DMA 3 ICR */
+#define MCFSIM_QSPIICR MCFSIM_ICR10 /* QSPI ICR */
/*
* Define system peripheral IRQ usage.
*/
+#define MCF_IRQ_QSPI 28 /* QSPI, Level 4 */
#define MCF_IRQ_TIMER 30 /* Timer0, Level 6 */
#define MCF_IRQ_PROFILER 31 /* Timer1, Level 7 */
diff --git a/arch/m68k/include/asm/m527xsim.h b/arch/m68k/include/asm/m527xsim.h
index 453356d72d80..1feb46f108ce 100644
--- a/arch/m68k/include/asm/m527xsim.h
+++ b/arch/m68k/include/asm/m527xsim.h
@@ -31,6 +31,7 @@
#define MCFINT_UART0 13 /* Interrupt number for UART0 */
#define MCFINT_UART1 14 /* Interrupt number for UART1 */
#define MCFINT_UART2 15 /* Interrupt number for UART2 */
+#define MCFINT_QSPI 18 /* Interrupt number for QSPI */
#define MCFINT_PIT1 36 /* Interrupt number for PIT1 */
/*
@@ -120,6 +121,9 @@
#define MCFGPIO_PIN_MAX 100
#define MCFGPIO_IRQ_MAX 8
#define MCFGPIO_IRQ_VECBASE MCFINT_VECBASE
+
+#define MCFGPIO_PAR_QSPI (MCF_IPSBAR + 0x10004A)
+#define MCFGPIO_PAR_TIMER (MCF_IPSBAR + 0x10004C)
#endif
#ifdef CONFIG_M5275
@@ -212,6 +216,8 @@
#define MCFGPIO_PIN_MAX 148
#define MCFGPIO_IRQ_MAX 8
#define MCFGPIO_IRQ_VECBASE MCFINT_VECBASE
+
+#define MCFGPIO_PAR_QSPI (MCF_IPSBAR + 0x10007E)
#endif
/*
@@ -223,6 +229,7 @@
#define MCFEPORT_EPPDR (MCF_IPSBAR + 0x130005)
+
/*
* GPIO pins setups to enable the UARTs.
*/
diff --git a/arch/m68k/include/asm/m528xsim.h b/arch/m68k/include/asm/m528xsim.h
index e2ad1f42b657..891cbedad972 100644
--- a/arch/m68k/include/asm/m528xsim.h
+++ b/arch/m68k/include/asm/m528xsim.h
@@ -29,6 +29,7 @@
#define MCFINT_VECBASE 64 /* Vector base number */
#define MCFINT_UART0 13 /* Interrupt number for UART0 */
+#define MCFINT_QSPI 18 /* Interrupt number for QSPI */
#define MCFINT_PIT1 55 /* Interrupt number for PIT1 */
/*
@@ -249,70 +250,4 @@
#define MCF5282_I2C_I2SR_RXAK (0x01) // received acknowledge
-
-/*********************************************************************
-*
-* Queued Serial Peripheral Interface (QSPI) Module
-*
-*********************************************************************/
-/* Derek - 21 Feb 2005 */
-/* change to the format used in I2C */
-/* Read/Write access macros for general use */
-#define MCF5282_QSPI_QMR MCF_IPSBAR + 0x0340
-#define MCF5282_QSPI_QDLYR MCF_IPSBAR + 0x0344
-#define MCF5282_QSPI_QWR MCF_IPSBAR + 0x0348
-#define MCF5282_QSPI_QIR MCF_IPSBAR + 0x034C
-#define MCF5282_QSPI_QAR MCF_IPSBAR + 0x0350
-#define MCF5282_QSPI_QDR MCF_IPSBAR + 0x0354
-#define MCF5282_QSPI_QCR MCF_IPSBAR + 0x0354
-
-/* Bit level definitions and macros */
-#define MCF5282_QSPI_QMR_MSTR (0x8000)
-#define MCF5282_QSPI_QMR_DOHIE (0x4000)
-#define MCF5282_QSPI_QMR_BITS_16 (0x0000)
-#define MCF5282_QSPI_QMR_BITS_8 (0x2000)
-#define MCF5282_QSPI_QMR_BITS_9 (0x2400)
-#define MCF5282_QSPI_QMR_BITS_10 (0x2800)
-#define MCF5282_QSPI_QMR_BITS_11 (0x2C00)
-#define MCF5282_QSPI_QMR_BITS_12 (0x3000)
-#define MCF5282_QSPI_QMR_BITS_13 (0x3400)
-#define MCF5282_QSPI_QMR_BITS_14 (0x3800)
-#define MCF5282_QSPI_QMR_BITS_15 (0x3C00)
-#define MCF5282_QSPI_QMR_CPOL (0x0200)
-#define MCF5282_QSPI_QMR_CPHA (0x0100)
-#define MCF5282_QSPI_QMR_BAUD(x) (((x)&0x00FF))
-
-#define MCF5282_QSPI_QDLYR_SPE (0x80)
-#define MCF5282_QSPI_QDLYR_QCD(x) (((x)&0x007F)<<8)
-#define MCF5282_QSPI_QDLYR_DTL(x) (((x)&0x00FF))
-
-#define MCF5282_QSPI_QWR_HALT (0x8000)
-#define MCF5282_QSPI_QWR_WREN (0x4000)
-#define MCF5282_QSPI_QWR_WRTO (0x2000)
-#define MCF5282_QSPI_QWR_CSIV (0x1000)
-#define MCF5282_QSPI_QWR_ENDQP(x) (((x)&0x000F)<<8)
-#define MCF5282_QSPI_QWR_CPTQP(x) (((x)&0x000F)<<4)
-#define MCF5282_QSPI_QWR_NEWQP(x) (((x)&0x000F))
-
-#define MCF5282_QSPI_QIR_WCEFB (0x8000)
-#define MCF5282_QSPI_QIR_ABRTB (0x4000)
-#define MCF5282_QSPI_QIR_ABRTL (0x1000)
-#define MCF5282_QSPI_QIR_WCEFE (0x0800)
-#define MCF5282_QSPI_QIR_ABRTE (0x0400)
-#define MCF5282_QSPI_QIR_SPIFE (0x0100)
-#define MCF5282_QSPI_QIR_WCEF (0x0008)
-#define MCF5282_QSPI_QIR_ABRT (0x0004)
-#define MCF5282_QSPI_QIR_SPIF (0x0001)
-
-#define MCF5282_QSPI_QAR_ADDR(x) (((x)&0x003F))
-
-#define MCF5282_QSPI_QDR_COMMAND(x) (((x)&0xFF00))
-#define MCF5282_QSPI_QCR_DATA(x) (((x)&0x00FF)<<8)
-#define MCF5282_QSPI_QCR_CONT (0x8000)
-#define MCF5282_QSPI_QCR_BITSE (0x4000)
-#define MCF5282_QSPI_QCR_DT (0x2000)
-#define MCF5282_QSPI_QCR_DSCK (0x1000)
-#define MCF5282_QSPI_QCR_CS (((x)&0x000F)<<8)
-
-/****************************************************************************/
#endif /* m528xsim_h */
diff --git a/arch/m68k/include/asm/m532xsim.h b/arch/m68k/include/asm/m532xsim.h
index 36bf15aec9ae..c4bf1c81e3cf 100644
--- a/arch/m68k/include/asm/m532xsim.h
+++ b/arch/m68k/include/asm/m532xsim.h
@@ -17,6 +17,7 @@
#define MCFINT_UART0 26 /* Interrupt number for UART0 */
#define MCFINT_UART1 27 /* Interrupt number for UART1 */
#define MCFINT_UART2 28 /* Interrupt number for UART2 */
+#define MCFINT_QSPI 31 /* Interrupt number for QSPI */
#define MCF_WTM_WCR MCF_REG16(0xFC098000)
diff --git a/arch/m68k/include/asm/mcfqspi.h b/arch/m68k/include/asm/mcfqspi.h
new file mode 100644
index 000000000000..39d90d51111d
--- /dev/null
+++ b/arch/m68k/include/asm/mcfqspi.h
@@ -0,0 +1,64 @@
+/*
+ * Definitions for Freescale Coldfire QSPI module
+ *
+ * Copyright 2010 Steven King <sfking@fdwdc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+*/
+
+#ifndef mcfqspi_h
+#define mcfqspi_h
+
+#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x)
+#define MCFQSPI_IOBASE (MCF_IPSBAR + 0x340)
+#elif defined(CONFIG_M5249)
+#define MCFQSPI_IOBASE (MCF_MBAR + 0x300)
+#elif defined(CONFIG_M520x) || defined(CONFIG_M532x)
+#define MCFQSPI_IOBASE 0xFC058000
+#endif
+#define MCFQSPI_IOSIZE 0x40
+
+/**
+ * struct mcfqspi_cs_control - chip select control for the coldfire qspi driver
+ * @setup: setup the control; allocate gpio's, etc. May be NULL.
+ * @teardown: finish with the control; free gpio's, etc. May be NULL.
+ * @select: output the signals to select the device. Can not be NULL.
+ * @deselect: output the signals to deselect the device. Can not be NULL.
+ *
+ * The QSPI module has 4 hardware chip selects. We don't use them. Instead
+ * platforms are required to supply a mcfqspi_cs_control as a part of the
+ * platform data for each QSPI master controller. Only the select and
+ * deselect functions are required.
+*/
+struct mcfqspi_cs_control {
+ int (*setup)(struct mcfqspi_cs_control *);
+ void (*teardown)(struct mcfqspi_cs_control *);
+ void (*select)(struct mcfqspi_cs_control *, u8, bool);
+ void (*deselect)(struct mcfqspi_cs_control *, u8, bool);
+};
+
+/**
+ * struct mcfqspi_platform_data - platform data for the coldfire qspi driver
+ * @bus_num: board specific identifier for this qspi driver.
+ * @num_chipselects: number of chip selects supported by this qspi driver.
+ * @cs_control: platform dependent chip select control.
+*/
+struct mcfqspi_platform_data {
+ s16 bus_num;
+ u16 num_chipselect;
+ struct mcfqspi_cs_control *cs_control;
+};
+
+#endif /* mcfqspi_h */
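The header's kernel-doc spells out the contract: boards do not use the QSPI module's hardware chip selects but instead hand the driver a mcfqspi_cs_control through the platform data, with select() and deselect() mandatory. A sketch of how a board file might satisfy that — the GPIO numbers and naming are invented for illustration:

#include <linux/gpio.h>
#include <asm/mcfqspi.h>

static void board_cs_select(struct mcfqspi_cs_control *cs, u8 chip_select,
			    bool cs_high)
{
	gpio_set_value(60 + chip_select, cs_high);	/* drive CS active */
}

static void board_cs_deselect(struct mcfqspi_cs_control *cs, u8 chip_select,
			      bool cs_high)
{
	gpio_set_value(60 + chip_select, !cs_high);	/* drive CS inactive */
}

static struct mcfqspi_cs_control board_cs_control = {
	.select		= board_cs_select,
	.deselect	= board_cs_deselect,
};

static struct mcfqspi_platform_data board_qspi_pdata = {
	.bus_num	= 0,
	.num_chipselect	= 1,
	.cs_control	= &board_cs_control,
};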
diff --git a/arch/m68k/include/asm/mcfsmc.h b/arch/m68k/include/asm/mcfsmc.h
deleted file mode 100644
index 527bea5d6788..000000000000
--- a/arch/m68k/include/asm/mcfsmc.h
+++ /dev/null
@@ -1,187 +0,0 @@
-/****************************************************************************/
-
-/*
- * mcfsmc.h -- SMC ethernet support for ColdFire environments.
- *
- * (C) Copyright 1999-2002, Greg Ungerer (gerg@snapgear.com)
- * (C) Copyright 2000, Lineo Inc. (www.lineo.com)
- */
-
-/****************************************************************************/
-#ifndef mcfsmc_h
-#define mcfsmc_h
-/****************************************************************************/
-
-/*
- * None of the current ColdFire targets that use the SMC91x111
- * allow 8 bit accesses. So this code is 16bit access only.
- */
-
-
-#undef outb
-#undef inb
-#undef outw
-#undef outwd
-#undef inw
-#undef outl
-#undef inl
-
-#undef outsb
-#undef outsw
-#undef outsl
-#undef insb
-#undef insw
-#undef insl
-
-/*
- * Re-defines for ColdFire environment... The SMC part is
- * mapped into memory space, so remap the PC-style in/out
- * routines to handle that.
- */
-#define outb smc_outb
-#define inb smc_inb
-#define outw smc_outw
-#define outwd smc_outwd
-#define inw smc_inw
-#define outl smc_outl
-#define inl smc_inl
-
-#define outsb smc_outsb
-#define outsw smc_outsw
-#define outsl smc_outsl
-#define insb smc_insb
-#define insw smc_insw
-#define insl smc_insl
-
-
-static inline int smc_inb(unsigned int addr)
-{
- register unsigned short w;
- w = *((volatile unsigned short *) (addr & ~0x1));
- return(((addr & 0x1) ? w : (w >> 8)) & 0xff);
-}
-
-static inline void smc_outw(unsigned int val, unsigned int addr)
-{
- *((volatile unsigned short *) addr) = (val << 8) | (val >> 8);
-}
-
-static inline int smc_inw(unsigned int addr)
-{
- register unsigned short w;
- w = *((volatile unsigned short *) addr);
- return(((w << 8) | (w >> 8)) & 0xffff);
-}
-
-static inline void smc_outl(unsigned long val, unsigned int addr)
-{
- *((volatile unsigned long *) addr) =
- ((val << 8) & 0xff000000) | ((val >> 8) & 0x00ff0000) |
- ((val << 8) & 0x0000ff00) | ((val >> 8) & 0x000000ff);
-}
-
-static inline void smc_outwd(unsigned int val, unsigned int addr)
-{
- *((volatile unsigned short *) addr) = val;
-}
-
-
-/*
- * The rep* functions are used to feed the data port with
- * raw data. So we do not byte swap them when copying.
- */
-
-static inline void smc_insb(unsigned int addr, void *vbuf, int unsigned long len)
-{
- volatile unsigned short *rp;
- unsigned short *buf, *ebuf;
-
- buf = (unsigned short *) vbuf;
- rp = (volatile unsigned short *) addr;
-
- /* Copy as words for as long as possible */
- for (ebuf = buf + (len >> 1); (buf < ebuf); )
- *buf++ = *rp;
-
- /* Lastly, handle left over byte */
- if (len & 0x1)
- *((unsigned char *) buf) = (*rp >> 8) & 0xff;
-}
-
-static inline void smc_insw(unsigned int addr, void *vbuf, unsigned long len)
-{
- volatile unsigned short *rp;
- unsigned short *buf, *ebuf;
-
- buf = (unsigned short *) vbuf;
- rp = (volatile unsigned short *) addr;
- for (ebuf = buf + len; (buf < ebuf); )
- *buf++ = *rp;
-}
-
-static inline void smc_insl(unsigned int addr, void *vbuf, unsigned long len)
-{
- volatile unsigned long *rp;
- unsigned long *buf, *ebuf;
-
- buf = (unsigned long *) vbuf;
- rp = (volatile unsigned long *) addr;
- for (ebuf = buf + len; (buf < ebuf); )
- *buf++ = *rp;
-}
-
-static inline void smc_outsw(unsigned int addr, const void *vbuf, unsigned long len)
-{
- volatile unsigned short *rp;
- unsigned short *buf, *ebuf;
-
- buf = (unsigned short *) vbuf;
- rp = (volatile unsigned short *) addr;
- for (ebuf = buf + len; (buf < ebuf); )
- *rp = *buf++;
-}
-
-static inline void smc_outsl(unsigned int addr, void *vbuf, unsigned long len)
-{
- volatile unsigned long *rp;
- unsigned long *buf, *ebuf;
-
- buf = (unsigned long *) vbuf;
- rp = (volatile unsigned long *) addr;
- for (ebuf = buf + len; (buf < ebuf); )
- *rp = *buf++;
-}
-
-
-#ifdef CONFIG_NETtel
-/*
- * Re-map the address space of at least one of the SMC ethernet
- * parts. Both parts power up decoding the same address, so we
- * need to move one of them first, before doing enything else.
- *
- * We also increase the number of wait states for this part by one.
- */
-
-void smc_remap(unsigned int ioaddr)
-{
- static int once = 0;
- extern unsigned short ppdata;
- if (once++ == 0) {
- *((volatile unsigned short *)MCFSIM_PADDR) = 0x00ec;
- ppdata |= 0x0080;
- *((volatile unsigned short *)MCFSIM_PADAT) = ppdata;
- outw(0x0001, ioaddr + BANK_SELECT);
- outw(0x0001, ioaddr + BANK_SELECT);
- outw(0x0067, ioaddr + BASE);
-
- ppdata &= ~0x0080;
- *((volatile unsigned short *)MCFSIM_PADAT) = ppdata;
- }
-
- *((volatile unsigned short *)(MCF_MBAR+MCFSIM_CSCR3)) = 0x1180;
-}
-
-#endif
-
-/****************************************************************************/
-#endif /* mcfsmc_h */
diff --git a/arch/m68k/include/asm/param.h b/arch/m68k/include/asm/param.h
index 85c41b75aa78..36265ccf5c7b 100644
--- a/arch/m68k/include/asm/param.h
+++ b/arch/m68k/include/asm/param.h
@@ -1,26 +1,12 @@
#ifndef _M68K_PARAM_H
#define _M68K_PARAM_H
-#ifdef __KERNEL__
-# define HZ CONFIG_HZ /* Internal kernel timer frequency */
-# define USER_HZ 100 /* .. some user interfaces are in "ticks" */
-# define CLOCKS_PER_SEC (USER_HZ) /* like times() */
-#endif
-
-#ifndef HZ
-#define HZ 100
-#endif
-
#ifdef __uClinux__
#define EXEC_PAGESIZE 4096
#else
#define EXEC_PAGESIZE 8192
#endif
-#ifndef NOGROUP
-#define NOGROUP (-1)
-#endif
-
-#define MAXHOSTNAMELEN 64 /* max length of hostname */
+#include <asm-generic/param.h>
#endif /* _M68K_PARAM_H */
diff --git a/arch/m68k/include/asm/processor.h b/arch/m68k/include/asm/processor.h
index cbd3d4751dd2..7a6a7590cc02 100644
--- a/arch/m68k/include/asm/processor.h
+++ b/arch/m68k/include/asm/processor.h
@@ -44,11 +44,15 @@ static inline void wrusp(unsigned long usp)
* User space process size: 3.75GB. This is hardcoded into a few places,
* so don't change it unless you know what you are doing.
*/
+#ifdef CONFIG_MMU
#ifndef CONFIG_SUN3
#define TASK_SIZE (0xF0000000UL)
#else
#define TASK_SIZE (0x0E000000UL)
#endif
+#else
+#define TASK_SIZE (0xFFFFFFFFUL)
+#endif
#ifdef __KERNEL__
#define STACK_TOP TASK_SIZE
diff --git a/arch/m68k/include/asm/scatterlist.h b/arch/m68k/include/asm/scatterlist.h
index e27ad902b1cf..175da06c6b95 100644
--- a/arch/m68k/include/asm/scatterlist.h
+++ b/arch/m68k/include/asm/scatterlist.h
@@ -1,23 +1,9 @@
#ifndef _M68K_SCATTERLIST_H
#define _M68K_SCATTERLIST_H
-#include <linux/types.h>
-
-struct scatterlist {
-#ifdef CONFIG_DEBUG_SG
- unsigned long sg_magic;
-#endif
- unsigned long page_link;
- unsigned int offset;
- unsigned int length;
-
- dma_addr_t dma_address; /* A place to hang host-specific addresses at. */
-};
+#include <asm-generic/scatterlist.h>
/* This is bogus and should go away. */
#define ISA_DMA_THRESHOLD (0x00ffffff)
-#define sg_dma_address(sg) ((sg)->dma_address)
-#define sg_dma_len(sg) ((sg)->length)
-
#endif /* !(_M68K_SCATTERLIST_H) */
diff --git a/arch/m68k/include/asm/thread_info_mm.h b/arch/m68k/include/asm/thread_info_mm.h
index 67266c683453..3bf31dc51b12 100644
--- a/arch/m68k/include/asm/thread_info_mm.h
+++ b/arch/m68k/include/asm/thread_info_mm.h
@@ -65,7 +65,7 @@ struct thread_info {
#define TIF_NEED_RESCHED 7 /* rescheduling necessary */
#define TIF_DELAYED_TRACE 14 /* single step a syscall */
#define TIF_SYSCALL_TRACE 15 /* syscall trace active */
-#define TIF_MEMDIE 16
+#define TIF_MEMDIE 16 /* is terminating due to OOM killer */
#define TIF_FREEZE 17 /* thread is freezing for suspend */
#endif /* _ASM_M68K_THREAD_INFO_H */
diff --git a/arch/m68k/include/asm/thread_info_no.h b/arch/m68k/include/asm/thread_info_no.h
index 884776f686ca..51f354b672e6 100644
--- a/arch/m68k/include/asm/thread_info_no.h
+++ b/arch/m68k/include/asm/thread_info_no.h
@@ -85,7 +85,7 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
#define TIF_POLLING_NRFLAG 3 /* true if poll_idle() is polling
TIF_NEED_RESCHED */
-#define TIF_MEMDIE 4
+#define TIF_MEMDIE 4 /* is terminating due to OOM killer */
#define TIF_FREEZE 16 /* is freezing for suspend */
/* as above, but as bit values */
diff --git a/arch/m68k/kernel/time.c b/arch/m68k/kernel/time.c
index 17dc2a31a7ca..4926b3856c15 100644
--- a/arch/m68k/kernel/time.c
+++ b/arch/m68k/kernel/time.c
@@ -73,21 +73,24 @@ static irqreturn_t timer_interrupt(int irq, void *dummy)
return IRQ_HANDLED;
}
-void __init time_init(void)
+void read_persistent_clock(struct timespec *ts)
{
struct rtc_time time;
+ ts->tv_sec = 0;
+ ts->tv_nsec = 0;
if (mach_hwclk) {
mach_hwclk(0, &time);
if ((time.tm_year += 1900) < 1970)
time.tm_year += 100;
- xtime.tv_sec = mktime(time.tm_year, time.tm_mon, time.tm_mday,
+ ts->tv_sec = mktime(time.tm_year, time.tm_mon, time.tm_mday,
time.tm_hour, time.tm_min, time.tm_sec);
- xtime.tv_nsec = 0;
}
- wall_to_monotonic.tv_sec = -xtime.tv_sec;
+}
+void __init time_init(void)
+{
mach_sched_init(timer_interrupt);
}
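
The RTC read kept in read_persistent_clock() above still applies the two-digit year window: mach_hwclk() hands back a raw year offset, and anything that would land before 1970 is pushed forward a century. Restated as a standalone helper, purely for illustration (rtc_full_year() is a made-up name, not part of the patch):

/* Illustration only: 69 -> 1969 -> 2069; 99 -> 1999 and is left alone. */
static int rtc_full_year(int tm_year)
{
	int year = tm_year + 1900;

	if (year < 1970)	/* 1900..1969 are taken to mean 2000..2069 */
		year += 100;
	return year;
}
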
diff --git a/arch/m68k/kernel/traps.c b/arch/m68k/kernel/traps.c
index aacd6d17b833..ada4f4cca811 100644
--- a/arch/m68k/kernel/traps.c
+++ b/arch/m68k/kernel/traps.c
@@ -455,7 +455,7 @@ static inline void access_error040(struct frame *fp)
if (do_page_fault(&fp->ptregs, addr, errorcode)) {
#ifdef DEBUG
- printk("do_page_fault() !=0 \n");
+ printk("do_page_fault() !=0\n");
#endif
if (user_mode(&fp->ptregs)){
/* delay writebacks after signal delivery */
diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c
index 0356da9bf763..1c16b1baf8db 100644
--- a/arch/m68k/mac/config.c
+++ b/arch/m68k/mac/config.c
@@ -148,7 +148,7 @@ static void mac_cache_card_flush(int writeback)
void __init config_mac(void)
{
if (!MACH_IS_MAC)
- printk(KERN_ERR "ERROR: no Mac, but config_mac() called!! \n");
+ printk(KERN_ERR "ERROR: no Mac, but config_mac() called!!\n");
mach_sched_init = mac_sched_init;
mach_init_IRQ = mac_init_IRQ;
@@ -867,7 +867,7 @@ static void __init mac_identify(void)
*/
iop_preinit();
- printk(KERN_INFO "Detected Macintosh model: %d \n", model);
+ printk(KERN_INFO "Detected Macintosh model: %d\n", model);
/*
* Report booter data:
@@ -878,12 +878,12 @@ static void __init mac_identify(void)
mac_bi_data.videoaddr, mac_bi_data.videorow,
mac_bi_data.videodepth, mac_bi_data.dimensions & 0xFFFF,
mac_bi_data.dimensions >> 16);
- printk(KERN_DEBUG " Videological 0x%lx phys. 0x%lx, SCC at 0x%lx \n",
+ printk(KERN_DEBUG " Videological 0x%lx phys. 0x%lx, SCC at 0x%lx\n",
mac_bi_data.videological, mac_orig_videoaddr,
mac_bi_data.sccbase);
- printk(KERN_DEBUG " Boottime: 0x%lx GMTBias: 0x%lx \n",
+ printk(KERN_DEBUG " Boottime: 0x%lx GMTBias: 0x%lx\n",
mac_bi_data.boottime, mac_bi_data.gmtbias);
- printk(KERN_DEBUG " Machine ID: %ld CPUid: 0x%lx memory size: 0x%lx \n",
+ printk(KERN_DEBUG " Machine ID: %ld CPUid: 0x%lx memory size: 0x%lx\n",
mac_bi_data.id, mac_bi_data.cpuid, mac_bi_data.memsize);
iop_init();
diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c
index d0e35cf99fc6..a96394a0333d 100644
--- a/arch/m68k/mm/fault.c
+++ b/arch/m68k/mm/fault.c
@@ -154,7 +154,6 @@ good_area:
* the fault.
*/
- survive:
fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
#ifdef DEBUG
printk("handle_mm_fault returns %d\n",fault);
@@ -180,15 +179,10 @@ good_area:
*/
out_of_memory:
up_read(&mm->mmap_sem);
- if (is_global_init(current)) {
- yield();
- down_read(&mm->mmap_sem);
- goto survive;
- }
-
- printk("VM: killing process %s\n", current->comm);
- if (user_mode(regs))
- do_group_exit(SIGKILL);
+ if (!user_mode(regs))
+ goto no_context;
+ pagefault_out_of_memory();
+ return 0;
no_context:
current->thread.signo = SIGBUS;
diff --git a/arch/m68k/mvme16x/rtc.c b/arch/m68k/mvme16x/rtc.c
index 8da9c250d3e1..11ac6f63967a 100644
--- a/arch/m68k/mvme16x/rtc.c
+++ b/arch/m68k/mvme16x/rtc.c
@@ -9,7 +9,6 @@
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/miscdevice.h>
-#include <linux/smp_lock.h>
#include <linux/ioport.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
@@ -36,8 +35,7 @@ static const unsigned char days_in_mo[] =
static atomic_t rtc_ready = ATOMIC_INIT(1);
-static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
- unsigned long arg)
+static long rtc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
volatile MK48T08ptr_t rtc = (MK48T08ptr_t)MVME_RTC_BASE;
unsigned long flags;
@@ -120,22 +118,15 @@ static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
}
/*
- * We enforce only one user at a time here with the open/close.
- * Also clear the previous interrupt data on an open, and clean
- * up things on a close.
+ * We enforce only one user at a time here with the open/close.
*/
-
static int rtc_open(struct inode *inode, struct file *file)
{
- lock_kernel();
if( !atomic_dec_and_test(&rtc_ready) )
{
atomic_inc( &rtc_ready );
- unlock_kernel();
return -EBUSY;
}
- unlock_kernel();
-
return 0;
}
@@ -150,9 +141,9 @@ static int rtc_release(struct inode *inode, struct file *file)
*/
static const struct file_operations rtc_fops = {
- .ioctl = rtc_ioctl,
- .open = rtc_open,
- .release = rtc_release,
+ .unlocked_ioctl = rtc_ioctl,
+ .open = rtc_open,
+ .release = rtc_release,
};
static struct miscdevice rtc_dev=
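
The rtc.c change above is the usual big-kernel-lock removal pattern: the handler moves from .ioctl to .unlocked_ioctl, drops the inode argument, and has to provide its own serialisation now that open() no longer takes the BKL. A generic sketch of that pattern with hypothetical foo_* names (this is not the MVME driver, just the shape of the conversion):

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(foo_ioctl_lock);

static long foo_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	long ret = -ENOTTY;

	mutex_lock(&foo_ioctl_lock);	/* the BKL no longer serialises us */
	/* dispatch on cmd here, exactly as the old .ioctl body did */
	mutex_unlock(&foo_ioctl_lock);
	return ret;
}

static const struct file_operations foo_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= foo_ioctl,
};
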
diff --git a/arch/m68k/q40/config.c b/arch/m68k/q40/config.c
index 31ab3f08bbda..ad10fecec2fe 100644
--- a/arch/m68k/q40/config.c
+++ b/arch/m68k/q40/config.c
@@ -126,7 +126,7 @@ static void q40_reset(void)
{
halted = 1;
printk("\n\n*******************************************\n"
- "Called q40_reset : press the RESET button!! \n"
+ "Called q40_reset : press the RESET button!!\n"
"*******************************************\n");
Q40_LED_ON();
while (1)
diff --git a/arch/m68knommu/Kconfig b/arch/m68knommu/Kconfig
index 064f5913db1a..efeb6033fc17 100644
--- a/arch/m68knommu/Kconfig
+++ b/arch/m68knommu/Kconfig
@@ -566,7 +566,7 @@ config RAMBASE
processor address space.
config RAMSIZE
- hex "Size of RAM (in bytes)"
+ hex "Size of RAM (in bytes), or 0 for automatic"
default "0x400000"
help
Define the size of the system RAM. If you select 0 then the
diff --git a/arch/m68knommu/mm/fault.c b/arch/m68knommu/mm/fault.c
index 6f6673cb5829..bc05cf74d9c0 100644
--- a/arch/m68knommu/mm/fault.c
+++ b/arch/m68knommu/mm/fault.c
@@ -2,7 +2,7 @@
* linux/arch/m68knommu/mm/fault.c
*
* Copyright (C) 1998 D. Jeff Dionne <jeff@lineo.ca>,
- * Copyright (C) 2000 Lineo, Inc. (www.lineo.com)
+ * Copyright (C) 2000 Lineo, Inc. (www.lineo.com)
*
* Based on:
*
@@ -36,7 +36,7 @@ asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address,
unsigned long error_code)
{
#ifdef DEBUG
- printk (KERN_DEBUG "regs->sr=%#x, regs->pc=%#lx, address=%#lx, %ld\n",
+ printk(KERN_DEBUG "regs->sr=%#x, regs->pc=%#lx, address=%#lx, %ld\n",
regs->sr, regs->pc, address, error_code);
#endif
@@ -44,11 +44,11 @@ asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address,
* Oops. The kernel tried to access some bad page. We'll have to
* terminate things with extreme prejudice.
*/
- if ((unsigned long) address < PAGE_SIZE) {
+ if ((unsigned long) address < PAGE_SIZE)
printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
- } else
+ else
printk(KERN_ALERT "Unable to handle kernel access");
- printk(KERN_ALERT " at virtual address %08lx\n",address);
+ printk(KERN_ALERT " at virtual address %08lx\n", address);
die_if_kernel("Oops", regs, error_code);
do_exit(SIGKILL);
diff --git a/arch/m68knommu/platform/520x/config.c b/arch/m68knommu/platform/520x/config.c
index 92614de42cd3..71d2ba474c63 100644
--- a/arch/m68knommu/platform/520x/config.c
+++ b/arch/m68knommu/platform/520x/config.c
@@ -15,10 +15,13 @@
#include <linux/param.h>
#include <linux/init.h>
#include <linux/io.h>
+#include <linux/spi/spi.h>
+#include <linux/gpio.h>
#include <asm/machdep.h>
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#include <asm/mcfuart.h>
+#include <asm/mcfqspi.h>
/***************************************************************************/
@@ -74,9 +77,152 @@ static struct platform_device m520x_fec = {
.resource = m520x_fec_resources,
};
+#if defined(CONFIG_SPI_COLDFIRE_QSPI) || defined(CONFIG_SPI_COLDFIRE_QSPI_MODULE)
+static struct resource m520x_qspi_resources[] = {
+ {
+ .start = MCFQSPI_IOBASE,
+ .end = MCFQSPI_IOBASE + MCFQSPI_IOSIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = MCFINT_VECBASE + MCFINT_QSPI,
+ .end = MCFINT_VECBASE + MCFINT_QSPI,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+#define MCFQSPI_CS0 62
+#define MCFQSPI_CS1 63
+#define MCFQSPI_CS2 44
+
+static int m520x_cs_setup(struct mcfqspi_cs_control *cs_control)
+{
+ int status;
+
+ status = gpio_request(MCFQSPI_CS0, "MCFQSPI_CS0");
+ if (status) {
+ pr_debug("gpio_request for MCFQSPI_CS0 failed\n");
+ goto fail0;
+ }
+ status = gpio_direction_output(MCFQSPI_CS0, 1);
+ if (status) {
+ pr_debug("gpio_direction_output for MCFQSPI_CS0 failed\n");
+ goto fail1;
+ }
+
+ status = gpio_request(MCFQSPI_CS1, "MCFQSPI_CS1");
+ if (status) {
+ pr_debug("gpio_request for MCFQSPI_CS1 failed\n");
+ goto fail1;
+ }
+ status = gpio_direction_output(MCFQSPI_CS1, 1);
+ if (status) {
+ pr_debug("gpio_direction_output for MCFQSPI_CS1 failed\n");
+ goto fail2;
+ }
+
+ status = gpio_request(MCFQSPI_CS2, "MCFQSPI_CS2");
+ if (status) {
+ pr_debug("gpio_request for MCFQSPI_CS2 failed\n");
+ goto fail2;
+ }
+ status = gpio_direction_output(MCFQSPI_CS2, 1);
+ if (status) {
+ pr_debug("gpio_direction_output for MCFQSPI_CS2 failed\n");
+ goto fail3;
+ }
+
+ return 0;
+
+fail3:
+ gpio_free(MCFQSPI_CS2);
+fail2:
+ gpio_free(MCFQSPI_CS1);
+fail1:
+ gpio_free(MCFQSPI_CS0);
+fail0:
+ return status;
+}
+
+static void m520x_cs_teardown(struct mcfqspi_cs_control *cs_control)
+{
+ gpio_free(MCFQSPI_CS2);
+ gpio_free(MCFQSPI_CS1);
+ gpio_free(MCFQSPI_CS0);
+}
+
+static void m520x_cs_select(struct mcfqspi_cs_control *cs_control,
+ u8 chip_select, bool cs_high)
+{
+ switch (chip_select) {
+ case 0:
+ gpio_set_value(MCFQSPI_CS0, cs_high);
+ break;
+ case 1:
+ gpio_set_value(MCFQSPI_CS1, cs_high);
+ break;
+ case 2:
+ gpio_set_value(MCFQSPI_CS2, cs_high);
+ break;
+ }
+}
+
+static void m520x_cs_deselect(struct mcfqspi_cs_control *cs_control,
+ u8 chip_select, bool cs_high)
+{
+ switch (chip_select) {
+ case 0:
+ gpio_set_value(MCFQSPI_CS0, !cs_high);
+ break;
+ case 1:
+ gpio_set_value(MCFQSPI_CS1, !cs_high);
+ break;
+ case 2:
+ gpio_set_value(MCFQSPI_CS2, !cs_high);
+ break;
+ }
+}
+
+static struct mcfqspi_cs_control m520x_cs_control = {
+ .setup = m520x_cs_setup,
+ .teardown = m520x_cs_teardown,
+ .select = m520x_cs_select,
+ .deselect = m520x_cs_deselect,
+};
+
+static struct mcfqspi_platform_data m520x_qspi_data = {
+ .bus_num = 0,
+ .num_chipselect = 3,
+ .cs_control = &m520x_cs_control,
+};
+
+static struct platform_device m520x_qspi = {
+ .name = "mcfqspi",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(m520x_qspi_resources),
+ .resource = m520x_qspi_resources,
+ .dev.platform_data = &m520x_qspi_data,
+};
+
+static void __init m520x_qspi_init(void)
+{
+ u16 par;
+ /* setup Port QS for QSPI with gpio CS control */
+ writeb(0x3f, MCF_IPSBAR + MCF_GPIO_PAR_QSPI);
+ /* make U1CTS and U2RTS gpio for cs_control */
+ par = readw(MCF_IPSBAR + MCF_GPIO_PAR_UART);
+ par &= 0x00ff;
+ writew(par, MCF_IPSBAR + MCF_GPIO_PAR_UART);
+}
+#endif /* defined(CONFIG_SPI_COLDFIRE_QSPI) || defined(CONFIG_SPI_COLDFIRE_QSPI_MODULE) */
+
+
static struct platform_device *m520x_devices[] __initdata = {
&m520x_uart,
&m520x_fec,
+#if defined(CONFIG_SPI_COLDFIRE_QSPI) || defined(CONFIG_SPI_COLDFIRE_QSPI_MODULE)
+ &m520x_qspi,
+#endif
};
/***************************************************************************/
@@ -147,6 +293,9 @@ void __init config_BSP(char *commandp, int size)
mach_reset = m520x_cpu_reset;
m520x_uarts_init();
m520x_fec_init();
+#if defined(CONFIG_SPI_COLDFIRE_QSPI) || defined(CONFIG_SPI_COLDFIRE_QSPI_MODULE)
+ m520x_qspi_init();
+#endif
}
/***************************************************************************/
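
The QSPI block added above (and the near-identical ones for the other ColdFire parts below) only registers the controller and its GPIO chip-select callbacks; it puts no slave devices on the bus. A board port would still register spi_board_info entries itself, along these lines (the m25p80 flash, clock rate and chip select are made-up example values, not part of the patch):

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spi/spi.h>

static struct spi_board_info board_spi_devices[] __initdata = {
	{
		.modalias	= "m25p80",	/* hypothetical SPI flash */
		.max_speed_hz	= 16000000,
		.bus_num	= 0,		/* matches m520x_qspi_data.bus_num */
		.chip_select	= 1,		/* driven through MCFQSPI_CS1 above */
	},
};

static int __init board_spi_init(void)
{
	return spi_register_board_info(board_spi_devices,
				       ARRAY_SIZE(board_spi_devices));
}
arch_initcall(board_spi_init);
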
diff --git a/arch/m68knommu/platform/523x/config.c b/arch/m68knommu/platform/523x/config.c
index 6ba84f2aa397..8980f6d7715a 100644
--- a/arch/m68knommu/platform/523x/config.c
+++ b/arch/m68knommu/platform/523x/config.c
@@ -16,10 +16,13 @@
#include <linux/param.h>
#include <linux/init.h>
#include <linux/io.h>
+#include <linux/spi/spi.h>
+#include <linux/gpio.h>
#include <asm/machdep.h>
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#include <asm/mcfuart.h>
+#include <asm/mcfqspi.h>
/***************************************************************************/
@@ -75,9 +78,173 @@ static struct platform_device m523x_fec = {
.resource = m523x_fec_resources,
};
+#if defined(CONFIG_SPI_COLDFIRE_QSPI) || defined(CONFIG_SPI_COLDFIRE_QSPI_MODULE)
+static struct resource m523x_qspi_resources[] = {
+ {
+ .start = MCFQSPI_IOBASE,
+ .end = MCFQSPI_IOBASE + MCFQSPI_IOSIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = MCFINT_VECBASE + MCFINT_QSPI,
+ .end = MCFINT_VECBASE + MCFINT_QSPI,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+#define MCFQSPI_CS0 91
+#define MCFQSPI_CS1 92
+#define MCFQSPI_CS2 103
+#define MCFQSPI_CS3 99
+
+static int m523x_cs_setup(struct mcfqspi_cs_control *cs_control)
+{
+ int status;
+
+ status = gpio_request(MCFQSPI_CS0, "MCFQSPI_CS0");
+ if (status) {
+ pr_debug("gpio_request for MCFQSPI_CS0 failed\n");
+ goto fail0;
+ }
+ status = gpio_direction_output(MCFQSPI_CS0, 1);
+ if (status) {
+ pr_debug("gpio_direction_output for MCFQSPI_CS0 failed\n");
+ goto fail1;
+ }
+
+ status = gpio_request(MCFQSPI_CS1, "MCFQSPI_CS1");
+ if (status) {
+ pr_debug("gpio_request for MCFQSPI_CS1 failed\n");
+ goto fail1;
+ }
+ status = gpio_direction_output(MCFQSPI_CS1, 1);
+ if (status) {
+ pr_debug("gpio_direction_output for MCFQSPI_CS1 failed\n");
+ goto fail2;
+ }
+
+ status = gpio_request(MCFQSPI_CS2, "MCFQSPI_CS2");
+ if (status) {
+ pr_debug("gpio_request for MCFQSPI_CS2 failed\n");
+ goto fail2;
+ }
+ status = gpio_direction_output(MCFQSPI_CS2, 1);
+ if (status) {
+ pr_debug("gpio_direction_output for MCFQSPI_CS2 failed\n");
+ goto fail3;
+ }
+
+ status = gpio_request(MCFQSPI_CS3, "MCFQSPI_CS3");
+ if (status) {
+ pr_debug("gpio_request for MCFQSPI_CS3 failed\n");
+ goto fail3;
+ }
+ status = gpio_direction_output(MCFQSPI_CS3, 1);
+ if (status) {
+ pr_debug("gpio_direction_output for MCFQSPI_CS3 failed\n");
+ goto fail4;
+ }
+
+ return 0;
+
+fail4:
+ gpio_free(MCFQSPI_CS3);
+fail3:
+ gpio_free(MCFQSPI_CS2);
+fail2:
+ gpio_free(MCFQSPI_CS1);
+fail1:
+ gpio_free(MCFQSPI_CS0);
+fail0:
+ return status;
+}
+
+static void m523x_cs_teardown(struct mcfqspi_cs_control *cs_control)
+{
+ gpio_free(MCFQSPI_CS3);
+ gpio_free(MCFQSPI_CS2);
+ gpio_free(MCFQSPI_CS1);
+ gpio_free(MCFQSPI_CS0);
+}
+
+static void m523x_cs_select(struct mcfqspi_cs_control *cs_control,
+ u8 chip_select, bool cs_high)
+{
+ switch (chip_select) {
+ case 0:
+ gpio_set_value(MCFQSPI_CS0, cs_high);
+ break;
+ case 1:
+ gpio_set_value(MCFQSPI_CS1, cs_high);
+ break;
+ case 2:
+ gpio_set_value(MCFQSPI_CS2, cs_high);
+ break;
+ case 3:
+ gpio_set_value(MCFQSPI_CS3, cs_high);
+ break;
+ }
+}
+
+static void m523x_cs_deselect(struct mcfqspi_cs_control *cs_control,
+ u8 chip_select, bool cs_high)
+{
+ switch (chip_select) {
+ case 0:
+ gpio_set_value(MCFQSPI_CS0, !cs_high);
+ break;
+ case 1:
+ gpio_set_value(MCFQSPI_CS1, !cs_high);
+ break;
+ case 2:
+ gpio_set_value(MCFQSPI_CS2, !cs_high);
+ break;
+ case 3:
+ gpio_set_value(MCFQSPI_CS3, !cs_high);
+ break;
+ }
+}
+
+static struct mcfqspi_cs_control m523x_cs_control = {
+ .setup = m523x_cs_setup,
+ .teardown = m523x_cs_teardown,
+ .select = m523x_cs_select,
+ .deselect = m523x_cs_deselect,
+};
+
+static struct mcfqspi_platform_data m523x_qspi_data = {
+ .bus_num = 0,
+ .num_chipselect = 4,
+ .cs_control = &m523x_cs_control,
+};
+
+static struct platform_device m523x_qspi = {
+ .name = "mcfqspi",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(m523x_qspi_resources),
+ .resource = m523x_qspi_resources,
+ .dev.platform_data = &m523x_qspi_data,
+};
+
+static void __init m523x_qspi_init(void)
+{
+ u16 par;
+
+ /* setup QSPS pins for QSPI with gpio CS control */
+ writeb(0x1f, MCFGPIO_PAR_QSPI);
+ /* and CS2 & CS3 as gpio */
+ par = readw(MCFGPIO_PAR_TIMER);
+ par &= 0x3f3f;
+ writew(par, MCFGPIO_PAR_TIMER);
+}
+#endif /* defined(CONFIG_SPI_COLDFIRE_QSPI) || defined(CONFIG_SPI_COLDFIRE_QSPI_MODULE) */
+
static struct platform_device *m523x_devices[] __initdata = {
&m523x_uart,
&m523x_fec,
+#if defined(CONFIG_SPI_COLDFIRE_QSPI) || defined(CONFIG_SPI_COLDFIRE_QSPI_MODULE)
+ &m523x_qspi,
+#endif
};
/***************************************************************************/
@@ -114,6 +281,9 @@ void __init config_BSP(char *commandp, int size)
static int __init init_BSP(void)
{
m523x_fec_init();
+#if defined(CONFIG_SPI_COLDFIRE_QSPI) || defined(CONFIG_SPI_COLDFIRE_QSPI_MODULE)
+ m523x_qspi_init();
+#endif
platform_add_devices(m523x_devices, ARRAY_SIZE(m523x_devices));
return 0;
}
diff --git a/arch/m68knommu/platform/5249/config.c b/arch/m68knommu/platform/5249/config.c
index 646f5ba462fc..ceb31e5744a6 100644
--- a/arch/m68knommu/platform/5249/config.c
+++ b/arch/m68knommu/platform/5249/config.c
@@ -12,10 +12,13 @@
#include <linux/param.h>
#include <linux/init.h>
#include <linux/io.h>
+#include <linux/spi/spi.h>
+#include <linux/gpio.h>
#include <asm/machdep.h>
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#include <asm/mcfuart.h>
+#include <asm/mcfqspi.h>
/***************************************************************************/
@@ -37,8 +40,196 @@ static struct platform_device m5249_uart = {
.dev.platform_data = m5249_uart_platform,
};
+#ifdef CONFIG_M5249C3
+
+static struct resource m5249_smc91x_resources[] = {
+ {
+ .start = 0xe0000300,
+ .end = 0xe0000300 + 0x100,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = MCFINTC2_GPIOIRQ6,
+ .end = MCFINTC2_GPIOIRQ6,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device m5249_smc91x = {
+ .name = "smc91x",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(m5249_smc91x_resources),
+ .resource = m5249_smc91x_resources,
+};
+
+#endif /* CONFIG_M5249C3 */
+
+#if defined(CONFIG_SPI_COLDFIRE_QSPI) || defined(CONFIG_SPI_COLDFIRE_QSPI_MODULE)
+static struct resource m5249_qspi_resources[] = {
+ {
+ .start = MCFQSPI_IOBASE,
+ .end = MCFQSPI_IOBASE + MCFQSPI_IOSIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = MCF_IRQ_QSPI,
+ .end = MCF_IRQ_QSPI,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+#define MCFQSPI_CS0 29
+#define MCFQSPI_CS1 24
+#define MCFQSPI_CS2 21
+#define MCFQSPI_CS3 22
+
+static int m5249_cs_setup(struct mcfqspi_cs_control *cs_control)
+{
+ int status;
+
+ status = gpio_request(MCFQSPI_CS0, "MCFQSPI_CS0");
+ if (status) {
+ pr_debug("gpio_request for MCFQSPI_CS0 failed\n");
+ goto fail0;
+ }
+ status = gpio_direction_output(MCFQSPI_CS0, 1);
+ if (status) {
+ pr_debug("gpio_direction_output for MCFQSPI_CS0 failed\n");
+ goto fail1;
+ }
+
+ status = gpio_request(MCFQSPI_CS1, "MCFQSPI_CS1");
+ if (status) {
+ pr_debug("gpio_request for MCFQSPI_CS1 failed\n");
+ goto fail1;
+ }
+ status = gpio_direction_output(MCFQSPI_CS1, 1);
+ if (status) {
+ pr_debug("gpio_direction_output for MCFQSPI_CS1 failed\n");
+ goto fail2;
+ }
+
+ status = gpio_request(MCFQSPI_CS2, "MCFQSPI_CS2");
+ if (status) {
+ pr_debug("gpio_request for MCFQSPI_CS2 failed\n");
+ goto fail2;
+ }
+ status = gpio_direction_output(MCFQSPI_CS2, 1);
+ if (status) {
+ pr_debug("gpio_direction_output for MCFQSPI_CS2 failed\n");
+ goto fail3;
+ }
+
+ status = gpio_request(MCFQSPI_CS3, "MCFQSPI_CS3");
+ if (status) {
+ pr_debug("gpio_request for MCFQSPI_CS3 failed\n");
+ goto fail3;
+ }
+ status = gpio_direction_output(MCFQSPI_CS3, 1);
+ if (status) {
+ pr_debug("gpio_direction_output for MCFQSPI_CS3 failed\n");
+ goto fail4;
+ }
+
+ return 0;
+
+fail4:
+ gpio_free(MCFQSPI_CS3);
+fail3:
+ gpio_free(MCFQSPI_CS2);
+fail2:
+ gpio_free(MCFQSPI_CS1);
+fail1:
+ gpio_free(MCFQSPI_CS0);
+fail0:
+ return status;
+}
+
+static void m5249_cs_teardown(struct mcfqspi_cs_control *cs_control)
+{
+ gpio_free(MCFQSPI_CS3);
+ gpio_free(MCFQSPI_CS2);
+ gpio_free(MCFQSPI_CS1);
+ gpio_free(MCFQSPI_CS0);
+}
+
+static void m5249_cs_select(struct mcfqspi_cs_control *cs_control,
+ u8 chip_select, bool cs_high)
+{
+ switch (chip_select) {
+ case 0:
+ gpio_set_value(MCFQSPI_CS0, cs_high);
+ break;
+ case 1:
+ gpio_set_value(MCFQSPI_CS1, cs_high);
+ break;
+ case 2:
+ gpio_set_value(MCFQSPI_CS2, cs_high);
+ break;
+ case 3:
+ gpio_set_value(MCFQSPI_CS3, cs_high);
+ break;
+ }
+}
+
+static void m5249_cs_deselect(struct mcfqspi_cs_control *cs_control,
+ u8 chip_select, bool cs_high)
+{
+ switch (chip_select) {
+ case 0:
+ gpio_set_value(MCFQSPI_CS0, !cs_high);
+ break;
+ case 1:
+ gpio_set_value(MCFQSPI_CS1, !cs_high);
+ break;
+ case 2:
+ gpio_set_value(MCFQSPI_CS2, !cs_high);
+ break;
+ case 3:
+ gpio_set_value(MCFQSPI_CS3, !cs_high);
+ break;
+ }
+}
+
+static struct mcfqspi_cs_control m5249_cs_control = {
+ .setup = m5249_cs_setup,
+ .teardown = m5249_cs_teardown,
+ .select = m5249_cs_select,
+ .deselect = m5249_cs_deselect,
+};
+
+static struct mcfqspi_platform_data m5249_qspi_data = {
+ .bus_num = 0,
+ .num_chipselect = 4,
+ .cs_control = &m5249_cs_control,
+};
+
+static struct platform_device m5249_qspi = {
+ .name = "mcfqspi",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(m5249_qspi_resources),
+ .resource = m5249_qspi_resources,
+ .dev.platform_data = &m5249_qspi_data,
+};
+
+static void __init m5249_qspi_init(void)
+{
+ /* QSPI irq setup */
+ writeb(MCFSIM_ICR_AUTOVEC | MCFSIM_ICR_LEVEL4 | MCFSIM_ICR_PRI0,
+ MCF_MBAR + MCFSIM_QSPIICR);
+ mcf_mapirq2imr(MCF_IRQ_QSPI, MCFINTC_QSPI);
+}
+#endif /* defined(CONFIG_SPI_COLDFIRE_QSPI) || defined(CONFIG_SPI_COLDFIRE_QSPI_MODULE) */
+
+
static struct platform_device *m5249_devices[] __initdata = {
&m5249_uart,
+#ifdef CONFIG_M5249C3
+ &m5249_smc91x,
+#endif
+#if defined(CONFIG_SPI_COLDFIRE_QSPI) || defined(CONFIG_SPI_COLDFIRE_QSPI_MODULE)
+ &m5249_qspi,
+#endif
};
/***************************************************************************/
@@ -67,6 +258,24 @@ static void __init m5249_uarts_init(void)
/***************************************************************************/
+#ifdef CONFIG_M5249C3
+
+static void __init m5249_smc91x_init(void)
+{
+ u32 gpio;
+
+ /* Set the GPIO line as interrupt source for smc91x device */
+ gpio = readl(MCF_MBAR2 + MCFSIM2_GPIOINTENABLE);
+ writel(gpio | 0x40, MCF_MBAR2 + MCFSIM2_GPIOINTENABLE);
+
+ gpio = readl(MCF_MBAR2 + MCFSIM2_INTLEVEL5);
+ writel(gpio | 0x04000000, MCF_MBAR2 + MCFSIM2_INTLEVEL5);
+}
+
+#endif /* CONFIG_M5249C3 */
+
+/***************************************************************************/
+
static void __init m5249_timers_init(void)
{
/* Timer1 is always used as system timer */
@@ -100,6 +309,12 @@ void __init config_BSP(char *commandp, int size)
mach_reset = m5249_cpu_reset;
m5249_timers_init();
m5249_uarts_init();
+#ifdef CONFIG_M5249C3
+ m5249_smc91x_init();
+#endif
+#if defined(CONFIG_SPI_COLDFIRE_QSPI) || defined(CONFIG_SPI_COLDFIRE_QSPI_MODULE)
+ m5249_qspi_init();
+#endif
}
/***************************************************************************/
diff --git a/arch/m68knommu/platform/527x/config.c b/arch/m68knommu/platform/527x/config.c
index fa51be172830..3d9c35c98b98 100644
--- a/arch/m68knommu/platform/527x/config.c
+++ b/arch/m68knommu/platform/527x/config.c
@@ -16,10 +16,13 @@
#include <linux/param.h>
#include <linux/init.h>
#include <linux/io.h>
+#include <linux/spi/spi.h>
+#include <linux/gpio.h>
#include <asm/machdep.h>
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#include <asm/mcfuart.h>
+#include <asm/mcfqspi.h>
/***************************************************************************/
@@ -106,12 +109,188 @@ static struct platform_device m527x_fec[] = {
},
};
+#if defined(CONFIG_SPI_COLDFIRE_QSPI) || defined(CONFIG_SPI_COLDFIRE_QSPI_MODULE)
+static struct resource m527x_qspi_resources[] = {
+ {
+ .start = MCFQSPI_IOBASE,
+ .end = MCFQSPI_IOBASE + MCFQSPI_IOSIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = MCFINT_VECBASE + MCFINT_QSPI,
+ .end = MCFINT_VECBASE + MCFINT_QSPI,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+#if defined(CONFIG_M5271)
+#define MCFQSPI_CS0 91
+#define MCFQSPI_CS1 92
+#define MCFQSPI_CS2 99
+#define MCFQSPI_CS3 103
+#elif defined(CONFIG_M5275)
+#define MCFQSPI_CS0 59
+#define MCFQSPI_CS1 60
+#define MCFQSPI_CS2 61
+#define MCFQSPI_CS3 62
+#endif
+
+static int m527x_cs_setup(struct mcfqspi_cs_control *cs_control)
+{
+ int status;
+
+ status = gpio_request(MCFQSPI_CS0, "MCFQSPI_CS0");
+ if (status) {
+ pr_debug("gpio_request for MCFQSPI_CS0 failed\n");
+ goto fail0;
+ }
+ status = gpio_direction_output(MCFQSPI_CS0, 1);
+ if (status) {
+ pr_debug("gpio_direction_output for MCFQSPI_CS0 failed\n");
+ goto fail1;
+ }
+
+ status = gpio_request(MCFQSPI_CS1, "MCFQSPI_CS1");
+ if (status) {
+ pr_debug("gpio_request for MCFQSPI_CS1 failed\n");
+ goto fail1;
+ }
+ status = gpio_direction_output(MCFQSPI_CS1, 1);
+ if (status) {
+ pr_debug("gpio_direction_output for MCFQSPI_CS1 failed\n");
+ goto fail2;
+ }
+
+ status = gpio_request(MCFQSPI_CS2, "MCFQSPI_CS2");
+ if (status) {
+ pr_debug("gpio_request for MCFQSPI_CS2 failed\n");
+ goto fail2;
+ }
+ status = gpio_direction_output(MCFQSPI_CS2, 1);
+ if (status) {
+ pr_debug("gpio_direction_output for MCFQSPI_CS2 failed\n");
+ goto fail3;
+ }
+
+ status = gpio_request(MCFQSPI_CS3, "MCFQSPI_CS3");
+ if (status) {
+ pr_debug("gpio_request for MCFQSPI_CS3 failed\n");
+ goto fail3;
+ }
+ status = gpio_direction_output(MCFQSPI_CS3, 1);
+ if (status) {
+ pr_debug("gpio_direction_output for MCFQSPI_CS3 failed\n");
+ goto fail4;
+ }
+
+ return 0;
+
+fail4:
+ gpio_free(MCFQSPI_CS3);
+fail3:
+ gpio_free(MCFQSPI_CS2);
+fail2:
+ gpio_free(MCFQSPI_CS1);
+fail1:
+ gpio_free(MCFQSPI_CS0);
+fail0:
+ return status;
+}
+
+static void m527x_cs_teardown(struct mcfqspi_cs_control *cs_control)
+{
+ gpio_free(MCFQSPI_CS3);
+ gpio_free(MCFQSPI_CS2);
+ gpio_free(MCFQSPI_CS1);
+ gpio_free(MCFQSPI_CS0);
+}
+
+static void m527x_cs_select(struct mcfqspi_cs_control *cs_control,
+ u8 chip_select, bool cs_high)
+{
+ switch (chip_select) {
+ case 0:
+ gpio_set_value(MCFQSPI_CS0, cs_high);
+ break;
+ case 1:
+ gpio_set_value(MCFQSPI_CS1, cs_high);
+ break;
+ case 2:
+ gpio_set_value(MCFQSPI_CS2, cs_high);
+ break;
+ case 3:
+ gpio_set_value(MCFQSPI_CS3, cs_high);
+ break;
+ }
+}
+
+static void m527x_cs_deselect(struct mcfqspi_cs_control *cs_control,
+ u8 chip_select, bool cs_high)
+{
+ switch (chip_select) {
+ case 0:
+ gpio_set_value(MCFQSPI_CS0, !cs_high);
+ break;
+ case 1:
+ gpio_set_value(MCFQSPI_CS1, !cs_high);
+ break;
+ case 2:
+ gpio_set_value(MCFQSPI_CS2, !cs_high);
+ break;
+ case 3:
+ gpio_set_value(MCFQSPI_CS3, !cs_high);
+ break;
+ }
+}
+
+static struct mcfqspi_cs_control m527x_cs_control = {
+ .setup = m527x_cs_setup,
+ .teardown = m527x_cs_teardown,
+ .select = m527x_cs_select,
+ .deselect = m527x_cs_deselect,
+};
+
+static struct mcfqspi_platform_data m527x_qspi_data = {
+ .bus_num = 0,
+ .num_chipselect = 4,
+ .cs_control = &m527x_cs_control,
+};
+
+static struct platform_device m527x_qspi = {
+ .name = "mcfqspi",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(m527x_qspi_resources),
+ .resource = m527x_qspi_resources,
+ .dev.platform_data = &m527x_qspi_data,
+};
+
+static void __init m527x_qspi_init(void)
+{
+#if defined(CONFIG_M5271)
+ u16 par;
+
+ /* setup QSPS pins for QSPI with gpio CS control */
+ writeb(0x1f, MCFGPIO_PAR_QSPI);
+ /* and CS2 & CS3 as gpio */
+ par = readw(MCFGPIO_PAR_TIMER);
+ par &= 0x3f3f;
+ writew(par, MCFGPIO_PAR_TIMER);
+#elif defined(CONFIG_M5275)
+ /* setup QSPS pins for QSPI with gpio CS control */
+ writew(0x003e, MCFGPIO_PAR_QSPI);
+#endif
+}
+#endif /* defined(CONFIG_SPI_COLDFIRE_QSPI) || defined(CONFIG_SPI_COLDFIRE_QSPI_MODULE) */
+
static struct platform_device *m527x_devices[] __initdata = {
&m527x_uart,
&m527x_fec[0],
#ifdef CONFIG_FEC2
&m527x_fec[1],
#endif
+#if defined(CONFIG_SPI_COLDFIRE_QSPI) || defined(CONFIG_SPI_COLDFIRE_QSPI_MODULE)
+ &m527x_qspi,
+#endif
};
/***************************************************************************/
@@ -187,6 +366,9 @@ void __init config_BSP(char *commandp, int size)
mach_reset = m527x_cpu_reset;
m527x_uarts_init();
m527x_fec_init();
+#if defined(CONFIG_SPI_COLDFIRE_QSPI) || defined(CONFIG_SPI_COLDFIRE_QSPI_MODULE)
+ m527x_qspi_init();
+#endif
}
/***************************************************************************/
diff --git a/arch/m68knommu/platform/528x/config.c b/arch/m68knommu/platform/528x/config.c
index 6e608d1836f1..76b743343bfa 100644
--- a/arch/m68knommu/platform/528x/config.c
+++ b/arch/m68knommu/platform/528x/config.c
@@ -17,10 +17,13 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/io.h>
+#include <linux/spi/spi.h>
+#include <linux/gpio.h>
#include <asm/machdep.h>
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#include <asm/mcfuart.h>
+#include <asm/mcfqspi.h>
/***************************************************************************/
@@ -76,10 +79,141 @@ static struct platform_device m528x_fec = {
.resource = m528x_fec_resources,
};
+#if defined(CONFIG_SPI_COLDFIRE_QSPI) || defined(CONFIG_SPI_COLDFIRE_QSPI_MODULE)
+static struct resource m528x_qspi_resources[] = {
+ {
+ .start = MCFQSPI_IOBASE,
+ .end = MCFQSPI_IOBASE + MCFQSPI_IOSIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = MCFINT_VECBASE + MCFINT_QSPI,
+ .end = MCFINT_VECBASE + MCFINT_QSPI,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+#define MCFQSPI_CS0 147
+#define MCFQSPI_CS1 148
+#define MCFQSPI_CS2 149
+#define MCFQSPI_CS3 150
+
+static int m528x_cs_setup(struct mcfqspi_cs_control *cs_control)
+{
+ int status;
+
+ status = gpio_request(MCFQSPI_CS0, "MCFQSPI_CS0");
+ if (status) {
+ pr_debug("gpio_request for MCFQSPI_CS0 failed\n");
+ goto fail0;
+ }
+ status = gpio_direction_output(MCFQSPI_CS0, 1);
+ if (status) {
+ pr_debug("gpio_direction_output for MCFQSPI_CS0 failed\n");
+ goto fail1;
+ }
+
+ status = gpio_request(MCFQSPI_CS1, "MCFQSPI_CS1");
+ if (status) {
+ pr_debug("gpio_request for MCFQSPI_CS1 failed\n");
+ goto fail1;
+ }
+ status = gpio_direction_output(MCFQSPI_CS1, 1);
+ if (status) {
+ pr_debug("gpio_direction_output for MCFQSPI_CS1 failed\n");
+ goto fail2;
+ }
+
+ status = gpio_request(MCFQSPI_CS2, "MCFQSPI_CS2");
+ if (status) {
+ pr_debug("gpio_request for MCFQSPI_CS2 failed\n");
+ goto fail2;
+ }
+ status = gpio_direction_output(MCFQSPI_CS2, 1);
+ if (status) {
+ pr_debug("gpio_direction_output for MCFQSPI_CS2 failed\n");
+ goto fail3;
+ }
+
+ status = gpio_request(MCFQSPI_CS3, "MCFQSPI_CS3");
+ if (status) {
+ pr_debug("gpio_request for MCFQSPI_CS3 failed\n");
+ goto fail3;
+ }
+ status = gpio_direction_output(MCFQSPI_CS3, 1);
+ if (status) {
+ pr_debug("gpio_direction_output for MCFQSPI_CS3 failed\n");
+ goto fail4;
+ }
+
+ return 0;
+
+fail4:
+ gpio_free(MCFQSPI_CS3);
+fail3:
+ gpio_free(MCFQSPI_CS2);
+fail2:
+ gpio_free(MCFQSPI_CS1);
+fail1:
+ gpio_free(MCFQSPI_CS0);
+fail0:
+ return status;
+}
+
+static void m528x_cs_teardown(struct mcfqspi_cs_control *cs_control)
+{
+ gpio_free(MCFQSPI_CS3);
+ gpio_free(MCFQSPI_CS2);
+ gpio_free(MCFQSPI_CS1);
+ gpio_free(MCFQSPI_CS0);
+}
+
+static void m528x_cs_select(struct mcfqspi_cs_control *cs_control,
+ u8 chip_select, bool cs_high)
+{
+ gpio_set_value(MCFQSPI_CS0 + chip_select, cs_high);
+}
+
+static void m528x_cs_deselect(struct mcfqspi_cs_control *cs_control,
+ u8 chip_select, bool cs_high)
+{
+ gpio_set_value(MCFQSPI_CS0 + chip_select, !cs_high);
+}
+
+static struct mcfqspi_cs_control m528x_cs_control = {
+ .setup = m528x_cs_setup,
+ .teardown = m528x_cs_teardown,
+ .select = m528x_cs_select,
+ .deselect = m528x_cs_deselect,
+};
+
+static struct mcfqspi_platform_data m528x_qspi_data = {
+ .bus_num = 0,
+ .num_chipselect = 4,
+ .cs_control = &m528x_cs_control,
+};
+
+static struct platform_device m528x_qspi = {
+ .name = "mcfqspi",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(m528x_qspi_resources),
+ .resource = m528x_qspi_resources,
+ .dev.platform_data = &m528x_qspi_data,
+};
+
+static void __init m528x_qspi_init(void)
+{
+ /* setup Port QS for QSPI with gpio CS control */
+ __raw_writeb(0x07, MCFGPIO_PQSPAR);
+}
+#endif /* defined(CONFIG_SPI_COLDFIRE_QSPI) || defined(CONFIG_SPI_COLDFIRE_QSPI_MODULE) */
static struct platform_device *m528x_devices[] __initdata = {
&m528x_uart,
&m528x_fec,
+#if defined(CONFIG_SPI_COLDFIRE_QSPI) || defined(CONFIG_SPI_COLDFIRE_QSPI_MODULE)
+ &m528x_qspi,
+#endif
};
/***************************************************************************/
@@ -174,6 +308,9 @@ static int __init init_BSP(void)
mach_reset = m528x_cpu_reset;
m528x_uarts_init();
m528x_fec_init();
+#if defined(CONFIG_SPI_COLDFIRE_QSPI) || defined(CONFIG_SPI_COLDFIRE_QSPI_MODULE)
+ m528x_qspi_init();
+#endif
platform_add_devices(m528x_devices, ARRAY_SIZE(m528x_devices));
return 0;
}
diff --git a/arch/m68knommu/platform/5307/Makefile b/arch/m68knommu/platform/5307/Makefile
index 667db6598451..6de526976828 100644
--- a/arch/m68knommu/platform/5307/Makefile
+++ b/arch/m68knommu/platform/5307/Makefile
@@ -14,5 +14,7 @@
asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1
-obj-y += config.o gpio.o
+obj-y += config.o gpio.o
+obj-$(CONFIG_NETtel) += nettel.o
+obj-$(CONFIG_CLEOPATRA) += nettel.o
diff --git a/arch/m68knommu/platform/5307/nettel.c b/arch/m68knommu/platform/5307/nettel.c
new file mode 100644
index 000000000000..e925ea4602f8
--- /dev/null
+++ b/arch/m68knommu/platform/5307/nettel.c
@@ -0,0 +1,153 @@
+/***************************************************************************/
+
+/*
+ * nettel.c -- startup code support for the NETtel boards
+ *
+ * Copyright (C) 2009, Greg Ungerer (gerg@snapgear.com)
+ */
+
+/***************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/param.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <asm/coldfire.h>
+#include <asm/mcfsim.h>
+#include <asm/nettel.h>
+
+/***************************************************************************/
+
+/*
+ * Define the IO and interrupt resources of the 2 SMC9196 interfaces.
+ */
+#define NETTEL_SMC0_ADDR 0x30600300
+#define NETTEL_SMC0_IRQ 29
+
+#define NETTEL_SMC1_ADDR 0x30600000
+#define NETTEL_SMC1_IRQ 27
+
+/*
+ * We need some access to the SMC9196 registers. Define the registers we
+ * will need here (including smc91x.h doesn't seem to provide them in a
+ * simple form).
+ */
+#define SMC91xx_BANKSELECT 14
+#define SMC91xx_BASEADDR 2
+#define SMC91xx_BASEMAC 4
+
+/***************************************************************************/
+
+static struct resource nettel_smc91x_0_resources[] = {
+ {
+ .start = NETTEL_SMC0_ADDR,
+ .end = NETTEL_SMC0_ADDR + 0x20,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = NETTEL_SMC0_IRQ,
+ .end = NETTEL_SMC0_IRQ,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct resource nettel_smc91x_1_resources[] = {
+ {
+ .start = NETTEL_SMC1_ADDR,
+ .end = NETTEL_SMC1_ADDR + 0x20,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = NETTEL_SMC1_IRQ,
+ .end = NETTEL_SMC1_IRQ,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device nettel_smc91x[] = {
+ {
+ .name = "smc91x",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(nettel_smc91x_0_resources),
+ .resource = nettel_smc91x_0_resources,
+ },
+ {
+ .name = "smc91x",
+ .id = 1,
+ .num_resources = ARRAY_SIZE(nettel_smc91x_1_resources),
+ .resource = nettel_smc91x_1_resources,
+ },
+};
+
+static struct platform_device *nettel_devices[] __initdata = {
+ &nettel_smc91x[0],
+ &nettel_smc91x[1],
+};
+
+/***************************************************************************/
+
+static u8 nettel_macdefault[] __initdata = {
+ 0x00, 0xd0, 0xcf, 0x00, 0x00, 0x01,
+};
+
+/*
+ * Set the MAC address stored in flash into the SMC9196 core. Make sure the
+ * flash MAC address is sane and not just erased flash. If it is unusable,
+ * fall back to the Moreton Bay default MAC address instead.
+ */
+
+static void __init nettel_smc91x_setmac(unsigned int ioaddr, unsigned int flashaddr)
+{
+ u16 *macp;
+
+ macp = (u16 *) flashaddr;
+ if ((macp[0] == 0xffff) && (macp[1] == 0xffff) && (macp[2] == 0xffff))
+ macp = (u16 *) &nettel_macdefault[0];
+
+ writew(1, NETTEL_SMC0_ADDR + SMC91xx_BANKSELECT);
+ writew(macp[0], ioaddr + SMC91xx_BASEMAC);
+ writew(macp[1], ioaddr + SMC91xx_BASEMAC + 2);
+ writew(macp[2], ioaddr + SMC91xx_BASEMAC + 4);
+}
+
+/***************************************************************************/
+
+/*
+ * Re-map the address space of at least one of the SMC ethernet
+ * parts. Both parts power up decoding the same address, so we
+ * need to move one of them first, before doing anything else.
+ */
+
+static void __init nettel_smc91x_init(void)
+{
+ writew(0x00ec, MCF_MBAR + MCFSIM_PADDR);
+ mcf_setppdata(0, 0x0080);
+ writew(1, NETTEL_SMC0_ADDR + SMC91xx_BANKSELECT);
+ writew(0x0067, NETTEL_SMC0_ADDR + SMC91xx_BASEADDR);
+ mcf_setppdata(0x0080, 0);
+
+ /* Set correct chip select timing for SMC9196 accesses */
+ writew(0x1180, MCF_MBAR + MCFSIM_CSCR3);
+
+ /* Set the SMC interrupts to be auto-vectored */
+ mcf_autovector(NETTEL_SMC0_IRQ);
+ mcf_autovector(NETTEL_SMC1_IRQ);
+
+ /* Set MAC addresses from flash for both interfaces */
+ nettel_smc91x_setmac(NETTEL_SMC0_ADDR, 0xf0006000);
+ nettel_smc91x_setmac(NETTEL_SMC1_ADDR, 0xf0006006);
+}
+
+/***************************************************************************/
+
+static int __init init_nettel(void)
+{
+ nettel_smc91x_init();
+ platform_add_devices(nettel_devices, ARRAY_SIZE(nettel_devices));
+ return 0;
+}
+
+arch_initcall(init_nettel);
+
+/***************************************************************************/
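
nettel_smc91x_setmac() above treats three 0xffff words as erased flash and falls back to the Moreton Bay default address. A stricter variant of the same sanity check can be written with the generic helper; this sketch is not what the patch does, merely a tighter test (it also rejects all-zero and multicast addresses):

#include <linux/etherdevice.h>

/* Sketch only: pick the flash MAC if it looks usable, else the fallback. */
static const u8 *nettel_pick_mac(const u8 *flash_mac, const u8 *fallback)
{
	return is_valid_ether_addr(flash_mac) ? flash_mac : fallback;
}
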
diff --git a/arch/m68knommu/platform/532x/config.c b/arch/m68knommu/platform/532x/config.c
index d632948e64e5..ca51323f957b 100644
--- a/arch/m68knommu/platform/532x/config.c
+++ b/arch/m68knommu/platform/532x/config.c
@@ -21,12 +21,15 @@
#include <linux/param.h>
#include <linux/init.h>
#include <linux/io.h>
+#include <linux/spi/spi.h>
+#include <linux/gpio.h>
#include <asm/machdep.h>
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#include <asm/mcfuart.h>
#include <asm/mcfdma.h>
#include <asm/mcfwdebug.h>
+#include <asm/mcfqspi.h>
/***************************************************************************/
@@ -82,9 +85,127 @@ static struct platform_device m532x_fec = {
.resource = m532x_fec_resources,
};
+#if defined(CONFIG_SPI_COLDFIRE_QSPI) || defined(CONFIG_SPI_COLDFIRE_QSPI_MODULE)
+static struct resource m532x_qspi_resources[] = {
+ {
+ .start = MCFQSPI_IOBASE,
+ .end = MCFQSPI_IOBASE + MCFQSPI_IOSIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = MCFINT_VECBASE + MCFINT_QSPI,
+ .end = MCFINT_VECBASE + MCFINT_QSPI,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+#define MCFQSPI_CS0 84
+#define MCFQSPI_CS1 85
+#define MCFQSPI_CS2 86
+
+static int m532x_cs_setup(struct mcfqspi_cs_control *cs_control)
+{
+ int status;
+
+ status = gpio_request(MCFQSPI_CS0, "MCFQSPI_CS0");
+ if (status) {
+ pr_debug("gpio_request for MCFQSPI_CS0 failed\n");
+ goto fail0;
+ }
+ status = gpio_direction_output(MCFQSPI_CS0, 1);
+ if (status) {
+ pr_debug("gpio_direction_output for MCFQSPI_CS0 failed\n");
+ goto fail1;
+ }
+
+ status = gpio_request(MCFQSPI_CS1, "MCFQSPI_CS1");
+ if (status) {
+ pr_debug("gpio_request for MCFQSPI_CS1 failed\n");
+ goto fail1;
+ }
+ status = gpio_direction_output(MCFQSPI_CS1, 1);
+ if (status) {
+ pr_debug("gpio_direction_output for MCFQSPI_CS1 failed\n");
+ goto fail2;
+ }
+
+ status = gpio_request(MCFQSPI_CS2, "MCFQSPI_CS2");
+ if (status) {
+ pr_debug("gpio_request for MCFQSPI_CS2 failed\n");
+ goto fail2;
+ }
+ status = gpio_direction_output(MCFQSPI_CS2, 1);
+ if (status) {
+ pr_debug("gpio_direction_output for MCFQSPI_CS2 failed\n");
+ goto fail3;
+ }
+
+ return 0;
+
+fail3:
+ gpio_free(MCFQSPI_CS2);
+fail2:
+ gpio_free(MCFQSPI_CS1);
+fail1:
+ gpio_free(MCFQSPI_CS0);
+fail0:
+ return status;
+}
+
+static void m532x_cs_teardown(struct mcfqspi_cs_control *cs_control)
+{
+ gpio_free(MCFQSPI_CS2);
+ gpio_free(MCFQSPI_CS1);
+ gpio_free(MCFQSPI_CS0);
+}
+
+static void m532x_cs_select(struct mcfqspi_cs_control *cs_control,
+ u8 chip_select, bool cs_high)
+{
+ gpio_set_value(MCFQSPI_CS0 + chip_select, cs_high);
+}
+
+static void m532x_cs_deselect(struct mcfqspi_cs_control *cs_control,
+ u8 chip_select, bool cs_high)
+{
+ gpio_set_value(MCFQSPI_CS0 + chip_select, !cs_high);
+}
+
+static struct mcfqspi_cs_control m532x_cs_control = {
+ .setup = m532x_cs_setup,
+ .teardown = m532x_cs_teardown,
+ .select = m532x_cs_select,
+ .deselect = m532x_cs_deselect,
+};
+
+static struct mcfqspi_platform_data m532x_qspi_data = {
+ .bus_num = 0,
+ .num_chipselect = 3,
+ .cs_control = &m532x_cs_control,
+};
+
+static struct platform_device m532x_qspi = {
+ .name = "mcfqspi",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(m532x_qspi_resources),
+ .resource = m532x_qspi_resources,
+ .dev.platform_data = &m532x_qspi_data,
+};
+
+static void __init m532x_qspi_init(void)
+{
+ /* setup QSPS pins for QSPI with gpio CS control */
+ writew(0x01f0, MCF_GPIO_PAR_QSPI);
+}
+#endif /* defined(CONFIG_SPI_COLDFIRE_QSPI) || defined(CONFIG_SPI_COLDFIRE_QSPI_MODULE) */
+
+
static struct platform_device *m532x_devices[] __initdata = {
&m532x_uart,
&m532x_fec,
+#if defined(CONFIG_SPI_COLDFIRE_QSPI) || defined(CONFIG_SPI_COLDFIRE_QSPI_MODULE)
+ &m532x_qspi,
+#endif
};
/***************************************************************************/
@@ -158,6 +279,9 @@ static int __init init_BSP(void)
{
m532x_uarts_init();
m532x_fec_init();
+#if defined(CONFIG_SPI_COLDFIRE_QSPI) || defined(CONFIG_SPI_COLDFIRE_QSPI_MODULE)
+ m532x_qspi_init();
+#endif
platform_add_devices(m532x_devices, ARRAY_SIZE(m532x_devices));
return 0;
}
diff --git a/arch/m68knommu/platform/68360/commproc.c b/arch/m68knommu/platform/68360/commproc.c
index 6acb8d294cb6..f27e688c404e 100644
--- a/arch/m68knommu/platform/68360/commproc.c
+++ b/arch/m68knommu/platform/68360/commproc.c
@@ -110,7 +110,7 @@ void m360_cpm_reset()
/* pte = find_pte(&init_mm, host_page_addr); */
/* pte_val(*pte) |= _PAGE_NO_CACHE; */
/* flush_tlb_page(current->mm->mmap, host_buffer); */
-
+
/* Tell everyone where the comm processor resides.
*/
/* cpmp = (cpm360_t *)commproc; */
@@ -191,7 +191,7 @@ cpm_interrupt(int irq, void * dev, struct pt_regs * regs)
*/
((immap_t *)IMAP_ADDR)->im_cpic.cpic_cisr |= (1 << vec);
#endif
-
+
}
/* The CPM can generate the error interrupt when there is a race condition
diff --git a/arch/microblaze/include/asm/device.h b/arch/microblaze/include/asm/device.h
index 402b46e630f6..123b2fe72d01 100644
--- a/arch/microblaze/include/asm/device.h
+++ b/arch/microblaze/include/asm/device.h
@@ -12,29 +12,15 @@
struct device_node;
struct dev_archdata {
- /* Optional pointer to an OF device node */
- struct device_node *of_node;
-
/* DMA operations on that device */
struct dma_map_ops *dma_ops;
void *dma_data;
};
struct pdev_archdata {
+ u64 dma_mask;
};
-static inline void dev_archdata_set_node(struct dev_archdata *ad,
- struct device_node *np)
-{
- ad->of_node = np;
-}
-
-static inline struct device_node *
-dev_archdata_get_node(const struct dev_archdata *ad)
-{
- return ad->of_node;
-}
-
#endif /* _ASM_MICROBLAZE_DEVICE_H */
diff --git a/arch/microblaze/include/asm/of_device.h b/arch/microblaze/include/asm/of_device.h
index ba917cfaefe6..73cb98040982 100644
--- a/arch/microblaze/include/asm/of_device.h
+++ b/arch/microblaze/include/asm/of_device.h
@@ -21,9 +21,8 @@
* probed using OF properties.
*/
struct of_device {
- struct device_node *node; /* to be obsoleted */
- u64 dma_mask; /* DMA mask */
struct device dev; /* Generic device interface */
+ struct pdev_archdata archdata;
};
extern ssize_t of_device_get_modalias(struct of_device *ofdev,
diff --git a/arch/microblaze/include/asm/scatterlist.h b/arch/microblaze/include/asm/scatterlist.h
index 35d786fe93ae..dc4a8900cc80 100644
--- a/arch/microblaze/include/asm/scatterlist.h
+++ b/arch/microblaze/include/asm/scatterlist.h
@@ -1 +1,3 @@
#include <asm-generic/scatterlist.h>
+
+#define ISA_DMA_THRESHOLD (~0UL)
diff --git a/arch/microblaze/include/asm/system.h b/arch/microblaze/include/asm/system.h
index 59efb3fef957..48c4f0335e3f 100644
--- a/arch/microblaze/include/asm/system.h
+++ b/arch/microblaze/include/asm/system.h
@@ -12,6 +12,7 @@
#include <asm/registers.h>
#include <asm/setup.h>
#include <asm/irqflags.h>
+#include <asm/cache.h>
#include <asm-generic/cmpxchg.h>
#include <asm-generic/cmpxchg-local.h>
@@ -96,4 +97,14 @@ extern struct dentry *of_debugfs_root;
#define arch_align_stack(x) (x)
+/*
+ * MicroBlaze doesn't handle unaligned accesses in hardware.
+ *
+ * Based on this we force the IP header alignment in network drivers.
+ * We also modify NET_SKB_PAD to be a cacheline in size, thus maintaining
+ * cacheline alignment of buffers.
+ */
+#define NET_IP_ALIGN 2
+#define NET_SKB_PAD L1_CACHE_BYTES
+
#endif /* _ASM_MICROBLAZE_SYSTEM_H */
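
The comment added above is the whole contract: with NET_IP_ALIGN set to 2, a driver that follows the usual receive pattern ends up with the IP header word-aligned after the 14-byte Ethernet header, which matters on a core that cannot fix up unaligned loads. A generic example of that pattern (ordinary driver code, not from this patch):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static struct sk_buff *rx_alloc_skb(struct net_device *dev, unsigned int len)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);

	if (skb)
		skb_reserve(skb, NET_IP_ALIGN);	/* shift payload by 2 bytes */
	return skb;
}
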
diff --git a/arch/microblaze/include/asm/thread_info.h b/arch/microblaze/include/asm/thread_info.h
index b2ca80f64640..8a8e9fc6e0c0 100644
--- a/arch/microblaze/include/asm/thread_info.h
+++ b/arch/microblaze/include/asm/thread_info.h
@@ -122,7 +122,7 @@ static inline struct thread_info *current_thread_info(void)
/* restore singlestep on return to user mode */
#define TIF_SINGLESTEP 4
#define TIF_IRET 5 /* return with iret */
-#define TIF_MEMDIE 6
+#define TIF_MEMDIE 6 /* is terminating due to OOM killer */
#define TIF_SYSCALL_AUDIT 9 /* syscall auditing active */
#define TIF_SECCOMP 10 /* secure computing */
#define TIF_FREEZE 14 /* Freezing for suspend */
diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h
index 446bec29b142..26460d15b338 100644
--- a/arch/microblaze/include/asm/uaccess.h
+++ b/arch/microblaze/include/asm/uaccess.h
@@ -182,6 +182,39 @@ extern long __user_bad(void);
* Returns zero on success, or -EFAULT on error.
* On error, the variable @x is set to zero.
*/
+#define get_user(x, ptr) \
+ __get_user_check((x), (ptr), sizeof(*(ptr)))
+
+#define __get_user_check(x, ptr, size) \
+({ \
+ unsigned long __gu_val = 0; \
+ const typeof(*(ptr)) __user *__gu_addr = (ptr); \
+ int __gu_err = 0; \
+ \
+ if (access_ok(VERIFY_READ, __gu_addr, size)) { \
+ switch (size) { \
+ case 1: \
+ __get_user_asm("lbu", __gu_addr, __gu_val, \
+ __gu_err); \
+ break; \
+ case 2: \
+ __get_user_asm("lhu", __gu_addr, __gu_val, \
+ __gu_err); \
+ break; \
+ case 4: \
+ __get_user_asm("lw", __gu_addr, __gu_val, \
+ __gu_err); \
+ break; \
+ default: \
+ __gu_err = __user_bad(); \
+ break; \
+ } \
+ } else { \
+ __gu_err = -EFAULT; \
+ } \
+ x = (typeof(*(ptr)))__gu_val; \
+ __gu_err; \
+})
#define __get_user(x, ptr) \
({ \
@@ -206,12 +239,6 @@ extern long __user_bad(void);
})
-#define get_user(x, ptr) \
-({ \
- access_ok(VERIFY_READ, (ptr), sizeof(*(ptr))) \
- ? __get_user((x), (ptr)) : -EFAULT; \
-})
-
#define __put_user_asm(insn, __gu_ptr, __gu_val, __gu_err) \
({ \
__asm__ __volatile__ ( \
@@ -266,6 +293,42 @@ extern long __user_bad(void);
*
* Returns zero on success, or -EFAULT on error.
*/
+#define put_user(x, ptr) \
+ __put_user_check((x), (ptr), sizeof(*(ptr)))
+
+#define __put_user_check(x, ptr, size) \
+({ \
+ typeof(*(ptr)) __pu_val; \
+ typeof(*(ptr)) __user *__pu_addr = (ptr); \
+ int __pu_err = 0; \
+ \
+ __pu_val = (x); \
+ if (access_ok(VERIFY_WRITE, __pu_addr, size)) { \
+ switch (size) { \
+ case 1: \
+ __put_user_asm("sb", __pu_addr, __pu_val, \
+ __pu_err); \
+ break; \
+ case 2: \
+ __put_user_asm("sh", __pu_addr, __pu_val, \
+ __pu_err); \
+ break; \
+ case 4: \
+ __put_user_asm("sw", __pu_addr, __pu_val, \
+ __pu_err); \
+ break; \
+ case 8: \
+ __put_user_asm_8(__pu_addr, __pu_val, __pu_err);\
+ break; \
+ default: \
+ __pu_err = __user_bad(); \
+ break; \
+ } \
+ } else { \
+ __pu_err = -EFAULT; \
+ } \
+ __pu_err; \
+})
#define __put_user(x, ptr) \
({ \
@@ -290,18 +353,6 @@ extern long __user_bad(void);
__gu_err; \
})
-#ifndef CONFIG_MMU
-
-#define put_user(x, ptr) __put_user((x), (ptr))
-
-#else /* CONFIG_MMU */
-
-#define put_user(x, ptr) \
-({ \
- access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) \
- ? __put_user((x), (ptr)) : -EFAULT; \
-})
-#endif /* CONFIG_MMU */
/* copy_to_from_user */
#define __copy_from_user(to, from, n) \
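
The get_user()/put_user() definitions added above fold the access_ok() range check into the macro and dispatch on the operand size, so callers can use them directly on unchecked user pointers. A typical (hypothetical) caller looks like this:

#include <linux/uaccess.h>

/* Sketch only: read an int from user space, write a derived value back. */
static int update_user_value(int __user *uptr)
{
	int v;

	if (get_user(v, uptr))		/* -EFAULT on a bad pointer */
		return -EFAULT;
	return put_user(v * 2, uptr);
}
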
diff --git a/arch/microblaze/kernel/cpu/cache.c b/arch/microblaze/kernel/cpu/cache.c
index 21c3a92394de..109876e8d643 100644
--- a/arch/microblaze/kernel/cpu/cache.c
+++ b/arch/microblaze/kernel/cpu/cache.c
@@ -137,8 +137,9 @@ do { \
do { \
int step = -line_length; \
int align = ~(line_length - 1); \
+ int count; \
end = ((end & align) == end) ? end - line_length : end & align; \
- int count = end - start; \
+ count = end - start; \
WARN_ON(count < 0); \
\
__asm__ __volatile__ (" 1: " #op " %0, %1; \
diff --git a/arch/microblaze/kernel/entry-nommu.S b/arch/microblaze/kernel/entry-nommu.S
index 391d6197fc3b..8cc18cd2cce6 100644
--- a/arch/microblaze/kernel/entry-nommu.S
+++ b/arch/microblaze/kernel/entry-nommu.S
@@ -476,6 +476,8 @@ ENTRY(ret_from_fork)
nop
work_pending:
+ enable_irq
+
andi r11, r19, _TIF_NEED_RESCHED
beqi r11, 1f
bralid r15, schedule
diff --git a/arch/microblaze/kernel/microblaze_ksyms.c b/arch/microblaze/kernel/microblaze_ksyms.c
index bc4dcb7d3861..ff85f7718035 100644
--- a/arch/microblaze/kernel/microblaze_ksyms.c
+++ b/arch/microblaze/kernel/microblaze_ksyms.c
@@ -52,3 +52,14 @@ EXPORT_SYMBOL_GPL(_ebss);
extern void _mcount(void);
EXPORT_SYMBOL(_mcount);
#endif
+
+/*
+ * Assembly functions that may be used (directly or indirectly) by modules
+ */
+EXPORT_SYMBOL(__copy_tofrom_user);
+EXPORT_SYMBOL(__strncpy_user);
+
+#ifdef CONFIG_OPT_LIB_ASM
+EXPORT_SYMBOL(memcpy);
+EXPORT_SYMBOL(memmove);
+#endif
diff --git a/arch/microblaze/kernel/module.c b/arch/microblaze/kernel/module.c
index cbecf110dc30..0e73f6606547 100644
--- a/arch/microblaze/kernel/module.c
+++ b/arch/microblaze/kernel/module.c
@@ -16,6 +16,7 @@
#include <linux/string.h>
#include <asm/pgtable.h>
+#include <asm/cacheflush.h>
void *module_alloc(unsigned long size)
{
@@ -151,6 +152,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
struct module *module)
{
+ flush_dcache();
return 0;
}
diff --git a/arch/microblaze/kernel/of_device.c b/arch/microblaze/kernel/of_device.c
index 9a0f7632c47c..b372787886ed 100644
--- a/arch/microblaze/kernel/of_device.c
+++ b/arch/microblaze/kernel/of_device.c
@@ -12,7 +12,7 @@
void of_device_make_bus_id(struct of_device *dev)
{
static atomic_t bus_no_reg_magic;
- struct device_node *node = dev->node;
+ struct device_node *node = dev->dev.of_node;
const u32 *reg;
u64 addr;
int magic;
@@ -49,11 +49,10 @@ struct of_device *of_device_alloc(struct device_node *np,
if (!dev)
return NULL;
- dev->node = of_node_get(np);
- dev->dev.dma_mask = &dev->dma_mask;
+ dev->dev.of_node = of_node_get(np);
+ dev->dev.dma_mask = &dev->archdata.dma_mask;
dev->dev.parent = parent;
dev->dev.release = of_release_dev;
- dev->dev.archdata.of_node = np;
if (bus_id)
dev_set_name(&dev->dev, bus_id);
@@ -75,17 +74,17 @@ int of_device_uevent(struct device *dev, struct kobj_uevent_env *env)
ofdev = to_of_device(dev);
- if (add_uevent_var(env, "OF_NAME=%s", ofdev->node->name))
+ if (add_uevent_var(env, "OF_NAME=%s", ofdev->dev.of_node->name))
return -ENOMEM;
- if (add_uevent_var(env, "OF_TYPE=%s", ofdev->node->type))
+ if (add_uevent_var(env, "OF_TYPE=%s", ofdev->dev.of_node->type))
return -ENOMEM;
/* Since the compatible field can contain pretty much anything
* it's not really legal to split it out with commas. We split it
* up using a number of environment variables instead. */
- compat = of_get_property(ofdev->node, "compatible", &cplen);
+ compat = of_get_property(ofdev->dev.of_node, "compatible", &cplen);
while (compat && *compat && cplen > 0) {
if (add_uevent_var(env, "OF_COMPATIBLE_%d=%s", seen, compat))
return -ENOMEM;
diff --git a/arch/microblaze/kernel/of_platform.c b/arch/microblaze/kernel/of_platform.c
index 0dc755286d38..ccf6f4257f4b 100644
--- a/arch/microblaze/kernel/of_platform.c
+++ b/arch/microblaze/kernel/of_platform.c
@@ -47,7 +47,7 @@ struct of_device *of_platform_device_create(struct device_node *np,
if (!dev)
return NULL;
- dev->dma_mask = 0xffffffffUL;
+ dev->archdata.dma_mask = 0xffffffffUL;
dev->dev.bus = &of_platform_bus_type;
/* We do not fill the DMA ops for platform devices by default.
@@ -166,7 +166,7 @@ EXPORT_SYMBOL(of_platform_bus_probe);
static int of_dev_node_match(struct device *dev, void *data)
{
- return to_of_device(dev)->node == data;
+ return to_of_device(dev)->dev.of_node == data;
}
struct of_device *of_find_device_by_node(struct device_node *np)
@@ -184,7 +184,7 @@ EXPORT_SYMBOL(of_find_device_by_node);
static int of_dev_phandle_match(struct device *dev, void *data)
{
phandle *ph = data;
- return to_of_device(dev)->node->phandle == *ph;
+ return to_of_device(dev)->dev.of_node->phandle == *ph;
}
struct of_device *of_find_device_by_phandle(phandle ph)
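
The two files above move the device-tree pointer from ofdev->node (and archdata.of_node) to the generic dev.of_node, and the DMA mask into pdev_archdata. Drivers of the same vintage make the same rename; a hedged sketch of the driver-side effect (foo_probe, the of_platform probe signature and __devinit are assumptions for this era, not taken from the patch):

#include <linux/init.h>
#include <linux/of_platform.h>

static int __devinit foo_probe(struct of_device *ofdev,
			       const struct of_device_id *match)
{
	struct device_node *np = ofdev->dev.of_node;	/* was ofdev->node */
	const u32 *reg = of_get_property(np, "reg", NULL);

	if (!reg)
		return -ENODEV;
	dev_info(&ofdev->dev, "reg = 0x%08x\n", *reg);
	return 0;
}
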
diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c
index f42c2dde8b1c..cca3579d4268 100644
--- a/arch/microblaze/mm/init.c
+++ b/arch/microblaze/mm/init.c
@@ -47,6 +47,7 @@ unsigned long memory_start;
EXPORT_SYMBOL(memory_start);
unsigned long memory_end; /* due to mm/nommu.c */
unsigned long memory_size;
+EXPORT_SYMBOL(memory_size);
/*
* paging_init() sets up the page tables - in fact we've already done this.
diff --git a/arch/microblaze/mm/pgtable.c b/arch/microblaze/mm/pgtable.c
index 784557fb28cf..59bf2335a4ce 100644
--- a/arch/microblaze/mm/pgtable.c
+++ b/arch/microblaze/mm/pgtable.c
@@ -42,6 +42,7 @@
unsigned long ioremap_base;
unsigned long ioremap_bot;
+EXPORT_SYMBOL(ioremap_bot);
/* The maximum lowmem defaults to 768Mb, but this can be configured to
* another value.
diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c
index 01c8c97c15b7..9cb782b8e036 100644
--- a/arch/microblaze/pci/pci-common.c
+++ b/arch/microblaze/pci/pci-common.c
@@ -1507,7 +1507,7 @@ void pcibios_finish_adding_to_bus(struct pci_bus *bus)
pci_bus_add_devices(bus);
/* Fixup EEH */
- eeh_add_device_tree_late(bus);
+ /* eeh_add_device_tree_late(bus); */
}
EXPORT_SYMBOL_GPL(pcibios_finish_adding_to_bus);
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 7e6fd1cbd3f8..cdaae942623d 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1075,6 +1075,8 @@ config CPU_LOONGSON2F
bool "Loongson 2F"
depends on SYS_HAS_CPU_LOONGSON2F
select CPU_LOONGSON2
+ select GENERIC_GPIO
+ select ARCH_REQUIRE_GPIOLIB
help
The Loongson 2F processor implements the MIPS III instruction set
with many extensions.
diff --git a/arch/mips/alchemy/common/dbdma.c b/arch/mips/alchemy/common/dbdma.c
index 99ae84ce5af3..ca0506a8585a 100644
--- a/arch/mips/alchemy/common/dbdma.c
+++ b/arch/mips/alchemy/common/dbdma.c
@@ -36,6 +36,7 @@
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/module.h>
+#include <linux/sysdev.h>
#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-au1x00/au1xxx_dbdma.h>
@@ -174,10 +175,6 @@ static dbdev_tab_t dbdev_tab[] = {
#define DBDEV_TAB_SIZE ARRAY_SIZE(dbdev_tab)
-#ifdef CONFIG_PM
-static u32 au1xxx_dbdma_pm_regs[NUM_DBDMA_CHANS + 1][6];
-#endif
-
static chan_tab_t *chan_tab_ptr[NUM_DBDMA_CHANS];
@@ -960,29 +957,37 @@ u32 au1xxx_dbdma_put_dscr(u32 chanid, au1x_ddma_desc_t *dscr)
return nbytes;
}
-#ifdef CONFIG_PM
-void au1xxx_dbdma_suspend(void)
+
+struct alchemy_dbdma_sysdev {
+ struct sys_device sysdev;
+ u32 pm_regs[NUM_DBDMA_CHANS + 1][6];
+};
+
+static int alchemy_dbdma_suspend(struct sys_device *dev,
+ pm_message_t state)
{
+ struct alchemy_dbdma_sysdev *sdev =
+ container_of(dev, struct alchemy_dbdma_sysdev, sysdev);
int i;
u32 addr;
addr = DDMA_GLOBAL_BASE;
- au1xxx_dbdma_pm_regs[0][0] = au_readl(addr + 0x00);
- au1xxx_dbdma_pm_regs[0][1] = au_readl(addr + 0x04);
- au1xxx_dbdma_pm_regs[0][2] = au_readl(addr + 0x08);
- au1xxx_dbdma_pm_regs[0][3] = au_readl(addr + 0x0c);
+ sdev->pm_regs[0][0] = au_readl(addr + 0x00);
+ sdev->pm_regs[0][1] = au_readl(addr + 0x04);
+ sdev->pm_regs[0][2] = au_readl(addr + 0x08);
+ sdev->pm_regs[0][3] = au_readl(addr + 0x0c);
/* save channel configurations */
for (i = 1, addr = DDMA_CHANNEL_BASE; i <= NUM_DBDMA_CHANS; i++) {
- au1xxx_dbdma_pm_regs[i][0] = au_readl(addr + 0x00);
- au1xxx_dbdma_pm_regs[i][1] = au_readl(addr + 0x04);
- au1xxx_dbdma_pm_regs[i][2] = au_readl(addr + 0x08);
- au1xxx_dbdma_pm_regs[i][3] = au_readl(addr + 0x0c);
- au1xxx_dbdma_pm_regs[i][4] = au_readl(addr + 0x10);
- au1xxx_dbdma_pm_regs[i][5] = au_readl(addr + 0x14);
+ sdev->pm_regs[i][0] = au_readl(addr + 0x00);
+ sdev->pm_regs[i][1] = au_readl(addr + 0x04);
+ sdev->pm_regs[i][2] = au_readl(addr + 0x08);
+ sdev->pm_regs[i][3] = au_readl(addr + 0x0c);
+ sdev->pm_regs[i][4] = au_readl(addr + 0x10);
+ sdev->pm_regs[i][5] = au_readl(addr + 0x14);
/* halt channel */
- au_writel(au1xxx_dbdma_pm_regs[i][0] & ~1, addr + 0x00);
+ au_writel(sdev->pm_regs[i][0] & ~1, addr + 0x00);
au_sync();
while (!(au_readl(addr + 0x14) & 1))
au_sync();
@@ -992,32 +997,65 @@ void au1xxx_dbdma_suspend(void)
/* disable channel interrupts */
au_writel(0, DDMA_GLOBAL_BASE + 0x0c);
au_sync();
+
+ return 0;
}
-void au1xxx_dbdma_resume(void)
+static int alchemy_dbdma_resume(struct sys_device *dev)
{
+ struct alchemy_dbdma_sysdev *sdev =
+ container_of(dev, struct alchemy_dbdma_sysdev, sysdev);
int i;
u32 addr;
addr = DDMA_GLOBAL_BASE;
- au_writel(au1xxx_dbdma_pm_regs[0][0], addr + 0x00);
- au_writel(au1xxx_dbdma_pm_regs[0][1], addr + 0x04);
- au_writel(au1xxx_dbdma_pm_regs[0][2], addr + 0x08);
- au_writel(au1xxx_dbdma_pm_regs[0][3], addr + 0x0c);
+ au_writel(sdev->pm_regs[0][0], addr + 0x00);
+ au_writel(sdev->pm_regs[0][1], addr + 0x04);
+ au_writel(sdev->pm_regs[0][2], addr + 0x08);
+ au_writel(sdev->pm_regs[0][3], addr + 0x0c);
/* restore channel configurations */
for (i = 1, addr = DDMA_CHANNEL_BASE; i <= NUM_DBDMA_CHANS; i++) {
- au_writel(au1xxx_dbdma_pm_regs[i][0], addr + 0x00);
- au_writel(au1xxx_dbdma_pm_regs[i][1], addr + 0x04);
- au_writel(au1xxx_dbdma_pm_regs[i][2], addr + 0x08);
- au_writel(au1xxx_dbdma_pm_regs[i][3], addr + 0x0c);
- au_writel(au1xxx_dbdma_pm_regs[i][4], addr + 0x10);
- au_writel(au1xxx_dbdma_pm_regs[i][5], addr + 0x14);
+ au_writel(sdev->pm_regs[i][0], addr + 0x00);
+ au_writel(sdev->pm_regs[i][1], addr + 0x04);
+ au_writel(sdev->pm_regs[i][2], addr + 0x08);
+ au_writel(sdev->pm_regs[i][3], addr + 0x0c);
+ au_writel(sdev->pm_regs[i][4], addr + 0x10);
+ au_writel(sdev->pm_regs[i][5], addr + 0x14);
au_sync();
addr += 0x100; /* next channel base */
}
+
+ return 0;
+}
+
+static struct sysdev_class alchemy_dbdma_sysdev_class = {
+ .name = "dbdma",
+ .suspend = alchemy_dbdma_suspend,
+ .resume = alchemy_dbdma_resume,
+};
+
+static int __init alchemy_dbdma_sysdev_init(void)
+{
+ struct alchemy_dbdma_sysdev *sdev;
+ int ret;
+
+ ret = sysdev_class_register(&alchemy_dbdma_sysdev_class);
+ if (ret)
+ return ret;
+
+ sdev = kzalloc(sizeof(struct alchemy_dbdma_sysdev), GFP_KERNEL);
+ if (!sdev)
+ return -ENOMEM;
+
+ sdev->sysdev.id = -1;
+ sdev->sysdev.cls = &alchemy_dbdma_sysdev_class;
+ ret = sysdev_register(&sdev->sysdev);
+ if (ret)
+ kfree(sdev);
+
+ return ret;
}
-#endif /* CONFIG_PM */
static int __init au1xxx_dbdma_init(void)
{
@@ -1046,6 +1084,11 @@ static int __init au1xxx_dbdma_init(void)
else {
dbdma_initialized = 1;
printk(KERN_INFO "Alchemy DBDMA initialized\n");
+ ret = alchemy_dbdma_sysdev_init();
+ if (ret) {
+ printk(KERN_ERR "DBDMA PM init failed\n");
+ ret = 0;
+ }
}
return ret;
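
[Illustrative sketch, not part of the patch] The dbdma.c hunk above replaces file-scope CONFIG_PM save/restore arrays with per-instance state embedded next to a struct sys_device, recovered in the class callbacks via container_of(). A minimal, hedged sketch of that sysdev pattern with illustrative names:

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sysdev.h>

struct example_sysdev {
	struct sys_device sysdev;	/* must be embedded, not a pointer */
	u32 saved_state;		/* per-instance PM state lives here */
};

static int example_suspend(struct sys_device *dev, pm_message_t state)
{
	/* recover the wrapper object from the embedded member */
	struct example_sysdev *ex =
		container_of(dev, struct example_sysdev, sysdev);

	ex->saved_state = 42;	/* stand-in for reading hardware registers */
	return 0;
}

static struct sysdev_class example_sysdev_class = {
	.name	 = "example",
	.suspend = example_suspend,
};

static int __init example_sysdev_init(void)
{
	struct example_sysdev *ex;
	int ret;

	ret = sysdev_class_register(&example_sysdev_class);
	if (ret)
		return ret;

	ex = kzalloc(sizeof(*ex), GFP_KERNEL);
	if (!ex)
		return -ENOMEM;

	ex->sysdev.id  = -1;
	ex->sysdev.cls = &example_sysdev_class;
	ret = sysdev_register(&ex->sysdev);
	if (ret)
		kfree(ex);
	return ret;
}
device_initcall(example_sysdev_init);
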
diff --git a/arch/mips/alchemy/common/irq.c b/arch/mips/alchemy/common/irq.c
index b2821ace4d00..9f78ada83b3c 100644
--- a/arch/mips/alchemy/common/irq.c
+++ b/arch/mips/alchemy/common/irq.c
@@ -29,6 +29,8 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/slab.h>
+#include <linux/sysdev.h>
#include <asm/irq_cpu.h>
#include <asm/mipsregs.h>
@@ -216,90 +218,6 @@ struct au1xxx_irqmap au1200_irqmap[] __initdata = {
};
-#ifdef CONFIG_PM
-
-/*
- * Save/restore the interrupt controller state.
- * Called from the save/restore core registers as part of the
- * au_sleep function in power.c.....maybe I should just pm_register()
- * them instead?
- */
-static unsigned int sleep_intctl_config0[2];
-static unsigned int sleep_intctl_config1[2];
-static unsigned int sleep_intctl_config2[2];
-static unsigned int sleep_intctl_src[2];
-static unsigned int sleep_intctl_assign[2];
-static unsigned int sleep_intctl_wake[2];
-static unsigned int sleep_intctl_mask[2];
-
-void save_au1xxx_intctl(void)
-{
- sleep_intctl_config0[0] = au_readl(IC0_CFG0RD);
- sleep_intctl_config1[0] = au_readl(IC0_CFG1RD);
- sleep_intctl_config2[0] = au_readl(IC0_CFG2RD);
- sleep_intctl_src[0] = au_readl(IC0_SRCRD);
- sleep_intctl_assign[0] = au_readl(IC0_ASSIGNRD);
- sleep_intctl_wake[0] = au_readl(IC0_WAKERD);
- sleep_intctl_mask[0] = au_readl(IC0_MASKRD);
-
- sleep_intctl_config0[1] = au_readl(IC1_CFG0RD);
- sleep_intctl_config1[1] = au_readl(IC1_CFG1RD);
- sleep_intctl_config2[1] = au_readl(IC1_CFG2RD);
- sleep_intctl_src[1] = au_readl(IC1_SRCRD);
- sleep_intctl_assign[1] = au_readl(IC1_ASSIGNRD);
- sleep_intctl_wake[1] = au_readl(IC1_WAKERD);
- sleep_intctl_mask[1] = au_readl(IC1_MASKRD);
-}
-
-/*
- * For most restore operations, we clear the entire register and
- * then set the bits we found during the save.
- */
-void restore_au1xxx_intctl(void)
-{
- au_writel(0xffffffff, IC0_MASKCLR); au_sync();
-
- au_writel(0xffffffff, IC0_CFG0CLR); au_sync();
- au_writel(sleep_intctl_config0[0], IC0_CFG0SET); au_sync();
- au_writel(0xffffffff, IC0_CFG1CLR); au_sync();
- au_writel(sleep_intctl_config1[0], IC0_CFG1SET); au_sync();
- au_writel(0xffffffff, IC0_CFG2CLR); au_sync();
- au_writel(sleep_intctl_config2[0], IC0_CFG2SET); au_sync();
- au_writel(0xffffffff, IC0_SRCCLR); au_sync();
- au_writel(sleep_intctl_src[0], IC0_SRCSET); au_sync();
- au_writel(0xffffffff, IC0_ASSIGNCLR); au_sync();
- au_writel(sleep_intctl_assign[0], IC0_ASSIGNSET); au_sync();
- au_writel(0xffffffff, IC0_WAKECLR); au_sync();
- au_writel(sleep_intctl_wake[0], IC0_WAKESET); au_sync();
- au_writel(0xffffffff, IC0_RISINGCLR); au_sync();
- au_writel(0xffffffff, IC0_FALLINGCLR); au_sync();
- au_writel(0x00000000, IC0_TESTBIT); au_sync();
-
- au_writel(0xffffffff, IC1_MASKCLR); au_sync();
-
- au_writel(0xffffffff, IC1_CFG0CLR); au_sync();
- au_writel(sleep_intctl_config0[1], IC1_CFG0SET); au_sync();
- au_writel(0xffffffff, IC1_CFG1CLR); au_sync();
- au_writel(sleep_intctl_config1[1], IC1_CFG1SET); au_sync();
- au_writel(0xffffffff, IC1_CFG2CLR); au_sync();
- au_writel(sleep_intctl_config2[1], IC1_CFG2SET); au_sync();
- au_writel(0xffffffff, IC1_SRCCLR); au_sync();
- au_writel(sleep_intctl_src[1], IC1_SRCSET); au_sync();
- au_writel(0xffffffff, IC1_ASSIGNCLR); au_sync();
- au_writel(sleep_intctl_assign[1], IC1_ASSIGNSET); au_sync();
- au_writel(0xffffffff, IC1_WAKECLR); au_sync();
- au_writel(sleep_intctl_wake[1], IC1_WAKESET); au_sync();
- au_writel(0xffffffff, IC1_RISINGCLR); au_sync();
- au_writel(0xffffffff, IC1_FALLINGCLR); au_sync();
- au_writel(0x00000000, IC1_TESTBIT); au_sync();
-
- au_writel(sleep_intctl_mask[1], IC1_MASKSET); au_sync();
-
- au_writel(sleep_intctl_mask[0], IC0_MASKSET); au_sync();
-}
-#endif /* CONFIG_PM */
-
-
static void au1x_ic0_unmask(unsigned int irq_nr)
{
unsigned int bit = irq_nr - AU1000_INTC0_INT_BASE;
@@ -635,3 +553,91 @@ void __init arch_init_irq(void)
break;
}
}
+
+struct alchemy_ic_sysdev {
+ struct sys_device sysdev;
+ void __iomem *base;
+ unsigned long pmdata[7];
+};
+
+static int alchemy_ic_suspend(struct sys_device *dev, pm_message_t state)
+{
+ struct alchemy_ic_sysdev *icdev =
+ container_of(dev, struct alchemy_ic_sysdev, sysdev);
+
+ icdev->pmdata[0] = __raw_readl(icdev->base + IC_CFG0RD);
+ icdev->pmdata[1] = __raw_readl(icdev->base + IC_CFG1RD);
+ icdev->pmdata[2] = __raw_readl(icdev->base + IC_CFG2RD);
+ icdev->pmdata[3] = __raw_readl(icdev->base + IC_SRCRD);
+ icdev->pmdata[4] = __raw_readl(icdev->base + IC_ASSIGNRD);
+ icdev->pmdata[5] = __raw_readl(icdev->base + IC_WAKERD);
+ icdev->pmdata[6] = __raw_readl(icdev->base + IC_MASKRD);
+
+ return 0;
+}
+
+static int alchemy_ic_resume(struct sys_device *dev)
+{
+ struct alchemy_ic_sysdev *icdev =
+ container_of(dev, struct alchemy_ic_sysdev, sysdev);
+
+ __raw_writel(0xffffffff, icdev->base + IC_MASKCLR);
+ __raw_writel(0xffffffff, icdev->base + IC_CFG0CLR);
+ __raw_writel(0xffffffff, icdev->base + IC_CFG1CLR);
+ __raw_writel(0xffffffff, icdev->base + IC_CFG2CLR);
+ __raw_writel(0xffffffff, icdev->base + IC_SRCCLR);
+ __raw_writel(0xffffffff, icdev->base + IC_ASSIGNCLR);
+ __raw_writel(0xffffffff, icdev->base + IC_WAKECLR);
+ __raw_writel(0xffffffff, icdev->base + IC_RISINGCLR);
+ __raw_writel(0xffffffff, icdev->base + IC_FALLINGCLR);
+ __raw_writel(0x00000000, icdev->base + IC_TESTBIT);
+ wmb();
+ __raw_writel(icdev->pmdata[0], icdev->base + IC_CFG0SET);
+ __raw_writel(icdev->pmdata[1], icdev->base + IC_CFG1SET);
+ __raw_writel(icdev->pmdata[2], icdev->base + IC_CFG2SET);
+ __raw_writel(icdev->pmdata[3], icdev->base + IC_SRCSET);
+ __raw_writel(icdev->pmdata[4], icdev->base + IC_ASSIGNSET);
+ __raw_writel(icdev->pmdata[5], icdev->base + IC_WAKESET);
+ wmb();
+
+ __raw_writel(icdev->pmdata[6], icdev->base + IC_MASKSET);
+ wmb();
+
+ return 0;
+}
+
+static struct sysdev_class alchemy_ic_sysdev_class = {
+ .name = "ic",
+ .suspend = alchemy_ic_suspend,
+ .resume = alchemy_ic_resume,
+};
+
+static int __init alchemy_ic_sysdev_init(void)
+{
+ struct alchemy_ic_sysdev *icdev;
+ unsigned long icbase[2] = { IC0_PHYS_ADDR, IC1_PHYS_ADDR };
+ int err, i;
+
+ err = sysdev_class_register(&alchemy_ic_sysdev_class);
+ if (err)
+ return err;
+
+ for (i = 0; i < 2; i++) {
+ icdev = kzalloc(sizeof(struct alchemy_ic_sysdev), GFP_KERNEL);
+ if (!icdev)
+ return -ENOMEM;
+
+ icdev->base = ioremap(icbase[i], 0x1000);
+
+ icdev->sysdev.id = i;
+ icdev->sysdev.cls = &alchemy_ic_sysdev_class;
+ err = sysdev_register(&icdev->sysdev);
+ if (err) {
+ kfree(icdev);
+ return err;
+ }
+ }
+
+ return 0;
+}
+device_initcall(alchemy_ic_sysdev_init);
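
[Illustrative sketch, not part of the patch] The new alchemy_ic_resume() above uses raw MMIO accessors with explicit write barriers instead of the old au_writel()/au_sync() pairs: __raw_writel() carries no implicit ordering, so a wmb() separates the "clear everything" writes from the "restore saved state" writes. A minimal sketch of that idiom; the helper name and register offsets are made up:

#include <linux/io.h>

/* hypothetical helper, for illustration only */
static void example_restore(void __iomem *base, u32 saved_cfg, u32 saved_mask)
{
	__raw_writel(0xffffffff, base + 0x00);	/* clear all config bits */
	wmb();					/* clears reach the device first */
	__raw_writel(saved_cfg,  base + 0x04);	/* then restore configuration */
	wmb();
	__raw_writel(saved_mask, base + 0x08);	/* unmask last */
	wmb();
}
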
diff --git a/arch/mips/alchemy/common/power.c b/arch/mips/alchemy/common/power.c
index 6ab7b42aa1be..14eb8c492da2 100644
--- a/arch/mips/alchemy/common/power.c
+++ b/arch/mips/alchemy/common/power.c
@@ -36,9 +36,6 @@
#include <asm/uaccess.h>
#include <asm/mach-au1x00/au1000.h>
-#if defined(CONFIG_SOC_AU1550) || defined(CONFIG_SOC_AU1200)
-#include <asm/mach-au1x00/au1xxx_dbdma.h>
-#endif
#ifdef CONFIG_PM
@@ -106,9 +103,6 @@ static void save_core_regs(void)
sleep_usb[1] = au_readl(0xb4020024); /* OTG_MUX */
#endif
- /* Save interrupt controller state. */
- save_au1xxx_intctl();
-
/* Clocks and PLLs. */
sleep_sys_clocks[0] = au_readl(SYS_FREQCTRL0);
sleep_sys_clocks[1] = au_readl(SYS_FREQCTRL1);
@@ -132,10 +126,6 @@ static void save_core_regs(void)
sleep_static_memctlr[3][0] = au_readl(MEM_STCFG3);
sleep_static_memctlr[3][1] = au_readl(MEM_STTIME3);
sleep_static_memctlr[3][2] = au_readl(MEM_STADDR3);
-
-#if defined(CONFIG_SOC_AU1550) || defined(CONFIG_SOC_AU1200)
- au1xxx_dbdma_suspend();
-#endif
}
static void restore_core_regs(void)
@@ -199,12 +189,6 @@ static void restore_core_regs(void)
au_writel(sleep_uart0_linectl, UART0_ADDR + UART_LCR); au_sync();
au_writel(sleep_uart0_clkdiv, UART0_ADDR + UART_CLK); au_sync();
}
-
- restore_au1xxx_intctl();
-
-#if defined(CONFIG_SOC_AU1550) || defined(CONFIG_SOC_AU1200)
- au1xxx_dbdma_resume();
-#endif
}
void au_sleep(void)
diff --git a/arch/mips/alchemy/devboards/pb1000/board_setup.c b/arch/mips/alchemy/devboards/pb1000/board_setup.c
index b5311d8a29ab..4ef50d86b181 100644
--- a/arch/mips/alchemy/devboards/pb1000/board_setup.c
+++ b/arch/mips/alchemy/devboards/pb1000/board_setup.c
@@ -27,8 +27,10 @@
#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/pm.h>
#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-pb1x00/pb1000.h>
+#include <asm/reboot.h>
#include <prom.h>
#include "../platform.h"
@@ -38,8 +40,16 @@ const char *get_system_type(void)
return "Alchemy Pb1000";
}
-void board_reset(void)
+static void board_reset(char *c)
{
+ asm volatile ("jr %0" : : "r" (0xbfc00000));
+}
+
+static void board_power_off(void)
+{
+ printk(KERN_ALERT "It's now safe to remove power\n");
+ while (1)
+ asm volatile (".set mips3 ; wait ; .set mips1");
}
void __init board_setup(void)
@@ -177,6 +187,10 @@ void __init board_setup(void)
au_writel(au_readl(SYS_POWERCTRL) | (0x3 << 5), SYS_POWERCTRL);
break;
}
+
+ pm_power_off = board_power_off;
+ _machine_halt = board_power_off;
+ _machine_restart = board_reset;
}
static int __init pb1000_init_irq(void)
diff --git a/arch/mips/alchemy/devboards/pb1100/board_setup.c b/arch/mips/alchemy/devboards/pb1100/board_setup.c
index c7b4caa81a35..90dda5f3ecc5 100644
--- a/arch/mips/alchemy/devboards/pb1100/board_setup.c
+++ b/arch/mips/alchemy/devboards/pb1100/board_setup.c
@@ -39,11 +39,6 @@ const char *get_system_type(void)
return "Alchemy Pb1100";
}
-void board_reset(void)
-{
- bcsr_write(BCSR_SYSTEM, 0);
-}
-
void __init board_setup(void)
{
volatile void __iomem *base = (volatile void __iomem *)0xac000000UL;
diff --git a/arch/mips/alchemy/devboards/pb1200/board_setup.c b/arch/mips/alchemy/devboards/pb1200/board_setup.c
index 3184063f8042..8b4466f2d44a 100644
--- a/arch/mips/alchemy/devboards/pb1200/board_setup.c
+++ b/arch/mips/alchemy/devboards/pb1200/board_setup.c
@@ -48,12 +48,6 @@ const char *get_system_type(void)
return "Alchemy Pb1200";
}
-void board_reset(void)
-{
- bcsr_write(BCSR_RESETS, 0);
- bcsr_write(BCSR_SYSTEM, 0);
-}
-
void __init board_setup(void)
{
printk(KERN_INFO "AMD Alchemy Pb1200 Board\n");
diff --git a/arch/mips/alchemy/devboards/pb1500/board_setup.c b/arch/mips/alchemy/devboards/pb1500/board_setup.c
index fa9770ac358a..9cd9dfa698e7 100644
--- a/arch/mips/alchemy/devboards/pb1500/board_setup.c
+++ b/arch/mips/alchemy/devboards/pb1500/board_setup.c
@@ -45,11 +45,6 @@ const char *get_system_type(void)
return "Alchemy Pb1500";
}
-void board_reset(void)
-{
- bcsr_write(BCSR_SYSTEM, 0);
-}
-
void __init board_setup(void)
{
u32 pin_func;
diff --git a/arch/mips/alchemy/devboards/pb1550/board_setup.c b/arch/mips/alchemy/devboards/pb1550/board_setup.c
index 1e8fb3ddd726..9d7d6edafa8d 100644
--- a/arch/mips/alchemy/devboards/pb1550/board_setup.c
+++ b/arch/mips/alchemy/devboards/pb1550/board_setup.c
@@ -48,11 +48,6 @@ const char *get_system_type(void)
return "Alchemy Pb1550";
}
-void board_reset(void)
-{
- bcsr_write(BCSR_SYSTEM, 0);
-}
-
void __init board_setup(void)
{
u32 pin_func;
diff --git a/arch/mips/ar7/platform.c b/arch/mips/ar7/platform.c
index 2fafc78e5ce1..566f2d7f2ea3 100644
--- a/arch/mips/ar7/platform.c
+++ b/arch/mips/ar7/platform.c
@@ -576,7 +576,6 @@ static int __init ar7_register_devices(void)
{
void __iomem *bootcr;
u32 val;
- u16 chip_id;
int res;
res = ar7_register_uarts();
@@ -635,18 +634,10 @@ static int __init ar7_register_devices(void)
val = readl(bootcr);
iounmap(bootcr);
if (val & AR7_WDT_HW_ENA) {
- chip_id = ar7_chip_id();
- switch (chip_id) {
- case AR7_CHIP_7100:
- case AR7_CHIP_7200:
- ar7_wdt_res.start = AR7_REGS_WDT;
- break;
- case AR7_CHIP_7300:
+ if (ar7_has_high_vlynq())
ar7_wdt_res.start = UR8_REGS_WDT;
- break;
- default:
- break;
- }
+ else
+ ar7_wdt_res.start = AR7_REGS_WDT;
ar7_wdt_res.end = ar7_wdt_res.start + 0x20;
res = platform_device_register(&ar7_wdt);
@@ -656,4 +647,4 @@ static int __init ar7_register_devices(void)
return 0;
}
-arch_initcall(ar7_register_devices);
+device_initcall(ar7_register_devices);
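
[Illustrative sketch, not part of the patch] The last hunk moves ar7_register_devices() from arch_initcall() to device_initcall(), i.e. to a later initcall level, so it runs after earlier-level setup code has completed. A hypothetical example of registering a platform device at that level:

#include <linux/init.h>
#include <linux/platform_device.h>

static struct platform_device example_pdev = {
	.name = "example",
	.id   = -1,
};

static int __init example_register(void)
{
	/* device_initcall() runs after arch_initcall()/subsys_initcall() users */
	return platform_device_register(&example_pdev);
}
device_initcall(example_register);
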
diff --git a/arch/mips/bcm63xx/gpio.c b/arch/mips/bcm63xx/gpio.c
index 315bc7f79ce1..f560fe7d38dd 100644
--- a/arch/mips/bcm63xx/gpio.c
+++ b/arch/mips/bcm63xx/gpio.c
@@ -91,7 +91,7 @@ static int bcm63xx_gpio_set_direction(struct gpio_chip *chip,
spin_lock_irqsave(&bcm63xx_gpio_lock, flags);
tmp = bcm_gpio_readl(reg);
- if (dir == GPIO_DIR_IN)
+ if (dir == BCM63XX_GPIO_DIR_IN)
tmp &= ~mask;
else
tmp |= mask;
@@ -103,14 +103,14 @@ static int bcm63xx_gpio_set_direction(struct gpio_chip *chip,
static int bcm63xx_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
{
- return bcm63xx_gpio_set_direction(chip, gpio, GPIO_DIR_IN);
+ return bcm63xx_gpio_set_direction(chip, gpio, BCM63XX_GPIO_DIR_IN);
}
static int bcm63xx_gpio_direction_output(struct gpio_chip *chip,
unsigned gpio, int value)
{
bcm63xx_gpio_set(chip, gpio, value);
- return bcm63xx_gpio_set_direction(chip, gpio, GPIO_DIR_OUT);
+ return bcm63xx_gpio_set_direction(chip, gpio, BCM63XX_GPIO_DIR_OUT);
}
diff --git a/arch/mips/cavium-octeon/serial.c b/arch/mips/cavium-octeon/serial.c
index 8240728d485a..83eac37a1ff9 100644
--- a/arch/mips/cavium-octeon/serial.c
+++ b/arch/mips/cavium-octeon/serial.c
@@ -65,7 +65,11 @@ static void __init octeon_uart_set_common(struct plat_serial8250_port *p)
p->type = PORT_OCTEON;
p->iotype = UPIO_MEM;
p->regshift = 3; /* I/O addresses are every 8 bytes */
- p->uartclk = mips_hpt_frequency;
+ if (octeon_is_simulation())
+ /* Make simulator output fast*/
+ p->uartclk = 115200 * 16;
+ else
+ p->uartclk = mips_hpt_frequency;
p->serial_in = octeon_serial_in;
p->serial_out = octeon_serial_out;
}
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
index 9a06fa9f9f0c..d1b5ffaf0281 100644
--- a/arch/mips/cavium-octeon/setup.c
+++ b/arch/mips/cavium-octeon/setup.c
@@ -403,7 +403,6 @@ void __init prom_init(void)
const int coreid = cvmx_get_core_num();
int i;
int argc;
- struct uart_port octeon_port;
#ifdef CONFIG_CAVIUM_RESERVE32
int64_t addr = -1;
#endif
@@ -610,30 +609,6 @@ void __init prom_init(void)
_machine_restart = octeon_restart;
_machine_halt = octeon_halt;
- memset(&octeon_port, 0, sizeof(octeon_port));
- /*
- * For early_serial_setup we don't set the port type or
- * UPF_FIXED_TYPE.
- */
- octeon_port.flags = ASYNC_SKIP_TEST | UPF_SHARE_IRQ;
- octeon_port.iotype = UPIO_MEM;
- /* I/O addresses are every 8 bytes */
- octeon_port.regshift = 3;
- /* Clock rate of the chip */
- octeon_port.uartclk = mips_hpt_frequency;
- octeon_port.fifosize = 64;
- octeon_port.mapbase = 0x0001180000000800ull + (1024 * octeon_uart);
- octeon_port.membase = cvmx_phys_to_ptr(octeon_port.mapbase);
- octeon_port.serial_in = octeon_serial_in;
- octeon_port.serial_out = octeon_serial_out;
-#ifdef CONFIG_CAVIUM_OCTEON_2ND_KERNEL
- octeon_port.line = 0;
-#else
- octeon_port.line = octeon_uart;
-#endif
- octeon_port.irq = 42 + octeon_uart;
- early_serial_setup(&octeon_port);
-
octeon_user_io_init();
register_smp_ops(&octeon_smp_ops);
}
@@ -727,7 +702,7 @@ int prom_putchar(char c)
} while ((lsrval & 0x20) == 0);
/* Write the byte */
- cvmx_write_csr(CVMX_MIO_UARTX_THR(octeon_uart), c);
+ cvmx_write_csr(CVMX_MIO_UARTX_THR(octeon_uart), c & 0xffull);
return 1;
}
diff --git a/arch/mips/configs/ar7_defconfig b/arch/mips/configs/ar7_defconfig
index 5a5b6ba7514e..e70009584090 100644
--- a/arch/mips/configs/ar7_defconfig
+++ b/arch/mips/configs/ar7_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.30
-# Wed Jun 24 14:08:59 2009
+# Linux kernel version: 2.6.34-rc6
+# Sat May 1 11:35:01 2010
#
CONFIG_MIPS=y
@@ -11,11 +11,12 @@ CONFIG_MIPS=y
# CONFIG_MACH_ALCHEMY is not set
CONFIG_AR7=y
# CONFIG_BCM47XX is not set
+# CONFIG_BCM63XX is not set
# CONFIG_MIPS_COBALT is not set
# CONFIG_MACH_DECSTATION is not set
# CONFIG_MACH_JAZZ is not set
# CONFIG_LASAT is not set
-# CONFIG_LEMOTE_FULONG is not set
+# CONFIG_MACH_LOONGSON is not set
# CONFIG_MIPS_MALTA is not set
# CONFIG_MIPS_SIM is not set
# CONFIG_NEC_MARKEINS is not set
@@ -26,6 +27,7 @@ CONFIG_AR7=y
# CONFIG_PNX8550_STB810 is not set
# CONFIG_PMC_MSP is not set
# CONFIG_PMC_YOSEMITE is not set
+# CONFIG_POWERTV is not set
# CONFIG_SGI_IP22 is not set
# CONFIG_SGI_IP27 is not set
# CONFIG_SGI_IP28 is not set
@@ -46,6 +48,7 @@ CONFIG_AR7=y
# CONFIG_CAVIUM_OCTEON_SIMULATOR is not set
# CONFIG_CAVIUM_OCTEON_REFERENCE_BOARD is not set
# CONFIG_ALCHEMY_GPIO_INDIRECT is not set
+CONFIG_LOONGSON_UART_BASE=y
CONFIG_RWSEM_GENERIC_SPINLOCK=y
# CONFIG_ARCH_HAS_ILOG2_U32 is not set
# CONFIG_ARCH_HAS_ILOG2_U64 is not set
@@ -63,10 +66,8 @@ CONFIG_CEVT_R4K=y
CONFIG_CSRC_R4K_LIB=y
CONFIG_CSRC_R4K=y
CONFIG_DMA_NONCOHERENT=y
-CONFIG_DMA_NEED_PCI_MAP_STATE=y
-CONFIG_EARLY_PRINTK=y
+CONFIG_NEED_DMA_MAP_STATE=y
CONFIG_SYS_HAS_EARLY_PRINTK=y
-# CONFIG_HOTPLUG_CPU is not set
# CONFIG_NO_IOPORT is not set
CONFIG_GENERIC_GPIO=y
# CONFIG_CPU_BIG_ENDIAN is not set
@@ -81,7 +82,8 @@ CONFIG_MIPS_L1_CACHE_SHIFT=5
#
# CPU selection
#
-# CONFIG_CPU_LOONGSON2 is not set
+# CONFIG_CPU_LOONGSON2E is not set
+# CONFIG_CPU_LOONGSON2F is not set
CONFIG_CPU_MIPS32_R1=y
# CONFIG_CPU_MIPS32_R2 is not set
# CONFIG_CPU_MIPS64_R1 is not set
@@ -103,6 +105,8 @@ CONFIG_CPU_MIPS32_R1=y
# CONFIG_CPU_RM9000 is not set
# CONFIG_CPU_SB1 is not set
# CONFIG_CPU_CAVIUM_OCTEON is not set
+CONFIG_SYS_SUPPORTS_ZBOOT=y
+CONFIG_SYS_SUPPORTS_ZBOOT_UART16550=y
CONFIG_SYS_HAS_CPU_MIPS32_R1=y
CONFIG_CPU_MIPS32=y
CONFIG_CPU_MIPSR1=y
@@ -124,6 +128,7 @@ CONFIG_CPU_HAS_PREFETCH=y
CONFIG_MIPS_MT_DISABLED=y
# CONFIG_MIPS_MT_SMP is not set
# CONFIG_MIPS_MT_SMTC is not set
+# CONFIG_ARCH_PHYS_ADDR_T_64BIT is not set
CONFIG_CPU_HAS_SYNC=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_IRQ_PROBE=y
@@ -141,8 +146,7 @@ CONFIG_SPLIT_PTLOCK_CPUS=4
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
CONFIG_VIRT_TO_BUS=y
-CONFIG_HAVE_MLOCK=y
-CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
CONFIG_TICK_ONESHOT=y
# CONFIG_NO_HZ is not set
@@ -165,6 +169,7 @@ CONFIG_KEXEC=y
CONFIG_LOCKDEP_SUPPORT=y
CONFIG_STACKTRACE_SUPPORT=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
#
# General setup
@@ -174,6 +179,14 @@ CONFIG_BROKEN_ON_SMP=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_LOCALVERSION=""
# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_BZIP2=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_LZO=y
+# CONFIG_KERNEL_GZIP is not set
+# CONFIG_KERNEL_BZIP2 is not set
+CONFIG_KERNEL_LZMA=y
+# CONFIG_KERNEL_LZO is not set
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
@@ -186,14 +199,12 @@ CONFIG_BSD_PROCESS_ACCT=y
#
# RCU Subsystem
#
-CONFIG_CLASSIC_RCU=y
# CONFIG_TREE_RCU is not set
-# CONFIG_PREEMPT_RCU is not set
+# CONFIG_TREE_PREEMPT_RCU is not set
+CONFIG_TINY_RCU=y
# CONFIG_TREE_RCU_TRACE is not set
-# CONFIG_PREEMPT_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_GROUP_SCHED is not set
# CONFIG_CGROUPS is not set
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
@@ -204,6 +215,7 @@ CONFIG_INITRAMFS_SOURCE=""
CONFIG_RD_GZIP=y
# CONFIG_RD_BZIP2 is not set
CONFIG_RD_LZMA=y
+# CONFIG_RD_LZO is not set
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_SYSCTL=y
CONFIG_ANON_INODES=y
@@ -225,19 +237,22 @@ CONFIG_SHMEM=y
CONFIG_AIO=y
#
-# Performance Counters
+# Kernel Performance Events And Counters
#
# CONFIG_VM_EVENT_COUNTERS is not set
-CONFIG_STRIP_ASM_SYMS=y
# CONFIG_COMPAT_BRK is not set
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
# CONFIG_PROFILING is not set
-# CONFIG_MARKERS is not set
CONFIG_HAVE_OPROFILE=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
# CONFIG_SLOW_WORK is not set
-# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
CONFIG_SLABINFO=y
CONFIG_RT_MUTEXES=y
CONFIG_BASE_SMALL=0
@@ -248,7 +263,7 @@ CONFIG_MODULE_UNLOAD=y
# CONFIG_MODVERSIONS is not set
# CONFIG_MODULE_SRCVERSION_ALL is not set
CONFIG_BLOCK=y
-# CONFIG_LBD is not set
+# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_BLK_DEV_INTEGRITY is not set
@@ -256,14 +271,41 @@ CONFIG_BLOCK=y
# IO Schedulers
#
CONFIG_IOSCHED_NOOP=y
-# CONFIG_IOSCHED_AS is not set
CONFIG_IOSCHED_DEADLINE=y
# CONFIG_IOSCHED_CFQ is not set
-# CONFIG_DEFAULT_AS is not set
CONFIG_DEFAULT_DEADLINE=y
# CONFIG_DEFAULT_CFQ is not set
# CONFIG_DEFAULT_NOOP is not set
CONFIG_DEFAULT_IOSCHED="deadline"
+# CONFIG_INLINE_SPIN_TRYLOCK is not set
+# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK is not set
+# CONFIG_INLINE_SPIN_LOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
+CONFIG_INLINE_SPIN_UNLOCK=y
+# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
+CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
+# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_READ_TRYLOCK is not set
+# CONFIG_INLINE_READ_LOCK is not set
+# CONFIG_INLINE_READ_LOCK_BH is not set
+# CONFIG_INLINE_READ_LOCK_IRQ is not set
+# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
+CONFIG_INLINE_READ_UNLOCK=y
+# CONFIG_INLINE_READ_UNLOCK_BH is not set
+CONFIG_INLINE_READ_UNLOCK_IRQ=y
+# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_WRITE_TRYLOCK is not set
+# CONFIG_INLINE_WRITE_LOCK is not set
+# CONFIG_INLINE_WRITE_LOCK_BH is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
+CONFIG_INLINE_WRITE_UNLOCK=y
+# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
+CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
+# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
+# CONFIG_MUTEX_SPIN_ON_OWNER is not set
# CONFIG_FREEZER is not set
#
@@ -293,7 +335,6 @@ CONFIG_NET=y
# Networking options
#
CONFIG_PACKET=y
-CONFIG_PACKET_MMAP=y
CONFIG_UNIX=y
# CONFIG_NET_KEY is not set
CONFIG_INET=y
@@ -377,6 +418,7 @@ CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NETFILTER_XTABLES=m
# CONFIG_NETFILTER_XT_TARGET_CLASSIFY is not set
# CONFIG_NETFILTER_XT_TARGET_CONNMARK is not set
+# CONFIG_NETFILTER_XT_TARGET_CT is not set
# CONFIG_NETFILTER_XT_TARGET_DSCP is not set
# CONFIG_NETFILTER_XT_TARGET_HL is not set
# CONFIG_NETFILTER_XT_TARGET_LED is not set
@@ -458,6 +500,7 @@ CONFIG_IP_NF_RAW=m
# CONFIG_IP_NF_ARPTABLES is not set
# CONFIG_IP_DCCP is not set
# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
# CONFIG_TIPC is not set
CONFIG_ATM=m
# CONFIG_ATM_CLIP is not set
@@ -466,6 +509,7 @@ CONFIG_ATM_BR2684=m
CONFIG_ATM_BR2684_IPFILTER=y
CONFIG_STP=y
CONFIG_BRIDGE=y
+CONFIG_BRIDGE_IGMP_SNOOPING=y
# CONFIG_NET_DSA is not set
CONFIG_VLAN_8021Q=y
# CONFIG_VLAN_8021Q_GVRP is not set
@@ -541,20 +585,19 @@ CONFIG_HAMRADIO=y
# CONFIG_AF_RXRPC is not set
CONFIG_FIB_RULES=y
CONFIG_WIRELESS=y
+CONFIG_WEXT_CORE=y
+CONFIG_WEXT_PROC=y
CONFIG_CFG80211=m
+# CONFIG_NL80211_TESTMODE is not set
+# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set
# CONFIG_CFG80211_REG_DEBUG is not set
+CONFIG_CFG80211_DEFAULT_PS=y
# CONFIG_CFG80211_DEBUGFS is not set
-# CONFIG_WIRELESS_OLD_REGULATORY is not set
-CONFIG_WIRELESS_EXT=y
+# CONFIG_CFG80211_INTERNAL_REGDB is not set
+CONFIG_CFG80211_WEXT=y
CONFIG_WIRELESS_EXT_SYSFS=y
# CONFIG_LIB80211 is not set
CONFIG_MAC80211=m
-CONFIG_MAC80211_DEFAULT_PS=y
-CONFIG_MAC80211_DEFAULT_PS_VALUE=1
-
-#
-# Rate control algorithm selection
-#
CONFIG_MAC80211_RC_PID=y
CONFIG_MAC80211_RC_MINSTREL=y
CONFIG_MAC80211_RC_DEFAULT_PID=y
@@ -576,6 +619,7 @@ CONFIG_MAC80211_RC_DEFAULT="pid"
# Generic Driver Options
#
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
CONFIG_FW_LOADER=y
@@ -585,9 +629,9 @@ CONFIG_EXTRA_FIRMWARE=""
# CONFIG_CONNECTOR is not set
CONFIG_MTD=y
# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
# CONFIG_MTD_CONCAT is not set
CONFIG_MTD_PARTITIONS=y
-# CONFIG_MTD_TESTS is not set
# CONFIG_MTD_REDBOOT_PARTS is not set
# CONFIG_MTD_CMDLINE_PARTS is not set
# CONFIG_MTD_AR7_PARTS is not set
@@ -636,6 +680,7 @@ CONFIG_MTD_CFI_UTIL=y
CONFIG_MTD_COMPLEX_MAPPINGS=y
CONFIG_MTD_PHYSMAP=y
# CONFIG_MTD_PHYSMAP_COMPAT is not set
+# CONFIG_MTD_GPIO_ADDR is not set
# CONFIG_MTD_PLATRAM is not set
#
@@ -668,6 +713,10 @@ CONFIG_MTD_PHYSMAP=y
CONFIG_BLK_DEV=y
# CONFIG_BLK_DEV_COW_COMMON is not set
# CONFIG_BLK_DEV_LOOP is not set
+
+#
+# DRBD disabled because PROC_FS, INET or CONNECTOR not selected
+#
# CONFIG_BLK_DEV_NBD is not set
# CONFIG_BLK_DEV_RAM is not set
# CONFIG_CDROM_PKTCDVD is not set
@@ -687,6 +736,7 @@ CONFIG_HAVE_IDE=y
#
# SCSI device support
#
+CONFIG_SCSI_MOD=y
# CONFIG_RAID_ATTRS is not set
# CONFIG_SCSI is not set
# CONFIG_SCSI_DMA is not set
@@ -727,6 +777,7 @@ CONFIG_MII=y
# CONFIG_SMC91X is not set
# CONFIG_DM9000 is not set
# CONFIG_ETHOC is not set
+# CONFIG_SMSC911X is not set
# CONFIG_DNET is not set
# CONFIG_IBM_NEW_EMAC_ZMII is not set
# CONFIG_IBM_NEW_EMAC_RGMII is not set
@@ -737,23 +788,21 @@ CONFIG_MII=y
# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
# CONFIG_B44 is not set
# CONFIG_KS8842 is not set
+# CONFIG_KS8851_MLL is not set
CONFIG_CPMAC=y
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
-
-#
-# Wireless LAN
-#
-# CONFIG_WLAN_PRE80211 is not set
-CONFIG_WLAN_80211=y
-# CONFIG_LIBERTAS is not set
+CONFIG_WLAN=y
# CONFIG_LIBERTAS_THINFIRM is not set
# CONFIG_MAC80211_HWSIM is not set
-# CONFIG_P54_COMMON is not set
-# CONFIG_HOSTAP is not set
+# CONFIG_ATH_COMMON is not set
# CONFIG_B43 is not set
# CONFIG_B43LEGACY is not set
+# CONFIG_HOSTAP is not set
+# CONFIG_LIBERTAS is not set
+# CONFIG_P54_COMMON is not set
# CONFIG_RT2X00 is not set
+# CONFIG_WL12XX is not set
#
# Enable WiMAX (Networking options) to see the WiMAX drivers
@@ -813,6 +862,7 @@ CONFIG_SERIAL_8250_RUNTIME_UARTS=2
#
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_TIMBERDALE is not set
CONFIG_UNIX98_PTYS=y
# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
# CONFIG_LEGACY_PTYS is not set
@@ -824,11 +874,39 @@ CONFIG_HW_RANDOM=y
# CONFIG_TCG_TPM is not set
# CONFIG_I2C is not set
# CONFIG_SPI is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
+CONFIG_ARCH_REQUIRE_GPIOLIB=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_SYSFS=y
+
+#
+# Memory mapped GPIO expanders:
+#
+# CONFIG_GPIO_IT8761E is not set
+
+#
+# I2C GPIO expanders:
+#
+
+#
+# PCI GPIO expanders:
+#
+
+#
+# SPI GPIO expanders:
+#
+
+#
+# AC97 GPIO expanders:
+#
# CONFIG_W1 is not set
# CONFIG_POWER_SUPPLY is not set
# CONFIG_HWMON is not set
# CONFIG_THERMAL is not set
-# CONFIG_THERMAL_HWMON is not set
CONFIG_WATCHDOG=y
# CONFIG_WATCHDOG_NOWAYOUT is not set
@@ -842,13 +920,7 @@ CONFIG_SSB_POSSIBLE=y
#
# Sonics Silicon Backplane
#
-CONFIG_SSB=y
-# CONFIG_SSB_SILENT is not set
-# CONFIG_SSB_DEBUG is not set
-CONFIG_SSB_SERIAL=y
-CONFIG_SSB_DRIVER_MIPS=y
-CONFIG_SSB_EMBEDDED=y
-CONFIG_SSB_DRIVER_EXTIF=y
+# CONFIG_SSB is not set
#
# Multifunction device drivers
@@ -882,15 +954,18 @@ CONFIG_LEDS_CLASS=y
#
# LED drivers
#
-# CONFIG_LEDS_GPIO is not set
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_GPIO_PLATFORM=y
+# CONFIG_LEDS_LT3593 is not set
+CONFIG_LEDS_TRIGGERS=y
#
# LED Triggers
#
-CONFIG_LEDS_TRIGGERS=y
CONFIG_LEDS_TRIGGER_TIMER=y
CONFIG_LEDS_TRIGGER_HEARTBEAT=y
# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
+# CONFIG_LEDS_TRIGGER_GPIO is not set
CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
#
@@ -921,6 +996,7 @@ CONFIG_VLYNQ=y
# CONFIG_XFS_FS is not set
# CONFIG_OCFS2_FS is not set
# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
CONFIG_FILE_LOCKING=y
CONFIG_FSNOTIFY=y
# CONFIG_DNOTIFY is not set
@@ -984,6 +1060,7 @@ CONFIG_JFFS2_RTIME=y
CONFIG_JFFS2_CMODE_PRIORITY=y
# CONFIG_JFFS2_CMODE_SIZE is not set
# CONFIG_JFFS2_CMODE_FAVOURLZO is not set
+# CONFIG_LOGFS is not set
# CONFIG_CRAMFS is not set
CONFIG_SQUASHFS=y
# CONFIG_SQUASHFS_EMBEDDED is not set
@@ -996,11 +1073,11 @@ CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3
# CONFIG_ROMFS_FS is not set
# CONFIG_SYSV_FS is not set
# CONFIG_UFS_FS is not set
-# CONFIG_NILFS2_FS is not set
CONFIG_NETWORK_FILESYSTEMS=y
# CONFIG_NFS_FS is not set
# CONFIG_NFSD is not set
# CONFIG_SMB_FS is not set
+# CONFIG_CEPH_FS is not set
# CONFIG_CIFS is not set
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
@@ -1039,21 +1116,29 @@ CONFIG_ENABLE_WARN_DEPRECATED=y
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_FRAME_WARN=1024
# CONFIG_MAGIC_SYSRQ is not set
+CONFIG_STRIP_ASM_SYMS=y
# CONFIG_UNUSED_SYMBOLS is not set
CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
# CONFIG_DEBUG_KERNEL is not set
# CONFIG_DEBUG_MEMORY_INIT is not set
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_LKDTM is not set
CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
+CONFIG_HAVE_DYNAMIC_FTRACE=y
+CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
CONFIG_TRACING_SUPPORT=y
# CONFIG_FTRACE is not set
# CONFIG_DYNAMIC_DEBUG is not set
# CONFIG_SAMPLES is not set
CONFIG_HAVE_ARCH_KGDB=y
+CONFIG_EARLY_PRINTK=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="rootfstype=squashfs,jffs2"
# CONFIG_CMDLINE_OVERRIDE is not set
+# CONFIG_SPINLOCK_TEST is not set
#
# Security options
@@ -1061,13 +1146,16 @@ CONFIG_CMDLINE="rootfstype=squashfs,jffs2"
# CONFIG_KEYS is not set
# CONFIG_SECURITY is not set
# CONFIG_SECURITYFS is not set
-# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+# CONFIG_DEFAULT_SECURITY_SELINUX is not set
+# CONFIG_DEFAULT_SECURITY_SMACK is not set
+# CONFIG_DEFAULT_SECURITY_TOMOYO is not set
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_DEFAULT_SECURITY=""
CONFIG_CRYPTO=y
#
# Crypto core or helper
#
-# CONFIG_CRYPTO_FIPS is not set
CONFIG_CRYPTO_ALGAPI=m
CONFIG_CRYPTO_ALGAPI2=m
CONFIG_CRYPTO_AEAD2=m
@@ -1108,11 +1196,13 @@ CONFIG_CRYPTO_ECB=m
#
# CONFIG_CRYPTO_HMAC is not set
# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
#
# Digest
#
# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_GHASH is not set
# CONFIG_CRYPTO_MD4 is not set
# CONFIG_CRYPTO_MD5 is not set
# CONFIG_CRYPTO_MICHAEL_MIC is not set
diff --git a/arch/mips/configs/bcm47xx_defconfig b/arch/mips/configs/bcm47xx_defconfig
index 267bd46120bc..bbd826b8032d 100644
--- a/arch/mips/configs/bcm47xx_defconfig
+++ b/arch/mips/configs/bcm47xx_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.25-rc2
-# Mon Feb 18 11:55:24 2008
+# Linux kernel version: 2.6.34-rc6
+# Sat May 1 12:14:30 2010
#
CONFIG_MIPS=y
@@ -9,20 +9,25 @@ CONFIG_MIPS=y
# Machine selection
#
# CONFIG_MACH_ALCHEMY is not set
+# CONFIG_AR7 is not set
CONFIG_BCM47XX=y
+# CONFIG_BCM63XX is not set
# CONFIG_MIPS_COBALT is not set
# CONFIG_MACH_DECSTATION is not set
# CONFIG_MACH_JAZZ is not set
# CONFIG_LASAT is not set
-# CONFIG_LEMOTE_FULONG is not set
+# CONFIG_MACH_LOONGSON is not set
# CONFIG_MIPS_MALTA is not set
# CONFIG_MIPS_SIM is not set
-# CONFIG_MARKEINS is not set
+# CONFIG_NEC_MARKEINS is not set
# CONFIG_MACH_VR41XX is not set
+# CONFIG_NXP_STB220 is not set
+# CONFIG_NXP_STB225 is not set
# CONFIG_PNX8550_JBS is not set
# CONFIG_PNX8550_STB810 is not set
# CONFIG_PMC_MSP is not set
# CONFIG_PMC_YOSEMITE is not set
+# CONFIG_POWERTV is not set
# CONFIG_SGI_IP22 is not set
# CONFIG_SGI_IP27 is not set
# CONFIG_SGI_IP28 is not set
@@ -36,10 +41,14 @@ CONFIG_BCM47XX=y
# CONFIG_SIBYTE_SENTOSA is not set
# CONFIG_SIBYTE_BIGSUR is not set
# CONFIG_SNI_RM is not set
-# CONFIG_TOSHIBA_JMR3927 is not set
-# CONFIG_TOSHIBA_RBTX4927 is not set
-# CONFIG_TOSHIBA_RBTX4938 is not set
+# CONFIG_MACH_TX39XX is not set
+# CONFIG_MACH_TX49XX is not set
+# CONFIG_MIKROTIK_RB532 is not set
# CONFIG_WR_PPMC is not set
+# CONFIG_CAVIUM_OCTEON_SIMULATOR is not set
+# CONFIG_CAVIUM_OCTEON_REFERENCE_BOARD is not set
+# CONFIG_ALCHEMY_GPIO_INDIRECT is not set
+CONFIG_LOONGSON_UART_BASE=y
CONFIG_RWSEM_GENERIC_SPINLOCK=y
# CONFIG_ARCH_HAS_ILOG2_U32 is not set
# CONFIG_ARCH_HAS_ILOG2_U64 is not set
@@ -50,16 +59,16 @@ CONFIG_GENERIC_CALIBRATE_DELAY=y
CONFIG_GENERIC_CLOCKEVENTS=y
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CMOS_UPDATE=y
-CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
-# CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ is not set
+CONFIG_SCHED_OMIT_FRAME_POINTER=y
+CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
+CONFIG_CEVT_R4K_LIB=y
CONFIG_CEVT_R4K=y
+CONFIG_CSRC_R4K_LIB=y
CONFIG_CSRC_R4K=y
CONFIG_CFE=y
CONFIG_DMA_NONCOHERENT=y
-CONFIG_DMA_NEED_PCI_MAP_STATE=y
-CONFIG_EARLY_PRINTK=y
+CONFIG_NEED_DMA_MAP_STATE=y
CONFIG_SYS_HAS_EARLY_PRINTK=y
-# CONFIG_HOTPLUG_CPU is not set
# CONFIG_NO_IOPORT is not set
CONFIG_GENERIC_GPIO=y
# CONFIG_CPU_BIG_ENDIAN is not set
@@ -71,7 +80,8 @@ CONFIG_MIPS_L1_CACHE_SHIFT=5
#
# CPU selection
#
-# CONFIG_CPU_LOONGSON2 is not set
+# CONFIG_CPU_LOONGSON2E is not set
+# CONFIG_CPU_LOONGSON2F is not set
CONFIG_CPU_MIPS32_R1=y
# CONFIG_CPU_MIPS32_R2 is not set
# CONFIG_CPU_MIPS64_R1 is not set
@@ -84,6 +94,7 @@ CONFIG_CPU_MIPS32_R1=y
# CONFIG_CPU_TX49XX is not set
# CONFIG_CPU_R5000 is not set
# CONFIG_CPU_R5432 is not set
+# CONFIG_CPU_R5500 is not set
# CONFIG_CPU_R6000 is not set
# CONFIG_CPU_NEVADA is not set
# CONFIG_CPU_R8000 is not set
@@ -91,11 +102,13 @@ CONFIG_CPU_MIPS32_R1=y
# CONFIG_CPU_RM7000 is not set
# CONFIG_CPU_RM9000 is not set
# CONFIG_CPU_SB1 is not set
+# CONFIG_CPU_CAVIUM_OCTEON is not set
CONFIG_SYS_HAS_CPU_MIPS32_R1=y
CONFIG_CPU_MIPS32=y
CONFIG_CPU_MIPSR1=y
CONFIG_SYS_SUPPORTS_32BIT_KERNEL=y
CONFIG_CPU_SUPPORTS_32BIT_KERNEL=y
+CONFIG_HARDWARE_WATCHPOINTS=y
#
# Kernel type
@@ -105,11 +118,13 @@ CONFIG_32BIT=y
CONFIG_PAGE_SIZE_4KB=y
# CONFIG_PAGE_SIZE_8KB is not set
# CONFIG_PAGE_SIZE_16KB is not set
+# CONFIG_PAGE_SIZE_32KB is not set
# CONFIG_PAGE_SIZE_64KB is not set
CONFIG_CPU_HAS_PREFETCH=y
CONFIG_MIPS_MT_DISABLED=y
# CONFIG_MIPS_MT_SMP is not set
# CONFIG_MIPS_MT_SMTC is not set
+# CONFIG_ARCH_PHYS_ADDR_T_64BIT is not set
CONFIG_CPU_HAS_SYNC=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_IRQ_PROBE=y
@@ -122,12 +137,13 @@ CONFIG_FLATMEM_MANUAL=y
# CONFIG_SPARSEMEM_MANUAL is not set
CONFIG_FLATMEM=y
CONFIG_FLAT_NODE_MEM_MAP=y
-# CONFIG_SPARSEMEM_STATIC is not set
-# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
+CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
-# CONFIG_RESOURCES_64BIT is not set
+# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
CONFIG_VIRT_TO_BUS=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
CONFIG_TICK_ONESHOT=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
@@ -144,12 +160,12 @@ CONFIG_HZ=250
CONFIG_PREEMPT_NONE=y
# CONFIG_PREEMPT_VOLUNTARY is not set
# CONFIG_PREEMPT is not set
-CONFIG_RCU_TRACE=y
CONFIG_KEXEC=y
# CONFIG_SECCOMP is not set
CONFIG_LOCKDEP_SUPPORT=y
CONFIG_STACKTRACE_SUPPORT=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
#
# General setup
@@ -163,6 +179,7 @@ CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_POSIX_MQUEUE_SYSCTL=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_TASKSTATS=y
@@ -170,25 +187,37 @@ CONFIG_TASK_DELAY_ACCT=y
CONFIG_TASK_XACCT=y
CONFIG_TASK_IO_ACCOUNTING=y
CONFIG_AUDIT=y
+
+#
+# RCU Subsystem
+#
+# CONFIG_TREE_RCU is not set
+# CONFIG_TREE_PREEMPT_RCU is not set
+CONFIG_TINY_RCU=y
+# CONFIG_TREE_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=17
CONFIG_CGROUPS=y
# CONFIG_CGROUP_DEBUG is not set
CONFIG_CGROUP_NS=y
-CONFIG_GROUP_SCHED=y
-CONFIG_FAIR_GROUP_SCHED=y
-# CONFIG_RT_GROUP_SCHED is not set
-CONFIG_USER_SCHED=y
-# CONFIG_CGROUP_SCHED is not set
+# CONFIG_CGROUP_FREEZER is not set
+# CONFIG_CGROUP_DEVICE is not set
+# CONFIG_CPUSETS is not set
CONFIG_CGROUP_CPUACCT=y
# CONFIG_RESOURCE_COUNTERS is not set
-CONFIG_SYSFS_DEPRECATED=y
+# CONFIG_CGROUP_SCHED is not set
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
CONFIG_RELAY=y
# CONFIG_NAMESPACES is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+# CONFIG_RD_BZIP2 is not set
+CONFIG_RD_LZMA=y
+# CONFIG_RD_LZO is not set
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
CONFIG_EMBEDDED=y
CONFIG_SYSCTL_SYSCALL=y
CONFIG_KALLSYMS=y
@@ -197,54 +226,90 @@ CONFIG_HOTPLUG=y
CONFIG_PRINTK=y
CONFIG_BUG=y
CONFIG_ELF_CORE=y
-CONFIG_COMPAT_BRK=y
+CONFIG_PCSPKR_PLATFORM=y
CONFIG_BASE_FULL=y
CONFIG_FUTEX=y
-CONFIG_ANON_INODES=y
CONFIG_EPOLL=y
CONFIG_SIGNALFD=y
CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
+CONFIG_AIO=y
+
+#
+# Kernel Performance Events And Counters
+#
CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_PCI_QUIRKS=y
+CONFIG_COMPAT_BRK=y
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
# CONFIG_PROFILING is not set
-# CONFIG_MARKERS is not set
CONFIG_HAVE_OPROFILE=y
-# CONFIG_HAVE_KPROBES is not set
-CONFIG_PROC_PAGE_MONITOR=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
+CONFIG_SLOW_WORK=y
+# CONFIG_SLOW_WORK_DEBUG is not set
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
CONFIG_SLABINFO=y
CONFIG_RT_MUTEXES=y
-# CONFIG_TINY_SHMEM is not set
CONFIG_BASE_SMALL=0
CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
# CONFIG_MODULE_SRCVERSION_ALL is not set
-CONFIG_KMOD=y
CONFIG_BLOCK=y
-CONFIG_LBD=y
-CONFIG_BLK_DEV_IO_TRACE=y
-CONFIG_LSF=y
+CONFIG_LBDAF=y
# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
#
# IO Schedulers
#
CONFIG_IOSCHED_NOOP=y
-CONFIG_IOSCHED_AS=y
CONFIG_IOSCHED_DEADLINE=y
CONFIG_IOSCHED_CFQ=y
-# CONFIG_DEFAULT_AS is not set
+# CONFIG_CFQ_GROUP_IOSCHED is not set
# CONFIG_DEFAULT_DEADLINE is not set
CONFIG_DEFAULT_CFQ=y
# CONFIG_DEFAULT_NOOP is not set
CONFIG_DEFAULT_IOSCHED="cfq"
-CONFIG_CLASSIC_RCU=y
-# CONFIG_PREEMPT_RCU is not set
+# CONFIG_INLINE_SPIN_TRYLOCK is not set
+# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK is not set
+# CONFIG_INLINE_SPIN_LOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
+CONFIG_INLINE_SPIN_UNLOCK=y
+# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
+CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
+# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_READ_TRYLOCK is not set
+# CONFIG_INLINE_READ_LOCK is not set
+# CONFIG_INLINE_READ_LOCK_BH is not set
+# CONFIG_INLINE_READ_LOCK_IRQ is not set
+# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
+CONFIG_INLINE_READ_UNLOCK=y
+# CONFIG_INLINE_READ_UNLOCK_BH is not set
+CONFIG_INLINE_READ_UNLOCK_IRQ=y
+# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_WRITE_TRYLOCK is not set
+# CONFIG_INLINE_WRITE_LOCK is not set
+# CONFIG_INLINE_WRITE_LOCK_BH is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
+CONFIG_INLINE_WRITE_UNLOCK=y
+# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
+CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
+# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
+# CONFIG_MUTEX_SPIN_ON_OWNER is not set
+# CONFIG_FREEZER is not set
#
# Bus options (PCI, PCMCIA, EISA, ISA, TC)
@@ -253,7 +318,8 @@ CONFIG_HW_HAS_PCI=y
CONFIG_PCI=y
CONFIG_PCI_DOMAINS=y
# CONFIG_ARCH_SUPPORTS_MSI is not set
-CONFIG_PCI_LEGACY=y
+# CONFIG_PCI_STUB is not set
+# CONFIG_PCI_IOV is not set
CONFIG_MMU=y
# CONFIG_PCCARD is not set
# CONFIG_HOTPLUG_PCI is not set
@@ -262,31 +328,30 @@ CONFIG_MMU=y
# Executable file formats
#
CONFIG_BINFMT_ELF=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_HAVE_AOUT is not set
CONFIG_BINFMT_MISC=m
CONFIG_TRAD_SIGNALS=y
#
# Power management options
#
+CONFIG_ARCH_HIBERNATION_POSSIBLE=y
CONFIG_ARCH_SUSPEND_POSSIBLE=y
# CONFIG_PM is not set
-
-#
-# Networking
-#
CONFIG_NET=y
#
# Networking options
#
CONFIG_PACKET=y
-CONFIG_PACKET_MMAP=y
CONFIG_UNIX=y
CONFIG_XFRM=y
CONFIG_XFRM_USER=m
# CONFIG_XFRM_SUB_POLICY is not set
# CONFIG_XFRM_MIGRATE is not set
# CONFIG_XFRM_STATISTICS is not set
+CONFIG_XFRM_IPCOMP=m
CONFIG_NET_KEY=m
# CONFIG_NET_KEY_MIGRATE is not set
CONFIG_INET=y
@@ -315,7 +380,7 @@ CONFIG_INET_TUNNEL=m
CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
CONFIG_INET_XFRM_MODE_BEET=m
-CONFIG_INET_LRO=m
+CONFIG_INET_LRO=y
CONFIG_INET_DIAG=m
CONFIG_INET_TCP_DIAG=m
CONFIG_TCP_CONG_ADVANCED=y
@@ -339,36 +404,6 @@ CONFIG_DEFAULT_BIC=y
# CONFIG_DEFAULT_RENO is not set
CONFIG_DEFAULT_TCP_CONG="bic"
# CONFIG_TCP_MD5SIG is not set
-CONFIG_IP_VS=m
-# CONFIG_IP_VS_DEBUG is not set
-CONFIG_IP_VS_TAB_BITS=12
-
-#
-# IPVS transport protocol load balancing support
-#
-CONFIG_IP_VS_PROTO_TCP=y
-CONFIG_IP_VS_PROTO_UDP=y
-CONFIG_IP_VS_PROTO_ESP=y
-CONFIG_IP_VS_PROTO_AH=y
-
-#
-# IPVS scheduler
-#
-CONFIG_IP_VS_RR=m
-CONFIG_IP_VS_WRR=m
-CONFIG_IP_VS_LC=m
-CONFIG_IP_VS_WLC=m
-CONFIG_IP_VS_LBLC=m
-CONFIG_IP_VS_LBLCR=m
-CONFIG_IP_VS_DH=m
-CONFIG_IP_VS_SH=m
-CONFIG_IP_VS_SED=m
-CONFIG_IP_VS_NQ=m
-
-#
-# IPVS application helper
-#
-CONFIG_IP_VS_FTP=m
CONFIG_IPV6=m
CONFIG_IPV6_PRIVACY=y
# CONFIG_IPV6_ROUTER_PREF is not set
@@ -384,9 +419,12 @@ CONFIG_INET6_XFRM_MODE_TUNNEL=m
CONFIG_INET6_XFRM_MODE_BEET=m
CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
CONFIG_IPV6_SIT=m
+# CONFIG_IPV6_SIT_6RD is not set
+CONFIG_IPV6_NDISC_NODETYPE=y
CONFIG_IPV6_TUNNEL=m
CONFIG_IPV6_MULTIPLE_TABLES=y
CONFIG_IPV6_SUBTREES=y
+# CONFIG_IPV6_MROUTE is not set
CONFIG_NETWORK_SECMARK=y
CONFIG_NETFILTER=y
# CONFIG_NETFILTER_DEBUG is not set
@@ -404,6 +442,7 @@ CONFIG_NF_CT_ACCT=y
CONFIG_NF_CONNTRACK_MARK=y
CONFIG_NF_CONNTRACK_SECMARK=y
CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_DCCP=m
CONFIG_NF_CT_PROTO_GRE=m
CONFIG_NF_CT_PROTO_SCTP=m
CONFIG_NF_CT_PROTO_UDPLITE=m
@@ -417,20 +456,25 @@ CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_CT_NETLINK=m
+# CONFIG_NETFILTER_TPROXY is not set
CONFIG_NETFILTER_XTABLES=m
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
+# CONFIG_NETFILTER_XT_TARGET_CT is not set
CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_HL=m
+# CONFIG_NETFILTER_XT_TARGET_LED is not set
CONFIG_NETFILTER_XT_TARGET_MARK=m
-CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
CONFIG_NETFILTER_XT_TARGET_NFLOG=m
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
# CONFIG_NETFILTER_XT_TARGET_RATEEST is not set
CONFIG_NETFILTER_XT_TARGET_TRACE=m
CONFIG_NETFILTER_XT_TARGET_SECMARK=m
-CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
# CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP is not set
+# CONFIG_NETFILTER_XT_MATCH_CLUSTER is not set
CONFIG_NETFILTER_XT_MATCH_COMMENT=m
CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
@@ -439,20 +483,23 @@ CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
CONFIG_NETFILTER_XT_MATCH_DCCP=m
CONFIG_NETFILTER_XT_MATCH_DSCP=m
CONFIG_NETFILTER_XT_MATCH_ESP=m
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_HL=m
# CONFIG_NETFILTER_XT_MATCH_IPRANGE is not set
CONFIG_NETFILTER_XT_MATCH_LENGTH=m
CONFIG_NETFILTER_XT_MATCH_LIMIT=m
CONFIG_NETFILTER_XT_MATCH_MAC=m
CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
# CONFIG_NETFILTER_XT_MATCH_OWNER is not set
CONFIG_NETFILTER_XT_MATCH_POLICY=m
-CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
CONFIG_NETFILTER_XT_MATCH_QUOTA=m
# CONFIG_NETFILTER_XT_MATCH_RATEEST is not set
CONFIG_NETFILTER_XT_MATCH_REALM=m
+# CONFIG_NETFILTER_XT_MATCH_RECENT is not set
CONFIG_NETFILTER_XT_MATCH_SCTP=m
CONFIG_NETFILTER_XT_MATCH_STATE=m
CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
@@ -460,20 +507,53 @@ CONFIG_NETFILTER_XT_MATCH_STRING=m
CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
CONFIG_NETFILTER_XT_MATCH_TIME=m
CONFIG_NETFILTER_XT_MATCH_U32=m
-CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
+# CONFIG_NETFILTER_XT_MATCH_OSF is not set
+CONFIG_IP_VS=m
+# CONFIG_IP_VS_IPV6 is not set
+# CONFIG_IP_VS_DEBUG is not set
+CONFIG_IP_VS_TAB_BITS=12
+
+#
+# IPVS transport protocol load balancing support
+#
+CONFIG_IP_VS_PROTO_TCP=y
+CONFIG_IP_VS_PROTO_UDP=y
+CONFIG_IP_VS_PROTO_AH_ESP=y
+CONFIG_IP_VS_PROTO_ESP=y
+CONFIG_IP_VS_PROTO_AH=y
+# CONFIG_IP_VS_PROTO_SCTP is not set
+
+#
+# IPVS scheduler
+#
+CONFIG_IP_VS_RR=m
+CONFIG_IP_VS_WRR=m
+CONFIG_IP_VS_LC=m
+CONFIG_IP_VS_WLC=m
+CONFIG_IP_VS_LBLC=m
+CONFIG_IP_VS_LBLCR=m
+CONFIG_IP_VS_DH=m
+CONFIG_IP_VS_SH=m
+CONFIG_IP_VS_SED=m
+CONFIG_IP_VS_NQ=m
+
+#
+# IPVS application helper
+#
+CONFIG_IP_VS_FTP=m
#
# IP: Netfilter Configuration
#
+CONFIG_NF_DEFRAG_IPV4=m
CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_NF_CONNTRACK_PROC_COMPAT=y
CONFIG_IP_NF_QUEUE=m
CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_RECENT=m
-CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_ADDRTYPE=m
CONFIG_IP_NF_MATCH_AH=m
+CONFIG_IP_NF_MATCH_ECN=m
CONFIG_IP_NF_MATCH_TTL=m
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
CONFIG_IP_NF_TARGET_LOG=m
@@ -481,10 +561,13 @@ CONFIG_IP_NF_TARGET_ULOG=m
CONFIG_NF_NAT=m
CONFIG_NF_NAT_NEEDED=y
CONFIG_IP_NF_TARGET_MASQUERADE=m
-CONFIG_IP_NF_TARGET_REDIRECT=m
CONFIG_IP_NF_TARGET_NETMAP=m
+CONFIG_IP_NF_TARGET_REDIRECT=m
CONFIG_NF_NAT_SNMP_BASIC=m
+CONFIG_NF_NAT_PROTO_DCCP=m
CONFIG_NF_NAT_PROTO_GRE=m
+CONFIG_NF_NAT_PROTO_UDPLITE=m
+CONFIG_NF_NAT_PROTO_SCTP=m
CONFIG_NF_NAT_FTP=m
CONFIG_NF_NAT_IRC=m
CONFIG_NF_NAT_TFTP=m
@@ -493,9 +576,9 @@ CONFIG_NF_NAT_PPTP=m
CONFIG_NF_NAT_H323=m
CONFIG_NF_NAT_SIP=m
CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_CLUSTERIP=m
CONFIG_IP_NF_TARGET_ECN=m
CONFIG_IP_NF_TARGET_TTL=m
-CONFIG_IP_NF_TARGET_CLUSTERIP=m
CONFIG_IP_NF_RAW=m
CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
@@ -507,24 +590,20 @@ CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_IP6_NF_QUEUE=m
CONFIG_IP6_NF_IPTABLES=m
-CONFIG_IP6_NF_MATCH_RT=m
-CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_AH=m
+CONFIG_IP6_NF_MATCH_EUI64=m
CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_OPTS=m
CONFIG_IP6_NF_MATCH_HL=m
CONFIG_IP6_NF_MATCH_IPV6HEADER=m
-CONFIG_IP6_NF_MATCH_AH=m
CONFIG_IP6_NF_MATCH_MH=m
-CONFIG_IP6_NF_MATCH_EUI64=m
-CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_MATCH_RT=m
+CONFIG_IP6_NF_TARGET_HL=m
CONFIG_IP6_NF_TARGET_LOG=m
+CONFIG_IP6_NF_FILTER=m
CONFIG_IP6_NF_TARGET_REJECT=m
CONFIG_IP6_NF_MANGLE=m
-CONFIG_IP6_NF_TARGET_HL=m
CONFIG_IP6_NF_RAW=m
-
-#
-# Bridge: Netfilter Configuration
-#
CONFIG_BRIDGE_NF_EBTABLES=m
CONFIG_BRIDGE_EBT_BROUTE=m
CONFIG_BRIDGE_EBT_T_FILTER=m
@@ -533,6 +612,7 @@ CONFIG_BRIDGE_EBT_802_3=m
CONFIG_BRIDGE_EBT_AMONG=m
CONFIG_BRIDGE_EBT_ARP=m
CONFIG_BRIDGE_EBT_IP=m
+# CONFIG_BRIDGE_EBT_IP6 is not set
CONFIG_BRIDGE_EBT_LIMIT=m
CONFIG_BRIDGE_EBT_MARK=m
CONFIG_BRIDGE_EBT_PKTTYPE=m
@@ -545,31 +625,30 @@ CONFIG_BRIDGE_EBT_REDIRECT=m
CONFIG_BRIDGE_EBT_SNAT=m
CONFIG_BRIDGE_EBT_LOG=m
CONFIG_BRIDGE_EBT_ULOG=m
+# CONFIG_BRIDGE_EBT_NFLOG is not set
CONFIG_IP_DCCP=m
CONFIG_INET_DCCP_DIAG=m
-CONFIG_IP_DCCP_ACKVEC=y
#
# DCCP CCIDs Configuration (EXPERIMENTAL)
#
-CONFIG_IP_DCCP_CCID2=m
# CONFIG_IP_DCCP_CCID2_DEBUG is not set
-CONFIG_IP_DCCP_CCID3=m
+CONFIG_IP_DCCP_CCID3=y
# CONFIG_IP_DCCP_CCID3_DEBUG is not set
CONFIG_IP_DCCP_CCID3_RTO=100
-CONFIG_IP_DCCP_TFRC_LIB=m
+CONFIG_IP_DCCP_TFRC_LIB=y
CONFIG_IP_SCTP=m
# CONFIG_SCTP_DBG_MSG is not set
# CONFIG_SCTP_DBG_OBJCNT is not set
# CONFIG_SCTP_HMAC_NONE is not set
# CONFIG_SCTP_HMAC_SHA1 is not set
CONFIG_SCTP_HMAC_MD5=y
+# CONFIG_RDS is not set
CONFIG_TIPC=m
CONFIG_TIPC_ADVANCED=y
CONFIG_TIPC_ZONES=3
CONFIG_TIPC_CLUSTERS=1
CONFIG_TIPC_NODES=255
-CONFIG_TIPC_SLAVE_NODES=0
CONFIG_TIPC_PORTS=8191
CONFIG_TIPC_LOG=0
# CONFIG_TIPC_DEBUG is not set
@@ -580,8 +659,12 @@ CONFIG_ATM_LANE=m
CONFIG_ATM_MPOA=m
CONFIG_ATM_BR2684=m
# CONFIG_ATM_BR2684_IPFILTER is not set
+CONFIG_STP=m
CONFIG_BRIDGE=m
+CONFIG_BRIDGE_IGMP_SNOOPING=y
+# CONFIG_NET_DSA is not set
CONFIG_VLAN_8021Q=m
+# CONFIG_VLAN_8021Q_GVRP is not set
# CONFIG_DECNET is not set
CONFIG_LLC=m
# CONFIG_LLC2 is not set
@@ -591,6 +674,8 @@ CONFIG_LLC=m
# CONFIG_LAPB is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
CONFIG_NET_SCHED=y
#
@@ -601,7 +686,7 @@ CONFIG_NET_SCH_HTB=m
CONFIG_NET_SCH_HFSC=m
CONFIG_NET_SCH_ATM=m
CONFIG_NET_SCH_PRIO=m
-CONFIG_NET_SCH_RR=m
+# CONFIG_NET_SCH_MULTIQ is not set
CONFIG_NET_SCH_RED=m
CONFIG_NET_SCH_SFQ=m
CONFIG_NET_SCH_TEQL=m
@@ -609,6 +694,7 @@ CONFIG_NET_SCH_TBF=m
CONFIG_NET_SCH_GRED=m
CONFIG_NET_SCH_DSMARK=m
CONFIG_NET_SCH_NETEM=m
+# CONFIG_NET_SCH_DRR is not set
CONFIG_NET_SCH_INGRESS=m
#
@@ -626,6 +712,7 @@ CONFIG_CLS_U32_MARK=y
CONFIG_NET_CLS_RSVP=m
CONFIG_NET_CLS_RSVP6=m
# CONFIG_NET_CLS_FLOW is not set
+# CONFIG_NET_CLS_CGROUP is not set
CONFIG_NET_EMATCH=y
CONFIG_NET_EMATCH_STACK=32
CONFIG_NET_EMATCH_CMP=m
@@ -642,8 +729,10 @@ CONFIG_NET_ACT_IPT=m
CONFIG_NET_ACT_NAT=m
CONFIG_NET_ACT_PEDIT=m
CONFIG_NET_ACT_SIMP=m
+# CONFIG_NET_ACT_SKBEDIT is not set
CONFIG_NET_CLS_IND=y
CONFIG_NET_SCH_FIFO=y
+# CONFIG_DCB is not set
#
# Network testing
@@ -651,58 +740,7 @@ CONFIG_NET_SCH_FIFO=y
CONFIG_NET_PKTGEN=m
# CONFIG_HAMRADIO is not set
# CONFIG_CAN is not set
-CONFIG_IRDA=m
-
-#
-# IrDA protocols
-#
-CONFIG_IRLAN=m
-CONFIG_IRNET=m
-CONFIG_IRCOMM=m
-# CONFIG_IRDA_ULTRA is not set
-
-#
-# IrDA options
-#
-CONFIG_IRDA_CACHE_LAST_LSAP=y
-CONFIG_IRDA_FAST_RR=y
-# CONFIG_IRDA_DEBUG is not set
-
-#
-# Infrared-port device drivers
-#
-
-#
-# SIR device drivers
-#
-CONFIG_IRTTY_SIR=m
-
-#
-# Dongle support
-#
-CONFIG_DONGLE=y
-CONFIG_ESI_DONGLE=m
-CONFIG_ACTISYS_DONGLE=m
-CONFIG_TEKRAM_DONGLE=m
-CONFIG_TOIM3232_DONGLE=m
-CONFIG_LITELINK_DONGLE=m
-CONFIG_MA600_DONGLE=m
-CONFIG_GIRBIL_DONGLE=m
-CONFIG_MCP2120_DONGLE=m
-CONFIG_OLD_BELKIN_DONGLE=m
-CONFIG_ACT200L_DONGLE=m
-CONFIG_KINGSUN_DONGLE=m
-CONFIG_KSDAZZLE_DONGLE=m
-CONFIG_KS959_DONGLE=m
-
-#
-# FIR device drivers
-#
-CONFIG_USB_IRDA=m
-CONFIG_SIGMATEL_FIR=m
-CONFIG_TOSHIBA_FIR=m
-CONFIG_VLSI_FIR=m
-CONFIG_MCS_FIR=m
+# CONFIG_IRDA is not set
CONFIG_BT=m
# CONFIG_BT_L2CAP is not set
# CONFIG_BT_SCO is not set
@@ -710,8 +748,7 @@ CONFIG_BT=m
#
# Bluetooth device drivers
#
-CONFIG_BT_HCIUSB=m
-CONFIG_BT_HCIUSB_SCO=y
+# CONFIG_BT_HCIBTUSB is not set
CONFIG_BT_HCIUART=m
CONFIG_BT_HCIUART_H4=y
CONFIG_BT_HCIUART_BCSP=y
@@ -720,51 +757,37 @@ CONFIG_BT_HCIBCM203X=m
CONFIG_BT_HCIBPA10X=m
CONFIG_BT_HCIBFUSB=m
CONFIG_BT_HCIVHCI=m
+# CONFIG_BT_MRVL is not set
# CONFIG_AF_RXRPC is not set
CONFIG_FIB_RULES=y
-
-#
-# Wireless
-#
+CONFIG_WIRELESS=y
+CONFIG_WEXT_CORE=y
+CONFIG_WEXT_PROC=y
CONFIG_CFG80211=m
-CONFIG_NL80211=y
-CONFIG_WIRELESS_EXT=y
+# CONFIG_NL80211_TESTMODE is not set
+# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set
+# CONFIG_CFG80211_REG_DEBUG is not set
+CONFIG_CFG80211_DEFAULT_PS=y
+# CONFIG_CFG80211_DEBUGFS is not set
+# CONFIG_CFG80211_INTERNAL_REGDB is not set
+CONFIG_CFG80211_WEXT=y
+CONFIG_WIRELESS_EXT_SYSFS=y
+# CONFIG_LIB80211 is not set
CONFIG_MAC80211=m
-
-#
-# Rate control algorithm selection
-#
+CONFIG_MAC80211_RC_PID=y
+CONFIG_MAC80211_RC_MINSTREL=y
CONFIG_MAC80211_RC_DEFAULT_PID=y
-# CONFIG_MAC80211_RC_DEFAULT_SIMPLE is not set
-# CONFIG_MAC80211_RC_DEFAULT_NONE is not set
-
-#
-# Selecting 'y' for an algorithm will
-#
-
-#
-# build the algorithm into mac80211.
-#
+# CONFIG_MAC80211_RC_DEFAULT_MINSTREL is not set
CONFIG_MAC80211_RC_DEFAULT="pid"
-CONFIG_MAC80211_RC_PID=y
-# CONFIG_MAC80211_RC_SIMPLE is not set
+CONFIG_MAC80211_MESH=y
CONFIG_MAC80211_LEDS=y
# CONFIG_MAC80211_DEBUGFS is not set
-# CONFIG_MAC80211_DEBUG_PACKET_ALIGNMENT is not set
-# CONFIG_MAC80211_DEBUG is not set
-CONFIG_IEEE80211=m
-# CONFIG_IEEE80211_DEBUG is not set
-CONFIG_IEEE80211_CRYPT_WEP=m
-CONFIG_IEEE80211_CRYPT_CCMP=m
-CONFIG_IEEE80211_CRYPT_TKIP=m
-CONFIG_IEEE80211_SOFTMAC=m
-# CONFIG_IEEE80211_SOFTMAC_DEBUG is not set
+# CONFIG_MAC80211_DEBUG_MENU is not set
+# CONFIG_WIMAX is not set
CONFIG_RFKILL=m
-CONFIG_RFKILL_INPUT=m
CONFIG_RFKILL_LEDS=y
-CONFIG_NET_9P=m
-CONFIG_NET_9P_FD=m
-# CONFIG_NET_9P_DEBUG is not set
+CONFIG_RFKILL_INPUT=y
+# CONFIG_NET_9P is not set
#
# Device Drivers
@@ -774,17 +797,22 @@ CONFIG_NET_9P_FD=m
# Generic Driver Options
#
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
CONFIG_FW_LOADER=m
+CONFIG_FIRMWARE_IN_KERNEL=y
+CONFIG_EXTRA_FIRMWARE=""
# CONFIG_SYS_HYPERVISOR is not set
CONFIG_CONNECTOR=m
CONFIG_MTD=y
# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
CONFIG_MTD_CONCAT=y
CONFIG_MTD_PARTITIONS=y
# CONFIG_MTD_REDBOOT_PARTS is not set
# CONFIG_MTD_CMDLINE_PARTS is not set
+# CONFIG_MTD_AR7_PARTS is not set
#
# User Modules And Translation Layers
@@ -829,9 +857,7 @@ CONFIG_MTD_ABSENT=y
#
# CONFIG_MTD_COMPLEX_MAPPINGS is not set
CONFIG_MTD_PHYSMAP=y
-CONFIG_MTD_PHYSMAP_START=0x8000000
-CONFIG_MTD_PHYSMAP_LEN=0x0
-CONFIG_MTD_PHYSMAP_BANKWIDTH=2
+# CONFIG_MTD_PHYSMAP_COMPAT is not set
# CONFIG_MTD_INTEL_VR_NOR is not set
# CONFIG_MTD_PLATRAM is not set
@@ -854,6 +880,11 @@ CONFIG_MTD_PHYSMAP_BANKWIDTH=2
# CONFIG_MTD_ONENAND is not set
#
+# LPDDR flash memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
+
+#
# UBI - Unsorted block images
#
# CONFIG_MTD_UBI is not set
@@ -866,6 +897,7 @@ CONFIG_BLK_DEV=y
# CONFIG_BLK_DEV_COW_COMMON is not set
CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_CRYPTOLOOP=m
+# CONFIG_BLK_DEV_DRBD is not set
CONFIG_BLK_DEV_NBD=m
# CONFIG_BLK_DEV_SX8 is not set
# CONFIG_BLK_DEV_UB is not set
@@ -875,18 +907,27 @@ CONFIG_BLK_DEV_RAM_SIZE=16384
# CONFIG_BLK_DEV_XIP is not set
# CONFIG_CDROM_PKTCDVD is not set
CONFIG_ATA_OVER_ETH=m
+# CONFIG_BLK_DEV_HD is not set
CONFIG_MISC_DEVICES=y
# CONFIG_PHANTOM is not set
-# CONFIG_EEPROM_93CX6 is not set
# CONFIG_SGI_IOC4 is not set
# CONFIG_TIFM_CORE is not set
# CONFIG_ENCLOSURE_SERVICES is not set
+# CONFIG_HP_ILO is not set
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+# CONFIG_EEPROM_93CX6 is not set
+# CONFIG_CB710_CORE is not set
CONFIG_HAVE_IDE=y
# CONFIG_IDE is not set
#
# SCSI device support
#
+CONFIG_SCSI_MOD=y
CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y
CONFIG_SCSI_DMA=y
@@ -904,10 +945,6 @@ CONFIG_BLK_DEV_SR=m
CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=m
CONFIG_CHR_DEV_SCH=m
-
-#
-# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
-#
CONFIG_SCSI_MULTI_LUN=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_LOGGING=y
@@ -924,21 +961,30 @@ CONFIG_SCSI_ISCSI_ATTRS=m
# CONFIG_SCSI_SRP_ATTRS is not set
CONFIG_SCSI_LOWLEVEL=y
CONFIG_ISCSI_TCP=m
+# CONFIG_SCSI_BNX2_ISCSI is not set
+# CONFIG_BE2ISCSI is not set
# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
+# CONFIG_SCSI_HPSA is not set
# CONFIG_SCSI_3W_9XXX is not set
+# CONFIG_SCSI_3W_SAS is not set
# CONFIG_SCSI_ACARD is not set
# CONFIG_SCSI_AACRAID is not set
# CONFIG_SCSI_AIC7XXX is not set
# CONFIG_SCSI_AIC7XXX_OLD is not set
# CONFIG_SCSI_AIC79XX is not set
# CONFIG_SCSI_AIC94XX is not set
+# CONFIG_SCSI_MVSAS is not set
# CONFIG_SCSI_DPT_I2O is not set
# CONFIG_SCSI_ADVANSYS is not set
# CONFIG_SCSI_ARCMSR is not set
# CONFIG_MEGARAID_NEWGEN is not set
# CONFIG_MEGARAID_LEGACY is not set
# CONFIG_MEGARAID_SAS is not set
+# CONFIG_SCSI_MPT2SAS is not set
# CONFIG_SCSI_HPTIOP is not set
+# CONFIG_LIBFC is not set
+# CONFIG_LIBFCOE is not set
+# CONFIG_FCOE is not set
# CONFIG_SCSI_DMX3191D is not set
# CONFIG_SCSI_FUTURE_DOMAIN is not set
# CONFIG_SCSI_IPS is not set
@@ -954,7 +1000,12 @@ CONFIG_ISCSI_TCP=m
# CONFIG_SCSI_DC390T is not set
# CONFIG_SCSI_NSP32 is not set
# CONFIG_SCSI_DEBUG is not set
+# CONFIG_SCSI_PMCRAID is not set
+# CONFIG_SCSI_PM8001 is not set
# CONFIG_SCSI_SRP is not set
+# CONFIG_SCSI_BFA_FC is not set
+# CONFIG_SCSI_DH is not set
+# CONFIG_SCSI_OSD_INITIATOR is not set
# CONFIG_ATA is not set
# CONFIG_MD is not set
# CONFIG_FUSION is not set
@@ -962,11 +1013,18 @@ CONFIG_ISCSI_TCP=m
#
# IEEE 1394 (FireWire) support
#
+
+#
+# You can enable one or both FireWire driver stacks.
+#
+
+#
+# The newer stack is recommended.
+#
# CONFIG_FIREWIRE is not set
# CONFIG_IEEE1394 is not set
# CONFIG_I2O is not set
CONFIG_NETDEVICES=y
-# CONFIG_NETDEVICES_MULTIQUEUE is not set
# CONFIG_IFB is not set
CONFIG_DUMMY=m
# CONFIG_BONDING is not set
@@ -990,8 +1048,11 @@ CONFIG_SMSC_PHY=m
CONFIG_BROADCOM_PHY=m
CONFIG_ICPLUS_PHY=m
# CONFIG_REALTEK_PHY is not set
-# CONFIG_FIXED_PHY is not set
+# CONFIG_NATIONAL_PHY is not set
+# CONFIG_STE10XP is not set
+# CONFIG_LSI_ET1011C_PHY is not set
CONFIG_MDIO_BITBANG=m
+# CONFIG_MDIO_GPIO is not set
CONFIG_NET_ETHERNET=y
CONFIG_MII=y
# CONFIG_AX88796 is not set
@@ -999,24 +1060,31 @@ CONFIG_MII=y
# CONFIG_SUNGEM is not set
# CONFIG_CASSINI is not set
# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_SMC91X is not set
# CONFIG_DM9000 is not set
+# CONFIG_ETHOC is not set
+# CONFIG_SMSC911X is not set
+# CONFIG_DNET is not set
# CONFIG_NET_TULIP is not set
# CONFIG_HP100 is not set
# CONFIG_IBM_NEW_EMAC_ZMII is not set
# CONFIG_IBM_NEW_EMAC_RGMII is not set
# CONFIG_IBM_NEW_EMAC_TAH is not set
# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
CONFIG_NET_PCI=y
# CONFIG_PCNET32 is not set
# CONFIG_AMD8111_ETH is not set
# CONFIG_ADAPTEC_STARFIRE is not set
+# CONFIG_KSZ884X_PCI is not set
CONFIG_B44=y
CONFIG_B44_PCI_AUTOSELECT=y
CONFIG_B44_PCICORE_AUTOSELECT=y
CONFIG_B44_PCI=y
# CONFIG_FORCEDETH is not set
# CONFIG_TC35815 is not set
-# CONFIG_EEPRO100 is not set
# CONFIG_E100 is not set
# CONFIG_FEALNX is not set
# CONFIG_NATSEMI is not set
@@ -1026,41 +1094,67 @@ CONFIG_B44_PCI=y
# CONFIG_R6040 is not set
# CONFIG_SIS900 is not set
# CONFIG_EPIC100 is not set
+# CONFIG_SMSC9420 is not set
# CONFIG_SUNDANCE is not set
# CONFIG_TLAN is not set
+# CONFIG_KS8842 is not set
+# CONFIG_KS8851_MLL is not set
# CONFIG_VIA_RHINE is not set
# CONFIG_SC92031 is not set
+# CONFIG_ATL2 is not set
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
# CONFIG_TR is not set
-
-#
-# Wireless LAN
-#
-# CONFIG_WLAN_PRE80211 is not set
-CONFIG_WLAN_80211=y
-# CONFIG_IPW2100 is not set
-# CONFIG_IPW2200 is not set
-# CONFIG_LIBERTAS is not set
-# CONFIG_HERMES is not set
+CONFIG_WLAN=y
+# CONFIG_LIBERTAS_THINFIRM is not set
# CONFIG_ATMEL is not set
+# CONFIG_AT76C50X_USB is not set
# CONFIG_PRISM54 is not set
# CONFIG_USB_ZD1201 is not set
# CONFIG_USB_NET_RNDIS_WLAN is not set
# CONFIG_RTL8180 is not set
# CONFIG_RTL8187 is not set
# CONFIG_ADM8211 is not set
-# CONFIG_P54_COMMON is not set
+# CONFIG_MAC80211_HWSIM is not set
+# CONFIG_MWL8K is not set
+CONFIG_ATH_COMMON=m
+# CONFIG_ATH_DEBUG is not set
CONFIG_ATH5K=m
-# CONFIG_IWL4965 is not set
-# CONFIG_IWL3945 is not set
+# CONFIG_ATH5K_DEBUG is not set
+# CONFIG_ATH9K is not set
+# CONFIG_AR9170_USB is not set
+CONFIG_B43=m
+CONFIG_B43_PCI_AUTOSELECT=y
+CONFIG_B43_PCICORE_AUTOSELECT=y
+CONFIG_B43_PIO=y
+CONFIG_B43_PHY_LP=y
+CONFIG_B43_LEDS=y
+# CONFIG_B43_DEBUG is not set
+CONFIG_B43LEGACY=m
+CONFIG_B43LEGACY_PCI_AUTOSELECT=y
+CONFIG_B43LEGACY_PCICORE_AUTOSELECT=y
+CONFIG_B43LEGACY_LEDS=y
+CONFIG_B43LEGACY_DEBUG=y
+CONFIG_B43LEGACY_DMA=y
+CONFIG_B43LEGACY_PIO=y
+CONFIG_B43LEGACY_DMA_AND_PIO_MODE=y
+# CONFIG_B43LEGACY_DMA_MODE is not set
+# CONFIG_B43LEGACY_PIO_MODE is not set
# CONFIG_HOSTAP is not set
-# CONFIG_BCM43XX is not set
-# CONFIG_B43 is not set
-# CONFIG_B43LEGACY is not set
+# CONFIG_IPW2100 is not set
+# CONFIG_IPW2200 is not set
+# CONFIG_IWLWIFI is not set
+# CONFIG_LIBERTAS is not set
+# CONFIG_HERMES is not set
+# CONFIG_P54_COMMON is not set
+# CONFIG_RT2X00 is not set
+# CONFIG_WL12XX is not set
CONFIG_ZD1211RW=m
# CONFIG_ZD1211RW_DEBUG is not set
-# CONFIG_RT2X00 is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
#
# USB Network Adapters
@@ -1072,7 +1166,10 @@ CONFIG_USB_RTL8150=m
CONFIG_USB_USBNET=m
CONFIG_USB_NET_AX8817X=m
CONFIG_USB_NET_CDCETHER=m
+# CONFIG_USB_NET_CDC_EEM is not set
CONFIG_USB_NET_DM9601=m
+# CONFIG_USB_NET_SMSC75XX is not set
+# CONFIG_USB_NET_SMSC95XX is not set
CONFIG_USB_NET_GL620A=m
CONFIG_USB_NET_NET1080=m
CONFIG_USB_NET_PLUSB=m
@@ -1086,6 +1183,10 @@ CONFIG_USB_ARMLINUX=y
CONFIG_USB_EPSON2888=y
CONFIG_USB_KC2190=y
CONFIG_USB_NET_ZAURUS=m
+# CONFIG_USB_HSO is not set
+# CONFIG_USB_NET_INT51X1 is not set
+# CONFIG_USB_IPHETH is not set
+CONFIG_USB_SIERRA_NET=m
# CONFIG_WAN is not set
CONFIG_ATM_DRIVERS=y
CONFIG_ATM_DUMMY=m
@@ -1099,8 +1200,9 @@ CONFIG_ATM_TCP=m
# CONFIG_ATM_AMBASSADOR is not set
# CONFIG_ATM_HORIZON is not set
# CONFIG_ATM_IA is not set
-# CONFIG_ATM_FORE200E_MAYBE is not set
+# CONFIG_ATM_FORE200E is not set
# CONFIG_ATM_HE is not set
+# CONFIG_ATM_SOLOS is not set
# CONFIG_FDDI is not set
# CONFIG_HIPPI is not set
CONFIG_PPP=m
@@ -1120,11 +1222,10 @@ CONFIG_SLHC=m
# CONFIG_SLIP_SMART is not set
# CONFIG_SLIP_MODE_SLIP6 is not set
# CONFIG_NET_FC is not set
-CONFIG_NETCONSOLE=y
-# CONFIG_NETCONSOLE_DYNAMIC is not set
-CONFIG_NETPOLL=y
-# CONFIG_NETPOLL_TRAP is not set
-CONFIG_NET_POLL_CONTROLLER=y
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_VMXNET3 is not set
# CONFIG_ISDN is not set
# CONFIG_PHONE is not set
@@ -1134,6 +1235,7 @@ CONFIG_NET_POLL_CONTROLLER=y
CONFIG_INPUT=y
# CONFIG_INPUT_FF_MEMLESS is not set
# CONFIG_INPUT_POLLDEV is not set
+# CONFIG_INPUT_SPARSEKMAP is not set
#
# Userland interfaces
@@ -1166,6 +1268,7 @@ CONFIG_INPUT_EVDEV=m
# Character devices
#
# CONFIG_VT is not set
+CONFIG_DEVKMEM=y
# CONFIG_SERIAL_NONSTANDARD is not set
# CONFIG_NOZOMI is not set
@@ -1185,23 +1288,24 @@ CONFIG_SERIAL_8250_RUNTIME_UARTS=2
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
# CONFIG_SERIAL_JSM is not set
+# CONFIG_SERIAL_TIMBERDALE is not set
CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
# CONFIG_LEGACY_PTYS is not set
# CONFIG_IPMI_HANDLER is not set
# CONFIG_HW_RANDOM is not set
-# CONFIG_RTC is not set
# CONFIG_R3964 is not set
# CONFIG_APPLICOM is not set
# CONFIG_RAW_DRIVER is not set
# CONFIG_TCG_TPM is not set
CONFIG_DEVPORT=y
# CONFIG_I2C is not set
+# CONFIG_SPI is not set
#
-# SPI support
+# PPS support
#
-# CONFIG_SPI is not set
-# CONFIG_SPI_MASTER is not set
+# CONFIG_PPS is not set
CONFIG_W1=m
CONFIG_W1_CON=y
@@ -1217,21 +1321,45 @@ CONFIG_W1_MASTER_DS2490=m
#
CONFIG_W1_SLAVE_THERM=m
CONFIG_W1_SLAVE_SMEM=m
+# CONFIG_W1_SLAVE_DS2431 is not set
CONFIG_W1_SLAVE_DS2433=m
# CONFIG_W1_SLAVE_DS2433_CRC is not set
CONFIG_W1_SLAVE_DS2760=m
+# CONFIG_W1_SLAVE_BQ27000 is not set
# CONFIG_POWER_SUPPLY is not set
# CONFIG_HWMON is not set
CONFIG_THERMAL=y
-# CONFIG_WATCHDOG is not set
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_NOWAYOUT=y
#
-# Sonics Silicon Backplane
+# Watchdog Device Drivers
+#
+# CONFIG_SOFT_WATCHDOG is not set
+# CONFIG_ALIM7101_WDT is not set
+CONFIG_BCM47XX_WDT=y
+
+#
+# PCI-based Watchdog Cards
+#
+# CONFIG_PCIPCWATCHDOG is not set
+# CONFIG_WDTPCI is not set
+
#
+# USB-based Watchdog Cards
+#
+# CONFIG_USBPCWATCHDOG is not set
CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
CONFIG_SSB=y
+CONFIG_SSB_SPROM=y
+CONFIG_SSB_BLOCKIO=y
CONFIG_SSB_PCIHOST_POSSIBLE=y
CONFIG_SSB_PCIHOST=y
+CONFIG_SSB_B43_PCI_BRIDGE=y
# CONFIG_SSB_SILENT is not set
# CONFIG_SSB_DEBUG is not set
CONFIG_SSB_SERIAL=y
@@ -1239,24 +1367,26 @@ CONFIG_SSB_DRIVER_PCICORE_POSSIBLE=y
CONFIG_SSB_DRIVER_PCICORE=y
CONFIG_SSB_PCICORE_HOSTMODE=y
CONFIG_SSB_DRIVER_MIPS=y
+CONFIG_SSB_EMBEDDED=y
CONFIG_SSB_DRIVER_EXTIF=y
+CONFIG_SSB_DRIVER_GIGE=y
#
# Multifunction device drivers
#
+# CONFIG_MFD_CORE is not set
# CONFIG_MFD_SM501 is not set
-
-#
-# Multimedia devices
-#
-# CONFIG_VIDEO_DEV is not set
-# CONFIG_DVB_CORE is not set
-CONFIG_DAB=y
-CONFIG_USB_DABUSB=m
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_LPC_SCH is not set
+# CONFIG_REGULATOR is not set
+# CONFIG_MEDIA_SUPPORT is not set
#
# Graphics support
#
+CONFIG_VGA_ARB=y
+CONFIG_VGA_ARB_MAX_GPUS=16
# CONFIG_DRM is not set
# CONFIG_VGASTATE is not set
# CONFIG_VIDEO_OUTPUT_CONTROL is not set
@@ -1271,15 +1401,9 @@ CONFIG_DISPLAY_SUPPORT=m
#
# Display hardware drivers
#
-
-#
-# Sound
-#
CONFIG_SOUND=m
-
-#
-# Advanced Linux Sound Architecture
-#
+CONFIG_SOUND_OSS_CORE=y
+CONFIG_SOUND_OSS_CORE_PRECLAIM=y
CONFIG_SND=m
CONFIG_SND_TIMER=m
CONFIG_SND_PCM=m
@@ -1292,24 +1416,24 @@ CONFIG_SND_MIXER_OSS=m
CONFIG_SND_PCM_OSS=m
CONFIG_SND_PCM_OSS_PLUGINS=y
CONFIG_SND_SEQUENCER_OSS=y
+# CONFIG_SND_HRTIMER is not set
# CONFIG_SND_DYNAMIC_MINORS is not set
CONFIG_SND_SUPPORT_OLD_API=y
CONFIG_SND_VERBOSE_PROCFS=y
# CONFIG_SND_VERBOSE_PRINTK is not set
# CONFIG_SND_DEBUG is not set
-
-#
-# Generic devices
-#
+CONFIG_SND_RAWMIDI_SEQ=m
+# CONFIG_SND_OPL3_LIB_SEQ is not set
+# CONFIG_SND_OPL4_LIB_SEQ is not set
+# CONFIG_SND_SBAWE_SEQ is not set
+# CONFIG_SND_EMU10K1_SEQ is not set
+CONFIG_SND_DRIVERS=y
CONFIG_SND_DUMMY=m
CONFIG_SND_VIRMIDI=m
# CONFIG_SND_MTPAV is not set
# CONFIG_SND_SERIAL_U16550 is not set
# CONFIG_SND_MPU401 is not set
-
-#
-# PCI devices
-#
+CONFIG_SND_PCI=y
# CONFIG_SND_AD1889 is not set
# CONFIG_SND_ALS300 is not set
# CONFIG_SND_ALI5451 is not set
@@ -1318,6 +1442,7 @@ CONFIG_SND_VIRMIDI=m
# CONFIG_SND_AU8810 is not set
# CONFIG_SND_AU8820 is not set
# CONFIG_SND_AU8830 is not set
+# CONFIG_SND_AW2 is not set
# CONFIG_SND_AZT3328 is not set
# CONFIG_SND_BT87X is not set
# CONFIG_SND_CA0106 is not set
@@ -1325,6 +1450,8 @@ CONFIG_SND_VIRMIDI=m
# CONFIG_SND_OXYGEN is not set
# CONFIG_SND_CS4281 is not set
# CONFIG_SND_CS46XX is not set
+# CONFIG_SND_CS5535AUDIO is not set
+# CONFIG_SND_CTXFI is not set
# CONFIG_SND_DARLA20 is not set
# CONFIG_SND_GINA20 is not set
# CONFIG_SND_LAYLA20 is not set
@@ -1337,6 +1464,8 @@ CONFIG_SND_VIRMIDI=m
# CONFIG_SND_INDIGO is not set
# CONFIG_SND_INDIGOIO is not set
# CONFIG_SND_INDIGODJ is not set
+# CONFIG_SND_INDIGOIOX is not set
+# CONFIG_SND_INDIGODJX is not set
# CONFIG_SND_EMU10K1 is not set
# CONFIG_SND_EMU10K1X is not set
# CONFIG_SND_ENS1370 is not set
@@ -1353,6 +1482,7 @@ CONFIG_SND_VIRMIDI=m
# CONFIG_SND_INTEL8X0 is not set
# CONFIG_SND_INTEL8X0M is not set
# CONFIG_SND_KORG1212 is not set
+# CONFIG_SND_LX6464ES is not set
# CONFIG_SND_MAESTRO3 is not set
# CONFIG_SND_MIXART is not set
# CONFIG_SND_NM256 is not set
@@ -1368,45 +1498,22 @@ CONFIG_SND_VIRMIDI=m
# CONFIG_SND_VIRTUOSO is not set
# CONFIG_SND_VX222 is not set
# CONFIG_SND_YMFPCI is not set
-
-#
-# ALSA MIPS devices
-#
-
-#
-# USB devices
-#
+CONFIG_SND_MIPS=y
+CONFIG_SND_USB=y
CONFIG_SND_USB_AUDIO=m
+# CONFIG_SND_USB_UA101 is not set
# CONFIG_SND_USB_CAIAQ is not set
-
-#
-# System on Chip audio support
-#
# CONFIG_SND_SOC is not set
-
-#
-# SoC Audio support for SuperH
-#
-
-#
-# ALSA SoC audio for Freescale SOCs
-#
-
-#
-# Open Sound System
-#
# CONFIG_SOUND_PRIME is not set
CONFIG_HID_SUPPORT=y
CONFIG_HID=m
-# CONFIG_HID_DEBUG is not set
# CONFIG_HIDRAW is not set
#
# USB Input Devices
#
CONFIG_USB_HID=m
-# CONFIG_USB_HIDINPUT_POWERBOOK is not set
-# CONFIG_HID_FF is not set
+# CONFIG_HID_PID is not set
CONFIG_USB_HIDDEV=y
#
@@ -1414,6 +1521,41 @@ CONFIG_USB_HIDDEV=y
#
# CONFIG_USB_KBD is not set
# CONFIG_USB_MOUSE is not set
+
+#
+# Special HID drivers
+#
+# CONFIG_HID_3M_PCT is not set
+# CONFIG_HID_A4TECH is not set
+# CONFIG_HID_APPLE is not set
+# CONFIG_HID_BELKIN is not set
+# CONFIG_HID_CHERRY is not set
+# CONFIG_HID_CHICONY is not set
+# CONFIG_HID_CYPRESS is not set
+# CONFIG_HID_DRAGONRISE is not set
+# CONFIG_HID_EZKEY is not set
+# CONFIG_HID_KYE is not set
+# CONFIG_HID_GYRATION is not set
+# CONFIG_HID_TWINHAN is not set
+# CONFIG_HID_KENSINGTON is not set
+# CONFIG_HID_LOGITECH is not set
+# CONFIG_HID_MICROSOFT is not set
+# CONFIG_HID_MOSART is not set
+# CONFIG_HID_MONTEREY is not set
+# CONFIG_HID_NTRIG is not set
+# CONFIG_HID_ORTEK is not set
+# CONFIG_HID_PANTHERLORD is not set
+# CONFIG_HID_PETALYNX is not set
+# CONFIG_HID_QUANTA is not set
+# CONFIG_HID_SAMSUNG is not set
+# CONFIG_HID_SONY is not set
+# CONFIG_HID_STANTUM is not set
+# CONFIG_HID_SUNPLUS is not set
+# CONFIG_HID_GREENASIA is not set
+# CONFIG_HID_SMARTJOYPLUS is not set
+# CONFIG_HID_TOPSEED is not set
+# CONFIG_HID_THRUSTMASTER is not set
+# CONFIG_HID_ZEROPLUS is not set
CONFIG_USB_SUPPORT=y
CONFIG_USB_ARCH_HAS_HCD=y
CONFIG_USB_ARCH_HAS_OHCI=y
@@ -1429,14 +1571,24 @@ CONFIG_USB_DEVICEFS=y
# CONFIG_USB_DEVICE_CLASS is not set
# CONFIG_USB_DYNAMIC_MINORS is not set
# CONFIG_USB_OTG is not set
+# CONFIG_USB_OTG_WHITELIST is not set
+# CONFIG_USB_OTG_BLACKLIST_HUB is not set
+# CONFIG_USB_MON is not set
+# CONFIG_USB_WUSB is not set
+# CONFIG_USB_WUSB_CBAF is not set
#
# USB Host Controller Drivers
#
+# CONFIG_USB_C67X00_HCD is not set
+# CONFIG_USB_XHCI_HCD is not set
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_ROOT_HUB_TT=y
CONFIG_USB_EHCI_TT_NEWSCHED=y
+# CONFIG_USB_OXU210HP_HCD is not set
# CONFIG_USB_ISP116X_HCD is not set
+# CONFIG_USB_ISP1760_HCD is not set
+# CONFIG_USB_ISP1362_HCD is not set
CONFIG_USB_OHCI_HCD=y
# CONFIG_USB_OHCI_HCD_SSB is not set
# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set
@@ -1446,26 +1598,30 @@ CONFIG_USB_OHCI_LITTLE_ENDIAN=y
CONFIG_USB_U132_HCD=m
# CONFIG_USB_SL811_HCD is not set
CONFIG_USB_R8A66597_HCD=m
+# CONFIG_USB_WHCI_HCD is not set
+# CONFIG_USB_HWA_HCD is not set
+# CONFIG_USB_GADGET_MUSB_HDRC is not set
#
# USB Device Class drivers
#
CONFIG_USB_ACM=m
CONFIG_USB_PRINTER=m
+# CONFIG_USB_WDM is not set
+# CONFIG_USB_TMC is not set
#
-# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
#
#
-# may also be needed; see USB_STORAGE Help for more information
+# also be needed; see USB_STORAGE Help for more info
#
CONFIG_USB_STORAGE=y
# CONFIG_USB_STORAGE_DEBUG is not set
CONFIG_USB_STORAGE_DATAFAB=y
CONFIG_USB_STORAGE_FREECOM=y
# CONFIG_USB_STORAGE_ISD200 is not set
-CONFIG_USB_STORAGE_DPCM=y
CONFIG_USB_STORAGE_USBAT=y
CONFIG_USB_STORAGE_SDDR09=y
CONFIG_USB_STORAGE_SDDR55=y
@@ -1473,6 +1629,7 @@ CONFIG_USB_STORAGE_JUMPSHOT=y
CONFIG_USB_STORAGE_ALAUDA=y
CONFIG_USB_STORAGE_ONETOUCH=y
CONFIG_USB_STORAGE_KARMA=y
+# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set
# CONFIG_USB_LIBUSUAL is not set
#
@@ -1480,7 +1637,6 @@ CONFIG_USB_STORAGE_KARMA=y
#
CONFIG_USB_MDC800=m
CONFIG_USB_MICROTEK=m
-# CONFIG_USB_MON is not set
#
# USB port drivers
@@ -1489,13 +1645,12 @@ CONFIG_USB_SERIAL=m
CONFIG_USB_EZUSB=y
CONFIG_USB_SERIAL_GENERIC=y
CONFIG_USB_SERIAL_AIRCABLE=m
-CONFIG_USB_SERIAL_AIRPRIME=m
CONFIG_USB_SERIAL_ARK3116=m
CONFIG_USB_SERIAL_BELKIN=m
CONFIG_USB_SERIAL_CH341=m
# CONFIG_USB_SERIAL_WHITEHEAT is not set
CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m
-CONFIG_USB_SERIAL_CP2101=m
+# CONFIG_USB_SERIAL_CP210X is not set
CONFIG_USB_SERIAL_CYPRESS_M8=m
CONFIG_USB_SERIAL_EMPEG=m
CONFIG_USB_SERIAL_FTDI_SIO=m
@@ -1515,18 +1670,26 @@ CONFIG_USB_SERIAL_KOBIL_SCT=m
CONFIG_USB_SERIAL_MCT_U232=m
CONFIG_USB_SERIAL_MOS7720=m
CONFIG_USB_SERIAL_MOS7840=m
+# CONFIG_USB_SERIAL_MOTOROLA is not set
CONFIG_USB_SERIAL_NAVMAN=m
CONFIG_USB_SERIAL_PL2303=m
CONFIG_USB_SERIAL_OTI6858=m
+# CONFIG_USB_SERIAL_QCAUX is not set
+# CONFIG_USB_SERIAL_QUALCOMM is not set
+# CONFIG_USB_SERIAL_SPCP8X5 is not set
CONFIG_USB_SERIAL_HP4X=m
CONFIG_USB_SERIAL_SAFE=m
# CONFIG_USB_SERIAL_SAFE_PADDED is not set
+# CONFIG_USB_SERIAL_SIEMENS_MPI is not set
CONFIG_USB_SERIAL_SIERRAWIRELESS=m
+# CONFIG_USB_SERIAL_SYMBOL is not set
# CONFIG_USB_SERIAL_TI is not set
CONFIG_USB_SERIAL_CYBERJACK=m
CONFIG_USB_SERIAL_XIRCOM=m
CONFIG_USB_SERIAL_OPTION=m
CONFIG_USB_SERIAL_OMNINET=m
+# CONFIG_USB_SERIAL_OPTICON is not set
+# CONFIG_USB_SERIAL_VIVOPAY_SERIAL is not set
CONFIG_USB_SERIAL_DEBUG=m
#
@@ -1535,18 +1698,13 @@ CONFIG_USB_SERIAL_DEBUG=m
# CONFIG_USB_EMI62 is not set
# CONFIG_USB_EMI26 is not set
CONFIG_USB_ADUTUX=m
-CONFIG_USB_AUERSWALD=m
+# CONFIG_USB_SEVSEG is not set
CONFIG_USB_RIO500=m
CONFIG_USB_LEGOTOWER=m
CONFIG_USB_LCD=m
-CONFIG_USB_BERRY_CHARGE=m
CONFIG_USB_LED=m
CONFIG_USB_CYPRESS_CY7C63=m
CONFIG_USB_CYTHERM=m
-CONFIG_USB_PHIDGET=m
-CONFIG_USB_PHIDGETKIT=m
-CONFIG_USB_PHIDGETMOTORCONTROL=m
-CONFIG_USB_PHIDGETSERVO=m
CONFIG_USB_IDMOUSE=m
CONFIG_USB_FTDI_ELAN=m
# CONFIG_USB_APPLEDISPLAY is not set
@@ -1555,6 +1713,7 @@ CONFIG_USB_LD=m
CONFIG_USB_TRANCEVIBRATOR=m
CONFIG_USB_IOWARRIOR=m
CONFIG_USB_TEST=m
+# CONFIG_USB_ISIGHTFW is not set
CONFIG_USB_ATM=m
CONFIG_USB_SPEEDTOUCH=m
CONFIG_USB_CXACRU=m
@@ -1563,30 +1722,51 @@ CONFIG_USB_XUSBATM=m
CONFIG_USB_GADGET=m
# CONFIG_USB_GADGET_DEBUG_FILES is not set
# CONFIG_USB_GADGET_DEBUG_FS is not set
+CONFIG_USB_GADGET_VBUS_DRAW=2
CONFIG_USB_GADGET_SELECTED=y
-# CONFIG_USB_GADGET_AMD5536UDC is not set
+# CONFIG_USB_GADGET_AT91 is not set
# CONFIG_USB_GADGET_ATMEL_USBA is not set
# CONFIG_USB_GADGET_FSL_USB2 is not set
-CONFIG_USB_GADGET_NET2280=y
-CONFIG_USB_NET2280=m
-# CONFIG_USB_GADGET_PXA2XX is not set
-# CONFIG_USB_GADGET_M66592 is not set
-# CONFIG_USB_GADGET_GOKU is not set
# CONFIG_USB_GADGET_LH7A40X is not set
# CONFIG_USB_GADGET_OMAP is not set
+# CONFIG_USB_GADGET_PXA25X is not set
+# CONFIG_USB_GADGET_R8A66597 is not set
+# CONFIG_USB_GADGET_PXA27X is not set
+# CONFIG_USB_GADGET_S3C_HSOTG is not set
+# CONFIG_USB_GADGET_IMX is not set
# CONFIG_USB_GADGET_S3C2410 is not set
-# CONFIG_USB_GADGET_AT91 is not set
+# CONFIG_USB_GADGET_M66592 is not set
+# CONFIG_USB_GADGET_AMD5536UDC is not set
+# CONFIG_USB_GADGET_FSL_QE is not set
+# CONFIG_USB_GADGET_CI13XXX is not set
+CONFIG_USB_GADGET_NET2280=y
+CONFIG_USB_NET2280=m
+# CONFIG_USB_GADGET_GOKU is not set
+# CONFIG_USB_GADGET_LANGWELL is not set
# CONFIG_USB_GADGET_DUMMY_HCD is not set
CONFIG_USB_GADGET_DUALSPEED=y
CONFIG_USB_ZERO=m
+# CONFIG_USB_AUDIO is not set
CONFIG_USB_ETH=m
CONFIG_USB_ETH_RNDIS=y
+# CONFIG_USB_ETH_EEM is not set
CONFIG_USB_GADGETFS=m
CONFIG_USB_FILE_STORAGE=m
# CONFIG_USB_FILE_STORAGE_TEST is not set
+# CONFIG_USB_MASS_STORAGE is not set
CONFIG_USB_G_SERIAL=m
CONFIG_USB_MIDI_GADGET=m
# CONFIG_USB_G_PRINTER is not set
+# CONFIG_USB_CDC_COMPOSITE is not set
+# CONFIG_USB_G_NOKIA is not set
+# CONFIG_USB_G_MULTI is not set
+
+#
+# OTG and related infrastructure
+#
+# CONFIG_USB_GPIO_VBUS is not set
+# CONFIG_NOP_USB_XCEIV is not set
+# CONFIG_UWB is not set
# CONFIG_MMC is not set
# CONFIG_MEMSTICK is not set
CONFIG_NEW_LEDS=y
@@ -1596,21 +1776,33 @@ CONFIG_LEDS_CLASS=y
# LED drivers
#
CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_GPIO_PLATFORM=y
+# CONFIG_LEDS_LT3593 is not set
+CONFIG_LEDS_TRIGGERS=y
#
# LED Triggers
#
-CONFIG_LEDS_TRIGGERS=y
CONFIG_LEDS_TRIGGER_TIMER=y
CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+
+#
+# iptables trigger is under Netfilter config (LED target)
+#
+# CONFIG_ACCESSIBILITY is not set
# CONFIG_INFINIBAND is not set
CONFIG_RTC_LIB=y
# CONFIG_RTC_CLASS is not set
+# CONFIG_DMADEVICES is not set
+# CONFIG_AUXDISPLAY is not set
+# CONFIG_UIO is not set
#
-# Userspace I/O
+# TI VLYNQ
#
-# CONFIG_UIO is not set
+# CONFIG_STAGING is not set
#
# File systems
@@ -1621,10 +1813,11 @@ CONFIG_EXT2_FS_POSIX_ACL=y
CONFIG_EXT2_FS_SECURITY=y
# CONFIG_EXT2_FS_XIP is not set
CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
CONFIG_EXT3_FS_XATTR=y
CONFIG_EXT3_FS_POSIX_ACL=y
CONFIG_EXT3_FS_SECURITY=y
-# CONFIG_EXT4DEV_FS is not set
+# CONFIG_EXT4_FS is not set
CONFIG_JBD=y
# CONFIG_JBD_DEBUG is not set
CONFIG_FS_MBCACHE=y
@@ -1642,28 +1835,39 @@ CONFIG_JFS_SECURITY=y
CONFIG_FS_POSIX_ACL=y
CONFIG_XFS_FS=m
CONFIG_XFS_QUOTA=y
-CONFIG_XFS_SECURITY=y
CONFIG_XFS_POSIX_ACL=y
CONFIG_XFS_RT=y
+# CONFIG_XFS_DEBUG is not set
CONFIG_GFS2_FS=m
-CONFIG_GFS2_FS_LOCKING_NOLOCK=m
-CONFIG_GFS2_FS_LOCKING_DLM=m
+# CONFIG_GFS2_FS_LOCKING_DLM is not set
# CONFIG_OCFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
CONFIG_DNOTIFY=y
CONFIG_INOTIFY=y
CONFIG_INOTIFY_USER=y
CONFIG_QUOTA=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
CONFIG_PRINT_QUOTA_WARNING=y
+# CONFIG_QUOTA_DEBUG is not set
+CONFIG_QUOTA_TREE=m
CONFIG_QFMT_V1=m
CONFIG_QFMT_V2=m
CONFIG_QUOTACTL=y
CONFIG_AUTOFS_FS=m
CONFIG_AUTOFS4_FS=m
CONFIG_FUSE_FS=m
+# CONFIG_CUSE is not set
CONFIG_GENERIC_ACL=y
#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
# CD-ROM/DVD Filesystems
#
CONFIG_ISO9660_FS=m
@@ -1690,15 +1894,13 @@ CONFIG_NTFS_RW=y
CONFIG_PROC_FS=y
CONFIG_PROC_KCORE=y
CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
CONFIG_SYSFS=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
# CONFIG_HUGETLB_PAGE is not set
CONFIG_CONFIGFS_FS=m
-
-#
-# Miscellaneous filesystems
-#
+CONFIG_MISC_FILESYSTEMS=y
CONFIG_ADFS_FS=m
# CONFIG_ADFS_FS_RW is not set
CONFIG_AFFS_FS=m
@@ -1721,12 +1923,19 @@ CONFIG_JFFS2_ZLIB=y
# CONFIG_JFFS2_LZO is not set
CONFIG_JFFS2_RTIME=y
# CONFIG_JFFS2_RUBIN is not set
+# CONFIG_LOGFS is not set
CONFIG_CRAMFS=m
+# CONFIG_SQUASHFS is not set
CONFIG_VXFS_FS=m
CONFIG_MINIX_FS=m
+# CONFIG_OMFS_FS is not set
CONFIG_HPFS_FS=m
CONFIG_QNX4FS_FS=m
CONFIG_ROMFS_FS=m
+CONFIG_ROMFS_BACKED_BY_BLOCK=y
+# CONFIG_ROMFS_BACKED_BY_MTD is not set
+# CONFIG_ROMFS_BACKED_BY_BOTH is not set
+CONFIG_ROMFS_ON_BLOCK=y
CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
# CONFIG_UFS_FS_WRITE is not set
@@ -1736,13 +1945,12 @@ CONFIG_NFS_FS=m
CONFIG_NFS_V3=y
CONFIG_NFS_V3_ACL=y
CONFIG_NFS_V4=y
-# CONFIG_NFS_DIRECTIO is not set
+# CONFIG_NFS_V4_1 is not set
CONFIG_NFSD=m
CONFIG_NFSD_V2_ACL=y
CONFIG_NFSD_V3=y
CONFIG_NFSD_V3_ACL=y
CONFIG_NFSD_V4=y
-CONFIG_NFSD_TCP=y
CONFIG_LOCKD=m
CONFIG_LOCKD_V4=y
CONFIG_EXPORTFS=m
@@ -1750,10 +1958,10 @@ CONFIG_NFS_ACL_SUPPORT=m
CONFIG_NFS_COMMON=y
CONFIG_SUNRPC=m
CONFIG_SUNRPC_GSS=m
-CONFIG_SUNRPC_BIND34=y
CONFIG_RPCSEC_GSS_KRB5=m
CONFIG_RPCSEC_GSS_SPKM3=m
# CONFIG_SMB_FS is not set
+# CONFIG_CEPH_FS is not set
CONFIG_CIFS=m
# CONFIG_CIFS_STATS is not set
# CONFIG_CIFS_WEAK_PW_HASH is not set
@@ -1771,9 +1979,7 @@ CONFIG_NCPFS_OS2_NS=y
CONFIG_NCPFS_NLS=y
CONFIG_NCPFS_EXTRAS=y
CONFIG_CODA_FS=m
-# CONFIG_CODA_FS_OLD_API is not set
# CONFIG_AFS_FS is not set
-CONFIG_9P_FS=m
#
# Partition Types
@@ -1846,90 +2052,167 @@ CONFIG_TRACE_IRQFLAGS_SUPPORT=y
# CONFIG_PRINTK_TIME is not set
CONFIG_ENABLE_WARN_DEPRECATED=y
CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_UNUSED_SYMBOLS is not set
CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
# CONFIG_DEBUG_KERNEL is not set
+# CONFIG_DEBUG_MEMORY_INIT is not set
+# CONFIG_LKDTM is not set
+# CONFIG_SYSCTL_SYSCALL_CHECK is not set
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
+CONFIG_HAVE_DYNAMIC_FTRACE=y
+CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_TRACING_SUPPORT=y
+# CONFIG_FTRACE is not set
+# CONFIG_DYNAMIC_DEBUG is not set
# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+CONFIG_EARLY_PRINTK=y
# CONFIG_CMDLINE_BOOL is not set
+# CONFIG_SPINLOCK_TEST is not set
#
# Security options
#
# CONFIG_KEYS is not set
# CONFIG_SECURITY is not set
-# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+# CONFIG_SECURITYFS is not set
+# CONFIG_DEFAULT_SECURITY_SELINUX is not set
+# CONFIG_DEFAULT_SECURITY_SMACK is not set
+# CONFIG_DEFAULT_SECURITY_TOMOYO is not set
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_DEFAULT_SECURITY=""
CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+# CONFIG_CRYPTO_FIPS is not set
CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
CONFIG_CRYPTO_AEAD=m
+CONFIG_CRYPTO_AEAD2=y
CONFIG_CRYPTO_BLKCIPHER=m
-# CONFIG_CRYPTO_SEQIV is not set
+CONFIG_CRYPTO_BLKCIPHER2=y
CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG=m
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_PCOMP=y
CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
+CONFIG_CRYPTO_GF128MUL=m
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_WORKQUEUE=y
+# CONFIG_CRYPTO_CRYPTD is not set
+CONFIG_CRYPTO_AUTHENC=m
+CONFIG_CRYPTO_TEST=m
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_SEQIV is not set
+
+#
+# Block modes
+#
+CONFIG_CRYPTO_CBC=m
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+CONFIG_CRYPTO_ECB=m
+CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_XTS=m
+
+#
+# Hash modes
+#
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_NULL=m
+# CONFIG_CRYPTO_VMAC is not set
+
+#
+# Digest
+#
+CONFIG_CRYPTO_CRC32C=m
+# CONFIG_CRYPTO_GHASH is not set
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_MICHAEL_MIC=m
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
CONFIG_CRYPTO_SHA1=m
CONFIG_CRYPTO_SHA256=m
CONFIG_CRYPTO_SHA512=m
-CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_TGR192=m
-CONFIG_CRYPTO_GF128MUL=m
-CONFIG_CRYPTO_ECB=m
-CONFIG_CRYPTO_CBC=m
-CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_LRW=m
-CONFIG_CRYPTO_XTS=m
-# CONFIG_CRYPTO_CTR is not set
-# CONFIG_CRYPTO_GCM is not set
-# CONFIG_CRYPTO_CCM is not set
-# CONFIG_CRYPTO_CRYPTD is not set
-CONFIG_CRYPTO_DES=m
-CONFIG_CRYPTO_FCRYPT=m
-CONFIG_CRYPTO_BLOWFISH=m
-CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_TWOFISH_COMMON=m
-CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_WP512=m
+
+#
+# Ciphers
+#
CONFIG_CRYPTO_AES=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_ARC4=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAMELLIA=m
CONFIG_CRYPTO_CAST5=m
CONFIG_CRYPTO_CAST6=m
-CONFIG_CRYPTO_TEA=m
-CONFIG_CRYPTO_ARC4=m
+CONFIG_CRYPTO_DES=m
+CONFIG_CRYPTO_FCRYPT=m
CONFIG_CRYPTO_KHAZAD=m
-CONFIG_CRYPTO_ANUBIS=m
-CONFIG_CRYPTO_SEED=m
# CONFIG_CRYPTO_SALSA20 is not set
+CONFIG_CRYPTO_SEED=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_TWOFISH_COMMON=m
+
+#
+# Compression
+#
CONFIG_CRYPTO_DEFLATE=m
-CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_CRC32C=m
-CONFIG_CRYPTO_CAMELLIA=m
-CONFIG_CRYPTO_TEST=m
-CONFIG_CRYPTO_AUTHENC=m
+# CONFIG_CRYPTO_ZLIB is not set
# CONFIG_CRYPTO_LZO is not set
+
+#
+# Random Number Generation
+#
+CONFIG_CRYPTO_ANSI_CPRNG=m
CONFIG_CRYPTO_HW=y
# CONFIG_CRYPTO_DEV_HIFN_795X is not set
+# CONFIG_BINARY_PRINTF is not set
#
# Library routines
#
CONFIG_BITREVERSE=y
+CONFIG_GENERIC_FIND_LAST_BIT=y
CONFIG_CRC_CCITT=m
CONFIG_CRC16=m
+# CONFIG_CRC_T10DIF is not set
CONFIG_CRC_ITU_T=m
CONFIG_CRC32=y
CONFIG_CRC7=m
CONFIG_LIBCRC32C=m
CONFIG_AUDIT_GENERIC=y
-CONFIG_ZLIB_INFLATE=m
+CONFIG_ZLIB_INFLATE=y
CONFIG_ZLIB_DEFLATE=m
+CONFIG_DECOMPRESS_GZIP=y
+CONFIG_DECOMPRESS_LZMA=y
CONFIG_TEXTSEARCH=y
CONFIG_TEXTSEARCH_KMP=m
CONFIG_TEXTSEARCH_BM=m
CONFIG_TEXTSEARCH_FSM=m
-CONFIG_PLIST=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
+CONFIG_NLATTR=y
diff --git a/arch/mips/configs/mtx1_defconfig b/arch/mips/configs/mtx1_defconfig
index 144b94d9a6ad..cff8f4c0e57c 100644
--- a/arch/mips/configs/mtx1_defconfig
+++ b/arch/mips/configs/mtx1_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.23-rc8
-# Sun Sep 30 12:56:10 2007
+# Linux kernel version: 2.6.34-rc6
+# Sat May 1 13:39:10 2010
#
CONFIG_MIPS=y
@@ -9,20 +9,28 @@ CONFIG_MIPS=y
# Machine selection
#
CONFIG_MACH_ALCHEMY=y
+# CONFIG_AR7 is not set
+# CONFIG_BCM47XX is not set
+# CONFIG_BCM63XX is not set
# CONFIG_MIPS_COBALT is not set
# CONFIG_MACH_DECSTATION is not set
# CONFIG_MACH_JAZZ is not set
-# CONFIG_LEMOTE_FULONG is not set
+# CONFIG_LASAT is not set
+# CONFIG_MACH_LOONGSON is not set
# CONFIG_MIPS_MALTA is not set
# CONFIG_MIPS_SIM is not set
-# CONFIG_MARKEINS is not set
+# CONFIG_NEC_MARKEINS is not set
# CONFIG_MACH_VR41XX is not set
+# CONFIG_NXP_STB220 is not set
+# CONFIG_NXP_STB225 is not set
# CONFIG_PNX8550_JBS is not set
# CONFIG_PNX8550_STB810 is not set
# CONFIG_PMC_MSP is not set
# CONFIG_PMC_YOSEMITE is not set
+# CONFIG_POWERTV is not set
# CONFIG_SGI_IP22 is not set
# CONFIG_SGI_IP27 is not set
+# CONFIG_SGI_IP28 is not set
# CONFIG_SGI_IP32 is not set
# CONFIG_SIBYTE_CRHINE is not set
# CONFIG_SIBYTE_CARMEL is not set
@@ -33,10 +41,14 @@ CONFIG_MACH_ALCHEMY=y
# CONFIG_SIBYTE_SENTOSA is not set
# CONFIG_SIBYTE_BIGSUR is not set
# CONFIG_SNI_RM is not set
-# CONFIG_TOSHIBA_JMR3927 is not set
-# CONFIG_TOSHIBA_RBTX4927 is not set
-# CONFIG_TOSHIBA_RBTX4938 is not set
+# CONFIG_MACH_TX39XX is not set
+# CONFIG_MACH_TX49XX is not set
+# CONFIG_MIKROTIK_RB532 is not set
# CONFIG_WR_PPMC is not set
+# CONFIG_CAVIUM_OCTEON_SIMULATOR is not set
+# CONFIG_CAVIUM_OCTEON_REFERENCE_BOARD is not set
+CONFIG_ALCHEMY_GPIOINT_AU1000=y
+# CONFIG_ALCHEMY_GPIO_INDIRECT is not set
CONFIG_MIPS_MTX1=y
# CONFIG_MIPS_BOSPORUS is not set
# CONFIG_MIPS_DB1000 is not set
@@ -53,29 +65,38 @@ CONFIG_MIPS_MTX1=y
# CONFIG_MIPS_XXS1500 is not set
CONFIG_SOC_AU1500=y
CONFIG_SOC_AU1X00=y
+CONFIG_LOONGSON_UART_BASE=y
CONFIG_RWSEM_GENERIC_SPINLOCK=y
# CONFIG_ARCH_HAS_ILOG2_U32 is not set
# CONFIG_ARCH_HAS_ILOG2_U64 is not set
+CONFIG_ARCH_SUPPORTS_OPROFILE=y
CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_GENERIC_CLOCKEVENTS=y
CONFIG_GENERIC_TIME=y
-CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
-# CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ is not set
+CONFIG_GENERIC_CMOS_UPDATE=y
+CONFIG_SCHED_OMIT_FRAME_POINTER=y
+CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
+CONFIG_CEVT_R4K_LIB=y
+CONFIG_CSRC_R4K_LIB=y
CONFIG_DMA_NONCOHERENT=y
-CONFIG_DMA_NEED_PCI_MAP_STATE=y
-# CONFIG_HOTPLUG_CPU is not set
+CONFIG_NEED_DMA_MAP_STATE=y
+CONFIG_SYS_HAS_EARLY_PRINTK=y
# CONFIG_NO_IOPORT is not set
+CONFIG_GENERIC_GPIO=y
# CONFIG_CPU_BIG_ENDIAN is not set
CONFIG_CPU_LITTLE_ENDIAN=y
CONFIG_SYS_SUPPORTS_APM_EMULATION=y
CONFIG_SYS_SUPPORTS_LITTLE_ENDIAN=y
+CONFIG_IRQ_CPU=y
CONFIG_MIPS_L1_CACHE_SHIFT=5
#
# CPU selection
#
-# CONFIG_CPU_LOONGSON2 is not set
+# CONFIG_CPU_LOONGSON2E is not set
+# CONFIG_CPU_LOONGSON2F is not set
CONFIG_CPU_MIPS32_R1=y
# CONFIG_CPU_MIPS32_R2 is not set
# CONFIG_CPU_MIPS64_R1 is not set
@@ -88,6 +109,7 @@ CONFIG_CPU_MIPS32_R1=y
# CONFIG_CPU_TX49XX is not set
# CONFIG_CPU_R5000 is not set
# CONFIG_CPU_R5432 is not set
+# CONFIG_CPU_R5500 is not set
# CONFIG_CPU_R6000 is not set
# CONFIG_CPU_NEVADA is not set
# CONFIG_CPU_R8000 is not set
@@ -95,11 +117,14 @@ CONFIG_CPU_MIPS32_R1=y
# CONFIG_CPU_RM7000 is not set
# CONFIG_CPU_RM9000 is not set
# CONFIG_CPU_SB1 is not set
+# CONFIG_CPU_CAVIUM_OCTEON is not set
+CONFIG_SYS_SUPPORTS_ZBOOT=y
CONFIG_SYS_HAS_CPU_MIPS32_R1=y
CONFIG_CPU_MIPS32=y
CONFIG_CPU_MIPSR1=y
CONFIG_SYS_SUPPORTS_32BIT_KERNEL=y
CONFIG_CPU_SUPPORTS_32BIT_KERNEL=y
+CONFIG_HARDWARE_WATCHPOINTS=y
#
# Kernel type
@@ -109,28 +134,36 @@ CONFIG_32BIT=y
CONFIG_PAGE_SIZE_4KB=y
# CONFIG_PAGE_SIZE_8KB is not set
# CONFIG_PAGE_SIZE_16KB is not set
+# CONFIG_PAGE_SIZE_32KB is not set
# CONFIG_PAGE_SIZE_64KB is not set
CONFIG_CPU_HAS_PREFETCH=y
CONFIG_MIPS_MT_DISABLED=y
# CONFIG_MIPS_MT_SMP is not set
# CONFIG_MIPS_MT_SMTC is not set
CONFIG_64BIT_PHYS_ADDR=y
+CONFIG_ARCH_PHYS_ADDR_T_64BIT=y
CONFIG_CPU_HAS_SYNC=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_CPU_SUPPORTS_HIGHMEM=y
CONFIG_ARCH_FLATMEM_ENABLE=y
+CONFIG_ARCH_POPULATES_NODE_MAP=y
CONFIG_SELECT_MEMORY_MODEL=y
CONFIG_FLATMEM_MANUAL=y
# CONFIG_DISCONTIGMEM_MANUAL is not set
# CONFIG_SPARSEMEM_MANUAL is not set
CONFIG_FLATMEM=y
CONFIG_FLAT_NODE_MEM_MAP=y
-# CONFIG_SPARSEMEM_STATIC is not set
+CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
-CONFIG_RESOURCES_64BIT=y
+CONFIG_PHYS_ADDR_T_64BIT=y
CONFIG_ZONE_DMA_FLAG=0
CONFIG_VIRT_TO_BUS=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+# CONFIG_NO_HZ is not set
+# CONFIG_HIGH_RES_TIMERS is not set
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
# CONFIG_HZ_48 is not set
# CONFIG_HZ_100 is not set
# CONFIG_HZ_128 is not set
@@ -148,6 +181,7 @@ CONFIG_SECCOMP=y
CONFIG_LOCKDEP_SUPPORT=y
CONFIG_STACKTRACE_SUPPORT=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
#
# General setup
@@ -157,23 +191,49 @@ CONFIG_BROKEN_ON_SMP=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_LOCALVERSION=""
# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_BZIP2=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_LZO=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_LZO is not set
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_POSIX_MQUEUE_SYSCTL=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
# CONFIG_TASKSTATS is not set
-# CONFIG_USER_NS is not set
CONFIG_AUDIT=y
+
+#
+# RCU Subsystem
+#
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_TINY_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
+# CONFIG_TREE_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=17
-CONFIG_SYSFS_DEPRECATED=y
+# CONFIG_CGROUPS is not set
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
CONFIG_RELAY=y
+# CONFIG_NAMESPACES is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
+# CONFIG_RD_LZO is not set
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
CONFIG_EMBEDDED=y
CONFIG_SYSCTL_SYSCALL=y
CONFIG_KALLSYMS=y
@@ -182,61 +242,104 @@ CONFIG_HOTPLUG=y
CONFIG_PRINTK=y
CONFIG_BUG=y
CONFIG_ELF_CORE=y
+CONFIG_PCSPKR_PLATFORM=y
CONFIG_BASE_FULL=y
CONFIG_FUTEX=y
-CONFIG_ANON_INODES=y
CONFIG_EPOLL=y
CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
+CONFIG_AIO=y
+
+#
+# Kernel Performance Events And Counters
+#
CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_PCI_QUIRKS=y
+CONFIG_COMPAT_BRK=y
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=m
+CONFIG_HAVE_OPROFILE=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
+CONFIG_SLOW_WORK=y
+# CONFIG_SLOW_WORK_DEBUG is not set
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
+CONFIG_SLABINFO=y
CONFIG_RT_MUTEXES=y
-# CONFIG_TINY_SHMEM is not set
CONFIG_BASE_SMALL=0
CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
CONFIG_MODULE_UNLOAD=y
# CONFIG_MODULE_FORCE_UNLOAD is not set
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
-CONFIG_KMOD=y
CONFIG_BLOCK=y
-CONFIG_LBD=y
-# CONFIG_BLK_DEV_IO_TRACE is not set
-# CONFIG_LSF is not set
+CONFIG_LBDAF=y
# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
#
# IO Schedulers
#
CONFIG_IOSCHED_NOOP=y
-CONFIG_IOSCHED_AS=y
CONFIG_IOSCHED_DEADLINE=y
CONFIG_IOSCHED_CFQ=y
-# CONFIG_DEFAULT_AS is not set
# CONFIG_DEFAULT_DEADLINE is not set
CONFIG_DEFAULT_CFQ=y
# CONFIG_DEFAULT_NOOP is not set
CONFIG_DEFAULT_IOSCHED="cfq"
+# CONFIG_INLINE_SPIN_TRYLOCK is not set
+# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK is not set
+# CONFIG_INLINE_SPIN_LOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
+CONFIG_INLINE_SPIN_UNLOCK=y
+# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
+CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
+# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_READ_TRYLOCK is not set
+# CONFIG_INLINE_READ_LOCK is not set
+# CONFIG_INLINE_READ_LOCK_BH is not set
+# CONFIG_INLINE_READ_LOCK_IRQ is not set
+# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
+CONFIG_INLINE_READ_UNLOCK=y
+# CONFIG_INLINE_READ_UNLOCK_BH is not set
+CONFIG_INLINE_READ_UNLOCK_IRQ=y
+# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_WRITE_TRYLOCK is not set
+# CONFIG_INLINE_WRITE_LOCK is not set
+# CONFIG_INLINE_WRITE_LOCK_BH is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
+CONFIG_INLINE_WRITE_UNLOCK=y
+# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
+CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
+# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
+# CONFIG_MUTEX_SPIN_ON_OWNER is not set
+CONFIG_FREEZER=y
#
# Bus options (PCI, PCMCIA, EISA, ISA, TC)
#
CONFIG_HW_HAS_PCI=y
CONFIG_PCI=y
+CONFIG_PCI_DOMAINS=y
# CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_PCI_STUB is not set
+# CONFIG_PCI_IOV is not set
CONFIG_MMU=y
-
-#
-# PCCARD (PCMCIA/CardBus) support
-#
CONFIG_PCCARD=m
-# CONFIG_PCMCIA_DEBUG is not set
CONFIG_PCMCIA=m
CONFIG_PCMCIA_LOAD_CIS=y
-CONFIG_PCMCIA_IOCTL=y
CONFIG_CARDBUS=y
#
@@ -251,6 +354,7 @@ CONFIG_YENTA_TOSHIBA=y
CONFIG_PD6729=m
CONFIG_I82092=m
# CONFIG_PCMCIA_AU1X00 is not set
+# CONFIG_PCMCIA_ALCHEMY_DEVBOARD is not set
CONFIG_PCCARD_NONSTATIC=m
# CONFIG_HOTPLUG_PCI is not set
@@ -258,35 +362,38 @@ CONFIG_PCCARD_NONSTATIC=m
# Executable file formats
#
CONFIG_BINFMT_ELF=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_HAVE_AOUT is not set
CONFIG_BINFMT_MISC=m
CONFIG_TRAD_SIGNALS=y
#
# Power management options
#
+CONFIG_ARCH_HIBERNATION_POSSIBLE=y
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
CONFIG_PM=y
-# CONFIG_PM_LEGACY is not set
# CONFIG_PM_DEBUG is not set
CONFIG_PM_SLEEP=y
-CONFIG_SUSPEND_UP_POSSIBLE=y
CONFIG_SUSPEND=y
+CONFIG_SUSPEND_FREEZER=y
+# CONFIG_HIBERNATION is not set
# CONFIG_APM_EMULATION is not set
-
-#
-# Networking
-#
+# CONFIG_PM_RUNTIME is not set
+CONFIG_PM_OPS=y
CONFIG_NET=y
#
# Networking options
#
CONFIG_PACKET=m
-CONFIG_PACKET_MMAP=y
CONFIG_UNIX=y
CONFIG_XFRM=y
CONFIG_XFRM_USER=m
# CONFIG_XFRM_SUB_POLICY is not set
# CONFIG_XFRM_MIGRATE is not set
+# CONFIG_XFRM_STATISTICS is not set
+CONFIG_XFRM_IPCOMP=m
CONFIG_NET_KEY=m
# CONFIG_NET_KEY_MIGRATE is not set
CONFIG_INET=y
@@ -315,42 +422,13 @@ CONFIG_INET_TUNNEL=m
CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
CONFIG_INET_XFRM_MODE_BEET=m
+CONFIG_INET_LRO=y
CONFIG_INET_DIAG=y
CONFIG_INET_TCP_DIAG=y
# CONFIG_TCP_CONG_ADVANCED is not set
CONFIG_TCP_CONG_CUBIC=y
CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_TCP_MD5SIG is not set
-CONFIG_IP_VS=m
-# CONFIG_IP_VS_DEBUG is not set
-CONFIG_IP_VS_TAB_BITS=12
-
-#
-# IPVS transport protocol load balancing support
-#
-CONFIG_IP_VS_PROTO_TCP=y
-CONFIG_IP_VS_PROTO_UDP=y
-CONFIG_IP_VS_PROTO_ESP=y
-CONFIG_IP_VS_PROTO_AH=y
-
-#
-# IPVS scheduler
-#
-CONFIG_IP_VS_RR=m
-CONFIG_IP_VS_WRR=m
-CONFIG_IP_VS_LC=m
-CONFIG_IP_VS_WLC=m
-CONFIG_IP_VS_LBLC=m
-CONFIG_IP_VS_LBLCR=m
-CONFIG_IP_VS_DH=m
-CONFIG_IP_VS_SH=m
-CONFIG_IP_VS_SED=m
-CONFIG_IP_VS_NQ=m
-
-#
-# IPVS application helper
-#
-CONFIG_IP_VS_FTP=m
CONFIG_IPV6=m
CONFIG_IPV6_PRIVACY=y
# CONFIG_IPV6_ROUTER_PREF is not set
@@ -366,12 +444,15 @@ CONFIG_INET6_XFRM_MODE_TUNNEL=m
CONFIG_INET6_XFRM_MODE_BEET=m
CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
CONFIG_IPV6_SIT=m
+# CONFIG_IPV6_SIT_6RD is not set
+CONFIG_IPV6_NDISC_NODETYPE=y
CONFIG_IPV6_TUNNEL=m
# CONFIG_IPV6_MULTIPLE_TABLES is not set
-# CONFIG_NETLABEL is not set
+# CONFIG_IPV6_MROUTE is not set
CONFIG_NETWORK_SECMARK=y
CONFIG_NETFILTER=y
# CONFIG_NETFILTER_DEBUG is not set
+CONFIG_NETFILTER_ADVANCED=y
CONFIG_BRIDGE_NETFILTER=y
#
@@ -380,57 +461,97 @@ CONFIG_BRIDGE_NETFILTER=y
CONFIG_NETFILTER_NETLINK=m
CONFIG_NETFILTER_NETLINK_QUEUE=m
CONFIG_NETFILTER_NETLINK_LOG=m
-# CONFIG_NF_CONNTRACK_ENABLED is not set
# CONFIG_NF_CONNTRACK is not set
+# CONFIG_NETFILTER_TPROXY is not set
CONFIG_NETFILTER_XTABLES=m
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_HL=m
+# CONFIG_NETFILTER_XT_TARGET_LED is not set
CONFIG_NETFILTER_XT_TARGET_MARK=m
-CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
# CONFIG_NETFILTER_XT_TARGET_NFLOG is not set
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+# CONFIG_NETFILTER_XT_TARGET_RATEEST is not set
# CONFIG_NETFILTER_XT_TARGET_TRACE is not set
CONFIG_NETFILTER_XT_TARGET_SECMARK=m
# CONFIG_NETFILTER_XT_TARGET_TCPMSS is not set
+# CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP is not set
CONFIG_NETFILTER_XT_MATCH_COMMENT=m
CONFIG_NETFILTER_XT_MATCH_DCCP=m
CONFIG_NETFILTER_XT_MATCH_DSCP=m
CONFIG_NETFILTER_XT_MATCH_ESP=m
+# CONFIG_NETFILTER_XT_MATCH_HASHLIMIT is not set
+CONFIG_NETFILTER_XT_MATCH_HL=m
+# CONFIG_NETFILTER_XT_MATCH_IPRANGE is not set
CONFIG_NETFILTER_XT_MATCH_LENGTH=m
CONFIG_NETFILTER_XT_MATCH_LIMIT=m
CONFIG_NETFILTER_XT_MATCH_MAC=m
CONFIG_NETFILTER_XT_MATCH_MARK=m
-CONFIG_NETFILTER_XT_MATCH_POLICY=m
CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+# CONFIG_NETFILTER_XT_MATCH_OWNER is not set
+CONFIG_NETFILTER_XT_MATCH_POLICY=m
CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
CONFIG_NETFILTER_XT_MATCH_QUOTA=m
+# CONFIG_NETFILTER_XT_MATCH_RATEEST is not set
CONFIG_NETFILTER_XT_MATCH_REALM=m
+# CONFIG_NETFILTER_XT_MATCH_RECENT is not set
CONFIG_NETFILTER_XT_MATCH_SCTP=m
CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
CONFIG_NETFILTER_XT_MATCH_STRING=m
CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
+# CONFIG_NETFILTER_XT_MATCH_TIME is not set
# CONFIG_NETFILTER_XT_MATCH_U32 is not set
-# CONFIG_NETFILTER_XT_MATCH_HASHLIMIT is not set
+# CONFIG_NETFILTER_XT_MATCH_OSF is not set
+CONFIG_IP_VS=m
+# CONFIG_IP_VS_IPV6 is not set
+# CONFIG_IP_VS_DEBUG is not set
+CONFIG_IP_VS_TAB_BITS=12
+
+#
+# IPVS transport protocol load balancing support
+#
+CONFIG_IP_VS_PROTO_TCP=y
+CONFIG_IP_VS_PROTO_UDP=y
+CONFIG_IP_VS_PROTO_AH_ESP=y
+CONFIG_IP_VS_PROTO_ESP=y
+CONFIG_IP_VS_PROTO_AH=y
+# CONFIG_IP_VS_PROTO_SCTP is not set
+
+#
+# IPVS scheduler
+#
+CONFIG_IP_VS_RR=m
+CONFIG_IP_VS_WRR=m
+CONFIG_IP_VS_LC=m
+CONFIG_IP_VS_WLC=m
+CONFIG_IP_VS_LBLC=m
+CONFIG_IP_VS_LBLCR=m
+CONFIG_IP_VS_DH=m
+CONFIG_IP_VS_SH=m
+CONFIG_IP_VS_SED=m
+CONFIG_IP_VS_NQ=m
+
+#
+# IPVS application helper
+#
+CONFIG_IP_VS_FTP=m
#
# IP: Netfilter Configuration
#
+# CONFIG_NF_DEFRAG_IPV4 is not set
CONFIG_IP_NF_QUEUE=m
CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_IPRANGE=m
-CONFIG_IP_NF_MATCH_TOS=m
-CONFIG_IP_NF_MATCH_RECENT=m
-CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_ADDRTYPE=m
CONFIG_IP_NF_MATCH_AH=m
+CONFIG_IP_NF_MATCH_ECN=m
CONFIG_IP_NF_MATCH_TTL=m
-CONFIG_IP_NF_MATCH_OWNER=m
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
CONFIG_IP_NF_TARGET_LOG=m
CONFIG_IP_NF_TARGET_ULOG=m
CONFIG_IP_NF_MANGLE=m
-CONFIG_IP_NF_TARGET_TOS=m
CONFIG_IP_NF_TARGET_ECN=m
CONFIG_IP_NF_TARGET_TTL=m
CONFIG_IP_NF_RAW=m
@@ -439,34 +560,29 @@ CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
#
-# IPv6: Netfilter Configuration (EXPERIMENTAL)
+# IPv6: Netfilter Configuration
#
CONFIG_IP6_NF_QUEUE=m
CONFIG_IP6_NF_IPTABLES=m
-CONFIG_IP6_NF_MATCH_RT=m
-CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_AH=m
+CONFIG_IP6_NF_MATCH_EUI64=m
CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_OPTS=m
CONFIG_IP6_NF_MATCH_HL=m
-CONFIG_IP6_NF_MATCH_OWNER=m
CONFIG_IP6_NF_MATCH_IPV6HEADER=m
-CONFIG_IP6_NF_MATCH_AH=m
# CONFIG_IP6_NF_MATCH_MH is not set
-CONFIG_IP6_NF_MATCH_EUI64=m
-CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_MATCH_RT=m
+CONFIG_IP6_NF_TARGET_HL=m
CONFIG_IP6_NF_TARGET_LOG=m
+CONFIG_IP6_NF_FILTER=m
CONFIG_IP6_NF_TARGET_REJECT=m
CONFIG_IP6_NF_MANGLE=m
-CONFIG_IP6_NF_TARGET_HL=m
CONFIG_IP6_NF_RAW=m
#
# DECnet: Netfilter Configuration
#
CONFIG_DECNET_NF_GRABULATOR=m
-
-#
-# Bridge: Netfilter Configuration
-#
CONFIG_BRIDGE_NF_EBTABLES=m
CONFIG_BRIDGE_EBT_BROUTE=m
CONFIG_BRIDGE_EBT_T_FILTER=m
@@ -475,6 +591,7 @@ CONFIG_BRIDGE_EBT_802_3=m
CONFIG_BRIDGE_EBT_AMONG=m
CONFIG_BRIDGE_EBT_ARP=m
CONFIG_BRIDGE_EBT_IP=m
+# CONFIG_BRIDGE_EBT_IP6 is not set
CONFIG_BRIDGE_EBT_LIMIT=m
CONFIG_BRIDGE_EBT_MARK=m
CONFIG_BRIDGE_EBT_PKTTYPE=m
@@ -487,25 +604,25 @@ CONFIG_BRIDGE_EBT_REDIRECT=m
CONFIG_BRIDGE_EBT_SNAT=m
CONFIG_BRIDGE_EBT_LOG=m
CONFIG_BRIDGE_EBT_ULOG=m
+# CONFIG_BRIDGE_EBT_NFLOG is not set
CONFIG_IP_DCCP=m
CONFIG_INET_DCCP_DIAG=m
-CONFIG_IP_DCCP_ACKVEC=y
#
# DCCP CCIDs Configuration (EXPERIMENTAL)
#
-CONFIG_IP_DCCP_CCID2=m
# CONFIG_IP_DCCP_CCID2_DEBUG is not set
-CONFIG_IP_DCCP_CCID3=m
-CONFIG_IP_DCCP_TFRC_LIB=m
+CONFIG_IP_DCCP_CCID3=y
# CONFIG_IP_DCCP_CCID3_DEBUG is not set
CONFIG_IP_DCCP_CCID3_RTO=100
+CONFIG_IP_DCCP_TFRC_LIB=y
CONFIG_IP_SCTP=m
# CONFIG_SCTP_DBG_MSG is not set
# CONFIG_SCTP_DBG_OBJCNT is not set
# CONFIG_SCTP_HMAC_NONE is not set
# CONFIG_SCTP_HMAC_SHA1 is not set
CONFIG_SCTP_HMAC_MD5=y
+# CONFIG_RDS is not set
CONFIG_TIPC=m
# CONFIG_TIPC_ADVANCED is not set
# CONFIG_TIPC_DEBUG is not set
@@ -516,8 +633,12 @@ CONFIG_ATM_LANE=m
CONFIG_ATM_MPOA=m
CONFIG_ATM_BR2684=m
# CONFIG_ATM_BR2684_IPFILTER is not set
+CONFIG_STP=m
CONFIG_BRIDGE=m
+CONFIG_BRIDGE_IGMP_SNOOPING=y
+# CONFIG_NET_DSA is not set
CONFIG_VLAN_8021Q=m
+# CONFIG_VLAN_8021Q_GVRP is not set
CONFIG_DECNET=m
# CONFIG_DECNET_ROUTER is not set
CONFIG_LLC=y
@@ -535,12 +656,9 @@ CONFIG_ECONET=m
CONFIG_ECONET_AUNUDP=y
CONFIG_ECONET_NATIVE=y
CONFIG_WAN_ROUTER=m
-
-#
-# QoS and/or fair queueing
-#
+# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
CONFIG_NET_SCHED=y
-CONFIG_NET_SCH_FIFO=y
#
# Queueing/Scheduling
@@ -550,7 +668,7 @@ CONFIG_NET_SCH_HTB=m
CONFIG_NET_SCH_HFSC=m
CONFIG_NET_SCH_ATM=m
CONFIG_NET_SCH_PRIO=m
-# CONFIG_NET_SCH_RR is not set
+# CONFIG_NET_SCH_MULTIQ is not set
CONFIG_NET_SCH_RED=m
CONFIG_NET_SCH_SFQ=m
CONFIG_NET_SCH_TEQL=m
@@ -558,6 +676,7 @@ CONFIG_NET_SCH_TBF=m
CONFIG_NET_SCH_GRED=m
CONFIG_NET_SCH_DSMARK=m
CONFIG_NET_SCH_NETEM=m
+# CONFIG_NET_SCH_DRR is not set
CONFIG_NET_SCH_INGRESS=m
#
@@ -574,6 +693,7 @@ CONFIG_NET_CLS_U32=m
CONFIG_CLS_U32_MARK=y
CONFIG_NET_CLS_RSVP=m
CONFIG_NET_CLS_RSVP6=m
+# CONFIG_NET_CLS_FLOW is not set
CONFIG_NET_EMATCH=y
CONFIG_NET_EMATCH_STACK=32
CONFIG_NET_EMATCH_CMP=m
@@ -586,10 +706,13 @@ CONFIG_NET_ACT_POLICE=y
# CONFIG_NET_ACT_GACT is not set
# CONFIG_NET_ACT_MIRRED is not set
# CONFIG_NET_ACT_IPT is not set
+# CONFIG_NET_ACT_NAT is not set
# CONFIG_NET_ACT_PEDIT is not set
# CONFIG_NET_ACT_SIMP is not set
-CONFIG_NET_CLS_POLICE=y
+# CONFIG_NET_ACT_SKBEDIT is not set
# CONFIG_NET_CLS_IND is not set
+CONFIG_NET_SCH_FIFO=y
+# CONFIG_DCB is not set
#
# Network testing
@@ -613,9 +736,8 @@ CONFIG_6PACK=m
CONFIG_BPQETHER=m
CONFIG_BAYCOM_SER_FDX=m
CONFIG_BAYCOM_SER_HDX=m
-CONFIG_BAYCOM_PAR=m
-CONFIG_BAYCOM_EPP=m
CONFIG_YAM=m
+# CONFIG_CAN is not set
CONFIG_IRDA=m
#
@@ -657,15 +779,8 @@ CONFIG_MCP2120_DONGLE=m
CONFIG_OLD_BELKIN_DONGLE=m
CONFIG_ACT200L_DONGLE=m
# CONFIG_KINGSUN_DONGLE is not set
-
-#
-# Old SIR device drivers
-#
-# CONFIG_IRPORT_SIR is not set
-
-#
-# Old Serial dongle support
-#
+# CONFIG_KSDAZZLE_DONGLE is not set
+# CONFIG_KS959_DONGLE is not set
#
# FIR device drivers
@@ -683,17 +798,17 @@ CONFIG_BT_RFCOMM_TTY=y
CONFIG_BT_BNEP=m
CONFIG_BT_BNEP_MC_FILTER=y
CONFIG_BT_BNEP_PROTO_FILTER=y
-CONFIG_BT_CMTP=m
CONFIG_BT_HIDP=m
#
# Bluetooth device drivers
#
-CONFIG_BT_HCIUSB=m
-CONFIG_BT_HCIUSB_SCO=y
+# CONFIG_BT_HCIBTUSB is not set
+# CONFIG_BT_HCIBTSDIO is not set
CONFIG_BT_HCIUART=m
CONFIG_BT_HCIUART_H4=y
CONFIG_BT_HCIUART_BCSP=y
+# CONFIG_BT_HCIUART_LL is not set
CONFIG_BT_HCIBCM203X=m
CONFIG_BT_HCIBPA10X=m
CONFIG_BT_HCIBFUSB=m
@@ -702,24 +817,19 @@ CONFIG_BT_HCIBT3C=m
CONFIG_BT_HCIBLUECARD=m
CONFIG_BT_HCIBTUART=m
CONFIG_BT_HCIVHCI=m
+# CONFIG_BT_MRVL is not set
CONFIG_AF_RXRPC=m
# CONFIG_AF_RXRPC_DEBUG is not set
# CONFIG_RXKAD is not set
CONFIG_FIB_RULES=y
+CONFIG_WIRELESS=y
+# CONFIG_CFG80211 is not set
+# CONFIG_LIB80211 is not set
#
-# Wireless
+# CFG80211 needs to be enabled for MAC80211
#
-# CONFIG_CFG80211 is not set
-CONFIG_WIRELESS_EXT=y
-# CONFIG_MAC80211 is not set
-CONFIG_IEEE80211=m
-# CONFIG_IEEE80211_DEBUG is not set
-CONFIG_IEEE80211_CRYPT_WEP=m
-CONFIG_IEEE80211_CRYPT_CCMP=m
-CONFIG_IEEE80211_CRYPT_TKIP=m
-CONFIG_IEEE80211_SOFTMAC=m
-# CONFIG_IEEE80211_SOFTMAC_DEBUG is not set
+# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
# CONFIG_NET_9P is not set
@@ -730,40 +840,43 @@ CONFIG_IEEE80211_SOFTMAC=m
#
# Generic Driver Options
#
+CONFIG_UEVENT_HELPER_PATH=""
+# CONFIG_DEVTMPFS is not set
CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
CONFIG_FW_LOADER=y
+CONFIG_FIRMWARE_IN_KERNEL=y
+CONFIG_EXTRA_FIRMWARE=""
# CONFIG_SYS_HYPERVISOR is not set
CONFIG_CONNECTOR=m
-CONFIG_MTD=m
+CONFIG_MTD=y
# CONFIG_MTD_DEBUG is not set
-CONFIG_MTD_CONCAT=m
+# CONFIG_MTD_TESTS is not set
+# CONFIG_MTD_CONCAT is not set
CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_REDBOOT_PARTS=m
-CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-1
-# CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED is not set
-# CONFIG_MTD_REDBOOT_PARTS_READONLY is not set
+# CONFIG_MTD_REDBOOT_PARTS is not set
+# CONFIG_MTD_CMDLINE_PARTS is not set
+# CONFIG_MTD_AR7_PARTS is not set
#
# User Modules And Translation Layers
#
-CONFIG_MTD_CHAR=m
-CONFIG_MTD_BLKDEVS=m
-CONFIG_MTD_BLOCK=m
-CONFIG_MTD_BLOCK_RO=m
-CONFIG_FTL=m
-CONFIG_NFTL=m
-CONFIG_NFTL_RW=y
-CONFIG_INFTL=m
-CONFIG_RFD_FTL=m
-CONFIG_SSFDC=m
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLKDEVS=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+# CONFIG_MTD_OOPS is not set
#
# RAM/ROM/Flash chip drivers
#
-CONFIG_MTD_CFI=m
-CONFIG_MTD_JEDECPROBE=m
-CONFIG_MTD_GEN_PROBE=m
+CONFIG_MTD_CFI=y
+# CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_GEN_PROBE=y
# CONFIG_MTD_CFI_ADV_OPTIONS is not set
CONFIG_MTD_MAP_BANK_WIDTH_1=y
CONFIG_MTD_MAP_BANK_WIDTH_2=y
@@ -775,206 +888,81 @@ CONFIG_MTD_CFI_I1=y
CONFIG_MTD_CFI_I2=y
# CONFIG_MTD_CFI_I4 is not set
# CONFIG_MTD_CFI_I8 is not set
-CONFIG_MTD_CFI_INTELEXT=m
-CONFIG_MTD_CFI_AMDSTD=m
-CONFIG_MTD_CFI_STAA=m
-CONFIG_MTD_CFI_UTIL=m
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+# CONFIG_MTD_CFI_STAA is not set
+CONFIG_MTD_CFI_UTIL=y
CONFIG_MTD_RAM=m
-CONFIG_MTD_ROM=m
-CONFIG_MTD_ABSENT=m
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
#
# Mapping drivers for chip access
#
CONFIG_MTD_COMPLEX_MAPPINGS=y
-CONFIG_MTD_PHYSMAP=m
-CONFIG_MTD_PHYSMAP_START=0x8000000
-CONFIG_MTD_PHYSMAP_LEN=0x4000000
-CONFIG_MTD_PHYSMAP_BANKWIDTH=2
-# CONFIG_MTD_ALCHEMY is not set
-# CONFIG_MTD_MTX1 is not set
-CONFIG_MTD_PCI=m
-CONFIG_MTD_PLATRAM=m
+CONFIG_MTD_PHYSMAP=y
+# CONFIG_MTD_PHYSMAP_COMPAT is not set
+# CONFIG_MTD_PCI is not set
+# CONFIG_MTD_GPIO_ADDR is not set
+# CONFIG_MTD_INTEL_VR_NOR is not set
+# CONFIG_MTD_PLATRAM is not set
#
# Self-contained MTD device drivers
#
-CONFIG_MTD_PMC551=m
-# CONFIG_MTD_PMC551_BUGFIX is not set
-# CONFIG_MTD_PMC551_DEBUG is not set
-CONFIG_MTD_DATAFLASH=m
-CONFIG_MTD_M25P80=m
-CONFIG_MTD_SLRAM=m
-CONFIG_MTD_PHRAM=m
-CONFIG_MTD_MTDRAM=m
-CONFIG_MTDRAM_TOTAL_SIZE=4096
-CONFIG_MTDRAM_ERASE_SIZE=128
-CONFIG_MTD_BLOCK2MTD=m
+# CONFIG_MTD_PMC551 is not set
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
#
# Disk-On-Chip Device Drivers
#
-CONFIG_MTD_DOC2000=m
-CONFIG_MTD_DOC2001=m
-CONFIG_MTD_DOC2001PLUS=m
-CONFIG_MTD_DOCPROBE=m
-CONFIG_MTD_DOCECC=m
-# CONFIG_MTD_DOCPROBE_ADVANCED is not set
-CONFIG_MTD_DOCPROBE_ADDRESS=0
-CONFIG_MTD_NAND=m
-# CONFIG_MTD_NAND_VERIFY_WRITE is not set
-# CONFIG_MTD_NAND_ECC_SMC is not set
-# CONFIG_MTD_NAND_MUSEUM_IDS is not set
-CONFIG_MTD_NAND_IDS=m
-CONFIG_MTD_NAND_DISKONCHIP=m
-# CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADVANCED is not set
-CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS=0
-# CONFIG_MTD_NAND_DISKONCHIP_BBTWRITE is not set
-# CONFIG_MTD_NAND_CAFE is not set
-CONFIG_MTD_NAND_NANDSIM=m
-# CONFIG_MTD_NAND_PLATFORM is not set
-CONFIG_MTD_ONENAND=m
-CONFIG_MTD_ONENAND_VERIFY_WRITE=y
-# CONFIG_MTD_ONENAND_OTP is not set
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+# CONFIG_MTD_NAND is not set
+# CONFIG_MTD_ONENAND is not set
+
+#
+# LPDDR flash memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
#
# UBI - Unsorted block images
#
# CONFIG_MTD_UBI is not set
-CONFIG_PARPORT=m
-CONFIG_PARPORT_PC=m
-CONFIG_PARPORT_SERIAL=m
-CONFIG_PARPORT_PC_FIFO=y
-CONFIG_PARPORT_PC_SUPERIO=y
-CONFIG_PARPORT_PC_PCMCIA=m
-# CONFIG_PARPORT_GSC is not set
-CONFIG_PARPORT_AX88796=m
-CONFIG_PARPORT_1284=y
-CONFIG_PARPORT_NOT_PC=y
+# CONFIG_PARPORT is not set
CONFIG_BLK_DEV=y
-CONFIG_PARIDE=m
-
-#
-# Parallel IDE high-level drivers
-#
-CONFIG_PARIDE_PD=m
-CONFIG_PARIDE_PCD=m
-CONFIG_PARIDE_PF=m
-CONFIG_PARIDE_PT=m
-CONFIG_PARIDE_PG=m
-
-#
-# Parallel IDE protocol modules
-#
-CONFIG_PARIDE_ATEN=m
-CONFIG_PARIDE_BPCK=m
-CONFIG_PARIDE_BPCK6=m
-CONFIG_PARIDE_COMM=m
-CONFIG_PARIDE_DSTR=m
-CONFIG_PARIDE_FIT2=m
-CONFIG_PARIDE_FIT3=m
-CONFIG_PARIDE_EPAT=m
-CONFIG_PARIDE_EPATC8=y
-CONFIG_PARIDE_EPIA=m
-CONFIG_PARIDE_FRIQ=m
-CONFIG_PARIDE_FRPW=m
-CONFIG_PARIDE_KBIC=m
-CONFIG_PARIDE_KTTI=m
-CONFIG_PARIDE_ON20=m
-CONFIG_PARIDE_ON26=m
-CONFIG_BLK_CPQ_DA=m
-CONFIG_BLK_CPQ_CISS_DA=m
-CONFIG_CISS_SCSI_TAPE=y
-CONFIG_BLK_DEV_DAC960=m
-CONFIG_BLK_DEV_UMEM=m
+# CONFIG_BLK_CPQ_DA is not set
+# CONFIG_BLK_CPQ_CISS_DA is not set
+# CONFIG_BLK_DEV_DAC960 is not set
+# CONFIG_BLK_DEV_UMEM is not set
# CONFIG_BLK_DEV_COW_COMMON is not set
-CONFIG_BLK_DEV_LOOP=m
-CONFIG_BLK_DEV_CRYPTOLOOP=m
+# CONFIG_BLK_DEV_LOOP is not set
+# CONFIG_BLK_DEV_DRBD is not set
CONFIG_BLK_DEV_NBD=m
-CONFIG_BLK_DEV_SX8=m
+# CONFIG_BLK_DEV_SX8 is not set
# CONFIG_BLK_DEV_UB is not set
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=16
CONFIG_BLK_DEV_RAM_SIZE=65536
-CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
-CONFIG_CDROM_PKTCDVD=m
-CONFIG_CDROM_PKTCDVD_BUFFERS=8
-# CONFIG_CDROM_PKTCDVD_WCACHE is not set
-CONFIG_ATA_OVER_ETH=m
-CONFIG_MISC_DEVICES=y
-# CONFIG_PHANTOM is not set
-# CONFIG_EEPROM_93CX6 is not set
-CONFIG_SGI_IOC4=m
-CONFIG_TIFM_CORE=m
-CONFIG_TIFM_7XX1=m
-CONFIG_IDE=y
-CONFIG_IDE_MAX_HWIFS=4
-CONFIG_BLK_DEV_IDE=y
-
-#
-# Please see Documentation/ide.txt for help/info on IDE drives
-#
-# CONFIG_BLK_DEV_IDE_SATA is not set
-CONFIG_BLK_DEV_IDEDISK=m
-# CONFIG_IDEDISK_MULTI_MODE is not set
-CONFIG_BLK_DEV_IDECS=m
-# CONFIG_BLK_DEV_DELKIN is not set
-CONFIG_BLK_DEV_IDECD=m
-CONFIG_BLK_DEV_IDETAPE=m
-CONFIG_BLK_DEV_IDEFLOPPY=m
-CONFIG_BLK_DEV_IDESCSI=m
-# CONFIG_IDE_TASK_IOCTL is not set
-CONFIG_IDE_PROC_FS=y
-
-#
-# IDE chipset support/bugfixes
-#
-CONFIG_IDE_GENERIC=m
-CONFIG_BLK_DEV_IDEPCI=y
-CONFIG_IDEPCI_SHARE_IRQ=y
-CONFIG_IDEPCI_PCIBUS_ORDER=y
-# CONFIG_BLK_DEV_OFFBOARD is not set
-CONFIG_BLK_DEV_GENERIC=m
-CONFIG_BLK_DEV_OPTI621=m
-CONFIG_BLK_DEV_IDEDMA_PCI=y
-# CONFIG_BLK_DEV_IDEDMA_FORCED is not set
-# CONFIG_IDEDMA_ONLYDISK is not set
-CONFIG_BLK_DEV_AEC62XX=m
-CONFIG_BLK_DEV_ALI15X3=m
-# CONFIG_WDC_ALI15X3 is not set
-CONFIG_BLK_DEV_AMD74XX=m
-CONFIG_BLK_DEV_CMD64X=m
-CONFIG_BLK_DEV_TRIFLEX=m
-CONFIG_BLK_DEV_CY82C693=m
-# CONFIG_BLK_DEV_CS5520 is not set
-CONFIG_BLK_DEV_CS5530=m
-CONFIG_BLK_DEV_HPT34X=m
-# CONFIG_HPT34X_AUTODMA is not set
-CONFIG_BLK_DEV_HPT366=m
-# CONFIG_BLK_DEV_JMICRON is not set
-CONFIG_BLK_DEV_SC1200=m
-CONFIG_BLK_DEV_PIIX=m
-# CONFIG_BLK_DEV_IT8213 is not set
-CONFIG_BLK_DEV_IT821X=m
-CONFIG_BLK_DEV_NS87415=m
-CONFIG_BLK_DEV_PDC202XX_OLD=m
-CONFIG_PDC202XX_BURST=y
-CONFIG_BLK_DEV_PDC202XX_NEW=m
-CONFIG_BLK_DEV_SVWKS=m
-CONFIG_BLK_DEV_SIIMAGE=m
-# CONFIG_BLK_DEV_SLC90E66 is not set
-CONFIG_BLK_DEV_TRM290=m
-# CONFIG_BLK_DEV_VIA82CXXX is not set
-# CONFIG_BLK_DEV_TC86C001 is not set
-# CONFIG_IDE_ARM is not set
-CONFIG_BLK_DEV_IDEDMA=y
-# CONFIG_IDEDMA_IVB is not set
+# CONFIG_BLK_DEV_XIP is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
# CONFIG_BLK_DEV_HD is not set
+# CONFIG_MISC_DEVICES is not set
+CONFIG_TIFM_CORE=m
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
#
# SCSI device support
#
-CONFIG_RAID_ATTRS=m
+CONFIG_SCSI_MOD=m
+# CONFIG_RAID_ATTRS is not set
CONFIG_SCSI=m
CONFIG_SCSI_DMA=y
# CONFIG_SCSI_TGT is not set
@@ -985,18 +973,13 @@ CONFIG_SCSI_PROC_FS=y
# SCSI support type (disk, tape, CD-ROM)
#
CONFIG_BLK_DEV_SD=m
-CONFIG_CHR_DEV_ST=m
-CONFIG_CHR_DEV_OSST=m
-CONFIG_BLK_DEV_SR=m
-# CONFIG_BLK_DEV_SR_VENDOR is not set
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+# CONFIG_BLK_DEV_SR is not set
CONFIG_CHR_DEV_SG=m
-CONFIG_CHR_DEV_SCH=m
-
-#
-# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
-#
+# CONFIG_CHR_DEV_SCH is not set
CONFIG_SCSI_MULTI_LUN=y
-CONFIG_SCSI_CONSTANTS=y
+# CONFIG_SCSI_CONSTANTS is not set
CONFIG_SCSI_LOGGING=y
# CONFIG_SCSI_SCAN_ASYNC is not set
CONFIG_SCSI_WAIT_SCAN=m
@@ -1009,198 +992,39 @@ CONFIG_SCSI_FC_ATTRS=m
CONFIG_SCSI_ISCSI_ATTRS=m
CONFIG_SCSI_SAS_ATTRS=m
CONFIG_SCSI_SAS_LIBSAS=m
-# CONFIG_SCSI_SAS_ATA is not set
+CONFIG_SCSI_SAS_HOST_SMP=y
# CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set
-CONFIG_SCSI_LOWLEVEL=y
-CONFIG_ISCSI_TCP=m
-CONFIG_BLK_DEV_3W_XXXX_RAID=m
-CONFIG_SCSI_3W_9XXX=m
-CONFIG_SCSI_ACARD=m
-CONFIG_SCSI_AACRAID=m
-CONFIG_SCSI_AIC7XXX=m
-CONFIG_AIC7XXX_CMDS_PER_DEVICE=8
-CONFIG_AIC7XXX_RESET_DELAY_MS=15000
-CONFIG_AIC7XXX_DEBUG_ENABLE=y
-CONFIG_AIC7XXX_DEBUG_MASK=0
-CONFIG_AIC7XXX_REG_PRETTY_PRINT=y
-# CONFIG_SCSI_AIC7XXX_OLD is not set
-CONFIG_SCSI_AIC79XX=m
-CONFIG_AIC79XX_CMDS_PER_DEVICE=32
-CONFIG_AIC79XX_RESET_DELAY_MS=15000
-CONFIG_AIC79XX_DEBUG_ENABLE=y
-CONFIG_AIC79XX_DEBUG_MASK=0
-CONFIG_AIC79XX_REG_PRETTY_PRINT=y
-CONFIG_SCSI_AIC94XX=m
-# CONFIG_AIC94XX_DEBUG is not set
-CONFIG_SCSI_DPT_I2O=m
-CONFIG_SCSI_ARCMSR=m
-CONFIG_MEGARAID_NEWGEN=y
-CONFIG_MEGARAID_MM=m
-CONFIG_MEGARAID_MAILBOX=m
-CONFIG_MEGARAID_LEGACY=m
-CONFIG_MEGARAID_SAS=m
-CONFIG_SCSI_HPTIOP=m
-CONFIG_SCSI_DMX3191D=m
-CONFIG_SCSI_FUTURE_DOMAIN=m
-CONFIG_SCSI_IPS=m
-CONFIG_SCSI_INITIO=m
-# CONFIG_SCSI_INIA100 is not set
-CONFIG_SCSI_PPA=m
-CONFIG_SCSI_IMM=m
-# CONFIG_SCSI_IZIP_EPP16 is not set
-# CONFIG_SCSI_IZIP_SLOW_CTR is not set
-CONFIG_SCSI_STEX=m
-CONFIG_SCSI_SYM53C8XX_2=m
-CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=1
-CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
-CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
-CONFIG_SCSI_SYM53C8XX_MMIO=y
-CONFIG_SCSI_IPR=m
-# CONFIG_SCSI_IPR_TRACE is not set
-# CONFIG_SCSI_IPR_DUMP is not set
-CONFIG_SCSI_QLOGIC_1280=m
-CONFIG_SCSI_QLA_FC=m
-CONFIG_SCSI_QLA_ISCSI=m
-CONFIG_SCSI_LPFC=m
-CONFIG_SCSI_DC395x=m
-CONFIG_SCSI_DC390T=m
-CONFIG_SCSI_NSP32=m
-CONFIG_SCSI_DEBUG=m
-# CONFIG_SCSI_SRP is not set
+# CONFIG_SCSI_SRP_ATTRS is not set
+# CONFIG_SCSI_LOWLEVEL is not set
# CONFIG_SCSI_LOWLEVEL_PCMCIA is not set
-CONFIG_ATA=m
-# CONFIG_ATA_NONSTANDARD is not set
-CONFIG_SATA_AHCI=m
-CONFIG_SATA_SVW=m
-CONFIG_ATA_PIIX=m
-CONFIG_SATA_MV=m
-CONFIG_SATA_NV=m
-CONFIG_PDC_ADMA=m
-CONFIG_SATA_QSTOR=m
-CONFIG_SATA_PROMISE=m
-CONFIG_SATA_SX4=m
-CONFIG_SATA_SIL=m
-CONFIG_SATA_SIL24=m
-CONFIG_SATA_SIS=m
-CONFIG_SATA_ULI=m
-CONFIG_SATA_VIA=m
-CONFIG_SATA_VITESSE=m
-# CONFIG_SATA_INIC162X is not set
-# CONFIG_PATA_ALI is not set
-# CONFIG_PATA_AMD is not set
-# CONFIG_PATA_ARTOP is not set
-# CONFIG_PATA_ATIIXP is not set
-# CONFIG_PATA_CMD640_PCI is not set
-# CONFIG_PATA_CMD64X is not set
-CONFIG_PATA_CS5520=m
-# CONFIG_PATA_CS5530 is not set
-# CONFIG_PATA_CYPRESS is not set
-CONFIG_PATA_EFAR=m
-CONFIG_ATA_GENERIC=m
-# CONFIG_PATA_HPT366 is not set
-# CONFIG_PATA_HPT37X is not set
-# CONFIG_PATA_HPT3X2N is not set
-# CONFIG_PATA_HPT3X3 is not set
-# CONFIG_PATA_IT821X is not set
-# CONFIG_PATA_IT8213 is not set
-CONFIG_PATA_JMICRON=m
-CONFIG_PATA_TRIFLEX=m
-# CONFIG_PATA_MARVELL is not set
-CONFIG_PATA_MPIIX=m
-# CONFIG_PATA_OLDPIIX is not set
-CONFIG_PATA_NETCELL=m
-# CONFIG_PATA_NS87410 is not set
-# CONFIG_PATA_OPTI is not set
-# CONFIG_PATA_OPTIDMA is not set
-CONFIG_PATA_PCMCIA=m
-# CONFIG_PATA_PDC_OLD is not set
-# CONFIG_PATA_RADISYS is not set
-CONFIG_PATA_RZ1000=m
-# CONFIG_PATA_SC1200 is not set
-# CONFIG_PATA_SERVERWORKS is not set
-CONFIG_PATA_PDC2027X=m
-CONFIG_PATA_SIL680=m
-CONFIG_PATA_SIS=m
-CONFIG_PATA_VIA=m
-CONFIG_PATA_WINBOND=m
-# CONFIG_PATA_PLATFORM is not set
-CONFIG_MD=y
-CONFIG_BLK_DEV_MD=m
-CONFIG_MD_LINEAR=m
-CONFIG_MD_RAID0=m
-CONFIG_MD_RAID1=m
-CONFIG_MD_RAID10=m
-CONFIG_MD_RAID456=m
-# CONFIG_MD_RAID5_RESHAPE is not set
-CONFIG_MD_MULTIPATH=m
-CONFIG_MD_FAULTY=m
-CONFIG_BLK_DEV_DM=m
-# CONFIG_DM_DEBUG is not set
-CONFIG_DM_CRYPT=m
-CONFIG_DM_SNAPSHOT=m
-CONFIG_DM_MIRROR=m
-CONFIG_DM_ZERO=m
-CONFIG_DM_MULTIPATH=m
-CONFIG_DM_MULTIPATH_EMC=m
-# CONFIG_DM_MULTIPATH_RDAC is not set
-# CONFIG_DM_DELAY is not set
-
-#
-# Fusion MPT device support
-#
-CONFIG_FUSION=y
-CONFIG_FUSION_SPI=m
-CONFIG_FUSION_FC=m
-CONFIG_FUSION_SAS=m
-CONFIG_FUSION_MAX_SGE=128
-CONFIG_FUSION_CTL=m
-CONFIG_FUSION_LAN=m
-# CONFIG_FUSION_LOGGING is not set
+# CONFIG_SCSI_DH is not set
+# CONFIG_SCSI_OSD_INITIATOR is not set
+# CONFIG_ATA is not set
+# CONFIG_MD is not set
+# CONFIG_FUSION is not set
#
# IEEE 1394 (FireWire) support
#
-# CONFIG_FIREWIRE is not set
-CONFIG_IEEE1394=m
-
-#
-# Subsystem Options
-#
-# CONFIG_IEEE1394_VERBOSEDEBUG is not set
#
-# Controllers
+# You can enable one or both FireWire driver stacks.
#
-CONFIG_IEEE1394_PCILYNX=m
-CONFIG_IEEE1394_OHCI1394=m
#
-# Protocols
+# The newer stack is recommended.
#
-CONFIG_IEEE1394_VIDEO1394=m
-CONFIG_IEEE1394_SBP2=m
-# CONFIG_IEEE1394_SBP2_PHYS_DMA is not set
-CONFIG_IEEE1394_ETH1394_ROM_ENTRY=y
-CONFIG_IEEE1394_ETH1394=m
-CONFIG_IEEE1394_DV1394=m
-CONFIG_IEEE1394_RAWIO=m
-CONFIG_I2O=m
-CONFIG_I2O_LCT_NOTIFY_ON_CHANGES=y
-CONFIG_I2O_EXT_ADAPTEC=y
-CONFIG_I2O_CONFIG=m
-CONFIG_I2O_CONFIG_OLD_IOCTL=y
-CONFIG_I2O_BUS=m
-CONFIG_I2O_BLOCK=m
-CONFIG_I2O_SCSI=m
-CONFIG_I2O_PROC=m
+# CONFIG_FIREWIRE is not set
+# CONFIG_IEEE1394 is not set
+# CONFIG_I2O is not set
CONFIG_NETDEVICES=y
-# CONFIG_NETDEVICES_MULTIQUEUE is not set
# CONFIG_IFB is not set
CONFIG_DUMMY=m
CONFIG_BONDING=m
# CONFIG_MACVLAN is not set
CONFIG_EQUALIZER=m
CONFIG_TUN=m
+# CONFIG_VETH is not set
CONFIG_ARCNET=m
CONFIG_ARCNET_1201=m
CONFIG_ARCNET_1051=m
@@ -1225,9 +1049,11 @@ CONFIG_VITESSE_PHY=m
CONFIG_SMSC_PHY=m
# CONFIG_BROADCOM_PHY is not set
# CONFIG_ICPLUS_PHY is not set
-CONFIG_FIXED_PHY=m
-# CONFIG_FIXED_MII_10_FDX is not set
-# CONFIG_FIXED_MII_100_FDX is not set
+# CONFIG_REALTEK_PHY is not set
+# CONFIG_NATIONAL_PHY is not set
+# CONFIG_STE10XP is not set
+# CONFIG_LSI_ET1011C_PHY is not set
+# CONFIG_MDIO_BITBANG is not set
CONFIG_NET_ETHERNET=y
CONFIG_MII=m
# CONFIG_AX88796 is not set
@@ -1240,8 +1066,12 @@ CONFIG_VORTEX=m
CONFIG_TYPHOON=m
# CONFIG_SMC91X is not set
# CONFIG_DM9000 is not set
+# CONFIG_ETHOC is not set
+# CONFIG_SMSC911X is not set
+# CONFIG_DNET is not set
CONFIG_NET_TULIP=y
CONFIG_DE2104X=m
+CONFIG_DE2104X_DSL=0
CONFIG_TULIP=m
# CONFIG_TULIP_MWI is not set
# CONFIG_TULIP_MMIO is not set
@@ -1251,21 +1081,26 @@ CONFIG_WINBOND_840=m
CONFIG_DM9102=m
CONFIG_ULI526X=m
CONFIG_PCMCIA_XIRCOM=m
-# CONFIG_PCMCIA_XIRTULIP is not set
CONFIG_HP100=m
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
CONFIG_NET_PCI=y
CONFIG_PCNET32=m
-# CONFIG_PCNET32_NAPI is not set
CONFIG_AMD8111_ETH=m
-# CONFIG_AMD8111E_NAPI is not set
CONFIG_ADAPTEC_STARFIRE=m
-# CONFIG_ADAPTEC_STARFIRE_NAPI is not set
+# CONFIG_KSZ884X_PCI is not set
CONFIG_B44=m
+CONFIG_B44_PCI_AUTOSELECT=y
+CONFIG_B44_PCICORE_AUTOSELECT=y
+CONFIG_B44_PCI=y
CONFIG_FORCEDETH=m
# CONFIG_FORCEDETH_NAPI is not set
# CONFIG_TC35815 is not set
-CONFIG_DGRS=m
-CONFIG_EEPRO100=m
CONFIG_E100=m
CONFIG_FEALNX=m
CONFIG_NATSEMI=m
@@ -1276,52 +1111,71 @@ CONFIG_8139TOO=m
# CONFIG_8139TOO_TUNE_TWISTER is not set
CONFIG_8139TOO_8129=y
# CONFIG_8139_OLD_RX_RESET is not set
+# CONFIG_R6040 is not set
CONFIG_SIS900=m
CONFIG_EPIC100=m
+# CONFIG_SMSC9420 is not set
CONFIG_SUNDANCE=m
# CONFIG_SUNDANCE_MMIO is not set
CONFIG_TLAN=m
+# CONFIG_KS8842 is not set
+# CONFIG_KS8851_MLL is not set
CONFIG_VIA_RHINE=m
# CONFIG_VIA_RHINE_MMIO is not set
-# CONFIG_VIA_RHINE_NAPI is not set
# CONFIG_SC92031 is not set
-CONFIG_NET_POCKET=y
-CONFIG_DE600=m
-CONFIG_DE620=m
+# CONFIG_ATL2 is not set
CONFIG_NETDEV_1000=y
CONFIG_ACENIC=m
# CONFIG_ACENIC_OMIT_TIGON_I is not set
CONFIG_DL2K=m
CONFIG_E1000=m
-# CONFIG_E1000_NAPI is not set
-# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
+# CONFIG_E1000E is not set
+# CONFIG_IP1000 is not set
+# CONFIG_IGB is not set
+# CONFIG_IGBVF is not set
CONFIG_NS83820=m
CONFIG_HAMACHI=m
CONFIG_YELLOWFIN=m
CONFIG_R8169=m
-# CONFIG_R8169_NAPI is not set
CONFIG_R8169_VLAN=y
CONFIG_SIS190=m
CONFIG_SKGE=m
+# CONFIG_SKGE_DEBUG is not set
CONFIG_SKY2=m
-CONFIG_SK98LIN=m
+# CONFIG_SKY2_DEBUG is not set
CONFIG_VIA_VELOCITY=m
CONFIG_TIGON3=m
CONFIG_BNX2=m
+# CONFIG_CNIC is not set
CONFIG_QLA3XXX=m
# CONFIG_ATL1 is not set
+# CONFIG_ATL1E is not set
+# CONFIG_ATL1C is not set
+# CONFIG_JME is not set
CONFIG_NETDEV_10000=y
+CONFIG_MDIO=m
CONFIG_CHELSIO_T1=m
# CONFIG_CHELSIO_T1_1G is not set
-CONFIG_CHELSIO_T1_NAPI=y
+CONFIG_CHELSIO_T3_DEPENDS=y
# CONFIG_CHELSIO_T3 is not set
+CONFIG_CHELSIO_T4_DEPENDS=y
+# CONFIG_CHELSIO_T4 is not set
+# CONFIG_ENIC is not set
+# CONFIG_IXGBE is not set
CONFIG_IXGB=m
-# CONFIG_IXGB_NAPI is not set
CONFIG_S2IO=m
-# CONFIG_S2IO_NAPI is not set
+# CONFIG_VXGE is not set
CONFIG_MYRI10GE=m
# CONFIG_NETXEN_NIC is not set
+# CONFIG_NIU is not set
+# CONFIG_MLX4_EN is not set
# CONFIG_MLX4_CORE is not set
+# CONFIG_TEHUTI is not set
+# CONFIG_BNX2X is not set
+# CONFIG_QLCNIC is not set
+# CONFIG_QLGE is not set
+# CONFIG_SFC is not set
+# CONFIG_BE2NET is not set
CONFIG_TR=y
CONFIG_IBMOL=m
CONFIG_IBMLS=m
@@ -1329,12 +1183,18 @@ CONFIG_3C359=m
CONFIG_TMS380TR=m
CONFIG_TMSPCI=m
CONFIG_ABYSS=m
+CONFIG_WLAN=y
+# CONFIG_PCMCIA_RAYCS is not set
+# CONFIG_ATMEL is not set
+# CONFIG_AIRO_CS is not set
+# CONFIG_PCMCIA_WL3501 is not set
+# CONFIG_PRISM54 is not set
+# CONFIG_USB_ZD1201 is not set
+# CONFIG_HOSTAP is not set
#
-# Wireless LAN
+# Enable WiMAX (Networking options) to see the WiMAX drivers
#
-# CONFIG_WLAN_PRE80211 is not set
-# CONFIG_WLAN_80211 is not set
#
# USB Network Adapters
@@ -1343,11 +1203,13 @@ CONFIG_USB_CATC=m
CONFIG_USB_KAWETH=m
CONFIG_USB_PEGASUS=m
CONFIG_USB_RTL8150=m
-CONFIG_USB_USBNET_MII=m
CONFIG_USB_USBNET=m
CONFIG_USB_NET_AX8817X=m
CONFIG_USB_NET_CDCETHER=m
+# CONFIG_USB_NET_CDC_EEM is not set
# CONFIG_USB_NET_DM9601 is not set
+# CONFIG_USB_NET_SMSC75XX is not set
+# CONFIG_USB_NET_SMSC95XX is not set
CONFIG_USB_NET_GL620A=m
CONFIG_USB_NET_NET1080=m
CONFIG_USB_NET_PLUSB=m
@@ -1361,6 +1223,9 @@ CONFIG_USB_ARMLINUX=y
CONFIG_USB_EPSON2888=y
# CONFIG_USB_KC2190 is not set
CONFIG_USB_NET_ZAURUS=m
+# CONFIG_USB_NET_INT51X1 is not set
+# CONFIG_USB_IPHETH is not set
+CONFIG_USB_SIERRA_NET=m
CONFIG_NET_PCMCIA=y
CONFIG_PCMCIA_3C589=m
CONFIG_PCMCIA_3C574=m
@@ -1383,16 +1248,6 @@ CONFIG_HDLC_PPP=m
CONFIG_HDLC_X25=m
CONFIG_PCI200SYN=m
CONFIG_WANXL=m
-CONFIG_PC300=m
-CONFIG_PC300_MLPPP=y
-
-#
-# Cyclades-PC300 MLPPP support is disabled.
-#
-
-#
-# Refer to the file README.mlppp, provided by PC300 package.
-#
# CONFIG_PC300TOO is not set
CONFIG_FARSYNC=m
CONFIG_DSCC4=m
@@ -1428,15 +1283,13 @@ CONFIG_ATM_HORIZON=m
# CONFIG_ATM_HORIZON_DEBUG is not set
CONFIG_ATM_IA=m
# CONFIG_ATM_IA_DEBUG is not set
-CONFIG_ATM_FORE200E_MAYBE=m
-CONFIG_ATM_FORE200E_PCA=y
-CONFIG_ATM_FORE200E_PCA_DEFAULT_FW=y
+CONFIG_ATM_FORE200E=m
# CONFIG_ATM_FORE200E_USE_TASKLET is not set
CONFIG_ATM_FORE200E_TX_RETRY=16
CONFIG_ATM_FORE200E_DEBUG=0
-CONFIG_ATM_FORE200E=m
CONFIG_ATM_HE=m
CONFIG_ATM_HE_USE_SUNI=y
+# CONFIG_ATM_SOLOS is not set
CONFIG_FDDI=y
CONFIG_DEFXX=m
# CONFIG_DEFXX_MMIO is not set
@@ -1444,7 +1297,6 @@ CONFIG_SKFP=m
CONFIG_HIPPI=y
CONFIG_ROADRUNNER=m
# CONFIG_ROADRUNNER_LARGE_RINGS is not set
-CONFIG_PLIP=m
CONFIG_PPP=m
CONFIG_PPP_MULTILINK=y
CONFIG_PPP_FILTER=y
@@ -1462,219 +1314,53 @@ CONFIG_SLHC=m
CONFIG_SLIP_SMART=y
CONFIG_SLIP_MODE_SLIP6=y
CONFIG_NET_FC=y
-CONFIG_SHAPER=m
CONFIG_NETCONSOLE=m
+# CONFIG_NETCONSOLE_DYNAMIC is not set
CONFIG_NETPOLL=y
# CONFIG_NETPOLL_TRAP is not set
CONFIG_NET_POLL_CONTROLLER=y
-CONFIG_ISDN=m
-CONFIG_ISDN_I4L=m
-CONFIG_ISDN_PPP=y
-CONFIG_ISDN_PPP_VJ=y
-CONFIG_ISDN_MPP=y
-CONFIG_IPPP_FILTER=y
-CONFIG_ISDN_PPP_BSDCOMP=m
-CONFIG_ISDN_AUDIO=y
-CONFIG_ISDN_TTY_FAX=y
-CONFIG_ISDN_X25=y
-
-#
-# ISDN feature submodules
-#
-# CONFIG_ISDN_DRV_LOOP is not set
-CONFIG_ISDN_DIVERSION=m
-
-#
-# ISDN4Linux hardware drivers
-#
-
-#
-# Passive cards
-#
-CONFIG_ISDN_DRV_HISAX=m
-
-#
-# D-channel protocol features
-#
-CONFIG_HISAX_EURO=y
-CONFIG_DE_AOC=y
-# CONFIG_HISAX_NO_SENDCOMPLETE is not set
-# CONFIG_HISAX_NO_LLC is not set
-# CONFIG_HISAX_NO_KEYPAD is not set
-CONFIG_HISAX_1TR6=y
-CONFIG_HISAX_NI1=y
-CONFIG_HISAX_MAX_CARDS=8
-
-#
-# HiSax supported cards
-#
-CONFIG_HISAX_16_3=y
-CONFIG_HISAX_TELESPCI=y
-CONFIG_HISAX_S0BOX=y
-CONFIG_HISAX_FRITZPCI=y
-CONFIG_HISAX_AVM_A1_PCMCIA=y
-CONFIG_HISAX_ELSA=y
-CONFIG_HISAX_DIEHLDIVA=y
-CONFIG_HISAX_SEDLBAUER=y
-CONFIG_HISAX_NETJET=y
-CONFIG_HISAX_NETJET_U=y
-CONFIG_HISAX_NICCY=y
-CONFIG_HISAX_BKM_A4T=y
-CONFIG_HISAX_SCT_QUADRO=y
-CONFIG_HISAX_GAZEL=y
-CONFIG_HISAX_HFC_PCI=y
-CONFIG_HISAX_W6692=y
-CONFIG_HISAX_HFC_SX=y
-CONFIG_HISAX_ENTERNOW_PCI=y
-# CONFIG_HISAX_DEBUG is not set
-
-#
-# HiSax PCMCIA card service modules
-#
-CONFIG_HISAX_SEDLBAUER_CS=m
-CONFIG_HISAX_ELSA_CS=m
-CONFIG_HISAX_AVM_A1_CS=m
-CONFIG_HISAX_TELES_CS=m
-
-#
-# HiSax sub driver modules
-#
-CONFIG_HISAX_ST5481=m
-CONFIG_HISAX_HFCUSB=m
-CONFIG_HISAX_HFC4S8S=m
-CONFIG_HISAX_FRITZ_PCIPNP=m
-CONFIG_HISAX_HDLC=y
-
-#
-# Active cards
-#
-# CONFIG_HYSDN is not set
-CONFIG_ISDN_DRV_GIGASET=m
-CONFIG_GIGASET_BASE=m
-CONFIG_GIGASET_M105=m
-# CONFIG_GIGASET_M101 is not set
-# CONFIG_GIGASET_DEBUG is not set
-# CONFIG_GIGASET_UNDOCREQ is not set
-CONFIG_ISDN_CAPI=m
-CONFIG_ISDN_DRV_AVMB1_VERBOSE_REASON=y
-CONFIG_CAPI_TRACE=y
-CONFIG_ISDN_CAPI_MIDDLEWARE=y
-CONFIG_ISDN_CAPI_CAPI20=m
-CONFIG_ISDN_CAPI_CAPIFS_BOOL=y
-CONFIG_ISDN_CAPI_CAPIFS=m
-CONFIG_ISDN_CAPI_CAPIDRV=m
-
-#
-# CAPI hardware drivers
-#
-CONFIG_CAPI_AVM=y
-CONFIG_ISDN_DRV_AVMB1_B1PCI=m
-CONFIG_ISDN_DRV_AVMB1_B1PCIV4=y
-CONFIG_ISDN_DRV_AVMB1_B1PCMCIA=m
-CONFIG_ISDN_DRV_AVMB1_AVM_CS=m
-CONFIG_ISDN_DRV_AVMB1_T1PCI=m
-CONFIG_ISDN_DRV_AVMB1_C4=m
-CONFIG_CAPI_EICON=y
-CONFIG_ISDN_DIVAS=m
-CONFIG_ISDN_DIVAS_BRIPCI=y
-CONFIG_ISDN_DIVAS_PRIPCI=y
-CONFIG_ISDN_DIVAS_DIVACAPI=m
-CONFIG_ISDN_DIVAS_USERIDI=m
-CONFIG_ISDN_DIVAS_MAINT=m
-CONFIG_PHONE=m
-CONFIG_PHONE_IXJ=m
-CONFIG_PHONE_IXJ_PCMCIA=m
+# CONFIG_VMXNET3 is not set
+# CONFIG_ISDN is not set
+# CONFIG_PHONE is not set
#
# Input device support
#
CONFIG_INPUT=y
-CONFIG_INPUT_FF_MEMLESS=m
+# CONFIG_INPUT_FF_MEMLESS is not set
# CONFIG_INPUT_POLLDEV is not set
+# CONFIG_INPUT_SPARSEKMAP is not set
#
# Userland interfaces
#
-CONFIG_INPUT_MOUSEDEV=y
-CONFIG_INPUT_MOUSEDEV_PSAUX=y
-CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
-CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
-CONFIG_INPUT_JOYDEV=m
-CONFIG_INPUT_TSDEV=m
-CONFIG_INPUT_TSDEV_SCREEN_X=240
-CONFIG_INPUT_TSDEV_SCREEN_Y=320
-CONFIG_INPUT_EVDEV=m
-CONFIG_INPUT_EVBUG=m
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_EVDEV is not set
+# CONFIG_INPUT_EVBUG is not set
#
# Input Device Drivers
#
CONFIG_INPUT_KEYBOARD=y
-CONFIG_KEYBOARD_ATKBD=y
+# CONFIG_KEYBOARD_ADP5588 is not set
+# CONFIG_KEYBOARD_ATKBD is not set
+# CONFIG_QT2160 is not set
+# CONFIG_KEYBOARD_LKKBD is not set
CONFIG_KEYBOARD_GPIO=y
-CONFIG_KEYBOARD_SUNKBD=m
-CONFIG_KEYBOARD_LKKBD=m
-CONFIG_KEYBOARD_XTKBD=m
-CONFIG_KEYBOARD_NEWTON=m
-CONFIG_KEYBOARD_STOWAWAY=m
-CONFIG_INPUT_MOUSE=y
-CONFIG_MOUSE_PS2=m
-CONFIG_MOUSE_PS2_ALPS=y
-CONFIG_MOUSE_PS2_LOGIPS2PP=y
-CONFIG_MOUSE_PS2_SYNAPTICS=y
-CONFIG_MOUSE_PS2_LIFEBOOK=y
-CONFIG_MOUSE_PS2_TRACKPOINT=y
-# CONFIG_MOUSE_PS2_TOUCHKIT is not set
-CONFIG_MOUSE_SERIAL=m
-# CONFIG_MOUSE_APPLETOUCH is not set
-CONFIG_MOUSE_VSXXXAA=m
-CONFIG_INPUT_JOYSTICK=y
-CONFIG_JOYSTICK_ANALOG=m
-CONFIG_JOYSTICK_A3D=m
-CONFIG_JOYSTICK_ADI=m
-CONFIG_JOYSTICK_COBRA=m
-CONFIG_JOYSTICK_GF2K=m
-CONFIG_JOYSTICK_GRIP=m
-CONFIG_JOYSTICK_GRIP_MP=m
-CONFIG_JOYSTICK_GUILLEMOT=m
-CONFIG_JOYSTICK_INTERACT=m
-CONFIG_JOYSTICK_SIDEWINDER=m
-CONFIG_JOYSTICK_TMDC=m
-CONFIG_JOYSTICK_IFORCE=m
-CONFIG_JOYSTICK_IFORCE_USB=y
-CONFIG_JOYSTICK_IFORCE_232=y
-CONFIG_JOYSTICK_WARRIOR=m
-CONFIG_JOYSTICK_MAGELLAN=m
-CONFIG_JOYSTICK_SPACEORB=m
-CONFIG_JOYSTICK_SPACEBALL=m
-CONFIG_JOYSTICK_STINGER=m
-CONFIG_JOYSTICK_TWIDJOY=m
-CONFIG_JOYSTICK_DB9=m
-CONFIG_JOYSTICK_GAMECON=m
-CONFIG_JOYSTICK_TURBOGRAFX=m
-CONFIG_JOYSTICK_JOYDUMP=m
-# CONFIG_JOYSTICK_XPAD is not set
+# CONFIG_KEYBOARD_MATRIX is not set
+# CONFIG_KEYBOARD_LM8323 is not set
+# CONFIG_KEYBOARD_MAX7359 is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_OPENCORES is not set
+# CONFIG_KEYBOARD_STOWAWAY is not set
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_XTKBD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_JOYSTICK is not set
# CONFIG_INPUT_TABLET is not set
-CONFIG_INPUT_TOUCHSCREEN=y
-CONFIG_TOUCHSCREEN_ADS7846=m
-# CONFIG_TOUCHSCREEN_FUJITSU is not set
-CONFIG_TOUCHSCREEN_GUNZE=m
-CONFIG_TOUCHSCREEN_ELO=m
-CONFIG_TOUCHSCREEN_MTOUCH=m
-CONFIG_TOUCHSCREEN_MK712=m
-CONFIG_TOUCHSCREEN_PENMOUNT=m
-CONFIG_TOUCHSCREEN_TOUCHRIGHT=m
-CONFIG_TOUCHSCREEN_TOUCHWIN=m
-# CONFIG_TOUCHSCREEN_UCB1400 is not set
-# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set
-CONFIG_INPUT_MISC=y
-CONFIG_INPUT_PCSPKR=m
-# CONFIG_INPUT_ATI_REMOTE is not set
-# CONFIG_INPUT_ATI_REMOTE2 is not set
-# CONFIG_INPUT_KEYSPAN_REMOTE is not set
-# CONFIG_INPUT_POWERMATE is not set
-# CONFIG_INPUT_YEALINK is not set
-CONFIG_INPUT_UINPUT=m
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
#
# Hardware I/O ports
@@ -1682,10 +1368,10 @@ CONFIG_INPUT_UINPUT=m
CONFIG_SERIO=y
CONFIG_SERIO_I8042=y
CONFIG_SERIO_SERPORT=m
-CONFIG_SERIO_PARKBD=m
CONFIG_SERIO_PCIPS2=m
CONFIG_SERIO_LIBPS2=y
CONFIG_SERIO_RAW=m
+# CONFIG_SERIO_ALTERA_PS2 is not set
CONFIG_GAMEPORT=m
CONFIG_GAMEPORT_NS558=m
CONFIG_GAMEPORT_L4=m
@@ -1696,30 +1382,13 @@ CONFIG_GAMEPORT_FM801=m
# Character devices
#
CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
CONFIG_VT_CONSOLE=y
CONFIG_HW_CONSOLE=y
CONFIG_VT_HW_CONSOLE_BINDING=y
-CONFIG_SERIAL_NONSTANDARD=y
-# CONFIG_COMPUTONE is not set
-CONFIG_ROCKETPORT=m
-CONFIG_CYCLADES=m
-# CONFIG_CYZ_INTR is not set
-CONFIG_DIGIEPCA=m
-# CONFIG_MOXA_INTELLIO is not set
-CONFIG_MOXA_SMARTIO=m
-# CONFIG_MOXA_SMARTIO_NEW is not set
-# CONFIG_ISI is not set
-CONFIG_SYNCLINKMP=m
-CONFIG_SYNCLINK_GT=m
-CONFIG_N_HDLC=m
-# CONFIG_RISCOM8 is not set
-CONFIG_SPECIALIX=m
-# CONFIG_SPECIALIX_RTSCTS is not set
-CONFIG_SX=m
-# CONFIG_RIO is not set
-CONFIG_STALDRV=y
-# CONFIG_STALLION is not set
-# CONFIG_ISTALLION is not set
+CONFIG_DEVKMEM=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+# CONFIG_NOZOMI is not set
#
# Serial drivers
@@ -1741,161 +1410,128 @@ CONFIG_SERIAL_8250_RSA=y
#
CONFIG_SERIAL_CORE=m
CONFIG_SERIAL_JSM=m
+# CONFIG_SERIAL_TIMBERDALE is not set
CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
CONFIG_LEGACY_PTYS=y
CONFIG_LEGACY_PTY_COUNT=256
-CONFIG_PRINTER=m
-# CONFIG_LP_CONSOLE is not set
-CONFIG_PPDEV=m
-CONFIG_TIPAR=m
-CONFIG_IPMI_HANDLER=m
-# CONFIG_IPMI_PANIC_EVENT is not set
-CONFIG_IPMI_DEVICE_INTERFACE=m
-CONFIG_IPMI_SI=m
-CONFIG_IPMI_WATCHDOG=m
-CONFIG_IPMI_POWEROFF=m
-CONFIG_WATCHDOG=y
-# CONFIG_WATCHDOG_NOWAYOUT is not set
-
-#
-# Watchdog Device Drivers
-#
-CONFIG_SOFT_WATCHDOG=m
-# CONFIG_WDT_MTX1 is not set
-
-#
-# PCI-based Watchdog Cards
-#
-CONFIG_PCIPCWATCHDOG=m
-CONFIG_WDTPCI=m
-CONFIG_WDT_501_PCI=y
-
-#
-# USB-based Watchdog Cards
-#
-CONFIG_USBPCWATCHDOG=m
+# CONFIG_IPMI_HANDLER is not set
CONFIG_HW_RANDOM=y
-CONFIG_RTC=y
-CONFIG_R3964=m
-CONFIG_APPLICOM=m
-CONFIG_DRM=m
-CONFIG_DRM_TDFX=m
-CONFIG_DRM_R128=m
-CONFIG_DRM_RADEON=m
-CONFIG_DRM_MGA=m
-CONFIG_DRM_VIA=m
-CONFIG_DRM_SAVAGE=m
+# CONFIG_HW_RANDOM_TIMERIOMEM is not set
+# CONFIG_R3964 is not set
+# CONFIG_APPLICOM is not set
#
# PCMCIA character devices
#
-CONFIG_SYNCLINK_CS=m
-CONFIG_CARDMAN_4000=m
-CONFIG_CARDMAN_4040=m
-CONFIG_RAW_DRIVER=m
-CONFIG_MAX_RAW_DEVS=256
-CONFIG_TCG_TPM=m
-CONFIG_TCG_ATMEL=m
+# CONFIG_SYNCLINK_CS is not set
+# CONFIG_CARDMAN_4000 is not set
+# CONFIG_CARDMAN_4040 is not set
+# CONFIG_IPWIRELESS is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
CONFIG_DEVPORT=y
CONFIG_I2C=m
CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
CONFIG_I2C_CHARDEV=m
+CONFIG_I2C_HELPER_AUTO=y
+CONFIG_I2C_ALGOBIT=m
#
-# I2C Algorithms
+# I2C Hardware Bus support
#
-CONFIG_I2C_ALGOBIT=m
-CONFIG_I2C_ALGOPCF=m
-CONFIG_I2C_ALGOPCA=m
#
-# I2C Hardware Bus support
+# PC SMBus host controller drivers
+#
+# CONFIG_I2C_ALI1535 is not set
+# CONFIG_I2C_ALI1563 is not set
+# CONFIG_I2C_ALI15X3 is not set
+# CONFIG_I2C_AMD756 is not set
+# CONFIG_I2C_AMD8111 is not set
+# CONFIG_I2C_I801 is not set
+# CONFIG_I2C_ISCH is not set
+# CONFIG_I2C_PIIX4 is not set
+# CONFIG_I2C_NFORCE2 is not set
+# CONFIG_I2C_SIS5595 is not set
+# CONFIG_I2C_SIS630 is not set
+# CONFIG_I2C_SIS96X is not set
+# CONFIG_I2C_VIA is not set
+# CONFIG_I2C_VIAPRO is not set
+
+#
+# I2C system bus drivers (mostly embedded / system-on-chip)
#
-CONFIG_I2C_ALI1535=m
-CONFIG_I2C_ALI1563=m
-CONFIG_I2C_ALI15X3=m
-CONFIG_I2C_AMD756=m
-CONFIG_I2C_AMD756_S4882=m
-CONFIG_I2C_AMD8111=m
-CONFIG_I2C_I801=m
-CONFIG_I2C_I810=m
-CONFIG_I2C_PIIX4=m
-CONFIG_I2C_NFORCE2=m
-CONFIG_I2C_OCORES=m
-CONFIG_I2C_PARPORT=m
-CONFIG_I2C_PARPORT_LIGHT=m
-CONFIG_I2C_PROSAVAGE=m
-CONFIG_I2C_SAVAGE4=m
+CONFIG_I2C_GPIO=m
+# CONFIG_I2C_OCORES is not set
# CONFIG_I2C_SIMTEC is not set
-CONFIG_I2C_SIS5595=m
-CONFIG_I2C_SIS630=m
-CONFIG_I2C_SIS96X=m
+# CONFIG_I2C_XILINX is not set
+
+#
+# External I2C/SMBus adapter drivers
+#
+# CONFIG_I2C_PARPORT_LIGHT is not set
# CONFIG_I2C_TAOS_EVM is not set
-CONFIG_I2C_STUB=m
# CONFIG_I2C_TINY_USB is not set
-CONFIG_I2C_VIA=m
-CONFIG_I2C_VIAPRO=m
-CONFIG_I2C_VOODOO3=m
#
-# Miscellaneous I2C Chip support
+# Other I2C/SMBus bus drivers
#
-CONFIG_SENSORS_DS1337=m
-CONFIG_SENSORS_DS1374=m
-# CONFIG_DS1682 is not set
-CONFIG_EEPROM_LEGACY=m
-CONFIG_SENSORS_PCF8574=m
-CONFIG_SENSORS_PCA9539=m
-CONFIG_SENSORS_PCF8591=m
-CONFIG_EEPROM_MAX6875=m
-# CONFIG_SENSORS_TSL2550 is not set
+# CONFIG_I2C_PCA_PLATFORM is not set
+# CONFIG_I2C_STUB is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
# CONFIG_I2C_DEBUG_BUS is not set
-# CONFIG_I2C_DEBUG_CHIP is not set
+# CONFIG_SPI is not set
#
-# SPI support
+# PPS support
#
-CONFIG_SPI=y
-CONFIG_SPI_MASTER=y
+# CONFIG_PPS is not set
+CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_SYSFS=y
#
-# SPI Master Controller Drivers
+# Memory mapped GPIO expanders:
#
-CONFIG_SPI_BITBANG=m
-CONFIG_SPI_BUTTERFLY=m
-# CONFIG_SPI_LM70_LLP is not set
+# CONFIG_GPIO_IT8761E is not set
+# CONFIG_GPIO_SCH is not set
#
-# SPI Protocol Masters
+# I2C GPIO expanders:
#
-# CONFIG_EEPROM_AT25 is not set
-# CONFIG_SPI_SPIDEV is not set
-# CONFIG_SPI_TLE62X0 is not set
-CONFIG_W1=m
-CONFIG_W1_CON=y
+# CONFIG_GPIO_MAX7300 is not set
+# CONFIG_GPIO_MAX732X is not set
+# CONFIG_GPIO_PCA953X is not set
+# CONFIG_GPIO_PCF857X is not set
+# CONFIG_GPIO_ADP5588 is not set
#
-# 1-wire Bus Masters
+# PCI GPIO expanders:
#
-CONFIG_W1_MASTER_MATROX=m
-CONFIG_W1_MASTER_DS2490=m
-CONFIG_W1_MASTER_DS2482=m
+# CONFIG_GPIO_CS5535 is not set
+# CONFIG_GPIO_BT8XX is not set
+# CONFIG_GPIO_LANGWELL is not set
#
-# 1-wire Slaves
+# SPI GPIO expanders:
#
-CONFIG_W1_SLAVE_THERM=m
-CONFIG_W1_SLAVE_SMEM=m
-CONFIG_W1_SLAVE_DS2433=m
-# CONFIG_W1_SLAVE_DS2433_CRC is not set
-# CONFIG_W1_SLAVE_DS2760 is not set
+
+#
+# AC97 GPIO expanders:
+#
+# CONFIG_W1 is not set
# CONFIG_POWER_SUPPLY is not set
CONFIG_HWMON=y
CONFIG_HWMON_VID=m
-CONFIG_SENSORS_ABITUGURU=m
-# CONFIG_SENSORS_ABITUGURU3 is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
+
+#
+# Native drivers
+#
+# CONFIG_SENSORS_AD7414 is not set
# CONFIG_SENSORS_AD7418 is not set
CONFIG_SENSORS_ADM1021=m
CONFIG_SENSORS_ADM1025=m
@@ -1903,17 +1539,23 @@ CONFIG_SENSORS_ADM1026=m
# CONFIG_SENSORS_ADM1029 is not set
CONFIG_SENSORS_ADM1031=m
CONFIG_SENSORS_ADM9240=m
-CONFIG_SENSORS_ASB100=m
+# CONFIG_SENSORS_ADT7411 is not set
+# CONFIG_SENSORS_ADT7462 is not set
+# CONFIG_SENSORS_ADT7470 is not set
+# CONFIG_SENSORS_ADT7475 is not set
+# CONFIG_SENSORS_ASC7621 is not set
CONFIG_SENSORS_ATXP1=m
CONFIG_SENSORS_DS1621=m
+# CONFIG_SENSORS_I5K_AMB is not set
CONFIG_SENSORS_F71805F=m
-CONFIG_SENSORS_FSCHER=m
-CONFIG_SENSORS_FSCPOS=m
+# CONFIG_SENSORS_F71882FG is not set
+# CONFIG_SENSORS_F75375S is not set
+# CONFIG_SENSORS_G760A is not set
CONFIG_SENSORS_GL518SM=m
CONFIG_SENSORS_GL520SM=m
CONFIG_SENSORS_IT87=m
CONFIG_SENSORS_LM63=m
-CONFIG_SENSORS_LM70=m
+# CONFIG_SENSORS_LM73 is not set
CONFIG_SENSORS_LM75=m
CONFIG_SENSORS_LM77=m
CONFIG_SENSORS_LM78=m
@@ -1924,16 +1566,25 @@ CONFIG_SENSORS_LM87=m
CONFIG_SENSORS_LM90=m
CONFIG_SENSORS_LM92=m
# CONFIG_SENSORS_LM93 is not set
+# CONFIG_SENSORS_LTC4215 is not set
+# CONFIG_SENSORS_LTC4245 is not set
+# CONFIG_SENSORS_LM95241 is not set
CONFIG_SENSORS_MAX1619=m
# CONFIG_SENSORS_MAX6650 is not set
CONFIG_SENSORS_PC87360=m
# CONFIG_SENSORS_PC87427 is not set
+CONFIG_SENSORS_PCF8591=m
+# CONFIG_SENSORS_SHT15 is not set
CONFIG_SENSORS_SIS5595=m
# CONFIG_SENSORS_DME1737 is not set
CONFIG_SENSORS_SMSC47M1=m
CONFIG_SENSORS_SMSC47M192=m
CONFIG_SENSORS_SMSC47B397=m
+# CONFIG_SENSORS_ADS7828 is not set
+# CONFIG_SENSORS_AMC6821 is not set
# CONFIG_SENSORS_THMC50 is not set
+# CONFIG_SENSORS_TMP401 is not set
+# CONFIG_SENSORS_TMP421 is not set
CONFIG_SENSORS_VIA686A=m
CONFIG_SENSORS_VT1211=m
CONFIG_SENSORS_VT8231=m
@@ -1942,370 +1593,94 @@ CONFIG_SENSORS_W83791D=m
CONFIG_SENSORS_W83792D=m
# CONFIG_SENSORS_W83793 is not set
CONFIG_SENSORS_W83L785TS=m
+# CONFIG_SENSORS_W83L786NG is not set
CONFIG_SENSORS_W83627HF=m
CONFIG_SENSORS_W83627EHF=m
-# CONFIG_HWMON_DEBUG_CHIP is not set
-
-#
-# Multifunction device drivers
-#
-# CONFIG_MFD_SM501 is not set
-
-#
-# Multimedia devices
-#
-CONFIG_VIDEO_DEV=m
-CONFIG_VIDEO_V4L1=y
-CONFIG_VIDEO_V4L1_COMPAT=y
-CONFIG_VIDEO_V4L2=y
-CONFIG_VIDEO_CAPTURE_DRIVERS=y
-# CONFIG_VIDEO_ADV_DEBUG is not set
-CONFIG_VIDEO_HELPER_CHIPS_AUTO=y
-CONFIG_VIDEO_TVAUDIO=m
-CONFIG_VIDEO_TDA7432=m
-CONFIG_VIDEO_TDA9840=m
-CONFIG_VIDEO_TDA9875=m
-CONFIG_VIDEO_TEA6415C=m
-CONFIG_VIDEO_TEA6420=m
-CONFIG_VIDEO_MSP3400=m
-CONFIG_VIDEO_WM8775=m
-CONFIG_VIDEO_BT819=m
-CONFIG_VIDEO_BT856=m
-CONFIG_VIDEO_KS0127=m
-CONFIG_VIDEO_SAA7110=m
-CONFIG_VIDEO_SAA7111=m
-CONFIG_VIDEO_SAA7114=m
-CONFIG_VIDEO_SAA711X=m
-CONFIG_VIDEO_TVP5150=m
-CONFIG_VIDEO_VPX3220=m
-CONFIG_VIDEO_CX25840=m
-CONFIG_VIDEO_CX2341X=m
-CONFIG_VIDEO_SAA7185=m
-CONFIG_VIDEO_ADV7170=m
-CONFIG_VIDEO_ADV7175=m
-CONFIG_VIDEO_VIVI=m
-CONFIG_VIDEO_BT848=m
-CONFIG_VIDEO_BT848_DVB=y
-CONFIG_VIDEO_SAA6588=m
-CONFIG_VIDEO_BWQCAM=m
-CONFIG_VIDEO_CQCAM=m
-CONFIG_VIDEO_W9966=m
-CONFIG_VIDEO_CPIA=m
-CONFIG_VIDEO_CPIA_PP=m
-CONFIG_VIDEO_CPIA_USB=m
-CONFIG_VIDEO_CPIA2=m
-CONFIG_VIDEO_SAA5246A=m
-CONFIG_VIDEO_SAA5249=m
-CONFIG_TUNER_3036=m
-# CONFIG_TUNER_TEA5761 is not set
-CONFIG_VIDEO_STRADIS=m
-CONFIG_VIDEO_ZORAN_ZR36060=m
-CONFIG_VIDEO_ZORAN=m
-CONFIG_VIDEO_ZORAN_BUZ=m
-CONFIG_VIDEO_ZORAN_DC10=m
-CONFIG_VIDEO_ZORAN_DC30=m
-CONFIG_VIDEO_ZORAN_LML33=m
-CONFIG_VIDEO_ZORAN_LML33R10=m
-CONFIG_VIDEO_ZORAN_AVS6EYES=m
-CONFIG_VIDEO_SAA7134=m
-CONFIG_VIDEO_SAA7134_ALSA=m
-CONFIG_VIDEO_SAA7134_OSS=m
-CONFIG_VIDEO_SAA7134_DVB=m
-CONFIG_VIDEO_MXB=m
-CONFIG_VIDEO_DPC=m
-CONFIG_VIDEO_HEXIUM_ORION=m
-CONFIG_VIDEO_HEXIUM_GEMINI=m
-CONFIG_VIDEO_CX88=m
-CONFIG_VIDEO_CX88_ALSA=m
-CONFIG_VIDEO_CX88_BLACKBIRD=m
-CONFIG_VIDEO_CX88_DVB=m
-CONFIG_VIDEO_CX88_VP3054=m
-# CONFIG_VIDEO_IVTV is not set
-# CONFIG_VIDEO_CAFE_CCIC is not set
-CONFIG_V4L_USB_DRIVERS=y
-CONFIG_VIDEO_PVRUSB2=m
-CONFIG_VIDEO_PVRUSB2_29XXX=y
-CONFIG_VIDEO_PVRUSB2_24XXX=y
-CONFIG_VIDEO_PVRUSB2_SYSFS=y
-# CONFIG_VIDEO_PVRUSB2_DEBUGIFC is not set
-CONFIG_VIDEO_EM28XX=m
-# CONFIG_VIDEO_USBVISION is not set
-CONFIG_VIDEO_USBVIDEO=m
-CONFIG_USB_VICAM=m
-CONFIG_USB_IBMCAM=m
-CONFIG_USB_KONICAWC=m
-CONFIG_USB_QUICKCAM_MESSENGER=m
-CONFIG_USB_ET61X251=m
-CONFIG_VIDEO_OVCAMCHIP=m
-CONFIG_USB_W9968CF=m
-# CONFIG_USB_OV511 is not set
-CONFIG_USB_SE401=m
-CONFIG_USB_SN9C102=m
-CONFIG_USB_STV680=m
-CONFIG_USB_ZC0301=m
-CONFIG_USB_PWC=m
-# CONFIG_USB_PWC_DEBUG is not set
-# CONFIG_USB_ZR364XX is not set
-CONFIG_RADIO_ADAPTERS=y
-CONFIG_RADIO_GEMTEK_PCI=m
-CONFIG_RADIO_MAXIRADIO=m
-CONFIG_RADIO_MAESTRO=m
-CONFIG_USB_DSBR=m
-CONFIG_DVB_CORE=m
-CONFIG_DVB_CORE_ATTACH=y
-CONFIG_DVB_CAPTURE_DRIVERS=y
-
-#
-# Supported SAA7146 based PCI Adapters
-#
-CONFIG_DVB_AV7110=m
-CONFIG_DVB_AV7110_OSD=y
-CONFIG_DVB_BUDGET=m
-CONFIG_DVB_BUDGET_CI=m
-CONFIG_DVB_BUDGET_AV=m
-CONFIG_DVB_BUDGET_PATCH=m
-
-#
-# Supported USB Adapters
-#
-CONFIG_DVB_USB=m
-# CONFIG_DVB_USB_DEBUG is not set
-CONFIG_DVB_USB_A800=m
-CONFIG_DVB_USB_DIBUSB_MB=m
-CONFIG_DVB_USB_DIBUSB_MB_FAULTY=y
-CONFIG_DVB_USB_DIBUSB_MC=m
-CONFIG_DVB_USB_DIB0700=m
-CONFIG_DVB_USB_UMT_010=m
-CONFIG_DVB_USB_CXUSB=m
-# CONFIG_DVB_USB_M920X is not set
-# CONFIG_DVB_USB_GL861 is not set
-# CONFIG_DVB_USB_AU6610 is not set
-CONFIG_DVB_USB_DIGITV=m
-CONFIG_DVB_USB_VP7045=m
-CONFIG_DVB_USB_VP702X=m
-CONFIG_DVB_USB_GP8PSK=m
-CONFIG_DVB_USB_NOVA_T_USB2=m
-# CONFIG_DVB_USB_TTUSB2 is not set
-CONFIG_DVB_USB_DTT200U=m
-# CONFIG_DVB_USB_OPERA1 is not set
-# CONFIG_DVB_USB_AF9005 is not set
-CONFIG_DVB_TTUSB_BUDGET=m
-CONFIG_DVB_TTUSB_DEC=m
-CONFIG_DVB_CINERGYT2=m
-CONFIG_DVB_CINERGYT2_TUNING=y
-CONFIG_DVB_CINERGYT2_STREAM_URB_COUNT=32
-CONFIG_DVB_CINERGYT2_STREAM_BUF_SIZE=512
-CONFIG_DVB_CINERGYT2_QUERY_INTERVAL=250
-CONFIG_DVB_CINERGYT2_ENABLE_RC_INPUT_DEVICE=y
-CONFIG_DVB_CINERGYT2_RC_QUERY_INTERVAL=100
-
-#
-# Supported FlexCopII (B2C2) Adapters
-#
-CONFIG_DVB_B2C2_FLEXCOP=m
-CONFIG_DVB_B2C2_FLEXCOP_PCI=m
-CONFIG_DVB_B2C2_FLEXCOP_USB=m
-# CONFIG_DVB_B2C2_FLEXCOP_DEBUG is not set
-
-#
-# Supported BT878 Adapters
-#
-CONFIG_DVB_BT8XX=m
-
-#
-# Supported Pluto2 Adapters
-#
-CONFIG_DVB_PLUTO2=m
-
-#
-# Supported DVB Frontends
-#
-
-#
-# Customise DVB Frontends
-#
-# CONFIG_DVB_FE_CUSTOMISE is not set
-
-#
-# DVB-S (satellite) frontends
-#
-CONFIG_DVB_STV0299=m
-CONFIG_DVB_CX24110=m
-CONFIG_DVB_CX24123=m
-CONFIG_DVB_TDA8083=m
-CONFIG_DVB_MT312=m
-CONFIG_DVB_VES1X93=m
-CONFIG_DVB_S5H1420=m
-CONFIG_DVB_TDA10086=m
+# CONFIG_SENSORS_LIS3_I2C is not set
+# CONFIG_THERMAL is not set
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_NOWAYOUT=y
#
-# DVB-T (terrestrial) frontends
+# Watchdog Device Drivers
#
-CONFIG_DVB_SP8870=m
-CONFIG_DVB_SP887X=m
-CONFIG_DVB_CX22700=m
-CONFIG_DVB_CX22702=m
-CONFIG_DVB_L64781=m
-CONFIG_DVB_TDA1004X=m
-CONFIG_DVB_NXT6000=m
-CONFIG_DVB_MT352=m
-CONFIG_DVB_ZL10353=m
-CONFIG_DVB_DIB3000MB=m
-CONFIG_DVB_DIB3000MC=m
-CONFIG_DVB_DIB7000M=m
-CONFIG_DVB_DIB7000P=m
+# CONFIG_SOFT_WATCHDOG is not set
+# CONFIG_ALIM7101_WDT is not set
+CONFIG_WDT_MTX1=y
#
-# DVB-C (cable) frontends
+# PCI-based Watchdog Cards
#
-CONFIG_DVB_VES1820=m
-CONFIG_DVB_TDA10021=m
-CONFIG_DVB_TDA10023=m
-CONFIG_DVB_STV0297=m
+# CONFIG_PCIPCWATCHDOG is not set
+# CONFIG_WDTPCI is not set
#
-# ATSC (North American/Korean Terrestrial/Cable DTV) frontends
+# USB-based Watchdog Cards
#
-CONFIG_DVB_NXT200X=m
-CONFIG_DVB_OR51211=m
-CONFIG_DVB_OR51132=m
-CONFIG_DVB_BCM3510=m
-CONFIG_DVB_LGDT330X=m
+# CONFIG_USBPCWATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
#
-# Tuners/PLL support
+# Sonics Silicon Backplane
#
-CONFIG_DVB_PLL=m
-CONFIG_DVB_TDA826X=m
-CONFIG_DVB_TDA827X=m
-# CONFIG_DVB_TUNER_QT1010 is not set
-CONFIG_DVB_TUNER_MT2060=m
+CONFIG_SSB=m
+CONFIG_SSB_SPROM=y
+CONFIG_SSB_PCIHOST_POSSIBLE=y
+CONFIG_SSB_PCIHOST=y
+# CONFIG_SSB_B43_PCI_BRIDGE is not set
+CONFIG_SSB_PCMCIAHOST_POSSIBLE=y
+# CONFIG_SSB_PCMCIAHOST is not set
+CONFIG_SSB_SDIOHOST_POSSIBLE=y
+# CONFIG_SSB_SDIOHOST is not set
+# CONFIG_SSB_SILENT is not set
+# CONFIG_SSB_DEBUG is not set
+CONFIG_SSB_DRIVER_PCICORE_POSSIBLE=y
+CONFIG_SSB_DRIVER_PCICORE=y
+# CONFIG_SSB_DRIVER_MIPS is not set
#
-# Miscellaneous devices
+# Multifunction device drivers
#
-CONFIG_DVB_LNBP21=m
-CONFIG_DVB_ISL6421=m
-CONFIG_DVB_TUA6100=m
-CONFIG_VIDEO_SAA7146=m
-CONFIG_VIDEO_SAA7146_VV=m
-CONFIG_VIDEO_TUNER=m
-CONFIG_VIDEO_BUF=m
-CONFIG_VIDEO_BUF_DVB=m
-CONFIG_VIDEO_BTCX=m
-CONFIG_VIDEO_IR_I2C=m
-CONFIG_VIDEO_IR=m
-CONFIG_VIDEO_TVEEPROM=m
-CONFIG_DAB=y
-CONFIG_USB_DABUSB=m
+# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_UCB1400_CORE is not set
+# CONFIG_TPS65010 is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM8994 is not set
+# CONFIG_MFD_PCF50633 is not set
+# CONFIG_MFD_TIMBERDALE is not set
+# CONFIG_LPC_SCH is not set
+# CONFIG_REGULATOR is not set
+# CONFIG_MEDIA_SUPPORT is not set
#
# Graphics support
#
+# CONFIG_VGA_ARB is not set
+# CONFIG_DRM is not set
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+# CONFIG_FB is not set
CONFIG_BACKLIGHT_LCD_SUPPORT=y
-CONFIG_LCD_CLASS_DEVICE=m
+# CONFIG_LCD_CLASS_DEVICE is not set
CONFIG_BACKLIGHT_CLASS_DEVICE=y
+# CONFIG_BACKLIGHT_GENERIC is not set
#
# Display device support
#
# CONFIG_DISPLAY_SUPPORT is not set
-CONFIG_VGASTATE=m
-CONFIG_VIDEO_OUTPUT_CONTROL=m
-CONFIG_FB=y
-CONFIG_FIRMWARE_EDID=y
-CONFIG_FB_DDC=m
-CONFIG_FB_CFB_FILLRECT=m
-CONFIG_FB_CFB_COPYAREA=m
-CONFIG_FB_CFB_IMAGEBLIT=m
-# CONFIG_FB_SYS_FILLRECT is not set
-# CONFIG_FB_SYS_COPYAREA is not set
-# CONFIG_FB_SYS_IMAGEBLIT is not set
-# CONFIG_FB_SYS_FOPS is not set
-CONFIG_FB_DEFERRED_IO=y
-# CONFIG_FB_SVGALIB is not set
-# CONFIG_FB_MACMODES is not set
-CONFIG_FB_BACKLIGHT=y
-CONFIG_FB_MODE_HELPERS=y
-CONFIG_FB_TILEBLITTING=y
-
-#
-# Frame buffer hardware drivers
-#
-CONFIG_FB_CIRRUS=m
-CONFIG_FB_PM2=m
-CONFIG_FB_PM2_FIFO_DISCONNECT=y
-CONFIG_FB_CYBER2000=m
-# CONFIG_FB_ASILIANT is not set
-# CONFIG_FB_IMSTT is not set
-CONFIG_FB_S1D13XXX=m
-CONFIG_FB_NVIDIA=m
-CONFIG_FB_NVIDIA_I2C=y
-# CONFIG_FB_NVIDIA_DEBUG is not set
-CONFIG_FB_NVIDIA_BACKLIGHT=y
-CONFIG_FB_RIVA=m
-CONFIG_FB_RIVA_I2C=y
-# CONFIG_FB_RIVA_DEBUG is not set
-CONFIG_FB_RIVA_BACKLIGHT=y
-CONFIG_FB_MATROX=m
-CONFIG_FB_MATROX_MILLENIUM=y
-CONFIG_FB_MATROX_MYSTIQUE=y
-CONFIG_FB_MATROX_G=y
-CONFIG_FB_MATROX_I2C=m
-CONFIG_FB_MATROX_MAVEN=m
-CONFIG_FB_MATROX_MULTIHEAD=y
-CONFIG_FB_RADEON=m
-CONFIG_FB_RADEON_I2C=y
-CONFIG_FB_RADEON_BACKLIGHT=y
-# CONFIG_FB_RADEON_DEBUG is not set
-CONFIG_FB_ATY128=m
-CONFIG_FB_ATY128_BACKLIGHT=y
-CONFIG_FB_ATY=m
-CONFIG_FB_ATY_CT=y
-CONFIG_FB_ATY_GENERIC_LCD=y
-CONFIG_FB_ATY_GX=y
-CONFIG_FB_ATY_BACKLIGHT=y
-# CONFIG_FB_S3 is not set
-CONFIG_FB_SAVAGE=m
-CONFIG_FB_SAVAGE_I2C=y
-CONFIG_FB_SAVAGE_ACCEL=y
-CONFIG_FB_SIS=m
-CONFIG_FB_SIS_300=y
-CONFIG_FB_SIS_315=y
-CONFIG_FB_NEOMAGIC=m
-CONFIG_FB_KYRO=m
-CONFIG_FB_3DFX=m
-# CONFIG_FB_3DFX_ACCEL is not set
-CONFIG_FB_VOODOO1=m
-# CONFIG_FB_VT8623 is not set
-CONFIG_FB_TRIDENT=m
-# CONFIG_FB_TRIDENT_ACCEL is not set
-# CONFIG_FB_ARK is not set
-# CONFIG_FB_PM3 is not set
-# CONFIG_FB_VIRTUAL is not set
#
# Console display driver support
#
-CONFIG_VGA_CONSOLE=y
-# CONFIG_VGACON_SOFT_SCROLLBACK is not set
+# CONFIG_VGA_CONSOLE is not set
CONFIG_DUMMY_CONSOLE=y
-CONFIG_FRAMEBUFFER_CONSOLE=m
-# CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set
-# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
-# CONFIG_FONTS is not set
-CONFIG_FONT_8x8=y
-CONFIG_FONT_8x16=y
-# CONFIG_LOGO is not set
-
-#
-# Sound
-#
CONFIG_SOUND=m
-
-#
-# Advanced Linux Sound Architecture
-#
+CONFIG_SOUND_OSS_CORE=y
+CONFIG_SOUND_OSS_CORE_PRECLAIM=y
CONFIG_SND=m
CONFIG_SND_TIMER=m
CONFIG_SND_PCM=m
@@ -2318,32 +1693,29 @@ CONFIG_SND_MIXER_OSS=m
CONFIG_SND_PCM_OSS=m
CONFIG_SND_PCM_OSS_PLUGINS=y
CONFIG_SND_SEQUENCER_OSS=y
-CONFIG_SND_RTCTIMER=m
-CONFIG_SND_SEQ_RTCTIMER_DEFAULT=y
CONFIG_SND_DYNAMIC_MINORS=y
CONFIG_SND_SUPPORT_OLD_API=y
CONFIG_SND_VERBOSE_PROCFS=y
# CONFIG_SND_VERBOSE_PRINTK is not set
# CONFIG_SND_DEBUG is not set
-
-#
-# Generic devices
-#
+CONFIG_SND_VMASTER=y
+CONFIG_SND_RAWMIDI_SEQ=m
+CONFIG_SND_OPL3_LIB_SEQ=m
+# CONFIG_SND_OPL4_LIB_SEQ is not set
+# CONFIG_SND_SBAWE_SEQ is not set
+CONFIG_SND_EMU10K1_SEQ=m
CONFIG_SND_MPU401_UART=m
CONFIG_SND_OPL3_LIB=m
CONFIG_SND_VX_LIB=m
CONFIG_SND_AC97_CODEC=m
+CONFIG_SND_DRIVERS=y
CONFIG_SND_DUMMY=m
CONFIG_SND_VIRMIDI=m
CONFIG_SND_MTPAV=m
-CONFIG_SND_MTS64=m
CONFIG_SND_SERIAL_U16550=m
CONFIG_SND_MPU401=m
-# CONFIG_SND_PORTMAN2X4 is not set
-
-#
-# PCI devices
-#
+# CONFIG_SND_AC97_POWER_SAVE is not set
+CONFIG_SND_PCI=y
CONFIG_SND_AD1889=m
CONFIG_SND_ALS300=m
CONFIG_SND_ALI5451=m
@@ -2352,14 +1724,18 @@ CONFIG_SND_ATIIXP_MODEM=m
CONFIG_SND_AU8810=m
CONFIG_SND_AU8820=m
CONFIG_SND_AU8830=m
+# CONFIG_SND_AW2 is not set
CONFIG_SND_AZT3328=m
CONFIG_SND_BT87X=m
# CONFIG_SND_BT87X_OVERCLOCK is not set
CONFIG_SND_CA0106=m
CONFIG_SND_CMIPCI=m
+# CONFIG_SND_OXYGEN is not set
CONFIG_SND_CS4281=m
CONFIG_SND_CS46XX=m
CONFIG_SND_CS46XX_NEW_DSP=y
+# CONFIG_SND_CS5535AUDIO is not set
+# CONFIG_SND_CTXFI is not set
CONFIG_SND_DARLA20=m
CONFIG_SND_GINA20=m
CONFIG_SND_LAYLA20=m
@@ -2372,6 +1748,8 @@ CONFIG_SND_ECHO3G=m
CONFIG_SND_INDIGO=m
CONFIG_SND_INDIGOIO=m
CONFIG_SND_INDIGODJ=m
+# CONFIG_SND_INDIGOIOX is not set
+# CONFIG_SND_INDIGODJX is not set
CONFIG_SND_EMU10K1=m
CONFIG_SND_EMU10K1X=m
CONFIG_SND_ENS1370=m
@@ -2379,19 +1757,36 @@ CONFIG_SND_ENS1371=m
CONFIG_SND_ES1938=m
CONFIG_SND_ES1968=m
CONFIG_SND_FM801=m
-CONFIG_SND_FM801_TEA575X_BOOL=y
-CONFIG_SND_FM801_TEA575X=m
CONFIG_SND_HDA_INTEL=m
+# CONFIG_SND_HDA_HWDEP is not set
+# CONFIG_SND_HDA_INPUT_BEEP is not set
+# CONFIG_SND_HDA_INPUT_JACK is not set
+# CONFIG_SND_HDA_PATCH_LOADER is not set
+CONFIG_SND_HDA_CODEC_REALTEK=y
+CONFIG_SND_HDA_CODEC_ANALOG=y
+CONFIG_SND_HDA_CODEC_SIGMATEL=y
+CONFIG_SND_HDA_CODEC_VIA=y
+CONFIG_SND_HDA_CODEC_ATIHDMI=y
+CONFIG_SND_HDA_CODEC_NVHDMI=y
+CONFIG_SND_HDA_CODEC_INTELHDMI=y
+CONFIG_SND_HDA_ELD=y
+CONFIG_SND_HDA_CODEC_CIRRUS=y
+CONFIG_SND_HDA_CODEC_CONEXANT=y
+CONFIG_SND_HDA_CODEC_CA0110=y
+CONFIG_SND_HDA_CODEC_CMEDIA=y
+CONFIG_SND_HDA_CODEC_SI3054=y
+CONFIG_SND_HDA_GENERIC=y
+# CONFIG_SND_HDA_POWER_SAVE is not set
CONFIG_SND_HDSP=m
CONFIG_SND_HDSPM=m
+# CONFIG_SND_HIFIER is not set
CONFIG_SND_ICE1712=m
CONFIG_SND_ICE1724=m
CONFIG_SND_INTEL8X0=m
CONFIG_SND_INTEL8X0M=m
CONFIG_SND_KORG1212=m
-CONFIG_SND_KORG1212_FIRMWARE_IN_KERNEL=y
+# CONFIG_SND_LX6464ES is not set
CONFIG_SND_MAESTRO3=m
-CONFIG_SND_MAESTRO3_FIRMWARE_IN_KERNEL=y
CONFIG_SND_MIXART=m
CONFIG_SND_NM256=m
CONFIG_SND_PCXHR=m
@@ -2403,55 +1798,30 @@ CONFIG_SND_SONICVIBES=m
CONFIG_SND_TRIDENT=m
CONFIG_SND_VIA82XX=m
CONFIG_SND_VIA82XX_MODEM=m
+# CONFIG_SND_VIRTUOSO is not set
CONFIG_SND_VX222=m
CONFIG_SND_YMFPCI=m
-CONFIG_SND_YMFPCI_FIRMWARE_IN_KERNEL=y
-# CONFIG_SND_AC97_POWER_SAVE is not set
-
-#
-# ALSA MIPS devices
-#
+CONFIG_SND_MIPS=y
# CONFIG_SND_AU1X00 is not set
-
-#
-# USB devices
-#
+CONFIG_SND_USB=y
CONFIG_SND_USB_AUDIO=m
+# CONFIG_SND_USB_UA101 is not set
# CONFIG_SND_USB_CAIAQ is not set
-
-#
-# PCMCIA devices
-#
+CONFIG_SND_PCMCIA=y
CONFIG_SND_VXPOCKET=m
CONFIG_SND_PDAUDIOCF=m
-
-#
-# System on Chip audio support
-#
# CONFIG_SND_SOC is not set
-
-#
-# SoC Audio support for SuperH
-#
-
-#
-# Open Sound System
-#
CONFIG_SOUND_PRIME=m
-CONFIG_SOUND_TRIDENT=m
-# CONFIG_SOUND_MSNDCLAS is not set
-# CONFIG_SOUND_MSNDPIN is not set
CONFIG_AC97_BUS=m
CONFIG_HID_SUPPORT=y
CONFIG_HID=y
-# CONFIG_HID_DEBUG is not set
+# CONFIG_HIDRAW is not set
#
# USB Input Devices
#
CONFIG_USB_HID=m
-CONFIG_USB_HIDINPUT_POWERBOOK=y
-# CONFIG_HID_FF is not set
+# CONFIG_HID_PID is not set
CONFIG_USB_HIDDEV=y
#
@@ -2459,12 +1829,50 @@ CONFIG_USB_HIDDEV=y
#
CONFIG_USB_KBD=m
CONFIG_USB_MOUSE=m
+
+#
+# Special HID drivers
+#
+# CONFIG_HID_3M_PCT is not set
+# CONFIG_HID_A4TECH is not set
+# CONFIG_HID_APPLE is not set
+# CONFIG_HID_BELKIN is not set
+# CONFIG_HID_CHERRY is not set
+# CONFIG_HID_CHICONY is not set
+# CONFIG_HID_CYPRESS is not set
+# CONFIG_HID_DRAGONRISE is not set
+# CONFIG_HID_EZKEY is not set
+# CONFIG_HID_KYE is not set
+# CONFIG_HID_GYRATION is not set
+# CONFIG_HID_TWINHAN is not set
+# CONFIG_HID_KENSINGTON is not set
+# CONFIG_HID_LOGITECH is not set
+# CONFIG_HID_MAGICMOUSE is not set
+# CONFIG_HID_MICROSOFT is not set
+# CONFIG_HID_MOSART is not set
+# CONFIG_HID_MONTEREY is not set
+# CONFIG_HID_NTRIG is not set
+# CONFIG_HID_ORTEK is not set
+# CONFIG_HID_PANTHERLORD is not set
+# CONFIG_HID_PETALYNX is not set
+# CONFIG_HID_QUANTA is not set
+# CONFIG_HID_SAMSUNG is not set
+# CONFIG_HID_SONY is not set
+# CONFIG_HID_STANTUM is not set
+# CONFIG_HID_SUNPLUS is not set
+# CONFIG_HID_GREENASIA is not set
+# CONFIG_HID_SMARTJOYPLUS is not set
+# CONFIG_HID_TOPSEED is not set
+# CONFIG_HID_THRUSTMASTER is not set
+# CONFIG_HID_WACOM is not set
+# CONFIG_HID_ZEROPLUS is not set
CONFIG_USB_SUPPORT=y
CONFIG_USB_ARCH_HAS_HCD=y
CONFIG_USB_ARCH_HAS_OHCI=y
CONFIG_USB_ARCH_HAS_EHCI=y
CONFIG_USB=m
# CONFIG_USB_DEBUG is not set
+# CONFIG_USB_ANNOUNCE_NEW_DEVICES is not set
#
# Miscellaneous USB options
@@ -2472,19 +1880,27 @@ CONFIG_USB=m
CONFIG_USB_DEVICEFS=y
CONFIG_USB_DEVICE_CLASS=y
# CONFIG_USB_DYNAMIC_MINORS is not set
-CONFIG_USB_SUSPEND=y
-# CONFIG_USB_PERSIST is not set
# CONFIG_USB_OTG is not set
+# CONFIG_USB_OTG_WHITELIST is not set
+# CONFIG_USB_OTG_BLACKLIST_HUB is not set
+CONFIG_USB_MON=m
+# CONFIG_USB_WUSB is not set
+# CONFIG_USB_WUSB_CBAF is not set
#
# USB Host Controller Drivers
#
+# CONFIG_USB_C67X00_HCD is not set
+# CONFIG_USB_XHCI_HCD is not set
CONFIG_USB_EHCI_HCD=m
-CONFIG_USB_EHCI_SPLIT_ISO=y
CONFIG_USB_EHCI_ROOT_HUB_TT=y
CONFIG_USB_EHCI_TT_NEWSCHED=y
+# CONFIG_USB_OXU210HP_HCD is not set
# CONFIG_USB_ISP116X_HCD is not set
+# CONFIG_USB_ISP1760_HCD is not set
+# CONFIG_USB_ISP1362_HCD is not set
CONFIG_USB_OHCI_HCD=m
+# CONFIG_USB_OHCI_HCD_SSB is not set
# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set
# CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set
CONFIG_USB_OHCI_LITTLE_ENDIAN=y
@@ -2493,32 +1909,38 @@ CONFIG_USB_U132_HCD=m
CONFIG_USB_SL811_HCD=m
CONFIG_USB_SL811_CS=m
# CONFIG_USB_R8A66597_HCD is not set
+# CONFIG_USB_WHCI_HCD is not set
+# CONFIG_USB_HWA_HCD is not set
+# CONFIG_USB_GADGET_MUSB_HDRC is not set
#
# USB Device Class drivers
#
CONFIG_USB_ACM=m
CONFIG_USB_PRINTER=m
+# CONFIG_USB_WDM is not set
+# CONFIG_USB_TMC is not set
#
-# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
#
#
-# may also be needed; see USB_STORAGE Help for more information
+# also be needed; see USB_STORAGE Help for more info
#
CONFIG_USB_STORAGE=m
# CONFIG_USB_STORAGE_DEBUG is not set
-CONFIG_USB_STORAGE_DATAFAB=y
-CONFIG_USB_STORAGE_FREECOM=y
-CONFIG_USB_STORAGE_ISD200=y
-CONFIG_USB_STORAGE_DPCM=y
-CONFIG_USB_STORAGE_USBAT=y
-CONFIG_USB_STORAGE_SDDR09=y
-CONFIG_USB_STORAGE_SDDR55=y
-CONFIG_USB_STORAGE_JUMPSHOT=y
-CONFIG_USB_STORAGE_ALAUDA=y
-CONFIG_USB_STORAGE_KARMA=y
+CONFIG_USB_STORAGE_DATAFAB=m
+CONFIG_USB_STORAGE_FREECOM=m
+CONFIG_USB_STORAGE_ISD200=m
+CONFIG_USB_STORAGE_USBAT=m
+CONFIG_USB_STORAGE_SDDR09=m
+CONFIG_USB_STORAGE_SDDR55=m
+CONFIG_USB_STORAGE_JUMPSHOT=m
+CONFIG_USB_STORAGE_ALAUDA=m
+# CONFIG_USB_STORAGE_ONETOUCH is not set
+CONFIG_USB_STORAGE_KARMA=m
+# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set
CONFIG_USB_LIBUSUAL=y
#
@@ -2526,25 +1948,20 @@ CONFIG_USB_LIBUSUAL=y
#
CONFIG_USB_MDC800=m
CONFIG_USB_MICROTEK=m
-CONFIG_USB_MON=y
#
# USB port drivers
#
-CONFIG_USB_USS720=m
-
-#
-# USB Serial Converter support
-#
CONFIG_USB_SERIAL=m
+CONFIG_USB_EZUSB=y
CONFIG_USB_SERIAL_GENERIC=y
CONFIG_USB_SERIAL_AIRCABLE=m
-CONFIG_USB_SERIAL_AIRPRIME=m
CONFIG_USB_SERIAL_ARK3116=m
CONFIG_USB_SERIAL_BELKIN=m
+# CONFIG_USB_SERIAL_CH341 is not set
CONFIG_USB_SERIAL_WHITEHEAT=m
CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m
-CONFIG_USB_SERIAL_CP2101=m
+# CONFIG_USB_SERIAL_CP210X is not set
CONFIG_USB_SERIAL_CYPRESS_M8=m
CONFIG_USB_SERIAL_EMPEG=m
CONFIG_USB_SERIAL_FTDI_SIO=m
@@ -2556,6 +1973,7 @@ CONFIG_USB_SERIAL_EDGEPORT=m
CONFIG_USB_SERIAL_EDGEPORT_TI=m
CONFIG_USB_SERIAL_GARMIN=m
CONFIG_USB_SERIAL_IPW=m
+# CONFIG_USB_SERIAL_IUU is not set
CONFIG_USB_SERIAL_KEYSPAN_PDA=m
CONFIG_USB_SERIAL_KEYSPAN=m
# CONFIG_USB_SERIAL_KEYSPAN_MPR is not set
@@ -2575,20 +1993,27 @@ CONFIG_USB_SERIAL_KOBIL_SCT=m
CONFIG_USB_SERIAL_MCT_U232=m
CONFIG_USB_SERIAL_MOS7720=m
CONFIG_USB_SERIAL_MOS7840=m
+# CONFIG_USB_SERIAL_MOTOROLA is not set
CONFIG_USB_SERIAL_NAVMAN=m
CONFIG_USB_SERIAL_PL2303=m
# CONFIG_USB_SERIAL_OTI6858 is not set
+# CONFIG_USB_SERIAL_QCAUX is not set
+# CONFIG_USB_SERIAL_QUALCOMM is not set
+# CONFIG_USB_SERIAL_SPCP8X5 is not set
CONFIG_USB_SERIAL_HP4X=m
CONFIG_USB_SERIAL_SAFE=m
# CONFIG_USB_SERIAL_SAFE_PADDED is not set
+# CONFIG_USB_SERIAL_SIEMENS_MPI is not set
CONFIG_USB_SERIAL_SIERRAWIRELESS=m
+# CONFIG_USB_SERIAL_SYMBOL is not set
CONFIG_USB_SERIAL_TI=m
CONFIG_USB_SERIAL_CYBERJACK=m
CONFIG_USB_SERIAL_XIRCOM=m
CONFIG_USB_SERIAL_OPTION=m
CONFIG_USB_SERIAL_OMNINET=m
+# CONFIG_USB_SERIAL_OPTICON is not set
+# CONFIG_USB_SERIAL_VIVOPAY_SERIAL is not set
# CONFIG_USB_SERIAL_DEBUG is not set
-CONFIG_USB_EZUSB=y
#
# USB Miscellaneous drivers
@@ -2596,18 +2021,13 @@ CONFIG_USB_EZUSB=y
CONFIG_USB_EMI62=m
CONFIG_USB_EMI26=m
CONFIG_USB_ADUTUX=m
-CONFIG_USB_AUERSWALD=m
+# CONFIG_USB_SEVSEG is not set
CONFIG_USB_RIO500=m
CONFIG_USB_LEGOTOWER=m
CONFIG_USB_LCD=m
-# CONFIG_USB_BERRY_CHARGE is not set
CONFIG_USB_LED=m
CONFIG_USB_CYPRESS_CY7C63=m
CONFIG_USB_CYTHERM=m
-CONFIG_USB_PHIDGET=m
-CONFIG_USB_PHIDGETKIT=m
-CONFIG_USB_PHIDGETMOTORCONTROL=m
-CONFIG_USB_PHIDGETSERVO=m
CONFIG_USB_IDMOUSE=m
CONFIG_USB_FTDI_ELAN=m
CONFIG_USB_APPLEDISPLAY=m
@@ -2617,86 +2037,113 @@ CONFIG_USB_LD=m
CONFIG_USB_TRANCEVIBRATOR=m
# CONFIG_USB_IOWARRIOR is not set
CONFIG_USB_TEST=m
-
-#
-# USB DSL modem support
-#
+# CONFIG_USB_ISIGHTFW is not set
CONFIG_USB_ATM=m
CONFIG_USB_SPEEDTOUCH=m
CONFIG_USB_CXACRU=m
CONFIG_USB_UEAGLEATM=m
CONFIG_USB_XUSBATM=m
-
-#
-# USB Gadget Support
-#
CONFIG_USB_GADGET=m
# CONFIG_USB_GADGET_DEBUG_FILES is not set
+# CONFIG_USB_GADGET_DEBUG_FS is not set
+CONFIG_USB_GADGET_VBUS_DRAW=2
CONFIG_USB_GADGET_SELECTED=y
-# CONFIG_USB_GADGET_AMD5536UDC is not set
+# CONFIG_USB_GADGET_AT91 is not set
+# CONFIG_USB_GADGET_ATMEL_USBA is not set
# CONFIG_USB_GADGET_FSL_USB2 is not set
-CONFIG_USB_GADGET_NET2280=y
-CONFIG_USB_NET2280=m
-# CONFIG_USB_GADGET_PXA2XX is not set
-# CONFIG_USB_GADGET_M66592 is not set
-# CONFIG_USB_GADGET_GOKU is not set
# CONFIG_USB_GADGET_LH7A40X is not set
# CONFIG_USB_GADGET_OMAP is not set
+# CONFIG_USB_GADGET_PXA25X is not set
+# CONFIG_USB_GADGET_R8A66597 is not set
+# CONFIG_USB_GADGET_PXA27X is not set
+# CONFIG_USB_GADGET_S3C_HSOTG is not set
+# CONFIG_USB_GADGET_IMX is not set
# CONFIG_USB_GADGET_S3C2410 is not set
-# CONFIG_USB_GADGET_AT91 is not set
+# CONFIG_USB_GADGET_M66592 is not set
+# CONFIG_USB_GADGET_AMD5536UDC is not set
+# CONFIG_USB_GADGET_FSL_QE is not set
+# CONFIG_USB_GADGET_CI13XXX is not set
+CONFIG_USB_GADGET_NET2280=y
+CONFIG_USB_NET2280=m
+# CONFIG_USB_GADGET_GOKU is not set
+# CONFIG_USB_GADGET_LANGWELL is not set
# CONFIG_USB_GADGET_DUMMY_HCD is not set
CONFIG_USB_GADGET_DUALSPEED=y
CONFIG_USB_ZERO=m
+# CONFIG_USB_AUDIO is not set
CONFIG_USB_ETH=m
CONFIG_USB_ETH_RNDIS=y
+# CONFIG_USB_ETH_EEM is not set
CONFIG_USB_GADGETFS=m
CONFIG_USB_FILE_STORAGE=m
# CONFIG_USB_FILE_STORAGE_TEST is not set
+# CONFIG_USB_MASS_STORAGE is not set
CONFIG_USB_G_SERIAL=m
CONFIG_USB_MIDI_GADGET=m
+# CONFIG_USB_G_PRINTER is not set
+# CONFIG_USB_CDC_COMPOSITE is not set
+# CONFIG_USB_G_NOKIA is not set
+# CONFIG_USB_G_MULTI is not set
+
+#
+# OTG and related infrastructure
+#
+# CONFIG_USB_GPIO_VBUS is not set
+# CONFIG_NOP_USB_XCEIV is not set
+# CONFIG_UWB is not set
CONFIG_MMC=m
# CONFIG_MMC_DEBUG is not set
# CONFIG_MMC_UNSAFE_RESUME is not set
#
-# MMC/SD Card Drivers
+# MMC/SD/SDIO Card Drivers
#
CONFIG_MMC_BLOCK=m
CONFIG_MMC_BLOCK_BOUNCE=y
+# CONFIG_SDIO_UART is not set
+# CONFIG_MMC_TEST is not set
#
-# MMC/SD Host Controller Drivers
+# MMC/SD/SDIO Host Controller Drivers
#
CONFIG_MMC_SDHCI=m
+# CONFIG_MMC_SDHCI_PCI is not set
+# CONFIG_MMC_SDHCI_PLTFM is not set
CONFIG_MMC_TIFM_SD=m
+# CONFIG_MMC_SDRICOH_CS is not set
+# CONFIG_MMC_CB710 is not set
+# CONFIG_MMC_VIA_SDMMC is not set
+# CONFIG_MEMSTICK is not set
CONFIG_NEW_LEDS=y
-CONFIG_LEDS_CLASS=m
+CONFIG_LEDS_CLASS=y
#
# LED drivers
#
+# CONFIG_LEDS_PCA9532 is not set
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_GPIO_PLATFORM=y
+# CONFIG_LEDS_LP3944 is not set
+# CONFIG_LEDS_PCA955X is not set
+# CONFIG_LEDS_BD2802 is not set
+# CONFIG_LEDS_LT3593 is not set
+CONFIG_LEDS_TRIGGERS=y
#
# LED Triggers
#
-# CONFIG_LEDS_TRIGGERS is not set
-CONFIG_INFINIBAND=m
-CONFIG_INFINIBAND_USER_MAD=m
-CONFIG_INFINIBAND_USER_ACCESS=m
-CONFIG_INFINIBAND_USER_MEM=y
-CONFIG_INFINIBAND_ADDR_TRANS=y
-CONFIG_INFINIBAND_MTHCA=m
-CONFIG_INFINIBAND_MTHCA_DEBUG=y
-CONFIG_INFINIBAND_AMSO1100=m
-CONFIG_INFINIBAND_AMSO1100_DEBUG=y
-# CONFIG_MLX4_INFINIBAND is not set
-CONFIG_INFINIBAND_IPOIB=m
-# CONFIG_INFINIBAND_IPOIB_CM is not set
-CONFIG_INFINIBAND_IPOIB_DEBUG=y
-# CONFIG_INFINIBAND_IPOIB_DEBUG_DATA is not set
-CONFIG_INFINIBAND_SRP=m
-CONFIG_INFINIBAND_ISER=m
-CONFIG_RTC_LIB=m
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
+# CONFIG_LEDS_TRIGGER_GPIO is not set
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+
+#
+# iptables trigger is under Netfilter config (LED target)
+#
+# CONFIG_ACCESSIBILITY is not set
+# CONFIG_INFINIBAND is not set
+CONFIG_RTC_LIB=y
CONFIG_RTC_CLASS=m
#
@@ -2712,6 +2159,7 @@ CONFIG_RTC_DRV_TEST=m
# I2C RTC drivers
#
CONFIG_RTC_DRV_DS1307=m
+# CONFIG_RTC_DRV_DS1374 is not set
CONFIG_RTC_DRV_DS1672=m
# CONFIG_RTC_DRV_MAX6900 is not set
CONFIG_RTC_DRV_RS5C372=m
@@ -2720,48 +2168,45 @@ CONFIG_RTC_DRV_X1205=m
CONFIG_RTC_DRV_PCF8563=m
CONFIG_RTC_DRV_PCF8583=m
# CONFIG_RTC_DRV_M41T80 is not set
+# CONFIG_RTC_DRV_BQ32K is not set
+# CONFIG_RTC_DRV_S35390A is not set
+# CONFIG_RTC_DRV_FM3130 is not set
+# CONFIG_RTC_DRV_RX8581 is not set
+# CONFIG_RTC_DRV_RX8025 is not set
#
# SPI RTC drivers
#
-CONFIG_RTC_DRV_RS5C348=m
-CONFIG_RTC_DRV_MAX6902=m
#
# Platform RTC drivers
#
# CONFIG_RTC_DRV_CMOS is not set
+# CONFIG_RTC_DRV_DS1286 is not set
+# CONFIG_RTC_DRV_DS1511 is not set
CONFIG_RTC_DRV_DS1553=m
-# CONFIG_RTC_DRV_STK17TA8 is not set
CONFIG_RTC_DRV_DS1742=m
+# CONFIG_RTC_DRV_STK17TA8 is not set
CONFIG_RTC_DRV_M48T86=m
+# CONFIG_RTC_DRV_M48T35 is not set
# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_MSM6242 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
+# CONFIG_RTC_DRV_RP5C01 is not set
CONFIG_RTC_DRV_V3020=m
#
# on-CPU RTC drivers
#
-
-#
-# DMA Engine support
-#
-CONFIG_DMA_ENGINE=y
-
-#
-# DMA Clients
-#
-CONFIG_NET_DMA=y
-
-#
-# DMA Devices
-#
-CONFIG_INTEL_IOATDMA=m
+# CONFIG_RTC_DRV_AU1XXX is not set
+# CONFIG_DMADEVICES is not set
# CONFIG_AUXDISPLAY is not set
+# CONFIG_UIO is not set
#
-# Userspace I/O
+# TI VLYNQ
#
-# CONFIG_UIO is not set
+# CONFIG_STAGING is not set
#
# File systems
@@ -2772,44 +2217,43 @@ CONFIG_EXT2_FS_POSIX_ACL=y
CONFIG_EXT2_FS_SECURITY=y
# CONFIG_EXT2_FS_XIP is not set
CONFIG_EXT3_FS=m
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
CONFIG_EXT3_FS_XATTR=y
CONFIG_EXT3_FS_POSIX_ACL=y
CONFIG_EXT3_FS_SECURITY=y
-# CONFIG_EXT4DEV_FS is not set
+# CONFIG_EXT4_FS is not set
CONFIG_JBD=m
# CONFIG_JBD_DEBUG is not set
CONFIG_FS_MBCACHE=m
-CONFIG_REISERFS_FS=m
-# CONFIG_REISERFS_CHECK is not set
-# CONFIG_REISERFS_PROC_INFO is not set
-CONFIG_REISERFS_FS_XATTR=y
-CONFIG_REISERFS_FS_POSIX_ACL=y
-CONFIG_REISERFS_FS_SECURITY=y
-CONFIG_JFS_FS=m
-CONFIG_JFS_POSIX_ACL=y
-CONFIG_JFS_SECURITY=y
-# CONFIG_JFS_DEBUG is not set
-CONFIG_JFS_STATISTICS=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
CONFIG_FS_POSIX_ACL=y
-CONFIG_XFS_FS=m
-CONFIG_XFS_QUOTA=y
-CONFIG_XFS_SECURITY=y
-CONFIG_XFS_POSIX_ACL=y
-CONFIG_XFS_RT=y
+# CONFIG_XFS_FS is not set
# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
-CONFIG_MINIX_FS=m
-CONFIG_ROMFS_FS=m
+# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
+CONFIG_DNOTIFY=y
CONFIG_INOTIFY=y
CONFIG_INOTIFY_USER=y
CONFIG_QUOTA=y
-CONFIG_QFMT_V1=m
-CONFIG_QFMT_V2=m
+# CONFIG_QUOTA_NETLINK_INTERFACE is not set
+CONFIG_PRINT_QUOTA_WARNING=y
+# CONFIG_QUOTA_DEBUG is not set
+# CONFIG_QFMT_V1 is not set
+# CONFIG_QFMT_V2 is not set
CONFIG_QUOTACTL=y
-CONFIG_DNOTIFY=y
-CONFIG_AUTOFS_FS=m
-CONFIG_AUTOFS4_FS=m
+# CONFIG_AUTOFS_FS is not set
+CONFIG_AUTOFS4_FS=y
CONFIG_FUSE_FS=m
+# CONFIG_CUSE is not set
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
#
# CD-ROM/DVD Filesystems
@@ -2838,73 +2282,79 @@ CONFIG_NTFS_FS=m
CONFIG_PROC_FS=y
CONFIG_PROC_KCORE=y
CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
CONFIG_SYSFS=y
CONFIG_TMPFS=y
# CONFIG_TMPFS_POSIX_ACL is not set
# CONFIG_HUGETLB_PAGE is not set
-CONFIG_RAMFS=y
CONFIG_CONFIGFS_FS=m
-
-#
-# Miscellaneous filesystems
-#
-CONFIG_ADFS_FS=m
-# CONFIG_ADFS_FS_RW is not set
-CONFIG_AFFS_FS=m
-CONFIG_ECRYPT_FS=m
-CONFIG_HFS_FS=m
-CONFIG_HFSPLUS_FS=m
-CONFIG_BEFS_FS=m
-# CONFIG_BEFS_DEBUG is not set
-CONFIG_BFS_FS=m
-CONFIG_EFS_FS=m
-CONFIG_JFFS2_FS=m
+CONFIG_MISC_FILESYSTEMS=y
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_ECRYPT_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+CONFIG_JFFS2_FS=y
CONFIG_JFFS2_FS_DEBUG=0
CONFIG_JFFS2_FS_WRITEBUFFER=y
+# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
# CONFIG_JFFS2_SUMMARY is not set
-# CONFIG_JFFS2_FS_XATTR is not set
-# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
+CONFIG_JFFS2_FS_XATTR=y
+CONFIG_JFFS2_FS_POSIX_ACL=y
+CONFIG_JFFS2_FS_SECURITY=y
+CONFIG_JFFS2_COMPRESSION_OPTIONS=y
CONFIG_JFFS2_ZLIB=y
+# CONFIG_JFFS2_LZO is not set
CONFIG_JFFS2_RTIME=y
# CONFIG_JFFS2_RUBIN is not set
+# CONFIG_JFFS2_CMODE_NONE is not set
+CONFIG_JFFS2_CMODE_PRIORITY=y
+# CONFIG_JFFS2_CMODE_SIZE is not set
+# CONFIG_JFFS2_CMODE_FAVOURLZO is not set
+# CONFIG_LOGFS is not set
CONFIG_CRAMFS=y
-CONFIG_VXFS_FS=m
-CONFIG_HPFS_FS=m
-CONFIG_QNX4FS_FS=m
-CONFIG_SYSV_FS=m
-CONFIG_UFS_FS=m
-# CONFIG_UFS_FS_WRITE is not set
-# CONFIG_UFS_DEBUG is not set
-
-#
-# Network File Systems
-#
+CONFIG_SQUASHFS=y
+# CONFIG_SQUASHFS_EMBEDDED is not set
+CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+CONFIG_NETWORK_FILESYSTEMS=y
CONFIG_NFS_FS=m
CONFIG_NFS_V3=y
# CONFIG_NFS_V3_ACL is not set
CONFIG_NFS_V4=y
-CONFIG_NFS_DIRECTIO=y
+# CONFIG_NFS_V4_1 is not set
CONFIG_NFSD=m
CONFIG_NFSD_V3=y
# CONFIG_NFSD_V3_ACL is not set
CONFIG_NFSD_V4=y
-CONFIG_NFSD_TCP=y
CONFIG_LOCKD=m
CONFIG_LOCKD_V4=y
CONFIG_EXPORTFS=m
CONFIG_NFS_COMMON=y
CONFIG_SUNRPC=m
CONFIG_SUNRPC_GSS=m
-# CONFIG_SUNRPC_BIND34 is not set
CONFIG_RPCSEC_GSS_KRB5=m
CONFIG_RPCSEC_GSS_SPKM3=m
CONFIG_SMB_FS=m
# CONFIG_SMB_NLS_DEFAULT is not set
+# CONFIG_CEPH_FS is not set
CONFIG_CIFS=m
# CONFIG_CIFS_STATS is not set
# CONFIG_CIFS_WEAK_PW_HASH is not set
+# CONFIG_CIFS_UPCALL is not set
# CONFIG_CIFS_XATTR is not set
# CONFIG_CIFS_DEBUG2 is not set
+# CONFIG_CIFS_DFS_UPCALL is not set
# CONFIG_CIFS_EXPERIMENTAL is not set
CONFIG_NCP_FS=m
CONFIG_NCPFS_PACKET_SIGNING=y
@@ -2916,7 +2366,6 @@ CONFIG_NCPFS_OS2_NS=y
CONFIG_NCPFS_NLS=y
CONFIG_NCPFS_EXTRAS=y
CONFIG_CODA_FS=m
-# CONFIG_CODA_FS_OLD_API is not set
CONFIG_AFS_FS=m
# CONFIG_AFS_DEBUG is not set
@@ -2948,10 +2397,6 @@ CONFIG_SUN_PARTITION=y
CONFIG_KARMA_PARTITION=y
CONFIG_EFI_PARTITION=y
# CONFIG_SYSV68_PARTITION is not set
-
-#
-# Native Language Support
-#
CONFIG_NLS=y
CONFIG_NLS_DEFAULT="cp437"
CONFIG_NLS_CODEPAGE_437=m
@@ -2992,118 +2437,179 @@ CONFIG_NLS_ISO8859_15=m
CONFIG_NLS_KOI8_R=m
CONFIG_NLS_KOI8_U=m
CONFIG_NLS_UTF8=m
-
-#
-# Distributed Lock Manager
-#
-CONFIG_DLM=m
-# CONFIG_DLM_DEBUG is not set
-
-#
-# Profiling support
-#
-CONFIG_PROFILING=y
-CONFIG_OPROFILE=m
+# CONFIG_DLM is not set
#
# Kernel hacking
#
CONFIG_TRACE_IRQFLAGS_SUPPORT=y
# CONFIG_PRINTK_TIME is not set
+CONFIG_ENABLE_WARN_DEPRECATED=y
# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_FRAME_WARN=1024
CONFIG_MAGIC_SYSRQ=y
+# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_UNUSED_SYMBOLS is not set
-# CONFIG_DEBUG_FS is not set
+CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
# CONFIG_DEBUG_KERNEL is not set
-CONFIG_CROSSCOMPILE=y
+# CONFIG_DEBUG_MEMORY_INIT is not set
+CONFIG_RCU_CPU_STALL_DETECTOR=y
+# CONFIG_LKDTM is not set
+# CONFIG_SYSCTL_SYSCALL_CHECK is not set
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
+CONFIG_HAVE_DYNAMIC_FTRACE=y
+CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_RING_BUFFER=y
+CONFIG_RING_BUFFER_ALLOW_SWAP=y
+CONFIG_TRACING_SUPPORT=y
+# CONFIG_FTRACE is not set
+# CONFIG_DYNAMIC_DEBUG is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+CONFIG_EARLY_PRINTK=y
# CONFIG_CMDLINE_BOOL is not set
+# CONFIG_SPINLOCK_TEST is not set
#
# Security options
#
CONFIG_KEYS=y
# CONFIG_KEYS_DEBUG_PROC_KEYS is not set
-CONFIG_SECURITY=y
-CONFIG_SECURITY_NETWORK=y
-# CONFIG_SECURITY_NETWORK_XFRM is not set
-CONFIG_SECURITY_CAPABILITIES=m
-CONFIG_SECURITY_ROOTPLUG=m
-CONFIG_SECURITY_SELINUX=y
-CONFIG_SECURITY_SELINUX_BOOTPARAM=y
-CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
-CONFIG_SECURITY_SELINUX_DISABLE=y
-CONFIG_SECURITY_SELINUX_DEVELOP=y
-CONFIG_SECURITY_SELINUX_AVC_STATS=y
-CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1
-# CONFIG_SECURITY_SELINUX_ENABLE_SECMARK_DEFAULT is not set
-# CONFIG_SECURITY_SELINUX_POLICYDB_VERSION_MAX is not set
-CONFIG_XOR_BLOCKS=m
-CONFIG_ASYNC_CORE=m
-CONFIG_ASYNC_MEMCPY=m
-CONFIG_ASYNC_XOR=m
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
+# CONFIG_DEFAULT_SECURITY_SELINUX is not set
+# CONFIG_DEFAULT_SECURITY_SMACK is not set
+# CONFIG_DEFAULT_SECURITY_TOMOYO is not set
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_DEFAULT_SECURITY=""
CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+# CONFIG_CRYPTO_FIPS is not set
CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
+CONFIG_CRYPTO_AEAD=m
+CONFIG_CRYPTO_AEAD2=y
CONFIG_CRYPTO_BLKCIPHER=m
+CONFIG_CRYPTO_BLKCIPHER2=y
CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG=m
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_PCOMP=y
CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
+# CONFIG_CRYPTO_GF128MUL is not set
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_WORKQUEUE=y
+# CONFIG_CRYPTO_CRYPTD is not set
+CONFIG_CRYPTO_AUTHENC=m
+CONFIG_CRYPTO_TEST=m
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_SEQIV is not set
+
+#
+# Block modes
+#
+CONFIG_CRYPTO_CBC=m
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+CONFIG_CRYPTO_ECB=m
+# CONFIG_CRYPTO_LRW is not set
+CONFIG_CRYPTO_PCBC=m
+# CONFIG_CRYPTO_XTS is not set
+
+#
+# Hash modes
+#
CONFIG_CRYPTO_HMAC=y
# CONFIG_CRYPTO_XCBC is not set
-CONFIG_CRYPTO_NULL=m
+# CONFIG_CRYPTO_VMAC is not set
+
+#
+# Digest
+#
+CONFIG_CRYPTO_CRC32C=m
+# CONFIG_CRYPTO_GHASH is not set
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_MICHAEL_MIC=m
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
CONFIG_CRYPTO_SHA1=m
CONFIG_CRYPTO_SHA256=m
CONFIG_CRYPTO_SHA512=m
-CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_TGR192=m
-# CONFIG_CRYPTO_GF128MUL is not set
-CONFIG_CRYPTO_ECB=m
-CONFIG_CRYPTO_CBC=m
-CONFIG_CRYPTO_PCBC=m
-# CONFIG_CRYPTO_LRW is not set
-# CONFIG_CRYPTO_CRYPTD is not set
-CONFIG_CRYPTO_DES=m
-# CONFIG_CRYPTO_FCRYPT is not set
-CONFIG_CRYPTO_BLOWFISH=m
-CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_TWOFISH_COMMON=m
-CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_WP512=m
+
+#
+# Ciphers
+#
CONFIG_CRYPTO_AES=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_ARC4=m
+CONFIG_CRYPTO_BLOWFISH=m
+# CONFIG_CRYPTO_CAMELLIA is not set
CONFIG_CRYPTO_CAST5=m
CONFIG_CRYPTO_CAST6=m
-CONFIG_CRYPTO_TEA=m
-CONFIG_CRYPTO_ARC4=m
+CONFIG_CRYPTO_DES=m
+# CONFIG_CRYPTO_FCRYPT is not set
CONFIG_CRYPTO_KHAZAD=m
-CONFIG_CRYPTO_ANUBIS=m
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SEED is not set
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_TWOFISH_COMMON=m
+
+#
+# Compression
+#
CONFIG_CRYPTO_DEFLATE=m
-CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_CRC32C=m
-# CONFIG_CRYPTO_CAMELLIA is not set
-CONFIG_CRYPTO_TEST=m
+# CONFIG_CRYPTO_ZLIB is not set
+# CONFIG_CRYPTO_LZO is not set
+
+#
+# Random Number Generation
+#
+CONFIG_CRYPTO_ANSI_CPRNG=m
CONFIG_CRYPTO_HW=y
+# CONFIG_CRYPTO_DEV_HIFN_795X is not set
+# CONFIG_BINARY_PRINTF is not set
#
# Library routines
#
CONFIG_BITREVERSE=y
+CONFIG_GENERIC_FIND_LAST_BIT=y
CONFIG_CRC_CCITT=m
CONFIG_CRC16=m
-# CONFIG_CRC_ITU_T is not set
+# CONFIG_CRC_T10DIF is not set
+CONFIG_CRC_ITU_T=m
CONFIG_CRC32=y
# CONFIG_CRC7 is not set
CONFIG_LIBCRC32C=m
CONFIG_AUDIT_GENERIC=y
CONFIG_ZLIB_INFLATE=y
-CONFIG_ZLIB_DEFLATE=m
-CONFIG_REED_SOLOMON=m
-CONFIG_REED_SOLOMON_DEC16=y
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_DECOMPRESS_GZIP=y
CONFIG_TEXTSEARCH=y
CONFIG_TEXTSEARCH_KMP=m
CONFIG_TEXTSEARCH_BM=m
CONFIG_TEXTSEARCH_FSM=m
-CONFIG_PLIST=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
-CONFIG_CHECK_SIGNATURE=y
+CONFIG_NLATTR=y
diff --git a/arch/mips/configs/rb532_defconfig b/arch/mips/configs/rb532_defconfig
index 57a50483abdf..90a032af95ce 100644
--- a/arch/mips/configs/rb532_defconfig
+++ b/arch/mips/configs/rb532_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.25
-# Mon Apr 28 12:24:17 2008
+# Linux kernel version: 2.6.34-rc6
+# Sat May 1 11:49:51 2010
#
CONFIG_MIPS=y
@@ -9,22 +9,25 @@ CONFIG_MIPS=y
# Machine selection
#
# CONFIG_MACH_ALCHEMY is not set
+# CONFIG_AR7 is not set
# CONFIG_BCM47XX is not set
+# CONFIG_BCM63XX is not set
# CONFIG_MIPS_COBALT is not set
# CONFIG_MACH_DECSTATION is not set
# CONFIG_MACH_JAZZ is not set
# CONFIG_LASAT is not set
-# CONFIG_LEMOTE_FULONG is not set
-# CONFIG_MIPS_ATLAS is not set
+# CONFIG_MACH_LOONGSON is not set
# CONFIG_MIPS_MALTA is not set
-# CONFIG_MIPS_SEAD is not set
# CONFIG_MIPS_SIM is not set
-# CONFIG_MARKEINS is not set
+# CONFIG_NEC_MARKEINS is not set
# CONFIG_MACH_VR41XX is not set
+# CONFIG_NXP_STB220 is not set
+# CONFIG_NXP_STB225 is not set
# CONFIG_PNX8550_JBS is not set
# CONFIG_PNX8550_STB810 is not set
# CONFIG_PMC_MSP is not set
# CONFIG_PMC_YOSEMITE is not set
+# CONFIG_POWERTV is not set
# CONFIG_SGI_IP22 is not set
# CONFIG_SGI_IP27 is not set
# CONFIG_SGI_IP28 is not set
@@ -38,11 +41,14 @@ CONFIG_MIPS=y
# CONFIG_SIBYTE_SENTOSA is not set
# CONFIG_SIBYTE_BIGSUR is not set
# CONFIG_SNI_RM is not set
-# CONFIG_TOSHIBA_JMR3927 is not set
+# CONFIG_MACH_TX39XX is not set
+# CONFIG_MACH_TX49XX is not set
CONFIG_MIKROTIK_RB532=y
-# CONFIG_TOSHIBA_RBTX4927 is not set
-# CONFIG_TOSHIBA_RBTX4938 is not set
# CONFIG_WR_PPMC is not set
+# CONFIG_CAVIUM_OCTEON_SIMULATOR is not set
+# CONFIG_CAVIUM_OCTEON_REFERENCE_BOARD is not set
+# CONFIG_ALCHEMY_GPIO_INDIRECT is not set
+CONFIG_LOONGSON_UART_BASE=y
CONFIG_RWSEM_GENERIC_SPINLOCK=y
# CONFIG_ARCH_HAS_ILOG2_U32 is not set
# CONFIG_ARCH_HAS_ILOG2_U64 is not set
@@ -53,14 +59,15 @@ CONFIG_GENERIC_CALIBRATE_DELAY=y
CONFIG_GENERIC_CLOCKEVENTS=y
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CMOS_UPDATE=y
-CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
+CONFIG_SCHED_OMIT_FRAME_POINTER=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
CONFIG_BOOT_RAW=y
+CONFIG_CEVT_R4K_LIB=y
CONFIG_CEVT_R4K=y
+CONFIG_CSRC_R4K_LIB=y
CONFIG_CSRC_R4K=y
CONFIG_DMA_NONCOHERENT=y
-CONFIG_DMA_NEED_PCI_MAP_STATE=y
-# CONFIG_HOTPLUG_CPU is not set
+CONFIG_NEED_DMA_MAP_STATE=y
# CONFIG_NO_IOPORT is not set
CONFIG_GENERIC_GPIO=y
# CONFIG_CPU_BIG_ENDIAN is not set
@@ -73,7 +80,8 @@ CONFIG_MIPS_L1_CACHE_SHIFT=4
#
# CPU selection
#
-# CONFIG_CPU_LOONGSON2 is not set
+# CONFIG_CPU_LOONGSON2E is not set
+# CONFIG_CPU_LOONGSON2F is not set
CONFIG_CPU_MIPS32_R1=y
# CONFIG_CPU_MIPS32_R2 is not set
# CONFIG_CPU_MIPS64_R1 is not set
@@ -86,6 +94,7 @@ CONFIG_CPU_MIPS32_R1=y
# CONFIG_CPU_TX49XX is not set
# CONFIG_CPU_R5000 is not set
# CONFIG_CPU_R5432 is not set
+# CONFIG_CPU_R5500 is not set
# CONFIG_CPU_R6000 is not set
# CONFIG_CPU_NEVADA is not set
# CONFIG_CPU_R8000 is not set
@@ -93,11 +102,13 @@ CONFIG_CPU_MIPS32_R1=y
# CONFIG_CPU_RM7000 is not set
# CONFIG_CPU_RM9000 is not set
# CONFIG_CPU_SB1 is not set
+# CONFIG_CPU_CAVIUM_OCTEON is not set
CONFIG_SYS_HAS_CPU_MIPS32_R1=y
CONFIG_CPU_MIPS32=y
CONFIG_CPU_MIPSR1=y
CONFIG_SYS_SUPPORTS_32BIT_KERNEL=y
CONFIG_CPU_SUPPORTS_32BIT_KERNEL=y
+CONFIG_HARDWARE_WATCHPOINTS=y
#
# Kernel type
@@ -107,11 +118,13 @@ CONFIG_32BIT=y
CONFIG_PAGE_SIZE_4KB=y
# CONFIG_PAGE_SIZE_8KB is not set
# CONFIG_PAGE_SIZE_16KB is not set
+# CONFIG_PAGE_SIZE_32KB is not set
# CONFIG_PAGE_SIZE_64KB is not set
CONFIG_CPU_HAS_PREFETCH=y
CONFIG_MIPS_MT_DISABLED=y
# CONFIG_MIPS_MT_SMP is not set
# CONFIG_MIPS_MT_SMTC is not set
+# CONFIG_ARCH_PHYS_ADDR_T_64BIT is not set
CONFIG_CPU_HAS_SYNC=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_IRQ_PROBE=y
@@ -124,12 +137,13 @@ CONFIG_FLATMEM_MANUAL=y
# CONFIG_SPARSEMEM_MANUAL is not set
CONFIG_FLATMEM=y
CONFIG_FLAT_NODE_MEM_MAP=y
-# CONFIG_SPARSEMEM_STATIC is not set
-# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
+CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
-# CONFIG_RESOURCES_64BIT is not set
+# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
CONFIG_VIRT_TO_BUS=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
CONFIG_TICK_ONESHOT=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
@@ -151,6 +165,7 @@ CONFIG_PREEMPT_NONE=y
CONFIG_LOCKDEP_SUPPORT=y
CONFIG_STACKTRACE_SUPPORT=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
#
# General setup
@@ -168,23 +183,31 @@ CONFIG_BSD_PROCESS_ACCT=y
# CONFIG_BSD_PROCESS_ACCT_V3 is not set
# CONFIG_TASKSTATS is not set
# CONFIG_AUDIT is not set
+
+#
+# RCU Subsystem
+#
+# CONFIG_TREE_RCU is not set
+# CONFIG_TREE_PREEMPT_RCU is not set
+CONFIG_TINY_RCU=y
+# CONFIG_TREE_RCU_TRACE is not set
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CGROUPS is not set
-CONFIG_GROUP_SCHED=y
-CONFIG_FAIR_GROUP_SCHED=y
-# CONFIG_RT_GROUP_SCHED is not set
-CONFIG_USER_SCHED=y
-# CONFIG_CGROUP_SCHED is not set
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
# CONFIG_RELAY is not set
# CONFIG_NAMESPACES is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
+# CONFIG_RD_LZO is not set
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
CONFIG_EMBEDDED=y
CONFIG_SYSCTL_SYSCALL=y
# CONFIG_KALLSYMS is not set
@@ -192,54 +215,87 @@ CONFIG_HOTPLUG=y
CONFIG_PRINTK=y
CONFIG_BUG=y
# CONFIG_ELF_CORE is not set
-CONFIG_COMPAT_BRK=y
+CONFIG_PCSPKR_PLATFORM=y
CONFIG_BASE_FULL=y
CONFIG_FUTEX=y
-CONFIG_ANON_INODES=y
CONFIG_EPOLL=y
CONFIG_SIGNALFD=y
CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
+CONFIG_AIO=y
+
+#
+# Kernel Performance Events And Counters
+#
# CONFIG_VM_EVENT_COUNTERS is not set
+# CONFIG_PCI_QUIRKS is not set
+CONFIG_COMPAT_BRK=y
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
# CONFIG_PROFILING is not set
-# CONFIG_MARKERS is not set
CONFIG_HAVE_OPROFILE=y
-# CONFIG_HAVE_KPROBES is not set
-# CONFIG_HAVE_KRETPROBES is not set
-CONFIG_PROC_PAGE_MONITOR=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_SLOW_WORK is not set
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
CONFIG_SLABINFO=y
CONFIG_RT_MUTEXES=y
-# CONFIG_TINY_SHMEM is not set
CONFIG_BASE_SMALL=0
CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
CONFIG_MODULE_UNLOAD=y
# CONFIG_MODULE_FORCE_UNLOAD is not set
# CONFIG_MODVERSIONS is not set
# CONFIG_MODULE_SRCVERSION_ALL is not set
-# CONFIG_KMOD is not set
CONFIG_BLOCK=y
-# CONFIG_LBD is not set
-# CONFIG_BLK_DEV_IO_TRACE is not set
-# CONFIG_LSF is not set
+# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
#
# IO Schedulers
#
CONFIG_IOSCHED_NOOP=y
-# CONFIG_IOSCHED_AS is not set
CONFIG_IOSCHED_DEADLINE=y
# CONFIG_IOSCHED_CFQ is not set
-# CONFIG_DEFAULT_AS is not set
CONFIG_DEFAULT_DEADLINE=y
# CONFIG_DEFAULT_CFQ is not set
# CONFIG_DEFAULT_NOOP is not set
CONFIG_DEFAULT_IOSCHED="deadline"
-CONFIG_CLASSIC_RCU=y
+# CONFIG_INLINE_SPIN_TRYLOCK is not set
+# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK is not set
+# CONFIG_INLINE_SPIN_LOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
+CONFIG_INLINE_SPIN_UNLOCK=y
+# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
+CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
+# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_READ_TRYLOCK is not set
+# CONFIG_INLINE_READ_LOCK is not set
+# CONFIG_INLINE_READ_LOCK_BH is not set
+# CONFIG_INLINE_READ_LOCK_IRQ is not set
+# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
+CONFIG_INLINE_READ_UNLOCK=y
+# CONFIG_INLINE_READ_UNLOCK_BH is not set
+CONFIG_INLINE_READ_UNLOCK_IRQ=y
+# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_WRITE_TRYLOCK is not set
+# CONFIG_INLINE_WRITE_LOCK is not set
+# CONFIG_INLINE_WRITE_LOCK_BH is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
+CONFIG_INLINE_WRITE_UNLOCK=y
+# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
+CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
+# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
+# CONFIG_MUTEX_SPIN_ON_OWNER is not set
+# CONFIG_FREEZER is not set
#
# Bus options (PCI, PCMCIA, EISA, ISA, TC)
@@ -248,7 +304,8 @@ CONFIG_HW_HAS_PCI=y
CONFIG_PCI=y
CONFIG_PCI_DOMAINS=y
# CONFIG_ARCH_SUPPORTS_MSI is not set
-CONFIG_PCI_LEGACY=y
+# CONFIG_PCI_STUB is not set
+# CONFIG_PCI_IOV is not set
CONFIG_MMU=y
# CONFIG_PCCARD is not set
# CONFIG_HOTPLUG_PCI is not set
@@ -257,25 +314,22 @@ CONFIG_MMU=y
# Executable file formats
#
CONFIG_BINFMT_ELF=y
+# CONFIG_HAVE_AOUT is not set
# CONFIG_BINFMT_MISC is not set
CONFIG_TRAD_SIGNALS=y
#
# Power management options
#
+CONFIG_ARCH_HIBERNATION_POSSIBLE=y
CONFIG_ARCH_SUSPEND_POSSIBLE=y
# CONFIG_PM is not set
-
-#
-# Networking
-#
CONFIG_NET=y
#
# Networking options
#
CONFIG_PACKET=y
-CONFIG_PACKET_MMAP=y
CONFIG_UNIX=y
# CONFIG_NET_KEY is not set
CONFIG_INET=y
@@ -325,7 +379,6 @@ CONFIG_DEFAULT_VEGAS=y
# CONFIG_DEFAULT_RENO is not set
CONFIG_DEFAULT_TCP_CONG="vegas"
# CONFIG_TCP_MD5SIG is not set
-# CONFIG_IP_VS is not set
# CONFIG_IPV6 is not set
# CONFIG_NETWORK_SECMARK is not set
CONFIG_NETFILTER=y
@@ -336,8 +389,9 @@ CONFIG_NETFILTER_ADVANCED=y
#
# Core Netfilter Configuration
#
+CONFIG_NETFILTER_NETLINK=m
# CONFIG_NETFILTER_NETLINK_QUEUE is not set
-# CONFIG_NETFILTER_NETLINK_LOG is not set
+CONFIG_NETFILTER_NETLINK_LOG=m
CONFIG_NF_CONNTRACK=y
CONFIG_NF_CT_ACCT=y
CONFIG_NF_CONNTRACK_MARK=y
@@ -355,18 +409,23 @@ CONFIG_NF_CONNTRACK_IRC=m
# CONFIG_NF_CONNTRACK_SIP is not set
CONFIG_NF_CONNTRACK_TFTP=m
# CONFIG_NF_CT_NETLINK is not set
+# CONFIG_NETFILTER_TPROXY is not set
CONFIG_NETFILTER_XTABLES=y
# CONFIG_NETFILTER_XT_TARGET_CLASSIFY is not set
# CONFIG_NETFILTER_XT_TARGET_CONNMARK is not set
+# CONFIG_NETFILTER_XT_TARGET_CT is not set
# CONFIG_NETFILTER_XT_TARGET_DSCP is not set
+# CONFIG_NETFILTER_XT_TARGET_HL is not set
+# CONFIG_NETFILTER_XT_TARGET_LED is not set
# CONFIG_NETFILTER_XT_TARGET_MARK is not set
-CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
CONFIG_NETFILTER_XT_TARGET_NFLOG=m
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
# CONFIG_NETFILTER_XT_TARGET_NOTRACK is not set
# CONFIG_NETFILTER_XT_TARGET_RATEEST is not set
CONFIG_NETFILTER_XT_TARGET_TRACE=m
# CONFIG_NETFILTER_XT_TARGET_TCPMSS is not set
# CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP is not set
+# CONFIG_NETFILTER_XT_MATCH_CLUSTER is not set
CONFIG_NETFILTER_XT_MATCH_COMMENT=m
# CONFIG_NETFILTER_XT_MATCH_CONNBYTES is not set
CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
@@ -375,18 +434,21 @@ CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
CONFIG_NETFILTER_XT_MATCH_DCCP=m
# CONFIG_NETFILTER_XT_MATCH_DSCP is not set
# CONFIG_NETFILTER_XT_MATCH_ESP is not set
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
# CONFIG_NETFILTER_XT_MATCH_HELPER is not set
+# CONFIG_NETFILTER_XT_MATCH_HL is not set
# CONFIG_NETFILTER_XT_MATCH_IPRANGE is not set
# CONFIG_NETFILTER_XT_MATCH_LENGTH is not set
CONFIG_NETFILTER_XT_MATCH_LIMIT=y
# CONFIG_NETFILTER_XT_MATCH_MAC is not set
# CONFIG_NETFILTER_XT_MATCH_MARK is not set
-# CONFIG_NETFILTER_XT_MATCH_OWNER is not set
CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+# CONFIG_NETFILTER_XT_MATCH_OWNER is not set
# CONFIG_NETFILTER_XT_MATCH_PKTTYPE is not set
# CONFIG_NETFILTER_XT_MATCH_QUOTA is not set
# CONFIG_NETFILTER_XT_MATCH_RATEEST is not set
CONFIG_NETFILTER_XT_MATCH_REALM=m
+# CONFIG_NETFILTER_XT_MATCH_RECENT is not set
CONFIG_NETFILTER_XT_MATCH_SCTP=m
CONFIG_NETFILTER_XT_MATCH_STATE=y
# CONFIG_NETFILTER_XT_MATCH_STATISTIC is not set
@@ -394,20 +456,21 @@ CONFIG_NETFILTER_XT_MATCH_STATE=y
# CONFIG_NETFILTER_XT_MATCH_TCPMSS is not set
# CONFIG_NETFILTER_XT_MATCH_TIME is not set
CONFIG_NETFILTER_XT_MATCH_U32=m
-CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
+# CONFIG_NETFILTER_XT_MATCH_OSF is not set
+# CONFIG_IP_VS is not set
#
# IP: Netfilter Configuration
#
+CONFIG_NF_DEFRAG_IPV4=y
CONFIG_NF_CONNTRACK_IPV4=y
CONFIG_NF_CONNTRACK_PROC_COMPAT=y
# CONFIG_IP_NF_QUEUE is not set
CONFIG_IP_NF_IPTABLES=y
-# CONFIG_IP_NF_MATCH_RECENT is not set
-# CONFIG_IP_NF_MATCH_ECN is not set
+CONFIG_IP_NF_MATCH_ADDRTYPE=m
# CONFIG_IP_NF_MATCH_AH is not set
+# CONFIG_IP_NF_MATCH_ECN is not set
# CONFIG_IP_NF_MATCH_TTL is not set
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
CONFIG_IP_NF_FILTER=y
CONFIG_IP_NF_TARGET_REJECT=y
# CONFIG_IP_NF_TARGET_LOG is not set
@@ -415,8 +478,8 @@ CONFIG_IP_NF_TARGET_REJECT=y
CONFIG_NF_NAT=y
CONFIG_NF_NAT_NEEDED=y
CONFIG_IP_NF_TARGET_MASQUERADE=y
-# CONFIG_IP_NF_TARGET_REDIRECT is not set
# CONFIG_IP_NF_TARGET_NETMAP is not set
+# CONFIG_IP_NF_TARGET_REDIRECT is not set
# CONFIG_NF_NAT_SNMP_BASIC is not set
CONFIG_NF_NAT_FTP=m
CONFIG_NF_NAT_IRC=m
@@ -426,17 +489,22 @@ CONFIG_NF_NAT_TFTP=m
# CONFIG_NF_NAT_H323 is not set
# CONFIG_NF_NAT_SIP is not set
CONFIG_IP_NF_MANGLE=y
+# CONFIG_IP_NF_TARGET_CLUSTERIP is not set
# CONFIG_IP_NF_TARGET_ECN is not set
# CONFIG_IP_NF_TARGET_TTL is not set
-# CONFIG_IP_NF_TARGET_CLUSTERIP is not set
CONFIG_IP_NF_RAW=m
# CONFIG_IP_NF_ARPTABLES is not set
# CONFIG_IP_DCCP is not set
# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
+CONFIG_STP=y
CONFIG_BRIDGE=y
+CONFIG_BRIDGE_IGMP_SNOOPING=y
+# CONFIG_NET_DSA is not set
CONFIG_VLAN_8021Q=y
+# CONFIG_VLAN_8021Q_GVRP is not set
# CONFIG_DECNET is not set
CONFIG_LLC=y
CONFIG_LLC2=m
@@ -446,6 +514,8 @@ CONFIG_LLC2=m
# CONFIG_LAPB is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
CONFIG_NET_SCHED=y
#
@@ -455,7 +525,7 @@ CONFIG_NET_SCH_CBQ=m
# CONFIG_NET_SCH_HTB is not set
# CONFIG_NET_SCH_HFSC is not set
CONFIG_NET_SCH_PRIO=m
-CONFIG_NET_SCH_RR=m
+# CONFIG_NET_SCH_MULTIQ is not set
# CONFIG_NET_SCH_RED is not set
# CONFIG_NET_SCH_SFQ is not set
# CONFIG_NET_SCH_TEQL is not set
@@ -463,6 +533,7 @@ CONFIG_NET_SCH_RR=m
# CONFIG_NET_SCH_GRED is not set
# CONFIG_NET_SCH_DSMARK is not set
CONFIG_NET_SCH_NETEM=m
+# CONFIG_NET_SCH_DRR is not set
# CONFIG_NET_SCH_INGRESS is not set
#
@@ -496,8 +567,10 @@ CONFIG_NET_ACT_IPT=m
# CONFIG_NET_ACT_NAT is not set
CONFIG_NET_ACT_PEDIT=m
# CONFIG_NET_ACT_SIMP is not set
+# CONFIG_NET_ACT_SKBEDIT is not set
CONFIG_NET_CLS_IND=y
CONFIG_NET_SCH_FIFO=y
+# CONFIG_DCB is not set
#
# Network testing
@@ -514,14 +587,19 @@ CONFIG_HAMRADIO=y
# CONFIG_BT is not set
# CONFIG_AF_RXRPC is not set
CONFIG_FIB_RULES=y
+CONFIG_WIRELESS=y
+CONFIG_WIRELESS_EXT=y
+CONFIG_WEXT_CORE=y
+CONFIG_WEXT_PROC=y
+CONFIG_WEXT_PRIV=y
+# CONFIG_CFG80211 is not set
+CONFIG_WIRELESS_EXT_SYSFS=y
+# CONFIG_LIB80211 is not set
#
-# Wireless
+# CFG80211 needs to be enabled for MAC80211
#
-# CONFIG_CFG80211 is not set
-CONFIG_WIRELESS_EXT=y
-# CONFIG_MAC80211 is not set
-# CONFIG_IEEE80211 is not set
+# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
# CONFIG_NET_9P is not set
@@ -533,13 +611,17 @@ CONFIG_WIRELESS_EXT=y
# Generic Driver Options
#
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
CONFIG_FW_LOADER=y
+CONFIG_FIRMWARE_IN_KERNEL=y
+CONFIG_EXTRA_FIRMWARE=""
# CONFIG_SYS_HYPERVISOR is not set
# CONFIG_CONNECTOR is not set
CONFIG_MTD=y
# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
# CONFIG_MTD_CONCAT is not set
CONFIG_MTD_PARTITIONS=y
# CONFIG_MTD_REDBOOT_PARTS is not set
@@ -612,6 +694,11 @@ CONFIG_MTD_NAND_PLATFORM=y
# CONFIG_MTD_ONENAND is not set
#
+# LPDDR flash memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
+
+#
# UBI - Unsorted block images
#
# CONFIG_MTD_UBI is not set
@@ -623,23 +710,36 @@ CONFIG_BLK_DEV=y
# CONFIG_BLK_DEV_UMEM is not set
# CONFIG_BLK_DEV_COW_COMMON is not set
# CONFIG_BLK_DEV_LOOP is not set
+
+#
+# DRBD disabled because PROC_FS, INET or CONNECTOR not selected
+#
# CONFIG_BLK_DEV_NBD is not set
# CONFIG_BLK_DEV_SX8 is not set
# CONFIG_BLK_DEV_RAM is not set
# CONFIG_CDROM_PKTCDVD is not set
# CONFIG_ATA_OVER_ETH is not set
+# CONFIG_BLK_DEV_HD is not set
CONFIG_MISC_DEVICES=y
# CONFIG_PHANTOM is not set
-# CONFIG_EEPROM_93CX6 is not set
# CONFIG_SGI_IOC4 is not set
# CONFIG_TIFM_CORE is not set
# CONFIG_ENCLOSURE_SERVICES is not set
+# CONFIG_HP_ILO is not set
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+# CONFIG_EEPROM_93CX6 is not set
+# CONFIG_CB710_CORE is not set
CONFIG_HAVE_IDE=y
# CONFIG_IDE is not set
#
# SCSI device support
#
+CONFIG_SCSI_MOD=y
# CONFIG_RAID_ATTRS is not set
CONFIG_SCSI=y
CONFIG_SCSI_DMA=y
@@ -656,10 +756,6 @@ CONFIG_SCSI_PROC_FS=y
# CONFIG_BLK_DEV_SR is not set
# CONFIG_CHR_DEV_SG is not set
# CONFIG_CHR_DEV_SCH is not set
-
-#
-# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
-#
# CONFIG_SCSI_MULTI_LUN is not set
# CONFIG_SCSI_CONSTANTS is not set
# CONFIG_SCSI_LOGGING is not set
@@ -676,27 +772,35 @@ CONFIG_SCSI_WAIT_SCAN=m
# CONFIG_SCSI_SRP_ATTRS is not set
CONFIG_SCSI_LOWLEVEL=y
# CONFIG_ISCSI_TCP is not set
+# CONFIG_SCSI_BNX2_ISCSI is not set
+# CONFIG_BE2ISCSI is not set
# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
+# CONFIG_SCSI_HPSA is not set
# CONFIG_SCSI_3W_9XXX is not set
+# CONFIG_SCSI_3W_SAS is not set
# CONFIG_SCSI_ACARD is not set
# CONFIG_SCSI_AACRAID is not set
# CONFIG_SCSI_AIC7XXX is not set
# CONFIG_SCSI_AIC7XXX_OLD is not set
# CONFIG_SCSI_AIC79XX is not set
# CONFIG_SCSI_AIC94XX is not set
+# CONFIG_SCSI_MVSAS is not set
# CONFIG_SCSI_DPT_I2O is not set
# CONFIG_SCSI_ADVANSYS is not set
# CONFIG_SCSI_ARCMSR is not set
# CONFIG_MEGARAID_NEWGEN is not set
# CONFIG_MEGARAID_LEGACY is not set
# CONFIG_MEGARAID_SAS is not set
+# CONFIG_SCSI_MPT2SAS is not set
# CONFIG_SCSI_HPTIOP is not set
+# CONFIG_LIBFC is not set
+# CONFIG_LIBFCOE is not set
+# CONFIG_FCOE is not set
# CONFIG_SCSI_DMX3191D is not set
# CONFIG_SCSI_FUTURE_DOMAIN is not set
# CONFIG_SCSI_IPS is not set
# CONFIG_SCSI_INITIO is not set
# CONFIG_SCSI_INIA100 is not set
-# CONFIG_SCSI_MVSAS is not set
# CONFIG_SCSI_STEX is not set
# CONFIG_SCSI_SYM53C8XX_2 is not set
# CONFIG_SCSI_IPR is not set
@@ -708,9 +812,15 @@ CONFIG_SCSI_LOWLEVEL=y
# CONFIG_SCSI_DC390T is not set
# CONFIG_SCSI_NSP32 is not set
# CONFIG_SCSI_DEBUG is not set
+# CONFIG_SCSI_PMCRAID is not set
+# CONFIG_SCSI_PM8001 is not set
# CONFIG_SCSI_SRP is not set
+# CONFIG_SCSI_BFA_FC is not set
+# CONFIG_SCSI_DH is not set
+# CONFIG_SCSI_OSD_INITIATOR is not set
CONFIG_ATA=y
# CONFIG_ATA_NONSTANDARD is not set
+# CONFIG_ATA_VERBOSE_ERROR is not set
# CONFIG_SATA_PMP is not set
# CONFIG_SATA_AHCI is not set
# CONFIG_SATA_SIL24 is not set
@@ -732,6 +842,7 @@ CONFIG_ATA_SFF=y
# CONFIG_PATA_ALI is not set
# CONFIG_PATA_AMD is not set
# CONFIG_PATA_ARTOP is not set
+# CONFIG_PATA_ATP867X is not set
# CONFIG_PATA_ATIIXP is not set
# CONFIG_PATA_CMD640_PCI is not set
# CONFIG_PATA_CMD64X is not set
@@ -747,6 +858,7 @@ CONFIG_ATA_SFF=y
# CONFIG_PATA_IT821X is not set
# CONFIG_PATA_IT8213 is not set
# CONFIG_PATA_JMICRON is not set
+# CONFIG_PATA_LEGACY is not set
# CONFIG_PATA_TRIFLEX is not set
# CONFIG_PATA_MARVELL is not set
# CONFIG_PATA_MPIIX is not set
@@ -757,29 +869,39 @@ CONFIG_ATA_SFF=y
# CONFIG_PATA_NS87415 is not set
# CONFIG_PATA_OPTI is not set
# CONFIG_PATA_OPTIDMA is not set
+# CONFIG_PATA_PDC2027X is not set
# CONFIG_PATA_PDC_OLD is not set
# CONFIG_PATA_RADISYS is not set
CONFIG_PATA_RB532=y
+# CONFIG_PATA_RDC is not set
# CONFIG_PATA_RZ1000 is not set
# CONFIG_PATA_SC1200 is not set
# CONFIG_PATA_SERVERWORKS is not set
-# CONFIG_PATA_PDC2027X is not set
# CONFIG_PATA_SIL680 is not set
# CONFIG_PATA_SIS is not set
+# CONFIG_PATA_TOSHIBA is not set
# CONFIG_PATA_VIA is not set
# CONFIG_PATA_WINBOND is not set
# CONFIG_PATA_PLATFORM is not set
+# CONFIG_PATA_SCH is not set
# CONFIG_MD is not set
# CONFIG_FUSION is not set
#
# IEEE 1394 (FireWire) support
#
+
+#
+# You can enable one or both FireWire driver stacks.
+#
+
+#
+# The newer stack is recommended.
+#
# CONFIG_FIREWIRE is not set
# CONFIG_IEEE1394 is not set
# CONFIG_I2O is not set
CONFIG_NETDEVICES=y
-# CONFIG_NETDEVICES_MULTIQUEUE is not set
CONFIG_IFB=m
# CONFIG_DUMMY is not set
# CONFIG_BONDING is not set
@@ -797,21 +919,28 @@ CONFIG_KORINA=y
# CONFIG_SUNGEM is not set
# CONFIG_CASSINI is not set
# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_SMC91X is not set
# CONFIG_DM9000 is not set
+# CONFIG_ETHOC is not set
+# CONFIG_SMSC911X is not set
+# CONFIG_DNET is not set
# CONFIG_NET_TULIP is not set
# CONFIG_HP100 is not set
# CONFIG_IBM_NEW_EMAC_ZMII is not set
# CONFIG_IBM_NEW_EMAC_RGMII is not set
# CONFIG_IBM_NEW_EMAC_TAH is not set
# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
CONFIG_NET_PCI=y
# CONFIG_PCNET32 is not set
# CONFIG_AMD8111_ETH is not set
# CONFIG_ADAPTEC_STARFIRE is not set
+# CONFIG_KSZ884X_PCI is not set
# CONFIG_B44 is not set
# CONFIG_FORCEDETH is not set
# CONFIG_TC35815 is not set
-# CONFIG_EEPRO100 is not set
# CONFIG_E100 is not set
# CONFIG_FEALNX is not set
# CONFIG_NATSEMI is not set
@@ -821,30 +950,27 @@ CONFIG_NET_PCI=y
# CONFIG_R6040 is not set
# CONFIG_SIS900 is not set
# CONFIG_EPIC100 is not set
+# CONFIG_SMSC9420 is not set
# CONFIG_SUNDANCE is not set
# CONFIG_TLAN is not set
+# CONFIG_KS8842 is not set
+# CONFIG_KS8851_MLL is not set
CONFIG_VIA_RHINE=y
# CONFIG_VIA_RHINE_MMIO is not set
-CONFIG_VIA_RHINE_NAPI=y
# CONFIG_SC92031 is not set
+# CONFIG_ATL2 is not set
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
# CONFIG_TR is not set
-
-#
-# Wireless LAN
-#
-# CONFIG_WLAN_PRE80211 is not set
-CONFIG_WLAN_80211=y
-# CONFIG_IPW2100 is not set
-# CONFIG_IPW2200 is not set
-# CONFIG_LIBERTAS is not set
-# CONFIG_HERMES is not set
+CONFIG_WLAN=y
CONFIG_ATMEL=m
# CONFIG_PCI_ATMEL is not set
# CONFIG_PRISM54 is not set
-# CONFIG_IWLWIFI_LEDS is not set
# CONFIG_HOSTAP is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
# CONFIG_WAN is not set
# CONFIG_FDDI is not set
# CONFIG_HIPPI is not set
@@ -864,6 +990,7 @@ CONFIG_SLHC=m
# CONFIG_NETCONSOLE is not set
# CONFIG_NETPOLL is not set
# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_VMXNET3 is not set
# CONFIG_ISDN is not set
# CONFIG_PHONE is not set
@@ -872,7 +999,8 @@ CONFIG_SLHC=m
#
CONFIG_INPUT=y
# CONFIG_INPUT_FF_MEMLESS is not set
-# CONFIG_INPUT_POLLDEV is not set
+CONFIG_INPUT_POLLDEV=y
+# CONFIG_INPUT_SPARSEKMAP is not set
#
# Userland interfaces
@@ -887,17 +1015,29 @@ CONFIG_INPUT=y
#
CONFIG_INPUT_KEYBOARD=y
# CONFIG_KEYBOARD_ATKBD is not set
-# CONFIG_KEYBOARD_SUNKBD is not set
# CONFIG_KEYBOARD_LKKBD is not set
-# CONFIG_KEYBOARD_XTKBD is not set
+# CONFIG_KEYBOARD_GPIO is not set
+# CONFIG_KEYBOARD_MATRIX is not set
# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_OPENCORES is not set
# CONFIG_KEYBOARD_STOWAWAY is not set
-# CONFIG_KEYBOARD_GPIO is not set
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_XTKBD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_INPUT_JOYSTICK is not set
# CONFIG_INPUT_TABLET is not set
# CONFIG_INPUT_TOUCHSCREEN is not set
-# CONFIG_INPUT_MISC is not set
+CONFIG_INPUT_MISC=y
+# CONFIG_INPUT_PCSPKR is not set
+# CONFIG_INPUT_ATI_REMOTE is not set
+# CONFIG_INPUT_ATI_REMOTE2 is not set
+# CONFIG_INPUT_KEYSPAN_REMOTE is not set
+# CONFIG_INPUT_POWERMATE is not set
+# CONFIG_INPUT_YEALINK is not set
+# CONFIG_INPUT_CM109 is not set
+# CONFIG_INPUT_UINPUT is not set
+# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set
+CONFIG_INPUT_RB532_BUTTON=y
#
# Hardware I/O ports
@@ -909,6 +1049,7 @@ CONFIG_INPUT_KEYBOARD=y
# Character devices
#
# CONFIG_VT is not set
+CONFIG_DEVKMEM=y
# CONFIG_SERIAL_NONSTANDARD is not set
# CONFIG_NOZOMI is not set
@@ -928,105 +1069,95 @@ CONFIG_SERIAL_8250_RUNTIME_UARTS=2
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
# CONFIG_SERIAL_JSM is not set
+# CONFIG_SERIAL_TIMBERDALE is not set
CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
# CONFIG_LEGACY_PTYS is not set
# CONFIG_IPMI_HANDLER is not set
CONFIG_HW_RANDOM=y
-# CONFIG_RTC is not set
+# CONFIG_HW_RANDOM_TIMERIOMEM is not set
# CONFIG_R3964 is not set
# CONFIG_APPLICOM is not set
# CONFIG_RAW_DRIVER is not set
# CONFIG_TCG_TPM is not set
CONFIG_DEVPORT=y
# CONFIG_I2C is not set
-
-#
-# SPI support
-#
# CONFIG_SPI is not set
-# CONFIG_SPI_MASTER is not set
-# CONFIG_W1 is not set
-# CONFIG_POWER_SUPPLY is not set
-# CONFIG_HWMON is not set
-# CONFIG_THERMAL is not set
-CONFIG_WATCHDOG=y
-# CONFIG_WATCHDOG_NOWAYOUT is not set
#
-# Watchdog Device Drivers
+# PPS support
#
-# CONFIG_SOFT_WATCHDOG is not set
+# CONFIG_PPS is not set
+CONFIG_ARCH_REQUIRE_GPIOLIB=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_SYSFS=y
#
-# PCI-based Watchdog Cards
+# Memory mapped GPIO expanders:
#
-# CONFIG_PCIPCWATCHDOG is not set
-# CONFIG_WDTPCI is not set
+# CONFIG_GPIO_IT8761E is not set
+# CONFIG_GPIO_SCH is not set
#
-# Sonics Silicon Backplane
+# I2C GPIO expanders:
#
-CONFIG_SSB_POSSIBLE=y
-# CONFIG_SSB is not set
#
-# Multifunction device drivers
-#
-# CONFIG_MFD_SM501 is not set
-# CONFIG_HTC_PASIC3 is not set
-
-#
-# Multimedia devices
-#
-CONFIG_VIDEO_DEV=m
-CONFIG_VIDEO_V4L2_COMMON=m
-CONFIG_VIDEO_ALLOW_V4L1=y
-CONFIG_VIDEO_V4L1_COMPAT=y
-CONFIG_VIDEO_V4L2=m
-CONFIG_VIDEO_V4L1=m
-CONFIG_VIDEO_CAPTURE_DRIVERS=y
-# CONFIG_VIDEO_ADV_DEBUG is not set
-# CONFIG_VIDEO_HELPER_CHIPS_AUTO is not set
-
-#
-# Encoders/decoders and other helper chips
+# PCI GPIO expanders:
#
+# CONFIG_GPIO_CS5535 is not set
+# CONFIG_GPIO_BT8XX is not set
+# CONFIG_GPIO_LANGWELL is not set
#
-# Audio decoders
+# SPI GPIO expanders:
#
#
-# Video decoders
+# AC97 GPIO expanders:
#
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+# CONFIG_HWMON is not set
+# CONFIG_THERMAL is not set
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_NOWAYOUT=y
#
-# Video and audio decoders
+# Watchdog Device Drivers
#
+# CONFIG_SOFT_WATCHDOG is not set
+# CONFIG_ALIM7101_WDT is not set
+CONFIG_RC32434_WDT=y
#
-# MPEG video encoders
+# PCI-based Watchdog Cards
#
-# CONFIG_VIDEO_CX2341X is not set
+# CONFIG_PCIPCWATCHDOG is not set
+# CONFIG_WDTPCI is not set
+CONFIG_SSB_POSSIBLE=y
#
-# Video encoders
+# Sonics Silicon Backplane
#
+# CONFIG_SSB is not set
#
-# Video improvement chips
+# Multifunction device drivers
#
-# CONFIG_VIDEO_VIVI is not set
-# CONFIG_VIDEO_CPIA is not set
-# CONFIG_VIDEO_STRADIS is not set
-# CONFIG_SOC_CAMERA is not set
-# CONFIG_RADIO_ADAPTERS is not set
-# CONFIG_DVB_CORE is not set
-# CONFIG_DAB is not set
+# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_TIMBERDALE is not set
+# CONFIG_LPC_SCH is not set
+# CONFIG_REGULATOR is not set
+# CONFIG_MEDIA_SUPPORT is not set
#
# Graphics support
#
+# CONFIG_VGA_ARB is not set
# CONFIG_DRM is not set
# CONFIG_VGASTATE is not set
# CONFIG_VIDEO_OUTPUT_CONTROL is not set
@@ -1037,13 +1168,10 @@ CONFIG_VIDEO_CAPTURE_DRIVERS=y
# Display device support
#
# CONFIG_DISPLAY_SUPPORT is not set
-
-#
-# Sound
-#
# CONFIG_SOUND is not set
CONFIG_HID_SUPPORT=y
# CONFIG_HID is not set
+# CONFIG_HID_PID is not set
CONFIG_USB_SUPPORT=y
CONFIG_USB_ARCH_HAS_HCD=y
CONFIG_USB_ARCH_HAS_OHCI=y
@@ -1053,9 +1181,18 @@ CONFIG_USB_ARCH_HAS_EHCI=y
# CONFIG_USB_OTG_BLACKLIST_HUB is not set
#
-# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+# Enable Host or Gadget support to see Inventra options
+#
+
+#
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
#
# CONFIG_USB_GADGET is not set
+
+#
+# OTG and related infrastructure
+#
+# CONFIG_UWB is not set
# CONFIG_MMC is not set
# CONFIG_MEMSTICK is not set
CONFIG_NEW_LEDS=y
@@ -1064,41 +1201,67 @@ CONFIG_LEDS_CLASS=y
#
# LED drivers
#
+CONFIG_LEDS_MIKROTIK_RB532=y
# CONFIG_LEDS_GPIO is not set
+# CONFIG_LEDS_LT3593 is not set
+CONFIG_LEDS_TRIGGERS=y
#
# LED Triggers
#
-CONFIG_LEDS_TRIGGERS=y
CONFIG_LEDS_TRIGGER_TIMER=y
CONFIG_LEDS_TRIGGER_HEARTBEAT=y
-# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set
+# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
+# CONFIG_LEDS_TRIGGER_GPIO is not set
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+
+#
+# iptables trigger is under Netfilter config (LED target)
+#
+# CONFIG_ACCESSIBILITY is not set
# CONFIG_INFINIBAND is not set
CONFIG_RTC_LIB=y
# CONFIG_RTC_CLASS is not set
+# CONFIG_DMADEVICES is not set
+# CONFIG_AUXDISPLAY is not set
# CONFIG_UIO is not set
#
+# TI VLYNQ
+#
+# CONFIG_STAGING is not set
+
+#
# File systems
#
CONFIG_EXT2_FS=y
# CONFIG_EXT2_FS_XATTR is not set
# CONFIG_EXT2_FS_XIP is not set
# CONFIG_EXT3_FS is not set
-# CONFIG_EXT4DEV_FS is not set
+# CONFIG_EXT4_FS is not set
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
# CONFIG_XFS_FS is not set
# CONFIG_OCFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
# CONFIG_DNOTIFY is not set
# CONFIG_INOTIFY is not set
+CONFIG_INOTIFY_USER=y
# CONFIG_QUOTA is not set
# CONFIG_AUTOFS_FS is not set
# CONFIG_AUTOFS4_FS is not set
# CONFIG_FUSE_FS is not set
#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
# CD-ROM/DVD Filesystems
#
# CONFIG_ISO9660_FS is not set
@@ -1117,15 +1280,13 @@ CONFIG_EXT2_FS=y
CONFIG_PROC_FS=y
CONFIG_PROC_KCORE=y
CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
CONFIG_SYSFS=y
CONFIG_TMPFS=y
# CONFIG_TMPFS_POSIX_ACL is not set
# CONFIG_HUGETLB_PAGE is not set
CONFIG_CONFIGFS_FS=y
-
-#
-# Miscellaneous filesystems
-#
+CONFIG_MISC_FILESYSTEMS=y
# CONFIG_ADFS_FS is not set
# CONFIG_AFFS_FS is not set
# CONFIG_HFS_FS is not set
@@ -1148,9 +1309,14 @@ CONFIG_JFFS2_RTIME=y
CONFIG_JFFS2_CMODE_PRIORITY=y
# CONFIG_JFFS2_CMODE_SIZE is not set
# CONFIG_JFFS2_CMODE_FAVOURLZO is not set
+# CONFIG_LOGFS is not set
# CONFIG_CRAMFS is not set
+CONFIG_SQUASHFS=y
+# CONFIG_SQUASHFS_EMBEDDED is not set
+CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3
# CONFIG_VXFS_FS is not set
# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
# CONFIG_HPFS_FS is not set
# CONFIG_QNX4FS_FS is not set
# CONFIG_ROMFS_FS is not set
@@ -1160,6 +1326,7 @@ CONFIG_NETWORK_FILESYSTEMS=y
# CONFIG_NFS_FS is not set
# CONFIG_NFSD is not set
# CONFIG_SMB_FS is not set
+# CONFIG_CEPH_FS is not set
# CONFIG_CIFS is not set
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
@@ -1198,11 +1365,22 @@ CONFIG_ENABLE_WARN_DEPRECATED=y
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_FRAME_WARN=1024
# CONFIG_MAGIC_SYSRQ is not set
+CONFIG_STRIP_ASM_SYMS=y
# CONFIG_UNUSED_SYMBOLS is not set
# CONFIG_DEBUG_FS is not set
# CONFIG_HEADERS_CHECK is not set
# CONFIG_DEBUG_KERNEL is not set
+# CONFIG_DEBUG_MEMORY_INIT is not set
+# CONFIG_SYSCTL_SYSCALL_CHECK is not set
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
+CONFIG_HAVE_DYNAMIC_FTRACE=y
+CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_TRACING_SUPPORT=y
+# CONFIG_FTRACE is not set
# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
# CONFIG_CMDLINE_BOOL is not set
#
@@ -1210,18 +1388,32 @@ CONFIG_FRAME_WARN=1024
#
# CONFIG_KEYS is not set
# CONFIG_SECURITY is not set
-# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+# CONFIG_SECURITYFS is not set
+# CONFIG_DEFAULT_SECURITY_SELINUX is not set
+# CONFIG_DEFAULT_SECURITY_SMACK is not set
+# CONFIG_DEFAULT_SECURITY_TOMOYO is not set
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_DEFAULT_SECURITY=""
CONFIG_CRYPTO=y
#
# Crypto core or helper
#
+# CONFIG_CRYPTO_FIPS is not set
CONFIG_CRYPTO_ALGAPI=m
-CONFIG_CRYPTO_AEAD=m
-CONFIG_CRYPTO_BLKCIPHER=m
-# CONFIG_CRYPTO_MANAGER is not set
+CONFIG_CRYPTO_ALGAPI2=y
+CONFIG_CRYPTO_AEAD2=m
+CONFIG_CRYPTO_BLKCIPHER2=m
+CONFIG_CRYPTO_HASH=m
+CONFIG_CRYPTO_HASH2=m
+CONFIG_CRYPTO_RNG=m
+CONFIG_CRYPTO_RNG2=m
+CONFIG_CRYPTO_PCOMP=y
+CONFIG_CRYPTO_MANAGER=m
+CONFIG_CRYPTO_MANAGER2=m
# CONFIG_CRYPTO_GF128MUL is not set
# CONFIG_CRYPTO_NULL is not set
+CONFIG_CRYPTO_WORKQUEUE=m
# CONFIG_CRYPTO_CRYPTD is not set
# CONFIG_CRYPTO_AUTHENC is not set
CONFIG_CRYPTO_TEST=m
@@ -1249,14 +1441,20 @@ CONFIG_CRYPTO_TEST=m
#
# CONFIG_CRYPTO_HMAC is not set
# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
#
# Digest
#
-# CONFIG_CRYPTO_CRC32C is not set
+CONFIG_CRYPTO_CRC32C=m
+# CONFIG_CRYPTO_GHASH is not set
# CONFIG_CRYPTO_MD4 is not set
# CONFIG_CRYPTO_MD5 is not set
# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
# CONFIG_CRYPTO_SHA1 is not set
# CONFIG_CRYPTO_SHA256 is not set
# CONFIG_CRYPTO_SHA512 is not set
@@ -1266,7 +1464,7 @@ CONFIG_CRYPTO_TEST=m
#
# Ciphers
#
-# CONFIG_CRYPTO_AES is not set
+CONFIG_CRYPTO_AES=m
# CONFIG_CRYPTO_ANUBIS is not set
# CONFIG_CRYPTO_ARC4 is not set
# CONFIG_CRYPTO_BLOWFISH is not set
@@ -1286,27 +1484,36 @@ CONFIG_CRYPTO_TEST=m
# Compression
#
# CONFIG_CRYPTO_DEFLATE is not set
+CONFIG_CRYPTO_ZLIB=y
# CONFIG_CRYPTO_LZO is not set
+
+#
+# Random Number Generation
+#
+CONFIG_CRYPTO_ANSI_CPRNG=m
# CONFIG_CRYPTO_HW is not set
+# CONFIG_BINARY_PRINTF is not set
#
# Library routines
#
CONFIG_BITREVERSE=y
-# CONFIG_GENERIC_FIND_FIRST_BIT is not set
+CONFIG_GENERIC_FIND_LAST_BIT=y
CONFIG_CRC_CCITT=m
CONFIG_CRC16=m
+# CONFIG_CRC_T10DIF is not set
# CONFIG_CRC_ITU_T is not set
CONFIG_CRC32=y
# CONFIG_CRC7 is not set
CONFIG_LIBCRC32C=m
CONFIG_ZLIB_INFLATE=y
CONFIG_ZLIB_DEFLATE=y
+CONFIG_DECOMPRESS_GZIP=y
CONFIG_TEXTSEARCH=y
CONFIG_TEXTSEARCH_KMP=m
CONFIG_TEXTSEARCH_BM=m
CONFIG_TEXTSEARCH_FSM=m
-CONFIG_PLIST=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
+CONFIG_NLATTR=y
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index 519197ede089..59dc0c7ef733 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -29,7 +29,7 @@
*
* Atomically reads the value of @v.
*/
-#define atomic_read(v) ((v)->counter)
+#define atomic_read(v) (*(volatile int *)&(v)->counter)
/*
* atomic_set - set atomic variable
@@ -410,7 +410,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
* @v: pointer of type atomic64_t
*
*/
-#define atomic64_read(v) ((v)->counter)
+#define atomic64_read(v) (*(volatile long *)&(v)->counter)
/*
* atomic64_set - set atomic variable
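
For context on the atomic_read()/atomic64_read() change above: the added volatile cast forces the compiler to reload the counter from memory on every call instead of reusing a value cached in a register. A minimal user-space sketch of the difference follows; it is illustrative only (names and the printf are mine, not kernel code), assuming the simplified atomic_t stand-in shown.

#include <stdio.h>

/* Simplified stand-in for the kernel's atomic_t and the two macro variants. */
typedef struct { int counter; } atomic_t;

#define atomic_read_plain(v)    ((v)->counter)                      /* old form */
#define atomic_read_volatile(v) (*(volatile int *)&(v)->counter)    /* new form */

static atomic_t flag = { 0 };

int main(void)
{
	/* With the plain read, a compiler may hoist the load out of a loop such
	 * as "while (!atomic_read_plain(&flag)) ;" and spin on a stale register
	 * value; the volatile cast forces a fresh load on every iteration. */
	printf("%d %d\n", atomic_read_plain(&flag), atomic_read_volatile(&flag));
	return 0;
}
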
diff --git a/arch/mips/include/asm/i8253.h b/arch/mips/include/asm/i8253.h
index 032ca73f181b..48bb82372994 100644
--- a/arch/mips/include/asm/i8253.h
+++ b/arch/mips/include/asm/i8253.h
@@ -12,7 +12,7 @@
#define PIT_CH0 0x40
#define PIT_CH2 0x42
-extern spinlock_t i8253_lock;
+extern raw_spinlock_t i8253_lock;
extern void setup_pit_timer(void);
diff --git a/arch/mips/include/asm/kgdb.h b/arch/mips/include/asm/kgdb.h
index 48223b09396c..19002d605ac4 100644
--- a/arch/mips/include/asm/kgdb.h
+++ b/arch/mips/include/asm/kgdb.h
@@ -38,6 +38,8 @@ extern int kgdb_early_setup;
extern void *saved_vectors[32];
extern void handle_exception(struct pt_regs *regs);
extern void breakinst(void);
+extern int kgdb_ll_trap(int cmd, const char *str,
+ struct pt_regs *regs, long err, int trap, int sig);
#endif /* __KERNEL__ */
diff --git a/arch/mips/include/asm/mach-au1x00/au1000.h b/arch/mips/include/asm/mach-au1x00/au1000.h
index ae07423e6e82..e76941db2312 100644
--- a/arch/mips/include/asm/mach-au1x00/au1000.h
+++ b/arch/mips/include/asm/mach-au1x00/au1000.h
@@ -190,8 +190,6 @@ extern unsigned long au1xxx_calc_clock(void);
/* PM: arch/mips/alchemy/common/sleeper.S, power.c, irq.c */
void au1xxx_save_and_sleep(void);
void au_sleep(void);
-void save_au1xxx_intctl(void);
-void restore_au1xxx_intctl(void);
/* SOC Interrupt numbers */
@@ -835,6 +833,38 @@ enum soc_au1200_ints {
#define MEM_STNAND_DATA 0x20
#endif
+
+/* Interrupt Controller register offsets */
+#define IC_CFG0RD 0x40
+#define IC_CFG0SET 0x40
+#define IC_CFG0CLR 0x44
+#define IC_CFG1RD 0x48
+#define IC_CFG1SET 0x48
+#define IC_CFG1CLR 0x4C
+#define IC_CFG2RD 0x50
+#define IC_CFG2SET 0x50
+#define IC_CFG2CLR 0x54
+#define IC_REQ0INT 0x54
+#define IC_SRCRD 0x58
+#define IC_SRCSET 0x58
+#define IC_SRCCLR 0x5C
+#define IC_REQ1INT 0x5C
+#define IC_ASSIGNRD 0x60
+#define IC_ASSIGNSET 0x60
+#define IC_ASSIGNCLR 0x64
+#define IC_WAKERD 0x68
+#define IC_WAKESET 0x68
+#define IC_WAKECLR 0x6C
+#define IC_MASKRD 0x70
+#define IC_MASKSET 0x70
+#define IC_MASKCLR 0x74
+#define IC_RISINGRD 0x78
+#define IC_RISINGCLR 0x78
+#define IC_FALLINGRD 0x7C
+#define IC_FALLINGCLR 0x7C
+#define IC_TESTBIT 0x80
+
+
/* Interrupt Controller 0 */
#define IC0_CFG0RD 0xB0400040
#define IC0_CFG0SET 0xB0400040
diff --git a/arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h b/arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h
index 8c6b1105ce0b..c8a553a36ba4 100644
--- a/arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h
+++ b/arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h
@@ -358,10 +358,6 @@ u32 au1xxx_dbdma_put_dscr(u32 chanid, au1x_ddma_desc_t *dscr);
u32 au1xxx_ddma_add_device(dbdev_tab_t *dev);
extern void au1xxx_ddma_del_device(u32 devid);
void *au1xxx_ddma_get_nextptr_virt(au1x_ddma_desc_t *dp);
-#ifdef CONFIG_PM
-void au1xxx_dbdma_suspend(void);
-void au1xxx_dbdma_resume(void);
-#endif
/*
* Flags for the put_source/put_dest functions.
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h
index 43d4da0b1e9f..3999ec0aa7f5 100644
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h
@@ -20,7 +20,7 @@ static inline unsigned long bcm63xx_gpio_count(void)
}
}
-#define GPIO_DIR_OUT 0x0
-#define GPIO_DIR_IN 0x1
+#define BCM63XX_GPIO_DIR_OUT 0x0
+#define BCM63XX_GPIO_DIR_IN 0x1
#endif /* !BCM63XX_GPIO_H */
diff --git a/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h b/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h
index 16210cedd929..675bd8641d5a 100644
--- a/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h
@@ -52,6 +52,8 @@
#define cpu_has_tx39_cache 0
#define cpu_has_userlocal 0
#define cpu_has_vce 0
+#define cpu_has_veic 0
+#define cpu_has_vint 0
#define cpu_has_vtag_icache 0
#define cpu_has_watch 1
diff --git a/arch/mips/include/asm/mach-loongson/gpio.h b/arch/mips/include/asm/mach-loongson/gpio.h
new file mode 100644
index 000000000000..e30e73d443df
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson/gpio.h
@@ -0,0 +1,35 @@
+/*
+ * STLS2F GPIO Support
+ *
+ * Copyright (c) 2008 Richard Liu, STMicroelectronics <richard.liu@st.com>
+ * Copyright (c) 2008-2010 Arnaud Patard <apatard@mandriva.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __STLS2F_GPIO_H
+#define __STLS2F_GPIO_H
+
+#include <asm-generic/gpio.h>
+
+extern void gpio_set_value(unsigned gpio, int value);
+extern int gpio_get_value(unsigned gpio);
+extern int gpio_cansleep(unsigned gpio);
+
+/* The chip can generate interrupts,
+ * but this has not been tested and the documentation is unclear.
+ */
+static inline int gpio_to_irq(int gpio)
+{
+ return -EINVAL;
+}
+
+static inline int irq_to_gpio(int gpio)
+{
+ return -EINVAL;
+}
+
+#endif /* __STLS2F_GPIO_H */
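
The new header plugs the STLS2F GPIOs into the generic gpiolib interface, so callers use the standard gpio_* helpers. A hedged sketch of a hypothetical consumer (the GPIO number, label, and init function are illustrative and not taken from the patch):

#include <linux/gpio.h>

/* Hypothetical user of the new STLS2F GPIO support. */
static int __init example_gpio_init(void)
{
	int err;

	err = gpio_request(1, "example-led");	/* on-chip GPIOs are 0..3 */
	if (err)
		return err;

	gpio_direction_output(1, 0);		/* configure as output, drive low */
	gpio_set_value(1, 1);			/* then drive high */
	return 0;
}
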
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index 49382d5e891a..c6e3c93ce7c7 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -135,6 +135,12 @@
#define FPU_CSR_COND7 0x80000000 /* $fcc7 */
/*
+ * Bits 18 - 20 of the FPU Status Register will be read as 0,
+ * and should be written as zero.
+ */
+#define FPU_CSR_RSVD 0x001c0000
+
+/*
* X the exception cause indicator
* E the exception enable
* S the sticky/flag bit
@@ -161,7 +167,8 @@
#define FPU_CSR_UDF_S 0x00000008
#define FPU_CSR_INE_S 0x00000004
-/* rounding mode */
+/* Bits 0 and 1 of FPU Status Register specify the rounding mode */
+#define FPU_CSR_RM 0x00000003
#define FPU_CSR_RN 0x0 /* nearest */
#define FPU_CSR_RZ 0x1 /* towards zero */
#define FPU_CSR_RU 0x2 /* towards +Infinity */
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index ab387910009a..5d33b727acf5 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -344,16 +344,10 @@ unsigned long get_wchan(struct task_struct *p);
#ifdef CONFIG_CPU_HAS_PREFETCH
#define ARCH_HAS_PREFETCH
+#define prefetch(x) __builtin_prefetch((x), 0, 1)
-static inline void prefetch(const void *addr)
-{
- __asm__ __volatile__(
- " .set mips4 \n"
- " pref %0, (%1) \n"
- " .set mips0 \n"
- :
- : "i" (Pref_Load), "r" (addr));
-}
+#define ARCH_HAS_PREFETCHW
+#define prefetchw(x) __builtin_prefetch((x), 1, 1)
#endif
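
The hunk above replaces the hand-written "pref" inline assembly with GCC's __builtin_prefetch(addr, rw, locality), which lets the compiler schedule the prefetch itself and also adds a write-prefetch variant (prefetchw). A generic sketch of how such a prefetch is typically used, not kernel code:

/* Illustration only: prefetch a few elements ahead while summing an array. */
static long sum_with_prefetch(const long *data, long n)
{
	long i, sum = 0;

	for (i = 0; i < n; i++) {
		if (i + 8 < n)
			__builtin_prefetch(&data[i + 8], 0, 1); /* read, low temporal locality */
		sum += data[i];
	}
	return sum;
}
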
diff --git a/arch/mips/include/asm/scatterlist.h b/arch/mips/include/asm/scatterlist.h
index 83d69fe17c9f..9af65e79be36 100644
--- a/arch/mips/include/asm/scatterlist.h
+++ b/arch/mips/include/asm/scatterlist.h
@@ -1,27 +1,7 @@
#ifndef __ASM_SCATTERLIST_H
#define __ASM_SCATTERLIST_H
-#include <asm/types.h>
-
-struct scatterlist {
-#ifdef CONFIG_DEBUG_SG
- unsigned long sg_magic;
-#endif
- unsigned long page_link;
- unsigned int offset;
- dma_addr_t dma_address;
- unsigned int length;
-};
-
-/*
- * These macros should be used after a pci_map_sg call has been done
- * to get bus addresses of each of the SG entries and their lengths.
- * You should only work with the number of sg entries pci_map_sg
- * returns, or alternatively stop on the first sg_dma_len(sg) which
- * is 0.
- */
-#define sg_dma_address(sg) ((sg)->dma_address)
-#define sg_dma_len(sg) ((sg)->length)
+#include <asm-generic/scatterlist.h>
#define ISA_DMA_THRESHOLD (0x00ffffffUL)
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
index 845da2107ed1..2376f2e06e47 100644
--- a/arch/mips/include/asm/thread_info.h
+++ b/arch/mips/include/asm/thread_info.h
@@ -112,7 +112,7 @@ register struct thread_info *__current_thread_info __asm__("$28");
#define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */
#define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */
#define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */
-#define TIF_MEMDIE 18
+#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
#define TIF_FREEZE 19
#define TIF_FIXADE 20 /* Fix address errors in software */
#define TIF_LOGADE 21 /* Log address errors to syslog */
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index be5bb16be4e0..3562b854f2cd 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -125,6 +125,30 @@ static int __init wait_disable(char *s)
__setup("nowait", wait_disable);
+static int __cpuinitdata mips_fpu_disabled;
+
+static int __init fpu_disable(char *s)
+{
+ cpu_data[0].options &= ~MIPS_CPU_FPU;
+ mips_fpu_disabled = 1;
+
+ return 1;
+}
+
+__setup("nofpu", fpu_disable);
+
+int __cpuinitdata mips_dsp_disabled;
+
+static int __init dsp_disable(char *s)
+{
+ cpu_data[0].ases &= ~MIPS_ASE_DSP;
+ mips_dsp_disabled = 1;
+
+ return 1;
+}
+
+__setup("nodsp", dsp_disable);
+
void __init check_wait(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
@@ -982,6 +1006,12 @@ __cpuinit void cpu_probe(void)
*/
BUG_ON(current_cpu_type() != c->cputype);
+ if (mips_fpu_disabled)
+ c->options &= ~MIPS_CPU_FPU;
+
+ if (mips_dsp_disabled)
+ c->ases &= ~MIPS_ASE_DSP;
+
if (c->options & MIPS_CPU_FPU) {
c->fpu_id = cpu_get_fpu_id();
diff --git a/arch/mips/kernel/cpufreq/loongson2_cpufreq.c b/arch/mips/kernel/cpufreq/loongson2_cpufreq.c
index 2f6a0b147ab8..ae5db206347c 100644
--- a/arch/mips/kernel/cpufreq/loongson2_cpufreq.c
+++ b/arch/mips/kernel/cpufreq/loongson2_cpufreq.c
@@ -65,7 +65,7 @@ static int loongson2_cpufreq_target(struct cpufreq_policy *policy,
return -ENODEV;
cpus_allowed = current->cpus_allowed;
- set_cpus_allowed(current, cpumask_of_cpu(cpu));
+ set_cpus_allowed_ptr(current, cpumask_of(cpu));
if (cpufreq_frequency_table_target
(policy, &loongson2_clockmod_table[0], target_freq, relation,
@@ -91,7 +91,7 @@ static int loongson2_cpufreq_target(struct cpufreq_policy *policy,
/* notifiers */
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
- set_cpus_allowed(current, cpus_allowed);
+ set_cpus_allowed_ptr(current, &cpus_allowed);
/* setting the cpu frequency */
clk_set_rate(cpuclk, freq);
diff --git a/arch/mips/kernel/i8253.c b/arch/mips/kernel/i8253.c
index ed5c441615e4..94794062a177 100644
--- a/arch/mips/kernel/i8253.c
+++ b/arch/mips/kernel/i8253.c
@@ -15,7 +15,7 @@
#include <asm/io.h>
#include <asm/time.h>
-DEFINE_SPINLOCK(i8253_lock);
+DEFINE_RAW_SPINLOCK(i8253_lock);
EXPORT_SYMBOL(i8253_lock);
/*
@@ -26,7 +26,7 @@ EXPORT_SYMBOL(i8253_lock);
static void init_pit_timer(enum clock_event_mode mode,
struct clock_event_device *evt)
{
- spin_lock(&i8253_lock);
+ raw_spin_lock(&i8253_lock);
switch(mode) {
case CLOCK_EVT_MODE_PERIODIC:
@@ -55,7 +55,7 @@ static void init_pit_timer(enum clock_event_mode mode,
/* Nothing to do here */
break;
}
- spin_unlock(&i8253_lock);
+ raw_spin_unlock(&i8253_lock);
}
/*
@@ -65,10 +65,10 @@ static void init_pit_timer(enum clock_event_mode mode,
*/
static int pit_next_event(unsigned long delta, struct clock_event_device *evt)
{
- spin_lock(&i8253_lock);
+ raw_spin_lock(&i8253_lock);
outb_p(delta & 0xff , PIT_CH0); /* LSB */
outb(delta >> 8 , PIT_CH0); /* MSB */
- spin_unlock(&i8253_lock);
+ raw_spin_unlock(&i8253_lock);
return 0;
}
@@ -137,7 +137,7 @@ static cycle_t pit_read(struct clocksource *cs)
static int old_count;
static u32 old_jifs;
- spin_lock_irqsave(&i8253_lock, flags);
+ raw_spin_lock_irqsave(&i8253_lock, flags);
/*
* Although our caller may have the read side of xtime_lock,
* this is now a seqlock, and we are cheating in this routine
@@ -183,7 +183,7 @@ static cycle_t pit_read(struct clocksource *cs)
old_count = count;
old_jifs = jifs;
- spin_unlock_irqrestore(&i8253_lock, flags);
+ raw_spin_unlock_irqrestore(&i8253_lock, flags);
count = (LATCH - 1) - count;
diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c
index 50c9bb880667..9b78ff6e9b84 100644
--- a/arch/mips/kernel/kgdb.c
+++ b/arch/mips/kernel/kgdb.c
@@ -180,6 +180,11 @@ void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
*(ptr++) = regs->cp0_epc;
}
+void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
+{
+ regs->cp0_epc = pc;
+}
+
/*
* Calls linux_debug_hook before the kernel dies. If KGDB is enabled,
* then try to fall into the debugger
@@ -198,7 +203,7 @@ static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd,
if (atomic_read(&kgdb_active) != -1)
kgdb_nmicallback(smp_processor_id(), regs);
- if (kgdb_handle_exception(trap, compute_signal(trap), 0, regs))
+ if (kgdb_handle_exception(trap, compute_signal(trap), cmd, regs))
return NOTIFY_DONE;
if (atomic_read(&kgdb_setting_breakpoint))
@@ -212,6 +217,26 @@ static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd,
return NOTIFY_STOP;
}
+#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
+int kgdb_ll_trap(int cmd, const char *str,
+ struct pt_regs *regs, long err, int trap, int sig)
+{
+ struct die_args args = {
+ .regs = regs,
+ .str = str,
+ .err = err,
+ .trapnr = trap,
+ .signr = sig,
+
+ };
+
+ if (!kgdb_io_module_registered)
+ return NOTIFY_DONE;
+
+ return kgdb_mips_notify(NULL, cmd, &args);
+}
+#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
+
static struct notifier_block kgdb_notifier = {
.notifier_call = kgdb_mips_notify,
};
diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c
index cbc6182b0065..f5981c499109 100644
--- a/arch/mips/kernel/mips-mt-fpaff.c
+++ b/arch/mips/kernel/mips-mt-fpaff.c
@@ -100,10 +100,10 @@ asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
if (test_ti_thread_flag(ti, TIF_FPUBOUND) &&
cpus_intersects(new_mask, mt_fpu_cpumask)) {
cpus_and(effective_mask, new_mask, mt_fpu_cpumask);
- retval = set_cpus_allowed(p, effective_mask);
+ retval = set_cpus_allowed_ptr(p, &effective_mask);
} else {
clear_ti_thread_flag(ti, TIF_FPUBOUND);
- retval = set_cpus_allowed(p, new_mask);
+ retval = set_cpus_allowed_ptr(p, &new_mask);
}
out_unlock:
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 44337ba03717..a5297e2a353a 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -385,7 +385,7 @@ EXPORT(sysn32_call_table)
PTR sys_fchmodat
PTR sys_faccessat
PTR compat_sys_pselect6
- PTR sys_ppoll /* 6265 */
+ PTR compat_sys_ppoll /* 6265 */
PTR sys_unshare
PTR sys_splice
PTR sys_sync_file_range
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index f9513f9e61d3..85aef3fc6716 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -569,27 +569,6 @@ void __init setup_arch(char **cmdline_p)
plat_smp_setup();
}
-static int __init fpu_disable(char *s)
-{
- int i;
-
- for (i = 0; i < NR_CPUS; i++)
- cpu_data[i].options &= ~MIPS_CPU_FPU;
-
- return 1;
-}
-
-__setup("nofpu", fpu_disable);
-
-static int __init dsp_disable(char *s)
-{
- cpu_data[0].ases &= ~MIPS_ASE_DSP;
-
- return 1;
-}
-
-__setup("nodsp", dsp_disable);
-
unsigned long kernelsp[NR_CPUS];
unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3;
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index d612c6dcb746..8bdd6a663c7f 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -26,6 +26,7 @@
#include <linux/kgdb.h>
#include <linux/kdebug.h>
#include <linux/notifier.h>
+#include <linux/kdb.h>
#include <asm/bootinfo.h>
#include <asm/branch.h>
@@ -185,6 +186,11 @@ void show_stack(struct task_struct *task, unsigned long *sp)
regs.regs[29] = task->thread.reg29;
regs.regs[31] = 0;
regs.cp0_epc = task->thread.reg31;
+#ifdef CONFIG_KGDB_KDB
+ } else if (atomic_read(&kgdb_active) != -1 &&
+ kdb_current_regs) {
+ memcpy(&regs, kdb_current_regs, sizeof(regs));
+#endif /* CONFIG_KGDB_KDB */
} else {
prepare_frametrace(&regs);
}
@@ -360,6 +366,8 @@ void __noreturn die(const char * str, struct pt_regs * regs)
unsigned long dvpret = dvpe();
#endif /* CONFIG_MIPS_MT_SMTC */
+ notify_die(DIE_OOPS, str, (struct pt_regs *)regs, SIGSEGV, 0, 0);
+
console_verbose();
spin_lock_irq(&die_lock);
bust_spinlocks(1);
@@ -704,6 +712,11 @@ static void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
siginfo_t info;
char b[40];
+#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
+ if (kgdb_ll_trap(DIE_TRAP, str, regs, code, 0, 0) == NOTIFY_STOP)
+ return;
+#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
+
if (notify_die(DIE_TRAP, str, regs, code, 0, 0) == NOTIFY_STOP)
return;
@@ -854,7 +867,7 @@ static void mt_ase_fp_affinity(void)
= current->cpus_allowed;
cpus_and(tmask, current->cpus_allowed,
mt_fpu_cpumask);
- set_cpus_allowed(current, tmask);
+ set_cpus_allowed_ptr(current, &tmask);
set_thread_flag(TIF_FPUBOUND);
}
}
diff --git a/arch/mips/loongson/common/Makefile b/arch/mips/loongson/common/Makefile
index 7668c4de1151..cdd2e812ba1a 100644
--- a/arch/mips/loongson/common/Makefile
+++ b/arch/mips/loongson/common/Makefile
@@ -4,6 +4,7 @@
obj-y += setup.o init.o cmdline.o env.o time.o reset.o irq.o \
pci.o bonito-irq.o mem.o machtype.o platform.o
+obj-$(CONFIG_GENERIC_GPIO) += gpio.o
#
# Serial port support
diff --git a/arch/mips/loongson/common/gpio.c b/arch/mips/loongson/common/gpio.c
new file mode 100644
index 000000000000..e8a0ffa935b4
--- /dev/null
+++ b/arch/mips/loongson/common/gpio.c
@@ -0,0 +1,139 @@
+/*
+ * STLS2F GPIO Support
+ *
+ * Copyright (c) 2008 Richard Liu, STMicroelectronics <richard.liu@st.com>
+ * Copyright (c) 2008-2010 Arnaud Patard <apatard@mandriva.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/err.h>
+#include <asm/types.h>
+#include <loongson.h>
+#include <linux/gpio.h>
+
+#define STLS2F_N_GPIO 4
+#define STLS2F_GPIO_IN_OFFSET 16
+
+static DEFINE_SPINLOCK(gpio_lock);
+
+int gpio_get_value(unsigned gpio)
+{
+ u32 val;
+ u32 mask;
+
+ if (gpio >= STLS2F_N_GPIO)
+ return __gpio_get_value(gpio);
+
+ mask = 1 << (gpio + STLS2F_GPIO_IN_OFFSET);
+ spin_lock(&gpio_lock);
+ val = LOONGSON_GPIODATA;
+ spin_unlock(&gpio_lock);
+
+ return ((val & mask) != 0);
+}
+EXPORT_SYMBOL(gpio_get_value);
+
+void gpio_set_value(unsigned gpio, int state)
+{
+ u32 val;
+ u32 mask;
+
+ if (gpio >= STLS2F_N_GPIO) {
+ __gpio_set_value(gpio, state);
+ return;
+ }
+
+ mask = 1 << gpio;
+
+ spin_lock(&gpio_lock);
+ val = LOONGSON_GPIODATA;
+ if (state)
+ val |= mask;
+ else
+ val &= (~mask);
+ LOONGSON_GPIODATA = val;
+ spin_unlock(&gpio_lock);
+}
+EXPORT_SYMBOL(gpio_set_value);
+
+int gpio_cansleep(unsigned gpio)
+{
+ if (gpio < STLS2F_N_GPIO)
+ return 0;
+ else
+ return __gpio_cansleep(gpio);
+}
+EXPORT_SYMBOL(gpio_cansleep);
+
+static int ls2f_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
+{
+ u32 temp;
+ u32 mask;
+
+ if (gpio >= STLS2F_N_GPIO)
+ return -EINVAL;
+
+ spin_lock(&gpio_lock);
+ mask = 1 << gpio;
+ temp = LOONGSON_GPIOIE;
+ temp |= mask;
+ LOONGSON_GPIOIE = temp;
+ spin_unlock(&gpio_lock);
+
+ return 0;
+}
+
+static int ls2f_gpio_direction_output(struct gpio_chip *chip,
+ unsigned gpio, int level)
+{
+ u32 temp;
+ u32 mask;
+
+ if (gpio >= STLS2F_N_GPIO)
+ return -EINVAL;
+
+ gpio_set_value(gpio, level);
+ spin_lock(&gpio_lock);
+ mask = 1 << gpio;
+ temp = LOONGSON_GPIOIE;
+ temp &= (~mask);
+ LOONGSON_GPIOIE = temp;
+ spin_unlock(&gpio_lock);
+
+ return 0;
+}
+
+static int ls2f_gpio_get_value(struct gpio_chip *chip, unsigned gpio)
+{
+ return gpio_get_value(gpio);
+}
+
+static void ls2f_gpio_set_value(struct gpio_chip *chip,
+ unsigned gpio, int value)
+{
+ gpio_set_value(gpio, value);
+}
+
+static struct gpio_chip ls2f_chip = {
+ .label = "ls2f",
+ .direction_input = ls2f_gpio_direction_input,
+ .get = ls2f_gpio_get_value,
+ .direction_output = ls2f_gpio_direction_output,
+ .set = ls2f_gpio_set_value,
+ .base = 0,
+ .ngpio = STLS2F_N_GPIO,
+};
+
+static int __init ls2f_gpio_setup(void)
+{
+ return gpiochip_add(&ls2f_chip);
+}
+arch_initcall(ls2f_gpio_setup);
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index 8f2f8e9d8b21..47842b7d26ae 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -78,6 +78,9 @@ DEFINE_PER_CPU(struct mips_fpu_emulator_stats, fpuemustats);
#define FPCREG_RID 0 /* $0 = revision id */
#define FPCREG_CSR 31 /* $31 = csr */
+/* Determine rounding mode from the RM bits of the FCSR */
+#define modeindex(v) ((v) & FPU_CSR_RM)
+
/* Convert Mips rounding mode (0..3) to IEEE library modes. */
static const unsigned char ieee_rm[4] = {
[FPU_CSR_RN] = IEEE754_RN,
@@ -351,7 +354,8 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx)
if (MIPSInst_RD(ir) == FPCREG_CSR) {
value = ctx->fcr31;
- value = (value & ~0x3) | mips_rm[value & 0x3];
+ value = (value & ~FPU_CSR_RM) |
+ mips_rm[modeindex(value)];
#ifdef CSRTRACE
printk("%p gpr[%d]<-csr=%08x\n",
(void *) (xcp->cp0_epc),
@@ -384,10 +388,14 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx)
(void *) (xcp->cp0_epc),
MIPSInst_RT(ir), value);
#endif
- value &= (FPU_CSR_FLUSH | FPU_CSR_ALL_E | FPU_CSR_ALL_S | 0x03);
- ctx->fcr31 &= ~(FPU_CSR_FLUSH | FPU_CSR_ALL_E | FPU_CSR_ALL_S | 0x03);
- /* convert to ieee library modes */
- ctx->fcr31 |= (value & ~0x3) | ieee_rm[value & 0x3];
+
+ /*
+ * Don't write reserved bits,
+ * and convert to ieee library modes
+ */
+ ctx->fcr31 = (value &
+ ~(FPU_CSR_RSVD | FPU_CSR_RM)) |
+ ieee_rm[modeindex(value)];
}
if ((ctx->fcr31 >> 5) & ctx->fcr31 & FPU_CSR_ALL_E) {
return SIGFPE;
@@ -900,7 +908,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
ieee754sp fs;
SPFROMREG(fs, MIPSInst_FS(ir));
- ieee754_csr.rm = ieee_rm[MIPSInst_FUNC(ir) & 0x3];
+ ieee754_csr.rm = ieee_rm[modeindex(MIPSInst_FUNC(ir))];
rv.w = ieee754sp_tint(fs);
ieee754_csr.rm = oldrm;
rfmt = w_fmt;
@@ -926,7 +934,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
ieee754sp fs;
SPFROMREG(fs, MIPSInst_FS(ir));
- ieee754_csr.rm = ieee_rm[MIPSInst_FUNC(ir) & 0x3];
+ ieee754_csr.rm = ieee_rm[modeindex(MIPSInst_FUNC(ir))];
rv.l = ieee754sp_tlong(fs);
ieee754_csr.rm = oldrm;
rfmt = l_fmt;
@@ -1074,7 +1082,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
ieee754dp fs;
DPFROMREG(fs, MIPSInst_FS(ir));
- ieee754_csr.rm = ieee_rm[MIPSInst_FUNC(ir) & 0x3];
+ ieee754_csr.rm = ieee_rm[modeindex(MIPSInst_FUNC(ir))];
rv.w = ieee754dp_tint(fs);
ieee754_csr.rm = oldrm;
rfmt = w_fmt;
@@ -1100,7 +1108,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
ieee754dp fs;
DPFROMREG(fs, MIPSInst_FS(ir));
- ieee754_csr.rm = ieee_rm[MIPSInst_FUNC(ir) & 0x3];
+ ieee754_csr.rm = ieee_rm[modeindex(MIPSInst_FUNC(ir))];
rv.l = ieee754dp_tlong(fs);
ieee754_csr.rm = oldrm;
rfmt = l_fmt;
diff --git a/arch/mips/oprofile/op_model_loongson2.c b/arch/mips/oprofile/op_model_loongson2.c
index 29e2326b6257..d0d24e047676 100644
--- a/arch/mips/oprofile/op_model_loongson2.c
+++ b/arch/mips/oprofile/op_model_loongson2.c
@@ -8,7 +8,6 @@
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
- *
*/
#include <linux/init.h>
#include <linux/oprofile.h>
@@ -17,24 +16,18 @@
#include <loongson.h> /* LOONGSON2_PERFCNT_IRQ */
#include "op_impl.h"
-/*
- * a patch should be sent to oprofile with the loongson-specific support.
- * otherwise, the oprofile tool will not recognize this and complain about
- * "cpu_type 'unset' is not valid".
- */
#define LOONGSON2_CPU_TYPE "mips/loongson2"
-#define LOONGSON2_COUNTER1_EVENT(event) ((event & 0x0f) << 5)
-#define LOONGSON2_COUNTER2_EVENT(event) ((event & 0x0f) << 9)
-
-#define LOONGSON2_PERFCNT_EXL (1UL << 0)
-#define LOONGSON2_PERFCNT_KERNEL (1UL << 1)
-#define LOONGSON2_PERFCNT_SUPERVISOR (1UL << 2)
-#define LOONGSON2_PERFCNT_USER (1UL << 3)
-#define LOONGSON2_PERFCNT_INT_EN (1UL << 4)
#define LOONGSON2_PERFCNT_OVERFLOW (1ULL << 31)
-/* Loongson2 performance counter register */
+#define LOONGSON2_PERFCTRL_EXL (1UL << 0)
+#define LOONGSON2_PERFCTRL_KERNEL (1UL << 1)
+#define LOONGSON2_PERFCTRL_SUPERVISOR (1UL << 2)
+#define LOONGSON2_PERFCTRL_USER (1UL << 3)
+#define LOONGSON2_PERFCTRL_ENABLE (1UL << 4)
+#define LOONGSON2_PERFCTRL_EVENT(idx, event) \
+ (((event) & 0x0f) << ((idx) ? 9 : 5))
+
#define read_c0_perfctrl() __read_64bit_c0_register($24, 0)
#define write_c0_perfctrl(val) __write_64bit_c0_register($24, 0, val)
#define read_c0_perfcnt() __read_64bit_c0_register($25, 0)
@@ -49,7 +42,6 @@ static struct loongson2_register_config {
static char *oprofid = "LoongsonPerf";
static irqreturn_t loongson2_perfcount_handler(int irq, void *dev_id);
-/* Compute all of the registers in preparation for enabling profiling. */
static void loongson2_reg_setup(struct op_counter_config *cfg)
{
@@ -57,41 +49,38 @@ static void loongson2_reg_setup(struct op_counter_config *cfg)
reg.reset_counter1 = 0;
reg.reset_counter2 = 0;
- /* Compute the performance counter ctrl word. */
- /* For now count kernel and user mode */
+
+ /*
+ * Compute the performance counter ctrl word.
+ * For now, count kernel and user mode.
+ */
if (cfg[0].enabled) {
- ctrl |= LOONGSON2_COUNTER1_EVENT(cfg[0].event);
+ ctrl |= LOONGSON2_PERFCTRL_EVENT(0, cfg[0].event);
reg.reset_counter1 = 0x80000000ULL - cfg[0].count;
}
if (cfg[1].enabled) {
- ctrl |= LOONGSON2_COUNTER2_EVENT(cfg[1].event);
- reg.reset_counter2 = (0x80000000ULL - cfg[1].count);
+ ctrl |= LOONGSON2_PERFCTRL_EVENT(1, cfg[1].event);
+ reg.reset_counter2 = 0x80000000ULL - cfg[1].count;
}
if (cfg[0].enabled || cfg[1].enabled) {
- ctrl |= LOONGSON2_PERFCNT_EXL | LOONGSON2_PERFCNT_INT_EN;
+ ctrl |= LOONGSON2_PERFCTRL_EXL | LOONGSON2_PERFCTRL_ENABLE;
if (cfg[0].kernel || cfg[1].kernel)
- ctrl |= LOONGSON2_PERFCNT_KERNEL;
+ ctrl |= LOONGSON2_PERFCTRL_KERNEL;
if (cfg[0].user || cfg[1].user)
- ctrl |= LOONGSON2_PERFCNT_USER;
+ ctrl |= LOONGSON2_PERFCTRL_USER;
}
reg.ctrl = ctrl;
reg.cnt1_enabled = cfg[0].enabled;
reg.cnt2_enabled = cfg[1].enabled;
-
}
-/* Program all of the registers in preparation for enabling profiling. */
-
static void loongson2_cpu_setup(void *args)
{
- uint64_t perfcount;
-
- perfcount = (reg.reset_counter2 << 32) | reg.reset_counter1;
- write_c0_perfcnt(perfcount);
+ write_c0_perfcnt((reg.reset_counter2 << 32) | reg.reset_counter1);
}
static void loongson2_cpu_start(void *args)
@@ -114,15 +103,8 @@ static irqreturn_t loongson2_perfcount_handler(int irq, void *dev_id)
struct pt_regs *regs = get_irq_regs();
int enabled;
- /*
- * LOONGSON2 defines two 32-bit performance counters.
- * To avoid a race updating the registers we need to stop the counters
- * while we're messing with
- * them ...
- */
-
/* Check whether the irq belongs to me */
- enabled = read_c0_perfcnt() & LOONGSON2_PERFCNT_INT_EN;
+ enabled = read_c0_perfctrl() & LOONGSON2_PERFCTRL_ENABLE;
if (!enabled)
return IRQ_NONE;
enabled = reg.cnt1_enabled | reg.cnt2_enabled;
diff --git a/arch/mips/powertv/asic/prealloc-calliope.c b/arch/mips/powertv/asic/prealloc-calliope.c
index cd5b76a1c951..3fc5d46687a9 100644
--- a/arch/mips/powertv/asic/prealloc-calliope.c
+++ b/arch/mips/powertv/asic/prealloc-calliope.c
@@ -22,7 +22,9 @@
*/
#include <linux/init.h>
+#include <linux/ioport.h>
#include <asm/mach-powertv/asic.h>
+#include "prealloc.h"
/*
* NON_DVR_CAPABLE CALLIOPE RESOURCES
@@ -32,432 +34,234 @@ struct resource non_dvr_calliope_resources[] __initdata =
/*
* VIDEO / LX1
*/
- {
- .name = "ST231aImage", /* Delta-Mu 1 image and ram */
- .start = 0x24000000,
- .end = 0x24200000 - 1, /*2MiB */
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ST231aMonitor", /*8KiB block ST231a monitor */
- .start = 0x24200000,
- .end = 0x24202000 - 1,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "MediaMemory1",
- .start = 0x24202000,
- .end = 0x26700000 - 1, /*~36.9MiB (32MiB - (2MiB + 8KiB)) */
- .flags = IORESOURCE_MEM,
- },
+ /* Delta-Mu 1 image (2MiB) */
+ PREALLOC_NORMAL("ST231aImage", 0x24000000, 0x24200000-1,
+ IORESOURCE_MEM)
+ /* Delta-Mu 1 monitor (8KiB) */
+ PREALLOC_NORMAL("ST231aMonitor", 0x24200000, 0x24202000-1,
+ IORESOURCE_MEM)
+ /* Delta-Mu 1 RAM (~36.9MiB (39MiB - (2MiB + 8KiB))) */
+ PREALLOC_NORMAL("MediaMemory1", 0x24202000, 0x26700000-1,
+ IORESOURCE_MEM)
+
/*
* Sysaudio Driver
*/
- {
- .name = "DSP_Image_Buff",
- .start = 0x00000000,
- .end = 0x000FFFFF,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ADSC_CPU_PCM_Buff",
- .start = 0x00000000,
- .end = 0x00009FFF,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ADSC_AUX_Buff",
- .start = 0x00000000,
- .end = 0x00003FFF,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ADSC_Main_Buff",
- .start = 0x00000000,
- .end = 0x00003FFF,
- .flags = IORESOURCE_MEM,
- },
+ /* DSP code and data images (1MiB) */
+ PREALLOC_NORMAL("DSP_Image_Buff", 0x00000000, 0x00100000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+ /* ADSC CPU PCM buffer (40KiB) */
+ PREALLOC_NORMAL("ADSC_CPU_PCM_Buff", 0x00000000, 0x0000A000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+ /* ADSC AUX buffer (128KiB) */
+ PREALLOC_NORMAL("ADSC_AUX_Buff", 0x00000000, 0x00020000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+ /* ADSC Main buffer (128KiB) */
+ PREALLOC_NORMAL("ADSC_Main_Buff", 0x00000000, 0x00020000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
* STAVEM driver/STAPI
*/
- {
- .name = "AVMEMPartition0",
- .start = 0x00000000,
- .end = 0x00600000 - 1, /* 6 MB total */
- .flags = IORESOURCE_MEM,
- },
+ /* 6MiB */
+ PREALLOC_NORMAL("AVMEMPartition0", 0x00000000, 0x00600000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
* DOCSIS Subsystem
*/
- {
- .name = "Docsis",
- .start = 0x22000000,
- .end = 0x22700000 - 1,
- .flags = IORESOURCE_MEM,
- },
+ /* 7MiB */
+ PREALLOC_DOCSIS("Docsis", 0x27500000, 0x27c00000-1, IORESOURCE_MEM)
+
/*
* GHW HAL Driver
*/
- {
- .name = "GraphicsHeap",
- .start = 0x22700000,
- .end = 0x23500000 - 1, /* 14 MB total */
- .flags = IORESOURCE_MEM,
- },
+ /* PowerTV Graphics Heap (14MiB) */
+ PREALLOC_NORMAL("GraphicsHeap", 0x26700000, 0x26700000+(14*1048576)-1,
+ IORESOURCE_MEM)
+
/*
* multi com buffer area
*/
- {
- .name = "MulticomSHM",
- .start = 0x23700000,
- .end = 0x23720000 - 1,
- .flags = IORESOURCE_MEM,
- },
+ /* 128KiB */
+ PREALLOC_NORMAL("MulticomSHM", 0x23700000, 0x23720000-1,
+ IORESOURCE_MEM)
+
/*
* DMA Ring buffer (don't need recording buffers)
*/
- {
- .name = "BMM_Buffer",
- .start = 0x00000000,
- .end = 0x000AA000 - 1,
- .flags = IORESOURCE_MEM,
- },
+ /* 680KiB */
+ PREALLOC_NORMAL("BMM_Buffer", 0x00000000, 0x000AA000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
* Display bins buffer for unit0
*/
- {
- .name = "DisplayBins0",
- .start = 0x00000000,
- .end = 0x00000FFF, /* 4 KB total */
- .flags = IORESOURCE_MEM,
- },
+ /* 4KiB */
+ PREALLOC_NORMAL("DisplayBins0", 0x00000000, 0x00001000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
- *
* AVFS: player HAL memory
- *
- *
*/
- {
- .name = "AvfsDmaMem",
- .start = 0x00000000,
- .end = 0x002c4c00 - 1, /* 945K * 3 for playback */
- .flags = IORESOURCE_MEM,
- },
+ /* 945K * 3 for playback */
+ PREALLOC_NORMAL("AvfsDmaMem", 0x00000000, 0x002c4c00-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
* PMEM
*/
- {
- .name = "DiagPersistentMemory",
- .start = 0x00000000,
- .end = 0x10000 - 1,
- .flags = IORESOURCE_MEM,
- },
+ /* Persistent memory for diagnostics (64KiB) */
+ PREALLOC_PMEM("DiagPersistentMemory", 0x00000000, 0x10000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
* Smartcard
*/
- {
- .name = "SmartCardInfo",
- .start = 0x00000000,
- .end = 0x2800 - 1,
- .flags = IORESOURCE_MEM,
- },
+ /* Read and write buffers for Internal/External cards (10KiB) */
+ PREALLOC_NORMAL("SmartCardInfo", 0x00000000, 0x2800-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
* NAND Flash
*/
- {
- .name = "NandFlash",
- .start = NAND_FLASH_BASE,
- .end = NAND_FLASH_BASE + 0x400 - 1,
- .flags = IORESOURCE_IO,
- },
+ /* 1KiB */
+ PREALLOC_NORMAL("NandFlash", NAND_FLASH_BASE, NAND_FLASH_BASE+0x400-1,
+ IORESOURCE_MEM)
+
/*
* Synopsys GMAC Memory Region
*/
- {
- .name = "GMAC",
- .start = 0x00000000,
- .end = 0x00010000 - 1,
- .flags = IORESOURCE_MEM,
- },
+ /* 64KiB */
+ PREALLOC_NORMAL("GMAC", 0x00000000, 0x00010000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
- * Add other resources here
+ * TFTPBuffer
*
+ * This buffer is used in some minimal configurations (e.g. two-way
+ * loader) for storing software images
*/
- { },
-};
+ PREALLOC_TFTP("TFTPBuffer", 0x00000000, MEBIBYTE(80)-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-struct resource non_dvr_vz_calliope_resources[] __initdata =
-{
/*
- * VIDEO / LX1
- */
- {
- .name = "ST231aImage", /* Delta-Mu 1 image and ram */
- .start = 0x24000000,
- .end = 0x24200000 - 1, /*2 Meg */
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ST231aMonitor", /* 8k block ST231a monitor */
- .start = 0x24200000,
- .end = 0x24202000 - 1,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "MediaMemory1",
- .start = 0x22202000,
- .end = 0x22C20B85 - 1, /* 10.12 Meg */
- .flags = IORESOURCE_MEM,
- },
- /*
- * Sysaudio Driver
- */
- {
- .name = "DSP_Image_Buff",
- .start = 0x00000000,
- .end = 0x000FFFFF,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ADSC_CPU_PCM_Buff",
- .start = 0x00000000,
- .end = 0x00009FFF,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ADSC_AUX_Buff",
- .start = 0x00000000,
- .end = 0x00003FFF,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ADSC_Main_Buff",
- .start = 0x00000000,
- .end = 0x00003FFF,
- .flags = IORESOURCE_MEM,
- },
- /*
- * STAVEM driver/STAPI
- */
- {
- .name = "AVMEMPartition0",
- .start = 0x20300000,
- .end = 0x20620000-1, /*3.125 MB total */
- .flags = IORESOURCE_MEM,
- },
- /*
- * GHW HAL Driver
- */
- {
- .name = "GraphicsHeap",
- .start = 0x20100000,
- .end = 0x20300000 - 1,
- .flags = IORESOURCE_MEM,
- },
- /*
- * multi com buffer area
- */
- {
- .name = "MulticomSHM",
- .start = 0x23900000,
- .end = 0x23920000 - 1,
- .flags = IORESOURCE_MEM,
- },
- /*
- * DMA Ring buffer
- */
- {
- .name = "BMM_Buffer",
- .start = 0x00000000,
- .end = 0x000AA000 - 1,
- .flags = IORESOURCE_MEM,
- },
- /*
- * Display bins buffer for unit0
- */
- {
- .name = "DisplayBins0",
- .start = 0x00000000,
- .end = 0x00000FFF,
- .flags = IORESOURCE_MEM,
- },
- /*
- * PMEM
- */
- {
- .name = "DiagPersistentMemory",
- .start = 0x00000000,
- .end = 0x10000 - 1,
- .flags = IORESOURCE_MEM,
- },
- /*
- * Smartcard
- */
- {
- .name = "SmartCardInfo",
- .start = 0x00000000,
- .end = 0x2800 - 1,
- .flags = IORESOURCE_MEM,
- },
- /*
- * NAND Flash
+ * Add other resources here
*/
- {
- .name = "NandFlash",
- .start = NAND_FLASH_BASE,
- .end = NAND_FLASH_BASE+0x400 - 1,
- .flags = IORESOURCE_IO,
- },
+
/*
- * Synopsys GMAC Memory Region
+ * End of Resource marker
*/
{
- .name = "GMAC",
- .start = 0x00000000,
- .end = 0x00010000 - 1,
- .flags = IORESOURCE_MEM,
+ .flags = 0,
},
- /*
- * Add other resources here
- */
- { },
};
+
struct resource non_dvr_vze_calliope_resources[] __initdata =
{
/*
* VIDEO / LX1
*/
- {
- .name = "ST231aImage", /* Delta-Mu 1 image and ram */
- .start = 0x22000000,
- .end = 0x22200000 - 1, /*2 Meg */
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ST231aMonitor", /* 8k block ST231a monitor */
- .start = 0x22200000,
- .end = 0x22202000 - 1,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "MediaMemory1",
- .start = 0x22202000,
- .end = 0x22C20B85 - 1, /* 10.12 Meg */
- .flags = IORESOURCE_MEM,
- },
+ /* Delta-Mu 1 image (2MiB) */
+ PREALLOC_NORMAL("ST231aImage", 0x22000000, 0x22200000-1,
+ IORESOURCE_MEM)
+ /* Delta-Mu 1 monitor (8KiB) */
+ PREALLOC_NORMAL("ST231aMonitor", 0x22200000, 0x22202000-1,
+ IORESOURCE_MEM)
+ /* Delta-Mu 1 RAM (10.12MiB) */
+ PREALLOC_NORMAL("MediaMemory1", 0x22202000, 0x22C20B85-1,
+ IORESOURCE_MEM)
+
/*
* Sysaudio Driver
*/
- {
- .name = "DSP_Image_Buff",
- .start = 0x00000000,
- .end = 0x000FFFFF,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ADSC_CPU_PCM_Buff",
- .start = 0x00000000,
- .end = 0x00009FFF,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ADSC_AUX_Buff",
- .start = 0x00000000,
- .end = 0x00003FFF,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ADSC_Main_Buff",
- .start = 0x00000000,
- .end = 0x00003FFF,
- .flags = IORESOURCE_MEM,
- },
+ /* DSP code and data images (1MiB) */
+ PREALLOC_NORMAL("DSP_Image_Buff", 0x00000000, 0x00100000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+ /* ADSC CPU PCM buffer (40KiB) */
+ PREALLOC_NORMAL("ADSC_CPU_PCM_Buff", 0x00000000, 0x0000A000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+ /* ADSC AUX buffer (16KiB) */
+ PREALLOC_NORMAL("ADSC_AUX_Buff", 0x00000000, 0x00004000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+ /* ADSC Main buffer (16KiB) */
+ PREALLOC_NORMAL("ADSC_Main_Buff", 0x00000000, 0x00004000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
* STAVEM driver/STAPI
*/
- {
- .name = "AVMEMPartition0",
- .start = 0x20396000,
- .end = 0x206B6000 - 1, /* 3.125 MB total */
- .flags = IORESOURCE_MEM,
- },
+ /* 3.125MiB */
+ PREALLOC_NORMAL("AVMEMPartition0", 0x20396000, 0x206B6000-1,
+ IORESOURCE_MEM)
+
/*
* GHW HAL Driver
*/
- {
- .name = "GraphicsHeap",
- .start = 0x20100000,
- .end = 0x20396000 - 1,
- .flags = IORESOURCE_MEM,
- },
+ /* PowerTV Graphics Heap (2.59MiB) */
+ PREALLOC_NORMAL("GraphicsHeap", 0x20100000, 0x20396000-1,
+ IORESOURCE_MEM)
+
/*
* multi com buffer area
*/
- {
- .name = "MulticomSHM",
- .start = 0x206B6000,
- .end = 0x206D6000 - 1,
- .flags = IORESOURCE_MEM,
- },
+ /* 128KiB */
+ PREALLOC_NORMAL("MulticomSHM", 0x206B6000, 0x206D6000-1,
+ IORESOURCE_MEM)
+
/*
- * DMA Ring buffer
+ * DMA Ring buffer (don't need recording buffers)
*/
- {
- .name = "BMM_Buffer",
- .start = 0x00000000,
- .end = 0x000AA000 - 1,
- .flags = IORESOURCE_MEM,
- },
+ /* 680KiB */
+ PREALLOC_NORMAL("BMM_Buffer", 0x00000000, 0x000AA000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
* Display bins buffer for unit0
*/
- {
- .name = "DisplayBins0",
- .start = 0x00000000,
- .end = 0x00000FFF,
- .flags = IORESOURCE_MEM,
- },
+ /* 4KiB */
+ PREALLOC_NORMAL("DisplayBins0", 0x00000000, 0x00001000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
* PMEM
*/
- {
- .name = "DiagPersistentMemory",
- .start = 0x00000000,
- .end = 0x10000 - 1,
- .flags = IORESOURCE_MEM,
- },
+ /* Persistent memory for diagnostics (64KiB) */
+ PREALLOC_PMEM("DiagPersistentMemory", 0x00000000, 0x10000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
* Smartcard
*/
- {
- .name = "SmartCardInfo",
- .start = 0x00000000,
- .end = 0x2800 - 1,
- .flags = IORESOURCE_MEM,
- },
+ /* Read and write buffers for Internal/External cards (10KiB) */
+ PREALLOC_NORMAL("SmartCardInfo", 0x00000000, 0x2800-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
* NAND Flash
*/
- {
- .name = "NandFlash",
- .start = NAND_FLASH_BASE,
- .end = NAND_FLASH_BASE+0x400 - 1,
- .flags = IORESOURCE_MEM,
- },
+ /* 1KiB */
+ PREALLOC_NORMAL("NandFlash", NAND_FLASH_BASE, NAND_FLASH_BASE+0x400-1,
+ IORESOURCE_MEM)
+
/*
* Synopsys GMAC Memory Region
*/
- {
- .name = "GMAC",
- .start = 0x00000000,
- .end = 0x00010000 - 1,
- .flags = IORESOURCE_MEM,
- },
+ /* 64KiB */
+ PREALLOC_NORMAL("GMAC", 0x00000000, 0x00010000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
* Add other resources here
*/
- { },
+
+ /*
+ * End of Resource marker
+ */
+ {
+ .flags = 0,
+ },
};
struct resource non_dvr_vzf_calliope_resources[] __initdata =
@@ -465,156 +269,117 @@ struct resource non_dvr_vzf_calliope_resources[] __initdata =
/*
* VIDEO / LX1
*/
- {
- .name = "ST231aImage", /*Delta-Mu 1 image and ram */
- .start = 0x24000000,
- .end = 0x24200000 - 1, /*2MiB */
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ST231aMonitor", /*8KiB block ST231a monitor */
- .start = 0x24200000,
- .end = 0x24202000 - 1,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "MediaMemory1",
- .start = 0x24202000,
- /* ~19.4 (21.5MiB - (2MiB + 8KiB)) */
- .end = 0x25580000 - 1,
- .flags = IORESOURCE_MEM,
- },
+ /* Delta-Mu 1 image (2MiB) */
+ PREALLOC_NORMAL("ST231aImage", 0x24000000, 0x24200000-1,
+ IORESOURCE_MEM)
+ /* Delta-Mu 1 monitor (8KiB) */
+ PREALLOC_NORMAL("ST231aMonitor", 0x24200000, 0x24202000-1,
+ IORESOURCE_MEM)
+ /* Delta-Mu 1 RAM (~19.4MiB (21.5MiB - (2MiB + 8KiB))) */
+ PREALLOC_NORMAL("MediaMemory1", 0x24202000, 0x25580000-1,
+ IORESOURCE_MEM)
+
/*
* Sysaudio Driver
*/
- {
- .name = "DSP_Image_Buff",
- .start = 0x00000000,
- .end = 0x000FFFFF,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ADSC_CPU_PCM_Buff",
- .start = 0x00000000,
- .end = 0x00009FFF,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ADSC_AUX_Buff",
- .start = 0x00000000,
- .end = 0x00003FFF,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ADSC_Main_Buff",
- .start = 0x00000000,
- .end = 0x00003FFF,
- .flags = IORESOURCE_MEM,
- },
+ /* DSP code and data images (1MiB) */
+ PREALLOC_NORMAL("DSP_Image_Buff", 0x00000000, 0x00100000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+ /* ADSC CPU PCM buffer (40KiB) */
+ PREALLOC_NORMAL("ADSC_CPU_PCM_Buff", 0x00000000, 0x0000A000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+ /* ADSC AUX buffer (128KiB) */
+ PREALLOC_NORMAL("ADSC_AUX_Buff", 0x00000000, 0x00020000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+ /* ADSC Main buffer (128KiB) */
+ PREALLOC_NORMAL("ADSC_Main_Buff", 0x00000000, 0x00020000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
* STAVEM driver/STAPI
*/
- {
- .name = "AVMEMPartition0",
- .start = 0x00000000,
- .end = 0x00480000 - 1, /* 4.5 MB total */
- .flags = IORESOURCE_MEM,
- },
+ /* 4.5MiB */
+ PREALLOC_NORMAL("AVMEMPartition0", 0x00000000, 0x00480000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
* GHW HAL Driver
*/
- {
- .name = "GraphicsHeap",
- .start = 0x22700000,
- .end = 0x23500000 - 1, /* 14 MB total */
- .flags = IORESOURCE_MEM,
- },
+ /* PowerTV Graphics Heap (14MiB) */
+ PREALLOC_NORMAL("GraphicsHeap", 0x25600000, 0x25600000+(14*1048576)-1,
+ IORESOURCE_MEM)
+
/*
* multi com buffer area
*/
- {
- .name = "MulticomSHM",
- .start = 0x23700000,
- .end = 0x23720000 - 1,
- .flags = IORESOURCE_MEM,
- },
+ /* 128KiB */
+ PREALLOC_NORMAL("MulticomSHM", 0x23700000, 0x23720000-1,
+ IORESOURCE_MEM)
+
/*
* DMA Ring buffer (don't need recording buffers)
*/
- {
- .name = "BMM_Buffer",
- .start = 0x00000000,
- .end = 0x000AA000 - 1,
- .flags = IORESOURCE_MEM,
- },
+ /* 680KiB */
+ PREALLOC_NORMAL("BMM_Buffer", 0x00000000, 0x000AA000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
* Display bins buffer for unit0
*/
- {
- .name = "DisplayBins0",
- .start = 0x00000000,
- .end = 0x00000FFF, /* 4 KB total */
- .flags = IORESOURCE_MEM,
- },
+ /* 4KiB */
+ PREALLOC_NORMAL("DisplayBins0", 0x00000000, 0x00001000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
* Display bins buffer for unit1
*/
- {
- .name = "DisplayBins1",
- .start = 0x00000000,
- .end = 0x00000FFF, /* 4 KB total */
- .flags = IORESOURCE_MEM,
- },
+ /* 4KiB */
+ PREALLOC_NORMAL("DisplayBins1", 0x00000000, 0x00001000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
- *
* AVFS: player HAL memory
- *
- *
*/
- {
- .name = "AvfsDmaMem",
- .start = 0x00000000,
- .end = 0x002c4c00 - 1, /* 945K * 3 for playback */
- .flags = IORESOURCE_MEM,
- },
+ /* 945K * 3 for playback */
+ PREALLOC_NORMAL("AvfsDmaMem", 0x00000000, 0x002c4c00-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
* PMEM
*/
- {
- .name = "DiagPersistentMemory",
- .start = 0x00000000,
- .end = 0x10000 - 1,
- .flags = IORESOURCE_MEM,
- },
+ /* Persistent memory for diagnostics (64KiB) */
+ PREALLOC_PMEM("DiagPersistentMemory", 0x00000000, 0x10000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
* Smartcard
*/
- {
- .name = "SmartCardInfo",
- .start = 0x00000000,
- .end = 0x2800 - 1,
- .flags = IORESOURCE_MEM,
- },
+ /* Read and write buffers for Internal/External cards (10KiB) */
+ PREALLOC_NORMAL("SmartCardInfo", 0x00000000, 0x2800-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
* NAND Flash
*/
- {
- .name = "NandFlash",
- .start = NAND_FLASH_BASE,
- .end = NAND_FLASH_BASE + 0x400 - 1,
- .flags = IORESOURCE_MEM,
- },
+ /* 1KiB */
+ PREALLOC_NORMAL("NandFlash", NAND_FLASH_BASE, NAND_FLASH_BASE+0x400-1,
+ IORESOURCE_MEM)
+
/*
* Synopsys GMAC Memory Region
*/
- {
- .name = "GMAC",
- .start = 0x00000000,
- .end = 0x00010000 - 1,
- .flags = IORESOURCE_MEM,
- },
+ /* 64KiB */
+ PREALLOC_NORMAL("GMAC", 0x00000000, 0x00010000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
* Add other resources here
*/
- { },
+
+ /*
+ * End of Resource marker
+ */
+ {
+ .flags = 0,
+ },
};
diff --git a/arch/mips/powertv/asic/prealloc-cronus.c b/arch/mips/powertv/asic/prealloc-cronus.c
index 45a5c3ea718c..c532b50521ec 100644
--- a/arch/mips/powertv/asic/prealloc-cronus.c
+++ b/arch/mips/powertv/asic/prealloc-cronus.c
@@ -22,7 +22,9 @@
*/
#include <linux/init.h>
+#include <linux/ioport.h>
#include <asm/mach-powertv/asic.h>
+#include "prealloc.h"
/*
* DVR_CAPABLE CRONUS RESOURCES
@@ -30,305 +32,161 @@
struct resource dvr_cronus_resources[] __initdata =
{
/*
- *
* VIDEO1 / LX1
- *
*/
- {
- .name = "ST231aImage", /* Delta-Mu 1 image and ram */
- .start = 0x24000000,
- .end = 0x241FFFFF, /* 2MiB */
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ST231aMonitor", /* 8KiB block ST231a monitor */
- .start = 0x24200000,
- .end = 0x24201FFF,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "MediaMemory1",
- .start = 0x24202000,
- .end = 0x25FFFFFF, /*~29.9MiB (32MiB - (2MiB + 8KiB)) */
- .flags = IORESOURCE_MEM,
- },
+ /* Delta-Mu 1 image (2MiB) */
+ PREALLOC_NORMAL("ST231aImage", 0x24000000, 0x24200000-1,
+ IORESOURCE_MEM)
+ /* Delta-Mu 1 monitor (8KiB) */
+ PREALLOC_NORMAL("ST231aMonitor", 0x24200000, 0x24202000-1,
+ IORESOURCE_MEM)
+ /* Delta-Mu 1 RAM (~29.9MiB (32MiB - (2MiB + 8KiB))) */
+ PREALLOC_NORMAL("MediaMemory1", 0x24202000, 0x26000000-1,
+ IORESOURCE_MEM)
+
/*
- *
* VIDEO2 / LX2
- *
*/
- {
- .name = "ST231bImage", /* Delta-Mu 2 image and ram */
- .start = 0x60000000,
- .end = 0x601FFFFF, /* 2MiB */
- .flags = IORESOURCE_IO,
- },
- {
- .name = "ST231bMonitor", /* 8KiB block ST231b monitor */
- .start = 0x60200000,
- .end = 0x60201FFF,
- .flags = IORESOURCE_IO,
- },
- {
- .name = "MediaMemory2",
- .start = 0x60202000,
- .end = 0x61FFFFFF, /*~29.9MiB (32MiB - (2MiB + 8KiB)) */
- .flags = IORESOURCE_IO,
- },
+ /* Delta-Mu 2 image (2MiB) */
+ PREALLOC_NORMAL("ST231bImage", 0x60000000, 0x60200000-1,
+ IORESOURCE_MEM)
+ /* Delta-Mu 2 monitor (8KiB) */
+ PREALLOC_NORMAL("ST231bMonitor", 0x60200000, 0x60202000-1,
+ IORESOURCE_MEM)
+ /* Delta-Mu 2 RAM (~29.9MiB (32MiB - (2MiB + 8KiB))) */
+ PREALLOC_NORMAL("MediaMemory2", 0x60202000, 0x62000000-1,
+ IORESOURCE_MEM)
+
/*
- *
* Sysaudio Driver
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * DSP_Image_Buff - DSP code and data images (1MB)
- * ADSC_CPU_PCM_Buff - ADSC CPU PCM buffer (40KB)
- * ADSC_AUX_Buff - ADSC AUX buffer (16KB)
- * ADSC_Main_Buff - ADSC Main buffer (16KB)
- *
*/
- {
- .name = "DSP_Image_Buff",
- .start = 0x00000000,
- .end = 0x000FFFFF,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ADSC_CPU_PCM_Buff",
- .start = 0x00000000,
- .end = 0x00009FFF,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ADSC_AUX_Buff",
- .start = 0x00000000,
- .end = 0x00003FFF,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ADSC_Main_Buff",
- .start = 0x00000000,
- .end = 0x00003FFF,
- .flags = IORESOURCE_MEM,
- },
+ /* DSP code and data images (1MiB) */
+ PREALLOC_NORMAL("DSP_Image_Buff", 0x00000000, 0x00100000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+ /* ADSC CPU PCM buffer (40KiB) */
+ PREALLOC_NORMAL("ADSC_CPU_PCM_Buff", 0x00000000, 0x0000A000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+ /* ADSC AUX buffer (128KiB) */
+ PREALLOC_NORMAL("ADSC_AUX_Buff", 0x00000000, 0x00020000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+ /* ADSC Main buffer (128KiB) */
+ PREALLOC_NORMAL("ADSC_Main_Buff", 0x00000000, 0x00020000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
- *
* STAVEM driver/STAPI
*
- * This driver requires:
- *
- * Arbitrary Based Buffers:
* This memory area is used for allocating buffers for Video decoding
* purposes. Allocation/De-allocation within this buffer is managed
* by the STAVMEM driver of the STAPI. They could be Decimated
* Picture Buffers, Intermediate Buffers, as deemed necessary for
* video decoding purposes, for any video decoders on Zeus.
- *
*/
- {
- .name = "AVMEMPartition0",
- .start = 0x63580000,
- .end = 0x64180000 - 1, /* 12 MB total */
- .flags = IORESOURCE_IO,
- },
+ /* 12MiB */
+ PREALLOC_NORMAL("AVMEMPartition0", 0x00000000, 0x00c00000-1,
+ IORESOURCE_MEM)
+
/*
- *
* DOCSIS Subsystem
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * Docsis -
- *
*/
- {
- .name = "Docsis",
- .start = 0x62000000,
- .end = 0x62700000 - 1, /* 7 MB total */
- .flags = IORESOURCE_IO,
- },
+ /* 7MiB */
+ PREALLOC_DOCSIS("Docsis", 0x67500000, 0x67c00000-1, IORESOURCE_MEM)
+
/*
- *
* GHW HAL Driver
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * GraphicsHeap - PowerTV Graphics Heap
- *
*/
- {
- .name = "GraphicsHeap",
- .start = 0x62700000,
- .end = 0x63500000 - 1, /* 14 MB total */
- .flags = IORESOURCE_IO,
- },
+ /* PowerTV Graphics Heap (14MiB) */
+ PREALLOC_NORMAL("GraphicsHeap", 0x62700000, 0x63500000-1,
+ IORESOURCE_MEM)
+
/*
- *
* multi com buffer area
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * Docsis -
- *
*/
- {
- .name = "MulticomSHM",
- .start = 0x26000000,
- .end = 0x26020000 - 1,
- .flags = IORESOURCE_MEM,
- },
+ /* 128KiB */
+ PREALLOC_NORMAL("MulticomSHM", 0x26000000, 0x26020000-1,
+ IORESOURCE_MEM)
+
/*
- *
* DMA Ring buffer
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * Docsis -
- *
*/
- {
- .name = "BMM_Buffer",
- .start = 0x00000000,
- .end = 0x00280000 - 1,
- .flags = IORESOURCE_MEM,
- },
+ PREALLOC_NORMAL("BMM_Buffer", 0x00000000, 0x002EA000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
- *
* Display bins buffer for unit0
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * Display Bins for unit0
- *
*/
- {
- .name = "DisplayBins0",
- .start = 0x00000000,
- .end = 0x00000FFF, /* 4 KB total */
- .flags = IORESOURCE_MEM,
- },
+ /* 4KiB */
+ PREALLOC_NORMAL("DisplayBins0", 0x00000000, 0x00001000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
- *
- * Display bins buffer
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * Display Bins for unit1
- *
+ * Display bins buffer for unit1
*/
- {
- .name = "DisplayBins1",
- .start = 0x64AD4000,
- .end = 0x64AD5000 - 1, /* 4 KB total */
- .flags = IORESOURCE_IO,
- },
+ /* 4KiB */
+ PREALLOC_NORMAL("DisplayBins1", 0x00000000, 0x00001000-1,
+ IORESOURCE_MEM)
+
/*
- *
* ITFS
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * Docsis -
- *
*/
- {
- .name = "ITFS",
- .start = 0x64180000,
- /* 815,104 bytes each for 2 ITFS partitions. */
- .end = 0x6430DFFF,
- .flags = IORESOURCE_IO,
- },
+ /* 815,104 bytes each for 2 ITFS partitions. */
+ PREALLOC_NORMAL("ITFS", 0x00000000, 0x0018E000-1, IORESOURCE_MEM)
+
/*
- *
* AVFS
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * Docsis -
- *
*/
- {
- .name = "AvfsDmaMem",
- .start = 0x6430E000,
- /* (945K * 8) = (128K *3) 5 playbacks / 3 server */
- .end = 0x64AD0000 - 1,
- .flags = IORESOURCE_IO,
- },
- {
- .name = "AvfsFileSys",
- .start = 0x64AD0000,
- .end = 0x64AD1000 - 1, /* 4K */
- .flags = IORESOURCE_IO,
- },
+ /* (945K * 8) = (128K * 3) 5 playbacks / 3 server */
+ PREALLOC_NORMAL("AvfsDmaMem", 0x00000000, 0x007c2000-1,
+ IORESOURCE_MEM)
+
+ /* 4KiB */
+ PREALLOC_NORMAL("AvfsFileSys", 0x00000000, 0x00001000-1,
+ IORESOURCE_MEM)
+
/*
- *
* PMEM
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * Persistent memory for diagnostics.
- *
*/
- {
- .name = "DiagPersistentMemory",
- .start = 0x00000000,
- .end = 0x10000 - 1,
- .flags = IORESOURCE_MEM,
- },
+ /* Persistent memory for diagnostics (64KiB) */
+ PREALLOC_PMEM("DiagPersistentMemory", 0x00000000, 0x10000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
- *
* Smartcard
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * Read and write buffers for Internal/External cards
- *
*/
- {
- .name = "SmartCardInfo",
- .start = 0x64AD1000,
- .end = 0x64AD3800 - 1,
- .flags = IORESOURCE_IO,
- },
+ /* Read and write buffers for Internal/External cards (10KiB) */
+ PREALLOC_NORMAL("SmartCardInfo", 0x00000000, 0x2800-1,
+ IORESOURCE_MEM)
+
/*
- *
* KAVNET
- * NP Reset Vector - must be of the form xxCxxxxx
- * NP Image - must be video bank 1
- * NP IPC - must be video bank 2
*/
- {
- .name = "NP_Reset_Vector",
- .start = 0x27c00000,
- .end = 0x27c01000 - 1,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "NP_Image",
- .start = 0x27020000,
- .end = 0x27060000 - 1,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "NP_IPC",
- .start = 0x63500000,
- .end = 0x63580000 - 1,
- .flags = IORESOURCE_IO,
- },
+ /* NP Reset Vector - must be of the form xxCxxxxx (4KiB) */
+ PREALLOC_NORMAL("NP_Reset_Vector", 0x27c00000, 0x27c01000-1,
+ IORESOURCE_MEM)
+ /* NP Image - must be video bank 1 (320KiB) */
+ PREALLOC_NORMAL("NP_Image", 0x27020000, 0x27070000-1, IORESOURCE_MEM)
+ /* NP IPC - must be video bank 2 (512KiB) */
+ PREALLOC_NORMAL("NP_IPC", 0x63500000, 0x63580000-1, IORESOURCE_MEM)
+
+ /*
+ * TFTPBuffer
+ *
+ * This buffer is used in some minimal configurations (e.g. two-way
+ * loader) for storing software images
+ */
+ PREALLOC_TFTP("TFTPBuffer", 0x00000000, MEBIBYTE(80)-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
* Add other resources here
*/
- { },
+
+ /*
+ * End of Resource marker
+ */
+ {
+ .flags = 0,
+ },
};
/*
@@ -337,272 +195,146 @@ struct resource dvr_cronus_resources[] __initdata =
struct resource non_dvr_cronus_resources[] __initdata =
{
/*
- *
* VIDEO1 / LX1
- *
*/
- {
- .name = "ST231aImage", /* Delta-Mu 1 image and ram */
- .start = 0x24000000,
- .end = 0x241FFFFF, /* 2MiB */
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ST231aMonitor", /* 8KiB block ST231a monitor */
- .start = 0x24200000,
- .end = 0x24201FFF,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "MediaMemory1",
- .start = 0x24202000,
- .end = 0x25FFFFFF, /*~29.9MiB (32MiB - (2MiB + 8KiB)) */
- .flags = IORESOURCE_MEM,
- },
+ /* Delta-Mu 1 image (2MiB) */
+ PREALLOC_NORMAL("ST231aImage", 0x24000000, 0x24200000-1,
+ IORESOURCE_MEM)
+ /* Delta-Mu 1 monitor (8KiB) */
+ PREALLOC_NORMAL("ST231aMonitor", 0x24200000, 0x24202000-1,
+ IORESOURCE_MEM)
+ /* Delta-Mu 1 RAM (~29.9MiB (32MiB - (2MiB + 8KiB))) */
+ PREALLOC_NORMAL("MediaMemory1", 0x24202000, 0x26000000-1,
+ IORESOURCE_MEM)
+
/*
- *
* VIDEO2 / LX2
- *
*/
- {
- .name = "ST231bImage", /* Delta-Mu 2 image and ram */
- .start = 0x60000000,
- .end = 0x601FFFFF, /* 2MiB */
- .flags = IORESOURCE_IO,
- },
- {
- .name = "ST231bMonitor", /* 8KiB block ST231b monitor */
- .start = 0x60200000,
- .end = 0x60201FFF,
- .flags = IORESOURCE_IO,
- },
- {
- .name = "MediaMemory2",
- .start = 0x60202000,
- .end = 0x61FFFFFF, /*~29.9MiB (32MiB - (2MiB + 8KiB)) */
- .flags = IORESOURCE_IO,
- },
+ /* Delta-Mu 2 image (2MiB) */
+ PREALLOC_NORMAL("ST231bImage", 0x60000000, 0x60200000-1,
+ IORESOURCE_MEM)
+ /* Delta-Mu 2 monitor (8KiB) */
+ PREALLOC_NORMAL("ST231bMonitor", 0x60200000, 0x60202000-1,
+ IORESOURCE_MEM)
+ /* Delta-Mu 2 RAM (~29.9MiB (32MiB - (2MiB + 8KiB))) */
+ PREALLOC_NORMAL("MediaMemory2", 0x60202000, 0x62000000-1,
+ IORESOURCE_MEM)
+
/*
- *
* Sysaudio Driver
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * DSP_Image_Buff - DSP code and data images (1MB)
- * ADSC_CPU_PCM_Buff - ADSC CPU PCM buffer (40KB)
- * ADSC_AUX_Buff - ADSC AUX buffer (16KB)
- * ADSC_Main_Buff - ADSC Main buffer (16KB)
- *
*/
- {
- .name = "DSP_Image_Buff",
- .start = 0x00000000,
- .end = 0x000FFFFF,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ADSC_CPU_PCM_Buff",
- .start = 0x00000000,
- .end = 0x00009FFF,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ADSC_AUX_Buff",
- .start = 0x00000000,
- .end = 0x00003FFF,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ADSC_Main_Buff",
- .start = 0x00000000,
- .end = 0x00003FFF,
- .flags = IORESOURCE_MEM,
- },
+ /* DSP code and data images (1MiB) */
+ PREALLOC_NORMAL("DSP_Image_Buff", 0x00000000, 0x00100000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+ /* ADSC CPU PCM buffer (40KiB) */
+ PREALLOC_NORMAL("ADSC_CPU_PCM_Buff", 0x00000000, 0x0000A000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+ /* ADSC AUX buffer (128KiB) */
+ PREALLOC_NORMAL("ADSC_AUX_Buff", 0x00000000, 0x00020000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+ /* ADSC Main buffer (128KiB) */
+ PREALLOC_NORMAL("ADSC_Main_Buff", 0x00000000, 0x00020000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
- *
* STAVEM driver/STAPI
*
- * This driver requires:
- *
- * Arbitrary Based Buffers:
* This memory area is used for allocating buffers for Video decoding
* purposes. Allocation/De-allocation within this buffer is managed
* by the STAVMEM driver of the STAPI. They could be Decimated
* Picture Buffers, Intermediate Buffers, as deemed necessary for
* video decoding purposes, for any video decoders on Zeus.
- *
*/
- {
- .name = "AVMEMPartition0",
- .start = 0x63580000,
- .end = 0x64180000 - 1, /* 12 MB total */
- .flags = IORESOURCE_IO,
- },
+ /* 12MiB */
+ PREALLOC_NORMAL("AVMEMPartition0", 0x00000000, 0x00c00000-1,
+ IORESOURCE_MEM)
+
/*
- *
* DOCSIS Subsystem
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * Docsis -
- *
*/
- {
- .name = "Docsis",
- .start = 0x62000000,
- .end = 0x62700000 - 1, /* 7 MB total */
- .flags = IORESOURCE_IO,
- },
+ /* 7MiB */
+ PREALLOC_DOCSIS("Docsis", 0x67500000, 0x67c00000-1, IORESOURCE_MEM)
+
/*
- *
* GHW HAL Driver
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * GraphicsHeap - PowerTV Graphics Heap
- *
*/
- {
- .name = "GraphicsHeap",
- .start = 0x62700000,
- .end = 0x63500000 - 1, /* 14 MB total */
- .flags = IORESOURCE_IO,
- },
+ /* PowerTV Graphics Heap (14MiB) */
+ PREALLOC_NORMAL("GraphicsHeap", 0x62700000, 0x63500000-1,
+ IORESOURCE_MEM)
+
/*
- *
* multi com buffer area
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * Docsis -
- *
*/
- {
- .name = "MulticomSHM",
- .start = 0x26000000,
- .end = 0x26020000 - 1,
- .flags = IORESOURCE_MEM,
- },
+ /* 128KiB */
+ PREALLOC_NORMAL("MulticomSHM", 0x26000000, 0x26020000-1,
+ IORESOURCE_MEM)
+
/*
- *
- * DMA Ring buffer
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * Docsis -
- *
+ * DMA Ring buffer (don't need recording buffers)
*/
- {
- .name = "BMM_Buffer",
- .start = 0x00000000,
- .end = 0x000AA000 - 1,
- .flags = IORESOURCE_MEM,
- },
+ /* 680KiB */
+ PREALLOC_NORMAL("BMM_Buffer", 0x00000000, 0x000AA000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
- *
* Display bins buffer for unit0
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * Display Bins for unit0
- *
*/
- {
- .name = "DisplayBins0",
- .start = 0x00000000,
- .end = 0x00000FFF, /* 4 KB total */
- .flags = IORESOURCE_MEM,
- },
+ /* 4KiB */
+ PREALLOC_NORMAL("DisplayBins0", 0x00000000, 0x00001000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
- *
- * Display bins buffer
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * Display Bins for unit1
- *
+ * Display bins buffer for unit1
*/
- {
- .name = "DisplayBins1",
- .start = 0x64AD4000,
- .end = 0x64AD5000 - 1, /* 4 KB total */
- .flags = IORESOURCE_IO,
- },
+ /* 4KiB */
+ PREALLOC_NORMAL("DisplayBins1", 0x00000000, 0x00001000-1,
+ IORESOURCE_MEM)
+
/*
- *
* AVFS: player HAL memory
- *
- *
*/
- {
- .name = "AvfsDmaMem",
- .start = 0x6430E000,
- .end = 0x645D2C00 - 1, /* 945K * 3 for playback */
- .flags = IORESOURCE_IO,
- },
+ /* 945K * 3 for playback */
+ PREALLOC_NORMAL("AvfsDmaMem", 0x00000000, 0x002c4c00-1, IORESOURCE_MEM)
+
/*
- *
* PMEM
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * Persistent memory for diagnostics.
- *
*/
- {
- .name = "DiagPersistentMemory",
- .start = 0x00000000,
- .end = 0x10000 - 1,
- .flags = IORESOURCE_MEM,
- },
+ /* Persistent memory for diagnostics (64KiB) */
+ PREALLOC_PMEM("DiagPersistentMemory", 0x00000000, 0x10000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
- *
* Smartcard
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * Read and write buffers for Internal/External cards
- *
*/
- {
- .name = "SmartCardInfo",
- .start = 0x64AD1000,
- .end = 0x64AD3800 - 1,
- .flags = IORESOURCE_IO,
- },
+ /* Read and write buffers for Internal/External cards (10KiB) */
+ PREALLOC_NORMAL("SmartCardInfo", 0x00000000, 0x2800-1, IORESOURCE_MEM)
+
/*
- *
* KAVNET
- * NP Reset Vector - must be of the form xxCxxxxx
- * NP Image - must be video bank 1
- * NP IPC - must be video bank 2
+ */
+ /* NP Reset Vector - must be of the form xxCxxxxx (4KiB) */
+ PREALLOC_NORMAL("NP_Reset_Vector", 0x27c00000, 0x27c01000-1,
+ IORESOURCE_MEM)
+ /* NP Image - must be video bank 1 (320KiB) */
+ PREALLOC_NORMAL("NP_Image", 0x27020000, 0x27070000-1, IORESOURCE_MEM)
+ /* NP IPC - must be video bank 2 (512KiB) */
+ PREALLOC_NORMAL("NP_IPC", 0x63500000, 0x63580000-1, IORESOURCE_MEM)
+
+ /*
+ * NAND Flash
+ */
+ /* 1KiB */
+ PREALLOC_NORMAL("NandFlash", NAND_FLASH_BASE, NAND_FLASH_BASE+0x400-1,
+ IORESOURCE_MEM)
+
+ /*
+ * Add other resources here
+ */
+
+ /*
+ * End of Resource marker
*/
{
- .name = "NP_Reset_Vector",
- .start = 0x27c00000,
- .end = 0x27c01000 - 1,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "NP_Image",
- .start = 0x27020000,
- .end = 0x27060000 - 1,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "NP_IPC",
- .start = 0x63500000,
- .end = 0x63580000 - 1,
- .flags = IORESOURCE_IO,
+ .flags = 0,
},
- { },
};
diff --git a/arch/mips/powertv/asic/prealloc-cronuslite.c b/arch/mips/powertv/asic/prealloc-cronuslite.c
index 23a905613c04..b5537e49e7f5 100644
--- a/arch/mips/powertv/asic/prealloc-cronuslite.c
+++ b/arch/mips/powertv/asic/prealloc-cronuslite.c
@@ -22,7 +22,9 @@
*/
#include <linux/init.h>
+#include <linux/ioport.h>
#include <asm/mach-powertv/asic.h>
+#include "prealloc.h"
/*
* NON_DVR_CAPABLE CRONUSLITE RESOURCES
@@ -30,261 +32,143 @@
struct resource non_dvr_cronuslite_resources[] __initdata =
{
/*
- *
* VIDEO2 / LX2
- *
*/
- {
- .name = "ST231aImage", /* Delta-Mu 2 image and ram */
- .start = 0x60000000,
- .end = 0x601FFFFF, /* 2MiB */
- .flags = IORESOURCE_IO,
- },
- {
- .name = "ST231aMonitor", /* 8KiB block ST231b monitor */
- .start = 0x60200000,
- .end = 0x60201FFF,
- .flags = IORESOURCE_IO,
- },
- {
- .name = "MediaMemory1",
- .start = 0x60202000,
- .end = 0x61FFFFFF, /*~29.9MiB (32MiB - (2MiB + 8KiB)) */
- .flags = IORESOURCE_IO,
- },
+ /* Delta-Mu 1 image (2MiB) */
+ PREALLOC_NORMAL("ST231aImage", 0x60000000, 0x60200000-1,
+ IORESOURCE_MEM)
+ /* Delta-Mu 1 monitor (8KiB) */
+ PREALLOC_NORMAL("ST231aMonitor", 0x60200000, 0x60202000-1,
+ IORESOURCE_MEM)
+ /* Delta-Mu 1 RAM (~29.9MiB (32MiB - (2MiB + 8KiB))) */
+ PREALLOC_NORMAL("MediaMemory1", 0x60202000, 0x62000000-1,
+ IORESOURCE_MEM)
+
/*
- *
* Sysaudio Driver
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * DSP_Image_Buff - DSP code and data images (1MB)
- * ADSC_CPU_PCM_Buff - ADSC CPU PCM buffer (40KB)
- * ADSC_AUX_Buff - ADSC AUX buffer (16KB)
- * ADSC_Main_Buff - ADSC Main buffer (16KB)
- *
*/
- {
- .name = "DSP_Image_Buff",
- .start = 0x00000000,
- .end = 0x000FFFFF,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ADSC_CPU_PCM_Buff",
- .start = 0x00000000,
- .end = 0x00009FFF,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ADSC_AUX_Buff",
- .start = 0x00000000,
- .end = 0x00003FFF,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ADSC_Main_Buff",
- .start = 0x00000000,
- .end = 0x00003FFF,
- .flags = IORESOURCE_MEM,
- },
+ /* DSP code and data images (1MiB) */
+ PREALLOC_NORMAL("DSP_Image_Buff", 0x00000000, 0x00100000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+ /* ADSC CPU PCM buffer (40KiB) */
+ PREALLOC_NORMAL("ADSC_CPU_PCM_Buff", 0x00000000, 0x0000A000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+ /* ADSC AUX buffer (128KiB) */
+ PREALLOC_NORMAL("ADSC_AUX_Buff", 0x00000000, 0x00020000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+ /* ADSC Main buffer (128KiB) */
+ PREALLOC_NORMAL("ADSC_Main_Buff", 0x00000000, 0x00020000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
- *
* STAVEM driver/STAPI
*
- * This driver requires:
- *
- * Arbitrary Based Buffers:
* This memory area is used for allocating buffers for Video decoding
* purposes. Allocation/De-allocation within this buffer is managed
* by the STAVMEM driver of the STAPI. They could be Decimated
* Picture Buffers, Intermediate Buffers, as deemed necessary for
* video decoding purposes, for any video decoders on Zeus.
- *
*/
- {
- .name = "AVMEMPartition0",
- .start = 0x63580000,
- .end = 0x63B80000 - 1, /* 6 MB total */
- .flags = IORESOURCE_IO,
- },
+ /* 6MiB */
+ PREALLOC_NORMAL("AVMEMPartition0", 0x00000000, 0x00600000-1,
+ IORESOURCE_MEM)
+
/*
- *
* DOCSIS Subsystem
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * Docsis -
- *
*/
- {
- .name = "Docsis",
- .start = 0x62000000,
- .end = 0x62700000 - 1, /* 7 MB total */
- .flags = IORESOURCE_IO,
- },
+ /* 7MiB */
+ PREALLOC_DOCSIS("Docsis", 0x67500000, 0x67c00000-1, IORESOURCE_MEM)
+
/*
- *
* GHW HAL Driver
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * GraphicsHeap - PowerTV Graphics Heap
- *
*/
- {
- .name = "GraphicsHeap",
- .start = 0x62700000,
- .end = 0x63500000 - 1, /* 14 MB total */
- .flags = IORESOURCE_IO,
- },
+ /* PowerTV Graphics Heap (14MiB) */
+ PREALLOC_NORMAL("GraphicsHeap", 0x62700000, 0x63500000-1,
+ IORESOURCE_MEM)
+
/*
- *
* multi com buffer area
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * Docsis -
- *
*/
- {
- .name = "MulticomSHM",
- .start = 0x26000000,
- .end = 0x26020000 - 1,
- .flags = IORESOURCE_MEM,
- },
+ /* 128KiB */
+ PREALLOC_NORMAL("MulticomSHM", 0x26000000, 0x26020000-1,
+ IORESOURCE_MEM)
+
/*
- *
- * DMA Ring buffer
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * Docsis -
- *
+ * DMA Ring buffer (don't need recording buffers)
*/
- {
- .name = "BMM_Buffer",
- .start = 0x00000000,
- .end = 0x000AA000 - 1,
- .flags = IORESOURCE_MEM,
- },
+ /* 680KiB */
+ PREALLOC_NORMAL("BMM_Buffer", 0x00000000, 0x000AA000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
- *
* Display bins buffer for unit0
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * Display Bins for unit0
- *
*/
- {
- .name = "DisplayBins0",
- .start = 0x00000000,
- .end = 0x00000FFF, /* 4 KB total */
- .flags = IORESOURCE_MEM,
- },
+ /* 4KiB */
+ PREALLOC_NORMAL("DisplayBins0", 0x00000000, 0x00001000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
- *
- * Display bins buffer
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * Display Bins for unit1
- *
+ * Display bins buffer for unit1
*/
- {
- .name = "DisplayBins1",
- .start = 0x63B83000,
- .end = 0x63B84000 - 1, /* 4 KB total */
- .flags = IORESOURCE_IO,
- },
+ /* 4KiB */
+ PREALLOC_NORMAL("DisplayBins1", 0x00000000, 0x00001000-1,
+ IORESOURCE_MEM)
+
/*
- *
* AVFS: player HAL memory
- *
- *
*/
- {
- .name = "AvfsDmaMem",
- .start = 0x63B84000,
- .end = 0x63E48C00 - 1, /* 945K * 3 for playback */
- .flags = IORESOURCE_IO,
- },
+ /* 945K * 3 for playback */
+ PREALLOC_NORMAL("AvfsDmaMem", 0x00000000, 0x002c4c00-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
- *
* PMEM
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * Persistent memory for diagnostics.
- *
*/
- {
- .name = "DiagPersistentMemory",
- .start = 0x00000000,
- .end = 0x10000 - 1,
- .flags = IORESOURCE_MEM,
- },
+ /* Persistent memory for diagnostics (64KiB) */
+ PREALLOC_PMEM("DiagPersistentMemory", 0x00000000, 0x10000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
- *
* Smartcard
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * Read and write buffers for Internal/External cards
- *
*/
- {
- .name = "SmartCardInfo",
- .start = 0x63B80000,
- .end = 0x63B82800 - 1,
- .flags = IORESOURCE_IO,
- },
+ /* Read and write buffers for Internal/External cards (10KiB) */
+ PREALLOC_NORMAL("SmartCardInfo", 0x00000000, 0x2800-1, IORESOURCE_MEM)
+
/*
- *
* KAVNET
- * NP Reset Vector - must be of the form xxCxxxxx
- * NP Image - must be video bank 1
- * NP IPC - must be video bank 2
*/
- {
- .name = "NP_Reset_Vector",
- .start = 0x27c00000,
- .end = 0x27c01000 - 1,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "NP_Image",
- .start = 0x27020000,
- .end = 0x27060000 - 1,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "NP_IPC",
- .start = 0x63500000,
- .end = 0x63580000 - 1,
- .flags = IORESOURCE_IO,
- },
+ /* NP Reset Vector - must be of the form xxCxxxxx (4KiB) */
+ PREALLOC_NORMAL("NP_Reset_Vector", 0x27c00000, 0x27c01000-1,
+ IORESOURCE_MEM)
+ /* NP Image - must be video bank 1 (320KiB) */
+ PREALLOC_NORMAL("NP_Image", 0x27020000, 0x27070000-1, IORESOURCE_MEM)
+ /* NP IPC - must be video bank 2 (512KiB) */
+ PREALLOC_NORMAL("NP_IPC", 0x63500000, 0x63580000-1, IORESOURCE_MEM)
+
/*
* NAND Flash
*/
- {
- .name = "NandFlash",
- .start = NAND_FLASH_BASE,
- .end = NAND_FLASH_BASE + 0x400 - 1,
- .flags = IORESOURCE_IO,
- },
+ /* 1KiB */
+ PREALLOC_NORMAL("NandFlash", NAND_FLASH_BASE, NAND_FLASH_BASE+0x400-1,
+ IORESOURCE_MEM)
+
+ /*
+ * TFTPBuffer
+ *
+ * This buffer is used in some minimal configurations (e.g. two-way
+ * loader) for storing software images
+ */
+ PREALLOC_TFTP("TFTPBuffer", 0x00000000, MEBIBYTE(80)-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
* Add other resources here
*/
- { },
+
+ /*
+ * End of Resource marker
+ */
+ {
+ .flags = 0,
+ },
};
diff --git a/arch/mips/powertv/asic/prealloc-zeus.c b/arch/mips/powertv/asic/prealloc-zeus.c
index 018d4514dbe3..96480a2395c0 100644
--- a/arch/mips/powertv/asic/prealloc-zeus.c
+++ b/arch/mips/powertv/asic/prealloc-zeus.c
@@ -22,7 +22,9 @@
*/
#include <linux/init.h>
+#include <linux/ioport.h>
#include <asm/mach-powertv/asic.h>
+#include "prealloc.h"
/*
* DVR_CAPABLE RESOURCES
@@ -30,280 +32,151 @@
struct resource dvr_zeus_resources[] __initdata =
{
/*
- *
* VIDEO1 / LX1
- *
*/
- {
- .name = "ST231aImage", /* Delta-Mu 1 image and ram */
- .start = 0x20000000,
- .end = 0x201FFFFF, /* 2MiB */
- .flags = IORESOURCE_IO,
- },
- {
- .name = "ST231aMonitor", /* 8KiB block ST231a monitor */
- .start = 0x20200000,
- .end = 0x20201FFF,
- .flags = IORESOURCE_IO,
- },
- {
- .name = "MediaMemory1",
- .start = 0x20202000,
- .end = 0x21FFFFFF, /*~29.9MiB (32MiB - (2MiB + 8KiB)) */
- .flags = IORESOURCE_IO,
- },
+ /* Delta-Mu 1 image (2MiB) */
+ PREALLOC_NORMAL("ST231aImage", 0x20000000, 0x20200000-1,
+ IORESOURCE_MEM)
+ /* Delta-Mu 1 monitor (8KiB) */
+ PREALLOC_NORMAL("ST231aMonitor", 0x20200000, 0x20202000-1,
+ IORESOURCE_MEM)
+ /* Delta-Mu 1 RAM (~29.9MiB (32MiB - (2MiB + 8KiB))) */
+ PREALLOC_NORMAL("MediaMemory1", 0x20202000, 0x22000000-1,
+ IORESOURCE_MEM)
+
/*
- *
* VIDEO2 / LX2
- *
*/
- {
- .name = "ST231bImage", /* Delta-Mu 2 image and ram */
- .start = 0x30000000,
- .end = 0x301FFFFF, /* 2MiB */
- .flags = IORESOURCE_IO,
- },
- {
- .name = "ST231bMonitor", /* 8KiB block ST231b monitor */
- .start = 0x30200000,
- .end = 0x30201FFF,
- .flags = IORESOURCE_IO,
- },
- {
- .name = "MediaMemory2",
- .start = 0x30202000,
- .end = 0x31FFFFFF, /*~29.9MiB (32MiB - (2MiB + 8KiB)) */
- .flags = IORESOURCE_IO,
- },
+ /* Delta-Mu 2 image (2MiB) */
+ PREALLOC_NORMAL("ST231bImage", 0x30000000, 0x30200000-1,
+ IORESOURCE_MEM)
+ /* Delta-Mu 2 monitor (8KiB) */
+ PREALLOC_NORMAL("ST231bMonitor", 0x30200000, 0x30202000-1,
+ IORESOURCE_MEM)
+ /* Delta-Mu 2 RAM (~29.9MiB (32MiB - (2MiB + 8KiB))) */
+ PREALLOC_NORMAL("MediaMemory2", 0x30202000, 0x32000000-1,
+ IORESOURCE_MEM)
+
/*
- *
* Sysaudio Driver
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * DSP_Image_Buff - DSP code and data images (1MB)
- * ADSC_CPU_PCM_Buff - ADSC CPU PCM buffer (40KB)
- * ADSC_AUX_Buff - ADSC AUX buffer (16KB)
- * ADSC_Main_Buff - ADSC Main buffer (16KB)
- *
*/
- {
- .name = "DSP_Image_Buff",
- .start = 0x00000000,
- .end = 0x000FFFFF,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ADSC_CPU_PCM_Buff",
- .start = 0x00000000,
- .end = 0x00009FFF,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ADSC_AUX_Buff",
- .start = 0x00000000,
- .end = 0x00003FFF,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ADSC_Main_Buff",
- .start = 0x00000000,
- .end = 0x00003FFF,
- .flags = IORESOURCE_MEM,
- },
+ /* DSP code and data images (1MiB) */
+ PREALLOC_NORMAL("DSP_Image_Buff", 0x00000000, 0x00100000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+ /* ADSC CPU PCM buffer (40KiB) */
+ PREALLOC_NORMAL("ADSC_CPU_PCM_Buff", 0x00000000, 0x0000A000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+ /* ADSC AUX buffer (16KiB) */
+ PREALLOC_NORMAL("ADSC_AUX_Buff", 0x00000000, 0x00004000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+ /* ADSC Main buffer (16KiB) */
+ PREALLOC_NORMAL("ADSC_Main_Buff", 0x00000000, 0x00004000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
- *
* STAVEM driver/STAPI
*
- * This driver requires:
- *
- * Arbitrary Based Buffers:
* This memory area is used for allocating buffers for Video decoding
* purposes. Allocation/De-allocation within this buffer is managed
* by the STAVMEM driver of the STAPI. They could be Decimated
* Picture Buffers, Intermediate Buffers, as deemed necessary for
* video decoding purposes, for any video decoders on Zeus.
- *
*/
- {
- .name = "AVMEMPartition0",
- .start = 0x00000000,
- .end = 0x00c00000 - 1, /* 12 MB total */
- .flags = IORESOURCE_MEM,
- },
+ /* 12MiB */
+ PREALLOC_NORMAL("AVMEMPartition0", 0x00000000, 0x00c00000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
- *
* DOCSIS Subsystem
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * Docsis -
- *
*/
- {
- .name = "Docsis",
- .start = 0x40100000,
- .end = 0x407fffff,
- .flags = IORESOURCE_MEM,
- },
+ /* 7MiB */
+ PREALLOC_DOCSIS("Docsis", 0x40100000, 0x40800000-1, IORESOURCE_MEM)
+
/*
- *
* GHW HAL Driver
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * GraphicsHeap - PowerTV Graphics Heap
- *
*/
- {
- .name = "GraphicsHeap",
- .start = 0x46900000,
- .end = 0x47700000 - 1, /* 14 MB total */
- .flags = IORESOURCE_MEM,
- },
+ /* PowerTV Graphics Heap (14MiB) */
+ PREALLOC_NORMAL("GraphicsHeap", 0x46900000, 0x47700000-1,
+ IORESOURCE_MEM)
+
/*
- *
* multi com buffer area
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * Docsis -
- *
*/
- {
- .name = "MulticomSHM",
- .start = 0x47900000,
- .end = 0x47920000 - 1,
- .flags = IORESOURCE_MEM,
- },
+ /* 128KiB */
+ PREALLOC_NORMAL("MulticomSHM", 0x47900000, 0x47920000-1,
+ IORESOURCE_MEM)
+
/*
- *
* DMA Ring buffer
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * Docsis -
- *
*/
- {
- .name = "BMM_Buffer",
- .start = 0x00000000,
- .end = 0x00280000 - 1,
- .flags = IORESOURCE_MEM,
- },
+ /* 2.5MiB */
+ PREALLOC_NORMAL("BMM_Buffer", 0x00000000, 0x00280000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
- *
* Display bins buffer for unit0
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * Display Bins for unit0
- *
*/
- {
- .name = "DisplayBins0",
- .start = 0x00000000,
- .end = 0x00000FFF, /* 4 KB total */
- .flags = IORESOURCE_MEM,
- },
+ /* 4KiB */
+ PREALLOC_NORMAL("DisplayBins0", 0x00000000, 0x00001000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
- *
- * Display bins buffer
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * Display Bins for unit1
- *
+ * Display bins buffer for unit1
*/
- {
- .name = "DisplayBins1",
- .start = 0x00000000,
- .end = 0x00000FFF, /* 4 KB total */
- .flags = IORESOURCE_MEM,
- },
+ /* 4KiB */
+ PREALLOC_NORMAL("DisplayBins1", 0x00000000, 0x00001000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
- *
* ITFS
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * Docsis -
- *
*/
- {
- .name = "ITFS",
- .start = 0x00000000,
- /* 815,104 bytes each for 2 ITFS partitions. */
- .end = 0x0018DFFF,
- .flags = IORESOURCE_MEM,
- },
+ /* 815,104 bytes each for 2 ITFS partitions. */
+ PREALLOC_NORMAL("ITFS", 0x00000000, 0x0018E000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
- *
* AVFS
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * Docsis -
- *
*/
- {
- .name = "AvfsDmaMem",
- .start = 0x00000000,
- /* (945K * 8) = (128K * 3) 5 playbacks / 3 server */
- .end = 0x007c2000 - 1,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "AvfsFileSys",
- .start = 0x00000000,
- .end = 0x00001000 - 1, /* 4K */
- .flags = IORESOURCE_MEM,
- },
+ /* (945K * 8) = (128K * 3) 5 playbacks / 3 server */
+ PREALLOC_NORMAL("AvfsDmaMem", 0x00000000, 0x007c2000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+ /* 4KiB */
+ PREALLOC_NORMAL("AvfsFileSys", 0x00000000, 0x00001000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
- *
* PMEM
- *
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * Persistent memory for diagnostics.
- *
*/
- {
- .name = "DiagPersistentMemory",
- .start = 0x00000000,
- .end = 0x10000 - 1,
- .flags = IORESOURCE_MEM,
- },
+ /* Persistent memory for diagnostics (64KiB) */
+ PREALLOC_PMEM("DiagPersistentMemory", 0x00000000, 0x10000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
- *
* Smartcard
+ */
+ /* Read and write buffers for Internal/External cards (10KiB) */
+ PREALLOC_NORMAL("SmartCardInfo", 0x00000000, 0x2800-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
+ /*
+ * TFTPBuffer
*
- * This driver requires:
- *
- * Arbitrary Based Buffers:
- * Read and write buffers for Internal/External cards
- *
+ * This buffer is used in some minimal configurations (e.g. two-way
+ * loader) for storing software images
*/
- {
- .name = "SmartCardInfo",
- .start = 0x00000000,
- .end = 0x2800 - 1,
- .flags = IORESOURCE_MEM,
- },
+ PREALLOC_TFTP("TFTPBuffer", 0x00000000, MEBIBYTE(80)-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
* Add other resources here
*/
- { },
+
+ /*
+ * End of Resource marker
+ */
+ {
+ .flags = 0,
+ },
};
/*
@@ -314,146 +187,118 @@ struct resource non_dvr_zeus_resources[] __initdata =
/*
* VIDEO1 / LX1
*/
- {
- .name = "ST231aImage", /* Delta-Mu 1 image and ram */
- .start = 0x20000000,
- .end = 0x201FFFFF, /* 2MiB */
- .flags = IORESOURCE_IO,
- },
- {
- .name = "ST231aMonitor", /* 8KiB block ST231a monitor */
- .start = 0x20200000,
- .end = 0x20201FFF,
- .flags = IORESOURCE_IO,
- },
- {
- .name = "MediaMemory1",
- .start = 0x20202000,
- .end = 0x21FFFFFF, /*~29.9MiB (32MiB - (2MiB + 8KiB)) */
- .flags = IORESOURCE_IO,
- },
+ /* Delta-Mu 1 image (2MiB) */
+ PREALLOC_NORMAL("ST231aImage", 0x20000000, 0x20200000-1,
+ IORESOURCE_MEM)
+ /* Delta-Mu 1 monitor (8KiB) */
+ PREALLOC_NORMAL("ST231aMonitor", 0x20200000, 0x20202000-1,
+ IORESOURCE_MEM)
+ /* Delta-Mu 1 RAM (~29.9MiB (32MiB - (2MiB + 8KiB))) */
+ PREALLOC_NORMAL("MediaMemory1", 0x20202000, 0x22000000-1,
+ IORESOURCE_MEM)
+
/*
* Sysaudio Driver
*/
- {
- .name = "DSP_Image_Buff",
- .start = 0x00000000,
- .end = 0x000FFFFF,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ADSC_CPU_PCM_Buff",
- .start = 0x00000000,
- .end = 0x00009FFF,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ADSC_AUX_Buff",
- .start = 0x00000000,
- .end = 0x00003FFF,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "ADSC_Main_Buff",
- .start = 0x00000000,
- .end = 0x00003FFF,
- .flags = IORESOURCE_MEM,
- },
+ /* DSP code and data images (1MiB) */
+ PREALLOC_NORMAL("DSP_Image_Buff", 0x00000000, 0x00100000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+ /* ADSC CPU PCM buffer (40KiB) */
+ PREALLOC_NORMAL("ADSC_CPU_PCM_Buff", 0x00000000, 0x0000A000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+ /* ADSC AUX buffer (16KiB) */
+ PREALLOC_NORMAL("ADSC_AUX_Buff", 0x00000000, 0x00004000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+ /* ADSC Main buffer (16KiB) */
+ PREALLOC_NORMAL("ADSC_Main_Buff", 0x00000000, 0x00004000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
* STAVEM driver/STAPI
*/
- {
- .name = "AVMEMPartition0",
- .start = 0x00000000,
- .end = 0x00600000 - 1, /* 6 MB total */
- .flags = IORESOURCE_MEM,
- },
+ /* 6MiB */
+ PREALLOC_NORMAL("AVMEMPartition0", 0x00000000, 0x00600000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
* DOCSIS Subsystem
*/
- {
- .name = "Docsis",
- .start = 0x40100000,
- .end = 0x407fffff,
- .flags = IORESOURCE_MEM,
- },
+ /* 7MiB */
+ PREALLOC_DOCSIS("Docsis", 0x40100000, 0x40800000-1, IORESOURCE_MEM)
+
/*
* GHW HAL Driver
*/
- {
- .name = "GraphicsHeap",
- .start = 0x46900000,
- .end = 0x47700000 - 1, /* 14 MB total */
- .flags = IORESOURCE_MEM,
- },
+ /* PowerTV Graphics Heap (14MiB) */
+ PREALLOC_NORMAL("GraphicsHeap", 0x46900000, 0x47700000-1,
+ IORESOURCE_MEM)
+
/*
* multi com buffer area
*/
- {
- .name = "MulticomSHM",
- .start = 0x47900000,
- .end = 0x47920000 - 1,
- .flags = IORESOURCE_MEM,
- },
+ /* 128KiB */
+ PREALLOC_NORMAL("MulticomSHM", 0x47900000, 0x47920000-1,
+ IORESOURCE_MEM)
+
/*
* DMA Ring buffer
*/
- {
- .name = "BMM_Buffer",
- .start = 0x00000000,
- .end = 0x00280000 - 1,
- .flags = IORESOURCE_MEM,
- },
+ /* 2.5MiB */
+ PREALLOC_NORMAL("BMM_Buffer", 0x00000000, 0x00280000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
* Display bins buffer for unit0
*/
- {
- .name = "DisplayBins0",
- .start = 0x00000000,
- .end = 0x00000FFF, /* 4 KB total */
- .flags = IORESOURCE_MEM,
- },
+ /* 4KiB */
+ PREALLOC_NORMAL("DisplayBins0", 0x00000000, 0x00001000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
- *
* AVFS: player HAL memory
- *
- *
*/
- {
- .name = "AvfsDmaMem",
- .start = 0x00000000,
- .end = 0x002c4c00 - 1, /* 945K * 3 for playback */
- .flags = IORESOURCE_MEM,
- },
+ /* 945K * 3 for playback */
+ PREALLOC_NORMAL("AvfsDmaMem", 0x00000000, 0x002c4c00-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
* PMEM
*/
- {
- .name = "DiagPersistentMemory",
- .start = 0x00000000,
- .end = 0x10000 - 1,
- .flags = IORESOURCE_MEM,
- },
+ /* Persistent memory for diagnostics (64KiB) */
+ PREALLOC_PMEM("DiagPersistentMemory", 0x00000000, 0x10000-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
* Smartcard
*/
- {
- .name = "SmartCardInfo",
- .start = 0x00000000,
- .end = 0x2800 - 1,
- .flags = IORESOURCE_MEM,
- },
+ /* Read and write buffers for Internal/External cards (10KiB) */
+ PREALLOC_NORMAL("SmartCardInfo", 0x00000000, 0x2800-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
* NAND Flash
*/
- {
- .name = "NandFlash",
- .start = NAND_FLASH_BASE,
- .end = NAND_FLASH_BASE + 0x400 - 1,
- .flags = IORESOURCE_IO,
- },
+	/* 1KiB */
+ PREALLOC_NORMAL("NandFlash", NAND_FLASH_BASE, NAND_FLASH_BASE+0x400-1,
+ IORESOURCE_MEM)
+
+ /*
+ * TFTPBuffer
+ *
+ * This buffer is used in some minimal configurations (e.g. two-way
+ * loader) for storing software images
+ */
+ PREALLOC_TFTP("TFTPBuffer", 0x00000000, MEBIBYTE(80)-1,
+ (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
+
/*
* Add other resources here
*/
- { },
+
+ /*
+ * End of Resource marker
+ */
+ {
+ .flags = 0,
+ },
};
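
The explicit ".flags = 0" terminator replaces the old empty "{ }," entry; both zero-initialize the final element, so the change only documents how the end of the table is detected. A hypothetical walker (illustration only, the names below are not from the patch) would stop at the first entry whose flags are zero:

	/* Needs <linux/ioport.h> and <linux/string.h>. */
	static struct resource *find_prealloc(struct resource *table,
					      const char *name)
	{
		struct resource *r;

		for (r = table; r->flags != 0; r++)
			if (strcmp(r->name, name) == 0)
				return r;

		return NULL;	/* hit the end-of-resource marker */
	}
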
diff --git a/arch/mips/powertv/asic/prealloc.h b/arch/mips/powertv/asic/prealloc.h
new file mode 100644
index 000000000000..8e682df17856
--- /dev/null
+++ b/arch/mips/powertv/asic/prealloc.h
@@ -0,0 +1,70 @@
+/*
+ * Definitions for memory preallocations
+ *
+ * Copyright (C) 2005-2009 Scientific-Atlanta, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _ARCH_MIPS_POWERTV_ASIC_PREALLOC_H
+#define _ARCH_MIPS_POWERTV_ASIC_PREALLOC_H
+
+#define KIBIBYTE(n) ((n) * 1024) /* Number of kibibytes */
+#define MEBIBYTE(n) ((n) * KIBIBYTE(1024)) /* Number of mebibytes */
+
+/* "struct resource" array element definition */
+#define PREALLOC(NAME, START, END, FLAGS) { \
+ .name = (NAME), \
+ .start = (START), \
+ .end = (END), \
+ .flags = (FLAGS) \
+ },
+
+/* Individual resources in the preallocated resource arrays are defined using
+ * macros. These macros are conditionally defined based on their
+ * corresponding kernel configuration flag:
+ * - CONFIG_PREALLOC_NORMAL: preallocate resources for a normal settop box
+ * - CONFIG_PREALLOC_TFTP: preallocate the TFTP download resource
+ * - CONFIG_PREALLOC_DOCSIS: preallocate the DOCSIS resource
+ * - CONFIG_PREALLOC_PMEM: reserve space for persistent memory
+ */
+#ifdef CONFIG_PREALLOC_NORMAL
+#define PREALLOC_NORMAL(name, start, end, flags) \
+ PREALLOC(name, start, end, flags)
+#else
+#define PREALLOC_NORMAL(name, start, end, flags)
+#endif
+
+#ifdef CONFIG_PREALLOC_TFTP
+#define PREALLOC_TFTP(name, start, end, flags) \
+ PREALLOC(name, start, end, flags)
+#else
+#define PREALLOC_TFTP(name, start, end, flags)
+#endif
+
+#ifdef CONFIG_PREALLOC_DOCSIS
+#define PREALLOC_DOCSIS(name, start, end, flags) \
+ PREALLOC(name, start, end, flags)
+#else
+#define PREALLOC_DOCSIS(name, start, end, flags)
+#endif
+
+#ifdef CONFIG_PREALLOC_PMEM
+#define PREALLOC_PMEM(name, start, end, flags) \
+ PREALLOC(name, start, end, flags)
+#else
+#define PREALLOC_PMEM(name, start, end, flags)
+#endif
+#endif
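
To see what the conditional macros buy, take the Docsis entry from prealloc-zeus.c. With CONFIG_PREALLOC_DOCSIS=y the invocation expands to a plain struct resource initializer (the trailing comma comes from PREALLOC itself, which is why the call sites carry none); with the option unset it expands to nothing and the entry drops out of the array entirely:

	/* CONFIG_PREALLOC_DOCSIS=y: */
	PREALLOC_DOCSIS("Docsis", 0x40100000, 0x40800000-1, IORESOURCE_MEM)
	/* ... expands to ... */
	{
		.name = ("Docsis"),
		.start = (0x40100000),
		.end = (0x40800000-1),
		.flags = (IORESOURCE_MEM)
	},

	/* CONFIG_PREALLOC_DOCSIS unset: the same invocation expands to nothing. */
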
diff --git a/arch/mips/sibyte/bcm1480/irq.c b/arch/mips/sibyte/bcm1480/irq.c
index 7a8b0a8b643a..044bbe462c2c 100644
--- a/arch/mips/sibyte/bcm1480/irq.c
+++ b/arch/mips/sibyte/bcm1480/irq.c
@@ -253,7 +253,7 @@ void __init init_bcm1480_irqs(void)
* On the second cpu, everything is set to IP5, which is
* ignored, EXCEPT the mailbox interrupt. That one is
* set to IP[2] so it is handled. This is needed so we
- * can do cross-cpu function calls, as requred by SMP
+ * can do cross-cpu function calls, as required by SMP
*/
#define IMR_IP2_VAL K_BCM1480_INT_MAP_I0
diff --git a/arch/mips/sibyte/sb1250/irq.c b/arch/mips/sibyte/sb1250/irq.c
index 62371f772553..12ac04a658ee 100644
--- a/arch/mips/sibyte/sb1250/irq.c
+++ b/arch/mips/sibyte/sb1250/irq.c
@@ -236,7 +236,7 @@ void __init init_sb1250_irqs(void)
* On the second cpu, everything is set to IP5, which is
* ignored, EXCEPT the mailbox interrupt. That one is
* set to IP[2] so it is handled. This is needed so we
- * can do cross-cpu function calls, as requred by SMP
+ * can do cross-cpu function calls, as required by SMP
*/
#define IMR_IP2_VAL K_INT_MAP_I0
diff --git a/arch/mips/sibyte/swarm/platform.c b/arch/mips/sibyte/swarm/platform.c
index 54847fe1e564..097335262fb3 100644
--- a/arch/mips/sibyte/swarm/platform.c
+++ b/arch/mips/sibyte/swarm/platform.c
@@ -83,3 +83,57 @@ static int __init swarm_pata_init(void)
device_initcall(swarm_pata_init);
#endif /* defined(CONFIG_SIBYTE_SWARM) || defined(CONFIG_SIBYTE_LITTLESUR) */
+
+#define sb1250_dev_struct(num) \
+ static struct resource sb1250_res##num = { \
+ .name = "SB1250 MAC " __stringify(num), \
+ .flags = IORESOURCE_MEM, \
+ .start = A_MAC_CHANNEL_BASE(num), \
+ .end = A_MAC_CHANNEL_BASE(num + 1) -1, \
+ };\
+ static struct platform_device sb1250_dev##num = { \
+ .name = "sb1250-mac", \
+ .id = num, \
+ .resource = &sb1250_res##num, \
+ .num_resources = 1, \
+ }
+
+sb1250_dev_struct(0);
+sb1250_dev_struct(1);
+sb1250_dev_struct(2);
+sb1250_dev_struct(3);
+
+static struct platform_device *sb1250_devs[] __initdata = {
+ &sb1250_dev0,
+ &sb1250_dev1,
+ &sb1250_dev2,
+ &sb1250_dev3,
+};
+
+static int __init sb1250_device_init(void)
+{
+ int ret;
+
+ /* Set the number of available units based on the SOC type. */
+ switch (soc_type) {
+ case K_SYS_SOC_TYPE_BCM1250:
+ case K_SYS_SOC_TYPE_BCM1250_ALT:
+ ret = platform_add_devices(sb1250_devs, 3);
+ break;
+ case K_SYS_SOC_TYPE_BCM1120:
+ case K_SYS_SOC_TYPE_BCM1125:
+ case K_SYS_SOC_TYPE_BCM1125H:
+ case K_SYS_SOC_TYPE_BCM1250_ALT2: /* Hybrid */
+ ret = platform_add_devices(sb1250_devs, 2);
+ break;
+ case K_SYS_SOC_TYPE_BCM1x55:
+ case K_SYS_SOC_TYPE_BCM1x80:
+ ret = platform_add_devices(sb1250_devs, 4);
+ break;
+ default:
+ ret = -ENODEV;
+ break;
+ }
+ return ret;
+}
+device_initcall(sb1250_device_init);
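
For reference, the token pasting and __stringify() in sb1250_dev_struct() generate one resource/platform_device pair per MAC; sb1250_dev_struct(0) expands to roughly the following (A_MAC_CHANNEL_BASE() comes from the SiByte register headers):

	static struct resource sb1250_res0 = {
		.name = "SB1250 MAC 0",	/* "SB1250 MAC " __stringify(0) */
		.flags = IORESOURCE_MEM,
		.start = A_MAC_CHANNEL_BASE(0),
		.end = A_MAC_CHANNEL_BASE(0 + 1) - 1,
	};
	static struct platform_device sb1250_dev0 = {
		.name = "sb1250-mac",
		.id = 0,
		.resource = &sb1250_res0,
		.num_resources = 1,
	};
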
diff --git a/arch/mips/txx9/generic/setup.c b/arch/mips/txx9/generic/setup.c
index adc69291f9e2..575d219b8001 100644
--- a/arch/mips/txx9/generic/setup.c
+++ b/arch/mips/txx9/generic/setup.c
@@ -905,7 +905,7 @@ struct txx9_sramc_sysdev {
void __iomem *base;
};
-static ssize_t txx9_sram_read(struct kobject *kobj,
+static ssize_t txx9_sram_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t size)
{
@@ -920,7 +920,7 @@ static ssize_t txx9_sram_read(struct kobject *kobj,
return size;
}
-static ssize_t txx9_sram_write(struct kobject *kobj,
+static ssize_t txx9_sram_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t size)
{
diff --git a/arch/mn10300/Kconfig b/arch/mn10300/Kconfig
index 89faacad5d17..1c4565a9102b 100644
--- a/arch/mn10300/Kconfig
+++ b/arch/mn10300/Kconfig
@@ -37,6 +37,9 @@ config GENERIC_HARDIRQS_NO__DO_IRQ
config GENERIC_CALIBRATE_DELAY
def_bool y
+config GENERIC_CMOS_UPDATE
+ def_bool y
+
config GENERIC_FIND_NEXT_BIT
def_bool y
diff --git a/arch/mn10300/include/asm/atomic.h b/arch/mn10300/include/asm/atomic.h
index 5bf5be9566de..f0cc1f84a72f 100644
--- a/arch/mn10300/include/asm/atomic.h
+++ b/arch/mn10300/include/asm/atomic.h
@@ -1,157 +1 @@
-/* MN10300 Atomic counter operations
- *
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
- */
-#ifndef _ASM_ATOMIC_H
-#define _ASM_ATOMIC_H
-
-#ifdef CONFIG_SMP
-#error not SMP safe
-#endif
-
-/*
- * Atomic operations that C can't guarantee us. Useful for
- * resource counting etc..
- */
-
-#define ATOMIC_INIT(i) { (i) }
-
-#ifdef __KERNEL__
-
-/**
- * atomic_read - read atomic variable
- * @v: pointer of type atomic_t
- *
- * Atomically reads the value of @v. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
- */
-#define atomic_read(v) ((v)->counter)
-
-/**
- * atomic_set - set atomic variable
- * @v: pointer of type atomic_t
- * @i: required value
- *
- * Atomically sets the value of @v to @i. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
- */
-#define atomic_set(v, i) (((v)->counter) = (i))
-
-#include <asm/system.h>
-
-/**
- * atomic_add_return - add integer to atomic variable
- * @i: integer value to add
- * @v: pointer of type atomic_t
- *
- * Atomically adds @i to @v and returns the result
- * Note that the guaranteed useful range of an atomic_t is only 24 bits.
- */
-static inline int atomic_add_return(int i, atomic_t *v)
-{
- unsigned long flags;
- int temp;
-
- local_irq_save(flags);
- temp = v->counter;
- temp += i;
- v->counter = temp;
- local_irq_restore(flags);
-
- return temp;
-}
-
-/**
- * atomic_sub_return - subtract integer from atomic variable
- * @i: integer value to subtract
- * @v: pointer of type atomic_t
- *
- * Atomically subtracts @i from @v and returns the result
- * Note that the guaranteed useful range of an atomic_t is only 24 bits.
- */
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
- unsigned long flags;
- int temp;
-
- local_irq_save(flags);
- temp = v->counter;
- temp -= i;
- v->counter = temp;
- local_irq_restore(flags);
-
- return temp;
-}
-
-static inline int atomic_add_negative(int i, atomic_t *v)
-{
- return atomic_add_return(i, v) < 0;
-}
-
-static inline void atomic_add(int i, atomic_t *v)
-{
- atomic_add_return(i, v);
-}
-
-static inline void atomic_sub(int i, atomic_t *v)
-{
- atomic_sub_return(i, v);
-}
-
-static inline void atomic_inc(atomic_t *v)
-{
- atomic_add_return(1, v);
-}
-
-static inline void atomic_dec(atomic_t *v)
-{
- atomic_sub_return(1, v);
-}
-
-#define atomic_dec_return(v) atomic_sub_return(1, (v))
-#define atomic_inc_return(v) atomic_add_return(1, (v))
-
-#define atomic_sub_and_test(i, v) (atomic_sub_return((i), (v)) == 0)
-#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
-#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
-
-#define atomic_add_unless(v, a, u) \
-({ \
- int c, old; \
- c = atomic_read(v); \
- while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
- c = old; \
- c != (u); \
-})
-
-#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
-
-static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
-{
- unsigned long flags;
-
- mask = ~mask;
- local_irq_save(flags);
- *addr &= mask;
- local_irq_restore(flags);
-}
-
-#define atomic_xchg(ptr, v) (xchg(&(ptr)->counter, (v)))
-#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
-
-/* Atomic operations are already serializing on MN10300??? */
-#define smp_mb__before_atomic_dec() barrier()
-#define smp_mb__after_atomic_dec() barrier()
-#define smp_mb__before_atomic_inc() barrier()
-#define smp_mb__after_atomic_inc() barrier()
-
-#include <asm-generic/atomic-long.h>
-
-#endif /* __KERNEL__ */
-#endif /* _ASM_ATOMIC_H */
+#include <asm-generic/atomic.h>
diff --git a/arch/mn10300/include/asm/cache.h b/arch/mn10300/include/asm/cache.h
index e03cfa2e997e..6e2fe28dde4e 100644
--- a/arch/mn10300/include/asm/cache.h
+++ b/arch/mn10300/include/asm/cache.h
@@ -21,6 +21,8 @@
#define L1_CACHE_DISPARITY L1_CACHE_NENTRIES * L1_CACHE_BYTES
#endif
+#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
+
/* data cache purge registers
* - read from the register to unconditionally purge that cache line
* - write address & 0xffffff00 to conditionally purge that cache line
diff --git a/arch/mn10300/include/asm/scatterlist.h b/arch/mn10300/include/asm/scatterlist.h
index 67535901b9ff..7bd00b9e030d 100644
--- a/arch/mn10300/include/asm/scatterlist.h
+++ b/arch/mn10300/include/asm/scatterlist.h
@@ -11,45 +11,8 @@
#ifndef _ASM_SCATTERLIST_H
#define _ASM_SCATTERLIST_H
-#include <asm/types.h>
-
-/*
- * Drivers must set either ->address or (preferred) page and ->offset
- * to indicate where data must be transferred to/from.
- *
- * Using page is recommended since it handles highmem data as well as
- * low mem. ->address is restricted to data which has a virtual mapping, and
- * it will go away in the future. Updating to page can be automated very
- * easily -- something like
- *
- * sg->address = some_ptr;
- *
- * can be rewritten as
- *
- * sg_set_page(virt_to_page(some_ptr));
- * sg->offset = (unsigned long) some_ptr & ~PAGE_MASK;
- *
- * and that's it. There's no excuse for not highmem enabling YOUR driver. /jens
- */
-struct scatterlist {
-#ifdef CONFIG_DEBUG_SG
- unsigned long sg_magic;
-#endif
- unsigned long page_link;
- unsigned int offset; /* for highmem, page offset */
- dma_addr_t dma_address;
- unsigned int length;
-};
+#include <asm-generic/scatterlist.h>
#define ISA_DMA_THRESHOLD (0x00ffffff)
-/*
- * These macros should be used after a pci_map_sg call has been done
- * to get bus addresses of each of the SG entries and their lengths.
- * You should only work with the number of sg entries pci_map_sg
- * returns.
- */
-#define sg_dma_address(sg) ((sg)->dma_address)
-#define sg_dma_len(sg) ((sg)->length)
-
#endif /* _ASM_SCATTERLIST_H */
diff --git a/arch/mn10300/include/asm/thread_info.h b/arch/mn10300/include/asm/thread_info.h
index 58d64f8b2cc3..2001cb657a95 100644
--- a/arch/mn10300/include/asm/thread_info.h
+++ b/arch/mn10300/include/asm/thread_info.h
@@ -148,7 +148,7 @@ static inline unsigned long current_stack_pointer(void)
#define TIF_SINGLESTEP 4 /* restore singlestep on return to user mode */
#define TIF_RESTORE_SIGMASK 5 /* restore signal mask in do_signal() */
#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */
-#define TIF_MEMDIE 17 /* OOM killer killed process */
+#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
#define TIF_FREEZE 18 /* freezing for suspend */
#define _TIF_SYSCALL_TRACE +(1 << TIF_SYSCALL_TRACE)
diff --git a/arch/mn10300/kernel/rtc.c b/arch/mn10300/kernel/rtc.c
index 7978470b5749..815a933aafa8 100644
--- a/arch/mn10300/kernel/rtc.c
+++ b/arch/mn10300/kernel/rtc.c
@@ -26,17 +26,15 @@ static long last_rtc_update;
/* time for RTC to update itself in ioclks */
static unsigned long mn10300_rtc_update_period;
-/*
- * read the current RTC time
- */
-unsigned long __init get_initial_rtc_time(void)
+void read_persistent_clock(struct timespec *ts)
{
struct rtc_time tm;
get_rtc_time(&tm);
- return mktime(tm.tm_year, tm.tm_mon, tm.tm_mday,
+ ts->tv_sec = mktime(tm.tm_year, tm.tm_mon, tm.tm_mday,
tm.tm_hour, tm.tm_min, tm.tm_sec);
+ ts->tv_nsec = 0;
}
/*
@@ -110,24 +108,9 @@ static int set_rtc_mmss(unsigned long nowtime)
return retval;
}
-void check_rtc_time(void)
+int update_persistent_clock(struct timespec now)
{
- /* the RTC clock just finished ticking over again this second
- * - if we have an externally synchronized Linux clock, then update
- * RTC clock accordingly every ~11 minutes. set_rtc_mmss() has to be
- * called as close as possible to 500 ms before the new second starts.
- */
- if ((time_status & STA_UNSYNC) == 0 &&
- xtime.tv_sec > last_rtc_update + 660 &&
- xtime.tv_nsec / 1000 >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
- xtime.tv_nsec / 1000 <= 500000 + ((unsigned) TICK_SIZE) / 2
- ) {
- if (set_rtc_mmss(xtime.tv_sec) == 0)
- last_rtc_update = xtime.tv_sec;
- else
- /* do it again in 60s */
- last_rtc_update = xtime.tv_sec - 600;
- }
+	return set_rtc_mmss(now.tv_sec);
}
/*
diff --git a/arch/mn10300/kernel/time.c b/arch/mn10300/kernel/time.c
index 395caf01b909..8f7f6d22783d 100644
--- a/arch/mn10300/kernel/time.c
+++ b/arch/mn10300/kernel/time.c
@@ -111,7 +111,6 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id)
/* advance the kernel's time tracking system */
profile_tick(CPU_PROFILING);
do_timer(1);
- check_rtc_time();
}
write_sequnlock(&xtime_lock);
@@ -139,9 +138,6 @@ void __init time_init(void)
" (calibrated against RTC)\n",
MN10300_TSCCLK / 1000000, (MN10300_TSCCLK / 10000) % 100);
- xtime.tv_sec = get_initial_rtc_time();
- xtime.tv_nsec = 0;
-
mn10300_last_tsc = TMTSCBC;
/* use timer 0 & 1 cascaded to tick at as close to HZ as possible */
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 9c4da3d63bfb..05a366a5c4d5 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -98,6 +98,9 @@ config STACKTRACE_SUPPORT
config NEED_DMA_MAP_STATE
def_bool y
+config NEED_SG_DMA_LENGTH
+ def_bool y
+
config ISA_DMA_API
bool
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
index 716634d1f546..f81955934aeb 100644
--- a/arch/parisc/include/asm/atomic.h
+++ b/arch/parisc/include/asm/atomic.h
@@ -189,7 +189,7 @@ static __inline__ void atomic_set(atomic_t *v, int i)
static __inline__ int atomic_read(const atomic_t *v)
{
- return v->counter;
+ return (*(volatile int *)&(v)->counter);
}
/* exported interface */
@@ -286,7 +286,7 @@ atomic64_set(atomic64_t *v, s64 i)
static __inline__ s64
atomic64_read(const atomic64_t *v)
{
- return v->counter;
+ return (*(volatile long *)&(v)->counter);
}
#define atomic64_add(i,v) ((void)(__atomic64_add_return( ((s64)(i)),(v))))
diff --git a/arch/parisc/include/asm/bug.h b/arch/parisc/include/asm/bug.h
index 75e46c557a16..72cfdb0cfdd1 100644
--- a/arch/parisc/include/asm/bug.h
+++ b/arch/parisc/include/asm/bug.h
@@ -44,7 +44,7 @@
#endif
#ifdef CONFIG_DEBUG_BUGVERBOSE
-#define __WARN() \
+#define __WARN_TAINT(taint) \
do { \
asm volatile("\n" \
"1:\t" PARISC_BUG_BREAK_ASM "\n" \
@@ -54,11 +54,11 @@
"\t.org 2b+%c3\n" \
"\t.popsection" \
: : "i" (__FILE__), "i" (__LINE__), \
- "i" (BUGFLAG_WARNING), \
+ "i" (BUGFLAG_TAINT(taint)), \
"i" (sizeof(struct bug_entry)) ); \
} while(0)
#else
-#define __WARN() \
+#define __WARN_TAINT(taint) \
do { \
asm volatile("\n" \
"1:\t" PARISC_BUG_BREAK_ASM "\n" \
@@ -67,7 +67,7 @@
"\t.short %c0\n" \
"\t.org 2b+%c1\n" \
"\t.popsection" \
- : : "i" (BUGFLAG_WARNING), \
+ : : "i" (BUGFLAG_TAINT(taint)), \
"i" (sizeof(struct bug_entry)) ); \
} while(0)
#endif
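
The __WARN() to __WARN_TAINT() rename tracks a generic-code change that threads a taint flag through the warning path; in <asm-generic/bug.h> of this era the old entry point survives as a thin wrapper (sketch from memory, not part of this patch):

	#define __WARN()	__WARN_TAINT(TAINT_WARN)
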
diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
index 477277739da5..4556d820128a 100644
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -2,6 +2,7 @@
#define _PARISC_CACHEFLUSH_H
#include <linux/mm.h>
+#include <linux/uaccess.h>
/* The usual comment is "Caches aren't brain-dead on the <architecture>".
* Unfortunately, that doesn't apply to PA-RISC. */
@@ -125,11 +126,20 @@ static inline void *kmap(struct page *page)
#define kunmap(page) kunmap_parisc(page_address(page))
-#define kmap_atomic(page, idx) page_address(page)
+static inline void *kmap_atomic(struct page *page, enum km_type idx)
+{
+ pagefault_disable();
+ return page_address(page);
+}
-#define kunmap_atomic(addr, idx) kunmap_parisc(addr)
+static inline void kunmap_atomic(void *addr, enum km_type idx)
+{
+ kunmap_parisc(addr);
+ pagefault_enable();
+}
-#define kmap_atomic_pfn(pfn, idx) page_address(pfn_to_page(pfn))
+#define kmap_atomic_prot(page, idx, prot) kmap_atomic(page, idx)
+#define kmap_atomic_pfn(pfn, idx) kmap_atomic(pfn_to_page(pfn), (idx))
#define kmap_atomic_to_page(ptr) virt_to_page(ptr)
#endif
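
Turning kmap_atomic()/kunmap_atomic() into inline functions means that even on this no-highmem architecture an atomic mapping disables pagefaults, as callers expect. A minimal usage sketch, assuming the km_type-era calling convention (the helper name is hypothetical):

	#include <linux/highmem.h>
	#include <linux/string.h>

	/* Hypothetical helper: copy the first len bytes of a page. */
	static void copy_first_bytes(struct page *page, void *dst, size_t len)
	{
		void *vaddr = kmap_atomic(page, KM_USER0); /* pagefaults disabled */

		memcpy(dst, vaddr, len);
		kunmap_atomic(vaddr, KM_USER0);		   /* pagefaults re-enabled */
	}
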
diff --git a/arch/parisc/include/asm/scatterlist.h b/arch/parisc/include/asm/scatterlist.h
index 62269b31ebf4..2c3b79b54b28 100644
--- a/arch/parisc/include/asm/scatterlist.h
+++ b/arch/parisc/include/asm/scatterlist.h
@@ -3,25 +3,9 @@
#include <asm/page.h>
#include <asm/types.h>
-
-struct scatterlist {
-#ifdef CONFIG_DEBUG_SG
- unsigned long sg_magic;
-#endif
- unsigned long page_link;
- unsigned int offset;
-
- unsigned int length;
-
- /* an IOVA can be 64-bits on some PA-Risc platforms. */
- dma_addr_t iova; /* I/O Virtual Address */
- __u32 iova_length; /* bytes mapped */
-};
-
-#define sg_virt_addr(sg) ((unsigned long)sg_virt(sg))
-#define sg_dma_address(sg) ((sg)->iova)
-#define sg_dma_len(sg) ((sg)->iova_length)
+#include <asm-generic/scatterlist.h>
#define ISA_DMA_THRESHOLD (~0UL)
+#define sg_virt_addr(sg) ((unsigned long)sg_virt(sg))
#endif /* _ASM_PARISC_SCATTERLIST_H */
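
This conversion is why NEED_SG_DMA_LENGTH is added to arch/parisc/Kconfig earlier in this patch: the generic scatterlist only carries a separate DMA length field, the replacement for parisc's old iova_length, when that symbol is set. Roughly, from memory (a sketch of <asm-generic/scatterlist.h> for this kernel era, not quoted from the patch):

	struct scatterlist {
	#ifdef CONFIG_DEBUG_SG
		unsigned long	sg_magic;
	#endif
		unsigned long	page_link;
		unsigned int	offset;
		unsigned int	length;
		dma_addr_t	dma_address;
	#ifdef CONFIG_NEED_SG_DMA_LENGTH
		unsigned int	dma_length;
	#endif
	};

	#define sg_dma_address(sg)	((sg)->dma_address)
	#ifdef CONFIG_NEED_SG_DMA_LENGTH
	#define sg_dma_len(sg)		((sg)->dma_length)
	#else
	#define sg_dma_len(sg)		((sg)->length)
	#endif
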
diff --git a/arch/parisc/include/asm/thread_info.h b/arch/parisc/include/asm/thread_info.h
index 7ecc1039cfed..aa8de727e90b 100644
--- a/arch/parisc/include/asm/thread_info.h
+++ b/arch/parisc/include/asm/thread_info.h
@@ -56,7 +56,7 @@ struct thread_info {
#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
#define TIF_POLLING_NRFLAG 3 /* true if poll_idle() is polling TIF_NEED_RESCHED */
#define TIF_32BIT 4 /* 32 bit binary */
-#define TIF_MEMDIE 5
+#define TIF_MEMDIE 5 /* is terminating due to OOM killer */
#define TIF_RESTORE_SIGMASK 6 /* restore saved signal mask */
#define TIF_FREEZE 7 /* is freezing for suspend */
#define TIF_NOTIFY_RESUME 8 /* callback before returning to user */
diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c
index ec787b411e9a..dcd55103a4bb 100644
--- a/arch/parisc/kernel/asm-offsets.c
+++ b/arch/parisc/kernel/asm-offsets.c
@@ -45,8 +45,12 @@
#else
#define FRAME_SIZE 64
#endif
+#define FRAME_ALIGN 64
-#define align(x,y) (((x)+FRAME_SIZE+(y)-1) - (((x)+(y)-1)%(y)))
+/* Add FRAME_SIZE to the size x and align it to y. All definitions
+ * that use align_frame will include space for a frame.
+ */
+#define align_frame(x,y) (((x)+FRAME_SIZE+(y)-1) - (((x)+(y)-1)%(y)))
int main(void)
{
@@ -146,7 +150,8 @@ int main(void)
DEFINE(TASK_PT_IOR, offsetof(struct task_struct, thread.regs.ior));
BLANK();
DEFINE(TASK_SZ, sizeof(struct task_struct));
- DEFINE(TASK_SZ_ALGN, align(sizeof(struct task_struct), 64));
+ /* TASK_SZ_ALGN includes space for a stack frame. */
+ DEFINE(TASK_SZ_ALGN, align_frame(sizeof(struct task_struct), FRAME_ALIGN));
BLANK();
DEFINE(PT_PSW, offsetof(struct pt_regs, gr[ 0]));
DEFINE(PT_GR1, offsetof(struct pt_regs, gr[ 1]));
@@ -233,7 +238,8 @@ int main(void)
DEFINE(PT_ISR, offsetof(struct pt_regs, isr));
DEFINE(PT_IOR, offsetof(struct pt_regs, ior));
DEFINE(PT_SIZE, sizeof(struct pt_regs));
- DEFINE(PT_SZ_ALGN, align(sizeof(struct pt_regs), 64));
+ /* PT_SZ_ALGN includes space for a stack frame. */
+ DEFINE(PT_SZ_ALGN, align_frame(sizeof(struct pt_regs), FRAME_ALIGN));
BLANK();
DEFINE(TI_TASK, offsetof(struct thread_info, task));
DEFINE(TI_EXEC_DOMAIN, offsetof(struct thread_info, exec_domain));
@@ -242,7 +248,8 @@ int main(void)
DEFINE(TI_SEGMENT, offsetof(struct thread_info, addr_limit));
DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count));
DEFINE(THREAD_SZ, sizeof(struct thread_info));
- DEFINE(THREAD_SZ_ALGN, align(sizeof(struct thread_info), 64));
+ /* THREAD_SZ_ALGN includes space for a stack frame. */
+ DEFINE(THREAD_SZ_ALGN, align_frame(sizeof(struct thread_info), FRAME_ALIGN));
BLANK();
DEFINE(ICACHE_BASE, offsetof(struct pdc_cache_info, ic_base));
DEFINE(ICACHE_STRIDE, offsetof(struct pdc_cache_info, ic_stride));
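
A worked example of the renamed macro, assuming the 32-bit FRAME_SIZE of 64 shown above: align_frame(x, y) rounds x up to a multiple of y and then adds one stack frame.

	/* Illustration only, with FRAME_SIZE == 64 and FRAME_ALIGN == 64:
	 *
	 *   align_frame(420, 64)
	 *     = (420 + 64 + 63) - ((420 + 63) % 64)
	 *     = 547 - 35
	 *     = 512
	 *
	 * i.e. 420 rounded up to 448, plus a 64-byte frame.
	 */
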
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index 3a44f7f704fa..6337adef30f6 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -364,32 +364,6 @@
.align 32
.endm
- /* The following are simple 32 vs 64 bit instruction
- * abstractions for the macros */
- .macro EXTR reg1,start,length,reg2
-#ifdef CONFIG_64BIT
- extrd,u \reg1,32+(\start),\length,\reg2
-#else
- extrw,u \reg1,\start,\length,\reg2
-#endif
- .endm
-
- .macro DEP reg1,start,length,reg2
-#ifdef CONFIG_64BIT
- depd \reg1,32+(\start),\length,\reg2
-#else
- depw \reg1,\start,\length,\reg2
-#endif
- .endm
-
- .macro DEPI val,start,length,reg
-#ifdef CONFIG_64BIT
- depdi \val,32+(\start),\length,\reg
-#else
- depwi \val,\start,\length,\reg
-#endif
- .endm
-
/* In LP64, the space contains part of the upper 32 bits of the
* fault. We have to extract this and place it in the va,
* zeroing the corresponding bits in the space register */
@@ -442,19 +416,19 @@
*/
.macro L2_ptep pmd,pte,index,va,fault
#if PT_NLEVELS == 3
- EXTR \va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
+ extru \va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
#else
- EXTR \va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
+ extru \va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
#endif
- DEP %r0,31,PAGE_SHIFT,\pmd /* clear offset */
+ dep %r0,31,PAGE_SHIFT,\pmd /* clear offset */
copy %r0,\pte
ldw,s \index(\pmd),\pmd
bb,>=,n \pmd,_PxD_PRESENT_BIT,\fault
- DEP %r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
+ dep %r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
copy \pmd,%r9
SHLREG %r9,PxD_VALUE_SHIFT,\pmd
- EXTR \va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
- DEP %r0,31,PAGE_SHIFT,\pmd /* clear offset */
+ extru \va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
+ dep %r0,31,PAGE_SHIFT,\pmd /* clear offset */
shladd \index,BITS_PER_PTE_ENTRY,\pmd,\pmd
LDREG %r0(\pmd),\pte /* pmd is now pte */
bb,>=,n \pte,_PAGE_PRESENT_BIT,\fault
@@ -605,7 +579,7 @@
depdi 0,31,32,\tmp
#endif
copy \va,\tmp1
- DEPI 0,31,23,\tmp1
+ depi 0,31,23,\tmp1
cmpb,COND(<>),n \tmp,\tmp1,\fault
ldi (_PAGE_DIRTY|_PAGE_WRITE|_PAGE_READ),\prot
depd,z \prot,8,7,\prot
@@ -997,13 +971,6 @@ intr_restore:
rfi
nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
#ifndef CONFIG_PREEMPT
# define intr_do_preempt intr_restore
@@ -2076,9 +2043,10 @@ syscall_restore:
LDREG TASK_PT_GR31(%r1),%r31 /* restore syscall rp */
/* NOTE: We use rsm/ssm pair to make this operation atomic */
+ LDREG TASK_PT_GR30(%r1),%r1 /* Get user sp */
rsm PSW_SM_I, %r0
- LDREG TASK_PT_GR30(%r1),%r30 /* restore user sp */
- mfsp %sr3,%r1 /* Get users space id */
+ copy %r1,%r30 /* Restore user sp */
+ mfsp %sr3,%r1 /* Get user space id */
mtsp %r1,%sr7 /* Restore sr7 */
ssm PSW_SM_I, %r0
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index f5f96021caa0..68e75ce838d6 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -47,18 +47,17 @@ ENTRY(linux_gateway_page)
KILL_INSN
.endr
- /* ADDRESS 0xb0 to 0xb4, lws uses 1 insns for entry */
+ /* ADDRESS 0xb0 to 0xb8, lws uses two insns for entry */
/* Light-weight-syscall entry must always be located at 0xb0 */
/* WARNING: Keep this number updated with table size changes */
#define __NR_lws_entries (2)
lws_entry:
- /* Unconditional branch to lws_start, located on the
- same gateway page */
- b,n lws_start
+ gate lws_start, %r0 /* increase privilege */
+ depi 3, 31, 2, %r31 /* Ensure we return into user mode. */
- /* Fill from 0xb4 to 0xe0 */
- .rept 11
+ /* Fill from 0xb8 to 0xe0 */
+ .rept 10
KILL_INSN
.endr
@@ -423,9 +422,6 @@ tracesys_sigexit:
*********************************************************/
lws_start:
- /* Gate and ensure we return to userspace */
- gate .+8, %r0
- depi 3, 31, 2, %r31 /* Ensure we return to userspace */
#ifdef CONFIG_64BIT
/* FIXME: If we are a 64-bit kernel just
@@ -442,7 +438,7 @@ lws_start:
#endif
/* Is the lws entry number valid? */
- comiclr,>>= __NR_lws_entries, %r20, %r0
+ comiclr,>> __NR_lws_entries, %r20, %r0
b,n lws_exit_nosys
/* WARNING: Trashing sr2 and sr3 */
@@ -473,7 +469,7 @@ lws_exit:
/* now reset the lowest bit of sp if it was set */
xor %r30,%r1,%r30
#endif
- be,n 0(%sr3, %r31)
+ be,n 0(%sr7, %r31)
@@ -529,7 +525,6 @@ lws_compare_and_swap32:
#endif
lws_compare_and_swap:
-#ifdef CONFIG_SMP
/* Load start of lock table */
ldil L%lws_lock_start, %r20
ldo R%lws_lock_start(%r20), %r28
@@ -572,8 +567,6 @@ cas_wouldblock:
ldo 2(%r0), %r28 /* 2nd case */
b lws_exit /* Contended... */
ldo -EAGAIN(%r0), %r21 /* Spin in userspace */
-#endif
-/* CONFIG_SMP */
/*
prev = *addr;
@@ -601,13 +594,11 @@ cas_action:
1: ldw 0(%sr3,%r26), %r28
sub,<> %r28, %r25, %r0
2: stw %r24, 0(%sr3,%r26)
-#ifdef CONFIG_SMP
/* Free lock */
stw %r20, 0(%sr2,%r20)
-# if ENABLE_LWS_DEBUG
+#if ENABLE_LWS_DEBUG
/* Clear thread register indicator */
stw %r0, 4(%sr2,%r20)
-# endif
#endif
/* Return to userspace, set no error */
b lws_exit
@@ -615,12 +606,10 @@ cas_action:
3:
/* Error occured on load or store */
-#ifdef CONFIG_SMP
/* Free lock */
stw %r20, 0(%sr2,%r20)
-# if ENABLE_LWS_DEBUG
+#if ENABLE_LWS_DEBUG
stw %r0, 4(%sr2,%r20)
-# endif
#endif
b lws_exit
ldo -EFAULT(%r0),%r21 /* set errno */
@@ -672,7 +661,6 @@ ENTRY(sys_call_table64)
END(sys_call_table64)
#endif
-#ifdef CONFIG_SMP
/*
All light-weight-syscall atomic operations
will use this set of locks
@@ -694,8 +682,6 @@ ENTRY(lws_lock_start)
.endr
END(lws_lock_start)
.previous
-#endif
-/* CONFIG_SMP for lws_lock_start */
.end
diff --git a/arch/parisc/math-emu/decode_exc.c b/arch/parisc/math-emu/decode_exc.c
index 3ca1c6149218..27a7492ddb0d 100644
--- a/arch/parisc/math-emu/decode_exc.c
+++ b/arch/parisc/math-emu/decode_exc.c
@@ -342,6 +342,7 @@ decode_fpu(unsigned int Fpu_register[], unsigned int trap_counts[])
return SIGNALCODE(SIGFPE, FPE_FLTINV);
case DIVISIONBYZEROEXCEPTION:
update_trap_counts(Fpu_register, aflags, bflags, trap_counts);
+ Clear_excp_register(exception_index);
return SIGNALCODE(SIGFPE, FPE_FLTDIV);
case INEXACTEXCEPTION:
update_trap_counts(Fpu_register, aflags, bflags, trap_counts);
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index c6afbfc95770..18162ce4261e 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -264,8 +264,7 @@ no_context:
out_of_memory:
up_read(&mm->mmap_sem);
- printk(KERN_CRIT "VM: killing process %s\n", current->comm);
- if (user_mode(regs))
- do_group_exit(SIGKILL);
- goto no_context;
+ if (!user_mode(regs))
+ goto no_context;
+ pagefault_out_of_memory();
}
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 2e19500921f9..66a315e06dce 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -140,6 +140,7 @@ config PPC
select HAVE_SYSCALL_WRAPPERS if PPC64
select GENERIC_ATOMIC64 if PPC32
select HAVE_PERF_EVENTS
+ select HAVE_REGS_AND_STACK_ACCESS_API
config EARLY_PRINTK
bool
@@ -662,6 +663,9 @@ config ZONE_DMA
config NEED_DMA_MAP_STATE
def_bool (PPC64 || NOT_COHERENT_CACHE)
+config NEED_SG_DMA_LENGTH
+ def_bool y
+
config GENERIC_ISA_DMA
bool
depends on PPC64 || POWER4 || 6xx && !CPM2
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index 5cdd7ed9a12e..53696da4518f 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -44,6 +44,18 @@ config DEBUG_STACK_USAGE
This option will slow down process creation somewhat.
+config DEBUG_PER_CPU_MAPS
+ bool "Debug access to per_cpu maps"
+ depends on DEBUG_KERNEL
+ depends on SMP
+ default n
+ ---help---
+ Say Y to verify that the per_cpu map being accessed has
+	  been set up. Adds a fair amount of code to kernel memory
+ and decreases performance.
+
+ Say N if unsure.
+
config HCALL_STATS
bool "Hypervisor call instrumentation"
depends on PPC_PSERIES && DEBUG_FS && TRACEPOINTS
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index bb2465bcb327..ad0df7d0a643 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -44,6 +44,7 @@ $(obj)/cuboot-taishan.o: BOOTCFLAGS += -mcpu=405
$(obj)/cuboot-katmai.o: BOOTCFLAGS += -mcpu=405
$(obj)/cuboot-acadia.o: BOOTCFLAGS += -mcpu=405
$(obj)/treeboot-walnut.o: BOOTCFLAGS += -mcpu=405
+$(obj)/treeboot-iss4xx.o: BOOTCFLAGS += -mcpu=405
$(obj)/virtex405-head.o: BOOTAFLAGS += -mcpu=405
@@ -77,7 +78,7 @@ src-plat := of.c cuboot-52xx.c cuboot-824x.c cuboot-83xx.c cuboot-85xx.c holly.c
cuboot-warp.c cuboot-85xx-cpm2.c cuboot-yosemite.c simpleboot.c \
virtex405-head.S virtex.c redboot-83xx.c cuboot-sam440ep.c \
cuboot-acadia.c cuboot-amigaone.c cuboot-kilauea.c \
- gamecube-head.S gamecube.c wii-head.S wii.c
+ gamecube-head.S gamecube.c wii-head.S wii.c treeboot-iss4xx.c
src-boot := $(src-wlib) $(src-plat) empty.c
src-boot := $(addprefix $(obj)/, $(src-boot))
@@ -169,7 +170,7 @@ quiet_cmd_wrap = WRAP $@
$(if $3, -s $3)$(if $4, -d $4)$(if $5, -i $5) vmlinux
image-$(CONFIG_PPC_PSERIES) += zImage.pseries
-image-$(CONFIG_PPC_MAPLE) += zImage.pseries
+image-$(CONFIG_PPC_MAPLE) += zImage.maple
image-$(CONFIG_PPC_IBM_CELL_BLADE) += zImage.pseries
image-$(CONFIG_PPC_PS3) += dtbImage.ps3
image-$(CONFIG_PPC_CELLEB) += zImage.pseries
@@ -206,6 +207,8 @@ image-$(CONFIG_TAISHAN) += cuImage.taishan
image-$(CONFIG_KATMAI) += cuImage.katmai
image-$(CONFIG_WARP) += cuImage.warp
image-$(CONFIG_YOSEMITE) += cuImage.yosemite
+image-$(CONFIG_ISS4xx) += treeImage.iss4xx \
+ treeImage.iss4xx-mpic
# Board ports in arch/powerpc/platform/8xx/Kconfig
image-$(CONFIG_MPC86XADS) += cuImage.mpc866ads
@@ -351,7 +354,7 @@ install: $(CONFIGURE) $(addprefix $(obj)/, $(image-y))
clean-files += $(image-) $(initrd-) cuImage.* dtbImage.* treeImage.* \
zImage zImage.initrd zImage.chrp zImage.coff zImage.holly \
zImage.iseries zImage.miboot zImage.pmac zImage.pseries \
- simpleImage.* otheros.bld *.dtb
+ zImage.maple simpleImage.* otheros.bld *.dtb
# clean up files cached by wrapper
clean-kernel := vmlinux.strip vmlinux.bin
diff --git a/arch/powerpc/boot/dts/iss4xx-mpic.dts b/arch/powerpc/boot/dts/iss4xx-mpic.dts
new file mode 100644
index 000000000000..23e9d9b7e400
--- /dev/null
+++ b/arch/powerpc/boot/dts/iss4xx-mpic.dts
@@ -0,0 +1,155 @@
+/*
+ * Device Tree Source for IBM Embedded PPC 476 Platform
+ *
+ * Copyright 2010 Torez Smith, IBM Corporation.
+ *
+ * Based on earlier code:
+ * Copyright (c) 2006, 2007 IBM Corp.
+ * Josh Boyer <jwboyer@linux.vnet.ibm.com>, David Gibson <dwg@au1.ibm.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without
+ * any warranty of any kind, whether express or implied.
+ */
+
+/dts-v1/;
+
+/memreserve/ 0x01f00000 0x00100000;
+
+/ {
+ #address-cells = <2>;
+ #size-cells = <1>;
+ model = "ibm,iss-4xx";
+ compatible = "ibm,iss-4xx";
+ dcr-parent = <&{/cpus/cpu@0}>;
+
+ aliases {
+ serial0 = &UART0;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ device_type = "cpu";
+ model = "PowerPC,4xx"; // real CPU changed in sim
+ reg = <0>;
+ clock-frequency = <100000000>; // 100Mhz :-)
+ timebase-frequency = <100000000>;
+ i-cache-line-size = <32>;
+ d-cache-line-size = <32>;
+ i-cache-size = <32768>;
+ d-cache-size = <32768>;
+ dcr-controller;
+ dcr-access-method = "native";
+ status = "ok";
+ };
+ cpu@1 {
+ device_type = "cpu";
+ model = "PowerPC,4xx"; // real CPU changed in sim
+ reg = <1>;
+ clock-frequency = <100000000>; // 100Mhz :-)
+ timebase-frequency = <100000000>;
+ i-cache-line-size = <32>;
+ d-cache-line-size = <32>;
+ i-cache-size = <32768>;
+ d-cache-size = <32768>;
+ dcr-controller;
+ dcr-access-method = "native";
+ status = "disabled";
+ enable-method = "spin-table";
+ cpu-release-addr = <0 0x01f00100>;
+ };
+ cpu@2 {
+ device_type = "cpu";
+ model = "PowerPC,4xx"; // real CPU changed in sim
+ reg = <2>;
+ clock-frequency = <100000000>; // 100Mhz :-)
+ timebase-frequency = <100000000>;
+ i-cache-line-size = <32>;
+ d-cache-line-size = <32>;
+ i-cache-size = <32768>;
+ d-cache-size = <32768>;
+ dcr-controller;
+ dcr-access-method = "native";
+ status = "disabled";
+ enable-method = "spin-table";
+ cpu-release-addr = <0 0x01f00200>;
+ };
+ cpu@3 {
+ device_type = "cpu";
+ model = "PowerPC,4xx"; // real CPU changed in sim
+ reg = <3>;
+ clock-frequency = <100000000>; // 100Mhz :-)
+ timebase-frequency = <100000000>;
+ i-cache-line-size = <32>;
+ d-cache-line-size = <32>;
+ i-cache-size = <32768>;
+ d-cache-size = <32768>;
+ dcr-controller;
+ dcr-access-method = "native";
+ status = "disabled";
+ enable-method = "spin-table";
+ cpu-release-addr = <0 0x01f00300>;
+ };
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x00000000 0x00000000 0x00000000>; // Filled in by zImage
+
+ };
+
+ MPIC: interrupt-controller {
+ compatible = "chrp,open-pic";
+ interrupt-controller;
+ dcr-reg = <0xffc00000 0x00030000>;
+ #address-cells = <0>;
+ #size-cells = <0>;
+ #interrupt-cells = <2>;
+
+ };
+
+ plb {
+ compatible = "ibm,plb-4xx", "ibm,plb4"; /* Could be PLB6, doesn't matter */
+ #address-cells = <2>;
+ #size-cells = <1>;
+ ranges;
+ clock-frequency = <0>; // Filled in by zImage
+
+ POB0: opb {
+ compatible = "ibm,opb-4xx", "ibm,opb";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ /* Wish there was a nicer way of specifying a full 32-bit
+ range */
+ ranges = <0x00000000 0x00000001 0x00000000 0x80000000
+ 0x80000000 0x00000001 0x80000000 0x80000000>;
+ clock-frequency = <0>; // Filled in by zImage
+ UART0: serial@40000200 {
+ device_type = "serial";
+ compatible = "ns16550a";
+ reg = <0x40000200 0x00000008>;
+ virtual-reg = <0xe0000200>;
+ clock-frequency = <11059200>;
+ current-speed = <115200>;
+ interrupt-parent = <&MPIC>;
+ interrupts = <0x0 0x2>;
+ };
+ };
+ };
+
+ nvrtc {
+ compatible = "ds1743-nvram", "ds1743", "rtc-ds1743";
+ reg = <0 0xEF703000 0x2000>;
+ };
+ iss-block {
+ compatible = "ibm,iss-sim-block-device";
+ reg = <0 0xEF701000 0x1000>;
+ };
+
+ chosen {
+ linux,stdout-path = "/plb/opb/serial@40000200";
+ };
+};
diff --git a/arch/powerpc/boot/dts/iss4xx.dts b/arch/powerpc/boot/dts/iss4xx.dts
new file mode 100644
index 000000000000..4ff6555c866d
--- /dev/null
+++ b/arch/powerpc/boot/dts/iss4xx.dts
@@ -0,0 +1,116 @@
+/*
+ * Device Tree Source for IBM Embedded PPC 476 Platform
+ *
+ * Copyright 2010 Torez Smith, IBM Corporation.
+ *
+ * Based on earlier code:
+ * Copyright (c) 2006, 2007 IBM Corp.
+ * Josh Boyer <jwboyer@linux.vnet.ibm.com>, David Gibson <dwg@au1.ibm.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without
+ * any warranty of any kind, whether express or implied.
+ */
+
+/dts-v1/;
+
+/ {
+ #address-cells = <2>;
+ #size-cells = <1>;
+ model = "ibm,iss-4xx";
+ compatible = "ibm,iss-4xx";
+ dcr-parent = <&{/cpus/cpu@0}>;
+
+ aliases {
+ serial0 = &UART0;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ device_type = "cpu";
+ model = "PowerPC,4xx"; // real CPU changed in sim
+ reg = <0x00000000>;
+ clock-frequency = <100000000>; // 100Mhz :-)
+ timebase-frequency = <100000000>;
+ i-cache-line-size = <32>; // may need fixup in sim
+ d-cache-line-size = <32>; // may need fixup in sim
+ i-cache-size = <32768>; /* may need fixup in sim */
+ d-cache-size = <32768>; /* may need fixup in sim */
+ dcr-controller;
+ dcr-access-method = "native";
+ };
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x00000000 0x00000000 0x00000000>; // Filled in by zImage
+ };
+
+ UIC0: interrupt-controller0 {
+ compatible = "ibm,uic-4xx", "ibm,uic";
+ interrupt-controller;
+ cell-index = <0>;
+ dcr-reg = <0x0c0 0x009>;
+ #address-cells = <0>;
+ #size-cells = <0>;
+ #interrupt-cells = <2>;
+
+ };
+
+ UIC1: interrupt-controller1 {
+ compatible = "ibm,uic-4xx", "ibm,uic";
+ interrupt-controller;
+ cell-index = <1>;
+ dcr-reg = <0x0d0 0x009>;
+ #address-cells = <0>;
+ #size-cells = <0>;
+ #interrupt-cells = <2>;
+ interrupts = <0x1e 0x4 0x1f 0x4>; /* cascade */
+ interrupt-parent = <&UIC0>;
+ };
+
+ plb {
+ compatible = "ibm,plb-4xx", "ibm,plb4"; /* Could be PLB6, doesn't matter */
+ #address-cells = <2>;
+ #size-cells = <1>;
+ ranges;
+ clock-frequency = <0>; // Filled in by zImage
+
+ POB0: opb {
+ compatible = "ibm,opb-4xx", "ibm,opb";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ /* Wish there was a nicer way of specifying a full 32-bit
+ range */
+ ranges = <0x00000000 0x00000001 0x00000000 0x80000000
+ 0x80000000 0x00000001 0x80000000 0x80000000>;
+ clock-frequency = <0>; // Filled in by zImage
+ UART0: serial@40000200 {
+ device_type = "serial";
+ compatible = "ns16550a";
+ reg = <0x40000200 0x00000008>;
+ virtual-reg = <0xe0000200>;
+ clock-frequency = <11059200>;
+ current-speed = <115200>;
+ interrupt-parent = <&UIC0>;
+ interrupts = <0x0 0x4>;
+ };
+ };
+ };
+
+ nvrtc {
+ compatible = "ds1743-nvram", "ds1743", "rtc-ds1743";
+ reg = <0 0xEF703000 0x2000>;
+ };
+ iss-block {
+ compatible = "ibm,iss-sim-block-device";
+ reg = <0 0xEF701000 0x1000>;
+ };
+
+ chosen {
+ linux,stdout-path = "/plb/opb/serial@40000200";
+ };
+};
diff --git a/arch/powerpc/boot/dts/mpc8315erdb.dts b/arch/powerpc/boot/dts/mpc8315erdb.dts
index 8a3a4f3ef831..4dd08c322979 100644
--- a/arch/powerpc/boot/dts/mpc8315erdb.dts
+++ b/arch/powerpc/boot/dts/mpc8315erdb.dts
@@ -292,7 +292,7 @@
fsl,num-channels = <4>;
fsl,channel-fifo-len = <24>;
fsl,exec-units-mask = <0x97c>;
- fsl,descriptor-types-mask = <0x3ab0abf>;
+ fsl,descriptor-types-mask = <0x3a30abf>;
};
sata@18000 {
@@ -463,4 +463,18 @@
0 0x00800000>;
};
};
+
+ leds {
+ compatible = "gpio-leds";
+
+ pwr {
+ gpios = <&mcu_pio 0 0>;
+ default-state = "on";
+ };
+
+ hdd {
+ gpios = <&mcu_pio 1 0>;
+ linux,default-trigger = "ide-disk";
+ };
+ };
};
diff --git a/arch/powerpc/boot/dts/mpc8377_rdb.dts b/arch/powerpc/boot/dts/mpc8377_rdb.dts
index 9e2264b10008..dbc1b988b29d 100644
--- a/arch/powerpc/boot/dts/mpc8377_rdb.dts
+++ b/arch/powerpc/boot/dts/mpc8377_rdb.dts
@@ -486,4 +486,18 @@
0 0x00800000>;
};
};
+
+ leds {
+ compatible = "gpio-leds";
+
+ pwr {
+ gpios = <&mcu_pio 0 0>;
+ default-state = "on";
+ };
+
+ hdd {
+ gpios = <&mcu_pio 1 0>;
+ linux,default-trigger = "ide-disk";
+ };
+ };
};
diff --git a/arch/powerpc/boot/dts/mpc8378_rdb.dts b/arch/powerpc/boot/dts/mpc8378_rdb.dts
index 4e6a1a407bbd..3447eb9f6e88 100644
--- a/arch/powerpc/boot/dts/mpc8378_rdb.dts
+++ b/arch/powerpc/boot/dts/mpc8378_rdb.dts
@@ -470,4 +470,18 @@
0 0x00800000>;
};
};
+
+ leds {
+ compatible = "gpio-leds";
+
+ pwr {
+ gpios = <&mcu_pio 0 0>;
+ default-state = "on";
+ };
+
+ hdd {
+ gpios = <&mcu_pio 1 0>;
+ linux,default-trigger = "ide-disk";
+ };
+ };
};
diff --git a/arch/powerpc/boot/dts/mpc8379_rdb.dts b/arch/powerpc/boot/dts/mpc8379_rdb.dts
index 72336d504528..15560c619b04 100644
--- a/arch/powerpc/boot/dts/mpc8379_rdb.dts
+++ b/arch/powerpc/boot/dts/mpc8379_rdb.dts
@@ -436,4 +436,18 @@
compatible = "fsl,mpc8349-pci";
device_type = "pci";
};
+
+ leds {
+ compatible = "gpio-leds";
+
+ pwr {
+ gpios = <&mcu_pio 0 0>;
+ default-state = "on";
+ };
+
+ hdd {
+ gpios = <&mcu_pio 1 0>;
+ linux,default-trigger = "ide-disk";
+ };
+ };
};
diff --git a/arch/powerpc/boot/dts/p1020rdb.dts b/arch/powerpc/boot/dts/p1020rdb.dts
index df5269093af8..22f64b62d7f6 100644
--- a/arch/powerpc/boot/dts/p1020rdb.dts
+++ b/arch/powerpc/boot/dts/p1020rdb.dts
@@ -19,6 +19,9 @@
aliases {
serial0 = &serial0;
serial1 = &serial1;
+ ethernet0 = &enet0;
+ ethernet1 = &enet1;
+ ethernet2 = &enet2;
pci0 = &pci0;
pci1 = &pci1;
};
@@ -346,6 +349,122 @@
};
};
+ mdio@24000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,etsec2-mdio";
+ reg = <0x24000 0x1000 0xb0030 0x4>;
+
+ phy0: ethernet-phy@0 {
+ interrupt-parent = <&mpic>;
+ interrupts = <3 1>;
+ reg = <0x0>;
+ };
+
+ phy1: ethernet-phy@1 {
+ interrupt-parent = <&mpic>;
+ interrupts = <2 1>;
+ reg = <0x1>;
+ };
+ };
+
+ mdio@25000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,etsec2-tbi";
+ reg = <0x25000 0x1000 0xb1030 0x4>;
+
+ tbi0: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ enet0: ethernet@b0000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ device_type = "network";
+ model = "eTSEC";
+ compatible = "fsl,etsec2";
+ fsl,num_rx_queues = <0x8>;
+ fsl,num_tx_queues = <0x8>;
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ interrupt-parent = <&mpic>;
+ fixed-link = <1 1 1000 0 0>;
+ phy-connection-type = "rgmii-id";
+
+ queue-group@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0xb0000 0x1000>;
+ interrupts = <29 2 30 2 34 2>;
+ };
+
+ queue-group@1 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0xb4000 0x1000>;
+ interrupts = <17 2 18 2 24 2>;
+ };
+ };
+
+ enet1: ethernet@b1000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ device_type = "network";
+ model = "eTSEC";
+ compatible = "fsl,etsec2";
+ fsl,num_rx_queues = <0x8>;
+ fsl,num_tx_queues = <0x8>;
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ interrupt-parent = <&mpic>;
+ phy-handle = <&phy0>;
+ tbi-handle = <&tbi0>;
+ phy-connection-type = "sgmii";
+
+ queue-group@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0xb1000 0x1000>;
+ interrupts = <35 2 36 2 40 2>;
+ };
+
+ queue-group@1 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0xb5000 0x1000>;
+ interrupts = <51 2 52 2 67 2>;
+ };
+ };
+
+ enet2: ethernet@b2000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ device_type = "network";
+ model = "eTSEC";
+ compatible = "fsl,etsec2";
+ fsl,num_rx_queues = <0x8>;
+ fsl,num_tx_queues = <0x8>;
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ interrupt-parent = <&mpic>;
+ phy-handle = <&phy1>;
+ phy-connection-type = "rgmii-id";
+
+ queue-group@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0xb2000 0x1000>;
+ interrupts = <31 2 32 2 33 2>;
+ };
+
+ queue-group@1 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0xb6000 0x1000>;
+ interrupts = <25 2 26 2 27 2>;
+ };
+ };
+
usb@22000 {
#address-cells = <1>;
#size-cells = <0>;
@@ -356,6 +475,11 @@
phy_type = "ulpi";
};
+ /* USB2 is shared with localbus, so it must be disabled
+ by default. We can't put 'status = "disabled";' here
+ since U-Boot doesn't clear the status property when
+ it enables USB2. OTOH, U-Boot does create a new node
+ when there isn't any. So, just comment it out.
usb@23000 {
#address-cells = <1>;
#size-cells = <0>;
@@ -365,6 +489,7 @@
interrupts = <46 0x2>;
phy_type = "ulpi";
};
+ */
sdhci@2e000 {
compatible = "fsl,p1020-esdhc", "fsl,esdhc";
diff --git a/arch/powerpc/boot/treeboot-iss4xx.c b/arch/powerpc/boot/treeboot-iss4xx.c
new file mode 100644
index 000000000000..fcc44952874e
--- /dev/null
+++ b/arch/powerpc/boot/treeboot-iss4xx.c
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2010 Ben. Herrenschmidt, IBM Corporation.
+ *
+ * Based on earlier code:
+ * Copyright (C) Paul Mackerras 1997.
+ *
+ * Matt Porter <mporter@kernel.crashing.org>
+ * Copyright 2002-2005 MontaVista Software Inc.
+ *
+ * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
+ * Copyright (c) 2003, 2004 Zultys Technologies
+ *
+ * Copyright 2007 David Gibson, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <stdarg.h>
+#include <stddef.h>
+#include "types.h"
+#include "elf.h"
+#include "string.h"
+#include "stdio.h"
+#include "page.h"
+#include "ops.h"
+#include "reg.h"
+#include "io.h"
+#include "dcr.h"
+#include "4xx.h"
+#include "44x.h"
+#include "libfdt.h"
+
+BSS_STACK(4096);
+
+static void iss_4xx_fixups(void)
+{
+ ibm4xx_sdram_fixup_memsize();
+}
+
+#define SPRN_PIR 0x11E /* Processor Identification Register */
+void platform_init(void)
+{
+ unsigned long end_of_ram = 0x08000000;
+ unsigned long avail_ram = end_of_ram - (unsigned long)_end;
+ u32 pir_reg;
+
+ simple_alloc_init(_end, avail_ram, 128, 64);
+ platform_ops.fixups = iss_4xx_fixups;
+ platform_ops.exit = ibm44x_dbcr_reset;
+ pir_reg = mfspr(SPRN_PIR);
+ fdt_set_boot_cpuid_phys(_dtb_start, pir_reg);
+ fdt_init(_dtb_start);
+ serial_console_init();
+}
diff --git a/arch/powerpc/boot/wrapper b/arch/powerpc/boot/wrapper
index f4594ed09a20..cb97e7511d7e 100755
--- a/arch/powerpc/boot/wrapper
+++ b/arch/powerpc/boot/wrapper
@@ -149,6 +149,10 @@ pseries)
platformo=$object/of.o
link_address='0x4000000'
;;
+maple)
+ platformo=$object/of.o
+ link_address='0x400000'
+ ;;
pmac|chrp)
platformo=$object/of.o
;;
@@ -237,6 +241,9 @@ gamecube|wii)
link_address='0x600000'
platformo="$object/$platform-head.o $object/$platform.o"
;;
+treeboot-iss4xx-mpic)
+ platformo="$object/treeboot-iss4xx.o"
+ ;;
esac
vmz="$tmpdir/`basename \"$kernel\"`.$ext"
@@ -321,7 +328,7 @@ fi
# post-processing needed for some platforms
case "$platform" in
-pseries|chrp)
+pseries|chrp|maple)
$objbin/addnote "$ofile"
;;
coff)
diff --git a/arch/powerpc/configs/44x/iss476-smp_defconfig b/arch/powerpc/configs/44x/iss476-smp_defconfig
new file mode 100644
index 000000000000..8683cbc6c3e1
--- /dev/null
+++ b/arch/powerpc/configs/44x/iss476-smp_defconfig
@@ -0,0 +1,1026 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.33
+# Thu Mar 4 11:50:12 2010
+#
+# CONFIG_PPC64 is not set
+
+#
+# Processor support
+#
+# CONFIG_PPC_BOOK3S_32 is not set
+# CONFIG_PPC_85xx is not set
+# CONFIG_PPC_8xx is not set
+# CONFIG_40x is not set
+CONFIG_44x=y
+# CONFIG_E200 is not set
+CONFIG_PPC_FPU=y
+CONFIG_4xx=y
+CONFIG_BOOKE=y
+CONFIG_PTE_64BIT=y
+CONFIG_PHYS_64BIT=y
+CONFIG_PPC_MMU_NOHASH=y
+CONFIG_PPC_MMU_NOHASH_32=y
+# CONFIG_PPC_MM_SLICES is not set
+CONFIG_SMP=y
+CONFIG_NR_CPUS=4
+# CONFIG_NOT_COHERENT_CACHE is not set
+CONFIG_PPC32=y
+CONFIG_WORD_SIZE=32
+CONFIG_ARCH_PHYS_ADDR_T_64BIT=y
+CONFIG_MMU=y
+CONFIG_GENERIC_CMOS_UPDATE=y
+CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_TIME_VSYSCALL=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
+# CONFIG_HAVE_SETUP_PER_CPU_AREA is not set
+# CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK is not set
+CONFIG_IRQ_PER_CPU=y
+CONFIG_NR_IRQS=512
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_HAVE_LATENCYTOP_SUPPORT=y
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+CONFIG_ARCH_HAS_ILOG2_U32=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_FIND_NEXT_BIT=y
+# CONFIG_ARCH_NO_VIRT_TO_BUS is not set
+CONFIG_PPC=y
+CONFIG_EARLY_PRINTK=y
+CONFIG_GENERIC_NVRAM=y
+CONFIG_SCHED_OMIT_FRAME_POINTER=y
+CONFIG_ARCH_MAY_HAVE_PC_FDC=y
+CONFIG_PPC_OF=y
+CONFIG_OF=y
+CONFIG_PPC_UDBG_16550=y
+CONFIG_GENERIC_TBSYNC=y
+CONFIG_AUDIT_ARCH=y
+CONFIG_GENERIC_BUG=y
+CONFIG_DTC=y
+# CONFIG_DEFAULT_UIMAGE is not set
+CONFIG_ARCH_HIBERNATION_POSSIBLE=y
+CONFIG_PPC_DCR_NATIVE=y
+# CONFIG_PPC_DCR_MMIO is not set
+CONFIG_PPC_DCR=y
+CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_LOCK_KERNEL=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_POSIX_MQUEUE_SYSCTL=y
+# CONFIG_BSD_PROCESS_ACCT is not set
+# CONFIG_TASKSTATS is not set
+# CONFIG_AUDIT is not set
+
+#
+# RCU Subsystem
+#
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_TINY_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_IKCONFIG is not set
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_GROUP_SCHED=y
+CONFIG_FAIR_GROUP_SCHED=y
+# CONFIG_RT_GROUP_SCHED is not set
+CONFIG_USER_SCHED=y
+# CONFIG_CGROUP_SCHED is not set
+# CONFIG_CGROUPS is not set
+CONFIG_SYSFS_DEPRECATED=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_RELAY is not set
+# CONFIG_NAMESPACES is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
+CONFIG_EMBEDDED=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_KALLSYMS_EXTRA_PASS=y
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_AIO=y
+CONFIG_HAVE_PERF_EVENTS=y
+
+#
+# Kernel Performance Events And Counters
+#
+CONFIG_PERF_EVENTS=y
+CONFIG_EVENT_PROFILE=y
+# CONFIG_PERF_COUNTERS is not set
+# CONFIG_DEBUG_PERF_USE_VMALLOC is not set
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_SLUB_DEBUG=y
+CONFIG_COMPAT_BRK=y
+# CONFIG_SLAB is not set
+CONFIG_SLUB=y
+# CONFIG_SLOB is not set
+CONFIG_PROFILING=y
+CONFIG_TRACEPOINTS=y
+CONFIG_OPROFILE=y
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_KPROBES is not set
+CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y
+CONFIG_HAVE_IOREMAP_PROT=y
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_HAVE_ARCH_TRACEHOOK=y
+CONFIG_HAVE_DMA_ATTRS=y
+CONFIG_USE_GENERIC_SMP_HELPERS=y
+CONFIG_HAVE_DMA_API_DEBUG=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
+# CONFIG_SLOW_WORK is not set
+# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_STOP_MACHINE=y
+CONFIG_BLOCK=y
+CONFIG_LBDAF=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+# CONFIG_DEFAULT_DEADLINE is not set
+CONFIG_DEFAULT_CFQ=y
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="cfq"
+# CONFIG_INLINE_SPIN_TRYLOCK is not set
+# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK is not set
+# CONFIG_INLINE_SPIN_LOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
+CONFIG_INLINE_SPIN_UNLOCK=y
+# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
+CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
+# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_READ_TRYLOCK is not set
+# CONFIG_INLINE_READ_LOCK is not set
+# CONFIG_INLINE_READ_LOCK_BH is not set
+# CONFIG_INLINE_READ_LOCK_IRQ is not set
+# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
+CONFIG_INLINE_READ_UNLOCK=y
+# CONFIG_INLINE_READ_UNLOCK_BH is not set
+CONFIG_INLINE_READ_UNLOCK_IRQ=y
+# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_WRITE_TRYLOCK is not set
+# CONFIG_INLINE_WRITE_LOCK is not set
+# CONFIG_INLINE_WRITE_LOCK_BH is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
+CONFIG_INLINE_WRITE_UNLOCK=y
+# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
+CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
+# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
+CONFIG_MUTEX_SPIN_ON_OWNER=y
+# CONFIG_FREEZER is not set
+
+#
+# Platform support
+#
+# CONFIG_PPC_CELL is not set
+# CONFIG_PPC_CELL_NATIVE is not set
+# CONFIG_PQ2ADS is not set
+CONFIG_PPC_47x=y
+# CONFIG_BAMBOO is not set
+# CONFIG_EBONY is not set
+# CONFIG_SAM440EP is not set
+# CONFIG_SEQUOIA is not set
+# CONFIG_TAISHAN is not set
+# CONFIG_KATMAI is not set
+# CONFIG_RAINIER is not set
+# CONFIG_WARP is not set
+# CONFIG_ARCHES is not set
+# CONFIG_CANYONLANDS is not set
+# CONFIG_GLACIER is not set
+# CONFIG_REDWOOD is not set
+# CONFIG_EIGER is not set
+# CONFIG_YOSEMITE is not set
+CONFIG_ISS4xx=y
+# CONFIG_XILINX_VIRTEX440_GENERIC_BOARD is not set
+# CONFIG_PPC44x_SIMPLE is not set
+# CONFIG_PPC4xx_GPIO is not set
+# CONFIG_IPIC is not set
+CONFIG_MPIC=y
+# CONFIG_MPIC_WEIRD is not set
+# CONFIG_PPC_I8259 is not set
+# CONFIG_PPC_RTAS is not set
+# CONFIG_MMIO_NVRAM is not set
+# CONFIG_PPC_MPC106 is not set
+# CONFIG_PPC_970_NAP is not set
+# CONFIG_PPC_INDIRECT_IO is not set
+# CONFIG_GENERIC_IOMAP is not set
+# CONFIG_CPU_FREQ is not set
+# CONFIG_FSL_ULI1575 is not set
+CONFIG_OF_RTC=y
+# CONFIG_SIMPLE_GPIO is not set
+
+#
+# Kernel options
+#
+# CONFIG_HIGHMEM is not set
+# CONFIG_NO_HZ is not set
+# CONFIG_HIGH_RES_TIMERS is not set
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+CONFIG_HZ_100=y
+# CONFIG_HZ_250 is not set
+# CONFIG_HZ_300 is not set
+# CONFIG_HZ_1000 is not set
+CONFIG_HZ=100
+# CONFIG_SCHED_HRTICK is not set
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+# CONFIG_PREEMPT is not set
+CONFIG_BINFMT_ELF=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_HAVE_AOUT is not set
+# CONFIG_BINFMT_MISC is not set
+CONFIG_MATH_EMULATION=y
+# CONFIG_IOMMU_HELPER is not set
+# CONFIG_SWIOTLB is not set
+CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
+CONFIG_ARCH_HAS_WALK_MEMORY=y
+CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y
+CONFIG_IRQ_ALL_CPUS=y
+CONFIG_SPARSE_IRQ=y
+CONFIG_MAX_ACTIVE_REGIONS=32
+CONFIG_ARCH_FLATMEM_ENABLE=y
+CONFIG_ARCH_POPULATES_NODE_MAP=y
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4
+CONFIG_MIGRATION=y
+CONFIG_PHYS_ADDR_T_64BIT=y
+CONFIG_ZONE_DMA_FLAG=1
+CONFIG_BOUNCE=y
+CONFIG_VIRT_TO_BUS=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+CONFIG_STDBINUTILS=y
+CONFIG_PPC_4K_PAGES=y
+# CONFIG_PPC_16K_PAGES is not set
+# CONFIG_PPC_64K_PAGES is not set
+# CONFIG_PPC_256K_PAGES is not set
+CONFIG_FORCE_MAX_ZONEORDER=11
+CONFIG_PROC_DEVICETREE=y
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="root=/dev/issblk0"
+CONFIG_EXTRA_TARGETS=""
+CONFIG_SECCOMP=y
+CONFIG_ISA_DMA_API=y
+
+#
+# Bus options
+#
+CONFIG_ZONE_DMA=y
+CONFIG_4xx_SOC=y
+CONFIG_PPC_PCI_CHOICE=y
+# CONFIG_PCI is not set
+# CONFIG_PCI_DOMAINS is not set
+# CONFIG_PCI_SYSCALL is not set
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_PCCARD is not set
+# CONFIG_HAS_RAPIDIO is not set
+
+#
+# Advanced setup
+#
+# CONFIG_ADVANCED_OPTIONS is not set
+
+#
+# Default settings for advanced configuration options are used
+#
+CONFIG_LOWMEM_SIZE=0x30000000
+CONFIG_PAGE_OFFSET=0xc0000000
+CONFIG_KERNEL_START=0xc0000000
+CONFIG_PHYSICAL_START=0x00000000
+CONFIG_TASK_SIZE=0xc0000000
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+CONFIG_UNIX=y
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+# CONFIG_INET_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
+# CONFIG_IPV6 is not set
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NETFILTER is not set
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_NET_DSA is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
+# CONFIG_NET_SCHED is not set
+# CONFIG_DCB is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_NET_DROP_MONITOR is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_AF_RXRPC is not set
+CONFIG_WIRELESS=y
+# CONFIG_CFG80211 is not set
+# CONFIG_LIB80211 is not set
+
+#
+# CFG80211 needs to be enabled for MAC80211
+#
+# CONFIG_WIMAX is not set
+# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=y
+CONFIG_FIRMWARE_IN_KERNEL=y
+CONFIG_EXTRA_FIRMWARE=""
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_SYS_HYPERVISOR is not set
+CONFIG_CONNECTOR=y
+CONFIG_PROC_EVENTS=y
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
+# CONFIG_MTD_CONCAT is not set
+CONFIG_MTD_PARTITIONS=y
+# CONFIG_MTD_REDBOOT_PARTS is not set
+# CONFIG_MTD_CMDLINE_PARTS is not set
+CONFIG_MTD_OF_PARTS=y
+# CONFIG_MTD_AR7_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLKDEVS=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+# CONFIG_MTD_OOPS is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+# CONFIG_MTD_CFI is not set
+CONFIG_MTD_JEDECPROBE=y
+CONFIG_MTD_GEN_PROBE=y
+# CONFIG_MTD_CFI_ADV_OPTIONS is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+# CONFIG_MTD_CFI_INTELEXT is not set
+CONFIG_MTD_CFI_AMDSTD=y
+# CONFIG_MTD_CFI_STAA is not set
+CONFIG_MTD_CFI_UTIL=y
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+# CONFIG_MTD_PHYSMAP is not set
+CONFIG_MTD_PHYSMAP_OF=y
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+# CONFIG_MTD_NAND is not set
+# CONFIG_MTD_ONENAND is not set
+
+#
+# LPDDR flash memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
+
+#
+# UBI - Unsorted block images
+#
+# CONFIG_MTD_UBI is not set
+CONFIG_OF_DEVICE=y
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_FD is not set
+# CONFIG_BLK_DEV_COW_COMMON is not set
+# CONFIG_BLK_DEV_LOOP is not set
+# CONFIG_BLK_DEV_DRBD is not set
+# CONFIG_BLK_DEV_NBD is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=35000
+# CONFIG_BLK_DEV_XIP is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+# CONFIG_XILINX_SYSACE is not set
+# CONFIG_BLK_DEV_HD is not set
+CONFIG_MISC_DEVICES=y
+# CONFIG_ENCLOSURE_SERVICES is not set
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+# CONFIG_EEPROM_93CX6 is not set
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+# CONFIG_SCSI is not set
+# CONFIG_SCSI_DMA is not set
+# CONFIG_SCSI_NETLINK is not set
+# CONFIG_ATA is not set
+# CONFIG_MD is not set
+# CONFIG_MACINTOSH_DRIVERS is not set
+# CONFIG_NETDEVICES is not set
+# CONFIG_ISDN is not set
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+# CONFIG_INPUT is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+# CONFIG_VT is not set
+CONFIG_DEVKMEM=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=4
+CONFIG_SERIAL_8250_RUNTIME_UARTS=4
+CONFIG_SERIAL_8250_EXTENDED=y
+# CONFIG_SERIAL_8250_MANY_PORTS is not set
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+# CONFIG_SERIAL_8250_DETECT_IRQ is not set
+# CONFIG_SERIAL_8250_RSA is not set
+
+#
+# Non-8250 serial port support
+#
+# CONFIG_SERIAL_UARTLITE is not set
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
+# CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL is not set
+# CONFIG_SERIAL_GRLIB_GAISLER_APBUART is not set
+CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+# CONFIG_HVC_UDBG is not set
+# CONFIG_IPMI_HANDLER is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_NVRAM is not set
+# CONFIG_GEN_RTC is not set
+# CONFIG_R3964 is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
+# CONFIG_I2C is not set
+# CONFIG_SPI is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
+CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
+# CONFIG_GPIOLIB is not set
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+# CONFIG_HWMON is not set
+CONFIG_THERMAL=y
+# CONFIG_WATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+# CONFIG_SSB is not set
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_REGULATOR is not set
+# CONFIG_MEDIA_SUPPORT is not set
+
+#
+# Graphics support
+#
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+# CONFIG_FB is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
+# CONFIG_SOUND is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_MMC is not set
+# CONFIG_MEMSTICK is not set
+# CONFIG_NEW_LEDS is not set
+# CONFIG_ACCESSIBILITY is not set
+# CONFIG_EDAC is not set
+# CONFIG_RTC_CLASS is not set
+# CONFIG_DMADEVICES is not set
+# CONFIG_AUXDISPLAY is not set
+# CONFIG_UIO is not set
+
+#
+# TI VLYNQ
+#
+# CONFIG_STAGING is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT3_FS_XATTR=y
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+# CONFIG_EXT4_FS is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_FS_MBCACHE=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+CONFIG_FS_POSIX_ACL=y
+# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
+CONFIG_DNOTIFY=y
+CONFIG_INOTIFY=y
+CONFIG_INOTIFY_USER=y
+# CONFIG_QUOTA is not set
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+# CONFIG_MSDOS_FS is not set
+# CONFIG_VFAT_FS is not set
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_HUGETLB_PAGE is not set
+# CONFIG_CONFIGFS_FS is not set
+CONFIG_MISC_FILESYSTEMS=y
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_JFFS2_FS is not set
+CONFIG_CRAMFS=y
+# CONFIG_SQUASHFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+# CONFIG_NETWORK_FILESYSTEMS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+# CONFIG_NLS is not set
+# CONFIG_DLM is not set
+CONFIG_BINARY_PRINTF=y
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+CONFIG_GENERIC_FIND_LAST_BIT=y
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
+# CONFIG_CRC_T10DIF is not set
+# CONFIG_CRC_ITU_T is not set
+CONFIG_CRC32=y
+# CONFIG_CRC7 is not set
+# CONFIG_LIBCRC32C is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_DECOMPRESS_GZIP=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
+CONFIG_HAVE_LMB=y
+CONFIG_NLATTR=y
+CONFIG_GENERIC_ATOMIC64=y
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+CONFIG_ENABLE_WARN_DEPRECATED=y
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_STRIP_ASM_SYMS is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_DEBUG_FS=y
+# CONFIG_HEADERS_CHECK is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_SHIRQ is not set
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+CONFIG_DETECT_HUNG_TASK=y
+# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
+CONFIG_SCHED_DEBUG=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_TIMER_STATS is not set
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_SLUB_DEBUG_ON is not set
+# CONFIG_SLUB_STATS is not set
+# CONFIG_DEBUG_KMEMLEAK is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_LOCK_STAT is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+CONFIG_STACKTRACE=y
+# CONFIG_DEBUG_KOBJECT is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+CONFIG_DEBUG_INFO=y
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+# CONFIG_DEBUG_MEMORY_INIT is not set
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_DEBUG_SG is not set
+# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_DEBUG_CREDENTIALS is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
+# CONFIG_FAULT_INJECTION is not set
+# CONFIG_LATENCYTOP is not set
+CONFIG_SYSCTL_SYSCALL_CHECK=y
+# CONFIG_DEBUG_PAGEALLOC is not set
+CONFIG_NOP_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_DYNAMIC_FTRACE=y
+CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_RING_BUFFER=y
+CONFIG_EVENT_TRACING=y
+CONFIG_CONTEXT_SWITCH_TRACER=y
+CONFIG_RING_BUFFER_ALLOW_SWAP=y
+CONFIG_TRACING=y
+CONFIG_TRACING_SUPPORT=y
+CONFIG_FTRACE=y
+# CONFIG_FUNCTION_TRACER is not set
+# CONFIG_IRQSOFF_TRACER is not set
+# CONFIG_SCHED_TRACER is not set
+# CONFIG_ENABLE_DEFAULT_TRACERS is not set
+# CONFIG_BOOT_TRACER is not set
+CONFIG_BRANCH_PROFILE_NONE=y
+# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
+# CONFIG_PROFILE_ALL_BRANCHES is not set
+# CONFIG_STACK_TRACER is not set
+# CONFIG_KMEMTRACE is not set
+# CONFIG_WORKQUEUE_TRACER is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_RING_BUFFER_BENCHMARK is not set
+# CONFIG_DYNAMIC_DEBUG is not set
+# CONFIG_DMA_API_DEBUG is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_KGDB is not set
+# CONFIG_PPC_DISABLE_WERROR is not set
+CONFIG_PPC_WERROR=y
+CONFIG_PRINT_STACK_DEPTH=64
+# CONFIG_DEBUG_STACKOVERFLOW is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+# CONFIG_PPC_EMULATED_STATS is not set
+# CONFIG_CODE_PATCHING_SELFTEST is not set
+# CONFIG_FTR_FIXUP_SELFTEST is not set
+# CONFIG_MSI_BITMAP_SELFTEST is not set
+# CONFIG_XMON is not set
+# CONFIG_IRQSTACKS is not set
+# CONFIG_VIRQ_DEBUG is not set
+# CONFIG_BDI_SWITCH is not set
+CONFIG_PPC_EARLY_DEBUG=y
+# CONFIG_PPC_EARLY_DEBUG_LPAR is not set
+# CONFIG_PPC_EARLY_DEBUG_G5 is not set
+# CONFIG_PPC_EARLY_DEBUG_RTAS_PANEL is not set
+# CONFIG_PPC_EARLY_DEBUG_RTAS_CONSOLE is not set
+# CONFIG_PPC_EARLY_DEBUG_MAPLE is not set
+# CONFIG_PPC_EARLY_DEBUG_ISERIES is not set
+# CONFIG_PPC_EARLY_DEBUG_PAS_REALMODE is not set
+# CONFIG_PPC_EARLY_DEBUG_BEAT is not set
+CONFIG_PPC_EARLY_DEBUG_44x=y
+# CONFIG_PPC_EARLY_DEBUG_40x is not set
+# CONFIG_PPC_EARLY_DEBUG_CPM is not set
+# CONFIG_PPC_EARLY_DEBUG_USBGECKO is not set
+CONFIG_PPC_EARLY_DEBUG_44x_PHYSLOW=0x40000200
+CONFIG_PPC_EARLY_DEBUG_44x_PHYSHIGH=0x1
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
+# CONFIG_DEFAULT_SECURITY_SELINUX is not set
+# CONFIG_DEFAULT_SECURITY_SMACK is not set
+# CONFIG_DEFAULT_SECURITY_TOMOYO is not set
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_DEFAULT_SECURITY=""
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
+CONFIG_CRYPTO_AEAD2=y
+CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_BLKCIPHER2=y
+CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_PCOMP=y
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
+# CONFIG_CRYPTO_GF128MUL is not set
+# CONFIG_CRYPTO_NULL is not set
+CONFIG_CRYPTO_WORKQUEUE=y
+# CONFIG_CRYPTO_CRYPTD is not set
+# CONFIG_CRYPTO_AUTHENC is not set
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_SEQIV is not set
+
+#
+# Block modes
+#
+CONFIG_CRYPTO_CBC=y
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+CONFIG_CRYPTO_ECB=y
+# CONFIG_CRYPTO_LRW is not set
+CONFIG_CRYPTO_PCBC=y
+# CONFIG_CRYPTO_XTS is not set
+
+#
+# Hash modes
+#
+# CONFIG_CRYPTO_HMAC is not set
+# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
+
+#
+# Digest
+#
+# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_GHASH is not set
+# CONFIG_CRYPTO_MD4 is not set
+CONFIG_CRYPTO_MD5=y
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
+# CONFIG_CRYPTO_SHA1 is not set
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
+# CONFIG_CRYPTO_AES is not set
+# CONFIG_CRYPTO_ANUBIS is not set
+# CONFIG_CRYPTO_ARC4 is not set
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+CONFIG_CRYPTO_DES=y
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+
+#
+# Compression
+#
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_ZLIB is not set
+# CONFIG_CRYPTO_LZO is not set
+
+#
+# Random Number Generation
+#
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_HW is not set
+# CONFIG_PPC_CLOCK is not set
+# CONFIG_VIRTUALIZATION is not set
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index 12980d544654..dad617e2a88c 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -988,7 +988,7 @@ CONFIG_ACENIC=m
CONFIG_ACENIC_OMIT_TIGON_I=y
# CONFIG_DL2K is not set
CONFIG_E1000=y
-CONFIG_E1000E=m
+CONFIG_E1000E=y
# CONFIG_IP1000 is not set
# CONFIG_IGB is not set
# CONFIG_NS83820 is not set
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
index 41de3ddc9f24..16a14589bd40 100644
--- a/arch/powerpc/configs/pseries_defconfig
+++ b/arch/powerpc/configs/pseries_defconfig
@@ -804,7 +804,7 @@ CONFIG_ACENIC=m
CONFIG_ACENIC_OMIT_TIGON_I=y
# CONFIG_DL2K is not set
CONFIG_E1000=y
-CONFIG_E1000E=m
+CONFIG_E1000E=y
# CONFIG_IP1000 is not set
# CONFIG_IGB is not set
# CONFIG_NS83820 is not set
diff --git a/arch/powerpc/include/asm/asm-compat.h b/arch/powerpc/include/asm/asm-compat.h
index a9b91ed3d4b9..2048a6aeea91 100644
--- a/arch/powerpc/include/asm/asm-compat.h
+++ b/arch/powerpc/include/asm/asm-compat.h
@@ -21,6 +21,7 @@
/* operations for longs and pointers */
#define PPC_LL stringify_in_c(ld)
#define PPC_STL stringify_in_c(std)
+#define PPC_STLU stringify_in_c(stdu)
#define PPC_LCMPI stringify_in_c(cmpdi)
#define PPC_LONG stringify_in_c(.llong)
#define PPC_LONG_ALIGN stringify_in_c(.balign 8)
@@ -44,6 +45,7 @@
/* operations for longs and pointers */
#define PPC_LL stringify_in_c(lwz)
#define PPC_STL stringify_in_c(stw)
+#define PPC_STLU stringify_in_c(stwu)
#define PPC_LCMPI stringify_in_c(cmpwi)
#define PPC_LONG stringify_in_c(.long)
#define PPC_LONG_ALIGN stringify_in_c(.balign 4)
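
The PPC_STLU addition above follows the header's existing pattern: one token per word-size-dependent instruction, emitted through stringify_in_c() so the same name works from both assembly files and C inline asm. A minimal, host-buildable sketch of that trick, assuming a simplified stringify_in_c() (the kernel's real macro also has an __ASSEMBLY__ branch):

#include <stdio.h>

/* Simplified stand-in for the kernel's stringify_in_c(); assumption:
 * in C the token becomes a string literal usable inside inline asm. */
#define __stringify_in_c(...) #__VA_ARGS__
#define stringify_in_c(...)   __stringify_in_c(__VA_ARGS__) " "

#ifdef __powerpc64__
#define PPC_STLU stringify_in_c(stdu)   /* store doubleword with update */
#else
#define PPC_STLU stringify_in_c(stwu)   /* store word with update */
#endif

int main(void)
{
    /* A caller would paste this into an asm template on the target,
     * e.g. asm volatile(PPC_STLU "1, -16(1)"); */
    printf("PPC_STLU expands to: \"%s\"\n", PPC_STLU);
    return 0;
}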
diff --git a/arch/powerpc/include/asm/bug.h b/arch/powerpc/include/asm/bug.h
index 2c15212e1700..065c590c991d 100644
--- a/arch/powerpc/include/asm/bug.h
+++ b/arch/powerpc/include/asm/bug.h
@@ -85,12 +85,12 @@
} \
} while (0)
-#define __WARN() do { \
+#define __WARN_TAINT(taint) do { \
__asm__ __volatile__( \
"1: twi 31,0,0\n" \
_EMIT_BUG_ENTRY \
: : "i" (__FILE__), "i" (__LINE__), \
- "i" (BUGFLAG_WARNING), \
+ "i" (BUGFLAG_TAINT(taint)), \
"i" (sizeof(struct bug_entry))); \
} while (0)
@@ -104,7 +104,7 @@
"1: "PPC_TLNEI" %4,0\n" \
_EMIT_BUG_ENTRY \
: : "i" (__FILE__), "i" (__LINE__), \
- "i" (BUGFLAG_WARNING), \
+ "i" (BUGFLAG_TAINT(TAINT_WARN)), \
"i" (sizeof(struct bug_entry)), \
"r" (__ret_warn_on)); \
} \
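
The switch from BUGFLAG_WARNING to BUGFLAG_TAINT(taint) lets each WARN site record which taint flag the generic handler should apply when the trap fires. A small sketch of how such a flag word packs and unpacks the taint code; the constants mirror asm-generic/bug.h but are reproduced here only as an assumption for illustration:

#include <stdio.h>

/* Illustrative flag packing (assumed layout): bit 0 marks a warning,
 * the taint code lives in the upper bits of the bug_entry flags. */
#define BUGFLAG_WARNING      (1 << 0)
#define BUGFLAG_TAINT(taint) (BUGFLAG_WARNING | ((taint) << 8))
#define BUG_GET_TAINT(flags) ((flags) >> 8)

#define TAINT_WARN 9   /* example taint code */

int main(void)
{
    unsigned int flags = BUGFLAG_TAINT(TAINT_WARN);

    printf("bug_entry flags = 0x%x, taint to apply = %u\n",
           flags, BUG_GET_TAINT(flags));
    return 0;
}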
diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
index 81de6eb3455d..725634fc18c6 100644
--- a/arch/powerpc/include/asm/cache.h
+++ b/arch/powerpc/include/asm/cache.h
@@ -12,8 +12,12 @@
#define L1_CACHE_SHIFT 6
#define MAX_COPY_PREFETCH 4
#elif defined(CONFIG_PPC32)
-#define L1_CACHE_SHIFT 5
#define MAX_COPY_PREFETCH 4
+#if defined(CONFIG_PPC_47x)
+#define L1_CACHE_SHIFT 7
+#else
+#define L1_CACHE_SHIFT 5
+#endif
#else /* CONFIG_PPC64 */
#define L1_CACHE_SHIFT 7
#endif
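
Raising L1_CACHE_SHIFT to 7 on PPC_47x means 128-byte cache lines; L1_CACHE_BYTES and cacheline alignment all derive from the shift. A short sketch of that derivation and the usual alignment use, with the attribute written out explicitly for illustration:

#include <stdio.h>

#define L1_CACHE_SHIFT 7                      /* 128-byte lines on 47x */
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)

/* Typical consumer: pad a hot structure to a full cache line so two
 * CPUs never ping-pong the same line (false sharing). */
struct hot_counter {
    unsigned long value;
} __attribute__((aligned(L1_CACHE_BYTES)));

int main(void)
{
    printf("line size %d bytes, sizeof(struct hot_counter) = %zu\n",
           L1_CACHE_BYTES, sizeof(struct hot_counter));
    return 0;
}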
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index abb833b0e58f..e3cba4e1eb34 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -72,6 +72,7 @@ extern int machine_check_4xx(struct pt_regs *regs);
extern int machine_check_440A(struct pt_regs *regs);
extern int machine_check_e500(struct pt_regs *regs);
extern int machine_check_e200(struct pt_regs *regs);
+extern int machine_check_47x(struct pt_regs *regs);
/* NOTE WELL: Update identify_cpu() if fields are added or removed! */
struct cpu_spec {
@@ -365,6 +366,7 @@ extern const char *powerpc_base_platform;
#define CPU_FTRS_44X (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE)
#define CPU_FTRS_440x6 (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE | \
CPU_FTR_INDEXED_DCR)
+#define CPU_FTRS_47X (CPU_FTRS_440x6)
#define CPU_FTRS_E200 (CPU_FTR_USE_TB | CPU_FTR_SPE_COMP | \
CPU_FTR_NODSISRALIGN | CPU_FTR_COHERENT_ICACHE | \
CPU_FTR_UNIFIED_ID_CACHE | CPU_FTR_NOEXECUTE)
@@ -453,6 +455,9 @@ enum {
#ifdef CONFIG_44x
CPU_FTRS_44X | CPU_FTRS_440x6 |
#endif
+#ifdef CONFIG_PPC_47x
+ CPU_FTRS_47X |
+#endif
#ifdef CONFIG_E200
CPU_FTRS_E200 |
#endif
diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h
index 6d94d27ed850..a3954e4fcbe2 100644
--- a/arch/powerpc/include/asm/device.h
+++ b/arch/powerpc/include/asm/device.h
@@ -10,9 +10,6 @@ struct dma_map_ops;
struct device_node;
struct dev_archdata {
- /* Optional pointer to an OF device node */
- struct device_node *of_node;
-
/* DMA operations on that device */
struct dma_map_ops *dma_ops;
@@ -30,19 +27,8 @@ struct dev_archdata {
#endif
};
-static inline void dev_archdata_set_node(struct dev_archdata *ad,
- struct device_node *np)
-{
- ad->of_node = np;
-}
-
-static inline struct device_node *
-dev_archdata_get_node(const struct dev_archdata *ad)
-{
- return ad->of_node;
-}
-
struct pdev_archdata {
+ u64 dma_mask;
};
#endif /* _ASM_POWERPC_DEVICE_H */
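
With archdata->of_node and its accessors gone, powerpc code reads the firmware node through the generic dev->of_node member instead. A simplified before/after illustration; the struct definitions below are stand-ins, only the access pattern matters:

#include <stdio.h>

/* Stand-in types; the real ones live in <linux/device.h>. */
struct device_node { const char *full_name; };
struct device      { struct device_node *of_node; };

static const char *bound_node_name(const struct device *dev)
{
    /* after this patch: use the generic field directly, instead of
     * dev_archdata_get_node(&dev->archdata) */
    return dev->of_node ? dev->of_node->full_name : "(none)";
}

int main(void)
{
    struct device_node np  = { .full_name = "/soc/serial@4500" };
    struct device      dev = { .of_node = &np };

    printf("device bound to %s\n", bound_node_name(&dev));
    return 0;
}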
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index f0275818b95c..5119b7db3142 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -228,6 +228,7 @@
#define H_JOIN 0x298
#define H_VASI_STATE 0x2A4
#define H_ENABLE_CRQ 0x2B0
+#define H_GET_EM_PARMS 0x2B8
#define H_SET_MPP 0x2D0
#define H_GET_MPP 0x2D4
#define MAX_HCALL_OPCODE H_GET_MPP
@@ -281,6 +282,7 @@ long plpar_hcall_raw(unsigned long opcode, unsigned long *retbuf, ...);
*/
#define PLPAR_HCALL9_BUFSIZE 9
long plpar_hcall9(unsigned long opcode, unsigned long *retbuf, ...);
+long plpar_hcall9_raw(unsigned long opcode, unsigned long *retbuf, ...);
/* For hcall instrumentation. One structure per-hcall, per-CPU */
struct hcall_stats {
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index 9f4c9d4f5803..bd100fcf40d0 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -130,43 +130,5 @@ static inline int irqs_disabled_flags(unsigned long flags)
*/
struct irq_chip;
-#ifdef CONFIG_PERF_EVENTS
-
-#ifdef CONFIG_PPC64
-static inline unsigned long test_perf_event_pending(void)
-{
- unsigned long x;
-
- asm volatile("lbz %0,%1(13)"
- : "=r" (x)
- : "i" (offsetof(struct paca_struct, perf_event_pending)));
- return x;
-}
-
-static inline void set_perf_event_pending(void)
-{
- asm volatile("stb %0,%1(13)" : :
- "r" (1),
- "i" (offsetof(struct paca_struct, perf_event_pending)));
-}
-
-static inline void clear_perf_event_pending(void)
-{
- asm volatile("stb %0,%1(13)" : :
- "r" (0),
- "i" (offsetof(struct paca_struct, perf_event_pending)));
-}
-#endif /* CONFIG_PPC64 */
-
-#else /* CONFIG_PERF_EVENTS */
-
-static inline unsigned long test_perf_event_pending(void)
-{
- return 0;
-}
-
-static inline void clear_perf_event_pending(void) {}
-#endif /* CONFIG_PERF_EVENTS */
-
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_HW_IRQ_H */
diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h
index 7e06b43720d3..a6ca6da1430b 100644
--- a/arch/powerpc/include/asm/kexec.h
+++ b/arch/powerpc/include/asm/kexec.h
@@ -31,6 +31,10 @@
#define KEXEC_ARCH KEXEC_ARCH_PPC
#endif
+#define KEXEC_STATE_NONE 0
+#define KEXEC_STATE_IRQS_OFF 1
+#define KEXEC_STATE_REAL_MODE 2
+
#ifndef __ASSEMBLY__
#include <linux/cpumask.h>
#include <asm/reg.h>
diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
index 916369575c97..bca8fdcd2542 100644
--- a/arch/powerpc/include/asm/kmap_types.h
+++ b/arch/powerpc/include/asm/kmap_types.h
@@ -26,6 +26,7 @@ enum km_type {
KM_SOFTIRQ1,
KM_PPC_SYNC_PAGE,
KM_PPC_SYNC_ICACHE,
+ KM_KDB,
KM_TYPE_NR
};
diff --git a/arch/powerpc/include/asm/kvm.h b/arch/powerpc/include/asm/kvm.h
index 81f3b0b5601e..6c5547d82bbe 100644
--- a/arch/powerpc/include/asm/kvm.h
+++ b/arch/powerpc/include/asm/kvm.h
@@ -77,4 +77,14 @@ struct kvm_debug_exit_arch {
struct kvm_guest_debug_arch {
};
+#define KVM_REG_MASK 0x001f
+#define KVM_REG_EXT_MASK 0xffe0
+#define KVM_REG_GPR 0x0000
+#define KVM_REG_FPR 0x0020
+#define KVM_REG_QPR 0x0040
+#define KVM_REG_FQPR 0x0060
+
+#define KVM_INTERRUPT_SET -1U
+#define KVM_INTERRUPT_UNSET -2U
+
#endif /* __LINUX_KVM_POWERPC_H */
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index aadf2dd6f84e..c5ea4cda34b3 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -88,6 +88,8 @@
#define BOOK3S_HFLAG_DCBZ32 0x1
#define BOOK3S_HFLAG_SLB 0x2
+#define BOOK3S_HFLAG_PAIRED_SINGLE 0x4
+#define BOOK3S_HFLAG_NATIVE_PS 0x8
#define RESUME_FLAG_NV (1<<0) /* Reload guest nonvolatile state? */
#define RESUME_FLAG_HOST (1<<1) /* Resume host? */
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index db7db0a96967..6f74d93725a0 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -22,46 +22,47 @@
#include <linux/types.h>
#include <linux/kvm_host.h>
-#include <asm/kvm_book3s_64_asm.h>
+#include <asm/kvm_book3s_asm.h>
struct kvmppc_slb {
u64 esid;
u64 vsid;
u64 orige;
u64 origv;
- bool valid;
- bool Ks;
- bool Kp;
- bool nx;
- bool large; /* PTEs are 16MB */
- bool tb; /* 1TB segment */
- bool class;
+ bool valid : 1;
+ bool Ks : 1;
+ bool Kp : 1;
+ bool nx : 1;
+ bool large : 1; /* PTEs are 16MB */
+ bool tb : 1; /* 1TB segment */
+ bool class : 1;
};
struct kvmppc_sr {
u32 raw;
u32 vsid;
- bool Ks;
- bool Kp;
- bool nx;
+ bool Ks : 1;
+ bool Kp : 1;
+ bool nx : 1;
+ bool valid : 1;
};
struct kvmppc_bat {
u64 raw;
u32 bepi;
u32 bepi_mask;
- bool vs;
- bool vp;
u32 brpn;
u8 wimg;
u8 pp;
+ bool vs : 1;
+ bool vp : 1;
};
struct kvmppc_sid_map {
u64 guest_vsid;
u64 guest_esid;
u64 host_vsid;
- bool valid;
+ bool valid : 1;
};
#define SID_MAP_BITS 9
@@ -70,7 +71,7 @@ struct kvmppc_sid_map {
struct kvmppc_vcpu_book3s {
struct kvm_vcpu vcpu;
- struct kvmppc_book3s_shadow_vcpu shadow_vcpu;
+ struct kvmppc_book3s_shadow_vcpu *shadow_vcpu;
struct kvmppc_sid_map sid_map[SID_MAP_NUM];
struct kvmppc_slb slb[64];
struct {
@@ -82,9 +83,10 @@ struct kvmppc_vcpu_book3s {
struct kvmppc_bat ibat[8];
struct kvmppc_bat dbat[8];
u64 hid[6];
+ u64 gqr[8];
int slb_nr;
+ u32 dsisr;
u64 sdr1;
- u64 dsisr;
u64 hior;
u64 msr_mask;
u64 vsid_first;
@@ -98,15 +100,15 @@ struct kvmppc_vcpu_book3s {
#define CONTEXT_GUEST 1
#define CONTEXT_GUEST_END 2
-#define VSID_REAL 0xfffffffffff00000
-#define VSID_REAL_DR 0xffffffffffe00000
-#define VSID_REAL_IR 0xffffffffffd00000
-#define VSID_BAT 0xffffffffffc00000
-#define VSID_PR 0x8000000000000000
+#define VSID_REAL 0x1fffffffffc00000ULL
+#define VSID_BAT 0x1fffffffffb00000ULL
+#define VSID_REAL_DR 0x2000000000000000ULL
+#define VSID_REAL_IR 0x4000000000000000ULL
+#define VSID_PR 0x8000000000000000ULL
-extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, u64 ea, u64 ea_mask);
+extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong ea, ulong ea_mask);
extern void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 vp, u64 vp_mask);
-extern void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, u64 pa_start, u64 pa_end);
+extern void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end);
extern void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 new_msr);
extern void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu);
@@ -114,11 +116,13 @@ extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu);
extern struct kvmppc_pte *kvmppc_mmu_find_pte(struct kvm_vcpu *vcpu, u64 ea, bool data);
-extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong eaddr, int size, void *ptr, bool data);
-extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong eaddr, int size, void *ptr);
+extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
+extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
extern void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec);
extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
bool upper, u32 val);
+extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
+extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu);
extern u32 kvmppc_trampoline_lowmem;
extern u32 kvmppc_trampoline_enter;
@@ -126,6 +130,8 @@ extern void kvmppc_rmcall(ulong srr0, ulong srr1);
extern void kvmppc_load_up_fpu(void);
extern void kvmppc_load_up_altivec(void);
extern void kvmppc_load_up_vsx(void);
+extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst);
+extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst);
static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
{
@@ -140,7 +146,108 @@ static inline ulong dsisr(void)
}
extern void kvm_return_point(void);
+static inline struct kvmppc_book3s_shadow_vcpu *to_svcpu(struct kvm_vcpu *vcpu);
+
+static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
+{
+ if ( num < 14 ) {
+ to_svcpu(vcpu)->gpr[num] = val;
+ to_book3s(vcpu)->shadow_vcpu->gpr[num] = val;
+ } else
+ vcpu->arch.gpr[num] = val;
+}
+
+static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
+{
+ if ( num < 14 )
+ return to_svcpu(vcpu)->gpr[num];
+ else
+ return vcpu->arch.gpr[num];
+}
+
+static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
+{
+ to_svcpu(vcpu)->cr = val;
+ to_book3s(vcpu)->shadow_vcpu->cr = val;
+}
+
+static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
+{
+ return to_svcpu(vcpu)->cr;
+}
+
+static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
+{
+ to_svcpu(vcpu)->xer = val;
+ to_book3s(vcpu)->shadow_vcpu->xer = val;
+}
+
+static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
+{
+ return to_svcpu(vcpu)->xer;
+}
+
+static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
+{
+ to_svcpu(vcpu)->ctr = val;
+}
+
+static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
+{
+ return to_svcpu(vcpu)->ctr;
+}
+
+static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
+{
+ to_svcpu(vcpu)->lr = val;
+}
+
+static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
+{
+ return to_svcpu(vcpu)->lr;
+}
+
+static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
+{
+ to_svcpu(vcpu)->pc = val;
+}
+
+static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
+{
+ return to_svcpu(vcpu)->pc;
+}
+
+static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
+{
+ ulong pc = kvmppc_get_pc(vcpu);
+ struct kvmppc_book3s_shadow_vcpu *svcpu = to_svcpu(vcpu);
+
+ /* Load the instruction manually if it failed to do so in the
+ * exit path */
+ if (svcpu->last_inst == KVM_INST_FETCH_FAILED)
+ kvmppc_ld(vcpu, &pc, sizeof(u32), &svcpu->last_inst, false);
+
+ return svcpu->last_inst;
+}
+
+static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
+{
+ return to_svcpu(vcpu)->fault_dar;
+}
+
+/* Magic register values loaded into r3 and r4 before the 'sc' assembly
+ * instruction for the OSI hypercalls */
+#define OSI_SC_MAGIC_R3 0x113724FA
+#define OSI_SC_MAGIC_R4 0x77810F9B
#define INS_DCBZ 0x7c0007ec
+/* Also add subarch specific defines */
+
+#ifdef CONFIG_PPC_BOOK3S_32
+#include <asm/kvm_book3s_32.h>
+#else
+#include <asm/kvm_book3s_64.h>
+#endif
+
#endif /* __ASM_KVM_BOOK3S_H__ */
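
A side effect worth noting in the kvmppc_slb/kvmppc_sr/kvmppc_bat changes above is the move from plain bool members to 1-bit bitfields. The sketch below only demonstrates the size effect of that change on a host compiler; the resulting sizes are illustrative and compiler-dependent, not guaranteed by the patch (and "class" is renamed to "cls" here):

#include <stdio.h>
#include <stdbool.h>

/* Seven plain bools each occupy at least a byte; seven 1-bit fields
 * can share one storage unit. Field names follow the struct above. */
struct slb_plain {
    unsigned long long esid, vsid, orige, origv;
    bool valid, Ks, Kp, nx, large, tb, cls;
};

struct slb_packed {
    unsigned long long esid, vsid, orige, origv;
    bool valid : 1, Ks : 1, Kp : 1, nx : 1, large : 1, tb : 1, cls : 1;
};

int main(void)
{
    printf("plain bools:  %zu bytes per SLB entry\n", sizeof(struct slb_plain));
    printf("1-bit fields: %zu bytes per SLB entry\n", sizeof(struct slb_packed));
    return 0;
}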
diff --git a/arch/powerpc/include/asm/kvm_book3s_32.h b/arch/powerpc/include/asm/kvm_book3s_32.h
new file mode 100644
index 000000000000..de604db135f5
--- /dev/null
+++ b/arch/powerpc/include/asm/kvm_book3s_32.h
@@ -0,0 +1,42 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright SUSE Linux Products GmbH 2010
+ *
+ * Authors: Alexander Graf <agraf@suse.de>
+ */
+
+#ifndef __ASM_KVM_BOOK3S_32_H__
+#define __ASM_KVM_BOOK3S_32_H__
+
+static inline struct kvmppc_book3s_shadow_vcpu *to_svcpu(struct kvm_vcpu *vcpu)
+{
+ return to_book3s(vcpu)->shadow_vcpu;
+}
+
+#define PTE_SIZE 12
+#define VSID_ALL 0
+#define SR_INVALID 0x00000001 /* VSID 1 should always be unused */
+#define SR_KP 0x20000000
+#define PTE_V 0x80000000
+#define PTE_SEC 0x00000040
+#define PTE_M 0x00000010
+#define PTE_R 0x00000100
+#define PTE_C 0x00000080
+
+#define SID_SHIFT 28
+#define ESID_MASK 0xf0000000
+#define VSID_MASK 0x00fffffff0000000ULL
+
+#endif /* __ASM_KVM_BOOK3S_32_H__ */
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
new file mode 100644
index 000000000000..4cadd612d575
--- /dev/null
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -0,0 +1,28 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright SUSE Linux Products GmbH 2010
+ *
+ * Authors: Alexander Graf <agraf@suse.de>
+ */
+
+#ifndef __ASM_KVM_BOOK3S_64_H__
+#define __ASM_KVM_BOOK3S_64_H__
+
+static inline struct kvmppc_book3s_shadow_vcpu *to_svcpu(struct kvm_vcpu *vcpu)
+{
+ return &get_paca()->shadow_vcpu;
+}
+
+#endif /* __ASM_KVM_BOOK3S_64_H__ */
diff --git a/arch/powerpc/include/asm/kvm_book3s_64_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h
index 183461b48407..36fdb3aff30b 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64_asm.h
+++ b/arch/powerpc/include/asm/kvm_book3s_asm.h
@@ -22,7 +22,7 @@
#ifdef __ASSEMBLY__
-#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+#ifdef CONFIG_KVM_BOOK3S_HANDLER
#include <asm/kvm_asm.h>
@@ -55,7 +55,7 @@ kvmppc_resume_\intno:
.macro DO_KVM intno
.endm
-#endif /* CONFIG_KVM_BOOK3S_64_HANDLER */
+#endif /* CONFIG_KVM_BOOK3S_HANDLER */
#else /*__ASSEMBLY__ */
@@ -63,12 +63,33 @@ struct kvmppc_book3s_shadow_vcpu {
ulong gpr[14];
u32 cr;
u32 xer;
+
+ u32 fault_dsisr;
+ u32 last_inst;
+ ulong ctr;
+ ulong lr;
+ ulong pc;
+ ulong shadow_srr1;
+ ulong fault_dar;
+
ulong host_r1;
ulong host_r2;
ulong handler;
ulong scratch0;
ulong scratch1;
ulong vmhandler;
+ u8 in_guest;
+
+#ifdef CONFIG_PPC_BOOK3S_32
+ u32 sr[16]; /* Guest SRs */
+#endif
+#ifdef CONFIG_PPC_BOOK3S_64
+ u8 slb_max; /* highest used guest slb entry */
+ struct {
+ u64 esid;
+ u64 vsid;
+ } slb[64]; /* guest SLB */
+#endif
};
#endif /*__ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/kvm_booke.h b/arch/powerpc/include/asm/kvm_booke.h
new file mode 100644
index 000000000000..9c9ba3d59b1b
--- /dev/null
+++ b/arch/powerpc/include/asm/kvm_booke.h
@@ -0,0 +1,96 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright SUSE Linux Products GmbH 2010
+ *
+ * Authors: Alexander Graf <agraf@suse.de>
+ */
+
+#ifndef __ASM_KVM_BOOKE_H__
+#define __ASM_KVM_BOOKE_H__
+
+#include <linux/types.h>
+#include <linux/kvm_host.h>
+
+static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
+{
+ vcpu->arch.gpr[num] = val;
+}
+
+static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
+{
+ return vcpu->arch.gpr[num];
+}
+
+static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
+{
+ vcpu->arch.cr = val;
+}
+
+static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.cr;
+}
+
+static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
+{
+ vcpu->arch.xer = val;
+}
+
+static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.xer;
+}
+
+static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.last_inst;
+}
+
+static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
+{
+ vcpu->arch.ctr = val;
+}
+
+static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.ctr;
+}
+
+static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
+{
+ vcpu->arch.lr = val;
+}
+
+static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.lr;
+}
+
+static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
+{
+ vcpu->arch.pc = val;
+}
+
+static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.pc;
+}
+
+static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.fault_dear;
+}
+
+#endif /* __ASM_KVM_BOOKE_H__ */
diff --git a/arch/powerpc/include/asm/kvm_fpu.h b/arch/powerpc/include/asm/kvm_fpu.h
new file mode 100644
index 000000000000..94f05de9ad04
--- /dev/null
+++ b/arch/powerpc/include/asm/kvm_fpu.h
@@ -0,0 +1,85 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright Novell Inc. 2010
+ *
+ * Authors: Alexander Graf <agraf@suse.de>
+ */
+
+#ifndef __ASM_KVM_FPU_H__
+#define __ASM_KVM_FPU_H__
+
+#include <linux/types.h>
+
+extern void fps_fres(struct thread_struct *t, u32 *dst, u32 *src1);
+extern void fps_frsqrte(struct thread_struct *t, u32 *dst, u32 *src1);
+extern void fps_fsqrts(struct thread_struct *t, u32 *dst, u32 *src1);
+
+extern void fps_fadds(struct thread_struct *t, u32 *dst, u32 *src1, u32 *src2);
+extern void fps_fdivs(struct thread_struct *t, u32 *dst, u32 *src1, u32 *src2);
+extern void fps_fmuls(struct thread_struct *t, u32 *dst, u32 *src1, u32 *src2);
+extern void fps_fsubs(struct thread_struct *t, u32 *dst, u32 *src1, u32 *src2);
+
+extern void fps_fmadds(struct thread_struct *t, u32 *dst, u32 *src1, u32 *src2,
+ u32 *src3);
+extern void fps_fmsubs(struct thread_struct *t, u32 *dst, u32 *src1, u32 *src2,
+ u32 *src3);
+extern void fps_fnmadds(struct thread_struct *t, u32 *dst, u32 *src1, u32 *src2,
+ u32 *src3);
+extern void fps_fnmsubs(struct thread_struct *t, u32 *dst, u32 *src1, u32 *src2,
+ u32 *src3);
+extern void fps_fsel(struct thread_struct *t, u32 *dst, u32 *src1, u32 *src2,
+ u32 *src3);
+
+#define FPD_ONE_IN(name) extern void fpd_ ## name(u64 *fpscr, u32 *cr, \
+ u64 *dst, u64 *src1);
+#define FPD_TWO_IN(name) extern void fpd_ ## name(u64 *fpscr, u32 *cr, \
+ u64 *dst, u64 *src1, u64 *src2);
+#define FPD_THREE_IN(name) extern void fpd_ ## name(u64 *fpscr, u32 *cr, \
+ u64 *dst, u64 *src1, u64 *src2, u64 *src3);
+
+extern void fpd_fcmpu(u64 *fpscr, u32 *cr, u64 *src1, u64 *src2);
+extern void fpd_fcmpo(u64 *fpscr, u32 *cr, u64 *src1, u64 *src2);
+
+FPD_ONE_IN(fsqrts)
+FPD_ONE_IN(frsqrtes)
+FPD_ONE_IN(fres)
+FPD_ONE_IN(frsp)
+FPD_ONE_IN(fctiw)
+FPD_ONE_IN(fctiwz)
+FPD_ONE_IN(fsqrt)
+FPD_ONE_IN(fre)
+FPD_ONE_IN(frsqrte)
+FPD_ONE_IN(fneg)
+FPD_ONE_IN(fabs)
+FPD_TWO_IN(fadds)
+FPD_TWO_IN(fsubs)
+FPD_TWO_IN(fdivs)
+FPD_TWO_IN(fmuls)
+FPD_TWO_IN(fcpsgn)
+FPD_TWO_IN(fdiv)
+FPD_TWO_IN(fadd)
+FPD_TWO_IN(fmul)
+FPD_TWO_IN(fsub)
+FPD_THREE_IN(fmsubs)
+FPD_THREE_IN(fmadds)
+FPD_THREE_IN(fnmsubs)
+FPD_THREE_IN(fnmadds)
+FPD_THREE_IN(fsel)
+FPD_THREE_IN(fmsub)
+FPD_THREE_IN(fmadd)
+FPD_THREE_IN(fnmsub)
+FPD_THREE_IN(fnmadd)
+
+#endif
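
The FPD_ONE_IN/FPD_TWO_IN/FPD_THREE_IN macros above are declaration generators: each invocation pastes the mnemonic into an fpd_-prefixed prototype. A tiny host-side sketch of the same token-pasting pattern, with dummy bodies so the example links; the real fpd_* routines are assembly helpers, which this does not reproduce:

#include <stdio.h>
#include <stdint.h>

/* Same generator idea, but emitting stub definitions so the example
 * is self-contained. The real macros emit extern prototypes only. */
#define FPD_TWO_IN(name)                                             \
    static void fpd_ ## name(uint64_t *fpscr, uint32_t *cr,          \
                             uint64_t *dst, uint64_t *src1,          \
                             uint64_t *src2)                         \
    {                                                                \
        (void)fpscr; (void)cr;                                       \
        printf("emulating " #name "\n");                             \
        *dst = *src1 ^ *src2;   /* placeholder, not real FP math */  \
    }

FPD_TWO_IN(fadd)
FPD_TWO_IN(fmul)

int main(void)
{
    uint64_t fpscr = 0, a = 1, b = 2, d = 0;
    uint32_t cr = 0;

    fpd_fadd(&fpscr, &cr, &d, &a, &b);
    fpd_fmul(&fpscr, &cr, &d, &a, &b);
    return 0;
}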
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 5e5bae7e152f..0c9ad869decd 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -66,7 +66,7 @@ struct kvm_vcpu_stat {
u32 dec_exits;
u32 ext_intr_exits;
u32 halt_wakeup;
-#ifdef CONFIG_PPC64
+#ifdef CONFIG_PPC_BOOK3S
u32 pf_storage;
u32 pf_instruc;
u32 sp_storage;
@@ -124,12 +124,12 @@ struct kvm_arch {
};
struct kvmppc_pte {
- u64 eaddr;
+ ulong eaddr;
u64 vpage;
- u64 raddr;
- bool may_read;
- bool may_write;
- bool may_execute;
+ ulong raddr;
+ bool may_read : 1;
+ bool may_write : 1;
+ bool may_execute : 1;
};
struct kvmppc_mmu {
@@ -145,7 +145,7 @@ struct kvmppc_mmu {
int (*xlate)(struct kvm_vcpu *vcpu, gva_t eaddr, struct kvmppc_pte *pte, bool data);
void (*reset_msr)(struct kvm_vcpu *vcpu);
void (*tlbie)(struct kvm_vcpu *vcpu, ulong addr, bool large);
- int (*esid_to_vsid)(struct kvm_vcpu *vcpu, u64 esid, u64 *vsid);
+ int (*esid_to_vsid)(struct kvm_vcpu *vcpu, ulong esid, u64 *vsid);
u64 (*ea_to_vp)(struct kvm_vcpu *vcpu, gva_t eaddr, bool data);
bool (*is_dcbz32)(struct kvm_vcpu *vcpu);
};
@@ -160,7 +160,7 @@ struct hpte_cache {
struct kvm_vcpu_arch {
ulong host_stack;
u32 host_pid;
-#ifdef CONFIG_PPC64
+#ifdef CONFIG_PPC_BOOK3S
ulong host_msr;
ulong host_r2;
void *host_retip;
@@ -175,7 +175,7 @@ struct kvm_vcpu_arch {
ulong gpr[32];
u64 fpr[32];
- u32 fpscr;
+ u64 fpscr;
#ifdef CONFIG_ALTIVEC
vector128 vr[32];
@@ -186,19 +186,23 @@ struct kvm_vcpu_arch {
u64 vsr[32];
#endif
+#ifdef CONFIG_PPC_BOOK3S
+ /* For Gekko paired singles */
+ u32 qpr[32];
+#endif
+
+#ifdef CONFIG_BOOKE
ulong pc;
ulong ctr;
ulong lr;
-#ifdef CONFIG_BOOKE
ulong xer;
u32 cr;
#endif
ulong msr;
-#ifdef CONFIG_PPC64
+#ifdef CONFIG_PPC_BOOK3S
ulong shadow_msr;
- ulong shadow_srr1;
ulong hflags;
ulong guest_owned_ext;
#endif
@@ -253,20 +257,22 @@ struct kvm_vcpu_arch {
struct dentry *debugfs_exit_timing;
#endif
+#ifdef CONFIG_BOOKE
u32 last_inst;
-#ifdef CONFIG_PPC64
- ulong fault_dsisr;
-#endif
ulong fault_dear;
ulong fault_esr;
ulong queued_dear;
ulong queued_esr;
+#endif
gpa_t paddr_accessed;
u8 io_gpr; /* GPR used as IO source/target */
u8 mmio_is_bigendian;
+ u8 mmio_sign_extend;
u8 dcr_needed;
u8 dcr_is_write;
+ u8 osi_needed;
+ u8 osi_enabled;
u32 cpr0_cfgaddr; /* holds the last set cpr0_cfgaddr */
@@ -275,7 +281,7 @@ struct kvm_vcpu_arch {
u64 dec_jiffies;
unsigned long pending_exceptions;
-#ifdef CONFIG_PPC64
+#ifdef CONFIG_PPC_BOOK3S
struct hpte_cache hpte_cache[HPTEG_CACHE_NUM];
int hpte_cache_offset;
#endif
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index e2642829e435..18d139ec2d22 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -30,6 +30,8 @@
#include <linux/kvm_host.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/kvm_book3s.h>
+#else
+#include <asm/kvm_booke.h>
#endif
enum emulation_result {
@@ -37,6 +39,7 @@ enum emulation_result {
EMULATE_DO_MMIO, /* kvm_run filled with MMIO request */
EMULATE_DO_DCR, /* kvm_run filled with DCR request */
EMULATE_FAIL, /* can't emulate this instruction */
+ EMULATE_AGAIN, /* something went wrong. go again */
};
extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
@@ -48,8 +51,11 @@ extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int rt, unsigned int bytes,
int is_bigendian);
+extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ unsigned int rt, unsigned int bytes,
+ int is_bigendian);
extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
- u32 val, unsigned int bytes, int is_bigendian);
+ u64 val, unsigned int bytes, int is_bigendian);
extern int kvmppc_emulate_instruction(struct kvm_run *run,
struct kvm_vcpu *vcpu);
@@ -63,6 +69,7 @@ extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode);
extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid);
extern void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu);
+extern int kvmppc_mmu_init(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
@@ -88,6 +95,8 @@ extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
struct kvm_interrupt *irq);
+extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
+ struct kvm_interrupt *irq);
extern int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int op, int *advance);
@@ -99,81 +108,37 @@ extern void kvmppc_booke_exit(void);
extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu);
-#ifdef CONFIG_PPC_BOOK3S
-
-/* We assume we're always acting on the current vcpu */
-
-static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
-{
- if ( num < 14 ) {
- get_paca()->shadow_vcpu.gpr[num] = val;
- to_book3s(vcpu)->shadow_vcpu.gpr[num] = val;
- } else
- vcpu->arch.gpr[num] = val;
-}
-
-static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
-{
- if ( num < 14 )
- return get_paca()->shadow_vcpu.gpr[num];
- else
- return vcpu->arch.gpr[num];
-}
-
-static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
-{
- get_paca()->shadow_vcpu.cr = val;
- to_book3s(vcpu)->shadow_vcpu.cr = val;
-}
-
-static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
-{
- return get_paca()->shadow_vcpu.cr;
-}
-
-static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
-{
- get_paca()->shadow_vcpu.xer = val;
- to_book3s(vcpu)->shadow_vcpu.xer = val;
-}
-
-static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
+/*
+ * Cuts out inst bits with ordering according to spec.
+ * That means the leftmost (most significant) bit is numbered zero.
+ * All given bits are included.
+ */
+static inline u32 kvmppc_get_field(u64 inst, int msb, int lsb)
{
- return get_paca()->shadow_vcpu.xer;
-}
+ u32 r;
+ u32 mask;
-#else
+ BUG_ON(msb > lsb);
-static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
-{
- vcpu->arch.gpr[num] = val;
-}
+ mask = (1 << (lsb - msb + 1)) - 1;
+ r = (inst >> (63 - lsb)) & mask;
-static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
-{
- return vcpu->arch.gpr[num];
+ return r;
}
-static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
+/*
+ * Replaces inst bits with ordering according to spec.
+ */
+static inline u32 kvmppc_set_field(u64 inst, int msb, int lsb, int value)
{
- vcpu->arch.cr = val;
-}
+ u32 r;
+ u32 mask;
-static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
-{
- return vcpu->arch.cr;
-}
+ BUG_ON(msb > lsb);
-static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
-{
- vcpu->arch.xer = val;
-}
+ mask = ((1 << (lsb - msb + 1)) - 1) << (63 - lsb);
+ r = (inst & ~mask) | ((value << (63 - lsb)) & mask);
-static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
-{
- return vcpu->arch.xer;
+ return r;
}
-#endif
-
#endif /* __POWERPC_KVM_PPC_H__ */
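
The new kvmppc_get_field()/kvmppc_set_field() helpers above use IBM bit numbering: bit 0 is the most significant bit of the 64-bit argument, so a field is named by its leftmost (msb) and rightmost (lsb) positions, and a 32-bit instruction held in the low half of the argument has its architectural bit N at 64-bit position N + 32. A minimal user-space sketch of the same arithmetic, handy for checking an extraction by hand (the instruction word and field below are illustrative, not taken from this patch):

#include <stdint.h>
#include <stdio.h>

/* Same math as kvmppc_get_field(): extract bits msb..lsb of a 64-bit
 * value, IBM numbering (bit 0 = most significant bit). */
static uint32_t get_field(uint64_t v, int msb, int lsb)
{
        uint32_t mask = (1u << (lsb - msb + 1)) - 1;

        return (v >> (63 - lsb)) & mask;
}

int main(void)
{
        /* 0x7c6802a6 encodes "mflr r3"; the RT field sits in instruction
         * bits 6..10, i.e. 64-bit bits 38..42 of this value. */
        uint64_t inst = 0x7c6802a6;

        printf("RT = %u\n", get_field(inst, 6 + 32, 10 + 32)); /* prints RT = 3 */
        return 0;
}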
diff --git a/arch/powerpc/include/asm/macio.h b/arch/powerpc/include/asm/macio.h
index a062c57696d0..19a661b4cb98 100644
--- a/arch/powerpc/include/asm/macio.h
+++ b/arch/powerpc/include/asm/macio.h
@@ -108,7 +108,7 @@ static inline void* macio_get_drvdata(struct macio_dev *dev)
static inline struct device_node *macio_get_of_node(struct macio_dev *mdev)
{
- return mdev->ofdev.node;
+ return mdev->ofdev.dev.of_node;
}
#ifdef CONFIG_PCI
diff --git a/arch/powerpc/include/asm/mmu-44x.h b/arch/powerpc/include/asm/mmu-44x.h
index 0372669383a8..bf52d704fc47 100644
--- a/arch/powerpc/include/asm/mmu-44x.h
+++ b/arch/powerpc/include/asm/mmu-44x.h
@@ -40,7 +40,7 @@
#define PPC44x_TLB_I 0x00000400 /* Caching is inhibited */
#define PPC44x_TLB_M 0x00000200 /* Memory is coherent */
#define PPC44x_TLB_G 0x00000100 /* Memory is guarded */
-#define PPC44x_TLB_E 0x00000080 /* Memory is guarded */
+#define PPC44x_TLB_E 0x00000080 /* Memory is little endian */
#define PPC44x_TLB_PERM_MASK 0x0000003f
#define PPC44x_TLB_UX 0x00000020 /* User execution */
@@ -53,6 +53,52 @@
/* Number of TLB entries */
#define PPC44x_TLB_SIZE 64
+/* 47x bits */
+#define PPC47x_MMUCR_TID 0x0000ffff
+#define PPC47x_MMUCR_STS 0x00010000
+
+/* Page identification fields */
+#define PPC47x_TLB0_EPN_MASK 0xfffff000 /* Effective Page Number */
+#define PPC47x_TLB0_VALID 0x00000800 /* Valid flag */
+#define PPC47x_TLB0_TS 0x00000400 /* Translation address space */
+#define PPC47x_TLB0_4K 0x00000000
+#define PPC47x_TLB0_16K 0x00000010
+#define PPC47x_TLB0_64K 0x00000030
+#define PPC47x_TLB0_1M 0x00000070
+#define PPC47x_TLB0_16M 0x000000f0
+#define PPC47x_TLB0_256M 0x000001f0
+#define PPC47x_TLB0_1G 0x000003f0
+#define PPC47x_TLB0_BOLTED_R 0x00000008 /* tlbre only */
+
+/* Translation fields */
+#define PPC47x_TLB1_RPN_MASK 0xfffff000 /* Real Page Number */
+#define PPC47x_TLB1_ERPN_MASK 0x000003ff
+
+/* Storage attribute and access control fields */
+#define PPC47x_TLB2_ATTR_MASK 0x0003ff80
+#define PPC47x_TLB2_IL1I 0x00020000 /* Inhibit L1 instruction cache */
+#define PPC47x_TLB2_IL1D 0x00010000 /* Inhibit L1 data cache */
+#define PPC47x_TLB2_U0 0x00008000 /* User 0 */
+#define PPC47x_TLB2_U1 0x00004000 /* User 1 */
+#define PPC47x_TLB2_U2 0x00002000 /* User 2 */
+#define PPC47x_TLB2_U3 0x00001000 /* User 3 */
+#define PPC47x_TLB2_W 0x00000800 /* Caching is write-through */
+#define PPC47x_TLB2_I 0x00000400 /* Caching is inhibited */
+#define PPC47x_TLB2_M 0x00000200 /* Memory is coherent */
+#define PPC47x_TLB2_G 0x00000100 /* Memory is guarded */
+#define PPC47x_TLB2_E 0x00000080 /* Memory is little endian */
+#define PPC47x_TLB2_PERM_MASK 0x0000003f
+#define PPC47x_TLB2_UX 0x00000020 /* User execution */
+#define PPC47x_TLB2_UW 0x00000010 /* User write */
+#define PPC47x_TLB2_UR 0x00000008 /* User read */
+#define PPC47x_TLB2_SX 0x00000004 /* Super execution */
+#define PPC47x_TLB2_SW 0x00000002 /* Super write */
+#define PPC47x_TLB2_SR 0x00000001 /* Super read */
+#define PPC47x_TLB2_U_RWX (PPC47x_TLB2_UX|PPC47x_TLB2_UW|PPC47x_TLB2_UR)
+#define PPC47x_TLB2_S_RWX (PPC47x_TLB2_SX|PPC47x_TLB2_SW|PPC47x_TLB2_SR)
+#define PPC47x_TLB2_S_RW (PPC47x_TLB2_SW | PPC47x_TLB2_SR)
+#define PPC47x_TLB2_IMG (PPC47x_TLB2_I | PPC47x_TLB2_M | PPC47x_TLB2_G)
+
#ifndef __ASSEMBLY__
extern unsigned int tlb_44x_hwater;
@@ -79,12 +125,15 @@ typedef struct {
#if (PAGE_SHIFT == 12)
#define PPC44x_TLBE_SIZE PPC44x_TLB_4K
+#define PPC47x_TLBE_SIZE PPC47x_TLB0_4K
#define mmu_virtual_psize MMU_PAGE_4K
#elif (PAGE_SHIFT == 14)
#define PPC44x_TLBE_SIZE PPC44x_TLB_16K
+#define PPC47x_TLBE_SIZE PPC47x_TLB0_16K
#define mmu_virtual_psize MMU_PAGE_16K
#elif (PAGE_SHIFT == 16)
#define PPC44x_TLBE_SIZE PPC44x_TLB_64K
+#define PPC47x_TLBE_SIZE PPC47x_TLB0_64K
#define mmu_virtual_psize MMU_PAGE_64K
#elif (PAGE_SHIFT == 18)
#define PPC44x_TLBE_SIZE PPC44x_TLB_256K
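
The PPC47x_TLB2_* definitions added above describe word 2 of a 476 TLB entry: storage attributes (W, I, M, G, E plus the two L1-inhibit bits) in the upper part and user/supervisor permission bits in the low six bits, with the composite masks covering the common groupings. A small illustrative sketch (not code from this patch) composing the attribute/permission word for a supervisor read-write-execute, coherent and guarded mapping:

/* Illustrative only: supervisor RWX, memory-coherent, guarded. */
unsigned int tlb_word2 = PPC47x_TLB2_S_RWX |   /* SX | SW | SR = 0x00000007 */
                         PPC47x_TLB2_M     |   /* coherent     = 0x00000200 */
                         PPC47x_TLB2_G;        /* guarded      = 0x00000100 */
/* tlb_word2 == 0x00000307 */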
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index 7ffbb65ff7a9..7ebf42ed84a2 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -18,6 +18,7 @@
#define MMU_FTR_TYPE_44x ASM_CONST(0x00000008)
#define MMU_FTR_TYPE_FSL_E ASM_CONST(0x00000010)
#define MMU_FTR_TYPE_3E ASM_CONST(0x00000020)
+#define MMU_FTR_TYPE_47x ASM_CONST(0x00000040)
/*
* This is individual features
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index 26383e0778aa..81fb41289d6c 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -27,6 +27,8 @@ extern int __init_new_context(void);
extern void __destroy_context(int context_id);
static inline void mmu_context_init(void) { }
#else
+extern unsigned long __init_new_context(void);
+extern void __destroy_context(unsigned long context_id);
extern void mmu_context_init(void);
#endif
diff --git a/arch/powerpc/include/asm/mmzone.h b/arch/powerpc/include/asm/mmzone.h
index 35acac90c8ca..aac87cbceb57 100644
--- a/arch/powerpc/include/asm/mmzone.h
+++ b/arch/powerpc/include/asm/mmzone.h
@@ -30,7 +30,7 @@ extern struct pglist_data *node_data[];
*/
extern int numa_cpu_lookup_table[];
-extern cpumask_t numa_cpumask_lookup_table[];
+extern cpumask_var_t node_to_cpumask_map[];
#ifdef CONFIG_MEMORY_HOTPLUG
extern unsigned long max_pfn;
#endif
diff --git a/arch/powerpc/include/asm/mpc52xx_psc.h b/arch/powerpc/include/asm/mpc52xx_psc.h
index 42561f4f032d..ecc4fc69ac13 100644
--- a/arch/powerpc/include/asm/mpc52xx_psc.h
+++ b/arch/powerpc/include/asm/mpc52xx_psc.h
@@ -248,6 +248,7 @@ struct mpc52xx_psc_fifo {
u16 tflwfptr; /* PSC + 0x9e */
};
+#define MPC512x_PSC_FIFO_EOF 0x100
#define MPC512x_PSC_FIFO_RESET_SLICE 0x80
#define MPC512x_PSC_FIFO_ENABLE_SLICE 0x01
#define MPC512x_PSC_FIFO_ENABLE_DMA 0x04
diff --git a/arch/powerpc/include/asm/mpic.h b/arch/powerpc/include/asm/mpic.h
index 61913d9a21a0..e000cce8f6dd 100644
--- a/arch/powerpc/include/asm/mpic.h
+++ b/arch/powerpc/include/asm/mpic.h
@@ -463,9 +463,6 @@ extern void mpic_cpu_set_priority(int prio);
/* Request IPIs on primary mpic */
extern void mpic_request_ipis(void);
-/* Send an IPI (non offseted number 0..3) */
-extern void mpic_send_ipi(unsigned int ipi_no, unsigned int cpu_mask);
-
/* Send a message (IPI) to a given target (cpu number or MSG_*) */
void smp_mpic_message_pass(int target, int msg);
diff --git a/arch/powerpc/include/asm/of_device.h b/arch/powerpc/include/asm/of_device.h
index a64debf177dc..444e97e2982e 100644
--- a/arch/powerpc/include/asm/of_device.h
+++ b/arch/powerpc/include/asm/of_device.h
@@ -12,9 +12,8 @@
*/
struct of_device
{
- struct device_node *node; /* to be obsoleted */
- u64 dma_mask; /* DMA mask */
struct device dev; /* Generic device interface */
+ struct pdev_archdata archdata;
};
extern struct of_device *of_device_alloc(struct device_node *np,
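
With the node field gone from struct of_device, the device tree node is now reached through the generic struct device, as the macio.h hunk above already shows. A one-line sketch of the resulting access pattern (ofdev here is a hypothetical struct of_device pointer):

struct device_node *np = ofdev->dev.of_node;   /* was: ofdev->node */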
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index a011603d4079..8ce7963ad41d 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -23,7 +23,7 @@
#include <asm/page.h>
#include <asm/exception-64e.h>
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
-#include <asm/kvm_book3s_64_asm.h>
+#include <asm/kvm_book3s_asm.h>
#endif
register struct paca_struct *local_paca asm("r13");
@@ -82,6 +82,7 @@ struct paca_struct {
s16 hw_cpu_id; /* Physical processor number */
u8 cpu_start; /* At startup, processor spins until */
/* this becomes non-zero. */
+ u8 kexec_state; /* set when kexec down has irqs off */
#ifdef CONFIG_PPC_STD_MMU_64
struct slb_shadow *slb_shadow_ptr;
@@ -136,15 +137,9 @@ struct paca_struct {
u64 startpurr; /* PURR/TB value snapshot */
u64 startspurr; /* SPURR value snapshot */
-#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
- struct {
- u64 esid;
- u64 vsid;
- } kvm_slb[64]; /* guest SLB */
+#ifdef CONFIG_KVM_BOOK3S_HANDLER
/* We use this to store guest state in */
struct kvmppc_book3s_shadow_vcpu shadow_vcpu;
- u8 kvm_slb_max; /* highest used guest slb entry */
- u8 kvm_in_guest; /* are we inside the guest? */
#endif
};
diff --git a/arch/powerpc/include/asm/parport.h b/arch/powerpc/include/asm/parport.h
index 94942d60ddfd..1ca1102b4a2f 100644
--- a/arch/powerpc/include/asm/parport.h
+++ b/arch/powerpc/include/asm/parport.h
@@ -19,6 +19,8 @@ static int __devinit parport_pc_find_nonpci_ports (int autoirq, int autodma)
u32 io1, io2;
int propsize;
int count = 0;
+ int virq;
+
for (np = NULL; (np = of_find_compatible_node(np,
"parallel",
"pnpPNP,400")) != NULL;) {
@@ -26,10 +28,13 @@ static int __devinit parport_pc_find_nonpci_ports (int autoirq, int autodma)
if (!prop || propsize > 6*sizeof(u32))
continue;
io1 = prop[1]; io2 = prop[2];
- prop = of_get_property(np, "interrupts", NULL);
- if (!prop)
+
+ virq = irq_of_parse_and_map(np, 0);
+ if (virq == NO_IRQ)
continue;
- if (parport_pc_probe_port(io1, io2, prop[0], autodma, NULL, 0) != NULL)
+
+ if (parport_pc_probe_port(io1, io2, virq, autodma, NULL, 0)
+ != NULL)
count++;
}
return count;
diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
index 605f5c5398d1..292725cec2e3 100644
--- a/arch/powerpc/include/asm/pgalloc-64.h
+++ b/arch/powerpc/include/asm/pgalloc-64.h
@@ -11,6 +11,12 @@
#include <linux/cpumask.h>
#include <linux/percpu.h>
+struct vmemmap_backing {
+ struct vmemmap_backing *list;
+ unsigned long phys;
+ unsigned long virt_addr;
+};
+
/*
* Functions that deal with pagetables that could be at any level of
* the table need to be passed an "index_size" so they know how to
diff --git a/arch/powerpc/include/asm/pgtable-ppc32.h b/arch/powerpc/include/asm/pgtable-ppc32.h
index 55646adfa843..a7db96f2b5c3 100644
--- a/arch/powerpc/include/asm/pgtable-ppc32.h
+++ b/arch/powerpc/include/asm/pgtable-ppc32.h
@@ -287,7 +287,7 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
#define pmd_page_vaddr(pmd) \
((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd) \
- (mem_map + (pmd_val(pmd) >> PAGE_SHIFT))
+ pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
#else
#define pmd_page_vaddr(pmd) \
((unsigned long) (pmd_val(pmd) & PAGE_MASK))
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 221ba6240464..7492fe8ad6e4 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -229,6 +229,9 @@ struct thread_struct {
unsigned long spefscr; /* SPE & eFP status */
int used_spe; /* set if process has used spe */
#endif /* CONFIG_SPE */
+#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
+ void* kvm_shadow_vcpu; /* KVM internal data */
+#endif /* CONFIG_KVM_BOOK3S_32_HANDLER */
};
#define ARCH_MIN_TASKALIGN 16
diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
index 9e2d84c06b74..5d8be0416227 100644
--- a/arch/powerpc/include/asm/ptrace.h
+++ b/arch/powerpc/include/asm/ptrace.h
@@ -89,6 +89,7 @@ struct pt_regs {
#define instruction_pointer(regs) ((regs)->nip)
#define user_stack_pointer(regs) ((regs)->gpr[1])
+#define kernel_stack_pointer(regs) ((regs)->gpr[1])
#define regs_return_value(regs) ((regs)->gpr[3])
#ifdef CONFIG_SMP
@@ -141,6 +142,69 @@ do { \
#define arch_has_block_step() (!cpu_has_feature(CPU_FTR_601))
#define ARCH_HAS_USER_SINGLE_STEP_INFO
+/*
+ * kprobe-based event tracer support
+ */
+
+#include <linux/stddef.h>
+#include <linux/thread_info.h>
+extern int regs_query_register_offset(const char *name);
+extern const char *regs_query_register_name(unsigned int offset);
+#define MAX_REG_OFFSET (offsetof(struct pt_regs, dsisr))
+
+/**
+ * regs_get_register() - get register value from its offset
+ * @regs: pt_regs from which register value is gotten
+ * @offset: offset number of the register.
+ *
+ * regs_get_register() returns the value of the register at byte offset
+ * @offset within the struct pt_regs pointed to by @regs.
+ * If @offset is bigger than MAX_REG_OFFSET, this returns 0.
+ */
+static inline unsigned long regs_get_register(struct pt_regs *regs,
+ unsigned int offset)
+{
+ if (unlikely(offset > MAX_REG_OFFSET))
+ return 0;
+ return *(unsigned long *)((unsigned long)regs + offset);
+}
+
+/**
+ * regs_within_kernel_stack() - check the address in the stack
+ * @regs: pt_regs which contains kernel stack pointer.
+ * @addr: address which is checked.
+ *
+ * regs_within_kernel_stack() checks @addr is within the kernel stack page(s).
+ * If @addr is within the kernel stack, it returns true. If not, returns false.
+ */
+
+static inline bool regs_within_kernel_stack(struct pt_regs *regs,
+ unsigned long addr)
+{
+ return ((addr & ~(THREAD_SIZE - 1)) ==
+ (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1)));
+}
+
+/**
+ * regs_get_kernel_stack_nth() - get Nth entry of the stack
+ * @regs: pt_regs which contains kernel stack pointer.
+ * @n: stack entry number.
+ *
+ * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
+ * is specified by @regs. If the @n th entry is NOT in the kernel stack,
+ * this returns 0.
+ */
+static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
+ unsigned int n)
+{
+ unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
+ addr += n;
+ if (regs_within_kernel_stack(regs, (unsigned long)addr))
+ return *addr;
+ else
+ return 0;
+}
+
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
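
The accessors above back the kprobe-based event tracer. A hedged sketch of how a kprobes pre-handler might use them; the handler itself (my_pre_handler) is hypothetical, only the regs_* helpers come from this patch:

#include <linux/kprobes.h>
#include <asm/ptrace.h>

/* Hypothetical pre-handler: read GPR5 and the first word on the
 * kernel stack of the probed context. */
static int my_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
        unsigned long r5  = regs_get_register(regs,
                                offsetof(struct pt_regs, gpr[5]));
        unsigned long sp0 = regs_get_kernel_stack_nth(regs, 0);

        pr_info("r5=%lx stack[0]=%lx\n", r5, sp0);
        return 0;
}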
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 5572e86223f4..d62fdf4e504b 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -293,10 +293,12 @@
#define HID1_ABE (1<<10) /* 7450 Address Broadcast Enable */
#define HID1_PS (1<<16) /* 750FX PLL selection */
#define SPRN_HID2 0x3F8 /* Hardware Implementation Register 2 */
+#define SPRN_HID2_GEKKO 0x398 /* Gekko HID2 Register */
#define SPRN_IABR 0x3F2 /* Instruction Address Breakpoint Register */
#define SPRN_IABR2 0x3FA /* 83xx */
#define SPRN_IBCR 0x135 /* 83xx Insn Breakpoint Control Reg */
#define SPRN_HID4 0x3F4 /* 970 HID4 */
+#define SPRN_HID4_GEKKO 0x3F3 /* Gekko HID4 */
#define SPRN_HID5 0x3F6 /* 970 HID5 */
#define SPRN_HID6 0x3F9 /* BE HID 6 */
#define HID6_LB (0x0F<<12) /* Concurrent Large Page Modes */
@@ -465,6 +467,14 @@
#define SPRN_VRSAVE 0x100 /* Vector Register Save Register */
#define SPRN_XER 0x001 /* Fixed Point Exception Register */
+#define SPRN_MMCR0_GEKKO 0x3B8 /* Gekko Monitor Mode Control Register 0 */
+#define SPRN_MMCR1_GEKKO 0x3BC /* Gekko Monitor Mode Control Register 1 */
+#define SPRN_PMC1_GEKKO 0x3B9 /* Gekko Performance Monitor Control 1 */
+#define SPRN_PMC2_GEKKO 0x3BA /* Gekko Performance Monitor Control 2 */
+#define SPRN_PMC3_GEKKO 0x3BD /* Gekko Performance Monitor Control 3 */
+#define SPRN_PMC4_GEKKO 0x3BE /* Gekko Performance Monitor Control 4 */
+#define SPRN_WPAR_GEKKO 0x399 /* Gekko Write Pipe Address Register */
+
#define SPRN_SCOMC 0x114 /* SCOM Access Control */
#define SPRN_SCOMD 0x115 /* SCOM Access DATA */
@@ -817,6 +827,7 @@
#define PVR_403GC 0x00200200
#define PVR_403GCX 0x00201400
#define PVR_405GP 0x40110000
+#define PVR_476 0x11a52000
#define PVR_STB03XXX 0x40310000
#define PVR_NP405H 0x41410000
#define PVR_NP405L 0x41610000
@@ -853,6 +864,9 @@
#define PVR_8245 0x80811014
#define PVR_8260 PVR_8240
+/* 476 Simulator seems to currently have the PVR of the 602... */
+#define PVR_476_ISS 0x00052000
+
/* 64-bit processors */
/* XXX the prefix should be PVR_, we'll do a global sweep to fix it one day */
#define PV_NORTHSTAR 0x0033
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index 414d434a66d0..5304a37ba425 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -191,6 +191,10 @@
#define MCSR_DCFP 0x01000000 /* D-Cache Flush Parity Error */
#define MCSR_IMPE 0x00800000 /* Imprecise Machine Check Exception */
+#define PPC47x_MCSR_GPR 0x01000000 /* GPR parity error */
+#define PPC47x_MCSR_FPR 0x00800000 /* FPR parity error */
+#define PPC47x_MCSR_IPR 0x00400000 /* Imprecise Machine Check Exception */
+
#ifdef CONFIG_E500
#define MCSR_MCP 0x80000000UL /* Machine Check Input Pin */
#define MCSR_ICPERR 0x40000000UL /* I-Cache Parity Error */
@@ -604,5 +608,25 @@
#define DBCR_JOI 0x00000002 /* JTAG Serial Outbound Int. Enable */
#define DBCR_JII 0x00000001 /* JTAG Serial Inbound Int. Enable */
#endif /* 403GCX */
+
+/* Some 476 specific registers */
+#define SPRN_SSPCR 830
+#define SPRN_USPCR 831
+#define SPRN_ISPCR 829
+#define SPRN_MMUBE0 820
+#define MMUBE0_IBE0_SHIFT 24
+#define MMUBE0_IBE1_SHIFT 16
+#define MMUBE0_IBE2_SHIFT 8
+#define MMUBE0_VBE0 0x00000004
+#define MMUBE0_VBE1 0x00000002
+#define MMUBE0_VBE2 0x00000001
+#define SPRN_MMUBE1 821
+#define MMUBE1_IBE3_SHIFT 24
+#define MMUBE1_IBE4_SHIFT 16
+#define MMUBE1_IBE5_SHIFT 8
+#define MMUBE1_VBE3 0x00000004
+#define MMUBE1_VBE4 0x00000002
+#define MMUBE1_VBE5 0x00000001
+
#endif /* __ASM_POWERPC_REG_BOOKE_H__ */
#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/scatterlist.h b/arch/powerpc/include/asm/scatterlist.h
index 912bf597870f..34cc78fd0ef4 100644
--- a/arch/powerpc/include/asm/scatterlist.h
+++ b/arch/powerpc/include/asm/scatterlist.h
@@ -9,38 +9,12 @@
* 2 of the License, or (at your option) any later version.
*/
-#ifdef __KERNEL__
-#include <linux/types.h>
#include <asm/dma.h>
-
-struct scatterlist {
-#ifdef CONFIG_DEBUG_SG
- unsigned long sg_magic;
-#endif
- unsigned long page_link;
- unsigned int offset;
- unsigned int length;
-
- /* For TCE or SWIOTLB support */
- dma_addr_t dma_address;
- u32 dma_length;
-};
-
-/*
- * These macros should be used after a dma_map_sg call has been done
- * to get bus addresses of each of the SG entries and their lengths.
- * You should only work with the number of sg entries pci_map_sg
- * returns, or alternatively stop on the first sg_dma_len(sg) which
- * is 0.
- */
-#define sg_dma_address(sg) ((sg)->dma_address)
-#define sg_dma_len(sg) ((sg)->dma_length)
+#include <asm-generic/scatterlist.h>
#ifdef __powerpc64__
#define ISA_DMA_THRESHOLD (~0UL)
#endif
-
#define ARCH_HAS_SG_CHAIN
-#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_SCATTERLIST_H */
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index 1d3b270d3083..66e237bbe15f 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -40,7 +40,7 @@ extern void smp_message_recv(int);
DECLARE_PER_CPU(unsigned int, cpu_pvr);
#ifdef CONFIG_HOTPLUG_CPU
-extern void fixup_irqs(cpumask_t map);
+extern void fixup_irqs(const struct cpumask *map);
int generic_cpu_disable(void);
int generic_cpu_enable(unsigned int cpu);
void generic_cpu_die(unsigned int cpu);
@@ -68,8 +68,19 @@ static inline void set_hard_smp_processor_id(int cpu, int phys)
}
#endif
-DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
-DECLARE_PER_CPU(cpumask_t, cpu_core_map);
+DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
+DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
+
+static inline struct cpumask *cpu_sibling_mask(int cpu)
+{
+ return per_cpu(cpu_sibling_map, cpu);
+}
+
+static inline struct cpumask *cpu_core_mask(int cpu)
+{
+ return per_cpu(cpu_core_map, cpu);
+}
+
extern int cpu_to_core_id(int cpu);
/* Since OpenPIC has only 4 IPIs, we use slightly different message numbers.
@@ -93,7 +104,6 @@ void smp_init_pSeries(void);
void smp_init_cell(void);
void smp_init_celleb(void);
void smp_setup_cpu_maps(void);
-void smp_setup_cpu_sibling_map(void);
extern int __cpu_disable(void);
extern void __cpu_die(unsigned int cpu);
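
cpu_sibling_map and cpu_core_map become cpumask_var_t per-CPU variables, and the two new accessors return the struct cpumask pointer directly. A short sketch of iterating a CPU's siblings with the new helper (do_something() is a placeholder):

int sibling;

for_each_cpu(sibling, cpu_sibling_mask(cpu))
        do_something(sibling);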
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index aa9d383a1c09..65eb85976a03 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -104,7 +104,7 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_PERFMON_CTXSW 6 /* perfmon needs ctxsw calls */
#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
#define TIF_SINGLESTEP 8 /* singlestepping active */
-#define TIF_MEMDIE 9
+#define TIF_MEMDIE 9 /* is terminating due to OOM killer */
#define TIF_SECCOMP 10 /* secure computing */
#define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */
#define TIF_NOERROR 12 /* Force successful syscall return */
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
index 8eaec310a25b..32adf7280720 100644
--- a/arch/powerpc/include/asm/topology.h
+++ b/arch/powerpc/include/asm/topology.h
@@ -8,6 +8,16 @@ struct device_node;
#ifdef CONFIG_NUMA
+/*
+ * Before going off node we want the VM to try and reclaim from the local
+ * node. It does this if the remote distance is larger than RECLAIM_DISTANCE.
+ * With the default REMOTE_DISTANCE of 20 and the default RECLAIM_DISTANCE of
+ * 20, we never reclaim and go off node straight away.
+ *
+ * To fix this we choose a smaller value of RECLAIM_DISTANCE.
+ */
+#define RECLAIM_DISTANCE 10
+
#include <asm/mmzone.h>
static inline int cpu_to_node(int cpu)
@@ -19,7 +39,7 @@ static inline int cpu_to_node(int cpu)
#define cpumask_of_node(node) ((node) == -1 ? \
cpu_all_mask : \
- &numa_cpumask_lookup_table[node])
+ node_to_cpumask_map[node])
int of_node_to_nid(struct device_node *device);
@@ -102,8 +122,8 @@ static inline void sysfs_remove_device_from_node(struct sys_device *dev,
#ifdef CONFIG_PPC64
#include <asm/smp.h>
-#define topology_thread_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu))
-#define topology_core_cpumask(cpu) (&per_cpu(cpu_core_map, cpu))
+#define topology_thread_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu))
+#define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu))
#define topology_core_id(cpu) (cpu_to_core_id(cpu))
#endif
#endif
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 957ceb7059c5..496cc5b3984f 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -50,6 +50,9 @@
#endif
#ifdef CONFIG_KVM
#include <linux/kvm_host.h>
+#ifndef CONFIG_BOOKE
+#include <asm/kvm_book3s.h>
+#endif
#endif
#ifdef CONFIG_PPC32
@@ -105,6 +108,9 @@ int main(void)
DEFINE(THREAD_USED_SPE, offsetof(struct thread_struct, used_spe));
#endif /* CONFIG_SPE */
#endif /* CONFIG_PPC64 */
+#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
+ DEFINE(THREAD_KVM_SVCPU, offsetof(struct thread_struct, kvm_shadow_vcpu));
+#endif
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags));
@@ -133,7 +139,6 @@ int main(void)
DEFINE(PACAKMSR, offsetof(struct paca_struct, kernel_msr));
DEFINE(PACASOFTIRQEN, offsetof(struct paca_struct, soft_enabled));
DEFINE(PACAHARDIRQEN, offsetof(struct paca_struct, hard_enabled));
- DEFINE(PACAPERFPEND, offsetof(struct paca_struct, perf_event_pending));
DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
#ifdef CONFIG_PPC_MM_SLICES
DEFINE(PACALOWSLICESPSIZE, offsetof(struct paca_struct,
@@ -184,6 +189,7 @@ int main(void)
#endif /* CONFIG_PPC_STD_MMU_64 */
DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp));
DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id));
+ DEFINE(PACAKEXECSTATE, offsetof(struct paca_struct, kexec_state));
DEFINE(PACA_STARTPURR, offsetof(struct paca_struct, startpurr));
DEFINE(PACA_STARTSPURR, offsetof(struct paca_struct, startspurr));
DEFINE(PACA_USER_TIME, offsetof(struct paca_struct, user_time));
@@ -191,33 +197,9 @@ int main(void)
DEFINE(PACA_DATA_OFFSET, offsetof(struct paca_struct, data_offset));
DEFINE(PACA_TRAP_SAVE, offsetof(struct paca_struct, trap_save));
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
- DEFINE(PACA_KVM_IN_GUEST, offsetof(struct paca_struct, kvm_in_guest));
- DEFINE(PACA_KVM_SLB, offsetof(struct paca_struct, kvm_slb));
- DEFINE(PACA_KVM_SLB_MAX, offsetof(struct paca_struct, kvm_slb_max));
- DEFINE(PACA_KVM_CR, offsetof(struct paca_struct, shadow_vcpu.cr));
- DEFINE(PACA_KVM_XER, offsetof(struct paca_struct, shadow_vcpu.xer));
- DEFINE(PACA_KVM_R0, offsetof(struct paca_struct, shadow_vcpu.gpr[0]));
- DEFINE(PACA_KVM_R1, offsetof(struct paca_struct, shadow_vcpu.gpr[1]));
- DEFINE(PACA_KVM_R2, offsetof(struct paca_struct, shadow_vcpu.gpr[2]));
- DEFINE(PACA_KVM_R3, offsetof(struct paca_struct, shadow_vcpu.gpr[3]));
- DEFINE(PACA_KVM_R4, offsetof(struct paca_struct, shadow_vcpu.gpr[4]));
- DEFINE(PACA_KVM_R5, offsetof(struct paca_struct, shadow_vcpu.gpr[5]));
- DEFINE(PACA_KVM_R6, offsetof(struct paca_struct, shadow_vcpu.gpr[6]));
- DEFINE(PACA_KVM_R7, offsetof(struct paca_struct, shadow_vcpu.gpr[7]));
- DEFINE(PACA_KVM_R8, offsetof(struct paca_struct, shadow_vcpu.gpr[8]));
- DEFINE(PACA_KVM_R9, offsetof(struct paca_struct, shadow_vcpu.gpr[9]));
- DEFINE(PACA_KVM_R10, offsetof(struct paca_struct, shadow_vcpu.gpr[10]));
- DEFINE(PACA_KVM_R11, offsetof(struct paca_struct, shadow_vcpu.gpr[11]));
- DEFINE(PACA_KVM_R12, offsetof(struct paca_struct, shadow_vcpu.gpr[12]));
- DEFINE(PACA_KVM_R13, offsetof(struct paca_struct, shadow_vcpu.gpr[13]));
- DEFINE(PACA_KVM_HOST_R1, offsetof(struct paca_struct, shadow_vcpu.host_r1));
- DEFINE(PACA_KVM_HOST_R2, offsetof(struct paca_struct, shadow_vcpu.host_r2));
- DEFINE(PACA_KVM_VMHANDLER, offsetof(struct paca_struct,
- shadow_vcpu.vmhandler));
- DEFINE(PACA_KVM_SCRATCH0, offsetof(struct paca_struct,
- shadow_vcpu.scratch0));
- DEFINE(PACA_KVM_SCRATCH1, offsetof(struct paca_struct,
- shadow_vcpu.scratch1));
+ DEFINE(PACA_KVM_SVCPU, offsetof(struct paca_struct, shadow_vcpu));
+ DEFINE(SVCPU_SLB, offsetof(struct kvmppc_book3s_shadow_vcpu, slb));
+ DEFINE(SVCPU_SLB_MAX, offsetof(struct kvmppc_book3s_shadow_vcpu, slb_max));
#endif
#endif /* CONFIG_PPC64 */
@@ -228,8 +210,8 @@ int main(void)
/* Interrupt register frame */
DEFINE(STACK_FRAME_OVERHEAD, STACK_FRAME_OVERHEAD);
DEFINE(INT_FRAME_SIZE, STACK_INT_FRAME_SIZE);
-#ifdef CONFIG_PPC64
DEFINE(SWITCH_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs));
+#ifdef CONFIG_PPC64
/* Create extra stack space for SRR0 and SRR1 when calling prom/rtas. */
DEFINE(PROM_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16);
DEFINE(RTAS_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16);
@@ -412,9 +394,6 @@ int main(void)
DEFINE(VCPU_HOST_STACK, offsetof(struct kvm_vcpu, arch.host_stack));
DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid));
DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr));
- DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
- DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr));
- DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc));
DEFINE(VCPU_MSR, offsetof(struct kvm_vcpu, arch.msr));
DEFINE(VCPU_SPRG4, offsetof(struct kvm_vcpu, arch.sprg4));
DEFINE(VCPU_SPRG5, offsetof(struct kvm_vcpu, arch.sprg5));
@@ -422,32 +401,81 @@ int main(void)
DEFINE(VCPU_SPRG7, offsetof(struct kvm_vcpu, arch.sprg7));
DEFINE(VCPU_SHADOW_PID, offsetof(struct kvm_vcpu, arch.shadow_pid));
- DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst));
- DEFINE(VCPU_FAULT_DEAR, offsetof(struct kvm_vcpu, arch.fault_dear));
- DEFINE(VCPU_FAULT_ESR, offsetof(struct kvm_vcpu, arch.fault_esr));
-
- /* book3s_64 */
-#ifdef CONFIG_PPC64
- DEFINE(VCPU_FAULT_DSISR, offsetof(struct kvm_vcpu, arch.fault_dsisr));
+ /* book3s */
+#ifdef CONFIG_PPC_BOOK3S
DEFINE(VCPU_HOST_RETIP, offsetof(struct kvm_vcpu, arch.host_retip));
- DEFINE(VCPU_HOST_R2, offsetof(struct kvm_vcpu, arch.host_r2));
DEFINE(VCPU_HOST_MSR, offsetof(struct kvm_vcpu, arch.host_msr));
DEFINE(VCPU_SHADOW_MSR, offsetof(struct kvm_vcpu, arch.shadow_msr));
- DEFINE(VCPU_SHADOW_SRR1, offsetof(struct kvm_vcpu, arch.shadow_srr1));
DEFINE(VCPU_TRAMPOLINE_LOWMEM, offsetof(struct kvm_vcpu, arch.trampoline_lowmem));
DEFINE(VCPU_TRAMPOLINE_ENTER, offsetof(struct kvm_vcpu, arch.trampoline_enter));
DEFINE(VCPU_HIGHMEM_HANDLER, offsetof(struct kvm_vcpu, arch.highmem_handler));
DEFINE(VCPU_RMCALL, offsetof(struct kvm_vcpu, arch.rmcall));
DEFINE(VCPU_HFLAGS, offsetof(struct kvm_vcpu, arch.hflags));
+ DEFINE(VCPU_SVCPU, offsetof(struct kvmppc_vcpu_book3s, shadow_vcpu) -
+ offsetof(struct kvmppc_vcpu_book3s, vcpu));
+ DEFINE(SVCPU_CR, offsetof(struct kvmppc_book3s_shadow_vcpu, cr));
+ DEFINE(SVCPU_XER, offsetof(struct kvmppc_book3s_shadow_vcpu, xer));
+ DEFINE(SVCPU_CTR, offsetof(struct kvmppc_book3s_shadow_vcpu, ctr));
+ DEFINE(SVCPU_LR, offsetof(struct kvmppc_book3s_shadow_vcpu, lr));
+ DEFINE(SVCPU_PC, offsetof(struct kvmppc_book3s_shadow_vcpu, pc));
+ DEFINE(SVCPU_R0, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[0]));
+ DEFINE(SVCPU_R1, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[1]));
+ DEFINE(SVCPU_R2, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[2]));
+ DEFINE(SVCPU_R3, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[3]));
+ DEFINE(SVCPU_R4, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[4]));
+ DEFINE(SVCPU_R5, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[5]));
+ DEFINE(SVCPU_R6, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[6]));
+ DEFINE(SVCPU_R7, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[7]));
+ DEFINE(SVCPU_R8, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[8]));
+ DEFINE(SVCPU_R9, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[9]));
+ DEFINE(SVCPU_R10, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[10]));
+ DEFINE(SVCPU_R11, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[11]));
+ DEFINE(SVCPU_R12, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[12]));
+ DEFINE(SVCPU_R13, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[13]));
+ DEFINE(SVCPU_HOST_R1, offsetof(struct kvmppc_book3s_shadow_vcpu, host_r1));
+ DEFINE(SVCPU_HOST_R2, offsetof(struct kvmppc_book3s_shadow_vcpu, host_r2));
+ DEFINE(SVCPU_VMHANDLER, offsetof(struct kvmppc_book3s_shadow_vcpu,
+ vmhandler));
+ DEFINE(SVCPU_SCRATCH0, offsetof(struct kvmppc_book3s_shadow_vcpu,
+ scratch0));
+ DEFINE(SVCPU_SCRATCH1, offsetof(struct kvmppc_book3s_shadow_vcpu,
+ scratch1));
+ DEFINE(SVCPU_IN_GUEST, offsetof(struct kvmppc_book3s_shadow_vcpu,
+ in_guest));
+ DEFINE(SVCPU_FAULT_DSISR, offsetof(struct kvmppc_book3s_shadow_vcpu,
+ fault_dsisr));
+ DEFINE(SVCPU_FAULT_DAR, offsetof(struct kvmppc_book3s_shadow_vcpu,
+ fault_dar));
+ DEFINE(SVCPU_LAST_INST, offsetof(struct kvmppc_book3s_shadow_vcpu,
+ last_inst));
+ DEFINE(SVCPU_SHADOW_SRR1, offsetof(struct kvmppc_book3s_shadow_vcpu,
+ shadow_srr1));
+#ifdef CONFIG_PPC_BOOK3S_32
+ DEFINE(SVCPU_SR, offsetof(struct kvmppc_book3s_shadow_vcpu, sr));
+#endif
#else
DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer));
-#endif /* CONFIG_PPC64 */
+ DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
+ DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr));
+ DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc));
+ DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst));
+ DEFINE(VCPU_FAULT_DEAR, offsetof(struct kvm_vcpu, arch.fault_dear));
+ DEFINE(VCPU_FAULT_ESR, offsetof(struct kvm_vcpu, arch.fault_esr));
+#endif /* CONFIG_PPC_BOOK3S */
#endif
#ifdef CONFIG_44x
DEFINE(PGD_T_LOG2, PGD_T_LOG2);
DEFINE(PTE_T_LOG2, PTE_T_LOG2);
#endif
+#ifdef CONFIG_FSL_BOOKE
+ DEFINE(TLBCAM_SIZE, sizeof(struct tlbcam));
+ DEFINE(TLBCAM_MAS0, offsetof(struct tlbcam, MAS0));
+ DEFINE(TLBCAM_MAS1, offsetof(struct tlbcam, MAS1));
+ DEFINE(TLBCAM_MAS2, offsetof(struct tlbcam, MAS2));
+ DEFINE(TLBCAM_MAS3, offsetof(struct tlbcam, MAS3));
+ DEFINE(TLBCAM_MAS7, offsetof(struct tlbcam, MAS7));
+#endif
#ifdef CONFIG_KVM_EXIT_TIMING
DEFINE(VCPU_TIMING_EXIT_TBU, offsetof(struct kvm_vcpu,
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 8af4949434b2..9556be903e96 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -1701,6 +1701,34 @@ static struct cpu_spec __initdata cpu_specs[] = {
.machine_check = machine_check_440A,
.platform = "ppc440",
},
+ { /* 476 core */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x11a50000,
+ .cpu_name = "476",
+ .cpu_features = CPU_FTRS_47X,
+ .cpu_user_features = COMMON_USER_BOOKE |
+ PPC_FEATURE_HAS_FPU,
+ .mmu_features = MMU_FTR_TYPE_47x |
+ MMU_FTR_USE_TLBIVAX_BCAST | MMU_FTR_LOCK_BCAST_INVAL,
+ .icache_bsize = 32,
+ .dcache_bsize = 128,
+ .machine_check = machine_check_47x,
+ .platform = "ppc470",
+ },
+ { /* 476 iss */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x00050000,
+ .cpu_name = "476",
+ .cpu_features = CPU_FTRS_47X,
+ .cpu_user_features = COMMON_USER_BOOKE |
+ PPC_FEATURE_HAS_FPU,
+ .mmu_features = MMU_FTR_TYPE_47x |
+ MMU_FTR_USE_TLBIVAX_BCAST | MMU_FTR_LOCK_BCAST_INVAL,
+ .icache_bsize = 32,
+ .dcache_bsize = 128,
+ .machine_check = machine_check_47x,
+ .platform = "ppc470",
+ },
{ /* default match */
.pvr_mask = 0x00000000,
.pvr_value = 0x00000000,
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
index 6f4613dd05ef..8c066d6a8e4b 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -162,6 +162,32 @@ static void crash_kexec_prepare_cpus(int cpu)
/* Leave the IPI callback set */
}
+/* wait for all the CPUs to hit real mode but time out if they don't come in */
+static void crash_kexec_wait_realmode(int cpu)
+{
+ unsigned int msecs;
+ int i;
+
+ msecs = 10000;
+ for (i=0; i < NR_CPUS && msecs > 0; i++) {
+ if (i == cpu)
+ continue;
+
+ while (paca[i].kexec_state < KEXEC_STATE_REAL_MODE) {
+ barrier();
+ if (!cpu_possible(i)) {
+ break;
+ }
+ if (!cpu_online(i)) {
+ break;
+ }
+ msecs--;
+ mdelay(1);
+ }
+ }
+ mb();
+}
+
/*
* This function will be called by secondary cpus or by kexec cpu
* if soft-reset is activated to stop some CPUs.
@@ -347,10 +373,12 @@ int crash_shutdown_unregister(crash_shutdown_t handler)
EXPORT_SYMBOL(crash_shutdown_unregister);
static unsigned long crash_shutdown_buf[JMP_BUF_LEN];
+static int crash_shutdown_cpu = -1;
static int handle_fault(struct pt_regs *regs)
{
- longjmp(crash_shutdown_buf, 1);
+ if (crash_shutdown_cpu == smp_processor_id())
+ longjmp(crash_shutdown_buf, 1);
return 0;
}
@@ -375,11 +403,14 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
for_each_irq(i) {
struct irq_desc *desc = irq_to_desc(i);
+ if (!desc || !desc->chip || !desc->chip->eoi)
+ continue;
+
if (desc->status & IRQ_INPROGRESS)
desc->chip->eoi(i);
if (!(desc->status & IRQ_DISABLED))
- desc->chip->disable(i);
+ desc->chip->shutdown(i);
}
/*
@@ -388,6 +419,7 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
*/
old_handler = __debugger_fault_handler;
__debugger_fault_handler = handle_fault;
+ crash_shutdown_cpu = smp_processor_id();
for (i = 0; crash_shutdown_handles[i]; i++) {
if (setjmp(crash_shutdown_buf) == 0) {
/*
@@ -401,6 +433,7 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
asm volatile("sync; isync");
}
}
+ crash_shutdown_cpu = -1;
__debugger_fault_handler = old_handler;
/*
@@ -412,6 +445,7 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
crash_kexec_prepare_cpus(crashing_cpu);
cpu_set(crashing_cpu, cpus_in_crash);
crash_kexec_stop_spus();
+ crash_kexec_wait_realmode(crashing_cpu);
if (ppc_md.kexec_cpu_down)
ppc_md.kexec_cpu_down(1, 0);
}
diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
index 59c928564a03..e7fe218b8697 100644
--- a/arch/powerpc/kernel/dma-swiotlb.c
+++ b/arch/powerpc/kernel/dma-swiotlb.c
@@ -1,7 +1,8 @@
/*
* Contains routines needed to support swiotlb for ppc.
*
- * Copyright (C) 2009 Becky Bruce, Freescale Semiconductor
+ * Copyright (C) 2009-2010 Freescale Semiconductor, Inc.
+ * Author: Becky Bruce
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -38,8 +39,8 @@ struct dma_map_ops swiotlb_dma_ops = {
.dma_supported = swiotlb_dma_supported,
.map_page = swiotlb_map_page,
.unmap_page = swiotlb_unmap_page,
- .sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
- .sync_single_range_for_device = swiotlb_sync_single_range_for_device,
+ .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
+ .sync_single_for_device = swiotlb_sync_single_for_device,
.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
.sync_sg_for_device = swiotlb_sync_sg_for_device,
.mapping_error = swiotlb_dma_mapping_error,
@@ -70,7 +71,7 @@ static int ppc_swiotlb_bus_notify(struct notifier_block *nb,
sd->max_direct_dma_addr = 0;
/* May need to bounce if the device can't address all of DRAM */
- if (dma_get_mask(dev) < lmb_end_of_DRAM())
+ if ((dma_get_mask(dev) + 1) < lmb_end_of_DRAM())
set_dma_ops(dev, &swiotlb_dma_ops);
return NOTIFY_DONE;
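
The dma_get_mask() change fixes an off-by-one: a DMA mask is an inclusive upper bound, so a device with a 32-bit mask can address [0, 0xffffffff], while lmb_end_of_DRAM() returns an exclusive end address. With exactly 4GB of RAM:

  old test:  0xffffffff     < 0x100000000  -> true,  swiotlb bounce buffers enabled needlessly
  new test:  0xffffffff + 1 < 0x100000000  -> false, direct DMA is kept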
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index 6c1df5757cd6..8d1de6f31d5a 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -127,11 +127,11 @@ static inline void dma_direct_sync_sg(struct device *dev,
__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
}
-static inline void dma_direct_sync_single_range(struct device *dev,
- dma_addr_t dma_handle, unsigned long offset, size_t size,
- enum dma_data_direction direction)
+static inline void dma_direct_sync_single(struct device *dev,
+ dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction direction)
{
- __dma_sync(bus_to_virt(dma_handle+offset), size, direction);
+ __dma_sync(bus_to_virt(dma_handle), size, direction);
}
#endif
@@ -144,8 +144,8 @@ struct dma_map_ops dma_direct_ops = {
.map_page = dma_direct_map_page,
.unmap_page = dma_direct_unmap_page,
#ifdef CONFIG_NOT_COHERENT_CACHE
- .sync_single_range_for_cpu = dma_direct_sync_single_range,
- .sync_single_range_for_device = dma_direct_sync_single_range,
+ .sync_single_for_cpu = dma_direct_sync_single,
+ .sync_single_for_device = dma_direct_sync_single,
.sync_sg_for_cpu = dma_direct_sync_sg,
.sync_sg_for_device = dma_direct_sync_sg,
#endif
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 1175a8539e6c..ed4aeb96398b 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -373,11 +373,13 @@ syscall_exit_cont:
bnel- load_dbcr0
#endif
#ifdef CONFIG_44x
+BEGIN_MMU_FTR_SECTION
lis r4,icache_44x_need_flush@ha
lwz r5,icache_44x_need_flush@l(r4)
cmplwi cr0,r5,0
bne- 2f
1:
+END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_47x)
#endif /* CONFIG_44x */
BEGIN_FTR_SECTION
lwarx r7,0,r1
@@ -848,6 +850,9 @@ resume_kernel:
/* interrupts are hard-disabled at this point */
restore:
#ifdef CONFIG_44x
+BEGIN_MMU_FTR_SECTION
+ b 1f
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
lis r4,icache_44x_need_flush@ha
lwz r5,icache_44x_need_flush@l(r4)
cmplwi cr0,r5,0
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 07109d843787..42e9d908914a 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -556,15 +556,6 @@ ALT_FW_FTR_SECTION_END_IFCLR(FW_FEATURE_ISERIES)
2:
TRACE_AND_RESTORE_IRQ(r5);
-#ifdef CONFIG_PERF_EVENTS
- /* check paca->perf_event_pending if we're enabling ints */
- lbz r3,PACAPERFPEND(r13)
- and. r3,r3,r5
- beq 27f
- bl .perf_event_do_pending
-27:
-#endif /* CONFIG_PERF_EVENTS */
-
/* extract EE bit and use it to restore paca->hard_enabled */
ld r3,_MSR(r1)
rldicl r4,r3,49,63 /* r0 = (r3 >> 15) & 1 */
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index e3be98ffe2a7..3e423fbad6bc 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -735,8 +735,11 @@ _STATIC(do_hash_page)
std r3,_DAR(r1)
std r4,_DSISR(r1)
- andis. r0,r4,0xa450 /* weird error? */
+ andis. r0,r4,0xa410 /* weird error? */
bne- handle_page_fault /* if not, try to insert a HPTE */
+ andis. r0,r4,DSISR_DABRMATCH@h
+ bne- handle_dabr_fault
+
BEGIN_FTR_SECTION
andis. r0,r4,0x0020 /* Is it a segment table fault? */
bne- do_ste_alloc /* If so handle it */
@@ -823,6 +826,14 @@ END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
bl .raw_local_irq_restore
b 11f
+/* We have a data breakpoint exception - handle it */
+handle_dabr_fault:
+ ld r4,_DAR(r1)
+ ld r5,_DSISR(r1)
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl .do_dabr
+ b .ret_from_except_lite
+
/* Here we have a page fault that hash_page can't handle. */
handle_page_fault:
ENABLE_INTS
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
index e025e89fe93e..98c4b29a56f4 100644
--- a/arch/powerpc/kernel/head_32.S
+++ b/arch/powerpc/kernel/head_32.S
@@ -33,6 +33,7 @@
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
#include <asm/bug.h>
+#include <asm/kvm_book3s_asm.h>
/* 601 only have IBAT; cr0.eq is set on 601 when using this macro */
#define LOAD_BAT(n, reg, RA, RB) \
@@ -303,6 +304,7 @@ __secondary_hold_acknowledge:
*/
#define EXCEPTION(n, label, hdlr, xfer) \
. = n; \
+ DO_KVM n; \
label: \
EXCEPTION_PROLOG; \
addi r3,r1,STACK_FRAME_OVERHEAD; \
@@ -358,6 +360,7 @@ i##n: \
* -- paulus.
*/
. = 0x200
+ DO_KVM 0x200
mtspr SPRN_SPRG_SCRATCH0,r10
mtspr SPRN_SPRG_SCRATCH1,r11
mfcr r10
@@ -381,6 +384,7 @@ i##n: \
/* Data access exception. */
. = 0x300
+ DO_KVM 0x300
DataAccess:
EXCEPTION_PROLOG
mfspr r10,SPRN_DSISR
@@ -397,6 +401,7 @@ DataAccess:
/* Instruction access exception. */
. = 0x400
+ DO_KVM 0x400
InstructionAccess:
EXCEPTION_PROLOG
andis. r0,r9,0x4000 /* no pte found? */
@@ -413,6 +418,7 @@ InstructionAccess:
/* Alignment exception */
. = 0x600
+ DO_KVM 0x600
Alignment:
EXCEPTION_PROLOG
mfspr r4,SPRN_DAR
@@ -427,6 +433,7 @@ Alignment:
/* Floating-point unavailable */
. = 0x800
+ DO_KVM 0x800
FPUnavailable:
BEGIN_FTR_SECTION
/*
@@ -450,6 +457,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_FPU_UNAVAILABLE)
/* System call */
. = 0xc00
+ DO_KVM 0xc00
SystemCall:
EXCEPTION_PROLOG
EXC_XFER_EE_LITE(0xc00, DoSyscall)
@@ -467,9 +475,11 @@ SystemCall:
* by executing an altivec instruction.
*/
. = 0xf00
+ DO_KVM 0xf00
b PerformanceMonitor
. = 0xf20
+ DO_KVM 0xf20
b AltiVecUnavailable
/*
@@ -882,6 +892,10 @@ __secondary_start:
RFI
#endif /* CONFIG_SMP */
+#ifdef CONFIG_KVM_BOOK3S_HANDLER
+#include "../kvm/book3s_rmhandlers.S"
+#endif
+
/*
* Those generic dummy functions are kept for CPUs not
* included in CONFIG_6xx
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S
index 711368b993f2..5ab484ef06a7 100644
--- a/arch/powerpc/kernel/head_44x.S
+++ b/arch/powerpc/kernel/head_44x.S
@@ -37,6 +37,7 @@
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
+#include <asm/synch.h>
#include "head_booke.h"
@@ -69,165 +70,7 @@ _ENTRY(_start);
mr r27,r7
li r24,0 /* CPU number */
-/*
- * In case the firmware didn't do it, we apply some workarounds
- * that are good for all 440 core variants here
- */
- mfspr r3,SPRN_CCR0
- rlwinm r3,r3,0,0,27 /* disable icache prefetch */
- isync
- mtspr SPRN_CCR0,r3
- isync
- sync
-
-/*
- * Set up the initial MMU state
- *
- * We are still executing code at the virtual address
- * mappings set by the firmware for the base of RAM.
- *
- * We first invalidate all TLB entries but the one
- * we are running from. We then load the KERNELBASE
- * mappings so we can begin to use kernel addresses
- * natively and so the interrupt vector locations are
- * permanently pinned (necessary since Book E
- * implementations always have translation enabled).
- *
- * TODO: Use the known TLB entry we are running from to
- * determine which physical region we are located
- * in. This can be used to determine where in RAM
- * (on a shared CPU system) or PCI memory space
- * (on a DRAMless system) we are located.
- * For now, we assume a perfect world which means
- * we are located at the base of DRAM (physical 0).
- */
-
-/*
- * Search TLB for entry that we are currently using.
- * Invalidate all entries but the one we are using.
- */
- /* Load our current PID->MMUCR TID and MSR IS->MMUCR STS */
- mfspr r3,SPRN_PID /* Get PID */
- mfmsr r4 /* Get MSR */
- andi. r4,r4,MSR_IS@l /* TS=1? */
- beq wmmucr /* If not, leave STS=0 */
- oris r3,r3,PPC44x_MMUCR_STS@h /* Set STS=1 */
-wmmucr: mtspr SPRN_MMUCR,r3 /* Put MMUCR */
- sync
-
- bl invstr /* Find our address */
-invstr: mflr r5 /* Make it accessible */
- tlbsx r23,0,r5 /* Find entry we are in */
- li r4,0 /* Start at TLB entry 0 */
- li r3,0 /* Set PAGEID inval value */
-1: cmpw r23,r4 /* Is this our entry? */
- beq skpinv /* If so, skip the inval */
- tlbwe r3,r4,PPC44x_TLB_PAGEID /* If not, inval the entry */
-skpinv: addi r4,r4,1 /* Increment */
- cmpwi r4,64 /* Are we done? */
- bne 1b /* If not, repeat */
- isync /* If so, context change */
-
-/*
- * Configure and load pinned entry into TLB slot 63.
- */
-
- lis r3,PAGE_OFFSET@h
- ori r3,r3,PAGE_OFFSET@l
-
- /* Kernel is at the base of RAM */
- li r4, 0 /* Load the kernel physical address */
-
- /* Load the kernel PID = 0 */
- li r0,0
- mtspr SPRN_PID,r0
- sync
-
- /* Initialize MMUCR */
- li r5,0
- mtspr SPRN_MMUCR,r5
- sync
-
- /* pageid fields */
- clrrwi r3,r3,10 /* Mask off the effective page number */
- ori r3,r3,PPC44x_TLB_VALID | PPC44x_TLB_256M
-
- /* xlat fields */
- clrrwi r4,r4,10 /* Mask off the real page number */
- /* ERPN is 0 for first 4GB page */
-
- /* attrib fields */
- /* Added guarded bit to protect against speculative loads/stores */
- li r5,0
- ori r5,r5,(PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G)
-
- li r0,63 /* TLB slot 63 */
-
- tlbwe r3,r0,PPC44x_TLB_PAGEID /* Load the pageid fields */
- tlbwe r4,r0,PPC44x_TLB_XLAT /* Load the translation fields */
- tlbwe r5,r0,PPC44x_TLB_ATTRIB /* Load the attrib/access fields */
-
- /* Force context change */
- mfmsr r0
- mtspr SPRN_SRR1, r0
- lis r0,3f@h
- ori r0,r0,3f@l
- mtspr SPRN_SRR0,r0
- sync
- rfi
-
- /* If necessary, invalidate original entry we used */
-3: cmpwi r23,63
- beq 4f
- li r6,0
- tlbwe r6,r23,PPC44x_TLB_PAGEID
- isync
-
-4:
-#ifdef CONFIG_PPC_EARLY_DEBUG_44x
- /* Add UART mapping for early debug. */
-
- /* pageid fields */
- lis r3,PPC44x_EARLY_DEBUG_VIRTADDR@h
- ori r3,r3,PPC44x_TLB_VALID|PPC44x_TLB_TS|PPC44x_TLB_64K
-
- /* xlat fields */
- lis r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSLOW@h
- ori r4,r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSHIGH
-
- /* attrib fields */
- li r5,(PPC44x_TLB_SW|PPC44x_TLB_SR|PPC44x_TLB_I|PPC44x_TLB_G)
- li r0,62 /* TLB slot 0 */
-
- tlbwe r3,r0,PPC44x_TLB_PAGEID
- tlbwe r4,r0,PPC44x_TLB_XLAT
- tlbwe r5,r0,PPC44x_TLB_ATTRIB
-
- /* Force context change */
- isync
-#endif /* CONFIG_PPC_EARLY_DEBUG_44x */
-
- /* Establish the interrupt vector offsets */
- SET_IVOR(0, CriticalInput);
- SET_IVOR(1, MachineCheck);
- SET_IVOR(2, DataStorage);
- SET_IVOR(3, InstructionStorage);
- SET_IVOR(4, ExternalInput);
- SET_IVOR(5, Alignment);
- SET_IVOR(6, Program);
- SET_IVOR(7, FloatingPointUnavailable);
- SET_IVOR(8, SystemCall);
- SET_IVOR(9, AuxillaryProcessorUnavailable);
- SET_IVOR(10, Decrementer);
- SET_IVOR(11, FixedIntervalTimer);
- SET_IVOR(12, WatchdogTimer);
- SET_IVOR(13, DataTLBError);
- SET_IVOR(14, InstructionTLBError);
- SET_IVOR(15, DebugCrit);
-
- /* Establish the interrupt vector base */
- lis r4,interrupt_base@h /* IVPR only uses the high 16-bits */
- mtspr SPRN_IVPR,r4
+ bl init_cpu_state
/*
* This is where the main kernel code starts.
@@ -349,7 +192,7 @@ interrupt_base:
#endif
/* Data TLB Error Interrupt */
- START_EXCEPTION(DataTLBError)
+ START_EXCEPTION(DataTLBError44x)
mtspr SPRN_SPRG_WSCRATCH0, r10 /* Save some working registers */
mtspr SPRN_SPRG_WSCRATCH1, r11
mtspr SPRN_SPRG_WSCRATCH2, r12
@@ -440,7 +283,7 @@ tlb_44x_patch_hwater_D:
mfspr r10,SPRN_DEAR
/* Jump to common tlb load */
- b finish_tlb_load
+ b finish_tlb_load_44x
2:
/* The bailout. Restore registers to pre-exception conditions
@@ -460,7 +303,7 @@ tlb_44x_patch_hwater_D:
* information from different registers and bailout
* to a different point.
*/
- START_EXCEPTION(InstructionTLBError)
+ START_EXCEPTION(InstructionTLBError44x)
mtspr SPRN_SPRG_WSCRATCH0, r10 /* Save some working registers */
mtspr SPRN_SPRG_WSCRATCH1, r11
mtspr SPRN_SPRG_WSCRATCH2, r12
@@ -536,7 +379,7 @@ tlb_44x_patch_hwater_I:
mfspr r10,SPRN_SRR0
/* Jump to common TLB load point */
- b finish_tlb_load
+ b finish_tlb_load_44x
2:
/* The bailout. Restore registers to pre-exception conditions
@@ -550,15 +393,7 @@ tlb_44x_patch_hwater_I:
mfspr r10, SPRN_SPRG_RSCRATCH0
b InstructionStorage
- /* Debug Interrupt */
- DEBUG_CRIT_EXCEPTION
-
-/*
- * Local functions
- */
-
/*
-
* Both the instruction and data TLB miss get to this
* point to load the TLB.
* r10 - EA of fault
@@ -568,7 +403,7 @@ tlb_44x_patch_hwater_I:
* MMUCR - loaded with proper value when we get here
* Upon exit, we reload everything and RFI.
*/
-finish_tlb_load:
+finish_tlb_load_44x:
 /* Combine RPN & ERPN and write WS 0 */
rlwimi r11,r12,0,0,31-PAGE_SHIFT
tlbwe r11,r13,PPC44x_TLB_XLAT
@@ -601,6 +436,227 @@ finish_tlb_load:
mfspr r10, SPRN_SPRG_RSCRATCH0
rfi /* Force context change */
+/* TLB error interrupts for 476
+ */
+#ifdef CONFIG_PPC_47x
+ START_EXCEPTION(DataTLBError47x)
+ mtspr SPRN_SPRG_WSCRATCH0,r10 /* Save some working registers */
+ mtspr SPRN_SPRG_WSCRATCH1,r11
+ mtspr SPRN_SPRG_WSCRATCH2,r12
+ mtspr SPRN_SPRG_WSCRATCH3,r13
+ mfcr r11
+ mtspr SPRN_SPRG_WSCRATCH4,r11
+ mfspr r10,SPRN_DEAR /* Get faulting address */
+
+ /* If we are faulting a kernel address, we have to use the
+ * kernel page tables.
+ */
+ lis r11,PAGE_OFFSET@h
+ cmplw cr0,r10,r11
+ blt+ 3f
+ lis r11,swapper_pg_dir@h
+ ori r11,r11, swapper_pg_dir@l
+ li r12,0 /* MMUCR = 0 */
+ b 4f
+
+ /* Get the PGD for the current thread and setup MMUCR */
+3: mfspr r11,SPRN_SPRG3
+ lwz r11,PGDIR(r11)
+ mfspr r12,SPRN_PID /* Get PID */
+4: mtspr SPRN_MMUCR,r12 /* Set MMUCR */
+
+ /* Mask of required permission bits. Note that while we
+ * do copy ESR:ST to _PAGE_RW position as trying to write
+ * to an RO page is pretty common, we don't do it with
+ * _PAGE_DIRTY. We could do it, but it's a fairly rare
+ * event so I'd rather take the overhead when it happens
+ * rather than adding an instruction here. We should measure
+ * whether the whole thing is worth it in the first place
+ * as we could avoid loading SPRN_ESR completely in the first
+ * place...
+ *
+ * TODO: Is it worth doing that mfspr & rlwimi in the first
+ * place or can we save a couple of instructions here ?
+ */
+ mfspr r12,SPRN_ESR
+ li r13,_PAGE_PRESENT|_PAGE_ACCESSED
+ rlwimi r13,r12,10,30,30
+
+ /* Load the PTE */
+ /* Compute pgdir/pmd offset */
+ rlwinm r12,r10,PPC44x_PGD_OFF_SHIFT,PPC44x_PGD_OFF_MASK_BIT,29
+ lwzx r11,r12,r11 /* Get pgd/pmd entry */
+
+ /* Word 0 is EPN,V,TS,DSIZ */
+ li r12,PPC47x_TLB0_VALID | PPC47x_TLBE_SIZE
+ rlwimi r10,r12,0,32-PAGE_SHIFT,31 /* Insert valid and page size*/
+ li r12,0
+ tlbwe r10,r12,0
+
+ /* XXX can we do better ? Need to make sure tlbwe has established
+ * latch V bit in MMUCR0 before the PTE is loaded further down */
+#ifdef CONFIG_SMP
+ isync
+#endif
+
+ rlwinm. r12,r11,0,0,20 /* Extract pt base address */
+ /* Compute pte address */
+ rlwimi r12,r10,PPC44x_PTE_ADD_SHIFT,PPC44x_PTE_ADD_MASK_BIT,28
+ beq 2f /* Bail if no table */
+ lwz r11,0(r12) /* Get high word of pte entry */
+
+ /* XXX can we do better ? maybe insert a known 0 bit from r11 into the
+ * bottom of r12 to create a data dependency... We can also use r10
+ * as destination nowadays
+ */
+#ifdef CONFIG_SMP
+ lwsync
+#endif
+ lwz r12,4(r12) /* Get low word of pte entry */
+
+ andc. r13,r13,r12 /* Check permission */
+
+ /* Jump to common tlb load */
+ beq finish_tlb_load_47x
+
+2: /* The bailout. Restore registers to pre-exception conditions
+ * and call the heavyweights to help us out.
+ */
+ mfspr r11,SPRN_SPRG_RSCRATCH4
+ mtcr r11
+ mfspr r13,SPRN_SPRG_RSCRATCH3
+ mfspr r12,SPRN_SPRG_RSCRATCH2
+ mfspr r11,SPRN_SPRG_RSCRATCH1
+ mfspr r10,SPRN_SPRG_RSCRATCH0
+ b DataStorage
+
+ /* Instruction TLB Error Interrupt */
+ /*
+ * Nearly the same as above, except we get our
+ * information from different registers and bailout
+ * to a different point.
+ */
+ START_EXCEPTION(InstructionTLBError47x)
+ mtspr SPRN_SPRG_WSCRATCH0,r10 /* Save some working registers */
+ mtspr SPRN_SPRG_WSCRATCH1,r11
+ mtspr SPRN_SPRG_WSCRATCH2,r12
+ mtspr SPRN_SPRG_WSCRATCH3,r13
+ mfcr r11
+ mtspr SPRN_SPRG_WSCRATCH4,r11
+ mfspr r10,SPRN_SRR0 /* Get faulting address */
+
+ /* If we are faulting a kernel address, we have to use the
+ * kernel page tables.
+ */
+ lis r11,PAGE_OFFSET@h
+ cmplw cr0,r10,r11
+ blt+ 3f
+ lis r11,swapper_pg_dir@h
+ ori r11,r11, swapper_pg_dir@l
+ li r12,0 /* MMUCR = 0 */
+ b 4f
+
+ /* Get the PGD for the current thread and setup MMUCR */
+3: mfspr r11,SPRN_SPRG_THREAD
+ lwz r11,PGDIR(r11)
+ mfspr r12,SPRN_PID /* Get PID */
+4: mtspr SPRN_MMUCR,r12 /* Set MMUCR */
+
+ /* Make up the required permissions */
+ li r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
+
+ /* Load PTE */
+ /* Compute pgdir/pmd offset */
+ rlwinm r12,r10,PPC44x_PGD_OFF_SHIFT,PPC44x_PGD_OFF_MASK_BIT,29
+ lwzx r11,r12,r11 /* Get pgd/pmd entry */
+
+ /* Word 0 is EPN,V,TS,DSIZ */
+ li r12,PPC47x_TLB0_VALID | PPC47x_TLBE_SIZE
+ rlwimi r10,r12,0,32-PAGE_SHIFT,31 /* Insert valid and page size*/
+ li r12,0
+ tlbwe r10,r12,0
+
+ /* XXX can we do better ? Need to make sure tlbwe has established
+ * latch V bit in MMUCR0 before the PTE is loaded further down */
+#ifdef CONFIG_SMP
+ isync
+#endif
+
+ rlwinm. r12,r11,0,0,20 /* Extract pt base address */
+ /* Compute pte address */
+ rlwimi r12,r10,PPC44x_PTE_ADD_SHIFT,PPC44x_PTE_ADD_MASK_BIT,28
+ beq 2f /* Bail if no table */
+
+ lwz r11,0(r12) /* Get high word of pte entry */
+ /* XXX can we do better ? maybe insert a known 0 bit from r11 into the
+ * bottom of r12 to create a data dependency... We can also use r10
+ * as destination nowadays
+ */
+#ifdef CONFIG_SMP
+ lwsync
+#endif
+ lwz r12,4(r12) /* Get low word of pte entry */
+
+ andc. r13,r13,r12 /* Check permission */
+
+ /* Jump to common TLB load point */
+ beq finish_tlb_load_47x
+
+2: /* The bailout. Restore registers to pre-exception conditions
+ * and call the heavyweights to help us out.
+ */
+ mfspr r11, SPRN_SPRG_RSCRATCH4
+ mtcr r11
+ mfspr r13, SPRN_SPRG_RSCRATCH3
+ mfspr r12, SPRN_SPRG_RSCRATCH2
+ mfspr r11, SPRN_SPRG_RSCRATCH1
+ mfspr r10, SPRN_SPRG_RSCRATCH0
+ b InstructionStorage
+
+/*
+ * Both the instruction and data TLB miss get to this
+ * point to load the TLB.
+ * r10 - free to use
+ * r11 - PTE high word value
+ * r12 - PTE low word value
+ * r13 - free to use
+ * MMUCR - loaded with proper value when we get here
+ * Upon exit, we reload everything and RFI.
+ */
+finish_tlb_load_47x:
+ /* Combine RPN & ERPN and write WS 1 */
+ rlwimi r11,r12,0,0,31-PAGE_SHIFT
+ tlbwe r11,r13,1
+
+ /* And make up word 2 */
+ li r10,0xf85 /* Mask to apply from PTE */
+ rlwimi r10,r12,29,30,30 /* DIRTY -> SW position */
+ and r11,r12,r10 /* Mask PTE bits to keep */
+ andi. r10,r12,_PAGE_USER /* User page ? */
+ beq 1f /* nope, leave U bits empty */
+ rlwimi r11,r11,3,26,28 /* yes, copy S bits to U */
+1: tlbwe r11,r13,2
+
+ /* Done...restore registers and get out of here.
+ */
+ mfspr r11, SPRN_SPRG_RSCRATCH4
+ mtcr r11
+ mfspr r13, SPRN_SPRG_RSCRATCH3
+ mfspr r12, SPRN_SPRG_RSCRATCH2
+ mfspr r11, SPRN_SPRG_RSCRATCH1
+ mfspr r10, SPRN_SPRG_RSCRATCH0
+ rfi
+
+#endif /* CONFIG_PPC_47x */
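The andc. used by the miss handlers above is the whole permission test: it
clears from the required-permission mask every bit the PTE already grants, so
a zero result means the access is allowed. A minimal C sketch of that check
(illustrative only; pte_lo stands for the low PTE word in r12 and required for
the mask built in r13):

	static inline int tlb_miss_permission_ok(unsigned long pte_lo,
						 unsigned long required)
	{
		/* andc. r13,r13,r12: required & ~pte_lo; zero means every
		 * required bit (present, accessed, exec or rw) is granted. */
		return (required & ~pte_lo) == 0;
	}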
+
+ /* Debug Interrupt */
+ /*
+ * This statement needs to exist at the end of the IVPR
+ * definition just in case you end up taking a debug
+ * exception within another exception.
+ */
+ DEBUG_CRIT_EXCEPTION
+
/*
* Global functions
*/
@@ -647,6 +703,428 @@ _GLOBAL(set_context)
blr
/*
+ * Init CPU state. This is called at boot time or for secondary CPUs
+ * to setup initial TLB entries, setup IVORs, etc...
+ *
+ */
+_GLOBAL(init_cpu_state)
+ mflr r22
+#ifdef CONFIG_PPC_47x
+	/* We use the PVR to differentiate 44x cores from 476 */
+ mfspr r3,SPRN_PVR
+ srwi r3,r3,16
+ cmplwi cr0,r3,PVR_476@h
+ beq head_start_47x
+ cmplwi cr0,r3,PVR_476_ISS@h
+ beq head_start_47x
+#endif /* CONFIG_PPC_47x */
+
+/*
+ * In case the firmware didn't do it, we apply some workarounds
+ * that are good for all 440 core variants here
+ */
+ mfspr r3,SPRN_CCR0
+ rlwinm r3,r3,0,0,27 /* disable icache prefetch */
+ isync
+ mtspr SPRN_CCR0,r3
+ isync
+ sync
+
+/*
+ * Set up the initial MMU state for 44x
+ *
+ * We are still executing code at the virtual address
+ * mappings set by the firmware for the base of RAM.
+ *
+ * We first invalidate all TLB entries but the one
+ * we are running from. We then load the KERNELBASE
+ * mappings so we can begin to use kernel addresses
+ * natively and so the interrupt vector locations are
+ * permanently pinned (necessary since Book E
+ * implementations always have translation enabled).
+ *
+ * TODO: Use the known TLB entry we are running from to
+ * determine which physical region we are located
+ * in. This can be used to determine where in RAM
+ * (on a shared CPU system) or PCI memory space
+ * (on a DRAMless system) we are located.
+ * For now, we assume a perfect world which means
+ * we are located at the base of DRAM (physical 0).
+ */
+
+/*
+ * Search TLB for entry that we are currently using.
+ * Invalidate all entries but the one we are using.
+ */
+ /* Load our current PID->MMUCR TID and MSR IS->MMUCR STS */
+ mfspr r3,SPRN_PID /* Get PID */
+ mfmsr r4 /* Get MSR */
+ andi. r4,r4,MSR_IS@l /* TS=1? */
+ beq wmmucr /* If not, leave STS=0 */
+ oris r3,r3,PPC44x_MMUCR_STS@h /* Set STS=1 */
+wmmucr: mtspr SPRN_MMUCR,r3 /* Put MMUCR */
+ sync
+
+ bl invstr /* Find our address */
+invstr: mflr r5 /* Make it accessible */
+ tlbsx r23,0,r5 /* Find entry we are in */
+ li r4,0 /* Start at TLB entry 0 */
+ li r3,0 /* Set PAGEID inval value */
+1: cmpw r23,r4 /* Is this our entry? */
+ beq skpinv /* If so, skip the inval */
+ tlbwe r3,r4,PPC44x_TLB_PAGEID /* If not, inval the entry */
+skpinv: addi r4,r4,1 /* Increment */
+ cmpwi r4,64 /* Are we done? */
+ bne 1b /* If not, repeat */
+ isync /* If so, context change */
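For reference, the search-and-invalidate sequence above boils down to the
following C-level loop (a sketch only: tlb_write_pageid() is an illustrative
helper, the 440 TLB is assumed to have 64 entries, and current_entry is the
index returned by tlbsx):

	for (i = 0; i < 64; i++) {
		if (i == current_entry)
			continue;		/* keep the entry we are running from */
		tlb_write_pageid(i, 0);		/* write an invalid PAGEID word */
	}
	isync();				/* context synchronise afterwards */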
+
+/*
+ * Configure and load pinned entry into TLB slot 63.
+ */
+
+ lis r3,PAGE_OFFSET@h
+ ori r3,r3,PAGE_OFFSET@l
+
+ /* Kernel is at the base of RAM */
+ li r4, 0 /* Load the kernel physical address */
+
+ /* Load the kernel PID = 0 */
+ li r0,0
+ mtspr SPRN_PID,r0
+ sync
+
+ /* Initialize MMUCR */
+ li r5,0
+ mtspr SPRN_MMUCR,r5
+ sync
+
+ /* pageid fields */
+ clrrwi r3,r3,10 /* Mask off the effective page number */
+ ori r3,r3,PPC44x_TLB_VALID | PPC44x_TLB_256M
+
+ /* xlat fields */
+ clrrwi r4,r4,10 /* Mask off the real page number */
+ /* ERPN is 0 for first 4GB page */
+
+ /* attrib fields */
+ /* Added guarded bit to protect against speculative loads/stores */
+ li r5,0
+ ori r5,r5,(PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G)
+
+ li r0,63 /* TLB slot 63 */
+
+ tlbwe r3,r0,PPC44x_TLB_PAGEID /* Load the pageid fields */
+ tlbwe r4,r0,PPC44x_TLB_XLAT /* Load the translation fields */
+ tlbwe r5,r0,PPC44x_TLB_ATTRIB /* Load the attrib/access fields */
+
+ /* Force context change */
+ mfmsr r0
+ mtspr SPRN_SRR1, r0
+ lis r0,3f@h
+ ori r0,r0,3f@l
+ mtspr SPRN_SRR0,r0
+ sync
+ rfi
+
+ /* If necessary, invalidate original entry we used */
+3: cmpwi r23,63
+ beq 4f
+ li r6,0
+ tlbwe r6,r23,PPC44x_TLB_PAGEID
+ isync
+
+4:
+#ifdef CONFIG_PPC_EARLY_DEBUG_44x
+ /* Add UART mapping for early debug. */
+
+ /* pageid fields */
+ lis r3,PPC44x_EARLY_DEBUG_VIRTADDR@h
+ ori r3,r3,PPC44x_TLB_VALID|PPC44x_TLB_TS|PPC44x_TLB_64K
+
+ /* xlat fields */
+ lis r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSLOW@h
+ ori r4,r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSHIGH
+
+ /* attrib fields */
+ li r5,(PPC44x_TLB_SW|PPC44x_TLB_SR|PPC44x_TLB_I|PPC44x_TLB_G)
+	li	r0,62			/* TLB slot 62 */
+
+ tlbwe r3,r0,PPC44x_TLB_PAGEID
+ tlbwe r4,r0,PPC44x_TLB_XLAT
+ tlbwe r5,r0,PPC44x_TLB_ATTRIB
+
+ /* Force context change */
+ isync
+#endif /* CONFIG_PPC_EARLY_DEBUG_44x */
+
+ /* Establish the interrupt vector offsets */
+ SET_IVOR(0, CriticalInput);
+ SET_IVOR(1, MachineCheck);
+ SET_IVOR(2, DataStorage);
+ SET_IVOR(3, InstructionStorage);
+ SET_IVOR(4, ExternalInput);
+ SET_IVOR(5, Alignment);
+ SET_IVOR(6, Program);
+ SET_IVOR(7, FloatingPointUnavailable);
+ SET_IVOR(8, SystemCall);
+ SET_IVOR(9, AuxillaryProcessorUnavailable);
+ SET_IVOR(10, Decrementer);
+ SET_IVOR(11, FixedIntervalTimer);
+ SET_IVOR(12, WatchdogTimer);
+ SET_IVOR(13, DataTLBError44x);
+ SET_IVOR(14, InstructionTLBError44x);
+ SET_IVOR(15, DebugCrit);
+
+ b head_start_common
+
+
+#ifdef CONFIG_PPC_47x
+
+#ifdef CONFIG_SMP
+
+/* Entry point for secondary 47x processors */
+_GLOBAL(start_secondary_47x)
+ mr r24,r3 /* CPU number */
+
+ bl init_cpu_state
+
+ /* Now we need to bolt the rest of kernel memory which
+ * is done in C code. We must be careful because our task
+ * struct or our stack can (and will probably) be out
+ * of reach of the initial 256M TLB entry, so we use a
+ * small temporary stack in .bss for that. This works
+ * because only one CPU at a time can be in this code
+ */
+ lis r1,temp_boot_stack@h
+ ori r1,r1,temp_boot_stack@l
+ addi r1,r1,1024-STACK_FRAME_OVERHEAD
+ li r0,0
+ stw r0,0(r1)
+ bl mmu_init_secondary
+
+ /* Now we can get our task struct and real stack pointer */
+
+ /* Get current_thread_info and current */
+ lis r1,secondary_ti@ha
+ lwz r1,secondary_ti@l(r1)
+ lwz r2,TI_TASK(r1)
+
+ /* Current stack pointer */
+ addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
+ li r0,0
+ stw r0,0(r1)
+
+ /* Kernel stack for exception entry in SPRG3 */
+ addi r4,r2,THREAD /* init task's THREAD */
+ mtspr SPRN_SPRG3,r4
+
+ b start_secondary
+
+#endif /* CONFIG_SMP */
+
+/*
+ * Set up the initial MMU state for 44x
+ *
+ * We are still executing code at the virtual address
+ * mappings set by the firmware for the base of RAM.
+ */
+
+head_start_47x:
+ /* Load our current PID->MMUCR TID and MSR IS->MMUCR STS */
+ mfspr r3,SPRN_PID /* Get PID */
+ mfmsr r4 /* Get MSR */
+ andi. r4,r4,MSR_IS@l /* TS=1? */
+ beq 1f /* If not, leave STS=0 */
+ oris r3,r3,PPC47x_MMUCR_STS@h /* Set STS=1 */
+1: mtspr SPRN_MMUCR,r3 /* Put MMUCR */
+ sync
+
+ /* Find the entry we are running from */
+ bl 1f
+1: mflr r23
+ tlbsx r23,0,r23
+ tlbre r24,r23,0
+ tlbre r25,r23,1
+ tlbre r26,r23,2
+
+/*
+ * Cleanup time
+ */
+
+ /* Initialize MMUCR */
+ li r5,0
+ mtspr SPRN_MMUCR,r5
+ sync
+
+clear_all_utlb_entries:
+
+ #; Set initial values.
+
+ addis r3,0,0x8000
+ addi r4,0,0
+ addi r5,0,0
+ b clear_utlb_entry
+
+ #; Align the loop to speed things up.
+
+ .align 6
+
+clear_utlb_entry:
+
+ tlbwe r4,r3,0
+ tlbwe r5,r3,1
+ tlbwe r5,r3,2
+ addis r3,r3,0x2000
+ cmpwi r3,0
+ bne clear_utlb_entry
+ addis r3,0,0x8000
+ addis r4,r4,0x100
+ cmpwi r4,0
+ bne clear_utlb_entry
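The nested loop above walks every UTLB entry; its net effect, sketched in C
(utlb_write_invalid() is illustrative only, and the 476 UTLB is assumed to be
4 ways by 256 congruence classes):

	for (way = 0; way < 4; way++)
		for (index = 0; index < 256; index++)
			utlb_write_invalid(way, index);	/* words 0, 1 and 2 all zero */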
+
+ #; Restore original entry.
+
+ oris r23,r23,0x8000 /* specify the way */
+ tlbwe r24,r23,0
+ tlbwe r25,r23,1
+ tlbwe r26,r23,2
+
+/*
+ * Configure and load pinned entry into TLB for the kernel core
+ */
+
+ lis r3,PAGE_OFFSET@h
+ ori r3,r3,PAGE_OFFSET@l
+
+ /* Kernel is at the base of RAM */
+ li r4, 0 /* Load the kernel physical address */
+
+ /* Load the kernel PID = 0 */
+ li r0,0
+ mtspr SPRN_PID,r0
+ sync
+
+ /* Word 0 */
+ clrrwi r3,r3,12 /* Mask off the effective page number */
+ ori r3,r3,PPC47x_TLB0_VALID | PPC47x_TLB0_256M
+
+ /* Word 1 */
+ clrrwi r4,r4,12 /* Mask off the real page number */
+ /* ERPN is 0 for first 4GB page */
+ /* Word 2 */
+ li r5,0
+ ori r5,r5,PPC47x_TLB2_S_RWX
+#ifdef CONFIG_SMP
+ ori r5,r5,PPC47x_TLB2_M
+#endif
+
+ /* We write to way 0 and bolted 0 */
+ lis r0,0x8800
+ tlbwe r3,r0,0
+ tlbwe r4,r0,1
+ tlbwe r5,r0,2
+
+/*
+ * Configure SSPCR, ISPCR and USPCR for now to search everything, we can fix
+ * them up later
+ */
+ LOAD_REG_IMMEDIATE(r3, 0x9abcdef0)
+ mtspr SPRN_SSPCR,r3
+ mtspr SPRN_USPCR,r3
+ LOAD_REG_IMMEDIATE(r3, 0x12345670)
+ mtspr SPRN_ISPCR,r3
+
+ /* Force context change */
+ mfmsr r0
+ mtspr SPRN_SRR1, r0
+ lis r0,3f@h
+ ori r0,r0,3f@l
+ mtspr SPRN_SRR0,r0
+ sync
+ rfi
+
+ /* Invalidate original entry we used */
+3:
+ rlwinm r24,r24,0,21,19 /* clear the "valid" bit */
+ tlbwe r24,r23,0
+ addi r24,0,0
+ tlbwe r24,r23,1
+ tlbwe r24,r23,2
+ isync /* Clear out the shadow TLB entries */
+
+#ifdef CONFIG_PPC_EARLY_DEBUG_44x
+ /* Add UART mapping for early debug. */
+
+ /* Word 0 */
+ lis r3,PPC44x_EARLY_DEBUG_VIRTADDR@h
+ ori r3,r3,PPC47x_TLB0_VALID | PPC47x_TLB0_TS | PPC47x_TLB0_1M
+
+ /* Word 1 */
+ lis r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSLOW@h
+ ori r4,r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSHIGH
+
+ /* Word 2 */
+ li r5,(PPC47x_TLB2_S_RW | PPC47x_TLB2_IMG)
+
+	/* Bolted in way 0, bolt slot 5. We -hope- we don't hit the same
+	 * congruence class as the kernel; we need to make sure of that at
+	 * some point
+ */
+ lis r0,0x8d00
+ tlbwe r3,r0,0
+ tlbwe r4,r0,1
+ tlbwe r5,r0,2
+
+ /* Force context change */
+ isync
+#endif /* CONFIG_PPC_EARLY_DEBUG_44x */
+
+ /* Establish the interrupt vector offsets */
+ SET_IVOR(0, CriticalInput);
+ SET_IVOR(1, MachineCheckA);
+ SET_IVOR(2, DataStorage);
+ SET_IVOR(3, InstructionStorage);
+ SET_IVOR(4, ExternalInput);
+ SET_IVOR(5, Alignment);
+ SET_IVOR(6, Program);
+ SET_IVOR(7, FloatingPointUnavailable);
+ SET_IVOR(8, SystemCall);
+ SET_IVOR(9, AuxillaryProcessorUnavailable);
+ SET_IVOR(10, Decrementer);
+ SET_IVOR(11, FixedIntervalTimer);
+ SET_IVOR(12, WatchdogTimer);
+ SET_IVOR(13, DataTLBError47x);
+ SET_IVOR(14, InstructionTLBError47x);
+ SET_IVOR(15, DebugCrit);
+
+ /* We configure icbi to invalidate 128 bytes at a time since the
+ * current 32-bit kernel code isn't too happy with icache != dcache
+ * block size
+ */
+ mfspr r3,SPRN_CCR0
+ oris r3,r3,0x0020
+ mtspr SPRN_CCR0,r3
+ isync
+
+#endif /* CONFIG_PPC_47x */
+
+/*
+ * Here we are back to code that is common between 44x and 47x
+ *
+ * We proceed to further kernel initialization and return to the
+ * main kernel entry
+ */
+head_start_common:
+ /* Establish the interrupt vector base */
+ lis r4,interrupt_base@h /* IVPR only uses the high 16-bits */
+ mtspr SPRN_IVPR,r4
+
+ addis r22,r22,KERNELBASE@h
+ mtlr r22
+ isync
+ blr
+
+/*
* We put a few things here that have to be page-aligned. This stuff
* goes at the beginning of the data segment, which is page-aligned.
*/
@@ -671,3 +1149,9 @@ swapper_pg_dir:
*/
abatron_pteptrs:
.space 8
+
+#ifdef CONFIG_SMP
+ .align 12
+temp_boot_stack:
+ .space 1024
+#endif /* CONFIG_SMP */
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index bed9a29ee383..844a44b64472 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -37,7 +37,7 @@
#include <asm/firmware.h>
#include <asm/page_64.h>
#include <asm/irqflags.h>
-#include <asm/kvm_book3s_64_asm.h>
+#include <asm/kvm_book3s_asm.h>
 /* The physical memory is laid out such that the secondary processor
* spin code sits at 0x0000...0x00ff. On server, the vectors follow
@@ -169,7 +169,7 @@ exception_marker:
/* KVM trampoline code needs to be close to the interrupt handlers */
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
-#include "../kvm/book3s_64_rmhandlers.S"
+#include "../kvm/book3s_rmhandlers.S"
#endif
_GLOBAL(generic_secondary_thread_init)
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index 3ef743fa5d7c..1f1a04b5c2a4 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -71,9 +71,6 @@ _ENTRY(_start);
* in the first level table, but that would require many changes to the
* Linux page directory/table functions that I don't want to do right now.
*
- * I used to use SPRG2 for a temporary register in the TLB handler, but it
- * has since been put to other uses. I now use a hack to save a register
- * and the CCR at memory location 0.....Someday I'll fix this.....
* -- Dan
*/
.globl __start
@@ -302,8 +299,13 @@ InstructionTLBMiss:
DO_8xx_CPU6(0x3f80, r3)
mtspr SPRN_M_TW, r10 /* Save a couple of working registers */
mfcr r10
+#ifdef CONFIG_8xx_CPU6
stw r10, 0(r0)
stw r11, 4(r0)
+#else
+ mtspr SPRN_DAR, r10
+ mtspr SPRN_SPRG2, r11
+#endif
mfspr r10, SPRN_SRR0 /* Get effective address of fault */
#ifdef CONFIG_8xx_CPU15
addi r11, r10, 0x1000
@@ -318,12 +320,16 @@ InstructionTLBMiss:
/* If we are faulting a kernel address, we have to use the
* kernel page tables.
*/
+#ifdef CONFIG_MODULES
+ /* Only modules will cause ITLB Misses as we always
+ * pin the first 8MB of kernel memory */
andi. r11, r10, 0x0800 /* Address >= 0x80000000 */
beq 3f
lis r11, swapper_pg_dir@h
ori r11, r11, swapper_pg_dir@l
rlwimi r10, r11, 0, 2, 19
3:
+#endif
lwz r11, 0(r10) /* Get the level 1 entry */
rlwinm. r10, r11,0,0,19 /* Extract page descriptor page address */
beq 2f /* If zero, don't try to find a pte */
@@ -339,31 +345,35 @@ InstructionTLBMiss:
mfspr r11, SPRN_MD_TWC /* ....and get the pte address */
lwz r10, 0(r11) /* Get the pte */
+#ifdef CONFIG_SWAP
andi. r11, r10, _PAGE_ACCESSED | _PAGE_PRESENT
cmpwi cr0, r11, _PAGE_ACCESSED | _PAGE_PRESENT
bne- cr0, 2f
-
- /* Clear PP lsb, 0x400 */
- rlwinm r10, r10, 0, 22, 20
-
+#endif
/* The Linux PTE won't go exactly into the MMU TLB.
- * Software indicator bits 22 and 28 must be clear.
+ * Software indicator bits 21 and 28 must be clear.
* Software indicator bits 24, 25, 26, and 27 must be
* set. All other Linux PTE bits control the behavior
* of the MMU.
*/
li r11, 0x00f0
- rlwimi r10, r11, 0, 24, 28 /* Set 24-27, clear 28 */
+ rlwimi r10, r11, 0, 0x07f8 /* Set 24-27, clear 21-23,28 */
DO_8xx_CPU6(0x2d80, r3)
mtspr SPRN_MI_RPN, r10 /* Update TLB entry */
- mfspr r10, SPRN_M_TW /* Restore registers */
+ /* Restore registers */
+#ifndef CONFIG_8xx_CPU6
+ mfspr r10, SPRN_DAR
+ mtcr r10
+ mtspr SPRN_DAR, r11 /* Tag DAR */
+ mfspr r11, SPRN_SPRG2
+#else
lwz r11, 0(r0)
mtcr r11
lwz r11, 4(r0)
-#ifdef CONFIG_8xx_CPU6
lwz r3, 8(r0)
#endif
+ mfspr r10, SPRN_M_TW
rfi
2:
mfspr r11, SPRN_SRR1
@@ -373,13 +383,20 @@ InstructionTLBMiss:
rlwinm r11, r11, 0, 0xffff
mtspr SPRN_SRR1, r11
- mfspr r10, SPRN_M_TW /* Restore registers */
+ /* Restore registers */
+#ifndef CONFIG_8xx_CPU6
+ mfspr r10, SPRN_DAR
+ mtcr r10
+ li r11, 0x00f0
+ mtspr SPRN_DAR, r11 /* Tag DAR */
+ mfspr r11, SPRN_SPRG2
+#else
lwz r11, 0(r0)
mtcr r11
lwz r11, 4(r0)
-#ifdef CONFIG_8xx_CPU6
lwz r3, 8(r0)
#endif
+ mfspr r10, SPRN_M_TW
b InstructionAccess
. = 0x1200
@@ -390,8 +407,13 @@ DataStoreTLBMiss:
DO_8xx_CPU6(0x3f80, r3)
mtspr SPRN_M_TW, r10 /* Save a couple of working registers */
mfcr r10
+#ifdef CONFIG_8xx_CPU6
stw r10, 0(r0)
stw r11, 4(r0)
+#else
+ mtspr SPRN_DAR, r10
+ mtspr SPRN_SPRG2, r11
+#endif
mfspr r10, SPRN_M_TWB /* Get level 1 table entry address */
/* If we are faulting a kernel address, we have to use the
@@ -438,15 +460,14 @@ DataStoreTLBMiss:
* r11 = ((r10 & PRESENT) & ((r10 & ACCESSED) >> 5));
* r10 = (r10 & ~PRESENT) | r11;
*/
+#ifdef CONFIG_SWAP
rlwinm r11, r10, 32-5, _PAGE_PRESENT
and r11, r11, r10
rlwimi r10, r11, 0, _PAGE_PRESENT
-
+#endif
/* Honour kernel RO, User NA */
/* 0x200 == Extended encoding, bit 22 */
- /* r11 = (r10 & _PAGE_USER) >> 2 */
- rlwinm r11, r10, 32-2, 0x200
- or r10, r11, r10
+ rlwimi r10, r10, 32-2, 0x200 /* Copy USER to bit 22, 0x200 */
/* r11 = (r10 & _PAGE_RW) >> 1 */
rlwinm r11, r10, 32-1, 0x200
or r10, r11, r10
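In C terms the rotate-and-insert pair above performs the bit copies the
comments describe (sketch only; the 0x200 constant is the extended-encoding
bit and the _PAGE_* values are those quoted in the comments):

	pte = (pte & ~0x200) | ((pte & _PAGE_USER) >> 2);	/* rlwimi: USER -> bit 22 */
	pte |= (pte & _PAGE_RW) >> 1;				/* or: RW also -> bit 22 */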
@@ -460,18 +481,24 @@ DataStoreTLBMiss:
* of the MMU.
*/
2: li r11, 0x00f0
- mtspr SPRN_DAR,r11 /* Tag DAR */
rlwimi r10, r11, 0, 24, 28 /* Set 24-27, clear 28 */
DO_8xx_CPU6(0x3d80, r3)
mtspr SPRN_MD_RPN, r10 /* Update TLB entry */
- mfspr r10, SPRN_M_TW /* Restore registers */
+ /* Restore registers */
+#ifndef CONFIG_8xx_CPU6
+ mfspr r10, SPRN_DAR
+ mtcr r10
+ mtspr SPRN_DAR, r11 /* Tag DAR */
+ mfspr r11, SPRN_SPRG2
+#else
+ mtspr SPRN_DAR, r11 /* Tag DAR */
lwz r11, 0(r0)
mtcr r11
lwz r11, 4(r0)
-#ifdef CONFIG_8xx_CPU6
lwz r3, 8(r0)
#endif
+ mfspr r10, SPRN_M_TW
rfi
/* This is an instruction TLB error on the MPC8xx. This could be due
@@ -683,9 +710,6 @@ start_here:
tophys(r4,r2)
addi r4,r4,THREAD /* init task's THREAD */
mtspr SPRN_SPRG_THREAD,r4
- li r3,0
- /* XXX What is that for ? SPRG2 appears otherwise unused on 8xx */
- mtspr SPRN_SPRG2,r3 /* 0 => r1 has kernel sp */
/* stack */
lis r1,init_thread_union@ha
diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h
index 50504ae39cb7..a0bf158c8b47 100644
--- a/arch/powerpc/kernel/head_booke.h
+++ b/arch/powerpc/kernel/head_booke.h
@@ -1,6 +1,7 @@
#ifndef __HEAD_BOOKE_H__
#define __HEAD_BOOKE_H__
+#include <asm/ptrace.h> /* for STACK_FRAME_REGS_MARKER */
/*
* Macros used for common Book-e exception handling
*/
@@ -48,6 +49,9 @@
stw r10,0(r11); \
rlwinm r9,r9,0,14,12; /* clear MSR_WE (necessary?) */\
stw r0,GPR0(r11); \
+ lis r10, STACK_FRAME_REGS_MARKER@ha;/* exception frame marker */ \
+ addi r10, r10, STACK_FRAME_REGS_MARKER@l; \
+ stw r10, 8(r11); \
SAVE_4GPRS(3, r11); \
SAVE_2GPRS(7, r11)
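The marker stored at offset 8 above is what lets a stack walker recognise an
exception frame. A hedged sketch of the kind of check the backtrace code can
make (STACK_FRAME_MARKER comes from asm/ptrace.h; the pr_debug() call and the
sp variable are illustrative):

	unsigned long *stack = (unsigned long *)sp;

	if (stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER)
		/* a struct pt_regs frame follows the frame header */
		pr_debug("exception frame at %p\n", stack);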
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 725526547994..edd4a57fd29e 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -639,6 +639,13 @@ interrupt_base:
rlwinm r12,r12,0,16,1
mtspr SPRN_MAS1,r12
+ /* Make up the required permissions for kernel code */
+#ifdef CONFIG_PTE_64BIT
+ li r13,_PAGE_PRESENT | _PAGE_BAP_SX
+ oris r13,r13,_PAGE_ACCESSED@h
+#else
+ li r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
+#endif
b 4f
/* Get the PGD for the current thread */
@@ -646,15 +653,15 @@ interrupt_base:
mfspr r11,SPRN_SPRG_THREAD
lwz r11,PGDIR(r11)
-4:
- /* Make up the required permissions */
+ /* Make up the required permissions for user code */
#ifdef CONFIG_PTE_64BIT
- li r13,_PAGE_PRESENT | _PAGE_EXEC
+ li r13,_PAGE_PRESENT | _PAGE_BAP_UX
oris r13,r13,_PAGE_ACCESSED@h
#else
li r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
#endif
+4:
FIND_PTE
andc. r13,r13,r11 /* Check permission */
diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
index 71cf280da184..21266abfbda6 100644
--- a/arch/powerpc/kernel/ibmebus.c
+++ b/arch/powerpc/kernel/ibmebus.c
@@ -140,14 +140,14 @@ static struct dma_map_ops ibmebus_dma_ops = {
static int ibmebus_match_path(struct device *dev, void *data)
{
- struct device_node *dn = to_of_device(dev)->node;
+ struct device_node *dn = to_of_device(dev)->dev.of_node;
return (dn->full_name &&
(strcasecmp((char *)data, dn->full_name) == 0));
}
static int ibmebus_match_node(struct device *dev, void *data)
{
- return to_of_device(dev)->node == data;
+ return to_of_device(dev)->dev.of_node == data;
}
static int ibmebus_create_device(struct device_node *dn)
@@ -202,7 +202,7 @@ static int ibmebus_create_devices(const struct of_device_id *matches)
int ibmebus_register_driver(struct of_platform_driver *drv)
{
/* If the driver uses devices that ibmebus doesn't know, add them */
- ibmebus_create_devices(drv->match_table);
+ ibmebus_create_devices(drv->driver.of_match_table);
return of_register_driver(drv, &ibmebus_bus_type);
}
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index ec94f906ea43..d5839179ec77 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -43,20 +43,9 @@
#define DBG(...)
static int novmerge;
-static int protect4gb = 1;
static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int);
-static int __init setup_protect4gb(char *str)
-{
- if (strcmp(str, "on") == 0)
- protect4gb = 1;
- else if (strcmp(str, "off") == 0)
- protect4gb = 0;
-
- return 1;
-}
-
static int __init setup_iommu(char *str)
{
if (!strcmp(str, "novmerge"))
@@ -66,7 +55,6 @@ static int __init setup_iommu(char *str)
return 1;
}
-__setup("protect4gb=", setup_protect4gb);
__setup("iommu=", setup_iommu);
static unsigned long iommu_range_alloc(struct device *dev,
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 64f6f2031c22..30817d9b20cb 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -53,7 +53,6 @@
#include <linux/bootmem.h>
#include <linux/pci.h>
#include <linux/debugfs.h>
-#include <linux/perf_event.h>
#include <asm/uaccess.h>
#include <asm/system.h>
@@ -145,11 +144,6 @@ notrace void raw_local_irq_restore(unsigned long en)
}
#endif /* CONFIG_PPC_STD_MMU_64 */
- if (test_perf_event_pending()) {
- clear_perf_event_pending();
- perf_event_do_pending();
- }
-
/*
* if (get_paca()->hard_enabled) return;
* But again we need to take care that gcc gets hard_enabled directly
@@ -290,30 +284,33 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
}
#ifdef CONFIG_HOTPLUG_CPU
-void fixup_irqs(cpumask_t map)
+void fixup_irqs(const struct cpumask *map)
{
struct irq_desc *desc;
unsigned int irq;
static int warned;
+ cpumask_var_t mask;
- for_each_irq(irq) {
- cpumask_t mask;
+ alloc_cpumask_var(&mask, GFP_KERNEL);
+ for_each_irq(irq) {
desc = irq_to_desc(irq);
if (desc && desc->status & IRQ_PER_CPU)
continue;
- cpumask_and(&mask, desc->affinity, &map);
- if (any_online_cpu(mask) == NR_CPUS) {
+ cpumask_and(mask, desc->affinity, map);
+ if (cpumask_any(mask) >= nr_cpu_ids) {
printk("Breaking affinity for irq %i\n", irq);
- mask = map;
+ cpumask_copy(mask, map);
}
if (desc->chip->set_affinity)
- desc->chip->set_affinity(irq, &mask);
+ desc->chip->set_affinity(irq, mask);
else if (desc->action && !(warned++))
printk("Cannot set affinity for irq %i\n", irq);
}
+ free_cpumask_var(mask);
+
local_irq_enable();
mdelay(1);
local_irq_disable();
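The hunk above is the usual cpumask_var_t conversion: the on-stack cpumask_t
becomes a dynamically sized mask that has to be allocated and freed. The
general pattern, as a sketch (some_affinity is a placeholder; note that,
unlike this sketch, the hunk does not check the allocation result, which can
fail when CONFIG_CPUMASK_OFFSTACK=y):

	cpumask_var_t mask;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return;
	cpumask_and(mask, some_affinity, cpu_online_mask);
	/* ... use mask ... */
	free_cpumask_var(mask);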
diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
index 41bada0298c8..82a7b228c81a 100644
--- a/arch/powerpc/kernel/kgdb.c
+++ b/arch/powerpc/kernel/kgdb.c
@@ -20,6 +20,7 @@
#include <linux/smp.h>
#include <linux/signal.h>
#include <linux/ptrace.h>
+#include <linux/kdebug.h>
#include <asm/current.h>
#include <asm/processor.h>
#include <asm/machdep.h>
@@ -115,7 +116,8 @@ void kgdb_roundup_cpus(unsigned long flags)
/* KGDB functions to use existing PowerPC64 hooks. */
static int kgdb_debugger(struct pt_regs *regs)
{
- return kgdb_handle_exception(0, computeSignal(TRAP(regs)), 0, regs);
+ return !kgdb_handle_exception(1, computeSignal(TRAP(regs)),
+ DIE_OOPS, regs);
}
static int kgdb_handle_breakpoint(struct pt_regs *regs)
@@ -123,7 +125,7 @@ static int kgdb_handle_breakpoint(struct pt_regs *regs)
if (user_mode(regs))
return 0;
- if (kgdb_handle_exception(0, SIGTRAP, 0, regs) != 0)
+ if (kgdb_handle_exception(1, SIGTRAP, 0, regs) != 0)
return 0;
if (*(u32 *) (regs->nip) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
@@ -309,6 +311,11 @@ void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
(unsigned long)(((void *)gdb_regs) + NUMREGBYTES));
}
+void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
+{
+ regs->nip = pc;
+}
+
/*
 * This function does PowerPC-specific processing for interfacing to gdb.
*/
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index b36f074524ad..c533525ca56a 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -114,6 +114,9 @@ static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
regs->msr &= ~MSR_CE;
mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM);
+#ifdef CONFIG_PPC_47x
+ isync();
+#endif
#endif
/*
diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c
index c2c70e1b32cd..50362b6ef6e9 100644
--- a/arch/powerpc/kernel/lparcfg.c
+++ b/arch/powerpc/kernel/lparcfg.c
@@ -38,7 +38,7 @@
#include <asm/vio.h>
#include <asm/mmu.h>
-#define MODULE_VERS "1.8"
+#define MODULE_VERS "1.9"
#define MODULE_NAME "lparcfg"
/* #define LPARCFG_DEBUG */
@@ -487,6 +487,14 @@ static void splpar_dispatch_data(struct seq_file *m)
seq_printf(m, "dispatch_dispersions=%lu\n", dispatch_dispersions);
}
+static void parse_em_data(struct seq_file *m)
+{
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+
+ if (plpar_hcall(H_GET_EM_PARMS, retbuf) == H_SUCCESS)
+ seq_printf(m, "power_mode_data=%016lx\n", retbuf[0]);
+}
+
static int pseries_lparcfg_data(struct seq_file *m, void *v)
{
int partition_potential_processors;
@@ -541,6 +549,8 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v)
seq_printf(m, "slb_size=%d\n", mmu_slb_size);
+ parse_em_data(m);
+
return 0;
}
diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c
index 040bd1de8d99..26f9900f773c 100644
--- a/arch/powerpc/kernel/machine_kexec_64.c
+++ b/arch/powerpc/kernel/machine_kexec_64.c
@@ -155,33 +155,38 @@ void kexec_copy_flush(struct kimage *image)
#ifdef CONFIG_SMP
-/* FIXME: we should schedule this function to be called on all cpus based
- * on calling the interrupts, but we would like to call it off irq level
- * so that the interrupt controller is clean.
- */
+static int kexec_all_irq_disabled = 0;
+
static void kexec_smp_down(void *arg)
{
+ local_irq_disable();
+ mb(); /* make sure our irqs are disabled before we say they are */
+ get_paca()->kexec_state = KEXEC_STATE_IRQS_OFF;
+ while(kexec_all_irq_disabled == 0)
+ cpu_relax();
+ mb(); /* make sure all irqs are disabled before this */
+ /*
+	 * Now that every CPU has IRQs off, we can clear out any pending
+ * IPIs and be sure that no more will come in after this.
+ */
if (ppc_md.kexec_cpu_down)
ppc_md.kexec_cpu_down(0, 1);
- local_irq_disable();
kexec_smp_wait();
/* NOTREACHED */
}
-static void kexec_prepare_cpus(void)
+static void kexec_prepare_cpus_wait(int wait_state)
{
int my_cpu, i, notified=-1;
- smp_call_function(kexec_smp_down, NULL, /* wait */0);
my_cpu = get_cpu();
-
- /* check the others cpus are now down (via paca hw cpu id == -1) */
+	/* Make sure each CPU has at least made it to the state we need */
for (i=0; i < NR_CPUS; i++) {
if (i == my_cpu)
continue;
- while (paca[i].hw_cpu_id != -1) {
+ while (paca[i].kexec_state < wait_state) {
barrier();
if (!cpu_possible(i)) {
printk("kexec: cpu %d hw_cpu_id %d is not"
@@ -201,20 +206,35 @@ static void kexec_prepare_cpus(void)
}
if (i != notified) {
printk( "kexec: waiting for cpu %d (physical"
- " %d) to go down\n",
- i, paca[i].hw_cpu_id);
+ " %d) to enter %i state\n",
+ i, paca[i].hw_cpu_id, wait_state);
notified = i;
}
}
}
+ mb();
+}
+
+static void kexec_prepare_cpus(void)
+{
+
+ smp_call_function(kexec_smp_down, NULL, /* wait */0);
+ local_irq_disable();
+ mb(); /* make sure IRQs are disabled before we say they are */
+ get_paca()->kexec_state = KEXEC_STATE_IRQS_OFF;
+
+ kexec_prepare_cpus_wait(KEXEC_STATE_IRQS_OFF);
+ /* we are sure every CPU has IRQs off at this point */
+ kexec_all_irq_disabled = 1;
/* after we tell the others to go down */
if (ppc_md.kexec_cpu_down)
ppc_md.kexec_cpu_down(0, 0);
- put_cpu();
+	/* Before removing MMU mappings, make sure all CPUs have entered real mode */
+ kexec_prepare_cpus_wait(KEXEC_STATE_REAL_MODE);
- local_irq_disable();
+ put_cpu();
}
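The waiting half of the new handshake reduces to a simple poll on the per-CPU
kexec_state values; roughly (a simplified sketch of kexec_prepare_cpus_wait()
above, with the printk reporting dropped and for_each_possible_cpu() standing
in for the explicit NR_CPUS loop):

	static void wait_for_kexec_state(int wait_state)
	{
		int i;

		for_each_possible_cpu(i) {
			if (i == smp_processor_id())
				continue;
			while (paca[i].kexec_state < wait_state)
				cpu_relax();
		}
		mb();	/* pairs with the mb() each secondary issues before updating its state */
	}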
#else /* ! SMP */
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 8649f536f8df..8043d1b73cf0 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -441,7 +441,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
addi r3,r3,L1_CACHE_BYTES
bdnz 0b
sync
-#ifndef CONFIG_44x
+#ifdef CONFIG_44x
/* We don't flush the icache on 44x. Those have a virtual icache
* and we don't have access to the virtual address here (it's
* not the page vaddr but where it's mapped in user space). The
@@ -449,15 +449,19 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
* a change in the address space occurs, before returning to
* user space
*/
+BEGIN_MMU_FTR_SECTION
+ blr
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_44x)
+#endif /* CONFIG_44x */
mtctr r4
1: icbi 0,r6
addi r6,r6,L1_CACHE_BYTES
bdnz 1b
sync
isync
-#endif /* CONFIG_44x */
blr
+#ifndef CONFIG_BOOKE
/*
* Flush a particular page from the data cache to RAM, identified
* by its physical address. We turn off the MMU so we can just use
@@ -490,6 +494,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
mtmsr r10 /* restore DR */
isync
blr
+#endif /* CONFIG_BOOKE */
/*
* Clear pages using the dcbz instruction, which doesn't cause any
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index a5cf9c1356a6..a2b18dffa03e 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -24,6 +24,7 @@
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
+#include <asm/kexec.h>
.text
@@ -471,6 +472,10 @@ _GLOBAL(kexec_wait)
1: mflr r5
addi r5,r5,kexec_flag-1b
+ li r4,KEXEC_STATE_REAL_MODE
+ stb r4,PACAKEXECSTATE(r13)
+ SYNC
+
99: HMT_LOW
#ifdef CONFIG_KEXEC /* use no memory without kexec */
lwz r4,0(r5)
@@ -494,14 +499,11 @@ kexec_flag:
* note: this is a terminal routine, it does not save lr
*
* get phys id from paca
- * set paca id to -1 to say we got here
* switch to real mode
* join other cpus in kexec_wait(phys_id)
*/
_GLOBAL(kexec_smp_wait)
lhz r3,PACAHWCPUID(r13)
- li r4,-1
- sth r4,PACAHWCPUID(r13) /* let others know we left */
bl real_mode
b .kexec_wait
diff --git a/arch/powerpc/kernel/of_device.c b/arch/powerpc/kernel/of_device.c
index a359cb08e900..df78e0236a02 100644
--- a/arch/powerpc/kernel/of_device.c
+++ b/arch/powerpc/kernel/of_device.c
@@ -13,7 +13,7 @@
static void of_device_make_bus_id(struct of_device *dev)
{
static atomic_t bus_no_reg_magic;
- struct device_node *node = dev->node;
+ struct device_node *node = dev->dev.of_node;
const u32 *reg;
u64 addr;
int magic;
@@ -69,11 +69,10 @@ struct of_device *of_device_alloc(struct device_node *np,
if (!dev)
return NULL;
- dev->node = of_node_get(np);
- dev->dev.dma_mask = &dev->dma_mask;
+ dev->dev.of_node = of_node_get(np);
+ dev->dev.dma_mask = &dev->archdata.dma_mask;
dev->dev.parent = parent;
dev->dev.release = of_release_dev;
- dev->dev.archdata.of_node = np;
if (bus_id)
dev_set_name(&dev->dev, "%s", bus_id);
@@ -95,17 +94,17 @@ int of_device_uevent(struct device *dev, struct kobj_uevent_env *env)
ofdev = to_of_device(dev);
- if (add_uevent_var(env, "OF_NAME=%s", ofdev->node->name))
+ if (add_uevent_var(env, "OF_NAME=%s", ofdev->dev.of_node->name))
return -ENOMEM;
- if (add_uevent_var(env, "OF_TYPE=%s", ofdev->node->type))
+ if (add_uevent_var(env, "OF_TYPE=%s", ofdev->dev.of_node->type))
return -ENOMEM;
/* Since the compatible field can contain pretty much anything
* it's not really legal to split it out with commas. We split it
* up using a number of environment variables instead. */
- compat = of_get_property(ofdev->node, "compatible", &cplen);
+ compat = of_get_property(ofdev->dev.of_node, "compatible", &cplen);
while (compat && *compat && cplen > 0) {
if (add_uevent_var(env, "OF_COMPATIBLE_%d=%s", seen, compat))
return -ENOMEM;
diff --git a/arch/powerpc/kernel/of_platform.c b/arch/powerpc/kernel/of_platform.c
index 6c1dfc3ff8bc..487a98851ba6 100644
--- a/arch/powerpc/kernel/of_platform.c
+++ b/arch/powerpc/kernel/of_platform.c
@@ -74,7 +74,7 @@ struct of_device* of_platform_device_create(struct device_node *np,
if (!dev)
return NULL;
- dev->dma_mask = 0xffffffffUL;
+ dev->archdata.dma_mask = 0xffffffffUL;
dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
dev->dev.bus = &of_platform_bus_type;
@@ -195,7 +195,7 @@ EXPORT_SYMBOL(of_platform_bus_probe);
static int of_dev_node_match(struct device *dev, void *data)
{
- return to_of_device(dev)->node == data;
+ return to_of_device(dev)->dev.of_node == data;
}
struct of_device *of_find_device_by_node(struct device_node *np)
@@ -213,7 +213,7 @@ EXPORT_SYMBOL(of_find_device_by_node);
static int of_dev_phandle_match(struct device *dev, void *data)
{
phandle *ph = data;
- return to_of_device(dev)->node->phandle == *ph;
+ return to_of_device(dev)->dev.of_node->phandle == *ph;
}
struct of_device *of_find_device_by_phandle(phandle ph)
@@ -246,10 +246,10 @@ static int __devinit of_pci_phb_probe(struct of_device *dev,
if (ppc_md.pci_setup_phb == NULL)
return -ENODEV;
- printk(KERN_INFO "Setting up PCI bus %s\n", dev->node->full_name);
+ pr_info("Setting up PCI bus %s\n", dev->dev.of_node->full_name);
/* Alloc and setup PHB data structure */
- phb = pcibios_alloc_controller(dev->node);
+ phb = pcibios_alloc_controller(dev->dev.of_node);
if (!phb)
return -ENODEV;
@@ -263,19 +263,19 @@ static int __devinit of_pci_phb_probe(struct of_device *dev,
}
/* Process "ranges" property */
- pci_process_bridge_OF_ranges(phb, dev->node, 0);
+ pci_process_bridge_OF_ranges(phb, dev->dev.of_node, 0);
/* Init pci_dn data structures */
pci_devs_phb_init_dynamic(phb);
/* Register devices with EEH */
#ifdef CONFIG_EEH
- if (dev->node->child)
- eeh_add_device_tree_early(dev->node);
+ if (dev->dev.of_node->child)
+ eeh_add_device_tree_early(dev->dev.of_node);
#endif /* CONFIG_EEH */
/* Scan the bus */
- pcibios_scan_phb(phb, dev->node);
+ pcibios_scan_phb(phb, dev->dev.of_node);
if (phb->bus == NULL)
return -ENXIO;
@@ -306,10 +306,11 @@ static struct of_device_id of_pci_phb_ids[] = {
};
static struct of_platform_driver of_pci_phb_driver = {
- .match_table = of_pci_phb_ids,
.probe = of_pci_phb_probe,
.driver = {
.name = "of-pci",
+ .owner = THIS_MODULE,
+ .of_match_table = of_pci_phb_ids,
},
};
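The same conversion applies to any of_platform_driver in the tree: the match
table and owner move from the outer struct into the embedded device_driver.
As a sketch with invented names ("acme,foo", foo_ids and foo_probe are all
illustrative):

	static const struct of_device_id foo_ids[] = {
		{ .compatible = "acme,foo" },
		{}
	};

	static struct of_platform_driver foo_driver = {
		.probe	= foo_probe,
		.driver	= {
			.name		= "foo",
			.owner		= THIS_MODULE,
			.of_match_table	= foo_ids,
		},
	};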
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index 0c40c6f476fe..f88acf0218db 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -18,6 +18,7 @@
#include <asm/pgtable.h>
#include <asm/iseries/lpar_map.h>
#include <asm/iseries/hv_types.h>
+#include <asm/kexec.h>
/* This symbol is provided by the linker - let it fill in the paca
* field correctly */
@@ -97,6 +98,7 @@ void __init initialise_paca(struct paca_struct *new_paca, int cpu)
new_paca->kernelbase = (unsigned long) _stext;
new_paca->kernel_msr = MSR_KERNEL;
new_paca->hw_cpu_id = 0xffff;
+ new_paca->kexec_state = KEXEC_STATE_NONE;
new_paca->__current = &init_task;
#ifdef CONFIG_PPC_STD_MMU_64
new_paca->slb_shadow_ptr = &slb_shadow[cpu];
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 0c0567e58409..6646005dffb1 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -1097,8 +1097,8 @@ void __devinit pcibios_setup_bus_devices(struct pci_bus *bus)
if (dev->is_added)
continue;
- /* Setup OF node pointer in archdata */
- sd->of_node = pci_device_to_OF_node(dev);
+ /* Setup OF node pointer in the device */
+ dev->dev.of_node = pci_device_to_OF_node(dev);
/* Fixup NUMA node as it may not be setup yet by the generic
* code and is needed by the DMA init
diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c
index cd11d5ca80df..6ddb795f83e8 100644
--- a/arch/powerpc/kernel/pci_of_scan.c
+++ b/arch/powerpc/kernel/pci_of_scan.c
@@ -310,6 +310,8 @@ static void __devinit __of_scan_bus(struct device_node *node,
/* Scan direct children */
for_each_child_of_node(node, child) {
pr_debug(" * %s\n", child->full_name);
+ if (!of_device_is_available(child))
+ continue;
reg = of_get_property(child, "reg", &reglen);
if (reg == NULL || reglen < 20)
continue;
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c
index 08460a2e9f41..43b83c35cf54 100644
--- a/arch/powerpc/kernel/perf_event.c
+++ b/arch/powerpc/kernel/perf_event.c
@@ -35,6 +35,9 @@ struct cpu_hw_events {
u64 alternatives[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
unsigned long amasks[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
unsigned long avalues[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
+
+ unsigned int group_flag;
+ int n_txn_start;
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
@@ -718,66 +721,6 @@ static int collect_events(struct perf_event *group, int max_count,
return n;
}
-static void event_sched_in(struct perf_event *event)
-{
- event->state = PERF_EVENT_STATE_ACTIVE;
- event->oncpu = smp_processor_id();
- event->tstamp_running += event->ctx->time - event->tstamp_stopped;
- if (is_software_event(event))
- event->pmu->enable(event);
-}
-
-/*
- * Called to enable a whole group of events.
- * Returns 1 if the group was enabled, or -EAGAIN if it could not be.
- * Assumes the caller has disabled interrupts and has
- * frozen the PMU with hw_perf_save_disable.
- */
-int hw_perf_group_sched_in(struct perf_event *group_leader,
- struct perf_cpu_context *cpuctx,
- struct perf_event_context *ctx)
-{
- struct cpu_hw_events *cpuhw;
- long i, n, n0;
- struct perf_event *sub;
-
- if (!ppmu)
- return 0;
- cpuhw = &__get_cpu_var(cpu_hw_events);
- n0 = cpuhw->n_events;
- n = collect_events(group_leader, ppmu->n_counter - n0,
- &cpuhw->event[n0], &cpuhw->events[n0],
- &cpuhw->flags[n0]);
- if (n < 0)
- return -EAGAIN;
- if (check_excludes(cpuhw->event, cpuhw->flags, n0, n))
- return -EAGAIN;
- i = power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n + n0);
- if (i < 0)
- return -EAGAIN;
- cpuhw->n_events = n0 + n;
- cpuhw->n_added += n;
-
- /*
- * OK, this group can go on; update event states etc.,
- * and enable any software events
- */
- for (i = n0; i < n0 + n; ++i)
- cpuhw->event[i]->hw.config = cpuhw->events[i];
- cpuctx->active_oncpu += n;
- n = 1;
- event_sched_in(group_leader);
- list_for_each_entry(sub, &group_leader->sibling_list, group_entry) {
- if (sub->state != PERF_EVENT_STATE_OFF) {
- event_sched_in(sub);
- ++n;
- }
- }
- ctx->nr_active += n;
-
- return 1;
-}
-
/*
 * Add an event to the PMU.
* If all events are not already frozen, then we disable and
@@ -805,12 +748,22 @@ static int power_pmu_enable(struct perf_event *event)
cpuhw->event[n0] = event;
cpuhw->events[n0] = event->hw.config;
cpuhw->flags[n0] = event->hw.event_base;
+
+ /*
+	 * If a group events scheduling transaction was started,
+	 * skip the schedulability test here; it will be performed
+	 * at commit time (->commit_txn) as a whole
+ */
+ if (cpuhw->group_flag & PERF_EVENT_TXN_STARTED)
+ goto nocheck;
+
if (check_excludes(cpuhw->event, cpuhw->flags, n0, 1))
goto out;
if (power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n0 + 1))
goto out;
-
event->hw.config = cpuhw->events[n0];
+
+nocheck:
++cpuhw->n_events;
++cpuhw->n_added;
@@ -896,11 +849,65 @@ static void power_pmu_unthrottle(struct perf_event *event)
local_irq_restore(flags);
}
+/*
+ * Start group events scheduling transaction
+ * Set the flag to make pmu::enable() not perform the
+ * schedulability test, it will be performed at commit time
+ * schedulability test; it will be performed at commit time
+void power_pmu_start_txn(const struct pmu *pmu)
+{
+ struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+
+ cpuhw->group_flag |= PERF_EVENT_TXN_STARTED;
+ cpuhw->n_txn_start = cpuhw->n_events;
+}
+
+/*
+ * Stop group events scheduling transaction
+ * Clear the flag and pmu::enable() will perform the
+ * schedulability test.
+ */
+void power_pmu_cancel_txn(const struct pmu *pmu)
+{
+ struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+
+ cpuhw->group_flag &= ~PERF_EVENT_TXN_STARTED;
+}
+
+/*
+ * Commit group events scheduling transaction
+ * Perform the group schedulability test as a whole
+ * Return 0 if success
+ */
+int power_pmu_commit_txn(const struct pmu *pmu)
+{
+ struct cpu_hw_events *cpuhw;
+ long i, n;
+
+ if (!ppmu)
+ return -EAGAIN;
+ cpuhw = &__get_cpu_var(cpu_hw_events);
+ n = cpuhw->n_events;
+ if (check_excludes(cpuhw->event, cpuhw->flags, 0, n))
+ return -EAGAIN;
+ i = power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n);
+ if (i < 0)
+ return -EAGAIN;
+
+ for (i = cpuhw->n_txn_start; i < n; ++i)
+ cpuhw->event[i]->hw.config = cpuhw->events[i];
+
+ return 0;
+}
+
struct pmu power_pmu = {
.enable = power_pmu_enable,
.disable = power_pmu_disable,
.read = power_pmu_read,
.unthrottle = power_pmu_unthrottle,
+ .start_txn = power_pmu_start_txn,
+ .cancel_txn = power_pmu_cancel_txn,
+ .commit_txn = power_pmu_commit_txn,
};
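The three new callbacks let the core schedule a whole event group with a
single constraint check. A rough sketch of how a caller can drive them (this
is not the core perf code; leader handling and state bookkeeping are omitted):

	static int sched_in_group(struct pmu *pmu, struct perf_event *leader)
	{
		struct perf_event *sub;

		pmu->start_txn(pmu);
		list_for_each_entry(sub, &leader->sibling_list, group_entry) {
			if (pmu->enable(sub))
				goto fail;	/* per-event constraint checks are skipped */
		}
		if (!pmu->commit_txn(pmu))	/* one schedulability test for the group */
			return 0;
	fail:
		pmu->cancel_txn(pmu);
		return -EAGAIN;
	}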
/*
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index ab3e392ac63c..bc9f39d2598b 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -101,6 +101,10 @@ EXPORT_SYMBOL(pci_dram_offset);
EXPORT_SYMBOL(start_thread);
EXPORT_SYMBOL(kernel_thread);
+#ifndef CONFIG_BOOKE
+EXPORT_SYMBOL_GPL(cvt_df);
+EXPORT_SYMBOL_GPL(cvt_fd);
+#endif
EXPORT_SYMBOL(giveup_fpu);
#ifdef CONFIG_ALTIVEC
EXPORT_SYMBOL(giveup_altivec);
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index e4d71ced97ef..9d255b4f0a0e 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -371,6 +371,9 @@ int set_dabr(unsigned long dabr)
/* XXX should we have a CPU_FTR_HAS_DABR ? */
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
mtspr(SPRN_DAC1, dabr);
+#ifdef CONFIG_PPC_47x
+ isync();
+#endif
#elif defined(CONFIG_PPC_BOOK3S)
mtspr(SPRN_DABR, dabr);
#endif
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index ed2cfe17d25e..7a0c0199ea28 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -39,6 +39,109 @@
#include <asm/system.h>
/*
+ * The parameter save area on the stack is used to store arguments being passed
+ * to a callee function and is located at a fixed offset from the stack pointer.
+ */
+#ifdef CONFIG_PPC32
+#define PARAMETER_SAVE_AREA_OFFSET 24 /* bytes */
+#else /* CONFIG_PPC32 */
+#define PARAMETER_SAVE_AREA_OFFSET 48 /* bytes */
+#endif
+
+struct pt_regs_offset {
+ const char *name;
+ int offset;
+};
+
+#define STR(s) #s /* convert to string */
+#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
+#define GPR_OFFSET_NAME(num) \
+ {.name = STR(gpr##num), .offset = offsetof(struct pt_regs, gpr[num])}
+#define REG_OFFSET_END {.name = NULL, .offset = 0}
+
+static const struct pt_regs_offset regoffset_table[] = {
+ GPR_OFFSET_NAME(0),
+ GPR_OFFSET_NAME(1),
+ GPR_OFFSET_NAME(2),
+ GPR_OFFSET_NAME(3),
+ GPR_OFFSET_NAME(4),
+ GPR_OFFSET_NAME(5),
+ GPR_OFFSET_NAME(6),
+ GPR_OFFSET_NAME(7),
+ GPR_OFFSET_NAME(8),
+ GPR_OFFSET_NAME(9),
+ GPR_OFFSET_NAME(10),
+ GPR_OFFSET_NAME(11),
+ GPR_OFFSET_NAME(12),
+ GPR_OFFSET_NAME(13),
+ GPR_OFFSET_NAME(14),
+ GPR_OFFSET_NAME(15),
+ GPR_OFFSET_NAME(16),
+ GPR_OFFSET_NAME(17),
+ GPR_OFFSET_NAME(18),
+ GPR_OFFSET_NAME(19),
+ GPR_OFFSET_NAME(20),
+ GPR_OFFSET_NAME(21),
+ GPR_OFFSET_NAME(22),
+ GPR_OFFSET_NAME(23),
+ GPR_OFFSET_NAME(24),
+ GPR_OFFSET_NAME(25),
+ GPR_OFFSET_NAME(26),
+ GPR_OFFSET_NAME(27),
+ GPR_OFFSET_NAME(28),
+ GPR_OFFSET_NAME(29),
+ GPR_OFFSET_NAME(30),
+ GPR_OFFSET_NAME(31),
+ REG_OFFSET_NAME(nip),
+ REG_OFFSET_NAME(msr),
+ REG_OFFSET_NAME(ctr),
+ REG_OFFSET_NAME(link),
+ REG_OFFSET_NAME(xer),
+ REG_OFFSET_NAME(ccr),
+#ifdef CONFIG_PPC64
+ REG_OFFSET_NAME(softe),
+#else
+ REG_OFFSET_NAME(mq),
+#endif
+ REG_OFFSET_NAME(trap),
+ REG_OFFSET_NAME(dar),
+ REG_OFFSET_NAME(dsisr),
+ REG_OFFSET_END,
+};
+
+/**
+ * regs_query_register_offset() - query register offset from its name
+ * @name: the name of a register
+ *
+ * regs_query_register_offset() returns the offset of a register in struct
+ * pt_regs from its name. If the name is invalid, this returns -EINVAL;
+ */
+int regs_query_register_offset(const char *name)
+{
+ const struct pt_regs_offset *roff;
+ for (roff = regoffset_table; roff->name != NULL; roff++)
+ if (!strcmp(roff->name, name))
+ return roff->offset;
+ return -EINVAL;
+}
+
+/**
+ * regs_query_register_name() - query register name from its offset
+ * @offset: the offset of a register in struct pt_regs.
+ *
+ * regs_query_register_name() returns the name of a register from its
+ * offset in struct pt_regs. If the @offset is invalid, this returns NULL;
+ */
+const char *regs_query_register_name(unsigned int offset)
+{
+ const struct pt_regs_offset *roff;
+ for (roff = regoffset_table; roff->name != NULL; roff++)
+ if (roff->offset == offset)
+ return roff->name;
+ return NULL;
+}
+
+/*
* does not yet catch signals sent when the child dies.
* in exit.c or in signal.c.
*/
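Example use of the two helpers added above, e.g. from kprobes fetch-argument
code ("gpr3" is just an illustrative register name, and regs is assumed to be
a struct pt_regs pointer already in scope):

	int off = regs_query_register_offset("gpr3");

	if (off >= 0) {
		unsigned long val = *(unsigned long *)((char *)regs + off);
		pr_debug("%s = %lx\n", regs_query_register_name(off), val);
	}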
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 74367841615a..0e1ec6f746f6 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -691,10 +691,14 @@ void rtas_os_term(char *str)
{
int status;
- if (panic_timeout)
- return;
-
- if (RTAS_UNKNOWN_SERVICE == rtas_token("ibm,os-term"))
+ /*
+ * Firmware with the ibm,extended-os-term property is guaranteed
+ * to always return from an ibm,os-term call. Earlier versions without
+ * this property may terminate the partition, which we want to avoid
+ * since it interferes with panic_timeout.
+ */
+ if (RTAS_UNKNOWN_SERVICE == rtas_token("ibm,os-term") ||
+ RTAS_UNKNOWN_SERVICE == rtas_token("ibm,extended-os-term"))
return;
snprintf(rtas_os_term_buf, 2048, "OS panic: %s", str);
@@ -705,8 +709,7 @@ void rtas_os_term(char *str)
} while (rtas_busy_delay(status));
if (status != 0)
- printk(KERN_EMERG "ibm,os-term call failed %d\n",
- status);
+ printk(KERN_EMERG "ibm,os-term call failed %d\n", status);
}
static int ibm_suspend_me_token = RTAS_UNKNOWN_SERVICE;
diff --git a/arch/powerpc/kernel/rtasd.c b/arch/powerpc/kernel/rtasd.c
index 4190eae7850a..638883e23e3a 100644
--- a/arch/powerpc/kernel/rtasd.c
+++ b/arch/powerpc/kernel/rtasd.c
@@ -411,9 +411,9 @@ static void rtas_event_scan(struct work_struct *w)
get_online_cpus();
- cpu = next_cpu(smp_processor_id(), cpu_online_map);
- if (cpu == NR_CPUS) {
- cpu = first_cpu(cpu_online_map);
+ cpu = cpumask_next(smp_processor_id(), cpu_online_mask);
+ if (cpu >= nr_cpu_ids) {
+ cpu = cpumask_first(cpu_online_mask);
if (first_pass) {
first_pass = 0;
@@ -466,8 +466,8 @@ static void start_event_scan(void)
 /* Retrieve errors from nvram if any */
retreive_nvram_error_log();
- schedule_delayed_work_on(first_cpu(cpu_online_map), &event_scan_work,
- event_scan_delay);
+ schedule_delayed_work_on(cpumask_first(cpu_online_mask),
+ &event_scan_work, event_scan_delay);
}
static int __init rtas_init(void)
@@ -490,6 +490,12 @@ static int __init rtas_init(void)
return -ENODEV;
}
+ if (!rtas_event_scan_rate) {
+ /* Broken firmware: take a rate of zero to mean don't scan */
+ printk(KERN_DEBUG "rtasd: scan rate is 0, not scanning\n");
+ return 0;
+ }
+
/* Make room for the sequence number */
rtas_error_log_max = rtas_get_error_log_max();
rtas_error_log_buffer_max = rtas_error_log_max + sizeof(int);
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 48f0a008b20b..5e4d852f640c 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -161,45 +161,44 @@ extern u32 cpu_temp_both(unsigned long cpu);
DEFINE_PER_CPU(unsigned int, cpu_pvr);
#endif
-static int show_cpuinfo(struct seq_file *m, void *v)
+static void show_cpuinfo_summary(struct seq_file *m)
{
- unsigned long cpu_id = (unsigned long)v - 1;
- unsigned int pvr;
- unsigned short maj;
- unsigned short min;
-
- if (cpu_id == NR_CPUS) {
- struct device_node *root;
- const char *model = NULL;
+ struct device_node *root;
+ const char *model = NULL;
#if defined(CONFIG_SMP) && defined(CONFIG_PPC32)
- unsigned long bogosum = 0;
- int i;
- for_each_online_cpu(i)
- bogosum += loops_per_jiffy;
- seq_printf(m, "total bogomips\t: %lu.%02lu\n",
- bogosum/(500000/HZ), bogosum/(5000/HZ) % 100);
+ unsigned long bogosum = 0;
+ int i;
+ for_each_online_cpu(i)
+ bogosum += loops_per_jiffy;
+ seq_printf(m, "total bogomips\t: %lu.%02lu\n",
+ bogosum/(500000/HZ), bogosum/(5000/HZ) % 100);
#endif /* CONFIG_SMP && CONFIG_PPC32 */
- seq_printf(m, "timebase\t: %lu\n", ppc_tb_freq);
- if (ppc_md.name)
- seq_printf(m, "platform\t: %s\n", ppc_md.name);
- root = of_find_node_by_path("/");
- if (root)
- model = of_get_property(root, "model", NULL);
- if (model)
- seq_printf(m, "model\t\t: %s\n", model);
- of_node_put(root);
-
- if (ppc_md.show_cpuinfo != NULL)
- ppc_md.show_cpuinfo(m);
+ seq_printf(m, "timebase\t: %lu\n", ppc_tb_freq);
+ if (ppc_md.name)
+ seq_printf(m, "platform\t: %s\n", ppc_md.name);
+ root = of_find_node_by_path("/");
+ if (root)
+ model = of_get_property(root, "model", NULL);
+ if (model)
+ seq_printf(m, "model\t\t: %s\n", model);
+ of_node_put(root);
+
+ if (ppc_md.show_cpuinfo != NULL)
+ ppc_md.show_cpuinfo(m);
#ifdef CONFIG_PPC32
- /* Display the amount of memory */
- seq_printf(m, "Memory\t\t: %d MB\n",
- (unsigned int)(total_memory / (1024 * 1024)));
+ /* Display the amount of memory */
+ seq_printf(m, "Memory\t\t: %d MB\n",
+ (unsigned int)(total_memory / (1024 * 1024)));
#endif
+}
- return 0;
- }
+static int show_cpuinfo(struct seq_file *m, void *v)
+{
+ unsigned long cpu_id = (unsigned long)v - 1;
+ unsigned int pvr;
+ unsigned short maj;
+ unsigned short min;
/* We only show online cpus: disable preempt (overzealous, I
 * know) to prevent a cpu from going down. */
@@ -308,19 +307,28 @@ static int show_cpuinfo(struct seq_file *m, void *v)
#endif
preempt_enable();
+
+ /* If this is the last cpu, print the summary */
+ if (cpumask_next(cpu_id, cpu_online_mask) >= nr_cpu_ids)
+ show_cpuinfo_summary(m);
+
return 0;
}
static void *c_start(struct seq_file *m, loff_t *pos)
{
- unsigned long i = *pos;
-
- return i <= NR_CPUS ? (void *)(i + 1) : NULL;
+ if (*pos == 0) /* just in case, cpu 0 is not the first */
+ *pos = cpumask_first(cpu_online_mask);
+ else
+ *pos = cpumask_next(*pos - 1, cpu_online_mask);
+ if ((*pos) < nr_cpu_ids)
+ return (void *)(unsigned long)(*pos + 1);
+ return NULL;
}
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
- ++*pos;
+ (*pos)++;
return c_start(m, pos);
}
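The iterator above stores "cpu + 1" in the seq_file cursor so that a NULL
return can mean end of sequence; the ->show() callback then undoes the offset,
as in the unchanged show_cpuinfo() above. Sketch only (show_one() is an
illustrative name):

	static int show_one(struct seq_file *m, void *v)
	{
		unsigned long cpu = (unsigned long)v - 1;	/* undo the +1 from c_start() */

		seq_printf(m, "processor\t: %lu\n", cpu);
		return 0;
	}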
@@ -386,14 +394,14 @@ static void __init cpu_init_thread_core_maps(int tpc)
/**
* setup_cpu_maps - initialize the following cpu maps:
- * cpu_possible_map
- * cpu_present_map
+ * cpu_possible_mask
+ * cpu_present_mask
*
* Having the possible map set up early allows us to restrict allocations
* of things like irqstacks to num_possible_cpus() rather than NR_CPUS.
*
* We do not initialize the online map here; cpus set their own bits in
- * cpu_online_map as they come up.
+ * cpu_online_mask as they come up.
*
* This function is valid only for Open Firmware systems. finish_device_tree
* must be called before using this.
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 914389158a9b..f3fb5a79de52 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -424,9 +424,18 @@ void __init setup_system(void)
DBG(" <- setup_system()\n");
}
+static u64 slb0_limit(void)
+{
+ if (cpu_has_feature(CPU_FTR_1T_SEGMENT)) {
+ return 1UL << SID_SHIFT_1T;
+ }
+ return 1UL << SID_SHIFT;
+}
+
#ifdef CONFIG_IRQSTACKS
static void __init irqstack_early_init(void)
{
+ u64 limit = slb0_limit();
unsigned int i;
/*
@@ -436,10 +445,10 @@ static void __init irqstack_early_init(void)
for_each_possible_cpu(i) {
softirq_ctx[i] = (struct thread_info *)
__va(lmb_alloc_base(THREAD_SIZE,
- THREAD_SIZE, 0x10000000));
+ THREAD_SIZE, limit));
hardirq_ctx[i] = (struct thread_info *)
__va(lmb_alloc_base(THREAD_SIZE,
- THREAD_SIZE, 0x10000000));
+ THREAD_SIZE, limit));
}
}
#else
@@ -470,7 +479,7 @@ static void __init exc_lvl_early_init(void)
*/
static void __init emergency_stack_init(void)
{
- unsigned long limit;
+ u64 limit;
unsigned int i;
/*
@@ -482,7 +491,7 @@ static void __init emergency_stack_init(void)
* bringup, we need to get at them in real mode. This means they
* must also be within the RMO region.
*/
- limit = min(0x10000000ULL, lmb.rmo_size);
+ limit = min(slb0_limit(), lmb.rmo_size);
for_each_possible_cpu(i) {
unsigned long sp;
@@ -573,12 +582,6 @@ void ppc64_boot_msg(unsigned int src, const char *msg)
printk("[boot]%04x %s\n", src, msg);
}
-void cpu_die(void)
-{
- if (ppc_md.cpu_die)
- ppc_md.cpu_die();
-}
-
#ifdef CONFIG_SMP
#define PCPU_DYN_SIZE ()
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index c2ee14498077..5c196d1086d9 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -59,8 +59,8 @@
struct thread_info *secondary_ti;
-DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
-DEFINE_PER_CPU(cpumask_t, cpu_core_map) = CPU_MASK_NONE;
+DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
+DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);
@@ -271,6 +271,16 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
smp_store_cpu_info(boot_cpuid);
cpu_callin_map[boot_cpuid] = 1;
+ for_each_possible_cpu(cpu) {
+ zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
+ GFP_KERNEL, cpu_to_node(cpu));
+ zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
+ GFP_KERNEL, cpu_to_node(cpu));
+ }
+
+ cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
+ cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));
+
if (smp_ops)
if (smp_ops->probe)
max_cpus = smp_ops->probe();
@@ -289,10 +299,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
void __devinit smp_prepare_boot_cpu(void)
{
BUG_ON(smp_processor_id() != boot_cpuid);
-
- set_cpu_online(boot_cpuid, true);
- cpu_set(boot_cpuid, per_cpu(cpu_sibling_map, boot_cpuid));
- cpu_set(boot_cpuid, per_cpu(cpu_core_map, boot_cpuid));
#ifdef CONFIG_PPC64
paca[boot_cpuid].__current = current;
#endif
@@ -313,7 +319,7 @@ int generic_cpu_disable(void)
set_cpu_online(cpu, false);
#ifdef CONFIG_PPC64
vdso_data->processorCount--;
- fixup_irqs(cpu_online_map);
+ fixup_irqs(cpu_online_mask);
#endif
return 0;
}
@@ -333,7 +339,7 @@ int generic_cpu_enable(unsigned int cpu)
cpu_relax();
#ifdef CONFIG_PPC64
- fixup_irqs(cpu_online_map);
+ fixup_irqs(cpu_online_mask);
/* counter the irq disable in fixup_irqs */
local_irq_enable();
#endif
@@ -462,7 +468,7 @@ out:
return id;
}
-/* Must be called when no change can occur to cpu_present_map,
+/* Must be called when no change can occur to cpu_present_mask,
* i.e. during cpu online or offline.
*/
static struct device_node *cpu_to_l2cache(int cpu)
@@ -495,6 +501,14 @@ int __devinit start_secondary(void *unused)
current->active_mm = &init_mm;
smp_store_cpu_info(cpu);
+
+#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
+ /* Clear any pending timer interrupts */
+ mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);
+
+ /* Enable decrementer interrupt */
+ mtspr(SPRN_TCR, TCR_DIE);
+#endif
set_dec(tb_ticks_per_jiffy);
preempt_disable();
cpu_callin_map[cpu] = 1;
@@ -517,15 +531,15 @@ int __devinit start_secondary(void *unused)
for (i = 0; i < threads_per_core; i++) {
if (cpu_is_offline(base + i))
continue;
- cpu_set(cpu, per_cpu(cpu_sibling_map, base + i));
- cpu_set(base + i, per_cpu(cpu_sibling_map, cpu));
+ cpumask_set_cpu(cpu, cpu_sibling_mask(base + i));
+ cpumask_set_cpu(base + i, cpu_sibling_mask(cpu));
/* cpu_core_map should be a superset of
* cpu_sibling_map even if we don't have cache
* information, so update the former here, too.
*/
- cpu_set(cpu, per_cpu(cpu_core_map, base +i));
- cpu_set(base + i, per_cpu(cpu_core_map, cpu));
+ cpumask_set_cpu(cpu, cpu_core_mask(base + i));
+ cpumask_set_cpu(base + i, cpu_core_mask(cpu));
}
l2_cache = cpu_to_l2cache(cpu);
for_each_online_cpu(i) {
@@ -533,8 +547,8 @@ int __devinit start_secondary(void *unused)
if (!np)
continue;
if (np == l2_cache) {
- cpu_set(cpu, per_cpu(cpu_core_map, i));
- cpu_set(i, per_cpu(cpu_core_map, cpu));
+ cpumask_set_cpu(cpu, cpu_core_mask(i));
+ cpumask_set_cpu(i, cpu_core_mask(cpu));
}
of_node_put(np);
}
@@ -554,19 +568,22 @@ int setup_profiling_timer(unsigned int multiplier)
void __init smp_cpus_done(unsigned int max_cpus)
{
- cpumask_t old_mask;
+ cpumask_var_t old_mask;
/* We want the setup_cpu() here to be called from CPU 0, but our
* init thread may have been "borrowed" by another CPU in the meantime
 * so we pin ourselves down to CPU 0 for a short while
*/
- old_mask = current->cpus_allowed;
- set_cpus_allowed(current, cpumask_of_cpu(boot_cpuid));
+ alloc_cpumask_var(&old_mask, GFP_NOWAIT);
+ cpumask_copy(old_mask, &current->cpus_allowed);
+ set_cpus_allowed_ptr(current, cpumask_of(boot_cpuid));
if (smp_ops && smp_ops->setup_cpu)
smp_ops->setup_cpu(boot_cpuid);
- set_cpus_allowed(current, old_mask);
+ set_cpus_allowed_ptr(current, old_mask);
+
+ free_cpumask_var(old_mask);
snapshot_timebases();
@@ -591,10 +608,10 @@ int __cpu_disable(void)
/* Update sibling maps */
base = cpu_first_thread_in_core(cpu);
for (i = 0; i < threads_per_core; i++) {
- cpu_clear(cpu, per_cpu(cpu_sibling_map, base + i));
- cpu_clear(base + i, per_cpu(cpu_sibling_map, cpu));
- cpu_clear(cpu, per_cpu(cpu_core_map, base +i));
- cpu_clear(base + i, per_cpu(cpu_core_map, cpu));
+ cpumask_clear_cpu(cpu, cpu_sibling_mask(base + i));
+ cpumask_clear_cpu(base + i, cpu_sibling_mask(cpu));
+ cpumask_clear_cpu(cpu, cpu_core_mask(base + i));
+ cpumask_clear_cpu(base + i, cpu_core_mask(cpu));
}
l2_cache = cpu_to_l2cache(cpu);
@@ -603,8 +620,8 @@ int __cpu_disable(void)
if (!np)
continue;
if (np == l2_cache) {
- cpu_clear(cpu, per_cpu(cpu_core_map, i));
- cpu_clear(i, per_cpu(cpu_core_map, cpu));
+ cpumask_clear_cpu(cpu, cpu_core_mask(i));
+ cpumask_clear_cpu(i, cpu_core_mask(cpu));
}
of_node_put(np);
}
@@ -631,4 +648,10 @@ void cpu_hotplug_driver_unlock()
{
mutex_unlock(&powerpc_cpu_hotplug_driver_mutex);
}
+
+void cpu_die(void)
+{
+ if (ppc_md.cpu_die)
+ ppc_md.cpu_die();
+}
#endif
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index e235e52dc4fe..c0d8c2006bf4 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -35,7 +35,7 @@ static DEFINE_PER_CPU(struct cpu, cpu_devices);
#ifdef CONFIG_PPC64
/* Time in microseconds we delay before sleeping in the idle loop */
-DEFINE_PER_CPU(unsigned long, smt_snooze_delay) = { 100 };
+DEFINE_PER_CPU(long, smt_snooze_delay) = { 100 };
static ssize_t store_smt_snooze_delay(struct sys_device *dev,
struct sysdev_attribute *attr,
@@ -44,9 +44,9 @@ static ssize_t store_smt_snooze_delay(struct sys_device *dev,
{
struct cpu *cpu = container_of(dev, struct cpu, sysdev);
ssize_t ret;
- unsigned long snooze;
+ long snooze;
- ret = sscanf(buf, "%lu", &snooze);
+ ret = sscanf(buf, "%ld", &snooze);
if (ret != 1)
return -EINVAL;
@@ -61,53 +61,23 @@ static ssize_t show_smt_snooze_delay(struct sys_device *dev,
{
struct cpu *cpu = container_of(dev, struct cpu, sysdev);
- return sprintf(buf, "%lu\n", per_cpu(smt_snooze_delay, cpu->sysdev.id));
+ return sprintf(buf, "%ld\n", per_cpu(smt_snooze_delay, cpu->sysdev.id));
}
static SYSDEV_ATTR(smt_snooze_delay, 0644, show_smt_snooze_delay,
store_smt_snooze_delay);
-/* Only parse OF options if the matching cmdline option was not specified */
-static int smt_snooze_cmdline;
-
-static int __init smt_setup(void)
-{
- struct device_node *options;
- const unsigned int *val;
- unsigned int cpu;
-
- if (!cpu_has_feature(CPU_FTR_SMT))
- return -ENODEV;
-
- options = of_find_node_by_path("/options");
- if (!options)
- return -ENODEV;
-
- val = of_get_property(options, "ibm,smt-snooze-delay", NULL);
- if (!smt_snooze_cmdline && val) {
- for_each_possible_cpu(cpu)
- per_cpu(smt_snooze_delay, cpu) = *val;
- }
-
- of_node_put(options);
- return 0;
-}
-__initcall(smt_setup);
-
static int __init setup_smt_snooze_delay(char *str)
{
unsigned int cpu;
- int snooze;
+ long snooze;
if (!cpu_has_feature(CPU_FTR_SMT))
return 1;
- smt_snooze_cmdline = 1;
-
- if (get_option(&str, &snooze)) {
- for_each_possible_cpu(cpu)
- per_cpu(smt_snooze_delay, cpu) = snooze;
- }
+ snooze = simple_strtol(str, NULL, 10);
+ for_each_possible_cpu(cpu)
+ per_cpu(smt_snooze_delay, cpu) = snooze;
return 1;
}
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 1b16b9a3e49a..0441bbdadbd1 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -532,25 +532,60 @@ void __init iSeries_time_init_early(void)
}
#endif /* CONFIG_PPC_ISERIES */
-#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_PPC32)
-DEFINE_PER_CPU(u8, perf_event_pending);
+#ifdef CONFIG_PERF_EVENTS
-void set_perf_event_pending(void)
+/*
+ * 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable...
+ */
+#ifdef CONFIG_PPC64
+static inline unsigned long test_perf_event_pending(void)
{
- get_cpu_var(perf_event_pending) = 1;
- set_dec(1);
- put_cpu_var(perf_event_pending);
+ unsigned long x;
+
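+ /* On 64-bit, GPR13 holds the PACA pointer, so a byte load at the
+ * field's offset reads the pending flag directly. */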
+ asm volatile("lbz %0,%1(13)"
+ : "=r" (x)
+ : "i" (offsetof(struct paca_struct, perf_event_pending)));
+ return x;
}
+static inline void set_perf_event_pending_flag(void)
+{
+ asm volatile("stb %0,%1(13)" : :
+ "r" (1),
+ "i" (offsetof(struct paca_struct, perf_event_pending)));
+}
+
+static inline void clear_perf_event_pending(void)
+{
+ asm volatile("stb %0,%1(13)" : :
+ "r" (0),
+ "i" (offsetof(struct paca_struct, perf_event_pending)));
+}
+
+#else /* 32-bit */
+
+DEFINE_PER_CPU(u8, perf_event_pending);
+
+#define set_perf_event_pending_flag() __get_cpu_var(perf_event_pending) = 1
#define test_perf_event_pending() __get_cpu_var(perf_event_pending)
#define clear_perf_event_pending() __get_cpu_var(perf_event_pending) = 0
-#else /* CONFIG_PERF_EVENTS && CONFIG_PPC32 */
+#endif /* 32 vs 64 bit */
+
+void set_perf_event_pending(void)
+{
+ preempt_disable();
+ set_perf_event_pending_flag();
+ set_dec(1);
+ preempt_enable();
+}
+
+#else /* CONFIG_PERF_EVENTS */
#define test_perf_event_pending() 0
#define clear_perf_event_pending()
-#endif /* CONFIG_PERF_EVENTS && CONFIG_PPC32 */
+#endif /* CONFIG_PERF_EVENTS */
/*
* For iSeries shared processors, we have to let the hypervisor
@@ -582,10 +617,6 @@ void timer_interrupt(struct pt_regs * regs)
set_dec(DECREMENTER_MAX);
#ifdef CONFIG_PPC32
- if (test_perf_event_pending()) {
- clear_perf_event_pending();
- perf_event_do_pending();
- }
if (atomic_read(&ppc_n_lost_interrupts) != 0)
do_IRQ(regs);
#endif
@@ -604,6 +635,11 @@ void timer_interrupt(struct pt_regs * regs)
calculate_steal_time();
+ if (test_perf_event_pending()) {
+ clear_perf_event_pending();
+ perf_event_do_pending();
+ }
+
#ifdef CONFIG_PPC_ISERIES
if (firmware_has_feature(FW_FEATURE_ISERIES))
get_lppaca()->int_dword.fields.decr_int = 0;
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 29d128eb6c43..3031fc712ad0 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -380,6 +380,46 @@ int machine_check_440A(struct pt_regs *regs)
}
return 0;
}
+
+int machine_check_47x(struct pt_regs *regs)
+{
+ unsigned long reason = get_mc_reason(regs);
+ u32 mcsr;
+
+ printk(KERN_ERR "Machine check in kernel mode.\n");
+ if (reason & ESR_IMCP) {
+ printk(KERN_ERR
+ "Instruction Synchronous Machine Check exception\n");
+ mtspr(SPRN_ESR, reason & ~ESR_IMCP);
+ return 0;
+ }
+ mcsr = mfspr(SPRN_MCSR);
+ if (mcsr & MCSR_IB)
+ printk(KERN_ERR "Instruction Read PLB Error\n");
+ if (mcsr & MCSR_DRB)
+ printk(KERN_ERR "Data Read PLB Error\n");
+ if (mcsr & MCSR_DWB)
+ printk(KERN_ERR "Data Write PLB Error\n");
+ if (mcsr & MCSR_TLBP)
+ printk(KERN_ERR "TLB Parity Error\n");
+ if (mcsr & MCSR_ICP) {
+ flush_instruction_cache();
+ printk(KERN_ERR "I-Cache Parity Error\n");
+ }
+ if (mcsr & MCSR_DCSP)
+ printk(KERN_ERR "D-Cache Search Parity Error\n");
+ if (mcsr & PPC47x_MCSR_GPR)
+ printk(KERN_ERR "GPR Parity Error\n");
+ if (mcsr & PPC47x_MCSR_FPR)
+ printk(KERN_ERR "FPR Parity Error\n");
+ if (mcsr & PPC47x_MCSR_IPR)
+ printk(KERN_ERR "Machine Check exception is imprecise\n");
+
+ /* Clear MCSR */
+ mtspr(SPRN_MCSR, mcsr);
+
+ return 0;
+}
#elif defined(CONFIG_E500)
int machine_check_e500(struct pt_regs *regs)
{
@@ -815,12 +855,15 @@ void __kprobes program_check_exception(struct pt_regs *regs)
return;
}
if (reason & REASON_TRAP) {
+ /* Debugger is first in line to stop recursive faults in
+ * rcu_lock, notify_die, or atomic_notifier_call_chain */
+ if (debugger_bpt(regs))
+ return;
+
/* trap exception */
if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP)
== NOTIFY_STOP)
return;
- if (debugger_bpt(regs))
- return;
if (!(regs->msr & MSR_PR) && /* not user-mode */
report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) {
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index 82237176a2a3..00b9436f7652 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -645,8 +645,10 @@ void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired)
found = 1;
break;
}
- if (!found)
+ if (!found) {
+ spin_unlock_irqrestore(&vio_cmo.lock, flags);
return;
+ }
/* Increase/decrease in desired device entitlement */
if (desired >= viodev->cmo.desired) {
@@ -705,7 +707,7 @@ static int vio_cmo_bus_probe(struct vio_dev *viodev)
* Check to see that device has a DMA window and configure
* entitlement for the device.
*/
- if (of_get_property(viodev->dev.archdata.of_node,
+ if (of_get_property(viodev->dev.of_node,
"ibm,my-dma-window", NULL)) {
/* Check that the driver is CMO enabled and get desired DMA */
if (!viodrv->get_desired_dma) {
@@ -958,9 +960,12 @@ viodev_cmo_rd_attr(allocated);
static ssize_t name_show(struct device *, struct device_attribute *, char *);
static ssize_t devspec_show(struct device *, struct device_attribute *, char *);
+static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
+ char *buf);
static struct device_attribute vio_cmo_dev_attrs[] = {
__ATTR_RO(name),
__ATTR_RO(devspec),
+ __ATTR_RO(modalias),
__ATTR(cmo_desired, S_IWUSR|S_IRUSR|S_IWGRP|S_IRGRP|S_IROTH,
viodev_cmo_desired_show, viodev_cmo_desired_set),
__ATTR(cmo_entitled, S_IRUGO, viodev_cmo_entitled_show, NULL),
@@ -1049,7 +1054,7 @@ static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
if (firmware_has_feature(FW_FEATURE_ISERIES))
return vio_build_iommu_table_iseries(dev);
- dma_window = of_get_property(dev->dev.archdata.of_node,
+ dma_window = of_get_property(dev->dev.of_node,
"ibm,my-dma-window", NULL);
if (!dma_window)
return NULL;
@@ -1058,7 +1063,7 @@ static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
if (tbl == NULL)
return NULL;
- of_parse_dma_window(dev->dev.archdata.of_node, dma_window,
+ of_parse_dma_window(dev->dev.of_node, dma_window,
&tbl->it_index, &offset, &size);
/* TCE table size - measured in tce entries */
@@ -1086,7 +1091,7 @@ static const struct vio_device_id *vio_match_device(
{
while (ids->type[0] != '\0') {
if ((strncmp(dev->type, ids->type, strlen(ids->type)) == 0) &&
- of_device_is_compatible(dev->dev.archdata.of_node,
+ of_device_is_compatible(dev->dev.of_node,
ids->compat))
return ids;
ids++;
@@ -1179,7 +1184,7 @@ EXPORT_SYMBOL(vio_unregister_driver);
static void __devinit vio_dev_release(struct device *dev)
{
/* XXX should free TCE table */
- of_node_put(dev->archdata.of_node);
+ of_node_put(dev->of_node);
kfree(to_vio_dev(dev));
}
@@ -1230,7 +1235,7 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node)
if (unit_address != NULL)
viodev->unit_address = *unit_address;
}
- viodev->dev.archdata.of_node = of_node_get(of_node);
+ viodev->dev.of_node = of_node_get(of_node);
if (firmware_has_feature(FW_FEATURE_CMO))
vio_cmo_set_dma_ops(viodev);
@@ -1315,14 +1320,32 @@ static ssize_t name_show(struct device *dev,
static ssize_t devspec_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct device_node *of_node = dev->archdata.of_node;
+ struct device_node *of_node = dev->of_node;
return sprintf(buf, "%s\n", of_node ? of_node->full_name : "none");
}
+static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ const struct vio_dev *vio_dev = to_vio_dev(dev);
+ struct device_node *dn;
+ const char *cp;
+
+ dn = dev->of_node;
+ if (!dn)
+ return -ENODEV;
+ cp = of_get_property(dn, "compatible", NULL);
+ if (!cp)
+ return -ENODEV;
+
+ return sprintf(buf, "vio:T%sS%s\n", vio_dev->type, cp);
+}
+
static struct device_attribute vio_dev_attrs[] = {
__ATTR_RO(name),
__ATTR_RO(devspec),
+ __ATTR_RO(modalias),
__ATTR_NULL
};
@@ -1347,7 +1370,7 @@ static int vio_hotplug(struct device *dev, struct kobj_uevent_env *env)
struct device_node *dn;
const char *cp;
- dn = dev->archdata.of_node;
+ dn = dev->of_node;
if (!dn)
return -ENODEV;
cp = of_get_property(dn, "compatible", NULL);
@@ -1365,6 +1388,7 @@ static struct bus_type vio_bus_type = {
.match = vio_bus_match,
.probe = vio_bus_probe,
.remove = vio_bus_remove,
+ .pm = GENERIC_SUBSYS_PM_OPS,
};
/**
@@ -1378,7 +1402,7 @@ static struct bus_type vio_bus_type = {
*/
const void *vio_get_attribute(struct vio_dev *vdev, char *which, int *length)
{
- return of_get_property(vdev->dev.archdata.of_node, which, length);
+ return of_get_property(vdev->dev.of_node, which, length);
}
EXPORT_SYMBOL(vio_get_attribute);
diff --git a/arch/powerpc/kvm/44x.c b/arch/powerpc/kvm/44x.c
index 689a57c2ac80..73c0a3f64ed1 100644
--- a/arch/powerpc/kvm/44x.c
+++ b/arch/powerpc/kvm/44x.c
@@ -147,7 +147,7 @@ static int __init kvmppc_44x_init(void)
if (r)
return r;
- return kvm_init(NULL, sizeof(struct kvmppc_vcpu_44x), THIS_MODULE);
+ return kvm_init(NULL, sizeof(struct kvmppc_vcpu_44x), 0, THIS_MODULE);
}
static void __exit kvmppc_44x_exit(void)
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c
index 2570fcc7665d..812312542e50 100644
--- a/arch/powerpc/kvm/44x_tlb.c
+++ b/arch/powerpc/kvm/44x_tlb.c
@@ -440,7 +440,7 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
unsigned int gtlb_index;
gtlb_index = kvmppc_get_gpr(vcpu, ra);
- if (gtlb_index > KVM44x_GUEST_TLB_SIZE) {
+ if (gtlb_index >= KVM44x_GUEST_TLB_SIZE) {
printk("%s: index %d\n", __func__, gtlb_index);
kvmppc_dump_vcpu(vcpu);
return EMULATE_FAIL;
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index 60624cc9f4d4..b7baff78f90c 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -22,12 +22,34 @@ config KVM
select ANON_INODES
select KVM_MMIO
+config KVM_BOOK3S_HANDLER
+ bool
+
+config KVM_BOOK3S_32_HANDLER
+ bool
+ select KVM_BOOK3S_HANDLER
+
config KVM_BOOK3S_64_HANDLER
bool
+ select KVM_BOOK3S_HANDLER
+
+config KVM_BOOK3S_32
+ tristate "KVM support for PowerPC book3s_32 processors"
+ depends on EXPERIMENTAL && PPC_BOOK3S_32 && !SMP && !PTE_64BIT
+ select KVM
+ select KVM_BOOK3S_32_HANDLER
+ ---help---
+ Support running unmodified book3s_32 guest kernels
+ in virtual machines on book3s_32 host processors.
+
+ This module provides access to the hardware capabilities through
+ a character device node named /dev/kvm.
+
+ If unsure, say N.
config KVM_BOOK3S_64
tristate "KVM support for PowerPC book3s_64 processors"
- depends on EXPERIMENTAL && PPC64
+ depends on EXPERIMENTAL && PPC_BOOK3S_64
select KVM
select KVM_BOOK3S_64_HANDLER
---help---
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index 56484d652377..ff436066bf77 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -14,7 +14,7 @@ CFLAGS_emulate.o := -I.
common-objs-y += powerpc.o emulate.o
obj-$(CONFIG_KVM_EXIT_TIMING) += timing.o
-obj-$(CONFIG_KVM_BOOK3S_64_HANDLER) += book3s_64_exports.o
+obj-$(CONFIG_KVM_BOOK3S_HANDLER) += book3s_exports.o
AFLAGS_booke_interrupts.o := -I$(obj)
@@ -40,17 +40,31 @@ kvm-objs-$(CONFIG_KVM_E500) := $(kvm-e500-objs)
kvm-book3s_64-objs := \
$(common-objs-y) \
+ fpu.o \
+ book3s_paired_singles.o \
book3s.o \
- book3s_64_emulate.o \
- book3s_64_interrupts.o \
+ book3s_emulate.o \
+ book3s_interrupts.o \
book3s_64_mmu_host.o \
book3s_64_mmu.o \
book3s_32_mmu.o
kvm-objs-$(CONFIG_KVM_BOOK3S_64) := $(kvm-book3s_64-objs)
+kvm-book3s_32-objs := \
+ $(common-objs-y) \
+ fpu.o \
+ book3s_paired_singles.o \
+ book3s.o \
+ book3s_emulate.o \
+ book3s_interrupts.o \
+ book3s_32_mmu_host.o \
+ book3s_32_mmu.o
+kvm-objs-$(CONFIG_KVM_BOOK3S_32) := $(kvm-book3s_32-objs)
+
kvm-objs := $(kvm-objs-m) $(kvm-objs-y)
obj-$(CONFIG_KVM_440) += kvm.o
obj-$(CONFIG_KVM_E500) += kvm.o
obj-$(CONFIG_KVM_BOOK3S_64) += kvm.o
+obj-$(CONFIG_KVM_BOOK3S_32) += kvm.o
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 604af29b71ed..b998abf1a63d 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -16,6 +16,7 @@
#include <linux/kvm_host.h>
#include <linux/err.h>
+#include <linux/slab.h>
#include <asm/reg.h>
#include <asm/cputable.h>
@@ -29,6 +30,7 @@
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
+#include <linux/highmem.h>
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
@@ -36,7 +38,15 @@
/* #define EXIT_DEBUG_SIMPLE */
/* #define DEBUG_EXT */
-static void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
+static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
+ ulong msr);
+
+/* Some compatibility defines */
+#ifdef CONFIG_PPC_BOOK3S_32
+#define MSR_USER32 MSR_USER
+#define MSR_USER64 MSR_USER
+#define HW_PAGE_SIZE PAGE_SIZE
+#endif
struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "exits", VCPU_STAT(sum_exits) },
@@ -69,18 +79,26 @@ void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
- memcpy(get_paca()->kvm_slb, to_book3s(vcpu)->slb_shadow, sizeof(get_paca()->kvm_slb));
- memcpy(&get_paca()->shadow_vcpu, &to_book3s(vcpu)->shadow_vcpu,
+#ifdef CONFIG_PPC_BOOK3S_64
+ memcpy(to_svcpu(vcpu)->slb, to_book3s(vcpu)->slb_shadow, sizeof(to_svcpu(vcpu)->slb));
+ memcpy(&get_paca()->shadow_vcpu, to_book3s(vcpu)->shadow_vcpu,
sizeof(get_paca()->shadow_vcpu));
- get_paca()->kvm_slb_max = to_book3s(vcpu)->slb_shadow_max;
+ to_svcpu(vcpu)->slb_max = to_book3s(vcpu)->slb_shadow_max;
+#endif
+
+#ifdef CONFIG_PPC_BOOK3S_32
+ current->thread.kvm_shadow_vcpu = to_book3s(vcpu)->shadow_vcpu;
+#endif
}
void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
- memcpy(to_book3s(vcpu)->slb_shadow, get_paca()->kvm_slb, sizeof(get_paca()->kvm_slb));
- memcpy(&to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu,
+#ifdef CONFIG_PPC_BOOK3S_64
+ memcpy(to_book3s(vcpu)->slb_shadow, to_svcpu(vcpu)->slb, sizeof(to_svcpu(vcpu)->slb));
+ memcpy(to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu,
sizeof(get_paca()->shadow_vcpu));
- to_book3s(vcpu)->slb_shadow_max = get_paca()->kvm_slb_max;
+ to_book3s(vcpu)->slb_shadow_max = to_svcpu(vcpu)->slb_max;
+#endif
kvmppc_giveup_ext(vcpu, MSR_FP);
kvmppc_giveup_ext(vcpu, MSR_VEC);
@@ -131,18 +149,22 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
}
}
- if (((vcpu->arch.msr & (MSR_IR|MSR_DR)) != (old_msr & (MSR_IR|MSR_DR))) ||
- (vcpu->arch.msr & MSR_PR) != (old_msr & MSR_PR)) {
+ if ((vcpu->arch.msr & (MSR_PR|MSR_IR|MSR_DR)) !=
+ (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
kvmppc_mmu_flush_segments(vcpu);
- kvmppc_mmu_map_segment(vcpu, vcpu->arch.pc);
+ kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
}
+
+ /* Preload FPU if it's enabled */
+ if (vcpu->arch.msr & MSR_FP)
+ kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
}
void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
{
- vcpu->arch.srr0 = vcpu->arch.pc;
+ vcpu->arch.srr0 = kvmppc_get_pc(vcpu);
vcpu->arch.srr1 = vcpu->arch.msr | flags;
- vcpu->arch.pc = to_book3s(vcpu)->hior + vec;
+ kvmppc_set_pc(vcpu, to_book3s(vcpu)->hior + vec);
vcpu->arch.mmu.reset_msr(vcpu);
}
@@ -218,6 +240,12 @@ void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
}
+void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
+ struct kvm_interrupt *irq)
+{
+ kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
+}
+
int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
{
int deliver = 1;
@@ -302,7 +330,7 @@ void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
printk(KERN_EMERG "KVM: Check pending: %lx\n", vcpu->arch.pending_exceptions);
#endif
priority = __ffs(*pending);
- while (priority <= (sizeof(unsigned int) * 8)) {
+ while (priority < BOOK3S_IRQPRIO_MAX) {
if (kvmppc_book3s_irqprio_deliver(vcpu, priority) &&
(priority != BOOK3S_IRQPRIO_DECREMENTER)) {
/* DEC interrupts get cleared by mtdec */
@@ -318,13 +346,18 @@ void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
{
+ u32 host_pvr;
+
vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB;
vcpu->arch.pvr = pvr;
+#ifdef CONFIG_PPC_BOOK3S_64
if ((pvr >= 0x330000) && (pvr < 0x70330000)) {
kvmppc_mmu_book3s_64_init(vcpu);
to_book3s(vcpu)->hior = 0xfff00000;
to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL;
- } else {
+ } else
+#endif
+ {
kvmppc_mmu_book3s_32_init(vcpu);
to_book3s(vcpu)->hior = 0;
to_book3s(vcpu)->msr_mask = 0xffffffffULL;
@@ -337,6 +370,32 @@ void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
!strcmp(cur_cpu_spec->platform, "ppc970"))
vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
+ /* Cell performs badly if MSR_FEx are set. So let's hope nobody
+ really needs them in a VM on Cell and force disable them. */
+ if (!strcmp(cur_cpu_spec->platform, "ppc-cell-be"))
+ to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1);
+
+#ifdef CONFIG_PPC_BOOK3S_32
+ /* 32 bit Book3S always has 32 byte dcbz */
+ vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
+#endif
+
+ /* On some CPUs we can execute paired single operations natively */
+ asm ( "mfpvr %0" : "=r"(host_pvr));
+ switch (host_pvr) {
+ case 0x00080200: /* lonestar 2.0 */
+ case 0x00088202: /* lonestar 2.2 */
+ case 0x70000100: /* gekko 1.0 */
+ case 0x00080100: /* gekko 2.0 */
+ case 0x00083203: /* gekko 2.3a */
+ case 0x00083213: /* gekko 2.3b */
+ case 0x00083204: /* gekko 2.4 */
+ case 0x00083214: /* gekko 2.4e (8SE) - retail HW2 */
+ case 0x00087200: /* broadway */
+ vcpu->arch.hflags |= BOOK3S_HFLAG_NATIVE_PS;
+ /* Enable HID2.PSE - in case we need it later */
+ mtspr(SPRN_HID2_GEKKO, mfspr(SPRN_HID2_GEKKO) | (1 << 29));
+ }
}
/* Book3s_32 CPUs always have 32 bytes cache line size, which Linux assumes. To
@@ -350,34 +409,29 @@ void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
*/
static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
- bool touched = false;
- hva_t hpage;
+ struct page *hpage;
+ u64 hpage_offset;
u32 *page;
int i;
- hpage = gfn_to_hva(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
- if (kvm_is_error_hva(hpage))
+ hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
+ if (is_error_page(hpage))
return;
- hpage |= pte->raddr & ~PAGE_MASK;
- hpage &= ~0xFFFULL;
-
- page = vmalloc(HW_PAGE_SIZE);
-
- if (copy_from_user(page, (void __user *)hpage, HW_PAGE_SIZE))
- goto out;
+ hpage_offset = pte->raddr & ~PAGE_MASK;
+ hpage_offset &= ~0xFFFULL;
+ hpage_offset /= 4;
- for (i=0; i < HW_PAGE_SIZE / 4; i++)
- if ((page[i] & 0xff0007ff) == INS_DCBZ) {
- page[i] &= 0xfffffff7; // reserved instruction, so we trap
- touched = true;
- }
+ get_page(hpage);
+ page = kmap_atomic(hpage, KM_USER0);
- if (touched)
- copy_to_user((void __user *)hpage, page, HW_PAGE_SIZE);
+ /* patch dcbz into reserved instruction, so we trap */
+ for (i=hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++)
+ if ((page[i] & 0xff0007ff) == INS_DCBZ)
+ page[i] &= 0xfffffff7;
-out:
- vfree(page);
+ kunmap_atomic(page, KM_USER0);
+ put_page(hpage);
}
static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data,
@@ -391,15 +445,7 @@ static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data,
} else {
pte->eaddr = eaddr;
pte->raddr = eaddr & 0xffffffff;
- pte->vpage = eaddr >> 12;
- switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
- case 0:
- pte->vpage |= VSID_REAL;
- case MSR_DR:
- pte->vpage |= VSID_REAL_DR;
- case MSR_IR:
- pte->vpage |= VSID_REAL_IR;
- }
+ pte->vpage = VSID_REAL | eaddr >> 12;
pte->may_read = true;
pte->may_write = true;
pte->may_execute = true;
@@ -434,55 +480,55 @@ err:
return kvmppc_bad_hva();
}
-int kvmppc_st(struct kvm_vcpu *vcpu, ulong eaddr, int size, void *ptr)
+int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
+ bool data)
{
struct kvmppc_pte pte;
- hva_t hva = eaddr;
vcpu->stat.st++;
- if (kvmppc_xlate(vcpu, eaddr, false, &pte))
- goto err;
+ if (kvmppc_xlate(vcpu, *eaddr, data, &pte))
+ return -ENOENT;
- hva = kvmppc_pte_to_hva(vcpu, &pte, false);
- if (kvm_is_error_hva(hva))
- goto err;
+ *eaddr = pte.raddr;
- if (copy_to_user((void __user *)hva, ptr, size)) {
- printk(KERN_INFO "kvmppc_st at 0x%lx failed\n", hva);
- goto err;
- }
+ if (!pte.may_write)
+ return -EPERM;
- return 0;
+ if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
+ return EMULATE_DO_MMIO;
-err:
- return -ENOENT;
+ return EMULATE_DONE;
}
-int kvmppc_ld(struct kvm_vcpu *vcpu, ulong eaddr, int size, void *ptr,
+int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
bool data)
{
struct kvmppc_pte pte;
- hva_t hva = eaddr;
+ hva_t hva = *eaddr;
vcpu->stat.ld++;
- if (kvmppc_xlate(vcpu, eaddr, data, &pte))
- goto err;
+ if (kvmppc_xlate(vcpu, *eaddr, data, &pte))
+ goto nopte;
+
+ *eaddr = pte.raddr;
hva = kvmppc_pte_to_hva(vcpu, &pte, true);
if (kvm_is_error_hva(hva))
- goto err;
+ goto mmio;
if (copy_from_user(ptr, (void __user *)hva, size)) {
printk(KERN_INFO "kvmppc_ld at 0x%lx failed\n", hva);
- goto err;
+ goto mmio;
}
- return 0;
+ return EMULATE_DONE;
-err:
+nopte:
return -ENOENT;
+mmio:
+ return EMULATE_DO_MMIO;
}
static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
@@ -499,12 +545,11 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
int page_found = 0;
struct kvmppc_pte pte;
bool is_mmio = false;
+ bool dr = (vcpu->arch.msr & MSR_DR) ? true : false;
+ bool ir = (vcpu->arch.msr & MSR_IR) ? true : false;
+ u64 vsid;
- if ( vec == BOOK3S_INTERRUPT_DATA_STORAGE ) {
- relocated = (vcpu->arch.msr & MSR_DR);
- } else {
- relocated = (vcpu->arch.msr & MSR_IR);
- }
+ relocated = data ? dr : ir;
/* Resolve real address if translation turned on */
if (relocated) {
@@ -516,14 +561,25 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
pte.raddr = eaddr & 0xffffffff;
pte.eaddr = eaddr;
pte.vpage = eaddr >> 12;
- switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
- case 0:
- pte.vpage |= VSID_REAL;
- case MSR_DR:
- pte.vpage |= VSID_REAL_DR;
- case MSR_IR:
- pte.vpage |= VSID_REAL_IR;
- }
+ }
+
+ switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
+ case 0:
+ pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
+ break;
+ case MSR_DR:
+ case MSR_IR:
+ vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);
+
+ if ((vcpu->arch.msr & (MSR_DR|MSR_IR)) == MSR_DR)
+ pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
+ else
+ pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
+ pte.vpage |= vsid;
+
+ if (vsid == -1)
+ page_found = -EINVAL;
+ break;
}
if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
@@ -538,20 +594,20 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
if (page_found == -ENOENT) {
/* Page not found in guest PTE entries */
- vcpu->arch.dear = vcpu->arch.fault_dear;
- to_book3s(vcpu)->dsisr = vcpu->arch.fault_dsisr;
- vcpu->arch.msr |= (vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL);
+ vcpu->arch.dear = kvmppc_get_fault_dar(vcpu);
+ to_book3s(vcpu)->dsisr = to_svcpu(vcpu)->fault_dsisr;
+ vcpu->arch.msr |= (to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
kvmppc_book3s_queue_irqprio(vcpu, vec);
} else if (page_found == -EPERM) {
/* Storage protection */
- vcpu->arch.dear = vcpu->arch.fault_dear;
- to_book3s(vcpu)->dsisr = vcpu->arch.fault_dsisr & ~DSISR_NOHPTE;
+ vcpu->arch.dear = kvmppc_get_fault_dar(vcpu);
+ to_book3s(vcpu)->dsisr = to_svcpu(vcpu)->fault_dsisr & ~DSISR_NOHPTE;
to_book3s(vcpu)->dsisr |= DSISR_PROTFAULT;
- vcpu->arch.msr |= (vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL);
+ vcpu->arch.msr |= (to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
kvmppc_book3s_queue_irqprio(vcpu, vec);
} else if (page_found == -EINVAL) {
/* Page not found in guest SLB */
- vcpu->arch.dear = vcpu->arch.fault_dear;
+ vcpu->arch.dear = kvmppc_get_fault_dar(vcpu);
kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
} else if (!is_mmio &&
kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) {
@@ -583,11 +639,13 @@ static inline int get_fpr_index(int i)
}
/* Give up external provider (FPU, Altivec, VSX) */
-static void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
+void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
{
struct thread_struct *t = &current->thread;
u64 *vcpu_fpr = vcpu->arch.fpr;
+#ifdef CONFIG_VSX
u64 *vcpu_vsx = vcpu->arch.vsr;
+#endif
u64 *thread_fpr = (u64*)t->fpr;
int i;
@@ -629,21 +687,65 @@ static void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
kvmppc_recalc_shadow_msr(vcpu);
}
+static int kvmppc_read_inst(struct kvm_vcpu *vcpu)
+{
+ ulong srr0 = kvmppc_get_pc(vcpu);
+ u32 last_inst = kvmppc_get_last_inst(vcpu);
+ int ret;
+
+ ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
+ if (ret == -ENOENT) {
+ vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 33, 33, 1);
+ vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 34, 36, 0);
+ vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 42, 47, 0);
+ kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
+ return EMULATE_AGAIN;
+ }
+
+ return EMULATE_DONE;
+}
+
+static int kvmppc_check_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr)
+{
+
+ /* Need to do paired single emulation? */
+ if (!(vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE))
+ return EMULATE_DONE;
+
+ /* Read out the instruction */
+ if (kvmppc_read_inst(vcpu) == EMULATE_DONE)
+ /* Need to emulate */
+ return EMULATE_FAIL;
+
+ return EMULATE_AGAIN;
+}
+
/* Handle external providers (FPU, Altivec, VSX) */
static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
ulong msr)
{
struct thread_struct *t = &current->thread;
u64 *vcpu_fpr = vcpu->arch.fpr;
+#ifdef CONFIG_VSX
u64 *vcpu_vsx = vcpu->arch.vsr;
+#endif
u64 *thread_fpr = (u64*)t->fpr;
int i;
+ /* When we have paired singles, we emulate in software */
+ if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
+ return RESUME_GUEST;
+
if (!(vcpu->arch.msr & msr)) {
kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
return RESUME_GUEST;
}
+ /* We already own the ext */
+ if (vcpu->arch.guest_owned_ext & msr) {
+ return RESUME_GUEST;
+ }
+
#ifdef DEBUG_EXT
printk(KERN_INFO "Loading up ext 0x%lx\n", msr);
#endif
@@ -696,21 +798,33 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
run->ready_for_interrupt_injection = 1;
#ifdef EXIT_DEBUG
printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | dar=0x%lx | dec=0x%x | msr=0x%lx\n",
- exit_nr, vcpu->arch.pc, vcpu->arch.fault_dear,
- kvmppc_get_dec(vcpu), vcpu->arch.msr);
+ exit_nr, kvmppc_get_pc(vcpu), kvmppc_get_fault_dar(vcpu),
+ kvmppc_get_dec(vcpu), to_svcpu(vcpu)->shadow_srr1);
#elif defined (EXIT_DEBUG_SIMPLE)
if ((exit_nr != 0x900) && (exit_nr != 0x500))
printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | dar=0x%lx | msr=0x%lx\n",
- exit_nr, vcpu->arch.pc, vcpu->arch.fault_dear,
+ exit_nr, kvmppc_get_pc(vcpu), kvmppc_get_fault_dar(vcpu),
vcpu->arch.msr);
#endif
kvm_resched(vcpu);
switch (exit_nr) {
case BOOK3S_INTERRUPT_INST_STORAGE:
vcpu->stat.pf_instruc++;
+
+#ifdef CONFIG_PPC_BOOK3S_32
+ /* We set segments as unused segments when invalidating them. So
+ * treat the respective fault as segment fault. */
+ if (to_svcpu(vcpu)->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT]
+ == SR_INVALID) {
+ kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
+ r = RESUME_GUEST;
+ break;
+ }
+#endif
+
/* only care about PTEG not found errors, but leave NX alone */
- if (vcpu->arch.shadow_srr1 & 0x40000000) {
- r = kvmppc_handle_pagefault(run, vcpu, vcpu->arch.pc, exit_nr);
+ if (to_svcpu(vcpu)->shadow_srr1 & 0x40000000) {
+ r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr);
vcpu->stat.sp_instruc++;
} else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
(!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
@@ -719,37 +833,52 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
* so we can't use the NX bit inside the guest. Let's cross our fingers,
* that no guest that needs the dcbz hack does NX.
*/
- kvmppc_mmu_pte_flush(vcpu, vcpu->arch.pc, ~0xFFFULL);
+ kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
+ r = RESUME_GUEST;
} else {
- vcpu->arch.msr |= vcpu->arch.shadow_srr1 & 0x58000000;
+ vcpu->arch.msr |= to_svcpu(vcpu)->shadow_srr1 & 0x58000000;
kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
- kvmppc_mmu_pte_flush(vcpu, vcpu->arch.pc, ~0xFFFULL);
+ kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
r = RESUME_GUEST;
}
break;
case BOOK3S_INTERRUPT_DATA_STORAGE:
+ {
+ ulong dar = kvmppc_get_fault_dar(vcpu);
vcpu->stat.pf_storage++;
+
+#ifdef CONFIG_PPC_BOOK3S_32
+ /* We set segments as unused segments when invalidating them. So
+ * treat the respective fault as segment fault. */
+ if ((to_svcpu(vcpu)->sr[dar >> SID_SHIFT]) == SR_INVALID) {
+ kvmppc_mmu_map_segment(vcpu, dar);
+ r = RESUME_GUEST;
+ break;
+ }
+#endif
+
/* The only case we need to handle is missing shadow PTEs */
- if (vcpu->arch.fault_dsisr & DSISR_NOHPTE) {
- r = kvmppc_handle_pagefault(run, vcpu, vcpu->arch.fault_dear, exit_nr);
+ if (to_svcpu(vcpu)->fault_dsisr & DSISR_NOHPTE) {
+ r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
} else {
- vcpu->arch.dear = vcpu->arch.fault_dear;
- to_book3s(vcpu)->dsisr = vcpu->arch.fault_dsisr;
+ vcpu->arch.dear = dar;
+ to_book3s(vcpu)->dsisr = to_svcpu(vcpu)->fault_dsisr;
kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
- kvmppc_mmu_pte_flush(vcpu, vcpu->arch.dear, ~0xFFFULL);
+ kvmppc_mmu_pte_flush(vcpu, vcpu->arch.dear, ~0xFFFUL);
r = RESUME_GUEST;
}
break;
+ }
case BOOK3S_INTERRUPT_DATA_SEGMENT:
- if (kvmppc_mmu_map_segment(vcpu, vcpu->arch.fault_dear) < 0) {
- vcpu->arch.dear = vcpu->arch.fault_dear;
+ if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) {
+ vcpu->arch.dear = kvmppc_get_fault_dar(vcpu);
kvmppc_book3s_queue_irqprio(vcpu,
BOOK3S_INTERRUPT_DATA_SEGMENT);
}
r = RESUME_GUEST;
break;
case BOOK3S_INTERRUPT_INST_SEGMENT:
- if (kvmppc_mmu_map_segment(vcpu, vcpu->arch.pc) < 0) {
+ if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)) < 0) {
kvmppc_book3s_queue_irqprio(vcpu,
BOOK3S_INTERRUPT_INST_SEGMENT);
}
@@ -764,18 +893,22 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
vcpu->stat.ext_intr_exits++;
r = RESUME_GUEST;
break;
+ case BOOK3S_INTERRUPT_PERFMON:
+ r = RESUME_GUEST;
+ break;
case BOOK3S_INTERRUPT_PROGRAM:
{
enum emulation_result er;
ulong flags;
- flags = vcpu->arch.shadow_srr1 & 0x1f0000ull;
+program_interrupt:
+ flags = to_svcpu(vcpu)->shadow_srr1 & 0x1f0000ull;
if (vcpu->arch.msr & MSR_PR) {
#ifdef EXIT_DEBUG
- printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", vcpu->arch.pc, vcpu->arch.last_inst);
+ printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
#endif
- if ((vcpu->arch.last_inst & 0xff0007ff) !=
+ if ((kvmppc_get_last_inst(vcpu) & 0xff0007ff) !=
(INS_DCBZ & 0xfffffff7)) {
kvmppc_core_queue_program(vcpu, flags);
r = RESUME_GUEST;
@@ -789,33 +922,80 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
case EMULATE_DONE:
r = RESUME_GUEST_NV;
break;
+ case EMULATE_AGAIN:
+ r = RESUME_GUEST;
+ break;
case EMULATE_FAIL:
printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
- __func__, vcpu->arch.pc, vcpu->arch.last_inst);
+ __func__, kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
kvmppc_core_queue_program(vcpu, flags);
r = RESUME_GUEST;
break;
+ case EMULATE_DO_MMIO:
+ run->exit_reason = KVM_EXIT_MMIO;
+ r = RESUME_HOST_NV;
+ break;
default:
BUG();
}
break;
}
case BOOK3S_INTERRUPT_SYSCALL:
-#ifdef EXIT_DEBUG
- printk(KERN_INFO "Syscall Nr %d\n", (int)kvmppc_get_gpr(vcpu, 0));
-#endif
- vcpu->stat.syscall_exits++;
- kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
- r = RESUME_GUEST;
+ // XXX make user settable
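+ /* OSI hypercalls (the Mac-on-Linux interface) pass magic values in
+ * r3/r4; when enabled, hand the full GPR set to userspace via
+ * KVM_EXIT_OSI instead of reflecting the syscall into the guest. */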
+ if (vcpu->arch.osi_enabled &&
+ (((u32)kvmppc_get_gpr(vcpu, 3)) == OSI_SC_MAGIC_R3) &&
+ (((u32)kvmppc_get_gpr(vcpu, 4)) == OSI_SC_MAGIC_R4)) {
+ u64 *gprs = run->osi.gprs;
+ int i;
+
+ run->exit_reason = KVM_EXIT_OSI;
+ for (i = 0; i < 32; i++)
+ gprs[i] = kvmppc_get_gpr(vcpu, i);
+ vcpu->arch.osi_needed = 1;
+ r = RESUME_HOST_NV;
+
+ } else {
+ vcpu->stat.syscall_exits++;
+ kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
+ r = RESUME_GUEST;
+ }
break;
case BOOK3S_INTERRUPT_FP_UNAVAIL:
- r = kvmppc_handle_ext(vcpu, exit_nr, MSR_FP);
- break;
case BOOK3S_INTERRUPT_ALTIVEC:
- r = kvmppc_handle_ext(vcpu, exit_nr, MSR_VEC);
- break;
case BOOK3S_INTERRUPT_VSX:
- r = kvmppc_handle_ext(vcpu, exit_nr, MSR_VSX);
+ {
+ int ext_msr = 0;
+
+ switch (exit_nr) {
+ case BOOK3S_INTERRUPT_FP_UNAVAIL: ext_msr = MSR_FP; break;
+ case BOOK3S_INTERRUPT_ALTIVEC: ext_msr = MSR_VEC; break;
+ case BOOK3S_INTERRUPT_VSX: ext_msr = MSR_VSX; break;
+ }
+
+ switch (kvmppc_check_ext(vcpu, exit_nr)) {
+ case EMULATE_DONE:
+ /* everything ok - let's enable the ext */
+ r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr);
+ break;
+ case EMULATE_FAIL:
+ /* we need to emulate this instruction */
+ goto program_interrupt;
+ break;
+ default:
+ /* nothing to worry about - go again */
+ break;
+ }
+ break;
+ }
+ case BOOK3S_INTERRUPT_ALIGNMENT:
+ if (kvmppc_read_inst(vcpu) == EMULATE_DONE) {
+ to_book3s(vcpu)->dsisr = kvmppc_alignment_dsisr(vcpu,
+ kvmppc_get_last_inst(vcpu));
+ vcpu->arch.dear = kvmppc_alignment_dar(vcpu,
+ kvmppc_get_last_inst(vcpu));
+ kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
+ }
+ r = RESUME_GUEST;
break;
case BOOK3S_INTERRUPT_MACHINE_CHECK:
case BOOK3S_INTERRUPT_TRACE:
@@ -825,7 +1005,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
default:
/* Ugh - bork here! What did we get? */
printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
- exit_nr, vcpu->arch.pc, vcpu->arch.shadow_srr1);
+ exit_nr, kvmppc_get_pc(vcpu), to_svcpu(vcpu)->shadow_srr1);
r = RESUME_HOST;
BUG();
break;
@@ -852,7 +1032,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
}
#ifdef EXIT_DEBUG
- printk(KERN_EMERG "KVM exit: vcpu=0x%p pc=0x%lx r=0x%x\n", vcpu, vcpu->arch.pc, r);
+ printk(KERN_EMERG "KVM exit: vcpu=0x%p pc=0x%lx r=0x%x\n", vcpu, kvmppc_get_pc(vcpu), r);
#endif
return r;
@@ -867,10 +1047,12 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
int i;
- regs->pc = vcpu->arch.pc;
+ vcpu_load(vcpu);
+
+ regs->pc = kvmppc_get_pc(vcpu);
regs->cr = kvmppc_get_cr(vcpu);
- regs->ctr = vcpu->arch.ctr;
- regs->lr = vcpu->arch.lr;
+ regs->ctr = kvmppc_get_ctr(vcpu);
+ regs->lr = kvmppc_get_lr(vcpu);
regs->xer = kvmppc_get_xer(vcpu);
regs->msr = vcpu->arch.msr;
regs->srr0 = vcpu->arch.srr0;
@@ -887,6 +1069,8 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
+ vcpu_put(vcpu);
+
return 0;
}
@@ -894,10 +1078,12 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
int i;
- vcpu->arch.pc = regs->pc;
+ vcpu_load(vcpu);
+
+ kvmppc_set_pc(vcpu, regs->pc);
kvmppc_set_cr(vcpu, regs->cr);
- vcpu->arch.ctr = regs->ctr;
- vcpu->arch.lr = regs->lr;
+ kvmppc_set_ctr(vcpu, regs->ctr);
+ kvmppc_set_lr(vcpu, regs->lr);
kvmppc_set_xer(vcpu, regs->xer);
kvmppc_set_msr(vcpu, regs->msr);
vcpu->arch.srr0 = regs->srr0;
@@ -913,6 +1099,8 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
+ vcpu_put(vcpu);
+
return 0;
}
@@ -922,6 +1110,8 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
int i;
+ vcpu_load(vcpu);
+
sregs->pvr = vcpu->arch.pvr;
sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
@@ -940,6 +1130,9 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw;
}
}
+
+ vcpu_put(vcpu);
+
return 0;
}
@@ -949,6 +1142,8 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
int i;
+ vcpu_load(vcpu);
+
kvmppc_set_pvr(vcpu, sregs->pvr);
vcpu3s->sdr1 = sregs->u.s.sdr1;
@@ -975,6 +1170,9 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
/* Flush the MMU after messing with the segments */
kvmppc_mmu_pte_flush(vcpu, 0, 0);
+
+ vcpu_put(vcpu);
+
return 0;
}
@@ -1042,24 +1240,33 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
struct kvmppc_vcpu_book3s *vcpu_book3s;
struct kvm_vcpu *vcpu;
- int err;
+ int err = -ENOMEM;
- vcpu_book3s = (struct kvmppc_vcpu_book3s *)__get_free_pages( GFP_KERNEL | __GFP_ZERO,
- get_order(sizeof(struct kvmppc_vcpu_book3s)));
- if (!vcpu_book3s) {
- err = -ENOMEM;
+ vcpu_book3s = vmalloc(sizeof(struct kvmppc_vcpu_book3s));
+ if (!vcpu_book3s)
goto out;
- }
+
+ memset(vcpu_book3s, 0, sizeof(struct kvmppc_vcpu_book3s));
+
+ vcpu_book3s->shadow_vcpu = (struct kvmppc_book3s_shadow_vcpu *)
+ kzalloc(sizeof(*vcpu_book3s->shadow_vcpu), GFP_KERNEL);
+ if (!vcpu_book3s->shadow_vcpu)
+ goto free_vcpu;
vcpu = &vcpu_book3s->vcpu;
err = kvm_vcpu_init(vcpu, kvm, id);
if (err)
- goto free_vcpu;
+ goto free_shadow_vcpu;
vcpu->arch.host_retip = kvm_return_point;
vcpu->arch.host_msr = mfmsr();
+#ifdef CONFIG_PPC_BOOK3S_64
/* default to book3s_64 (970fx) */
vcpu->arch.pvr = 0x3C0301;
+#else
+ /* default to book3s_32 (750) */
+ vcpu->arch.pvr = 0x84202;
+#endif
kvmppc_set_pvr(vcpu, vcpu->arch.pvr);
vcpu_book3s->slb_nr = 64;
@@ -1067,23 +1274,24 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
vcpu->arch.trampoline_lowmem = kvmppc_trampoline_lowmem;
vcpu->arch.trampoline_enter = kvmppc_trampoline_enter;
vcpu->arch.highmem_handler = (ulong)kvmppc_handler_highmem;
+#ifdef CONFIG_PPC_BOOK3S_64
vcpu->arch.rmcall = *(ulong*)kvmppc_rmcall;
+#else
+ vcpu->arch.rmcall = (ulong)kvmppc_rmcall;
+#endif
vcpu->arch.shadow_msr = MSR_USER64;
- err = __init_new_context();
+ err = kvmppc_mmu_init(vcpu);
if (err < 0)
- goto free_vcpu;
- vcpu_book3s->context_id = err;
-
- vcpu_book3s->vsid_max = ((vcpu_book3s->context_id + 1) << USER_ESID_BITS) - 1;
- vcpu_book3s->vsid_first = vcpu_book3s->context_id << USER_ESID_BITS;
- vcpu_book3s->vsid_next = vcpu_book3s->vsid_first;
+ goto free_shadow_vcpu;
return vcpu;
+free_shadow_vcpu:
+ kfree(vcpu_book3s->shadow_vcpu);
free_vcpu:
- free_pages((long)vcpu_book3s, get_order(sizeof(struct kvmppc_vcpu_book3s)));
+ vfree(vcpu_book3s);
out:
return ERR_PTR(err);
}
@@ -1092,9 +1300,9 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
- __destroy_context(vcpu_book3s->context_id);
kvm_vcpu_uninit(vcpu);
- free_pages((long)vcpu_book3s, get_order(sizeof(struct kvmppc_vcpu_book3s)));
+ kfree(vcpu_book3s->shadow_vcpu);
+ vfree(vcpu_book3s);
}
extern int __kvmppc_vcpu_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
@@ -1102,8 +1310,12 @@ int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
int ret;
struct thread_struct ext_bkp;
+#ifdef CONFIG_ALTIVEC
bool save_vec = current->thread.used_vr;
+#endif
+#ifdef CONFIG_VSX
bool save_vsx = current->thread.used_vsr;
+#endif
ulong ext_msr;
/* No need to go into the guest when all we do is going out */
@@ -1144,6 +1356,10 @@ int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
/* XXX we get called with irq disabled - change that! */
local_irq_enable();
+ /* Preload FPU if it's enabled */
+ if (vcpu->arch.msr & MSR_FP)
+ kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
+
ret = __kvmppc_vcpu_entry(kvm_run, vcpu);
local_irq_disable();
@@ -1179,7 +1395,8 @@ int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
static int kvmppc_book3s_init(void)
{
- return kvm_init(NULL, sizeof(struct kvmppc_vcpu_book3s), THIS_MODULE);
+ return kvm_init(NULL, sizeof(struct kvmppc_vcpu_book3s), 0,
+ THIS_MODULE);
}
static void kvmppc_book3s_exit(void)
diff --git a/arch/powerpc/kvm/book3s_32_mmu.c b/arch/powerpc/kvm/book3s_32_mmu.c
index faf99f20d993..0b10503c8a4a 100644
--- a/arch/powerpc/kvm/book3s_32_mmu.c
+++ b/arch/powerpc/kvm/book3s_32_mmu.c
@@ -37,7 +37,7 @@
#define dprintk(X...) do { } while(0)
#endif
-#ifdef DEBUG_PTE
+#ifdef DEBUG_MMU_PTE
#define dprintk_pte(X...) printk(KERN_INFO X)
#else
#define dprintk_pte(X...) do { } while(0)
@@ -45,6 +45,9 @@
#define PTEG_FLAG_ACCESSED 0x00000100
#define PTEG_FLAG_DIRTY 0x00000080
+#ifndef SID_SHIFT
+#define SID_SHIFT 28
+#endif
static inline bool check_debug_ip(struct kvm_vcpu *vcpu)
{
@@ -57,6 +60,8 @@ static inline bool check_debug_ip(struct kvm_vcpu *vcpu)
static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
struct kvmppc_pte *pte, bool data);
+static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
+ u64 *vsid);
static struct kvmppc_sr *find_sr(struct kvmppc_vcpu_book3s *vcpu_book3s, gva_t eaddr)
{
@@ -66,13 +71,14 @@ static struct kvmppc_sr *find_sr(struct kvmppc_vcpu_book3s *vcpu_book3s, gva_t e
static u64 kvmppc_mmu_book3s_32_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
bool data)
{
- struct kvmppc_sr *sre = find_sr(to_book3s(vcpu), eaddr);
+ u64 vsid;
struct kvmppc_pte pte;
if (!kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, &pte, data))
return pte.vpage;
- return (((u64)eaddr >> 12) & 0xffff) | (((u64)sre->vsid) << 16);
+ kvmppc_mmu_book3s_32_esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);
+ return (((u64)eaddr >> 12) & 0xffff) | (vsid << 16);
}
static void kvmppc_mmu_book3s_32_reset_msr(struct kvm_vcpu *vcpu)
@@ -142,8 +148,13 @@ static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
bat->bepi_mask);
}
if ((eaddr & bat->bepi_mask) == bat->bepi) {
+ u64 vsid;
+ kvmppc_mmu_book3s_32_esid_to_vsid(vcpu,
+ eaddr >> SID_SHIFT, &vsid);
+ vsid <<= 16;
+ pte->vpage = (((u64)eaddr >> 12) & 0xffff) | vsid;
+
pte->raddr = bat->brpn | (eaddr & ~bat->bepi_mask);
- pte->vpage = (eaddr >> 12) | VSID_BAT;
pte->may_read = bat->pp;
pte->may_write = bat->pp > 1;
pte->may_execute = true;
@@ -172,7 +183,7 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr,
struct kvmppc_sr *sre;
hva_t ptegp;
u32 pteg[16];
- u64 ptem = 0;
+ u32 ptem = 0;
int i;
int found = 0;
@@ -302,6 +313,7 @@ static void kvmppc_mmu_book3s_32_mtsrin(struct kvm_vcpu *vcpu, u32 srnum,
/* And then put in the new SR */
sre->raw = value;
sre->vsid = (value & 0x0fffffff);
+ sre->valid = (value & 0x80000000) ? false : true;
sre->Ks = (value & 0x40000000) ? true : false;
sre->Kp = (value & 0x20000000) ? true : false;
sre->nx = (value & 0x10000000) ? true : false;
@@ -312,36 +324,48 @@ static void kvmppc_mmu_book3s_32_mtsrin(struct kvm_vcpu *vcpu, u32 srnum,
static void kvmppc_mmu_book3s_32_tlbie(struct kvm_vcpu *vcpu, ulong ea, bool large)
{
- kvmppc_mmu_pte_flush(vcpu, ea, ~0xFFFULL);
+ kvmppc_mmu_pte_flush(vcpu, ea, 0x0FFFF000);
}
-static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, u64 esid,
+static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
u64 *vsid)
{
+ ulong ea = esid << SID_SHIFT;
+ struct kvmppc_sr *sr;
+ u64 gvsid = esid;
+
+ if (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
+ sr = find_sr(to_book3s(vcpu), ea);
+ if (sr->valid)
+ gvsid = sr->vsid;
+ }
+
/* In case we only have one of MSR_IR or MSR_DR set, let's put
that in the real-mode context (and hope RM doesn't access
high memory) */
switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
case 0:
- *vsid = (VSID_REAL >> 16) | esid;
+ *vsid = VSID_REAL | esid;
break;
case MSR_IR:
- *vsid = (VSID_REAL_IR >> 16) | esid;
+ *vsid = VSID_REAL_IR | gvsid;
break;
case MSR_DR:
- *vsid = (VSID_REAL_DR >> 16) | esid;
+ *vsid = VSID_REAL_DR | gvsid;
break;
case MSR_DR|MSR_IR:
- {
- ulong ea;
- ea = esid << SID_SHIFT;
- *vsid = find_sr(to_book3s(vcpu), ea)->vsid;
+ if (!sr->valid)
+ return -1;
+
+ *vsid = sr->vsid;
break;
- }
default:
BUG();
}
+ if (vcpu->arch.msr & MSR_PR)
+ *vsid |= VSID_PR;
+
return 0;
}
diff --git a/arch/powerpc/kvm/book3s_32_mmu_host.c b/arch/powerpc/kvm/book3s_32_mmu_host.c
new file mode 100644
index 000000000000..0bb66005338f
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_32_mmu_host.c
@@ -0,0 +1,483 @@
+/*
+ * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
+ *
+ * Authors:
+ * Alexander Graf <agraf@suse.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/kvm_host.h>
+
+#include <asm/kvm_ppc.h>
+#include <asm/kvm_book3s.h>
+#include <asm/mmu-hash32.h>
+#include <asm/machdep.h>
+#include <asm/mmu_context.h>
+#include <asm/hw_irq.h>
+
+/* #define DEBUG_MMU */
+/* #define DEBUG_SR */
+
+#ifdef DEBUG_MMU
+#define dprintk_mmu(a, ...) printk(KERN_INFO a, __VA_ARGS__)
+#else
+#define dprintk_mmu(a, ...) do { } while(0)
+#endif
+
+#ifdef DEBUG_SR
+#define dprintk_sr(a, ...) printk(KERN_INFO a, __VA_ARGS__)
+#else
+#define dprintk_sr(a, ...) do { } while(0)
+#endif
+
+#if PAGE_SHIFT != 12
+#error Unknown page size
+#endif
+
+#ifdef CONFIG_SMP
+#error XXX need to grab mmu_hash_lock
+#endif
+
+#ifdef CONFIG_PTE_64BIT
+#error Only 32 bit pages are supported for now
+#endif
+
+static ulong htab;
+static u32 htabmask;
+
+static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
+{
+ volatile u32 *pteg;
+
+ dprintk_mmu("KVM: Flushing SPTE: 0x%llx (0x%llx) -> 0x%llx\n",
+ pte->pte.eaddr, pte->pte.vpage, pte->host_va);
+
+ pteg = (u32*)pte->slot;
+
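+ /* Clearing word 0 drops the valid bit; the tlbie/tlbsync sequence
+ * below then flushes any stale translation before the backing page
+ * is released. */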
+ pteg[0] = 0;
+ asm volatile ("sync");
+ asm volatile ("tlbie %0" : : "r" (pte->pte.eaddr) : "memory");
+ asm volatile ("sync");
+ asm volatile ("tlbsync");
+
+ pte->host_va = 0;
+
+ if (pte->pte.may_write)
+ kvm_release_pfn_dirty(pte->pfn);
+ else
+ kvm_release_pfn_clean(pte->pfn);
+}
+
+void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask)
+{
+ int i;
+
+ dprintk_mmu("KVM: Flushing %d Shadow PTEs: 0x%x & 0x%x\n",
+ vcpu->arch.hpte_cache_offset, guest_ea, ea_mask);
+ BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);
+
+ guest_ea &= ea_mask;
+ for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
+ struct hpte_cache *pte;
+
+ pte = &vcpu->arch.hpte_cache[i];
+ if (!pte->host_va)
+ continue;
+
+ if ((pte->pte.eaddr & ea_mask) == guest_ea) {
+ invalidate_pte(vcpu, pte);
+ }
+ }
+
+ /* Doing a complete flush -> start from scratch */
+ if (!ea_mask)
+ vcpu->arch.hpte_cache_offset = 0;
+}
+
+void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
+{
+ int i;
+
+ dprintk_mmu("KVM: Flushing %d Shadow vPTEs: 0x%llx & 0x%llx\n",
+ vcpu->arch.hpte_cache_offset, guest_vp, vp_mask);
+ BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);
+
+ guest_vp &= vp_mask;
+ for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
+ struct hpte_cache *pte;
+
+ pte = &vcpu->arch.hpte_cache[i];
+ if (!pte->host_va)
+ continue;
+
+ if ((pte->pte.vpage & vp_mask) == guest_vp) {
+ invalidate_pte(vcpu, pte);
+ }
+ }
+}
+
+void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
+{
+ int i;
+
+ dprintk_mmu("KVM: Flushing %d Shadow pPTEs: 0x%llx & 0x%llx\n",
+ vcpu->arch.hpte_cache_offset, pa_start, pa_end);
+ BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);
+
+ for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
+ struct hpte_cache *pte;
+
+ pte = &vcpu->arch.hpte_cache[i];
+ if (!pte->host_va)
+ continue;
+
+ if ((pte->pte.raddr >= pa_start) &&
+ (pte->pte.raddr < pa_end)) {
+ invalidate_pte(vcpu, pte);
+ }
+ }
+}
+
+struct kvmppc_pte *kvmppc_mmu_find_pte(struct kvm_vcpu *vcpu, u64 ea, bool data)
+{
+ int i;
+ u64 guest_vp;
+
+ guest_vp = vcpu->arch.mmu.ea_to_vp(vcpu, ea, false);
+ for (i=0; i<vcpu->arch.hpte_cache_offset; i++) {
+ struct hpte_cache *pte;
+
+ pte = &vcpu->arch.hpte_cache[i];
+ if (!pte->host_va)
+ continue;
+
+ if (pte->pte.vpage == guest_vp)
+ return &pte->pte;
+ }
+
+ return NULL;
+}
+
+static int kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu)
+{
+ if (vcpu->arch.hpte_cache_offset == HPTEG_CACHE_NUM)
+ kvmppc_mmu_pte_flush(vcpu, 0, 0);
+
+ return vcpu->arch.hpte_cache_offset++;
+}
+
+/* We keep 512 gvsid->hvsid entries, mapping the guest ones to the array using
+ * a hash, so we don't waste cycles on looping */
+static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid)
+{
+ return (u16)(((gvsid >> (SID_MAP_BITS * 7)) & SID_MAP_MASK) ^
+ ((gvsid >> (SID_MAP_BITS * 6)) & SID_MAP_MASK) ^
+ ((gvsid >> (SID_MAP_BITS * 5)) & SID_MAP_MASK) ^
+ ((gvsid >> (SID_MAP_BITS * 4)) & SID_MAP_MASK) ^
+ ((gvsid >> (SID_MAP_BITS * 3)) & SID_MAP_MASK) ^
+ ((gvsid >> (SID_MAP_BITS * 2)) & SID_MAP_MASK) ^
+ ((gvsid >> (SID_MAP_BITS * 1)) & SID_MAP_MASK) ^
+ ((gvsid >> (SID_MAP_BITS * 0)) & SID_MAP_MASK));
+}
+
+
+static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
+{
+ struct kvmppc_sid_map *map;
+ u16 sid_map_mask;
+
+ if (vcpu->arch.msr & MSR_PR)
+ gvsid |= VSID_PR;
+
+ sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
+ map = &to_book3s(vcpu)->sid_map[sid_map_mask];
+ if (map->guest_vsid == gvsid) {
+ dprintk_sr("SR: Searching 0x%llx -> 0x%llx\n",
+ gvsid, map->host_vsid);
+ return map;
+ }
+
+ map = &to_book3s(vcpu)->sid_map[SID_MAP_MASK - sid_map_mask];
+ if (map->guest_vsid == gvsid) {
+ dprintk_sr("SR: Searching 0x%llx -> 0x%llx\n",
+ gvsid, map->host_vsid);
+ return map;
+ }
+
+ dprintk_sr("SR: Searching 0x%llx -> not found\n", gvsid);
+ return NULL;
+}
+
+static u32 *kvmppc_mmu_get_pteg(struct kvm_vcpu *vcpu, u32 vsid, u32 eaddr,
+ bool primary)
+{
+ u32 page, hash;
+ ulong pteg = htab;
+
+ page = (eaddr & ~ESID_MASK) >> 12;
+
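+ /* Classic hashed page table lookup: the PTEG index comes from
+ * (vsid ^ page), shifted by 6 to form a byte offset (one PTEG is
+ * 64 bytes); the secondary group uses the complemented hash. */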
+ hash = ((vsid ^ page) << 6);
+ if (!primary)
+ hash = ~hash;
+
+ hash &= htabmask;
+
+ pteg |= hash;
+
+ dprintk_mmu("htab: %lx | hash: %x | htabmask: %x | pteg: %lx\n",
+ htab, hash, htabmask, pteg);
+
+ return (u32*)pteg;
+}
+
+extern char etext[];
+
+int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
+{
+ pfn_t hpaddr;
+ u64 va;
+ u64 vsid;
+ struct kvmppc_sid_map *map;
+ volatile u32 *pteg;
+ u32 eaddr = orig_pte->eaddr;
+ u32 pteg0, pteg1;
+ register int rr = 0;
+ bool primary = false;
+ bool evict = false;
+ int hpte_id;
+ struct hpte_cache *pte;
+
+ /* Get host physical address for gpa */
+ hpaddr = gfn_to_pfn(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
+ if (kvm_is_error_hva(hpaddr)) {
+ printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n",
+ orig_pte->eaddr);
+ return -EINVAL;
+ }
+ hpaddr <<= PAGE_SHIFT;
+
+ /* and write the mapping ea -> hpa into the pt */
+ vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);
+ map = find_sid_vsid(vcpu, vsid);
+ if (!map) {
+ kvmppc_mmu_map_segment(vcpu, eaddr);
+ map = find_sid_vsid(vcpu, vsid);
+ }
+ BUG_ON(!map);
+
+ vsid = map->host_vsid;
+ va = (vsid << SID_SHIFT) | (eaddr & ~ESID_MASK);
+
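+ /* Each HPTE is two u32 words, so rr steps by 2 across the 8 slots
+ * of a PTEG; once the whole group is full, flip to the other group
+ * and overwrite its first entry. */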
+next_pteg:
+ if (rr == 16) {
+ primary = !primary;
+ evict = true;
+ rr = 0;
+ }
+
+ pteg = kvmppc_mmu_get_pteg(vcpu, vsid, eaddr, primary);
+
+ /* not evicting yet */
+ if (!evict && (pteg[rr] & PTE_V)) {
+ rr += 2;
+ goto next_pteg;
+ }
+
+ dprintk_mmu("KVM: old PTEG: %p (%d)\n", pteg, rr);
+ dprintk_mmu("KVM: %08x - %08x\n", pteg[0], pteg[1]);
+ dprintk_mmu("KVM: %08x - %08x\n", pteg[2], pteg[3]);
+ dprintk_mmu("KVM: %08x - %08x\n", pteg[4], pteg[5]);
+ dprintk_mmu("KVM: %08x - %08x\n", pteg[6], pteg[7]);
+ dprintk_mmu("KVM: %08x - %08x\n", pteg[8], pteg[9]);
+ dprintk_mmu("KVM: %08x - %08x\n", pteg[10], pteg[11]);
+ dprintk_mmu("KVM: %08x - %08x\n", pteg[12], pteg[13]);
+ dprintk_mmu("KVM: %08x - %08x\n", pteg[14], pteg[15]);
+
+ pteg0 = ((eaddr & 0x0fffffff) >> 22) | (vsid << 7) | PTE_V |
+ (primary ? 0 : PTE_SEC);
+ pteg1 = hpaddr | PTE_M | PTE_R | PTE_C;
+
+ if (orig_pte->may_write) {
+ pteg1 |= PP_RWRW;
+ mark_page_dirty(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
+ } else {
+ pteg1 |= PP_RWRX;
+ }
+
+ local_irq_disable();
+
+ if (pteg[rr]) {
+ pteg[rr] = 0;
+ asm volatile ("sync");
+ }
+ pteg[rr + 1] = pteg1;
+ pteg[rr] = pteg0;
+ asm volatile ("sync");
+
+ local_irq_enable();
+
+ dprintk_mmu("KVM: new PTEG: %p\n", pteg);
+ dprintk_mmu("KVM: %08x - %08x\n", pteg[0], pteg[1]);
+ dprintk_mmu("KVM: %08x - %08x\n", pteg[2], pteg[3]);
+ dprintk_mmu("KVM: %08x - %08x\n", pteg[4], pteg[5]);
+ dprintk_mmu("KVM: %08x - %08x\n", pteg[6], pteg[7]);
+ dprintk_mmu("KVM: %08x - %08x\n", pteg[8], pteg[9]);
+ dprintk_mmu("KVM: %08x - %08x\n", pteg[10], pteg[11]);
+ dprintk_mmu("KVM: %08x - %08x\n", pteg[12], pteg[13]);
+ dprintk_mmu("KVM: %08x - %08x\n", pteg[14], pteg[15]);
+
+
+ /* Now tell our Shadow PTE code about the new page */
+
+ hpte_id = kvmppc_mmu_hpte_cache_next(vcpu);
+ pte = &vcpu->arch.hpte_cache[hpte_id];
+
+ dprintk_mmu("KVM: %c%c Map 0x%llx: [%lx] 0x%llx (0x%llx) -> %lx\n",
+ orig_pte->may_write ? 'w' : '-',
+ orig_pte->may_execute ? 'x' : '-',
+ orig_pte->eaddr, (ulong)pteg, va,
+ orig_pte->vpage, hpaddr);
+
+ pte->slot = (ulong)&pteg[rr];
+ pte->host_va = va;
+ pte->pte = *orig_pte;
+ pte->pfn = hpaddr >> PAGE_SHIFT;
+
+ return 0;
+}
+
+static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
+{
+ struct kvmppc_sid_map *map;
+ struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
+ u16 sid_map_mask;
+ static int backwards_map = 0;
+
+ if (vcpu->arch.msr & MSR_PR)
+ gvsid |= VSID_PR;
+
+ /* We might get collisions that trap in preceding order, so let's
+ map them differently */
+
+ sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
+ if (backwards_map)
+ sid_map_mask = SID_MAP_MASK - sid_map_mask;
+
+ map = &to_book3s(vcpu)->sid_map[sid_map_mask];
+
+ /* Make sure we're taking the other map next time */
+ backwards_map = !backwards_map;
+
+ /* Uh-oh ... out of mappings. Let's flush! */
+ if (vcpu_book3s->vsid_next >= vcpu_book3s->vsid_max) {
+ vcpu_book3s->vsid_next = vcpu_book3s->vsid_first;
+ memset(vcpu_book3s->sid_map, 0,
+ sizeof(struct kvmppc_sid_map) * SID_MAP_NUM);
+ kvmppc_mmu_pte_flush(vcpu, 0, 0);
+ kvmppc_mmu_flush_segments(vcpu);
+ }
+ map->host_vsid = vcpu_book3s->vsid_next;
+
+ /* Would have to be 111 to be completely aligned with the rest of
+ Linux, but that is just way too little space! */
+ vcpu_book3s->vsid_next+=1;
+
+ map->guest_vsid = gvsid;
+ map->valid = true;
+
+ return map;
+}
+
+int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
+{
+ u32 esid = eaddr >> SID_SHIFT;
+ u64 gvsid;
+ u32 sr;
+ struct kvmppc_sid_map *map;
+ struct kvmppc_book3s_shadow_vcpu *svcpu = to_svcpu(vcpu);
+
+ if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) {
+ /* Invalidate an entry */
+ svcpu->sr[esid] = SR_INVALID;
+ return -ENOENT;
+ }
+
+ map = find_sid_vsid(vcpu, gvsid);
+ if (!map)
+ map = create_sid_map(vcpu, gvsid);
+
+ map->guest_esid = esid;
+ sr = map->host_vsid | SR_KP;
+ svcpu->sr[esid] = sr;
+
+ dprintk_sr("MMU: mtsr %d, 0x%x\n", esid, sr);
+
+ return 0;
+}
+
+void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
+{
+ int i;
+ struct kvmppc_book3s_shadow_vcpu *svcpu = to_svcpu(vcpu);
+
+ dprintk_sr("MMU: flushing all segments (%d)\n", ARRAY_SIZE(svcpu->sr));
+ for (i = 0; i < ARRAY_SIZE(svcpu->sr); i++)
+ svcpu->sr[i] = SR_INVALID;
+}
+
+void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
+{
+ kvmppc_mmu_pte_flush(vcpu, 0, 0);
+ preempt_disable();
+ __destroy_context(to_book3s(vcpu)->context_id);
+ preempt_enable();
+}
+
+/* From mm/mmu_context_hash32.c */
+#define CTX_TO_VSID(ctx) (((ctx) * (897 * 16)) & 0xffffff)
+
+int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
+{
+ struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
+ int err;
+ ulong sdr1;
+
+ err = __init_new_context();
+ if (err < 0)
+ return -1;
+ vcpu3s->context_id = err;
+
+ vcpu3s->vsid_max = CTX_TO_VSID(vcpu3s->context_id + 1) - 1;
+ vcpu3s->vsid_first = CTX_TO_VSID(vcpu3s->context_id);
+
+#if 0 /* XXX still doesn't guarantee uniqueness */
+ /* We could collide with the Linux vsid space because the vsid
+ * wraps around at 24 bits. We're safe if we do our own space
+ * though, so let's always set the highest bit. */
+
+ vcpu3s->vsid_max |= 0x00800000;
+ vcpu3s->vsid_first |= 0x00800000;
+#endif
+ BUG_ON(vcpu3s->vsid_max < vcpu3s->vsid_first);
+
+ vcpu3s->vsid_next = vcpu3s->vsid_first;
+
+ /* Remember where the HTAB is */
+ asm ( "mfsdr1 %0" : "=r"(sdr1) );
+ htabmask = ((sdr1 & 0x1FF) << 16) | 0xFFC0;
+ htab = (ulong)__va(sdr1 & 0xffff0000);
+
+ return 0;
+}
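For a sense of scale: with the CTX_TO_VSID() spacing above, each context id gets its own window of 897 * 16 = 14352 VSIDs (modulo the 24-bit wrap the BUG_ON above guards against), and create_sid_map() flushes all shadow state once vsid_next runs past vsid_max. A minimal standalone sketch of that arithmetic, using a made-up context id:

	#include <stdio.h>

	#define CTX_TO_VSID(ctx) (((ctx) * (897 * 16)) & 0xffffff)

	int main(void)
	{
		unsigned long ctx = 42;	/* hypothetical context id */
		unsigned long vsid_first = CTX_TO_VSID(ctx);
		unsigned long vsid_max   = CTX_TO_VSID(ctx + 1) - 1;

		/* ~14352 guest segment mappings fit before the wrap-around flush */
		printf("first=0x%06lx max=0x%06lx count=%lu\n",
		       vsid_first, vsid_max, vsid_max - vsid_first + 1);
		return 0;
	}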
diff --git a/arch/powerpc/kvm/book3s_32_sr.S b/arch/powerpc/kvm/book3s_32_sr.S
new file mode 100644
index 000000000000..3608471ad2d8
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_32_sr.S
@@ -0,0 +1,143 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright SUSE Linux Products GmbH 2009
+ *
+ * Authors: Alexander Graf <agraf@suse.de>
+ */
+
+/******************************************************************************
+ * *
+ * Entry code *
+ * *
+ *****************************************************************************/
+
+.macro LOAD_GUEST_SEGMENTS
+
+ /* Required state:
+ *
+ * MSR = ~IR|DR
+ * R1 = host R1
+ * R2 = host R2
+ * R3 = shadow vcpu
+ * all other volatile GPRS = free
+ * SVCPU[CR] = guest CR
+ * SVCPU[XER] = guest XER
+ * SVCPU[CTR] = guest CTR
+ * SVCPU[LR] = guest LR
+ */
+
+#define XCHG_SR(n) lwz r9, (SVCPU_SR+(n*4))(r3); \
+ mtsr n, r9
+
+ XCHG_SR(0)
+ XCHG_SR(1)
+ XCHG_SR(2)
+ XCHG_SR(3)
+ XCHG_SR(4)
+ XCHG_SR(5)
+ XCHG_SR(6)
+ XCHG_SR(7)
+ XCHG_SR(8)
+ XCHG_SR(9)
+ XCHG_SR(10)
+ XCHG_SR(11)
+ XCHG_SR(12)
+ XCHG_SR(13)
+ XCHG_SR(14)
+ XCHG_SR(15)
+
+ /* Clear BATs. */
+
+#define KVM_KILL_BAT(n, reg) \
+ mtspr SPRN_IBAT##n##U,reg; \
+ mtspr SPRN_IBAT##n##L,reg; \
+ mtspr SPRN_DBAT##n##U,reg; \
+ mtspr SPRN_DBAT##n##L,reg; \
+
+ li r9, 0
+ KVM_KILL_BAT(0, r9)
+ KVM_KILL_BAT(1, r9)
+ KVM_KILL_BAT(2, r9)
+ KVM_KILL_BAT(3, r9)
+
+.endm
+
+/******************************************************************************
+ * *
+ * Exit code *
+ * *
+ *****************************************************************************/
+
+.macro LOAD_HOST_SEGMENTS
+
+ /* Register usage at this point:
+ *
+ * R1 = host R1
+ * R2 = host R2
+ * R12 = exit handler id
+ * R13 = shadow vcpu - SHADOW_VCPU_OFF
+ * SVCPU.* = guest *
+ * SVCPU[CR] = guest CR
+ * SVCPU[XER] = guest XER
+ * SVCPU[CTR] = guest CTR
+ * SVCPU[LR] = guest LR
+ *
+ */
+
+ /* Restore BATs */
+
+ /* We only overwrite the upper part, so we only restore
+ the upper part. */
+#define KVM_LOAD_BAT(n, reg, RA, RB) \
+ lwz RA,(n*16)+0(reg); \
+ lwz RB,(n*16)+4(reg); \
+ mtspr SPRN_IBAT##n##U,RA; \
+ mtspr SPRN_IBAT##n##L,RB; \
+ lwz RA,(n*16)+8(reg); \
+ lwz RB,(n*16)+12(reg); \
+ mtspr SPRN_DBAT##n##U,RA; \
+ mtspr SPRN_DBAT##n##L,RB; \
+
+ lis r9, BATS@ha
+ addi r9, r9, BATS@l
+ tophys(r9, r9)
+ KVM_LOAD_BAT(0, r9, r10, r11)
+ KVM_LOAD_BAT(1, r9, r10, r11)
+ KVM_LOAD_BAT(2, r9, r10, r11)
+ KVM_LOAD_BAT(3, r9, r10, r11)
+
+ /* Restore Segment Registers */
+
+ /* 0xc - 0xf */
+
+ li r0, 4
+ mtctr r0
+ LOAD_REG_IMMEDIATE(r3, 0x20000000 | (0x111 * 0xc))
+ lis r4, 0xc000
+3: mtsrin r3, r4
+ addi r3, r3, 0x111 /* increment VSID */
+ addis r4, r4, 0x1000 /* address of next segment */
+ bdnz 3b
+
+ /* 0x0 - 0xb */
+
+ /* 'current->mm' needs to be in r4 */
+ tophys(r4, r2)
+ lwz r4, MM(r4)
+ tophys(r4, r4)
+ /* This only clobbers r0, r3, r4 and r5 */
+ bl switch_mmu_context
+
+.endm
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index 512dcff77554..4025ea26b3c1 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -232,7 +232,7 @@ do_second:
}
dprintk("KVM MMU: Translated 0x%lx [0x%llx] -> 0x%llx "
- "-> 0x%llx\n",
+ "-> 0x%lx\n",
eaddr, avpn, gpte->vpage, gpte->raddr);
found = true;
break;
@@ -383,7 +383,7 @@ static void kvmppc_mmu_book3s_64_slbia(struct kvm_vcpu *vcpu)
if (vcpu->arch.msr & MSR_IR) {
kvmppc_mmu_flush_segments(vcpu);
- kvmppc_mmu_map_segment(vcpu, vcpu->arch.pc);
+ kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
}
}
@@ -439,37 +439,43 @@ static void kvmppc_mmu_book3s_64_tlbie(struct kvm_vcpu *vcpu, ulong va,
kvmppc_mmu_pte_vflush(vcpu, va >> 12, mask);
}
-static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, u64 esid,
+static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
u64 *vsid)
{
+ ulong ea = esid << SID_SHIFT;
+ struct kvmppc_slb *slb;
+ u64 gvsid = esid;
+
+ if (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
+ slb = kvmppc_mmu_book3s_64_find_slbe(to_book3s(vcpu), ea);
+ if (slb)
+ gvsid = slb->vsid;
+ }
+
switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
case 0:
- *vsid = (VSID_REAL >> 16) | esid;
+ *vsid = VSID_REAL | esid;
break;
case MSR_IR:
- *vsid = (VSID_REAL_IR >> 16) | esid;
+ *vsid = VSID_REAL_IR | gvsid;
break;
case MSR_DR:
- *vsid = (VSID_REAL_DR >> 16) | esid;
+ *vsid = VSID_REAL_DR | gvsid;
break;
case MSR_DR|MSR_IR:
- {
- ulong ea;
- struct kvmppc_slb *slb;
- ea = esid << SID_SHIFT;
- slb = kvmppc_mmu_book3s_64_find_slbe(to_book3s(vcpu), ea);
- if (slb)
- *vsid = slb->vsid;
- else
+ if (!slb)
return -ENOENT;
+ *vsid = gvsid;
break;
- }
default:
BUG();
break;
}
+ if (vcpu->arch.msr & MSR_PR)
+ *vsid |= VSID_PR;
+
return 0;
}
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index f2899b297ffd..e4b5744977f6 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -48,21 +48,25 @@
static void invalidate_pte(struct hpte_cache *pte)
{
- dprintk_mmu("KVM: Flushing SPT %d: 0x%llx (0x%llx) -> 0x%llx\n",
- i, pte->pte.eaddr, pte->pte.vpage, pte->host_va);
+ dprintk_mmu("KVM: Flushing SPT: 0x%lx (0x%llx) -> 0x%llx\n",
+ pte->pte.eaddr, pte->pte.vpage, pte->host_va);
ppc_md.hpte_invalidate(pte->slot, pte->host_va,
MMU_PAGE_4K, MMU_SEGSIZE_256M,
false);
pte->host_va = 0;
- kvm_release_pfn_dirty(pte->pfn);
+
+ if (pte->pte.may_write)
+ kvm_release_pfn_dirty(pte->pfn);
+ else
+ kvm_release_pfn_clean(pte->pfn);
}
-void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, u64 guest_ea, u64 ea_mask)
+void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask)
{
int i;
- dprintk_mmu("KVM: Flushing %d Shadow PTEs: 0x%llx & 0x%llx\n",
+ dprintk_mmu("KVM: Flushing %d Shadow PTEs: 0x%lx & 0x%lx\n",
vcpu->arch.hpte_cache_offset, guest_ea, ea_mask);
BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);
@@ -106,12 +110,12 @@ void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
}
}
-void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, u64 pa_start, u64 pa_end)
+void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
{
int i;
- dprintk_mmu("KVM: Flushing %d Shadow pPTEs: 0x%llx & 0x%llx\n",
- vcpu->arch.hpte_cache_offset, guest_pa, pa_mask);
+ dprintk_mmu("KVM: Flushing %d Shadow pPTEs: 0x%lx & 0x%lx\n",
+ vcpu->arch.hpte_cache_offset, pa_start, pa_end);
BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);
for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
@@ -182,7 +186,7 @@ static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
map = &to_book3s(vcpu)->sid_map[sid_map_mask];
if (map->guest_vsid == gvsid) {
- dprintk_slb("SLB: Searching 0x%llx -> 0x%llx\n",
+ dprintk_slb("SLB: Searching: 0x%llx -> 0x%llx\n",
gvsid, map->host_vsid);
return map;
}
@@ -194,7 +198,8 @@ static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
return map;
}
- dprintk_slb("SLB: Searching 0x%llx -> not found\n", gvsid);
+ dprintk_slb("SLB: Searching %d/%d: 0x%llx -> not found\n",
+ sid_map_mask, SID_MAP_MASK - sid_map_mask, gvsid);
return NULL;
}
@@ -212,7 +217,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
/* Get host physical address for gpa */
hpaddr = gfn_to_pfn(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
if (kvm_is_error_hva(hpaddr)) {
- printk(KERN_INFO "Couldn't get guest page for gfn %llx!\n", orig_pte->eaddr);
+ printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", orig_pte->eaddr);
return -EINVAL;
}
hpaddr <<= PAGE_SHIFT;
@@ -227,10 +232,16 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);
map = find_sid_vsid(vcpu, vsid);
if (!map) {
- kvmppc_mmu_map_segment(vcpu, orig_pte->eaddr);
+ ret = kvmppc_mmu_map_segment(vcpu, orig_pte->eaddr);
+ WARN_ON(ret < 0);
map = find_sid_vsid(vcpu, vsid);
}
- BUG_ON(!map);
+ if (!map) {
+ printk(KERN_ERR "KVM: Segment map for 0x%llx (0x%lx) failed\n",
+ vsid, orig_pte->eaddr);
+ WARN_ON(true);
+ return -EINVAL;
+ }
vsid = map->host_vsid;
va = hpt_va(orig_pte->eaddr, vsid, MMU_SEGSIZE_256M);
@@ -257,26 +268,26 @@ map_again:
if (ret < 0) {
/* If we couldn't map a primary PTE, try a secondary */
-#ifdef USE_SECONDARY
hash = ~hash;
+ vflags ^= HPTE_V_SECONDARY;
attempt++;
- if (attempt % 2)
- vflags = HPTE_V_SECONDARY;
- else
- vflags = 0;
-#else
- attempt = 2;
-#endif
goto map_again;
} else {
int hpte_id = kvmppc_mmu_hpte_cache_next(vcpu);
struct hpte_cache *pte = &vcpu->arch.hpte_cache[hpte_id];
- dprintk_mmu("KVM: %c%c Map 0x%llx: [%lx] 0x%lx (0x%llx) -> %lx\n",
+ dprintk_mmu("KVM: %c%c Map 0x%lx: [%lx] 0x%lx (0x%llx) -> %lx\n",
((rflags & HPTE_R_PP) == 3) ? '-' : 'w',
(rflags & HPTE_R_N) ? '-' : 'x',
orig_pte->eaddr, hpteg, va, orig_pte->vpage, hpaddr);
+ /* The ppc_md code may give us a secondary entry even though we
+ asked for a primary. Fix up. */
+ if ((ret & _PTEIDX_SECONDARY) && !(vflags & HPTE_V_SECONDARY)) {
+ hash = ~hash;
+ hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
+ }
+
pte->slot = hpteg + (ret & 7);
pte->host_va = va;
pte->pte = *orig_pte;
@@ -321,6 +332,9 @@ static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
map->guest_vsid = gvsid;
map->valid = true;
+ dprintk_slb("SLB: New mapping at %d: 0x%llx -> 0x%llx\n",
+ sid_map_mask, gvsid, map->host_vsid);
+
return map;
}
@@ -331,14 +345,14 @@ static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid)
int found_inval = -1;
int r;
- if (!get_paca()->kvm_slb_max)
- get_paca()->kvm_slb_max = 1;
+ if (!to_svcpu(vcpu)->slb_max)
+ to_svcpu(vcpu)->slb_max = 1;
/* Are we overwriting? */
- for (i = 1; i < get_paca()->kvm_slb_max; i++) {
- if (!(get_paca()->kvm_slb[i].esid & SLB_ESID_V))
+ for (i = 1; i < to_svcpu(vcpu)->slb_max; i++) {
+ if (!(to_svcpu(vcpu)->slb[i].esid & SLB_ESID_V))
found_inval = i;
- else if ((get_paca()->kvm_slb[i].esid & ESID_MASK) == esid)
+ else if ((to_svcpu(vcpu)->slb[i].esid & ESID_MASK) == esid)
return i;
}
@@ -352,11 +366,11 @@ static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid)
max_slb_size = mmu_slb_size;
/* Overflowing -> purge */
- if ((get_paca()->kvm_slb_max) == max_slb_size)
+ if ((to_svcpu(vcpu)->slb_max) == max_slb_size)
kvmppc_mmu_flush_segments(vcpu);
- r = get_paca()->kvm_slb_max;
- get_paca()->kvm_slb_max++;
+ r = to_svcpu(vcpu)->slb_max;
+ to_svcpu(vcpu)->slb_max++;
return r;
}
@@ -374,7 +388,7 @@ int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) {
/* Invalidate an entry */
- get_paca()->kvm_slb[slb_index].esid = 0;
+ to_svcpu(vcpu)->slb[slb_index].esid = 0;
return -ENOENT;
}
@@ -388,8 +402,8 @@ int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
slb_vsid &= ~SLB_VSID_KP;
slb_esid |= slb_index;
- get_paca()->kvm_slb[slb_index].esid = slb_esid;
- get_paca()->kvm_slb[slb_index].vsid = slb_vsid;
+ to_svcpu(vcpu)->slb[slb_index].esid = slb_esid;
+ to_svcpu(vcpu)->slb[slb_index].vsid = slb_vsid;
dprintk_slb("slbmte %#llx, %#llx\n", slb_vsid, slb_esid);
@@ -398,11 +412,29 @@ int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
{
- get_paca()->kvm_slb_max = 1;
- get_paca()->kvm_slb[0].esid = 0;
+ to_svcpu(vcpu)->slb_max = 1;
+ to_svcpu(vcpu)->slb[0].esid = 0;
}
void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
kvmppc_mmu_pte_flush(vcpu, 0, 0);
+ __destroy_context(to_book3s(vcpu)->context_id);
+}
+
+int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
+{
+ struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
+ int err;
+
+ err = __init_new_context();
+ if (err < 0)
+ return -1;
+ vcpu3s->context_id = err;
+
+ vcpu3s->vsid_max = ((vcpu3s->context_id + 1) << USER_ESID_BITS) - 1;
+ vcpu3s->vsid_first = vcpu3s->context_id << USER_ESID_BITS;
+ vcpu3s->vsid_next = vcpu3s->vsid_first;
+
+ return 0;
}
diff --git a/arch/powerpc/kvm/book3s_64_slb.S b/arch/powerpc/kvm/book3s_64_slb.S
index 35b762722187..04e7d3bbfe8b 100644
--- a/arch/powerpc/kvm/book3s_64_slb.S
+++ b/arch/powerpc/kvm/book3s_64_slb.S
@@ -44,8 +44,7 @@ slb_exit_skip_ ## num:
* *
*****************************************************************************/
-.global kvmppc_handler_trampoline_enter
-kvmppc_handler_trampoline_enter:
+.macro LOAD_GUEST_SEGMENTS
/* Required state:
*
@@ -53,20 +52,14 @@ kvmppc_handler_trampoline_enter:
* R13 = PACA
* R1 = host R1
* R2 = host R2
- * R9 = guest IP
- * R10 = guest MSR
- * all other GPRS = free
- * PACA[KVM_CR] = guest CR
- * PACA[KVM_XER] = guest XER
+ * R3 = shadow vcpu
+ * all other volatile GPRS = free
+ * SVCPU[CR] = guest CR
+ * SVCPU[XER] = guest XER
+ * SVCPU[CTR] = guest CTR
+ * SVCPU[LR] = guest LR
*/
- mtsrr0 r9
- mtsrr1 r10
-
- /* Activate guest mode, so faults get handled by KVM */
- li r11, KVM_GUEST_MODE_GUEST
- stb r11, PACA_KVM_IN_GUEST(r13)
-
/* Remove LPAR shadow entries */
#if SLB_NUM_BOLTED == 3
@@ -101,14 +94,14 @@ kvmppc_handler_trampoline_enter:
/* Fill SLB with our shadow */
- lbz r12, PACA_KVM_SLB_MAX(r13)
+ lbz r12, SVCPU_SLB_MAX(r3)
mulli r12, r12, 16
- addi r12, r12, PACA_KVM_SLB
- add r12, r12, r13
+ addi r12, r12, SVCPU_SLB
+ add r12, r12, r3
/* for (r11 = kvm_slb; r11 < kvm_slb + kvm_slb_size; r11+=slb_entry) */
- li r11, PACA_KVM_SLB
- add r11, r11, r13
+ li r11, SVCPU_SLB
+ add r11, r11, r3
slb_loop_enter:
@@ -127,34 +120,7 @@ slb_loop_enter_skip:
slb_do_enter:
- /* Enter guest */
-
- ld r0, (PACA_KVM_R0)(r13)
- ld r1, (PACA_KVM_R1)(r13)
- ld r2, (PACA_KVM_R2)(r13)
- ld r3, (PACA_KVM_R3)(r13)
- ld r4, (PACA_KVM_R4)(r13)
- ld r5, (PACA_KVM_R5)(r13)
- ld r6, (PACA_KVM_R6)(r13)
- ld r7, (PACA_KVM_R7)(r13)
- ld r8, (PACA_KVM_R8)(r13)
- ld r9, (PACA_KVM_R9)(r13)
- ld r10, (PACA_KVM_R10)(r13)
- ld r12, (PACA_KVM_R12)(r13)
-
- lwz r11, (PACA_KVM_CR)(r13)
- mtcr r11
-
- ld r11, (PACA_KVM_XER)(r13)
- mtxer r11
-
- ld r11, (PACA_KVM_R11)(r13)
- ld r13, (PACA_KVM_R13)(r13)
-
- RFI
-kvmppc_handler_trampoline_enter_end:
-
-
+.endm
/******************************************************************************
* *
@@ -162,99 +128,22 @@ kvmppc_handler_trampoline_enter_end:
* *
*****************************************************************************/
-.global kvmppc_handler_trampoline_exit
-kvmppc_handler_trampoline_exit:
+.macro LOAD_HOST_SEGMENTS
/* Register usage at this point:
*
- * SPRG_SCRATCH0 = guest R13
- * R12 = exit handler id
- * R13 = PACA
- * PACA.KVM.SCRATCH0 = guest R12
- * PACA.KVM.SCRATCH1 = guest CR
+ * R1 = host R1
+ * R2 = host R2
+ * R12 = exit handler id
+ * R13 = shadow vcpu - SHADOW_VCPU_OFF [=PACA on PPC64]
+ * SVCPU.* = guest *
+ * SVCPU[CR] = guest CR
+ * SVCPU[XER] = guest XER
+ * SVCPU[CTR] = guest CTR
+ * SVCPU[LR] = guest LR
*
*/
- /* Save registers */
-
- std r0, PACA_KVM_R0(r13)
- std r1, PACA_KVM_R1(r13)
- std r2, PACA_KVM_R2(r13)
- std r3, PACA_KVM_R3(r13)
- std r4, PACA_KVM_R4(r13)
- std r5, PACA_KVM_R5(r13)
- std r6, PACA_KVM_R6(r13)
- std r7, PACA_KVM_R7(r13)
- std r8, PACA_KVM_R8(r13)
- std r9, PACA_KVM_R9(r13)
- std r10, PACA_KVM_R10(r13)
- std r11, PACA_KVM_R11(r13)
-
- /* Restore R1/R2 so we can handle faults */
- ld r1, PACA_KVM_HOST_R1(r13)
- ld r2, PACA_KVM_HOST_R2(r13)
-
- /* Save guest PC and MSR in GPRs */
- mfsrr0 r3
- mfsrr1 r4
-
- /* Get scratch'ed off registers */
- mfspr r9, SPRN_SPRG_SCRATCH0
- std r9, PACA_KVM_R13(r13)
-
- ld r8, PACA_KVM_SCRATCH0(r13)
- std r8, PACA_KVM_R12(r13)
-
- lwz r7, PACA_KVM_SCRATCH1(r13)
- stw r7, PACA_KVM_CR(r13)
-
- /* Save more register state */
-
- mfxer r6
- stw r6, PACA_KVM_XER(r13)
-
- mfdar r5
- mfdsisr r6
-
- /*
- * In order for us to easily get the last instruction,
- * we got the #vmexit at, we exploit the fact that the
- * virtual layout is still the same here, so we can just
- * ld from the guest's PC address
- */
-
- /* We only load the last instruction when it's safe */
- cmpwi r12, BOOK3S_INTERRUPT_DATA_STORAGE
- beq ld_last_inst
- cmpwi r12, BOOK3S_INTERRUPT_PROGRAM
- beq ld_last_inst
-
- b no_ld_last_inst
-
-ld_last_inst:
- /* Save off the guest instruction we're at */
-
- /* Set guest mode to 'jump over instruction' so if lwz faults
- * we'll just continue at the next IP. */
- li r9, KVM_GUEST_MODE_SKIP
- stb r9, PACA_KVM_IN_GUEST(r13)
-
- /* 1) enable paging for data */
- mfmsr r9
- ori r11, r9, MSR_DR /* Enable paging for data */
- mtmsr r11
- /* 2) fetch the instruction */
- li r0, KVM_INST_FETCH_FAILED /* In case lwz faults */
- lwz r0, 0(r3)
- /* 3) disable paging again */
- mtmsr r9
-
-no_ld_last_inst:
-
- /* Unset guest mode */
- li r9, KVM_GUEST_MODE_NONE
- stb r9, PACA_KVM_IN_GUEST(r13)
-
/* Restore bolted entries from the shadow and fix it along the way */
/* We don't store anything in entry 0, so we don't need to take care of it */
@@ -275,28 +164,4 @@ no_ld_last_inst:
slb_do_exit:
- /* Register usage at this point:
- *
- * R0 = guest last inst
- * R1 = host R1
- * R2 = host R2
- * R3 = guest PC
- * R4 = guest MSR
- * R5 = guest DAR
- * R6 = guest DSISR
- * R12 = exit handler id
- * R13 = PACA
- * PACA.KVM.* = guest *
- *
- */
-
- /* RFI into the highmem handler */
- mfmsr r7
- ori r7, r7, MSR_IR|MSR_DR|MSR_RI /* Enable paging */
- mtsrr1 r7
- ld r8, PACA_KVM_VMHANDLER(r13) /* Highmem handler address */
- mtsrr0 r8
-
- RFI
-kvmppc_handler_trampoline_exit_end:
-
+.endm
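With both the 32-bit (book3s_32_sr.S) and 64-bit (this file) low-level code reduced to LOAD_GUEST_SEGMENTS / LOAD_HOST_SEGMENTS macros, the standalone trampolines go away and a shared switcher can pull in whichever implementation matches the build. The include site is not part of this hunk; a hedged sketch of what that selection presumably looks like:

	#if defined(CONFIG_PPC_BOOK3S_64)
	#include "book3s_64_slb.S"	/* LOAD_*_SEGMENTS via the SLB */
	#elif defined(CONFIG_PPC_BOOK3S_32)
	#include "book3s_32_sr.S"	/* LOAD_*_SEGMENTS via segment registers */
	#endif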
diff --git a/arch/powerpc/kvm/book3s_64_emulate.c b/arch/powerpc/kvm/book3s_emulate.c
index 2b0ee7e040c9..c85f906038ce 100644
--- a/arch/powerpc/kvm/book3s_64_emulate.c
+++ b/arch/powerpc/kvm/book3s_emulate.c
@@ -28,13 +28,16 @@
#define OP_31_XOP_MFMSR 83
#define OP_31_XOP_MTMSR 146
#define OP_31_XOP_MTMSRD 178
+#define OP_31_XOP_MTSR 210
#define OP_31_XOP_MTSRIN 242
#define OP_31_XOP_TLBIEL 274
#define OP_31_XOP_TLBIE 306
#define OP_31_XOP_SLBMTE 402
#define OP_31_XOP_SLBIE 434
#define OP_31_XOP_SLBIA 498
+#define OP_31_XOP_MFSR 595
#define OP_31_XOP_MFSRIN 659
+#define OP_31_XOP_DCBA 758
#define OP_31_XOP_SLBMFEV 851
#define OP_31_XOP_EIOIO 854
#define OP_31_XOP_SLBMFEE 915
@@ -42,6 +45,24 @@
/* DCBZ is actually 1014, but we patch it to 1010 so we get a trap */
#define OP_31_XOP_DCBZ 1010
+#define OP_LFS 48
+#define OP_LFD 50
+#define OP_STFS 52
+#define OP_STFD 54
+
+#define SPRN_GQR0 912
+#define SPRN_GQR1 913
+#define SPRN_GQR2 914
+#define SPRN_GQR3 915
+#define SPRN_GQR4 916
+#define SPRN_GQR5 917
+#define SPRN_GQR6 918
+#define SPRN_GQR7 919
+
+/* Book3S_32 defines mfsrin(v) - but that messes up our abstract
+ * function pointers, so let's just disable the define. */
+#undef mfsrin
+
int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int inst, int *advance)
{
@@ -52,7 +73,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
switch (get_xop(inst)) {
case OP_19_XOP_RFID:
case OP_19_XOP_RFI:
- vcpu->arch.pc = vcpu->arch.srr0;
+ kvmppc_set_pc(vcpu, vcpu->arch.srr0);
kvmppc_set_msr(vcpu, vcpu->arch.srr1);
*advance = 0;
break;
@@ -80,6 +101,18 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
case OP_31_XOP_MTMSR:
kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, get_rs(inst)));
break;
+ case OP_31_XOP_MFSR:
+ {
+ int srnum;
+
+ srnum = kvmppc_get_field(inst, 12 + 32, 15 + 32);
+ if (vcpu->arch.mmu.mfsrin) {
+ u32 sr;
+ sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
+ kvmppc_set_gpr(vcpu, get_rt(inst), sr);
+ }
+ break;
+ }
case OP_31_XOP_MFSRIN:
{
int srnum;
@@ -92,6 +125,11 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
}
break;
}
+ case OP_31_XOP_MTSR:
+ vcpu->arch.mmu.mtsrin(vcpu,
+ (inst >> 16) & 0xf,
+ kvmppc_get_gpr(vcpu, get_rs(inst)));
+ break;
case OP_31_XOP_MTSRIN:
vcpu->arch.mmu.mtsrin(vcpu,
(kvmppc_get_gpr(vcpu, get_rb(inst)) >> 28) & 0xf,
@@ -150,12 +188,17 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
kvmppc_set_gpr(vcpu, get_rt(inst), t);
}
break;
+ case OP_31_XOP_DCBA:
+ /* Gets treated as NOP */
+ break;
case OP_31_XOP_DCBZ:
{
ulong rb = kvmppc_get_gpr(vcpu, get_rb(inst));
ulong ra = 0;
- ulong addr;
+ ulong addr, vaddr;
u32 zeros[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
+ u32 dsisr;
+ int r;
if (get_ra(inst))
ra = kvmppc_get_gpr(vcpu, get_ra(inst));
@@ -163,15 +206,25 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
addr = (ra + rb) & ~31ULL;
if (!(vcpu->arch.msr & MSR_SF))
addr &= 0xffffffff;
+ vaddr = addr;
+
+ r = kvmppc_st(vcpu, &addr, 32, zeros, true);
+ if ((r == -ENOENT) || (r == -EPERM)) {
+ *advance = 0;
+ vcpu->arch.dear = vaddr;
+ to_svcpu(vcpu)->fault_dar = vaddr;
+
+ dsisr = DSISR_ISSTORE;
+ if (r == -ENOENT)
+ dsisr |= DSISR_NOHPTE;
+ else if (r == -EPERM)
+ dsisr |= DSISR_PROTFAULT;
+
+ to_book3s(vcpu)->dsisr = dsisr;
+ to_svcpu(vcpu)->fault_dsisr = dsisr;
- if (kvmppc_st(vcpu, addr, 32, zeros)) {
- vcpu->arch.dear = addr;
- vcpu->arch.fault_dear = addr;
- to_book3s(vcpu)->dsisr = DSISR_PROTFAULT |
- DSISR_ISSTORE;
kvmppc_book3s_queue_irqprio(vcpu,
BOOK3S_INTERRUPT_DATA_STORAGE);
- kvmppc_mmu_pte_flush(vcpu, addr, ~0xFFFULL);
}
break;
@@ -184,6 +237,9 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
emulated = EMULATE_FAIL;
}
+ if (emulated == EMULATE_FAIL)
+ emulated = kvmppc_emulate_paired_single(run, vcpu);
+
return emulated;
}
@@ -207,6 +263,34 @@ void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat, bool upper,
}
}
+static u32 kvmppc_read_bat(struct kvm_vcpu *vcpu, int sprn)
+{
+ struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
+ struct kvmppc_bat *bat;
+
+ switch (sprn) {
+ case SPRN_IBAT0U ... SPRN_IBAT3L:
+ bat = &vcpu_book3s->ibat[(sprn - SPRN_IBAT0U) / 2];
+ break;
+ case SPRN_IBAT4U ... SPRN_IBAT7L:
+ bat = &vcpu_book3s->ibat[4 + ((sprn - SPRN_IBAT4U) / 2)];
+ break;
+ case SPRN_DBAT0U ... SPRN_DBAT3L:
+ bat = &vcpu_book3s->dbat[(sprn - SPRN_DBAT0U) / 2];
+ break;
+ case SPRN_DBAT4U ... SPRN_DBAT7L:
+ bat = &vcpu_book3s->dbat[4 + ((sprn - SPRN_DBAT4U) / 2)];
+ break;
+ default:
+ BUG();
+ }
+
+ if (sprn % 2)
+ return bat->raw >> 32;
+ else
+ return bat->raw;
+}
+
static void kvmppc_write_bat(struct kvm_vcpu *vcpu, int sprn, u32 val)
{
struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
@@ -217,13 +301,13 @@ static void kvmppc_write_bat(struct kvm_vcpu *vcpu, int sprn, u32 val)
bat = &vcpu_book3s->ibat[(sprn - SPRN_IBAT0U) / 2];
break;
case SPRN_IBAT4U ... SPRN_IBAT7L:
- bat = &vcpu_book3s->ibat[(sprn - SPRN_IBAT4U) / 2];
+ bat = &vcpu_book3s->ibat[4 + ((sprn - SPRN_IBAT4U) / 2)];
break;
case SPRN_DBAT0U ... SPRN_DBAT3L:
bat = &vcpu_book3s->dbat[(sprn - SPRN_DBAT0U) / 2];
break;
case SPRN_DBAT4U ... SPRN_DBAT7L:
- bat = &vcpu_book3s->dbat[(sprn - SPRN_DBAT4U) / 2];
+ bat = &vcpu_book3s->dbat[4 + ((sprn - SPRN_DBAT4U) / 2)];
break;
default:
BUG();
@@ -258,6 +342,7 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
/* BAT writes happen so rarely that we're ok to flush
* everything here */
kvmppc_mmu_pte_flush(vcpu, 0, 0);
+ kvmppc_mmu_flush_segments(vcpu);
break;
case SPRN_HID0:
to_book3s(vcpu)->hid[0] = spr_val;
@@ -268,7 +353,32 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
case SPRN_HID2:
to_book3s(vcpu)->hid[2] = spr_val;
break;
+ case SPRN_HID2_GEKKO:
+ to_book3s(vcpu)->hid[2] = spr_val;
+ /* HID2.PSE controls paired single on gekko */
+ switch (vcpu->arch.pvr) {
+ case 0x00080200: /* lonestar 2.0 */
+ case 0x00088202: /* lonestar 2.2 */
+ case 0x70000100: /* gekko 1.0 */
+ case 0x00080100: /* gekko 2.0 */
+ case 0x00083203: /* gekko 2.3a */
+ case 0x00083213: /* gekko 2.3b */
+ case 0x00083204: /* gekko 2.4 */
+ case 0x00083214: /* gekko 2.4e (8SE) - retail HW2 */
+ case 0x00087200: /* broadway */
+ if (vcpu->arch.hflags & BOOK3S_HFLAG_NATIVE_PS) {
+ /* Native paired singles */
+ } else if (spr_val & (1 << 29)) { /* HID2.PSE */
+ vcpu->arch.hflags |= BOOK3S_HFLAG_PAIRED_SINGLE;
+ kvmppc_giveup_ext(vcpu, MSR_FP);
+ } else {
+ vcpu->arch.hflags &= ~BOOK3S_HFLAG_PAIRED_SINGLE;
+ }
+ break;
+ }
+ break;
case SPRN_HID4:
+ case SPRN_HID4_GEKKO:
to_book3s(vcpu)->hid[4] = spr_val;
break;
case SPRN_HID5:
@@ -278,12 +388,30 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
(mfmsr() & MSR_HV))
vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
break;
+ case SPRN_GQR0:
+ case SPRN_GQR1:
+ case SPRN_GQR2:
+ case SPRN_GQR3:
+ case SPRN_GQR4:
+ case SPRN_GQR5:
+ case SPRN_GQR6:
+ case SPRN_GQR7:
+ to_book3s(vcpu)->gqr[sprn - SPRN_GQR0] = spr_val;
+ break;
case SPRN_ICTC:
case SPRN_THRM1:
case SPRN_THRM2:
case SPRN_THRM3:
case SPRN_CTRLF:
case SPRN_CTRLT:
+ case SPRN_L2CR:
+ case SPRN_MMCR0_GEKKO:
+ case SPRN_MMCR1_GEKKO:
+ case SPRN_PMC1_GEKKO:
+ case SPRN_PMC2_GEKKO:
+ case SPRN_PMC3_GEKKO:
+ case SPRN_PMC4_GEKKO:
+ case SPRN_WPAR_GEKKO:
break;
default:
printk(KERN_INFO "KVM: invalid SPR write: %d\n", sprn);
@@ -301,6 +429,12 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
int emulated = EMULATE_DONE;
switch (sprn) {
+ case SPRN_IBAT0U ... SPRN_IBAT3L:
+ case SPRN_IBAT4U ... SPRN_IBAT7L:
+ case SPRN_DBAT0U ... SPRN_DBAT3L:
+ case SPRN_DBAT4U ... SPRN_DBAT7L:
+ kvmppc_set_gpr(vcpu, rt, kvmppc_read_bat(vcpu, sprn));
+ break;
case SPRN_SDR1:
kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->sdr1);
break;
@@ -320,19 +454,40 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[1]);
break;
case SPRN_HID2:
+ case SPRN_HID2_GEKKO:
kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[2]);
break;
case SPRN_HID4:
+ case SPRN_HID4_GEKKO:
kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[4]);
break;
case SPRN_HID5:
kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[5]);
break;
+ case SPRN_GQR0:
+ case SPRN_GQR1:
+ case SPRN_GQR2:
+ case SPRN_GQR3:
+ case SPRN_GQR4:
+ case SPRN_GQR5:
+ case SPRN_GQR6:
+ case SPRN_GQR7:
+ kvmppc_set_gpr(vcpu, rt,
+ to_book3s(vcpu)->gqr[sprn - SPRN_GQR0]);
+ break;
case SPRN_THRM1:
case SPRN_THRM2:
case SPRN_THRM3:
case SPRN_CTRLF:
case SPRN_CTRLT:
+ case SPRN_L2CR:
+ case SPRN_MMCR0_GEKKO:
+ case SPRN_MMCR1_GEKKO:
+ case SPRN_PMC1_GEKKO:
+ case SPRN_PMC2_GEKKO:
+ case SPRN_PMC3_GEKKO:
+ case SPRN_PMC4_GEKKO:
+ case SPRN_WPAR_GEKKO:
kvmppc_set_gpr(vcpu, rt, 0);
break;
default:
@@ -346,3 +501,73 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
return emulated;
}
+u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst)
+{
+ u32 dsisr = 0;
+
+ /*
+ * This is what the spec says about DSISR bits (not mentioned = 0):
+ *
+ * 12:13 [DS] Set to bits 30:31
+ * 15:16 [X] Set to bits 29:30
+ * 17 [X] Set to bit 25
+ * [D/DS] Set to bit 5
+ * 18:21 [X] Set to bits 21:24
+ * [D/DS] Set to bits 1:4
+ * 22:26 Set to bits 6:10 (RT/RS/FRT/FRS)
+ * 27:31 Set to bits 11:15 (RA)
+ */
+
+ switch (get_op(inst)) {
+ /* D-form */
+ case OP_LFS:
+ case OP_LFD:
+ case OP_STFD:
+ case OP_STFS:
+ dsisr |= (inst >> 12) & 0x4000; /* bit 17 */
+ dsisr |= (inst >> 17) & 0x3c00; /* bits 18:21 */
+ break;
+ /* X-form */
+ case 31:
+ dsisr |= (inst << 14) & 0x18000; /* bits 15:16 */
+ dsisr |= (inst << 8) & 0x04000; /* bit 17 */
+ dsisr |= (inst << 3) & 0x03c00; /* bits 18:21 */
+ break;
+ default:
+ printk(KERN_INFO "KVM: Unaligned instruction 0x%x\n", inst);
+ break;
+ }
+
+ dsisr |= (inst >> 16) & 0x03ff; /* bits 22:31 */
+
+ return dsisr;
+}
+
+ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst)
+{
+ ulong dar = 0;
+ ulong ra;
+
+ switch (get_op(inst)) {
+ case OP_LFS:
+ case OP_LFD:
+ case OP_STFD:
+ case OP_STFS:
+ ra = get_ra(inst);
+ if (ra)
+ dar = kvmppc_get_gpr(vcpu, ra);
+ dar += (s32)((s16)inst);
+ break;
+ case 31:
+ ra = get_ra(inst);
+ if (ra)
+ dar = kvmppc_get_gpr(vcpu, ra);
+ dar += kvmppc_get_gpr(vcpu, get_rb(inst));
+ break;
+ default:
+ printk(KERN_INFO "KVM: Unaligned instruction 0x%x\n", inst);
+ break;
+ }
+
+ return dar;
+}
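The two helpers above rebuild the architected DAR and DSISR for an alignment interrupt from the faulting instruction, so the fault can be reflected to the guest with the registers it expects. A minimal sketch of how a caller might combine them; the function name is made up here, but the fields and helpers are the ones used elsewhere in this patch:

	/* Illustrative caller, not part of this diff. */
	static void reflect_alignment_fault(struct kvm_vcpu *vcpu, u32 inst)
	{
		vcpu->arch.dear = kvmppc_alignment_dar(vcpu, inst);
		to_book3s(vcpu)->dsisr = kvmppc_alignment_dsisr(vcpu, inst);
		kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_ALIGNMENT);
	}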
diff --git a/arch/powerpc/kvm/book3s_64_exports.c b/arch/powerpc/kvm/book3s_exports.c
index 1dd5a1ddfd0d..1dd5a1ddfd0d 100644
--- a/arch/powerpc/kvm/book3s_64_exports.c
+++ b/arch/powerpc/kvm/book3s_exports.c
diff --git a/arch/powerpc/kvm/book3s_64_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S
index c1584d0cbce8..2f0bc928b08a 100644
--- a/arch/powerpc/kvm/book3s_64_interrupts.S
+++ b/arch/powerpc/kvm/book3s_interrupts.S
@@ -24,36 +24,56 @@
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
-#define KVMPPC_HANDLE_EXIT .kvmppc_handle_exit
-#define ULONG_SIZE 8
-#define VCPU_GPR(n) (VCPU_GPRS + (n * ULONG_SIZE))
+#if defined(CONFIG_PPC_BOOK3S_64)
-.macro DISABLE_INTERRUPTS
- mfmsr r0
- rldicl r0,r0,48,1
- rotldi r0,r0,16
- mtmsrd r0,1
-.endm
+#define ULONG_SIZE 8
+#define FUNC(name) GLUE(.,name)
+#define GET_SHADOW_VCPU(reg) \
+ addi reg, r13, PACA_KVM_SVCPU
+
+#define DISABLE_INTERRUPTS \
+ mfmsr r0; \
+ rldicl r0,r0,48,1; \
+ rotldi r0,r0,16; \
+ mtmsrd r0,1; \
+
+#elif defined(CONFIG_PPC_BOOK3S_32)
+
+#define ULONG_SIZE 4
+#define FUNC(name) name
+
+#define GET_SHADOW_VCPU(reg) \
+ lwz reg, (THREAD + THREAD_KVM_SVCPU)(r2)
+
+#define DISABLE_INTERRUPTS \
+ mfmsr r0; \
+ rlwinm r0,r0,0,17,15; \
+ mtmsr r0; \
+
+#endif /* CONFIG_PPC_BOOK3S_XX */
+
+
+#define VCPU_GPR(n) (VCPU_GPRS + (n * ULONG_SIZE))
#define VCPU_LOAD_NVGPRS(vcpu) \
- ld r14, VCPU_GPR(r14)(vcpu); \
- ld r15, VCPU_GPR(r15)(vcpu); \
- ld r16, VCPU_GPR(r16)(vcpu); \
- ld r17, VCPU_GPR(r17)(vcpu); \
- ld r18, VCPU_GPR(r18)(vcpu); \
- ld r19, VCPU_GPR(r19)(vcpu); \
- ld r20, VCPU_GPR(r20)(vcpu); \
- ld r21, VCPU_GPR(r21)(vcpu); \
- ld r22, VCPU_GPR(r22)(vcpu); \
- ld r23, VCPU_GPR(r23)(vcpu); \
- ld r24, VCPU_GPR(r24)(vcpu); \
- ld r25, VCPU_GPR(r25)(vcpu); \
- ld r26, VCPU_GPR(r26)(vcpu); \
- ld r27, VCPU_GPR(r27)(vcpu); \
- ld r28, VCPU_GPR(r28)(vcpu); \
- ld r29, VCPU_GPR(r29)(vcpu); \
- ld r30, VCPU_GPR(r30)(vcpu); \
- ld r31, VCPU_GPR(r31)(vcpu); \
+ PPC_LL r14, VCPU_GPR(r14)(vcpu); \
+ PPC_LL r15, VCPU_GPR(r15)(vcpu); \
+ PPC_LL r16, VCPU_GPR(r16)(vcpu); \
+ PPC_LL r17, VCPU_GPR(r17)(vcpu); \
+ PPC_LL r18, VCPU_GPR(r18)(vcpu); \
+ PPC_LL r19, VCPU_GPR(r19)(vcpu); \
+ PPC_LL r20, VCPU_GPR(r20)(vcpu); \
+ PPC_LL r21, VCPU_GPR(r21)(vcpu); \
+ PPC_LL r22, VCPU_GPR(r22)(vcpu); \
+ PPC_LL r23, VCPU_GPR(r23)(vcpu); \
+ PPC_LL r24, VCPU_GPR(r24)(vcpu); \
+ PPC_LL r25, VCPU_GPR(r25)(vcpu); \
+ PPC_LL r26, VCPU_GPR(r26)(vcpu); \
+ PPC_LL r27, VCPU_GPR(r27)(vcpu); \
+ PPC_LL r28, VCPU_GPR(r28)(vcpu); \
+ PPC_LL r29, VCPU_GPR(r29)(vcpu); \
+ PPC_LL r30, VCPU_GPR(r30)(vcpu); \
+ PPC_LL r31, VCPU_GPR(r31)(vcpu); \
/*****************************************************************************
* *
@@ -69,11 +89,11 @@ _GLOBAL(__kvmppc_vcpu_entry)
kvm_start_entry:
/* Write correct stack frame */
- mflr r0
- std r0,16(r1)
+ mflr r0
+ PPC_STL r0,PPC_LR_STKOFF(r1)
/* Save host state to the stack */
- stdu r1, -SWITCH_FRAME_SIZE(r1)
+ PPC_STLU r1, -SWITCH_FRAME_SIZE(r1)
/* Save r3 (kvm_run) and r4 (vcpu) */
SAVE_2GPRS(3, r1)
@@ -82,33 +102,28 @@ kvm_start_entry:
SAVE_NVGPRS(r1)
/* Save LR */
- std r0, _LINK(r1)
+ PPC_STL r0, _LINK(r1)
/* Load non-volatile guest state from the vcpu */
VCPU_LOAD_NVGPRS(r4)
+ GET_SHADOW_VCPU(r5)
+
/* Save R1/R2 in the PACA */
- std r1, PACA_KVM_HOST_R1(r13)
- std r2, PACA_KVM_HOST_R2(r13)
+ PPC_STL r1, SVCPU_HOST_R1(r5)
+ PPC_STL r2, SVCPU_HOST_R2(r5)
/* XXX swap in/out on load? */
- ld r3, VCPU_HIGHMEM_HANDLER(r4)
- std r3, PACA_KVM_VMHANDLER(r13)
+ PPC_LL r3, VCPU_HIGHMEM_HANDLER(r4)
+ PPC_STL r3, SVCPU_VMHANDLER(r5)
kvm_start_lightweight:
- ld r9, VCPU_PC(r4) /* r9 = vcpu->arch.pc */
- ld r10, VCPU_SHADOW_MSR(r4) /* r10 = vcpu->arch.shadow_msr */
-
- /* Load some guest state in the respective registers */
- ld r5, VCPU_CTR(r4) /* r5 = vcpu->arch.ctr */
- /* will be swapped in by rmcall */
-
- ld r3, VCPU_LR(r4) /* r3 = vcpu->arch.lr */
- mtlr r3 /* LR = r3 */
+ PPC_LL r10, VCPU_SHADOW_MSR(r4) /* r10 = vcpu->arch.shadow_msr */
DISABLE_INTERRUPTS
+#ifdef CONFIG_PPC_BOOK3S_64
/* Some guests may need to have dcbz set to 32 byte length.
*
* Usually we ensure that by patching the guest's instructions
@@ -118,7 +133,7 @@ kvm_start_lightweight:
* because that's a lot faster.
*/
- ld r3, VCPU_HFLAGS(r4)
+ PPC_LL r3, VCPU_HFLAGS(r4)
rldicl. r3, r3, 0, 63 /* CR = ((r3 & 1) == 0) */
beq no_dcbz32_on
@@ -128,13 +143,15 @@ kvm_start_lightweight:
no_dcbz32_on:
- ld r6, VCPU_RMCALL(r4)
+#endif /* CONFIG_PPC_BOOK3S_64 */
+
+ PPC_LL r6, VCPU_RMCALL(r4)
mtctr r6
- ld r3, VCPU_TRAMPOLINE_ENTER(r4)
+ PPC_LL r3, VCPU_TRAMPOLINE_ENTER(r4)
LOAD_REG_IMMEDIATE(r4, MSR_KERNEL & ~(MSR_IR | MSR_DR))
- /* Jump to SLB patching handlder and into our guest */
+ /* Jump to segment patching handler and into our guest */
bctr
/*
@@ -149,31 +166,20 @@ kvmppc_handler_highmem:
/*
* Register usage at this point:
*
- * R0 = guest last inst
- * R1 = host R1
- * R2 = host R2
- * R3 = guest PC
- * R4 = guest MSR
- * R5 = guest DAR
- * R6 = guest DSISR
- * R13 = PACA
- * PACA.KVM.* = guest *
+ * R1 = host R1
+ * R2 = host R2
+ * R12 = exit handler id
+ * R13 = PACA
+ * SVCPU.* = guest *
*
*/
/* R7 = vcpu */
- ld r7, GPR4(r1)
+ PPC_LL r7, GPR4(r1)
- /* Now save the guest state */
+#ifdef CONFIG_PPC_BOOK3S_64
- stw r0, VCPU_LAST_INST(r7)
-
- std r3, VCPU_PC(r7)
- std r4, VCPU_SHADOW_SRR1(r7)
- std r5, VCPU_FAULT_DEAR(r7)
- std r6, VCPU_FAULT_DSISR(r7)
-
- ld r5, VCPU_HFLAGS(r7)
+ PPC_LL r5, VCPU_HFLAGS(r7)
rldicl. r5, r5, 0, 63 /* CR = ((r5 & 1) == 0) */
beq no_dcbz32_off
@@ -184,35 +190,29 @@ kvmppc_handler_highmem:
no_dcbz32_off:
- std r14, VCPU_GPR(r14)(r7)
- std r15, VCPU_GPR(r15)(r7)
- std r16, VCPU_GPR(r16)(r7)
- std r17, VCPU_GPR(r17)(r7)
- std r18, VCPU_GPR(r18)(r7)
- std r19, VCPU_GPR(r19)(r7)
- std r20, VCPU_GPR(r20)(r7)
- std r21, VCPU_GPR(r21)(r7)
- std r22, VCPU_GPR(r22)(r7)
- std r23, VCPU_GPR(r23)(r7)
- std r24, VCPU_GPR(r24)(r7)
- std r25, VCPU_GPR(r25)(r7)
- std r26, VCPU_GPR(r26)(r7)
- std r27, VCPU_GPR(r27)(r7)
- std r28, VCPU_GPR(r28)(r7)
- std r29, VCPU_GPR(r29)(r7)
- std r30, VCPU_GPR(r30)(r7)
- std r31, VCPU_GPR(r31)(r7)
-
- /* Save guest CTR */
- mfctr r5
- std r5, VCPU_CTR(r7)
-
- /* Save guest LR */
- mflr r5
- std r5, VCPU_LR(r7)
+#endif /* CONFIG_PPC_BOOK3S_64 */
+
+ PPC_STL r14, VCPU_GPR(r14)(r7)
+ PPC_STL r15, VCPU_GPR(r15)(r7)
+ PPC_STL r16, VCPU_GPR(r16)(r7)
+ PPC_STL r17, VCPU_GPR(r17)(r7)
+ PPC_STL r18, VCPU_GPR(r18)(r7)
+ PPC_STL r19, VCPU_GPR(r19)(r7)
+ PPC_STL r20, VCPU_GPR(r20)(r7)
+ PPC_STL r21, VCPU_GPR(r21)(r7)
+ PPC_STL r22, VCPU_GPR(r22)(r7)
+ PPC_STL r23, VCPU_GPR(r23)(r7)
+ PPC_STL r24, VCPU_GPR(r24)(r7)
+ PPC_STL r25, VCPU_GPR(r25)(r7)
+ PPC_STL r26, VCPU_GPR(r26)(r7)
+ PPC_STL r27, VCPU_GPR(r27)(r7)
+ PPC_STL r28, VCPU_GPR(r28)(r7)
+ PPC_STL r29, VCPU_GPR(r29)(r7)
+ PPC_STL r30, VCPU_GPR(r30)(r7)
+ PPC_STL r31, VCPU_GPR(r31)(r7)
/* Restore host msr -> SRR1 */
- ld r6, VCPU_HOST_MSR(r7)
+ PPC_LL r6, VCPU_HOST_MSR(r7)
/*
* For some interrupts, we need to call the real Linux
@@ -228,9 +228,12 @@ no_dcbz32_off:
beq call_linux_handler
cmpwi r12, BOOK3S_INTERRUPT_DECREMENTER
beq call_linux_handler
+ cmpwi r12, BOOK3S_INTERRUPT_PERFMON
+ beq call_linux_handler
/* Back to EE=1 */
mtmsr r6
+ sync
b kvm_return_point
call_linux_handler:
@@ -249,14 +252,14 @@ call_linux_handler:
*/
/* Restore host IP -> SRR0 */
- ld r5, VCPU_HOST_RETIP(r7)
+ PPC_LL r5, VCPU_HOST_RETIP(r7)
/* XXX Better move to a safe function?
* What if we get an HTAB flush in between mtsrr0 and mtsrr1? */
mtlr r12
- ld r4, VCPU_TRAMPOLINE_LOWMEM(r7)
+ PPC_LL r4, VCPU_TRAMPOLINE_LOWMEM(r7)
mtsrr0 r4
LOAD_REG_IMMEDIATE(r3, MSR_KERNEL & ~(MSR_IR | MSR_DR))
mtsrr1 r3
@@ -274,7 +277,7 @@ kvm_return_point:
/* Restore r3 (kvm_run) and r4 (vcpu) */
REST_2GPRS(3, r1)
- bl KVMPPC_HANDLE_EXIT
+ bl FUNC(kvmppc_handle_exit)
/* If RESUME_GUEST, get back in the loop */
cmpwi r3, RESUME_GUEST
@@ -285,7 +288,7 @@ kvm_return_point:
kvm_exit_loop:
- ld r4, _LINK(r1)
+ PPC_LL r4, _LINK(r1)
mtlr r4
/* Restore non-volatile host registers (r14 - r31) */
@@ -296,8 +299,8 @@ kvm_exit_loop:
kvm_loop_heavyweight:
- ld r4, _LINK(r1)
- std r4, (16 + SWITCH_FRAME_SIZE)(r1)
+ PPC_LL r4, _LINK(r1)
+ PPC_STL r4, (PPC_LR_STKOFF + SWITCH_FRAME_SIZE)(r1)
/* Load vcpu and cpu_run */
REST_2GPRS(3, r1)
@@ -315,4 +318,3 @@ kvm_loop_lightweight:
/* Jump back into the beginning of this function */
b kvm_start_lightweight
-
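The ld/std to PPC_LL/PPC_STL conversion above is what lets this entry/exit path assemble for both 32-bit and 64-bit Book3S: the macros resolve to the native-word load/store of the configured subarch. A simplified sketch of the idea (the in-tree definitions live in the asm-compat header and are wrapped so they also work from inline asm; this is just the shape):

	#ifdef __powerpc64__
	#define PPC_LL		ld	/* 64-bit: doubleword load/store */
	#define PPC_STL		std
	#define PPC_STLU	stdu
	#else
	#define PPC_LL		lwz	/* 32-bit: word load/store */
	#define PPC_STL		stw
	#define PPC_STLU	stwu
	#endif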
diff --git a/arch/powerpc/kvm/book3s_paired_singles.c b/arch/powerpc/kvm/book3s_paired_singles.c
new file mode 100644
index 000000000000..a9f66abafcb3
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_paired_singles.c
@@ -0,0 +1,1289 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright Novell Inc 2010
+ *
+ * Authors: Alexander Graf <agraf@suse.de>
+ */
+
+#include <asm/kvm.h>
+#include <asm/kvm_ppc.h>
+#include <asm/disassemble.h>
+#include <asm/kvm_book3s.h>
+#include <asm/kvm_fpu.h>
+#include <asm/reg.h>
+#include <asm/cacheflush.h>
+#include <linux/vmalloc.h>
+
+/* #define DEBUG */
+
+#ifdef DEBUG
+#define dprintk printk
+#else
+#define dprintk(...) do { } while(0);
+#endif
+
+#define OP_LFS 48
+#define OP_LFSU 49
+#define OP_LFD 50
+#define OP_LFDU 51
+#define OP_STFS 52
+#define OP_STFSU 53
+#define OP_STFD 54
+#define OP_STFDU 55
+#define OP_PSQ_L 56
+#define OP_PSQ_LU 57
+#define OP_PSQ_ST 60
+#define OP_PSQ_STU 61
+
+#define OP_31_LFSX 535
+#define OP_31_LFSUX 567
+#define OP_31_LFDX 599
+#define OP_31_LFDUX 631
+#define OP_31_STFSX 663
+#define OP_31_STFSUX 695
+#define OP_31_STFX 727
+#define OP_31_STFUX 759
+#define OP_31_LWIZX 887
+#define OP_31_STFIWX 983
+
+#define OP_59_FADDS 21
+#define OP_59_FSUBS 20
+#define OP_59_FSQRTS 22
+#define OP_59_FDIVS 18
+#define OP_59_FRES 24
+#define OP_59_FMULS 25
+#define OP_59_FRSQRTES 26
+#define OP_59_FMSUBS 28
+#define OP_59_FMADDS 29
+#define OP_59_FNMSUBS 30
+#define OP_59_FNMADDS 31
+
+#define OP_63_FCMPU 0
+#define OP_63_FCPSGN 8
+#define OP_63_FRSP 12
+#define OP_63_FCTIW 14
+#define OP_63_FCTIWZ 15
+#define OP_63_FDIV 18
+#define OP_63_FADD 21
+#define OP_63_FSQRT 22
+#define OP_63_FSEL 23
+#define OP_63_FRE 24
+#define OP_63_FMUL 25
+#define OP_63_FRSQRTE 26
+#define OP_63_FMSUB 28
+#define OP_63_FMADD 29
+#define OP_63_FNMSUB 30
+#define OP_63_FNMADD 31
+#define OP_63_FCMPO 32
+#define OP_63_MTFSB1 38 // XXX
+#define OP_63_FSUB 20
+#define OP_63_FNEG 40
+#define OP_63_MCRFS 64
+#define OP_63_MTFSB0 70
+#define OP_63_FMR 72
+#define OP_63_MTFSFI 134
+#define OP_63_FABS 264
+#define OP_63_MFFS 583
+#define OP_63_MTFSF 711
+
+#define OP_4X_PS_CMPU0 0
+#define OP_4X_PSQ_LX 6
+#define OP_4XW_PSQ_STX 7
+#define OP_4A_PS_SUM0 10
+#define OP_4A_PS_SUM1 11
+#define OP_4A_PS_MULS0 12
+#define OP_4A_PS_MULS1 13
+#define OP_4A_PS_MADDS0 14
+#define OP_4A_PS_MADDS1 15
+#define OP_4A_PS_DIV 18
+#define OP_4A_PS_SUB 20
+#define OP_4A_PS_ADD 21
+#define OP_4A_PS_SEL 23
+#define OP_4A_PS_RES 24
+#define OP_4A_PS_MUL 25
+#define OP_4A_PS_RSQRTE 26
+#define OP_4A_PS_MSUB 28
+#define OP_4A_PS_MADD 29
+#define OP_4A_PS_NMSUB 30
+#define OP_4A_PS_NMADD 31
+#define OP_4X_PS_CMPO0 32
+#define OP_4X_PSQ_LUX 38
+#define OP_4XW_PSQ_STUX 39
+#define OP_4X_PS_NEG 40
+#define OP_4X_PS_CMPU1 64
+#define OP_4X_PS_MR 72
+#define OP_4X_PS_CMPO1 96
+#define OP_4X_PS_NABS 136
+#define OP_4X_PS_ABS 264
+#define OP_4X_PS_MERGE00 528
+#define OP_4X_PS_MERGE01 560
+#define OP_4X_PS_MERGE10 592
+#define OP_4X_PS_MERGE11 624
+
+#define SCALAR_NONE 0
+#define SCALAR_HIGH (1 << 0)
+#define SCALAR_LOW (1 << 1)
+#define SCALAR_NO_PS0 (1 << 2)
+#define SCALAR_NO_PS1 (1 << 3)
+
+#define GQR_ST_TYPE_MASK 0x00000007
+#define GQR_ST_TYPE_SHIFT 0
+#define GQR_ST_SCALE_MASK 0x00003f00
+#define GQR_ST_SCALE_SHIFT 8
+#define GQR_LD_TYPE_MASK 0x00070000
+#define GQR_LD_TYPE_SHIFT 16
+#define GQR_LD_SCALE_MASK 0x3f000000
+#define GQR_LD_SCALE_SHIFT 24
+
+#define GQR_QUANTIZE_FLOAT 0
+#define GQR_QUANTIZE_U8 4
+#define GQR_QUANTIZE_U16 5
+#define GQR_QUANTIZE_S8 6
+#define GQR_QUANTIZE_S16 7
+
+#define FPU_LS_SINGLE 0
+#define FPU_LS_DOUBLE 1
+#define FPU_LS_SINGLE_LOW 2
+
+static inline void kvmppc_sync_qpr(struct kvm_vcpu *vcpu, int rt)
+{
+ struct thread_struct t;
+
+ t.fpscr.val = vcpu->arch.fpscr;
+ cvt_df((double*)&vcpu->arch.fpr[rt], (float*)&vcpu->arch.qpr[rt], &t);
+}
+
+static void kvmppc_inject_pf(struct kvm_vcpu *vcpu, ulong eaddr, bool is_store)
+{
+ u64 dsisr;
+
+ vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 33, 36, 0);
+ vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 42, 47, 0);
+ vcpu->arch.dear = eaddr;
+ /* Page Fault */
+ dsisr = kvmppc_set_field(0, 33, 33, 1);
+ if (is_store)
+ to_book3s(vcpu)->dsisr = kvmppc_set_field(dsisr, 38, 38, 1);
+ kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE);
+}
+
+static int kvmppc_emulate_fpr_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ int rs, ulong addr, int ls_type)
+{
+ int emulated = EMULATE_FAIL;
+ struct thread_struct t;
+ int r;
+ char tmp[8];
+ int len = sizeof(u32);
+
+ if (ls_type == FPU_LS_DOUBLE)
+ len = sizeof(u64);
+
+ t.fpscr.val = vcpu->arch.fpscr;
+
+ /* read from memory */
+ r = kvmppc_ld(vcpu, &addr, len, tmp, true);
+ vcpu->arch.paddr_accessed = addr;
+
+ if (r < 0) {
+ kvmppc_inject_pf(vcpu, addr, false);
+ goto done_load;
+ } else if (r == EMULATE_DO_MMIO) {
+ emulated = kvmppc_handle_load(run, vcpu, KVM_REG_FPR | rs, len, 1);
+ goto done_load;
+ }
+
+ emulated = EMULATE_DONE;
+
+ /* put in registers */
+ switch (ls_type) {
+ case FPU_LS_SINGLE:
+ cvt_fd((float*)tmp, (double*)&vcpu->arch.fpr[rs], &t);
+ vcpu->arch.qpr[rs] = *((u32*)tmp);
+ break;
+ case FPU_LS_DOUBLE:
+ vcpu->arch.fpr[rs] = *((u64*)tmp);
+ break;
+ }
+
+ dprintk(KERN_INFO "KVM: FPR_LD [0x%llx] at 0x%lx (%d)\n", *(u64*)tmp,
+ addr, len);
+
+done_load:
+ return emulated;
+}
+
+static int kvmppc_emulate_fpr_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ int rs, ulong addr, int ls_type)
+{
+ int emulated = EMULATE_FAIL;
+ struct thread_struct t;
+ int r;
+ char tmp[8];
+ u64 val;
+ int len;
+
+ t.fpscr.val = vcpu->arch.fpscr;
+
+ switch (ls_type) {
+ case FPU_LS_SINGLE:
+ cvt_df((double*)&vcpu->arch.fpr[rs], (float*)tmp, &t);
+ val = *((u32*)tmp);
+ len = sizeof(u32);
+ break;
+ case FPU_LS_SINGLE_LOW:
+ *((u32*)tmp) = vcpu->arch.fpr[rs];
+ val = vcpu->arch.fpr[rs] & 0xffffffff;
+ len = sizeof(u32);
+ break;
+ case FPU_LS_DOUBLE:
+ *((u64*)tmp) = vcpu->arch.fpr[rs];
+ val = vcpu->arch.fpr[rs];
+ len = sizeof(u64);
+ break;
+ default:
+ val = 0;
+ len = 0;
+ }
+
+ r = kvmppc_st(vcpu, &addr, len, tmp, true);
+ vcpu->arch.paddr_accessed = addr;
+ if (r < 0) {
+ kvmppc_inject_pf(vcpu, addr, true);
+ } else if (r == EMULATE_DO_MMIO) {
+ emulated = kvmppc_handle_store(run, vcpu, val, len, 1);
+ } else {
+ emulated = EMULATE_DONE;
+ }
+
+ dprintk(KERN_INFO "KVM: FPR_ST [0x%llx] at 0x%lx (%d)\n",
+ val, addr, len);
+
+ return emulated;
+}
+
+static int kvmppc_emulate_psq_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ int rs, ulong addr, bool w, int i)
+{
+ int emulated = EMULATE_FAIL;
+ struct thread_struct t;
+ int r;
+ float one = 1.0;
+ u32 tmp[2];
+
+ t.fpscr.val = vcpu->arch.fpscr;
+
+ /* read from memory */
+ if (w) {
+ r = kvmppc_ld(vcpu, &addr, sizeof(u32), tmp, true);
+ memcpy(&tmp[1], &one, sizeof(u32));
+ } else {
+ r = kvmppc_ld(vcpu, &addr, sizeof(u32) * 2, tmp, true);
+ }
+ vcpu->arch.paddr_accessed = addr;
+ if (r < 0) {
+ kvmppc_inject_pf(vcpu, addr, false);
+ goto done_load;
+ } else if ((r == EMULATE_DO_MMIO) && w) {
+ emulated = kvmppc_handle_load(run, vcpu, KVM_REG_FPR | rs, 4, 1);
+ vcpu->arch.qpr[rs] = tmp[1];
+ goto done_load;
+ } else if (r == EMULATE_DO_MMIO) {
+ emulated = kvmppc_handle_load(run, vcpu, KVM_REG_FQPR | rs, 8, 1);
+ goto done_load;
+ }
+
+ emulated = EMULATE_DONE;
+
+ /* put in registers */
+ cvt_fd((float*)&tmp[0], (double*)&vcpu->arch.fpr[rs], &t);
+ vcpu->arch.qpr[rs] = tmp[1];
+
+ dprintk(KERN_INFO "KVM: PSQ_LD [0x%x, 0x%x] at 0x%lx (%d)\n", tmp[0],
+ tmp[1], addr, w ? 4 : 8);
+
+done_load:
+ return emulated;
+}
+
+static int kvmppc_emulate_psq_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ int rs, ulong addr, bool w, int i)
+{
+ int emulated = EMULATE_FAIL;
+ struct thread_struct t;
+ int r;
+ u32 tmp[2];
+ int len = w ? sizeof(u32) : sizeof(u64);
+
+ t.fpscr.val = vcpu->arch.fpscr;
+
+ cvt_df((double*)&vcpu->arch.fpr[rs], (float*)&tmp[0], &t);
+ tmp[1] = vcpu->arch.qpr[rs];
+
+ r = kvmppc_st(vcpu, &addr, len, tmp, true);
+ vcpu->arch.paddr_accessed = addr;
+ if (r < 0) {
+ kvmppc_inject_pf(vcpu, addr, true);
+ } else if ((r == EMULATE_DO_MMIO) && w) {
+ emulated = kvmppc_handle_store(run, vcpu, tmp[0], 4, 1);
+ } else if (r == EMULATE_DO_MMIO) {
+ u64 val = ((u64)tmp[0] << 32) | tmp[1];
+ emulated = kvmppc_handle_store(run, vcpu, val, 8, 1);
+ } else {
+ emulated = EMULATE_DONE;
+ }
+
+ dprintk(KERN_INFO "KVM: PSQ_ST [0x%x, 0x%x] at 0x%lx (%d)\n",
+ tmp[0], tmp[1], addr, len);
+
+ return emulated;
+}
+
+/*
+ * Cuts out inst bits with ordering according to spec.
+ * That means the leftmost bit is zero. All given bits are included.
+ */
+static inline u32 inst_get_field(u32 inst, int msb, int lsb)
+{
+ return kvmppc_get_field(inst, msb + 32, lsb + 32);
+}
+
+/*
+ * Replaces inst bits with ordering according to spec.
+ */
+static inline u32 inst_set_field(u32 inst, int msb, int lsb, int value)
+{
+ return kvmppc_set_field(inst, msb + 32, lsb + 32, value);
+}
+
+bool kvmppc_inst_is_paired_single(struct kvm_vcpu *vcpu, u32 inst)
+{
+ if (!(vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE))
+ return false;
+
+ switch (get_op(inst)) {
+ case OP_PSQ_L:
+ case OP_PSQ_LU:
+ case OP_PSQ_ST:
+ case OP_PSQ_STU:
+ case OP_LFS:
+ case OP_LFSU:
+ case OP_LFD:
+ case OP_LFDU:
+ case OP_STFS:
+ case OP_STFSU:
+ case OP_STFD:
+ case OP_STFDU:
+ return true;
+ case 4:
+ /* X form */
+ switch (inst_get_field(inst, 21, 30)) {
+ case OP_4X_PS_CMPU0:
+ case OP_4X_PSQ_LX:
+ case OP_4X_PS_CMPO0:
+ case OP_4X_PSQ_LUX:
+ case OP_4X_PS_NEG:
+ case OP_4X_PS_CMPU1:
+ case OP_4X_PS_MR:
+ case OP_4X_PS_CMPO1:
+ case OP_4X_PS_NABS:
+ case OP_4X_PS_ABS:
+ case OP_4X_PS_MERGE00:
+ case OP_4X_PS_MERGE01:
+ case OP_4X_PS_MERGE10:
+ case OP_4X_PS_MERGE11:
+ return true;
+ }
+ /* XW form */
+ switch (inst_get_field(inst, 25, 30)) {
+ case OP_4XW_PSQ_STX:
+ case OP_4XW_PSQ_STUX:
+ return true;
+ }
+ /* A form */
+ switch (inst_get_field(inst, 26, 30)) {
+ case OP_4A_PS_SUM1:
+ case OP_4A_PS_SUM0:
+ case OP_4A_PS_MULS0:
+ case OP_4A_PS_MULS1:
+ case OP_4A_PS_MADDS0:
+ case OP_4A_PS_MADDS1:
+ case OP_4A_PS_DIV:
+ case OP_4A_PS_SUB:
+ case OP_4A_PS_ADD:
+ case OP_4A_PS_SEL:
+ case OP_4A_PS_RES:
+ case OP_4A_PS_MUL:
+ case OP_4A_PS_RSQRTE:
+ case OP_4A_PS_MSUB:
+ case OP_4A_PS_MADD:
+ case OP_4A_PS_NMSUB:
+ case OP_4A_PS_NMADD:
+ return true;
+ }
+ break;
+ case 59:
+ switch (inst_get_field(inst, 21, 30)) {
+ case OP_59_FADDS:
+ case OP_59_FSUBS:
+ case OP_59_FDIVS:
+ case OP_59_FRES:
+ case OP_59_FRSQRTES:
+ return true;
+ }
+ switch (inst_get_field(inst, 26, 30)) {
+ case OP_59_FMULS:
+ case OP_59_FMSUBS:
+ case OP_59_FMADDS:
+ case OP_59_FNMSUBS:
+ case OP_59_FNMADDS:
+ return true;
+ }
+ break;
+ case 63:
+ switch (inst_get_field(inst, 21, 30)) {
+ case OP_63_MTFSB0:
+ case OP_63_MTFSB1:
+ case OP_63_MTFSF:
+ case OP_63_MTFSFI:
+ case OP_63_MCRFS:
+ case OP_63_MFFS:
+ case OP_63_FCMPU:
+ case OP_63_FCMPO:
+ case OP_63_FNEG:
+ case OP_63_FMR:
+ case OP_63_FABS:
+ case OP_63_FRSP:
+ case OP_63_FDIV:
+ case OP_63_FADD:
+ case OP_63_FSUB:
+ case OP_63_FCTIW:
+ case OP_63_FCTIWZ:
+ case OP_63_FRSQRTE:
+ case OP_63_FCPSGN:
+ return true;
+ }
+ switch (inst_get_field(inst, 26, 30)) {
+ case OP_63_FMUL:
+ case OP_63_FSEL:
+ case OP_63_FMSUB:
+ case OP_63_FMADD:
+ case OP_63_FNMSUB:
+ case OP_63_FNMADD:
+ return true;
+ }
+ break;
+ case 31:
+ switch (inst_get_field(inst, 21, 30)) {
+ case OP_31_LFSX:
+ case OP_31_LFSUX:
+ case OP_31_LFDX:
+ case OP_31_LFDUX:
+ case OP_31_STFSX:
+ case OP_31_STFSUX:
+ case OP_31_STFX:
+ case OP_31_STFUX:
+ case OP_31_STFIWX:
+ return true;
+ }
+ break;
+ }
+
+ return false;
+}
+
+static int get_d_signext(u32 inst)
+{
+ int d = inst & 0x8ff;
+
+ if (d & 0x800)
+ return -(d & 0x7ff);
+
+ return (d & 0x7ff);
+}
+
+static int kvmppc_ps_three_in(struct kvm_vcpu *vcpu, bool rc,
+ int reg_out, int reg_in1, int reg_in2,
+ int reg_in3, int scalar,
+ void (*func)(struct thread_struct *t,
+ u32 *dst, u32 *src1,
+ u32 *src2, u32 *src3))
+{
+ u32 *qpr = vcpu->arch.qpr;
+ u64 *fpr = vcpu->arch.fpr;
+ u32 ps0_out;
+ u32 ps0_in1, ps0_in2, ps0_in3;
+ u32 ps1_in1, ps1_in2, ps1_in3;
+ struct thread_struct t;
+ t.fpscr.val = vcpu->arch.fpscr;
+
+ /* RC */
+ WARN_ON(rc);
+
+ /* PS0 */
+ cvt_df((double*)&fpr[reg_in1], (float*)&ps0_in1, &t);
+ cvt_df((double*)&fpr[reg_in2], (float*)&ps0_in2, &t);
+ cvt_df((double*)&fpr[reg_in3], (float*)&ps0_in3, &t);
+
+ if (scalar & SCALAR_LOW)
+ ps0_in2 = qpr[reg_in2];
+
+ func(&t, &ps0_out, &ps0_in1, &ps0_in2, &ps0_in3);
+
+ dprintk(KERN_INFO "PS3 ps0 -> f(0x%x, 0x%x, 0x%x) = 0x%x\n",
+ ps0_in1, ps0_in2, ps0_in3, ps0_out);
+
+ if (!(scalar & SCALAR_NO_PS0))
+ cvt_fd((float*)&ps0_out, (double*)&fpr[reg_out], &t);
+
+ /* PS1 */
+ ps1_in1 = qpr[reg_in1];
+ ps1_in2 = qpr[reg_in2];
+ ps1_in3 = qpr[reg_in3];
+
+ if (scalar & SCALAR_HIGH)
+ ps1_in2 = ps0_in2;
+
+ if (!(scalar & SCALAR_NO_PS1))
+ func(&t, &qpr[reg_out], &ps1_in1, &ps1_in2, &ps1_in3);
+
+ dprintk(KERN_INFO "PS3 ps1 -> f(0x%x, 0x%x, 0x%x) = 0x%x\n",
+ ps1_in1, ps1_in2, ps1_in3, qpr[reg_out]);
+
+ return EMULATE_DONE;
+}
+
+static int kvmppc_ps_two_in(struct kvm_vcpu *vcpu, bool rc,
+ int reg_out, int reg_in1, int reg_in2,
+ int scalar,
+ void (*func)(struct thread_struct *t,
+ u32 *dst, u32 *src1,
+ u32 *src2))
+{
+ u32 *qpr = vcpu->arch.qpr;
+ u64 *fpr = vcpu->arch.fpr;
+ u32 ps0_out;
+ u32 ps0_in1, ps0_in2;
+ u32 ps1_out;
+ u32 ps1_in1, ps1_in2;
+ struct thread_struct t;
+ t.fpscr.val = vcpu->arch.fpscr;
+
+ /* RC */
+ WARN_ON(rc);
+
+ /* PS0 */
+ cvt_df((double*)&fpr[reg_in1], (float*)&ps0_in1, &t);
+
+ if (scalar & SCALAR_LOW)
+ ps0_in2 = qpr[reg_in2];
+ else
+ cvt_df((double*)&fpr[reg_in2], (float*)&ps0_in2, &t);
+
+ func(&t, &ps0_out, &ps0_in1, &ps0_in2);
+
+ if (!(scalar & SCALAR_NO_PS0)) {
+ dprintk(KERN_INFO "PS2 ps0 -> f(0x%x, 0x%x) = 0x%x\n",
+ ps0_in1, ps0_in2, ps0_out);
+
+ cvt_fd((float*)&ps0_out, (double*)&fpr[reg_out], &t);
+ }
+
+ /* PS1 */
+ ps1_in1 = qpr[reg_in1];
+ ps1_in2 = qpr[reg_in2];
+
+ if (scalar & SCALAR_HIGH)
+ ps1_in2 = ps0_in2;
+
+ func(&t, &ps1_out, &ps1_in1, &ps1_in2);
+
+ if (!(scalar & SCALAR_NO_PS1)) {
+ qpr[reg_out] = ps1_out;
+
+ dprintk(KERN_INFO "PS2 ps1 -> f(0x%x, 0x%x) = 0x%x\n",
+ ps1_in1, ps1_in2, qpr[reg_out]);
+ }
+
+ return EMULATE_DONE;
+}
+
+static int kvmppc_ps_one_in(struct kvm_vcpu *vcpu, bool rc,
+ int reg_out, int reg_in,
+ void (*func)(struct thread_struct *t,
+ u32 *dst, u32 *src1))
+{
+ u32 *qpr = vcpu->arch.qpr;
+ u64 *fpr = vcpu->arch.fpr;
+ u32 ps0_out, ps0_in;
+ u32 ps1_in;
+ struct thread_struct t;
+ t.fpscr.val = vcpu->arch.fpscr;
+
+ /* RC */
+ WARN_ON(rc);
+
+ /* PS0 */
+ cvt_df((double*)&fpr[reg_in], (float*)&ps0_in, &t);
+ func(&t, &ps0_out, &ps0_in);
+
+ dprintk(KERN_INFO "PS1 ps0 -> f(0x%x) = 0x%x\n",
+ ps0_in, ps0_out);
+
+ cvt_fd((float*)&ps0_out, (double*)&fpr[reg_out], &t);
+
+ /* PS1 */
+ ps1_in = qpr[reg_in];
+ func(&t, &qpr[reg_out], &ps1_in);
+
+ dprintk(KERN_INFO "PS1 ps1 -> f(0x%x) = 0x%x\n",
+ ps1_in, qpr[reg_out]);
+
+ return EMULATE_DONE;
+}
+
+int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+ u32 inst = kvmppc_get_last_inst(vcpu);
+ enum emulation_result emulated = EMULATE_DONE;
+
+ int ax_rd = inst_get_field(inst, 6, 10);
+ int ax_ra = inst_get_field(inst, 11, 15);
+ int ax_rb = inst_get_field(inst, 16, 20);
+ int ax_rc = inst_get_field(inst, 21, 25);
+ short full_d = inst_get_field(inst, 16, 31);
+
+ u64 *fpr_d = &vcpu->arch.fpr[ax_rd];
+ u64 *fpr_a = &vcpu->arch.fpr[ax_ra];
+ u64 *fpr_b = &vcpu->arch.fpr[ax_rb];
+ u64 *fpr_c = &vcpu->arch.fpr[ax_rc];
+
+ bool rcomp = (inst & 1) ? true : false;
+ u32 cr = kvmppc_get_cr(vcpu);
+ struct thread_struct t;
+#ifdef DEBUG
+ int i;
+#endif
+
+ t.fpscr.val = vcpu->arch.fpscr;
+
+ if (!kvmppc_inst_is_paired_single(vcpu, inst))
+ return EMULATE_FAIL;
+
+ if (!(vcpu->arch.msr & MSR_FP)) {
+ kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL);
+ return EMULATE_AGAIN;
+ }
+
+ kvmppc_giveup_ext(vcpu, MSR_FP);
+ preempt_disable();
+ enable_kernel_fp();
+ /* Do we need to clear FE0 / FE1 here? Don't think so. */
+
+#ifdef DEBUG
+ for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) {
+ u32 f;
+ cvt_df((double*)&vcpu->arch.fpr[i], (float*)&f, &t);
+ dprintk(KERN_INFO "FPR[%d] = 0x%x / 0x%llx QPR[%d] = 0x%x\n",
+ i, f, vcpu->arch.fpr[i], i, vcpu->arch.qpr[i]);
+ }
+#endif
+
+ switch (get_op(inst)) {
+ case OP_PSQ_L:
+ {
+ ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0;
+ bool w = inst_get_field(inst, 16, 16) ? true : false;
+ int i = inst_get_field(inst, 17, 19);
+
+ addr += get_d_signext(inst);
+ emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i);
+ break;
+ }
+ case OP_PSQ_LU:
+ {
+ ulong addr = kvmppc_get_gpr(vcpu, ax_ra);
+ bool w = inst_get_field(inst, 16, 16) ? true : false;
+ int i = inst_get_field(inst, 17, 19);
+
+ addr += get_d_signext(inst);
+ emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i);
+
+ if (emulated == EMULATE_DONE)
+ kvmppc_set_gpr(vcpu, ax_ra, addr);
+ break;
+ }
+ case OP_PSQ_ST:
+ {
+ ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0;
+ bool w = inst_get_field(inst, 16, 16) ? true : false;
+ int i = inst_get_field(inst, 17, 19);
+
+ addr += get_d_signext(inst);
+ emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i);
+ break;
+ }
+ case OP_PSQ_STU:
+ {
+ ulong addr = kvmppc_get_gpr(vcpu, ax_ra);
+ bool w = inst_get_field(inst, 16, 16) ? true : false;
+ int i = inst_get_field(inst, 17, 19);
+
+ addr += get_d_signext(inst);
+ emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i);
+
+ if (emulated == EMULATE_DONE)
+ kvmppc_set_gpr(vcpu, ax_ra, addr);
+ break;
+ }
+ case 4:
+ /* X form */
+ switch (inst_get_field(inst, 21, 30)) {
+ case OP_4X_PS_CMPU0:
+ /* XXX */
+ emulated = EMULATE_FAIL;
+ break;
+ case OP_4X_PSQ_LX:
+ {
+ ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0;
+ bool w = inst_get_field(inst, 21, 21) ? true : false;
+ int i = inst_get_field(inst, 22, 24);
+
+ addr += kvmppc_get_gpr(vcpu, ax_rb);
+ emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i);
+ break;
+ }
+ case OP_4X_PS_CMPO0:
+ /* XXX */
+ emulated = EMULATE_FAIL;
+ break;
+ case OP_4X_PSQ_LUX:
+ {
+ ulong addr = kvmppc_get_gpr(vcpu, ax_ra);
+ bool w = inst_get_field(inst, 21, 21) ? true : false;
+ int i = inst_get_field(inst, 22, 24);
+
+ addr += kvmppc_get_gpr(vcpu, ax_rb);
+ emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i);
+
+ if (emulated == EMULATE_DONE)
+ kvmppc_set_gpr(vcpu, ax_ra, addr);
+ break;
+ }
+ case OP_4X_PS_NEG:
+ vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rb];
+ vcpu->arch.fpr[ax_rd] ^= 0x8000000000000000ULL;
+ vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
+ vcpu->arch.qpr[ax_rd] ^= 0x80000000;
+ break;
+ case OP_4X_PS_CMPU1:
+ /* XXX */
+ emulated = EMULATE_FAIL;
+ break;
+ case OP_4X_PS_MR:
+ WARN_ON(rcomp);
+ vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rb];
+ vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
+ break;
+ case OP_4X_PS_CMPO1:
+ /* XXX */
+ emulated = EMULATE_FAIL;
+ break;
+ case OP_4X_PS_NABS:
+ WARN_ON(rcomp);
+ vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rb];
+ vcpu->arch.fpr[ax_rd] |= 0x8000000000000000ULL;
+ vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
+ vcpu->arch.qpr[ax_rd] |= 0x80000000;
+ break;
+ case OP_4X_PS_ABS:
+ WARN_ON(rcomp);
+ vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rb];
+ vcpu->arch.fpr[ax_rd] &= ~0x8000000000000000ULL;
+ vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
+ vcpu->arch.qpr[ax_rd] &= ~0x80000000;
+ break;
+ case OP_4X_PS_MERGE00:
+ WARN_ON(rcomp);
+ vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_ra];
+ /* vcpu->arch.qpr[ax_rd] = vcpu->arch.fpr[ax_rb]; */
+ cvt_df((double*)&vcpu->arch.fpr[ax_rb],
+ (float*)&vcpu->arch.qpr[ax_rd], &t);
+ break;
+ case OP_4X_PS_MERGE01:
+ WARN_ON(rcomp);
+ vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_ra];
+ vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
+ break;
+ case OP_4X_PS_MERGE10:
+ WARN_ON(rcomp);
+ /* vcpu->arch.fpr[ax_rd] = vcpu->arch.qpr[ax_ra]; */
+ cvt_fd((float*)&vcpu->arch.qpr[ax_ra],
+ (double*)&vcpu->arch.fpr[ax_rd], &t);
+ /* vcpu->arch.qpr[ax_rd] = vcpu->arch.fpr[ax_rb]; */
+ cvt_df((double*)&vcpu->arch.fpr[ax_rb],
+ (float*)&vcpu->arch.qpr[ax_rd], &t);
+ break;
+ case OP_4X_PS_MERGE11:
+ WARN_ON(rcomp);
+ /* vcpu->arch.fpr[ax_rd] = vcpu->arch.qpr[ax_ra]; */
+ cvt_fd((float*)&vcpu->arch.qpr[ax_ra],
+ (double*)&vcpu->arch.fpr[ax_rd], &t);
+ vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
+ break;
+ }
+ /* XW form */
+ switch (inst_get_field(inst, 25, 30)) {
+ case OP_4XW_PSQ_STX:
+ {
+ ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0;
+ bool w = inst_get_field(inst, 21, 21) ? true : false;
+ int i = inst_get_field(inst, 22, 24);
+
+ addr += kvmppc_get_gpr(vcpu, ax_rb);
+ emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i);
+ break;
+ }
+ case OP_4XW_PSQ_STUX:
+ {
+ ulong addr = kvmppc_get_gpr(vcpu, ax_ra);
+ bool w = inst_get_field(inst, 21, 21) ? true : false;
+ int i = inst_get_field(inst, 22, 24);
+
+ addr += kvmppc_get_gpr(vcpu, ax_rb);
+ emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i);
+
+ if (emulated == EMULATE_DONE)
+ kvmppc_set_gpr(vcpu, ax_ra, addr);
+ break;
+ }
+ }
+ /* A form */
+ switch (inst_get_field(inst, 26, 30)) {
+ case OP_4A_PS_SUM1:
+ emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
+ ax_rb, ax_ra, SCALAR_NO_PS0 | SCALAR_HIGH, fps_fadds);
+ vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rc];
+ break;
+ case OP_4A_PS_SUM0:
+ emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
+ ax_ra, ax_rb, SCALAR_NO_PS1 | SCALAR_LOW, fps_fadds);
+ vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rc];
+ break;
+ case OP_4A_PS_MULS0:
+ emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
+ ax_ra, ax_rc, SCALAR_HIGH, fps_fmuls);
+ break;
+ case OP_4A_PS_MULS1:
+ emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
+ ax_ra, ax_rc, SCALAR_LOW, fps_fmuls);
+ break;
+ case OP_4A_PS_MADDS0:
+ emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
+ ax_ra, ax_rc, ax_rb, SCALAR_HIGH, fps_fmadds);
+ break;
+ case OP_4A_PS_MADDS1:
+ emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
+ ax_ra, ax_rc, ax_rb, SCALAR_LOW, fps_fmadds);
+ break;
+ case OP_4A_PS_DIV:
+ emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
+ ax_ra, ax_rb, SCALAR_NONE, fps_fdivs);
+ break;
+ case OP_4A_PS_SUB:
+ emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
+ ax_ra, ax_rb, SCALAR_NONE, fps_fsubs);
+ break;
+ case OP_4A_PS_ADD:
+ emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
+ ax_ra, ax_rb, SCALAR_NONE, fps_fadds);
+ break;
+ case OP_4A_PS_SEL:
+ emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
+ ax_ra, ax_rc, ax_rb, SCALAR_NONE, fps_fsel);
+ break;
+ case OP_4A_PS_RES:
+ emulated = kvmppc_ps_one_in(vcpu, rcomp, ax_rd,
+ ax_rb, fps_fres);
+ break;
+ case OP_4A_PS_MUL:
+ emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
+ ax_ra, ax_rc, SCALAR_NONE, fps_fmuls);
+ break;
+ case OP_4A_PS_RSQRTE:
+ emulated = kvmppc_ps_one_in(vcpu, rcomp, ax_rd,
+ ax_rb, fps_frsqrte);
+ break;
+ case OP_4A_PS_MSUB:
+ emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
+ ax_ra, ax_rc, ax_rb, SCALAR_NONE, fps_fmsubs);
+ break;
+ case OP_4A_PS_MADD:
+ emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
+ ax_ra, ax_rc, ax_rb, SCALAR_NONE, fps_fmadds);
+ break;
+ case OP_4A_PS_NMSUB:
+ emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
+ ax_ra, ax_rc, ax_rb, SCALAR_NONE, fps_fnmsubs);
+ break;
+ case OP_4A_PS_NMADD:
+ emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
+ ax_ra, ax_rc, ax_rb, SCALAR_NONE, fps_fnmadds);
+ break;
+ }
+ break;
+
+ /* Real FPU operations */
+
+ case OP_LFS:
+ {
+ ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d;
+
+ emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr,
+ FPU_LS_SINGLE);
+ break;
+ }
+ case OP_LFSU:
+ {
+ ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d;
+
+ emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr,
+ FPU_LS_SINGLE);
+
+ if (emulated == EMULATE_DONE)
+ kvmppc_set_gpr(vcpu, ax_ra, addr);
+ break;
+ }
+ case OP_LFD:
+ {
+ ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d;
+
+ emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr,
+ FPU_LS_DOUBLE);
+ break;
+ }
+ case OP_LFDU:
+ {
+ ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d;
+
+ emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr,
+ FPU_LS_DOUBLE);
+
+ if (emulated == EMULATE_DONE)
+ kvmppc_set_gpr(vcpu, ax_ra, addr);
+ break;
+ }
+ case OP_STFS:
+ {
+ ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d;
+
+ emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr,
+ FPU_LS_SINGLE);
+ break;
+ }
+ case OP_STFSU:
+ {
+ ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d;
+
+ emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr,
+ FPU_LS_SINGLE);
+
+ if (emulated == EMULATE_DONE)
+ kvmppc_set_gpr(vcpu, ax_ra, addr);
+ break;
+ }
+ case OP_STFD:
+ {
+ ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d;
+
+ emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr,
+ FPU_LS_DOUBLE);
+ break;
+ }
+ case OP_STFDU:
+ {
+ ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d;
+
+ emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr,
+ FPU_LS_DOUBLE);
+
+ if (emulated == EMULATE_DONE)
+ kvmppc_set_gpr(vcpu, ax_ra, addr);
+ break;
+ }
+ case 31:
+ switch (inst_get_field(inst, 21, 30)) {
+ case OP_31_LFSX:
+ {
+ ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0;
+
+ addr += kvmppc_get_gpr(vcpu, ax_rb);
+ emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd,
+ addr, FPU_LS_SINGLE);
+ break;
+ }
+ case OP_31_LFSUX:
+ {
+ ulong addr = kvmppc_get_gpr(vcpu, ax_ra) +
+ kvmppc_get_gpr(vcpu, ax_rb);
+
+ emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd,
+ addr, FPU_LS_SINGLE);
+
+ if (emulated == EMULATE_DONE)
+ kvmppc_set_gpr(vcpu, ax_ra, addr);
+ break;
+ }
+ case OP_31_LFDX:
+ {
+ ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) +
+ kvmppc_get_gpr(vcpu, ax_rb);
+
+ emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd,
+ addr, FPU_LS_DOUBLE);
+ break;
+ }
+ case OP_31_LFDUX:
+ {
+ ulong addr = kvmppc_get_gpr(vcpu, ax_ra) +
+ kvmppc_get_gpr(vcpu, ax_rb);
+
+ emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd,
+ addr, FPU_LS_DOUBLE);
+
+ if (emulated == EMULATE_DONE)
+ kvmppc_set_gpr(vcpu, ax_ra, addr);
+ break;
+ }
+ case OP_31_STFSX:
+ {
+ ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) +
+ kvmppc_get_gpr(vcpu, ax_rb);
+
+ emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
+ addr, FPU_LS_SINGLE);
+ break;
+ }
+ case OP_31_STFSUX:
+ {
+ ulong addr = kvmppc_get_gpr(vcpu, ax_ra) +
+ kvmppc_get_gpr(vcpu, ax_rb);
+
+ emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
+ addr, FPU_LS_SINGLE);
+
+ if (emulated == EMULATE_DONE)
+ kvmppc_set_gpr(vcpu, ax_ra, addr);
+ break;
+ }
+ case OP_31_STFX:
+ {
+ ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) +
+ kvmppc_get_gpr(vcpu, ax_rb);
+
+ emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
+ addr, FPU_LS_DOUBLE);
+ break;
+ }
+ case OP_31_STFUX:
+ {
+ ulong addr = kvmppc_get_gpr(vcpu, ax_ra) +
+ kvmppc_get_gpr(vcpu, ax_rb);
+
+ emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
+ addr, FPU_LS_DOUBLE);
+
+ if (emulated == EMULATE_DONE)
+ kvmppc_set_gpr(vcpu, ax_ra, addr);
+ break;
+ }
+ case OP_31_STFIWX:
+ {
+ ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) +
+ kvmppc_get_gpr(vcpu, ax_rb);
+
+ emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
+ addr,
+ FPU_LS_SINGLE_LOW);
+ break;
+ }
+ break;
+ }
+ break;
+ case 59:
+ switch (inst_get_field(inst, 21, 30)) {
+ case OP_59_FADDS:
+ fpd_fadds(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
+ kvmppc_sync_qpr(vcpu, ax_rd);
+ break;
+ case OP_59_FSUBS:
+ fpd_fsubs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
+ kvmppc_sync_qpr(vcpu, ax_rd);
+ break;
+ case OP_59_FDIVS:
+ fpd_fdivs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
+ kvmppc_sync_qpr(vcpu, ax_rd);
+ break;
+ case OP_59_FRES:
+ fpd_fres(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
+ kvmppc_sync_qpr(vcpu, ax_rd);
+ break;
+ case OP_59_FRSQRTES:
+ fpd_frsqrtes(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
+ kvmppc_sync_qpr(vcpu, ax_rd);
+ break;
+ }
+ switch (inst_get_field(inst, 26, 30)) {
+ case OP_59_FMULS:
+ fpd_fmuls(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c);
+ kvmppc_sync_qpr(vcpu, ax_rd);
+ break;
+ case OP_59_FMSUBS:
+ fpd_fmsubs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
+ kvmppc_sync_qpr(vcpu, ax_rd);
+ break;
+ case OP_59_FMADDS:
+ fpd_fmadds(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
+ kvmppc_sync_qpr(vcpu, ax_rd);
+ break;
+ case OP_59_FNMSUBS:
+ fpd_fnmsubs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
+ kvmppc_sync_qpr(vcpu, ax_rd);
+ break;
+ case OP_59_FNMADDS:
+ fpd_fnmadds(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
+ kvmppc_sync_qpr(vcpu, ax_rd);
+ break;
+ }
+ break;
+ case 63:
+ switch (inst_get_field(inst, 21, 30)) {
+ case OP_63_MTFSB0:
+ case OP_63_MTFSB1:
+ case OP_63_MCRFS:
+ case OP_63_MTFSFI:
+ /* XXX need to implement */
+ break;
+ case OP_63_MFFS:
+ /* XXX missing CR */
+ *fpr_d = vcpu->arch.fpscr;
+ break;
+ case OP_63_MTFSF:
+ /* XXX missing fm bits */
+ /* XXX missing CR */
+ vcpu->arch.fpscr = *fpr_b;
+ break;
+ case OP_63_FCMPU:
+ {
+ u32 tmp_cr;
+ u32 cr0_mask = 0xf0000000;
+ u32 cr_shift = inst_get_field(inst, 6, 8) * 4;
+
+ fpd_fcmpu(&vcpu->arch.fpscr, &tmp_cr, fpr_a, fpr_b);
+ cr &= ~(cr0_mask >> cr_shift);
+ cr |= (tmp_cr & cr0_mask) >> cr_shift;
+ break;
+ }
+ case OP_63_FCMPO:
+ {
+ u32 tmp_cr;
+ u32 cr0_mask = 0xf0000000;
+ u32 cr_shift = inst_get_field(inst, 6, 8) * 4;
+
+ fpd_fcmpo(&vcpu->arch.fpscr, &tmp_cr, fpr_a, fpr_b);
+ cr &= ~(cr0_mask >> cr_shift);
+ cr |= (tmp_cr & cr0_mask) >> cr_shift;
+ break;
+ }
+ case OP_63_FNEG:
+ fpd_fneg(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
+ break;
+ case OP_63_FMR:
+ *fpr_d = *fpr_b;
+ break;
+ case OP_63_FABS:
+ fpd_fabs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
+ break;
+ case OP_63_FCPSGN:
+ fpd_fcpsgn(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
+ break;
+ case OP_63_FDIV:
+ fpd_fdiv(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
+ break;
+ case OP_63_FADD:
+ fpd_fadd(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
+ break;
+ case OP_63_FSUB:
+ fpd_fsub(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
+ break;
+ case OP_63_FCTIW:
+ fpd_fctiw(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
+ break;
+ case OP_63_FCTIWZ:
+ fpd_fctiwz(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
+ break;
+ case OP_63_FRSP:
+ fpd_frsp(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
+ kvmppc_sync_qpr(vcpu, ax_rd);
+ break;
+ case OP_63_FRSQRTE:
+ {
+ double one = 1.0f;
+
+ /* fD = sqrt(fB) */
+ fpd_fsqrt(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
+ /* fD = 1.0f / fD */
+ fpd_fdiv(&vcpu->arch.fpscr, &cr, fpr_d, (u64*)&one, fpr_d);
+ break;
+ }
+ }
+ switch (inst_get_field(inst, 26, 30)) {
+ case OP_63_FMUL:
+ fpd_fmul(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c);
+ break;
+ case OP_63_FSEL:
+ fpd_fsel(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
+ break;
+ case OP_63_FMSUB:
+ fpd_fmsub(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
+ break;
+ case OP_63_FMADD:
+ fpd_fmadd(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
+ break;
+ case OP_63_FNMSUB:
+ fpd_fnmsub(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
+ break;
+ case OP_63_FNMADD:
+ fpd_fnmadd(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
+ break;
+ }
+ break;
+ }
+
+#ifdef DEBUG
+ for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) {
+ u32 f;
+ cvt_df((double*)&vcpu->arch.fpr[i], (float*)&f, &t);
+ dprintk(KERN_INFO "FPR[%d] = 0x%x\n", i, f);
+ }
+#endif
+
+ if (rcomp)
+ kvmppc_set_cr(vcpu, cr);
+
+ preempt_enable();
+
+ return emulated;
+}
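
Every field the decoder above reads comes from inst_get_field(inst, msb, lsb), which follows the Power ISA's big-endian bit numbering (bit 0 is the most significant bit of the 32-bit instruction word). The helper itself is defined earlier in the file, outside this excerpt; the definition below is an assumed equivalent, shown only to make the bit arithmetic explicit:

#include <stdint.h>

/* Assumed equivalent of inst_get_field(): IBM bit numbering, bit 0 = MSB. */
static inline uint32_t inst_get_field(uint32_t inst, int msb, int lsb)
{
	int width = lsb - msb + 1;

	return (inst >> (31 - lsb)) & ((1u << width) - 1);
}

/*
 * Example: for a primary-opcode-4 instruction, ax_rd is bits 6-10,
 * ax_ra bits 11-15, ax_rb bits 16-20 and ax_rc bits 21-25, exactly as
 * extracted at the top of kvmppc_emulate_paired_single() above.
 */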
diff --git a/arch/powerpc/kvm/book3s_64_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S
index c83c60ad96c5..506d5c316c96 100644
--- a/arch/powerpc/kvm/book3s_64_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_rmhandlers.S
@@ -22,7 +22,10 @@
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>
+
+#ifdef CONFIG_PPC_BOOK3S_64
#include <asm/exception-64s.h>
+#endif
/*****************************************************************************
* *
@@ -30,6 +33,39 @@
* *
****************************************************************************/
+#if defined(CONFIG_PPC_BOOK3S_64)
+
+#define LOAD_SHADOW_VCPU(reg) \
+ mfspr reg, SPRN_SPRG_PACA
+
+#define SHADOW_VCPU_OFF PACA_KVM_SVCPU
+#define MSR_NOIRQ MSR_KERNEL & ~(MSR_IR | MSR_DR)
+#define FUNC(name) GLUE(.,name)
+
+#elif defined(CONFIG_PPC_BOOK3S_32)
+
+#define LOAD_SHADOW_VCPU(reg) \
+ mfspr reg, SPRN_SPRG_THREAD; \
+ lwz reg, THREAD_KVM_SVCPU(reg); \
+ /* PPC32 can have a NULL pointer - let's check for that */ \
+ mtspr SPRN_SPRG_SCRATCH1, r12; /* Save r12 */ \
+ mfcr r12; \
+ cmpwi reg, 0; \
+ bne 1f; \
+ mfspr reg, SPRN_SPRG_SCRATCH0; \
+ mtcr r12; \
+ mfspr r12, SPRN_SPRG_SCRATCH1; \
+ b kvmppc_resume_\intno; \
+1:; \
+ mtcr r12; \
+ mfspr r12, SPRN_SPRG_SCRATCH1; \
+ tophys(reg, reg)
+
+#define SHADOW_VCPU_OFF 0
+#define MSR_NOIRQ MSR_KERNEL
+#define FUNC(name) name
+
+#endif
.macro INTERRUPT_TRAMPOLINE intno
@@ -42,19 +78,19 @@ kvmppc_trampoline_\intno:
* First thing to do is to find out if we're coming
* from a KVM guest or a Linux process.
*
- * To distinguish, we check a magic byte in the PACA
+ * To distinguish, we check a magic byte in the PACA/current
*/
- mfspr r13, SPRN_SPRG_PACA /* r13 = PACA */
- std r12, PACA_KVM_SCRATCH0(r13)
+ LOAD_SHADOW_VCPU(r13)
+ PPC_STL r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13)
mfcr r12
- stw r12, PACA_KVM_SCRATCH1(r13)
- lbz r12, PACA_KVM_IN_GUEST(r13)
+ stw r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13)
+ lbz r12, (SHADOW_VCPU_OFF + SVCPU_IN_GUEST)(r13)
cmpwi r12, KVM_GUEST_MODE_NONE
bne ..kvmppc_handler_hasmagic_\intno
/* No KVM guest? Then jump back to the Linux handler! */
- lwz r12, PACA_KVM_SCRATCH1(r13)
+ lwz r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13)
mtcr r12
- ld r12, PACA_KVM_SCRATCH0(r13)
+ PPC_LL r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13)
mfspr r13, SPRN_SPRG_SCRATCH0 /* r13 = original r13 */
b kvmppc_resume_\intno /* Get back original handler */
@@ -76,9 +112,7 @@ kvmppc_trampoline_\intno:
INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_SYSTEM_RESET
INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_MACHINE_CHECK
INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_DATA_STORAGE
-INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_DATA_SEGMENT
INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_INST_STORAGE
-INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_INST_SEGMENT
INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_EXTERNAL
INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_ALIGNMENT
INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_PROGRAM
@@ -88,7 +122,14 @@ INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_SYSCALL
INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_TRACE
INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_PERFMON
INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_ALTIVEC
+
+/* These are only available on 64-bit machines */
+
+#ifdef CONFIG_PPC_BOOK3S_64
+INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_DATA_SEGMENT
+INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_INST_SEGMENT
INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_VSX
+#endif
/*
* Bring us back to the faulting code, but skip the
@@ -99,11 +140,11 @@ INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_VSX
*
* Input Registers:
*
- * R12 = free
- * R13 = PACA
- * PACA.KVM.SCRATCH0 = guest R12
- * PACA.KVM.SCRATCH1 = guest CR
- * SPRG_SCRATCH0 = guest R13
+ * R12 = free
+ * R13 = Shadow VCPU (PACA)
+ * SVCPU.SCRATCH0 = guest R12
+ * SVCPU.SCRATCH1 = guest CR
+ * SPRG_SCRATCH0 = guest R13
*
*/
kvmppc_handler_skip_ins:
@@ -114,9 +155,9 @@ kvmppc_handler_skip_ins:
mtsrr0 r12
/* Clean up all state */
- lwz r12, PACA_KVM_SCRATCH1(r13)
+ lwz r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13)
mtcr r12
- ld r12, PACA_KVM_SCRATCH0(r13)
+ PPC_LL r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13)
mfspr r13, SPRN_SPRG_SCRATCH0
/* And get back into the code */
@@ -147,41 +188,48 @@ kvmppc_handler_lowmem_trampoline_end:
*
* R3 = function
* R4 = MSR
- * R5 = CTR
+ * R5 = scratch register
*
*/
_GLOBAL(kvmppc_rmcall)
- mtmsr r4 /* Disable relocation, so mtsrr
+ LOAD_REG_IMMEDIATE(r5, MSR_NOIRQ)
+ mtmsr r5 /* Disable relocation and interrupts, so mtsrr
doesn't get interrupted */
- mtctr r5
+ sync
mtsrr0 r3
mtsrr1 r4
RFI
+#if defined(CONFIG_PPC_BOOK3S_32)
+#define STACK_LR INT_FRAME_SIZE+4
+#elif defined(CONFIG_PPC_BOOK3S_64)
+#define STACK_LR _LINK
+#endif
+
/*
* Activate current's external feature (FPU/Altivec/VSX)
*/
-#define define_load_up(what) \
- \
-_GLOBAL(kvmppc_load_up_ ## what); \
- subi r1, r1, INT_FRAME_SIZE; \
- mflr r3; \
- std r3, _LINK(r1); \
- mfmsr r4; \
- std r31, GPR3(r1); \
- mr r31, r4; \
- li r5, MSR_DR; \
- oris r5, r5, MSR_EE@h; \
- andc r4, r4, r5; \
- mtmsr r4; \
- \
- bl .load_up_ ## what; \
- \
- mtmsr r31; \
- ld r3, _LINK(r1); \
- ld r31, GPR3(r1); \
- addi r1, r1, INT_FRAME_SIZE; \
- mtlr r3; \
+#define define_load_up(what) \
+ \
+_GLOBAL(kvmppc_load_up_ ## what); \
+ PPC_STLU r1, -INT_FRAME_SIZE(r1); \
+ mflr r3; \
+ PPC_STL r3, STACK_LR(r1); \
+ PPC_STL r20, _NIP(r1); \
+ mfmsr r20; \
+ LOAD_REG_IMMEDIATE(r3, MSR_DR|MSR_EE); \
+ andc r3,r20,r3; /* Disable DR,EE */ \
+ mtmsr r3; \
+ sync; \
+ \
+ bl FUNC(load_up_ ## what); \
+ \
+ mtmsr r20; /* Enable DR,EE */ \
+ sync; \
+ PPC_LL r3, STACK_LR(r1); \
+ PPC_LL r20, _NIP(r1); \
+ mtlr r3; \
+ addi r1, r1, INT_FRAME_SIZE; \
blr
define_load_up(fpu)
@@ -194,11 +242,10 @@ define_load_up(vsx)
.global kvmppc_trampoline_lowmem
kvmppc_trampoline_lowmem:
- .long kvmppc_handler_lowmem_trampoline - _stext
+ .long kvmppc_handler_lowmem_trampoline - CONFIG_KERNEL_START
.global kvmppc_trampoline_enter
kvmppc_trampoline_enter:
- .long kvmppc_handler_trampoline_enter - _stext
-
-#include "book3s_64_slb.S"
+ .long kvmppc_handler_trampoline_enter - CONFIG_KERNEL_START
+#include "book3s_segment.S"
diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S
new file mode 100644
index 000000000000..7c52ed0b7051
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_segment.S
@@ -0,0 +1,259 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright SUSE Linux Products GmbH 2010
+ *
+ * Authors: Alexander Graf <agraf@suse.de>
+ */
+
+/* Real mode helpers */
+
+#if defined(CONFIG_PPC_BOOK3S_64)
+
+#define GET_SHADOW_VCPU(reg) \
+ addi reg, r13, PACA_KVM_SVCPU
+
+#elif defined(CONFIG_PPC_BOOK3S_32)
+
+#define GET_SHADOW_VCPU(reg) \
+ tophys(reg, r2); \
+ lwz reg, (THREAD + THREAD_KVM_SVCPU)(reg); \
+ tophys(reg, reg)
+
+#endif
+
+/* Disable for nested KVM */
+#define USE_QUICK_LAST_INST
+
+
+/* Get helper functions for subarch specific functionality */
+
+#if defined(CONFIG_PPC_BOOK3S_64)
+#include "book3s_64_slb.S"
+#elif defined(CONFIG_PPC_BOOK3S_32)
+#include "book3s_32_sr.S"
+#endif
+
+/******************************************************************************
+ * *
+ * Entry code *
+ * *
+ *****************************************************************************/
+
+.global kvmppc_handler_trampoline_enter
+kvmppc_handler_trampoline_enter:
+
+ /* Required state:
+ *
+ * MSR = ~IR|DR
+ * R13 = PACA
+ * R1 = host R1
+ * R2 = host R2
+ * R10 = guest MSR
+ * all other volatile GPRS = free
+ * SVCPU[CR] = guest CR
+ * SVCPU[XER] = guest XER
+ * SVCPU[CTR] = guest CTR
+ * SVCPU[LR] = guest LR
+ */
+
+ /* r3 = shadow vcpu */
+ GET_SHADOW_VCPU(r3)
+
+ /* Move SRR0 and SRR1 into the respective regs */
+ PPC_LL r9, SVCPU_PC(r3)
+ mtsrr0 r9
+ mtsrr1 r10
+
+ /* Activate guest mode, so faults get handled by KVM */
+ li r11, KVM_GUEST_MODE_GUEST
+ stb r11, SVCPU_IN_GUEST(r3)
+
+ /* Switch to guest segment. This is subarch specific. */
+ LOAD_GUEST_SEGMENTS
+
+ /* Enter guest */
+
+ PPC_LL r4, (SVCPU_CTR)(r3)
+ PPC_LL r5, (SVCPU_LR)(r3)
+ lwz r6, (SVCPU_CR)(r3)
+ lwz r7, (SVCPU_XER)(r3)
+
+ mtctr r4
+ mtlr r5
+ mtcr r6
+ mtxer r7
+
+ PPC_LL r0, (SVCPU_R0)(r3)
+ PPC_LL r1, (SVCPU_R1)(r3)
+ PPC_LL r2, (SVCPU_R2)(r3)
+ PPC_LL r4, (SVCPU_R4)(r3)
+ PPC_LL r5, (SVCPU_R5)(r3)
+ PPC_LL r6, (SVCPU_R6)(r3)
+ PPC_LL r7, (SVCPU_R7)(r3)
+ PPC_LL r8, (SVCPU_R8)(r3)
+ PPC_LL r9, (SVCPU_R9)(r3)
+ PPC_LL r10, (SVCPU_R10)(r3)
+ PPC_LL r11, (SVCPU_R11)(r3)
+ PPC_LL r12, (SVCPU_R12)(r3)
+ PPC_LL r13, (SVCPU_R13)(r3)
+
+ PPC_LL r3, (SVCPU_R3)(r3)
+
+ RFI
+kvmppc_handler_trampoline_enter_end:
+
+
+
+/******************************************************************************
+ * *
+ * Exit code *
+ * *
+ *****************************************************************************/
+
+.global kvmppc_handler_trampoline_exit
+kvmppc_handler_trampoline_exit:
+
+ /* Register usage at this point:
+ *
+ * SPRG_SCRATCH0 = guest R13
+ * R12 = exit handler id
+ * R13 = shadow vcpu - SHADOW_VCPU_OFF [=PACA on PPC64]
+ * SVCPU.SCRATCH0 = guest R12
+ * SVCPU.SCRATCH1 = guest CR
+ *
+ */
+
+ /* Save registers */
+
+ PPC_STL r0, (SHADOW_VCPU_OFF + SVCPU_R0)(r13)
+ PPC_STL r1, (SHADOW_VCPU_OFF + SVCPU_R1)(r13)
+ PPC_STL r2, (SHADOW_VCPU_OFF + SVCPU_R2)(r13)
+ PPC_STL r3, (SHADOW_VCPU_OFF + SVCPU_R3)(r13)
+ PPC_STL r4, (SHADOW_VCPU_OFF + SVCPU_R4)(r13)
+ PPC_STL r5, (SHADOW_VCPU_OFF + SVCPU_R5)(r13)
+ PPC_STL r6, (SHADOW_VCPU_OFF + SVCPU_R6)(r13)
+ PPC_STL r7, (SHADOW_VCPU_OFF + SVCPU_R7)(r13)
+ PPC_STL r8, (SHADOW_VCPU_OFF + SVCPU_R8)(r13)
+ PPC_STL r9, (SHADOW_VCPU_OFF + SVCPU_R9)(r13)
+ PPC_STL r10, (SHADOW_VCPU_OFF + SVCPU_R10)(r13)
+ PPC_STL r11, (SHADOW_VCPU_OFF + SVCPU_R11)(r13)
+
+ /* Restore R1/R2 so we can handle faults */
+ PPC_LL r1, (SHADOW_VCPU_OFF + SVCPU_HOST_R1)(r13)
+ PPC_LL r2, (SHADOW_VCPU_OFF + SVCPU_HOST_R2)(r13)
+
+ /* Save guest PC and MSR */
+ mfsrr0 r3
+ mfsrr1 r4
+
+ PPC_STL r3, (SHADOW_VCPU_OFF + SVCPU_PC)(r13)
+ PPC_STL r4, (SHADOW_VCPU_OFF + SVCPU_SHADOW_SRR1)(r13)
+
+ /* Get scratch'ed off registers */
+ mfspr r9, SPRN_SPRG_SCRATCH0
+ PPC_LL r8, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13)
+ lwz r7, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13)
+
+ PPC_STL r9, (SHADOW_VCPU_OFF + SVCPU_R13)(r13)
+ PPC_STL r8, (SHADOW_VCPU_OFF + SVCPU_R12)(r13)
+ stw r7, (SHADOW_VCPU_OFF + SVCPU_CR)(r13)
+
+ /* Save more register state */
+
+ mfxer r5
+ mfdar r6
+ mfdsisr r7
+ mfctr r8
+ mflr r9
+
+ stw r5, (SHADOW_VCPU_OFF + SVCPU_XER)(r13)
+ PPC_STL r6, (SHADOW_VCPU_OFF + SVCPU_FAULT_DAR)(r13)
+ stw r7, (SHADOW_VCPU_OFF + SVCPU_FAULT_DSISR)(r13)
+ PPC_STL r8, (SHADOW_VCPU_OFF + SVCPU_CTR)(r13)
+ PPC_STL r9, (SHADOW_VCPU_OFF + SVCPU_LR)(r13)
+
+ /*
+ * In order to easily get the last instruction we got the
+ * #vmexit at, we exploit the fact that the virtual layout
+ * is still the same here, so we can just load from the
+ * guest's PC address.
+ */
+
+ /* We only load the last instruction when it's safe */
+ cmpwi r12, BOOK3S_INTERRUPT_DATA_STORAGE
+ beq ld_last_inst
+ cmpwi r12, BOOK3S_INTERRUPT_PROGRAM
+ beq ld_last_inst
+ cmpwi r12, BOOK3S_INTERRUPT_ALIGNMENT
+ beq- ld_last_inst
+
+ b no_ld_last_inst
+
+ld_last_inst:
+ /* Save off the guest instruction we're at */
+
+ /* In case lwz faults */
+ li r0, KVM_INST_FETCH_FAILED
+
+#ifdef USE_QUICK_LAST_INST
+
+ /* Set guest mode to 'jump over instruction' so if lwz faults
+ * we'll just continue at the next IP. */
+ li r9, KVM_GUEST_MODE_SKIP
+ stb r9, (SHADOW_VCPU_OFF + SVCPU_IN_GUEST)(r13)
+
+ /* 1) enable paging for data */
+ mfmsr r9
+ ori r11, r9, MSR_DR /* Enable paging for data */
+ mtmsr r11
+ sync
+ /* 2) fetch the instruction */
+ lwz r0, 0(r3)
+ /* 3) disable paging again */
+ mtmsr r9
+ sync
+
+#endif
+ stw r0, (SHADOW_VCPU_OFF + SVCPU_LAST_INST)(r13)
+
+no_ld_last_inst:
+
+ /* Unset guest mode */
+ li r9, KVM_GUEST_MODE_NONE
+ stb r9, (SHADOW_VCPU_OFF + SVCPU_IN_GUEST)(r13)
+
+ /* Switch back to host MMU */
+ LOAD_HOST_SEGMENTS
+
+ /* Register usage at this point:
+ *
+ * R1 = host R1
+ * R2 = host R2
+ * R12 = exit handler id
+ * R13 = shadow vcpu - SHADOW_VCPU_OFF [=PACA on PPC64]
+ * SVCPU.* = guest *
+ *
+ */
+
+ /* RFI into the highmem handler */
+ mfmsr r7
+ ori r7, r7, MSR_IR|MSR_DR|MSR_RI|MSR_ME /* Enable paging */
+ mtsrr1 r7
+ /* Load highmem handler address */
+ PPC_LL r8, (SHADOW_VCPU_OFF + SVCPU_VMHANDLER)(r13)
+ mtsrr0 r8
+
+ RFI
+kvmppc_handler_trampoline_exit_end:
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 2a3a1953d4bd..a33ab8cc2ccc 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -133,6 +133,12 @@ void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_EXTERNAL);
}
+void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
+ struct kvm_interrupt *irq)
+{
+ clear_bit(BOOKE_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions);
+}
+
/* Deliver the interrupt of the corresponding priority, if possible. */
static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
unsigned int priority)
@@ -479,6 +485,8 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
int i;
+ vcpu_load(vcpu);
+
regs->pc = vcpu->arch.pc;
regs->cr = kvmppc_get_cr(vcpu);
regs->ctr = vcpu->arch.ctr;
@@ -499,6 +507,8 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
+ vcpu_put(vcpu);
+
return 0;
}
@@ -506,6 +516,8 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
int i;
+ vcpu_load(vcpu);
+
vcpu->arch.pc = regs->pc;
kvmppc_set_cr(vcpu, regs->cr);
vcpu->arch.ctr = regs->ctr;
@@ -525,6 +537,8 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
+ vcpu_put(vcpu);
+
return 0;
}
@@ -553,7 +567,12 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
struct kvm_translation *tr)
{
- return kvmppc_core_vcpu_translate(vcpu, tr);
+ int r;
+
+ vcpu_load(vcpu);
+ r = kvmppc_core_vcpu_translate(vcpu, tr);
+ vcpu_put(vcpu);
+ return r;
}
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c
index 669a5c5fc7d7..bc2b4004eb26 100644
--- a/arch/powerpc/kvm/e500.c
+++ b/arch/powerpc/kvm/e500.c
@@ -161,7 +161,7 @@ static int __init kvmppc_e500_init(void)
flush_icache_range(kvmppc_booke_handlers,
kvmppc_booke_handlers + max_ivor + kvmppc_handler_len);
- return kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), THIS_MODULE);
+ return kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE);
}
static void __init kvmppc_e500_exit(void)
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index cb72a65f4ecc..4568ec386c2a 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -38,10 +38,12 @@
#define OP_31_XOP_LBZX 87
#define OP_31_XOP_STWX 151
#define OP_31_XOP_STBX 215
+#define OP_31_XOP_LBZUX 119
#define OP_31_XOP_STBUX 247
#define OP_31_XOP_LHZX 279
#define OP_31_XOP_LHZUX 311
#define OP_31_XOP_MFSPR 339
+#define OP_31_XOP_LHAX 343
#define OP_31_XOP_STHX 407
#define OP_31_XOP_STHUX 439
#define OP_31_XOP_MTSPR 467
@@ -62,10 +64,12 @@
#define OP_STBU 39
#define OP_LHZ 40
#define OP_LHZU 41
+#define OP_LHA 42
+#define OP_LHAU 43
#define OP_STH 44
#define OP_STHU 45
-#ifdef CONFIG_PPC64
+#ifdef CONFIG_PPC_BOOK3S
static int kvmppc_dec_enabled(struct kvm_vcpu *vcpu)
{
return 1;
@@ -82,7 +86,7 @@ void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
unsigned long dec_nsec;
pr_debug("mtDEC: %x\n", vcpu->arch.dec);
-#ifdef CONFIG_PPC64
+#ifdef CONFIG_PPC_BOOK3S
/* mtdec lowers the interrupt line when positive. */
kvmppc_core_dequeue_dec(vcpu);
@@ -128,7 +132,7 @@ void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
* from opcode tables in the future. */
int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
- u32 inst = vcpu->arch.last_inst;
+ u32 inst = kvmppc_get_last_inst(vcpu);
u32 ea;
int ra;
int rb;
@@ -143,13 +147,9 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
pr_debug(KERN_INFO "Emulating opcode %d / %d\n", get_op(inst), get_xop(inst));
- /* Try again next time */
- if (inst == KVM_INST_FETCH_FAILED)
- return EMULATE_DONE;
-
switch (get_op(inst)) {
case OP_TRAP:
-#ifdef CONFIG_PPC64
+#ifdef CONFIG_PPC_BOOK3S
case OP_TRAP_64:
kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
#else
@@ -171,6 +171,19 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
break;
+ case OP_31_XOP_LBZUX:
+ rt = get_rt(inst);
+ ra = get_ra(inst);
+ rb = get_rb(inst);
+
+ ea = kvmppc_get_gpr(vcpu, rb);
+ if (ra)
+ ea += kvmppc_get_gpr(vcpu, ra);
+
+ emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
+ kvmppc_set_gpr(vcpu, ra, ea);
+ break;
+
case OP_31_XOP_STWX:
rs = get_rs(inst);
emulated = kvmppc_handle_store(run, vcpu,
@@ -200,6 +213,11 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
kvmppc_set_gpr(vcpu, rs, ea);
break;
+ case OP_31_XOP_LHAX:
+ rt = get_rt(inst);
+ emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
+ break;
+
case OP_31_XOP_LHZX:
rt = get_rt(inst);
emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
@@ -450,6 +468,18 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
break;
+ case OP_LHA:
+ rt = get_rt(inst);
+ emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
+ break;
+
+ case OP_LHAU:
+ ra = get_ra(inst);
+ rt = get_rt(inst);
+ emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
+ kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
+ break;
+
case OP_STH:
rs = get_rs(inst);
emulated = kvmppc_handle_store(run, vcpu,
@@ -472,7 +502,9 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
if (emulated == EMULATE_FAIL) {
emulated = kvmppc_core_emulate_op(run, vcpu, inst, &advance);
- if (emulated == EMULATE_FAIL) {
+ if (emulated == EMULATE_AGAIN) {
+ advance = 0;
+ } else if (emulated == EMULATE_FAIL) {
advance = 0;
printk(KERN_ERR "Couldn't emulate instruction 0x%08x "
"(op %d xop %d)\n", inst, get_op(inst), get_xop(inst));
@@ -480,10 +512,11 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
}
}
- trace_kvm_ppc_instr(inst, vcpu->arch.pc, emulated);
+ trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);
+ /* Advance past emulated instruction. */
if (advance)
- vcpu->arch.pc += 4; /* Advance past emulated instruction. */
+ kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);
return emulated;
}
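
The new OP_LHA/OP_LHAU/OP_31_XOP_LHAX cases go through kvmppc_handle_loads() rather than kvmppc_handle_load() because lha-style loads sign-extend the halfword they fetch, which is what the mmio_sign_extend path added to powerpc.c implements. A plain user-space C illustration of the difference (not kernel code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t mmio = 0xfff6;			/* halfword read from emulated MMIO (-10) */

	int64_t  lha = (int64_t)(int16_t)mmio;	/* sign-extended, as mmio_sign_extend does */
	uint64_t lhz = (uint64_t)mmio;		/* zero-extended, the plain load path      */

	printf("lha -> %lld, lhz -> 0x%llx\n",
	       (long long)lha, (unsigned long long)lhz);
	return 0;
}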
diff --git a/arch/powerpc/kvm/fpu.S b/arch/powerpc/kvm/fpu.S
new file mode 100644
index 000000000000..2b340a3eee90
--- /dev/null
+++ b/arch/powerpc/kvm/fpu.S
@@ -0,0 +1,273 @@
+/*
+ * FPU helper code to use FPU operations from inside the kernel
+ *
+ * Copyright (C) 2010 Alexander Graf (agraf@suse.de)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <asm/reg.h>
+#include <asm/page.h>
+#include <asm/mmu.h>
+#include <asm/pgtable.h>
+#include <asm/cputable.h>
+#include <asm/cache.h>
+#include <asm/thread_info.h>
+#include <asm/ppc_asm.h>
+#include <asm/asm-offsets.h>
+
+/* Instructions operating on single-precision parameters */
+
+/*
+ * Single operation with one input operand
+ *
+ * R3 = (double*)&fpscr
+ * R4 = (float*)&result
+ * R5 = (float*)&param1
+ */
+#define FPS_ONE_IN(name) \
+_GLOBAL(fps_ ## name); \
+ lfd 0,0(r3); /* load up fpscr value */ \
+ MTFSF_L(0); \
+ lfs 0,0(r5); \
+ \
+ name 0,0; \
+ \
+ stfs 0,0(r4); \
+ mffs 0; \
+ stfd 0,0(r3); /* save new fpscr value */ \
+ blr
+
+/*
+ * Single operation with two input operands
+ *
+ * R3 = (double*)&fpscr
+ * R4 = (float*)&result
+ * R5 = (float*)&param1
+ * R6 = (float*)&param2
+ */
+#define FPS_TWO_IN(name) \
+_GLOBAL(fps_ ## name); \
+ lfd 0,0(r3); /* load up fpscr value */ \
+ MTFSF_L(0); \
+ lfs 0,0(r5); \
+ lfs 1,0(r6); \
+ \
+ name 0,0,1; \
+ \
+ stfs 0,0(r4); \
+ mffs 0; \
+ stfd 0,0(r3); /* save new fpscr value */ \
+ blr
+
+/*
+ * Single operation with three input operands
+ *
+ * R3 = (double*)&fpscr
+ * R4 = (float*)&result
+ * R5 = (float*)&param1
+ * R6 = (float*)&param2
+ * R7 = (float*)&param3
+ */
+#define FPS_THREE_IN(name) \
+_GLOBAL(fps_ ## name); \
+ lfd 0,0(r3); /* load up fpscr value */ \
+ MTFSF_L(0); \
+ lfs 0,0(r5); \
+ lfs 1,0(r6); \
+ lfs 2,0(r7); \
+ \
+ name 0,0,1,2; \
+ \
+ stfs 0,0(r4); \
+ mffs 0; \
+ stfd 0,0(r3); /* save new fpscr value */ \
+ blr
+
+FPS_ONE_IN(fres)
+FPS_ONE_IN(frsqrte)
+FPS_ONE_IN(fsqrts)
+FPS_TWO_IN(fadds)
+FPS_TWO_IN(fdivs)
+FPS_TWO_IN(fmuls)
+FPS_TWO_IN(fsubs)
+FPS_THREE_IN(fmadds)
+FPS_THREE_IN(fmsubs)
+FPS_THREE_IN(fnmadds)
+FPS_THREE_IN(fnmsubs)
+FPS_THREE_IN(fsel)
+
+
+/* Instructions operating on double-precision parameters */
+
+/*
+ * Beginning of double instruction processing
+ *
+ * R3 = (double*)&fpscr
+ * R4 = (u32*)&cr
+ * R5 = (double*)&result
+ * R6 = (double*)&param1
+ * R7 = (double*)&param2 [load_two]
+ * R8 = (double*)&param3 [load_three]
+ * LR = instruction call function
+ */
+fpd_load_three:
+ lfd 2,0(r8) /* load param3 */
+fpd_load_two:
+ lfd 1,0(r7) /* load param2 */
+fpd_load_one:
+ lfd 0,0(r6) /* load param1 */
+fpd_load_none:
+ lfd 3,0(r3) /* load up fpscr value */
+ MTFSF_L(3)
+ lwz r6, 0(r4) /* load cr */
+ mtcr r6
+ blr
+
+/*
+ * End of double instruction processing
+ *
+ * R3 = (double*)&fpscr
+ * R4 = (u32*)&cr
+ * R5 = (double*)&result
+ * LR = caller of instruction call function
+ */
+fpd_return:
+ mfcr r6
+ stfd 0,0(r5) /* save result */
+ mffs 0
+ stfd 0,0(r3) /* save new fpscr value */
+ stw r6,0(r4) /* save new cr value */
+ blr
+
+/*
+ * Double operation with no input operand
+ *
+ * R3 = (double*)&fpscr
+ * R4 = (u32*)&cr
+ * R5 = (double*)&result
+ */
+#define FPD_NONE_IN(name) \
+_GLOBAL(fpd_ ## name); \
+ mflr r12; \
+ bl fpd_load_none; \
+ mtlr r12; \
+ \
+ name. 0; /* call instruction */ \
+ b fpd_return
+
+/*
+ * Double operation with one input operand
+ *
+ * R3 = (double*)&fpscr
+ * R4 = (u32*)&cr
+ * R5 = (double*)&result
+ * R6 = (double*)&param1
+ */
+#define FPD_ONE_IN(name) \
+_GLOBAL(fpd_ ## name); \
+ mflr r12; \
+ bl fpd_load_one; \
+ mtlr r12; \
+ \
+ name. 0,0; /* call instruction */ \
+ b fpd_return
+
+/*
+ * Double operation with two input operands
+ *
+ * R3 = (double*)&fpscr
+ * R4 = (u32*)&cr
+ * R5 = (double*)&result
+ * R6 = (double*)&param1
+ * R7 = (double*)&param2
+ */
+#define FPD_TWO_IN(name) \
+_GLOBAL(fpd_ ## name); \
+ mflr r12; \
+ bl fpd_load_two; \
+ mtlr r12; \
+ \
+ name. 0,0,1; /* call instruction */ \
+ b fpd_return
+
+/*
+ * CR Double operation with two input operands
+ *
+ * R3 = (double*)&fpscr
+ * R4 = (u32*)&cr
+ * R5 = (double*)&param1
+ * R6 = (double*)&param2
+ */
+#define FPD_TWO_IN_CR(name) \
+_GLOBAL(fpd_ ## name); \
+ lfd 1,0(r6); /* load param2 */ \
+ lfd 0,0(r5); /* load param1 */ \
+ lfd 3,0(r3); /* load up fpscr value */ \
+ MTFSF_L(3); \
+ lwz r6, 0(r4); /* load cr */ \
+ mtcr r6; \
+ \
+ name 0,0,1; /* call instruction */ \
+ mfcr r6; \
+ mffs 0; \
+ stfd 0,0(r3); /* save new fpscr value */ \
+ stw r6,0(r4); /* save new cr value */ \
+ blr
+
+/*
+ * Double operation with three input operands
+ *
+ * R3 = (double*)&fpscr
+ * R4 = (u32*)&cr
+ * R5 = (double*)&result
+ * R6 = (double*)&param1
+ * R7 = (double*)&param2
+ * R8 = (double*)&param3
+ */
+#define FPD_THREE_IN(name) \
+_GLOBAL(fpd_ ## name); \
+ mflr r12; \
+ bl fpd_load_three; \
+ mtlr r12; \
+ \
+ name. 0,0,1,2; /* call instruction */ \
+ b fpd_return
+
+FPD_ONE_IN(fsqrts)
+FPD_ONE_IN(frsqrtes)
+FPD_ONE_IN(fres)
+FPD_ONE_IN(frsp)
+FPD_ONE_IN(fctiw)
+FPD_ONE_IN(fctiwz)
+FPD_ONE_IN(fsqrt)
+FPD_ONE_IN(fre)
+FPD_ONE_IN(frsqrte)
+FPD_ONE_IN(fneg)
+FPD_ONE_IN(fabs)
+FPD_TWO_IN(fadds)
+FPD_TWO_IN(fsubs)
+FPD_TWO_IN(fdivs)
+FPD_TWO_IN(fmuls)
+FPD_TWO_IN_CR(fcmpu)
+FPD_TWO_IN(fcpsgn)
+FPD_TWO_IN(fdiv)
+FPD_TWO_IN(fadd)
+FPD_TWO_IN(fmul)
+FPD_TWO_IN_CR(fcmpo)
+FPD_TWO_IN(fsub)
+FPD_THREE_IN(fmsubs)
+FPD_THREE_IN(fmadds)
+FPD_THREE_IN(fnmsubs)
+FPD_THREE_IN(fnmadds)
+FPD_THREE_IN(fsel)
+FPD_THREE_IN(fmsub)
+FPD_THREE_IN(fmadd)
+FPD_THREE_IN(fnmsub)
+FPD_THREE_IN(fnmadd)
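
From C, these entry points are reached through the function pointers handed to kvmppc_ps_one_in()/kvmppc_ps_two_in() and through the direct fpd_* calls in kvmppc_emulate_paired_single(). Matching the call sites and the register comments above, their prototypes look roughly as follows; the exact declarations (presumably in a KVM FPU header) are not part of this excerpt, so treat this as a sketch:

/* Single-precision helpers: thread_struct carries the guest FPSCR. */
extern void fps_fres(struct thread_struct *t, u32 *dst, u32 *src1);
extern void fps_fadds(struct thread_struct *t, u32 *dst, u32 *src1, u32 *src2);
extern void fps_fmadds(struct thread_struct *t, u32 *dst, u32 *src1, u32 *src2, u32 *src3);

/* Double-precision helpers: FPSCR and CR are passed and updated explicitly. */
extern void fpd_fneg(u64 *fpscr, u32 *cr, u64 *dst, u64 *src1);
extern void fpd_fadd(u64 *fpscr, u32 *cr, u64 *dst, u64 *src1, u64 *src2);
extern void fpd_fmadd(u64 *fpscr, u32 *cr, u64 *dst, u64 *src1, u64 *src2, u64 *src3);
extern void fpd_fcmpu(u64 *fpscr, u32 *cr, u64 *src1, u64 *src2);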
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 297fcd2ff7d0..9b8683f39e05 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -70,7 +70,7 @@ int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
case EMULATE_FAIL:
/* XXX Deliver Program interrupt to guest. */
printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__,
- vcpu->arch.last_inst);
+ kvmppc_get_last_inst(vcpu));
r = RESUME_HOST;
break;
default:
@@ -148,6 +148,10 @@ int kvm_dev_ioctl_check_extension(long ext)
switch (ext) {
case KVM_CAP_PPC_SEGSTATE:
+ case KVM_CAP_PPC_PAIRED_SINGLES:
+ case KVM_CAP_PPC_UNSET_IRQ:
+ case KVM_CAP_ENABLE_CAP:
+ case KVM_CAP_PPC_OSI:
r = 1;
break;
case KVM_CAP_COALESCED_MMIO:
@@ -193,12 +197,17 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
struct kvm_vcpu *vcpu;
vcpu = kvmppc_core_vcpu_create(kvm, id);
- kvmppc_create_vcpu_debugfs(vcpu, id);
+ if (!IS_ERR(vcpu))
+ kvmppc_create_vcpu_debugfs(vcpu, id);
return vcpu;
}
void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
+ /* Make sure we're not using the vcpu anymore */
+ hrtimer_cancel(&vcpu->arch.dec_timer);
+ tasklet_kill(&vcpu->arch.tasklet);
+
kvmppc_remove_vcpu_debugfs(vcpu);
kvmppc_core_vcpu_free(vcpu);
}
@@ -278,7 +287,7 @@ static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
struct kvm_run *run)
{
- ulong gpr;
+ u64 gpr;
if (run->mmio.len > sizeof(gpr)) {
printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
@@ -287,6 +296,7 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
if (vcpu->arch.mmio_is_bigendian) {
switch (run->mmio.len) {
+ case 8: gpr = *(u64 *)run->mmio.data; break;
case 4: gpr = *(u32 *)run->mmio.data; break;
case 2: gpr = *(u16 *)run->mmio.data; break;
case 1: gpr = *(u8 *)run->mmio.data; break;
@@ -300,7 +310,43 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
}
}
+ if (vcpu->arch.mmio_sign_extend) {
+ switch (run->mmio.len) {
+#ifdef CONFIG_PPC64
+ case 4:
+ gpr = (s64)(s32)gpr;
+ break;
+#endif
+ case 2:
+ gpr = (s64)(s16)gpr;
+ break;
+ case 1:
+ gpr = (s64)(s8)gpr;
+ break;
+ }
+ }
+
kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
+
+ switch (vcpu->arch.io_gpr & KVM_REG_EXT_MASK) {
+ case KVM_REG_GPR:
+ kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
+ break;
+ case KVM_REG_FPR:
+ vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
+ break;
+#ifdef CONFIG_PPC_BOOK3S
+ case KVM_REG_QPR:
+ vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
+ break;
+ case KVM_REG_FQPR:
+ vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
+ vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
+ break;
+#endif
+ default:
+ BUG();
+ }
}
int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
@@ -319,12 +365,25 @@ int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
vcpu->arch.mmio_is_bigendian = is_bigendian;
vcpu->mmio_needed = 1;
vcpu->mmio_is_write = 0;
+ vcpu->arch.mmio_sign_extend = 0;
return EMULATE_DO_MMIO;
}
+/* Same as above, but sign extends */
+int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ unsigned int rt, unsigned int bytes, int is_bigendian)
+{
+ int r;
+
+ r = kvmppc_handle_load(run, vcpu, rt, bytes, is_bigendian);
+ vcpu->arch.mmio_sign_extend = 1;
+
+ return r;
+}
+
int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
- u32 val, unsigned int bytes, int is_bigendian)
+ u64 val, unsigned int bytes, int is_bigendian)
{
void *data = run->mmio.data;
@@ -342,6 +401,7 @@ int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
/* Store the value at the lowest bytes in 'data'. */
if (is_bigendian) {
switch (bytes) {
+ case 8: *(u64 *)data = val; break;
case 4: *(u32 *)data = val; break;
case 2: *(u16 *)data = val; break;
case 1: *(u8 *)data = val; break;
@@ -376,6 +436,13 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
if (!vcpu->arch.dcr_is_write)
kvmppc_complete_dcr_load(vcpu, run);
vcpu->arch.dcr_needed = 0;
+ } else if (vcpu->arch.osi_needed) {
+ u64 *gprs = run->osi.gprs;
+ int i;
+
+ for (i = 0; i < 32; i++)
+ kvmppc_set_gpr(vcpu, i, gprs[i]);
+ vcpu->arch.osi_needed = 0;
}
kvmppc_core_deliver_interrupts(vcpu);
@@ -396,7 +463,10 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
- kvmppc_core_queue_external(vcpu, irq);
+ if (irq->irq == KVM_INTERRUPT_UNSET)
+ kvmppc_core_dequeue_external(vcpu, irq);
+ else
+ kvmppc_core_queue_external(vcpu, irq);
if (waitqueue_active(&vcpu->wq)) {
wake_up_interruptible(&vcpu->wq);
@@ -406,6 +476,27 @@ int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
return 0;
}
+static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
+ struct kvm_enable_cap *cap)
+{
+ int r;
+
+ if (cap->flags)
+ return -EINVAL;
+
+ switch (cap->cap) {
+ case KVM_CAP_PPC_OSI:
+ r = 0;
+ vcpu->arch.osi_enabled = true;
+ break;
+ default:
+ r = -EINVAL;
+ break;
+ }
+
+ return r;
+}
+
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
struct kvm_mp_state *mp_state)
{
@@ -434,6 +525,15 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
break;
}
+ case KVM_ENABLE_CAP:
+ {
+ struct kvm_enable_cap cap;
+ r = -EFAULT;
+ if (copy_from_user(&cap, argp, sizeof(cap)))
+ goto out;
+ r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
+ break;
+ }
default:
r = -EINVAL;
}
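
KVM_CAP_PPC_OSI is only advertised through KVM_CHECK_EXTENSION; user space has to opt in per vcpu via the KVM_ENABLE_CAP ioctl handled above before OSI hypercalls cause exits. A minimal user-space sketch — vcpu_fd is assumed to be an already created vcpu file descriptor, and error handling is omitted:

#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

/* Opt one vcpu in to OSI hypercall handling (illustrative). */
static int enable_ppc_osi(int vcpu_fd)
{
	struct kvm_enable_cap cap;

	memset(&cap, 0, sizeof(cap));	/* cap.flags must be 0, see the check above */
	cap.cap = KVM_CAP_PPC_OSI;

	return ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
}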
diff --git a/arch/powerpc/lib/string.S b/arch/powerpc/lib/string.S
index 64e2e499e32a..455881a5563f 100644
--- a/arch/powerpc/lib/string.S
+++ b/arch/powerpc/lib/string.S
@@ -28,7 +28,7 @@ _GLOBAL(strcpy)
/* This clears out any unused part of the destination buffer,
just as the libc version does. -- paulus */
_GLOBAL(strncpy)
- cmpwi 0,r5,0
+ PPC_LCMPI 0,r5,0
beqlr
mtctr r5
addi r6,r3,-1
@@ -39,7 +39,7 @@ _GLOBAL(strncpy)
bdnzf 2,1b /* dec ctr, branch if ctr != 0 && !cr0.eq */
bnelr /* if we didn't hit a null char, we're done */
mfctr r5
- cmpwi 0,r5,0 /* any space left in destination buffer? */
+ PPC_LCMPI 0,r5,0 /* any space left in destination buffer? */
beqlr /* we know r0 == 0 here */
2: stbu r0,1(r6) /* clear it out if so */
bdnz 2b
@@ -70,8 +70,8 @@ _GLOBAL(strcmp)
blr
_GLOBAL(strncmp)
- PPC_LCMPI r5,0
- beqlr
+ PPC_LCMPI 0,r5,0
+ beq- 2f
mtctr r5
addi r5,r3,-1
addi r4,r4,-1
@@ -82,6 +82,8 @@ _GLOBAL(strncmp)
beqlr 1
bdnzt eq,1b
blr
+2: li r3,0
+ blr
_GLOBAL(strlen)
addi r4,r3,-1
@@ -92,8 +94,8 @@ _GLOBAL(strlen)
blr
_GLOBAL(memcmp)
- cmpwi 0,r5,0
- ble- 2f
+ PPC_LCMPI 0,r5,0
+ beq- 2f
mtctr r5
addi r6,r3,-1
addi r4,r4,-1
@@ -106,8 +108,8 @@ _GLOBAL(memcmp)
blr
_GLOBAL(memchr)
- cmpwi 0,r5,0
- ble- 2f
+ PPC_LCMPI 0,r5,0
+ beq- 2f
mtctr r5
addi r3,r3,-1
1: lbzu r0,1(r3)
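
With the changes above, strncmp() with a zero length now returns 0 (equal) instead of leaking its first argument as the result, and the length tests use PPC_LCMPI so a 64-bit length is compared in full on ppc64. A quick user-space check of the guaranteed C-level semantics (illustrative only):

#include <assert.h>
#include <string.h>

int main(void)
{
	/* A zero length must compare equal, whatever the buffers contain. */
	assert(strncmp("abc", "xyz", 0) == 0);
	assert(memcmp("abc", "xyz", 0) == 0);
	return 0;
}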
diff --git a/arch/powerpc/mm/44x_mmu.c b/arch/powerpc/mm/44x_mmu.c
index 3986264b0993..d8c6efb32bc6 100644
--- a/arch/powerpc/mm/44x_mmu.c
+++ b/arch/powerpc/mm/44x_mmu.c
@@ -38,7 +38,9 @@ unsigned int tlb_44x_index; /* = 0 */
unsigned int tlb_44x_hwater = PPC44x_TLB_SIZE - 1 - PPC44x_EARLY_TLBS;
int icache_44x_need_flush;
-static void __init ppc44x_update_tlb_hwater(void)
+unsigned long tlb_47x_boltmap[1024/8];
+
+static void __cpuinit ppc44x_update_tlb_hwater(void)
{
extern unsigned int tlb_44x_patch_hwater_D[];
extern unsigned int tlb_44x_patch_hwater_I[];
@@ -59,7 +61,7 @@ static void __init ppc44x_update_tlb_hwater(void)
}
/*
- * "Pins" a 256MB TLB entry in AS0 for kernel lowmem
+ * "Pins" a 256MB TLB entry in AS0 for kernel lowmem for 44x type MMU
*/
static void __init ppc44x_pin_tlb(unsigned int virt, unsigned int phys)
{
@@ -67,12 +69,18 @@ static void __init ppc44x_pin_tlb(unsigned int virt, unsigned int phys)
ppc44x_update_tlb_hwater();
+ mtspr(SPRN_MMUCR, 0);
+
__asm__ __volatile__(
"tlbwe %2,%3,%4\n"
"tlbwe %1,%3,%5\n"
"tlbwe %0,%3,%6\n"
:
+#ifdef CONFIG_PPC47x
+ : "r" (PPC47x_TLB2_S_RWX),
+#else
: "r" (PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G),
+#endif
"r" (phys),
"r" (virt | PPC44x_TLB_VALID | PPC44x_TLB_256M),
"r" (entry),
@@ -81,8 +89,93 @@ static void __init ppc44x_pin_tlb(unsigned int virt, unsigned int phys)
"i" (PPC44x_TLB_ATTRIB));
}
+static int __init ppc47x_find_free_bolted(void)
+{
+ unsigned int mmube0 = mfspr(SPRN_MMUBE0);
+ unsigned int mmube1 = mfspr(SPRN_MMUBE1);
+
+ if (!(mmube0 & MMUBE0_VBE0))
+ return 0;
+ if (!(mmube0 & MMUBE0_VBE1))
+ return 1;
+ if (!(mmube0 & MMUBE0_VBE2))
+ return 2;
+ if (!(mmube1 & MMUBE1_VBE3))
+ return 3;
+ if (!(mmube1 & MMUBE1_VBE4))
+ return 4;
+ if (!(mmube1 & MMUBE1_VBE5))
+ return 5;
+ return -1;
+}
+
+static void __init ppc47x_update_boltmap(void)
+{
+ unsigned int mmube0 = mfspr(SPRN_MMUBE0);
+ unsigned int mmube1 = mfspr(SPRN_MMUBE1);
+
+ if (mmube0 & MMUBE0_VBE0)
+ __set_bit((mmube0 >> MMUBE0_IBE0_SHIFT) & 0xff,
+ tlb_47x_boltmap);
+ if (mmube0 & MMUBE0_VBE1)
+ __set_bit((mmube0 >> MMUBE0_IBE1_SHIFT) & 0xff,
+ tlb_47x_boltmap);
+ if (mmube0 & MMUBE0_VBE2)
+ __set_bit((mmube0 >> MMUBE0_IBE2_SHIFT) & 0xff,
+ tlb_47x_boltmap);
+ if (mmube1 & MMUBE1_VBE3)
+ __set_bit((mmube1 >> MMUBE1_IBE3_SHIFT) & 0xff,
+ tlb_47x_boltmap);
+ if (mmube1 & MMUBE1_VBE4)
+ __set_bit((mmube1 >> MMUBE1_IBE4_SHIFT) & 0xff,
+ tlb_47x_boltmap);
+ if (mmube1 & MMUBE1_VBE5)
+ __set_bit((mmube1 >> MMUBE1_IBE5_SHIFT) & 0xff,
+ tlb_47x_boltmap);
+}
+
+/*
+ * "Pins" a 256MB TLB entry in AS0 for kernel lowmem for 47x type MMU
+ */
+static void __cpuinit ppc47x_pin_tlb(unsigned int virt, unsigned int phys)
+{
+ unsigned int rA;
+ int bolted;
+
+ /* Base rA is HW way select, way 0, bolted bit set */
+ rA = 0x88000000;
+
+ /* Look for a bolted entry slot */
+ bolted = ppc47x_find_free_bolted();
+ BUG_ON(bolted < 0);
+
+ /* Insert bolted slot number */
+ rA |= bolted << 24;
+
+ pr_debug("256M TLB entry for 0x%08x->0x%08x in bolt slot %d\n",
+ virt, phys, bolted);
+
+ mtspr(SPRN_MMUCR, 0);
+
+ __asm__ __volatile__(
+ "tlbwe %2,%3,0\n"
+ "tlbwe %1,%3,1\n"
+ "tlbwe %0,%3,2\n"
+ :
+ : "r" (PPC47x_TLB2_SW | PPC47x_TLB2_SR |
+ PPC47x_TLB2_SX
+#ifdef CONFIG_SMP
+ | PPC47x_TLB2_M
+#endif
+ ),
+ "r" (phys),
+ "r" (virt | PPC47x_TLB0_VALID | PPC47x_TLB0_256M),
+ "r" (rA));
+}
+
void __init MMU_init_hw(void)
{
+ /* This is not useful on 47x but won't hurt either */
ppc44x_update_tlb_hwater();
flush_instruction_cache();
@@ -95,8 +188,51 @@ unsigned long __init mmu_mapin_ram(unsigned long top)
/* Pin in enough TLBs to cover any lowmem not covered by the
* initial 256M mapping established in head_44x.S */
for (addr = PPC_PIN_SIZE; addr < lowmem_end_addr;
- addr += PPC_PIN_SIZE)
- ppc44x_pin_tlb(addr + PAGE_OFFSET, addr);
+ addr += PPC_PIN_SIZE) {
+ if (mmu_has_feature(MMU_FTR_TYPE_47x))
+ ppc47x_pin_tlb(addr + PAGE_OFFSET, addr);
+ else
+ ppc44x_pin_tlb(addr + PAGE_OFFSET, addr);
+ }
+ if (mmu_has_feature(MMU_FTR_TYPE_47x)) {
+ ppc47x_update_boltmap();
+#ifdef DEBUG
+ {
+ int i;
+
+ printk(KERN_DEBUG "bolted entries: ");
+ for (i = 0; i < 255; i++) {
+ if (test_bit(i, tlb_47x_boltmap))
+ printk("%d ", i);
+ }
+ printk("\n");
+ }
+#endif /* DEBUG */
+ }
return total_lowmem;
}
+
+#ifdef CONFIG_SMP
+void __cpuinit mmu_init_secondary(int cpu)
+{
+ unsigned long addr;
+
+ /* Pin in enough TLBs to cover any lowmem not covered by the
+ * initial 256M mapping established in head_44x.S
+ *
+ * WARNING: This is called with only the first 256M of the
+ * linear mapping in the TLB and we can't take faults yet
+ * so beware of what this code uses. It runs off a temporary
+ * stack. current (r2) isn't initialized, smp_processor_id()
+ * will not work, current thread info isn't accessible, ...
+ */
+ for (addr = PPC_PIN_SIZE; addr < lowmem_end_addr;
+ addr += PPC_PIN_SIZE) {
+ if (mmu_has_feature(MMU_FTR_TYPE_47x))
+ ppc47x_pin_tlb(addr + PAGE_OFFSET, addr);
+ else
+ ppc44x_pin_tlb(addr + PAGE_OFFSET, addr);
+ }
+}
+#endif /* CONFIG_SMP */
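
ppc47x_update_boltmap() records which TLB indices are referenced by the bolted-entry registers, so later replacement code can avoid evicting a bolted translation. No consumer is added in this hunk; a hypothetical kernel-context helper using the map might look like:

/* Hypothetical (not in this patch): pick a 47x TLB index that is not bolted. */
static int ppc47x_pick_victim_index(int hint)
{
	int idx = hint & 0xff;

	while (test_bit(idx, tlb_47x_boltmap))
		idx = (idx + 1) & 0xff;		/* skip bolted entries */

	return idx;
}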
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 26fb6b990b0a..1bd712c33ce2 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -151,13 +151,14 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
if (!user_mode(regs) && (address >= TASK_SIZE))
return SIGSEGV;
-#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
+#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE) || \
+ defined(CONFIG_PPC_BOOK3S_64))
if (error_code & DSISR_DABRMATCH) {
/* DABR match */
do_dabr(regs, address, error_code);
return 0;
}
-#endif /* !(CONFIG_4xx || CONFIG_BOOKE)*/
+#endif
if (in_atomic() || mm == NULL) {
if (!user_mode(regs))
@@ -307,7 +308,6 @@ good_area:
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
- survive:
ret = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0);
if (unlikely(ret & VM_FAULT_ERROR)) {
if (ret & VM_FAULT_OOM)
@@ -359,15 +359,10 @@ bad_area_nosemaphore:
*/
out_of_memory:
up_read(&mm->mmap_sem);
- if (is_global_init(current)) {
- yield();
- down_read(&mm->mmap_sem);
- goto survive;
- }
- printk("VM: killing process %s\n", current->comm);
- if (user_mode(regs))
- do_group_exit(SIGKILL);
- return SIGKILL;
+ if (!user_mode(regs))
+ return SIGKILL;
+ pagefault_out_of_memory();
+ return 0;
do_sigbus:
up_read(&mm->mmap_sem);
diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c
index 1ed6b52f3031..cdc7526e9c93 100644
--- a/arch/powerpc/mm/fsl_booke_mmu.c
+++ b/arch/powerpc/mm/fsl_booke_mmu.c
@@ -2,7 +2,7 @@
* Modifications by Kumar Gala (galak@kernel.crashing.org) to support
* E500 Book E processors.
*
- * Copyright 2004 Freescale Semiconductor, Inc
+ * Copyright 2004,2010 Freescale Semiconductor, Inc.
*
* This file contains the routines for initializing the MMU
* on the 4xx series of chips.
@@ -56,19 +56,13 @@
unsigned int tlbcam_index;
-#define NUM_TLBCAMS (64)
#if defined(CONFIG_LOWMEM_CAM_NUM_BOOL) && (CONFIG_LOWMEM_CAM_NUM >= NUM_TLBCAMS)
#error "LOWMEM_CAM_NUM must be less than NUM_TLBCAMS"
#endif
-struct tlbcam {
- u32 MAS0;
- u32 MAS1;
- unsigned long MAS2;
- u32 MAS3;
- u32 MAS7;
-} TLBCAM[NUM_TLBCAMS];
+#define NUM_TLBCAMS (64)
+struct tlbcam TLBCAM[NUM_TLBCAMS];
struct tlbcamrange {
unsigned long start;
@@ -109,19 +103,6 @@ unsigned long p_mapped_by_tlbcam(phys_addr_t pa)
return 0;
}
-void loadcam_entry(int idx)
-{
- mtspr(SPRN_MAS0, TLBCAM[idx].MAS0);
- mtspr(SPRN_MAS1, TLBCAM[idx].MAS1);
- mtspr(SPRN_MAS2, TLBCAM[idx].MAS2);
- mtspr(SPRN_MAS3, TLBCAM[idx].MAS3);
-
- if (mmu_has_feature(MMU_FTR_BIG_PHYS))
- mtspr(SPRN_MAS7, TLBCAM[idx].MAS7);
-
- asm volatile("isync;tlbwe;isync" : : : "memory");
-}
-
/*
* Set up one of the I/D BAT (block address translation) register pairs.
* The parameters are not checked; in particular size must be a power
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index d7fa50b09b4a..e267f223fdff 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -252,6 +252,47 @@ static void __meminit vmemmap_create_mapping(unsigned long start,
}
#endif /* CONFIG_PPC_BOOK3E */
+struct vmemmap_backing *vmemmap_list;
+
+static __meminit struct vmemmap_backing * vmemmap_list_alloc(int node)
+{
+ static struct vmemmap_backing *next;
+ static int num_left;
+
+ /* allocate a page when required and hand out chunks */
+ if (!next || !num_left) {
+ next = vmemmap_alloc_block(PAGE_SIZE, node);
+ if (unlikely(!next)) {
+ WARN_ON(1);
+ return NULL;
+ }
+ num_left = PAGE_SIZE / sizeof(struct vmemmap_backing);
+ }
+
+ num_left--;
+
+ return next++;
+}
+
+static __meminit void vmemmap_list_populate(unsigned long phys,
+ unsigned long start,
+ int node)
+{
+ struct vmemmap_backing *vmem_back;
+
+ vmem_back = vmemmap_list_alloc(node);
+ if (unlikely(!vmem_back)) {
+ WARN_ON(1);
+ return;
+ }
+
+ vmem_back->phys = phys;
+ vmem_back->virt_addr = start;
+ vmem_back->list = vmemmap_list;
+
+ vmemmap_list = vmem_back;
+}
+
int __meminit vmemmap_populate(struct page *start_page,
unsigned long nr_pages, int node)
{
@@ -276,6 +317,8 @@ int __meminit vmemmap_populate(struct page *start_page,
if (!p)
return -ENOMEM;
+ vmemmap_list_populate(__pa(p), start, node);
+
pr_debug(" * %016lx..%016lx allocated at %p\n",
start, start + page_size, p);
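
vmemmap_list is a simple singly linked list threaded through the 'list' pointer, with the newest backing block at the head. This patch only records the mappings; a sketch of how a later consumer could look up the physical block behind a vmemmap address (exact block-start match only, for brevity) is:

/* Illustrative walk of vmemmap_list; not part of this patch. */
static unsigned long vmemmap_list_lookup_phys(unsigned long vmemmap_addr)
{
	struct vmemmap_backing *vmem_back;

	for (vmem_back = vmemmap_list; vmem_back; vmem_back = vmem_back->list)
		if (vmem_back->virt_addr == vmemmap_addr)
			return vmem_back->phys;

	return 0;	/* not covered by any recorded block */
}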
diff --git a/arch/powerpc/mm/mmu_context_hash32.c b/arch/powerpc/mm/mmu_context_hash32.c
index 0dfba2bf7f31..d0ee554e86e4 100644
--- a/arch/powerpc/mm/mmu_context_hash32.c
+++ b/arch/powerpc/mm/mmu_context_hash32.c
@@ -60,11 +60,7 @@
static unsigned long next_mmu_context;
static unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
-
-/*
- * Set up the context for a new address space.
- */
-int init_new_context(struct task_struct *t, struct mm_struct *mm)
+unsigned long __init_new_context(void)
{
unsigned long ctx = next_mmu_context;
@@ -74,19 +70,38 @@ int init_new_context(struct task_struct *t, struct mm_struct *mm)
ctx = 0;
}
next_mmu_context = (ctx + 1) & LAST_CONTEXT;
- mm->context.id = ctx;
+
+ return ctx;
+}
+EXPORT_SYMBOL_GPL(__init_new_context);
+
+/*
+ * Set up the context for a new address space.
+ */
+int init_new_context(struct task_struct *t, struct mm_struct *mm)
+{
+ mm->context.id = __init_new_context();
return 0;
}
/*
+ * Free a context ID. Make sure to call this with preempt disabled!
+ */
+void __destroy_context(unsigned long ctx)
+{
+ clear_bit(ctx, context_map);
+}
+EXPORT_SYMBOL_GPL(__destroy_context);
+
+/*
* We're finished using the context for an address space.
*/
void destroy_context(struct mm_struct *mm)
{
preempt_disable();
if (mm->context.id != NO_CONTEXT) {
- clear_bit(mm->context.id, context_map);
+ __destroy_context(mm->context.id);
mm->context.id = NO_CONTEXT;
}
preempt_enable();
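
__init_new_context() and __destroy_context() are exported so that code outside the normal mm lifecycle can borrow a hash32 context ID. A minimal usage sketch, assuming the caller programs the segment registers itself and honours the preempt-disabled requirement on free; example_borrow_context() is illustrative only:

static void example_borrow_context(void)
{
	unsigned long ctx = __init_new_context();

	/* ... load segment registers with ctx and use the mapping ... */

	preempt_disable();
	__destroy_context(ctx);
	preempt_enable();
}
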
diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
index 1f2d9ff09895..ddfd7ad4e1d6 100644
--- a/arch/powerpc/mm/mmu_context_nohash.c
+++ b/arch/powerpc/mm/mmu_context_nohash.c
@@ -395,10 +395,18 @@ void __init mmu_context_init(void)
* the PID/TID comparison is disabled, so we can use a TID of zero
* to represent all kernel pages as shared among all contexts.
* -- Dan
+ *
+ * The IBM 47x core supports 16-bit PIDs, thus 65535 contexts. We
+ * should normally never have to steal contexts, though the facility
+ * is present if needed.
+ * -- BenH
*/
if (mmu_has_feature(MMU_FTR_TYPE_8xx)) {
first_context = 0;
last_context = 15;
+ } else if (mmu_has_feature(MMU_FTR_TYPE_47x)) {
+ first_context = 1;
+ last_context = 65535;
} else {
first_context = 1;
last_context = 255;
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index d49a77503e19..63b84a0d3b10 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -69,12 +69,7 @@ static inline void _tlbil_va(unsigned long address, unsigned int pid,
}
#endif /* CONIFG_8xx */
-/*
- * As of today, we don't support tlbivax broadcast on any
- * implementation. When that becomes the case, this will be
- * an extern.
- */
-#ifdef CONFIG_PPC_BOOK3E
+#if defined(CONFIG_PPC_BOOK3E) || defined(CONFIG_PPC_47x)
extern void _tlbivax_bcast(unsigned long address, unsigned int pid,
unsigned int tsize, unsigned int ind);
#else
@@ -149,7 +144,15 @@ extern unsigned long mmu_mapin_ram(unsigned long top);
extern void MMU_init_hw(void);
extern unsigned long mmu_mapin_ram(unsigned long top);
extern void adjust_total_lowmem(void);
-
+extern void loadcam_entry(unsigned int index);
+
+struct tlbcam {
+ u32 MAS0;
+ u32 MAS1;
+ unsigned long MAS2;
+ u32 MAS3;
+ u32 MAS7;
+};
#elif defined(CONFIG_PPC32)
/* anything 32-bit except 4xx or 8xx */
extern void MMU_init_hw(void);
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index eaa7633515b7..80d110635d24 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -33,16 +33,41 @@ static int numa_debug;
#define dbg(args...) if (numa_debug) { printk(KERN_INFO args); }
int numa_cpu_lookup_table[NR_CPUS];
-cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];
+cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
struct pglist_data *node_data[MAX_NUMNODES];
EXPORT_SYMBOL(numa_cpu_lookup_table);
-EXPORT_SYMBOL(numa_cpumask_lookup_table);
+EXPORT_SYMBOL(node_to_cpumask_map);
EXPORT_SYMBOL(node_data);
static int min_common_depth;
static int n_mem_addr_cells, n_mem_size_cells;
+/*
+ * Allocate node_to_cpumask_map based on number of available nodes
+ * Requires node_possible_map to be valid.
+ *
+ * Note: node_to_cpumask() is not valid until after this is done.
+ */
+static void __init setup_node_to_cpumask_map(void)
+{
+ unsigned int node, num = 0;
+
+ /* setup nr_node_ids if not done yet */
+ if (nr_node_ids == MAX_NUMNODES) {
+ for_each_node_mask(node, node_possible_map)
+ num = node;
+ nr_node_ids = num + 1;
+ }
+
+ /* allocate the map */
+ for (node = 0; node < nr_node_ids; node++)
+ alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);
+
+ /* cpumask_of_node() will now work */
+ dbg("Node to cpumask map for %d nodes\n", nr_node_ids);
+}
+
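
Once setup_node_to_cpumask_map() has run, the per-node masks can be used with the generic cpumask iterators. A small sketch of the kind of consumer this enables, assuming a valid node id; cpus_on_node() is an illustrative name only:

static int cpus_on_node(int node)
{
	int cpu, count = 0;

	/* iterate the mask allocated by setup_node_to_cpumask_map() */
	for_each_cpu(cpu, node_to_cpumask_map[node])
		count++;

	return count;
}
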
static int __cpuinit fake_numa_create_new_node(unsigned long end_pfn,
unsigned int *nid)
{
@@ -138,8 +163,8 @@ static void __cpuinit map_cpu_to_node(int cpu, int node)
dbg("adding cpu %d to node %d\n", cpu, node);
- if (!(cpu_isset(cpu, numa_cpumask_lookup_table[node])))
- cpu_set(cpu, numa_cpumask_lookup_table[node]);
+ if (!(cpumask_test_cpu(cpu, node_to_cpumask_map[node])))
+ cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
}
#ifdef CONFIG_HOTPLUG_CPU
@@ -149,8 +174,8 @@ static void unmap_cpu_from_node(unsigned long cpu)
dbg("removing cpu %lu from node %d\n", cpu, node);
- if (cpu_isset(cpu, numa_cpumask_lookup_table[node])) {
- cpu_clear(cpu, numa_cpumask_lookup_table[node]);
+ if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) {
+ cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
} else {
printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
cpu, node);
@@ -246,7 +271,8 @@ static int __init find_min_common_depth(void)
const unsigned int *ref_points;
struct device_node *rtas_root;
unsigned int len;
- struct device_node *options;
+ struct device_node *chosen;
+ const char *vec5;
rtas_root = of_find_node_by_path("/rtas");
@@ -264,14 +290,17 @@ static int __init find_min_common_depth(void)
"ibm,associativity-reference-points", &len);
/*
- * For type 1 affinity information we want the first field
+ * For form 1 affinity information we want the first field
*/
- options = of_find_node_by_path("/options");
- if (options) {
- const char *str;
- str = of_get_property(options, "ibm,associativity-form", NULL);
- if (str && !strcmp(str, "1"))
- index = 0;
+#define VEC5_AFFINITY_BYTE 5
+#define VEC5_AFFINITY 0x80
+ chosen = of_find_node_by_path("/chosen");
+ if (chosen) {
+ vec5 = of_get_property(chosen, "ibm,architecture-vec-5", NULL);
+ if (vec5 && (vec5[VEC5_AFFINITY_BYTE] & VEC5_AFFINITY)) {
+ dbg("Using form 1 affinity\n");
+ index = 0;
+ }
}
if ((len >= 2 * sizeof(unsigned int)) && ref_points) {
@@ -750,8 +779,9 @@ void __init dump_numa_cpu_topology(void)
* If we used a CPU iterator here we would miss printing
* the holes in the cpumap.
*/
- for (cpu = 0; cpu < NR_CPUS; cpu++) {
- if (cpu_isset(cpu, numa_cpumask_lookup_table[node])) {
+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
+ if (cpumask_test_cpu(cpu,
+ node_to_cpumask_map[node])) {
if (count == 0)
printk(" %u", cpu);
++count;
@@ -763,7 +793,7 @@ void __init dump_numa_cpu_topology(void)
}
if (count > 1)
- printk("-%u", NR_CPUS - 1);
+ printk("-%u", nr_cpu_ids - 1);
printk("\n");
}
}
@@ -939,10 +969,6 @@ void __init do_init_bootmem(void)
else
dump_numa_memory_topology();
- register_cpu_notifier(&ppc64_numa_nb);
- cpu_numa_callback(&ppc64_numa_nb, CPU_UP_PREPARE,
- (void *)(unsigned long)boot_cpuid);
-
for_each_online_node(nid) {
unsigned long start_pfn, end_pfn;
void *bootmem_vaddr;
@@ -996,6 +1022,16 @@ void __init do_init_bootmem(void)
}
init_bootmem_done = 1;
+
+ /*
+ * Now bootmem is initialised we can create the node to cpumask
+ * lookup tables and setup the cpu callback to populate them.
+ */
+ setup_node_to_cpumask_map();
+
+ register_cpu_notifier(&ppc64_numa_nb);
+ cpu_numa_callback(&ppc64_numa_nb, CPU_UP_PREPARE,
+ (void *)(unsigned long)boot_cpuid);
}
void __init paging_init(void)
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index b9243e7557ae..9fc02dc72ce9 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -146,6 +146,14 @@ ioremap_flags(phys_addr_t addr, unsigned long size, unsigned long flags)
/* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
flags &= ~(_PAGE_USER | _PAGE_EXEC);
+#ifdef _PAGE_BAP_SR
+ /* _PAGE_USER contains _PAGE_BAP_SR on BookE using the new PTE format
+ * which means that we just cleared supervisor access... oops ;-) This
+ * restores it
+ */
+ flags |= _PAGE_BAP_SR;
+#endif
+
return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_flags);
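
On Book3E with the new PTE format, _PAGE_USER is built from the BAP permission bits, so masking it off also drops supervisor read permission; the #ifdef above puts _PAGE_BAP_SR back. A small illustration with made-up bit values (the real definitions live in the pte-*.h headers and differ):

#define X_PAGE_BAP_SR	0x04	/* supervisor read (illustrative value) */
#define X_PAGE_BAP_UR	0x08	/* user read (illustrative value) */
#define X_PAGE_USER	(X_PAGE_BAP_SR | X_PAGE_BAP_UR)

static unsigned long fixup_ioremap_flags(unsigned long flags)
{
	flags &= ~X_PAGE_USER;	/* clears supervisor read as a side effect */
	flags |= X_PAGE_BAP_SR;	/* so it has to be restored */
	return flags;
}
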
@@ -385,11 +393,7 @@ static int __change_page_attr(struct page *page, pgprot_t prot)
return -EINVAL;
__set_pte_at(&init_mm, address, kpte, mk_pte(page, prot), 0);
wmb();
-#ifdef CONFIG_PPC_STD_MMU
- flush_hash_pages(0, address, pmd_val(*kpmd), 1);
-#else
flush_tlb_page(NULL, address);
-#endif
pte_unmap(kpte);
return 0;
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index d95679a5fb29..d050fc8d9714 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -265,6 +265,14 @@ void __iomem * ioremap_flags(phys_addr_t addr, unsigned long size,
/* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
flags &= ~(_PAGE_USER | _PAGE_EXEC);
+#ifdef _PAGE_BAP_SR
+ /* _PAGE_USER contains _PAGE_BAP_SR on BookE using the new PTE format
+ * which means that we just cleared supervisor access... oops ;-) This
+ * restores it
+ */
+ flags |= _PAGE_BAP_SR;
+#endif
+
if (ppc_md.ioremap)
return ppc_md.ioremap(addr, size, flags, caller);
return __ioremap_caller(addr, size, flags, caller);
diff --git a/arch/powerpc/mm/tlb_nohash_low.S b/arch/powerpc/mm/tlb_nohash_low.S
index bbdc5b577b85..cfa768203d08 100644
--- a/arch/powerpc/mm/tlb_nohash_low.S
+++ b/arch/powerpc/mm/tlb_nohash_low.S
@@ -10,7 +10,7 @@
* - tlbil_va
* - tlbil_pid
* - tlbil_all
- * - tlbivax_bcast (not yet)
+ * - tlbivax_bcast
*
* Code mostly moved over from misc_32.S
*
@@ -33,6 +33,7 @@
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/processor.h>
+#include <asm/bug.h>
#if defined(CONFIG_40x)
@@ -65,7 +66,7 @@ _GLOBAL(__tlbil_va)
* Nothing to do for 8xx, everything is inline
*/
-#elif defined(CONFIG_44x)
+#elif defined(CONFIG_44x) /* Includes 47x */
/*
* 440 implementation uses tlbsx/we for tlbil_va and a full sweep
@@ -73,7 +74,13 @@ _GLOBAL(__tlbil_va)
*/
_GLOBAL(__tlbil_va)
mfspr r5,SPRN_MMUCR
- rlwimi r5,r4,0,24,31 /* Set TID */
+ mfmsr r10
+
+ /*
+ * We write 16 bits of STID since 47x supports that much; we
+ * will never be passed out-of-bounds values on 440 (hopefully)
+ */
+ rlwimi r5,r4,0,16,31
/* We have to run the search with interrupts disabled, otherwise
* an interrupt which causes a TLB miss can clobber the MMUCR
@@ -83,24 +90,41 @@ _GLOBAL(__tlbil_va)
* and restoring MMUCR, so only normal interrupts have to be
* taken care of.
*/
- mfmsr r4
wrteei 0
mtspr SPRN_MMUCR,r5
- tlbsx. r3, 0, r3
- wrtee r4
- bne 1f
+ tlbsx. r6,0,r3
+ bne 10f
sync
- /* There are only 64 TLB entries, so r3 < 64,
- * which means bit 22, is clear. Since 22 is
- * the V bit in the TLB_PAGEID, loading this
+BEGIN_MMU_FTR_SECTION
+ b 2f
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
+ /* On 440 there are only 64 TLB entries, so r3 < 64, which means bit
+ * 22 is clear. Since 22 is the V bit in the TLB_PAGEID, loading this
* value will invalidate the TLB entry.
*/
- tlbwe r3, r3, PPC44x_TLB_PAGEID
+ tlbwe r6,r6,PPC44x_TLB_PAGEID
isync
-1: blr
+10: wrtee r10
+ blr
+2:
+#ifdef CONFIG_PPC_47x
+ oris r7,r6,0x8000 /* specify way explicitly */
+ clrrwi r4,r3,12 /* get an EPN for the hashing with V = 0 */
+ ori r4,r4,PPC47x_TLBE_SIZE
+ tlbwe r4,r7,0 /* write it */
+ isync
+ wrtee r10
+ blr
+#else /* CONFIG_PPC_47x */
+1: trap
+ EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,0;
+#endif /* !CONFIG_PPC_47x */
_GLOBAL(_tlbil_all)
_GLOBAL(_tlbil_pid)
+BEGIN_MMU_FTR_SECTION
+ b 2f
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
li r3,0
sync
@@ -115,6 +139,76 @@ _GLOBAL(_tlbil_pid)
isync
blr
+2:
+#ifdef CONFIG_PPC_47x
+ /* 476 variant. There's no simple way to do this; hopefully we'll
+ * try to limit the number of such full invalidates
+ */
+ mfmsr r11 /* Interrupts off */
+ wrteei 0
+ li r3,-1 /* Current set */
+ lis r10,tlb_47x_boltmap@h
+ ori r10,r10,tlb_47x_boltmap@l
+ lis r7,0x8000 /* Specify way explicitly */
+
+ b 9f /* For each set */
+
+1: li r9,4 /* Number of ways */
+ li r4,0 /* Current way */
+ li r6,0 /* Default entry value 0 */
+ andi. r0,r8,1 /* Check if way 0 is bolted */
+ mtctr r9 /* Load way counter */
+ bne- 3f /* Bolted, skip loading it */
+
+2: /* For each way */
+ or r5,r3,r4 /* Make way|index for tlbre */
+ rlwimi r5,r5,16,8,15 /* Copy index into position */
+ tlbre r6,r5,0 /* Read entry */
+3: addis r4,r4,0x2000 /* Next way */
+ andi. r0,r6,PPC47x_TLB0_VALID /* Valid entry ? */
+ beq 4f /* Nope, skip it */
+ rlwimi r7,r5,0,1,2 /* Insert way number */
+ rlwinm r6,r6,0,21,19 /* Clear V */
+ tlbwe r6,r7,0 /* Write it */
+4: bdnz 2b /* Loop for each way */
+ srwi r8,r8,1 /* Next boltmap bit */
+9: cmpwi cr1,r3,255 /* Last set done ? */
+ addi r3,r3,1 /* Next set */
+ beq cr1,1f /* End of loop */
+ andi. r0,r3,0x1f /* Need to load a new boltmap word ? */
+ bne 1b /* No, loop */
+ lwz r8,0(r10) /* Load boltmap entry */
+ addi r10,r10,4 /* Next word */
+ b 1b /* Then loop */
+1: isync /* Sync shadows */
+ wrtee r11
+#else /* CONFIG_PPC_47x */
+1: trap
+ EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,0;
+#endif /* !CONFIG_PPC_47x */
+ blr
+
+#ifdef CONFIG_PPC_47x
+/*
+ * _tlbivax_bcast is only on 47x. We don't bother doing a runtime
+ * check though, it will blow up soon enough if we mistakenly try
+ * to use it on a 440.
+ */
+_GLOBAL(_tlbivax_bcast)
+ mfspr r5,SPRN_MMUCR
+ mfmsr r10
+ rlwimi r5,r4,0,16,31
+ wrteei 0
+ mtspr SPRN_MMUCR,r5
+/* tlbivax 0,r3 - use .long to avoid binutils deps */
+ .long 0x7c000624 | (r3 << 11)
+ isync
+ eieio
+ tlbsync
+ sync
+ wrtee r10
+ blr
+#endif /* CONFIG_PPC_47x */
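
The 476 full invalidate above walks all 256 sets and 4 ways, skipping way 0 of any set the boltmap marks as bolted and clearing the Valid bit of every other valid entry. Expressed as rough C pseudocode, with tlb_read()/tlb_write() standing in for the tlbre/tlbwe sequences (they are not real helpers, and the boltmap type is assumed):

static void tlb_47x_flush_all(void)
{
	unsigned int set, way, word0;

	for (set = 0; set < 256; set++) {
		int way0_bolted = (tlb_47x_boltmap[set / 32] >> (set % 32)) & 1;

		for (way = 0; way < 4; way++) {
			if (way == 0 && way0_bolted)
				continue;	/* keep bolted kernel entry */
			word0 = tlb_read(set, way);
			if (word0 & PPC47x_TLB0_VALID)
				tlb_write(set, way,
					  word0 & ~PPC47x_TLB0_VALID);
		}
	}
}
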
#elif defined(CONFIG_FSL_BOOKE)
/*
@@ -271,3 +365,31 @@ _GLOBAL(set_context)
#else
#error Unsupported processor type !
#endif
+
+#if defined(CONFIG_FSL_BOOKE)
+/*
+ * extern void loadcam_entry(unsigned int index)
+ *
+ * Load TLBCAM[index] entry in to the L2 CAM MMU
+ */
+_GLOBAL(loadcam_entry)
+ LOAD_REG_ADDR(r4, TLBCAM)
+ mulli r5,r3,TLBCAM_SIZE
+ add r3,r5,r4
+ lwz r4,TLBCAM_MAS0(r3)
+ mtspr SPRN_MAS0,r4
+ lwz r4,TLBCAM_MAS1(r3)
+ mtspr SPRN_MAS1,r4
+ PPC_LL r4,TLBCAM_MAS2(r3)
+ mtspr SPRN_MAS2,r4
+ lwz r4,TLBCAM_MAS3(r3)
+ mtspr SPRN_MAS3,r4
+BEGIN_MMU_FTR_SECTION
+ lwz r4,TLBCAM_MAS7(r3)
+ mtspr SPRN_MAS7,r4
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_BIG_PHYS)
+ isync
+ tlbwe
+ isync
+ blr
+#endif
diff --git a/arch/powerpc/platforms/44x/Kconfig b/arch/powerpc/platforms/44x/Kconfig
index 7486bffd3ebb..eeba0a70e466 100644
--- a/arch/powerpc/platforms/44x/Kconfig
+++ b/arch/powerpc/platforms/44x/Kconfig
@@ -1,3 +1,12 @@
+config PPC_47x
+ bool "Support for 47x variant"
+ depends on 44x
+ default n
+ select MPIC
+ help
+ This option enables support for the 47x family of processors and is
+ not currently compatible with other 44x or 46x variants.
+
config BAMBOO
bool "Bamboo"
depends on 44x
@@ -151,6 +160,17 @@ config YOSEMITE
help
This option enables support for the AMCC PPC440EP evaluation board.
+config ISS4xx
+ bool "ISS 4xx Simulator"
+ depends on (44x || 40x)
+ default n
+ select 405GP if 40x
+ select 440GP if 44x && !PPC_47x
+ select PPC_FPU
+ select OF_RTC
+ help
+ This option enables support for the IBM ISS simulation environment.
+
#config LUAN
# bool "Luan"
# depends on 44x
diff --git a/arch/powerpc/platforms/44x/Makefile b/arch/powerpc/platforms/44x/Makefile
index ee6185aeaa3b..82ff326e0795 100644
--- a/arch/powerpc/platforms/44x/Makefile
+++ b/arch/powerpc/platforms/44x/Makefile
@@ -5,3 +5,4 @@ obj-$(CONFIG_SAM440EP) += sam440ep.o
obj-$(CONFIG_WARP) += warp.o
obj-$(CONFIG_XILINX_VIRTEX_5_FXT) += virtex.o
obj-$(CONFIG_XILINX_ML510) += virtex_ml510.o
+obj-$(CONFIG_ISS4xx) += iss4xx.o
diff --git a/arch/powerpc/platforms/44x/iss4xx.c b/arch/powerpc/platforms/44x/iss4xx.c
new file mode 100644
index 000000000000..aa46e9d1e771
--- /dev/null
+++ b/arch/powerpc/platforms/44x/iss4xx.c
@@ -0,0 +1,167 @@
+/*
+ * PPC476 board specific routines
+ *
+ * Copyright 2010 Torez Smith, IBM Corporation.
+ *
+ * Based on earlier code:
+ * Matt Porter <mporter@kernel.crashing.org>
+ * Copyright 2002-2005 MontaVista Software Inc.
+ *
+ * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
+ * Copyright (c) 2003-2005 Zultys Technologies
+ *
+ * Rewritten and ported to the merged powerpc tree:
+ * Copyright 2007 David Gibson <dwg@au1.ibm.com>, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/of_platform.h>
+#include <linux/rtc.h>
+
+#include <asm/machdep.h>
+#include <asm/prom.h>
+#include <asm/udbg.h>
+#include <asm/time.h>
+#include <asm/uic.h>
+#include <asm/ppc4xx.h>
+#include <asm/mpic.h>
+#include <asm/mmu.h>
+
+static __initdata struct of_device_id iss4xx_of_bus[] = {
+ { .compatible = "ibm,plb4", },
+ { .compatible = "ibm,plb6", },
+ { .compatible = "ibm,opb", },
+ { .compatible = "ibm,ebc", },
+ {},
+};
+
+static int __init iss4xx_device_probe(void)
+{
+ of_platform_bus_probe(NULL, iss4xx_of_bus, NULL);
+ of_instantiate_rtc();
+
+ return 0;
+}
+machine_device_initcall(iss4xx, iss4xx_device_probe);
+
+/* We can have either UICs or MPICs */
+static void __init iss4xx_init_irq(void)
+{
+ struct device_node *np;
+
+ /* Find top level interrupt controller */
+ for_each_node_with_property(np, "interrupt-controller") {
+ if (of_get_property(np, "interrupts", NULL) == NULL)
+ break;
+ }
+ if (np == NULL)
+ panic("Can't find top level interrupt controller");
+
+ /* Check type and do appropriate initialization */
+ if (of_device_is_compatible(np, "ibm,uic")) {
+ uic_init_tree();
+ ppc_md.get_irq = uic_get_irq;
+#ifdef CONFIG_MPIC
+ } else if (of_device_is_compatible(np, "chrp,open-pic")) {
+ /* The MPIC driver will get everything it needs from the
+ * device-tree, just pass 0 to all arguments
+ */
+ struct mpic *mpic = mpic_alloc(np, 0, MPIC_PRIMARY, 0, 0,
+ " MPIC ");
+ BUG_ON(mpic == NULL);
+ mpic_init(mpic);
+ ppc_md.get_irq = mpic_get_irq;
+#endif
+ } else
+ panic("Unrecognized top level interrupt controller");
+}
+
+#ifdef CONFIG_SMP
+static void __cpuinit smp_iss4xx_setup_cpu(int cpu)
+{
+ mpic_setup_this_cpu();
+}
+
+static void __cpuinit smp_iss4xx_kick_cpu(int cpu)
+{
+ struct device_node *cpunode = of_get_cpu_node(cpu, NULL);
+ const u64 *spin_table_addr_prop;
+ u32 *spin_table;
+ extern void start_secondary_47x(void);
+
+ BUG_ON(cpunode == NULL);
+
+ /* Assume spin table. We could test for the enable-method in
+ * the device-tree but currently there's little point as it's
+ * our only supported method
+ */
+ spin_table_addr_prop = of_get_property(cpunode, "cpu-release-addr",
+ NULL);
+ if (spin_table_addr_prop == NULL) {
+ pr_err("CPU%d: Can't start, missing cpu-release-addr !\n", cpu);
+ return;
+ }
+
+ /* Assume it's mapped as part of the linear mapping. This is a bit
+ * fishy but will work fine for now
+ */
+ spin_table = (u32 *)__va(*spin_table_addr_prop);
+ pr_debug("CPU%d: Spin table mapped at %p\n", cpu, spin_table);
+
+ spin_table[3] = cpu;
+ smp_wmb();
+ spin_table[1] = __pa(start_secondary_47x);
+ mb();
+}
+
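
The spin table protocol used above is a two-word handshake: the boot CPU publishes the new CPU's number, orders it with a barrier, then publishes the physical entry address, and the secondary sits in a holding loop until that address becomes non-zero. A sketch of the waiting side, in C for clarity only (the real 47x holding loop is in assembly):

static void secondary_spin_wait(volatile u32 *spin_table)
{
	while (spin_table[1] == 0)	/* release address not yet written */
		barrier();

	/*
	 * spin_table[3] already holds our logical CPU number;
	 * branch to the physical address now in spin_table[1].
	 */
}
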
+static struct smp_ops_t iss_smp_ops = {
+ .probe = smp_mpic_probe,
+ .message_pass = smp_mpic_message_pass,
+ .setup_cpu = smp_iss4xx_setup_cpu,
+ .kick_cpu = smp_iss4xx_kick_cpu,
+ .give_timebase = smp_generic_give_timebase,
+ .take_timebase = smp_generic_take_timebase,
+};
+
+static void __init iss4xx_smp_init(void)
+{
+ if (mmu_has_feature(MMU_FTR_TYPE_47x))
+ smp_ops = &iss_smp_ops;
+}
+
+#else /* CONFIG_SMP */
+static void __init iss4xx_smp_init(void) { }
+#endif /* CONFIG_SMP */
+
+static void __init iss4xx_setup_arch(void)
+{
+ iss4xx_smp_init();
+}
+
+/*
+ * Called very early, MMU is off, device-tree isn't unflattened
+ */
+static int __init iss4xx_probe(void)
+{
+ unsigned long root = of_get_flat_dt_root();
+
+ if (!of_flat_dt_is_compatible(root, "ibm,iss-4xx"))
+ return 0;
+
+ return 1;
+}
+
+define_machine(iss4xx) {
+ .name = "ISS-4xx",
+ .probe = iss4xx_probe,
+ .progress = udbg_progress,
+ .init_IRQ = iss4xx_init_irq,
+ .setup_arch = iss4xx_setup_arch,
+ .restart = ppc4xx_reset_system,
+ .calibrate_decr = generic_calibrate_decr,
+};
diff --git a/arch/powerpc/platforms/512x/mpc512x_shared.c b/arch/powerpc/platforms/512x/mpc512x_shared.c
index b7f518a60f03..707e572b7c40 100644
--- a/arch/powerpc/platforms/512x/mpc512x_shared.c
+++ b/arch/powerpc/platforms/512x/mpc512x_shared.c
@@ -22,6 +22,7 @@
#include <asm/prom.h>
#include <asm/time.h>
#include <asm/mpc5121.h>
+#include <asm/mpc52xx_psc.h>
#include "mpc512x.h"
@@ -95,9 +96,86 @@ void __init mpc512x_declare_of_platform_devices(void)
}
}
+#define DEFAULT_FIFO_SIZE 16
+
+static unsigned int __init get_fifo_size(struct device_node *np,
+ char *prop_name)
+{
+ const unsigned int *fp;
+
+ fp = of_get_property(np, prop_name, NULL);
+ if (fp)
+ return *fp;
+
+ pr_warning("no %s property in %s node, defaulting to %d\n",
+ prop_name, np->full_name, DEFAULT_FIFO_SIZE);
+
+ return DEFAULT_FIFO_SIZE;
+}
+
+#define FIFOC(_base) ((struct mpc512x_psc_fifo __iomem *) \
+ ((u32)(_base) + sizeof(struct mpc52xx_psc)))
+
+/* Init PSC FIFO space for TX and RX slices */
+void __init mpc512x_psc_fifo_init(void)
+{
+ struct device_node *np;
+ void __iomem *psc;
+ unsigned int tx_fifo_size;
+ unsigned int rx_fifo_size;
+ int fifobase = 0; /* current fifo address in 32 bit words */
+
+ for_each_compatible_node(np, NULL, "fsl,mpc5121-psc") {
+ tx_fifo_size = get_fifo_size(np, "fsl,tx-fifo-size");
+ rx_fifo_size = get_fifo_size(np, "fsl,rx-fifo-size");
+
+ /* size in register is in 4 byte units */
+ tx_fifo_size /= 4;
+ rx_fifo_size /= 4;
+ if (!tx_fifo_size)
+ tx_fifo_size = 1;
+ if (!rx_fifo_size)
+ rx_fifo_size = 1;
+
+ psc = of_iomap(np, 0);
+ if (!psc) {
+ pr_err("%s: Can't map %s device\n",
+ __func__, np->full_name);
+ continue;
+ }
+
+ /* FIFO space is 4KiB, check if requested size is available */
+ if ((fifobase + tx_fifo_size + rx_fifo_size) > 0x1000) {
+ pr_err("%s: no fifo space available for %s\n",
+ __func__, np->full_name);
+ iounmap(psc);
+ /*
+ * chances are that another device requests less
+ * fifo space, so we continue.
+ */
+ continue;
+ }
+
+ /* set tx and rx fifo size registers */
+ out_be32(&FIFOC(psc)->txsz, (fifobase << 16) | tx_fifo_size);
+ fifobase += tx_fifo_size;
+ out_be32(&FIFOC(psc)->rxsz, (fifobase << 16) | rx_fifo_size);
+ fifobase += rx_fifo_size;
+
+ /* reset and enable the slices */
+ out_be32(&FIFOC(psc)->txcmd, 0x80);
+ out_be32(&FIFOC(psc)->txcmd, 0x01);
+ out_be32(&FIFOC(psc)->rxcmd, 0x80);
+ out_be32(&FIFOC(psc)->rxcmd, 0x01);
+
+ iounmap(psc);
+ }
+}
+
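
The txsz/rxsz writes pack the slice's start offset (in 32-bit words) into the upper 16 bits and its length into the lower 16 bits, which is why fifobase advances after each write. A worked example for the first PSC with the 16-byte defaults:

/*
 * tx_fifo_size = 16 / 4 = 4 words, rx_fifo_size = 4 words
 * txsz = (0 << 16) | 4   -> TX slice at word 0, 4 words long
 * rxsz = (4 << 16) | 4   -> RX slice at word 4, 4 words long
 * fifobase is 8 when the next PSC is laid out.
 */
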
void __init mpc512x_init(void)
{
mpc512x_declare_of_platform_devices();
mpc5121_clk_init();
mpc512x_restart_init();
+ mpc512x_psc_fifo_init();
}
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_gpio.c b/arch/powerpc/platforms/52xx/mpc52xx_gpio.c
index fda7c2a18282..ca5305a5bd61 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_gpio.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_gpio.c
@@ -168,7 +168,7 @@ static int __devinit mpc52xx_wkup_gpiochip_probe(struct of_device *ofdev,
ofchip->gc.get = mpc52xx_wkup_gpio_get;
ofchip->gc.set = mpc52xx_wkup_gpio_set;
- ret = of_mm_gpiochip_add(ofdev->node, &chip->mmchip);
+ ret = of_mm_gpiochip_add(ofdev->dev.of_node, &chip->mmchip);
if (ret)
return ret;
@@ -193,8 +193,11 @@ static const struct of_device_id mpc52xx_wkup_gpiochip_match[] = {
};
static struct of_platform_driver mpc52xx_wkup_gpiochip_driver = {
- .name = "gpio_wkup",
- .match_table = mpc52xx_wkup_gpiochip_match,
+ .driver = {
+ .name = "gpio_wkup",
+ .owner = THIS_MODULE,
+ .of_match_table = mpc52xx_wkup_gpiochip_match,
+ },
.probe = mpc52xx_wkup_gpiochip_probe,
.remove = mpc52xx_gpiochip_remove,
};
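
The same mechanical conversion recurs for every of_platform_driver touched in this series: the name and match table move from the outer structure into the embedded struct device_driver, which also gains an owner. Schematically, under the post-conversion layout assumed here (example_* names are placeholders):

static struct of_platform_driver example_driver = {
	.driver = {
		.name		= "example",
		.owner		= THIS_MODULE,
		.of_match_table	= example_match,	/* was .match_table */
	},
	.probe	= example_probe,
	.remove	= example_remove,
};
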
@@ -329,7 +332,7 @@ static int __devinit mpc52xx_simple_gpiochip_probe(struct of_device *ofdev,
ofchip->gc.get = mpc52xx_simple_gpio_get;
ofchip->gc.set = mpc52xx_simple_gpio_set;
- ret = of_mm_gpiochip_add(ofdev->node, &chip->mmchip);
+ ret = of_mm_gpiochip_add(ofdev->dev.of_node, &chip->mmchip);
if (ret)
return ret;
@@ -349,8 +352,11 @@ static const struct of_device_id mpc52xx_simple_gpiochip_match[] = {
};
static struct of_platform_driver mpc52xx_simple_gpiochip_driver = {
- .name = "gpio",
- .match_table = mpc52xx_simple_gpiochip_match,
+ .driver = {
+ .name = "gpio",
+ .owner = THIS_MODULE,
+ .of_match_table = mpc52xx_simple_gpiochip_match,
+ },
.probe = mpc52xx_simple_gpiochip_probe,
.remove = mpc52xx_gpiochip_remove,
};
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
index a60ee39d3b78..46c93578cbf0 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
@@ -734,8 +734,8 @@ static int __devinit mpc52xx_gpt_probe(struct of_device *ofdev,
spin_lock_init(&gpt->lock);
gpt->dev = &ofdev->dev;
- gpt->ipb_freq = mpc5xxx_get_bus_frequency(ofdev->node);
- gpt->regs = of_iomap(ofdev->node, 0);
+ gpt->ipb_freq = mpc5xxx_get_bus_frequency(ofdev->dev.of_node);
+ gpt->regs = of_iomap(ofdev->dev.of_node, 0);
if (!gpt->regs) {
kfree(gpt);
return -ENOMEM;
@@ -743,21 +743,21 @@ static int __devinit mpc52xx_gpt_probe(struct of_device *ofdev,
dev_set_drvdata(&ofdev->dev, gpt);
- mpc52xx_gpt_gpio_setup(gpt, ofdev->node);
- mpc52xx_gpt_irq_setup(gpt, ofdev->node);
+ mpc52xx_gpt_gpio_setup(gpt, ofdev->dev.of_node);
+ mpc52xx_gpt_irq_setup(gpt, ofdev->dev.of_node);
mutex_lock(&mpc52xx_gpt_list_mutex);
list_add(&gpt->list, &mpc52xx_gpt_list);
mutex_unlock(&mpc52xx_gpt_list_mutex);
/* check if this device could be a watchdog */
- if (of_get_property(ofdev->node, "fsl,has-wdt", NULL) ||
- of_get_property(ofdev->node, "has-wdt", NULL)) {
+ if (of_get_property(ofdev->dev.of_node, "fsl,has-wdt", NULL) ||
+ of_get_property(ofdev->dev.of_node, "has-wdt", NULL)) {
const u32 *on_boot_wdt;
gpt->wdt_mode = MPC52xx_GPT_CAN_WDT;
- on_boot_wdt = of_get_property(ofdev->node, "fsl,wdt-on-boot",
- NULL);
+ on_boot_wdt = of_get_property(ofdev->dev.of_node,
+ "fsl,wdt-on-boot", NULL);
if (on_boot_wdt) {
dev_info(gpt->dev, "used as watchdog\n");
gpt->wdt_mode |= MPC52xx_GPT_IS_WDT;
@@ -784,8 +784,11 @@ static const struct of_device_id mpc52xx_gpt_match[] = {
};
static struct of_platform_driver mpc52xx_gpt_driver = {
- .name = "mpc52xx-gpt",
- .match_table = mpc52xx_gpt_match,
+ .driver = {
+ .name = "mpc52xx-gpt",
+ .owner = THIS_MODULE,
+ .of_match_table = mpc52xx_gpt_match,
+ },
.probe = mpc52xx_gpt_probe,
.remove = mpc52xx_gpt_remove,
};
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c
index d4f8be307cd5..e86aec644501 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c
@@ -445,14 +445,14 @@ mpc52xx_lpbfifo_probe(struct of_device *op, const struct of_device_id *match)
if (lpbfifo.dev != NULL)
return -ENOSPC;
- lpbfifo.irq = irq_of_parse_and_map(op->node, 0);
+ lpbfifo.irq = irq_of_parse_and_map(op->dev.of_node, 0);
if (!lpbfifo.irq)
return -ENODEV;
- if (of_address_to_resource(op->node, 0, &res))
+ if (of_address_to_resource(op->dev.of_node, 0, &res))
return -ENODEV;
lpbfifo.regs_phys = res.start;
- lpbfifo.regs = of_iomap(op->node, 0);
+ lpbfifo.regs = of_iomap(op->dev.of_node, 0);
if (!lpbfifo.regs)
return -ENOMEM;
@@ -537,9 +537,11 @@ static struct of_device_id mpc52xx_lpbfifo_match[] __devinitconst = {
};
static struct of_platform_driver mpc52xx_lpbfifo_driver = {
- .owner = THIS_MODULE,
- .name = "mpc52xx-lpbfifo",
- .match_table = mpc52xx_lpbfifo_match,
+ .driver = {
+ .name = "mpc52xx-lpbfifo",
+ .owner = THIS_MODULE,
+ .of_match_table = mpc52xx_lpbfifo_match,
+ },
.probe = mpc52xx_lpbfifo_probe,
.remove = __devexit_p(mpc52xx_lpbfifo_remove),
};
diff --git a/arch/powerpc/platforms/82xx/ep8248e.c b/arch/powerpc/platforms/82xx/ep8248e.c
index f21555d3395a..9f2e52b36f91 100644
--- a/arch/powerpc/platforms/82xx/ep8248e.c
+++ b/arch/powerpc/platforms/82xx/ep8248e.c
@@ -119,12 +119,12 @@ static int __devinit ep8248e_mdio_probe(struct of_device *ofdev,
struct device_node *node;
int ret;
- node = of_get_parent(ofdev->node);
+ node = of_get_parent(ofdev->dev.of_node);
of_node_put(node);
if (node != ep8248e_bcsr_node)
return -ENODEV;
- ret = of_address_to_resource(ofdev->node, 0, &res);
+ ret = of_address_to_resource(ofdev->dev.of_node, 0, &res);
if (ret)
return ret;
@@ -142,7 +142,7 @@ static int __devinit ep8248e_mdio_probe(struct of_device *ofdev,
bus->parent = &ofdev->dev;
snprintf(bus->id, MII_BUS_ID_SIZE, "%x", res.start);
- ret = of_mdiobus_register(bus, ofdev->node);
+ ret = of_mdiobus_register(bus, ofdev->dev.of_node);
if (ret)
goto err_free_irq;
@@ -170,8 +170,9 @@ static const struct of_device_id ep8248e_mdio_match[] = {
static struct of_platform_driver ep8248e_mdio_driver = {
.driver = {
.name = "ep8248e-mdio-bitbang",
+ .owner = THIS_MODULE,
+ .of_match_table = ep8248e_mdio_match,
},
- .match_table = ep8248e_mdio_match,
.probe = ep8248e_mdio_probe,
.remove = ep8248e_mdio_remove,
};
diff --git a/arch/powerpc/platforms/83xx/mpc831x_rdb.c b/arch/powerpc/platforms/83xx/mpc831x_rdb.c
index 0b4f883b20eb..ae525e4745d2 100644
--- a/arch/powerpc/platforms/83xx/mpc831x_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc831x_rdb.c
@@ -74,6 +74,7 @@ static int __init mpc831x_rdb_probe(void)
static struct of_device_id __initdata of_bus_ids[] = {
{ .compatible = "simple-bus" },
{ .compatible = "gianfar" },
+ { .compatible = "gpio-leds", },
{},
};
diff --git a/arch/powerpc/platforms/83xx/mpc837x_rdb.c b/arch/powerpc/platforms/83xx/mpc837x_rdb.c
index a1908d261240..e00801c42540 100644
--- a/arch/powerpc/platforms/83xx/mpc837x_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc837x_rdb.c
@@ -72,6 +72,7 @@ static struct of_device_id mpc837x_ids[] = {
{ .compatible = "soc", },
{ .compatible = "simple-bus", },
{ .compatible = "gianfar", },
+ { .compatible = "gpio-leds", },
{},
};
diff --git a/arch/powerpc/platforms/83xx/suspend.c b/arch/powerpc/platforms/83xx/suspend.c
index 43805348b81e..ebe6c3537209 100644
--- a/arch/powerpc/platforms/83xx/suspend.c
+++ b/arch/powerpc/platforms/83xx/suspend.c
@@ -321,7 +321,7 @@ static struct platform_suspend_ops mpc83xx_suspend_ops = {
static int pmc_probe(struct of_device *ofdev,
const struct of_device_id *match)
{
- struct device_node *np = ofdev->node;
+ struct device_node *np = ofdev->dev.of_node;
struct resource res;
struct pmc_type *type = match->data;
int ret = 0;
@@ -423,8 +423,11 @@ static struct of_device_id pmc_match[] = {
};
static struct of_platform_driver pmc_driver = {
- .name = "mpc83xx-pmc",
- .match_table = pmc_match,
+ .driver = {
+ .name = "mpc83xx-pmc",
+ .owner = THIS_MODULE,
+ .of_match_table = pmc_match,
+ },
.probe = pmc_probe,
.remove = pmc_remove
};
diff --git a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
index 5abe137f6309..018cc67be426 100644
--- a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
+++ b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
@@ -83,7 +83,8 @@ static struct of_device_id __initdata mpc8610_ids[] = {
{ .compatible = "fsl,mpc8610-immr", },
{ .compatible = "fsl,mpc8610-guts", },
{ .compatible = "simple-bus", },
- { .compatible = "gianfar", },
+ /* So that the DMA channel nodes can be probed individually: */
+ { .compatible = "fsl,eloplus-dma", },
{}
};
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index a8aae0b54579..d361f8119b1e 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -43,7 +43,7 @@ config 40x
select PPC_PCI_CHOICE
config 44x
- bool "AMCC 44x"
+ bool "AMCC 44x, 46x or 47x"
select PPC_DCR_NATIVE
select PPC_UDBG_16550
select 4xx_SOC
@@ -294,7 +294,7 @@ config PPC_PERF_CTRS
This enables the powerpc-specific perf_event back-end.
config SMP
- depends on PPC_BOOK3S || PPC_BOOK3E || FSL_BOOKE
+ depends on PPC_BOOK3S || PPC_BOOK3E || FSL_BOOKE || PPC_47x
bool "Symmetric multi-processing support"
---help---
This enables support for systems with more than one CPU. If you have
@@ -322,6 +322,7 @@ config NR_CPUS
config NOT_COHERENT_CACHE
bool
depends on 4xx || 8xx || E200 || PPC_MPC512x || GAMECUBE_COMMON
+ default n if PPC_47x
default y
config CHECK_CACHE_COHERENCY
diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c
index 8efe48192f3f..6257e5378615 100644
--- a/arch/powerpc/platforms/cell/axon_msi.c
+++ b/arch/powerpc/platforms/cell/axon_msi.c
@@ -345,7 +345,7 @@ static int axon_msi_shutdown(struct of_device *device)
static int axon_msi_probe(struct of_device *device,
const struct of_device_id *device_id)
{
- struct device_node *dn = device->node;
+ struct device_node *dn = device->dev.of_node;
struct axon_msic *msic;
unsigned int virq;
int dcr_base, dcr_len;
@@ -447,11 +447,12 @@ static const struct of_device_id axon_msi_device_id[] = {
};
static struct of_platform_driver axon_msi_driver = {
- .match_table = axon_msi_device_id,
.probe = axon_msi_probe,
.shutdown = axon_msi_shutdown,
- .driver = {
- .name = "axon-msi"
+ .driver = {
+ .name = "axon-msi",
+ .owner = THIS_MODULE,
+ .of_match_table = axon_msi_device_id,
},
};
diff --git a/arch/powerpc/platforms/cell/cbe_cpufreq.c b/arch/powerpc/platforms/cell/cbe_cpufreq.c
index e6506cd0ff94..bfa2c0cb3d1e 100644
--- a/arch/powerpc/platforms/cell/cbe_cpufreq.c
+++ b/arch/powerpc/platforms/cell/cbe_cpufreq.c
@@ -118,7 +118,7 @@ static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy)
policy->cur = cbe_freqs[cur_pmode].frequency;
#ifdef CONFIG_SMP
- cpumask_copy(policy->cpus, &per_cpu(cpu_sibling_map, policy->cpu));
+ cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu));
#endif
cpufreq_frequency_table_get_attr(cbe_freqs, policy->cpu);
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index e3ec4976fae7..22667a09d40e 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -545,7 +545,6 @@ static struct iommu_table *cell_get_iommu_table(struct device *dev)
{
struct iommu_window *window;
struct cbe_iommu *iommu;
- struct dev_archdata *archdata = &dev->archdata;
/* Current implementation uses the first window available in that
* node's iommu. We -might- do something smarter later though it may
@@ -554,7 +553,7 @@ static struct iommu_table *cell_get_iommu_table(struct device *dev)
iommu = cell_iommu_for_node(dev_to_node(dev));
if (iommu == NULL || list_empty(&iommu->windows)) {
printk(KERN_ERR "iommu: missing iommu for %s (node %d)\n",
- archdata->of_node ? archdata->of_node->full_name : "?",
+ dev->of_node ? dev->of_node->full_name : "?",
dev_to_node(dev));
return NULL;
}
@@ -897,7 +896,7 @@ static u64 cell_iommu_get_fixed_address(struct device *dev)
const u32 *ranges = NULL;
int i, len, best, naddr, nsize, pna, range_size;
- np = of_node_get(dev->archdata.of_node);
+ np = of_node_get(dev->of_node);
while (1) {
naddr = of_n_addr_cells(np);
nsize = of_n_size_cells(np);
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index 5c2808252516..1a40da92154c 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -1849,8 +1849,7 @@ out:
return ret;
}
-static int spufs_mfc_fsync(struct file *file, struct dentry *dentry,
- int datasync)
+static int spufs_mfc_fsync(struct file *file, int datasync)
{
return spufs_mfc_flush(file, NULL);
}
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index fc1b1c42b1dc..e5e5f823d687 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -251,7 +251,7 @@ const struct file_operations spufs_context_fops = {
.llseek = dcache_dir_lseek,
.read = generic_read_dir,
.readdir = dcache_readdir,
- .fsync = simple_sync_file,
+ .fsync = noop_fsync,
};
EXPORT_SYMBOL_GPL(spufs_context_fops);
diff --git a/arch/powerpc/platforms/embedded6xx/wii.c b/arch/powerpc/platforms/embedded6xx/wii.c
index 57e5b608fa1a..174a04ac4806 100644
--- a/arch/powerpc/platforms/embedded6xx/wii.c
+++ b/arch/powerpc/platforms/embedded6xx/wii.c
@@ -69,10 +69,10 @@ void __init wii_memory_fixups(void)
/*
* This is part of a workaround to allow the use of two
- * discontiguous RAM ranges on the Wii, even if this is
+ * discontinuous RAM ranges on the Wii, even if this is
* currently unsupported on 32-bit PowerPC Linux.
*
- * We coealesce the two memory ranges of the Wii into a
+ * We coalesce the two memory ranges of the Wii into a
* single range, then create a reservation for the "hole"
* between both ranges.
*/
diff --git a/arch/powerpc/platforms/iseries/exception.S b/arch/powerpc/platforms/iseries/exception.S
index fba5bf915073..32a56c6dfa72 100644
--- a/arch/powerpc/platforms/iseries/exception.S
+++ b/arch/powerpc/platforms/iseries/exception.S
@@ -252,8 +252,8 @@ decrementer_iSeries_masked:
li r11,1
ld r12,PACALPPACAPTR(r13)
stb r11,LPPACADECRINT(r12)
- LOAD_REG_IMMEDIATE(r12, tb_ticks_per_jiffy)
- lwz r12,0(r12)
+ li r12,-1
+ clrldi r12,r12,33 /* set DEC to 0x7fffffff */
mtspr SPRN_DEC,r12
/* fall through */
diff --git a/arch/powerpc/platforms/iseries/pci.c b/arch/powerpc/platforms/iseries/pci.c
index b841c9a9db87..3fc2e6494b8b 100644
--- a/arch/powerpc/platforms/iseries/pci.c
+++ b/arch/powerpc/platforms/iseries/pci.c
@@ -32,6 +32,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/of.h>
+#include <linux/ratelimit.h>
#include <asm/types.h>
#include <asm/io.h>
@@ -584,14 +585,9 @@ static inline struct device_node *xlate_iomm_address(
orig_addr = (unsigned long __force)addr;
if ((orig_addr < BASE_IO_MEMORY) || (orig_addr >= max_io_memory)) {
- static unsigned long last_jiffies;
- static int num_printed;
+ static DEFINE_RATELIMIT_STATE(ratelimit, 60 * HZ, 10);
- if (time_after(jiffies, last_jiffies + 60 * HZ)) {
- last_jiffies = jiffies;
- num_printed = 0;
- }
- if (num_printed++ < 10)
+ if (__ratelimit(&ratelimit))
printk(KERN_ERR
"iSeries_%s: invalid access at IO address %p\n",
func, addr);
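
DEFINE_RATELIMIT_STATE(name, interval, burst) plus __ratelimit() replaces the hand-rolled jiffies bookkeeping removed above; __ratelimit() returns non-zero while the burst budget for the current interval lasts. A minimal sketch of the same policy in another caller (report_bad_access() is illustrative only):

#include <linux/ratelimit.h>

static void report_bad_access(const void __iomem *addr)
{
	/* at most 10 messages per 60 seconds, as in the code above */
	static DEFINE_RATELIMIT_STATE(rs, 60 * HZ, 10);

	if (__ratelimit(&rs))
		printk(KERN_ERR "invalid access at %p\n", addr);
}
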
diff --git a/arch/powerpc/platforms/iseries/smp.c b/arch/powerpc/platforms/iseries/smp.c
index 722335e32fd4..6590850045af 100644
--- a/arch/powerpc/platforms/iseries/smp.c
+++ b/arch/powerpc/platforms/iseries/smp.c
@@ -83,7 +83,7 @@ static void smp_iSeries_message_pass(int target, int msg)
static int smp_iSeries_probe(void)
{
- return cpus_weight(cpu_possible_map);
+ return cpumask_weight(cpu_possible_mask);
}
static void smp_iSeries_kick_cpu(int nr)
diff --git a/arch/powerpc/platforms/pasemi/cpufreq.c b/arch/powerpc/platforms/pasemi/cpufreq.c
index d35e0520abf0..c16537bc0c6e 100644
--- a/arch/powerpc/platforms/pasemi/cpufreq.c
+++ b/arch/powerpc/platforms/pasemi/cpufreq.c
@@ -213,7 +213,7 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
pr_debug("current astate is at %d\n",cur_astate);
policy->cur = pas_freqs[cur_astate].frequency;
- cpumask_copy(policy->cpus, &cpu_online_map);
+ cpumask_copy(policy->cpus, cpu_online_mask);
ppc_proc_freq = policy->cur * 1000ul;
diff --git a/arch/powerpc/platforms/pasemi/gpio_mdio.c b/arch/powerpc/platforms/pasemi/gpio_mdio.c
index 0f881f64583e..627ee089e75d 100644
--- a/arch/powerpc/platforms/pasemi/gpio_mdio.c
+++ b/arch/powerpc/platforms/pasemi/gpio_mdio.c
@@ -220,7 +220,7 @@ static int __devinit gpio_mdio_probe(struct of_device *ofdev,
const struct of_device_id *match)
{
struct device *dev = &ofdev->dev;
- struct device_node *np = ofdev->node;
+ struct device_node *np = ofdev->dev.of_node;
struct mii_bus *new_bus;
struct gpio_priv *priv;
const unsigned int *prop;
@@ -301,11 +301,12 @@ MODULE_DEVICE_TABLE(of, gpio_mdio_match);
static struct of_platform_driver gpio_mdio_driver =
{
- .match_table = gpio_mdio_match,
.probe = gpio_mdio_probe,
.remove = gpio_mdio_remove,
- .driver = {
- .name = "gpio-mdio-bitbang",
+ .driver = {
+ .name = "gpio-mdio-bitbang",
+ .owner = THIS_MODULE,
+ .of_match_table = gpio_mdio_match,
},
};
diff --git a/arch/powerpc/platforms/pasemi/setup.c b/arch/powerpc/platforms/pasemi/setup.c
index ac6fdd973291..f372ec1691a3 100644
--- a/arch/powerpc/platforms/pasemi/setup.c
+++ b/arch/powerpc/platforms/pasemi/setup.c
@@ -360,10 +360,10 @@ static int pcmcia_notify(struct notifier_block *nb, unsigned long action,
/* We know electra_cf devices will always have of_node set, since
* electra_cf is an of_platform driver.
*/
- if (!parent->archdata.of_node)
+ if (!parent->of_node)
return 0;
- if (!of_device_is_compatible(parent->archdata.of_node, "electra-cf"))
+ if (!of_device_is_compatible(parent->of_node, "electra-cf"))
return 0;
/* We use the direct ops for localbus */
diff --git a/arch/powerpc/platforms/powermac/cpufreq_64.c b/arch/powerpc/platforms/powermac/cpufreq_64.c
index 3ca09d3ccce3..9650c6029c82 100644
--- a/arch/powerpc/platforms/powermac/cpufreq_64.c
+++ b/arch/powerpc/platforms/powermac/cpufreq_64.c
@@ -362,7 +362,7 @@ static int g5_cpufreq_cpu_init(struct cpufreq_policy *policy)
/* secondary CPUs are tied to the primary one by the
* cpufreq core if in the secondary policy we tell it that
* it actually must be one policy together with all others. */
- cpumask_copy(policy->cpus, &cpu_online_map);
+ cpumask_copy(policy->cpus, cpu_online_mask);
cpufreq_frequency_table_get_attr(g5_cpu_freqs, policy->cpu);
return cpufreq_frequency_table_cpuinfo(policy,
diff --git a/arch/powerpc/platforms/powermac/low_i2c.c b/arch/powerpc/platforms/powermac/low_i2c.c
index f45331ab97cb..06a137c5b8bb 100644
--- a/arch/powerpc/platforms/powermac/low_i2c.c
+++ b/arch/powerpc/platforms/powermac/low_i2c.c
@@ -592,7 +592,7 @@ static void __init kw_i2c_probe(void)
/* Probe keywest-i2c busses */
for_each_compatible_node(np, "i2c","keywest-i2c") {
struct pmac_i2c_host_kw *host;
- int multibus, chans, i;
+ int multibus;
/* Found one, init a host structure */
host = kw_i2c_host_init(np);
@@ -614,6 +614,8 @@ static void __init kw_i2c_probe(void)
* parent type
*/
if (multibus) {
+ int chans, i;
+
parent = of_get_parent(np);
if (parent == NULL)
continue;
@@ -1258,8 +1260,7 @@ static void pmac_i2c_do_end(struct pmf_function *func, void *instdata)
if (inst == NULL)
return;
pmac_i2c_close(inst->bus);
- if (inst)
- kfree(inst);
+ kfree(inst);
}
static int pmac_i2c_do_read(PMF_STD_ARGS, u32 len)
diff --git a/arch/powerpc/platforms/powermac/pmac.h b/arch/powerpc/platforms/powermac/pmac.h
index 3362e781b6a7..f0bc08f6c1f0 100644
--- a/arch/powerpc/platforms/powermac/pmac.h
+++ b/arch/powerpc/platforms/powermac/pmac.h
@@ -33,6 +33,8 @@ extern void pmac_setup_pci_dma(void);
extern void pmac_check_ht_link(void);
extern void pmac_setup_smp(void);
+extern void pmac32_cpu_die(void);
+extern void low_cpu_die(void) __attribute__((noreturn));
extern int pmac_nvram_init(void);
extern void pmac_pic_init(void);
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c
index 15c2241f9c72..f1d0132ebcc7 100644
--- a/arch/powerpc/platforms/powermac/setup.c
+++ b/arch/powerpc/platforms/powermac/setup.c
@@ -480,7 +480,7 @@ static void __init pmac_init_early(void)
#endif
/* SMP Init has to be done early as we need to patch up
- * cpu_possible_map before interrupt stacks are allocated
+ * cpu_possible_mask before interrupt stacks are allocated
* or kaboom...
*/
#ifdef CONFIG_SMP
@@ -646,7 +646,7 @@ static int pmac_pci_probe_mode(struct pci_bus *bus)
/* access per cpu vars from generic smp.c */
DECLARE_PER_CPU(int, cpu_state);
-static void pmac_cpu_die(void)
+static void pmac64_cpu_die(void)
{
/*
* turn off as much as possible, we'll be
@@ -717,8 +717,13 @@ define_machine(powermac) {
.pcibios_after_init = pmac_pcibios_after_init,
.phys_mem_access_prot = pci_phys_mem_access_prot,
#endif
-#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PPC64)
- .cpu_die = pmac_cpu_die,
+#ifdef CONFIG_HOTPLUG_CPU
+#ifdef CONFIG_PPC64
+ .cpu_die = pmac64_cpu_die,
+#endif
+#ifdef CONFIG_PPC32
+ .cpu_die = pmac32_cpu_die,
+#endif
#endif
#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PPC32)
.cpu_die = generic_mach_cpu_die,
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index 6898e8241cd0..c95215f4f8b6 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -53,6 +53,8 @@
#include <asm/pmac_low_i2c.h>
#include <asm/pmac_pfunc.h>
+#include "pmac.h"
+
#undef DEBUG
#ifdef DEBUG
@@ -315,7 +317,7 @@ static int __init smp_psurge_probe(void)
/* This is necessary because OF doesn't know about the
* secondary cpu(s), and thus there aren't nodes in the
* device tree for them, and smp_setup_cpu_maps hasn't
- * set their bits in cpu_present_map.
+ * set their bits in cpu_present_mask.
*/
if (ncpus > NR_CPUS)
ncpus = NR_CPUS;
@@ -878,10 +880,9 @@ int smp_core99_cpu_disable(void)
return 0;
}
-extern void low_cpu_die(void) __attribute__((noreturn)); /* in sleep.S */
static int cpu_dead[NR_CPUS];
-void cpu_die(void)
+void pmac32_cpu_die(void)
{
local_irq_disable();
cpu_dead[smp_processor_id()] = 1;
@@ -944,7 +945,7 @@ void __init pmac_setup_smp(void)
}
#ifdef CONFIG_PPC32
else {
- /* We have to set bits in cpu_possible_map here since the
+ /* We have to set bits in cpu_possible_mask here since the
* secondary CPU(s) aren't in the device tree. Various
* things won't be initialized for CPUs not in the possible
* map, so we really need to fix it up here.
diff --git a/arch/powerpc/platforms/ps3/system-bus.c b/arch/powerpc/platforms/ps3/system-bus.c
index 6d09f5e3e7e4..23083c397528 100644
--- a/arch/powerpc/platforms/ps3/system-bus.c
+++ b/arch/powerpc/platforms/ps3/system-bus.c
@@ -766,7 +766,7 @@ int ps3_system_bus_device_register(struct ps3_system_bus_device *dev)
BUG();
};
- dev->core.archdata.of_node = NULL;
+ dev->core.of_node = NULL;
set_dev_node(&dev->core, 0);
pr_debug("%s:%d add %s\n", __func__, __LINE__, dev_name(&dev->core));
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile
index 0ff5174ae4f5..3dbef309bc8d 100644
--- a/arch/powerpc/platforms/pseries/Makefile
+++ b/arch/powerpc/platforms/pseries/Makefile
@@ -7,7 +7,7 @@ EXTRA_CFLAGS += -DDEBUG
endif
obj-y := lpar.o hvCall.o nvram.o reconfig.o \
- setup.o iommu.o ras.o \
+ setup.o iommu.o event_sources.o ras.o \
firmware.o power.o dlpar.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_XICS) += xics.o
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index e1682bc168a3..d71e58584086 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -79,13 +79,12 @@ static struct device_node *dlpar_parse_cc_node(struct cc_workarea *ccwa)
* prepend this to the full_name.
*/
name = (char *)ccwa + ccwa->name_offset;
- dn->full_name = kmalloc(strlen(name) + 2, GFP_KERNEL);
+ dn->full_name = kasprintf(GFP_KERNEL, "/%s", name);
if (!dn->full_name) {
kfree(dn);
return NULL;
}
- sprintf(dn->full_name, "/%s", name);
return dn;
}
@@ -410,15 +409,13 @@ static ssize_t dlpar_cpu_probe(const char *buf, size_t count)
* directory of the device tree. CPUs actually live in the
* cpus directory so we need to fixup the full_name.
*/
- cpu_name = kzalloc(strlen(dn->full_name) + strlen("/cpus") + 1,
- GFP_KERNEL);
+ cpu_name = kasprintf(GFP_KERNEL, "/cpus%s", dn->full_name);
if (!cpu_name) {
dlpar_free_cc_nodes(dn);
rc = -ENOMEM;
goto out;
}
- sprintf(cpu_name, "/cpus%s", dn->full_name);
kfree(dn->full_name);
dn->full_name = cpu_name;
@@ -433,6 +430,7 @@ static ssize_t dlpar_cpu_probe(const char *buf, size_t count)
if (rc) {
dlpar_release_drc(drc_index);
dlpar_free_cc_nodes(dn);
+ goto out;
}
rc = dlpar_online_cpu(dn);
diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c
index 7df7fbb7cacb..34b7dc12e731 100644
--- a/arch/powerpc/platforms/pseries/eeh.c
+++ b/arch/powerpc/platforms/pseries/eeh.c
@@ -749,7 +749,7 @@ static void __rtas_set_slot_reset(struct pci_dn *pdn)
/* Determine type of EEH reset required by device,
* default hot reset or fundamental reset
*/
- if (dev->needs_freset)
+ if (dev && dev->needs_freset)
rtas_pci_slot_reset(pdn, 3);
else
rtas_pci_slot_reset(pdn, 1);
diff --git a/arch/powerpc/platforms/pseries/event_sources.c b/arch/powerpc/platforms/pseries/event_sources.c
new file mode 100644
index 000000000000..e889c9d9586a
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/event_sources.c
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2001 Dave Engebretsen IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <asm/prom.h>
+
+#include "pseries.h"
+
+void request_event_sources_irqs(struct device_node *np,
+ irq_handler_t handler,
+ const char *name)
+{
+ int i, index, count = 0;
+ struct of_irq oirq;
+ const u32 *opicprop;
+ unsigned int opicplen;
+ unsigned int virqs[16];
+
+ /* Check for obsolete "open-pic-interrupt" property. If present, then
+ * map those interrupts using the default interrupt host and default
+ * trigger
+ */
+ opicprop = of_get_property(np, "open-pic-interrupt", &opicplen);
+ if (opicprop) {
+ opicplen /= sizeof(u32);
+ for (i = 0; i < opicplen; i++) {
+ if (count > 15)
+ break;
+ virqs[count] = irq_create_mapping(NULL, *(opicprop++));
+ if (virqs[count] == NO_IRQ)
+ printk(KERN_ERR "Unable to allocate interrupt "
+ "number for %s\n", np->full_name);
+ else
+ count++;
+
+ }
+ }
+ /* Else use normal interrupt tree parsing */
+ else {
+ /* First try to do a proper OF tree parsing */
+ for (index = 0; of_irq_map_one(np, index, &oirq) == 0;
+ index++) {
+ if (count > 15)
+ break;
+ virqs[count] = irq_create_of_mapping(oirq.controller,
+ oirq.specifier,
+ oirq.size);
+ if (virqs[count] == NO_IRQ)
+ printk(KERN_ERR "Unable to allocate interrupt "
+ "number for %s\n", np->full_name);
+ else
+ count++;
+ }
+ }
+
+ /* Now request them */
+ for (i = 0; i < count; i++) {
+ if (request_irq(virqs[i], handler, 0, name, NULL)) {
+ printk(KERN_ERR "Unable to request interrupt %d for "
+ "%s\n", virqs[i], np->full_name);
+ return;
+ }
+ }
+}
+
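
Callers pass the event-source device node, a handler, and a name, and the helper maps and requests every interrupt it finds. A hedged sketch of the intended call pattern; the node path and handler below are placeholders rather than code from this series:

static irqreturn_t example_epow_interrupt(int irq, void *dev_id)
{
	/* process the environmental/power warning event */
	return IRQ_HANDLED;
}

static void __init example_init_event_sources(void)
{
	struct device_node *np;

	np = of_find_node_by_path("/event-sources/epow-events");
	if (!np)
		return;
	request_event_sources_irqs(np, example_epow_interrupt, "EPOW");
	of_node_put(np);
}
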
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
index a8e1d5d17a28..8f85f399ab9f 100644
--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -154,30 +154,6 @@ static void pseries_mach_cpu_die(void)
for(;;);
}
-static int qcss_tok; /* query-cpu-stopped-state token */
-
-/* Get state of physical CPU.
- * Return codes:
- * 0 - The processor is in the RTAS stopped state
- * 1 - stop-self is in progress
- * 2 - The processor is not in the RTAS stopped state
- * -1 - Hardware Error
- * -2 - Hardware Busy, Try again later.
- */
-static int query_cpu_stopped(unsigned int pcpu)
-{
- int cpu_status, status;
-
- status = rtas_call(qcss_tok, 1, 2, &cpu_status, pcpu);
- if (status != 0) {
- printk(KERN_ERR
- "RTAS query-cpu-stopped-state failed: %i\n", status);
- return status;
- }
-
- return cpu_status;
-}
-
static int pseries_cpu_disable(void)
{
int cpu = smp_processor_id();
@@ -187,7 +163,7 @@ static int pseries_cpu_disable(void)
/*fix boot_cpuid here*/
if (cpu == boot_cpuid)
- boot_cpuid = any_online_cpu(cpu_online_map);
+ boot_cpuid = cpumask_any(cpu_online_mask);
/* FIXME: abstract this to not be platform specific later on */
xics_migrate_irqs_away();
@@ -224,8 +200,9 @@ static void pseries_cpu_die(unsigned int cpu)
} else if (get_preferred_offline_state(cpu) == CPU_STATE_OFFLINE) {
for (tries = 0; tries < 25; tries++) {
- cpu_status = query_cpu_stopped(pcpu);
- if (cpu_status == 0 || cpu_status == -1)
+ cpu_status = smp_query_cpu_stopped(pcpu);
+ if (cpu_status == QCSS_STOPPED ||
+ cpu_status == QCSS_HARDWARE_ERROR)
break;
cpu_relax();
}
@@ -245,7 +222,7 @@ static void pseries_cpu_die(unsigned int cpu)
}
/*
- * Update cpu_present_map and paca(s) for a new cpu node. The wrinkle
+ * Update cpu_present_mask and paca(s) for a new cpu node. The wrinkle
* here is that a cpu device node may represent up to two logical cpus
* in the SMT case. We must honor the assumption in other code that
* the logical ids for sibling SMT threads x and y are adjacent, such
@@ -254,7 +231,7 @@ static void pseries_cpu_die(unsigned int cpu)
static int pseries_add_processor(struct device_node *np)
{
unsigned int cpu;
- cpumask_t candidate_map, tmp = CPU_MASK_NONE;
+ cpumask_var_t candidate_mask, tmp;
int err = -ENOSPC, len, nthreads, i;
const u32 *intserv;
@@ -262,48 +239,53 @@ static int pseries_add_processor(struct device_node *np)
if (!intserv)
return 0;
+ zalloc_cpumask_var(&candidate_mask, GFP_KERNEL);
+ zalloc_cpumask_var(&tmp, GFP_KERNEL);
+
nthreads = len / sizeof(u32);
for (i = 0; i < nthreads; i++)
- cpu_set(i, tmp);
+ cpumask_set_cpu(i, tmp);
cpu_maps_update_begin();
- BUG_ON(!cpus_subset(cpu_present_map, cpu_possible_map));
+ BUG_ON(!cpumask_subset(cpu_present_mask, cpu_possible_mask));
/* Get a bitmap of unoccupied slots. */
- cpus_xor(candidate_map, cpu_possible_map, cpu_present_map);
- if (cpus_empty(candidate_map)) {
+ cpumask_xor(candidate_mask, cpu_possible_mask, cpu_present_mask);
+ if (cpumask_empty(candidate_mask)) {
/* If we get here, it most likely means that NR_CPUS is
* less than the partition's max processors setting.
*/
printk(KERN_ERR "Cannot add cpu %s; this system configuration"
" supports %d logical cpus.\n", np->full_name,
- cpus_weight(cpu_possible_map));
+ cpumask_weight(cpu_possible_mask));
goto out_unlock;
}
- while (!cpus_empty(tmp))
- if (cpus_subset(tmp, candidate_map))
+ while (!cpumask_empty(tmp))
+ if (cpumask_subset(tmp, candidate_mask))
/* Found a range where we can insert the new cpu(s) */
break;
else
- cpus_shift_left(tmp, tmp, nthreads);
+ cpumask_shift_left(tmp, tmp, nthreads);
- if (cpus_empty(tmp)) {
- printk(KERN_ERR "Unable to find space in cpu_present_map for"
+ if (cpumask_empty(tmp)) {
+ printk(KERN_ERR "Unable to find space in cpu_present_mask for"
" processor %s with %d thread(s)\n", np->name,
nthreads);
goto out_unlock;
}
- for_each_cpu_mask(cpu, tmp) {
- BUG_ON(cpu_isset(cpu, cpu_present_map));
+ for_each_cpu(cpu, tmp) {
+ BUG_ON(cpumask_test_cpu(cpu, cpu_present_mask));
set_cpu_present(cpu, true);
set_hard_smp_processor_id(cpu, *intserv++);
}
err = 0;
out_unlock:
cpu_maps_update_done();
+ free_cpumask_var(candidate_mask);
+ free_cpumask_var(tmp);
return err;
}
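
The window search above depends on SMT sibling thread ids being adjacent: tmp starts as the low nthreads bits and slides left by nthreads at a time until the whole window lands in the possible-but-not-present mask. A worked example:

/*
 * nthreads = 2, CPUs 0-3 already present, 8 CPUs possible:
 *   candidate_mask = 0b11110000
 *   tmp = 0b00000011 -> not a subset, shift left by 2
 *   tmp = 0b00001100 -> still not, shift left by 2
 *   tmp = 0b00110000 -> subset: the new threads become CPUs 4 and 5
 */
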
@@ -334,7 +316,7 @@ static void pseries_remove_processor(struct device_node *np)
set_hard_smp_processor_id(cpu, -1);
break;
}
- if (cpu == NR_CPUS)
+ if (cpu >= nr_cpu_ids)
printk(KERN_WARNING "Could not find cpu to remove "
"with physical id 0x%x\n", intserv[i]);
}
@@ -388,6 +370,7 @@ static int __init pseries_cpu_hotplug_init(void)
struct device_node *np;
const char *typep;
int cpu;
+ int qcss_tok;
for_each_node_by_name(np, "interrupt-controller") {
typep = of_get_property(np, "compatible", NULL);
diff --git a/arch/powerpc/platforms/pseries/hvCall.S b/arch/powerpc/platforms/pseries/hvCall.S
index 383a5d0e9818..48d20573e4de 100644
--- a/arch/powerpc/platforms/pseries/hvCall.S
+++ b/arch/powerpc/platforms/pseries/hvCall.S
@@ -228,3 +228,41 @@ _GLOBAL(plpar_hcall9)
mtcrf 0xff,r0
blr /* return r3 = status */
+
+/* See plpar_hcall_raw to see why this is needed */
+_GLOBAL(plpar_hcall9_raw)
+ HMT_MEDIUM
+
+ mfcr r0
+ stw r0,8(r1)
+
+ std r4,STK_PARM(r4)(r1) /* Save ret buffer */
+
+ mr r4,r5
+ mr r5,r6
+ mr r6,r7
+ mr r7,r8
+ mr r8,r9
+ mr r9,r10
+ ld r10,STK_PARM(r11)(r1) /* put arg7 in R10 */
+ ld r11,STK_PARM(r12)(r1) /* put arg8 in R11 */
+ ld r12,STK_PARM(r13)(r1) /* put arg9 in R12 */
+
+ HVSC /* invoke the hypervisor */
+
+ mr r0,r12
+ ld r12,STK_PARM(r4)(r1)
+ std r4, 0(r12)
+ std r5, 8(r12)
+ std r6, 16(r12)
+ std r7, 24(r12)
+ std r8, 32(r12)
+ std r9, 40(r12)
+ std r10,48(r12)
+ std r11,56(r12)
+ std r0, 64(r12)
+
+ lwz r0,8(r1)
+ mtcrf 0xff,r0
+
+ blr /* return r3 = status */
diff --git a/arch/powerpc/platforms/pseries/hvCall_inst.c b/arch/powerpc/platforms/pseries/hvCall_inst.c
index 1fefae76e295..e19ff021e711 100644
--- a/arch/powerpc/platforms/pseries/hvCall_inst.c
+++ b/arch/powerpc/platforms/pseries/hvCall_inst.c
@@ -102,7 +102,7 @@ static const struct file_operations hcall_inst_seq_fops = {
#define CPU_NAME_BUF_SIZE 32
-static void probe_hcall_entry(unsigned long opcode, unsigned long *args)
+static void probe_hcall_entry(void *ignored, unsigned long opcode, unsigned long *args)
{
struct hcall_stats *h;
@@ -114,7 +114,7 @@ static void probe_hcall_entry(unsigned long opcode, unsigned long *args)
h->purr_start = mfspr(SPRN_PURR);
}
-static void probe_hcall_exit(unsigned long opcode, unsigned long retval,
+static void probe_hcall_exit(void *ignored, unsigned long opcode, unsigned long retval,
unsigned long *retbuf)
{
struct hcall_stats *h;
@@ -140,11 +140,11 @@ static int __init hcall_inst_init(void)
if (!firmware_has_feature(FW_FEATURE_LPAR))
return 0;
- if (register_trace_hcall_entry(probe_hcall_entry))
+ if (register_trace_hcall_entry(probe_hcall_entry, NULL))
return -EINVAL;
- if (register_trace_hcall_exit(probe_hcall_exit)) {
- unregister_trace_hcall_entry(probe_hcall_entry);
+ if (register_trace_hcall_exit(probe_hcall_exit, NULL)) {
+ unregister_trace_hcall_entry(probe_hcall_entry, NULL);
return -EINVAL;
}
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 1a0000a4b6d6..d26182d42cbf 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -468,7 +468,7 @@ static void pci_dma_dev_setup_pSeries(struct pci_dev *dev)
pr_debug("pci_dma_dev_setup_pSeries: %s\n", pci_name(dev));
- dn = dev->dev.archdata.of_node;
+ dn = dev->dev.of_node;
/* If we're the direct child of a root bus, then we need to allocate
* an iommu table ourselves. The bus setup code should have setup
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 0707653612ba..cf79b46d8f88 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -367,21 +367,28 @@ static void pSeries_lpar_hptab_clear(void)
{
unsigned long size_bytes = 1UL << ppc64_pft_size;
unsigned long hpte_count = size_bytes >> 4;
- unsigned long dummy1, dummy2, dword0;
+ struct {
+ unsigned long pteh;
+ unsigned long ptel;
+ } ptes[4];
long lpar_rc;
- int i;
+ int i, j;
- /* TODO: Use bulk call */
- for (i = 0; i < hpte_count; i++) {
- /* dont remove HPTEs with VRMA mappings */
- lpar_rc = plpar_pte_remove_raw(H_ANDCOND, i, HPTE_V_1TB_SEG,
- &dummy1, &dummy2);
- if (lpar_rc == H_NOT_FOUND) {
- lpar_rc = plpar_pte_read_raw(0, i, &dword0, &dummy1);
- if (!lpar_rc && ((dword0 & HPTE_V_VRMA_MASK)
- != HPTE_V_VRMA_MASK))
- /* Can be hpte for 1TB Seg. So remove it */
- plpar_pte_remove_raw(0, i, 0, &dummy1, &dummy2);
+ /* Read the HPTEs in batches of 4 and invalidate only the valid
+ * entries that are not in the VRMA; hpte_count will be a multiple of 4.
+ */
+ for (i = 0; i < hpte_count; i += 4) {
+ lpar_rc = plpar_pte_read_4_raw(0, i, (void *)ptes);
+ if (lpar_rc != H_SUCCESS)
+ continue;
+ for (j = 0; j < 4; j++) {
+ if ((ptes[j].pteh & HPTE_V_VRMA_MASK) ==
+ HPTE_V_VRMA_MASK)
+ continue;
+ if (ptes[j].pteh & HPTE_V_VALID)
+ plpar_pte_remove_raw(0, i + j, 0,
+ &(ptes[j].pteh), &(ptes[j].ptel));
}
}
}
diff --git a/arch/powerpc/platforms/pseries/plpar_wrappers.h b/arch/powerpc/platforms/pseries/plpar_wrappers.h
index a05f8d427856..d9801117124b 100644
--- a/arch/powerpc/platforms/pseries/plpar_wrappers.h
+++ b/arch/powerpc/platforms/pseries/plpar_wrappers.h
@@ -4,6 +4,14 @@
#include <asm/hvcall.h>
#include <asm/page.h>
+/* Get state of physical CPU from query_cpu_stopped */
+int smp_query_cpu_stopped(unsigned int pcpu);
+#define QCSS_STOPPED 0
+#define QCSS_STOPPING 1
+#define QCSS_NOT_STOPPED 2
+#define QCSS_HARDWARE_ERROR -1
+#define QCSS_HARDWARE_BUSY -2
+
static inline long poll_pending(void)
{
return plpar_hcall_norets(H_POLL_PENDING);
@@ -183,6 +191,24 @@ static inline long plpar_pte_read_raw(unsigned long flags, unsigned long ptex,
return rc;
}
+/*
+ * plpar_pte_read_4_raw can be called in real mode.
+ * The ptes buffer must be at least 8 * sizeof(unsigned long) bytes.
+ */
+static inline long plpar_pte_read_4_raw(unsigned long flags, unsigned long ptex,
+ unsigned long *ptes)
+
+{
+ long rc;
+ unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
+
+ rc = plpar_hcall9_raw(H_READ, retbuf, flags | H_READ_4, ptex);
+
+ memcpy(ptes, retbuf, 8*sizeof(unsigned long));
+
+ return rc;
+}
+
static inline long plpar_pte_protect(unsigned long flags, unsigned long ptex,
unsigned long avpn)
{
diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h
index 9e17c0d2a0c8..40c93cad91d2 100644
--- a/arch/powerpc/platforms/pseries/pseries.h
+++ b/arch/powerpc/platforms/pseries/pseries.h
@@ -10,6 +10,13 @@
#ifndef _PSERIES_PSERIES_H
#define _PSERIES_PSERIES_H
+#include <linux/interrupt.h>
+
+struct device_node;
+
+extern void request_event_sources_irqs(struct device_node *np,
+ irq_handler_t handler, const char *name);
+
extern void __init fw_feature_init(const char *hypertas, unsigned long len);
struct pt_regs;
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index db940d2c39a0..41a3e9a039ed 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -67,63 +67,6 @@ static irqreturn_t ras_epow_interrupt(int irq, void *dev_id);
static irqreturn_t ras_error_interrupt(int irq, void *dev_id);
-static void request_ras_irqs(struct device_node *np,
- irq_handler_t handler,
- const char *name)
-{
- int i, index, count = 0;
- struct of_irq oirq;
- const u32 *opicprop;
- unsigned int opicplen;
- unsigned int virqs[16];
-
- /* Check for obsolete "open-pic-interrupt" property. If present, then
- * map those interrupts using the default interrupt host and default
- * trigger
- */
- opicprop = of_get_property(np, "open-pic-interrupt", &opicplen);
- if (opicprop) {
- opicplen /= sizeof(u32);
- for (i = 0; i < opicplen; i++) {
- if (count > 15)
- break;
- virqs[count] = irq_create_mapping(NULL, *(opicprop++));
- if (virqs[count] == NO_IRQ)
- printk(KERN_ERR "Unable to allocate interrupt "
- "number for %s\n", np->full_name);
- else
- count++;
-
- }
- }
- /* Else use normal interrupt tree parsing */
- else {
- /* First try to do a proper OF tree parsing */
- for (index = 0; of_irq_map_one(np, index, &oirq) == 0;
- index++) {
- if (count > 15)
- break;
- virqs[count] = irq_create_of_mapping(oirq.controller,
- oirq.specifier,
- oirq.size);
- if (virqs[count] == NO_IRQ)
- printk(KERN_ERR "Unable to allocate interrupt "
- "number for %s\n", np->full_name);
- else
- count++;
- }
- }
-
- /* Now request them */
- for (i = 0; i < count; i++) {
- if (request_irq(virqs[i], handler, 0, name, NULL)) {
- printk(KERN_ERR "Unable to request interrupt %d for "
- "%s\n", virqs[i], np->full_name);
- return;
- }
- }
-}
-
/*
* Initialize handlers for the set of interrupts caused by hardware errors
* and power system events.
@@ -138,14 +81,15 @@ static int __init init_ras_IRQ(void)
/* Internal Errors */
np = of_find_node_by_path("/event-sources/internal-errors");
if (np != NULL) {
- request_ras_irqs(np, ras_error_interrupt, "RAS_ERROR");
+ request_event_sources_irqs(np, ras_error_interrupt,
+ "RAS_ERROR");
of_node_put(np);
}
/* EPOW Events */
np = of_find_node_by_path("/event-sources/epow-events");
if (np != NULL) {
- request_ras_irqs(np, ras_epow_interrupt, "RAS_EPOW");
+ request_event_sources_irqs(np, ras_epow_interrupt, "RAS_EPOW");
of_node_put(np);
}
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 6710761bf60f..a6d19e3a505e 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -496,13 +496,14 @@ static int __init pSeries_probe(void)
}
-DECLARE_PER_CPU(unsigned long, smt_snooze_delay);
+DECLARE_PER_CPU(long, smt_snooze_delay);
static void pseries_dedicated_idle_sleep(void)
{
unsigned int cpu = smp_processor_id();
unsigned long start_snooze;
unsigned long in_purr, out_purr;
+ long snooze = __get_cpu_var(smt_snooze_delay);
/*
* Indicate to the HV that we are idle. Now would be
@@ -517,13 +518,12 @@ static void pseries_dedicated_idle_sleep(void)
* has been checked recently. If we should poll for a little
* while, do so.
*/
- if (__get_cpu_var(smt_snooze_delay)) {
- start_snooze = get_tb() +
- __get_cpu_var(smt_snooze_delay) * tb_ticks_per_usec;
+ if (snooze) {
+ start_snooze = get_tb() + snooze * tb_ticks_per_usec;
local_irq_enable();
set_thread_flag(TIF_POLLING_NRFLAG);
- while (get_tb() < start_snooze) {
+ while ((snooze < 0) || (get_tb() < start_snooze)) {
if (need_resched() || cpu_is_offline(cpu))
goto out;
ppc64_runlatch_off();
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
index 4e7f89a84561..3b1bf61c45be 100644
--- a/arch/powerpc/platforms/pseries/smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -55,7 +55,29 @@
* The Primary thread of each non-boot processor was started from the OF client
* interface by prom_hold_cpus and is spinning on secondary_hold_spinloop.
*/
-static cpumask_t of_spin_map;
+static cpumask_var_t of_spin_mask;
+
+/* Query the stopped state of a physical cpu; return codes are #defined in plpar_wrappers.h */
+int smp_query_cpu_stopped(unsigned int pcpu)
+{
+ int cpu_status, status;
+ int qcss_tok = rtas_token("query-cpu-stopped-state");
+
+ if (qcss_tok == RTAS_UNKNOWN_SERVICE) {
+ printk(KERN_INFO "Firmware doesn't support "
+ "query-cpu-stopped-state\n");
+ return QCSS_HARDWARE_ERROR;
+ }
+
+ status = rtas_call(qcss_tok, 1, 2, &cpu_status, pcpu);
+ if (status != 0) {
+ printk(KERN_ERR
+ "RTAS query-cpu-stopped-state failed: %i\n", status);
+ return status;
+ }
+
+ return cpu_status;
+}
/**
* smp_startup_cpu() - start the given cpu
@@ -76,12 +98,18 @@ static inline int __devinit smp_startup_cpu(unsigned int lcpu)
unsigned int pcpu;
int start_cpu;
- if (cpu_isset(lcpu, of_spin_map))
+ if (cpumask_test_cpu(lcpu, of_spin_mask))
/* Already started by OF and sitting in spin loop */
return 1;
pcpu = get_hard_smp_processor_id(lcpu);
+ /* Check whether the CPU is already out of firmware (e.g. after kexec) */
+ if (smp_query_cpu_stopped(pcpu) == QCSS_NOT_STOPPED) {
+ cpumask_set_cpu(lcpu, of_spin_mask);
+ return 1;
+ }
+
/* Fixup atomic count: it exited inside IRQ handler. */
task_thread_info(paca[lcpu].__current)->preempt_count = 0;
@@ -115,7 +143,7 @@ static void __devinit smp_xics_setup_cpu(int cpu)
if (firmware_has_feature(FW_FEATURE_SPLPAR))
vpa_init(cpu);
- cpu_clear(cpu, of_spin_map);
+ cpumask_clear_cpu(cpu, of_spin_mask);
set_cpu_current_state(cpu, CPU_STATE_ONLINE);
set_default_offline_state(cpu);
@@ -186,17 +214,19 @@ static void __init smp_init_pseries(void)
pr_debug(" -> smp_init_pSeries()\n");
+ alloc_bootmem_cpumask_var(&of_spin_mask);
+
/* Mark threads which are still spinning in hold loops. */
if (cpu_has_feature(CPU_FTR_SMT)) {
for_each_present_cpu(i) {
if (cpu_thread_in_core(i) == 0)
- cpu_set(i, of_spin_map);
+ cpumask_set_cpu(i, of_spin_mask);
}
} else {
- of_spin_map = cpu_present_map;
+ cpumask_copy(of_spin_mask, cpu_present_mask);
}
- cpu_clear(boot_cpuid, of_spin_map);
+ cpumask_clear_cpu(boot_cpuid, of_spin_mask);
/* Non-lpar has additional take/give timebase */
if (rtas_token("freeze-time-base") != RTAS_UNKNOWN_SERVICE) {
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c
index 1bcedd8b4616..f19d19468393 100644
--- a/arch/powerpc/platforms/pseries/xics.c
+++ b/arch/powerpc/platforms/pseries/xics.c
@@ -163,29 +163,37 @@ static inline void lpar_qirr_info(int n_cpu , u8 value)
/* Interface to generic irq subsystem */
#ifdef CONFIG_SMP
-static int get_irq_server(unsigned int virq, cpumask_t cpumask,
+/*
+ * For the moment we only implement delivery to all cpus or one cpu.
+ *
+ * If the requested affinity is cpu_all_mask, we set global affinity.
+ * If not we set it to the first cpu in the mask, even if multiple cpus
+ * are set. This is so things like irqbalance (which set core and package
+ * wide affinities) do the right thing.
+ */
+static int get_irq_server(unsigned int virq, const struct cpumask *cpumask,
unsigned int strict_check)
{
- int server;
- /* For the moment only implement delivery to all cpus or one cpu */
- cpumask_t tmp = CPU_MASK_NONE;
if (!distribute_irqs)
return default_server;
- if (!cpus_equal(cpumask, CPU_MASK_ALL)) {
- cpus_and(tmp, cpu_online_map, cpumask);
-
- server = first_cpu(tmp);
+ if (!cpumask_equal(cpumask, cpu_all_mask)) {
+ int server = cpumask_first_and(cpu_online_mask, cpumask);
- if (server < NR_CPUS)
+ if (server < nr_cpu_ids)
return get_hard_smp_processor_id(server);
if (strict_check)
return -1;
}
- if (cpus_equal(cpu_online_map, cpu_present_map))
+ /*
+ * Workaround issue with some versions of JS20 firmware that
+ * deliver interrupts to cpus which haven't been started. This
+ * happens when using the maxcpus= boot option.
+ */
+ if (cpumask_equal(cpu_online_mask, cpu_present_mask))
return default_distrib_server;
return default_server;
@@ -207,7 +215,7 @@ static void xics_unmask_irq(unsigned int virq)
if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
return;
- server = get_irq_server(virq, *(irq_to_desc(virq)->affinity), 0);
+ server = get_irq_server(virq, irq_to_desc(virq)->affinity, 0);
call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server,
DEFAULT_PRIORITY);
@@ -398,11 +406,7 @@ static int xics_set_affinity(unsigned int virq, const struct cpumask *cpumask)
return -1;
}
- /*
- * For the moment only implement delivery to all cpus or one cpu.
- * Get current irq_server for the given irq
- */
- irq_server = get_irq_server(virq, *cpumask, 1);
+ irq_server = get_irq_server(virq, cpumask, 1);
if (irq_server == -1) {
char cpulist[128];
cpumask_scnprintf(cpulist, sizeof(cpulist), cpumask);
@@ -611,7 +615,7 @@ int __init smp_xics_probe(void)
{
xics_request_ipi();
- return cpus_weight(cpu_possible_map);
+ return cpumask_weight(cpu_possible_mask);
}
#endif /* CONFIG_SMP */
diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c
index 88f4ae787832..402d2212162f 100644
--- a/arch/powerpc/sysdev/axonram.c
+++ b/arch/powerpc/sysdev/axonram.c
@@ -185,7 +185,7 @@ axon_ram_probe(struct of_device *device, const struct of_device_id *device_id)
axon_ram_bank_id++;
dev_info(&device->dev, "Found memory controller on %s\n",
- device->node->full_name);
+ device->dev.of_node->full_name);
bank = kzalloc(sizeof(struct axon_ram_bank), GFP_KERNEL);
if (bank == NULL) {
@@ -198,7 +198,7 @@ axon_ram_probe(struct of_device *device, const struct of_device_id *device_id)
bank->device = device;
- if (of_address_to_resource(device->node, 0, &resource) != 0) {
+ if (of_address_to_resource(device->dev.of_node, 0, &resource) != 0) {
dev_err(&device->dev, "Cannot access device tree\n");
rc = -EFAULT;
goto failed;
@@ -253,7 +253,7 @@ axon_ram_probe(struct of_device *device, const struct of_device_id *device_id)
blk_queue_logical_block_size(bank->disk->queue, AXON_RAM_SECTOR_SIZE);
add_disk(bank->disk);
- bank->irq_id = irq_of_parse_and_map(device->node, 0);
+ bank->irq_id = irq_of_parse_and_map(device->dev.of_node, 0);
if (bank->irq_id == NO_IRQ) {
dev_err(&device->dev, "Cannot access ECC interrupt ID\n");
rc = -EFAULT;
@@ -327,12 +327,12 @@ static struct of_device_id axon_ram_device_id[] = {
};
static struct of_platform_driver axon_ram_driver = {
- .match_table = axon_ram_device_id,
.probe = axon_ram_probe,
.remove = axon_ram_remove,
- .driver = {
- .owner = THIS_MODULE,
- .name = AXON_RAM_MODULE_NAME,
+ .driver = {
+ .name = AXON_RAM_MODULE_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = axon_ram_device_id,
},
};
diff --git a/arch/powerpc/sysdev/bestcomm/bestcomm.c b/arch/powerpc/sysdev/bestcomm/bestcomm.c
index 378ebd9aac18..a7c5c470af14 100644
--- a/arch/powerpc/sysdev/bestcomm/bestcomm.c
+++ b/arch/powerpc/sysdev/bestcomm/bestcomm.c
@@ -377,7 +377,7 @@ mpc52xx_bcom_probe(struct of_device *op, const struct of_device_id *match)
printk(KERN_INFO "DMA: MPC52xx BestComm driver\n");
/* Get the bestcomm node */
- of_node_get(op->node);
+ of_node_get(op->dev.of_node);
/* Prepare SRAM */
ofn_sram = of_find_matching_node(NULL, mpc52xx_sram_ids);
@@ -406,10 +406,10 @@ mpc52xx_bcom_probe(struct of_device *op, const struct of_device_id *match)
}
/* Save the node */
- bcom_eng->ofnode = op->node;
+ bcom_eng->ofnode = op->dev.of_node;
/* Get, reserve & map io */
- if (of_address_to_resource(op->node, 0, &res_bcom)) {
+ if (of_address_to_resource(op->dev.of_node, 0, &res_bcom)) {
printk(KERN_ERR DRIVER_NAME ": "
"Can't get resource\n");
rv = -EINVAL;
@@ -453,7 +453,7 @@ error_sramclean:
kfree(bcom_eng);
bcom_sram_cleanup();
error_ofput:
- of_node_put(op->node);
+ of_node_put(op->dev.of_node);
printk(KERN_ERR "DMA: MPC52xx BestComm init failed !\n");
@@ -494,14 +494,12 @@ MODULE_DEVICE_TABLE(of, mpc52xx_bcom_of_match);
static struct of_platform_driver mpc52xx_bcom_of_platform_driver = {
- .owner = THIS_MODULE,
- .name = DRIVER_NAME,
- .match_table = mpc52xx_bcom_of_match,
.probe = mpc52xx_bcom_probe,
.remove = mpc52xx_bcom_remove,
- .driver = {
- .name = DRIVER_NAME,
- .owner = THIS_MODULE,
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = mpc52xx_bcom_of_match,
},
};
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
index 3482e3fd89c0..a7be144f5874 100644
--- a/arch/powerpc/sysdev/fsl_msi.c
+++ b/arch/powerpc/sysdev/fsl_msi.c
@@ -249,7 +249,7 @@ static int __devinit fsl_of_msi_probe(struct of_device *dev,
goto error_out;
}
- msi->irqhost = irq_alloc_host(dev->node, IRQ_HOST_MAP_LINEAR,
+ msi->irqhost = irq_alloc_host(dev->dev.of_node, IRQ_HOST_MAP_LINEAR,
NR_MSI_IRQS, &fsl_msi_host_ops, 0);
if (msi->irqhost == NULL) {
@@ -259,10 +259,10 @@ static int __devinit fsl_of_msi_probe(struct of_device *dev,
}
/* Get the MSI reg base */
- err = of_address_to_resource(dev->node, 0, &res);
+ err = of_address_to_resource(dev->dev.of_node, 0, &res);
if (err) {
dev_err(&dev->dev, "%s resource error!\n",
- dev->node->full_name);
+ dev->dev.of_node->full_name);
goto error_out;
}
@@ -285,16 +285,16 @@ static int __devinit fsl_of_msi_probe(struct of_device *dev,
goto error_out;
}
- p = of_get_property(dev->node, "interrupts", &count);
+ p = of_get_property(dev->dev.of_node, "interrupts", &count);
if (!p) {
dev_err(&dev->dev, "no interrupts property found on %s\n",
- dev->node->full_name);
+ dev->dev.of_node->full_name);
err = -ENODEV;
goto error_out;
}
if (count % 8 != 0) {
dev_err(&dev->dev, "Malformed interrupts property on %s\n",
- dev->node->full_name);
+ dev->dev.of_node->full_name);
err = -EINVAL;
goto error_out;
}
@@ -303,7 +303,7 @@ static int __devinit fsl_of_msi_probe(struct of_device *dev,
for (i = 0; i < count / 2; i++) {
if (i > NR_MSI_REG)
break;
- virt_msir = irq_of_parse_and_map(dev->node, i);
+ virt_msir = irq_of_parse_and_map(dev->dev.of_node, i);
if (virt_msir != NO_IRQ) {
set_irq_data(virt_msir, (void *)i);
set_irq_chained_handler(virt_msir, fsl_msi_cascade);
@@ -345,8 +345,11 @@ static const struct of_device_id fsl_of_msi_ids[] = {
};
static struct of_platform_driver fsl_of_msi_driver = {
- .name = "fsl-msi",
- .match_table = fsl_of_msi_ids,
+ .driver = {
+ .name = "fsl-msi",
+ .owner = THIS_MODULE,
+ .of_match_table = fsl_of_msi_ids,
+ },
.probe = fsl_of_msi_probe,
};
diff --git a/arch/powerpc/sysdev/fsl_pmc.c b/arch/powerpc/sysdev/fsl_pmc.c
index a7635a993dca..9082eb921ad9 100644
--- a/arch/powerpc/sysdev/fsl_pmc.c
+++ b/arch/powerpc/sysdev/fsl_pmc.c
@@ -60,7 +60,7 @@ static struct platform_suspend_ops pmc_suspend_ops = {
static int pmc_probe(struct of_device *ofdev, const struct of_device_id *id)
{
- pmc_regs = of_iomap(ofdev->node, 0);
+ pmc_regs = of_iomap(ofdev->dev.of_node, 0);
if (!pmc_regs)
return -ENOMEM;
@@ -76,8 +76,11 @@ static const struct of_device_id pmc_ids[] = {
};
static struct of_platform_driver pmc_driver = {
- .driver.name = "fsl-pmc",
- .match_table = pmc_ids,
+ .driver = {
+ .name = "fsl-pmc",
+ .owner = THIS_MODULE,
+ .of_match_table = pmc_ids,
+ },
.probe = pmc_probe,
};
diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c
index 71fba88f50db..cd37e49e7034 100644
--- a/arch/powerpc/sysdev/fsl_rio.c
+++ b/arch/powerpc/sysdev/fsl_rio.c
@@ -1,6 +1,15 @@
/*
* Freescale MPC85xx/MPC86xx RapidIO support
*
+ * Copyright 2009 Sysgo AG
+ * Thomas Moll <thomas.moll@sysgo.com>
+ * - fixed maintenance access routines, check for aligned access
+ *
+ * Copyright 2009 Integrated Device Technology, Inc.
+ * Alex Bounine <alexandre.bounine@idt.com>
+ * - Added Port-Write message handling
+ * - Added Machine Check exception handling
+ *
* Copyright (C) 2007, 2008 Freescale Semiconductor, Inc.
* Zhang Wei <wei.zhang@freescale.com>
*
@@ -24,19 +33,30 @@
#include <linux/of_platform.h>
#include <linux/delay.h>
#include <linux/slab.h>
+#include <linux/kfifo.h>
#include <asm/io.h>
+#include <asm/machdep.h>
+#include <asm/uaccess.h>
+
+#undef DEBUG_PW /* Port-Write debugging */
/* RapidIO definition irq, which read from OF-tree */
#define IRQ_RIO_BELL(m) (((struct rio_priv *)(m->priv))->bellirq)
#define IRQ_RIO_TX(m) (((struct rio_priv *)(m->priv))->txirq)
#define IRQ_RIO_RX(m) (((struct rio_priv *)(m->priv))->rxirq)
+#define IRQ_RIO_PW(m) (((struct rio_priv *)(m->priv))->pwirq)
#define RIO_ATMU_REGS_OFFSET 0x10c00
#define RIO_P_MSG_REGS_OFFSET 0x11000
#define RIO_S_MSG_REGS_OFFSET 0x13000
#define RIO_ESCSR 0x158
#define RIO_CCSR 0x15c
+#define RIO_LTLEDCSR 0x0608
+#define RIO_LTLEDCSR_IER 0x80000000
+#define RIO_LTLEDCSR_PRT 0x01000000
+#define RIO_LTLEECSR 0x060c
+#define RIO_EPWISR 0x10010
#define RIO_ISR_AACR 0x10120
#define RIO_ISR_AACR_AA 0x1 /* Accept All ID */
#define RIO_MAINT_WIN_SIZE 0x400000
@@ -55,6 +75,18 @@
#define RIO_MSG_ISR_QFI 0x00000010
#define RIO_MSG_ISR_DIQI 0x00000001
+#define RIO_IPWMR_SEN 0x00100000
+#define RIO_IPWMR_QFIE 0x00000100
+#define RIO_IPWMR_EIE 0x00000020
+#define RIO_IPWMR_CQ 0x00000002
+#define RIO_IPWMR_PWE 0x00000001
+
+#define RIO_IPWSR_QF 0x00100000
+#define RIO_IPWSR_TE 0x00000080
+#define RIO_IPWSR_QFI 0x00000010
+#define RIO_IPWSR_PWD 0x00000008
+#define RIO_IPWSR_PWB 0x00000004
+
#define RIO_MSG_DESC_SIZE 32
#define RIO_MSG_BUFFER_SIZE 4096
#define RIO_MIN_TX_RING_SIZE 2
@@ -121,7 +153,7 @@ struct rio_msg_regs {
u32 pad10[26];
u32 pwmr;
u32 pwsr;
- u32 pad11;
+ u32 epwqbar;
u32 pwqbar;
};
@@ -160,6 +192,14 @@ struct rio_msg_rx_ring {
void *dev_id;
};
+struct rio_port_write_msg {
+ void *virt;
+ dma_addr_t phys;
+ u32 msg_count;
+ u32 err_count;
+ u32 discard_count;
+};
+
struct rio_priv {
struct device *dev;
void __iomem *regs_win;
@@ -172,11 +212,64 @@ struct rio_priv {
struct rio_dbell_ring dbell_ring;
struct rio_msg_tx_ring msg_tx_ring;
struct rio_msg_rx_ring msg_rx_ring;
+ struct rio_port_write_msg port_write_msg;
int bellirq;
int txirq;
int rxirq;
+ int pwirq;
+ struct work_struct pw_work;
+ struct kfifo pw_fifo;
+ spinlock_t pw_fifo_lock;
};
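+
+/*
+ * Load one RIO config register with the given instruction ("lbz", "lhz"
+ * or "lwz"). A faulting access is recovered through the exception table:
+ * the fixup code sets 'err' to -EFAULT and 'x' to all ones.
+ */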
+#define __fsl_read_rio_config(x, addr, err, op) \
+ __asm__ __volatile__( \
+ "1: "op" %1,0(%2)\n" \
+ " eieio\n" \
+ "2:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "3: li %1,-1\n" \
+ " li %0,%3\n" \
+ " b 2b\n" \
+ ".section __ex_table,\"a\"\n" \
+ " .align 2\n" \
+ " .long 1b,3b\n" \
+ ".text" \
+ : "=r" (err), "=r" (x) \
+ : "b" (addr), "i" (-EFAULT), "0" (err))
+
+static void __iomem *rio_regs_win;
+
+static int (*saved_mcheck_exception)(struct pt_regs *regs);
+
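+/*
+ * Machine check handler for RapidIO bus read errors: if the faulting
+ * instruction has an exception table entry (see __fsl_read_rio_config),
+ * clear the error in LTLEDCSR and branch to the fixup code; otherwise
+ * fall back to the previously installed machine check handler.
+ */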
+static int fsl_rio_mcheck_exception(struct pt_regs *regs)
+{
+ const struct exception_table_entry *entry = NULL;
+ unsigned long reason = (mfspr(SPRN_MCSR) & MCSR_MASK);
+
+ if (reason & MCSR_BUS_RBERR) {
+ reason = in_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR));
+ if (reason & (RIO_LTLEDCSR_IER | RIO_LTLEDCSR_PRT)) {
+ /* Check if we are prepared to handle this fault */
+ entry = search_exception_tables(regs->nip);
+ if (entry) {
+ pr_debug("RIO: %s - MC Exception handled\n",
+ __func__);
+ out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR),
+ 0);
+ regs->msr |= MSR_RI;
+ regs->nip = entry->fixup;
+ return 1;
+ }
+ }
+ }
+
+ if (saved_mcheck_exception)
+ return saved_mcheck_exception(regs);
+ else
+ return cur_cpu_spec->machine_check(regs);
+}
+
/**
* fsl_rio_doorbell_send - Send a MPC85xx doorbell message
* @mport: RapidIO master port info
@@ -277,27 +370,44 @@ fsl_rio_config_read(struct rio_mport *mport, int index, u16 destid,
{
struct rio_priv *priv = mport->priv;
u8 *data;
+ u32 rval, err = 0;
pr_debug
("fsl_rio_config_read: index %d destid %d hopcount %d offset %8.8x len %d\n",
index, destid, hopcount, offset, len);
+
+ /* 16MB maintenance window possible */
+ /* allow only aligned access to maintenance registers */
+ if (offset > (0x1000000 - len) || !IS_ALIGNED(offset, len))
+ return -EINVAL;
+
out_be32(&priv->maint_atmu_regs->rowtar,
- (destid << 22) | (hopcount << 12) | ((offset & ~0x3) >> 9));
+ (destid << 22) | (hopcount << 12) | (offset >> 12));
+ out_be32(&priv->maint_atmu_regs->rowtear, (destid >> 10));
- data = (u8 *) priv->maint_win + offset;
+ data = (u8 *) priv->maint_win + (offset & (RIO_MAINT_WIN_SIZE - 1));
switch (len) {
case 1:
- *val = in_8((u8 *) data);
+ __fsl_read_rio_config(rval, data, err, "lbz");
break;
case 2:
- *val = in_be16((u16 *) data);
+ __fsl_read_rio_config(rval, data, err, "lhz");
break;
- default:
- *val = in_be32((u32 *) data);
+ case 4:
+ __fsl_read_rio_config(rval, data, err, "lwz");
break;
+ default:
+ return -EINVAL;
}
- return 0;
+ if (err) {
+ pr_debug("RIO: cfg_read error %d for %x:%x:%x\n",
+ err, destid, hopcount, offset);
+ }
+
+ *val = rval;
+
+ return err;
}
/**
@@ -322,10 +432,17 @@ fsl_rio_config_write(struct rio_mport *mport, int index, u16 destid,
pr_debug
("fsl_rio_config_write: index %d destid %d hopcount %d offset %8.8x len %d val %8.8x\n",
index, destid, hopcount, offset, len, val);
+
+ /* 16MB maintenance window possible */
+ /* allow only aligned access to maintenance registers */
+ if (offset > (0x1000000 - len) || !IS_ALIGNED(offset, len))
+ return -EINVAL;
+
out_be32(&priv->maint_atmu_regs->rowtar,
- (destid << 22) | (hopcount << 12) | ((offset & ~0x3) >> 9));
+ (destid << 22) | (hopcount << 12) | (offset >> 12));
+ out_be32(&priv->maint_atmu_regs->rowtear, (destid >> 10));
- data = (u8 *) priv->maint_win + offset;
+ data = (u8 *) priv->maint_win + (offset & (RIO_MAINT_WIN_SIZE - 1));
switch (len) {
case 1:
out_8((u8 *) data, val);
@@ -333,9 +450,11 @@ fsl_rio_config_write(struct rio_mport *mport, int index, u16 destid,
case 2:
out_be16((u16 *) data, val);
break;
- default:
+ case 4:
out_be32((u32 *) data, val);
break;
+ default:
+ return -EINVAL;
}
return 0;
@@ -930,6 +1049,223 @@ static int fsl_rio_doorbell_init(struct rio_mport *mport)
return rc;
}
+/**
+ * fsl_rio_port_write_handler - MPC85xx port write interrupt handler
+ * @irq: Linux interrupt number
+ * @dev_instance: Pointer to interrupt-specific data
+ *
+ * Handles port write interrupts. Parses a list of registered
+ * port write event handlers and executes a matching event handler.
+ */
+static irqreturn_t
+fsl_rio_port_write_handler(int irq, void *dev_instance)
+{
+ u32 ipwmr, ipwsr;
+ struct rio_mport *port = (struct rio_mport *)dev_instance;
+ struct rio_priv *priv = port->priv;
+ u32 epwisr, tmp;
+
+ ipwmr = in_be32(&priv->msg_regs->pwmr);
+ ipwsr = in_be32(&priv->msg_regs->pwsr);
+
+ epwisr = in_be32(priv->regs_win + RIO_EPWISR);
+ if (epwisr & 0x80000000) {
+ tmp = in_be32(priv->regs_win + RIO_LTLEDCSR);
+ pr_info("RIO_LTLEDCSR = 0x%x\n", tmp);
+ out_be32(priv->regs_win + RIO_LTLEDCSR, 0);
+ }
+
+ if (!(epwisr & 0x00000001))
+ return IRQ_HANDLED;
+
+#ifdef DEBUG_PW
+ pr_debug("PW Int->IPWMR: 0x%08x IPWSR: 0x%08x (", ipwmr, ipwsr);
+ if (ipwsr & RIO_IPWSR_QF)
+ pr_debug(" QF");
+ if (ipwsr & RIO_IPWSR_TE)
+ pr_debug(" TE");
+ if (ipwsr & RIO_IPWSR_QFI)
+ pr_debug(" QFI");
+ if (ipwsr & RIO_IPWSR_PWD)
+ pr_debug(" PWD");
+ if (ipwsr & RIO_IPWSR_PWB)
+ pr_debug(" PWB");
+ pr_debug(" )\n");
+#endif
+ out_be32(&priv->msg_regs->pwsr,
+ ipwsr & (RIO_IPWSR_TE | RIO_IPWSR_QFI | RIO_IPWSR_PWD));
+
+ if ((ipwmr & RIO_IPWMR_EIE) && (ipwsr & RIO_IPWSR_TE)) {
+ priv->port_write_msg.err_count++;
+ pr_info("RIO: Port-Write Transaction Err (%d)\n",
+ priv->port_write_msg.err_count);
+ }
+ if (ipwsr & RIO_IPWSR_PWD) {
+ priv->port_write_msg.discard_count++;
+ pr_info("RIO: Port Discarded Port-Write Msg(s) (%d)\n",
+ priv->port_write_msg.discard_count);
+ }
+
+ /* Schedule deferred processing if PW was received */
+ if (ipwsr & RIO_IPWSR_QFI) {
+ /* Save PW message (if there is room in FIFO),
+ * otherwise discard it.
+ */
+ if (kfifo_avail(&priv->pw_fifo) >= RIO_PW_MSG_SIZE) {
+ priv->port_write_msg.msg_count++;
+ kfifo_in(&priv->pw_fifo, priv->port_write_msg.virt,
+ RIO_PW_MSG_SIZE);
+ } else {
+ priv->port_write_msg.discard_count++;
+ pr_info("RIO: ISR Discarded Port-Write Msg(s) (%d)\n",
+ priv->port_write_msg.discard_count);
+ }
+ schedule_work(&priv->pw_work);
+ }
+
+ /* Issue Clear Queue command. This allows another
+ * port-write to be received.
+ */
+ out_be32(&priv->msg_regs->pwmr, ipwmr | RIO_IPWMR_CQ);
+
+ return IRQ_HANDLED;
+}
+
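+/*
+ * Deferred port-write processing: drain the messages queued by the
+ * interrupt handler from pw_fifo and pass each one to the RIO core
+ * via rio_inb_pwrite_handler().
+ */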
+static void fsl_pw_dpc(struct work_struct *work)
+{
+ struct rio_priv *priv = container_of(work, struct rio_priv, pw_work);
+ unsigned long flags;
+ u32 msg_buffer[RIO_PW_MSG_SIZE/sizeof(u32)];
+
+ /*
+ * Process port-write messages
+ */
+ spin_lock_irqsave(&priv->pw_fifo_lock, flags);
+ while (kfifo_out(&priv->pw_fifo, (unsigned char *)msg_buffer,
+ RIO_PW_MSG_SIZE)) {
+ /* Process one message */
+ spin_unlock_irqrestore(&priv->pw_fifo_lock, flags);
+#ifdef DEBUG_PW
+ {
+ u32 i;
+ pr_debug("%s : Port-Write Message:", __func__);
+ for (i = 0; i < RIO_PW_MSG_SIZE/sizeof(u32); i++) {
+ if ((i%4) == 0)
+ pr_debug("\n0x%02x: 0x%08x", i*4,
+ msg_buffer[i]);
+ else
+ pr_debug(" 0x%08x", msg_buffer[i]);
+ }
+ pr_debug("\n");
+ }
+#endif
+ /* Pass the port-write message to RIO core for processing */
+ rio_inb_pwrite_handler((union rio_pw_msg *)msg_buffer);
+ spin_lock_irqsave(&priv->pw_fifo_lock, flags);
+ }
+ spin_unlock_irqrestore(&priv->pw_fifo_lock, flags);
+}
+
+/**
+ * fsl_rio_pw_enable - enable/disable port-write interface init
+ * @mport: Master port implementing the port write unit
+ * @enable: 1=enable; 0=disable port-write message handling
+ */
+static int fsl_rio_pw_enable(struct rio_mport *mport, int enable)
+{
+ struct rio_priv *priv = mport->priv;
+ u32 rval;
+
+ rval = in_be32(&priv->msg_regs->pwmr);
+
+ if (enable)
+ rval |= RIO_IPWMR_PWE;
+ else
+ rval &= ~RIO_IPWMR_PWE;
+
+ out_be32(&priv->msg_regs->pwmr, rval);
+
+ return 0;
+}
+
+/**
+ * fsl_rio_port_write_init - MPC85xx port write interface init
+ * @mport: Master port implementing the port write unit
+ *
+ * Initializes port write unit hardware and DMA buffer
+ * ring. Called from fsl_rio_setup(). Returns %0 on success
+ * or %-ENOMEM on failure.
+ */
+static int fsl_rio_port_write_init(struct rio_mport *mport)
+{
+ struct rio_priv *priv = mport->priv;
+ int rc = 0;
+
+ /* Following configurations require a disabled port write controller */
+ out_be32(&priv->msg_regs->pwmr,
+ in_be32(&priv->msg_regs->pwmr) & ~RIO_IPWMR_PWE);
+
+ /* Initialize port write */
+ priv->port_write_msg.virt = dma_alloc_coherent(priv->dev,
+ RIO_PW_MSG_SIZE,
+ &priv->port_write_msg.phys, GFP_KERNEL);
+ if (!priv->port_write_msg.virt) {
+ pr_err("RIO: unable allocate port write queue\n");
+ return -ENOMEM;
+ }
+
+ priv->port_write_msg.err_count = 0;
+ priv->port_write_msg.discard_count = 0;
+
+ /* Point dequeue/enqueue pointers at first entry */
+ out_be32(&priv->msg_regs->epwqbar, 0);
+ out_be32(&priv->msg_regs->pwqbar, (u32) priv->port_write_msg.phys);
+
+ pr_debug("EIPWQBAR: 0x%08x IPWQBAR: 0x%08x\n",
+ in_be32(&priv->msg_regs->epwqbar),
+ in_be32(&priv->msg_regs->pwqbar));
+
+ /* Clear interrupt status IPWSR */
+ out_be32(&priv->msg_regs->pwsr,
+ (RIO_IPWSR_TE | RIO_IPWSR_QFI | RIO_IPWSR_PWD));
+
+ /* Configure the port write controller: enable snooping and all
+ * reporting, and clear the queue full condition */
+ out_be32(&priv->msg_regs->pwmr,
+ RIO_IPWMR_SEN | RIO_IPWMR_QFIE | RIO_IPWMR_EIE | RIO_IPWMR_CQ);
+
+
+ /* Hook up port-write handler */
+ rc = request_irq(IRQ_RIO_PW(mport), fsl_rio_port_write_handler, 0,
+ "port-write", (void *)mport);
+ if (rc < 0) {
+ pr_err("MPC85xx RIO: unable to request inbound doorbell irq");
+ goto err_out;
+ }
+
+ INIT_WORK(&priv->pw_work, fsl_pw_dpc);
+ spin_lock_init(&priv->pw_fifo_lock);
+ if (kfifo_alloc(&priv->pw_fifo, RIO_PW_MSG_SIZE * 32, GFP_KERNEL)) {
+ pr_err("FIFO allocation failed\n");
+ rc = -ENOMEM;
+ goto err_out_irq;
+ }
+
+ pr_debug("IPWMR: 0x%08x IPWSR: 0x%08x\n",
+ in_be32(&priv->msg_regs->pwmr),
+ in_be32(&priv->msg_regs->pwsr));
+
+ return rc;
+
+err_out_irq:
+ free_irq(IRQ_RIO_PW(mport), (void *)mport);
+err_out:
+ dma_free_coherent(priv->dev, RIO_PW_MSG_SIZE,
+ priv->port_write_msg.virt,
+ priv->port_write_msg.phys);
+ return rc;
+}
+
static char *cmdline = NULL;
static int fsl_rio_get_hdid(int index)
@@ -1015,41 +1351,41 @@ int fsl_rio_setup(struct of_device *dev)
u64 law_start, law_size;
int paw, aw, sw;
- if (!dev->node) {
+ if (!dev->dev.of_node) {
dev_err(&dev->dev, "Device OF-Node is NULL");
return -EFAULT;
}
- rc = of_address_to_resource(dev->node, 0, &regs);
+ rc = of_address_to_resource(dev->dev.of_node, 0, &regs);
if (rc) {
dev_err(&dev->dev, "Can't get %s property 'reg'\n",
- dev->node->full_name);
+ dev->dev.of_node->full_name);
return -EFAULT;
}
- dev_info(&dev->dev, "Of-device full name %s\n", dev->node->full_name);
+ dev_info(&dev->dev, "Of-device full name %s\n", dev->dev.of_node->full_name);
dev_info(&dev->dev, "Regs: %pR\n", &regs);
- dt_range = of_get_property(dev->node, "ranges", &rlen);
+ dt_range = of_get_property(dev->dev.of_node, "ranges", &rlen);
if (!dt_range) {
dev_err(&dev->dev, "Can't get %s property 'ranges'\n",
- dev->node->full_name);
+ dev->dev.of_node->full_name);
return -EFAULT;
}
/* Get node address wide */
- cell = of_get_property(dev->node, "#address-cells", NULL);
+ cell = of_get_property(dev->dev.of_node, "#address-cells", NULL);
if (cell)
aw = *cell;
else
- aw = of_n_addr_cells(dev->node);
+ aw = of_n_addr_cells(dev->dev.of_node);
/* Get node size wide */
- cell = of_get_property(dev->node, "#size-cells", NULL);
+ cell = of_get_property(dev->dev.of_node, "#size-cells", NULL);
if (cell)
sw = *cell;
else
- sw = of_n_size_cells(dev->node);
+ sw = of_n_size_cells(dev->dev.of_node);
/* Get parent address width */
- paw = of_n_addr_cells(dev->node);
+ paw = of_n_addr_cells(dev->dev.of_node);
law_start = of_read_number(dt_range + aw, paw);
law_size = of_read_number(dt_range + aw + paw, sw);
@@ -1057,7 +1393,7 @@ int fsl_rio_setup(struct of_device *dev)
dev_info(&dev->dev, "LAW start 0x%016llx, size 0x%016llx.\n",
law_start, law_size);
- ops = kmalloc(sizeof(struct rio_ops), GFP_KERNEL);
+ ops = kzalloc(sizeof(struct rio_ops), GFP_KERNEL);
if (!ops) {
rc = -ENOMEM;
goto err_ops;
@@ -1067,6 +1403,7 @@ int fsl_rio_setup(struct of_device *dev)
ops->cread = fsl_rio_config_read;
ops->cwrite = fsl_rio_config_write;
ops->dsend = fsl_rio_doorbell_send;
+ ops->pwenable = fsl_rio_pw_enable;
port = kzalloc(sizeof(struct rio_mport), GFP_KERNEL);
if (!port) {
@@ -1089,11 +1426,12 @@ int fsl_rio_setup(struct of_device *dev)
port->iores.flags = IORESOURCE_MEM;
port->iores.name = "rio_io_win";
- priv->bellirq = irq_of_parse_and_map(dev->node, 2);
- priv->txirq = irq_of_parse_and_map(dev->node, 3);
- priv->rxirq = irq_of_parse_and_map(dev->node, 4);
- dev_info(&dev->dev, "bellirq: %d, txirq: %d, rxirq %d\n", priv->bellirq,
- priv->txirq, priv->rxirq);
+ priv->pwirq = irq_of_parse_and_map(dev->dev.of_node, 0);
+ priv->bellirq = irq_of_parse_and_map(dev->dev.of_node, 2);
+ priv->txirq = irq_of_parse_and_map(dev->dev.of_node, 3);
+ priv->rxirq = irq_of_parse_and_map(dev->dev.of_node, 4);
+ dev_info(&dev->dev, "pwirq: %d, bellirq: %d, txirq: %d, rxirq %d\n",
+ priv->pwirq, priv->bellirq, priv->txirq, priv->rxirq);
rio_init_dbell_res(&port->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff);
rio_init_mbox_res(&port->riores[RIO_INB_MBOX_RESOURCE], 0, 0);
@@ -1109,6 +1447,7 @@ int fsl_rio_setup(struct of_device *dev)
rio_register_mport(port);
priv->regs_win = ioremap(regs.start, regs.end - regs.start + 1);
+ rio_regs_win = priv->regs_win;
/* Probe the master port phy type */
ccsr = in_be32(priv->regs_win + RIO_CCSR);
@@ -1166,7 +1505,8 @@ int fsl_rio_setup(struct of_device *dev)
/* Configure maintenance transaction window */
out_be32(&priv->maint_atmu_regs->rowbar, law_start >> 12);
- out_be32(&priv->maint_atmu_regs->rowar, 0x80077015); /* 4M */
+ out_be32(&priv->maint_atmu_regs->rowar,
+ 0x80077000 | (ilog2(RIO_MAINT_WIN_SIZE) - 1));
priv->maint_win = ioremap(law_start, RIO_MAINT_WIN_SIZE);
@@ -1175,6 +1515,12 @@ int fsl_rio_setup(struct of_device *dev)
(law_start + RIO_MAINT_WIN_SIZE) >> 12);
out_be32(&priv->dbell_atmu_regs->rowar, 0x8004200b); /* 4k */
fsl_rio_doorbell_init(port);
+ fsl_rio_port_write_init(port);
+
+ saved_mcheck_exception = ppc_md.machine_check_exception;
+ ppc_md.machine_check_exception = fsl_rio_mcheck_exception;
+ /* Ensure that RFXE is set */
+ mtspr(SPRN_HID1, (mfspr(SPRN_HID1) | 0x20000));
return 0;
err:
@@ -1195,7 +1541,7 @@ static int __devinit fsl_of_rio_rpn_probe(struct of_device *dev,
{
int rc;
printk(KERN_INFO "Setting up RapidIO peer-to-peer network %s\n",
- dev->node->full_name);
+ dev->dev.of_node->full_name);
rc = fsl_rio_setup(dev);
if (rc)
@@ -1215,8 +1561,11 @@ static const struct of_device_id fsl_of_rio_rpn_ids[] = {
};
static struct of_platform_driver fsl_of_rio_rpn_driver = {
- .name = "fsl-of-rio",
- .match_table = fsl_of_rio_rpn_ids,
+ .driver = {
+ .name = "fsl-of-rio",
+ .owner = THIS_MODULE,
+ .of_match_table = fsl_of_rio_rpn_ids,
+ },
.probe = fsl_of_rio_rpn_probe,
};
diff --git a/arch/powerpc/sysdev/mpc8xxx_gpio.c b/arch/powerpc/sysdev/mpc8xxx_gpio.c
index 6478eb10691a..83f519655fac 100644
--- a/arch/powerpc/sysdev/mpc8xxx_gpio.c
+++ b/arch/powerpc/sysdev/mpc8xxx_gpio.c
@@ -16,6 +16,7 @@
#include <linux/of_gpio.h>
#include <linux/gpio.h>
#include <linux/slab.h>
+#include <linux/irq.h>
#define MPC8XXX_GPIO_PINS 32
@@ -35,6 +36,7 @@ struct mpc8xxx_gpio_chip {
* open drain mode safely
*/
u32 data;
+ struct irq_host *irq;
};
static inline u32 mpc8xxx_gpio2mask(unsigned int gpio)
@@ -128,12 +130,136 @@ static int mpc8xxx_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val
return 0;
}
+static int mpc8xxx_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
+{
+ struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc);
+ struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm);
+
+ if (mpc8xxx_gc->irq && offset < MPC8XXX_GPIO_PINS)
+ return irq_create_mapping(mpc8xxx_gc->irq, offset);
+ else
+ return -ENXIO;
+}
+
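+/*
+ * Cascade handler for the shared GPIO bank interrupt: forward a pending,
+ * unmasked GPIO event to its per-pin virtual interrupt.
+ */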
+static void mpc8xxx_gpio_irq_cascade(unsigned int irq, struct irq_desc *desc)
+{
+ struct mpc8xxx_gpio_chip *mpc8xxx_gc = get_irq_desc_data(desc);
+ struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc;
+ unsigned int mask;
+
+ mask = in_be32(mm->regs + GPIO_IER) & in_be32(mm->regs + GPIO_IMR);
+ if (mask)
+ generic_handle_irq(irq_linear_revmap(mpc8xxx_gc->irq,
+ 32 - ffs(mask)));
+}
+
+static void mpc8xxx_irq_unmask(unsigned int virq)
+{
+ struct mpc8xxx_gpio_chip *mpc8xxx_gc = get_irq_chip_data(virq);
+ struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+
+ setbits32(mm->regs + GPIO_IMR, mpc8xxx_gpio2mask(virq_to_hw(virq)));
+
+ spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+}
+
+static void mpc8xxx_irq_mask(unsigned int virq)
+{
+ struct mpc8xxx_gpio_chip *mpc8xxx_gc = get_irq_chip_data(virq);
+ struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+
+ clrbits32(mm->regs + GPIO_IMR, mpc8xxx_gpio2mask(virq_to_hw(virq)));
+
+ spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+}
+
+static void mpc8xxx_irq_ack(unsigned int virq)
+{
+ struct mpc8xxx_gpio_chip *mpc8xxx_gc = get_irq_chip_data(virq);
+ struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc;
+
+ out_be32(mm->regs + GPIO_IER, mpc8xxx_gpio2mask(virq_to_hw(virq)));
+}
+
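+/*
+ * Only falling-edge and both-edge triggering are supported by the
+ * hardware: a set ICR bit selects falling edge, a cleared bit both edges.
+ */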
+static int mpc8xxx_irq_set_type(unsigned int virq, unsigned int flow_type)
+{
+ struct mpc8xxx_gpio_chip *mpc8xxx_gc = get_irq_chip_data(virq);
+ struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc;
+ unsigned long flags;
+
+ switch (flow_type) {
+ case IRQ_TYPE_EDGE_FALLING:
+ spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+ setbits32(mm->regs + GPIO_ICR,
+ mpc8xxx_gpio2mask(virq_to_hw(virq)));
+ spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+ break;
+
+ case IRQ_TYPE_EDGE_BOTH:
+ spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+ clrbits32(mm->regs + GPIO_ICR,
+ mpc8xxx_gpio2mask(virq_to_hw(virq)));
+ spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static struct irq_chip mpc8xxx_irq_chip = {
+ .name = "mpc8xxx-gpio",
+ .unmask = mpc8xxx_irq_unmask,
+ .mask = mpc8xxx_irq_mask,
+ .ack = mpc8xxx_irq_ack,
+ .set_type = mpc8xxx_irq_set_type,
+};
+
+static int mpc8xxx_gpio_irq_map(struct irq_host *h, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ set_irq_chip_data(virq, h->host_data);
+ set_irq_chip_and_handler(virq, &mpc8xxx_irq_chip, handle_level_irq);
+ set_irq_type(virq, IRQ_TYPE_NONE);
+
+ return 0;
+}
+
+static int mpc8xxx_gpio_irq_xlate(struct irq_host *h, struct device_node *ct,
+ const u32 *intspec, unsigned int intsize,
+ irq_hw_number_t *out_hwirq,
+ unsigned int *out_flags)
+{
+ /* interrupt sense values coming from the device tree equal either
+ * EDGE_FALLING or EDGE_BOTH
+ */
+ *out_hwirq = intspec[0];
+ *out_flags = intspec[1];
+
+ return 0;
+}
+
+static struct irq_host_ops mpc8xxx_gpio_irq_ops = {
+ .map = mpc8xxx_gpio_irq_map,
+ .xlate = mpc8xxx_gpio_irq_xlate,
+};
+
static void __init mpc8xxx_add_controller(struct device_node *np)
{
struct mpc8xxx_gpio_chip *mpc8xxx_gc;
struct of_mm_gpio_chip *mm_gc;
struct of_gpio_chip *of_gc;
struct gpio_chip *gc;
+ unsigned hwirq;
int ret;
mpc8xxx_gc = kzalloc(sizeof(*mpc8xxx_gc), GFP_KERNEL);
@@ -158,11 +284,32 @@ static void __init mpc8xxx_add_controller(struct device_node *np)
else
gc->get = mpc8xxx_gpio_get;
gc->set = mpc8xxx_gpio_set;
+ gc->to_irq = mpc8xxx_gpio_to_irq;
ret = of_mm_gpiochip_add(np, mm_gc);
if (ret)
goto err;
+ hwirq = irq_of_parse_and_map(np, 0);
+ if (hwirq == NO_IRQ)
+ goto skip_irq;
+
+ mpc8xxx_gc->irq =
+ irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, MPC8XXX_GPIO_PINS,
+ &mpc8xxx_gpio_irq_ops, MPC8XXX_GPIO_PINS);
+ if (!mpc8xxx_gc->irq)
+ goto skip_irq;
+
+ mpc8xxx_gc->irq->host_data = mpc8xxx_gc;
+
+ /* ack and mask all irqs */
+ out_be32(mm_gc->regs + GPIO_IER, 0xffffffff);
+ out_be32(mm_gc->regs + GPIO_IMR, 0);
+
+ set_irq_data(hwirq, mpc8xxx_gc);
+ set_irq_chained_handler(hwirq, mpc8xxx_gpio_irq_cascade);
+
+skip_irq:
return;
err:
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 260295b10557..2102487612a4 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -568,12 +568,12 @@ static void __init mpic_scan_ht_pics(struct mpic *mpic)
#endif /* CONFIG_MPIC_U3_HT_IRQS */
#ifdef CONFIG_SMP
-static int irq_choose_cpu(const cpumask_t *mask)
+static int irq_choose_cpu(const struct cpumask *mask)
{
int cpuid;
if (cpumask_equal(mask, cpu_all_mask)) {
- static int irq_rover;
+ static int irq_rover = 0;
static DEFINE_RAW_SPINLOCK(irq_rover_lock);
unsigned long flags;
@@ -581,15 +581,11 @@ static int irq_choose_cpu(const cpumask_t *mask)
do_round_robin:
raw_spin_lock_irqsave(&irq_rover_lock, flags);
- while (!cpu_online(irq_rover)) {
- if (++irq_rover >= NR_CPUS)
- irq_rover = 0;
- }
+ irq_rover = cpumask_next(irq_rover, cpu_online_mask);
+ if (irq_rover >= nr_cpu_ids)
+ irq_rover = cpumask_first(cpu_online_mask);
+
cpuid = irq_rover;
- do {
- if (++irq_rover >= NR_CPUS)
- irq_rover = 0;
- } while (!cpu_online(irq_rover));
raw_spin_unlock_irqrestore(&irq_rover_lock, flags);
} else {
@@ -601,7 +597,7 @@ static int irq_choose_cpu(const cpumask_t *mask)
return get_hard_smp_processor_id(cpuid);
}
#else
-static int irq_choose_cpu(const cpumask_t *mask)
+static int irq_choose_cpu(const struct cpumask *mask)
{
return hard_smp_processor_id();
}
@@ -814,12 +810,16 @@ int mpic_set_affinity(unsigned int irq, const struct cpumask *cpumask)
mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION), 1 << cpuid);
} else {
- cpumask_t tmp;
+ cpumask_var_t tmp;
- cpumask_and(&tmp, cpumask, cpu_online_mask);
+ alloc_cpumask_var(&tmp, GFP_KERNEL);
+
+ cpumask_and(tmp, cpumask, cpu_online_mask);
mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION),
- mpic_physmask(cpus_addr(tmp)[0]));
+ mpic_physmask(cpumask_bits(tmp)[0]));
+
+ free_cpumask_var(tmp);
}
return 0;
@@ -1479,21 +1479,6 @@ void mpic_teardown_this_cpu(int secondary)
}
-void mpic_send_ipi(unsigned int ipi_no, unsigned int cpu_mask)
-{
- struct mpic *mpic = mpic_primary;
-
- BUG_ON(mpic == NULL);
-
-#ifdef DEBUG_IPI
- DBG("%s: send_ipi(ipi_no: %d)\n", mpic->name, ipi_no);
-#endif
-
- mpic_cpu_write(MPIC_INFO(CPU_IPI_DISPATCH_0) +
- ipi_no * MPIC_INFO(CPU_IPI_DISPATCH_STRIDE),
- mpic_physmask(cpu_mask & cpus_addr(cpu_online_map)[0]));
-}
-
static unsigned int _mpic_get_one_irq(struct mpic *mpic, int reg)
{
u32 src;
@@ -1589,8 +1574,25 @@ void mpic_request_ipis(void)
}
}
+static void mpic_send_ipi(unsigned int ipi_no, const struct cpumask *cpu_mask)
+{
+ struct mpic *mpic = mpic_primary;
+
+ BUG_ON(mpic == NULL);
+
+#ifdef DEBUG_IPI
+ DBG("%s: send_ipi(ipi_no: %d)\n", mpic->name, ipi_no);
+#endif
+
+ mpic_cpu_write(MPIC_INFO(CPU_IPI_DISPATCH_0) +
+ ipi_no * MPIC_INFO(CPU_IPI_DISPATCH_STRIDE),
+ mpic_physmask(cpumask_bits(cpu_mask)[0]));
+}
+
void smp_mpic_message_pass(int target, int msg)
{
+ cpumask_var_t tmp;
+
/* make sure we're sending something that translates to an IPI */
if ((unsigned int)msg > 3) {
printk("SMP %d: smp_message_pass: unknown msg %d\n",
@@ -1599,13 +1601,17 @@ void smp_mpic_message_pass(int target, int msg)
}
switch (target) {
case MSG_ALL:
- mpic_send_ipi(msg, 0xffffffff);
+ mpic_send_ipi(msg, cpu_online_mask);
break;
case MSG_ALL_BUT_SELF:
- mpic_send_ipi(msg, 0xffffffff & ~(1 << smp_processor_id()));
+ alloc_cpumask_var(&tmp, GFP_NOWAIT);
+ cpumask_andnot(tmp, cpu_online_mask,
+ cpumask_of(smp_processor_id()));
+ mpic_send_ipi(msg, tmp);
+ free_cpumask_var(tmp);
break;
default:
- mpic_send_ipi(msg, 1 << target);
+ mpic_send_ipi(msg, cpumask_of(target));
break;
}
}
@@ -1616,7 +1622,7 @@ int __init smp_mpic_probe(void)
DBG("smp_mpic_probe()...\n");
- nr_cpus = cpus_weight(cpu_possible_map);
+ nr_cpus = cpumask_weight(cpu_possible_mask);
DBG("nr_cpus: %d\n", nr_cpus);
diff --git a/arch/powerpc/sysdev/mv64x60_pci.c b/arch/powerpc/sysdev/mv64x60_pci.c
index 1456015a22d8..198f288570cc 100644
--- a/arch/powerpc/sysdev/mv64x60_pci.c
+++ b/arch/powerpc/sysdev/mv64x60_pci.c
@@ -24,7 +24,7 @@
#define MV64X60_VAL_LEN_MAX 11
#define MV64X60_PCICFG_CPCI_HOTSWAP 0x68
-static ssize_t mv64x60_hs_reg_read(struct kobject *kobj,
+static ssize_t mv64x60_hs_reg_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
@@ -45,7 +45,7 @@ static ssize_t mv64x60_hs_reg_read(struct kobject *kobj,
return sprintf(buf, "0x%08x\n", v);
}
-static ssize_t mv64x60_hs_reg_write(struct kobject *kobj,
+static ssize_t mv64x60_hs_reg_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
diff --git a/arch/powerpc/sysdev/pmi.c b/arch/powerpc/sysdev/pmi.c
index 652652db4ce2..d07137a07d75 100644
--- a/arch/powerpc/sysdev/pmi.c
+++ b/arch/powerpc/sysdev/pmi.c
@@ -124,7 +124,7 @@ static void pmi_notify_handlers(struct work_struct *work)
static int pmi_of_probe(struct of_device *dev,
const struct of_device_id *match)
{
- struct device_node *np = dev->node;
+ struct device_node *np = dev->dev.of_node;
int rc;
if (data) {
@@ -206,11 +206,12 @@ static int pmi_of_remove(struct of_device *dev)
}
static struct of_platform_driver pmi_of_platform_driver = {
- .match_table = pmi_match,
.probe = pmi_of_probe,
.remove = pmi_of_remove,
- .driver = {
- .name = "pmi",
+ .driver = {
+ .name = "pmi",
+ .owner = THIS_MODULE,
+ .of_match_table = pmi_match,
},
};
diff --git a/arch/powerpc/sysdev/ppc4xx_soc.c b/arch/powerpc/sysdev/ppc4xx_soc.c
index 5c014350bf16..d3d6ce3c33b4 100644
--- a/arch/powerpc/sysdev/ppc4xx_soc.c
+++ b/arch/powerpc/sysdev/ppc4xx_soc.c
@@ -191,11 +191,31 @@ static int __init ppc4xx_l2c_probe(void)
arch_initcall(ppc4xx_l2c_probe);
/*
- * At present, this routine just applies a system reset.
+ * Apply a system reset. Alternatively a board specific value may be
+ * provided via the "reset-type" property in the cpu node.
*/
void ppc4xx_reset_system(char *cmd)
{
- mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) | DBCR0_RST_SYSTEM);
+ struct device_node *np;
+ u32 reset_type = DBCR0_RST_SYSTEM;
+ const u32 *prop;
+
+ np = of_find_node_by_type(NULL, "cpu");
+ if (np) {
+ prop = of_get_property(np, "reset-type", NULL);
+
+ /*
+ * Check if property exists and if it is in range:
+ * 1 - PPC4xx core reset
+ * 2 - PPC4xx chip reset
+ * 3 - PPC4xx system reset (default)
+ */
+ if ((prop) && ((prop[0] >= 1) && (prop[0] <= 3)))
+ reset_type = prop[0] << 28;
+ }
+
+ mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) | reset_type);
+
while (1)
; /* Just in case the reset doesn't work */
}
diff --git a/arch/powerpc/sysdev/qe_lib/qe.c b/arch/powerpc/sysdev/qe_lib/qe.c
index 149393c02c3f..093e0ae1a941 100644
--- a/arch/powerpc/sysdev/qe_lib/qe.c
+++ b/arch/powerpc/sysdev/qe_lib/qe.c
@@ -669,8 +669,11 @@ static const struct of_device_id qe_ids[] = {
};
static struct of_platform_driver qe_driver = {
- .driver.name = "fsl-qe",
- .match_table = qe_ids,
+ .driver = {
+ .name = "fsl-qe",
+ .owner = THIS_MODULE,
+ .of_match_table = qe_ids,
+ },
.probe = qe_probe,
.resume = qe_resume,
};
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 0d8cd9bbe101..bee1c0f794cf 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -102,6 +102,7 @@ config S390
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_BZIP2
select HAVE_KERNEL_LZMA
+ select HAVE_KERNEL_LZO
select ARCH_INLINE_SPIN_TRYLOCK
select ARCH_INLINE_SPIN_TRYLOCK_BH
select ARCH_INLINE_SPIN_LOCK
@@ -444,13 +445,6 @@ config FORCE_MAX_ZONEORDER
int
default "9"
-config PROCESS_DEBUG
- bool "Show crashed user process info"
- help
- Say Y to print all process fault locations to the console. This is
- a debugging option; you probably do not want to set it unless you
- are an S390 port maintainer.
-
config PFAULT
bool "Pseudo page fault support"
help
@@ -486,13 +480,6 @@ config CMM
Everybody who wants to run Linux under VM should select this
option.
-config CMM_PROC
- bool "/proc interface to cooperative memory management"
- depends on CMM
- help
- Select this option to enable the /proc interface to the
- cooperative memory management.
-
config CMM_IUCV
bool "IUCV special message interface to cooperative memory management"
depends on CMM && (SMSGIUCV=y || CMM=SMSGIUCV)
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index 0da10746e0e5..30c5f01f93b0 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -116,6 +116,12 @@ image bzImage: vmlinux
zfcpdump:
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
+vdso_install:
+ifeq ($(CONFIG_64BIT),y)
+ $(Q)$(MAKE) $(build)=arch/$(ARCH)/kernel/vdso64 $@
+endif
+ $(Q)$(MAKE) $(build)=arch/$(ARCH)/kernel/vdso32 $@
+
archclean:
$(Q)$(MAKE) $(clean)=$(boot)
diff --git a/arch/s390/boot/compressed/Makefile b/arch/s390/boot/compressed/Makefile
index 6e4a67ad07e1..1c999f726a58 100644
--- a/arch/s390/boot/compressed/Makefile
+++ b/arch/s390/boot/compressed/Makefile
@@ -7,7 +7,7 @@
BITS := $(if $(CONFIG_64BIT),64,31)
targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 \
- vmlinux.bin.lzma misc.o piggy.o sizes.h head$(BITS).o
+ vmlinux.bin.lzma vmlinux.bin.lzo misc.o piggy.o sizes.h head$(BITS).o
KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2
KBUILD_CFLAGS += $(cflags-y)
@@ -47,6 +47,7 @@ vmlinux.bin.all-y := $(obj)/vmlinux.bin
suffix-$(CONFIG_KERNEL_GZIP) := gz
suffix-$(CONFIG_KERNEL_BZIP2) := bz2
suffix-$(CONFIG_KERNEL_LZMA) := lzma
+suffix-$(CONFIG_KERNEL_LZO) := lzo
$(obj)/vmlinux.bin.gz: $(vmlinux.bin.all-y)
$(call if_changed,gzip)
@@ -54,6 +55,8 @@ $(obj)/vmlinux.bin.bz2: $(vmlinux.bin.all-y)
$(call if_changed,bzip2)
$(obj)/vmlinux.bin.lzma: $(vmlinux.bin.all-y)
$(call if_changed,lzma)
+$(obj)/vmlinux.bin.lzo: $(vmlinux.bin.all-y)
+ $(call if_changed,lzo)
LDFLAGS_piggy.o := -r --format binary --oformat $(LD_BFD) -T
$(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.$(suffix-y)
diff --git a/arch/s390/boot/compressed/misc.c b/arch/s390/boot/compressed/misc.c
index 14e0479d3888..0851eb1e919e 100644
--- a/arch/s390/boot/compressed/misc.c
+++ b/arch/s390/boot/compressed/misc.c
@@ -50,6 +50,10 @@ static unsigned long free_mem_end_ptr;
#include "../../../../lib/decompress_unlzma.c"
#endif
+#ifdef CONFIG_KERNEL_LZO
+#include "../../../../lib/decompress_unlzo.c"
+#endif
+
extern _sclp_print_early(const char *);
int puts(const char *s)
diff --git a/arch/s390/hypfs/hypfs.h b/arch/s390/hypfs/hypfs.h
index aea572009d60..fa487d4cc08b 100644
--- a/arch/s390/hypfs/hypfs.h
+++ b/arch/s390/hypfs/hypfs.h
@@ -11,6 +11,7 @@
#include <linux/fs.h>
#include <linux/types.h>
+#include <linux/debugfs.h>
#define REG_FILE_MODE 0440
#define UPDATE_FILE_MODE 0220
@@ -34,6 +35,9 @@ extern int hypfs_diag_create_files(struct super_block *sb, struct dentry *root);
/* VM Hypervisor */
extern int hypfs_vm_init(void);
+extern void hypfs_vm_exit(void);
extern int hypfs_vm_create_files(struct super_block *sb, struct dentry *root);
+/* Directory for debugfs files */
+extern struct dentry *hypfs_dbfs_dir;
#endif /* _HYPFS_H_ */
diff --git a/arch/s390/hypfs/hypfs_diag.c b/arch/s390/hypfs/hypfs_diag.c
index 5b1acdba6495..1211bb1d2f24 100644
--- a/arch/s390/hypfs/hypfs_diag.c
+++ b/arch/s390/hypfs/hypfs_diag.c
@@ -15,6 +15,7 @@
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
+#include <linux/mm.h>
#include <asm/ebcdic.h>
#include "hypfs.h"
@@ -22,6 +23,8 @@
#define CPU_NAME_LEN 16 /* type name len of cpus in diag224 name table */
#define TMP_SIZE 64 /* size of temporary buffers */
+#define DBFS_D204_HDR_VERSION 0
+
/* diag 204 subcodes */
enum diag204_sc {
SUBC_STIB4 = 4,
@@ -47,6 +50,8 @@ static void *diag204_buf; /* 4K aligned buffer for diag204 data */
static void *diag204_buf_vmalloc; /* vmalloc pointer for diag204 data */
static int diag204_buf_pages; /* number of pages for diag204 data */
+static struct dentry *dbfs_d204_file;
+
/*
* DIAG 204 data structures and member access functions.
*
@@ -364,18 +369,21 @@ static void diag204_free_buffer(void)
} else {
free_pages((unsigned long) diag204_buf, 0);
}
- diag204_buf_pages = 0;
diag204_buf = NULL;
}
+static void *page_align_ptr(void *ptr)
+{
+ return (void *) PAGE_ALIGN((unsigned long) ptr);
+}
+
static void *diag204_alloc_vbuf(int pages)
{
/* The buffer has to be page aligned! */
diag204_buf_vmalloc = vmalloc(PAGE_SIZE * (pages + 1));
if (!diag204_buf_vmalloc)
return ERR_PTR(-ENOMEM);
- diag204_buf = (void*)((unsigned long)diag204_buf_vmalloc
- & ~0xfffUL) + 0x1000;
+ diag204_buf = page_align_ptr(diag204_buf_vmalloc);
diag204_buf_pages = pages;
return diag204_buf;
}
@@ -468,17 +476,26 @@ fail_alloc:
return rc;
}
+static int diag204_do_store(void *buf, int pages)
+{
+ int rc;
+
+ rc = diag204((unsigned long) diag204_store_sc |
+ (unsigned long) diag204_info_type, pages, buf);
+ return rc < 0 ? -ENOSYS : 0;
+}
+
static void *diag204_store(void)
{
void *buf;
- int pages;
+ int pages, rc;
buf = diag204_get_buffer(diag204_info_type, &pages);
if (IS_ERR(buf))
goto out;
- if (diag204((unsigned long)diag204_store_sc |
- (unsigned long)diag204_info_type, pages, buf) < 0)
- return ERR_PTR(-ENOSYS);
+ rc = diag204_do_store(buf, pages);
+ if (rc)
+ return ERR_PTR(rc);
out:
return buf;
}
@@ -526,6 +543,92 @@ static int diag224_idx2name(int index, char *name)
return 0;
}
+struct dbfs_d204_hdr {
+ u64 len; /* Length of d204 buffer without header */
+ u16 version; /* Version of header */
+ u8 sc; /* Used subcode */
+ char reserved[53];
+} __attribute__ ((packed));
+
+struct dbfs_d204 {
+ struct dbfs_d204_hdr hdr; /* 64 byte header */
+ char buf[]; /* d204 buffer */
+} __attribute__ ((packed));
+
+struct dbfs_d204_private {
+ struct dbfs_d204 *d204; /* Aligned d204 data with header */
+ void *base; /* Base pointer (needed for vfree) */
+};
+
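+/*
+ * On open, take a fresh diag 204 snapshot into a page-aligned buffer that
+ * is prefixed with a dbfs_d204_hdr, so read() can serve header and payload
+ * from one contiguous buffer.
+ */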
+static int dbfs_d204_open(struct inode *inode, struct file *file)
+{
+ struct dbfs_d204_private *data;
+ struct dbfs_d204 *d204;
+ int rc, buf_size;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ buf_size = PAGE_SIZE * (diag204_buf_pages + 1) + sizeof(d204->hdr);
+ data->base = vmalloc(buf_size);
+ if (!data->base) {
+ rc = -ENOMEM;
+ goto fail_kfree_data;
+ }
+ memset(data->base, 0, buf_size);
+ d204 = page_align_ptr(data->base + sizeof(d204->hdr))
+ - sizeof(d204->hdr);
+ rc = diag204_do_store(&d204->buf, diag204_buf_pages);
+ if (rc)
+ goto fail_vfree_base;
+ d204->hdr.version = DBFS_D204_HDR_VERSION;
+ d204->hdr.len = PAGE_SIZE * diag204_buf_pages;
+ d204->hdr.sc = diag204_store_sc;
+ data->d204 = d204;
+ file->private_data = data;
+ return nonseekable_open(inode, file);
+
+fail_vfree_base:
+ vfree(data->base);
+fail_kfree_data:
+ kfree(data);
+ return rc;
+}
+
+static int dbfs_d204_release(struct inode *inode, struct file *file)
+{
+ struct dbfs_d204_private *data = file->private_data;
+
+ vfree(data->base);
+ kfree(data);
+ return 0;
+}
+
+static ssize_t dbfs_d204_read(struct file *file, char __user *buf,
+ size_t size, loff_t *ppos)
+{
+ struct dbfs_d204_private *data = file->private_data;
+
+ return simple_read_from_buffer(buf, size, ppos, data->d204,
+ data->d204->hdr.len +
+ sizeof(data->d204->hdr));
+}
+
+static const struct file_operations dbfs_d204_ops = {
+ .open = dbfs_d204_open,
+ .read = dbfs_d204_read,
+ .release = dbfs_d204_release,
+};
+
+static int hypfs_dbfs_init(void)
+{
+ dbfs_d204_file = debugfs_create_file("diag_204", 0400, hypfs_dbfs_dir,
+ NULL, &dbfs_d204_ops);
+ if (IS_ERR(dbfs_d204_file))
+ return PTR_ERR(dbfs_d204_file);
+ return 0;
+}
+
__init int hypfs_diag_init(void)
{
int rc;
@@ -540,11 +643,17 @@ __init int hypfs_diag_init(void)
pr_err("The hardware system does not provide all "
"functions required by hypfs\n");
}
+ if (diag204_info_type == INFO_EXT) {
+ rc = hypfs_dbfs_init();
+ if (rc)
+ diag204_free_buffer();
+ }
return rc;
}
void hypfs_diag_exit(void)
{
+ debugfs_remove(dbfs_d204_file);
diag224_delete_name_table();
diag204_free_buffer();
}
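Illustrative aside (not part of the patch): dbfs_d204_open() above places the 64-byte header directly in front of a page-aligned payload by rounding base + sizeof(hdr) up to the next page boundary and stepping back by the header size. A small userspace sketch of that arithmetic; the page_align_ptr() stand-in, the 4 KiB page size and the 64-byte header size are assumptions for the demo only:

	/*
	 * Userspace sketch of the "header just before a page-aligned buffer"
	 * layout used by dbfs_d204_open(). Not kernel code.
	 */
	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	#define DEMO_PAGE_SIZE	4096UL
	#define DEMO_HDR_SIZE	64UL	/* matches the 64 byte dbfs headers */

	static void *page_align_ptr(void *ptr)
	{
		uintptr_t p = (uintptr_t) ptr;

		/* round up to the next page boundary (no-op if already aligned) */
		return (void *) ((p + DEMO_PAGE_SIZE - 1) & ~(DEMO_PAGE_SIZE - 1));
	}

	int main(void)
	{
		/* one extra page so that rounding up always stays in bounds */
		size_t size = DEMO_PAGE_SIZE * 2 + DEMO_HDR_SIZE;
		char *base = malloc(size);
		char *hdr, *payload;

		if (!base)
			return 1;
		/* round hdr+payload start up to a page, then step back the header */
		hdr = (char *) page_align_ptr(base + DEMO_HDR_SIZE) - DEMO_HDR_SIZE;
		payload = hdr + DEMO_HDR_SIZE;

		assert(((uintptr_t) payload & (DEMO_PAGE_SIZE - 1)) == 0);
		assert(hdr >= base && payload + DEMO_PAGE_SIZE <= base + size);
		printf("base=%p hdr=%p payload=%p (page aligned)\n", base, hdr, payload);
		free(base);
		return 0;
	}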
diff --git a/arch/s390/hypfs/hypfs_vm.c b/arch/s390/hypfs/hypfs_vm.c
index f0b0d31f0b48..ee5ab1a578e7 100644
--- a/arch/s390/hypfs/hypfs_vm.c
+++ b/arch/s390/hypfs/hypfs_vm.c
@@ -10,14 +10,18 @@
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <asm/ebcdic.h>
+#include <asm/timex.h>
#include "hypfs.h"
#define NAME_LEN 8
+#define DBFS_D2FC_HDR_VERSION 0
static char local_guest[] = " ";
static char all_guests[] = "* ";
static char *guest_query;
+static struct dentry *dbfs_d2fc_file;
+
struct diag2fc_data {
__u32 version;
__u32 flags;
@@ -76,23 +80,26 @@ static int diag2fc(int size, char* query, void *addr)
return -residual_cnt;
}
-static struct diag2fc_data *diag2fc_store(char *query, int *count)
+/*
+ * Allocate buffer for "query" and store diag 2fc at "offset"
+ */
+static void *diag2fc_store(char *query, unsigned int *count, int offset)
{
+ void *data;
int size;
- struct diag2fc_data *data;
do {
size = diag2fc(0, query, NULL);
if (size < 0)
return ERR_PTR(-EACCES);
- data = vmalloc(size);
+ data = vmalloc(size + offset);
if (!data)
return ERR_PTR(-ENOMEM);
- if (diag2fc(size, query, data) == 0)
+ if (diag2fc(size, query, data + offset) == 0)
break;
vfree(data);
} while (1);
- *count = (size / sizeof(*data));
+ *count = (size / sizeof(struct diag2fc_data));
return data;
}
@@ -168,9 +175,10 @@ int hypfs_vm_create_files(struct super_block *sb, struct dentry *root)
{
struct dentry *dir, *file;
struct diag2fc_data *data;
- int rc, i, count = 0;
+ unsigned int count = 0;
+ int rc, i;
- data = diag2fc_store(guest_query, &count);
+ data = diag2fc_store(guest_query, &count, 0);
if (IS_ERR(data))
return PTR_ERR(data);
@@ -218,8 +226,61 @@ failed:
return rc;
}
+struct dbfs_d2fc_hdr {
+ u64 len; /* Length of d2fc buffer without header */
+ u16 version; /* Version of header */
+ char tod_ext[16]; /* TOD clock for d2fc */
+ u64 count; /* Number of VM guests in d2fc buffer */
+ char reserved[30];
+} __attribute__ ((packed));
+
+struct dbfs_d2fc {
+ struct dbfs_d2fc_hdr hdr; /* 64 byte header */
+ char buf[]; /* d2fc buffer */
+} __attribute__ ((packed));
+
+static int dbfs_d2fc_open(struct inode *inode, struct file *file)
+{
+ struct dbfs_d2fc *data;
+ unsigned int count;
+
+ data = diag2fc_store(guest_query, &count, sizeof(data->hdr));
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+ get_clock_ext(data->hdr.tod_ext);
+ data->hdr.len = count * sizeof(struct diag2fc_data);
+ data->hdr.version = DBFS_D2FC_HDR_VERSION;
+ data->hdr.count = count;
+ memset(&data->hdr.reserved, 0, sizeof(data->hdr.reserved));
+ file->private_data = data;
+ return nonseekable_open(inode, file);
+}
+
+static int dbfs_d2fc_release(struct inode *inode, struct file *file)
+{
+ diag2fc_free(file->private_data);
+ return 0;
+}
+
+static ssize_t dbfs_d2fc_read(struct file *file, char __user *buf,
+ size_t size, loff_t *ppos)
+{
+ struct dbfs_d2fc *data = file->private_data;
+
+ return simple_read_from_buffer(buf, size, ppos, data, data->hdr.len +
+ sizeof(struct dbfs_d2fc_hdr));
+}
+
+static const struct file_operations dbfs_d2fc_ops = {
+ .open = dbfs_d2fc_open,
+ .read = dbfs_d2fc_read,
+ .release = dbfs_d2fc_release,
+};
+
int hypfs_vm_init(void)
{
+ if (!MACHINE_IS_VM)
+ return 0;
if (diag2fc(0, all_guests, NULL) > 0)
guest_query = all_guests;
else if (diag2fc(0, local_guest, NULL) > 0)
@@ -227,5 +288,17 @@ int hypfs_vm_init(void)
else
return -EACCES;
+ dbfs_d2fc_file = debugfs_create_file("diag_2fc", 0400, hypfs_dbfs_dir,
+ NULL, &dbfs_d2fc_ops);
+ if (IS_ERR(dbfs_d2fc_file))
+ return PTR_ERR(dbfs_d2fc_file);
+
return 0;
}
+
+void hypfs_vm_exit(void)
+{
+ if (!MACHINE_IS_VM)
+ return;
+ debugfs_remove(dbfs_d2fc_file);
+}
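As an illustration (not part of the patch), each new debugfs file returns a fixed 64-byte header followed by the raw diagnose payload. A minimal userspace reader sketch for diag_2fc, assuming debugfs is mounted at /sys/kernel/debug and mirroring dbfs_d2fc_hdr from the hunk above; the diag_204 file can be read the same way with its header layout:

	#include <stdint.h>
	#include <stdio.h>

	/* mirrors dbfs_d2fc_hdr from the patch; 8 + 2 + 16 + 8 + 30 = 64 bytes */
	struct d2fc_hdr {
		uint64_t len;		/* length of d2fc buffer without header */
		uint16_t version;	/* header version */
		char	 tod_ext[16];	/* TOD clock at sample time */
		uint64_t count;		/* number of VM guests in the buffer */
		char	 reserved[30];
	} __attribute__((packed));

	int main(void)
	{
		const char *path = "/sys/kernel/debug/s390_hypfs/diag_2fc";
		struct d2fc_hdr hdr;
		FILE *f = fopen(path, "rb");

		if (!f) {
			perror(path);
			return 1;
		}
		if (fread(&hdr, sizeof(hdr), 1, f) != 1) {
			fprintf(stderr, "short read on %s\n", path);
			fclose(f);
			return 1;
		}
		printf("version %u, %llu guests, %llu payload bytes follow\n",
		       (unsigned) hdr.version,
		       (unsigned long long) hdr.count,
		       (unsigned long long) hdr.len);
		fclose(f);
		return 0;
	}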
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index c53f8ac825ca..6b120f073043 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -46,6 +46,8 @@ static const struct super_operations hypfs_s_ops;
/* start of list of all dentries, which have to be deleted on update */
static struct dentry *hypfs_last_dentry;
+struct dentry *hypfs_dbfs_dir;
+
static void hypfs_update_update(struct super_block *sb)
{
struct hypfs_sb_info *sb_info = sb->s_fs_info;
@@ -145,7 +147,7 @@ static int hypfs_open(struct inode *inode, struct file *filp)
}
mutex_unlock(&fs_info->lock);
}
- return 0;
+ return nonseekable_open(inode, filp);
}
static ssize_t hypfs_aio_read(struct kiocb *iocb, const struct iovec *iov,
@@ -468,20 +470,22 @@ static int __init hypfs_init(void)
{
int rc;
- if (MACHINE_IS_VM) {
- if (hypfs_vm_init())
- /* no diag 2fc, just exit */
- return -ENODATA;
- } else {
- if (hypfs_diag_init()) {
- rc = -ENODATA;
- goto fail_diag;
- }
+ hypfs_dbfs_dir = debugfs_create_dir("s390_hypfs", NULL);
+ if (IS_ERR(hypfs_dbfs_dir))
+ return PTR_ERR(hypfs_dbfs_dir);
+
+ if (hypfs_diag_init()) {
+ rc = -ENODATA;
+ goto fail_debugfs_remove;
+ }
+ if (hypfs_vm_init()) {
+ rc = -ENODATA;
+ goto fail_hypfs_diag_exit;
}
s390_kobj = kobject_create_and_add("s390", hypervisor_kobj);
if (!s390_kobj) {
rc = -ENOMEM;
- goto fail_sysfs;
+ goto fail_hypfs_vm_exit;
}
rc = register_filesystem(&hypfs_type);
if (rc)
@@ -490,18 +494,22 @@ static int __init hypfs_init(void)
fail_filesystem:
kobject_put(s390_kobj);
-fail_sysfs:
- if (!MACHINE_IS_VM)
- hypfs_diag_exit();
-fail_diag:
+fail_hypfs_vm_exit:
+ hypfs_vm_exit();
+fail_hypfs_diag_exit:
+ hypfs_diag_exit();
+fail_debugfs_remove:
+ debugfs_remove(hypfs_dbfs_dir);
+
pr_err("Initialization of hypfs failed with rc=%i\n", rc);
return rc;
}
static void __exit hypfs_exit(void)
{
- if (!MACHINE_IS_VM)
- hypfs_diag_exit();
+ hypfs_diag_exit();
+ hypfs_vm_exit();
+ debugfs_remove(hypfs_dbfs_dir);
unregister_filesystem(&hypfs_type);
kobject_put(s390_kobj);
}
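Side note (not from the patch): the reworked hypfs_init() unwinds strictly in reverse order of initialization through a ladder of goto labels, so every failure path undoes exactly the steps that already succeeded. A minimal sketch of that pattern with made-up step names:

	#include <stdio.h>

	static int setup_a(void) { puts("setup a"); return 0; }
	static int setup_b(void) { puts("setup b"); return 0; }
	static int setup_c(void) { puts("setup c"); return -1; }	/* simulate failure */
	static void teardown_a(void) { puts("teardown a"); }
	static void teardown_b(void) { puts("teardown b"); }

	static int demo_init(void)
	{
		int rc;

		rc = setup_a();
		if (rc)
			goto fail;
		rc = setup_b();
		if (rc)
			goto fail_teardown_a;
		rc = setup_c();
		if (rc)
			goto fail_teardown_b;
		return 0;

	fail_teardown_b:
		teardown_b();
	fail_teardown_a:
		teardown_a();
	fail:
		fprintf(stderr, "init failed with rc=%d\n", rc);
		return rc;
	}

	int main(void)
	{
		return demo_init() ? 1 : 0;
	}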
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index 451bfbb9db3d..76daea117181 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -15,6 +15,7 @@
#include <linux/compiler.h>
#include <linux/types.h>
+#include <asm/system.h>
#define ATOMIC_INIT(i) { (i) }
@@ -274,6 +275,7 @@ static inline void atomic64_clear_mask(unsigned long long mask, atomic64_t *v)
static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
long long c, old;
+
c = atomic64_read(v);
for (;;) {
if (unlikely(c == u))
@@ -286,6 +288,23 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
return c != u;
}
+static inline long long atomic64_dec_if_positive(atomic64_t *v)
+{
+ long long c, old, dec;
+
+ c = atomic64_read(v);
+ for (;;) {
+ dec = c - 1;
+ if (unlikely(dec < 0))
+ break;
+ old = atomic64_cmpxchg((v), c, dec);
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+ return dec;
+}
+
#define atomic64_add(_i, _v) atomic64_add_return(_i, _v)
#define atomic64_add_negative(_i, _v) (atomic64_add_return(_i, _v) < 0)
#define atomic64_inc(_v) atomic64_add_return(1, _v)
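For illustration (not part of the patch): atomic64_dec_if_positive() is a standard compare-and-swap retry loop. A userspace analogue built on the GCC __atomic builtins instead of the kernel's atomic64_cmpxchg() shows the same shape:

	#include <stdint.h>
	#include <stdio.h>

	static long long dec_if_positive(long long *v)
	{
		long long c = __atomic_load_n(v, __ATOMIC_SEQ_CST);
		long long dec;

		for (;;) {
			dec = c - 1;
			if (dec < 0)
				break;		/* would go negative: leave *v alone */
			/* on failure the current value is reloaded into c */
			if (__atomic_compare_exchange_n(v, &c, dec, 0,
							__ATOMIC_SEQ_CST,
							__ATOMIC_SEQ_CST))
				break;
		}
		return dec;			/* < 0 means the decrement was refused */
	}

	int main(void)
	{
		long long counter = 2;

		printf("%lld\n", dec_if_positive(&counter));	/* 1 */
		printf("%lld\n", dec_if_positive(&counter));	/* 0 */
		printf("%lld\n", dec_if_positive(&counter));	/* -1, counter stays 0 */
		printf("counter=%lld\n", counter);		/* 0 */
		return 0;
	}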
diff --git a/arch/s390/include/asm/bug.h b/arch/s390/include/asm/bug.h
index 9beeb9db9b23..bf90d1fd97a5 100644
--- a/arch/s390/include/asm/bug.h
+++ b/arch/s390/include/asm/bug.h
@@ -46,18 +46,18 @@
unreachable(); \
} while (0)
-#define __WARN() do { \
- __EMIT_BUG(BUGFLAG_WARNING); \
+#define __WARN_TAINT(taint) do { \
+ __EMIT_BUG(BUGFLAG_TAINT(taint)); \
} while (0)
#define WARN_ON(x) ({ \
int __ret_warn_on = !!(x); \
if (__builtin_constant_p(__ret_warn_on)) { \
if (__ret_warn_on) \
- __EMIT_BUG(BUGFLAG_WARNING); \
+ __WARN(); \
} else { \
if (unlikely(__ret_warn_on)) \
- __EMIT_BUG(BUGFLAG_WARNING); \
+ __WARN(); \
} \
unlikely(__ret_warn_on); \
})
diff --git a/arch/s390/include/asm/ccwdev.h b/arch/s390/include/asm/ccwdev.h
index f4bd346a52d3..1c0030f9b890 100644
--- a/arch/s390/include/asm/ccwdev.h
+++ b/arch/s390/include/asm/ccwdev.h
@@ -91,6 +91,14 @@ struct ccw_device {
void (*handler) (struct ccw_device *, unsigned long, struct irb *);
};
+/*
+ * Possible CIO actions triggered by the unit check handler.
+ */
+enum uc_todo {
+ UC_TODO_RETRY,
+ UC_TODO_RETRY_ON_NEW_PATH,
+ UC_TODO_STOP
+};
/**
* struct ccw driver - device driver for channel attached devices
@@ -107,6 +115,7 @@ struct ccw_device {
* @freeze: callback for freezing during hibernation snapshotting
* @thaw: undo work done in @freeze
* @restore: callback for restoring after hibernation
+ * @uc_handler: callback for unit check handler
* @driver: embedded device driver structure
* @name: device driver name
*/
@@ -124,6 +133,7 @@ struct ccw_driver {
int (*freeze)(struct ccw_device *);
int (*thaw) (struct ccw_device *);
int (*restore)(struct ccw_device *);
+ enum uc_todo (*uc_handler) (struct ccw_device *, struct irb *);
struct device_driver driver;
char *name;
};
diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h
index 258ba88b7b50..8b1a52a137c5 100644
--- a/arch/s390/include/asm/cputime.h
+++ b/arch/s390/include/asm/cputime.h
@@ -188,15 +188,16 @@ struct s390_idle_data {
DECLARE_PER_CPU(struct s390_idle_data, s390_idle);
-void vtime_start_cpu(void);
+void vtime_start_cpu(__u64 int_clock, __u64 enter_timer);
cputime64_t s390_get_idle_time(int cpu);
#define arch_idle_time(cpu) s390_get_idle_time(cpu)
-static inline void s390_idle_check(void)
+static inline void s390_idle_check(struct pt_regs *regs, __u64 int_clock,
+ __u64 enter_timer)
{
- if ((&__get_cpu_var(s390_idle))->idle_enter != 0ULL)
- vtime_start_cpu();
+ if (regs->psw.mask & PSW_MASK_WAIT)
+ vtime_start_cpu(int_clock, enter_timer);
}
static inline int s390_nohz_delay(int cpu)
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 05527c040b7a..0f97ef2d92ac 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -104,38 +104,39 @@ struct _lowcore {
/* CPU time accounting values */
__u64 sync_enter_timer; /* 0x0250 */
__u64 async_enter_timer; /* 0x0258 */
- __u64 exit_timer; /* 0x0260 */
- __u64 user_timer; /* 0x0268 */
- __u64 system_timer; /* 0x0270 */
- __u64 steal_timer; /* 0x0278 */
- __u64 last_update_timer; /* 0x0280 */
- __u64 last_update_clock; /* 0x0288 */
+ __u64 mcck_enter_timer; /* 0x0260 */
+ __u64 exit_timer; /* 0x0268 */
+ __u64 user_timer; /* 0x0270 */
+ __u64 system_timer; /* 0x0278 */
+ __u64 steal_timer; /* 0x0280 */
+ __u64 last_update_timer; /* 0x0288 */
+ __u64 last_update_clock; /* 0x0290 */
/* Current process. */
- __u32 current_task; /* 0x0290 */
- __u32 thread_info; /* 0x0294 */
- __u32 kernel_stack; /* 0x0298 */
+ __u32 current_task; /* 0x0298 */
+ __u32 thread_info; /* 0x029c */
+ __u32 kernel_stack; /* 0x02a0 */
/* Interrupt and panic stack. */
- __u32 async_stack; /* 0x029c */
- __u32 panic_stack; /* 0x02a0 */
+ __u32 async_stack; /* 0x02a4 */
+ __u32 panic_stack; /* 0x02a8 */
/* Address space pointer. */
- __u32 kernel_asce; /* 0x02a4 */
- __u32 user_asce; /* 0x02a8 */
- __u32 user_exec_asce; /* 0x02ac */
+ __u32 kernel_asce; /* 0x02ac */
+ __u32 user_asce; /* 0x02b0 */
+ __u32 user_exec_asce; /* 0x02b4 */
/* SMP info area */
- struct cpuid cpu_id; /* 0x02b0 */
__u32 cpu_nr; /* 0x02b8 */
__u32 softirq_pending; /* 0x02bc */
__u32 percpu_offset; /* 0x02c0 */
__u32 ext_call_fast; /* 0x02c4 */
__u64 int_clock; /* 0x02c8 */
- __u64 clock_comparator; /* 0x02d0 */
- __u32 machine_flags; /* 0x02d8 */
- __u32 ftrace_func; /* 0x02dc */
- __u8 pad_0x02e0[0x0300-0x02e0]; /* 0x02e0 */
+ __u64 mcck_clock; /* 0x02d0 */
+ __u64 clock_comparator; /* 0x02d8 */
+ __u32 machine_flags; /* 0x02e0 */
+ __u32 ftrace_func; /* 0x02e4 */
+ __u8 pad_0x02e8[0x0300-0x02e8]; /* 0x02e8 */
/* Interrupt response block */
__u8 irb[64]; /* 0x0300 */
@@ -189,14 +190,14 @@ struct _lowcore {
__u32 data_exc_code; /* 0x0090 */
__u16 mon_class_num; /* 0x0094 */
__u16 per_perc_atmid; /* 0x0096 */
- addr_t per_address; /* 0x0098 */
+ __u64 per_address; /* 0x0098 */
__u8 exc_access_id; /* 0x00a0 */
__u8 per_access_id; /* 0x00a1 */
__u8 op_access_id; /* 0x00a2 */
__u8 ar_access_id; /* 0x00a3 */
__u8 pad_0x00a4[0x00a8-0x00a4]; /* 0x00a4 */
- addr_t trans_exc_code; /* 0x00a8 */
- addr_t monitor_code; /* 0x00b0 */
+ __u64 trans_exc_code; /* 0x00a8 */
+ __u64 monitor_code; /* 0x00b0 */
__u16 subchannel_id; /* 0x00b8 */
__u16 subchannel_nr; /* 0x00ba */
__u32 io_int_parm; /* 0x00bc */
@@ -207,7 +208,7 @@ struct _lowcore {
__u32 mcck_interruption_code[2]; /* 0x00e8 */
__u8 pad_0x00f0[0x00f4-0x00f0]; /* 0x00f0 */
__u32 external_damage_code; /* 0x00f4 */
- addr_t failing_storage_address; /* 0x00f8 */
+ __u64 failing_storage_address; /* 0x00f8 */
__u8 pad_0x0100[0x0110-0x0100]; /* 0x0100 */
__u64 breaking_event_addr; /* 0x0110 */
__u8 pad_0x0118[0x0120-0x0118]; /* 0x0118 */
@@ -233,39 +234,41 @@ struct _lowcore {
/* CPU accounting and timing values. */
__u64 sync_enter_timer; /* 0x02a0 */
__u64 async_enter_timer; /* 0x02a8 */
- __u64 exit_timer; /* 0x02b0 */
- __u64 user_timer; /* 0x02b8 */
- __u64 system_timer; /* 0x02c0 */
- __u64 steal_timer; /* 0x02c8 */
- __u64 last_update_timer; /* 0x02d0 */
- __u64 last_update_clock; /* 0x02d8 */
+ __u64 mcck_enter_timer; /* 0x02b0 */
+ __u64 exit_timer; /* 0x02b8 */
+ __u64 user_timer; /* 0x02c0 */
+ __u64 system_timer; /* 0x02c8 */
+ __u64 steal_timer; /* 0x02d0 */
+ __u64 last_update_timer; /* 0x02d8 */
+ __u64 last_update_clock; /* 0x02e0 */
/* Current process. */
- __u64 current_task; /* 0x02e0 */
- __u64 thread_info; /* 0x02e8 */
- __u64 kernel_stack; /* 0x02f0 */
+ __u64 current_task; /* 0x02e8 */
+ __u64 thread_info; /* 0x02f0 */
+ __u64 kernel_stack; /* 0x02f8 */
/* Interrupt and panic stack. */
- __u64 async_stack; /* 0x02f8 */
- __u64 panic_stack; /* 0x0300 */
+ __u64 async_stack; /* 0x0300 */
+ __u64 panic_stack; /* 0x0308 */
/* Address space pointer. */
- __u64 kernel_asce; /* 0x0308 */
- __u64 user_asce; /* 0x0310 */
- __u64 user_exec_asce; /* 0x0318 */
+ __u64 kernel_asce; /* 0x0310 */
+ __u64 user_asce; /* 0x0318 */
+ __u64 user_exec_asce; /* 0x0320 */
/* SMP info area */
- struct cpuid cpu_id; /* 0x0320 */
__u32 cpu_nr; /* 0x0328 */
__u32 softirq_pending; /* 0x032c */
__u64 percpu_offset; /* 0x0330 */
__u64 ext_call_fast; /* 0x0338 */
__u64 int_clock; /* 0x0340 */
- __u64 clock_comparator; /* 0x0348 */
- __u64 vdso_per_cpu_data; /* 0x0350 */
- __u64 machine_flags; /* 0x0358 */
- __u64 ftrace_func; /* 0x0360 */
- __u8 pad_0x0368[0x0380-0x0368]; /* 0x0368 */
+ __u64 mcck_clock; /* 0x0348 */
+ __u64 clock_comparator; /* 0x0350 */
+ __u64 vdso_per_cpu_data; /* 0x0358 */
+ __u64 machine_flags; /* 0x0360 */
+ __u64 ftrace_func; /* 0x0368 */
+ __u64 sie_hook; /* 0x0370 */
+ __u64 cmf_hpp; /* 0x0378 */
/* Interrupt response block. */
__u8 irb[64]; /* 0x0380 */
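Illustrative aside (not part of the patch): inserting mcck_enter_timer shifts every later field of the timer block by 8 bytes, which is what the renumbered offset comments above reflect. A small offsetof() check over a userspace stand-in for the 31-bit timer fragment confirms the arithmetic; this is a demo struct, not the kernel lowcore:

	#include <assert.h>
	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	struct lowcore_fragment {
		uint8_t  pad[0x250];
		uint64_t sync_enter_timer;	/* 0x0250 */
		uint64_t async_enter_timer;	/* 0x0258 */
		uint64_t mcck_enter_timer;	/* 0x0260, newly inserted */
		uint64_t exit_timer;		/* 0x0268 */
		uint64_t user_timer;		/* 0x0270 */
		uint64_t system_timer;		/* 0x0278 */
		uint64_t steal_timer;		/* 0x0280 */
		uint64_t last_update_timer;	/* 0x0288 */
		uint64_t last_update_clock;	/* 0x0290 */
	} __attribute__((packed));

	int main(void)
	{
		assert(offsetof(struct lowcore_fragment, mcck_enter_timer) == 0x260);
		assert(offsetof(struct lowcore_fragment, exit_timer) == 0x268);
		assert(offsetof(struct lowcore_fragment, last_update_clock) == 0x290);
		printf("lowcore timer offsets match the updated comments\n");
		return 0;
	}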
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
index fef9b33cdd59..e2c218dc68a6 100644
--- a/arch/s390/include/asm/ptrace.h
+++ b/arch/s390/include/asm/ptrace.h
@@ -328,8 +328,8 @@ struct pt_regs
psw_t psw;
unsigned long gprs[NUM_GPRS];
unsigned long orig_gpr2;
- unsigned short svcnr;
unsigned short ilc;
+ unsigned short svcnr;
};
#endif
@@ -436,6 +436,7 @@ typedef struct
#define PTRACE_PEEKDATA_AREA 0x5003
#define PTRACE_POKETEXT_AREA 0x5004
#define PTRACE_POKEDATA_AREA 0x5005
+#define PTRACE_GET_LAST_BREAK 0x5006
/*
* PT_PROT definition is loosely based on hppa bsd definition in
diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
index 9b04b1102bbc..0eaae6260274 100644
--- a/arch/s390/include/asm/qdio.h
+++ b/arch/s390/include/asm/qdio.h
@@ -368,14 +368,12 @@ struct qdio_initialize {
#define QDIO_FLAG_SYNC_OUTPUT 0x02
#define QDIO_FLAG_PCI_OUT 0x10
-extern int qdio_initialize(struct qdio_initialize *);
extern int qdio_allocate(struct qdio_initialize *);
extern int qdio_establish(struct qdio_initialize *);
extern int qdio_activate(struct ccw_device *);
extern int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
int q_nr, unsigned int bufnr, unsigned int count);
-extern int qdio_cleanup(struct ccw_device*, int);
extern int qdio_shutdown(struct ccw_device*, int);
extern int qdio_free(struct ccw_device *);
extern int qdio_get_ssqd_desc(struct ccw_device *dev, struct qdio_ssqd_desc*);
diff --git a/arch/s390/include/asm/scatterlist.h b/arch/s390/include/asm/scatterlist.h
index 35d786fe93ae..be44d94cba54 100644
--- a/arch/s390/include/asm/scatterlist.h
+++ b/arch/s390/include/asm/scatterlist.h
@@ -1 +1,3 @@
+#define ISA_DMA_THRESHOLD (~0UL)
+
#include <asm-generic/scatterlist.h>
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index 9ab6bd3a65d1..25e831d58e1e 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -2,7 +2,7 @@
* include/asm-s390/setup.h
*
* S390 version
- * Copyright IBM Corp. 1999,2006
+ * Copyright IBM Corp. 1999,2010
*/
#ifndef _ASM_S390_SETUP_H
@@ -72,6 +72,7 @@ extern unsigned int user_mode;
#define MACHINE_FLAG_HPAGE (1UL << 10)
#define MACHINE_FLAG_PFMF (1UL << 11)
#define MACHINE_FLAG_LPAR (1UL << 12)
+#define MACHINE_FLAG_SPP (1UL << 13)
#define MACHINE_IS_VM (S390_lowcore.machine_flags & MACHINE_FLAG_VM)
#define MACHINE_IS_KVM (S390_lowcore.machine_flags & MACHINE_FLAG_KVM)
@@ -88,6 +89,7 @@ extern unsigned int user_mode;
#define MACHINE_HAS_MVCOS (0)
#define MACHINE_HAS_HPAGE (0)
#define MACHINE_HAS_PFMF (0)
+#define MACHINE_HAS_SPP (0)
#else /* __s390x__ */
#define MACHINE_HAS_IEEE (1)
#define MACHINE_HAS_CSP (1)
@@ -97,6 +99,7 @@ extern unsigned int user_mode;
#define MACHINE_HAS_MVCOS (S390_lowcore.machine_flags & MACHINE_FLAG_MVCOS)
#define MACHINE_HAS_HPAGE (S390_lowcore.machine_flags & MACHINE_FLAG_HPAGE)
#define MACHINE_HAS_PFMF (S390_lowcore.machine_flags & MACHINE_FLAG_PFMF)
+#define MACHINE_HAS_SPP (S390_lowcore.machine_flags & MACHINE_FLAG_SPP)
#endif /* __s390x__ */
#define ZFCPDUMP_HSA_SIZE (32UL<<20)
diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
index 1741c1556a4e..cef66210c846 100644
--- a/arch/s390/include/asm/system.h
+++ b/arch/s390/include/asm/system.h
@@ -459,11 +459,6 @@ extern void (*_machine_power_off)(void);
#define arch_align_stack(x) (x)
-#ifdef CONFIG_TRACE_IRQFLAGS
-extern psw_t sysc_restore_trace_psw;
-extern psw_t io_restore_trace_psw;
-#endif
-
static inline int tprot(unsigned long addr)
{
int rc = -EFAULT;
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index 34f0873d6525..5baf0230b29b 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -50,6 +50,7 @@ struct thread_info {
struct restart_block restart_block;
__u64 user_timer;
__u64 system_timer;
+ unsigned long last_break; /* last breaking-event-address. */
};
/*
@@ -96,7 +97,7 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling
TIF_NEED_RESCHED */
#define TIF_31BIT 17 /* 32bit process */
-#define TIF_MEMDIE 18
+#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
#define TIF_RESTORE_SIGMASK 19 /* restore signal mask in do_signal() */
#define TIF_FREEZE 20 /* thread is freezing for suspend */
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index f174bdaa6b59..09d345a701dc 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -61,11 +61,15 @@ static inline unsigned long long get_clock (void)
return clk;
}
+static inline void get_clock_ext(char *clk)
+{
+ asm volatile("stcke %0" : "=Q" (*clk) : : "cc");
+}
+
static inline unsigned long long get_clock_xt(void)
{
unsigned char clk[16];
-
- asm volatile("stcke %0" : "=Q" (clk) : : "cc");
+ get_clock_ext(clk);
return *((unsigned long long *)&clk[1]);
}
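As a sketch (not part of the patch): get_clock_xt() now fills a 16-byte buffer via get_clock_ext()/STCKE and takes the eight bytes starting at offset 1 as the 64-bit TOD value. A tiny userspace demonstration of that byte extraction; the sample clock bytes are fabricated, on real hardware the buffer would come from STCKE:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	static uint64_t clock_xt_from_stcke(const unsigned char clk[16])
	{
		uint64_t tod;

		/* bytes 1..8 are what get_clock_xt() returns */
		memcpy(&tod, &clk[1], sizeof(tod));
		return tod;	/* s390 is big-endian, so the cast in the kernel needs no swap */
	}

	int main(void)
	{
		/* fabricated STCKE result: leading byte, 8 TOD bytes, trailing bits */
		unsigned char clk[16] = {
			0x00, 0xc8, 0x3d, 0x0f, 0x2a, 0x11, 0x22, 0x33,
			0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb,
		};

		printf("tod = 0x%016llx\n",
		       (unsigned long long) clock_xt_from_stcke(clk));
		return 0;
	}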
diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
index 6e7211abd950..dc8a67297d0f 100644
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -7,8 +7,10 @@
const struct cpumask *cpu_coregroup_mask(unsigned int cpu);
+extern unsigned char cpu_core_id[NR_CPUS];
extern cpumask_t cpu_core_map[NR_CPUS];
+#define topology_core_id(cpu) (cpu_core_id[cpu])
#define topology_core_cpumask(cpu) (&cpu_core_map[cpu])
int topology_set_cpu_management(int fc);
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index a09408952ed0..5232278d79ad 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -39,6 +39,7 @@ int main(void)
DEFINE(__TI_precount, offsetof(struct thread_info, preempt_count));
DEFINE(__TI_user_timer, offsetof(struct thread_info, user_timer));
DEFINE(__TI_system_timer, offsetof(struct thread_info, system_timer));
+ DEFINE(__TI_last_break, offsetof(struct thread_info, last_break));
BLANK();
DEFINE(__PT_ARGS, offsetof(struct pt_regs, args));
DEFINE(__PT_PSW, offsetof(struct pt_regs, psw));
@@ -112,6 +113,7 @@ int main(void)
DEFINE(__LC_RETURN_MCCK_PSW, offsetof(struct _lowcore, return_mcck_psw));
DEFINE(__LC_SYNC_ENTER_TIMER, offsetof(struct _lowcore, sync_enter_timer));
DEFINE(__LC_ASYNC_ENTER_TIMER, offsetof(struct _lowcore, async_enter_timer));
+ DEFINE(__LC_MCCK_ENTER_TIMER, offsetof(struct _lowcore, mcck_enter_timer));
DEFINE(__LC_EXIT_TIMER, offsetof(struct _lowcore, exit_timer));
DEFINE(__LC_USER_TIMER, offsetof(struct _lowcore, user_timer));
DEFINE(__LC_SYSTEM_TIMER, offsetof(struct _lowcore, system_timer));
@@ -126,8 +128,8 @@ int main(void)
DEFINE(__LC_KERNEL_ASCE, offsetof(struct _lowcore, kernel_asce));
DEFINE(__LC_USER_ASCE, offsetof(struct _lowcore, user_asce));
DEFINE(__LC_USER_EXEC_ASCE, offsetof(struct _lowcore, user_exec_asce));
- DEFINE(__LC_CPUID, offsetof(struct _lowcore, cpu_id));
DEFINE(__LC_INT_CLOCK, offsetof(struct _lowcore, int_clock));
+ DEFINE(__LC_MCCK_CLOCK, offsetof(struct _lowcore, mcck_clock));
DEFINE(__LC_MACHINE_FLAGS, offsetof(struct _lowcore, machine_flags));
DEFINE(__LC_FTRACE_FUNC, offsetof(struct _lowcore, ftrace_func));
DEFINE(__LC_IRB, offsetof(struct _lowcore, irb));
@@ -150,6 +152,8 @@ int main(void)
DEFINE(__LC_FP_CREG_SAVE_AREA, offsetof(struct _lowcore, fpt_creg_save_area));
DEFINE(__LC_LAST_BREAK, offsetof(struct _lowcore, breaking_event_addr));
DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data));
+ DEFINE(__LC_SIE_HOOK, offsetof(struct _lowcore, sie_hook));
+ DEFINE(__LC_CMF_HPP, offsetof(struct _lowcore, cmf_hpp));
#endif /* CONFIG_32BIT */
return 0;
}
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index 0168472b2fdf..98192261491d 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -655,6 +655,7 @@ found:
p_info->act_entry_offset = 0;
file->private_data = p_info;
debug_info_get(debug_info);
+ nonseekable_open(inode, file);
out:
mutex_unlock(&debug_mutex);
return rc;
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 2d92c2cf92d7..c00856ad4e5a 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -356,6 +356,7 @@ static __init void detect_machine_facilities(void)
{
#ifdef CONFIG_64BIT
unsigned int facilities;
+ unsigned long long facility_bits;
facilities = stfl();
if (facilities & (1 << 28))
@@ -364,6 +365,9 @@ static __init void detect_machine_facilities(void)
S390_lowcore.machine_flags |= MACHINE_FLAG_PFMF;
if (facilities & (1 << 4))
S390_lowcore.machine_flags |= MACHINE_FLAG_MVCOS;
+ if ((stfle(&facility_bits, 1) > 0) &&
+ (facility_bits & (1ULL << (63 - 40))))
+ S390_lowcore.machine_flags |= MACHINE_FLAG_SPP;
#endif
}
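Illustration (not part of the patch): the SPP check above tests facility bit 40 of the STFLE facility list, which numbers bits from the most significant bit of each doubleword, hence the mask 1ULL << (63 - 40). A small helper demonstrating the bit arithmetic; it is a stand-alone sketch, not the kernel's facility API, and the sample facility list is made up:

	#include <stdint.h>
	#include <stdio.h>

	static int facility_set(const uint64_t *list, unsigned int nr)
	{
		/* facility N lives in doubleword N / 64, counted from the MSB */
		return (list[nr / 64] >> (63 - nr % 64)) & 1;
	}

	int main(void)
	{
		/* fabricated facility list with only bit 40 set in doubleword 0 */
		uint64_t list[2] = { 1ULL << (63 - 40), 0 };

		printf("facility 40 (SPP): %d\n", facility_set(list, 40));	/* 1 */
		printf("facility 41     : %d\n", facility_set(list, 41));	/* 0 */
		return 0;
	}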
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 6af7045280a8..d5e3e6007447 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -73,21 +73,24 @@ STACK_SIZE = 1 << STACK_SHIFT
basr %r14,%r1
.endm
- .macro TRACE_IRQS_CHECK
- basr %r2,%r0
+ .macro TRACE_IRQS_CHECK_ON
tm SP_PSW(%r15),0x03 # irqs enabled?
- jz 0f
- l %r1,BASED(.Ltrace_irq_on_caller)
- basr %r14,%r1
- j 1f
-0: l %r1,BASED(.Ltrace_irq_off_caller)
- basr %r14,%r1
-1:
+ bz BASED(0f)
+ TRACE_IRQS_ON
+0:
+ .endm
+
+ .macro TRACE_IRQS_CHECK_OFF
+ tm SP_PSW(%r15),0x03 # irqs enabled?
+ bz BASED(0f)
+ TRACE_IRQS_OFF
+0:
.endm
#else
#define TRACE_IRQS_ON
#define TRACE_IRQS_OFF
-#define TRACE_IRQS_CHECK
+#define TRACE_IRQS_CHECK_ON
+#define TRACE_IRQS_CHECK_OFF
#endif
#ifdef CONFIG_LOCKDEP
@@ -177,9 +180,9 @@ STACK_SIZE = 1 << STACK_SHIFT
s %r15,BASED(.Lc_spsize) # make room for registers & psw
mvc SP_PSW(8,%r15),0(%r12) # move user PSW to stack
st %r2,SP_ORIG_R2(%r15) # store original content of gpr 2
- icm %r12,3,__LC_SVC_ILC
+ icm %r12,12,__LC_SVC_ILC
stm %r0,%r11,SP_R0(%r15) # store gprs %r0-%r11 to kernel stack
- st %r12,SP_SVCNR(%r15)
+ st %r12,SP_ILC(%r15)
mvc SP_R12(16,%r15),\savearea # move %r12-%r15 to stack
la %r12,0
st %r12,__SF_BACKCHAIN(%r15) # clear back chain
@@ -273,66 +276,45 @@ sysc_do_restart:
st %r2,SP_R2(%r15) # store return value (change R2 on stack)
sysc_return:
+ LOCKDEP_SYS_EXIT
+sysc_tif:
tm __TI_flags+3(%r9),_TIF_WORK_SVC
bnz BASED(sysc_work) # there is work to do (signals etc.)
sysc_restore:
-#ifdef CONFIG_TRACE_IRQFLAGS
- la %r1,BASED(sysc_restore_trace_psw_addr)
- l %r1,0(%r1)
- lpsw 0(%r1)
-sysc_restore_trace:
- TRACE_IRQS_CHECK
- LOCKDEP_SYS_EXIT
-#endif
-sysc_leave:
RESTORE_ALL __LC_RETURN_PSW,1
sysc_done:
-#ifdef CONFIG_TRACE_IRQFLAGS
-sysc_restore_trace_psw_addr:
- .long sysc_restore_trace_psw
-
- .section .data,"aw",@progbits
- .align 8
- .globl sysc_restore_trace_psw
-sysc_restore_trace_psw:
- .long 0, sysc_restore_trace + 0x80000000
- .previous
-#endif
-
-#
-# recheck if there is more work to do
#
-sysc_work_loop:
- tm __TI_flags+3(%r9),_TIF_WORK_SVC
- bz BASED(sysc_restore) # there is no work to do
-#
-# One of the work bits is on. Find out which one.
+# There is work to do, but first we need to check if we return to userspace.
#
sysc_work:
tm SP_PSW+1(%r15),0x01 # returning to user ?
bno BASED(sysc_restore)
+
+#
+# One of the work bits is on. Find out which one.
+#
+sysc_work_tif:
tm __TI_flags+3(%r9),_TIF_MCCK_PENDING
bo BASED(sysc_mcck_pending)
tm __TI_flags+3(%r9),_TIF_NEED_RESCHED
bo BASED(sysc_reschedule)
tm __TI_flags+3(%r9),_TIF_SIGPENDING
- bnz BASED(sysc_sigpending)
+ bo BASED(sysc_sigpending)
tm __TI_flags+3(%r9),_TIF_NOTIFY_RESUME
- bnz BASED(sysc_notify_resume)
+ bo BASED(sysc_notify_resume)
tm __TI_flags+3(%r9),_TIF_RESTART_SVC
bo BASED(sysc_restart)
tm __TI_flags+3(%r9),_TIF_SINGLE_STEP
bo BASED(sysc_singlestep)
- b BASED(sysc_restore)
-sysc_work_done:
+ b BASED(sysc_return) # beware of critical section cleanup
#
# _TIF_NEED_RESCHED is set, call schedule
#
sysc_reschedule:
l %r1,BASED(.Lschedule)
- la %r14,BASED(sysc_work_loop)
+ la %r14,BASED(sysc_return)
br %r1 # call scheduler
#
@@ -340,7 +322,7 @@ sysc_reschedule:
#
sysc_mcck_pending:
l %r1,BASED(.Ls390_handle_mcck)
- la %r14,BASED(sysc_work_loop)
+ la %r14,BASED(sysc_return)
br %r1 # TIF bit will be cleared by handler
#
@@ -355,7 +337,7 @@ sysc_sigpending:
bo BASED(sysc_restart)
tm __TI_flags+3(%r9),_TIF_SINGLE_STEP
bo BASED(sysc_singlestep)
- b BASED(sysc_work_loop)
+ b BASED(sysc_return)
#
# _TIF_NOTIFY_RESUME is set, call do_notify_resume
@@ -363,7 +345,7 @@ sysc_sigpending:
sysc_notify_resume:
la %r2,SP_PTREGS(%r15) # load pt_regs
l %r1,BASED(.Ldo_notify_resume)
- la %r14,BASED(sysc_work_loop)
+ la %r14,BASED(sysc_return)
br %r1 # call do_notify_resume
@@ -458,11 +440,13 @@ kernel_execve:
br %r14
# execve succeeded.
0: stnsm __SF_EMPTY(%r15),0xfc # disable interrupts
+ TRACE_IRQS_OFF
l %r15,__LC_KERNEL_STACK # load ksp
s %r15,BASED(.Lc_spsize) # make room for registers & psw
l %r9,__LC_THREAD_INFO
mvc SP_PTREGS(__PT_SIZE,%r15),0(%r12) # copy pt_regs
xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
+ TRACE_IRQS_ON
stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
l %r1,BASED(.Lexecve_tail)
basr %r14,%r1
@@ -499,8 +483,8 @@ pgm_check_handler:
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
pgm_no_vtime:
+ TRACE_IRQS_CHECK_OFF
l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
- TRACE_IRQS_OFF
l %r3,__LC_PGM_ILC # load program interruption code
la %r8,0x7f
nr %r8,%r3
@@ -509,8 +493,10 @@ pgm_do_call:
sll %r8,2
l %r7,0(%r8,%r7) # load address of handler routine
la %r2,SP_PTREGS(%r15) # address of register-save area
- la %r14,BASED(sysc_return)
- br %r7 # branch to interrupt-handler
+ basr %r14,%r7 # branch to interrupt-handler
+pgm_exit:
+ TRACE_IRQS_CHECK_ON
+ b BASED(sysc_return)
#
# handle per exception
@@ -537,19 +523,19 @@ pgm_per_std:
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
pgm_no_vtime2:
+ TRACE_IRQS_CHECK_OFF
l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
- TRACE_IRQS_OFF
l %r1,__TI_task(%r9)
+ tm SP_PSW+1(%r15),0x01 # kernel per event ?
+ bz BASED(kernel_per)
mvc __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
mvc __THREAD_per+__PER_address(4,%r1),__LC_PER_ADDRESS
mvc __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
oi __TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
- tm SP_PSW+1(%r15),0x01 # kernel per event ?
- bz BASED(kernel_per)
l %r3,__LC_PGM_ILC # load program interruption code
la %r8,0x7f
nr %r8,%r3 # clear per-event-bit and ilc
- be BASED(sysc_return) # only per or per+check ?
+ be BASED(pgm_exit) # only per or per+check ?
b BASED(pgm_do_call)
#
@@ -570,8 +556,8 @@ pgm_svcper:
mvc __THREAD_per+__PER_access_id(1,%r8),__LC_PER_ACCESS_ID
oi __TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
TRACE_IRQS_ON
- lm %r2,%r6,SP_R2(%r15) # load svc arguments
stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
+ lm %r2,%r6,SP_R2(%r15) # load svc arguments
b BASED(sysc_do_svc)
#
@@ -582,8 +568,8 @@ kernel_per:
mvi SP_SVCNR+1(%r15),0xff
la %r2,SP_PTREGS(%r15) # address of register-save area
l %r1,BASED(.Lhandle_per) # load adr. of per handler
- la %r14,BASED(sysc_restore)# load adr. of system return
- br %r1 # branch to do_single_step
+ basr %r14,%r1 # branch to do_single_step
+ b BASED(pgm_exit)
/*
* IO interrupt handler routine
@@ -602,134 +588,126 @@ io_int_handler:
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
io_no_vtime:
- l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
TRACE_IRQS_OFF
+ l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
l %r1,BASED(.Ldo_IRQ) # load address of do_IRQ
la %r2,SP_PTREGS(%r15) # address of register-save area
basr %r14,%r1 # branch to standard irq handler
io_return:
+ LOCKDEP_SYS_EXIT
+ TRACE_IRQS_ON
+io_tif:
tm __TI_flags+3(%r9),_TIF_WORK_INT
bnz BASED(io_work) # there is work to do (signals etc.)
io_restore:
-#ifdef CONFIG_TRACE_IRQFLAGS
- la %r1,BASED(io_restore_trace_psw_addr)
- l %r1,0(%r1)
- lpsw 0(%r1)
-io_restore_trace:
- TRACE_IRQS_CHECK
- LOCKDEP_SYS_EXIT
-#endif
-io_leave:
RESTORE_ALL __LC_RETURN_PSW,0
io_done:
-#ifdef CONFIG_TRACE_IRQFLAGS
-io_restore_trace_psw_addr:
- .long io_restore_trace_psw
-
- .section .data,"aw",@progbits
- .align 8
- .globl io_restore_trace_psw
-io_restore_trace_psw:
- .long 0, io_restore_trace + 0x80000000
- .previous
-#endif
-
#
-# switch to kernel stack, then check the TIF bits
+# There is work to do, find out in which context we have been interrupted:
+# 1) if we return to user space we can do all _TIF_WORK_INT work
+# 2) if we return to kernel code and preemptive scheduling is enabled check
+# the preemption counter and if it is zero call preempt_schedule_irq
+# Before any work can be done, a switch to the kernel stack is required.
#
io_work:
tm SP_PSW+1(%r15),0x01 # returning to user ?
-#ifndef CONFIG_PREEMPT
- bno BASED(io_restore) # no-> skip resched & signal
-#else
- bnz BASED(io_work_user) # no -> check for preemptive scheduling
+ bo BASED(io_work_user) # yes -> do resched & signal
+#ifdef CONFIG_PREEMPT
# check for preemptive scheduling
icm %r0,15,__TI_precount(%r9)
bnz BASED(io_restore) # preemption disabled
+ tm __TI_flags+3(%r9),_TIF_NEED_RESCHED
+ bno BASED(io_restore)
+ # switch to kernel stack
l %r1,SP_R15(%r15)
s %r1,BASED(.Lc_spsize)
mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain
lr %r15,%r1
-io_resume_loop:
- tm __TI_flags+3(%r9),_TIF_NEED_RESCHED
- bno BASED(io_restore)
+ # TRACE_IRQS_ON already done at io_return, call
+ # TRACE_IRQS_OFF to keep things symmetrical
+ TRACE_IRQS_OFF
l %r1,BASED(.Lpreempt_schedule_irq)
- la %r14,BASED(io_resume_loop)
- br %r1 # call schedule
+ basr %r14,%r1 # call preempt_schedule_irq
+ b BASED(io_return)
+#else
+ b BASED(io_restore)
#endif
+#
+# Need to do work before returning to userspace, switch to kernel stack
+#
io_work_user:
l %r1,__LC_KERNEL_STACK
s %r1,BASED(.Lc_spsize)
mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain
lr %r15,%r1
+
#
# One of the work bits is on. Find out which one.
-# Checked are: _TIF_SIGPENDING, _TIF_NEED_RESCHED
+# Checked are: _TIF_SIGPENDING, _TIF_NOTIFY_RESUME, _TIF_NEED_RESCHED
# and _TIF_MCCK_PENDING
#
-io_work_loop:
+io_work_tif:
tm __TI_flags+3(%r9),_TIF_MCCK_PENDING
bo BASED(io_mcck_pending)
tm __TI_flags+3(%r9),_TIF_NEED_RESCHED
bo BASED(io_reschedule)
tm __TI_flags+3(%r9),_TIF_SIGPENDING
- bnz BASED(io_sigpending)
+ bo BASED(io_sigpending)
tm __TI_flags+3(%r9),_TIF_NOTIFY_RESUME
- bnz BASED(io_notify_resume)
- b BASED(io_restore)
-io_work_done:
+ bo BASED(io_notify_resume)
+ b BASED(io_return) # beware of critical section cleanup
#
# _TIF_MCCK_PENDING is set, call handler
#
io_mcck_pending:
+ # TRACE_IRQS_ON already done at io_return
l %r1,BASED(.Ls390_handle_mcck)
basr %r14,%r1 # TIF bit will be cleared by handler
- b BASED(io_work_loop)
+ TRACE_IRQS_OFF
+ b BASED(io_return)
#
# _TIF_NEED_RESCHED is set, call schedule
#
io_reschedule:
- TRACE_IRQS_ON
+ # TRACE_IRQS_ON already done at io_return
l %r1,BASED(.Lschedule)
stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
basr %r14,%r1 # call scheduler
stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
TRACE_IRQS_OFF
- tm __TI_flags+3(%r9),_TIF_WORK_INT
- bz BASED(io_restore) # there is no work to do
- b BASED(io_work_loop)
+ b BASED(io_return)
#
# _TIF_SIGPENDING is set, call do_signal
#
io_sigpending:
- TRACE_IRQS_ON
+ # TRACE_IRQS_ON already done at io_return
stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
la %r2,SP_PTREGS(%r15) # load pt_regs
l %r1,BASED(.Ldo_signal)
basr %r14,%r1 # call do_signal
stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
TRACE_IRQS_OFF
- b BASED(io_work_loop)
+ b BASED(io_return)
#
# _TIF_NOTIFY_RESUME is set, call do_notify_resume
#
io_notify_resume:
- TRACE_IRQS_ON
+ # TRACE_IRQS_ON already done at io_return
stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
la %r2,SP_PTREGS(%r15) # load pt_regs
l %r1,BASED(.Ldo_notify_resume)
basr %r14,%r1 # call do_notify_resume
stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
TRACE_IRQS_OFF
- b BASED(io_work_loop)
+ b BASED(io_return)
/*
* External interrupt handler routine
@@ -764,15 +742,14 @@ __critical_end:
.globl mcck_int_handler
mcck_int_handler:
- stck __LC_INT_CLOCK
+ stck __LC_MCCK_CLOCK
spt __LC_CPU_TIMER_SAVE_AREA # revalidate cpu timer
lm %r0,%r15,__LC_GPREGS_SAVE_AREA # revalidate gprs
SAVE_ALL_BASE __LC_SAVE_AREA+32
la %r12,__LC_MCK_OLD_PSW
tm __LC_MCCK_CODE,0x80 # system damage?
bo BASED(mcck_int_main) # yes -> rest of mcck code invalid
- mvc __LC_SAVE_AREA+52(8),__LC_ASYNC_ENTER_TIMER
- mvc __LC_ASYNC_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA
+ mvc __LC_MCCK_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA
tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid?
bo BASED(1f)
la %r14,__LC_SYNC_ENTER_TIMER
@@ -786,7 +763,7 @@ mcck_int_handler:
bl BASED(0f)
la %r14,__LC_LAST_UPDATE_TIMER
0: spt 0(%r14)
- mvc __LC_ASYNC_ENTER_TIMER(8),0(%r14)
+ mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
1: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid?
bno BASED(mcck_int_main) # no -> skip cleanup critical
tm __LC_MCK_OLD_PSW+1,0x01 # test problem state bit
@@ -808,9 +785,9 @@ mcck_int_main:
bno BASED(mcck_no_vtime) # no -> skip cleanup critical
tm SP_PSW+1(%r15),0x01 # interrupting from user ?
bz BASED(mcck_no_vtime)
- UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
+ UPDATE_VTIME __LC_EXIT_TIMER,__LC_MCCK_ENTER_TIMER,__LC_USER_TIMER
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
- mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
+ mvc __LC_LAST_UPDATE_TIMER(8),__LC_MCCK_ENTER_TIMER
mcck_no_vtime:
l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
la %r2,SP_PTREGS(%r15) # load pt_regs
@@ -833,7 +810,6 @@ mcck_no_vtime:
mcck_return:
mvc __LC_RETURN_MCCK_PSW(8),SP_PSW(%r15) # move return PSW
ni __LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit
- mvc __LC_ASYNC_ENTER_TIMER(8),__LC_SAVE_AREA+52
tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
bno BASED(0f)
lm %r0,%r15,SP_R0(%r15) # load gprs 0-15
@@ -917,18 +893,14 @@ stack_overflow:
cleanup_table_system_call:
.long system_call + 0x80000000, sysc_do_svc + 0x80000000
-cleanup_table_sysc_return:
- .long sysc_return + 0x80000000, sysc_leave + 0x80000000
-cleanup_table_sysc_leave:
- .long sysc_leave + 0x80000000, sysc_done + 0x80000000
-cleanup_table_sysc_work_loop:
- .long sysc_work_loop + 0x80000000, sysc_work_done + 0x80000000
-cleanup_table_io_return:
- .long io_return + 0x80000000, io_leave + 0x80000000
-cleanup_table_io_leave:
- .long io_leave + 0x80000000, io_done + 0x80000000
-cleanup_table_io_work_loop:
- .long io_work_loop + 0x80000000, io_work_done + 0x80000000
+cleanup_table_sysc_tif:
+ .long sysc_tif + 0x80000000, sysc_restore + 0x80000000
+cleanup_table_sysc_restore:
+ .long sysc_restore + 0x80000000, sysc_done + 0x80000000
+cleanup_table_io_tif:
+ .long io_tif + 0x80000000, io_restore + 0x80000000
+cleanup_table_io_restore:
+ .long io_restore + 0x80000000, io_done + 0x80000000
cleanup_critical:
clc 4(4,%r12),BASED(cleanup_table_system_call)
@@ -936,49 +908,40 @@ cleanup_critical:
clc 4(4,%r12),BASED(cleanup_table_system_call+4)
bl BASED(cleanup_system_call)
0:
- clc 4(4,%r12),BASED(cleanup_table_sysc_return)
- bl BASED(0f)
- clc 4(4,%r12),BASED(cleanup_table_sysc_return+4)
- bl BASED(cleanup_sysc_return)
-0:
- clc 4(4,%r12),BASED(cleanup_table_sysc_leave)
- bl BASED(0f)
- clc 4(4,%r12),BASED(cleanup_table_sysc_leave+4)
- bl BASED(cleanup_sysc_leave)
-0:
- clc 4(4,%r12),BASED(cleanup_table_sysc_work_loop)
+ clc 4(4,%r12),BASED(cleanup_table_sysc_tif)
bl BASED(0f)
- clc 4(4,%r12),BASED(cleanup_table_sysc_work_loop+4)
- bl BASED(cleanup_sysc_return)
+ clc 4(4,%r12),BASED(cleanup_table_sysc_tif+4)
+ bl BASED(cleanup_sysc_tif)
0:
- clc 4(4,%r12),BASED(cleanup_table_io_return)
+ clc 4(4,%r12),BASED(cleanup_table_sysc_restore)
bl BASED(0f)
- clc 4(4,%r12),BASED(cleanup_table_io_return+4)
- bl BASED(cleanup_io_return)
+ clc 4(4,%r12),BASED(cleanup_table_sysc_restore+4)
+ bl BASED(cleanup_sysc_restore)
0:
- clc 4(4,%r12),BASED(cleanup_table_io_leave)
+ clc 4(4,%r12),BASED(cleanup_table_io_tif)
bl BASED(0f)
- clc 4(4,%r12),BASED(cleanup_table_io_leave+4)
- bl BASED(cleanup_io_leave)
+ clc 4(4,%r12),BASED(cleanup_table_io_tif+4)
+ bl BASED(cleanup_io_tif)
0:
- clc 4(4,%r12),BASED(cleanup_table_io_work_loop)
+ clc 4(4,%r12),BASED(cleanup_table_io_restore)
bl BASED(0f)
- clc 4(4,%r12),BASED(cleanup_table_io_work_loop+4)
- bl BASED(cleanup_io_work_loop)
+ clc 4(4,%r12),BASED(cleanup_table_io_restore+4)
+ bl BASED(cleanup_io_restore)
0:
br %r14
cleanup_system_call:
mvc __LC_RETURN_PSW(8),0(%r12)
- c %r12,BASED(.Lmck_old_psw)
- be BASED(0f)
- la %r12,__LC_SAVE_AREA+16
- b BASED(1f)
-0: la %r12,__LC_SAVE_AREA+32
-1:
clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+4)
bh BASED(0f)
+ mvc __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER
+ c %r12,BASED(.Lmck_old_psw)
+ be BASED(0f)
mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
+0: c %r12,BASED(.Lmck_old_psw)
+ la %r12,__LC_SAVE_AREA+32
+ be BASED(0f)
+ la %r12,__LC_SAVE_AREA+16
0: clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+8)
bhe BASED(cleanup_vtime)
clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn)
@@ -1011,61 +974,54 @@ cleanup_system_call_insn:
.long sysc_stime + 0x80000000
.long sysc_update + 0x80000000
-cleanup_sysc_return:
+cleanup_sysc_tif:
mvc __LC_RETURN_PSW(4),0(%r12)
- mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_sysc_return)
+ mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_sysc_tif)
la %r12,__LC_RETURN_PSW
br %r14
-cleanup_sysc_leave:
- clc 4(4,%r12),BASED(cleanup_sysc_leave_insn)
+cleanup_sysc_restore:
+ clc 4(4,%r12),BASED(cleanup_sysc_restore_insn)
be BASED(2f)
+ mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
+ c %r12,BASED(.Lmck_old_psw)
+ be BASED(0f)
mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
- clc 4(4,%r12),BASED(cleanup_sysc_leave_insn+4)
+0: clc 4(4,%r12),BASED(cleanup_sysc_restore_insn+4)
be BASED(2f)
mvc __LC_RETURN_PSW(8),SP_PSW(%r15)
c %r12,BASED(.Lmck_old_psw)
- bne BASED(0f)
- mvc __LC_SAVE_AREA+32(16),SP_R12(%r15)
- b BASED(1f)
-0: mvc __LC_SAVE_AREA+16(16),SP_R12(%r15)
-1: lm %r0,%r11,SP_R0(%r15)
+ la %r12,__LC_SAVE_AREA+32
+ be BASED(1f)
+ la %r12,__LC_SAVE_AREA+16
+1: mvc 0(16,%r12),SP_R12(%r15)
+ lm %r0,%r11,SP_R0(%r15)
l %r15,SP_R15(%r15)
2: la %r12,__LC_RETURN_PSW
br %r14
-cleanup_sysc_leave_insn:
+cleanup_sysc_restore_insn:
.long sysc_done - 4 + 0x80000000
.long sysc_done - 8 + 0x80000000
-cleanup_io_return:
+cleanup_io_tif:
mvc __LC_RETURN_PSW(4),0(%r12)
- mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_io_return)
+ mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_io_tif)
la %r12,__LC_RETURN_PSW
br %r14
-cleanup_io_work_loop:
- mvc __LC_RETURN_PSW(4),0(%r12)
- mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_io_work_loop)
- la %r12,__LC_RETURN_PSW
- br %r14
-
-cleanup_io_leave:
- clc 4(4,%r12),BASED(cleanup_io_leave_insn)
- be BASED(2f)
- mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
- clc 4(4,%r12),BASED(cleanup_io_leave_insn+4)
- be BASED(2f)
+cleanup_io_restore:
+ clc 4(4,%r12),BASED(cleanup_io_restore_insn)
+ be BASED(1f)
+ mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
+ clc 4(4,%r12),BASED(cleanup_io_restore_insn+4)
+ be BASED(1f)
mvc __LC_RETURN_PSW(8),SP_PSW(%r15)
- c %r12,BASED(.Lmck_old_psw)
- bne BASED(0f)
mvc __LC_SAVE_AREA+32(16),SP_R12(%r15)
- b BASED(1f)
-0: mvc __LC_SAVE_AREA+16(16),SP_R12(%r15)
-1: lm %r0,%r11,SP_R0(%r15)
+ lm %r0,%r11,SP_R0(%r15)
l %r15,SP_R15(%r15)
-2: la %r12,__LC_RETURN_PSW
+1: la %r12,__LC_RETURN_PSW
br %r14
-cleanup_io_leave_insn:
+cleanup_io_restore_insn:
.long io_done - 4 + 0x80000000
.long io_done - 8 + 0x80000000
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 52106d53271c..e7192e1cb678 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -2,7 +2,7 @@
* arch/s390/kernel/entry64.S
* S390 low-level entry points.
*
- * Copyright (C) IBM Corp. 1999,2006
+ * Copyright (C) IBM Corp. 1999,2010
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
* Hartmut Penner (hp@de.ibm.com),
* Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
@@ -59,30 +59,45 @@ _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
#define BASED(name) name-system_call(%r13)
+ .macro HANDLE_SIE_INTERCEPT
+#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
+ lg %r3,__LC_SIE_HOOK
+ ltgr %r3,%r3
+ jz 0f
+ basr %r14,%r3
+0:
+#endif
+ .endm
+
#ifdef CONFIG_TRACE_IRQFLAGS
.macro TRACE_IRQS_ON
- basr %r2,%r0
- brasl %r14,trace_hardirqs_on_caller
+ basr %r2,%r0
+ brasl %r14,trace_hardirqs_on_caller
.endm
.macro TRACE_IRQS_OFF
- basr %r2,%r0
- brasl %r14,trace_hardirqs_off_caller
+ basr %r2,%r0
+ brasl %r14,trace_hardirqs_off_caller
.endm
- .macro TRACE_IRQS_CHECK
- basr %r2,%r0
+ .macro TRACE_IRQS_CHECK_ON
tm SP_PSW(%r15),0x03 # irqs enabled?
jz 0f
- brasl %r14,trace_hardirqs_on_caller
- j 1f
-0: brasl %r14,trace_hardirqs_off_caller
-1:
+ TRACE_IRQS_ON
+0:
+ .endm
+
+ .macro TRACE_IRQS_CHECK_OFF
+ tm SP_PSW(%r15),0x03 # irqs enabled?
+ jz 0f
+ TRACE_IRQS_OFF
+0:
.endm
#else
#define TRACE_IRQS_ON
#define TRACE_IRQS_OFF
-#define TRACE_IRQS_CHECK
+#define TRACE_IRQS_CHECK_ON
+#define TRACE_IRQS_CHECK_OFF
#endif
#ifdef CONFIG_LOCKDEP
@@ -111,31 +126,35 @@ _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
* R15 - kernel stack pointer
*/
- .macro SAVE_ALL_BASE savearea
- stmg %r12,%r15,\savearea
- larl %r13,system_call
- .endm
-
.macro SAVE_ALL_SVC psworg,savearea
- la %r12,\psworg
+ stmg %r11,%r15,\savearea
lg %r15,__LC_KERNEL_STACK # problem state -> load ksp
+ aghi %r15,-SP_SIZE # make room for registers & psw
+ lg %r11,__LC_LAST_BREAK
.endm
- .macro SAVE_ALL_SYNC psworg,savearea
- la %r12,\psworg
+ .macro SAVE_ALL_PGM psworg,savearea
+ stmg %r11,%r15,\savearea
tm \psworg+1,0x01 # test problem state bit
- jz 2f # skip stack setup save
- lg %r15,__LC_KERNEL_STACK # problem state -> load ksp
#ifdef CONFIG_CHECK_STACK
- j 3f
-2: tml %r15,STACK_SIZE - CONFIG_STACK_GUARD
- jz stack_overflow
-3:
+ jnz 1f
+ tml %r15,STACK_SIZE - CONFIG_STACK_GUARD
+ jnz 2f
+ la %r12,\psworg
+ j stack_overflow
+#else
+ jz 2f
#endif
-2:
+1: lg %r15,__LC_KERNEL_STACK # problem state -> load ksp
+2: aghi %r15,-SP_SIZE # make room for registers & psw
+ larl %r13,system_call
+ lg %r11,__LC_LAST_BREAK
.endm
.macro SAVE_ALL_ASYNC psworg,savearea
+ stmg %r11,%r15,\savearea
+ larl %r13,system_call
+ lg %r11,__LC_LAST_BREAK
la %r12,\psworg
tm \psworg+1,0x01 # test problem state bit
jnz 1f # from user -> load kernel stack
@@ -149,27 +168,23 @@ _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
0: lg %r14,__LC_ASYNC_STACK # are we already on the async. stack ?
slgr %r14,%r15
srag %r14,%r14,STACK_SHIFT
- jz 2f
-1: lg %r15,__LC_ASYNC_STACK # load async stack
#ifdef CONFIG_CHECK_STACK
- j 3f
-2: tml %r15,STACK_SIZE - CONFIG_STACK_GUARD
- jz stack_overflow
-3:
+ jnz 1f
+ tml %r15,STACK_SIZE - CONFIG_STACK_GUARD
+ jnz 2f
+ j stack_overflow
+#else
+ jz 2f
#endif
-2:
+1: lg %r15,__LC_ASYNC_STACK # load async stack
+2: aghi %r15,-SP_SIZE # make room for registers & psw
.endm
- .macro CREATE_STACK_FRAME psworg,savearea
- aghi %r15,-SP_SIZE # make room for registers & psw
- mvc SP_PSW(16,%r15),0(%r12) # move user PSW to stack
+ .macro CREATE_STACK_FRAME savearea
+ xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
stg %r2,SP_ORIG_R2(%r15) # store original content of gpr 2
- icm %r12,3,__LC_SVC_ILC
- stmg %r0,%r11,SP_R0(%r15) # store gprs %r0-%r11 to kernel stack
- st %r12,SP_SVCNR(%r15)
- mvc SP_R12(32,%r15),\savearea # move %r12-%r15 to stack
- la %r12,0
- stg %r12,__SF_BACKCHAIN(%r15)
+ mvc SP_R11(40,%r15),\savearea # move %r11-%r15 to stack
+ stmg %r0,%r10,SP_R0(%r15) # store gprs %r0-%r10 to kernel stack
.endm
.macro RESTORE_ALL psworg,sync
@@ -185,6 +200,13 @@ _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
lpswe \psworg # back to caller
.endm
+ .macro LAST_BREAK
+ srag %r10,%r11,23
+ jz 0f
+ stg %r11,__TI_last_break(%r12)
+0:
+ .endm
+
/*
* Scheduler resume function, called by switch_to
* gpr2 = (task_struct *) prev
@@ -230,143 +252,129 @@ __critical_start:
system_call:
stpt __LC_SYNC_ENTER_TIMER
sysc_saveall:
- SAVE_ALL_BASE __LC_SAVE_AREA
SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
- CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA
- llgh %r7,__LC_SVC_INT_CODE # get svc number from lowcore
+ CREATE_STACK_FRAME __LC_SAVE_AREA
+ mvc SP_PSW(16,%r15),__LC_SVC_OLD_PSW
+ mvc SP_ILC(4,%r15),__LC_SVC_ILC
+ stg %r7,SP_ARGS(%r15)
+ lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
sysc_vtime:
UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
sysc_stime:
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
sysc_update:
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
+ LAST_BREAK
sysc_do_svc:
- lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
- ltgr %r7,%r7 # test for svc 0
+ llgh %r7,SP_SVCNR(%r15)
+ slag %r7,%r7,2 # shift and test for svc 0
jnz sysc_nr_ok
# svc 0: system call number in %r1
- cl %r1,BASED(.Lnr_syscalls)
+ llgfr %r1,%r1 # clear high word in r1
+ cghi %r1,NR_syscalls
jnl sysc_nr_ok
- lgfr %r7,%r1 # clear high word in r1
+ sth %r1,SP_SVCNR(%r15)
+ slag %r7,%r1,2 # shift and test for svc 0
sysc_nr_ok:
- mvc SP_ARGS(8,%r15),SP_R7(%r15)
-sysc_do_restart:
- sth %r7,SP_SVCNR(%r15)
- sllg %r7,%r7,2 # svc number * 4
larl %r10,sys_call_table
#ifdef CONFIG_COMPAT
- tm __TI_flags+5(%r9),(_TIF_31BIT>>16) # running in 31 bit mode ?
+ tm __TI_flags+5(%r12),(_TIF_31BIT>>16) # running in 31 bit mode ?
jno sysc_noemu
larl %r10,sys_call_table_emu # use 31 bit emulation system calls
sysc_noemu:
#endif
- tm __TI_flags+6(%r9),_TIF_SYSCALL
+ tm __TI_flags+6(%r12),_TIF_SYSCALL
lgf %r8,0(%r7,%r10) # load address of system call routine
jnz sysc_tracesys
basr %r14,%r8 # call sys_xxxx
stg %r2,SP_R2(%r15) # store return value (change R2 on stack)
sysc_return:
- tm __TI_flags+7(%r9),_TIF_WORK_SVC
+ LOCKDEP_SYS_EXIT
+sysc_tif:
+ tm __TI_flags+7(%r12),_TIF_WORK_SVC
jnz sysc_work # there is work to do (signals etc.)
sysc_restore:
-#ifdef CONFIG_TRACE_IRQFLAGS
- larl %r1,sysc_restore_trace_psw
- lpswe 0(%r1)
-sysc_restore_trace:
- TRACE_IRQS_CHECK
- LOCKDEP_SYS_EXIT
-#endif
-sysc_leave:
RESTORE_ALL __LC_RETURN_PSW,1
sysc_done:
-#ifdef CONFIG_TRACE_IRQFLAGS
- .section .data,"aw",@progbits
- .align 8
- .globl sysc_restore_trace_psw
-sysc_restore_trace_psw:
- .quad 0, sysc_restore_trace
- .previous
-#endif
-
#
-# recheck if there is more work to do
-#
-sysc_work_loop:
- tm __TI_flags+7(%r9),_TIF_WORK_SVC
- jz sysc_restore # there is no work to do
-#
-# One of the work bits is on. Find out which one.
+# There is work to do, but first we need to check if we return to userspace.
#
sysc_work:
tm SP_PSW+1(%r15),0x01 # returning to user ?
jno sysc_restore
- tm __TI_flags+7(%r9),_TIF_MCCK_PENDING
+
+#
+# One of the work bits is on. Find out which one.
+#
+sysc_work_tif:
+ tm __TI_flags+7(%r12),_TIF_MCCK_PENDING
jo sysc_mcck_pending
- tm __TI_flags+7(%r9),_TIF_NEED_RESCHED
+ tm __TI_flags+7(%r12),_TIF_NEED_RESCHED
jo sysc_reschedule
- tm __TI_flags+7(%r9),_TIF_SIGPENDING
- jnz sysc_sigpending
- tm __TI_flags+7(%r9),_TIF_NOTIFY_RESUME
- jnz sysc_notify_resume
- tm __TI_flags+7(%r9),_TIF_RESTART_SVC
+ tm __TI_flags+7(%r12),_TIF_SIGPENDING
+ jo sysc_sigpending
+ tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME
+ jo sysc_notify_resume
+ tm __TI_flags+7(%r12),_TIF_RESTART_SVC
jo sysc_restart
- tm __TI_flags+7(%r9),_TIF_SINGLE_STEP
+ tm __TI_flags+7(%r12),_TIF_SINGLE_STEP
jo sysc_singlestep
- j sysc_restore
-sysc_work_done:
+ j sysc_return # beware of critical section cleanup
#
# _TIF_NEED_RESCHED is set, call schedule
#
sysc_reschedule:
- larl %r14,sysc_work_loop
- jg schedule # return point is sysc_return
+ larl %r14,sysc_return
+ jg schedule # return point is sysc_return
#
# _TIF_MCCK_PENDING is set, call handler
#
sysc_mcck_pending:
- larl %r14,sysc_work_loop
+ larl %r14,sysc_return
jg s390_handle_mcck # TIF bit will be cleared by handler
#
# _TIF_SIGPENDING is set, call do_signal
#
sysc_sigpending:
- ni __TI_flags+7(%r9),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP
+ ni __TI_flags+7(%r12),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP
la %r2,SP_PTREGS(%r15) # load pt_regs
brasl %r14,do_signal # call do_signal
- tm __TI_flags+7(%r9),_TIF_RESTART_SVC
+ tm __TI_flags+7(%r12),_TIF_RESTART_SVC
jo sysc_restart
- tm __TI_flags+7(%r9),_TIF_SINGLE_STEP
+ tm __TI_flags+7(%r12),_TIF_SINGLE_STEP
jo sysc_singlestep
- j sysc_work_loop
+ j sysc_return
#
# _TIF_NOTIFY_RESUME is set, call do_notify_resume
#
sysc_notify_resume:
la %r2,SP_PTREGS(%r15) # load pt_regs
- larl %r14,sysc_work_loop
+ larl %r14,sysc_return
jg do_notify_resume # call do_notify_resume
#
# _TIF_RESTART_SVC is set, set up registers and restart svc
#
sysc_restart:
- ni __TI_flags+7(%r9),255-_TIF_RESTART_SVC # clear TIF_RESTART_SVC
+ ni __TI_flags+7(%r12),255-_TIF_RESTART_SVC # clear TIF_RESTART_SVC
lg %r7,SP_R2(%r15) # load new svc number
mvc SP_R2(8,%r15),SP_ORIG_R2(%r15) # restore first argument
lmg %r2,%r6,SP_R2(%r15) # load svc arguments
- j sysc_do_restart # restart svc
+ sth %r7,SP_SVCNR(%r15)
+ slag %r7,%r7,2
+ j sysc_nr_ok # restart svc
#
# _TIF_SINGLE_STEP is set, call do_single_step
#
sysc_singlestep:
- ni __TI_flags+7(%r9),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP
+ ni __TI_flags+7(%r12),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP
xc SP_SVCNR(2,%r15),SP_SVCNR(%r15) # clear svc number
la %r2,SP_PTREGS(%r15) # address of register-save area
larl %r14,sysc_return # load adr. of system return
@@ -379,8 +387,8 @@ sysc_singlestep:
sysc_tracesys:
la %r2,SP_PTREGS(%r15) # load pt_regs
la %r3,0
- srl %r7,2
- stg %r7,SP_R2(%r15)
+ llgh %r0,SP_SVCNR(%r15)
+ stg %r0,SP_R2(%r15)
brasl %r14,do_syscall_trace_enter
lghi %r0,NR_syscalls
clgr %r0,%r2
@@ -393,7 +401,7 @@ sysc_tracego:
basr %r14,%r8 # call sys_xxx
stg %r2,SP_R2(%r15) # store return value
sysc_tracenogo:
- tm __TI_flags+6(%r9),_TIF_SYSCALL
+ tm __TI_flags+6(%r12),_TIF_SYSCALL
jz sysc_return
la %r2,SP_PTREGS(%r15) # load pt_regs
larl %r14,sysc_return # return point is sysc_return
@@ -405,7 +413,7 @@ sysc_tracenogo:
.globl ret_from_fork
ret_from_fork:
lg %r13,__LC_SVC_NEW_PSW+8
- lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
+ lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
tm SP_PSW+1(%r15),0x01 # forking a kernel thread ?
jo 0f
stg %r15,SP_R15(%r15) # store stack pointer for new kthread
@@ -435,12 +443,14 @@ kernel_execve:
br %r14
# execve succeeded.
0: stnsm __SF_EMPTY(%r15),0xfc # disable interrupts
+# TRACE_IRQS_OFF
lg %r15,__LC_KERNEL_STACK # load ksp
aghi %r15,-SP_SIZE # make room for registers & psw
lg %r13,__LC_SVC_NEW_PSW+8
- lg %r9,__LC_THREAD_INFO
mvc SP_PTREGS(__PT_SIZE,%r15),0(%r12) # copy pt_regs
+ lg %r12,__LC_THREAD_INFO
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+# TRACE_IRQS_ON
stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
brasl %r14,execve_tail
j sysc_return
@@ -465,20 +475,23 @@ pgm_check_handler:
* for LPSW?).
*/
stpt __LC_SYNC_ENTER_TIMER
- SAVE_ALL_BASE __LC_SAVE_AREA
tm __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception
jnz pgm_per # got per exception -> special case
- SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA
- CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA
+ SAVE_ALL_PGM __LC_PGM_OLD_PSW,__LC_SAVE_AREA
+ CREATE_STACK_FRAME __LC_SAVE_AREA
+ xc SP_ILC(4,%r15),SP_ILC(%r15)
+ mvc SP_PSW(16,%r15),__LC_PGM_OLD_PSW
+ lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
tm SP_PSW+1(%r15),0x01 # interrupting from user ?
jz pgm_no_vtime
UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
+ LAST_BREAK
pgm_no_vtime:
- lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
- mvc SP_ARGS(8,%r15),__LC_LAST_BREAK
- TRACE_IRQS_OFF
+ HANDLE_SIE_INTERCEPT
+ TRACE_IRQS_CHECK_OFF
+ stg %r11,SP_ARGS(%r15)
lgf %r3,__LC_PGM_ILC # load program interruption code
lghi %r8,0x7f
ngr %r8,%r3
@@ -487,8 +500,10 @@ pgm_do_call:
larl %r1,pgm_check_table
lg %r1,0(%r8,%r1) # load address of handler routine
la %r2,SP_PTREGS(%r15) # address of register-save area
- larl %r14,sysc_return
- br %r1 # branch to interrupt-handler
+ basr %r14,%r1 # branch to interrupt-handler
+pgm_exit:
+ TRACE_IRQS_CHECK_ON
+ j sysc_return
#
# handle per exception
@@ -500,55 +515,60 @@ pgm_per:
clc __LC_PGM_OLD_PSW(16),__LC_SVC_NEW_PSW
je pgm_svcper
# no interesting special case, ignore PER event
- lmg %r12,%r15,__LC_SAVE_AREA
lpswe __LC_PGM_OLD_PSW
#
# Normal per exception
#
pgm_per_std:
- SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA
- CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA
+ SAVE_ALL_PGM __LC_PGM_OLD_PSW,__LC_SAVE_AREA
+ CREATE_STACK_FRAME __LC_SAVE_AREA
+ mvc SP_PSW(16,%r15),__LC_PGM_OLD_PSW
+ lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
tm SP_PSW+1(%r15),0x01 # interrupting from user ?
jz pgm_no_vtime2
UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
+ LAST_BREAK
pgm_no_vtime2:
- lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
- TRACE_IRQS_OFF
- lg %r1,__TI_task(%r9)
+ HANDLE_SIE_INTERCEPT
+ TRACE_IRQS_CHECK_OFF
+ lg %r1,__TI_task(%r12)
tm SP_PSW+1(%r15),0x01 # kernel per event ?
jz kernel_per
mvc __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
mvc __THREAD_per+__PER_address(8,%r1),__LC_PER_ADDRESS
mvc __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
- oi __TI_flags+7(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
+ oi __TI_flags+7(%r12),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
lgf %r3,__LC_PGM_ILC # load program interruption code
lghi %r8,0x7f
ngr %r8,%r3 # clear per-event-bit and ilc
- je sysc_return
+ je pgm_exit
j pgm_do_call
#
# it was a single stepped SVC that is causing all the trouble
#
pgm_svcper:
- SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
- CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA
+ SAVE_ALL_PGM __LC_SVC_OLD_PSW,__LC_SAVE_AREA
+ CREATE_STACK_FRAME __LC_SAVE_AREA
+ mvc SP_PSW(16,%r15),__LC_SVC_OLD_PSW
+ mvc SP_ILC(4,%r15),__LC_SVC_ILC
+ lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
- llgh %r7,__LC_SVC_INT_CODE # get svc number from lowcore
- lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
- lg %r8,__TI_task(%r9)
+ LAST_BREAK
+ TRACE_IRQS_OFF
+ lg %r8,__TI_task(%r12)
mvc __THREAD_per+__PER_atmid(2,%r8),__LC_PER_ATMID
mvc __THREAD_per+__PER_address(8,%r8),__LC_PER_ADDRESS
mvc __THREAD_per+__PER_access_id(1,%r8),__LC_PER_ACCESS_ID
- oi __TI_flags+7(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
+ oi __TI_flags+7(%r12),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
TRACE_IRQS_ON
- lmg %r2,%r6,SP_R2(%r15) # load svc arguments
stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
+ lmg %r2,%r6,SP_R2(%r15) # load svc arguments
j sysc_do_svc
#
@@ -557,8 +577,8 @@ pgm_svcper:
kernel_per:
xc SP_SVCNR(2,%r15),SP_SVCNR(%r15) # clear svc number
la %r2,SP_PTREGS(%r15) # address of register-save area
- larl %r14,sysc_restore # load adr. of system ret, no work
- jg do_single_step # branch to do_single_step
+ brasl %r14,do_single_step
+ j pgm_exit
/*
* IO interrupt handler routine
@@ -567,162 +587,133 @@ kernel_per:
io_int_handler:
stck __LC_INT_CLOCK
stpt __LC_ASYNC_ENTER_TIMER
- SAVE_ALL_BASE __LC_SAVE_AREA+32
- SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+32
- CREATE_STACK_FRAME __LC_IO_OLD_PSW,__LC_SAVE_AREA+32
+ SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+40
+ CREATE_STACK_FRAME __LC_SAVE_AREA+40
+ mvc SP_PSW(16,%r15),0(%r12) # move user PSW to stack
+ lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
tm SP_PSW+1(%r15),0x01 # interrupting from user ?
jz io_no_vtime
UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
+ LAST_BREAK
io_no_vtime:
- lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
+ HANDLE_SIE_INTERCEPT
TRACE_IRQS_OFF
la %r2,SP_PTREGS(%r15) # address of register-save area
brasl %r14,do_IRQ # call standard irq handler
io_return:
- tm __TI_flags+7(%r9),_TIF_WORK_INT
+ LOCKDEP_SYS_EXIT
+ TRACE_IRQS_ON
+io_tif:
+ tm __TI_flags+7(%r12),_TIF_WORK_INT
jnz io_work # there is work to do (signals etc.)
io_restore:
-#ifdef CONFIG_TRACE_IRQFLAGS
- larl %r1,io_restore_trace_psw
- lpswe 0(%r1)
-io_restore_trace:
- TRACE_IRQS_CHECK
- LOCKDEP_SYS_EXIT
-#endif
-io_leave:
RESTORE_ALL __LC_RETURN_PSW,0
io_done:
-#ifdef CONFIG_TRACE_IRQFLAGS
- .section .data,"aw",@progbits
- .align 8
- .globl io_restore_trace_psw
-io_restore_trace_psw:
- .quad 0, io_restore_trace
- .previous
-#endif
-
#
-# There is work todo, we need to check if we return to userspace, then
-# check, if we are in SIE, if yes leave it
+# There is work todo, find out in which context we have been interrupted:
+# 1) if we return to user space we can do all _TIF_WORK_INT work
+# 2) if we return to kernel code and kvm is enabled check if we need to
+# modify the psw to leave SIE
+# 3) if we return to kernel code and preemptive scheduling is enabled check
+# the preemption counter and if it is zero call preempt_schedule_irq
+# Before any work can be done, a switch to the kernel stack is required.
#
io_work:
tm SP_PSW+1(%r15),0x01 # returning to user ?
-#ifndef CONFIG_PREEMPT
-#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
- jnz io_work_user # yes -> no need to check for SIE
- la %r1, BASED(sie_opcode) # we return to kernel here
- lg %r2, SP_PSW+8(%r15)
- clc 0(2,%r1), 0(%r2) # is current instruction = SIE?
- jne io_restore # no-> return to kernel
- lg %r1, SP_PSW+8(%r15) # yes-> add 4 bytes to leave SIE
- aghi %r1, 4
- stg %r1, SP_PSW+8(%r15)
- j io_restore # return to kernel
-#else
- jno io_restore # no-> skip resched & signal
-#endif
-#else
- jnz io_work_user # yes -> do resched & signal
-#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
- la %r1, BASED(sie_opcode)
- lg %r2, SP_PSW+8(%r15)
- clc 0(2,%r1), 0(%r2) # is current instruction = SIE?
- jne 0f # no -> leave PSW alone
- lg %r1, SP_PSW+8(%r15) # yes-> add 4 bytes to leave SIE
- aghi %r1, 4
- stg %r1, SP_PSW+8(%r15)
-0:
-#endif
+ jo io_work_user # yes -> do resched & signal
+#ifdef CONFIG_PREEMPT
# check for preemptive scheduling
- icm %r0,15,__TI_precount(%r9)
+ icm %r0,15,__TI_precount(%r12)
jnz io_restore # preemption is disabled
+ tm __TI_flags+7(%r12),_TIF_NEED_RESCHED
+ jno io_restore
# switch to kernel stack
lg %r1,SP_R15(%r15)
aghi %r1,-SP_SIZE
mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain
lgr %r15,%r1
-io_resume_loop:
- tm __TI_flags+7(%r9),_TIF_NEED_RESCHED
- jno io_restore
- larl %r14,io_resume_loop
- jg preempt_schedule_irq
+ # TRACE_IRQS_ON already done at io_return, call
+ # TRACE_IRQS_OFF to keep things symmetrical
+ TRACE_IRQS_OFF
+ brasl %r14,preempt_schedule_irq
+ j io_return
+#else
+ j io_restore
#endif
+#
+# Need to do work before returning to userspace, switch to kernel stack
+#
io_work_user:
lg %r1,__LC_KERNEL_STACK
aghi %r1,-SP_SIZE
mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain
lgr %r15,%r1
+
#
# One of the work bits is on. Find out which one.
-# Checked are: _TIF_SIGPENDING, _TIF_RESTORE_SIGPENDING, _TIF_NEED_RESCHED
+# Checked are: _TIF_SIGPENDING, _TIF_NOTIFY_RESUME, _TIF_NEED_RESCHED
# and _TIF_MCCK_PENDING
#
-io_work_loop:
- tm __TI_flags+7(%r9),_TIF_MCCK_PENDING
+io_work_tif:
+ tm __TI_flags+7(%r12),_TIF_MCCK_PENDING
jo io_mcck_pending
- tm __TI_flags+7(%r9),_TIF_NEED_RESCHED
+ tm __TI_flags+7(%r12),_TIF_NEED_RESCHED
jo io_reschedule
- tm __TI_flags+7(%r9),_TIF_SIGPENDING
- jnz io_sigpending
- tm __TI_flags+7(%r9),_TIF_NOTIFY_RESUME
- jnz io_notify_resume
- j io_restore
-io_work_done:
-
-#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
-sie_opcode:
- .long 0xb2140000
-#endif
+ tm __TI_flags+7(%r12),_TIF_SIGPENDING
+ jo io_sigpending
+ tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME
+ jo io_notify_resume
+ j io_return # beware of critical section cleanup
#
# _TIF_MCCK_PENDING is set, call handler
#
io_mcck_pending:
+ # TRACE_IRQS_ON already done at io_return
brasl %r14,s390_handle_mcck # TIF bit will be cleared by handler
- j io_work_loop
+ TRACE_IRQS_OFF
+ j io_return
#
# _TIF_NEED_RESCHED is set, call schedule
#
io_reschedule:
- TRACE_IRQS_ON
+ # TRACE_IRQS_ON already done at io_return
stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
brasl %r14,schedule # call scheduler
stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
TRACE_IRQS_OFF
- tm __TI_flags+7(%r9),_TIF_WORK_INT
- jz io_restore # there is no work to do
- j io_work_loop
+ j io_return
#
# _TIF_SIGPENDING or is set, call do_signal
#
io_sigpending:
- TRACE_IRQS_ON
+ # TRACE_IRQS_ON already done at io_return
stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
la %r2,SP_PTREGS(%r15) # load pt_regs
brasl %r14,do_signal # call do_signal
stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
TRACE_IRQS_OFF
- j io_work_loop
+ j io_return
#
# _TIF_NOTIFY_RESUME or is set, call do_notify_resume
#
io_notify_resume:
- TRACE_IRQS_ON
+ # TRACE_IRQS_ON already done at io_return
stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
la %r2,SP_PTREGS(%r15) # load pt_regs
brasl %r14,do_notify_resume # call do_notify_resume
stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
TRACE_IRQS_OFF
- j io_work_loop
+ j io_return
/*
* External interrupt handler routine
@@ -731,16 +722,18 @@ io_notify_resume:
ext_int_handler:
stck __LC_INT_CLOCK
stpt __LC_ASYNC_ENTER_TIMER
- SAVE_ALL_BASE __LC_SAVE_AREA+32
- SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+32
- CREATE_STACK_FRAME __LC_EXT_OLD_PSW,__LC_SAVE_AREA+32
+ SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+40
+ CREATE_STACK_FRAME __LC_SAVE_AREA+40
+ mvc SP_PSW(16,%r15),0(%r12) # move user PSW to stack
+ lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
tm SP_PSW+1(%r15),0x01 # interrupting from user ?
jz ext_no_vtime
UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
+ LAST_BREAK
ext_no_vtime:
- lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
+ HANDLE_SIE_INTERCEPT
TRACE_IRQS_OFF
la %r2,SP_PTREGS(%r15) # address of register-save area
llgh %r3,__LC_EXT_INT_CODE # get interruption code
@@ -754,17 +747,18 @@ __critical_end:
*/
.globl mcck_int_handler
mcck_int_handler:
- stck __LC_INT_CLOCK
+ stck __LC_MCCK_CLOCK
la %r1,4095 # revalidate r1
spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # revalidate cpu timer
lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs
- SAVE_ALL_BASE __LC_SAVE_AREA+64
+ stmg %r11,%r15,__LC_SAVE_AREA+80
+ larl %r13,system_call
+ lg %r11,__LC_LAST_BREAK
la %r12,__LC_MCK_OLD_PSW
tm __LC_MCCK_CODE,0x80 # system damage?
jo mcck_int_main # yes -> rest of mcck code invalid
la %r14,4095
- mvc __LC_SAVE_AREA+104(8),__LC_ASYNC_ENTER_TIMER
- mvc __LC_ASYNC_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA-4095(%r14)
+ mvc __LC_MCCK_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA-4095(%r14)
tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid?
jo 1f
la %r14,__LC_SYNC_ENTER_TIMER
@@ -778,7 +772,7 @@ mcck_int_handler:
jl 0f
la %r14,__LC_LAST_UPDATE_TIMER
0: spt 0(%r14)
- mvc __LC_ASYNC_ENTER_TIMER(8),0(%r14)
+ mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
1: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid?
jno mcck_int_main # no -> skip cleanup critical
tm __LC_MCK_OLD_PSW+1,0x01 # test problem state bit
@@ -794,16 +788,19 @@ mcck_int_main:
srag %r14,%r14,PAGE_SHIFT
jz 0f
lg %r15,__LC_PANIC_STACK # load panic stack
-0: CREATE_STACK_FRAME __LC_MCK_OLD_PSW,__LC_SAVE_AREA+64
+0: aghi %r15,-SP_SIZE # make room for registers & psw
+ CREATE_STACK_FRAME __LC_SAVE_AREA+80
+ mvc SP_PSW(16,%r15),0(%r12)
+ lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
tm __LC_MCCK_CODE+2,0x08 # mwp of old psw valid?
jno mcck_no_vtime # no -> no timer update
tm SP_PSW+1(%r15),0x01 # interrupting from user ?
jz mcck_no_vtime
- UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
+ UPDATE_VTIME __LC_EXIT_TIMER,__LC_MCCK_ENTER_TIMER,__LC_USER_TIMER
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
- mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
+ mvc __LC_LAST_UPDATE_TIMER(8),__LC_MCCK_ENTER_TIMER
+ LAST_BREAK
mcck_no_vtime:
- lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
la %r2,SP_PTREGS(%r15) # load pt_regs
brasl %r14,s390_do_machine_check
tm SP_PSW+1(%r15),0x01 # returning to user ?
@@ -814,8 +811,9 @@ mcck_no_vtime:
xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain
lgr %r15,%r1
stosm __SF_EMPTY(%r15),0x04 # turn dat on
- tm __TI_flags+7(%r9),_TIF_MCCK_PENDING
+ tm __TI_flags+7(%r12),_TIF_MCCK_PENDING
jno mcck_return
+ HANDLE_SIE_INTERCEPT
TRACE_IRQS_OFF
brasl %r14,s390_handle_mcck
TRACE_IRQS_ON
@@ -823,11 +821,11 @@ mcck_return:
mvc __LC_RETURN_MCCK_PSW(16),SP_PSW(%r15) # move return PSW
ni __LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit
lmg %r0,%r15,SP_R0(%r15) # load gprs 0-15
- mvc __LC_ASYNC_ENTER_TIMER(8),__LC_SAVE_AREA+104
tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
jno 0f
stpt __LC_EXIT_TIMER
0: lpswe __LC_RETURN_MCCK_PSW # back to caller
+mcck_done:
/*
* Restart interruption handler, kick starter for additional CPUs
@@ -883,14 +881,14 @@ stack_overflow:
lg %r15,__LC_PANIC_STACK # change to panic stack
aghi %r15,-SP_SIZE
mvc SP_PSW(16,%r15),0(%r12) # move user PSW to stack
- stmg %r0,%r11,SP_R0(%r15) # store gprs %r0-%r11 to kernel stack
+ stmg %r0,%r10,SP_R0(%r15) # store gprs %r0-%r10 to kernel stack
la %r1,__LC_SAVE_AREA
chi %r12,__LC_SVC_OLD_PSW
je 0f
chi %r12,__LC_PGM_OLD_PSW
je 0f
- la %r1,__LC_SAVE_AREA+32
-0: mvc SP_R12(32,%r15),0(%r1) # move %r12-%r15 to stack
+ la %r1,__LC_SAVE_AREA+40
+0: mvc SP_R11(40,%r15),0(%r1) # move %r11-%r15 to stack
mvc SP_ARGS(8,%r15),__LC_LAST_BREAK
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) # clear back chain
la %r2,SP_PTREGS(%r15) # load pt_regs
@@ -899,18 +897,14 @@ stack_overflow:
cleanup_table_system_call:
.quad system_call, sysc_do_svc
-cleanup_table_sysc_return:
- .quad sysc_return, sysc_leave
-cleanup_table_sysc_leave:
- .quad sysc_leave, sysc_done
-cleanup_table_sysc_work_loop:
- .quad sysc_work_loop, sysc_work_done
-cleanup_table_io_return:
- .quad io_return, io_leave
-cleanup_table_io_leave:
- .quad io_leave, io_done
-cleanup_table_io_work_loop:
- .quad io_work_loop, io_work_done
+cleanup_table_sysc_tif:
+ .quad sysc_tif, sysc_restore
+cleanup_table_sysc_restore:
+ .quad sysc_restore, sysc_done
+cleanup_table_io_tif:
+ .quad io_tif, io_restore
+cleanup_table_io_restore:
+ .quad io_restore, io_done
cleanup_critical:
clc 8(8,%r12),BASED(cleanup_table_system_call)
@@ -918,61 +912,54 @@ cleanup_critical:
clc 8(8,%r12),BASED(cleanup_table_system_call+8)
jl cleanup_system_call
0:
- clc 8(8,%r12),BASED(cleanup_table_sysc_return)
- jl 0f
- clc 8(8,%r12),BASED(cleanup_table_sysc_return+8)
- jl cleanup_sysc_return
-0:
- clc 8(8,%r12),BASED(cleanup_table_sysc_leave)
+ clc 8(8,%r12),BASED(cleanup_table_sysc_tif)
jl 0f
- clc 8(8,%r12),BASED(cleanup_table_sysc_leave+8)
- jl cleanup_sysc_leave
+ clc 8(8,%r12),BASED(cleanup_table_sysc_tif+8)
+ jl cleanup_sysc_tif
0:
- clc 8(8,%r12),BASED(cleanup_table_sysc_work_loop)
+ clc 8(8,%r12),BASED(cleanup_table_sysc_restore)
jl 0f
- clc 8(8,%r12),BASED(cleanup_table_sysc_work_loop+8)
- jl cleanup_sysc_return
+ clc 8(8,%r12),BASED(cleanup_table_sysc_restore+8)
+ jl cleanup_sysc_restore
0:
- clc 8(8,%r12),BASED(cleanup_table_io_return)
+ clc 8(8,%r12),BASED(cleanup_table_io_tif)
jl 0f
- clc 8(8,%r12),BASED(cleanup_table_io_return+8)
- jl cleanup_io_return
+ clc 8(8,%r12),BASED(cleanup_table_io_tif+8)
+ jl cleanup_io_tif
0:
- clc 8(8,%r12),BASED(cleanup_table_io_leave)
+ clc 8(8,%r12),BASED(cleanup_table_io_restore)
jl 0f
- clc 8(8,%r12),BASED(cleanup_table_io_leave+8)
- jl cleanup_io_leave
-0:
- clc 8(8,%r12),BASED(cleanup_table_io_work_loop)
- jl 0f
- clc 8(8,%r12),BASED(cleanup_table_io_work_loop+8)
- jl cleanup_io_work_loop
+ clc 8(8,%r12),BASED(cleanup_table_io_restore+8)
+ jl cleanup_io_restore
0:
br %r14
cleanup_system_call:
mvc __LC_RETURN_PSW(16),0(%r12)
- cghi %r12,__LC_MCK_OLD_PSW
- je 0f
- la %r12,__LC_SAVE_AREA+32
- j 1f
-0: la %r12,__LC_SAVE_AREA+64
-1:
clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+8)
jh 0f
+ mvc __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER
+ cghi %r12,__LC_MCK_OLD_PSW
+ je 0f
mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
+0: cghi %r12,__LC_MCK_OLD_PSW
+ la %r12,__LC_SAVE_AREA+80
+ je 0f
+ la %r12,__LC_SAVE_AREA+40
0: clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+16)
jhe cleanup_vtime
clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn)
jh 0f
- mvc __LC_SAVE_AREA(32),0(%r12)
-0: stg %r13,8(%r12)
- stg %r12,__LC_SAVE_AREA+96 # argh
- SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
- CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA
- lg %r12,__LC_SAVE_AREA+96 # argh
- stg %r15,24(%r12)
- llgh %r7,__LC_SVC_INT_CODE
+ mvc __LC_SAVE_AREA(40),0(%r12)
+0: lg %r15,__LC_KERNEL_STACK # problem state -> load ksp
+ aghi %r15,-SP_SIZE # make room for registers & psw
+ stg %r15,32(%r12)
+ stg %r11,0(%r12)
+ CREATE_STACK_FRAME __LC_SAVE_AREA
+ mvc SP_PSW(16,%r15),__LC_SVC_OLD_PSW
+ mvc SP_ILC(4,%r15),__LC_SVC_ILC
+ stg %r7,SP_ARGS(%r15)
+ mvc 8(8,%r12),__LC_THREAD_INFO
cleanup_vtime:
clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+24)
jhe cleanup_stime
@@ -983,7 +970,11 @@ cleanup_stime:
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
cleanup_update:
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
- mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_system_call+8)
+ srag %r12,%r11,23
+ lg %r12,__LC_THREAD_INFO
+ jz 0f
+ stg %r11,__TI_last_break(%r12)
+0: mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_system_call+8)
la %r12,__LC_RETURN_PSW
br %r14
cleanup_system_call_insn:
@@ -993,61 +984,54 @@ cleanup_system_call_insn:
.quad sysc_stime
.quad sysc_update
-cleanup_sysc_return:
+cleanup_sysc_tif:
mvc __LC_RETURN_PSW(8),0(%r12)
- mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_sysc_return)
+ mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_sysc_tif)
la %r12,__LC_RETURN_PSW
br %r14
-cleanup_sysc_leave:
- clc 8(8,%r12),BASED(cleanup_sysc_leave_insn)
- je 3f
- clc 8(8,%r12),BASED(cleanup_sysc_leave_insn+8)
+cleanup_sysc_restore:
+ clc 8(8,%r12),BASED(cleanup_sysc_restore_insn)
+ je 2f
+ clc 8(8,%r12),BASED(cleanup_sysc_restore_insn+8)
jhe 0f
+ mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
+ cghi %r12,__LC_MCK_OLD_PSW
+ je 0f
mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
0: mvc __LC_RETURN_PSW(16),SP_PSW(%r15)
cghi %r12,__LC_MCK_OLD_PSW
- jne 1f
- mvc __LC_SAVE_AREA+64(32),SP_R12(%r15)
- j 2f
-1: mvc __LC_SAVE_AREA+32(32),SP_R12(%r15)
-2: lmg %r0,%r11,SP_R0(%r15)
+ la %r12,__LC_SAVE_AREA+80
+ je 1f
+ la %r12,__LC_SAVE_AREA+40
+1: mvc 0(40,%r12),SP_R11(%r15)
+ lmg %r0,%r10,SP_R0(%r15)
lg %r15,SP_R15(%r15)
-3: la %r12,__LC_RETURN_PSW
+2: la %r12,__LC_RETURN_PSW
br %r14
-cleanup_sysc_leave_insn:
+cleanup_sysc_restore_insn:
.quad sysc_done - 4
.quad sysc_done - 16
-cleanup_io_return:
- mvc __LC_RETURN_PSW(8),0(%r12)
- mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_io_return)
- la %r12,__LC_RETURN_PSW
- br %r14
-
-cleanup_io_work_loop:
+cleanup_io_tif:
mvc __LC_RETURN_PSW(8),0(%r12)
- mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_io_work_loop)
+ mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_io_tif)
la %r12,__LC_RETURN_PSW
br %r14
-cleanup_io_leave:
- clc 8(8,%r12),BASED(cleanup_io_leave_insn)
- je 3f
- clc 8(8,%r12),BASED(cleanup_io_leave_insn+8)
+cleanup_io_restore:
+ clc 8(8,%r12),BASED(cleanup_io_restore_insn)
+ je 1f
+ clc 8(8,%r12),BASED(cleanup_io_restore_insn+8)
jhe 0f
- mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
+ mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
0: mvc __LC_RETURN_PSW(16),SP_PSW(%r15)
- cghi %r12,__LC_MCK_OLD_PSW
- jne 1f
- mvc __LC_SAVE_AREA+64(32),SP_R12(%r15)
- j 2f
-1: mvc __LC_SAVE_AREA+32(32),SP_R12(%r15)
-2: lmg %r0,%r11,SP_R0(%r15)
+ mvc __LC_SAVE_AREA+80(40),SP_R11(%r15)
+ lmg %r0,%r10,SP_R0(%r15)
lg %r15,SP_R15(%r15)
-3: la %r12,__LC_RETURN_PSW
+1: la %r12,__LC_RETURN_PSW
br %r14
-cleanup_io_leave_insn:
+cleanup_io_restore_insn:
.quad io_done - 4
.quad io_done - 16
@@ -1055,13 +1039,6 @@ cleanup_io_leave_insn:
* Integer constants
*/
.align 4
-.Lconst:
-.Lnr_syscalls: .long NR_syscalls
-.L0x0130: .short 0x130
-.L0x0140: .short 0x140
-.L0x0150: .short 0x150
-.L0x0160: .short 0x160
-.L0x0170: .short 0x170
.Lcritical_start:
.quad __critical_start
.Lcritical_end:
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index 9d1f76702d47..51838ad42d56 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -328,8 +328,8 @@ iplstart:
#
# reset files in VM reader
#
- stidp __LC_CPUID # store cpuid
- tm __LC_CPUID,0xff # running VM ?
+ stidp __LC_SAVE_AREA # store cpuid
+ tm __LC_SAVE_AREA,0xff # running VM ?
bno .Lnoreset
la %r2,.Lreset
lhi %r3,26
diff --git a/arch/s390/kernel/head31.S b/arch/s390/kernel/head31.S
index 1bbcc499d455..b8f8dc126102 100644
--- a/arch/s390/kernel/head31.S
+++ b/arch/s390/kernel/head31.S
@@ -82,7 +82,7 @@ startup_continue:
_ehead:
#ifdef CONFIG_SHARED_KERNEL
- .org 0x100000
+ .org 0x100000 - 0x11000 # head.o ends at 0x11000
#endif
#
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index 1f70970de0aa..cdef68717416 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -80,7 +80,7 @@ startup_continue:
_ehead:
#ifdef CONFIG_SHARED_KERNEL
- .org 0x100000
+ .org 0x100000 - 0x11000 # head.o ends at 0x11000
#endif
#
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 72c8b0d070c8..a689070be287 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -403,8 +403,9 @@ static ssize_t sys_ipl_device_show(struct kobject *kobj,
static struct kobj_attribute sys_ipl_device_attr =
__ATTR(device, S_IRUGO, sys_ipl_device_show, NULL);
-static ssize_t ipl_parameter_read(struct kobject *kobj, struct bin_attribute *attr,
- char *buf, loff_t off, size_t count)
+static ssize_t ipl_parameter_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
{
return memory_read_from_buffer(buf, count, &off, IPL_PARMBLOCK_START,
IPL_PARMBLOCK_SIZE);
@@ -419,8 +420,9 @@ static struct bin_attribute ipl_parameter_attr = {
.read = &ipl_parameter_read,
};
-static ssize_t ipl_scp_data_read(struct kobject *kobj, struct bin_attribute *attr,
- char *buf, loff_t off, size_t count)
+static ssize_t ipl_scp_data_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
{
unsigned int size = IPL_PARMBLOCK_START->ipl_info.fcp.scp_data_len;
void *scp_data = &IPL_PARMBLOCK_START->ipl_info.fcp.scp_data;
@@ -694,7 +696,7 @@ static struct kobj_attribute sys_reipl_ccw_vmparm_attr =
/* FCP reipl device attributes */
-static ssize_t reipl_fcp_scpdata_read(struct kobject *kobj,
+static ssize_t reipl_fcp_scpdata_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
@@ -704,7 +706,7 @@ static ssize_t reipl_fcp_scpdata_read(struct kobject *kobj,
return memory_read_from_buffer(buf, count, &off, scp_data, size);
}
-static ssize_t reipl_fcp_scpdata_write(struct kobject *kobj,
+static ssize_t reipl_fcp_scpdata_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index 3d34eef5a2c3..2a3d2bf6f083 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -63,6 +63,8 @@ int __kprobes is_prohibited_opcode(kprobe_opcode_t *instruction)
case 0x0b: /* bsm */
case 0x83: /* diag */
case 0x44: /* ex */
+ case 0xac: /* stnsm */
+ case 0xad: /* stosm */
return -EINVAL;
}
switch (*(__u16 *) instruction) {
@@ -72,6 +74,7 @@ int __kprobes is_prohibited_opcode(kprobe_opcode_t *instruction)
case 0xb258: /* bsg */
case 0xb218: /* pc */
case 0xb228: /* pt */
+ case 0xb98d: /* epsw */
return -EINVAL;
}
return 0;
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index 015e27da40eb..ac151399ef34 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -255,7 +255,8 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
int umode;
nmi_enter();
- s390_idle_check();
+ s390_idle_check(regs, S390_lowcore.mcck_clock,
+ S390_lowcore.mcck_enter_timer);
mci = (struct mci *) &S390_lowcore.mcck_interruption_code;
mcck = &__get_cpu_var(cpu_mcck);
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index 0729f36c2fe3..ecb2d02b02e4 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -18,24 +18,42 @@
#include <asm/lowcore.h>
#include <asm/param.h>
+static DEFINE_PER_CPU(struct cpuid, cpu_id);
+
+/*
+ * cpu_init - initializes state that is per-CPU.
+ */
+void __cpuinit cpu_init(void)
+{
+ struct cpuid *id = &per_cpu(cpu_id, smp_processor_id());
+
+ get_cpu_id(id);
+ atomic_inc(&init_mm.mm_count);
+ current->active_mm = &init_mm;
+ BUG_ON(current->mm);
+ enter_lazy_tlb(&init_mm, current);
+}
+
+/*
+ * print_cpu_info - print basic information about a cpu
+ */
void __cpuinit print_cpu_info(void)
{
+ struct cpuid *id = &per_cpu(cpu_id, smp_processor_id());
+
pr_info("Processor %d started, address %d, identification %06X\n",
- S390_lowcore.cpu_nr, S390_lowcore.cpu_addr,
- S390_lowcore.cpu_id.ident);
+ S390_lowcore.cpu_nr, S390_lowcore.cpu_addr, id->ident);
}
/*
* show_cpuinfo - Get information on one CPU for use by procfs.
*/
-
static int show_cpuinfo(struct seq_file *m, void *v)
{
static const char *hwcap_str[10] = {
"esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp",
"edat", "etf3eh", "highgprs"
};
- struct _lowcore *lc;
unsigned long n = (unsigned long) v - 1;
int i;
@@ -55,19 +73,12 @@ static int show_cpuinfo(struct seq_file *m, void *v)
}
if (cpu_online(n)) {
-#ifdef CONFIG_SMP
- lc = (smp_processor_id() == n) ?
- &S390_lowcore : lowcore_ptr[n];
-#else
- lc = &S390_lowcore;
-#endif
+ struct cpuid *id = &per_cpu(cpu_id, n);
seq_printf(m, "processor %li: "
"version = %02X, "
"identification = %06X, "
"machine = %04X\n",
- n, lc->cpu_id.version,
- lc->cpu_id.ident,
- lc->cpu_id.machine);
+ n, id->version, id->ident, id->machine);
}
preempt_enable();
return 0;
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 33fdc5a79764..83339d33c4b1 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -57,6 +57,7 @@
enum s390_regset {
REGSET_GENERAL,
REGSET_FP,
+ REGSET_LAST_BREAK,
REGSET_GENERAL_EXTENDED,
};
@@ -381,6 +382,10 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
copied += sizeof(unsigned long);
}
return 0;
+ case PTRACE_GET_LAST_BREAK:
+ put_user(task_thread_info(child)->last_break,
+ (unsigned long __user *) data);
+ return 0;
default:
/* Removing high order bit from addr (only for 31 bit). */
addr &= PSW_ADDR_INSN;
@@ -633,6 +638,10 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
copied += sizeof(unsigned int);
}
return 0;
+ case PTRACE_GET_LAST_BREAK:
+ put_user(task_thread_info(child)->last_break,
+ (unsigned int __user *) data);
+ return 0;
}
return compat_ptrace_request(child, request, addr, data);
}
@@ -640,7 +649,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
{
- long ret;
+ long ret = 0;
/* Do the secure computing check first. */
secure_computing(regs->gprs[2]);
@@ -649,7 +658,6 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
* The sysc_tracesys code in entry.S stored the system
* call number to gprs[2].
*/
- ret = regs->gprs[2];
if (test_thread_flag(TIF_SYSCALL_TRACE) &&
(tracehook_report_syscall_entry(regs) ||
regs->gprs[2] >= NR_syscalls)) {
@@ -671,7 +679,7 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
regs->gprs[2], regs->orig_gpr2,
regs->gprs[3], regs->gprs[4],
regs->gprs[5]);
- return ret;
+ return ret ?: regs->gprs[2];
}
asmlinkage void do_syscall_trace_exit(struct pt_regs *regs)
@@ -798,6 +806,28 @@ static int s390_fpregs_set(struct task_struct *target,
return rc;
}
+#ifdef CONFIG_64BIT
+
+static int s390_last_break_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ if (count > 0) {
+ if (kbuf) {
+ unsigned long *k = kbuf;
+ *k = task_thread_info(target)->last_break;
+ } else {
+ unsigned long __user *u = ubuf;
+ if (__put_user(task_thread_info(target)->last_break, u))
+ return -EFAULT;
+ }
+ }
+ return 0;
+}
+
+#endif
+
static const struct user_regset s390_regsets[] = {
[REGSET_GENERAL] = {
.core_note_type = NT_PRSTATUS,
@@ -815,6 +845,15 @@ static const struct user_regset s390_regsets[] = {
.get = s390_fpregs_get,
.set = s390_fpregs_set,
},
+#ifdef CONFIG_64BIT
+ [REGSET_LAST_BREAK] = {
+ .core_note_type = NT_S390_LAST_BREAK,
+ .n = 1,
+ .size = sizeof(long),
+ .align = sizeof(long),
+ .get = s390_last_break_get,
+ },
+#endif
};
static const struct user_regset_view user_s390_view = {
@@ -949,6 +988,27 @@ static int s390_compat_regs_high_set(struct task_struct *target,
return rc;
}
+static int s390_compat_last_break_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ compat_ulong_t last_break;
+
+ if (count > 0) {
+ last_break = task_thread_info(target)->last_break;
+ if (kbuf) {
+ unsigned long *k = kbuf;
+ *k = last_break;
+ } else {
+ unsigned long __user *u = ubuf;
+ if (__put_user(last_break, u))
+ return -EFAULT;
+ }
+ }
+ return 0;
+}
+
static const struct user_regset s390_compat_regsets[] = {
[REGSET_GENERAL] = {
.core_note_type = NT_PRSTATUS,
@@ -966,6 +1026,13 @@ static const struct user_regset s390_compat_regsets[] = {
.get = s390_fpregs_get,
.set = s390_fpregs_set,
},
+ [REGSET_LAST_BREAK] = {
+ .core_note_type = NT_S390_LAST_BREAK,
+ .n = 1,
+ .size = sizeof(long),
+ .align = sizeof(long),
+ .get = s390_compat_last_break_get,
+ },
[REGSET_GENERAL_EXTENDED] = {
.core_note_type = NT_S390_HIGH_GPRS,
.n = sizeof(s390_compat_regs_high) / sizeof(compat_long_t),
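
The ptrace.c hunks above add a PTRACE_GET_LAST_BREAK request (plus a matching NT_S390_LAST_BREAK regset) that lets a tracer read the traced task's last breaking-event address, which entry64.S now records via the LAST_BREAK macro. A minimal tracer-side sketch follows; it is not part of the patch and assumes PTRACE_GET_LAST_BREAK is exported to userspace through <asm/ptrace.h> on an s390 kernel carrying this change:

	#include <stdio.h>
	#include <sys/types.h>
	#include <sys/ptrace.h>
	#include <asm/ptrace.h>		/* PTRACE_GET_LAST_BREAK (s390 only) */

	/*
	 * Read the child's last breaking-event address. The child must be
	 * stopped under ptrace; addr is unused and the kernel writes the
	 * value through the data pointer (see arch_ptrace() above).
	 */
	static int read_last_break(pid_t child, unsigned long *last_break)
	{
		if (ptrace(PTRACE_GET_LAST_BREAK, child, NULL, last_break) == -1) {
			perror("PTRACE_GET_LAST_BREAK");
			return -1;
		}
		return 0;
	}

A 31-bit (compat) tracee goes through compat_arch_ptrace() instead and gets a 32-bit value stored, as the compat hunk above shows.
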
diff --git a/arch/s390/kernel/s390_ext.c b/arch/s390/kernel/s390_ext.c
index 59618bcd99b7..9ce641b5291f 100644
--- a/arch/s390/kernel/s390_ext.c
+++ b/arch/s390/kernel/s390_ext.c
@@ -120,7 +120,8 @@ void __irq_entry do_extint(struct pt_regs *regs, unsigned short code)
struct pt_regs *old_regs;
old_regs = set_irq_regs(regs);
- s390_idle_check();
+ s390_idle_check(regs, S390_lowcore.int_clock,
+ S390_lowcore.async_enter_timer);
irq_enter();
if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
/* Serve timer interrupts first. */
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 91625f759ccd..c8e8e1354e1d 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -2,7 +2,7 @@
* arch/s390/kernel/setup.c
*
* S390 version
- * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright (C) IBM Corp. 1999,2010
* Author(s): Hartmut Penner (hp@de.ibm.com),
* Martin Schwidefsky (schwidefsky@de.ibm.com)
*
@@ -113,22 +113,6 @@ static struct resource data_resource = {
};
/*
- * cpu_init() initializes state that is per-CPU.
- */
-void __cpuinit cpu_init(void)
-{
- /*
- * Store processor id in lowcore (used e.g. in timer_interrupt)
- */
- get_cpu_id(&S390_lowcore.cpu_id);
-
- atomic_inc(&init_mm.mm_count);
- current->active_mm = &init_mm;
- BUG_ON(current->mm);
- enter_lazy_tlb(&init_mm, current);
-}
-
-/*
* condev= and conmode= setup parameter.
*/
@@ -385,10 +369,6 @@ static void setup_addressing_mode(void)
pr_info("Address spaces switched, "
"mvcos not available\n");
}
-#ifdef CONFIG_TRACE_IRQFLAGS
- sysc_restore_trace_psw.mask = psw_kernel_bits & ~PSW_MASK_MCHECK;
- io_restore_trace_psw.mask = psw_kernel_bits & ~PSW_MASK_MCHECK;
-#endif
}
static void __init
@@ -437,6 +417,7 @@ setup_lowcore(void)
__ctl_set_bit(14, 29);
}
#else
+ lc->cmf_hpp = -1ULL;
lc->vdso_per_cpu_data = (unsigned long) &lc->paste[0];
#endif
lc->sync_enter_timer = S390_lowcore.sync_enter_timer;
@@ -695,6 +676,7 @@ static void __init setup_hwcaps(void)
static const int stfl_bits[6] = { 0, 2, 7, 17, 19, 21 };
unsigned long long facility_list_extended;
unsigned int facility_list;
+ struct cpuid cpu_id;
int i;
facility_list = stfl();
@@ -756,7 +738,8 @@ static void __init setup_hwcaps(void)
*/
elf_hwcap |= HWCAP_S390_HIGH_GPRS;
- switch (S390_lowcore.cpu_id.machine) {
+ get_cpu_id(&cpu_id);
+ switch (cpu_id.machine) {
case 0x9672:
#if !defined(CONFIG_64BIT)
default: /* Use "g5" as default for 31 bit kernels. */
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index 6289945562b0..ee7ac8b11782 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -313,6 +313,7 @@ static int setup_frame(int sig, struct k_sigaction *ka,
To avoid breaking binary compatibility, they are passed as args. */
regs->gprs[4] = current->thread.trap_no;
regs->gprs[5] = current->thread.prot_addr;
+ regs->gprs[6] = task_thread_info(current)->last_break;
/* Place signal number on stack to allow backtrace from handler. */
if (__put_user(regs->gprs[2], (int __user *) &frame->signo))
@@ -376,6 +377,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
regs->gprs[2] = map_signal(sig);
regs->gprs[3] = (unsigned long) &frame->info;
regs->gprs[4] = (unsigned long) &frame->uc;
+ regs->gprs[5] = task_thread_info(current)->last_break;
return 0;
give_sigsegv:
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index e4d98de83dd8..541053ed234e 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -944,21 +944,21 @@ static int __cpuinit smp_cpu_notify(struct notifier_block *self,
struct cpu *c = &per_cpu(cpu_devices, cpu);
struct sys_device *s = &c->sysdev;
struct s390_idle_data *idle;
+ int err = 0;
switch (action) {
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
idle = &per_cpu(s390_idle, cpu);
memset(idle, 0, sizeof(struct s390_idle_data));
- if (sysfs_create_group(&s->kobj, &cpu_online_attr_group))
- return NOTIFY_BAD;
+ err = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
break;
case CPU_DEAD:
case CPU_DEAD_FROZEN:
sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
break;
}
- return NOTIFY_OK;
+ return notifier_from_errno(err);
}
static struct notifier_block __cpuinitdata smp_cpu_nb = {
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index d906bf19c14a..a2163c95eb98 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -391,7 +391,6 @@ static void __init time_init_wq(void)
if (time_sync_wq)
return;
time_sync_wq = create_singlethread_workqueue("timesync");
- stop_machine_create();
}
/*
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 247b4c2d1e51..bcef00766a64 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -37,7 +37,8 @@ struct tl_cpu {
};
struct tl_container {
- unsigned char reserved[8];
+ unsigned char reserved[7];
+ unsigned char id;
};
union tl_entry {
@@ -58,6 +59,7 @@ struct tl_info {
struct core_info {
struct core_info *next;
+ unsigned char id;
cpumask_t mask;
};
@@ -73,6 +75,7 @@ static DECLARE_WORK(topology_work, topology_work_fn);
static DEFINE_SPINLOCK(topology_lock);
cpumask_t cpu_core_map[NR_CPUS];
+unsigned char cpu_core_id[NR_CPUS];
static cpumask_t cpu_coregroup_map(unsigned int cpu)
{
@@ -116,6 +119,7 @@ static void add_cpus_to_core(struct tl_cpu *tl_cpu, struct core_info *core)
for_each_present_cpu(lcpu) {
if (cpu_logical_map(lcpu) == rcpu) {
cpu_set(lcpu, core->mask);
+ cpu_core_id[lcpu] = core->id;
smp_cpu_polarization[lcpu] = tl_cpu->pp;
}
}
@@ -158,6 +162,7 @@ static void tl_to_cores(struct tl_info *info)
break;
case 1:
core = core->next;
+ core->id = tle->container.id;
break;
case 0:
add_cpus_to_core(&tle->cpu, core);
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 6e7ad63854c0..5d8f0f3d0250 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -46,13 +46,7 @@
pgm_check_handler_t *pgm_check_table[128];
-#ifdef CONFIG_SYSCTL
-#ifdef CONFIG_PROCESS_DEBUG
-int sysctl_userprocess_debug = 1;
-#else
-int sysctl_userprocess_debug = 0;
-#endif
-#endif
+int show_unhandled_signals;
extern pgm_check_handler_t do_protection_exception;
extern pgm_check_handler_t do_dat_exception;
@@ -315,18 +309,19 @@ void die(const char * str, struct pt_regs * regs, long err)
do_exit(SIGSEGV);
}
-static void inline
-report_user_fault(long interruption_code, struct pt_regs *regs)
+static void inline report_user_fault(struct pt_regs *regs, long int_code,
+ int signr)
{
-#if defined(CONFIG_SYSCTL)
- if (!sysctl_userprocess_debug)
+ if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
return;
-#endif
-#if defined(CONFIG_SYSCTL) || defined(CONFIG_PROCESS_DEBUG)
- printk("User process fault: interruption code 0x%lX\n",
- interruption_code);
+ if (!unhandled_signal(current, signr))
+ return;
+ if (!printk_ratelimit())
+ return;
+ printk("User process fault: interruption code 0x%lX ", int_code);
+ print_vma_addr("in ", regs->psw.addr & PSW_ADDR_INSN);
+ printk("\n");
show_regs(regs);
-#endif
}
int is_valid_bugaddr(unsigned long addr)
@@ -354,7 +349,7 @@ static void __kprobes inline do_trap(long interruption_code, int signr,
tsk->thread.trap_no = interruption_code & 0xffff;
force_sig_info(signr, info, tsk);
- report_user_fault(interruption_code, regs);
+ report_user_fault(regs, interruption_code, signr);
} else {
const struct exception_table_entry *fixup;
fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
@@ -390,8 +385,8 @@ static void default_trap_handler(struct pt_regs * regs, long interruption_code)
{
if (regs->psw.mask & PSW_MASK_PSTATE) {
local_irq_enable();
+ report_user_fault(regs, interruption_code, SIGSEGV);
do_exit(SIGSEGV);
- report_user_fault(interruption_code, regs);
} else
die("Unknown program exception", regs, interruption_code);
}
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index 6bc9c197aa91..6b83870507d5 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -102,11 +102,7 @@ static void vdso_init_per_cpu_data(int cpu, struct vdso_per_cpu_data *vpcd)
/*
* Allocate/free per cpu vdso data.
*/
-#ifdef CONFIG_64BIT
#define SEGMENT_ORDER 2
-#else
-#define SEGMENT_ORDER 1
-#endif
int vdso_alloc_per_cpu(int cpu, struct _lowcore *lowcore)
{
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index b59a812a010e..3479f1b0d4e0 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -121,32 +121,35 @@ void account_system_vtime(struct task_struct *tsk)
}
EXPORT_SYMBOL_GPL(account_system_vtime);
-void vtime_start_cpu(void)
+void vtime_start_cpu(__u64 int_clock, __u64 enter_timer)
{
struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
struct vtimer_queue *vq = &__get_cpu_var(virt_cpu_timer);
__u64 idle_time, expires;
+ if (idle->idle_enter == 0ULL)
+ return;
+
/* Account time spent with enabled wait psw loaded as idle time. */
- idle_time = S390_lowcore.int_clock - idle->idle_enter;
+ idle_time = int_clock - idle->idle_enter;
account_idle_time(idle_time);
S390_lowcore.steal_timer +=
idle->idle_enter - S390_lowcore.last_update_clock;
- S390_lowcore.last_update_clock = S390_lowcore.int_clock;
+ S390_lowcore.last_update_clock = int_clock;
/* Account system time spent going idle. */
S390_lowcore.system_timer += S390_lowcore.last_update_timer - vq->idle;
- S390_lowcore.last_update_timer = S390_lowcore.async_enter_timer;
+ S390_lowcore.last_update_timer = enter_timer;
/* Restart vtime CPU timer */
if (vq->do_spt) {
/* Program old expire value but first save progress. */
- expires = vq->idle - S390_lowcore.async_enter_timer;
+ expires = vq->idle - enter_timer;
expires += get_vtimer();
set_vtimer(expires);
} else {
/* Don't account the CPU timer delta while the cpu was idle. */
- vq->elapsed -= vq->idle - S390_lowcore.async_enter_timer;
+ vq->elapsed -= vq->idle - enter_timer;
}
idle->sequence++;
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 49292869a5cd..8093e6f47f49 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -341,11 +341,13 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
rc = kvm_vcpu_init(vcpu, kvm, id);
if (rc)
- goto out_free_cpu;
+ goto out_free_sie_block;
VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
vcpu->arch.sie_block);
return vcpu;
+out_free_sie_block:
+ free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
kfree(vcpu);
out_nomem:
@@ -750,7 +752,7 @@ gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
static int __init kvm_s390_init(void)
{
int ret;
- ret = kvm_init(NULL, sizeof(struct kvm_vcpu), THIS_MODULE);
+ ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
if (ret)
return ret;
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index 60f09ab3672c..cfa9d1777457 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -72,7 +72,7 @@ static inline void kvm_s390_vcpu_set_mem(struct kvm_vcpu *vcpu)
struct kvm_memslots *memslots;
idx = srcu_read_lock(&vcpu->kvm->srcu);
- memslots = rcu_dereference(vcpu->kvm->memslots);
+ memslots = kvm_memslots(vcpu->kvm);
mem = &memslots->memslots[0];
diff --git a/arch/s390/kvm/sie64a.S b/arch/s390/kvm/sie64a.S
index 934fd6a885f6..7e9d30d567b0 100644
--- a/arch/s390/kvm/sie64a.S
+++ b/arch/s390/kvm/sie64a.S
@@ -1,20 +1,58 @@
/*
* sie64a.S - low level sie call
*
- * Copyright IBM Corp. 2008
+ * Copyright IBM Corp. 2008,2010
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License (version 2 only)
* as published by the Free Software Foundation.
*
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
+ * Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
*/
#include <linux/errno.h>
#include <asm/asm-offsets.h>
+#include <asm/setup.h>
+#include <asm/asm-offsets.h>
+#include <asm/ptrace.h>
+#include <asm/thread_info.h>
+
+_TIF_EXIT_SIE = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_MCCK_PENDING)
+
+/*
+ * offsets into stackframe
+ * SP_ = offsets into stack sie64 is called with
+ * SPI_ = offsets into irq stack
+ */
+SP_GREGS = __SF_EMPTY
+SP_HOOK = __SF_EMPTY+8
+SP_GPP = __SF_EMPTY+16
+SPI_PSW = STACK_FRAME_OVERHEAD + __PT_PSW
+
-SP_R5 = 5 * 8 # offset into stackframe
-SP_R6 = 6 * 8
+ .macro SPP newpp
+ tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_SPP
+ jz 0f
+ .insn s,0xb2800000,\newpp
+0:
+ .endm
+
+sie_irq_handler:
+ SPP __LC_CMF_HPP # set host id
+ larl %r2,sie_inst
+ clg %r2,SPI_PSW+8(0,%r15) # intercepted sie
+ jne 1f
+ xc __LC_SIE_HOOK(8),__LC_SIE_HOOK
+ lg %r2,__LC_THREAD_INFO # pointer thread_info struct
+ tm __TI_flags+7(%r2),_TIF_EXIT_SIE
+ jz 0f
+ larl %r2,sie_exit # work pending, leave sie
+ stg %r2,__LC_RETURN_PSW+8
+ br %r14
+0: larl %r2,sie_reenter # re-enter with guest id
+ stg %r2,__LC_RETURN_PSW+8
+1: br %r14
/*
* sie64a calling convention:
@@ -23,23 +61,34 @@ SP_R6 = 6 * 8
*/
.globl sie64a
sie64a:
- lgr %r5,%r3
- stmg %r5,%r14,SP_R5(%r15) # save register on entry
- lgr %r14,%r2 # pointer to sie control block
- lmg %r0,%r13,0(%r3) # load guest gprs 0-13
+ stg %r3,SP_GREGS(%r15) # save guest register save area
+ stmg %r6,%r14,__SF_GPRS(%r15) # save registers on entry
+ lgr %r14,%r2 # pointer to sie control block
+ larl %r5,sie_irq_handler
+ stg %r2,SP_GPP(%r15)
+ stg %r5,SP_HOOK(%r15) # save hook target
+ lmg %r0,%r13,0(%r3) # load guest gprs 0-13
+sie_reenter:
+ mvc __LC_SIE_HOOK(8),SP_HOOK(%r15)
+ SPP SP_GPP(%r15) # set guest id
sie_inst:
sie 0(%r14)
- lg %r14,SP_R5(%r15)
- stmg %r0,%r13,0(%r14) # save guest gprs 0-13
+ xc __LC_SIE_HOOK(8),__LC_SIE_HOOK
+ SPP __LC_CMF_HPP # set host id
+sie_exit:
+ lg %r14,SP_GREGS(%r15)
+ stmg %r0,%r13,0(%r14) # save guest gprs 0-13
lghi %r2,0
- lmg %r6,%r14,SP_R6(%r15)
+ lmg %r6,%r14,__SF_GPRS(%r15)
br %r14
sie_err:
- lg %r14,SP_R5(%r15)
- stmg %r0,%r13,0(%r14) # save guest gprs 0-13
+ xc __LC_SIE_HOOK(8),__LC_SIE_HOOK
+ SPP __LC_CMF_HPP # set host id
+ lg %r14,SP_GREGS(%r15)
+ stmg %r0,%r13,0(%r14) # save guest gprs 0-13
lghi %r2,-EFAULT
- lmg %r6,%r14,SP_R6(%r15)
+ lmg %r6,%r14,__SF_GPRS(%r15)
br %r14
.section __ex_table,"a"
diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c
index f87b34731e1d..eb6a2ef5f82e 100644
--- a/arch/s390/mm/cmm.c
+++ b/arch/s390/mm/cmm.c
@@ -1,11 +1,9 @@
/*
- * arch/s390/mm/cmm.c
+ * Collaborative memory management interface.
*
- * S390 version
- * Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
- * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
+ * Copyright IBM Corp 2003,2010
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
*
- * Collaborative memory management interface.
*/
#include <linux/errno.h>
@@ -20,9 +18,9 @@
#include <linux/kthread.h>
#include <linux/oom.h>
#include <linux/suspend.h>
+#include <linux/uaccess.h>
#include <asm/pgalloc.h>
-#include <asm/uaccess.h>
#include <asm/diag.h>
static char *sender = "VMRMSVM";
@@ -53,14 +51,14 @@ static struct cmm_page_array *cmm_timed_page_list;
static DEFINE_SPINLOCK(cmm_lock);
static struct task_struct *cmm_thread_ptr;
-static wait_queue_head_t cmm_thread_wait;
-static struct timer_list cmm_timer;
+static DECLARE_WAIT_QUEUE_HEAD(cmm_thread_wait);
+static DEFINE_TIMER(cmm_timer, NULL, 0, 0);
static void cmm_timer_fn(unsigned long);
static void cmm_set_timer(void);
-static long
-cmm_alloc_pages(long nr, long *counter, struct cmm_page_array **list)
+static long cmm_alloc_pages(long nr, long *counter,
+ struct cmm_page_array **list)
{
struct cmm_page_array *pa, *npa;
unsigned long addr;
@@ -99,8 +97,7 @@ cmm_alloc_pages(long nr, long *counter, struct cmm_page_array **list)
return nr;
}
-static long
-cmm_free_pages(long nr, long *counter, struct cmm_page_array **list)
+static long cmm_free_pages(long nr, long *counter, struct cmm_page_array **list)
{
struct cmm_page_array *pa;
unsigned long addr;
@@ -140,11 +137,10 @@ static int cmm_oom_notify(struct notifier_block *self,
}
static struct notifier_block cmm_oom_nb = {
- .notifier_call = cmm_oom_notify
+ .notifier_call = cmm_oom_notify,
};
-static int
-cmm_thread(void *dummy)
+static int cmm_thread(void *dummy)
{
int rc;
@@ -170,7 +166,7 @@ cmm_thread(void *dummy)
cmm_timed_pages_target = cmm_timed_pages;
} else if (cmm_timed_pages_target < cmm_timed_pages) {
cmm_free_pages(1, &cmm_timed_pages,
- &cmm_timed_page_list);
+ &cmm_timed_page_list);
}
if (cmm_timed_pages > 0 && !timer_pending(&cmm_timer))
cmm_set_timer();
@@ -178,14 +174,12 @@ cmm_thread(void *dummy)
return 0;
}
-static void
-cmm_kick_thread(void)
+static void cmm_kick_thread(void)
{
wake_up(&cmm_thread_wait);
}
-static void
-cmm_set_timer(void)
+static void cmm_set_timer(void)
{
if (cmm_timed_pages_target <= 0 || cmm_timeout_seconds <= 0) {
if (timer_pending(&cmm_timer))
@@ -202,8 +196,7 @@ cmm_set_timer(void)
add_timer(&cmm_timer);
}
-static void
-cmm_timer_fn(unsigned long ignored)
+static void cmm_timer_fn(unsigned long ignored)
{
long nr;
@@ -216,57 +209,49 @@ cmm_timer_fn(unsigned long ignored)
cmm_set_timer();
}
-void
-cmm_set_pages(long nr)
+static void cmm_set_pages(long nr)
{
cmm_pages_target = nr;
cmm_kick_thread();
}
-long
-cmm_get_pages(void)
+static long cmm_get_pages(void)
{
return cmm_pages;
}
-void
-cmm_add_timed_pages(long nr)
+static void cmm_add_timed_pages(long nr)
{
cmm_timed_pages_target += nr;
cmm_kick_thread();
}
-long
-cmm_get_timed_pages(void)
+static long cmm_get_timed_pages(void)
{
return cmm_timed_pages;
}
-void
-cmm_set_timeout(long nr, long seconds)
+static void cmm_set_timeout(long nr, long seconds)
{
cmm_timeout_pages = nr;
cmm_timeout_seconds = seconds;
cmm_set_timer();
}
-static int
-cmm_skip_blanks(char *cp, char **endp)
+static int cmm_skip_blanks(char *cp, char **endp)
{
char *str;
- for (str = cp; *str == ' ' || *str == '\t'; str++);
+ for (str = cp; *str == ' ' || *str == '\t'; str++)
+ ;
*endp = str;
return str != cp;
}
-#ifdef CONFIG_CMM_PROC
-
static struct ctl_table cmm_table[];
-static int
-cmm_pages_handler(ctl_table *ctl, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+static int cmm_pages_handler(ctl_table *ctl, int write, void __user *buffer,
+ size_t *lenp, loff_t *ppos)
{
char buf[16], *p;
long nr;
@@ -305,9 +290,8 @@ cmm_pages_handler(ctl_table *ctl, int write,
return 0;
}
-static int
-cmm_timeout_handler(ctl_table *ctl, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+static int cmm_timeout_handler(ctl_table *ctl, int write, void __user *buffer,
+ size_t *lenp, loff_t *ppos)
{
char buf[64], *p;
long nr, seconds;
@@ -370,12 +354,10 @@ static struct ctl_table cmm_dir_table[] = {
},
{ }
};
-#endif
#ifdef CONFIG_CMM_IUCV
#define SMSG_PREFIX "CMM"
-static void
-cmm_smsg_target(const char *from, char *msg)
+static void cmm_smsg_target(const char *from, char *msg)
{
long nr, seconds;
@@ -445,16 +427,13 @@ static struct notifier_block cmm_power_notifier = {
.notifier_call = cmm_power_event,
};
-static int
-cmm_init (void)
+static int cmm_init(void)
{
int rc = -ENOMEM;
-#ifdef CONFIG_CMM_PROC
cmm_sysctl_header = register_sysctl_table(cmm_dir_table);
if (!cmm_sysctl_header)
goto out_sysctl;
-#endif
#ifdef CONFIG_CMM_IUCV
rc = smsg_register_callback(SMSG_PREFIX, cmm_smsg_target);
if (rc < 0)
@@ -466,8 +445,6 @@ cmm_init (void)
rc = register_pm_notifier(&cmm_power_notifier);
if (rc)
goto out_pm;
- init_waitqueue_head(&cmm_thread_wait);
- init_timer(&cmm_timer);
cmm_thread_ptr = kthread_run(cmm_thread, NULL, "cmmthread");
rc = IS_ERR(cmm_thread_ptr) ? PTR_ERR(cmm_thread_ptr) : 0;
if (rc)
@@ -483,36 +460,26 @@ out_oom_notify:
smsg_unregister_callback(SMSG_PREFIX, cmm_smsg_target);
out_smsg:
#endif
-#ifdef CONFIG_CMM_PROC
unregister_sysctl_table(cmm_sysctl_header);
out_sysctl:
-#endif
+ del_timer_sync(&cmm_timer);
return rc;
}
+module_init(cmm_init);
-static void
-cmm_exit(void)
+static void cmm_exit(void)
{
- kthread_stop(cmm_thread_ptr);
- unregister_pm_notifier(&cmm_power_notifier);
- unregister_oom_notifier(&cmm_oom_nb);
- cmm_free_pages(cmm_pages, &cmm_pages, &cmm_page_list);
- cmm_free_pages(cmm_timed_pages, &cmm_timed_pages, &cmm_timed_page_list);
-#ifdef CONFIG_CMM_PROC
unregister_sysctl_table(cmm_sysctl_header);
-#endif
#ifdef CONFIG_CMM_IUCV
smsg_unregister_callback(SMSG_PREFIX, cmm_smsg_target);
#endif
+ unregister_pm_notifier(&cmm_power_notifier);
+ unregister_oom_notifier(&cmm_oom_nb);
+ kthread_stop(cmm_thread_ptr);
+ del_timer_sync(&cmm_timer);
+ cmm_free_pages(cmm_pages, &cmm_pages, &cmm_page_list);
+ cmm_free_pages(cmm_timed_pages, &cmm_timed_pages, &cmm_timed_page_list);
}
-
-module_init(cmm_init);
module_exit(cmm_exit);
-EXPORT_SYMBOL(cmm_set_pages);
-EXPORT_SYMBOL(cmm_get_pages);
-EXPORT_SYMBOL(cmm_add_timed_pages);
-EXPORT_SYMBOL(cmm_get_timed_pages);
-EXPORT_SYMBOL(cmm_set_timeout);
-
MODULE_LICENSE("GPL");
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 3040d7c78fe0..2505b2ea0ef1 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -48,10 +48,6 @@
#define __PF_RES_FIELD 0x8000000000000000ULL
#endif /* CONFIG_64BIT */
-#ifdef CONFIG_SYSCTL
-extern int sysctl_userprocess_debug;
-#endif
-
#define VM_FAULT_BADCONTEXT 0x010000
#define VM_FAULT_BADMAP 0x020000
#define VM_FAULT_BADACCESS 0x040000
@@ -120,6 +116,22 @@ static inline int user_space_fault(unsigned long trans_exc_code)
return trans_exc_code != 3;
}
+static inline void report_user_fault(struct pt_regs *regs, long int_code,
+ int signr, unsigned long address)
+{
+ if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
+ return;
+ if (!unhandled_signal(current, signr))
+ return;
+ if (!printk_ratelimit())
+ return;
+ printk("User process fault: interruption code 0x%lX ", int_code);
+ print_vma_addr(KERN_CONT "in ", regs->psw.addr & PSW_ADDR_INSN);
+ printk("\n");
+ printk("failing address: %lX\n", address);
+ show_regs(regs);
+}
+
/*
* Send SIGSEGV to task. This is an external routine
* to keep the stack usage of do_page_fault small.
@@ -133,17 +145,7 @@ static noinline void do_sigsegv(struct pt_regs *regs, long int_code,
address = trans_exc_code & __FAIL_ADDR_MASK;
current->thread.prot_addr = address;
current->thread.trap_no = int_code;
-#if defined(CONFIG_SYSCTL) || defined(CONFIG_PROCESS_DEBUG)
-#if defined(CONFIG_SYSCTL)
- if (sysctl_userprocess_debug)
-#endif
- {
- printk("User process fault: interruption code 0x%lX\n",
- int_code);
- printk("failing address: %lX\n", address);
- show_regs(regs);
- }
-#endif
+ report_user_fault(regs, int_code, SIGSEGV, address);
si.si_signo = SIGSEGV;
si.si_code = si_code;
si.si_addr = (void __user *) address;
diff --git a/arch/score/include/asm/scatterlist.h b/arch/score/include/asm/scatterlist.h
index 9f533b8362c7..4fa1a6658215 100644
--- a/arch/score/include/asm/scatterlist.h
+++ b/arch/score/include/asm/scatterlist.h
@@ -1,6 +1,8 @@
#ifndef _ASM_SCORE_SCATTERLIST_H
#define _ASM_SCORE_SCATTERLIST_H
+#define ISA_DMA_THRESHOLD (~0UL)
+
#include <asm-generic/scatterlist.h>
#endif /* _ASM_SCORE_SCATTERLIST_H */
diff --git a/arch/score/include/asm/thread_info.h b/arch/score/include/asm/thread_info.h
index 55939992c27d..8570d08f58c1 100644
--- a/arch/score/include/asm/thread_info.h
+++ b/arch/score/include/asm/thread_info.h
@@ -92,7 +92,7 @@ register struct thread_info *__current_thread_info __asm__("r28");
#define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */
#define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling
TIF_NEED_RESCHED */
-#define TIF_MEMDIE 18
+#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 8d90564c2bcf..c5ee4ce60b57 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -44,6 +44,7 @@ config SUPERH32
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_ARCH_KGDB
select HAVE_HW_BREAKPOINT
+ select HAVE_MIXED_BREAKPOINTS_REGS
select PERF_EVENTS if HAVE_HW_BREAKPOINT
select ARCH_HIBERNATION_POSSIBLE if MMU
@@ -157,7 +158,6 @@ config LOCKDEP_SUPPORT
config HAVE_LATENCYTOP_SUPPORT
def_bool y
- depends on !SMP
config ARCH_HAS_ILOG2_U32
def_bool n
@@ -186,6 +186,9 @@ config DMA_NONCOHERENT
config NEED_DMA_MAP_STATE
def_bool DMA_NONCOHERENT
+config NEED_SG_DMA_LENGTH
+ def_bool y
+
source "init/Kconfig"
source "kernel/Kconfig.freezer"
@@ -237,6 +240,8 @@ config CPU_SHX2
config CPU_SHX3
bool
select DMA_COHERENT
+ select SYS_SUPPORTS_SMP
+ select SYS_SUPPORTS_NUMA
config ARCH_SHMOBILE
bool
@@ -453,18 +458,12 @@ config CPU_SUBTYPE_SH7786
select CPU_SH4A
select CPU_SHX3
select CPU_HAS_PTEAEX
- select ARCH_SPARSEMEM_ENABLE
- select SYS_SUPPORTS_NUMA
- select SYS_SUPPORTS_SMP
select GENERIC_CLOCKEVENTS_BROADCAST if SMP
config CPU_SUBTYPE_SHX3
bool "Support SH-X3 processor"
select CPU_SH4A
select CPU_SHX3
- select ARCH_SPARSEMEM_ENABLE
- select SYS_SUPPORTS_NUMA
- select SYS_SUPPORTS_SMP
select GENERIC_CLOCKEVENTS_BROADCAST if SMP
# SH4AL-DSP Processor Support
@@ -634,7 +633,7 @@ config KEXEC
config CRASH_DUMP
bool "kernel crash dumps (EXPERIMENTAL)"
- depends on SUPERH32 && EXPERIMENTAL
+ depends on SUPERH32 && EXPERIMENTAL && BROKEN_ON_SMP
help
Generate crash dump after being started by kexec.
This should be normally only set in special crash dump kernels
@@ -706,6 +705,13 @@ config NR_CPUS
This is purely to save memory - each supported CPU adds
approximately eight kilobytes to the kernel image.
+config HOTPLUG_CPU
+ bool "Support for hot-pluggable CPUs (EXPERIMENTAL)"
+ depends on SMP && HOTPLUG && EXPERIMENTAL
+ help
+ Say Y here to experiment with turning CPUs off and on. CPUs
+ can be controlled through /sys/devices/system/cpu.
+
source "kernel/Kconfig.preempt"
config GUSA
@@ -732,6 +738,8 @@ config GUSA_RB
LLSC, this should be more efficient than the other alternative of
disabling interrupts around the atomic sequence.
+source "drivers/sh/Kconfig"
+
endmenu
menu "Boot options"
@@ -863,4 +871,20 @@ source "security/Kconfig"
source "crypto/Kconfig"
+menuconfig VIRTUALIZATION
+ bool "Virtualization"
+ default n
+ ---help---
+ Say Y here to get to see options for using your Linux host to run other
+ operating systems inside virtual machines (guests).
+ This option alone does not add any kernel code.
+
+ If you say N, all options in this submenu will be skipped and disabled.
+
+if VIRTUALIZATION
+
+source drivers/virtio/Kconfig
+
+endif # VIRTUALIZATION
+
source "lib/Kconfig"
diff --git a/arch/sh/Makefile b/arch/sh/Makefile
index 588579ac2e35..307b3a4a790b 100644
--- a/arch/sh/Makefile
+++ b/arch/sh/Makefile
@@ -83,7 +83,6 @@ defaultimage-$(CONFIG_SH_AP325RXA) := uImage
defaultimage-$(CONFIG_SH_7724_SOLUTION_ENGINE) := uImage
defaultimage-$(CONFIG_SH_7206_SOLUTION_ENGINE) := vmlinux
defaultimage-$(CONFIG_SH_7619_SOLUTION_ENGINE) := vmlinux
-defaultimage-$(CONFIG_SH_SDK7786) := vmlinux.bin
# Set some sensible Kbuild defaults
KBUILD_IMAGE := $(defaultimage-y)
diff --git a/arch/sh/boards/board-urquell.c b/arch/sh/boards/board-urquell.c
index a9bd6e3ee10b..d81c609decc7 100644
--- a/arch/sh/boards/board-urquell.c
+++ b/arch/sh/boards/board-urquell.c
@@ -24,6 +24,7 @@
#include <cpu/sh7786.h>
#include <asm/heartbeat.h>
#include <asm/sizes.h>
+#include <asm/smp-ops.h>
/*
* bit 1234 5678
@@ -203,6 +204,8 @@ static void __init urquell_setup(char **cmdline_p)
printk(KERN_INFO "Renesas Technology Corp. Urquell support.\n");
pm_power_off = urquell_power_off;
+
+ register_smp_ops(&shx3_smp_ops);
}
/*
diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c
index 6c13b92742e8..49714258732e 100644
--- a/arch/sh/boards/mach-ecovec24/setup.c
+++ b/arch/sh/boards/mach-ecovec24/setup.c
@@ -11,6 +11,7 @@
#include <linux/init.h>
#include <linux/device.h>
#include <linux/platform_device.h>
+#include <linux/mfd/sh_mobile_sdhi.h>
#include <linux/mtd/physmap.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
@@ -442,7 +443,9 @@ static void sdhi0_set_pwr(struct platform_device *pdev, int state)
}
static struct sh_mobile_sdhi_info sdhi0_info = {
- .set_pwr = sdhi0_set_pwr,
+ .dma_slave_tx = SHDMA_SLAVE_SDHI0_TX,
+ .dma_slave_rx = SHDMA_SLAVE_SDHI0_RX,
+ .set_pwr = sdhi0_set_pwr,
};
static struct resource sdhi0_resources[] = {
@@ -478,7 +481,9 @@ static void sdhi1_set_pwr(struct platform_device *pdev, int state)
}
static struct sh_mobile_sdhi_info sdhi1_info = {
- .set_pwr = sdhi1_set_pwr,
+ .dma_slave_tx = SHDMA_SLAVE_SDHI1_TX,
+ .dma_slave_rx = SHDMA_SLAVE_SDHI1_RX,
+ .set_pwr = sdhi1_set_pwr,
};
static struct resource sdhi1_resources[] = {
@@ -710,8 +715,6 @@ static struct clk_ops fsimck_clk_ops = {
};
static struct clk fsimckb_clk = {
- .name = "fsimckb_clk",
- .id = -1,
.ops = &fsimck_clk_ops,
.enable_reg = (void __iomem *)FCLKBCR,
.rate = 0, /* unknown */
@@ -771,6 +774,51 @@ static struct platform_device irda_device = {
.resource = irda_resources,
};
+#include <media/ak881x.h>
+#include <media/sh_vou.h>
+
+struct ak881x_pdata ak881x_pdata = {
+ .flags = AK881X_IF_MODE_SLAVE,
+};
+
+static struct i2c_board_info ak8813 = {
+ I2C_BOARD_INFO("ak8813", 0x20),
+ .platform_data = &ak881x_pdata,
+};
+
+struct sh_vou_pdata sh_vou_pdata = {
+ .bus_fmt = SH_VOU_BUS_8BIT,
+ .flags = SH_VOU_HSYNC_LOW | SH_VOU_VSYNC_LOW,
+ .board_info = &ak8813,
+ .i2c_adap = 0,
+ .module_name = "ak881x",
+};
+
+static struct resource sh_vou_resources[] = {
+ [0] = {
+ .start = 0xfe960000,
+ .end = 0xfe962043,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 55,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device vou_device = {
+ .name = "sh-vou",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(sh_vou_resources),
+ .resource = sh_vou_resources,
+ .dev = {
+ .platform_data = &sh_vou_pdata,
+ },
+ .archdata = {
+ .hwblk_id = HWBLK_VOU,
+ },
+};
+
static struct platform_device *ecovec_devices[] __initdata = {
&heartbeat_device,
&nor_flash_device,
@@ -792,6 +840,7 @@ static struct platform_device *ecovec_devices[] __initdata = {
&camera_devices[2],
&fsi_device,
&irda_device,
+ &vou_device,
};
#ifdef CONFIG_I2C
@@ -1138,16 +1187,20 @@ static int __init arch_setup(void)
/* set SPU2 clock to 83.4 MHz */
clk = clk_get(NULL, "spu_clk");
- clk_set_rate(clk, clk_round_rate(clk, 83333333));
- clk_put(clk);
+ if (clk) {
+ clk_set_rate(clk, clk_round_rate(clk, 83333333));
+ clk_put(clk);
+ }
/* change parent of FSI B */
clk = clk_get(NULL, "fsib_clk");
- clk_register(&fsimckb_clk);
- clk_set_parent(clk, &fsimckb_clk);
- clk_set_rate(clk, 11000);
- clk_set_rate(&fsimckb_clk, 11000);
- clk_put(clk);
+ if (clk) {
+ clk_register(&fsimckb_clk);
+ clk_set_parent(clk, &fsimckb_clk);
+ clk_set_rate(clk, 11000);
+ clk_set_rate(&fsimckb_clk, 11000);
+ clk_put(clk);
+ }
gpio_request(GPIO_PTU0, NULL);
gpio_direction_output(GPIO_PTU0, 0);
@@ -1159,8 +1212,10 @@ static int __init arch_setup(void)
/* set VPU clock to 166 MHz */
clk = clk_get(NULL, "vpu_clk");
- clk_set_rate(clk, clk_round_rate(clk, 166000000));
- clk_put(clk);
+ if (clk) {
+ clk_set_rate(clk, clk_round_rate(clk, 166000000));
+ clk_put(clk);
+ }
/* enable IrDA */
gpio_request(GPIO_FN_IRDA_OUT, NULL);
@@ -1175,6 +1230,38 @@ static int __init arch_setup(void)
i2c_register_board_info(1, i2c1_devices,
ARRAY_SIZE(i2c1_devices));
+ /* VOU */
+ gpio_request(GPIO_FN_DV_D15, NULL);
+ gpio_request(GPIO_FN_DV_D14, NULL);
+ gpio_request(GPIO_FN_DV_D13, NULL);
+ gpio_request(GPIO_FN_DV_D12, NULL);
+ gpio_request(GPIO_FN_DV_D11, NULL);
+ gpio_request(GPIO_FN_DV_D10, NULL);
+ gpio_request(GPIO_FN_DV_D9, NULL);
+ gpio_request(GPIO_FN_DV_D8, NULL);
+ gpio_request(GPIO_FN_DV_CLKI, NULL);
+ gpio_request(GPIO_FN_DV_CLK, NULL);
+ gpio_request(GPIO_FN_DV_VSYNC, NULL);
+ gpio_request(GPIO_FN_DV_HSYNC, NULL);
+
+ /* AK8813 power / reset sequence */
+ gpio_request(GPIO_PTG4, NULL);
+ gpio_request(GPIO_PTU3, NULL);
+ /* Reset */
+ gpio_direction_output(GPIO_PTG4, 0);
+ /* Power down */
+ gpio_direction_output(GPIO_PTU3, 1);
+
+ udelay(10);
+
+ /* Power up, reset */
+ gpio_set_value(GPIO_PTU3, 0);
+
+ udelay(10);
+
+ /* Remove reset */
+ gpio_set_value(GPIO_PTG4, 1);
+
return platform_add_devices(ecovec_devices,
ARRAY_SIZE(ecovec_devices));
}
diff --git a/arch/sh/boards/mach-highlander/setup.c b/arch/sh/boards/mach-highlander/setup.c
index affd66747ba3..a5ecfbacaf36 100644
--- a/arch/sh/boards/mach-highlander/setup.c
+++ b/arch/sh/boards/mach-highlander/setup.c
@@ -14,6 +14,7 @@
* for more details.
*/
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
#include <linux/types.h>
@@ -26,6 +27,7 @@
#include <net/ax88796.h>
#include <asm/machvec.h>
#include <mach/highlander.h>
+#include <asm/clkdev.h>
#include <asm/clock.h>
#include <asm/heartbeat.h>
#include <asm/io.h>
@@ -326,7 +328,6 @@ static struct clk_ops ivdr_clk_ops = {
};
static struct clk ivdr_clk = {
- .name = "ivdr_clk",
.ops = &ivdr_clk_ops,
};
@@ -334,6 +335,13 @@ static struct clk *r7780rp_clocks[] = {
&ivdr_clk,
};
+#define CLKDEV_CON_ID(_id, _clk) { .con_id = _id, .clk = _clk }
+
+static struct clk_lookup lookups[] = {
+ /* main clocks */
+ CLKDEV_CON_ID("ivdr_clk", &ivdr_clk),
+};
+
static void r7780rp_power_off(void)
{
if (mach_is_r7780mp() || mach_is_r7785rp())
@@ -370,6 +378,8 @@ static void __init highlander_setup(char **cmdline_p)
clk_enable(clk);
}
+ clkdev_add_table(lookups, ARRAY_SIZE(lookups));
+
__raw_writew(0x0000, PA_OBLED); /* Clear LED. */
if (mach_is_r7780rp())
diff --git a/arch/sh/boards/mach-kfr2r09/setup.c b/arch/sh/boards/mach-kfr2r09/setup.c
index b2cd0ed8664e..68994a163f6c 100644
--- a/arch/sh/boards/mach-kfr2r09/setup.c
+++ b/arch/sh/boards/mach-kfr2r09/setup.c
@@ -10,6 +10,8 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
+#include <linux/mfd/sh_mobile_sdhi.h>
+#include <linux/mfd/tmio.h>
#include <linux/mtd/physmap.h>
#include <linux/mtd/onenand.h>
#include <linux/delay.h>
@@ -356,10 +358,19 @@ static struct resource kfr2r09_sh_sdhi0_resources[] = {
},
};
+static struct sh_mobile_sdhi_info sh7724_sdhi0_data = {
+ .dma_slave_tx = SHDMA_SLAVE_SDHI0_TX,
+ .dma_slave_rx = SHDMA_SLAVE_SDHI0_RX,
+ .tmio_flags = TMIO_MMC_WRPROTECT_DISABLE,
+};
+
static struct platform_device kfr2r09_sh_sdhi0_device = {
.name = "sh_mobile_sdhi",
.num_resources = ARRAY_SIZE(kfr2r09_sh_sdhi0_resources),
.resource = kfr2r09_sh_sdhi0_resources,
+ .dev = {
+ .platform_data = &sh7724_sdhi0_data,
+ },
.archdata = {
.hwblk_id = HWBLK_SDHI0,
},
diff --git a/arch/sh/boards/mach-migor/setup.c b/arch/sh/boards/mach-migor/setup.c
index 7da0fc94a01e..87185de20446 100644
--- a/arch/sh/boards/mach-migor/setup.c
+++ b/arch/sh/boards/mach-migor/setup.c
@@ -12,6 +12,7 @@
#include <linux/interrupt.h>
#include <linux/input.h>
#include <linux/input/sh_keysc.h>
+#include <linux/mfd/sh_mobile_sdhi.h>
#include <linux/mtd/physmap.h>
#include <linux/mtd/nand.h>
#include <linux/i2c.h>
@@ -402,10 +403,18 @@ static struct resource sdhi_cn9_resources[] = {
},
};
+static struct sh_mobile_sdhi_info sh7724_sdhi_data = {
+ .dma_slave_tx = SHDMA_SLAVE_SDHI0_TX,
+ .dma_slave_rx = SHDMA_SLAVE_SDHI0_RX,
+};
+
static struct platform_device sdhi_cn9_device = {
.name = "sh_mobile_sdhi",
.num_resources = ARRAY_SIZE(sdhi_cn9_resources),
.resource = sdhi_cn9_resources,
+ .dev = {
+ .platform_data = &sh7724_sdhi_data,
+ },
.archdata = {
.hwblk_id = HWBLK_SDHI,
},
diff --git a/arch/sh/boards/mach-sdk7786/setup.c b/arch/sh/boards/mach-sdk7786/setup.c
index f094ea2ee783..2ec1ea5cf8ef 100644
--- a/arch/sh/boards/mach-sdk7786/setup.c
+++ b/arch/sh/boards/mach-sdk7786/setup.c
@@ -21,6 +21,7 @@
#include <asm/heartbeat.h>
#include <asm/sizes.h>
#include <asm/reboot.h>
+#include <asm/smp-ops.h>
static struct resource heartbeat_resource = {
.start = 0x07fff8b0,
@@ -165,6 +166,19 @@ static void sdk7786_restart(char *cmd)
fpga_write_reg(0xa5a5, SRSTR);
}
+static void sdk7786_power_off(void)
+{
+ fpga_write_reg(fpga_read_reg(PWRCR) | PWRCR_PDWNREQ, PWRCR);
+
+ /*
+ * It can take up to 20us for the R8C to do its job, back off and
+ * wait a bit until we've been shut off. Even though newer FPGA
+ * versions don't set the ACK bit, the latency issue remains.
+ */
+ while ((fpga_read_reg(PWRCR) & PWRCR_PDWNACK) == 0)
+ cpu_sleep();
+}
+
/* Initialize the board */
static void __init sdk7786_setup(char **cmdline_p)
{
@@ -175,6 +189,9 @@ static void __init sdk7786_setup(char **cmdline_p)
pr_info("\tPCB revision:\t%d\n", fpga_read_reg(PCBRR) & 0xf);
machine_ops.restart = sdk7786_restart;
+ pm_power_off = sdk7786_power_off;
+
+ register_smp_ops(&shx3_smp_ops);
}
/*
diff --git a/arch/sh/boards/mach-se/7724/setup.c b/arch/sh/boards/mach-se/7724/setup.c
index ccaa290e9aba..f9b82546c2df 100644
--- a/arch/sh/boards/mach-se/7724/setup.c
+++ b/arch/sh/boards/mach-se/7724/setup.c
@@ -14,6 +14,7 @@
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
+#include <linux/mfd/sh_mobile_sdhi.h>
#include <linux/mtd/physmap.h>
#include <linux/delay.h>
#include <linux/smc91x.h>
@@ -276,8 +277,6 @@ static struct clk_ops fsimck_clk_ops = {
};
static struct clk fsimcka_clk = {
- .name = "fsimcka_clk",
- .id = -1,
.ops = &fsimck_clk_ops,
.enable_reg = (void __iomem *)FCLKACR,
.rate = 0, /* unknown */
@@ -464,11 +463,19 @@ static struct resource sdhi0_cn7_resources[] = {
},
};
+static struct sh_mobile_sdhi_info sh7724_sdhi0_data = {
+ .dma_slave_tx = SHDMA_SLAVE_SDHI0_TX,
+ .dma_slave_rx = SHDMA_SLAVE_SDHI0_RX,
+};
+
static struct platform_device sdhi0_cn7_device = {
.name = "sh_mobile_sdhi",
.id = 0,
.num_resources = ARRAY_SIZE(sdhi0_cn7_resources),
.resource = sdhi0_cn7_resources,
+ .dev = {
+ .platform_data = &sh7724_sdhi0_data,
+ },
.archdata = {
.hwblk_id = HWBLK_SDHI0,
},
@@ -487,11 +494,19 @@ static struct resource sdhi1_cn8_resources[] = {
},
};
+static struct sh_mobile_sdhi_info sh7724_sdhi1_data = {
+ .dma_slave_tx = SHDMA_SLAVE_SDHI1_TX,
+ .dma_slave_rx = SHDMA_SLAVE_SDHI1_RX,
+};
+
static struct platform_device sdhi1_cn8_device = {
.name = "sh_mobile_sdhi",
.id = 1,
.num_resources = ARRAY_SIZE(sdhi1_cn8_resources),
.resource = sdhi1_cn8_resources,
+ .dev = {
+ .platform_data = &sh7724_sdhi1_data,
+ },
.archdata = {
.hwblk_id = HWBLK_SDHI1,
},
@@ -517,6 +532,52 @@ static struct platform_device irda_device = {
.resource = irda_resources,
};
+#include <media/ak881x.h>
+#include <media/sh_vou.h>
+
+struct ak881x_pdata ak881x_pdata = {
+ .flags = AK881X_IF_MODE_SLAVE,
+};
+
+static struct i2c_board_info ak8813 = {
+ /* With open J18 jumper address is 0x21 */
+ I2C_BOARD_INFO("ak8813", 0x20),
+ .platform_data = &ak881x_pdata,
+};
+
+struct sh_vou_pdata sh_vou_pdata = {
+ .bus_fmt = SH_VOU_BUS_8BIT,
+ .flags = SH_VOU_HSYNC_LOW | SH_VOU_VSYNC_LOW,
+ .board_info = &ak8813,
+ .i2c_adap = 0,
+ .module_name = "ak881x",
+};
+
+static struct resource sh_vou_resources[] = {
+ [0] = {
+ .start = 0xfe960000,
+ .end = 0xfe962043,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 55,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device vou_device = {
+ .name = "sh-vou",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(sh_vou_resources),
+ .resource = sh_vou_resources,
+ .dev = {
+ .platform_data = &sh_vou_pdata,
+ },
+ .archdata = {
+ .hwblk_id = HWBLK_VOU,
+ },
+};
+
static struct platform_device *ms7724se_devices[] __initdata = {
&heartbeat_device,
&smc91x_eth_device,
@@ -532,6 +593,7 @@ static struct platform_device *ms7724se_devices[] __initdata = {
&sdhi0_cn7_device,
&sdhi1_cn8_device,
&irda_device,
+ &vou_device,
};
/* I2C device */
@@ -616,6 +678,7 @@ static int __init devices_setup(void)
{
u16 sw = __raw_readw(SW4140); /* select camera, monitor */
struct clk *clk;
+ u16 fpga_out;
/* register board specific self-refresh code */
sh_mobile_register_self_refresh(SUSP_SH_STANDBY | SUSP_SH_SF |
@@ -625,14 +688,26 @@ static int __init devices_setup(void)
&ms7724se_sdram_leave_start,
&ms7724se_sdram_leave_end);
/* Reset Release */
- __raw_writew(__raw_readw(FPGA_OUT) &
- ~((1 << 1) | /* LAN */
- (1 << 6) | /* VIDEO DAC */
- (1 << 7) | /* AK4643 */
- (1 << 8) | /* IrDA */
- (1 << 12) | /* USB0 */
- (1 << 14)), /* RMII */
- FPGA_OUT);
+ fpga_out = __raw_readw(FPGA_OUT);
+ /* bit4: NTSC_PDN, bit5: NTSC_RESET */
+ fpga_out &= ~((1 << 1) | /* LAN */
+ (1 << 4) | /* AK8813 PDN */
+ (1 << 5) | /* AK8813 RESET */
+ (1 << 6) | /* VIDEO DAC */
+ (1 << 7) | /* AK4643 */
+ (1 << 8) | /* IrDA */
+ (1 << 12) | /* USB0 */
+ (1 << 14)); /* RMII */
+ __raw_writew(fpga_out | (1 << 4), FPGA_OUT);
+
+ udelay(10);
+
+ /* AK8813 RESET */
+ __raw_writew(fpga_out | (1 << 5), FPGA_OUT);
+
+ udelay(10);
+
+ __raw_writew(fpga_out, FPGA_OUT);
/* turn on USB clocks, use external clock */
__raw_writew((__raw_readw(PORT_MSELCRB) & ~0xc000) | 0x8000, PORT_MSELCRB);
@@ -771,16 +846,20 @@ static int __init devices_setup(void)
/* set SPU2 clock to 83.4 MHz */
clk = clk_get(NULL, "spu_clk");
- clk_set_rate(clk, clk_round_rate(clk, 83333333));
- clk_put(clk);
+ if (clk) {
+ clk_set_rate(clk, clk_round_rate(clk, 83333333));
+ clk_put(clk);
+ }
/* change parent of FSI A */
clk = clk_get(NULL, "fsia_clk");
- clk_register(&fsimcka_clk);
- clk_set_parent(clk, &fsimcka_clk);
- clk_set_rate(clk, 11000);
- clk_set_rate(&fsimcka_clk, 11000);
- clk_put(clk);
+ if (clk) {
+ clk_register(&fsimcka_clk);
+ clk_set_parent(clk, &fsimcka_clk);
+ clk_set_rate(clk, 11000);
+ clk_set_rate(&fsimcka_clk, 11000);
+ clk_put(clk);
+ }
/* SDHI0 connected to cn7 */
gpio_request(GPIO_FN_SDHI0CD, NULL);
@@ -860,6 +939,20 @@ static int __init devices_setup(void)
lcdc_info.ch[0].flags = LCDC_FLAGS_DWPOL;
}
+ /* VOU */
+ gpio_request(GPIO_FN_DV_D15, NULL);
+ gpio_request(GPIO_FN_DV_D14, NULL);
+ gpio_request(GPIO_FN_DV_D13, NULL);
+ gpio_request(GPIO_FN_DV_D12, NULL);
+ gpio_request(GPIO_FN_DV_D11, NULL);
+ gpio_request(GPIO_FN_DV_D10, NULL);
+ gpio_request(GPIO_FN_DV_D9, NULL);
+ gpio_request(GPIO_FN_DV_D8, NULL);
+ gpio_request(GPIO_FN_DV_CLKI, NULL);
+ gpio_request(GPIO_FN_DV_CLK, NULL);
+ gpio_request(GPIO_FN_DV_VSYNC, NULL);
+ gpio_request(GPIO_FN_DV_HSYNC, NULL);
+
return platform_add_devices(ms7724se_devices,
ARRAY_SIZE(ms7724se_devices));
}
diff --git a/arch/sh/boards/mach-x3proto/setup.c b/arch/sh/boards/mach-x3proto/setup.c
index e284592fd42a..102bf56befb4 100644
--- a/arch/sh/boards/mach-x3proto/setup.c
+++ b/arch/sh/boards/mach-x3proto/setup.c
@@ -19,6 +19,7 @@
#include <linux/usb/r8a66597.h>
#include <linux/usb/m66592.h>
#include <asm/ilsel.h>
+#include <asm/smp-ops.h>
static struct resource heartbeat_resources[] = {
[0] = {
@@ -152,7 +153,13 @@ static void __init x3proto_init_irq(void)
__raw_writel(__raw_readl(0xfe410000) | (1 << 21), 0xfe410000);
}
+static void __init x3proto_setup(char **cmdline_p)
+{
+ register_smp_ops(&shx3_smp_ops);
+}
+
static struct sh_machine_vector mv_x3proto __initmv = {
.mv_name = "x3proto",
+ .mv_setup = x3proto_setup,
.mv_init_irq = x3proto_init_irq,
};
diff --git a/arch/sh/boot/compressed/Makefile b/arch/sh/boot/compressed/Makefile
index 5d660b90943b..cfa5a087a886 100644
--- a/arch/sh/boot/compressed/Makefile
+++ b/arch/sh/boot/compressed/Makefile
@@ -14,10 +14,16 @@ OBJECTS = $(obj)/head_$(BITS).o $(obj)/misc.o $(obj)/cache.o
#
# IMAGE_OFFSET is the load offset of the compression loader
#
+ifeq ($(CONFIG_32BIT),y)
+IMAGE_OFFSET := $(shell /bin/bash -c 'printf "0x%08x" \
+ $$[$(CONFIG_MEMORY_START) + \
+ $(CONFIG_BOOT_LINK_OFFSET)]')
+else
IMAGE_OFFSET := $(shell /bin/bash -c 'printf "0x%08x" \
$$[$(CONFIG_PAGE_OFFSET) + \
$(KERNEL_MEMORY) + \
$(CONFIG_BOOT_LINK_OFFSET)]')
+endif
LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
diff --git a/arch/sh/boot/compressed/head_32.S b/arch/sh/boot/compressed/head_32.S
index 02a30935f0b9..200c1d4f1efe 100644
--- a/arch/sh/boot/compressed/head_32.S
+++ b/arch/sh/boot/compressed/head_32.S
@@ -97,7 +97,11 @@ init_stack_addr:
decompress_kernel_addr:
.long decompress_kernel
kernel_start_addr:
+#ifdef CONFIG_32BIT
+ .long ___pa(_text+PAGE_SIZE)
+#else
.long _text+PAGE_SIZE
+#endif
.align 9
fake_headers_as_bzImage:
diff --git a/arch/sh/configs/ap325rxa_defconfig b/arch/sh/configs/ap325rxa_defconfig
index 8931a60e37a4..7fefa2b9e28c 100644
--- a/arch/sh/configs/ap325rxa_defconfig
+++ b/arch/sh/configs/ap325rxa_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.33-rc2
-# Mon Jan 4 11:10:59 2010
+# Linux kernel version: 2.6.34-rc5
+# Tue May 18 15:04:35 2010
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -13,8 +13,8 @@ CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
-CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_IRQ_PER_CPU=y
+CONFIG_SPARSE_IRQ=y
CONFIG_GENERIC_GPIO=y
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -32,6 +32,7 @@ CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DMA_NONCOHERENT=y
+CONFIG_NEED_DMA_MAP_STATE=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
CONFIG_CONSTRUCTORS=y
@@ -47,9 +48,11 @@ CONFIG_LOCALVERSION=""
CONFIG_HAVE_KERNEL_GZIP=y
CONFIG_HAVE_KERNEL_BZIP2=y
CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_LZO=y
CONFIG_KERNEL_GZIP=y
# CONFIG_KERNEL_BZIP2 is not set
# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_LZO is not set
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
@@ -71,11 +74,6 @@ CONFIG_RCU_FANOUT=32
# CONFIG_TREE_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_GROUP_SCHED=y
-CONFIG_FAIR_GROUP_SCHED=y
-# CONFIG_RT_GROUP_SCHED is not set
-CONFIG_USER_SCHED=y
-# CONFIG_CGROUP_SCHED is not set
# CONFIG_CGROUPS is not set
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
@@ -107,7 +105,7 @@ CONFIG_PERF_USE_VMALLOC=y
#
# Kernel Performance Events And Counters
#
-# CONFIG_PERF_EVENTS is not set
+CONFIG_PERF_EVENTS=y
# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_COMPAT_BRK=y
@@ -116,13 +114,13 @@ CONFIG_SLAB=y
# CONFIG_SLOB is not set
# CONFIG_PROFILING is not set
CONFIG_HAVE_OPROFILE=y
-CONFIG_HAVE_IOREMAP_PROT=y
CONFIG_HAVE_KPROBES=y
CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_DMA_ATTRS=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+CONFIG_HAVE_HW_BREAKPOINT=y
#
# GCOV-based kernel profiling
@@ -237,8 +235,8 @@ CONFIG_FORCE_MAX_ZONEORDER=11
CONFIG_MEMORY_START=0x08000000
CONFIG_MEMORY_SIZE=0x08000000
CONFIG_29BIT=y
-# CONFIG_PMB_ENABLE is not set
-# CONFIG_X2TLB is not set
+# CONFIG_PMB is not set
+CONFIG_X2TLB=y
CONFIG_VSYSCALL=y
CONFIG_ARCH_FLATMEM_ENABLE=y
CONFIG_ARCH_SPARSEMEM_ENABLE=y
@@ -246,6 +244,8 @@ CONFIG_ARCH_SPARSEMEM_DEFAULT=y
CONFIG_MAX_ACTIVE_REGIONS=1
CONFIG_ARCH_POPULATES_NODE_MAP=y
CONFIG_ARCH_SELECT_MEMORY_MODEL=y
+CONFIG_IOREMAP_FIXED=y
+CONFIG_UNCACHED_MAPPING=y
CONFIG_PAGE_SIZE_4KB=y
# CONFIG_PAGE_SIZE_8KB is not set
# CONFIG_PAGE_SIZE_16KB is not set
@@ -261,7 +261,7 @@ CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
-CONFIG_NR_QUICK=2
+CONFIG_NR_QUICK=1
# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
@@ -335,7 +335,7 @@ CONFIG_SECCOMP=y
# CONFIG_PREEMPT_VOLUNTARY is not set
CONFIG_PREEMPT=y
CONFIG_GUSA=y
-# CONFIG_SPARSE_IRQ is not set
+# CONFIG_INTC_USERIMASK is not set
#
# Boot options
@@ -371,6 +371,7 @@ CONFIG_SUSPEND=y
CONFIG_SUSPEND_FREEZER=y
# CONFIG_HIBERNATION is not set
CONFIG_PM_RUNTIME=y
+CONFIG_PM_OPS=y
# CONFIG_CPU_IDLE is not set
CONFIG_NET=y
@@ -378,7 +379,6 @@ CONFIG_NET=y
# Networking options
#
CONFIG_PACKET=y
-# CONFIG_PACKET_MMAP is not set
CONFIG_UNIX=y
# CONFIG_NET_KEY is not set
CONFIG_INET=y
@@ -594,6 +594,7 @@ CONFIG_MISC_DEVICES=y
# CONFIG_ICS932S401 is not set
# CONFIG_ENCLOSURE_SERVICES is not set
# CONFIG_ISL29003 is not set
+# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_DS1682 is not set
# CONFIG_TI_DAC7512 is not set
# CONFIG_C2PORT is not set
@@ -613,6 +614,7 @@ CONFIG_HAVE_IDE=y
#
# SCSI device support
#
+CONFIG_SCSI_MOD=y
# CONFIG_RAID_ATTRS is not set
CONFIG_SCSI=y
CONFIG_SCSI_DMA=y
@@ -774,6 +776,7 @@ CONFIG_SERIAL_SH_SCI_NR_UARTS=6
CONFIG_SERIAL_SH_SCI_CONSOLE=y
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_TIMBERDALE is not set
CONFIG_UNIX98_PTYS=y
# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
CONFIG_LEGACY_PTYS=y
@@ -802,6 +805,7 @@ CONFIG_I2C_HELPER_AUTO=y
# CONFIG_I2C_OCORES is not set
CONFIG_I2C_SH_MOBILE=y
# CONFIG_I2C_SIMTEC is not set
+# CONFIG_I2C_XILINX is not set
#
# External I2C/SMBus adapter drivers
@@ -814,15 +818,9 @@ CONFIG_I2C_SH_MOBILE=y
#
# CONFIG_I2C_PCA_PLATFORM is not set
# CONFIG_I2C_STUB is not set
-
-#
-# Miscellaneous I2C Chip support
-#
-# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
# CONFIG_I2C_DEBUG_BUS is not set
-# CONFIG_I2C_DEBUG_CHIP is not set
CONFIG_SPI=y
CONFIG_SPI_MASTER=y
@@ -853,13 +851,16 @@ CONFIG_GPIOLIB=y
#
# Memory mapped GPIO expanders:
#
+# CONFIG_GPIO_IT8761E is not set
#
# I2C GPIO expanders:
#
+# CONFIG_GPIO_MAX7300 is not set
# CONFIG_GPIO_MAX732X is not set
# CONFIG_GPIO_PCA953X is not set
# CONFIG_GPIO_PCF857X is not set
+# CONFIG_GPIO_ADP5588 is not set
#
# PCI GPIO expanders:
@@ -891,22 +892,25 @@ CONFIG_SSB_POSSIBLE=y
# Multifunction device drivers
#
# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_88PM860X is not set
# CONFIG_MFD_SM501 is not set
# CONFIG_MFD_SH_MOBILE_SDHI is not set
# CONFIG_HTC_PASIC3 is not set
+# CONFIG_HTC_I2CPLD is not set
# CONFIG_TPS65010 is not set
# CONFIG_TWL4030_CORE is not set
# CONFIG_MFD_TMIO is not set
# CONFIG_PMIC_DA903X is not set
# CONFIG_PMIC_ADP5520 is not set
+# CONFIG_MFD_MAX8925 is not set
# CONFIG_MFD_WM8400 is not set
# CONFIG_MFD_WM831X is not set
# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_WM8994 is not set
# CONFIG_MFD_PCF50633 is not set
# CONFIG_MFD_MC13783 is not set
# CONFIG_AB3100_CORE is not set
# CONFIG_EZX_PCAP is not set
-# CONFIG_MFD_88PM8607 is not set
# CONFIG_AB4500_CORE is not set
# CONFIG_REGULATOR is not set
CONFIG_MEDIA_SUPPORT=y
@@ -1041,10 +1045,7 @@ CONFIG_MMC_BLOCK_BOUNCE=y
# MMC/SD/SDIO Host Controller Drivers
#
# CONFIG_MMC_SDHCI is not set
-# CONFIG_MMC_AT91 is not set
-# CONFIG_MMC_ATMELMCI is not set
CONFIG_MMC_SPI=y
-# CONFIG_MMC_TMIO is not set
# CONFIG_MEMSTICK is not set
# CONFIG_NEW_LEDS is not set
# CONFIG_ACCESSIBILITY is not set
@@ -1120,8 +1121,6 @@ CONFIG_RTC_DRV_PCF8563=y
CONFIG_UIO=y
# CONFIG_UIO_PDRV is not set
CONFIG_UIO_PDRV_GENIRQ=y
-# CONFIG_UIO_SMX is not set
-# CONFIG_UIO_SERCOS3 is not set
#
# TI VLYNQ
@@ -1206,6 +1205,7 @@ CONFIG_MISC_FILESYSTEMS=y
# CONFIG_EFS_FS is not set
# CONFIG_JFFS2_FS is not set
# CONFIG_UBIFS_FS is not set
+# CONFIG_LOGFS is not set
# CONFIG_CRAMFS is not set
# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
@@ -1234,6 +1234,7 @@ CONFIG_SUNRPC=y
# CONFIG_RPCSEC_GSS_KRB5 is not set
# CONFIG_RPCSEC_GSS_SPKM3 is not set
# CONFIG_SMB_FS is not set
+# CONFIG_CEPH_FS is not set
# CONFIG_CIFS is not set
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
@@ -1426,6 +1427,7 @@ CONFIG_CRYPTO_CBC=y
#
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_HW=y
+# CONFIG_VIRTUALIZATION is not set
# CONFIG_BINARY_PRINTF is not set
#
diff --git a/arch/sh/configs/cayman_defconfig b/arch/sh/configs/cayman_defconfig
index 92589a950d07..edae0a6830da 100644
--- a/arch/sh/configs/cayman_defconfig
+++ b/arch/sh/configs/cayman_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.33-rc2
-# Mon Jan 4 11:14:50 2010
+# Linux kernel version: 2.6.34-rc5
+# Tue May 18 18:01:02 2010
#
CONFIG_SUPERH=y
# CONFIG_SUPERH32 is not set
@@ -13,7 +13,6 @@ CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
-CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_IRQ_PER_CPU=y
# CONFIG_GENERIC_GPIO is not set
CONFIG_GENERIC_TIME=y
@@ -32,6 +31,7 @@ CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DMA_NONCOHERENT=y
+CONFIG_NEED_DMA_MAP_STATE=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
CONFIG_CONSTRUCTORS=y
@@ -47,9 +47,11 @@ CONFIG_LOCALVERSION_AUTO=y
CONFIG_HAVE_KERNEL_GZIP=y
CONFIG_HAVE_KERNEL_BZIP2=y
CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_LZO=y
CONFIG_KERNEL_GZIP=y
# CONFIG_KERNEL_BZIP2 is not set
# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_LZO is not set
CONFIG_SWAP=y
# CONFIG_SYSVIPC is not set
CONFIG_POSIX_MQUEUE=y
@@ -70,7 +72,6 @@ CONFIG_RCU_FANOUT=32
# CONFIG_TREE_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_GROUP_SCHED is not set
# CONFIG_CGROUPS is not set
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
@@ -114,7 +115,6 @@ CONFIG_SLAB=y
# CONFIG_SLOB is not set
# CONFIG_PROFILING is not set
CONFIG_HAVE_OPROFILE=y
-CONFIG_HAVE_IOREMAP_PROT=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_DMA_ATTRS=y
CONFIG_HAVE_CLK=y
@@ -204,6 +204,7 @@ CONFIG_ARCH_SPARSEMEM_DEFAULT=y
CONFIG_MAX_ACTIVE_REGIONS=1
CONFIG_ARCH_POPULATES_NODE_MAP=y
CONFIG_ARCH_SELECT_MEMORY_MODEL=y
+CONFIG_IOREMAP_FIXED=y
CONFIG_PAGE_SIZE_4KB=y
# CONFIG_PAGE_SIZE_8KB is not set
# CONFIG_PAGE_SIZE_16KB is not set
@@ -225,7 +226,7 @@ CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
-CONFIG_NR_QUICK=2
+CONFIG_NR_QUICK=1
# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
@@ -294,7 +295,6 @@ CONFIG_HZ=250
# CONFIG_PREEMPT_NONE is not set
# CONFIG_PREEMPT_VOLUNTARY is not set
CONFIG_PREEMPT=y
-# CONFIG_SPARSE_IRQ is not set
#
# Boot options
@@ -309,9 +309,9 @@ CONFIG_ENTRY_OFFSET=0x00001000
# Bus options
#
CONFIG_PCI=y
+CONFIG_PCI_DOMAINS=y
# CONFIG_PCIEPORTBUS is not set
# CONFIG_ARCH_SUPPORTS_MSI is not set
-CONFIG_PCI_LEGACY=y
# CONFIG_PCI_DEBUG is not set
# CONFIG_PCI_STUB is not set
# CONFIG_PCI_IOV is not set
@@ -337,7 +337,6 @@ CONFIG_NET=y
# Networking options
#
CONFIG_PACKET=y
-# CONFIG_PACKET_MMAP is not set
CONFIG_UNIX=y
CONFIG_XFRM=y
# CONFIG_XFRM_USER is not set
@@ -463,6 +462,7 @@ CONFIG_MISC_DEVICES=y
# CONFIG_ENCLOSURE_SERVICES is not set
# CONFIG_HP_ILO is not set
# CONFIG_ISL29003 is not set
+# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_DS1682 is not set
# CONFIG_C2PORT is not set
@@ -480,6 +480,7 @@ CONFIG_HAVE_IDE=y
#
# SCSI device support
#
+CONFIG_SCSI_MOD=y
# CONFIG_RAID_ATTRS is not set
CONFIG_SCSI=y
CONFIG_SCSI_DMA=y
@@ -569,7 +570,7 @@ CONFIG_SCSI_LOWLEVEL=y
#
#
-# See the help texts for more information.
+# The newer stack is recommended.
#
# CONFIG_FIREWIRE is not set
# CONFIG_IEEE1394 is not set
@@ -638,6 +639,8 @@ CONFIG_NETDEV_10000=y
# CONFIG_CHELSIO_T1 is not set
CONFIG_CHELSIO_T3_DEPENDS=y
# CONFIG_CHELSIO_T3 is not set
+CONFIG_CHELSIO_T4_DEPENDS=y
+# CONFIG_CHELSIO_T4 is not set
# CONFIG_ENIC is not set
# CONFIG_IXGBE is not set
# CONFIG_IXGB is not set
@@ -650,6 +653,7 @@ CONFIG_CHELSIO_T3_DEPENDS=y
# CONFIG_MLX4_CORE is not set
# CONFIG_TEHUTI is not set
# CONFIG_BNX2X is not set
+# CONFIG_QLCNIC is not set
# CONFIG_QLGE is not set
# CONFIG_SFC is not set
# CONFIG_BE2NET is not set
@@ -732,6 +736,7 @@ CONFIG_DEVKMEM=y
#
# CONFIG_SERIAL_SH_SCI is not set
# CONFIG_SERIAL_JSM is not set
+# CONFIG_SERIAL_TIMBERDALE is not set
CONFIG_UNIX98_PTYS=y
# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
CONFIG_LEGACY_PTYS=y
@@ -779,6 +784,7 @@ CONFIG_I2C_HELPER_AUTO=y
# CONFIG_I2C_OCORES is not set
# CONFIG_I2C_SH_MOBILE is not set
# CONFIG_I2C_SIMTEC is not set
+# CONFIG_I2C_XILINX is not set
#
# External I2C/SMBus adapter drivers
@@ -791,15 +797,9 @@ CONFIG_I2C_HELPER_AUTO=y
#
# CONFIG_I2C_PCA_PLATFORM is not set
# CONFIG_I2C_STUB is not set
-
-#
-# Miscellaneous I2C Chip support
-#
-# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
# CONFIG_I2C_DEBUG_BUS is not set
-# CONFIG_I2C_DEBUG_CHIP is not set
# CONFIG_SPI is not set
#
@@ -823,10 +823,11 @@ CONFIG_HWMON=y
# CONFIG_SENSORS_ADM1029 is not set
# CONFIG_SENSORS_ADM1031 is not set
# CONFIG_SENSORS_ADM9240 is not set
+# CONFIG_SENSORS_ADT7411 is not set
# CONFIG_SENSORS_ADT7462 is not set
# CONFIG_SENSORS_ADT7470 is not set
-# CONFIG_SENSORS_ADT7473 is not set
# CONFIG_SENSORS_ADT7475 is not set
+# CONFIG_SENSORS_ASC7621 is not set
# CONFIG_SENSORS_ATXP1 is not set
# CONFIG_SENSORS_DS1621 is not set
# CONFIG_SENSORS_I5K_AMB is not set
@@ -863,6 +864,7 @@ CONFIG_HWMON=y
# CONFIG_SENSORS_SMSC47M192 is not set
# CONFIG_SENSORS_SMSC47B397 is not set
# CONFIG_SENSORS_ADS7828 is not set
+# CONFIG_SENSORS_AMC6821 is not set
# CONFIG_SENSORS_THMC50 is not set
# CONFIG_SENSORS_TMP401 is not set
# CONFIG_SENSORS_TMP421 is not set
@@ -909,9 +911,9 @@ CONFIG_SSB_POSSIBLE=y
# CONFIG_HTC_PASIC3 is not set
# CONFIG_MFD_TMIO is not set
# CONFIG_MFD_WM8400 is not set
-# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_WM8994 is not set
# CONFIG_MFD_PCF50633 is not set
-# CONFIG_AB3100_CORE is not set
+# CONFIG_LPC_SCH is not set
# CONFIG_REGULATOR is not set
# CONFIG_MEDIA_SUPPORT is not set
@@ -919,6 +921,7 @@ CONFIG_SSB_POSSIBLE=y
# Graphics support
#
CONFIG_VGA_ARB=y
+CONFIG_VGA_ARB_MAX_GPUS=16
# CONFIG_DRM is not set
# CONFIG_VGASTATE is not set
CONFIG_VIDEO_OUTPUT_CONTROL=y
@@ -1125,6 +1128,7 @@ CONFIG_MISC_FILESYSTEMS=y
# CONFIG_BEFS_FS is not set
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
+# CONFIG_LOGFS is not set
# CONFIG_CRAMFS is not set
# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
@@ -1153,6 +1157,7 @@ CONFIG_SUNRPC=y
# CONFIG_RPCSEC_GSS_KRB5 is not set
# CONFIG_RPCSEC_GSS_SPKM3 is not set
# CONFIG_SMB_FS is not set
+# CONFIG_CEPH_FS is not set
# CONFIG_CIFS is not set
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
@@ -1208,6 +1213,7 @@ CONFIG_SCHEDSTATS=y
# CONFIG_TIMER_STATS is not set
# CONFIG_DEBUG_OBJECTS is not set
# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_KMEMLEAK is not set
CONFIG_DEBUG_PREEMPT=y
# CONFIG_DEBUG_RT_MUTEXES is not set
# CONFIG_RT_MUTEX_TESTER is not set
@@ -1234,6 +1240,7 @@ CONFIG_FRAME_POINTER=y
# CONFIG_BACKTRACE_SELF_TEST is not set
# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
+# CONFIG_LKDTM is not set
# CONFIG_FAULT_INJECTION is not set
# CONFIG_LATENCYTOP is not set
# CONFIG_SYSCTL_SYSCALL_CHECK is not set
@@ -1361,6 +1368,7 @@ CONFIG_CRYPTO=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_HW=y
# CONFIG_CRYPTO_DEV_HIFN_795X is not set
+# CONFIG_VIRTUALIZATION is not set
# CONFIG_BINARY_PRINTF is not set
#
diff --git a/arch/sh/configs/dreamcast_defconfig b/arch/sh/configs/dreamcast_defconfig
index 55f652be954b..b96bf3d2cd01 100644
--- a/arch/sh/configs/dreamcast_defconfig
+++ b/arch/sh/configs/dreamcast_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.33-rc2
-# Mon Jan 4 11:17:35 2010
+# Linux kernel version: 2.6.34-rc5
+# Tue May 18 15:11:55 2010
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -13,8 +13,8 @@ CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
-CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_IRQ_PER_CPU=y
+CONFIG_SPARSE_IRQ=y
# CONFIG_GENERIC_GPIO is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -33,6 +33,7 @@ CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DMA_NONCOHERENT=y
+CONFIG_NEED_DMA_MAP_STATE=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
CONFIG_CONSTRUCTORS=y
@@ -48,9 +49,11 @@ CONFIG_LOCALVERSION_AUTO=y
CONFIG_HAVE_KERNEL_GZIP=y
CONFIG_HAVE_KERNEL_BZIP2=y
CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_LZO=y
CONFIG_KERNEL_GZIP=y
# CONFIG_KERNEL_BZIP2 is not set
# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_LZO is not set
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
@@ -72,7 +75,6 @@ CONFIG_RCU_FANOUT=32
# CONFIG_TREE_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_GROUP_SCHED is not set
# CONFIG_CGROUPS is not set
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
@@ -124,6 +126,7 @@ CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_DMA_ATTRS=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+CONFIG_HAVE_HW_BREAKPOINT=y
#
# GCOV-based kernel profiling
@@ -242,6 +245,7 @@ CONFIG_ARCH_SPARSEMEM_DEFAULT=y
CONFIG_MAX_ACTIVE_REGIONS=1
CONFIG_ARCH_POPULATES_NODE_MAP=y
CONFIG_ARCH_SELECT_MEMORY_MODEL=y
+CONFIG_UNCACHED_MAPPING=y
CONFIG_PAGE_SIZE_4KB=y
# CONFIG_PAGE_SIZE_8KB is not set
# CONFIG_PAGE_SIZE_16KB is not set
@@ -263,7 +267,7 @@ CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
-CONFIG_NR_QUICK=2
+CONFIG_NR_QUICK=1
# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
@@ -360,7 +364,6 @@ CONFIG_SECCOMP=y
CONFIG_PREEMPT=y
CONFIG_GUSA=y
# CONFIG_GUSA_RB is not set
-# CONFIG_SPARSE_IRQ is not set
#
# Boot options
@@ -377,9 +380,9 @@ CONFIG_CMDLINE="console=ttySC1,115200 panic=3"
#
CONFIG_MAPLE=y
CONFIG_PCI=y
+CONFIG_PCI_DOMAINS=y
# CONFIG_PCIEPORTBUS is not set
# CONFIG_ARCH_SUPPORTS_MSI is not set
-CONFIG_PCI_LEGACY=y
# CONFIG_PCI_STUB is not set
# CONFIG_PCI_IOV is not set
# CONFIG_PCCARD is not set
@@ -404,7 +407,6 @@ CONFIG_NET=y
# Networking options
#
CONFIG_PACKET=y
-# CONFIG_PACKET_MMAP is not set
CONFIG_UNIX=y
CONFIG_XFRM=y
# CONFIG_XFRM_USER is not set
@@ -532,6 +534,7 @@ CONFIG_HAVE_IDE=y
#
# SCSI device support
#
+CONFIG_SCSI_MOD=y
# CONFIG_RAID_ATTRS is not set
# CONFIG_SCSI is not set
# CONFIG_SCSI_DMA is not set
@@ -549,7 +552,7 @@ CONFIG_HAVE_IDE=y
#
#
-# See the help texts for more information.
+# The newer stack is recommended.
#
# CONFIG_FIREWIRE is not set
# CONFIG_IEEE1394 is not set
@@ -589,6 +592,7 @@ CONFIG_NET_PCI=y
# CONFIG_PCNET32 is not set
# CONFIG_AMD8111_ETH is not set
# CONFIG_ADAPTEC_STARFIRE is not set
+# CONFIG_KSZ884X_PCI is not set
# CONFIG_B44 is not set
# CONFIG_FORCEDETH is not set
# CONFIG_E100 is not set
@@ -717,6 +721,7 @@ CONFIG_SERIAL_SH_SCI_CONSOLE=y
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
# CONFIG_SERIAL_JSM is not set
+# CONFIG_SERIAL_TIMBERDALE is not set
CONFIG_UNIX98_PTYS=y
# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
CONFIG_LEGACY_PTYS=y
@@ -771,6 +776,7 @@ CONFIG_SSB_POSSIBLE=y
# CONFIG_MFD_SH_MOBILE_SDHI is not set
# CONFIG_HTC_PASIC3 is not set
# CONFIG_MFD_TMIO is not set
+# CONFIG_LPC_SCH is not set
# CONFIG_REGULATOR is not set
# CONFIG_MEDIA_SUPPORT is not set
@@ -778,6 +784,7 @@ CONFIG_SSB_POSSIBLE=y
# Graphics support
#
CONFIG_VGA_ARB=y
+CONFIG_VGA_ARB_MAX_GPUS=16
# CONFIG_DRM is not set
# CONFIG_VGASTATE is not set
CONFIG_VIDEO_OUTPUT_CONTROL=m
@@ -918,7 +925,6 @@ CONFIG_RTC_LIB=y
# CONFIG_EXT2_FS is not set
# CONFIG_EXT3_FS is not set
# CONFIG_EXT4_FS is not set
-CONFIG_EXT4_USE_FOR_EXT23=y
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
@@ -976,6 +982,7 @@ CONFIG_MISC_FILESYSTEMS=y
# CONFIG_BEFS_FS is not set
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
+# CONFIG_LOGFS is not set
# CONFIG_CRAMFS is not set
# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
@@ -990,6 +997,7 @@ CONFIG_NETWORK_FILESYSTEMS=y
# CONFIG_NFS_FS is not set
# CONFIG_NFSD is not set
# CONFIG_SMB_FS is not set
+# CONFIG_CEPH_FS is not set
# CONFIG_CIFS is not set
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
@@ -1135,6 +1143,7 @@ CONFIG_CRYPTO=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_HW=y
# CONFIG_CRYPTO_DEV_HIFN_795X is not set
+# CONFIG_VIRTUALIZATION is not set
# CONFIG_BINARY_PRINTF is not set
#
diff --git a/arch/sh/configs/ecovec24-romimage_defconfig b/arch/sh/configs/ecovec24-romimage_defconfig
index 662c1ad20494..58aec9dd5630 100644
--- a/arch/sh/configs/ecovec24-romimage_defconfig
+++ b/arch/sh/configs/ecovec24-romimage_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.33-rc2
-# Mon Jan 4 11:18:17 2010
+# Linux kernel version: 2.6.34-rc5
+# Tue May 18 15:14:56 2010
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -13,8 +13,8 @@ CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
-CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_IRQ_PER_CPU=y
+CONFIG_SPARSE_IRQ=y
CONFIG_GENERIC_GPIO=y
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -32,6 +32,7 @@ CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DMA_NONCOHERENT=y
+CONFIG_NEED_DMA_MAP_STATE=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
CONFIG_CONSTRUCTORS=y
@@ -46,9 +47,11 @@ CONFIG_LOCALVERSION=""
CONFIG_HAVE_KERNEL_GZIP=y
CONFIG_HAVE_KERNEL_BZIP2=y
CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_LZO=y
CONFIG_KERNEL_GZIP=y
# CONFIG_KERNEL_BZIP2 is not set
# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_LZO is not set
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
@@ -71,11 +74,6 @@ CONFIG_RCU_FANOUT=32
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_GROUP_SCHED=y
-CONFIG_FAIR_GROUP_SCHED=y
-# CONFIG_RT_GROUP_SCHED is not set
-CONFIG_USER_SCHED=y
-# CONFIG_CGROUP_SCHED is not set
# CONFIG_CGROUPS is not set
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
@@ -86,6 +84,7 @@ CONFIG_INITRAMFS_SOURCE=""
CONFIG_RD_GZIP=y
# CONFIG_RD_BZIP2 is not set
# CONFIG_RD_LZMA is not set
+# CONFIG_RD_LZO is not set
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_SYSCTL=y
CONFIG_ANON_INODES=y
@@ -111,7 +110,7 @@ CONFIG_PERF_USE_VMALLOC=y
#
# Kernel Performance Events And Counters
#
-# CONFIG_PERF_EVENTS is not set
+CONFIG_PERF_EVENTS=y
# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_COMPAT_BRK=y
@@ -120,13 +119,13 @@ CONFIG_SLAB=y
# CONFIG_SLOB is not set
# CONFIG_PROFILING is not set
CONFIG_HAVE_OPROFILE=y
-CONFIG_HAVE_IOREMAP_PROT=y
CONFIG_HAVE_KPROBES=y
CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_DMA_ATTRS=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+CONFIG_HAVE_HW_BREAKPOINT=y
#
# GCOV-based kernel profiling
@@ -237,8 +236,8 @@ CONFIG_FORCE_MAX_ZONEORDER=11
CONFIG_MEMORY_START=0x08000000
CONFIG_MEMORY_SIZE=0x10000000
CONFIG_29BIT=y
-# CONFIG_PMB_ENABLE is not set
-# CONFIG_X2TLB is not set
+# CONFIG_PMB is not set
+CONFIG_X2TLB=y
CONFIG_VSYSCALL=y
CONFIG_ARCH_FLATMEM_ENABLE=y
CONFIG_ARCH_SPARSEMEM_ENABLE=y
@@ -246,6 +245,8 @@ CONFIG_ARCH_SPARSEMEM_DEFAULT=y
CONFIG_MAX_ACTIVE_REGIONS=1
CONFIG_ARCH_POPULATES_NODE_MAP=y
CONFIG_ARCH_SELECT_MEMORY_MODEL=y
+CONFIG_IOREMAP_FIXED=y
+CONFIG_UNCACHED_MAPPING=y
CONFIG_PAGE_SIZE_4KB=y
# CONFIG_PAGE_SIZE_8KB is not set
# CONFIG_PAGE_SIZE_16KB is not set
@@ -261,7 +262,7 @@ CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
-CONFIG_NR_QUICK=2
+CONFIG_NR_QUICK=1
# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
@@ -336,7 +337,7 @@ CONFIG_PREEMPT_NONE=y
# CONFIG_PREEMPT_VOLUNTARY is not set
# CONFIG_PREEMPT is not set
CONFIG_GUSA=y
-# CONFIG_SPARSE_IRQ is not set
+# CONFIG_INTC_USERIMASK is not set
#
# Boot options
@@ -370,6 +371,7 @@ CONFIG_PM=y
# CONFIG_SUSPEND is not set
# CONFIG_HIBERNATION is not set
CONFIG_PM_RUNTIME=y
+CONFIG_PM_OPS=y
# CONFIG_CPU_IDLE is not set
CONFIG_NET=y
@@ -377,7 +379,6 @@ CONFIG_NET=y
# Networking options
#
CONFIG_PACKET=y
-CONFIG_PACKET_MMAP=y
CONFIG_UNIX=y
# CONFIG_NET_KEY is not set
CONFIG_INET=y
@@ -485,6 +486,7 @@ CONFIG_HAVE_IDE=y
#
# SCSI device support
#
+CONFIG_SCSI_MOD=y
# CONFIG_RAID_ATTRS is not set
CONFIG_SCSI=y
CONFIG_SCSI_DMA=y
@@ -585,6 +587,7 @@ CONFIG_WLAN=y
# CONFIG_USB_PEGASUS is not set
# CONFIG_USB_RTL8150 is not set
# CONFIG_USB_USBNET is not set
+# CONFIG_USB_IPHETH is not set
# CONFIG_WAN is not set
# CONFIG_PPP is not set
# CONFIG_SLIP is not set
@@ -650,6 +653,7 @@ CONFIG_SERIAL_SH_SCI_NR_UARTS=6
CONFIG_SERIAL_SH_SCI_CONSOLE=y
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_TIMBERDALE is not set
CONFIG_UNIX98_PTYS=y
# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
CONFIG_LEGACY_PTYS=y
@@ -678,6 +682,7 @@ CONFIG_I2C_HELPER_AUTO=y
# CONFIG_I2C_OCORES is not set
CONFIG_I2C_SH_MOBILE=y
# CONFIG_I2C_SIMTEC is not set
+# CONFIG_I2C_XILINX is not set
#
# External I2C/SMBus adapter drivers
@@ -690,15 +695,9 @@ CONFIG_I2C_SH_MOBILE=y
# Other I2C/SMBus bus drivers
#
# CONFIG_I2C_PCA_PLATFORM is not set
-
-#
-# Miscellaneous I2C Chip support
-#
-# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
# CONFIG_I2C_DEBUG_BUS is not set
-# CONFIG_I2C_DEBUG_CHIP is not set
# CONFIG_SPI is not set
#
@@ -712,13 +711,16 @@ CONFIG_GPIO_SYSFS=y
#
# Memory mapped GPIO expanders:
#
+# CONFIG_GPIO_IT8761E is not set
#
# I2C GPIO expanders:
#
+# CONFIG_GPIO_MAX7300 is not set
# CONFIG_GPIO_MAX732X is not set
# CONFIG_GPIO_PCA953X is not set
# CONFIG_GPIO_PCF857X is not set
+# CONFIG_GPIO_ADP5588 is not set
#
# PCI GPIO expanders:
@@ -747,20 +749,23 @@ CONFIG_SSB_POSSIBLE=y
# Multifunction device drivers
#
# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_88PM860X is not set
# CONFIG_MFD_SM501 is not set
# CONFIG_MFD_SH_MOBILE_SDHI is not set
# CONFIG_HTC_PASIC3 is not set
+# CONFIG_HTC_I2CPLD is not set
# CONFIG_TPS65010 is not set
# CONFIG_TWL4030_CORE is not set
# CONFIG_MFD_TMIO is not set
# CONFIG_PMIC_DA903X is not set
# CONFIG_PMIC_ADP5520 is not set
+# CONFIG_MFD_MAX8925 is not set
# CONFIG_MFD_WM8400 is not set
# CONFIG_MFD_WM831X is not set
# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_WM8994 is not set
# CONFIG_MFD_PCF50633 is not set
# CONFIG_AB3100_CORE is not set
-# CONFIG_MFD_88PM8607 is not set
# CONFIG_REGULATOR is not set
# CONFIG_MEDIA_SUPPORT is not set
@@ -868,7 +873,6 @@ CONFIG_USB_STORAGE=y
# CONFIG_USB_RIO500 is not set
# CONFIG_USB_LEGOTOWER is not set
# CONFIG_USB_LCD is not set
-# CONFIG_USB_BERRY_CHARGE is not set
# CONFIG_USB_LED is not set
# CONFIG_USB_CYPRESS_CY7C63 is not set
# CONFIG_USB_CYTHERM is not set
@@ -880,7 +884,6 @@ CONFIG_USB_STORAGE=y
# CONFIG_USB_IOWARRIOR is not set
# CONFIG_USB_TEST is not set
# CONFIG_USB_ISIGHTFW is not set
-# CONFIG_USB_VST is not set
# CONFIG_USB_GADGET is not set
#
@@ -911,7 +914,6 @@ CONFIG_EXT2_FS=y
# CONFIG_EXT2_FS_XIP is not set
# CONFIG_EXT3_FS is not set
# CONFIG_EXT4_FS is not set
-CONFIG_EXT4_USE_FOR_EXT23=y
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
@@ -1027,6 +1029,7 @@ CONFIG_DEBUG_FS=y
CONFIG_DEBUG_BUGVERBOSE=y
# CONFIG_DEBUG_MEMORY_INIT is not set
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_LKDTM is not set
# CONFIG_LATENCYTOP is not set
# CONFIG_SYSCTL_SYSCALL_CHECK is not set
CONFIG_HAVE_FUNCTION_TRACER=y
@@ -1056,6 +1059,7 @@ CONFIG_HAVE_ARCH_KGDB=y
CONFIG_DEFAULT_SECURITY_DAC=y
CONFIG_DEFAULT_SECURITY=""
# CONFIG_CRYPTO is not set
+# CONFIG_VIRTUALIZATION is not set
# CONFIG_BINARY_PRINTF is not set
#
diff --git a/arch/sh/configs/ecovec24_defconfig b/arch/sh/configs/ecovec24_defconfig
index 6041c66dd10e..67c665671c6c 100644
--- a/arch/sh/configs/ecovec24_defconfig
+++ b/arch/sh/configs/ecovec24_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.34-rc2
-# Mon Mar 29 02:21:58 2010
+# Linux kernel version: 2.6.34-rc5
+# Tue May 18 15:17:28 2010
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -336,6 +336,7 @@ CONFIG_SECCOMP=y
# CONFIG_PREEMPT_VOLUNTARY is not set
CONFIG_PREEMPT=y
CONFIG_GUSA=y
+# CONFIG_INTC_USERIMASK is not set
#
# Boot options
@@ -762,6 +763,7 @@ CONFIG_WLAN=y
# CONFIG_USB_PEGASUS is not set
# CONFIG_USB_RTL8150 is not set
# CONFIG_USB_USBNET is not set
+# CONFIG_USB_IPHETH is not set
# CONFIG_WAN is not set
# CONFIG_PPP is not set
# CONFIG_SLIP is not set
@@ -1755,6 +1757,7 @@ CONFIG_CRYPTO_CBC=y
#
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_HW=y
+# CONFIG_VIRTUALIZATION is not set
# CONFIG_BINARY_PRINTF is not set
#
diff --git a/arch/sh/configs/edosk7705_defconfig b/arch/sh/configs/edosk7705_defconfig
index 72f8718dd738..0883d873ea64 100644
--- a/arch/sh/configs/edosk7705_defconfig
+++ b/arch/sh/configs/edosk7705_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.33-rc2
-# Mon Jan 4 11:24:26 2010
+# Linux kernel version: 2.6.34-rc5
+# Tue May 18 15:21:52 2010
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -12,8 +12,8 @@ CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
-CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_IRQ_PER_CPU=y
+CONFIG_SPARSE_IRQ=y
# CONFIG_GENERIC_GPIO is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -29,6 +29,7 @@ CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DMA_NONCOHERENT=y
+CONFIG_NEED_DMA_MAP_STATE=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
CONFIG_CONSTRUCTORS=y
@@ -43,9 +44,11 @@ CONFIG_LOCALVERSION=""
CONFIG_HAVE_KERNEL_GZIP=y
CONFIG_HAVE_KERNEL_BZIP2=y
CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_LZO=y
CONFIG_KERNEL_GZIP=y
# CONFIG_KERNEL_BZIP2 is not set
# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_LZO is not set
# CONFIG_SYSVIPC is not set
# CONFIG_BSD_PROCESS_ACCT is not set
@@ -61,11 +64,11 @@ CONFIG_RCU_FANOUT=32
# CONFIG_TREE_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=17
-# CONFIG_CGROUPS is not set
# CONFIG_RELAY is not set
# CONFIG_NAMESPACES is not set
# CONFIG_BLK_DEV_INITRD is not set
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_ANON_INODES=y
CONFIG_EMBEDDED=y
# CONFIG_UID16 is not set
# CONFIG_KALLSYMS is not set
@@ -87,7 +90,7 @@ CONFIG_PERF_USE_VMALLOC=y
#
# Kernel Performance Events And Counters
#
-# CONFIG_PERF_EVENTS is not set
+CONFIG_PERF_EVENTS=y
# CONFIG_PERF_COUNTERS is not set
# CONFIG_VM_EVENT_COUNTERS is not set
# CONFIG_COMPAT_BRK is not set
@@ -103,6 +106,7 @@ CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_DMA_ATTRS=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+CONFIG_HAVE_HW_BREAKPOINT=y
#
# GCOV-based kernel profiling
@@ -202,6 +206,7 @@ CONFIG_ARCH_POPULATES_NODE_MAP=y
CONFIG_ARCH_SELECT_MEMORY_MODEL=y
CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y
+CONFIG_UNCACHED_MAPPING=y
CONFIG_PAGE_SIZE_4KB=y
# CONFIG_PAGE_SIZE_8KB is not set
# CONFIG_PAGE_SIZE_16KB is not set
@@ -217,7 +222,7 @@ CONFIG_SPLIT_PTLOCK_CPUS=4
CONFIG_MIGRATION=y
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
-CONFIG_NR_QUICK=2
+CONFIG_NR_QUICK=1
# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
@@ -330,6 +335,7 @@ CONFIG_HAVE_IDE=y
#
# SCSI device support
#
+CONFIG_SCSI_MOD=y
# CONFIG_SCSI_DMA is not set
# CONFIG_SCSI_NETLINK is not set
# CONFIG_PHONE is not set
@@ -361,6 +367,7 @@ CONFIG_HAVE_IDE=y
# Non-8250 serial port support
#
# CONFIG_SERIAL_SH_SCI is not set
+# CONFIG_SERIAL_TIMBERDALE is not set
# CONFIG_UNIX98_PTYS is not set
# CONFIG_LEGACY_PTYS is not set
# CONFIG_IPMI_HANDLER is not set
@@ -440,6 +447,7 @@ CONFIG_INOTIFY_USER=y
#
# Caches
#
+# CONFIG_FSCACHE is not set
#
# Pseudo filesystems
@@ -491,6 +499,7 @@ CONFIG_HAVE_ARCH_KGDB=y
CONFIG_DEFAULT_SECURITY_DAC=y
CONFIG_DEFAULT_SECURITY=""
# CONFIG_CRYPTO is not set
+# CONFIG_VIRTUALIZATION is not set
# CONFIG_BINARY_PRINTF is not set
#
diff --git a/arch/sh/configs/edosk7760_defconfig b/arch/sh/configs/edosk7760_defconfig
index 0932e6d656eb..8257f5776374 100644
--- a/arch/sh/configs/edosk7760_defconfig
+++ b/arch/sh/configs/edosk7760_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.33-rc2
-# Mon Jan 4 11:24:44 2010
+# Linux kernel version: 2.6.34-rc5
+# Tue May 18 15:23:10 2010
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -13,8 +13,8 @@ CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
-CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_IRQ_PER_CPU=y
+CONFIG_SPARSE_IRQ=y
# CONFIG_GENERIC_GPIO is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -31,6 +31,7 @@ CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DMA_NONCOHERENT=y
+CONFIG_NEED_DMA_MAP_STATE=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
CONFIG_CONSTRUCTORS=y
@@ -46,9 +47,11 @@ CONFIG_LOCALVERSION_AUTO=y
CONFIG_HAVE_KERNEL_GZIP=y
CONFIG_HAVE_KERNEL_BZIP2=y
CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_LZO=y
CONFIG_KERNEL_GZIP=y
# CONFIG_KERNEL_BZIP2 is not set
# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_LZO is not set
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
@@ -72,7 +75,6 @@ CONFIG_RCU_FANOUT=32
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=17
-# CONFIG_GROUP_SCHED is not set
# CONFIG_CGROUPS is not set
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
@@ -83,6 +85,7 @@ CONFIG_INITRAMFS_SOURCE=""
CONFIG_RD_GZIP=y
# CONFIG_RD_BZIP2 is not set
# CONFIG_RD_LZMA is not set
+# CONFIG_RD_LZO is not set
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_SYSCTL=y
CONFIG_ANON_INODES=y
@@ -110,8 +113,9 @@ CONFIG_PERF_USE_VMALLOC=y
#
# Kernel Performance Events And Counters
#
-# CONFIG_PERF_EVENTS is not set
+CONFIG_PERF_EVENTS=y
# CONFIG_PERF_COUNTERS is not set
+# CONFIG_DEBUG_PERF_USE_VMALLOC is not set
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_SLUB_DEBUG=y
CONFIG_COMPAT_BRK=y
@@ -128,6 +132,7 @@ CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_DMA_ATTRS=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+CONFIG_HAVE_HW_BREAKPOINT=y
#
# GCOV-based kernel profiling
@@ -246,6 +251,7 @@ CONFIG_ARCH_SPARSEMEM_DEFAULT=y
CONFIG_MAX_ACTIVE_REGIONS=1
CONFIG_ARCH_POPULATES_NODE_MAP=y
CONFIG_ARCH_SELECT_MEMORY_MODEL=y
+CONFIG_UNCACHED_MAPPING=y
CONFIG_PAGE_SIZE_4KB=y
# CONFIG_PAGE_SIZE_8KB is not set
# CONFIG_PAGE_SIZE_16KB is not set
@@ -261,7 +267,7 @@ CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
-CONFIG_NR_QUICK=2
+CONFIG_NR_QUICK=1
# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
@@ -342,7 +348,6 @@ CONFIG_SCHED_HRTICK=y
CONFIG_PREEMPT=y
CONFIG_GUSA=y
# CONFIG_GUSA_RB is not set
-# CONFIG_SPARSE_IRQ is not set
#
# Boot options
@@ -379,7 +384,6 @@ CONFIG_NET=y
# Networking options
#
CONFIG_PACKET=y
-# CONFIG_PACKET_MMAP is not set
CONFIG_UNIX=y
# CONFIG_NET_KEY is not set
CONFIG_INET=y
@@ -579,6 +583,7 @@ CONFIG_HAVE_IDE=y
#
# SCSI device support
#
+CONFIG_SCSI_MOD=y
# CONFIG_RAID_ATTRS is not set
# CONFIG_SCSI is not set
# CONFIG_SCSI_DMA is not set
@@ -685,6 +690,7 @@ CONFIG_SERIAL_SH_SCI_NR_UARTS=3
CONFIG_SERIAL_SH_SCI_CONSOLE=y
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_TIMBERDALE is not set
CONFIG_UNIX98_PTYS=y
# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
CONFIG_LEGACY_PTYS=y
@@ -713,6 +719,7 @@ CONFIG_I2C_HELPER_AUTO=y
CONFIG_I2C_SH7760=y
# CONFIG_I2C_SH_MOBILE is not set
# CONFIG_I2C_SIMTEC is not set
+# CONFIG_I2C_XILINX is not set
#
# External I2C/SMBus adapter drivers
@@ -725,15 +732,9 @@ CONFIG_I2C_SH7760=y
#
# CONFIG_I2C_PCA_PLATFORM is not set
# CONFIG_I2C_STUB is not set
-
-#
-# Miscellaneous I2C Chip support
-#
-# CONFIG_SENSORS_TSL2550 is not set
CONFIG_I2C_DEBUG_CORE=y
CONFIG_I2C_DEBUG_ALGO=y
CONFIG_I2C_DEBUG_BUS=y
-CONFIG_I2C_DEBUG_CHIP=y
# CONFIG_SPI is not set
#
@@ -756,6 +757,7 @@ CONFIG_SSB_POSSIBLE=y
# Multifunction device drivers
#
# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_88PM860X is not set
# CONFIG_MFD_SM501 is not set
# CONFIG_MFD_SH_MOBILE_SDHI is not set
# CONFIG_HTC_PASIC3 is not set
@@ -763,12 +765,13 @@ CONFIG_SSB_POSSIBLE=y
# CONFIG_MFD_TMIO is not set
# CONFIG_PMIC_DA903X is not set
# CONFIG_PMIC_ADP5520 is not set
+# CONFIG_MFD_MAX8925 is not set
# CONFIG_MFD_WM8400 is not set
# CONFIG_MFD_WM831X is not set
# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_WM8994 is not set
# CONFIG_MFD_PCF50633 is not set
# CONFIG_AB3100_CORE is not set
-# CONFIG_MFD_88PM8607 is not set
# CONFIG_REGULATOR is not set
# CONFIG_MEDIA_SUPPORT is not set
@@ -947,6 +950,7 @@ CONFIG_MISC_FILESYSTEMS=y
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
# CONFIG_JFFS2_FS is not set
+# CONFIG_LOGFS is not set
# CONFIG_CRAMFS is not set
# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
@@ -969,6 +973,7 @@ CONFIG_SUNRPC=y
# CONFIG_RPCSEC_GSS_KRB5 is not set
# CONFIG_RPCSEC_GSS_SPKM3 is not set
# CONFIG_SMB_FS is not set
+# CONFIG_CEPH_FS is not set
# CONFIG_CIFS is not set
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
@@ -1048,6 +1053,7 @@ CONFIG_TIMER_STATS=y
# CONFIG_DEBUG_OBJECTS is not set
# CONFIG_SLUB_DEBUG_ON is not set
# CONFIG_SLUB_STATS is not set
+# CONFIG_DEBUG_KMEMLEAK is not set
CONFIG_DEBUG_PREEMPT=y
# CONFIG_DEBUG_RT_MUTEXES is not set
# CONFIG_RT_MUTEX_TESTER is not set
@@ -1096,6 +1102,7 @@ CONFIG_FTRACE=y
CONFIG_BRANCH_PROFILE_NONE=y
# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
# CONFIG_PROFILE_ALL_BRANCHES is not set
+# CONFIG_KSYM_TRACER is not set
# CONFIG_STACK_TRACER is not set
# CONFIG_KMEMTRACE is not set
# CONFIG_WORKQUEUE_TRACER is not set
@@ -1214,6 +1221,7 @@ CONFIG_CRYPTO_DES=y
#
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_HW=y
+# CONFIG_VIRTUALIZATION is not set
# CONFIG_BINARY_PRINTF is not set
#
diff --git a/arch/sh/configs/espt_defconfig b/arch/sh/configs/espt_defconfig
index f899e5613f86..8acdc374d033 100644
--- a/arch/sh/configs/espt_defconfig
+++ b/arch/sh/configs/espt_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.33-rc2
-# Mon Jan 4 11:26:55 2010
+# Linux kernel version: 2.6.34-rc5
+# Tue May 18 15:27:14 2010
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -13,8 +13,8 @@ CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
-CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_IRQ_PER_CPU=y
+CONFIG_SPARSE_IRQ=y
# CONFIG_GENERIC_GPIO is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -31,6 +31,7 @@ CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DMA_NONCOHERENT=y
+CONFIG_NEED_DMA_MAP_STATE=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
CONFIG_CONSTRUCTORS=y
@@ -45,9 +46,11 @@ CONFIG_LOCALVERSION_AUTO=y
CONFIG_HAVE_KERNEL_GZIP=y
CONFIG_HAVE_KERNEL_BZIP2=y
CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_LZO=y
CONFIG_KERNEL_GZIP=y
# CONFIG_KERNEL_BZIP2 is not set
# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_LZO is not set
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
@@ -69,11 +72,6 @@ CONFIG_RCU_FANOUT=32
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_GROUP_SCHED=y
-CONFIG_FAIR_GROUP_SCHED=y
-# CONFIG_RT_GROUP_SCHED is not set
-CONFIG_USER_SCHED=y
-# CONFIG_CGROUP_SCHED is not set
# CONFIG_CGROUPS is not set
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
@@ -112,7 +110,6 @@ CONFIG_PERF_USE_VMALLOC=y
# Kernel Performance Events And Counters
#
CONFIG_PERF_EVENTS=y
-CONFIG_EVENT_PROFILE=y
# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_COMPAT_BRK=y
@@ -120,7 +117,6 @@ CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
CONFIG_PROFILING=y
-CONFIG_TRACEPOINTS=y
CONFIG_OPROFILE=y
CONFIG_HAVE_OPROFILE=y
# CONFIG_KPROBES is not set
@@ -131,6 +127,7 @@ CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_DMA_ATTRS=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+CONFIG_HAVE_HW_BREAKPOINT=y
#
# GCOV-based kernel profiling
@@ -243,7 +240,7 @@ CONFIG_FORCE_MAX_ZONEORDER=11
CONFIG_MEMORY_START=0x0c000000
CONFIG_MEMORY_SIZE=0x04000000
CONFIG_29BIT=y
-# CONFIG_PMB_ENABLE is not set
+# CONFIG_PMB is not set
CONFIG_VSYSCALL=y
CONFIG_ARCH_FLATMEM_ENABLE=y
CONFIG_ARCH_SPARSEMEM_ENABLE=y
@@ -253,6 +250,7 @@ CONFIG_ARCH_POPULATES_NODE_MAP=y
CONFIG_ARCH_SELECT_MEMORY_MODEL=y
CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y
+CONFIG_UNCACHED_MAPPING=y
CONFIG_PAGE_SIZE_4KB=y
# CONFIG_PAGE_SIZE_8KB is not set
# CONFIG_PAGE_SIZE_16KB is not set
@@ -269,7 +267,7 @@ CONFIG_SPLIT_PTLOCK_CPUS=4
CONFIG_MIGRATION=y
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
-CONFIG_NR_QUICK=2
+CONFIG_NR_QUICK=1
# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
@@ -344,7 +342,7 @@ CONFIG_PREEMPT_NONE=y
# CONFIG_PREEMPT_VOLUNTARY is not set
# CONFIG_PREEMPT is not set
CONFIG_GUSA=y
-# CONFIG_SPARSE_IRQ is not set
+# CONFIG_INTC_USERIMASK is not set
#
# Boot options
@@ -381,7 +379,6 @@ CONFIG_NET=y
# Networking options
#
CONFIG_PACKET=y
-# CONFIG_PACKET_MMAP is not set
CONFIG_UNIX=y
CONFIG_XFRM=y
# CONFIG_XFRM_USER is not set
@@ -444,7 +441,6 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# Network testing
#
# CONFIG_NET_PKTGEN is not set
-# CONFIG_NET_DROP_MONITOR is not set
# CONFIG_HAMRADIO is not set
# CONFIG_CAN is not set
# CONFIG_IRDA is not set
@@ -584,6 +580,7 @@ CONFIG_HAVE_IDE=y
#
# SCSI device support
#
+CONFIG_SCSI_MOD=y
# CONFIG_RAID_ATTRS is not set
CONFIG_SCSI=y
CONFIG_SCSI_DMA=y
@@ -688,6 +685,7 @@ CONFIG_WLAN=y
# CONFIG_USB_PEGASUS is not set
# CONFIG_USB_RTL8150 is not set
# CONFIG_USB_USBNET is not set
+# CONFIG_USB_IPHETH is not set
# CONFIG_WAN is not set
# CONFIG_PPP is not set
# CONFIG_SLIP is not set
@@ -753,6 +751,7 @@ CONFIG_SERIAL_SH_SCI_NR_UARTS=3
CONFIG_SERIAL_SH_SCI_CONSOLE=y
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_TIMBERDALE is not set
CONFIG_UNIX98_PTYS=y
# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
CONFIG_LEGACY_PTYS=y
@@ -944,7 +943,6 @@ CONFIG_USB_STORAGE=y
# CONFIG_USB_RIO500 is not set
# CONFIG_USB_LEGOTOWER is not set
# CONFIG_USB_LCD is not set
-# CONFIG_USB_BERRY_CHARGE is not set
# CONFIG_USB_LED is not set
# CONFIG_USB_CYPRESS_CY7C63 is not set
# CONFIG_USB_CYTHERM is not set
@@ -956,7 +954,6 @@ CONFIG_USB_STORAGE=y
# CONFIG_USB_IOWARRIOR is not set
# CONFIG_USB_TEST is not set
# CONFIG_USB_ISIGHTFW is not set
-# CONFIG_USB_VST is not set
# CONFIG_USB_GADGET is not set
#
@@ -1052,6 +1049,7 @@ CONFIG_MISC_FILESYSTEMS=y
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
# CONFIG_JFFS2_FS is not set
+# CONFIG_LOGFS is not set
CONFIG_CRAMFS=y
# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
@@ -1078,6 +1076,7 @@ CONFIG_SUNRPC=y
# CONFIG_RPCSEC_GSS_KRB5 is not set
# CONFIG_RPCSEC_GSS_SPKM3 is not set
# CONFIG_SMB_FS is not set
+# CONFIG_CEPH_FS is not set
# CONFIG_CIFS is not set
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
@@ -1144,13 +1143,12 @@ CONFIG_FRAME_WARN=1024
CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
# CONFIG_DEBUG_KERNEL is not set
-CONFIG_STACKTRACE=y
CONFIG_DEBUG_BUGVERBOSE=y
# CONFIG_DEBUG_MEMORY_INIT is not set
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_LKDTM is not set
# CONFIG_LATENCYTOP is not set
# CONFIG_SYSCTL_SYSCALL_CHECK is not set
-CONFIG_NOP_TRACER=y
CONFIG_HAVE_FUNCTION_TRACER=y
CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
@@ -1158,10 +1156,7 @@ CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
CONFIG_RING_BUFFER=y
-CONFIG_EVENT_TRACING=y
-CONFIG_CONTEXT_SWITCH_TRACER=y
CONFIG_RING_BUFFER_ALLOW_SWAP=y
-CONFIG_TRACING=y
CONFIG_TRACING_SUPPORT=y
# CONFIG_FTRACE is not set
# CONFIG_DYNAMIC_DEBUG is not set
@@ -1269,7 +1264,8 @@ CONFIG_CRYPTO=y
#
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_HW=y
-CONFIG_BINARY_PRINTF=y
+# CONFIG_VIRTUALIZATION is not set
+# CONFIG_BINARY_PRINTF is not set
#
# Library routines
diff --git a/arch/sh/configs/hp6xx_defconfig b/arch/sh/configs/hp6xx_defconfig
index 06644908631e..f894bdc97a65 100644
--- a/arch/sh/configs/hp6xx_defconfig
+++ b/arch/sh/configs/hp6xx_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.33-rc2
-# Mon Jan 4 11:30:31 2010
+# Linux kernel version: 2.6.34-rc5
+# Tue May 18 15:30:50 2010
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -13,8 +13,8 @@ CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
-CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_IRQ_PER_CPU=y
+CONFIG_SPARSE_IRQ=y
# CONFIG_GENERIC_GPIO is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -31,6 +31,7 @@ CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DMA_NONCOHERENT=y
+CONFIG_NEED_DMA_MAP_STATE=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
CONFIG_CONSTRUCTORS=y
@@ -45,9 +46,11 @@ CONFIG_LOCALVERSION_AUTO=y
CONFIG_HAVE_KERNEL_GZIP=y
CONFIG_HAVE_KERNEL_BZIP2=y
CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_LZO=y
CONFIG_KERNEL_GZIP=y
# CONFIG_KERNEL_BZIP2 is not set
# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_LZO is not set
CONFIG_SWAP=y
# CONFIG_SYSVIPC is not set
CONFIG_BSD_PROCESS_ACCT=y
@@ -66,7 +69,6 @@ CONFIG_RCU_FANOUT=32
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_GROUP_SCHED is not set
# CONFIG_CGROUPS is not set
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
@@ -99,7 +101,7 @@ CONFIG_PERF_USE_VMALLOC=y
#
# Kernel Performance Events And Counters
#
-# CONFIG_PERF_EVENTS is not set
+CONFIG_PERF_EVENTS=y
# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_COMPAT_BRK=y
@@ -115,6 +117,7 @@ CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_DMA_ATTRS=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+CONFIG_HAVE_HW_BREAKPOINT=y
#
# GCOV-based kernel profiling
@@ -228,6 +231,7 @@ CONFIG_ARCH_SPARSEMEM_DEFAULT=y
CONFIG_MAX_ACTIVE_REGIONS=1
CONFIG_ARCH_POPULATES_NODE_MAP=y
CONFIG_ARCH_SELECT_MEMORY_MODEL=y
+CONFIG_UNCACHED_MAPPING=y
CONFIG_PAGE_SIZE_4KB=y
# CONFIG_PAGE_SIZE_8KB is not set
# CONFIG_PAGE_SIZE_16KB is not set
@@ -243,7 +247,7 @@ CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
-CONFIG_NR_QUICK=2
+CONFIG_NR_QUICK=1
# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
@@ -326,7 +330,6 @@ CONFIG_PREEMPT_NONE=y
# CONFIG_PREEMPT is not set
CONFIG_GUSA=y
# CONFIG_GUSA_RB is not set
-# CONFIG_SPARSE_IRQ is not set
#
# Boot options
@@ -344,7 +347,6 @@ CONFIG_ENTRY_OFFSET=0x00001000
CONFIG_PCCARD=y
CONFIG_PCMCIA=y
CONFIG_PCMCIA_LOAD_CIS=y
-CONFIG_PCMCIA_IOCTL=y
#
# PC-card bridges
@@ -369,6 +371,7 @@ CONFIG_SUSPEND_FREEZER=y
# CONFIG_HIBERNATION is not set
CONFIG_APM_EMULATION=y
# CONFIG_PM_RUNTIME is not set
+CONFIG_PM_OPS=y
# CONFIG_CPU_IDLE is not set
# CONFIG_NET is not set
@@ -412,6 +415,7 @@ CONFIG_HAVE_IDE=y
#
# SCSI device support
#
+CONFIG_SCSI_MOD=y
# CONFIG_RAID_ATTRS is not set
CONFIG_SCSI=y
CONFIG_SCSI_DMA=y
@@ -544,6 +548,7 @@ CONFIG_SERIAL_SH_SCI_NR_UARTS=3
CONFIG_SERIAL_SH_SCI_CONSOLE=y
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_TIMBERDALE is not set
CONFIG_UNIX98_PTYS=y
# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
CONFIG_LEGACY_PTYS=y
@@ -722,7 +727,6 @@ CONFIG_EXT2_FS=y
# CONFIG_EXT2_FS_XIP is not set
# CONFIG_EXT3_FS is not set
# CONFIG_EXT4_FS is not set
-CONFIG_EXT4_USE_FOR_EXT23=y
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
@@ -780,6 +784,7 @@ CONFIG_MISC_FILESYSTEMS=y
# CONFIG_BEFS_FS is not set
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
+# CONFIG_LOGFS is not set
# CONFIG_CRAMFS is not set
# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
@@ -977,6 +982,7 @@ CONFIG_CRYPTO_MD5=y
#
# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
+# CONFIG_VIRTUALIZATION is not set
# CONFIG_BINARY_PRINTF is not set
#
diff --git a/arch/sh/configs/kfr2r09-romimage_defconfig b/arch/sh/configs/kfr2r09-romimage_defconfig
index 3d834e59e8f9..8c54e1620e95 100644
--- a/arch/sh/configs/kfr2r09-romimage_defconfig
+++ b/arch/sh/configs/kfr2r09-romimage_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.33-rc2
-# Mon Jan 4 11:31:09 2010
+# Linux kernel version: 2.6.34-rc5
+# Tue May 18 15:33:23 2010
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -13,8 +13,8 @@ CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
-CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_IRQ_PER_CPU=y
+CONFIG_SPARSE_IRQ=y
CONFIG_GENERIC_GPIO=y
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -32,6 +32,7 @@ CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DMA_NONCOHERENT=y
+CONFIG_NEED_DMA_MAP_STATE=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
CONFIG_CONSTRUCTORS=y
@@ -46,9 +47,11 @@ CONFIG_LOCALVERSION=""
CONFIG_HAVE_KERNEL_GZIP=y
CONFIG_HAVE_KERNEL_BZIP2=y
CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_LZO=y
CONFIG_KERNEL_GZIP=y
# CONFIG_KERNEL_BZIP2 is not set
# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_LZO is not set
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
# CONFIG_POSIX_MQUEUE is not set
@@ -70,11 +73,6 @@ CONFIG_RCU_FANOUT=32
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_GROUP_SCHED=y
-CONFIG_FAIR_GROUP_SCHED=y
-# CONFIG_RT_GROUP_SCHED is not set
-CONFIG_USER_SCHED=y
-# CONFIG_CGROUP_SCHED is not set
# CONFIG_CGROUPS is not set
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
@@ -85,6 +83,7 @@ CONFIG_INITRAMFS_SOURCE=""
CONFIG_RD_GZIP=y
# CONFIG_RD_BZIP2 is not set
# CONFIG_RD_LZMA is not set
+# CONFIG_RD_LZO is not set
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_SYSCTL=y
CONFIG_ANON_INODES=y
@@ -110,7 +109,7 @@ CONFIG_PERF_USE_VMALLOC=y
#
# Kernel Performance Events And Counters
#
-# CONFIG_PERF_EVENTS is not set
+CONFIG_PERF_EVENTS=y
# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_COMPAT_BRK=y
@@ -119,13 +118,13 @@ CONFIG_SLAB=y
# CONFIG_SLOB is not set
# CONFIG_PROFILING is not set
CONFIG_HAVE_OPROFILE=y
-CONFIG_HAVE_IOREMAP_PROT=y
CONFIG_HAVE_KPROBES=y
CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_DMA_ATTRS=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+CONFIG_HAVE_HW_BREAKPOINT=y
#
# GCOV-based kernel profiling
@@ -222,8 +221,8 @@ CONFIG_FORCE_MAX_ZONEORDER=11
CONFIG_MEMORY_START=0x08000000
CONFIG_MEMORY_SIZE=0x08000000
CONFIG_29BIT=y
-# CONFIG_PMB_ENABLE is not set
-# CONFIG_X2TLB is not set
+# CONFIG_PMB is not set
+CONFIG_X2TLB=y
CONFIG_VSYSCALL=y
CONFIG_ARCH_FLATMEM_ENABLE=y
CONFIG_ARCH_SPARSEMEM_ENABLE=y
@@ -231,6 +230,8 @@ CONFIG_ARCH_SPARSEMEM_DEFAULT=y
CONFIG_MAX_ACTIVE_REGIONS=1
CONFIG_ARCH_POPULATES_NODE_MAP=y
CONFIG_ARCH_SELECT_MEMORY_MODEL=y
+CONFIG_IOREMAP_FIXED=y
+CONFIG_UNCACHED_MAPPING=y
CONFIG_PAGE_SIZE_4KB=y
# CONFIG_PAGE_SIZE_8KB is not set
# CONFIG_PAGE_SIZE_16KB is not set
@@ -246,7 +247,7 @@ CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
-CONFIG_NR_QUICK=2
+CONFIG_NR_QUICK=1
# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
@@ -321,7 +322,7 @@ CONFIG_PREEMPT_NONE=y
# CONFIG_PREEMPT_VOLUNTARY is not set
# CONFIG_PREEMPT is not set
CONFIG_GUSA=y
-# CONFIG_SPARSE_IRQ is not set
+# CONFIG_INTC_USERIMASK is not set
#
# Boot options
@@ -354,6 +355,7 @@ CONFIG_PM=y
# CONFIG_PM_DEBUG is not set
# CONFIG_SUSPEND is not set
CONFIG_PM_RUNTIME=y
+CONFIG_PM_OPS=y
# CONFIG_CPU_IDLE is not set
CONFIG_NET=y
@@ -361,7 +363,6 @@ CONFIG_NET=y
# Networking options
#
CONFIG_PACKET=y
-CONFIG_PACKET_MMAP=y
CONFIG_UNIX=y
# CONFIG_NET_KEY is not set
CONFIG_INET=y
@@ -449,6 +450,7 @@ CONFIG_HAVE_IDE=y
#
# SCSI device support
#
+CONFIG_SCSI_MOD=y
# CONFIG_SCSI_DMA is not set
# CONFIG_SCSI_NETLINK is not set
# CONFIG_NETDEVICES is not set
@@ -511,6 +513,7 @@ CONFIG_SERIAL_SH_SCI_NR_UARTS=6
CONFIG_SERIAL_SH_SCI_CONSOLE=y
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_TIMBERDALE is not set
CONFIG_UNIX98_PTYS=y
# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
CONFIG_LEGACY_PTYS=y
@@ -538,6 +541,7 @@ CONFIG_I2C_HELPER_AUTO=y
# CONFIG_I2C_OCORES is not set
CONFIG_I2C_SH_MOBILE=y
# CONFIG_I2C_SIMTEC is not set
+# CONFIG_I2C_XILINX is not set
#
# External I2C/SMBus adapter drivers
@@ -549,15 +553,9 @@ CONFIG_I2C_SH_MOBILE=y
# Other I2C/SMBus bus drivers
#
# CONFIG_I2C_PCA_PLATFORM is not set
-
-#
-# Miscellaneous I2C Chip support
-#
-# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
# CONFIG_I2C_DEBUG_BUS is not set
-# CONFIG_I2C_DEBUG_CHIP is not set
# CONFIG_SPI is not set
#
@@ -571,13 +569,16 @@ CONFIG_GPIO_SYSFS=y
#
# Memory mapped GPIO expanders:
#
+# CONFIG_GPIO_IT8761E is not set
#
# I2C GPIO expanders:
#
+# CONFIG_GPIO_MAX7300 is not set
# CONFIG_GPIO_MAX732X is not set
# CONFIG_GPIO_PCA953X is not set
# CONFIG_GPIO_PCF857X is not set
+# CONFIG_GPIO_ADP5588 is not set
#
# PCI GPIO expanders:
@@ -606,20 +607,23 @@ CONFIG_SSB_POSSIBLE=y
# Multifunction device drivers
#
# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_88PM860X is not set
# CONFIG_MFD_SM501 is not set
# CONFIG_MFD_SH_MOBILE_SDHI is not set
# CONFIG_HTC_PASIC3 is not set
+# CONFIG_HTC_I2CPLD is not set
# CONFIG_TPS65010 is not set
# CONFIG_TWL4030_CORE is not set
# CONFIG_MFD_TMIO is not set
# CONFIG_PMIC_DA903X is not set
# CONFIG_PMIC_ADP5520 is not set
+# CONFIG_MFD_MAX8925 is not set
# CONFIG_MFD_WM8400 is not set
# CONFIG_MFD_WM831X is not set
# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_WM8994 is not set
# CONFIG_MFD_PCF50633 is not set
# CONFIG_AB3100_CORE is not set
-# CONFIG_MFD_88PM8607 is not set
# CONFIG_REGULATOR is not set
# CONFIG_MEDIA_SUPPORT is not set
@@ -690,6 +694,7 @@ CONFIG_USB_GADGET_DUALSPEED=y
# CONFIG_USB_MIDI_GADGET is not set
# CONFIG_USB_G_PRINTER is not set
CONFIG_USB_CDC_COMPOSITE=y
+# CONFIG_USB_G_NOKIA is not set
# CONFIG_USB_G_MULTI is not set
#
@@ -794,6 +799,7 @@ CONFIG_HAVE_ARCH_KGDB=y
CONFIG_DEFAULT_SECURITY_DAC=y
CONFIG_DEFAULT_SECURITY=""
# CONFIG_CRYPTO is not set
+# CONFIG_VIRTUALIZATION is not set
# CONFIG_BINARY_PRINTF is not set
#
diff --git a/arch/sh/configs/kfr2r09_defconfig b/arch/sh/configs/kfr2r09_defconfig
index f22be494ed99..2e74b08ca14d 100644
--- a/arch/sh/configs/kfr2r09_defconfig
+++ b/arch/sh/configs/kfr2r09_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.33-rc2
-# Mon Jan 4 11:32:55 2010
+# Linux kernel version: 2.6.34-rc5
+# Tue May 18 15:35:20 2010
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -13,8 +13,8 @@ CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
-CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_IRQ_PER_CPU=y
+CONFIG_SPARSE_IRQ=y
CONFIG_GENERIC_GPIO=y
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -32,6 +32,7 @@ CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DMA_NONCOHERENT=y
+CONFIG_NEED_DMA_MAP_STATE=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
CONFIG_CONSTRUCTORS=y
@@ -47,9 +48,11 @@ CONFIG_LOCALVERSION=""
CONFIG_HAVE_KERNEL_GZIP=y
CONFIG_HAVE_KERNEL_BZIP2=y
CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_LZO=y
CONFIG_KERNEL_GZIP=y
# CONFIG_KERNEL_BZIP2 is not set
# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_LZO is not set
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
@@ -72,11 +75,6 @@ CONFIG_RCU_FANOUT=32
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_GROUP_SCHED=y
-CONFIG_FAIR_GROUP_SCHED=y
-# CONFIG_RT_GROUP_SCHED is not set
-CONFIG_USER_SCHED=y
-# CONFIG_CGROUP_SCHED is not set
# CONFIG_CGROUPS is not set
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
@@ -87,6 +85,7 @@ CONFIG_INITRAMFS_SOURCE=""
CONFIG_RD_GZIP=y
# CONFIG_RD_BZIP2 is not set
# CONFIG_RD_LZMA is not set
+# CONFIG_RD_LZO is not set
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_SYSCTL=y
CONFIG_ANON_INODES=y
@@ -112,7 +111,7 @@ CONFIG_PERF_USE_VMALLOC=y
#
# Kernel Performance Events And Counters
#
-# CONFIG_PERF_EVENTS is not set
+CONFIG_PERF_EVENTS=y
# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_COMPAT_BRK=y
@@ -121,13 +120,13 @@ CONFIG_SLAB=y
# CONFIG_SLOB is not set
# CONFIG_PROFILING is not set
CONFIG_HAVE_OPROFILE=y
-CONFIG_HAVE_IOREMAP_PROT=y
CONFIG_HAVE_KPROBES=y
CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_DMA_ATTRS=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+CONFIG_HAVE_HW_BREAKPOINT=y
#
# GCOV-based kernel profiling
@@ -243,8 +242,8 @@ CONFIG_FORCE_MAX_ZONEORDER=11
CONFIG_MEMORY_START=0x08000000
CONFIG_MEMORY_SIZE=0x08000000
CONFIG_29BIT=y
-# CONFIG_PMB_ENABLE is not set
-# CONFIG_X2TLB is not set
+# CONFIG_PMB is not set
+CONFIG_X2TLB=y
CONFIG_VSYSCALL=y
CONFIG_ARCH_FLATMEM_ENABLE=y
CONFIG_ARCH_SPARSEMEM_ENABLE=y
@@ -252,6 +251,8 @@ CONFIG_ARCH_SPARSEMEM_DEFAULT=y
CONFIG_MAX_ACTIVE_REGIONS=1
CONFIG_ARCH_POPULATES_NODE_MAP=y
CONFIG_ARCH_SELECT_MEMORY_MODEL=y
+CONFIG_IOREMAP_FIXED=y
+CONFIG_UNCACHED_MAPPING=y
CONFIG_PAGE_SIZE_4KB=y
# CONFIG_PAGE_SIZE_8KB is not set
# CONFIG_PAGE_SIZE_16KB is not set
@@ -267,7 +268,7 @@ CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
-CONFIG_NR_QUICK=2
+CONFIG_NR_QUICK=1
# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
@@ -343,7 +344,7 @@ CONFIG_KEXEC=y
# CONFIG_PREEMPT_VOLUNTARY is not set
CONFIG_PREEMPT=y
CONFIG_GUSA=y
-# CONFIG_SPARSE_IRQ is not set
+# CONFIG_INTC_USERIMASK is not set
#
# Boot options
@@ -377,6 +378,7 @@ CONFIG_PM=y
# CONFIG_SUSPEND is not set
# CONFIG_HIBERNATION is not set
CONFIG_PM_RUNTIME=y
+CONFIG_PM_OPS=y
CONFIG_CPU_IDLE=y
CONFIG_CPU_IDLE_GOV_LADDER=y
CONFIG_CPU_IDLE_GOV_MENU=y
@@ -386,7 +388,6 @@ CONFIG_NET=y
# Networking options
#
CONFIG_PACKET=y
-CONFIG_PACKET_MMAP=y
CONFIG_UNIX=y
# CONFIG_NET_KEY is not set
CONFIG_INET=y
@@ -580,6 +581,7 @@ CONFIG_HAVE_IDE=y
#
# SCSI device support
#
+CONFIG_SCSI_MOD=y
# CONFIG_RAID_ATTRS is not set
# CONFIG_SCSI is not set
# CONFIG_SCSI_DMA is not set
@@ -659,6 +661,7 @@ CONFIG_SERIAL_SH_SCI_NR_UARTS=6
CONFIG_SERIAL_SH_SCI_CONSOLE=y
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_TIMBERDALE is not set
CONFIG_UNIX98_PTYS=y
# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
CONFIG_LEGACY_PTYS=y
@@ -687,6 +690,7 @@ CONFIG_I2C_HELPER_AUTO=y
# CONFIG_I2C_OCORES is not set
CONFIG_I2C_SH_MOBILE=y
# CONFIG_I2C_SIMTEC is not set
+# CONFIG_I2C_XILINX is not set
#
# External I2C/SMBus adapter drivers
@@ -699,15 +703,9 @@ CONFIG_I2C_SH_MOBILE=y
#
# CONFIG_I2C_PCA_PLATFORM is not set
# CONFIG_I2C_STUB is not set
-
-#
-# Miscellaneous I2C Chip support
-#
-# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
# CONFIG_I2C_DEBUG_BUS is not set
-# CONFIG_I2C_DEBUG_CHIP is not set
# CONFIG_SPI is not set
#
@@ -721,13 +719,16 @@ CONFIG_GPIO_SYSFS=y
#
# Memory mapped GPIO expanders:
#
+# CONFIG_GPIO_IT8761E is not set
#
# I2C GPIO expanders:
#
+# CONFIG_GPIO_MAX7300 is not set
# CONFIG_GPIO_MAX732X is not set
# CONFIG_GPIO_PCA953X is not set
# CONFIG_GPIO_PCF857X is not set
+# CONFIG_GPIO_ADP5588 is not set
#
# PCI GPIO expanders:
@@ -756,20 +757,23 @@ CONFIG_SSB_POSSIBLE=y
# Multifunction device drivers
#
# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_88PM860X is not set
# CONFIG_MFD_SM501 is not set
# CONFIG_MFD_SH_MOBILE_SDHI is not set
# CONFIG_HTC_PASIC3 is not set
+# CONFIG_HTC_I2CPLD is not set
# CONFIG_TPS65010 is not set
# CONFIG_TWL4030_CORE is not set
# CONFIG_MFD_TMIO is not set
# CONFIG_PMIC_DA903X is not set
# CONFIG_PMIC_ADP5520 is not set
+# CONFIG_MFD_MAX8925 is not set
# CONFIG_MFD_WM8400 is not set
# CONFIG_MFD_WM831X is not set
# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_WM8994 is not set
# CONFIG_MFD_PCF50633 is not set
# CONFIG_AB3100_CORE is not set
-# CONFIG_MFD_88PM8607 is not set
# CONFIG_REGULATOR is not set
# CONFIG_MEDIA_SUPPORT is not set
@@ -889,6 +893,7 @@ CONFIG_USB_GADGET_DUALSPEED=y
# CONFIG_USB_MIDI_GADGET is not set
# CONFIG_USB_G_PRINTER is not set
CONFIG_USB_CDC_COMPOSITE=m
+# CONFIG_USB_G_NOKIA is not set
# CONFIG_USB_G_MULTI is not set
#
@@ -912,9 +917,6 @@ CONFIG_MMC_BLOCK_BOUNCE=y
# MMC/SD/SDIO Host Controller Drivers
#
# CONFIG_MMC_SDHCI is not set
-# CONFIG_MMC_AT91 is not set
-# CONFIG_MMC_ATMELMCI is not set
-# CONFIG_MMC_TMIO is not set
# CONFIG_MEMSTICK is not set
# CONFIG_NEW_LEDS is not set
# CONFIG_ACCESSIBILITY is not set
@@ -982,8 +984,6 @@ CONFIG_RTC_DRV_SH=y
CONFIG_UIO=y
# CONFIG_UIO_PDRV is not set
CONFIG_UIO_PDRV_GENIRQ=y
-# CONFIG_UIO_SMX is not set
-# CONFIG_UIO_SERCOS3 is not set
#
# TI VLYNQ
@@ -996,7 +996,6 @@ CONFIG_UIO_PDRV_GENIRQ=y
# CONFIG_EXT2_FS is not set
# CONFIG_EXT3_FS is not set
# CONFIG_EXT4_FS is not set
-CONFIG_EXT4_USE_FOR_EXT23=y
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
@@ -1074,6 +1073,7 @@ CONFIG_DEBUG_FS=y
CONFIG_DEBUG_BUGVERBOSE=y
# CONFIG_DEBUG_MEMORY_INIT is not set
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_LKDTM is not set
# CONFIG_LATENCYTOP is not set
# CONFIG_SYSCTL_SYSCALL_CHECK is not set
CONFIG_HAVE_FUNCTION_TRACER=y
@@ -1103,6 +1103,7 @@ CONFIG_HAVE_ARCH_KGDB=y
CONFIG_DEFAULT_SECURITY_DAC=y
CONFIG_DEFAULT_SECURITY=""
# CONFIG_CRYPTO is not set
+# CONFIG_VIRTUALIZATION is not set
# CONFIG_BINARY_PRINTF is not set
#
diff --git a/arch/sh/configs/landisk_defconfig b/arch/sh/configs/landisk_defconfig
index 2a42d4977fe4..87789345d47f 100644
--- a/arch/sh/configs/landisk_defconfig
+++ b/arch/sh/configs/landisk_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.33-rc2
-# Mon Jan 4 11:35:31 2010
+# Linux kernel version: 2.6.34-rc5
+# Tue May 18 15:38:08 2010
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -13,8 +13,8 @@ CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
-CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_IRQ_PER_CPU=y
+CONFIG_SPARSE_IRQ=y
# CONFIG_GENERIC_GPIO is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -32,6 +32,7 @@ CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DMA_NONCOHERENT=y
+CONFIG_NEED_DMA_MAP_STATE=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
CONFIG_CONSTRUCTORS=y
@@ -46,9 +47,11 @@ CONFIG_LOCALVERSION_AUTO=y
CONFIG_HAVE_KERNEL_GZIP=y
CONFIG_HAVE_KERNEL_BZIP2=y
CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_LZO=y
CONFIG_KERNEL_GZIP=y
# CONFIG_KERNEL_BZIP2 is not set
# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_LZO is not set
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
@@ -69,7 +72,6 @@ CONFIG_RCU_FANOUT=32
# CONFIG_TREE_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_GROUP_SCHED is not set
# CONFIG_CGROUPS is not set
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
@@ -102,7 +104,7 @@ CONFIG_PERF_USE_VMALLOC=y
#
# Kernel Performance Events And Counters
#
-# CONFIG_PERF_EVENTS is not set
+CONFIG_PERF_EVENTS=y
# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_PCI_QUIRKS=y
@@ -120,6 +122,7 @@ CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_DMA_ATTRS=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+CONFIG_HAVE_HW_BREAKPOINT=y
#
# GCOV-based kernel profiling
@@ -238,6 +241,7 @@ CONFIG_ARCH_SPARSEMEM_DEFAULT=y
CONFIG_MAX_ACTIVE_REGIONS=1
CONFIG_ARCH_POPULATES_NODE_MAP=y
CONFIG_ARCH_SELECT_MEMORY_MODEL=y
+CONFIG_UNCACHED_MAPPING=y
CONFIG_PAGE_SIZE_4KB=y
# CONFIG_PAGE_SIZE_8KB is not set
# CONFIG_PAGE_SIZE_16KB is not set
@@ -253,7 +257,7 @@ CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
-CONFIG_NR_QUICK=2
+CONFIG_NR_QUICK=1
# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
@@ -333,7 +337,6 @@ CONFIG_PREEMPT_NONE=y
# CONFIG_PREEMPT is not set
CONFIG_GUSA=y
# CONFIG_GUSA_RB is not set
-# CONFIG_SPARSE_IRQ is not set
#
# Boot options
@@ -348,15 +351,14 @@ CONFIG_ENTRY_OFFSET=0x00001000
# Bus options
#
CONFIG_PCI=y
+CONFIG_PCI_DOMAINS=y
# CONFIG_PCIEPORTBUS is not set
# CONFIG_ARCH_SUPPORTS_MSI is not set
-CONFIG_PCI_LEGACY=y
# CONFIG_PCI_STUB is not set
# CONFIG_PCI_IOV is not set
CONFIG_PCCARD=y
CONFIG_PCMCIA=y
CONFIG_PCMCIA_LOAD_CIS=y
-CONFIG_PCMCIA_IOCTL=y
CONFIG_CARDBUS=y
#
@@ -392,7 +394,6 @@ CONFIG_NET=y
# Networking options
#
CONFIG_PACKET=y
-# CONFIG_PACKET_MMAP is not set
CONFIG_UNIX=y
CONFIG_XFRM=y
# CONFIG_XFRM_USER is not set
@@ -612,6 +613,7 @@ CONFIG_BLK_DEV_IDEDMA=y
#
# SCSI device support
#
+CONFIG_SCSI_MOD=y
# CONFIG_RAID_ATTRS is not set
CONFIG_SCSI=y
CONFIG_SCSI_DMA=y
@@ -711,7 +713,7 @@ CONFIG_MD_RAID1=m
#
#
-# See the help texts for more information.
+# The newer stack is recommended.
#
# CONFIG_FIREWIRE is not set
# CONFIG_IEEE1394 is not set
@@ -751,6 +753,7 @@ CONFIG_NET_PCI=y
# CONFIG_PCNET32 is not set
# CONFIG_AMD8111_ETH is not set
# CONFIG_ADAPTEC_STARFIRE is not set
+# CONFIG_KSZ884X_PCI is not set
# CONFIG_B44 is not set
# CONFIG_FORCEDETH is not set
# CONFIG_E100 is not set
@@ -798,6 +801,8 @@ CONFIG_NETDEV_10000=y
# CONFIG_CHELSIO_T1 is not set
CONFIG_CHELSIO_T3_DEPENDS=y
# CONFIG_CHELSIO_T3 is not set
+CONFIG_CHELSIO_T4_DEPENDS=y
+# CONFIG_CHELSIO_T4 is not set
# CONFIG_ENIC is not set
# CONFIG_IXGBE is not set
# CONFIG_IXGB is not set
@@ -810,6 +815,7 @@ CONFIG_CHELSIO_T3_DEPENDS=y
# CONFIG_MLX4_CORE is not set
# CONFIG_TEHUTI is not set
# CONFIG_BNX2X is not set
+# CONFIG_QLCNIC is not set
# CONFIG_QLGE is not set
# CONFIG_SFC is not set
# CONFIG_BE2NET is not set
@@ -835,6 +841,7 @@ CONFIG_WLAN=y
CONFIG_USB_PEGASUS=m
CONFIG_USB_RTL8150=m
# CONFIG_USB_USBNET is not set
+# CONFIG_USB_IPHETH is not set
# CONFIG_NET_PCMCIA is not set
# CONFIG_WAN is not set
# CONFIG_FDDI is not set
@@ -910,6 +917,7 @@ CONFIG_SERIAL_SH_SCI_CONSOLE=y
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
# CONFIG_SERIAL_JSM is not set
+# CONFIG_SERIAL_TIMBERDALE is not set
CONFIG_UNIX98_PTYS=y
# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
CONFIG_LEGACY_PTYS=y
@@ -977,6 +985,7 @@ CONFIG_SSB_POSSIBLE=y
# CONFIG_MFD_SH_MOBILE_SDHI is not set
# CONFIG_HTC_PASIC3 is not set
# CONFIG_MFD_TMIO is not set
+# CONFIG_LPC_SCH is not set
# CONFIG_REGULATOR is not set
# CONFIG_MEDIA_SUPPORT is not set
@@ -984,6 +993,7 @@ CONFIG_SSB_POSSIBLE=y
# Graphics support
#
CONFIG_VGA_ARB=y
+CONFIG_VGA_ARB_MAX_GPUS=16
# CONFIG_DRM is not set
# CONFIG_VGASTATE is not set
# CONFIG_VIDEO_OUTPUT_CONTROL is not set
@@ -1025,6 +1035,7 @@ CONFIG_USB_HID=m
#
# Special HID drivers
#
+# CONFIG_HID_3M_PCT is not set
CONFIG_HID_A4TECH=m
CONFIG_HID_APPLE=m
CONFIG_HID_BELKIN=m
@@ -1040,14 +1051,19 @@ CONFIG_HID_GYRATION=m
CONFIG_HID_LOGITECH=m
# CONFIG_LOGITECH_FF is not set
# CONFIG_LOGIRUMBLEPAD2_FF is not set
+# CONFIG_LOGIG940_FF is not set
CONFIG_HID_MICROSOFT=m
+# CONFIG_HID_MOSART is not set
CONFIG_HID_MONTEREY=m
# CONFIG_HID_NTRIG is not set
+# CONFIG_HID_ORTEK is not set
CONFIG_HID_PANTHERLORD=m
# CONFIG_PANTHERLORD_FF is not set
CONFIG_HID_PETALYNX=m
+# CONFIG_HID_QUANTA is not set
CONFIG_HID_SAMSUNG=m
CONFIG_HID_SONY=m
+# CONFIG_HID_STANTUM is not set
CONFIG_HID_SUNPLUS=m
# CONFIG_HID_GREENASIA is not set
# CONFIG_HID_SMARTJOYPLUS is not set
@@ -1169,6 +1185,7 @@ CONFIG_USB_SERIAL_FTDI_SIO=m
# CONFIG_USB_SERIAL_NAVMAN is not set
CONFIG_USB_SERIAL_PL2303=m
# CONFIG_USB_SERIAL_OTI6858 is not set
+# CONFIG_USB_SERIAL_QCAUX is not set
# CONFIG_USB_SERIAL_QUALCOMM is not set
# CONFIG_USB_SERIAL_SPCP8X5 is not set
# CONFIG_USB_SERIAL_HP4X is not set
@@ -1182,6 +1199,7 @@ CONFIG_USB_SERIAL_PL2303=m
# CONFIG_USB_SERIAL_OPTION is not set
# CONFIG_USB_SERIAL_OMNINET is not set
# CONFIG_USB_SERIAL_OPTICON is not set
+# CONFIG_USB_SERIAL_VIVOPAY_SERIAL is not set
# CONFIG_USB_SERIAL_DEBUG is not set
#
@@ -1194,7 +1212,6 @@ CONFIG_USB_EMI26=m
# CONFIG_USB_RIO500 is not set
# CONFIG_USB_LEGOTOWER is not set
# CONFIG_USB_LCD is not set
-# CONFIG_USB_BERRY_CHARGE is not set
# CONFIG_USB_LED is not set
# CONFIG_USB_CYPRESS_CY7C63 is not set
# CONFIG_USB_CYTHERM is not set
@@ -1208,7 +1225,6 @@ CONFIG_USB_SISUSBVGA_CON=y
# CONFIG_USB_IOWARRIOR is not set
# CONFIG_USB_TEST is not set
# CONFIG_USB_ISIGHTFW is not set
-# CONFIG_USB_VST is not set
# CONFIG_USB_GADGET is not set
#
@@ -1313,6 +1329,7 @@ CONFIG_MISC_FILESYSTEMS=y
# CONFIG_BEFS_FS is not set
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
+# CONFIG_LOGFS is not set
# CONFIG_CRAMFS is not set
# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
@@ -1347,6 +1364,7 @@ CONFIG_SUNRPC=m
# CONFIG_RPCSEC_GSS_SPKM3 is not set
CONFIG_SMB_FS=m
# CONFIG_SMB_NLS_DEFAULT is not set
+# CONFIG_CEPH_FS is not set
# CONFIG_CIFS is not set
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
@@ -1531,6 +1549,7 @@ CONFIG_CRYPTO=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_HW=y
# CONFIG_CRYPTO_DEV_HIFN_795X is not set
+# CONFIG_VIRTUALIZATION is not set
# CONFIG_BINARY_PRINTF is not set
#
diff --git a/arch/sh/configs/lboxre2_defconfig b/arch/sh/configs/lboxre2_defconfig
index f2f1f8c73b2f..6088a76bd21c 100644
--- a/arch/sh/configs/lboxre2_defconfig
+++ b/arch/sh/configs/lboxre2_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.33-rc2
-# Mon Jan 4 11:37:01 2010
+# Linux kernel version: 2.6.34-rc5
+# Tue May 18 15:42:57 2010
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -13,8 +13,8 @@ CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
-CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_IRQ_PER_CPU=y
+CONFIG_SPARSE_IRQ=y
# CONFIG_GENERIC_GPIO is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -32,6 +32,7 @@ CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DMA_NONCOHERENT=y
+CONFIG_NEED_DMA_MAP_STATE=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
CONFIG_CONSTRUCTORS=y
@@ -46,9 +47,11 @@ CONFIG_LOCALVERSION_AUTO=y
CONFIG_HAVE_KERNEL_GZIP=y
CONFIG_HAVE_KERNEL_BZIP2=y
CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_LZO=y
CONFIG_KERNEL_GZIP=y
# CONFIG_KERNEL_BZIP2 is not set
# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_LZO is not set
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
@@ -69,7 +72,6 @@ CONFIG_RCU_FANOUT=32
# CONFIG_TREE_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_GROUP_SCHED is not set
# CONFIG_CGROUPS is not set
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
@@ -102,7 +104,7 @@ CONFIG_PERF_USE_VMALLOC=y
#
# Kernel Performance Events And Counters
#
-# CONFIG_PERF_EVENTS is not set
+CONFIG_PERF_EVENTS=y
# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_PCI_QUIRKS=y
@@ -120,6 +122,7 @@ CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_DMA_ATTRS=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+CONFIG_HAVE_HW_BREAKPOINT=y
#
# GCOV-based kernel profiling
@@ -238,6 +241,7 @@ CONFIG_ARCH_SPARSEMEM_DEFAULT=y
CONFIG_MAX_ACTIVE_REGIONS=1
CONFIG_ARCH_POPULATES_NODE_MAP=y
CONFIG_ARCH_SELECT_MEMORY_MODEL=y
+CONFIG_UNCACHED_MAPPING=y
CONFIG_PAGE_SIZE_4KB=y
# CONFIG_PAGE_SIZE_8KB is not set
# CONFIG_PAGE_SIZE_16KB is not set
@@ -253,7 +257,7 @@ CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
-CONFIG_NR_QUICK=2
+CONFIG_NR_QUICK=1
# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
@@ -333,7 +337,6 @@ CONFIG_PREEMPT_NONE=y
# CONFIG_PREEMPT is not set
CONFIG_GUSA=y
# CONFIG_GUSA_RB is not set
-# CONFIG_SPARSE_IRQ is not set
#
# Boot options
@@ -349,15 +352,14 @@ CONFIG_CMDLINE="console=ttySC1,115200 root=/dev/sda1"
# Bus options
#
CONFIG_PCI=y
+CONFIG_PCI_DOMAINS=y
# CONFIG_PCIEPORTBUS is not set
# CONFIG_ARCH_SUPPORTS_MSI is not set
-CONFIG_PCI_LEGACY=y
# CONFIG_PCI_STUB is not set
# CONFIG_PCI_IOV is not set
CONFIG_PCCARD=y
CONFIG_PCMCIA=y
CONFIG_PCMCIA_LOAD_CIS=y
-CONFIG_PCMCIA_IOCTL=y
CONFIG_CARDBUS=y
#
@@ -392,7 +394,6 @@ CONFIG_NET=y
# Networking options
#
CONFIG_PACKET=y
-# CONFIG_PACKET_MMAP is not set
CONFIG_UNIX=y
CONFIG_XFRM=y
# CONFIG_XFRM_USER is not set
@@ -552,6 +553,7 @@ CONFIG_HAVE_IDE=y
#
# SCSI device support
#
+CONFIG_SCSI_MOD=y
# CONFIG_RAID_ATTRS is not set
CONFIG_SCSI=y
CONFIG_SCSI_DMA=y
@@ -670,6 +672,7 @@ CONFIG_ATA_SFF=y
# CONFIG_PATA_IT821X is not set
# CONFIG_PATA_IT8213 is not set
# CONFIG_PATA_JMICRON is not set
+# CONFIG_PATA_LEGACY is not set
# CONFIG_PATA_TRIFLEX is not set
# CONFIG_PATA_MARVELL is not set
# CONFIG_PATA_MPIIX is not set
@@ -707,7 +710,7 @@ CONFIG_PATA_PLATFORM=y
#
#
-# See the help texts for more information.
+# The newer stack is recommended.
#
# CONFIG_FIREWIRE is not set
# CONFIG_IEEE1394 is not set
@@ -747,6 +750,7 @@ CONFIG_NET_PCI=y
# CONFIG_PCNET32 is not set
# CONFIG_AMD8111_ETH is not set
# CONFIG_ADAPTEC_STARFIRE is not set
+# CONFIG_KSZ884X_PCI is not set
# CONFIG_B44 is not set
# CONFIG_FORCEDETH is not set
# CONFIG_E100 is not set
@@ -798,6 +802,8 @@ CONFIG_NETDEV_10000=y
# CONFIG_CHELSIO_T1 is not set
CONFIG_CHELSIO_T3_DEPENDS=y
# CONFIG_CHELSIO_T3 is not set
+CONFIG_CHELSIO_T4_DEPENDS=y
+# CONFIG_CHELSIO_T4 is not set
# CONFIG_ENIC is not set
# CONFIG_IXGBE is not set
# CONFIG_IXGB is not set
@@ -810,6 +816,7 @@ CONFIG_CHELSIO_T3_DEPENDS=y
# CONFIG_MLX4_CORE is not set
# CONFIG_TEHUTI is not set
# CONFIG_BNX2X is not set
+# CONFIG_QLCNIC is not set
# CONFIG_QLGE is not set
# CONFIG_SFC is not set
# CONFIG_BE2NET is not set
@@ -908,6 +915,7 @@ CONFIG_SERIAL_SH_SCI_CONSOLE=y
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
# CONFIG_SERIAL_JSM is not set
+# CONFIG_SERIAL_TIMBERDALE is not set
CONFIG_UNIX98_PTYS=y
# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
CONFIG_LEGACY_PTYS=y
@@ -975,6 +983,7 @@ CONFIG_SSB_POSSIBLE=y
# CONFIG_MFD_SH_MOBILE_SDHI is not set
# CONFIG_HTC_PASIC3 is not set
# CONFIG_MFD_TMIO is not set
+# CONFIG_LPC_SCH is not set
# CONFIG_REGULATOR is not set
# CONFIG_MEDIA_SUPPORT is not set
@@ -982,6 +991,7 @@ CONFIG_SSB_POSSIBLE=y
# Graphics support
#
CONFIG_VGA_ARB=y
+CONFIG_VGA_ARB_MAX_GPUS=16
# CONFIG_DRM is not set
# CONFIG_VGASTATE is not set
# CONFIG_VIDEO_OUTPUT_CONTROL is not set
@@ -1155,6 +1165,7 @@ CONFIG_MISC_FILESYSTEMS=y
# CONFIG_BEFS_FS is not set
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
+# CONFIG_LOGFS is not set
# CONFIG_CRAMFS is not set
# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
@@ -1173,6 +1184,7 @@ CONFIG_NETWORK_FILESYSTEMS=y
# CONFIG_NFS_FS is not set
# CONFIG_NFSD is not set
# CONFIG_SMB_FS is not set
+# CONFIG_CEPH_FS is not set
# CONFIG_CIFS is not set
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
@@ -1357,6 +1369,7 @@ CONFIG_CRYPTO=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_HW=y
# CONFIG_CRYPTO_DEV_HIFN_795X is not set
+# CONFIG_VIRTUALIZATION is not set
# CONFIG_BINARY_PRINTF is not set
#
diff --git a/arch/sh/configs/magicpanelr2_defconfig b/arch/sh/configs/magicpanelr2_defconfig
index a7a16ce357ad..28c104da0ba1 100644
--- a/arch/sh/configs/magicpanelr2_defconfig
+++ b/arch/sh/configs/magicpanelr2_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.33-rc2
-# Mon Jan 4 11:37:42 2010
+# Linux kernel version: 2.6.34-rc5
+# Tue May 18 15:44:00 2010
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -13,8 +13,8 @@ CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
-CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_IRQ_PER_CPU=y
+CONFIG_SPARSE_IRQ=y
CONFIG_GENERIC_GPIO=y
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -31,6 +31,7 @@ CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DMA_NONCOHERENT=y
+CONFIG_NEED_DMA_MAP_STATE=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
CONFIG_CONSTRUCTORS=y
@@ -45,9 +46,11 @@ CONFIG_LOCALVERSION=""
CONFIG_HAVE_KERNEL_GZIP=y
CONFIG_HAVE_KERNEL_BZIP2=y
CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_LZO=y
CONFIG_KERNEL_GZIP=y
# CONFIG_KERNEL_BZIP2 is not set
# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_LZO is not set
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
@@ -71,7 +74,6 @@ CONFIG_RCU_FANOUT=32
# CONFIG_TREE_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=17
-# CONFIG_GROUP_SCHED is not set
# CONFIG_CGROUPS is not set
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
@@ -82,6 +84,7 @@ CONFIG_INITRAMFS_SOURCE=""
CONFIG_RD_GZIP=y
# CONFIG_RD_BZIP2 is not set
# CONFIG_RD_LZMA is not set
+# CONFIG_RD_LZO is not set
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_SYSCTL=y
CONFIG_ANON_INODES=y
@@ -109,8 +112,9 @@ CONFIG_PERF_USE_VMALLOC=y
#
# Kernel Performance Events And Counters
#
-# CONFIG_PERF_EVENTS is not set
+CONFIG_PERF_EVENTS=y
# CONFIG_PERF_COUNTERS is not set
+# CONFIG_DEBUG_PERF_USE_VMALLOC is not set
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_COMPAT_BRK=y
CONFIG_SLAB=y
@@ -126,6 +130,7 @@ CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_DMA_ATTRS=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+CONFIG_HAVE_HW_BREAKPOINT=y
#
# GCOV-based kernel profiling
@@ -244,6 +249,7 @@ CONFIG_ARCH_SPARSEMEM_DEFAULT=y
CONFIG_MAX_ACTIVE_REGIONS=1
CONFIG_ARCH_POPULATES_NODE_MAP=y
CONFIG_ARCH_SELECT_MEMORY_MODEL=y
+CONFIG_UNCACHED_MAPPING=y
CONFIG_PAGE_SIZE_4KB=y
# CONFIG_PAGE_SIZE_8KB is not set
# CONFIG_PAGE_SIZE_16KB is not set
@@ -259,7 +265,7 @@ CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
-CONFIG_NR_QUICK=2
+CONFIG_NR_QUICK=1
# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
@@ -344,7 +350,6 @@ CONFIG_PREEMPT_NONE=y
# CONFIG_PREEMPT is not set
CONFIG_GUSA=y
# CONFIG_GUSA_RB is not set
-# CONFIG_SPARSE_IRQ is not set
#
# Boot options
@@ -380,7 +385,6 @@ CONFIG_NET=y
# Networking options
#
CONFIG_PACKET=y
-CONFIG_PACKET_MMAP=y
CONFIG_UNIX=y
# CONFIG_NET_KEY is not set
CONFIG_INET=y
@@ -586,6 +590,7 @@ CONFIG_HAVE_IDE=y
#
# SCSI device support
#
+CONFIG_SCSI_MOD=y
# CONFIG_RAID_ATTRS is not set
# CONFIG_SCSI is not set
# CONFIG_SCSI_DMA is not set
@@ -741,6 +746,7 @@ CONFIG_SERIAL_SH_SCI_NR_UARTS=2
CONFIG_SERIAL_SH_SCI_CONSOLE=y
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_TIMBERDALE is not set
CONFIG_UNIX98_PTYS=y
# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
CONFIG_LEGACY_PTYS=y
@@ -765,6 +771,7 @@ CONFIG_GPIOLIB=y
#
# Memory mapped GPIO expanders:
#
+# CONFIG_GPIO_IT8761E is not set
#
# I2C GPIO expanders:
@@ -953,6 +960,7 @@ CONFIG_JFFS2_ZLIB=y
# CONFIG_JFFS2_LZO is not set
CONFIG_JFFS2_RTIME=y
# CONFIG_JFFS2_RUBIN is not set
+# CONFIG_LOGFS is not set
# CONFIG_CRAMFS is not set
# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
@@ -977,6 +985,7 @@ CONFIG_SUNRPC=y
# CONFIG_RPCSEC_GSS_KRB5 is not set
# CONFIG_RPCSEC_GSS_SPKM3 is not set
# CONFIG_SMB_FS is not set
+# CONFIG_CEPH_FS is not set
# CONFIG_CIFS is not set
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
@@ -1051,6 +1060,7 @@ CONFIG_DEBUG_KERNEL=y
# CONFIG_TIMER_STATS is not set
# CONFIG_DEBUG_OBJECTS is not set
# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_KMEMLEAK is not set
# CONFIG_DEBUG_RT_MUTEXES is not set
# CONFIG_RT_MUTEX_TESTER is not set
# CONFIG_DEBUG_SPINLOCK is not set
@@ -1097,6 +1107,7 @@ CONFIG_FTRACE=y
CONFIG_BRANCH_PROFILE_NONE=y
# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
# CONFIG_PROFILE_ALL_BRANCHES is not set
+# CONFIG_KSYM_TRACER is not set
# CONFIG_STACK_TRACER is not set
# CONFIG_KMEMTRACE is not set
# CONFIG_WORKQUEUE_TRACER is not set
@@ -1125,6 +1136,7 @@ CONFIG_DUMP_CODE=y
CONFIG_DEFAULT_SECURITY_DAC=y
CONFIG_DEFAULT_SECURITY=""
# CONFIG_CRYPTO is not set
+# CONFIG_VIRTUALIZATION is not set
# CONFIG_BINARY_PRINTF is not set
#
diff --git a/arch/sh/configs/microdev_defconfig b/arch/sh/configs/microdev_defconfig
index 7d43fabdc073..41cefa490ec1 100644
--- a/arch/sh/configs/microdev_defconfig
+++ b/arch/sh/configs/microdev_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.33-rc2
-# Mon Jan 4 11:40:41 2010
+# Linux kernel version: 2.6.34-rc5
+# Tue May 18 15:48:22 2010
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -13,8 +13,8 @@ CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
-CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_IRQ_PER_CPU=y
+CONFIG_SPARSE_IRQ=y
# CONFIG_GENERIC_GPIO is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -31,6 +31,7 @@ CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DMA_NONCOHERENT=y
+CONFIG_NEED_DMA_MAP_STATE=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
CONFIG_CONSTRUCTORS=y
@@ -46,9 +47,11 @@ CONFIG_LOCALVERSION_AUTO=y
CONFIG_HAVE_KERNEL_GZIP=y
CONFIG_HAVE_KERNEL_BZIP2=y
CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_LZO=y
CONFIG_KERNEL_GZIP=y
# CONFIG_KERNEL_BZIP2 is not set
# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_LZO is not set
CONFIG_SWAP=y
# CONFIG_SYSVIPC is not set
# CONFIG_POSIX_MQUEUE is not set
@@ -69,7 +72,6 @@ CONFIG_RCU_FANOUT=32
# CONFIG_TREE_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_GROUP_SCHED is not set
# CONFIG_CGROUPS is not set
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
@@ -80,6 +82,7 @@ CONFIG_INITRAMFS_SOURCE=""
CONFIG_RD_GZIP=y
# CONFIG_RD_BZIP2 is not set
# CONFIG_RD_LZMA is not set
+# CONFIG_RD_LZO is not set
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_SYSCTL=y
CONFIG_ANON_INODES=y
@@ -106,7 +109,7 @@ CONFIG_PERF_USE_VMALLOC=y
#
# Kernel Performance Events And Counters
#
-# CONFIG_PERF_EVENTS is not set
+CONFIG_PERF_EVENTS=y
# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_COMPAT_BRK=y
@@ -122,6 +125,7 @@ CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_DMA_ATTRS=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+CONFIG_HAVE_HW_BREAKPOINT=y
#
# GCOV-based kernel profiling
@@ -235,6 +239,7 @@ CONFIG_ARCH_SPARSEMEM_DEFAULT=y
CONFIG_MAX_ACTIVE_REGIONS=1
CONFIG_ARCH_POPULATES_NODE_MAP=y
CONFIG_ARCH_SELECT_MEMORY_MODEL=y
+CONFIG_UNCACHED_MAPPING=y
CONFIG_PAGE_SIZE_4KB=y
# CONFIG_PAGE_SIZE_8KB is not set
# CONFIG_PAGE_SIZE_16KB is not set
@@ -256,7 +261,7 @@ CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
-CONFIG_NR_QUICK=2
+CONFIG_NR_QUICK=1
# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
@@ -334,7 +339,6 @@ CONFIG_HZ=250
CONFIG_PREEMPT=y
CONFIG_GUSA=y
# CONFIG_GUSA_RB is not set
-# CONFIG_SPARSE_IRQ is not set
#
# Boot options
@@ -515,6 +519,7 @@ CONFIG_IDE_PROC_FS=y
#
# SCSI device support
#
+CONFIG_SCSI_MOD=y
# CONFIG_RAID_ATTRS is not set
# CONFIG_SCSI is not set
# CONFIG_SCSI_DMA is not set
@@ -596,6 +601,7 @@ CONFIG_SERIAL_SH_SCI_NR_UARTS=2
CONFIG_SERIAL_SH_SCI_CONSOLE=y
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_TIMBERDALE is not set
CONFIG_UNIX98_PTYS=y
# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
CONFIG_LEGACY_PTYS=y
@@ -774,6 +780,7 @@ CONFIG_MISC_FILESYSTEMS=y
# CONFIG_BEFS_FS is not set
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
+# CONFIG_LOGFS is not set
# CONFIG_CRAMFS is not set
# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
@@ -800,6 +807,7 @@ CONFIG_SUNRPC_GSS=y
CONFIG_RPCSEC_GSS_KRB5=y
# CONFIG_RPCSEC_GSS_SPKM3 is not set
# CONFIG_SMB_FS is not set
+# CONFIG_CEPH_FS is not set
# CONFIG_CIFS is not set
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
@@ -992,6 +1000,7 @@ CONFIG_CRYPTO_DES=y
#
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_HW=y
+# CONFIG_VIRTUALIZATION is not set
# CONFIG_BINARY_PRINTF is not set
#
diff --git a/arch/sh/configs/migor_defconfig b/arch/sh/configs/migor_defconfig
index d2b183117771..1c889b74cd57 100644
--- a/arch/sh/configs/migor_defconfig
+++ b/arch/sh/configs/migor_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.33-rc2
-# Mon Jan 4 11:41:41 2010
+# Linux kernel version: 2.6.34-rc5
+# Tue May 18 18:01:29 2010
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -13,8 +13,8 @@ CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
-CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_IRQ_PER_CPU=y
+CONFIG_SPARSE_IRQ=y
CONFIG_GENERIC_GPIO=y
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -33,6 +33,7 @@ CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DMA_NONCOHERENT=y
+CONFIG_NEED_DMA_MAP_STATE=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
CONFIG_CONSTRUCTORS=y
@@ -47,9 +48,11 @@ CONFIG_LOCALVERSION_AUTO=y
CONFIG_HAVE_KERNEL_GZIP=y
CONFIG_HAVE_KERNEL_BZIP2=y
CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_LZO=y
CONFIG_KERNEL_GZIP=y
# CONFIG_KERNEL_BZIP2 is not set
# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_LZO is not set
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
@@ -71,7 +74,6 @@ CONFIG_RCU_FANOUT=32
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_GROUP_SCHED is not set
# CONFIG_CGROUPS is not set
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
@@ -82,6 +84,7 @@ CONFIG_INITRAMFS_SOURCE=""
CONFIG_RD_GZIP=y
# CONFIG_RD_BZIP2 is not set
# CONFIG_RD_LZMA is not set
+# CONFIG_RD_LZO is not set
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_SYSCTL=y
CONFIG_ANON_INODES=y
@@ -109,7 +112,6 @@ CONFIG_PERF_USE_VMALLOC=y
# Kernel Performance Events And Counters
#
CONFIG_PERF_EVENTS=y
-CONFIG_EVENT_PROFILE=y
# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_COMPAT_BRK=y
@@ -117,17 +119,16 @@ CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
CONFIG_PROFILING=y
-CONFIG_TRACEPOINTS=y
CONFIG_OPROFILE=y
CONFIG_HAVE_OPROFILE=y
# CONFIG_KPROBES is not set
-CONFIG_HAVE_IOREMAP_PROT=y
CONFIG_HAVE_KPROBES=y
CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_DMA_ATTRS=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+CONFIG_HAVE_HW_BREAKPOINT=y
#
# GCOV-based kernel profiling
@@ -243,7 +244,7 @@ CONFIG_FORCE_MAX_ZONEORDER=11
CONFIG_MEMORY_START=0x0c000000
CONFIG_MEMORY_SIZE=0x04000000
CONFIG_29BIT=y
-# CONFIG_X2TLB is not set
+CONFIG_X2TLB=y
CONFIG_VSYSCALL=y
CONFIG_NUMA=y
CONFIG_NODES_SHIFT=1
@@ -254,6 +255,8 @@ CONFIG_ARCH_POPULATES_NODE_MAP=y
CONFIG_ARCH_SELECT_MEMORY_MODEL=y
CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y
+CONFIG_IOREMAP_FIXED=y
+CONFIG_UNCACHED_MAPPING=y
CONFIG_PAGE_SIZE_4KB=y
# CONFIG_PAGE_SIZE_8KB is not set
# CONFIG_PAGE_SIZE_16KB is not set
@@ -271,7 +274,7 @@ CONFIG_SPLIT_PTLOCK_CPUS=4
# CONFIG_MIGRATION is not set
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
-CONFIG_NR_QUICK=2
+CONFIG_NR_QUICK=1
# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
@@ -348,7 +351,7 @@ CONFIG_PREEMPT_NONE=y
# CONFIG_PREEMPT_VOLUNTARY is not set
# CONFIG_PREEMPT is not set
CONFIG_GUSA=y
-# CONFIG_SPARSE_IRQ is not set
+# CONFIG_INTC_USERIMASK is not set
#
# Boot options
@@ -384,6 +387,7 @@ CONFIG_SUSPEND=y
CONFIG_SUSPEND_FREEZER=y
# CONFIG_HIBERNATION is not set
CONFIG_PM_RUNTIME=y
+CONFIG_PM_OPS=y
# CONFIG_CPU_IDLE is not set
CONFIG_NET=y
@@ -391,7 +395,6 @@ CONFIG_NET=y
# Networking options
#
CONFIG_PACKET=y
-# CONFIG_PACKET_MMAP is not set
CONFIG_UNIX=y
CONFIG_XFRM=y
# CONFIG_XFRM_USER is not set
@@ -454,7 +457,6 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# Network testing
#
# CONFIG_NET_PKTGEN is not set
-# CONFIG_NET_DROP_MONITOR is not set
# CONFIG_HAMRADIO is not set
# CONFIG_CAN is not set
# CONFIG_IRDA is not set
@@ -564,6 +566,7 @@ CONFIG_MTD_NAND_IDS=y
# CONFIG_MTD_NAND_DISKONCHIP is not set
# CONFIG_MTD_NAND_NANDSIM is not set
CONFIG_MTD_NAND_PLATFORM=y
+# CONFIG_MTD_NAND_SH_FLCTL is not set
# CONFIG_MTD_ONENAND is not set
#
@@ -596,6 +599,7 @@ CONFIG_MISC_DEVICES=y
# CONFIG_ICS932S401 is not set
# CONFIG_ENCLOSURE_SERVICES is not set
# CONFIG_ISL29003 is not set
+# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_DS1682 is not set
# CONFIG_C2PORT is not set
@@ -612,6 +616,7 @@ CONFIG_HAVE_IDE=y
#
# SCSI device support
#
+CONFIG_SCSI_MOD=y
# CONFIG_RAID_ATTRS is not set
CONFIG_SCSI=y
CONFIG_SCSI_DMA=y
@@ -764,6 +769,7 @@ CONFIG_SERIAL_SH_SCI_NR_UARTS=3
CONFIG_SERIAL_SH_SCI_CONSOLE=y
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_TIMBERDALE is not set
CONFIG_UNIX98_PTYS=y
# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
CONFIG_LEGACY_PTYS=y
@@ -792,6 +798,7 @@ CONFIG_I2C_HELPER_AUTO=y
# CONFIG_I2C_OCORES is not set
CONFIG_I2C_SH_MOBILE=y
# CONFIG_I2C_SIMTEC is not set
+# CONFIG_I2C_XILINX is not set
#
# External I2C/SMBus adapter drivers
@@ -804,15 +811,9 @@ CONFIG_I2C_SH_MOBILE=y
#
# CONFIG_I2C_PCA_PLATFORM is not set
# CONFIG_I2C_STUB is not set
-
-#
-# Miscellaneous I2C Chip support
-#
-# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
# CONFIG_I2C_DEBUG_BUS is not set
-# CONFIG_I2C_DEBUG_CHIP is not set
# CONFIG_SPI is not set
#
@@ -826,13 +827,16 @@ CONFIG_GPIOLIB=y
#
# Memory mapped GPIO expanders:
#
+# CONFIG_GPIO_IT8761E is not set
#
# I2C GPIO expanders:
#
+# CONFIG_GPIO_MAX7300 is not set
# CONFIG_GPIO_MAX732X is not set
# CONFIG_GPIO_PCA953X is not set
# CONFIG_GPIO_PCF857X is not set
+# CONFIG_GPIO_ADP5588 is not set
#
# PCI GPIO expanders:
@@ -861,20 +865,23 @@ CONFIG_SSB_POSSIBLE=y
# Multifunction device drivers
#
# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_88PM860X is not set
# CONFIG_MFD_SM501 is not set
# CONFIG_MFD_SH_MOBILE_SDHI is not set
# CONFIG_HTC_PASIC3 is not set
+# CONFIG_HTC_I2CPLD is not set
# CONFIG_TPS65010 is not set
# CONFIG_TWL4030_CORE is not set
# CONFIG_MFD_TMIO is not set
# CONFIG_PMIC_DA903X is not set
# CONFIG_PMIC_ADP5520 is not set
+# CONFIG_MFD_MAX8925 is not set
# CONFIG_MFD_WM8400 is not set
# CONFIG_MFD_WM831X is not set
# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_WM8994 is not set
# CONFIG_MFD_PCF50633 is not set
# CONFIG_AB3100_CORE is not set
-# CONFIG_MFD_88PM8607 is not set
# CONFIG_REGULATOR is not set
CONFIG_MEDIA_SUPPORT=y
@@ -1054,6 +1061,7 @@ CONFIG_USB_G_SERIAL=m
# CONFIG_USB_MIDI_GADGET is not set
# CONFIG_USB_G_PRINTER is not set
# CONFIG_USB_CDC_COMPOSITE is not set
+# CONFIG_USB_G_NOKIA is not set
# CONFIG_USB_G_MULTI is not set
#
@@ -1129,8 +1137,6 @@ CONFIG_RTC_DRV_SH=y
CONFIG_UIO=y
# CONFIG_UIO_PDRV is not set
CONFIG_UIO_PDRV_GENIRQ=y
-# CONFIG_UIO_SMX is not set
-# CONFIG_UIO_SERCOS3 is not set
#
# TI VLYNQ
@@ -1143,7 +1149,6 @@ CONFIG_UIO_PDRV_GENIRQ=y
# CONFIG_EXT2_FS is not set
# CONFIG_EXT3_FS is not set
# CONFIG_EXT4_FS is not set
-CONFIG_EXT4_USE_FOR_EXT23=y
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
@@ -1202,6 +1207,7 @@ CONFIG_MISC_FILESYSTEMS=y
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
# CONFIG_JFFS2_FS is not set
+# CONFIG_LOGFS is not set
# CONFIG_CRAMFS is not set
# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
@@ -1224,6 +1230,7 @@ CONFIG_SUNRPC=y
# CONFIG_RPCSEC_GSS_KRB5 is not set
# CONFIG_RPCSEC_GSS_SPKM3 is not set
# CONFIG_SMB_FS is not set
+# CONFIG_CEPH_FS is not set
# CONFIG_CIFS is not set
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
@@ -1251,13 +1258,12 @@ CONFIG_FRAME_WARN=1024
CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
# CONFIG_DEBUG_KERNEL is not set
-CONFIG_STACKTRACE=y
CONFIG_DEBUG_BUGVERBOSE=y
# CONFIG_DEBUG_MEMORY_INIT is not set
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_LKDTM is not set
# CONFIG_LATENCYTOP is not set
# CONFIG_SYSCTL_SYSCALL_CHECK is not set
-CONFIG_NOP_TRACER=y
CONFIG_HAVE_FUNCTION_TRACER=y
CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
@@ -1265,10 +1271,7 @@ CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
CONFIG_RING_BUFFER=y
-CONFIG_EVENT_TRACING=y
-CONFIG_CONTEXT_SWITCH_TRACER=y
CONFIG_RING_BUFFER_ALLOW_SWAP=y
-CONFIG_TRACING=y
CONFIG_TRACING_SUPPORT=y
# CONFIG_FTRACE is not set
# CONFIG_DYNAMIC_DEBUG is not set
@@ -1383,7 +1386,8 @@ CONFIG_CRYPTO_WORKQUEUE=y
#
# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
-CONFIG_BINARY_PRINTF=y
+# CONFIG_VIRTUALIZATION is not set
+# CONFIG_BINARY_PRINTF is not set
#
# Library routines
diff --git a/arch/sh/configs/polaris_defconfig b/arch/sh/configs/polaris_defconfig
index d50c0314281e..826b1198959f 100644
--- a/arch/sh/configs/polaris_defconfig
+++ b/arch/sh/configs/polaris_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.33-rc2
-# Mon Jan 4 11:45:25 2010
+# Linux kernel version: 2.6.34-rc5
+# Tue May 18 15:53:51 2010
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -13,8 +13,8 @@ CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
-CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_IRQ_PER_CPU=y
+CONFIG_SPARSE_IRQ=y
# CONFIG_GENERIC_GPIO is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -30,6 +30,7 @@ CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DMA_NONCOHERENT=y
+CONFIG_NEED_DMA_MAP_STATE=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
CONFIG_CONSTRUCTORS=y
@@ -45,9 +46,11 @@ CONFIG_LOCALVERSION=""
CONFIG_HAVE_KERNEL_GZIP=y
CONFIG_HAVE_KERNEL_BZIP2=y
CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_LZO=y
CONFIG_KERNEL_GZIP=y
# CONFIG_KERNEL_BZIP2 is not set
# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_LZO is not set
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
@@ -71,11 +74,6 @@ CONFIG_RCU_FANOUT=32
# CONFIG_TREE_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_GROUP_SCHED=y
-CONFIG_FAIR_GROUP_SCHED=y
-# CONFIG_RT_GROUP_SCHED is not set
-CONFIG_USER_SCHED=y
-# CONFIG_CGROUP_SCHED is not set
# CONFIG_CGROUPS is not set
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
@@ -109,8 +107,9 @@ CONFIG_PERF_USE_VMALLOC=y
#
# Kernel Performance Events And Counters
#
-# CONFIG_PERF_EVENTS is not set
+CONFIG_PERF_EVENTS=y
# CONFIG_PERF_COUNTERS is not set
+# CONFIG_DEBUG_PERF_USE_VMALLOC is not set
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_COMPAT_BRK=y
CONFIG_SLAB=y
@@ -126,6 +125,7 @@ CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_DMA_ATTRS=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+CONFIG_HAVE_HW_BREAKPOINT=y
#
# GCOV-based kernel profiling
@@ -244,6 +244,7 @@ CONFIG_ARCH_SPARSEMEM_DEFAULT=y
CONFIG_MAX_ACTIVE_REGIONS=1
CONFIG_ARCH_POPULATES_NODE_MAP=y
CONFIG_ARCH_SELECT_MEMORY_MODEL=y
+CONFIG_UNCACHED_MAPPING=y
CONFIG_PAGE_SIZE_4KB=y
# CONFIG_PAGE_SIZE_8KB is not set
# CONFIG_PAGE_SIZE_16KB is not set
@@ -259,7 +260,7 @@ CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=999999
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
-CONFIG_NR_QUICK=2
+CONFIG_NR_QUICK=1
# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
@@ -340,7 +341,6 @@ CONFIG_SCHED_HRTICK=y
CONFIG_PREEMPT=y
CONFIG_GUSA=y
# CONFIG_GUSA_RB is not set
-# CONFIG_SPARSE_IRQ is not set
#
# Boot options
@@ -377,7 +377,6 @@ CONFIG_NET=y
# Networking options
#
CONFIG_PACKET=y
-CONFIG_PACKET_MMAP=y
CONFIG_UNIX=y
# CONFIG_NET_KEY is not set
CONFIG_INET=y
@@ -583,6 +582,7 @@ CONFIG_HAVE_IDE=y
#
# SCSI device support
#
+CONFIG_SCSI_MOD=y
# CONFIG_RAID_ATTRS is not set
# CONFIG_SCSI is not set
# CONFIG_SCSI_DMA is not set
@@ -712,6 +712,7 @@ CONFIG_SERIAL_SH_SCI_NR_UARTS=3
CONFIG_SERIAL_SH_SCI_CONSOLE=y
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_TIMBERDALE is not set
CONFIG_UNIX98_PTYS=y
# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
# CONFIG_LEGACY_PTYS is not set
@@ -829,7 +830,6 @@ CONFIG_RTC_DRV_SH=y
# CONFIG_EXT2_FS is not set
# CONFIG_EXT3_FS is not set
# CONFIG_EXT4_FS is not set
-CONFIG_EXT4_USE_FOR_EXT23=y
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
@@ -896,6 +896,7 @@ CONFIG_JFFS2_ZLIB=y
# CONFIG_JFFS2_LZO is not set
CONFIG_JFFS2_RTIME=y
# CONFIG_JFFS2_RUBIN is not set
+# CONFIG_LOGFS is not set
# CONFIG_CRAMFS is not set
# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
@@ -919,6 +920,7 @@ CONFIG_SUNRPC=y
# CONFIG_RPCSEC_GSS_KRB5 is not set
# CONFIG_RPCSEC_GSS_SPKM3 is not set
# CONFIG_SMB_FS is not set
+# CONFIG_CEPH_FS is not set
# CONFIG_CIFS is not set
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
@@ -958,6 +960,7 @@ CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
# CONFIG_TIMER_STATS is not set
# CONFIG_DEBUG_OBJECTS is not set
# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_KMEMLEAK is not set
CONFIG_DEBUG_PREEMPT=y
CONFIG_DEBUG_RT_MUTEXES=y
CONFIG_DEBUG_PI_LIST=y
@@ -1010,6 +1013,7 @@ CONFIG_FTRACE=y
CONFIG_BRANCH_PROFILE_NONE=y
# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
# CONFIG_PROFILE_ALL_BRANCHES is not set
+# CONFIG_KSYM_TRACER is not set
# CONFIG_STACK_TRACER is not set
# CONFIG_KMEMTRACE is not set
# CONFIG_WORKQUEUE_TRACER is not set
@@ -1038,6 +1042,7 @@ CONFIG_DUMP_CODE=y
CONFIG_DEFAULT_SECURITY_DAC=y
CONFIG_DEFAULT_SECURITY=""
# CONFIG_CRYPTO is not set
+# CONFIG_VIRTUALIZATION is not set
# CONFIG_BINARY_PRINTF is not set
#
diff --git a/arch/sh/configs/r7780mp_defconfig b/arch/sh/configs/r7780mp_defconfig
index efda63d4070a..4b751bd37e20 100644
--- a/arch/sh/configs/r7780mp_defconfig
+++ b/arch/sh/configs/r7780mp_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.33-rc2
-# Mon Jan 4 13:16:13 2010
+# Linux kernel version: 2.6.34-rc5
+# Tue May 18 15:58:09 2010
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -13,8 +13,8 @@ CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
-CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_IRQ_PER_CPU=y
+CONFIG_SPARSE_IRQ=y
# CONFIG_GENERIC_GPIO is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -33,6 +33,7 @@ CONFIG_ARCH_HAS_DEFAULT_IDLE=y
CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_IO_TRAPPED=y
CONFIG_DMA_NONCOHERENT=y
+CONFIG_NEED_DMA_MAP_STATE=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
CONFIG_CONSTRUCTORS=y
@@ -48,9 +49,11 @@ CONFIG_LOCALVERSION_AUTO=y
CONFIG_HAVE_KERNEL_GZIP=y
CONFIG_HAVE_KERNEL_BZIP2=y
CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_LZO=y
CONFIG_KERNEL_GZIP=y
# CONFIG_KERNEL_BZIP2 is not set
# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_LZO is not set
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
@@ -73,11 +76,6 @@ CONFIG_RCU_FANOUT=32
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_GROUP_SCHED=y
-CONFIG_FAIR_GROUP_SCHED=y
-# CONFIG_RT_GROUP_SCHED is not set
-CONFIG_USER_SCHED=y
-# CONFIG_CGROUP_SCHED is not set
# CONFIG_CGROUPS is not set
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
@@ -112,7 +110,6 @@ CONFIG_PERF_USE_VMALLOC=y
# Kernel Performance Events And Counters
#
CONFIG_PERF_EVENTS=y
-CONFIG_EVENT_PROFILE=y
# CONFIG_PERF_COUNTERS is not set
# CONFIG_DEBUG_PERF_USE_VMALLOC is not set
CONFIG_VM_EVENT_COUNTERS=y
@@ -122,7 +119,6 @@ CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
CONFIG_PROFILING=y
-CONFIG_TRACEPOINTS=y
CONFIG_OPROFILE=m
CONFIG_HAVE_OPROFILE=y
# CONFIG_KPROBES is not set
@@ -133,6 +129,7 @@ CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_DMA_ATTRS=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+CONFIG_HAVE_HW_BREAKPOINT=y
#
# GCOV-based kernel profiling
@@ -245,11 +242,8 @@ CONFIG_PAGE_OFFSET=0x80000000
CONFIG_FORCE_MAX_ZONEORDER=11
CONFIG_MEMORY_START=0x08000000
CONFIG_MEMORY_SIZE=0x08000000
-# CONFIG_29BIT is not set
-CONFIG_32BIT=y
-CONFIG_PMB_ENABLE=y
+CONFIG_29BIT=y
# CONFIG_PMB is not set
-CONFIG_PMB_FIXED=y
CONFIG_VSYSCALL=y
CONFIG_ARCH_FLATMEM_ENABLE=y
CONFIG_ARCH_SPARSEMEM_ENABLE=y
@@ -257,6 +251,7 @@ CONFIG_ARCH_SPARSEMEM_DEFAULT=y
CONFIG_MAX_ACTIVE_REGIONS=1
CONFIG_ARCH_POPULATES_NODE_MAP=y
CONFIG_ARCH_SELECT_MEMORY_MODEL=y
+CONFIG_UNCACHED_MAPPING=y
CONFIG_PAGE_SIZE_4KB=y
# CONFIG_PAGE_SIZE_8KB is not set
# CONFIG_PAGE_SIZE_16KB is not set
@@ -278,7 +273,7 @@ CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
-CONFIG_NR_QUICK=2
+CONFIG_NR_QUICK=1
# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
@@ -358,7 +353,7 @@ CONFIG_KEXEC=y
# CONFIG_PREEMPT_VOLUNTARY is not set
CONFIG_PREEMPT=y
CONFIG_GUSA=y
-# CONFIG_SPARSE_IRQ is not set
+# CONFIG_INTC_USERIMASK is not set
#
# Boot options
@@ -374,9 +369,9 @@ CONFIG_CMDLINE="console=ttySC0,115200 root=/dev/sda1"
# Bus options
#
CONFIG_PCI=y
+CONFIG_PCI_DOMAINS=y
# CONFIG_PCIEPORTBUS is not set
# CONFIG_ARCH_SUPPORTS_MSI is not set
-CONFIG_PCI_LEGACY=y
# CONFIG_PCI_DEBUG is not set
# CONFIG_PCI_STUB is not set
# CONFIG_PCI_IOV is not set
@@ -402,7 +397,6 @@ CONFIG_NET=y
# Networking options
#
CONFIG_PACKET=y
-# CONFIG_PACKET_MMAP is not set
CONFIG_UNIX=y
CONFIG_XFRM=y
# CONFIG_XFRM_USER is not set
@@ -452,6 +446,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_ATM is not set
CONFIG_STP=m
CONFIG_BRIDGE=m
+CONFIG_BRIDGE_IGMP_SNOOPING=y
# CONFIG_NET_DSA is not set
# CONFIG_VLAN_8021Q is not set
# CONFIG_DECNET is not set
@@ -472,7 +467,6 @@ CONFIG_LLC=m
# Network testing
#
# CONFIG_NET_PKTGEN is not set
-# CONFIG_NET_DROP_MONITOR is not set
# CONFIG_HAMRADIO is not set
# CONFIG_CAN is not set
# CONFIG_IRDA is not set
@@ -621,6 +615,7 @@ CONFIG_MISC_DEVICES=y
# CONFIG_ENCLOSURE_SERVICES is not set
# CONFIG_HP_ILO is not set
# CONFIG_ISL29003 is not set
+# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_DS1682 is not set
# CONFIG_C2PORT is not set
@@ -638,6 +633,7 @@ CONFIG_HAVE_IDE=y
#
# SCSI device support
#
+CONFIG_SCSI_MOD=y
# CONFIG_RAID_ATTRS is not set
CONFIG_SCSI=y
CONFIG_SCSI_DMA=y
@@ -755,6 +751,7 @@ CONFIG_SATA_SIL=y
# CONFIG_PATA_IT821X is not set
# CONFIG_PATA_IT8213 is not set
# CONFIG_PATA_JMICRON is not set
+# CONFIG_PATA_LEGACY is not set
# CONFIG_PATA_TRIFLEX is not set
# CONFIG_PATA_MARVELL is not set
# CONFIG_PATA_MPIIX is not set
@@ -791,7 +788,7 @@ CONFIG_PATA_PLATFORM=y
#
#
-# See the help texts for more information.
+# The newer stack is recommended.
#
# CONFIG_FIREWIRE is not set
# CONFIG_IEEE1394 is not set
@@ -832,6 +829,7 @@ CONFIG_NET_PCI=y
CONFIG_PCNET32=m
# CONFIG_AMD8111_ETH is not set
# CONFIG_ADAPTEC_STARFIRE is not set
+# CONFIG_KSZ884X_PCI is not set
# CONFIG_B44 is not set
# CONFIG_FORCEDETH is not set
# CONFIG_E100 is not set
@@ -884,6 +882,8 @@ CONFIG_NETDEV_10000=y
# CONFIG_CHELSIO_T1 is not set
CONFIG_CHELSIO_T3_DEPENDS=y
# CONFIG_CHELSIO_T3 is not set
+CONFIG_CHELSIO_T4_DEPENDS=y
+# CONFIG_CHELSIO_T4 is not set
# CONFIG_ENIC is not set
# CONFIG_IXGBE is not set
# CONFIG_IXGB is not set
@@ -896,6 +896,7 @@ CONFIG_CHELSIO_T3_DEPENDS=y
# CONFIG_MLX4_CORE is not set
# CONFIG_TEHUTI is not set
# CONFIG_BNX2X is not set
+# CONFIG_QLCNIC is not set
# CONFIG_QLGE is not set
# CONFIG_SFC is not set
# CONFIG_BE2NET is not set
@@ -995,6 +996,7 @@ CONFIG_SERIAL_SH_SCI_CONSOLE=y
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
# CONFIG_SERIAL_JSM is not set
+# CONFIG_SERIAL_TIMBERDALE is not set
CONFIG_UNIX98_PTYS=y
# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
CONFIG_LEGACY_PTYS=y
@@ -1043,6 +1045,7 @@ CONFIG_I2C_HIGHLANDER=y
# CONFIG_I2C_OCORES is not set
# CONFIG_I2C_SH_MOBILE is not set
# CONFIG_I2C_SIMTEC is not set
+# CONFIG_I2C_XILINX is not set
#
# External I2C/SMBus adapter drivers
@@ -1055,15 +1058,9 @@ CONFIG_I2C_HIGHLANDER=y
#
# CONFIG_I2C_PCA_PLATFORM is not set
# CONFIG_I2C_STUB is not set
-
-#
-# Miscellaneous I2C Chip support
-#
-# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
# CONFIG_I2C_DEBUG_BUS is not set
-# CONFIG_I2C_DEBUG_CHIP is not set
# CONFIG_SPI is not set
#
@@ -1087,10 +1084,11 @@ CONFIG_HWMON=y
# CONFIG_SENSORS_ADM1029 is not set
# CONFIG_SENSORS_ADM1031 is not set
# CONFIG_SENSORS_ADM9240 is not set
+# CONFIG_SENSORS_ADT7411 is not set
# CONFIG_SENSORS_ADT7462 is not set
# CONFIG_SENSORS_ADT7470 is not set
-# CONFIG_SENSORS_ADT7473 is not set
# CONFIG_SENSORS_ADT7475 is not set
+# CONFIG_SENSORS_ASC7621 is not set
# CONFIG_SENSORS_ATXP1 is not set
# CONFIG_SENSORS_DS1621 is not set
# CONFIG_SENSORS_I5K_AMB is not set
@@ -1127,6 +1125,7 @@ CONFIG_HWMON=y
# CONFIG_SENSORS_SMSC47M192 is not set
# CONFIG_SENSORS_SMSC47B397 is not set
# CONFIG_SENSORS_ADS7828 is not set
+# CONFIG_SENSORS_AMC6821 is not set
# CONFIG_SENSORS_THMC50 is not set
# CONFIG_SENSORS_TMP401 is not set
# CONFIG_SENSORS_TMP421 is not set
@@ -1156,6 +1155,7 @@ CONFIG_SSB_POSSIBLE=y
# Multifunction device drivers
#
# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_88PM860X is not set
# CONFIG_MFD_SM501 is not set
# CONFIG_MFD_SH_MOBILE_SDHI is not set
# CONFIG_HTC_PASIC3 is not set
@@ -1163,12 +1163,14 @@ CONFIG_SSB_POSSIBLE=y
# CONFIG_MFD_TMIO is not set
# CONFIG_PMIC_DA903X is not set
# CONFIG_PMIC_ADP5520 is not set
+# CONFIG_MFD_MAX8925 is not set
# CONFIG_MFD_WM8400 is not set
# CONFIG_MFD_WM831X is not set
# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_WM8994 is not set
# CONFIG_MFD_PCF50633 is not set
# CONFIG_AB3100_CORE is not set
-# CONFIG_MFD_88PM8607 is not set
+# CONFIG_LPC_SCH is not set
# CONFIG_REGULATOR is not set
# CONFIG_MEDIA_SUPPORT is not set
@@ -1176,6 +1178,7 @@ CONFIG_SSB_POSSIBLE=y
# Graphics support
#
CONFIG_VGA_ARB=y
+CONFIG_VGA_ARB_MAX_GPUS=16
# CONFIG_DRM is not set
# CONFIG_VGASTATE is not set
# CONFIG_VIDEO_OUTPUT_CONTROL is not set
@@ -1372,6 +1375,7 @@ CONFIG_MISC_FILESYSTEMS=y
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
# CONFIG_JFFS2_FS is not set
+# CONFIG_LOGFS is not set
# CONFIG_CRAMFS is not set
# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
@@ -1402,6 +1406,7 @@ CONFIG_SUNRPC_GSS=y
CONFIG_RPCSEC_GSS_KRB5=y
# CONFIG_RPCSEC_GSS_SPKM3 is not set
# CONFIG_SMB_FS is not set
+# CONFIG_CEPH_FS is not set
# CONFIG_CIFS is not set
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
@@ -1480,6 +1485,7 @@ CONFIG_SCHED_DEBUG=y
# CONFIG_TIMER_STATS is not set
# CONFIG_DEBUG_OBJECTS is not set
# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_KMEMLEAK is not set
# CONFIG_DEBUG_PREEMPT is not set
# CONFIG_DEBUG_RT_MUTEXES is not set
# CONFIG_RT_MUTEX_TESTER is not set
@@ -1490,7 +1496,6 @@ CONFIG_SCHED_DEBUG=y
# CONFIG_LOCK_STAT is not set
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
-CONFIG_STACKTRACE=y
# CONFIG_DEBUG_KOBJECT is not set
CONFIG_DEBUG_BUGVERBOSE=y
CONFIG_DEBUG_INFO=y
@@ -1507,11 +1512,11 @@ CONFIG_DEBUG_INFO=y
# CONFIG_BACKTRACE_SELF_TEST is not set
# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
+# CONFIG_LKDTM is not set
# CONFIG_FAULT_INJECTION is not set
# CONFIG_LATENCYTOP is not set
# CONFIG_SYSCTL_SYSCALL_CHECK is not set
# CONFIG_PAGE_POISONING is not set
-CONFIG_NOP_TRACER=y
CONFIG_HAVE_FUNCTION_TRACER=y
CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
@@ -1519,10 +1524,7 @@ CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
CONFIG_RING_BUFFER=y
-CONFIG_EVENT_TRACING=y
-CONFIG_CONTEXT_SWITCH_TRACER=y
CONFIG_RING_BUFFER_ALLOW_SWAP=y
-CONFIG_TRACING=y
CONFIG_TRACING_SUPPORT=y
CONFIG_FTRACE=y
# CONFIG_FUNCTION_TRACER is not set
@@ -1535,6 +1537,7 @@ CONFIG_FTRACE=y
CONFIG_BRANCH_PROFILE_NONE=y
# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
# CONFIG_PROFILE_ALL_BRANCHES is not set
+# CONFIG_KSYM_TRACER is not set
# CONFIG_STACK_TRACER is not set
# CONFIG_KMEMTRACE is not set
# CONFIG_WORKQUEUE_TRACER is not set
@@ -1662,7 +1665,8 @@ CONFIG_CRYPTO_DES=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_HW=y
# CONFIG_CRYPTO_DEV_HIFN_795X is not set
-CONFIG_BINARY_PRINTF=y
+# CONFIG_VIRTUALIZATION is not set
+# CONFIG_BINARY_PRINTF is not set
#
# Library routines
diff --git a/arch/sh/configs/r7785rp_defconfig b/arch/sh/configs/r7785rp_defconfig
index f4b00451dcee..f5e3819469e9 100644
--- a/arch/sh/configs/r7785rp_defconfig
+++ b/arch/sh/configs/r7785rp_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.33-rc2
-# Mon Jan 4 13:19:35 2010
+# Linux kernel version: 2.6.34-rc5
+# Tue May 18 16:03:27 2010
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -13,8 +13,8 @@ CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
-CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_IRQ_PER_CPU=y
+CONFIG_SPARSE_IRQ=y
CONFIG_GENERIC_GPIO=y
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -34,6 +34,7 @@ CONFIG_ARCH_HAS_DEFAULT_IDLE=y
CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_IO_TRAPPED=y
CONFIG_DMA_NONCOHERENT=y
+CONFIG_NEED_DMA_MAP_STATE=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
CONFIG_CONSTRUCTORS=y
@@ -49,9 +50,11 @@ CONFIG_LOCALVERSION_AUTO=y
CONFIG_HAVE_KERNEL_GZIP=y
CONFIG_HAVE_KERNEL_BZIP2=y
CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_LZO=y
CONFIG_KERNEL_GZIP=y
# CONFIG_KERNEL_BZIP2 is not set
# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_LZO is not set
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
@@ -77,7 +80,6 @@ CONFIG_TREE_RCU_TRACE=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_GROUP_SCHED is not set
# CONFIG_CGROUPS is not set
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
@@ -112,7 +114,6 @@ CONFIG_PERF_USE_VMALLOC=y
# Kernel Performance Events And Counters
#
CONFIG_PERF_EVENTS=y
-CONFIG_EVENT_PROFILE=y
# CONFIG_PERF_COUNTERS is not set
# CONFIG_DEBUG_PERF_USE_VMALLOC is not set
CONFIG_VM_EVENT_COUNTERS=y
@@ -122,18 +123,17 @@ CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
CONFIG_PROFILING=y
-CONFIG_TRACEPOINTS=y
CONFIG_OPROFILE=y
CONFIG_HAVE_OPROFILE=y
CONFIG_KPROBES=y
CONFIG_KRETPROBES=y
-CONFIG_HAVE_IOREMAP_PROT=y
CONFIG_HAVE_KPROBES=y
CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_DMA_ATTRS=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+CONFIG_HAVE_HW_BREAKPOINT=y
#
# GCOV-based kernel profiling
@@ -247,12 +247,9 @@ CONFIG_PAGE_OFFSET=0x80000000
CONFIG_FORCE_MAX_ZONEORDER=11
CONFIG_MEMORY_START=0x08000000
CONFIG_MEMORY_SIZE=0x08000000
-# CONFIG_29BIT is not set
-CONFIG_32BIT=y
-CONFIG_PMB_ENABLE=y
+CONFIG_29BIT=y
# CONFIG_PMB is not set
-CONFIG_PMB_FIXED=y
-# CONFIG_X2TLB is not set
+CONFIG_X2TLB=y
CONFIG_VSYSCALL=y
# CONFIG_NUMA is not set
CONFIG_ARCH_FLATMEM_ENABLE=y
@@ -263,6 +260,8 @@ CONFIG_ARCH_POPULATES_NODE_MAP=y
CONFIG_ARCH_SELECT_MEMORY_MODEL=y
CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y
+CONFIG_IOREMAP_FIXED=y
+CONFIG_UNCACHED_MAPPING=y
CONFIG_PAGE_SIZE_4KB=y
# CONFIG_PAGE_SIZE_8KB is not set
# CONFIG_PAGE_SIZE_16KB is not set
@@ -285,7 +284,7 @@ CONFIG_SPLIT_PTLOCK_CPUS=999999
CONFIG_MIGRATION=y
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
-CONFIG_NR_QUICK=2
+CONFIG_NR_QUICK=1
# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
@@ -303,6 +302,7 @@ CONFIG_CPU_LITTLE_ENDIAN=y
# CONFIG_CPU_BIG_ENDIAN is not set
CONFIG_SH_FPU=y
CONFIG_SH_STORE_QUEUES=y
+# CONFIG_SPECULATIVE_EXECUTION is not set
CONFIG_CPU_HAS_INTEVT=y
CONFIG_CPU_HAS_SR_RB=y
CONFIG_CPU_HAS_FPU=y
@@ -377,7 +377,7 @@ CONFIG_KEXEC=y
# CONFIG_PREEMPT_VOLUNTARY is not set
CONFIG_PREEMPT=y
CONFIG_GUSA=y
-# CONFIG_SPARSE_IRQ is not set
+# CONFIG_INTC_USERIMASK is not set
#
# Boot options
@@ -393,9 +393,9 @@ CONFIG_CMDLINE="console=ttySC0,115200 root=/dev/sda1"
# Bus options
#
CONFIG_PCI=y
+CONFIG_PCI_DOMAINS=y
# CONFIG_PCIEPORTBUS is not set
# CONFIG_ARCH_SUPPORTS_MSI is not set
-# CONFIG_PCI_LEGACY is not set
# CONFIG_PCI_DEBUG is not set
# CONFIG_PCI_STUB is not set
# CONFIG_PCI_IOV is not set
@@ -421,7 +421,6 @@ CONFIG_NET=y
# Networking options
#
CONFIG_PACKET=y
-# CONFIG_PACKET_MMAP is not set
CONFIG_UNIX=y
CONFIG_XFRM=y
# CONFIG_XFRM_USER is not set
@@ -471,6 +470,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_ATM is not set
CONFIG_STP=m
CONFIG_BRIDGE=m
+CONFIG_BRIDGE_IGMP_SNOOPING=y
# CONFIG_NET_DSA is not set
# CONFIG_VLAN_8021Q is not set
# CONFIG_DECNET is not set
@@ -492,7 +492,6 @@ CONFIG_LLC=m
#
# CONFIG_NET_PKTGEN is not set
# CONFIG_NET_TCPPROBE is not set
-# CONFIG_NET_DROP_MONITOR is not set
# CONFIG_HAMRADIO is not set
# CONFIG_CAN is not set
# CONFIG_IRDA is not set
@@ -557,6 +556,7 @@ CONFIG_MISC_DEVICES=y
# CONFIG_ENCLOSURE_SERVICES is not set
# CONFIG_HP_ILO is not set
# CONFIG_ISL29003 is not set
+# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_DS1682 is not set
# CONFIG_C2PORT is not set
@@ -574,6 +574,7 @@ CONFIG_HAVE_IDE=y
#
# SCSI device support
#
+CONFIG_SCSI_MOD=y
# CONFIG_RAID_ATTRS is not set
CONFIG_SCSI=y
CONFIG_SCSI_DMA=y
@@ -691,6 +692,7 @@ CONFIG_SATA_SIL=y
# CONFIG_PATA_IT821X is not set
# CONFIG_PATA_IT8213 is not set
# CONFIG_PATA_JMICRON is not set
+# CONFIG_PATA_LEGACY is not set
# CONFIG_PATA_TRIFLEX is not set
# CONFIG_PATA_MARVELL is not set
# CONFIG_PATA_MPIIX is not set
@@ -727,7 +729,7 @@ CONFIG_PATA_PLATFORM=y
#
#
-# See the help texts for more information.
+# The newer stack is recommended.
#
# CONFIG_FIREWIRE is not set
# CONFIG_IEEE1394 is not set
@@ -797,6 +799,8 @@ CONFIG_NETDEV_10000=y
# CONFIG_CHELSIO_T1 is not set
CONFIG_CHELSIO_T3_DEPENDS=y
# CONFIG_CHELSIO_T3 is not set
+CONFIG_CHELSIO_T4_DEPENDS=y
+# CONFIG_CHELSIO_T4 is not set
# CONFIG_ENIC is not set
# CONFIG_IXGBE is not set
# CONFIG_IXGB is not set
@@ -809,6 +813,7 @@ CONFIG_CHELSIO_T3_DEPENDS=y
# CONFIG_MLX4_CORE is not set
# CONFIG_TEHUTI is not set
# CONFIG_BNX2X is not set
+# CONFIG_QLCNIC is not set
# CONFIG_QLGE is not set
# CONFIG_SFC is not set
# CONFIG_BE2NET is not set
@@ -910,6 +915,7 @@ CONFIG_SERIAL_SH_SCI_CONSOLE=y
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
# CONFIG_SERIAL_JSM is not set
+# CONFIG_SERIAL_TIMBERDALE is not set
CONFIG_UNIX98_PTYS=y
# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
CONFIG_LEGACY_PTYS=y
@@ -959,6 +965,7 @@ CONFIG_I2C_HIGHLANDER=y
# CONFIG_I2C_OCORES is not set
# CONFIG_I2C_SH_MOBILE is not set
# CONFIG_I2C_SIMTEC is not set
+# CONFIG_I2C_XILINX is not set
#
# External I2C/SMBus adapter drivers
@@ -971,15 +978,9 @@ CONFIG_I2C_HIGHLANDER=y
#
# CONFIG_I2C_PCA_PLATFORM is not set
# CONFIG_I2C_STUB is not set
-
-#
-# Miscellaneous I2C Chip support
-#
-# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
# CONFIG_I2C_DEBUG_BUS is not set
-# CONFIG_I2C_DEBUG_CHIP is not set
# CONFIG_SPI is not set
#
@@ -994,13 +995,17 @@ CONFIG_GPIOLIB=y
#
# Memory mapped GPIO expanders:
#
+# CONFIG_GPIO_IT8761E is not set
+# CONFIG_GPIO_SCH is not set
#
# I2C GPIO expanders:
#
+# CONFIG_GPIO_MAX7300 is not set
# CONFIG_GPIO_MAX732X is not set
# CONFIG_GPIO_PCA953X is not set
# CONFIG_GPIO_PCF857X is not set
+# CONFIG_GPIO_ADP5588 is not set
#
# PCI GPIO expanders:
@@ -1033,10 +1038,11 @@ CONFIG_HWMON=y
# CONFIG_SENSORS_ADM1029 is not set
# CONFIG_SENSORS_ADM1031 is not set
# CONFIG_SENSORS_ADM9240 is not set
+# CONFIG_SENSORS_ADT7411 is not set
# CONFIG_SENSORS_ADT7462 is not set
# CONFIG_SENSORS_ADT7470 is not set
-# CONFIG_SENSORS_ADT7473 is not set
# CONFIG_SENSORS_ADT7475 is not set
+# CONFIG_SENSORS_ASC7621 is not set
# CONFIG_SENSORS_ATXP1 is not set
# CONFIG_SENSORS_DS1621 is not set
# CONFIG_SENSORS_I5K_AMB is not set
@@ -1074,6 +1080,7 @@ CONFIG_HWMON=y
# CONFIG_SENSORS_SMSC47M192 is not set
# CONFIG_SENSORS_SMSC47B397 is not set
# CONFIG_SENSORS_ADS7828 is not set
+# CONFIG_SENSORS_AMC6821 is not set
# CONFIG_SENSORS_THMC50 is not set
# CONFIG_SENSORS_TMP401 is not set
# CONFIG_SENSORS_TMP421 is not set
@@ -1102,20 +1109,25 @@ CONFIG_SSB_POSSIBLE=y
# Multifunction device drivers
#
# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_88PM860X is not set
# CONFIG_MFD_SM501 is not set
# CONFIG_MFD_SH_MOBILE_SDHI is not set
# CONFIG_HTC_PASIC3 is not set
+# CONFIG_HTC_I2CPLD is not set
# CONFIG_TPS65010 is not set
# CONFIG_TWL4030_CORE is not set
# CONFIG_MFD_TMIO is not set
# CONFIG_PMIC_DA903X is not set
# CONFIG_PMIC_ADP5520 is not set
+# CONFIG_MFD_MAX8925 is not set
# CONFIG_MFD_WM8400 is not set
# CONFIG_MFD_WM831X is not set
# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_WM8994 is not set
# CONFIG_MFD_PCF50633 is not set
# CONFIG_AB3100_CORE is not set
-# CONFIG_MFD_88PM8607 is not set
+# CONFIG_MFD_TIMBERDALE is not set
+# CONFIG_LPC_SCH is not set
# CONFIG_REGULATOR is not set
# CONFIG_MEDIA_SUPPORT is not set
@@ -1123,6 +1135,7 @@ CONFIG_SSB_POSSIBLE=y
# Graphics support
#
CONFIG_VGA_ARB=y
+CONFIG_VGA_ARB_MAX_GPUS=16
# CONFIG_DRM is not set
# CONFIG_VGASTATE is not set
# CONFIG_VIDEO_OUTPUT_CONTROL is not set
@@ -1371,6 +1384,7 @@ CONFIG_MISC_FILESYSTEMS=y
# CONFIG_BEFS_FS is not set
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
+# CONFIG_LOGFS is not set
# CONFIG_CRAMFS is not set
# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
@@ -1401,6 +1415,7 @@ CONFIG_SUNRPC_GSS=y
CONFIG_RPCSEC_GSS_KRB5=y
# CONFIG_RPCSEC_GSS_SPKM3 is not set
# CONFIG_SMB_FS is not set
+# CONFIG_CEPH_FS is not set
# CONFIG_CIFS is not set
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
@@ -1475,6 +1490,7 @@ CONFIG_SCHED_DEBUG=y
# CONFIG_TIMER_STATS is not set
# CONFIG_DEBUG_OBJECTS is not set
# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_KMEMLEAK is not set
# CONFIG_DEBUG_PREEMPT is not set
# CONFIG_DEBUG_RT_MUTEXES is not set
# CONFIG_RT_MUTEX_TESTER is not set
@@ -1510,7 +1526,6 @@ CONFIG_FRAME_POINTER=y
# CONFIG_LATENCYTOP is not set
# CONFIG_SYSCTL_SYSCALL_CHECK is not set
# CONFIG_PAGE_POISONING is not set
-CONFIG_NOP_TRACER=y
CONFIG_HAVE_FUNCTION_TRACER=y
CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
@@ -1518,10 +1533,7 @@ CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
CONFIG_RING_BUFFER=y
-CONFIG_EVENT_TRACING=y
-CONFIG_CONTEXT_SWITCH_TRACER=y
CONFIG_RING_BUFFER_ALLOW_SWAP=y
-CONFIG_TRACING=y
CONFIG_TRACING_SUPPORT=y
CONFIG_FTRACE=y
# CONFIG_FUNCTION_TRACER is not set
@@ -1534,6 +1546,7 @@ CONFIG_FTRACE=y
CONFIG_BRANCH_PROFILE_NONE=y
# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
# CONFIG_PROFILE_ALL_BRANCHES is not set
+# CONFIG_KSYM_TRACER is not set
# CONFIG_STACK_TRACER is not set
# CONFIG_KMEMTRACE is not set
# CONFIG_WORKQUEUE_TRACER is not set
@@ -1661,7 +1674,8 @@ CONFIG_CRYPTO_DES=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_HW=y
# CONFIG_CRYPTO_DEV_HIFN_795X is not set
-CONFIG_BINARY_PRINTF=y
+# CONFIG_VIRTUALIZATION is not set
+# CONFIG_BINARY_PRINTF is not set
#
# Library routines
diff --git a/arch/sh/configs/rsk7201_defconfig b/arch/sh/configs/rsk7201_defconfig
index 2fc635a5a8c5..80e7b41ca620 100644
--- a/arch/sh/configs/rsk7201_defconfig
+++ b/arch/sh/configs/rsk7201_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.33-rc2
-# Mon Jan 4 13:23:12 2010
+# Linux kernel version: 2.6.34-rc5
+# Tue May 18 16:09:18 2010
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -13,8 +13,8 @@ CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
-CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_IRQ_PER_CPU=y
+CONFIG_SPARSE_IRQ=y
# CONFIG_GENERIC_GPIO is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -30,6 +30,7 @@ CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DMA_NONCOHERENT=y
+CONFIG_NEED_DMA_MAP_STATE=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
CONFIG_CONSTRUCTORS=y
@@ -44,9 +45,11 @@ CONFIG_LOCALVERSION=""
CONFIG_HAVE_KERNEL_GZIP=y
CONFIG_HAVE_KERNEL_BZIP2=y
CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_LZO=y
CONFIG_KERNEL_GZIP=y
# CONFIG_KERNEL_BZIP2 is not set
# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_LZO is not set
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
CONFIG_BSD_PROCESS_ACCT=y
@@ -65,7 +68,6 @@ CONFIG_RCU_FANOUT=32
CONFIG_IKCONFIG=y
# CONFIG_IKCONFIG_PROC is not set
CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_GROUP_SCHED is not set
# CONFIG_CGROUPS is not set
# CONFIG_SYSFS_DEPRECATED_V2 is not set
# CONFIG_RELAY is not set
@@ -79,6 +81,7 @@ CONFIG_INITRAMFS_SOURCE=""
CONFIG_RD_GZIP=y
# CONFIG_RD_BZIP2 is not set
# CONFIG_RD_LZMA is not set
+# CONFIG_RD_LZO is not set
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_SYSCTL=y
CONFIG_ANON_INODES=y
@@ -105,7 +108,6 @@ CONFIG_PERF_USE_VMALLOC=y
# Kernel Performance Events And Counters
#
CONFIG_PERF_EVENTS=y
-CONFIG_EVENT_PROFILE=y
# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_COMPAT_BRK=y
@@ -114,7 +116,6 @@ CONFIG_COMPAT_BRK=y
CONFIG_SLOB=y
# CONFIG_MMAP_ALLOW_UNINITIALIZED is not set
CONFIG_PROFILING=y
-CONFIG_TRACEPOINTS=y
CONFIG_OPROFILE=y
CONFIG_HAVE_OPROFILE=y
# CONFIG_KPROBES is not set
@@ -124,6 +125,7 @@ CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_DMA_ATTRS=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+CONFIG_HAVE_HW_BREAKPOINT=y
#
# GCOV-based kernel profiling
@@ -240,6 +242,7 @@ CONFIG_ARCH_SPARSEMEM_DEFAULT=y
CONFIG_MAX_ACTIVE_REGIONS=1
CONFIG_ARCH_POPULATES_NODE_MAP=y
CONFIG_ARCH_SELECT_MEMORY_MODEL=y
+CONFIG_UNCACHED_MAPPING=y
CONFIG_PAGE_SIZE_4KB=y
# CONFIG_PAGE_SIZE_8KB is not set
# CONFIG_PAGE_SIZE_16KB is not set
@@ -255,7 +258,7 @@ CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
-CONFIG_NR_QUICK=2
+CONFIG_NR_QUICK=1
CONFIG_NOMMU_INITIAL_TRIM_EXCESS=1
#
@@ -326,7 +329,6 @@ CONFIG_PREEMPT_NONE=y
# CONFIG_PREEMPT_VOLUNTARY is not set
# CONFIG_PREEMPT is not set
CONFIG_GUSA=y
-# CONFIG_SPARSE_IRQ is not set
#
# Boot options
@@ -485,6 +487,7 @@ CONFIG_HAVE_IDE=y
#
# SCSI device support
#
+CONFIG_SCSI_MOD=y
# CONFIG_RAID_ATTRS is not set
# CONFIG_SCSI is not set
# CONFIG_SCSI_DMA is not set
@@ -545,6 +548,7 @@ CONFIG_SERIAL_SH_SCI_NR_UARTS=8
CONFIG_SERIAL_SH_SCI_CONSOLE=y
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_TIMBERDALE is not set
# CONFIG_UNIX98_PTYS is not set
# CONFIG_LEGACY_PTYS is not set
# CONFIG_IPMI_HANDLER is not set
@@ -657,7 +661,6 @@ CONFIG_EXT2_FS=y
# CONFIG_EXT2_FS_XATTR is not set
# CONFIG_EXT3_FS is not set
# CONFIG_EXT4_FS is not set
-CONFIG_EXT4_USE_FOR_EXT23=y
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
@@ -720,6 +723,7 @@ CONFIG_JFFS2_ZLIB=y
# CONFIG_JFFS2_LZO is not set
CONFIG_JFFS2_RTIME=y
# CONFIG_JFFS2_RUBIN is not set
+# CONFIG_LOGFS is not set
# CONFIG_CRAMFS is not set
# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
@@ -756,13 +760,12 @@ CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
# CONFIG_DEBUG_KERNEL is not set
-CONFIG_STACKTRACE=y
CONFIG_DEBUG_BUGVERBOSE=y
# CONFIG_DEBUG_MEMORY_INIT is not set
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_LKDTM is not set
# CONFIG_LATENCYTOP is not set
CONFIG_SYSCTL_SYSCALL_CHECK=y
-CONFIG_NOP_TRACER=y
CONFIG_HAVE_FUNCTION_TRACER=y
CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
@@ -770,10 +773,7 @@ CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
CONFIG_RING_BUFFER=y
-CONFIG_EVENT_TRACING=y
-CONFIG_CONTEXT_SWITCH_TRACER=y
CONFIG_RING_BUFFER_ALLOW_SWAP=y
-CONFIG_TRACING=y
CONFIG_TRACING_SUPPORT=y
# CONFIG_FTRACE is not set
# CONFIG_DYNAMIC_DEBUG is not set
@@ -795,7 +795,8 @@ CONFIG_HAVE_ARCH_KGDB=y
CONFIG_DEFAULT_SECURITY_DAC=y
CONFIG_DEFAULT_SECURITY=""
# CONFIG_CRYPTO is not set
-CONFIG_BINARY_PRINTF=y
+# CONFIG_VIRTUALIZATION is not set
+# CONFIG_BINARY_PRINTF is not set
#
# Library routines
diff --git a/arch/sh/configs/rsk7203_defconfig b/arch/sh/configs/rsk7203_defconfig
index 0169e60e0947..66eb7d7dad44 100644
--- a/arch/sh/configs/rsk7203_defconfig
+++ b/arch/sh/configs/rsk7203_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.33-rc2
-# Mon Jan 4 13:23:54 2010
+# Linux kernel version: 2.6.34-rc5
+# Tue May 18 16:11:37 2010
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -13,8 +13,8 @@ CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
-CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_IRQ_PER_CPU=y
+CONFIG_SPARSE_IRQ=y
CONFIG_GENERIC_GPIO=y
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -31,6 +31,7 @@ CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DMA_NONCOHERENT=y
+CONFIG_NEED_DMA_MAP_STATE=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
CONFIG_CONSTRUCTORS=y
@@ -45,9 +46,11 @@ CONFIG_LOCALVERSION=""
CONFIG_HAVE_KERNEL_GZIP=y
CONFIG_HAVE_KERNEL_BZIP2=y
CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_LZO=y
CONFIG_KERNEL_GZIP=y
# CONFIG_KERNEL_BZIP2 is not set
# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_LZO is not set
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
CONFIG_POSIX_MQUEUE=y
@@ -70,7 +73,6 @@ CONFIG_RCU_FANOUT=32
CONFIG_IKCONFIG=y
# CONFIG_IKCONFIG_PROC is not set
CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_GROUP_SCHED is not set
# CONFIG_CGROUPS is not set
# CONFIG_SYSFS_DEPRECATED_V2 is not set
# CONFIG_RELAY is not set
@@ -85,6 +87,7 @@ CONFIG_INITRAMFS_SOURCE=""
CONFIG_RD_GZIP=y
# CONFIG_RD_BZIP2 is not set
# CONFIG_RD_LZMA is not set
+# CONFIG_RD_LZO is not set
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_SYSCTL=y
CONFIG_ANON_INODES=y
@@ -112,7 +115,6 @@ CONFIG_PERF_USE_VMALLOC=y
# Kernel Performance Events And Counters
#
CONFIG_PERF_EVENTS=y
-CONFIG_EVENT_PROFILE=y
# CONFIG_PERF_COUNTERS is not set
# CONFIG_DEBUG_PERF_USE_VMALLOC is not set
CONFIG_VM_EVENT_COUNTERS=y
@@ -122,7 +124,6 @@ CONFIG_COMPAT_BRK=y
CONFIG_SLOB=y
# CONFIG_MMAP_ALLOW_UNINITIALIZED is not set
CONFIG_PROFILING=y
-CONFIG_TRACEPOINTS=y
CONFIG_OPROFILE=y
CONFIG_HAVE_OPROFILE=y
# CONFIG_KPROBES is not set
@@ -132,6 +133,7 @@ CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_DMA_ATTRS=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+CONFIG_HAVE_HW_BREAKPOINT=y
#
# GCOV-based kernel profiling
@@ -248,6 +250,7 @@ CONFIG_ARCH_SPARSEMEM_DEFAULT=y
CONFIG_MAX_ACTIVE_REGIONS=1
CONFIG_ARCH_POPULATES_NODE_MAP=y
CONFIG_ARCH_SELECT_MEMORY_MODEL=y
+CONFIG_UNCACHED_MAPPING=y
CONFIG_PAGE_SIZE_4KB=y
# CONFIG_PAGE_SIZE_8KB is not set
# CONFIG_PAGE_SIZE_16KB is not set
@@ -263,7 +266,7 @@ CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
-CONFIG_NR_QUICK=2
+CONFIG_NR_QUICK=1
CONFIG_NOMMU_INITIAL_TRIM_EXCESS=1
#
@@ -350,7 +353,6 @@ CONFIG_PREEMPT_NONE=y
# CONFIG_PREEMPT_VOLUNTARY is not set
# CONFIG_PREEMPT is not set
CONFIG_GUSA=y
-# CONFIG_SPARSE_IRQ is not set
#
# Boot options
@@ -448,7 +450,6 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# Network testing
#
# CONFIG_NET_PKTGEN is not set
-# CONFIG_NET_DROP_MONITOR is not set
# CONFIG_HAMRADIO is not set
# CONFIG_CAN is not set
# CONFIG_IRDA is not set
@@ -592,6 +593,7 @@ CONFIG_HAVE_IDE=y
#
# SCSI device support
#
+CONFIG_SCSI_MOD=y
# CONFIG_RAID_ATTRS is not set
# CONFIG_SCSI is not set
# CONFIG_SCSI_DMA is not set
@@ -662,6 +664,7 @@ CONFIG_WLAN=y
# CONFIG_USB_PEGASUS is not set
# CONFIG_USB_RTL8150 is not set
# CONFIG_USB_USBNET is not set
+# CONFIG_USB_IPHETH is not set
# CONFIG_WAN is not set
# CONFIG_PPP is not set
# CONFIG_SLIP is not set
@@ -723,6 +726,7 @@ CONFIG_SERIAL_SH_SCI_NR_UARTS=4
CONFIG_SERIAL_SH_SCI_CONSOLE=y
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_TIMBERDALE is not set
# CONFIG_UNIX98_PTYS is not set
# CONFIG_LEGACY_PTYS is not set
# CONFIG_IPMI_HANDLER is not set
@@ -745,6 +749,7 @@ CONFIG_GPIOLIB=y
#
# Memory mapped GPIO expanders:
#
+# CONFIG_GPIO_IT8761E is not set
#
# I2C GPIO expanders:
@@ -783,6 +788,7 @@ CONFIG_SSB_POSSIBLE=y
# CONFIG_MFD_TMIO is not set
CONFIG_REGULATOR=y
# CONFIG_REGULATOR_DEBUG is not set
+# CONFIG_REGULATOR_DUMMY is not set
# CONFIG_REGULATOR_FIXED_VOLTAGE is not set
# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set
# CONFIG_REGULATOR_USERSPACE_CONSUMER is not set
@@ -816,6 +822,7 @@ CONFIG_USB_HID=y
#
# Special HID drivers
#
+# CONFIG_HID_3M_PCT is not set
CONFIG_HID_A4TECH=y
CONFIG_HID_APPLE=y
CONFIG_HID_BELKIN=y
@@ -831,14 +838,19 @@ CONFIG_HID_GYRATION=y
CONFIG_HID_LOGITECH=y
# CONFIG_LOGITECH_FF is not set
# CONFIG_LOGIRUMBLEPAD2_FF is not set
+# CONFIG_LOGIG940_FF is not set
CONFIG_HID_MICROSOFT=y
+# CONFIG_HID_MOSART is not set
CONFIG_HID_MONTEREY=y
# CONFIG_HID_NTRIG is not set
+# CONFIG_HID_ORTEK is not set
CONFIG_HID_PANTHERLORD=y
# CONFIG_PANTHERLORD_FF is not set
CONFIG_HID_PETALYNX=y
+# CONFIG_HID_QUANTA is not set
CONFIG_HID_SAMSUNG=y
CONFIG_HID_SONY=y
+# CONFIG_HID_STANTUM is not set
CONFIG_HID_SUNPLUS=y
# CONFIG_HID_GREENASIA is not set
# CONFIG_HID_SMARTJOYPLUS is not set
@@ -859,7 +871,6 @@ CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
CONFIG_USB_DEVICEFS=y
CONFIG_USB_DEVICE_CLASS=y
# CONFIG_USB_DYNAMIC_MINORS is not set
-# CONFIG_USB_SUSPEND is not set
# CONFIG_USB_OTG is not set
# CONFIG_USB_OTG_WHITELIST is not set
# CONFIG_USB_OTG_BLACKLIST_HUB is not set
@@ -916,7 +927,6 @@ CONFIG_USB_R8A66597_HCD=y
# CONFIG_USB_RIO500 is not set
# CONFIG_USB_LEGOTOWER is not set
# CONFIG_USB_LCD is not set
-# CONFIG_USB_BERRY_CHARGE is not set
# CONFIG_USB_LED is not set
# CONFIG_USB_CYPRESS_CY7C63 is not set
# CONFIG_USB_CYTHERM is not set
@@ -928,7 +938,6 @@ CONFIG_USB_R8A66597_HCD=y
# CONFIG_USB_IOWARRIOR is not set
# CONFIG_USB_TEST is not set
# CONFIG_USB_ISIGHTFW is not set
-# CONFIG_USB_VST is not set
# CONFIG_USB_GADGET is not set
#
@@ -948,11 +957,11 @@ CONFIG_LEDS_GPIO=y
CONFIG_LEDS_GPIO_PLATFORM=y
# CONFIG_LEDS_REGULATOR is not set
# CONFIG_LEDS_LT3593 is not set
+CONFIG_LEDS_TRIGGERS=y
#
# LED Triggers
#
-CONFIG_LEDS_TRIGGERS=y
CONFIG_LEDS_TRIGGER_TIMER=y
CONFIG_LEDS_TRIGGER_HEARTBEAT=y
CONFIG_LEDS_TRIGGER_BACKLIGHT=y
@@ -1018,7 +1027,6 @@ CONFIG_RTC_DRV_SH=y
# CONFIG_EXT2_FS is not set
# CONFIG_EXT3_FS is not set
# CONFIG_EXT4_FS is not set
-CONFIG_EXT4_USE_FOR_EXT23=y
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
@@ -1072,6 +1080,7 @@ CONFIG_MISC_FILESYSTEMS=y
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
# CONFIG_JFFS2_FS is not set
+# CONFIG_LOGFS is not set
# CONFIG_CRAMFS is not set
# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
@@ -1098,6 +1107,7 @@ CONFIG_SUNRPC=y
# CONFIG_RPCSEC_GSS_KRB5 is not set
# CONFIG_RPCSEC_GSS_SPKM3 is not set
# CONFIG_SMB_FS is not set
+# CONFIG_CEPH_FS is not set
# CONFIG_CIFS is not set
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
@@ -1180,6 +1190,7 @@ CONFIG_DEBUG_OBJECTS=y
# CONFIG_DEBUG_OBJECTS_TIMERS is not set
# CONFIG_DEBUG_OBJECTS_WORK is not set
CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT=1
+# CONFIG_DEBUG_KMEMLEAK is not set
# CONFIG_DEBUG_RT_MUTEXES is not set
# CONFIG_RT_MUTEX_TESTER is not set
# CONFIG_DEBUG_SPINLOCK is not set
@@ -1189,7 +1200,6 @@ CONFIG_DEBUG_MUTEXES=y
# CONFIG_LOCK_STAT is not set
CONFIG_DEBUG_SPINLOCK_SLEEP=y
# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
-CONFIG_STACKTRACE=y
# CONFIG_DEBUG_KOBJECT is not set
CONFIG_DEBUG_BUGVERBOSE=y
CONFIG_DEBUG_INFO=y
@@ -1207,11 +1217,11 @@ CONFIG_FRAME_POINTER=y
# CONFIG_BACKTRACE_SELF_TEST is not set
# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
+# CONFIG_LKDTM is not set
# CONFIG_FAULT_INJECTION is not set
# CONFIG_LATENCYTOP is not set
CONFIG_SYSCTL_SYSCALL_CHECK=y
# CONFIG_PAGE_POISONING is not set
-CONFIG_NOP_TRACER=y
CONFIG_HAVE_FUNCTION_TRACER=y
CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
@@ -1219,10 +1229,7 @@ CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
CONFIG_RING_BUFFER=y
-CONFIG_EVENT_TRACING=y
-CONFIG_CONTEXT_SWITCH_TRACER=y
CONFIG_RING_BUFFER_ALLOW_SWAP=y
-CONFIG_TRACING=y
CONFIG_TRACING_SUPPORT=y
CONFIG_FTRACE=y
# CONFIG_FUNCTION_TRACER is not set
@@ -1234,6 +1241,7 @@ CONFIG_FTRACE=y
CONFIG_BRANCH_PROFILE_NONE=y
# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
# CONFIG_PROFILE_ALL_BRANCHES is not set
+# CONFIG_KSYM_TRACER is not set
# CONFIG_STACK_TRACER is not set
# CONFIG_KMEMTRACE is not set
# CONFIG_WORKQUEUE_TRACER is not set
@@ -1263,7 +1271,8 @@ CONFIG_DUMP_CODE=y
CONFIG_DEFAULT_SECURITY_DAC=y
CONFIG_DEFAULT_SECURITY=""
# CONFIG_CRYPTO is not set
-CONFIG_BINARY_PRINTF=y
+# CONFIG_VIRTUALIZATION is not set
+# CONFIG_BINARY_PRINTF is not set
#
# Library routines
diff --git a/arch/sh/configs/rts7751r2d1_defconfig b/arch/sh/configs/rts7751r2d1_defconfig
index dba024d72a89..6bd3d95d1518 100644
--- a/arch/sh/configs/rts7751r2d1_defconfig
+++ b/arch/sh/configs/rts7751r2d1_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.33-rc2
-# Mon Jan 4 13:25:36 2010
+# Linux kernel version: 2.6.34-rc5
+# Tue May 18 16:15:07 2010
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -13,8 +13,8 @@ CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
-CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_IRQ_PER_CPU=y
+CONFIG_SPARSE_IRQ=y
# CONFIG_GENERIC_GPIO is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -33,6 +33,7 @@ CONFIG_ARCH_HAS_DEFAULT_IDLE=y
CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_IO_TRAPPED=y
CONFIG_DMA_NONCOHERENT=y
+CONFIG_NEED_DMA_MAP_STATE=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
CONFIG_CONSTRUCTORS=y
@@ -47,9 +48,11 @@ CONFIG_LOCALVERSION_AUTO=y
CONFIG_HAVE_KERNEL_GZIP=y
CONFIG_HAVE_KERNEL_BZIP2=y
CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_LZO=y
CONFIG_KERNEL_GZIP=y
# CONFIG_KERNEL_BZIP2 is not set
# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_LZO is not set
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
@@ -70,7 +73,6 @@ CONFIG_RCU_FANOUT=32
# CONFIG_TREE_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_GROUP_SCHED is not set
# CONFIG_CGROUPS is not set
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
@@ -104,7 +106,6 @@ CONFIG_PERF_USE_VMALLOC=y
# Kernel Performance Events And Counters
#
CONFIG_PERF_EVENTS=y
-CONFIG_EVENT_PROFILE=y
# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_PCI_QUIRKS=y
@@ -113,7 +114,6 @@ CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
CONFIG_PROFILING=y
-CONFIG_TRACEPOINTS=y
CONFIG_OPROFILE=y
CONFIG_HAVE_OPROFILE=y
# CONFIG_KPROBES is not set
@@ -124,6 +124,7 @@ CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_DMA_ATTRS=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+CONFIG_HAVE_HW_BREAKPOINT=y
#
# GCOV-based kernel profiling
@@ -242,6 +243,7 @@ CONFIG_ARCH_SPARSEMEM_DEFAULT=y
CONFIG_MAX_ACTIVE_REGIONS=1
CONFIG_ARCH_POPULATES_NODE_MAP=y
CONFIG_ARCH_SELECT_MEMORY_MODEL=y
+CONFIG_UNCACHED_MAPPING=y
CONFIG_PAGE_SIZE_4KB=y
# CONFIG_PAGE_SIZE_8KB is not set
# CONFIG_PAGE_SIZE_16KB is not set
@@ -257,7 +259,7 @@ CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
-CONFIG_NR_QUICK=2
+CONFIG_NR_QUICK=1
# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
@@ -343,7 +345,6 @@ CONFIG_PREEMPT_NONE=y
# CONFIG_PREEMPT is not set
CONFIG_GUSA=y
# CONFIG_GUSA_RB is not set
-# CONFIG_SPARSE_IRQ is not set
#
# Boot options
@@ -359,9 +360,9 @@ CONFIG_CMDLINE="console=tty0 console=ttySC1,115200 root=/dev/sda1"
# Bus options
#
CONFIG_PCI=y
+CONFIG_PCI_DOMAINS=y
# CONFIG_PCIEPORTBUS is not set
# CONFIG_ARCH_SUPPORTS_MSI is not set
-CONFIG_PCI_LEGACY=y
# CONFIG_PCI_STUB is not set
# CONFIG_PCI_IOV is not set
# CONFIG_PCCARD is not set
@@ -389,7 +390,6 @@ CONFIG_NET=y
# Networking options
#
CONFIG_PACKET=y
-# CONFIG_PACKET_MMAP is not set
CONFIG_UNIX=y
CONFIG_XFRM=y
# CONFIG_XFRM_USER is not set
@@ -449,7 +449,6 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# Network testing
#
# CONFIG_NET_PKTGEN is not set
-# CONFIG_NET_DROP_MONITOR is not set
# CONFIG_HAMRADIO is not set
# CONFIG_CAN is not set
# CONFIG_IRDA is not set
@@ -525,6 +524,7 @@ CONFIG_HAVE_IDE=y
#
# SCSI device support
#
+CONFIG_SCSI_MOD=y
# CONFIG_RAID_ATTRS is not set
CONFIG_SCSI=y
CONFIG_SCSI_DMA=y
@@ -642,6 +642,7 @@ CONFIG_ATA_SFF=y
# CONFIG_PATA_IT821X is not set
# CONFIG_PATA_IT8213 is not set
# CONFIG_PATA_JMICRON is not set
+# CONFIG_PATA_LEGACY is not set
# CONFIG_PATA_TRIFLEX is not set
# CONFIG_PATA_MARVELL is not set
# CONFIG_PATA_MPIIX is not set
@@ -678,7 +679,7 @@ CONFIG_PATA_PLATFORM=y
#
#
-# See the help texts for more information.
+# The newer stack is recommended.
#
# CONFIG_FIREWIRE is not set
# CONFIG_IEEE1394 is not set
@@ -719,6 +720,7 @@ CONFIG_NET_PCI=y
# CONFIG_PCNET32 is not set
# CONFIG_AMD8111_ETH is not set
# CONFIG_ADAPTEC_STARFIRE is not set
+# CONFIG_KSZ884X_PCI is not set
# CONFIG_B44 is not set
# CONFIG_FORCEDETH is not set
# CONFIG_E100 is not set
@@ -771,6 +773,8 @@ CONFIG_NETDEV_10000=y
# CONFIG_CHELSIO_T1 is not set
CONFIG_CHELSIO_T3_DEPENDS=y
# CONFIG_CHELSIO_T3 is not set
+CONFIG_CHELSIO_T4_DEPENDS=y
+# CONFIG_CHELSIO_T4 is not set
# CONFIG_ENIC is not set
# CONFIG_IXGBE is not set
# CONFIG_IXGB is not set
@@ -783,6 +787,7 @@ CONFIG_CHELSIO_T3_DEPENDS=y
# CONFIG_MLX4_CORE is not set
# CONFIG_TEHUTI is not set
# CONFIG_BNX2X is not set
+# CONFIG_QLCNIC is not set
# CONFIG_QLGE is not set
# CONFIG_SFC is not set
# CONFIG_BE2NET is not set
@@ -805,6 +810,7 @@ CONFIG_WLAN=y
# CONFIG_USB_PEGASUS is not set
# CONFIG_USB_RTL8150 is not set
# CONFIG_USB_USBNET is not set
+# CONFIG_USB_IPHETH is not set
# CONFIG_WAN is not set
# CONFIG_FDDI is not set
# CONFIG_HIPPI is not set
@@ -882,6 +888,7 @@ CONFIG_SERIAL_SH_SCI_CONSOLE=y
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
# CONFIG_SERIAL_JSM is not set
+# CONFIG_SERIAL_TIMBERDALE is not set
CONFIG_UNIX98_PTYS=y
# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
CONFIG_LEGACY_PTYS=y
@@ -964,6 +971,7 @@ CONFIG_MFD_SM501=y
# CONFIG_MFD_MC13783 is not set
# CONFIG_EZX_PCAP is not set
# CONFIG_AB4500_CORE is not set
+# CONFIG_LPC_SCH is not set
# CONFIG_REGULATOR is not set
# CONFIG_MEDIA_SUPPORT is not set
@@ -971,6 +979,7 @@ CONFIG_MFD_SM501=y
# Graphics support
#
CONFIG_VGA_ARB=y
+CONFIG_VGA_ARB_MAX_GPUS=16
# CONFIG_DRM is not set
# CONFIG_VGASTATE is not set
CONFIG_VIDEO_OUTPUT_CONTROL=m
@@ -1152,6 +1161,7 @@ CONFIG_SND_SPI=y
CONFIG_SND_SUPERH=y
CONFIG_SND_USB=y
# CONFIG_SND_USB_AUDIO is not set
+# CONFIG_SND_USB_UA101 is not set
# CONFIG_SND_USB_CAIAQ is not set
# CONFIG_SND_SOC is not set
CONFIG_SOUND_PRIME=m
@@ -1170,6 +1180,7 @@ CONFIG_USB_HID=y
#
# Special HID drivers
#
+# CONFIG_HID_3M_PCT is not set
CONFIG_HID_A4TECH=y
CONFIG_HID_APPLE=y
CONFIG_HID_BELKIN=y
@@ -1185,14 +1196,19 @@ CONFIG_HID_GYRATION=y
CONFIG_HID_LOGITECH=y
# CONFIG_LOGITECH_FF is not set
# CONFIG_LOGIRUMBLEPAD2_FF is not set
+# CONFIG_LOGIG940_FF is not set
CONFIG_HID_MICROSOFT=y
+# CONFIG_HID_MOSART is not set
CONFIG_HID_MONTEREY=y
# CONFIG_HID_NTRIG is not set
+# CONFIG_HID_ORTEK is not set
CONFIG_HID_PANTHERLORD=y
# CONFIG_PANTHERLORD_FF is not set
CONFIG_HID_PETALYNX=y
+# CONFIG_HID_QUANTA is not set
CONFIG_HID_SAMSUNG=y
CONFIG_HID_SONY=y
+# CONFIG_HID_STANTUM is not set
CONFIG_HID_SUNPLUS=y
# CONFIG_HID_GREENASIA is not set
# CONFIG_HID_SMARTJOYPLUS is not set
@@ -1291,7 +1307,6 @@ CONFIG_USB_LIBUSUAL=y
# CONFIG_USB_RIO500 is not set
# CONFIG_USB_LEGOTOWER is not set
# CONFIG_USB_LCD is not set
-# CONFIG_USB_BERRY_CHARGE is not set
# CONFIG_USB_LED is not set
# CONFIG_USB_CYPRESS_CY7C63 is not set
# CONFIG_USB_CYTHERM is not set
@@ -1303,7 +1318,6 @@ CONFIG_USB_LIBUSUAL=y
# CONFIG_USB_IOWARRIOR is not set
# CONFIG_USB_TEST is not set
# CONFIG_USB_ISIGHTFW is not set
-# CONFIG_USB_VST is not set
# CONFIG_USB_GADGET is not set
#
@@ -1381,7 +1395,6 @@ CONFIG_EXT2_FS=y
# CONFIG_EXT2_FS_XIP is not set
# CONFIG_EXT3_FS is not set
# CONFIG_EXT4_FS is not set
-CONFIG_EXT4_USE_FOR_EXT23=y
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
@@ -1442,6 +1455,7 @@ CONFIG_MISC_FILESYSTEMS=y
# CONFIG_BEFS_FS is not set
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
+# CONFIG_LOGFS is not set
# CONFIG_CRAMFS is not set
# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
@@ -1456,6 +1470,7 @@ CONFIG_NETWORK_FILESYSTEMS=y
# CONFIG_NFS_FS is not set
# CONFIG_NFSD is not set
# CONFIG_SMB_FS is not set
+# CONFIG_CEPH_FS is not set
# CONFIG_CIFS is not set
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
@@ -1522,13 +1537,12 @@ CONFIG_FRAME_WARN=1024
CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
# CONFIG_DEBUG_KERNEL is not set
-CONFIG_STACKTRACE=y
CONFIG_DEBUG_BUGVERBOSE=y
# CONFIG_DEBUG_MEMORY_INIT is not set
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_LKDTM is not set
# CONFIG_LATENCYTOP is not set
# CONFIG_SYSCTL_SYSCALL_CHECK is not set
-CONFIG_NOP_TRACER=y
CONFIG_HAVE_FUNCTION_TRACER=y
CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
@@ -1536,10 +1550,7 @@ CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
CONFIG_RING_BUFFER=y
-CONFIG_EVENT_TRACING=y
-CONFIG_CONTEXT_SWITCH_TRACER=y
CONFIG_RING_BUFFER_ALLOW_SWAP=y
-CONFIG_TRACING=y
CONFIG_TRACING_SUPPORT=y
# CONFIG_FTRACE is not set
# CONFIG_DYNAMIC_DEBUG is not set
@@ -1648,7 +1659,8 @@ CONFIG_CRYPTO=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_HW=y
# CONFIG_CRYPTO_DEV_HIFN_795X is not set
-CONFIG_BINARY_PRINTF=y
+# CONFIG_VIRTUALIZATION is not set
+# CONFIG_BINARY_PRINTF is not set
#
# Library routines
diff --git a/arch/sh/configs/rts7751r2dplus_defconfig b/arch/sh/configs/rts7751r2dplus_defconfig
index 6d511d06cbf6..487abcc4d4e7 100644
--- a/arch/sh/configs/rts7751r2dplus_defconfig
+++ b/arch/sh/configs/rts7751r2dplus_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.33-rc2
-# Mon Jan 4 13:26:39 2010
+# Linux kernel version: 2.6.34-rc5
+# Tue May 18 16:20:00 2010
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -13,8 +13,8 @@ CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
-CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_IRQ_PER_CPU=y
+CONFIG_SPARSE_IRQ=y
# CONFIG_GENERIC_GPIO is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -33,6 +33,7 @@ CONFIG_ARCH_HAS_DEFAULT_IDLE=y
CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_IO_TRAPPED=y
CONFIG_DMA_NONCOHERENT=y
+CONFIG_NEED_DMA_MAP_STATE=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
CONFIG_CONSTRUCTORS=y
@@ -47,9 +48,11 @@ CONFIG_LOCALVERSION_AUTO=y
CONFIG_HAVE_KERNEL_GZIP=y
CONFIG_HAVE_KERNEL_BZIP2=y
CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_LZO=y
CONFIG_KERNEL_GZIP=y
# CONFIG_KERNEL_BZIP2 is not set
# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_LZO is not set
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
@@ -70,7 +73,6 @@ CONFIG_RCU_FANOUT=32
# CONFIG_TREE_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_GROUP_SCHED is not set
# CONFIG_CGROUPS is not set
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
@@ -104,7 +106,6 @@ CONFIG_PERF_USE_VMALLOC=y
# Kernel Performance Events And Counters
#
CONFIG_PERF_EVENTS=y
-CONFIG_EVENT_PROFILE=y
# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_PCI_QUIRKS=y
@@ -113,7 +114,6 @@ CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
CONFIG_PROFILING=y
-CONFIG_TRACEPOINTS=y
CONFIG_OPROFILE=y
CONFIG_HAVE_OPROFILE=y
# CONFIG_KPROBES is not set
@@ -124,6 +124,7 @@ CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_DMA_ATTRS=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+CONFIG_HAVE_HW_BREAKPOINT=y
#
# GCOV-based kernel profiling
@@ -242,6 +243,7 @@ CONFIG_ARCH_SPARSEMEM_DEFAULT=y
CONFIG_MAX_ACTIVE_REGIONS=1
CONFIG_ARCH_POPULATES_NODE_MAP=y
CONFIG_ARCH_SELECT_MEMORY_MODEL=y
+CONFIG_UNCACHED_MAPPING=y
CONFIG_PAGE_SIZE_4KB=y
# CONFIG_PAGE_SIZE_8KB is not set
# CONFIG_PAGE_SIZE_16KB is not set
@@ -257,7 +259,7 @@ CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
-CONFIG_NR_QUICK=2
+CONFIG_NR_QUICK=1
# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
@@ -343,7 +345,6 @@ CONFIG_PREEMPT_NONE=y
# CONFIG_PREEMPT is not set
CONFIG_GUSA=y
# CONFIG_GUSA_RB is not set
-# CONFIG_SPARSE_IRQ is not set
#
# Boot options
@@ -359,9 +360,9 @@ CONFIG_CMDLINE="console=tty0 console=ttySC1,115200 root=/dev/sda1"
# Bus options
#
CONFIG_PCI=y
+CONFIG_PCI_DOMAINS=y
# CONFIG_PCIEPORTBUS is not set
# CONFIG_ARCH_SUPPORTS_MSI is not set
-CONFIG_PCI_LEGACY=y
# CONFIG_PCI_STUB is not set
# CONFIG_PCI_IOV is not set
# CONFIG_PCCARD is not set
@@ -389,7 +390,6 @@ CONFIG_NET=y
# Networking options
#
CONFIG_PACKET=y
-# CONFIG_PACKET_MMAP is not set
CONFIG_UNIX=y
CONFIG_XFRM=y
# CONFIG_XFRM_USER is not set
@@ -449,7 +449,6 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# Network testing
#
# CONFIG_NET_PKTGEN is not set
-# CONFIG_NET_DROP_MONITOR is not set
# CONFIG_HAMRADIO is not set
# CONFIG_CAN is not set
# CONFIG_IRDA is not set
@@ -611,6 +610,7 @@ CONFIG_HAVE_IDE=y
#
# SCSI device support
#
+CONFIG_SCSI_MOD=y
# CONFIG_RAID_ATTRS is not set
CONFIG_SCSI=y
CONFIG_SCSI_DMA=y
@@ -728,6 +728,7 @@ CONFIG_ATA_SFF=y
# CONFIG_PATA_IT821X is not set
# CONFIG_PATA_IT8213 is not set
# CONFIG_PATA_JMICRON is not set
+# CONFIG_PATA_LEGACY is not set
# CONFIG_PATA_TRIFLEX is not set
# CONFIG_PATA_MARVELL is not set
# CONFIG_PATA_MPIIX is not set
@@ -764,7 +765,7 @@ CONFIG_PATA_PLATFORM=y
#
#
-# See the help texts for more information.
+# The newer stack is recommended.
#
# CONFIG_FIREWIRE is not set
# CONFIG_IEEE1394 is not set
@@ -805,6 +806,7 @@ CONFIG_NET_PCI=y
# CONFIG_PCNET32 is not set
# CONFIG_AMD8111_ETH is not set
# CONFIG_ADAPTEC_STARFIRE is not set
+# CONFIG_KSZ884X_PCI is not set
# CONFIG_B44 is not set
# CONFIG_FORCEDETH is not set
# CONFIG_E100 is not set
@@ -857,6 +859,8 @@ CONFIG_NETDEV_10000=y
# CONFIG_CHELSIO_T1 is not set
CONFIG_CHELSIO_T3_DEPENDS=y
# CONFIG_CHELSIO_T3 is not set
+CONFIG_CHELSIO_T4_DEPENDS=y
+# CONFIG_CHELSIO_T4 is not set
# CONFIG_ENIC is not set
# CONFIG_IXGBE is not set
# CONFIG_IXGB is not set
@@ -869,6 +873,7 @@ CONFIG_CHELSIO_T3_DEPENDS=y
# CONFIG_MLX4_CORE is not set
# CONFIG_TEHUTI is not set
# CONFIG_BNX2X is not set
+# CONFIG_QLCNIC is not set
# CONFIG_QLGE is not set
# CONFIG_SFC is not set
# CONFIG_BE2NET is not set
@@ -891,6 +896,7 @@ CONFIG_WLAN=y
# CONFIG_USB_PEGASUS is not set
# CONFIG_USB_RTL8150 is not set
# CONFIG_USB_USBNET is not set
+# CONFIG_USB_IPHETH is not set
# CONFIG_WAN is not set
# CONFIG_FDDI is not set
# CONFIG_HIPPI is not set
@@ -968,6 +974,7 @@ CONFIG_SERIAL_SH_SCI_CONSOLE=y
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
# CONFIG_SERIAL_JSM is not set
+# CONFIG_SERIAL_TIMBERDALE is not set
CONFIG_UNIX98_PTYS=y
# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
CONFIG_LEGACY_PTYS=y
@@ -1050,6 +1057,7 @@ CONFIG_MFD_SM501=y
# CONFIG_MFD_MC13783 is not set
# CONFIG_EZX_PCAP is not set
# CONFIG_AB4500_CORE is not set
+# CONFIG_LPC_SCH is not set
# CONFIG_REGULATOR is not set
# CONFIG_MEDIA_SUPPORT is not set
@@ -1057,6 +1065,7 @@ CONFIG_MFD_SM501=y
# Graphics support
#
CONFIG_VGA_ARB=y
+CONFIG_VGA_ARB_MAX_GPUS=16
# CONFIG_DRM is not set
# CONFIG_VGASTATE is not set
CONFIG_VIDEO_OUTPUT_CONTROL=m
@@ -1238,6 +1247,7 @@ CONFIG_SND_SPI=y
CONFIG_SND_SUPERH=y
CONFIG_SND_USB=y
# CONFIG_SND_USB_AUDIO is not set
+# CONFIG_SND_USB_UA101 is not set
# CONFIG_SND_USB_CAIAQ is not set
# CONFIG_SND_SOC is not set
CONFIG_SOUND_PRIME=m
@@ -1256,6 +1266,7 @@ CONFIG_USB_HID=y
#
# Special HID drivers
#
+# CONFIG_HID_3M_PCT is not set
CONFIG_HID_A4TECH=y
CONFIG_HID_APPLE=y
CONFIG_HID_BELKIN=y
@@ -1271,14 +1282,19 @@ CONFIG_HID_GYRATION=y
CONFIG_HID_LOGITECH=y
# CONFIG_LOGITECH_FF is not set
# CONFIG_LOGIRUMBLEPAD2_FF is not set
+# CONFIG_LOGIG940_FF is not set
CONFIG_HID_MICROSOFT=y
+# CONFIG_HID_MOSART is not set
CONFIG_HID_MONTEREY=y
# CONFIG_HID_NTRIG is not set
+# CONFIG_HID_ORTEK is not set
CONFIG_HID_PANTHERLORD=y
# CONFIG_PANTHERLORD_FF is not set
CONFIG_HID_PETALYNX=y
+# CONFIG_HID_QUANTA is not set
CONFIG_HID_SAMSUNG=y
CONFIG_HID_SONY=y
+# CONFIG_HID_STANTUM is not set
CONFIG_HID_SUNPLUS=y
# CONFIG_HID_GREENASIA is not set
# CONFIG_HID_SMARTJOYPLUS is not set
@@ -1377,7 +1393,6 @@ CONFIG_USB_LIBUSUAL=y
# CONFIG_USB_RIO500 is not set
# CONFIG_USB_LEGOTOWER is not set
# CONFIG_USB_LCD is not set
-# CONFIG_USB_BERRY_CHARGE is not set
# CONFIG_USB_LED is not set
# CONFIG_USB_CYPRESS_CY7C63 is not set
# CONFIG_USB_CYTHERM is not set
@@ -1389,7 +1404,6 @@ CONFIG_USB_LIBUSUAL=y
# CONFIG_USB_IOWARRIOR is not set
# CONFIG_USB_TEST is not set
# CONFIG_USB_ISIGHTFW is not set
-# CONFIG_USB_VST is not set
# CONFIG_USB_GADGET is not set
#
@@ -1467,7 +1481,6 @@ CONFIG_EXT2_FS=y
# CONFIG_EXT2_FS_XIP is not set
# CONFIG_EXT3_FS is not set
# CONFIG_EXT4_FS is not set
-CONFIG_EXT4_USE_FOR_EXT23=y
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
@@ -1529,6 +1542,7 @@ CONFIG_MISC_FILESYSTEMS=y
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
# CONFIG_JFFS2_FS is not set
+# CONFIG_LOGFS is not set
# CONFIG_CRAMFS is not set
# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
@@ -1543,6 +1557,7 @@ CONFIG_NETWORK_FILESYSTEMS=y
# CONFIG_NFS_FS is not set
# CONFIG_NFSD is not set
# CONFIG_SMB_FS is not set
+# CONFIG_CEPH_FS is not set
# CONFIG_CIFS is not set
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
@@ -1609,13 +1624,12 @@ CONFIG_FRAME_WARN=1024
CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
# CONFIG_DEBUG_KERNEL is not set
-CONFIG_STACKTRACE=y
CONFIG_DEBUG_BUGVERBOSE=y
# CONFIG_DEBUG_MEMORY_INIT is not set
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_LKDTM is not set
# CONFIG_LATENCYTOP is not set
# CONFIG_SYSCTL_SYSCALL_CHECK is not set
-CONFIG_NOP_TRACER=y
CONFIG_HAVE_FUNCTION_TRACER=y
CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
@@ -1623,10 +1637,7 @@ CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
CONFIG_RING_BUFFER=y
-CONFIG_EVENT_TRACING=y
-CONFIG_CONTEXT_SWITCH_TRACER=y
CONFIG_RING_BUFFER_ALLOW_SWAP=y
-CONFIG_TRACING=y
CONFIG_TRACING_SUPPORT=y
# CONFIG_FTRACE is not set
# CONFIG_DYNAMIC_DEBUG is not set
@@ -1735,7 +1746,8 @@ CONFIG_CRYPTO=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_HW=y
# CONFIG_CRYPTO_DEV_HIFN_795X is not set
-CONFIG_BINARY_PRINTF=y
+# CONFIG_VIRTUALIZATION is not set
+# CONFIG_BINARY_PRINTF is not set
#
# Library routines
diff --git a/arch/sh/configs/sdk7780_defconfig b/arch/sh/configs/sdk7780_defconfig
index 1859ba099945..fe923142c2cc 100644
--- a/arch/sh/configs/sdk7780_defconfig
+++ b/arch/sh/configs/sdk7780_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.33-rc2
-# Mon Jan 4 13:27:20 2010
+# Linux kernel version: 2.6.34-rc5
+# Tue May 18 16:21:00 2010
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -13,8 +13,8 @@ CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
-CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_IRQ_PER_CPU=y
+CONFIG_SPARSE_IRQ=y
# CONFIG_GENERIC_GPIO is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -32,6 +32,7 @@ CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DMA_NONCOHERENT=y
+CONFIG_NEED_DMA_MAP_STATE=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
CONFIG_CONSTRUCTORS=y
@@ -47,9 +48,11 @@ CONFIG_LOCALVERSION_AUTO=y
CONFIG_HAVE_KERNEL_GZIP=y
CONFIG_HAVE_KERNEL_BZIP2=y
CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_LZO=y
CONFIG_KERNEL_GZIP=y
# CONFIG_KERNEL_BZIP2 is not set
# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_LZO is not set
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
@@ -73,7 +76,6 @@ CONFIG_RCU_FANOUT=32
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=18
-# CONFIG_GROUP_SCHED is not set
# CONFIG_CGROUPS is not set
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
@@ -107,8 +109,9 @@ CONFIG_PERF_USE_VMALLOC=y
#
# Kernel Performance Events And Counters
#
-# CONFIG_PERF_EVENTS is not set
+CONFIG_PERF_EVENTS=y
# CONFIG_PERF_COUNTERS is not set
+# CONFIG_DEBUG_PERF_USE_VMALLOC is not set
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_PCI_QUIRKS=y
CONFIG_SLUB_DEBUG=y
@@ -126,6 +129,7 @@ CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_DMA_ATTRS=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+CONFIG_HAVE_HW_BREAKPOINT=y
#
# GCOV-based kernel profiling
@@ -237,11 +241,8 @@ CONFIG_PAGE_OFFSET=0x80000000
CONFIG_FORCE_MAX_ZONEORDER=11
CONFIG_MEMORY_START=0x08000000
CONFIG_MEMORY_SIZE=0x08000000
-# CONFIG_29BIT is not set
-CONFIG_32BIT=y
-CONFIG_PMB_ENABLE=y
+CONFIG_29BIT=y
# CONFIG_PMB is not set
-CONFIG_PMB_FIXED=y
CONFIG_VSYSCALL=y
CONFIG_ARCH_FLATMEM_ENABLE=y
CONFIG_ARCH_SPARSEMEM_ENABLE=y
@@ -249,6 +250,7 @@ CONFIG_ARCH_SPARSEMEM_DEFAULT=y
CONFIG_MAX_ACTIVE_REGIONS=1
CONFIG_ARCH_POPULATES_NODE_MAP=y
CONFIG_ARCH_SELECT_MEMORY_MODEL=y
+CONFIG_UNCACHED_MAPPING=y
CONFIG_PAGE_SIZE_4KB=y
# CONFIG_PAGE_SIZE_8KB is not set
# CONFIG_PAGE_SIZE_16KB is not set
@@ -270,7 +272,7 @@ CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
-CONFIG_NR_QUICK=2
+CONFIG_NR_QUICK=1
# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
@@ -353,7 +355,7 @@ CONFIG_SCHED_HRTICK=y
# CONFIG_PREEMPT_VOLUNTARY is not set
CONFIG_PREEMPT=y
CONFIG_GUSA=y
-# CONFIG_SPARSE_IRQ is not set
+# CONFIG_INTC_USERIMASK is not set
#
# Boot options
@@ -369,16 +371,15 @@ CONFIG_CMDLINE="mem=128M console=tty0 console=ttySC0,115200 ip=bootp root=/dev/n
# Bus options
#
CONFIG_PCI=y
+CONFIG_PCI_DOMAINS=y
# CONFIG_PCIEPORTBUS is not set
# CONFIG_ARCH_SUPPORTS_MSI is not set
-# CONFIG_PCI_LEGACY is not set
CONFIG_PCI_DEBUG=y
# CONFIG_PCI_STUB is not set
# CONFIG_PCI_IOV is not set
CONFIG_PCCARD=y
CONFIG_PCMCIA=y
CONFIG_PCMCIA_LOAD_CIS=y
-CONFIG_PCMCIA_IOCTL=y
CONFIG_CARDBUS=y
#
@@ -417,7 +418,6 @@ CONFIG_NET=y
# Networking options
#
CONFIG_PACKET=y
-# CONFIG_PACKET_MMAP is not set
CONFIG_UNIX=y
CONFIG_XFRM=y
# CONFIG_XFRM_USER is not set
@@ -659,6 +659,7 @@ CONFIG_BLK_DEV_GENERIC=y
#
# SCSI device support
#
+CONFIG_SCSI_MOD=y
# CONFIG_RAID_ATTRS is not set
CONFIG_SCSI=y
CONFIG_SCSI_DMA=y
@@ -777,6 +778,7 @@ CONFIG_ATA_SFF=y
# CONFIG_PATA_IT821X is not set
# CONFIG_PATA_IT8213 is not set
# CONFIG_PATA_JMICRON is not set
+# CONFIG_PATA_LEGACY is not set
# CONFIG_PATA_TRIFLEX is not set
# CONFIG_PATA_MARVELL is not set
# CONFIG_PATA_MPIIX is not set
@@ -824,7 +826,7 @@ CONFIG_BLK_DEV_DM=y
#
#
-# See the help texts for more information.
+# The newer stack is recommended.
#
# CONFIG_FIREWIRE is not set
# CONFIG_IEEE1394 is not set
@@ -890,6 +892,7 @@ CONFIG_WLAN=y
# CONFIG_USB_PEGASUS is not set
# CONFIG_USB_RTL8150 is not set
# CONFIG_USB_USBNET is not set
+# CONFIG_USB_IPHETH is not set
# CONFIG_NET_PCMCIA is not set
# CONFIG_WAN is not set
# CONFIG_FDDI is not set
@@ -995,6 +998,7 @@ CONFIG_SERIAL_SH_SCI_CONSOLE=y
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
# CONFIG_SERIAL_JSM is not set
+# CONFIG_SERIAL_TIMBERDALE is not set
CONFIG_UNIX98_PTYS=y
# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
CONFIG_LEGACY_PTYS=y
@@ -1057,6 +1061,7 @@ CONFIG_SSB_DRIVER_PCICORE=y
# CONFIG_MFD_SH_MOBILE_SDHI is not set
# CONFIG_HTC_PASIC3 is not set
# CONFIG_MFD_TMIO is not set
+# CONFIG_LPC_SCH is not set
# CONFIG_REGULATOR is not set
# CONFIG_MEDIA_SUPPORT is not set
@@ -1064,6 +1069,7 @@ CONFIG_SSB_DRIVER_PCICORE=y
# Graphics support
#
CONFIG_VGA_ARB=y
+CONFIG_VGA_ARB_MAX_GPUS=16
# CONFIG_DRM is not set
# CONFIG_VGASTATE is not set
# CONFIG_VIDEO_OUTPUT_CONTROL is not set
@@ -1167,6 +1173,7 @@ CONFIG_USB_HID=y
#
# Special HID drivers
#
+# CONFIG_HID_3M_PCT is not set
CONFIG_HID_A4TECH=y
CONFIG_HID_APPLE=y
CONFIG_HID_BELKIN=y
@@ -1182,14 +1189,19 @@ CONFIG_HID_GYRATION=y
CONFIG_HID_LOGITECH=y
# CONFIG_LOGITECH_FF is not set
# CONFIG_LOGIRUMBLEPAD2_FF is not set
+# CONFIG_LOGIG940_FF is not set
CONFIG_HID_MICROSOFT=y
+# CONFIG_HID_MOSART is not set
CONFIG_HID_MONTEREY=y
# CONFIG_HID_NTRIG is not set
+# CONFIG_HID_ORTEK is not set
CONFIG_HID_PANTHERLORD=y
# CONFIG_PANTHERLORD_FF is not set
CONFIG_HID_PETALYNX=y
+# CONFIG_HID_QUANTA is not set
CONFIG_HID_SAMSUNG=y
CONFIG_HID_SONY=y
+# CONFIG_HID_STANTUM is not set
CONFIG_HID_SUNPLUS=y
# CONFIG_HID_GREENASIA is not set
# CONFIG_HID_SMARTJOYPLUS is not set
@@ -1288,7 +1300,6 @@ CONFIG_USB_STORAGE=y
# CONFIG_USB_RIO500 is not set
# CONFIG_USB_LEGOTOWER is not set
# CONFIG_USB_LCD is not set
-# CONFIG_USB_BERRY_CHARGE is not set
# CONFIG_USB_LED is not set
# CONFIG_USB_CYPRESS_CY7C63 is not set
# CONFIG_USB_CYTHERM is not set
@@ -1301,7 +1312,6 @@ CONFIG_USB_STORAGE=y
# CONFIG_USB_IOWARRIOR is not set
# CONFIG_USB_TEST is not set
# CONFIG_USB_ISIGHTFW is not set
-# CONFIG_USB_VST is not set
# CONFIG_USB_GADGET is not set
#
@@ -1317,10 +1327,6 @@ CONFIG_LEDS_CLASS=y
#
# LED drivers
#
-
-#
-# LED Triggers
-#
# CONFIG_LEDS_TRIGGERS is not set
# CONFIG_ACCESSIBILITY is not set
# CONFIG_INFINIBAND is not set
@@ -1416,6 +1422,7 @@ CONFIG_MISC_FILESYSTEMS=y
# CONFIG_BEFS_FS is not set
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
+# CONFIG_LOGFS is not set
# CONFIG_CRAMFS is not set
# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
@@ -1444,6 +1451,7 @@ CONFIG_SUNRPC=y
# CONFIG_RPCSEC_GSS_KRB5 is not set
# CONFIG_RPCSEC_GSS_SPKM3 is not set
# CONFIG_SMB_FS is not set
+# CONFIG_CEPH_FS is not set
# CONFIG_CIFS is not set
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
@@ -1523,6 +1531,7 @@ CONFIG_TIMER_STATS=y
# CONFIG_DEBUG_OBJECTS is not set
# CONFIG_SLUB_DEBUG_ON is not set
# CONFIG_SLUB_STATS is not set
+# CONFIG_DEBUG_KMEMLEAK is not set
CONFIG_DEBUG_PREEMPT=y
# CONFIG_DEBUG_RT_MUTEXES is not set
# CONFIG_RT_MUTEX_TESTER is not set
@@ -1571,6 +1580,7 @@ CONFIG_FTRACE=y
CONFIG_BRANCH_PROFILE_NONE=y
# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
# CONFIG_PROFILE_ALL_BRANCHES is not set
+# CONFIG_KSYM_TRACER is not set
# CONFIG_STACK_TRACER is not set
# CONFIG_KMEMTRACE is not set
# CONFIG_WORKQUEUE_TRACER is not set
@@ -1690,6 +1700,7 @@ CONFIG_CRYPTO_DES=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_HW=y
# CONFIG_CRYPTO_DEV_HIFN_795X is not set
+# CONFIG_VIRTUALIZATION is not set
# CONFIG_BINARY_PRINTF is not set
#
diff --git a/arch/sh/configs/sdk7786_defconfig b/arch/sh/configs/sdk7786_defconfig
index 9b331eab968e..269824598520 100644
--- a/arch/sh/configs/sdk7786_defconfig
+++ b/arch/sh/configs/sdk7786_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.33-rc7
-# Tue Feb 9 15:27:06 2010
+# Linux kernel version: 2.6.34-rc5
+# Tue May 18 18:03:37 2010
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -18,6 +18,8 @@ CONFIG_SPARSE_IRQ=y
# CONFIG_GENERIC_GPIO is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y
+CONFIG_GENERIC_LOCKBREAK=y
# CONFIG_ARCH_SUSPEND_POSSIBLE is not set
CONFIG_ARCH_HIBERNATION_POSSIBLE=y
CONFIG_SYS_SUPPORTS_HUGETLBFS=y
@@ -35,6 +37,7 @@ CONFIG_ARCH_HAS_DEFAULT_IDLE=y
CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DMA_COHERENT=y
# CONFIG_DMA_NONCOHERENT is not set
+# CONFIG_NEED_DMA_MAP_STATE is not set
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
CONFIG_CONSTRUCTORS=y
@@ -42,7 +45,6 @@ CONFIG_CONSTRUCTORS=y
# General setup
#
CONFIG_EXPERIMENTAL=y
-CONFIG_BROKEN_ON_SMP=y
CONFIG_LOCK_KERNEL=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_LOCALVERSION=""
@@ -51,25 +53,27 @@ CONFIG_HAVE_KERNEL_GZIP=y
CONFIG_HAVE_KERNEL_BZIP2=y
CONFIG_HAVE_KERNEL_LZMA=y
CONFIG_HAVE_KERNEL_LZO=y
-CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_GZIP is not set
# CONFIG_KERNEL_BZIP2 is not set
# CONFIG_KERNEL_LZMA is not set
-# CONFIG_KERNEL_LZO is not set
+CONFIG_KERNEL_LZO=y
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
CONFIG_POSIX_MQUEUE=y
CONFIG_POSIX_MQUEUE_SYSCTL=y
CONFIG_BSD_PROCESS_ACCT=y
-# CONFIG_BSD_PROCESS_ACCT_V3 is not set
+CONFIG_BSD_PROCESS_ACCT_V3=y
# CONFIG_TASKSTATS is not set
-# CONFIG_AUDIT is not set
+CONFIG_AUDIT=y
+CONFIG_AUDITSYSCALL=y
+CONFIG_AUDIT_TREE=y
#
# RCU Subsystem
#
-CONFIG_TREE_RCU=y
-# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_TREE_RCU is not set
+CONFIG_TREE_PREEMPT_RCU=y
# CONFIG_TINY_RCU is not set
CONFIG_RCU_TRACE=y
CONFIG_RCU_FANOUT=32
@@ -77,32 +81,36 @@ CONFIG_RCU_FANOUT=32
CONFIG_TREE_RCU_TRACE=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_GROUP_SCHED=y
-CONFIG_FAIR_GROUP_SCHED=y
-CONFIG_RT_GROUP_SCHED=y
-CONFIG_USER_SCHED=y
-# CONFIG_CGROUP_SCHED is not set
+CONFIG_LOG_BUF_SHIFT=17
CONFIG_CGROUPS=y
-# CONFIG_CGROUP_DEBUG is not set
+CONFIG_CGROUP_DEBUG=y
CONFIG_CGROUP_NS=y
CONFIG_CGROUP_FREEZER=y
CONFIG_CGROUP_DEVICE=y
-# CONFIG_CPUSETS is not set
+CONFIG_CPUSETS=y
+# CONFIG_PROC_PID_CPUSET is not set
CONFIG_CGROUP_CPUACCT=y
CONFIG_RESOURCE_COUNTERS=y
CONFIG_CGROUP_MEM_RES_CTLR=y
-# CONFIG_CGROUP_MEM_RES_CTLR_SWAP is not set
+CONFIG_CGROUP_MEM_RES_CTLR_SWAP=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_FAIR_GROUP_SCHED=y
+CONFIG_RT_GROUP_SCHED=y
CONFIG_MM_OWNER=y
# CONFIG_SYSFS_DEPRECATED_V2 is not set
-# CONFIG_RELAY is not set
+CONFIG_RELAY=y
CONFIG_NAMESPACES=y
CONFIG_UTS_NS=y
CONFIG_IPC_NS=y
CONFIG_USER_NS=y
CONFIG_PID_NS=y
CONFIG_NET_NS=y
-# CONFIG_BLK_DEV_INITRD is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+CONFIG_RD_BZIP2=y
+CONFIG_RD_LZMA=y
+CONFIG_RD_LZO=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_SYSCTL=y
CONFIG_ANON_INODES=y
@@ -131,7 +139,6 @@ CONFIG_PERF_USE_VMALLOC=y
# Kernel Performance Events And Counters
#
CONFIG_PERF_EVENTS=y
-CONFIG_EVENT_PROFILE=y
# CONFIG_PERF_COUNTERS is not set
# CONFIG_DEBUG_PERF_USE_VMALLOC is not set
CONFIG_VM_EVENT_COUNTERS=y
@@ -142,13 +149,15 @@ CONFIG_SLAB=y
# CONFIG_SLOB is not set
CONFIG_PROFILING=y
CONFIG_TRACEPOINTS=y
-# CONFIG_OPROFILE is not set
+CONFIG_OPROFILE=m
CONFIG_HAVE_OPROFILE=y
-# CONFIG_KPROBES is not set
+CONFIG_KPROBES=y
+CONFIG_KRETPROBES=y
CONFIG_HAVE_KPROBES=y
CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_DMA_ATTRS=y
+CONFIG_USE_GENERIC_SMP_HELPERS=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
CONFIG_HAVE_HW_BREAKPOINT=y
@@ -157,7 +166,8 @@ CONFIG_HAVE_HW_BREAKPOINT=y
# GCOV-based kernel profiling
#
# CONFIG_GCOV_KERNEL is not set
-# CONFIG_SLOW_WORK is not set
+CONFIG_SLOW_WORK=y
+CONFIG_SLOW_WORK_DEBUG=y
CONFIG_HAVE_GENERIC_DMA_COHERENT=y
CONFIG_SLABINFO=y
CONFIG_RT_MUTEXES=y
@@ -168,9 +178,10 @@ CONFIG_MODULE_UNLOAD=y
# CONFIG_MODULE_FORCE_UNLOAD is not set
# CONFIG_MODVERSIONS is not set
# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_STOP_MACHINE=y
CONFIG_BLOCK=y
-# CONFIG_LBDAF is not set
-# CONFIG_BLK_DEV_BSG is not set
+CONFIG_LBDAF=y
+CONFIG_BLK_DEV_BSG=y
# CONFIG_BLK_DEV_INTEGRITY is not set
CONFIG_BLK_CGROUP=y
# CONFIG_DEBUG_BLK_CGROUP is not set
@@ -215,7 +226,7 @@ CONFIG_DEFAULT_IOSCHED="cfq"
# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
# CONFIG_INLINE_WRITE_UNLOCK_IRQ is not set
# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
-# CONFIG_MUTEX_SPIN_ON_OWNER is not set
+CONFIG_MUTEX_SPIN_ON_OWNER=y
CONFIG_FREEZER=y
#
@@ -267,16 +278,15 @@ CONFIG_QUICKLIST=y
CONFIG_MMU=y
CONFIG_PAGE_OFFSET=0x80000000
CONFIG_FORCE_MAX_ZONEORDER=11
-CONFIG_MEMORY_START=0x60000000
+CONFIG_MEMORY_START=0x40000000
CONFIG_MEMORY_SIZE=0x20000000
# CONFIG_29BIT is not set
CONFIG_32BIT=y
CONFIG_PMB=y
-# CONFIG_PMB_LEGACY is not set
CONFIG_X2TLB=y
CONFIG_VSYSCALL=y
-# CONFIG_NUMA is not set
-CONFIG_ARCH_FLATMEM_ENABLE=y
+CONFIG_NUMA=y
+CONFIG_NODES_SHIFT=1
CONFIG_ARCH_SPARSEMEM_ENABLE=y
CONFIG_ARCH_SPARSEMEM_DEFAULT=y
CONFIG_MAX_ACTIVE_REGIONS=1
@@ -286,21 +296,23 @@ CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y
CONFIG_ARCH_MEMORY_PROBE=y
CONFIG_IOREMAP_FIXED=y
+CONFIG_UNCACHED_MAPPING=y
CONFIG_PAGE_SIZE_4KB=y
# CONFIG_PAGE_SIZE_8KB is not set
# CONFIG_PAGE_SIZE_16KB is not set
# CONFIG_PAGE_SIZE_64KB is not set
# CONFIG_HUGETLB_PAGE_SIZE_64K is not set
# CONFIG_HUGETLB_PAGE_SIZE_256K is not set
-CONFIG_HUGETLB_PAGE_SIZE_1MB=y
+# CONFIG_HUGETLB_PAGE_SIZE_1MB is not set
# CONFIG_HUGETLB_PAGE_SIZE_4MB is not set
-# CONFIG_HUGETLB_PAGE_SIZE_64MB is not set
+CONFIG_HUGETLB_PAGE_SIZE_64MB=y
# CONFIG_HUGETLB_PAGE_SIZE_512MB is not set
CONFIG_SELECT_MEMORY_MODEL=y
# CONFIG_FLATMEM_MANUAL is not set
# CONFIG_DISCONTIGMEM_MANUAL is not set
CONFIG_SPARSEMEM_MANUAL=y
CONFIG_SPARSEMEM=y
+CONFIG_NEED_MULTIPLE_NODES=y
CONFIG_HAVE_MEMORY_PRESENT=y
CONFIG_SPARSEMEM_STATIC=y
CONFIG_MEMORY_HOTPLUG=y
@@ -313,6 +325,7 @@ CONFIG_ZONE_DMA_FLAG=0
CONFIG_NR_QUICK=1
CONFIG_KSM=y
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+CONFIG_SCHED_MC=y
#
# Cache configuration
@@ -328,6 +341,7 @@ CONFIG_CPU_LITTLE_ENDIAN=y
# CONFIG_CPU_BIG_ENDIAN is not set
CONFIG_SH_FPU=y
CONFIG_SH_STORE_QUEUES=y
+CONFIG_SPECULATIVE_EXECUTION=y
CONFIG_CPU_HAS_INTEVT=y
CONFIG_CPU_HAS_SR_RB=y
CONFIG_CPU_HAS_PTEAEX=y
@@ -372,7 +386,10 @@ CONFIG_SH_CPU_FREQ=y
#
# DMA support
#
-# CONFIG_SH_DMA is not set
+CONFIG_SH_DMA=y
+# CONFIG_SH_DMA_API is not set
+CONFIG_NR_ONCHIP_DMA_CHANNELS=6
+# CONFIG_NR_DMA_CHANNELS_BOOL is not set
#
# Companion Chips
@@ -388,19 +405,21 @@ CONFIG_HEARTBEAT=y
# Kernel features
#
# CONFIG_HZ_100 is not set
-CONFIG_HZ_250=y
+# CONFIG_HZ_250 is not set
# CONFIG_HZ_300 is not set
-# CONFIG_HZ_1000 is not set
-CONFIG_HZ=250
+CONFIG_HZ_1000=y
+CONFIG_HZ=1000
CONFIG_SCHED_HRTICK=y
CONFIG_KEXEC=y
-# CONFIG_CRASH_DUMP is not set
CONFIG_SECCOMP=y
-# CONFIG_SMP is not set
+CONFIG_SMP=y
+CONFIG_NR_CPUS=2
+CONFIG_HOTPLUG_CPU=y
# CONFIG_PREEMPT_NONE is not set
# CONFIG_PREEMPT_VOLUNTARY is not set
CONFIG_PREEMPT=y
-CONFIG_GUSA=y
+CONFIG_INTC_USERIMASK=y
+CONFIG_INTC_BALANCING=y
#
# Boot options
@@ -410,7 +429,7 @@ CONFIG_BOOT_LINK_OFFSET=0x00800000
CONFIG_ENTRY_OFFSET=0x00001000
CONFIG_CMDLINE_OVERWRITE=y
# CONFIG_CMDLINE_EXTEND is not set
-CONFIG_CMDLINE="console=ttySC1,115200 earlyprintk=sh-sci.1,115200 root=/dev/sda1 nmi_debug=state,debounce rootdelay=10"
+CONFIG_CMDLINE="console=ttySC1,115200 earlyprintk=sh-sci.1,115200 root=/dev/sda1 nmi_debug=state,debounce rootdelay=5 pmb=iomap ignore_loglevel"
#
# Bus options
@@ -424,8 +443,7 @@ CONFIG_PCIEAER_INJECT=y
CONFIG_PCIEASPM=y
CONFIG_PCIEASPM_DEBUG=y
# CONFIG_ARCH_SUPPORTS_MSI is not set
-# CONFIG_PCI_LEGACY is not set
-CONFIG_PCI_DEBUG=y
+# CONFIG_PCI_DEBUG is not set
# CONFIG_PCI_STUB is not set
# CONFIG_PCI_IOV is not set
# CONFIG_PCCARD is not set
@@ -435,7 +453,7 @@ CONFIG_PCI_DEBUG=y
# Executable file formats
#
CONFIG_BINFMT_ELF=y
-# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
# CONFIG_HAVE_AOUT is not set
CONFIG_BINFMT_MISC=y
@@ -444,9 +462,11 @@ CONFIG_BINFMT_MISC=y
#
CONFIG_PM=y
CONFIG_PM_DEBUG=y
+# CONFIG_PM_ADVANCED_DEBUG is not set
CONFIG_PM_VERBOSE=y
# CONFIG_HIBERNATION is not set
CONFIG_PM_RUNTIME=y
+CONFIG_PM_OPS=y
CONFIG_CPU_IDLE=y
CONFIG_CPU_IDLE_GOV_LADDER=y
CONFIG_CPU_IDLE_GOV_MENU=y
@@ -456,7 +476,6 @@ CONFIG_NET=y
# Networking options
#
CONFIG_PACKET=y
-CONFIG_PACKET_MMAP=y
CONFIG_UNIX=y
CONFIG_XFRM=y
# CONFIG_XFRM_USER is not set
@@ -520,19 +539,14 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# Network testing
#
# CONFIG_NET_PKTGEN is not set
+# CONFIG_NET_TCPPROBE is not set
# CONFIG_NET_DROP_MONITOR is not set
# CONFIG_HAMRADIO is not set
# CONFIG_CAN is not set
# CONFIG_IRDA is not set
# CONFIG_BT is not set
# CONFIG_AF_RXRPC is not set
-CONFIG_WIRELESS=y
-# CONFIG_CFG80211 is not set
-# CONFIG_LIB80211 is not set
-
-#
-# CFG80211 needs to be enabled for MAC80211
-#
+# CONFIG_WIRELESS is not set
# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
# CONFIG_NET_9P is not set
@@ -553,14 +567,118 @@ CONFIG_PREVENT_FIRMWARE_BUILD=y
# CONFIG_DEBUG_DEVRES is not set
# CONFIG_SYS_HYPERVISOR is not set
# CONFIG_CONNECTOR is not set
-# CONFIG_MTD is not set
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
+# CONFIG_MTD_CONCAT is not set
+CONFIG_MTD_PARTITIONS=y
+# CONFIG_MTD_REDBOOT_PARTS is not set
+CONFIG_MTD_CMDLINE_PARTS=y
+# CONFIG_MTD_AR7_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+# CONFIG_MTD_CHAR is not set
+CONFIG_MTD_BLKDEVS=y
+CONFIG_MTD_BLOCK=y
+CONFIG_FTL=y
+CONFIG_NFTL=y
+# CONFIG_NFTL_RW is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+CONFIG_MTD_OOPS=m
+
+#
+# RAM/ROM/Flash chip drivers
+#
+CONFIG_MTD_CFI=y
+CONFIG_MTD_JEDECPROBE=y
+CONFIG_MTD_GEN_PROBE=y
+# CONFIG_MTD_CFI_ADV_OPTIONS is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+CONFIG_MTD_CFI_INTELEXT=m
+CONFIG_MTD_CFI_AMDSTD=m
+CONFIG_MTD_CFI_STAA=m
+CONFIG_MTD_CFI_UTIL=y
+CONFIG_MTD_RAM=y
+CONFIG_MTD_ROM=m
+CONFIG_MTD_ABSENT=m
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+# CONFIG_MTD_PHYSMAP is not set
+# CONFIG_MTD_INTEL_VR_NOR is not set
+CONFIG_MTD_PLATRAM=y
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_PMC551 is not set
+# CONFIG_MTD_DATAFLASH is not set
+# CONFIG_MTD_M25P80 is not set
+# CONFIG_MTD_SST25L is not set
+# CONFIG_MTD_SLRAM is not set
+CONFIG_MTD_PHRAM=y
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+CONFIG_MTD_NAND=y
+# CONFIG_MTD_NAND_VERIFY_WRITE is not set
+# CONFIG_MTD_NAND_ECC_SMC is not set
+# CONFIG_MTD_NAND_MUSEUM_IDS is not set
+CONFIG_MTD_NAND_IDS=y
+# CONFIG_MTD_NAND_DISKONCHIP is not set
+# CONFIG_MTD_NAND_CAFE is not set
+# CONFIG_MTD_NAND_NANDSIM is not set
+CONFIG_MTD_NAND_PLATFORM=y
+# CONFIG_MTD_ALAUDA is not set
+CONFIG_MTD_NAND_SH_FLCTL=m
+# CONFIG_MTD_ONENAND is not set
+
+#
+# LPDDR flash memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
+
+#
+# UBI - Unsorted block images
+#
+CONFIG_MTD_UBI=y
+CONFIG_MTD_UBI_WL_THRESHOLD=4096
+CONFIG_MTD_UBI_BEB_RESERVE=1
+CONFIG_MTD_UBI_GLUEBI=m
+
+#
+# UBI debugging options
+#
+# CONFIG_MTD_UBI_DEBUG is not set
# CONFIG_PARPORT is not set
CONFIG_BLK_DEV=y
# CONFIG_BLK_CPQ_CISS_DA is not set
# CONFIG_BLK_DEV_DAC960 is not set
# CONFIG_BLK_DEV_UMEM is not set
# CONFIG_BLK_DEV_COW_COMMON is not set
-# CONFIG_BLK_DEV_LOOP is not set
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=y
#
# DRBD disabled because PROC_FS, INET or CONNECTOR not selected
@@ -575,34 +693,63 @@ CONFIG_BLK_DEV_RAM_SIZE=4096
# CONFIG_CDROM_PKTCDVD is not set
# CONFIG_ATA_OVER_ETH is not set
# CONFIG_BLK_DEV_HD is not set
-CONFIG_MISC_DEVICES=y
-# CONFIG_AD525X_DPOT is not set
-# CONFIG_PHANTOM is not set
-# CONFIG_SGI_IOC4 is not set
-# CONFIG_TIFM_CORE is not set
-# CONFIG_ICS932S401 is not set
-# CONFIG_ENCLOSURE_SERVICES is not set
-# CONFIG_HP_ILO is not set
-# CONFIG_ISL29003 is not set
-# CONFIG_DS1682 is not set
-# CONFIG_TI_DAC7512 is not set
-# CONFIG_C2PORT is not set
-
-#
-# EEPROM support
-#
-# CONFIG_EEPROM_AT24 is not set
-# CONFIG_EEPROM_AT25 is not set
-# CONFIG_EEPROM_LEGACY is not set
-# CONFIG_EEPROM_MAX6875 is not set
-# CONFIG_EEPROM_93CX6 is not set
-# CONFIG_CB710_CORE is not set
+# CONFIG_MISC_DEVICES is not set
CONFIG_HAVE_IDE=y
-# CONFIG_IDE is not set
+CONFIG_IDE=y
+
+#
+# Please see Documentation/ide/ide.txt for help/info on IDE drives
+#
+CONFIG_IDE_ATAPI=y
+# CONFIG_BLK_DEV_IDE_SATA is not set
+CONFIG_IDE_GD=y
+CONFIG_IDE_GD_ATA=y
+# CONFIG_IDE_GD_ATAPI is not set
+CONFIG_BLK_DEV_IDECD=y
+CONFIG_BLK_DEV_IDECD_VERBOSE_ERRORS=y
+# CONFIG_BLK_DEV_IDETAPE is not set
+# CONFIG_IDE_TASK_IOCTL is not set
+CONFIG_IDE_PROC_FS=y
+
+#
+# IDE chipset support/bugfixes
+#
+CONFIG_BLK_DEV_PLATFORM=y
+
+#
+# PCI IDE chipsets support
+#
+# CONFIG_BLK_DEV_GENERIC is not set
+# CONFIG_BLK_DEV_OPTI621 is not set
+# CONFIG_BLK_DEV_AEC62XX is not set
+# CONFIG_BLK_DEV_ALI15X3 is not set
+# CONFIG_BLK_DEV_AMD74XX is not set
+# CONFIG_BLK_DEV_CMD64X is not set
+# CONFIG_BLK_DEV_TRIFLEX is not set
+# CONFIG_BLK_DEV_CS5520 is not set
+# CONFIG_BLK_DEV_CS5530 is not set
+# CONFIG_BLK_DEV_HPT366 is not set
+# CONFIG_BLK_DEV_JMICRON is not set
+# CONFIG_BLK_DEV_SC1200 is not set
+# CONFIG_BLK_DEV_PIIX is not set
+# CONFIG_BLK_DEV_IT8172 is not set
+# CONFIG_BLK_DEV_IT8213 is not set
+# CONFIG_BLK_DEV_IT821X is not set
+# CONFIG_BLK_DEV_NS87415 is not set
+# CONFIG_BLK_DEV_PDC202XX_OLD is not set
+# CONFIG_BLK_DEV_PDC202XX_NEW is not set
+# CONFIG_BLK_DEV_SVWKS is not set
+# CONFIG_BLK_DEV_SIIMAGE is not set
+# CONFIG_BLK_DEV_SLC90E66 is not set
+# CONFIG_BLK_DEV_TRM290 is not set
+# CONFIG_BLK_DEV_VIA82CXXX is not set
+# CONFIG_BLK_DEV_TC86C001 is not set
+# CONFIG_BLK_DEV_IDEDMA is not set
#
# SCSI device support
#
+CONFIG_SCSI_MOD=y
# CONFIG_RAID_ATTRS is not set
CONFIG_SCSI=y
CONFIG_SCSI_DMA=y
@@ -616,11 +763,12 @@ CONFIG_SCSI_PROC_FS=y
CONFIG_BLK_DEV_SD=y
# CONFIG_CHR_DEV_ST is not set
# CONFIG_CHR_DEV_OSST is not set
-# CONFIG_BLK_DEV_SR is not set
+CONFIG_BLK_DEV_SR=y
+# CONFIG_BLK_DEV_SR_VENDOR is not set
# CONFIG_CHR_DEV_SG is not set
# CONFIG_CHR_DEV_SCH is not set
-# CONFIG_SCSI_MULTI_LUN is not set
-# CONFIG_SCSI_CONSTANTS is not set
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_SCSI_CONSTANTS=y
# CONFIG_SCSI_LOGGING is not set
# CONFIG_SCSI_SCAN_ASYNC is not set
CONFIG_SCSI_WAIT_SCAN=m
@@ -631,52 +779,10 @@ CONFIG_SCSI_WAIT_SCAN=m
# CONFIG_SCSI_SPI_ATTRS is not set
# CONFIG_SCSI_FC_ATTRS is not set
# CONFIG_SCSI_ISCSI_ATTRS is not set
+# CONFIG_SCSI_SAS_ATTRS is not set
# CONFIG_SCSI_SAS_LIBSAS is not set
# CONFIG_SCSI_SRP_ATTRS is not set
-CONFIG_SCSI_LOWLEVEL=y
-# CONFIG_ISCSI_TCP is not set
-# CONFIG_SCSI_BNX2_ISCSI is not set
-# CONFIG_BE2ISCSI is not set
-# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
-# CONFIG_SCSI_HPSA is not set
-# CONFIG_SCSI_3W_9XXX is not set
-# CONFIG_SCSI_3W_SAS is not set
-# CONFIG_SCSI_ACARD is not set
-# CONFIG_SCSI_AACRAID is not set
-# CONFIG_SCSI_AIC7XXX is not set
-# CONFIG_SCSI_AIC7XXX_OLD is not set
-# CONFIG_SCSI_AIC79XX is not set
-# CONFIG_SCSI_AIC94XX is not set
-# CONFIG_SCSI_MVSAS is not set
-# CONFIG_SCSI_ARCMSR is not set
-# CONFIG_MEGARAID_NEWGEN is not set
-# CONFIG_MEGARAID_LEGACY is not set
-# CONFIG_MEGARAID_SAS is not set
-# CONFIG_SCSI_MPT2SAS is not set
-# CONFIG_SCSI_HPTIOP is not set
-# CONFIG_LIBFC is not set
-# CONFIG_LIBFCOE is not set
-# CONFIG_FCOE is not set
-# CONFIG_SCSI_DMX3191D is not set
-# CONFIG_SCSI_FUTURE_DOMAIN is not set
-# CONFIG_SCSI_IPS is not set
-# CONFIG_SCSI_INITIO is not set
-# CONFIG_SCSI_INIA100 is not set
-# CONFIG_SCSI_STEX is not set
-# CONFIG_SCSI_SYM53C8XX_2 is not set
-# CONFIG_SCSI_IPR is not set
-# CONFIG_SCSI_QLOGIC_1280 is not set
-# CONFIG_SCSI_QLA_FC is not set
-# CONFIG_SCSI_QLA_ISCSI is not set
-# CONFIG_SCSI_LPFC is not set
-# CONFIG_SCSI_DC395x is not set
-# CONFIG_SCSI_DC390T is not set
-# CONFIG_SCSI_NSP32 is not set
-# CONFIG_SCSI_DEBUG is not set
-# CONFIG_SCSI_PMCRAID is not set
-# CONFIG_SCSI_PM8001 is not set
-# CONFIG_SCSI_SRP is not set
-# CONFIG_SCSI_BFA_FC is not set
+# CONFIG_SCSI_LOWLEVEL is not set
# CONFIG_SCSI_DH is not set
# CONFIG_SCSI_OSD_INITIATOR is not set
CONFIG_ATA=y
@@ -719,6 +825,7 @@ CONFIG_ATA_SFF=y
# CONFIG_PATA_IT821X is not set
# CONFIG_PATA_IT8213 is not set
# CONFIG_PATA_JMICRON is not set
+# CONFIG_PATA_LEGACY is not set
# CONFIG_PATA_TRIFLEX is not set
# CONFIG_PATA_MARVELL is not set
# CONFIG_PATA_MPIIX is not set
@@ -743,7 +850,17 @@ CONFIG_ATA_SFF=y
# CONFIG_PATA_WINBOND is not set
CONFIG_PATA_PLATFORM=y
# CONFIG_PATA_SCH is not set
-# CONFIG_MD is not set
+CONFIG_MD=y
+# CONFIG_BLK_DEV_MD is not set
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_DEBUG=y
+# CONFIG_DM_CRYPT is not set
+# CONFIG_DM_SNAPSHOT is not set
+# CONFIG_DM_MIRROR is not set
+# CONFIG_DM_ZERO is not set
+# CONFIG_DM_MULTIPATH is not set
+# CONFIG_DM_DELAY is not set
+CONFIG_DM_UEVENT=y
# CONFIG_FUSION is not set
#
@@ -820,11 +937,7 @@ CONFIG_SMSC911X=y
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
# CONFIG_TR is not set
-CONFIG_WLAN=y
-# CONFIG_ATMEL is not set
-# CONFIG_PRISM54 is not set
-# CONFIG_USB_ZD1201 is not set
-# CONFIG_HOSTAP is not set
+# CONFIG_WLAN is not set
#
# Enable WiMAX (Networking options) to see the WiMAX drivers
@@ -838,6 +951,7 @@ CONFIG_WLAN=y
# CONFIG_USB_PEGASUS is not set
# CONFIG_USB_RTL8150 is not set
# CONFIG_USB_USBNET is not set
+# CONFIG_USB_IPHETH is not set
# CONFIG_WAN is not set
# CONFIG_FDDI is not set
# CONFIG_HIPPI is not set
@@ -923,7 +1037,7 @@ CONFIG_VT=y
CONFIG_CONSOLE_TRANSLATIONS=y
CONFIG_VT_CONSOLE=y
CONFIG_HW_CONSOLE=y
-# CONFIG_VT_HW_CONSOLE_BINDING is not set
+CONFIG_VT_HW_CONSOLE_BINDING=y
CONFIG_DEVKMEM=y
# CONFIG_SERIAL_NONSTANDARD is not set
# CONFIG_NOZOMI is not set
@@ -940,9 +1054,11 @@ CONFIG_DEVKMEM=y
CONFIG_SERIAL_SH_SCI=y
CONFIG_SERIAL_SH_SCI_NR_UARTS=6
CONFIG_SERIAL_SH_SCI_CONSOLE=y
+CONFIG_SERIAL_SH_SCI_DMA=y
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
# CONFIG_SERIAL_JSM is not set
+# CONFIG_SERIAL_TIMBERDALE is not set
CONFIG_UNIX98_PTYS=y
# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
# CONFIG_LEGACY_PTYS is not set
@@ -988,6 +1104,7 @@ CONFIG_I2C_HELPER_AUTO=y
# CONFIG_I2C_OCORES is not set
# CONFIG_I2C_SH_MOBILE is not set
# CONFIG_I2C_SIMTEC is not set
+# CONFIG_I2C_XILINX is not set
#
# External I2C/SMBus adapter drivers
@@ -1001,15 +1118,9 @@ CONFIG_I2C_HELPER_AUTO=y
#
# CONFIG_I2C_PCA_PLATFORM is not set
# CONFIG_I2C_STUB is not set
-
-#
-# Miscellaneous I2C Chip support
-#
-# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
# CONFIG_I2C_DEBUG_BUS is not set
-# CONFIG_I2C_DEBUG_CHIP is not set
CONFIG_SPI=y
# CONFIG_SPI_DEBUG is not set
CONFIG_SPI_MASTER=y
@@ -1045,7 +1156,8 @@ CONFIG_WATCHDOG=y
#
# CONFIG_SOFT_WATCHDOG is not set
# CONFIG_ALIM7101_WDT is not set
-# CONFIG_SH_WDT is not set
+CONFIG_SH_WDT=y
+# CONFIG_SH_WDT_MMAP is not set
#
# PCI-based Watchdog Cards
@@ -1068,6 +1180,7 @@ CONFIG_SSB_POSSIBLE=y
# Multifunction device drivers
#
# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_88PM860X is not set
# CONFIG_MFD_SM501 is not set
# CONFIG_MFD_SH_MOBILE_SDHI is not set
# CONFIG_HTC_PASIC3 is not set
@@ -1075,15 +1188,17 @@ CONFIG_SSB_POSSIBLE=y
# CONFIG_MFD_TMIO is not set
# CONFIG_PMIC_DA903X is not set
# CONFIG_PMIC_ADP5520 is not set
+# CONFIG_MFD_MAX8925 is not set
# CONFIG_MFD_WM8400 is not set
# CONFIG_MFD_WM831X is not set
# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_WM8994 is not set
# CONFIG_MFD_PCF50633 is not set
# CONFIG_MFD_MC13783 is not set
# CONFIG_AB3100_CORE is not set
# CONFIG_EZX_PCAP is not set
-# CONFIG_MFD_88PM8607 is not set
# CONFIG_AB4500_CORE is not set
+# CONFIG_LPC_SCH is not set
# CONFIG_REGULATOR is not set
# CONFIG_MEDIA_SUPPORT is not set
@@ -1091,6 +1206,7 @@ CONFIG_SSB_POSSIBLE=y
# Graphics support
#
CONFIG_VGA_ARB=y
+CONFIG_VGA_ARB_MAX_GPUS=16
# CONFIG_DRM is not set
# CONFIG_VGASTATE is not set
CONFIG_VIDEO_OUTPUT_CONTROL=m
@@ -1121,6 +1237,7 @@ CONFIG_USB_HID=y
#
# Special HID drivers
#
+# CONFIG_HID_3M_PCT is not set
# CONFIG_HID_A4TECH is not set
# CONFIG_HID_APPLE is not set
# CONFIG_HID_BELKIN is not set
@@ -1135,12 +1252,16 @@ CONFIG_USB_HID=y
# CONFIG_HID_KENSINGTON is not set
# CONFIG_HID_LOGITECH is not set
# CONFIG_HID_MICROSOFT is not set
+# CONFIG_HID_MOSART is not set
# CONFIG_HID_MONTEREY is not set
# CONFIG_HID_NTRIG is not set
+# CONFIG_HID_ORTEK is not set
# CONFIG_HID_PANTHERLORD is not set
# CONFIG_HID_PETALYNX is not set
+# CONFIG_HID_QUANTA is not set
# CONFIG_HID_SAMSUNG is not set
# CONFIG_HID_SONY is not set
+# CONFIG_HID_STANTUM is not set
# CONFIG_HID_SUNPLUS is not set
# CONFIG_HID_GREENASIA is not set
# CONFIG_HID_SMARTJOYPLUS is not set
@@ -1241,7 +1362,6 @@ CONFIG_USB_STORAGE=y
# CONFIG_USB_RIO500 is not set
# CONFIG_USB_LEGOTOWER is not set
# CONFIG_USB_LCD is not set
-# CONFIG_USB_BERRY_CHARGE is not set
# CONFIG_USB_LED is not set
# CONFIG_USB_CYPRESS_CY7C63 is not set
# CONFIG_USB_CYTHERM is not set
@@ -1253,7 +1373,6 @@ CONFIG_USB_STORAGE=y
# CONFIG_USB_IOWARRIOR is not set
# CONFIG_USB_TEST is not set
# CONFIG_USB_ISIGHTFW is not set
-# CONFIG_USB_VST is not set
CONFIG_USB_GADGET=y
# CONFIG_USB_GADGET_DEBUG is not set
# CONFIG_USB_GADGET_DEBUG_FILES is not set
@@ -1291,6 +1410,7 @@ CONFIG_USB_GADGET_DUALSPEED=y
# CONFIG_USB_MIDI_GADGET is not set
# CONFIG_USB_G_PRINTER is not set
# CONFIG_USB_CDC_COMPOSITE is not set
+# CONFIG_USB_G_NOKIA is not set
# CONFIG_USB_G_MULTI is not set
#
@@ -1370,43 +1490,102 @@ CONFIG_RTC_DRV_MAX6900=y
#
CONFIG_RTC_DRV_SH=y
# CONFIG_RTC_DRV_GENERIC is not set
-# CONFIG_DMADEVICES is not set
+CONFIG_DMADEVICES=y
+# CONFIG_DMADEVICES_DEBUG is not set
+
+#
+# DMA Devices
+#
+CONFIG_SH_DMAE=y
+CONFIG_DMA_ENGINE=y
+
+#
+# DMA Clients
+#
+# CONFIG_NET_DMA is not set
+# CONFIG_ASYNC_TX_DMA is not set
+# CONFIG_DMATEST is not set
# CONFIG_AUXDISPLAY is not set
CONFIG_UIO=m
# CONFIG_UIO_CIF is not set
-# CONFIG_UIO_PDRV is not set
-# CONFIG_UIO_PDRV_GENIRQ is not set
-# CONFIG_UIO_SMX is not set
+CONFIG_UIO_PDRV=m
+CONFIG_UIO_PDRV_GENIRQ=m
# CONFIG_UIO_AEC is not set
# CONFIG_UIO_SERCOS3 is not set
-# CONFIG_UIO_PCI_GENERIC is not set
+CONFIG_UIO_PCI_GENERIC=m
+# CONFIG_UIO_NETX is not set
#
# TI VLYNQ
#
-# CONFIG_STAGING is not set
+CONFIG_STAGING=y
+# CONFIG_STAGING_EXCLUDE_BUILD is not set
+# CONFIG_USB_IP_COMMON is not set
+# CONFIG_ECHO is not set
+# CONFIG_POCH is not set
+# CONFIG_COMEDI is not set
+# CONFIG_ASUS_OLED is not set
+# CONFIG_TRANZPORT is not set
+
+#
+# Qualcomm MSM Camera And Video
+#
+
+#
+# Camera Sensor Selection
+#
+# CONFIG_INPUT_GPIO is not set
+# CONFIG_POHMELFS is not set
+# CONFIG_IDE_PHISON is not set
+# CONFIG_VME_BUS is not set
+
+#
+# RAR Register Driver
+#
+# CONFIG_RAR_REGISTER is not set
+# CONFIG_IIO is not set
+CONFIG_RAMZSWAP=m
+CONFIG_RAMZSWAP_STATS=y
+# CONFIG_BATMAN_ADV is not set
+# CONFIG_STRIP is not set
+# CONFIG_DT3155 is not set
+# CONFIG_CRYSTALHD is not set
#
# File systems
#
CONFIG_EXT2_FS=y
-# CONFIG_EXT2_FS_XATTR is not set
+CONFIG_EXT2_FS_XATTR=y
+# CONFIG_EXT2_FS_POSIX_ACL is not set
+# CONFIG_EXT2_FS_SECURITY is not set
# CONFIG_EXT2_FS_XIP is not set
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
CONFIG_EXT3_FS_XATTR=y
# CONFIG_EXT3_FS_POSIX_ACL is not set
# CONFIG_EXT3_FS_SECURITY is not set
-# CONFIG_EXT4_FS is not set
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_XATTR=y
+# CONFIG_EXT4_FS_POSIX_ACL is not set
+# CONFIG_EXT4_FS_SECURITY is not set
+# CONFIG_EXT4_DEBUG is not set
CONFIG_JBD=y
# CONFIG_JBD_DEBUG is not set
+CONFIG_JBD2=y
+# CONFIG_JBD2_DEBUG is not set
CONFIG_FS_MBCACHE=y
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
-# CONFIG_FS_POSIX_ACL is not set
-# CONFIG_XFS_FS is not set
+CONFIG_FS_POSIX_ACL=y
+CONFIG_XFS_FS=y
+# CONFIG_XFS_QUOTA is not set
+# CONFIG_XFS_POSIX_ACL is not set
+# CONFIG_XFS_RT is not set
+# CONFIG_XFS_DEBUG is not set
+# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
-# CONFIG_BTRFS_FS is not set
+CONFIG_BTRFS_FS=y
+# CONFIG_BTRFS_FS_POSIX_ACL is not set
# CONFIG_NILFS2_FS is not set
CONFIG_FILE_LOCKING=y
CONFIG_FSNOTIFY=y
@@ -1415,19 +1594,30 @@ CONFIG_INOTIFY=y
CONFIG_INOTIFY_USER=y
# CONFIG_QUOTA is not set
# CONFIG_AUTOFS_FS is not set
-# CONFIG_AUTOFS4_FS is not set
-# CONFIG_FUSE_FS is not set
+CONFIG_AUTOFS4_FS=m
+CONFIG_FUSE_FS=y
+CONFIG_CUSE=m
#
# Caches
#
-# CONFIG_FSCACHE is not set
+CONFIG_FSCACHE=m
+# CONFIG_FSCACHE_STATS is not set
+# CONFIG_FSCACHE_HISTOGRAM is not set
+# CONFIG_FSCACHE_DEBUG is not set
+# CONFIG_FSCACHE_OBJECT_LIST is not set
+CONFIG_CACHEFILES=m
+# CONFIG_CACHEFILES_DEBUG is not set
+# CONFIG_CACHEFILES_HISTOGRAM is not set
#
# CD-ROM/DVD Filesystems
#
-# CONFIG_ISO9660_FS is not set
-# CONFIG_UDF_FS is not set
+CONFIG_ISO9660_FS=m
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=m
+CONFIG_UDF_NLS=y
#
# DOS/FAT/NT Filesystems
@@ -1448,7 +1638,7 @@ CONFIG_TMPFS=y
# CONFIG_TMPFS_POSIX_ACL is not set
CONFIG_HUGETLBFS=y
CONFIG_HUGETLB_PAGE=y
-# CONFIG_CONFIGFS_FS is not set
+CONFIG_CONFIGFS_FS=y
CONFIG_MISC_FILESYSTEMS=y
# CONFIG_ADFS_FS is not set
# CONFIG_AFFS_FS is not set
@@ -1457,30 +1647,60 @@ CONFIG_MISC_FILESYSTEMS=y
# CONFIG_BEFS_FS is not set
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
-# CONFIG_CRAMFS is not set
-# CONFIG_SQUASHFS is not set
+CONFIG_JFFS2_FS=m
+CONFIG_JFFS2_FS_DEBUG=0
+CONFIG_JFFS2_FS_WRITEBUFFER=y
+# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
+# CONFIG_JFFS2_SUMMARY is not set
+CONFIG_JFFS2_FS_XATTR=y
+CONFIG_JFFS2_FS_POSIX_ACL=y
+CONFIG_JFFS2_FS_SECURITY=y
+# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
+CONFIG_JFFS2_ZLIB=y
+# CONFIG_JFFS2_LZO is not set
+CONFIG_JFFS2_RTIME=y
+# CONFIG_JFFS2_RUBIN is not set
+CONFIG_UBIFS_FS=m
+# CONFIG_UBIFS_FS_XATTR is not set
+# CONFIG_UBIFS_FS_ADVANCED_COMPR is not set
+CONFIG_UBIFS_FS_LZO=y
+CONFIG_UBIFS_FS_ZLIB=y
+# CONFIG_UBIFS_FS_DEBUG is not set
+CONFIG_LOGFS=m
+CONFIG_CRAMFS=m
+CONFIG_SQUASHFS=m
+# CONFIG_SQUASHFS_EMBEDDED is not set
+CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3
# CONFIG_VXFS_FS is not set
# CONFIG_MINIX_FS is not set
# CONFIG_OMFS_FS is not set
# CONFIG_HPFS_FS is not set
# CONFIG_QNX4FS_FS is not set
-# CONFIG_ROMFS_FS is not set
+CONFIG_ROMFS_FS=m
+CONFIG_ROMFS_BACKED_BY_BLOCK=y
+# CONFIG_ROMFS_BACKED_BY_MTD is not set
+# CONFIG_ROMFS_BACKED_BY_BOTH is not set
+CONFIG_ROMFS_ON_BLOCK=y
# CONFIG_SYSV_FS is not set
# CONFIG_UFS_FS is not set
CONFIG_NETWORK_FILESYSTEMS=y
CONFIG_NFS_FS=y
CONFIG_NFS_V3=y
# CONFIG_NFS_V3_ACL is not set
-# CONFIG_NFS_V4 is not set
+CONFIG_NFS_V4=y
+# CONFIG_NFS_V4_1 is not set
CONFIG_ROOT_NFS=y
# CONFIG_NFSD is not set
CONFIG_LOCKD=y
CONFIG_LOCKD_V4=y
+CONFIG_EXPORTFS=y
CONFIG_NFS_COMMON=y
CONFIG_SUNRPC=y
-# CONFIG_RPCSEC_GSS_KRB5 is not set
+CONFIG_SUNRPC_GSS=y
+CONFIG_RPCSEC_GSS_KRB5=y
# CONFIG_RPCSEC_GSS_SPKM3 is not set
# CONFIG_SMB_FS is not set
+# CONFIG_CEPH_FS is not set
# CONFIG_CIFS is not set
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
@@ -1493,7 +1713,7 @@ CONFIG_SUNRPC=y
CONFIG_MSDOS_PARTITION=y
CONFIG_NLS=y
CONFIG_NLS_DEFAULT="iso8859-1"
-# CONFIG_NLS_CODEPAGE_437 is not set
+CONFIG_NLS_CODEPAGE_437=y
# CONFIG_NLS_CODEPAGE_737 is not set
# CONFIG_NLS_CODEPAGE_775 is not set
# CONFIG_NLS_CODEPAGE_850 is not set
@@ -1508,16 +1728,16 @@ CONFIG_NLS_DEFAULT="iso8859-1"
# CONFIG_NLS_CODEPAGE_865 is not set
# CONFIG_NLS_CODEPAGE_866 is not set
# CONFIG_NLS_CODEPAGE_869 is not set
-# CONFIG_NLS_CODEPAGE_936 is not set
-# CONFIG_NLS_CODEPAGE_950 is not set
-# CONFIG_NLS_CODEPAGE_932 is not set
-# CONFIG_NLS_CODEPAGE_949 is not set
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=y
+CONFIG_NLS_CODEPAGE_949=m
# CONFIG_NLS_CODEPAGE_874 is not set
# CONFIG_NLS_ISO8859_8 is not set
# CONFIG_NLS_CODEPAGE_1250 is not set
# CONFIG_NLS_CODEPAGE_1251 is not set
-# CONFIG_NLS_ASCII is not set
-# CONFIG_NLS_ISO8859_1 is not set
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=y
# CONFIG_NLS_ISO8859_2 is not set
# CONFIG_NLS_ISO8859_3 is not set
# CONFIG_NLS_ISO8859_4 is not set
@@ -1527,10 +1747,10 @@ CONFIG_NLS_DEFAULT="iso8859-1"
# CONFIG_NLS_ISO8859_9 is not set
# CONFIG_NLS_ISO8859_13 is not set
# CONFIG_NLS_ISO8859_14 is not set
-# CONFIG_NLS_ISO8859_15 is not set
+CONFIG_NLS_ISO8859_15=m
# CONFIG_NLS_KOI8_R is not set
# CONFIG_NLS_KOI8_U is not set
-# CONFIG_NLS_UTF8 is not set
+CONFIG_NLS_UTF8=m
# CONFIG_DLM is not set
#
@@ -1538,7 +1758,7 @@ CONFIG_NLS_DEFAULT="iso8859-1"
#
CONFIG_TRACE_IRQFLAGS_SUPPORT=y
CONFIG_PRINTK_TIME=y
-CONFIG_ENABLE_WARN_DEPRECATED=y
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_FRAME_WARN=1024
CONFIG_MAGIC_SYSRQ=y
@@ -1547,7 +1767,7 @@ CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
CONFIG_DEBUG_KERNEL=y
-CONFIG_DEBUG_SHIRQ=y
+# CONFIG_DEBUG_SHIRQ is not set
CONFIG_DETECT_SOFTLOCKUP=y
# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
@@ -1555,8 +1775,8 @@ CONFIG_DETECT_HUNG_TASK=y
# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
CONFIG_SCHED_DEBUG=y
-# CONFIG_SCHEDSTATS is not set
-# CONFIG_TIMER_STATS is not set
+CONFIG_SCHEDSTATS=y
+CONFIG_TIMER_STATS=y
# CONFIG_DEBUG_OBJECTS is not set
# CONFIG_DEBUG_SLAB is not set
CONFIG_DEBUG_PREEMPT=y
@@ -1573,24 +1793,28 @@ CONFIG_STACKTRACE=y
# CONFIG_DEBUG_KOBJECT is not set
CONFIG_DEBUG_BUGVERBOSE=y
# CONFIG_DEBUG_INFO is not set
-CONFIG_DEBUG_VM=y
+# CONFIG_DEBUG_VM is not set
# CONFIG_DEBUG_WRITECOUNT is not set
-# CONFIG_DEBUG_MEMORY_INIT is not set
+CONFIG_DEBUG_MEMORY_INIT=y
# CONFIG_DEBUG_LIST is not set
# CONFIG_DEBUG_SG is not set
# CONFIG_DEBUG_NOTIFIERS is not set
# CONFIG_DEBUG_CREDENTIALS is not set
CONFIG_FRAME_POINTER=y
# CONFIG_RCU_TORTURE_TEST is not set
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+CONFIG_RCU_CPU_STALL_DETECTOR=y
+# CONFIG_RCU_CPU_STALL_VERBOSE is not set
+# CONFIG_KPROBES_SANITY_TEST is not set
# CONFIG_BACKTRACE_SELF_TEST is not set
# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
+# CONFIG_LKDTM is not set
# CONFIG_FAULT_INJECTION is not set
-# CONFIG_LATENCYTOP is not set
-# CONFIG_SYSCTL_SYSCALL_CHECK is not set
+CONFIG_LATENCYTOP=y
+CONFIG_SYSCTL_SYSCALL_CHECK=y
# CONFIG_PAGE_POISONING is not set
CONFIG_NOP_TRACER=y
+CONFIG_HAVE_FTRACE_NMI_ENTER=y
CONFIG_HAVE_FUNCTION_TRACER=y
CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
@@ -1598,16 +1822,19 @@ CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
CONFIG_RING_BUFFER=y
+CONFIG_FTRACE_NMI_ENTER=y
CONFIG_EVENT_TRACING=y
CONFIG_CONTEXT_SWITCH_TRACER=y
+CONFIG_RING_BUFFER_ALLOW_SWAP=y
CONFIG_TRACING=y
+CONFIG_GENERIC_TRACER=y
CONFIG_TRACING_SUPPORT=y
CONFIG_FTRACE=y
-# CONFIG_FUNCTION_TRACER is not set
+CONFIG_FUNCTION_TRACER=y
+# CONFIG_FUNCTION_GRAPH_TRACER is not set
# CONFIG_IRQSOFF_TRACER is not set
# CONFIG_PREEMPT_TRACER is not set
# CONFIG_SCHED_TRACER is not set
-# CONFIG_ENABLE_DEFAULT_TRACERS is not set
# CONFIG_FTRACE_SYSCALLS is not set
# CONFIG_BOOT_TRACER is not set
CONFIG_BRANCH_PROFILE_NONE=y
@@ -1619,9 +1846,13 @@ CONFIG_KSYM_TRACER=y
# CONFIG_KMEMTRACE is not set
# CONFIG_WORKQUEUE_TRACER is not set
# CONFIG_BLK_DEV_IO_TRACE is not set
+CONFIG_DYNAMIC_FTRACE=y
+# CONFIG_FUNCTION_PROFILER is not set
+CONFIG_FTRACE_MCOUNT_RECORD=y
+# CONFIG_FTRACE_STARTUP_TEST is not set
# CONFIG_RING_BUFFER_BENCHMARK is not set
# CONFIG_DYNAMIC_DEBUG is not set
-# CONFIG_DMA_API_DEBUG is not set
+CONFIG_DMA_API_DEBUG=y
# CONFIG_SAMPLES is not set
CONFIG_HAVE_ARCH_KGDB=y
# CONFIG_KGDB is not set
@@ -1632,6 +1863,7 @@ CONFIG_DEBUG_STACK_USAGE=y
CONFIG_DUMP_CODE=y
CONFIG_DWARF_UNWINDER=y
# CONFIG_SH_NO_BSS_INIT is not set
+CONFIG_MCOUNT=y
#
# Security options
@@ -1649,10 +1881,21 @@ CONFIG_CRYPTO=y
#
# Crypto core or helper
#
-# CONFIG_CRYPTO_MANAGER is not set
-# CONFIG_CRYPTO_MANAGER2 is not set
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
+CONFIG_CRYPTO_AEAD2=y
+CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_BLKCIPHER2=y
+CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_PCOMP=y
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
# CONFIG_CRYPTO_GF128MUL is not set
# CONFIG_CRYPTO_NULL is not set
+# CONFIG_CRYPTO_PCRYPT is not set
+CONFIG_CRYPTO_WORKQUEUE=y
# CONFIG_CRYPTO_CRYPTD is not set
# CONFIG_CRYPTO_AUTHENC is not set
# CONFIG_CRYPTO_TEST is not set
@@ -1667,7 +1910,7 @@ CONFIG_CRYPTO=y
#
# Block modes
#
-# CONFIG_CRYPTO_CBC is not set
+CONFIG_CRYPTO_CBC=y
# CONFIG_CRYPTO_CTR is not set
# CONFIG_CRYPTO_CTS is not set
# CONFIG_CRYPTO_ECB is not set
@@ -1685,10 +1928,10 @@ CONFIG_CRYPTO=y
#
# Digest
#
-# CONFIG_CRYPTO_CRC32C is not set
+CONFIG_CRYPTO_CRC32C=y
# CONFIG_CRYPTO_GHASH is not set
# CONFIG_CRYPTO_MD4 is not set
-# CONFIG_CRYPTO_MD5 is not set
+CONFIG_CRYPTO_MD5=y
# CONFIG_CRYPTO_MICHAEL_MIC is not set
# CONFIG_CRYPTO_RMD128 is not set
# CONFIG_CRYPTO_RMD160 is not set
@@ -1710,7 +1953,7 @@ CONFIG_CRYPTO=y
# CONFIG_CRYPTO_CAMELLIA is not set
# CONFIG_CRYPTO_CAST5 is not set
# CONFIG_CRYPTO_CAST6 is not set
-# CONFIG_CRYPTO_DES is not set
+CONFIG_CRYPTO_DES=y
# CONFIG_CRYPTO_FCRYPT is not set
# CONFIG_CRYPTO_KHAZAD is not set
# CONFIG_CRYPTO_SALSA20 is not set
@@ -1722,9 +1965,9 @@ CONFIG_CRYPTO=y
#
# Compression
#
-# CONFIG_CRYPTO_DEFLATE is not set
+CONFIG_CRYPTO_DEFLATE=m
# CONFIG_CRYPTO_ZLIB is not set
-# CONFIG_CRYPTO_LZO is not set
+CONFIG_CRYPTO_LZO=m
#
# Random Number Generation
@@ -1732,6 +1975,7 @@ CONFIG_CRYPTO=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_HW=y
# CONFIG_CRYPTO_DEV_HIFN_795X is not set
+# CONFIG_VIRTUALIZATION is not set
CONFIG_BINARY_PRINTF=y
#
@@ -1740,12 +1984,22 @@ CONFIG_BINARY_PRINTF=y
CONFIG_BITREVERSE=y
CONFIG_GENERIC_FIND_LAST_BIT=y
# CONFIG_CRC_CCITT is not set
-# CONFIG_CRC16 is not set
+CONFIG_CRC16=y
# CONFIG_CRC_T10DIF is not set
-# CONFIG_CRC_ITU_T is not set
+CONFIG_CRC_ITU_T=m
CONFIG_CRC32=y
# CONFIG_CRC7 is not set
-# CONFIG_LIBCRC32C is not set
+CONFIG_LIBCRC32C=y
+CONFIG_AUDIT_GENERIC=y
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_LZO_COMPRESS=m
+CONFIG_LZO_DECOMPRESS=y
+CONFIG_DECOMPRESS_GZIP=y
+CONFIG_DECOMPRESS_BZIP2=y
+CONFIG_DECOMPRESS_LZMA=y
+CONFIG_DECOMPRESS_LZO=y
+CONFIG_BTREE=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
diff --git a/arch/sh/configs/se7206_defconfig b/arch/sh/configs/se7206_defconfig
index 43e6780a89d1..910eaec934c9 100644
--- a/arch/sh/configs/se7206_defconfig
+++ b/arch/sh/configs/se7206_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.33-rc2
-# Mon Jan 4 13:30:00 2010
+# Linux kernel version: 2.6.34-rc5
+# Tue May 18 16:34:06 2010
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -13,8 +13,8 @@ CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
-CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_IRQ_PER_CPU=y
+CONFIG_SPARSE_IRQ=y
# CONFIG_GENERIC_GPIO is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -31,6 +31,7 @@ CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DMA_NONCOHERENT=y
+CONFIG_NEED_DMA_MAP_STATE=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
CONFIG_CONSTRUCTORS=y
@@ -46,9 +47,11 @@ CONFIG_LOCALVERSION_AUTO=y
CONFIG_HAVE_KERNEL_GZIP=y
CONFIG_HAVE_KERNEL_BZIP2=y
CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_LZO=y
CONFIG_KERNEL_GZIP=y
# CONFIG_KERNEL_BZIP2 is not set
# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_LZO is not set
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
CONFIG_POSIX_MQUEUE=y
@@ -72,7 +75,6 @@ CONFIG_RCU_FANOUT=32
CONFIG_TREE_RCU_TRACE=y
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_GROUP_SCHED is not set
CONFIG_CGROUPS=y
CONFIG_CGROUP_DEBUG=y
CONFIG_CGROUP_NS=y
@@ -82,6 +84,7 @@ CONFIG_CGROUP_DEVICE=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_RESOURCE_COUNTERS=y
CONFIG_CGROUP_MEM_RES_CTLR=y
+# CONFIG_CGROUP_SCHED is not set
CONFIG_MM_OWNER=y
# CONFIG_SYSFS_DEPRECATED_V2 is not set
CONFIG_RELAY=y
@@ -96,6 +99,7 @@ CONFIG_INITRAMFS_SOURCE=""
CONFIG_RD_GZIP=y
# CONFIG_RD_BZIP2 is not set
# CONFIG_RD_LZMA is not set
+# CONFIG_RD_LZO is not set
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_SYSCTL=y
CONFIG_ANON_INODES=y
@@ -123,7 +127,6 @@ CONFIG_PERF_USE_VMALLOC=y
# Kernel Performance Events And Counters
#
CONFIG_PERF_EVENTS=y
-CONFIG_EVENT_PROFILE=y
# CONFIG_PERF_COUNTERS is not set
# CONFIG_DEBUG_PERF_USE_VMALLOC is not set
CONFIG_VM_EVENT_COUNTERS=y
@@ -133,7 +136,6 @@ CONFIG_VM_EVENT_COUNTERS=y
CONFIG_SLOB=y
# CONFIG_MMAP_ALLOW_UNINITIALIZED is not set
CONFIG_PROFILING=y
-CONFIG_TRACEPOINTS=y
CONFIG_OPROFILE=y
CONFIG_HAVE_OPROFILE=y
# CONFIG_KPROBES is not set
@@ -143,6 +145,7 @@ CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_DMA_ATTRS=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+CONFIG_HAVE_HW_BREAKPOINT=y
#
# GCOV-based kernel profiling
@@ -162,7 +165,6 @@ CONFIG_BLOCK=y
CONFIG_LBDAF=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_BLK_DEV_INTEGRITY is not set
-# CONFIG_BLK_CGROUP is not set
#
# IO Schedulers
@@ -261,6 +263,7 @@ CONFIG_ARCH_SPARSEMEM_DEFAULT=y
CONFIG_MAX_ACTIVE_REGIONS=1
CONFIG_ARCH_POPULATES_NODE_MAP=y
CONFIG_ARCH_SELECT_MEMORY_MODEL=y
+CONFIG_UNCACHED_MAPPING=y
CONFIG_PAGE_SIZE_4KB=y
# CONFIG_PAGE_SIZE_8KB is not set
# CONFIG_PAGE_SIZE_16KB is not set
@@ -276,7 +279,7 @@ CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
-CONFIG_NR_QUICK=2
+CONFIG_NR_QUICK=1
CONFIG_NOMMU_INITIAL_TRIM_EXCESS=1
#
@@ -361,7 +364,6 @@ CONFIG_HZ=1000
# CONFIG_PREEMPT_VOLUNTARY is not set
CONFIG_PREEMPT=y
CONFIG_GUSA=y
-# CONFIG_SPARSE_IRQ is not set
#
# Boot options
@@ -400,7 +402,6 @@ CONFIG_NET=y
# Networking options
#
CONFIG_PACKET=y
-# CONFIG_PACKET_MMAP is not set
CONFIG_UNIX=y
CONFIG_XFRM=y
# CONFIG_XFRM_USER is not set
@@ -463,7 +464,6 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# Network testing
#
# CONFIG_NET_PKTGEN is not set
-# CONFIG_NET_DROP_MONITOR is not set
# CONFIG_HAMRADIO is not set
# CONFIG_CAN is not set
# CONFIG_IRDA is not set
@@ -607,6 +607,7 @@ CONFIG_HAVE_IDE=y
#
# SCSI device support
#
+CONFIG_SCSI_MOD=y
# CONFIG_RAID_ATTRS is not set
# CONFIG_SCSI is not set
# CONFIG_SCSI_DMA is not set
@@ -688,6 +689,7 @@ CONFIG_SERIAL_SH_SCI_NR_UARTS=4
CONFIG_SERIAL_SH_SCI_CONSOLE=y
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_TIMBERDALE is not set
# CONFIG_UNIX98_PTYS is not set
# CONFIG_LEGACY_PTYS is not set
# CONFIG_IPMI_HANDLER is not set
@@ -799,7 +801,6 @@ CONFIG_EXT2_FS=y
# CONFIG_EXT2_FS_XATTR is not set
# CONFIG_EXT3_FS is not set
# CONFIG_EXT4_FS is not set
-CONFIG_EXT4_USE_FOR_EXT23=y
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
@@ -853,6 +854,7 @@ CONFIG_MISC_FILESYSTEMS=y
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
# CONFIG_JFFS2_FS is not set
+# CONFIG_LOGFS is not set
CONFIG_CRAMFS=y
# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
@@ -881,6 +883,7 @@ CONFIG_SUNRPC=y
# CONFIG_RPCSEC_GSS_KRB5 is not set
# CONFIG_RPCSEC_GSS_SPKM3 is not set
# CONFIG_SMB_FS is not set
+# CONFIG_CEPH_FS is not set
# CONFIG_CIFS is not set
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
@@ -919,6 +922,7 @@ CONFIG_SCHED_DEBUG=y
# CONFIG_SCHEDSTATS is not set
# CONFIG_TIMER_STATS is not set
# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_DEBUG_KMEMLEAK is not set
CONFIG_DEBUG_PREEMPT=y
# CONFIG_DEBUG_RT_MUTEXES is not set
# CONFIG_RT_MUTEX_TESTER is not set
@@ -929,7 +933,6 @@ CONFIG_DEBUG_PREEMPT=y
# CONFIG_LOCK_STAT is not set
CONFIG_DEBUG_SPINLOCK_SLEEP=y
# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
-CONFIG_STACKTRACE=y
# CONFIG_DEBUG_KOBJECT is not set
CONFIG_DEBUG_BUGVERBOSE=y
# CONFIG_DEBUG_INFO is not set
@@ -947,11 +950,11 @@ CONFIG_FRAME_POINTER=y
# CONFIG_BACKTRACE_SELF_TEST is not set
# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
+# CONFIG_LKDTM is not set
# CONFIG_FAULT_INJECTION is not set
# CONFIG_LATENCYTOP is not set
# CONFIG_SYSCTL_SYSCALL_CHECK is not set
# CONFIG_PAGE_POISONING is not set
-CONFIG_NOP_TRACER=y
CONFIG_HAVE_FUNCTION_TRACER=y
CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
@@ -959,10 +962,7 @@ CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
CONFIG_RING_BUFFER=y
-CONFIG_EVENT_TRACING=y
-CONFIG_CONTEXT_SWITCH_TRACER=y
CONFIG_RING_BUFFER_ALLOW_SWAP=y
-CONFIG_TRACING=y
CONFIG_TRACING_SUPPORT=y
CONFIG_FTRACE=y
# CONFIG_FUNCTION_TRACER is not set
@@ -975,6 +975,7 @@ CONFIG_FTRACE=y
CONFIG_BRANCH_PROFILE_NONE=y
# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
# CONFIG_PROFILE_ALL_BRANCHES is not set
+# CONFIG_KSYM_TRACER is not set
# CONFIG_STACK_TRACER is not set
# CONFIG_KMEMTRACE is not set
# CONFIG_WORKQUEUE_TRACER is not set
@@ -1094,7 +1095,8 @@ CONFIG_CRYPTO_LZO=y
#
# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
-CONFIG_BINARY_PRINTF=y
+# CONFIG_VIRTUALIZATION is not set
+# CONFIG_BINARY_PRINTF is not set
#
# Library routines
diff --git a/arch/sh/configs/se7343_defconfig b/arch/sh/configs/se7343_defconfig
index ec494e32fa2e..586cb1e02be0 100644
--- a/arch/sh/configs/se7343_defconfig
+++ b/arch/sh/configs/se7343_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.33-rc2
-# Mon Jan 4 13:30:41 2010
+# Linux kernel version: 2.6.34-rc5
+# Tue May 18 16:37:31 2010
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -13,8 +13,8 @@ CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
-CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_IRQ_PER_CPU=y
+CONFIG_SPARSE_IRQ=y
# CONFIG_GENERIC_GPIO is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -32,6 +32,7 @@ CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DMA_NONCOHERENT=y
+CONFIG_NEED_DMA_MAP_STATE=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
CONFIG_CONSTRUCTORS=y
@@ -46,9 +47,11 @@ CONFIG_LOCALVERSION_AUTO=y
CONFIG_HAVE_KERNEL_GZIP=y
CONFIG_HAVE_KERNEL_BZIP2=y
CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_LZO=y
CONFIG_KERNEL_GZIP=y
# CONFIG_KERNEL_BZIP2 is not set
# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_LZO is not set
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
@@ -70,11 +73,6 @@ CONFIG_RCU_FANOUT=32
# CONFIG_TREE_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_GROUP_SCHED=y
-CONFIG_FAIR_GROUP_SCHED=y
-# CONFIG_RT_GROUP_SCHED is not set
-CONFIG_USER_SCHED=y
-# CONFIG_CGROUP_SCHED is not set
# CONFIG_CGROUPS is not set
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
@@ -107,7 +105,7 @@ CONFIG_PERF_USE_VMALLOC=y
#
# Kernel Performance Events And Counters
#
-# CONFIG_PERF_EVENTS is not set
+CONFIG_PERF_EVENTS=y
# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_COMPAT_BRK=y
@@ -124,6 +122,7 @@ CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_DMA_ATTRS=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+CONFIG_HAVE_HW_BREAKPOINT=y
#
# GCOV-based kernel profiling
@@ -245,6 +244,7 @@ CONFIG_ARCH_SPARSEMEM_DEFAULT=y
CONFIG_MAX_ACTIVE_REGIONS=1
CONFIG_ARCH_POPULATES_NODE_MAP=y
CONFIG_ARCH_SELECT_MEMORY_MODEL=y
+CONFIG_UNCACHED_MAPPING=y
CONFIG_PAGE_SIZE_4KB=y
# CONFIG_PAGE_SIZE_8KB is not set
# CONFIG_PAGE_SIZE_16KB is not set
@@ -260,7 +260,7 @@ CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
-CONFIG_NR_QUICK=2
+CONFIG_NR_QUICK=1
# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
@@ -335,7 +335,7 @@ CONFIG_PREEMPT_NONE=y
# CONFIG_PREEMPT_VOLUNTARY is not set
# CONFIG_PREEMPT is not set
CONFIG_GUSA=y
-# CONFIG_SPARSE_IRQ is not set
+# CONFIG_INTC_USERIMASK is not set
#
# Boot options
@@ -370,6 +370,7 @@ CONFIG_PM_SLEEP=y
CONFIG_SUSPEND=y
CONFIG_SUSPEND_FREEZER=y
CONFIG_PM_RUNTIME=y
+CONFIG_PM_OPS=y
# CONFIG_CPU_IDLE is not set
CONFIG_NET=y
@@ -377,7 +378,6 @@ CONFIG_NET=y
# Networking options
#
CONFIG_PACKET=y
-CONFIG_PACKET_MMAP=y
CONFIG_UNIX=y
CONFIG_XFRM=y
# CONFIG_XFRM_USER is not set
@@ -569,6 +569,7 @@ CONFIG_HAVE_IDE=y
#
# SCSI device support
#
+CONFIG_SCSI_MOD=y
# CONFIG_RAID_ATTRS is not set
CONFIG_SCSI=y
CONFIG_SCSI_DMA=y
@@ -635,6 +636,7 @@ CONFIG_USB_USBNET=y
CONFIG_USB_NET_CDCETHER=y
# CONFIG_USB_NET_CDC_EEM is not set
CONFIG_USB_NET_DM9601=y
+# CONFIG_USB_NET_SMSC75XX is not set
# CONFIG_USB_NET_SMSC95XX is not set
# CONFIG_USB_NET_GL620A is not set
# CONFIG_USB_NET_NET1080 is not set
@@ -644,6 +646,7 @@ CONFIG_USB_NET_DM9601=y
# CONFIG_USB_NET_CDC_SUBSET is not set
# CONFIG_USB_NET_ZAURUS is not set
# CONFIG_USB_NET_INT51X1 is not set
+# CONFIG_USB_IPHETH is not set
# CONFIG_WAN is not set
# CONFIG_PPP is not set
# CONFIG_SLIP is not set
@@ -713,6 +716,7 @@ CONFIG_SERIAL_SH_SCI_NR_UARTS=4
CONFIG_SERIAL_SH_SCI_CONSOLE=y
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_TIMBERDALE is not set
# CONFIG_UNIX98_PTYS is not set
CONFIG_LEGACY_PTYS=y
CONFIG_LEGACY_PTY_COUNT=256
@@ -739,6 +743,7 @@ CONFIG_I2C_HELPER_AUTO=y
# CONFIG_I2C_OCORES is not set
CONFIG_I2C_SH_MOBILE=y
# CONFIG_I2C_SIMTEC is not set
+# CONFIG_I2C_XILINX is not set
#
# External I2C/SMBus adapter drivers
@@ -752,15 +757,9 @@ CONFIG_I2C_SH_MOBILE=y
#
# CONFIG_I2C_PCA_PLATFORM is not set
# CONFIG_I2C_STUB is not set
-
-#
-# Miscellaneous I2C Chip support
-#
-# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
# CONFIG_I2C_DEBUG_BUS is not set
-# CONFIG_I2C_DEBUG_CHIP is not set
# CONFIG_SPI is not set
#
@@ -783,6 +782,7 @@ CONFIG_SSB_POSSIBLE=y
# Multifunction device drivers
#
# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_88PM860X is not set
# CONFIG_MFD_SM501 is not set
# CONFIG_MFD_SH_MOBILE_SDHI is not set
# CONFIG_HTC_PASIC3 is not set
@@ -790,12 +790,13 @@ CONFIG_SSB_POSSIBLE=y
# CONFIG_MFD_TMIO is not set
# CONFIG_PMIC_DA903X is not set
# CONFIG_PMIC_ADP5520 is not set
+# CONFIG_MFD_MAX8925 is not set
# CONFIG_MFD_WM8400 is not set
# CONFIG_MFD_WM831X is not set
# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_WM8994 is not set
# CONFIG_MFD_PCF50633 is not set
# CONFIG_AB3100_CORE is not set
-# CONFIG_MFD_88PM8607 is not set
# CONFIG_REGULATOR is not set
# CONFIG_MEDIA_SUPPORT is not set
@@ -878,6 +879,7 @@ CONFIG_SND_DRIVERS=y
CONFIG_SND_SUPERH=y
CONFIG_SND_USB=y
# CONFIG_SND_USB_AUDIO is not set
+# CONFIG_SND_USB_UA101 is not set
# CONFIG_SND_USB_CAIAQ is not set
# CONFIG_SND_SOC is not set
# CONFIG_SOUND_PRIME is not set
@@ -895,6 +897,7 @@ CONFIG_USB_HID=y
#
# Special HID drivers
#
+# CONFIG_HID_3M_PCT is not set
CONFIG_HID_A4TECH=y
CONFIG_HID_APPLE=y
CONFIG_HID_BELKIN=y
@@ -910,14 +913,19 @@ CONFIG_HID_GYRATION=y
CONFIG_HID_LOGITECH=y
# CONFIG_LOGITECH_FF is not set
# CONFIG_LOGIRUMBLEPAD2_FF is not set
+# CONFIG_LOGIG940_FF is not set
CONFIG_HID_MICROSOFT=y
+# CONFIG_HID_MOSART is not set
CONFIG_HID_MONTEREY=y
# CONFIG_HID_NTRIG is not set
+# CONFIG_HID_ORTEK is not set
CONFIG_HID_PANTHERLORD=y
# CONFIG_PANTHERLORD_FF is not set
CONFIG_HID_PETALYNX=y
+# CONFIG_HID_QUANTA is not set
CONFIG_HID_SAMSUNG=y
CONFIG_HID_SONY=y
+# CONFIG_HID_STANTUM is not set
CONFIG_HID_SUNPLUS=y
# CONFIG_HID_GREENASIA is not set
# CONFIG_HID_SMARTJOYPLUS is not set
@@ -997,7 +1005,6 @@ CONFIG_USB_ISP116X_HCD=y
# CONFIG_USB_RIO500 is not set
# CONFIG_USB_LEGOTOWER is not set
# CONFIG_USB_LCD is not set
-# CONFIG_USB_BERRY_CHARGE is not set
# CONFIG_USB_LED is not set
# CONFIG_USB_CYPRESS_CY7C63 is not set
# CONFIG_USB_CYTHERM is not set
@@ -1009,7 +1016,6 @@ CONFIG_USB_ISP116X_HCD=y
# CONFIG_USB_IOWARRIOR is not set
# CONFIG_USB_TEST is not set
# CONFIG_USB_ISIGHTFW is not set
-# CONFIG_USB_VST is not set
# CONFIG_USB_GADGET is not set
#
@@ -1027,8 +1033,6 @@ CONFIG_RTC_LIB=y
CONFIG_UIO=y
# CONFIG_UIO_PDRV is not set
# CONFIG_UIO_PDRV_GENIRQ is not set
-# CONFIG_UIO_SMX is not set
-# CONFIG_UIO_SERCOS3 is not set
#
# TI VLYNQ
@@ -1115,6 +1119,7 @@ CONFIG_JFFS2_ZLIB=y
# CONFIG_JFFS2_LZO is not set
CONFIG_JFFS2_RTIME=y
# CONFIG_JFFS2_RUBIN is not set
+# CONFIG_LOGFS is not set
CONFIG_CRAMFS=y
# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
@@ -1141,6 +1146,7 @@ CONFIG_SUNRPC=y
# CONFIG_RPCSEC_GSS_KRB5 is not set
# CONFIG_RPCSEC_GSS_SPKM3 is not set
# CONFIG_SMB_FS is not set
+# CONFIG_CEPH_FS is not set
# CONFIG_CIFS is not set
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
@@ -1324,6 +1330,7 @@ CONFIG_CRYPTO=y
#
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_HW=y
+# CONFIG_VIRTUALIZATION is not set
# CONFIG_BINARY_PRINTF is not set
#
diff --git a/arch/sh/configs/se7619_defconfig b/arch/sh/configs/se7619_defconfig
index ee87e2b2168f..acb3e02a7123 100644
--- a/arch/sh/configs/se7619_defconfig
+++ b/arch/sh/configs/se7619_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.33-rc2
-# Mon Jan 4 13:34:15 2010
+# Linux kernel version: 2.6.34-rc5
+# Tue May 18 16:41:13 2010
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -13,8 +13,8 @@ CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
-CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_IRQ_PER_CPU=y
+CONFIG_SPARSE_IRQ=y
# CONFIG_GENERIC_GPIO is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -30,6 +30,7 @@ CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DMA_NONCOHERENT=y
+CONFIG_NEED_DMA_MAP_STATE=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
CONFIG_CONSTRUCTORS=y
@@ -44,9 +45,11 @@ CONFIG_LOCALVERSION=""
CONFIG_HAVE_KERNEL_GZIP=y
CONFIG_HAVE_KERNEL_BZIP2=y
CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_LZO=y
CONFIG_KERNEL_GZIP=y
# CONFIG_KERNEL_BZIP2 is not set
# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_LZO is not set
# CONFIG_SYSVIPC is not set
# CONFIG_BSD_PROCESS_ACCT is not set
@@ -62,7 +65,6 @@ CONFIG_RCU_FANOUT=32
# CONFIG_TREE_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_GROUP_SCHED is not set
# CONFIG_CGROUPS is not set
# CONFIG_RELAY is not set
# CONFIG_NAMESPACES is not set
@@ -91,7 +93,7 @@ CONFIG_PERF_USE_VMALLOC=y
#
# Kernel Performance Events And Counters
#
-# CONFIG_PERF_EVENTS is not set
+CONFIG_PERF_EVENTS=y
# CONFIG_PERF_COUNTERS is not set
# CONFIG_VM_EVENT_COUNTERS is not set
CONFIG_COMPAT_BRK=y
@@ -107,6 +109,7 @@ CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_DMA_ATTRS=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+CONFIG_HAVE_HW_BREAKPOINT=y
#
# GCOV-based kernel profiling
@@ -217,6 +220,7 @@ CONFIG_ARCH_SPARSEMEM_DEFAULT=y
CONFIG_MAX_ACTIVE_REGIONS=1
CONFIG_ARCH_POPULATES_NODE_MAP=y
CONFIG_ARCH_SELECT_MEMORY_MODEL=y
+CONFIG_UNCACHED_MAPPING=y
CONFIG_PAGE_SIZE_4KB=y
# CONFIG_PAGE_SIZE_8KB is not set
# CONFIG_PAGE_SIZE_16KB is not set
@@ -232,7 +236,7 @@ CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
-CONFIG_NR_QUICK=2
+CONFIG_NR_QUICK=1
CONFIG_NOMMU_INITIAL_TRIM_EXCESS=1
#
@@ -301,7 +305,6 @@ CONFIG_PREEMPT_NONE=y
# CONFIG_PREEMPT_VOLUNTARY is not set
# CONFIG_PREEMPT is not set
CONFIG_GUSA=y
-# CONFIG_SPARSE_IRQ is not set
#
# Boot options
@@ -453,6 +456,7 @@ CONFIG_HAVE_IDE=y
#
# SCSI device support
#
+CONFIG_SCSI_MOD=y
# CONFIG_RAID_ATTRS is not set
# CONFIG_SCSI is not set
# CONFIG_SCSI_DMA is not set
@@ -513,6 +517,7 @@ CONFIG_SERIAL_SH_SCI_NR_UARTS=3
CONFIG_SERIAL_SH_SCI_CONSOLE=y
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_TIMBERDALE is not set
# CONFIG_UNIX98_PTYS is not set
# CONFIG_LEGACY_PTYS is not set
# CONFIG_IPMI_HANDLER is not set
@@ -612,7 +617,6 @@ CONFIG_RTC_LIB=y
# CONFIG_EXT2_FS is not set
# CONFIG_EXT3_FS is not set
# CONFIG_EXT4_FS is not set
-CONFIG_EXT4_USE_FOR_EXT23=y
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
@@ -664,6 +668,7 @@ CONFIG_MISC_FILESYSTEMS=y
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
# CONFIG_JFFS2_FS is not set
+# CONFIG_LOGFS is not set
# CONFIG_CRAMFS is not set
# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
@@ -729,6 +734,7 @@ CONFIG_HAVE_ARCH_KGDB=y
CONFIG_DEFAULT_SECURITY_DAC=y
CONFIG_DEFAULT_SECURITY=""
# CONFIG_CRYPTO is not set
+# CONFIG_VIRTUALIZATION is not set
# CONFIG_BINARY_PRINTF is not set
#
diff --git a/arch/sh/configs/se7705_defconfig b/arch/sh/configs/se7705_defconfig
index 03f4219f2086..084fec9e9050 100644
--- a/arch/sh/configs/se7705_defconfig
+++ b/arch/sh/configs/se7705_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.33-rc2
-# Mon Jan 4 13:34:37 2010
+# Linux kernel version: 2.6.34-rc5
+# Tue May 18 16:42:43 2010
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -13,8 +13,8 @@ CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
-CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_IRQ_PER_CPU=y
+CONFIG_SPARSE_IRQ=y
# CONFIG_GENERIC_GPIO is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -30,6 +30,7 @@ CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DMA_NONCOHERENT=y
+CONFIG_NEED_DMA_MAP_STATE=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
CONFIG_CONSTRUCTORS=y
@@ -45,9 +46,11 @@ CONFIG_LOCALVERSION_AUTO=y
CONFIG_HAVE_KERNEL_GZIP=y
CONFIG_HAVE_KERNEL_BZIP2=y
CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_LZO=y
CONFIG_KERNEL_GZIP=y
# CONFIG_KERNEL_BZIP2 is not set
# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_LZO is not set
# CONFIG_SWAP is not set
# CONFIG_SYSVIPC is not set
# CONFIG_POSIX_MQUEUE is not set
@@ -67,7 +70,6 @@ CONFIG_RCU_FANOUT=32
# CONFIG_TREE_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_GROUP_SCHED is not set
# CONFIG_CGROUPS is not set
# CONFIG_RELAY is not set
# CONFIG_NAMESPACES is not set
@@ -76,6 +78,7 @@ CONFIG_INITRAMFS_SOURCE=""
CONFIG_RD_GZIP=y
# CONFIG_RD_BZIP2 is not set
# CONFIG_RD_LZMA is not set
+# CONFIG_RD_LZO is not set
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_SYSCTL=y
CONFIG_ANON_INODES=y
@@ -101,7 +104,7 @@ CONFIG_PERF_USE_VMALLOC=y
#
# Kernel Performance Events And Counters
#
-# CONFIG_PERF_EVENTS is not set
+CONFIG_PERF_EVENTS=y
# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_COMPAT_BRK=y
@@ -117,6 +120,7 @@ CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_DMA_ATTRS=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+CONFIG_HAVE_HW_BREAKPOINT=y
#
# GCOV-based kernel profiling
@@ -234,6 +238,7 @@ CONFIG_ARCH_SPARSEMEM_DEFAULT=y
CONFIG_MAX_ACTIVE_REGIONS=1
CONFIG_ARCH_POPULATES_NODE_MAP=y
CONFIG_ARCH_SELECT_MEMORY_MODEL=y
+CONFIG_UNCACHED_MAPPING=y
CONFIG_PAGE_SIZE_4KB=y
# CONFIG_PAGE_SIZE_8KB is not set
# CONFIG_PAGE_SIZE_16KB is not set
@@ -249,7 +254,7 @@ CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
-CONFIG_NR_QUICK=2
+CONFIG_NR_QUICK=1
# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
@@ -327,7 +332,6 @@ CONFIG_HZ=250
CONFIG_PREEMPT=y
CONFIG_GUSA=y
# CONFIG_GUSA_RB is not set
-# CONFIG_SPARSE_IRQ is not set
#
# Boot options
@@ -362,7 +366,6 @@ CONFIG_NET=y
# Networking options
#
CONFIG_PACKET=y
-# CONFIG_PACKET_MMAP is not set
CONFIG_UNIX=y
CONFIG_XFRM=y
# CONFIG_XFRM_USER is not set
@@ -562,6 +565,7 @@ CONFIG_HAVE_IDE=y
#
# SCSI device support
#
+CONFIG_SCSI_MOD=y
# CONFIG_RAID_ATTRS is not set
# CONFIG_SCSI is not set
# CONFIG_SCSI_DMA is not set
@@ -679,6 +683,7 @@ CONFIG_SERIAL_SH_SCI_NR_UARTS=2
CONFIG_SERIAL_SH_SCI_CONSOLE=y
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_TIMBERDALE is not set
CONFIG_UNIX98_PTYS=y
# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
# CONFIG_LEGACY_PTYS is not set
@@ -798,7 +803,6 @@ CONFIG_EXT2_FS=y
# CONFIG_EXT2_FS_XIP is not set
# CONFIG_EXT3_FS is not set
# CONFIG_EXT4_FS is not set
-CONFIG_EXT4_USE_FOR_EXT23=y
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
@@ -863,6 +867,7 @@ CONFIG_JFFS2_ZLIB=y
# CONFIG_JFFS2_LZO is not set
CONFIG_JFFS2_RTIME=y
# CONFIG_JFFS2_RUBIN is not set
+# CONFIG_LOGFS is not set
# CONFIG_CRAMFS is not set
# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
@@ -885,6 +890,7 @@ CONFIG_SUNRPC=y
# CONFIG_RPCSEC_GSS_KRB5 is not set
# CONFIG_RPCSEC_GSS_SPKM3 is not set
# CONFIG_SMB_FS is not set
+# CONFIG_CEPH_FS is not set
# CONFIG_CIFS is not set
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
@@ -1026,6 +1032,7 @@ CONFIG_CRYPTO=y
#
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_HW=y
+# CONFIG_VIRTUALIZATION is not set
# CONFIG_BINARY_PRINTF is not set
#
diff --git a/arch/sh/configs/se7712_defconfig b/arch/sh/configs/se7712_defconfig
index cfa58199a368..4487a230466f 100644
--- a/arch/sh/configs/se7712_defconfig
+++ b/arch/sh/configs/se7712_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.33-rc2
-# Mon Jan 4 13:44:56 2010
+# Linux kernel version: 2.6.34-rc5
+# Tue May 18 18:05:15 2010
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -12,8 +12,8 @@ CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
-CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_IRQ_PER_CPU=y
+CONFIG_SPARSE_IRQ=y
# CONFIG_GENERIC_GPIO is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -29,6 +29,7 @@ CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DMA_NONCOHERENT=y
+CONFIG_NEED_DMA_MAP_STATE=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
CONFIG_CONSTRUCTORS=y
@@ -43,9 +44,11 @@ CONFIG_LOCALVERSION=""
CONFIG_HAVE_KERNEL_GZIP=y
CONFIG_HAVE_KERNEL_BZIP2=y
CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_LZO=y
CONFIG_KERNEL_GZIP=y
# CONFIG_KERNEL_BZIP2 is not set
# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_LZO is not set
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
@@ -68,7 +71,6 @@ CONFIG_RCU_FANOUT=32
# CONFIG_TREE_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_GROUP_SCHED is not set
# CONFIG_CGROUPS is not set
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
@@ -102,8 +104,9 @@ CONFIG_PERF_USE_VMALLOC=y
#
# Kernel Performance Events And Counters
#
-# CONFIG_PERF_EVENTS is not set
+CONFIG_PERF_EVENTS=y
# CONFIG_PERF_COUNTERS is not set
+# CONFIG_DEBUG_PERF_USE_VMALLOC is not set
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_COMPAT_BRK=y
CONFIG_SLAB=y
@@ -119,6 +122,7 @@ CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_DMA_ATTRS=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+CONFIG_HAVE_HW_BREAKPOINT=y
#
# GCOV-based kernel profiling
@@ -236,6 +240,7 @@ CONFIG_ARCH_SPARSEMEM_DEFAULT=y
CONFIG_MAX_ACTIVE_REGIONS=1
CONFIG_ARCH_POPULATES_NODE_MAP=y
CONFIG_ARCH_SELECT_MEMORY_MODEL=y
+CONFIG_UNCACHED_MAPPING=y
CONFIG_PAGE_SIZE_4KB=y
# CONFIG_PAGE_SIZE_8KB is not set
# CONFIG_PAGE_SIZE_16KB is not set
@@ -251,7 +256,7 @@ CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
-CONFIG_NR_QUICK=2
+CONFIG_NR_QUICK=1
# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
@@ -329,7 +334,6 @@ CONFIG_PREEMPT_VOLUNTARY=y
# CONFIG_PREEMPT is not set
CONFIG_GUSA=y
# CONFIG_GUSA_RB is not set
-# CONFIG_SPARSE_IRQ is not set
#
# Boot options
@@ -366,7 +370,6 @@ CONFIG_NET=y
# Networking options
#
CONFIG_PACKET=y
-CONFIG_PACKET_MMAP=y
CONFIG_UNIX=y
CONFIG_XFRM=y
# CONFIG_XFRM_USER is not set
@@ -614,6 +617,7 @@ CONFIG_HAVE_IDE=y
#
# SCSI device support
#
+CONFIG_SCSI_MOD=y
# CONFIG_RAID_ATTRS is not set
CONFIG_SCSI=y
CONFIG_SCSI_DMA=y
@@ -754,6 +758,7 @@ CONFIG_SERIAL_SH_SCI_NR_UARTS=2
CONFIG_SERIAL_SH_SCI_CONSOLE=y
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_TIMBERDALE is not set
CONFIG_UNIX98_PTYS=y
# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
# CONFIG_LEGACY_PTYS is not set
@@ -834,11 +839,11 @@ CONFIG_LEDS_CLASS=y
#
# LED drivers
#
+CONFIG_LEDS_TRIGGERS=y
#
# LED Triggers
#
-CONFIG_LEDS_TRIGGERS=y
# CONFIG_LEDS_TRIGGER_TIMER is not set
# CONFIG_LEDS_TRIGGER_HEARTBEAT is not set
# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
@@ -940,6 +945,7 @@ CONFIG_JFFS2_ZLIB=y
# CONFIG_JFFS2_LZO is not set
CONFIG_JFFS2_RTIME=y
# CONFIG_JFFS2_RUBIN is not set
+# CONFIG_LOGFS is not set
CONFIG_CRAMFS=y
# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
@@ -962,6 +968,7 @@ CONFIG_SUNRPC=y
# CONFIG_RPCSEC_GSS_KRB5 is not set
# CONFIG_RPCSEC_GSS_SPKM3 is not set
# CONFIG_SMB_FS is not set
+# CONFIG_CEPH_FS is not set
# CONFIG_CIFS is not set
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
@@ -997,6 +1004,7 @@ CONFIG_SCHED_DEBUG=y
# CONFIG_TIMER_STATS is not set
# CONFIG_DEBUG_OBJECTS is not set
# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_KMEMLEAK is not set
# CONFIG_DEBUG_RT_MUTEXES is not set
# CONFIG_RT_MUTEX_TESTER is not set
# CONFIG_DEBUG_SPINLOCK is not set
@@ -1042,6 +1050,7 @@ CONFIG_FTRACE=y
CONFIG_BRANCH_PROFILE_NONE=y
# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
# CONFIG_PROFILE_ALL_BRANCHES is not set
+# CONFIG_KSYM_TRACER is not set
# CONFIG_STACK_TRACER is not set
# CONFIG_KMEMTRACE is not set
# CONFIG_WORKQUEUE_TRACER is not set
@@ -1167,6 +1176,7 @@ CONFIG_CRYPTO_DEFLATE=y
#
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_HW=y
+# CONFIG_VIRTUALIZATION is not set
# CONFIG_BINARY_PRINTF is not set
#
diff --git a/arch/sh/configs/se7721_defconfig b/arch/sh/configs/se7721_defconfig
index 201283c829a1..7c06b5c0b49b 100644
--- a/arch/sh/configs/se7721_defconfig
+++ b/arch/sh/configs/se7721_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.33-rc2
-# Mon Jan 4 13:46:58 2010
+# Linux kernel version: 2.6.34-rc5
+# Tue May 18 18:06:03 2010
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -12,8 +12,8 @@ CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
-CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_IRQ_PER_CPU=y
+CONFIG_SPARSE_IRQ=y
# CONFIG_GENERIC_GPIO is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -30,6 +30,7 @@ CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DMA_NONCOHERENT=y
+CONFIG_NEED_DMA_MAP_STATE=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
CONFIG_CONSTRUCTORS=y
@@ -44,9 +45,11 @@ CONFIG_LOCALVERSION=""
CONFIG_HAVE_KERNEL_GZIP=y
CONFIG_HAVE_KERNEL_BZIP2=y
CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_LZO=y
CONFIG_KERNEL_GZIP=y
# CONFIG_KERNEL_BZIP2 is not set
# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_LZO is not set
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
@@ -69,11 +72,6 @@ CONFIG_RCU_FANOUT=32
# CONFIG_TREE_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_GROUP_SCHED=y
-CONFIG_FAIR_GROUP_SCHED=y
-# CONFIG_RT_GROUP_SCHED is not set
-CONFIG_USER_SCHED=y
-# CONFIG_CGROUP_SCHED is not set
# CONFIG_CGROUPS is not set
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
@@ -107,8 +105,9 @@ CONFIG_PERF_USE_VMALLOC=y
#
# Kernel Performance Events And Counters
#
-# CONFIG_PERF_EVENTS is not set
+CONFIG_PERF_EVENTS=y
# CONFIG_PERF_COUNTERS is not set
+# CONFIG_DEBUG_PERF_USE_VMALLOC is not set
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_COMPAT_BRK=y
CONFIG_SLAB=y
@@ -124,6 +123,7 @@ CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_DMA_ATTRS=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+CONFIG_HAVE_HW_BREAKPOINT=y
#
# GCOV-based kernel profiling
@@ -241,6 +241,7 @@ CONFIG_ARCH_SPARSEMEM_DEFAULT=y
CONFIG_MAX_ACTIVE_REGIONS=1
CONFIG_ARCH_POPULATES_NODE_MAP=y
CONFIG_ARCH_SELECT_MEMORY_MODEL=y
+CONFIG_UNCACHED_MAPPING=y
CONFIG_PAGE_SIZE_4KB=y
# CONFIG_PAGE_SIZE_8KB is not set
# CONFIG_PAGE_SIZE_16KB is not set
@@ -256,7 +257,7 @@ CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
-CONFIG_NR_QUICK=2
+CONFIG_NR_QUICK=1
# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
@@ -334,7 +335,6 @@ CONFIG_PREEMPT_VOLUNTARY=y
# CONFIG_PREEMPT is not set
CONFIG_GUSA=y
# CONFIG_GUSA_RB is not set
-# CONFIG_SPARSE_IRQ is not set
#
# Boot options
@@ -371,7 +371,6 @@ CONFIG_NET=y
# Networking options
#
CONFIG_PACKET=y
-CONFIG_PACKET_MMAP=y
CONFIG_UNIX=y
CONFIG_XFRM=y
# CONFIG_XFRM_USER is not set
@@ -620,6 +619,7 @@ CONFIG_HAVE_IDE=y
#
# SCSI device support
#
+CONFIG_SCSI_MOD=y
# CONFIG_RAID_ATTRS is not set
CONFIG_SCSI=y
CONFIG_SCSI_DMA=y
@@ -687,6 +687,7 @@ CONFIG_WLAN=y
# CONFIG_USB_PEGASUS is not set
# CONFIG_USB_RTL8150 is not set
# CONFIG_USB_USBNET is not set
+# CONFIG_USB_IPHETH is not set
# CONFIG_WAN is not set
# CONFIG_PPP is not set
# CONFIG_SLIP is not set
@@ -764,6 +765,7 @@ CONFIG_SERIAL_SH_SCI_NR_UARTS=2
CONFIG_SERIAL_SH_SCI_CONSOLE=y
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_TIMBERDALE is not set
CONFIG_UNIX98_PTYS=y
# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
# CONFIG_LEGACY_PTYS is not set
@@ -829,6 +831,7 @@ CONFIG_USB_HID=y
#
# Special HID drivers
#
+# CONFIG_HID_3M_PCT is not set
CONFIG_HID_A4TECH=y
CONFIG_HID_APPLE=y
CONFIG_HID_BELKIN=y
@@ -844,14 +847,19 @@ CONFIG_HID_GYRATION=y
CONFIG_HID_LOGITECH=y
# CONFIG_LOGITECH_FF is not set
# CONFIG_LOGIRUMBLEPAD2_FF is not set
+# CONFIG_LOGIG940_FF is not set
CONFIG_HID_MICROSOFT=y
+# CONFIG_HID_MOSART is not set
CONFIG_HID_MONTEREY=y
# CONFIG_HID_NTRIG is not set
+# CONFIG_HID_ORTEK is not set
CONFIG_HID_PANTHERLORD=y
# CONFIG_PANTHERLORD_FF is not set
CONFIG_HID_PETALYNX=y
+# CONFIG_HID_QUANTA is not set
CONFIG_HID_SAMSUNG=y
CONFIG_HID_SONY=y
+# CONFIG_HID_STANTUM is not set
CONFIG_HID_SUNPLUS=y
# CONFIG_HID_GREENASIA is not set
# CONFIG_HID_SMARTJOYPLUS is not set
@@ -946,7 +954,6 @@ CONFIG_USB_STORAGE=y
# CONFIG_USB_RIO500 is not set
# CONFIG_USB_LEGOTOWER is not set
# CONFIG_USB_LCD is not set
-# CONFIG_USB_BERRY_CHARGE is not set
# CONFIG_USB_LED is not set
# CONFIG_USB_CYPRESS_CY7C63 is not set
# CONFIG_USB_CYTHERM is not set
@@ -958,7 +965,6 @@ CONFIG_USB_STORAGE=y
# CONFIG_USB_IOWARRIOR is not set
# CONFIG_USB_TEST is not set
# CONFIG_USB_ISIGHTFW is not set
-# CONFIG_USB_VST is not set
# CONFIG_USB_GADGET is not set
#
@@ -973,11 +979,11 @@ CONFIG_LEDS_CLASS=y
#
# LED drivers
#
+CONFIG_LEDS_TRIGGERS=y
#
# LED Triggers
#
-CONFIG_LEDS_TRIGGERS=y
# CONFIG_LEDS_TRIGGER_TIMER is not set
# CONFIG_LEDS_TRIGGER_HEARTBEAT is not set
# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
@@ -1082,6 +1088,7 @@ CONFIG_JFFS2_ZLIB=y
# CONFIG_JFFS2_LZO is not set
CONFIG_JFFS2_RTIME=y
# CONFIG_JFFS2_RUBIN is not set
+# CONFIG_LOGFS is not set
CONFIG_CRAMFS=y
# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
@@ -1163,6 +1170,7 @@ CONFIG_SCHED_DEBUG=y
# CONFIG_TIMER_STATS is not set
# CONFIG_DEBUG_OBJECTS is not set
# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_KMEMLEAK is not set
# CONFIG_DEBUG_RT_MUTEXES is not set
# CONFIG_RT_MUTEX_TESTER is not set
# CONFIG_DEBUG_SPINLOCK is not set
@@ -1208,6 +1216,7 @@ CONFIG_FTRACE=y
CONFIG_BRANCH_PROFILE_NONE=y
# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
# CONFIG_PROFILE_ALL_BRANCHES is not set
+# CONFIG_KSYM_TRACER is not set
# CONFIG_STACK_TRACER is not set
# CONFIG_KMEMTRACE is not set
# CONFIG_WORKQUEUE_TRACER is not set
@@ -1333,6 +1342,7 @@ CONFIG_CRYPTO_DEFLATE=y
#
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_HW=y
+# CONFIG_VIRTUALIZATION is not set
# CONFIG_BINARY_PRINTF is not set
#
diff --git a/arch/sh/configs/se7722_defconfig b/arch/sh/configs/se7722_defconfig
index 4a4efd261d03..42782e7ff8ba 100644
--- a/arch/sh/configs/se7722_defconfig
+++ b/arch/sh/configs/se7722_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.33-rc2
-# Mon Jan 4 13:49:15 2010
+# Linux kernel version: 2.6.34-rc5
+# Tue May 18 18:07:07 2010
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -13,8 +13,8 @@ CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
-CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_IRQ_PER_CPU=y
+CONFIG_SPARSE_IRQ=y
# CONFIG_GENERIC_GPIO is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -33,6 +33,7 @@ CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DMA_NONCOHERENT=y
+CONFIG_NEED_DMA_MAP_STATE=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
CONFIG_CONSTRUCTORS=y
@@ -48,9 +49,11 @@ CONFIG_LOCALVERSION_AUTO=y
CONFIG_HAVE_KERNEL_GZIP=y
CONFIG_HAVE_KERNEL_BZIP2=y
CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_LZO=y
CONFIG_KERNEL_GZIP=y
# CONFIG_KERNEL_BZIP2 is not set
# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_LZO is not set
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
@@ -73,7 +76,6 @@ CONFIG_RCU_FANOUT=32
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_GROUP_SCHED is not set
# CONFIG_CGROUPS is not set
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
@@ -84,6 +86,7 @@ CONFIG_INITRAMFS_SOURCE=""
CONFIG_RD_GZIP=y
# CONFIG_RD_BZIP2 is not set
# CONFIG_RD_LZMA is not set
+# CONFIG_RD_LZO is not set
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_SYSCTL=y
CONFIG_ANON_INODES=y
@@ -122,13 +125,13 @@ CONFIG_PROFILING=y
# CONFIG_OPROFILE is not set
CONFIG_HAVE_OPROFILE=y
# CONFIG_KPROBES is not set
-CONFIG_HAVE_IOREMAP_PROT=y
CONFIG_HAVE_KPROBES=y
CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_DMA_ATTRS=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+CONFIG_HAVE_HW_BREAKPOINT=y
#
# GCOV-based kernel profiling
@@ -245,7 +248,7 @@ CONFIG_FORCE_MAX_ZONEORDER=11
CONFIG_MEMORY_START=0x0c000000
CONFIG_MEMORY_SIZE=0x04000000
CONFIG_29BIT=y
-# CONFIG_X2TLB is not set
+CONFIG_X2TLB=y
CONFIG_VSYSCALL=y
CONFIG_NUMA=y
CONFIG_NODES_SHIFT=1
@@ -256,6 +259,8 @@ CONFIG_ARCH_POPULATES_NODE_MAP=y
CONFIG_ARCH_SELECT_MEMORY_MODEL=y
CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y
+CONFIG_IOREMAP_FIXED=y
+CONFIG_UNCACHED_MAPPING=y
CONFIG_PAGE_SIZE_4KB=y
# CONFIG_PAGE_SIZE_8KB is not set
# CONFIG_PAGE_SIZE_16KB is not set
@@ -279,7 +284,7 @@ CONFIG_SPLIT_PTLOCK_CPUS=4
CONFIG_MIGRATION=y
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
-CONFIG_NR_QUICK=2
+CONFIG_NR_QUICK=1
# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
@@ -356,7 +361,7 @@ CONFIG_KEXEC=y
# CONFIG_PREEMPT_VOLUNTARY is not set
CONFIG_PREEMPT=y
CONFIG_GUSA=y
-# CONFIG_SPARSE_IRQ is not set
+# CONFIG_INTC_USERIMASK is not set
#
# Boot options
@@ -391,6 +396,7 @@ CONFIG_SUSPEND=y
CONFIG_SUSPEND_FREEZER=y
# CONFIG_HIBERNATION is not set
CONFIG_PM_RUNTIME=y
+CONFIG_PM_OPS=y
# CONFIG_CPU_IDLE is not set
CONFIG_NET=y
@@ -398,7 +404,6 @@ CONFIG_NET=y
# Networking options
#
CONFIG_PACKET=y
-CONFIG_PACKET_MMAP=y
CONFIG_UNIX=y
CONFIG_XFRM=y
# CONFIG_XFRM_USER is not set
@@ -519,6 +524,7 @@ CONFIG_HAVE_IDE=y
#
# SCSI device support
#
+CONFIG_SCSI_MOD=y
# CONFIG_RAID_ATTRS is not set
CONFIG_SCSI=y
CONFIG_SCSI_DMA=y
@@ -676,6 +682,7 @@ CONFIG_SERIAL_SH_SCI_NR_UARTS=3
CONFIG_SERIAL_SH_SCI_CONSOLE=y
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_TIMBERDALE is not set
CONFIG_UNIX98_PTYS=y
# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
CONFIG_LEGACY_PTYS=y
@@ -898,6 +905,7 @@ CONFIG_MISC_FILESYSTEMS=y
# CONFIG_BEFS_FS is not set
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
+# CONFIG_LOGFS is not set
# CONFIG_CRAMFS is not set
# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
@@ -912,6 +920,7 @@ CONFIG_NETWORK_FILESYSTEMS=y
# CONFIG_NFS_FS is not set
# CONFIG_NFSD is not set
# CONFIG_SMB_FS is not set
+# CONFIG_CEPH_FS is not set
# CONFIG_CIFS is not set
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
@@ -944,6 +953,7 @@ CONFIG_DEBUG_FS=y
CONFIG_DEBUG_BUGVERBOSE=y
# CONFIG_DEBUG_MEMORY_INIT is not set
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_LKDTM is not set
# CONFIG_LATENCYTOP is not set
# CONFIG_SYSCTL_SYSCALL_CHECK is not set
CONFIG_HAVE_FUNCTION_TRACER=y
@@ -1059,6 +1069,7 @@ CONFIG_CRYPTO=y
#
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_HW=y
+# CONFIG_VIRTUALIZATION is not set
# CONFIG_BINARY_PRINTF is not set
#
diff --git a/arch/sh/configs/se7724_defconfig b/arch/sh/configs/se7724_defconfig
index ab371afe3595..ebb4ac4a7d8f 100644
--- a/arch/sh/configs/se7724_defconfig
+++ b/arch/sh/configs/se7724_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.33-rc2
-# Mon Jan 4 14:36:56 2010
+# Linux kernel version: 2.6.34-rc5
+# Tue May 18 16:55:41 2010
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -13,8 +13,8 @@ CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
-CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_IRQ_PER_CPU=y
+CONFIG_SPARSE_IRQ=y
CONFIG_GENERIC_GPIO=y
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -32,6 +32,7 @@ CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DMA_NONCOHERENT=y
+CONFIG_NEED_DMA_MAP_STATE=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
CONFIG_CONSTRUCTORS=y
@@ -47,9 +48,11 @@ CONFIG_LOCALVERSION=""
CONFIG_HAVE_KERNEL_GZIP=y
CONFIG_HAVE_KERNEL_BZIP2=y
CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_LZO=y
CONFIG_KERNEL_GZIP=y
# CONFIG_KERNEL_BZIP2 is not set
# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_LZO is not set
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
@@ -71,11 +74,6 @@ CONFIG_RCU_FANOUT=32
# CONFIG_TREE_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_GROUP_SCHED=y
-CONFIG_FAIR_GROUP_SCHED=y
-# CONFIG_RT_GROUP_SCHED is not set
-CONFIG_USER_SCHED=y
-# CONFIG_CGROUP_SCHED is not set
# CONFIG_CGROUPS is not set
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
@@ -107,7 +105,7 @@ CONFIG_PERF_USE_VMALLOC=y
#
# Kernel Performance Events And Counters
#
-# CONFIG_PERF_EVENTS is not set
+CONFIG_PERF_EVENTS=y
# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_COMPAT_BRK=y
@@ -116,13 +114,13 @@ CONFIG_SLAB=y
# CONFIG_SLOB is not set
# CONFIG_PROFILING is not set
CONFIG_HAVE_OPROFILE=y
-CONFIG_HAVE_IOREMAP_PROT=y
CONFIG_HAVE_KPROBES=y
CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_DMA_ATTRS=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+CONFIG_HAVE_HW_BREAKPOINT=y
#
# GCOV-based kernel profiling
@@ -237,8 +235,8 @@ CONFIG_FORCE_MAX_ZONEORDER=11
CONFIG_MEMORY_START=0x08000000
CONFIG_MEMORY_SIZE=0x08000000
CONFIG_29BIT=y
-# CONFIG_PMB_ENABLE is not set
-# CONFIG_X2TLB is not set
+# CONFIG_PMB is not set
+CONFIG_X2TLB=y
CONFIG_VSYSCALL=y
CONFIG_ARCH_FLATMEM_ENABLE=y
CONFIG_ARCH_SPARSEMEM_ENABLE=y
@@ -246,6 +244,8 @@ CONFIG_ARCH_SPARSEMEM_DEFAULT=y
CONFIG_MAX_ACTIVE_REGIONS=1
CONFIG_ARCH_POPULATES_NODE_MAP=y
CONFIG_ARCH_SELECT_MEMORY_MODEL=y
+CONFIG_IOREMAP_FIXED=y
+CONFIG_UNCACHED_MAPPING=y
CONFIG_PAGE_SIZE_4KB=y
# CONFIG_PAGE_SIZE_8KB is not set
# CONFIG_PAGE_SIZE_16KB is not set
@@ -261,7 +261,7 @@ CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
-CONFIG_NR_QUICK=2
+CONFIG_NR_QUICK=1
# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
@@ -340,7 +340,7 @@ CONFIG_SECCOMP=y
# CONFIG_PREEMPT_VOLUNTARY is not set
CONFIG_PREEMPT=y
CONFIG_GUSA=y
-# CONFIG_SPARSE_IRQ is not set
+# CONFIG_INTC_USERIMASK is not set
#
# Boot options
@@ -376,6 +376,7 @@ CONFIG_SUSPEND=y
CONFIG_SUSPEND_FREEZER=y
# CONFIG_HIBERNATION is not set
CONFIG_PM_RUNTIME=y
+CONFIG_PM_OPS=y
# CONFIG_CPU_IDLE is not set
CONFIG_NET=y
@@ -383,7 +384,6 @@ CONFIG_NET=y
# Networking options
#
CONFIG_PACKET=y
-# CONFIG_PACKET_MMAP is not set
CONFIG_UNIX=y
# CONFIG_NET_KEY is not set
CONFIG_INET=y
@@ -559,6 +559,7 @@ CONFIG_MTD_NAND_IDS=y
# CONFIG_MTD_NAND_NANDSIM is not set
# CONFIG_MTD_NAND_PLATFORM is not set
# CONFIG_MTD_ALAUDA is not set
+# CONFIG_MTD_NAND_SH_FLCTL is not set
# CONFIG_MTD_ONENAND is not set
#
@@ -600,6 +601,7 @@ CONFIG_MISC_DEVICES=y
# CONFIG_ICS932S401 is not set
# CONFIG_ENCLOSURE_SERVICES is not set
# CONFIG_ISL29003 is not set
+# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_DS1682 is not set
# CONFIG_TI_DAC7512 is not set
# CONFIG_C2PORT is not set
@@ -619,6 +621,7 @@ CONFIG_HAVE_IDE=y
#
# SCSI device support
#
+CONFIG_SCSI_MOD=y
# CONFIG_RAID_ATTRS is not set
CONFIG_SCSI=y
CONFIG_SCSI_DMA=y
@@ -726,6 +729,7 @@ CONFIG_WLAN=y
# CONFIG_USB_PEGASUS is not set
# CONFIG_USB_RTL8150 is not set
# CONFIG_USB_USBNET is not set
+# CONFIG_USB_IPHETH is not set
# CONFIG_WAN is not set
# CONFIG_PPP is not set
# CONFIG_SLIP is not set
@@ -805,6 +809,7 @@ CONFIG_SERIAL_SH_SCI_NR_UARTS=6
CONFIG_SERIAL_SH_SCI_CONSOLE=y
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_TIMBERDALE is not set
CONFIG_UNIX98_PTYS=y
# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
CONFIG_LEGACY_PTYS=y
@@ -833,6 +838,7 @@ CONFIG_I2C_HELPER_AUTO=y
# CONFIG_I2C_OCORES is not set
CONFIG_I2C_SH_MOBILE=y
# CONFIG_I2C_SIMTEC is not set
+# CONFIG_I2C_XILINX is not set
#
# External I2C/SMBus adapter drivers
@@ -846,15 +852,9 @@ CONFIG_I2C_SH_MOBILE=y
#
# CONFIG_I2C_PCA_PLATFORM is not set
# CONFIG_I2C_STUB is not set
-
-#
-# Miscellaneous I2C Chip support
-#
-# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
# CONFIG_I2C_DEBUG_BUS is not set
-# CONFIG_I2C_DEBUG_CHIP is not set
CONFIG_SPI=y
CONFIG_SPI_MASTER=y
@@ -885,13 +885,16 @@ CONFIG_GPIOLIB=y
#
# Memory mapped GPIO expanders:
#
+# CONFIG_GPIO_IT8761E is not set
#
# I2C GPIO expanders:
#
+# CONFIG_GPIO_MAX7300 is not set
# CONFIG_GPIO_MAX732X is not set
# CONFIG_GPIO_PCA953X is not set
# CONFIG_GPIO_PCF857X is not set
+# CONFIG_GPIO_ADP5588 is not set
#
# PCI GPIO expanders:
@@ -923,22 +926,25 @@ CONFIG_SSB_POSSIBLE=y
# Multifunction device drivers
#
# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_88PM860X is not set
# CONFIG_MFD_SM501 is not set
# CONFIG_MFD_SH_MOBILE_SDHI is not set
# CONFIG_HTC_PASIC3 is not set
+# CONFIG_HTC_I2CPLD is not set
# CONFIG_TPS65010 is not set
# CONFIG_TWL4030_CORE is not set
# CONFIG_MFD_TMIO is not set
# CONFIG_PMIC_DA903X is not set
# CONFIG_PMIC_ADP5520 is not set
+# CONFIG_MFD_MAX8925 is not set
# CONFIG_MFD_WM8400 is not set
# CONFIG_MFD_WM831X is not set
# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_WM8994 is not set
# CONFIG_MFD_PCF50633 is not set
# CONFIG_MFD_MC13783 is not set
# CONFIG_AB3100_CORE is not set
# CONFIG_EZX_PCAP is not set
-# CONFIG_MFD_88PM8607 is not set
# CONFIG_AB4500_CORE is not set
# CONFIG_REGULATOR is not set
CONFIG_MEDIA_SUPPORT=y
@@ -1001,7 +1007,9 @@ CONFIG_USB_GSPCA=m
# CONFIG_USB_M5602 is not set
# CONFIG_USB_STV06XX is not set
# CONFIG_USB_GL860 is not set
+# CONFIG_USB_GSPCA_BENQ is not set
# CONFIG_USB_GSPCA_CONEX is not set
+# CONFIG_USB_GSPCA_CPIA1 is not set
# CONFIG_USB_GSPCA_ETOMS is not set
# CONFIG_USB_GSPCA_FINEPIX is not set
# CONFIG_USB_GSPCA_JEILINJ is not set
@@ -1009,9 +1017,11 @@ CONFIG_USB_GSPCA=m
# CONFIG_USB_GSPCA_MR97310A is not set
# CONFIG_USB_GSPCA_OV519 is not set
# CONFIG_USB_GSPCA_OV534 is not set
+# CONFIG_USB_GSPCA_OV534_9 is not set
# CONFIG_USB_GSPCA_PAC207 is not set
# CONFIG_USB_GSPCA_PAC7302 is not set
# CONFIG_USB_GSPCA_PAC7311 is not set
+# CONFIG_USB_GSPCA_SN9C2028 is not set
# CONFIG_USB_GSPCA_SN9C20X is not set
# CONFIG_USB_GSPCA_SONIXB is not set
# CONFIG_USB_GSPCA_SONIXJ is not set
@@ -1033,6 +1043,7 @@ CONFIG_USB_GSPCA=m
# CONFIG_VIDEO_PVRUSB2 is not set
# CONFIG_VIDEO_HDPVR is not set
# CONFIG_VIDEO_EM28XX is not set
+# CONFIG_VIDEO_TLG2300 is not set
# CONFIG_VIDEO_CX231XX is not set
# CONFIG_VIDEO_USBVISION is not set
# CONFIG_USB_ET61X251 is not set
@@ -1147,6 +1158,7 @@ CONFIG_SND_VERBOSE_PROCFS=y
# CONFIG_SND_SUPERH is not set
CONFIG_SND_USB=y
# CONFIG_SND_USB_AUDIO is not set
+# CONFIG_SND_USB_UA101 is not set
# CONFIG_SND_USB_CAIAQ is not set
CONFIG_SND_SOC=m
@@ -1155,6 +1167,7 @@ CONFIG_SND_SOC=m
#
CONFIG_SND_SOC_SH4_FSI=m
CONFIG_SND_FSI_AK4642=y
+# CONFIG_SND_FSI_DA7210 is not set
CONFIG_SND_SOC_I2C_AND_SPI=m
CONFIG_SND_SOC_ALL_CODECS=m
CONFIG_SND_SOC_WM_HUBS=m
@@ -1167,6 +1180,7 @@ CONFIG_SND_SOC_AK4535=m
CONFIG_SND_SOC_AK4642=m
CONFIG_SND_SOC_AK4671=m
CONFIG_SND_SOC_CS4270=m
+CONFIG_SND_SOC_DA7210=m
CONFIG_SND_SOC_L3=m
CONFIG_SND_SOC_PCM3008=m
CONFIG_SND_SOC_SPDIF=m
@@ -1189,17 +1203,21 @@ CONFIG_SND_SOC_WM8753=m
CONFIG_SND_SOC_WM8776=m
CONFIG_SND_SOC_WM8900=m
CONFIG_SND_SOC_WM8903=m
+CONFIG_SND_SOC_WM8904=m
CONFIG_SND_SOC_WM8940=m
+CONFIG_SND_SOC_WM8955=m
CONFIG_SND_SOC_WM8960=m
CONFIG_SND_SOC_WM8961=m
CONFIG_SND_SOC_WM8971=m
CONFIG_SND_SOC_WM8974=m
+CONFIG_SND_SOC_WM8978=m
CONFIG_SND_SOC_WM8988=m
CONFIG_SND_SOC_WM8990=m
CONFIG_SND_SOC_WM8993=m
CONFIG_SND_SOC_WM9081=m
CONFIG_SND_SOC_MAX9877=m
CONFIG_SND_SOC_TPA6130A2=m
+CONFIG_SND_SOC_WM2000=m
# CONFIG_SOUND_PRIME is not set
CONFIG_HID_SUPPORT=y
CONFIG_HID=y
@@ -1215,6 +1233,7 @@ CONFIG_USB_HID=y
#
# Special HID drivers
#
+# CONFIG_HID_3M_PCT is not set
# CONFIG_HID_A4TECH is not set
# CONFIG_HID_APPLE is not set
# CONFIG_HID_BELKIN is not set
@@ -1229,12 +1248,16 @@ CONFIG_USB_HID=y
# CONFIG_HID_KENSINGTON is not set
# CONFIG_HID_LOGITECH is not set
# CONFIG_HID_MICROSOFT is not set
+# CONFIG_HID_MOSART is not set
# CONFIG_HID_MONTEREY is not set
# CONFIG_HID_NTRIG is not set
+# CONFIG_HID_ORTEK is not set
# CONFIG_HID_PANTHERLORD is not set
# CONFIG_HID_PETALYNX is not set
+# CONFIG_HID_QUANTA is not set
# CONFIG_HID_SAMSUNG is not set
# CONFIG_HID_SONY is not set
+# CONFIG_HID_STANTUM is not set
# CONFIG_HID_SUNPLUS is not set
# CONFIG_HID_GREENASIA is not set
# CONFIG_HID_SMARTJOYPLUS is not set
@@ -1327,7 +1350,6 @@ CONFIG_USB_STORAGE=y
# CONFIG_USB_RIO500 is not set
# CONFIG_USB_LEGOTOWER is not set
# CONFIG_USB_LCD is not set
-# CONFIG_USB_BERRY_CHARGE is not set
# CONFIG_USB_LED is not set
# CONFIG_USB_CYPRESS_CY7C63 is not set
# CONFIG_USB_CYTHERM is not set
@@ -1339,7 +1361,6 @@ CONFIG_USB_STORAGE=y
# CONFIG_USB_IOWARRIOR is not set
# CONFIG_USB_TEST is not set
# CONFIG_USB_ISIGHTFW is not set
-# CONFIG_USB_VST is not set
CONFIG_USB_GADGET=y
# CONFIG_USB_GADGET_DEBUG_FILES is not set
CONFIG_USB_GADGET_VBUS_DRAW=2
@@ -1378,6 +1399,7 @@ CONFIG_USB_G_SERIAL=m
# CONFIG_USB_MIDI_GADGET is not set
# CONFIG_USB_G_PRINTER is not set
# CONFIG_USB_CDC_COMPOSITE is not set
+# CONFIG_USB_G_NOKIA is not set
# CONFIG_USB_G_MULTI is not set
#
@@ -1401,10 +1423,7 @@ CONFIG_MMC_BLOCK_BOUNCE=y
# MMC/SD/SDIO Host Controller Drivers
#
# CONFIG_MMC_SDHCI is not set
-# CONFIG_MMC_AT91 is not set
-# CONFIG_MMC_ATMELMCI is not set
CONFIG_MMC_SPI=y
-# CONFIG_MMC_TMIO is not set
# CONFIG_MEMSTICK is not set
# CONFIG_NEW_LEDS is not set
# CONFIG_ACCESSIBILITY is not set
@@ -1480,8 +1499,6 @@ CONFIG_RTC_DRV_PCF8563=y
CONFIG_UIO=y
# CONFIG_UIO_PDRV is not set
CONFIG_UIO_PDRV_GENIRQ=y
-# CONFIG_UIO_SMX is not set
-# CONFIG_UIO_SERCOS3 is not set
#
# TI VLYNQ
@@ -1566,6 +1583,7 @@ CONFIG_MISC_FILESYSTEMS=y
# CONFIG_EFS_FS is not set
# CONFIG_JFFS2_FS is not set
# CONFIG_UBIFS_FS is not set
+# CONFIG_LOGFS is not set
# CONFIG_CRAMFS is not set
# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
@@ -1594,6 +1612,7 @@ CONFIG_SUNRPC=y
# CONFIG_RPCSEC_GSS_KRB5 is not set
# CONFIG_RPCSEC_GSS_SPKM3 is not set
# CONFIG_SMB_FS is not set
+# CONFIG_CEPH_FS is not set
# CONFIG_CIFS is not set
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
@@ -1786,6 +1805,7 @@ CONFIG_CRYPTO_CBC=y
#
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_HW=y
+# CONFIG_VIRTUALIZATION is not set
# CONFIG_BINARY_PRINTF is not set
#
diff --git a/arch/sh/configs/se7750_defconfig b/arch/sh/configs/se7750_defconfig
index b15a44e2ec43..e096c3e7e18c 100644
--- a/arch/sh/configs/se7750_defconfig
+++ b/arch/sh/configs/se7750_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.33-rc2
-# Mon Jan 4 14:39:10 2010
+# Linux kernel version: 2.6.34-rc5
+# Tue May 18 17:00:16 2010
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -13,8 +13,8 @@ CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
-CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_IRQ_PER_CPU=y
+CONFIG_SPARSE_IRQ=y
# CONFIG_GENERIC_GPIO is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -31,6 +31,7 @@ CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DMA_NONCOHERENT=y
+CONFIG_NEED_DMA_MAP_STATE=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
CONFIG_CONSTRUCTORS=y
@@ -45,9 +46,11 @@ CONFIG_LOCALVERSION_AUTO=y
CONFIG_HAVE_KERNEL_GZIP=y
CONFIG_HAVE_KERNEL_BZIP2=y
CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_LZO=y
CONFIG_KERNEL_GZIP=y
# CONFIG_KERNEL_BZIP2 is not set
# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_LZO is not set
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
@@ -70,7 +73,6 @@ CONFIG_RCU_FANOUT=32
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_GROUP_SCHED is not set
# CONFIG_CGROUPS is not set
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
@@ -103,7 +105,7 @@ CONFIG_PERF_USE_VMALLOC=y
#
# Kernel Performance Events And Counters
#
-# CONFIG_PERF_EVENTS is not set
+CONFIG_PERF_EVENTS=y
# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_COMPAT_BRK=y
@@ -120,6 +122,7 @@ CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_DMA_ATTRS=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+CONFIG_HAVE_HW_BREAKPOINT=y
#
# GCOV-based kernel profiling
@@ -237,6 +240,7 @@ CONFIG_ARCH_SPARSEMEM_DEFAULT=y
CONFIG_MAX_ACTIVE_REGIONS=1
CONFIG_ARCH_POPULATES_NODE_MAP=y
CONFIG_ARCH_SELECT_MEMORY_MODEL=y
+CONFIG_UNCACHED_MAPPING=y
CONFIG_PAGE_SIZE_4KB=y
# CONFIG_PAGE_SIZE_8KB is not set
# CONFIG_PAGE_SIZE_16KB is not set
@@ -252,7 +256,7 @@ CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
-CONFIG_NR_QUICK=2
+CONFIG_NR_QUICK=1
# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
@@ -329,7 +333,6 @@ CONFIG_PREEMPT_NONE=y
# CONFIG_PREEMPT is not set
CONFIG_GUSA=y
# CONFIG_GUSA_RB is not set
-# CONFIG_SPARSE_IRQ is not set
#
# Boot options
@@ -364,7 +367,6 @@ CONFIG_NET=y
# Networking options
#
CONFIG_PACKET=y
-# CONFIG_PACKET_MMAP is not set
CONFIG_UNIX=y
CONFIG_XFRM=y
# CONFIG_XFRM_USER is not set
@@ -580,6 +582,7 @@ CONFIG_IDE_PROC_FS=y
#
# SCSI device support
#
+CONFIG_SCSI_MOD=y
# CONFIG_RAID_ATTRS is not set
CONFIG_SCSI=y
CONFIG_SCSI_DMA=y
@@ -698,6 +701,7 @@ CONFIG_SERIAL_SH_SCI_NR_UARTS=2
CONFIG_SERIAL_SH_SCI_CONSOLE=y
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_TIMBERDALE is not set
CONFIG_UNIX98_PTYS=y
# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
CONFIG_LEGACY_PTYS=y
@@ -816,7 +820,6 @@ CONFIG_RTC_LIB=y
# CONFIG_EXT2_FS is not set
# CONFIG_EXT3_FS is not set
# CONFIG_EXT4_FS is not set
-CONFIG_EXT4_USE_FOR_EXT23=y
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
@@ -885,6 +888,7 @@ CONFIG_JFFS2_ZLIB=y
# CONFIG_JFFS2_LZO is not set
CONFIG_JFFS2_RTIME=y
# CONFIG_JFFS2_RUBIN is not set
+# CONFIG_LOGFS is not set
# CONFIG_CRAMFS is not set
# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
@@ -907,6 +911,7 @@ CONFIG_SUNRPC=y
# CONFIG_RPCSEC_GSS_KRB5 is not set
# CONFIG_RPCSEC_GSS_SPKM3 is not set
# CONFIG_SMB_FS is not set
+# CONFIG_CEPH_FS is not set
# CONFIG_CIFS is not set
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
@@ -1063,6 +1068,7 @@ CONFIG_CRYPTO=y
#
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_HW=y
+# CONFIG_VIRTUALIZATION is not set
# CONFIG_BINARY_PRINTF is not set
#
diff --git a/arch/sh/configs/se7751_defconfig b/arch/sh/configs/se7751_defconfig
index d1effdeaa416..84d3facf6a30 100644
--- a/arch/sh/configs/se7751_defconfig
+++ b/arch/sh/configs/se7751_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.33-rc2
-# Mon Jan 4 14:39:56 2010
+# Linux kernel version: 2.6.34-rc5
+# Tue May 18 17:03:26 2010
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -13,8 +13,8 @@ CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
-CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_IRQ_PER_CPU=y
+CONFIG_SPARSE_IRQ=y
# CONFIG_GENERIC_GPIO is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -31,6 +31,7 @@ CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DMA_NONCOHERENT=y
+CONFIG_NEED_DMA_MAP_STATE=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
CONFIG_CONSTRUCTORS=y
@@ -45,9 +46,11 @@ CONFIG_LOCALVERSION_AUTO=y
CONFIG_HAVE_KERNEL_GZIP=y
CONFIG_HAVE_KERNEL_BZIP2=y
CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_LZO=y
CONFIG_KERNEL_GZIP=y
# CONFIG_KERNEL_BZIP2 is not set
# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_LZO is not set
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
@@ -69,7 +72,6 @@ CONFIG_RCU_FANOUT=32
# CONFIG_TREE_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_GROUP_SCHED is not set
# CONFIG_CGROUPS is not set
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
@@ -80,6 +82,7 @@ CONFIG_INITRAMFS_SOURCE=""
CONFIG_RD_GZIP=y
# CONFIG_RD_BZIP2 is not set
# CONFIG_RD_LZMA is not set
+# CONFIG_RD_LZO is not set
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_SYSCTL=y
CONFIG_ANON_INODES=y
@@ -106,7 +109,7 @@ CONFIG_PERF_USE_VMALLOC=y
#
# Kernel Performance Events And Counters
#
-# CONFIG_PERF_EVENTS is not set
+CONFIG_PERF_EVENTS=y
# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_COMPAT_BRK=y
@@ -123,6 +126,7 @@ CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_DMA_ATTRS=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+CONFIG_HAVE_HW_BREAKPOINT=y
#
# GCOV-based kernel profiling
@@ -240,6 +244,7 @@ CONFIG_ARCH_SPARSEMEM_DEFAULT=y
CONFIG_MAX_ACTIVE_REGIONS=1
CONFIG_ARCH_POPULATES_NODE_MAP=y
CONFIG_ARCH_SELECT_MEMORY_MODEL=y
+CONFIG_UNCACHED_MAPPING=y
CONFIG_PAGE_SIZE_4KB=y
# CONFIG_PAGE_SIZE_8KB is not set
# CONFIG_PAGE_SIZE_16KB is not set
@@ -255,7 +260,7 @@ CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
-CONFIG_NR_QUICK=2
+CONFIG_NR_QUICK=1
# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
@@ -333,7 +338,6 @@ CONFIG_PREEMPT_NONE=y
# CONFIG_PREEMPT is not set
CONFIG_GUSA=y
# CONFIG_GUSA_RB is not set
-# CONFIG_SPARSE_IRQ is not set
#
# Boot options
@@ -369,7 +373,6 @@ CONFIG_NET=y
# Networking options
#
CONFIG_PACKET=y
-# CONFIG_PACKET_MMAP is not set
CONFIG_UNIX=y
CONFIG_XFRM=y
# CONFIG_XFRM_USER is not set
@@ -589,6 +592,7 @@ CONFIG_HAVE_IDE=y
#
# SCSI device support
#
+CONFIG_SCSI_MOD=y
# CONFIG_RAID_ATTRS is not set
# CONFIG_SCSI is not set
# CONFIG_SCSI_DMA is not set
@@ -666,6 +670,7 @@ CONFIG_DEVKMEM=y
# Non-8250 serial port support
#
# CONFIG_SERIAL_SH_SCI is not set
+# CONFIG_SERIAL_TIMBERDALE is not set
CONFIG_UNIX98_PTYS=y
# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
CONFIG_LEGACY_PTYS=y
@@ -785,7 +790,6 @@ CONFIG_EXT2_FS=y
# CONFIG_EXT2_FS_XIP is not set
# CONFIG_EXT3_FS is not set
# CONFIG_EXT4_FS is not set
-CONFIG_EXT4_USE_FOR_EXT23=y
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
@@ -854,6 +858,7 @@ CONFIG_JFFS2_ZLIB=y
# CONFIG_JFFS2_LZO is not set
CONFIG_JFFS2_RTIME=y
# CONFIG_JFFS2_RUBIN is not set
+# CONFIG_LOGFS is not set
# CONFIG_CRAMFS is not set
# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
@@ -868,6 +873,7 @@ CONFIG_NETWORK_FILESYSTEMS=y
# CONFIG_NFS_FS is not set
# CONFIG_NFSD is not set
# CONFIG_SMB_FS is not set
+# CONFIG_CEPH_FS is not set
# CONFIG_CIFS is not set
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
@@ -1012,6 +1018,7 @@ CONFIG_CRYPTO=y
#
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_HW=y
+# CONFIG_VIRTUALIZATION is not set
# CONFIG_BINARY_PRINTF is not set
#
diff --git a/arch/sh/configs/se7780_defconfig b/arch/sh/configs/se7780_defconfig
index 58533d50f06e..f0d7e679e75f 100644
--- a/arch/sh/configs/se7780_defconfig
+++ b/arch/sh/configs/se7780_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.33-rc2
-# Mon Jan 4 14:40:32 2010
+# Linux kernel version: 2.6.34-rc5
+# Tue May 18 17:06:12 2010
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -13,8 +13,8 @@ CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
-CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_IRQ_PER_CPU=y
+CONFIG_SPARSE_IRQ=y
# CONFIG_GENERIC_GPIO is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -32,6 +32,7 @@ CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DMA_NONCOHERENT=y
+CONFIG_NEED_DMA_MAP_STATE=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
CONFIG_CONSTRUCTORS=y
@@ -46,9 +47,11 @@ CONFIG_LOCALVERSION_AUTO=y
CONFIG_HAVE_KERNEL_GZIP=y
CONFIG_HAVE_KERNEL_BZIP2=y
CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_LZO=y
CONFIG_KERNEL_GZIP=y
# CONFIG_KERNEL_BZIP2 is not set
# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_LZO is not set
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
@@ -100,7 +103,7 @@ CONFIG_PERF_USE_VMALLOC=y
#
# Kernel Performance Events And Counters
#
-# CONFIG_PERF_EVENTS is not set
+CONFIG_PERF_EVENTS=y
# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_PCI_QUIRKS=y
@@ -117,6 +120,7 @@ CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_DMA_ATTRS=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+CONFIG_HAVE_HW_BREAKPOINT=y
#
# GCOV-based kernel profiling
@@ -238,6 +242,7 @@ CONFIG_ARCH_POPULATES_NODE_MAP=y
CONFIG_ARCH_SELECT_MEMORY_MODEL=y
CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y
+CONFIG_UNCACHED_MAPPING=y
CONFIG_PAGE_SIZE_4KB=y
# CONFIG_PAGE_SIZE_8KB is not set
# CONFIG_PAGE_SIZE_16KB is not set
@@ -253,7 +258,7 @@ CONFIG_SPLIT_PTLOCK_CPUS=4
CONFIG_MIGRATION=y
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
-CONFIG_NR_QUICK=2
+CONFIG_NR_QUICK=1
# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
@@ -328,6 +333,7 @@ CONFIG_PREEMPT_NONE=y
# CONFIG_PREEMPT_VOLUNTARY is not set
# CONFIG_PREEMPT is not set
CONFIG_GUSA=y
+# CONFIG_INTC_USERIMASK is not set
#
# Boot options
@@ -343,9 +349,9 @@ CONFIG_CMDLINE="console=ttySC0,115200 root=/dev/sda1"
# Bus options
#
CONFIG_PCI=y
+CONFIG_PCI_DOMAINS=y
# CONFIG_PCIEPORTBUS is not set
# CONFIG_ARCH_SUPPORTS_MSI is not set
-CONFIG_PCI_LEGACY=y
# CONFIG_PCI_STUB is not set
# CONFIG_PCI_IOV is not set
@@ -362,7 +368,6 @@ CONFIG_NET=y
# Networking options
#
CONFIG_PACKET=y
-# CONFIG_PACKET_MMAP is not set
CONFIG_UNIX=y
CONFIG_XFRM=y
# CONFIG_XFRM_USER is not set
@@ -571,6 +576,7 @@ CONFIG_HAVE_IDE=y
#
# SCSI device support
#
+CONFIG_SCSI_MOD=y
# CONFIG_RAID_ATTRS is not set
CONFIG_SCSI=y
CONFIG_SCSI_DMA=y
@@ -714,7 +720,7 @@ CONFIG_SATA_SIL=y
#
#
-# See the help texts for more information.
+# The newer stack is recommended.
#
# CONFIG_FIREWIRE is not set
# CONFIG_IEEE1394 is not set
@@ -772,6 +778,7 @@ CONFIG_NET_PCI=y
# CONFIG_PCNET32 is not set
# CONFIG_AMD8111_ETH is not set
# CONFIG_ADAPTEC_STARFIRE is not set
+# CONFIG_KSZ884X_PCI is not set
# CONFIG_B44 is not set
# CONFIG_FORCEDETH is not set
# CONFIG_E100 is not set
@@ -807,6 +814,7 @@ CONFIG_WLAN=y
# CONFIG_USB_KAWETH is not set
# CONFIG_USB_PEGASUS is not set
# CONFIG_USB_USBNET is not set
+# CONFIG_USB_IPHETH is not set
# CONFIG_WAN is not set
# CONFIG_FDDI is not set
# CONFIG_PPP is not set
@@ -878,6 +886,7 @@ CONFIG_SERIAL_SH_SCI_CONSOLE=y
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
# CONFIG_SERIAL_JSM is not set
+# CONFIG_SERIAL_TIMBERDALE is not set
CONFIG_UNIX98_PTYS=y
# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
# CONFIG_LEGACY_PTYS is not set
@@ -928,6 +937,7 @@ CONFIG_SSB_POSSIBLE=y
# CONFIG_MFD_SH_MOBILE_SDHI is not set
# CONFIG_HTC_PASIC3 is not set
# CONFIG_MFD_TMIO is not set
+# CONFIG_LPC_SCH is not set
# CONFIG_REGULATOR is not set
# CONFIG_MEDIA_SUPPORT is not set
@@ -935,6 +945,7 @@ CONFIG_SSB_POSSIBLE=y
# Graphics support
#
CONFIG_VGA_ARB=y
+CONFIG_VGA_ARB_MAX_GPUS=16
# CONFIG_DRM is not set
# CONFIG_VGASTATE is not set
# CONFIG_VIDEO_OUTPUT_CONTROL is not set
@@ -1032,6 +1043,7 @@ CONFIG_USB_HID=y
#
# Special HID drivers
#
+# CONFIG_HID_3M_PCT is not set
CONFIG_HID_A4TECH=y
CONFIG_HID_APPLE=y
CONFIG_HID_BELKIN=y
@@ -1047,14 +1059,19 @@ CONFIG_HID_GYRATION=y
CONFIG_HID_LOGITECH=y
# CONFIG_LOGITECH_FF is not set
# CONFIG_LOGIRUMBLEPAD2_FF is not set
+# CONFIG_LOGIG940_FF is not set
CONFIG_HID_MICROSOFT=y
+# CONFIG_HID_MOSART is not set
CONFIG_HID_MONTEREY=y
# CONFIG_HID_NTRIG is not set
+# CONFIG_HID_ORTEK is not set
CONFIG_HID_PANTHERLORD=y
# CONFIG_PANTHERLORD_FF is not set
CONFIG_HID_PETALYNX=y
+# CONFIG_HID_QUANTA is not set
CONFIG_HID_SAMSUNG=y
CONFIG_HID_SONY=y
+# CONFIG_HID_STANTUM is not set
CONFIG_HID_SUNPLUS=y
# CONFIG_HID_GREENASIA is not set
# CONFIG_HID_SMARTJOYPLUS is not set
@@ -1148,7 +1165,6 @@ CONFIG_USB_STORAGE=y
# CONFIG_USB_RIO500 is not set
# CONFIG_USB_LEGOTOWER is not set
# CONFIG_USB_LCD is not set
-# CONFIG_USB_BERRY_CHARGE is not set
# CONFIG_USB_LED is not set
# CONFIG_USB_CYPRESS_CY7C63 is not set
# CONFIG_USB_CYTHERM is not set
@@ -1161,7 +1177,6 @@ CONFIG_USB_STORAGE=y
# CONFIG_USB_IOWARRIOR is not set
# CONFIG_USB_TEST is not set
# CONFIG_USB_ISIGHTFW is not set
-# CONFIG_USB_VST is not set
# CONFIG_USB_GADGET is not set
#
@@ -1192,7 +1207,6 @@ CONFIG_EXT2_FS=y
# CONFIG_EXT2_FS_XIP is not set
# CONFIG_EXT3_FS is not set
# CONFIG_EXT4_FS is not set
-CONFIG_EXT4_USE_FOR_EXT23=y
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
@@ -1211,6 +1225,7 @@ CONFIG_INOTIFY_USER=y
#
# Caches
#
+# CONFIG_FSCACHE is not set
#
# CD-ROM/DVD Filesystems
@@ -1332,6 +1347,7 @@ CONFIG_DEBUG_FS=y
CONFIG_DEBUG_BUGVERBOSE=y
# CONFIG_DEBUG_MEMORY_INIT is not set
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_LKDTM is not set
# CONFIG_LATENCYTOP is not set
# CONFIG_SYSCTL_SYSCALL_CHECK is not set
CONFIG_HAVE_FUNCTION_TRACER=y
@@ -1442,6 +1458,7 @@ CONFIG_CRYPTO=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_HW=y
# CONFIG_CRYPTO_DEV_HIFN_795X is not set
+# CONFIG_VIRTUALIZATION is not set
# CONFIG_BINARY_PRINTF is not set
#
diff --git a/arch/sh/configs/sh03_defconfig b/arch/sh/configs/sh03_defconfig
index 666fde110b27..562147d333e3 100644
--- a/arch/sh/configs/sh03_defconfig
+++ b/arch/sh/configs/sh03_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.33-rc2
-# Mon Jan 4 14:41:25 2010
+# Linux kernel version: 2.6.34-rc5
+# Tue May 18 17:10:00 2010
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -13,8 +13,8 @@ CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
-CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_IRQ_PER_CPU=y
+CONFIG_SPARSE_IRQ=y
# CONFIG_GENERIC_GPIO is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -33,6 +33,7 @@ CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DMA_NONCOHERENT=y
+CONFIG_NEED_DMA_MAP_STATE=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
CONFIG_CONSTRUCTORS=y
@@ -48,9 +49,11 @@ CONFIG_LOCALVERSION_AUTO=y
CONFIG_HAVE_KERNEL_GZIP=y
CONFIG_HAVE_KERNEL_BZIP2=y
CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_LZO=y
CONFIG_KERNEL_GZIP=y
# CONFIG_KERNEL_BZIP2 is not set
# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_LZO is not set
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
@@ -73,7 +76,6 @@ CONFIG_RCU_FANOUT=32
# CONFIG_TREE_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_GROUP_SCHED is not set
# CONFIG_CGROUPS is not set
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
@@ -84,6 +86,7 @@ CONFIG_INITRAMFS_SOURCE=""
CONFIG_RD_GZIP=y
# CONFIG_RD_BZIP2 is not set
# CONFIG_RD_LZMA is not set
+# CONFIG_RD_LZO is not set
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_SYSCTL=y
CONFIG_ANON_INODES=y
@@ -111,7 +114,6 @@ CONFIG_PERF_USE_VMALLOC=y
# Kernel Performance Events And Counters
#
CONFIG_PERF_EVENTS=y
-CONFIG_EVENT_PROFILE=y
# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_PCI_QUIRKS=y
@@ -120,7 +122,6 @@ CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
CONFIG_PROFILING=y
-CONFIG_TRACEPOINTS=y
CONFIG_OPROFILE=m
CONFIG_HAVE_OPROFILE=y
# CONFIG_KPROBES is not set
@@ -131,6 +132,7 @@ CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_DMA_ATTRS=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+CONFIG_HAVE_HW_BREAKPOINT=y
#
# GCOV-based kernel profiling
@@ -250,6 +252,7 @@ CONFIG_ARCH_SPARSEMEM_DEFAULT=y
CONFIG_MAX_ACTIVE_REGIONS=1
CONFIG_ARCH_POPULATES_NODE_MAP=y
CONFIG_ARCH_SELECT_MEMORY_MODEL=y
+CONFIG_UNCACHED_MAPPING=y
CONFIG_PAGE_SIZE_4KB=y
# CONFIG_PAGE_SIZE_8KB is not set
# CONFIG_PAGE_SIZE_16KB is not set
@@ -265,7 +268,7 @@ CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
-CONFIG_NR_QUICK=2
+CONFIG_NR_QUICK=1
# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
@@ -342,7 +345,6 @@ CONFIG_HZ=250
CONFIG_PREEMPT=y
CONFIG_GUSA=y
# CONFIG_GUSA_RB is not set
-# CONFIG_SPARSE_IRQ is not set
#
# Boot options
@@ -358,9 +360,9 @@ CONFIG_CMDLINE="console=ttySC1,115200 mem=64M root=/dev/nfs"
# Bus options
#
CONFIG_PCI=y
+CONFIG_PCI_DOMAINS=y
# CONFIG_PCIEPORTBUS is not set
# CONFIG_ARCH_SUPPORTS_MSI is not set
-CONFIG_PCI_LEGACY=y
# CONFIG_PCI_STUB is not set
# CONFIG_PCI_IOV is not set
# CONFIG_PCCARD is not set
@@ -388,7 +390,6 @@ CONFIG_NET=y
# Networking options
#
CONFIG_PACKET=y
-# CONFIG_PACKET_MMAP is not set
CONFIG_UNIX=y
CONFIG_XFRM=y
# CONFIG_XFRM_USER is not set
@@ -453,7 +454,6 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# Network testing
#
# CONFIG_NET_PKTGEN is not set
-# CONFIG_NET_DROP_MONITOR is not set
# CONFIG_HAMRADIO is not set
# CONFIG_CAN is not set
# CONFIG_IRDA is not set
@@ -574,6 +574,7 @@ CONFIG_IDE_PROC_FS=y
#
# SCSI device support
#
+CONFIG_SCSI_MOD=m
# CONFIG_RAID_ATTRS is not set
CONFIG_SCSI=m
CONFIG_SCSI_DMA=y
@@ -664,7 +665,7 @@ CONFIG_SCSI_LOWLEVEL=y
#
#
-# See the help texts for more information.
+# The newer stack is recommended.
#
# CONFIG_FIREWIRE is not set
# CONFIG_IEEE1394 is not set
@@ -704,6 +705,7 @@ CONFIG_NET_PCI=y
# CONFIG_PCNET32 is not set
# CONFIG_AMD8111_ETH is not set
# CONFIG_ADAPTEC_STARFIRE is not set
+# CONFIG_KSZ884X_PCI is not set
# CONFIG_B44 is not set
# CONFIG_FORCEDETH is not set
# CONFIG_E100 is not set
@@ -751,6 +753,8 @@ CONFIG_NETDEV_10000=y
# CONFIG_CHELSIO_T1 is not set
CONFIG_CHELSIO_T3_DEPENDS=y
# CONFIG_CHELSIO_T3 is not set
+CONFIG_CHELSIO_T4_DEPENDS=y
+# CONFIG_CHELSIO_T4 is not set
# CONFIG_ENIC is not set
# CONFIG_IXGBE is not set
# CONFIG_IXGB is not set
@@ -763,6 +767,7 @@ CONFIG_CHELSIO_T3_DEPENDS=y
# CONFIG_MLX4_CORE is not set
# CONFIG_TEHUTI is not set
# CONFIG_BNX2X is not set
+# CONFIG_QLCNIC is not set
# CONFIG_QLGE is not set
# CONFIG_SFC is not set
# CONFIG_BE2NET is not set
@@ -853,6 +858,7 @@ CONFIG_SERIAL_SH_SCI_CONSOLE=y
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
# CONFIG_SERIAL_JSM is not set
+# CONFIG_SERIAL_TIMBERDALE is not set
CONFIG_UNIX98_PTYS=y
# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
CONFIG_LEGACY_PTYS=y
@@ -927,6 +933,7 @@ CONFIG_SSB_POSSIBLE=y
# CONFIG_MFD_SH_MOBILE_SDHI is not set
# CONFIG_HTC_PASIC3 is not set
# CONFIG_MFD_TMIO is not set
+# CONFIG_LPC_SCH is not set
# CONFIG_REGULATOR is not set
# CONFIG_MEDIA_SUPPORT is not set
@@ -934,6 +941,7 @@ CONFIG_SSB_POSSIBLE=y
# Graphics support
#
CONFIG_VGA_ARB=y
+CONFIG_VGA_ARB_MAX_GPUS=16
# CONFIG_DRM is not set
# CONFIG_VGASTATE is not set
# CONFIG_VIDEO_OUTPUT_CONTROL is not set
@@ -1075,6 +1083,7 @@ CONFIG_MISC_FILESYSTEMS=y
# CONFIG_BEFS_FS is not set
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
+# CONFIG_LOGFS is not set
# CONFIG_CRAMFS is not set
# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
@@ -1105,6 +1114,7 @@ CONFIG_SUNRPC_GSS=y
CONFIG_RPCSEC_GSS_KRB5=y
# CONFIG_RPCSEC_GSS_SPKM3 is not set
# CONFIG_SMB_FS is not set
+# CONFIG_CEPH_FS is not set
# CONFIG_CIFS is not set
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
@@ -1187,13 +1197,12 @@ CONFIG_FRAME_WARN=1024
CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
# CONFIG_DEBUG_KERNEL is not set
-CONFIG_STACKTRACE=y
CONFIG_DEBUG_BUGVERBOSE=y
# CONFIG_DEBUG_MEMORY_INIT is not set
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_LKDTM is not set
# CONFIG_LATENCYTOP is not set
# CONFIG_SYSCTL_SYSCALL_CHECK is not set
-CONFIG_NOP_TRACER=y
CONFIG_HAVE_FUNCTION_TRACER=y
CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
@@ -1201,10 +1210,7 @@ CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
CONFIG_RING_BUFFER=y
-CONFIG_EVENT_TRACING=y
-CONFIG_CONTEXT_SWITCH_TRACER=y
CONFIG_RING_BUFFER_ALLOW_SWAP=y
-CONFIG_TRACING=y
CONFIG_TRACING_SUPPORT=y
# CONFIG_FTRACE is not set
# CONFIG_DYNAMIC_DEBUG is not set
@@ -1323,7 +1329,8 @@ CONFIG_CRYPTO_DEFLATE=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_HW=y
# CONFIG_CRYPTO_DEV_HIFN_795X is not set
-CONFIG_BINARY_PRINTF=y
+# CONFIG_VIRTUALIZATION is not set
+# CONFIG_BINARY_PRINTF is not set
#
# Library routines
diff --git a/arch/sh/configs/sh7710voipgw_defconfig b/arch/sh/configs/sh7710voipgw_defconfig
index 35a3beeba182..98485ca46390 100644
--- a/arch/sh/configs/sh7710voipgw_defconfig
+++ b/arch/sh/configs/sh7710voipgw_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.33-rc2
-# Mon Jan 4 14:43:04 2010
+# Linux kernel version: 2.6.34-rc5
+# Tue May 18 17:14:59 2010
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -13,8 +13,8 @@ CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
-CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_IRQ_PER_CPU=y
+CONFIG_SPARSE_IRQ=y
# CONFIG_GENERIC_GPIO is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -30,6 +30,7 @@ CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DMA_NONCOHERENT=y
+CONFIG_NEED_DMA_MAP_STATE=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
CONFIG_CONSTRUCTORS=y
@@ -44,9 +45,11 @@ CONFIG_LOCALVERSION_AUTO=y
CONFIG_HAVE_KERNEL_GZIP=y
CONFIG_HAVE_KERNEL_BZIP2=y
CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_LZO=y
CONFIG_KERNEL_GZIP=y
# CONFIG_KERNEL_BZIP2 is not set
# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_LZO is not set
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
@@ -68,11 +71,6 @@ CONFIG_RCU_FANOUT=32
# CONFIG_TREE_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_GROUP_SCHED=y
-CONFIG_FAIR_GROUP_SCHED=y
-# CONFIG_RT_GROUP_SCHED is not set
-CONFIG_USER_SCHED=y
-# CONFIG_CGROUP_SCHED is not set
# CONFIG_CGROUPS is not set
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
@@ -105,7 +103,7 @@ CONFIG_PERF_USE_VMALLOC=y
#
# Kernel Performance Events And Counters
#
-# CONFIG_PERF_EVENTS is not set
+CONFIG_PERF_EVENTS=y
# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_COMPAT_BRK=y
@@ -122,6 +120,7 @@ CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_DMA_ATTRS=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+CONFIG_HAVE_HW_BREAKPOINT=y
#
# GCOV-based kernel profiling
@@ -240,6 +239,7 @@ CONFIG_ARCH_SPARSEMEM_DEFAULT=y
CONFIG_MAX_ACTIVE_REGIONS=1
CONFIG_ARCH_POPULATES_NODE_MAP=y
CONFIG_ARCH_SELECT_MEMORY_MODEL=y
+CONFIG_UNCACHED_MAPPING=y
CONFIG_PAGE_SIZE_4KB=y
# CONFIG_PAGE_SIZE_8KB is not set
# CONFIG_PAGE_SIZE_16KB is not set
@@ -255,7 +255,7 @@ CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
-CONFIG_NR_QUICK=2
+CONFIG_NR_QUICK=1
# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
@@ -331,7 +331,6 @@ CONFIG_PREEMPT_NONE=y
# CONFIG_PREEMPT is not set
CONFIG_GUSA=y
# CONFIG_GUSA_RB is not set
-# CONFIG_SPARSE_IRQ is not set
#
# Boot options
@@ -367,7 +366,6 @@ CONFIG_NET=y
# Networking options
#
CONFIG_PACKET=y
-# CONFIG_PACKET_MMAP is not set
CONFIG_UNIX=y
CONFIG_XFRM=y
# CONFIG_XFRM_USER is not set
@@ -620,6 +618,7 @@ CONFIG_HAVE_IDE=y
#
# SCSI device support
#
+CONFIG_SCSI_MOD=y
# CONFIG_RAID_ATTRS is not set
# CONFIG_SCSI is not set
# CONFIG_SCSI_DMA is not set
@@ -723,6 +722,7 @@ CONFIG_SERIAL_SH_SCI_NR_UARTS=2
CONFIG_SERIAL_SH_SCI_CONSOLE=y
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_TIMBERDALE is not set
# CONFIG_UNIX98_PTYS is not set
CONFIG_LEGACY_PTYS=y
CONFIG_LEGACY_PTY_COUNT=256
@@ -824,7 +824,6 @@ CONFIG_RTC_LIB=y
# CONFIG_EXT2_FS is not set
# CONFIG_EXT3_FS is not set
# CONFIG_EXT4_FS is not set
-CONFIG_EXT4_USE_FOR_EXT23=y
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
@@ -890,6 +889,7 @@ CONFIG_JFFS2_ZLIB=y
# CONFIG_JFFS2_LZO is not set
CONFIG_JFFS2_RTIME=y
# CONFIG_JFFS2_RUBIN is not set
+# CONFIG_LOGFS is not set
# CONFIG_CRAMFS is not set
# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
@@ -904,6 +904,7 @@ CONFIG_NETWORK_FILESYSTEMS=y
# CONFIG_NFS_FS is not set
# CONFIG_NFSD is not set
# CONFIG_SMB_FS is not set
+# CONFIG_CEPH_FS is not set
# CONFIG_CIFS is not set
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
@@ -934,6 +935,7 @@ CONFIG_DEBUG_FS=y
CONFIG_DEBUG_BUGVERBOSE=y
# CONFIG_DEBUG_MEMORY_INIT is not set
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_LKDTM is not set
# CONFIG_LATENCYTOP is not set
# CONFIG_SYSCTL_SYSCALL_CHECK is not set
CONFIG_HAVE_FUNCTION_TRACER=y
@@ -1049,6 +1051,7 @@ CONFIG_CRYPTO=y
#
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_HW=y
+# CONFIG_VIRTUALIZATION is not set
# CONFIG_BINARY_PRINTF is not set
#
diff --git a/arch/sh/configs/sh7724_generic_defconfig b/arch/sh/configs/sh7724_generic_defconfig
index a3056b69d2ba..a6a9e6887ef8 100644
--- a/arch/sh/configs/sh7724_generic_defconfig
+++ b/arch/sh/configs/sh7724_generic_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.33-rc2
-# Mon Jan 4 15:03:45 2010
+# Linux kernel version: 2.6.34-rc5
+# Tue May 18 17:17:36 2010
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -13,8 +13,8 @@ CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
-CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_IRQ_PER_CPU=y
+CONFIG_SPARSE_IRQ=y
# CONFIG_GENERIC_GPIO is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -32,6 +32,7 @@ CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DMA_NONCOHERENT=y
+CONFIG_NEED_DMA_MAP_STATE=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
CONFIG_CONSTRUCTORS=y
@@ -46,9 +47,11 @@ CONFIG_LOCALVERSION=""
CONFIG_HAVE_KERNEL_GZIP=y
CONFIG_HAVE_KERNEL_BZIP2=y
CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_LZO=y
CONFIG_KERNEL_GZIP=y
# CONFIG_KERNEL_BZIP2 is not set
# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_LZO is not set
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
# CONFIG_BSD_PROCESS_ACCT is not set
@@ -65,11 +68,6 @@ CONFIG_RCU_FANOUT=32
# CONFIG_TREE_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=17
-CONFIG_GROUP_SCHED=y
-CONFIG_FAIR_GROUP_SCHED=y
-CONFIG_RT_GROUP_SCHED=y
-CONFIG_USER_SCHED=y
-# CONFIG_CGROUP_SCHED is not set
CONFIG_CGROUPS=y
# CONFIG_CGROUP_DEBUG is not set
# CONFIG_CGROUP_NS is not set
@@ -78,6 +76,7 @@ CONFIG_CGROUPS=y
# CONFIG_CPUSETS is not set
# CONFIG_CGROUP_CPUACCT is not set
# CONFIG_RESOURCE_COUNTERS is not set
+# CONFIG_CGROUP_SCHED is not set
# CONFIG_RELAY is not set
# CONFIG_NAMESPACES is not set
# CONFIG_BLK_DEV_INITRD is not set
@@ -106,7 +105,6 @@ CONFIG_PERF_USE_VMALLOC=y
# Kernel Performance Events And Counters
#
CONFIG_PERF_EVENTS=y
-CONFIG_EVENT_PROFILE=y
# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
# CONFIG_COMPAT_BRK is not set
@@ -114,21 +112,19 @@ CONFIG_VM_EVENT_COUNTERS=y
CONFIG_SLUB=y
# CONFIG_SLOB is not set
CONFIG_PROFILING=y
-CONFIG_TRACEPOINTS=y
CONFIG_OPROFILE=y
CONFIG_HAVE_OPROFILE=y
-CONFIG_HAVE_IOREMAP_PROT=y
CONFIG_HAVE_KPROBES=y
CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_DMA_ATTRS=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+CONFIG_HAVE_HW_BREAKPOINT=y
#
# GCOV-based kernel profiling
#
-# CONFIG_GCOV_KERNEL is not set
# CONFIG_SLOW_WORK is not set
CONFIG_HAVE_GENERIC_DMA_COHERENT=y
CONFIG_RT_MUTEXES=y
@@ -138,7 +134,6 @@ CONFIG_BLOCK=y
CONFIG_LBDAF=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_BLK_DEV_INTEGRITY is not set
-# CONFIG_BLK_CGROUP is not set
#
# IO Schedulers
@@ -235,8 +230,8 @@ CONFIG_FORCE_MAX_ZONEORDER=11
CONFIG_MEMORY_START=0x08000000
CONFIG_MEMORY_SIZE=0x04000000
CONFIG_29BIT=y
-# CONFIG_PMB_ENABLE is not set
-# CONFIG_X2TLB is not set
+# CONFIG_PMB is not set
+CONFIG_X2TLB=y
CONFIG_VSYSCALL=y
CONFIG_ARCH_FLATMEM_ENABLE=y
CONFIG_ARCH_SPARSEMEM_ENABLE=y
@@ -246,6 +241,8 @@ CONFIG_ARCH_POPULATES_NODE_MAP=y
CONFIG_ARCH_SELECT_MEMORY_MODEL=y
CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y
+CONFIG_IOREMAP_FIXED=y
+CONFIG_UNCACHED_MAPPING=y
CONFIG_PAGE_SIZE_4KB=y
# CONFIG_PAGE_SIZE_8KB is not set
# CONFIG_PAGE_SIZE_16KB is not set
@@ -262,7 +259,7 @@ CONFIG_SPLIT_PTLOCK_CPUS=4
CONFIG_MIGRATION=y
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
-CONFIG_NR_QUICK=2
+CONFIG_NR_QUICK=1
# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
@@ -353,7 +350,7 @@ CONFIG_PREEMPT_NONE=y
# CONFIG_PREEMPT_VOLUNTARY is not set
# CONFIG_PREEMPT is not set
CONFIG_GUSA=y
-# CONFIG_SPARSE_IRQ is not set
+# CONFIG_INTC_USERIMASK is not set
#
# Boot options
@@ -390,6 +387,7 @@ CONFIG_HIBERNATION_NVS=y
CONFIG_HIBERNATION=y
CONFIG_PM_STD_PARTITION=""
CONFIG_PM_RUNTIME=y
+CONFIG_PM_OPS=y
CONFIG_CPU_IDLE=y
CONFIG_CPU_IDLE_GOV_LADDER=y
CONFIG_CPU_IDLE_GOV_MENU=y
@@ -428,6 +426,7 @@ CONFIG_HAVE_IDE=y
#
# SCSI device support
#
+CONFIG_SCSI_MOD=y
# CONFIG_RAID_ATTRS is not set
# CONFIG_SCSI is not set
# CONFIG_SCSI_DMA is not set
@@ -467,6 +466,7 @@ CONFIG_SERIAL_SH_SCI_NR_UARTS=6
CONFIG_SERIAL_SH_SCI_CONSOLE=y
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_TIMBERDALE is not set
# CONFIG_UNIX98_PTYS is not set
# CONFIG_LEGACY_PTYS is not set
# CONFIG_IPMI_HANDLER is not set
@@ -491,6 +491,7 @@ CONFIG_I2C_HELPER_AUTO=y
# CONFIG_I2C_OCORES is not set
CONFIG_I2C_SH_MOBILE=y
# CONFIG_I2C_SIMTEC is not set
+# CONFIG_I2C_XILINX is not set
#
# External I2C/SMBus adapter drivers
@@ -502,15 +503,9 @@ CONFIG_I2C_SH_MOBILE=y
# Other I2C/SMBus bus drivers
#
# CONFIG_I2C_PCA_PLATFORM is not set
-
-#
-# Miscellaneous I2C Chip support
-#
-# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
# CONFIG_I2C_DEBUG_BUS is not set
-# CONFIG_I2C_DEBUG_CHIP is not set
# CONFIG_SPI is not set
#
@@ -533,6 +528,7 @@ CONFIG_SSB_POSSIBLE=y
# Multifunction device drivers
#
# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_88PM860X is not set
# CONFIG_MFD_SM501 is not set
# CONFIG_MFD_SH_MOBILE_SDHI is not set
# CONFIG_HTC_PASIC3 is not set
@@ -540,12 +536,13 @@ CONFIG_SSB_POSSIBLE=y
# CONFIG_MFD_TMIO is not set
# CONFIG_PMIC_DA903X is not set
# CONFIG_PMIC_ADP5520 is not set
+# CONFIG_MFD_MAX8925 is not set
# CONFIG_MFD_WM8400 is not set
# CONFIG_MFD_WM831X is not set
# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_WM8994 is not set
# CONFIG_MFD_PCF50633 is not set
# CONFIG_AB3100_CORE is not set
-# CONFIG_MFD_88PM8607 is not set
# CONFIG_REGULATOR is not set
# CONFIG_MEDIA_SUPPORT is not set
@@ -629,8 +626,6 @@ CONFIG_RTC_DRV_SH=y
CONFIG_UIO=y
# CONFIG_UIO_PDRV is not set
CONFIG_UIO_PDRV_GENIRQ=y
-# CONFIG_UIO_SMX is not set
-# CONFIG_UIO_SERCOS3 is not set
#
# TI VLYNQ
@@ -643,7 +638,6 @@ CONFIG_UIO_PDRV_GENIRQ=y
# CONFIG_EXT2_FS is not set
# CONFIG_EXT3_FS is not set
# CONFIG_EXT4_FS is not set
-CONFIG_EXT4_USE_FOR_EXT23=y
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
@@ -707,15 +701,12 @@ CONFIG_FRAME_WARN=1024
# CONFIG_MAGIC_SYSRQ is not set
# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_UNUSED_SYMBOLS is not set
-CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
# CONFIG_DEBUG_KERNEL is not set
-CONFIG_STACKTRACE=y
CONFIG_DEBUG_BUGVERBOSE=y
# CONFIG_DEBUG_MEMORY_INIT is not set
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
# CONFIG_LATENCYTOP is not set
-CONFIG_NOP_TRACER=y
CONFIG_HAVE_FUNCTION_TRACER=y
CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
@@ -723,13 +714,9 @@ CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
CONFIG_RING_BUFFER=y
-CONFIG_EVENT_TRACING=y
-CONFIG_CONTEXT_SWITCH_TRACER=y
CONFIG_RING_BUFFER_ALLOW_SWAP=y
-CONFIG_TRACING=y
CONFIG_TRACING_SUPPORT=y
# CONFIG_FTRACE is not set
-# CONFIG_DYNAMIC_DEBUG is not set
# CONFIG_DMA_API_DEBUG is not set
# CONFIG_SAMPLES is not set
CONFIG_HAVE_ARCH_KGDB=y
@@ -747,7 +734,8 @@ CONFIG_HAVE_ARCH_KGDB=y
CONFIG_DEFAULT_SECURITY_DAC=y
CONFIG_DEFAULT_SECURITY=""
# CONFIG_CRYPTO is not set
-CONFIG_BINARY_PRINTF=y
+# CONFIG_VIRTUALIZATION is not set
+# CONFIG_BINARY_PRINTF is not set
#
# Library routines
diff --git a/arch/sh/configs/sh7763rdp_defconfig b/arch/sh/configs/sh7763rdp_defconfig
index 04b841b29427..6f308b71f81a 100644
--- a/arch/sh/configs/sh7763rdp_defconfig
+++ b/arch/sh/configs/sh7763rdp_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.33-rc2
-# Mon Jan 4 15:05:29 2010
+# Linux kernel version: 2.6.34-rc5
+# Tue May 18 17:19:21 2010
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -13,8 +13,8 @@ CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
-CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_IRQ_PER_CPU=y
+CONFIG_SPARSE_IRQ=y
# CONFIG_GENERIC_GPIO is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -31,6 +31,7 @@ CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DMA_NONCOHERENT=y
+CONFIG_NEED_DMA_MAP_STATE=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
CONFIG_CONSTRUCTORS=y
@@ -45,9 +46,11 @@ CONFIG_LOCALVERSION_AUTO=y
CONFIG_HAVE_KERNEL_GZIP=y
CONFIG_HAVE_KERNEL_BZIP2=y
CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_LZO=y
CONFIG_KERNEL_GZIP=y
# CONFIG_KERNEL_BZIP2 is not set
# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_LZO is not set
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
@@ -69,11 +72,6 @@ CONFIG_RCU_FANOUT=32
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_GROUP_SCHED=y
-CONFIG_FAIR_GROUP_SCHED=y
-# CONFIG_RT_GROUP_SCHED is not set
-CONFIG_USER_SCHED=y
-# CONFIG_CGROUP_SCHED is not set
# CONFIG_CGROUPS is not set
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
@@ -112,7 +110,6 @@ CONFIG_PERF_USE_VMALLOC=y
# Kernel Performance Events And Counters
#
CONFIG_PERF_EVENTS=y
-CONFIG_EVENT_PROFILE=y
# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_COMPAT_BRK=y
@@ -120,7 +117,6 @@ CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
CONFIG_PROFILING=y
-CONFIG_TRACEPOINTS=y
CONFIG_OPROFILE=y
CONFIG_HAVE_OPROFILE=y
# CONFIG_KPROBES is not set
@@ -131,6 +127,7 @@ CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_DMA_ATTRS=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+CONFIG_HAVE_HW_BREAKPOINT=y
#
# GCOV-based kernel profiling
@@ -243,7 +240,7 @@ CONFIG_FORCE_MAX_ZONEORDER=11
CONFIG_MEMORY_START=0x0c000000
CONFIG_MEMORY_SIZE=0x04000000
CONFIG_29BIT=y
-# CONFIG_PMB_ENABLE is not set
+# CONFIG_PMB is not set
CONFIG_VSYSCALL=y
CONFIG_ARCH_FLATMEM_ENABLE=y
CONFIG_ARCH_SPARSEMEM_ENABLE=y
@@ -253,6 +250,7 @@ CONFIG_ARCH_POPULATES_NODE_MAP=y
CONFIG_ARCH_SELECT_MEMORY_MODEL=y
CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y
+CONFIG_UNCACHED_MAPPING=y
CONFIG_PAGE_SIZE_4KB=y
# CONFIG_PAGE_SIZE_8KB is not set
# CONFIG_PAGE_SIZE_16KB is not set
@@ -269,7 +267,7 @@ CONFIG_SPLIT_PTLOCK_CPUS=4
CONFIG_MIGRATION=y
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
-CONFIG_NR_QUICK=2
+CONFIG_NR_QUICK=1
# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
@@ -344,7 +342,7 @@ CONFIG_PREEMPT_NONE=y
# CONFIG_PREEMPT_VOLUNTARY is not set
# CONFIG_PREEMPT is not set
CONFIG_GUSA=y
-# CONFIG_SPARSE_IRQ is not set
+# CONFIG_INTC_USERIMASK is not set
#
# Boot options
@@ -381,7 +379,6 @@ CONFIG_NET=y
# Networking options
#
CONFIG_PACKET=y
-# CONFIG_PACKET_MMAP is not set
CONFIG_UNIX=y
CONFIG_XFRM=y
# CONFIG_XFRM_USER is not set
@@ -444,7 +441,6 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# Network testing
#
# CONFIG_NET_PKTGEN is not set
-# CONFIG_NET_DROP_MONITOR is not set
# CONFIG_HAMRADIO is not set
# CONFIG_CAN is not set
# CONFIG_IRDA is not set
@@ -585,6 +581,7 @@ CONFIG_HAVE_IDE=y
#
# SCSI device support
#
+CONFIG_SCSI_MOD=y
# CONFIG_RAID_ATTRS is not set
CONFIG_SCSI=y
CONFIG_SCSI_DMA=y
@@ -689,6 +686,7 @@ CONFIG_WLAN=y
# CONFIG_USB_PEGASUS is not set
# CONFIG_USB_RTL8150 is not set
# CONFIG_USB_USBNET is not set
+# CONFIG_USB_IPHETH is not set
# CONFIG_WAN is not set
# CONFIG_PPP is not set
# CONFIG_SLIP is not set
@@ -754,6 +752,7 @@ CONFIG_SERIAL_SH_SCI_NR_UARTS=3
CONFIG_SERIAL_SH_SCI_CONSOLE=y
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_TIMBERDALE is not set
CONFIG_UNIX98_PTYS=y
# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
CONFIG_LEGACY_PTYS=y
@@ -945,7 +944,6 @@ CONFIG_USB_STORAGE=y
# CONFIG_USB_RIO500 is not set
# CONFIG_USB_LEGOTOWER is not set
# CONFIG_USB_LCD is not set
-# CONFIG_USB_BERRY_CHARGE is not set
# CONFIG_USB_LED is not set
# CONFIG_USB_CYPRESS_CY7C63 is not set
# CONFIG_USB_CYTHERM is not set
@@ -957,7 +955,6 @@ CONFIG_USB_STORAGE=y
# CONFIG_USB_IOWARRIOR is not set
# CONFIG_USB_TEST is not set
# CONFIG_USB_ISIGHTFW is not set
-# CONFIG_USB_VST is not set
# CONFIG_USB_GADGET is not set
#
@@ -980,9 +977,6 @@ CONFIG_MMC_BLOCK_BOUNCE=y
# MMC/SD/SDIO Host Controller Drivers
#
# CONFIG_MMC_SDHCI is not set
-# CONFIG_MMC_AT91 is not set
-# CONFIG_MMC_ATMELMCI is not set
-# CONFIG_MMC_TMIO is not set
# CONFIG_MEMSTICK is not set
# CONFIG_NEW_LEDS is not set
# CONFIG_ACCESSIBILITY is not set
@@ -1074,6 +1068,7 @@ CONFIG_MISC_FILESYSTEMS=y
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
# CONFIG_JFFS2_FS is not set
+# CONFIG_LOGFS is not set
# CONFIG_CRAMFS is not set
# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
@@ -1096,6 +1091,7 @@ CONFIG_SUNRPC=y
# CONFIG_RPCSEC_GSS_KRB5 is not set
# CONFIG_RPCSEC_GSS_SPKM3 is not set
# CONFIG_SMB_FS is not set
+# CONFIG_CEPH_FS is not set
# CONFIG_CIFS is not set
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
@@ -1162,13 +1158,12 @@ CONFIG_FRAME_WARN=1024
CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
# CONFIG_DEBUG_KERNEL is not set
-CONFIG_STACKTRACE=y
CONFIG_DEBUG_BUGVERBOSE=y
# CONFIG_DEBUG_MEMORY_INIT is not set
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_LKDTM is not set
# CONFIG_LATENCYTOP is not set
# CONFIG_SYSCTL_SYSCALL_CHECK is not set
-CONFIG_NOP_TRACER=y
CONFIG_HAVE_FUNCTION_TRACER=y
CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
@@ -1176,10 +1171,7 @@ CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
CONFIG_RING_BUFFER=y
-CONFIG_EVENT_TRACING=y
-CONFIG_CONTEXT_SWITCH_TRACER=y
CONFIG_RING_BUFFER_ALLOW_SWAP=y
-CONFIG_TRACING=y
CONFIG_TRACING_SUPPORT=y
# CONFIG_FTRACE is not set
# CONFIG_DYNAMIC_DEBUG is not set
@@ -1287,7 +1279,8 @@ CONFIG_CRYPTO=y
#
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_HW=y
-CONFIG_BINARY_PRINTF=y
+# CONFIG_VIRTUALIZATION is not set
+# CONFIG_BINARY_PRINTF is not set
#
# Library routines
diff --git a/arch/sh/configs/sh7770_generic_defconfig b/arch/sh/configs/sh7770_generic_defconfig
index 7b247053ece6..4327f898baa8 100644
--- a/arch/sh/configs/sh7770_generic_defconfig
+++ b/arch/sh/configs/sh7770_generic_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.33-rc2
-# Mon Jan 4 15:06:28 2010
+# Linux kernel version: 2.6.34-rc5
+# Tue May 18 17:20:25 2010
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -13,8 +13,8 @@ CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
-CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_IRQ_PER_CPU=y
+CONFIG_SPARSE_IRQ=y
# CONFIG_GENERIC_GPIO is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -31,6 +31,7 @@ CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DMA_NONCOHERENT=y
+CONFIG_NEED_DMA_MAP_STATE=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
CONFIG_CONSTRUCTORS=y
@@ -45,9 +46,11 @@ CONFIG_LOCALVERSION=""
CONFIG_HAVE_KERNEL_GZIP=y
CONFIG_HAVE_KERNEL_BZIP2=y
CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_LZO=y
CONFIG_KERNEL_GZIP=y
# CONFIG_KERNEL_BZIP2 is not set
# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_LZO is not set
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
# CONFIG_BSD_PROCESS_ACCT is not set
@@ -64,11 +67,6 @@ CONFIG_RCU_FANOUT=32
# CONFIG_TREE_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=17
-CONFIG_GROUP_SCHED=y
-CONFIG_FAIR_GROUP_SCHED=y
-CONFIG_RT_GROUP_SCHED=y
-CONFIG_USER_SCHED=y
-# CONFIG_CGROUP_SCHED is not set
CONFIG_CGROUPS=y
# CONFIG_CGROUP_DEBUG is not set
# CONFIG_CGROUP_NS is not set
@@ -77,6 +75,7 @@ CONFIG_CGROUPS=y
# CONFIG_CPUSETS is not set
# CONFIG_CGROUP_CPUACCT is not set
# CONFIG_RESOURCE_COUNTERS is not set
+# CONFIG_CGROUP_SCHED is not set
# CONFIG_RELAY is not set
# CONFIG_NAMESPACES is not set
# CONFIG_BLK_DEV_INITRD is not set
@@ -105,7 +104,6 @@ CONFIG_PERF_USE_VMALLOC=y
# Kernel Performance Events And Counters
#
CONFIG_PERF_EVENTS=y
-CONFIG_EVENT_PROFILE=y
# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
# CONFIG_COMPAT_BRK is not set
@@ -113,7 +111,6 @@ CONFIG_VM_EVENT_COUNTERS=y
CONFIG_SLUB=y
# CONFIG_SLOB is not set
CONFIG_PROFILING=y
-CONFIG_TRACEPOINTS=y
CONFIG_OPROFILE=y
CONFIG_HAVE_OPROFILE=y
CONFIG_HAVE_IOREMAP_PROT=y
@@ -123,11 +120,11 @@ CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_DMA_ATTRS=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+CONFIG_HAVE_HW_BREAKPOINT=y
#
# GCOV-based kernel profiling
#
-# CONFIG_GCOV_KERNEL is not set
# CONFIG_SLOW_WORK is not set
CONFIG_HAVE_GENERIC_DMA_COHERENT=y
CONFIG_RT_MUTEXES=y
@@ -137,7 +134,6 @@ CONFIG_BLOCK=y
CONFIG_LBDAF=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_BLK_DEV_INTEGRITY is not set
-# CONFIG_BLK_CGROUP is not set
#
# IO Schedulers
@@ -232,7 +228,7 @@ CONFIG_FORCE_MAX_ZONEORDER=11
CONFIG_MEMORY_START=0x08000000
CONFIG_MEMORY_SIZE=0x04000000
CONFIG_29BIT=y
-# CONFIG_PMB_ENABLE is not set
+# CONFIG_PMB is not set
CONFIG_VSYSCALL=y
CONFIG_ARCH_FLATMEM_ENABLE=y
CONFIG_ARCH_SPARSEMEM_ENABLE=y
@@ -242,6 +238,7 @@ CONFIG_ARCH_POPULATES_NODE_MAP=y
CONFIG_ARCH_SELECT_MEMORY_MODEL=y
CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y
+CONFIG_UNCACHED_MAPPING=y
CONFIG_PAGE_SIZE_4KB=y
# CONFIG_PAGE_SIZE_8KB is not set
# CONFIG_PAGE_SIZE_16KB is not set
@@ -258,7 +255,7 @@ CONFIG_SPLIT_PTLOCK_CPUS=4
CONFIG_MIGRATION=y
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
-CONFIG_NR_QUICK=2
+CONFIG_NR_QUICK=1
# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
@@ -347,7 +344,7 @@ CONFIG_PREEMPT_NONE=y
# CONFIG_PREEMPT_VOLUNTARY is not set
# CONFIG_PREEMPT is not set
CONFIG_GUSA=y
-# CONFIG_SPARSE_IRQ is not set
+# CONFIG_INTC_USERIMASK is not set
#
# Boot options
@@ -382,6 +379,7 @@ CONFIG_HIBERNATION_NVS=y
CONFIG_HIBERNATION=y
CONFIG_PM_STD_PARTITION=""
# CONFIG_PM_RUNTIME is not set
+CONFIG_PM_OPS=y
CONFIG_CPU_IDLE=y
CONFIG_CPU_IDLE_GOV_LADDER=y
CONFIG_CPU_IDLE_GOV_MENU=y
@@ -420,6 +418,7 @@ CONFIG_HAVE_IDE=y
#
# SCSI device support
#
+CONFIG_SCSI_MOD=y
# CONFIG_RAID_ATTRS is not set
# CONFIG_SCSI is not set
# CONFIG_SCSI_DMA is not set
@@ -459,6 +458,7 @@ CONFIG_SERIAL_SH_SCI_NR_UARTS=6
CONFIG_SERIAL_SH_SCI_CONSOLE=y
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_TIMBERDALE is not set
# CONFIG_UNIX98_PTYS is not set
# CONFIG_LEGACY_PTYS is not set
# CONFIG_IPMI_HANDLER is not set
@@ -483,6 +483,7 @@ CONFIG_I2C_HELPER_AUTO=y
# CONFIG_I2C_OCORES is not set
CONFIG_I2C_SH_MOBILE=y
# CONFIG_I2C_SIMTEC is not set
+# CONFIG_I2C_XILINX is not set
#
# External I2C/SMBus adapter drivers
@@ -494,15 +495,9 @@ CONFIG_I2C_SH_MOBILE=y
# Other I2C/SMBus bus drivers
#
# CONFIG_I2C_PCA_PLATFORM is not set
-
-#
-# Miscellaneous I2C Chip support
-#
-# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
# CONFIG_I2C_DEBUG_BUS is not set
-# CONFIG_I2C_DEBUG_CHIP is not set
# CONFIG_SPI is not set
#
@@ -525,6 +520,7 @@ CONFIG_SSB_POSSIBLE=y
# Multifunction device drivers
#
# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_88PM860X is not set
# CONFIG_MFD_SM501 is not set
# CONFIG_MFD_SH_MOBILE_SDHI is not set
# CONFIG_HTC_PASIC3 is not set
@@ -532,12 +528,13 @@ CONFIG_SSB_POSSIBLE=y
# CONFIG_MFD_TMIO is not set
# CONFIG_PMIC_DA903X is not set
# CONFIG_PMIC_ADP5520 is not set
+# CONFIG_MFD_MAX8925 is not set
# CONFIG_MFD_WM8400 is not set
# CONFIG_MFD_WM831X is not set
# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_WM8994 is not set
# CONFIG_MFD_PCF50633 is not set
# CONFIG_AB3100_CORE is not set
-# CONFIG_MFD_88PM8607 is not set
# CONFIG_REGULATOR is not set
# CONFIG_MEDIA_SUPPORT is not set
@@ -621,8 +618,6 @@ CONFIG_RTC_DRV_SH=y
CONFIG_UIO=y
# CONFIG_UIO_PDRV is not set
CONFIG_UIO_PDRV_GENIRQ=y
-# CONFIG_UIO_SMX is not set
-# CONFIG_UIO_SERCOS3 is not set
#
# TI VLYNQ
@@ -635,7 +630,6 @@ CONFIG_UIO_PDRV_GENIRQ=y
# CONFIG_EXT2_FS is not set
# CONFIG_EXT3_FS is not set
# CONFIG_EXT4_FS is not set
-CONFIG_EXT4_USE_FOR_EXT23=y
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
@@ -699,15 +693,12 @@ CONFIG_FRAME_WARN=1024
# CONFIG_MAGIC_SYSRQ is not set
# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_UNUSED_SYMBOLS is not set
-CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
# CONFIG_DEBUG_KERNEL is not set
-CONFIG_STACKTRACE=y
CONFIG_DEBUG_BUGVERBOSE=y
# CONFIG_DEBUG_MEMORY_INIT is not set
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
# CONFIG_LATENCYTOP is not set
-CONFIG_NOP_TRACER=y
CONFIG_HAVE_FUNCTION_TRACER=y
CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
@@ -715,13 +706,9 @@ CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
CONFIG_RING_BUFFER=y
-CONFIG_EVENT_TRACING=y
-CONFIG_CONTEXT_SWITCH_TRACER=y
CONFIG_RING_BUFFER_ALLOW_SWAP=y
-CONFIG_TRACING=y
CONFIG_TRACING_SUPPORT=y
# CONFIG_FTRACE is not set
-# CONFIG_DYNAMIC_DEBUG is not set
# CONFIG_DMA_API_DEBUG is not set
# CONFIG_SAMPLES is not set
CONFIG_HAVE_ARCH_KGDB=y
@@ -739,7 +726,8 @@ CONFIG_HAVE_ARCH_KGDB=y
CONFIG_DEFAULT_SECURITY_DAC=y
CONFIG_DEFAULT_SECURITY=""
# CONFIG_CRYPTO is not set
-CONFIG_BINARY_PRINTF=y
+# CONFIG_VIRTUALIZATION is not set
+# CONFIG_BINARY_PRINTF is not set
#
# Library routines
diff --git a/arch/sh/configs/sh7785lcr_32bit_defconfig b/arch/sh/configs/sh7785lcr_32bit_defconfig
index 8330813b0c1d..71f39c71b04b 100644
--- a/arch/sh/configs/sh7785lcr_32bit_defconfig
+++ b/arch/sh/configs/sh7785lcr_32bit_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.33-rc2
-# Mon Jan 4 15:07:40 2010
+# Linux kernel version: 2.6.34
+# Mon May 24 08:33:02 2010
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -13,8 +13,8 @@ CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
-CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_IRQ_PER_CPU=y
+CONFIG_SPARSE_IRQ=y
# CONFIG_GENERIC_GPIO is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -33,6 +33,7 @@ CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DMA_NONCOHERENT=y
+CONFIG_NEED_DMA_MAP_STATE=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
CONFIG_CONSTRUCTORS=y
@@ -48,9 +49,11 @@ CONFIG_LOCALVERSION_AUTO=y
CONFIG_HAVE_KERNEL_GZIP=y
CONFIG_HAVE_KERNEL_BZIP2=y
CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_LZO=y
CONFIG_KERNEL_GZIP=y
# CONFIG_KERNEL_BZIP2 is not set
# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_LZO is not set
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
@@ -73,12 +76,7 @@ CONFIG_RCU_FANOUT=32
# CONFIG_TREE_RCU_TRACE is not set
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_GROUP_SCHED=y
-CONFIG_FAIR_GROUP_SCHED=y
-# CONFIG_RT_GROUP_SCHED is not set
-CONFIG_USER_SCHED=y
-# CONFIG_CGROUP_SCHED is not set
+CONFIG_LOG_BUF_SHIFT=16
# CONFIG_CGROUPS is not set
# CONFIG_SYSFS_DEPRECATED_V2 is not set
# CONFIG_RELAY is not set
@@ -112,27 +110,26 @@ CONFIG_PERF_USE_VMALLOC=y
# Kernel Performance Events And Counters
#
CONFIG_PERF_EVENTS=y
-CONFIG_EVENT_PROFILE=y
CONFIG_PERF_COUNTERS=y
+# CONFIG_DEBUG_PERF_USE_VMALLOC is not set
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_PCI_QUIRKS=y
-CONFIG_COMPAT_BRK=y
+# CONFIG_COMPAT_BRK is not set
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
CONFIG_PROFILING=y
-CONFIG_TRACEPOINTS=y
-CONFIG_OPROFILE=y
+# CONFIG_OPROFILE is not set
CONFIG_HAVE_OPROFILE=y
-CONFIG_KPROBES=y
-CONFIG_KRETPROBES=y
-CONFIG_HAVE_IOREMAP_PROT=y
+# CONFIG_KPROBES is not set
CONFIG_HAVE_KPROBES=y
CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_DMA_ATTRS=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+CONFIG_HAVE_HW_BREAKPOINT=y
+CONFIG_HAVE_MIXED_BREAKPOINTS_REGS=y
#
# GCOV-based kernel profiling
@@ -248,10 +245,8 @@ CONFIG_MEMORY_START=0x40000000
CONFIG_MEMORY_SIZE=0x20000000
# CONFIG_29BIT is not set
CONFIG_32BIT=y
-CONFIG_PMB_ENABLE=y
-# CONFIG_PMB is not set
-CONFIG_PMB_FIXED=y
-# CONFIG_X2TLB is not set
+CONFIG_PMB=y
+CONFIG_X2TLB=y
CONFIG_VSYSCALL=y
# CONFIG_NUMA is not set
CONFIG_ARCH_FLATMEM_ENABLE=y
@@ -262,13 +257,15 @@ CONFIG_ARCH_POPULATES_NODE_MAP=y
CONFIG_ARCH_SELECT_MEMORY_MODEL=y
CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y
+CONFIG_IOREMAP_FIXED=y
+CONFIG_UNCACHED_MAPPING=y
CONFIG_PAGE_SIZE_4KB=y
# CONFIG_PAGE_SIZE_8KB is not set
# CONFIG_PAGE_SIZE_16KB is not set
# CONFIG_PAGE_SIZE_64KB is not set
-CONFIG_HUGETLB_PAGE_SIZE_64K=y
+# CONFIG_HUGETLB_PAGE_SIZE_64K is not set
# CONFIG_HUGETLB_PAGE_SIZE_256K is not set
-# CONFIG_HUGETLB_PAGE_SIZE_1MB is not set
+CONFIG_HUGETLB_PAGE_SIZE_1MB=y
# CONFIG_HUGETLB_PAGE_SIZE_4MB is not set
# CONFIG_HUGETLB_PAGE_SIZE_64MB is not set
# CONFIG_HUGETLB_PAGE_SIZE_512MB is not set
@@ -280,11 +277,11 @@ CONFIG_SPARSEMEM=y
CONFIG_HAVE_MEMORY_PRESENT=y
CONFIG_SPARSEMEM_STATIC=y
# CONFIG_MEMORY_HOTPLUG is not set
-CONFIG_SPLIT_PTLOCK_CPUS=4
+CONFIG_SPLIT_PTLOCK_CPUS=999999
CONFIG_MIGRATION=y
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
-CONFIG_NR_QUICK=2
+CONFIG_NR_QUICK=1
# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
@@ -302,6 +299,7 @@ CONFIG_CPU_LITTLE_ENDIAN=y
# CONFIG_CPU_BIG_ENDIAN is not set
CONFIG_SH_FPU=y
CONFIG_SH_STORE_QUEUES=y
+CONFIG_SPECULATIVE_EXECUTION=y
CONFIG_CPU_HAS_INTEVT=y
CONFIG_CPU_HAS_SR_RB=y
CONFIG_CPU_HAS_FPU=y
@@ -374,7 +372,7 @@ CONFIG_SECCOMP=y
# CONFIG_PREEMPT_VOLUNTARY is not set
CONFIG_PREEMPT=y
CONFIG_GUSA=y
-CONFIG_SPARSE_IRQ=y
+CONFIG_INTC_USERIMASK=y
#
# Boot options
@@ -389,9 +387,10 @@ CONFIG_ENTRY_OFFSET=0x00001000
# Bus options
#
CONFIG_PCI=y
+CONFIG_PCI_DOMAINS=y
# CONFIG_PCIEPORTBUS is not set
# CONFIG_ARCH_SUPPORTS_MSI is not set
-# CONFIG_PCI_LEGACY is not set
+CONFIG_PCI_DEBUG=y
# CONFIG_PCI_STUB is not set
# CONFIG_PCI_IOV is not set
# CONFIG_PCCARD is not set
@@ -421,7 +420,6 @@ CONFIG_NET=y
# Networking options
#
CONFIG_PACKET=y
-# CONFIG_PACKET_MMAP is not set
CONFIG_UNIX=y
CONFIG_XFRM=y
# CONFIG_XFRM_USER is not set
@@ -469,6 +467,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_RDS is not set
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
+# CONFIG_L2TP is not set
# CONFIG_BRIDGE is not set
# CONFIG_NET_DSA is not set
# CONFIG_VLAN_8021Q is not set
@@ -489,8 +488,6 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# Network testing
#
# CONFIG_NET_PKTGEN is not set
-# CONFIG_NET_TCPPROBE is not set
-# CONFIG_NET_DROP_MONITOR is not set
# CONFIG_HAMRADIO is not set
# CONFIG_CAN is not set
# CONFIG_IRDA is not set
@@ -503,11 +500,20 @@ CONFIG_WIRELESS=y
#
# CFG80211 needs to be enabled for MAC80211
#
+
+#
+# Some wireless drivers require a rate control algorithm
+#
# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
# CONFIG_NET_9P is not set
#
+# CAIF Support
+#
+# CONFIG_CAIF is not set
+
+#
# Device Drivers
#
@@ -518,7 +524,11 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_DEVTMPFS is not set
CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
-# CONFIG_FW_LOADER is not set
+CONFIG_FW_LOADER=y
+CONFIG_FIRMWARE_IN_KERNEL=y
+CONFIG_EXTRA_FIRMWARE=""
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
# CONFIG_SYS_HYPERVISOR is not set
# CONFIG_CONNECTOR is not set
CONFIG_MTD=y
@@ -541,6 +551,7 @@ CONFIG_MTD_BLOCK=y
# CONFIG_INFTL is not set
# CONFIG_RFD_FTL is not set
# CONFIG_SSFDC is not set
+# CONFIG_SM_FTL is not set
# CONFIG_MTD_OOPS is not set
#
@@ -633,6 +644,7 @@ CONFIG_HAVE_IDE=y
#
# SCSI device support
#
+CONFIG_SCSI_MOD=y
# CONFIG_RAID_ATTRS is not set
CONFIG_SCSI=y
CONFIG_SCSI_DMA=y
@@ -671,7 +683,9 @@ CONFIG_ATA=y
CONFIG_ATA_VERBOSE_ERROR=y
CONFIG_SATA_PMP=y
# CONFIG_SATA_AHCI is not set
+# CONFIG_SATA_AHCI_PLATFORM is not set
# CONFIG_SATA_SIL24 is not set
+# CONFIG_SATA_INIC162X is not set
CONFIG_ATA_SFF=y
# CONFIG_SATA_SVW is not set
# CONFIG_ATA_PIIX is not set
@@ -686,7 +700,6 @@ CONFIG_SATA_SIL=y
# CONFIG_SATA_ULI is not set
# CONFIG_SATA_VIA is not set
# CONFIG_SATA_VITESSE is not set
-# CONFIG_SATA_INIC162X is not set
# CONFIG_PATA_ALI is not set
# CONFIG_PATA_AMD is not set
# CONFIG_PATA_ARTOP is not set
@@ -706,6 +719,7 @@ CONFIG_SATA_SIL=y
# CONFIG_PATA_IT821X is not set
# CONFIG_PATA_IT8213 is not set
# CONFIG_PATA_JMICRON is not set
+# CONFIG_PATA_LEGACY is not set
# CONFIG_PATA_TRIFLEX is not set
# CONFIG_PATA_MARVELL is not set
# CONFIG_PATA_MPIIX is not set
@@ -742,7 +756,7 @@ CONFIG_SATA_SIL=y
#
#
-# See the help texts for more information.
+# The newer stack is recommended.
#
# CONFIG_FIREWIRE is not set
# CONFIG_IEEE1394 is not set
@@ -755,8 +769,36 @@ CONFIG_NETDEVICES=y
# CONFIG_TUN is not set
# CONFIG_VETH is not set
# CONFIG_ARCNET is not set
-# CONFIG_NET_ETHERNET is not set
+# CONFIG_PHYLIB is not set
+CONFIG_NET_ETHERNET=y
CONFIG_MII=y
+# CONFIG_AX88796 is not set
+# CONFIG_STNIC is not set
+# CONFIG_HAPPYMEAL is not set
+# CONFIG_SUNGEM is not set
+# CONFIG_CASSINI is not set
+CONFIG_NET_VENDOR_3COM=y
+CONFIG_VORTEX=y
+# CONFIG_TYPHOON is not set
+# CONFIG_SMC91X is not set
+# CONFIG_ETHOC is not set
+# CONFIG_SMC911X is not set
+# CONFIG_SMSC911X is not set
+# CONFIG_DNET is not set
+# CONFIG_NET_TULIP is not set
+# CONFIG_HP100 is not set
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
+# CONFIG_NET_PCI is not set
+# CONFIG_B44 is not set
+# CONFIG_KS8842 is not set
+# CONFIG_KS8851_MLL is not set
+# CONFIG_ATL2 is not set
CONFIG_NETDEV_1000=y
# CONFIG_ACENIC is not set
# CONFIG_DL2K is not set
@@ -797,6 +839,7 @@ CONFIG_R8169=y
# CONFIG_USB_PEGASUS is not set
# CONFIG_USB_RTL8150 is not set
# CONFIG_USB_USBNET is not set
+# CONFIG_USB_IPHETH is not set
# CONFIG_WAN is not set
# CONFIG_FDDI is not set
# CONFIG_HIPPI is not set
@@ -837,6 +880,7 @@ CONFIG_INPUT_KEYBOARD=y
CONFIG_KEYBOARD_ATKBD=y
# CONFIG_QT2160 is not set
# CONFIG_KEYBOARD_LKKBD is not set
+# CONFIG_KEYBOARD_TCA6416 is not set
# CONFIG_KEYBOARD_MAX7359 is not set
# CONFIG_KEYBOARD_NEWTON is not set
# CONFIG_KEYBOARD_OPENCORES is not set
@@ -885,6 +929,7 @@ CONFIG_HW_CONSOLE=y
CONFIG_VT_HW_CONSOLE_BINDING=y
# CONFIG_DEVKMEM is not set
# CONFIG_SERIAL_NONSTANDARD is not set
+# CONFIG_N_GSM is not set
# CONFIG_NOZOMI is not set
#
@@ -901,6 +946,9 @@ CONFIG_SERIAL_SH_SCI_CONSOLE=y
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
# CONFIG_SERIAL_JSM is not set
+# CONFIG_SERIAL_TIMBERDALE is not set
+# CONFIG_SERIAL_ALTERA_JTAGUART is not set
+# CONFIG_SERIAL_ALTERA_UART is not set
CONFIG_UNIX98_PTYS=y
CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
# CONFIG_LEGACY_PTYS is not set
@@ -947,6 +995,7 @@ CONFIG_I2C_ALGOPCA=y
# CONFIG_I2C_OCORES is not set
# CONFIG_I2C_SH_MOBILE is not set
# CONFIG_I2C_SIMTEC is not set
+# CONFIG_I2C_XILINX is not set
#
# External I2C/SMBus adapter drivers
@@ -960,15 +1009,9 @@ CONFIG_I2C_ALGOPCA=y
#
CONFIG_I2C_PCA_PLATFORM=y
# CONFIG_I2C_STUB is not set
-
-#
-# Miscellaneous I2C Chip support
-#
-# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
# CONFIG_I2C_DEBUG_BUS is not set
-# CONFIG_I2C_DEBUG_CHIP is not set
# CONFIG_SPI is not set
#
@@ -1011,6 +1054,7 @@ CONFIG_SSB_POSSIBLE=y
# Multifunction device drivers
#
# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_88PM860X is not set
CONFIG_MFD_SM501=y
# CONFIG_MFD_SH_MOBILE_SDHI is not set
# CONFIG_HTC_PASIC3 is not set
@@ -1018,12 +1062,14 @@ CONFIG_MFD_SM501=y
# CONFIG_MFD_TMIO is not set
# CONFIG_PMIC_DA903X is not set
# CONFIG_PMIC_ADP5520 is not set
+# CONFIG_MFD_MAX8925 is not set
# CONFIG_MFD_WM8400 is not set
# CONFIG_MFD_WM831X is not set
# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_WM8994 is not set
# CONFIG_MFD_PCF50633 is not set
# CONFIG_AB3100_CORE is not set
-# CONFIG_MFD_88PM8607 is not set
+# CONFIG_LPC_SCH is not set
# CONFIG_REGULATOR is not set
# CONFIG_MEDIA_SUPPORT is not set
@@ -1073,7 +1119,7 @@ CONFIG_FB_CFB_IMAGEBLIT=y
# CONFIG_FB_SIS is not set
# CONFIG_FB_VIA is not set
# CONFIG_FB_NEOMAGIC is not set
-# CONFIG_FB_KYRO is not set
+CONFIG_FB_KYRO=y
# CONFIG_FB_3DFX is not set
# CONFIG_FB_VOODOO1 is not set
# CONFIG_FB_VT8623 is not set
@@ -1099,15 +1145,15 @@ CONFIG_FB_SM501=y
#
CONFIG_DUMMY_CONSOLE=y
CONFIG_FRAMEBUFFER_CONSOLE=y
-# CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set
+CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
# CONFIG_FONTS is not set
CONFIG_FONT_8x8=y
CONFIG_FONT_8x16=y
CONFIG_LOGO=y
-# CONFIG_LOGO_LINUX_MONO is not set
-# CONFIG_LOGO_LINUX_VGA16 is not set
-# CONFIG_LOGO_LINUX_CLUT224 is not set
+CONFIG_LOGO_LINUX_MONO=y
+CONFIG_LOGO_LINUX_VGA16=y
+CONFIG_LOGO_LINUX_CLUT224=y
CONFIG_LOGO_SUPERH_MONO=y
CONFIG_LOGO_SUPERH_VGA16=y
CONFIG_LOGO_SUPERH_CLUT224=y
@@ -1131,15 +1177,18 @@ CONFIG_SND_SEQ_HRTIMER_DEFAULT=y
CONFIG_SND_DYNAMIC_MINORS=y
# CONFIG_SND_SUPPORT_OLD_API is not set
# CONFIG_SND_VERBOSE_PROCFS is not set
-# CONFIG_SND_VERBOSE_PRINTK is not set
-# CONFIG_SND_DEBUG is not set
+CONFIG_SND_VERBOSE_PRINTK=y
+CONFIG_SND_DEBUG=y
+CONFIG_SND_DEBUG_VERBOSE=y
+CONFIG_SND_VMASTER=y
CONFIG_SND_RAWMIDI_SEQ=y
CONFIG_SND_OPL3_LIB_SEQ=y
# CONFIG_SND_OPL4_LIB_SEQ is not set
# CONFIG_SND_SBAWE_SEQ is not set
-# CONFIG_SND_EMU10K1_SEQ is not set
+CONFIG_SND_EMU10K1_SEQ=y
CONFIG_SND_MPU401_UART=y
CONFIG_SND_OPL3_LIB=y
+CONFIG_SND_AC97_CODEC=y
# CONFIG_SND_DRIVERS is not set
CONFIG_SND_PCI=y
# CONFIG_SND_AD1889 is not set
@@ -1174,7 +1223,7 @@ CONFIG_SND_CMIPCI=y
# CONFIG_SND_INDIGODJ is not set
# CONFIG_SND_INDIGOIOX is not set
# CONFIG_SND_INDIGODJX is not set
-# CONFIG_SND_EMU10K1 is not set
+CONFIG_SND_EMU10K1=y
# CONFIG_SND_EMU10K1X is not set
# CONFIG_SND_ENS1370 is not set
# CONFIG_SND_ENS1371 is not set
@@ -1209,9 +1258,11 @@ CONFIG_SND_CMIPCI=y
# CONFIG_SND_SUPERH is not set
CONFIG_SND_USB=y
# CONFIG_SND_USB_AUDIO is not set
+# CONFIG_SND_USB_UA101 is not set
# CONFIG_SND_USB_CAIAQ is not set
# CONFIG_SND_SOC is not set
# CONFIG_SOUND_PRIME is not set
+CONFIG_AC97_BUS=y
CONFIG_HID_SUPPORT=y
CONFIG_HID=y
# CONFIG_HIDRAW is not set
@@ -1226,39 +1277,43 @@ CONFIG_USB_HID=y
#
# Special HID drivers
#
-CONFIG_HID_A4TECH=m
-CONFIG_HID_APPLE=m
-CONFIG_HID_BELKIN=m
-CONFIG_HID_CHERRY=m
-CONFIG_HID_CHICONY=m
-CONFIG_HID_CYPRESS=m
-CONFIG_HID_DRAGONRISE=m
-# CONFIG_DRAGONRISE_FF is not set
-CONFIG_HID_EZKEY=m
-CONFIG_HID_KYE=m
-CONFIG_HID_GYRATION=m
-CONFIG_HID_TWINHAN=m
-CONFIG_HID_KENSINGTON=m
-CONFIG_HID_LOGITECH=m
-# CONFIG_LOGITECH_FF is not set
-# CONFIG_LOGIRUMBLEPAD2_FF is not set
-CONFIG_HID_MICROSOFT=m
-CONFIG_HID_MONTEREY=m
-CONFIG_HID_NTRIG=m
-CONFIG_HID_PANTHERLORD=m
-# CONFIG_PANTHERLORD_FF is not set
-CONFIG_HID_PETALYNX=m
-CONFIG_HID_SAMSUNG=m
-CONFIG_HID_SONY=m
-CONFIG_HID_SUNPLUS=m
-CONFIG_HID_GREENASIA=m
-# CONFIG_GREENASIA_FF is not set
-CONFIG_HID_SMARTJOYPLUS=m
-# CONFIG_SMARTJOYPLUS_FF is not set
-CONFIG_HID_TOPSEED=m
+# CONFIG_HID_3M_PCT is not set
+# CONFIG_HID_A4TECH is not set
+# CONFIG_HID_APPLE is not set
+# CONFIG_HID_BELKIN is not set
+# CONFIG_HID_CANDO is not set
+# CONFIG_HID_CHERRY is not set
+# CONFIG_HID_CHICONY is not set
+# CONFIG_HID_PRODIKEYS is not set
+# CONFIG_HID_CYPRESS is not set
+# CONFIG_HID_DRAGONRISE is not set
+# CONFIG_HID_EGALAX is not set
+# CONFIG_HID_EZKEY is not set
+# CONFIG_HID_KYE is not set
+# CONFIG_HID_GYRATION is not set
+# CONFIG_HID_TWINHAN is not set
+# CONFIG_HID_KENSINGTON is not set
+# CONFIG_HID_LOGITECH is not set
+# CONFIG_HID_MICROSOFT is not set
+# CONFIG_HID_MOSART is not set
+# CONFIG_HID_MONTEREY is not set
+# CONFIG_HID_NTRIG is not set
+# CONFIG_HID_ORTEK is not set
+# CONFIG_HID_PANTHERLORD is not set
+# CONFIG_HID_PETALYNX is not set
+# CONFIG_HID_PICOLCD is not set
+# CONFIG_HID_QUANTA is not set
+# CONFIG_HID_ROCCAT_KONE is not set
+# CONFIG_HID_SAMSUNG is not set
+# CONFIG_HID_SONY is not set
+# CONFIG_HID_STANTUM is not set
+# CONFIG_HID_SUNPLUS is not set
+# CONFIG_HID_GREENASIA is not set
+# CONFIG_HID_SMARTJOYPLUS is not set
+# CONFIG_HID_TOPSEED is not set
# CONFIG_HID_THRUSTMASTER is not set
-CONFIG_HID_ZEROPLUS=m
-# CONFIG_ZEROPLUS_FF is not set
+# CONFIG_HID_ZEROPLUS is not set
+# CONFIG_HID_ZYDACRON is not set
CONFIG_USB_SUPPORT=y
CONFIG_USB_ARCH_HAS_HCD=y
CONFIG_USB_ARCH_HAS_OHCI=y
@@ -1271,10 +1326,8 @@ CONFIG_USB=y
# Miscellaneous USB options
#
# CONFIG_USB_DEVICEFS is not set
-CONFIG_USB_DEVICE_CLASS=y
+# CONFIG_USB_DEVICE_CLASS is not set
# CONFIG_USB_DYNAMIC_MINORS is not set
-# CONFIG_USB_SUSPEND is not set
-# CONFIG_USB_OTG is not set
# CONFIG_USB_OTG_WHITELIST is not set
# CONFIG_USB_OTG_BLACKLIST_HUB is not set
# CONFIG_USB_MON is not set
@@ -1286,9 +1339,7 @@ CONFIG_USB_DEVICE_CLASS=y
#
# CONFIG_USB_C67X00_HCD is not set
# CONFIG_USB_XHCI_HCD is not set
-CONFIG_USB_EHCI_HCD=m
-# CONFIG_USB_EHCI_ROOT_HUB_TT is not set
-# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+# CONFIG_USB_EHCI_HCD is not set
# CONFIG_USB_OXU210HP_HCD is not set
# CONFIG_USB_ISP116X_HCD is not set
# CONFIG_USB_ISP1760_HCD is not set
@@ -1351,20 +1402,17 @@ CONFIG_USB_STORAGE=y
# CONFIG_USB_RIO500 is not set
# CONFIG_USB_LEGOTOWER is not set
# CONFIG_USB_LCD is not set
-# CONFIG_USB_BERRY_CHARGE is not set
# CONFIG_USB_LED is not set
# CONFIG_USB_CYPRESS_CY7C63 is not set
# CONFIG_USB_CYTHERM is not set
# CONFIG_USB_IDMOUSE is not set
# CONFIG_USB_FTDI_ELAN is not set
# CONFIG_USB_APPLEDISPLAY is not set
-# CONFIG_USB_SISUSBVGA is not set
# CONFIG_USB_LD is not set
# CONFIG_USB_TRANCEVIBRATOR is not set
# CONFIG_USB_IOWARRIOR is not set
# CONFIG_USB_TEST is not set
# CONFIG_USB_ISIGHTFW is not set
-# CONFIG_USB_VST is not set
# CONFIG_USB_GADGET is not set
#
@@ -1390,10 +1438,7 @@ CONFIG_MMC_BLOCK_BOUNCE=y
CONFIG_MMC_SDHCI=m
# CONFIG_MMC_SDHCI_PCI is not set
CONFIG_MMC_SDHCI_PLTFM=m
-# CONFIG_MMC_AT91 is not set
-# CONFIG_MMC_ATMELMCI is not set
# CONFIG_MMC_TIFM_SD is not set
-# CONFIG_MMC_TMIO is not set
# CONFIG_MMC_CB710 is not set
# CONFIG_MMC_VIA_SDMMC is not set
# CONFIG_MEMSTICK is not set
@@ -1460,23 +1505,21 @@ CONFIG_RTC_DRV_RS5C372=y
# CONFIG_RTC_DRV_SH is not set
# CONFIG_RTC_DRV_GENERIC is not set
CONFIG_DMADEVICES=y
+# CONFIG_DMADEVICES_DEBUG is not set
#
# DMA Devices
#
+# CONFIG_TIMB_DMA is not set
# CONFIG_AUXDISPLAY is not set
CONFIG_UIO=m
# CONFIG_UIO_CIF is not set
# CONFIG_UIO_PDRV is not set
# CONFIG_UIO_PDRV_GENIRQ is not set
-# CONFIG_UIO_SMX is not set
# CONFIG_UIO_AEC is not set
# CONFIG_UIO_SERCOS3 is not set
# CONFIG_UIO_PCI_GENERIC is not set
-
-#
-# TI VLYNQ
-#
+# CONFIG_UIO_NETX is not set
# CONFIG_STAGING is not set
#
@@ -1556,6 +1599,7 @@ CONFIG_MISC_FILESYSTEMS=y
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
# CONFIG_JFFS2_FS is not set
+# CONFIG_LOGFS is not set
# CONFIG_CRAMFS is not set
# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
@@ -1586,6 +1630,7 @@ CONFIG_SUNRPC_GSS=y
CONFIG_RPCSEC_GSS_KRB5=y
# CONFIG_RPCSEC_GSS_SPKM3 is not set
# CONFIG_SMB_FS is not set
+# CONFIG_CEPH_FS is not set
# CONFIG_CIFS is not set
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
@@ -1651,61 +1696,75 @@ CONFIG_MAGIC_SYSRQ=y
# CONFIG_UNUSED_SYMBOLS is not set
CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
-# CONFIG_DEBUG_KERNEL is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_SHIRQ is not set
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+CONFIG_DETECT_HUNG_TASK=y
+# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
CONFIG_SCHED_DEBUG=y
CONFIG_SCHEDSTATS=y
-CONFIG_TRACE_IRQFLAGS=y
+# CONFIG_TIMER_STATS is not set
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_DEBUG_SLAB is not set
+CONFIG_DEBUG_KMEMLEAK=y
+CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE=400
+# CONFIG_DEBUG_KMEMLEAK_TEST is not set
+CONFIG_DEBUG_PREEMPT=y
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_LOCK_STAT is not set
+CONFIG_DEBUG_SPINLOCK_SLEEP=y
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
CONFIG_STACKTRACE=y
+# CONFIG_DEBUG_KOBJECT is not set
CONFIG_DEBUG_BUGVERBOSE=y
+CONFIG_DEBUG_INFO=y
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
# CONFIG_DEBUG_MEMORY_INIT is not set
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_DEBUG_SG is not set
+# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_DEBUG_CREDENTIALS is not set
CONFIG_FRAME_POINTER=y
+# CONFIG_RCU_TORTURE_TEST is not set
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
+# CONFIG_LKDTM is not set
+# CONFIG_FAULT_INJECTION is not set
CONFIG_LATENCYTOP=y
CONFIG_SYSCTL_SYSCALL_CHECK=y
-CONFIG_NOP_TRACER=y
-CONFIG_HAVE_FTRACE_NMI_ENTER=y
+# CONFIG_PAGE_POISONING is not set
CONFIG_HAVE_FUNCTION_TRACER=y
CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
-CONFIG_TRACER_MAX_TRACE=y
-CONFIG_RING_BUFFER=y
-CONFIG_FTRACE_NMI_ENTER=y
-CONFIG_EVENT_TRACING=y
-CONFIG_CONTEXT_SWITCH_TRACER=y
-CONFIG_RING_BUFFER_ALLOW_SWAP=y
-CONFIG_TRACING=y
-CONFIG_GENERIC_TRACER=y
CONFIG_TRACING_SUPPORT=y
-CONFIG_FTRACE=y
-CONFIG_FUNCTION_TRACER=y
-CONFIG_FUNCTION_GRAPH_TRACER=y
-CONFIG_IRQSOFF_TRACER=y
-# CONFIG_PREEMPT_TRACER is not set
-CONFIG_SCHED_TRACER=y
-# CONFIG_FTRACE_SYSCALLS is not set
-# CONFIG_BOOT_TRACER is not set
-CONFIG_BRANCH_PROFILE_NONE=y
-# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
-# CONFIG_PROFILE_ALL_BRANCHES is not set
-CONFIG_STACK_TRACER=y
-CONFIG_KMEMTRACE=y
-CONFIG_WORKQUEUE_TRACER=y
-# CONFIG_BLK_DEV_IO_TRACE is not set
-CONFIG_DYNAMIC_FTRACE=y
-# CONFIG_FUNCTION_PROFILER is not set
-CONFIG_FTRACE_MCOUNT_RECORD=y
-# CONFIG_FTRACE_STARTUP_TEST is not set
-# CONFIG_RING_BUFFER_BENCHMARK is not set
+# CONFIG_FTRACE is not set
# CONFIG_DYNAMIC_DEBUG is not set
# CONFIG_DMA_API_DEBUG is not set
+# CONFIG_ATOMIC64_SELFTEST is not set
# CONFIG_SAMPLES is not set
CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_KGDB is not set
# CONFIG_SH_STANDARD_BIOS is not set
-CONFIG_DWARF_UNWINDER=y
-CONFIG_MCOUNT=y
+# CONFIG_STACK_DEBUG is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+# CONFIG_4KSTACKS is not set
+CONFIG_DUMP_CODE=y
+# CONFIG_DWARF_UNWINDER is not set
+# CONFIG_SH_NO_BSS_INIT is not set
#
# Security options
@@ -1815,7 +1874,8 @@ CONFIG_CRYPTO_DES=y
#
# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
-CONFIG_BINARY_PRINTF=y
+# CONFIG_VIRTUALIZATION is not set
+# CONFIG_BINARY_PRINTF is not set
#
# Library routines
diff --git a/arch/sh/configs/sh7785lcr_defconfig b/arch/sh/configs/sh7785lcr_defconfig
index f196e87c7665..30f38c2767bf 100644
--- a/arch/sh/configs/sh7785lcr_defconfig
+++ b/arch/sh/configs/sh7785lcr_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.33-rc2
-# Mon Jan 4 15:09:09 2010
+# Linux kernel version: 2.6.34-rc5
+# Tue May 18 17:27:53 2010
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -13,8 +13,8 @@ CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
-CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_IRQ_PER_CPU=y
+CONFIG_SPARSE_IRQ=y
# CONFIG_GENERIC_GPIO is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -33,6 +33,7 @@ CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DMA_NONCOHERENT=y
+CONFIG_NEED_DMA_MAP_STATE=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
CONFIG_CONSTRUCTORS=y
@@ -48,9 +49,11 @@ CONFIG_LOCALVERSION_AUTO=y
CONFIG_HAVE_KERNEL_GZIP=y
CONFIG_HAVE_KERNEL_BZIP2=y
CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_LZO=y
CONFIG_KERNEL_GZIP=y
# CONFIG_KERNEL_BZIP2 is not set
# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_LZO is not set
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
@@ -73,11 +76,6 @@ CONFIG_RCU_FANOUT=32
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_GROUP_SCHED=y
-CONFIG_FAIR_GROUP_SCHED=y
-# CONFIG_RT_GROUP_SCHED is not set
-CONFIG_USER_SCHED=y
-# CONFIG_CGROUP_SCHED is not set
# CONFIG_CGROUPS is not set
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
@@ -124,13 +122,13 @@ CONFIG_PROFILING=y
# CONFIG_OPROFILE is not set
CONFIG_HAVE_OPROFILE=y
# CONFIG_KPROBES is not set
-CONFIG_HAVE_IOREMAP_PROT=y
CONFIG_HAVE_KPROBES=y
CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_DMA_ATTRS=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+CONFIG_HAVE_HW_BREAKPOINT=y
#
# GCOV-based kernel profiling
@@ -244,8 +242,8 @@ CONFIG_FORCE_MAX_ZONEORDER=11
CONFIG_MEMORY_START=0x08000000
CONFIG_MEMORY_SIZE=0x08000000
CONFIG_29BIT=y
-# CONFIG_PMB_ENABLE is not set
-# CONFIG_X2TLB is not set
+# CONFIG_PMB is not set
+CONFIG_X2TLB=y
CONFIG_VSYSCALL=y
# CONFIG_NUMA is not set
CONFIG_ARCH_FLATMEM_ENABLE=y
@@ -254,6 +252,8 @@ CONFIG_ARCH_SPARSEMEM_DEFAULT=y
CONFIG_MAX_ACTIVE_REGIONS=1
CONFIG_ARCH_POPULATES_NODE_MAP=y
CONFIG_ARCH_SELECT_MEMORY_MODEL=y
+CONFIG_IOREMAP_FIXED=y
+CONFIG_UNCACHED_MAPPING=y
CONFIG_PAGE_SIZE_4KB=y
# CONFIG_PAGE_SIZE_8KB is not set
# CONFIG_PAGE_SIZE_16KB is not set
@@ -269,7 +269,7 @@ CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
-CONFIG_NR_QUICK=2
+CONFIG_NR_QUICK=1
# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
@@ -287,6 +287,7 @@ CONFIG_CPU_LITTLE_ENDIAN=y
# CONFIG_CPU_BIG_ENDIAN is not set
CONFIG_SH_FPU=y
CONFIG_SH_STORE_QUEUES=y
+# CONFIG_SPECULATIVE_EXECUTION is not set
CONFIG_CPU_HAS_INTEVT=y
CONFIG_CPU_HAS_SR_RB=y
CONFIG_CPU_HAS_FPU=y
@@ -344,7 +345,7 @@ CONFIG_KEXEC=y
# CONFIG_PREEMPT_VOLUNTARY is not set
CONFIG_PREEMPT=y
CONFIG_GUSA=y
-# CONFIG_SPARSE_IRQ is not set
+# CONFIG_INTC_USERIMASK is not set
#
# Boot options
@@ -359,9 +360,9 @@ CONFIG_ENTRY_OFFSET=0x00001000
# Bus options
#
CONFIG_PCI=y
+CONFIG_PCI_DOMAINS=y
# CONFIG_PCIEPORTBUS is not set
# CONFIG_ARCH_SUPPORTS_MSI is not set
-CONFIG_PCI_LEGACY=y
# CONFIG_PCI_DEBUG is not set
# CONFIG_PCI_STUB is not set
# CONFIG_PCI_IOV is not set
@@ -387,7 +388,6 @@ CONFIG_NET=y
# Networking options
#
CONFIG_PACKET=y
-# CONFIG_PACKET_MMAP is not set
CONFIG_UNIX=y
CONFIG_XFRM=y
# CONFIG_XFRM_USER is not set
@@ -598,6 +598,7 @@ CONFIG_HAVE_IDE=y
#
# SCSI device support
#
+CONFIG_SCSI_MOD=y
# CONFIG_RAID_ATTRS is not set
CONFIG_SCSI=y
CONFIG_SCSI_DMA=y
@@ -671,6 +672,7 @@ CONFIG_SATA_SIL=y
# CONFIG_PATA_IT821X is not set
# CONFIG_PATA_IT8213 is not set
# CONFIG_PATA_JMICRON is not set
+# CONFIG_PATA_LEGACY is not set
# CONFIG_PATA_TRIFLEX is not set
# CONFIG_PATA_MARVELL is not set
# CONFIG_PATA_MPIIX is not set
@@ -707,7 +709,7 @@ CONFIG_SATA_SIL=y
#
#
-# See the help texts for more information.
+# The newer stack is recommended.
#
# CONFIG_FIREWIRE is not set
# CONFIG_IEEE1394 is not set
@@ -766,6 +768,7 @@ CONFIG_WLAN=y
# CONFIG_USB_PEGASUS is not set
# CONFIG_USB_RTL8150 is not set
# CONFIG_USB_USBNET is not set
+# CONFIG_USB_IPHETH is not set
# CONFIG_WAN is not set
# CONFIG_FDDI is not set
# CONFIG_HIPPI is not set
@@ -851,6 +854,7 @@ CONFIG_SERIAL_SH_SCI_CONSOLE=y
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
# CONFIG_SERIAL_JSM is not set
+# CONFIG_SERIAL_TIMBERDALE is not set
CONFIG_UNIX98_PTYS=y
# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
CONFIG_LEGACY_PTYS=y
@@ -899,6 +903,7 @@ CONFIG_I2C_ALGOPCA=y
# CONFIG_I2C_OCORES is not set
# CONFIG_I2C_SH_MOBILE is not set
# CONFIG_I2C_SIMTEC is not set
+# CONFIG_I2C_XILINX is not set
#
# External I2C/SMBus adapter drivers
@@ -912,15 +917,9 @@ CONFIG_I2C_ALGOPCA=y
#
CONFIG_I2C_PCA_PLATFORM=y
# CONFIG_I2C_STUB is not set
-
-#
-# Miscellaneous I2C Chip support
-#
-# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
# CONFIG_I2C_DEBUG_BUS is not set
-# CONFIG_I2C_DEBUG_CHIP is not set
# CONFIG_SPI is not set
#
@@ -943,6 +942,7 @@ CONFIG_SSB_POSSIBLE=y
# Multifunction device drivers
#
# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_88PM860X is not set
CONFIG_MFD_SM501=y
# CONFIG_MFD_SH_MOBILE_SDHI is not set
# CONFIG_HTC_PASIC3 is not set
@@ -950,12 +950,14 @@ CONFIG_MFD_SM501=y
# CONFIG_MFD_TMIO is not set
# CONFIG_PMIC_DA903X is not set
# CONFIG_PMIC_ADP5520 is not set
+# CONFIG_MFD_MAX8925 is not set
# CONFIG_MFD_WM8400 is not set
# CONFIG_MFD_WM831X is not set
# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_WM8994 is not set
# CONFIG_MFD_PCF50633 is not set
# CONFIG_AB3100_CORE is not set
-# CONFIG_MFD_88PM8607 is not set
+# CONFIG_LPC_SCH is not set
# CONFIG_REGULATOR is not set
# CONFIG_MEDIA_SUPPORT is not set
@@ -963,6 +965,7 @@ CONFIG_MFD_SM501=y
# Graphics support
#
CONFIG_VGA_ARB=y
+CONFIG_VGA_ARB_MAX_GPUS=16
# CONFIG_DRM is not set
# CONFIG_VGASTATE is not set
# CONFIG_VIDEO_OUTPUT_CONTROL is not set
@@ -1059,6 +1062,7 @@ CONFIG_USB_HID=y
#
# Special HID drivers
#
+# CONFIG_HID_3M_PCT is not set
CONFIG_HID_A4TECH=y
CONFIG_HID_APPLE=y
CONFIG_HID_BELKIN=y
@@ -1074,14 +1078,19 @@ CONFIG_HID_GYRATION=y
CONFIG_HID_LOGITECH=y
# CONFIG_LOGITECH_FF is not set
# CONFIG_LOGIRUMBLEPAD2_FF is not set
+# CONFIG_LOGIG940_FF is not set
CONFIG_HID_MICROSOFT=y
+# CONFIG_HID_MOSART is not set
CONFIG_HID_MONTEREY=y
# CONFIG_HID_NTRIG is not set
+# CONFIG_HID_ORTEK is not set
CONFIG_HID_PANTHERLORD=y
# CONFIG_PANTHERLORD_FF is not set
CONFIG_HID_PETALYNX=y
+# CONFIG_HID_QUANTA is not set
CONFIG_HID_SAMSUNG=y
CONFIG_HID_SONY=y
+# CONFIG_HID_STANTUM is not set
CONFIG_HID_SUNPLUS=y
# CONFIG_HID_GREENASIA is not set
# CONFIG_HID_SMARTJOYPLUS is not set
@@ -1182,7 +1191,6 @@ CONFIG_USB_STORAGE=y
# CONFIG_USB_RIO500 is not set
# CONFIG_USB_LEGOTOWER is not set
# CONFIG_USB_LCD is not set
-# CONFIG_USB_BERRY_CHARGE is not set
# CONFIG_USB_LED is not set
# CONFIG_USB_CYPRESS_CY7C63 is not set
# CONFIG_USB_CYTHERM is not set
@@ -1195,7 +1203,6 @@ CONFIG_USB_STORAGE=y
# CONFIG_USB_IOWARRIOR is not set
CONFIG_USB_TEST=m
# CONFIG_USB_ISIGHTFW is not set
-# CONFIG_USB_VST is not set
# CONFIG_USB_GADGET is not set
#
@@ -1353,6 +1360,7 @@ CONFIG_MISC_FILESYSTEMS=y
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
# CONFIG_JFFS2_FS is not set
+# CONFIG_LOGFS is not set
# CONFIG_CRAMFS is not set
# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
@@ -1383,6 +1391,7 @@ CONFIG_SUNRPC_GSS=y
CONFIG_RPCSEC_GSS_KRB5=y
# CONFIG_RPCSEC_GSS_SPKM3 is not set
# CONFIG_SMB_FS is not set
+# CONFIG_CEPH_FS is not set
# CONFIG_CIFS is not set
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
@@ -1461,6 +1470,7 @@ CONFIG_SCHED_DEBUG=y
# CONFIG_TIMER_STATS is not set
# CONFIG_DEBUG_OBJECTS is not set
# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_KMEMLEAK is not set
CONFIG_DEBUG_PREEMPT=y
# CONFIG_DEBUG_RT_MUTEXES is not set
# CONFIG_RT_MUTEX_TESTER is not set
@@ -1509,6 +1519,7 @@ CONFIG_FTRACE=y
CONFIG_BRANCH_PROFILE_NONE=y
# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
# CONFIG_PROFILE_ALL_BRANCHES is not set
+# CONFIG_KSYM_TRACER is not set
# CONFIG_STACK_TRACER is not set
# CONFIG_KMEMTRACE is not set
# CONFIG_WORKQUEUE_TRACER is not set
@@ -1633,6 +1644,7 @@ CONFIG_CRYPTO_DES=y
#
# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
+# CONFIG_VIRTUALIZATION is not set
# CONFIG_BINARY_PRINTF is not set
#
diff --git a/arch/sh/configs/shmin_defconfig b/arch/sh/configs/shmin_defconfig
index 45441c0ab30c..4ba2705c7a4a 100644
--- a/arch/sh/configs/shmin_defconfig
+++ b/arch/sh/configs/shmin_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.33-rc2
-# Mon Jan 4 15:10:09 2010
+# Linux kernel version: 2.6.34-rc5
+# Tue May 18 17:32:23 2010
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -12,8 +12,8 @@ CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
-CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_IRQ_PER_CPU=y
+CONFIG_SPARSE_IRQ=y
# CONFIG_GENERIC_GPIO is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -29,6 +29,7 @@ CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DMA_NONCOHERENT=y
+CONFIG_NEED_DMA_MAP_STATE=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
CONFIG_CONSTRUCTORS=y
@@ -43,9 +44,11 @@ CONFIG_LOCALVERSION_AUTO=y
CONFIG_HAVE_KERNEL_GZIP=y
CONFIG_HAVE_KERNEL_BZIP2=y
CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_LZO=y
CONFIG_KERNEL_GZIP=y
# CONFIG_KERNEL_BZIP2 is not set
# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_LZO is not set
# CONFIG_SWAP is not set
# CONFIG_SYSVIPC is not set
# CONFIG_POSIX_MQUEUE is not set
@@ -65,7 +68,6 @@ CONFIG_RCU_FANOUT=32
# CONFIG_TREE_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_GROUP_SCHED is not set
# CONFIG_CGROUPS is not set
# CONFIG_RELAY is not set
# CONFIG_NAMESPACES is not set
@@ -95,7 +97,7 @@ CONFIG_PERF_USE_VMALLOC=y
#
# Kernel Performance Events And Counters
#
-# CONFIG_PERF_EVENTS is not set
+CONFIG_PERF_EVENTS=y
# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_COMPAT_BRK=y
@@ -111,6 +113,7 @@ CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_DMA_ATTRS=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+CONFIG_HAVE_HW_BREAKPOINT=y
#
# GCOV-based kernel profiling
@@ -222,6 +225,7 @@ CONFIG_ARCH_SPARSEMEM_DEFAULT=y
CONFIG_MAX_ACTIVE_REGIONS=1
CONFIG_ARCH_POPULATES_NODE_MAP=y
CONFIG_ARCH_SELECT_MEMORY_MODEL=y
+CONFIG_UNCACHED_MAPPING=y
CONFIG_PAGE_SIZE_4KB=y
# CONFIG_PAGE_SIZE_8KB is not set
# CONFIG_PAGE_SIZE_16KB is not set
@@ -237,7 +241,7 @@ CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
-CONFIG_NR_QUICK=2
+CONFIG_NR_QUICK=1
# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
@@ -312,7 +316,6 @@ CONFIG_PREEMPT_NONE=y
# CONFIG_PREEMPT is not set
CONFIG_GUSA=y
# CONFIG_GUSA_RB is not set
-# CONFIG_SPARSE_IRQ is not set
#
# Boot options
@@ -544,6 +547,7 @@ CONFIG_HAVE_IDE=y
#
# SCSI device support
#
+CONFIG_SCSI_MOD=y
# CONFIG_RAID_ATTRS is not set
# CONFIG_SCSI is not set
# CONFIG_SCSI_DMA is not set
@@ -625,6 +629,7 @@ CONFIG_SERIAL_SH_SCI_NR_UARTS=2
CONFIG_SERIAL_SH_SCI_CONSOLE=y
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_TIMBERDALE is not set
CONFIG_UNIX98_PTYS=y
# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
CONFIG_LEGACY_PTYS=y
@@ -719,7 +724,6 @@ CONFIG_RTC_LIB=y
# CONFIG_EXT2_FS is not set
# CONFIG_EXT3_FS is not set
# CONFIG_EXT4_FS is not set
-CONFIG_EXT4_USE_FOR_EXT23=y
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
@@ -773,6 +777,7 @@ CONFIG_MISC_FILESYSTEMS=y
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
# CONFIG_JFFS2_FS is not set
+# CONFIG_LOGFS is not set
CONFIG_CRAMFS=y
# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
@@ -797,6 +802,7 @@ CONFIG_SUNRPC=y
# CONFIG_RPCSEC_GSS_KRB5 is not set
# CONFIG_RPCSEC_GSS_SPKM3 is not set
# CONFIG_SMB_FS is not set
+# CONFIG_CEPH_FS is not set
# CONFIG_CIFS is not set
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
@@ -936,6 +942,7 @@ CONFIG_CRYPTO=y
#
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_HW=y
+# CONFIG_VIRTUALIZATION is not set
# CONFIG_BINARY_PRINTF is not set
#
diff --git a/arch/sh/configs/shx3_defconfig b/arch/sh/configs/shx3_defconfig
index ecf50cda4cbc..42f6bd34440d 100644
--- a/arch/sh/configs/shx3_defconfig
+++ b/arch/sh/configs/shx3_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.33-rc2
-# Mon Jan 4 15:10:45 2010
+# Linux kernel version: 2.6.34-rc5
+# Tue May 18 18:08:45 2010
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -13,8 +13,8 @@ CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
-CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_IRQ_PER_CPU=y
+CONFIG_SPARSE_IRQ=y
# CONFIG_GENERIC_GPIO is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -28,6 +28,7 @@ CONFIG_SYS_SUPPORTS_NUMA=y
CONFIG_SYS_SUPPORTS_TMU=y
CONFIG_STACKTRACE_SUPPORT=y
CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_HAVE_LATENCYTOP_SUPPORT=y
# CONFIG_ARCH_HAS_ILOG2_U32 is not set
# CONFIG_ARCH_HAS_ILOG2_U64 is not set
CONFIG_ARCH_NO_VIRT_TO_BUS=y
@@ -35,6 +36,7 @@ CONFIG_ARCH_HAS_DEFAULT_IDLE=y
CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DMA_COHERENT=y
# CONFIG_DMA_NONCOHERENT is not set
+# CONFIG_NEED_DMA_MAP_STATE is not set
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
CONFIG_CONSTRUCTORS=y
@@ -49,9 +51,11 @@ CONFIG_LOCALVERSION_AUTO=y
CONFIG_HAVE_KERNEL_GZIP=y
CONFIG_HAVE_KERNEL_BZIP2=y
CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_LZO=y
CONFIG_KERNEL_GZIP=y
# CONFIG_KERNEL_BZIP2 is not set
# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_LZO is not set
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
@@ -73,15 +77,11 @@ CONFIG_TREE_RCU=y
CONFIG_RCU_TRACE=y
CONFIG_RCU_FANOUT=32
# CONFIG_RCU_FANOUT_EXACT is not set
+# CONFIG_RCU_FAST_NO_HZ is not set
CONFIG_TREE_RCU_TRACE=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_GROUP_SCHED=y
-CONFIG_FAIR_GROUP_SCHED=y
-CONFIG_RT_GROUP_SCHED=y
-CONFIG_USER_SCHED=y
-# CONFIG_CGROUP_SCHED is not set
CONFIG_CGROUPS=y
# CONFIG_CGROUP_DEBUG is not set
CONFIG_CGROUP_NS=y
@@ -92,6 +92,7 @@ CONFIG_CGROUP_CPUACCT=y
CONFIG_RESOURCE_COUNTERS=y
CONFIG_CGROUP_MEM_RES_CTLR=y
# CONFIG_CGROUP_MEM_RES_CTLR_SWAP is not set
+# CONFIG_CGROUP_SCHED is not set
CONFIG_MM_OWNER=y
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
@@ -131,7 +132,6 @@ CONFIG_PERF_USE_VMALLOC=y
# Kernel Performance Events And Counters
#
CONFIG_PERF_EVENTS=y
-CONFIG_EVENT_PROFILE=y
# CONFIG_PERF_COUNTERS is not set
# CONFIG_DEBUG_PERF_USE_VMALLOC is not set
CONFIG_VM_EVENT_COUNTERS=y
@@ -140,12 +140,10 @@ CONFIG_COMPAT_BRK=y
# CONFIG_SLUB is not set
CONFIG_SLOB=y
CONFIG_PROFILING=y
-CONFIG_TRACEPOINTS=y
CONFIG_OPROFILE=y
CONFIG_HAVE_OPROFILE=y
CONFIG_KPROBES=y
CONFIG_KRETPROBES=y
-CONFIG_HAVE_IOREMAP_PROT=y
CONFIG_HAVE_KPROBES=y
CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
@@ -153,6 +151,7 @@ CONFIG_HAVE_DMA_ATTRS=y
CONFIG_USE_GENERIC_SMP_HELPERS=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+CONFIG_HAVE_HW_BREAKPOINT=y
#
# GCOV-based kernel profiling
@@ -173,7 +172,6 @@ CONFIG_BLOCK=y
CONFIG_LBDAF=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_BLK_DEV_INTEGRITY is not set
-# CONFIG_BLK_CGROUP is not set
#
# IO Schedulers
@@ -269,8 +267,8 @@ CONFIG_FORCE_MAX_ZONEORDER=7
CONFIG_MEMORY_START=0x0c000000
CONFIG_MEMORY_SIZE=0x04000000
CONFIG_29BIT=y
-# CONFIG_PMB_ENABLE is not set
-# CONFIG_X2TLB is not set
+# CONFIG_PMB is not set
+CONFIG_X2TLB=y
CONFIG_VSYSCALL=y
CONFIG_NUMA=y
CONFIG_NODES_SHIFT=3
@@ -282,6 +280,8 @@ CONFIG_ARCH_SELECT_MEMORY_MODEL=y
CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y
CONFIG_ARCH_MEMORY_PROBE=y
+CONFIG_IOREMAP_FIXED=y
+CONFIG_UNCACHED_MAPPING=y
# CONFIG_PAGE_SIZE_4KB is not set
# CONFIG_PAGE_SIZE_8KB is not set
# CONFIG_PAGE_SIZE_16KB is not set
@@ -307,7 +307,7 @@ CONFIG_SPLIT_PTLOCK_CPUS=4
CONFIG_MIGRATION=y
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
-CONFIG_NR_QUICK=2
+CONFIG_NR_QUICK=1
# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
CONFIG_SCHED_MC=y
@@ -392,14 +392,14 @@ CONFIG_HZ_250=y
CONFIG_HZ=250
CONFIG_SCHED_HRTICK=y
CONFIG_KEXEC=y
-# CONFIG_CRASH_DUMP is not set
CONFIG_SECCOMP=y
CONFIG_SMP=y
CONFIG_NR_CPUS=4
+# CONFIG_HOTPLUG_CPU is not set
# CONFIG_PREEMPT_NONE is not set
# CONFIG_PREEMPT_VOLUNTARY is not set
CONFIG_PREEMPT=y
-# CONFIG_SPARSE_IRQ is not set
+# CONFIG_INTC_USERIMASK is not set
#
# Boot options
@@ -518,7 +518,6 @@ CONFIG_IPV6_NDISC_NODETYPE=y
#
# CONFIG_NET_PKTGEN is not set
# CONFIG_NET_TCPPROBE is not set
-# CONFIG_NET_DROP_MONITOR is not set
# CONFIG_HAMRADIO is not set
CONFIG_CAN=m
CONFIG_CAN_RAW=m
@@ -583,6 +582,7 @@ CONFIG_MISC_DEVICES=y
# CONFIG_ICS932S401 is not set
# CONFIG_ENCLOSURE_SERVICES is not set
# CONFIG_ISL29003 is not set
+# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_DS1682 is not set
# CONFIG_TI_DAC7512 is not set
# CONFIG_C2PORT is not set
@@ -601,6 +601,7 @@ CONFIG_HAVE_IDE=y
#
# SCSI device support
#
+CONFIG_SCSI_MOD=y
# CONFIG_RAID_ATTRS is not set
CONFIG_SCSI=y
CONFIG_SCSI_DMA=y
@@ -693,6 +694,7 @@ CONFIG_WLAN=y
# CONFIG_USB_PEGASUS is not set
# CONFIG_USB_RTL8150 is not set
# CONFIG_USB_USBNET is not set
+# CONFIG_USB_IPHETH is not set
# CONFIG_WAN is not set
# CONFIG_PPP is not set
# CONFIG_SLIP is not set
@@ -734,6 +736,7 @@ CONFIG_SERIAL_SH_SCI_NR_UARTS=2
CONFIG_SERIAL_SH_SCI_CONSOLE=y
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_TIMBERDALE is not set
CONFIG_UNIX98_PTYS=y
# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
CONFIG_LEGACY_PTYS=y
@@ -760,6 +763,7 @@ CONFIG_I2C_HELPER_AUTO=y
# CONFIG_I2C_OCORES is not set
# CONFIG_I2C_SH_MOBILE is not set
# CONFIG_I2C_SIMTEC is not set
+# CONFIG_I2C_XILINX is not set
#
# External I2C/SMBus adapter drivers
@@ -773,15 +777,9 @@ CONFIG_I2C_HELPER_AUTO=y
#
# CONFIG_I2C_PCA_PLATFORM is not set
# CONFIG_I2C_STUB is not set
-
-#
-# Miscellaneous I2C Chip support
-#
-# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
# CONFIG_I2C_DEBUG_BUS is not set
-# CONFIG_I2C_DEBUG_CHIP is not set
CONFIG_SPI=y
# CONFIG_SPI_DEBUG is not set
CONFIG_SPI_MASTER=y
@@ -838,10 +836,9 @@ CONFIG_SSB_POSSIBLE=y
# CONFIG_HTC_PASIC3 is not set
# CONFIG_MFD_TMIO is not set
# CONFIG_MFD_WM8400 is not set
-# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_WM8994 is not set
# CONFIG_MFD_PCF50633 is not set
# CONFIG_MFD_MC13783 is not set
-# CONFIG_AB3100_CORE is not set
# CONFIG_EZX_PCAP is not set
# CONFIG_AB4500_CORE is not set
# CONFIG_REGULATOR is not set
@@ -933,7 +930,6 @@ CONFIG_USB_R8A66597_HCD=m
# CONFIG_USB_RIO500 is not set
# CONFIG_USB_LEGOTOWER is not set
# CONFIG_USB_LCD is not set
-# CONFIG_USB_BERRY_CHARGE is not set
# CONFIG_USB_LED is not set
# CONFIG_USB_CYPRESS_CY7C63 is not set
# CONFIG_USB_CYTHERM is not set
@@ -945,7 +941,6 @@ CONFIG_USB_R8A66597_HCD=m
# CONFIG_USB_IOWARRIOR is not set
# CONFIG_USB_TEST is not set
# CONFIG_USB_ISIGHTFW is not set
-# CONFIG_USB_VST is not set
CONFIG_USB_GADGET=y
# CONFIG_USB_GADGET_DEBUG is not set
# CONFIG_USB_GADGET_DEBUG_FILES is not set
@@ -983,6 +978,7 @@ CONFIG_USB_GADGET_DUALSPEED=y
# CONFIG_USB_MIDI_GADGET is not set
# CONFIG_USB_G_PRINTER is not set
# CONFIG_USB_CDC_COMPOSITE is not set
+# CONFIG_USB_G_NOKIA is not set
# CONFIG_USB_G_MULTI is not set
#
@@ -1065,8 +1061,6 @@ CONFIG_RTC_DRV_SH=y
CONFIG_UIO=m
# CONFIG_UIO_PDRV is not set
# CONFIG_UIO_PDRV_GENIRQ is not set
-# CONFIG_UIO_SMX is not set
-# CONFIG_UIO_SERCOS3 is not set
#
# TI VLYNQ
@@ -1145,6 +1139,7 @@ CONFIG_MISC_FILESYSTEMS=y
# CONFIG_BEFS_FS is not set
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
+# CONFIG_LOGFS is not set
# CONFIG_CRAMFS is not set
# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
@@ -1159,6 +1154,7 @@ CONFIG_NETWORK_FILESYSTEMS=y
# CONFIG_NFS_FS is not set
# CONFIG_NFSD is not set
# CONFIG_SMB_FS is not set
+# CONFIG_CEPH_FS is not set
# CONFIG_CIFS is not set
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
@@ -1246,7 +1242,6 @@ CONFIG_DEBUG_PREEMPT=y
# CONFIG_LOCK_STAT is not set
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
-CONFIG_STACKTRACE=y
# CONFIG_DEBUG_KOBJECT is not set
CONFIG_DEBUG_BUGVERBOSE=y
# CONFIG_DEBUG_INFO is not set
@@ -1266,9 +1261,9 @@ CONFIG_FRAME_POINTER=y
# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
# CONFIG_LKDTM is not set
# CONFIG_FAULT_INJECTION is not set
+# CONFIG_LATENCYTOP is not set
# CONFIG_SYSCTL_SYSCALL_CHECK is not set
# CONFIG_PAGE_POISONING is not set
-CONFIG_NOP_TRACER=y
CONFIG_HAVE_FUNCTION_TRACER=y
CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
@@ -1276,10 +1271,7 @@ CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
CONFIG_RING_BUFFER=y
-CONFIG_EVENT_TRACING=y
-CONFIG_CONTEXT_SWITCH_TRACER=y
CONFIG_RING_BUFFER_ALLOW_SWAP=y
-CONFIG_TRACING=y
CONFIG_TRACING_SUPPORT=y
CONFIG_FTRACE=y
# CONFIG_FUNCTION_TRACER is not set
@@ -1292,6 +1284,7 @@ CONFIG_FTRACE=y
CONFIG_BRANCH_PROFILE_NONE=y
# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
# CONFIG_PROFILE_ALL_BRANCHES is not set
+# CONFIG_KSYM_TRACER is not set
# CONFIG_STACK_TRACER is not set
# CONFIG_KMEMTRACE is not set
# CONFIG_WORKQUEUE_TRACER is not set
@@ -1329,6 +1322,7 @@ CONFIG_CRYPTO=y
# CONFIG_CRYPTO_MANAGER2 is not set
# CONFIG_CRYPTO_GF128MUL is not set
# CONFIG_CRYPTO_NULL is not set
+# CONFIG_CRYPTO_PCRYPT is not set
# CONFIG_CRYPTO_CRYPTD is not set
# CONFIG_CRYPTO_AUTHENC is not set
# CONFIG_CRYPTO_TEST is not set
@@ -1407,7 +1401,8 @@ CONFIG_CRYPTO=y
#
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_HW=y
-CONFIG_BINARY_PRINTF=y
+# CONFIG_VIRTUALIZATION is not set
+# CONFIG_BINARY_PRINTF is not set
#
# Library routines
diff --git a/arch/sh/configs/snapgear_defconfig b/arch/sh/configs/snapgear_defconfig
index 98352d757851..513834d2b8ab 100644
--- a/arch/sh/configs/snapgear_defconfig
+++ b/arch/sh/configs/snapgear_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.33-rc2
-# Mon Jan 4 15:14:18 2010
+# Linux kernel version: 2.6.34-rc5
+# Tue May 18 17:36:53 2010
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -13,8 +13,8 @@ CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
-CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_IRQ_PER_CPU=y
+CONFIG_SPARSE_IRQ=y
# CONFIG_GENERIC_GPIO is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -32,6 +32,7 @@ CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DMA_NONCOHERENT=y
+CONFIG_NEED_DMA_MAP_STATE=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
CONFIG_CONSTRUCTORS=y
@@ -46,9 +47,11 @@ CONFIG_LOCALVERSION_AUTO=y
CONFIG_HAVE_KERNEL_GZIP=y
CONFIG_HAVE_KERNEL_BZIP2=y
CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_LZO=y
CONFIG_KERNEL_GZIP=y
# CONFIG_KERNEL_BZIP2 is not set
# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_LZO is not set
# CONFIG_SWAP is not set
# CONFIG_SYSVIPC is not set
# CONFIG_POSIX_MQUEUE is not set
@@ -68,7 +71,6 @@ CONFIG_RCU_FANOUT=32
# CONFIG_TREE_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_GROUP_SCHED is not set
# CONFIG_CGROUPS is not set
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
@@ -79,6 +81,7 @@ CONFIG_INITRAMFS_SOURCE=""
CONFIG_RD_GZIP=y
# CONFIG_RD_BZIP2 is not set
# CONFIG_RD_LZMA is not set
+# CONFIG_RD_LZO is not set
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_SYSCTL=y
CONFIG_ANON_INODES=y
@@ -105,7 +108,7 @@ CONFIG_PERF_USE_VMALLOC=y
#
# Kernel Performance Events And Counters
#
-# CONFIG_PERF_EVENTS is not set
+CONFIG_PERF_EVENTS=y
# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_PCI_QUIRKS=y
@@ -122,6 +125,7 @@ CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_DMA_ATTRS=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+CONFIG_HAVE_HW_BREAKPOINT=y
#
# GCOV-based kernel profiling
@@ -235,6 +239,7 @@ CONFIG_ARCH_SPARSEMEM_DEFAULT=y
CONFIG_MAX_ACTIVE_REGIONS=1
CONFIG_ARCH_POPULATES_NODE_MAP=y
CONFIG_ARCH_SELECT_MEMORY_MODEL=y
+CONFIG_UNCACHED_MAPPING=y
CONFIG_PAGE_SIZE_4KB=y
# CONFIG_PAGE_SIZE_8KB is not set
# CONFIG_PAGE_SIZE_16KB is not set
@@ -250,7 +255,7 @@ CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
-CONFIG_NR_QUICK=2
+CONFIG_NR_QUICK=1
# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
@@ -335,7 +340,6 @@ CONFIG_PREEMPT_NONE=y
# CONFIG_PREEMPT is not set
CONFIG_GUSA=y
# CONFIG_GUSA_RB is not set
-# CONFIG_SPARSE_IRQ is not set
#
# Boot options
@@ -350,9 +354,9 @@ CONFIG_ENTRY_OFFSET=0x00001000
# Bus options
#
CONFIG_PCI=y
+CONFIG_PCI_DOMAINS=y
# CONFIG_PCIEPORTBUS is not set
# CONFIG_ARCH_SUPPORTS_MSI is not set
-CONFIG_PCI_LEGACY=y
# CONFIG_PCI_STUB is not set
# CONFIG_PCI_IOV is not set
@@ -569,6 +573,7 @@ CONFIG_HAVE_IDE=y
#
# SCSI device support
#
+CONFIG_SCSI_MOD=y
# CONFIG_RAID_ATTRS is not set
# CONFIG_SCSI is not set
# CONFIG_SCSI_DMA is not set
@@ -586,7 +591,7 @@ CONFIG_HAVE_IDE=y
#
#
-# See the help texts for more information.
+# The newer stack is recommended.
#
# CONFIG_FIREWIRE is not set
# CONFIG_IEEE1394 is not set
@@ -626,6 +631,7 @@ CONFIG_NET_PCI=y
# CONFIG_PCNET32 is not set
# CONFIG_AMD8111_ETH is not set
# CONFIG_ADAPTEC_STARFIRE is not set
+# CONFIG_KSZ884X_PCI is not set
# CONFIG_B44 is not set
# CONFIG_FORCEDETH is not set
# CONFIG_E100 is not set
@@ -726,6 +732,7 @@ CONFIG_SERIAL_SH_SCI_CONSOLE=y
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
# CONFIG_SERIAL_JSM is not set
+# CONFIG_SERIAL_TIMBERDALE is not set
CONFIG_UNIX98_PTYS=y
# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
CONFIG_LEGACY_PTYS=y
@@ -764,6 +771,7 @@ CONFIG_SSB_POSSIBLE=y
# CONFIG_MFD_SH_MOBILE_SDHI is not set
# CONFIG_HTC_PASIC3 is not set
# CONFIG_MFD_TMIO is not set
+# CONFIG_LPC_SCH is not set
# CONFIG_REGULATOR is not set
# CONFIG_MEDIA_SUPPORT is not set
@@ -771,6 +779,7 @@ CONFIG_SSB_POSSIBLE=y
# Graphics support
#
CONFIG_VGA_ARB=y
+CONFIG_VGA_ARB_MAX_GPUS=16
# CONFIG_DRM is not set
# CONFIG_VGASTATE is not set
# CONFIG_VIDEO_OUTPUT_CONTROL is not set
@@ -848,7 +857,6 @@ CONFIG_EXT2_FS=y
# CONFIG_EXT2_FS_XIP is not set
# CONFIG_EXT3_FS is not set
# CONFIG_EXT4_FS is not set
-CONFIG_EXT4_USE_FOR_EXT23=y
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
@@ -907,6 +915,7 @@ CONFIG_MISC_FILESYSTEMS=y
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
# CONFIG_JFFS2_FS is not set
+# CONFIG_LOGFS is not set
CONFIG_CRAMFS=y
# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
@@ -925,6 +934,7 @@ CONFIG_NETWORK_FILESYSTEMS=y
# CONFIG_NFS_FS is not set
# CONFIG_NFSD is not set
# CONFIG_SMB_FS is not set
+# CONFIG_CEPH_FS is not set
# CONFIG_CIFS is not set
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
@@ -983,6 +993,7 @@ CONFIG_HAVE_ARCH_KGDB=y
CONFIG_DEFAULT_SECURITY_DAC=y
CONFIG_DEFAULT_SECURITY=""
# CONFIG_CRYPTO is not set
+# CONFIG_VIRTUALIZATION is not set
# CONFIG_BINARY_PRINTF is not set
#
diff --git a/arch/sh/configs/systemh_defconfig b/arch/sh/configs/systemh_defconfig
index 72982e360e3f..14e69a229c0a 100644
--- a/arch/sh/configs/systemh_defconfig
+++ b/arch/sh/configs/systemh_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.33-rc2
-# Mon Jan 4 15:14:50 2010
+# Linux kernel version: 2.6.34-rc5
+# Tue May 18 17:39:19 2010
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -13,8 +13,8 @@ CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
-CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_IRQ_PER_CPU=y
+CONFIG_SPARSE_IRQ=y
# CONFIG_GENERIC_GPIO is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -31,6 +31,7 @@ CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DMA_NONCOHERENT=y
+CONFIG_NEED_DMA_MAP_STATE=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
CONFIG_CONSTRUCTORS=y
@@ -46,9 +47,11 @@ CONFIG_LOCALVERSION_AUTO=y
CONFIG_HAVE_KERNEL_GZIP=y
CONFIG_HAVE_KERNEL_BZIP2=y
CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_LZO=y
CONFIG_KERNEL_GZIP=y
# CONFIG_KERNEL_BZIP2 is not set
# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_LZO is not set
CONFIG_SWAP=y
# CONFIG_SYSVIPC is not set
# CONFIG_BSD_PROCESS_ACCT is not set
@@ -65,7 +68,6 @@ CONFIG_RCU_FANOUT=32
# CONFIG_TREE_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_GROUP_SCHED is not set
# CONFIG_CGROUPS is not set
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
@@ -76,6 +78,7 @@ CONFIG_INITRAMFS_SOURCE=""
CONFIG_RD_GZIP=y
# CONFIG_RD_BZIP2 is not set
# CONFIG_RD_LZMA is not set
+# CONFIG_RD_LZO is not set
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_SYSCTL=y
CONFIG_ANON_INODES=y
@@ -102,7 +105,7 @@ CONFIG_PERF_USE_VMALLOC=y
#
# Kernel Performance Events And Counters
#
-# CONFIG_PERF_EVENTS is not set
+CONFIG_PERF_EVENTS=y
# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_COMPAT_BRK=y
@@ -119,6 +122,7 @@ CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_DMA_ATTRS=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+CONFIG_HAVE_HW_BREAKPOINT=y
#
# GCOV-based kernel profiling
@@ -237,6 +241,7 @@ CONFIG_ARCH_SPARSEMEM_DEFAULT=y
CONFIG_MAX_ACTIVE_REGIONS=1
CONFIG_ARCH_POPULATES_NODE_MAP=y
CONFIG_ARCH_SELECT_MEMORY_MODEL=y
+CONFIG_UNCACHED_MAPPING=y
CONFIG_PAGE_SIZE_4KB=y
# CONFIG_PAGE_SIZE_8KB is not set
# CONFIG_PAGE_SIZE_16KB is not set
@@ -252,7 +257,7 @@ CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
-CONFIG_NR_QUICK=2
+CONFIG_NR_QUICK=1
# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
@@ -332,7 +337,6 @@ CONFIG_HZ=250
CONFIG_PREEMPT=y
CONFIG_GUSA=y
# CONFIG_GUSA_RB is not set
-# CONFIG_SPARSE_IRQ is not set
#
# Boot options
@@ -402,6 +406,7 @@ CONFIG_HAVE_IDE=y
#
# SCSI device support
#
+CONFIG_SCSI_MOD=y
# CONFIG_RAID_ATTRS is not set
# CONFIG_SCSI is not set
# CONFIG_SCSI_DMA is not set
@@ -442,6 +447,7 @@ CONFIG_DEVKMEM=y
# Non-8250 serial port support
#
# CONFIG_SERIAL_SH_SCI is not set
+# CONFIG_SERIAL_TIMBERDALE is not set
CONFIG_UNIX98_PTYS=y
# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
CONFIG_LEGACY_PTYS=y
@@ -552,7 +558,6 @@ CONFIG_RTC_LIB=y
# CONFIG_EXT2_FS is not set
# CONFIG_EXT3_FS is not set
# CONFIG_EXT4_FS is not set
-CONFIG_EXT4_USE_FOR_EXT23=y
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
@@ -609,6 +614,7 @@ CONFIG_MISC_FILESYSTEMS=y
# CONFIG_BEFS_FS is not set
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
+# CONFIG_LOGFS is not set
CONFIG_CRAMFS=y
# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
@@ -676,6 +682,7 @@ CONFIG_HAVE_ARCH_KGDB=y
CONFIG_DEFAULT_SECURITY_DAC=y
CONFIG_DEFAULT_SECURITY=""
# CONFIG_CRYPTO is not set
+# CONFIG_VIRTUALIZATION is not set
# CONFIG_BINARY_PRINTF is not set
#
diff --git a/arch/sh/configs/titan_defconfig b/arch/sh/configs/titan_defconfig
index 78c257053c79..79196b4dfdb1 100644
--- a/arch/sh/configs/titan_defconfig
+++ b/arch/sh/configs/titan_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.33-rc2
-# Mon Jan 4 15:17:20 2010
+# Linux kernel version: 2.6.34-rc5
+# Tue May 18 17:41:12 2010
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -13,8 +13,8 @@ CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
-CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_IRQ_PER_CPU=y
+CONFIG_SPARSE_IRQ=y
# CONFIG_GENERIC_GPIO is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -32,6 +32,7 @@ CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DMA_NONCOHERENT=y
+CONFIG_NEED_DMA_MAP_STATE=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
CONFIG_CONSTRUCTORS=y
@@ -46,9 +47,11 @@ CONFIG_LOCALVERSION=""
CONFIG_HAVE_KERNEL_GZIP=y
CONFIG_HAVE_KERNEL_BZIP2=y
CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_LZO=y
CONFIG_KERNEL_GZIP=y
# CONFIG_KERNEL_BZIP2 is not set
# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_LZO is not set
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
@@ -71,7 +74,6 @@ CONFIG_RCU_FANOUT=32
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=16
-# CONFIG_GROUP_SCHED is not set
# CONFIG_CGROUPS is not set
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
@@ -82,6 +84,7 @@ CONFIG_INITRAMFS_SOURCE=""
CONFIG_RD_GZIP=y
# CONFIG_RD_BZIP2 is not set
# CONFIG_RD_LZMA is not set
+# CONFIG_RD_LZO is not set
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_SYSCTL=y
CONFIG_ANON_INODES=y
@@ -109,8 +112,9 @@ CONFIG_PERF_USE_VMALLOC=y
#
# Kernel Performance Events And Counters
#
-# CONFIG_PERF_EVENTS is not set
+CONFIG_PERF_EVENTS=y
# CONFIG_PERF_COUNTERS is not set
+# CONFIG_DEBUG_PERF_USE_VMALLOC is not set
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_PCI_QUIRKS=y
CONFIG_COMPAT_BRK=y
@@ -127,6 +131,7 @@ CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_DMA_ATTRS=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+CONFIG_HAVE_HW_BREAKPOINT=y
#
# GCOV-based kernel profiling
@@ -245,6 +250,7 @@ CONFIG_ARCH_SPARSEMEM_DEFAULT=y
CONFIG_MAX_ACTIVE_REGIONS=1
CONFIG_ARCH_POPULATES_NODE_MAP=y
CONFIG_ARCH_SELECT_MEMORY_MODEL=y
+CONFIG_UNCACHED_MAPPING=y
CONFIG_PAGE_SIZE_4KB=y
# CONFIG_PAGE_SIZE_8KB is not set
# CONFIG_PAGE_SIZE_16KB is not set
@@ -260,7 +266,7 @@ CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
-CONFIG_NR_QUICK=2
+CONFIG_NR_QUICK=1
# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
@@ -345,7 +351,6 @@ CONFIG_PREEMPT_VOLUNTARY=y
# CONFIG_PREEMPT is not set
CONFIG_GUSA=y
# CONFIG_GUSA_RB is not set
-# CONFIG_SPARSE_IRQ is not set
#
# Boot options
@@ -361,9 +366,9 @@ CONFIG_CMDLINE="console=ttySC1,38400N81 root=/dev/nfs ip=:::::eth1:autoconf rw"
# Bus options
#
CONFIG_PCI=y
+CONFIG_PCI_DOMAINS=y
# CONFIG_PCIEPORTBUS is not set
# CONFIG_ARCH_SUPPORTS_MSI is not set
-CONFIG_PCI_LEGACY=y
# CONFIG_PCI_DEBUG is not set
# CONFIG_PCI_STUB is not set
# CONFIG_PCI_IOV is not set
@@ -392,7 +397,6 @@ CONFIG_NET=y
# Networking options
#
CONFIG_PACKET=y
-CONFIG_PACKET_MMAP=y
CONFIG_UNIX=y
CONFIG_XFRM=y
# CONFIG_XFRM_USER is not set
@@ -561,6 +565,7 @@ CONFIG_IP6_NF_RAW=m
# CONFIG_ATM is not set
CONFIG_STP=y
CONFIG_BRIDGE=y
+CONFIG_BRIDGE_IGMP_SNOOPING=y
# CONFIG_NET_DSA is not set
CONFIG_VLAN_8021Q=y
# CONFIG_VLAN_8021Q_GVRP is not set
@@ -749,6 +754,7 @@ CONFIG_MTD_NAND_IDS=m
# CONFIG_MTD_NAND_CAFE is not set
# CONFIG_MTD_NAND_PLATFORM is not set
# CONFIG_MTD_ALAUDA is not set
+# CONFIG_MTD_NAND_SH_FLCTL is not set
# CONFIG_MTD_ONENAND is not set
#
@@ -768,10 +774,6 @@ CONFIG_BLK_DEV=y
# CONFIG_BLK_DEV_COW_COMMON is not set
CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_CRYPTOLOOP=m
-
-#
-# DRBD disabled because PROC_FS, INET or CONNECTOR not selected
-#
# CONFIG_BLK_DEV_DRBD is not set
# CONFIG_BLK_DEV_NBD is not set
# CONFIG_BLK_DEV_SX8 is not set
@@ -802,6 +804,7 @@ CONFIG_HAVE_IDE=y
#
# SCSI device support
#
+CONFIG_SCSI_MOD=y
# CONFIG_RAID_ATTRS is not set
CONFIG_SCSI=y
CONFIG_SCSI_DMA=y
@@ -892,7 +895,7 @@ CONFIG_SCSI_LOWLEVEL=y
#
#
-# See the help texts for more information.
+# The newer stack is recommended.
#
# CONFIG_FIREWIRE is not set
# CONFIG_IEEE1394 is not set
@@ -951,6 +954,7 @@ CONFIG_NET_PCI=y
# CONFIG_PCNET32 is not set
# CONFIG_AMD8111_ETH is not set
# CONFIG_ADAPTEC_STARFIRE is not set
+# CONFIG_KSZ884X_PCI is not set
# CONFIG_B44 is not set
# CONFIG_FORCEDETH is not set
# CONFIG_E100 is not set
@@ -1002,6 +1006,8 @@ CONFIG_NETDEV_10000=y
# CONFIG_CHELSIO_T1 is not set
CONFIG_CHELSIO_T3_DEPENDS=y
# CONFIG_CHELSIO_T3 is not set
+CONFIG_CHELSIO_T4_DEPENDS=y
+# CONFIG_CHELSIO_T4 is not set
# CONFIG_ENIC is not set
# CONFIG_IXGBE is not set
# CONFIG_IXGB is not set
@@ -1014,6 +1020,7 @@ CONFIG_CHELSIO_T3_DEPENDS=y
# CONFIG_MLX4_CORE is not set
# CONFIG_TEHUTI is not set
# CONFIG_BNX2X is not set
+# CONFIG_QLCNIC is not set
# CONFIG_QLGE is not set
# CONFIG_SFC is not set
# CONFIG_BE2NET is not set
@@ -1040,6 +1047,7 @@ CONFIG_USB_NET_AX8817X=m
CONFIG_USB_NET_CDCETHER=m
# CONFIG_USB_NET_CDC_EEM is not set
# CONFIG_USB_NET_DM9601 is not set
+# CONFIG_USB_NET_SMSC75XX is not set
# CONFIG_USB_NET_SMSC95XX is not set
# CONFIG_USB_NET_GL620A is not set
CONFIG_USB_NET_NET1080=m
@@ -1049,6 +1057,7 @@ CONFIG_USB_NET_PLUSB=m
# CONFIG_USB_NET_CDC_SUBSET is not set
CONFIG_USB_NET_ZAURUS=m
# CONFIG_USB_NET_INT51X1 is not set
+# CONFIG_USB_IPHETH is not set
# CONFIG_WAN is not set
# CONFIG_FDDI is not set
# CONFIG_HIPPI is not set
@@ -1136,6 +1145,7 @@ CONFIG_SERIAL_SH_SCI_CONSOLE=y
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
# CONFIG_SERIAL_JSM is not set
+# CONFIG_SERIAL_TIMBERDALE is not set
CONFIG_UNIX98_PTYS=y
# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
CONFIG_LEGACY_PTYS=y
@@ -1215,6 +1225,7 @@ CONFIG_SSB_POSSIBLE=y
# CONFIG_MFD_SH_MOBILE_SDHI is not set
# CONFIG_HTC_PASIC3 is not set
# CONFIG_MFD_TMIO is not set
+# CONFIG_LPC_SCH is not set
# CONFIG_REGULATOR is not set
# CONFIG_MEDIA_SUPPORT is not set
@@ -1222,6 +1233,7 @@ CONFIG_SSB_POSSIBLE=y
# Graphics support
#
CONFIG_VGA_ARB=y
+CONFIG_VGA_ARB_MAX_GPUS=16
# CONFIG_DRM is not set
# CONFIG_VGASTATE is not set
# CONFIG_VIDEO_OUTPUT_CONTROL is not set
@@ -1372,6 +1384,7 @@ CONFIG_USB_SERIAL_ARK3116=m
# CONFIG_USB_SERIAL_NAVMAN is not set
CONFIG_USB_SERIAL_PL2303=m
# CONFIG_USB_SERIAL_OTI6858 is not set
+# CONFIG_USB_SERIAL_QCAUX is not set
# CONFIG_USB_SERIAL_QUALCOMM is not set
# CONFIG_USB_SERIAL_SPCP8X5 is not set
# CONFIG_USB_SERIAL_HP4X is not set
@@ -1385,6 +1398,7 @@ CONFIG_USB_SERIAL_PL2303=m
# CONFIG_USB_SERIAL_OPTION is not set
# CONFIG_USB_SERIAL_OMNINET is not set
# CONFIG_USB_SERIAL_OPTICON is not set
+# CONFIG_USB_SERIAL_VIVOPAY_SERIAL is not set
# CONFIG_USB_SERIAL_DEBUG is not set
#
@@ -1397,7 +1411,6 @@ CONFIG_USB_SERIAL_PL2303=m
# CONFIG_USB_RIO500 is not set
# CONFIG_USB_LEGOTOWER is not set
# CONFIG_USB_LCD is not set
-# CONFIG_USB_BERRY_CHARGE is not set
# CONFIG_USB_LED is not set
# CONFIG_USB_CYPRESS_CY7C63 is not set
# CONFIG_USB_CYTHERM is not set
@@ -1410,7 +1423,6 @@ CONFIG_USB_SERIAL_PL2303=m
# CONFIG_USB_IOWARRIOR is not set
# CONFIG_USB_TEST is not set
# CONFIG_USB_ISIGHTFW is not set
-# CONFIG_USB_VST is not set
# CONFIG_USB_GADGET is not set
#
@@ -1552,6 +1564,7 @@ CONFIG_MISC_FILESYSTEMS=y
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
# CONFIG_JFFS2_FS is not set
+# CONFIG_LOGFS is not set
# CONFIG_CRAMFS is not set
# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
@@ -1585,6 +1598,7 @@ CONFIG_SUNRPC=y
# CONFIG_RPCSEC_GSS_SPKM3 is not set
CONFIG_SMB_FS=m
# CONFIG_SMB_NLS_DEFAULT is not set
+# CONFIG_CEPH_FS is not set
CONFIG_CIFS=m
# CONFIG_CIFS_STATS is not set
CONFIG_CIFS_WEAK_PW_HASH=y
@@ -1680,6 +1694,7 @@ CONFIG_SCHED_DEBUG=y
# CONFIG_TIMER_STATS is not set
# CONFIG_DEBUG_OBJECTS is not set
# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_KMEMLEAK is not set
# CONFIG_DEBUG_RT_MUTEXES is not set
# CONFIG_RT_MUTEX_TESTER is not set
# CONFIG_DEBUG_SPINLOCK is not set
@@ -1726,6 +1741,7 @@ CONFIG_FTRACE=y
CONFIG_BRANCH_PROFILE_NONE=y
# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
# CONFIG_PROFILE_ALL_BRANCHES is not set
+# CONFIG_KSYM_TRACER is not set
# CONFIG_STACK_TRACER is not set
# CONFIG_KMEMTRACE is not set
# CONFIG_WORKQUEUE_TRACER is not set
@@ -1853,6 +1869,7 @@ CONFIG_CRYPTO_DEFLATE=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_HW=y
# CONFIG_CRYPTO_DEV_HIFN_795X is not set
+# CONFIG_VIRTUALIZATION is not set
# CONFIG_BINARY_PRINTF is not set
#
diff --git a/arch/sh/configs/ul2_defconfig b/arch/sh/configs/ul2_defconfig
index 4fa03bf086dd..17b7258dcde5 100644
--- a/arch/sh/configs/ul2_defconfig
+++ b/arch/sh/configs/ul2_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.33-rc2
-# Mon Jan 4 15:18:53 2010
+# Linux kernel version: 2.6.34-rc5
+# Tue May 18 18:10:50 2010
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -13,8 +13,8 @@ CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
-CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_IRQ_PER_CPU=y
+CONFIG_SPARSE_IRQ=y
# CONFIG_GENERIC_GPIO is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -33,6 +33,7 @@ CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DMA_NONCOHERENT=y
+CONFIG_NEED_DMA_MAP_STATE=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
CONFIG_CONSTRUCTORS=y
@@ -48,9 +49,11 @@ CONFIG_LOCALVERSION_AUTO=y
CONFIG_HAVE_KERNEL_GZIP=y
CONFIG_HAVE_KERNEL_BZIP2=y
CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_LZO=y
CONFIG_KERNEL_GZIP=y
# CONFIG_KERNEL_BZIP2 is not set
# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_LZO is not set
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
@@ -73,7 +76,6 @@ CONFIG_RCU_FANOUT=32
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_GROUP_SCHED is not set
# CONFIG_CGROUPS is not set
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
@@ -84,6 +86,7 @@ CONFIG_INITRAMFS_SOURCE=""
CONFIG_RD_GZIP=y
# CONFIG_RD_BZIP2 is not set
# CONFIG_RD_LZMA is not set
+# CONFIG_RD_LZO is not set
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_SYSCTL=y
CONFIG_ANON_INODES=y
@@ -122,13 +125,13 @@ CONFIG_PROFILING=y
# CONFIG_OPROFILE is not set
CONFIG_HAVE_OPROFILE=y
# CONFIG_KPROBES is not set
-CONFIG_HAVE_IOREMAP_PROT=y
CONFIG_HAVE_KPROBES=y
CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_DMA_ATTRS=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+CONFIG_HAVE_HW_BREAKPOINT=y
#
# GCOV-based kernel profiling
@@ -244,7 +247,7 @@ CONFIG_FORCE_MAX_ZONEORDER=11
CONFIG_MEMORY_START=0x08000000
CONFIG_MEMORY_SIZE=0x01f00000
CONFIG_29BIT=y
-# CONFIG_X2TLB is not set
+CONFIG_X2TLB=y
CONFIG_VSYSCALL=y
CONFIG_NUMA=y
CONFIG_NODES_SHIFT=1
@@ -255,6 +258,8 @@ CONFIG_ARCH_POPULATES_NODE_MAP=y
CONFIG_ARCH_SELECT_MEMORY_MODEL=y
CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y
+CONFIG_IOREMAP_FIXED=y
+CONFIG_UNCACHED_MAPPING=y
CONFIG_PAGE_SIZE_4KB=y
# CONFIG_PAGE_SIZE_8KB is not set
# CONFIG_PAGE_SIZE_16KB is not set
@@ -278,7 +283,7 @@ CONFIG_SPLIT_PTLOCK_CPUS=4
# CONFIG_MIGRATION is not set
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
-CONFIG_NR_QUICK=2
+CONFIG_NR_QUICK=1
# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
@@ -352,7 +357,7 @@ CONFIG_KEXEC=y
# CONFIG_PREEMPT_VOLUNTARY is not set
CONFIG_PREEMPT=y
CONFIG_GUSA=y
-# CONFIG_SPARSE_IRQ is not set
+# CONFIG_INTC_USERIMASK is not set
#
# Boot options
@@ -388,6 +393,7 @@ CONFIG_SUSPEND=y
CONFIG_SUSPEND_FREEZER=y
# CONFIG_HIBERNATION is not set
CONFIG_PM_RUNTIME=y
+CONFIG_PM_OPS=y
# CONFIG_CPU_IDLE is not set
CONFIG_NET=y
@@ -395,7 +401,6 @@ CONFIG_NET=y
# Networking options
#
CONFIG_PACKET=y
-CONFIG_PACKET_MMAP=y
CONFIG_UNIX=y
CONFIG_XFRM=y
# CONFIG_XFRM_USER is not set
@@ -473,7 +478,7 @@ CONFIG_CFG80211=y
# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set
# CONFIG_CFG80211_REG_DEBUG is not set
CONFIG_CFG80211_DEFAULT_PS=y
-# CONFIG_WIRELESS_OLD_REGULATORY is not set
+# CONFIG_CFG80211_INTERNAL_REGDB is not set
CONFIG_CFG80211_WEXT=y
CONFIG_WIRELESS_EXT_SYSFS=y
CONFIG_LIB80211=m
@@ -619,6 +624,7 @@ CONFIG_HAVE_IDE=y
#
# SCSI device support
#
+CONFIG_SCSI_MOD=y
# CONFIG_RAID_ATTRS is not set
CONFIG_SCSI=y
CONFIG_SCSI_DMA=y
@@ -709,6 +715,7 @@ CONFIG_LIBERTAS=m
# CONFIG_LIBERTAS_USB is not set
CONFIG_LIBERTAS_SDIO=m
CONFIG_LIBERTAS_DEBUG=y
+# CONFIG_LIBERTAS_MESH is not set
# CONFIG_P54_COMMON is not set
# CONFIG_RT2X00 is not set
# CONFIG_WL12XX is not set
@@ -730,6 +737,7 @@ CONFIG_USB_NET_AX8817X=y
CONFIG_USB_NET_CDCETHER=y
# CONFIG_USB_NET_CDC_EEM is not set
# CONFIG_USB_NET_DM9601 is not set
+# CONFIG_USB_NET_SMSC75XX is not set
# CONFIG_USB_NET_SMSC95XX is not set
# CONFIG_USB_NET_GL620A is not set
# CONFIG_USB_NET_NET1080 is not set
@@ -739,6 +747,7 @@ CONFIG_USB_NET_CDCETHER=y
# CONFIG_USB_NET_CDC_SUBSET is not set
# CONFIG_USB_NET_ZAURUS is not set
# CONFIG_USB_NET_INT51X1 is not set
+# CONFIG_USB_IPHETH is not set
# CONFIG_WAN is not set
# CONFIG_PPP is not set
# CONFIG_SLIP is not set
@@ -800,6 +809,7 @@ CONFIG_SERIAL_SH_SCI_NR_UARTS=1
CONFIG_SERIAL_SH_SCI_CONSOLE=y
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_TIMBERDALE is not set
# CONFIG_UNIX98_PTYS is not set
# CONFIG_LEGACY_PTYS is not set
# CONFIG_IPMI_HANDLER is not set
@@ -952,7 +962,6 @@ CONFIG_USB_STORAGE=y
# CONFIG_USB_RIO500 is not set
# CONFIG_USB_LEGOTOWER is not set
# CONFIG_USB_LCD is not set
-# CONFIG_USB_BERRY_CHARGE is not set
# CONFIG_USB_LED is not set
# CONFIG_USB_CYPRESS_CY7C63 is not set
# CONFIG_USB_CYTHERM is not set
@@ -964,7 +973,6 @@ CONFIG_USB_STORAGE=y
# CONFIG_USB_IOWARRIOR is not set
# CONFIG_USB_TEST is not set
# CONFIG_USB_ISIGHTFW is not set
-# CONFIG_USB_VST is not set
# CONFIG_USB_GADGET is not set
#
@@ -987,9 +995,6 @@ CONFIG_MMC_BLOCK_BOUNCE=y
# MMC/SD/SDIO Host Controller Drivers
#
# CONFIG_MMC_SDHCI is not set
-# CONFIG_MMC_AT91 is not set
-# CONFIG_MMC_ATMELMCI is not set
-# CONFIG_MMC_TMIO is not set
# CONFIG_MEMSTICK is not set
# CONFIG_NEW_LEDS is not set
# CONFIG_ACCESSIBILITY is not set
@@ -1079,6 +1084,7 @@ CONFIG_MISC_FILESYSTEMS=y
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
# CONFIG_JFFS2_FS is not set
+# CONFIG_LOGFS is not set
CONFIG_CRAMFS=y
# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
@@ -1104,6 +1110,7 @@ CONFIG_SUNRPC=y
# CONFIG_RPCSEC_GSS_KRB5 is not set
# CONFIG_RPCSEC_GSS_SPKM3 is not set
# CONFIG_SMB_FS is not set
+# CONFIG_CEPH_FS is not set
# CONFIG_CIFS is not set
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
@@ -1299,6 +1306,7 @@ CONFIG_CRYPTO_ARC4=y
#
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_HW=y
+# CONFIG_VIRTUALIZATION is not set
# CONFIG_BINARY_PRINTF is not set
#
diff --git a/arch/sh/configs/urquell_defconfig b/arch/sh/configs/urquell_defconfig
index 23bda1916f4d..28bb19d2cbe9 100644
--- a/arch/sh/configs/urquell_defconfig
+++ b/arch/sh/configs/urquell_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.33-rc2
-# Mon Jan 4 15:27:53 2010
+# Linux kernel version: 2.6.34-rc5
+# Tue May 18 18:13:10 2010
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -13,8 +13,8 @@ CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
-CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_IRQ_PER_CPU=y
+CONFIG_SPARSE_IRQ=y
CONFIG_GENERIC_GPIO=y
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -35,6 +35,7 @@ CONFIG_ARCH_HAS_DEFAULT_IDLE=y
CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DMA_COHERENT=y
# CONFIG_DMA_NONCOHERENT is not set
+# CONFIG_NEED_DMA_MAP_STATE is not set
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
CONFIG_CONSTRUCTORS=y
@@ -49,9 +50,11 @@ CONFIG_LOCALVERSION_AUTO=y
CONFIG_HAVE_KERNEL_GZIP=y
CONFIG_HAVE_KERNEL_BZIP2=y
CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_LZO=y
CONFIG_KERNEL_GZIP=y
# CONFIG_KERNEL_BZIP2 is not set
# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_LZO is not set
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
@@ -77,11 +80,6 @@ CONFIG_RCU_FANOUT=32
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_GROUP_SCHED=y
-CONFIG_FAIR_GROUP_SCHED=y
-CONFIG_RT_GROUP_SCHED=y
-# CONFIG_USER_SCHED is not set
-CONFIG_CGROUP_SCHED=y
CONFIG_CGROUPS=y
CONFIG_CGROUP_DEBUG=y
CONFIG_CGROUP_NS=y
@@ -93,6 +91,9 @@ CONFIG_CGROUP_CPUACCT=y
CONFIG_RESOURCE_COUNTERS=y
CONFIG_CGROUP_MEM_RES_CTLR=y
CONFIG_CGROUP_MEM_RES_CTLR_SWAP=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_FAIR_GROUP_SCHED=y
+CONFIG_RT_GROUP_SCHED=y
CONFIG_MM_OWNER=y
# CONFIG_SYSFS_DEPRECATED_V2 is not set
# CONFIG_RELAY is not set
@@ -102,6 +103,7 @@ CONFIG_INITRAMFS_SOURCE=""
CONFIG_RD_GZIP=y
# CONFIG_RD_BZIP2 is not set
# CONFIG_RD_LZMA is not set
+# CONFIG_RD_LZO is not set
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_SYSCTL=y
CONFIG_ANON_INODES=y
@@ -142,13 +144,13 @@ CONFIG_PROFILING=y
# CONFIG_OPROFILE is not set
CONFIG_HAVE_OPROFILE=y
# CONFIG_KPROBES is not set
-CONFIG_HAVE_IOREMAP_PROT=y
CONFIG_HAVE_KPROBES=y
CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_DMA_ATTRS=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+CONFIG_HAVE_HW_BREAKPOINT=y
#
# GCOV-based kernel profiling
@@ -169,7 +171,6 @@ CONFIG_BLOCK=y
CONFIG_LBDAF=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_BLK_DEV_INTEGRITY is not set
-# CONFIG_BLK_CGROUP is not set
#
# IO Schedulers
@@ -265,8 +266,8 @@ CONFIG_FORCE_MAX_ZONEORDER=11
CONFIG_MEMORY_START=0x08000000
CONFIG_MEMORY_SIZE=0x08000000
CONFIG_29BIT=y
-# CONFIG_PMB_ENABLE is not set
-# CONFIG_X2TLB is not set
+# CONFIG_PMB is not set
+CONFIG_X2TLB=y
CONFIG_VSYSCALL=y
# CONFIG_NUMA is not set
CONFIG_ARCH_FLATMEM_ENABLE=y
@@ -277,6 +278,8 @@ CONFIG_ARCH_POPULATES_NODE_MAP=y
CONFIG_ARCH_SELECT_MEMORY_MODEL=y
CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y
+CONFIG_IOREMAP_FIXED=y
+CONFIG_UNCACHED_MAPPING=y
CONFIG_PAGE_SIZE_4KB=y
# CONFIG_PAGE_SIZE_8KB is not set
# CONFIG_PAGE_SIZE_16KB is not set
@@ -299,7 +302,7 @@ CONFIG_SPLIT_PTLOCK_CPUS=4
CONFIG_MIGRATION=y
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
-CONFIG_NR_QUICK=2
+CONFIG_NR_QUICK=1
# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
@@ -317,6 +320,7 @@ CONFIG_CPU_LITTLE_ENDIAN=y
# CONFIG_CPU_BIG_ENDIAN is not set
CONFIG_SH_FPU=y
CONFIG_SH_STORE_QUEUES=y
+# CONFIG_SPECULATIVE_EXECUTION is not set
CONFIG_CPU_HAS_INTEVT=y
CONFIG_CPU_HAS_SR_RB=y
CONFIG_CPU_HAS_PTEAEX=y
@@ -325,15 +329,14 @@ CONFIG_CPU_HAS_FPU=y
#
# Board support
#
+# CONFIG_SH_SDK7786 is not set
CONFIG_SH_URQUELL=y
#
# Timer and clock configuration
#
CONFIG_SH_TIMER_TMU=y
-CONFIG_SH_PCLK_FREQ=33333333
CONFIG_SH_CLK_CPG=y
-CONFIG_SH_CLK_CPG_LEGACY=y
CONFIG_TICK_ONESHOT=y
# CONFIG_NO_HZ is not set
CONFIG_HIGH_RES_TIMERS=y
@@ -376,7 +379,7 @@ CONFIG_PREEMPT_NONE=y
# CONFIG_PREEMPT_VOLUNTARY is not set
# CONFIG_PREEMPT is not set
CONFIG_GUSA=y
-CONFIG_SPARSE_IRQ=y
+# CONFIG_INTC_USERIMASK is not set
#
# Boot options
@@ -391,6 +394,7 @@ CONFIG_ENTRY_OFFSET=0x00001000
# Bus options
#
CONFIG_PCI=y
+CONFIG_PCI_DOMAINS=y
CONFIG_PCIEPORTBUS=y
CONFIG_PCIEAER=y
# CONFIG_PCIE_ECRC is not set
@@ -398,7 +402,6 @@ CONFIG_PCIEAER=y
CONFIG_PCIEASPM=y
CONFIG_PCIEASPM_DEBUG=y
# CONFIG_ARCH_SUPPORTS_MSI is not set
-# CONFIG_PCI_LEGACY is not set
CONFIG_PCI_DEBUG=y
# CONFIG_PCI_STUB is not set
# CONFIG_PCI_IOV is not set
@@ -428,7 +431,6 @@ CONFIG_NET=y
# Networking options
#
CONFIG_PACKET=y
-# CONFIG_PACKET_MMAP is not set
CONFIG_UNIX=y
CONFIG_XFRM=y
# CONFIG_XFRM_USER is not set
@@ -639,6 +641,7 @@ CONFIG_HAVE_IDE=y
#
# SCSI device support
#
+CONFIG_SCSI_MOD=y
# CONFIG_RAID_ATTRS is not set
CONFIG_SCSI=y
CONFIG_SCSI_DMA=y
@@ -712,6 +715,7 @@ CONFIG_ATA_SFF=y
# CONFIG_PATA_IT821X is not set
# CONFIG_PATA_IT8213 is not set
# CONFIG_PATA_JMICRON is not set
+# CONFIG_PATA_LEGACY is not set
# CONFIG_PATA_TRIFLEX is not set
# CONFIG_PATA_MARVELL is not set
# CONFIG_PATA_MPIIX is not set
@@ -748,7 +752,7 @@ CONFIG_ATA_SFF=y
#
#
-# See the help texts for more information.
+# The newer stack is recommended.
#
# CONFIG_FIREWIRE is not set
# CONFIG_IEEE1394 is not set
@@ -807,6 +811,7 @@ CONFIG_NET_PCI=y
# CONFIG_PCNET32 is not set
# CONFIG_AMD8111_ETH is not set
# CONFIG_ADAPTEC_STARFIRE is not set
+# CONFIG_KSZ884X_PCI is not set
# CONFIG_B44 is not set
# CONFIG_FORCEDETH is not set
# CONFIG_E100 is not set
@@ -871,6 +876,7 @@ CONFIG_WLAN=y
# CONFIG_USB_PEGASUS is not set
# CONFIG_USB_RTL8150 is not set
# CONFIG_USB_USBNET is not set
+# CONFIG_USB_IPHETH is not set
# CONFIG_WAN is not set
# CONFIG_FDDI is not set
# CONFIG_HIPPI is not set
@@ -958,6 +964,7 @@ CONFIG_SERIAL_SH_SCI_CONSOLE=y
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
# CONFIG_SERIAL_JSM is not set
+# CONFIG_SERIAL_TIMBERDALE is not set
CONFIG_UNIX98_PTYS=y
# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
CONFIG_LEGACY_PTYS=y
@@ -1007,6 +1014,7 @@ CONFIG_I2C_ALGOPCA=y
# CONFIG_I2C_OCORES is not set
# CONFIG_I2C_SH_MOBILE is not set
# CONFIG_I2C_SIMTEC is not set
+# CONFIG_I2C_XILINX is not set
#
# External I2C/SMBus adapter drivers
@@ -1020,15 +1028,9 @@ CONFIG_I2C_ALGOPCA=y
#
CONFIG_I2C_PCA_PLATFORM=y
# CONFIG_I2C_STUB is not set
-
-#
-# Miscellaneous I2C Chip support
-#
-# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
# CONFIG_I2C_DEBUG_BUS is not set
-# CONFIG_I2C_DEBUG_CHIP is not set
# CONFIG_SPI is not set
#
@@ -1043,13 +1045,17 @@ CONFIG_GPIOLIB=y
#
# Memory mapped GPIO expanders:
#
+# CONFIG_GPIO_IT8761E is not set
+# CONFIG_GPIO_SCH is not set
#
# I2C GPIO expanders:
#
+# CONFIG_GPIO_MAX7300 is not set
# CONFIG_GPIO_MAX732X is not set
# CONFIG_GPIO_PCA953X is not set
# CONFIG_GPIO_PCF857X is not set
+# CONFIG_GPIO_ADP5588 is not set
#
# PCI GPIO expanders:
@@ -1082,10 +1088,11 @@ CONFIG_HWMON=y
# CONFIG_SENSORS_ADM1029 is not set
# CONFIG_SENSORS_ADM1031 is not set
# CONFIG_SENSORS_ADM9240 is not set
+# CONFIG_SENSORS_ADT7411 is not set
# CONFIG_SENSORS_ADT7462 is not set
# CONFIG_SENSORS_ADT7470 is not set
-# CONFIG_SENSORS_ADT7473 is not set
# CONFIG_SENSORS_ADT7475 is not set
+# CONFIG_SENSORS_ASC7621 is not set
# CONFIG_SENSORS_ATXP1 is not set
# CONFIG_SENSORS_DS1621 is not set
# CONFIG_SENSORS_I5K_AMB is not set
@@ -1123,6 +1130,7 @@ CONFIG_HWMON=y
# CONFIG_SENSORS_SMSC47M192 is not set
# CONFIG_SENSORS_SMSC47B397 is not set
# CONFIG_SENSORS_ADS7828 is not set
+# CONFIG_SENSORS_AMC6821 is not set
# CONFIG_SENSORS_THMC50 is not set
# CONFIG_SENSORS_TMP401 is not set
# CONFIG_SENSORS_TMP421 is not set
@@ -1151,21 +1159,26 @@ CONFIG_SSB_POSSIBLE=y
# Multifunction device drivers
#
# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_88PM860X is not set
CONFIG_MFD_SM501=y
# CONFIG_MFD_SM501_GPIO is not set
# CONFIG_MFD_SH_MOBILE_SDHI is not set
# CONFIG_HTC_PASIC3 is not set
+# CONFIG_HTC_I2CPLD is not set
# CONFIG_TPS65010 is not set
# CONFIG_TWL4030_CORE is not set
# CONFIG_MFD_TMIO is not set
# CONFIG_PMIC_DA903X is not set
# CONFIG_PMIC_ADP5520 is not set
+# CONFIG_MFD_MAX8925 is not set
# CONFIG_MFD_WM8400 is not set
# CONFIG_MFD_WM831X is not set
# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_WM8994 is not set
# CONFIG_MFD_PCF50633 is not set
# CONFIG_AB3100_CORE is not set
-# CONFIG_MFD_88PM8607 is not set
+# CONFIG_MFD_TIMBERDALE is not set
+# CONFIG_LPC_SCH is not set
# CONFIG_REGULATOR is not set
CONFIG_MEDIA_SUPPORT=y
@@ -1187,6 +1200,7 @@ CONFIG_VIDEO_IR=y
# Graphics support
#
CONFIG_VGA_ARB=y
+CONFIG_VGA_ARB_MAX_GPUS=16
# CONFIG_DRM is not set
# CONFIG_VGASTATE is not set
# CONFIG_VIDEO_OUTPUT_CONTROL is not set
@@ -1283,6 +1297,7 @@ CONFIG_USB_HID=y
#
# Special HID drivers
#
+# CONFIG_HID_3M_PCT is not set
CONFIG_HID_A4TECH=y
CONFIG_HID_APPLE=y
CONFIG_HID_BELKIN=y
@@ -1298,14 +1313,19 @@ CONFIG_HID_GYRATION=y
CONFIG_HID_LOGITECH=y
# CONFIG_LOGITECH_FF is not set
# CONFIG_LOGIRUMBLEPAD2_FF is not set
+# CONFIG_LOGIG940_FF is not set
CONFIG_HID_MICROSOFT=y
+# CONFIG_HID_MOSART is not set
CONFIG_HID_MONTEREY=y
# CONFIG_HID_NTRIG is not set
+# CONFIG_HID_ORTEK is not set
CONFIG_HID_PANTHERLORD=y
# CONFIG_PANTHERLORD_FF is not set
CONFIG_HID_PETALYNX=y
+# CONFIG_HID_QUANTA is not set
CONFIG_HID_SAMSUNG=y
CONFIG_HID_SONY=y
+# CONFIG_HID_STANTUM is not set
CONFIG_HID_SUNPLUS=y
# CONFIG_HID_GREENASIA is not set
# CONFIG_HID_SMARTJOYPLUS is not set
@@ -1326,7 +1346,6 @@ CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
CONFIG_USB_DEVICEFS=y
CONFIG_USB_DEVICE_CLASS=y
# CONFIG_USB_DYNAMIC_MINORS is not set
-# CONFIG_USB_SUSPEND is not set
# CONFIG_USB_OTG is not set
# CONFIG_USB_OTG_WHITELIST is not set
# CONFIG_USB_OTG_BLACKLIST_HUB is not set
@@ -1405,7 +1424,6 @@ CONFIG_USB_STORAGE=y
# CONFIG_USB_RIO500 is not set
# CONFIG_USB_LEGOTOWER is not set
# CONFIG_USB_LCD is not set
-# CONFIG_USB_BERRY_CHARGE is not set
# CONFIG_USB_LED is not set
# CONFIG_USB_CYPRESS_CY7C63 is not set
# CONFIG_USB_CYTHERM is not set
@@ -1417,7 +1435,6 @@ CONFIG_USB_STORAGE=y
# CONFIG_USB_IOWARRIOR is not set
# CONFIG_USB_TEST is not set
# CONFIG_USB_ISIGHTFW is not set
-# CONFIG_USB_VST is not set
# CONFIG_USB_GADGET is not set
#
@@ -1584,6 +1601,7 @@ CONFIG_MISC_FILESYSTEMS=y
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
# CONFIG_JFFS2_FS is not set
+# CONFIG_LOGFS is not set
# CONFIG_CRAMFS is not set
# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
@@ -1610,6 +1628,7 @@ CONFIG_SUNRPC_GSS=y
CONFIG_RPCSEC_GSS_KRB5=y
# CONFIG_RPCSEC_GSS_SPKM3 is not set
# CONFIG_SMB_FS is not set
+# CONFIG_CEPH_FS is not set
# CONFIG_CIFS is not set
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
@@ -1688,6 +1707,7 @@ CONFIG_SCHED_DEBUG=y
# CONFIG_TIMER_STATS is not set
# CONFIG_DEBUG_OBJECTS is not set
# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_KMEMLEAK is not set
# CONFIG_DEBUG_RT_MUTEXES is not set
# CONFIG_RT_MUTEX_TESTER is not set
# CONFIG_DEBUG_SPINLOCK is not set
@@ -1713,6 +1733,7 @@ CONFIG_FRAME_POINTER=y
# CONFIG_BACKTRACE_SELF_TEST is not set
# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
+# CONFIG_LKDTM is not set
# CONFIG_FAULT_INJECTION is not set
# CONFIG_LATENCYTOP is not set
CONFIG_SYSCTL_SYSCALL_CHECK=y
@@ -1846,6 +1867,7 @@ CONFIG_CRYPTO_DES=y
#
# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
+# CONFIG_VIRTUALIZATION is not set
# CONFIG_BINARY_PRINTF is not set
#
diff --git a/arch/sh/include/asm/atomic.h b/arch/sh/include/asm/atomic.h
index 275a448ae8c2..c7983124d99d 100644
--- a/arch/sh/include/asm/atomic.h
+++ b/arch/sh/include/asm/atomic.h
@@ -13,7 +13,7 @@
#define ATOMIC_INIT(i) ( (atomic_t) { (i) } )
-#define atomic_read(v) ((v)->counter)
+#define atomic_read(v) (*(volatile int *)&(v)->counter)
#define atomic_set(v,i) ((v)->counter = (i))
#if defined(CONFIG_GUSA_RB)
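The atomic_read() change just above is the usual "force a real load" idiom: reading the counter through a volatile-qualified pointer keeps the compiler from caching an earlier value in a register across a polling loop. A minimal user-space sketch of that idiom, illustrative only and not part of the patch:

/*
 * Illustrative user-space analogue of the atomic_read() change above;
 * not part of the patch.  Reading through a volatile int * forces a
 * fresh load of ->counter on every use instead of letting the compiler
 * reuse a previously loaded value.
 */
#include <stdio.h>

typedef struct { int counter; } atomic_t;

#define atomic_read_plain(v)    ((v)->counter)                   /* old form */
#define atomic_read_volatile(v) (*(volatile int *)&(v)->counter) /* new form */

int main(void)
{
	atomic_t v = { 42 };

	printf("plain:    %d\n", atomic_read_plain(&v));
	printf("volatile: %d\n", atomic_read_volatile(&v));
	return 0;
}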
diff --git a/arch/sh/include/asm/bug.h b/arch/sh/include/asm/bug.h
index d02c01b3e6b9..6323f864d111 100644
--- a/arch/sh/include/asm/bug.h
+++ b/arch/sh/include/asm/bug.h
@@ -48,7 +48,7 @@ do { \
"i" (sizeof(struct bug_entry))); \
} while (0)
-#define __WARN() \
+#define __WARN_TAINT(taint) \
do { \
__asm__ __volatile__ ( \
"1:\t.short %O0\n" \
@@ -57,7 +57,7 @@ do { \
: "n" (TRAPA_BUG_OPCODE), \
"i" (__FILE__), \
"i" (__LINE__), \
- "i" (BUGFLAG_WARNING), \
+ "i" (BUGFLAG_TAINT(taint)), \
"i" (sizeof(struct bug_entry))); \
} while (0)
diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
index 02df18ea9608..e461d67f03c3 100644
--- a/arch/sh/include/asm/cache.h
+++ b/arch/sh/include/asm/cache.h
@@ -38,14 +38,10 @@ struct cache_info {
* 2. those in the physical page number.
*/
unsigned int alias_mask;
-
unsigned int n_aliases; /* Number of aliases */
unsigned long flags;
};
-
-int __init detect_cpu_and_cache_system(void);
-
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* __ASM_SH_CACHE_H */
diff --git a/arch/sh/include/asm/clkdev.h b/arch/sh/include/asm/clkdev.h
new file mode 100644
index 000000000000..5645f358128b
--- /dev/null
+++ b/arch/sh/include/asm/clkdev.h
@@ -0,0 +1,35 @@
+/*
+ * arch/sh/include/asm/clkdev.h
+ *
+ * Cloned from arch/arm/include/asm/clkdev.h:
+ *
+ * Copyright (C) 2008 Russell King.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Helper for the clk API to assist looking up a struct clk.
+ */
+#ifndef __ASM_CLKDEV_H
+#define __ASM_CLKDEV_H
+
+struct clk;
+
+struct clk_lookup {
+ struct list_head node;
+ const char *dev_id;
+ const char *con_id;
+ struct clk *clk;
+};
+
+struct clk_lookup *clkdev_alloc(struct clk *clk, const char *con_id,
+ const char *dev_fmt, ...);
+
+void clkdev_add(struct clk_lookup *cl);
+void clkdev_drop(struct clk_lookup *cl);
+
+void clkdev_add_table(struct clk_lookup *, size_t);
+int clk_add_alias(const char *, const char *, char *, struct device *);
+
+#endif
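The new clkdev.h only declares the lookup API; board or CPU code is expected to fill in a clk_lookup table and hand it to clkdev_add_table(). A hedged sketch of that usage follows; apart from clk_lookup and clkdev_add_table(), which the header above declares, every identifier here is invented for illustration:

/* Sketch only -- assumed board-side usage, not part of this patch. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/clkdev.h>

extern struct clk illustrative_sci_clk;		/* defined elsewhere in board code */

static struct clk_lookup board_lookups[] = {
	{
		.dev_id	= "sh-sci.0",		/* illustrative device name */
		.con_id	= "sci_fck",		/* illustrative connection id */
		.clk	= &illustrative_sci_clk,
	},
};

static int __init board_clkdev_init(void)
{
	/* register the table with the common lookup code declared in clkdev.h */
	clkdev_add_table(board_lookups, ARRAY_SIZE(board_lookups));
	return 0;
}
arch_initcall(board_clkdev_init);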
diff --git a/arch/sh/include/asm/clock.h b/arch/sh/include/asm/clock.h
index 11da4c5beb68..803d4c7f09dc 100644
--- a/arch/sh/include/asm/clock.h
+++ b/arch/sh/include/asm/clock.h
@@ -1,171 +1,16 @@
#ifndef __ASM_SH_CLOCK_H
#define __ASM_SH_CLOCK_H
-#include <linux/list.h>
-#include <linux/seq_file.h>
-#include <linux/cpufreq.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-
-struct clk;
-
-struct clk_ops {
- void (*init)(struct clk *clk);
- int (*enable)(struct clk *clk);
- void (*disable)(struct clk *clk);
- unsigned long (*recalc)(struct clk *clk);
- int (*set_rate)(struct clk *clk, unsigned long rate, int algo_id);
- int (*set_parent)(struct clk *clk, struct clk *parent);
- long (*round_rate)(struct clk *clk, unsigned long rate);
-};
-
-struct clk {
- struct list_head node;
- const char *name;
- int id;
- struct module *owner;
-
- struct clk *parent;
- struct clk_ops *ops;
-
- struct list_head children;
- struct list_head sibling; /* node for children */
-
- int usecount;
-
- unsigned long rate;
- unsigned long flags;
-
- void __iomem *enable_reg;
- unsigned int enable_bit;
-
- unsigned long arch_flags;
- void *priv;
- struct dentry *dentry;
- struct cpufreq_frequency_table *freq_table;
-};
-
-struct clk_lookup {
- struct list_head node;
- const char *dev_id;
- const char *con_id;
- struct clk *clk;
-};
-
-#define CLK_ENABLE_ON_INIT (1 << 0)
+#include <linux/sh_clk.h>
/* Should be defined by processor-specific code */
void __deprecated arch_init_clk_ops(struct clk_ops **, int type);
int __init arch_clk_init(void);
-/* arch/sh/kernel/cpu/clock.c */
-int clk_init(void);
-unsigned long followparent_recalc(struct clk *);
-void recalculate_root_clocks(void);
-void propagate_rate(struct clk *);
-int clk_reparent(struct clk *child, struct clk *parent);
-int clk_register(struct clk *);
-void clk_unregister(struct clk *);
-
/* arch/sh/kernel/cpu/clock-cpg.c */
int __init __deprecated cpg_clk_init(void);
-/* the exported API, in addition to clk_set_rate */
-/**
- * clk_set_rate_ex - set the clock rate for a clock source, with additional parameter
- * @clk: clock source
- * @rate: desired clock rate in Hz
- * @algo_id: algorithm id to be passed down to ops->set_rate
- *
- * Returns success (0) or negative errno.
- */
-int clk_set_rate_ex(struct clk *clk, unsigned long rate, int algo_id);
-
-enum clk_sh_algo_id {
- NO_CHANGE = 0,
-
- IUS_N1_N1,
- IUS_322,
- IUS_522,
- IUS_N11,
-
- SB_N1,
-
- SB3_N1,
- SB3_32,
- SB3_43,
- SB3_54,
-
- BP_N1,
-
- IP_N1,
-};
-
-struct clk_div_mult_table {
- unsigned int *divisors;
- unsigned int nr_divisors;
- unsigned int *multipliers;
- unsigned int nr_multipliers;
-};
-
-struct cpufreq_frequency_table;
-void clk_rate_table_build(struct clk *clk,
- struct cpufreq_frequency_table *freq_table,
- int nr_freqs,
- struct clk_div_mult_table *src_table,
- unsigned long *bitmap);
-
-long clk_rate_table_round(struct clk *clk,
- struct cpufreq_frequency_table *freq_table,
- unsigned long rate);
-
-int clk_rate_table_find(struct clk *clk,
- struct cpufreq_frequency_table *freq_table,
- unsigned long rate);
-
-#define SH_CLK_MSTP32(_name, _id, _parent, _enable_reg, \
- _enable_bit, _flags) \
-{ \
- .name = _name, \
- .id = _id, \
- .parent = _parent, \
- .enable_reg = (void __iomem *)_enable_reg, \
- .enable_bit = _enable_bit, \
- .flags = _flags, \
-}
-
-int sh_clk_mstp32_register(struct clk *clks, int nr);
-
-#define SH_CLK_DIV4(_name, _parent, _reg, _shift, _div_bitmap, _flags) \
-{ \
- .name = _name, \
- .parent = _parent, \
- .enable_reg = (void __iomem *)_reg, \
- .enable_bit = _shift, \
- .arch_flags = _div_bitmap, \
- .flags = _flags, \
-}
-
-struct clk_div4_table {
- struct clk_div_mult_table *div_mult_table;
- void (*kick)(struct clk *clk);
-};
-
-int sh_clk_div4_register(struct clk *clks, int nr,
- struct clk_div4_table *table);
-int sh_clk_div4_enable_register(struct clk *clks, int nr,
- struct clk_div4_table *table);
-int sh_clk_div4_reparent_register(struct clk *clks, int nr,
- struct clk_div4_table *table);
-
-#define SH_CLK_DIV6(_name, _parent, _reg, _flags) \
-{ \
- .name = _name, \
- .parent = _parent, \
- .enable_reg = (void __iomem *)_reg, \
- .flags = _flags, \
-}
-
-int sh_clk_div6_register(struct clk *clks, int nr);
+/* arch/sh/kernel/cpu/clock.c */
+int clk_init(void);
#endif /* __ASM_SH_CLOCK_H */
diff --git a/arch/sh/include/asm/hw_breakpoint.h b/arch/sh/include/asm/hw_breakpoint.h
index 965dd780d51b..89890f61a7b9 100644
--- a/arch/sh/include/asm/hw_breakpoint.h
+++ b/arch/sh/include/asm/hw_breakpoint.h
@@ -46,17 +46,20 @@ struct pmu;
/* Maximum number of UBC channels */
#define HBP_NUM 2
+static inline int hw_breakpoint_slots(int type)
+{
+ return HBP_NUM;
+}
+
/* arch/sh/kernel/hw_breakpoint.c */
-extern int arch_check_va_in_userspace(unsigned long va, u16 hbp_len);
-extern int arch_validate_hwbkpt_settings(struct perf_event *bp,
- struct task_struct *tsk);
+extern int arch_check_bp_in_kernelspace(struct perf_event *bp);
+extern int arch_validate_hwbkpt_settings(struct perf_event *bp);
extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
unsigned long val, void *data);
int arch_install_hw_breakpoint(struct perf_event *bp);
void arch_uninstall_hw_breakpoint(struct perf_event *bp);
void hw_breakpoint_pmu_read(struct perf_event *bp);
-void hw_breakpoint_pmu_unthrottle(struct perf_event *bp);
extern void arch_fill_perf_breakpoint(struct perf_event *bp);
extern int register_sh_ubc(struct sh_ubc *);
diff --git a/arch/sh/include/asm/hwblk.h b/arch/sh/include/asm/hwblk.h
index 5d3ccae4202b..855e945c6199 100644
--- a/arch/sh/include/asm/hwblk.h
+++ b/arch/sh/include/asm/hwblk.h
@@ -58,13 +58,11 @@ void hwblk_cnt_inc(struct hwblk_info *info, int hwblk, int cnt);
void hwblk_cnt_dec(struct hwblk_info *info, int hwblk, int cnt);
/* allow clocks to enable and disable hardware blocks */
-#define SH_HWBLK_CLK(_name, _id, _parent, _hwblk, _flags) \
-{ \
- .name = _name, \
- .id = _id, \
- .parent = _parent, \
- .arch_flags = _hwblk, \
- .flags = _flags, \
+#define SH_HWBLK_CLK(_hwblk, _parent, _flags) \
+[_hwblk] = { \
+ .parent = _parent, \
+ .arch_flags = _hwblk, \
+ .flags = _flags, \
}
int sh_hwblk_clk_register(struct clk *clks, int nr);
diff --git a/arch/sh/include/asm/io_generic.h b/arch/sh/include/asm/io_generic.h
index 1e5d375f55dc..491df93cbf8e 100644
--- a/arch/sh/include/asm/io_generic.h
+++ b/arch/sh/include/asm/io_generic.h
@@ -38,5 +38,6 @@ void IO_CONCAT(__IO_PREFIX,iounmap)(void *addr);
void __iomem *IO_CONCAT(__IO_PREFIX,ioport_map)(unsigned long addr, unsigned int size);
void IO_CONCAT(__IO_PREFIX,ioport_unmap)(void __iomem *addr);
+void IO_CONCAT(__IO_PREFIX,mem_init)(void);
#undef __IO_PREFIX
diff --git a/arch/sh/include/asm/irq.h b/arch/sh/include/asm/irq.h
index df8e1500527c..02c2f0102cfa 100644
--- a/arch/sh/include/asm/irq.h
+++ b/arch/sh/include/asm/irq.h
@@ -1,6 +1,7 @@
#ifndef __ASM_SH_IRQ_H
#define __ASM_SH_IRQ_H
+#include <linux/cpumask.h>
#include <asm/machvec.h>
/*
@@ -12,6 +13,14 @@
#define NR_IRQS_LEGACY 8 /* Legacy external IRQ0-7 */
/*
+ * This is a special IRQ number for indicating that no IRQ has been
+ * triggered and to simply ignore the IRQ dispatch. This is a special
+ * case that can happen with IRQ auto-distribution when multiple CPUs
+ * are woken up and signalled in parallel.
+ */
+#define NO_IRQ_IGNORE ((unsigned int)-1)
+
+/*
* Convert back and forth between INTEVT and IRQ values.
*/
#ifdef CONFIG_CPU_HAS_INTEVT
@@ -42,6 +51,8 @@ static inline int generic_irq_demux(int irq)
#define irq_demux(irq) sh_mv.mv_irq_demux(irq)
void init_IRQ(void);
+void migrate_irqs(void);
+
asmlinkage int do_IRQ(unsigned int irq, struct pt_regs *regs);
#ifdef CONFIG_IRQSTACKS
@@ -53,6 +64,14 @@ extern void irq_ctx_exit(int cpu);
# define irq_ctx_exit(cpu) do { } while (0)
#endif
+#ifdef CONFIG_INTC_BALANCING
+extern unsigned int irq_lookup(unsigned int irq);
+extern void irq_finish(unsigned int irq);
+#else
+#define irq_lookup(irq) (irq)
+#define irq_finish(irq) do { } while (0)
+#endif
+
#include <asm-generic/irq.h>
#ifdef CONFIG_CPU_SH5
#include <cpu/irq.h>
diff --git a/arch/sh/include/asm/kexec.h b/arch/sh/include/asm/kexec.h
index 765a5e1660fc..ad6ef8a275ee 100644
--- a/arch/sh/include/asm/kexec.h
+++ b/arch/sh/include/asm/kexec.h
@@ -26,6 +26,10 @@
/* The native architecture */
#define KEXEC_ARCH KEXEC_ARCH_SH
+#ifdef CONFIG_KEXEC
+/* arch/sh/kernel/machine_kexec.c */
+void reserve_crashkernel(void);
+
static inline void crash_setup_regs(struct pt_regs *newregs,
struct pt_regs *oldregs)
{
@@ -59,4 +63,8 @@ static inline void crash_setup_regs(struct pt_regs *newregs,
newregs->pc = (unsigned long)current_text_addr();
}
}
+#else
+static inline void reserve_crashkernel(void) { }
+#endif /* CONFIG_KEXEC */
+
#endif /* __ASM_SH_KEXEC_H */
diff --git a/arch/sh/include/asm/machvec.h b/arch/sh/include/asm/machvec.h
index 9c30955630ff..bc0218cb72e1 100644
--- a/arch/sh/include/asm/machvec.h
+++ b/arch/sh/include/asm/machvec.h
@@ -49,6 +49,8 @@ struct sh_machine_vector {
int (*mv_clk_init)(void);
int (*mv_mode_pins)(void);
+
+ void (*mv_mem_init)(void);
};
extern struct sh_machine_vector sh_mv;
diff --git a/arch/sh/include/asm/mmzone.h b/arch/sh/include/asm/mmzone.h
index 7f5363b29ba0..8887baff5eff 100644
--- a/arch/sh/include/asm/mmzone.h
+++ b/arch/sh/include/asm/mmzone.h
@@ -42,9 +42,10 @@ setup_bootmem_node(int nid, unsigned long start, unsigned long end)
void __init plat_mem_setup(void);
/* arch/sh/kernel/setup.c */
-void __init setup_bootmem_allocator(unsigned long start_pfn);
void __init __add_active_range(unsigned int nid, unsigned long start_pfn,
unsigned long end_pfn);
+/* arch/sh/mm/init.c */
+void __init allocate_pgdat(unsigned int nid);
#endif /* __KERNEL__ */
#endif /* __ASM_SH_MMZONE_H */
diff --git a/arch/sh/include/asm/page.h b/arch/sh/include/asm/page.h
index d71feb359304..fb703d120d09 100644
--- a/arch/sh/include/asm/page.h
+++ b/arch/sh/include/asm/page.h
@@ -49,7 +49,7 @@
extern unsigned long shm_align_mask;
extern unsigned long max_low_pfn, min_low_pfn;
-extern unsigned long memory_start, memory_end;
+extern unsigned long memory_start, memory_end, memory_limit;
static inline unsigned long
pages_do_alias(unsigned long addr1, unsigned long addr2)
@@ -128,13 +128,18 @@ typedef struct page *pgtable_t;
* added or subtracted as required.
*/
#ifdef CONFIG_PMB
-#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET+__MEMORY_START)
-#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET-__MEMORY_START))
+#define ___pa(x) ((x)-PAGE_OFFSET+__MEMORY_START)
+#define ___va(x) ((x)+PAGE_OFFSET-__MEMORY_START)
#else
-#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)
-#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
+#define ___pa(x) ((x)-PAGE_OFFSET)
+#define ___va(x) ((x)+PAGE_OFFSET)
#endif
+#ifndef __ASSEMBLY__
+#define __pa(x) ___pa((unsigned long)x)
+#define __va(x) (void *)___va((unsigned long)x)
+#endif /* !__ASSEMBLY__ */
+
#ifdef CONFIG_UNCACHED_MAPPING
#define UNCAC_ADDR(addr) ((addr) - PAGE_OFFSET + uncached_start)
#define CAC_ADDR(addr) ((addr) - uncached_start + PAGE_OFFSET)
diff --git a/arch/sh/include/asm/processor.h b/arch/sh/include/asm/processor.h
index 9605e062840f..0a58cb25a658 100644
--- a/arch/sh/include/asm/processor.h
+++ b/arch/sh/include/asm/processor.h
@@ -85,6 +85,10 @@ struct sh_cpuinfo {
struct tlb_info itlb;
struct tlb_info dtlb;
+#ifdef CONFIG_SMP
+ struct task_struct *idle;
+#endif
+
unsigned long flags;
} __attribute__ ((aligned(L1_CACHE_BYTES)));
@@ -102,6 +106,9 @@ struct task_struct;
extern struct pt_regs fake_swapper_regs;
+extern void cpu_init(void);
+extern void cpu_probe(void);
+
/* arch/sh/kernel/process.c */
extern unsigned int xstate_size;
extern void free_thread_xstate(struct task_struct *);
diff --git a/arch/sh/include/asm/processor_32.h b/arch/sh/include/asm/processor_32.h
index 572b4eb09493..61a445d2d02a 100644
--- a/arch/sh/include/asm/processor_32.h
+++ b/arch/sh/include/asm/processor_32.h
@@ -27,8 +27,6 @@
#define CCN_CVR 0xff000040
#define CCN_PRR 0xff000044
-asmlinkage void __init sh_cpu_init(void);
-
/*
* User space process size: 2GB.
*
diff --git a/arch/sh/include/asm/setup.h b/arch/sh/include/asm/setup.h
index 4758325bb24a..01fa17a3d759 100644
--- a/arch/sh/include/asm/setup.h
+++ b/arch/sh/include/asm/setup.h
@@ -19,6 +19,7 @@
#define COMMAND_LINE ((char *) (PARAM+0x100))
void sh_mv_setup(void);
+void check_for_initrd(void);
#endif /* __KERNEL__ */
diff --git a/arch/sh/include/asm/siu.h b/arch/sh/include/asm/siu.h
index f1b1e6944a5f..1d95c78808d1 100644
--- a/arch/sh/include/asm/siu.h
+++ b/arch/sh/include/asm/siu.h
@@ -11,16 +11,14 @@
#ifndef ASM_SIU_H
#define ASM_SIU_H
-#include <asm/dmaengine.h>
-
struct device;
struct siu_platform {
struct device *dma_dev;
- enum sh_dmae_slave_chan_id dma_slave_tx_a;
- enum sh_dmae_slave_chan_id dma_slave_rx_a;
- enum sh_dmae_slave_chan_id dma_slave_tx_b;
- enum sh_dmae_slave_chan_id dma_slave_rx_b;
+ unsigned int dma_slave_tx_a;
+ unsigned int dma_slave_rx_a;
+ unsigned int dma_slave_tx_b;
+ unsigned int dma_slave_rx_b;
};
#endif /* ASM_SIU_H */
diff --git a/arch/sh/include/asm/smp-ops.h b/arch/sh/include/asm/smp-ops.h
new file mode 100644
index 000000000000..c590f76856f1
--- /dev/null
+++ b/arch/sh/include/asm/smp-ops.h
@@ -0,0 +1,51 @@
+#ifndef __ASM_SH_SMP_OPS_H
+#define __ASM_SH_SMP_OPS_H
+
+struct plat_smp_ops {
+ void (*smp_setup)(void);
+ unsigned int (*smp_processor_id)(void);
+ void (*prepare_cpus)(unsigned int max_cpus);
+ void (*start_cpu)(unsigned int cpu, unsigned long entry_point);
+ void (*send_ipi)(unsigned int cpu, unsigned int message);
+ int (*cpu_disable)(unsigned int cpu);
+ void (*cpu_die)(unsigned int cpu);
+ void (*play_dead)(void);
+};
+
+extern struct plat_smp_ops *mp_ops;
+extern struct plat_smp_ops shx3_smp_ops;
+
+#ifdef CONFIG_SMP
+
+static inline void plat_smp_setup(void)
+{
+ BUG_ON(!mp_ops);
+ mp_ops->smp_setup();
+}
+
+static inline void play_dead(void)
+{
+ mp_ops->play_dead();
+}
+
+extern void register_smp_ops(struct plat_smp_ops *ops);
+
+#else
+
+static inline void plat_smp_setup(void)
+{
+ /* UP, nothing to do ... */
+}
+
+static inline void register_smp_ops(struct plat_smp_ops *ops)
+{
+}
+
+static inline void play_dead(void)
+{
+ BUG();
+}
+
+#endif /* CONFIG_SMP */
+
+#endif /* __ASM_SH_SMP_OPS_H */
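[Editorial note, not part of the patch: the new <asm/smp-ops.h> interface above is easiest to see with a usage sketch. Everything below is illustrative — the "myplat" names and the two-CPU assumption are made up; the only symbols taken from the patch are plat_smp_ops and register_smp_ops(), plus the generic set_cpu_possible() helper.]

#include <linux/init.h>
#include <linux/cpumask.h>
#include <asm/smp-ops.h>

/* Minimal SMP glue for a hypothetical two-CPU platform. */
static void myplat_smp_setup(void)
{
	unsigned int cpu;

	for (cpu = 0; cpu < 2; cpu++)
		set_cpu_possible(cpu, true);
}

static unsigned int myplat_smp_processor_id(void)
{
	return 0;			/* sketch: boot CPU only */
}

static void myplat_prepare_cpus(unsigned int max_cpus)
{
}

static void myplat_start_cpu(unsigned int cpu, unsigned long entry_point)
{
	/* Kick the secondary CPU at entry_point (platform specific). */
}

static void myplat_send_ipi(unsigned int cpu, unsigned int message)
{
	/* Raise the platform's inter-processor interrupt. */
}

static struct plat_smp_ops myplat_smp_ops = {
	.smp_setup		= myplat_smp_setup,
	.smp_processor_id	= myplat_smp_processor_id,
	.prepare_cpus		= myplat_prepare_cpus,
	.start_cpu		= myplat_start_cpu,
	.send_ipi		= myplat_send_ipi,
};

/* Board/CPU setup code would call this before plat_smp_setup() runs. */
void __init myplat_register_smp_ops(void)
{
	register_smp_ops(&myplat_smp_ops);
}
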
diff --git a/arch/sh/include/asm/smp.h b/arch/sh/include/asm/smp.h
index 53ef26ced75f..9070d943ddde 100644
--- a/arch/sh/include/asm/smp.h
+++ b/arch/sh/include/asm/smp.h
@@ -3,15 +3,16 @@
#include <linux/bitops.h>
#include <linux/cpumask.h>
+#include <asm/smp-ops.h>
#ifdef CONFIG_SMP
#include <linux/spinlock.h>
#include <asm/atomic.h>
#include <asm/current.h>
+#include <asm/percpu.h>
#define raw_smp_processor_id() (current_thread_info()->cpu)
-#define hard_smp_processor_id() plat_smp_processor_id()
/* Map from cpu id to sequential logical cpu number. */
extern int __cpu_number_map[NR_CPUS];
@@ -30,20 +31,43 @@ enum {
SMP_MSG_NR, /* must be last */
};
+DECLARE_PER_CPU(int, cpu_state);
+
void smp_message_recv(unsigned int msg);
void smp_timer_broadcast(const struct cpumask *mask);
void local_timer_interrupt(void);
void local_timer_setup(unsigned int cpu);
-
-void plat_smp_setup(void);
-void plat_prepare_cpus(unsigned int max_cpus);
-int plat_smp_processor_id(void);
-void plat_start_cpu(unsigned int cpu, unsigned long entry_point);
-void plat_send_ipi(unsigned int cpu, unsigned int message);
+void local_timer_stop(unsigned int cpu);
void arch_send_call_function_single_ipi(int cpu);
-extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+
+void native_play_dead(void);
+void native_cpu_die(unsigned int cpu);
+int native_cpu_disable(unsigned int cpu);
+
+#ifdef CONFIG_HOTPLUG_CPU
+void play_dead_common(void);
+extern int __cpu_disable(void);
+
+static inline void __cpu_die(unsigned int cpu)
+{
+ extern struct plat_smp_ops *mp_ops; /* private */
+
+ mp_ops->cpu_die(cpu);
+}
+#endif
+
+static inline int hard_smp_processor_id(void)
+{
+ extern struct plat_smp_ops *mp_ops; /* private */
+
+ if (!mp_ops)
+ return 0; /* boot CPU */
+
+ return mp_ops->smp_processor_id();
+}
#else
diff --git a/arch/sh/include/asm/thread_info.h b/arch/sh/include/asm/thread_info.h
index 55a36fef6875..c228946926ed 100644
--- a/arch/sh/include/asm/thread_info.h
+++ b/arch/sh/include/asm/thread_info.h
@@ -121,7 +121,7 @@ extern void init_thread_xstate(void);
#define TIF_NOTIFY_RESUME 7 /* callback before returning to user */
#define TIF_SYSCALL_TRACEPOINT 8 /* for ftrace syscall instrumentation */
#define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */
-#define TIF_MEMDIE 18
+#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
#define TIF_FREEZE 19 /* Freezing for suspend */
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
diff --git a/arch/sh/include/cpu-sh4/cpu/dma-register.h b/arch/sh/include/cpu-sh4/cpu/dma-register.h
index de2359533994..9a6125eb0079 100644
--- a/arch/sh/include/cpu-sh4/cpu/dma-register.h
+++ b/arch/sh/include/cpu-sh4/cpu/dma-register.h
@@ -23,7 +23,8 @@
#define CHCR_TS_HIGH_MASK 0
#define CHCR_TS_HIGH_SHIFT 0
#elif defined(CONFIG_CPU_SUBTYPE_SH7722) || \
- defined(CONFIG_CPU_SUBTYPE_SH7724)
+ defined(CONFIG_CPU_SUBTYPE_SH7724) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7786)
#define CHCR_TS_LOW_MASK 0x00000018
#define CHCR_TS_LOW_SHIFT 3
#define CHCR_TS_HIGH_MASK 0x00300000
diff --git a/arch/sh/include/cpu-sh4/cpu/mmu_context.h b/arch/sh/include/cpu-sh4/cpu/mmu_context.h
index 5963124c1d4a..e46ec708105a 100644
--- a/arch/sh/include/cpu-sh4/cpu/mmu_context.h
+++ b/arch/sh/include/cpu-sh4/cpu/mmu_context.h
@@ -19,13 +19,26 @@
#define MMUCR 0xFF000010 /* MMU Control Register */
+#define MMU_TLB_ENTRY_SHIFT 8
+
#define MMU_ITLB_ADDRESS_ARRAY 0xF2000000
#define MMU_ITLB_ADDRESS_ARRAY2 0xF2800000
+#define MMU_ITLB_DATA_ARRAY 0xF3000000
+#define MMU_ITLB_DATA_ARRAY2 0xF3800000
+
#define MMU_UTLB_ADDRESS_ARRAY 0xF6000000
#define MMU_UTLB_ADDRESS_ARRAY2 0xF6800000
+#define MMU_UTLB_DATA_ARRAY 0xF7000000
+#define MMU_UTLB_DATA_ARRAY2 0xF7800000
#define MMU_PAGE_ASSOC_BIT 0x80
-#define MMUCR_TI (1<<2)
+#ifdef CONFIG_MMU
+#define MMUCR_AT (1 << 0)
+#else
+#define MMUCR_AT (0)
+#endif
+
+#define MMUCR_TI (1 << 2)
#define MMUCR_URB 0x00FC0000
#define MMUCR_URB_SHIFT 18
@@ -58,7 +71,8 @@
#endif
#define MMU_NTLB_ENTRIES 64
-#define MMU_CONTROL_INIT (0x05|MMUCR_SQMD|MMUCR_ME|MMUCR_SE|MMUCR_AEX)
+#define MMU_CONTROL_INIT (MMUCR_AT | MMUCR_TI | MMUCR_SQMD | \
+ MMUCR_ME | MMUCR_SE | MMUCR_AEX)
#define TRA 0xff000020
#define EXPEVT 0xff000024
diff --git a/arch/sh/include/cpu-sh4/cpu/sh7722.h b/arch/sh/include/cpu-sh4/cpu/sh7722.h
index 48560407cbe1..7a5b8a331b4a 100644
--- a/arch/sh/include/cpu-sh4/cpu/sh7722.h
+++ b/arch/sh/include/cpu-sh4/cpu/sh7722.h
@@ -235,4 +235,19 @@ enum {
HWBLK_NR,
};
+enum {
+ SHDMA_SLAVE_SCIF0_TX,
+ SHDMA_SLAVE_SCIF0_RX,
+ SHDMA_SLAVE_SCIF1_TX,
+ SHDMA_SLAVE_SCIF1_RX,
+ SHDMA_SLAVE_SCIF2_TX,
+ SHDMA_SLAVE_SCIF2_RX,
+ SHDMA_SLAVE_SIUA_TX,
+ SHDMA_SLAVE_SIUA_RX,
+ SHDMA_SLAVE_SIUB_TX,
+ SHDMA_SLAVE_SIUB_RX,
+ SHDMA_SLAVE_SDHI0_TX,
+ SHDMA_SLAVE_SDHI0_RX,
+};
+
#endif /* __ASM_SH7722_H__ */
diff --git a/arch/sh/include/cpu-sh4/cpu/sh7724.h b/arch/sh/include/cpu-sh4/cpu/sh7724.h
index 0cd1f71a1116..fbbf550cc529 100644
--- a/arch/sh/include/cpu-sh4/cpu/sh7724.h
+++ b/arch/sh/include/cpu-sh4/cpu/sh7724.h
@@ -283,4 +283,23 @@ enum {
HWBLK_NR,
};
+enum {
+ SHDMA_SLAVE_SCIF0_TX,
+ SHDMA_SLAVE_SCIF0_RX,
+ SHDMA_SLAVE_SCIF1_TX,
+ SHDMA_SLAVE_SCIF1_RX,
+ SHDMA_SLAVE_SCIF2_TX,
+ SHDMA_SLAVE_SCIF2_RX,
+ SHDMA_SLAVE_SCIF3_TX,
+ SHDMA_SLAVE_SCIF3_RX,
+ SHDMA_SLAVE_SCIF4_TX,
+ SHDMA_SLAVE_SCIF4_RX,
+ SHDMA_SLAVE_SCIF5_TX,
+ SHDMA_SLAVE_SCIF5_RX,
+ SHDMA_SLAVE_SDHI0_TX,
+ SHDMA_SLAVE_SDHI0_RX,
+ SHDMA_SLAVE_SDHI1_TX,
+ SHDMA_SLAVE_SDHI1_RX,
+};
+
#endif /* __ASM_SH7724_H__ */
diff --git a/arch/sh/include/mach-sdk7786/mach/fpga.h b/arch/sh/include/mach-sdk7786/mach/fpga.h
index 2120d67dec70..416b621d94d1 100644
--- a/arch/sh/include/mach-sdk7786/mach/fpga.h
+++ b/arch/sh/include/mach-sdk7786/mach/fpga.h
@@ -42,6 +42,15 @@
#define SCBR_I2CCEN BIT(1) /* CPU I2C master enable */
#define PWRCR 0x1a0
+#define PWRCR_SCISEL0 BIT(0)
+#define PWRCR_SCISEL1 BIT(1)
+#define PWRCR_SCIEN BIT(2) /* Serial port enable */
+#define PWRCR_PDWNACK BIT(5) /* Power down acknowledge */
+#define PWRCR_PDWNREQ BIT(7) /* Power down request */
+#define PWRCR_INT2 BIT(11) /* INT2 connection to power manager */
+#define PWRCR_BUPINIT BIT(13) /* DDR backup initialize */
+#define PWRCR_BKPRST BIT(15) /* Backup power reset */
+
#define SPCBR 0x1b0
#define SPICR 0x1c0
#define SPIDR 0x1d0
diff --git a/arch/sh/kernel/Makefile b/arch/sh/kernel/Makefile
index 02fd3ae8b0ee..650b92f00ee5 100644
--- a/arch/sh/kernel/Makefile
+++ b/arch/sh/kernel/Makefile
@@ -11,7 +11,7 @@ endif
CFLAGS_REMOVE_return_address.o = -pg
-obj-y := debugtraps.o dma-nommu.o dumpstack.o \
+obj-y := clkdev.o debugtraps.o dma-nommu.o dumpstack.o \
idle.o io.o io_generic.o irq.o \
irq_$(BITS).o machvec.o nmi_debug.o process.o \
process_$(BITS).o ptrace_$(BITS).o \
diff --git a/arch/sh/kernel/clkdev.c b/arch/sh/kernel/clkdev.c
new file mode 100644
index 000000000000..defdd6e30908
--- /dev/null
+++ b/arch/sh/kernel/clkdev.c
@@ -0,0 +1,169 @@
+/*
+ * arch/sh/kernel/clkdev.c
+ *
+ * Cloned from arch/arm/common/clkdev.c:
+ *
+ * Copyright (C) 2008 Russell King.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Helper for the clk API to assist looking up a struct clk.
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/mutex.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/bootmem.h>
+#include <linux/mm.h>
+#include <asm/clock.h>
+#include <asm/clkdev.h>
+
+static LIST_HEAD(clocks);
+static DEFINE_MUTEX(clocks_mutex);
+
+/*
+ * Find the correct struct clk for the device and connection ID.
+ * We do slightly fuzzy matching here:
+ * An entry with a NULL ID is assumed to be a wildcard.
+ * If an entry has a device ID, it must match
+ * If an entry has a connection ID, it must match
+ * Then we take the most specific entry - with the following
+ * order of precedence: dev+con > dev only > con only.
+ */
+static struct clk *clk_find(const char *dev_id, const char *con_id)
+{
+ struct clk_lookup *p;
+ struct clk *clk = NULL;
+ int match, best = 0;
+
+ list_for_each_entry(p, &clocks, node) {
+ match = 0;
+ if (p->dev_id) {
+ if (!dev_id || strcmp(p->dev_id, dev_id))
+ continue;
+ match += 2;
+ }
+ if (p->con_id) {
+ if (!con_id || strcmp(p->con_id, con_id))
+ continue;
+ match += 1;
+ }
+ if (match == 0)
+ continue;
+
+ if (match > best) {
+ clk = p->clk;
+ best = match;
+ }
+ }
+ return clk;
+}
+
+struct clk *clk_get_sys(const char *dev_id, const char *con_id)
+{
+ struct clk *clk;
+
+ mutex_lock(&clocks_mutex);
+ clk = clk_find(dev_id, con_id);
+ mutex_unlock(&clocks_mutex);
+
+ return clk ? clk : ERR_PTR(-ENOENT);
+}
+EXPORT_SYMBOL(clk_get_sys);
+
+void clkdev_add(struct clk_lookup *cl)
+{
+ mutex_lock(&clocks_mutex);
+ list_add_tail(&cl->node, &clocks);
+ mutex_unlock(&clocks_mutex);
+}
+EXPORT_SYMBOL(clkdev_add);
+
+void __init clkdev_add_table(struct clk_lookup *cl, size_t num)
+{
+ mutex_lock(&clocks_mutex);
+ while (num--) {
+ list_add_tail(&cl->node, &clocks);
+ cl++;
+ }
+ mutex_unlock(&clocks_mutex);
+}
+
+#define MAX_DEV_ID 20
+#define MAX_CON_ID 16
+
+struct clk_lookup_alloc {
+ struct clk_lookup cl;
+ char dev_id[MAX_DEV_ID];
+ char con_id[MAX_CON_ID];
+};
+
+struct clk_lookup * __init_refok
+clkdev_alloc(struct clk *clk, const char *con_id, const char *dev_fmt, ...)
+{
+ struct clk_lookup_alloc *cla;
+
+ if (!slab_is_available())
+ cla = alloc_bootmem_low_pages(sizeof(*cla));
+ else
+ cla = kzalloc(sizeof(*cla), GFP_KERNEL);
+
+ if (!cla)
+ return NULL;
+
+ cla->cl.clk = clk;
+ if (con_id) {
+ strlcpy(cla->con_id, con_id, sizeof(cla->con_id));
+ cla->cl.con_id = cla->con_id;
+ }
+
+ if (dev_fmt) {
+ va_list ap;
+
+ va_start(ap, dev_fmt);
+ vscnprintf(cla->dev_id, sizeof(cla->dev_id), dev_fmt, ap);
+ cla->cl.dev_id = cla->dev_id;
+ va_end(ap);
+ }
+
+ return &cla->cl;
+}
+EXPORT_SYMBOL(clkdev_alloc);
+
+int clk_add_alias(const char *alias, const char *alias_dev_name, char *id,
+ struct device *dev)
+{
+ struct clk *r = clk_get(dev, id);
+ struct clk_lookup *l;
+
+ if (IS_ERR(r))
+ return PTR_ERR(r);
+
+ l = clkdev_alloc(r, alias, alias_dev_name);
+ clk_put(r);
+ if (!l)
+ return -ENODEV;
+ clkdev_add(l);
+ return 0;
+}
+EXPORT_SYMBOL(clk_add_alias);
+
+/*
+ * clkdev_drop - remove a dynamically allocated clock lookup entry
+ */
+void clkdev_drop(struct clk_lookup *cl)
+{
+ mutex_lock(&clocks_mutex);
+ list_del(&cl->node);
+ mutex_unlock(&clocks_mutex);
+ kfree(cl);
+}
+EXPORT_SYMBOL(clkdev_drop);
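[Editorial note, not part of the patch: a minimal sketch of how SoC/board code might use the clkdev helpers added above. The clock and device IDs ("my_mstp_clk", "sh-sci.0", "sci_fck") are made up for illustration; the clkdev/clk calls themselves are the ones defined in this file and in the reworked clock.c.]

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <asm/clkdev.h>
#include <asm/clock.h>

/* Assume this clock was set up and clk_register()ed elsewhere. */
extern struct clk my_mstp_clk;

static struct clk_lookup my_lookups[] = {
	/* con_id only: acts as a wildcard for any device asking for "my_mstp_clk" */
	{ .con_id = "my_mstp_clk", .clk = &my_mstp_clk },
	/* dev_id + con_id: the most specific match wins in clk_find() */
	{ .dev_id = "sh-sci.0", .con_id = "sci_fck", .clk = &my_mstp_clk },
};

static int __init my_clkdev_init(void)
{
	struct clk *clk;

	clkdev_add_table(my_lookups, ARRAY_SIZE(my_lookups));

	/* Consumers now look clocks up by device/connection ID, not by clk->name. */
	clk = clk_get_sys("sh-sci.0", "sci_fck");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* ... use the clock via the usual consumer API ... */

	clk_put(clk);
	return 0;
}
arch_initcall(my_clkdev_init);
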
diff --git a/arch/sh/kernel/cpu/Makefile b/arch/sh/kernel/cpu/Makefile
index 0e48bc61c272..4edcb60a1355 100644
--- a/arch/sh/kernel/cpu/Makefile
+++ b/arch/sh/kernel/cpu/Makefile
@@ -16,7 +16,7 @@ obj-$(CONFIG_ARCH_SHMOBILE) += shmobile/
# Common interfaces.
obj-$(CONFIG_SH_ADC) += adc.o
-obj-$(CONFIG_SH_CLK_CPG) += clock-cpg.o
+obj-$(CONFIG_SH_CLK_CPG_LEGACY) += clock-cpg.o
obj-$(CONFIG_SH_FPU) += fpu.o
obj-$(CONFIG_SH_FPU_EMU) += fpu.o
diff --git a/arch/sh/kernel/cpu/clock-cpg.c b/arch/sh/kernel/cpu/clock-cpg.c
index eed5eaff96ba..e2f63d68da51 100644
--- a/arch/sh/kernel/cpu/clock-cpg.c
+++ b/arch/sh/kernel/cpu/clock-cpg.c
@@ -2,317 +2,25 @@
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/io.h>
+#include <asm/clkdev.h>
#include <asm/clock.h>
-static int sh_clk_mstp32_enable(struct clk *clk)
-{
- __raw_writel(__raw_readl(clk->enable_reg) & ~(1 << clk->enable_bit),
- clk->enable_reg);
- return 0;
-}
-
-static void sh_clk_mstp32_disable(struct clk *clk)
-{
- __raw_writel(__raw_readl(clk->enable_reg) | (1 << clk->enable_bit),
- clk->enable_reg);
-}
-
-static struct clk_ops sh_clk_mstp32_clk_ops = {
- .enable = sh_clk_mstp32_enable,
- .disable = sh_clk_mstp32_disable,
- .recalc = followparent_recalc,
-};
-
-int __init sh_clk_mstp32_register(struct clk *clks, int nr)
-{
- struct clk *clkp;
- int ret = 0;
- int k;
-
- for (k = 0; !ret && (k < nr); k++) {
- clkp = clks + k;
- clkp->ops = &sh_clk_mstp32_clk_ops;
- ret |= clk_register(clkp);
- }
-
- return ret;
-}
-
-static long sh_clk_div_round_rate(struct clk *clk, unsigned long rate)
-{
- return clk_rate_table_round(clk, clk->freq_table, rate);
-}
-
-static int sh_clk_div6_divisors[64] = {
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
- 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
- 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
- 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
-};
-
-static struct clk_div_mult_table sh_clk_div6_table = {
- .divisors = sh_clk_div6_divisors,
- .nr_divisors = ARRAY_SIZE(sh_clk_div6_divisors),
-};
-
-static unsigned long sh_clk_div6_recalc(struct clk *clk)
-{
- struct clk_div_mult_table *table = &sh_clk_div6_table;
- unsigned int idx;
-
- clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
- table, NULL);
-
- idx = __raw_readl(clk->enable_reg) & 0x003f;
-
- return clk->freq_table[idx].frequency;
-}
-
-static int sh_clk_div6_set_rate(struct clk *clk,
- unsigned long rate, int algo_id)
-{
- unsigned long value;
- int idx;
-
- idx = clk_rate_table_find(clk, clk->freq_table, rate);
- if (idx < 0)
- return idx;
-
- value = __raw_readl(clk->enable_reg);
- value &= ~0x3f;
- value |= idx;
- __raw_writel(value, clk->enable_reg);
- return 0;
-}
-
-static int sh_clk_div6_enable(struct clk *clk)
-{
- unsigned long value;
- int ret;
-
- ret = sh_clk_div6_set_rate(clk, clk->rate, 0);
- if (ret == 0) {
- value = __raw_readl(clk->enable_reg);
- value &= ~0x100; /* clear stop bit to enable clock */
- __raw_writel(value, clk->enable_reg);
- }
- return ret;
-}
-
-static void sh_clk_div6_disable(struct clk *clk)
-{
- unsigned long value;
-
- value = __raw_readl(clk->enable_reg);
- value |= 0x100; /* stop clock */
- value |= 0x3f; /* VDIV bits must be non-zero, overwrite divider */
- __raw_writel(value, clk->enable_reg);
-}
-
-static struct clk_ops sh_clk_div6_clk_ops = {
- .recalc = sh_clk_div6_recalc,
- .round_rate = sh_clk_div_round_rate,
- .set_rate = sh_clk_div6_set_rate,
- .enable = sh_clk_div6_enable,
- .disable = sh_clk_div6_disable,
-};
-
-int __init sh_clk_div6_register(struct clk *clks, int nr)
-{
- struct clk *clkp;
- void *freq_table;
- int nr_divs = sh_clk_div6_table.nr_divisors;
- int freq_table_size = sizeof(struct cpufreq_frequency_table);
- int ret = 0;
- int k;
-
- freq_table_size *= (nr_divs + 1);
- freq_table = kzalloc(freq_table_size * nr, GFP_KERNEL);
- if (!freq_table) {
- pr_err("sh_clk_div6_register: unable to alloc memory\n");
- return -ENOMEM;
- }
-
- for (k = 0; !ret && (k < nr); k++) {
- clkp = clks + k;
-
- clkp->ops = &sh_clk_div6_clk_ops;
- clkp->id = -1;
- clkp->freq_table = freq_table + (k * freq_table_size);
- clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;
-
- ret = clk_register(clkp);
- }
-
- return ret;
-}
-
-static unsigned long sh_clk_div4_recalc(struct clk *clk)
-{
- struct clk_div4_table *d4t = clk->priv;
- struct clk_div_mult_table *table = d4t->div_mult_table;
- unsigned int idx;
-
- clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
- table, &clk->arch_flags);
-
- idx = (__raw_readl(clk->enable_reg) >> clk->enable_bit) & 0x000f;
-
- return clk->freq_table[idx].frequency;
-}
-
-static int sh_clk_div4_set_parent(struct clk *clk, struct clk *parent)
-{
- struct clk_div4_table *d4t = clk->priv;
- struct clk_div_mult_table *table = d4t->div_mult_table;
- u32 value;
- int ret;
-
- if (!strcmp("pll_clk", parent->name))
- value = __raw_readl(clk->enable_reg) & ~(1 << 7);
- else
- value = __raw_readl(clk->enable_reg) | (1 << 7);
-
- ret = clk_reparent(clk, parent);
- if (ret < 0)
- return ret;
-
- __raw_writel(value, clk->enable_reg);
-
- /* Rebiuld the frequency table */
- clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
- table, &clk->arch_flags);
-
- return 0;
-}
-
-static int sh_clk_div4_set_rate(struct clk *clk, unsigned long rate, int algo_id)
-{
- struct clk_div4_table *d4t = clk->priv;
- unsigned long value;
- int idx = clk_rate_table_find(clk, clk->freq_table, rate);
- if (idx < 0)
- return idx;
-
- value = __raw_readl(clk->enable_reg);
- value &= ~(0xf << clk->enable_bit);
- value |= (idx << clk->enable_bit);
- __raw_writel(value, clk->enable_reg);
-
- if (d4t->kick)
- d4t->kick(clk);
-
- return 0;
-}
-
-static int sh_clk_div4_enable(struct clk *clk)
-{
- __raw_writel(__raw_readl(clk->enable_reg) & ~(1 << 8), clk->enable_reg);
- return 0;
-}
-
-static void sh_clk_div4_disable(struct clk *clk)
-{
- __raw_writel(__raw_readl(clk->enable_reg) | (1 << 8), clk->enable_reg);
-}
-
-static struct clk_ops sh_clk_div4_clk_ops = {
- .recalc = sh_clk_div4_recalc,
- .set_rate = sh_clk_div4_set_rate,
- .round_rate = sh_clk_div_round_rate,
-};
-
-static struct clk_ops sh_clk_div4_enable_clk_ops = {
- .recalc = sh_clk_div4_recalc,
- .set_rate = sh_clk_div4_set_rate,
- .round_rate = sh_clk_div_round_rate,
- .enable = sh_clk_div4_enable,
- .disable = sh_clk_div4_disable,
-};
-
-static struct clk_ops sh_clk_div4_reparent_clk_ops = {
- .recalc = sh_clk_div4_recalc,
- .set_rate = sh_clk_div4_set_rate,
- .round_rate = sh_clk_div_round_rate,
- .enable = sh_clk_div4_enable,
- .disable = sh_clk_div4_disable,
- .set_parent = sh_clk_div4_set_parent,
-};
-
-static int __init sh_clk_div4_register_ops(struct clk *clks, int nr,
- struct clk_div4_table *table, struct clk_ops *ops)
-{
- struct clk *clkp;
- void *freq_table;
- int nr_divs = table->div_mult_table->nr_divisors;
- int freq_table_size = sizeof(struct cpufreq_frequency_table);
- int ret = 0;
- int k;
-
- freq_table_size *= (nr_divs + 1);
- freq_table = kzalloc(freq_table_size * nr, GFP_KERNEL);
- if (!freq_table) {
- pr_err("sh_clk_div4_register: unable to alloc memory\n");
- return -ENOMEM;
- }
-
- for (k = 0; !ret && (k < nr); k++) {
- clkp = clks + k;
-
- clkp->ops = ops;
- clkp->id = -1;
- clkp->priv = table;
-
- clkp->freq_table = freq_table + (k * freq_table_size);
- clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;
-
- ret = clk_register(clkp);
- }
-
- return ret;
-}
-
-int __init sh_clk_div4_register(struct clk *clks, int nr,
- struct clk_div4_table *table)
-{
- return sh_clk_div4_register_ops(clks, nr, table, &sh_clk_div4_clk_ops);
-}
-
-int __init sh_clk_div4_enable_register(struct clk *clks, int nr,
- struct clk_div4_table *table)
-{
- return sh_clk_div4_register_ops(clks, nr, table,
- &sh_clk_div4_enable_clk_ops);
-}
-
-int __init sh_clk_div4_reparent_register(struct clk *clks, int nr,
- struct clk_div4_table *table)
-{
- return sh_clk_div4_register_ops(clks, nr, table,
- &sh_clk_div4_reparent_clk_ops);
-}
-
-#ifdef CONFIG_SH_CLK_CPG_LEGACY
static struct clk master_clk = {
- .name = "master_clk",
.flags = CLK_ENABLE_ON_INIT,
.rate = CONFIG_SH_PCLK_FREQ,
};
static struct clk peripheral_clk = {
- .name = "peripheral_clk",
.parent = &master_clk,
.flags = CLK_ENABLE_ON_INIT,
};
static struct clk bus_clk = {
- .name = "bus_clk",
.parent = &master_clk,
.flags = CLK_ENABLE_ON_INIT,
};
static struct clk cpu_clk = {
- .name = "cpu_clk",
.parent = &master_clk,
.flags = CLK_ENABLE_ON_INIT,
};
@@ -327,6 +35,16 @@ static struct clk *onchip_clocks[] = {
&cpu_clk,
};
+#define CLKDEV_CON_ID(_id, _clk) { .con_id = _id, .clk = _clk }
+
+static struct clk_lookup lookups[] = {
+ /* main clocks */
+ CLKDEV_CON_ID("master_clk", &master_clk),
+ CLKDEV_CON_ID("peripheral_clk", &peripheral_clk),
+ CLKDEV_CON_ID("bus_clk", &bus_clk),
+ CLKDEV_CON_ID("cpu_clk", &cpu_clk),
+};
+
int __init __deprecated cpg_clk_init(void)
{
int i, ret = 0;
@@ -338,6 +56,13 @@ int __init __deprecated cpg_clk_init(void)
ret |= clk_register(clk);
}
+ clkdev_add_table(lookups, ARRAY_SIZE(lookups));
+
+ clk_add_alias("tmu_fck", NULL, "peripheral_clk", NULL);
+ clk_add_alias("mtu2_fck", NULL, "peripheral_clk", NULL);
+ clk_add_alias("cmt_fck", NULL, "peripheral_clk", NULL);
+ clk_add_alias("sci_ick", NULL, "peripheral_clk", NULL);
+
return ret;
}
@@ -349,4 +74,3 @@ int __init __weak arch_clk_init(void)
{
return cpg_clk_init();
}
-#endif /* CONFIG_SH_CPG_CLK_LEGACY */
diff --git a/arch/sh/kernel/cpu/clock.c b/arch/sh/kernel/cpu/clock.c
index e9fa1bfed53e..50f887dda565 100644
--- a/arch/sh/kernel/cpu/clock.c
+++ b/arch/sh/kernel/cpu/clock.c
@@ -10,558 +10,16 @@
*
* Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com>
*
- * With clkdev bits:
- *
- * Copyright (C) 2008 Russell King.
- *
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/list.h>
-#include <linux/kobject.h>
-#include <linux/sysdev.h>
-#include <linux/seq_file.h>
-#include <linux/err.h>
-#include <linux/platform_device.h>
-#include <linux/debugfs.h>
-#include <linux/cpufreq.h>
+#include <linux/clk.h>
#include <asm/clock.h>
#include <asm/machvec.h>
-static LIST_HEAD(clock_list);
-static DEFINE_SPINLOCK(clock_lock);
-static DEFINE_MUTEX(clock_list_sem);
-
-void clk_rate_table_build(struct clk *clk,
- struct cpufreq_frequency_table *freq_table,
- int nr_freqs,
- struct clk_div_mult_table *src_table,
- unsigned long *bitmap)
-{
- unsigned long mult, div;
- unsigned long freq;
- int i;
-
- for (i = 0; i < nr_freqs; i++) {
- div = 1;
- mult = 1;
-
- if (src_table->divisors && i < src_table->nr_divisors)
- div = src_table->divisors[i];
-
- if (src_table->multipliers && i < src_table->nr_multipliers)
- mult = src_table->multipliers[i];
-
- if (!div || !mult || (bitmap && !test_bit(i, bitmap)))
- freq = CPUFREQ_ENTRY_INVALID;
- else
- freq = clk->parent->rate * mult / div;
-
- freq_table[i].index = i;
- freq_table[i].frequency = freq;
- }
-
- /* Termination entry */
- freq_table[i].index = i;
- freq_table[i].frequency = CPUFREQ_TABLE_END;
-}
-
-long clk_rate_table_round(struct clk *clk,
- struct cpufreq_frequency_table *freq_table,
- unsigned long rate)
-{
- unsigned long rate_error, rate_error_prev = ~0UL;
- unsigned long rate_best_fit = rate;
- unsigned long highest, lowest;
- int i;
-
- highest = lowest = 0;
-
- for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
- unsigned long freq = freq_table[i].frequency;
-
- if (freq == CPUFREQ_ENTRY_INVALID)
- continue;
-
- if (freq > highest)
- highest = freq;
- if (freq < lowest)
- lowest = freq;
-
- rate_error = abs(freq - rate);
- if (rate_error < rate_error_prev) {
- rate_best_fit = freq;
- rate_error_prev = rate_error;
- }
-
- if (rate_error == 0)
- break;
- }
-
- if (rate >= highest)
- rate_best_fit = highest;
- if (rate <= lowest)
- rate_best_fit = lowest;
-
- return rate_best_fit;
-}
-
-int clk_rate_table_find(struct clk *clk,
- struct cpufreq_frequency_table *freq_table,
- unsigned long rate)
-{
- int i;
-
- for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
- unsigned long freq = freq_table[i].frequency;
-
- if (freq == CPUFREQ_ENTRY_INVALID)
- continue;
-
- if (freq == rate)
- return i;
- }
-
- return -ENOENT;
-}
-
-/* Used for clocks that always have same value as the parent clock */
-unsigned long followparent_recalc(struct clk *clk)
-{
- return clk->parent ? clk->parent->rate : 0;
-}
-
-int clk_reparent(struct clk *child, struct clk *parent)
-{
- list_del_init(&child->sibling);
- if (parent)
- list_add(&child->sibling, &parent->children);
- child->parent = parent;
-
- /* now do the debugfs renaming to reattach the child
- to the proper parent */
-
- return 0;
-}
-
-/* Propagate rate to children */
-void propagate_rate(struct clk *tclk)
-{
- struct clk *clkp;
-
- list_for_each_entry(clkp, &tclk->children, sibling) {
- if (clkp->ops && clkp->ops->recalc)
- clkp->rate = clkp->ops->recalc(clkp);
-
- propagate_rate(clkp);
- }
-}
-
-static void __clk_disable(struct clk *clk)
-{
- if (clk->usecount == 0) {
- printk(KERN_ERR "Trying disable clock %s with 0 usecount\n",
- clk->name);
- WARN_ON(1);
- return;
- }
-
- if (!(--clk->usecount)) {
- if (likely(clk->ops && clk->ops->disable))
- clk->ops->disable(clk);
- if (likely(clk->parent))
- __clk_disable(clk->parent);
- }
-}
-
-void clk_disable(struct clk *clk)
-{
- unsigned long flags;
-
- if (!clk)
- return;
-
- spin_lock_irqsave(&clock_lock, flags);
- __clk_disable(clk);
- spin_unlock_irqrestore(&clock_lock, flags);
-}
-EXPORT_SYMBOL_GPL(clk_disable);
-
-static int __clk_enable(struct clk *clk)
-{
- int ret = 0;
-
- if (clk->usecount++ == 0) {
- if (clk->parent) {
- ret = __clk_enable(clk->parent);
- if (unlikely(ret))
- goto err;
- }
-
- if (clk->ops && clk->ops->enable) {
- ret = clk->ops->enable(clk);
- if (ret) {
- if (clk->parent)
- __clk_disable(clk->parent);
- goto err;
- }
- }
- }
-
- return ret;
-err:
- clk->usecount--;
- return ret;
-}
-
-int clk_enable(struct clk *clk)
-{
- unsigned long flags;
- int ret;
-
- if (!clk)
- return -EINVAL;
-
- spin_lock_irqsave(&clock_lock, flags);
- ret = __clk_enable(clk);
- spin_unlock_irqrestore(&clock_lock, flags);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(clk_enable);
-
-static LIST_HEAD(root_clks);
-
-/**
- * recalculate_root_clocks - recalculate and propagate all root clocks
- *
- * Recalculates all root clocks (clocks with no parent), which if the
- * clock's .recalc is set correctly, should also propagate their rates.
- * Called at init.
- */
-void recalculate_root_clocks(void)
-{
- struct clk *clkp;
-
- list_for_each_entry(clkp, &root_clks, sibling) {
- if (clkp->ops && clkp->ops->recalc)
- clkp->rate = clkp->ops->recalc(clkp);
- propagate_rate(clkp);
- }
-}
-
-int clk_register(struct clk *clk)
-{
- if (clk == NULL || IS_ERR(clk))
- return -EINVAL;
-
- /*
- * trap out already registered clocks
- */
- if (clk->node.next || clk->node.prev)
- return 0;
-
- mutex_lock(&clock_list_sem);
-
- INIT_LIST_HEAD(&clk->children);
- clk->usecount = 0;
-
- if (clk->parent)
- list_add(&clk->sibling, &clk->parent->children);
- else
- list_add(&clk->sibling, &root_clks);
-
- list_add(&clk->node, &clock_list);
- if (clk->ops && clk->ops->init)
- clk->ops->init(clk);
- mutex_unlock(&clock_list_sem);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(clk_register);
-
-void clk_unregister(struct clk *clk)
-{
- mutex_lock(&clock_list_sem);
- list_del(&clk->sibling);
- list_del(&clk->node);
- mutex_unlock(&clock_list_sem);
-}
-EXPORT_SYMBOL_GPL(clk_unregister);
-
-static void clk_enable_init_clocks(void)
-{
- struct clk *clkp;
-
- list_for_each_entry(clkp, &clock_list, node)
- if (clkp->flags & CLK_ENABLE_ON_INIT)
- clk_enable(clkp);
-}
-
-unsigned long clk_get_rate(struct clk *clk)
-{
- return clk->rate;
-}
-EXPORT_SYMBOL_GPL(clk_get_rate);
-
-int clk_set_rate(struct clk *clk, unsigned long rate)
-{
- return clk_set_rate_ex(clk, rate, 0);
-}
-EXPORT_SYMBOL_GPL(clk_set_rate);
-
-int clk_set_rate_ex(struct clk *clk, unsigned long rate, int algo_id)
-{
- int ret = -EOPNOTSUPP;
- unsigned long flags;
-
- spin_lock_irqsave(&clock_lock, flags);
-
- if (likely(clk->ops && clk->ops->set_rate)) {
- ret = clk->ops->set_rate(clk, rate, algo_id);
- if (ret != 0)
- goto out_unlock;
- } else {
- clk->rate = rate;
- ret = 0;
- }
-
- if (clk->ops && clk->ops->recalc)
- clk->rate = clk->ops->recalc(clk);
-
- propagate_rate(clk);
-
-out_unlock:
- spin_unlock_irqrestore(&clock_lock, flags);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(clk_set_rate_ex);
-
-int clk_set_parent(struct clk *clk, struct clk *parent)
-{
- unsigned long flags;
- int ret = -EINVAL;
-
- if (!parent || !clk)
- return ret;
- if (clk->parent == parent)
- return 0;
-
- spin_lock_irqsave(&clock_lock, flags);
- if (clk->usecount == 0) {
- if (clk->ops->set_parent)
- ret = clk->ops->set_parent(clk, parent);
- else
- ret = clk_reparent(clk, parent);
-
- if (ret == 0) {
- pr_debug("clock: set parent of %s to %s (new rate %ld)\n",
- clk->name, clk->parent->name, clk->rate);
- if (clk->ops->recalc)
- clk->rate = clk->ops->recalc(clk);
- propagate_rate(clk);
- }
- } else
- ret = -EBUSY;
- spin_unlock_irqrestore(&clock_lock, flags);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(clk_set_parent);
-
-struct clk *clk_get_parent(struct clk *clk)
-{
- return clk->parent;
-}
-EXPORT_SYMBOL_GPL(clk_get_parent);
-
-long clk_round_rate(struct clk *clk, unsigned long rate)
-{
- if (likely(clk->ops && clk->ops->round_rate)) {
- unsigned long flags, rounded;
-
- spin_lock_irqsave(&clock_lock, flags);
- rounded = clk->ops->round_rate(clk, rate);
- spin_unlock_irqrestore(&clock_lock, flags);
-
- return rounded;
- }
-
- return clk_get_rate(clk);
-}
-EXPORT_SYMBOL_GPL(clk_round_rate);
-
-/*
- * Find the correct struct clk for the device and connection ID.
- * We do slightly fuzzy matching here:
- * An entry with a NULL ID is assumed to be a wildcard.
- * If an entry has a device ID, it must match
- * If an entry has a connection ID, it must match
- * Then we take the most specific entry - with the following
- * order of precedence: dev+con > dev only > con only.
- */
-static struct clk *clk_find(const char *dev_id, const char *con_id)
-{
- struct clk_lookup *p;
- struct clk *clk = NULL;
- int match, best = 0;
-
- list_for_each_entry(p, &clock_list, node) {
- match = 0;
- if (p->dev_id) {
- if (!dev_id || strcmp(p->dev_id, dev_id))
- continue;
- match += 2;
- }
- if (p->con_id) {
- if (!con_id || strcmp(p->con_id, con_id))
- continue;
- match += 1;
- }
- if (match == 0)
- continue;
-
- if (match > best) {
- clk = p->clk;
- best = match;
- }
- }
- return clk;
-}
-
-struct clk *clk_get_sys(const char *dev_id, const char *con_id)
-{
- struct clk *clk;
-
- mutex_lock(&clock_list_sem);
- clk = clk_find(dev_id, con_id);
- mutex_unlock(&clock_list_sem);
-
- return clk ? clk : ERR_PTR(-ENOENT);
-}
-EXPORT_SYMBOL_GPL(clk_get_sys);
-
-/*
- * Returns a clock. Note that we first try to use device id on the bus
- * and clock name. If this fails, we try to use clock name only.
- */
-struct clk *clk_get(struct device *dev, const char *id)
-{
- const char *dev_id = dev ? dev_name(dev) : NULL;
- struct clk *p, *clk = ERR_PTR(-ENOENT);
- int idno;
-
- clk = clk_get_sys(dev_id, id);
- if (clk && !IS_ERR(clk))
- return clk;
-
- if (dev == NULL || dev->bus != &platform_bus_type)
- idno = -1;
- else
- idno = to_platform_device(dev)->id;
-
- mutex_lock(&clock_list_sem);
- list_for_each_entry(p, &clock_list, node) {
- if (p->id == idno &&
- strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
- clk = p;
- goto found;
- }
- }
-
- list_for_each_entry(p, &clock_list, node) {
- if (strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
- clk = p;
- break;
- }
- }
-
-found:
- mutex_unlock(&clock_list_sem);
-
- return clk;
-}
-EXPORT_SYMBOL_GPL(clk_get);
-
-void clk_put(struct clk *clk)
-{
- if (clk && !IS_ERR(clk))
- module_put(clk->owner);
-}
-EXPORT_SYMBOL_GPL(clk_put);
-
-#ifdef CONFIG_PM
-static int clks_sysdev_suspend(struct sys_device *dev, pm_message_t state)
-{
- static pm_message_t prev_state;
- struct clk *clkp;
-
- switch (state.event) {
- case PM_EVENT_ON:
- /* Resumeing from hibernation */
- if (prev_state.event != PM_EVENT_FREEZE)
- break;
-
- list_for_each_entry(clkp, &clock_list, node) {
- if (likely(clkp->ops)) {
- unsigned long rate = clkp->rate;
-
- if (likely(clkp->ops->set_parent))
- clkp->ops->set_parent(clkp,
- clkp->parent);
- if (likely(clkp->ops->set_rate))
- clkp->ops->set_rate(clkp,
- rate, NO_CHANGE);
- else if (likely(clkp->ops->recalc))
- clkp->rate = clkp->ops->recalc(clkp);
- }
- }
- break;
- case PM_EVENT_FREEZE:
- break;
- case PM_EVENT_SUSPEND:
- break;
- }
-
- prev_state = state;
- return 0;
-}
-
-static int clks_sysdev_resume(struct sys_device *dev)
-{
- return clks_sysdev_suspend(dev, PMSG_ON);
-}
-
-static struct sysdev_class clks_sysdev_class = {
- .name = "clks",
-};
-
-static struct sysdev_driver clks_sysdev_driver = {
- .suspend = clks_sysdev_suspend,
- .resume = clks_sysdev_resume,
-};
-
-static struct sys_device clks_sysdev_dev = {
- .cls = &clks_sysdev_class,
-};
-
-static int __init clk_sysdev_init(void)
-{
- sysdev_class_register(&clks_sysdev_class);
- sysdev_driver_register(&clks_sysdev_class, &clks_sysdev_driver);
- sysdev_register(&clks_sysdev_dev);
-
- return 0;
-}
-subsys_initcall(clk_sysdev_init);
-#endif
-
int __init clk_init(void)
{
int ret;
@@ -591,89 +49,19 @@ int __init clk_init(void)
}
/*
- * debugfs support to trace clock tree hierarchy and attributes
+ * Returns a clock. Note that we first try to use device id on the bus
+ * and clock name. If this fails, we try to use clock name only.
*/
-static struct dentry *clk_debugfs_root;
-
-static int clk_debugfs_register_one(struct clk *c)
+struct clk *clk_get(struct device *dev, const char *con_id)
{
- int err;
- struct dentry *d, *child, *child_tmp;
- struct clk *pa = c->parent;
- char s[255];
- char *p = s;
-
- p += sprintf(p, "%s", c->name);
- if (c->id >= 0)
- sprintf(p, ":%d", c->id);
- d = debugfs_create_dir(s, pa ? pa->dentry : clk_debugfs_root);
- if (!d)
- return -ENOMEM;
- c->dentry = d;
-
- d = debugfs_create_u8("usecount", S_IRUGO, c->dentry, (u8 *)&c->usecount);
- if (!d) {
- err = -ENOMEM;
- goto err_out;
- }
- d = debugfs_create_u32("rate", S_IRUGO, c->dentry, (u32 *)&c->rate);
- if (!d) {
- err = -ENOMEM;
- goto err_out;
- }
- d = debugfs_create_x32("flags", S_IRUGO, c->dentry, (u32 *)&c->flags);
- if (!d) {
- err = -ENOMEM;
- goto err_out;
- }
- return 0;
+ const char *dev_id = dev ? dev_name(dev) : NULL;
-err_out:
- d = c->dentry;
- list_for_each_entry_safe(child, child_tmp, &d->d_subdirs, d_u.d_child)
- debugfs_remove(child);
- debugfs_remove(c->dentry);
- return err;
+ return clk_get_sys(dev_id, con_id);
}
+EXPORT_SYMBOL_GPL(clk_get);
-static int clk_debugfs_register(struct clk *c)
+void clk_put(struct clk *clk)
{
- int err;
- struct clk *pa = c->parent;
-
- if (pa && !pa->dentry) {
- err = clk_debugfs_register(pa);
- if (err)
- return err;
- }
-
- if (!c->dentry) {
- err = clk_debugfs_register_one(c);
- if (err)
- return err;
- }
- return 0;
}
+EXPORT_SYMBOL_GPL(clk_put);
-static int __init clk_debugfs_init(void)
-{
- struct clk *c;
- struct dentry *d;
- int err;
-
- d = debugfs_create_dir("clock", NULL);
- if (!d)
- return -ENOMEM;
- clk_debugfs_root = d;
-
- list_for_each_entry(c, &clock_list, node) {
- err = clk_debugfs_register(c);
- if (err)
- goto err_out;
- }
- return 0;
-err_out:
- debugfs_remove(clk_debugfs_root); /* REVISIT: Cleanup correctly */
- return err;
-}
-late_initcall(clk_debugfs_init);
diff --git a/arch/sh/kernel/cpu/hwblk.c b/arch/sh/kernel/cpu/hwblk.c
index 67a1e811cfe8..3e985aae5d91 100644
--- a/arch/sh/kernel/cpu/hwblk.c
+++ b/arch/sh/kernel/cpu/hwblk.c
@@ -146,6 +146,11 @@ int __init sh_hwblk_clk_register(struct clk *clks, int nr)
for (k = 0; !ret && (k < nr); k++) {
clkp = clks + k;
+
+ /* skip over clocks using hwblk 0 (HWBLK_UNKNOWN) */
+ if (!clkp->arch_flags)
+ continue;
+
clkp->ops = &sh_hwblk_clk_ops;
ret |= clk_register(clkp);
}
diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c
index c736422344eb..97661061ff20 100644
--- a/arch/sh/kernel/cpu/init.c
+++ b/arch/sh/kernel/cpu/init.c
@@ -43,9 +43,9 @@
* peripherals (nofpu, nodsp, and so forth).
*/
#define onchip_setup(x) \
-static int x##_disabled __initdata = !cpu_has_##x; \
+static int x##_disabled __cpuinitdata = !cpu_has_##x; \
\
-static int __init x##_setup(char *opts) \
+static int __cpuinit x##_setup(char *opts) \
{ \
x##_disabled = 1; \
return 1; \
@@ -59,7 +59,7 @@ onchip_setup(dsp);
#define CPUOPM 0xff2f0000
#define CPUOPM_RABD (1 << 5)
-static void __init speculative_execution_init(void)
+static void __cpuinit speculative_execution_init(void)
{
/* Clear RABD */
__raw_writel(__raw_readl(CPUOPM) & ~CPUOPM_RABD, CPUOPM);
@@ -78,7 +78,7 @@ static void __init speculative_execution_init(void)
#define EXPMASK_BRDSSLP (1 << 1)
#define EXPMASK_MMCAW (1 << 4)
-static void __init expmask_init(void)
+static void __cpuinit expmask_init(void)
{
unsigned long expmask = __raw_readl(EXPMASK);
@@ -217,7 +217,7 @@ static void detect_cache_shape(void)
l2_cache_shape = -1; /* No S-cache */
}
-static void __init fpu_init(void)
+static void __cpuinit fpu_init(void)
{
/* Disable the FPU */
if (fpu_disabled && (current_cpu_data.flags & CPU_HAS_FPU)) {
@@ -230,7 +230,7 @@ static void __init fpu_init(void)
}
#ifdef CONFIG_SH_DSP
-static void __init release_dsp(void)
+static void __cpuinit release_dsp(void)
{
unsigned long sr;
@@ -244,7 +244,7 @@ static void __init release_dsp(void)
);
}
-static void __init dsp_init(void)
+static void __cpuinit dsp_init(void)
{
unsigned long sr;
@@ -276,11 +276,11 @@ static void __init dsp_init(void)
release_dsp();
}
#else
-static inline void __init dsp_init(void) { }
+static inline void __cpuinit dsp_init(void) { }
#endif /* CONFIG_SH_DSP */
/**
- * sh_cpu_init
+ * cpu_init
*
* This is our initial entry point for each CPU, and is invoked on the
* boot CPU prior to calling start_kernel(). For SMP, a combination of
@@ -293,14 +293,14 @@ static inline void __init dsp_init(void) { }
* subtype and initial configuration will all be done.
*
* Each processor family is still responsible for doing its own probing
- * and cache configuration in detect_cpu_and_cache_system().
+ * and cache configuration in cpu_probe().
*/
-asmlinkage void __init sh_cpu_init(void)
+asmlinkage void __cpuinit cpu_init(void)
{
current_thread_info()->cpu = hard_smp_processor_id();
/* First, probe the CPU */
- detect_cpu_and_cache_system();
+ cpu_probe();
if (current_cpu_data.type == CPU_SH_NONE)
panic("Unknown CPU");
diff --git a/arch/sh/kernel/cpu/sh2/probe.c b/arch/sh/kernel/cpu/sh2/probe.c
index 1db6d8883888..bab8e75958ae 100644
--- a/arch/sh/kernel/cpu/sh2/probe.c
+++ b/arch/sh/kernel/cpu/sh2/probe.c
@@ -13,7 +13,7 @@
#include <asm/processor.h>
#include <asm/cache.h>
-int __init detect_cpu_and_cache_system(void)
+void __cpuinit cpu_probe(void)
{
#if defined(CONFIG_CPU_SUBTYPE_SH7619)
boot_cpu_data.type = CPU_SH7619;
@@ -30,7 +30,4 @@ int __init detect_cpu_and_cache_system(void)
boot_cpu_data.dcache.flags |= SH_CACHE_COMBINED;
boot_cpu_data.icache = boot_cpu_data.dcache;
boot_cpu_data.family = CPU_FAMILY_SH2;
-
- return 0;
}
-
diff --git a/arch/sh/kernel/cpu/sh2/setup-sh7619.c b/arch/sh/kernel/cpu/sh2/setup-sh7619.c
index 114c7cee7184..c3638516bffc 100644
--- a/arch/sh/kernel/cpu/sh2/setup-sh7619.c
+++ b/arch/sh/kernel/cpu/sh2/setup-sh7619.c
@@ -128,17 +128,14 @@ static struct platform_device eth_device = {
};
static struct sh_timer_config cmt0_platform_data = {
- .name = "CMT0",
.channel_offset = 0x02,
.timer_bit = 0,
- .clk = "peripheral_clk",
.clockevent_rating = 125,
.clocksource_rating = 0, /* disabled due to code generation issues */
};
static struct resource cmt0_resources[] = {
[0] = {
- .name = "CMT0",
.start = 0xf84a0072,
.end = 0xf84a0077,
.flags = IORESOURCE_MEM,
@@ -160,17 +157,14 @@ static struct platform_device cmt0_device = {
};
static struct sh_timer_config cmt1_platform_data = {
- .name = "CMT1",
.channel_offset = 0x08,
.timer_bit = 1,
- .clk = "peripheral_clk",
.clockevent_rating = 125,
.clocksource_rating = 0, /* disabled due to code generation issues */
};
static struct resource cmt1_resources[] = {
[0] = {
- .name = "CMT1",
.start = 0xf84a0078,
.end = 0xf84a007d,
.flags = IORESOURCE_MEM,
diff --git a/arch/sh/kernel/cpu/sh2a/probe.c b/arch/sh/kernel/cpu/sh2a/probe.c
index 6825d6507164..48e97a2a0c8d 100644
--- a/arch/sh/kernel/cpu/sh2a/probe.c
+++ b/arch/sh/kernel/cpu/sh2a/probe.c
@@ -13,7 +13,7 @@
#include <asm/processor.h>
#include <asm/cache.h>
-int __init detect_cpu_and_cache_system(void)
+void __cpuinit cpu_probe(void)
{
boot_cpu_data.family = CPU_FAMILY_SH2A;
@@ -51,6 +51,4 @@ int __init detect_cpu_and_cache_system(void)
* on the cache info.
*/
boot_cpu_data.icache = boot_cpu_data.dcache;
-
- return 0;
}
diff --git a/arch/sh/kernel/cpu/sh2a/setup-mxg.c b/arch/sh/kernel/cpu/sh2a/setup-mxg.c
index 8f669dc9b0da..6c96ea02bf8d 100644
--- a/arch/sh/kernel/cpu/sh2a/setup-mxg.c
+++ b/arch/sh/kernel/cpu/sh2a/setup-mxg.c
@@ -115,16 +115,13 @@ static DECLARE_INTC_DESC(intc_desc, "mxg", vectors, groups,
mask_registers, prio_registers, NULL);
static struct sh_timer_config mtu2_0_platform_data = {
- .name = "MTU2_0",
.channel_offset = -0x80,
.timer_bit = 0,
- .clk = "peripheral_clk",
.clockevent_rating = 200,
};
static struct resource mtu2_0_resources[] = {
[0] = {
- .name = "MTU2_0",
.start = 0xff801300,
.end = 0xff801326,
.flags = IORESOURCE_MEM,
@@ -146,16 +143,13 @@ static struct platform_device mtu2_0_device = {
};
static struct sh_timer_config mtu2_1_platform_data = {
- .name = "MTU2_1",
.channel_offset = -0x100,
.timer_bit = 1,
- .clk = "peripheral_clk",
.clockevent_rating = 200,
};
static struct resource mtu2_1_resources[] = {
[0] = {
- .name = "MTU2_1",
.start = 0xff801380,
.end = 0xff801390,
.flags = IORESOURCE_MEM,
@@ -177,16 +171,13 @@ static struct platform_device mtu2_1_device = {
};
static struct sh_timer_config mtu2_2_platform_data = {
- .name = "MTU2_2",
.channel_offset = 0x80,
.timer_bit = 2,
- .clk = "peripheral_clk",
.clockevent_rating = 200,
};
static struct resource mtu2_2_resources[] = {
[0] = {
- .name = "MTU2_2",
.start = 0xff801000,
.end = 0xff80100a,
.flags = IORESOURCE_MEM,
diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7201.c b/arch/sh/kernel/cpu/sh2a/setup-sh7201.c
index 4ccfeb59eb1a..d08bf4c07d60 100644
--- a/arch/sh/kernel/cpu/sh2a/setup-sh7201.c
+++ b/arch/sh/kernel/cpu/sh2a/setup-sh7201.c
@@ -318,16 +318,13 @@ static struct platform_device rtc_device = {
};
static struct sh_timer_config mtu2_0_platform_data = {
- .name = "MTU2_0",
.channel_offset = -0x80,
.timer_bit = 0,
- .clk = "peripheral_clk",
.clockevent_rating = 200,
};
static struct resource mtu2_0_resources[] = {
[0] = {
- .name = "MTU2_0",
.start = 0xfffe4300,
.end = 0xfffe4326,
.flags = IORESOURCE_MEM,
@@ -349,16 +346,13 @@ static struct platform_device mtu2_0_device = {
};
static struct sh_timer_config mtu2_1_platform_data = {
- .name = "MTU2_1",
.channel_offset = -0x100,
.timer_bit = 1,
- .clk = "peripheral_clk",
.clockevent_rating = 200,
};
static struct resource mtu2_1_resources[] = {
[0] = {
- .name = "MTU2_1",
.start = 0xfffe4380,
.end = 0xfffe4390,
.flags = IORESOURCE_MEM,
@@ -380,16 +374,13 @@ static struct platform_device mtu2_1_device = {
};
static struct sh_timer_config mtu2_2_platform_data = {
- .name = "MTU2_2",
.channel_offset = 0x80,
.timer_bit = 2,
- .clk = "peripheral_clk",
.clockevent_rating = 200,
};
static struct resource mtu2_2_resources[] = {
[0] = {
- .name = "MTU2_2",
.start = 0xfffe4000,
.end = 0xfffe400a,
.flags = IORESOURCE_MEM,
diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7203.c b/arch/sh/kernel/cpu/sh2a/setup-sh7203.c
index 3136966cc9b3..832f401b5860 100644
--- a/arch/sh/kernel/cpu/sh2a/setup-sh7203.c
+++ b/arch/sh/kernel/cpu/sh2a/setup-sh7203.c
@@ -234,17 +234,14 @@ static struct platform_device scif3_device = {
};
static struct sh_timer_config cmt0_platform_data = {
- .name = "CMT0",
.channel_offset = 0x02,
.timer_bit = 0,
- .clk = "peripheral_clk",
.clockevent_rating = 125,
.clocksource_rating = 0, /* disabled due to code generation issues */
};
static struct resource cmt0_resources[] = {
[0] = {
- .name = "CMT0",
.start = 0xfffec002,
.end = 0xfffec007,
.flags = IORESOURCE_MEM,
@@ -266,17 +263,14 @@ static struct platform_device cmt0_device = {
};
static struct sh_timer_config cmt1_platform_data = {
- .name = "CMT1",
.channel_offset = 0x08,
.timer_bit = 1,
- .clk = "peripheral_clk",
.clockevent_rating = 125,
.clocksource_rating = 0, /* disabled due to code generation issues */
};
static struct resource cmt1_resources[] = {
[0] = {
- .name = "CMT1",
.start = 0xfffec008,
.end = 0xfffec00d,
.flags = IORESOURCE_MEM,
@@ -298,16 +292,13 @@ static struct platform_device cmt1_device = {
};
static struct sh_timer_config mtu2_0_platform_data = {
- .name = "MTU2_0",
.channel_offset = -0x80,
.timer_bit = 0,
- .clk = "peripheral_clk",
.clockevent_rating = 200,
};
static struct resource mtu2_0_resources[] = {
[0] = {
- .name = "MTU2_0",
.start = 0xfffe4300,
.end = 0xfffe4326,
.flags = IORESOURCE_MEM,
@@ -329,16 +320,13 @@ static struct platform_device mtu2_0_device = {
};
static struct sh_timer_config mtu2_1_platform_data = {
- .name = "MTU2_1",
.channel_offset = -0x100,
.timer_bit = 1,
- .clk = "peripheral_clk",
.clockevent_rating = 200,
};
static struct resource mtu2_1_resources[] = {
[0] = {
- .name = "MTU2_1",
.start = 0xfffe4380,
.end = 0xfffe4390,
.flags = IORESOURCE_MEM,
diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7206.c b/arch/sh/kernel/cpu/sh2a/setup-sh7206.c
index 064873585a8b..dc47b04e1049 100644
--- a/arch/sh/kernel/cpu/sh2a/setup-sh7206.c
+++ b/arch/sh/kernel/cpu/sh2a/setup-sh7206.c
@@ -194,17 +194,14 @@ static struct platform_device scif3_device = {
};
static struct sh_timer_config cmt0_platform_data = {
- .name = "CMT0",
.channel_offset = 0x02,
.timer_bit = 0,
- .clk = "peripheral_clk",
.clockevent_rating = 125,
.clocksource_rating = 0, /* disabled due to code generation issues */
};
static struct resource cmt0_resources[] = {
[0] = {
- .name = "CMT0",
.start = 0xfffec002,
.end = 0xfffec007,
.flags = IORESOURCE_MEM,
@@ -226,17 +223,14 @@ static struct platform_device cmt0_device = {
};
static struct sh_timer_config cmt1_platform_data = {
- .name = "CMT1",
.channel_offset = 0x08,
.timer_bit = 1,
- .clk = "peripheral_clk",
.clockevent_rating = 125,
.clocksource_rating = 0, /* disabled due to code generation issues */
};
static struct resource cmt1_resources[] = {
[0] = {
- .name = "CMT1",
.start = 0xfffec008,
.end = 0xfffec00d,
.flags = IORESOURCE_MEM,
@@ -258,16 +252,13 @@ static struct platform_device cmt1_device = {
};
static struct sh_timer_config mtu2_0_platform_data = {
- .name = "MTU2_0",
.channel_offset = -0x80,
.timer_bit = 0,
- .clk = "peripheral_clk",
.clockevent_rating = 200,
};
static struct resource mtu2_0_resources[] = {
[0] = {
- .name = "MTU2_0",
.start = 0xfffe4300,
.end = 0xfffe4326,
.flags = IORESOURCE_MEM,
@@ -289,16 +280,13 @@ static struct platform_device mtu2_0_device = {
};
static struct sh_timer_config mtu2_1_platform_data = {
- .name = "MTU2_1",
.channel_offset = -0x100,
.timer_bit = 1,
- .clk = "peripheral_clk",
.clockevent_rating = 200,
};
static struct resource mtu2_1_resources[] = {
[0] = {
- .name = "MTU2_1",
.start = 0xfffe4380,
.end = 0xfffe4390,
.flags = IORESOURCE_MEM,
@@ -320,16 +308,13 @@ static struct platform_device mtu2_1_device = {
};
static struct sh_timer_config mtu2_2_platform_data = {
- .name = "MTU2_2",
.channel_offset = 0x80,
.timer_bit = 2,
- .clk = "peripheral_clk",
.clockevent_rating = 200,
};
static struct resource mtu2_2_resources[] = {
[0] = {
- .name = "MTU2_2",
.start = 0xfffe4000,
.end = 0xfffe400a,
.flags = IORESOURCE_MEM,
diff --git a/arch/sh/kernel/cpu/sh3/probe.c b/arch/sh/kernel/cpu/sh3/probe.c
index 295ec4c99e98..bf23c322e164 100644
--- a/arch/sh/kernel/cpu/sh3/probe.c
+++ b/arch/sh/kernel/cpu/sh3/probe.c
@@ -16,7 +16,7 @@
#include <asm/cache.h>
#include <asm/io.h>
-int detect_cpu_and_cache_system(void)
+void __cpuinit cpu_probe(void)
{
unsigned long addr0, addr1, data0, data1, data2, data3;
@@ -108,6 +108,4 @@ int detect_cpu_and_cache_system(void)
boot_cpu_data.icache = boot_cpu_data.dcache;
boot_cpu_data.family = CPU_FAMILY_SH3;
-
- return 0;
}
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7705.c b/arch/sh/kernel/cpu/sh3/setup-sh7705.c
index 7b892d60e3a0..baadd7f54d94 100644
--- a/arch/sh/kernel/cpu/sh3/setup-sh7705.c
+++ b/arch/sh/kernel/cpu/sh3/setup-sh7705.c
@@ -124,16 +124,13 @@ static struct platform_device rtc_device = {
};
static struct sh_timer_config tmu0_platform_data = {
- .name = "TMU0",
.channel_offset = 0x02,
.timer_bit = 0,
- .clk = "peripheral_clk",
.clockevent_rating = 200,
};
static struct resource tmu0_resources[] = {
[0] = {
- .name = "TMU0",
.start = 0xfffffe94,
.end = 0xfffffe9f,
.flags = IORESOURCE_MEM,
@@ -155,16 +152,13 @@ static struct platform_device tmu0_device = {
};
static struct sh_timer_config tmu1_platform_data = {
- .name = "TMU1",
.channel_offset = 0xe,
.timer_bit = 1,
- .clk = "peripheral_clk",
.clocksource_rating = 200,
};
static struct resource tmu1_resources[] = {
[0] = {
- .name = "TMU1",
.start = 0xfffffea0,
.end = 0xfffffeab,
.flags = IORESOURCE_MEM,
@@ -186,15 +180,12 @@ static struct platform_device tmu1_device = {
};
static struct sh_timer_config tmu2_platform_data = {
- .name = "TMU2",
.channel_offset = 0x1a,
.timer_bit = 2,
- .clk = "peripheral_clk",
};
static struct resource tmu2_resources[] = {
[0] = {
- .name = "TMU2",
.start = 0xfffffeac,
.end = 0xfffffebb,
.flags = IORESOURCE_MEM,
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh770x.c b/arch/sh/kernel/cpu/sh3/setup-sh770x.c
index bc0c4f68c7c7..3cf8c8ef7b32 100644
--- a/arch/sh/kernel/cpu/sh3/setup-sh770x.c
+++ b/arch/sh/kernel/cpu/sh3/setup-sh770x.c
@@ -157,16 +157,13 @@ static struct platform_device scif2_device = {
#endif
static struct sh_timer_config tmu0_platform_data = {
- .name = "TMU0",
.channel_offset = 0x02,
.timer_bit = 0,
- .clk = "peripheral_clk",
.clockevent_rating = 200,
};
static struct resource tmu0_resources[] = {
[0] = {
- .name = "TMU0",
.start = 0xfffffe94,
.end = 0xfffffe9f,
.flags = IORESOURCE_MEM,
@@ -188,16 +185,13 @@ static struct platform_device tmu0_device = {
};
static struct sh_timer_config tmu1_platform_data = {
- .name = "TMU1",
.channel_offset = 0xe,
.timer_bit = 1,
- .clk = "peripheral_clk",
.clocksource_rating = 200,
};
static struct resource tmu1_resources[] = {
[0] = {
- .name = "TMU1",
.start = 0xfffffea0,
.end = 0xfffffeab,
.flags = IORESOURCE_MEM,
@@ -219,15 +213,12 @@ static struct platform_device tmu1_device = {
};
static struct sh_timer_config tmu2_platform_data = {
- .name = "TMU2",
.channel_offset = 0x1a,
.timer_bit = 2,
- .clk = "peripheral_clk",
};
static struct resource tmu2_resources[] = {
[0] = {
- .name = "TMU2",
.start = 0xfffffeac,
.end = 0xfffffebb,
.flags = IORESOURCE_MEM,
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7710.c b/arch/sh/kernel/cpu/sh3/setup-sh7710.c
index 0845a3ad006d..b0c2fb4ab479 100644
--- a/arch/sh/kernel/cpu/sh3/setup-sh7710.c
+++ b/arch/sh/kernel/cpu/sh3/setup-sh7710.c
@@ -127,16 +127,13 @@ static struct platform_device scif1_device = {
};
static struct sh_timer_config tmu0_platform_data = {
- .name = "TMU0",
.channel_offset = 0x02,
.timer_bit = 0,
- .clk = "peripheral_clk",
.clockevent_rating = 200,
};
static struct resource tmu0_resources[] = {
[0] = {
- .name = "TMU0",
.start = 0xa412fe94,
.end = 0xa412fe9f,
.flags = IORESOURCE_MEM,
@@ -158,16 +155,13 @@ static struct platform_device tmu0_device = {
};
static struct sh_timer_config tmu1_platform_data = {
- .name = "TMU1",
.channel_offset = 0xe,
.timer_bit = 1,
- .clk = "peripheral_clk",
.clocksource_rating = 200,
};
static struct resource tmu1_resources[] = {
[0] = {
- .name = "TMU1",
.start = 0xa412fea0,
.end = 0xa412feab,
.flags = IORESOURCE_MEM,
@@ -189,15 +183,12 @@ static struct platform_device tmu1_device = {
};
static struct sh_timer_config tmu2_platform_data = {
- .name = "TMU2",
.channel_offset = 0x1a,
.timer_bit = 2,
- .clk = "peripheral_clk",
};
static struct resource tmu2_resources[] = {
[0] = {
- .name = "TMU2",
.start = 0xa412feac,
.end = 0xa412feb5,
.flags = IORESOURCE_MEM,
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7720.c b/arch/sh/kernel/cpu/sh3/setup-sh7720.c
index a718a6231091..24b17135d5d2 100644
--- a/arch/sh/kernel/cpu/sh3/setup-sh7720.c
+++ b/arch/sh/kernel/cpu/sh3/setup-sh7720.c
@@ -130,17 +130,14 @@ static struct platform_device usbf_device = {
};
static struct sh_timer_config cmt0_platform_data = {
- .name = "CMT0",
.channel_offset = 0x10,
.timer_bit = 0,
- .clk = "peripheral_clk",
.clockevent_rating = 125,
.clocksource_rating = 125,
};
static struct resource cmt0_resources[] = {
[0] = {
- .name = "CMT0",
.start = 0x044a0010,
.end = 0x044a001b,
.flags = IORESOURCE_MEM,
@@ -162,15 +159,12 @@ static struct platform_device cmt0_device = {
};
static struct sh_timer_config cmt1_platform_data = {
- .name = "CMT1",
.channel_offset = 0x20,
.timer_bit = 1,
- .clk = "peripheral_clk",
};
static struct resource cmt1_resources[] = {
[0] = {
- .name = "CMT1",
.start = 0x044a0020,
.end = 0x044a002b,
.flags = IORESOURCE_MEM,
@@ -192,15 +186,12 @@ static struct platform_device cmt1_device = {
};
static struct sh_timer_config cmt2_platform_data = {
- .name = "CMT2",
.channel_offset = 0x30,
.timer_bit = 2,
- .clk = "peripheral_clk",
};
static struct resource cmt2_resources[] = {
[0] = {
- .name = "CMT2",
.start = 0x044a0030,
.end = 0x044a003b,
.flags = IORESOURCE_MEM,
@@ -222,15 +213,12 @@ static struct platform_device cmt2_device = {
};
static struct sh_timer_config cmt3_platform_data = {
- .name = "CMT3",
.channel_offset = 0x40,
.timer_bit = 3,
- .clk = "peripheral_clk",
};
static struct resource cmt3_resources[] = {
[0] = {
- .name = "CMT3",
.start = 0x044a0040,
.end = 0x044a004b,
.flags = IORESOURCE_MEM,
@@ -252,15 +240,12 @@ static struct platform_device cmt3_device = {
};
static struct sh_timer_config cmt4_platform_data = {
- .name = "CMT4",
.channel_offset = 0x50,
.timer_bit = 4,
- .clk = "peripheral_clk",
};
static struct resource cmt4_resources[] = {
[0] = {
- .name = "CMT4",
.start = 0x044a0050,
.end = 0x044a005b,
.flags = IORESOURCE_MEM,
@@ -282,16 +267,13 @@ static struct platform_device cmt4_device = {
};
static struct sh_timer_config tmu0_platform_data = {
- .name = "TMU0",
.channel_offset = 0x02,
.timer_bit = 0,
- .clk = "peripheral_clk",
.clockevent_rating = 200,
};
static struct resource tmu0_resources[] = {
[0] = {
- .name = "TMU0",
.start = 0xa412fe94,
.end = 0xa412fe9f,
.flags = IORESOURCE_MEM,
@@ -313,16 +295,13 @@ static struct platform_device tmu0_device = {
};
static struct sh_timer_config tmu1_platform_data = {
- .name = "TMU1",
.channel_offset = 0xe,
.timer_bit = 1,
- .clk = "peripheral_clk",
.clocksource_rating = 200,
};
static struct resource tmu1_resources[] = {
[0] = {
- .name = "TMU1",
.start = 0xa412fea0,
.end = 0xa412feab,
.flags = IORESOURCE_MEM,
@@ -344,15 +323,12 @@ static struct platform_device tmu1_device = {
};
static struct sh_timer_config tmu2_platform_data = {
- .name = "TMU2",
.channel_offset = 0x1a,
.timer_bit = 2,
- .clk = "peripheral_clk",
};
static struct resource tmu2_resources[] = {
[0] = {
- .name = "TMU2",
.start = 0xa412feac,
.end = 0xa412feb5,
.flags = IORESOURCE_MEM,
diff --git a/arch/sh/kernel/cpu/sh4/clock-sh4-202.c b/arch/sh/kernel/cpu/sh4/clock-sh4-202.c
index 6b80850294da..4eabc68cd753 100644
--- a/arch/sh/kernel/cpu/sh4/clock-sh4-202.c
+++ b/arch/sh/kernel/cpu/sh4/clock-sh4-202.c
@@ -12,9 +12,10 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/err.h>
+#include <linux/io.h>
+#include <asm/clkdev.h>
#include <asm/clock.h>
#include <asm/freq.h>
-#include <asm/io.h>
#define CPG2_FRQCR3 0xfe0a0018
@@ -45,7 +46,6 @@ static struct clk_ops sh4202_emi_clk_ops = {
};
static struct clk sh4202_emi_clk = {
- .name = "emi_clk",
.flags = CLK_ENABLE_ON_INIT,
.ops = &sh4202_emi_clk_ops,
};
@@ -61,7 +61,6 @@ static struct clk_ops sh4202_femi_clk_ops = {
};
static struct clk sh4202_femi_clk = {
- .name = "femi_clk",
.flags = CLK_ENABLE_ON_INIT,
.ops = &sh4202_femi_clk_ops,
};
@@ -139,7 +138,6 @@ static struct clk_ops sh4202_shoc_clk_ops = {
};
static struct clk sh4202_shoc_clk = {
- .name = "shoc_clk",
.flags = CLK_ENABLE_ON_INIT,
.ops = &sh4202_shoc_clk_ops,
};
@@ -150,6 +148,15 @@ static struct clk *sh4202_onchip_clocks[] = {
&sh4202_shoc_clk,
};
+#define CLKDEV_CON_ID(_id, _clk) { .con_id = _id, .clk = _clk }
+
+static struct clk_lookup lookups[] = {
+ /* main clocks */
+ CLKDEV_CON_ID("emi_clk", &sh4202_emi_clk),
+ CLKDEV_CON_ID("femi_clk", &sh4202_femi_clk),
+ CLKDEV_CON_ID("shoc_clk", &sh4202_shoc_clk),
+};
+
int __init arch_clk_init(void)
{
struct clk *clk;
@@ -167,5 +174,7 @@ int __init arch_clk_init(void)
clk_put(clk);
+ clkdev_add_table(lookups, ARRAY_SIZE(lookups));
+
return ret;
}
diff --git a/arch/sh/kernel/cpu/sh4/probe.c b/arch/sh/kernel/cpu/sh4/probe.c
index 822977a06d84..d180f16281ed 100644
--- a/arch/sh/kernel/cpu/sh4/probe.c
+++ b/arch/sh/kernel/cpu/sh4/probe.c
@@ -15,7 +15,7 @@
#include <asm/processor.h>
#include <asm/cache.h>
-int __init detect_cpu_and_cache_system(void)
+void __cpuinit cpu_probe(void)
{
unsigned long pvr, prr, cvr;
unsigned long size;
@@ -251,6 +251,4 @@ int __init detect_cpu_and_cache_system(void)
boot_cpu_data.scache.linesz);
}
}
-
- return 0;
}
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh4-202.c b/arch/sh/kernel/cpu/sh4/setup-sh4-202.c
index b9b7e10ad68f..e916b18e1f7c 100644
--- a/arch/sh/kernel/cpu/sh4/setup-sh4-202.c
+++ b/arch/sh/kernel/cpu/sh4/setup-sh4-202.c
@@ -31,16 +31,13 @@ static struct platform_device scif0_device = {
};
static struct sh_timer_config tmu0_platform_data = {
- .name = "TMU0",
.channel_offset = 0x04,
.timer_bit = 0,
- .clk = "peripheral_clk",
.clockevent_rating = 200,
};
static struct resource tmu0_resources[] = {
[0] = {
- .name = "TMU0",
.start = 0xffd80008,
.end = 0xffd80013,
.flags = IORESOURCE_MEM,
@@ -62,16 +59,13 @@ static struct platform_device tmu0_device = {
};
static struct sh_timer_config tmu1_platform_data = {
- .name = "TMU1",
.channel_offset = 0x10,
.timer_bit = 1,
- .clk = "peripheral_clk",
.clocksource_rating = 200,
};
static struct resource tmu1_resources[] = {
[0] = {
- .name = "TMU1",
.start = 0xffd80014,
.end = 0xffd8001f,
.flags = IORESOURCE_MEM,
@@ -93,15 +87,12 @@ static struct platform_device tmu1_device = {
};
static struct sh_timer_config tmu2_platform_data = {
- .name = "TMU2",
.channel_offset = 0x1c,
.timer_bit = 2,
- .clk = "peripheral_clk",
};
static struct resource tmu2_resources[] = {
[0] = {
- .name = "TMU2",
.start = 0xffd80020,
.end = 0xffd8002f,
.flags = IORESOURCE_MEM,
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7750.c b/arch/sh/kernel/cpu/sh4/setup-sh7750.c
index ffd79e57254f..911d196e86b5 100644
--- a/arch/sh/kernel/cpu/sh4/setup-sh7750.c
+++ b/arch/sh/kernel/cpu/sh4/setup-sh7750.c
@@ -66,16 +66,13 @@ static struct platform_device scif1_device = {
};
static struct sh_timer_config tmu0_platform_data = {
- .name = "TMU0",
.channel_offset = 0x04,
.timer_bit = 0,
- .clk = "peripheral_clk",
.clockevent_rating = 200,
};
static struct resource tmu0_resources[] = {
[0] = {
- .name = "TMU0",
.start = 0xffd80008,
.end = 0xffd80013,
.flags = IORESOURCE_MEM,
@@ -97,16 +94,13 @@ static struct platform_device tmu0_device = {
};
static struct sh_timer_config tmu1_platform_data = {
- .name = "TMU1",
.channel_offset = 0x10,
.timer_bit = 1,
- .clk = "peripheral_clk",
.clocksource_rating = 200,
};
static struct resource tmu1_resources[] = {
[0] = {
- .name = "TMU1",
.start = 0xffd80014,
.end = 0xffd8001f,
.flags = IORESOURCE_MEM,
@@ -128,15 +122,12 @@ static struct platform_device tmu1_device = {
};
static struct sh_timer_config tmu2_platform_data = {
- .name = "TMU2",
.channel_offset = 0x1c,
.timer_bit = 2,
- .clk = "peripheral_clk",
};
static struct resource tmu2_resources[] = {
[0] = {
- .name = "TMU2",
.start = 0xffd80020,
.end = 0xffd8002f,
.flags = IORESOURCE_MEM,
@@ -163,15 +154,12 @@ static struct platform_device tmu2_device = {
defined(CONFIG_CPU_SUBTYPE_SH7751R)
static struct sh_timer_config tmu3_platform_data = {
- .name = "TMU3",
.channel_offset = 0x04,
.timer_bit = 0,
- .clk = "peripheral_clk",
};
static struct resource tmu3_resources[] = {
[0] = {
- .name = "TMU3",
.start = 0xfe100008,
.end = 0xfe100013,
.flags = IORESOURCE_MEM,
@@ -193,15 +181,12 @@ static struct platform_device tmu3_device = {
};
static struct sh_timer_config tmu4_platform_data = {
- .name = "TMU4",
.channel_offset = 0x10,
.timer_bit = 1,
- .clk = "peripheral_clk",
};
static struct resource tmu4_resources[] = {
[0] = {
- .name = "TMU4",
.start = 0xfe100014,
.end = 0xfe10001f,
.flags = IORESOURCE_MEM,
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7760.c b/arch/sh/kernel/cpu/sh4/setup-sh7760.c
index a16eb3656f4b..48ea8fe85dc5 100644
--- a/arch/sh/kernel/cpu/sh4/setup-sh7760.c
+++ b/arch/sh/kernel/cpu/sh4/setup-sh7760.c
@@ -187,16 +187,13 @@ static struct platform_device scif3_device = {
};
static struct sh_timer_config tmu0_platform_data = {
- .name = "TMU0",
.channel_offset = 0x04,
.timer_bit = 0,
- .clk = "peripheral_clk",
.clockevent_rating = 200,
};
static struct resource tmu0_resources[] = {
[0] = {
- .name = "TMU0",
.start = 0xffd80008,
.end = 0xffd80013,
.flags = IORESOURCE_MEM,
@@ -218,16 +215,13 @@ static struct platform_device tmu0_device = {
};
static struct sh_timer_config tmu1_platform_data = {
- .name = "TMU1",
.channel_offset = 0x10,
.timer_bit = 1,
- .clk = "peripheral_clk",
.clocksource_rating = 200,
};
static struct resource tmu1_resources[] = {
[0] = {
- .name = "TMU1",
.start = 0xffd80014,
.end = 0xffd8001f,
.flags = IORESOURCE_MEM,
@@ -249,15 +243,12 @@ static struct platform_device tmu1_device = {
};
static struct sh_timer_config tmu2_platform_data = {
- .name = "TMU2",
.channel_offset = 0x1c,
.timer_bit = 2,
- .clk = "peripheral_clk",
};
static struct resource tmu2_resources[] = {
[0] = {
- .name = "TMU2",
.start = 0xffd80020,
.end = 0xffd8002f,
.flags = IORESOURCE_MEM,
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7343.c b/arch/sh/kernel/cpu/sh4a/clock-sh7343.c
index 2c16df37eda6..71291ae201b9 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7343.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7343.c
@@ -21,6 +21,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
+#include <asm/clkdev.h>
#include <asm/clock.h>
/* SH7343 registers */
@@ -36,8 +37,6 @@
/* Fixed 32 KHz root clock for RTC and Power Management purposes */
static struct clk r_clk = {
- .name = "rclk",
- .id = -1,
.rate = 32768,
};
@@ -46,8 +45,6 @@ static struct clk r_clk = {
* from the platform code.
*/
struct clk extal_clk = {
- .name = "extal",
- .id = -1,
.rate = 33333333,
};
@@ -69,8 +66,6 @@ static struct clk_ops dll_clk_ops = {
};
static struct clk dll_clk = {
- .name = "dll_clk",
- .id = -1,
.ops = &dll_clk_ops,
.parent = &r_clk,
.flags = CLK_ENABLE_ON_INIT,
@@ -91,8 +86,6 @@ static struct clk_ops pll_clk_ops = {
};
static struct clk pll_clk = {
- .name = "pll_clk",
- .id = -1,
.ops = &pll_clk_ops,
.flags = CLK_ENABLE_ON_INIT,
};
@@ -121,72 +114,168 @@ static struct clk_div4_table div4_table = {
enum { DIV4_I, DIV4_U, DIV4_SH, DIV4_B, DIV4_B3, DIV4_P,
DIV4_SIUA, DIV4_SIUB, DIV4_NR };
-#define DIV4(_str, _reg, _bit, _mask, _flags) \
- SH_CLK_DIV4(_str, &pll_clk, _reg, _bit, _mask, _flags)
+#define DIV4(_reg, _bit, _mask, _flags) \
+ SH_CLK_DIV4(&pll_clk, _reg, _bit, _mask, _flags)
struct clk div4_clks[DIV4_NR] = {
- [DIV4_I] = DIV4("cpu_clk", FRQCR, 20, 0x1fff, CLK_ENABLE_ON_INIT),
- [DIV4_U] = DIV4("umem_clk", FRQCR, 16, 0x1fff, CLK_ENABLE_ON_INIT),
- [DIV4_SH] = DIV4("shyway_clk", FRQCR, 12, 0x1fff, CLK_ENABLE_ON_INIT),
- [DIV4_B] = DIV4("bus_clk", FRQCR, 8, 0x1fff, CLK_ENABLE_ON_INIT),
- [DIV4_B3] = DIV4("b3_clk", FRQCR, 4, 0x1fff, CLK_ENABLE_ON_INIT),
- [DIV4_P] = DIV4("peripheral_clk", FRQCR, 0, 0x1fff, 0),
- [DIV4_SIUA] = DIV4("siua_clk", SCLKACR, 0, 0x1fff, 0),
- [DIV4_SIUB] = DIV4("siub_clk", SCLKBCR, 0, 0x1fff, 0),
+ [DIV4_I] = DIV4(FRQCR, 20, 0x1fff, CLK_ENABLE_ON_INIT),
+ [DIV4_U] = DIV4(FRQCR, 16, 0x1fff, CLK_ENABLE_ON_INIT),
+ [DIV4_SH] = DIV4(FRQCR, 12, 0x1fff, CLK_ENABLE_ON_INIT),
+ [DIV4_B] = DIV4(FRQCR, 8, 0x1fff, CLK_ENABLE_ON_INIT),
+ [DIV4_B3] = DIV4(FRQCR, 4, 0x1fff, CLK_ENABLE_ON_INIT),
+ [DIV4_P] = DIV4(FRQCR, 0, 0x1fff, 0),
+ [DIV4_SIUA] = DIV4(SCLKACR, 0, 0x1fff, 0),
+ [DIV4_SIUB] = DIV4(SCLKBCR, 0, 0x1fff, 0),
};
-struct clk div6_clks[] = {
- SH_CLK_DIV6("video_clk", &pll_clk, VCLKCR, 0),
+enum { DIV6_V, DIV6_NR };
+
+struct clk div6_clks[DIV6_NR] = {
+ [DIV6_V] = SH_CLK_DIV6(&pll_clk, VCLKCR, 0),
+};
+
+#define MSTP(_parent, _reg, _bit, _flags) \
+ SH_CLK_MSTP32(_parent, _reg, _bit, _flags)
+
+enum { MSTP031, MSTP030, MSTP029, MSTP028, MSTP026,
+ MSTP023, MSTP022, MSTP021, MSTP020, MSTP019, MSTP018, MSTP017, MSTP016,
+ MSTP015, MSTP014, MSTP013, MSTP012, MSTP011, MSTP010,
+ MSTP007, MSTP006, MSTP005, MSTP004, MSTP003, MSTP002, MSTP001,
+ MSTP109, MSTP108, MSTP100,
+ MSTP225, MSTP224, MSTP218, MSTP217, MSTP216,
+ MSTP214, MSTP213, MSTP212, MSTP211, MSTP208,
+ MSTP206, MSTP205, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200,
+ MSTP_NR };
+
+static struct clk mstp_clks[MSTP_NR] = {
+ [MSTP031] = MSTP(&div4_clks[DIV4_I], MSTPCR0, 31, CLK_ENABLE_ON_INIT),
+ [MSTP030] = MSTP(&div4_clks[DIV4_I], MSTPCR0, 30, CLK_ENABLE_ON_INIT),
+ [MSTP029] = MSTP(&div4_clks[DIV4_I], MSTPCR0, 29, CLK_ENABLE_ON_INIT),
+ [MSTP028] = MSTP(&div4_clks[DIV4_U], MSTPCR0, 28, CLK_ENABLE_ON_INIT),
+ [MSTP026] = MSTP(&div4_clks[DIV4_B], MSTPCR0, 26, CLK_ENABLE_ON_INIT),
+ [MSTP023] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 23, 0),
+ [MSTP022] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 22, 0),
+ [MSTP021] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 21, 0),
+ [MSTP020] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 20, 0),
+ [MSTP019] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 19, 0),
+ [MSTP017] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 17, 0),
+ [MSTP015] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 15, 0),
+ [MSTP014] = MSTP(&r_clk, MSTPCR0, 14, 0),
+ [MSTP013] = MSTP(&r_clk, MSTPCR0, 13, 0),
+ [MSTP011] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 11, 0),
+ [MSTP010] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 10, 0),
+ [MSTP007] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 7, 0),
+ [MSTP006] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 6, 0),
+ [MSTP005] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 5, 0),
+ [MSTP004] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 4, 0),
+ [MSTP003] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 3, 0),
+ [MSTP002] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 2, 0),
+ [MSTP001] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 1, 0),
+
+ [MSTP109] = MSTP(&div4_clks[DIV4_P], MSTPCR1, 9, 0),
+ [MSTP108] = MSTP(&div4_clks[DIV4_P], MSTPCR1, 8, 0),
+
+ [MSTP225] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 25, 0),
+ [MSTP224] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 24, 0),
+ [MSTP218] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 18, 0),
+ [MSTP217] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 17, 0),
+ [MSTP216] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 16, 0),
+ [MSTP214] = MSTP(&r_clk, MSTPCR2, 14, 0),
+ [MSTP213] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 13, 0),
+ [MSTP212] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 12, 0),
+ [MSTP211] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 11, 0),
+ [MSTP208] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 8, 0),
+ [MSTP206] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 6, CLK_ENABLE_ON_INIT),
+ [MSTP205] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 5, 0),
+ [MSTP204] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 4, 0),
+ [MSTP203] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 3, 0),
+ [MSTP202] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 2, CLK_ENABLE_ON_INIT),
+ [MSTP201] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 1, CLK_ENABLE_ON_INIT),
+ [MSTP200] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 0, 0),
};
-#define MSTP(_str, _parent, _reg, _bit, _flags) \
- SH_CLK_MSTP32(_str, -1, _parent, _reg, _bit, _flags)
-
-static struct clk mstp_clks[] = {
- MSTP("tlb0", &div4_clks[DIV4_I], MSTPCR0, 31, CLK_ENABLE_ON_INIT),
- MSTP("ic0", &div4_clks[DIV4_I], MSTPCR0, 30, CLK_ENABLE_ON_INIT),
- MSTP("oc0", &div4_clks[DIV4_I], MSTPCR0, 29, CLK_ENABLE_ON_INIT),
- MSTP("uram0", &div4_clks[DIV4_U], MSTPCR0, 28, CLK_ENABLE_ON_INIT),
- MSTP("xymem0", &div4_clks[DIV4_B], MSTPCR0, 26, CLK_ENABLE_ON_INIT),
- MSTP("intc3", &div4_clks[DIV4_P], MSTPCR0, 23, 0),
- MSTP("intc0", &div4_clks[DIV4_P], MSTPCR0, 22, 0),
- MSTP("dmac0", &div4_clks[DIV4_P], MSTPCR0, 21, 0),
- MSTP("sh0", &div4_clks[DIV4_P], MSTPCR0, 20, 0),
- MSTP("hudi0", &div4_clks[DIV4_P], MSTPCR0, 19, 0),
- MSTP("ubc0", &div4_clks[DIV4_P], MSTPCR0, 17, 0),
- MSTP("tmu0", &div4_clks[DIV4_P], MSTPCR0, 15, 0),
- MSTP("cmt0", &r_clk, MSTPCR0, 14, 0),
- MSTP("rwdt0", &r_clk, MSTPCR0, 13, 0),
- MSTP("mfi0", &div4_clks[DIV4_P], MSTPCR0, 11, 0),
- MSTP("flctl0", &div4_clks[DIV4_P], MSTPCR0, 10, 0),
- MSTP("scif0", &div4_clks[DIV4_P], MSTPCR0, 7, 0),
- MSTP("scif1", &div4_clks[DIV4_P], MSTPCR0, 6, 0),
- MSTP("scif2", &div4_clks[DIV4_P], MSTPCR0, 5, 0),
- MSTP("scif3", &div4_clks[DIV4_P], MSTPCR0, 4, 0),
- MSTP("sio0", &div4_clks[DIV4_P], MSTPCR0, 3, 0),
- MSTP("siof0", &div4_clks[DIV4_P], MSTPCR0, 2, 0),
- MSTP("siof1", &div4_clks[DIV4_P], MSTPCR0, 1, 0),
-
- MSTP("i2c0", &div4_clks[DIV4_P], MSTPCR1, 9, 0),
- MSTP("i2c1", &div4_clks[DIV4_P], MSTPCR1, 8, 0),
-
- MSTP("tpu0", &div4_clks[DIV4_P], MSTPCR2, 25, 0),
- MSTP("irda0", &div4_clks[DIV4_P], MSTPCR2, 24, 0),
- MSTP("sdhi0", &div4_clks[DIV4_P], MSTPCR2, 18, 0),
- MSTP("mmcif0", &div4_clks[DIV4_P], MSTPCR2, 17, 0),
- MSTP("sim0", &div4_clks[DIV4_P], MSTPCR2, 16, 0),
- MSTP("keysc0", &r_clk, MSTPCR2, 14, 0),
- MSTP("tsif0", &div4_clks[DIV4_P], MSTPCR2, 13, 0),
- MSTP("s3d40", &div4_clks[DIV4_P], MSTPCR2, 12, 0),
- MSTP("usbf0", &div4_clks[DIV4_P], MSTPCR2, 11, 0),
- MSTP("siu0", &div4_clks[DIV4_B], MSTPCR2, 8, 0),
- MSTP("jpu0", &div4_clks[DIV4_B], MSTPCR2, 6, CLK_ENABLE_ON_INIT),
- MSTP("vou0", &div4_clks[DIV4_B], MSTPCR2, 5, 0),
- MSTP("beu0", &div4_clks[DIV4_B], MSTPCR2, 4, 0),
- MSTP("ceu0", &div4_clks[DIV4_B], MSTPCR2, 3, 0),
- MSTP("veu0", &div4_clks[DIV4_B], MSTPCR2, 2, CLK_ENABLE_ON_INIT),
- MSTP("vpu0", &div4_clks[DIV4_B], MSTPCR2, 1, CLK_ENABLE_ON_INIT),
- MSTP("lcdc0", &div4_clks[DIV4_B], MSTPCR2, 0, 0),
+#define CLKDEV_CON_ID(_id, _clk) { .con_id = _id, .clk = _clk }
+
+static struct clk_lookup lookups[] = {
+ /* main clocks */
+ CLKDEV_CON_ID("rclk", &r_clk),
+ CLKDEV_CON_ID("extal", &extal_clk),
+ CLKDEV_CON_ID("dll_clk", &dll_clk),
+ CLKDEV_CON_ID("pll_clk", &pll_clk),
+
+ /* DIV4 clocks */
+ CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]),
+ CLKDEV_CON_ID("umem_clk", &div4_clks[DIV4_U]),
+ CLKDEV_CON_ID("shyway_clk", &div4_clks[DIV4_SH]),
+ CLKDEV_CON_ID("bus_clk", &div4_clks[DIV4_B]),
+ CLKDEV_CON_ID("b3_clk", &div4_clks[DIV4_B3]),
+ CLKDEV_CON_ID("peripheral_clk", &div4_clks[DIV4_P]),
+ CLKDEV_CON_ID("siua_clk", &div4_clks[DIV4_SIUA]),
+ CLKDEV_CON_ID("siub_clk", &div4_clks[DIV4_SIUB]),
+
+ /* DIV6 clocks */
+ CLKDEV_CON_ID("video_clk", &div6_clks[DIV6_V]),
+
+ /* MSTP32 clocks */
+ CLKDEV_CON_ID("tlb0", &mstp_clks[MSTP031]),
+ CLKDEV_CON_ID("ic0", &mstp_clks[MSTP030]),
+ CLKDEV_CON_ID("oc0", &mstp_clks[MSTP029]),
+ CLKDEV_CON_ID("uram0", &mstp_clks[MSTP028]),
+ CLKDEV_CON_ID("xymem0", &mstp_clks[MSTP026]),
+ CLKDEV_CON_ID("intc3", &mstp_clks[MSTP023]),
+ CLKDEV_CON_ID("intc0", &mstp_clks[MSTP022]),
+ CLKDEV_CON_ID("dmac0", &mstp_clks[MSTP021]),
+ CLKDEV_CON_ID("sh0", &mstp_clks[MSTP020]),
+ CLKDEV_CON_ID("hudi0", &mstp_clks[MSTP019]),
+ CLKDEV_CON_ID("ubc0", &mstp_clks[MSTP017]),
+ CLKDEV_CON_ID("tmu_fck", &mstp_clks[MSTP015]),
+ CLKDEV_CON_ID("cmt_fck", &mstp_clks[MSTP014]),
+ CLKDEV_CON_ID("rwdt0", &mstp_clks[MSTP013]),
+ CLKDEV_CON_ID("mfi0", &mstp_clks[MSTP011]),
+ CLKDEV_CON_ID("flctl0", &mstp_clks[MSTP010]),
+ {
+ /* SCIF0 */
+ .dev_id = "sh-sci.0",
+ .con_id = "sci_fck",
+ .clk = &mstp_clks[MSTP007],
+ }, {
+ /* SCIF1 */
+ .dev_id = "sh-sci.1",
+ .con_id = "sci_fck",
+ .clk = &mstp_clks[MSTP006],
+ }, {
+ /* SCIF2 */
+ .dev_id = "sh-sci.2",
+ .con_id = "sci_fck",
+ .clk = &mstp_clks[MSTP005],
+ }, {
+ /* SCIF3 */
+ .dev_id = "sh-sci.3",
+ .con_id = "sci_fck",
+ .clk = &mstp_clks[MSTP004],
+ },
+ CLKDEV_CON_ID("sio0", &mstp_clks[MSTP003]),
+ CLKDEV_CON_ID("siof0", &mstp_clks[MSTP002]),
+ CLKDEV_CON_ID("siof1", &mstp_clks[MSTP001]),
+ CLKDEV_CON_ID("i2c0", &mstp_clks[MSTP109]),
+ CLKDEV_CON_ID("i2c1", &mstp_clks[MSTP108]),
+ CLKDEV_CON_ID("tpu0", &mstp_clks[MSTP225]),
+ CLKDEV_CON_ID("irda0", &mstp_clks[MSTP224]),
+ CLKDEV_CON_ID("sdhi0", &mstp_clks[MSTP218]),
+ CLKDEV_CON_ID("mmcif0", &mstp_clks[MSTP217]),
+ CLKDEV_CON_ID("sim0", &mstp_clks[MSTP216]),
+ CLKDEV_CON_ID("keysc0", &mstp_clks[MSTP214]),
+ CLKDEV_CON_ID("tsif0", &mstp_clks[MSTP213]),
+ CLKDEV_CON_ID("s3d40", &mstp_clks[MSTP212]),
+ CLKDEV_CON_ID("usbf0", &mstp_clks[MSTP211]),
+ CLKDEV_CON_ID("siu0", &mstp_clks[MSTP208]),
+ CLKDEV_CON_ID("jpu0", &mstp_clks[MSTP206]),
+ CLKDEV_CON_ID("vou0", &mstp_clks[MSTP205]),
+ CLKDEV_CON_ID("beu0", &mstp_clks[MSTP204]),
+ CLKDEV_CON_ID("ceu0", &mstp_clks[MSTP203]),
+ CLKDEV_CON_ID("veu0", &mstp_clks[MSTP202]),
+ CLKDEV_CON_ID("vpu0", &mstp_clks[MSTP201]),
+ CLKDEV_CON_ID("lcdc0", &mstp_clks[MSTP200]),
};
int __init arch_clk_init(void)
@@ -202,14 +291,16 @@ int __init arch_clk_init(void)
for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++)
ret = clk_register(main_clks[k]);
+ clkdev_add_table(lookups, ARRAY_SIZE(lookups));
+
if (!ret)
ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table);
if (!ret)
- ret = sh_clk_div6_register(div6_clks, ARRAY_SIZE(div6_clks));
+ ret = sh_clk_div6_register(div6_clks, DIV6_NR);
if (!ret)
- ret = sh_clk_mstp32_register(mstp_clks, ARRAY_SIZE(mstp_clks));
+ ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR);
return ret;
}
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7366.c b/arch/sh/kernel/cpu/sh4a/clock-sh7366.c
index 91588d280cd8..7ce5bbcd4084 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7366.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7366.c
@@ -21,6 +21,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
+#include <asm/clkdev.h>
#include <asm/clock.h>
/* SH7366 registers */
@@ -36,8 +37,6 @@
/* Fixed 32 KHz root clock for RTC and Power Management purposes */
static struct clk r_clk = {
- .name = "rclk",
- .id = -1,
.rate = 32768,
};
@@ -46,8 +45,6 @@ static struct clk r_clk = {
* from the platform code.
*/
struct clk extal_clk = {
- .name = "extal",
- .id = -1,
.rate = 33333333,
};
@@ -69,8 +66,6 @@ static struct clk_ops dll_clk_ops = {
};
static struct clk dll_clk = {
- .name = "dll_clk",
- .id = -1,
.ops = &dll_clk_ops,
.parent = &r_clk,
.flags = CLK_ENABLE_ON_INIT,
@@ -94,8 +89,6 @@ static struct clk_ops pll_clk_ops = {
};
static struct clk pll_clk = {
- .name = "pll_clk",
- .id = -1,
.ops = &pll_clk_ops,
.flags = CLK_ENABLE_ON_INIT,
};
@@ -124,69 +117,154 @@ static struct clk_div4_table div4_table = {
enum { DIV4_I, DIV4_U, DIV4_SH, DIV4_B, DIV4_B3, DIV4_P,
DIV4_SIUA, DIV4_SIUB, DIV4_NR };
-#define DIV4(_str, _reg, _bit, _mask, _flags) \
- SH_CLK_DIV4(_str, &pll_clk, _reg, _bit, _mask, _flags)
+#define DIV4(_reg, _bit, _mask, _flags) \
+ SH_CLK_DIV4(&pll_clk, _reg, _bit, _mask, _flags)
struct clk div4_clks[DIV4_NR] = {
- [DIV4_I] = DIV4("cpu_clk", FRQCR, 20, 0x1fef, CLK_ENABLE_ON_INIT),
- [DIV4_U] = DIV4("umem_clk", FRQCR, 16, 0x1fff, CLK_ENABLE_ON_INIT),
- [DIV4_SH] = DIV4("shyway_clk", FRQCR, 12, 0x1fff, CLK_ENABLE_ON_INIT),
- [DIV4_B] = DIV4("bus_clk", FRQCR, 8, 0x1fff, CLK_ENABLE_ON_INIT),
- [DIV4_B3] = DIV4("b3_clk", FRQCR, 4, 0x1fff, CLK_ENABLE_ON_INIT),
- [DIV4_P] = DIV4("peripheral_clk", FRQCR, 0, 0x1fff, 0),
- [DIV4_SIUA] = DIV4("siua_clk", SCLKACR, 0, 0x1fff, 0),
- [DIV4_SIUB] = DIV4("siub_clk", SCLKBCR, 0, 0x1fff, 0),
+ [DIV4_I] = DIV4(FRQCR, 20, 0x1fef, CLK_ENABLE_ON_INIT),
+ [DIV4_U] = DIV4(FRQCR, 16, 0x1fff, CLK_ENABLE_ON_INIT),
+ [DIV4_SH] = DIV4(FRQCR, 12, 0x1fff, CLK_ENABLE_ON_INIT),
+ [DIV4_B] = DIV4(FRQCR, 8, 0x1fff, CLK_ENABLE_ON_INIT),
+ [DIV4_B3] = DIV4(FRQCR, 4, 0x1fff, CLK_ENABLE_ON_INIT),
+ [DIV4_P] = DIV4(FRQCR, 0, 0x1fff, 0),
+ [DIV4_SIUA] = DIV4(SCLKACR, 0, 0x1fff, 0),
+ [DIV4_SIUB] = DIV4(SCLKBCR, 0, 0x1fff, 0),
};
-struct clk div6_clks[] = {
- SH_CLK_DIV6("video_clk", &pll_clk, VCLKCR, 0),
+enum { DIV6_V, DIV6_NR };
+
+struct clk div6_clks[DIV6_NR] = {
+ [DIV6_V] = SH_CLK_DIV6(&pll_clk, VCLKCR, 0),
};
-#define MSTP(_str, _parent, _reg, _bit, _flags) \
- SH_CLK_MSTP32(_str, -1, _parent, _reg, _bit, _flags)
+#define MSTP(_parent, _reg, _bit, _flags) \
+ SH_CLK_MSTP32(_parent, _reg, _bit, _flags)
+
+enum { MSTP031, MSTP030, MSTP029, MSTP028, MSTP026,
+ MSTP023, MSTP022, MSTP021, MSTP020, MSTP019, MSTP018, MSTP017, MSTP016,
+ MSTP015, MSTP014, MSTP013, MSTP012, MSTP011, MSTP010,
+ MSTP007, MSTP006, MSTP005, MSTP002, MSTP001,
+ MSTP109, MSTP100,
+ MSTP227, MSTP226, MSTP224, MSTP223, MSTP222, MSTP218, MSTP217,
+ MSTP211, MSTP207, MSTP205, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200,
+ MSTP_NR };
-static struct clk mstp_clks[] = {
+static struct clk mstp_clks[MSTP_NR] = {
/* See page 52 of Datasheet V0.40: Overview -> Block Diagram */
- MSTP("tlb0", &div4_clks[DIV4_I], MSTPCR0, 31, CLK_ENABLE_ON_INIT),
- MSTP("ic0", &div4_clks[DIV4_I], MSTPCR0, 30, CLK_ENABLE_ON_INIT),
- MSTP("oc0", &div4_clks[DIV4_I], MSTPCR0, 29, CLK_ENABLE_ON_INIT),
- MSTP("rsmem0", &div4_clks[DIV4_SH], MSTPCR0, 28, CLK_ENABLE_ON_INIT),
- MSTP("xymem0", &div4_clks[DIV4_B], MSTPCR0, 26, CLK_ENABLE_ON_INIT),
- MSTP("intc3", &div4_clks[DIV4_P], MSTPCR0, 23, 0),
- MSTP("intc0", &div4_clks[DIV4_P], MSTPCR0, 22, 0),
- MSTP("dmac0", &div4_clks[DIV4_P], MSTPCR0, 21, 0),
- MSTP("sh0", &div4_clks[DIV4_P], MSTPCR0, 20, 0),
- MSTP("hudi0", &div4_clks[DIV4_P], MSTPCR0, 19, 0),
- MSTP("ubc0", &div4_clks[DIV4_P], MSTPCR0, 17, 0),
- MSTP("tmu0", &div4_clks[DIV4_P], MSTPCR0, 15, 0),
- MSTP("cmt0", &r_clk, MSTPCR0, 14, 0),
- MSTP("rwdt0", &r_clk, MSTPCR0, 13, 0),
- MSTP("mfi0", &div4_clks[DIV4_P], MSTPCR0, 11, 0),
- MSTP("flctl0", &div4_clks[DIV4_P], MSTPCR0, 10, 0),
- MSTP("scif0", &div4_clks[DIV4_P], MSTPCR0, 7, 0),
- MSTP("scif1", &div4_clks[DIV4_P], MSTPCR0, 6, 0),
- MSTP("scif2", &div4_clks[DIV4_P], MSTPCR0, 5, 0),
- MSTP("msiof0", &div4_clks[DIV4_P], MSTPCR0, 2, 0),
- MSTP("sbr0", &div4_clks[DIV4_P], MSTPCR0, 1, 0),
-
- MSTP("i2c0", &div4_clks[DIV4_P], MSTPCR1, 9, 0),
-
- MSTP("icb0", &div4_clks[DIV4_P], MSTPCR2, 27, 0),
- MSTP("meram0", &div4_clks[DIV4_P], MSTPCR2, 26, 0),
- MSTP("dacy1", &div4_clks[DIV4_P], MSTPCR2, 24, 0),
- MSTP("dacy0", &div4_clks[DIV4_P], MSTPCR2, 23, 0),
- MSTP("tsif0", &div4_clks[DIV4_P], MSTPCR2, 22, 0),
- MSTP("sdhi0", &div4_clks[DIV4_P], MSTPCR2, 18, 0),
- MSTP("mmcif0", &div4_clks[DIV4_P], MSTPCR2, 17, 0),
- MSTP("usbf0", &div4_clks[DIV4_P], MSTPCR2, 11, 0),
- MSTP("siu0", &div4_clks[DIV4_B], MSTPCR2, 9, 0),
- MSTP("veu1", &div4_clks[DIV4_B], MSTPCR2, 7, CLK_ENABLE_ON_INIT),
- MSTP("vou0", &div4_clks[DIV4_B], MSTPCR2, 5, 0),
- MSTP("beu0", &div4_clks[DIV4_B], MSTPCR2, 4, 0),
- MSTP("ceu0", &div4_clks[DIV4_B], MSTPCR2, 3, 0),
- MSTP("veu0", &div4_clks[DIV4_B], MSTPCR2, 2, CLK_ENABLE_ON_INIT),
- MSTP("vpu0", &div4_clks[DIV4_B], MSTPCR2, 1, CLK_ENABLE_ON_INIT),
- MSTP("lcdc0", &div4_clks[DIV4_B], MSTPCR2, 0, 0),
+ [MSTP031] = MSTP(&div4_clks[DIV4_I], MSTPCR0, 31, CLK_ENABLE_ON_INIT),
+ [MSTP030] = MSTP(&div4_clks[DIV4_I], MSTPCR0, 30, CLK_ENABLE_ON_INIT),
+ [MSTP029] = MSTP(&div4_clks[DIV4_I], MSTPCR0, 29, CLK_ENABLE_ON_INIT),
+ [MSTP028] = MSTP(&div4_clks[DIV4_SH], MSTPCR0, 28, CLK_ENABLE_ON_INIT),
+ [MSTP026] = MSTP(&div4_clks[DIV4_B], MSTPCR0, 26, CLK_ENABLE_ON_INIT),
+ [MSTP023] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 23, 0),
+ [MSTP022] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 22, 0),
+ [MSTP021] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 21, 0),
+ [MSTP020] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 20, 0),
+ [MSTP019] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 19, 0),
+ [MSTP017] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 17, 0),
+ [MSTP015] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 15, 0),
+ [MSTP014] = MSTP(&r_clk, MSTPCR0, 14, 0),
+ [MSTP013] = MSTP(&r_clk, MSTPCR0, 13, 0),
+ [MSTP011] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 11, 0),
+ [MSTP010] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 10, 0),
+ [MSTP007] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 7, 0),
+ [MSTP006] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 6, 0),
+ [MSTP005] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 5, 0),
+ [MSTP002] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 2, 0),
+ [MSTP001] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 1, 0),
+
+ [MSTP109] = MSTP(&div4_clks[DIV4_P], MSTPCR1, 9, 0),
+
+ [MSTP227] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 27, 0),
+ [MSTP226] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 26, 0),
+ [MSTP224] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 24, 0),
+ [MSTP223] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 23, 0),
+ [MSTP222] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 22, 0),
+ [MSTP218] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 18, 0),
+ [MSTP217] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 17, 0),
+ [MSTP211] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 11, 0),
+ [MSTP207] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 7, CLK_ENABLE_ON_INIT),
+ [MSTP205] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 5, 0),
+ [MSTP204] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 4, 0),
+ [MSTP203] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 3, 0),
+ [MSTP202] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 2, CLK_ENABLE_ON_INIT),
+ [MSTP201] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 1, CLK_ENABLE_ON_INIT),
+ [MSTP200] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 0, 0),
+};
+
+#define CLKDEV_CON_ID(_id, _clk) { .con_id = _id, .clk = _clk }
+
+static struct clk_lookup lookups[] = {
+ /* main clocks */
+ CLKDEV_CON_ID("rclk", &r_clk),
+ CLKDEV_CON_ID("extal", &extal_clk),
+ CLKDEV_CON_ID("dll_clk", &dll_clk),
+ CLKDEV_CON_ID("pll_clk", &pll_clk),
+
+ /* DIV4 clocks */
+ CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]),
+ CLKDEV_CON_ID("umem_clk", &div4_clks[DIV4_U]),
+ CLKDEV_CON_ID("shyway_clk", &div4_clks[DIV4_SH]),
+ CLKDEV_CON_ID("bus_clk", &div4_clks[DIV4_B]),
+ CLKDEV_CON_ID("b3_clk", &div4_clks[DIV4_B3]),
+ CLKDEV_CON_ID("peripheral_clk", &div4_clks[DIV4_P]),
+ CLKDEV_CON_ID("siua_clk", &div4_clks[DIV4_SIUA]),
+ CLKDEV_CON_ID("siub_clk", &div4_clks[DIV4_SIUB]),
+
+ /* DIV6 clocks */
+ CLKDEV_CON_ID("video_clk", &div6_clks[DIV6_V]),
+
+ /* MSTP32 clocks */
+ CLKDEV_CON_ID("tlb0", &mstp_clks[MSTP031]),
+ CLKDEV_CON_ID("ic0", &mstp_clks[MSTP030]),
+ CLKDEV_CON_ID("oc0", &mstp_clks[MSTP029]),
+ CLKDEV_CON_ID("rsmem0", &mstp_clks[MSTP028]),
+ CLKDEV_CON_ID("xymem0", &mstp_clks[MSTP026]),
+ CLKDEV_CON_ID("intc3", &mstp_clks[MSTP023]),
+ CLKDEV_CON_ID("intc0", &mstp_clks[MSTP022]),
+ CLKDEV_CON_ID("dmac0", &mstp_clks[MSTP021]),
+ CLKDEV_CON_ID("sh0", &mstp_clks[MSTP020]),
+ CLKDEV_CON_ID("hudi0", &mstp_clks[MSTP019]),
+ CLKDEV_CON_ID("ubc0", &mstp_clks[MSTP017]),
+ CLKDEV_CON_ID("tmu_fck", &mstp_clks[MSTP015]),
+ CLKDEV_CON_ID("cmt_fck", &mstp_clks[MSTP014]),
+ CLKDEV_CON_ID("rwdt0", &mstp_clks[MSTP013]),
+ CLKDEV_CON_ID("mfi0", &mstp_clks[MSTP011]),
+ CLKDEV_CON_ID("flctl0", &mstp_clks[MSTP010]),
+ {
+ /* SCIF0 */
+ .dev_id = "sh-sci.0",
+ .con_id = "sci_fck",
+ .clk = &mstp_clks[MSTP007],
+ }, {
+ /* SCIF1 */
+ .dev_id = "sh-sci.1",
+ .con_id = "sci_fck",
+ .clk = &mstp_clks[MSTP006],
+ }, {
+ /* SCIF2 */
+ .dev_id = "sh-sci.2",
+ .con_id = "sci_fck",
+ .clk = &mstp_clks[MSTP005],
+ },
+ CLKDEV_CON_ID("msiof0", &mstp_clks[MSTP002]),
+ CLKDEV_CON_ID("sbr0", &mstp_clks[MSTP001]),
+ CLKDEV_CON_ID("i2c0", &mstp_clks[MSTP109]),
+ CLKDEV_CON_ID("icb0", &mstp_clks[MSTP227]),
+ CLKDEV_CON_ID("meram0", &mstp_clks[MSTP226]),
+ CLKDEV_CON_ID("dacy1", &mstp_clks[MSTP224]),
+ CLKDEV_CON_ID("dacy0", &mstp_clks[MSTP223]),
+ CLKDEV_CON_ID("tsif0", &mstp_clks[MSTP222]),
+ CLKDEV_CON_ID("sdhi0", &mstp_clks[MSTP218]),
+ CLKDEV_CON_ID("mmcif0", &mstp_clks[MSTP217]),
+ CLKDEV_CON_ID("usbf0", &mstp_clks[MSTP211]),
+ CLKDEV_CON_ID("veu1", &mstp_clks[MSTP207]),
+ CLKDEV_CON_ID("vou0", &mstp_clks[MSTP205]),
+ CLKDEV_CON_ID("beu0", &mstp_clks[MSTP204]),
+ CLKDEV_CON_ID("ceu0", &mstp_clks[MSTP203]),
+ CLKDEV_CON_ID("veu0", &mstp_clks[MSTP202]),
+ CLKDEV_CON_ID("vpu0", &mstp_clks[MSTP201]),
+ CLKDEV_CON_ID("lcdc0", &mstp_clks[MSTP200]),
};
int __init arch_clk_init(void)
@@ -202,14 +280,16 @@ int __init arch_clk_init(void)
for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++)
ret = clk_register(main_clks[k]);
+ clkdev_add_table(lookups, ARRAY_SIZE(lookups));
+
if (!ret)
ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table);
if (!ret)
- ret = sh_clk_div6_register(div6_clks, ARRAY_SIZE(div6_clks));
+ ret = sh_clk_div6_register(div6_clks, DIV6_NR);
if (!ret)
- ret = sh_clk_mstp32_register(mstp_clks, ARRAY_SIZE(mstp_clks));
+ ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR);
return ret;
}
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7722.c b/arch/sh/kernel/cpu/sh4a/clock-sh7722.c
index 15db6d521c5c..2030f3d9fac7 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7722.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7722.c
@@ -21,6 +21,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
+#include <asm/clkdev.h>
#include <asm/clock.h>
#include <asm/hwblk.h>
#include <cpu/sh7722.h>
@@ -36,8 +37,6 @@
/* Fixed 32 KHz root clock for RTC and Power Management purposes */
static struct clk r_clk = {
- .name = "rclk",
- .id = -1,
.rate = 32768,
};
@@ -46,8 +45,6 @@ static struct clk r_clk = {
* from the platform code.
*/
struct clk extal_clk = {
- .name = "extal",
- .id = -1,
.rate = 33333333,
};
@@ -69,8 +66,6 @@ static struct clk_ops dll_clk_ops = {
};
static struct clk dll_clk = {
- .name = "dll_clk",
- .id = -1,
.ops = &dll_clk_ops,
.parent = &r_clk,
.flags = CLK_ENABLE_ON_INIT,
@@ -94,8 +89,6 @@ static struct clk_ops pll_clk_ops = {
};
static struct clk pll_clk = {
- .name = "pll_clk",
- .id = -1,
.ops = &pll_clk_ops,
.flags = CLK_ENABLE_ON_INIT,
};
@@ -121,68 +114,142 @@ static struct clk_div4_table div4_table = {
.div_mult_table = &div4_div_mult_table,
};
-#define DIV4(_str, _reg, _bit, _mask, _flags) \
- SH_CLK_DIV4(_str, &pll_clk, _reg, _bit, _mask, _flags)
+#define DIV4(_reg, _bit, _mask, _flags) \
+ SH_CLK_DIV4(&pll_clk, _reg, _bit, _mask, _flags)
enum { DIV4_I, DIV4_U, DIV4_SH, DIV4_B, DIV4_B3, DIV4_P, DIV4_NR };
struct clk div4_clks[DIV4_NR] = {
- [DIV4_I] = DIV4("cpu_clk", FRQCR, 20, 0x1fef, CLK_ENABLE_ON_INIT),
- [DIV4_U] = DIV4("umem_clk", FRQCR, 16, 0x1fff, CLK_ENABLE_ON_INIT),
- [DIV4_SH] = DIV4("shyway_clk", FRQCR, 12, 0x1fff, CLK_ENABLE_ON_INIT),
- [DIV4_B] = DIV4("bus_clk", FRQCR, 8, 0x1fff, CLK_ENABLE_ON_INIT),
- [DIV4_B3] = DIV4("b3_clk", FRQCR, 4, 0x1fff, CLK_ENABLE_ON_INIT),
- [DIV4_P] = DIV4("peripheral_clk", FRQCR, 0, 0x1fff, 0),
+ [DIV4_I] = DIV4(FRQCR, 20, 0x1fef, CLK_ENABLE_ON_INIT),
+ [DIV4_U] = DIV4(FRQCR, 16, 0x1fff, CLK_ENABLE_ON_INIT),
+ [DIV4_SH] = DIV4(FRQCR, 12, 0x1fff, CLK_ENABLE_ON_INIT),
+ [DIV4_B] = DIV4(FRQCR, 8, 0x1fff, CLK_ENABLE_ON_INIT),
+ [DIV4_B3] = DIV4(FRQCR, 4, 0x1fff, CLK_ENABLE_ON_INIT),
+ [DIV4_P] = DIV4(FRQCR, 0, 0x1fff, 0),
};
enum { DIV4_IRDA, DIV4_ENABLE_NR };
struct clk div4_enable_clks[DIV4_ENABLE_NR] = {
- [DIV4_IRDA] = DIV4("irda_clk", IRDACLKCR, 0, 0x1fff, 0),
+ [DIV4_IRDA] = DIV4(IRDACLKCR, 0, 0x1fff, 0),
};
enum { DIV4_SIUA, DIV4_SIUB, DIV4_REPARENT_NR };
struct clk div4_reparent_clks[DIV4_REPARENT_NR] = {
- [DIV4_SIUA] = DIV4("siua_clk", SCLKACR, 0, 0x1fff, 0),
- [DIV4_SIUB] = DIV4("siub_clk", SCLKBCR, 0, 0x1fff, 0),
+ [DIV4_SIUA] = DIV4(SCLKACR, 0, 0x1fff, 0),
+ [DIV4_SIUB] = DIV4(SCLKBCR, 0, 0x1fff, 0),
};
-struct clk div6_clks[] = {
- SH_CLK_DIV6("video_clk", &pll_clk, VCLKCR, 0),
+enum { DIV6_V, DIV6_NR };
+
+struct clk div6_clks[DIV6_NR] = {
+ [DIV6_V] = SH_CLK_DIV6(&pll_clk, VCLKCR, 0),
+};
+
+static struct clk mstp_clks[HWBLK_NR] = {
+ SH_HWBLK_CLK(HWBLK_URAM, &div4_clks[DIV4_U], CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK(HWBLK_XYMEM, &div4_clks[DIV4_B], CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK(HWBLK_TMU, &div4_clks[DIV4_P], 0),
+ SH_HWBLK_CLK(HWBLK_CMT, &r_clk, 0),
+ SH_HWBLK_CLK(HWBLK_RWDT, &r_clk, 0),
+ SH_HWBLK_CLK(HWBLK_FLCTL, &div4_clks[DIV4_P], 0),
+ SH_HWBLK_CLK(HWBLK_SCIF0, &div4_clks[DIV4_P], 0),
+ SH_HWBLK_CLK(HWBLK_SCIF1, &div4_clks[DIV4_P], 0),
+ SH_HWBLK_CLK(HWBLK_SCIF2, &div4_clks[DIV4_P], 0),
+
+ SH_HWBLK_CLK(HWBLK_IIC, &div4_clks[DIV4_P], 0),
+ SH_HWBLK_CLK(HWBLK_RTC, &r_clk, 0),
+
+ SH_HWBLK_CLK(HWBLK_SDHI, &div4_clks[DIV4_P], 0),
+ SH_HWBLK_CLK(HWBLK_KEYSC, &r_clk, 0),
+ SH_HWBLK_CLK(HWBLK_USBF, &div4_clks[DIV4_P], 0),
+ SH_HWBLK_CLK(HWBLK_2DG, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_SIU, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_VOU, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_JPU, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_BEU, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_CEU, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_VEU, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_VPU, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_LCDC, &div4_clks[DIV4_P], 0),
};
-#define R_CLK &r_clk
-#define P_CLK &div4_clks[DIV4_P]
-#define B_CLK &div4_clks[DIV4_B]
-#define U_CLK &div4_clks[DIV4_U]
-
-static struct clk mstp_clks[] = {
- SH_HWBLK_CLK("uram0", -1, U_CLK, HWBLK_URAM, CLK_ENABLE_ON_INIT),
- SH_HWBLK_CLK("xymem0", -1, B_CLK, HWBLK_XYMEM, CLK_ENABLE_ON_INIT),
- SH_HWBLK_CLK("tmu0", -1, P_CLK, HWBLK_TMU, 0),
- SH_HWBLK_CLK("cmt0", -1, R_CLK, HWBLK_CMT, 0),
- SH_HWBLK_CLK("rwdt0", -1, R_CLK, HWBLK_RWDT, 0),
- SH_HWBLK_CLK("flctl0", -1, P_CLK, HWBLK_FLCTL, 0),
- SH_HWBLK_CLK("scif0", -1, P_CLK, HWBLK_SCIF0, 0),
- SH_HWBLK_CLK("scif1", -1, P_CLK, HWBLK_SCIF1, 0),
- SH_HWBLK_CLK("scif2", -1, P_CLK, HWBLK_SCIF2, 0),
-
- SH_HWBLK_CLK("i2c0", -1, P_CLK, HWBLK_IIC, 0),
- SH_HWBLK_CLK("rtc0", -1, R_CLK, HWBLK_RTC, 0),
-
- SH_HWBLK_CLK("sdhi0", -1, P_CLK, HWBLK_SDHI, 0),
- SH_HWBLK_CLK("keysc0", -1, R_CLK, HWBLK_KEYSC, 0),
- SH_HWBLK_CLK("usbf0", -1, P_CLK, HWBLK_USBF, 0),
- SH_HWBLK_CLK("2dg0", -1, B_CLK, HWBLK_2DG, 0),
- SH_HWBLK_CLK("siu0", -1, B_CLK, HWBLK_SIU, 0),
- SH_HWBLK_CLK("vou0", -1, B_CLK, HWBLK_VOU, 0),
- SH_HWBLK_CLK("jpu0", -1, B_CLK, HWBLK_JPU, 0),
- SH_HWBLK_CLK("beu0", -1, B_CLK, HWBLK_BEU, 0),
- SH_HWBLK_CLK("ceu0", -1, B_CLK, HWBLK_CEU, 0),
- SH_HWBLK_CLK("veu0", -1, B_CLK, HWBLK_VEU, 0),
- SH_HWBLK_CLK("vpu0", -1, B_CLK, HWBLK_VPU, 0),
- SH_HWBLK_CLK("lcdc0", -1, P_CLK, HWBLK_LCDC, 0),
+#define CLKDEV_CON_ID(_id, _clk) { .con_id = _id, .clk = _clk }
+
+static struct clk_lookup lookups[] = {
+ /* main clocks */
+ CLKDEV_CON_ID("rclk", &r_clk),
+ CLKDEV_CON_ID("extal", &extal_clk),
+ CLKDEV_CON_ID("dll_clk", &dll_clk),
+ CLKDEV_CON_ID("pll_clk", &pll_clk),
+
+ /* DIV4 clocks */
+ CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]),
+ CLKDEV_CON_ID("umem_clk", &div4_clks[DIV4_U]),
+ CLKDEV_CON_ID("shyway_clk", &div4_clks[DIV4_SH]),
+ CLKDEV_CON_ID("bus_clk", &div4_clks[DIV4_B]),
+ CLKDEV_CON_ID("b3_clk", &div4_clks[DIV4_B3]),
+ CLKDEV_CON_ID("peripheral_clk", &div4_clks[DIV4_P]),
+ CLKDEV_CON_ID("irda_clk", &div4_enable_clks[DIV4_IRDA]),
+ CLKDEV_CON_ID("siua_clk", &div4_reparent_clks[DIV4_SIUA]),
+ CLKDEV_CON_ID("siub_clk", &div4_reparent_clks[DIV4_SIUB]),
+
+ /* DIV6 clocks */
+ CLKDEV_CON_ID("video_clk", &div6_clks[DIV6_V]),
+
+ /* MSTP clocks */
+ CLKDEV_CON_ID("uram0", &mstp_clks[HWBLK_URAM]),
+ CLKDEV_CON_ID("xymem0", &mstp_clks[HWBLK_XYMEM]),
+ {
+ /* TMU0 */
+ .dev_id = "sh_tmu.0",
+ .con_id = "tmu_fck",
+ .clk = &mstp_clks[HWBLK_TMU],
+ }, {
+ /* TMU1 */
+ .dev_id = "sh_tmu.1",
+ .con_id = "tmu_fck",
+ .clk = &mstp_clks[HWBLK_TMU],
+ }, {
+ /* TMU2 */
+ .dev_id = "sh_tmu.2",
+ .con_id = "tmu_fck",
+ .clk = &mstp_clks[HWBLK_TMU],
+ },
+ CLKDEV_CON_ID("cmt_fck", &mstp_clks[HWBLK_CMT]),
+ CLKDEV_CON_ID("rwdt0", &mstp_clks[HWBLK_RWDT]),
+ CLKDEV_CON_ID("flctl0", &mstp_clks[HWBLK_FLCTL]),
+ {
+ /* SCIF0 */
+ .dev_id = "sh-sci.0",
+ .con_id = "sci_fck",
+ .clk = &mstp_clks[HWBLK_SCIF0],
+ }, {
+ /* SCIF1 */
+ .dev_id = "sh-sci.1",
+ .con_id = "sci_fck",
+ .clk = &mstp_clks[HWBLK_SCIF1],
+ }, {
+ /* SCIF2 */
+ .dev_id = "sh-sci.2",
+ .con_id = "sci_fck",
+ .clk = &mstp_clks[HWBLK_SCIF2],
+ },
+ CLKDEV_CON_ID("i2c0", &mstp_clks[HWBLK_IIC]),
+ CLKDEV_CON_ID("rtc0", &mstp_clks[HWBLK_RTC]),
+ CLKDEV_CON_ID("sdhi0", &mstp_clks[HWBLK_SDHI]),
+ CLKDEV_CON_ID("keysc0", &mstp_clks[HWBLK_KEYSC]),
+ CLKDEV_CON_ID("usbf0", &mstp_clks[HWBLK_USBF]),
+ CLKDEV_CON_ID("2dg0", &mstp_clks[HWBLK_2DG]),
+ CLKDEV_CON_ID("siu0", &mstp_clks[HWBLK_SIU]),
+ CLKDEV_CON_ID("vou0", &mstp_clks[HWBLK_VOU]),
+ CLKDEV_CON_ID("jpu0", &mstp_clks[HWBLK_JPU]),
+ CLKDEV_CON_ID("beu0", &mstp_clks[HWBLK_BEU]),
+ CLKDEV_CON_ID("ceu0", &mstp_clks[HWBLK_CEU]),
+ CLKDEV_CON_ID("veu0", &mstp_clks[HWBLK_VEU]),
+ CLKDEV_CON_ID("vpu0", &mstp_clks[HWBLK_VPU]),
+ CLKDEV_CON_ID("lcdc0", &mstp_clks[HWBLK_LCDC]),
};
int __init arch_clk_init(void)
@@ -198,6 +265,8 @@ int __init arch_clk_init(void)
for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++)
ret = clk_register(main_clks[k]);
+ clkdev_add_table(lookups, ARRAY_SIZE(lookups));
+
if (!ret)
ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table);
@@ -210,10 +279,10 @@ int __init arch_clk_init(void)
DIV4_REPARENT_NR, &div4_table);
if (!ret)
- ret = sh_clk_div6_register(div6_clks, ARRAY_SIZE(div6_clks));
+ ret = sh_clk_div6_register(div6_clks, DIV6_NR);
if (!ret)
- ret = sh_hwblk_clk_register(mstp_clks, ARRAY_SIZE(mstp_clks));
+ ret = sh_hwblk_clk_register(mstp_clks, HWBLK_NR);
return ret;
}
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7723.c b/arch/sh/kernel/cpu/sh4a/clock-sh7723.c
index 50babe01fe44..d3938f0d3702 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7723.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7723.c
@@ -21,6 +21,8 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
+#include <linux/clk.h>
+#include <asm/clkdev.h>
#include <asm/clock.h>
#include <asm/hwblk.h>
#include <cpu/sh7723.h>
@@ -36,8 +38,6 @@
/* Fixed 32 KHz root clock for RTC and Power Management purposes */
static struct clk r_clk = {
- .name = "rclk",
- .id = -1,
.rate = 32768,
};
@@ -46,8 +46,6 @@ static struct clk r_clk = {
* from the platform code.
*/
struct clk extal_clk = {
- .name = "extal",
- .id = -1,
.rate = 33333333,
};
@@ -69,8 +67,6 @@ static struct clk_ops dll_clk_ops = {
};
static struct clk dll_clk = {
- .name = "dll_clk",
- .id = -1,
.ops = &dll_clk_ops,
.parent = &r_clk,
.flags = CLK_ENABLE_ON_INIT,
@@ -94,8 +90,6 @@ static struct clk_ops pll_clk_ops = {
};
static struct clk pll_clk = {
- .name = "pll_clk",
- .id = -1,
.ops = &pll_clk_ops,
.flags = CLK_ENABLE_ON_INIT,
};
@@ -123,92 +117,215 @@ static struct clk_div4_table div4_table = {
enum { DIV4_I, DIV4_U, DIV4_SH, DIV4_B, DIV4_B3, DIV4_P, DIV4_NR };
-#define DIV4(_str, _reg, _bit, _mask, _flags) \
- SH_CLK_DIV4(_str, &pll_clk, _reg, _bit, _mask, _flags)
+#define DIV4(_reg, _bit, _mask, _flags) \
+ SH_CLK_DIV4(&pll_clk, _reg, _bit, _mask, _flags)
struct clk div4_clks[DIV4_NR] = {
- [DIV4_I] = DIV4("cpu_clk", FRQCR, 20, 0x0dbf, CLK_ENABLE_ON_INIT),
- [DIV4_U] = DIV4("umem_clk", FRQCR, 16, 0x0dbf, CLK_ENABLE_ON_INIT),
- [DIV4_SH] = DIV4("shyway_clk", FRQCR, 12, 0x0dbf, CLK_ENABLE_ON_INIT),
- [DIV4_B] = DIV4("bus_clk", FRQCR, 8, 0x0dbf, CLK_ENABLE_ON_INIT),
- [DIV4_B3] = DIV4("b3_clk", FRQCR, 4, 0x0db4, CLK_ENABLE_ON_INIT),
- [DIV4_P] = DIV4("peripheral_clk", FRQCR, 0, 0x0dbf, 0),
+ [DIV4_I] = DIV4(FRQCR, 20, 0x0dbf, CLK_ENABLE_ON_INIT),
+ [DIV4_U] = DIV4(FRQCR, 16, 0x0dbf, CLK_ENABLE_ON_INIT),
+ [DIV4_SH] = DIV4(FRQCR, 12, 0x0dbf, CLK_ENABLE_ON_INIT),
+ [DIV4_B] = DIV4(FRQCR, 8, 0x0dbf, CLK_ENABLE_ON_INIT),
+ [DIV4_B3] = DIV4(FRQCR, 4, 0x0db4, CLK_ENABLE_ON_INIT),
+ [DIV4_P] = DIV4(FRQCR, 0, 0x0dbf, 0),
};
enum { DIV4_IRDA, DIV4_ENABLE_NR };
struct clk div4_enable_clks[DIV4_ENABLE_NR] = {
- [DIV4_IRDA] = DIV4("irda_clk", IRDACLKCR, 0, 0x0dbf, 0),
+ [DIV4_IRDA] = DIV4(IRDACLKCR, 0, 0x0dbf, 0),
};
enum { DIV4_SIUA, DIV4_SIUB, DIV4_REPARENT_NR };
struct clk div4_reparent_clks[DIV4_REPARENT_NR] = {
- [DIV4_SIUA] = DIV4("siua_clk", SCLKACR, 0, 0x0dbf, 0),
- [DIV4_SIUB] = DIV4("siub_clk", SCLKBCR, 0, 0x0dbf, 0),
-};
-struct clk div6_clks[] = {
- SH_CLK_DIV6("video_clk", &pll_clk, VCLKCR, 0),
+ [DIV4_SIUA] = DIV4(SCLKACR, 0, 0x0dbf, 0),
+ [DIV4_SIUB] = DIV4(SCLKBCR, 0, 0x0dbf, 0),
};
+enum { DIV6_V, DIV6_NR };
-#define R_CLK (&r_clk)
-#define P_CLK (&div4_clks[DIV4_P])
-#define B_CLK (&div4_clks[DIV4_B])
-#define U_CLK (&div4_clks[DIV4_U])
-#define I_CLK (&div4_clks[DIV4_I])
-#define SH_CLK (&div4_clks[DIV4_SH])
+struct clk div6_clks[DIV6_NR] = {
+ [DIV6_V] = SH_CLK_DIV6(&pll_clk, VCLKCR, 0),
+};
static struct clk mstp_clks[] = {
/* See page 60 of Datasheet V1.0: Overview -> Block Diagram */
- SH_HWBLK_CLK("tlb0", -1, I_CLK, HWBLK_TLB, CLK_ENABLE_ON_INIT),
- SH_HWBLK_CLK("ic0", -1, I_CLK, HWBLK_IC, CLK_ENABLE_ON_INIT),
- SH_HWBLK_CLK("oc0", -1, I_CLK, HWBLK_OC, CLK_ENABLE_ON_INIT),
- SH_HWBLK_CLK("l2c0", -1, SH_CLK, HWBLK_L2C, CLK_ENABLE_ON_INIT),
- SH_HWBLK_CLK("ilmem0", -1, I_CLK, HWBLK_ILMEM, CLK_ENABLE_ON_INIT),
- SH_HWBLK_CLK("fpu0", -1, I_CLK, HWBLK_FPU, CLK_ENABLE_ON_INIT),
- SH_HWBLK_CLK("intc0", -1, I_CLK, HWBLK_INTC, CLK_ENABLE_ON_INIT),
- SH_HWBLK_CLK("dmac0", -1, B_CLK, HWBLK_DMAC0, 0),
- SH_HWBLK_CLK("sh0", -1, SH_CLK, HWBLK_SHYWAY, CLK_ENABLE_ON_INIT),
- SH_HWBLK_CLK("hudi0", -1, P_CLK, HWBLK_HUDI, 0),
- SH_HWBLK_CLK("ubc0", -1, I_CLK, HWBLK_UBC, 0),
- SH_HWBLK_CLK("tmu0", -1, P_CLK, HWBLK_TMU0, 0),
- SH_HWBLK_CLK("cmt0", -1, R_CLK, HWBLK_CMT, 0),
- SH_HWBLK_CLK("rwdt0", -1, R_CLK, HWBLK_RWDT, 0),
- SH_HWBLK_CLK("dmac1", -1, B_CLK, HWBLK_DMAC1, 0),
- SH_HWBLK_CLK("tmu1", -1, P_CLK, HWBLK_TMU1, 0),
- SH_HWBLK_CLK("flctl0", -1, P_CLK, HWBLK_FLCTL, 0),
- SH_HWBLK_CLK("scif0", -1, P_CLK, HWBLK_SCIF0, 0),
- SH_HWBLK_CLK("scif1", -1, P_CLK, HWBLK_SCIF1, 0),
- SH_HWBLK_CLK("scif2", -1, P_CLK, HWBLK_SCIF2, 0),
- SH_HWBLK_CLK("scif3", -1, B_CLK, HWBLK_SCIF3, 0),
- SH_HWBLK_CLK("scif4", -1, B_CLK, HWBLK_SCIF4, 0),
- SH_HWBLK_CLK("scif5", -1, B_CLK, HWBLK_SCIF5, 0),
- SH_HWBLK_CLK("msiof0", -1, B_CLK, HWBLK_MSIOF0, 0),
- SH_HWBLK_CLK("msiof1", -1, B_CLK, HWBLK_MSIOF1, 0),
- SH_HWBLK_CLK("meram0", -1, SH_CLK, HWBLK_MERAM, 0),
-
- SH_HWBLK_CLK("i2c0", -1, P_CLK, HWBLK_IIC, 0),
- SH_HWBLK_CLK("rtc0", -1, R_CLK, HWBLK_RTC, 0),
-
- SH_HWBLK_CLK("atapi0", -1, SH_CLK, HWBLK_ATAPI, 0),
- SH_HWBLK_CLK("adc0", -1, P_CLK, HWBLK_ADC, 0),
- SH_HWBLK_CLK("tpu0", -1, B_CLK, HWBLK_TPU, 0),
- SH_HWBLK_CLK("irda0", -1, P_CLK, HWBLK_IRDA, 0),
- SH_HWBLK_CLK("tsif0", -1, B_CLK, HWBLK_TSIF, 0),
- SH_HWBLK_CLK("icb0", -1, B_CLK, HWBLK_ICB, CLK_ENABLE_ON_INIT),
- SH_HWBLK_CLK("sdhi0", -1, B_CLK, HWBLK_SDHI0, 0),
- SH_HWBLK_CLK("sdhi1", -1, B_CLK, HWBLK_SDHI1, 0),
- SH_HWBLK_CLK("keysc0", -1, R_CLK, HWBLK_KEYSC, 0),
- SH_HWBLK_CLK("usb0", -1, B_CLK, HWBLK_USB, 0),
- SH_HWBLK_CLK("2dg0", -1, B_CLK, HWBLK_2DG, 0),
- SH_HWBLK_CLK("siu0", -1, B_CLK, HWBLK_SIU, 0),
- SH_HWBLK_CLK("veu1", -1, B_CLK, HWBLK_VEU2H1, 0),
- SH_HWBLK_CLK("vou0", -1, B_CLK, HWBLK_VOU, 0),
- SH_HWBLK_CLK("beu0", -1, B_CLK, HWBLK_BEU, 0),
- SH_HWBLK_CLK("ceu0", -1, B_CLK, HWBLK_CEU, 0),
- SH_HWBLK_CLK("veu0", -1, B_CLK, HWBLK_VEU2H0, 0),
- SH_HWBLK_CLK("vpu0", -1, B_CLK, HWBLK_VPU, 0),
- SH_HWBLK_CLK("lcdc0", -1, B_CLK, HWBLK_LCDC, 0),
+ SH_HWBLK_CLK(HWBLK_TLB, &div4_clks[DIV4_I], CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK(HWBLK_IC, &div4_clks[DIV4_I], CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK(HWBLK_OC, &div4_clks[DIV4_I], CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK(HWBLK_L2C, &div4_clks[DIV4_SH], CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK(HWBLK_ILMEM, &div4_clks[DIV4_I], CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK(HWBLK_FPU, &div4_clks[DIV4_I], CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK(HWBLK_INTC, &div4_clks[DIV4_I], CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK(HWBLK_DMAC0, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_SHYWAY, &div4_clks[DIV4_SH], CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK(HWBLK_HUDI, &div4_clks[DIV4_P], 0),
+ SH_HWBLK_CLK(HWBLK_UBC, &div4_clks[DIV4_I], 0),
+ SH_HWBLK_CLK(HWBLK_TMU0, &div4_clks[DIV4_P], 0),
+ SH_HWBLK_CLK(HWBLK_CMT, &r_clk, 0),
+ SH_HWBLK_CLK(HWBLK_RWDT, &r_clk, 0),
+ SH_HWBLK_CLK(HWBLK_DMAC1, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_TMU1, &div4_clks[DIV4_P], 0),
+ SH_HWBLK_CLK(HWBLK_FLCTL, &div4_clks[DIV4_P], 0),
+ SH_HWBLK_CLK(HWBLK_SCIF0, &div4_clks[DIV4_P], 0),
+ SH_HWBLK_CLK(HWBLK_SCIF1, &div4_clks[DIV4_P], 0),
+ SH_HWBLK_CLK(HWBLK_SCIF2, &div4_clks[DIV4_P], 0),
+ SH_HWBLK_CLK(HWBLK_SCIF3, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_SCIF4, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_SCIF5, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_MSIOF0, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_MSIOF1, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_MERAM, &div4_clks[DIV4_SH], 0),
+
+ SH_HWBLK_CLK(HWBLK_IIC, &div4_clks[DIV4_P], 0),
+ SH_HWBLK_CLK(HWBLK_RTC, &r_clk, 0),
+
+ SH_HWBLK_CLK(HWBLK_ATAPI, &div4_clks[DIV4_SH], 0),
+ SH_HWBLK_CLK(HWBLK_ADC, &div4_clks[DIV4_P], 0),
+ SH_HWBLK_CLK(HWBLK_TPU, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_IRDA, &div4_clks[DIV4_P], 0),
+ SH_HWBLK_CLK(HWBLK_TSIF, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_ICB, &div4_clks[DIV4_B], CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK(HWBLK_SDHI0, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_SDHI1, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_KEYSC, &r_clk, 0),
+ SH_HWBLK_CLK(HWBLK_USB, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_2DG, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_SIU, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_VEU2H1, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_VOU, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_BEU, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_CEU, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_VEU2H0, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_VPU, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_LCDC, &div4_clks[DIV4_B], 0),
+};
+
+#define CLKDEV_CON_ID(_id, _clk) { .con_id = _id, .clk = _clk }
+
+static struct clk_lookup lookups[] = {
+ /* main clocks */
+ CLKDEV_CON_ID("rclk", &r_clk),
+ CLKDEV_CON_ID("extal", &extal_clk),
+ CLKDEV_CON_ID("dll_clk", &dll_clk),
+ CLKDEV_CON_ID("pll_clk", &pll_clk),
+
+ /* DIV4 clocks */
+ CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]),
+ CLKDEV_CON_ID("umem_clk", &div4_clks[DIV4_U]),
+ CLKDEV_CON_ID("shyway_clk", &div4_clks[DIV4_SH]),
+ CLKDEV_CON_ID("bus_clk", &div4_clks[DIV4_B]),
+ CLKDEV_CON_ID("b3_clk", &div4_clks[DIV4_B3]),
+ CLKDEV_CON_ID("peripheral_clk", &div4_clks[DIV4_P]),
+ CLKDEV_CON_ID("irda_clk", &div4_enable_clks[DIV4_IRDA]),
+ CLKDEV_CON_ID("siua_clk", &div4_reparent_clks[DIV4_SIUA]),
+ CLKDEV_CON_ID("siub_clk", &div4_reparent_clks[DIV4_SIUB]),
+
+ /* DIV6 clocks */
+ CLKDEV_CON_ID("video_clk", &div6_clks[DIV6_V]),
+
+ /* MSTP clocks */
+ CLKDEV_CON_ID("tlb0", &mstp_clks[HWBLK_TLB]),
+ CLKDEV_CON_ID("ic0", &mstp_clks[HWBLK_IC]),
+ CLKDEV_CON_ID("oc0", &mstp_clks[HWBLK_OC]),
+ CLKDEV_CON_ID("l2c0", &mstp_clks[HWBLK_L2C]),
+ CLKDEV_CON_ID("ilmem0", &mstp_clks[HWBLK_ILMEM]),
+ CLKDEV_CON_ID("fpu0", &mstp_clks[HWBLK_FPU]),
+ CLKDEV_CON_ID("intc0", &mstp_clks[HWBLK_INTC]),
+ CLKDEV_CON_ID("dmac0", &mstp_clks[HWBLK_DMAC0]),
+ CLKDEV_CON_ID("sh0", &mstp_clks[HWBLK_SHYWAY]),
+ CLKDEV_CON_ID("hudi0", &mstp_clks[HWBLK_HUDI]),
+ CLKDEV_CON_ID("ubc0", &mstp_clks[HWBLK_UBC]),
+ {
+ /* TMU0 */
+ .dev_id = "sh_tmu.0",
+ .con_id = "tmu_fck",
+ .clk = &mstp_clks[HWBLK_TMU0],
+ }, {
+ /* TMU1 */
+ .dev_id = "sh_tmu.1",
+ .con_id = "tmu_fck",
+ .clk = &mstp_clks[HWBLK_TMU0],
+ }, {
+ /* TMU2 */
+ .dev_id = "sh_tmu.2",
+ .con_id = "tmu_fck",
+ .clk = &mstp_clks[HWBLK_TMU0],
+ },
+ CLKDEV_CON_ID("cmt_fck", &mstp_clks[HWBLK_CMT]),
+ CLKDEV_CON_ID("rwdt0", &mstp_clks[HWBLK_RWDT]),
+ CLKDEV_CON_ID("dmac1", &mstp_clks[HWBLK_DMAC1]),
+ {
+ /* TMU3 */
+ .dev_id = "sh_tmu.3",
+ .con_id = "tmu_fck",
+ .clk = &mstp_clks[HWBLK_TMU1],
+ }, {
+ /* TMU4 */
+ .dev_id = "sh_tmu.4",
+ .con_id = "tmu_fck",
+ .clk = &mstp_clks[HWBLK_TMU1],
+ }, {
+ /* TMU5 */
+ .dev_id = "sh_tmu.5",
+ .con_id = "tmu_fck",
+ .clk = &mstp_clks[HWBLK_TMU1],
+ },
+ CLKDEV_CON_ID("flctl0", &mstp_clks[HWBLK_FLCTL]),
+ {
+ /* SCIF0 */
+ .dev_id = "sh-sci.0",
+ .con_id = "sci_fck",
+ .clk = &mstp_clks[HWBLK_SCIF0],
+ }, {
+ /* SCIF1 */
+ .dev_id = "sh-sci.1",
+ .con_id = "sci_fck",
+ .clk = &mstp_clks[HWBLK_SCIF1],
+ }, {
+ /* SCIF2 */
+ .dev_id = "sh-sci.2",
+ .con_id = "sci_fck",
+ .clk = &mstp_clks[HWBLK_SCIF2],
+ }, {
+ /* SCIF3 */
+ .dev_id = "sh-sci.3",
+ .con_id = "sci_fck",
+ .clk = &mstp_clks[HWBLK_SCIF3],
+ }, {
+ /* SCIF4 */
+ .dev_id = "sh-sci.4",
+ .con_id = "sci_fck",
+ .clk = &mstp_clks[HWBLK_SCIF4],
+ }, {
+ /* SCIF5 */
+ .dev_id = "sh-sci.5",
+ .con_id = "sci_fck",
+ .clk = &mstp_clks[HWBLK_SCIF5],
+ },
+ CLKDEV_CON_ID("msiof0", &mstp_clks[HWBLK_MSIOF0]),
+ CLKDEV_CON_ID("msiof1", &mstp_clks[HWBLK_MSIOF1]),
+ CLKDEV_CON_ID("meram0", &mstp_clks[HWBLK_MERAM]),
+ CLKDEV_CON_ID("i2c0", &mstp_clks[HWBLK_IIC]),
+ CLKDEV_CON_ID("rtc0", &mstp_clks[HWBLK_RTC]),
+ CLKDEV_CON_ID("atapi0", &mstp_clks[HWBLK_ATAPI]),
+ CLKDEV_CON_ID("adc0", &mstp_clks[HWBLK_ADC]),
+ CLKDEV_CON_ID("tpu0", &mstp_clks[HWBLK_TPU]),
+ CLKDEV_CON_ID("irda0", &mstp_clks[HWBLK_IRDA]),
+ CLKDEV_CON_ID("tsif0", &mstp_clks[HWBLK_TSIF]),
+ CLKDEV_CON_ID("icb0", &mstp_clks[HWBLK_ICB]),
+ CLKDEV_CON_ID("sdhi0", &mstp_clks[HWBLK_SDHI0]),
+ CLKDEV_CON_ID("sdhi1", &mstp_clks[HWBLK_SDHI1]),
+ CLKDEV_CON_ID("keysc0", &mstp_clks[HWBLK_KEYSC]),
+ CLKDEV_CON_ID("usb0", &mstp_clks[HWBLK_USB]),
+ CLKDEV_CON_ID("2dg0", &mstp_clks[HWBLK_2DG]),
+ CLKDEV_CON_ID("siu0", &mstp_clks[HWBLK_SIU]),
+ CLKDEV_CON_ID("veu1", &mstp_clks[HWBLK_VEU2H1]),
+ CLKDEV_CON_ID("vou0", &mstp_clks[HWBLK_VOU]),
+ CLKDEV_CON_ID("beu0", &mstp_clks[HWBLK_BEU]),
+ CLKDEV_CON_ID("ceu0", &mstp_clks[HWBLK_CEU]),
+ CLKDEV_CON_ID("veu0", &mstp_clks[HWBLK_VEU2H0]),
+ CLKDEV_CON_ID("vpu0", &mstp_clks[HWBLK_VPU]),
+ CLKDEV_CON_ID("lcdc0", &mstp_clks[HWBLK_LCDC]),
};
int __init arch_clk_init(void)
@@ -222,7 +339,9 @@ int __init arch_clk_init(void)
pll_clk.parent = &extal_clk;
for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++)
- ret = clk_register(main_clks[k]);
+ ret |= clk_register(main_clks[k]);
+
+ clkdev_add_table(lookups, ARRAY_SIZE(lookups));
if (!ret)
ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table);
@@ -236,10 +355,10 @@ int __init arch_clk_init(void)
DIV4_REPARENT_NR, &div4_table);
if (!ret)
- ret = sh_clk_div6_register(div6_clks, ARRAY_SIZE(div6_clks));
+ ret = sh_clk_div6_register(div6_clks, DIV6_NR);
if (!ret)
- ret = sh_hwblk_clk_register(mstp_clks, ARRAY_SIZE(mstp_clks));
+ ret = sh_hwblk_clk_register(mstp_clks, HWBLK_NR);
return ret;
}
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7724.c b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c
index 6707061fbf54..2d9700c6b53a 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7724.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c
@@ -21,6 +21,8 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
+#include <linux/clk.h>
+#include <asm/clkdev.h>
#include <asm/clock.h>
#include <asm/hwblk.h>
#include <cpu/sh7724.h>
@@ -39,8 +41,6 @@
/* Fixed 32 KHz root clock for RTC and Power Management purposes */
static struct clk r_clk = {
- .name = "rclk",
- .id = -1,
.rate = 32768,
};
@@ -49,8 +49,6 @@ static struct clk r_clk = {
* from the platform code.
*/
struct clk extal_clk = {
- .name = "extal",
- .id = -1,
.rate = 33333333,
};
@@ -74,8 +72,6 @@ static struct clk_ops fll_clk_ops = {
};
static struct clk fll_clk = {
- .name = "fll_clk",
- .id = -1,
.ops = &fll_clk_ops,
.parent = &r_clk,
.flags = CLK_ENABLE_ON_INIT,
@@ -96,8 +92,6 @@ static struct clk_ops pll_clk_ops = {
};
static struct clk pll_clk = {
- .name = "pll_clk",
- .id = -1,
.ops = &pll_clk_ops,
.flags = CLK_ENABLE_ON_INIT,
};
@@ -113,8 +107,6 @@ static struct clk_ops div3_clk_ops = {
};
static struct clk div3_clk = {
- .name = "div3_clk",
- .id = -1,
.ops = &div3_clk_ops,
.parent = &pll_clk,
};
@@ -151,86 +143,215 @@ static struct clk_div4_table div4_table = {
enum { DIV4_I, DIV4_SH, DIV4_B, DIV4_P, DIV4_M1, DIV4_NR };
-#define DIV4(_str, _reg, _bit, _mask, _flags) \
- SH_CLK_DIV4(_str, &pll_clk, _reg, _bit, _mask, _flags)
+#define DIV4(_reg, _bit, _mask, _flags) \
+ SH_CLK_DIV4(&pll_clk, _reg, _bit, _mask, _flags)
struct clk div4_clks[DIV4_NR] = {
- [DIV4_I] = DIV4("cpu_clk", FRQCRA, 20, 0x2f7d, CLK_ENABLE_ON_INIT),
- [DIV4_SH] = DIV4("shyway_clk", FRQCRA, 12, 0x2f7c, CLK_ENABLE_ON_INIT),
- [DIV4_B] = DIV4("bus_clk", FRQCRA, 8, 0x2f7c, CLK_ENABLE_ON_INIT),
- [DIV4_P] = DIV4("peripheral_clk", FRQCRA, 0, 0x2f7c, 0),
- [DIV4_M1] = DIV4("vpu_clk", FRQCRB, 4, 0x2f7c, CLK_ENABLE_ON_INIT),
+ [DIV4_I] = DIV4(FRQCRA, 20, 0x2f7d, CLK_ENABLE_ON_INIT),
+ [DIV4_SH] = DIV4(FRQCRA, 12, 0x2f7c, CLK_ENABLE_ON_INIT),
+ [DIV4_B] = DIV4(FRQCRA, 8, 0x2f7c, CLK_ENABLE_ON_INIT),
+ [DIV4_P] = DIV4(FRQCRA, 0, 0x2f7c, 0),
+ [DIV4_M1] = DIV4(FRQCRB, 4, 0x2f7c, CLK_ENABLE_ON_INIT),
};
-struct clk div6_clks[] = {
- SH_CLK_DIV6("video_clk", &div3_clk, VCLKCR, 0),
- SH_CLK_DIV6("fsia_clk", &div3_clk, FCLKACR, 0),
- SH_CLK_DIV6("fsib_clk", &div3_clk, FCLKBCR, 0),
- SH_CLK_DIV6("irda_clk", &div3_clk, IRDACLKCR, 0),
- SH_CLK_DIV6("spu_clk", &div3_clk, SPUCLKCR, CLK_ENABLE_ON_INIT),
+enum { DIV6_V, DIV6_FA, DIV6_FB, DIV6_I, DIV6_S, DIV6_NR };
+
+struct clk div6_clks[DIV6_NR] = {
+ [DIV6_V] = SH_CLK_DIV6(&div3_clk, VCLKCR, 0),
+ [DIV6_FA] = SH_CLK_DIV6(&div3_clk, FCLKACR, 0),
+ [DIV6_FB] = SH_CLK_DIV6(&div3_clk, FCLKBCR, 0),
+ [DIV6_I] = SH_CLK_DIV6(&div3_clk, IRDACLKCR, 0),
+ [DIV6_S] = SH_CLK_DIV6(&div3_clk, SPUCLKCR, CLK_ENABLE_ON_INIT),
+};
+
+static struct clk mstp_clks[HWBLK_NR] = {
+ SH_HWBLK_CLK(HWBLK_TLB, &div4_clks[DIV4_I], CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK(HWBLK_IC, &div4_clks[DIV4_I], CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK(HWBLK_OC, &div4_clks[DIV4_I], CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK(HWBLK_RSMEM, &div4_clks[DIV4_B], CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK(HWBLK_ILMEM, &div4_clks[DIV4_I], CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK(HWBLK_L2C, &div4_clks[DIV4_SH], CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK(HWBLK_FPU, &div4_clks[DIV4_I], CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK(HWBLK_INTC, &div4_clks[DIV4_P], CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK(HWBLK_DMAC0, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_SHYWAY, &div4_clks[DIV4_SH], CLK_ENABLE_ON_INIT),
+ SH_HWBLK_CLK(HWBLK_HUDI, &div4_clks[DIV4_P], 0),
+ SH_HWBLK_CLK(HWBLK_UBC, &div4_clks[DIV4_I], 0),
+ SH_HWBLK_CLK(HWBLK_TMU0, &div4_clks[DIV4_P], 0),
+ SH_HWBLK_CLK(HWBLK_CMT, &r_clk, 0),
+ SH_HWBLK_CLK(HWBLK_RWDT, &r_clk, 0),
+ SH_HWBLK_CLK(HWBLK_DMAC1, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_TMU1, &div4_clks[DIV4_P], 0),
+ SH_HWBLK_CLK(HWBLK_SCIF0, &div4_clks[DIV4_P], 0),
+ SH_HWBLK_CLK(HWBLK_SCIF1, &div4_clks[DIV4_P], 0),
+ SH_HWBLK_CLK(HWBLK_SCIF2, &div4_clks[DIV4_P], 0),
+ SH_HWBLK_CLK(HWBLK_SCIF3, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_SCIF4, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_SCIF5, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_MSIOF0, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_MSIOF1, &div4_clks[DIV4_B], 0),
+
+ SH_HWBLK_CLK(HWBLK_KEYSC, &r_clk, 0),
+ SH_HWBLK_CLK(HWBLK_RTC, &r_clk, 0),
+ SH_HWBLK_CLK(HWBLK_IIC0, &div4_clks[DIV4_P], 0),
+ SH_HWBLK_CLK(HWBLK_IIC1, &div4_clks[DIV4_P], 0),
+
+ SH_HWBLK_CLK(HWBLK_MMC, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_ETHER, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_ATAPI, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_TPU, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_IRDA, &div4_clks[DIV4_P], 0),
+ SH_HWBLK_CLK(HWBLK_TSIF, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_USB1, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_USB0, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_2DG, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_SDHI0, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_SDHI1, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_VEU1, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_CEU1, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_BEU1, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_2DDMAC, &div4_clks[DIV4_SH], 0),
+ SH_HWBLK_CLK(HWBLK_SPU, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_JPU, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_VOU, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_BEU0, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_CEU0, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_VEU0, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_VPU, &div4_clks[DIV4_B], 0),
+ SH_HWBLK_CLK(HWBLK_LCDC, &div4_clks[DIV4_B], 0),
};
-#define R_CLK (&r_clk)
-#define P_CLK (&div4_clks[DIV4_P])
-#define B_CLK (&div4_clks[DIV4_B])
-#define I_CLK (&div4_clks[DIV4_I])
-#define SH_CLK (&div4_clks[DIV4_SH])
-
-static struct clk mstp_clks[] = {
- SH_HWBLK_CLK("tlb0", -1, I_CLK, HWBLK_TLB, CLK_ENABLE_ON_INIT),
- SH_HWBLK_CLK("ic0", -1, I_CLK, HWBLK_IC, CLK_ENABLE_ON_INIT),
- SH_HWBLK_CLK("oc0", -1, I_CLK, HWBLK_OC, CLK_ENABLE_ON_INIT),
- SH_HWBLK_CLK("rs0", -1, B_CLK, HWBLK_RSMEM, CLK_ENABLE_ON_INIT),
- SH_HWBLK_CLK("ilmem0", -1, I_CLK, HWBLK_ILMEM, CLK_ENABLE_ON_INIT),
- SH_HWBLK_CLK("l2c0", -1, SH_CLK, HWBLK_L2C, CLK_ENABLE_ON_INIT),
- SH_HWBLK_CLK("fpu0", -1, I_CLK, HWBLK_FPU, CLK_ENABLE_ON_INIT),
- SH_HWBLK_CLK("intc0", -1, P_CLK, HWBLK_INTC, CLK_ENABLE_ON_INIT),
- SH_HWBLK_CLK("dmac0", -1, B_CLK, HWBLK_DMAC0, 0),
- SH_HWBLK_CLK("sh0", -1, SH_CLK, HWBLK_SHYWAY, CLK_ENABLE_ON_INIT),
- SH_HWBLK_CLK("hudi0", -1, P_CLK, HWBLK_HUDI, 0),
- SH_HWBLK_CLK("ubc0", -1, I_CLK, HWBLK_UBC, 0),
- SH_HWBLK_CLK("tmu0", -1, P_CLK, HWBLK_TMU0, 0),
- SH_HWBLK_CLK("cmt0", -1, R_CLK, HWBLK_CMT, 0),
- SH_HWBLK_CLK("rwdt0", -1, R_CLK, HWBLK_RWDT, 0),
- SH_HWBLK_CLK("dmac1", -1, B_CLK, HWBLK_DMAC1, 0),
- SH_HWBLK_CLK("tmu1", -1, P_CLK, HWBLK_TMU1, 0),
- SH_HWBLK_CLK("scif0", -1, P_CLK, HWBLK_SCIF0, 0),
- SH_HWBLK_CLK("scif1", -1, P_CLK, HWBLK_SCIF1, 0),
- SH_HWBLK_CLK("scif2", -1, P_CLK, HWBLK_SCIF2, 0),
- SH_HWBLK_CLK("scif3", -1, B_CLK, HWBLK_SCIF3, 0),
- SH_HWBLK_CLK("scif4", -1, B_CLK, HWBLK_SCIF4, 0),
- SH_HWBLK_CLK("scif5", -1, B_CLK, HWBLK_SCIF5, 0),
- SH_HWBLK_CLK("msiof0", -1, B_CLK, HWBLK_MSIOF0, 0),
- SH_HWBLK_CLK("msiof1", -1, B_CLK, HWBLK_MSIOF1, 0),
-
- SH_HWBLK_CLK("keysc0", -1, R_CLK, HWBLK_KEYSC, 0),
- SH_HWBLK_CLK("rtc0", -1, R_CLK, HWBLK_RTC, 0),
- SH_HWBLK_CLK("i2c0", -1, P_CLK, HWBLK_IIC0, 0),
- SH_HWBLK_CLK("i2c1", -1, P_CLK, HWBLK_IIC1, 0),
-
- SH_HWBLK_CLK("mmc0", -1, B_CLK, HWBLK_MMC, 0),
- SH_HWBLK_CLK("eth0", -1, B_CLK, HWBLK_ETHER, 0),
- SH_HWBLK_CLK("atapi0", -1, B_CLK, HWBLK_ATAPI, 0),
- SH_HWBLK_CLK("tpu0", -1, B_CLK, HWBLK_TPU, 0),
- SH_HWBLK_CLK("irda0", -1, P_CLK, HWBLK_IRDA, 0),
- SH_HWBLK_CLK("tsif0", -1, B_CLK, HWBLK_TSIF, 0),
- SH_HWBLK_CLK("usb1", -1, B_CLK, HWBLK_USB1, 0),
- SH_HWBLK_CLK("usb0", -1, B_CLK, HWBLK_USB0, 0),
- SH_HWBLK_CLK("2dg0", -1, B_CLK, HWBLK_2DG, 0),
- SH_HWBLK_CLK("sdhi0", -1, B_CLK, HWBLK_SDHI0, 0),
- SH_HWBLK_CLK("sdhi1", -1, B_CLK, HWBLK_SDHI1, 0),
- SH_HWBLK_CLK("veu1", -1, B_CLK, HWBLK_VEU1, 0),
- SH_HWBLK_CLK("ceu1", -1, B_CLK, HWBLK_CEU1, 0),
- SH_HWBLK_CLK("beu1", -1, B_CLK, HWBLK_BEU1, 0),
- SH_HWBLK_CLK("2ddmac0", -1, SH_CLK, HWBLK_2DDMAC, 0),
- SH_HWBLK_CLK("spu0", -1, B_CLK, HWBLK_SPU, 0),
- SH_HWBLK_CLK("jpu0", -1, B_CLK, HWBLK_JPU, 0),
- SH_HWBLK_CLK("vou0", -1, B_CLK, HWBLK_VOU, 0),
- SH_HWBLK_CLK("beu0", -1, B_CLK, HWBLK_BEU0, 0),
- SH_HWBLK_CLK("ceu0", -1, B_CLK, HWBLK_CEU0, 0),
- SH_HWBLK_CLK("veu0", -1, B_CLK, HWBLK_VEU0, 0),
- SH_HWBLK_CLK("vpu0", -1, B_CLK, HWBLK_VPU, 0),
- SH_HWBLK_CLK("lcdc0", -1, B_CLK, HWBLK_LCDC, 0),
+#define CLKDEV_CON_ID(_id, _clk) { .con_id = _id, .clk = _clk }
+
+static struct clk_lookup lookups[] = {
+ /* main clocks */
+ CLKDEV_CON_ID("rclk", &r_clk),
+ CLKDEV_CON_ID("extal", &extal_clk),
+ CLKDEV_CON_ID("fll_clk", &fll_clk),
+ CLKDEV_CON_ID("pll_clk", &pll_clk),
+ CLKDEV_CON_ID("div3_clk", &div3_clk),
+
+ /* DIV4 clocks */
+ CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]),
+ CLKDEV_CON_ID("shyway_clk", &div4_clks[DIV4_SH]),
+ CLKDEV_CON_ID("bus_clk", &div4_clks[DIV4_B]),
+ CLKDEV_CON_ID("peripheral_clk", &div4_clks[DIV4_P]),
+ CLKDEV_CON_ID("vpu_clk", &div4_clks[DIV4_M1]),
+
+ /* DIV6 clocks */
+ CLKDEV_CON_ID("video_clk", &div6_clks[DIV6_V]),
+ CLKDEV_CON_ID("fsia_clk", &div6_clks[DIV6_FA]),
+ CLKDEV_CON_ID("fsib_clk", &div6_clks[DIV6_FB]),
+ CLKDEV_CON_ID("irda_clk", &div6_clks[DIV6_I]),
+ CLKDEV_CON_ID("spu_clk", &div6_clks[DIV6_S]),
+
+ /* MSTP clocks */
+ CLKDEV_CON_ID("tlb0", &mstp_clks[HWBLK_TLB]),
+ CLKDEV_CON_ID("ic0", &mstp_clks[HWBLK_IC]),
+ CLKDEV_CON_ID("oc0", &mstp_clks[HWBLK_OC]),
+ CLKDEV_CON_ID("rs0", &mstp_clks[HWBLK_RSMEM]),
+ CLKDEV_CON_ID("ilmem0", &mstp_clks[HWBLK_ILMEM]),
+ CLKDEV_CON_ID("l2c0", &mstp_clks[HWBLK_L2C]),
+ CLKDEV_CON_ID("fpu0", &mstp_clks[HWBLK_FPU]),
+ CLKDEV_CON_ID("intc0", &mstp_clks[HWBLK_INTC]),
+ CLKDEV_CON_ID("dmac0", &mstp_clks[HWBLK_DMAC0]),
+ CLKDEV_CON_ID("sh0", &mstp_clks[HWBLK_SHYWAY]),
+ CLKDEV_CON_ID("hudi0", &mstp_clks[HWBLK_HUDI]),
+ CLKDEV_CON_ID("ubc0", &mstp_clks[HWBLK_UBC]),
+ {
+ /* TMU0 */
+ .dev_id = "sh_tmu.0",
+ .con_id = "tmu_fck",
+ .clk = &mstp_clks[HWBLK_TMU0],
+ }, {
+ /* TMU1 */
+ .dev_id = "sh_tmu.1",
+ .con_id = "tmu_fck",
+ .clk = &mstp_clks[HWBLK_TMU0],
+ }, {
+ /* TMU2 */
+ .dev_id = "sh_tmu.2",
+ .con_id = "tmu_fck",
+ .clk = &mstp_clks[HWBLK_TMU0],
+ }, {
+ /* TMU3 */
+ .dev_id = "sh_tmu.3",
+ .con_id = "tmu_fck",
+ .clk = &mstp_clks[HWBLK_TMU1],
+ },
+ CLKDEV_CON_ID("cmt_fck", &mstp_clks[HWBLK_CMT]),
+ CLKDEV_CON_ID("rwdt0", &mstp_clks[HWBLK_RWDT]),
+ CLKDEV_CON_ID("dmac1", &mstp_clks[HWBLK_DMAC1]),
+ {
+ /* TMU4 */
+ .dev_id = "sh_tmu.4",
+ .con_id = "tmu_fck",
+ .clk = &mstp_clks[HWBLK_TMU1],
+ }, {
+ /* TMU5 */
+ .dev_id = "sh_tmu.5",
+ .con_id = "tmu_fck",
+ .clk = &mstp_clks[HWBLK_TMU1],
+ }, {
+ /* SCIF0 */
+ .dev_id = "sh-sci.0",
+ .con_id = "sci_fck",
+ .clk = &mstp_clks[HWBLK_SCIF0],
+ }, {
+ /* SCIF1 */
+ .dev_id = "sh-sci.1",
+ .con_id = "sci_fck",
+ .clk = &mstp_clks[HWBLK_SCIF1],
+ }, {
+ /* SCIF2 */
+ .dev_id = "sh-sci.2",
+ .con_id = "sci_fck",
+ .clk = &mstp_clks[HWBLK_SCIF2],
+ }, {
+ /* SCIF3 */
+ .dev_id = "sh-sci.3",
+ .con_id = "sci_fck",
+ .clk = &mstp_clks[HWBLK_SCIF3],
+ }, {
+ /* SCIF4 */
+ .dev_id = "sh-sci.4",
+ .con_id = "sci_fck",
+ .clk = &mstp_clks[HWBLK_SCIF4],
+ }, {
+ /* SCIF5 */
+ .dev_id = "sh-sci.5",
+ .con_id = "sci_fck",
+ .clk = &mstp_clks[HWBLK_SCIF5],
+ },
+ CLKDEV_CON_ID("msiof0", &mstp_clks[HWBLK_MSIOF0]),
+ CLKDEV_CON_ID("msiof1", &mstp_clks[HWBLK_MSIOF1]),
+ CLKDEV_CON_ID("keysc0", &mstp_clks[HWBLK_KEYSC]),
+ CLKDEV_CON_ID("rtc0", &mstp_clks[HWBLK_RTC]),
+ CLKDEV_CON_ID("i2c0", &mstp_clks[HWBLK_IIC0]),
+ CLKDEV_CON_ID("i2c1", &mstp_clks[HWBLK_IIC1]),
+ CLKDEV_CON_ID("mmc0", &mstp_clks[HWBLK_MMC]),
+ CLKDEV_CON_ID("eth0", &mstp_clks[HWBLK_ETHER]),
+ CLKDEV_CON_ID("atapi0", &mstp_clks[HWBLK_ATAPI]),
+ CLKDEV_CON_ID("tpu0", &mstp_clks[HWBLK_TPU]),
+ CLKDEV_CON_ID("irda0", &mstp_clks[HWBLK_IRDA]),
+ CLKDEV_CON_ID("tsif0", &mstp_clks[HWBLK_TSIF]),
+ CLKDEV_CON_ID("usb1", &mstp_clks[HWBLK_USB1]),
+ CLKDEV_CON_ID("usb0", &mstp_clks[HWBLK_USB0]),
+ CLKDEV_CON_ID("2dg0", &mstp_clks[HWBLK_2DG]),
+ CLKDEV_CON_ID("sdhi0", &mstp_clks[HWBLK_SDHI0]),
+ CLKDEV_CON_ID("sdhi1", &mstp_clks[HWBLK_SDHI1]),
+ CLKDEV_CON_ID("veu1", &mstp_clks[HWBLK_VEU1]),
+ CLKDEV_CON_ID("ceu1", &mstp_clks[HWBLK_CEU1]),
+ CLKDEV_CON_ID("beu1", &mstp_clks[HWBLK_BEU1]),
+ CLKDEV_CON_ID("2ddmac0", &mstp_clks[HWBLK_2DDMAC]),
+ CLKDEV_CON_ID("spu0", &mstp_clks[HWBLK_SPU]),
+ CLKDEV_CON_ID("jpu0", &mstp_clks[HWBLK_JPU]),
+ CLKDEV_CON_ID("vou0", &mstp_clks[HWBLK_VOU]),
+ CLKDEV_CON_ID("beu0", &mstp_clks[HWBLK_BEU0]),
+ CLKDEV_CON_ID("ceu0", &mstp_clks[HWBLK_CEU0]),
+ CLKDEV_CON_ID("veu0", &mstp_clks[HWBLK_VEU0]),
+ CLKDEV_CON_ID("vpu0", &mstp_clks[HWBLK_VPU]),
+ CLKDEV_CON_ID("lcdc0", &mstp_clks[HWBLK_LCDC]),
};
int __init arch_clk_init(void)
@@ -246,14 +367,16 @@ int __init arch_clk_init(void)
for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++)
ret = clk_register(main_clks[k]);
+ clkdev_add_table(lookups, ARRAY_SIZE(lookups));
+
if (!ret)
ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table);
if (!ret)
- ret = sh_clk_div6_register(div6_clks, ARRAY_SIZE(div6_clks));
+ ret = sh_clk_div6_register(div6_clks, DIV6_NR);
if (!ret)
- ret = sh_hwblk_clk_register(mstp_clks, ARRAY_SIZE(mstp_clks));
+ ret = sh_hwblk_clk_register(mstp_clks, HWBLK_NR);
return ret;
}
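
For illustration, a minimal consumer-side sketch (not part of the patch; the driver and function names are invented) of how a device bound as "sh_tmu.0" would resolve its clock through the dev_id/con_id pairs registered above:

#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/err.h>

static int example_tmu_probe(struct platform_device *pdev)
{
	struct clk *clk;

	/* matched against { .dev_id = "sh_tmu.0", .con_id = "tmu_fck" } above */
	clk = clk_get(&pdev->dev, "tmu_fck");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	clk_enable(clk);
	pr_info("tmu_fck rate: %lu Hz\n", clk_get_rate(clk));
	clk_disable(clk);
	clk_put(clk);

	return 0;
}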
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7757.c b/arch/sh/kernel/cpu/sh4a/clock-sh7757.c
index 86aae60677dc..0a752bd324ac 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7757.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7757.c
@@ -12,6 +12,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
+#include <asm/clkdev.h>
#include <asm/clock.h>
#include <asm/freq.h>
@@ -87,7 +88,6 @@ static struct clk_ops sh7757_shyway_clk_ops = {
};
static struct clk sh7757_shyway_clk = {
- .name = "shyway_clk",
.flags = CLK_ENABLE_ON_INIT,
.ops = &sh7757_shyway_clk_ops,
};
@@ -100,6 +100,13 @@ static struct clk *sh7757_onchip_clocks[] = {
&sh7757_shyway_clk,
};
+#define CLKDEV_CON_ID(_id, _clk) { .con_id = _id, .clk = _clk }
+
+static struct clk_lookup lookups[] = {
+ /* main clocks */
+ CLKDEV_CON_ID("shyway_clk", &sh7757_shyway_clk),
+};
+
static int __init sh7757_clk_init(void)
{
struct clk *clk = clk_get(NULL, "master_clk");
@@ -123,6 +130,8 @@ static int __init sh7757_clk_init(void)
clk_put(clk);
+ clkdev_add_table(lookups, ARRAY_SIZE(lookups));
+
return 0;
}
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7763.c b/arch/sh/kernel/cpu/sh4a/clock-sh7763.c
index 9f401163e71e..1f1df48008cd 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7763.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7763.c
@@ -12,6 +12,8 @@
*/
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/io.h>
+#include <asm/clkdev.h>
#include <asm/clock.h>
#include <asm/freq.h>
#include <asm/io.h>
@@ -77,7 +79,6 @@ static struct clk_ops sh7763_shyway_clk_ops = {
};
static struct clk sh7763_shyway_clk = {
- .name = "shyway_clk",
.flags = CLK_ENABLE_ON_INIT,
.ops = &sh7763_shyway_clk_ops,
};
@@ -90,6 +91,13 @@ static struct clk *sh7763_onchip_clocks[] = {
&sh7763_shyway_clk,
};
+#define CLKDEV_CON_ID(_id, _clk) { .con_id = _id, .clk = _clk }
+
+static struct clk_lookup lookups[] = {
+ /* main clocks */
+ CLKDEV_CON_ID("shyway_clk", &sh7763_shyway_clk),
+};
+
int __init arch_clk_init(void)
{
struct clk *clk;
@@ -107,5 +115,7 @@ int __init arch_clk_init(void)
clk_put(clk);
+ clkdev_add_table(lookups, ARRAY_SIZE(lookups));
+
return ret;
}
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7780.c b/arch/sh/kernel/cpu/sh4a/clock-sh7780.c
index 150963a6001e..62d706350060 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7780.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7780.c
@@ -11,6 +11,8 @@
*/
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/io.h>
+#include <asm/clkdev.h>
#include <asm/clock.h>
#include <asm/freq.h>
#include <asm/io.h>
@@ -83,7 +85,6 @@ static struct clk_ops sh7780_shyway_clk_ops = {
};
static struct clk sh7780_shyway_clk = {
- .name = "shyway_clk",
.flags = CLK_ENABLE_ON_INIT,
.ops = &sh7780_shyway_clk_ops,
};
@@ -96,6 +97,13 @@ static struct clk *sh7780_onchip_clocks[] = {
&sh7780_shyway_clk,
};
+#define CLKDEV_CON_ID(_id, _clk) { .con_id = _id, .clk = _clk }
+
+static struct clk_lookup lookups[] = {
+ /* main clocks */
+ CLKDEV_CON_ID("shyway_clk", &sh7780_shyway_clk),
+};
+
int __init arch_clk_init(void)
{
struct clk *clk;
@@ -113,5 +121,7 @@ int __init arch_clk_init(void)
clk_put(clk);
+ clkdev_add_table(lookups, ARRAY_SIZE(lookups));
+
return ret;
}
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7785.c b/arch/sh/kernel/cpu/sh4a/clock-sh7785.c
index d997f0a25b10..c3e458aaa2b7 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7785.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7785.c
@@ -3,7 +3,7 @@
*
* SH7785 support for the clock framework
*
- * Copyright (C) 2007 - 2009 Paul Mundt
+ * Copyright (C) 2007 - 2010 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
@@ -14,6 +14,7 @@
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/cpufreq.h>
+#include <asm/clkdev.h>
#include <asm/clock.h>
#include <asm/freq.h>
#include <cpu/sh7785.h>
@@ -23,8 +24,6 @@
* from the platform code.
*/
static struct clk extal_clk = {
- .name = "extal",
- .id = -1,
.rate = 33333333,
};
@@ -42,8 +41,6 @@ static struct clk_ops pll_clk_ops = {
};
static struct clk pll_clk = {
- .name = "pll_clk",
- .id = -1,
.ops = &pll_clk_ops,
.parent = &extal_clk,
.flags = CLK_ENABLE_ON_INIT,
@@ -69,48 +66,149 @@ static struct clk_div4_table div4_table = {
enum { DIV4_I, DIV4_U, DIV4_SH, DIV4_B, DIV4_DDR, DIV4_GA,
DIV4_DU, DIV4_P, DIV4_NR };
-#define DIV4(_str, _bit, _mask, _flags) \
- SH_CLK_DIV4(_str, &pll_clk, FRQMR1, _bit, _mask, _flags)
+#define DIV4(_bit, _mask, _flags) \
+ SH_CLK_DIV4(&pll_clk, FRQMR1, _bit, _mask, _flags)
struct clk div4_clks[DIV4_NR] = {
- [DIV4_P] = DIV4("peripheral_clk", 0, 0x0f80, 0),
- [DIV4_DU] = DIV4("du_clk", 4, 0x0ff0, 0),
- [DIV4_GA] = DIV4("ga_clk", 8, 0x0030, 0),
- [DIV4_DDR] = DIV4("ddr_clk", 12, 0x000c, CLK_ENABLE_ON_INIT),
- [DIV4_B] = DIV4("bus_clk", 16, 0x0fe0, CLK_ENABLE_ON_INIT),
- [DIV4_SH] = DIV4("shyway_clk", 20, 0x000c, CLK_ENABLE_ON_INIT),
- [DIV4_U] = DIV4("umem_clk", 24, 0x000c, CLK_ENABLE_ON_INIT),
- [DIV4_I] = DIV4("cpu_clk", 28, 0x000e, CLK_ENABLE_ON_INIT),
+ [DIV4_P] = DIV4(0, 0x0f80, 0),
+ [DIV4_DU] = DIV4(4, 0x0ff0, 0),
+ [DIV4_GA] = DIV4(8, 0x0030, 0),
+ [DIV4_DDR] = DIV4(12, 0x000c, CLK_ENABLE_ON_INIT),
+ [DIV4_B] = DIV4(16, 0x0fe0, CLK_ENABLE_ON_INIT),
+ [DIV4_SH] = DIV4(20, 0x000c, CLK_ENABLE_ON_INIT),
+ [DIV4_U] = DIV4(24, 0x000c, CLK_ENABLE_ON_INIT),
+ [DIV4_I] = DIV4(28, 0x000e, CLK_ENABLE_ON_INIT),
};
#define MSTPCR0 0xffc80030
#define MSTPCR1 0xffc80034
-static struct clk mstp_clks[] = {
+enum { MSTP029, MSTP028, MSTP027, MSTP026, MSTP025, MSTP024,
+ MSTP021, MSTP020, MSTP017, MSTP016,
+ MSTP013, MSTP012, MSTP009, MSTP008, MSTP003, MSTP002,
+ MSTP119, MSTP117, MSTP105, MSTP104, MSTP100,
+ MSTP_NR };
+
+static struct clk mstp_clks[MSTP_NR] = {
/* MSTPCR0 */
- SH_CLK_MSTP32("scif_fck", 5, &div4_clks[DIV4_P], MSTPCR0, 29, 0),
- SH_CLK_MSTP32("scif_fck", 4, &div4_clks[DIV4_P], MSTPCR0, 28, 0),
- SH_CLK_MSTP32("scif_fck", 3, &div4_clks[DIV4_P], MSTPCR0, 27, 0),
- SH_CLK_MSTP32("scif_fck", 2, &div4_clks[DIV4_P], MSTPCR0, 26, 0),
- SH_CLK_MSTP32("scif_fck", 1, &div4_clks[DIV4_P], MSTPCR0, 25, 0),
- SH_CLK_MSTP32("scif_fck", 0, &div4_clks[DIV4_P], MSTPCR0, 24, 0),
- SH_CLK_MSTP32("ssi_fck", 1, &div4_clks[DIV4_P], MSTPCR0, 21, 0),
- SH_CLK_MSTP32("ssi_fck", 0, &div4_clks[DIV4_P], MSTPCR0, 20, 0),
- SH_CLK_MSTP32("hac_fck", 1, &div4_clks[DIV4_P], MSTPCR0, 17, 0),
- SH_CLK_MSTP32("hac_fck", 0, &div4_clks[DIV4_P], MSTPCR0, 16, 0),
- SH_CLK_MSTP32("mmcif_fck", -1, &div4_clks[DIV4_P], MSTPCR0, 13, 0),
- SH_CLK_MSTP32("flctl_fck", -1, &div4_clks[DIV4_P], MSTPCR0, 12, 0),
- SH_CLK_MSTP32("tmu345_fck", -1, &div4_clks[DIV4_P], MSTPCR0, 9, 0),
- SH_CLK_MSTP32("tmu012_fck", -1, &div4_clks[DIV4_P], MSTPCR0, 8, 0),
- SH_CLK_MSTP32("siof_fck", -1, &div4_clks[DIV4_P], MSTPCR0, 3, 0),
- SH_CLK_MSTP32("hspi_fck", -1, &div4_clks[DIV4_P], MSTPCR0, 2, 0),
+ [MSTP029] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 29, 0),
+ [MSTP028] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 28, 0),
+ [MSTP027] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 27, 0),
+ [MSTP026] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 26, 0),
+ [MSTP025] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 25, 0),
+ [MSTP024] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 24, 0),
+ [MSTP021] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 21, 0),
+ [MSTP020] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 20, 0),
+ [MSTP017] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 17, 0),
+ [MSTP016] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 16, 0),
+ [MSTP013] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 13, 0),
+ [MSTP012] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 12, 0),
+ [MSTP009] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 9, 0),
+ [MSTP008] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 8, 0),
+ [MSTP003] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 3, 0),
+ [MSTP002] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 2, 0),
/* MSTPCR1 */
- SH_CLK_MSTP32("hudi_fck", -1, NULL, MSTPCR1, 19, 0),
- SH_CLK_MSTP32("ubc_fck", -1, NULL, MSTPCR1, 17, 0),
- SH_CLK_MSTP32("dmac_11_6_fck", -1, NULL, MSTPCR1, 5, 0),
- SH_CLK_MSTP32("dmac_5_0_fck", -1, NULL, MSTPCR1, 4, 0),
- SH_CLK_MSTP32("gdta_fck", -1, NULL, MSTPCR1, 0, 0),
+ [MSTP119] = SH_CLK_MSTP32(NULL, MSTPCR1, 19, 0),
+ [MSTP117] = SH_CLK_MSTP32(NULL, MSTPCR1, 17, 0),
+ [MSTP105] = SH_CLK_MSTP32(NULL, MSTPCR1, 5, 0),
+ [MSTP104] = SH_CLK_MSTP32(NULL, MSTPCR1, 4, 0),
+ [MSTP100] = SH_CLK_MSTP32(NULL, MSTPCR1, 0, 0),
+};
+
+#define CLKDEV_CON_ID(_id, _clk) { .con_id = _id, .clk = _clk }
+
+static struct clk_lookup lookups[] = {
+ /* main clocks */
+ CLKDEV_CON_ID("extal", &extal_clk),
+ CLKDEV_CON_ID("pll_clk", &pll_clk),
+
+ /* DIV4 clocks */
+ CLKDEV_CON_ID("peripheral_clk", &div4_clks[DIV4_P]),
+ CLKDEV_CON_ID("du_clk", &div4_clks[DIV4_DU]),
+ CLKDEV_CON_ID("ga_clk", &div4_clks[DIV4_GA]),
+ CLKDEV_CON_ID("ddr_clk", &div4_clks[DIV4_DDR]),
+ CLKDEV_CON_ID("bus_clk", &div4_clks[DIV4_B]),
+ CLKDEV_CON_ID("shyway_clk", &div4_clks[DIV4_SH]),
+ CLKDEV_CON_ID("umem_clk", &div4_clks[DIV4_U]),
+ CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]),
+
+ /* MSTP32 clocks */
+ {
+ /* SCIF5 */
+ .dev_id = "sh-sci.5",
+ .con_id = "sci_fck",
+ .clk = &mstp_clks[MSTP029],
+ }, {
+ /* SCIF4 */
+ .dev_id = "sh-sci.4",
+ .con_id = "sci_fck",
+ .clk = &mstp_clks[MSTP028],
+ }, {
+ /* SCIF3 */
+ .dev_id = "sh-sci.3",
+ .con_id = "sci_fck",
+ .clk = &mstp_clks[MSTP027],
+ }, {
+ /* SCIF2 */
+ .dev_id = "sh-sci.2",
+ .con_id = "sci_fck",
+ .clk = &mstp_clks[MSTP026],
+ }, {
+ /* SCIF1 */
+ .dev_id = "sh-sci.1",
+ .con_id = "sci_fck",
+ .clk = &mstp_clks[MSTP025],
+ }, {
+ /* SCIF0 */
+ .dev_id = "sh-sci.0",
+ .con_id = "sci_fck",
+ .clk = &mstp_clks[MSTP024],
+ },
+ CLKDEV_CON_ID("ssi1_fck", &mstp_clks[MSTP021]),
+ CLKDEV_CON_ID("ssi0_fck", &mstp_clks[MSTP020]),
+ CLKDEV_CON_ID("hac1_fck", &mstp_clks[MSTP017]),
+ CLKDEV_CON_ID("hac0_fck", &mstp_clks[MSTP016]),
+ CLKDEV_CON_ID("mmcif_fck", &mstp_clks[MSTP013]),
+ CLKDEV_CON_ID("flctl_fck", &mstp_clks[MSTP012]),
+ {
+ /* TMU0 */
+ .dev_id = "sh_tmu.0",
+ .con_id = "tmu_fck",
+ .clk = &mstp_clks[MSTP008],
+ }, {
+ /* TMU1 */
+ .dev_id = "sh_tmu.1",
+ .con_id = "tmu_fck",
+ .clk = &mstp_clks[MSTP008],
+ }, {
+ /* TMU2 */
+ .dev_id = "sh_tmu.2",
+ .con_id = "tmu_fck",
+ .clk = &mstp_clks[MSTP008],
+ }, {
+ /* TMU3 */
+ .dev_id = "sh_tmu.3",
+ .con_id = "tmu_fck",
+ .clk = &mstp_clks[MSTP009],
+ }, {
+ /* TMU4 */
+ .dev_id = "sh_tmu.4",
+ .con_id = "tmu_fck",
+ .clk = &mstp_clks[MSTP009],
+ }, {
+ /* TMU5 */
+ .dev_id = "sh_tmu.5",
+ .con_id = "tmu_fck",
+ .clk = &mstp_clks[MSTP009],
+ },
+ CLKDEV_CON_ID("siof_fck", &mstp_clks[MSTP003]),
+ CLKDEV_CON_ID("hspi_fck", &mstp_clks[MSTP002]),
+ CLKDEV_CON_ID("hudi_fck", &mstp_clks[MSTP119]),
+ CLKDEV_CON_ID("ubc_fck", &mstp_clks[MSTP117]),
+ CLKDEV_CON_ID("dmac_11_6_fck", &mstp_clks[MSTP105]),
+ CLKDEV_CON_ID("dmac_5_0_fck", &mstp_clks[MSTP104]),
+ CLKDEV_CON_ID("gdta_fck", &mstp_clks[MSTP100]),
};
int __init arch_clk_init(void)
@@ -119,12 +217,14 @@ int __init arch_clk_init(void)
for (i = 0; i < ARRAY_SIZE(clks); i++)
ret |= clk_register(clks[i]);
+ for (i = 0; i < ARRAY_SIZE(lookups); i++)
+ clkdev_add(&lookups[i]);
if (!ret)
ret = sh_clk_div4_register(div4_clks, ARRAY_SIZE(div4_clks),
&div4_table);
if (!ret)
- ret = sh_clk_mstp32_register(mstp_clks, ARRAY_SIZE(mstp_clks));
+ ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR);
return ret;
}
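
As an aside, entries declared with CLKDEV_CON_ID() carry no dev_id, so clkdev matches them on the connection id alone; board and CPU code can therefore keep resolving the core clocks without a device. A hedged sketch of such a lookup (the helper name is made up):

#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/err.h>

static unsigned long example_peripheral_rate(void)
{
	/* NULL device: only the con_id is compared, so this resolves the
	 * CLKDEV_CON_ID("peripheral_clk", ...) entry registered above.
	 */
	struct clk *clk = clk_get(NULL, "peripheral_clk");
	unsigned long rate = 0;

	if (!IS_ERR(clk)) {
		rate = clk_get_rate(clk);
		clk_put(clk);
	}

	return rate;
}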
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7786.c b/arch/sh/kernel/cpu/sh4a/clock-sh7786.c
index af69fd468703..597c9fbe49c6 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7786.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7786.c
@@ -13,6 +13,7 @@
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/io.h>
+#include <asm/clkdev.h>
#include <asm/clock.h>
#include <asm/freq.h>
@@ -21,8 +22,6 @@
* from the platform code.
*/
static struct clk extal_clk = {
- .name = "extal",
- .id = -1,
.rate = 33333333,
};
@@ -44,8 +43,6 @@ static struct clk_ops pll_clk_ops = {
};
static struct clk pll_clk = {
- .name = "pll_clk",
- .id = -1,
.ops = &pll_clk_ops,
.parent = &extal_clk,
.flags = CLK_ENABLE_ON_INIT,
@@ -70,54 +67,191 @@ static struct clk_div4_table div4_table = {
enum { DIV4_I, DIV4_SH, DIV4_B, DIV4_DDR, DIV4_DU, DIV4_P, DIV4_NR };
-#define DIV4(_str, _bit, _mask, _flags) \
- SH_CLK_DIV4(_str, &pll_clk, FRQMR1, _bit, _mask, _flags)
+#define DIV4(_bit, _mask, _flags) \
+ SH_CLK_DIV4(&pll_clk, FRQMR1, _bit, _mask, _flags)
struct clk div4_clks[DIV4_NR] = {
- [DIV4_P] = DIV4("peripheral_clk", 0, 0x0b40, 0),
- [DIV4_DU] = DIV4("du_clk", 4, 0x0010, 0),
- [DIV4_DDR] = DIV4("ddr_clk", 12, 0x0002, CLK_ENABLE_ON_INIT),
- [DIV4_B] = DIV4("bus_clk", 16, 0x0360, CLK_ENABLE_ON_INIT),
- [DIV4_SH] = DIV4("shyway_clk", 20, 0x0002, CLK_ENABLE_ON_INIT),
- [DIV4_I] = DIV4("cpu_clk", 28, 0x0006, CLK_ENABLE_ON_INIT),
+ [DIV4_P] = DIV4(0, 0x0b40, 0),
+ [DIV4_DU] = DIV4(4, 0x0010, 0),
+ [DIV4_DDR] = DIV4(12, 0x0002, CLK_ENABLE_ON_INIT),
+ [DIV4_B] = DIV4(16, 0x0360, CLK_ENABLE_ON_INIT),
+ [DIV4_SH] = DIV4(20, 0x0002, CLK_ENABLE_ON_INIT),
+ [DIV4_I] = DIV4(28, 0x0006, CLK_ENABLE_ON_INIT),
};
#define MSTPCR0 0xffc40030
#define MSTPCR1 0xffc40034
-static struct clk mstp_clks[] = {
+enum { MSTP029, MSTP028, MSTP027, MSTP026, MSTP025, MSTP024,
+ MSTP023, MSTP022, MSTP021, MSTP020, MSTP017, MSTP016,
+ MSTP015, MSTP014, MSTP011, MSTP010, MSTP009, MSTP008,
+ MSTP005, MSTP004, MSTP002,
+ MSTP112, MSTP110, MSTP109, MSTP108,
+ MSTP105, MSTP104, MSTP103, MSTP102,
+ MSTP_NR };
+
+static struct clk mstp_clks[MSTP_NR] = {
/* MSTPCR0 */
- SH_CLK_MSTP32("scif_fck", 5, &div4_clks[DIV4_P], MSTPCR0, 29, 0),
- SH_CLK_MSTP32("scif_fck", 4, &div4_clks[DIV4_P], MSTPCR0, 28, 0),
- SH_CLK_MSTP32("scif_fck", 3, &div4_clks[DIV4_P], MSTPCR0, 27, 0),
- SH_CLK_MSTP32("scif_fck", 2, &div4_clks[DIV4_P], MSTPCR0, 26, 0),
- SH_CLK_MSTP32("scif_fck", 1, &div4_clks[DIV4_P], MSTPCR0, 25, 0),
- SH_CLK_MSTP32("scif_fck", 0, &div4_clks[DIV4_P], MSTPCR0, 24, 0),
- SH_CLK_MSTP32("ssi_fck", 3, &div4_clks[DIV4_P], MSTPCR0, 23, 0),
- SH_CLK_MSTP32("ssi_fck", 2, &div4_clks[DIV4_P], MSTPCR0, 22, 0),
- SH_CLK_MSTP32("ssi_fck", 1, &div4_clks[DIV4_P], MSTPCR0, 21, 0),
- SH_CLK_MSTP32("ssi_fck", 0, &div4_clks[DIV4_P], MSTPCR0, 20, 0),
- SH_CLK_MSTP32("hac_fck", 1, &div4_clks[DIV4_P], MSTPCR0, 17, 0),
- SH_CLK_MSTP32("hac_fck", 0, &div4_clks[DIV4_P], MSTPCR0, 16, 0),
- SH_CLK_MSTP32("i2c_fck", 1, &div4_clks[DIV4_P], MSTPCR0, 15, 0),
- SH_CLK_MSTP32("i2c_fck", 0, &div4_clks[DIV4_P], MSTPCR0, 14, 0),
- SH_CLK_MSTP32("tmu9_11_fck", -1, &div4_clks[DIV4_P], MSTPCR0, 11, 0),
- SH_CLK_MSTP32("tmu678_fck", -1, &div4_clks[DIV4_P], MSTPCR0, 10, 0),
- SH_CLK_MSTP32("tmu345_fck", -1, &div4_clks[DIV4_P], MSTPCR0, 9, 0),
- SH_CLK_MSTP32("tmu012_fck", -1, &div4_clks[DIV4_P], MSTPCR0, 8, 0),
- SH_CLK_MSTP32("sdif_fck", 1, &div4_clks[DIV4_P], MSTPCR0, 5, 0),
- SH_CLK_MSTP32("sdif_fck", 0, &div4_clks[DIV4_P], MSTPCR0, 4, 0),
- SH_CLK_MSTP32("hspi_fck", -1, &div4_clks[DIV4_P], MSTPCR0, 2, 0),
+ [MSTP029] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 29, 0),
+ [MSTP028] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 28, 0),
+ [MSTP027] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 27, 0),
+ [MSTP026] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 26, 0),
+ [MSTP025] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 25, 0),
+ [MSTP024] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 24, 0),
+ [MSTP023] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 23, 0),
+ [MSTP022] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 22, 0),
+ [MSTP021] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 21, 0),
+ [MSTP020] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 20, 0),
+ [MSTP017] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 17, 0),
+ [MSTP016] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 16, 0),
+ [MSTP015] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 15, 0),
+ [MSTP014] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 14, 0),
+ [MSTP011] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 11, 0),
+ [MSTP010] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 10, 0),
+ [MSTP009] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 9, 0),
+ [MSTP008] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 8, 0),
+ [MSTP005] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 5, 0),
+ [MSTP004] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 4, 0),
+ [MSTP002] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 2, 0),
/* MSTPCR1 */
- SH_CLK_MSTP32("usb_fck", -1, NULL, MSTPCR1, 12, 0),
- SH_CLK_MSTP32("pcie_fck", 2, NULL, MSTPCR1, 10, 0),
- SH_CLK_MSTP32("pcie_fck", 1, NULL, MSTPCR1, 9, 0),
- SH_CLK_MSTP32("pcie_fck", 0, NULL, MSTPCR1, 8, 0),
- SH_CLK_MSTP32("dmac_11_6_fck", -1, NULL, MSTPCR1, 5, 0),
- SH_CLK_MSTP32("dmac_5_0_fck", -1, NULL, MSTPCR1, 4, 0),
- SH_CLK_MSTP32("du_fck", -1, NULL, MSTPCR1, 3, 0),
- SH_CLK_MSTP32("ether_fck", -1, NULL, MSTPCR1, 2, 0),
+ [MSTP112] = SH_CLK_MSTP32(NULL, MSTPCR1, 12, 0),
+ [MSTP110] = SH_CLK_MSTP32(NULL, MSTPCR1, 10, 0),
+ [MSTP109] = SH_CLK_MSTP32(NULL, MSTPCR1, 9, 0),
+ [MSTP108] = SH_CLK_MSTP32(NULL, MSTPCR1, 8, 0),
+ [MSTP105] = SH_CLK_MSTP32(NULL, MSTPCR1, 5, 0),
+ [MSTP104] = SH_CLK_MSTP32(NULL, MSTPCR1, 4, 0),
+ [MSTP103] = SH_CLK_MSTP32(NULL, MSTPCR1, 3, 0),
+ [MSTP102] = SH_CLK_MSTP32(NULL, MSTPCR1, 2, 0),
+};
+
+#define CLKDEV_CON_ID(_id, _clk) { .con_id = _id, .clk = _clk }
+
+static struct clk_lookup lookups[] = {
+ /* main clocks */
+ CLKDEV_CON_ID("extal", &extal_clk),
+ CLKDEV_CON_ID("pll_clk", &pll_clk),
+
+ /* DIV4 clocks */
+ CLKDEV_CON_ID("peripheral_clk", &div4_clks[DIV4_P]),
+ CLKDEV_CON_ID("du_clk", &div4_clks[DIV4_DU]),
+ CLKDEV_CON_ID("ddr_clk", &div4_clks[DIV4_DDR]),
+ CLKDEV_CON_ID("bus_clk", &div4_clks[DIV4_B]),
+ CLKDEV_CON_ID("shyway_clk", &div4_clks[DIV4_SH]),
+ CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]),
+
+ /* MSTP32 clocks */
+ {
+ /* SCIF5 */
+ .dev_id = "sh-sci.5",
+ .con_id = "sci_fck",
+ .clk = &mstp_clks[MSTP029],
+ }, {
+ /* SCIF4 */
+ .dev_id = "sh-sci.4",
+ .con_id = "sci_fck",
+ .clk = &mstp_clks[MSTP028],
+ }, {
+ /* SCIF3 */
+ .dev_id = "sh-sci.3",
+ .con_id = "sci_fck",
+ .clk = &mstp_clks[MSTP027],
+ }, {
+ /* SCIF2 */
+ .dev_id = "sh-sci.2",
+ .con_id = "sci_fck",
+ .clk = &mstp_clks[MSTP026],
+ }, {
+ /* SCIF1 */
+ .dev_id = "sh-sci.1",
+ .con_id = "sci_fck",
+ .clk = &mstp_clks[MSTP025],
+ }, {
+ /* SCIF0 */
+ .dev_id = "sh-sci.0",
+ .con_id = "sci_fck",
+ .clk = &mstp_clks[MSTP024],
+ },
+ CLKDEV_CON_ID("ssi3_fck", &mstp_clks[MSTP023]),
+ CLKDEV_CON_ID("ssi2_fck", &mstp_clks[MSTP022]),
+ CLKDEV_CON_ID("ssi1_fck", &mstp_clks[MSTP021]),
+ CLKDEV_CON_ID("ssi0_fck", &mstp_clks[MSTP020]),
+ CLKDEV_CON_ID("hac1_fck", &mstp_clks[MSTP017]),
+ CLKDEV_CON_ID("hac0_fck", &mstp_clks[MSTP016]),
+ CLKDEV_CON_ID("i2c1_fck", &mstp_clks[MSTP015]),
+ CLKDEV_CON_ID("i2c0_fck", &mstp_clks[MSTP014]),
+ {
+ /* TMU0 */
+ .dev_id = "sh_tmu.0",
+ .con_id = "tmu_fck",
+ .clk = &mstp_clks[MSTP008],
+ }, {
+ /* TMU1 */
+ .dev_id = "sh_tmu.1",
+ .con_id = "tmu_fck",
+ .clk = &mstp_clks[MSTP008],
+ }, {
+ /* TMU2 */
+ .dev_id = "sh_tmu.2",
+ .con_id = "tmu_fck",
+ .clk = &mstp_clks[MSTP008],
+ }, {
+ /* TMU3 */
+ .dev_id = "sh_tmu.3",
+ .con_id = "tmu_fck",
+ .clk = &mstp_clks[MSTP009],
+ }, {
+ /* TMU4 */
+ .dev_id = "sh_tmu.4",
+ .con_id = "tmu_fck",
+ .clk = &mstp_clks[MSTP009],
+ }, {
+ /* TMU5 */
+ .dev_id = "sh_tmu.5",
+ .con_id = "tmu_fck",
+ .clk = &mstp_clks[MSTP009],
+ }, {
+ /* TMU6 */
+ .dev_id = "sh_tmu.6",
+ .con_id = "tmu_fck",
+ .clk = &mstp_clks[MSTP010],
+ }, {
+ /* TMU7 */
+ .dev_id = "sh_tmu.7",
+ .con_id = "tmu_fck",
+ .clk = &mstp_clks[MSTP010],
+ }, {
+ /* TMU8 */
+ .dev_id = "sh_tmu.8",
+ .con_id = "tmu_fck",
+ .clk = &mstp_clks[MSTP010],
+ }, {
+ /* TMU9 */
+ .dev_id = "sh_tmu.9",
+ .con_id = "tmu_fck",
+ .clk = &mstp_clks[MSTP011],
+ }, {
+ /* TMU10 */
+ .dev_id = "sh_tmu.10",
+ .con_id = "tmu_fck",
+ .clk = &mstp_clks[MSTP011],
+ }, {
+ /* TMU11 */
+ .dev_id = "sh_tmu.11",
+ .con_id = "tmu_fck",
+ .clk = &mstp_clks[MSTP011],
+ },
+ CLKDEV_CON_ID("sdif1_fck", &mstp_clks[MSTP005]),
+ CLKDEV_CON_ID("sdif0_fck", &mstp_clks[MSTP004]),
+ CLKDEV_CON_ID("hspi_fck", &mstp_clks[MSTP002]),
+ CLKDEV_CON_ID("usb_fck", &mstp_clks[MSTP112]),
+ CLKDEV_CON_ID("pcie2_fck", &mstp_clks[MSTP110]),
+ CLKDEV_CON_ID("pcie1_fck", &mstp_clks[MSTP109]),
+ CLKDEV_CON_ID("pcie0_fck", &mstp_clks[MSTP108]),
+ CLKDEV_CON_ID("dmac_11_6_fck", &mstp_clks[MSTP105]),
+ CLKDEV_CON_ID("dmac_5_0_fck", &mstp_clks[MSTP104]),
+ CLKDEV_CON_ID("du_fck", &mstp_clks[MSTP103]),
+ CLKDEV_CON_ID("ether_fck", &mstp_clks[MSTP102]),
};
int __init arch_clk_init(void)
@@ -126,12 +260,14 @@ int __init arch_clk_init(void)
for (i = 0; i < ARRAY_SIZE(clks); i++)
ret |= clk_register(clks[i]);
+ for (i = 0; i < ARRAY_SIZE(lookups); i++)
+ clkdev_add(&lookups[i]);
if (!ret)
ret = sh_clk_div4_register(div4_clks, ARRAY_SIZE(div4_clks),
&div4_table);
if (!ret)
- ret = sh_clk_mstp32_register(mstp_clks, ARRAY_SIZE(mstp_clks));
+ ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR);
return ret;
}
diff --git a/arch/sh/kernel/cpu/sh4a/clock-shx3.c b/arch/sh/kernel/cpu/sh4a/clock-shx3.c
index e75c57bdfa5e..236a6282d778 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-shx3.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-shx3.c
@@ -13,9 +13,10 @@
*/
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/io.h>
+#include <asm/clkdev.h>
#include <asm/clock.h>
#include <asm/freq.h>
-#include <asm/io.h>
static int ifc_divisors[] = { 1, 2, 4 ,6 };
static int bfc_divisors[] = { 1, 1, 1, 1, 1, 12, 16, 18, 24, 32, 36, 48 };
@@ -94,7 +95,6 @@ static struct clk_ops shx3_shyway_clk_ops = {
};
static struct clk shx3_shyway_clk = {
- .name = "shyway_clk",
.flags = CLK_ENABLE_ON_INIT,
.ops = &shx3_shyway_clk_ops,
};
@@ -107,6 +107,13 @@ static struct clk *shx3_onchip_clocks[] = {
&shx3_shyway_clk,
};
+#define CLKDEV_CON_ID(_id, _clk) { .con_id = _id, .clk = _clk }
+
+static struct clk_lookup lookups[] = {
+ /* main clocks */
+ CLKDEV_CON_ID("shyway_clk", &shx3_shyway_clk),
+};
+
int __init arch_clk_init(void)
{
struct clk *clk;
@@ -124,5 +131,7 @@ int __init arch_clk_init(void)
clk_put(clk);
+ clkdev_add_table(lookups, ARRAY_SIZE(lookups));
+
return ret;
}
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7343.c b/arch/sh/kernel/cpu/sh4a/setup-sh7343.c
index 45eb1bfd42c9..3681cafdb4af 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7343.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7343.c
@@ -21,7 +21,6 @@ static struct plat_sci_port scif0_platform_data = {
.flags = UPF_BOOT_AUTOCONF,
.type = PORT_SCIF,
.irqs = { 80, 80, 80, 80 },
- .clk = "scif0",
};
static struct platform_device scif0_device = {
@@ -37,7 +36,6 @@ static struct plat_sci_port scif1_platform_data = {
.flags = UPF_BOOT_AUTOCONF,
.type = PORT_SCIF,
.irqs = { 81, 81, 81, 81 },
- .clk = "scif1",
};
static struct platform_device scif1_device = {
@@ -53,7 +51,6 @@ static struct plat_sci_port scif2_platform_data = {
.flags = UPF_BOOT_AUTOCONF,
.type = PORT_SCIF,
.irqs = { 82, 82, 82, 82 },
- .clk = "scif2",
};
static struct platform_device scif2_device = {
@@ -69,7 +66,6 @@ static struct plat_sci_port scif3_platform_data = {
.flags = UPF_BOOT_AUTOCONF,
.type = PORT_SCIF,
.irqs = { 83, 83, 83, 83 },
- .clk = "scif3",
};
static struct platform_device scif3_device = {
@@ -207,17 +203,14 @@ static struct platform_device jpu_device = {
};
static struct sh_timer_config cmt_platform_data = {
- .name = "CMT",
.channel_offset = 0x60,
.timer_bit = 5,
- .clk = "cmt0",
.clockevent_rating = 125,
.clocksource_rating = 200,
};
static struct resource cmt_resources[] = {
[0] = {
- .name = "CMT",
.start = 0x044a0060,
.end = 0x044a006b,
.flags = IORESOURCE_MEM,
@@ -239,16 +232,13 @@ static struct platform_device cmt_device = {
};
static struct sh_timer_config tmu0_platform_data = {
- .name = "TMU0",
.channel_offset = 0x04,
.timer_bit = 0,
- .clk = "tmu0",
.clockevent_rating = 200,
};
static struct resource tmu0_resources[] = {
[0] = {
- .name = "TMU0",
.start = 0xffd80008,
.end = 0xffd80013,
.flags = IORESOURCE_MEM,
@@ -270,16 +260,13 @@ static struct platform_device tmu0_device = {
};
static struct sh_timer_config tmu1_platform_data = {
- .name = "TMU1",
.channel_offset = 0x10,
.timer_bit = 1,
- .clk = "tmu0",
.clocksource_rating = 200,
};
static struct resource tmu1_resources[] = {
[0] = {
- .name = "TMU1",
.start = 0xffd80014,
.end = 0xffd8001f,
.flags = IORESOURCE_MEM,
@@ -301,15 +288,12 @@ static struct platform_device tmu1_device = {
};
static struct sh_timer_config tmu2_platform_data = {
- .name = "TMU2",
.channel_offset = 0x1c,
.timer_bit = 2,
- .clk = "tmu0",
};
static struct resource tmu2_resources[] = {
[0] = {
- .name = "TMU2",
.start = 0xffd80020,
.end = 0xffd8002b,
.flags = IORESOURCE_MEM,
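
The .name and .clk strings removed above are redundant under clkdev: the lookup keys on the platform device name, which the driver core forms as "<name>.<id>". A small illustrative sketch (the helper is hypothetical):

#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/kernel.h>

static void example_show_clkdev_key(struct platform_device *pdev)
{
	/* A device registered as name "sh_tmu", id 0 is known to the driver
	 * core as "sh_tmu.0"; that string is what the .dev_id fields in the
	 * clkdev tables key on, so no per-device clock name is needed here.
	 */
	pr_info("clkdev dev_id for this device: %s\n", dev_name(&pdev->dev));
}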
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7366.c b/arch/sh/kernel/cpu/sh4a/setup-sh7366.c
index c494c193e3b6..8dab9e1bbd89 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7366.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7366.c
@@ -23,7 +23,6 @@ static struct plat_sci_port scif0_platform_data = {
.flags = UPF_BOOT_AUTOCONF,
.type = PORT_SCIF,
.irqs = { 80, 80, 80, 80 },
- .clk = "scif0",
};
static struct platform_device scif0_device = {
@@ -169,17 +168,14 @@ static struct platform_device veu1_device = {
};
static struct sh_timer_config cmt_platform_data = {
- .name = "CMT",
.channel_offset = 0x60,
.timer_bit = 5,
- .clk = "cmt0",
.clockevent_rating = 125,
.clocksource_rating = 200,
};
static struct resource cmt_resources[] = {
[0] = {
- .name = "CMT",
.start = 0x044a0060,
.end = 0x044a006b,
.flags = IORESOURCE_MEM,
@@ -201,16 +197,13 @@ static struct platform_device cmt_device = {
};
static struct sh_timer_config tmu0_platform_data = {
- .name = "TMU0",
.channel_offset = 0x04,
.timer_bit = 0,
- .clk = "tmu0",
.clockevent_rating = 200,
};
static struct resource tmu0_resources[] = {
[0] = {
- .name = "TMU0",
.start = 0xffd80008,
.end = 0xffd80013,
.flags = IORESOURCE_MEM,
@@ -232,16 +225,13 @@ static struct platform_device tmu0_device = {
};
static struct sh_timer_config tmu1_platform_data = {
- .name = "TMU1",
.channel_offset = 0x10,
.timer_bit = 1,
- .clk = "tmu0",
.clocksource_rating = 200,
};
static struct resource tmu1_resources[] = {
[0] = {
- .name = "TMU1",
.start = 0xffd80014,
.end = 0xffd8001f,
.flags = IORESOURCE_MEM,
@@ -263,15 +253,12 @@ static struct platform_device tmu1_device = {
};
static struct sh_timer_config tmu2_platform_data = {
- .name = "TMU2",
.channel_offset = 0x1c,
.timer_bit = 2,
- .clk = "tmu0",
};
static struct resource tmu2_resources[] = {
[0] = {
- .name = "TMU2",
.start = 0xffd80020,
.end = 0xffd8002b,
.flags = IORESOURCE_MEM,
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
index fd7e3639e845..156ccc960015 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
@@ -17,14 +17,13 @@
#include <linux/usb/m66592.h>
#include <asm/clock.h>
-#include <asm/dmaengine.h>
#include <asm/mmzone.h>
#include <asm/siu.h>
#include <cpu/dma-register.h>
#include <cpu/sh7722.h>
-static struct sh_dmae_slave_config sh7722_dmae_slaves[] = {
+static const struct sh_dmae_slave_config sh7722_dmae_slaves[] = {
{
.slave_id = SHDMA_SLAVE_SCIF0_TX,
.addr = 0xffe0000c,
@@ -75,10 +74,20 @@ static struct sh_dmae_slave_config sh7722_dmae_slaves[] = {
.addr = 0xa454c094,
.chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
.mid_rid = 0xb6,
+ }, {
+ .slave_id = SHDMA_SLAVE_SDHI0_TX,
+ .addr = 0x04ce0030,
+ .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_16BIT),
+ .mid_rid = 0xc1,
+ }, {
+ .slave_id = SHDMA_SLAVE_SDHI0_RX,
+ .addr = 0x04ce0030,
+ .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_16BIT),
+ .mid_rid = 0xc2,
},
};
-static struct sh_dmae_channel sh7722_dmae_channels[] = {
+static const struct sh_dmae_channel sh7722_dmae_channels[] = {
{
.offset = 0,
.dmars = 0,
@@ -106,7 +115,7 @@ static struct sh_dmae_channel sh7722_dmae_channels[] = {
}
};
-static unsigned int ts_shift[] = TS_SHIFT;
+static const unsigned int ts_shift[] = TS_SHIFT;
static struct sh_dmae_pdata dma_platform_data = {
.slave = sh7722_dmae_slaves,
@@ -174,7 +183,6 @@ static struct plat_sci_port scif0_platform_data = {
.flags = UPF_BOOT_AUTOCONF,
.type = PORT_SCIF,
.irqs = { 80, 80, 80, 80 },
- .clk = "scif0",
};
static struct platform_device scif0_device = {
@@ -190,7 +198,6 @@ static struct plat_sci_port scif1_platform_data = {
.flags = UPF_BOOT_AUTOCONF,
.type = PORT_SCIF,
.irqs = { 81, 81, 81, 81 },
- .clk = "scif1",
};
static struct platform_device scif1_device = {
@@ -206,7 +213,6 @@ static struct plat_sci_port scif2_platform_data = {
.flags = UPF_BOOT_AUTOCONF,
.type = PORT_SCIF,
.irqs = { 82, 82, 82, 82 },
- .clk = "scif2",
};
static struct platform_device scif2_device = {
@@ -401,17 +407,14 @@ static struct platform_device jpu_device = {
};
static struct sh_timer_config cmt_platform_data = {
- .name = "CMT",
.channel_offset = 0x60,
.timer_bit = 5,
- .clk = "cmt0",
.clockevent_rating = 125,
.clocksource_rating = 125,
};
static struct resource cmt_resources[] = {
[0] = {
- .name = "CMT",
.start = 0x044a0060,
.end = 0x044a006b,
.flags = IORESOURCE_MEM,
@@ -436,16 +439,13 @@ static struct platform_device cmt_device = {
};
static struct sh_timer_config tmu0_platform_data = {
- .name = "TMU0",
.channel_offset = 0x04,
.timer_bit = 0,
- .clk = "tmu0",
.clockevent_rating = 200,
};
static struct resource tmu0_resources[] = {
[0] = {
- .name = "TMU0",
.start = 0xffd80008,
.end = 0xffd80013,
.flags = IORESOURCE_MEM,
@@ -470,16 +470,13 @@ static struct platform_device tmu0_device = {
};
static struct sh_timer_config tmu1_platform_data = {
- .name = "TMU1",
.channel_offset = 0x10,
.timer_bit = 1,
- .clk = "tmu0",
.clocksource_rating = 200,
};
static struct resource tmu1_resources[] = {
[0] = {
- .name = "TMU1",
.start = 0xffd80014,
.end = 0xffd8001f,
.flags = IORESOURCE_MEM,
@@ -504,15 +501,12 @@ static struct platform_device tmu1_device = {
};
static struct sh_timer_config tmu2_platform_data = {
- .name = "TMU2",
.channel_offset = 0x1c,
.timer_bit = 2,
- .clk = "tmu0",
};
static struct resource tmu2_resources[] = {
[0] = {
- .name = "TMU2",
.start = 0xffd80020,
.end = 0xffd8002b,
.flags = IORESOURCE_MEM,
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7723.c b/arch/sh/kernel/cpu/sh4a/setup-sh7723.c
index 85c61f624702..0eadefdbbba1 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7723.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7723.c
@@ -26,7 +26,6 @@ static struct plat_sci_port scif0_platform_data = {
.flags = UPF_BOOT_AUTOCONF,
.type = PORT_SCIF,
.irqs = { 80, 80, 80, 80 },
- .clk = "scif0",
};
static struct platform_device scif0_device = {
@@ -42,7 +41,6 @@ static struct plat_sci_port scif1_platform_data = {
.flags = UPF_BOOT_AUTOCONF,
.type = PORT_SCIF,
.irqs = { 81, 81, 81, 81 },
- .clk = "scif1",
};
static struct platform_device scif1_device = {
@@ -58,7 +56,6 @@ static struct plat_sci_port scif2_platform_data = {
.flags = UPF_BOOT_AUTOCONF,
.type = PORT_SCIF,
.irqs = { 82, 82, 82, 82 },
- .clk = "scif2",
};
static struct platform_device scif2_device = {
@@ -74,7 +71,6 @@ static struct plat_sci_port scif3_platform_data = {
.flags = UPF_BOOT_AUTOCONF,
.type = PORT_SCIFA,
.irqs = { 56, 56, 56, 56 },
- .clk = "scif3",
};
static struct platform_device scif3_device = {
@@ -90,7 +86,6 @@ static struct plat_sci_port scif4_platform_data = {
.flags = UPF_BOOT_AUTOCONF,
.type = PORT_SCIFA,
.irqs = { 88, 88, 88, 88 },
- .clk = "scif4",
};
static struct platform_device scif4_device = {
@@ -106,7 +101,6 @@ static struct plat_sci_port scif5_platform_data = {
.flags = UPF_BOOT_AUTOCONF,
.type = PORT_SCIFA,
.irqs = { 109, 109, 109, 109 },
- .clk = "scif5",
};
static struct platform_device scif5_device = {
@@ -211,17 +205,14 @@ static struct platform_device veu1_device = {
};
static struct sh_timer_config cmt_platform_data = {
- .name = "CMT",
.channel_offset = 0x60,
.timer_bit = 5,
- .clk = "cmt0",
.clockevent_rating = 125,
.clocksource_rating = 125,
};
static struct resource cmt_resources[] = {
[0] = {
- .name = "CMT",
.start = 0x044a0060,
.end = 0x044a006b,
.flags = IORESOURCE_MEM,
@@ -246,16 +237,13 @@ static struct platform_device cmt_device = {
};
static struct sh_timer_config tmu0_platform_data = {
- .name = "TMU0",
.channel_offset = 0x04,
.timer_bit = 0,
- .clk = "tmu0",
.clockevent_rating = 200,
};
static struct resource tmu0_resources[] = {
[0] = {
- .name = "TMU0",
.start = 0xffd80008,
.end = 0xffd80013,
.flags = IORESOURCE_MEM,
@@ -280,16 +268,13 @@ static struct platform_device tmu0_device = {
};
static struct sh_timer_config tmu1_platform_data = {
- .name = "TMU1",
.channel_offset = 0x10,
.timer_bit = 1,
- .clk = "tmu0",
.clocksource_rating = 200,
};
static struct resource tmu1_resources[] = {
[0] = {
- .name = "TMU1",
.start = 0xffd80014,
.end = 0xffd8001f,
.flags = IORESOURCE_MEM,
@@ -314,15 +299,12 @@ static struct platform_device tmu1_device = {
};
static struct sh_timer_config tmu2_platform_data = {
- .name = "TMU2",
.channel_offset = 0x1c,
.timer_bit = 2,
- .clk = "tmu0",
};
static struct resource tmu2_resources[] = {
[0] = {
- .name = "TMU2",
.start = 0xffd80020,
.end = 0xffd8002b,
.flags = IORESOURCE_MEM,
@@ -347,15 +329,12 @@ static struct platform_device tmu2_device = {
};
static struct sh_timer_config tmu3_platform_data = {
- .name = "TMU3",
.channel_offset = 0x04,
.timer_bit = 0,
- .clk = "tmu1",
};
static struct resource tmu3_resources[] = {
[0] = {
- .name = "TMU3",
.start = 0xffd90008,
.end = 0xffd90013,
.flags = IORESOURCE_MEM,
@@ -380,15 +359,12 @@ static struct platform_device tmu3_device = {
};
static struct sh_timer_config tmu4_platform_data = {
- .name = "TMU4",
.channel_offset = 0x10,
.timer_bit = 1,
- .clk = "tmu1",
};
static struct resource tmu4_resources[] = {
[0] = {
- .name = "TMU4",
.start = 0xffd90014,
.end = 0xffd9001f,
.flags = IORESOURCE_MEM,
@@ -413,15 +389,12 @@ static struct platform_device tmu4_device = {
};
static struct sh_timer_config tmu5_platform_data = {
- .name = "TMU5",
.channel_offset = 0x1c,
.timer_bit = 2,
- .clk = "tmu1",
};
static struct resource tmu5_resources[] = {
[0] = {
- .name = "TMU5",
.start = 0xffd90020,
.end = 0xffd9002b,
.flags = IORESOURCE_MEM,
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
index e7fa2a92fc1f..79c556e56262 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
@@ -18,48 +18,104 @@
#include <linux/mm.h>
#include <linux/serial_sci.h>
#include <linux/uio_driver.h>
+#include <linux/sh_dma.h>
#include <linux/sh_timer.h>
#include <linux/io.h>
#include <linux/notifier.h>
#include <asm/suspend.h>
#include <asm/clock.h>
-#include <asm/dmaengine.h>
#include <asm/mmzone.h>
#include <cpu/dma-register.h>
#include <cpu/sh7724.h>
/* DMA */
-static struct sh_dmae_channel sh7724_dmae0_channels[] = {
+static const struct sh_dmae_slave_config sh7724_dmae_slaves[] = {
{
- .offset = 0,
- .dmars = 0,
- .dmars_bit = 0,
+ .slave_id = SHDMA_SLAVE_SCIF0_TX,
+ .addr = 0xffe0000c,
+ .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x21,
}, {
- .offset = 0x10,
- .dmars = 0,
- .dmars_bit = 8,
+ .slave_id = SHDMA_SLAVE_SCIF0_RX,
+ .addr = 0xffe00014,
+ .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x22,
}, {
- .offset = 0x20,
- .dmars = 4,
- .dmars_bit = 0,
+ .slave_id = SHDMA_SLAVE_SCIF1_TX,
+ .addr = 0xffe1000c,
+ .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x25,
}, {
- .offset = 0x30,
- .dmars = 4,
- .dmars_bit = 8,
+ .slave_id = SHDMA_SLAVE_SCIF1_RX,
+ .addr = 0xffe10014,
+ .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x26,
}, {
- .offset = 0x50,
- .dmars = 8,
- .dmars_bit = 0,
+ .slave_id = SHDMA_SLAVE_SCIF2_TX,
+ .addr = 0xffe2000c,
+ .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x29,
}, {
- .offset = 0x60,
- .dmars = 8,
- .dmars_bit = 8,
- }
+ .slave_id = SHDMA_SLAVE_SCIF2_RX,
+ .addr = 0xffe20014,
+ .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x2a,
+ }, {
+ .slave_id = SHDMA_SLAVE_SCIF3_TX,
+ .addr = 0xa4e30020,
+ .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x2d,
+ }, {
+ .slave_id = SHDMA_SLAVE_SCIF3_RX,
+ .addr = 0xa4e30024,
+ .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x2e,
+ }, {
+ .slave_id = SHDMA_SLAVE_SCIF4_TX,
+ .addr = 0xa4e40020,
+ .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x31,
+ }, {
+ .slave_id = SHDMA_SLAVE_SCIF4_RX,
+ .addr = 0xa4e40024,
+ .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x32,
+ }, {
+ .slave_id = SHDMA_SLAVE_SCIF5_TX,
+ .addr = 0xa4e50020,
+ .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x35,
+ }, {
+ .slave_id = SHDMA_SLAVE_SCIF5_RX,
+ .addr = 0xa4e50024,
+ .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x36,
+ }, {
+ .slave_id = SHDMA_SLAVE_SDHI0_TX,
+ .addr = 0x04ce0030,
+ .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_16BIT),
+ .mid_rid = 0xc1,
+ }, {
+ .slave_id = SHDMA_SLAVE_SDHI0_RX,
+ .addr = 0x04ce0030,
+ .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_16BIT),
+ .mid_rid = 0xc2,
+ }, {
+ .slave_id = SHDMA_SLAVE_SDHI1_TX,
+ .addr = 0x04cf0030,
+ .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_16BIT),
+ .mid_rid = 0xc9,
+ }, {
+ .slave_id = SHDMA_SLAVE_SDHI1_RX,
+ .addr = 0x04cf0030,
+ .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_16BIT),
+ .mid_rid = 0xca,
+ },
};
-static struct sh_dmae_channel sh7724_dmae1_channels[] = {
+static const struct sh_dmae_channel sh7724_dmae_channels[] = {
{
.offset = 0,
.dmars = 0,
@@ -87,23 +143,13 @@ static struct sh_dmae_channel sh7724_dmae1_channels[] = {
}
};
-static unsigned int ts_shift[] = TS_SHIFT;
-
-static struct sh_dmae_pdata dma0_platform_data = {
- .channel = sh7724_dmae0_channels,
- .channel_num = ARRAY_SIZE(sh7724_dmae0_channels),
- .ts_low_shift = CHCR_TS_LOW_SHIFT,
- .ts_low_mask = CHCR_TS_LOW_MASK,
- .ts_high_shift = CHCR_TS_HIGH_SHIFT,
- .ts_high_mask = CHCR_TS_HIGH_MASK,
- .ts_shift = ts_shift,
- .ts_shift_num = ARRAY_SIZE(ts_shift),
- .dmaor_init = DMAOR_INIT,
-};
+static const unsigned int ts_shift[] = TS_SHIFT;
-static struct sh_dmae_pdata dma1_platform_data = {
- .channel = sh7724_dmae1_channels,
- .channel_num = ARRAY_SIZE(sh7724_dmae1_channels),
+static struct sh_dmae_pdata dma_platform_data = {
+ .slave = sh7724_dmae_slaves,
+ .slave_num = ARRAY_SIZE(sh7724_dmae_slaves),
+ .channel = sh7724_dmae_channels,
+ .channel_num = ARRAY_SIZE(sh7724_dmae_channels),
.ts_low_shift = CHCR_TS_LOW_SHIFT,
.ts_low_mask = CHCR_TS_LOW_MASK,
.ts_high_shift = CHCR_TS_HIGH_SHIFT,
@@ -187,7 +233,7 @@ static struct platform_device dma0_device = {
.resource = sh7724_dmae0_resources,
.num_resources = ARRAY_SIZE(sh7724_dmae0_resources),
.dev = {
- .platform_data = &dma0_platform_data,
+ .platform_data = &dma_platform_data,
},
.archdata = {
.hwblk_id = HWBLK_DMAC0,
@@ -200,7 +246,7 @@ static struct platform_device dma1_device = {
.resource = sh7724_dmae1_resources,
.num_resources = ARRAY_SIZE(sh7724_dmae1_resources),
.dev = {
- .platform_data = &dma1_platform_data,
+ .platform_data = &dma_platform_data,
},
.archdata = {
.hwblk_id = HWBLK_DMAC1,
@@ -213,7 +259,6 @@ static struct plat_sci_port scif0_platform_data = {
.flags = UPF_BOOT_AUTOCONF,
.type = PORT_SCIF,
.irqs = { 80, 80, 80, 80 },
- .clk = "scif0",
};
static struct platform_device scif0_device = {
@@ -229,7 +274,6 @@ static struct plat_sci_port scif1_platform_data = {
.flags = UPF_BOOT_AUTOCONF,
.type = PORT_SCIF,
.irqs = { 81, 81, 81, 81 },
- .clk = "scif1",
};
static struct platform_device scif1_device = {
@@ -245,7 +289,6 @@ static struct plat_sci_port scif2_platform_data = {
.flags = UPF_BOOT_AUTOCONF,
.type = PORT_SCIF,
.irqs = { 82, 82, 82, 82 },
- .clk = "scif2",
};
static struct platform_device scif2_device = {
@@ -261,7 +304,6 @@ static struct plat_sci_port scif3_platform_data = {
.flags = UPF_BOOT_AUTOCONF,
.type = PORT_SCIFA,
.irqs = { 56, 56, 56, 56 },
- .clk = "scif3",
};
static struct platform_device scif3_device = {
@@ -277,7 +319,6 @@ static struct plat_sci_port scif4_platform_data = {
.flags = UPF_BOOT_AUTOCONF,
.type = PORT_SCIFA,
.irqs = { 88, 88, 88, 88 },
- .clk = "scif4",
};
static struct platform_device scif4_device = {
@@ -293,7 +334,6 @@ static struct plat_sci_port scif5_platform_data = {
.flags = UPF_BOOT_AUTOCONF,
.type = PORT_SCIFA,
.irqs = { 109, 109, 109, 109 },
- .clk = "scif5",
};
static struct platform_device scif5_device = {
@@ -485,17 +525,14 @@ static struct platform_device veu1_device = {
};
static struct sh_timer_config cmt_platform_data = {
- .name = "CMT",
.channel_offset = 0x60,
.timer_bit = 5,
- .clk = "cmt0",
.clockevent_rating = 125,
.clocksource_rating = 200,
};
static struct resource cmt_resources[] = {
[0] = {
- .name = "CMT",
.start = 0x044a0060,
.end = 0x044a006b,
.flags = IORESOURCE_MEM,
@@ -520,16 +557,13 @@ static struct platform_device cmt_device = {
};
static struct sh_timer_config tmu0_platform_data = {
- .name = "TMU0",
.channel_offset = 0x04,
.timer_bit = 0,
- .clk = "tmu0",
.clockevent_rating = 200,
};
static struct resource tmu0_resources[] = {
[0] = {
- .name = "TMU0",
.start = 0xffd80008,
.end = 0xffd80013,
.flags = IORESOURCE_MEM,
@@ -554,16 +588,13 @@ static struct platform_device tmu0_device = {
};
static struct sh_timer_config tmu1_platform_data = {
- .name = "TMU1",
.channel_offset = 0x10,
.timer_bit = 1,
- .clk = "tmu0",
.clocksource_rating = 200,
};
static struct resource tmu1_resources[] = {
[0] = {
- .name = "TMU1",
.start = 0xffd80014,
.end = 0xffd8001f,
.flags = IORESOURCE_MEM,
@@ -588,15 +619,12 @@ static struct platform_device tmu1_device = {
};
static struct sh_timer_config tmu2_platform_data = {
- .name = "TMU2",
.channel_offset = 0x1c,
.timer_bit = 2,
- .clk = "tmu0",
};
static struct resource tmu2_resources[] = {
[0] = {
- .name = "TMU2",
.start = 0xffd80020,
.end = 0xffd8002b,
.flags = IORESOURCE_MEM,
@@ -622,15 +650,12 @@ static struct platform_device tmu2_device = {
static struct sh_timer_config tmu3_platform_data = {
- .name = "TMU3",
.channel_offset = 0x04,
.timer_bit = 0,
- .clk = "tmu1",
};
static struct resource tmu3_resources[] = {
[0] = {
- .name = "TMU3",
.start = 0xffd90008,
.end = 0xffd90013,
.flags = IORESOURCE_MEM,
@@ -655,15 +680,12 @@ static struct platform_device tmu3_device = {
};
static struct sh_timer_config tmu4_platform_data = {
- .name = "TMU4",
.channel_offset = 0x10,
.timer_bit = 1,
- .clk = "tmu1",
};
static struct resource tmu4_resources[] = {
[0] = {
- .name = "TMU4",
.start = 0xffd90014,
.end = 0xffd9001f,
.flags = IORESOURCE_MEM,
@@ -688,15 +710,12 @@ static struct platform_device tmu4_device = {
};
static struct sh_timer_config tmu5_platform_data = {
- .name = "TMU5",
.channel_offset = 0x1c,
.timer_bit = 2,
- .clk = "tmu1",
};
static struct resource tmu5_resources[] = {
[0] = {
- .name = "TMU5",
.start = 0xffd90020,
.end = 0xffd9002b,
.flags = IORESOURCE_MEM,
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7757.c b/arch/sh/kernel/cpu/sh4a/setup-sh7757.c
index e75edf58796a..444aca95b20d 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7757.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7757.c
@@ -63,16 +63,13 @@ static struct platform_device scif4_device = {
};
static struct sh_timer_config tmu0_platform_data = {
- .name = "TMU0",
.channel_offset = 0x04,
.timer_bit = 0,
- .clk = "peripheral_clk",
.clockevent_rating = 200,
};
static struct resource tmu0_resources[] = {
[0] = {
- .name = "TMU0",
.start = 0xfe430008,
.end = 0xfe430013,
.flags = IORESOURCE_MEM,
@@ -94,16 +91,13 @@ static struct platform_device tmu0_device = {
};
static struct sh_timer_config tmu1_platform_data = {
- .name = "TMU1",
.channel_offset = 0x10,
.timer_bit = 1,
- .clk = "peripheral_clk",
.clocksource_rating = 200,
};
static struct resource tmu1_resources[] = {
[0] = {
- .name = "TMU1",
.start = 0xfe430014,
.end = 0xfe43001f,
.flags = IORESOURCE_MEM,
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7763.c b/arch/sh/kernel/cpu/sh4a/setup-sh7763.c
index 7f6b0a5f7f82..5b5f6b005fc5 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7763.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7763.c
@@ -131,16 +131,13 @@ static struct platform_device usbf_device = {
};
static struct sh_timer_config tmu0_platform_data = {
- .name = "TMU0",
.channel_offset = 0x04,
.timer_bit = 0,
- .clk = "peripheral_clk",
.clockevent_rating = 200,
};
static struct resource tmu0_resources[] = {
[0] = {
- .name = "TMU0",
.start = 0xffd80008,
.end = 0xffd80013,
.flags = IORESOURCE_MEM,
@@ -162,16 +159,13 @@ static struct platform_device tmu0_device = {
};
static struct sh_timer_config tmu1_platform_data = {
- .name = "TMU1",
.channel_offset = 0x10,
.timer_bit = 1,
- .clk = "peripheral_clk",
.clocksource_rating = 200,
};
static struct resource tmu1_resources[] = {
[0] = {
- .name = "TMU1",
.start = 0xffd80014,
.end = 0xffd8001f,
.flags = IORESOURCE_MEM,
@@ -193,15 +187,12 @@ static struct platform_device tmu1_device = {
};
static struct sh_timer_config tmu2_platform_data = {
- .name = "TMU2",
.channel_offset = 0x1c,
.timer_bit = 2,
- .clk = "peripheral_clk",
};
static struct resource tmu2_resources[] = {
[0] = {
- .name = "TMU2",
.start = 0xffd80020,
.end = 0xffd8002f,
.flags = IORESOURCE_MEM,
@@ -223,15 +214,12 @@ static struct platform_device tmu2_device = {
};
static struct sh_timer_config tmu3_platform_data = {
- .name = "TMU3",
.channel_offset = 0x04,
.timer_bit = 0,
- .clk = "peripheral_clk",
};
static struct resource tmu3_resources[] = {
[0] = {
- .name = "TMU3",
.start = 0xffd88008,
.end = 0xffd88013,
.flags = IORESOURCE_MEM,
@@ -253,15 +241,12 @@ static struct platform_device tmu3_device = {
};
static struct sh_timer_config tmu4_platform_data = {
- .name = "TMU4",
.channel_offset = 0x10,
.timer_bit = 1,
- .clk = "peripheral_clk",
};
static struct resource tmu4_resources[] = {
[0] = {
- .name = "TMU4",
.start = 0xffd88014,
.end = 0xffd8801f,
.flags = IORESOURCE_MEM,
@@ -283,15 +268,12 @@ static struct platform_device tmu4_device = {
};
static struct sh_timer_config tmu5_platform_data = {
- .name = "TMU5",
.channel_offset = 0x1c,
.timer_bit = 2,
- .clk = "peripheral_clk",
};
static struct resource tmu5_resources[] = {
[0] = {
- .name = "TMU5",
.start = 0xffd88020,
.end = 0xffd8802b,
.flags = IORESOURCE_MEM,
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7770.c b/arch/sh/kernel/cpu/sh4a/setup-sh7770.c
index 86d681ecf90e..7270d7fd6761 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7770.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7770.c
@@ -165,16 +165,13 @@ static struct platform_device scif9_device = {
};
static struct sh_timer_config tmu0_platform_data = {
- .name = "TMU0",
.channel_offset = 0x04,
.timer_bit = 0,
- .clk = "peripheral_clk",
.clockevent_rating = 200,
};
static struct resource tmu0_resources[] = {
[0] = {
- .name = "TMU0",
.start = 0xffd80008,
.end = 0xffd80013,
.flags = IORESOURCE_MEM,
@@ -196,16 +193,13 @@ static struct platform_device tmu0_device = {
};
static struct sh_timer_config tmu1_platform_data = {
- .name = "TMU1",
.channel_offset = 0x10,
.timer_bit = 1,
- .clk = "peripheral_clk",
.clocksource_rating = 200,
};
static struct resource tmu1_resources[] = {
[0] = {
- .name = "TMU1",
.start = 0xffd80014,
.end = 0xffd8001f,
.flags = IORESOURCE_MEM,
@@ -227,15 +221,12 @@ static struct platform_device tmu1_device = {
};
static struct sh_timer_config tmu2_platform_data = {
- .name = "TMU2",
.channel_offset = 0x1c,
.timer_bit = 2,
- .clk = "peripheral_clk",
};
static struct resource tmu2_resources[] = {
[0] = {
- .name = "TMU2",
.start = 0xffd80020,
.end = 0xffd8002f,
.flags = IORESOURCE_MEM,
@@ -257,15 +248,12 @@ static struct platform_device tmu2_device = {
};
static struct sh_timer_config tmu3_platform_data = {
- .name = "TMU3",
.channel_offset = 0x04,
.timer_bit = 0,
- .clk = "peripheral_clk",
};
static struct resource tmu3_resources[] = {
[0] = {
- .name = "TMU3",
.start = 0xffd81008,
.end = 0xffd81013,
.flags = IORESOURCE_MEM,
@@ -287,15 +275,12 @@ static struct platform_device tmu3_device = {
};
static struct sh_timer_config tmu4_platform_data = {
- .name = "TMU4",
.channel_offset = 0x10,
.timer_bit = 1,
- .clk = "peripheral_clk",
};
static struct resource tmu4_resources[] = {
[0] = {
- .name = "TMU4",
.start = 0xffd81014,
.end = 0xffd8101f,
.flags = IORESOURCE_MEM,
@@ -317,15 +302,12 @@ static struct platform_device tmu4_device = {
};
static struct sh_timer_config tmu5_platform_data = {
- .name = "TMU5",
.channel_offset = 0x1c,
.timer_bit = 2,
- .clk = "peripheral_clk",
};
static struct resource tmu5_resources[] = {
[0] = {
- .name = "TMU5",
.start = 0xffd81020,
.end = 0xffd8102f,
.flags = IORESOURCE_MEM,
@@ -347,15 +329,12 @@ static struct platform_device tmu5_device = {
};
static struct sh_timer_config tmu6_platform_data = {
- .name = "TMU6",
.channel_offset = 0x04,
.timer_bit = 0,
- .clk = "peripheral_clk",
};
static struct resource tmu6_resources[] = {
[0] = {
- .name = "TMU6",
.start = 0xffd82008,
.end = 0xffd82013,
.flags = IORESOURCE_MEM,
@@ -377,15 +356,12 @@ static struct platform_device tmu6_device = {
};
static struct sh_timer_config tmu7_platform_data = {
- .name = "TMU7",
.channel_offset = 0x10,
.timer_bit = 1,
- .clk = "peripheral_clk",
};
static struct resource tmu7_resources[] = {
[0] = {
- .name = "TMU7",
.start = 0xffd82014,
.end = 0xffd8201f,
.flags = IORESOURCE_MEM,
@@ -407,15 +383,12 @@ static struct platform_device tmu7_device = {
};
static struct sh_timer_config tmu8_platform_data = {
- .name = "TMU8",
.channel_offset = 0x1c,
.timer_bit = 2,
- .clk = "peripheral_clk",
};
static struct resource tmu8_resources[] = {
[0] = {
- .name = "TMU8",
.start = 0xffd82020,
.end = 0xffd8202b,
.flags = IORESOURCE_MEM,
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7780.c b/arch/sh/kernel/cpu/sh4a/setup-sh7780.c
index 02e792c90de6..0f414864f76b 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7780.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7780.c
@@ -12,10 +12,9 @@
#include <linux/serial.h>
#include <linux/io.h>
#include <linux/serial_sci.h>
+#include <linux/sh_dma.h>
#include <linux/sh_timer.h>
-#include <asm/dmaengine.h>
-
#include <cpu/dma-register.h>
static struct plat_sci_port scif0_platform_data = {
@@ -49,16 +48,13 @@ static struct platform_device scif1_device = {
};
static struct sh_timer_config tmu0_platform_data = {
- .name = "TMU0",
.channel_offset = 0x04,
.timer_bit = 0,
- .clk = "peripheral_clk",
.clockevent_rating = 200,
};
static struct resource tmu0_resources[] = {
[0] = {
- .name = "TMU0",
.start = 0xffd80008,
.end = 0xffd80013,
.flags = IORESOURCE_MEM,
@@ -80,16 +76,13 @@ static struct platform_device tmu0_device = {
};
static struct sh_timer_config tmu1_platform_data = {
- .name = "TMU1",
.channel_offset = 0x10,
.timer_bit = 1,
- .clk = "peripheral_clk",
.clocksource_rating = 200,
};
static struct resource tmu1_resources[] = {
[0] = {
- .name = "TMU1",
.start = 0xffd80014,
.end = 0xffd8001f,
.flags = IORESOURCE_MEM,
@@ -111,15 +104,12 @@ static struct platform_device tmu1_device = {
};
static struct sh_timer_config tmu2_platform_data = {
- .name = "TMU2",
.channel_offset = 0x1c,
.timer_bit = 2,
- .clk = "peripheral_clk",
};
static struct resource tmu2_resources[] = {
[0] = {
- .name = "TMU2",
.start = 0xffd80020,
.end = 0xffd8002f,
.flags = IORESOURCE_MEM,
@@ -141,15 +131,12 @@ static struct platform_device tmu2_device = {
};
static struct sh_timer_config tmu3_platform_data = {
- .name = "TMU3",
.channel_offset = 0x04,
.timer_bit = 0,
- .clk = "peripheral_clk",
};
static struct resource tmu3_resources[] = {
[0] = {
- .name = "TMU3",
.start = 0xffdc0008,
.end = 0xffdc0013,
.flags = IORESOURCE_MEM,
@@ -171,15 +158,12 @@ static struct platform_device tmu3_device = {
};
static struct sh_timer_config tmu4_platform_data = {
- .name = "TMU4",
.channel_offset = 0x10,
.timer_bit = 1,
- .clk = "peripheral_clk",
};
static struct resource tmu4_resources[] = {
[0] = {
- .name = "TMU4",
.start = 0xffdc0014,
.end = 0xffdc001f,
.flags = IORESOURCE_MEM,
@@ -201,15 +185,12 @@ static struct platform_device tmu4_device = {
};
static struct sh_timer_config tmu5_platform_data = {
- .name = "TMU5",
.channel_offset = 0x1c,
.timer_bit = 2,
- .clk = "peripheral_clk",
};
static struct resource tmu5_resources[] = {
[0] = {
- .name = "TMU5",
.start = 0xffdc0020,
.end = 0xffdc002b,
.flags = IORESOURCE_MEM,
@@ -251,7 +232,7 @@ static struct platform_device rtc_device = {
};
/* DMA */
-static struct sh_dmae_channel sh7780_dmae0_channels[] = {
+static const struct sh_dmae_channel sh7780_dmae0_channels[] = {
{
.offset = 0,
.dmars = 0,
@@ -279,7 +260,7 @@ static struct sh_dmae_channel sh7780_dmae0_channels[] = {
}
};
-static struct sh_dmae_channel sh7780_dmae1_channels[] = {
+static const struct sh_dmae_channel sh7780_dmae1_channels[] = {
{
.offset = 0,
}, {
@@ -295,7 +276,7 @@ static struct sh_dmae_channel sh7780_dmae1_channels[] = {
}
};
-static unsigned int ts_shift[] = TS_SHIFT;
+static const unsigned int ts_shift[] = TS_SHIFT;
static struct sh_dmae_pdata dma0_platform_data = {
.channel = sh7780_dmae0_channels,
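The DMA channel tables and the TS_SHIFT lookup are constified here: they are read-only descriptors, and linux/sh_dma.h (which replaces asm/dmaengine.h above) carries them through const pointers, so they can live in rodata. A hedged sketch of how the shdma driver is expected to consume these fields — the exact way the low and high CHCR fields are combined into an index in drivers/dma/shdma.c may differ; this only illustrates why ts_shift[] is a plain lookup table:
#include <linux/sh_dma.h>
/* Sketch only: recover the transfer-size shift for a channel from its
 * CHCR value using the masks/shifts in sh_dmae_pdata.  The index
 * composition below (2-bit low field) is an assumption.
 */
static unsigned int xmit_shift(const struct sh_dmae_pdata *pdata, u32 chcr)
{
	unsigned int low  = (chcr & pdata->ts_low_mask)  >> pdata->ts_low_shift;
	unsigned int high = (chcr & pdata->ts_high_mask) >> pdata->ts_high_shift;

	return pdata->ts_shift[(high << 2) | low];
}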
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7785.c b/arch/sh/kernel/cpu/sh4a/setup-sh7785.c
index 1fcd88b1671e..c9a572bc6dc8 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7785.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7785.c
@@ -13,9 +13,9 @@
#include <linux/serial_sci.h>
#include <linux/io.h>
#include <linux/mm.h>
+#include <linux/sh_dma.h>
#include <linux/sh_timer.h>
-#include <asm/dmaengine.h>
#include <asm/mmzone.h>
#include <cpu/dma-register.h>
@@ -25,7 +25,6 @@ static struct plat_sci_port scif0_platform_data = {
.flags = UPF_BOOT_AUTOCONF,
.type = PORT_SCIF,
.irqs = { 40, 40, 40, 40 },
- .clk = "scif_fck",
};
static struct platform_device scif0_device = {
@@ -41,7 +40,6 @@ static struct plat_sci_port scif1_platform_data = {
.flags = UPF_BOOT_AUTOCONF,
.type = PORT_SCIF,
.irqs = { 44, 44, 44, 44 },
- .clk = "scif_fck",
};
static struct platform_device scif1_device = {
@@ -57,7 +55,6 @@ static struct plat_sci_port scif2_platform_data = {
.flags = UPF_BOOT_AUTOCONF,
.type = PORT_SCIF,
.irqs = { 60, 60, 60, 60 },
- .clk = "scif_fck",
};
static struct platform_device scif2_device = {
@@ -73,7 +70,6 @@ static struct plat_sci_port scif3_platform_data = {
.flags = UPF_BOOT_AUTOCONF,
.type = PORT_SCIF,
.irqs = { 61, 61, 61, 61 },
- .clk = "scif_fck",
};
static struct platform_device scif3_device = {
@@ -89,7 +85,6 @@ static struct plat_sci_port scif4_platform_data = {
.flags = UPF_BOOT_AUTOCONF,
.type = PORT_SCIF,
.irqs = { 62, 62, 62, 62 },
- .clk = "scif_fck",
};
static struct platform_device scif4_device = {
@@ -105,7 +100,6 @@ static struct plat_sci_port scif5_platform_data = {
.flags = UPF_BOOT_AUTOCONF,
.type = PORT_SCIF,
.irqs = { 63, 63, 63, 63 },
- .clk = "scif_fck",
};
static struct platform_device scif5_device = {
@@ -117,16 +111,13 @@ static struct platform_device scif5_device = {
};
static struct sh_timer_config tmu0_platform_data = {
- .name = "TMU0",
.channel_offset = 0x04,
.timer_bit = 0,
- .clk = "tmu012_fck",
.clockevent_rating = 200,
};
static struct resource tmu0_resources[] = {
[0] = {
- .name = "TMU0",
.start = 0xffd80008,
.end = 0xffd80013,
.flags = IORESOURCE_MEM,
@@ -148,16 +139,13 @@ static struct platform_device tmu0_device = {
};
static struct sh_timer_config tmu1_platform_data = {
- .name = "TMU1",
.channel_offset = 0x10,
.timer_bit = 1,
- .clk = "tmu012_fck",
.clocksource_rating = 200,
};
static struct resource tmu1_resources[] = {
[0] = {
- .name = "TMU1",
.start = 0xffd80014,
.end = 0xffd8001f,
.flags = IORESOURCE_MEM,
@@ -179,15 +167,12 @@ static struct platform_device tmu1_device = {
};
static struct sh_timer_config tmu2_platform_data = {
- .name = "TMU2",
.channel_offset = 0x1c,
.timer_bit = 2,
- .clk = "tmu012_fck",
};
static struct resource tmu2_resources[] = {
[0] = {
- .name = "TMU2",
.start = 0xffd80020,
.end = 0xffd8002f,
.flags = IORESOURCE_MEM,
@@ -209,15 +194,12 @@ static struct platform_device tmu2_device = {
};
static struct sh_timer_config tmu3_platform_data = {
- .name = "TMU3",
.channel_offset = 0x04,
.timer_bit = 0,
- .clk = "tmu345_fck",
};
static struct resource tmu3_resources[] = {
[0] = {
- .name = "TMU3",
.start = 0xffdc0008,
.end = 0xffdc0013,
.flags = IORESOURCE_MEM,
@@ -239,15 +221,12 @@ static struct platform_device tmu3_device = {
};
static struct sh_timer_config tmu4_platform_data = {
- .name = "TMU4",
.channel_offset = 0x10,
.timer_bit = 1,
- .clk = "tmu345_fck",
};
static struct resource tmu4_resources[] = {
[0] = {
- .name = "TMU4",
.start = 0xffdc0014,
.end = 0xffdc001f,
.flags = IORESOURCE_MEM,
@@ -269,15 +248,12 @@ static struct platform_device tmu4_device = {
};
static struct sh_timer_config tmu5_platform_data = {
- .name = "TMU5",
.channel_offset = 0x1c,
.timer_bit = 2,
- .clk = "tmu345_fck",
};
static struct resource tmu5_resources[] = {
[0] = {
- .name = "TMU5",
.start = 0xffdc0020,
.end = 0xffdc002b,
.flags = IORESOURCE_MEM,
@@ -299,7 +275,7 @@ static struct platform_device tmu5_device = {
};
/* DMA */
-static struct sh_dmae_channel sh7785_dmae0_channels[] = {
+static const struct sh_dmae_channel sh7785_dmae0_channels[] = {
{
.offset = 0,
.dmars = 0,
@@ -327,7 +303,7 @@ static struct sh_dmae_channel sh7785_dmae0_channels[] = {
}
};
-static struct sh_dmae_channel sh7785_dmae1_channels[] = {
+static const struct sh_dmae_channel sh7785_dmae1_channels[] = {
{
.offset = 0,
}, {
@@ -343,7 +319,7 @@ static struct sh_dmae_channel sh7785_dmae1_channels[] = {
}
};
-static unsigned int ts_shift[] = TS_SHIFT;
+static const unsigned int ts_shift[] = TS_SHIFT;
static struct sh_dmae_pdata dma0_platform_data = {
.channel = sh7785_dmae0_channels,
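The scif_fck / tmu*_fck strings dropped from the sh7785 platform data follow the same direction: drivers no longer match clocks by a name baked into platform data, but look them up against the device through clkdev. A hedged sketch of the consumer side, assuming a clkdev entry keyed on the platform device (the real sh-sci driver also keeps the clk around for disable/put, which is omitted here):
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>
/* Sketch only: clock lookup without a pdata-supplied name. */
static int scif_enable_fck(struct platform_device *pdev)
{
	struct clk *clk = clk_get(&pdev->dev, NULL);	/* matched via clkdev, not a string in pdata */

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	return clk_enable(clk);
}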
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7786.c b/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
index 7e585320710a..8797723231ea 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
@@ -1,7 +1,7 @@
/*
* SH7786 Setup
*
- * Copyright (C) 2009 Renesas Solutions Corp.
+ * Copyright (C) 2009 - 2010 Renesas Solutions Corp.
* Kuninori Morimoto <morimoto.kuninori@renesas.com>
* Paul Mundt <paul.mundt@renesas.com>
*
@@ -21,6 +21,9 @@
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/sh_timer.h>
+#include <linux/sh_dma.h>
+#include <linux/sh_intc.h>
+#include <cpu/dma-register.h>
#include <asm/mmzone.h>
static struct plat_sci_port scif0_platform_data = {
@@ -117,16 +120,13 @@ static struct platform_device scif5_device = {
};
static struct sh_timer_config tmu0_platform_data = {
- .name = "TMU0",
.channel_offset = 0x04,
.timer_bit = 0,
- .clk = "peripheral_clk",
.clockevent_rating = 200,
};
static struct resource tmu0_resources[] = {
[0] = {
- .name = "TMU0",
.start = 0xffd80008,
.end = 0xffd80013,
.flags = IORESOURCE_MEM,
@@ -148,16 +148,13 @@ static struct platform_device tmu0_device = {
};
static struct sh_timer_config tmu1_platform_data = {
- .name = "TMU1",
.channel_offset = 0x10,
.timer_bit = 1,
- .clk = "peripheral_clk",
.clocksource_rating = 200,
};
static struct resource tmu1_resources[] = {
[0] = {
- .name = "TMU1",
.start = 0xffd80014,
.end = 0xffd8001f,
.flags = IORESOURCE_MEM,
@@ -179,15 +176,12 @@ static struct platform_device tmu1_device = {
};
static struct sh_timer_config tmu2_platform_data = {
- .name = "TMU2",
.channel_offset = 0x1c,
.timer_bit = 2,
- .clk = "peripheral_clk",
};
static struct resource tmu2_resources[] = {
[0] = {
- .name = "TMU2",
.start = 0xffd80020,
.end = 0xffd8002f,
.flags = IORESOURCE_MEM,
@@ -209,15 +203,12 @@ static struct platform_device tmu2_device = {
};
static struct sh_timer_config tmu3_platform_data = {
- .name = "TMU3",
.channel_offset = 0x04,
.timer_bit = 0,
- .clk = "peripheral_clk",
};
static struct resource tmu3_resources[] = {
[0] = {
- .name = "TMU3",
.start = 0xffda0008,
.end = 0xffda0013,
.flags = IORESOURCE_MEM,
@@ -239,15 +230,12 @@ static struct platform_device tmu3_device = {
};
static struct sh_timer_config tmu4_platform_data = {
- .name = "TMU4",
.channel_offset = 0x10,
.timer_bit = 1,
- .clk = "peripheral_clk",
};
static struct resource tmu4_resources[] = {
[0] = {
- .name = "TMU4",
.start = 0xffda0014,
.end = 0xffda001f,
.flags = IORESOURCE_MEM,
@@ -269,15 +257,12 @@ static struct platform_device tmu4_device = {
};
static struct sh_timer_config tmu5_platform_data = {
- .name = "TMU5",
.channel_offset = 0x1c,
.timer_bit = 2,
- .clk = "peripheral_clk",
};
static struct resource tmu5_resources[] = {
[0] = {
- .name = "TMU5",
.start = 0xffda0020,
.end = 0xffda002b,
.flags = IORESOURCE_MEM,
@@ -299,15 +284,12 @@ static struct platform_device tmu5_device = {
};
static struct sh_timer_config tmu6_platform_data = {
- .name = "TMU6",
.channel_offset = 0x04,
.timer_bit = 0,
- .clk = "peripheral_clk",
};
static struct resource tmu6_resources[] = {
[0] = {
- .name = "TMU6",
.start = 0xffdc0008,
.end = 0xffdc0013,
.flags = IORESOURCE_MEM,
@@ -329,15 +311,12 @@ static struct platform_device tmu6_device = {
};
static struct sh_timer_config tmu7_platform_data = {
- .name = "TMU7",
.channel_offset = 0x10,
.timer_bit = 1,
- .clk = "peripheral_clk",
};
static struct resource tmu7_resources[] = {
[0] = {
- .name = "TMU7",
.start = 0xffdc0014,
.end = 0xffdc001f,
.flags = IORESOURCE_MEM,
@@ -359,15 +338,12 @@ static struct platform_device tmu7_device = {
};
static struct sh_timer_config tmu8_platform_data = {
- .name = "TMU8",
.channel_offset = 0x1c,
.timer_bit = 2,
- .clk = "peripheral_clk",
};
static struct resource tmu8_resources[] = {
[0] = {
- .name = "TMU8",
.start = 0xffdc0020,
.end = 0xffdc002b,
.flags = IORESOURCE_MEM,
@@ -389,15 +365,12 @@ static struct platform_device tmu8_device = {
};
static struct sh_timer_config tmu9_platform_data = {
- .name = "TMU9",
.channel_offset = 0x04,
.timer_bit = 0,
- .clk = "peripheral_clk",
};
static struct resource tmu9_resources[] = {
[0] = {
- .name = "TMU9",
.start = 0xffde0008,
.end = 0xffde0013,
.flags = IORESOURCE_MEM,
@@ -419,15 +392,12 @@ static struct platform_device tmu9_device = {
};
static struct sh_timer_config tmu10_platform_data = {
- .name = "TMU10",
.channel_offset = 0x10,
.timer_bit = 1,
- .clk = "peripheral_clk",
};
static struct resource tmu10_resources[] = {
[0] = {
- .name = "TMU10",
.start = 0xffde0014,
.end = 0xffde001f,
.flags = IORESOURCE_MEM,
@@ -449,15 +419,12 @@ static struct platform_device tmu10_device = {
};
static struct sh_timer_config tmu11_platform_data = {
- .name = "TMU11",
.channel_offset = 0x1c,
.timer_bit = 2,
- .clk = "peripheral_clk",
};
static struct resource tmu11_resources[] = {
[0] = {
- .name = "TMU11",
.start = 0xffde0020,
.end = 0xffde002b,
.flags = IORESOURCE_MEM,
@@ -478,6 +445,83 @@ static struct platform_device tmu11_device = {
.num_resources = ARRAY_SIZE(tmu11_resources),
};
+static const struct sh_dmae_channel dmac0_channels[] = {
+ {
+ .offset = 0,
+ .dmars = 0,
+ .dmars_bit = 0,
+ }, {
+ .offset = 0x10,
+ .dmars = 0,
+ .dmars_bit = 8,
+ }, {
+ .offset = 0x20,
+ .dmars = 4,
+ .dmars_bit = 0,
+ }, {
+ .offset = 0x30,
+ .dmars = 4,
+ .dmars_bit = 8,
+ }, {
+ .offset = 0x50,
+ .dmars = 8,
+ .dmars_bit = 0,
+ }, {
+ .offset = 0x60,
+ .dmars = 8,
+ .dmars_bit = 8,
+ }
+};
+
+static const unsigned int ts_shift[] = TS_SHIFT;
+
+static struct sh_dmae_pdata dma0_platform_data = {
+ .channel = dmac0_channels,
+ .channel_num = ARRAY_SIZE(dmac0_channels),
+ .ts_low_shift = CHCR_TS_LOW_SHIFT,
+ .ts_low_mask = CHCR_TS_LOW_MASK,
+ .ts_high_shift = CHCR_TS_HIGH_SHIFT,
+ .ts_high_mask = CHCR_TS_HIGH_MASK,
+ .ts_shift = ts_shift,
+ .ts_shift_num = ARRAY_SIZE(ts_shift),
+ .dmaor_init = DMAOR_INIT,
+};
+
+/* Resource order important! */
+static struct resource dmac0_resources[] = {
+ {
+ /* Channel registers and DMAOR */
+ .start = 0xfe008020,
+ .end = 0xfe00808f,
+ .flags = IORESOURCE_MEM,
+ }, {
+ /* DMARSx */
+ .start = 0xfe009000,
+ .end = 0xfe00900b,
+ .flags = IORESOURCE_MEM,
+ }, {
+ /* DMA error IRQ */
+ .start = evt2irq(0x5c0),
+ .end = evt2irq(0x5c0),
+ .flags = IORESOURCE_IRQ,
+ }, {
+ /* IRQ for channels 0-5 */
+ .start = evt2irq(0x500),
+ .end = evt2irq(0x5a0),
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device dma0_device = {
+ .name = "sh-dma-engine",
+ .id = 0,
+ .resource = dmac0_resources,
+ .num_resources = ARRAY_SIZE(dmac0_resources),
+ .dev = {
+ .platform_data = &dma0_platform_data,
+ },
+};
+
static struct resource usb_ohci_resources[] = {
[0] = {
.start = 0xffe70400,
@@ -525,10 +569,10 @@ static struct platform_device *sh7786_early_devices[] __initdata = {
};
static struct platform_device *sh7786_devices[] __initdata = {
+ &dma0_device,
&usb_ohci_device,
};
-
/*
* Please call this function if your platform board
* use external clock for USB
@@ -536,6 +580,7 @@ static struct platform_device *sh7786_devices[] __initdata = {
#define USBCTL0 0xffe70858
#define CLOCK_MODE_MASK 0xffffff7f
#define EXT_CLOCK_MODE 0x00000080
+
void __init sh7786_usb_use_exclock(void)
{
u32 val = __raw_readl(USBCTL0) & CLOCK_MODE_MASK;
@@ -553,6 +598,7 @@ void __init sh7786_usb_use_exclock(void)
#define PLL_ENB 0x00000002
#define PHY_RST 0x00000004
#define ACT_PLL_STATUS 0xc0000000
+
static void __init sh7786_usb_setup(void)
{
int i = 1000000;
@@ -708,9 +754,19 @@ static struct intc_vect vectors[] __initdata = {
#define INTMSK2 0xfe410068
#define INTMSKCLR2 0xfe41006c
+#define INTDISTCR0 0xfe4100b0
+#define INTDISTCR1 0xfe4100b4
+#define INTACK 0xfe4100b8
+#define INTACKCLR 0xfe4100bc
+#define INT2DISTCR0 0xfe410900
+#define INT2DISTCR1 0xfe410904
+#define INT2DISTCR2 0xfe410908
+#define INT2DISTCR3 0xfe41090c
+
static struct intc_mask_reg mask_registers[] __initdata = {
{ CnINTMSK0, CnINTMSKCLR0, 32,
- { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
+ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 },
+ INTC_SMP_BALANCING(INTDISTCR0) },
{ INTMSK2, INTMSKCLR2, 32,
{ IRL0_LLLL, IRL0_LLLH, IRL0_LLHL, IRL0_LLHH,
IRL0_LHLL, IRL0_LHLH, IRL0_LHHL, IRL0_LHHH,
@@ -722,7 +778,8 @@ static struct intc_mask_reg mask_registers[] __initdata = {
IRL4_HHLL, IRL4_HHLH, IRL4_HHHL, 0, } },
{ CnINT2MSKR0, CnINT2MSKCR0 , 32,
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, WDT } },
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, WDT },
+ INTC_SMP_BALANCING(INT2DISTCR0) },
{ CnINT2MSKR1, CnINT2MSKCR1, 32,
{ TMU0_0, TMU0_1, TMU0_2, TMU0_3, TMU1_0, TMU1_1, TMU1_2, 0,
DMAC0_0, DMAC0_1, DMAC0_2, DMAC0_3, DMAC0_4, DMAC0_5, DMAC0_6,
@@ -731,14 +788,14 @@ static struct intc_mask_reg mask_registers[] __initdata = {
HPB_0, HPB_1, HPB_2,
SCIF0_0, SCIF0_1, SCIF0_2, SCIF0_3,
SCIF1,
- TMU2, TMU3, 0, } },
+ TMU2, TMU3, 0, }, INTC_SMP_BALANCING(INT2DISTCR1) },
{ CnINT2MSKR2, CnINT2MSKCR2, 32,
{ 0, 0, SCIF2, SCIF3, SCIF4, SCIF5,
Eth_0, Eth_1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
PCIeC0_0, PCIeC0_1, PCIeC0_2,
PCIeC1_0, PCIeC1_1, PCIeC1_2,
- USB, 0, 0 } },
+ USB, 0, 0 }, INTC_SMP_BALANCING(INT2DISTCR2) },
{ CnINT2MSKR3, CnINT2MSKCR3, 32,
{ 0, 0, 0, 0, 0, 0,
I2C0, I2C1,
@@ -747,7 +804,7 @@ static struct intc_mask_reg mask_registers[] __initdata = {
HAC0, HAC1,
FLCTL, 0,
HSPI, GPIO0, GPIO1, Thermal,
- 0, 0, 0, 0, 0, 0, 0, 0 } },
+ 0, 0, 0, 0, 0, 0, 0, 0 }, INTC_SMP_BALANCING(INT2DISTCR3) },
};
static struct intc_prio_reg prio_registers[] __initdata = {
@@ -863,6 +920,19 @@ static DECLARE_INTC_DESC(intc_desc_irl4567, "sh7786-irl4567", vectors_irl4567,
#define INTC_INTMSK2 INTMSK2
#define INTC_INTMSKCLR1 CnINTMSKCLR1
#define INTC_INTMSKCLR2 INTMSKCLR2
+#define INTC_USERIMASK 0xfe411000
+
+#ifdef CONFIG_INTC_BALANCING
+unsigned int irq_lookup(unsigned int irq)
+{
+ return __raw_readl(INTACK) & 1 ? irq : NO_IRQ_IGNORE;
+}
+
+void irq_finish(unsigned int irq)
+{
+ __raw_writel(irq2evt(irq), INTACKCLR);
+}
+#endif
void __init plat_irq_setup(void)
{
@@ -877,6 +947,7 @@ void __init plat_irq_setup(void)
__raw_writel(__raw_readl(INTC_ICR0) & ~0x00c00000, INTC_ICR0);
register_intc_controller(&intc_desc);
+ register_intc_userimask(INTC_USERIMASK);
}
void __init plat_irq_setup_pins(int mode)
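Three related things land in the sh7786 portion: a DMAC platform device built on the new linux/sh_dma.h pdata, INTC_SMP_BALANCING() annotations on the mask registers plus register_intc_userimask() so interrupts can be distributed and priority-masked per CPU, and the irq_lookup()/irq_finish() pair that claims an interrupt through INTACK/INTACKCLR when CONFIG_INTC_BALANCING is set. A short sketch of how the lookup/finish pair is meant to be consumed by the arch entry path (the actual do_IRQ() conversion appears in arch/sh/kernel/irq.c further down in this series):
/* Sketch only: claim, handle and acknowledge one balanced IRQ. */
static void handle_balanced_irq(unsigned int hw_irq)
{
	unsigned int irq = irq_demux(irq_lookup(hw_irq));

	if (irq != NO_IRQ_IGNORE) {
		generic_handle_irq(irq);
		irq_finish(irq);	/* write irq2evt(irq) to INTACKCLR so another CPU can claim the next one */
	}
}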
diff --git a/arch/sh/kernel/cpu/sh4a/setup-shx3.c b/arch/sh/kernel/cpu/sh4a/setup-shx3.c
index 780ba17a5599..9158bc5ea38b 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-shx3.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-shx3.c
@@ -70,16 +70,13 @@ static struct platform_device scif2_device = {
};
static struct sh_timer_config tmu0_platform_data = {
- .name = "TMU0",
.channel_offset = 0x04,
.timer_bit = 0,
- .clk = "peripheral_clk",
.clockevent_rating = 200,
};
static struct resource tmu0_resources[] = {
[0] = {
- .name = "TMU0",
.start = 0xffc10008,
.end = 0xffc10013,
.flags = IORESOURCE_MEM,
@@ -101,16 +98,13 @@ static struct platform_device tmu0_device = {
};
static struct sh_timer_config tmu1_platform_data = {
- .name = "TMU1",
.channel_offset = 0x10,
.timer_bit = 1,
- .clk = "peripheral_clk",
.clocksource_rating = 200,
};
static struct resource tmu1_resources[] = {
[0] = {
- .name = "TMU1",
.start = 0xffc10014,
.end = 0xffc1001f,
.flags = IORESOURCE_MEM,
@@ -132,15 +126,12 @@ static struct platform_device tmu1_device = {
};
static struct sh_timer_config tmu2_platform_data = {
- .name = "TMU2",
.channel_offset = 0x1c,
.timer_bit = 2,
- .clk = "peripheral_clk",
};
static struct resource tmu2_resources[] = {
[0] = {
- .name = "TMU2",
.start = 0xffc10020,
.end = 0xffc1002f,
.flags = IORESOURCE_MEM,
@@ -162,15 +153,12 @@ static struct platform_device tmu2_device = {
};
static struct sh_timer_config tmu3_platform_data = {
- .name = "TMU3",
.channel_offset = 0x04,
.timer_bit = 0,
- .clk = "peripheral_clk",
};
static struct resource tmu3_resources[] = {
[0] = {
- .name = "TMU3",
.start = 0xffc20008,
.end = 0xffc20013,
.flags = IORESOURCE_MEM,
@@ -192,15 +180,12 @@ static struct platform_device tmu3_device = {
};
static struct sh_timer_config tmu4_platform_data = {
- .name = "TMU4",
.channel_offset = 0x10,
.timer_bit = 1,
- .clk = "peripheral_clk",
};
static struct resource tmu4_resources[] = {
[0] = {
- .name = "TMU4",
.start = 0xffc20014,
.end = 0xffc2001f,
.flags = IORESOURCE_MEM,
@@ -222,15 +207,12 @@ static struct platform_device tmu4_device = {
};
static struct sh_timer_config tmu5_platform_data = {
- .name = "TMU5",
.channel_offset = 0x1c,
.timer_bit = 2,
- .clk = "peripheral_clk",
};
static struct resource tmu5_resources[] = {
[0] = {
- .name = "TMU5",
.start = 0xffc20020,
.end = 0xffc2002b,
.flags = IORESOURCE_MEM,
diff --git a/arch/sh/kernel/cpu/sh4a/smp-shx3.c b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
index 11bf4c1e25c0..de865cac02ee 100644
--- a/arch/sh/kernel/cpu/sh4a/smp-shx3.c
+++ b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
@@ -1,7 +1,7 @@
/*
* SH-X3 SMP
*
- * Copyright (C) 2007 - 2008 Paul Mundt
+ * Copyright (C) 2007 - 2010 Paul Mundt
* Copyright (C) 2007 Magnus Damm
*
* This file is subject to the terms and conditions of the GNU General Public
@@ -9,16 +9,22 @@
* for more details.
*/
#include <linux/init.h>
+#include <linux/kernel.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/cpu.h>
+#include <asm/sections.h>
#define STBCR_REG(phys_id) (0xfe400004 | (phys_id << 12))
#define RESET_REG(phys_id) (0xfe400008 | (phys_id << 12))
#define STBCR_MSTP 0x00000001
#define STBCR_RESET 0x00000002
+#define STBCR_SLEEP 0x00000004
#define STBCR_LTSLP 0x80000000
static irqreturn_t ipi_interrupt_handler(int irq, void *arg)
@@ -37,7 +43,7 @@ static irqreturn_t ipi_interrupt_handler(int irq, void *arg)
return IRQ_HANDLED;
}
-void __init plat_smp_setup(void)
+static void shx3_smp_setup(void)
{
unsigned int cpu = 0;
int i, num;
@@ -63,7 +69,7 @@ void __init plat_smp_setup(void)
printk(KERN_INFO "Detected %i available secondary CPU(s)\n", num);
}
-void __init plat_prepare_cpus(unsigned int max_cpus)
+static void shx3_prepare_cpus(unsigned int max_cpus)
{
int i;
@@ -72,11 +78,14 @@ void __init plat_prepare_cpus(unsigned int max_cpus)
BUILD_BUG_ON(SMP_MSG_NR >= 8);
for (i = 0; i < SMP_MSG_NR; i++)
- request_irq(104 + i, ipi_interrupt_handler, IRQF_DISABLED,
- "IPI", (void *)(long)i);
+ request_irq(104 + i, ipi_interrupt_handler,
+ IRQF_DISABLED | IRQF_PERCPU, "IPI", (void *)(long)i);
+
+ for (i = 0; i < max_cpus; i++)
+ set_cpu_present(i, true);
}
-void plat_start_cpu(unsigned int cpu, unsigned long entry_point)
+static void shx3_start_cpu(unsigned int cpu, unsigned long entry_point)
{
if (__in_29bit_mode())
__raw_writel(entry_point, RESET_REG(cpu));
@@ -93,12 +102,12 @@ void plat_start_cpu(unsigned int cpu, unsigned long entry_point)
__raw_writel(STBCR_RESET | STBCR_LTSLP, STBCR_REG(cpu));
}
-int plat_smp_processor_id(void)
+static unsigned int shx3_smp_processor_id(void)
{
return __raw_readl(0xff000048); /* CPIDR */
}
-void plat_send_ipi(unsigned int cpu, unsigned int message)
+static void shx3_send_ipi(unsigned int cpu, unsigned int message)
{
unsigned long addr = 0xfe410070 + (cpu * 4);
@@ -106,3 +115,52 @@ void plat_send_ipi(unsigned int cpu, unsigned int message)
__raw_writel(1 << (message << 2), addr); /* C0INTICI..CnINTICI */
}
+
+static void shx3_update_boot_vector(unsigned int cpu)
+{
+ __raw_writel(STBCR_MSTP, STBCR_REG(cpu));
+ while (!(__raw_readl(STBCR_REG(cpu)) & STBCR_MSTP))
+ cpu_relax();
+ __raw_writel(STBCR_RESET, STBCR_REG(cpu));
+}
+
+static int __cpuinit
+shx3_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (unsigned int)hcpu;
+
+ switch (action) {
+ case CPU_UP_PREPARE:
+ shx3_update_boot_vector(cpu);
+ break;
+ case CPU_ONLINE:
+ pr_info("CPU %u is now online\n", cpu);
+ break;
+ case CPU_DEAD:
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata shx3_cpu_notifier = {
+ .notifier_call = shx3_cpu_callback,
+};
+
+static int __cpuinit register_shx3_cpu_notifier(void)
+{
+ register_hotcpu_notifier(&shx3_cpu_notifier);
+ return 0;
+}
+late_initcall(register_shx3_cpu_notifier);
+
+struct plat_smp_ops shx3_smp_ops = {
+ .smp_setup = shx3_smp_setup,
+ .prepare_cpus = shx3_prepare_cpus,
+ .start_cpu = shx3_start_cpu,
+ .smp_processor_id = shx3_smp_processor_id,
+ .send_ipi = shx3_send_ipi,
+ .cpu_die = native_cpu_die,
+ .cpu_disable = native_cpu_disable,
+ .play_dead = native_play_dead,
+};
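The SH-X3 SMP glue is converted from global plat_*() entry points into a plat_smp_ops method table, gains IRQF_PERCPU on the IPI lines, marks CPUs present up front, and adds a hotplug notifier that replays the STBCR reset dance before a CPU is brought back up. A minimal sketch of how the ops table is expected to be hooked in — register_smp_ops() is added by this same series in arch/sh/kernel/smp.c, and the asm/smp-ops.h header name is an assumption:
#include <linux/init.h>
#include <asm/smp-ops.h>	/* assumed location of struct plat_smp_ops / register_smp_ops() */
/* Sketch only: presumably invoked from the SH-X3 CPU probe path. */
void __init shx3_smp_init(void)
{
	register_smp_ops(&shx3_smp_ops);
}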
diff --git a/arch/sh/kernel/cpu/sh5/probe.c b/arch/sh/kernel/cpu/sh5/probe.c
index 521d05b3f7ba..9e882409e4e9 100644
--- a/arch/sh/kernel/cpu/sh5/probe.c
+++ b/arch/sh/kernel/cpu/sh5/probe.c
@@ -17,7 +17,7 @@
#include <asm/cache.h>
#include <asm/tlb.h>
-int __init detect_cpu_and_cache_system(void)
+void __cpuinit cpu_probe(void)
{
unsigned long long cir;
@@ -72,6 +72,4 @@ int __init detect_cpu_and_cache_system(void)
/* Setup some I/D TLB defaults */
sh64_tlb_init();
-
- return 0;
}
diff --git a/arch/sh/kernel/cpu/sh5/setup-sh5.c b/arch/sh/kernel/cpu/sh5/setup-sh5.c
index e7a3c1e4b604..d910666142b1 100644
--- a/arch/sh/kernel/cpu/sh5/setup-sh5.c
+++ b/arch/sh/kernel/cpu/sh5/setup-sh5.c
@@ -68,16 +68,13 @@ static struct platform_device rtc_device = {
#define TMU2_BASE (TMU_BASE + 0x8 + (0xc * 0x2))
static struct sh_timer_config tmu0_platform_data = {
- .name = "TMU0",
.channel_offset = 0x04,
.timer_bit = 0,
- .clk = "peripheral_clk",
.clockevent_rating = 200,
};
static struct resource tmu0_resources[] = {
[0] = {
- .name = "TMU0",
.start = TMU0_BASE,
.end = TMU0_BASE + 0xc - 1,
.flags = IORESOURCE_MEM,
@@ -99,16 +96,13 @@ static struct platform_device tmu0_device = {
};
static struct sh_timer_config tmu1_platform_data = {
- .name = "TMU1",
.channel_offset = 0x10,
.timer_bit = 1,
- .clk = "peripheral_clk",
.clocksource_rating = 200,
};
static struct resource tmu1_resources[] = {
[0] = {
- .name = "TMU1",
.start = TMU1_BASE,
.end = TMU1_BASE + 0xc - 1,
.flags = IORESOURCE_MEM,
@@ -130,15 +124,12 @@ static struct platform_device tmu1_device = {
};
static struct sh_timer_config tmu2_platform_data = {
- .name = "TMU2",
.channel_offset = 0x1c,
.timer_bit = 2,
- .clk = "peripheral_clk",
};
static struct resource tmu2_resources[] = {
[0] = {
- .name = "TMU2",
.start = TMU2_BASE,
.end = TMU2_BASE + 0xc - 1,
.flags = IORESOURCE_MEM,
diff --git a/arch/sh/kernel/crash_dump.c b/arch/sh/kernel/crash_dump.c
index 95d216255565..37c97d444576 100644
--- a/arch/sh/kernel/crash_dump.c
+++ b/arch/sh/kernel/crash_dump.c
@@ -4,7 +4,6 @@
* Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
* Copyright (C) IBM Corporation, 2004. All rights reserved
*/
-
#include <linux/errno.h>
#include <linux/crash_dump.h>
#include <linux/io.h>
@@ -13,6 +12,25 @@
/* Stores the physical address of elf header of crash image. */
unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX;
+/*
+ * Note: elfcorehdr_addr is not just limited to vmcore. It is also used by
+ * is_kdump_kernel() to determine if we are booting after a panic. Hence
+ * ifdef it under CONFIG_CRASH_DUMP and not CONFIG_PROC_VMCORE.
+ *
+ * elfcorehdr= specifies the location of elf core header
+ * stored by the crashed kernel.
+ */
+static int __init parse_elfcorehdr(char *arg)
+{
+ if (!arg)
+ return -EINVAL;
+
+ elfcorehdr_addr = memparse(arg, &arg);
+
+ return 0;
+}
+early_param("elfcorehdr", parse_elfcorehdr);
+
/**
* copy_oldmem_page - copy one page from "oldmem"
* @pfn: page frame number to be copied
diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c
index a8234b2010d1..886d7d83ace3 100644
--- a/arch/sh/kernel/dwarf.c
+++ b/arch/sh/kernel/dwarf.c
@@ -22,6 +22,7 @@
#include <linux/mm.h>
#include <linux/elf.h>
#include <linux/ftrace.h>
+#include <linux/module.h>
#include <linux/slab.h>
#include <asm/dwarf.h>
#include <asm/unwinder.h>
@@ -844,8 +845,10 @@ static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
rb_link_node(&cie->node, parent, rb_node);
rb_insert_color(&cie->node, &cie_root);
+#ifdef CONFIG_MODULES
if (mod != NULL)
list_add_tail(&cie->link, &mod->arch.cie_list);
+#endif
spin_unlock_irqrestore(&dwarf_cie_lock, flags);
@@ -934,8 +937,10 @@ static int dwarf_parse_fde(void *entry, u32 entry_type,
rb_link_node(&fde->node, parent, rb_node);
rb_insert_color(&fde->node, &fde_root);
+#ifdef CONFIG_MODULES
if (mod != NULL)
list_add_tail(&fde->link, &mod->arch.fde_list);
+#endif
spin_unlock_irqrestore(&dwarf_fde_lock, flags);
diff --git a/arch/sh/kernel/head_32.S b/arch/sh/kernel/head_32.S
index fe0b743881b0..6e35f012cc03 100644
--- a/arch/sh/kernel/head_32.S
+++ b/arch/sh/kernel/head_32.S
@@ -131,6 +131,7 @@ ENTRY(_stext)
* r8 = scratch register
* r9 = scratch register
* r10 = number of PMB entries we've setup
+ * r11 = scratch register
*/
mov.l .LMMUCR, r1 /* Flush the TLB */
@@ -167,8 +168,9 @@ ENTRY(_stext)
.Lvalidate_existing_mappings:
+ mov.l .LPMB_DATA_MASK, r11
mov.l @r7, r8
- and r0, r8
+ and r11, r8
cmp/eq r0, r8 /* Check for valid __MEMORY_START mappings */
bt .Lpmb_done
@@ -335,12 +337,13 @@ ENTRY(stack_start)
3: .long __bss_start
4: .long _end
5: .long start_kernel
-6: .long sh_cpu_init
+6: .long cpu_init
7: .long init_thread_union
#ifdef CONFIG_PMB
.LPMB_ADDR: .long PMB_ADDR
.LPMB_DATA: .long PMB_DATA
+.LPMB_DATA_MASK: .long PMB_PFN_MASK | PMB_V
.LFIRST_ADDR_ENTRY: .long PAGE_OFFSET | PMB_V
.LFIRST_DATA_ENTRY: .long __MEMORY_START | PMB_V
.LMMUCR: .long MMUCR
diff --git a/arch/sh/kernel/hw_breakpoint.c b/arch/sh/kernel/hw_breakpoint.c
index 675eea7785d9..efae6ab3d54c 100644
--- a/arch/sh/kernel/hw_breakpoint.c
+++ b/arch/sh/kernel/hw_breakpoint.c
@@ -120,25 +120,16 @@ static int get_hbp_len(u16 hbp_len)
}
/*
- * Check for virtual address in user space.
- */
-int arch_check_va_in_userspace(unsigned long va, u16 hbp_len)
-{
- unsigned int len;
-
- len = get_hbp_len(hbp_len);
-
- return (va <= TASK_SIZE - len);
-}
-
-/*
* Check for virtual address in kernel space.
*/
-static int arch_check_va_in_kernelspace(unsigned long va, u8 hbp_len)
+int arch_check_bp_in_kernelspace(struct perf_event *bp)
{
unsigned int len;
+ unsigned long va;
+ struct arch_hw_breakpoint *info = counter_arch_bp(bp);
- len = get_hbp_len(hbp_len);
+ va = info->address;
+ len = get_hbp_len(info->len);
return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
}
@@ -226,8 +217,7 @@ static int arch_build_bp_info(struct perf_event *bp)
/*
* Validate the arch-specific HW Breakpoint register settings
*/
-int arch_validate_hwbkpt_settings(struct perf_event *bp,
- struct task_struct *tsk)
+int arch_validate_hwbkpt_settings(struct perf_event *bp)
{
struct arch_hw_breakpoint *info = counter_arch_bp(bp);
unsigned int align;
@@ -270,15 +260,6 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp,
if (info->address & align)
return -EINVAL;
- /* Check that the virtual address is in the proper range */
- if (tsk) {
- if (!arch_check_va_in_userspace(info->address, info->len))
- return -EFAULT;
- } else {
- if (!arch_check_va_in_kernelspace(info->address, info->len))
- return -EFAULT;
- }
-
return 0;
}
@@ -363,8 +344,7 @@ static int __kprobes hw_breakpoint_handler(struct die_args *args)
perf_bp_event(bp, args->regs);
/* Deliver the signal to userspace */
- if (arch_check_va_in_userspace(bp->attr.bp_addr,
- bp->attr.bp_len)) {
+ if (!arch_check_bp_in_kernelspace(bp)) {
siginfo_t info;
info.si_signo = args->signr;
@@ -425,11 +405,6 @@ void hw_breakpoint_pmu_read(struct perf_event *bp)
/* TODO */
}
-void hw_breakpoint_pmu_unthrottle(struct perf_event *bp)
-{
- /* TODO */
-}
-
int register_sh_ubc(struct sh_ubc *ubc)
{
/* Bail if it's already assigned */
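The two separate user/kernel address validators collapse into arch_check_bp_in_kernelspace(), which takes the perf_event directly, and the per-task range check disappears from arch_validate_hwbkpt_settings() since the generic breakpoint layer handles it. A small worked restatement of what the remaining predicate means, assuming TASK_SIZE marks the user/kernel split as elsewhere on sh:
#include <linux/types.h>
/* Sketch only: a breakpoint counts as kernel-space when the whole
 * [va, va + len) range sits at or above TASK_SIZE; anything starting
 * below TASK_SIZE is user-space and, on a hit, is delivered as SIGTRAP
 * by the handler change above.
 */
static bool range_is_kernelspace(unsigned long va, unsigned long len)
{
	return va >= TASK_SIZE && (va + len - 1) >= TASK_SIZE;
}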
diff --git a/arch/sh/kernel/idle.c b/arch/sh/kernel/idle.c
index 273f890b17ae..425d604e3a28 100644
--- a/arch/sh/kernel/idle.c
+++ b/arch/sh/kernel/idle.c
@@ -19,6 +19,7 @@
#include <asm/pgalloc.h>
#include <asm/system.h>
#include <asm/atomic.h>
+#include <asm/smp.h>
void (*pm_idle)(void) = NULL;
@@ -89,10 +90,13 @@ void cpu_idle(void)
while (1) {
tick_nohz_stop_sched_tick(1);
- while (!need_resched() && cpu_online(cpu)) {
+ while (!need_resched()) {
check_pgt_cache();
rmb();
+ if (cpu_is_offline(cpu))
+ play_dead();
+
local_irq_disable();
/* Don't trace irqs off for idle */
stop_critical_timings();
@@ -133,7 +137,7 @@ static void do_nothing(void *unused)
void stop_this_cpu(void *unused)
{
local_irq_disable();
- cpu_clear(smp_processor_id(), cpu_online_map);
+ set_cpu_online(smp_processor_id(), false);
for (;;)
cpu_sleep();
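The idle loop change is the other half of CPU hotplug: instead of spinning only while the CPU is online, the loop checks for the offline state and drops into play_dead(), and stop_this_cpu() moves to the set_cpu_online() accessor. A rough sketch of the resulting control flow, under the assumption that play_dead() hands the CPU to the platform's dead path until it is plugged back in:
/* Sketch only: per-CPU idle body after this change. */
static void idle_body(unsigned int cpu)
{
	while (!need_resched()) {
		check_pgt_cache();
		rmb();

		if (cpu_is_offline(cpu))
			play_dead();	/* platform-specific parking of the dying CPU */

		/* ... the usual pm_idle()/cpu_sleep() section follows ... */
	}
}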
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
index d2d41d046657..257de1f0692b 100644
--- a/arch/sh/kernel/irq.c
+++ b/arch/sh/kernel/irq.c
@@ -12,6 +12,7 @@
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <linux/ftrace.h>
+#include <linux/delay.h>
#include <asm/processor.h>
#include <asm/machvec.h>
#include <asm/uaccess.h>
@@ -113,19 +114,14 @@ union irq_ctx {
static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;
-#endif
-asmlinkage __irq_entry int do_IRQ(unsigned int irq, struct pt_regs *regs)
+static char softirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
+static char hardirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
+
+static inline void handle_one_irq(unsigned int irq)
{
- struct pt_regs *old_regs = set_irq_regs(regs);
-#ifdef CONFIG_IRQSTACKS
union irq_ctx *curctx, *irqctx;
-#endif
-
- irq_enter();
- irq = irq_demux(irq);
-#ifdef CONFIG_IRQSTACKS
curctx = (union irq_ctx *)current_thread_info();
irqctx = hardirq_ctx[smp_processor_id()];
@@ -164,20 +160,9 @@ asmlinkage __irq_entry int do_IRQ(unsigned int irq, struct pt_regs *regs)
"r5", "r6", "r7", "r8", "t", "pr"
);
} else
-#endif
generic_handle_irq(irq);
-
- irq_exit();
-
- set_irq_regs(old_regs);
- return 1;
}
-#ifdef CONFIG_IRQSTACKS
-static char softirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
-
-static char hardirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
-
/*
* allocate per-cpu stacks for hardirq and for softirq processing
*/
@@ -257,8 +242,33 @@ asmlinkage void do_softirq(void)
local_irq_restore(flags);
}
+#else
+static inline void handle_one_irq(unsigned int irq)
+{
+ generic_handle_irq(irq);
+}
#endif
+asmlinkage __irq_entry int do_IRQ(unsigned int irq, struct pt_regs *regs)
+{
+ struct pt_regs *old_regs = set_irq_regs(regs);
+
+ irq_enter();
+
+ irq = irq_demux(irq_lookup(irq));
+
+ if (irq != NO_IRQ_IGNORE) {
+ handle_one_irq(irq);
+ irq_finish(irq);
+ }
+
+ irq_exit();
+
+ set_irq_regs(old_regs);
+
+ return IRQ_HANDLED;
+}
+
void __init init_IRQ(void)
{
plat_irq_setup();
@@ -283,3 +293,44 @@ int __init arch_probe_nr_irqs(void)
return 0;
}
#endif
+
+#ifdef CONFIG_HOTPLUG_CPU
+static void route_irq(struct irq_desc *desc, unsigned int irq, unsigned int cpu)
+{
+ printk(KERN_INFO "IRQ%u: moving from cpu%u to cpu%u\n",
+ irq, desc->node, cpu);
+
+ raw_spin_lock_irq(&desc->lock);
+ desc->chip->set_affinity(irq, cpumask_of(cpu));
+ raw_spin_unlock_irq(&desc->lock);
+}
+
+/*
+ * The CPU has been marked offline. Migrate IRQs off this CPU. If
+ * the affinity settings do not allow other CPUs, force them onto any
+ * available CPU.
+ */
+void migrate_irqs(void)
+{
+ struct irq_desc *desc;
+ unsigned int irq, cpu = smp_processor_id();
+
+ for_each_irq_desc(irq, desc) {
+ if (desc->node == cpu) {
+ unsigned int newcpu = cpumask_any_and(desc->affinity,
+ cpu_online_mask);
+ if (newcpu >= nr_cpu_ids) {
+ if (printk_ratelimit())
+ printk(KERN_INFO "IRQ%u no longer affine to CPU%u\n",
+ irq, cpu);
+
+ cpumask_setall(desc->affinity);
+ newcpu = cpumask_any_and(desc->affinity,
+ cpu_online_mask);
+ }
+
+ route_irq(desc, irq, newcpu);
+ }
+ }
+}
+#endif
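do_IRQ() is rebuilt around handle_one_irq() so the IRQSTACKS and non-IRQSTACKS paths share one entry, and it now goes through irq_lookup()/irq_finish() so balanced controllers can claim and acknowledge vectors; migrate_irqs() is added for the hotplug path to push IRQs off a dying CPU, forcing the affinity wide open if nothing online remains in it. For platforms without CONFIG_INTC_BALANCING the lookup/finish hooks are presumably identity/no-op stubs in the asm headers (not part of this hunk); a sketch of that assumption:
#ifndef CONFIG_INTC_BALANCING
/* Sketch only: assumed fallbacks so the new do_IRQ() degenerates to the
 * old behaviour when interrupt balancing is not built in.
 */
static inline unsigned int irq_lookup(unsigned int irq) { return irq; }
static inline void irq_finish(unsigned int irq) { }
#define NO_IRQ_IGNORE	((unsigned int)-1)	/* assumed sentinel */
#endif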
diff --git a/arch/sh/kernel/kgdb.c b/arch/sh/kernel/kgdb.c
index 70c69659b846..efb6d398dec3 100644
--- a/arch/sh/kernel/kgdb.c
+++ b/arch/sh/kernel/kgdb.c
@@ -237,6 +237,18 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
return -1;
}
+unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs)
+{
+ if (exception == 60)
+ return instruction_pointer(regs) - 2;
+ return instruction_pointer(regs);
+}
+
+void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
+{
+ regs->pc = ip;
+}
+
/*
* The primary entry points for the kgdb debug trap table entries.
*/
@@ -247,7 +259,7 @@ BUILD_TRAP_HANDLER(singlestep)
local_irq_save(flags);
regs->pc -= instruction_size(__raw_readw(regs->pc - 4));
- kgdb_handle_exception(vec >> 2, SIGTRAP, 0, regs);
+ kgdb_handle_exception(0, SIGTRAP, 0, regs);
local_irq_restore(flags);
}
diff --git a/arch/sh/kernel/localtimer.c b/arch/sh/kernel/localtimer.c
index 0b04e7d4a9b9..8bfc6dfa8b94 100644
--- a/arch/sh/kernel/localtimer.c
+++ b/arch/sh/kernel/localtimer.c
@@ -44,7 +44,7 @@ static void dummy_timer_set_mode(enum clock_event_mode mode,
{
}
-void __cpuinit local_timer_setup(unsigned int cpu)
+void local_timer_setup(unsigned int cpu)
{
struct clock_event_device *clk = &per_cpu(local_clockevent, cpu);
@@ -60,3 +60,7 @@ void __cpuinit local_timer_setup(unsigned int cpu)
clockevents_register_device(clk);
}
+
+void local_timer_stop(unsigned int cpu)
+{
+}
diff --git a/arch/sh/kernel/machine_kexec.c b/arch/sh/kernel/machine_kexec.c
index 7672141c841b..5a559e666eb3 100644
--- a/arch/sh/kernel/machine_kexec.c
+++ b/arch/sh/kernel/machine_kexec.c
@@ -8,7 +8,6 @@
* This source code is licensed under the GNU General Public License,
* Version 2. See the file COPYING for more details.
*/
-
#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/delay.h>
@@ -16,6 +15,7 @@
#include <linux/numa.h>
#include <linux/ftrace.h>
#include <linux/suspend.h>
+#include <linux/lmb.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
@@ -147,4 +147,64 @@ void arch_crash_save_vmcoreinfo(void)
VMCOREINFO_SYMBOL(node_data);
VMCOREINFO_LENGTH(node_data, MAX_NUMNODES);
#endif
+#ifdef CONFIG_X2TLB
+ VMCOREINFO_CONFIG(X2TLB);
+#endif
+}
+
+void __init reserve_crashkernel(void)
+{
+ unsigned long long crash_size, crash_base;
+ int ret;
+
+ /* this is necessary because of lmb_phys_mem_size() */
+ lmb_analyze();
+
+ ret = parse_crashkernel(boot_command_line, lmb_phys_mem_size(),
+ &crash_size, &crash_base);
+ if (ret == 0 && crash_size > 0) {
+ crashk_res.start = crash_base;
+ crashk_res.end = crash_base + crash_size - 1;
+ }
+
+ if (crashk_res.end == crashk_res.start)
+ goto disable;
+
+ crash_size = PAGE_ALIGN(crashk_res.end - crashk_res.start + 1);
+ if (!crashk_res.start) {
+ unsigned long max = lmb_end_of_DRAM() - memory_limit;
+ crashk_res.start = __lmb_alloc_base(crash_size, PAGE_SIZE, max);
+ if (!crashk_res.start) {
+ pr_err("crashkernel allocation failed\n");
+ goto disable;
+ }
+ } else {
+ ret = lmb_reserve(crashk_res.start, crash_size);
+ if (unlikely(ret < 0)) {
+ pr_err("crashkernel reservation failed - "
+ "memory is in use\n");
+ goto disable;
+ }
+ }
+
+ crashk_res.end = crashk_res.start + crash_size - 1;
+
+ /*
+ * Crash kernel trumps memory limit
+ */
+ if ((lmb_end_of_DRAM() - memory_limit) <= crashk_res.end) {
+ memory_limit = 0;
+ pr_info("Disabled memory limit for crashkernel\n");
+ }
+
+ pr_info("Reserving %ldMB of memory at 0x%08lx "
+ "for crashkernel (System RAM: %ldMB)\n",
+ (unsigned long)(crash_size >> 20),
+ (unsigned long)(crashk_res.start),
+ (unsigned long)(lmb_phys_mem_size() >> 20));
+
+ return;
+
+disable:
+ crashk_res.start = crashk_res.end = 0;
}
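reserve_crashkernel() moves out of setup.c (see the removal further down) and is rebuilt on top of lmb, so the reservation happens against the same memory map the rest of boot uses, and a crash region that collides with mem= simply cancels the memory limit. A condensed sketch of the flow for a typical command line, keeping only the essential calls — error handling and the resource bookkeeping above are omitted:
#include <linux/kexec.h>
#include <linux/lmb.h>
#include <asm/page.h>
/* Sketch only: "crashkernel=64M@0x10000000" reserves that exact window,
 * "crashkernel=64M" lets lmb pick a base below the memory limit.
 */
static void __init crashkernel_example(void)
{
	unsigned long long size = 0, base = 0;

	if (parse_crashkernel(boot_command_line, lmb_phys_mem_size(),
			      &size, &base) || !size)
		return;

	if (base)
		lmb_reserve(base, PAGE_ALIGN(size));
	else
		base = __lmb_alloc_base(PAGE_ALIGN(size), PAGE_SIZE,
					lmb_end_of_DRAM() - memory_limit);
}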
diff --git a/arch/sh/kernel/machvec.c b/arch/sh/kernel/machvec.c
index 1652340ba3f2..85cfaf916fdc 100644
--- a/arch/sh/kernel/machvec.c
+++ b/arch/sh/kernel/machvec.c
@@ -131,6 +131,7 @@ void __init sh_mv_setup(void)
mv_set(ioport_unmap);
mv_set(irq_demux);
mv_set(mode_pins);
+ mv_set(mem_init);
if (!sh_mv.mv_nr_irqs)
sh_mv.mv_nr_irqs = NR_IRQS;
diff --git a/arch/sh/kernel/process.c b/arch/sh/kernel/process.c
index 17f89aa4e1b3..dcb126dc76fd 100644
--- a/arch/sh/kernel/process.c
+++ b/arch/sh/kernel/process.c
@@ -90,7 +90,7 @@ void arch_task_cache_init(void)
# define HAVE_SOFTFP 0
#endif
-void init_thread_xstate(void)
+void __cpuinit init_thread_xstate(void)
{
if (boot_cpu_data.flags & CPU_HAS_FPU)
xstate_size = sizeof(struct sh_fpu_hard_struct);
diff --git a/arch/sh/kernel/ptrace_32.c b/arch/sh/kernel/ptrace_32.c
index 7759a9a93211..6c4bbba2a675 100644
--- a/arch/sh/kernel/ptrace_32.c
+++ b/arch/sh/kernel/ptrace_32.c
@@ -85,7 +85,7 @@ static int set_single_step(struct task_struct *tsk, unsigned long addr)
bp = thread->ptrace_bps[0];
if (!bp) {
- hw_breakpoint_init(&attr);
+ ptrace_breakpoint_init(&attr);
attr.bp_addr = addr;
attr.bp_len = HW_BREAKPOINT_LEN_2;
@@ -436,29 +436,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
0, sizeof(struct pt_dspregs),
(const void __user *)data);
#endif
-#ifdef CONFIG_BINFMT_ELF_FDPIC
- case PTRACE_GETFDPIC: {
- unsigned long tmp = 0;
-
- switch (addr) {
- case PTRACE_GETFDPIC_EXEC:
- tmp = child->mm->context.exec_fdpic_loadmap;
- break;
- case PTRACE_GETFDPIC_INTERP:
- tmp = child->mm->context.interp_fdpic_loadmap;
- break;
- default:
- break;
- }
-
- ret = 0;
- if (put_user(tmp, datap)) {
- ret = -EFAULT;
- break;
- }
- break;
- }
-#endif
default:
ret = ptrace_request(child, request, addr, data);
break;
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index 8870d6ba64bf..272734681d29 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -4,7 +4,7 @@
* This file handles the architecture-dependent parts of initialization
*
* Copyright (C) 1999 Niibe Yutaka
- * Copyright (C) 2002 - 2007 Paul Mundt
+ * Copyright (C) 2002 - 2010 Paul Mundt
*/
#include <linux/screen_info.h>
#include <linux/ioport.h>
@@ -39,7 +39,9 @@
#include <asm/irq.h>
#include <asm/setup.h>
#include <asm/clock.h>
+#include <asm/smp.h>
#include <asm/mmu_context.h>
+#include <asm/mmzone.h>
/*
* Initialize loops_per_jiffy as 10000000 (1000MIPS).
@@ -93,6 +95,7 @@ unsigned long memory_start;
EXPORT_SYMBOL(memory_start);
unsigned long memory_end = 0;
EXPORT_SYMBOL(memory_end);
+unsigned long memory_limit = 0;
static struct resource mem_resources[MAX_NUMNODES];
@@ -100,92 +103,73 @@ int l1i_cache_shape, l1d_cache_shape, l2_cache_shape;
static int __init early_parse_mem(char *p)
{
- unsigned long size;
+ if (!p)
+ return 1;
- memory_start = (unsigned long)__va(__MEMORY_START);
- size = memparse(p, &p);
+ memory_limit = PAGE_ALIGN(memparse(p, &p));
- if (size > __MEMORY_SIZE) {
- printk(KERN_ERR
- "Using mem= to increase the size of kernel memory "
- "is not allowed.\n"
- " Recompile the kernel with the correct value for "
- "CONFIG_MEMORY_SIZE.\n");
- return 0;
- }
-
- memory_end = memory_start + size;
+ pr_notice("Memory limited to %ldMB\n", memory_limit >> 20);
return 0;
}
early_param("mem", early_parse_mem);
-/*
- * Register fully available low RAM pages with the bootmem allocator.
- */
-static void __init register_bootmem_low_pages(void)
+void __init check_for_initrd(void)
{
- unsigned long curr_pfn, last_pfn, pages;
+#ifdef CONFIG_BLK_DEV_INITRD
+ unsigned long start, end;
+
+ /*
+ * Check for the rare cases where boot loaders adhere to the boot
+ * ABI.
+ */
+ if (!LOADER_TYPE || !INITRD_START || !INITRD_SIZE)
+ goto disable;
+
+ start = INITRD_START + __MEMORY_START;
+ end = start + INITRD_SIZE;
+
+ if (unlikely(end <= start))
+ goto disable;
+ if (unlikely(start & ~PAGE_MASK)) {
+ pr_err("initrd must be page aligned\n");
+ goto disable;
+ }
+
+ if (unlikely(start < PAGE_OFFSET)) {
+ pr_err("initrd start < PAGE_OFFSET\n");
+ goto disable;
+ }
+
+ if (unlikely(end > lmb_end_of_DRAM())) {
+ pr_err("initrd extends beyond end of memory "
+ "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
+ end, (unsigned long)lmb_end_of_DRAM());
+ goto disable;
+ }
/*
- * We are rounding up the start address of usable memory:
+	 * If we got this far in spite of the boot loader's best efforts
+ * to the contrary, assume we actually have a valid initrd and
+ * fix up the root dev.
*/
- curr_pfn = PFN_UP(__MEMORY_START);
+ ROOT_DEV = Root_RAM0;
/*
- * ... and at the end of the usable range downwards:
+ * Address sanitization
*/
- last_pfn = PFN_DOWN(__pa(memory_end));
+ initrd_start = (unsigned long)__va(__pa(start));
+ initrd_end = initrd_start + INITRD_SIZE;
- if (last_pfn > max_low_pfn)
- last_pfn = max_low_pfn;
+ lmb_reserve(__pa(initrd_start), INITRD_SIZE);
- pages = last_pfn - curr_pfn;
- free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(pages));
-}
+ return;
-#ifdef CONFIG_KEXEC
-static void __init reserve_crashkernel(void)
-{
- unsigned long long free_mem;
- unsigned long long crash_size, crash_base;
- void *vp;
- int ret;
-
- free_mem = ((unsigned long long)max_low_pfn - min_low_pfn) << PAGE_SHIFT;
-
- ret = parse_crashkernel(boot_command_line, free_mem,
- &crash_size, &crash_base);
- if (ret == 0 && crash_size) {
- if (crash_base <= 0) {
- vp = alloc_bootmem_nopanic(crash_size);
- if (!vp) {
- printk(KERN_INFO "crashkernel allocation "
- "failed\n");
- return;
- }
- crash_base = __pa(vp);
- } else if (reserve_bootmem(crash_base, crash_size,
- BOOTMEM_EXCLUSIVE) < 0) {
- printk(KERN_INFO "crashkernel reservation failed - "
- "memory is in use\n");
- return;
- }
-
- printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
- "for crashkernel (System RAM: %ldMB)\n",
- (unsigned long)(crash_size >> 20),
- (unsigned long)(crash_base >> 20),
- (unsigned long)(free_mem >> 20));
- crashk_res.start = crash_base;
- crashk_res.end = crash_base + crash_size - 1;
- insert_resource(&iomem_resource, &crashk_res);
- }
-}
-#else
-static inline void __init reserve_crashkernel(void)
-{}
+disable:
+ pr_info("initrd disabled\n");
+ initrd_start = initrd_end = 0;
#endif
+}
void __cpuinit calibrate_delay(void)
{
@@ -207,13 +191,18 @@ void __init __add_active_range(unsigned int nid, unsigned long start_pfn,
unsigned long end_pfn)
{
struct resource *res = &mem_resources[nid];
+ unsigned long start, end;
WARN_ON(res->name); /* max one active range per node for now */
+ start = start_pfn << PAGE_SHIFT;
+ end = end_pfn << PAGE_SHIFT;
+
res->name = "System RAM";
- res->start = start_pfn << PAGE_SHIFT;
- res->end = (end_pfn << PAGE_SHIFT) - 1;
+ res->start = start;
+ res->end = end - 1;
res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+
if (request_resource(&iomem_resource, res)) {
pr_err("unable to request memory_resource 0x%lx 0x%lx\n",
start_pfn, end_pfn);
@@ -229,138 +218,18 @@ void __init __add_active_range(unsigned int nid, unsigned long start_pfn,
request_resource(res, &data_resource);
request_resource(res, &bss_resource);
- add_active_range(nid, start_pfn, end_pfn);
-}
-
-void __init setup_bootmem_allocator(unsigned long free_pfn)
-{
- unsigned long bootmap_size;
- unsigned long bootmap_pages, bootmem_paddr;
- u64 total_pages = (lmb_end_of_DRAM() - __MEMORY_START) >> PAGE_SHIFT;
- int i;
-
- bootmap_pages = bootmem_bootmap_pages(total_pages);
-
- bootmem_paddr = lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);
-
- /*
- * Find a proper area for the bootmem bitmap. After this
- * bootstrap step all allocations (until the page allocator
- * is intact) must be done via bootmem_alloc().
- */
- bootmap_size = init_bootmem_node(NODE_DATA(0),
- bootmem_paddr >> PAGE_SHIFT,
- min_low_pfn, max_low_pfn);
-
- /* Add active regions with valid PFNs. */
- for (i = 0; i < lmb.memory.cnt; i++) {
- unsigned long start_pfn, end_pfn;
- start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
- end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
- __add_active_range(0, start_pfn, end_pfn);
- }
-
- /*
- * Add all physical memory to the bootmem map and mark each
- * area as present.
- */
- register_bootmem_low_pages();
-
- /* Reserve the sections we're already using. */
- for (i = 0; i < lmb.reserved.cnt; i++)
- reserve_bootmem(lmb.reserved.region[i].base,
- lmb_size_bytes(&lmb.reserved, i),
- BOOTMEM_DEFAULT);
-
- node_set_online(0);
-
- sparse_memory_present_with_active_regions(0);
-
-#ifdef CONFIG_BLK_DEV_INITRD
- ROOT_DEV = Root_RAM0;
-
- if (LOADER_TYPE && INITRD_START) {
- unsigned long initrd_start_phys = INITRD_START + __MEMORY_START;
-
- if (initrd_start_phys + INITRD_SIZE <= PFN_PHYS(max_low_pfn)) {
- reserve_bootmem(initrd_start_phys, INITRD_SIZE,
- BOOTMEM_DEFAULT);
- initrd_start = (unsigned long)__va(initrd_start_phys);
- initrd_end = initrd_start + INITRD_SIZE;
- } else {
- printk("initrd extends beyond end of memory "
- "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
- initrd_start_phys + INITRD_SIZE,
- (unsigned long)PFN_PHYS(max_low_pfn));
- initrd_start = 0;
- }
- }
-#endif
-
- reserve_crashkernel();
-}
-
-#ifndef CONFIG_NEED_MULTIPLE_NODES
-static void __init setup_memory(void)
-{
- unsigned long start_pfn;
- u64 base = min_low_pfn << PAGE_SHIFT;
- u64 size = (max_low_pfn << PAGE_SHIFT) - base;
-
- /*
- * Partially used pages are not usable - thus
- * we are rounding upwards:
- */
- start_pfn = PFN_UP(__pa(_end));
-
- lmb_add(base, size);
-
- /*
- * Reserve the kernel text and
- * Reserve the bootmem bitmap. We do this in two steps (first step
- * was init_bootmem()), because this catches the (definitely buggy)
- * case of us accidentally initializing the bootmem allocator with
- * an invalid RAM area.
- */
- lmb_reserve(__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET,
- (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) -
- (__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET));
-
/*
- * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
+ * Also make sure that there is a PMB mapping that covers this
+ * range before we attempt to activate it, to avoid reset by MMU.
+ * We can hit this path with NUMA or memory hot-add.
*/
- if (CONFIG_ZERO_PAGE_OFFSET != 0)
- lmb_reserve(__MEMORY_START, CONFIG_ZERO_PAGE_OFFSET);
-
- lmb_analyze();
- lmb_dump_all();
+ pmb_bolt_mapping((unsigned long)__va(start), start, end - start,
+ PAGE_KERNEL);
- setup_bootmem_allocator(start_pfn);
-}
-#else
-extern void __init setup_memory(void);
-#endif
-
-/*
- * Note: elfcorehdr_addr is not just limited to vmcore. It is also used by
- * is_kdump_kernel() to determine if we are booting after a panic. Hence
- * ifdef it under CONFIG_CRASH_DUMP and not CONFIG_PROC_VMCORE.
- */
-#ifdef CONFIG_CRASH_DUMP
-/* elfcorehdr= specifies the location of elf core header
- * stored by the crashed kernel.
- */
-static int __init parse_elfcorehdr(char *arg)
-{
- if (!arg)
- return -EINVAL;
- elfcorehdr_addr = memparse(arg, &arg);
- return 0;
+ add_active_range(nid, start_pfn, end_pfn);
}
-early_param("elfcorehdr", parse_elfcorehdr);
-#endif
-void __init __attribute__ ((weak)) plat_early_device_setup(void)
+void __init __weak plat_early_device_setup(void)
{
}
@@ -401,10 +270,6 @@ void __init setup_arch(char **cmdline_p)
bss_resource.start = virt_to_phys(__bss_start);
bss_resource.end = virt_to_phys(_ebss)-1;
- memory_start = (unsigned long)__va(__MEMORY_START);
- if (!memory_end)
- memory_end = memory_start + __MEMORY_SIZE;
-
#ifdef CONFIG_CMDLINE_OVERWRITE
strlcpy(command_line, CONFIG_CMDLINE, sizeof(command_line));
#else
@@ -421,47 +286,24 @@ void __init setup_arch(char **cmdline_p)
parse_early_param();
- uncached_init();
-
plat_early_device_setup();
- /* Let earlyprintk output early console messages */
- early_platform_driver_probe("earlyprintk", 1, 1);
-
sh_mv_setup();
- /*
- * Find the highest page frame number we have available
- */
- max_pfn = PFN_DOWN(__pa(memory_end));
-
- /*
- * Determine low and high memory ranges:
- */
- max_low_pfn = max_pfn;
- min_low_pfn = __MEMORY_START >> PAGE_SHIFT;
-
- nodes_clear(node_online_map);
+ /* Let earlyprintk output early console messages */
+ early_platform_driver_probe("earlyprintk", 1, 1);
- pmb_init();
- lmb_init();
- setup_memory();
- sparse_init();
+ paging_init();
#ifdef CONFIG_DUMMY_CONSOLE
conswitchp = &dummy_con;
#endif
- paging_init();
-
- ioremap_fixed_init();
/* Perform the machine specific initialisation */
if (likely(sh_mv.mv_setup))
sh_mv.mv_setup(cmdline_p);
-#ifdef CONFIG_SMP
plat_smp_setup();
-#endif
}
/* processor boot mode configuration */
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c
index 002cc612deef..509b36b45115 100644
--- a/arch/sh/kernel/smp.c
+++ b/arch/sh/kernel/smp.c
@@ -3,7 +3,7 @@
*
* SMP support for the SuperH processors.
*
- * Copyright (C) 2002 - 2008 Paul Mundt
+ * Copyright (C) 2002 - 2010 Paul Mundt
* Copyright (C) 2006 - 2007 Akio Idehara
*
* This file is subject to the terms and conditions of the GNU General Public
@@ -31,7 +31,20 @@
int __cpu_number_map[NR_CPUS]; /* Map physical to logical */
int __cpu_logical_map[NR_CPUS]; /* Map logical to physical */
-static inline void __init smp_store_cpu_info(unsigned int cpu)
+struct plat_smp_ops *mp_ops = NULL;
+
+/* State of each CPU */
+DEFINE_PER_CPU(int, cpu_state) = { 0 };
+
+void __cpuinit register_smp_ops(struct plat_smp_ops *ops)
+{
+ if (mp_ops)
+ printk(KERN_WARNING "Overriding previously set SMP ops\n");
+
+ mp_ops = ops;
+}
+
+static inline void __cpuinit smp_store_cpu_info(unsigned int cpu)
{
struct sh_cpuinfo *c = cpu_data + cpu;
@@ -46,14 +59,14 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
init_new_context(current, &init_mm);
current_thread_info()->cpu = cpu;
- plat_prepare_cpus(max_cpus);
+ mp_ops->prepare_cpus(max_cpus);
#ifndef CONFIG_HOTPLUG_CPU
init_cpu_present(&cpu_possible_map);
#endif
}
-void __devinit smp_prepare_boot_cpu(void)
+void __init smp_prepare_boot_cpu(void)
{
unsigned int cpu = smp_processor_id();
@@ -62,37 +75,137 @@ void __devinit smp_prepare_boot_cpu(void)
set_cpu_online(cpu, true);
set_cpu_possible(cpu, true);
+
+ per_cpu(cpu_state, cpu) = CPU_ONLINE;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+void native_cpu_die(unsigned int cpu)
+{
+ unsigned int i;
+
+ for (i = 0; i < 10; i++) {
+ smp_rmb();
+ if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
+ if (system_state == SYSTEM_RUNNING)
+ pr_info("CPU %u is now offline\n", cpu);
+
+ return;
+ }
+
+ msleep(100);
+ }
+
+ pr_err("CPU %u didn't die...\n", cpu);
+}
+
+int native_cpu_disable(unsigned int cpu)
+{
+ return cpu == 0 ? -EPERM : 0;
+}
+
+void play_dead_common(void)
+{
+ idle_task_exit();
+ irq_ctx_exit(raw_smp_processor_id());
+ mb();
+
+ __get_cpu_var(cpu_state) = CPU_DEAD;
+ local_irq_disable();
+}
+
+void native_play_dead(void)
+{
+ play_dead_common();
}
+int __cpu_disable(void)
+{
+ unsigned int cpu = smp_processor_id();
+ struct task_struct *p;
+ int ret;
+
+ ret = mp_ops->cpu_disable(cpu);
+ if (ret)
+ return ret;
+
+ /*
+ * Take this CPU offline. Once we clear this, we can't return,
+ * and we must not schedule until we're ready to give up the cpu.
+ */
+ set_cpu_online(cpu, false);
+
+ /*
+ * OK - migrate IRQs away from this CPU
+ */
+ migrate_irqs();
+
+ /*
+ * Stop the local timer for this CPU.
+ */
+ local_timer_stop(cpu);
+
+ /*
+ * Flush user cache and TLB mappings, and then remove this CPU
+ * from the vm mask set of all processes.
+ */
+ flush_cache_all();
+ local_flush_tlb_all();
+
+ read_lock(&tasklist_lock);
+ for_each_process(p)
+ if (p->mm)
+ cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
+ read_unlock(&tasklist_lock);
+
+ return 0;
+}
+#else /* ... !CONFIG_HOTPLUG_CPU */
+int native_cpu_disable(unsigned int cpu)
+{
+ return -ENOSYS;
+}
+
+void native_cpu_die(unsigned int cpu)
+{
+ /* We said "no" in __cpu_disable */
+ BUG();
+}
+
+void native_play_dead(void)
+{
+ BUG();
+}
+#endif
+
asmlinkage void __cpuinit start_secondary(void)
{
- unsigned int cpu;
+ unsigned int cpu = smp_processor_id();
struct mm_struct *mm = &init_mm;
enable_mmu();
atomic_inc(&mm->mm_count);
atomic_inc(&mm->mm_users);
current->active_mm = mm;
- BUG_ON(current->mm);
enter_lazy_tlb(mm, current);
+ local_flush_tlb_all();
per_cpu_trap_init();
preempt_disable();
- notify_cpu_starting(smp_processor_id());
+ notify_cpu_starting(cpu);
local_irq_enable();
- cpu = smp_processor_id();
-
/* Enable local timers */
local_timer_setup(cpu);
calibrate_delay();
smp_store_cpu_info(cpu);
- cpu_set(cpu, cpu_online_map);
+ set_cpu_online(cpu, true);
+ per_cpu(cpu_state, cpu) = CPU_ONLINE;
cpu_idle();
}
@@ -111,12 +224,19 @@ int __cpuinit __cpu_up(unsigned int cpu)
struct task_struct *tsk;
unsigned long timeout;
- tsk = fork_idle(cpu);
- if (IS_ERR(tsk)) {
- printk(KERN_ERR "Failed forking idle task for cpu %d\n", cpu);
- return PTR_ERR(tsk);
+ tsk = cpu_data[cpu].idle;
+ if (!tsk) {
+ tsk = fork_idle(cpu);
+ if (IS_ERR(tsk)) {
+ pr_err("Failed forking idle task for cpu %d\n", cpu);
+ return PTR_ERR(tsk);
+ }
+
+ cpu_data[cpu].idle = tsk;
}
+ per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
+
/* Fill in data in head.S for secondary cpus */
stack_start.sp = tsk->thread.sp;
stack_start.thread_info = tsk->stack;
@@ -127,7 +247,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
(unsigned long)&stack_start + sizeof(stack_start));
wmb();
- plat_start_cpu(cpu, (unsigned long)_stext);
+ mp_ops->start_cpu(cpu, (unsigned long)_stext);
timeout = jiffies + HZ;
while (time_before(jiffies, timeout)) {
@@ -135,6 +255,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
break;
udelay(10);
+ barrier();
}
if (cpu_online(cpu))
@@ -159,7 +280,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
void smp_send_reschedule(int cpu)
{
- plat_send_ipi(cpu, SMP_MSG_RESCHEDULE);
+ mp_ops->send_ipi(cpu, SMP_MSG_RESCHEDULE);
}
void smp_send_stop(void)
@@ -172,12 +293,12 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask)
int cpu;
for_each_cpu(cpu, mask)
- plat_send_ipi(cpu, SMP_MSG_FUNCTION);
+ mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION);
}
void arch_send_call_function_single_ipi(int cpu)
{
- plat_send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE);
+ mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE);
}
void smp_timer_broadcast(const struct cpumask *mask)
@@ -185,7 +306,7 @@ void smp_timer_broadcast(const struct cpumask *mask)
int cpu;
for_each_cpu(cpu, mask)
- plat_send_ipi(cpu, SMP_MSG_TIMER);
+ mp_ops->send_ipi(cpu, SMP_MSG_TIMER);
}
static void ipi_timer(void)
@@ -249,7 +370,6 @@ static void flush_tlb_mm_ipi(void *mm)
 * behalf of debuggees, kswapd stealing pages from another process etc).
* Kanoj 07/00.
*/
-
void flush_tlb_mm(struct mm_struct *mm)
{
preempt_disable();
diff --git a/arch/sh/kernel/topology.c b/arch/sh/kernel/topology.c
index 9b0b633b6c92..948fdb656933 100644
--- a/arch/sh/kernel/topology.c
+++ b/arch/sh/kernel/topology.c
@@ -52,7 +52,11 @@ static int __init topology_init(void)
#endif
for_each_present_cpu(i) {
- ret = register_cpu(&per_cpu(cpu_devices, i), i);
+ struct cpu *c = &per_cpu(cpu_devices, i);
+
+ c->hotpluggable = 1;
+
+ ret = register_cpu(c, i);
if (unlikely(ret))
printk(KERN_WARNING "%s: register_cpu %d failed (%d)\n",
__func__, i, ret);
diff --git a/arch/sh/kernel/traps_64.c b/arch/sh/kernel/traps_64.c
index e3f92eb05ffd..e67e140bf1f6 100644
--- a/arch/sh/kernel/traps_64.c
+++ b/arch/sh/kernel/traps_64.c
@@ -944,3 +944,8 @@ asmlinkage void do_debug_interrupt(unsigned long code, struct pt_regs *regs)
/* Clear all DEBUGINT causes */
poke_real_address_q(DM_EXP_CAUSE_PHY, 0x0);
}
+
+void __cpuinit per_cpu_trap_init(void)
+{
+ /* Nothing to do for now, VBR initialization later. */
+}
diff --git a/arch/sh/lib/strlen.S b/arch/sh/lib/strlen.S
index f8ab296047b3..1bcc13f05962 100644
--- a/arch/sh/lib/strlen.S
+++ b/arch/sh/lib/strlen.S
@@ -35,7 +35,7 @@ ENTRY(strlen)
mov.b @r4+,r1
tst r1,r1
bt 8f
- add #1,r2
+ add #1,r2
1:
mov #0,r3
diff --git a/arch/sh/mm/Makefile b/arch/sh/mm/Makefile
index 3dc8a8a63822..53f7c684afb2 100644
--- a/arch/sh/mm/Makefile
+++ b/arch/sh/mm/Makefile
@@ -10,6 +10,7 @@ cacheops-$(CONFIG_CPU_SH3) := cache-sh3.o
cacheops-$(CONFIG_CPU_SH4) := cache-sh4.o flush-sh4.o
cacheops-$(CONFIG_CPU_SH5) := cache-sh5.o flush-sh4.o
cacheops-$(CONFIG_SH7705_CACHE_32KB) += cache-sh7705.o
+cacheops-$(CONFIG_CPU_SHX3) += cache-shx3.o
obj-y += $(cacheops-y)
@@ -18,13 +19,14 @@ mmu-$(CONFIG_MMU) := extable_$(BITS).o fault_$(BITS).o \
ioremap.o kmap.o pgtable.o tlbflush_$(BITS).o
obj-y += $(mmu-y)
-obj-$(CONFIG_DEBUG_FS) += asids-debugfs.o
-ifdef CONFIG_DEBUG_FS
-obj-$(CONFIG_CPU_SH4) += cache-debugfs.o
+debugfs-y := asids-debugfs.o
+ifndef CONFIG_CACHE_OFF
+debugfs-$(CONFIG_CPU_SH4) += cache-debugfs.o
endif
ifdef CONFIG_MMU
+debugfs-$(CONFIG_CPU_SH4) += tlb-debugfs.o
tlb-$(CONFIG_CPU_SH3) := tlb-sh3.o
tlb-$(CONFIG_CPU_SH4) := tlb-sh4.o tlb-urb.o
tlb-$(CONFIG_CPU_SH5) := tlb-sh5.o
@@ -32,6 +34,7 @@ tlb-$(CONFIG_CPU_HAS_PTEAEX) := tlb-pteaex.o tlb-urb.o
obj-y += $(tlb-y)
endif
+obj-$(CONFIG_DEBUG_FS) += $(debugfs-y)
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_PMB) += pmb.o
obj-$(CONFIG_NUMA) += numa.o
diff --git a/arch/sh/mm/cache-shx3.c b/arch/sh/mm/cache-shx3.c
new file mode 100644
index 000000000000..c0adbee97b5f
--- /dev/null
+++ b/arch/sh/mm/cache-shx3.c
@@ -0,0 +1,44 @@
+/*
+ * arch/sh/mm/cache-shx3.c - SH-X3 optimized cache ops
+ *
+ * Copyright (C) 2010 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <asm/cache.h>
+
+#define CCR_CACHE_SNM 0x40000 /* Hardware-assisted synonym avoidance */
+#define CCR_CACHE_IBE 0x1000000 /* ICBI broadcast */
+
+void __init shx3_cache_init(void)
+{
+ unsigned int ccr;
+
+ ccr = __raw_readl(CCR);
+
+ /*
+ * If we've got cache aliases, resolve them in hardware.
+ */
+ if (boot_cpu_data.dcache.n_aliases || boot_cpu_data.icache.n_aliases) {
+ ccr |= CCR_CACHE_SNM;
+
+ boot_cpu_data.icache.n_aliases = 0;
+ boot_cpu_data.dcache.n_aliases = 0;
+
+ pr_info("Enabling hardware synonym avoidance\n");
+ }
+
+#ifdef CONFIG_SMP
+ /*
+ * Broadcast I-cache block invalidations by default.
+ */
+ ccr |= CCR_CACHE_IBE;
+#endif
+
+ writel_uncached(ccr, CCR);
+}
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index 0f4095d7ac8b..ba401d137bb9 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -334,6 +334,13 @@ void __init cpu_cache_init(void)
extern void __weak sh4_cache_init(void);
sh4_cache_init();
+
+ if ((boot_cpu_data.type == CPU_SH7786) ||
+ (boot_cpu_data.type == CPU_SHX3)) {
+ extern void __weak shx3_cache_init(void);
+
+ shx3_cache_init();
+ }
}
if (boot_cpu_data.family == CPU_FAMILY_SH5) {
diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c
index 8bf79e3b7bdd..d4c34d757f0d 100644
--- a/arch/sh/mm/fault_32.c
+++ b/arch/sh/mm/fault_32.c
@@ -200,7 +200,6 @@ good_area:
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
-survive:
fault = handle_mm_fault(mm, vma, address, writeaccess ? FAULT_FLAG_WRITE : 0);
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
@@ -290,15 +289,10 @@ no_context:
*/
out_of_memory:
up_read(&mm->mmap_sem);
- if (is_global_init(current)) {
- yield();
- down_read(&mm->mmap_sem);
- goto survive;
- }
- printk("VM: killing process %s\n", tsk->comm);
- if (user_mode(regs))
- do_group_exit(SIGKILL);
- goto no_context;
+ if (!user_mode(regs))
+ goto no_context;
+ pagefault_out_of_memory();
+ return;
do_sigbus:
up_read(&mm->mmap_sem);
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index c505de61a5ca..46f84de62469 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -2,7 +2,7 @@
* linux/arch/sh/mm/init.c
*
* Copyright (C) 1999 Niibe Yutaka
- * Copyright (C) 2002 - 2007 Paul Mundt
+ * Copyright (C) 2002 - 2010 Paul Mundt
*
* Based on linux/arch/i386/mm/init.c:
* Copyright (C) 1995 Linus Torvalds
@@ -16,17 +16,31 @@
#include <linux/pagemap.h>
#include <linux/percpu.h>
#include <linux/io.h>
+#include <linux/lmb.h>
#include <linux/dma-mapping.h>
#include <asm/mmu_context.h>
+#include <asm/mmzone.h>
+#include <asm/kexec.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
+#include <asm/setup.h>
#include <asm/cache.h>
#include <asm/sizes.h>
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
pgd_t swapper_pg_dir[PTRS_PER_PGD];
+void __init generic_mem_init(void)
+{
+ lmb_add(__MEMORY_START, __MEMORY_SIZE);
+}
+
+void __init __weak plat_mem_setup(void)
+{
+ /* Nothing to see here, move along. */
+}
+
#ifdef CONFIG_MMU
static pte_t *__get_pte_phys(unsigned long addr)
{
@@ -152,15 +166,166 @@ void __init page_table_range_init(unsigned long start, unsigned long end,
}
#endif /* CONFIG_MMU */
-/*
- * paging_init() sets up the page tables
- */
+void __init allocate_pgdat(unsigned int nid)
+{
+ unsigned long start_pfn, end_pfn;
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+ unsigned long phys;
+#endif
+
+ get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
+
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+ phys = __lmb_alloc_base(sizeof(struct pglist_data),
+ SMP_CACHE_BYTES, end_pfn << PAGE_SHIFT);
+ /* Retry with all of system memory */
+ if (!phys)
+ phys = __lmb_alloc_base(sizeof(struct pglist_data),
+ SMP_CACHE_BYTES, lmb_end_of_DRAM());
+ if (!phys)
+ panic("Can't allocate pgdat for node %d\n", nid);
+
+ NODE_DATA(nid) = __va(phys);
+ memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
+
+ NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
+#endif
+
+ NODE_DATA(nid)->node_start_pfn = start_pfn;
+ NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
+}
+
+static void __init bootmem_init_one_node(unsigned int nid)
+{
+ unsigned long total_pages, paddr;
+ unsigned long end_pfn;
+ struct pglist_data *p;
+ int i;
+
+ p = NODE_DATA(nid);
+
+ /* Nothing to do.. */
+ if (!p->node_spanned_pages)
+ return;
+
+ end_pfn = p->node_start_pfn + p->node_spanned_pages;
+
+ total_pages = bootmem_bootmap_pages(p->node_spanned_pages);
+
+ paddr = lmb_alloc(total_pages << PAGE_SHIFT, PAGE_SIZE);
+ if (!paddr)
+ panic("Can't allocate bootmap for nid[%d]\n", nid);
+
+ init_bootmem_node(p, paddr >> PAGE_SHIFT, p->node_start_pfn, end_pfn);
+
+ free_bootmem_with_active_regions(nid, end_pfn);
+
+ /*
+ * XXX Handle initial reservations for the system memory node
+ * only for the moment, we'll refactor this later for handling
+ * reservations in other nodes.
+ */
+ if (nid == 0) {
+ /* Reserve the sections we're already using. */
+ for (i = 0; i < lmb.reserved.cnt; i++)
+ reserve_bootmem(lmb.reserved.region[i].base,
+ lmb_size_bytes(&lmb.reserved, i),
+ BOOTMEM_DEFAULT);
+ }
+
+ sparse_memory_present_with_active_regions(nid);
+}
+
+static void __init do_init_bootmem(void)
+{
+ int i;
+
+ /* Add active regions with valid PFNs. */
+ for (i = 0; i < lmb.memory.cnt; i++) {
+ unsigned long start_pfn, end_pfn;
+ start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
+ end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
+ __add_active_range(0, start_pfn, end_pfn);
+ }
+
+ /* All of system RAM sits in node 0 for the non-NUMA case */
+ allocate_pgdat(0);
+ node_set_online(0);
+
+ plat_mem_setup();
+
+ for_each_online_node(i)
+ bootmem_init_one_node(i);
+
+ sparse_init();
+}
+
+static void __init early_reserve_mem(void)
+{
+ unsigned long start_pfn;
+
+ /*
+ * Partially used pages are not usable - thus
+ * we are rounding upwards:
+ */
+ start_pfn = PFN_UP(__pa(_end));
+
+ /*
+ * Reserve the kernel text and the bootmem bitmap. We do
+ * this in two steps (first step was init_bootmem()), because
+ * this catches the (definitely buggy) case of us accidentally
+ * initializing the bootmem allocator with an invalid RAM area.
+ */
+ lmb_reserve(__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET,
+ (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) -
+ (__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET));
+
+ /*
+ * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
+ */
+ if (CONFIG_ZERO_PAGE_OFFSET != 0)
+ lmb_reserve(__MEMORY_START, CONFIG_ZERO_PAGE_OFFSET);
+
+ /*
+ * Handle additional early reservations
+ */
+ check_for_initrd();
+ reserve_crashkernel();
+}
+
void __init paging_init(void)
{
unsigned long max_zone_pfns[MAX_NR_ZONES];
unsigned long vaddr, end;
int nid;
+ lmb_init();
+
+ sh_mv.mv_mem_init();
+
+ early_reserve_mem();
+
+ lmb_enforce_memory_limit(memory_limit);
+ lmb_analyze();
+
+ lmb_dump_all();
+
+ /*
+ * Determine low and high memory ranges:
+ */
+ max_low_pfn = max_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
+ min_low_pfn = __MEMORY_START >> PAGE_SHIFT;
+
+ nodes_clear(node_online_map);
+
+ memory_start = (unsigned long)__va(__MEMORY_START);
+ memory_end = memory_start + (memory_limit ?: lmb_phys_mem_size());
+
+ uncached_init();
+ pmb_init();
+ do_init_bootmem();
+ ioremap_fixed_init();
+
/* We don't need to map the kernel through the TLB, as
 * it is permanently mapped using P1. So clear the
* entire pgd. */
diff --git a/arch/sh/mm/numa.c b/arch/sh/mm/numa.c
index 961b34085e3b..a2e645f64a37 100644
--- a/arch/sh/mm/numa.c
+++ b/arch/sh/mm/numa.c
@@ -24,44 +24,6 @@ EXPORT_SYMBOL_GPL(node_data);
* latency. Each node's pgdat is node-local at the beginning of the node,
* immediately followed by the node mem map.
*/
-void __init setup_memory(void)
-{
- unsigned long free_pfn = PFN_UP(__pa(_end));
- u64 base = min_low_pfn << PAGE_SHIFT;
- u64 size = (max_low_pfn << PAGE_SHIFT) - base;
-
- lmb_add(base, size);
-
- /* Reserve the LMB regions used by the kernel, initrd, etc.. */
- lmb_reserve(__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET,
- (PFN_PHYS(free_pfn) + PAGE_SIZE - 1) -
- (__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET));
-
- /*
- * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
- */
- if (CONFIG_ZERO_PAGE_OFFSET != 0)
- lmb_reserve(__MEMORY_START, CONFIG_ZERO_PAGE_OFFSET);
-
- lmb_analyze();
- lmb_dump_all();
-
- /*
- * Node 0 sets up its pgdat at the first available pfn,
- * and bumps it up before setting up the bootmem allocator.
- */
- NODE_DATA(0) = pfn_to_kaddr(free_pfn);
- memset(NODE_DATA(0), 0, sizeof(struct pglist_data));
- free_pfn += PFN_UP(sizeof(struct pglist_data));
- NODE_DATA(0)->bdata = &bootmem_node_data[0];
-
- /* Set up node 0 */
- setup_bootmem_allocator(free_pfn);
-
- /* Give the platforms a chance to hook up their nodes */
- plat_mem_setup();
-}
-
void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end)
{
unsigned long bootmap_pages;
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index e43ec600afcf..18623ba751b3 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -341,6 +341,8 @@ int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
unsigned long flags, pmb_flags;
int i, mapped;
+ if (size < SZ_16M)
+ return -EINVAL;
if (!pmb_addr_valid(vaddr, size))
return -EFAULT;
if (pmb_mapping_exists(vaddr, phys, size))
@@ -680,7 +682,7 @@ static void __init pmb_merge(struct pmb_entry *head)
/*
* The merged page size must be valid.
*/
- if (!pmb_size_valid(newsize))
+ if (!depth || !pmb_size_valid(newsize))
return;
head->flags &= ~PMB_SZ_MASK;
diff --git a/arch/sh/mm/tlb-debugfs.c b/arch/sh/mm/tlb-debugfs.c
new file mode 100644
index 000000000000..229bf75f28df
--- /dev/null
+++ b/arch/sh/mm/tlb-debugfs.c
@@ -0,0 +1,179 @@
+/*
+ * arch/sh/mm/tlb-debugfs.c
+ *
+ * debugfs ops for SH-4 ITLB/UTLBs.
+ *
+ * Copyright (C) 2010 Matt Fleming
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <asm/processor.h>
+#include <asm/mmu_context.h>
+#include <asm/tlbflush.h>
+
+enum tlb_type {
+ TLB_TYPE_ITLB,
+ TLB_TYPE_UTLB,
+};
+
+static struct {
+ int bits;
+ const char *size;
+} tlb_sizes[] = {
+ { 0x0, " 1KB" },
+ { 0x1, " 4KB" },
+ { 0x2, " 8KB" },
+ { 0x4, " 64KB" },
+ { 0x5, "256KB" },
+ { 0x7, " 1MB" },
+ { 0x8, " 4MB" },
+ { 0xc, " 64MB" },
+};
+
+static int tlb_seq_show(struct seq_file *file, void *iter)
+{
+ unsigned int tlb_type = (unsigned int)file->private;
+ unsigned long addr1, addr2, data1, data2;
+ unsigned long flags;
+ unsigned long mmucr;
+ unsigned int nentries, entry;
+ unsigned int urb;
+
+ mmucr = __raw_readl(MMUCR);
+ if ((mmucr & 0x1) == 0) {
+ seq_printf(file, "address translation disabled\n");
+ return 0;
+ }
+
+ if (tlb_type == TLB_TYPE_ITLB) {
+ addr1 = MMU_ITLB_ADDRESS_ARRAY;
+ addr2 = MMU_ITLB_ADDRESS_ARRAY2;
+ data1 = MMU_ITLB_DATA_ARRAY;
+ data2 = MMU_ITLB_DATA_ARRAY2;
+ nentries = 4;
+ } else {
+ addr1 = MMU_UTLB_ADDRESS_ARRAY;
+ addr2 = MMU_UTLB_ADDRESS_ARRAY2;
+ data1 = MMU_UTLB_DATA_ARRAY;
+ data2 = MMU_UTLB_DATA_ARRAY2;
+ nentries = 64;
+ }
+
+ local_irq_save(flags);
+ jump_to_uncached();
+
+ urb = (mmucr & MMUCR_URB) >> MMUCR_URB_SHIFT;
+
+ /* Make the "entry >= urb" test fail. */
+ if (urb == 0)
+ urb = MMUCR_URB_NENTRIES + 1;
+
+ if (tlb_type == TLB_TYPE_ITLB) {
+ addr1 = MMU_ITLB_ADDRESS_ARRAY;
+ addr2 = MMU_ITLB_ADDRESS_ARRAY2;
+ data1 = MMU_ITLB_DATA_ARRAY;
+ data2 = MMU_ITLB_DATA_ARRAY2;
+ nentries = 4;
+ } else {
+ addr1 = MMU_UTLB_ADDRESS_ARRAY;
+ addr2 = MMU_UTLB_ADDRESS_ARRAY2;
+ data1 = MMU_UTLB_DATA_ARRAY;
+ data2 = MMU_UTLB_DATA_ARRAY2;
+ nentries = 64;
+ }
+
+ seq_printf(file, "entry: vpn ppn asid size valid wired\n");
+
+ for (entry = 0; entry < nentries; entry++) {
+ unsigned long vpn, ppn, asid, size;
+ unsigned long valid;
+ unsigned long val;
+ const char *sz = " ?";
+ int i;
+
+ val = __raw_readl(addr1 | (entry << MMU_TLB_ENTRY_SHIFT));
+ ctrl_barrier();
+ vpn = val & 0xfffffc00;
+ valid = val & 0x100;
+
+ val = __raw_readl(addr2 | (entry << MMU_TLB_ENTRY_SHIFT));
+ ctrl_barrier();
+ asid = val & MMU_CONTEXT_ASID_MASK;
+
+ val = __raw_readl(data1 | (entry << MMU_TLB_ENTRY_SHIFT));
+ ctrl_barrier();
+ ppn = (val & 0x0ffffc00) << 4;
+
+ val = __raw_readl(data2 | (entry << MMU_TLB_ENTRY_SHIFT));
+ ctrl_barrier();
+ size = (val & 0xf0) >> 4;
+
+ for (i = 0; i < ARRAY_SIZE(tlb_sizes); i++) {
+ if (tlb_sizes[i].bits == size)
+ break;
+ }
+
+ if (i != ARRAY_SIZE(tlb_sizes))
+ sz = tlb_sizes[i].size;
+
+ seq_printf(file, "%2d: 0x%08lx 0x%08lx %5lu %s %s %s\n",
+ entry, vpn, ppn, asid,
+ sz, valid ? "V" : "-",
+ (urb <= entry) ? "W" : "-");
+ }
+
+ back_to_cached();
+ local_irq_restore(flags);
+
+ return 0;
+}
+
+static int tlb_debugfs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, tlb_seq_show, inode->i_private);
+}
+
+static const struct file_operations tlb_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = tlb_debugfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int __init tlb_debugfs_init(void)
+{
+ struct dentry *itlb, *utlb;
+
+ itlb = debugfs_create_file("itlb", S_IRUSR, sh_debugfs_root,
+ (unsigned int *)TLB_TYPE_ITLB,
+ &tlb_debugfs_fops);
+ if (unlikely(!itlb))
+ return -ENOMEM;
+ if (IS_ERR(itlb))
+ return PTR_ERR(itlb);
+
+ utlb = debugfs_create_file("utlb", S_IRUSR, sh_debugfs_root,
+ (unsigned int *)TLB_TYPE_UTLB,
+ &tlb_debugfs_fops);
+ if (unlikely(!utlb)) {
+ debugfs_remove(itlb);
+ return -ENOMEM;
+ }
+
+ if (IS_ERR(utlb)) {
+ debugfs_remove(itlb);
+ return PTR_ERR(utlb);
+ }
+
+ return 0;
+}
+module_init(tlb_debugfs_init);
+
+MODULE_LICENSE("GPL v2");
diff --git a/arch/sh/mm/tlbflush_64.c b/arch/sh/mm/tlbflush_64.c
index 706da1d3a67a..03db41cc1268 100644
--- a/arch/sh/mm/tlbflush_64.c
+++ b/arch/sh/mm/tlbflush_64.c
@@ -189,7 +189,6 @@ good_area:
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
-survive:
fault = handle_mm_fault(mm, vma, address, writeaccess ? FAULT_FLAG_WRITE : 0);
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
@@ -294,22 +293,11 @@ no_context:
* us unable to handle the page fault gracefully.
*/
out_of_memory:
- if (is_global_init(current)) {
- panic("INIT out of memory\n");
- yield();
- goto survive;
- }
- printk("fault:Out of memory\n");
up_read(&mm->mmap_sem);
- if (is_global_init(current)) {
- yield();
- down_read(&mm->mmap_sem);
- goto survive;
- }
- printk("VM: killing process %s\n", tsk->comm);
- if (user_mode(regs))
- do_group_exit(SIGKILL);
- goto no_context;
+ if (!user_mode(regs))
+ goto no_context;
+ pagefault_out_of_memory();
+ return;
do_sigbus:
printk("fault:Do sigbus\n");
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 9908d477ccd9..6f1470baa314 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -75,7 +75,7 @@ config ARCH_USES_GETTIMEOFFSET
config GENERIC_CMOS_UPDATE
bool
- default y if SPARC64
+ default y
config GENERIC_CLOCKEVENTS
bool
@@ -133,6 +133,9 @@ config ZONE_DMA
config NEED_DMA_MAP_STATE
def_bool y
+config NEED_SG_DMA_LENGTH
+ def_bool y
+
config GENERIC_ISA_DMA
bool
default y if SPARC32
diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h
index f0d343c3b956..7ae128b19d3f 100644
--- a/arch/sparc/include/asm/atomic_32.h
+++ b/arch/sparc/include/asm/atomic_32.h
@@ -25,7 +25,7 @@ extern int atomic_cmpxchg(atomic_t *, int, int);
extern int atomic_add_unless(atomic_t *, int, int);
extern void atomic_set(atomic_t *, int);
-#define atomic_read(v) ((v)->counter)
+#define atomic_read(v) (*(volatile int *)&(v)->counter)
#define atomic_add(i, v) ((void)__atomic_add_return( (int)(i), (v)))
#define atomic_sub(i, v) ((void)__atomic_add_return(-(int)(i), (v)))
diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
index f2e48009989e..2050ca02c423 100644
--- a/arch/sparc/include/asm/atomic_64.h
+++ b/arch/sparc/include/asm/atomic_64.h
@@ -13,8 +13,8 @@
#define ATOMIC_INIT(i) { (i) }
#define ATOMIC64_INIT(i) { (i) }
-#define atomic_read(v) ((v)->counter)
-#define atomic64_read(v) ((v)->counter)
+#define atomic_read(v) (*(volatile int *)&(v)->counter)
+#define atomic64_read(v) (*(volatile long *)&(v)->counter)
#define atomic_set(v, i) (((v)->counter) = i)
#define atomic64_set(v, i) (((v)->counter) = i)
diff --git a/arch/sparc/include/asm/bitops_64.h b/arch/sparc/include/asm/bitops_64.h
index e72ac9cdfb98..766121a67a24 100644
--- a/arch/sparc/include/asm/bitops_64.h
+++ b/arch/sparc/include/asm/bitops_64.h
@@ -44,7 +44,7 @@ extern void change_bit(unsigned long nr, volatile unsigned long *addr);
#ifdef ULTRA_HAS_POPULATION_COUNT
-static inline unsigned int hweight64(unsigned long w)
+static inline unsigned int __arch_hweight64(unsigned long w)
{
unsigned int res;
@@ -52,7 +52,7 @@ static inline unsigned int hweight64(unsigned long w)
return res;
}
-static inline unsigned int hweight32(unsigned int w)
+static inline unsigned int __arch_hweight32(unsigned int w)
{
unsigned int res;
@@ -60,7 +60,7 @@ static inline unsigned int hweight32(unsigned int w)
return res;
}
-static inline unsigned int hweight16(unsigned int w)
+static inline unsigned int __arch_hweight16(unsigned int w)
{
unsigned int res;
@@ -68,7 +68,7 @@ static inline unsigned int hweight16(unsigned int w)
return res;
}
-static inline unsigned int hweight8(unsigned int w)
+static inline unsigned int __arch_hweight8(unsigned int w)
{
unsigned int res;
@@ -78,9 +78,10 @@ static inline unsigned int hweight8(unsigned int w)
#else
-#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/arch_hweight.h>
#endif
+#include <asm-generic/bitops/const_hweight.h>
#include <asm-generic/bitops/lock.h>
#endif /* __KERNEL__ */
diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
index 41f85ae4bd4a..78b07009f60a 100644
--- a/arch/sparc/include/asm/cache.h
+++ b/arch/sparc/include/asm/cache.h
@@ -7,6 +7,8 @@
#ifndef _SPARC_CACHE_H
#define _SPARC_CACHE_H
+#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
+
#define L1_CACHE_SHIFT 5
#define L1_CACHE_BYTES 32
#define L1_CACHE_ALIGN(x) ((((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1)))
diff --git a/arch/sparc/include/asm/device.h b/arch/sparc/include/asm/device.h
index f3b85b6b0b76..d4c452147412 100644
--- a/arch/sparc/include/asm/device.h
+++ b/arch/sparc/include/asm/device.h
@@ -13,25 +13,10 @@ struct dev_archdata {
void *iommu;
void *stc;
void *host_controller;
-
- struct device_node *prom_node;
struct of_device *op;
-
int numa_node;
};
-static inline void dev_archdata_set_node(struct dev_archdata *ad,
- struct device_node *np)
-{
- ad->prom_node = np;
-}
-
-static inline struct device_node *
-dev_archdata_get_node(const struct dev_archdata *ad)
-{
- return ad->prom_node;
-}
-
struct pdev_archdata {
};
diff --git a/arch/sparc/include/asm/fb.h b/arch/sparc/include/asm/fb.h
index b83e44729655..e834880be204 100644
--- a/arch/sparc/include/asm/fb.h
+++ b/arch/sparc/include/asm/fb.h
@@ -18,7 +18,7 @@ static inline int fb_is_primary_device(struct fb_info *info)
struct device *dev = info->device;
struct device_node *node;
- node = dev->archdata.prom_node;
+ node = dev->of_node;
if (node &&
node == of_console_device)
return 1;
diff --git a/arch/sparc/include/asm/floppy_64.h b/arch/sparc/include/asm/floppy_64.h
index 36439d67ad71..8fac3ab22f36 100644
--- a/arch/sparc/include/asm/floppy_64.h
+++ b/arch/sparc/include/asm/floppy_64.h
@@ -589,7 +589,7 @@ static unsigned long __init sun_floppy_init(void)
if (!op)
return 0;
- state_prop = of_get_property(op->node, "status", NULL);
+ state_prop = of_get_property(op->dev.of_node, "status", NULL);
if (state_prop && !strncmp(state_prop, "disabled", 8))
return 0;
@@ -716,7 +716,7 @@ static unsigned long __init sun_floppy_init(void)
return sun_floppy_types[0];
}
- prop = of_get_property(op->node, "status", NULL);
+ prop = of_get_property(op->dev.of_node, "status", NULL);
if (prop && !strncmp(state, "disabled", 8))
return 0;
diff --git a/arch/sparc/include/asm/of_device.h b/arch/sparc/include/asm/of_device.h
index a5d9811f9697..f320246a0586 100644
--- a/arch/sparc/include/asm/of_device.h
+++ b/arch/sparc/include/asm/of_device.h
@@ -14,7 +14,6 @@
*/
struct of_device
{
- struct device_node *node;
struct device dev;
struct resource resource[PROMREG_MAX];
unsigned int irqs[PROMINTR_MAX];
diff --git a/arch/sparc/include/asm/parport.h b/arch/sparc/include/asm/parport.h
index ff9ead640c4a..c333b8d0949b 100644
--- a/arch/sparc/include/asm/parport.h
+++ b/arch/sparc/include/asm/parport.h
@@ -113,7 +113,7 @@ static int __devinit ecpp_probe(struct of_device *op, const struct of_device_id
struct parport *p;
int slot, err;
- parent = op->node->parent;
+ parent = op->dev.of_node->parent;
if (!strcmp(parent->name, "dma")) {
p = parport_pc_probe_port(base, base + 0x400,
op->irqs[0], PARPORT_DMA_NOFIFO,
@@ -232,8 +232,11 @@ static const struct of_device_id ecpp_match[] = {
};
static struct of_platform_driver ecpp_driver = {
- .name = "ecpp",
- .match_table = ecpp_match,
+ .driver = {
+ .name = "ecpp",
+ .owner = THIS_MODULE,
+ .of_match_table = ecpp_match,
+ },
.probe = ecpp_probe,
.remove = __devexit_p(ecpp_remove),
};
diff --git a/arch/sparc/include/asm/scatterlist.h b/arch/sparc/include/asm/scatterlist.h
index d1120257b033..433e45f05fd4 100644
--- a/arch/sparc/include/asm/scatterlist.h
+++ b/arch/sparc/include/asm/scatterlist.h
@@ -1,8 +1,9 @@
#ifndef _SPARC_SCATTERLIST_H
#define _SPARC_SCATTERLIST_H
-#define sg_dma_len(sg) ((sg)->dma_length)
-
#include <asm-generic/scatterlist.h>
+#define ISA_DMA_THRESHOLD (~0UL)
+#define ARCH_HAS_SG_CHAIN
+
#endif /* !(_SPARC_SCATTERLIST_H) */
diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
index 844d73a0340c..9dd0318d3ddf 100644
--- a/arch/sparc/include/asm/thread_info_32.h
+++ b/arch/sparc/include/asm/thread_info_32.h
@@ -132,7 +132,7 @@ BTFIXUPDEF_CALL(void, free_thread_info, struct thread_info *)
* this quantum (SMP) */
#define TIF_POLLING_NRFLAG 9 /* true if poll_idle() is polling
* TIF_NEED_RESCHED */
-#define TIF_MEMDIE 10
+#define TIF_MEMDIE 10 /* is terminating due to OOM killer */
#define TIF_FREEZE 11 /* is freezing for suspend */
/* as above, but as bit values */
diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
index 4827a3aeac7f..fb2ea7705a46 100644
--- a/arch/sparc/include/asm/thread_info_64.h
+++ b/arch/sparc/include/asm/thread_info_64.h
@@ -223,7 +223,7 @@ register struct thread_info *current_thread_info_reg asm("g6");
* an immediate value in instructions such as andcc.
*/
/* flag bit 12 is available */
-#define TIF_MEMDIE 13
+#define TIF_MEMDIE 13 /* is terminating due to OOM killer */
#define TIF_POLLING_NRFLAG 14
#define TIF_FREEZE 15 /* is freezing for suspend */
diff --git a/arch/sparc/kernel/apc.c b/arch/sparc/kernel/apc.c
index 71ec90b9e316..b27476caa133 100644
--- a/arch/sparc/kernel/apc.c
+++ b/arch/sparc/kernel/apc.c
@@ -174,8 +174,11 @@ static struct of_device_id __initdata apc_match[] = {
MODULE_DEVICE_TABLE(of, apc_match);
static struct of_platform_driver apc_driver = {
- .name = "apc",
- .match_table = apc_match,
+ .driver = {
+ .name = "apc",
+ .owner = THIS_MODULE,
+ .of_match_table = apc_match,
+ },
.probe = apc_probe,
};
diff --git a/arch/sparc/kernel/auxio_64.c b/arch/sparc/kernel/auxio_64.c
index 9f52db2d441c..ddc84128b3c2 100644
--- a/arch/sparc/kernel/auxio_64.c
+++ b/arch/sparc/kernel/auxio_64.c
@@ -104,7 +104,7 @@ MODULE_DEVICE_TABLE(of, auxio_match);
static int __devinit auxio_probe(struct of_device *dev, const struct of_device_id *match)
{
- struct device_node *dp = dev->node;
+ struct device_node *dp = dev->dev.of_node;
unsigned long size;
if (!strcmp(dp->parent->name, "ebus")) {
@@ -132,10 +132,11 @@ static int __devinit auxio_probe(struct of_device *dev, const struct of_device_i
}
static struct of_platform_driver auxio_driver = {
- .match_table = auxio_match,
.probe = auxio_probe,
- .driver = {
- .name = "auxio",
+ .driver = {
+ .name = "auxio",
+ .owner = THIS_MODULE,
+ .of_match_table = auxio_match,
},
};
diff --git a/arch/sparc/kernel/central.c b/arch/sparc/kernel/central.c
index 415c86d5a8da..434335f65823 100644
--- a/arch/sparc/kernel/central.c
+++ b/arch/sparc/kernel/central.c
@@ -149,10 +149,11 @@ static struct of_device_id __initdata clock_board_match[] = {
};
static struct of_platform_driver clock_board_driver = {
- .match_table = clock_board_match,
.probe = clock_board_probe,
- .driver = {
- .name = "clock_board",
+ .driver = {
+ .name = "clock_board",
+ .owner = THIS_MODULE,
+ .of_match_table = clock_board_match,
},
};
@@ -168,7 +169,7 @@ static int __devinit fhc_probe(struct of_device *op,
goto out;
}
- if (!strcmp(op->node->parent->name, "central"))
+ if (!strcmp(op->dev.of_node->parent->name, "central"))
p->central = true;
p->pregs = of_ioremap(&op->resource[0], 0,
@@ -183,7 +184,7 @@ static int __devinit fhc_probe(struct of_device *op,
reg = upa_readl(p->pregs + FHC_PREGS_BSR);
p->board_num = ((reg >> 16) & 1) | ((reg >> 12) & 0x0e);
} else {
- p->board_num = of_getintprop_default(op->node, "board#", -1);
+ p->board_num = of_getintprop_default(op->dev.of_node, "board#", -1);
if (p->board_num == -1) {
printk(KERN_ERR "fhc: No board# property\n");
goto out_unmap_pregs;
@@ -254,10 +255,11 @@ static struct of_device_id __initdata fhc_match[] = {
};
static struct of_platform_driver fhc_driver = {
- .match_table = fhc_match,
.probe = fhc_probe,
- .driver = {
- .name = "fhc",
+ .driver = {
+ .name = "fhc",
+ .owner = THIS_MODULE,
+ .of_match_table = fhc_match,
},
};
diff --git a/arch/sparc/kernel/chmc.c b/arch/sparc/kernel/chmc.c
index e1a9598e2a4d..870cb65b3f21 100644
--- a/arch/sparc/kernel/chmc.c
+++ b/arch/sparc/kernel/chmc.c
@@ -425,7 +425,7 @@ static int __devinit jbusmc_probe(struct of_device *op,
INIT_LIST_HEAD(&p->list);
err = -ENODEV;
- prop = of_get_property(op->node, "portid", &len);
+ prop = of_get_property(op->dev.of_node, "portid", &len);
if (!prop || len != 4) {
printk(KERN_ERR PFX "Cannot find portid.\n");
goto out_free;
@@ -433,7 +433,7 @@ static int __devinit jbusmc_probe(struct of_device *op,
p->portid = *prop;
- prop = of_get_property(op->node, "memory-control-register-1", &len);
+ prop = of_get_property(op->dev.of_node, "memory-control-register-1", &len);
if (!prop || len != 8) {
printk(KERN_ERR PFX "Cannot get memory control register 1.\n");
goto out_free;
@@ -449,7 +449,7 @@ static int __devinit jbusmc_probe(struct of_device *op,
}
err = -ENODEV;
- ml = of_get_property(op->node, "memory-layout", &p->layout_len);
+ ml = of_get_property(op->dev.of_node, "memory-layout", &p->layout_len);
if (!ml) {
printk(KERN_ERR PFX "Cannot get memory layout property.\n");
goto out_iounmap;
@@ -466,7 +466,7 @@ static int __devinit jbusmc_probe(struct of_device *op,
mc_list_add(&p->list);
printk(KERN_INFO PFX "UltraSPARC-IIIi memory controller at %s\n",
- op->node->full_name);
+ op->dev.of_node->full_name);
dev_set_drvdata(&op->dev, p);
@@ -693,7 +693,7 @@ static void chmc_fetch_decode_regs(struct chmc *p)
static int __devinit chmc_probe(struct of_device *op,
const struct of_device_id *match)
{
- struct device_node *dp = op->node;
+ struct device_node *dp = op->dev.of_node;
unsigned long ver;
const void *pval;
int len, portid;
@@ -811,8 +811,11 @@ static const struct of_device_id us3mc_match[] = {
MODULE_DEVICE_TABLE(of, us3mc_match);
static struct of_platform_driver us3mc_driver = {
- .name = "us3mc",
- .match_table = us3mc_match,
+ .driver = {
+ .name = "us3mc",
+ .owner = THIS_MODULE,
+ .of_match_table = us3mc_match,
+ },
.probe = us3mc_probe,
.remove = __devexit_p(us3mc_remove),
};
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index 84e5386714cd..703e4aa9bc38 100644
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -290,7 +290,7 @@ static void *sbus_alloc_coherent(struct device *dev, size_t len,
if (mmu_map_dma_area(dev, dma_addrp, va, res->start, len_total) != 0)
goto err_noiommu;
- res->name = op->node->name;
+ res->name = op->dev.of_node->name;
return (void *)(unsigned long)res->start;
diff --git a/arch/sparc/kernel/kgdb_32.c b/arch/sparc/kernel/kgdb_32.c
index 04df4edc0073..539243b236fa 100644
--- a/arch/sparc/kernel/kgdb_32.c
+++ b/arch/sparc/kernel/kgdb_32.c
@@ -158,6 +158,12 @@ void kgdb_arch_exit(void)
{
}
+void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
+{
+ regs->pc = ip;
+ regs->npc = regs->pc + 4;
+}
+
struct kgdb_arch arch_kgdb_ops = {
/* Breakpoint instruction: ta 0x7d */
.gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x7d },
diff --git a/arch/sparc/kernel/kgdb_64.c b/arch/sparc/kernel/kgdb_64.c
index 0a2bd0f99fc1..768290a6c028 100644
--- a/arch/sparc/kernel/kgdb_64.c
+++ b/arch/sparc/kernel/kgdb_64.c
@@ -181,6 +181,12 @@ void kgdb_arch_exit(void)
{
}
+void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
+{
+ regs->tpc = ip;
+ regs->tnpc = regs->tpc + 4;
+}
+
struct kgdb_arch arch_kgdb_ops = {
/* Breakpoint instruction: ta 0x72 */
.gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x72 },
diff --git a/arch/sparc/kernel/of_device_32.c b/arch/sparc/kernel/of_device_32.c
index da527b33ebc7..47e63f1e719c 100644
--- a/arch/sparc/kernel/of_device_32.c
+++ b/arch/sparc/kernel/of_device_32.c
@@ -254,10 +254,10 @@ static void __init build_device_resources(struct of_device *op,
return;
p_op = to_of_device(parent);
- bus = of_match_bus(p_op->node);
- bus->count_cells(op->node, &na, &ns);
+ bus = of_match_bus(p_op->dev.of_node);
+ bus->count_cells(op->dev.of_node, &na, &ns);
- preg = of_get_property(op->node, bus->addr_prop_name, &num_reg);
+ preg = of_get_property(op->dev.of_node, bus->addr_prop_name, &num_reg);
if (!preg || num_reg == 0)
return;
@@ -271,8 +271,8 @@ static void __init build_device_resources(struct of_device *op,
struct resource *r = &op->resource[index];
u32 addr[OF_MAX_ADDR_CELLS];
const u32 *reg = (preg + (index * ((na + ns) * 4)));
- struct device_node *dp = op->node;
- struct device_node *pp = p_op->node;
+ struct device_node *dp = op->dev.of_node;
+ struct device_node *pp = p_op->dev.of_node;
struct of_bus *pbus, *dbus;
u64 size, result = OF_BAD_ADDR;
unsigned long flags;
@@ -321,7 +321,7 @@ static void __init build_device_resources(struct of_device *op,
if (of_resource_verbose)
printk("%s reg[%d] -> %llx\n",
- op->node->full_name, index,
+ op->dev.of_node->full_name, index,
result);
if (result != OF_BAD_ADDR) {
@@ -329,7 +329,7 @@ static void __init build_device_resources(struct of_device *op,
r->end = result + size - 1;
r->flags = flags | ((result >> 32ULL) & 0xffUL);
}
- r->name = op->node->name;
+ r->name = op->dev.of_node->name;
}
}
@@ -345,10 +345,9 @@ static struct of_device * __init scan_one_device(struct device_node *dp,
return NULL;
sd = &op->dev.archdata;
- sd->prom_node = dp;
sd->op = op;
- op->node = dp;
+ op->dev.of_node = dp;
op->clock_freq = of_getintprop_default(dp, "clock-frequency",
(25*1000*1000));
diff --git a/arch/sparc/kernel/of_device_64.c b/arch/sparc/kernel/of_device_64.c
index b3d4cb5d21b3..1dae8079f728 100644
--- a/arch/sparc/kernel/of_device_64.c
+++ b/arch/sparc/kernel/of_device_64.c
@@ -323,10 +323,10 @@ static void __init build_device_resources(struct of_device *op,
return;
p_op = to_of_device(parent);
- bus = of_match_bus(p_op->node);
- bus->count_cells(op->node, &na, &ns);
+ bus = of_match_bus(p_op->dev.of_node);
+ bus->count_cells(op->dev.of_node, &na, &ns);
- preg = of_get_property(op->node, bus->addr_prop_name, &num_reg);
+ preg = of_get_property(op->dev.of_node, bus->addr_prop_name, &num_reg);
if (!preg || num_reg == 0)
return;
@@ -340,7 +340,7 @@ static void __init build_device_resources(struct of_device *op,
if (num_reg > PROMREG_MAX) {
printk(KERN_WARNING "%s: Too many regs (%d), "
"limiting to %d.\n",
- op->node->full_name, num_reg, PROMREG_MAX);
+ op->dev.of_node->full_name, num_reg, PROMREG_MAX);
num_reg = PROMREG_MAX;
}
@@ -348,8 +348,8 @@ static void __init build_device_resources(struct of_device *op,
struct resource *r = &op->resource[index];
u32 addr[OF_MAX_ADDR_CELLS];
const u32 *reg = (preg + (index * ((na + ns) * 4)));
- struct device_node *dp = op->node;
- struct device_node *pp = p_op->node;
+ struct device_node *dp = op->dev.of_node;
+ struct device_node *pp = p_op->dev.of_node;
struct of_bus *pbus, *dbus;
u64 size, result = OF_BAD_ADDR;
unsigned long flags;
@@ -397,7 +397,7 @@ static void __init build_device_resources(struct of_device *op,
if (of_resource_verbose)
printk("%s reg[%d] -> %llx\n",
- op->node->full_name, index,
+ op->dev.of_node->full_name, index,
result);
if (result != OF_BAD_ADDR) {
@@ -408,7 +408,7 @@ static void __init build_device_resources(struct of_device *op,
r->end = result + size - 1;
r->flags = flags;
}
- r->name = op->node->name;
+ r->name = op->dev.of_node->name;
}
}
@@ -530,7 +530,7 @@ static unsigned int __init build_one_device_irq(struct of_device *op,
struct device *parent,
unsigned int irq)
{
- struct device_node *dp = op->node;
+ struct device_node *dp = op->dev.of_node;
struct device_node *pp, *ip;
unsigned int orig_irq = irq;
int nid;
@@ -575,7 +575,7 @@ static unsigned int __init build_one_device_irq(struct of_device *op,
if (of_irq_verbose)
printk("%s: Apply [%s:%x] imap --> [%s:%x]\n",
- op->node->full_name,
+ op->dev.of_node->full_name,
pp->full_name, this_orig_irq,
(iret ? iret->full_name : "NULL"), irq);
@@ -594,7 +594,7 @@ static unsigned int __init build_one_device_irq(struct of_device *op,
if (of_irq_verbose)
printk("%s: PCI swizzle [%s] "
"%x --> %x\n",
- op->node->full_name,
+ op->dev.of_node->full_name,
pp->full_name, this_orig_irq,
irq);
@@ -611,11 +611,11 @@ static unsigned int __init build_one_device_irq(struct of_device *op,
if (!ip)
return orig_irq;
- irq = ip->irq_trans->irq_build(op->node, irq,
+ irq = ip->irq_trans->irq_build(op->dev.of_node, irq,
ip->irq_trans->data);
if (of_irq_verbose)
printk("%s: Apply IRQ trans [%s] %x --> %x\n",
- op->node->full_name, ip->full_name, orig_irq, irq);
+ op->dev.of_node->full_name, ip->full_name, orig_irq, irq);
out:
nid = of_node_to_nid(dp);
@@ -640,10 +640,9 @@ static struct of_device * __init scan_one_device(struct device_node *dp,
return NULL;
sd = &op->dev.archdata;
- sd->prom_node = dp;
sd->op = op;
- op->node = dp;
+ op->dev.of_node = dp;
op->clock_freq = of_getintprop_default(dp, "clock-frequency",
(25*1000*1000));
diff --git a/arch/sparc/kernel/of_device_common.c b/arch/sparc/kernel/of_device_common.c
index 0247e68210b3..10c6c36a6e75 100644
--- a/arch/sparc/kernel/of_device_common.c
+++ b/arch/sparc/kernel/of_device_common.c
@@ -16,7 +16,7 @@ static int node_match(struct device *dev, void *data)
struct of_device *op = to_of_device(dev);
struct device_node *dp = data;
- return (op->node == dp);
+ return (op->dev.of_node == dp);
}
struct of_device *of_find_device_by_node(struct device_node *dp)
@@ -48,7 +48,7 @@ EXPORT_SYMBOL(irq_of_parse_and_map);
void of_propagate_archdata(struct of_device *bus)
{
struct dev_archdata *bus_sd = &bus->dev.archdata;
- struct device_node *bus_dp = bus->node;
+ struct device_node *bus_dp = bus->dev.of_node;
struct device_node *dp;
for (dp = bus_dp->child; dp; dp = dp->sibling) {
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
index 5ac539a5930f..8a8363adb8bd 100644
--- a/arch/sparc/kernel/pci.c
+++ b/arch/sparc/kernel/pci.c
@@ -261,7 +261,6 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
sd->iommu = pbm->iommu;
sd->stc = &pbm->stc;
sd->host_controller = pbm;
- sd->prom_node = node;
sd->op = op = of_find_device_by_node(node);
sd->numa_node = pbm->numa_node;
@@ -285,6 +284,7 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
dev->sysdata = node;
dev->dev.parent = bus->bridge;
dev->dev.bus = &pci_bus_type;
+ dev->dev.of_node = node;
dev->devfn = devfn;
dev->multifunction = 0; /* maybe a lie? */
set_pcie_port_type(dev);
@@ -653,7 +653,7 @@ show_pciobppath_attr(struct device * dev, struct device_attribute * attr, char *
struct device_node *dp;
pdev = to_pci_dev(dev);
- dp = pdev->dev.archdata.prom_node;
+ dp = pdev->dev.of_node;
return snprintf (buf, PAGE_SIZE, "%s\n", dp->full_name);
}
@@ -683,7 +683,7 @@ static void __devinit pci_bus_register_of_sysfs(struct pci_bus *bus)
struct pci_bus * __devinit pci_scan_one_pbm(struct pci_pbm_info *pbm,
struct device *parent)
{
- struct device_node *node = pbm->op->node;
+ struct device_node *node = pbm->op->dev.of_node;
struct pci_bus *bus;
printk("PCI: Scanning PBM %s\n", node->full_name);
@@ -1022,7 +1022,7 @@ void arch_teardown_msi_irq(unsigned int virt_irq)
struct device_node *pci_device_to_OF_node(struct pci_dev *pdev)
{
- return pdev->dev.archdata.prom_node;
+ return pdev->dev.of_node;
}
EXPORT_SYMBOL(pci_device_to_OF_node);
@@ -1151,15 +1151,13 @@ static int __init of_pci_slot_init(void)
struct device_node *node;
if (pbus->self) {
- struct dev_archdata *sd = pbus->self->sysdata;
-
/* PCI->PCI bridge */
- node = sd->prom_node;
+ node = pbus->self->dev.of_node;
} else {
struct pci_pbm_info *pbm = pbus->sysdata;
/* Host PCI controller */
- node = pbm->op->node;
+ node = pbm->op->dev.of_node;
}
pci_bus_slot_names(node, pbus);
diff --git a/arch/sparc/kernel/pci_common.c b/arch/sparc/kernel/pci_common.c
index 8a000583b5cf..6c7a33af3ba6 100644
--- a/arch/sparc/kernel/pci_common.c
+++ b/arch/sparc/kernel/pci_common.c
@@ -314,12 +314,12 @@ struct pci_ops sun4v_pci_ops = {
void pci_get_pbm_props(struct pci_pbm_info *pbm)
{
- const u32 *val = of_get_property(pbm->op->node, "bus-range", NULL);
+ const u32 *val = of_get_property(pbm->op->dev.of_node, "bus-range", NULL);
pbm->pci_first_busno = val[0];
pbm->pci_last_busno = val[1];
- val = of_get_property(pbm->op->node, "ino-bitmap", NULL);
+ val = of_get_property(pbm->op->dev.of_node, "ino-bitmap", NULL);
if (val) {
pbm->ino_bitmap = (((u64)val[1] << 32UL) |
((u64)val[0] << 0UL));
@@ -365,7 +365,8 @@ static void pci_register_legacy_regions(struct resource *io_res,
static void pci_register_iommu_region(struct pci_pbm_info *pbm)
{
- const u32 *vdma = of_get_property(pbm->op->node, "virtual-dma", NULL);
+ const u32 *vdma = of_get_property(pbm->op->dev.of_node, "virtual-dma",
+ NULL);
if (vdma) {
struct resource *rp = kzalloc(sizeof(*rp), GFP_KERNEL);
@@ -394,7 +395,7 @@ void pci_determine_mem_io_space(struct pci_pbm_info *pbm)
int num_pbm_ranges;
saw_mem = saw_io = 0;
- pbm_ranges = of_get_property(pbm->op->node, "ranges", &i);
+ pbm_ranges = of_get_property(pbm->op->dev.of_node, "ranges", &i);
if (!pbm_ranges) {
prom_printf("PCI: Fatal error, missing PBM ranges property "
" for %s\n",
diff --git a/arch/sparc/kernel/pci_fire.c b/arch/sparc/kernel/pci_fire.c
index d53f45bc7dda..51cfa09e392a 100644
--- a/arch/sparc/kernel/pci_fire.c
+++ b/arch/sparc/kernel/pci_fire.c
@@ -413,7 +413,7 @@ static int __devinit pci_fire_pbm_init(struct pci_pbm_info *pbm,
struct of_device *op, u32 portid)
{
const struct linux_prom64_registers *regs;
- struct device_node *dp = op->node;
+ struct device_node *dp = op->dev.of_node;
int err;
pbm->numa_node = -1;
@@ -458,7 +458,7 @@ static int __devinit pci_fire_pbm_init(struct pci_pbm_info *pbm,
static int __devinit fire_probe(struct of_device *op,
const struct of_device_id *match)
{
- struct device_node *dp = op->node;
+ struct device_node *dp = op->dev.of_node;
struct pci_pbm_info *pbm;
struct iommu *iommu;
u32 portid;
@@ -508,8 +508,11 @@ static struct of_device_id __initdata fire_match[] = {
};
static struct of_platform_driver fire_driver = {
- .name = DRIVER_NAME,
- .match_table = fire_match,
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = fire_match,
+ },
.probe = fire_probe,
};
diff --git a/arch/sparc/kernel/pci_msi.c b/arch/sparc/kernel/pci_msi.c
index e0ef847219c3..548b8ca9c210 100644
--- a/arch/sparc/kernel/pci_msi.c
+++ b/arch/sparc/kernel/pci_msi.c
@@ -324,7 +324,7 @@ void sparc64_pbm_msi_init(struct pci_pbm_info *pbm,
const u32 *val;
int len;
- val = of_get_property(pbm->op->node, "#msi-eqs", &len);
+ val = of_get_property(pbm->op->dev.of_node, "#msi-eqs", &len);
if (!val || len != 4)
goto no_msi;
pbm->msiq_num = *val;
@@ -347,16 +347,16 @@ void sparc64_pbm_msi_init(struct pci_pbm_info *pbm,
u32 msi64_len;
} *arng;
- val = of_get_property(pbm->op->node, "msi-eq-size", &len);
+ val = of_get_property(pbm->op->dev.of_node, "msi-eq-size", &len);
if (!val || len != 4)
goto no_msi;
pbm->msiq_ent_count = *val;
- mqp = of_get_property(pbm->op->node,
+ mqp = of_get_property(pbm->op->dev.of_node,
"msi-eq-to-devino", &len);
if (!mqp)
- mqp = of_get_property(pbm->op->node,
+ mqp = of_get_property(pbm->op->dev.of_node,
"msi-eq-devino", &len);
if (!mqp || len != sizeof(struct msiq_prop))
goto no_msi;
@@ -364,27 +364,27 @@ void sparc64_pbm_msi_init(struct pci_pbm_info *pbm,
pbm->msiq_first = mqp->first_msiq;
pbm->msiq_first_devino = mqp->first_devino;
- val = of_get_property(pbm->op->node, "#msi", &len);
+ val = of_get_property(pbm->op->dev.of_node, "#msi", &len);
if (!val || len != 4)
goto no_msi;
pbm->msi_num = *val;
- mrng = of_get_property(pbm->op->node, "msi-ranges", &len);
+ mrng = of_get_property(pbm->op->dev.of_node, "msi-ranges", &len);
if (!mrng || len != sizeof(struct msi_range_prop))
goto no_msi;
pbm->msi_first = mrng->first_msi;
- val = of_get_property(pbm->op->node, "msi-data-mask", &len);
+ val = of_get_property(pbm->op->dev.of_node, "msi-data-mask", &len);
if (!val || len != 4)
goto no_msi;
pbm->msi_data_mask = *val;
- val = of_get_property(pbm->op->node, "msix-data-width", &len);
+ val = of_get_property(pbm->op->dev.of_node, "msix-data-width", &len);
if (!val || len != 4)
goto no_msi;
pbm->msix_data_width = *val;
- arng = of_get_property(pbm->op->node, "msi-address-ranges",
+ arng = of_get_property(pbm->op->dev.of_node, "msi-address-ranges",
&len);
if (!arng || len != sizeof(struct addr_range_prop))
goto no_msi;
diff --git a/arch/sparc/kernel/pci_psycho.c b/arch/sparc/kernel/pci_psycho.c
index 142b9d6984a8..558a70512824 100644
--- a/arch/sparc/kernel/pci_psycho.c
+++ b/arch/sparc/kernel/pci_psycho.c
@@ -285,7 +285,7 @@ static irqreturn_t psycho_ce_intr(int irq, void *dev_id)
 #define PSYCHO_ECCCTRL_CE 0x2000000000000000UL /* Enable CE interrupts */
static void psycho_register_error_handlers(struct pci_pbm_info *pbm)
{
- struct of_device *op = of_find_device_by_node(pbm->op->node);
+ struct of_device *op = of_find_device_by_node(pbm->op->dev.of_node);
unsigned long base = pbm->controller_regs;
u64 tmp;
int err;
@@ -507,7 +507,7 @@ static int __devinit psycho_probe(struct of_device *op,
const struct of_device_id *match)
{
const struct linux_prom64_registers *pr_regs;
- struct device_node *dp = op->node;
+ struct device_node *dp = op->dev.of_node;
struct pci_pbm_info *pbm;
struct iommu *iommu;
int is_pbm_a, err;
@@ -602,8 +602,11 @@ static struct of_device_id __initdata psycho_match[] = {
};
static struct of_platform_driver psycho_driver = {
- .name = DRIVER_NAME,
- .match_table = psycho_match,
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = psycho_match,
+ },
.probe = psycho_probe,
};
diff --git a/arch/sparc/kernel/pci_sabre.c b/arch/sparc/kernel/pci_sabre.c
index ba6fbeba3e2c..6dad8e3b7506 100644
--- a/arch/sparc/kernel/pci_sabre.c
+++ b/arch/sparc/kernel/pci_sabre.c
@@ -310,7 +310,7 @@ static irqreturn_t sabre_ce_intr(int irq, void *dev_id)
static void sabre_register_error_handlers(struct pci_pbm_info *pbm)
{
- struct device_node *dp = pbm->op->node;
+ struct device_node *dp = pbm->op->dev.of_node;
struct of_device *op;
unsigned long base = pbm->controller_regs;
u64 tmp;
@@ -456,7 +456,7 @@ static int __devinit sabre_probe(struct of_device *op,
const struct of_device_id *match)
{
const struct linux_prom64_registers *pr_regs;
- struct device_node *dp = op->node;
+ struct device_node *dp = op->dev.of_node;
struct pci_pbm_info *pbm;
u32 upa_portid, dma_mask;
struct iommu *iommu;
@@ -596,8 +596,11 @@ static struct of_device_id __initdata sabre_match[] = {
};
static struct of_platform_driver sabre_driver = {
- .name = DRIVER_NAME,
- .match_table = sabre_match,
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = sabre_match,
+ },
.probe = sabre_probe,
};
diff --git a/arch/sparc/kernel/pci_schizo.c b/arch/sparc/kernel/pci_schizo.c
index 2b5cdde77af7..97a1ae2e1c02 100644
--- a/arch/sparc/kernel/pci_schizo.c
+++ b/arch/sparc/kernel/pci_schizo.c
@@ -844,7 +844,7 @@ static int pbm_routes_this_ino(struct pci_pbm_info *pbm, u32 ino)
*/
static void tomatillo_register_error_handlers(struct pci_pbm_info *pbm)
{
- struct of_device *op = of_find_device_by_node(pbm->op->node);
+ struct of_device *op = of_find_device_by_node(pbm->op->dev.of_node);
u64 tmp, err_mask, err_no_mask;
int err;
@@ -939,7 +939,7 @@ static void tomatillo_register_error_handlers(struct pci_pbm_info *pbm)
static void schizo_register_error_handlers(struct pci_pbm_info *pbm)
{
- struct of_device *op = of_find_device_by_node(pbm->op->node);
+ struct of_device *op = of_find_device_by_node(pbm->op->dev.of_node);
u64 tmp, err_mask, err_no_mask;
int err;
@@ -1068,7 +1068,7 @@ static void __devinit schizo_scan_bus(struct pci_pbm_info *pbm,
{
pbm_config_busmastering(pbm);
pbm->is_66mhz_capable =
- (of_find_property(pbm->op->node, "66mhz-capable", NULL)
+ (of_find_property(pbm->op->dev.of_node, "66mhz-capable", NULL)
!= NULL);
pbm->pci_bus = pci_scan_one_pbm(pbm, parent);
@@ -1138,7 +1138,7 @@ static int schizo_pbm_iommu_init(struct pci_pbm_info *pbm)
u32 dma_mask;
u64 control;
- vdma = of_get_property(pbm->op->node, "virtual-dma", NULL);
+ vdma = of_get_property(pbm->op->dev.of_node, "virtual-dma", NULL);
if (!vdma)
vdma = vdma_default;
@@ -1268,7 +1268,7 @@ static void schizo_pbm_hw_init(struct pci_pbm_info *pbm)
pbm->chip_version >= 0x2)
tmp |= 0x3UL << SCHIZO_PCICTRL_PTO_SHIFT;
- if (!of_find_property(pbm->op->node, "no-bus-parking", NULL))
+ if (!of_find_property(pbm->op->dev.of_node, "no-bus-parking", NULL))
tmp |= SCHIZO_PCICTRL_PARK;
else
tmp &= ~SCHIZO_PCICTRL_PARK;
@@ -1311,7 +1311,7 @@ static int __devinit schizo_pbm_init(struct pci_pbm_info *pbm,
int chip_type)
{
const struct linux_prom64_registers *regs;
- struct device_node *dp = op->node;
+ struct device_node *dp = op->dev.of_node;
const char *chipset_name;
int is_pbm_a, err;
@@ -1415,7 +1415,7 @@ static struct pci_pbm_info * __devinit schizo_find_sibling(u32 portid,
static int __devinit __schizo_init(struct of_device *op, unsigned long chip_type)
{
- struct device_node *dp = op->node;
+ struct device_node *dp = op->dev.of_node;
struct pci_pbm_info *pbm;
struct iommu *iommu;
u32 portid;
@@ -1491,8 +1491,11 @@ static struct of_device_id __initdata schizo_match[] = {
};
static struct of_platform_driver schizo_driver = {
- .name = DRIVER_NAME,
- .match_table = schizo_match,
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = schizo_match,
+ },
.probe = schizo_probe,
};
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
index 23c33ff9c31e..a24af6f7e17f 100644
--- a/arch/sparc/kernel/pci_sun4v.c
+++ b/arch/sparc/kernel/pci_sun4v.c
@@ -540,7 +540,7 @@ static void __devinit pci_sun4v_scan_bus(struct pci_pbm_info *pbm,
struct property *prop;
struct device_node *dp;
- dp = pbm->op->node;
+ dp = pbm->op->dev.of_node;
prop = of_find_property(dp, "66mhz-capable", NULL);
pbm->is_66mhz_capable = (prop != NULL);
pbm->pci_bus = pci_scan_one_pbm(pbm, parent);
@@ -584,7 +584,7 @@ static int __devinit pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
u32 dma_mask, dma_offset;
const u32 *vdma;
- vdma = of_get_property(pbm->op->node, "virtual-dma", NULL);
+ vdma = of_get_property(pbm->op->dev.of_node, "virtual-dma", NULL);
if (!vdma)
vdma = vdma_default;
@@ -881,7 +881,7 @@ static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
static int __devinit pci_sun4v_pbm_init(struct pci_pbm_info *pbm,
struct of_device *op, u32 devhandle)
{
- struct device_node *dp = op->node;
+ struct device_node *dp = op->dev.of_node;
int err;
pbm->numa_node = of_node_to_nid(dp);
@@ -929,7 +929,7 @@ static int __devinit pci_sun4v_probe(struct of_device *op,
u32 devhandle;
int i, err;
- dp = op->node;
+ dp = op->dev.of_node;
if (!hvapi_negotiated++) {
err = sun4v_hvapi_register(HV_GRP_PCI,
@@ -1009,8 +1009,11 @@ static struct of_device_id __initdata pci_sun4v_match[] = {
};
static struct of_platform_driver pci_sun4v_driver = {
- .name = DRIVER_NAME,
- .match_table = pci_sun4v_match,
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = pci_sun4v_match,
+ },
.probe = pci_sun4v_probe,
};
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index e2771939341d..0ec92c8861dd 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -14,6 +14,7 @@
#include <linux/perf_event.h>
#include <linux/kprobes.h>
+#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/kdebug.h>
#include <linux/mutex.h>
@@ -91,6 +92,8 @@ struct cpu_hw_events {
/* Enabled/disable state. */
int enabled;
+
+ unsigned int group_flag;
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, };
@@ -980,53 +983,6 @@ static int collect_events(struct perf_event *group, int max_count,
return n;
}
-static void event_sched_in(struct perf_event *event)
-{
- event->state = PERF_EVENT_STATE_ACTIVE;
- event->oncpu = smp_processor_id();
- event->tstamp_running += event->ctx->time - event->tstamp_stopped;
- if (is_software_event(event))
- event->pmu->enable(event);
-}
-
-int hw_perf_group_sched_in(struct perf_event *group_leader,
- struct perf_cpu_context *cpuctx,
- struct perf_event_context *ctx)
-{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
- struct perf_event *sub;
- int n0, n;
-
- if (!sparc_pmu)
- return 0;
-
- n0 = cpuc->n_events;
- n = collect_events(group_leader, perf_max_events - n0,
- &cpuc->event[n0], &cpuc->events[n0],
- &cpuc->current_idx[n0]);
- if (n < 0)
- return -EAGAIN;
- if (check_excludes(cpuc->event, n0, n))
- return -EINVAL;
- if (sparc_check_constraints(cpuc->event, cpuc->events, n + n0))
- return -EAGAIN;
- cpuc->n_events = n0 + n;
- cpuc->n_added += n;
-
- cpuctx->active_oncpu += n;
- n = 1;
- event_sched_in(group_leader);
- list_for_each_entry(sub, &group_leader->sibling_list, group_entry) {
- if (sub->state != PERF_EVENT_STATE_OFF) {
- event_sched_in(sub);
- n++;
- }
- }
- ctx->nr_active += n;
-
- return 1;
-}
-
static int sparc_pmu_enable(struct perf_event *event)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
@@ -1044,11 +1000,20 @@ static int sparc_pmu_enable(struct perf_event *event)
cpuc->events[n0] = event->hw.event_base;
cpuc->current_idx[n0] = PIC_NO_INDEX;
+ /*
+ * If a group event scheduling transaction was started,
+ * skip the schedulability test here; it will be performed
+ * at commit time (->commit_txn) as a whole
+ */
+ if (cpuc->group_flag & PERF_EVENT_TXN_STARTED)
+ goto nocheck;
+
if (check_excludes(cpuc->event, n0, 1))
goto out;
if (sparc_check_constraints(cpuc->event, cpuc->events, n0 + 1))
goto out;
+nocheck:
cpuc->n_events++;
cpuc->n_added++;
@@ -1128,11 +1093,61 @@ static int __hw_perf_event_init(struct perf_event *event)
return 0;
}
+/*
+ * Start group events scheduling transaction
+ * Set the flag to make pmu::enable() not perform the
+ * schedulability test, it will be performed at commit time
+ */
+static void sparc_pmu_start_txn(const struct pmu *pmu)
+{
+ struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+
+ cpuhw->group_flag |= PERF_EVENT_TXN_STARTED;
+}
+
+/*
+ * Stop group events scheduling transaction
+ * Clear the flag and pmu::enable() will perform the
+ * schedulability test.
+ */
+static void sparc_pmu_cancel_txn(const struct pmu *pmu)
+{
+ struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+
+ cpuhw->group_flag &= ~PERF_EVENT_TXN_STARTED;
+}
+
+/*
+ * Commit group events scheduling transaction
+ * Perform the group schedulability test as a whole
+ * Return 0 on success
+ */
+static int sparc_pmu_commit_txn(const struct pmu *pmu)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ int n;
+
+ if (!sparc_pmu)
+ return -EINVAL;
+
+ cpuc = &__get_cpu_var(cpu_hw_events);
+ n = cpuc->n_events;
+ if (check_excludes(cpuc->event, 0, n))
+ return -EINVAL;
+ if (sparc_check_constraints(cpuc->event, cpuc->events, n))
+ return -EAGAIN;
+
+ return 0;
+}
+
static const struct pmu pmu = {
.enable = sparc_pmu_enable,
.disable = sparc_pmu_disable,
.read = sparc_pmu_read,
.unthrottle = sparc_pmu_unthrottle,
+ .start_txn = sparc_pmu_start_txn,
+ .cancel_txn = sparc_pmu_cancel_txn,
+ .commit_txn = sparc_pmu_commit_txn,
};
const struct pmu *hw_perf_event_init(struct perf_event *event)
@@ -1276,6 +1291,9 @@ static void perf_callchain_kernel(struct pt_regs *regs,
struct perf_callchain_entry *entry)
{
unsigned long ksp, fp;
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ int graph = 0;
+#endif
callchain_store(entry, PERF_CONTEXT_KERNEL);
callchain_store(entry, regs->tpc);
@@ -1303,6 +1321,16 @@ static void perf_callchain_kernel(struct pt_regs *regs,
fp = (unsigned long)sf->fp + STACK_BIAS;
}
callchain_store(entry, pc);
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ if ((pc + 8UL) == (unsigned long) &return_to_handler) {
+ int index = current->curr_ret_stack;
+ if (current->ret_stack && index >= graph) {
+ pc = current->ret_stack[index - graph].ret;
+ callchain_store(entry, pc);
+ graph++;
+ }
+ }
+#endif
} while (entry->nr < PERF_MAX_STACK_DEPTH);
}
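
[Reviewer note, not part of the patch] The new ->start_txn/->commit_txn/->cancel_txn callbacks let the core defer the per-event schedulability test until the whole group has been added. A minimal sketch of the expected core-side usage; the helper name and the simplified error handling are illustrative assumptions:

	static int group_sched_in_sketch(struct perf_event *leader,
					 const struct pmu *pmu)
	{
		struct perf_event *sub;

		pmu->start_txn(pmu);		/* defer schedulability checks */

		if (pmu->enable(leader))
			goto fail;
		list_for_each_entry(sub, &leader->sibling_list, group_entry)
			if (pmu->enable(sub))
				goto fail;

		if (!pmu->commit_txn(pmu))	/* test the whole group at once */
			return 0;
	fail:
		pmu->cancel_txn(pmu);
		return -EAGAIN;
	}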
diff --git a/arch/sparc/kernel/pmc.c b/arch/sparc/kernel/pmc.c
index 5e4563d86f19..9589d8b9b0c1 100644
--- a/arch/sparc/kernel/pmc.c
+++ b/arch/sparc/kernel/pmc.c
@@ -79,8 +79,11 @@ static struct of_device_id __initdata pmc_match[] = {
MODULE_DEVICE_TABLE(of, pmc_match);
static struct of_platform_driver pmc_driver = {
- .name = "pmc",
- .match_table = pmc_match,
+ .driver = {
+ .name = "pmc",
+ .owner = THIS_MODULE,
+ .of_match_table = pmc_match,
+ },
.probe = pmc_probe,
};
diff --git a/arch/sparc/kernel/power.c b/arch/sparc/kernel/power.c
index e2a045c235a1..168d4cb63f5b 100644
--- a/arch/sparc/kernel/power.c
+++ b/arch/sparc/kernel/power.c
@@ -41,9 +41,9 @@ static int __devinit power_probe(struct of_device *op, const struct of_device_id
power_reg = of_ioremap(res, 0, 0x4, "power");
printk(KERN_INFO "%s: Control reg at %llx\n",
- op->node->name, res->start);
+ op->dev.of_node->name, res->start);
- if (has_button_interrupt(irq, op->node)) {
+ if (has_button_interrupt(irq, op->dev.of_node)) {
if (request_irq(irq,
power_handler, 0, "power", NULL) < 0)
printk(KERN_ERR "power: Cannot setup IRQ handler.\n");
@@ -60,10 +60,11 @@ static struct of_device_id __initdata power_match[] = {
};
static struct of_platform_driver power_driver = {
- .match_table = power_match,
.probe = power_probe,
- .driver = {
- .name = "power",
+ .driver = {
+ .name = "power",
+ .owner = THIS_MODULE,
+ .of_match_table = power_match,
},
};
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index a5cf3864b31f..dbe81a368b45 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -202,6 +202,7 @@ void show_regs(struct pt_regs *regs)
regs->u_regs[15]);
printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
show_regwindow(regs);
+ show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
}
struct global_reg_snapshot global_reg_snapshot[NR_CPUS];
diff --git a/arch/sparc/kernel/psycho_common.c b/arch/sparc/kernel/psycho_common.c
index 8f1478475421..3f34ac853931 100644
--- a/arch/sparc/kernel/psycho_common.c
+++ b/arch/sparc/kernel/psycho_common.c
@@ -450,7 +450,7 @@ int psycho_iommu_init(struct pci_pbm_info *pbm, int tsbsize,
void psycho_pbm_init_common(struct pci_pbm_info *pbm, struct of_device *op,
const char *chip_name, int chip_type)
{
- struct device_node *dp = op->node;
+ struct device_node *dp = op->dev.of_node;
pbm->name = dp->full_name;
pbm->numa_node = -1;
diff --git a/arch/sparc/kernel/sbus.c b/arch/sparc/kernel/sbus.c
index 406e0872504e..cfeaf04b9cdf 100644
--- a/arch/sparc/kernel/sbus.c
+++ b/arch/sparc/kernel/sbus.c
@@ -63,10 +63,10 @@ void sbus_set_sbus64(struct device *dev, int bursts)
int slot;
u64 val;
- regs = of_get_property(op->node, "reg", NULL);
+ regs = of_get_property(op->dev.of_node, "reg", NULL);
if (!regs) {
printk(KERN_ERR "sbus_set_sbus64: Cannot find regs for %s\n",
- op->node->full_name);
+ op->dev.of_node->full_name);
return;
}
slot = regs->which_io;
@@ -287,7 +287,7 @@ static irqreturn_t sysio_ue_handler(int irq, void *dev_id)
SYSIO_UEAFSR_SPIO | SYSIO_UEAFSR_SDRD | SYSIO_UEAFSR_SDWR);
upa_writeq(error_bits, afsr_reg);
- portid = of_getintprop_default(op->node, "portid", -1);
+ portid = of_getintprop_default(op->dev.of_node, "portid", -1);
/* Log the error. */
printk("SYSIO[%x]: Uncorrectable ECC Error, primary error type[%s]\n",
@@ -361,7 +361,7 @@ static irqreturn_t sysio_ce_handler(int irq, void *dev_id)
SYSIO_CEAFSR_SPIO | SYSIO_CEAFSR_SDRD | SYSIO_CEAFSR_SDWR);
upa_writeq(error_bits, afsr_reg);
- portid = of_getintprop_default(op->node, "portid", -1);
+ portid = of_getintprop_default(op->dev.of_node, "portid", -1);
printk("SYSIO[%x]: Correctable ECC Error, primary error type[%s]\n",
portid,
@@ -439,7 +439,7 @@ static irqreturn_t sysio_sbus_error_handler(int irq, void *dev_id)
SYSIO_SBAFSR_SLE | SYSIO_SBAFSR_STO | SYSIO_SBAFSR_SBERR);
upa_writeq(error_bits, afsr_reg);
- portid = of_getintprop_default(op->node, "portid", -1);
+ portid = of_getintprop_default(op->dev.of_node, "portid", -1);
/* Log the error. */
printk("SYSIO[%x]: SBUS Error, primary error type[%s] read(%d)\n",
@@ -496,7 +496,7 @@ static void __init sysio_register_error_handlers(struct of_device *op)
u64 control;
int portid;
- portid = of_getintprop_default(op->node, "portid", -1);
+ portid = of_getintprop_default(op->dev.of_node, "portid", -1);
irq = sbus_build_irq(op, SYSIO_UE_INO);
if (request_irq(irq, sysio_ue_handler, 0,
@@ -537,7 +537,7 @@ static void __init sysio_register_error_handlers(struct of_device *op)
static void __init sbus_iommu_init(struct of_device *op)
{
const struct linux_prom64_registers *pr;
- struct device_node *dp = op->node;
+ struct device_node *dp = op->dev.of_node;
struct iommu *iommu;
struct strbuf *strbuf;
unsigned long regs, reg_base;
@@ -589,7 +589,7 @@ static void __init sbus_iommu_init(struct of_device *op)
*/
iommu->write_complete_reg = regs + 0x2000UL;
- portid = of_getintprop_default(op->node, "portid", -1);
+ portid = of_getintprop_default(op->dev.of_node, "portid", -1);
printk(KERN_INFO "SYSIO: UPA portID %x, at %016lx\n",
portid, regs);
diff --git a/arch/sparc/kernel/stacktrace.c b/arch/sparc/kernel/stacktrace.c
index acb12f673757..3e0815349630 100644
--- a/arch/sparc/kernel/stacktrace.c
+++ b/arch/sparc/kernel/stacktrace.c
@@ -1,6 +1,7 @@
#include <linux/sched.h>
#include <linux/stacktrace.h>
#include <linux/thread_info.h>
+#include <linux/ftrace.h>
#include <linux/module.h>
#include <asm/ptrace.h>
#include <asm/stacktrace.h>
@@ -12,6 +13,10 @@ static void __save_stack_trace(struct thread_info *tp,
bool skip_sched)
{
unsigned long ksp, fp;
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ struct task_struct *t;
+ int graph = 0;
+#endif
if (tp == current_thread_info()) {
stack_trace_flush();
@@ -21,6 +26,9 @@ static void __save_stack_trace(struct thread_info *tp,
}
fp = ksp + STACK_BIAS;
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ t = tp->task;
+#endif
do {
struct sparc_stackf *sf;
struct pt_regs *regs;
@@ -44,8 +52,21 @@ static void __save_stack_trace(struct thread_info *tp,
if (trace->skip > 0)
trace->skip--;
- else if (!skip_sched || !in_sched_functions(pc))
+ else if (!skip_sched || !in_sched_functions(pc)) {
trace->entries[trace->nr_entries++] = pc;
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ if ((pc + 8UL) == (unsigned long) &return_to_handler) {
+ int index = t->curr_ret_stack;
+ if (t->ret_stack && index >= graph) {
+ pc = t->ret_stack[index - graph].ret;
+ if (trace->nr_entries <
+ trace->max_entries)
+ trace->entries[trace->nr_entries++] = pc;
+ graph++;
+ }
+ }
+#endif
+ }
} while (trace->nr_entries < trace->max_entries);
}
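
[Reviewer note, not part of the patch] The CONFIG_FUNCTION_GRAPH_TRACER hunks in perf_callchain_kernel(), __save_stack_trace() and show_stack() all apply the same fixup: a saved return address that points at return_to_handler is remapped to the real caller recorded on the task's ret_stack. A condensed sketch of that fixup, with simplified names:

	/* Remap a trampoline return address to the real caller (sketch). */
	static unsigned long graph_ret_addr_sketch(struct task_struct *t,
						   int *graph, unsigned long pc)
	{
		if ((pc + 8UL) == (unsigned long)&return_to_handler &&
		    t->ret_stack && t->curr_ret_stack >= *graph)
			return t->ret_stack[t->curr_ret_stack - (*graph)++].ret;
		return pc;
	}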
diff --git a/arch/sparc/kernel/time_32.c b/arch/sparc/kernel/time_32.c
index 0d4c09b15efc..e404b063be2c 100644
--- a/arch/sparc/kernel/time_32.c
+++ b/arch/sparc/kernel/time_32.c
@@ -78,6 +78,11 @@ __volatile__ unsigned int *master_l10_counter;
u32 (*do_arch_gettimeoffset)(void);
+int update_persistent_clock(struct timespec now)
+{
+ return set_rtc_mmss(now.tv_sec);
+}
+
/*
* timer_interrupt() needs to keep up the real-time clock,
* as well as call the "do_timer()" routine every clocktick
@@ -87,9 +92,6 @@ u32 (*do_arch_gettimeoffset)(void);
static irqreturn_t timer_interrupt(int dummy, void *dev_id)
{
- /* last time the cmos clock got updated */
- static long last_rtc_update;
-
#ifndef CONFIG_SMP
profile_tick(CPU_PROFILING);
#endif
@@ -101,16 +103,6 @@ static irqreturn_t timer_interrupt(int dummy, void *dev_id)
do_timer(1);
- /* Determine when to update the Mostek clock. */
- if (ntp_synced() &&
- xtime.tv_sec > last_rtc_update + 660 &&
- (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
- (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) {
- if (set_rtc_mmss(xtime.tv_sec) == 0)
- last_rtc_update = xtime.tv_sec;
- else
- last_rtc_update = xtime.tv_sec - 600; /* do it again in 60 s */
- }
write_sequnlock(&xtime_lock);
#ifndef CONFIG_SMP
@@ -152,7 +144,7 @@ static struct platform_device m48t59_rtc = {
static int __devinit clock_probe(struct of_device *op, const struct of_device_id *match)
{
- struct device_node *dp = op->node;
+ struct device_node *dp = op->dev.of_node;
const char *model = of_get_property(dp, "model", NULL);
if (!model)
@@ -185,10 +177,11 @@ static struct of_device_id __initdata clock_match[] = {
};
static struct of_platform_driver clock_driver = {
- .match_table = clock_match,
.probe = clock_probe,
- .driver = {
- .name = "rtc",
+ .driver = {
+ .name = "rtc",
+ .owner = THIS_MODULE,
+ .of_match_table = clock_match,
},
};
diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c
index c7bbe6cf7b85..21e9fcae0668 100644
--- a/arch/sparc/kernel/time_64.c
+++ b/arch/sparc/kernel/time_64.c
@@ -424,7 +424,7 @@ static int __devinit rtc_probe(struct of_device *op, const struct of_device_id *
struct resource *r;
printk(KERN_INFO "%s: RTC regs at 0x%llx\n",
- op->node->full_name, op->resource[0].start);
+ op->dev.of_node->full_name, op->resource[0].start);
/* The CMOS RTC driver only accepts IORESOURCE_IO, so cons
* up a fake resource so that the probe works for all cases.
@@ -463,10 +463,11 @@ static struct of_device_id __initdata rtc_match[] = {
};
static struct of_platform_driver rtc_driver = {
- .match_table = rtc_match,
.probe = rtc_probe,
- .driver = {
- .name = "rtc",
+ .driver = {
+ .name = "rtc",
+ .owner = THIS_MODULE,
+ .of_match_table = rtc_match,
},
};
@@ -480,7 +481,7 @@ static int __devinit bq4802_probe(struct of_device *op, const struct of_device_i
{
printk(KERN_INFO "%s: BQ4802 regs at 0x%llx\n",
- op->node->full_name, op->resource[0].start);
+ op->dev.of_node->full_name, op->resource[0].start);
rtc_bq4802_device.resource = &op->resource[0];
return platform_device_register(&rtc_bq4802_device);
@@ -495,10 +496,11 @@ static struct of_device_id __initdata bq4802_match[] = {
};
static struct of_platform_driver bq4802_driver = {
- .match_table = bq4802_match,
.probe = bq4802_probe,
- .driver = {
- .name = "bq4802",
+ .driver = {
+ .name = "bq4802",
+ .owner = THIS_MODULE,
+ .of_match_table = bq4802_match,
},
};
@@ -534,7 +536,7 @@ static struct platform_device m48t59_rtc = {
static int __devinit mostek_probe(struct of_device *op, const struct of_device_id *match)
{
- struct device_node *dp = op->node;
+ struct device_node *dp = op->dev.of_node;
/* On an Enterprise system there can be multiple mostek clocks.
* We should only match the one that is on the central FHC bus.
@@ -558,10 +560,11 @@ static struct of_device_id __initdata mostek_match[] = {
};
static struct of_platform_driver mostek_driver = {
- .match_table = mostek_match,
.probe = mostek_probe,
- .driver = {
- .name = "mostek",
+ .driver = {
+ .name = "mostek",
+ .owner = THIS_MODULE,
+ .of_match_table = mostek_match,
},
};
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
index 9da57f032983..42ad2ba85010 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -17,6 +17,7 @@
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/kdebug.h>
+#include <linux/ftrace.h>
#include <linux/gfp.h>
#include <asm/smp.h>
@@ -2154,6 +2155,9 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
unsigned long fp, thread_base, ksp;
struct thread_info *tp;
int count = 0;
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ int graph = 0;
+#endif
ksp = (unsigned long) _ksp;
if (!tsk)
@@ -2193,6 +2197,16 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
}
printk(" [%016lx] %pS\n", pc, (void *) pc);
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ if ((pc + 8UL) == (unsigned long) &return_to_handler) {
+ int index = tsk->curr_ret_stack;
+ if (tsk->ret_stack && index >= graph) {
+ pc = tsk->ret_stack[index - graph].ret;
+ printk(" [%016lx] %pS\n", pc, (void *) pc);
+ graph++;
+ }
+ }
+#endif
} while (++count < 16);
}
diff --git a/arch/um/drivers/harddog_kern.c b/arch/um/drivers/harddog_kern.c
index d332503fa1be..cfcac1ff4cf2 100644
--- a/arch/um/drivers/harddog_kern.c
+++ b/arch/um/drivers/harddog_kern.c
@@ -124,8 +124,8 @@ static ssize_t harddog_write(struct file *file, const char __user *data, size_t
return 0;
}
-static int harddog_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg)
+static int harddog_ioctl_unlocked(struct file *file,
+ unsigned int cmd, unsigned long arg)
{
void __user *argp= (void __user *)arg;
static struct watchdog_info ident = {
@@ -148,10 +148,22 @@ static int harddog_ioctl(struct inode *inode, struct file *file,
}
}
+static long harddog_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ long ret;
+
+ lock_kernel();
+ ret = harddog_ioctl_unlocked(file, cmd, arg);
+ unlock_kernel();
+
+ return ret;
+}
+
static const struct file_operations harddog_fops = {
.owner = THIS_MODULE,
.write = harddog_write,
- .ioctl = harddog_ioctl,
+ .unlocked_ioctl = harddog_ioctl,
.open = harddog_open,
.release = harddog_release,
};
diff --git a/arch/um/drivers/hostaudio_kern.c b/arch/um/drivers/hostaudio_kern.c
index 368219cc2366..ae42695c3597 100644
--- a/arch/um/drivers/hostaudio_kern.c
+++ b/arch/um/drivers/hostaudio_kern.c
@@ -136,7 +136,7 @@ static unsigned int hostaudio_poll(struct file *file,
return mask;
}
-static int hostaudio_ioctl(struct inode *inode, struct file *file,
+static long hostaudio_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
struct hostaudio_state *state = file->private_data;
@@ -223,7 +223,7 @@ static int hostaudio_release(struct inode *inode, struct file *file)
/* /dev/mixer file operations */
-static int hostmixer_ioctl_mixdev(struct inode *inode, struct file *file,
+static long hostmixer_ioctl_mixdev(struct file *file,
unsigned int cmd, unsigned long arg)
{
struct hostmixer_state *state = file->private_data;
@@ -289,7 +289,7 @@ static const struct file_operations hostaudio_fops = {
.read = hostaudio_read,
.write = hostaudio_write,
.poll = hostaudio_poll,
- .ioctl = hostaudio_ioctl,
+ .unlocked_ioctl = hostaudio_ioctl,
.mmap = NULL,
.open = hostaudio_open,
.release = hostaudio_release,
@@ -298,7 +298,7 @@ static const struct file_operations hostaudio_fops = {
static const struct file_operations hostmixer_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
- .ioctl = hostmixer_ioctl_mixdev,
+ .unlocked_ioctl = hostmixer_ioctl_mixdev,
.open = hostmixer_open_mixdev,
.release = hostmixer_release,
};
diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c
index 7a656bd8bd3c..7f7338c90784 100644
--- a/arch/um/drivers/line.c
+++ b/arch/um/drivers/line.c
@@ -19,7 +19,6 @@ static irqreturn_t line_interrupt(int irq, void *data)
{
struct chan *chan = data;
struct line *line = chan->line;
- struct tty_struct *tty;
if (line)
chan_interrupt(&line->chan_list, &line->task, line->tty, irq);
diff --git a/arch/um/drivers/mmapper_kern.c b/arch/um/drivers/mmapper_kern.c
index d22f9e5c0eac..7158393b6793 100644
--- a/arch/um/drivers/mmapper_kern.c
+++ b/arch/um/drivers/mmapper_kern.c
@@ -46,8 +46,7 @@ static ssize_t mmapper_write(struct file *file, const char __user *buf,
return count;
}
-static int mmapper_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg)
+static long mmapper_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
return -ENOIOCTLCMD;
}
@@ -90,7 +89,7 @@ static const struct file_operations mmapper_fops = {
.owner = THIS_MODULE,
.read = mmapper_read,
.write = mmapper_write,
- .ioctl = mmapper_ioctl,
+ .unlocked_ioctl = mmapper_ioctl,
.mmap = mmapper_mmap,
.open = mmapper_open,
.release = mmapper_release,
diff --git a/arch/um/include/asm/system.h b/arch/um/include/asm/system.h
index 753346e2cdfd..93af1cf0907d 100644
--- a/arch/um/include/asm/system.h
+++ b/arch/um/include/asm/system.h
@@ -3,11 +3,8 @@
#include "sysdep/system.h"
-extern void *switch_to(void *prev, void *next, void *last);
-
extern int get_signals(void);
extern int set_signals(int enable);
-extern int get_signals(void);
extern void block_signals(void);
extern void unblock_signals(void);
diff --git a/arch/um/include/asm/thread_info.h b/arch/um/include/asm/thread_info.h
index fd911f855367..e2cf786bda0a 100644
--- a/arch/um/include/asm/thread_info.h
+++ b/arch/um/include/asm/thread_info.h
@@ -63,10 +63,9 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_SIGPENDING 1 /* signal pending */
#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
#define TIF_POLLING_NRFLAG 3 /* true if poll_idle() is polling
- * TIF_NEED_RESCHED
- */
-#define TIF_RESTART_BLOCK 4
-#define TIF_MEMDIE 5
+ * TIF_NEED_RESCHED */
+#define TIF_RESTART_BLOCK 4
+#define TIF_MEMDIE 5 /* is terminating due to OOM killer */
#define TIF_SYSCALL_AUDIT 6
#define TIF_RESTORE_SIGMASK 7
#define TIF_FREEZE 16 /* is freezing for suspend */
diff --git a/arch/um/kernel/skas/syscall.c b/arch/um/kernel/skas/syscall.c
index 4e3b820bd2be..f5173e1ec3ac 100644
--- a/arch/um/kernel/skas/syscall.c
+++ b/arch/um/kernel/skas/syscall.c
@@ -10,7 +10,7 @@
#include "sysdep/syscalls.h"
extern int syscall_table_size;
-#define NR_syscalls (syscall_table_size / sizeof(void *))
+#define NR_SYSCALLS (syscall_table_size / sizeof(void *))
void handle_syscall(struct uml_pt_regs *r)
{
@@ -30,7 +30,7 @@ void handle_syscall(struct uml_pt_regs *r)
* in case it's a compiler bug.
*/
syscall = UPT_SYSCALL_NR(r);
- if ((syscall >= NR_syscalls) || (syscall < 0))
+ if ((syscall >= NR_SYSCALLS) || (syscall < 0))
result = -ENOSYS;
else result = EXECUTE_SYSCALL(syscall, regs);
diff --git a/arch/um/sys-i386/asm/elf.h b/arch/um/sys-i386/asm/elf.h
index e64cd41d7bab..a979a22a8d9f 100644
--- a/arch/um/sys-i386/asm/elf.h
+++ b/arch/um/sys-i386/asm/elf.h
@@ -75,6 +75,8 @@ typedef struct user_i387_struct elf_fpregset_t;
pr_reg[16] = PT_REGS_SS(regs); \
} while (0);
+struct task_struct;
+
extern int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu);
#define ELF_CORE_COPY_FPREGS(t, fpu) elf_core_copy_fpregs(t, fpu)
diff --git a/arch/um/sys-x86_64/asm/elf.h b/arch/um/sys-x86_64/asm/elf.h
index 49655c83efd2..d760967f33a7 100644
--- a/arch/um/sys-x86_64/asm/elf.h
+++ b/arch/um/sys-x86_64/asm/elf.h
@@ -95,6 +95,8 @@ typedef struct user_i387_struct elf_fpregset_t;
(pr_reg)[25] = 0; \
(pr_reg)[26] = 0;
+struct task_struct;
+
extern int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu);
#define ELF_CORE_COPY_FPREGS(t, fpu) elf_core_copy_fpregs(t, fpu)
diff --git a/arch/um/sys-x86_64/signal.c b/arch/um/sys-x86_64/signal.c
index 1a899a7ed7a6..b6b65c7c7a7d 100644
--- a/arch/um/sys-x86_64/signal.c
+++ b/arch/um/sys-x86_64/signal.c
@@ -6,6 +6,7 @@
#include <linux/personality.h>
#include <linux/ptrace.h>
+#include <linux/kernel.h>
#include <asm/unistd.h>
#include <asm/uaccess.h>
#include <asm/ucontext.h>
@@ -165,8 +166,6 @@ struct rt_sigframe
struct _fpstate fpstate;
};
-#define round_down(m, n) (((m) / (n)) * (n))
-
int setup_signal_stack_si(unsigned long stack_top, int sig,
struct k_sigaction *ka, struct pt_regs * regs,
siginfo_t *info, sigset_t *set)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 9458685902bd..dcb0593b4a66 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -53,11 +53,15 @@ config X86
select HAVE_KERNEL_LZMA
select HAVE_KERNEL_LZO
select HAVE_HW_BREAKPOINT
+ select HAVE_MIXED_BREAKPOINTS_REGS
select PERF_EVENTS
select ANON_INODES
select HAVE_ARCH_KMEMCHECK
select HAVE_USER_RETURN_NOTIFIER
+config INSTRUCTION_DECODER
+ def_bool (KPROBES || PERF_EVENTS)
+
config OUTPUT_FORMAT
string
default "elf32-i386" if X86_32
@@ -105,6 +109,9 @@ config SBUS
config NEED_DMA_MAP_STATE
def_bool (X86_64 || DMAR || DMA_API_DEBUG)
+config NEED_SG_DMA_LENGTH
+ def_bool y
+
config GENERIC_ISA_DMA
def_bool y
@@ -197,20 +204,17 @@ config HAVE_INTEL_TXT
# Use the generic interrupt handling code in kernel/irq/:
config GENERIC_HARDIRQS
- bool
- default y
+ def_bool y
config GENERIC_HARDIRQS_NO__DO_IRQ
def_bool y
config GENERIC_IRQ_PROBE
- bool
- default y
+ def_bool y
config GENERIC_PENDING_IRQ
- bool
+ def_bool y
depends on GENERIC_HARDIRQS && SMP
- default y
config USE_GENERIC_SMP_HELPERS
def_bool y
@@ -225,19 +229,22 @@ config X86_64_SMP
depends on X86_64 && SMP
config X86_HT
- bool
+ def_bool y
depends on SMP
- default y
config X86_TRAMPOLINE
- bool
+ def_bool y
depends on SMP || (64BIT && ACPI_SLEEP)
- default y
config X86_32_LAZY_GS
def_bool y
depends on X86_32 && !CC_STACKPROTECTOR
+config ARCH_HWEIGHT_CFLAGS
+ string
+ default "-fcall-saved-ecx -fcall-saved-edx" if X86_32
+ default "-fcall-saved-rdi -fcall-saved-rsi -fcall-saved-rdx -fcall-saved-rcx -fcall-saved-r8 -fcall-saved-r9 -fcall-saved-r10 -fcall-saved-r11" if X86_64
+
config KTIME_SCALAR
def_bool X86_32
source "init/Kconfig"
@@ -447,7 +454,7 @@ config X86_NUMAQ
firmware with - send email to <Martin.Bligh@us.ibm.com>.
config X86_SUPPORTS_MEMORY_FAILURE
- bool
+ def_bool y
# MCE code calls memory_failure():
depends on X86_MCE
# On 32-bit this adds too big of NODES_SHIFT and we run out of page flags:
@@ -455,7 +462,6 @@ config X86_SUPPORTS_MEMORY_FAILURE
# On 32-bit SPARSEMEM adds too big of SECTIONS_WIDTH:
depends on X86_64 || !SPARSEMEM
select ARCH_SUPPORTS_MEMORY_FAILURE
- default y
config X86_VISWS
bool "SGI 320/540 (Visual Workstation)"
@@ -570,7 +576,6 @@ config PARAVIRT_SPINLOCKS
config PARAVIRT_CLOCK
bool
- default n
endif
@@ -749,7 +754,6 @@ config MAXSMP
bool "Configure Maximum number of SMP Processors and NUMA Nodes"
depends on X86_64 && SMP && DEBUG_KERNEL && EXPERIMENTAL
select CPUMASK_OFFSTACK
- default n
---help---
Configure maximum number of CPUS and NUMA Nodes for this architecture.
If unsure, say N.
@@ -829,7 +833,6 @@ config X86_VISWS_APIC
config X86_REROUTE_FOR_BROKEN_BOOT_IRQS
bool "Reroute for broken boot IRQs"
- default n
depends on X86_IO_APIC
---help---
This option enables a workaround that fixes a source of
@@ -876,9 +879,8 @@ config X86_MCE_AMD
the DRAM Error Threshold.
config X86_ANCIENT_MCE
- def_bool n
+ bool "Support for old Pentium 5 / WinChip machine checks"
depends on X86_32 && X86_MCE
- prompt "Support for old Pentium 5 / WinChip machine checks"
---help---
Include support for machine check handling on old Pentium 5 or WinChip
systems. These typically need to be enabled explicitly on the command
@@ -886,8 +888,7 @@ config X86_ANCIENT_MCE
config X86_MCE_THRESHOLD
depends on X86_MCE_AMD || X86_MCE_INTEL
- bool
- default y
+ def_bool y
config X86_MCE_INJECT
depends on X86_MCE
@@ -1026,8 +1027,8 @@ config X86_CPUID
choice
prompt "High Memory Support"
- default HIGHMEM4G if !X86_NUMAQ
default HIGHMEM64G if X86_NUMAQ
+ default HIGHMEM4G
depends on X86_32
config NOHIGHMEM
@@ -1285,7 +1286,7 @@ source "mm/Kconfig"
config HIGHPTE
bool "Allocate 3rd-level pagetables from highmem"
- depends on X86_32 && (HIGHMEM4G || HIGHMEM64G)
+ depends on HIGHMEM
---help---
The VM uses one page table entry for each page of physical memory.
For systems with a lot of RAM, this can be wasteful of precious
@@ -1369,8 +1370,7 @@ config MATH_EMULATION
kernel, it won't hurt.
config MTRR
- bool
- default y
+ def_bool y
prompt "MTRR (Memory Type Range Register) support" if EMBEDDED
---help---
On Intel P6 family processors (Pentium Pro, Pentium II and later)
@@ -1436,8 +1436,7 @@ config MTRR_SANITIZER_SPARE_REG_NR_DEFAULT
mtrr_spare_reg_nr=N on the kernel command line.
config X86_PAT
- bool
- default y
+ def_bool y
prompt "x86 PAT support" if EMBEDDED
depends on MTRR
---help---
@@ -1605,8 +1604,7 @@ config X86_NEED_RELOCS
depends on X86_32 && RELOCATABLE
config PHYSICAL_ALIGN
- hex
- prompt "Alignment value to which kernel should be aligned" if X86_32
+ hex "Alignment value to which kernel should be aligned" if X86_32
default "0x1000000"
range 0x2000 0x1000000
---help---
@@ -1653,7 +1651,6 @@ config COMPAT_VDSO
config CMDLINE_BOOL
bool "Built-in kernel command line"
- default n
---help---
Allow for specifying boot arguments to the kernel at
build time. On some systems (e.g. embedded ones), it is
@@ -1687,7 +1684,6 @@ config CMDLINE
config CMDLINE_OVERRIDE
bool "Built-in command line overrides boot loader arguments"
- default n
depends on CMDLINE_BOOL
---help---
Set this option to 'Y' to have the kernel ignore the boot loader
@@ -1710,6 +1706,10 @@ config HAVE_ARCH_EARLY_PFN_TO_NID
def_bool X86_64
depends on NUMA
+config USE_PERCPU_NUMA_NODE_ID
+ def_bool X86_64
+ depends on NUMA
+
menu "Power management and ACPI options"
config ARCH_HIBERNATION_HEADER
@@ -1723,8 +1723,7 @@ source "drivers/acpi/Kconfig"
source "drivers/sfi/Kconfig"
config X86_APM_BOOT
- bool
- default y
+ def_bool y
depends on APM || APM_MODULE
menuconfig APM
@@ -1931,6 +1930,14 @@ config PCI_MMCONFIG
bool "Support mmconfig PCI config space access"
depends on X86_64 && PCI && ACPI
+config PCI_CNB20LE_QUIRK
+ bool "Read CNB20LE Host Bridge Windows"
+ depends on PCI
+ help
+ Read the PCI windows out of the CNB20LE host bridge. This allows
+ PCI hotplug to work on systems with the CNB20LE chipset which do
+ not have ACPI.
+
config DMAR
bool "Support for DMA Remapping Devices (EXPERIMENTAL)"
depends on PCI_MSI && ACPI && EXPERIMENTAL
@@ -1953,8 +1960,7 @@ config DMAR_DEFAULT_ON
experimental.
config DMAR_BROKEN_GFX_WA
- def_bool n
- prompt "Workaround broken graphics drivers (going away soon)"
+ bool "Workaround broken graphics drivers (going away soon)"
depends on DMAR && BROKEN
---help---
Current Graphics drivers tend to use physical address
@@ -2052,7 +2058,6 @@ config SCx200HR_TIMER
config OLPC
bool "One Laptop Per Child support"
select GPIOLIB
- default n
---help---
Add support for detecting the unique features of the OLPC
XO hardware.
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index a19829374e6a..2ac9069890cd 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -338,6 +338,10 @@ config X86_F00F_BUG
def_bool y
depends on M586MMX || M586TSC || M586 || M486 || M386
+config X86_INVD_BUG
+ def_bool y
+ depends on M486 || M386
+
config X86_WP_WORKS_OK
def_bool y
depends on !M386
@@ -502,23 +506,3 @@ config CPU_SUP_UMC_32
CPU might render the kernel unbootable.
If unsure, say N.
-
-config X86_DS
- def_bool X86_PTRACE_BTS
- depends on X86_DEBUGCTLMSR
- select HAVE_HW_BRANCH_TRACER
-
-config X86_PTRACE_BTS
- bool "Branch Trace Store"
- default y
- depends on X86_DEBUGCTLMSR
- depends on BROKEN
- ---help---
- This adds a ptrace interface to the hardware's branch trace store.
-
- Debuggers may use it to collect an execution trace of the debugged
- application in order to answer the question 'how did I get here?'.
- Debuggers may trace user mode as well as kernel mode.
-
- Say Y unless there is no application development on this machine
- and you want to save a small amount of code size.
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index bc01e3ebfeb2..75085080b63e 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -45,7 +45,6 @@ config EARLY_PRINTK
config EARLY_PRINTK_DBGP
bool "Early printk via EHCI debug port"
- default n
depends on EARLY_PRINTK && PCI
---help---
Write kernel log output directly into the EHCI debug port.
@@ -76,7 +75,6 @@ config DEBUG_PER_CPU_MAPS
bool "Debug access to per_cpu maps"
depends on DEBUG_KERNEL
depends on SMP
- default n
---help---
Say Y to verify that the per_cpu map being accessed has
been setup. Adds a fair amount of code to kernel memory
@@ -174,15 +172,6 @@ config IOMMU_LEAK
Add a simple leak tracer to the IOMMU code. This is useful when you
are debugging a buggy device driver that leaks IOMMU mappings.
-config X86_DS_SELFTEST
- bool "DS selftest"
- default y
- depends on DEBUG_KERNEL
- depends on X86_DS
- ---help---
- Perform Debug Store selftests at boot time.
- If in doubt, say "N".
-
config HAVE_MMIOTRACE_SUPPORT
def_bool y
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 0a43dc515e4c..8aa1b59b9074 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -95,8 +95,9 @@ sp-$(CONFIG_X86_64) := rsp
cfi := $(call as-instr,.cfi_startproc\n.cfi_rel_offset $(sp-y)$(comma)0\n.cfi_endproc,-DCONFIG_AS_CFI=1)
# is .cfi_signal_frame supported too?
cfi-sigframe := $(call as-instr,.cfi_startproc\n.cfi_signal_frame\n.cfi_endproc,-DCONFIG_AS_CFI_SIGNAL_FRAME=1)
-KBUILD_AFLAGS += $(cfi) $(cfi-sigframe)
-KBUILD_CFLAGS += $(cfi) $(cfi-sigframe)
+cfi-sections := $(call as-instr,.cfi_sections .debug_frame,-DCONFIG_AS_CFI_SECTIONS=1)
+KBUILD_AFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections)
+KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections)
LDFLAGS := -m elf_$(UTS_MACHINE)
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
index 20bb0e1ac681..ff16756a51c1 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -32,6 +32,9 @@
#define IN IN1
#define KEY %xmm2
#define IV %xmm3
+#define BSWAP_MASK %xmm10
+#define CTR %xmm11
+#define INC %xmm12
#define KEYP %rdi
#define OUTP %rsi
@@ -42,6 +45,7 @@
#define T1 %r10
#define TKEYP T1
#define T2 %r11
+#define TCTR_LOW T2
_key_expansion_128:
_key_expansion_256a:
@@ -724,3 +728,114 @@ ENTRY(aesni_cbc_dec)
movups IV, (IVP)
.Lcbc_dec_just_ret:
ret
+
+.align 16
+.Lbswap_mask:
+ .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
+
+/*
+ * _aesni_inc_init: internal ABI
+ * setup registers used by _aesni_inc
+ * input:
+ * IV
+ * output:
+ * CTR: == IV, in little endian
+ * TCTR_LOW: == lower qword of CTR
+ * INC: == 1, in little endian
+ * BSWAP_MASK == endian swapping mask
+ */
+_aesni_inc_init:
+ movaps .Lbswap_mask, BSWAP_MASK
+ movaps IV, CTR
+ PSHUFB_XMM BSWAP_MASK CTR
+ mov $1, TCTR_LOW
+ MOVQ_R64_XMM TCTR_LOW INC
+ MOVQ_R64_XMM CTR TCTR_LOW
+ ret
+
+/*
+ * _aesni_inc: internal ABI
+ * Increase IV by 1, IV is in big endian
+ * input:
+ * IV
+ * CTR: == IV, in little endian
+ * TCTR_LOW: == lower qword of CTR
+ * INC: == 1, in little endian
+ * BSWAP_MASK == endian swapping mask
+ * output:
+ * IV: Increased by 1
+ * changed:
+ * CTR: == output IV, in little endian
+ * TCTR_LOW: == lower qword of CTR
+ */
+_aesni_inc:
+ paddq INC, CTR
+ add $1, TCTR_LOW
+ jnc .Linc_low
+ pslldq $8, INC
+ paddq INC, CTR
+ psrldq $8, INC
+.Linc_low:
+ movaps CTR, IV
+ PSHUFB_XMM BSWAP_MASK IV
+ ret
+
+/*
+ * void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src,
+ * unsigned int len, u8 *iv)
+ */
+ENTRY(aesni_ctr_enc)
+ cmp $16, LEN
+ jb .Lctr_enc_just_ret
+ mov 480(KEYP), KLEN
+ movups (IVP), IV
+ call _aesni_inc_init
+ cmp $64, LEN
+ jb .Lctr_enc_loop1
+.align 4
+.Lctr_enc_loop4:
+ movaps IV, STATE1
+ call _aesni_inc
+ movups (INP), IN1
+ movaps IV, STATE2
+ call _aesni_inc
+ movups 0x10(INP), IN2
+ movaps IV, STATE3
+ call _aesni_inc
+ movups 0x20(INP), IN3
+ movaps IV, STATE4
+ call _aesni_inc
+ movups 0x30(INP), IN4
+ call _aesni_enc4
+ pxor IN1, STATE1
+ movups STATE1, (OUTP)
+ pxor IN2, STATE2
+ movups STATE2, 0x10(OUTP)
+ pxor IN3, STATE3
+ movups STATE3, 0x20(OUTP)
+ pxor IN4, STATE4
+ movups STATE4, 0x30(OUTP)
+ sub $64, LEN
+ add $64, INP
+ add $64, OUTP
+ cmp $64, LEN
+ jge .Lctr_enc_loop4
+ cmp $16, LEN
+ jb .Lctr_enc_ret
+.align 4
+.Lctr_enc_loop1:
+ movaps IV, STATE
+ call _aesni_inc
+ movups (INP), IN
+ call _aesni_enc1
+ pxor IN, STATE
+ movups STATE, (OUTP)
+ sub $16, LEN
+ add $16, INP
+ add $16, OUTP
+ cmp $16, LEN
+ jge .Lctr_enc_loop1
+.Lctr_enc_ret:
+ movups IV, (IVP)
+.Lctr_enc_just_ret:
+ ret
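
[Reviewer note, not part of the patch] _aesni_inc keeps the counter in little-endian registers and only byte-swaps back into the big-endian IV, which is why the carry between the two quadwords is handled with the paddq/jnc pair above. For reference, the equivalent big-endian counter increment in C (the helper name is illustrative):

	/* Increment a 128-bit big-endian counter block by one. */
	static void ctr128_inc_sketch(unsigned char ctr[16])
	{
		int i;

		for (i = 15; i >= 0; i--)
			if (++ctr[i])	/* stop once a byte does not wrap to 0 */
				break;
	}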
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 49c552c060e9..2cb3dcc4490a 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -18,6 +18,7 @@
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/cryptd.h>
+#include <crypto/ctr.h>
#include <asm/i387.h>
#include <asm/aes.h>
@@ -58,6 +59,8 @@ asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
const u8 *in, unsigned int len, u8 *iv);
+asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
+ const u8 *in, unsigned int len, u8 *iv);
static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
{
@@ -321,6 +324,72 @@ static struct crypto_alg blk_cbc_alg = {
},
};
+static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
+ struct blkcipher_walk *walk)
+{
+ u8 *ctrblk = walk->iv;
+ u8 keystream[AES_BLOCK_SIZE];
+ u8 *src = walk->src.virt.addr;
+ u8 *dst = walk->dst.virt.addr;
+ unsigned int nbytes = walk->nbytes;
+
+ aesni_enc(ctx, keystream, ctrblk);
+ crypto_xor(keystream, src, nbytes);
+ memcpy(dst, keystream, nbytes);
+ crypto_inc(ctrblk, AES_BLOCK_SIZE);
+}
+
+static int ctr_crypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+{
+ struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
+ struct blkcipher_walk walk;
+ int err;
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ kernel_fpu_begin();
+ while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
+ aesni_ctr_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+ nbytes & AES_BLOCK_MASK, walk.iv);
+ nbytes &= AES_BLOCK_SIZE - 1;
+ err = blkcipher_walk_done(desc, &walk, nbytes);
+ }
+ if (walk.nbytes) {
+ ctr_crypt_final(ctx, &walk);
+ err = blkcipher_walk_done(desc, &walk, 0);
+ }
+ kernel_fpu_end();
+
+ return err;
+}
+
+static struct crypto_alg blk_ctr_alg = {
+ .cra_name = "__ctr-aes-aesni",
+ .cra_driver_name = "__driver-ctr-aes-aesni",
+ .cra_priority = 0,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
+ .cra_alignmask = 0,
+ .cra_type = &crypto_blkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(blk_ctr_alg.cra_list),
+ .cra_u = {
+ .blkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = aes_set_key,
+ .encrypt = ctr_crypt,
+ .decrypt = ctr_crypt,
+ },
+ },
+};
+
static int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key,
unsigned int key_len)
{
@@ -467,13 +536,11 @@ static struct crypto_alg ablk_cbc_alg = {
},
};
-#ifdef HAS_CTR
static int ablk_ctr_init(struct crypto_tfm *tfm)
{
struct cryptd_ablkcipher *cryptd_tfm;
- cryptd_tfm = cryptd_alloc_ablkcipher("fpu(ctr(__driver-aes-aesni))",
- 0, 0);
+ cryptd_tfm = cryptd_alloc_ablkcipher("__driver-ctr-aes-aesni", 0, 0);
if (IS_ERR(cryptd_tfm))
return PTR_ERR(cryptd_tfm);
ablk_init_common(tfm, cryptd_tfm);
@@ -500,11 +567,50 @@ static struct crypto_alg ablk_ctr_alg = {
.ivsize = AES_BLOCK_SIZE,
.setkey = ablk_set_key,
.encrypt = ablk_encrypt,
- .decrypt = ablk_decrypt,
+ .decrypt = ablk_encrypt,
.geniv = "chainiv",
},
},
};
+
+#ifdef HAS_CTR
+static int ablk_rfc3686_ctr_init(struct crypto_tfm *tfm)
+{
+ struct cryptd_ablkcipher *cryptd_tfm;
+
+ cryptd_tfm = cryptd_alloc_ablkcipher(
+ "rfc3686(__driver-ctr-aes-aesni)", 0, 0);
+ if (IS_ERR(cryptd_tfm))
+ return PTR_ERR(cryptd_tfm);
+ ablk_init_common(tfm, cryptd_tfm);
+ return 0;
+}
+
+static struct crypto_alg ablk_rfc3686_ctr_alg = {
+ .cra_name = "rfc3686(ctr(aes))",
+ .cra_driver_name = "rfc3686-ctr-aes-aesni",
+ .cra_priority = 400,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct async_aes_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(ablk_rfc3686_ctr_alg.cra_list),
+ .cra_init = ablk_rfc3686_ctr_init,
+ .cra_exit = ablk_exit,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE+CTR_RFC3686_NONCE_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE+CTR_RFC3686_NONCE_SIZE,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .setkey = ablk_set_key,
+ .encrypt = ablk_encrypt,
+ .decrypt = ablk_decrypt,
+ .geniv = "seqiv",
+ },
+ },
+};
#endif
#ifdef HAS_LRW
@@ -640,13 +746,17 @@ static int __init aesni_init(void)
goto blk_ecb_err;
if ((err = crypto_register_alg(&blk_cbc_alg)))
goto blk_cbc_err;
+ if ((err = crypto_register_alg(&blk_ctr_alg)))
+ goto blk_ctr_err;
if ((err = crypto_register_alg(&ablk_ecb_alg)))
goto ablk_ecb_err;
if ((err = crypto_register_alg(&ablk_cbc_alg)))
goto ablk_cbc_err;
-#ifdef HAS_CTR
if ((err = crypto_register_alg(&ablk_ctr_alg)))
goto ablk_ctr_err;
+#ifdef HAS_CTR
+ if ((err = crypto_register_alg(&ablk_rfc3686_ctr_alg)))
+ goto ablk_rfc3686_ctr_err;
#endif
#ifdef HAS_LRW
if ((err = crypto_register_alg(&ablk_lrw_alg)))
@@ -675,13 +785,17 @@ ablk_pcbc_err:
ablk_lrw_err:
#endif
#ifdef HAS_CTR
+ crypto_unregister_alg(&ablk_rfc3686_ctr_alg);
+ablk_rfc3686_ctr_err:
+#endif
crypto_unregister_alg(&ablk_ctr_alg);
ablk_ctr_err:
-#endif
crypto_unregister_alg(&ablk_cbc_alg);
ablk_cbc_err:
crypto_unregister_alg(&ablk_ecb_alg);
ablk_ecb_err:
+ crypto_unregister_alg(&blk_ctr_alg);
+blk_ctr_err:
crypto_unregister_alg(&blk_cbc_alg);
blk_cbc_err:
crypto_unregister_alg(&blk_ecb_alg);
@@ -705,10 +819,12 @@ static void __exit aesni_exit(void)
crypto_unregister_alg(&ablk_lrw_alg);
#endif
#ifdef HAS_CTR
- crypto_unregister_alg(&ablk_ctr_alg);
+ crypto_unregister_alg(&ablk_rfc3686_ctr_alg);
#endif
+ crypto_unregister_alg(&ablk_ctr_alg);
crypto_unregister_alg(&ablk_cbc_alg);
crypto_unregister_alg(&ablk_ecb_alg);
+ crypto_unregister_alg(&blk_ctr_alg);
crypto_unregister_alg(&blk_cbc_alg);
crypto_unregister_alg(&blk_ecb_alg);
crypto_unregister_alg(&__aesni_alg);
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 56f462cf22d2..aa2c39d968fc 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -85,7 +85,6 @@ extern int acpi_ioapic;
extern int acpi_noirq;
extern int acpi_strict;
extern int acpi_disabled;
-extern int acpi_ht;
extern int acpi_pci_disabled;
extern int acpi_skip_timer_override;
extern int acpi_use_timer_override;
@@ -97,7 +96,6 @@ void acpi_pic_sci_set_trigger(unsigned int, u16);
static inline void disable_acpi(void)
{
acpi_disabled = 1;
- acpi_ht = 0;
acpi_pci_disabled = 1;
acpi_noirq = 1;
}
diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
index b97f786a48d5..a63a68be1cce 100644
--- a/arch/x86/include/asm/alternative-asm.h
+++ b/arch/x86/include/asm/alternative-asm.h
@@ -6,8 +6,8 @@
.macro LOCK_PREFIX
1: lock
.section .smp_locks,"a"
- _ASM_ALIGN
- _ASM_PTR 1b
+ .balign 4
+ .long 1b - .
.previous
.endm
#else
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index b09ec55650b3..03b6bb5394a0 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -28,20 +28,20 @@
*/
#ifdef CONFIG_SMP
-#define LOCK_PREFIX \
+#define LOCK_PREFIX_HERE \
".section .smp_locks,\"a\"\n" \
- _ASM_ALIGN "\n" \
- _ASM_PTR "661f\n" /* address */ \
+ ".balign 4\n" \
+ ".long 671f - .\n" /* offset */ \
".previous\n" \
- "661:\n\tlock; "
+ "671:"
+
+#define LOCK_PREFIX LOCK_PREFIX_HERE "\n\tlock; "
#else /* ! CONFIG_SMP */
+#define LOCK_PREFIX_HERE ""
#define LOCK_PREFIX ""
#endif
-/* This must be included *after* the definition of LOCK_PREFIX */
-#include <asm/cpufeature.h>
-
struct alt_instr {
u8 *instr; /* original instruction */
u8 *replacement;
@@ -96,6 +96,12 @@ static inline int alternatives_text_reserved(void *start, void *end)
".previous"
/*
+ * This must be included *after* the definition of ALTERNATIVE due to
+ * <asm/arch_hweight.h>
+ */
+#include <asm/cpufeature.h>
+
+/*
* Alternative instructions for different CPU types or capabilities.
*
* This allows to use optimized instructions even on generic binary
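
[Reviewer note, not part of the patch] The .smp_locks section now records 32-bit PC-relative offsets (".long 1b - .") instead of absolute pointers, so it shrinks on 64-bit and needs no relocations. A sketch of how such an entry is turned back into the address of the lock prefix when the section is walked; the variable names are assumptions:

	/* poff points at one ".long 1b - ." entry in .smp_locks. */
	static u8 *smp_lock_addr_sketch(const s32 *poff)
	{
		return (u8 *)poff + *poff;	/* relative offset -> lock byte */
	}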
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index b4ac2cdcb64f..1fa03e04ae44 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -373,6 +373,7 @@ extern atomic_t init_deasserted;
extern int wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip);
#endif
+#ifdef CONFIG_X86_LOCAL_APIC
static inline u32 apic_read(u32 reg)
{
return apic->read(reg);
@@ -403,10 +404,19 @@ static inline u32 safe_apic_wait_icr_idle(void)
return apic->safe_wait_icr_idle();
}
+#else /* CONFIG_X86_LOCAL_APIC */
+
+static inline u32 apic_read(u32 reg) { return 0; }
+static inline void apic_write(u32 reg, u32 val) { }
+static inline u64 apic_icr_read(void) { return 0; }
+static inline void apic_icr_write(u32 low, u32 high) { }
+static inline void apic_wait_icr_idle(void) { }
+static inline u32 safe_apic_wait_icr_idle(void) { return 0; }
+
+#endif /* CONFIG_X86_LOCAL_APIC */
static inline void ack_APIC_irq(void)
{
-#ifdef CONFIG_X86_LOCAL_APIC
/*
* ack_APIC_irq() actually gets compiled as a single instruction
* ... yummie.
@@ -414,7 +424,6 @@ static inline void ack_APIC_irq(void)
/* Docs say use 0 for future compatibility */
apic_write(APIC_EOI, 0);
-#endif
}
static inline unsigned default_get_apic_id(unsigned long x)
diff --git a/arch/x86/include/asm/arch_hweight.h b/arch/x86/include/asm/arch_hweight.h
new file mode 100644
index 000000000000..9686c3d9ff73
--- /dev/null
+++ b/arch/x86/include/asm/arch_hweight.h
@@ -0,0 +1,61 @@
+#ifndef _ASM_X86_HWEIGHT_H
+#define _ASM_X86_HWEIGHT_H
+
+#ifdef CONFIG_64BIT
+/* popcnt %edi, %eax -- redundant REX prefix for alignment */
+#define POPCNT32 ".byte 0xf3,0x40,0x0f,0xb8,0xc7"
+/* popcnt %rdi, %rax */
+#define POPCNT64 ".byte 0xf3,0x48,0x0f,0xb8,0xc7"
+#define REG_IN "D"
+#define REG_OUT "a"
+#else
+/* popcnt %eax, %eax */
+#define POPCNT32 ".byte 0xf3,0x0f,0xb8,0xc0"
+#define REG_IN "a"
+#define REG_OUT "a"
+#endif
+
+/*
+ * __sw_hweightXX are called from within the alternatives below
+ * and callee-clobbered registers need to be taken care of. See
+ * ARCH_HWEIGHT_CFLAGS in <arch/x86/Kconfig> for the respective
+ * compiler switches.
+ */
+static inline unsigned int __arch_hweight32(unsigned int w)
+{
+ unsigned int res = 0;
+
+ asm (ALTERNATIVE("call __sw_hweight32", POPCNT32, X86_FEATURE_POPCNT)
+ : "="REG_OUT (res)
+ : REG_IN (w));
+
+ return res;
+}
+
+static inline unsigned int __arch_hweight16(unsigned int w)
+{
+ return __arch_hweight32(w & 0xffff);
+}
+
+static inline unsigned int __arch_hweight8(unsigned int w)
+{
+ return __arch_hweight32(w & 0xff);
+}
+
+static inline unsigned long __arch_hweight64(__u64 w)
+{
+ unsigned long res = 0;
+
+#ifdef CONFIG_X86_32
+ return __arch_hweight32((u32)w) +
+ __arch_hweight32((u32)(w >> 32));
+#else
+ asm (ALTERNATIVE("call __sw_hweight64", POPCNT64, X86_FEATURE_POPCNT)
+ : "="REG_OUT (res)
+ : REG_IN (w));
+#endif /* CONFIG_X86_32 */
+
+ return res;
+}
+
+#endif
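
[Reviewer note, not part of the patch] The ALTERNATIVE in __arch_hweight32() patches in the popcnt instruction on capable CPUs; otherwise it calls __sw_hweight32 under the register-saving convention set by ARCH_HWEIGHT_CFLAGS. For orientation, a C sketch equivalent to that software fallback (illustrative name; the real implementation lives in lib/hweight.c):

	/* Classic parallel (SWAR) population count of a 32-bit word. */
	static unsigned int sw_hweight32_sketch(unsigned int w)
	{
		w -= (w >> 1) & 0x55555555;
		w  = (w & 0x33333333) + ((w >> 2) & 0x33333333);
		w  = (w + (w >> 4)) & 0x0f0f0f0f;
		return (w * 0x01010101) >> 24;
	}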
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index 8f8217b9bdac..952a826ac4e5 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -22,7 +22,7 @@
*/
static inline int atomic_read(const atomic_t *v)
{
- return v->counter;
+ return (*(volatile int *)&(v)->counter);
}
/**
@@ -246,6 +246,29 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+/*
+ * atomic_dec_if_positive - decrement by 1 if old value positive
+ * @v: pointer of type atomic_t
+ *
+ * The function returns the old value of *v minus 1, even if
+ * the atomic variable, v, was not decremented.
+ */
+static inline int atomic_dec_if_positive(atomic_t *v)
+{
+ int c, old, dec;
+ c = atomic_read(v);
+ for (;;) {
+ dec = c - 1;
+ if (unlikely(dec < 0))
+ break;
+ old = atomic_cmpxchg((v), c, dec);
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+ return dec;
+}
+
/**
* atomic_inc_short - increment of a short integer
* @v: pointer to type int
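
[Reviewer note, not part of the patch] A small usage sketch for the new atomic_dec_if_positive(): it returns the old value minus one whether or not the decrement happened, so a negative result means the counter was already zero. The names below are illustrative:

	static atomic_t slots_available = ATOMIC_INIT(4);

	/* Claim one slot; returns false when none are left. */
	static bool try_claim_slot(void)
	{
		return atomic_dec_if_positive(&slots_available) >= 0;
	}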
diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
index 03027bf28de5..2a934aa19a43 100644
--- a/arch/x86/include/asm/atomic64_32.h
+++ b/arch/x86/include/asm/atomic64_32.h
@@ -14,109 +14,193 @@ typedef struct {
#define ATOMIC64_INIT(val) { (val) }
-extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
+#ifdef CONFIG_X86_CMPXCHG64
+#define ATOMIC64_ALTERNATIVE_(f, g) "call atomic64_" #g "_cx8"
+#else
+#define ATOMIC64_ALTERNATIVE_(f, g) ALTERNATIVE("call atomic64_" #f "_386", "call atomic64_" #g "_cx8", X86_FEATURE_CX8)
+#endif
+
+#define ATOMIC64_ALTERNATIVE(f) ATOMIC64_ALTERNATIVE_(f, f)
+
+/**
+ * atomic64_cmpxchg - cmpxchg atomic64 variable
+ * @v: pointer to type atomic64_t
+ * @o: expected value
+ * @n: new value
+ *
+ * Atomically sets @v to @n if it was equal to @o and returns
+ * the old value.
+ */
+
+static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
+{
+ return cmpxchg64(&v->counter, o, n);
+}
/**
* atomic64_xchg - xchg atomic64 variable
- * @ptr: pointer to type atomic64_t
- * @new_val: value to assign
+ * @v: pointer to type atomic64_t
+ * @n: value to assign
*
- * Atomically xchgs the value of @ptr to @new_val and returns
+ * Atomically xchgs the value of @v to @n and returns
* the old value.
*/
-extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
+static inline long long atomic64_xchg(atomic64_t *v, long long n)
+{
+ long long o;
+ unsigned high = (unsigned)(n >> 32);
+ unsigned low = (unsigned)n;
+ asm volatile(ATOMIC64_ALTERNATIVE(xchg)
+ : "=A" (o), "+b" (low), "+c" (high)
+ : "S" (v)
+ : "memory"
+ );
+ return o;
+}
/**
* atomic64_set - set atomic64 variable
- * @ptr: pointer to type atomic64_t
- * @new_val: value to assign
+ * @v: pointer to type atomic64_t
+ * @n: value to assign
*
- * Atomically sets the value of @ptr to @new_val.
+ * Atomically sets the value of @v to @n.
*/
-extern void atomic64_set(atomic64_t *ptr, u64 new_val);
+static inline void atomic64_set(atomic64_t *v, long long i)
+{
+ unsigned high = (unsigned)(i >> 32);
+ unsigned low = (unsigned)i;
+ asm volatile(ATOMIC64_ALTERNATIVE(set)
+ : "+b" (low), "+c" (high)
+ : "S" (v)
+ : "eax", "edx", "memory"
+ );
+}
/**
* atomic64_read - read atomic64 variable
- * @ptr: pointer to type atomic64_t
+ * @v: pointer to type atomic64_t
*
- * Atomically reads the value of @ptr and returns it.
+ * Atomically reads the value of @v and returns it.
*/
-static inline u64 atomic64_read(atomic64_t *ptr)
+static inline long long atomic64_read(atomic64_t *v)
{
- u64 res;
-
- /*
- * Note, we inline this atomic64_t primitive because
- * it only clobbers EAX/EDX and leaves the others
- * untouched. We also (somewhat subtly) rely on the
- * fact that cmpxchg8b returns the current 64-bit value
- * of the memory location we are touching:
- */
- asm volatile(
- "mov %%ebx, %%eax\n\t"
- "mov %%ecx, %%edx\n\t"
- LOCK_PREFIX "cmpxchg8b %1\n"
- : "=&A" (res)
- : "m" (*ptr)
- );
-
- return res;
-}
-
-extern u64 atomic64_read(atomic64_t *ptr);
+ long long r;
+ asm volatile(ATOMIC64_ALTERNATIVE(read)
+ : "=A" (r), "+c" (v)
+ : : "memory"
+ );
+ return r;
+ }
/**
* atomic64_add_return - add and return
- * @delta: integer value to add
- * @ptr: pointer to type atomic64_t
+ * @i: integer value to add
+ * @v: pointer to type atomic64_t
*
- * Atomically adds @delta to @ptr and returns @delta + *@ptr
+ * Atomically adds @i to @v and returns @i + *@v
*/
-extern u64 atomic64_add_return(u64 delta, atomic64_t *ptr);
+static inline long long atomic64_add_return(long long i, atomic64_t *v)
+{
+ asm volatile(ATOMIC64_ALTERNATIVE(add_return)
+ : "+A" (i), "+c" (v)
+ : : "memory"
+ );
+ return i;
+}
/*
* Other variants with different arithmetic operators:
*/
-extern u64 atomic64_sub_return(u64 delta, atomic64_t *ptr);
-extern u64 atomic64_inc_return(atomic64_t *ptr);
-extern u64 atomic64_dec_return(atomic64_t *ptr);
+static inline long long atomic64_sub_return(long long i, atomic64_t *v)
+{
+ asm volatile(ATOMIC64_ALTERNATIVE(sub_return)
+ : "+A" (i), "+c" (v)
+ : : "memory"
+ );
+ return i;
+}
+
+static inline long long atomic64_inc_return(atomic64_t *v)
+{
+ long long a;
+ asm volatile(ATOMIC64_ALTERNATIVE(inc_return)
+ : "=A" (a)
+ : "S" (v)
+ : "memory", "ecx"
+ );
+ return a;
+}
+
+static inline long long atomic64_dec_return(atomic64_t *v)
+{
+ long long a;
+ asm volatile(ATOMIC64_ALTERNATIVE(dec_return)
+ : "=A" (a)
+ : "S" (v)
+ : "memory", "ecx"
+ );
+ return a;
+}
/**
* atomic64_add - add integer to atomic64 variable
- * @delta: integer value to add
- * @ptr: pointer to type atomic64_t
+ * @i: integer value to add
+ * @v: pointer to type atomic64_t
*
- * Atomically adds @delta to @ptr.
+ * Atomically adds @i to @v.
*/
-extern void atomic64_add(u64 delta, atomic64_t *ptr);
+static inline long long atomic64_add(long long i, atomic64_t *v)
+{
+ asm volatile(ATOMIC64_ALTERNATIVE_(add, add_return)
+ : "+A" (i), "+c" (v)
+ : : "memory"
+ );
+ return i;
+}
/**
* atomic64_sub - subtract the atomic64 variable
- * @delta: integer value to subtract
- * @ptr: pointer to type atomic64_t
+ * @i: integer value to subtract
+ * @v: pointer to type atomic64_t
*
- * Atomically subtracts @delta from @ptr.
+ * Atomically subtracts @i from @v.
*/
-extern void atomic64_sub(u64 delta, atomic64_t *ptr);
+static inline long long atomic64_sub(long long i, atomic64_t *v)
+{
+ asm volatile(ATOMIC64_ALTERNATIVE_(sub, sub_return)
+ : "+A" (i), "+c" (v)
+ : : "memory"
+ );
+ return i;
+}
/**
* atomic64_sub_and_test - subtract value from variable and test result
- * @delta: integer value to subtract
- * @ptr: pointer to type atomic64_t
- *
- * Atomically subtracts @delta from @ptr and returns
+ * @i: integer value to subtract
+ * @v: pointer to type atomic64_t
+ *
+ * Atomically subtracts @i from @v and returns
* true if the result is zero, or false for all
* other cases.
*/
-extern int atomic64_sub_and_test(u64 delta, atomic64_t *ptr);
+static inline int atomic64_sub_and_test(long long i, atomic64_t *v)
+{
+ return atomic64_sub_return(i, v) == 0;
+}
/**
* atomic64_inc - increment atomic64 variable
- * @ptr: pointer to type atomic64_t
+ * @v: pointer to type atomic64_t
*
- * Atomically increments @ptr by 1.
+ * Atomically increments @v by 1.
*/
-extern void atomic64_inc(atomic64_t *ptr);
+static inline void atomic64_inc(atomic64_t *v)
+{
+ asm volatile(ATOMIC64_ALTERNATIVE_(inc, inc_return)
+ : : "S" (v)
+ : "memory", "eax", "ecx", "edx"
+ );
+}
/**
* atomic64_dec - decrement atomic64 variable
@@ -124,37 +208,97 @@ extern void atomic64_inc(atomic64_t *ptr);
*
* Atomically decrements @ptr by 1.
*/
-extern void atomic64_dec(atomic64_t *ptr);
+static inline void atomic64_dec(atomic64_t *v)
+{
+ asm volatile(ATOMIC64_ALTERNATIVE_(dec, dec_return)
+ : : "S" (v)
+ : "memory", "eax", "ecx", "edx"
+ );
+}
/**
* atomic64_dec_and_test - decrement and test
- * @ptr: pointer to type atomic64_t
+ * @v: pointer to type atomic64_t
*
- * Atomically decrements @ptr by 1 and
+ * Atomically decrements @v by 1 and
* returns true if the result is 0, or false for all other
* cases.
*/
-extern int atomic64_dec_and_test(atomic64_t *ptr);
+static inline int atomic64_dec_and_test(atomic64_t *v)
+{
+ return atomic64_dec_return(v) == 0;
+}
/**
* atomic64_inc_and_test - increment and test
- * @ptr: pointer to type atomic64_t
+ * @v: pointer to type atomic64_t
*
- * Atomically increments @ptr by 1
+ * Atomically increments @v by 1
* and returns true if the result is zero, or false for all
* other cases.
*/
-extern int atomic64_inc_and_test(atomic64_t *ptr);
+static inline int atomic64_inc_and_test(atomic64_t *v)
+{
+ return atomic64_inc_return(v) == 0;
+}
/**
* atomic64_add_negative - add and test if negative
- * @delta: integer value to add
- * @ptr: pointer to type atomic64_t
+ * @i: integer value to add
+ * @v: pointer to type atomic64_t
*
- * Atomically adds @delta to @ptr and returns true
+ * Atomically adds @i to @v and returns true
* if the result is negative, or false when
* result is greater than or equal to zero.
*/
-extern int atomic64_add_negative(u64 delta, atomic64_t *ptr);
+static inline int atomic64_add_negative(long long i, atomic64_t *v)
+{
+ return atomic64_add_return(i, v) < 0;
+}
+
+/**
+ * atomic64_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic64_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns non-zero if @v was not @u, and zero otherwise.
+ */
+static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
+{
+ unsigned low = (unsigned)u;
+ unsigned high = (unsigned)(u >> 32);
+ asm volatile(ATOMIC64_ALTERNATIVE(add_unless) "\n\t"
+ : "+A" (a), "+c" (v), "+S" (low), "+D" (high)
+ : : "memory");
+ return (int)a;
+}
+
+
+static inline int atomic64_inc_not_zero(atomic64_t *v)
+{
+ int r;
+ asm volatile(ATOMIC64_ALTERNATIVE(inc_not_zero)
+ : "=a" (r)
+ : "S" (v)
+ : "ecx", "edx", "memory"
+ );
+ return r;
+}
+
+static inline long long atomic64_dec_if_positive(atomic64_t *v)
+{
+ long long r;
+ asm volatile(ATOMIC64_ALTERNATIVE(dec_if_positive)
+ : "=A" (r)
+ : "S" (v)
+ : "ecx", "memory"
+ );
+ return r;
+}
+
+#undef ATOMIC64_ALTERNATIVE
+#undef ATOMIC64_ALTERNATIVE_
#endif /* _ASM_X86_ATOMIC64_32_H */
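
[Reviewer note, not part of the patch] On 32-bit, every atomic64 operation is now an out-of-line call chosen by ATOMIC64_ALTERNATIVE: straight to the cmpxchg8b ("cx8") implementation when CONFIG_X86_CMPXCHG64 is set, otherwise an ALTERNATIVE keyed on X86_FEATURE_CX8 between a plain-386 fallback and the cx8 version. The cx8 variants amount to a compare-and-exchange retry loop, roughly like this C sketch (illustrative only):

	/* cmpxchg64-based sketch of atomic64_add_return(). */
	static inline long long atomic64_add_return_sketch(long long i, atomic64_t *v)
	{
		long long old, new;

		do {
			old = atomic64_read(v);
			new = old + i;
		} while (atomic64_cmpxchg(v, old, new) != old);

		return new;
	}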
diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
index 51c5b4056929..49fd1ea22951 100644
--- a/arch/x86/include/asm/atomic64_64.h
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -18,7 +18,7 @@
*/
static inline long atomic64_read(const atomic64_t *v)
{
- return v->counter;
+ return (*(volatile long *)&(v)->counter);
}
/**
@@ -221,4 +221,27 @@ static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+/*
+ * atomic64_dec_if_positive - decrement by 1 if old value positive
+ * @v: pointer of type atomic64_t
+ *
+ * The function returns the old value of *v minus 1, even if
+ * the atomic variable, v, was not decremented.
+ */
+static inline long atomic64_dec_if_positive(atomic64_t *v)
+{
+ long c, old, dec;
+ c = atomic64_read(v);
+ for (;;) {
+ dec = c - 1;
+ if (unlikely(dec < 0))
+ break;
+ old = atomic64_cmpxchg((v), c, dec);
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+ return dec;
+}
+
#endif /* _ASM_X86_ATOMIC64_64_H */
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index 02b47a603fc8..545776efeb16 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -444,7 +444,9 @@ static inline int fls(int x)
#define ARCH_HAS_FAST_MULTIPLIER 1
-#include <asm-generic/bitops/hweight.h>
+#include <asm/arch_hweight.h>
+
+#include <asm-generic/bitops/const_hweight.h>
#endif /* __KERNEL__ */
diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
index 7a1065958ba9..3b62ab56c7a0 100644
--- a/arch/x86/include/asm/boot.h
+++ b/arch/x86/include/asm/boot.h
@@ -24,7 +24,7 @@
#define MIN_KERNEL_ALIGN (_AC(1, UL) << MIN_KERNEL_ALIGN_LG2)
#if (CONFIG_PHYSICAL_ALIGN & (CONFIG_PHYSICAL_ALIGN-1)) || \
- (CONFIG_PHYSICAL_ALIGN < (_AC(1, UL) << MIN_KERNEL_ALIGN_LG2))
+ (CONFIG_PHYSICAL_ALIGN < MIN_KERNEL_ALIGN)
#error "Invalid value for CONFIG_PHYSICAL_ALIGN"
#endif
diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
index 634c40a739a6..63e35ec9075c 100644
--- a/arch/x86/include/asm/cacheflush.h
+++ b/arch/x86/include/asm/cacheflush.h
@@ -44,9 +44,6 @@ static inline void copy_from_user_page(struct vm_area_struct *vma,
memcpy(dst, src, len);
}
-#define PG_WC PG_arch_1
-PAGEFLAG(WC, WC)
-
#ifdef CONFIG_X86_PAT
/*
* X86 PAT uses page flags WC and Uncached together to keep track of
@@ -55,16 +52,24 @@ PAGEFLAG(WC, WC)
* _PAGE_CACHE_UC_MINUS and fourth state where page's memory type has not
* been changed from its default (value of -1 used to denote this).
* Note we do not support _PAGE_CACHE_UC here.
- *
- * Caller must hold memtype_lock for atomicity.
*/
+
+#define _PGMT_DEFAULT 0
+#define _PGMT_WC (1UL << PG_arch_1)
+#define _PGMT_UC_MINUS (1UL << PG_uncached)
+#define _PGMT_WB (1UL << PG_uncached | 1UL << PG_arch_1)
+#define _PGMT_MASK (1UL << PG_uncached | 1UL << PG_arch_1)
+#define _PGMT_CLEAR_MASK (~_PGMT_MASK)
+
static inline unsigned long get_page_memtype(struct page *pg)
{
- if (!PageUncached(pg) && !PageWC(pg))
+ unsigned long pg_flags = pg->flags & _PGMT_MASK;
+
+ if (pg_flags == _PGMT_DEFAULT)
return -1;
- else if (!PageUncached(pg) && PageWC(pg))
+ else if (pg_flags == _PGMT_WC)
return _PAGE_CACHE_WC;
- else if (PageUncached(pg) && !PageWC(pg))
+ else if (pg_flags == _PGMT_UC_MINUS)
return _PAGE_CACHE_UC_MINUS;
else
return _PAGE_CACHE_WB;
@@ -72,25 +77,26 @@ static inline unsigned long get_page_memtype(struct page *pg)
static inline void set_page_memtype(struct page *pg, unsigned long memtype)
{
+ unsigned long memtype_flags = _PGMT_DEFAULT;
+ unsigned long old_flags;
+ unsigned long new_flags;
+
switch (memtype) {
case _PAGE_CACHE_WC:
- ClearPageUncached(pg);
- SetPageWC(pg);
+ memtype_flags = _PGMT_WC;
break;
case _PAGE_CACHE_UC_MINUS:
- SetPageUncached(pg);
- ClearPageWC(pg);
+ memtype_flags = _PGMT_UC_MINUS;
break;
case _PAGE_CACHE_WB:
- SetPageUncached(pg);
- SetPageWC(pg);
- break;
- default:
- case -1:
- ClearPageUncached(pg);
- ClearPageWC(pg);
+ memtype_flags = _PGMT_WB;
break;
}
+
+ do {
+ old_flags = pg->flags;
+ new_flags = (old_flags & _PGMT_CLEAR_MASK) | memtype_flags;
+ } while (cmpxchg(&pg->flags, old_flags, new_flags) != old_flags);
}
#else
static inline unsigned long get_page_memtype(struct page *pg) { return -1; }
@@ -139,9 +145,11 @@ int set_memory_np(unsigned long addr, int numpages);
int set_memory_4k(unsigned long addr, int numpages);
int set_memory_array_uc(unsigned long *addr, int addrinarray);
+int set_memory_array_wc(unsigned long *addr, int addrinarray);
int set_memory_array_wb(unsigned long *addr, int addrinarray);
int set_pages_array_uc(struct page **pages, int addrinarray);
+int set_pages_array_wc(struct page **pages, int addrinarray);
int set_pages_array_wb(struct page **pages, int addrinarray);
/*
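The new _PGMT_* scheme packs four memory-type states into the two page-flag bits PG_uncached and PG_arch_1, and set_page_memtype() rewrites only those two bits with a cmpxchg() loop so concurrent updates to unrelated flag bits are not lost. A user-space sketch of the same read-modify-write pattern (a GCC builtin stands in for the kernel's cmpxchg(); the mask and values are arbitrary):

#include <stdio.h>

#define MT_MASK 0x3UL           /* two low bits play the role of the memtype bits */

static unsigned long flags;     /* stands in for page->flags in this illustration */

static void set_memtype_bits(unsigned long memtype_bits)
{
        unsigned long old, new;

        do {
                old = flags;
                new = (old & ~MT_MASK) | (memtype_bits & MT_MASK);
        } while (!__sync_bool_compare_and_swap(&flags, old, new));
}

int main(void)
{
        flags = 0xf0;                           /* pretend other flag bits are set */
        set_memtype_bits(0x2);
        printf("flags = %#lx\n", flags);        /* 0xf2: other bits preserved      */
        return 0;
}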
diff --git a/arch/x86/include/asm/cmpxchg_32.h b/arch/x86/include/asm/cmpxchg_32.h
index ffb9bb6b6c37..8859e12dd3cf 100644
--- a/arch/x86/include/asm/cmpxchg_32.h
+++ b/arch/x86/include/asm/cmpxchg_32.h
@@ -271,7 +271,8 @@ extern unsigned long long cmpxchg_486_u64(volatile void *, u64, u64);
__typeof__(*(ptr)) __ret; \
__typeof__(*(ptr)) __old = (o); \
__typeof__(*(ptr)) __new = (n); \
- alternative_io("call cmpxchg8b_emu", \
+ alternative_io(LOCK_PREFIX_HERE \
+ "call cmpxchg8b_emu", \
"lock; cmpxchg8b (%%esi)" , \
X86_FEATURE_CX8, \
"=A" (__ret), \
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 0cd82d068613..468145914389 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -161,6 +161,7 @@
*/
#define X86_FEATURE_IDA (7*32+ 0) /* Intel Dynamic Acceleration */
#define X86_FEATURE_ARAT (7*32+ 1) /* Always Running APIC Timer */
+#define X86_FEATURE_CPB (7*32+ 2) /* AMD Core Performance Boost */
/* Virtualization flags: Linux defined */
#define X86_FEATURE_TPR_SHADOW (8*32+ 0) /* Intel TPR Shadow */
@@ -175,6 +176,7 @@
#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
+#include <asm/asm.h>
#include <linux/bitops.h>
extern const char * const x86_cap_flags[NCAPINTS*32];
@@ -283,6 +285,69 @@ extern const char * const x86_power_flags[32];
#endif /* CONFIG_X86_64 */
+/*
+ * Static testing of CPU features. Used in the same way as boot_cpu_has().
+ * These are only valid after alternatives have run, but will statically
+ * patch the target code for additional performance.
+ *
+ */
+static __always_inline __pure bool __static_cpu_has(u8 bit)
+{
+#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)
+ asm goto("1: jmp %l[t_no]\n"
+ "2:\n"
+ ".section .altinstructions,\"a\"\n"
+ _ASM_ALIGN "\n"
+ _ASM_PTR "1b\n"
+ _ASM_PTR "0\n" /* no replacement */
+ " .byte %P0\n" /* feature bit */
+ " .byte 2b - 1b\n" /* source len */
+ " .byte 0\n" /* replacement len */
+ " .byte 0xff + 0 - (2b-1b)\n" /* padding */
+ ".previous\n"
+ : : "i" (bit) : : t_no);
+ return true;
+ t_no:
+ return false;
+#else
+ u8 flag;
+ /* Open-coded due to __stringify() in ALTERNATIVE() */
+ asm volatile("1: movb $0,%0\n"
+ "2:\n"
+ ".section .altinstructions,\"a\"\n"
+ _ASM_ALIGN "\n"
+ _ASM_PTR "1b\n"
+ _ASM_PTR "3f\n"
+ " .byte %P1\n" /* feature bit */
+ " .byte 2b - 1b\n" /* source len */
+ " .byte 4f - 3f\n" /* replacement len */
+ " .byte 0xff + (4f-3f) - (2b-1b)\n" /* padding */
+ ".previous\n"
+ ".section .altinstr_replacement,\"ax\"\n"
+ "3: movb $1,%0\n"
+ "4:\n"
+ ".previous\n"
+ : "=qm" (flag) : "i" (bit));
+ return flag;
+#endif
+}
+
+#if __GNUC__ >= 4
+#define static_cpu_has(bit) \
+( \
+ __builtin_constant_p(boot_cpu_has(bit)) ? \
+ boot_cpu_has(bit) : \
+ (__builtin_constant_p(bit) && !((bit) & ~0xff)) ? \
+ __static_cpu_has(bit) : \
+ boot_cpu_has(bit) \
+)
+#else
+/*
+ * gcc 3.x is too stupid to do the static test; fall back to dynamic.
+ */
+#define static_cpu_has(bit) boot_cpu_has(bit)
+#endif
+
#endif /* defined(__KERNEL__) && !defined(__ASSEMBLY__) */
#endif /* _ASM_X86_CPUFEATURE_H */
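static_cpu_has() is meant for hot paths where the feature test should compile down to a patched straight-line branch; the i387.h change later in this patch (use_xsave()) is the in-tree example. A hypothetical caller would look like this (do_xsave_path() and do_fxsave_path() are made-up names):

/* Sketch only: branch is patched by the alternatives machinery at boot. */
static inline void flush_my_state(void)
{
        if (static_cpu_has(X86_FEATURE_XSAVE))
                do_xsave_path();
        else
                do_fxsave_path();
}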
diff --git a/arch/x86/include/asm/ds.h b/arch/x86/include/asm/ds.h
deleted file mode 100644
index 70dac199b093..000000000000
--- a/arch/x86/include/asm/ds.h
+++ /dev/null
@@ -1,302 +0,0 @@
-/*
- * Debug Store (DS) support
- *
- * This provides a low-level interface to the hardware's Debug Store
- * feature that is used for branch trace store (BTS) and
- * precise-event based sampling (PEBS).
- *
- * It manages:
- * - DS and BTS hardware configuration
- * - buffer overflow handling (to be done)
- * - buffer access
- *
- * It does not do:
- * - security checking (is the caller allowed to trace the task)
- * - buffer allocation (memory accounting)
- *
- *
- * Copyright (C) 2007-2009 Intel Corporation.
- * Markus Metzger <markus.t.metzger@intel.com>, 2007-2009
- */
-
-#ifndef _ASM_X86_DS_H
-#define _ASM_X86_DS_H
-
-
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/err.h>
-
-
-#ifdef CONFIG_X86_DS
-
-struct task_struct;
-struct ds_context;
-struct ds_tracer;
-struct bts_tracer;
-struct pebs_tracer;
-
-typedef void (*bts_ovfl_callback_t)(struct bts_tracer *);
-typedef void (*pebs_ovfl_callback_t)(struct pebs_tracer *);
-
-
-/*
- * A list of features plus corresponding macros to talk about them in
- * the ds_request function's flags parameter.
- *
- * We use the enum to index an array of corresponding control bits;
- * we use the macro to index a flags bit-vector.
- */
-enum ds_feature {
- dsf_bts = 0,
- dsf_bts_kernel,
-#define BTS_KERNEL (1 << dsf_bts_kernel)
- /* trace kernel-mode branches */
-
- dsf_bts_user,
-#define BTS_USER (1 << dsf_bts_user)
- /* trace user-mode branches */
-
- dsf_bts_overflow,
- dsf_bts_max,
- dsf_pebs = dsf_bts_max,
-
- dsf_pebs_max,
- dsf_ctl_max = dsf_pebs_max,
- dsf_bts_timestamps = dsf_ctl_max,
-#define BTS_TIMESTAMPS (1 << dsf_bts_timestamps)
- /* add timestamps into BTS trace */
-
-#define BTS_USER_FLAGS (BTS_KERNEL | BTS_USER | BTS_TIMESTAMPS)
-};
-
-
-/*
- * Request BTS or PEBS
- *
- * Due to alignement constraints, the actual buffer may be slightly
- * smaller than the requested or provided buffer.
- *
- * Returns a pointer to a tracer structure on success, or
- * ERR_PTR(errcode) on failure.
- *
- * The interrupt threshold is independent from the overflow callback
- * to allow users to use their own overflow interrupt handling mechanism.
- *
- * The function might sleep.
- *
- * task: the task to request recording for
- * cpu: the cpu to request recording for
- * base: the base pointer for the (non-pageable) buffer;
- * size: the size of the provided buffer in bytes
- * ovfl: pointer to a function to be called on buffer overflow;
- * NULL if cyclic buffer requested
- * th: the interrupt threshold in records from the end of the buffer;
- * -1 if no interrupt threshold is requested.
- * flags: a bit-mask of the above flags
- */
-extern struct bts_tracer *ds_request_bts_task(struct task_struct *task,
- void *base, size_t size,
- bts_ovfl_callback_t ovfl,
- size_t th, unsigned int flags);
-extern struct bts_tracer *ds_request_bts_cpu(int cpu, void *base, size_t size,
- bts_ovfl_callback_t ovfl,
- size_t th, unsigned int flags);
-extern struct pebs_tracer *ds_request_pebs_task(struct task_struct *task,
- void *base, size_t size,
- pebs_ovfl_callback_t ovfl,
- size_t th, unsigned int flags);
-extern struct pebs_tracer *ds_request_pebs_cpu(int cpu,
- void *base, size_t size,
- pebs_ovfl_callback_t ovfl,
- size_t th, unsigned int flags);
-
-/*
- * Release BTS or PEBS resources
- * Suspend and resume BTS or PEBS tracing
- *
- * Must be called with irq's enabled.
- *
- * tracer: the tracer handle returned from ds_request_~()
- */
-extern void ds_release_bts(struct bts_tracer *tracer);
-extern void ds_suspend_bts(struct bts_tracer *tracer);
-extern void ds_resume_bts(struct bts_tracer *tracer);
-extern void ds_release_pebs(struct pebs_tracer *tracer);
-extern void ds_suspend_pebs(struct pebs_tracer *tracer);
-extern void ds_resume_pebs(struct pebs_tracer *tracer);
-
-/*
- * Release BTS or PEBS resources
- * Suspend and resume BTS or PEBS tracing
- *
- * Cpu tracers must call this on the traced cpu.
- * Task tracers must call ds_release_~_noirq() for themselves.
- *
- * May be called with irq's disabled.
- *
- * Returns 0 if successful;
- * -EPERM if the cpu tracer does not trace the current cpu.
- * -EPERM if the task tracer does not trace itself.
- *
- * tracer: the tracer handle returned from ds_request_~()
- */
-extern int ds_release_bts_noirq(struct bts_tracer *tracer);
-extern int ds_suspend_bts_noirq(struct bts_tracer *tracer);
-extern int ds_resume_bts_noirq(struct bts_tracer *tracer);
-extern int ds_release_pebs_noirq(struct pebs_tracer *tracer);
-extern int ds_suspend_pebs_noirq(struct pebs_tracer *tracer);
-extern int ds_resume_pebs_noirq(struct pebs_tracer *tracer);
-
-
-/*
- * The raw DS buffer state as it is used for BTS and PEBS recording.
- *
- * This is the low-level, arch-dependent interface for working
- * directly on the raw trace data.
- */
-struct ds_trace {
- /* the number of bts/pebs records */
- size_t n;
- /* the size of a bts/pebs record in bytes */
- size_t size;
- /* pointers into the raw buffer:
- - to the first entry */
- void *begin;
- /* - one beyond the last entry */
- void *end;
- /* - one beyond the newest entry */
- void *top;
- /* - the interrupt threshold */
- void *ith;
- /* flags given on ds_request() */
- unsigned int flags;
-};
-
-/*
- * An arch-independent view on branch trace data.
- */
-enum bts_qualifier {
- bts_invalid,
-#define BTS_INVALID bts_invalid
-
- bts_branch,
-#define BTS_BRANCH bts_branch
-
- bts_task_arrives,
-#define BTS_TASK_ARRIVES bts_task_arrives
-
- bts_task_departs,
-#define BTS_TASK_DEPARTS bts_task_departs
-
- bts_qual_bit_size = 4,
- bts_qual_max = (1 << bts_qual_bit_size),
-};
-
-struct bts_struct {
- __u64 qualifier;
- union {
- /* BTS_BRANCH */
- struct {
- __u64 from;
- __u64 to;
- } lbr;
- /* BTS_TASK_ARRIVES or BTS_TASK_DEPARTS */
- struct {
- __u64 clock;
- pid_t pid;
- } event;
- } variant;
-};
-
-
-/*
- * The BTS state.
- *
- * This gives access to the raw DS state and adds functions to provide
- * an arch-independent view of the BTS data.
- */
-struct bts_trace {
- struct ds_trace ds;
-
- int (*read)(struct bts_tracer *tracer, const void *at,
- struct bts_struct *out);
- int (*write)(struct bts_tracer *tracer, const struct bts_struct *in);
-};
-
-
-/*
- * The PEBS state.
- *
- * This gives access to the raw DS state and the PEBS-specific counter
- * reset value.
- */
-struct pebs_trace {
- struct ds_trace ds;
-
- /* the number of valid counters in the below array */
- unsigned int counters;
-
-#define MAX_PEBS_COUNTERS 4
- /* the counter reset value */
- unsigned long long counter_reset[MAX_PEBS_COUNTERS];
-};
-
-
-/*
- * Read the BTS or PEBS trace.
- *
- * Returns a view on the trace collected for the parameter tracer.
- *
- * The view remains valid as long as the traced task is not running or
- * the tracer is suspended.
- * Writes into the trace buffer are not reflected.
- *
- * tracer: the tracer handle returned from ds_request_~()
- */
-extern const struct bts_trace *ds_read_bts(struct bts_tracer *tracer);
-extern const struct pebs_trace *ds_read_pebs(struct pebs_tracer *tracer);
-
-
-/*
- * Reset the write pointer of the BTS/PEBS buffer.
- *
- * Returns 0 on success; -Eerrno on error
- *
- * tracer: the tracer handle returned from ds_request_~()
- */
-extern int ds_reset_bts(struct bts_tracer *tracer);
-extern int ds_reset_pebs(struct pebs_tracer *tracer);
-
-/*
- * Set the PEBS counter reset value.
- *
- * Returns 0 on success; -Eerrno on error
- *
- * tracer: the tracer handle returned from ds_request_pebs()
- * counter: the index of the counter
- * value: the new counter reset value
- */
-extern int ds_set_pebs_reset(struct pebs_tracer *tracer,
- unsigned int counter, u64 value);
-
-/*
- * Initialization
- */
-struct cpuinfo_x86;
-extern void __cpuinit ds_init_intel(struct cpuinfo_x86 *);
-
-/*
- * Context switch work
- */
-extern void ds_switch_to(struct task_struct *prev, struct task_struct *next);
-
-#else /* CONFIG_X86_DS */
-
-struct cpuinfo_x86;
-static inline void __cpuinit ds_init_intel(struct cpuinfo_x86 *ignored) {}
-static inline void ds_switch_to(struct task_struct *prev,
- struct task_struct *next) {}
-
-#endif /* CONFIG_X86_DS */
-#endif /* _ASM_X86_DS_H */
diff --git a/arch/x86/include/asm/dwarf2.h b/arch/x86/include/asm/dwarf2.h
index ae6253ab9029..733f7e91e7a9 100644
--- a/arch/x86/include/asm/dwarf2.h
+++ b/arch/x86/include/asm/dwarf2.h
@@ -34,6 +34,18 @@
#define CFI_SIGNAL_FRAME
#endif
+#if defined(CONFIG_AS_CFI_SECTIONS) && defined(__ASSEMBLY__)
+ /*
+ * Emit CFI data in .debug_frame sections, not .eh_frame sections.
+ * The latter we currently just discard since we don't do DWARF
+ * unwinding at runtime. So only the offline DWARF information is
+ * useful to anyone. Note we should not use this directive if this
+ * file is used in the vDSO assembly, or if vmlinux.lds.S gets
+ * changed so it doesn't discard .eh_frame.
+ */
+ .cfi_sections .debug_frame
+#endif
+
#else
/*
diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
index 0e22296790d3..ec8a52d14ab1 100644
--- a/arch/x86/include/asm/e820.h
+++ b/arch/x86/include/asm/e820.h
@@ -45,7 +45,12 @@
#define E820_NVS 4
#define E820_UNUSABLE 5
-/* reserved RAM used by kernel itself */
+/*
+ * reserved RAM used by kernel itself
+ * if CONFIG_INTEL_TXT is enabled, memory of this type will be
+ * included in the S3 integrity calculation and so should not include
+ * any memory that BIOS might alter over the S3 transition
+ */
#define E820_RESERVED_KERN 128
#ifndef __ASSEMBLY__
diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h
index 0f8576427cfe..aeab29aee617 100644
--- a/arch/x86/include/asm/hardirq.h
+++ b/arch/x86/include/asm/hardirq.h
@@ -35,7 +35,7 @@ DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
#define __ARCH_IRQ_STAT
-#define inc_irq_stat(member) percpu_add(irq_stat.member, 1)
+#define inc_irq_stat(member) percpu_inc(irq_stat.member)
#define local_softirq_pending() percpu_read(irq_stat.__softirq_pending)
diff --git a/arch/x86/include/asm/hpet.h b/arch/x86/include/asm/hpet.h
index 1d5c08a1bdfd..004e6e25e913 100644
--- a/arch/x86/include/asm/hpet.h
+++ b/arch/x86/include/asm/hpet.h
@@ -68,6 +68,7 @@ extern unsigned long force_hpet_address;
extern u8 hpet_blockid;
extern int hpet_force_user;
extern u8 hpet_msi_disable;
+extern u8 hpet_readback_cmp;
extern int is_hpet_enabled(void);
extern int hpet_enable(void);
extern void hpet_disable(void);
diff --git a/arch/x86/include/asm/hw_breakpoint.h b/arch/x86/include/asm/hw_breakpoint.h
index 2a1bd8f4f23a..942255310e6a 100644
--- a/arch/x86/include/asm/hw_breakpoint.h
+++ b/arch/x86/include/asm/hw_breakpoint.h
@@ -41,12 +41,16 @@ struct arch_hw_breakpoint {
/* Total number of available HW breakpoint registers */
#define HBP_NUM 4
+static inline int hw_breakpoint_slots(int type)
+{
+ return HBP_NUM;
+}
+
struct perf_event;
struct pmu;
-extern int arch_check_va_in_userspace(unsigned long va, u8 hbp_len);
-extern int arch_validate_hwbkpt_settings(struct perf_event *bp,
- struct task_struct *tsk);
+extern int arch_check_bp_in_kernelspace(struct perf_event *bp);
+extern int arch_validate_hwbkpt_settings(struct perf_event *bp);
extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
unsigned long val, void *data);
diff --git a/arch/x86/include/asm/hyperv.h b/arch/x86/include/asm/hyperv.h
index e153a2b3889a..5df477ac3af7 100644
--- a/arch/x86/include/asm/hyperv.h
+++ b/arch/x86/include/asm/hyperv.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_X86_KVM_HYPERV_H
-#define _ASM_X86_KVM_HYPERV_H
+#ifndef _ASM_X86_HYPERV_H
+#define _ASM_X86_HYPERV_H
#include <linux/types.h>
@@ -14,6 +14,10 @@
#define HYPERV_CPUID_ENLIGHTMENT_INFO 0x40000004
#define HYPERV_CPUID_IMPLEMENT_LIMITS 0x40000005
+#define HYPERV_HYPERVISOR_PRESENT_BIT 0x80000000
+#define HYPERV_CPUID_MIN 0x40000005
+#define HYPERV_CPUID_MAX 0x4000ffff
+
/*
* Feature identification. EAX indicates which features are available
* to the partition based upon the current partition privileges.
@@ -129,6 +133,9 @@
/* MSR used to provide vcpu index */
#define HV_X64_MSR_VP_INDEX 0x40000002
+/* MSR used to read the per-partition time reference counter */
+#define HV_X64_MSR_TIME_REF_COUNT 0x40000020
+
/* Define the virtual APIC registers */
#define HV_X64_MSR_EOI 0x40000070
#define HV_X64_MSR_ICR 0x40000071
diff --git a/arch/x86/include/asm/hypervisor.h b/arch/x86/include/asm/hypervisor.h
index b78c0941e422..70abda7058c8 100644
--- a/arch/x86/include/asm/hypervisor.h
+++ b/arch/x86/include/asm/hypervisor.h
@@ -17,10 +17,33 @@
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
*/
-#ifndef ASM_X86__HYPERVISOR_H
-#define ASM_X86__HYPERVISOR_H
+#ifndef _ASM_X86_HYPERVISOR_H
+#define _ASM_X86_HYPERVISOR_H
extern void init_hypervisor(struct cpuinfo_x86 *c);
extern void init_hypervisor_platform(void);
+/*
+ * x86 hypervisor information
+ */
+struct hypervisor_x86 {
+ /* Hypervisor name */
+ const char *name;
+
+ /* Detection routine */
+ bool (*detect)(void);
+
+ /* Adjust CPU feature bits (run once per CPU) */
+ void (*set_cpu_features)(struct cpuinfo_x86 *);
+
+ /* Platform setup (run once per boot) */
+ void (*init_platform)(void);
+};
+
+extern const struct hypervisor_x86 *x86_hyper;
+
+/* Recognized hypervisors */
+extern const struct hypervisor_x86 x86_hyper_vmware;
+extern const struct hypervisor_x86 x86_hyper_ms_hyperv;
+
#endif
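The real implementations behind the two declarations above live in the VMware and MS Hyper-V detection code; a back end only has to fill in the ops it needs. A sketch of what such a registration looks like (all my_hv_* names are invented and the detection logic is left as a stub):

static bool __init my_hv_detect(void)
{
        /* e.g. probe the 0x40000000+ CPUID leaves here */
        return false;
}

static void __init my_hv_init_platform(void)
{
        /* register clocksources, remap MSRs, etc. */
}

const struct hypervisor_x86 x86_hyper_my_hv = {
        .name           = "MyHV",
        .detect         = my_hv_detect,
        .init_platform  = my_hv_init_platform,
};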
diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
index da2930924501..c991b3a7b904 100644
--- a/arch/x86/include/asm/i387.h
+++ b/arch/x86/include/asm/i387.h
@@ -16,7 +16,9 @@
#include <linux/kernel_stat.h>
#include <linux/regset.h>
#include <linux/hardirq.h>
+#include <linux/slab.h>
#include <asm/asm.h>
+#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/sigcontext.h>
#include <asm/user.h>
@@ -56,6 +58,11 @@ extern int restore_i387_xstate_ia32(void __user *buf);
#define X87_FSW_ES (1 << 7) /* Exception Summary */
+static __always_inline __pure bool use_xsave(void)
+{
+ return static_cpu_has(X86_FEATURE_XSAVE);
+}
+
#ifdef CONFIG_X86_64
/* Ignore delayed exceptions from user space */
@@ -91,15 +98,15 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
values. The kernel data segment can be sometimes 0 and sometimes
new user value. Both should be ok.
Use the PDA as safe address because it should be already in L1. */
-static inline void clear_fpu_state(struct task_struct *tsk)
+static inline void fpu_clear(struct fpu *fpu)
{
- struct xsave_struct *xstate = &tsk->thread.xstate->xsave;
- struct i387_fxsave_struct *fx = &tsk->thread.xstate->fxsave;
+ struct xsave_struct *xstate = &fpu->state->xsave;
+ struct i387_fxsave_struct *fx = &fpu->state->fxsave;
/*
* xsave header may indicate the init state of the FP.
*/
- if ((task_thread_info(tsk)->status & TS_XSAVE) &&
+ if (use_xsave() &&
!(xstate->xsave_hdr.xstate_bv & XSTATE_FP))
return;
@@ -111,6 +118,11 @@ static inline void clear_fpu_state(struct task_struct *tsk)
X86_FEATURE_FXSAVE_LEAK);
}
+static inline void clear_fpu_state(struct task_struct *tsk)
+{
+ fpu_clear(&tsk->thread.fpu);
+}
+
static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
{
int err;
@@ -135,7 +147,7 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
return err;
}
-static inline void fxsave(struct task_struct *tsk)
+static inline void fpu_fxsave(struct fpu *fpu)
{
/* Using "rex64; fxsave %0" is broken because, if the memory operand
uses any extended registers for addressing, a second REX prefix
@@ -145,42 +157,45 @@ static inline void fxsave(struct task_struct *tsk)
/* Using "fxsaveq %0" would be the ideal choice, but is only supported
starting with gas 2.16. */
__asm__ __volatile__("fxsaveq %0"
- : "=m" (tsk->thread.xstate->fxsave));
+ : "=m" (fpu->state->fxsave));
#elif 0
/* Using, as a workaround, the properly prefixed form below isn't
accepted by any binutils version so far released, complaining that
the same type of prefix is used twice if an extended register is
needed for addressing (fix submitted to mainline 2005-11-21). */
__asm__ __volatile__("rex64/fxsave %0"
- : "=m" (tsk->thread.xstate->fxsave));
+ : "=m" (fpu->state->fxsave));
#else
/* This, however, we can work around by forcing the compiler to select
an addressing mode that doesn't require extended registers. */
__asm__ __volatile__("rex64/fxsave (%1)"
- : "=m" (tsk->thread.xstate->fxsave)
- : "cdaSDb" (&tsk->thread.xstate->fxsave));
+ : "=m" (fpu->state->fxsave)
+ : "cdaSDb" (&fpu->state->fxsave));
#endif
}
-static inline void __save_init_fpu(struct task_struct *tsk)
+static inline void fpu_save_init(struct fpu *fpu)
{
- if (task_thread_info(tsk)->status & TS_XSAVE)
- xsave(tsk);
+ if (use_xsave())
+ fpu_xsave(fpu);
else
- fxsave(tsk);
+ fpu_fxsave(fpu);
+
+ fpu_clear(fpu);
+}
- clear_fpu_state(tsk);
+static inline void __save_init_fpu(struct task_struct *tsk)
+{
+ fpu_save_init(&tsk->thread.fpu);
task_thread_info(tsk)->status &= ~TS_USEDFPU;
}
#else /* CONFIG_X86_32 */
#ifdef CONFIG_MATH_EMULATION
-extern void finit_task(struct task_struct *tsk);
+extern void finit_soft_fpu(struct i387_soft_struct *soft);
#else
-static inline void finit_task(struct task_struct *tsk)
-{
-}
+static inline void finit_soft_fpu(struct i387_soft_struct *soft) {}
#endif
static inline void tolerant_fwait(void)
@@ -216,13 +231,13 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
/*
* These must be called with preempt disabled
*/
-static inline void __save_init_fpu(struct task_struct *tsk)
+static inline void fpu_save_init(struct fpu *fpu)
{
- if (task_thread_info(tsk)->status & TS_XSAVE) {
- struct xsave_struct *xstate = &tsk->thread.xstate->xsave;
- struct i387_fxsave_struct *fx = &tsk->thread.xstate->fxsave;
+ if (use_xsave()) {
+ struct xsave_struct *xstate = &fpu->state->xsave;
+ struct i387_fxsave_struct *fx = &fpu->state->fxsave;
- xsave(tsk);
+ fpu_xsave(fpu);
/*
* xsave header may indicate the init state of the FP.
@@ -246,8 +261,8 @@ static inline void __save_init_fpu(struct task_struct *tsk)
"fxsave %[fx]\n"
"bt $7,%[fsw] ; jnc 1f ; fnclex\n1:",
X86_FEATURE_FXSR,
- [fx] "m" (tsk->thread.xstate->fxsave),
- [fsw] "m" (tsk->thread.xstate->fxsave.swd) : "memory");
+ [fx] "m" (fpu->state->fxsave),
+ [fsw] "m" (fpu->state->fxsave.swd) : "memory");
clear_state:
/* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
is pending. Clear the x87 state here by setting it to fixed
@@ -259,17 +274,34 @@ clear_state:
X86_FEATURE_FXSAVE_LEAK,
[addr] "m" (safe_address));
end:
+ ;
+}
+
+static inline void __save_init_fpu(struct task_struct *tsk)
+{
+ fpu_save_init(&tsk->thread.fpu);
task_thread_info(tsk)->status &= ~TS_USEDFPU;
}
+
#endif /* CONFIG_X86_64 */
-static inline int restore_fpu_checking(struct task_struct *tsk)
+static inline int fpu_fxrstor_checking(struct fpu *fpu)
{
- if (task_thread_info(tsk)->status & TS_XSAVE)
- return xrstor_checking(&tsk->thread.xstate->xsave);
+ return fxrstor_checking(&fpu->state->fxsave);
+}
+
+static inline int fpu_restore_checking(struct fpu *fpu)
+{
+ if (use_xsave())
+ return fpu_xrstor_checking(fpu);
else
- return fxrstor_checking(&tsk->thread.xstate->fxsave);
+ return fpu_fxrstor_checking(fpu);
+}
+
+static inline int restore_fpu_checking(struct task_struct *tsk)
+{
+ return fpu_restore_checking(&tsk->thread.fpu);
}
/*
@@ -397,30 +429,59 @@ static inline void clear_fpu(struct task_struct *tsk)
static inline unsigned short get_fpu_cwd(struct task_struct *tsk)
{
if (cpu_has_fxsr) {
- return tsk->thread.xstate->fxsave.cwd;
+ return tsk->thread.fpu.state->fxsave.cwd;
} else {
- return (unsigned short)tsk->thread.xstate->fsave.cwd;
+ return (unsigned short)tsk->thread.fpu.state->fsave.cwd;
}
}
static inline unsigned short get_fpu_swd(struct task_struct *tsk)
{
if (cpu_has_fxsr) {
- return tsk->thread.xstate->fxsave.swd;
+ return tsk->thread.fpu.state->fxsave.swd;
} else {
- return (unsigned short)tsk->thread.xstate->fsave.swd;
+ return (unsigned short)tsk->thread.fpu.state->fsave.swd;
}
}
static inline unsigned short get_fpu_mxcsr(struct task_struct *tsk)
{
if (cpu_has_xmm) {
- return tsk->thread.xstate->fxsave.mxcsr;
+ return tsk->thread.fpu.state->fxsave.mxcsr;
} else {
return MXCSR_DEFAULT;
}
}
+static bool fpu_allocated(struct fpu *fpu)
+{
+ return fpu->state != NULL;
+}
+
+static inline int fpu_alloc(struct fpu *fpu)
+{
+ if (fpu_allocated(fpu))
+ return 0;
+ fpu->state = kmem_cache_alloc(task_xstate_cachep, GFP_KERNEL);
+ if (!fpu->state)
+ return -ENOMEM;
+ WARN_ON((unsigned long)fpu->state & 15);
+ return 0;
+}
+
+static inline void fpu_free(struct fpu *fpu)
+{
+ if (fpu->state) {
+ kmem_cache_free(task_xstate_cachep, fpu->state);
+ fpu->state = NULL;
+ }
+}
+
+static inline void fpu_copy(struct fpu *dst, struct fpu *src)
+{
+ memcpy(dst->state, src->state, xstate_size);
+}
+
#endif /* __ASSEMBLY__ */
#define PSHUFB_XMM5_XMM0 .byte 0x66, 0x0f, 0x38, 0x00, 0xc5
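With the state pointer now wrapped in struct fpu, duplicating a task's FPU context becomes allocate-then-copy. A sketch under the assumption that the caller owns both tasks (arch_dup_fpu() is a made-up name; the real users in this series are the process-creation paths):

static int arch_dup_fpu(struct task_struct *dst, struct task_struct *src)
{
        int err;

        dst->thread.fpu.state = NULL;           /* start with no backing store */
        err = fpu_alloc(&dst->thread.fpu);      /* kmem_cache allocation       */
        if (err)
                return err;

        fpu_copy(&dst->thread.fpu, &src->thread.fpu);   /* memcpy of xstate_size bytes */
        return 0;
}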
diff --git a/arch/x86/include/asm/i8253.h b/arch/x86/include/asm/i8253.h
index 1edbf89680fd..fc1f579fb965 100644
--- a/arch/x86/include/asm/i8253.h
+++ b/arch/x86/include/asm/i8253.h
@@ -6,7 +6,7 @@
#define PIT_CH0 0x40
#define PIT_CH2 0x42
-extern spinlock_t i8253_lock;
+extern raw_spinlock_t i8253_lock;
extern struct clock_event_device *global_clock_event;
diff --git a/arch/x86/include/asm/insn.h b/arch/x86/include/asm/insn.h
index 96c2e0ad04ca..88c765e16410 100644
--- a/arch/x86/include/asm/insn.h
+++ b/arch/x86/include/asm/insn.h
@@ -68,6 +68,8 @@ struct insn {
const insn_byte_t *next_byte;
};
+#define MAX_INSN_SIZE 16
+
#define X86_MODRM_MOD(modrm) (((modrm) & 0xc0) >> 6)
#define X86_MODRM_REG(modrm) (((modrm) & 0x38) >> 3)
#define X86_MODRM_RM(modrm) ((modrm) & 0x07)
diff --git a/arch/x86/include/asm/inst.h b/arch/x86/include/asm/inst.h
index 14cf526091f9..280bf7fb6aba 100644
--- a/arch/x86/include/asm/inst.h
+++ b/arch/x86/include/asm/inst.h
@@ -7,7 +7,66 @@
#ifdef __ASSEMBLY__
+#define REG_NUM_INVALID 100
+
+#define REG_TYPE_R64 0
+#define REG_TYPE_XMM 1
+#define REG_TYPE_INVALID 100
+
+ .macro R64_NUM opd r64
+ \opd = REG_NUM_INVALID
+ .ifc \r64,%rax
+ \opd = 0
+ .endif
+ .ifc \r64,%rcx
+ \opd = 1
+ .endif
+ .ifc \r64,%rdx
+ \opd = 2
+ .endif
+ .ifc \r64,%rbx
+ \opd = 3
+ .endif
+ .ifc \r64,%rsp
+ \opd = 4
+ .endif
+ .ifc \r64,%rbp
+ \opd = 5
+ .endif
+ .ifc \r64,%rsi
+ \opd = 6
+ .endif
+ .ifc \r64,%rdi
+ \opd = 7
+ .endif
+ .ifc \r64,%r8
+ \opd = 8
+ .endif
+ .ifc \r64,%r9
+ \opd = 9
+ .endif
+ .ifc \r64,%r10
+ \opd = 10
+ .endif
+ .ifc \r64,%r11
+ \opd = 11
+ .endif
+ .ifc \r64,%r12
+ \opd = 12
+ .endif
+ .ifc \r64,%r13
+ \opd = 13
+ .endif
+ .ifc \r64,%r14
+ \opd = 14
+ .endif
+ .ifc \r64,%r15
+ \opd = 15
+ .endif
+ .endm
+
.macro XMM_NUM opd xmm
+ \opd = REG_NUM_INVALID
.ifc \xmm,%xmm0
\opd = 0
.endif
@@ -58,13 +117,25 @@
.endif
.endm
+ .macro REG_TYPE type reg
+ R64_NUM reg_type_r64 \reg
+ XMM_NUM reg_type_xmm \reg
+ .if reg_type_r64 <> REG_NUM_INVALID
+ \type = REG_TYPE_R64
+ .elseif reg_type_xmm <> REG_NUM_INVALID
+ \type = REG_TYPE_XMM
+ .else
+ \type = REG_TYPE_INVALID
+ .endif
+ .endm
+
.macro PFX_OPD_SIZE
.byte 0x66
.endm
- .macro PFX_REX opd1 opd2
- .if (\opd1 | \opd2) & 8
- .byte 0x40 | ((\opd1 & 8) >> 3) | ((\opd2 & 8) >> 1)
+ .macro PFX_REX opd1 opd2 W=0
+ .if ((\opd1 | \opd2) & 8) || \W
+ .byte 0x40 | ((\opd1 & 8) >> 3) | ((\opd2 & 8) >> 1) | (\W << 3)
.endif
.endm
@@ -145,6 +216,25 @@
.byte 0x0f, 0x38, 0xdf
MODRM 0xc0 aesdeclast_opd1 aesdeclast_opd2
.endm
+
+ .macro MOVQ_R64_XMM opd1 opd2
+ REG_TYPE movq_r64_xmm_opd1_type \opd1
+ .if movq_r64_xmm_opd1_type == REG_TYPE_XMM
+ XMM_NUM movq_r64_xmm_opd1 \opd1
+ R64_NUM movq_r64_xmm_opd2 \opd2
+ .else
+ R64_NUM movq_r64_xmm_opd1 \opd1
+ XMM_NUM movq_r64_xmm_opd2 \opd2
+ .endif
+ PFX_OPD_SIZE
+ PFX_REX movq_r64_xmm_opd1 movq_r64_xmm_opd2 1
+ .if movq_r64_xmm_opd1_type == REG_TYPE_XMM
+ .byte 0x0f, 0x7e
+ .else
+ .byte 0x0f, 0x6e
+ .endif
+ MODRM 0xc0 movq_r64_xmm_opd1 movq_r64_xmm_opd2
+ .endm
#endif
#endif
diff --git a/arch/x86/include/asm/intel_scu_ipc.h b/arch/x86/include/asm/intel_scu_ipc.h
new file mode 100644
index 000000000000..4470c9ad4a3e
--- /dev/null
+++ b/arch/x86/include/asm/intel_scu_ipc.h
@@ -0,0 +1,55 @@
+#ifndef _ASM_X86_INTEL_SCU_IPC_H_
+#define _ASM_X86_INTEL_SCU_IPC_H_
+
+/* Read single register */
+int intel_scu_ipc_ioread8(u16 addr, u8 *data);
+
+/* Read two sequential registers */
+int intel_scu_ipc_ioread16(u16 addr, u16 *data);
+
+/* Read four sequential registers */
+int intel_scu_ipc_ioread32(u16 addr, u32 *data);
+
+/* Read a vector */
+int intel_scu_ipc_readv(u16 *addr, u8 *data, int len);
+
+/* Write single register */
+int intel_scu_ipc_iowrite8(u16 addr, u8 data);
+
+/* Write two sequential registers */
+int intel_scu_ipc_iowrite16(u16 addr, u16 data);
+
+/* Write four sequential registers */
+int intel_scu_ipc_iowrite32(u16 addr, u32 data);
+
+/* Write a vector */
+int intel_scu_ipc_writev(u16 *addr, u8 *data, int len);
+
+/* Update single register based on the mask */
+int intel_scu_ipc_update_register(u16 addr, u8 data, u8 mask);
+
+/*
+ * Indirect register read
+ * Can be used when SCCB (System Controller Configuration Block) register
+ * HRIM (Honor Restricted IPC Messages) is set (bit 23)
+ */
+int intel_scu_ipc_register_read(u32 addr, u32 *data);
+
+/*
+ * Indirect register write
+ * Can be used when SCCB (System Controller Configuration Block) register
+ * HRIM (Honor Restricted IPC Messages) is set (bit 23)
+ */
+int intel_scu_ipc_register_write(u32 addr, u32 data);
+
+/* Issue commands to the SCU with or without data */
+int intel_scu_ipc_simple_command(int cmd, int sub);
+int intel_scu_ipc_command(int cmd, int sub, u32 *in, int inlen,
+ u32 *out, int outlen);
+/* I2C control api */
+int intel_scu_ipc_i2c_cntrl(u32 addr, u32 *data);
+
+/* Update FW version */
+int intel_scu_ipc_fw_update(u8 *buffer, u32 length);
+
+#endif
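A hypothetical platform-driver fragment using the new SCU IPC interface; only the intel_scu_ipc_*() calls come from the header above, the register address and bit layout are invented:

#define PMIC_REG_STATUS 0x0100          /* made-up register address */

static int read_pmic_status(u8 *status)
{
        return intel_scu_ipc_ioread8(PMIC_REG_STATUS, status);
}

static int set_pmic_status_bit(void)
{
        /* read-modify-write of a single bit via the mask-based helper */
        return intel_scu_ipc_update_register(PMIC_REG_STATUS, 0x01, 0x01);
}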
diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h
index 35832a03a515..63cb4096c3dc 100644
--- a/arch/x86/include/asm/io_apic.h
+++ b/arch/x86/include/asm/io_apic.h
@@ -159,7 +159,6 @@ struct io_apic_irq_attr;
extern int io_apic_set_pci_routing(struct device *dev, int irq,
struct io_apic_irq_attr *irq_attr);
void setup_IO_APIC_irq_extra(u32 gsi);
-extern int (*ioapic_renumber_irq)(int ioapic, int irq);
extern void ioapic_init_mappings(void);
extern void ioapic_insert_resources(void);
@@ -180,12 +179,13 @@ extern void ioapic_write_entry(int apic, int pin,
extern void setup_ioapic_ids_from_mpc(void);
struct mp_ioapic_gsi{
- int gsi_base;
- int gsi_end;
+ u32 gsi_base;
+ u32 gsi_end;
};
extern struct mp_ioapic_gsi mp_gsi_routing[];
-int mp_find_ioapic(int gsi);
-int mp_find_ioapic_pin(int ioapic, int gsi);
+extern u32 gsi_end;
+int mp_find_ioapic(u32 gsi);
+int mp_find_ioapic_pin(int ioapic, u32 gsi);
void __init mp_register_ioapic(int id, u32 address, u32 gsi_base);
extern void __init pre_init_apic_IRQ0(void);
@@ -197,7 +197,8 @@ static const int timer_through_8259 = 0;
static inline void ioapic_init_mappings(void) { }
static inline void ioapic_insert_resources(void) { }
static inline void probe_nr_irqs_gsi(void) { }
-static inline int mp_find_ioapic(int gsi) { return 0; }
+#define gsi_end (NR_IRQS_LEGACY - 1)
+static inline int mp_find_ioapic(u32 gsi) { return 0; }
struct io_apic_irq_attr;
static inline int io_apic_set_pci_routing(struct device *dev, int irq,
diff --git a/arch/x86/include/asm/k8.h b/arch/x86/include/asm/k8.h
index f70e60071fe8..af00bd1d2089 100644
--- a/arch/x86/include/asm/k8.h
+++ b/arch/x86/include/asm/k8.h
@@ -16,11 +16,16 @@ extern int k8_numa_init(unsigned long start_pfn, unsigned long end_pfn);
extern int k8_scan_nodes(void);
#ifdef CONFIG_K8_NB
+extern int num_k8_northbridges;
+
static inline struct pci_dev *node_to_k8_nb_misc(int node)
{
return (node < num_k8_northbridges) ? k8_northbridges[node] : NULL;
}
+
#else
+#define num_k8_northbridges 0
+
static inline struct pci_dev *node_to_k8_nb_misc(int node)
{
return NULL;
diff --git a/arch/x86/include/asm/kgdb.h b/arch/x86/include/asm/kgdb.h
index e6c6c808489f..006da3687cdc 100644
--- a/arch/x86/include/asm/kgdb.h
+++ b/arch/x86/include/asm/kgdb.h
@@ -76,4 +76,7 @@ static inline void arch_kgdb_breakpoint(void)
#define BREAK_INSTR_SIZE 1
#define CACHE_FLUSH_IS_SAFE 1
+extern int kgdb_ll_trap(int cmd, const char *str,
+ struct pt_regs *regs, long err, int trap, int sig);
+
#endif /* _ASM_X86_KGDB_H */
diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
index 4ffa345a8ccb..547882539157 100644
--- a/arch/x86/include/asm/kprobes.h
+++ b/arch/x86/include/asm/kprobes.h
@@ -24,6 +24,7 @@
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/percpu.h>
+#include <asm/insn.h>
#define __ARCH_WANT_KPROBES_INSN_SLOT
@@ -36,7 +37,6 @@ typedef u8 kprobe_opcode_t;
#define RELATIVEJUMP_SIZE 5
#define RELATIVECALL_OPCODE 0xe8
#define RELATIVE_ADDR_SIZE 4
-#define MAX_INSN_SIZE 16
#define MAX_STACK_SIZE 64
#define MIN_STACK_SIZE(ADDR) \
(((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
diff --git a/arch/x86/include/asm/kvm.h b/arch/x86/include/asm/kvm.h
index f46b79f6c16c..ff90055c7f0b 100644
--- a/arch/x86/include/asm/kvm.h
+++ b/arch/x86/include/asm/kvm.h
@@ -21,6 +21,7 @@
#define __KVM_HAVE_PIT_STATE2
#define __KVM_HAVE_XEN_HVM
#define __KVM_HAVE_VCPU_EVENTS
+#define __KVM_HAVE_DEBUGREGS
/* Architectural interrupt line count. */
#define KVM_NR_INTERRUPTS 256
@@ -257,6 +258,11 @@ struct kvm_reinject_control {
/* When set in flags, include corresponding fields on KVM_SET_VCPU_EVENTS */
#define KVM_VCPUEVENT_VALID_NMI_PENDING 0x00000001
#define KVM_VCPUEVENT_VALID_SIPI_VECTOR 0x00000002
+#define KVM_VCPUEVENT_VALID_SHADOW 0x00000004
+
+/* Interrupt shadow states */
+#define KVM_X86_SHADOW_INT_MOV_SS 0x01
+#define KVM_X86_SHADOW_INT_STI 0x02
/* for KVM_GET/SET_VCPU_EVENTS */
struct kvm_vcpu_events {
@@ -271,7 +277,7 @@ struct kvm_vcpu_events {
__u8 injected;
__u8 nr;
__u8 soft;
- __u8 pad;
+ __u8 shadow;
} interrupt;
struct {
__u8 injected;
@@ -284,4 +290,13 @@ struct kvm_vcpu_events {
__u32 reserved[10];
};
+/* for KVM_GET/SET_DEBUGREGS */
+struct kvm_debugregs {
+ __u64 db[4];
+ __u64 dr6;
+ __u64 dr7;
+ __u64 flags;
+ __u64 reserved[9];
+};
+
#endif /* _ASM_X86_KVM_H */
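From user space the new structure is exchanged with the matching KVM_GET_DEBUGREGS/KVM_SET_DEBUGREGS ioctls added elsewhere in this series (guarded by __KVM_HAVE_DEBUGREGS). A minimal sketch, assuming an already-created vcpu_fd:

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdio.h>

static int dump_guest_debugregs(int vcpu_fd)
{
        struct kvm_debugregs dbg;

        if (ioctl(vcpu_fd, KVM_GET_DEBUGREGS, &dbg) < 0)
                return -1;

        printf("dr6=%llx dr7=%llx\n",
               (unsigned long long)dbg.dr6, (unsigned long long)dbg.dr7);
        return 0;
}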
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index 7a6f54fa13ba..0b2729bf2070 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -11,6 +11,8 @@
#ifndef _ASM_X86_KVM_X86_EMULATE_H
#define _ASM_X86_KVM_X86_EMULATE_H
+#include <asm/desc_defs.h>
+
struct x86_emulate_ctxt;
/*
@@ -63,6 +65,15 @@ struct x86_emulate_ops {
unsigned int bytes, struct kvm_vcpu *vcpu, u32 *error);
/*
+ * write_std: Write bytes of standard (non-emulated/special) memory.
+ * Used for descriptor writing.
+ * @addr: [IN ] Linear address to which to write.
+ * @val: [IN ] Value to write to memory.
+ * @bytes: [IN ] Number of bytes to write to memory.
+ */
+ int (*write_std)(unsigned long addr, void *val,
+ unsigned int bytes, struct kvm_vcpu *vcpu, u32 *error);
+ /*
* fetch: Read bytes of standard (non-emulated/special) memory.
* Used for instruction fetch.
* @addr: [IN ] Linear address from which to read.
@@ -109,6 +120,23 @@ struct x86_emulate_ops {
unsigned int bytes,
struct kvm_vcpu *vcpu);
+ int (*pio_in_emulated)(int size, unsigned short port, void *val,
+ unsigned int count, struct kvm_vcpu *vcpu);
+
+ int (*pio_out_emulated)(int size, unsigned short port, const void *val,
+ unsigned int count, struct kvm_vcpu *vcpu);
+
+ bool (*get_cached_descriptor)(struct desc_struct *desc,
+ int seg, struct kvm_vcpu *vcpu);
+ void (*set_cached_descriptor)(struct desc_struct *desc,
+ int seg, struct kvm_vcpu *vcpu);
+ u16 (*get_segment_selector)(int seg, struct kvm_vcpu *vcpu);
+ void (*set_segment_selector)(u16 sel, int seg, struct kvm_vcpu *vcpu);
+ void (*get_gdt)(struct desc_ptr *dt, struct kvm_vcpu *vcpu);
+ ulong (*get_cr)(int cr, struct kvm_vcpu *vcpu);
+ void (*set_cr)(int cr, ulong val, struct kvm_vcpu *vcpu);
+ int (*cpl)(struct kvm_vcpu *vcpu);
+ void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
};
/* Type, address-of, and value of an instruction's operand. */
@@ -124,6 +152,12 @@ struct fetch_cache {
unsigned long end;
};
+struct read_cache {
+ u8 data[1024];
+ unsigned long pos;
+ unsigned long end;
+};
+
struct decode_cache {
u8 twobyte;
u8 b;
@@ -139,7 +173,7 @@ struct decode_cache {
u8 seg_override;
unsigned int d;
unsigned long regs[NR_VCPU_REGS];
- unsigned long eip, eip_orig;
+ unsigned long eip;
/* modrm */
u8 modrm;
u8 modrm_mod;
@@ -151,16 +185,15 @@ struct decode_cache {
void *modrm_ptr;
unsigned long modrm_val;
struct fetch_cache fetch;
+ struct read_cache io_read;
};
-#define X86_SHADOW_INT_MOV_SS 1
-#define X86_SHADOW_INT_STI 2
-
struct x86_emulate_ctxt {
/* Register state before/after emulation. */
struct kvm_vcpu *vcpu;
unsigned long eflags;
+ unsigned long eip; /* eip before instruction emulation */
/* Emulated execution mode, represented by an X86EMUL_MODE value. */
int mode;
u32 cs_base;
@@ -168,6 +201,7 @@ struct x86_emulate_ctxt {
/* interruptibility state, as a result of execution of STI or MOV SS */
int interruptibility;
+ bool restart; /* restart string instruction after writeback */
/* decode cache */
struct decode_cache decode;
};
@@ -194,5 +228,9 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops);
int x86_emulate_insn(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops);
+int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
+ struct x86_emulate_ops *ops,
+ u16 tss_selector, int reason,
+ bool has_error_code, u32 error_code);
#endif /* _ASM_X86_KVM_X86_EMULATE_H */
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 06d9e79ca37d..76f5483cffec 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -171,15 +171,15 @@ struct kvm_pte_chain {
union kvm_mmu_page_role {
unsigned word;
struct {
- unsigned glevels:4;
unsigned level:4;
+ unsigned cr4_pae:1;
unsigned quadrant:2;
unsigned pad_for_nice_hex_output:6;
unsigned direct:1;
unsigned access:3;
unsigned invalid:1;
- unsigned cr4_pge:1;
unsigned nxe:1;
+ unsigned cr0_wp:1;
};
};
@@ -187,8 +187,6 @@ struct kvm_mmu_page {
struct list_head link;
struct hlist_node hash_link;
- struct list_head oos_link;
-
/*
* The following two entries are used to key the shadow page in the
* hash table.
@@ -204,9 +202,9 @@ struct kvm_mmu_page {
* in this shadow page.
*/
DECLARE_BITMAP(slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
- int multimapped; /* More than one parent_pte? */
- int root_count; /* Currently serving as active root */
+ bool multimapped; /* More than one parent_pte? */
bool unsync;
+ int root_count; /* Currently serving as active root */
unsigned int unsync_children;
union {
u64 *parent_pte; /* !multimapped */
@@ -224,14 +222,9 @@ struct kvm_pv_mmu_op_buffer {
struct kvm_pio_request {
unsigned long count;
- int cur_count;
- gva_t guest_gva;
int in;
int port;
int size;
- int string;
- int down;
- int rep;
};
/*
@@ -320,6 +313,7 @@ struct kvm_vcpu_arch {
struct kvm_queued_exception {
bool pending;
bool has_error_code;
+ bool reinject;
u8 nr;
u32 error_code;
} exception;
@@ -362,8 +356,8 @@ struct kvm_vcpu_arch {
u64 *mce_banks;
/* used for guest single stepping over the given code position */
- u16 singlestep_cs;
unsigned long singlestep_rip;
+
/* fields used by HYPER-V emulation */
u64 hv_vapic;
};
@@ -389,6 +383,7 @@ struct kvm_arch {
unsigned int n_free_mmu_pages;
unsigned int n_requested_mmu_pages;
unsigned int n_alloc_mmu_pages;
+ atomic_t invlpg_counter;
struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
/*
* Hash table of struct kvm_mmu_page.
@@ -461,11 +456,6 @@ struct kvm_vcpu_stat {
u32 nmi_injections;
};
-struct descriptor_table {
- u16 limit;
- unsigned long base;
-} __attribute__((packed));
-
struct kvm_x86_ops {
int (*cpu_has_kvm_support)(void); /* __init */
int (*disabled_by_bios)(void); /* __init */
@@ -503,12 +493,11 @@ struct kvm_x86_ops {
void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
void (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
- void (*get_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
- void (*set_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
- void (*get_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
- void (*set_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
- int (*get_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long *dest);
- int (*set_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long value);
+ void (*get_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
+ void (*set_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
+ void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
+ void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
+ void (*set_dr7)(struct kvm_vcpu *vcpu, unsigned long value);
void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
@@ -527,7 +516,8 @@ struct kvm_x86_ops {
void (*set_irq)(struct kvm_vcpu *vcpu);
void (*set_nmi)(struct kvm_vcpu *vcpu);
void (*queue_exception)(struct kvm_vcpu *vcpu, unsigned nr,
- bool has_error_code, u32 error_code);
+ bool has_error_code, u32 error_code,
+ bool reinject);
int (*interrupt_allowed)(struct kvm_vcpu *vcpu);
int (*nmi_allowed)(struct kvm_vcpu *vcpu);
bool (*get_nmi_mask)(struct kvm_vcpu *vcpu);
@@ -541,6 +531,8 @@ struct kvm_x86_ops {
int (*get_lpage_level)(void);
bool (*rdtscp_supported)(void);
+ void (*set_supported_cpuid)(u32 func, struct kvm_cpuid_entry2 *entry);
+
const struct trace_print_flags *exit_reasons_str;
};
@@ -587,23 +579,14 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
void kvm_report_emulation_failure(struct kvm_vcpu *cvpu, const char *context);
void realmode_lgdt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
void realmode_lidt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
-void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
- unsigned long *rflags);
-unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr);
-void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long value,
- unsigned long *rflags);
void kvm_enable_efer_bits(u64);
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data);
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
struct x86_emulate_ctxt;
-int kvm_emulate_pio(struct kvm_vcpu *vcpu, int in,
- int size, unsigned port);
-int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, int in,
- int size, unsigned long count, int down,
- gva_t address, int rep, unsigned port);
+int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port);
void kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
int kvm_emulate_halt(struct kvm_vcpu *vcpu);
int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address);
@@ -616,12 +599,15 @@ int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg);
-int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason);
+int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason,
+ bool has_error_code, u32 error_code);
void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
+int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val);
+int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val);
unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu);
void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
@@ -634,6 +620,8 @@ void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
+void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr);
+void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long cr2,
u32 error_code);
bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl);
@@ -649,8 +637,6 @@ int emulator_write_emulated(unsigned long addr,
unsigned int bytes,
struct kvm_vcpu *vcpu);
-unsigned long segment_base(u16 selector);
-
void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu);
void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
const u8 *new, int bytes,
@@ -675,7 +661,6 @@ void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
void kvm_enable_tdp(void);
void kvm_disable_tdp(void);
-int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);
int complete_pio(struct kvm_vcpu *vcpu);
bool kvm_check_iopl(struct kvm_vcpu *vcpu);
@@ -724,23 +709,6 @@ static inline void kvm_load_ldt(u16 sel)
asm("lldt %0" : : "rm"(sel));
}
-static inline void kvm_get_idt(struct descriptor_table *table)
-{
- asm("sidt %0" : "=m"(*table));
-}
-
-static inline void kvm_get_gdt(struct descriptor_table *table)
-{
- asm("sgdt %0" : "=m"(*table));
-}
-
-static inline unsigned long kvm_read_tr_base(void)
-{
- u16 tr;
- asm("str %0" : "=g"(tr));
- return segment_base(tr);
-}
-
#ifdef CONFIG_X86_64
static inline unsigned long read_msr(unsigned long msr)
{
@@ -826,4 +794,6 @@ int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
void kvm_define_shared_msr(unsigned index, u32 msr);
void kvm_set_shared_msr(unsigned index, u64 val, u64 mask);
+bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);
+
#endif /* _ASM_X86_KVM_HOST_H */
diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
index ffae1420e7d7..05eba5e9a8e8 100644
--- a/arch/x86/include/asm/kvm_para.h
+++ b/arch/x86/include/asm/kvm_para.h
@@ -16,10 +16,23 @@
#define KVM_FEATURE_CLOCKSOURCE 0
#define KVM_FEATURE_NOP_IO_DELAY 1
#define KVM_FEATURE_MMU_OP 2
+/* This indicates that the new set of kvmclock msrs
+ * is available. The use of 0x11 and 0x12 is deprecated.
+ */
+#define KVM_FEATURE_CLOCKSOURCE2 3
+
+/* The last 8 bits are used to indicate how to interpret the flags field
+ * in pvclock structure. If no bits are set, all flags are ignored.
+ */
+#define KVM_FEATURE_CLOCKSOURCE_STABLE_BIT 24
#define MSR_KVM_WALL_CLOCK 0x11
#define MSR_KVM_SYSTEM_TIME 0x12
+/* Custom MSRs fall in the range 0x4b564d00-0x4b564dff */
+#define MSR_KVM_WALL_CLOCK_NEW 0x4b564d00
+#define MSR_KVM_SYSTEM_TIME_NEW 0x4b564d01
+
#define KVM_MAX_MMU_OP_BATCH 32
/* Operations for KVM_HC_MMU_OP */
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 6c3fdd631ed3..f32a4301c4d4 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -225,5 +225,13 @@ extern void mcheck_intel_therm_init(void);
static inline void mcheck_intel_therm_init(void) { }
#endif
+/*
+ * Used by APEI to report memory error via /dev/mcelog
+ */
+
+struct cper_sec_mem_err;
+extern void apei_mce_report_mem_error(int corrected,
+ struct cper_sec_mem_err *mem_err);
+
#endif /* __KERNEL__ */
#endif /* _ASM_X86_MCE_H */
diff --git a/arch/x86/include/asm/mpspec.h b/arch/x86/include/asm/mpspec.h
index d8bf23a88d05..c82868e9f905 100644
--- a/arch/x86/include/asm/mpspec.h
+++ b/arch/x86/include/asm/mpspec.h
@@ -105,16 +105,6 @@ extern void mp_config_acpi_legacy_irqs(void);
struct device;
extern int mp_register_gsi(struct device *dev, u32 gsi, int edge_level,
int active_high_low);
-extern int acpi_probe_gsi(void);
-#ifdef CONFIG_X86_IO_APIC
-extern int mp_find_ioapic(int gsi);
-extern int mp_find_ioapic_pin(int ioapic, int gsi);
-#endif
-#else /* !CONFIG_ACPI: */
-static inline int acpi_probe_gsi(void)
-{
- return 0;
-}
#endif /* CONFIG_ACPI */
#define PHYSID_ARRAY_SIZE BITS_TO_LONGS(MAX_APICS)
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
new file mode 100644
index 000000000000..79ce5685ab64
--- /dev/null
+++ b/arch/x86/include/asm/mshyperv.h
@@ -0,0 +1,14 @@
+#ifndef _ASM_X86_MSHYPER_H
+#define _ASM_X86_MSHYPER_H
+
+#include <linux/types.h>
+#include <asm/hyperv.h>
+
+struct ms_hyperv_info {
+ u32 features;
+ u32 hints;
+};
+
+extern struct ms_hyperv_info ms_hyperv;
+
+#endif
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 4604e6a54d36..b49d8ca228f6 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -71,11 +71,14 @@
#define MSR_IA32_LASTINTTOIP 0x000001de
/* DEBUGCTLMSR bits (others vary by model): */
-#define _DEBUGCTLMSR_LBR 0 /* last branch recording */
-#define _DEBUGCTLMSR_BTF 1 /* single-step on branches */
-
-#define DEBUGCTLMSR_LBR (1UL << _DEBUGCTLMSR_LBR)
-#define DEBUGCTLMSR_BTF (1UL << _DEBUGCTLMSR_BTF)
+#define DEBUGCTLMSR_LBR (1UL << 0) /* last branch recording */
+#define DEBUGCTLMSR_BTF (1UL << 1) /* single-step on branches */
+#define DEBUGCTLMSR_TR (1UL << 6)
+#define DEBUGCTLMSR_BTS (1UL << 7)
+#define DEBUGCTLMSR_BTINT (1UL << 8)
+#define DEBUGCTLMSR_BTS_OFF_OS (1UL << 9)
+#define DEBUGCTLMSR_BTS_OFF_USR (1UL << 10)
+#define DEBUGCTLMSR_FREEZE_LBRS_ON_PMI (1UL << 11)
#define MSR_IA32_MC0_CTL 0x00000400
#define MSR_IA32_MC0_STATUS 0x00000401
@@ -199,8 +202,9 @@
#define MSR_IA32_EBL_CR_POWERON 0x0000002a
#define MSR_IA32_FEATURE_CONTROL 0x0000003a
-#define FEATURE_CONTROL_LOCKED (1<<0)
-#define FEATURE_CONTROL_VMXON_ENABLED (1<<2)
+#define FEATURE_CONTROL_LOCKED (1<<0)
+#define FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX (1<<1)
+#define FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX (1<<2)
#define MSR_IA32_APICBASE 0x0000001b
#define MSR_IA32_APICBASE_BSP (1<<8)
@@ -232,6 +236,8 @@
#define MSR_IA32_MISC_ENABLE 0x000001a0
+#define MSR_IA32_TEMPERATURE_TARGET 0x000001a2
+
/* MISC_ENABLE bits: architectural */
#define MSR_IA32_MISC_ENABLE_FAST_STRING (1ULL << 0)
#define MSR_IA32_MISC_ENABLE_TCC (1ULL << 1)
@@ -359,6 +365,8 @@
#define MSR_P4_U2L_ESCR0 0x000003b0
#define MSR_P4_U2L_ESCR1 0x000003b1
+#define MSR_P4_PEBS_MATRIX_VERT 0x000003f2
+
/* Intel Core-based CPU performance counters */
#define MSR_CORE_PERF_FIXED_CTR0 0x00000309
#define MSR_CORE_PERF_FIXED_CTR1 0x0000030a
diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
index 1a0422348d6d..8d8797eae5d7 100644
--- a/arch/x86/include/asm/pci_x86.h
+++ b/arch/x86/include/asm/pci_x86.h
@@ -83,7 +83,7 @@ struct irq_routing_table {
extern unsigned int pcibios_irq_mask;
-extern spinlock_t pci_config_lock;
+extern raw_spinlock_t pci_config_lock;
extern int (*pcibios_enable_irq)(struct pci_dev *dev);
extern void (*pcibios_disable_irq)(struct pci_dev *dev);
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 66a272dfd8b8..0797e748d280 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -105,7 +105,7 @@ do { \
/*
* Generate a percpu add to memory instruction and optimize code
- * if a one is added or subtracted.
+ * if one is added or subtracted.
*/
#define percpu_add_op(var, val) \
do { \
@@ -190,6 +190,29 @@ do { \
pfo_ret__; \
})
+#define percpu_unary_op(op, var) \
+({ \
+ switch (sizeof(var)) { \
+ case 1: \
+ asm(op "b "__percpu_arg(0) \
+ : "+m" (var)); \
+ break; \
+ case 2: \
+ asm(op "w "__percpu_arg(0) \
+ : "+m" (var)); \
+ break; \
+ case 4: \
+ asm(op "l "__percpu_arg(0) \
+ : "+m" (var)); \
+ break; \
+ case 8: \
+ asm(op "q "__percpu_arg(0) \
+ : "+m" (var)); \
+ break; \
+ default: __bad_percpu_size(); \
+ } \
+})
+
/*
* percpu_read() makes gcc load the percpu variable every time it is
* accessed while percpu_read_stable() allows the value to be cached.
@@ -207,6 +230,7 @@ do { \
#define percpu_and(var, val) percpu_to_op("and", var, val)
#define percpu_or(var, val) percpu_to_op("or", var, val)
#define percpu_xor(var, val) percpu_to_op("xor", var, val)
+#define percpu_inc(var) percpu_unary_op("inc", var)
#define __this_cpu_read_1(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
#define __this_cpu_read_2(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
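percpu_inc() emits a single inc instruction on the per-cpu address instead of an add of 1; the hardirq.h hunk above (inc_irq_stat) is the in-tree user. A hypothetical counter would be wired up like this (my_event_count is a made-up variable):

DECLARE_PER_CPU(unsigned int, my_event_count);

static inline void note_my_event(void)
{
        percpu_inc(my_event_count);     /* single incl/incq on the per-cpu slot */
}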
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index db6109a885a7..254883d0c7e0 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -5,7 +5,7 @@
* Performance event hw details:
*/
-#define X86_PMC_MAX_GENERIC 8
+#define X86_PMC_MAX_GENERIC 32
#define X86_PMC_MAX_FIXED 3
#define X86_PMC_IDX_GENERIC 0
@@ -18,39 +18,31 @@
#define MSR_ARCH_PERFMON_EVENTSEL0 0x186
#define MSR_ARCH_PERFMON_EVENTSEL1 0x187
-#define ARCH_PERFMON_EVENTSEL_ENABLE (1 << 22)
-#define ARCH_PERFMON_EVENTSEL_ANY (1 << 21)
-#define ARCH_PERFMON_EVENTSEL_INT (1 << 20)
-#define ARCH_PERFMON_EVENTSEL_OS (1 << 17)
-#define ARCH_PERFMON_EVENTSEL_USR (1 << 16)
-
-/*
- * Includes eventsel and unit mask as well:
- */
-
-
-#define INTEL_ARCH_EVTSEL_MASK 0x000000FFULL
-#define INTEL_ARCH_UNIT_MASK 0x0000FF00ULL
-#define INTEL_ARCH_EDGE_MASK 0x00040000ULL
-#define INTEL_ARCH_INV_MASK 0x00800000ULL
-#define INTEL_ARCH_CNT_MASK 0xFF000000ULL
-#define INTEL_ARCH_EVENT_MASK (INTEL_ARCH_UNIT_MASK|INTEL_ARCH_EVTSEL_MASK)
-
-/*
- * filter mask to validate fixed counter events.
- * the following filters disqualify for fixed counters:
- * - inv
- * - edge
- * - cnt-mask
- * The other filters are supported by fixed counters.
- * The any-thread option is supported starting with v3.
- */
-#define INTEL_ARCH_FIXED_MASK \
- (INTEL_ARCH_CNT_MASK| \
- INTEL_ARCH_INV_MASK| \
- INTEL_ARCH_EDGE_MASK|\
- INTEL_ARCH_UNIT_MASK|\
- INTEL_ARCH_EVENT_MASK)
+#define ARCH_PERFMON_EVENTSEL_EVENT 0x000000FFULL
+#define ARCH_PERFMON_EVENTSEL_UMASK 0x0000FF00ULL
+#define ARCH_PERFMON_EVENTSEL_USR (1ULL << 16)
+#define ARCH_PERFMON_EVENTSEL_OS (1ULL << 17)
+#define ARCH_PERFMON_EVENTSEL_EDGE (1ULL << 18)
+#define ARCH_PERFMON_EVENTSEL_INT (1ULL << 20)
+#define ARCH_PERFMON_EVENTSEL_ANY (1ULL << 21)
+#define ARCH_PERFMON_EVENTSEL_ENABLE (1ULL << 22)
+#define ARCH_PERFMON_EVENTSEL_INV (1ULL << 23)
+#define ARCH_PERFMON_EVENTSEL_CMASK 0xFF000000ULL
+
+#define AMD64_EVENTSEL_EVENT \
+ (ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32))
+#define INTEL_ARCH_EVENT_MASK \
+ (ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT)
+
+#define X86_RAW_EVENT_MASK \
+ (ARCH_PERFMON_EVENTSEL_EVENT | \
+ ARCH_PERFMON_EVENTSEL_UMASK | \
+ ARCH_PERFMON_EVENTSEL_EDGE | \
+ ARCH_PERFMON_EVENTSEL_INV | \
+ ARCH_PERFMON_EVENTSEL_CMASK)
+#define AMD64_RAW_EVENT_MASK \
+ (X86_RAW_EVENT_MASK | \
+ AMD64_EVENTSEL_EVENT)
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL 0x3c
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8)
@@ -67,7 +59,7 @@
union cpuid10_eax {
struct {
unsigned int version_id:8;
- unsigned int num_events:8;
+ unsigned int num_counters:8;
unsigned int bit_width:8;
unsigned int mask_length:8;
} split;
@@ -76,7 +68,7 @@ union cpuid10_eax {
union cpuid10_edx {
struct {
- unsigned int num_events_fixed:4;
+ unsigned int num_counters_fixed:4;
unsigned int reserved:28;
} split;
unsigned int full;
@@ -136,6 +128,18 @@ extern void perf_events_lapic_init(void);
#define PERF_EVENT_INDEX_OFFSET 0
+/*
+ * Abuse bit 3 of the cpu eflags register to indicate proper PEBS IP fixups.
+ * This flag is otherwise unused and ABI specified to be 0, so nobody should
+ * care what we do with it.
+ */
+#define PERF_EFLAGS_EXACT (1UL << 3)
+
+struct pt_regs;
+extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
+extern unsigned long perf_misc_flags(struct pt_regs *regs);
+#define perf_misc_flags(regs) perf_misc_flags(regs)
+
#else
static inline void init_hw_perf_events(void) { }
static inline void perf_events_lapic_init(void) { }
diff --git a/arch/x86/include/asm/perf_event_p4.h b/arch/x86/include/asm/perf_event_p4.h
new file mode 100644
index 000000000000..64a8ebff06fc
--- /dev/null
+++ b/arch/x86/include/asm/perf_event_p4.h
@@ -0,0 +1,795 @@
+/*
+ * Netburst Performance Events (P4, old Xeon)
+ */
+
+#ifndef PERF_EVENT_P4_H
+#define PERF_EVENT_P4_H
+
+#include <linux/cpu.h>
+#include <linux/bitops.h>
+
+/*
+ * NetBurst has performance MSRs shared between
+ * threads if HT is turned on, i.e. for both logical
+ * processors (note: in contrast, Atom with HT support
+ * does not share perf-MSRs, so every thread has its
+ * own perf-MSRs set)
+ */
+#define ARCH_P4_TOTAL_ESCR (46)
+#define ARCH_P4_RESERVED_ESCR (2) /* IQ_ESCR(0,1) not always present */
+#define ARCH_P4_MAX_ESCR (ARCH_P4_TOTAL_ESCR - ARCH_P4_RESERVED_ESCR)
+#define ARCH_P4_MAX_CCCR (18)
+#define ARCH_P4_MAX_COUNTER (ARCH_P4_MAX_CCCR / 2)
+
+#define P4_ESCR_EVENT_MASK 0x7e000000U
+#define P4_ESCR_EVENT_SHIFT 25
+#define P4_ESCR_EVENTMASK_MASK 0x01fffe00U
+#define P4_ESCR_EVENTMASK_SHIFT 9
+#define P4_ESCR_TAG_MASK 0x000001e0U
+#define P4_ESCR_TAG_SHIFT 5
+#define P4_ESCR_TAG_ENABLE 0x00000010U
+#define P4_ESCR_T0_OS 0x00000008U
+#define P4_ESCR_T0_USR 0x00000004U
+#define P4_ESCR_T1_OS 0x00000002U
+#define P4_ESCR_T1_USR 0x00000001U
+
+#define P4_ESCR_EVENT(v) ((v) << P4_ESCR_EVENT_SHIFT)
+#define P4_ESCR_EMASK(v) ((v) << P4_ESCR_EVENTMASK_SHIFT)
+#define P4_ESCR_TAG(v) ((v) << P4_ESCR_TAG_SHIFT)
+
+/* Non HT mask */
+#define P4_ESCR_MASK \
+ (P4_ESCR_EVENT_MASK | \
+ P4_ESCR_EVENTMASK_MASK | \
+ P4_ESCR_TAG_MASK | \
+ P4_ESCR_TAG_ENABLE | \
+ P4_ESCR_T0_OS | \
+ P4_ESCR_T0_USR)
+
+/* HT mask */
+#define P4_ESCR_MASK_HT \
+ (P4_ESCR_MASK | P4_ESCR_T1_OS | P4_ESCR_T1_USR)
+
+#define P4_CCCR_OVF 0x80000000U
+#define P4_CCCR_CASCADE 0x40000000U
+#define P4_CCCR_OVF_PMI_T0 0x04000000U
+#define P4_CCCR_OVF_PMI_T1 0x08000000U
+#define P4_CCCR_FORCE_OVF 0x02000000U
+#define P4_CCCR_EDGE 0x01000000U
+#define P4_CCCR_THRESHOLD_MASK 0x00f00000U
+#define P4_CCCR_THRESHOLD_SHIFT 20
+#define P4_CCCR_COMPLEMENT 0x00080000U
+#define P4_CCCR_COMPARE 0x00040000U
+#define P4_CCCR_ESCR_SELECT_MASK 0x0000e000U
+#define P4_CCCR_ESCR_SELECT_SHIFT 13
+#define P4_CCCR_ENABLE 0x00001000U
+#define P4_CCCR_THREAD_SINGLE 0x00010000U
+#define P4_CCCR_THREAD_BOTH 0x00020000U
+#define P4_CCCR_THREAD_ANY 0x00030000U
+#define P4_CCCR_RESERVED 0x00000fffU
+
+#define P4_CCCR_THRESHOLD(v) ((v) << P4_CCCR_THRESHOLD_SHIFT)
+#define P4_CCCR_ESEL(v) ((v) << P4_CCCR_ESCR_SELECT_SHIFT)
+
+/* Custom bits in reserved CCCR area */
+#define P4_CCCR_CACHE_OPS_MASK 0x0000003fU
+
+
+/* Non HT mask */
+#define P4_CCCR_MASK \
+ (P4_CCCR_OVF | \
+ P4_CCCR_CASCADE | \
+ P4_CCCR_OVF_PMI_T0 | \
+ P4_CCCR_FORCE_OVF | \
+ P4_CCCR_EDGE | \
+ P4_CCCR_THRESHOLD_MASK | \
+ P4_CCCR_COMPLEMENT | \
+ P4_CCCR_COMPARE | \
+ P4_CCCR_ESCR_SELECT_MASK | \
+ P4_CCCR_ENABLE)
+
+/* HT mask */
+#define P4_CCCR_MASK_HT \
+ (P4_CCCR_MASK | P4_CCCR_OVF_PMI_T1 | P4_CCCR_THREAD_ANY)
+
+#define P4_GEN_ESCR_EMASK(class, name, bit) \
+ class##__##name = ((1 << bit) << P4_ESCR_EVENTMASK_SHIFT)
+#define P4_ESCR_EMASK_BIT(class, name) class##__##name
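+
+/*
+ * For instance (derived from the two helpers above),
+ * P4_GEN_ESCR_EMASK(P4_EVENT_TC_DELIVER_MODE, DD, 0) produces the
+ * enumerator P4_EVENT_TC_DELIVER_MODE__DD with the value
+ * (1 << 0) << P4_ESCR_EVENTMASK_SHIFT, and
+ * P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, DD) names that same
+ * enumerator again.
+ */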
+
+/*
+ * the config field is 64 bits wide and consists of
+ * HT << 63 | ESCR << 32 | CCCR
+ * where HT is the HyperThreading bit (the ESCR has this
+ * bit reserved, so we may use it for our own purpose)
+ *
+ * note that these are NOT the addresses of the respective
+ * ESCR and CCCR but only a packed value, which has to be
+ * unpacked and then written to the proper addresses
+ *
+ * the basic idea is to pack as much info as
+ * possible
+ */
+#define p4_config_pack_escr(v) (((u64)(v)) << 32)
+#define p4_config_pack_cccr(v) (((u64)(v)) & 0xffffffffULL)
+#define p4_config_unpack_escr(v) (((u64)(v)) >> 32)
+#define p4_config_unpack_cccr(v) (((u64)(v)) & 0xffffffffULL)
+
+#define p4_config_unpack_emask(v) \
+ ({ \
+ u32 t = p4_config_unpack_escr((v)); \
+ t = t & P4_ESCR_EVENTMASK_MASK; \
+ t = t >> P4_ESCR_EVENTMASK_SHIFT; \
+ t; \
+ })
+
+#define p4_config_unpack_event(v) \
+ ({ \
+ u32 t = p4_config_unpack_escr((v)); \
+ t = t & P4_ESCR_EVENT_MASK; \
+ t = t >> P4_ESCR_EVENT_SHIFT; \
+ t; \
+ })
+
+#define p4_config_unpack_cache_event(v) (((u64)(v)) & P4_CCCR_CACHE_OPS_MASK)
+
+#define P4_CONFIG_HT_SHIFT 63
+#define P4_CONFIG_HT (1ULL << P4_CONFIG_HT_SHIFT)
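+
+/*
+ * A minimal usage sketch: a config is built as
+ *
+ *	p4_config_pack_escr(escr) | p4_config_pack_cccr(cccr)
+ *
+ * and later taken apart again with p4_config_unpack_escr()/_cccr();
+ * p4_config_unpack_event() extracts the 6 bit event code from the
+ * packed ESCR half via P4_ESCR_EVENT_MASK/P4_ESCR_EVENT_SHIFT.
+ */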
+
+static inline bool p4_is_event_cascaded(u64 config)
+{
+ u32 cccr = p4_config_unpack_cccr(config);
+ return !!(cccr & P4_CCCR_CASCADE);
+}
+
+static inline int p4_ht_config_thread(u64 config)
+{
+ return !!(config & P4_CONFIG_HT);
+}
+
+static inline u64 p4_set_ht_bit(u64 config)
+{
+ return config | P4_CONFIG_HT;
+}
+
+static inline u64 p4_clear_ht_bit(u64 config)
+{
+ return config & ~P4_CONFIG_HT;
+}
+
+static inline int p4_ht_active(void)
+{
+#ifdef CONFIG_SMP
+ return smp_num_siblings > 1;
+#endif
+ return 0;
+}
+
+static inline int p4_ht_thread(int cpu)
+{
+#ifdef CONFIG_SMP
+ if (smp_num_siblings == 2)
+ return cpu != cpumask_first(__get_cpu_var(cpu_sibling_map));
+#endif
+ return 0;
+}
+
+static inline int p4_should_swap_ts(u64 config, int cpu)
+{
+ return p4_ht_config_thread(config) ^ p4_ht_thread(cpu);
+}
+
+static inline u32 p4_default_cccr_conf(int cpu)
+{
+ /*
+ * Note that P4_CCCR_THREAD_ANY is "required" on
+ * non-HT machines (on HT machines we count TS events
+ * regardless of the state of the second logical processor)
+ */
+ u32 cccr = P4_CCCR_THREAD_ANY;
+
+ if (!p4_ht_thread(cpu))
+ cccr |= P4_CCCR_OVF_PMI_T0;
+ else
+ cccr |= P4_CCCR_OVF_PMI_T1;
+
+ return cccr;
+}
+
+static inline u32 p4_default_escr_conf(int cpu, int exclude_os, int exclude_usr)
+{
+ u32 escr = 0;
+
+ if (!p4_ht_thread(cpu)) {
+ if (!exclude_os)
+ escr |= P4_ESCR_T0_OS;
+ if (!exclude_usr)
+ escr |= P4_ESCR_T0_USR;
+ } else {
+ if (!exclude_os)
+ escr |= P4_ESCR_T1_OS;
+ if (!exclude_usr)
+ escr |= P4_ESCR_T1_USR;
+ }
+
+ return escr;
+}
+
+enum P4_EVENTS {
+ P4_EVENT_TC_DELIVER_MODE,
+ P4_EVENT_BPU_FETCH_REQUEST,
+ P4_EVENT_ITLB_REFERENCE,
+ P4_EVENT_MEMORY_CANCEL,
+ P4_EVENT_MEMORY_COMPLETE,
+ P4_EVENT_LOAD_PORT_REPLAY,
+ P4_EVENT_STORE_PORT_REPLAY,
+ P4_EVENT_MOB_LOAD_REPLAY,
+ P4_EVENT_PAGE_WALK_TYPE,
+ P4_EVENT_BSQ_CACHE_REFERENCE,
+ P4_EVENT_IOQ_ALLOCATION,
+ P4_EVENT_IOQ_ACTIVE_ENTRIES,
+ P4_EVENT_FSB_DATA_ACTIVITY,
+ P4_EVENT_BSQ_ALLOCATION,
+ P4_EVENT_BSQ_ACTIVE_ENTRIES,
+ P4_EVENT_SSE_INPUT_ASSIST,
+ P4_EVENT_PACKED_SP_UOP,
+ P4_EVENT_PACKED_DP_UOP,
+ P4_EVENT_SCALAR_SP_UOP,
+ P4_EVENT_SCALAR_DP_UOP,
+ P4_EVENT_64BIT_MMX_UOP,
+ P4_EVENT_128BIT_MMX_UOP,
+ P4_EVENT_X87_FP_UOP,
+ P4_EVENT_TC_MISC,
+ P4_EVENT_GLOBAL_POWER_EVENTS,
+ P4_EVENT_TC_MS_XFER,
+ P4_EVENT_UOP_QUEUE_WRITES,
+ P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE,
+ P4_EVENT_RETIRED_BRANCH_TYPE,
+ P4_EVENT_RESOURCE_STALL,
+ P4_EVENT_WC_BUFFER,
+ P4_EVENT_B2B_CYCLES,
+ P4_EVENT_BNR,
+ P4_EVENT_SNOOP,
+ P4_EVENT_RESPONSE,
+ P4_EVENT_FRONT_END_EVENT,
+ P4_EVENT_EXECUTION_EVENT,
+ P4_EVENT_REPLAY_EVENT,
+ P4_EVENT_INSTR_RETIRED,
+ P4_EVENT_UOPS_RETIRED,
+ P4_EVENT_UOP_TYPE,
+ P4_EVENT_BRANCH_RETIRED,
+ P4_EVENT_MISPRED_BRANCH_RETIRED,
+ P4_EVENT_X87_ASSIST,
+ P4_EVENT_MACHINE_CLEAR,
+ P4_EVENT_INSTR_COMPLETED,
+};
+
+#define P4_OPCODE(event) event##_OPCODE
+#define P4_OPCODE_ESEL(opcode) ((opcode & 0x00ff) >> 0)
+#define P4_OPCODE_EVNT(opcode) ((opcode & 0xff00) >> 8)
+#define P4_OPCODE_PACK(event, sel) (((event) << 8) | sel)
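+
+/*
+ * Example (following the packing above): P4_OPCODE_PACK(0x0c, 0x07)
+ * combines event code 0x0c with ESCR select 0x07 into a single opcode;
+ * P4_OPCODE_EVNT() and P4_OPCODE_ESEL() recover the two halves.
+ */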
+
+/*
+ * The comments below each event describe the ESCR restriction
+ * for that event and the counter indices per ESCR
+ *
+ * MSR_P4_IQ_ESCR0 and MSR_P4_IQ_ESCR1 are available only on early
+ * processor builds (family 0FH, models 01H-02H). These MSRs
+ * are not available on later versions, so we don't use
+ * them at all
+ *
+ * Also note that CCCR1 does not have the P4_CCCR_ENABLE bit working
+ * properly, so as a result we should not use this CCCR and its
+ * respective counter
+ */
+enum P4_EVENT_OPCODES {
+ P4_OPCODE(P4_EVENT_TC_DELIVER_MODE) = P4_OPCODE_PACK(0x01, 0x01),
+ /*
+ * MSR_P4_TC_ESCR0: 4, 5
+ * MSR_P4_TC_ESCR1: 6, 7
+ */
+
+ P4_OPCODE(P4_EVENT_BPU_FETCH_REQUEST) = P4_OPCODE_PACK(0x03, 0x00),
+ /*
+ * MSR_P4_BPU_ESCR0: 0, 1
+ * MSR_P4_BPU_ESCR1: 2, 3
+ */
+
+ P4_OPCODE(P4_EVENT_ITLB_REFERENCE) = P4_OPCODE_PACK(0x18, 0x03),
+ /*
+ * MSR_P4_ITLB_ESCR0: 0, 1
+ * MSR_P4_ITLB_ESCR1: 2, 3
+ */
+
+ P4_OPCODE(P4_EVENT_MEMORY_CANCEL) = P4_OPCODE_PACK(0x02, 0x05),
+ /*
+ * MSR_P4_DAC_ESCR0: 8, 9
+ * MSR_P4_DAC_ESCR1: 10, 11
+ */
+
+ P4_OPCODE(P4_EVENT_MEMORY_COMPLETE) = P4_OPCODE_PACK(0x08, 0x02),
+ /*
+ * MSR_P4_SAAT_ESCR0: 8, 9
+ * MSR_P4_SAAT_ESCR1: 10, 11
+ */
+
+ P4_OPCODE(P4_EVENT_LOAD_PORT_REPLAY) = P4_OPCODE_PACK(0x04, 0x02),
+ /*
+ * MSR_P4_SAAT_ESCR0: 8, 9
+ * MSR_P4_SAAT_ESCR1: 10, 11
+ */
+
+ P4_OPCODE(P4_EVENT_STORE_PORT_REPLAY) = P4_OPCODE_PACK(0x05, 0x02),
+ /*
+ * MSR_P4_SAAT_ESCR0: 8, 9
+ * MSR_P4_SAAT_ESCR1: 10, 11
+ */
+
+ P4_OPCODE(P4_EVENT_MOB_LOAD_REPLAY) = P4_OPCODE_PACK(0x03, 0x02),
+ /*
+ * MSR_P4_MOB_ESCR0: 0, 1
+ * MSR_P4_MOB_ESCR1: 2, 3
+ */
+
+ P4_OPCODE(P4_EVENT_PAGE_WALK_TYPE) = P4_OPCODE_PACK(0x01, 0x04),
+ /*
+ * MSR_P4_PMH_ESCR0: 0, 1
+ * MSR_P4_PMH_ESCR1: 2, 3
+ */
+
+ P4_OPCODE(P4_EVENT_BSQ_CACHE_REFERENCE) = P4_OPCODE_PACK(0x0c, 0x07),
+ /*
+ * MSR_P4_BSU_ESCR0: 0, 1
+ * MSR_P4_BSU_ESCR1: 2, 3
+ */
+
+ P4_OPCODE(P4_EVENT_IOQ_ALLOCATION) = P4_OPCODE_PACK(0x03, 0x06),
+ /*
+ * MSR_P4_FSB_ESCR0: 0, 1
+ * MSR_P4_FSB_ESCR1: 2, 3
+ */
+
+ P4_OPCODE(P4_EVENT_IOQ_ACTIVE_ENTRIES) = P4_OPCODE_PACK(0x1a, 0x06),
+ /*
+ * MSR_P4_FSB_ESCR1: 2, 3
+ */
+
+ P4_OPCODE(P4_EVENT_FSB_DATA_ACTIVITY) = P4_OPCODE_PACK(0x17, 0x06),
+ /*
+ * MSR_P4_FSB_ESCR0: 0, 1
+ * MSR_P4_FSB_ESCR1: 2, 3
+ */
+
+ P4_OPCODE(P4_EVENT_BSQ_ALLOCATION) = P4_OPCODE_PACK(0x05, 0x07),
+ /*
+ * MSR_P4_BSU_ESCR0: 0, 1
+ */
+
+ P4_OPCODE(P4_EVENT_BSQ_ACTIVE_ENTRIES) = P4_OPCODE_PACK(0x06, 0x07),
+ /*
+ * NOTE: no ESCR name in docs, it's guessed
+ * MSR_P4_BSU_ESCR1: 2, 3
+ */
+
+ P4_OPCODE(P4_EVENT_SSE_INPUT_ASSIST) = P4_OPCODE_PACK(0x34, 0x01),
+ /*
+ * MSR_P4_FIRM_ESCR0: 8, 9
+ * MSR_P4_FIRM_ESCR1: 10, 11
+ */
+
+ P4_OPCODE(P4_EVENT_PACKED_SP_UOP) = P4_OPCODE_PACK(0x08, 0x01),
+ /*
+ * MSR_P4_FIRM_ESCR0: 8, 9
+ * MSR_P4_FIRM_ESCR1: 10, 11
+ */
+
+ P4_OPCODE(P4_EVENT_PACKED_DP_UOP) = P4_OPCODE_PACK(0x0c, 0x01),
+ /*
+ * MSR_P4_FIRM_ESCR0: 8, 9
+ * MSR_P4_FIRM_ESCR1: 10, 11
+ */
+
+ P4_OPCODE(P4_EVENT_SCALAR_SP_UOP) = P4_OPCODE_PACK(0x0a, 0x01),
+ /*
+ * MSR_P4_FIRM_ESCR0: 8, 9
+ * MSR_P4_FIRM_ESCR1: 10, 11
+ */
+
+ P4_OPCODE(P4_EVENT_SCALAR_DP_UOP) = P4_OPCODE_PACK(0x0e, 0x01),
+ /*
+ * MSR_P4_FIRM_ESCR0: 8, 9
+ * MSR_P4_FIRM_ESCR1: 10, 11
+ */
+
+ P4_OPCODE(P4_EVENT_64BIT_MMX_UOP) = P4_OPCODE_PACK(0x02, 0x01),
+ /*
+ * MSR_P4_FIRM_ESCR0: 8, 9
+ * MSR_P4_FIRM_ESCR1: 10, 11
+ */
+
+ P4_OPCODE(P4_EVENT_128BIT_MMX_UOP) = P4_OPCODE_PACK(0x1a, 0x01),
+ /*
+ * MSR_P4_FIRM_ESCR0: 8, 9
+ * MSR_P4_FIRM_ESCR1: 10, 11
+ */
+
+ P4_OPCODE(P4_EVENT_X87_FP_UOP) = P4_OPCODE_PACK(0x04, 0x01),
+ /*
+ * MSR_P4_FIRM_ESCR0: 8, 9
+ * MSR_P4_FIRM_ESCR1: 10, 11
+ */
+
+ P4_OPCODE(P4_EVENT_TC_MISC) = P4_OPCODE_PACK(0x06, 0x01),
+ /*
+ * MSR_P4_TC_ESCR0: 4, 5
+ * MSR_P4_TC_ESCR1: 6, 7
+ */
+
+ P4_OPCODE(P4_EVENT_GLOBAL_POWER_EVENTS) = P4_OPCODE_PACK(0x13, 0x06),
+ /*
+ * MSR_P4_FSB_ESCR0: 0, 1
+ * MSR_P4_FSB_ESCR1: 2, 3
+ */
+
+ P4_OPCODE(P4_EVENT_TC_MS_XFER) = P4_OPCODE_PACK(0x05, 0x00),
+ /*
+ * MSR_P4_MS_ESCR0: 4, 5
+ * MSR_P4_MS_ESCR1: 6, 7
+ */
+
+ P4_OPCODE(P4_EVENT_UOP_QUEUE_WRITES) = P4_OPCODE_PACK(0x09, 0x00),
+ /*
+ * MSR_P4_MS_ESCR0: 4, 5
+ * MSR_P4_MS_ESCR1: 6, 7
+ */
+
+ P4_OPCODE(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE) = P4_OPCODE_PACK(0x05, 0x02),
+ /*
+ * MSR_P4_TBPU_ESCR0: 4, 5
+ * MSR_P4_TBPU_ESCR1: 6, 7
+ */
+
+ P4_OPCODE(P4_EVENT_RETIRED_BRANCH_TYPE) = P4_OPCODE_PACK(0x04, 0x02),
+ /*
+ * MSR_P4_TBPU_ESCR0: 4, 5
+ * MSR_P4_TBPU_ESCR1: 6, 7
+ */
+
+ P4_OPCODE(P4_EVENT_RESOURCE_STALL) = P4_OPCODE_PACK(0x01, 0x01),
+ /*
+ * MSR_P4_ALF_ESCR0: 12, 13, 16
+ * MSR_P4_ALF_ESCR1: 14, 15, 17
+ */
+
+ P4_OPCODE(P4_EVENT_WC_BUFFER) = P4_OPCODE_PACK(0x05, 0x05),
+ /*
+ * MSR_P4_DAC_ESCR0: 8, 9
+ * MSR_P4_DAC_ESCR1: 10, 11
+ */
+
+ P4_OPCODE(P4_EVENT_B2B_CYCLES) = P4_OPCODE_PACK(0x16, 0x03),
+ /*
+ * MSR_P4_FSB_ESCR0: 0, 1
+ * MSR_P4_FSB_ESCR1: 2, 3
+ */
+
+ P4_OPCODE(P4_EVENT_BNR) = P4_OPCODE_PACK(0x08, 0x03),
+ /*
+ * MSR_P4_FSB_ESCR0: 0, 1
+ * MSR_P4_FSB_ESCR1: 2, 3
+ */
+
+ P4_OPCODE(P4_EVENT_SNOOP) = P4_OPCODE_PACK(0x06, 0x03),
+ /*
+ * MSR_P4_FSB_ESCR0: 0, 1
+ * MSR_P4_FSB_ESCR1: 2, 3
+ */
+
+ P4_OPCODE(P4_EVENT_RESPONSE) = P4_OPCODE_PACK(0x04, 0x03),
+ /*
+ * MSR_P4_FSB_ESCR0: 0, 1
+ * MSR_P4_FSB_ESCR1: 2, 3
+ */
+
+ P4_OPCODE(P4_EVENT_FRONT_END_EVENT) = P4_OPCODE_PACK(0x08, 0x05),
+ /*
+ * MSR_P4_CRU_ESCR2: 12, 13, 16
+ * MSR_P4_CRU_ESCR3: 14, 15, 17
+ */
+
+ P4_OPCODE(P4_EVENT_EXECUTION_EVENT) = P4_OPCODE_PACK(0x0c, 0x05),
+ /*
+ * MSR_P4_CRU_ESCR2: 12, 13, 16
+ * MSR_P4_CRU_ESCR3: 14, 15, 17
+ */
+
+ P4_OPCODE(P4_EVENT_REPLAY_EVENT) = P4_OPCODE_PACK(0x09, 0x05),
+ /*
+ * MSR_P4_CRU_ESCR2: 12, 13, 16
+ * MSR_P4_CRU_ESCR3: 14, 15, 17
+ */
+
+ P4_OPCODE(P4_EVENT_INSTR_RETIRED) = P4_OPCODE_PACK(0x02, 0x04),
+ /*
+ * MSR_P4_CRU_ESCR0: 12, 13, 16
+ * MSR_P4_CRU_ESCR1: 14, 15, 17
+ */
+
+ P4_OPCODE(P4_EVENT_UOPS_RETIRED) = P4_OPCODE_PACK(0x01, 0x04),
+ /*
+ * MSR_P4_CRU_ESCR0: 12, 13, 16
+ * MSR_P4_CRU_ESCR1: 14, 15, 17
+ */
+
+ P4_OPCODE(P4_EVENT_UOP_TYPE) = P4_OPCODE_PACK(0x02, 0x02),
+ /*
+ * MSR_P4_RAT_ESCR0: 12, 13, 16
+ * MSR_P4_RAT_ESCR1: 14, 15, 17
+ */
+
+ P4_OPCODE(P4_EVENT_BRANCH_RETIRED) = P4_OPCODE_PACK(0x06, 0x05),
+ /*
+ * MSR_P4_CRU_ESCR2: 12, 13, 16
+ * MSR_P4_CRU_ESCR3: 14, 15, 17
+ */
+
+ P4_OPCODE(P4_EVENT_MISPRED_BRANCH_RETIRED) = P4_OPCODE_PACK(0x03, 0x04),
+ /*
+ * MSR_P4_CRU_ESCR0: 12, 13, 16
+ * MSR_P4_CRU_ESCR1: 14, 15, 17
+ */
+
+ P4_OPCODE(P4_EVENT_X87_ASSIST) = P4_OPCODE_PACK(0x03, 0x05),
+ /*
+ * MSR_P4_CRU_ESCR2: 12, 13, 16
+ * MSR_P4_CRU_ESCR3: 14, 15, 17
+ */
+
+ P4_OPCODE(P4_EVENT_MACHINE_CLEAR) = P4_OPCODE_PACK(0x02, 0x05),
+ /*
+ * MSR_P4_CRU_ESCR2: 12, 13, 16
+ * MSR_P4_CRU_ESCR3: 14, 15, 17
+ */
+
+ P4_OPCODE(P4_EVENT_INSTR_COMPLETED) = P4_OPCODE_PACK(0x07, 0x04),
+ /*
+ * MSR_P4_CRU_ESCR0: 12, 13, 16
+ * MSR_P4_CRU_ESCR1: 14, 15, 17
+ */
+};
+
+/*
+ * a caller should use the P4_ESCR_EMASK_BIT helper to
+ * pick the EventMask needed, for example
+ *
+ * P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, DD)
+ */
+enum P4_ESCR_EMASKS {
+ P4_GEN_ESCR_EMASK(P4_EVENT_TC_DELIVER_MODE, DD, 0),
+ P4_GEN_ESCR_EMASK(P4_EVENT_TC_DELIVER_MODE, DB, 1),
+ P4_GEN_ESCR_EMASK(P4_EVENT_TC_DELIVER_MODE, DI, 2),
+ P4_GEN_ESCR_EMASK(P4_EVENT_TC_DELIVER_MODE, BD, 3),
+ P4_GEN_ESCR_EMASK(P4_EVENT_TC_DELIVER_MODE, BB, 4),
+ P4_GEN_ESCR_EMASK(P4_EVENT_TC_DELIVER_MODE, BI, 5),
+ P4_GEN_ESCR_EMASK(P4_EVENT_TC_DELIVER_MODE, ID, 6),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_BPU_FETCH_REQUEST, TCMISS, 0),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_ITLB_REFERENCE, HIT, 0),
+ P4_GEN_ESCR_EMASK(P4_EVENT_ITLB_REFERENCE, MISS, 1),
+ P4_GEN_ESCR_EMASK(P4_EVENT_ITLB_REFERENCE, HIT_UK, 2),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_MEMORY_CANCEL, ST_RB_FULL, 2),
+ P4_GEN_ESCR_EMASK(P4_EVENT_MEMORY_CANCEL, 64K_CONF, 3),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_MEMORY_COMPLETE, LSC, 0),
+ P4_GEN_ESCR_EMASK(P4_EVENT_MEMORY_COMPLETE, SSC, 1),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_LOAD_PORT_REPLAY, SPLIT_LD, 1),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_STORE_PORT_REPLAY, SPLIT_ST, 1),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_MOB_LOAD_REPLAY, NO_STA, 1),
+ P4_GEN_ESCR_EMASK(P4_EVENT_MOB_LOAD_REPLAY, NO_STD, 3),
+ P4_GEN_ESCR_EMASK(P4_EVENT_MOB_LOAD_REPLAY, PARTIAL_DATA, 4),
+ P4_GEN_ESCR_EMASK(P4_EVENT_MOB_LOAD_REPLAY, UNALGN_ADDR, 5),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_PAGE_WALK_TYPE, DTMISS, 0),
+ P4_GEN_ESCR_EMASK(P4_EVENT_PAGE_WALK_TYPE, ITMISS, 1),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITS, 0),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITE, 1),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITM, 2),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITS, 3),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITE, 4),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITM, 5),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_MISS, 8),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_MISS, 9),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_CACHE_REFERENCE, WR_2ndL_MISS, 10),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ALLOCATION, DEFAULT, 0),
+ P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ALLOCATION, ALL_READ, 5),
+ P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ALLOCATION, ALL_WRITE, 6),
+ P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ALLOCATION, MEM_UC, 7),
+ P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ALLOCATION, MEM_WC, 8),
+ P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ALLOCATION, MEM_WT, 9),
+ P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ALLOCATION, MEM_WP, 10),
+ P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ALLOCATION, MEM_WB, 11),
+ P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ALLOCATION, OWN, 13),
+ P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ALLOCATION, OTHER, 14),
+ P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ALLOCATION, PREFETCH, 15),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ACTIVE_ENTRIES, DEFAULT, 0),
+ P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ACTIVE_ENTRIES, ALL_READ, 5),
+ P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ACTIVE_ENTRIES, ALL_WRITE, 6),
+ P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_UC, 7),
+ P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_WC, 8),
+ P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_WT, 9),
+ P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_WP, 10),
+ P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_WB, 11),
+ P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ACTIVE_ENTRIES, OWN, 13),
+ P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ACTIVE_ENTRIES, OTHER, 14),
+ P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ACTIVE_ENTRIES, PREFETCH, 15),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_FSB_DATA_ACTIVITY, DRDY_DRV, 0),
+ P4_GEN_ESCR_EMASK(P4_EVENT_FSB_DATA_ACTIVITY, DRDY_OWN, 1),
+ P4_GEN_ESCR_EMASK(P4_EVENT_FSB_DATA_ACTIVITY, DRDY_OTHER, 2),
+ P4_GEN_ESCR_EMASK(P4_EVENT_FSB_DATA_ACTIVITY, DBSY_DRV, 3),
+ P4_GEN_ESCR_EMASK(P4_EVENT_FSB_DATA_ACTIVITY, DBSY_OWN, 4),
+ P4_GEN_ESCR_EMASK(P4_EVENT_FSB_DATA_ACTIVITY, DBSY_OTHER, 5),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ALLOCATION, REQ_TYPE0, 0),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ALLOCATION, REQ_TYPE1, 1),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ALLOCATION, REQ_LEN0, 2),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ALLOCATION, REQ_LEN1, 3),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ALLOCATION, REQ_IO_TYPE, 5),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ALLOCATION, REQ_LOCK_TYPE, 6),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ALLOCATION, REQ_CACHE_TYPE, 7),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ALLOCATION, REQ_SPLIT_TYPE, 8),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ALLOCATION, REQ_DEM_TYPE, 9),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ALLOCATION, REQ_ORD_TYPE, 10),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ALLOCATION, MEM_TYPE0, 11),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ALLOCATION, MEM_TYPE1, 12),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ALLOCATION, MEM_TYPE2, 13),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_TYPE0, 0),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_TYPE1, 1),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_LEN0, 2),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_LEN1, 3),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_IO_TYPE, 5),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_LOCK_TYPE, 6),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_CACHE_TYPE, 7),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_SPLIT_TYPE, 8),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_DEM_TYPE, 9),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_ORD_TYPE, 10),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ACTIVE_ENTRIES, MEM_TYPE0, 11),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ACTIVE_ENTRIES, MEM_TYPE1, 12),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ACTIVE_ENTRIES, MEM_TYPE2, 13),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_SSE_INPUT_ASSIST, ALL, 15),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_PACKED_SP_UOP, ALL, 15),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_PACKED_DP_UOP, ALL, 15),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_SCALAR_SP_UOP, ALL, 15),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_SCALAR_DP_UOP, ALL, 15),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_64BIT_MMX_UOP, ALL, 15),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_128BIT_MMX_UOP, ALL, 15),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_X87_FP_UOP, ALL, 15),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_TC_MISC, FLUSH, 4),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_GLOBAL_POWER_EVENTS, RUNNING, 0),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_TC_MS_XFER, CISC, 0),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_UOP_QUEUE_WRITES, FROM_TC_BUILD, 0),
+ P4_GEN_ESCR_EMASK(P4_EVENT_UOP_QUEUE_WRITES, FROM_TC_DELIVER, 1),
+ P4_GEN_ESCR_EMASK(P4_EVENT_UOP_QUEUE_WRITES, FROM_ROM, 2),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE, CONDITIONAL, 1),
+ P4_GEN_ESCR_EMASK(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE, CALL, 2),
+ P4_GEN_ESCR_EMASK(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE, RETURN, 3),
+ P4_GEN_ESCR_EMASK(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE, INDIRECT, 4),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_RETIRED_BRANCH_TYPE, CONDITIONAL, 1),
+ P4_GEN_ESCR_EMASK(P4_EVENT_RETIRED_BRANCH_TYPE, CALL, 2),
+ P4_GEN_ESCR_EMASK(P4_EVENT_RETIRED_BRANCH_TYPE, RETURN, 3),
+ P4_GEN_ESCR_EMASK(P4_EVENT_RETIRED_BRANCH_TYPE, INDIRECT, 4),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_RESOURCE_STALL, SBFULL, 5),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_WC_BUFFER, WCB_EVICTS, 0),
+ P4_GEN_ESCR_EMASK(P4_EVENT_WC_BUFFER, WCB_FULL_EVICTS, 1),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_FRONT_END_EVENT, NBOGUS, 0),
+ P4_GEN_ESCR_EMASK(P4_EVENT_FRONT_END_EVENT, BOGUS, 1),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_EXECUTION_EVENT, NBOGUS0, 0),
+ P4_GEN_ESCR_EMASK(P4_EVENT_EXECUTION_EVENT, NBOGUS1, 1),
+ P4_GEN_ESCR_EMASK(P4_EVENT_EXECUTION_EVENT, NBOGUS2, 2),
+ P4_GEN_ESCR_EMASK(P4_EVENT_EXECUTION_EVENT, NBOGUS3, 3),
+ P4_GEN_ESCR_EMASK(P4_EVENT_EXECUTION_EVENT, BOGUS0, 4),
+ P4_GEN_ESCR_EMASK(P4_EVENT_EXECUTION_EVENT, BOGUS1, 5),
+ P4_GEN_ESCR_EMASK(P4_EVENT_EXECUTION_EVENT, BOGUS2, 6),
+ P4_GEN_ESCR_EMASK(P4_EVENT_EXECUTION_EVENT, BOGUS3, 7),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_REPLAY_EVENT, NBOGUS, 0),
+ P4_GEN_ESCR_EMASK(P4_EVENT_REPLAY_EVENT, BOGUS, 1),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_INSTR_RETIRED, NBOGUSNTAG, 0),
+ P4_GEN_ESCR_EMASK(P4_EVENT_INSTR_RETIRED, NBOGUSTAG, 1),
+ P4_GEN_ESCR_EMASK(P4_EVENT_INSTR_RETIRED, BOGUSNTAG, 2),
+ P4_GEN_ESCR_EMASK(P4_EVENT_INSTR_RETIRED, BOGUSTAG, 3),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_UOPS_RETIRED, NBOGUS, 0),
+ P4_GEN_ESCR_EMASK(P4_EVENT_UOPS_RETIRED, BOGUS, 1),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_UOP_TYPE, TAGLOADS, 1),
+ P4_GEN_ESCR_EMASK(P4_EVENT_UOP_TYPE, TAGSTORES, 2),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_BRANCH_RETIRED, MMNP, 0),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BRANCH_RETIRED, MMNM, 1),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BRANCH_RETIRED, MMTP, 2),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BRANCH_RETIRED, MMTM, 3),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_MISPRED_BRANCH_RETIRED, NBOGUS, 0),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_X87_ASSIST, FPSU, 0),
+ P4_GEN_ESCR_EMASK(P4_EVENT_X87_ASSIST, FPSO, 1),
+ P4_GEN_ESCR_EMASK(P4_EVENT_X87_ASSIST, POAO, 2),
+ P4_GEN_ESCR_EMASK(P4_EVENT_X87_ASSIST, POAU, 3),
+ P4_GEN_ESCR_EMASK(P4_EVENT_X87_ASSIST, PREA, 4),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_MACHINE_CLEAR, CLEAR, 0),
+ P4_GEN_ESCR_EMASK(P4_EVENT_MACHINE_CLEAR, MOCLEAR, 1),
+ P4_GEN_ESCR_EMASK(P4_EVENT_MACHINE_CLEAR, SMCLEAR, 2),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_INSTR_COMPLETED, NBOGUS, 0),
+ P4_GEN_ESCR_EMASK(P4_EVENT_INSTR_COMPLETED, BOGUS, 1),
+};
+
+/* P4 PEBS: stale for a while */
+#define P4_PEBS_METRIC_MASK 0x00001fffU
+#define P4_PEBS_UOB_TAG 0x01000000U
+#define P4_PEBS_ENABLE 0x02000000U
+
+/* Replay metrics for MSR_IA32_PEBS_ENABLE and MSR_P4_PEBS_MATRIX_VERT */
+#define P4_PEBS__1stl_cache_load_miss_retired 0x3000001
+#define P4_PEBS__2ndl_cache_load_miss_retired 0x3000002
+#define P4_PEBS__dtlb_load_miss_retired 0x3000004
+#define P4_PEBS__dtlb_store_miss_retired 0x3000004
+#define P4_PEBS__dtlb_all_miss_retired 0x3000004
+#define P4_PEBS__tagged_mispred_branch 0x3018000
+#define P4_PEBS__mob_load_replay_retired 0x3000200
+#define P4_PEBS__split_load_retired 0x3000400
+#define P4_PEBS__split_store_retired 0x3000400
+
+#define P4_VERT__1stl_cache_load_miss_retired 0x0000001
+#define P4_VERT__2ndl_cache_load_miss_retired 0x0000001
+#define P4_VERT__dtlb_load_miss_retired 0x0000001
+#define P4_VERT__dtlb_store_miss_retired 0x0000002
+#define P4_VERT__dtlb_all_miss_retired 0x0000003
+#define P4_VERT__tagged_mispred_branch 0x0000010
+#define P4_VERT__mob_load_replay_retired 0x0000001
+#define P4_VERT__split_load_retired 0x0000001
+#define P4_VERT__split_store_retired 0x0000002
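+
+/*
+ * The matching suffixes above suggest these are used as pairs: presumably
+ * the P4_PEBS__* value is written to MSR_IA32_PEBS_ENABLE and the
+ * corresponding P4_VERT__* value to MSR_P4_PEBS_MATRIX_VERT, e.g.
+ * P4_PEBS__dtlb_load_miss_retired together with
+ * P4_VERT__dtlb_load_miss_retired.
+ */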
+
+enum P4_CACHE_EVENTS {
+ P4_CACHE__NONE,
+
+ P4_CACHE__1stl_cache_load_miss_retired,
+ P4_CACHE__2ndl_cache_load_miss_retired,
+ P4_CACHE__dtlb_load_miss_retired,
+ P4_CACHE__dtlb_store_miss_retired,
+ P4_CACHE__itlb_reference_hit,
+ P4_CACHE__itlb_reference_miss,
+
+ P4_CACHE__MAX
+};
+
+#endif /* PERF_EVENT_P4_H */
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index b753ea59703a..7e5c6a60b8ee 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -21,7 +21,6 @@ struct mm_struct;
#include <asm/msr.h>
#include <asm/desc_defs.h>
#include <asm/nops.h>
-#include <asm/ds.h>
#include <linux/personality.h>
#include <linux/cpumask.h>
@@ -29,6 +28,7 @@ struct mm_struct;
#include <linux/threads.h>
#include <linux/math64.h>
#include <linux/init.h>
+#include <linux/err.h>
#define HBP_NUM 4
/*
@@ -113,7 +113,6 @@ struct cpuinfo_x86 {
/* Index into per_cpu list: */
u16 cpu_index;
#endif
- unsigned int x86_hyper_vendor;
} __attribute__((__aligned__(SMP_CACHE_BYTES)));
#define X86_VENDOR_INTEL 0
@@ -127,9 +126,6 @@ struct cpuinfo_x86 {
#define X86_VENDOR_UNKNOWN 0xff
-#define X86_HYPER_VENDOR_NONE 0
-#define X86_HYPER_VENDOR_VMWARE 1
-
/*
* capabilities of CPUs
*/
@@ -380,6 +376,10 @@ union thread_xstate {
struct xsave_struct xsave;
};
+struct fpu {
+ union thread_xstate *state;
+};
+
#ifdef CONFIG_X86_64
DECLARE_PER_CPU(struct orig_ist, orig_ist);
@@ -457,7 +457,7 @@ struct thread_struct {
unsigned long trap_no;
unsigned long error_code;
/* floating point and extended processor state */
- union thread_xstate *xstate;
+ struct fpu fpu;
#ifdef CONFIG_X86_32
/* Virtual 86 mode info */
struct vm86_struct __user *vm86_info;
@@ -473,10 +473,6 @@ struct thread_struct {
unsigned long iopl;
/* Max allowed port in the bitmap, in bytes: */
unsigned io_bitmap_max;
-/* MSR_IA32_DEBUGCTLMSR value to switch in if TIF_DEBUGCTLMSR is set. */
- unsigned long debugctlmsr;
- /* Debug Store context; see asm/ds.h */
- struct ds_context *ds_ctx;
};
static inline unsigned long native_get_debugreg(int regno)
@@ -793,6 +789,8 @@ static inline void wbinvd_halt(void)
extern void enable_sep_cpu(void);
extern int sysenter_setup(void);
+extern void early_trap_init(void);
+
/* Defined in head.S */
extern struct desc_ptr early_gdt_descr;
@@ -803,7 +801,7 @@ extern void cpu_init(void);
static inline unsigned long get_debugctlmsr(void)
{
- unsigned long debugctlmsr = 0;
+ unsigned long debugctlmsr = 0;
#ifndef CONFIG_X86_DEBUGCTLMSR
if (boot_cpu_data.x86 < 6)
@@ -811,21 +809,6 @@ static inline unsigned long get_debugctlmsr(void)
#endif
rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
- return debugctlmsr;
-}
-
-static inline unsigned long get_debugctlmsr_on_cpu(int cpu)
-{
- u64 debugctlmsr = 0;
- u32 val1, val2;
-
-#ifndef CONFIG_X86_DEBUGCTLMSR
- if (boot_cpu_data.x86 < 6)
- return 0;
-#endif
- rdmsr_on_cpu(cpu, MSR_IA32_DEBUGCTLMSR, &val1, &val2);
- debugctlmsr = val1 | ((u64)val2 << 32);
-
return debugctlmsr;
}
@@ -838,18 +821,6 @@ static inline void update_debugctlmsr(unsigned long debugctlmsr)
wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
}
-static inline void update_debugctlmsr_on_cpu(int cpu,
- unsigned long debugctlmsr)
-{
-#ifndef CONFIG_X86_DEBUGCTLMSR
- if (boot_cpu_data.x86 < 6)
- return;
-#endif
- wrmsr_on_cpu(cpu, MSR_IA32_DEBUGCTLMSR,
- (u32)((u64)debugctlmsr),
- (u32)((u64)debugctlmsr >> 32));
-}
-
/*
* from system description table in BIOS. Mostly for MCA use, but
* others may find it useful:
diff --git a/arch/x86/include/asm/ptrace-abi.h b/arch/x86/include/asm/ptrace-abi.h
index 86723035a515..52b098a6eebb 100644
--- a/arch/x86/include/asm/ptrace-abi.h
+++ b/arch/x86/include/asm/ptrace-abi.h
@@ -82,61 +82,6 @@
#ifndef __ASSEMBLY__
#include <linux/types.h>
-
-/* configuration/status structure used in PTRACE_BTS_CONFIG and
- PTRACE_BTS_STATUS commands.
-*/
-struct ptrace_bts_config {
- /* requested or actual size of BTS buffer in bytes */
- __u32 size;
- /* bitmask of below flags */
- __u32 flags;
- /* buffer overflow signal */
- __u32 signal;
- /* actual size of bts_struct in bytes */
- __u32 bts_size;
-};
-#endif /* __ASSEMBLY__ */
-
-#define PTRACE_BTS_O_TRACE 0x1 /* branch trace */
-#define PTRACE_BTS_O_SCHED 0x2 /* scheduling events w/ jiffies */
-#define PTRACE_BTS_O_SIGNAL 0x4 /* send SIG<signal> on buffer overflow
- instead of wrapping around */
-#define PTRACE_BTS_O_ALLOC 0x8 /* (re)allocate buffer */
-
-#define PTRACE_BTS_CONFIG 40
-/* Configure branch trace recording.
- ADDR points to a struct ptrace_bts_config.
- DATA gives the size of that buffer.
- A new buffer is allocated, if requested in the flags.
- An overflow signal may only be requested for new buffers.
- Returns the number of bytes read.
-*/
-#define PTRACE_BTS_STATUS 41
-/* Return the current configuration in a struct ptrace_bts_config
- pointed to by ADDR; DATA gives the size of that buffer.
- Returns the number of bytes written.
-*/
-#define PTRACE_BTS_SIZE 42
-/* Return the number of available BTS records for draining.
- DATA and ADDR are ignored.
-*/
-#define PTRACE_BTS_GET 43
-/* Get a single BTS record.
- DATA defines the index into the BTS array, where 0 is the newest
- entry, and higher indices refer to older entries.
- ADDR is pointing to struct bts_struct (see asm/ds.h).
-*/
-#define PTRACE_BTS_CLEAR 44
-/* Clear the BTS buffer.
- DATA and ADDR are ignored.
-*/
-#define PTRACE_BTS_DRAIN 45
-/* Read all available BTS records and clear the buffer.
- ADDR points to an array of struct bts_struct.
- DATA gives the size of that buffer.
- BTS records are read from oldest to newest.
- Returns number of BTS records drained.
-*/
+#endif
#endif /* _ASM_X86_PTRACE_ABI_H */
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
index 69a686a7dff0..78cd1ea94500 100644
--- a/arch/x86/include/asm/ptrace.h
+++ b/arch/x86/include/asm/ptrace.h
@@ -289,12 +289,6 @@ extern int do_get_thread_area(struct task_struct *p, int idx,
extern int do_set_thread_area(struct task_struct *p, int idx,
struct user_desc __user *info, int can_allocate);
-#ifdef CONFIG_X86_PTRACE_BTS
-extern void ptrace_bts_untrace(struct task_struct *tsk);
-
-#define arch_ptrace_untrace(tsk) ptrace_bts_untrace(tsk)
-#endif /* CONFIG_X86_PTRACE_BTS */
-
#endif /* __KERNEL__ */
#endif /* !__ASSEMBLY__ */
diff --git a/arch/x86/include/asm/pvclock-abi.h b/arch/x86/include/asm/pvclock-abi.h
index 6d93508f2626..35f2d1948ada 100644
--- a/arch/x86/include/asm/pvclock-abi.h
+++ b/arch/x86/include/asm/pvclock-abi.h
@@ -29,7 +29,8 @@ struct pvclock_vcpu_time_info {
u64 system_time;
u32 tsc_to_system_mul;
s8 tsc_shift;
- u8 pad[3];
+ u8 flags;
+ u8 pad[2];
} __attribute__((__packed__)); /* 32 bytes */
struct pvclock_wall_clock {
@@ -38,5 +39,6 @@ struct pvclock_wall_clock {
u32 nsec;
} __attribute__((__packed__));
+#define PVCLOCK_TSC_STABLE_BIT (1 << 0)
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_PVCLOCK_ABI_H */
diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h
index 53235fd5f8ce..cd02f324aa6b 100644
--- a/arch/x86/include/asm/pvclock.h
+++ b/arch/x86/include/asm/pvclock.h
@@ -6,6 +6,7 @@
/* some helper functions for xen and kvm pv clock sources */
cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src);
+void pvclock_set_flags(u8 flags);
unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src);
void pvclock_read_wallclock(struct pvclock_wall_clock *wall,
struct pvclock_vcpu_time_info *vcpu,
diff --git a/arch/x86/include/asm/rdc321x_defs.h b/arch/x86/include/asm/rdc321x_defs.h
deleted file mode 100644
index c8e9c8bed3d0..000000000000
--- a/arch/x86/include/asm/rdc321x_defs.h
+++ /dev/null
@@ -1,12 +0,0 @@
-#define PFX "rdc321x: "
-
-/* General purpose configuration and data registers */
-#define RDC3210_CFGREG_ADDR 0x0CF8
-#define RDC3210_CFGREG_DATA 0x0CFC
-
-#define RDC321X_GPIO_CTRL_REG1 0x48
-#define RDC321X_GPIO_CTRL_REG2 0x84
-#define RDC321X_GPIO_DATA_REG1 0x4c
-#define RDC321X_GPIO_DATA_REG2 0x88
-
-#define RDC321X_MAX_GPIO 58
diff --git a/arch/x86/include/asm/scatterlist.h b/arch/x86/include/asm/scatterlist.h
index 75af592677ec..fb0b1874396f 100644
--- a/arch/x86/include/asm/scatterlist.h
+++ b/arch/x86/include/asm/scatterlist.h
@@ -1,8 +1,9 @@
#ifndef _ASM_X86_SCATTERLIST_H
#define _ASM_X86_SCATTERLIST_H
-#define ISA_DMA_THRESHOLD (0x00ffffff)
-
#include <asm-generic/scatterlist.h>
+#define ISA_DMA_THRESHOLD (0x00ffffff)
+#define ARCH_HAS_SG_CHAIN
+
#endif /* _ASM_X86_SCATTERLIST_H */
diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index 38638cd2fa4c..0e831059ac5a 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -81,7 +81,9 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
u32 event_inj_err;
u64 nested_cr3;
u64 lbr_ctl;
- u8 reserved_5[832];
+ u64 reserved_5;
+ u64 next_rip;
+ u8 reserved_6[816];
};
@@ -115,6 +117,10 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
#define SVM_IOIO_SIZE_MASK (7 << SVM_IOIO_SIZE_SHIFT)
#define SVM_IOIO_ASIZE_MASK (7 << SVM_IOIO_ASIZE_SHIFT)
+#define SVM_VM_CR_VALID_MASK 0x001fULL
+#define SVM_VM_CR_SVM_LOCK_MASK 0x0008ULL
+#define SVM_VM_CR_SVM_DIS_MASK 0x0010ULL
+
struct __attribute__ ((__packed__)) vmcb_seg {
u16 selector;
u16 attrib;
@@ -238,6 +244,7 @@ struct __attribute__ ((__packed__)) vmcb {
#define SVM_EXITINFOSHIFT_TS_REASON_IRET 36
#define SVM_EXITINFOSHIFT_TS_REASON_JMP 38
+#define SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE 44
#define SVM_EXIT_READ_CR0 0x000
#define SVM_EXIT_READ_CR3 0x003
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index e0d28901e969..f0b6e5dbc5a0 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -87,13 +87,12 @@ struct thread_info {
#define TIF_NOTSC 16 /* TSC is not accessible in userland */
#define TIF_IA32 17 /* 32bit process */
#define TIF_FORK 18 /* ret_from_fork */
-#define TIF_MEMDIE 20
+#define TIF_MEMDIE 20 /* is terminating due to OOM killer */
#define TIF_DEBUG 21 /* uses debug registers */
#define TIF_IO_BITMAP 22 /* uses I/O bitmap */
#define TIF_FREEZE 23 /* is freezing for suspend */
#define TIF_FORCED_TF 24 /* true if TF in eflags artificially */
-#define TIF_DEBUGCTLMSR 25 /* uses thread_struct.debugctlmsr */
-#define TIF_DS_AREA_MSR 26 /* uses thread_struct.ds_area_msr */
+#define TIF_BLOCKSTEP 25 /* set when we want DEBUGCTLMSR_BTF */
#define TIF_LAZY_MMU_UPDATES 27 /* task is updating the mmu lazily */
#define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
@@ -115,8 +114,7 @@ struct thread_info {
#define _TIF_IO_BITMAP (1 << TIF_IO_BITMAP)
#define _TIF_FREEZE (1 << TIF_FREEZE)
#define _TIF_FORCED_TF (1 << TIF_FORCED_TF)
-#define _TIF_DEBUGCTLMSR (1 << TIF_DEBUGCTLMSR)
-#define _TIF_DS_AREA_MSR (1 << TIF_DS_AREA_MSR)
+#define _TIF_BLOCKSTEP (1 << TIF_BLOCKSTEP)
#define _TIF_LAZY_MMU_UPDATES (1 << TIF_LAZY_MMU_UPDATES)
#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
@@ -147,7 +145,7 @@ struct thread_info {
/* flags to check in __switch_to() */
#define _TIF_WORK_CTXSW \
- (_TIF_IO_BITMAP|_TIF_DEBUGCTLMSR|_TIF_DS_AREA_MSR|_TIF_NOTSC)
+ (_TIF_IO_BITMAP|_TIF_NOTSC|_TIF_BLOCKSTEP)
#define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW|_TIF_DEBUG)
@@ -241,10 +239,9 @@ static inline struct thread_info *current_thread_info(void)
#define TS_USEDFPU 0x0001 /* FPU was used by this task
this quantum (SMP) */
#define TS_COMPAT 0x0002 /* 32bit syscall active (64BIT)*/
-#define TS_POLLING 0x0004 /* true if in idle loop
- and not sleeping */
+#define TS_POLLING 0x0004 /* idle task polling need_resched,
+ skip sending interrupt */
#define TS_RESTORE_SIGMASK 0x0008 /* restore signal mask in do_signal() */
-#define TS_XSAVE 0x0010 /* Use xsave/xrstor */
#define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index c5087d796587..21899cc31e52 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -53,33 +53,29 @@
extern int cpu_to_node_map[];
/* Returns the number of the node containing CPU 'cpu' */
-static inline int cpu_to_node(int cpu)
+static inline int __cpu_to_node(int cpu)
{
return cpu_to_node_map[cpu];
}
-#define early_cpu_to_node(cpu) cpu_to_node(cpu)
+#define early_cpu_to_node __cpu_to_node
+#define cpu_to_node __cpu_to_node
#else /* CONFIG_X86_64 */
/* Mappings between logical cpu number and node number */
DECLARE_EARLY_PER_CPU(int, x86_cpu_to_node_map);
-/* Returns the number of the current Node. */
-DECLARE_PER_CPU(int, node_number);
-#define numa_node_id() percpu_read(node_number)
-
#ifdef CONFIG_DEBUG_PER_CPU_MAPS
-extern int cpu_to_node(int cpu);
+/*
+ * override generic percpu implementation of cpu_to_node
+ */
+extern int __cpu_to_node(int cpu);
+#define cpu_to_node __cpu_to_node
+
extern int early_cpu_to_node(int cpu);
#else /* !CONFIG_DEBUG_PER_CPU_MAPS */
-/* Returns the number of the node containing CPU 'cpu' */
-static inline int cpu_to_node(int cpu)
-{
- return per_cpu(x86_cpu_to_node_map, cpu);
-}
-
/* Same function but used if called before per_cpu areas are setup */
static inline int early_cpu_to_node(int cpu)
{
@@ -170,6 +166,10 @@ static inline int numa_node_id(void)
{
return 0;
}
+/*
+ * indicate override:
+ */
+#define numa_node_id numa_node_id
static inline int early_cpu_to_node(int cpu)
{
diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
index 4da91ad69e0d..f66cda56781d 100644
--- a/arch/x86/include/asm/traps.h
+++ b/arch/x86/include/asm/traps.h
@@ -79,7 +79,7 @@ static inline int get_si_code(unsigned long condition)
extern int panic_on_unrecovered_nmi;
-void math_error(void __user *);
+void math_error(struct pt_regs *, int, int);
void math_emulate(struct math_emu_info *);
#ifndef CONFIG_X86_32
asmlinkage void smp_thermal_interrupt(void);
diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h
index b414d2b401f6..aa558ac0306e 100644
--- a/arch/x86/include/asm/uv/uv_bau.h
+++ b/arch/x86/include/asm/uv/uv_bau.h
@@ -27,13 +27,14 @@
* set 2 is at BASE + 2*512, set 3 at BASE + 3*512, and so on.
*
* We will use 31 sets, one for sending BAU messages from each of the 32
- * cpu's on the node.
+ * cpu's on the uvhub.
*
* TLB shootdown will use the first of the 8 descriptors of each set.
* Each of the descriptors is 64 bytes in size (8*64 = 512 bytes in a set).
*/
#define UV_ITEMS_PER_DESCRIPTOR 8
+#define MAX_BAU_CONCURRENT 3
#define UV_CPUS_PER_ACT_STATUS 32
#define UV_ACT_STATUS_MASK 0x3
#define UV_ACT_STATUS_SIZE 2
@@ -45,6 +46,9 @@
#define UV_PAYLOADQ_PNODE_SHIFT 49
#define UV_PTC_BASENAME "sgi_uv/ptc_statistics"
#define uv_physnodeaddr(x) ((__pa((unsigned long)(x)) & uv_mmask))
+#define UV_ENABLE_INTD_SOFT_ACK_MODE_SHIFT 15
+#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHIFT 16
+#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD 0x000000000bUL
/*
* bits in UVH_LB_BAU_SB_ACTIVATION_STATUS_0/1
@@ -55,15 +59,29 @@
#define DESC_STATUS_SOURCE_TIMEOUT 3
/*
- * source side thresholds at which message retries print a warning
+ * source side threshholds at which message retries print a warning
*/
#define SOURCE_TIMEOUT_LIMIT 20
#define DESTINATION_TIMEOUT_LIMIT 20
/*
+ * misc. delays, in microseconds
+ */
+#define THROTTLE_DELAY 10
+#define TIMEOUT_DELAY 10
+#define BIOS_TO 1000
+/* BIOS is assumed to set the destination timeout to 1003520 nanoseconds */
+
+/*
+ * thresholds at which to use IPI to free resources
+ */
+#define PLUGSB4RESET 100
+#define TIMEOUTSB4RESET 100
+
+/*
* number of entries in the destination side payload queue
*/
-#define DEST_Q_SIZE 17
+#define DEST_Q_SIZE 20
/*
* number of destination side software ack resources
*/
@@ -72,9 +90,10 @@
/*
* completion statuses for sending a TLB flush message
*/
-#define FLUSH_RETRY 1
-#define FLUSH_GIVEUP 2
-#define FLUSH_COMPLETE 3
+#define FLUSH_RETRY_PLUGGED 1
+#define FLUSH_RETRY_TIMEOUT 2
+#define FLUSH_GIVEUP 3
+#define FLUSH_COMPLETE 4
/*
* Distribution: 32 bytes (256 bits) (bytes 0-0x1f of descriptor)
@@ -86,14 +105,14 @@
* 'base_dest_nodeid' field of the header corresponds to the
* destination nodeID associated with that specified bit.
*/
-struct bau_target_nodemask {
- unsigned long bits[BITS_TO_LONGS(256)];
+struct bau_target_uvhubmask {
+ unsigned long bits[BITS_TO_LONGS(UV_DISTRIBUTION_SIZE)];
};
/*
- * mask of cpu's on a node
+ * mask of cpu's on a uvhub
* (during initialization we need to check that unsigned long has
- * enough bits for max. cpu's per node)
+ * enough bits for max. cpu's per uvhub)
*/
struct bau_local_cpumask {
unsigned long bits;
@@ -135,8 +154,8 @@ struct bau_msg_payload {
struct bau_msg_header {
unsigned int dest_subnodeid:6; /* must be 0x10, for the LB */
/* bits 5:0 */
- unsigned int base_dest_nodeid:15; /* nasid>>1 (pnode) of */
- /* bits 20:6 */ /* first bit in node_map */
+ unsigned int base_dest_nodeid:15; /* nasid (pnode<<1) of */
+ /* bits 20:6 */ /* first bit in uvhub map */
unsigned int command:8; /* message type */
/* bits 28:21 */
/* 0x38: SN3net EndPoint Message */
@@ -146,26 +165,38 @@ struct bau_msg_header {
unsigned int rsvd_2:9; /* must be zero */
/* bits 40:32 */
/* Suppl_A is 56-41 */
- unsigned int payload_2a:8;/* becomes byte 16 of msg */
- /* bits 48:41 */ /* not currently using */
- unsigned int payload_2b:8;/* becomes byte 17 of msg */
- /* bits 56:49 */ /* not currently using */
+ unsigned int sequence:16;/* message sequence number */
+ /* bits 56:41 */ /* becomes bytes 16-17 of msg */
/* Address field (96:57) is never used as an
address (these are address bits 42:3) */
+
unsigned int rsvd_3:1; /* must be zero */
/* bit 57 */
/* address bits 27:4 are payload */
- /* these 24 bits become bytes 12-14 of msg */
+ /* these next 24 (58-81) bits become bytes 12-14 of msg */
+
+ /* bits 65:58 land in byte 12 */
unsigned int replied_to:1;/* sent as 0 by the source to byte 12 */
/* bit 58 */
-
- unsigned int payload_1a:5;/* not currently used */
- /* bits 63:59 */
- unsigned int payload_1b:8;/* not currently used */
- /* bits 71:64 */
- unsigned int payload_1c:8;/* not currently used */
- /* bits 79:72 */
- unsigned int payload_1d:2;/* not currently used */
+ unsigned int msg_type:3; /* software type of the message*/
+ /* bits 61:59 */
+ unsigned int canceled:1; /* message canceled, resource to be freed*/
+ /* bit 62 */
+ unsigned int payload_1a:1;/* not currently used */
+ /* bit 63 */
+ unsigned int payload_1b:2;/* not currently used */
+ /* bits 65:64 */
+
+ /* bits 73:66 land in byte 13 */
+ unsigned int payload_1ca:6;/* not currently used */
+ /* bits 71:66 */
+ unsigned int payload_1c:2;/* not currently used */
+ /* bits 73:72 */
+
+ /* bits 81:74 land in byte 14 */
+ unsigned int payload_1d:6;/* not currently used */
+ /* bits 79:74 */
+ unsigned int payload_1e:2;/* not currently used */
/* bits 81:80 */
unsigned int rsvd_4:7; /* must be zero */
@@ -178,7 +209,7 @@ struct bau_msg_header {
/* bits 95:90 */
unsigned int rsvd_6:5; /* must be zero */
/* bits 100:96 */
- unsigned int int_both:1;/* if 1, interrupt both sockets on the blade */
+ unsigned int int_both:1;/* if 1, interrupt both sockets on the uvhub */
/* bit 101*/
unsigned int fairness:3;/* usually zero */
/* bits 104:102 */
@@ -191,13 +222,18 @@ struct bau_msg_header {
/* bits 127:107 */
};
+/* see msg_type: */
+#define MSG_NOOP 0
+#define MSG_REGULAR 1
+#define MSG_RETRY 2
+
/*
* The activation descriptor:
* The format of the message to send, plus all accompanying control
* Should be 64 bytes
*/
struct bau_desc {
- struct bau_target_nodemask distribution;
+ struct bau_target_uvhubmask distribution;
/*
* message template, consisting of header and payload:
*/
@@ -237,19 +273,25 @@ struct bau_payload_queue_entry {
unsigned short acknowledge_count; /* filled in by destination */
/* 16 bits, bytes 10-11 */
- unsigned short replied_to:1; /* sent as 0 by the source */
- /* 1 bit */
- unsigned short unused1:7; /* not currently using */
- /* 7 bits: byte 12) */
+ /* these next 3 bytes come from bits 58-81 of the message header */
+ unsigned short replied_to:1; /* sent as 0 by the source */
+ unsigned short msg_type:3; /* software message type */
+ unsigned short canceled:1; /* sent as 0 by the source */
+ unsigned short unused1:3; /* not currently using */
+ /* byte 12 */
- unsigned char unused2[2]; /* not currently using */
- /* bytes 13-14 */
+ unsigned char unused2a; /* not currently using */
+ /* byte 13 */
+ unsigned char unused2; /* not currently using */
+ /* byte 14 */
unsigned char sw_ack_vector; /* filled in by the hardware */
/* byte 15 (bits 127:120) */
- unsigned char unused4[3]; /* not currently using bytes 17-19 */
- /* bytes 17-19 */
+ unsigned short sequence; /* message sequence number */
+ /* bytes 16-17 */
+ unsigned char unused4[2]; /* not currently using bytes 18-19 */
+ /* bytes 18-19 */
int number_of_cpus; /* filled in at destination */
/* 32 bits, bytes 20-23 (aligned) */
@@ -259,63 +301,93 @@ struct bau_payload_queue_entry {
};
/*
- * one for every slot in the destination payload queue
- */
-struct bau_msg_status {
- struct bau_local_cpumask seen_by; /* map of cpu's */
-};
-
-/*
- * one for every slot in the destination software ack resources
- */
-struct bau_sw_ack_status {
- struct bau_payload_queue_entry *msg; /* associated message */
- int watcher; /* cpu monitoring, or -1 */
-};
-
-/*
- * one on every node and per-cpu; to locate the software tables
+ * one per-cpu; to locate the software tables
*/
struct bau_control {
struct bau_desc *descriptor_base;
- struct bau_payload_queue_entry *bau_msg_head;
struct bau_payload_queue_entry *va_queue_first;
struct bau_payload_queue_entry *va_queue_last;
- struct bau_msg_status *msg_statuses;
- int *watching; /* pointer to array */
+ struct bau_payload_queue_entry *bau_msg_head;
+ struct bau_control *uvhub_master;
+ struct bau_control *socket_master;
+ unsigned long timeout_interval;
+ atomic_t active_descriptor_count;
+ int max_concurrent;
+ int max_concurrent_constant;
+ int retry_message_scans;
+ int plugged_tries;
+ int timeout_tries;
+ int ipi_attempts;
+ int conseccompletes;
+ short cpu;
+ short uvhub_cpu;
+ short uvhub;
+ short cpus_in_socket;
+ short cpus_in_uvhub;
+ unsigned short message_number;
+ unsigned short uvhub_quiesce;
+ short socket_acknowledge_count[DEST_Q_SIZE];
+ cycles_t send_message;
+ spinlock_t masks_lock;
+ spinlock_t uvhub_lock;
+ spinlock_t queue_lock;
};
/*
* This structure is allocated per_cpu for UV TLB shootdown statistics.
*/
struct ptc_stats {
- unsigned long ptc_i; /* number of IPI-style flushes */
- unsigned long requestor; /* number of nodes this cpu sent to */
- unsigned long requestee; /* times cpu was remotely requested */
- unsigned long alltlb; /* times all tlb's on this cpu were flushed */
- unsigned long onetlb; /* times just one tlb on this cpu was flushed */
- unsigned long s_retry; /* retries on source side timeouts */
- unsigned long d_retry; /* retries on destination side timeouts */
- unsigned long sflush; /* cycles spent in uv_flush_tlb_others */
- unsigned long dflush; /* cycles spent on destination side */
- unsigned long retriesok; /* successes on retries */
- unsigned long nomsg; /* interrupts with no message */
- unsigned long multmsg; /* interrupts with multiple messages */
- unsigned long ntargeted;/* nodes targeted */
+ /* sender statistics */
+ unsigned long s_giveup; /* number of fall backs to IPI-style flushes */
+ unsigned long s_requestor; /* number of shootdown requests */
+ unsigned long s_stimeout; /* source side timeouts */
+ unsigned long s_dtimeout; /* destination side timeouts */
+ unsigned long s_time; /* time spent in sending side */
+ unsigned long s_retriesok; /* successful retries */
+ unsigned long s_ntargcpu; /* number of cpus targeted */
+ unsigned long s_ntarguvhub; /* number of uvhubs targeted */
+ unsigned long s_ntarguvhub16; /* number of times >= 16 target hubs */
+ unsigned long s_ntarguvhub8; /* number of times >= 8 target hubs */
+ unsigned long s_ntarguvhub4; /* number of times >= 4 target hubs */
+ unsigned long s_ntarguvhub2; /* number of times >= 2 target hubs */
+ unsigned long s_ntarguvhub1; /* number of times == 1 target hub */
+ unsigned long s_resets_plug; /* ipi-style resets from plug state */
+ unsigned long s_resets_timeout; /* ipi-style resets from timeouts */
+ unsigned long s_busy; /* status stayed busy past s/w timer */
+ unsigned long s_throttles; /* waits in throttle */
+ unsigned long s_retry_messages; /* retry broadcasts */
+ /* destination statistics */
+ unsigned long d_alltlb; /* times all tlb's on this cpu were flushed */
+ unsigned long d_onetlb; /* times just one tlb on this cpu was flushed */
+ unsigned long d_multmsg; /* interrupts with multiple messages */
+ unsigned long d_nomsg; /* interrupts with no message */
+ unsigned long d_time; /* time spent on destination side */
+ unsigned long d_requestee; /* number of messages processed */
+ unsigned long d_retries; /* number of retry messages processed */
+ unsigned long d_canceled; /* number of messages canceled by retries */
+ unsigned long d_nocanceled; /* retries that found nothing to cancel */
+ unsigned long d_resets; /* number of ipi-style requests processed */
+ unsigned long d_rcanceled; /* number of messages canceled by resets */
};
-static inline int bau_node_isset(int node, struct bau_target_nodemask *dstp)
+static inline int bau_uvhub_isset(int uvhub, struct bau_target_uvhubmask *dstp)
{
- return constant_test_bit(node, &dstp->bits[0]);
+ return constant_test_bit(uvhub, &dstp->bits[0]);
}
-static inline void bau_node_set(int node, struct bau_target_nodemask *dstp)
+static inline void bau_uvhub_set(int uvhub, struct bau_target_uvhubmask *dstp)
{
- __set_bit(node, &dstp->bits[0]);
+ __set_bit(uvhub, &dstp->bits[0]);
}
-static inline void bau_nodes_clear(struct bau_target_nodemask *dstp, int nbits)
+static inline void bau_uvhubs_clear(struct bau_target_uvhubmask *dstp,
+ int nbits)
{
bitmap_zero(&dstp->bits[0], nbits);
}
+static inline int bau_uvhub_weight(struct bau_target_uvhubmask *dstp)
+{
+ return bitmap_weight((unsigned long *)&dstp->bits[0],
+ UV_DISTRIBUTION_SIZE);
+}
static inline void bau_cpubits_clear(struct bau_local_cpumask *dstp, int nbits)
{
@@ -328,4 +400,35 @@ static inline void bau_cpubits_clear(struct bau_local_cpumask *dstp, int nbits)
extern void uv_bau_message_intr1(void);
extern void uv_bau_timeout_intr1(void);
+struct atomic_short {
+ short counter;
+};
+
+/**
+ * atomic_read_short - read a short atomic variable
+ * @v: pointer of type atomic_short
+ *
+ * Atomically reads the value of @v.
+ */
+static inline int atomic_read_short(const struct atomic_short *v)
+{
+ return v->counter;
+}
+
+/**
+ * atomic_add_short_return - add and return a short int
+ * @i: short value to add
+ * @v: pointer of type atomic_short
+ *
+ * Atomically adds @i to @v and returns @i + @v
+ */
+static inline int atomic_add_short_return(short i, struct atomic_short *v)
+{
+ short __i = i;
+ asm volatile(LOCK_PREFIX "xaddw %0, %1"
+ : "+r" (i), "+m" (v->counter)
+ : : "memory");
+ return i + __i;
+}
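+
+/*
+ * Usage sketch (with a hypothetical counter, not a field defined here):
+ *
+ *	static struct atomic_short busy;
+ *
+ *	if (atomic_add_short_return(1, &busy) > MAX_BAU_CONCURRENT)
+ *		back_off_and_retry();
+ */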
+
#endif /* _ASM_X86_UV_UV_BAU_H */
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
index 14cc74ba5d23..bf6b88ef8eeb 100644
--- a/arch/x86/include/asm/uv/uv_hub.h
+++ b/arch/x86/include/asm/uv/uv_hub.h
@@ -307,7 +307,7 @@ static inline unsigned long uv_read_global_mmr32(int pnode, unsigned long offset
* Access Global MMR space using the MMR space located at the top of physical
* memory.
*/
-static inline unsigned long *uv_global_mmr64_address(int pnode, unsigned long offset)
+static inline volatile void __iomem *uv_global_mmr64_address(int pnode, unsigned long offset)
{
return __va(UV_GLOBAL_MMR64_BASE |
UV_GLOBAL_MMR64_PNODE_BITS(pnode) | offset);
diff --git a/arch/x86/include/asm/uv/uv_mmrs.h b/arch/x86/include/asm/uv/uv_mmrs.h
index 2cae46c7c8a2..b2f2d2e05cec 100644
--- a/arch/x86/include/asm/uv/uv_mmrs.h
+++ b/arch/x86/include/asm/uv/uv_mmrs.h
@@ -1,4 +1,3 @@
-
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
@@ -15,13 +14,25 @@
#define UV_MMR_ENABLE (1UL << 63)
/* ========================================================================= */
+/* UVH_BAU_DATA_BROADCAST */
+/* ========================================================================= */
+#define UVH_BAU_DATA_BROADCAST 0x61688UL
+#define UVH_BAU_DATA_BROADCAST_32 0x0440
+
+#define UVH_BAU_DATA_BROADCAST_ENABLE_SHFT 0
+#define UVH_BAU_DATA_BROADCAST_ENABLE_MASK 0x0000000000000001UL
+
+union uvh_bau_data_broadcast_u {
+ unsigned long v;
+ struct uvh_bau_data_broadcast_s {
+ unsigned long enable : 1; /* RW */
+ unsigned long rsvd_1_63: 63; /* */
+ } s;
+};
+
+/* ========================================================================= */
/* UVH_BAU_DATA_CONFIG */
/* ========================================================================= */
-#define UVH_LB_BAU_MISC_CONTROL 0x320170UL
-#define UV_ENABLE_INTD_SOFT_ACK_MODE_SHIFT 15
-#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHIFT 16
-#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD 0x000000000bUL
-/* 1011 timebase 7 (168millisec) * 3 ticks -> 500ms */
#define UVH_BAU_DATA_CONFIG 0x61680UL
#define UVH_BAU_DATA_CONFIG_32 0x0438
@@ -604,6 +615,68 @@ union uvh_lb_bau_intd_software_acknowledge_u {
#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS_32 0x0a70
/* ========================================================================= */
+/* UVH_LB_BAU_MISC_CONTROL */
+/* ========================================================================= */
+#define UVH_LB_BAU_MISC_CONTROL 0x320170UL
+#define UVH_LB_BAU_MISC_CONTROL_32 0x00a10
+
+#define UVH_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0
+#define UVH_LB_BAU_MISC_CONTROL_REJECTION_DELAY_MASK 0x00000000000000ffUL
+#define UVH_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8
+#define UVH_LB_BAU_MISC_CONTROL_APIC_MODE_MASK 0x0000000000000100UL
+#define UVH_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_SHFT 9
+#define UVH_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_MASK 0x0000000000000200UL
+#define UVH_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_SHFT 10
+#define UVH_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_MASK 0x0000000000000400UL
+#define UVH_LB_BAU_MISC_CONTROL_CSI_AGENT_PRESENCE_VECTOR_SHFT 11
+#define UVH_LB_BAU_MISC_CONTROL_CSI_AGENT_PRESENCE_VECTOR_MASK 0x0000000000003800UL
+#define UVH_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_SHFT 14
+#define UVH_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_MASK 0x0000000000004000UL
+#define UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT 15
+#define UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK 0x0000000000008000UL
+#define UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT 16
+#define UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK 0x00000000000f0000UL
+#define UVH_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_SHFT 20
+#define UVH_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_MASK 0x0000000000100000UL
+#define UVH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_SHFT 21
+#define UVH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_MASK 0x0000000000200000UL
+#define UVH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_SHFT 22
+#define UVH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_MASK 0x0000000000400000UL
+#define UVH_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_SHFT 23
+#define UVH_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_MASK 0x0000000000800000UL
+#define UVH_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_SHFT 24
+#define UVH_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000007000000UL
+#define UVH_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_SHFT 27
+#define UVH_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_MASK 0x0000000008000000UL
+#define UVH_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_SHFT 28
+#define UVH_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL
+#define UVH_LB_BAU_MISC_CONTROL_FUN_SHFT 48
+#define UVH_LB_BAU_MISC_CONTROL_FUN_MASK 0xffff000000000000UL
+
+union uvh_lb_bau_misc_control_u {
+ unsigned long v;
+ struct uvh_lb_bau_misc_control_s {
+ unsigned long rejection_delay : 8; /* RW */
+ unsigned long apic_mode : 1; /* RW */
+ unsigned long force_broadcast : 1; /* RW */
+ unsigned long force_lock_nop : 1; /* RW */
+ unsigned long csi_agent_presence_vector : 3; /* RW */
+ unsigned long descriptor_fetch_mode : 1; /* RW */
+ unsigned long enable_intd_soft_ack_mode : 1; /* RW */
+ unsigned long intd_soft_ack_timeout_period : 4; /* RW */
+ unsigned long enable_dual_mapping_mode : 1; /* RW */
+ unsigned long vga_io_port_decode_enable : 1; /* RW */
+ unsigned long vga_io_port_16_bit_decode : 1; /* RW */
+ unsigned long suppress_dest_registration : 1; /* RW */
+ unsigned long programmed_initial_priority : 3; /* RW */
+ unsigned long use_incoming_priority : 1; /* RW */
+ unsigned long enable_programmed_initial_priority : 1; /* RW */
+ unsigned long rsvd_29_47 : 19; /* */
+ unsigned long fun : 16; /* RW */
+ } s;
+};
+
+/* ========================================================================= */
/* UVH_LB_BAU_SB_ACTIVATION_CONTROL */
/* ========================================================================= */
#define UVH_LB_BAU_SB_ACTIVATION_CONTROL 0x320020UL
@@ -681,334 +754,6 @@ union uvh_lb_bau_sb_descriptor_base_u {
};
/* ========================================================================= */
-/* UVH_LB_MCAST_AOERR0_RPT_ENABLE */
-/* ========================================================================= */
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE 0x50b20UL
-
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_OBESE_MSG_SHFT 0
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_OBESE_MSG_MASK 0x0000000000000001UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_DATA_SB_ERR_SHFT 1
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_DATA_SB_ERR_MASK 0x0000000000000002UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_NACK_BUFF_PARITY_SHFT 2
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_NACK_BUFF_PARITY_MASK 0x0000000000000004UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_TIMEOUT_SHFT 3
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_TIMEOUT_MASK 0x0000000000000008UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_INACTIVE_REPLY_SHFT 4
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_INACTIVE_REPLY_MASK 0x0000000000000010UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_UPGRADE_ERROR_SHFT 5
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_UPGRADE_ERROR_MASK 0x0000000000000020UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_REG_COUNT_UNDERFLOW_SHFT 6
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_REG_COUNT_UNDERFLOW_MASK 0x0000000000000040UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_REP_OBESE_MSG_SHFT 7
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MCAST_REP_OBESE_MSG_MASK 0x0000000000000080UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REQ_RUNT_MSG_SHFT 8
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REQ_RUNT_MSG_MASK 0x0000000000000100UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REQ_OBESE_MSG_SHFT 9
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REQ_OBESE_MSG_MASK 0x0000000000000200UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REQ_DATA_SB_ERR_SHFT 10
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REQ_DATA_SB_ERR_MASK 0x0000000000000400UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_RUNT_MSG_SHFT 11
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_RUNT_MSG_MASK 0x0000000000000800UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_OBESE_MSG_SHFT 12
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_OBESE_MSG_MASK 0x0000000000001000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_DATA_SB_ERR_SHFT 13
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_DATA_SB_ERR_MASK 0x0000000000002000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_COMMAND_ERR_SHFT 14
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_REP_COMMAND_ERR_MASK 0x0000000000004000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_PEND_TIMEOUT_SHFT 15
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_UCACHE_PEND_TIMEOUT_MASK 0x0000000000008000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REQ_RUNT_MSG_SHFT 16
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REQ_RUNT_MSG_MASK 0x0000000000010000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REQ_OBESE_MSG_SHFT 17
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REQ_OBESE_MSG_MASK 0x0000000000020000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REQ_DATA_SB_ERR_SHFT 18
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REQ_DATA_SB_ERR_MASK 0x0000000000040000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REP_RUNT_MSG_SHFT 19
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REP_RUNT_MSG_MASK 0x0000000000080000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REP_OBESE_MSG_SHFT 20
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REP_OBESE_MSG_MASK 0x0000000000100000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REP_DATA_SB_ERR_SHFT 21
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_REP_DATA_SB_ERR_MASK 0x0000000000200000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_AMO_TIMEOUT_SHFT 22
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_AMO_TIMEOUT_MASK 0x0000000000400000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_PUT_TIMEOUT_SHFT 23
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_PUT_TIMEOUT_MASK 0x0000000000800000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_SPURIOUS_EVENT_SHFT 24
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_MACC_SPURIOUS_EVENT_MASK 0x0000000001000000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_IOH_DESTINATION_TABLE_PARITY_SHFT 25
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_IOH_DESTINATION_TABLE_PARITY_MASK 0x0000000002000000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_GET_HAD_ERROR_REPLY_SHFT 26
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_GET_HAD_ERROR_REPLY_MASK 0x0000000004000000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_GET_TIMEOUT_SHFT 27
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_GET_TIMEOUT_MASK 0x0000000008000000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_LOCK_MANAGER_HAD_ERROR_REPLY_SHFT 28
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_LOCK_MANAGER_HAD_ERROR_REPLY_MASK 0x0000000010000000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_PUT_HAD_ERROR_REPLY_SHFT 29
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_PUT_HAD_ERROR_REPLY_MASK 0x0000000020000000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_PUT_TIMEOUT_SHFT 30
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_PUT_TIMEOUT_MASK 0x0000000040000000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_SB_ACTIVATION_OVERRUN_SHFT 31
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_SB_ACTIVATION_OVERRUN_MASK 0x0000000080000000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_COMPLETED_GB_ACTIVATION_HAD_ERROR_REPLY_SHFT 32
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_COMPLETED_GB_ACTIVATION_HAD_ERROR_REPLY_MASK 0x0000000100000000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_COMPLETED_GB_ACTIVATION_TIMEOUT_SHFT 33
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_COMPLETED_GB_ACTIVATION_TIMEOUT_MASK 0x0000000200000000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_DESCRIPTOR_BUFFER_0_PARITY_SHFT 34
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_DESCRIPTOR_BUFFER_0_PARITY_MASK 0x0000000400000000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_DESCRIPTOR_BUFFER_1_PARITY_SHFT 35
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_DESCRIPTOR_BUFFER_1_PARITY_MASK 0x0000000800000000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_SOCKET_DESTINATION_TABLE_PARITY_SHFT 36
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_SOCKET_DESTINATION_TABLE_PARITY_MASK 0x0000001000000000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_BAU_REPLY_PAYLOAD_CORRUPTION_SHFT 37
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_BAU_REPLY_PAYLOAD_CORRUPTION_MASK 0x0000002000000000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_IO_PORT_DESTINATION_TABLE_PARITY_SHFT 38
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_IO_PORT_DESTINATION_TABLE_PARITY_MASK 0x0000004000000000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INTD_SOFT_ACK_TIMEOUT_SHFT 39
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INTD_SOFT_ACK_TIMEOUT_MASK 0x0000008000000000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INT_REP_OBESE_MSG_SHFT 40
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INT_REP_OBESE_MSG_MASK 0x0000010000000000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INT_REP_COMMAND_ERR_SHFT 41
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INT_REP_COMMAND_ERR_MASK 0x0000020000000000UL
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INT_TIMEOUT_SHFT 42
-#define UVH_LB_MCAST_AOERR0_RPT_ENABLE_INT_TIMEOUT_MASK 0x0000040000000000UL
-
-union uvh_lb_mcast_aoerr0_rpt_enable_u {
- unsigned long v;
- struct uvh_lb_mcast_aoerr0_rpt_enable_s {
- unsigned long mcast_obese_msg : 1; /* RW */
- unsigned long mcast_data_sb_err : 1; /* RW */
- unsigned long mcast_nack_buff_parity : 1; /* RW */
- unsigned long mcast_timeout : 1; /* RW */
- unsigned long mcast_inactive_reply : 1; /* RW */
- unsigned long mcast_upgrade_error : 1; /* RW */
- unsigned long mcast_reg_count_underflow : 1; /* RW */
- unsigned long mcast_rep_obese_msg : 1; /* RW */
- unsigned long ucache_req_runt_msg : 1; /* RW */
- unsigned long ucache_req_obese_msg : 1; /* RW */
- unsigned long ucache_req_data_sb_err : 1; /* RW */
- unsigned long ucache_rep_runt_msg : 1; /* RW */
- unsigned long ucache_rep_obese_msg : 1; /* RW */
- unsigned long ucache_rep_data_sb_err : 1; /* RW */
- unsigned long ucache_rep_command_err : 1; /* RW */
- unsigned long ucache_pend_timeout : 1; /* RW */
- unsigned long macc_req_runt_msg : 1; /* RW */
- unsigned long macc_req_obese_msg : 1; /* RW */
- unsigned long macc_req_data_sb_err : 1; /* RW */
- unsigned long macc_rep_runt_msg : 1; /* RW */
- unsigned long macc_rep_obese_msg : 1; /* RW */
- unsigned long macc_rep_data_sb_err : 1; /* RW */
- unsigned long macc_amo_timeout : 1; /* RW */
- unsigned long macc_put_timeout : 1; /* RW */
- unsigned long macc_spurious_event : 1; /* RW */
- unsigned long ioh_destination_table_parity : 1; /* RW */
- unsigned long get_had_error_reply : 1; /* RW */
- unsigned long get_timeout : 1; /* RW */
- unsigned long lock_manager_had_error_reply : 1; /* RW */
- unsigned long put_had_error_reply : 1; /* RW */
- unsigned long put_timeout : 1; /* RW */
- unsigned long sb_activation_overrun : 1; /* RW */
- unsigned long completed_gb_activation_had_error_reply : 1; /* RW */
- unsigned long completed_gb_activation_timeout : 1; /* RW */
- unsigned long descriptor_buffer_0_parity : 1; /* RW */
- unsigned long descriptor_buffer_1_parity : 1; /* RW */
- unsigned long socket_destination_table_parity : 1; /* RW */
- unsigned long bau_reply_payload_corruption : 1; /* RW */
- unsigned long io_port_destination_table_parity : 1; /* RW */
- unsigned long intd_soft_ack_timeout : 1; /* RW */
- unsigned long int_rep_obese_msg : 1; /* RW */
- unsigned long int_rep_command_err : 1; /* RW */
- unsigned long int_timeout : 1; /* RW */
- unsigned long rsvd_43_63 : 21; /* */
- } s;
-};
-
-/* ========================================================================= */
-/* UVH_LOCAL_INT0_CONFIG */
-/* ========================================================================= */
-#define UVH_LOCAL_INT0_CONFIG 0x61000UL
-
-#define UVH_LOCAL_INT0_CONFIG_VECTOR_SHFT 0
-#define UVH_LOCAL_INT0_CONFIG_VECTOR_MASK 0x00000000000000ffUL
-#define UVH_LOCAL_INT0_CONFIG_DM_SHFT 8
-#define UVH_LOCAL_INT0_CONFIG_DM_MASK 0x0000000000000700UL
-#define UVH_LOCAL_INT0_CONFIG_DESTMODE_SHFT 11
-#define UVH_LOCAL_INT0_CONFIG_DESTMODE_MASK 0x0000000000000800UL
-#define UVH_LOCAL_INT0_CONFIG_STATUS_SHFT 12
-#define UVH_LOCAL_INT0_CONFIG_STATUS_MASK 0x0000000000001000UL
-#define UVH_LOCAL_INT0_CONFIG_P_SHFT 13
-#define UVH_LOCAL_INT0_CONFIG_P_MASK 0x0000000000002000UL
-#define UVH_LOCAL_INT0_CONFIG_T_SHFT 15
-#define UVH_LOCAL_INT0_CONFIG_T_MASK 0x0000000000008000UL
-#define UVH_LOCAL_INT0_CONFIG_M_SHFT 16
-#define UVH_LOCAL_INT0_CONFIG_M_MASK 0x0000000000010000UL
-#define UVH_LOCAL_INT0_CONFIG_APIC_ID_SHFT 32
-#define UVH_LOCAL_INT0_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
-
-union uvh_local_int0_config_u {
- unsigned long v;
- struct uvh_local_int0_config_s {
- unsigned long vector_ : 8; /* RW */
- unsigned long dm : 3; /* RW */
- unsigned long destmode : 1; /* RW */
- unsigned long status : 1; /* RO */
- unsigned long p : 1; /* RO */
- unsigned long rsvd_14 : 1; /* */
- unsigned long t : 1; /* RO */
- unsigned long m : 1; /* RW */
- unsigned long rsvd_17_31: 15; /* */
- unsigned long apic_id : 32; /* RW */
- } s;
-};
-
-/* ========================================================================= */
-/* UVH_LOCAL_INT0_ENABLE */
-/* ========================================================================= */
-#define UVH_LOCAL_INT0_ENABLE 0x65000UL
-
-#define UVH_LOCAL_INT0_ENABLE_LB_HCERR_SHFT 0
-#define UVH_LOCAL_INT0_ENABLE_LB_HCERR_MASK 0x0000000000000001UL
-#define UVH_LOCAL_INT0_ENABLE_GR0_HCERR_SHFT 1
-#define UVH_LOCAL_INT0_ENABLE_GR0_HCERR_MASK 0x0000000000000002UL
-#define UVH_LOCAL_INT0_ENABLE_GR1_HCERR_SHFT 2
-#define UVH_LOCAL_INT0_ENABLE_GR1_HCERR_MASK 0x0000000000000004UL
-#define UVH_LOCAL_INT0_ENABLE_LH_HCERR_SHFT 3
-#define UVH_LOCAL_INT0_ENABLE_LH_HCERR_MASK 0x0000000000000008UL
-#define UVH_LOCAL_INT0_ENABLE_RH_HCERR_SHFT 4
-#define UVH_LOCAL_INT0_ENABLE_RH_HCERR_MASK 0x0000000000000010UL
-#define UVH_LOCAL_INT0_ENABLE_XN_HCERR_SHFT 5
-#define UVH_LOCAL_INT0_ENABLE_XN_HCERR_MASK 0x0000000000000020UL
-#define UVH_LOCAL_INT0_ENABLE_SI_HCERR_SHFT 6
-#define UVH_LOCAL_INT0_ENABLE_SI_HCERR_MASK 0x0000000000000040UL
-#define UVH_LOCAL_INT0_ENABLE_LB_AOERR0_SHFT 7
-#define UVH_LOCAL_INT0_ENABLE_LB_AOERR0_MASK 0x0000000000000080UL
-#define UVH_LOCAL_INT0_ENABLE_GR0_AOERR0_SHFT 8
-#define UVH_LOCAL_INT0_ENABLE_GR0_AOERR0_MASK 0x0000000000000100UL
-#define UVH_LOCAL_INT0_ENABLE_GR1_AOERR0_SHFT 9
-#define UVH_LOCAL_INT0_ENABLE_GR1_AOERR0_MASK 0x0000000000000200UL
-#define UVH_LOCAL_INT0_ENABLE_LH_AOERR0_SHFT 10
-#define UVH_LOCAL_INT0_ENABLE_LH_AOERR0_MASK 0x0000000000000400UL
-#define UVH_LOCAL_INT0_ENABLE_RH_AOERR0_SHFT 11
-#define UVH_LOCAL_INT0_ENABLE_RH_AOERR0_MASK 0x0000000000000800UL
-#define UVH_LOCAL_INT0_ENABLE_XN_AOERR0_SHFT 12
-#define UVH_LOCAL_INT0_ENABLE_XN_AOERR0_MASK 0x0000000000001000UL
-#define UVH_LOCAL_INT0_ENABLE_SI_AOERR0_SHFT 13
-#define UVH_LOCAL_INT0_ENABLE_SI_AOERR0_MASK 0x0000000000002000UL
-#define UVH_LOCAL_INT0_ENABLE_LB_AOERR1_SHFT 14
-#define UVH_LOCAL_INT0_ENABLE_LB_AOERR1_MASK 0x0000000000004000UL
-#define UVH_LOCAL_INT0_ENABLE_GR0_AOERR1_SHFT 15
-#define UVH_LOCAL_INT0_ENABLE_GR0_AOERR1_MASK 0x0000000000008000UL
-#define UVH_LOCAL_INT0_ENABLE_GR1_AOERR1_SHFT 16
-#define UVH_LOCAL_INT0_ENABLE_GR1_AOERR1_MASK 0x0000000000010000UL
-#define UVH_LOCAL_INT0_ENABLE_LH_AOERR1_SHFT 17
-#define UVH_LOCAL_INT0_ENABLE_LH_AOERR1_MASK 0x0000000000020000UL
-#define UVH_LOCAL_INT0_ENABLE_RH_AOERR1_SHFT 18
-#define UVH_LOCAL_INT0_ENABLE_RH_AOERR1_MASK 0x0000000000040000UL
-#define UVH_LOCAL_INT0_ENABLE_XN_AOERR1_SHFT 19
-#define UVH_LOCAL_INT0_ENABLE_XN_AOERR1_MASK 0x0000000000080000UL
-#define UVH_LOCAL_INT0_ENABLE_SI_AOERR1_SHFT 20
-#define UVH_LOCAL_INT0_ENABLE_SI_AOERR1_MASK 0x0000000000100000UL
-#define UVH_LOCAL_INT0_ENABLE_RH_VPI_INT_SHFT 21
-#define UVH_LOCAL_INT0_ENABLE_RH_VPI_INT_MASK 0x0000000000200000UL
-#define UVH_LOCAL_INT0_ENABLE_SYSTEM_SHUTDOWN_INT_SHFT 22
-#define UVH_LOCAL_INT0_ENABLE_SYSTEM_SHUTDOWN_INT_MASK 0x0000000000400000UL
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_0_SHFT 23
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_0_MASK 0x0000000000800000UL
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_1_SHFT 24
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_1_MASK 0x0000000001000000UL
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_2_SHFT 25
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_2_MASK 0x0000000002000000UL
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_3_SHFT 26
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_3_MASK 0x0000000004000000UL
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_4_SHFT 27
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_4_MASK 0x0000000008000000UL
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_5_SHFT 28
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_5_MASK 0x0000000010000000UL
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_6_SHFT 29
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_6_MASK 0x0000000020000000UL
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_7_SHFT 30
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_7_MASK 0x0000000040000000UL
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_8_SHFT 31
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_8_MASK 0x0000000080000000UL
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_9_SHFT 32
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_9_MASK 0x0000000100000000UL
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_10_SHFT 33
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_10_MASK 0x0000000200000000UL
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_11_SHFT 34
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_11_MASK 0x0000000400000000UL
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_12_SHFT 35
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_12_MASK 0x0000000800000000UL
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_13_SHFT 36
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_13_MASK 0x0000001000000000UL
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_14_SHFT 37
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_14_MASK 0x0000002000000000UL
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_15_SHFT 38
-#define UVH_LOCAL_INT0_ENABLE_LB_IRQ_INT_15_MASK 0x0000004000000000UL
-#define UVH_LOCAL_INT0_ENABLE_L1_NMI_INT_SHFT 39
-#define UVH_LOCAL_INT0_ENABLE_L1_NMI_INT_MASK 0x0000008000000000UL
-#define UVH_LOCAL_INT0_ENABLE_STOP_CLOCK_SHFT 40
-#define UVH_LOCAL_INT0_ENABLE_STOP_CLOCK_MASK 0x0000010000000000UL
-#define UVH_LOCAL_INT0_ENABLE_ASIC_TO_L1_SHFT 41
-#define UVH_LOCAL_INT0_ENABLE_ASIC_TO_L1_MASK 0x0000020000000000UL
-#define UVH_LOCAL_INT0_ENABLE_L1_TO_ASIC_SHFT 42
-#define UVH_LOCAL_INT0_ENABLE_L1_TO_ASIC_MASK 0x0000040000000000UL
-#define UVH_LOCAL_INT0_ENABLE_LTC_INT_SHFT 43
-#define UVH_LOCAL_INT0_ENABLE_LTC_INT_MASK 0x0000080000000000UL
-#define UVH_LOCAL_INT0_ENABLE_LA_SEQ_TRIGGER_SHFT 44
-#define UVH_LOCAL_INT0_ENABLE_LA_SEQ_TRIGGER_MASK 0x0000100000000000UL
-
-union uvh_local_int0_enable_u {
- unsigned long v;
- struct uvh_local_int0_enable_s {
- unsigned long lb_hcerr : 1; /* RW */
- unsigned long gr0_hcerr : 1; /* RW */
- unsigned long gr1_hcerr : 1; /* RW */
- unsigned long lh_hcerr : 1; /* RW */
- unsigned long rh_hcerr : 1; /* RW */
- unsigned long xn_hcerr : 1; /* RW */
- unsigned long si_hcerr : 1; /* RW */
- unsigned long lb_aoerr0 : 1; /* RW */
- unsigned long gr0_aoerr0 : 1; /* RW */
- unsigned long gr1_aoerr0 : 1; /* RW */
- unsigned long lh_aoerr0 : 1; /* RW */
- unsigned long rh_aoerr0 : 1; /* RW */
- unsigned long xn_aoerr0 : 1; /* RW */
- unsigned long si_aoerr0 : 1; /* RW */
- unsigned long lb_aoerr1 : 1; /* RW */
- unsigned long gr0_aoerr1 : 1; /* RW */
- unsigned long gr1_aoerr1 : 1; /* RW */
- unsigned long lh_aoerr1 : 1; /* RW */
- unsigned long rh_aoerr1 : 1; /* RW */
- unsigned long xn_aoerr1 : 1; /* RW */
- unsigned long si_aoerr1 : 1; /* RW */
- unsigned long rh_vpi_int : 1; /* RW */
- unsigned long system_shutdown_int : 1; /* RW */
- unsigned long lb_irq_int_0 : 1; /* RW */
- unsigned long lb_irq_int_1 : 1; /* RW */
- unsigned long lb_irq_int_2 : 1; /* RW */
- unsigned long lb_irq_int_3 : 1; /* RW */
- unsigned long lb_irq_int_4 : 1; /* RW */
- unsigned long lb_irq_int_5 : 1; /* RW */
- unsigned long lb_irq_int_6 : 1; /* RW */
- unsigned long lb_irq_int_7 : 1; /* RW */
- unsigned long lb_irq_int_8 : 1; /* RW */
- unsigned long lb_irq_int_9 : 1; /* RW */
- unsigned long lb_irq_int_10 : 1; /* RW */
- unsigned long lb_irq_int_11 : 1; /* RW */
- unsigned long lb_irq_int_12 : 1; /* RW */
- unsigned long lb_irq_int_13 : 1; /* RW */
- unsigned long lb_irq_int_14 : 1; /* RW */
- unsigned long lb_irq_int_15 : 1; /* RW */
- unsigned long l1_nmi_int : 1; /* RW */
- unsigned long stop_clock : 1; /* RW */
- unsigned long asic_to_l1 : 1; /* RW */
- unsigned long l1_to_asic : 1; /* RW */
- unsigned long ltc_int : 1; /* RW */
- unsigned long la_seq_trigger : 1; /* RW */
- unsigned long rsvd_45_63 : 19; /* */
- } s;
-};
-
-/* ========================================================================= */
/* UVH_NODE_ID */
/* ========================================================================= */
#define UVH_NODE_ID 0x0UL
@@ -1112,26 +857,6 @@ union uvh_rh_gam_alias210_redirect_config_2_mmr_u {
};
/* ========================================================================= */
-/* UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR */
-/* ========================================================================= */
-#define UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR 0x1600020UL
-
-#define UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR_BASE_SHFT 26
-#define UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL
-#define UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
-#define UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
-
-union uvh_rh_gam_cfg_overlay_config_mmr_u {
- unsigned long v;
- struct uvh_rh_gam_cfg_overlay_config_mmr_s {
- unsigned long rsvd_0_25: 26; /* */
- unsigned long base : 20; /* RW */
- unsigned long rsvd_46_62: 17; /* */
- unsigned long enable : 1; /* RW */
- } s;
-};
-
-/* ========================================================================= */
/* UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR */
/* ========================================================================= */
#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL
@@ -1263,101 +988,6 @@ union uvh_rtc1_int_config_u {
};
/* ========================================================================= */
-/* UVH_RTC2_INT_CONFIG */
-/* ========================================================================= */
-#define UVH_RTC2_INT_CONFIG 0x61600UL
-
-#define UVH_RTC2_INT_CONFIG_VECTOR_SHFT 0
-#define UVH_RTC2_INT_CONFIG_VECTOR_MASK 0x00000000000000ffUL
-#define UVH_RTC2_INT_CONFIG_DM_SHFT 8
-#define UVH_RTC2_INT_CONFIG_DM_MASK 0x0000000000000700UL
-#define UVH_RTC2_INT_CONFIG_DESTMODE_SHFT 11
-#define UVH_RTC2_INT_CONFIG_DESTMODE_MASK 0x0000000000000800UL
-#define UVH_RTC2_INT_CONFIG_STATUS_SHFT 12
-#define UVH_RTC2_INT_CONFIG_STATUS_MASK 0x0000000000001000UL
-#define UVH_RTC2_INT_CONFIG_P_SHFT 13
-#define UVH_RTC2_INT_CONFIG_P_MASK 0x0000000000002000UL
-#define UVH_RTC2_INT_CONFIG_T_SHFT 15
-#define UVH_RTC2_INT_CONFIG_T_MASK 0x0000000000008000UL
-#define UVH_RTC2_INT_CONFIG_M_SHFT 16
-#define UVH_RTC2_INT_CONFIG_M_MASK 0x0000000000010000UL
-#define UVH_RTC2_INT_CONFIG_APIC_ID_SHFT 32
-#define UVH_RTC2_INT_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
-
-union uvh_rtc2_int_config_u {
- unsigned long v;
- struct uvh_rtc2_int_config_s {
- unsigned long vector_ : 8; /* RW */
- unsigned long dm : 3; /* RW */
- unsigned long destmode : 1; /* RW */
- unsigned long status : 1; /* RO */
- unsigned long p : 1; /* RO */
- unsigned long rsvd_14 : 1; /* */
- unsigned long t : 1; /* RO */
- unsigned long m : 1; /* RW */
- unsigned long rsvd_17_31: 15; /* */
- unsigned long apic_id : 32; /* RW */
- } s;
-};
-
-/* ========================================================================= */
-/* UVH_RTC3_INT_CONFIG */
-/* ========================================================================= */
-#define UVH_RTC3_INT_CONFIG 0x61640UL
-
-#define UVH_RTC3_INT_CONFIG_VECTOR_SHFT 0
-#define UVH_RTC3_INT_CONFIG_VECTOR_MASK 0x00000000000000ffUL
-#define UVH_RTC3_INT_CONFIG_DM_SHFT 8
-#define UVH_RTC3_INT_CONFIG_DM_MASK 0x0000000000000700UL
-#define UVH_RTC3_INT_CONFIG_DESTMODE_SHFT 11
-#define UVH_RTC3_INT_CONFIG_DESTMODE_MASK 0x0000000000000800UL
-#define UVH_RTC3_INT_CONFIG_STATUS_SHFT 12
-#define UVH_RTC3_INT_CONFIG_STATUS_MASK 0x0000000000001000UL
-#define UVH_RTC3_INT_CONFIG_P_SHFT 13
-#define UVH_RTC3_INT_CONFIG_P_MASK 0x0000000000002000UL
-#define UVH_RTC3_INT_CONFIG_T_SHFT 15
-#define UVH_RTC3_INT_CONFIG_T_MASK 0x0000000000008000UL
-#define UVH_RTC3_INT_CONFIG_M_SHFT 16
-#define UVH_RTC3_INT_CONFIG_M_MASK 0x0000000000010000UL
-#define UVH_RTC3_INT_CONFIG_APIC_ID_SHFT 32
-#define UVH_RTC3_INT_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
-
-union uvh_rtc3_int_config_u {
- unsigned long v;
- struct uvh_rtc3_int_config_s {
- unsigned long vector_ : 8; /* RW */
- unsigned long dm : 3; /* RW */
- unsigned long destmode : 1; /* RW */
- unsigned long status : 1; /* RO */
- unsigned long p : 1; /* RO */
- unsigned long rsvd_14 : 1; /* */
- unsigned long t : 1; /* RO */
- unsigned long m : 1; /* RW */
- unsigned long rsvd_17_31: 15; /* */
- unsigned long apic_id : 32; /* RW */
- } s;
-};
-
-/* ========================================================================= */
-/* UVH_RTC_INC_RATIO */
-/* ========================================================================= */
-#define UVH_RTC_INC_RATIO 0x350000UL
-
-#define UVH_RTC_INC_RATIO_FRACTION_SHFT 0
-#define UVH_RTC_INC_RATIO_FRACTION_MASK 0x00000000000fffffUL
-#define UVH_RTC_INC_RATIO_RATIO_SHFT 20
-#define UVH_RTC_INC_RATIO_RATIO_MASK 0x0000000000700000UL
-
-union uvh_rtc_inc_ratio_u {
- unsigned long v;
- struct uvh_rtc_inc_ratio_s {
- unsigned long fraction : 20; /* RW */
- unsigned long ratio : 3; /* RW */
- unsigned long rsvd_23_63: 41; /* */
- } s;
-};
-
-/* ========================================================================= */
/* UVH_SI_ADDR_MAP_CONFIG */
/* ========================================================================= */
#define UVH_SI_ADDR_MAP_CONFIG 0xc80000UL
diff --git a/arch/x86/include/asm/vmware.h b/arch/x86/include/asm/vmware.h
deleted file mode 100644
index e49ed6d2fd4e..000000000000
--- a/arch/x86/include/asm/vmware.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Copyright (C) 2008, VMware, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- */
-#ifndef ASM_X86__VMWARE_H
-#define ASM_X86__VMWARE_H
-
-extern void vmware_platform_setup(void);
-extern int vmware_platform(void);
-extern void vmware_set_feature_bits(struct cpuinfo_x86 *c);
-
-#endif
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index fb9a080740ec..9e6779f7cf2d 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -25,6 +25,8 @@
*
*/
+#include <linux/types.h>
+
/*
* Definitions of Primary Processor-Based VM-Execution Controls.
*/
@@ -120,6 +122,8 @@ enum vmcs_field {
GUEST_IA32_DEBUGCTL_HIGH = 0x00002803,
GUEST_IA32_PAT = 0x00002804,
GUEST_IA32_PAT_HIGH = 0x00002805,
+ GUEST_IA32_EFER = 0x00002806,
+ GUEST_IA32_EFER_HIGH = 0x00002807,
GUEST_PDPTR0 = 0x0000280a,
GUEST_PDPTR0_HIGH = 0x0000280b,
GUEST_PDPTR1 = 0x0000280c,
@@ -130,6 +134,8 @@ enum vmcs_field {
GUEST_PDPTR3_HIGH = 0x00002811,
HOST_IA32_PAT = 0x00002c00,
HOST_IA32_PAT_HIGH = 0x00002c01,
+ HOST_IA32_EFER = 0x00002c02,
+ HOST_IA32_EFER_HIGH = 0x00002c03,
PIN_BASED_VM_EXEC_CONTROL = 0x00004000,
CPU_BASED_VM_EXEC_CONTROL = 0x00004002,
EXCEPTION_BITMAP = 0x00004004,
@@ -394,6 +400,10 @@ enum vmcs_field {
#define ASM_VMX_INVEPT ".byte 0x66, 0x0f, 0x38, 0x80, 0x08"
#define ASM_VMX_INVVPID ".byte 0x66, 0x0f, 0x38, 0x81, 0x08"
-
+struct vmx_msr_entry {
+ u32 index;
+ u32 reserved;
+ u64 value;
+} __aligned(16);
#endif
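
The vmx_msr_entry layout added above matches the 16-byte entry format used by the VM-entry/VM-exit MSR load and store areas. A hedged sketch of filling one slot follows; MSR_EFER is assumed to come from <asm/msr-index.h>, and nothing below is part of this patch:

#include <asm/msr-index.h>
#include <asm/vmx.h>

/* Fill one MSR-area slot so that IA32_EFER (0xc0000080) is loaded with
 * the given value on the next VM entry. Purely illustrative. */
static void vmx_fill_efer_slot(struct vmx_msr_entry *slot, u64 efer)
{
	slot->index    = MSR_EFER;
	slot->reserved = 0;	/* reserved field must stay zero */
	slot->value    = efer;
}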
diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
index ddc04ccad03b..2c4390cae228 100644
--- a/arch/x86/include/asm/xsave.h
+++ b/arch/x86/include/asm/xsave.h
@@ -37,8 +37,9 @@ extern int check_for_xstate(struct i387_fxsave_struct __user *buf,
void __user *fpstate,
struct _fpx_sw_bytes *sw);
-static inline int xrstor_checking(struct xsave_struct *fx)
+static inline int fpu_xrstor_checking(struct fpu *fpu)
{
+ struct xsave_struct *fx = &fpu->state->xsave;
int err;
asm volatile("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n\t"
@@ -110,12 +111,12 @@ static inline void xrstor_state(struct xsave_struct *fx, u64 mask)
: "memory");
}
-static inline void xsave(struct task_struct *tsk)
+static inline void fpu_xsave(struct fpu *fpu)
{
/* This, however, we can work around by forcing the compiler to select
an addressing mode that doesn't require extended registers. */
__asm__ __volatile__(".byte " REX_PREFIX "0x0f,0xae,0x27"
- : : "D" (&(tsk->thread.xstate->xsave)),
+ : : "D" (&(fpu->state->xsave)),
"a" (-1), "d"(-1) : "memory");
}
#endif
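
A minimal call-site sketch for the renamed helper, assuming the task's FPU state is reachable as tsk->thread.fpu, as the fpu-based signatures above suggest; this is not code from the patch:

#include <linux/sched.h>
#include <asm/xsave.h>

/* Save a task's extended state through the new fpu-based API instead of
 * the old xsave(tsk) call. */
static inline void save_task_xstate(struct task_struct *tsk)
{
	fpu_xsave(&tsk->thread.fpu);
}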
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 4c58352209e0..e77b22083721 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -47,8 +47,6 @@ obj-$(CONFIG_X86_TRAMPOLINE) += trampoline.o
obj-y += process.o
obj-y += i387.o xsave.o
obj-y += ptrace.o
-obj-$(CONFIG_X86_DS) += ds.o
-obj-$(CONFIG_X86_DS_SELFTEST) += ds_selftest.o
obj-$(CONFIG_X86_32) += tls.o
obj-$(CONFIG_IA32_EMULATION) += tls.o
obj-y += step.o
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index cd40aba6aa95..60cc4058ed5f 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -63,7 +63,6 @@ EXPORT_SYMBOL(acpi_disabled);
int acpi_noirq; /* skip ACPI IRQ initialization */
int acpi_pci_disabled; /* skip ACPI PCI scan and IRQ initialization */
EXPORT_SYMBOL(acpi_pci_disabled);
-int acpi_ht __initdata = 1; /* enable HT */
int acpi_lapic;
int acpi_ioapic;
@@ -94,6 +93,53 @@ enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_PIC;
/*
+ * ISA irqs by default are the first 16 gsis but can be
+ * any gsi as specified by an interrupt source override.
+ */
+static u32 isa_irq_to_gsi[NR_IRQS_LEGACY] __read_mostly = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
+};
+
+static unsigned int gsi_to_irq(unsigned int gsi)
+{
+ unsigned int irq = gsi + NR_IRQS_LEGACY;
+ unsigned int i;
+
+ for (i = 0; i < NR_IRQS_LEGACY; i++) {
+ if (isa_irq_to_gsi[i] == gsi) {
+ return i;
+ }
+ }
+
+ /* Provide an identity mapping of gsi == irq
+ * except on truly weird platforms that have
+ * non-ISA IRQs in the first 16 GSIs.
+ */
+ if (gsi >= NR_IRQS_LEGACY)
+ irq = gsi;
+ else
+ irq = gsi_end + 1 + gsi;
+
+ return irq;
+}
+
+static u32 irq_to_gsi(int irq)
+{
+ unsigned int gsi;
+
+ if (irq < NR_IRQS_LEGACY)
+ gsi = isa_irq_to_gsi[irq];
+ else if (irq <= gsi_end)
+ gsi = irq;
+ else if (irq <= (gsi_end + NR_IRQS_LEGACY))
+ gsi = irq - gsi_end;
+ else
+ gsi = 0xffffffff;
+
+ return gsi;
+}
+
+/*
* Temporarily use the virtual area starting from FIX_IO_APIC_BASE_END,
* to map the target physical address. The problem is that set_fixmap()
* provides a single page, and it is possible that the page is not
@@ -313,7 +359,7 @@ acpi_parse_ioapic(struct acpi_subtable_header * header, const unsigned long end)
/*
* Parse Interrupt Source Override for the ACPI SCI
*/
-static void __init acpi_sci_ioapic_setup(u32 gsi, u16 polarity, u16 trigger)
+static void __init acpi_sci_ioapic_setup(u8 bus_irq, u16 polarity, u16 trigger, u32 gsi)
{
if (trigger == 0) /* compatible SCI trigger is level */
trigger = 3;
@@ -333,7 +379,7 @@ static void __init acpi_sci_ioapic_setup(u32 gsi, u16 polarity, u16 trigger)
* If GSI is < 16, this will update its flags,
* else it will create a new mp_irqs[] entry.
*/
- mp_override_legacy_irq(gsi, polarity, trigger, gsi);
+ mp_override_legacy_irq(bus_irq, polarity, trigger, gsi);
/*
* stash over-ride to indicate we've been here
@@ -357,9 +403,10 @@ acpi_parse_int_src_ovr(struct acpi_subtable_header * header,
acpi_table_print_madt_entry(header);
if (intsrc->source_irq == acpi_gbl_FADT.sci_interrupt) {
- acpi_sci_ioapic_setup(intsrc->global_irq,
+ acpi_sci_ioapic_setup(intsrc->source_irq,
intsrc->inti_flags & ACPI_MADT_POLARITY_MASK,
- (intsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2);
+ (intsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2,
+ intsrc->global_irq);
return 0;
}
@@ -448,7 +495,7 @@ void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger)
int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
{
- *irq = gsi;
+ *irq = gsi_to_irq(gsi);
#ifdef CONFIG_X86_IO_APIC
if (acpi_irq_model == ACPI_IRQ_MODEL_IOAPIC)
@@ -458,6 +505,14 @@ int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
return 0;
}
+int acpi_isa_irq_to_gsi(unsigned isa_irq, u32 *gsi)
+{
+ if (isa_irq >= 16)
+ return -1;
+ *gsi = irq_to_gsi(isa_irq);
+ return 0;
+}
+
/*
* success: return IRQ number (>=0)
* failure: return < 0
@@ -482,7 +537,7 @@ int acpi_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity)
plat_gsi = mp_register_gsi(dev, gsi, trigger, polarity);
}
#endif
- irq = plat_gsi;
+ irq = gsi_to_irq(plat_gsi);
return irq;
}
@@ -867,29 +922,6 @@ static int __init acpi_parse_madt_lapic_entries(void)
extern int es7000_plat;
#endif
-int __init acpi_probe_gsi(void)
-{
- int idx;
- int gsi;
- int max_gsi = 0;
-
- if (acpi_disabled)
- return 0;
-
- if (!acpi_ioapic)
- return 0;
-
- max_gsi = 0;
- for (idx = 0; idx < nr_ioapics; idx++) {
- gsi = mp_gsi_routing[idx].gsi_end;
-
- if (gsi > max_gsi)
- max_gsi = gsi;
- }
-
- return max_gsi + 1;
-}
-
static void assign_to_mp_irq(struct mpc_intsrc *m,
struct mpc_intsrc *mp_irq)
{
@@ -947,13 +979,13 @@ void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi)
mp_irq.dstirq = pin; /* INTIN# */
save_mp_irq(&mp_irq);
+
+ isa_irq_to_gsi[bus_irq] = gsi;
}
void __init mp_config_acpi_legacy_irqs(void)
{
int i;
- int ioapic;
- unsigned int dstapic;
struct mpc_intsrc mp_irq;
#if defined (CONFIG_MCA) || defined (CONFIG_EISA)
@@ -974,19 +1006,27 @@ void __init mp_config_acpi_legacy_irqs(void)
#endif
/*
- * Locate the IOAPIC that manages the ISA IRQs (0-15).
- */
- ioapic = mp_find_ioapic(0);
- if (ioapic < 0)
- return;
- dstapic = mp_ioapics[ioapic].apicid;
-
- /*
* Use the default configuration for the IRQs 0-15. Unless
* overridden by (MADT) interrupt source override entries.
*/
for (i = 0; i < 16; i++) {
+ int ioapic, pin;
+ unsigned int dstapic;
int idx;
+ u32 gsi;
+
+ /* Locate the gsi that irq i maps to. */
+ if (acpi_isa_irq_to_gsi(i, &gsi))
+ continue;
+
+ /*
+ * Locate the IOAPIC that manages the ISA IRQ.
+ */
+ ioapic = mp_find_ioapic(gsi);
+ if (ioapic < 0)
+ continue;
+ pin = mp_find_ioapic_pin(ioapic, gsi);
+ dstapic = mp_ioapics[ioapic].apicid;
for (idx = 0; idx < mp_irq_entries; idx++) {
struct mpc_intsrc *irq = mp_irqs + idx;
@@ -996,7 +1036,7 @@ void __init mp_config_acpi_legacy_irqs(void)
break;
/* Do we already have a mapping for this IOAPIC pin */
- if (irq->dstapic == dstapic && irq->dstirq == i)
+ if (irq->dstapic == dstapic && irq->dstirq == pin)
break;
}
@@ -1011,7 +1051,7 @@ void __init mp_config_acpi_legacy_irqs(void)
mp_irq.dstapic = dstapic;
mp_irq.irqtype = mp_INT;
mp_irq.srcbusirq = i; /* Identity mapped */
- mp_irq.dstirq = i;
+ mp_irq.dstirq = pin;
save_mp_irq(&mp_irq);
}
@@ -1076,11 +1116,6 @@ int mp_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity)
ioapic_pin = mp_find_ioapic_pin(ioapic, gsi);
-#ifdef CONFIG_X86_32
- if (ioapic_renumber_irq)
- gsi = ioapic_renumber_irq(ioapic, gsi);
-#endif
-
if (ioapic_pin > MP_MAX_IOAPIC_PIN) {
printk(KERN_ERR "Invalid reference to IOAPIC pin "
"%d-%d\n", mp_ioapics[ioapic].apicid,
@@ -1094,7 +1129,7 @@ int mp_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity)
set_io_apic_irq_attr(&irq_attr, ioapic, ioapic_pin,
trigger == ACPI_EDGE_SENSITIVE ? 0 : 1,
polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
- io_apic_set_pci_routing(dev, gsi, &irq_attr);
+ io_apic_set_pci_routing(dev, gsi_to_irq(gsi), &irq_attr);
return gsi;
}
@@ -1154,7 +1189,8 @@ static int __init acpi_parse_madt_ioapic_entries(void)
* pretend we got one so we can set the SCI flags.
*/
if (!acpi_sci_override_gsi)
- acpi_sci_ioapic_setup(acpi_gbl_FADT.sci_interrupt, 0, 0);
+ acpi_sci_ioapic_setup(acpi_gbl_FADT.sci_interrupt, 0, 0,
+ acpi_gbl_FADT.sci_interrupt);
/* Fill in identity legacy mappings where no override */
mp_config_acpi_legacy_irqs();
@@ -1464,9 +1500,8 @@ void __init acpi_boot_table_init(void)
/*
* If acpi_disabled, bail out
- * One exception: acpi=ht continues far enough to enumerate LAPICs
*/
- if (acpi_disabled && !acpi_ht)
+ if (acpi_disabled)
return;
/*
@@ -1497,9 +1532,8 @@ int __init early_acpi_boot_init(void)
{
/*
* If acpi_disabled, bail out
- * One exception: acpi=ht continues far enough to enumerate LAPICs
*/
- if (acpi_disabled && !acpi_ht)
+ if (acpi_disabled)
return 1;
/*
@@ -1517,9 +1551,8 @@ int __init acpi_boot_init(void)
/*
* If acpi_disabled, bail out
- * One exception: acpi=ht continues far enough to enumerate LAPICs
*/
- if (acpi_disabled && !acpi_ht)
+ if (acpi_disabled)
return 1;
acpi_table_parse(ACPI_SIG_BOOT, acpi_parse_sbf);
@@ -1554,21 +1587,12 @@ static int __init parse_acpi(char *arg)
/* acpi=force to over-ride black-list */
else if (strcmp(arg, "force") == 0) {
acpi_force = 1;
- acpi_ht = 1;
acpi_disabled = 0;
}
/* acpi=strict disables out-of-spec workarounds */
else if (strcmp(arg, "strict") == 0) {
acpi_strict = 1;
}
- /* Limit ACPI just to boot-time to enable HT */
- else if (strcmp(arg, "ht") == 0) {
- if (!acpi_force) {
- printk(KERN_WARNING "acpi=ht will be removed in Linux-2.6.35\n");
- disable_acpi();
- }
- acpi_ht = 1;
- }
/* acpi=rsdt use RSDT instead of XSDT */
else if (strcmp(arg, "rsdt") == 0) {
acpi_rsdt_forced = 1;
@@ -1576,6 +1600,10 @@ static int __init parse_acpi(char *arg)
/* "acpi=noirq" disables ACPI interrupt routing */
else if (strcmp(arg, "noirq") == 0) {
acpi_noirq_set();
+ }
+ /* "acpi=copy_dsdt" copys DSDT */
+ else if (strcmp(arg, "copy_dsdt") == 0) {
+ acpi_gbl_copy_dsdt_locally = 1;
} else {
/* Core will printk when we return error. */
return -EINVAL;
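
With isa_irq_to_gsi[] and the gsi_to_irq()/irq_to_gsi() helpers above, legacy IRQs and GSIs are no longer assumed identical: after the common MADT timer override that routes ISA IRQ 0 to GSI 2, isa_irq_to_gsi[0] becomes 2, so acpi_gsi_to_irq(2, &irq) yields IRQ 0, while GSIs at or above NR_IRQS_LEGACY keep their identity mapping. A small, hypothetical caller of the newly exported acpi_isa_irq_to_gsi(), assuming its declaration lives in <linux/acpi.h>:

#include <linux/acpi.h>
#include <linux/init.h>
#include <linux/kernel.h>

/* Illustrative only: report which GSI the RTC's legacy IRQ 8 ended up on. */
static int __init rtc_gsi_report(void)
{
	u32 gsi;

	if (acpi_isa_irq_to_gsi(8, &gsi))
		return -ENODEV;		/* no legacy mapping recorded */

	pr_info("RTC: ISA IRQ 8 maps to GSI %u\n", gsi);
	return 0;
}
late_initcall(rtc_gsi_report);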
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index f9961034e557..82e508677b91 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -162,8 +162,6 @@ static int __init acpi_sleep_setup(char *str)
#endif
if (strncmp(str, "old_ordering", 12) == 0)
acpi_old_suspend_ordering();
- if (strncmp(str, "sci_force_enable", 16) == 0)
- acpi_set_sci_en_on_resume();
str = strchr(str, ',');
if (str != NULL)
str += strspn(str, ", \t");
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 1a160d5d44d0..70237732a6c7 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -194,7 +194,7 @@ static void __init_or_module add_nops(void *insns, unsigned int len)
}
extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
-extern u8 *__smp_locks[], *__smp_locks_end[];
+extern s32 __smp_locks[], __smp_locks_end[];
static void *text_poke_early(void *addr, const void *opcode, size_t len);
/* Replace instructions with better alternatives for this CPU type.
@@ -235,37 +235,41 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
#ifdef CONFIG_SMP
-static void alternatives_smp_lock(u8 **start, u8 **end, u8 *text, u8 *text_end)
+static void alternatives_smp_lock(const s32 *start, const s32 *end,
+ u8 *text, u8 *text_end)
{
- u8 **ptr;
+ const s32 *poff;
mutex_lock(&text_mutex);
- for (ptr = start; ptr < end; ptr++) {
- if (*ptr < text)
- continue;
- if (*ptr > text_end)
+ for (poff = start; poff < end; poff++) {
+ u8 *ptr = (u8 *)poff + *poff;
+
+ if (!*poff || ptr < text || ptr >= text_end)
continue;
/* turn DS segment override prefix into lock prefix */
- text_poke(*ptr, ((unsigned char []){0xf0}), 1);
+ if (*ptr == 0x3e)
+ text_poke(ptr, ((unsigned char []){0xf0}), 1);
};
mutex_unlock(&text_mutex);
}
-static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end)
+static void alternatives_smp_unlock(const s32 *start, const s32 *end,
+ u8 *text, u8 *text_end)
{
- u8 **ptr;
+ const s32 *poff;
if (noreplace_smp)
return;
mutex_lock(&text_mutex);
- for (ptr = start; ptr < end; ptr++) {
- if (*ptr < text)
- continue;
- if (*ptr > text_end)
+ for (poff = start; poff < end; poff++) {
+ u8 *ptr = (u8 *)poff + *poff;
+
+ if (!*poff || ptr < text || ptr >= text_end)
continue;
/* turn lock prefix into DS segment override prefix */
- text_poke(*ptr, ((unsigned char []){0x3E}), 1);
+ if (*ptr == 0xf0)
+ text_poke(ptr, ((unsigned char []){0x3E}), 1);
};
mutex_unlock(&text_mutex);
}
@@ -276,8 +280,8 @@ struct smp_alt_module {
char *name;
/* ptrs to lock prefixes */
- u8 **locks;
- u8 **locks_end;
+ const s32 *locks;
+ const s32 *locks_end;
/* .text segment, needed to avoid patching init code ;) */
u8 *text;
@@ -398,16 +402,19 @@ void alternatives_smp_switch(int smp)
int alternatives_text_reserved(void *start, void *end)
{
struct smp_alt_module *mod;
- u8 **ptr;
+ const s32 *poff;
u8 *text_start = start;
u8 *text_end = end;
list_for_each_entry(mod, &smp_alt_modules, next) {
if (mod->text > text_end || mod->text_end < text_start)
continue;
- for (ptr = mod->locks; ptr < mod->locks_end; ptr++)
- if (text_start <= *ptr && text_end >= *ptr)
+ for (poff = mod->locks; poff < mod->locks_end; poff++) {
+ const u8 *ptr = (const u8 *)poff + *poff;
+
+ if (text_start <= ptr && text_end > ptr)
return 1;
+ }
}
return 0;
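
The .smp_locks section now holds signed 32-bit offsets relative to each entry's own address rather than absolute pointers, so every entry is resolved as ptr = (u8 *)poff + *poff; an entry at 0xffffffff81a00000 holding -0x400000, for example, describes the lock byte at 0xffffffff81600000. A one-line helper restating that arithmetic, illustrative and not part of the patch:

/* Resolve a self-relative .smp_locks entry to the byte it patches. */
static inline u8 *smp_lock_target(const s32 *entry)
{
	return (u8 *)entry + *entry;
}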
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index e5a4a1e01618..c02cc692985c 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -51,6 +51,7 @@
#include <asm/smp.h>
#include <asm/mce.h>
#include <asm/kvm_para.h>
+#include <asm/tsc.h>
unsigned int num_processors;
@@ -1151,8 +1152,13 @@ static void __cpuinit lapic_setup_esr(void)
*/
void __cpuinit setup_local_APIC(void)
{
- unsigned int value;
- int i, j;
+ unsigned int value, queued;
+ int i, j, acked = 0;
+ unsigned long long tsc = 0, ntsc;
+ long long max_loops = cpu_khz;
+
+ if (cpu_has_tsc)
+ rdtscll(tsc);
if (disable_apic) {
arch_disable_smp_support();
@@ -1204,13 +1210,32 @@ void __cpuinit setup_local_APIC(void)
* the interrupt. Hence a vector might get locked. It was noticed
* for timer irq (vector 0x31). Issue an extra EOI to clear ISR.
*/
- for (i = APIC_ISR_NR - 1; i >= 0; i--) {
- value = apic_read(APIC_ISR + i*0x10);
- for (j = 31; j >= 0; j--) {
- if (value & (1<<j))
- ack_APIC_irq();
+ do {
+ queued = 0;
+ for (i = APIC_ISR_NR - 1; i >= 0; i--)
+ queued |= apic_read(APIC_IRR + i*0x10);
+
+ for (i = APIC_ISR_NR - 1; i >= 0; i--) {
+ value = apic_read(APIC_ISR + i*0x10);
+ for (j = 31; j >= 0; j--) {
+ if (value & (1<<j)) {
+ ack_APIC_irq();
+ acked++;
+ }
+ }
}
- }
+ if (acked > 256) {
+ printk(KERN_ERR "LAPIC pending interrupts after %d EOI\n",
+ acked);
+ break;
+ }
+ if (cpu_has_tsc) {
+ rdtscll(ntsc);
+ max_loops = (cpu_khz << 10) - (ntsc - tsc);
+ } else
+ max_loops--;
+ } while (queued && max_loops > 0);
+ WARN_ON(max_loops <= 0);
/*
* Now that we are all set up, enable the APIC
diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c
index 03ba1b895f5e..425e53a87feb 100644
--- a/arch/x86/kernel/apic/es7000_32.c
+++ b/arch/x86/kernel/apic/es7000_32.c
@@ -131,24 +131,6 @@ int es7000_plat;
static unsigned int base;
-static int
-es7000_rename_gsi(int ioapic, int gsi)
-{
- if (es7000_plat == ES7000_ZORRO)
- return gsi;
-
- if (!base) {
- int i;
- for (i = 0; i < nr_ioapics; i++)
- base += nr_ioapic_registers[i];
- }
-
- if (!ioapic && (gsi < 16))
- gsi += base;
-
- return gsi;
-}
-
static int __cpuinit wakeup_secondary_cpu_via_mip(int cpu, unsigned long eip)
{
unsigned long vect = 0, psaival = 0;
@@ -190,7 +172,6 @@ static void setup_unisys(void)
es7000_plat = ES7000_ZORRO;
else
es7000_plat = ES7000_CLASSIC;
- ioapic_renumber_irq = es7000_rename_gsi;
}
/*
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index eb2789c3f721..33f3563a2a52 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -89,6 +89,9 @@ int nr_ioapics;
/* IO APIC gsi routing info */
struct mp_ioapic_gsi mp_gsi_routing[MAX_IO_APICS];
+/* The last gsi number used */
+u32 gsi_end;
+
/* MP IRQ source entries */
struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES];
@@ -1013,10 +1016,9 @@ static inline int irq_trigger(int idx)
return MPBIOS_trigger(idx);
}
-int (*ioapic_renumber_irq)(int ioapic, int irq);
static int pin_2_irq(int idx, int apic, int pin)
{
- int irq, i;
+ int irq;
int bus = mp_irqs[idx].srcbus;
/*
@@ -1028,18 +1030,12 @@ static int pin_2_irq(int idx, int apic, int pin)
if (test_bit(bus, mp_bus_not_pci)) {
irq = mp_irqs[idx].srcbusirq;
} else {
- /*
- * PCI IRQs are mapped in order
- */
- i = irq = 0;
- while (i < apic)
- irq += nr_ioapic_registers[i++];
- irq += pin;
- /*
- * For MPS mode, so far only needed by ES7000 platform
- */
- if (ioapic_renumber_irq)
- irq = ioapic_renumber_irq(apic, irq);
+ u32 gsi = mp_gsi_routing[apic].gsi_base + pin;
+
+ if (gsi >= NR_IRQS_LEGACY)
+ irq = gsi;
+ else
+ irq = gsi_end + 1 + gsi;
}
#ifdef CONFIG_X86_32
@@ -1950,20 +1946,8 @@ static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
void __init enable_IO_APIC(void)
{
- union IO_APIC_reg_01 reg_01;
int i8259_apic, i8259_pin;
int apic;
- unsigned long flags;
-
- /*
- * The number of IO-APIC IRQ registers (== #pins):
- */
- for (apic = 0; apic < nr_ioapics; apic++) {
- raw_spin_lock_irqsave(&ioapic_lock, flags);
- reg_01.raw = io_apic_read(apic, 1);
- raw_spin_unlock_irqrestore(&ioapic_lock, flags);
- nr_ioapic_registers[apic] = reg_01.bits.entries+1;
- }
if (!legacy_pic->nr_legacy_irqs)
return;
@@ -3858,27 +3842,20 @@ int __init io_apic_get_redir_entries (int ioapic)
reg_01.raw = io_apic_read(ioapic, 1);
raw_spin_unlock_irqrestore(&ioapic_lock, flags);
- return reg_01.bits.entries;
+ /* The register returns the maximum redirection entry index
+ * supported, which is one less than the total number of redirection
+ * entries.
+ */
+ return reg_01.bits.entries + 1;
}
void __init probe_nr_irqs_gsi(void)
{
- int nr = 0;
+ int nr;
- nr = acpi_probe_gsi();
- if (nr > nr_irqs_gsi) {
+ nr = gsi_end + 1 + NR_IRQS_LEGACY;
+ if (nr > nr_irqs_gsi)
nr_irqs_gsi = nr;
- } else {
- /* for acpi=off or acpi is not compiled in */
- int idx;
-
- nr = 0;
- for (idx = 0; idx < nr_ioapics; idx++)
- nr += io_apic_get_redir_entries(idx) + 1;
-
- if (nr > nr_irqs_gsi)
- nr_irqs_gsi = nr;
- }
printk(KERN_DEBUG "nr_irqs_gsi: %d\n", nr_irqs_gsi);
}
@@ -4085,22 +4062,27 @@ int __init io_apic_get_version(int ioapic)
return reg_01.bits.version;
}
-int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity)
+int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity)
{
- int i;
+ int ioapic, pin, idx;
if (skip_ioapic_setup)
return -1;
- for (i = 0; i < mp_irq_entries; i++)
- if (mp_irqs[i].irqtype == mp_INT &&
- mp_irqs[i].srcbusirq == bus_irq)
- break;
- if (i >= mp_irq_entries)
+ ioapic = mp_find_ioapic(gsi);
+ if (ioapic < 0)
return -1;
- *trigger = irq_trigger(i);
- *polarity = irq_polarity(i);
+ pin = mp_find_ioapic_pin(ioapic, gsi);
+ if (pin < 0)
+ return -1;
+
+ idx = find_irq_entry(ioapic, pin, mp_INT);
+ if (idx < 0)
+ return -1;
+
+ *trigger = irq_trigger(idx);
+ *polarity = irq_polarity(idx);
return 0;
}
@@ -4241,7 +4223,7 @@ void __init ioapic_insert_resources(void)
}
}
-int mp_find_ioapic(int gsi)
+int mp_find_ioapic(u32 gsi)
{
int i = 0;
@@ -4256,7 +4238,7 @@ int mp_find_ioapic(int gsi)
return -1;
}
-int mp_find_ioapic_pin(int ioapic, int gsi)
+int mp_find_ioapic_pin(int ioapic, u32 gsi)
{
if (WARN_ON(ioapic == -1))
return -1;
@@ -4284,6 +4266,7 @@ static int bad_ioapic(unsigned long address)
void __init mp_register_ioapic(int id, u32 address, u32 gsi_base)
{
int idx = 0;
+ int entries;
if (bad_ioapic(address))
return;
@@ -4302,9 +4285,17 @@ void __init mp_register_ioapic(int id, u32 address, u32 gsi_base)
* Build basic GSI lookup table to facilitate gsi->io_apic lookups
* and to prevent reprogramming of IOAPIC pins (PCI GSIs).
*/
+ entries = io_apic_get_redir_entries(idx);
mp_gsi_routing[idx].gsi_base = gsi_base;
- mp_gsi_routing[idx].gsi_end = gsi_base +
- io_apic_get_redir_entries(idx);
+ mp_gsi_routing[idx].gsi_end = gsi_base + entries - 1;
+
+ /*
+ * The number of IO-APIC IRQ registers (== #pins):
+ */
+ nr_ioapic_registers[idx] = entries;
+
+ if (mp_gsi_routing[idx].gsi_end > gsi_end)
+ gsi_end = mp_gsi_routing[idx].gsi_end;
printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%x, "
"GSI %d-%d\n", idx, mp_ioapics[idx].apicid,
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index c085d52dbaf2..e46f98f36e31 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -735,9 +735,6 @@ void __init uv_system_init(void)
uv_node_to_blade[nid] = blade;
uv_cpu_to_blade[cpu] = blade;
max_pnode = max(pnode, max_pnode);
-
- printk(KERN_DEBUG "UV: cpu %d, apicid 0x%x, pnode %d, nid %d, lcpu %d, blade %d\n",
- cpu, apicid, pnode, nid, lcpu, blade);
}
/* Add blade/pnode info for nodes without cpus */
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index 031aa887b0eb..c4f9182ca3ac 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -1224,7 +1224,7 @@ static void reinit_timer(void)
#ifdef INIT_TIMER_AFTER_SUSPEND
unsigned long flags;
- spin_lock_irqsave(&i8253_lock, flags);
+ raw_spin_lock_irqsave(&i8253_lock, flags);
/* set the clock to HZ */
outb_pit(0x34, PIT_MODE); /* binary, mode 2, LSB/MSB, ch 0 */
udelay(10);
@@ -1232,7 +1232,7 @@ static void reinit_timer(void)
udelay(10);
outb_pit(LATCH >> 8, PIT_CH0); /* MSB */
udelay(10);
- spin_unlock_irqrestore(&i8253_lock, flags);
+ raw_spin_unlock_irqrestore(&i8253_lock, flags);
#endif
}
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index c202b62f3671..3a785da34b6f 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -14,7 +14,7 @@ CFLAGS_common.o := $(nostackp)
obj-y := intel_cacheinfo.o addon_cpuid_features.o
obj-y += proc.o capflags.o powerflags.o common.o
-obj-y += vmware.o hypervisor.o sched.o
+obj-y += vmware.o hypervisor.o sched.o mshyperv.o
obj-$(CONFIG_X86_32) += bugs.o cmpxchg.o
obj-$(CONFIG_X86_64) += bugs_64.o
diff --git a/arch/x86/kernel/cpu/addon_cpuid_features.c b/arch/x86/kernel/cpu/addon_cpuid_features.c
index 97ad79cdf688..10fa5684a662 100644
--- a/arch/x86/kernel/cpu/addon_cpuid_features.c
+++ b/arch/x86/kernel/cpu/addon_cpuid_features.c
@@ -30,12 +30,14 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
const struct cpuid_bit *cb;
static const struct cpuid_bit __cpuinitconst cpuid_bits[] = {
- { X86_FEATURE_IDA, CR_EAX, 1, 0x00000006 },
- { X86_FEATURE_ARAT, CR_EAX, 2, 0x00000006 },
- { X86_FEATURE_NPT, CR_EDX, 0, 0x8000000a },
- { X86_FEATURE_LBRV, CR_EDX, 1, 0x8000000a },
- { X86_FEATURE_SVML, CR_EDX, 2, 0x8000000a },
- { X86_FEATURE_NRIPS, CR_EDX, 3, 0x8000000a },
+ { X86_FEATURE_IDA, CR_EAX, 1, 0x00000006 },
+ { X86_FEATURE_ARAT, CR_EAX, 2, 0x00000006 },
+ { X86_FEATURE_APERFMPERF, CR_ECX, 0, 0x00000006 },
+ { X86_FEATURE_CPB, CR_EDX, 9, 0x80000007 },
+ { X86_FEATURE_NPT, CR_EDX, 0, 0x8000000a },
+ { X86_FEATURE_LBRV, CR_EDX, 1, 0x8000000a },
+ { X86_FEATURE_SVML, CR_EDX, 2, 0x8000000a },
+ { X86_FEATURE_NRIPS, CR_EDX, 3, 0x8000000a },
{ 0, 0, 0, 0 }
};
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 01a265212395..c39576cb3018 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -86,7 +86,7 @@ static void __init check_fpu(void)
static void __init check_hlt(void)
{
- if (paravirt_enabled())
+ if (boot_cpu_data.x86 >= 5 || paravirt_enabled())
return;
printk(KERN_INFO "Checking 'hlt' instruction... ");
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 4868e4a951ee..68e4a6f2211e 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1084,6 +1084,20 @@ static void clear_all_debug_regs(void)
}
}
+#ifdef CONFIG_KGDB
+/*
+ * Restore debug regs if using kgdbwait and you have a kernel debugger
+ * connection established.
+ */
+static void dbg_restore_debug_regs(void)
+{
+ if (unlikely(kgdb_connected && arch_kgdb_ops.correct_hw_break))
+ arch_kgdb_ops.correct_hw_break();
+}
+#else /* ! CONFIG_KGDB */
+#define dbg_restore_debug_regs()
+#endif /* ! CONFIG_KGDB */
+
/*
* cpu_init() initializes state that is per-CPU. Some data is already
* initialized (naturally) in the bootstrap process, such as the GDT
@@ -1107,9 +1121,9 @@ void __cpuinit cpu_init(void)
oist = &per_cpu(orig_ist, cpu);
#ifdef CONFIG_NUMA
- if (cpu != 0 && percpu_read(node_number) == 0 &&
- cpu_to_node(cpu) != NUMA_NO_NODE)
- percpu_write(node_number, cpu_to_node(cpu));
+ if (cpu != 0 && percpu_read(numa_node) == 0 &&
+ early_cpu_to_node(cpu) != NUMA_NO_NODE)
+ set_numa_node(early_cpu_to_node(cpu));
#endif
me = current;
@@ -1174,18 +1188,8 @@ void __cpuinit cpu_init(void)
load_TR_desc();
load_LDT(&init_mm.context);
-#ifdef CONFIG_KGDB
- /*
- * If the kgdb is connected no debug regs should be altered. This
- * is only applicable when KGDB and a KGDB I/O module are built
- * into the kernel and you are using early debugging with
- * kgdbwait. KGDB will control the kernel HW breakpoint registers.
- */
- if (kgdb_connected && arch_kgdb_ops.correct_hw_break)
- arch_kgdb_ops.correct_hw_break();
- else
-#endif
- clear_all_debug_regs();
+ clear_all_debug_regs();
+ dbg_restore_debug_regs();
fpu_init();
@@ -1239,14 +1243,12 @@ void __cpuinit cpu_init(void)
#endif
clear_all_debug_regs();
+ dbg_restore_debug_regs();
/*
* Force FPU initialization:
*/
- if (cpu_has_xsave)
- current_thread_info()->status = TS_XSAVE;
- else
- current_thread_info()->status = 0;
+ current_thread_info()->status = 0;
clear_used_math();
mxcsr_feature_mask_init();
diff --git a/arch/x86/kernel/cpu/cpufreq/Makefile b/arch/x86/kernel/cpu/cpufreq/Makefile
index 1840c0a5170b..bd54bf67e6fb 100644
--- a/arch/x86/kernel/cpu/cpufreq/Makefile
+++ b/arch/x86/kernel/cpu/cpufreq/Makefile
@@ -2,8 +2,8 @@
# K8 systems. ACPI is preferred to all other hardware-specific drivers.
# speedstep-* is preferred over p4-clockmod.
-obj-$(CONFIG_X86_POWERNOW_K8) += powernow-k8.o
-obj-$(CONFIG_X86_ACPI_CPUFREQ) += acpi-cpufreq.o
+obj-$(CONFIG_X86_POWERNOW_K8) += powernow-k8.o mperf.o
+obj-$(CONFIG_X86_ACPI_CPUFREQ) += acpi-cpufreq.o mperf.o
obj-$(CONFIG_X86_PCC_CPUFREQ) += pcc-cpufreq.o
obj-$(CONFIG_X86_POWERNOW_K6) += powernow-k6.o
obj-$(CONFIG_X86_POWERNOW_K7) += powernow-k7.o
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index 459168083b77..1d3cddaa40ee 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -46,6 +46,7 @@
#include <asm/msr.h>
#include <asm/processor.h>
#include <asm/cpufeature.h>
+#include "mperf.h"
#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \
"acpi-cpufreq", msg)
@@ -71,8 +72,6 @@ struct acpi_cpufreq_data {
static DEFINE_PER_CPU(struct acpi_cpufreq_data *, acfreq_data);
-static DEFINE_PER_CPU(struct aperfmperf, acfreq_old_perf);
-
/* acpi_perf_data is a pointer to percpu data. */
static struct acpi_processor_performance *acpi_perf_data;
@@ -240,45 +239,6 @@ static u32 get_cur_val(const struct cpumask *mask)
return cmd.val;
}
-/* Called via smp_call_function_single(), on the target CPU */
-static void read_measured_perf_ctrs(void *_cur)
-{
- struct aperfmperf *am = _cur;
-
- get_aperfmperf(am);
-}
-
-/*
- * Return the measured active (C0) frequency on this CPU since last call
- * to this function.
- * Input: cpu number
- * Return: Average CPU frequency in terms of max frequency (zero on error)
- *
- * We use IA32_MPERF and IA32_APERF MSRs to get the measured performance
- * over a period of time, while CPU is in C0 state.
- * IA32_MPERF counts at the rate of max advertised frequency
- * IA32_APERF counts at the rate of actual CPU frequency
- * Only IA32_APERF/IA32_MPERF ratio is architecturally defined and
- * no meaning should be associated with absolute values of these MSRs.
- */
-static unsigned int get_measured_perf(struct cpufreq_policy *policy,
- unsigned int cpu)
-{
- struct aperfmperf perf;
- unsigned long ratio;
- unsigned int retval;
-
- if (smp_call_function_single(cpu, read_measured_perf_ctrs, &perf, 1))
- return 0;
-
- ratio = calc_aperfmperf_ratio(&per_cpu(acfreq_old_perf, cpu), &perf);
- per_cpu(acfreq_old_perf, cpu) = perf;
-
- retval = (policy->cpuinfo.max_freq * ratio) >> APERFMPERF_SHIFT;
-
- return retval;
-}
-
static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
{
struct acpi_cpufreq_data *data = per_cpu(acfreq_data, cpu);
@@ -702,7 +662,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
/* Check for APERF/MPERF support in hardware */
if (cpu_has(c, X86_FEATURE_APERFMPERF))
- acpi_cpufreq_driver.getavg = get_measured_perf;
+ acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf;
dprintk("CPU%u - ACPI performance management activated.\n", cpu);
for (i = 0; i < perf->state_count; i++)
diff --git a/arch/x86/kernel/cpu/cpufreq/mperf.c b/arch/x86/kernel/cpu/cpufreq/mperf.c
new file mode 100644
index 000000000000..911e193018ae
--- /dev/null
+++ b/arch/x86/kernel/cpu/cpufreq/mperf.c
@@ -0,0 +1,51 @@
+#include <linux/kernel.h>
+#include <linux/smp.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/cpufreq.h>
+#include <linux/slab.h>
+
+#include "mperf.h"
+
+static DEFINE_PER_CPU(struct aperfmperf, acfreq_old_perf);
+
+/* Called via smp_call_function_single(), on the target CPU */
+static void read_measured_perf_ctrs(void *_cur)
+{
+ struct aperfmperf *am = _cur;
+
+ get_aperfmperf(am);
+}
+
+/*
+ * Return the measured active (C0) frequency on this CPU since last call
+ * to this function.
+ * Input: cpu number
+ * Return: Average CPU frequency in terms of max frequency (zero on error)
+ *
+ * We use IA32_MPERF and IA32_APERF MSRs to get the measured performance
+ * over a period of time, while CPU is in C0 state.
+ * IA32_MPERF counts at the rate of max advertised frequency
+ * IA32_APERF counts at the rate of actual CPU frequency
+ * Only IA32_APERF/IA32_MPERF ratio is architecturally defined and
+ * no meaning should be associated with absolute values of these MSRs.
+ */
+unsigned int cpufreq_get_measured_perf(struct cpufreq_policy *policy,
+ unsigned int cpu)
+{
+ struct aperfmperf perf;
+ unsigned long ratio;
+ unsigned int retval;
+
+ if (smp_call_function_single(cpu, read_measured_perf_ctrs, &perf, 1))
+ return 0;
+
+ ratio = calc_aperfmperf_ratio(&per_cpu(acfreq_old_perf, cpu), &perf);
+ per_cpu(acfreq_old_perf, cpu) = perf;
+
+ retval = (policy->cpuinfo.max_freq * ratio) >> APERFMPERF_SHIFT;
+
+ return retval;
+}
+EXPORT_SYMBOL_GPL(cpufreq_get_measured_perf);
+MODULE_LICENSE("GPL");
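
Only the ratio of the two counter deltas matters, never their absolute values. A standalone arithmetic sketch with assumed numbers, not kernel code and not taken from any real machine:

#include <stdio.h>

int main(void)
{
	/* max advertised frequency and per-sample counter deltas */
	unsigned long long max_khz = 3000000;	/* 3 GHz */
	unsigned long long daperf  = 8000000;	/* APERF delta */
	unsigned long long dmperf  = 10000000;	/* MPERF delta */

	/* average active frequency: 3,000,000 * 8/10 = 2,400,000 kHz */
	printf("%llu kHz\n", max_khz * daperf / dmperf);
	return 0;
}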
diff --git a/arch/x86/kernel/cpu/cpufreq/mperf.h b/arch/x86/kernel/cpu/cpufreq/mperf.h
new file mode 100644
index 000000000000..5dbf2950dc22
--- /dev/null
+++ b/arch/x86/kernel/cpu/cpufreq/mperf.h
@@ -0,0 +1,9 @@
+/*
+ * (c) 2010 Advanced Micro Devices, Inc.
+ * Your use of this code is subject to the terms and conditions of the
+ * GNU general public license version 2. See "COPYING" or
+ * http://www.gnu.org/licenses/gpl.html
+ */
+
+unsigned int cpufreq_get_measured_perf(struct cpufreq_policy *policy,
+ unsigned int cpu);
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index b6215b9798e2..7ec2123838e6 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -1,6 +1,5 @@
-
/*
- * (c) 2003-2006 Advanced Micro Devices, Inc.
+ * (c) 2003-2010 Advanced Micro Devices, Inc.
* Your use of this code is subject to the terms and conditions of the
* GNU general public license version 2. See "COPYING" or
* http://www.gnu.org/licenses/gpl.html
@@ -46,6 +45,7 @@
#define PFX "powernow-k8: "
#define VERSION "version 2.20.00"
#include "powernow-k8.h"
+#include "mperf.h"
/* serialize freq changes */
static DEFINE_MUTEX(fidvid_mutex);
@@ -54,6 +54,12 @@ static DEFINE_PER_CPU(struct powernow_k8_data *, powernow_data);
static int cpu_family = CPU_OPTERON;
+/* core performance boost */
+static bool cpb_capable, cpb_enabled;
+static struct msr __percpu *msrs;
+
+static struct cpufreq_driver cpufreq_amd64_driver;
+
#ifndef CONFIG_SMP
static inline const struct cpumask *cpu_core_mask(int cpu)
{
@@ -1249,6 +1255,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
struct powernow_k8_data *data;
struct init_on_cpu init_on_cpu;
int rc;
+ struct cpuinfo_x86 *c = &cpu_data(pol->cpu);
if (!cpu_online(pol->cpu))
return -ENODEV;
@@ -1323,6 +1330,10 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
return -EINVAL;
}
+ /* Check for APERF/MPERF support in hardware */
+ if (cpu_has(c, X86_FEATURE_APERFMPERF))
+ cpufreq_amd64_driver.getavg = cpufreq_get_measured_perf;
+
cpufreq_frequency_table_get_attr(data->powernow_table, pol->cpu);
if (cpu_family == CPU_HW_PSTATE)
@@ -1394,8 +1405,77 @@ out:
return khz;
}
+static void _cpb_toggle_msrs(bool t)
+{
+ int cpu;
+
+ get_online_cpus();
+
+ rdmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs);
+
+ for_each_cpu(cpu, cpu_online_mask) {
+ struct msr *reg = per_cpu_ptr(msrs, cpu);
+ if (t)
+ reg->l &= ~BIT(25);
+ else
+ reg->l |= BIT(25);
+ }
+ wrmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs);
+
+ put_online_cpus();
+}
+
+/*
+ * Switch on/off core performance boosting.
+ *
+ * 0=disable
+ * 1=enable.
+ */
+static void cpb_toggle(bool t)
+{
+ if (!cpb_capable)
+ return;
+
+ if (t && !cpb_enabled) {
+ cpb_enabled = true;
+ _cpb_toggle_msrs(t);
+ printk(KERN_INFO PFX "Core Boosting enabled.\n");
+ } else if (!t && cpb_enabled) {
+ cpb_enabled = false;
+ _cpb_toggle_msrs(t);
+ printk(KERN_INFO PFX "Core Boosting disabled.\n");
+ }
+}
+
+static ssize_t store_cpb(struct cpufreq_policy *policy, const char *buf,
+ size_t count)
+{
+ int ret = -EINVAL;
+ unsigned long val = 0;
+
+ ret = strict_strtoul(buf, 10, &val);
+ if (!ret && (val == 0 || val == 1) && cpb_capable)
+ cpb_toggle(val);
+ else
+ return -EINVAL;
+
+ return count;
+}
+
+static ssize_t show_cpb(struct cpufreq_policy *policy, char *buf)
+{
+ return sprintf(buf, "%u\n", cpb_enabled);
+}
+
+#define define_one_rw(_name) \
+static struct freq_attr _name = \
+__ATTR(_name, 0644, show_##_name, store_##_name)
+
+define_one_rw(cpb);
+
static struct freq_attr *powernow_k8_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
+ &cpb,
NULL,
};
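[Editor's illustration, not part of the patch: the cpb attribute above hinges on bit 25 of MSR_K7_HWCR acting as a per-core boost-disable flag, so a set bit means Core Performance Boost is off on that core. A minimal sketch of querying one CPU, assuming only the kernel MSR accessor used above; the demo_* name is hypothetical.]

	/* Illustrative only: report whether boosting is enabled on one CPU. */
	static bool demo_cpb_enabled_on_cpu(unsigned int cpu)
	{
		u32 lo, hi;

		if (rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi))
			return false;	/* could not read the MSR */

		return !(lo & BIT(25));	/* bit 25 set == boosting disabled */
	}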
@@ -1411,10 +1491,51 @@ static struct cpufreq_driver cpufreq_amd64_driver = {
.attr = powernow_k8_attr,
};
+/*
+ * Clear the boost-disable flag on the CPU_DOWN path so that this cpu
+ * cannot block the remaining ones from boosting. On the CPU_UP path we
+ * simply keep the boost-disable flag in sync with the current global
+ * state.
+ */
+static int cpb_notify(struct notifier_block *nb, unsigned long action,
+ void *hcpu)
+{
+ unsigned cpu = (long)hcpu;
+ u32 lo, hi;
+
+ switch (action) {
+ case CPU_UP_PREPARE:
+ case CPU_UP_PREPARE_FROZEN:
+
+ if (!cpb_enabled) {
+ rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
+ lo |= BIT(25);
+ wrmsr_on_cpu(cpu, MSR_K7_HWCR, lo, hi);
+ }
+ break;
+
+ case CPU_DOWN_PREPARE:
+ case CPU_DOWN_PREPARE_FROZEN:
+ rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
+ lo &= ~BIT(25);
+ wrmsr_on_cpu(cpu, MSR_K7_HWCR, lo, hi);
+ break;
+
+ default:
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block cpb_nb = {
+ .notifier_call = cpb_notify,
+};
+
/* driver entry point for init */
static int __cpuinit powernowk8_init(void)
{
- unsigned int i, supported_cpus = 0;
+ unsigned int i, supported_cpus = 0, cpu;
for_each_online_cpu(i) {
int rc;
@@ -1423,15 +1544,36 @@ static int __cpuinit powernowk8_init(void)
supported_cpus++;
}
- if (supported_cpus == num_online_cpus()) {
- printk(KERN_INFO PFX "Found %d %s "
- "processors (%d cpu cores) (" VERSION ")\n",
- num_online_nodes(),
- boot_cpu_data.x86_model_id, supported_cpus);
- return cpufreq_register_driver(&cpufreq_amd64_driver);
+ if (supported_cpus != num_online_cpus())
+ return -ENODEV;
+
+ printk(KERN_INFO PFX "Found %d %s (%d cpu cores) (" VERSION ")\n",
+ num_online_nodes(), boot_cpu_data.x86_model_id, supported_cpus);
+
+ if (boot_cpu_has(X86_FEATURE_CPB)) {
+
+ cpb_capable = true;
+
+ register_cpu_notifier(&cpb_nb);
+
+ msrs = msrs_alloc();
+ if (!msrs) {
+ printk(KERN_ERR "%s: Error allocating msrs!\n", __func__);
+ return -ENOMEM;
+ }
+
+ rdmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs);
+
+ for_each_cpu(cpu, cpu_online_mask) {
+ struct msr *reg = per_cpu_ptr(msrs, cpu);
+ cpb_enabled |= !(!!(reg->l & BIT(25)));
+ }
+
+ printk(KERN_INFO PFX "Core Performance Boosting: %s.\n",
+ (cpb_enabled ? "on" : "off"));
}
- return -ENODEV;
+ return cpufreq_register_driver(&cpufreq_amd64_driver);
}
/* driver entry point for term */
@@ -1439,6 +1581,13 @@ static void __exit powernowk8_exit(void)
{
dprintk("exit\n");
+ if (boot_cpu_has(X86_FEATURE_CPB)) {
+ msrs_free(msrs);
+ msrs = NULL;
+
+ unregister_cpu_notifier(&cpb_nb);
+ }
+
cpufreq_unregister_driver(&cpufreq_amd64_driver);
}
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h b/arch/x86/kernel/cpu/cpufreq/powernow-k8.h
index 02ce824073cb..df3529b1c02d 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.h
@@ -5,7 +5,6 @@
* http://www.gnu.org/licenses/gpl.html
*/
-
enum pstate {
HW_PSTATE_INVALID = 0xff,
HW_PSTATE_0 = 0,
@@ -55,7 +54,6 @@ struct powernow_k8_data {
struct cpumask *available_cores;
};
-
/* processor's cpuid instruction support */
#define CPUID_PROCESSOR_SIGNATURE 1 /* function 1 */
#define CPUID_XFAM 0x0ff00000 /* extended family */
diff --git a/arch/x86/kernel/cpu/hypervisor.c b/arch/x86/kernel/cpu/hypervisor.c
index 08be922de33a..dd531cc56a8f 100644
--- a/arch/x86/kernel/cpu/hypervisor.c
+++ b/arch/x86/kernel/cpu/hypervisor.c
@@ -21,37 +21,55 @@
*
*/
+#include <linux/module.h>
#include <asm/processor.h>
-#include <asm/vmware.h>
#include <asm/hypervisor.h>
-static inline void __cpuinit
-detect_hypervisor_vendor(struct cpuinfo_x86 *c)
+/*
+ * Hypervisor detection order. This is specified explicitly here because
+ * some hypervisors might implement compatibility modes for other
+ * hypervisors and therefore need to be detected in a specific sequence.

+ */
+static const __initconst struct hypervisor_x86 * const hypervisors[] =
{
- if (vmware_platform())
- c->x86_hyper_vendor = X86_HYPER_VENDOR_VMWARE;
- else
- c->x86_hyper_vendor = X86_HYPER_VENDOR_NONE;
-}
+ &x86_hyper_vmware,
+ &x86_hyper_ms_hyperv,
+};
-static inline void __cpuinit
-hypervisor_set_feature_bits(struct cpuinfo_x86 *c)
+const struct hypervisor_x86 *x86_hyper;
+EXPORT_SYMBOL(x86_hyper);
+
+static inline void __init
+detect_hypervisor_vendor(void)
{
- if (boot_cpu_data.x86_hyper_vendor == X86_HYPER_VENDOR_VMWARE) {
- vmware_set_feature_bits(c);
- return;
+ const struct hypervisor_x86 *h, * const *p;
+
+ for (p = hypervisors; p < hypervisors + ARRAY_SIZE(hypervisors); p++) {
+ h = *p;
+ if (h->detect()) {
+ x86_hyper = h;
+ printk(KERN_INFO "Hypervisor detected: %s\n", h->name);
+ break;
+ }
}
}
void __cpuinit init_hypervisor(struct cpuinfo_x86 *c)
{
- detect_hypervisor_vendor(c);
- hypervisor_set_feature_bits(c);
+ if (x86_hyper && x86_hyper->set_cpu_features)
+ x86_hyper->set_cpu_features(c);
}
void __init init_hypervisor_platform(void)
{
+
+ detect_hypervisor_vendor();
+
+ if (!x86_hyper)
+ return;
+
init_hypervisor(&boot_cpu_data);
- if (boot_cpu_data.x86_hyper_vendor == X86_HYPER_VENDOR_VMWARE)
- vmware_platform_setup();
+
+ if (x86_hyper->init_platform)
+ x86_hyper->init_platform();
}
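[Editor's illustration, not part of the patch: with the table-driven scheme above, adding a hypervisor means providing one descriptor and listing it in hypervisors[] in the desired probe order. The sketch below uses entirely hypothetical demo_* names; only the callback fields (.name, .detect, .init_platform, .set_cpu_features) are taken from the code above.]

	static bool __init demo_hv_detect(void)
	{
		/* probe a CPUID leaf or I/O port here; return true when found */
		return false;
	}

	static void __init demo_hv_init_platform(void)
	{
		/* one-time platform setup, runs only if demo_hv_detect() succeeded */
	}

	static const struct hypervisor_x86 x86_hyper_demo = {
		.name		= "Demo Hypervisor",
		.detect		= demo_hv_detect,
		.init_platform	= demo_hv_init_platform,
	};

	/* ...and &x86_hyper_demo would then be appended to hypervisors[] above. */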
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 1366c7cfd483..85f69cdeae10 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -12,7 +12,6 @@
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/msr.h>
-#include <asm/ds.h>
#include <asm/bugs.h>
#include <asm/cpu.h>
@@ -373,12 +372,6 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
}
- if (c->cpuid_level > 6) {
- unsigned ecx = cpuid_ecx(6);
- if (ecx & 0x01)
- set_cpu_cap(c, X86_FEATURE_APERFMPERF);
- }
-
if (cpu_has_xmm2)
set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
if (cpu_has_ds) {
@@ -388,7 +381,6 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
set_cpu_cap(c, X86_FEATURE_BTS);
if (!(l1 & (1<<12)))
set_cpu_cap(c, X86_FEATURE_PEBS);
- ds_init_intel(c);
}
if (c->x86 == 6 && c->x86_model == 29 && cpu_has_clflush)
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index b3eeb66c0a51..33eae2062cf5 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -148,13 +148,19 @@ union _cpuid4_leaf_ecx {
u32 full;
};
+struct amd_l3_cache {
+ struct pci_dev *dev;
+ bool can_disable;
+ unsigned indices;
+ u8 subcaches[4];
+};
+
struct _cpuid4_info {
union _cpuid4_leaf_eax eax;
union _cpuid4_leaf_ebx ebx;
union _cpuid4_leaf_ecx ecx;
unsigned long size;
- bool can_disable;
- unsigned int l3_indices;
+ struct amd_l3_cache *l3;
DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
};
@@ -164,8 +170,7 @@ struct _cpuid4_info_regs {
union _cpuid4_leaf_ebx ebx;
union _cpuid4_leaf_ecx ecx;
unsigned long size;
- bool can_disable;
- unsigned int l3_indices;
+ struct amd_l3_cache *l3;
};
unsigned short num_cache_leaves;
@@ -302,87 +307,163 @@ struct _cache_attr {
};
#ifdef CONFIG_CPU_SUP_AMD
-static unsigned int __cpuinit amd_calc_l3_indices(void)
+
+/*
+ * L3 cache descriptors
+ */
+static struct amd_l3_cache **__cpuinitdata l3_caches;
+
+static void __cpuinit amd_calc_l3_indices(struct amd_l3_cache *l3)
{
- /*
- * We're called over smp_call_function_single() and therefore
- * are on the correct cpu.
- */
- int cpu = smp_processor_id();
- int node = cpu_to_node(cpu);
- struct pci_dev *dev = node_to_k8_nb_misc(node);
unsigned int sc0, sc1, sc2, sc3;
u32 val = 0;
- pci_read_config_dword(dev, 0x1C4, &val);
+ pci_read_config_dword(l3->dev, 0x1C4, &val);
/* calculate subcache sizes */
- sc0 = !(val & BIT(0));
- sc1 = !(val & BIT(4));
- sc2 = !(val & BIT(8)) + !(val & BIT(9));
- sc3 = !(val & BIT(12)) + !(val & BIT(13));
+ l3->subcaches[0] = sc0 = !(val & BIT(0));
+ l3->subcaches[1] = sc1 = !(val & BIT(4));
+ l3->subcaches[2] = sc2 = !(val & BIT(8)) + !(val & BIT(9));
+ l3->subcaches[3] = sc3 = !(val & BIT(12)) + !(val & BIT(13));
- return (max(max(max(sc0, sc1), sc2), sc3) << 10) - 1;
+ l3->indices = (max(max(max(sc0, sc1), sc2), sc3) << 10) - 1;
+}
+
+static struct amd_l3_cache * __cpuinit amd_init_l3_cache(int node)
+{
+ struct amd_l3_cache *l3;
+ struct pci_dev *dev = node_to_k8_nb_misc(node);
+
+ l3 = kzalloc(sizeof(struct amd_l3_cache), GFP_ATOMIC);
+ if (!l3) {
+ printk(KERN_WARNING "Error allocating L3 struct\n");
+ return NULL;
+ }
+
+ l3->dev = dev;
+
+ amd_calc_l3_indices(l3);
+
+ return l3;
}
static void __cpuinit
amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
{
- if (index < 3)
+ int node;
+
+ if (boot_cpu_data.x86 != 0x10)
return;
- if (boot_cpu_data.x86 == 0x11)
+ if (index < 3)
return;
/* see errata #382 and #388 */
- if ((boot_cpu_data.x86 == 0x10) &&
- ((boot_cpu_data.x86_model < 0x8) ||
- (boot_cpu_data.x86_mask < 0x1)))
+ if (boot_cpu_data.x86_model < 0x8)
+ return;
+
+ if ((boot_cpu_data.x86_model == 0x8 ||
+ boot_cpu_data.x86_model == 0x9)
+ &&
+ boot_cpu_data.x86_mask < 0x1)
+ return;
+
+ /* not in virtualized environments */
+ if (num_k8_northbridges == 0)
return;
- this_leaf->can_disable = true;
- this_leaf->l3_indices = amd_calc_l3_indices();
+ /*
+ * Strictly speaking, the amount in @size below is leaked since it is
+ * never freed but this is done only on shutdown so it doesn't matter.
+ */
+ if (!l3_caches) {
+ int size = num_k8_northbridges * sizeof(struct amd_l3_cache *);
+
+ l3_caches = kzalloc(size, GFP_ATOMIC);
+ if (!l3_caches)
+ return;
+ }
+
+ node = amd_get_nb_id(smp_processor_id());
+
+ if (!l3_caches[node]) {
+ l3_caches[node] = amd_init_l3_cache(node);
+ l3_caches[node]->can_disable = true;
+ }
+
+ WARN_ON(!l3_caches[node]);
+
+ this_leaf->l3 = l3_caches[node];
}
static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
- unsigned int index)
+ unsigned int slot)
{
- int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
- int node = amd_get_nb_id(cpu);
- struct pci_dev *dev = node_to_k8_nb_misc(node);
+ struct pci_dev *dev = this_leaf->l3->dev;
unsigned int reg = 0;
- if (!this_leaf->can_disable)
+ if (!this_leaf->l3 || !this_leaf->l3->can_disable)
return -EINVAL;
if (!dev)
return -EINVAL;
- pci_read_config_dword(dev, 0x1BC + index * 4, &reg);
+ pci_read_config_dword(dev, 0x1BC + slot * 4, &reg);
return sprintf(buf, "0x%08x\n", reg);
}
-#define SHOW_CACHE_DISABLE(index) \
+#define SHOW_CACHE_DISABLE(slot) \
static ssize_t \
-show_cache_disable_##index(struct _cpuid4_info *this_leaf, char *buf) \
+show_cache_disable_##slot(struct _cpuid4_info *this_leaf, char *buf) \
{ \
- return show_cache_disable(this_leaf, buf, index); \
+ return show_cache_disable(this_leaf, buf, slot); \
}
SHOW_CACHE_DISABLE(0)
SHOW_CACHE_DISABLE(1)
+static void amd_l3_disable_index(struct amd_l3_cache *l3, int cpu,
+ unsigned slot, unsigned long idx)
+{
+ int i;
+
+ idx |= BIT(30);
+
+ /*
+ * disable index in all 4 subcaches
+ */
+ for (i = 0; i < 4; i++) {
+ u32 reg = idx | (i << 20);
+
+ if (!l3->subcaches[i])
+ continue;
+
+ pci_write_config_dword(l3->dev, 0x1BC + slot * 4, reg);
+
+ /*
+ * We need to WBINVD on a core on the node containing the L3
+ * cache whose indices we disable, therefore a simple wbinvd()
+ * is not sufficient.
+ */
+ wbinvd_on_cpu(cpu);
+
+ reg |= BIT(31);
+ pci_write_config_dword(l3->dev, 0x1BC + slot * 4, reg);
+ }
+}
+
+
static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
- const char *buf, size_t count, unsigned int index)
+ const char *buf, size_t count,
+ unsigned int slot)
{
+ struct pci_dev *dev = this_leaf->l3->dev;
int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
- int node = amd_get_nb_id(cpu);
- struct pci_dev *dev = node_to_k8_nb_misc(node);
unsigned long val = 0;
#define SUBCACHE_MASK (3UL << 20)
#define SUBCACHE_INDEX 0xfff
- if (!this_leaf->can_disable)
+ if (!this_leaf->l3 || !this_leaf->l3->can_disable)
return -EINVAL;
if (!capable(CAP_SYS_ADMIN))
@@ -396,26 +477,20 @@ static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
/* do not allow writes outside of allowed bits */
if ((val & ~(SUBCACHE_MASK | SUBCACHE_INDEX)) ||
- ((val & SUBCACHE_INDEX) > this_leaf->l3_indices))
+ ((val & SUBCACHE_INDEX) > this_leaf->l3->indices))
return -EINVAL;
- val |= BIT(30);
- pci_write_config_dword(dev, 0x1BC + index * 4, val);
- /*
- * We need to WBINVD on a core on the node containing the L3 cache which
- * indices we disable therefore a simple wbinvd() is not sufficient.
- */
- wbinvd_on_cpu(cpu);
- pci_write_config_dword(dev, 0x1BC + index * 4, val | BIT(31));
+ amd_l3_disable_index(this_leaf->l3, cpu, slot, val);
+
return count;
}
-#define STORE_CACHE_DISABLE(index) \
+#define STORE_CACHE_DISABLE(slot) \
static ssize_t \
-store_cache_disable_##index(struct _cpuid4_info *this_leaf, \
+store_cache_disable_##slot(struct _cpuid4_info *this_leaf, \
const char *buf, size_t count) \
{ \
- return store_cache_disable(this_leaf, buf, count, index); \
+ return store_cache_disable(this_leaf, buf, count, slot); \
}
STORE_CACHE_DISABLE(0)
STORE_CACHE_DISABLE(1)
@@ -443,8 +518,7 @@ __cpuinit cpuid4_cache_lookup_regs(int index,
if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
amd_cpuid4(index, &eax, &ebx, &ecx);
- if (boot_cpu_data.x86 >= 0x10)
- amd_check_l3_disable(index, this_leaf);
+ amd_check_l3_disable(index, this_leaf);
} else {
cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
}
@@ -701,6 +775,7 @@ static void __cpuinit free_cache_attributes(unsigned int cpu)
for (i = 0; i < num_cache_leaves; i++)
cache_remove_shared_cpu_map(cpu, i);
+ kfree(per_cpu(ici_cpuid4_info, cpu)->l3);
kfree(per_cpu(ici_cpuid4_info, cpu));
per_cpu(ici_cpuid4_info, cpu) = NULL;
}
@@ -985,7 +1060,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
this_leaf = CPUID4_INFO_IDX(cpu, i);
- if (this_leaf->can_disable)
+ if (this_leaf->l3 && this_leaf->l3->can_disable)
ktype_cache.default_attrs = default_l3_attrs;
else
ktype_cache.default_attrs = default_attrs;
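[Editor's illustration, not part of the patch: a self-contained sketch of the same index arithmetic as amd_calc_l3_indices(), with one worked value. The sample register contents are made up; the bit positions mirror the code above.]

	/* Illustrative only: derive the L3 index count from a 0x1C4 value. */
	static unsigned int demo_l3_indices(u32 val)
	{
		unsigned int sc0 = !(val & BIT(0));
		unsigned int sc1 = !(val & BIT(4));
		unsigned int sc2 = !(val & BIT(8))  + !(val & BIT(9));
		unsigned int sc3 = !(val & BIT(12)) + !(val & BIT(13));

		return (max(max(max(sc0, sc1), sc2), sc3) << 10) - 1;
	}

	/*
	 * Example (hypothetical value): demo_l3_indices(0x210) == 2047,
	 * because bits 4 and 9 are set (sc1 == 0, sc2 == 1) while sc3 == 2,
	 * giving max == 2 and therefore (2 << 10) - 1 usable indices.
	 */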
diff --git a/arch/x86/kernel/cpu/mcheck/Makefile b/arch/x86/kernel/cpu/mcheck/Makefile
index 4ac6d48fe11b..bb34b03af252 100644
--- a/arch/x86/kernel/cpu/mcheck/Makefile
+++ b/arch/x86/kernel/cpu/mcheck/Makefile
@@ -7,3 +7,5 @@ obj-$(CONFIG_X86_MCE_THRESHOLD) += threshold.o
obj-$(CONFIG_X86_MCE_INJECT) += mce-inject.o
obj-$(CONFIG_X86_THERMAL_VECTOR) += therm_throt.o
+
+obj-$(CONFIG_ACPI_APEI) += mce-apei.o
diff --git a/arch/x86/kernel/cpu/mcheck/mce-apei.c b/arch/x86/kernel/cpu/mcheck/mce-apei.c
new file mode 100644
index 000000000000..745b54f9be89
--- /dev/null
+++ b/arch/x86/kernel/cpu/mcheck/mce-apei.c
@@ -0,0 +1,138 @@
+/*
+ * Bridge between MCE and APEI
+ *
+ * On some machines, corrected memory errors are reported via the APEI
+ * generic hardware error source (GHES) instead of as corrected Machine
+ * Checks. These corrected memory errors can be reported to user space
+ * through /dev/mcelog by faking a corrected Machine Check, so that
+ * the faulty memory page can be offlined by /sbin/mcelog once the
+ * error count for that page exceeds the threshold.
+ *
+ * For fatal MCE, save MCE record into persistent storage via ERST, so
+ * that the MCE record can be logged after reboot via ERST.
+ *
+ * Copyright 2010 Intel Corp.
+ * Author: Huang Ying <ying.huang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/acpi.h>
+#include <linux/cper.h>
+#include <acpi/apei.h>
+#include <asm/mce.h>
+
+#include "mce-internal.h"
+
+void apei_mce_report_mem_error(int corrected, struct cper_sec_mem_err *mem_err)
+{
+ struct mce m;
+
+ /* Only corrected MC is reported */
+ if (!corrected)
+ return;
+
+ mce_setup(&m);
+ m.bank = 1;
+ /* Fake a memory read corrected error with unknown channel */
+ m.status = MCI_STATUS_VAL | MCI_STATUS_EN | MCI_STATUS_ADDRV | 0x9f;
+ m.addr = mem_err->physical_addr;
+ mce_log(&m);
+ mce_notify_irq();
+}
+EXPORT_SYMBOL_GPL(apei_mce_report_mem_error);
+
+#define CPER_CREATOR_MCE \
+ UUID_LE(0x75a574e3, 0x5052, 0x4b29, 0x8a, 0x8e, 0xbe, 0x2c, \
+ 0x64, 0x90, 0xb8, 0x9d)
+#define CPER_SECTION_TYPE_MCE \
+ UUID_LE(0xfe08ffbe, 0x95e4, 0x4be7, 0xbc, 0x73, 0x40, 0x96, \
+ 0x04, 0x4a, 0x38, 0xfc)
+
+/*
+ * CPER specification (in UEFI specification 2.3 appendix N) requires
+ * byte-packed.
+ */
+struct cper_mce_record {
+ struct cper_record_header hdr;
+ struct cper_section_descriptor sec_hdr;
+ struct mce mce;
+} __packed;
+
+int apei_write_mce(struct mce *m)
+{
+ struct cper_mce_record rcd;
+
+ memset(&rcd, 0, sizeof(rcd));
+ memcpy(rcd.hdr.signature, CPER_SIG_RECORD, CPER_SIG_SIZE);
+ rcd.hdr.revision = CPER_RECORD_REV;
+ rcd.hdr.signature_end = CPER_SIG_END;
+ rcd.hdr.section_count = 1;
+ rcd.hdr.error_severity = CPER_SER_FATAL;
+ /* timestamp, platform_id, partition_id are all invalid */
+ rcd.hdr.validation_bits = 0;
+ rcd.hdr.record_length = sizeof(rcd);
+ rcd.hdr.creator_id = CPER_CREATOR_MCE;
+ rcd.hdr.notification_type = CPER_NOTIFY_MCE;
+ rcd.hdr.record_id = cper_next_record_id();
+ rcd.hdr.flags = CPER_HW_ERROR_FLAGS_PREVERR;
+
+ rcd.sec_hdr.section_offset = (void *)&rcd.mce - (void *)&rcd;
+ rcd.sec_hdr.section_length = sizeof(rcd.mce);
+ rcd.sec_hdr.revision = CPER_SEC_REV;
+ /* fru_id and fru_text are invalid */
+ rcd.sec_hdr.validation_bits = 0;
+ rcd.sec_hdr.flags = CPER_SEC_PRIMARY;
+ rcd.sec_hdr.section_type = CPER_SECTION_TYPE_MCE;
+ rcd.sec_hdr.section_severity = CPER_SER_FATAL;
+
+ memcpy(&rcd.mce, m, sizeof(*m));
+
+ return erst_write(&rcd.hdr);
+}
+
+ssize_t apei_read_mce(struct mce *m, u64 *record_id)
+{
+ struct cper_mce_record rcd;
+ ssize_t len;
+
+ len = erst_read_next(&rcd.hdr, sizeof(rcd));
+ if (len <= 0)
+ return len;
+ /* Cannot skip other records in ERST storage unless we clear them */
+ else if (len != sizeof(rcd) ||
+ uuid_le_cmp(rcd.hdr.creator_id, CPER_CREATOR_MCE)) {
+ if (printk_ratelimit())
+ pr_warning(
+ "MCE-APEI: Can not skip the unknown record in ERST");
+ return -EIO;
+ }
+
+ memcpy(m, &rcd.mce, sizeof(*m));
+ *record_id = rcd.hdr.record_id;
+
+ return sizeof(*m);
+}
+
+/* Check whether there is record in ERST */
+int apei_check_mce(void)
+{
+ return erst_get_record_count();
+}
+
+int apei_clear_mce(u64 record_id)
+{
+ return erst_clear(record_id);
+}
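[Editor's illustration, not part of the patch: how a GHES-style consumer would hand a corrected memory error to the bridge above. Only the apei_mce_report_mem_error() call is the real interface; the helper and the way the CPER section is filled in are hypothetical.]

	static void demo_report_corrected_mem_error(u64 physical_addr)
	{
		struct cper_sec_mem_err mem_err;

		memset(&mem_err, 0, sizeof(mem_err));
		mem_err.physical_addr = physical_addr;

		/* first argument: 1 == corrected; uncorrected errors are ignored above */
		apei_mce_report_mem_error(1, &mem_err);
	}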
diff --git a/arch/x86/kernel/cpu/mcheck/mce-internal.h b/arch/x86/kernel/cpu/mcheck/mce-internal.h
index 32996f9fab67..fefcc69ee8b5 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-internal.h
+++ b/arch/x86/kernel/cpu/mcheck/mce-internal.h
@@ -28,3 +28,26 @@ extern int mce_ser;
extern struct mce_bank *mce_banks;
+#ifdef CONFIG_ACPI_APEI
+int apei_write_mce(struct mce *m);
+ssize_t apei_read_mce(struct mce *m, u64 *record_id);
+int apei_check_mce(void);
+int apei_clear_mce(u64 record_id);
+#else
+static inline int apei_write_mce(struct mce *m)
+{
+ return -EINVAL;
+}
+static inline ssize_t apei_read_mce(struct mce *m, u64 *record_id)
+{
+ return 0;
+}
+static inline int apei_check_mce(void)
+{
+ return 0;
+}
+static inline int apei_clear_mce(u64 record_id)
+{
+ return -EINVAL;
+}
+#endif
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 8a6f0afa767e..707165dbc203 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -264,7 +264,7 @@ static void wait_for_panic(void)
static void mce_panic(char *msg, struct mce *final, char *exp)
{
- int i;
+ int i, apei_err = 0;
if (!fake_panic) {
/*
@@ -287,8 +287,11 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
struct mce *m = &mcelog.entry[i];
if (!(m->status & MCI_STATUS_VAL))
continue;
- if (!(m->status & MCI_STATUS_UC))
+ if (!(m->status & MCI_STATUS_UC)) {
print_mce(m);
+ if (!apei_err)
+ apei_err = apei_write_mce(m);
+ }
}
/* Now print uncorrected but with the final one last */
for (i = 0; i < MCE_LOG_LEN; i++) {
@@ -297,11 +300,17 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
continue;
if (!(m->status & MCI_STATUS_UC))
continue;
- if (!final || memcmp(m, final, sizeof(struct mce)))
+ if (!final || memcmp(m, final, sizeof(struct mce))) {
print_mce(m);
+ if (!apei_err)
+ apei_err = apei_write_mce(m);
+ }
}
- if (final)
+ if (final) {
print_mce(final);
+ if (!apei_err)
+ apei_err = apei_write_mce(final);
+ }
if (cpu_missing)
printk(KERN_EMERG "Some CPUs didn't answer in synchronization\n");
print_mce_tail();
@@ -539,7 +548,7 @@ void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
struct mce m;
int i;
- __get_cpu_var(mce_poll_count)++;
+ percpu_inc(mce_poll_count);
mce_setup(&m);
@@ -934,7 +943,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
atomic_inc(&mce_entry);
- __get_cpu_var(mce_exception_count)++;
+ percpu_inc(mce_exception_count);
if (notify_die(DIE_NMI, "machine check", regs, error_code,
18, SIGKILL) == NOTIFY_STOP)
@@ -1493,6 +1502,43 @@ static void collect_tscs(void *data)
rdtscll(cpu_tsc[smp_processor_id()]);
}
+static int mce_apei_read_done;
+
+/* Collect MCE record of previous boot in persistent storage via APEI ERST. */
+static int __mce_read_apei(char __user **ubuf, size_t usize)
+{
+ int rc;
+ u64 record_id;
+ struct mce m;
+
+ if (usize < sizeof(struct mce))
+ return -EINVAL;
+
+ rc = apei_read_mce(&m, &record_id);
+ /* Error or no more MCE record */
+ if (rc <= 0) {
+ mce_apei_read_done = 1;
+ return rc;
+ }
+ rc = -EFAULT;
+ if (copy_to_user(*ubuf, &m, sizeof(struct mce)))
+ return rc;
+ /*
+ * Ideally the record would only be cleared after it has been
+ * flushed to disk or sent over the network by /sbin/mcelog,
+ * but there is no interface for that yet, so clear it now to
+ * avoid returning it twice.
+ */
+ rc = apei_clear_mce(record_id);
+ if (rc) {
+ mce_apei_read_done = 1;
+ return rc;
+ }
+ *ubuf += sizeof(struct mce);
+
+ return 0;
+}
+
static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
loff_t *off)
{
@@ -1506,15 +1552,19 @@ static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
return -ENOMEM;
mutex_lock(&mce_read_mutex);
+
+ if (!mce_apei_read_done) {
+ err = __mce_read_apei(&buf, usize);
+ if (err || buf != ubuf)
+ goto out;
+ }
+
next = rcu_dereference_check_mce(mcelog.next);
/* Only supports full reads right now */
- if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce)) {
- mutex_unlock(&mce_read_mutex);
- kfree(cpu_tsc);
-
- return -EINVAL;
- }
+ err = -EINVAL;
+ if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce))
+ goto out;
err = 0;
prev = 0;
@@ -1562,10 +1612,15 @@ timeout:
memset(&mcelog.entry[i], 0, sizeof(struct mce));
}
}
+
+ if (err)
+ err = -EFAULT;
+
+out:
mutex_unlock(&mce_read_mutex);
kfree(cpu_tsc);
- return err ? -EFAULT : buf - ubuf;
+ return err ? err : buf - ubuf;
}
static unsigned int mce_poll(struct file *file, poll_table *wait)
@@ -1573,6 +1628,8 @@ static unsigned int mce_poll(struct file *file, poll_table *wait)
poll_wait(file, &mce_wait, wait);
if (rcu_dereference_check_mce(mcelog.next))
return POLLIN | POLLRDNORM;
+ if (!mce_apei_read_done && apei_check_mce())
+ return POLLIN | POLLRDNORM;
return 0;
}
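[Editor's illustration, not part of the patch: how the APEI-first read flow looks from user space, as a hedged sketch. Records persisted via APEI/ERST are returned first, one struct mce per read(), before the regular in-memory log; the buffer size and helper name below are hypothetical.]

	#include <fcntl.h>
	#include <poll.h>
	#include <unistd.h>

	static void demo_drain_mcelog(void)
	{
		struct pollfd pfd;
		char buf[4096];

		pfd.fd = open("/dev/mcelog", O_RDONLY);
		pfd.events = POLLIN;

		if (pfd.fd < 0)
			return;

		/* poll() now also reports readiness when ERST records are pending */
		while (poll(&pfd, 1, 0) > 0 && (pfd.revents & POLLIN)) {
			if (read(pfd.fd, buf, sizeof(buf)) <= 0)
				break;
		}

		close(pfd.fd);
	}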
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index 81c499eceb21..e1a0a3bf9716 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -190,7 +190,7 @@ thermal_throttle_cpu_callback(struct notifier_block *nfb,
mutex_unlock(&therm_cpu_lock);
break;
}
- return err ? NOTIFY_BAD : NOTIFY_OK;
+ return notifier_from_errno(err);
}
static struct notifier_block thermal_throttle_cpu_notifier __cpuinitdata =
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
new file mode 100644
index 000000000000..16f41bbe46b6
--- /dev/null
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -0,0 +1,55 @@
+/*
+ * HyperV Detection code.
+ *
+ * Copyright (C) 2010, Novell, Inc.
+ * Author : K. Y. Srinivasan <ksrinivasan@novell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <asm/processor.h>
+#include <asm/hypervisor.h>
+#include <asm/hyperv.h>
+#include <asm/mshyperv.h>
+
+struct ms_hyperv_info ms_hyperv;
+
+static bool __init ms_hyperv_platform(void)
+{
+ u32 eax;
+ u32 hyp_signature[3];
+
+ if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
+ return false;
+
+ cpuid(HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS,
+ &eax, &hyp_signature[0], &hyp_signature[1], &hyp_signature[2]);
+
+ return eax >= HYPERV_CPUID_MIN &&
+ eax <= HYPERV_CPUID_MAX &&
+ !memcmp("Microsoft Hv", hyp_signature, 12);
+}
+
+static void __init ms_hyperv_init_platform(void)
+{
+ /*
+ * Extract the features and hints
+ */
+ ms_hyperv.features = cpuid_eax(HYPERV_CPUID_FEATURES);
+ ms_hyperv.hints = cpuid_eax(HYPERV_CPUID_ENLIGHTMENT_INFO);
+
+ printk(KERN_INFO "HyperV: features 0x%x, hints 0x%x\n",
+ ms_hyperv.features, ms_hyperv.hints);
+}
+
+const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = {
+ .name = "Microsoft HyperV",
+ .detect = ms_hyperv_platform,
+ .init_platform = ms_hyperv_init_platform,
+};
+EXPORT_SYMBOL(x86_hyper_ms_hyperv);
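[Editor's illustration, not part of the patch: a consumer elsewhere in the kernel could key off the exported detection result roughly as sketched below. x86_hyper, x86_hyper_ms_hyperv and ms_hyperv.features are the real symbols introduced above; the feature bit tested is purely hypothetical.]

	#define DEMO_HV_FEATURE		BIT(3)	/* made-up feature flag */

	static bool demo_running_on_hyperv_with_feature(void)
	{
		if (x86_hyper != &x86_hyper_ms_hyperv)
			return false;

		return !!(ms_hyperv.features & DEMO_HV_FEATURE);
	}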
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index db5bdc8addf8..c77586061bcb 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -31,46 +31,51 @@
#include <asm/nmi.h>
#include <asm/compat.h>
-static u64 perf_event_mask __read_mostly;
+#if 0
+#undef wrmsrl
+#define wrmsrl(msr, val) \
+do { \
+ trace_printk("wrmsrl(%lx, %lx)\n", (unsigned long)(msr),\
+ (unsigned long)(val)); \
+ native_write_msr((msr), (u32)((u64)(val)), \
+ (u32)((u64)(val) >> 32)); \
+} while (0)
+#endif
-/* The maximal number of PEBS events: */
-#define MAX_PEBS_EVENTS 4
+/*
+ * best effort, GUP based copy_from_user() that assumes IRQ or NMI context
+ */
+static unsigned long
+copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
+{
+ unsigned long offset, addr = (unsigned long)from;
+ int type = in_nmi() ? KM_NMI : KM_IRQ0;
+ unsigned long size, len = 0;
+ struct page *page;
+ void *map;
+ int ret;
-/* The size of a BTS record in bytes: */
-#define BTS_RECORD_SIZE 24
+ do {
+ ret = __get_user_pages_fast(addr, 1, 0, &page);
+ if (!ret)
+ break;
-/* The size of a per-cpu BTS buffer in bytes: */
-#define BTS_BUFFER_SIZE (BTS_RECORD_SIZE * 2048)
+ offset = addr & (PAGE_SIZE - 1);
+ size = min(PAGE_SIZE - offset, n - len);
-/* The BTS overflow threshold in bytes from the end of the buffer: */
-#define BTS_OVFL_TH (BTS_RECORD_SIZE * 128)
+ map = kmap_atomic(page, type);
+ memcpy(to, map+offset, size);
+ kunmap_atomic(map, type);
+ put_page(page);
+ len += size;
+ to += size;
+ addr += size;
-/*
- * Bits in the debugctlmsr controlling branch tracing.
- */
-#define X86_DEBUGCTL_TR (1 << 6)
-#define X86_DEBUGCTL_BTS (1 << 7)
-#define X86_DEBUGCTL_BTINT (1 << 8)
-#define X86_DEBUGCTL_BTS_OFF_OS (1 << 9)
-#define X86_DEBUGCTL_BTS_OFF_USR (1 << 10)
+ } while (len < n);
-/*
- * A debug store configuration.
- *
- * We only support architectures that use 64bit fields.
- */
-struct debug_store {
- u64 bts_buffer_base;
- u64 bts_index;
- u64 bts_absolute_maximum;
- u64 bts_interrupt_threshold;
- u64 pebs_buffer_base;
- u64 pebs_index;
- u64 pebs_absolute_maximum;
- u64 pebs_interrupt_threshold;
- u64 pebs_event_reset[MAX_PEBS_EVENTS];
-};
+ return len;
+}
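[Editor's illustration, not part of the patch: with copy_from_user_nmi() now defined this early in the file, a sampling path could use it as sketched below; the surrounding function and buffer size are hypothetical.]

	/* Peek at a few user stack slots from NMI context. */
	static void demo_sample_user_stack(struct pt_regs *regs)
	{
		u64 frame[4];
		unsigned long bytes;

		bytes = copy_from_user_nmi(frame, (void __user *)regs->sp,
					   sizeof(frame));

		/* only the first 'bytes' bytes of 'frame' are valid */
	}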
struct event_constraint {
union {
@@ -89,18 +94,41 @@ struct amd_nb {
struct event_constraint event_constraints[X86_PMC_IDX_MAX];
};
+#define MAX_LBR_ENTRIES 16
+
struct cpu_hw_events {
+ /*
+ * Generic x86 PMC bits
+ */
struct perf_event *events[X86_PMC_IDX_MAX]; /* in counter order */
unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
- unsigned long interrupts;
int enabled;
- struct debug_store *ds;
int n_events;
int n_added;
int assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
u64 tags[X86_PMC_IDX_MAX];
struct perf_event *event_list[X86_PMC_IDX_MAX]; /* in enabled order */
+
+ unsigned int group_flag;
+
+ /*
+ * Intel DebugStore bits
+ */
+ struct debug_store *ds;
+ u64 pebs_enabled;
+
+ /*
+ * Intel LBR bits
+ */
+ int lbr_users;
+ void *lbr_context;
+ struct perf_branch_stack lbr_stack;
+ struct perf_branch_entry lbr_entries[MAX_LBR_ENTRIES];
+
+ /*
+ * AMD specific bits
+ */
struct amd_nb *amd_nb;
};
@@ -114,44 +142,75 @@ struct cpu_hw_events {
#define EVENT_CONSTRAINT(c, n, m) \
__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n))
+/*
+ * Constraint on the Event code.
+ */
#define INTEL_EVENT_CONSTRAINT(c, n) \
- EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVTSEL_MASK)
+ EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)
+/*
+ * Constraint on the Event code + UMask + fixed-mask
+ *
+ * filter mask to validate fixed counter events.
+ * the following filters disqualify for fixed counters:
+ * - inv
+ * - edge
+ * - cnt-mask
+ * The other filters are supported by fixed counters.
+ * The any-thread option is supported starting with v3.
+ */
#define FIXED_EVENT_CONSTRAINT(c, n) \
- EVENT_CONSTRAINT(c, (1ULL << (32+n)), INTEL_ARCH_FIXED_MASK)
+ EVENT_CONSTRAINT(c, (1ULL << (32+n)), X86_RAW_EVENT_MASK)
+
+/*
+ * Constraint on the Event code + UMask
+ */
+#define PEBS_EVENT_CONSTRAINT(c, n) \
+ EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)
#define EVENT_CONSTRAINT_END \
EVENT_CONSTRAINT(0, 0, 0)
#define for_each_event_constraint(e, c) \
- for ((e) = (c); (e)->cmask; (e)++)
+ for ((e) = (c); (e)->weight; (e)++)
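[Editor's illustration, not part of the patch: how the constraint macros above compose. The event codes and counter masks are examples only; note the table is terminated by EVENT_CONSTRAINT_END, whose weight of zero stops for_each_event_constraint().]

	static struct event_constraint demo_event_constraints[] = {
		INTEL_EVENT_CONSTRAINT(0x3c, 0x3),	/* only generic counters 0-1 */
		FIXED_EVENT_CONSTRAINT(0x00c0, 0),	/* retired instructions on fixed counter 0 */
		EVENT_CONSTRAINT_END
	};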
+
+union perf_capabilities {
+ struct {
+ u64 lbr_format : 6;
+ u64 pebs_trap : 1;
+ u64 pebs_arch_reg : 1;
+ u64 pebs_format : 4;
+ u64 smm_freeze : 1;
+ };
+ u64 capabilities;
+};
/*
* struct x86_pmu - generic x86 pmu
*/
struct x86_pmu {
+ /*
+ * Generic x86 PMC bits
+ */
const char *name;
int version;
int (*handle_irq)(struct pt_regs *);
void (*disable_all)(void);
- void (*enable_all)(void);
+ void (*enable_all)(int added);
void (*enable)(struct perf_event *);
void (*disable)(struct perf_event *);
+ int (*hw_config)(struct perf_event *event);
+ int (*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
unsigned eventsel;
unsigned perfctr;
u64 (*event_map)(int);
- u64 (*raw_event)(u64);
int max_events;
- int num_events;
- int num_events_fixed;
- int event_bits;
- u64 event_mask;
+ int num_counters;
+ int num_counters_fixed;
+ int cntval_bits;
+ u64 cntval_mask;
int apic;
u64 max_period;
- u64 intel_ctrl;
- void (*enable_bts)(u64 config);
- void (*disable_bts)(void);
-
struct event_constraint *
(*get_event_constraints)(struct cpu_hw_events *cpuc,
struct perf_event *event);
@@ -159,11 +218,32 @@ struct x86_pmu {
void (*put_event_constraints)(struct cpu_hw_events *cpuc,
struct perf_event *event);
struct event_constraint *event_constraints;
+ void (*quirks)(void);
int (*cpu_prepare)(int cpu);
void (*cpu_starting)(int cpu);
void (*cpu_dying)(int cpu);
void (*cpu_dead)(int cpu);
+
+ /*
+ * Intel Arch Perfmon v2+
+ */
+ u64 intel_ctrl;
+ union perf_capabilities intel_cap;
+
+ /*
+ * Intel DebugStore bits
+ */
+ int bts, pebs;
+ int pebs_record_size;
+ void (*drain_pebs)(struct pt_regs *regs);
+ struct event_constraint *pebs_constraints;
+
+ /*
+ * Intel LBR
+ */
+ unsigned long lbr_tos, lbr_from, lbr_to; /* MSR base regs */
+ int lbr_nr; /* hardware stack size */
};
static struct x86_pmu x86_pmu __read_mostly;
@@ -198,7 +278,7 @@ static u64
x86_perf_event_update(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
- int shift = 64 - x86_pmu.event_bits;
+ int shift = 64 - x86_pmu.cntval_bits;
u64 prev_raw_count, new_raw_count;
int idx = hwc->idx;
s64 delta;
@@ -241,33 +321,32 @@ again:
static atomic_t active_events;
static DEFINE_MUTEX(pmc_reserve_mutex);
+#ifdef CONFIG_X86_LOCAL_APIC
+
static bool reserve_pmc_hardware(void)
{
-#ifdef CONFIG_X86_LOCAL_APIC
int i;
if (nmi_watchdog == NMI_LOCAL_APIC)
disable_lapic_nmi_watchdog();
- for (i = 0; i < x86_pmu.num_events; i++) {
+ for (i = 0; i < x86_pmu.num_counters; i++) {
if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
goto perfctr_fail;
}
- for (i = 0; i < x86_pmu.num_events; i++) {
+ for (i = 0; i < x86_pmu.num_counters; i++) {
if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
goto eventsel_fail;
}
-#endif
return true;
-#ifdef CONFIG_X86_LOCAL_APIC
eventsel_fail:
for (i--; i >= 0; i--)
release_evntsel_nmi(x86_pmu.eventsel + i);
- i = x86_pmu.num_events;
+ i = x86_pmu.num_counters;
perfctr_fail:
for (i--; i >= 0; i--)
@@ -277,128 +356,36 @@ perfctr_fail:
enable_lapic_nmi_watchdog();
return false;
-#endif
}
static void release_pmc_hardware(void)
{
-#ifdef CONFIG_X86_LOCAL_APIC
int i;
- for (i = 0; i < x86_pmu.num_events; i++) {
+ for (i = 0; i < x86_pmu.num_counters; i++) {
release_perfctr_nmi(x86_pmu.perfctr + i);
release_evntsel_nmi(x86_pmu.eventsel + i);
}
if (nmi_watchdog == NMI_LOCAL_APIC)
enable_lapic_nmi_watchdog();
-#endif
-}
-
-static inline bool bts_available(void)
-{
- return x86_pmu.enable_bts != NULL;
-}
-
-static void init_debug_store_on_cpu(int cpu)
-{
- struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
-
- if (!ds)
- return;
-
- wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA,
- (u32)((u64)(unsigned long)ds),
- (u32)((u64)(unsigned long)ds >> 32));
-}
-
-static void fini_debug_store_on_cpu(int cpu)
-{
- if (!per_cpu(cpu_hw_events, cpu).ds)
- return;
-
- wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
-}
-
-static void release_bts_hardware(void)
-{
- int cpu;
-
- if (!bts_available())
- return;
-
- get_online_cpus();
-
- for_each_online_cpu(cpu)
- fini_debug_store_on_cpu(cpu);
-
- for_each_possible_cpu(cpu) {
- struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
-
- if (!ds)
- continue;
-
- per_cpu(cpu_hw_events, cpu).ds = NULL;
-
- kfree((void *)(unsigned long)ds->bts_buffer_base);
- kfree(ds);
- }
-
- put_online_cpus();
}
-static int reserve_bts_hardware(void)
-{
- int cpu, err = 0;
-
- if (!bts_available())
- return 0;
-
- get_online_cpus();
-
- for_each_possible_cpu(cpu) {
- struct debug_store *ds;
- void *buffer;
-
- err = -ENOMEM;
- buffer = kzalloc(BTS_BUFFER_SIZE, GFP_KERNEL);
- if (unlikely(!buffer))
- break;
-
- ds = kzalloc(sizeof(*ds), GFP_KERNEL);
- if (unlikely(!ds)) {
- kfree(buffer);
- break;
- }
-
- ds->bts_buffer_base = (u64)(unsigned long)buffer;
- ds->bts_index = ds->bts_buffer_base;
- ds->bts_absolute_maximum =
- ds->bts_buffer_base + BTS_BUFFER_SIZE;
- ds->bts_interrupt_threshold =
- ds->bts_absolute_maximum - BTS_OVFL_TH;
-
- per_cpu(cpu_hw_events, cpu).ds = ds;
- err = 0;
- }
+#else
- if (err)
- release_bts_hardware();
- else {
- for_each_online_cpu(cpu)
- init_debug_store_on_cpu(cpu);
- }
+static bool reserve_pmc_hardware(void) { return true; }
+static void release_pmc_hardware(void) {}
- put_online_cpus();
+#endif
- return err;
-}
+static int reserve_ds_buffers(void);
+static void release_ds_buffers(void);
static void hw_perf_event_destroy(struct perf_event *event)
{
if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
release_pmc_hardware();
- release_bts_hardware();
+ release_ds_buffers();
mutex_unlock(&pmc_reserve_mutex);
}
}
@@ -441,54 +428,11 @@ set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr)
return 0;
}
-/*
- * Setup the hardware configuration for a given attr_type
- */
-static int __hw_perf_event_init(struct perf_event *event)
+static int x86_setup_perfctr(struct perf_event *event)
{
struct perf_event_attr *attr = &event->attr;
struct hw_perf_event *hwc = &event->hw;
u64 config;
- int err;
-
- if (!x86_pmu_initialized())
- return -ENODEV;
-
- err = 0;
- if (!atomic_inc_not_zero(&active_events)) {
- mutex_lock(&pmc_reserve_mutex);
- if (atomic_read(&active_events) == 0) {
- if (!reserve_pmc_hardware())
- err = -EBUSY;
- else
- err = reserve_bts_hardware();
- }
- if (!err)
- atomic_inc(&active_events);
- mutex_unlock(&pmc_reserve_mutex);
- }
- if (err)
- return err;
-
- event->destroy = hw_perf_event_destroy;
-
- /*
- * Generate PMC IRQs:
- * (keep 'enabled' bit clear for now)
- */
- hwc->config = ARCH_PERFMON_EVENTSEL_INT;
-
- hwc->idx = -1;
- hwc->last_cpu = -1;
- hwc->last_tag = ~0ULL;
-
- /*
- * Count user and OS events unless requested not to.
- */
- if (!attr->exclude_user)
- hwc->config |= ARCH_PERFMON_EVENTSEL_USR;
- if (!attr->exclude_kernel)
- hwc->config |= ARCH_PERFMON_EVENTSEL_OS;
if (!hwc->sample_period) {
hwc->sample_period = x86_pmu.max_period;
@@ -505,16 +449,8 @@ static int __hw_perf_event_init(struct perf_event *event)
return -EOPNOTSUPP;
}
- /*
- * Raw hw_event type provide the config in the hw_event structure
- */
- if (attr->type == PERF_TYPE_RAW) {
- hwc->config |= x86_pmu.raw_event(attr->config);
- if ((hwc->config & ARCH_PERFMON_EVENTSEL_ANY) &&
- perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
- return -EACCES;
+ if (attr->type == PERF_TYPE_RAW)
return 0;
- }
if (attr->type == PERF_TYPE_HW_CACHE)
return set_ext_hw_attr(hwc, attr);
@@ -539,11 +475,11 @@ static int __hw_perf_event_init(struct perf_event *event)
if ((attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
(hwc->sample_period == 1)) {
/* BTS is not supported by this architecture. */
- if (!bts_available())
+ if (!x86_pmu.bts)
return -EOPNOTSUPP;
/* BTS is currently only allowed for user-mode. */
- if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
+ if (!attr->exclude_kernel)
return -EOPNOTSUPP;
}
@@ -552,12 +488,87 @@ static int __hw_perf_event_init(struct perf_event *event)
return 0;
}
+static int x86_pmu_hw_config(struct perf_event *event)
+{
+ if (event->attr.precise_ip) {
+ int precise = 0;
+
+ /* Support for constant skid */
+ if (x86_pmu.pebs)
+ precise++;
+
+ /* Support for IP fixup */
+ if (x86_pmu.lbr_nr)
+ precise++;
+
+ if (event->attr.precise_ip > precise)
+ return -EOPNOTSUPP;
+ }
+
+ /*
+ * Generate PMC IRQs:
+ * (keep 'enabled' bit clear for now)
+ */
+ event->hw.config = ARCH_PERFMON_EVENTSEL_INT;
+
+ /*
+ * Count user and OS events unless requested not to
+ */
+ if (!event->attr.exclude_user)
+ event->hw.config |= ARCH_PERFMON_EVENTSEL_USR;
+ if (!event->attr.exclude_kernel)
+ event->hw.config |= ARCH_PERFMON_EVENTSEL_OS;
+
+ if (event->attr.type == PERF_TYPE_RAW)
+ event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK;
+
+ return x86_setup_perfctr(event);
+}
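[Editor's illustration, not part of the patch: what the precise_ip gate above means for callers. With PEBS present, precise_ip == 1 (constant skid) is accepted; with PEBS and LBR, precise_ip == 2 (IP fixup) is accepted; anything more returns -EOPNOTSUPP. The attribute values below are illustrative.]

	struct perf_event_attr demo_attr = {
		.type		= PERF_TYPE_HARDWARE,
		.config		= PERF_COUNT_HW_CPU_CYCLES,
		.sample_period	= 100000,
		.precise_ip	= 2,	/* 0 = none, 1 = constant skid, 2 = IP fixup */
	};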
+
+/*
+ * Setup the hardware configuration for a given attr_type
+ */
+static int __hw_perf_event_init(struct perf_event *event)
+{
+ int err;
+
+ if (!x86_pmu_initialized())
+ return -ENODEV;
+
+ err = 0;
+ if (!atomic_inc_not_zero(&active_events)) {
+ mutex_lock(&pmc_reserve_mutex);
+ if (atomic_read(&active_events) == 0) {
+ if (!reserve_pmc_hardware())
+ err = -EBUSY;
+ else {
+ err = reserve_ds_buffers();
+ if (err)
+ release_pmc_hardware();
+ }
+ }
+ if (!err)
+ atomic_inc(&active_events);
+ mutex_unlock(&pmc_reserve_mutex);
+ }
+ if (err)
+ return err;
+
+ event->destroy = hw_perf_event_destroy;
+
+ event->hw.idx = -1;
+ event->hw.last_cpu = -1;
+ event->hw.last_tag = ~0ULL;
+
+ return x86_pmu.hw_config(event);
+}
+
static void x86_pmu_disable_all(void)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
int idx;
- for (idx = 0; idx < x86_pmu.num_events; idx++) {
+ for (idx = 0; idx < x86_pmu.num_counters; idx++) {
u64 val;
if (!test_bit(idx, cpuc->active_mask))
@@ -587,12 +598,12 @@ void hw_perf_disable(void)
x86_pmu.disable_all();
}
-static void x86_pmu_enable_all(void)
+static void x86_pmu_enable_all(int added)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
int idx;
- for (idx = 0; idx < x86_pmu.num_events; idx++) {
+ for (idx = 0; idx < x86_pmu.num_counters; idx++) {
struct perf_event *event = cpuc->events[idx];
u64 val;
@@ -667,14 +678,14 @@ static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
* assign events to counters starting with most
* constrained events.
*/
- wmax = x86_pmu.num_events;
+ wmax = x86_pmu.num_counters;
/*
* when fixed event counters are present,
* wmax is incremented by 1 to account
* for one more choice
*/
- if (x86_pmu.num_events_fixed)
+ if (x86_pmu.num_counters_fixed)
wmax++;
for (w = 1, num = n; num && w <= wmax; w++) {
@@ -724,7 +735,7 @@ static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader,
struct perf_event *event;
int n, max_count;
- max_count = x86_pmu.num_events + x86_pmu.num_events_fixed;
+ max_count = x86_pmu.num_counters + x86_pmu.num_counters_fixed;
/* current number of events already accepted */
n = cpuc->n_events;
@@ -795,7 +806,7 @@ void hw_perf_enable(void)
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct perf_event *event;
struct hw_perf_event *hwc;
- int i;
+ int i, added = cpuc->n_added;
if (!x86_pmu_initialized())
return;
@@ -847,19 +858,20 @@ void hw_perf_enable(void)
cpuc->enabled = 1;
barrier();
- x86_pmu.enable_all();
+ x86_pmu.enable_all(added);
}
-static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc)
+static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
+ u64 enable_mask)
{
- (void)checking_wrmsrl(hwc->config_base + hwc->idx,
- hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE);
+ wrmsrl(hwc->config_base + hwc->idx, hwc->config | enable_mask);
}
static inline void x86_pmu_disable_event(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
- (void)checking_wrmsrl(hwc->config_base + hwc->idx, hwc->config);
+
+ wrmsrl(hwc->config_base + hwc->idx, hwc->config);
}
static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
@@ -874,7 +886,7 @@ x86_perf_event_set_period(struct perf_event *event)
struct hw_perf_event *hwc = &event->hw;
s64 left = atomic64_read(&hwc->period_left);
s64 period = hwc->sample_period;
- int err, ret = 0, idx = hwc->idx;
+ int ret = 0, idx = hwc->idx;
if (idx == X86_PMC_IDX_FIXED_BTS)
return 0;
@@ -912,8 +924,8 @@ x86_perf_event_set_period(struct perf_event *event)
*/
atomic64_set(&hwc->prev_count, (u64)-left);
- err = checking_wrmsrl(hwc->event_base + idx,
- (u64)(-left) & x86_pmu.event_mask);
+ wrmsrl(hwc->event_base + idx,
+ (u64)(-left) & x86_pmu.cntval_mask);
perf_event_update_userpage(event);
@@ -924,7 +936,8 @@ static void x86_pmu_enable_event(struct perf_event *event)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
if (cpuc->enabled)
- __x86_pmu_enable_event(&event->hw);
+ __x86_pmu_enable_event(&event->hw,
+ ARCH_PERFMON_EVENTSEL_ENABLE);
}
/*
@@ -950,7 +963,15 @@ static int x86_pmu_enable(struct perf_event *event)
if (n < 0)
return n;
- ret = x86_schedule_events(cpuc, n, assign);
+ /*
+ * If a group event scheduling transaction was started,
+ * skip the schedulability test here; it will be performed
+ * at commit time (->commit_txn) as a whole.
+ */
+ if (cpuc->group_flag & PERF_EVENT_TXN_STARTED)
+ goto out;
+
+ ret = x86_pmu.schedule_events(cpuc, n, assign);
if (ret)
return ret;
/*
@@ -959,6 +980,7 @@ static int x86_pmu_enable(struct perf_event *event)
*/
memcpy(cpuc->assign, assign, n*sizeof(int));
+out:
cpuc->n_events = n;
cpuc->n_added += n - n0;
@@ -991,11 +1013,12 @@ static void x86_pmu_unthrottle(struct perf_event *event)
void perf_event_print_debug(void)
{
u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
+ u64 pebs;
struct cpu_hw_events *cpuc;
unsigned long flags;
int cpu, idx;
- if (!x86_pmu.num_events)
+ if (!x86_pmu.num_counters)
return;
local_irq_save(flags);
@@ -1008,16 +1031,18 @@ void perf_event_print_debug(void)
rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);
+ rdmsrl(MSR_IA32_PEBS_ENABLE, pebs);
pr_info("\n");
pr_info("CPU#%d: ctrl: %016llx\n", cpu, ctrl);
pr_info("CPU#%d: status: %016llx\n", cpu, status);
pr_info("CPU#%d: overflow: %016llx\n", cpu, overflow);
pr_info("CPU#%d: fixed: %016llx\n", cpu, fixed);
+ pr_info("CPU#%d: pebs: %016llx\n", cpu, pebs);
}
- pr_info("CPU#%d: active: %016llx\n", cpu, *(u64 *)cpuc->active_mask);
+ pr_info("CPU#%d: active: %016llx\n", cpu, *(u64 *)cpuc->active_mask);
- for (idx = 0; idx < x86_pmu.num_events; idx++) {
+ for (idx = 0; idx < x86_pmu.num_counters; idx++) {
rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
rdmsrl(x86_pmu.perfctr + idx, pmc_count);
@@ -1030,7 +1055,7 @@ void perf_event_print_debug(void)
pr_info("CPU#%d: gen-PMC%d left: %016llx\n",
cpu, idx, prev_left);
}
- for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
+ for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);
pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
@@ -1095,7 +1120,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
cpuc = &__get_cpu_var(cpu_hw_events);
- for (idx = 0; idx < x86_pmu.num_events; idx++) {
+ for (idx = 0; idx < x86_pmu.num_counters; idx++) {
if (!test_bit(idx, cpuc->active_mask))
continue;
@@ -1103,7 +1128,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
hwc = &event->hw;
val = x86_perf_event_update(event);
- if (val & (1ULL << (x86_pmu.event_bits - 1)))
+ if (val & (1ULL << (x86_pmu.cntval_bits - 1)))
continue;
/*
@@ -1146,7 +1171,6 @@ void set_perf_event_pending(void)
void perf_events_lapic_init(void)
{
-#ifdef CONFIG_X86_LOCAL_APIC
if (!x86_pmu.apic || !x86_pmu_initialized())
return;
@@ -1154,7 +1178,6 @@ void perf_events_lapic_init(void)
* Always use NMI for PMU
*/
apic_write(APIC_LVTPC, APIC_DM_NMI);
-#endif
}
static int __kprobes
@@ -1178,9 +1201,7 @@ perf_event_nmi_handler(struct notifier_block *self,
regs = args->regs;
-#ifdef CONFIG_X86_LOCAL_APIC
apic_write(APIC_LVTPC, APIC_DM_NMI);
-#endif
/*
* Can't rely on the handled return value to say it was our NMI, two
* events could trigger 'simultaneously' raising two back-to-back NMIs.
@@ -1217,118 +1238,11 @@ x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
return &unconstrained;
}
-static int x86_event_sched_in(struct perf_event *event,
- struct perf_cpu_context *cpuctx)
-{
- int ret = 0;
-
- event->state = PERF_EVENT_STATE_ACTIVE;
- event->oncpu = smp_processor_id();
- event->tstamp_running += event->ctx->time - event->tstamp_stopped;
-
- if (!is_x86_event(event))
- ret = event->pmu->enable(event);
-
- if (!ret && !is_software_event(event))
- cpuctx->active_oncpu++;
-
- if (!ret && event->attr.exclusive)
- cpuctx->exclusive = 1;
-
- return ret;
-}
-
-static void x86_event_sched_out(struct perf_event *event,
- struct perf_cpu_context *cpuctx)
-{
- event->state = PERF_EVENT_STATE_INACTIVE;
- event->oncpu = -1;
-
- if (!is_x86_event(event))
- event->pmu->disable(event);
-
- event->tstamp_running -= event->ctx->time - event->tstamp_stopped;
-
- if (!is_software_event(event))
- cpuctx->active_oncpu--;
-
- if (event->attr.exclusive || !cpuctx->active_oncpu)
- cpuctx->exclusive = 0;
-}
-
-/*
- * Called to enable a whole group of events.
- * Returns 1 if the group was enabled, or -EAGAIN if it could not be.
- * Assumes the caller has disabled interrupts and has
- * frozen the PMU with hw_perf_save_disable.
- *
- * called with PMU disabled. If successful and return value 1,
- * then guaranteed to call perf_enable() and hw_perf_enable()
- */
-int hw_perf_group_sched_in(struct perf_event *leader,
- struct perf_cpu_context *cpuctx,
- struct perf_event_context *ctx)
-{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
- struct perf_event *sub;
- int assign[X86_PMC_IDX_MAX];
- int n0, n1, ret;
-
- /* n0 = total number of events */
- n0 = collect_events(cpuc, leader, true);
- if (n0 < 0)
- return n0;
-
- ret = x86_schedule_events(cpuc, n0, assign);
- if (ret)
- return ret;
-
- ret = x86_event_sched_in(leader, cpuctx);
- if (ret)
- return ret;
-
- n1 = 1;
- list_for_each_entry(sub, &leader->sibling_list, group_entry) {
- if (sub->state > PERF_EVENT_STATE_OFF) {
- ret = x86_event_sched_in(sub, cpuctx);
- if (ret)
- goto undo;
- ++n1;
- }
- }
- /*
- * copy new assignment, now we know it is possible
- * will be used by hw_perf_enable()
- */
- memcpy(cpuc->assign, assign, n0*sizeof(int));
-
- cpuc->n_events = n0;
- cpuc->n_added += n1;
- ctx->nr_active += n1;
-
- /*
- * 1 means successful and events are active
- * This is not quite true because we defer
- * actual activation until hw_perf_enable() but
- * this way we* ensure caller won't try to enable
- * individual events
- */
- return 1;
-undo:
- x86_event_sched_out(leader, cpuctx);
- n0 = 1;
- list_for_each_entry(sub, &leader->sibling_list, group_entry) {
- if (sub->state == PERF_EVENT_STATE_ACTIVE) {
- x86_event_sched_out(sub, cpuctx);
- if (++n0 == n1)
- break;
- }
- }
- return ret;
-}
-
#include "perf_event_amd.c"
#include "perf_event_p6.c"
+#include "perf_event_p4.c"
+#include "perf_event_intel_lbr.c"
+#include "perf_event_intel_ds.c"
#include "perf_event_intel.c"
static int __cpuinit
@@ -1402,48 +1316,50 @@ void __init init_hw_perf_events(void)
pr_cont("%s PMU driver.\n", x86_pmu.name);
- if (x86_pmu.num_events > X86_PMC_MAX_GENERIC) {
+ if (x86_pmu.quirks)
+ x86_pmu.quirks();
+
+ if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) {
WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
- x86_pmu.num_events, X86_PMC_MAX_GENERIC);
- x86_pmu.num_events = X86_PMC_MAX_GENERIC;
+ x86_pmu.num_counters, X86_PMC_MAX_GENERIC);
+ x86_pmu.num_counters = X86_PMC_MAX_GENERIC;
}
- perf_event_mask = (1 << x86_pmu.num_events) - 1;
- perf_max_events = x86_pmu.num_events;
+ x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
+ perf_max_events = x86_pmu.num_counters;
- if (x86_pmu.num_events_fixed > X86_PMC_MAX_FIXED) {
+ if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) {
WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
- x86_pmu.num_events_fixed, X86_PMC_MAX_FIXED);
- x86_pmu.num_events_fixed = X86_PMC_MAX_FIXED;
+ x86_pmu.num_counters_fixed, X86_PMC_MAX_FIXED);
+ x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED;
}
- perf_event_mask |=
- ((1LL << x86_pmu.num_events_fixed)-1) << X86_PMC_IDX_FIXED;
- x86_pmu.intel_ctrl = perf_event_mask;
+ x86_pmu.intel_ctrl |=
+ ((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED;
perf_events_lapic_init();
register_die_notifier(&perf_event_nmi_notifier);
unconstrained = (struct event_constraint)
- __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_events) - 1,
- 0, x86_pmu.num_events);
+ __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
+ 0, x86_pmu.num_counters);
if (x86_pmu.event_constraints) {
for_each_event_constraint(c, x86_pmu.event_constraints) {
- if (c->cmask != INTEL_ARCH_FIXED_MASK)
+ if (c->cmask != X86_RAW_EVENT_MASK)
continue;
- c->idxmsk64 |= (1ULL << x86_pmu.num_events) - 1;
- c->weight += x86_pmu.num_events;
+ c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
+ c->weight += x86_pmu.num_counters;
}
}
pr_info("... version: %d\n", x86_pmu.version);
- pr_info("... bit width: %d\n", x86_pmu.event_bits);
- pr_info("... generic registers: %d\n", x86_pmu.num_events);
- pr_info("... value mask: %016Lx\n", x86_pmu.event_mask);
+ pr_info("... bit width: %d\n", x86_pmu.cntval_bits);
+ pr_info("... generic registers: %d\n", x86_pmu.num_counters);
+ pr_info("... value mask: %016Lx\n", x86_pmu.cntval_mask);
pr_info("... max period: %016Lx\n", x86_pmu.max_period);
- pr_info("... fixed-purpose events: %d\n", x86_pmu.num_events_fixed);
- pr_info("... event mask: %016Lx\n", perf_event_mask);
+ pr_info("... fixed-purpose events: %d\n", x86_pmu.num_counters_fixed);
+ pr_info("... event mask: %016Lx\n", x86_pmu.intel_ctrl);
perf_cpu_notifier(x86_pmu_notifier);
}
@@ -1453,6 +1369,59 @@ static inline void x86_pmu_read(struct perf_event *event)
x86_perf_event_update(event);
}
+/*
+ * Start group events scheduling transaction
+ * Set the flag to make pmu::enable() not perform the
+ * schedulability test, it will be performed at commit time
+ */
+static void x86_pmu_start_txn(const struct pmu *pmu)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+ cpuc->group_flag |= PERF_EVENT_TXN_STARTED;
+}
+
+/*
+ * Stop group events scheduling transaction
+ * Clear the flag and pmu::enable() will perform the
+ * schedulability test.
+ */
+static void x86_pmu_cancel_txn(const struct pmu *pmu)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+ cpuc->group_flag &= ~PERF_EVENT_TXN_STARTED;
+}
+
+/*
+ * Commit group events scheduling transaction
+ * Perform the group schedulability test as a whole
+ * Return 0 if success
+ */
+static int x86_pmu_commit_txn(const struct pmu *pmu)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ int assign[X86_PMC_IDX_MAX];
+ int n, ret;
+
+ n = cpuc->n_events;
+
+ if (!x86_pmu_initialized())
+ return -EAGAIN;
+
+ ret = x86_pmu.schedule_events(cpuc, n, assign);
+ if (ret)
+ return ret;
+
+ /*
+ * Copy the new assignment now that we know it is possible;
+ * it will be used by hw_perf_enable().
+ */
+ memcpy(cpuc->assign, assign, n*sizeof(int));
+
+ return 0;
+}
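[Editor's illustration, not part of the patch: a hedged sketch of how a caller strings the three transaction hooks together. This is not the actual perf core code, and unwinding of events that were already enabled before a failure is omitted.]

	static int demo_group_sched_in(const struct pmu *p,
				       struct perf_event **events, int n)
	{
		int i;

		p->start_txn(p);

		for (i = 0; i < n; i++) {
			if (p->enable(events[i]))
				goto cancel;
		}

		if (!p->commit_txn(p))
			return 0;	/* the whole group is schedulable */

	cancel:
		p->cancel_txn(p);
		return -EAGAIN;
	}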
+
static const struct pmu pmu = {
.enable = x86_pmu_enable,
.disable = x86_pmu_disable,
@@ -1460,9 +1429,38 @@ static const struct pmu pmu = {
.stop = x86_pmu_stop,
.read = x86_pmu_read,
.unthrottle = x86_pmu_unthrottle,
+ .start_txn = x86_pmu_start_txn,
+ .cancel_txn = x86_pmu_cancel_txn,
+ .commit_txn = x86_pmu_commit_txn,
};
/*
+ * validate that we can schedule this event
+ */
+static int validate_event(struct perf_event *event)
+{
+ struct cpu_hw_events *fake_cpuc;
+ struct event_constraint *c;
+ int ret = 0;
+
+ fake_cpuc = kmalloc(sizeof(*fake_cpuc), GFP_KERNEL | __GFP_ZERO);
+ if (!fake_cpuc)
+ return -ENOMEM;
+
+ c = x86_pmu.get_event_constraints(fake_cpuc, event);
+
+ if (!c || !c->weight)
+ ret = -ENOSPC;
+
+ if (x86_pmu.put_event_constraints)
+ x86_pmu.put_event_constraints(fake_cpuc, event);
+
+ kfree(fake_cpuc);
+
+ return ret;
+}
+
+/*
* validate a single event group
*
 * validation includes:
@@ -1502,7 +1500,7 @@ static int validate_group(struct perf_event *event)
fake_cpuc->n_events = n;
- ret = x86_schedule_events(fake_cpuc, n, NULL);
+ ret = x86_pmu.schedule_events(fake_cpuc, n, NULL);
out_free:
kfree(fake_cpuc);
@@ -1527,6 +1525,8 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
if (event->group_leader != event)
err = validate_group(event);
+ else
+ err = validate_event(event);
event->pmu = tmp;
}
@@ -1574,8 +1574,7 @@ static void backtrace_address(void *data, unsigned long addr, int reliable)
{
struct perf_callchain_entry *entry = data;
- if (reliable)
- callchain_store(entry, addr);
+ callchain_store(entry, addr);
}
static const struct stacktrace_ops backtrace_ops = {
@@ -1597,41 +1596,6 @@ perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
dump_trace(NULL, regs, NULL, regs->bp, &backtrace_ops, entry);
}
-/*
- * best effort, GUP based copy_from_user() that assumes IRQ or NMI context
- */
-static unsigned long
-copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
-{
- unsigned long offset, addr = (unsigned long)from;
- int type = in_nmi() ? KM_NMI : KM_IRQ0;
- unsigned long size, len = 0;
- struct page *page;
- void *map;
- int ret;
-
- do {
- ret = __get_user_pages_fast(addr, 1, 0, &page);
- if (!ret)
- break;
-
- offset = addr & (PAGE_SIZE - 1);
- size = min(PAGE_SIZE - offset, n - len);
-
- map = kmap_atomic(page, type);
- memcpy(to, map+offset, size);
- kunmap_atomic(map, type);
- put_page(page);
-
- len += size;
- to += size;
- addr += size;
-
- } while (len < n);
-
- return len;
-}
-
#ifdef CONFIG_COMPAT
static inline int
perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
@@ -1727,6 +1691,11 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
{
struct perf_callchain_entry *entry;
+ if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+ /* TODO: We don't support guest OS callchains yet */
+ return NULL;
+ }
+
if (in_nmi())
entry = &__get_cpu_var(pmc_nmi_entry);
else
@@ -1748,5 +1717,43 @@ void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int ski
*/
regs->bp = rewind_frame_pointer(skip + 1);
regs->cs = __KERNEL_CS;
- local_save_flags(regs->flags);
+ /*
+ * We abuse bit 3 to pass exact information, see perf_misc_flags
+ * and the comment with PERF_EFLAGS_EXACT.
+ */
+ regs->flags = 0;
+}
+
+unsigned long perf_instruction_pointer(struct pt_regs *regs)
+{
+ unsigned long ip;
+
+ if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
+ ip = perf_guest_cbs->get_guest_ip();
+ else
+ ip = instruction_pointer(regs);
+
+ return ip;
+}
+
+unsigned long perf_misc_flags(struct pt_regs *regs)
+{
+ int misc = 0;
+
+ if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+ if (perf_guest_cbs->is_user_mode())
+ misc |= PERF_RECORD_MISC_GUEST_USER;
+ else
+ misc |= PERF_RECORD_MISC_GUEST_KERNEL;
+ } else {
+ if (user_mode(regs))
+ misc |= PERF_RECORD_MISC_USER;
+ else
+ misc |= PERF_RECORD_MISC_KERNEL;
+ }
+
+ if (regs->flags & PERF_EFLAGS_EXACT)
+ misc |= PERF_RECORD_MISC_EXACT_IP;
+
+ return misc;
}
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index db6f7d4056e1..611df11ba15e 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -2,7 +2,7 @@
static DEFINE_RAW_SPINLOCK(amd_nb_lock);
-static __initconst u64 amd_hw_cache_event_ids
+static __initconst const u64 amd_hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] =
@@ -111,22 +111,19 @@ static u64 amd_pmu_event_map(int hw_event)
return amd_perfmon_event_map[hw_event];
}
-static u64 amd_pmu_raw_event(u64 hw_event)
+static int amd_pmu_hw_config(struct perf_event *event)
{
-#define K7_EVNTSEL_EVENT_MASK 0xF000000FFULL
-#define K7_EVNTSEL_UNIT_MASK 0x00000FF00ULL
-#define K7_EVNTSEL_EDGE_MASK 0x000040000ULL
-#define K7_EVNTSEL_INV_MASK 0x000800000ULL
-#define K7_EVNTSEL_REG_MASK 0x0FF000000ULL
-
-#define K7_EVNTSEL_MASK \
- (K7_EVNTSEL_EVENT_MASK | \
- K7_EVNTSEL_UNIT_MASK | \
- K7_EVNTSEL_EDGE_MASK | \
- K7_EVNTSEL_INV_MASK | \
- K7_EVNTSEL_REG_MASK)
-
- return hw_event & K7_EVNTSEL_MASK;
+ int ret = x86_pmu_hw_config(event);
+
+ if (ret)
+ return ret;
+
+ if (event->attr.type != PERF_TYPE_RAW)
+ return 0;
+
+ event->hw.config |= event->attr.config & AMD64_RAW_EVENT_MASK;
+
+ return 0;
}
/*
@@ -165,7 +162,7 @@ static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
* be removed on one CPU at a time AND PMU is disabled
* when we come here
*/
- for (i = 0; i < x86_pmu.num_events; i++) {
+ for (i = 0; i < x86_pmu.num_counters; i++) {
if (nb->owners[i] == event) {
cmpxchg(nb->owners+i, event, NULL);
break;
@@ -215,7 +212,7 @@ amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
struct hw_perf_event *hwc = &event->hw;
struct amd_nb *nb = cpuc->amd_nb;
struct perf_event *old = NULL;
- int max = x86_pmu.num_events;
+ int max = x86_pmu.num_counters;
int i, j, k = -1;
/*
@@ -293,7 +290,7 @@ static struct amd_nb *amd_alloc_nb(int cpu, int nb_id)
/*
* initialize all possible NB constraints
*/
- for (i = 0; i < x86_pmu.num_events; i++) {
+ for (i = 0; i < x86_pmu.num_counters; i++) {
__set_bit(i, nb->event_constraints[i].idxmsk);
nb->event_constraints[i].weight = 1;
}
@@ -371,21 +368,22 @@ static void amd_pmu_cpu_dead(int cpu)
raw_spin_unlock(&amd_nb_lock);
}
-static __initconst struct x86_pmu amd_pmu = {
+static __initconst const struct x86_pmu amd_pmu = {
.name = "AMD",
.handle_irq = x86_pmu_handle_irq,
.disable_all = x86_pmu_disable_all,
.enable_all = x86_pmu_enable_all,
.enable = x86_pmu_enable_event,
.disable = x86_pmu_disable_event,
+ .hw_config = amd_pmu_hw_config,
+ .schedule_events = x86_schedule_events,
.eventsel = MSR_K7_EVNTSEL0,
.perfctr = MSR_K7_PERFCTR0,
.event_map = amd_pmu_event_map,
- .raw_event = amd_pmu_raw_event,
.max_events = ARRAY_SIZE(amd_perfmon_event_map),
- .num_events = 4,
- .event_bits = 48,
- .event_mask = (1ULL << 48) - 1,
+ .num_counters = 4,
+ .cntval_bits = 48,
+ .cntval_mask = (1ULL << 48) - 1,
.apic = 1,
/* use highest bit to detect overflow */
.max_period = (1ULL << 47) - 1,
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 9c794ac87837..fdbc652d3feb 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -88,7 +88,7 @@ static u64 intel_pmu_event_map(int hw_event)
return intel_perfmon_event_map[hw_event];
}
-static __initconst u64 westmere_hw_cache_event_ids
+static __initconst const u64 westmere_hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] =
@@ -179,7 +179,7 @@ static __initconst u64 westmere_hw_cache_event_ids
},
};
-static __initconst u64 nehalem_hw_cache_event_ids
+static __initconst const u64 nehalem_hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] =
@@ -270,7 +270,7 @@ static __initconst u64 nehalem_hw_cache_event_ids
},
};
-static __initconst u64 core2_hw_cache_event_ids
+static __initconst const u64 core2_hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] =
@@ -361,7 +361,7 @@ static __initconst u64 core2_hw_cache_event_ids
},
};
-static __initconst u64 atom_hw_cache_event_ids
+static __initconst const u64 atom_hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] =
@@ -452,60 +452,6 @@ static __initconst u64 atom_hw_cache_event_ids
},
};
-static u64 intel_pmu_raw_event(u64 hw_event)
-{
-#define CORE_EVNTSEL_EVENT_MASK 0x000000FFULL
-#define CORE_EVNTSEL_UNIT_MASK 0x0000FF00ULL
-#define CORE_EVNTSEL_EDGE_MASK 0x00040000ULL
-#define CORE_EVNTSEL_INV_MASK 0x00800000ULL
-#define CORE_EVNTSEL_REG_MASK 0xFF000000ULL
-
-#define CORE_EVNTSEL_MASK \
- (INTEL_ARCH_EVTSEL_MASK | \
- INTEL_ARCH_UNIT_MASK | \
- INTEL_ARCH_EDGE_MASK | \
- INTEL_ARCH_INV_MASK | \
- INTEL_ARCH_CNT_MASK)
-
- return hw_event & CORE_EVNTSEL_MASK;
-}
-
-static void intel_pmu_enable_bts(u64 config)
-{
- unsigned long debugctlmsr;
-
- debugctlmsr = get_debugctlmsr();
-
- debugctlmsr |= X86_DEBUGCTL_TR;
- debugctlmsr |= X86_DEBUGCTL_BTS;
- debugctlmsr |= X86_DEBUGCTL_BTINT;
-
- if (!(config & ARCH_PERFMON_EVENTSEL_OS))
- debugctlmsr |= X86_DEBUGCTL_BTS_OFF_OS;
-
- if (!(config & ARCH_PERFMON_EVENTSEL_USR))
- debugctlmsr |= X86_DEBUGCTL_BTS_OFF_USR;
-
- update_debugctlmsr(debugctlmsr);
-}
-
-static void intel_pmu_disable_bts(void)
-{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
- unsigned long debugctlmsr;
-
- if (!cpuc->ds)
- return;
-
- debugctlmsr = get_debugctlmsr();
-
- debugctlmsr &=
- ~(X86_DEBUGCTL_TR | X86_DEBUGCTL_BTS | X86_DEBUGCTL_BTINT |
- X86_DEBUGCTL_BTS_OFF_OS | X86_DEBUGCTL_BTS_OFF_USR);
-
- update_debugctlmsr(debugctlmsr);
-}
-
static void intel_pmu_disable_all(void)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
@@ -514,12 +460,17 @@ static void intel_pmu_disable_all(void)
if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
intel_pmu_disable_bts();
+
+ intel_pmu_pebs_disable_all();
+ intel_pmu_lbr_disable_all();
}
-static void intel_pmu_enable_all(void)
+static void intel_pmu_enable_all(int added)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ intel_pmu_pebs_enable_all();
+ intel_pmu_lbr_enable_all();
wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
@@ -533,6 +484,42 @@ static void intel_pmu_enable_all(void)
}
}
+/*
+ * Workaround for:
+ * Intel Errata AAK100 (model 26)
+ * Intel Errata AAP53 (model 30)
+ * Intel Errata BD53 (model 44)
+ *
+ * These chips need to be 'reset' when adding counters by programming
+ * the magic three (non-counting) events 0x4300D2, 0x4300B1 and 0x4300B5
+ * either in sequence on the same PMC or on different PMCs.
+ */
+static void intel_pmu_nhm_enable_all(int added)
+{
+ if (added) {
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ int i;
+
+ wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + 0, 0x4300D2);
+ wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + 1, 0x4300B1);
+ wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + 2, 0x4300B5);
+
+ wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x3);
+ wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);
+
+ for (i = 0; i < 3; i++) {
+ struct perf_event *event = cpuc->events[i];
+
+ if (!event)
+ continue;
+
+ __x86_pmu_enable_event(&event->hw,
+ ARCH_PERFMON_EVENTSEL_ENABLE);
+ }
+ }
+ intel_pmu_enable_all(added);
+}
+
static inline u64 intel_pmu_get_status(void)
{
u64 status;
@@ -547,8 +534,7 @@ static inline void intel_pmu_ack_status(u64 ack)
wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
}
-static inline void
-intel_pmu_disable_fixed(struct hw_perf_event *hwc)
+static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
{
int idx = hwc->idx - X86_PMC_IDX_FIXED;
u64 ctrl_val, mask;
@@ -557,71 +543,10 @@ intel_pmu_disable_fixed(struct hw_perf_event *hwc)
rdmsrl(hwc->config_base, ctrl_val);
ctrl_val &= ~mask;
- (void)checking_wrmsrl(hwc->config_base, ctrl_val);
-}
-
-static void intel_pmu_drain_bts_buffer(void)
-{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
- struct debug_store *ds = cpuc->ds;
- struct bts_record {
- u64 from;
- u64 to;
- u64 flags;
- };
- struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS];
- struct bts_record *at, *top;
- struct perf_output_handle handle;
- struct perf_event_header header;
- struct perf_sample_data data;
- struct pt_regs regs;
-
- if (!event)
- return;
-
- if (!ds)
- return;
-
- at = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
- top = (struct bts_record *)(unsigned long)ds->bts_index;
-
- if (top <= at)
- return;
-
- ds->bts_index = ds->bts_buffer_base;
-
- perf_sample_data_init(&data, 0);
-
- data.period = event->hw.last_period;
- regs.ip = 0;
-
- /*
- * Prepare a generic sample, i.e. fill in the invariant fields.
- * We will overwrite the from and to address before we output
- * the sample.
- */
- perf_prepare_sample(&header, &data, event, &regs);
-
- if (perf_output_begin(&handle, event,
- header.size * (top - at), 1, 1))
- return;
-
- for (; at < top; at++) {
- data.ip = at->from;
- data.addr = at->to;
-
- perf_output_sample(&handle, &header, &data, event);
- }
-
- perf_output_end(&handle);
-
- /* There's new data available. */
- event->hw.interrupts++;
- event->pending_kill = POLL_IN;
+ wrmsrl(hwc->config_base, ctrl_val);
}
-static inline void
-intel_pmu_disable_event(struct perf_event *event)
+static void intel_pmu_disable_event(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
@@ -637,14 +562,15 @@ intel_pmu_disable_event(struct perf_event *event)
}
x86_pmu_disable_event(event);
+
+ if (unlikely(event->attr.precise_ip))
+ intel_pmu_pebs_disable(event);
}
-static inline void
-intel_pmu_enable_fixed(struct hw_perf_event *hwc)
+static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
{
int idx = hwc->idx - X86_PMC_IDX_FIXED;
u64 ctrl_val, bits, mask;
- int err;
/*
* Enable IRQ generation (0x8),
@@ -669,7 +595,7 @@ intel_pmu_enable_fixed(struct hw_perf_event *hwc)
rdmsrl(hwc->config_base, ctrl_val);
ctrl_val &= ~mask;
ctrl_val |= bits;
- err = checking_wrmsrl(hwc->config_base, ctrl_val);
+ wrmsrl(hwc->config_base, ctrl_val);
}
static void intel_pmu_enable_event(struct perf_event *event)
@@ -689,7 +615,10 @@ static void intel_pmu_enable_event(struct perf_event *event)
return;
}
- __x86_pmu_enable_event(hwc);
+ if (unlikely(event->attr.precise_ip))
+ intel_pmu_pebs_enable(event);
+
+ __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
}
/*
@@ -708,20 +637,20 @@ static void intel_pmu_reset(void)
unsigned long flags;
int idx;
- if (!x86_pmu.num_events)
+ if (!x86_pmu.num_counters)
return;
local_irq_save(flags);
printk("clearing PMU state on CPU#%d\n", smp_processor_id());
- for (idx = 0; idx < x86_pmu.num_events; idx++) {
+ for (idx = 0; idx < x86_pmu.num_counters; idx++) {
checking_wrmsrl(x86_pmu.eventsel + idx, 0ull);
checking_wrmsrl(x86_pmu.perfctr + idx, 0ull);
}
- for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
+ for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
- }
+
if (ds)
ds->bts_index = ds->bts_buffer_base;
@@ -747,7 +676,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
intel_pmu_drain_bts_buffer();
status = intel_pmu_get_status();
if (!status) {
- intel_pmu_enable_all();
+ intel_pmu_enable_all(0);
return 0;
}
@@ -762,6 +691,15 @@ again:
inc_irq_stat(apic_perf_irqs);
ack = status;
+
+ intel_pmu_lbr_read();
+
+ /*
+ * PEBS overflow sets bit 62 in the global status register
+ */
+ if (__test_and_clear_bit(62, (unsigned long *)&status))
+ x86_pmu.drain_pebs(regs);
+
for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
struct perf_event *event = cpuc->events[bit];
@@ -787,26 +725,22 @@ again:
goto again;
done:
- intel_pmu_enable_all();
+ intel_pmu_enable_all(0);
return 1;
}
-static struct event_constraint bts_constraint =
- EVENT_CONSTRAINT(0, 1ULL << X86_PMC_IDX_FIXED_BTS, 0);
-
static struct event_constraint *
-intel_special_constraints(struct perf_event *event)
+intel_bts_constraints(struct perf_event *event)
{
- unsigned int hw_event;
-
- hw_event = event->hw.config & INTEL_ARCH_EVENT_MASK;
+ struct hw_perf_event *hwc = &event->hw;
+ unsigned int hw_event, bts_event;
- if (unlikely((hw_event ==
- x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS)) &&
- (event->hw.sample_period == 1))) {
+ hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
+ bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
+ if (unlikely(hw_event == bts_event && hwc->sample_period == 1))
return &bts_constraint;
- }
+
return NULL;
}
@@ -815,24 +749,53 @@ intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event
{
struct event_constraint *c;
- c = intel_special_constraints(event);
+ c = intel_bts_constraints(event);
+ if (c)
+ return c;
+
+ c = intel_pebs_constraints(event);
if (c)
return c;
return x86_get_event_constraints(cpuc, event);
}
-static __initconst struct x86_pmu core_pmu = {
+static int intel_pmu_hw_config(struct perf_event *event)
+{
+ int ret = x86_pmu_hw_config(event);
+
+ if (ret)
+ return ret;
+
+ if (event->attr.type != PERF_TYPE_RAW)
+ return 0;
+
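+ /*
+ * The ANY ("AnyThread") bit makes a counter count events from both
+ * SMT siblings of a core. It only exists from architectural perfmon
+ * v3 on and can leak information across hyper-threads, hence the
+ * privilege check below.
+ */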
+ if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))
+ return 0;
+
+ if (x86_pmu.version < 3)
+ return -EINVAL;
+
+ if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;
+
+ return 0;
+}
+
+static __initconst const struct x86_pmu core_pmu = {
.name = "core",
.handle_irq = x86_pmu_handle_irq,
.disable_all = x86_pmu_disable_all,
.enable_all = x86_pmu_enable_all,
.enable = x86_pmu_enable_event,
.disable = x86_pmu_disable_event,
+ .hw_config = x86_pmu_hw_config,
+ .schedule_events = x86_schedule_events,
.eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
.perfctr = MSR_ARCH_PERFMON_PERFCTR0,
.event_map = intel_pmu_event_map,
- .raw_event = intel_pmu_raw_event,
.max_events = ARRAY_SIZE(intel_perfmon_event_map),
.apic = 1,
/*
@@ -845,17 +808,32 @@ static __initconst struct x86_pmu core_pmu = {
.event_constraints = intel_core_event_constraints,
};
-static __initconst struct x86_pmu intel_pmu = {
+static void intel_pmu_cpu_starting(int cpu)
+{
+ init_debug_store_on_cpu(cpu);
+ /*
+ * Deal with CPUs that don't clear their LBRs on power-up.
+ */
+ intel_pmu_lbr_reset();
+}
+
+static void intel_pmu_cpu_dying(int cpu)
+{
+ fini_debug_store_on_cpu(cpu);
+}
+
+static __initconst const struct x86_pmu intel_pmu = {
.name = "Intel",
.handle_irq = intel_pmu_handle_irq,
.disable_all = intel_pmu_disable_all,
.enable_all = intel_pmu_enable_all,
.enable = intel_pmu_enable_event,
.disable = intel_pmu_disable_event,
+ .hw_config = intel_pmu_hw_config,
+ .schedule_events = x86_schedule_events,
.eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
.perfctr = MSR_ARCH_PERFMON_PERFCTR0,
.event_map = intel_pmu_event_map,
- .raw_event = intel_pmu_raw_event,
.max_events = ARRAY_SIZE(intel_perfmon_event_map),
.apic = 1,
/*
@@ -864,14 +842,38 @@ static __initconst struct x86_pmu intel_pmu = {
* the generic event period:
*/
.max_period = (1ULL << 31) - 1,
- .enable_bts = intel_pmu_enable_bts,
- .disable_bts = intel_pmu_disable_bts,
.get_event_constraints = intel_get_event_constraints,
- .cpu_starting = init_debug_store_on_cpu,
- .cpu_dying = fini_debug_store_on_cpu,
+ .cpu_starting = intel_pmu_cpu_starting,
+ .cpu_dying = intel_pmu_cpu_dying,
};
+static void intel_clovertown_quirks(void)
+{
+ /*
+ * PEBS is unreliable due to:
+ *
+ * AJ67 - PEBS may experience CPL leaks
+ * AJ68 - PEBS PMI may be delayed by one event
+ * AJ69 - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12]
+ * AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
+ *
+ * AJ67 could be worked around by restricting the OS/USR flags.
+ * AJ69 could be worked around by setting PMU_FREEZE_ON_PMI.
+ *
+ * AJ106 could possibly be worked around by not allowing LBR
+ * usage from PEBS, including the fixup.
+ * AJ68 could possibly be worked around by always programming
+ * a pebs_event_reset[0] value and coping with the lost events.
+ *
+ * But taken together it might just make sense to not enable PEBS on
+ * these chips.
+ */
+ printk(KERN_WARNING "PEBS disabled due to CPU errata.\n");
+ x86_pmu.pebs = 0;
+ x86_pmu.pebs_constraints = NULL;
+}
+
static __init int intel_pmu_init(void)
{
union cpuid10_edx edx;
@@ -881,12 +883,13 @@ static __init int intel_pmu_init(void)
int version;
if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
- /* check for P6 processor family */
- if (boot_cpu_data.x86 == 6) {
- return p6_pmu_init();
- } else {
+ switch (boot_cpu_data.x86) {
+ case 0x6:
+ return p6_pmu_init();
+ case 0xf:
+ return p4_pmu_init();
+ }
return -ENODEV;
- }
}
/*
@@ -904,16 +907,28 @@ static __init int intel_pmu_init(void)
x86_pmu = intel_pmu;
x86_pmu.version = version;
- x86_pmu.num_events = eax.split.num_events;
- x86_pmu.event_bits = eax.split.bit_width;
- x86_pmu.event_mask = (1ULL << eax.split.bit_width) - 1;
+ x86_pmu.num_counters = eax.split.num_counters;
+ x86_pmu.cntval_bits = eax.split.bit_width;
+ x86_pmu.cntval_mask = (1ULL << eax.split.bit_width) - 1;
/*
* Quirk: v2 perfmon does not report fixed-purpose events, so
* assume at least 3 events:
*/
if (version > 1)
- x86_pmu.num_events_fixed = max((int)edx.split.num_events_fixed, 3);
+ x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
+
+ /*
+ * v2 and above have a perf capabilities MSR
+ */
+ if (version > 1) {
+ u64 capabilities;
+
+ rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
+ x86_pmu.intel_cap.capabilities = capabilities;
+ }
+
+ intel_ds_init();
/*
* Install the hw-cache-events table:
@@ -924,12 +939,15 @@ static __init int intel_pmu_init(void)
break;
case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
+ x86_pmu.quirks = intel_clovertown_quirks;
case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
case 29: /* six-core 45 nm xeon "Dunnington" */
memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
+ intel_pmu_lbr_init_core();
+
x86_pmu.event_constraints = intel_core2_event_constraints;
pr_cont("Core2 events, ");
break;
@@ -940,13 +958,19 @@ static __init int intel_pmu_init(void)
memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
+ intel_pmu_lbr_init_nhm();
+
x86_pmu.event_constraints = intel_nehalem_event_constraints;
- pr_cont("Nehalem/Corei7 events, ");
+ x86_pmu.enable_all = intel_pmu_nhm_enable_all;
+ pr_cont("Nehalem events, ");
break;
+
case 28: /* Atom */
memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
+ intel_pmu_lbr_init_atom();
+
x86_pmu.event_constraints = intel_gen_event_constraints;
pr_cont("Atom events, ");
break;
@@ -956,7 +980,10 @@ static __init int intel_pmu_init(void)
memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
+ intel_pmu_lbr_init_nhm();
+
x86_pmu.event_constraints = intel_westmere_event_constraints;
+ x86_pmu.enable_all = intel_pmu_nhm_enable_all;
pr_cont("Westmere events, ");
break;
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
new file mode 100644
index 000000000000..18018d1311cd
--- /dev/null
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -0,0 +1,641 @@
+#ifdef CONFIG_CPU_SUP_INTEL
+
+/* The maximal number of PEBS events: */
+#define MAX_PEBS_EVENTS 4
+
+/* The size of a BTS record in bytes: */
+#define BTS_RECORD_SIZE 24
+
+#define BTS_BUFFER_SIZE (PAGE_SIZE << 4)
+#define PEBS_BUFFER_SIZE PAGE_SIZE
+
+/*
+ * pebs_record_32 for p4 and core not supported
+
+struct pebs_record_32 {
+ u32 flags, ip;
+ u32 ax, bc, cx, dx;
+ u32 si, di, bp, sp;
+};
+
+ */
+
+struct pebs_record_core {
+ u64 flags, ip;
+ u64 ax, bx, cx, dx;
+ u64 si, di, bp, sp;
+ u64 r8, r9, r10, r11;
+ u64 r12, r13, r14, r15;
+};
+
+struct pebs_record_nhm {
+ u64 flags, ip;
+ u64 ax, bx, cx, dx;
+ u64 si, di, bp, sp;
+ u64 r8, r9, r10, r11;
+ u64 r12, r13, r14, r15;
+ u64 status, dla, dse, lat;
+};
+
+/*
+ * A debug store configuration.
+ *
+ * We only support architectures that use 64bit fields.
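+ *
+ * The base of this structure is what gets written into MSR_IA32_DS_AREA
+ * (see init_debug_store_on_cpu() below), so the layout must match the
+ * hardware DS save area format.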
+ */
+struct debug_store {
+ u64 bts_buffer_base;
+ u64 bts_index;
+ u64 bts_absolute_maximum;
+ u64 bts_interrupt_threshold;
+ u64 pebs_buffer_base;
+ u64 pebs_index;
+ u64 pebs_absolute_maximum;
+ u64 pebs_interrupt_threshold;
+ u64 pebs_event_reset[MAX_PEBS_EVENTS];
+};
+
+static void init_debug_store_on_cpu(int cpu)
+{
+ struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
+
+ if (!ds)
+ return;
+
+ wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA,
+ (u32)((u64)(unsigned long)ds),
+ (u32)((u64)(unsigned long)ds >> 32));
+}
+
+static void fini_debug_store_on_cpu(int cpu)
+{
+ if (!per_cpu(cpu_hw_events, cpu).ds)
+ return;
+
+ wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
+}
+
+static void release_ds_buffers(void)
+{
+ int cpu;
+
+ if (!x86_pmu.bts && !x86_pmu.pebs)
+ return;
+
+ get_online_cpus();
+
+ for_each_online_cpu(cpu)
+ fini_debug_store_on_cpu(cpu);
+
+ for_each_possible_cpu(cpu) {
+ struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
+
+ if (!ds)
+ continue;
+
+ per_cpu(cpu_hw_events, cpu).ds = NULL;
+
+ kfree((void *)(unsigned long)ds->pebs_buffer_base);
+ kfree((void *)(unsigned long)ds->bts_buffer_base);
+ kfree(ds);
+ }
+
+ put_online_cpus();
+}
+
+static int reserve_ds_buffers(void)
+{
+ int cpu, err = 0;
+
+ if (!x86_pmu.bts && !x86_pmu.pebs)
+ return 0;
+
+ get_online_cpus();
+
+ for_each_possible_cpu(cpu) {
+ struct debug_store *ds;
+ void *buffer;
+ int max, thresh;
+
+ err = -ENOMEM;
+ ds = kzalloc(sizeof(*ds), GFP_KERNEL);
+ if (unlikely(!ds))
+ break;
+ per_cpu(cpu_hw_events, cpu).ds = ds;
+
+ if (x86_pmu.bts) {
+ buffer = kzalloc(BTS_BUFFER_SIZE, GFP_KERNEL);
+ if (unlikely(!buffer))
+ break;
+
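+ /*
+ * Arm the BTS interrupt threshold max/16 records before the
+ * end of the buffer, so the PMI fires while there is still
+ * room left to drain it.
+ */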
+ max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;
+ thresh = max / 16;
+
+ ds->bts_buffer_base = (u64)(unsigned long)buffer;
+ ds->bts_index = ds->bts_buffer_base;
+ ds->bts_absolute_maximum = ds->bts_buffer_base +
+ max * BTS_RECORD_SIZE;
+ ds->bts_interrupt_threshold = ds->bts_absolute_maximum -
+ thresh * BTS_RECORD_SIZE;
+ }
+
+ if (x86_pmu.pebs) {
+ buffer = kzalloc(PEBS_BUFFER_SIZE, GFP_KERNEL);
+ if (unlikely(!buffer))
+ break;
+
+ max = PEBS_BUFFER_SIZE / x86_pmu.pebs_record_size;
+
+ ds->pebs_buffer_base = (u64)(unsigned long)buffer;
+ ds->pebs_index = ds->pebs_buffer_base;
+ ds->pebs_absolute_maximum = ds->pebs_buffer_base +
+ max * x86_pmu.pebs_record_size;
+ /*
+ * Always use single record PEBS
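+ * (with the threshold at one record every PEBS assist raises a
+ * PMI right away, and the sampling period gets reprogrammed as
+ * for a normal counter interrupt)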
+ */
+ ds->pebs_interrupt_threshold = ds->pebs_buffer_base +
+ x86_pmu.pebs_record_size;
+ }
+
+ err = 0;
+ }
+
+ if (err)
+ release_ds_buffers();
+ else {
+ for_each_online_cpu(cpu)
+ init_debug_store_on_cpu(cpu);
+ }
+
+ put_online_cpus();
+
+ return err;
+}
+
+/*
+ * BTS
+ */
+
+static struct event_constraint bts_constraint =
+ EVENT_CONSTRAINT(0, 1ULL << X86_PMC_IDX_FIXED_BTS, 0);
+
+static void intel_pmu_enable_bts(u64 config)
+{
+ unsigned long debugctlmsr;
+
+ debugctlmsr = get_debugctlmsr();
+
+ debugctlmsr |= DEBUGCTLMSR_TR;
+ debugctlmsr |= DEBUGCTLMSR_BTS;
+ debugctlmsr |= DEBUGCTLMSR_BTINT;
+
+ if (!(config & ARCH_PERFMON_EVENTSEL_OS))
+ debugctlmsr |= DEBUGCTLMSR_BTS_OFF_OS;
+
+ if (!(config & ARCH_PERFMON_EVENTSEL_USR))
+ debugctlmsr |= DEBUGCTLMSR_BTS_OFF_USR;
+
+ update_debugctlmsr(debugctlmsr);
+}
+
+static void intel_pmu_disable_bts(void)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ unsigned long debugctlmsr;
+
+ if (!cpuc->ds)
+ return;
+
+ debugctlmsr = get_debugctlmsr();
+
+ debugctlmsr &=
+ ~(DEBUGCTLMSR_TR | DEBUGCTLMSR_BTS | DEBUGCTLMSR_BTINT |
+ DEBUGCTLMSR_BTS_OFF_OS | DEBUGCTLMSR_BTS_OFF_USR);
+
+ update_debugctlmsr(debugctlmsr);
+}
+
+static void intel_pmu_drain_bts_buffer(void)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct debug_store *ds = cpuc->ds;
+ struct bts_record {
+ u64 from;
+ u64 to;
+ u64 flags;
+ };
+ struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS];
+ struct bts_record *at, *top;
+ struct perf_output_handle handle;
+ struct perf_event_header header;
+ struct perf_sample_data data;
+ struct pt_regs regs;
+
+ if (!event)
+ return;
+
+ if (!ds)
+ return;
+
+ at = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
+ top = (struct bts_record *)(unsigned long)ds->bts_index;
+
+ if (top <= at)
+ return;
+
+ ds->bts_index = ds->bts_buffer_base;
+
+ perf_sample_data_init(&data, 0);
+ data.period = event->hw.last_period;
+ regs.ip = 0;
+
+ /*
+ * Prepare a generic sample, i.e. fill in the invariant fields.
+ * We will overwrite the from and to address before we output
+ * the sample.
+ */
+ perf_prepare_sample(&header, &data, event, &regs);
+
+ if (perf_output_begin(&handle, event, header.size * (top - at), 1, 1))
+ return;
+
+ for (; at < top; at++) {
+ data.ip = at->from;
+ data.addr = at->to;
+
+ perf_output_sample(&handle, &header, &data, event);
+ }
+
+ perf_output_end(&handle);
+
+ /* There's new data available. */
+ event->hw.interrupts++;
+ event->pending_kill = POLL_IN;
+}
+
+/*
+ * PEBS
+ */
+
+static struct event_constraint intel_core_pebs_events[] = {
+ PEBS_EVENT_CONSTRAINT(0x00c0, 0x1), /* INSTR_RETIRED.ANY */
+ PEBS_EVENT_CONSTRAINT(0xfec1, 0x1), /* X87_OPS_RETIRED.ANY */
+ PEBS_EVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_RETIRED.MISPRED */
+ PEBS_EVENT_CONSTRAINT(0x1fc7, 0x1), /* SIMD_INST_RETIRED.ANY */
+ PEBS_EVENT_CONSTRAINT(0x01cb, 0x1), /* MEM_LOAD_RETIRED.L1D_MISS */
+ PEBS_EVENT_CONSTRAINT(0x02cb, 0x1), /* MEM_LOAD_RETIRED.L1D_LINE_MISS */
+ PEBS_EVENT_CONSTRAINT(0x04cb, 0x1), /* MEM_LOAD_RETIRED.L2_MISS */
+ PEBS_EVENT_CONSTRAINT(0x08cb, 0x1), /* MEM_LOAD_RETIRED.L2_LINE_MISS */
+ PEBS_EVENT_CONSTRAINT(0x10cb, 0x1), /* MEM_LOAD_RETIRED.DTLB_MISS */
+ EVENT_CONSTRAINT_END
+};
+
+static struct event_constraint intel_nehalem_pebs_events[] = {
+ PEBS_EVENT_CONSTRAINT(0x00c0, 0xf), /* INSTR_RETIRED.ANY */
+ PEBS_EVENT_CONSTRAINT(0xfec1, 0xf), /* X87_OPS_RETIRED.ANY */
+ PEBS_EVENT_CONSTRAINT(0x00c5, 0xf), /* BR_INST_RETIRED.MISPRED */
+ PEBS_EVENT_CONSTRAINT(0x1fc7, 0xf), /* SIMD_INST_RETIRED.ANY */
+ PEBS_EVENT_CONSTRAINT(0x01cb, 0xf), /* MEM_LOAD_RETIRED.L1D_MISS */
+ PEBS_EVENT_CONSTRAINT(0x02cb, 0xf), /* MEM_LOAD_RETIRED.L1D_LINE_MISS */
+ PEBS_EVENT_CONSTRAINT(0x04cb, 0xf), /* MEM_LOAD_RETIRED.L2_MISS */
+ PEBS_EVENT_CONSTRAINT(0x08cb, 0xf), /* MEM_LOAD_RETIRED.L2_LINE_MISS */
+ PEBS_EVENT_CONSTRAINT(0x10cb, 0xf), /* MEM_LOAD_RETIRED.DTLB_MISS */
+ EVENT_CONSTRAINT_END
+};
+
+static struct event_constraint *
+intel_pebs_constraints(struct perf_event *event)
+{
+ struct event_constraint *c;
+
+ if (!event->attr.precise_ip)
+ return NULL;
+
+ if (x86_pmu.pebs_constraints) {
+ for_each_event_constraint(c, x86_pmu.pebs_constraints) {
+ if ((event->hw.config & c->cmask) == c->code)
+ return c;
+ }
+ }
+
+ return &emptyconstraint;
+}
+
+static void intel_pmu_pebs_enable(struct perf_event *event)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct hw_perf_event *hwc = &event->hw;
+
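+ /*
+ * A PEBS counter is serviced through the DS/PEBS assist rather than
+ * the regular counter overflow interrupt, so mask the per-counter
+ * interrupt enable while PEBS is active (restored again on disable).
+ */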
+ hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT;
+
+ cpuc->pebs_enabled |= 1ULL << hwc->idx;
+ WARN_ON_ONCE(cpuc->enabled);
+
+ if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1)
+ intel_pmu_lbr_enable(event);
+}
+
+static void intel_pmu_pebs_disable(struct perf_event *event)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct hw_perf_event *hwc = &event->hw;
+
+ cpuc->pebs_enabled &= ~(1ULL << hwc->idx);
+ if (cpuc->enabled)
+ wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
+
+ hwc->config |= ARCH_PERFMON_EVENTSEL_INT;
+
+ if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1)
+ intel_pmu_lbr_disable(event);
+}
+
+static void intel_pmu_pebs_enable_all(void)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+ if (cpuc->pebs_enabled)
+ wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
+}
+
+static void intel_pmu_pebs_disable_all(void)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+ if (cpuc->pebs_enabled)
+ wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
+}
+
+#include <asm/insn.h>
+
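+/*
+ * Kernel addresses live above PAGE_OFFSET on 32-bit and in the negative
+ * half of the canonical address space on 64-bit; that is what the two
+ * checks below test for.
+ */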
+static inline bool kernel_ip(unsigned long ip)
+{
+#ifdef CONFIG_X86_32
+ return ip > PAGE_OFFSET;
+#else
+ return (long)ip < 0;
+#endif
+}
+
+static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ unsigned long from = cpuc->lbr_entries[0].from;
+ unsigned long old_to, to = cpuc->lbr_entries[0].to;
+ unsigned long ip = regs->ip;
+
+ /*
+ * We don't need to fix up the IP if the PEBS assist is fault-like
+ */
+ if (!x86_pmu.intel_cap.pebs_trap)
+ return 1;
+
+ /*
+ * No LBR entry, no basic block, no rewinding
+ */
+ if (!cpuc->lbr_stack.nr || !from || !to)
+ return 0;
+
+ /*
+ * Basic blocks should never cross user/kernel boundaries
+ */
+ if (kernel_ip(ip) != kernel_ip(to))
+ return 0;
+
+ /*
+ * unsigned math, either ip is before the start (impossible) or
+ * the basic block is larger than 1 page (sanity)
+ */
+ if ((ip - to) > PAGE_SIZE)
+ return 0;
+
+ /*
+ * We sampled a branch insn, rewind using the LBR stack
+ */
+ if (ip == to) {
+ regs->ip = from;
+ return 1;
+ }
+
+ do {
+ struct insn insn;
+ u8 buf[MAX_INSN_SIZE];
+ void *kaddr;
+
+ old_to = to;
+ if (!kernel_ip(ip)) {
+ int bytes, size = MAX_INSN_SIZE;
+
+ bytes = copy_from_user_nmi(buf, (void __user *)to, size);
+ if (bytes != size)
+ return 0;
+
+ kaddr = buf;
+ } else
+ kaddr = (void *)to;
+
+ kernel_insn_init(&insn, kaddr);
+ insn_get_length(&insn);
+ to += insn.length;
+ } while (to < ip);
+
+ if (to == ip) {
+ regs->ip = old_to;
+ return 1;
+ }
+
+ /*
+ * Even though we decoded the basic block, the instruction stream
+ * never matched the given IP, either the TO or the IP got corrupted.
+ */
+ return 0;
+}
+
+static int intel_pmu_save_and_restart(struct perf_event *event);
+
+static void __intel_pmu_pebs_event(struct perf_event *event,
+ struct pt_regs *iregs, void *__pebs)
+{
+ /*
+ * We cast to pebs_record_core since that is a subset of
+ * both formats and we don't use the other fields in this
+ * routine.
+ */
+ struct pebs_record_core *pebs = __pebs;
+ struct perf_sample_data data;
+ struct pt_regs regs;
+
+ if (!intel_pmu_save_and_restart(event))
+ return;
+
+ perf_sample_data_init(&data, 0);
+ data.period = event->hw.last_period;
+
+ /*
+ * We use the interrupt regs as a base because the PEBS record
+ * does not contain a full regs set, specifically it seems to
+ * lack segment descriptors, which get used by things like
+ * user_mode().
+ *
+ * In the simple case fix up only the IP and BP,SP regs, for
+ * PERF_SAMPLE_IP and PERF_SAMPLE_CALLCHAIN to function properly.
+ * A possible PERF_SAMPLE_REGS will have to transfer all regs.
+ */
+ regs = *iregs;
+ regs.ip = pebs->ip;
+ regs.bp = pebs->bp;
+ regs.sp = pebs->sp;
+
+ if (event->attr.precise_ip > 1 && intel_pmu_pebs_fixup_ip(&regs))
+ regs.flags |= PERF_EFLAGS_EXACT;
+ else
+ regs.flags &= ~PERF_EFLAGS_EXACT;
+
+ if (perf_event_overflow(event, 1, &data, &regs))
+ x86_pmu_stop(event);
+}
+
+static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct debug_store *ds = cpuc->ds;
+ struct perf_event *event = cpuc->events[0]; /* PMC0 only */
+ struct pebs_record_core *at, *top;
+ int n;
+
+ if (!ds || !x86_pmu.pebs)
+ return;
+
+ at = (struct pebs_record_core *)(unsigned long)ds->pebs_buffer_base;
+ top = (struct pebs_record_core *)(unsigned long)ds->pebs_index;
+
+ /*
+ * Whatever else happens, drain the thing
+ */
+ ds->pebs_index = ds->pebs_buffer_base;
+
+ if (!test_bit(0, cpuc->active_mask))
+ return;
+
+ WARN_ON_ONCE(!event);
+
+ if (!event->attr.precise_ip)
+ return;
+
+ n = top - at;
+ if (n <= 0)
+ return;
+
+ /*
+ * Should not happen, we program the threshold at 1 and do not
+ * set a reset value.
+ */
+ WARN_ON_ONCE(n > 1);
+ at += n - 1;
+
+ __intel_pmu_pebs_event(event, iregs, at);
+}
+
+static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct debug_store *ds = cpuc->ds;
+ struct pebs_record_nhm *at, *top;
+ struct perf_event *event = NULL;
+ u64 status = 0;
+ int bit, n;
+
+ if (!ds || !x86_pmu.pebs)
+ return;
+
+ at = (struct pebs_record_nhm *)(unsigned long)ds->pebs_buffer_base;
+ top = (struct pebs_record_nhm *)(unsigned long)ds->pebs_index;
+
+ ds->pebs_index = ds->pebs_buffer_base;
+
+ n = top - at;
+ if (n <= 0)
+ return;
+
+ /*
+ * Should not happen, we program the threshold at 1 and do not
+ * set a reset value.
+ */
+ WARN_ON_ONCE(n > MAX_PEBS_EVENTS);
+
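+ /*
+ * at->status is the bitmask of counters a record was generated
+ * for; the local status mask ensures each counter is handled at
+ * most once per drain.
+ */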
+ for ( ; at < top; at++) {
+ for_each_set_bit(bit, (unsigned long *)&at->status, MAX_PEBS_EVENTS) {
+ event = cpuc->events[bit];
+ if (!test_bit(bit, cpuc->active_mask))
+ continue;
+
+ WARN_ON_ONCE(!event);
+
+ if (!event->attr.precise_ip)
+ continue;
+
+ if (__test_and_set_bit(bit, (unsigned long *)&status))
+ continue;
+
+ break;
+ }
+
+ if (!event || bit >= MAX_PEBS_EVENTS)
+ continue;
+
+ __intel_pmu_pebs_event(event, iregs, at);
+ }
+}
+
+/*
+ * BTS, PEBS probe and setup
+ */
+
+static void intel_ds_init(void)
+{
+ /*
+ * No support for 32bit formats
+ */
+ if (!boot_cpu_has(X86_FEATURE_DTES64))
+ return;
+
+ x86_pmu.bts = boot_cpu_has(X86_FEATURE_BTS);
+ x86_pmu.pebs = boot_cpu_has(X86_FEATURE_PEBS);
+ if (x86_pmu.pebs) {
+ char pebs_type = x86_pmu.intel_cap.pebs_trap ? '+' : '-';
+ int format = x86_pmu.intel_cap.pebs_format;
+
+ switch (format) {
+ case 0:
+ printk(KERN_CONT "PEBS fmt0%c, ", pebs_type);
+ x86_pmu.pebs_record_size = sizeof(struct pebs_record_core);
+ x86_pmu.drain_pebs = intel_pmu_drain_pebs_core;
+ x86_pmu.pebs_constraints = intel_core_pebs_events;
+ break;
+
+ case 1:
+ printk(KERN_CONT "PEBS fmt1%c, ", pebs_type);
+ x86_pmu.pebs_record_size = sizeof(struct pebs_record_nhm);
+ x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
+ x86_pmu.pebs_constraints = intel_nehalem_pebs_events;
+ break;
+
+ default:
+ printk(KERN_CONT "no PEBS fmt%d%c, ", format, pebs_type);
+ x86_pmu.pebs = 0;
+ break;
+ }
+ }
+}
+
+#else /* CONFIG_CPU_SUP_INTEL */
+
+static int reserve_ds_buffers(void)
+{
+ return 0;
+}
+
+static void release_ds_buffers(void)
+{
+}
+
+#endif /* CONFIG_CPU_SUP_INTEL */
diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
new file mode 100644
index 000000000000..d202c1bece1a
--- /dev/null
+++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
@@ -0,0 +1,218 @@
+#ifdef CONFIG_CPU_SUP_INTEL
+
+enum {
+ LBR_FORMAT_32 = 0x00,
+ LBR_FORMAT_LIP = 0x01,
+ LBR_FORMAT_EIP = 0x02,
+ LBR_FORMAT_EIP_FLAGS = 0x03,
+};
+
+/*
+ * We only support LBR implementations that have FREEZE_LBRS_ON_PMI;
+ * otherwise it becomes nearly impossible to get a reliable stack.
+ */
+
+static void __intel_pmu_lbr_enable(void)
+{
+ u64 debugctl;
+
+ rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
+ debugctl |= (DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
+ wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
+}
+
+static void __intel_pmu_lbr_disable(void)
+{
+ u64 debugctl;
+
+ rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
+ debugctl &= ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
+ wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
+}
+
+static void intel_pmu_lbr_reset_32(void)
+{
+ int i;
+
+ for (i = 0; i < x86_pmu.lbr_nr; i++)
+ wrmsrl(x86_pmu.lbr_from + i, 0);
+}
+
+static void intel_pmu_lbr_reset_64(void)
+{
+ int i;
+
+ for (i = 0; i < x86_pmu.lbr_nr; i++) {
+ wrmsrl(x86_pmu.lbr_from + i, 0);
+ wrmsrl(x86_pmu.lbr_to + i, 0);
+ }
+}
+
+static void intel_pmu_lbr_reset(void)
+{
+ if (!x86_pmu.lbr_nr)
+ return;
+
+ if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32)
+ intel_pmu_lbr_reset_32();
+ else
+ intel_pmu_lbr_reset_64();
+}
+
+static void intel_pmu_lbr_enable(struct perf_event *event)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+ if (!x86_pmu.lbr_nr)
+ return;
+
+ WARN_ON_ONCE(cpuc->enabled);
+
+ /*
+ * Reset the LBR stack if we changed task context to
+ * avoid data leaks.
+ */
+
+ if (event->ctx->task && cpuc->lbr_context != event->ctx) {
+ intel_pmu_lbr_reset();
+ cpuc->lbr_context = event->ctx;
+ }
+
+ cpuc->lbr_users++;
+}
+
+static void intel_pmu_lbr_disable(struct perf_event *event)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+ if (!x86_pmu.lbr_nr)
+ return;
+
+ cpuc->lbr_users--;
+ WARN_ON_ONCE(cpuc->lbr_users < 0);
+
+ if (cpuc->enabled && !cpuc->lbr_users)
+ __intel_pmu_lbr_disable();
+}
+
+static void intel_pmu_lbr_enable_all(void)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+ if (cpuc->lbr_users)
+ __intel_pmu_lbr_enable();
+}
+
+static void intel_pmu_lbr_disable_all(void)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+ if (cpuc->lbr_users)
+ __intel_pmu_lbr_disable();
+}
+
+static inline u64 intel_pmu_lbr_tos(void)
+{
+ u64 tos;
+
+ rdmsrl(x86_pmu.lbr_tos, tos);
+
+ return tos;
+}
+
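+/*
+ * The LBR MSRs form a ring buffer: the TOS MSR indexes the most recent
+ * entry and the (tos - i) & mask arithmetic below walks it backwards.
+ * This relies on lbr_nr being a power of two (4, 8 or 16 for the models
+ * handled here).
+ */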
+static void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc)
+{
+ unsigned long mask = x86_pmu.lbr_nr - 1;
+ u64 tos = intel_pmu_lbr_tos();
+ int i;
+
+ for (i = 0; i < x86_pmu.lbr_nr; i++) {
+ unsigned long lbr_idx = (tos - i) & mask;
+ union {
+ struct {
+ u32 from;
+ u32 to;
+ };
+ u64 lbr;
+ } msr_lastbranch;
+
+ rdmsrl(x86_pmu.lbr_from + lbr_idx, msr_lastbranch.lbr);
+
+ cpuc->lbr_entries[i].from = msr_lastbranch.from;
+ cpuc->lbr_entries[i].to = msr_lastbranch.to;
+ cpuc->lbr_entries[i].flags = 0;
+ }
+ cpuc->lbr_stack.nr = i;
+}
+
+#define LBR_FROM_FLAG_MISPRED (1ULL << 63)
+
+/*
+ * Due to lack of segmentation in Linux the effective address (offset)
+ * is the same as the linear address, allowing us to merge the LIP and EIP
+ * LBR formats.
+ */
+static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
+{
+ unsigned long mask = x86_pmu.lbr_nr - 1;
+ int lbr_format = x86_pmu.intel_cap.lbr_format;
+ u64 tos = intel_pmu_lbr_tos();
+ int i;
+
+ for (i = 0; i < x86_pmu.lbr_nr; i++) {
+ unsigned long lbr_idx = (tos - i) & mask;
+ u64 from, to, flags = 0;
+
+ rdmsrl(x86_pmu.lbr_from + lbr_idx, from);
+ rdmsrl(x86_pmu.lbr_to + lbr_idx, to);
+
+ if (lbr_format == LBR_FORMAT_EIP_FLAGS) {
+ flags = !!(from & LBR_FROM_FLAG_MISPRED);
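+ /*
+ * Bit 63 of the FROM field carries the mispredict flag in
+ * this format; the shift pair below drops it and sign-extends
+ * bit 62 back into bit 63, restoring a canonical address.
+ */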
+ from = (u64)((((s64)from) << 1) >> 1);
+ }
+
+ cpuc->lbr_entries[i].from = from;
+ cpuc->lbr_entries[i].to = to;
+ cpuc->lbr_entries[i].flags = flags;
+ }
+ cpuc->lbr_stack.nr = i;
+}
+
+static void intel_pmu_lbr_read(void)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+ if (!cpuc->lbr_users)
+ return;
+
+ if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32)
+ intel_pmu_lbr_read_32(cpuc);
+ else
+ intel_pmu_lbr_read_64(cpuc);
+}
+
+static void intel_pmu_lbr_init_core(void)
+{
+ x86_pmu.lbr_nr = 4;
+ x86_pmu.lbr_tos = 0x01c9;
+ x86_pmu.lbr_from = 0x40;
+ x86_pmu.lbr_to = 0x60;
+}
+
+static void intel_pmu_lbr_init_nhm(void)
+{
+ x86_pmu.lbr_nr = 16;
+ x86_pmu.lbr_tos = 0x01c9;
+ x86_pmu.lbr_from = 0x680;
+ x86_pmu.lbr_to = 0x6c0;
+}
+
+static void intel_pmu_lbr_init_atom(void)
+{
+ x86_pmu.lbr_nr = 8;
+ x86_pmu.lbr_tos = 0x01c9;
+ x86_pmu.lbr_from = 0x40;
+ x86_pmu.lbr_to = 0x60;
+}
+
+#endif /* CONFIG_CPU_SUP_INTEL */
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
new file mode 100644
index 000000000000..ae85d69644d1
--- /dev/null
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -0,0 +1,858 @@
+/*
+ * Netburst Performance Events (P4, old Xeon)
+ *
+ * Copyright (C) 2010 Parallels, Inc., Cyrill Gorcunov <gorcunov@openvz.org>
+ * Copyright (C) 2010 Intel Corporation, Lin Ming <ming.m.lin@intel.com>
+ *
+ * For licencing details see kernel-base/COPYING
+ */
+
+#ifdef CONFIG_CPU_SUP_INTEL
+
+#include <asm/perf_event_p4.h>
+
+#define P4_CNTR_LIMIT 3
+/*
+ * array indices: 0,1 - HT threads, used with HT enabled cpu
+ */
+struct p4_event_bind {
+ unsigned int opcode; /* Event code and ESCR selector */
+ unsigned int escr_msr[2]; /* ESCR MSR for this event */
+ char cntr[2][P4_CNTR_LIMIT]; /* counter index (offset), -1 if absent */
+};
+
+struct p4_cache_event_bind {
+ unsigned int metric_pebs;
+ unsigned int metric_vert;
+};
+
+#define P4_GEN_CACHE_EVENT_BIND(name) \
+ [P4_CACHE__##name] = { \
+ .metric_pebs = P4_PEBS__##name, \
+ .metric_vert = P4_VERT__##name, \
+ }
+
+static struct p4_cache_event_bind p4_cache_event_bind_map[] = {
+ P4_GEN_CACHE_EVENT_BIND(1stl_cache_load_miss_retired),
+ P4_GEN_CACHE_EVENT_BIND(2ndl_cache_load_miss_retired),
+ P4_GEN_CACHE_EVENT_BIND(dtlb_load_miss_retired),
+ P4_GEN_CACHE_EVENT_BIND(dtlb_store_miss_retired),
+};
+
+/*
+ * Note that we don't use CCCR1 here; P4_BSQ_ALLOCATION would be the
+ * exception, but we simply have no workaround for its broken CCCR1.
+ *
+ * Consider this binding as the resources a particular event may borrow;
+ * it doesn't contain the EventMask, Tags and friends -- those are left
+ * to the caller.
+ */
+static struct p4_event_bind p4_event_bind_map[] = {
+ [P4_EVENT_TC_DELIVER_MODE] = {
+ .opcode = P4_OPCODE(P4_EVENT_TC_DELIVER_MODE),
+ .escr_msr = { MSR_P4_TC_ESCR0, MSR_P4_TC_ESCR1 },
+ .cntr = { {4, 5, -1}, {6, 7, -1} },
+ },
+ [P4_EVENT_BPU_FETCH_REQUEST] = {
+ .opcode = P4_OPCODE(P4_EVENT_BPU_FETCH_REQUEST),
+ .escr_msr = { MSR_P4_BPU_ESCR0, MSR_P4_BPU_ESCR1 },
+ .cntr = { {0, -1, -1}, {2, -1, -1} },
+ },
+ [P4_EVENT_ITLB_REFERENCE] = {
+ .opcode = P4_OPCODE(P4_EVENT_ITLB_REFERENCE),
+ .escr_msr = { MSR_P4_ITLB_ESCR0, MSR_P4_ITLB_ESCR1 },
+ .cntr = { {0, -1, -1}, {2, -1, -1} },
+ },
+ [P4_EVENT_MEMORY_CANCEL] = {
+ .opcode = P4_OPCODE(P4_EVENT_MEMORY_CANCEL),
+ .escr_msr = { MSR_P4_DAC_ESCR0, MSR_P4_DAC_ESCR1 },
+ .cntr = { {8, 9, -1}, {10, 11, -1} },
+ },
+ [P4_EVENT_MEMORY_COMPLETE] = {
+ .opcode = P4_OPCODE(P4_EVENT_MEMORY_COMPLETE),
+ .escr_msr = { MSR_P4_SAAT_ESCR0 , MSR_P4_SAAT_ESCR1 },
+ .cntr = { {8, 9, -1}, {10, 11, -1} },
+ },
+ [P4_EVENT_LOAD_PORT_REPLAY] = {
+ .opcode = P4_OPCODE(P4_EVENT_LOAD_PORT_REPLAY),
+ .escr_msr = { MSR_P4_SAAT_ESCR0, MSR_P4_SAAT_ESCR1 },
+ .cntr = { {8, 9, -1}, {10, 11, -1} },
+ },
+ [P4_EVENT_STORE_PORT_REPLAY] = {
+ .opcode = P4_OPCODE(P4_EVENT_STORE_PORT_REPLAY),
+ .escr_msr = { MSR_P4_SAAT_ESCR0 , MSR_P4_SAAT_ESCR1 },
+ .cntr = { {8, 9, -1}, {10, 11, -1} },
+ },
+ [P4_EVENT_MOB_LOAD_REPLAY] = {
+ .opcode = P4_OPCODE(P4_EVENT_MOB_LOAD_REPLAY),
+ .escr_msr = { MSR_P4_MOB_ESCR0, MSR_P4_MOB_ESCR1 },
+ .cntr = { {0, -1, -1}, {2, -1, -1} },
+ },
+ [P4_EVENT_PAGE_WALK_TYPE] = {
+ .opcode = P4_OPCODE(P4_EVENT_PAGE_WALK_TYPE),
+ .escr_msr = { MSR_P4_PMH_ESCR0, MSR_P4_PMH_ESCR1 },
+ .cntr = { {0, -1, -1}, {2, -1, -1} },
+ },
+ [P4_EVENT_BSQ_CACHE_REFERENCE] = {
+ .opcode = P4_OPCODE(P4_EVENT_BSQ_CACHE_REFERENCE),
+ .escr_msr = { MSR_P4_BSU_ESCR0, MSR_P4_BSU_ESCR1 },
+ .cntr = { {0, -1, -1}, {2, -1, -1} },
+ },
+ [P4_EVENT_IOQ_ALLOCATION] = {
+ .opcode = P4_OPCODE(P4_EVENT_IOQ_ALLOCATION),
+ .escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
+ .cntr = { {0, -1, -1}, {2, -1, -1} },
+ },
+ [P4_EVENT_IOQ_ACTIVE_ENTRIES] = { /* shared ESCR */
+ .opcode = P4_OPCODE(P4_EVENT_IOQ_ACTIVE_ENTRIES),
+ .escr_msr = { MSR_P4_FSB_ESCR1, MSR_P4_FSB_ESCR1 },
+ .cntr = { {2, -1, -1}, {3, -1, -1} },
+ },
+ [P4_EVENT_FSB_DATA_ACTIVITY] = {
+ .opcode = P4_OPCODE(P4_EVENT_FSB_DATA_ACTIVITY),
+ .escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
+ .cntr = { {0, -1, -1}, {2, -1, -1} },
+ },
+ [P4_EVENT_BSQ_ALLOCATION] = { /* shared ESCR, broken CCCR1 */
+ .opcode = P4_OPCODE(P4_EVENT_BSQ_ALLOCATION),
+ .escr_msr = { MSR_P4_BSU_ESCR0, MSR_P4_BSU_ESCR0 },
+ .cntr = { {0, -1, -1}, {1, -1, -1} },
+ },
+ [P4_EVENT_BSQ_ACTIVE_ENTRIES] = { /* shared ESCR */
+ .opcode = P4_OPCODE(P4_EVENT_BSQ_ACTIVE_ENTRIES),
+ .escr_msr = { MSR_P4_BSU_ESCR1 , MSR_P4_BSU_ESCR1 },
+ .cntr = { {2, -1, -1}, {3, -1, -1} },
+ },
+ [P4_EVENT_SSE_INPUT_ASSIST] = {
+ .opcode = P4_OPCODE(P4_EVENT_SSE_INPUT_ASSIST),
+ .escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
+ .cntr = { {8, 9, -1}, {10, 11, -1} },
+ },
+ [P4_EVENT_PACKED_SP_UOP] = {
+ .opcode = P4_OPCODE(P4_EVENT_PACKED_SP_UOP),
+ .escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
+ .cntr = { {8, 9, -1}, {10, 11, -1} },
+ },
+ [P4_EVENT_PACKED_DP_UOP] = {
+ .opcode = P4_OPCODE(P4_EVENT_PACKED_DP_UOP),
+ .escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
+ .cntr = { {8, 9, -1}, {10, 11, -1} },
+ },
+ [P4_EVENT_SCALAR_SP_UOP] = {
+ .opcode = P4_OPCODE(P4_EVENT_SCALAR_SP_UOP),
+ .escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
+ .cntr = { {8, 9, -1}, {10, 11, -1} },
+ },
+ [P4_EVENT_SCALAR_DP_UOP] = {
+ .opcode = P4_OPCODE(P4_EVENT_SCALAR_DP_UOP),
+ .escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
+ .cntr = { {8, 9, -1}, {10, 11, -1} },
+ },
+ [P4_EVENT_64BIT_MMX_UOP] = {
+ .opcode = P4_OPCODE(P4_EVENT_64BIT_MMX_UOP),
+ .escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
+ .cntr = { {8, 9, -1}, {10, 11, -1} },
+ },
+ [P4_EVENT_128BIT_MMX_UOP] = {
+ .opcode = P4_OPCODE(P4_EVENT_128BIT_MMX_UOP),
+ .escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
+ .cntr = { {8, 9, -1}, {10, 11, -1} },
+ },
+ [P4_EVENT_X87_FP_UOP] = {
+ .opcode = P4_OPCODE(P4_EVENT_X87_FP_UOP),
+ .escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
+ .cntr = { {8, 9, -1}, {10, 11, -1} },
+ },
+ [P4_EVENT_TC_MISC] = {
+ .opcode = P4_OPCODE(P4_EVENT_TC_MISC),
+ .escr_msr = { MSR_P4_TC_ESCR0, MSR_P4_TC_ESCR1 },
+ .cntr = { {4, 5, -1}, {6, 7, -1} },
+ },
+ [P4_EVENT_GLOBAL_POWER_EVENTS] = {
+ .opcode = P4_OPCODE(P4_EVENT_GLOBAL_POWER_EVENTS),
+ .escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
+ .cntr = { {0, -1, -1}, {2, -1, -1} },
+ },
+ [P4_EVENT_TC_MS_XFER] = {
+ .opcode = P4_OPCODE(P4_EVENT_TC_MS_XFER),
+ .escr_msr = { MSR_P4_MS_ESCR0, MSR_P4_MS_ESCR1 },
+ .cntr = { {4, 5, -1}, {6, 7, -1} },
+ },
+ [P4_EVENT_UOP_QUEUE_WRITES] = {
+ .opcode = P4_OPCODE(P4_EVENT_UOP_QUEUE_WRITES),
+ .escr_msr = { MSR_P4_MS_ESCR0, MSR_P4_MS_ESCR1 },
+ .cntr = { {4, 5, -1}, {6, 7, -1} },
+ },
+ [P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE] = {
+ .opcode = P4_OPCODE(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE),
+ .escr_msr = { MSR_P4_TBPU_ESCR0 , MSR_P4_TBPU_ESCR0 },
+ .cntr = { {4, 5, -1}, {6, 7, -1} },
+ },
+ [P4_EVENT_RETIRED_BRANCH_TYPE] = {
+ .opcode = P4_OPCODE(P4_EVENT_RETIRED_BRANCH_TYPE),
+ .escr_msr = { MSR_P4_TBPU_ESCR0 , MSR_P4_TBPU_ESCR1 },
+ .cntr = { {4, 5, -1}, {6, 7, -1} },
+ },
+ [P4_EVENT_RESOURCE_STALL] = {
+ .opcode = P4_OPCODE(P4_EVENT_RESOURCE_STALL),
+ .escr_msr = { MSR_P4_ALF_ESCR0, MSR_P4_ALF_ESCR1 },
+ .cntr = { {12, 13, 16}, {14, 15, 17} },
+ },
+ [P4_EVENT_WC_BUFFER] = {
+ .opcode = P4_OPCODE(P4_EVENT_WC_BUFFER),
+ .escr_msr = { MSR_P4_DAC_ESCR0, MSR_P4_DAC_ESCR1 },
+ .cntr = { {8, 9, -1}, {10, 11, -1} },
+ },
+ [P4_EVENT_B2B_CYCLES] = {
+ .opcode = P4_OPCODE(P4_EVENT_B2B_CYCLES),
+ .escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
+ .cntr = { {0, -1, -1}, {2, -1, -1} },
+ },
+ [P4_EVENT_BNR] = {
+ .opcode = P4_OPCODE(P4_EVENT_BNR),
+ .escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
+ .cntr = { {0, -1, -1}, {2, -1, -1} },
+ },
+ [P4_EVENT_SNOOP] = {
+ .opcode = P4_OPCODE(P4_EVENT_SNOOP),
+ .escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
+ .cntr = { {0, -1, -1}, {2, -1, -1} },
+ },
+ [P4_EVENT_RESPONSE] = {
+ .opcode = P4_OPCODE(P4_EVENT_RESPONSE),
+ .escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
+ .cntr = { {0, -1, -1}, {2, -1, -1} },
+ },
+ [P4_EVENT_FRONT_END_EVENT] = {
+ .opcode = P4_OPCODE(P4_EVENT_FRONT_END_EVENT),
+ .escr_msr = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
+ .cntr = { {12, 13, 16}, {14, 15, 17} },
+ },
+ [P4_EVENT_EXECUTION_EVENT] = {
+ .opcode = P4_OPCODE(P4_EVENT_EXECUTION_EVENT),
+ .escr_msr = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
+ .cntr = { {12, 13, 16}, {14, 15, 17} },
+ },
+ [P4_EVENT_REPLAY_EVENT] = {
+ .opcode = P4_OPCODE(P4_EVENT_REPLAY_EVENT),
+ .escr_msr = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
+ .cntr = { {12, 13, 16}, {14, 15, 17} },
+ },
+ [P4_EVENT_INSTR_RETIRED] = {
+ .opcode = P4_OPCODE(P4_EVENT_INSTR_RETIRED),
+ .escr_msr = { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 },
+ .cntr = { {12, 13, 16}, {14, 15, 17} },
+ },
+ [P4_EVENT_UOPS_RETIRED] = {
+ .opcode = P4_OPCODE(P4_EVENT_UOPS_RETIRED),
+ .escr_msr = { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 },
+ .cntr = { {12, 13, 16}, {14, 15, 17} },
+ },
+ [P4_EVENT_UOP_TYPE] = {
+ .opcode = P4_OPCODE(P4_EVENT_UOP_TYPE),
+ .escr_msr = { MSR_P4_RAT_ESCR0, MSR_P4_RAT_ESCR1 },
+ .cntr = { {12, 13, 16}, {14, 15, 17} },
+ },
+ [P4_EVENT_BRANCH_RETIRED] = {
+ .opcode = P4_OPCODE(P4_EVENT_BRANCH_RETIRED),
+ .escr_msr = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
+ .cntr = { {12, 13, 16}, {14, 15, 17} },
+ },
+ [P4_EVENT_MISPRED_BRANCH_RETIRED] = {
+ .opcode = P4_OPCODE(P4_EVENT_MISPRED_BRANCH_RETIRED),
+ .escr_msr = { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 },
+ .cntr = { {12, 13, 16}, {14, 15, 17} },
+ },
+ [P4_EVENT_X87_ASSIST] = {
+ .opcode = P4_OPCODE(P4_EVENT_X87_ASSIST),
+ .escr_msr = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
+ .cntr = { {12, 13, 16}, {14, 15, 17} },
+ },
+ [P4_EVENT_MACHINE_CLEAR] = {
+ .opcode = P4_OPCODE(P4_EVENT_MACHINE_CLEAR),
+ .escr_msr = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
+ .cntr = { {12, 13, 16}, {14, 15, 17} },
+ },
+ [P4_EVENT_INSTR_COMPLETED] = {
+ .opcode = P4_OPCODE(P4_EVENT_INSTR_COMPLETED),
+ .escr_msr = { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 },
+ .cntr = { {12, 13, 16}, {14, 15, 17} },
+ },
+};
+
+#define P4_GEN_CACHE_EVENT(event, bit, cache_event) \
+ p4_config_pack_escr(P4_ESCR_EVENT(event) | \
+ P4_ESCR_EMASK_BIT(event, bit)) | \
+ p4_config_pack_cccr(cache_event | \
+ P4_CCCR_ESEL(P4_OPCODE_ESEL(P4_OPCODE(event))))
+
+static __initconst const u64 p4_hw_cache_event_ids
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] =
+{
+ [ C(L1D ) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0,
+ [ C(RESULT_MISS) ] = P4_GEN_CACHE_EVENT(P4_EVENT_REPLAY_EVENT, NBOGUS,
+ P4_CACHE__1stl_cache_load_miss_retired),
+ },
+ },
+ [ C(LL ) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0,
+ [ C(RESULT_MISS) ] = P4_GEN_CACHE_EVENT(P4_EVENT_REPLAY_EVENT, NBOGUS,
+ P4_CACHE__2ndl_cache_load_miss_retired),
+ },
+},
+ [ C(DTLB) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0,
+ [ C(RESULT_MISS) ] = P4_GEN_CACHE_EVENT(P4_EVENT_REPLAY_EVENT, NBOGUS,
+ P4_CACHE__dtlb_load_miss_retired),
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0,
+ [ C(RESULT_MISS) ] = P4_GEN_CACHE_EVENT(P4_EVENT_REPLAY_EVENT, NBOGUS,
+ P4_CACHE__dtlb_store_miss_retired),
+ },
+ },
+ [ C(ITLB) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = P4_GEN_CACHE_EVENT(P4_EVENT_ITLB_REFERENCE, HIT,
+ P4_CACHE__itlb_reference_hit),
+ [ C(RESULT_MISS) ] = P4_GEN_CACHE_EVENT(P4_EVENT_ITLB_REFERENCE, MISS,
+ P4_CACHE__itlb_reference_miss),
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
+};
+
+static u64 p4_general_events[PERF_COUNT_HW_MAX] = {
+ /* non-halted CPU clocks */
+ [PERF_COUNT_HW_CPU_CYCLES] =
+ p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_GLOBAL_POWER_EVENTS) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_GLOBAL_POWER_EVENTS, RUNNING)),
+
+ /*
+ * retired instructions
+ * for the sake of simplicity we don't use the FSB tagging
+ */
+ [PERF_COUNT_HW_INSTRUCTIONS] =
+ p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_INSTR_RETIRED) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_RETIRED, NBOGUSNTAG) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_RETIRED, BOGUSNTAG)),
+
+ /* cache hits */
+ [PERF_COUNT_HW_CACHE_REFERENCES] =
+ p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_BSQ_CACHE_REFERENCE) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITS) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITE) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITM) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITS) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITE) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITM)),
+
+ /* cache misses */
+ [PERF_COUNT_HW_CACHE_MISSES] =
+ p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_BSQ_CACHE_REFERENCE) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_MISS) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_MISS) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, WR_2ndL_MISS)),
+
+ /* branch instructions retired */
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] =
+ p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_RETIRED_BRANCH_TYPE) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, CONDITIONAL) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, CALL) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, RETURN) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, INDIRECT)),
+
+ /* mispredicted branches retired */
+ [PERF_COUNT_HW_BRANCH_MISSES] =
+ p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_MISPRED_BRANCH_RETIRED) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_MISPRED_BRANCH_RETIRED, NBOGUS)),
+
+ /* bus ready clocks (cpu is driving #DRDY_DRV/#DRDY_OWN): */
+ [PERF_COUNT_HW_BUS_CYCLES] =
+ p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_FSB_DATA_ACTIVITY) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DRDY_DRV) |
+ P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DRDY_OWN)) |
+ p4_config_pack_cccr(P4_CCCR_EDGE | P4_CCCR_COMPARE),
+};
+
+static struct p4_event_bind *p4_config_get_bind(u64 config)
+{
+ unsigned int evnt = p4_config_unpack_event(config);
+ struct p4_event_bind *bind = NULL;
+
+ if (evnt < ARRAY_SIZE(p4_event_bind_map))
+ bind = &p4_event_bind_map[evnt];
+
+ return bind;
+}
+
+static u64 p4_pmu_event_map(int hw_event)
+{
+ struct p4_event_bind *bind;
+ unsigned int esel;
+ u64 config;
+
+ config = p4_general_events[hw_event];
+ bind = p4_config_get_bind(config);
+ esel = P4_OPCODE_ESEL(bind->opcode);
+ config |= p4_config_pack_cccr(P4_CCCR_ESEL(esel));
+
+ return config;
+}
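For illustration, here is a minimal user-space sketch of the packed-config scheme the event table above relies on; it assumes (as the p4_config_pack_escr()/p4_config_pack_cccr() helpers suggest) that the ESCR selection occupies the upper 32 bits and the CCCR selection the lower 32 bits of the u64 config. The helper names and values below are hypothetical and only illustrate the idea, they are not part of the patch.

/*
 * Minimal sketch of the packed-config idea: ESCR half in the upper
 * 32 bits, CCCR half in the lower 32 bits (assumption, see above).
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t pack_escr(uint64_t escr)  { return escr << 32; }
static uint64_t pack_cccr(uint64_t cccr)  { return cccr & 0xffffffffULL; }
static uint32_t unpack_escr(uint64_t cfg) { return (uint32_t)(cfg >> 32); }
static uint32_t unpack_cccr(uint64_t cfg) { return (uint32_t)cfg; }

int main(void)
{
	/* illustrative bit patterns only */
	uint64_t cfg = pack_escr(0x0000ff00) | pack_cccr(0x00039000);

	printf("escr=%#x cccr=%#x\n",
	       (unsigned)unpack_escr(cfg), (unsigned)unpack_cccr(cfg));
	return 0;
}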
+
+static int p4_hw_config(struct perf_event *event)
+{
+ int cpu = get_cpu();
+ int rc = 0;
+ unsigned int evnt;
+ u32 escr, cccr;
+
+ /*
+	 * the reason we grab the cpu this early is that if we get scheduled
+	 * for the first time on the same cpu, we will not need to swap the
+	 * thread-specific flags in the config (and will save some cpu cycles)
+ */
+
+ cccr = p4_default_cccr_conf(cpu);
+ escr = p4_default_escr_conf(cpu, event->attr.exclude_kernel,
+ event->attr.exclude_user);
+ event->hw.config = p4_config_pack_escr(escr) |
+ p4_config_pack_cccr(cccr);
+
+ if (p4_ht_active() && p4_ht_thread(cpu))
+ event->hw.config = p4_set_ht_bit(event->hw.config);
+
+ if (event->attr.type == PERF_TYPE_RAW) {
+
+		/* user data may have an out-of-bounds event index */
+ evnt = p4_config_unpack_event(event->attr.config);
+ if (evnt >= ARRAY_SIZE(p4_event_bind_map)) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /*
+		 * We don't control raw events so it's up to the caller
+		 * to pass sane values (and we don't count the thread number
+		 * on an HT machine but allow HT-compatible specifics to be
+		 * passed on)
+		 *
+		 * XXX: HT-wide things should check perf_paranoid_cpu() &&
+		 * CAP_SYS_ADMIN
+ */
+ event->hw.config |= event->attr.config &
+ (p4_config_pack_escr(P4_ESCR_MASK_HT) |
+ p4_config_pack_cccr(P4_CCCR_MASK_HT));
+ }
+
+ rc = x86_setup_perfctr(event);
+out:
+ put_cpu();
+ return rc;
+}
+
+static inline int p4_pmu_clear_cccr_ovf(struct hw_perf_event *hwc)
+{
+ int overflow = 0;
+ u32 low, high;
+
+ rdmsr(hwc->config_base + hwc->idx, low, high);
+
+	/* we need to check the high bit for unflagged overflows */
+ if ((low & P4_CCCR_OVF) || !(high & (1 << 31))) {
+ overflow = 1;
+ (void)checking_wrmsrl(hwc->config_base + hwc->idx,
+ ((u64)low) & ~P4_CCCR_OVF);
+ }
+
+ return overflow;
+}
+
+static inline void p4_pmu_disable_event(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ /*
+	 * If the event gets disabled while the counter is in an overflowed
+	 * state we need to clear P4_CCCR_OVF, otherwise the interrupt gets
+	 * asserted again and again
+ */
+ (void)checking_wrmsrl(hwc->config_base + hwc->idx,
+ (u64)(p4_config_unpack_cccr(hwc->config)) &
+ ~P4_CCCR_ENABLE & ~P4_CCCR_OVF & ~P4_CCCR_RESERVED);
+}
+
+static void p4_pmu_disable_all(void)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ int idx;
+
+ for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+ struct perf_event *event = cpuc->events[idx];
+ if (!test_bit(idx, cpuc->active_mask))
+ continue;
+ p4_pmu_disable_event(event);
+ }
+}
+
+static void p4_pmu_enable_event(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ int thread = p4_ht_config_thread(hwc->config);
+ u64 escr_conf = p4_config_unpack_escr(p4_clear_ht_bit(hwc->config));
+ unsigned int idx = p4_config_unpack_event(hwc->config);
+ unsigned int idx_cache = p4_config_unpack_cache_event(hwc->config);
+ struct p4_event_bind *bind;
+ struct p4_cache_event_bind *bind_cache;
+ u64 escr_addr, cccr;
+
+ bind = &p4_event_bind_map[idx];
+ escr_addr = (u64)bind->escr_msr[thread];
+
+ /*
+	 * - we don't support cascaded counters yet
+ * - and counter 1 is broken (erratum)
+ */
+ WARN_ON_ONCE(p4_is_event_cascaded(hwc->config));
+ WARN_ON_ONCE(hwc->idx == 1);
+
+ /* we need a real Event value */
+ escr_conf &= ~P4_ESCR_EVENT_MASK;
+ escr_conf |= P4_ESCR_EVENT(P4_OPCODE_EVNT(bind->opcode));
+
+ cccr = p4_config_unpack_cccr(hwc->config);
+
+ /*
+	 * it could be a cache event, in which case we need to
+	 * set the metrics in additional MSRs
+ */
+ BUILD_BUG_ON(P4_CACHE__MAX > P4_CCCR_CACHE_OPS_MASK);
+ if (idx_cache > P4_CACHE__NONE &&
+ idx_cache < ARRAY_SIZE(p4_cache_event_bind_map)) {
+ bind_cache = &p4_cache_event_bind_map[idx_cache];
+ (void)checking_wrmsrl(MSR_IA32_PEBS_ENABLE, (u64)bind_cache->metric_pebs);
+ (void)checking_wrmsrl(MSR_P4_PEBS_MATRIX_VERT, (u64)bind_cache->metric_vert);
+ }
+
+ (void)checking_wrmsrl(escr_addr, escr_conf);
+ (void)checking_wrmsrl(hwc->config_base + hwc->idx,
+ (cccr & ~P4_CCCR_RESERVED) | P4_CCCR_ENABLE);
+}
+
+static void p4_pmu_enable_all(int added)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ int idx;
+
+ for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+ struct perf_event *event = cpuc->events[idx];
+ if (!test_bit(idx, cpuc->active_mask))
+ continue;
+ p4_pmu_enable_event(event);
+ }
+}
+
+static int p4_pmu_handle_irq(struct pt_regs *regs)
+{
+ struct perf_sample_data data;
+ struct cpu_hw_events *cpuc;
+ struct perf_event *event;
+ struct hw_perf_event *hwc;
+ int idx, handled = 0;
+ u64 val;
+
+ data.addr = 0;
+ data.raw = NULL;
+
+ cpuc = &__get_cpu_var(cpu_hw_events);
+
+ for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+
+ if (!test_bit(idx, cpuc->active_mask))
+ continue;
+
+ event = cpuc->events[idx];
+ hwc = &event->hw;
+
+ WARN_ON_ONCE(hwc->idx != idx);
+
+		/* it might be an unflagged overflow */
+ handled = p4_pmu_clear_cccr_ovf(hwc);
+
+ val = x86_perf_event_update(event);
+ if (!handled && (val & (1ULL << (x86_pmu.cntval_bits - 1))))
+ continue;
+
+ /* event overflow for sure */
+ data.period = event->hw.last_period;
+
+ if (!x86_perf_event_set_period(event))
+ continue;
+ if (perf_event_overflow(event, 1, &data, regs))
+ p4_pmu_disable_event(event);
+ }
+
+ if (handled) {
+ /* p4 quirk: unmask it again */
+ apic_write(APIC_LVTPC, apic_read(APIC_LVTPC) & ~APIC_LVT_MASKED);
+ inc_irq_stat(apic_perf_irqs);
+ }
+
+ return handled;
+}
+
+/*
+ * swap thread-specific fields according to the thread
+ * we are going to run on
+ */
+static void p4_pmu_swap_config_ts(struct hw_perf_event *hwc, int cpu)
+{
+ u32 escr, cccr;
+
+ /*
+	 * either we are lucky and continue on the same cpu, or there is no HT support
+ */
+ if (!p4_should_swap_ts(hwc->config, cpu))
+ return;
+
+ /*
+	 * the event is migrated from another logical
+	 * cpu, so we need to swap the thread-specific flags
+ */
+
+ escr = p4_config_unpack_escr(hwc->config);
+ cccr = p4_config_unpack_cccr(hwc->config);
+
+ if (p4_ht_thread(cpu)) {
+ cccr &= ~P4_CCCR_OVF_PMI_T0;
+ cccr |= P4_CCCR_OVF_PMI_T1;
+ if (escr & P4_ESCR_T0_OS) {
+ escr &= ~P4_ESCR_T0_OS;
+ escr |= P4_ESCR_T1_OS;
+ }
+ if (escr & P4_ESCR_T0_USR) {
+ escr &= ~P4_ESCR_T0_USR;
+ escr |= P4_ESCR_T1_USR;
+ }
+ hwc->config = p4_config_pack_escr(escr);
+ hwc->config |= p4_config_pack_cccr(cccr);
+ hwc->config |= P4_CONFIG_HT;
+ } else {
+ cccr &= ~P4_CCCR_OVF_PMI_T1;
+ cccr |= P4_CCCR_OVF_PMI_T0;
+ if (escr & P4_ESCR_T1_OS) {
+ escr &= ~P4_ESCR_T1_OS;
+ escr |= P4_ESCR_T0_OS;
+ }
+ if (escr & P4_ESCR_T1_USR) {
+ escr &= ~P4_ESCR_T1_USR;
+ escr |= P4_ESCR_T0_USR;
+ }
+ hwc->config = p4_config_pack_escr(escr);
+ hwc->config |= p4_config_pack_cccr(cccr);
+ hwc->config &= ~P4_CONFIG_HT;
+ }
+}
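The swap above simply moves every per-thread flag from its T0 position to its T1 position (or back) when the event migrates between HT siblings. A tiny stand-alone sketch of that paired-flag pattern, with made-up bit positions standing in for the real P4_ESCR_T0_*/T1_* and P4_CCCR_OVF_PMI_T0/T1 masks:

/* Sketch of the per-thread flag swap pattern; bit positions are illustrative. */
#include <stdint.h>
#include <stdio.h>

#define T0_OS  (1u << 3)	/* hypothetical thread-0 kernel-mode bit */
#define T1_OS  (1u << 1)	/* hypothetical thread-1 kernel-mode bit */

static uint32_t move_flag(uint32_t v, uint32_t from, uint32_t to)
{
	if (v & from) {
		v &= ~from;
		v |= to;
	}
	return v;
}

int main(void)
{
	uint32_t escr = T0_OS;

	escr = move_flag(escr, T0_OS, T1_OS);	/* migrate thread 0 -> thread 1 */
	printf("escr=%#x\n", escr);
	return 0;
}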
+
+/*
+ * ESCR address hashing is tricky: the ESCRs are not sequential
+ * in MSR space but they all start from MSR_P4_BSU_ESCR0 (0x03a0),
+ * and the low byte of any ESCR address lies in the range [0xa0, 0xe1],
+ *
+ * so we end up with a ~70% filled lookup table
+ */
+
+#define P4_ESCR_MSR_BASE 0x000003a0
+#define P4_ESCR_MSR_MAX 0x000003e1
+#define P4_ESCR_MSR_TABLE_SIZE (P4_ESCR_MSR_MAX - P4_ESCR_MSR_BASE + 1)
+#define P4_ESCR_MSR_IDX(msr) (msr - P4_ESCR_MSR_BASE)
+#define P4_ESCR_MSR_TABLE_ENTRY(msr) [P4_ESCR_MSR_IDX(msr)] = msr
+
+static const unsigned int p4_escr_table[P4_ESCR_MSR_TABLE_SIZE] = {
+ P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_ALF_ESCR0),
+ P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_ALF_ESCR1),
+ P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_BPU_ESCR0),
+ P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_BPU_ESCR1),
+ P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_BSU_ESCR0),
+ P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_BSU_ESCR1),
+ P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_CRU_ESCR0),
+ P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_CRU_ESCR1),
+ P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_CRU_ESCR2),
+ P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_CRU_ESCR3),
+ P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_CRU_ESCR4),
+ P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_CRU_ESCR5),
+ P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_DAC_ESCR0),
+ P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_DAC_ESCR1),
+ P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_FIRM_ESCR0),
+ P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_FIRM_ESCR1),
+ P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_FLAME_ESCR0),
+ P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_FLAME_ESCR1),
+ P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_FSB_ESCR0),
+ P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_FSB_ESCR1),
+ P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_IQ_ESCR0),
+ P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_IQ_ESCR1),
+ P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_IS_ESCR0),
+ P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_IS_ESCR1),
+ P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_ITLB_ESCR0),
+ P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_ITLB_ESCR1),
+ P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_IX_ESCR0),
+ P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_IX_ESCR1),
+ P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_MOB_ESCR0),
+ P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_MOB_ESCR1),
+ P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_MS_ESCR0),
+ P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_MS_ESCR1),
+ P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_PMH_ESCR0),
+ P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_PMH_ESCR1),
+ P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_RAT_ESCR0),
+ P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_RAT_ESCR1),
+ P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_SAAT_ESCR0),
+ P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_SAAT_ESCR1),
+ P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_SSU_ESCR0),
+ P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_SSU_ESCR1),
+ P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_TBPU_ESCR0),
+ P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_TBPU_ESCR1),
+ P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_TC_ESCR0),
+ P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_TC_ESCR1),
+ P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_U2L_ESCR0),
+ P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_U2L_ESCR1),
+};
+
+static int p4_get_escr_idx(unsigned int addr)
+{
+ unsigned int idx = P4_ESCR_MSR_IDX(addr);
+
+ if (unlikely(idx >= P4_ESCR_MSR_TABLE_SIZE ||
+ !p4_escr_table[idx] ||
+ p4_escr_table[idx] != addr)) {
+ WARN_ONCE(1, "P4 PMU: Wrong address passed: %x\n", addr);
+ return -1;
+ }
+
+ return idx;
+}
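The lookup above is a sparse, direct-mapped table: the index is simply (msr - MSR_P4_BSU_ESCR0), and a slot is valid only if it stores the exact MSR address. A stand-alone sketch of that idea follows; the 0x3a0/0x3e1 bounds come from the comment above, while the second table entry is purely illustrative.

/* Sketch of a sparse direct-mapped lookup keyed by (msr - base). */
#include <stdio.h>

#define BASE 0x3a0
#define MAX  0x3e1
#define SIZE (MAX - BASE + 1)

static const unsigned int table[SIZE] = {
	[0x3a0 - BASE] = 0x3a0,	/* MSR_P4_BSU_ESCR0 */
	[0x3c8 - BASE] = 0x3c8,	/* illustrative second ESCR */
};

static int escr_idx(unsigned int msr)
{
	unsigned int idx = msr - BASE;

	if (idx >= SIZE || table[idx] != msr)
		return -1;		/* not a known ESCR */
	return (int)idx;
}

int main(void)
{
	printf("%d %d\n", escr_idx(0x3a0), escr_idx(0x3a1));	/* 0 -1 */
	return 0;
}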
+
+static int p4_next_cntr(int thread, unsigned long *used_mask,
+ struct p4_event_bind *bind)
+{
+ int i, j;
+
+ for (i = 0; i < P4_CNTR_LIMIT; i++) {
+ j = bind->cntr[thread][i];
+ if (j != -1 && !test_bit(j, used_mask))
+ return j;
+ }
+
+ return -1;
+}
+
+static int p4_pmu_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
+{
+ unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
+ unsigned long escr_mask[BITS_TO_LONGS(P4_ESCR_MSR_TABLE_SIZE)];
+ int cpu = smp_processor_id();
+ struct hw_perf_event *hwc;
+ struct p4_event_bind *bind;
+ unsigned int i, thread, num;
+ int cntr_idx, escr_idx;
+
+ bitmap_zero(used_mask, X86_PMC_IDX_MAX);
+ bitmap_zero(escr_mask, P4_ESCR_MSR_TABLE_SIZE);
+
+ for (i = 0, num = n; i < n; i++, num--) {
+
+ hwc = &cpuc->event_list[i]->hw;
+ thread = p4_ht_thread(cpu);
+ bind = p4_config_get_bind(hwc->config);
+ escr_idx = p4_get_escr_idx(bind->escr_msr[thread]);
+ if (unlikely(escr_idx == -1))
+ goto done;
+
+ if (hwc->idx != -1 && !p4_should_swap_ts(hwc->config, cpu)) {
+ cntr_idx = hwc->idx;
+ if (assign)
+ assign[i] = hwc->idx;
+ goto reserve;
+ }
+
+ cntr_idx = p4_next_cntr(thread, used_mask, bind);
+ if (cntr_idx == -1 || test_bit(escr_idx, escr_mask))
+ goto done;
+
+ p4_pmu_swap_config_ts(hwc, cpu);
+ if (assign)
+ assign[i] = cntr_idx;
+reserve:
+ set_bit(cntr_idx, used_mask);
+ set_bit(escr_idx, escr_mask);
+ }
+
+done:
+ return num ? -ENOSPC : 0;
+}
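The scheduler above is a greedy first-fit pass: each event carries a short list of counters it may live on, and both the chosen counter and its ESCR may be claimed only once per pass. A rough stand-alone sketch of that idea, with made-up candidate lists and sizes:

/* Greedy first-fit counter assignment sketch; sizes and lists are made up. */
#include <stdio.h>

#define NCNT  18
#define NESCR 8

struct ev { int cntr[2]; int escr; };

static int schedule(const struct ev *ev, int n, int *assign)
{
	char cntr_used[NCNT] = {0}, escr_used[NESCR] = {0};
	int i, j;

	for (i = 0; i < n; i++) {
		if (escr_used[ev[i].escr])
			return -1;	/* ESCR already claimed */
		for (j = 0; j < 2; j++) {
			int c = ev[i].cntr[j];
			if (c != -1 && !cntr_used[c]) {
				cntr_used[c] = 1;
				escr_used[ev[i].escr] = 1;
				assign[i] = c;
				break;
			}
		}
		if (j == 2)
			return -1;	/* no free counter: -ENOSPC in the driver */
	}
	return 0;
}

int main(void)
{
	struct ev ev[2] = { { {0, 2}, 1 }, { {0, 2}, 3 } };
	int assign[2];

	if (!schedule(ev, 2, assign))
		printf("assigned %d and %d\n", assign[0], assign[1]);
	return 0;
}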
+
+static __initconst const struct x86_pmu p4_pmu = {
+ .name = "Netburst P4/Xeon",
+ .handle_irq = p4_pmu_handle_irq,
+ .disable_all = p4_pmu_disable_all,
+ .enable_all = p4_pmu_enable_all,
+ .enable = p4_pmu_enable_event,
+ .disable = p4_pmu_disable_event,
+ .eventsel = MSR_P4_BPU_CCCR0,
+ .perfctr = MSR_P4_BPU_PERFCTR0,
+ .event_map = p4_pmu_event_map,
+ .max_events = ARRAY_SIZE(p4_general_events),
+ .get_event_constraints = x86_get_event_constraints,
+ /*
+	 * If HT is disabled we may need to use all
+	 * ARCH_P4_MAX_CCCR counters simultaneously,
+	 * though leave it restricted for the moment assuming
+	 * HT is on
+ */
+ .num_counters = ARCH_P4_MAX_CCCR,
+ .apic = 1,
+ .cntval_bits = 40,
+ .cntval_mask = (1ULL << 40) - 1,
+ .max_period = (1ULL << 39) - 1,
+ .hw_config = p4_hw_config,
+ .schedule_events = p4_pmu_schedule_events,
+};
+
+static __init int p4_pmu_init(void)
+{
+ unsigned int low, high;
+
+	/* If we get stripped -- indexing fails */
+ BUILD_BUG_ON(ARCH_P4_MAX_CCCR > X86_PMC_MAX_GENERIC);
+
+ rdmsr(MSR_IA32_MISC_ENABLE, low, high);
+ if (!(low & (1 << 7))) {
+ pr_cont("unsupported Netburst CPU model %d ",
+ boot_cpu_data.x86_model);
+ return -ENODEV;
+ }
+
+ memcpy(hw_cache_event_ids, p4_hw_cache_event_ids,
+ sizeof(hw_cache_event_ids));
+
+ pr_cont("Netburst events, ");
+
+ x86_pmu = p4_pmu;
+
+ return 0;
+}
+
+#endif /* CONFIG_CPU_SUP_INTEL */
diff --git a/arch/x86/kernel/cpu/perf_event_p6.c b/arch/x86/kernel/cpu/perf_event_p6.c
index a330485d14da..34ba07be2cda 100644
--- a/arch/x86/kernel/cpu/perf_event_p6.c
+++ b/arch/x86/kernel/cpu/perf_event_p6.c
@@ -27,24 +27,6 @@ static u64 p6_pmu_event_map(int hw_event)
*/
#define P6_NOP_EVENT 0x0000002EULL
-static u64 p6_pmu_raw_event(u64 hw_event)
-{
-#define P6_EVNTSEL_EVENT_MASK 0x000000FFULL
-#define P6_EVNTSEL_UNIT_MASK 0x0000FF00ULL
-#define P6_EVNTSEL_EDGE_MASK 0x00040000ULL
-#define P6_EVNTSEL_INV_MASK 0x00800000ULL
-#define P6_EVNTSEL_REG_MASK 0xFF000000ULL
-
-#define P6_EVNTSEL_MASK \
- (P6_EVNTSEL_EVENT_MASK | \
- P6_EVNTSEL_UNIT_MASK | \
- P6_EVNTSEL_EDGE_MASK | \
- P6_EVNTSEL_INV_MASK | \
- P6_EVNTSEL_REG_MASK)
-
- return hw_event & P6_EVNTSEL_MASK;
-}
-
static struct event_constraint p6_event_constraints[] =
{
INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FLOPS */
@@ -66,7 +48,7 @@ static void p6_pmu_disable_all(void)
wrmsrl(MSR_P6_EVNTSEL0, val);
}
-static void p6_pmu_enable_all(void)
+static void p6_pmu_enable_all(int added)
{
unsigned long val;
@@ -102,22 +84,23 @@ static void p6_pmu_enable_event(struct perf_event *event)
(void)checking_wrmsrl(hwc->config_base + hwc->idx, val);
}
-static __initconst struct x86_pmu p6_pmu = {
+static __initconst const struct x86_pmu p6_pmu = {
.name = "p6",
.handle_irq = x86_pmu_handle_irq,
.disable_all = p6_pmu_disable_all,
.enable_all = p6_pmu_enable_all,
.enable = p6_pmu_enable_event,
.disable = p6_pmu_disable_event,
+ .hw_config = x86_pmu_hw_config,
+ .schedule_events = x86_schedule_events,
.eventsel = MSR_P6_EVNTSEL0,
.perfctr = MSR_P6_PERFCTR0,
.event_map = p6_pmu_event_map,
- .raw_event = p6_pmu_raw_event,
.max_events = ARRAY_SIZE(p6_perfmon_event_map),
.apic = 1,
.max_period = (1ULL << 31) - 1,
.version = 0,
- .num_events = 2,
+ .num_counters = 2,
/*
* Events have 40 bits implemented. However they are designed such
* that bits [32-39] are sign extensions of bit 31. As such the
@@ -125,8 +108,8 @@ static __initconst struct x86_pmu p6_pmu = {
*
* See IA-32 Intel Architecture Software developer manual Vol 3B
*/
- .event_bits = 32,
- .event_mask = (1ULL << 32) - 1,
+ .cntval_bits = 32,
+ .cntval_mask = (1ULL << 32) - 1,
.get_event_constraints = x86_get_event_constraints,
.event_constraints = p6_event_constraints,
};
diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c
index dfdb4dba2320..b9d1ff588445 100644
--- a/arch/x86/kernel/cpu/vmware.c
+++ b/arch/x86/kernel/cpu/vmware.c
@@ -24,8 +24,8 @@
#include <linux/dmi.h>
#include <linux/module.h>
#include <asm/div64.h>
-#include <asm/vmware.h>
#include <asm/x86_init.h>
+#include <asm/hypervisor.h>
#define CPUID_VMWARE_INFO_LEAF 0x40000000
#define VMWARE_HYPERVISOR_MAGIC 0x564D5868
@@ -65,7 +65,7 @@ static unsigned long vmware_get_tsc_khz(void)
return tsc_hz;
}
-void __init vmware_platform_setup(void)
+static void __init vmware_platform_setup(void)
{
uint32_t eax, ebx, ecx, edx;
@@ -83,26 +83,22 @@ void __init vmware_platform_setup(void)
* serial key should be enough, as this will always have a VMware
* specific string when running under VMware hypervisor.
*/
-int vmware_platform(void)
+static bool __init vmware_platform(void)
{
if (cpu_has_hypervisor) {
- unsigned int eax, ebx, ecx, edx;
- char hyper_vendor_id[13];
-
- cpuid(CPUID_VMWARE_INFO_LEAF, &eax, &ebx, &ecx, &edx);
- memcpy(hyper_vendor_id + 0, &ebx, 4);
- memcpy(hyper_vendor_id + 4, &ecx, 4);
- memcpy(hyper_vendor_id + 8, &edx, 4);
- hyper_vendor_id[12] = '\0';
- if (!strcmp(hyper_vendor_id, "VMwareVMware"))
- return 1;
+ unsigned int eax;
+ unsigned int hyper_vendor_id[3];
+
+ cpuid(CPUID_VMWARE_INFO_LEAF, &eax, &hyper_vendor_id[0],
+ &hyper_vendor_id[1], &hyper_vendor_id[2]);
+ if (!memcmp(hyper_vendor_id, "VMwareVMware", 12))
+ return true;
} else if (dmi_available && dmi_name_in_serial("VMware") &&
__vmware_platform())
- return 1;
+ return true;
- return 0;
+ return false;
}
-EXPORT_SYMBOL(vmware_platform);
/*
* VMware hypervisor takes care of exporting a reliable TSC to the guest.
@@ -116,8 +112,16 @@ EXPORT_SYMBOL(vmware_platform);
* so that the kernel could just trust the hypervisor with providing a
* reliable virtual TSC that is suitable for timekeeping.
*/
-void __cpuinit vmware_set_feature_bits(struct cpuinfo_x86 *c)
+static void __cpuinit vmware_set_cpu_features(struct cpuinfo_x86 *c)
{
set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
set_cpu_cap(c, X86_FEATURE_TSC_RELIABLE);
}
+
+const __refconst struct hypervisor_x86 x86_hyper_vmware = {
+ .name = "VMware",
+ .detect = vmware_platform,
+ .set_cpu_features = vmware_set_cpu_features,
+ .init_platform = vmware_platform_setup,
+};
+EXPORT_SYMBOL(x86_hyper_vmware);
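For reference, a minimal user-space sketch of the same signature check: CPUID leaf 0x40000000 returns the hypervisor vendor string in EBX/ECX/EDX, which is "VMwareVMware" under VMware. This sketch assumes an x86 build with GCC/Clang's <cpuid.h>; a production check would first verify the hypervisor CPUID feature bit, as the kernel code above does.

/* User-space sketch of hypervisor vendor detection via CPUID leaf 0x40000000. */
#include <cpuid.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned int eax, sig[3];

	__cpuid(0x40000000, eax, sig[0], sig[1], sig[2]);

	if (!memcmp(sig, "VMwareVMware", 12))
		printf("running under VMware\n");
	else
		printf("not VMware (or no hypervisor leaf)\n");
	return 0;
}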
diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
index 8b862d5900fe..1b7b31ab7d86 100644
--- a/arch/x86/kernel/cpuid.c
+++ b/arch/x86/kernel/cpuid.c
@@ -170,7 +170,7 @@ static int __cpuinit cpuid_class_cpu_callback(struct notifier_block *nfb,
cpuid_device_destroy(cpu);
break;
}
- return err ? NOTIFY_BAD : NOTIFY_OK;
+ return notifier_from_errno(err);
}
static struct notifier_block __refdata cpuid_class_cpu_notifier =
diff --git a/arch/x86/kernel/ds.c b/arch/x86/kernel/ds.c
deleted file mode 100644
index 1c47390dd0e5..000000000000
--- a/arch/x86/kernel/ds.c
+++ /dev/null
@@ -1,1437 +0,0 @@
-/*
- * Debug Store support
- *
- * This provides a low-level interface to the hardware's Debug Store
- * feature that is used for branch trace store (BTS) and
- * precise-event based sampling (PEBS).
- *
- * It manages:
- * - DS and BTS hardware configuration
- * - buffer overflow handling (to be done)
- * - buffer access
- *
- * It does not do:
- * - security checking (is the caller allowed to trace the task)
- * - buffer allocation (memory accounting)
- *
- *
- * Copyright (C) 2007-2009 Intel Corporation.
- * Markus Metzger <markus.t.metzger@intel.com>, 2007-2009
- */
-
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/mm.h>
-#include <linux/trace_clock.h>
-
-#include <asm/ds.h>
-
-#include "ds_selftest.h"
-
-/*
- * The configuration for a particular DS hardware implementation:
- */
-struct ds_configuration {
- /* The name of the configuration: */
- const char *name;
-
- /* The size of pointer-typed fields in DS, BTS, and PEBS: */
- unsigned char sizeof_ptr_field;
-
- /* The size of a BTS/PEBS record in bytes: */
- unsigned char sizeof_rec[2];
-
- /* The number of pebs counter reset values in the DS structure. */
- unsigned char nr_counter_reset;
-
- /* Control bit-masks indexed by enum ds_feature: */
- unsigned long ctl[dsf_ctl_max];
-};
-static struct ds_configuration ds_cfg __read_mostly;
-
-
-/* Maximal size of a DS configuration: */
-#define MAX_SIZEOF_DS 0x80
-
-/* Maximal size of a BTS record: */
-#define MAX_SIZEOF_BTS (3 * 8)
-
-/* BTS and PEBS buffer alignment: */
-#define DS_ALIGNMENT (1 << 3)
-
-/* Number of buffer pointers in DS: */
-#define NUM_DS_PTR_FIELDS 8
-
-/* Size of a pebs reset value in DS: */
-#define PEBS_RESET_FIELD_SIZE 8
-
-/* Mask of control bits in the DS MSR register: */
-#define BTS_CONTROL \
- ( ds_cfg.ctl[dsf_bts] | \
- ds_cfg.ctl[dsf_bts_kernel] | \
- ds_cfg.ctl[dsf_bts_user] | \
- ds_cfg.ctl[dsf_bts_overflow] )
-
-/*
- * A BTS or PEBS tracer.
- *
- * This holds the configuration of the tracer and serves as a handle
- * to identify tracers.
- */
-struct ds_tracer {
- /* The DS context (partially) owned by this tracer. */
- struct ds_context *context;
- /* The buffer provided on ds_request() and its size in bytes. */
- void *buffer;
- size_t size;
-};
-
-struct bts_tracer {
- /* The common DS part: */
- struct ds_tracer ds;
-
- /* The trace including the DS configuration: */
- struct bts_trace trace;
-
- /* Buffer overflow notification function: */
- bts_ovfl_callback_t ovfl;
-
- /* Active flags affecting trace collection. */
- unsigned int flags;
-};
-
-struct pebs_tracer {
- /* The common DS part: */
- struct ds_tracer ds;
-
- /* The trace including the DS configuration: */
- struct pebs_trace trace;
-
- /* Buffer overflow notification function: */
- pebs_ovfl_callback_t ovfl;
-};
-
-/*
- * Debug Store (DS) save area configuration (see Intel64 and IA32
- * Architectures Software Developer's Manual, section 18.5)
- *
- * The DS configuration consists of the following fields; different
- * architectures vary in the size of those fields.
- *
- * - double-word aligned base linear address of the BTS buffer
- * - write pointer into the BTS buffer
- * - end linear address of the BTS buffer (one byte beyond the end of
- * the buffer)
- * - interrupt pointer into BTS buffer
- * (interrupt occurs when write pointer passes interrupt pointer)
- * - double-word aligned base linear address of the PEBS buffer
- * - write pointer into the PEBS buffer
- * - end linear address of the PEBS buffer (one byte beyond the end of
- * the buffer)
- * - interrupt pointer into PEBS buffer
- * (interrupt occurs when write pointer passes interrupt pointer)
- * - value to which counter is reset following counter overflow
- *
- * Later architectures use 64bit pointers throughout, whereas earlier
- * architectures use 32bit pointers in 32bit mode.
- *
- *
- * We compute the base address for the first 8 fields based on:
- * - the field size stored in the DS configuration
- * - the relative field position
- * - an offset giving the start of the respective region
- *
- * This offset is further used to index various arrays holding
- * information for BTS and PEBS at the respective index.
- *
- * On later 32bit processors, we only access the lower 32bit of the
- * 64bit pointer fields. The upper halves will be zeroed out.
- */
-
-enum ds_field {
- ds_buffer_base = 0,
- ds_index,
- ds_absolute_maximum,
- ds_interrupt_threshold,
-};
-
-enum ds_qualifier {
- ds_bts = 0,
- ds_pebs
-};
-
-static inline unsigned long
-ds_get(const unsigned char *base, enum ds_qualifier qual, enum ds_field field)
-{
- base += (ds_cfg.sizeof_ptr_field * (field + (4 * qual)));
- return *(unsigned long *)base;
-}
-
-static inline void
-ds_set(unsigned char *base, enum ds_qualifier qual, enum ds_field field,
- unsigned long value)
-{
- base += (ds_cfg.sizeof_ptr_field * (field + (4 * qual)));
- (*(unsigned long *)base) = value;
-}
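A stand-alone sketch of the field addressing described in the comment above: fields are sizeof_ptr_field bytes wide and the PEBS pointer block follows the four BTS pointers, giving an offset of field_size * (field + 4 * qualifier). The 8-byte field size and the stored value below are illustrative only.

/* Sketch of DS save-area field addressing: offset = size * (field + 4 * qual). */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

enum ds_field { DS_BUFFER_BASE, DS_INDEX, DS_ABS_MAX, DS_INT_THRESHOLD };
enum ds_qual  { DS_BTS, DS_PEBS };

#define PTR_FIELD_SIZE 8	/* 64bit pointer fields, per the comment above */

static uint64_t ds_get(const unsigned char *ds, enum ds_qual q, enum ds_field f)
{
	uint64_t v;

	memcpy(&v, ds + PTR_FIELD_SIZE * (f + 4 * q), sizeof(v));
	return v;
}

int main(void)
{
	unsigned char ds[0x80] = {0};
	uint64_t pebs_base = 0x1000;	/* illustrative value */

	memcpy(ds + PTR_FIELD_SIZE * (DS_BUFFER_BASE + 4 * DS_PEBS),
	       &pebs_base, sizeof(pebs_base));
	printf("pebs buffer base = %#llx\n",
	       (unsigned long long)ds_get(ds, DS_PEBS, DS_BUFFER_BASE));
	return 0;
}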
-
-
-/*
- * Locking is done only for allocating BTS or PEBS resources.
- */
-static DEFINE_SPINLOCK(ds_lock);
-
-/*
- * We either support (system-wide) per-cpu or per-thread allocation.
- * We distinguish the two based on the task_struct pointer, where a
- * NULL pointer indicates per-cpu allocation for the current cpu.
- *
- * Allocations are use-counted. As soon as resources are allocated,
- * further allocations must be of the same type (per-cpu or
- * per-thread). We model this by counting allocations (i.e. the number
- * of tracers of a certain type) for one type negatively:
- * =0 no tracers
- * >0 number of per-thread tracers
- * <0 number of per-cpu tracers
- *
- * Tracers essentially give the number of ds contexts for a certain
- * type of allocation.
- */
-static atomic_t tracers = ATOMIC_INIT(0);
-
-static inline int get_tracer(struct task_struct *task)
-{
- int error;
-
- spin_lock_irq(&ds_lock);
-
- if (task) {
- error = -EPERM;
- if (atomic_read(&tracers) < 0)
- goto out;
- atomic_inc(&tracers);
- } else {
- error = -EPERM;
- if (atomic_read(&tracers) > 0)
- goto out;
- atomic_dec(&tracers);
- }
-
- error = 0;
-out:
- spin_unlock_irq(&ds_lock);
- return error;
-}
-
-static inline void put_tracer(struct task_struct *task)
-{
- if (task)
- atomic_dec(&tracers);
- else
- atomic_inc(&tracers);
-}
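A sketch of the signed use-count scheme described in the comment above: per-thread requests count the global tracer count up, per-cpu requests count it down, so the two allocation types cannot be mixed. The function and variable names below are hypothetical.

/* Sketch of a signed use-count: >0 per-thread, <0 per-cpu, 0 none. */
#include <stdio.h>

static int tracers;

static int get_tracer(int per_thread)
{
	if (per_thread) {
		if (tracers < 0)
			return -1;	/* per-cpu tracing already active */
		tracers++;
	} else {
		if (tracers > 0)
			return -1;	/* per-thread tracing already active */
		tracers--;
	}
	return 0;
}

int main(void)
{
	printf("%d %d\n", get_tracer(1), get_tracer(0));	/* 0 -1 */
	return 0;
}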
-
-/*
- * The DS context is either attached to a thread or to a cpu:
- * - in the former case, the thread_struct contains a pointer to the
- * attached context.
- * - in the latter case, we use a static array of per-cpu context
- * pointers.
- *
- * Contexts are use-counted. They are allocated on first access and
- * deallocated when the last user puts the context.
- */
-struct ds_context {
- /* The DS configuration; goes into MSR_IA32_DS_AREA: */
- unsigned char ds[MAX_SIZEOF_DS];
-
- /* The owner of the BTS and PEBS configuration, respectively: */
- struct bts_tracer *bts_master;
- struct pebs_tracer *pebs_master;
-
- /* Use count: */
- unsigned long count;
-
- /* Pointer to the context pointer field: */
- struct ds_context **this;
-
- /* The traced task; NULL for cpu tracing: */
- struct task_struct *task;
-
- /* The traced cpu; only valid if task is NULL: */
- int cpu;
-};
-
-static DEFINE_PER_CPU(struct ds_context *, cpu_ds_context);
-
-
-static struct ds_context *ds_get_context(struct task_struct *task, int cpu)
-{
- struct ds_context **p_context =
- (task ? &task->thread.ds_ctx : &per_cpu(cpu_ds_context, cpu));
- struct ds_context *context = NULL;
- struct ds_context *new_context = NULL;
-
- /* Chances are small that we already have a context. */
- new_context = kzalloc(sizeof(*new_context), GFP_KERNEL);
- if (!new_context)
- return NULL;
-
- spin_lock_irq(&ds_lock);
-
- context = *p_context;
- if (likely(!context)) {
- context = new_context;
-
- context->this = p_context;
- context->task = task;
- context->cpu = cpu;
- context->count = 0;
-
- *p_context = context;
- }
-
- context->count++;
-
- spin_unlock_irq(&ds_lock);
-
- if (context != new_context)
- kfree(new_context);
-
- return context;
-}
-
-static void ds_put_context(struct ds_context *context)
-{
- struct task_struct *task;
- unsigned long irq;
-
- if (!context)
- return;
-
- spin_lock_irqsave(&ds_lock, irq);
-
- if (--context->count) {
- spin_unlock_irqrestore(&ds_lock, irq);
- return;
- }
-
- *(context->this) = NULL;
-
- task = context->task;
-
- if (task)
- clear_tsk_thread_flag(task, TIF_DS_AREA_MSR);
-
- /*
- * We leave the (now dangling) pointer to the DS configuration in
- * the DS_AREA msr. This is as good or as bad as replacing it with
- * NULL - the hardware would crash if we enabled tracing.
- *
- * This saves us some problems with having to write an msr on a
- * different cpu while preventing others from doing the same for the
- * next context for that same cpu.
- */
-
- spin_unlock_irqrestore(&ds_lock, irq);
-
- /* The context might still be in use for context switching. */
- if (task && (task != current))
- wait_task_context_switch(task);
-
- kfree(context);
-}
-
-static void ds_install_ds_area(struct ds_context *context)
-{
- unsigned long ds;
-
- ds = (unsigned long)context->ds;
-
- /*
- * There is a race between the bts master and the pebs master.
- *
- * The thread/cpu access is synchronized via get/put_cpu() for
- * task tracing and via wrmsr_on_cpu for cpu tracing.
- *
- * If bts and pebs are collected for the same task or same cpu,
- * the same configuration is written twice.
- */
- if (context->task) {
- get_cpu();
- if (context->task == current)
- wrmsrl(MSR_IA32_DS_AREA, ds);
- set_tsk_thread_flag(context->task, TIF_DS_AREA_MSR);
- put_cpu();
- } else
- wrmsr_on_cpu(context->cpu, MSR_IA32_DS_AREA,
- (u32)((u64)ds), (u32)((u64)ds >> 32));
-}
-
-/*
- * Call the tracer's callback on a buffer overflow.
- *
- * context: the ds context
- * qual: the buffer type
- */
-static void ds_overflow(struct ds_context *context, enum ds_qualifier qual)
-{
- switch (qual) {
- case ds_bts:
- if (context->bts_master &&
- context->bts_master->ovfl)
- context->bts_master->ovfl(context->bts_master);
- break;
- case ds_pebs:
- if (context->pebs_master &&
- context->pebs_master->ovfl)
- context->pebs_master->ovfl(context->pebs_master);
- break;
- }
-}
-
-
-/*
- * Write raw data into the BTS or PEBS buffer.
- *
- * The remainder of any partially written record is zeroed out.
- *
- * context: the DS context
- * qual: the buffer type
- * record: the data to write
- * size: the size of the data
- */
-static int ds_write(struct ds_context *context, enum ds_qualifier qual,
- const void *record, size_t size)
-{
- int bytes_written = 0;
-
- if (!record)
- return -EINVAL;
-
- while (size) {
- unsigned long base, index, end, write_end, int_th;
- unsigned long write_size, adj_write_size;
-
- /*
- * Write as much as possible without producing an
- * overflow interrupt.
- *
- * Interrupt_threshold must either be
- * - bigger than absolute_maximum or
- * - point to a record between buffer_base and absolute_maximum
- *
- * Index points to a valid record.
- */
- base = ds_get(context->ds, qual, ds_buffer_base);
- index = ds_get(context->ds, qual, ds_index);
- end = ds_get(context->ds, qual, ds_absolute_maximum);
- int_th = ds_get(context->ds, qual, ds_interrupt_threshold);
-
- write_end = min(end, int_th);
-
- /*
- * If we are already beyond the interrupt threshold,
- * we fill the entire buffer.
- */
- if (write_end <= index)
- write_end = end;
-
- if (write_end <= index)
- break;
-
- write_size = min((unsigned long) size, write_end - index);
- memcpy((void *)index, record, write_size);
-
- record = (const char *)record + write_size;
- size -= write_size;
- bytes_written += write_size;
-
- adj_write_size = write_size / ds_cfg.sizeof_rec[qual];
- adj_write_size *= ds_cfg.sizeof_rec[qual];
-
- /* Zero out trailing bytes. */
- memset((char *)index + write_size, 0,
- adj_write_size - write_size);
- index += adj_write_size;
-
- if (index >= end)
- index = base;
- ds_set(context->ds, qual, ds_index, index);
-
- if (index >= int_th)
- ds_overflow(context, qual);
- }
-
- return bytes_written;
-}
-
-
-/*
- * Branch Trace Store (BTS) uses the following format. Different
- * architectures vary in the size of those fields.
- * - source linear address
- * - destination linear address
- * - flags
- *
- * Later architectures use 64bit pointers throughout, whereas earlier
- * architectures use 32bit pointers in 32bit mode.
- *
- * We compute the base address for the fields based on:
- * - the field size stored in the DS configuration
- * - the relative field position
- *
- * In order to store additional information in the BTS buffer, we use
- * a special source address to indicate that the record requires
- * special interpretation.
- *
- * Netburst indicated via a bit in the flags field whether the branch
- * was predicted; this is ignored.
- *
- * We use two levels of abstraction:
- * - the raw data level defined here
- * - an arch-independent level defined in ds.h
- */
-
-enum bts_field {
- bts_from,
- bts_to,
- bts_flags,
-
- bts_qual = bts_from,
- bts_clock = bts_to,
- bts_pid = bts_flags,
-
- bts_qual_mask = (bts_qual_max - 1),
- bts_escape = ((unsigned long)-1 & ~bts_qual_mask)
-};
-
-static inline unsigned long bts_get(const char *base, unsigned long field)
-{
- base += (ds_cfg.sizeof_ptr_field * field);
- return *(unsigned long *)base;
-}
-
-static inline void bts_set(char *base, unsigned long field, unsigned long val)
-{
- base += (ds_cfg.sizeof_ptr_field * field);
- (*(unsigned long *)base) = val;
-}
-
-
-/*
- * The raw BTS data is architecture dependent.
- *
- * For higher-level users, we give an arch-independent view.
- * - ds.h defines struct bts_struct
- * - bts_read translates one raw bts record into a bts_struct
- * - bts_write translates one bts_struct into the raw format and
- * writes it into the top of the parameter tracer's buffer.
- *
- * return: bytes read/written on success; -Eerrno, otherwise
- */
-static int
-bts_read(struct bts_tracer *tracer, const void *at, struct bts_struct *out)
-{
- if (!tracer)
- return -EINVAL;
-
- if (at < tracer->trace.ds.begin)
- return -EINVAL;
-
- if (tracer->trace.ds.end < (at + tracer->trace.ds.size))
- return -EINVAL;
-
- memset(out, 0, sizeof(*out));
- if ((bts_get(at, bts_qual) & ~bts_qual_mask) == bts_escape) {
- out->qualifier = (bts_get(at, bts_qual) & bts_qual_mask);
- out->variant.event.clock = bts_get(at, bts_clock);
- out->variant.event.pid = bts_get(at, bts_pid);
- } else {
- out->qualifier = bts_branch;
- out->variant.lbr.from = bts_get(at, bts_from);
- out->variant.lbr.to = bts_get(at, bts_to);
-
- if (!out->variant.lbr.from && !out->variant.lbr.to)
- out->qualifier = bts_invalid;
- }
-
- return ds_cfg.sizeof_rec[ds_bts];
-}
-
-static int bts_write(struct bts_tracer *tracer, const struct bts_struct *in)
-{
- unsigned char raw[MAX_SIZEOF_BTS];
-
- if (!tracer)
- return -EINVAL;
-
- if (MAX_SIZEOF_BTS < ds_cfg.sizeof_rec[ds_bts])
- return -EOVERFLOW;
-
- switch (in->qualifier) {
- case bts_invalid:
- bts_set(raw, bts_from, 0);
- bts_set(raw, bts_to, 0);
- bts_set(raw, bts_flags, 0);
- break;
- case bts_branch:
- bts_set(raw, bts_from, in->variant.lbr.from);
- bts_set(raw, bts_to, in->variant.lbr.to);
- bts_set(raw, bts_flags, 0);
- break;
- case bts_task_arrives:
- case bts_task_departs:
- bts_set(raw, bts_qual, (bts_escape | in->qualifier));
- bts_set(raw, bts_clock, in->variant.event.clock);
- bts_set(raw, bts_pid, in->variant.event.pid);
- break;
- default:
- return -EINVAL;
- }
-
- return ds_write(tracer->ds.context, ds_bts, raw,
- ds_cfg.sizeof_rec[ds_bts]);
-}
-
-
-static void ds_write_config(struct ds_context *context,
- struct ds_trace *cfg, enum ds_qualifier qual)
-{
- unsigned char *ds = context->ds;
-
- ds_set(ds, qual, ds_buffer_base, (unsigned long)cfg->begin);
- ds_set(ds, qual, ds_index, (unsigned long)cfg->top);
- ds_set(ds, qual, ds_absolute_maximum, (unsigned long)cfg->end);
- ds_set(ds, qual, ds_interrupt_threshold, (unsigned long)cfg->ith);
-}
-
-static void ds_read_config(struct ds_context *context,
- struct ds_trace *cfg, enum ds_qualifier qual)
-{
- unsigned char *ds = context->ds;
-
- cfg->begin = (void *)ds_get(ds, qual, ds_buffer_base);
- cfg->top = (void *)ds_get(ds, qual, ds_index);
- cfg->end = (void *)ds_get(ds, qual, ds_absolute_maximum);
- cfg->ith = (void *)ds_get(ds, qual, ds_interrupt_threshold);
-}
-
-static void ds_init_ds_trace(struct ds_trace *trace, enum ds_qualifier qual,
- void *base, size_t size, size_t ith,
- unsigned int flags) {
- unsigned long buffer, adj;
-
- /*
- * Adjust the buffer address and size to meet alignment
- * constraints:
- * - buffer is double-word aligned
- * - size is multiple of record size
- *
- * We checked the size at the very beginning; we have enough
- * space to do the adjustment.
- */
- buffer = (unsigned long)base;
-
- adj = ALIGN(buffer, DS_ALIGNMENT) - buffer;
- buffer += adj;
- size -= adj;
-
- trace->n = size / ds_cfg.sizeof_rec[qual];
- trace->size = ds_cfg.sizeof_rec[qual];
-
- size = (trace->n * trace->size);
-
- trace->begin = (void *)buffer;
- trace->top = trace->begin;
- trace->end = (void *)(buffer + size);
- /*
- * The value for 'no threshold' is -1, which will set the
- * threshold outside of the buffer, just like we want it.
- */
- ith *= ds_cfg.sizeof_rec[qual];
- trace->ith = (void *)(buffer + size - ith);
-
- trace->flags = flags;
-}
-
-
-static int ds_request(struct ds_tracer *tracer, struct ds_trace *trace,
- enum ds_qualifier qual, struct task_struct *task,
- int cpu, void *base, size_t size, size_t th)
-{
- struct ds_context *context;
- int error;
- size_t req_size;
-
- error = -EOPNOTSUPP;
- if (!ds_cfg.sizeof_rec[qual])
- goto out;
-
- error = -EINVAL;
- if (!base)
- goto out;
-
- req_size = ds_cfg.sizeof_rec[qual];
- /* We might need space for alignment adjustments. */
- if (!IS_ALIGNED((unsigned long)base, DS_ALIGNMENT))
- req_size += DS_ALIGNMENT;
-
- error = -EINVAL;
- if (size < req_size)
- goto out;
-
- if (th != (size_t)-1) {
- th *= ds_cfg.sizeof_rec[qual];
-
- error = -EINVAL;
- if (size <= th)
- goto out;
- }
-
- tracer->buffer = base;
- tracer->size = size;
-
- error = -ENOMEM;
- context = ds_get_context(task, cpu);
- if (!context)
- goto out;
- tracer->context = context;
-
- /*
- * Defer any tracer-specific initialization work for the context until
- * context ownership has been clarified.
- */
-
- error = 0;
- out:
- return error;
-}
-
-static struct bts_tracer *ds_request_bts(struct task_struct *task, int cpu,
- void *base, size_t size,
- bts_ovfl_callback_t ovfl, size_t th,
- unsigned int flags)
-{
- struct bts_tracer *tracer;
- int error;
-
- /* Buffer overflow notification is not yet implemented. */
- error = -EOPNOTSUPP;
- if (ovfl)
- goto out;
-
- error = get_tracer(task);
- if (error < 0)
- goto out;
-
- error = -ENOMEM;
- tracer = kzalloc(sizeof(*tracer), GFP_KERNEL);
- if (!tracer)
- goto out_put_tracer;
- tracer->ovfl = ovfl;
-
- /* Do some more error checking and acquire a tracing context. */
- error = ds_request(&tracer->ds, &tracer->trace.ds,
- ds_bts, task, cpu, base, size, th);
- if (error < 0)
- goto out_tracer;
-
- /* Claim the bts part of the tracing context we acquired above. */
- spin_lock_irq(&ds_lock);
-
- error = -EPERM;
- if (tracer->ds.context->bts_master)
- goto out_unlock;
- tracer->ds.context->bts_master = tracer;
-
- spin_unlock_irq(&ds_lock);
-
- /*
- * Now that we own the bts part of the context, let's complete the
- * initialization for that part.
- */
- ds_init_ds_trace(&tracer->trace.ds, ds_bts, base, size, th, flags);
- ds_write_config(tracer->ds.context, &tracer->trace.ds, ds_bts);
- ds_install_ds_area(tracer->ds.context);
-
- tracer->trace.read = bts_read;
- tracer->trace.write = bts_write;
-
- /* Start tracing. */
- ds_resume_bts(tracer);
-
- return tracer;
-
- out_unlock:
- spin_unlock_irq(&ds_lock);
- ds_put_context(tracer->ds.context);
- out_tracer:
- kfree(tracer);
- out_put_tracer:
- put_tracer(task);
- out:
- return ERR_PTR(error);
-}
-
-struct bts_tracer *ds_request_bts_task(struct task_struct *task,
- void *base, size_t size,
- bts_ovfl_callback_t ovfl,
- size_t th, unsigned int flags)
-{
- return ds_request_bts(task, 0, base, size, ovfl, th, flags);
-}
-
-struct bts_tracer *ds_request_bts_cpu(int cpu, void *base, size_t size,
- bts_ovfl_callback_t ovfl,
- size_t th, unsigned int flags)
-{
- return ds_request_bts(NULL, cpu, base, size, ovfl, th, flags);
-}
-
-static struct pebs_tracer *ds_request_pebs(struct task_struct *task, int cpu,
- void *base, size_t size,
- pebs_ovfl_callback_t ovfl, size_t th,
- unsigned int flags)
-{
- struct pebs_tracer *tracer;
- int error;
-
- /* Buffer overflow notification is not yet implemented. */
- error = -EOPNOTSUPP;
- if (ovfl)
- goto out;
-
- error = get_tracer(task);
- if (error < 0)
- goto out;
-
- error = -ENOMEM;
- tracer = kzalloc(sizeof(*tracer), GFP_KERNEL);
- if (!tracer)
- goto out_put_tracer;
- tracer->ovfl = ovfl;
-
- /* Do some more error checking and acquire a tracing context. */
- error = ds_request(&tracer->ds, &tracer->trace.ds,
- ds_pebs, task, cpu, base, size, th);
- if (error < 0)
- goto out_tracer;
-
- /* Claim the pebs part of the tracing context we acquired above. */
- spin_lock_irq(&ds_lock);
-
- error = -EPERM;
- if (tracer->ds.context->pebs_master)
- goto out_unlock;
- tracer->ds.context->pebs_master = tracer;
-
- spin_unlock_irq(&ds_lock);
-
- /*
- * Now that we own the pebs part of the context, let's complete the
- * initialization for that part.
- */
- ds_init_ds_trace(&tracer->trace.ds, ds_pebs, base, size, th, flags);
- ds_write_config(tracer->ds.context, &tracer->trace.ds, ds_pebs);
- ds_install_ds_area(tracer->ds.context);
-
- /* Start tracing. */
- ds_resume_pebs(tracer);
-
- return tracer;
-
- out_unlock:
- spin_unlock_irq(&ds_lock);
- ds_put_context(tracer->ds.context);
- out_tracer:
- kfree(tracer);
- out_put_tracer:
- put_tracer(task);
- out:
- return ERR_PTR(error);
-}
-
-struct pebs_tracer *ds_request_pebs_task(struct task_struct *task,
- void *base, size_t size,
- pebs_ovfl_callback_t ovfl,
- size_t th, unsigned int flags)
-{
- return ds_request_pebs(task, 0, base, size, ovfl, th, flags);
-}
-
-struct pebs_tracer *ds_request_pebs_cpu(int cpu, void *base, size_t size,
- pebs_ovfl_callback_t ovfl,
- size_t th, unsigned int flags)
-{
- return ds_request_pebs(NULL, cpu, base, size, ovfl, th, flags);
-}
-
-static void ds_free_bts(struct bts_tracer *tracer)
-{
- struct task_struct *task;
-
- task = tracer->ds.context->task;
-
- WARN_ON_ONCE(tracer->ds.context->bts_master != tracer);
- tracer->ds.context->bts_master = NULL;
-
- /* Make sure tracing stopped and the tracer is not in use. */
- if (task && (task != current))
- wait_task_context_switch(task);
-
- ds_put_context(tracer->ds.context);
- put_tracer(task);
-
- kfree(tracer);
-}
-
-void ds_release_bts(struct bts_tracer *tracer)
-{
- might_sleep();
-
- if (!tracer)
- return;
-
- ds_suspend_bts(tracer);
- ds_free_bts(tracer);
-}
-
-int ds_release_bts_noirq(struct bts_tracer *tracer)
-{
- struct task_struct *task;
- unsigned long irq;
- int error;
-
- if (!tracer)
- return 0;
-
- task = tracer->ds.context->task;
-
- local_irq_save(irq);
-
- error = -EPERM;
- if (!task &&
- (tracer->ds.context->cpu != smp_processor_id()))
- goto out;
-
- error = -EPERM;
- if (task && (task != current))
- goto out;
-
- ds_suspend_bts_noirq(tracer);
- ds_free_bts(tracer);
-
- error = 0;
- out:
- local_irq_restore(irq);
- return error;
-}
-
-static void update_task_debugctlmsr(struct task_struct *task,
- unsigned long debugctlmsr)
-{
- task->thread.debugctlmsr = debugctlmsr;
-
- get_cpu();
- if (task == current)
- update_debugctlmsr(debugctlmsr);
- put_cpu();
-}
-
-void ds_suspend_bts(struct bts_tracer *tracer)
-{
- struct task_struct *task;
- unsigned long debugctlmsr;
- int cpu;
-
- if (!tracer)
- return;
-
- tracer->flags = 0;
-
- task = tracer->ds.context->task;
- cpu = tracer->ds.context->cpu;
-
- WARN_ON(!task && irqs_disabled());
-
- debugctlmsr = (task ?
- task->thread.debugctlmsr :
- get_debugctlmsr_on_cpu(cpu));
- debugctlmsr &= ~BTS_CONTROL;
-
- if (task)
- update_task_debugctlmsr(task, debugctlmsr);
- else
- update_debugctlmsr_on_cpu(cpu, debugctlmsr);
-}
-
-int ds_suspend_bts_noirq(struct bts_tracer *tracer)
-{
- struct task_struct *task;
- unsigned long debugctlmsr, irq;
- int cpu, error = 0;
-
- if (!tracer)
- return 0;
-
- tracer->flags = 0;
-
- task = tracer->ds.context->task;
- cpu = tracer->ds.context->cpu;
-
- local_irq_save(irq);
-
- error = -EPERM;
- if (!task && (cpu != smp_processor_id()))
- goto out;
-
- debugctlmsr = (task ?
- task->thread.debugctlmsr :
- get_debugctlmsr());
- debugctlmsr &= ~BTS_CONTROL;
-
- if (task)
- update_task_debugctlmsr(task, debugctlmsr);
- else
- update_debugctlmsr(debugctlmsr);
-
- error = 0;
- out:
- local_irq_restore(irq);
- return error;
-}
-
-static unsigned long ds_bts_control(struct bts_tracer *tracer)
-{
- unsigned long control;
-
- control = ds_cfg.ctl[dsf_bts];
- if (!(tracer->trace.ds.flags & BTS_KERNEL))
- control |= ds_cfg.ctl[dsf_bts_kernel];
- if (!(tracer->trace.ds.flags & BTS_USER))
- control |= ds_cfg.ctl[dsf_bts_user];
-
- return control;
-}
-
-void ds_resume_bts(struct bts_tracer *tracer)
-{
- struct task_struct *task;
- unsigned long debugctlmsr;
- int cpu;
-
- if (!tracer)
- return;
-
- tracer->flags = tracer->trace.ds.flags;
-
- task = tracer->ds.context->task;
- cpu = tracer->ds.context->cpu;
-
- WARN_ON(!task && irqs_disabled());
-
- debugctlmsr = (task ?
- task->thread.debugctlmsr :
- get_debugctlmsr_on_cpu(cpu));
- debugctlmsr |= ds_bts_control(tracer);
-
- if (task)
- update_task_debugctlmsr(task, debugctlmsr);
- else
- update_debugctlmsr_on_cpu(cpu, debugctlmsr);
-}
-
-int ds_resume_bts_noirq(struct bts_tracer *tracer)
-{
- struct task_struct *task;
- unsigned long debugctlmsr, irq;
- int cpu, error = 0;
-
- if (!tracer)
- return 0;
-
- tracer->flags = tracer->trace.ds.flags;
-
- task = tracer->ds.context->task;
- cpu = tracer->ds.context->cpu;
-
- local_irq_save(irq);
-
- error = -EPERM;
- if (!task && (cpu != smp_processor_id()))
- goto out;
-
- debugctlmsr = (task ?
- task->thread.debugctlmsr :
- get_debugctlmsr());
- debugctlmsr |= ds_bts_control(tracer);
-
- if (task)
- update_task_debugctlmsr(task, debugctlmsr);
- else
- update_debugctlmsr(debugctlmsr);
-
- error = 0;
- out:
- local_irq_restore(irq);
- return error;
-}
-
-static void ds_free_pebs(struct pebs_tracer *tracer)
-{
- struct task_struct *task;
-
- task = tracer->ds.context->task;
-
- WARN_ON_ONCE(tracer->ds.context->pebs_master != tracer);
- tracer->ds.context->pebs_master = NULL;
-
- ds_put_context(tracer->ds.context);
- put_tracer(task);
-
- kfree(tracer);
-}
-
-void ds_release_pebs(struct pebs_tracer *tracer)
-{
- might_sleep();
-
- if (!tracer)
- return;
-
- ds_suspend_pebs(tracer);
- ds_free_pebs(tracer);
-}
-
-int ds_release_pebs_noirq(struct pebs_tracer *tracer)
-{
- struct task_struct *task;
- unsigned long irq;
- int error;
-
- if (!tracer)
- return 0;
-
- task = tracer->ds.context->task;
-
- local_irq_save(irq);
-
- error = -EPERM;
- if (!task &&
- (tracer->ds.context->cpu != smp_processor_id()))
- goto out;
-
- error = -EPERM;
- if (task && (task != current))
- goto out;
-
- ds_suspend_pebs_noirq(tracer);
- ds_free_pebs(tracer);
-
- error = 0;
- out:
- local_irq_restore(irq);
- return error;
-}
-
-void ds_suspend_pebs(struct pebs_tracer *tracer)
-{
-
-}
-
-int ds_suspend_pebs_noirq(struct pebs_tracer *tracer)
-{
- return 0;
-}
-
-void ds_resume_pebs(struct pebs_tracer *tracer)
-{
-
-}
-
-int ds_resume_pebs_noirq(struct pebs_tracer *tracer)
-{
- return 0;
-}
-
-const struct bts_trace *ds_read_bts(struct bts_tracer *tracer)
-{
- if (!tracer)
- return NULL;
-
- ds_read_config(tracer->ds.context, &tracer->trace.ds, ds_bts);
- return &tracer->trace;
-}
-
-const struct pebs_trace *ds_read_pebs(struct pebs_tracer *tracer)
-{
- if (!tracer)
- return NULL;
-
- ds_read_config(tracer->ds.context, &tracer->trace.ds, ds_pebs);
-
- tracer->trace.counters = ds_cfg.nr_counter_reset;
- memcpy(tracer->trace.counter_reset,
- tracer->ds.context->ds +
- (NUM_DS_PTR_FIELDS * ds_cfg.sizeof_ptr_field),
- ds_cfg.nr_counter_reset * PEBS_RESET_FIELD_SIZE);
-
- return &tracer->trace;
-}
-
-int ds_reset_bts(struct bts_tracer *tracer)
-{
- if (!tracer)
- return -EINVAL;
-
- tracer->trace.ds.top = tracer->trace.ds.begin;
-
- ds_set(tracer->ds.context->ds, ds_bts, ds_index,
- (unsigned long)tracer->trace.ds.top);
-
- return 0;
-}
-
-int ds_reset_pebs(struct pebs_tracer *tracer)
-{
- if (!tracer)
- return -EINVAL;
-
- tracer->trace.ds.top = tracer->trace.ds.begin;
-
- ds_set(tracer->ds.context->ds, ds_pebs, ds_index,
- (unsigned long)tracer->trace.ds.top);
-
- return 0;
-}
-
-int ds_set_pebs_reset(struct pebs_tracer *tracer,
- unsigned int counter, u64 value)
-{
- if (!tracer)
- return -EINVAL;
-
- if (ds_cfg.nr_counter_reset < counter)
- return -EINVAL;
-
- *(u64 *)(tracer->ds.context->ds +
- (NUM_DS_PTR_FIELDS * ds_cfg.sizeof_ptr_field) +
- (counter * PEBS_RESET_FIELD_SIZE)) = value;
-
- return 0;
-}
-
-static const struct ds_configuration ds_cfg_netburst = {
- .name = "Netburst",
- .ctl[dsf_bts] = (1 << 2) | (1 << 3),
- .ctl[dsf_bts_kernel] = (1 << 5),
- .ctl[dsf_bts_user] = (1 << 6),
- .nr_counter_reset = 1,
-};
-static const struct ds_configuration ds_cfg_pentium_m = {
- .name = "Pentium M",
- .ctl[dsf_bts] = (1 << 6) | (1 << 7),
- .nr_counter_reset = 1,
-};
-static const struct ds_configuration ds_cfg_core2_atom = {
- .name = "Core 2/Atom",
- .ctl[dsf_bts] = (1 << 6) | (1 << 7),
- .ctl[dsf_bts_kernel] = (1 << 9),
- .ctl[dsf_bts_user] = (1 << 10),
- .nr_counter_reset = 1,
-};
-static const struct ds_configuration ds_cfg_core_i7 = {
- .name = "Core i7",
- .ctl[dsf_bts] = (1 << 6) | (1 << 7),
- .ctl[dsf_bts_kernel] = (1 << 9),
- .ctl[dsf_bts_user] = (1 << 10),
- .nr_counter_reset = 4,
-};
-
-static void
-ds_configure(const struct ds_configuration *cfg,
- struct cpuinfo_x86 *cpu)
-{
- unsigned long nr_pebs_fields = 0;
-
- printk(KERN_INFO "[ds] using %s configuration\n", cfg->name);
-
-#ifdef __i386__
- nr_pebs_fields = 10;
-#else
- nr_pebs_fields = 18;
-#endif
-
- /*
- * Starting with version 2, architectural performance
- * monitoring supports a format specifier.
- */
- if ((cpuid_eax(0xa) & 0xff) > 1) {
- unsigned long perf_capabilities, format;
-
- rdmsrl(MSR_IA32_PERF_CAPABILITIES, perf_capabilities);
-
- format = (perf_capabilities >> 8) & 0xf;
-
- switch (format) {
- case 0:
- nr_pebs_fields = 18;
- break;
- case 1:
- nr_pebs_fields = 22;
- break;
- default:
- printk(KERN_INFO
- "[ds] unknown PEBS format: %lu\n", format);
- nr_pebs_fields = 0;
- break;
- }
- }
-
- memset(&ds_cfg, 0, sizeof(ds_cfg));
- ds_cfg = *cfg;
-
- ds_cfg.sizeof_ptr_field =
- (cpu_has(cpu, X86_FEATURE_DTES64) ? 8 : 4);
-
- ds_cfg.sizeof_rec[ds_bts] = ds_cfg.sizeof_ptr_field * 3;
- ds_cfg.sizeof_rec[ds_pebs] = ds_cfg.sizeof_ptr_field * nr_pebs_fields;
-
- if (!cpu_has(cpu, X86_FEATURE_BTS)) {
- ds_cfg.sizeof_rec[ds_bts] = 0;
- printk(KERN_INFO "[ds] bts not available\n");
- }
- if (!cpu_has(cpu, X86_FEATURE_PEBS)) {
- ds_cfg.sizeof_rec[ds_pebs] = 0;
- printk(KERN_INFO "[ds] pebs not available\n");
- }
-
- printk(KERN_INFO "[ds] sizes: address: %u bit, ",
- 8 * ds_cfg.sizeof_ptr_field);
- printk("bts/pebs record: %u/%u bytes\n",
- ds_cfg.sizeof_rec[ds_bts], ds_cfg.sizeof_rec[ds_pebs]);
-
- WARN_ON_ONCE(MAX_PEBS_COUNTERS < ds_cfg.nr_counter_reset);
-}
-
-void __cpuinit ds_init_intel(struct cpuinfo_x86 *c)
-{
- /* Only configure the first cpu. Others are identical. */
- if (ds_cfg.name)
- return;
-
- switch (c->x86) {
- case 0x6:
- switch (c->x86_model) {
- case 0x9:
- case 0xd: /* Pentium M */
- ds_configure(&ds_cfg_pentium_m, c);
- break;
- case 0xf:
- case 0x17: /* Core2 */
- case 0x1c: /* Atom */
- ds_configure(&ds_cfg_core2_atom, c);
- break;
- case 0x1a: /* Core i7 */
- ds_configure(&ds_cfg_core_i7, c);
- break;
- default:
- /* Sorry, don't know about them. */
- break;
- }
- break;
- case 0xf:
- switch (c->x86_model) {
- case 0x0:
- case 0x1:
- case 0x2: /* Netburst */
- ds_configure(&ds_cfg_netburst, c);
- break;
- default:
- /* Sorry, don't know about them. */
- break;
- }
- break;
- default:
- /* Sorry, don't know about them. */
- break;
- }
-}
-
-static inline void ds_take_timestamp(struct ds_context *context,
- enum bts_qualifier qualifier,
- struct task_struct *task)
-{
- struct bts_tracer *tracer = context->bts_master;
- struct bts_struct ts;
-
- /* Prevent compilers from reading the tracer pointer twice. */
- barrier();
-
- if (!tracer || !(tracer->flags & BTS_TIMESTAMPS))
- return;
-
- memset(&ts, 0, sizeof(ts));
- ts.qualifier = qualifier;
- ts.variant.event.clock = trace_clock_global();
- ts.variant.event.pid = task->pid;
-
- bts_write(tracer, &ts);
-}
-
-/*
- * Change the DS configuration from tracing prev to tracing next.
- */
-void ds_switch_to(struct task_struct *prev, struct task_struct *next)
-{
- struct ds_context *prev_ctx = prev->thread.ds_ctx;
- struct ds_context *next_ctx = next->thread.ds_ctx;
- unsigned long debugctlmsr = next->thread.debugctlmsr;
-
- /* Make sure all data is read before we start. */
- barrier();
-
- if (prev_ctx) {
- update_debugctlmsr(0);
-
- ds_take_timestamp(prev_ctx, bts_task_departs, prev);
- }
-
- if (next_ctx) {
- ds_take_timestamp(next_ctx, bts_task_arrives, next);
-
- wrmsrl(MSR_IA32_DS_AREA, (unsigned long)next_ctx->ds);
- }
-
- update_debugctlmsr(debugctlmsr);
-}
-
-static __init int ds_selftest(void)
-{
- if (ds_cfg.sizeof_rec[ds_bts]) {
- int error;
-
- error = ds_selftest_bts();
- if (error) {
- WARN(1, "[ds] selftest failed. disabling bts.\n");
- ds_cfg.sizeof_rec[ds_bts] = 0;
- }
- }
-
- if (ds_cfg.sizeof_rec[ds_pebs]) {
- int error;
-
- error = ds_selftest_pebs();
- if (error) {
- WARN(1, "[ds] selftest failed. disabling pebs.\n");
- ds_cfg.sizeof_rec[ds_pebs] = 0;
- }
- }
-
- return 0;
-}
-device_initcall(ds_selftest);
diff --git a/arch/x86/kernel/ds_selftest.c b/arch/x86/kernel/ds_selftest.c
deleted file mode 100644
index 6bc7c199ab99..000000000000
--- a/arch/x86/kernel/ds_selftest.c
+++ /dev/null
@@ -1,408 +0,0 @@
-/*
- * Debug Store support - selftest
- *
- *
- * Copyright (C) 2009 Intel Corporation.
- * Markus Metzger <markus.t.metzger@intel.com>, 2009
- */
-
-#include "ds_selftest.h"
-
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/smp.h>
-#include <linux/cpu.h>
-
-#include <asm/ds.h>
-
-
-#define BUFFER_SIZE 521 /* Intentionally chose an odd size. */
-#define SMALL_BUFFER_SIZE 24 /* A single bts entry. */
-
-struct ds_selftest_bts_conf {
- struct bts_tracer *tracer;
- int error;
- int (*suspend)(struct bts_tracer *);
- int (*resume)(struct bts_tracer *);
-};
-
-static int ds_selftest_bts_consistency(const struct bts_trace *trace)
-{
- int error = 0;
-
- if (!trace) {
- printk(KERN_CONT "failed to access trace...");
- /* Bail out. Other tests are pointless. */
- return -1;
- }
-
- if (!trace->read) {
- printk(KERN_CONT "bts read not available...");
- error = -1;
- }
-
- /* Do some sanity checks on the trace configuration. */
- if (!trace->ds.n) {
- printk(KERN_CONT "empty bts buffer...");
- error = -1;
- }
- if (!trace->ds.size) {
- printk(KERN_CONT "bad bts trace setup...");
- error = -1;
- }
- if (trace->ds.end !=
- (char *)trace->ds.begin + (trace->ds.n * trace->ds.size)) {
- printk(KERN_CONT "bad bts buffer setup...");
- error = -1;
- }
- /*
- * We allow top in [begin; end], since it's not clear when the
- * overflow adjustment happens: after the increment or before the
- * write.
- */
- if ((trace->ds.top < trace->ds.begin) ||
- (trace->ds.end < trace->ds.top)) {
- printk(KERN_CONT "bts top out of bounds...");
- error = -1;
- }
-
- return error;
-}
-
-static int ds_selftest_bts_read(struct bts_tracer *tracer,
- const struct bts_trace *trace,
- const void *from, const void *to)
-{
- const unsigned char *at;
-
- /*
- * Check a few things which do not belong to this test.
- * They should be covered by other tests.
- */
- if (!trace)
- return -1;
-
- if (!trace->read)
- return -1;
-
- if (to < from)
- return -1;
-
- if (from < trace->ds.begin)
- return -1;
-
- if (trace->ds.end < to)
- return -1;
-
- if (!trace->ds.size)
- return -1;
-
- /* Now to the test itself. */
- for (at = from; (void *)at < to; at += trace->ds.size) {
- struct bts_struct bts;
- unsigned long index;
- int error;
-
- if (((void *)at - trace->ds.begin) % trace->ds.size) {
- printk(KERN_CONT
- "read from non-integer index...");
- return -1;
- }
- index = ((void *)at - trace->ds.begin) / trace->ds.size;
-
- memset(&bts, 0, sizeof(bts));
- error = trace->read(tracer, at, &bts);
- if (error < 0) {
- printk(KERN_CONT
- "error reading bts trace at [%lu] (0x%p)...",
- index, at);
- return error;
- }
-
- switch (bts.qualifier) {
- case BTS_BRANCH:
- break;
- default:
- printk(KERN_CONT
- "unexpected bts entry %llu at [%lu] (0x%p)...",
- bts.qualifier, index, at);
- return -1;
- }
- }
-
- return 0;
-}
-
-static void ds_selftest_bts_cpu(void *arg)
-{
- struct ds_selftest_bts_conf *conf = arg;
- const struct bts_trace *trace;
- void *top;
-
- if (IS_ERR(conf->tracer)) {
- conf->error = PTR_ERR(conf->tracer);
- conf->tracer = NULL;
-
- printk(KERN_CONT
- "initialization failed (err: %d)...", conf->error);
- return;
- }
-
- /* We should meanwhile have enough trace. */
- conf->error = conf->suspend(conf->tracer);
- if (conf->error < 0)
- return;
-
- /* Let's see if we can access the trace. */
- trace = ds_read_bts(conf->tracer);
-
- conf->error = ds_selftest_bts_consistency(trace);
- if (conf->error < 0)
- return;
-
- /* If everything went well, we should have a few trace entries. */
- if (trace->ds.top == trace->ds.begin) {
- /*
- * It is possible but highly unlikely that we got a
- * buffer overflow and end up at exactly the same
- * position we started from.
- * Let's issue a warning, but continue.
- */
- printk(KERN_CONT "no trace/overflow...");
- }
-
- /* Let's try to read the trace we collected. */
- conf->error =
- ds_selftest_bts_read(conf->tracer, trace,
- trace->ds.begin, trace->ds.top);
- if (conf->error < 0)
- return;
-
- /*
- * Let's read the trace again.
- * Since we suspended tracing, we should get the same result.
- */
- top = trace->ds.top;
-
- trace = ds_read_bts(conf->tracer);
- conf->error = ds_selftest_bts_consistency(trace);
- if (conf->error < 0)
- return;
-
- if (top != trace->ds.top) {
- printk(KERN_CONT "suspend not working...");
- conf->error = -1;
- return;
- }
-
- /* Let's collect some more trace - see if resume is working. */
- conf->error = conf->resume(conf->tracer);
- if (conf->error < 0)
- return;
-
- conf->error = conf->suspend(conf->tracer);
- if (conf->error < 0)
- return;
-
- trace = ds_read_bts(conf->tracer);
-
- conf->error = ds_selftest_bts_consistency(trace);
- if (conf->error < 0)
- return;
-
- if (trace->ds.top == top) {
- /*
- * It is possible but highly unlikely that we got a
- * buffer overflow and end up at exactly the same
- * position we started from.
- * Let's issue a warning and check the full trace.
- */
- printk(KERN_CONT
- "no resume progress/overflow...");
-
- conf->error =
- ds_selftest_bts_read(conf->tracer, trace,
- trace->ds.begin, trace->ds.end);
- } else if (trace->ds.top < top) {
- /*
- * We had a buffer overflow - the entire buffer should
- * contain trace records.
- */
- conf->error =
- ds_selftest_bts_read(conf->tracer, trace,
- trace->ds.begin, trace->ds.end);
- } else {
- /*
- * It is quite likely that the buffer did not overflow.
- * Let's just check the delta trace.
- */
- conf->error =
- ds_selftest_bts_read(conf->tracer, trace, top,
- trace->ds.top);
- }
- if (conf->error < 0)
- return;
-
- conf->error = 0;
-}
-
-static int ds_suspend_bts_wrap(struct bts_tracer *tracer)
-{
- ds_suspend_bts(tracer);
- return 0;
-}
-
-static int ds_resume_bts_wrap(struct bts_tracer *tracer)
-{
- ds_resume_bts(tracer);
- return 0;
-}
-
-static void ds_release_bts_noirq_wrap(void *tracer)
-{
- (void)ds_release_bts_noirq(tracer);
-}
-
-static int ds_selftest_bts_bad_release_noirq(int cpu,
- struct bts_tracer *tracer)
-{
- int error = -EPERM;
-
- /* Try to release the tracer on the wrong cpu. */
- get_cpu();
- if (cpu != smp_processor_id()) {
- error = ds_release_bts_noirq(tracer);
- if (error != -EPERM)
- printk(KERN_CONT "release on wrong cpu...");
- }
- put_cpu();
-
- return error ? 0 : -1;
-}
-
-static int ds_selftest_bts_bad_request_cpu(int cpu, void *buffer)
-{
- struct bts_tracer *tracer;
- int error;
-
- /* Try to request cpu tracing while task tracing is active. */
- tracer = ds_request_bts_cpu(cpu, buffer, BUFFER_SIZE, NULL,
- (size_t)-1, BTS_KERNEL);
- error = PTR_ERR(tracer);
- if (!IS_ERR(tracer)) {
- ds_release_bts(tracer);
- error = 0;
- }
-
- if (error != -EPERM)
- printk(KERN_CONT "cpu/task tracing overlap...");
-
- return error ? 0 : -1;
-}
-
-static int ds_selftest_bts_bad_request_task(void *buffer)
-{
- struct bts_tracer *tracer;
- int error;
-
-	/* Try to request task tracing while cpu tracing is active. */
- tracer = ds_request_bts_task(current, buffer, BUFFER_SIZE, NULL,
- (size_t)-1, BTS_KERNEL);
- error = PTR_ERR(tracer);
- if (!IS_ERR(tracer)) {
- error = 0;
- ds_release_bts(tracer);
- }
-
- if (error != -EPERM)
- printk(KERN_CONT "task/cpu tracing overlap...");
-
- return error ? 0 : -1;
-}
-
-int ds_selftest_bts(void)
-{
- struct ds_selftest_bts_conf conf;
- unsigned char buffer[BUFFER_SIZE], *small_buffer;
- unsigned long irq;
- int cpu;
-
- printk(KERN_INFO "[ds] bts selftest...");
- conf.error = 0;
-
- small_buffer = (unsigned char *)ALIGN((unsigned long)buffer, 8) + 8;
-
- get_online_cpus();
- for_each_online_cpu(cpu) {
- conf.suspend = ds_suspend_bts_wrap;
- conf.resume = ds_resume_bts_wrap;
- conf.tracer =
- ds_request_bts_cpu(cpu, buffer, BUFFER_SIZE,
- NULL, (size_t)-1, BTS_KERNEL);
- ds_selftest_bts_cpu(&conf);
- if (conf.error >= 0)
- conf.error = ds_selftest_bts_bad_request_task(buffer);
- ds_release_bts(conf.tracer);
- if (conf.error < 0)
- goto out;
-
- conf.suspend = ds_suspend_bts_noirq;
- conf.resume = ds_resume_bts_noirq;
- conf.tracer =
- ds_request_bts_cpu(cpu, buffer, BUFFER_SIZE,
- NULL, (size_t)-1, BTS_KERNEL);
- smp_call_function_single(cpu, ds_selftest_bts_cpu, &conf, 1);
- if (conf.error >= 0) {
- conf.error =
- ds_selftest_bts_bad_release_noirq(cpu,
- conf.tracer);
- /* We must not release the tracer twice. */
- if (conf.error < 0)
- conf.tracer = NULL;
- }
- if (conf.error >= 0)
- conf.error = ds_selftest_bts_bad_request_task(buffer);
- smp_call_function_single(cpu, ds_release_bts_noirq_wrap,
- conf.tracer, 1);
- if (conf.error < 0)
- goto out;
- }
-
- conf.suspend = ds_suspend_bts_wrap;
- conf.resume = ds_resume_bts_wrap;
- conf.tracer =
- ds_request_bts_task(current, buffer, BUFFER_SIZE,
- NULL, (size_t)-1, BTS_KERNEL);
- ds_selftest_bts_cpu(&conf);
- if (conf.error >= 0)
- conf.error = ds_selftest_bts_bad_request_cpu(0, buffer);
- ds_release_bts(conf.tracer);
- if (conf.error < 0)
- goto out;
-
- conf.suspend = ds_suspend_bts_noirq;
- conf.resume = ds_resume_bts_noirq;
- conf.tracer =
- ds_request_bts_task(current, small_buffer, SMALL_BUFFER_SIZE,
- NULL, (size_t)-1, BTS_KERNEL);
- local_irq_save(irq);
- ds_selftest_bts_cpu(&conf);
- if (conf.error >= 0)
- conf.error = ds_selftest_bts_bad_request_cpu(0, buffer);
- ds_release_bts_noirq(conf.tracer);
- local_irq_restore(irq);
- if (conf.error < 0)
- goto out;
-
- conf.error = 0;
- out:
- put_online_cpus();
- printk(KERN_CONT "%s.\n", (conf.error ? "failed" : "passed"));
-
- return conf.error;
-}
-
-int ds_selftest_pebs(void)
-{
- return 0;
-}
diff --git a/arch/x86/kernel/ds_selftest.h b/arch/x86/kernel/ds_selftest.h
deleted file mode 100644
index 2ba8745c6663..000000000000
--- a/arch/x86/kernel/ds_selftest.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Debug Store support - selftest
- *
- *
- * Copyright (C) 2009 Intel Corporation.
- * Markus Metzger <markus.t.metzger@intel.com>, 2009
- */
-
-#ifdef CONFIG_X86_DS_SELFTEST
-extern int ds_selftest_bts(void);
-extern int ds_selftest_pebs(void);
-#else
-static inline int ds_selftest_bts(void) { return 0; }
-static inline int ds_selftest_pebs(void) { return 0; }
-#endif
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index 6d817554780a..c89a386930b7 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -224,11 +224,6 @@ unsigned __kprobes long oops_begin(void)
int cpu;
unsigned long flags;
- /* notify the hw-branch tracer so it may disable tracing and
- add the last trace to the trace buffer -
- the earlier this happens, the more useful the trace. */
- trace_hw_branch_oops();
-
oops_enter();
/* racy, but better than risking deadlock. */
diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
index b9c830c12b4a..fa99bae75ace 100644
--- a/arch/x86/kernel/early_printk.c
+++ b/arch/x86/kernel/early_printk.c
@@ -41,6 +41,14 @@ static void early_vga_write(struct console *con, const char *str, unsigned n)
writew(0x720, VGABASE + 2*(max_xpos*j + i));
current_ypos = max_ypos-1;
}
+#ifdef CONFIG_KGDB_KDB
+ if (c == '\b') {
+ if (current_xpos > 0)
+ current_xpos--;
+ } else if (c == '\r') {
+ current_xpos = 0;
+ } else
+#endif
if (c == '\n') {
current_xpos = 0;
current_ypos++;
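
The hunk above teaches the early VGA console about '\b' and '\r' so that kdb line editing renders sensibly. A standalone userspace sketch of the same cursor bookkeeping; the names (advance_cursor, width) are made up and not part of the patch:

#include <stdio.h>

/* Sketch of the cursor handling: backspace steps left, CR rewinds the line. */
static void advance_cursor(int *x, int *y, int width, char c)
{
	if (c == '\b') {
		if (*x > 0)
			(*x)--;
	} else if (c == '\r') {
		*x = 0;
	} else if (c == '\n') {
		*x = 0;
		(*y)++;
	} else if (++(*x) >= width) {	/* printable char: wrap at end of line */
		*x = 0;
		(*y)++;
	}
}

int main(void)
{
	int x = 3, y = 0;

	advance_cursor(&x, &y, 80, '\b');
	printf("cursor now at (%d,%d)\n", x, y);		/* (2,0) */
	return 0;
}
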
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 44a8e0dc6737..cd49141cf153 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -53,6 +53,7 @@
#include <asm/processor-flags.h>
#include <asm/ftrace.h>
#include <asm/irq_vectors.h>
+#include <asm/cpufeature.h>
/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
#include <linux/elf-em.h>
@@ -905,7 +906,25 @@ ENTRY(simd_coprocessor_error)
RING0_INT_FRAME
pushl $0
CFI_ADJUST_CFA_OFFSET 4
+#ifdef CONFIG_X86_INVD_BUG
+ /* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */
+661: pushl $do_general_protection
+662:
+.section .altinstructions,"a"
+ .balign 4
+ .long 661b
+ .long 663f
+ .byte X86_FEATURE_XMM
+ .byte 662b-661b
+ .byte 664f-663f
+.previous
+.section .altinstr_replacement,"ax"
+663: pushl $do_simd_coprocessor_error
+664:
+.previous
+#else
pushl $do_simd_coprocessor_error
+#endif
CFI_ADJUST_CFA_OFFSET 4
jmp error_code
CFI_ENDPROC
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 23b4ecdffa9b..a198b7c87a12 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -36,6 +36,7 @@
unsigned long hpet_address;
u8 hpet_blockid; /* OS timer block num */
u8 hpet_msi_disable;
+u8 hpet_readback_cmp;
#ifdef CONFIG_PCI_MSI
static unsigned long hpet_num_timers;
@@ -395,19 +396,23 @@ static int hpet_next_event(unsigned long delta,
* at that point and we would wait for the next hpet interrupt
* forever. We found out that reading the CMP register back
* forces the transfer so we can rely on the comparison with
- * the counter register below. If the read back from the
- * compare register does not match the value we programmed
- * then we might have a real hardware problem. We can not do
- * much about it here, but at least alert the user/admin with
- * a prominent warning.
- * An erratum on some chipsets (ICH9,..), results in comparator read
- * immediately following a write returning old value. Workaround
- * for this is to read this value second time, when first
- * read returns old value.
+ * the counter register below.
+ *
+ * That works fine on those ATI chipsets, but on newer Intel
+ * chipsets (ICH9...) this triggers due to an erratum: Reading
+ * the comparator immediately following a write is returning
+ * the old value.
+ *
+ * We restrict the read back to the affected ATI chipsets (set
+ * by quirks) and also run it with hpet=verbose for debugging
+ * purposes.
*/
- if (unlikely((u32)hpet_readl(HPET_Tn_CMP(timer)) != cnt)) {
- WARN_ONCE(hpet_readl(HPET_Tn_CMP(timer)) != cnt,
- KERN_WARNING "hpet: compare register read back failed.\n");
+ if (hpet_readback_cmp || hpet_verbose) {
+ u32 cmp = hpet_readl(HPET_Tn_CMP(timer));
+
+ if (cmp != cnt)
+ printk_once(KERN_WARNING
+ "hpet: compare register read back failed.\n");
}
return (s32)(hpet_readl(HPET_COUNTER) - cnt) >= 0 ? -ETIME : 0;
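
The unchanged return statement above compares the free-running HPET counter against the programmed deadline with a signed subtraction, which stays correct across a 32-bit counter wrap. A minimal userspace sketch of that wrap-safe comparison (function name and values are invented):

#include <stdint.h>
#include <stdio.h>

/* True once 'deadline' has been reached, even if the counter wrapped,
 * as long as the distance between the two values is below 2^31 ticks. */
static int deadline_passed(uint32_t counter, uint32_t deadline)
{
	return (int32_t)(counter - deadline) >= 0;
}

int main(void)
{
	printf("%d\n", deadline_passed(0x00000005, 0xfffffff0));	/* 1: passed, across the wrap */
	printf("%d\n", deadline_passed(0xffffffef, 0xfffffff0));	/* 0: one tick short */
	return 0;
}
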
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
index d6cc065f519f..a8f1b803d2fd 100644
--- a/arch/x86/kernel/hw_breakpoint.c
+++ b/arch/x86/kernel/hw_breakpoint.c
@@ -189,25 +189,16 @@ static int get_hbp_len(u8 hbp_len)
}
/*
- * Check for virtual address in user space.
- */
-int arch_check_va_in_userspace(unsigned long va, u8 hbp_len)
-{
- unsigned int len;
-
- len = get_hbp_len(hbp_len);
-
- return (va <= TASK_SIZE - len);
-}
-
-/*
* Check for virtual address in kernel space.
*/
-static int arch_check_va_in_kernelspace(unsigned long va, u8 hbp_len)
+int arch_check_bp_in_kernelspace(struct perf_event *bp)
{
unsigned int len;
+ unsigned long va;
+ struct arch_hw_breakpoint *info = counter_arch_bp(bp);
- len = get_hbp_len(hbp_len);
+ va = info->address;
+ len = get_hbp_len(info->len);
return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
}
@@ -300,8 +291,7 @@ static int arch_build_bp_info(struct perf_event *bp)
/*
* Validate the arch-specific HW Breakpoint register settings
*/
-int arch_validate_hwbkpt_settings(struct perf_event *bp,
- struct task_struct *tsk)
+int arch_validate_hwbkpt_settings(struct perf_event *bp)
{
struct arch_hw_breakpoint *info = counter_arch_bp(bp);
unsigned int align;
@@ -314,16 +304,6 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp,
ret = -EINVAL;
- if (info->type == X86_BREAKPOINT_EXECUTE)
- /*
- * Ptrace-refactoring code
- * For now, we'll allow instruction breakpoint only for user-space
- * addresses
- */
- if ((!arch_check_va_in_userspace(info->address, info->len)) &&
- info->len != X86_BREAKPOINT_EXECUTE)
- return ret;
-
switch (info->len) {
case X86_BREAKPOINT_LEN_1:
align = 0;
@@ -350,15 +330,6 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp,
if (info->address & align)
return -EINVAL;
- /* Check that the virtual address is in the proper range */
- if (tsk) {
- if (!arch_check_va_in_userspace(info->address, info->len))
- return -EFAULT;
- } else {
- if (!arch_check_va_in_kernelspace(info->address, info->len))
- return -EFAULT;
- }
-
return 0;
}
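
The new arch_check_bp_in_kernelspace() above accepts a breakpoint only if the whole watched range sits at or above TASK_SIZE; the second test on va + len - 1 also catches ranges that wrap past the top of the address space. A small userspace sketch of the same check, where BOUNDARY is an invented stand-in for TASK_SIZE:

#include <stdio.h>

#define BOUNDARY 0xc0000000UL	/* stand-in for TASK_SIZE, value is illustrative */

static int range_in_kernelspace(unsigned long va, unsigned int len)
{
	/* Both the first and the last byte must be at or above the boundary. */
	return (va >= BOUNDARY) && ((va + len - 1) >= BOUNDARY);
}

int main(void)
{
	printf("%d\n", range_in_kernelspace(0xc0001000UL, 4));	/* 1: fully above */
	printf("%d\n", range_in_kernelspace(0xbffffffeUL, 4));	/* 0: starts below */
	return 0;
}
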
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index 54c31c285488..86cef6b32253 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -102,65 +102,62 @@ void __cpuinit fpu_init(void)
mxcsr_feature_mask_init();
/* clean state in init */
- if (cpu_has_xsave)
- current_thread_info()->status = TS_XSAVE;
- else
- current_thread_info()->status = 0;
+ current_thread_info()->status = 0;
clear_used_math();
}
#endif /* CONFIG_X86_64 */
-/*
- * The _current_ task is using the FPU for the first time
- * so initialize it and set the mxcsr to its default
- * value at reset if we support XMM instructions and then
- * remember the current task has used the FPU.
- */
-int init_fpu(struct task_struct *tsk)
+static void fpu_finit(struct fpu *fpu)
{
- if (tsk_used_math(tsk)) {
- if (HAVE_HWFP && tsk == current)
- unlazy_fpu(tsk);
- return 0;
- }
-
- /*
- * Memory allocation at the first usage of the FPU and other state.
- */
- if (!tsk->thread.xstate) {
- tsk->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
- GFP_KERNEL);
- if (!tsk->thread.xstate)
- return -ENOMEM;
- }
-
#ifdef CONFIG_X86_32
if (!HAVE_HWFP) {
- memset(tsk->thread.xstate, 0, xstate_size);
- finit_task(tsk);
- set_stopped_child_used_math(tsk);
- return 0;
+ finit_soft_fpu(&fpu->state->soft);
+ return;
}
#endif
if (cpu_has_fxsr) {
- struct i387_fxsave_struct *fx = &tsk->thread.xstate->fxsave;
+ struct i387_fxsave_struct *fx = &fpu->state->fxsave;
memset(fx, 0, xstate_size);
fx->cwd = 0x37f;
if (cpu_has_xmm)
fx->mxcsr = MXCSR_DEFAULT;
} else {
- struct i387_fsave_struct *fp = &tsk->thread.xstate->fsave;
+ struct i387_fsave_struct *fp = &fpu->state->fsave;
memset(fp, 0, xstate_size);
fp->cwd = 0xffff037fu;
fp->swd = 0xffff0000u;
fp->twd = 0xffffffffu;
fp->fos = 0xffff0000u;
}
+}
+
+/*
+ * The _current_ task is using the FPU for the first time
+ * so initialize it and set the mxcsr to its default
+ * value at reset if we support XMM instructions and then
+ * remember the current task has used the FPU.
+ */
+int init_fpu(struct task_struct *tsk)
+{
+ int ret;
+
+ if (tsk_used_math(tsk)) {
+ if (HAVE_HWFP && tsk == current)
+ unlazy_fpu(tsk);
+ return 0;
+ }
+
/*
- * Only the device not available exception or ptrace can call init_fpu.
+ * Memory allocation at the first usage of the FPU and other state.
*/
+ ret = fpu_alloc(&tsk->thread.fpu);
+ if (ret)
+ return ret;
+
+ fpu_finit(&tsk->thread.fpu);
+
set_stopped_child_used_math(tsk);
return 0;
}
@@ -194,7 +191,7 @@ int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
return ret;
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &target->thread.xstate->fxsave, 0, -1);
+ &target->thread.fpu.state->fxsave, 0, -1);
}
int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
@@ -211,19 +208,19 @@ int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
return ret;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.xstate->fxsave, 0, -1);
+ &target->thread.fpu.state->fxsave, 0, -1);
/*
* mxcsr reserved bits must be masked to zero for security reasons.
*/
- target->thread.xstate->fxsave.mxcsr &= mxcsr_feature_mask;
+ target->thread.fpu.state->fxsave.mxcsr &= mxcsr_feature_mask;
/*
* update the header bits in the xsave header, indicating the
* presence of FP and SSE state.
*/
if (cpu_has_xsave)
- target->thread.xstate->xsave.xsave_hdr.xstate_bv |= XSTATE_FPSSE;
+ target->thread.fpu.state->xsave.xsave_hdr.xstate_bv |= XSTATE_FPSSE;
return ret;
}
@@ -246,14 +243,14 @@ int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
* memory layout in the thread struct, so that we can copy the entire
* xstateregs to the user using one user_regset_copyout().
*/
- memcpy(&target->thread.xstate->fxsave.sw_reserved,
+ memcpy(&target->thread.fpu.state->fxsave.sw_reserved,
xstate_fx_sw_bytes, sizeof(xstate_fx_sw_bytes));
/*
* Copy the xstate memory layout.
*/
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &target->thread.xstate->xsave, 0, -1);
+ &target->thread.fpu.state->xsave, 0, -1);
return ret;
}
@@ -272,14 +269,14 @@ int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
return ret;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.xstate->xsave, 0, -1);
+ &target->thread.fpu.state->xsave, 0, -1);
/*
* mxcsr reserved bits must be masked to zero for security reasons.
*/
- target->thread.xstate->fxsave.mxcsr &= mxcsr_feature_mask;
+ target->thread.fpu.state->fxsave.mxcsr &= mxcsr_feature_mask;
- xsave_hdr = &target->thread.xstate->xsave.xsave_hdr;
+ xsave_hdr = &target->thread.fpu.state->xsave.xsave_hdr;
xsave_hdr->xstate_bv &= pcntxt_mask;
/*
@@ -365,7 +362,7 @@ static inline u32 twd_fxsr_to_i387(struct i387_fxsave_struct *fxsave)
static void
convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk)
{
- struct i387_fxsave_struct *fxsave = &tsk->thread.xstate->fxsave;
+ struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state->fxsave;
struct _fpreg *to = (struct _fpreg *) &env->st_space[0];
struct _fpxreg *from = (struct _fpxreg *) &fxsave->st_space[0];
int i;
@@ -405,7 +402,7 @@ static void convert_to_fxsr(struct task_struct *tsk,
const struct user_i387_ia32_struct *env)
{
- struct i387_fxsave_struct *fxsave = &tsk->thread.xstate->fxsave;
+ struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state->fxsave;
struct _fpreg *from = (struct _fpreg *) &env->st_space[0];
struct _fpxreg *to = (struct _fpxreg *) &fxsave->st_space[0];
int i;
@@ -445,7 +442,7 @@ int fpregs_get(struct task_struct *target, const struct user_regset *regset,
if (!cpu_has_fxsr) {
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &target->thread.xstate->fsave, 0,
+ &target->thread.fpu.state->fsave, 0,
-1);
}
@@ -475,7 +472,7 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset,
if (!cpu_has_fxsr) {
return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.xstate->fsave, 0, -1);
+ &target->thread.fpu.state->fsave, 0, -1);
}
if (pos > 0 || count < sizeof(env))
@@ -490,7 +487,7 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset,
* presence of FP.
*/
if (cpu_has_xsave)
- target->thread.xstate->xsave.xsave_hdr.xstate_bv |= XSTATE_FP;
+ target->thread.fpu.state->xsave.xsave_hdr.xstate_bv |= XSTATE_FP;
return ret;
}
@@ -501,7 +498,7 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset,
static inline int save_i387_fsave(struct _fpstate_ia32 __user *buf)
{
struct task_struct *tsk = current;
- struct i387_fsave_struct *fp = &tsk->thread.xstate->fsave;
+ struct i387_fsave_struct *fp = &tsk->thread.fpu.state->fsave;
fp->status = fp->swd;
if (__copy_to_user(buf, fp, sizeof(struct i387_fsave_struct)))
@@ -512,7 +509,7 @@ static inline int save_i387_fsave(struct _fpstate_ia32 __user *buf)
static int save_i387_fxsave(struct _fpstate_ia32 __user *buf)
{
struct task_struct *tsk = current;
- struct i387_fxsave_struct *fx = &tsk->thread.xstate->fxsave;
+ struct i387_fxsave_struct *fx = &tsk->thread.fpu.state->fxsave;
struct user_i387_ia32_struct env;
int err = 0;
@@ -547,7 +544,7 @@ static int save_i387_xsave(void __user *buf)
* header as well as change any contents in the memory layout.
* xrestore as part of sigreturn will capture all the changes.
*/
- tsk->thread.xstate->xsave.xsave_hdr.xstate_bv |= XSTATE_FPSSE;
+ tsk->thread.fpu.state->xsave.xsave_hdr.xstate_bv |= XSTATE_FPSSE;
if (save_i387_fxsave(fx) < 0)
return -1;
@@ -599,7 +596,7 @@ static inline int restore_i387_fsave(struct _fpstate_ia32 __user *buf)
{
struct task_struct *tsk = current;
- return __copy_from_user(&tsk->thread.xstate->fsave, buf,
+ return __copy_from_user(&tsk->thread.fpu.state->fsave, buf,
sizeof(struct i387_fsave_struct));
}
@@ -610,10 +607,10 @@ static int restore_i387_fxsave(struct _fpstate_ia32 __user *buf,
struct user_i387_ia32_struct env;
int err;
- err = __copy_from_user(&tsk->thread.xstate->fxsave, &buf->_fxsr_env[0],
+ err = __copy_from_user(&tsk->thread.fpu.state->fxsave, &buf->_fxsr_env[0],
size);
/* mxcsr reserved bits must be masked to zero for security reasons */
- tsk->thread.xstate->fxsave.mxcsr &= mxcsr_feature_mask;
+ tsk->thread.fpu.state->fxsave.mxcsr &= mxcsr_feature_mask;
if (err || __copy_from_user(&env, buf, sizeof(env)))
return 1;
convert_to_fxsr(tsk, &env);
@@ -629,7 +626,7 @@ static int restore_i387_xsave(void __user *buf)
struct i387_fxsave_struct __user *fx =
(struct i387_fxsave_struct __user *) &fx_user->_fxsr_env[0];
struct xsave_hdr_struct *xsave_hdr =
- &current->thread.xstate->xsave.xsave_hdr;
+ &current->thread.fpu.state->xsave.xsave_hdr;
u64 mask;
int err;
diff --git a/arch/x86/kernel/i8253.c b/arch/x86/kernel/i8253.c
index 23c167925a5c..2dfd31597443 100644
--- a/arch/x86/kernel/i8253.c
+++ b/arch/x86/kernel/i8253.c
@@ -16,7 +16,7 @@
#include <asm/hpet.h>
#include <asm/smp.h>
-DEFINE_SPINLOCK(i8253_lock);
+DEFINE_RAW_SPINLOCK(i8253_lock);
EXPORT_SYMBOL(i8253_lock);
/*
@@ -33,7 +33,7 @@ struct clock_event_device *global_clock_event;
static void init_pit_timer(enum clock_event_mode mode,
struct clock_event_device *evt)
{
- spin_lock(&i8253_lock);
+ raw_spin_lock(&i8253_lock);
switch (mode) {
case CLOCK_EVT_MODE_PERIODIC:
@@ -62,7 +62,7 @@ static void init_pit_timer(enum clock_event_mode mode,
/* Nothing to do here */
break;
}
- spin_unlock(&i8253_lock);
+ raw_spin_unlock(&i8253_lock);
}
/*
@@ -72,10 +72,10 @@ static void init_pit_timer(enum clock_event_mode mode,
*/
static int pit_next_event(unsigned long delta, struct clock_event_device *evt)
{
- spin_lock(&i8253_lock);
+ raw_spin_lock(&i8253_lock);
outb_pit(delta & 0xff , PIT_CH0); /* LSB */
outb_pit(delta >> 8 , PIT_CH0); /* MSB */
- spin_unlock(&i8253_lock);
+ raw_spin_unlock(&i8253_lock);
return 0;
}
@@ -130,7 +130,7 @@ static cycle_t pit_read(struct clocksource *cs)
int count;
u32 jifs;
- spin_lock_irqsave(&i8253_lock, flags);
+ raw_spin_lock_irqsave(&i8253_lock, flags);
/*
* Although our caller may have the read side of xtime_lock,
* this is now a seqlock, and we are cheating in this routine
@@ -176,7 +176,7 @@ static cycle_t pit_read(struct clocksource *cs)
old_count = count;
old_jifs = jifs;
- spin_unlock_irqrestore(&i8253_lock, flags);
+ raw_spin_unlock_irqrestore(&i8253_lock, flags);
count = (LATCH - 1) - count;
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
index 0ed2d300cd46..990ae7cfc578 100644
--- a/arch/x86/kernel/irqinit.c
+++ b/arch/x86/kernel/irqinit.c
@@ -60,7 +60,7 @@ static irqreturn_t math_error_irq(int cpl, void *dev_id)
outb(0, 0xF0);
if (ignore_fpu_irq || !boot_cpu_data.hard_math)
return IRQ_NONE;
- math_error((void __user *)get_irq_regs()->ip);
+ math_error(get_irq_regs(), 0, 16);
return IRQ_HANDLED;
}
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index b2258ca91003..4f4af75b9482 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -47,20 +47,8 @@
#include <asm/debugreg.h>
#include <asm/apicdef.h>
#include <asm/system.h>
-
#include <asm/apic.h>
-/*
- * Put the error code here just in case the user cares:
- */
-static int gdb_x86errcode;
-
-/*
- * Likewise, the vector number here (since GDB only gets the signal
- * number through the usual means, and that's not very specific):
- */
-static int gdb_x86vector = -1;
-
/**
* pt_regs_to_gdb_regs - Convert ptrace regs to GDB regs
* @gdb_regs: A pointer to hold the registers in the order GDB wants.
@@ -211,6 +199,8 @@ static struct hw_breakpoint {
struct perf_event **pev;
} breakinfo[4];
+static unsigned long early_dr7;
+
static void kgdb_correct_hw_break(void)
{
int breakno;
@@ -222,6 +212,14 @@ static void kgdb_correct_hw_break(void)
int cpu = raw_smp_processor_id();
if (!breakinfo[breakno].enabled)
continue;
+ if (dbg_is_early) {
+ set_debugreg(breakinfo[breakno].addr, breakno);
+ early_dr7 |= encode_dr7(breakno,
+ breakinfo[breakno].len,
+ breakinfo[breakno].type);
+ set_debugreg(early_dr7, 7);
+ continue;
+ }
bp = *per_cpu_ptr(breakinfo[breakno].pev, cpu);
info = counter_arch_bp(bp);
if (bp->attr.disabled != 1)
@@ -236,7 +234,8 @@ static void kgdb_correct_hw_break(void)
if (!val)
bp->attr.disabled = 0;
}
- hw_breakpoint_restore();
+ if (!dbg_is_early)
+ hw_breakpoint_restore();
}
static int hw_break_reserve_slot(int breakno)
@@ -245,6 +244,9 @@ static int hw_break_reserve_slot(int breakno)
int cnt = 0;
struct perf_event **pevent;
+ if (dbg_is_early)
+ return 0;
+
for_each_online_cpu(cpu) {
cnt++;
pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);
@@ -270,6 +272,9 @@ static int hw_break_release_slot(int breakno)
struct perf_event **pevent;
int cpu;
+ if (dbg_is_early)
+ return 0;
+
for_each_online_cpu(cpu) {
pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);
if (dbg_release_bp_slot(*pevent))
@@ -314,7 +319,11 @@ static void kgdb_remove_all_hw_break(void)
bp = *per_cpu_ptr(breakinfo[i].pev, cpu);
if (bp->attr.disabled == 1)
continue;
- arch_uninstall_hw_breakpoint(bp);
+ if (dbg_is_early)
+ early_dr7 &= ~encode_dr7(i, breakinfo[i].len,
+ breakinfo[i].type);
+ else
+ arch_uninstall_hw_breakpoint(bp);
bp->attr.disabled = 1;
}
}
@@ -391,6 +400,11 @@ void kgdb_disable_hw_debug(struct pt_regs *regs)
for (i = 0; i < 4; i++) {
if (!breakinfo[i].enabled)
continue;
+ if (dbg_is_early) {
+ early_dr7 &= ~encode_dr7(i, breakinfo[i].len,
+ breakinfo[i].type);
+ continue;
+ }
bp = *per_cpu_ptr(breakinfo[i].pev, cpu);
if (bp->attr.disabled == 1)
continue;
@@ -399,23 +413,6 @@ void kgdb_disable_hw_debug(struct pt_regs *regs)
}
}
-/**
- * kgdb_post_primary_code - Save error vector/code numbers.
- * @regs: Original pt_regs.
- * @e_vector: Original error vector.
- * @err_code: Original error code.
- *
- * This is needed on architectures which support SMP and KGDB.
- * This function is called after all the slave cpus have been put
- * to a known spin state and the primary CPU has control over KGDB.
- */
-void kgdb_post_primary_code(struct pt_regs *regs, int e_vector, int err_code)
-{
- /* primary processor is completely in the debugger */
- gdb_x86vector = e_vector;
- gdb_x86errcode = err_code;
-}
-
#ifdef CONFIG_SMP
/**
* kgdb_roundup_cpus - Get other CPUs into a holding pattern
@@ -567,7 +564,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
return NOTIFY_DONE;
}
- if (kgdb_handle_exception(args->trapnr, args->signr, args->err, regs))
+ if (kgdb_handle_exception(args->trapnr, args->signr, cmd, regs))
return NOTIFY_DONE;
/* Must touch watchdog before return to normal operation */
@@ -575,6 +572,26 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
return NOTIFY_STOP;
}
+#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
+int kgdb_ll_trap(int cmd, const char *str,
+ struct pt_regs *regs, long err, int trap, int sig)
+{
+ struct die_args args = {
+ .regs = regs,
+ .str = str,
+ .err = err,
+ .trapnr = trap,
+ .signr = sig,
+
+ };
+
+ if (!kgdb_io_module_registered)
+ return NOTIFY_DONE;
+
+ return __kgdb_notify(&args, cmd);
+}
+#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
+
static int
kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr)
{
@@ -605,14 +622,15 @@ static struct notifier_block kgdb_notifier = {
*/
int kgdb_arch_init(void)
{
+ return register_die_notifier(&kgdb_notifier);
+}
+
+void kgdb_arch_late(void)
+{
int i, cpu;
- int ret;
struct perf_event_attr attr;
struct perf_event **pevent;
- ret = register_die_notifier(&kgdb_notifier);
- if (ret != 0)
- return ret;
/*
* Pre-allocate the hw breakpoint structures in the non-atomic
* portion of kgdb because this operation requires mutexes to
@@ -624,12 +642,15 @@ int kgdb_arch_init(void)
attr.bp_type = HW_BREAKPOINT_W;
attr.disabled = 1;
for (i = 0; i < 4; i++) {
+ if (breakinfo[i].pev)
+ continue;
breakinfo[i].pev = register_wide_hw_breakpoint(&attr, NULL);
if (IS_ERR(breakinfo[i].pev)) {
- printk(KERN_ERR "kgdb: Could not allocate hw breakpoints\n");
+ printk(KERN_ERR "kgdb: Could not allocate hw "
+ "breakpoints\nDisabling the kernel debugger\n");
breakinfo[i].pev = NULL;
kgdb_arch_exit();
- return -1;
+ return;
}
for_each_online_cpu(cpu) {
pevent = per_cpu_ptr(breakinfo[i].pev, cpu);
@@ -640,7 +661,6 @@ int kgdb_arch_init(void)
}
}
}
- return ret;
}
/**
@@ -690,6 +710,11 @@ unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs)
return instruction_pointer(regs);
}
+void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
+{
+ regs->ip = ip;
+}
+
struct kgdb_arch arch_kgdb_ops = {
/* Breakpoint instruction: */
.gdb_bpt_instr = { 0xcc },
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
index b43bbaebe2c0..345a4b1fe144 100644
--- a/arch/x86/kernel/kprobes.c
+++ b/arch/x86/kernel/kprobes.c
@@ -422,14 +422,22 @@ static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
static void __kprobes clear_btf(void)
{
- if (test_thread_flag(TIF_DEBUGCTLMSR))
- update_debugctlmsr(0);
+ if (test_thread_flag(TIF_BLOCKSTEP)) {
+ unsigned long debugctl = get_debugctlmsr();
+
+ debugctl &= ~DEBUGCTLMSR_BTF;
+ update_debugctlmsr(debugctl);
+ }
}
static void __kprobes restore_btf(void)
{
- if (test_thread_flag(TIF_DEBUGCTLMSR))
- update_debugctlmsr(current->thread.debugctlmsr);
+ if (test_thread_flag(TIF_BLOCKSTEP)) {
+ unsigned long debugctl = get_debugctlmsr();
+
+ debugctl |= DEBUGCTLMSR_BTF;
+ update_debugctlmsr(debugctl);
+ }
}
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
@@ -534,20 +542,6 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
struct kprobe_ctlblk *kcb;
addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
- if (*addr != BREAKPOINT_INSTRUCTION) {
- /*
- * The breakpoint instruction was removed right
- * after we hit it. Another cpu has removed
- * either a probepoint or a debugger breakpoint
- * at this address. In either case, no further
- * handling of this interrupt is appropriate.
- * Back up over the (now missing) int3 and run
- * the original instruction.
- */
- regs->ip = (unsigned long)addr;
- return 1;
- }
-
/*
* We don't want to be preempted for the entire
* duration of kprobe processing. We conditionally
@@ -579,6 +573,19 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
setup_singlestep(p, regs, kcb, 0);
return 1;
}
+ } else if (*addr != BREAKPOINT_INSTRUCTION) {
+ /*
+ * The breakpoint instruction was removed right
+ * after we hit it. Another cpu has removed
+ * either a probepoint or a debugger breakpoint
+ * at this address. In either case, no further
+ * handling of this interrupt is appropriate.
+ * Back up over the (now missing) int3 and run
+ * the original instruction.
+ */
+ regs->ip = (unsigned long)addr;
+ preempt_enable_no_resched();
+ return 1;
} else if (kprobe_running()) {
p = __get_cpu_var(current_kprobe);
if (p->break_handler && p->break_handler(p, regs)) {
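
clear_btf() and restore_btf() above now key off TIF_BLOCKSTEP and do a read-modify-write of DEBUGCTL, flipping only the BTF bit, instead of caching a per-thread MSR value. A userspace sketch of that single-bit toggle; fake_msr stands in for the real rdmsr/wrmsr accesses (BTF is bit 1 of IA32_DEBUGCTL):

#include <stdint.h>
#include <stdio.h>

#define BTF_BIT (1UL << 1)		/* DEBUGCTLMSR_BTF */

static uint64_t fake_msr;		/* stand-in for the real MSR */

static void clear_btf(int blockstep_flag)
{
	if (blockstep_flag)		/* only touch the register when the task uses it */
		fake_msr &= ~BTF_BIT;
}

static void restore_btf(int blockstep_flag)
{
	if (blockstep_flag)
		fake_msr |= BTF_BIT;
}

int main(void)
{
	fake_msr = BTF_BIT;
	clear_btf(1);
	printf("%#llx\n", (unsigned long long)fake_msr);	/* 0 */
	restore_btf(1);
	printf("%#llx\n", (unsigned long long)fake_msr);	/* 0x2 */
	return 0;
}
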
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index feaeb0d3aa4f..eb9b76c716c2 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -29,6 +29,8 @@
#define KVM_SCALE 22
static int kvmclock = 1;
+static int msr_kvm_system_time = MSR_KVM_SYSTEM_TIME;
+static int msr_kvm_wall_clock = MSR_KVM_WALL_CLOCK;
static int parse_no_kvmclock(char *arg)
{
@@ -54,7 +56,8 @@ static unsigned long kvm_get_wallclock(void)
low = (int)__pa_symbol(&wall_clock);
high = ((u64)__pa_symbol(&wall_clock) >> 32);
- native_write_msr(MSR_KVM_WALL_CLOCK, low, high);
+
+ native_write_msr(msr_kvm_wall_clock, low, high);
vcpu_time = &get_cpu_var(hv_clock);
pvclock_read_wallclock(&wall_clock, vcpu_time, &ts);
@@ -130,7 +133,8 @@ static int kvm_register_clock(char *txt)
high = ((u64)__pa(&per_cpu(hv_clock, cpu)) >> 32);
printk(KERN_INFO "kvm-clock: cpu %d, msr %x:%x, %s\n",
cpu, high, low, txt);
- return native_write_msr_safe(MSR_KVM_SYSTEM_TIME, low, high);
+
+ return native_write_msr_safe(msr_kvm_system_time, low, high);
}
#ifdef CONFIG_X86_LOCAL_APIC
@@ -165,14 +169,14 @@ static void __init kvm_smp_prepare_boot_cpu(void)
#ifdef CONFIG_KEXEC
static void kvm_crash_shutdown(struct pt_regs *regs)
{
- native_write_msr_safe(MSR_KVM_SYSTEM_TIME, 0, 0);
+ native_write_msr(msr_kvm_system_time, 0, 0);
native_machine_crash_shutdown(regs);
}
#endif
static void kvm_shutdown(void)
{
- native_write_msr_safe(MSR_KVM_SYSTEM_TIME, 0, 0);
+ native_write_msr(msr_kvm_system_time, 0, 0);
native_machine_shutdown();
}
@@ -181,27 +185,37 @@ void __init kvmclock_init(void)
if (!kvm_para_available())
return;
- if (kvmclock && kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE)) {
- if (kvm_register_clock("boot clock"))
- return;
- pv_time_ops.sched_clock = kvm_clock_read;
- x86_platform.calibrate_tsc = kvm_get_tsc_khz;
- x86_platform.get_wallclock = kvm_get_wallclock;
- x86_platform.set_wallclock = kvm_set_wallclock;
+ if (kvmclock && kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE2)) {
+ msr_kvm_system_time = MSR_KVM_SYSTEM_TIME_NEW;
+ msr_kvm_wall_clock = MSR_KVM_WALL_CLOCK_NEW;
+ } else if (!(kvmclock && kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE)))
+ return;
+
+ printk(KERN_INFO "kvm-clock: Using msrs %x and %x\n",
+ msr_kvm_system_time, msr_kvm_wall_clock);
+
+ if (kvm_register_clock("boot clock"))
+ return;
+ pv_time_ops.sched_clock = kvm_clock_read;
+ x86_platform.calibrate_tsc = kvm_get_tsc_khz;
+ x86_platform.get_wallclock = kvm_get_wallclock;
+ x86_platform.set_wallclock = kvm_set_wallclock;
#ifdef CONFIG_X86_LOCAL_APIC
- x86_cpuinit.setup_percpu_clockev =
- kvm_setup_secondary_clock;
+ x86_cpuinit.setup_percpu_clockev =
+ kvm_setup_secondary_clock;
#endif
#ifdef CONFIG_SMP
- smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
+ smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
#endif
- machine_ops.shutdown = kvm_shutdown;
+ machine_ops.shutdown = kvm_shutdown;
#ifdef CONFIG_KEXEC
- machine_ops.crash_shutdown = kvm_crash_shutdown;
+ machine_ops.crash_shutdown = kvm_crash_shutdown;
#endif
- kvm_get_preset_lpj();
- clocksource_register(&kvm_clock);
- pv_info.paravirt_enabled = 1;
- pv_info.name = "KVM";
- }
+ kvm_get_preset_lpj();
+ clocksource_register(&kvm_clock);
+ pv_info.paravirt_enabled = 1;
+ pv_info.name = "KVM";
+
+ if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE_STABLE_BIT))
+ pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT);
}
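
kvmclock_init() above now picks the new MSR pair when the host advertises KVM_FEATURE_CLOCKSOURCE2 and falls back to the old pair (or bails out entirely) otherwise. A minimal sketch of that selection logic, using placeholder feature bits and MSR indices rather than the real constants:

#include <stdio.h>

#define MSR_SYSTEM_TIME_OLD 0x100	/* placeholder index */
#define MSR_SYSTEM_TIME_NEW 0x200	/* placeholder index */

static int has_feature(int bit)
{
	return bit == 1;		/* pretend only the new clocksource bit is set */
}

int main(void)
{
	int msr_system_time;

	if (has_feature(1))		/* "CLOCKSOURCE2": prefer the new interface */
		msr_system_time = MSR_SYSTEM_TIME_NEW;
	else if (has_feature(0))	/* "CLOCKSOURCE": legacy interface */
		msr_system_time = MSR_SYSTEM_TIME_OLD;
	else
		return 0;		/* no paravirtual clock available */

	printf("using system-time msr %#x\n", msr_system_time);
	return 0;
}
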
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
index cceb5bc3c3c2..fa6551d36c10 100644
--- a/arch/x86/kernel/microcode_core.c
+++ b/arch/x86/kernel/microcode_core.c
@@ -201,9 +201,9 @@ static int do_microcode_update(const void __user *buf, size_t size)
return error;
}
-static int microcode_open(struct inode *unused1, struct file *unused2)
+static int microcode_open(struct inode *inode, struct file *file)
{
- return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
+ return capable(CAP_SYS_RAWIO) ? nonseekable_open(inode, file) : -EPERM;
}
static ssize_t microcode_write(struct file *file, const char __user *buf,
@@ -260,6 +260,7 @@ static void microcode_dev_exit(void)
}
MODULE_ALIAS_MISCDEV(MICROCODE_MINOR);
+MODULE_ALIAS("devname:cpu/microcode");
#else
#define microcode_dev_init() 0
#define microcode_dev_exit() do { } while (0)
diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
index 85a343e28937..356170262a93 100644
--- a/arch/x86/kernel/microcode_intel.c
+++ b/arch/x86/kernel/microcode_intel.c
@@ -343,10 +343,11 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
int (*get_ucode_data)(void *, const void *, size_t))
{
struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
- u8 *ucode_ptr = data, *new_mc = NULL, *mc;
+ u8 *ucode_ptr = data, *new_mc = NULL, *mc = NULL;
int new_rev = uci->cpu_sig.rev;
unsigned int leftover = size;
enum ucode_state state = UCODE_OK;
+ unsigned int curr_mc_size = 0;
while (leftover) {
struct microcode_header_intel mc_header;
@@ -361,9 +362,15 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
break;
}
- mc = vmalloc(mc_size);
- if (!mc)
- break;
+ /* For performance reasons, reuse mc area when possible */
+ if (!mc || mc_size > curr_mc_size) {
+ if (mc)
+ vfree(mc);
+ mc = vmalloc(mc_size);
+ if (!mc)
+ break;
+ curr_mc_size = mc_size;
+ }
if (get_ucode_data(mc, ucode_ptr, mc_size) ||
microcode_sanity_check(mc) < 0) {
@@ -376,13 +383,16 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
vfree(new_mc);
new_rev = mc_header.rev;
new_mc = mc;
- } else
- vfree(mc);
+ mc = NULL; /* trigger new vmalloc */
+ }
ucode_ptr += mc_size;
leftover -= mc_size;
}
+ if (mc)
+ vfree(mc);
+
if (leftover) {
if (new_mc)
vfree(new_mc);
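
The loop above now keeps a single vmalloc() scratch area across iterations and reallocates only when the next microcode blob is larger than the current area. A userspace sketch of that grow-only reuse pattern, with malloc standing in for vmalloc and invented blob sizes:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	size_t sizes[] = { 64, 32, 128, 96 };	/* invented per-blob sizes */
	void *buf = NULL;
	size_t cap = 0;
	size_t i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		size_t need = sizes[i];

		if (!buf || need > cap) {	/* grow only when the blob will not fit */
			free(buf);
			buf = malloc(need);
			if (!buf)
				return 1;
			cap = need;
			printf("grew scratch buffer to %zu bytes\n", cap);
		}
		memset(buf, 0, need);		/* stand-in for copying and validating */
	}

	free(buf);
	return 0;
}
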
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index e81030f71a8f..5ae5d2426edf 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -115,21 +115,6 @@ static void __init MP_bus_info(struct mpc_bus *m)
printk(KERN_WARNING "Unknown bustype %s - ignoring\n", str);
}
-static int bad_ioapic(unsigned long address)
-{
- if (nr_ioapics >= MAX_IO_APICS) {
- printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded "
- "(found %d)\n", MAX_IO_APICS, nr_ioapics);
- panic("Recompile kernel with bigger MAX_IO_APICS!\n");
- }
- if (!address) {
- printk(KERN_ERR "WARNING: Bogus (zero) I/O APIC address"
- " found in table, skipping!\n");
- return 1;
- }
- return 0;
-}
-
static void __init MP_ioapic_info(struct mpc_ioapic *m)
{
if (!(m->flags & MPC_APIC_USABLE))
@@ -138,15 +123,7 @@ static void __init MP_ioapic_info(struct mpc_ioapic *m)
printk(KERN_INFO "I/O APIC #%d Version %d at 0x%X.\n",
m->apicid, m->apicver, m->apicaddr);
- if (bad_ioapic(m->apicaddr))
- return;
-
- mp_ioapics[nr_ioapics].apicaddr = m->apicaddr;
- mp_ioapics[nr_ioapics].apicid = m->apicid;
- mp_ioapics[nr_ioapics].type = m->type;
- mp_ioapics[nr_ioapics].apicver = m->apicver;
- mp_ioapics[nr_ioapics].flags = m->flags;
- nr_ioapics++;
+ mp_register_ioapic(m->apicid, m->apicaddr, gsi_end + 1);
}
static void print_MP_intsrc_info(struct mpc_intsrc *m)
diff --git a/arch/x86/kernel/mrst.c b/arch/x86/kernel/mrst.c
index 0aad8670858e..e796448f0eb5 100644
--- a/arch/x86/kernel/mrst.c
+++ b/arch/x86/kernel/mrst.c
@@ -237,4 +237,9 @@ void __init x86_mrst_early_setup(void)
x86_init.pci.fixup_irqs = x86_init_noop;
legacy_pic = &null_legacy_pic;
+
+ /* Avoid searching for BIOS MP tables */
+ x86_init.mpparse.find_smp_config = x86_init_noop;
+ x86_init.mpparse.get_smp_config = x86_init_uint_noop;
+
}
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
index 4d4468e9f47c..7bf2dc4c8f70 100644
--- a/arch/x86/kernel/msr.c
+++ b/arch/x86/kernel/msr.c
@@ -230,7 +230,7 @@ static int __cpuinit msr_class_cpu_callback(struct notifier_block *nfb,
msr_device_destroy(cpu);
break;
}
- return err ? NOTIFY_BAD : NOTIFY_OK;
+ return notifier_from_errno(err);
}
static struct notifier_block __refdata msr_class_cpu_notifier = {
diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
index 7d2829dde20e..a5bc528d4328 100644
--- a/arch/x86/kernel/pci-swiotlb.c
+++ b/arch/x86/kernel/pci-swiotlb.c
@@ -31,8 +31,6 @@ static struct dma_map_ops swiotlb_dma_ops = {
.free_coherent = swiotlb_free_coherent,
.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
.sync_single_for_device = swiotlb_sync_single_for_device,
- .sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
- .sync_single_range_for_device = swiotlb_sync_single_range_for_device,
.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
.sync_sg_for_device = swiotlb_sync_sg_for_device,
.map_sg = swiotlb_map_sg_attrs,
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 28ad9f4d8b94..e7e35219b32f 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -20,7 +20,6 @@
#include <asm/idle.h>
#include <asm/uaccess.h>
#include <asm/i387.h>
-#include <asm/ds.h>
#include <asm/debugreg.h>
unsigned long idle_halt;
@@ -32,26 +31,22 @@ struct kmem_cache *task_xstate_cachep;
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
+ int ret;
+
*dst = *src;
- if (src->thread.xstate) {
- dst->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
- GFP_KERNEL);
- if (!dst->thread.xstate)
- return -ENOMEM;
- WARN_ON((unsigned long)dst->thread.xstate & 15);
- memcpy(dst->thread.xstate, src->thread.xstate, xstate_size);
+ if (fpu_allocated(&src->thread.fpu)) {
+ memset(&dst->thread.fpu, 0, sizeof(dst->thread.fpu));
+ ret = fpu_alloc(&dst->thread.fpu);
+ if (ret)
+ return ret;
+ fpu_copy(&dst->thread.fpu, &src->thread.fpu);
}
return 0;
}
void free_thread_xstate(struct task_struct *tsk)
{
- if (tsk->thread.xstate) {
- kmem_cache_free(task_xstate_cachep, tsk->thread.xstate);
- tsk->thread.xstate = NULL;
- }
-
- WARN(tsk->thread.ds_ctx, "leaking DS context\n");
+ fpu_free(&tsk->thread.fpu);
}
void free_thread_info(struct thread_info *ti)
@@ -198,11 +193,16 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
prev = &prev_p->thread;
next = &next_p->thread;
- if (test_tsk_thread_flag(next_p, TIF_DS_AREA_MSR) ||
- test_tsk_thread_flag(prev_p, TIF_DS_AREA_MSR))
- ds_switch_to(prev_p, next_p);
- else if (next->debugctlmsr != prev->debugctlmsr)
- update_debugctlmsr(next->debugctlmsr);
+ if (test_tsk_thread_flag(prev_p, TIF_BLOCKSTEP) ^
+ test_tsk_thread_flag(next_p, TIF_BLOCKSTEP)) {
+ unsigned long debugctl = get_debugctlmsr();
+
+ debugctl &= ~DEBUGCTLMSR_BTF;
+ if (test_tsk_thread_flag(next_p, TIF_BLOCKSTEP))
+ debugctl |= DEBUGCTLMSR_BTF;
+
+ update_debugctlmsr(debugctl);
+ }
if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
test_tsk_thread_flag(next_p, TIF_NOTSC)) {
@@ -546,11 +546,13 @@ static int __cpuinit check_c1e_idle(const struct cpuinfo_x86 *c)
* check OSVW bit for CPUs that are not affected
* by erratum #400
*/
- rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, val);
- if (val >= 2) {
- rdmsrl(MSR_AMD64_OSVW_STATUS, val);
- if (!(val & BIT(1)))
- goto no_c1e_idle;
+ if (cpu_has(c, X86_FEATURE_OSVW)) {
+ rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, val);
+ if (val >= 2) {
+ rdmsrl(MSR_AMD64_OSVW_STATUS, val);
+ if (!(val & BIT(1)))
+ goto no_c1e_idle;
+ }
}
return 1;
}
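
__switch_to_xtra() above only rewrites DEBUGCTL when the outgoing and incoming tasks disagree on TIF_BLOCKSTEP, using an XOR of the two flags to skip the MSR access in the common case. A small sketch of that write-only-on-change test, where a counter stands in for the MSR write:

#include <stdio.h>

static int msr_writes;			/* counts stand-in MSR writes */

static void switch_blockstep(int prev_flag, int next_flag)
{
	if (prev_flag ^ next_flag)	/* flags differ, so the register must change */
		msr_writes++;
}

int main(void)
{
	switch_blockstep(0, 0);		/* no write */
	switch_blockstep(0, 1);		/* write */
	switch_blockstep(1, 1);		/* no write */
	printf("msr writes: %d\n", msr_writes);		/* 1 */
	return 0;
}
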
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index f6c62667e30c..8d128783af47 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -55,7 +55,6 @@
#include <asm/cpu.h>
#include <asm/idle.h>
#include <asm/syscalls.h>
-#include <asm/ds.h>
#include <asm/debugreg.h>
asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
@@ -238,13 +237,6 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
kfree(p->thread.io_bitmap_ptr);
p->thread.io_bitmap_max = 0;
}
-
- clear_tsk_thread_flag(p, TIF_DS_AREA_MSR);
- p->thread.ds_ctx = NULL;
-
- clear_tsk_thread_flag(p, TIF_DEBUGCTLMSR);
- p->thread.debugctlmsr = 0;
-
return err;
}
@@ -317,7 +309,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
/* we're going to use this soon, after a few expensive things */
if (preload_fpu)
- prefetch(next->xstate);
+ prefetch(next->fpu.state);
/*
* Reload esp0.
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 17cb3295cbf7..3c2422a99f1f 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -49,7 +49,6 @@
#include <asm/ia32.h>
#include <asm/idle.h>
#include <asm/syscalls.h>
-#include <asm/ds.h>
#include <asm/debugreg.h>
asmlinkage extern void ret_from_fork(void);
@@ -313,13 +312,6 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
if (err)
goto out;
}
-
- clear_tsk_thread_flag(p, TIF_DS_AREA_MSR);
- p->thread.ds_ctx = NULL;
-
- clear_tsk_thread_flag(p, TIF_DEBUGCTLMSR);
- p->thread.debugctlmsr = 0;
-
err = 0;
out:
if (err && p->thread.io_bitmap_ptr) {
@@ -396,7 +388,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
/* we're going to use this soon, after a few expensive things */
if (preload_fpu)
- prefetch(next->xstate);
+ prefetch(next->fpu.state);
/*
* Reload esp0, LDT and the page table pointer:
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 2e9b55027b7e..70c4872cd8aa 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -2,9 +2,6 @@
/*
* Pentium III FXSR, SSE support
* Gareth Hughes <gareth@valinux.com>, May 2000
- *
- * BTS tracing
- * Markus Metzger <markus.t.metzger@intel.com>, Dec 2007
*/
#include <linux/kernel.h>
@@ -22,7 +19,6 @@
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/signal.h>
-#include <linux/workqueue.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
@@ -36,7 +32,6 @@
#include <asm/desc.h>
#include <asm/prctl.h>
#include <asm/proto.h>
-#include <asm/ds.h>
#include <asm/hw_breakpoint.h>
#include "tls.h"
@@ -693,7 +688,7 @@ static int ptrace_set_breakpoint_addr(struct task_struct *tsk, int nr,
struct perf_event_attr attr;
if (!t->ptrace_bps[nr]) {
- hw_breakpoint_init(&attr);
+ ptrace_breakpoint_init(&attr);
/*
* Put stub len and type to register (reserve) an inactive but
* correct bp
@@ -789,342 +784,6 @@ static int ioperm_get(struct task_struct *target,
0, IO_BITMAP_BYTES);
}
-#ifdef CONFIG_X86_PTRACE_BTS
-/*
- * A branch trace store context.
- *
- * Contexts may only be installed by ptrace_bts_config() and only for
- * ptraced tasks.
- *
- * Contexts are destroyed when the tracee is detached from the tracer.
- * The actual destruction work requires interrupts enabled, so the
- * work is deferred and will be scheduled during __ptrace_unlink().
- *
- * Contexts hold an additional task_struct reference on the traced
- * task, as well as a reference on the tracer's mm.
- *
- * Ptrace already holds a task_struct for the duration of ptrace operations,
- * but since destruction is deferred, it may be executed after both
- * tracer and tracee exited.
- */
-struct bts_context {
- /* The branch trace handle. */
- struct bts_tracer *tracer;
-
- /* The buffer used to store the branch trace and its size. */
- void *buffer;
- unsigned int size;
-
- /* The mm that paid for the above buffer. */
- struct mm_struct *mm;
-
- /* The task this context belongs to. */
- struct task_struct *task;
-
- /* The signal to send on a bts buffer overflow. */
- unsigned int bts_ovfl_signal;
-
- /* The work struct to destroy a context. */
- struct work_struct work;
-};
-
-static int alloc_bts_buffer(struct bts_context *context, unsigned int size)
-{
- void *buffer = NULL;
- int err = -ENOMEM;
-
- err = account_locked_memory(current->mm, current->signal->rlim, size);
- if (err < 0)
- return err;
-
- buffer = kzalloc(size, GFP_KERNEL);
- if (!buffer)
- goto out_refund;
-
- context->buffer = buffer;
- context->size = size;
- context->mm = get_task_mm(current);
-
- return 0;
-
- out_refund:
- refund_locked_memory(current->mm, size);
- return err;
-}
-
-static inline void free_bts_buffer(struct bts_context *context)
-{
- if (!context->buffer)
- return;
-
- kfree(context->buffer);
- context->buffer = NULL;
-
- refund_locked_memory(context->mm, context->size);
- context->size = 0;
-
- mmput(context->mm);
- context->mm = NULL;
-}
-
-static void free_bts_context_work(struct work_struct *w)
-{
- struct bts_context *context;
-
- context = container_of(w, struct bts_context, work);
-
- ds_release_bts(context->tracer);
- put_task_struct(context->task);
- free_bts_buffer(context);
- kfree(context);
-}
-
-static inline void free_bts_context(struct bts_context *context)
-{
- INIT_WORK(&context->work, free_bts_context_work);
- schedule_work(&context->work);
-}
-
-static inline struct bts_context *alloc_bts_context(struct task_struct *task)
-{
- struct bts_context *context = kzalloc(sizeof(*context), GFP_KERNEL);
- if (context) {
- context->task = task;
- task->bts = context;
-
- get_task_struct(task);
- }
-
- return context;
-}
-
-static int ptrace_bts_read_record(struct task_struct *child, size_t index,
- struct bts_struct __user *out)
-{
- struct bts_context *context;
- const struct bts_trace *trace;
- struct bts_struct bts;
- const unsigned char *at;
- int error;
-
- context = child->bts;
- if (!context)
- return -ESRCH;
-
- trace = ds_read_bts(context->tracer);
- if (!trace)
- return -ESRCH;
-
- at = trace->ds.top - ((index + 1) * trace->ds.size);
- if ((void *)at < trace->ds.begin)
- at += (trace->ds.n * trace->ds.size);
-
- if (!trace->read)
- return -EOPNOTSUPP;
-
- error = trace->read(context->tracer, at, &bts);
- if (error < 0)
- return error;
-
- if (copy_to_user(out, &bts, sizeof(bts)))
- return -EFAULT;
-
- return sizeof(bts);
-}
-
-static int ptrace_bts_drain(struct task_struct *child,
- long size,
- struct bts_struct __user *out)
-{
- struct bts_context *context;
- const struct bts_trace *trace;
- const unsigned char *at;
- int error, drained = 0;
-
- context = child->bts;
- if (!context)
- return -ESRCH;
-
- trace = ds_read_bts(context->tracer);
- if (!trace)
- return -ESRCH;
-
- if (!trace->read)
- return -EOPNOTSUPP;
-
- if (size < (trace->ds.top - trace->ds.begin))
- return -EIO;
-
- for (at = trace->ds.begin; (void *)at < trace->ds.top;
- out++, drained++, at += trace->ds.size) {
- struct bts_struct bts;
-
- error = trace->read(context->tracer, at, &bts);
- if (error < 0)
- return error;
-
- if (copy_to_user(out, &bts, sizeof(bts)))
- return -EFAULT;
- }
-
- memset(trace->ds.begin, 0, trace->ds.n * trace->ds.size);
-
- error = ds_reset_bts(context->tracer);
- if (error < 0)
- return error;
-
- return drained;
-}
-
-static int ptrace_bts_config(struct task_struct *child,
- long cfg_size,
- const struct ptrace_bts_config __user *ucfg)
-{
- struct bts_context *context;
- struct ptrace_bts_config cfg;
- unsigned int flags = 0;
-
- if (cfg_size < sizeof(cfg))
- return -EIO;
-
- if (copy_from_user(&cfg, ucfg, sizeof(cfg)))
- return -EFAULT;
-
- context = child->bts;
- if (!context)
- context = alloc_bts_context(child);
- if (!context)
- return -ENOMEM;
-
- if (cfg.flags & PTRACE_BTS_O_SIGNAL) {
- if (!cfg.signal)
- return -EINVAL;
-
- return -EOPNOTSUPP;
- context->bts_ovfl_signal = cfg.signal;
- }
-
- ds_release_bts(context->tracer);
- context->tracer = NULL;
-
- if ((cfg.flags & PTRACE_BTS_O_ALLOC) && (cfg.size != context->size)) {
- int err;
-
- free_bts_buffer(context);
- if (!cfg.size)
- return 0;
-
- err = alloc_bts_buffer(context, cfg.size);
- if (err < 0)
- return err;
- }
-
- if (cfg.flags & PTRACE_BTS_O_TRACE)
- flags |= BTS_USER;
-
- if (cfg.flags & PTRACE_BTS_O_SCHED)
- flags |= BTS_TIMESTAMPS;
-
- context->tracer =
- ds_request_bts_task(child, context->buffer, context->size,
- NULL, (size_t)-1, flags);
- if (unlikely(IS_ERR(context->tracer))) {
- int error = PTR_ERR(context->tracer);
-
- free_bts_buffer(context);
- context->tracer = NULL;
- return error;
- }
-
- return sizeof(cfg);
-}
-
-static int ptrace_bts_status(struct task_struct *child,
- long cfg_size,
- struct ptrace_bts_config __user *ucfg)
-{
- struct bts_context *context;
- const struct bts_trace *trace;
- struct ptrace_bts_config cfg;
-
- context = child->bts;
- if (!context)
- return -ESRCH;
-
- if (cfg_size < sizeof(cfg))
- return -EIO;
-
- trace = ds_read_bts(context->tracer);
- if (!trace)
- return -ESRCH;
-
- memset(&cfg, 0, sizeof(cfg));
- cfg.size = trace->ds.end - trace->ds.begin;
- cfg.signal = context->bts_ovfl_signal;
- cfg.bts_size = sizeof(struct bts_struct);
-
- if (cfg.signal)
- cfg.flags |= PTRACE_BTS_O_SIGNAL;
-
- if (trace->ds.flags & BTS_USER)
- cfg.flags |= PTRACE_BTS_O_TRACE;
-
- if (trace->ds.flags & BTS_TIMESTAMPS)
- cfg.flags |= PTRACE_BTS_O_SCHED;
-
- if (copy_to_user(ucfg, &cfg, sizeof(cfg)))
- return -EFAULT;
-
- return sizeof(cfg);
-}
-
-static int ptrace_bts_clear(struct task_struct *child)
-{
- struct bts_context *context;
- const struct bts_trace *trace;
-
- context = child->bts;
- if (!context)
- return -ESRCH;
-
- trace = ds_read_bts(context->tracer);
- if (!trace)
- return -ESRCH;
-
- memset(trace->ds.begin, 0, trace->ds.n * trace->ds.size);
-
- return ds_reset_bts(context->tracer);
-}
-
-static int ptrace_bts_size(struct task_struct *child)
-{
- struct bts_context *context;
- const struct bts_trace *trace;
-
- context = child->bts;
- if (!context)
- return -ESRCH;
-
- trace = ds_read_bts(context->tracer);
- if (!trace)
- return -ESRCH;
-
- return (trace->ds.top - trace->ds.begin) / trace->ds.size;
-}
-
-/*
- * Called from __ptrace_unlink() after the child has been moved back
- * to its original parent.
- */
-void ptrace_bts_untrace(struct task_struct *child)
-{
- if (unlikely(child->bts)) {
- free_bts_context(child->bts);
- child->bts = NULL;
- }
-}
-#endif /* CONFIG_X86_PTRACE_BTS */
-
/*
* Called by kernel/ptrace.c when detaching..
*
@@ -1252,39 +911,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
break;
#endif
- /*
- * These bits need more cooking - not enabled yet:
- */
-#ifdef CONFIG_X86_PTRACE_BTS
- case PTRACE_BTS_CONFIG:
- ret = ptrace_bts_config
- (child, data, (struct ptrace_bts_config __user *)addr);
- break;
-
- case PTRACE_BTS_STATUS:
- ret = ptrace_bts_status
- (child, data, (struct ptrace_bts_config __user *)addr);
- break;
-
- case PTRACE_BTS_SIZE:
- ret = ptrace_bts_size(child);
- break;
-
- case PTRACE_BTS_GET:
- ret = ptrace_bts_read_record
- (child, data, (struct bts_struct __user *) addr);
- break;
-
- case PTRACE_BTS_CLEAR:
- ret = ptrace_bts_clear(child);
- break;
-
- case PTRACE_BTS_DRAIN:
- ret = ptrace_bts_drain
- (child, data, (struct bts_struct __user *) addr);
- break;
-#endif /* CONFIG_X86_PTRACE_BTS */
-
default:
ret = ptrace_request(child, request, addr, data);
break;
@@ -1544,14 +1170,6 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
case PTRACE_GET_THREAD_AREA:
case PTRACE_SET_THREAD_AREA:
-#ifdef CONFIG_X86_PTRACE_BTS
- case PTRACE_BTS_CONFIG:
- case PTRACE_BTS_STATUS:
- case PTRACE_BTS_SIZE:
- case PTRACE_BTS_GET:
- case PTRACE_BTS_CLEAR:
- case PTRACE_BTS_DRAIN:
-#endif /* CONFIG_X86_PTRACE_BTS */
return arch_ptrace(child, request, addr, data);
default:
diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
index 03801f2f761f..239427ca02af 100644
--- a/arch/x86/kernel/pvclock.c
+++ b/arch/x86/kernel/pvclock.c
@@ -31,8 +31,16 @@ struct pvclock_shadow_time {
u32 tsc_to_nsec_mul;
int tsc_shift;
u32 version;
+ u8 flags;
};
+static u8 valid_flags __read_mostly = 0;
+
+void pvclock_set_flags(u8 flags)
+{
+ valid_flags = flags;
+}
+
/*
* Scale a 64-bit delta by scaling and multiplying by a 32-bit fraction,
* yielding a 64-bit result.
@@ -91,6 +99,7 @@ static unsigned pvclock_get_time_values(struct pvclock_shadow_time *dst,
dst->system_timestamp = src->system_time;
dst->tsc_to_nsec_mul = src->tsc_to_system_mul;
dst->tsc_shift = src->tsc_shift;
+ dst->flags = src->flags;
rmb(); /* test version after fetching data */
} while ((src->version & 1) || (dst->version != src->version));
@@ -109,11 +118,14 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
return pv_tsc_khz;
}
+static atomic64_t last_value = ATOMIC64_INIT(0);
+
cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
{
struct pvclock_shadow_time shadow;
unsigned version;
cycle_t ret, offset;
+ u64 last;
do {
version = pvclock_get_time_values(&shadow, src);
@@ -123,6 +135,31 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
barrier();
} while (version != src->version);
+ if ((valid_flags & PVCLOCK_TSC_STABLE_BIT) &&
+ (shadow.flags & PVCLOCK_TSC_STABLE_BIT))
+ return ret;
+
+ /*
+ * Assumption here is that last_value, a global accumulator, always goes
+ * forward. If we are less than that, we should not be much smaller.
+ * We assume there is an error margin we're inside, and then the correction
+ * does not sacrifice accuracy.
+ *
+ * For reads: global may have changed between test and return,
+ * but this means someone else updated the clock at a later time.
+ * We just need to make sure we are not seeing a backwards event.
+ *
+ * For updates: last_value = ret is not enough, since two vcpus could be
+ * updating at the same time, and one of them could be slightly behind,
+ * making the assumption that last_value always goes forward fail to hold.
+ */
+ last = atomic64_read(&last_value);
+ do {
+ if (ret < last)
+ return last;
+ last = atomic64_cmpxchg(&last_value, last, ret);
+ } while (unlikely(last != ret));
+
return ret;
}
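
The new code above keeps pvclock reads from ever going backwards: each reader compares its value against a global last_value and either returns the larger global value or publishes its own via a compare-and-exchange loop. A self-contained userspace sketch of that clamp using C11 atomics; all names here are made up:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t last_value;

static uint64_t clamp_monotonic(uint64_t ret)
{
	uint64_t last = atomic_load(&last_value);

	do {
		if (ret < last)		/* someone already returned a later time */
			return last;
		/* on failure, 'last' is reloaded with the current global value */
	} while (!atomic_compare_exchange_weak(&last_value, &last, ret));

	return ret;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)clamp_monotonic(100));	/* 100 */
	printf("%llu\n", (unsigned long long)clamp_monotonic(90));	/* 100, clamped */
	return 0;
}
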
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
index 12e9feaa2f7a..e72d3fc6547d 100644
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -495,10 +495,18 @@ void force_hpet_resume(void)
/*
* HPET MSI on some boards (ATI SB700/SB800) has side effect on
* floppy DMA. Disable HPET MSI on such platforms.
+ * See erratum #27 (Misinterpreted MSI Requests May Result in
+ * Corrupted LPC DMA Data) in AMD Publication #46837,
+ * "SB700 Family Product Errata", Rev. 1.0, March 2010.
+ *
+ * Also force the read back of the CMP register in hpet_next_event()
+ * to work around the problem that the CMP register write seems to be
+ * delayed. See hpet_next_event() for details.
*/
static void force_disable_hpet_msi(struct pci_dev *unused)
{
hpet_msi_disable = 1;
+ hpet_readback_cmp = 1;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS,
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index c4851eff57b3..b4ae4acbd031 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -676,6 +676,17 @@ static struct dmi_system_id __initdata bad_bios_dmi_table[] = {
DMI_MATCH(DMI_BOARD_NAME, "DG45FC"),
},
},
+ /*
+ * The Dell Inspiron Mini 1012 has DMI_BIOS_VENDOR = "Dell Inc.", so
+ * match on the product name.
+ */
+ {
+ .callback = dmi_low_memory_corruption,
+ .ident = "Phoenix BIOS",
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1012"),
+ },
+ },
#endif
{}
};
@@ -725,6 +736,7 @@ void __init setup_arch(char **cmdline_p)
/* VMI may relocate the fixmap; do this before touching ioremap area */
vmi_init();
+ early_trap_init();
early_cpu_init();
early_ioremap_init();
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index ef6370b00e70..a867940a6dfc 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -265,10 +265,10 @@ void __init setup_per_cpu_areas(void)
#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA)
/*
- * make sure boot cpu node_number is right, when boot cpu is on the
+ * make sure boot cpu numa_node is right, when boot cpu is on the
* node that doesn't have mem installed
*/
- per_cpu(node_number, boot_cpu_id) = cpu_to_node(boot_cpu_id);
+ set_cpu_numa_node(boot_cpu_id, early_cpu_to_node(boot_cpu_id));
#endif
/* Setup node to cpumask map */
diff --git a/arch/x86/kernel/sfi.c b/arch/x86/kernel/sfi.c
index 34e099382651..7ded57896c0a 100644
--- a/arch/x86/kernel/sfi.c
+++ b/arch/x86/kernel/sfi.c
@@ -81,7 +81,6 @@ static int __init sfi_parse_cpus(struct sfi_table_header *table)
#endif /* CONFIG_X86_LOCAL_APIC */
#ifdef CONFIG_X86_IO_APIC
-static u32 gsi_base;
static int __init sfi_parse_ioapic(struct sfi_table_header *table)
{
@@ -94,8 +93,7 @@ static int __init sfi_parse_ioapic(struct sfi_table_header *table)
pentry = (struct sfi_apic_table_entry *)sb->pentry;
for (i = 0; i < num; i++) {
- mp_register_ioapic(i, pentry->phys_addr, gsi_base);
- gsi_base += io_apic_get_redir_entries(i);
+ mp_register_ioapic(i, pentry->phys_addr, gsi_end + 1);
pentry++;
}
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 763d815e27a0..37462f1ddba5 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1215,9 +1215,17 @@ __init void prefill_possible_map(void)
if (!num_processors)
num_processors = 1;
- if (setup_possible_cpus == -1)
- possible = num_processors + disabled_cpus;
- else
+ i = setup_max_cpus ?: 1;
+ if (setup_possible_cpus == -1) {
+ possible = num_processors;
+#ifdef CONFIG_HOTPLUG_CPU
+ if (setup_max_cpus)
+ possible += disabled_cpus;
+#else
+ if (possible > i)
+ possible = i;
+#endif
+ } else
possible = setup_possible_cpus;
total_cpus = max_t(int, possible, num_processors + disabled_cpus);
@@ -1230,11 +1238,23 @@ __init void prefill_possible_map(void)
possible = nr_cpu_ids;
}
+#ifdef CONFIG_HOTPLUG_CPU
+ if (!setup_max_cpus)
+#endif
+ if (possible > i) {
+ printk(KERN_WARNING
+ "%d Processors exceeds max_cpus limit of %u\n",
+ possible, setup_max_cpus);
+ possible = i;
+ }
+
printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
possible, max_t(int, possible - num_processors, 0));
for (i = 0; i < possible; i++)
set_cpu_possible(i, true);
+ for (; i < NR_CPUS; i++)
+ set_cpu_possible(i, false);
nr_cpu_ids = possible;
}
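
The interaction of num_processors, disabled_cpus, possible_cpus= and maxcpus= in the hunk above is easy to misread, so here is a plain-C restatement of the arithmetic as a hypothetical helper; compute_possible() and the hotplug flag are stand-ins for the CONFIG_HOTPLUG_CPU conditionals, not kernel code.

#include <stdio.h>

static int compute_possible(int num_processors, int disabled_cpus,
			    int setup_possible_cpus, int setup_max_cpus,
			    int nr_cpu_ids, int hotplug)
{
	int limit = setup_max_cpus ? setup_max_cpus : 1;
	int possible;

	if (setup_possible_cpus == -1) {
		possible = num_processors;
		if (hotplug) {
			/* leave headroom for disabled cpus, unless maxcpus=0 */
			if (setup_max_cpus)
				possible += disabled_cpus;
		} else if (possible > limit) {
			possible = limit;
		}
	} else {
		possible = setup_possible_cpus;
	}

	if (possible > nr_cpu_ids)		/* nr_cpu_ids / NR_CPUS cap */
		possible = nr_cpu_ids;

	/* without hotplug, or with maxcpus=0, never exceed the boot limit */
	if ((!hotplug || !setup_max_cpus) && possible > limit)
		possible = limit;

	return possible;
}

int main(void)
{
	/* 4 present cpus, 2 disabled, hotplug kernel, maxcpus=8 */
	printf("%d\n", compute_possible(4, 2, -1, 8, 64, 1));	/* 6 */
	/* same box, non-hotplug kernel booted with maxcpus=2 */
	printf("%d\n", compute_possible(4, 2, -1, 2, 64, 0));	/* 2 */
	return 0;
}
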
diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
index 3149032ff107..58de45ee08b6 100644
--- a/arch/x86/kernel/step.c
+++ b/arch/x86/kernel/step.c
@@ -158,22 +158,6 @@ static int enable_single_step(struct task_struct *child)
}
/*
- * Install this value in MSR_IA32_DEBUGCTLMSR whenever child is running.
- */
-static void write_debugctlmsr(struct task_struct *child, unsigned long val)
-{
- if (child->thread.debugctlmsr == val)
- return;
-
- child->thread.debugctlmsr = val;
-
- if (child != current)
- return;
-
- update_debugctlmsr(val);
-}
-
-/*
* Enable single or block step.
*/
static void enable_step(struct task_struct *child, bool block)
@@ -186,15 +170,17 @@ static void enable_step(struct task_struct *child, bool block)
* that uses user-mode single stepping itself.
*/
if (enable_single_step(child) && block) {
- set_tsk_thread_flag(child, TIF_DEBUGCTLMSR);
- write_debugctlmsr(child,
- child->thread.debugctlmsr | DEBUGCTLMSR_BTF);
- } else {
- write_debugctlmsr(child,
- child->thread.debugctlmsr & ~DEBUGCTLMSR_BTF);
-
- if (!child->thread.debugctlmsr)
- clear_tsk_thread_flag(child, TIF_DEBUGCTLMSR);
+ unsigned long debugctl = get_debugctlmsr();
+
+ debugctl |= DEBUGCTLMSR_BTF;
+ update_debugctlmsr(debugctl);
+ set_tsk_thread_flag(child, TIF_BLOCKSTEP);
+ } else if (test_tsk_thread_flag(child, TIF_BLOCKSTEP)) {
+ unsigned long debugctl = get_debugctlmsr();
+
+ debugctl &= ~DEBUGCTLMSR_BTF;
+ update_debugctlmsr(debugctl);
+ clear_tsk_thread_flag(child, TIF_BLOCKSTEP);
}
}
@@ -213,11 +199,13 @@ void user_disable_single_step(struct task_struct *child)
/*
* Make sure block stepping (BTF) is disabled.
*/
- write_debugctlmsr(child,
- child->thread.debugctlmsr & ~DEBUGCTLMSR_BTF);
+ if (test_tsk_thread_flag(child, TIF_BLOCKSTEP)) {
+ unsigned long debugctl = get_debugctlmsr();
- if (!child->thread.debugctlmsr)
- clear_tsk_thread_flag(child, TIF_DEBUGCTLMSR);
+ debugctl &= ~DEBUGCTLMSR_BTF;
+ update_debugctlmsr(debugctl);
+ clear_tsk_thread_flag(child, TIF_BLOCKSTEP);
+ }
/* Always clear TIF_SINGLESTEP... */
clear_tsk_thread_flag(child, TIF_SINGLESTEP);
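
For context, the DEBUGCTLMSR_BTF/TIF_BLOCKSTEP handling above is what backs the x86-only PTRACE_SINGLEBLOCK request ("run until the next branch"). A rough user-space sketch of a tracer driving it follows; error handling is omitted, and the fallback value 33 is the x86 request number from asm/ptrace-abi.h (the request does not exist on every architecture).

#include <unistd.h>
#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/wait.h>

#ifndef PTRACE_SINGLEBLOCK
#define PTRACE_SINGLEBLOCK 33		/* x86 value, asm/ptrace-abi.h */
#endif

int main(void)
{
	pid_t child = fork();

	if (child == 0) {
		/* child: ask to be traced, then exec something small */
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		execl("/bin/true", "true", (char *)NULL);
		_exit(1);
	}

	waitpid(child, NULL, 0);		/* child stops after execve */

	/* let the child run until its next branch/call/return */
	ptrace(PTRACE_SINGLEBLOCK, child, NULL, NULL);
	waitpid(child, NULL, 0);

	ptrace(PTRACE_CONT, child, NULL, NULL);
	waitpid(child, NULL, 0);
	return 0;
}
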
diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
index 86c9f91b48ae..c2f1b26141e2 100644
--- a/arch/x86/kernel/tboot.c
+++ b/arch/x86/kernel/tboot.c
@@ -46,6 +46,7 @@
/* Global pointer to shared data; NULL means no measured launch. */
struct tboot *tboot __read_mostly;
+EXPORT_SYMBOL(tboot);
/* timeout for APs (in secs) to enter wait-for-SIPI state during shutdown */
#define AP_WAIT_TIMEOUT 1
@@ -175,6 +176,9 @@ static void add_mac_region(phys_addr_t start, unsigned long size)
struct tboot_mac_region *mr;
phys_addr_t end = start + size;
+ if (tboot->num_mac_regions >= MAX_TB_MAC_REGIONS)
+ panic("tboot: Too many MAC regions\n");
+
if (start && size) {
mr = &tboot->mac_regions[tboot->num_mac_regions++];
mr->start = round_down(start, PAGE_SIZE);
@@ -184,18 +188,17 @@ static void add_mac_region(phys_addr_t start, unsigned long size)
static int tboot_setup_sleep(void)
{
+ int i;
+
tboot->num_mac_regions = 0;
- /* S3 resume code */
- add_mac_region(acpi_wakeup_address, WAKEUP_SIZE);
+ for (i = 0; i < e820.nr_map; i++) {
+ if ((e820.map[i].type != E820_RAM)
+ && (e820.map[i].type != E820_RESERVED_KERN))
+ continue;
-#ifdef CONFIG_X86_TRAMPOLINE
- /* AP trampoline code */
- add_mac_region(virt_to_phys(trampoline_base), TRAMPOLINE_SIZE);
-#endif
-
- /* kernel code + data + bss */
- add_mac_region(virt_to_phys(_text), _end - _text);
+ add_mac_region(e820.map[i].addr, e820.map[i].size);
+ }
tboot->acpi_sinfo.kernel_s3_resume_vector = acpi_wakeup_address;
diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c
index 17b03dd3a6b5..7fea555929e2 100644
--- a/arch/x86/kernel/tlb_uv.c
+++ b/arch/x86/kernel/tlb_uv.c
@@ -1,7 +1,7 @@
/*
* SGI UltraViolet TLB flush routines.
*
- * (c) 2008 Cliff Wickman <cpw@sgi.com>, SGI.
+ * (c) 2008-2010 Cliff Wickman <cpw@sgi.com>, SGI.
*
* This code is released under the GNU General Public License version 2 or
* later.
@@ -20,42 +20,67 @@
#include <asm/idle.h>
#include <asm/tsc.h>
#include <asm/irq_vectors.h>
+#include <asm/timer.h>
-static struct bau_control **uv_bau_table_bases __read_mostly;
-static int uv_bau_retry_limit __read_mostly;
+struct msg_desc {
+ struct bau_payload_queue_entry *msg;
+ int msg_slot;
+ int sw_ack_slot;
+ struct bau_payload_queue_entry *va_queue_first;
+ struct bau_payload_queue_entry *va_queue_last;
+};
-/* base pnode in this partition */
-static int uv_partition_base_pnode __read_mostly;
+#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD 0x000000000bUL
+
+static int uv_bau_max_concurrent __read_mostly;
+
+static int nobau;
+static int __init setup_nobau(char *arg)
+{
+ nobau = 1;
+ return 0;
+}
+early_param("nobau", setup_nobau);
-static unsigned long uv_mmask __read_mostly;
+/* base pnode in this partition */
+static int uv_partition_base_pnode __read_mostly;
+/* position of pnode (which is nasid>>1): */
+static int uv_nshift __read_mostly;
+static unsigned long uv_mmask __read_mostly;
static DEFINE_PER_CPU(struct ptc_stats, ptcstats);
static DEFINE_PER_CPU(struct bau_control, bau_control);
+static DEFINE_PER_CPU(cpumask_var_t, uv_flush_tlb_mask);
+
+struct reset_args {
+ int sender;
+};
/*
- * Determine the first node on a blade.
+ * Determine the first node on a uvhub. 'Nodes' are used for kernel
+ * memory allocation.
*/
-static int __init blade_to_first_node(int blade)
+static int __init uvhub_to_first_node(int uvhub)
{
int node, b;
for_each_online_node(node) {
b = uv_node_to_blade_id(node);
- if (blade == b)
+ if (uvhub == b)
return node;
}
- return -1; /* shouldn't happen */
+ return -1;
}
/*
- * Determine the apicid of the first cpu on a blade.
+ * Determine the apicid of the first cpu on a uvhub.
*/
-static int __init blade_to_first_apicid(int blade)
+static int __init uvhub_to_first_apicid(int uvhub)
{
int cpu;
for_each_present_cpu(cpu)
- if (blade == uv_cpu_to_blade_id(cpu))
+ if (uvhub == uv_cpu_to_blade_id(cpu))
return per_cpu(x86_cpu_to_apicid, cpu);
return -1;
}
@@ -68,195 +93,459 @@ static int __init blade_to_first_apicid(int blade)
* clear of the Timeout bit (as well) will free the resource. No reply will
* be sent (the hardware will only do one reply per message).
*/
-static void uv_reply_to_message(int resource,
- struct bau_payload_queue_entry *msg,
- struct bau_msg_status *msp)
+static inline void uv_reply_to_message(struct msg_desc *mdp,
+ struct bau_control *bcp)
{
unsigned long dw;
+ struct bau_payload_queue_entry *msg;
- dw = (1 << (resource + UV_SW_ACK_NPENDING)) | (1 << resource);
+ msg = mdp->msg;
+ if (!msg->canceled) {
+ dw = (msg->sw_ack_vector << UV_SW_ACK_NPENDING) |
+ msg->sw_ack_vector;
+ uv_write_local_mmr(
+ UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, dw);
+ }
msg->replied_to = 1;
msg->sw_ack_vector = 0;
- if (msp)
- msp->seen_by.bits = 0;
- uv_write_local_mmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, dw);
}
/*
- * Do all the things a cpu should do for a TLB shootdown message.
- * Other cpu's may come here at the same time for this message.
+ * Process the receipt of a RETRY message
*/
-static void uv_bau_process_message(struct bau_payload_queue_entry *msg,
- int msg_slot, int sw_ack_slot)
+static inline void uv_bau_process_retry_msg(struct msg_desc *mdp,
+ struct bau_control *bcp)
{
- unsigned long this_cpu_mask;
- struct bau_msg_status *msp;
- int cpu;
+ int i;
+ int cancel_count = 0;
+ int slot2;
+ unsigned long msg_res;
+ unsigned long mmr = 0;
+ struct bau_payload_queue_entry *msg;
+ struct bau_payload_queue_entry *msg2;
+ struct ptc_stats *stat;
- msp = __get_cpu_var(bau_control).msg_statuses + msg_slot;
- cpu = uv_blade_processor_id();
- msg->number_of_cpus =
- uv_blade_nr_online_cpus(uv_node_to_blade_id(numa_node_id()));
- this_cpu_mask = 1UL << cpu;
- if (msp->seen_by.bits & this_cpu_mask)
- return;
- atomic_or_long(&msp->seen_by.bits, this_cpu_mask);
+ msg = mdp->msg;
+ stat = &per_cpu(ptcstats, bcp->cpu);
+ stat->d_retries++;
+ /*
+ * cancel any message from msg+1 to the retry itself
+ */
+ for (msg2 = msg+1, i = 0; i < DEST_Q_SIZE; msg2++, i++) {
+ if (msg2 > mdp->va_queue_last)
+ msg2 = mdp->va_queue_first;
+ if (msg2 == msg)
+ break;
+
+ /* same conditions for cancellation as uv_do_reset */
+ if ((msg2->replied_to == 0) && (msg2->canceled == 0) &&
+ (msg2->sw_ack_vector) && ((msg2->sw_ack_vector &
+ msg->sw_ack_vector) == 0) &&
+ (msg2->sending_cpu == msg->sending_cpu) &&
+ (msg2->msg_type != MSG_NOOP)) {
+ slot2 = msg2 - mdp->va_queue_first;
+ mmr = uv_read_local_mmr
+ (UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
+ msg_res = ((msg2->sw_ack_vector << 8) |
+ msg2->sw_ack_vector);
+ /*
+ * This is a message retry; clear the resources held
+ * by the previous message only if they timed out.
+ * If it has not timed out we have an unexpected
+ * situation to report.
+ */
+ if (mmr & (msg_res << 8)) {
+ /*
+ * is the resource timed out?
+ * make everyone ignore the cancelled message.
+ */
+ msg2->canceled = 1;
+ stat->d_canceled++;
+ cancel_count++;
+ uv_write_local_mmr(
+ UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS,
+ (msg_res << 8) | msg_res);
+ } else
+ printk(KERN_INFO "note bau retry: no effect\n");
+ }
+ }
+ if (!cancel_count)
+ stat->d_nocanceled++;
+}
- if (msg->replied_to == 1)
- return;
+/*
+ * Do all the things a cpu should do for a TLB shootdown message.
+ * Other cpu's may come here at the same time for this message.
+ */
+static void uv_bau_process_message(struct msg_desc *mdp,
+ struct bau_control *bcp)
+{
+ int msg_ack_count;
+ short socket_ack_count = 0;
+ struct ptc_stats *stat;
+ struct bau_payload_queue_entry *msg;
+ struct bau_control *smaster = bcp->socket_master;
+ /*
+ * This must be a normal message, or retry of a normal message
+ */
+ msg = mdp->msg;
+ stat = &per_cpu(ptcstats, bcp->cpu);
if (msg->address == TLB_FLUSH_ALL) {
local_flush_tlb();
- __get_cpu_var(ptcstats).alltlb++;
+ stat->d_alltlb++;
} else {
__flush_tlb_one(msg->address);
- __get_cpu_var(ptcstats).onetlb++;
+ stat->d_onetlb++;
}
+ stat->d_requestee++;
+
+ /*
+ * One cpu on each uvhub has the additional job on a RETRY
+ * of releasing the resource held by the message that is
+ * being retried. That message is identified by sending
+ * cpu number.
+ */
+ if (msg->msg_type == MSG_RETRY && bcp == bcp->uvhub_master)
+ uv_bau_process_retry_msg(mdp, bcp);
- __get_cpu_var(ptcstats).requestee++;
+ /*
+ * This is a sw_ack message, so we have to reply to it.
+ * Count each responding cpu on the socket. This avoids
+ * pinging the count's cache line back and forth between
+ * the sockets.
+ */
+ socket_ack_count = atomic_add_short_return(1, (struct atomic_short *)
+ &smaster->socket_acknowledge_count[mdp->msg_slot]);
+ if (socket_ack_count == bcp->cpus_in_socket) {
+ /*
+ * Both sockets dump their completed count total into
+ * the message's count.
+ */
+ smaster->socket_acknowledge_count[mdp->msg_slot] = 0;
+ msg_ack_count = atomic_add_short_return(socket_ack_count,
+ (struct atomic_short *)&msg->acknowledge_count);
+
+ if (msg_ack_count == bcp->cpus_in_uvhub) {
+ /*
+ * All cpus in uvhub saw it; reply
+ */
+ uv_reply_to_message(mdp, bcp);
+ }
+ }
- atomic_inc_short(&msg->acknowledge_count);
- if (msg->number_of_cpus == msg->acknowledge_count)
- uv_reply_to_message(sw_ack_slot, msg, msp);
+ return;
}
/*
- * Examine the payload queue on one distribution node to see
- * which messages have not been seen, and which cpu(s) have not seen them.
+ * Determine the first cpu on a uvhub.
+ */
+static int uvhub_to_first_cpu(int uvhub)
+{
+ int cpu;
+ for_each_present_cpu(cpu)
+ if (uvhub == uv_cpu_to_blade_id(cpu))
+ return cpu;
+ return -1;
+}
+
+/*
+ * Last resort when we get a large number of destination timeouts is
+ * to clear resources held by a given cpu.
+ * Do this with IPI so that all messages in the BAU message queue
+ * can be identified by their nonzero sw_ack_vector field.
*
- * Returns the number of cpu's that have not responded.
+ * This is entered for a single cpu on the uvhub.
+ * The sender wants this uvhub to free a specific message's
+ * sw_ack resources.
*/
-static int uv_examine_destination(struct bau_control *bau_tablesp, int sender)
+static void
+uv_do_reset(void *ptr)
{
- struct bau_payload_queue_entry *msg;
- struct bau_msg_status *msp;
- int count = 0;
int i;
- int j;
+ int slot;
+ int count = 0;
+ unsigned long mmr;
+ unsigned long msg_res;
+ struct bau_control *bcp;
+ struct reset_args *rap;
+ struct bau_payload_queue_entry *msg;
+ struct ptc_stats *stat;
- for (msg = bau_tablesp->va_queue_first, i = 0; i < DEST_Q_SIZE;
- msg++, i++) {
- if ((msg->sending_cpu == sender) && (!msg->replied_to)) {
- msp = bau_tablesp->msg_statuses + i;
- printk(KERN_DEBUG
- "blade %d: address:%#lx %d of %d, not cpu(s): ",
- i, msg->address, msg->acknowledge_count,
- msg->number_of_cpus);
- for (j = 0; j < msg->number_of_cpus; j++) {
- if (!((1L << j) & msp->seen_by.bits)) {
- count++;
- printk("%d ", j);
- }
+ bcp = &per_cpu(bau_control, smp_processor_id());
+ rap = (struct reset_args *)ptr;
+ stat = &per_cpu(ptcstats, bcp->cpu);
+ stat->d_resets++;
+
+ /*
+ * We're looking for the given sender, and
+ * will free its sw_ack resource.
+ * If all cpu's finally responded after the timeout, its
+ * message 'replied_to' was set.
+ */
+ for (msg = bcp->va_queue_first, i = 0; i < DEST_Q_SIZE; msg++, i++) {
+ /* uv_do_reset: same conditions for cancellation as
+ uv_bau_process_retry_msg() */
+ if ((msg->replied_to == 0) &&
+ (msg->canceled == 0) &&
+ (msg->sending_cpu == rap->sender) &&
+ (msg->sw_ack_vector) &&
+ (msg->msg_type != MSG_NOOP)) {
+ /*
+ * make everyone else ignore this message
+ */
+ msg->canceled = 1;
+ slot = msg - bcp->va_queue_first;
+ count++;
+ /*
+ * only reset the resource if it is still pending
+ */
+ mmr = uv_read_local_mmr
+ (UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
+ msg_res = ((msg->sw_ack_vector << 8) |
+ msg->sw_ack_vector);
+ if (mmr & msg_res) {
+ stat->d_rcanceled++;
+ uv_write_local_mmr(
+ UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS,
+ msg_res);
}
- printk("\n");
}
}
- return count;
+ return;
}
/*
- * Examine the payload queue on all the distribution nodes to see
- * which messages have not been seen, and which cpu(s) have not seen them.
- *
- * Returns the number of cpu's that have not responded.
+ * Use IPI to get all target uvhubs to release resources held by
+ * a given sending cpu number.
*/
-static int uv_examine_destinations(struct bau_target_nodemask *distribution)
+static void uv_reset_with_ipi(struct bau_target_uvhubmask *distribution,
+ int sender)
{
- int sender;
- int i;
- int count = 0;
+ int uvhub;
+ int cpu;
+ cpumask_t mask;
+ struct reset_args reset_args;
+
+ reset_args.sender = sender;
- sender = smp_processor_id();
- for (i = 0; i < sizeof(struct bau_target_nodemask) * BITSPERBYTE; i++) {
- if (!bau_node_isset(i, distribution))
+ cpus_clear(mask);
+ /* find a single cpu for each uvhub in this distribution mask */
+ for (uvhub = 0;
+ uvhub < sizeof(struct bau_target_uvhubmask) * BITSPERBYTE;
+ uvhub++) {
+ if (!bau_uvhub_isset(uvhub, distribution))
continue;
- count += uv_examine_destination(uv_bau_table_bases[i], sender);
+ /* find a cpu for this uvhub */
+ cpu = uvhub_to_first_cpu(uvhub);
+ cpu_set(cpu, mask);
}
- return count;
+ /* IPI all cpus; Preemption is already disabled */
+ smp_call_function_many(&mask, uv_do_reset, (void *)&reset_args, 1);
+ return;
+}
+
+static inline unsigned long
+cycles_2_us(unsigned long long cyc)
+{
+ unsigned long long ns;
+ unsigned long us;
+ ns = (cyc * per_cpu(cyc2ns, smp_processor_id()))
+ >> CYC2NS_SCALE_FACTOR;
+ us = ns / 1000;
+ return us;
}
/*
- * wait for completion of a broadcast message
- *
- * return COMPLETE, RETRY or GIVEUP
+ * wait for all cpus on this hub to finish their sends and go quiet
+ * leaves uvhub_quiesce set so that no new broadcasts are started by
+ * bau_flush_send_and_wait()
+ */
+static inline void
+quiesce_local_uvhub(struct bau_control *hmaster)
+{
+ atomic_add_short_return(1, (struct atomic_short *)
+ &hmaster->uvhub_quiesce);
+}
+
+/*
+ * mark this quiet-requestor as done
+ */
+static inline void
+end_uvhub_quiesce(struct bau_control *hmaster)
+{
+ atomic_add_short_return(-1, (struct atomic_short *)
+ &hmaster->uvhub_quiesce);
+}
+
+/*
+ * Wait for completion of a broadcast software ack message
+ * return COMPLETE, RETRY(PLUGGED or TIMEOUT) or GIVEUP
*/
static int uv_wait_completion(struct bau_desc *bau_desc,
- unsigned long mmr_offset, int right_shift)
+ unsigned long mmr_offset, int right_shift, int this_cpu,
+ struct bau_control *bcp, struct bau_control *smaster, long try)
{
- int exams = 0;
- long destination_timeouts = 0;
- long source_timeouts = 0;
+ int relaxes = 0;
unsigned long descriptor_status;
+ unsigned long mmr;
+ unsigned long mask;
+ cycles_t ttime;
+ cycles_t timeout_time;
+ struct ptc_stats *stat = &per_cpu(ptcstats, this_cpu);
+ struct bau_control *hmaster;
+
+ hmaster = bcp->uvhub_master;
+ timeout_time = get_cycles() + bcp->timeout_interval;
+ /* spin on the status MMR, waiting for it to go idle */
while ((descriptor_status = (((unsigned long)
uv_read_local_mmr(mmr_offset) >>
right_shift) & UV_ACT_STATUS_MASK)) !=
DESC_STATUS_IDLE) {
- if (descriptor_status == DESC_STATUS_SOURCE_TIMEOUT) {
- source_timeouts++;
- if (source_timeouts > SOURCE_TIMEOUT_LIMIT)
- source_timeouts = 0;
- __get_cpu_var(ptcstats).s_retry++;
- return FLUSH_RETRY;
- }
/*
- * spin here looking for progress at the destinations
+ * Our software ack messages may be blocked because there are
+ * no swack resources available. As long as none of them
+ * has timed out hardware will NACK our message and its
+ * state will stay IDLE.
*/
- if (descriptor_status == DESC_STATUS_DESTINATION_TIMEOUT) {
- destination_timeouts++;
- if (destination_timeouts > DESTINATION_TIMEOUT_LIMIT) {
- /*
- * returns number of cpus not responding
- */
- if (uv_examine_destinations
- (&bau_desc->distribution) == 0) {
- __get_cpu_var(ptcstats).d_retry++;
- return FLUSH_RETRY;
- }
- exams++;
- if (exams >= uv_bau_retry_limit) {
- printk(KERN_DEBUG
- "uv_flush_tlb_others");
- printk("giving up on cpu %d\n",
- smp_processor_id());
+ if (descriptor_status == DESC_STATUS_SOURCE_TIMEOUT) {
+ stat->s_stimeout++;
+ return FLUSH_GIVEUP;
+ } else if (descriptor_status ==
+ DESC_STATUS_DESTINATION_TIMEOUT) {
+ stat->s_dtimeout++;
+ ttime = get_cycles();
+
+ /*
+ * Our retries may be blocked by all destination
+ * swack resources being consumed, and a timeout
+ * pending. In that case hardware returns the
+ * ERROR that looks like a destination timeout.
+ */
+ if (cycles_2_us(ttime - bcp->send_message) < BIOS_TO) {
+ bcp->conseccompletes = 0;
+ return FLUSH_RETRY_PLUGGED;
+ }
+
+ bcp->conseccompletes = 0;
+ return FLUSH_RETRY_TIMEOUT;
+ } else {
+ /*
+ * descriptor_status is still BUSY
+ */
+ cpu_relax();
+ relaxes++;
+ if (relaxes >= 10000) {
+ relaxes = 0;
+ if (get_cycles() > timeout_time) {
+ quiesce_local_uvhub(hmaster);
+
+ /* single-thread the register change */
+ spin_lock(&hmaster->masks_lock);
+ mmr = uv_read_local_mmr(mmr_offset);
+ mask = 0UL;
+ mask |= (3UL << right_shift);
+ mask = ~mask;
+ mmr &= mask;
+ uv_write_local_mmr(mmr_offset, mmr);
+ spin_unlock(&hmaster->masks_lock);
+ end_uvhub_quiesce(hmaster);
+ stat->s_busy++;
return FLUSH_GIVEUP;
}
- /*
- * delays can hang the simulator
- udelay(1000);
- */
- destination_timeouts = 0;
}
}
- cpu_relax();
}
+ bcp->conseccompletes++;
return FLUSH_COMPLETE;
}
+static inline cycles_t
+sec_2_cycles(unsigned long sec)
+{
+ unsigned long ns;
+ cycles_t cyc;
+
+ ns = sec * 1000000000;
+ cyc = (ns << CYC2NS_SCALE_FACTOR)/(per_cpu(cyc2ns, smp_processor_id()));
+ return cyc;
+}
+
+/*
+ * conditionally add 1 to *v, unless *v is >= u
+ * return 0 if we cannot add 1 to *v because it is >= u
+ * return 1 if we can add 1 to *v because it is < u
+ * the add is atomic
+ *
+ * This is close to atomic_add_unless(), but this allows the 'u' value
+ * to be lowered below the current 'v'. atomic_add_unless can only stop
+ * on equal.
+ */
+static inline int atomic_inc_unless_ge(spinlock_t *lock, atomic_t *v, int u)
+{
+ spin_lock(lock);
+ if (atomic_read(v) >= u) {
+ spin_unlock(lock);
+ return 0;
+ }
+ atomic_inc(v);
+ spin_unlock(lock);
+ return 1;
+}
+
/**
* uv_flush_send_and_wait
*
- * Send a broadcast and wait for a broadcast message to complete.
+ * Send a broadcast and wait for it to complete.
*
- * The flush_mask contains the cpus the broadcast was sent to.
+ * The flush_mask contains the cpus the broadcast is to be sent to, plus
+ * cpus that are on the local uvhub.
*
- * Returns NULL if all remote flushing was done. The mask is zeroed.
+ * Returns NULL if all flushing represented in the mask was done. The mask
+ * is zeroed.
* Returns @flush_mask if some remote flushing remains to be done. The
- * mask will have some bits still set.
+ * mask will have some bits still set, representing any cpus on the local
+ * uvhub (not current cpu) and any on remote uvhubs if the broadcast failed.
*/
-const struct cpumask *uv_flush_send_and_wait(int cpu, int this_pnode,
- struct bau_desc *bau_desc,
- struct cpumask *flush_mask)
+const struct cpumask *uv_flush_send_and_wait(struct bau_desc *bau_desc,
+ struct cpumask *flush_mask,
+ struct bau_control *bcp)
{
- int completion_status = 0;
int right_shift;
- int tries = 0;
- int pnode;
+ int uvhub;
int bit;
+ int completion_status = 0;
+ int seq_number = 0;
+ long try = 0;
+ int cpu = bcp->uvhub_cpu;
+ int this_cpu = bcp->cpu;
+ int this_uvhub = bcp->uvhub;
unsigned long mmr_offset;
unsigned long index;
cycles_t time1;
cycles_t time2;
+ struct ptc_stats *stat = &per_cpu(ptcstats, bcp->cpu);
+ struct bau_control *smaster = bcp->socket_master;
+ struct bau_control *hmaster = bcp->uvhub_master;
+
+ /*
+ * Spin here while there are hmaster->max_concurrent or more active
+ * descriptors. This is the per-uvhub 'throttle'.
+ */
+ if (!atomic_inc_unless_ge(&hmaster->uvhub_lock,
+ &hmaster->active_descriptor_count,
+ hmaster->max_concurrent)) {
+ stat->s_throttles++;
+ do {
+ cpu_relax();
+ } while (!atomic_inc_unless_ge(&hmaster->uvhub_lock,
+ &hmaster->active_descriptor_count,
+ hmaster->max_concurrent));
+ }
+
+ while (hmaster->uvhub_quiesce)
+ cpu_relax();
if (cpu < UV_CPUS_PER_ACT_STATUS) {
mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
@@ -268,24 +557,108 @@ const struct cpumask *uv_flush_send_and_wait(int cpu, int this_pnode,
}
time1 = get_cycles();
do {
- tries++;
+ /*
+ * Every message from any given cpu gets a unique message
+ * sequence number. But retries use that same number.
+ * Our message may have timed out at the destination because
+ * all sw-ack resources are in use and there is a timeout
+ * pending there. In that case, our last send never got
+ * placed into the queue and we need to persist until it
+ * does.
+ *
+ * Make any retry a type MSG_RETRY so that the destination will
+ * free any resource held by a previous message from this cpu.
+ */
+ if (try == 0) {
+ /* use message type set by the caller the first time */
+ seq_number = bcp->message_number++;
+ } else {
+ /* use RETRY type on all the rest; same sequence */
+ bau_desc->header.msg_type = MSG_RETRY;
+ stat->s_retry_messages++;
+ }
+ bau_desc->header.sequence = seq_number;
index = (1UL << UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_SHFT) |
- cpu;
+ bcp->uvhub_cpu;
+ bcp->send_message = get_cycles();
+
uv_write_local_mmr(UVH_LB_BAU_SB_ACTIVATION_CONTROL, index);
+
+ try++;
completion_status = uv_wait_completion(bau_desc, mmr_offset,
- right_shift);
- } while (completion_status == FLUSH_RETRY);
+ right_shift, this_cpu, bcp, smaster, try);
+
+ if (completion_status == FLUSH_RETRY_PLUGGED) {
+ /*
+ * Our retries may be blocked by all destination swack
+ * resources being consumed, and a timeout pending. In
+ * that case hardware immediately returns the ERROR
+ * that looks like a destination timeout.
+ */
+ udelay(TIMEOUT_DELAY);
+ bcp->plugged_tries++;
+ if (bcp->plugged_tries >= PLUGSB4RESET) {
+ bcp->plugged_tries = 0;
+ quiesce_local_uvhub(hmaster);
+ spin_lock(&hmaster->queue_lock);
+ uv_reset_with_ipi(&bau_desc->distribution,
+ this_cpu);
+ spin_unlock(&hmaster->queue_lock);
+ end_uvhub_quiesce(hmaster);
+ bcp->ipi_attempts++;
+ stat->s_resets_plug++;
+ }
+ } else if (completion_status == FLUSH_RETRY_TIMEOUT) {
+ hmaster->max_concurrent = 1;
+ bcp->timeout_tries++;
+ udelay(TIMEOUT_DELAY);
+ if (bcp->timeout_tries >= TIMEOUTSB4RESET) {
+ bcp->timeout_tries = 0;
+ quiesce_local_uvhub(hmaster);
+ spin_lock(&hmaster->queue_lock);
+ uv_reset_with_ipi(&bau_desc->distribution,
+ this_cpu);
+ spin_unlock(&hmaster->queue_lock);
+ end_uvhub_quiesce(hmaster);
+ bcp->ipi_attempts++;
+ stat->s_resets_timeout++;
+ }
+ }
+ if (bcp->ipi_attempts >= 3) {
+ bcp->ipi_attempts = 0;
+ completion_status = FLUSH_GIVEUP;
+ break;
+ }
+ cpu_relax();
+ } while ((completion_status == FLUSH_RETRY_PLUGGED) ||
+ (completion_status == FLUSH_RETRY_TIMEOUT));
time2 = get_cycles();
- __get_cpu_var(ptcstats).sflush += (time2 - time1);
- if (tries > 1)
- __get_cpu_var(ptcstats).retriesok++;
- if (completion_status == FLUSH_GIVEUP) {
+ if ((completion_status == FLUSH_COMPLETE) && (bcp->conseccompletes > 5)
+ && (hmaster->max_concurrent < hmaster->max_concurrent_constant))
+ hmaster->max_concurrent++;
+
+ /*
+ * hold any cpu not timing out here; no other cpu currently held by
+ * the 'throttle' should enter the activation code
+ */
+ while (hmaster->uvhub_quiesce)
+ cpu_relax();
+ atomic_dec(&hmaster->active_descriptor_count);
+
+ /* guard against cycles wrap */
+ if (time2 > time1)
+ stat->s_time += (time2 - time1);
+ else
+ stat->s_requestor--; /* don't count this one */
+ if (completion_status == FLUSH_COMPLETE && try > 1)
+ stat->s_retriesok++;
+ else if (completion_status == FLUSH_GIVEUP) {
/*
* Cause the caller to do an IPI-style TLB shootdown on
- * the cpu's, all of which are still in the mask.
+ * the target cpu's, all of which are still in the mask.
*/
- __get_cpu_var(ptcstats).ptc_i++;
+ stat->s_giveup++;
return flush_mask;
}
@@ -294,18 +667,17 @@ const struct cpumask *uv_flush_send_and_wait(int cpu, int this_pnode,
* use the IPI method of shootdown on them.
*/
for_each_cpu(bit, flush_mask) {
- pnode = uv_cpu_to_pnode(bit);
- if (pnode == this_pnode)
+ uvhub = uv_cpu_to_blade_id(bit);
+ if (uvhub == this_uvhub)
continue;
cpumask_clear_cpu(bit, flush_mask);
}
if (!cpumask_empty(flush_mask))
return flush_mask;
+
return NULL;
}
-static DEFINE_PER_CPU(cpumask_var_t, uv_flush_tlb_mask);
-
/**
* uv_flush_tlb_others - globally purge translation cache of a virtual
* address or all TLB's
@@ -322,8 +694,8 @@ static DEFINE_PER_CPU(cpumask_var_t, uv_flush_tlb_mask);
* The caller has derived the cpumask from the mm_struct. This function
* is called only if there are bits set in the mask. (e.g. flush_tlb_page())
*
- * The cpumask is converted into a nodemask of the nodes containing
- * the cpus.
+ * The cpumask is converted into a uvhubmask of the uvhubs containing
+ * those cpus.
*
* Note that this function should be called with preemption disabled.
*
@@ -335,52 +707,82 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
struct mm_struct *mm,
unsigned long va, unsigned int cpu)
{
- struct cpumask *flush_mask = __get_cpu_var(uv_flush_tlb_mask);
- int i;
- int bit;
- int pnode;
- int uv_cpu;
- int this_pnode;
+ int remotes;
+ int tcpu;
+ int uvhub;
int locals = 0;
struct bau_desc *bau_desc;
+ struct cpumask *flush_mask;
+ struct ptc_stats *stat;
+ struct bau_control *bcp;
- cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu));
+ if (nobau)
+ return cpumask;
- uv_cpu = uv_blade_processor_id();
- this_pnode = uv_hub_info->pnode;
- bau_desc = __get_cpu_var(bau_control).descriptor_base;
- bau_desc += UV_ITEMS_PER_DESCRIPTOR * uv_cpu;
+ bcp = &per_cpu(bau_control, cpu);
+ /*
+ * Each sending cpu has a per-cpu mask which it fills from the caller's
+ * cpu mask. Only remote cpus are converted to uvhubs and copied.
+ */
+ flush_mask = (struct cpumask *)per_cpu(uv_flush_tlb_mask, cpu);
+ /*
+ * copy cpumask to flush_mask, removing current cpu
+ * (current cpu should already have been flushed by the caller and
+ * should never be returned if we return flush_mask)
+ */
+ cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu));
+ if (cpu_isset(cpu, *cpumask))
+ locals++; /* current cpu was targeted */
- bau_nodes_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);
+ bau_desc = bcp->descriptor_base;
+ bau_desc += UV_ITEMS_PER_DESCRIPTOR * bcp->uvhub_cpu;
- i = 0;
- for_each_cpu(bit, flush_mask) {
- pnode = uv_cpu_to_pnode(bit);
- BUG_ON(pnode > (UV_DISTRIBUTION_SIZE - 1));
- if (pnode == this_pnode) {
+ bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);
+ remotes = 0;
+ for_each_cpu(tcpu, flush_mask) {
+ uvhub = uv_cpu_to_blade_id(tcpu);
+ if (uvhub == bcp->uvhub) {
locals++;
continue;
}
- bau_node_set(pnode - uv_partition_base_pnode,
- &bau_desc->distribution);
- i++;
+ bau_uvhub_set(uvhub, &bau_desc->distribution);
+ remotes++;
}
- if (i == 0) {
+ if (remotes == 0) {
/*
- * no off_node flushing; return status for local node
+ * No off_hub flushing; return status for local hub.
+ * Return the caller's mask if all were local (the current
+ * cpu may be in that mask).
*/
if (locals)
- return flush_mask;
+ return cpumask;
else
return NULL;
}
- __get_cpu_var(ptcstats).requestor++;
- __get_cpu_var(ptcstats).ntargeted += i;
+ stat = &per_cpu(ptcstats, cpu);
+ stat->s_requestor++;
+ stat->s_ntargcpu += remotes;
+ remotes = bau_uvhub_weight(&bau_desc->distribution);
+ stat->s_ntarguvhub += remotes;
+ if (remotes >= 16)
+ stat->s_ntarguvhub16++;
+ else if (remotes >= 8)
+ stat->s_ntarguvhub8++;
+ else if (remotes >= 4)
+ stat->s_ntarguvhub4++;
+ else if (remotes >= 2)
+ stat->s_ntarguvhub2++;
+ else
+ stat->s_ntarguvhub1++;
bau_desc->payload.address = va;
bau_desc->payload.sending_cpu = cpu;
- return uv_flush_send_and_wait(uv_cpu, this_pnode, bau_desc, flush_mask);
+ /*
+ * uv_flush_send_and_wait returns null if all cpu's were messaged, or
+ * the adjusted flush_mask if any cpu's were not messaged.
+ */
+ return uv_flush_send_and_wait(bau_desc, flush_mask, bcp);
}
/*
@@ -389,87 +791,70 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
*
* We received a broadcast assist message.
*
- * Interrupts may have been disabled; this interrupt could represent
+ * Interrupts are disabled; this interrupt could represent
* the receipt of several messages.
*
- * All cores/threads on this node get this interrupt.
- * The last one to see it does the s/w ack.
+ * All cores/threads on this hub get this interrupt.
+ * The last one to see it does the software ack.
* (the resource will not be freed until noninterruptable cpus see this
- * interrupt; hardware will timeout the s/w ack and reply ERROR)
+ * interrupt; hardware may timeout the s/w ack and reply ERROR)
*/
void uv_bau_message_interrupt(struct pt_regs *regs)
{
- struct bau_payload_queue_entry *va_queue_first;
- struct bau_payload_queue_entry *va_queue_last;
- struct bau_payload_queue_entry *msg;
- struct pt_regs *old_regs = set_irq_regs(regs);
- cycles_t time1;
- cycles_t time2;
- int msg_slot;
- int sw_ack_slot;
- int fw;
int count = 0;
- unsigned long local_pnode;
-
- ack_APIC_irq();
- exit_idle();
- irq_enter();
-
- time1 = get_cycles();
-
- local_pnode = uv_blade_to_pnode(uv_numa_blade_id());
-
- va_queue_first = __get_cpu_var(bau_control).va_queue_first;
- va_queue_last = __get_cpu_var(bau_control).va_queue_last;
-
- msg = __get_cpu_var(bau_control).bau_msg_head;
+ cycles_t time_start;
+ struct bau_payload_queue_entry *msg;
+ struct bau_control *bcp;
+ struct ptc_stats *stat;
+ struct msg_desc msgdesc;
+
+ time_start = get_cycles();
+ bcp = &per_cpu(bau_control, smp_processor_id());
+ stat = &per_cpu(ptcstats, smp_processor_id());
+ msgdesc.va_queue_first = bcp->va_queue_first;
+ msgdesc.va_queue_last = bcp->va_queue_last;
+ msg = bcp->bau_msg_head;
while (msg->sw_ack_vector) {
count++;
- fw = msg->sw_ack_vector;
- msg_slot = msg - va_queue_first;
- sw_ack_slot = ffs(fw) - 1;
-
- uv_bau_process_message(msg, msg_slot, sw_ack_slot);
-
+ msgdesc.msg_slot = msg - msgdesc.va_queue_first;
+ msgdesc.sw_ack_slot = ffs(msg->sw_ack_vector) - 1;
+ msgdesc.msg = msg;
+ uv_bau_process_message(&msgdesc, bcp);
msg++;
- if (msg > va_queue_last)
- msg = va_queue_first;
- __get_cpu_var(bau_control).bau_msg_head = msg;
+ if (msg > msgdesc.va_queue_last)
+ msg = msgdesc.va_queue_first;
+ bcp->bau_msg_head = msg;
}
+ stat->d_time += (get_cycles() - time_start);
if (!count)
- __get_cpu_var(ptcstats).nomsg++;
+ stat->d_nomsg++;
else if (count > 1)
- __get_cpu_var(ptcstats).multmsg++;
-
- time2 = get_cycles();
- __get_cpu_var(ptcstats).dflush += (time2 - time1);
-
- irq_exit();
- set_irq_regs(old_regs);
+ stat->d_multmsg++;
+ ack_APIC_irq();
}
/*
* uv_enable_timeouts
*
- * Each target blade (i.e. blades that have cpu's) needs to have
+ * Each target uvhub (i.e. a uvhub that has cpu's) needs to have
* shootdown message timeouts enabled. The timeout does not cause
* an interrupt, but causes an error message to be returned to
* the sender.
*/
static void uv_enable_timeouts(void)
{
- int blade;
- int nblades;
+ int uvhub;
+ int nuvhubs;
int pnode;
unsigned long mmr_image;
- nblades = uv_num_possible_blades();
+ nuvhubs = uv_num_possible_blades();
- for (blade = 0; blade < nblades; blade++) {
- if (!uv_blade_nr_possible_cpus(blade))
+ for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
+ if (!uv_blade_nr_possible_cpus(uvhub))
continue;
- pnode = uv_blade_to_pnode(blade);
+ pnode = uv_blade_to_pnode(uvhub);
mmr_image =
uv_read_global_mmr64(pnode, UVH_LB_BAU_MISC_CONTROL);
/*
@@ -479,16 +864,16 @@ static void uv_enable_timeouts(void)
* To program the period, the SOFT_ACK_MODE must be off.
*/
mmr_image &= ~((unsigned long)1 <<
- UV_ENABLE_INTD_SOFT_ACK_MODE_SHIFT);
+ UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT);
uv_write_global_mmr64
(pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
/*
* Set the 4-bit period.
*/
mmr_image &= ~((unsigned long)0xf <<
- UV_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHIFT);
+ UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT);
mmr_image |= (UV_INTD_SOFT_ACK_TIMEOUT_PERIOD <<
- UV_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHIFT);
+ UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT);
uv_write_global_mmr64
(pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
/*
@@ -497,7 +882,7 @@ static void uv_enable_timeouts(void)
* indicated in bits 2:0 (7 causes all of them to timeout).
*/
mmr_image |= ((unsigned long)1 <<
- UV_ENABLE_INTD_SOFT_ACK_MODE_SHIFT);
+ UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT);
uv_write_global_mmr64
(pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
}
@@ -522,9 +907,20 @@ static void uv_ptc_seq_stop(struct seq_file *file, void *data)
{
}
+static inline unsigned long long
+millisec_2_cycles(unsigned long millisec)
+{
+ unsigned long ns;
+ unsigned long long cyc;
+
+ ns = millisec * 1000;
+ cyc = (ns << CYC2NS_SCALE_FACTOR)/(per_cpu(cyc2ns, smp_processor_id()));
+ return cyc;
+}
+
/*
- * Display the statistics thru /proc
- * data points to the cpu number
+ * Display the statistics thru /proc.
+ * 'data' points to the cpu number
*/
static int uv_ptc_seq_show(struct seq_file *file, void *data)
{
@@ -535,78 +931,155 @@ static int uv_ptc_seq_show(struct seq_file *file, void *data)
if (!cpu) {
seq_printf(file,
- "# cpu requestor requestee one all sretry dretry ptc_i ");
+ "# cpu sent stime numuvhubs numuvhubs16 numuvhubs8 ");
seq_printf(file,
- "sw_ack sflush dflush sok dnomsg dmult starget\n");
+ "numuvhubs4 numuvhubs2 numuvhubs1 numcpus dto ");
+ seq_printf(file,
+ "retries rok resetp resett giveup sto bz throt ");
+ seq_printf(file,
+ "sw_ack recv rtime all ");
+ seq_printf(file,
+ "one mult none retry canc nocan reset rcan\n");
}
if (cpu < num_possible_cpus() && cpu_online(cpu)) {
stat = &per_cpu(ptcstats, cpu);
- seq_printf(file, "cpu %d %ld %ld %ld %ld %ld %ld %ld ",
- cpu, stat->requestor,
- stat->requestee, stat->onetlb, stat->alltlb,
- stat->s_retry, stat->d_retry, stat->ptc_i);
- seq_printf(file, "%lx %ld %ld %ld %ld %ld %ld\n",
+ /* source side statistics */
+ seq_printf(file,
+ "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ",
+ cpu, stat->s_requestor, cycles_2_us(stat->s_time),
+ stat->s_ntarguvhub, stat->s_ntarguvhub16,
+ stat->s_ntarguvhub8, stat->s_ntarguvhub4,
+ stat->s_ntarguvhub2, stat->s_ntarguvhub1,
+ stat->s_ntargcpu, stat->s_dtimeout);
+ seq_printf(file, "%ld %ld %ld %ld %ld %ld %ld %ld ",
+ stat->s_retry_messages, stat->s_retriesok,
+ stat->s_resets_plug, stat->s_resets_timeout,
+ stat->s_giveup, stat->s_stimeout,
+ stat->s_busy, stat->s_throttles);
+ /* destination side statistics */
+ seq_printf(file,
+ "%lx %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n",
uv_read_global_mmr64(uv_cpu_to_pnode(cpu),
UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE),
- stat->sflush, stat->dflush,
- stat->retriesok, stat->nomsg,
- stat->multmsg, stat->ntargeted);
+ stat->d_requestee, cycles_2_us(stat->d_time),
+ stat->d_alltlb, stat->d_onetlb, stat->d_multmsg,
+ stat->d_nomsg, stat->d_retries, stat->d_canceled,
+ stat->d_nocanceled, stat->d_resets,
+ stat->d_rcanceled);
}
return 0;
}
/*
+ * -1: reset the statistics
* 0: display meaning of the statistics
- * >0: retry limit
+ * >0: maximum concurrent active descriptors per uvhub (throttle)
*/
static ssize_t uv_ptc_proc_write(struct file *file, const char __user *user,
size_t count, loff_t *data)
{
- long newmode;
+ int cpu;
+ long input_arg;
char optstr[64];
+ struct ptc_stats *stat;
+ struct bau_control *bcp;
if (count == 0 || count > sizeof(optstr))
return -EINVAL;
if (copy_from_user(optstr, user, count))
return -EFAULT;
optstr[count - 1] = '\0';
- if (strict_strtoul(optstr, 10, &newmode) < 0) {
+ if (strict_strtol(optstr, 10, &input_arg) < 0) {
printk(KERN_DEBUG "%s is invalid\n", optstr);
return -EINVAL;
}
- if (newmode == 0) {
+ if (input_arg == 0) {
printk(KERN_DEBUG "# cpu: cpu number\n");
+ printk(KERN_DEBUG "Sender statistics:\n");
+ printk(KERN_DEBUG
+ "sent: number of shootdown messages sent\n");
+ printk(KERN_DEBUG
+ "stime: time spent sending messages\n");
+ printk(KERN_DEBUG
+ "numuvhubs: number of hubs targeted with shootdown\n");
+ printk(KERN_DEBUG
+ "numuvhubs16: number times 16 or more hubs targeted\n");
+ printk(KERN_DEBUG
+ "numuvhubs8: number times 8 or more hubs targeted\n");
+ printk(KERN_DEBUG
+ "numuvhubs4: number times 4 or more hubs targeted\n");
+ printk(KERN_DEBUG
+ "numuvhubs2: number times 2 or more hubs targeted\n");
+ printk(KERN_DEBUG
+ "numuvhubs1: number times 1 hub targeted\n");
+ printk(KERN_DEBUG
+ "numcpus: number of cpus targeted with shootdown\n");
+ printk(KERN_DEBUG
+ "dto: number of destination timeouts\n");
+ printk(KERN_DEBUG
+ "retries: destination timeout retries sent\n");
+ printk(KERN_DEBUG
+ "rok: : destination timeouts successfully retried\n");
+ printk(KERN_DEBUG
+ "resetp: ipi-style resource resets for plugs\n");
+ printk(KERN_DEBUG
+ "resett: ipi-style resource resets for timeouts\n");
+ printk(KERN_DEBUG
+ "giveup: fall-backs to ipi-style shootdowns\n");
+ printk(KERN_DEBUG
+ "sto: number of source timeouts\n");
+ printk(KERN_DEBUG
+ "bz: number of stay-busy's\n");
+ printk(KERN_DEBUG
+ "throt: number times spun in throttle\n");
+ printk(KERN_DEBUG "Destination side statistics:\n");
printk(KERN_DEBUG
- "requestor: times this cpu was the flush requestor\n");
+ "sw_ack: image of UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE\n");
printk(KERN_DEBUG
- "requestee: times this cpu was requested to flush its TLBs\n");
+ "recv: shootdown messages received\n");
printk(KERN_DEBUG
- "one: times requested to flush a single address\n");
+ "rtime: time spent processing messages\n");
printk(KERN_DEBUG
- "all: times requested to flush all TLB's\n");
+ "all: shootdown all-tlb messages\n");
printk(KERN_DEBUG
- "sretry: number of retries of source-side timeouts\n");
+ "one: shootdown one-tlb messages\n");
printk(KERN_DEBUG
- "dretry: number of retries of destination-side timeouts\n");
+ "mult: interrupts that found multiple messages\n");
printk(KERN_DEBUG
- "ptc_i: times UV fell through to IPI-style flushes\n");
+ "none: interrupts that found no messages\n");
printk(KERN_DEBUG
- "sw_ack: image of UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE\n");
+ "retry: number of retry messages processed\n");
printk(KERN_DEBUG
- "sflush_us: cycles spent in uv_flush_tlb_others()\n");
+ "canc: number messages canceled by retries\n");
printk(KERN_DEBUG
- "dflush_us: cycles spent in handling flush requests\n");
- printk(KERN_DEBUG "sok: successes on retry\n");
- printk(KERN_DEBUG "dnomsg: interrupts with no message\n");
+ "nocan: number retries that found nothing to cancel\n");
printk(KERN_DEBUG
- "dmult: interrupts with multiple messages\n");
- printk(KERN_DEBUG "starget: nodes targeted\n");
+ "reset: number of ipi-style reset requests processed\n");
+ printk(KERN_DEBUG
+ "rcan: number messages canceled by reset requests\n");
+ } else if (input_arg == -1) {
+ for_each_present_cpu(cpu) {
+ stat = &per_cpu(ptcstats, cpu);
+ memset(stat, 0, sizeof(struct ptc_stats));
+ }
} else {
- uv_bau_retry_limit = newmode;
- printk(KERN_DEBUG "timeout retry limit:%d\n",
- uv_bau_retry_limit);
+ uv_bau_max_concurrent = input_arg;
+ bcp = &per_cpu(bau_control, smp_processor_id());
+ if (uv_bau_max_concurrent < 1 ||
+ uv_bau_max_concurrent > bcp->cpus_in_uvhub) {
+ printk(KERN_DEBUG
+ "Error: BAU max concurrent %d; %d is invalid\n",
+ bcp->max_concurrent, uv_bau_max_concurrent);
+ return -EINVAL;
+ }
+ printk(KERN_DEBUG "Set BAU max concurrent:%d\n",
+ uv_bau_max_concurrent);
+ for_each_present_cpu(cpu) {
+ bcp = &per_cpu(bau_control, cpu);
+ bcp->max_concurrent = uv_bau_max_concurrent;
+ }
}
return count;
@@ -650,79 +1123,30 @@ static int __init uv_ptc_init(void)
}
/*
- * begin the initialization of the per-blade control structures
- */
-static struct bau_control * __init uv_table_bases_init(int blade, int node)
-{
- int i;
- struct bau_msg_status *msp;
- struct bau_control *bau_tabp;
-
- bau_tabp =
- kmalloc_node(sizeof(struct bau_control), GFP_KERNEL, node);
- BUG_ON(!bau_tabp);
-
- bau_tabp->msg_statuses =
- kmalloc_node(sizeof(struct bau_msg_status) *
- DEST_Q_SIZE, GFP_KERNEL, node);
- BUG_ON(!bau_tabp->msg_statuses);
-
- for (i = 0, msp = bau_tabp->msg_statuses; i < DEST_Q_SIZE; i++, msp++)
- bau_cpubits_clear(&msp->seen_by, (int)
- uv_blade_nr_possible_cpus(blade));
-
- uv_bau_table_bases[blade] = bau_tabp;
-
- return bau_tabp;
-}
-
-/*
- * finish the initialization of the per-blade control structures
- */
-static void __init
-uv_table_bases_finish(int blade,
- struct bau_control *bau_tablesp,
- struct bau_desc *adp)
-{
- struct bau_control *bcp;
- int cpu;
-
- for_each_present_cpu(cpu) {
- if (blade != uv_cpu_to_blade_id(cpu))
- continue;
-
- bcp = (struct bau_control *)&per_cpu(bau_control, cpu);
- bcp->bau_msg_head = bau_tablesp->va_queue_first;
- bcp->va_queue_first = bau_tablesp->va_queue_first;
- bcp->va_queue_last = bau_tablesp->va_queue_last;
- bcp->msg_statuses = bau_tablesp->msg_statuses;
- bcp->descriptor_base = adp;
- }
-}
-
-/*
* initialize the sending side's sending buffers
*/
-static struct bau_desc * __init
+static void
uv_activation_descriptor_init(int node, int pnode)
{
int i;
+ int cpu;
unsigned long pa;
unsigned long m;
unsigned long n;
- struct bau_desc *adp;
- struct bau_desc *ad2;
+ struct bau_desc *bau_desc;
+ struct bau_desc *bd2;
+ struct bau_control *bcp;
/*
* each bau_desc is 64 bytes; there are 8 (UV_ITEMS_PER_DESCRIPTOR)
- * per cpu; and up to 32 (UV_ADP_SIZE) cpu's per blade
+ * per cpu; and up to 32 (UV_ADP_SIZE) cpu's per uvhub
*/
- adp = (struct bau_desc *)kmalloc_node(sizeof(struct bau_desc)*
+ bau_desc = (struct bau_desc *)kmalloc_node(sizeof(struct bau_desc)*
UV_ADP_SIZE*UV_ITEMS_PER_DESCRIPTOR, GFP_KERNEL, node);
- BUG_ON(!adp);
+ BUG_ON(!bau_desc);
- pa = uv_gpa(adp); /* need the real nasid*/
- n = uv_gpa_to_pnode(pa);
+ pa = uv_gpa(bau_desc); /* need the real nasid*/
+ n = pa >> uv_nshift;
m = pa & uv_mmask;
uv_write_global_mmr64(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE,
@@ -731,96 +1155,188 @@ uv_activation_descriptor_init(int node, int pnode)
/*
* initializing all 8 (UV_ITEMS_PER_DESCRIPTOR) descriptors for each
* cpu even though we only use the first one; one descriptor can
- * describe a broadcast to 256 nodes.
+ * describe a broadcast to 256 uv hubs.
*/
- for (i = 0, ad2 = adp; i < (UV_ADP_SIZE*UV_ITEMS_PER_DESCRIPTOR);
- i++, ad2++) {
- memset(ad2, 0, sizeof(struct bau_desc));
- ad2->header.sw_ack_flag = 1;
+ for (i = 0, bd2 = bau_desc; i < (UV_ADP_SIZE*UV_ITEMS_PER_DESCRIPTOR);
+ i++, bd2++) {
+ memset(bd2, 0, sizeof(struct bau_desc));
+ bd2->header.sw_ack_flag = 1;
/*
- * base_dest_nodeid is the first node in the partition, so
- * the bit map will indicate partition-relative node numbers.
- * note that base_dest_nodeid is actually a nasid.
+ * base_dest_nodeid is the nasid (pnode<<1) of the first uvhub
+ * in the partition. The bit map will indicate uvhub numbers,
+ * which are 0-N in a partition. Pnodes are unique system-wide.
*/
- ad2->header.base_dest_nodeid = uv_partition_base_pnode << 1;
- ad2->header.dest_subnodeid = 0x10; /* the LB */
- ad2->header.command = UV_NET_ENDPOINT_INTD;
- ad2->header.int_both = 1;
+ bd2->header.base_dest_nodeid = uv_partition_base_pnode << 1;
+ bd2->header.dest_subnodeid = 0x10; /* the LB */
+ bd2->header.command = UV_NET_ENDPOINT_INTD;
+ bd2->header.int_both = 1;
/*
* all others need to be set to zero:
* fairness chaining multilevel count replied_to
*/
}
- return adp;
+ for_each_present_cpu(cpu) {
+ if (pnode != uv_blade_to_pnode(uv_cpu_to_blade_id(cpu)))
+ continue;
+ bcp = &per_cpu(bau_control, cpu);
+ bcp->descriptor_base = bau_desc;
+ }
}
/*
* initialize the destination side's receiving buffers
+ * entered for each uvhub in the partition
+ * - node is first node (kernel memory notion) on the uvhub
+ * - pnode is the uvhub's physical identifier
*/
-static struct bau_payload_queue_entry * __init
-uv_payload_queue_init(int node, int pnode, struct bau_control *bau_tablesp)
+static void
+uv_payload_queue_init(int node, int pnode)
{
- struct bau_payload_queue_entry *pqp;
- unsigned long pa;
int pn;
+ int cpu;
char *cp;
+ unsigned long pa;
+ struct bau_payload_queue_entry *pqp;
+ struct bau_payload_queue_entry *pqp_malloc;
+ struct bau_control *bcp;
pqp = (struct bau_payload_queue_entry *) kmalloc_node(
(DEST_Q_SIZE + 1) * sizeof(struct bau_payload_queue_entry),
GFP_KERNEL, node);
BUG_ON(!pqp);
+ pqp_malloc = pqp;
cp = (char *)pqp + 31;
pqp = (struct bau_payload_queue_entry *)(((unsigned long)cp >> 5) << 5);
- bau_tablesp->va_queue_first = pqp;
+
+ for_each_present_cpu(cpu) {
+ if (pnode != uv_cpu_to_pnode(cpu))
+ continue;
+ /* for every cpu on this pnode: */
+ bcp = &per_cpu(bau_control, cpu);
+ bcp->va_queue_first = pqp;
+ bcp->bau_msg_head = pqp;
+ bcp->va_queue_last = pqp + (DEST_Q_SIZE - 1);
+ }
/*
* need the pnode of where the memory was really allocated
*/
pa = uv_gpa(pqp);
- pn = uv_gpa_to_pnode(pa);
+ pn = pa >> uv_nshift;
uv_write_global_mmr64(pnode,
UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST,
((unsigned long)pn << UV_PAYLOADQ_PNODE_SHIFT) |
uv_physnodeaddr(pqp));
uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL,
uv_physnodeaddr(pqp));
- bau_tablesp->va_queue_last = pqp + (DEST_Q_SIZE - 1);
uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST,
(unsigned long)
- uv_physnodeaddr(bau_tablesp->va_queue_last));
+ uv_physnodeaddr(pqp + (DEST_Q_SIZE - 1)));
+ /* in effect, all msg_type's are set to MSG_NOOP */
memset(pqp, 0, sizeof(struct bau_payload_queue_entry) * DEST_Q_SIZE);
-
- return pqp;
}
/*
- * Initialization of each UV blade's structures
+ * Initialization of each UV hub's structures
*/
-static int __init uv_init_blade(int blade)
+static void __init uv_init_uvhub(int uvhub, int vector)
{
int node;
int pnode;
- unsigned long pa;
unsigned long apicid;
- struct bau_desc *adp;
- struct bau_payload_queue_entry *pqp;
- struct bau_control *bau_tablesp;
-
- node = blade_to_first_node(blade);
- bau_tablesp = uv_table_bases_init(blade, node);
- pnode = uv_blade_to_pnode(blade);
- adp = uv_activation_descriptor_init(node, pnode);
- pqp = uv_payload_queue_init(node, pnode, bau_tablesp);
- uv_table_bases_finish(blade, bau_tablesp, adp);
+
+ node = uvhub_to_first_node(uvhub);
+ pnode = uv_blade_to_pnode(uvhub);
+ uv_activation_descriptor_init(node, pnode);
+ uv_payload_queue_init(node, pnode);
/*
* the below initialization can't be in firmware because the
* messaging IRQ will be determined by the OS
*/
- apicid = blade_to_first_apicid(blade);
- pa = uv_read_global_mmr64(pnode, UVH_BAU_DATA_CONFIG);
+ apicid = uvhub_to_first_apicid(uvhub);
uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG,
- ((apicid << 32) | UV_BAU_MESSAGE));
- return 0;
+ ((apicid << 32) | vector));
+}
+
+/*
+ * initialize the bau_control structure for each cpu
+ */
+static void uv_init_per_cpu(int nuvhubs)
+{
+ int i, j, k;
+ int cpu;
+ int pnode;
+ int uvhub;
+ short socket = 0;
+ struct bau_control *bcp;
+ struct uvhub_desc *bdp;
+ struct socket_desc *sdp;
+ struct bau_control *hmaster = NULL;
+ struct bau_control *smaster = NULL;
+ struct socket_desc {
+ short num_cpus;
+ short cpu_number[16];
+ };
+ struct uvhub_desc {
+ short num_sockets;
+ short num_cpus;
+ short uvhub;
+ short pnode;
+ struct socket_desc socket[2];
+ };
+ struct uvhub_desc *uvhub_descs;
+
+ uvhub_descs = (struct uvhub_desc *)
+ kmalloc(nuvhubs * sizeof(struct uvhub_desc), GFP_KERNEL);
+ memset(uvhub_descs, 0, nuvhubs * sizeof(struct uvhub_desc));
+ for_each_present_cpu(cpu) {
+ bcp = &per_cpu(bau_control, cpu);
+ memset(bcp, 0, sizeof(struct bau_control));
+ spin_lock_init(&bcp->masks_lock);
+ bcp->max_concurrent = uv_bau_max_concurrent;
+ pnode = uv_cpu_hub_info(cpu)->pnode;
+ uvhub = uv_cpu_hub_info(cpu)->numa_blade_id;
+ bdp = &uvhub_descs[uvhub];
+ bdp->num_cpus++;
+ bdp->uvhub = uvhub;
+ bdp->pnode = pnode;
+ /* time interval to catch a hardware stay-busy bug */
+ bcp->timeout_interval = millisec_2_cycles(3);
+ /* kludge: assume uv_hub.h is constant */
+ socket = (cpu_physical_id(cpu)>>5)&1;
+ if (socket >= bdp->num_sockets)
+ bdp->num_sockets = socket+1;
+ sdp = &bdp->socket[socket];
+ sdp->cpu_number[sdp->num_cpus] = cpu;
+ sdp->num_cpus++;
+ }
+ socket = 0;
+ for_each_possible_blade(uvhub) {
+ bdp = &uvhub_descs[uvhub];
+ for (i = 0; i < bdp->num_sockets; i++) {
+ sdp = &bdp->socket[i];
+ for (j = 0; j < sdp->num_cpus; j++) {
+ cpu = sdp->cpu_number[j];
+ bcp = &per_cpu(bau_control, cpu);
+ bcp->cpu = cpu;
+ if (j == 0) {
+ smaster = bcp;
+ if (i == 0)
+ hmaster = bcp;
+ }
+ bcp->cpus_in_uvhub = bdp->num_cpus;
+ bcp->cpus_in_socket = sdp->num_cpus;
+ bcp->socket_master = smaster;
+ bcp->uvhub_master = hmaster;
+ for (k = 0; k < DEST_Q_SIZE; k++)
+ bcp->socket_acknowledge_count[k] = 0;
+ bcp->uvhub_cpu =
+ uv_cpu_hub_info(cpu)->blade_processor_id;
+ }
+ socket++;
+ }
+ }
+ kfree(uvhub_descs);
}
/*
@@ -828,38 +1344,54 @@ static int __init uv_init_blade(int blade)
*/
static int __init uv_bau_init(void)
{
- int blade;
- int nblades;
+ int uvhub;
+ int pnode;
+ int nuvhubs;
int cur_cpu;
+ int vector;
+ unsigned long mmr;
if (!is_uv_system())
return 0;
+ if (nobau)
+ return 0;
+
for_each_possible_cpu(cur_cpu)
zalloc_cpumask_var_node(&per_cpu(uv_flush_tlb_mask, cur_cpu),
GFP_KERNEL, cpu_to_node(cur_cpu));
- uv_bau_retry_limit = 1;
+ uv_bau_max_concurrent = MAX_BAU_CONCURRENT;
+ uv_nshift = uv_hub_info->m_val;
uv_mmask = (1UL << uv_hub_info->m_val) - 1;
- nblades = uv_num_possible_blades();
+ nuvhubs = uv_num_possible_blades();
- uv_bau_table_bases = (struct bau_control **)
- kmalloc(nblades * sizeof(struct bau_control *), GFP_KERNEL);
- BUG_ON(!uv_bau_table_bases);
+ uv_init_per_cpu(nuvhubs);
uv_partition_base_pnode = 0x7fffffff;
- for (blade = 0; blade < nblades; blade++)
- if (uv_blade_nr_possible_cpus(blade) &&
- (uv_blade_to_pnode(blade) < uv_partition_base_pnode))
- uv_partition_base_pnode = uv_blade_to_pnode(blade);
- for (blade = 0; blade < nblades; blade++)
- if (uv_blade_nr_possible_cpus(blade))
- uv_init_blade(blade);
-
- alloc_intr_gate(UV_BAU_MESSAGE, uv_bau_message_intr1);
+ for (uvhub = 0; uvhub < nuvhubs; uvhub++)
+ if (uv_blade_nr_possible_cpus(uvhub) &&
+ (uv_blade_to_pnode(uvhub) < uv_partition_base_pnode))
+ uv_partition_base_pnode = uv_blade_to_pnode(uvhub);
+
+ vector = UV_BAU_MESSAGE;
+ for_each_possible_blade(uvhub)
+ if (uv_blade_nr_possible_cpus(uvhub))
+ uv_init_uvhub(uvhub, vector);
+
uv_enable_timeouts();
+ alloc_intr_gate(vector, uv_bau_message_intr1);
+
+ for_each_possible_blade(uvhub) {
+ pnode = uv_blade_to_pnode(uvhub);
+ /* INIT the bau */
+ uv_write_global_mmr64(pnode, UVH_LB_BAU_SB_ACTIVATION_CONTROL,
+ ((unsigned long)1 << 63));
+ mmr = 1; /* should be 1 to broadcast to both sockets */
+ uv_write_global_mmr64(pnode, UVH_BAU_DATA_BROADCAST, mmr);
+ }
return 0;
}
-__initcall(uv_bau_init);
-__initcall(uv_ptc_init);
+core_initcall(uv_bau_init);
+core_initcall(uv_ptc_init);
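
The atomic_inc_unless_ge() throttle added above is worth seeing in isolation. Below is a standalone sketch with a pthread mutex standing in for the kernel spinlock (names are illustrative, not kernel code): at most 'limit' senders hold a slot at once, and unlike atomic_add_unless() the check still behaves if 'limit' is later lowered below the current count.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int active;		/* active descriptor count */
static int limit = 2;		/* max concurrent, may be retuned at runtime */

static int inc_unless_ge(void)
{
	int ok = 0;

	pthread_mutex_lock(&lock);
	if (active < limit) {		/* works even if limit was lowered */
		active++;
		ok = 1;
	}
	pthread_mutex_unlock(&lock);
	return ok;
}

static void put_slot(void)
{
	pthread_mutex_lock(&lock);
	active--;
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	/* a sender spins until it gets a slot, then does its broadcast */
	while (!inc_unless_ge())
		;			/* cpu_relax() in the kernel version */
	printf("got slot, %d active\n", active);
	put_slot();
	return 0;
}
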
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 1168e4454188..142d70c74b02 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -15,6 +15,7 @@
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
+#include <linux/kgdb.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ptrace.h>
@@ -108,15 +109,6 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
dec_preempt_count();
}
-#ifdef CONFIG_X86_32
-static inline void
-die_if_kernel(const char *str, struct pt_regs *regs, long err)
-{
- if (!user_mode_vm(regs))
- die(str, regs, err);
-}
-#endif
-
static void __kprobes
do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
long error_code, siginfo_t *info)
@@ -460,6 +452,11 @@ void restart_nmi(void)
/* May run on IST stack. */
dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code)
{
+#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
+ if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
+ == NOTIFY_STOP)
+ return;
+#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
#ifdef CONFIG_KPROBES
if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
== NOTIFY_STOP)
@@ -543,11 +540,11 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
/* DR6 may or may not be cleared by the CPU */
set_debugreg(0, 6);
+
/*
* The processor cleared BTF, so don't mark that we need it set.
*/
- clear_tsk_thread_flag(tsk, TIF_DEBUGCTLMSR);
- tsk->thread.debugctlmsr = 0;
+ clear_tsk_thread_flag(tsk, TIF_BLOCKSTEP);
/* Store the virtualized DR6 value */
tsk->thread.debugreg6 = dr6;
@@ -585,55 +582,67 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
return;
}
-#ifdef CONFIG_X86_64
-static int kernel_math_error(struct pt_regs *regs, const char *str, int trapnr)
-{
- if (fixup_exception(regs))
- return 1;
-
- notify_die(DIE_GPF, str, regs, 0, trapnr, SIGFPE);
- /* Illegal floating point operation in the kernel */
- current->thread.trap_no = trapnr;
- die(str, regs, 0);
- return 0;
-}
-#endif
-
/*
* Note that we play around with the 'TS' bit in an attempt to get
* the correct behaviour even in the presence of the asynchronous
* IRQ13 behaviour
*/
-void math_error(void __user *ip)
+void math_error(struct pt_regs *regs, int error_code, int trapnr)
{
- struct task_struct *task;
+ struct task_struct *task = current;
siginfo_t info;
- unsigned short cwd, swd, err;
+ unsigned short err;
+ char *str = (trapnr == 16) ? "fpu exception" : "simd exception";
+
+ if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP)
+ return;
+ conditional_sti(regs);
+
+ if (!user_mode_vm(regs)) {
+ if (!fixup_exception(regs)) {
+ task->thread.error_code = error_code;
+ task->thread.trap_no = trapnr;
+ die(str, regs, error_code);
+ }
+ return;
+ }
/*
* Save the info for the exception handler and clear the error.
*/
- task = current;
save_init_fpu(task);
- task->thread.trap_no = 16;
- task->thread.error_code = 0;
+ task->thread.trap_no = trapnr;
+ task->thread.error_code = error_code;
info.si_signo = SIGFPE;
info.si_errno = 0;
- info.si_addr = ip;
- /*
- * (~cwd & swd) will mask out exceptions that are not set to unmasked
- * status. 0x3f is the exception bits in these regs, 0x200 is the
- * C1 reg you need in case of a stack fault, 0x040 is the stack
- * fault bit. We should only be taking one exception at a time,
- * so if this combination doesn't produce any single exception,
- * then we have a bad program that isn't synchronizing its FPU usage
- * and it will suffer the consequences since we won't be able to
- * fully reproduce the context of the exception
- */
- cwd = get_fpu_cwd(task);
- swd = get_fpu_swd(task);
+ info.si_addr = (void __user *)regs->ip;
+ if (trapnr == 16) {
+ unsigned short cwd, swd;
+ /*
+ * (~cwd & swd) will mask out exceptions that are not set to unmasked
+ * status. 0x3f is the exception bits in these regs, 0x200 is the
+ * C1 reg you need in case of a stack fault, 0x040 is the stack
+ * fault bit. We should only be taking one exception at a time,
+ * so if this combination doesn't produce any single exception,
+ * then we have a bad program that isn't synchronizing its FPU usage
+ * and it will suffer the consequences since we won't be able to
+ * fully reproduce the context of the exception
+ */
+ cwd = get_fpu_cwd(task);
+ swd = get_fpu_swd(task);
- err = swd & ~cwd;
+ err = swd & ~cwd;
+ } else {
+ /*
+ * The SIMD FPU exceptions are handled a little differently, as there
+ * is only a single status/control register. Thus, to determine which
+ * unmasked exception was caught we must mask the exception mask bits
+ * at 0x1f80, and then use these to mask the exception bits at 0x3f.
+ */
+ unsigned short mxcsr = get_fpu_mxcsr(task);
+ err = ~(mxcsr >> 7) & mxcsr;
+ }
if (err & 0x001) { /* Invalid op */
/*
@@ -662,97 +671,17 @@ void math_error(void __user *ip)
dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code)
{
- conditional_sti(regs);
-
#ifdef CONFIG_X86_32
ignore_fpu_irq = 1;
-#else
- if (!user_mode(regs) &&
- kernel_math_error(regs, "kernel x87 math error", 16))
- return;
#endif
- math_error((void __user *)regs->ip);
-}
-
-static void simd_math_error(void __user *ip)
-{
- struct task_struct *task;
- siginfo_t info;
- unsigned short mxcsr;
-
- /*
- * Save the info for the exception handler and clear the error.
- */
- task = current;
- save_init_fpu(task);
- task->thread.trap_no = 19;
- task->thread.error_code = 0;
- info.si_signo = SIGFPE;
- info.si_errno = 0;
- info.si_code = __SI_FAULT;
- info.si_addr = ip;
- /*
- * The SIMD FPU exceptions are handled a little differently, as there
- * is only a single status/control register. Thus, to determine which
- * unmasked exception was caught we must mask the exception mask bits
- * at 0x1f80, and then use these to mask the exception bits at 0x3f.
- */
- mxcsr = get_fpu_mxcsr(task);
- switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
- case 0x000:
- default:
- break;
- case 0x001: /* Invalid Op */
- info.si_code = FPE_FLTINV;
- break;
- case 0x002: /* Denormalize */
- case 0x010: /* Underflow */
- info.si_code = FPE_FLTUND;
- break;
- case 0x004: /* Zero Divide */
- info.si_code = FPE_FLTDIV;
- break;
- case 0x008: /* Overflow */
- info.si_code = FPE_FLTOVF;
- break;
- case 0x020: /* Precision */
- info.si_code = FPE_FLTRES;
- break;
- }
- force_sig_info(SIGFPE, &info, task);
+ math_error(regs, error_code, 16);
}
dotraplinkage void
do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
{
- conditional_sti(regs);
-
-#ifdef CONFIG_X86_32
- if (cpu_has_xmm) {
- /* Handle SIMD FPU exceptions on PIII+ processors. */
- ignore_fpu_irq = 1;
- simd_math_error((void __user *)regs->ip);
- return;
- }
- /*
- * Handle strange cache flush from user space exception
- * in all other cases. This is undocumented behaviour.
- */
- if (regs->flags & X86_VM_MASK) {
- handle_vm86_fault((struct kernel_vm86_regs *)regs, error_code);
- return;
- }
- current->thread.trap_no = 19;
- current->thread.error_code = error_code;
- die_if_kernel("cache flush denied", regs, error_code);
- force_sig(SIGSEGV, current);
-#else
- if (!user_mode(regs) &&
- kernel_math_error(regs, "kernel simd math error", 19))
- return;
- simd_math_error((void __user *)regs->ip);
-#endif
+ math_error(regs, error_code, 19);
}
dotraplinkage void
@@ -879,6 +808,16 @@ dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
}
#endif
+/* Set of traps needed for early debugging. */
+void __init early_trap_init(void)
+{
+ set_intr_gate_ist(1, &debug, DEBUG_STACK);
+ /* int3 can be called from all */
+ set_system_intr_gate_ist(3, &int3, DEBUG_STACK);
+ set_intr_gate(14, &page_fault);
+ load_idt(&idt_descr);
+}
+
void __init trap_init(void)
{
int i;
@@ -892,10 +831,7 @@ void __init trap_init(void)
#endif
set_intr_gate(0, &divide_error);
- set_intr_gate_ist(1, &debug, DEBUG_STACK);
set_intr_gate_ist(2, &nmi, NMI_STACK);
- /* int3 can be called from all */
- set_system_intr_gate_ist(3, &int3, DEBUG_STACK);
/* int4 can be called from all */
set_system_intr_gate(4, &overflow);
set_intr_gate(5, &bounds);
@@ -911,7 +847,6 @@ void __init trap_init(void)
set_intr_gate(11, &segment_not_present);
set_intr_gate_ist(12, &stack_segment, STACKFAULT_STACK);
set_intr_gate(13, &general_protection);
- set_intr_gate(14, &page_fault);
set_intr_gate(15, &spurious_interrupt_bug);
set_intr_gate(16, &coprocessor_error);
set_intr_gate(17, &alignment_check);
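Before moving on to uv_irq.c: the consolidated math_error() in the hunks above picks the SIGFPE si_code by masking the x87 status word with the control word (err = swd & ~cwd) or, for SIMD, MXCSR against its own mask bits. A small stand-alone example of that masking with made-up register values (illustration only, not kernel code):

#include <stdio.h>

int main(void)
{
	/* x87 (trap 16): cwd holds the mask bits, swd the sticky status bits */
	unsigned short cwd = 0x037b;		/* all exceptions masked except zero-divide */
	unsigned short swd = 0x0004;		/* zero-divide status bit set */
	unsigned short err = swd & ~cwd;	/* -> 0x0004, reported as FPE_FLTDIV */

	/* SIMD (trap 19): MXCSR holds mask bits (7..12) and status bits (0..5) */
	unsigned short mxcsr = 0x1b88;		/* overflow unmasked (OM clear), OE set */
	unsigned short simd = (~(mxcsr >> 7) & mxcsr) & 0x3f;	/* -> 0x0008, FPE_FLTOVF */

	printf("x87 err=%#x, simd err=%#x\n", err, simd);
	return 0;
}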
diff --git a/arch/x86/kernel/uv_irq.c b/arch/x86/kernel/uv_irq.c
index 1d40336b030a..1132129db792 100644
--- a/arch/x86/kernel/uv_irq.c
+++ b/arch/x86/kernel/uv_irq.c
@@ -44,7 +44,7 @@ static void uv_ack_apic(unsigned int irq)
ack_APIC_irq();
}
-struct irq_chip uv_irq_chip = {
+static struct irq_chip uv_irq_chip = {
.name = "UV-CORE",
.startup = uv_noop_ret,
.shutdown = uv_noop,
@@ -141,7 +141,7 @@ int uv_irq_2_mmr_info(int irq, unsigned long *offset, int *pnode)
*/
static int
arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
- unsigned long mmr_offset, int restrict)
+ unsigned long mmr_offset, int limit)
{
const struct cpumask *eligible_cpu = cpumask_of(cpu);
struct irq_desc *desc = irq_to_desc(irq);
@@ -160,7 +160,7 @@ arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
if (err != 0)
return err;
- if (restrict == UV_AFFINITY_CPU)
+ if (limit == UV_AFFINITY_CPU)
desc->status |= IRQ_NO_BALANCING;
else
desc->status |= IRQ_MOVE_PCNTXT;
@@ -214,7 +214,7 @@ static int uv_set_irq_affinity(unsigned int irq, const struct cpumask *mask)
unsigned long mmr_value;
struct uv_IO_APIC_route_entry *entry;
unsigned long mmr_offset;
- unsigned mmr_pnode;
+ int mmr_pnode;
if (set_desc_affinity(desc, mask, &dest))
return -1;
@@ -248,7 +248,7 @@ static int uv_set_irq_affinity(unsigned int irq, const struct cpumask *mask)
* interrupt is raised.
*/
int uv_setup_irq(char *irq_name, int cpu, int mmr_blade,
- unsigned long mmr_offset, int restrict)
+ unsigned long mmr_offset, int limit)
{
int irq, ret;
@@ -258,7 +258,7 @@ int uv_setup_irq(char *irq_name, int cpu, int mmr_blade,
return -EBUSY;
ret = arch_enable_uv_irq(irq_name, irq, cpu, mmr_blade, mmr_offset,
- restrict);
+ limit);
if (ret == irq)
uv_set_irq_2_mmr_info(irq, mmr_offset, mmr_blade);
else
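The uv_irq.c hunks rename the "restrict" parameter to "limit". Presumably the point is that restrict is a C99 keyword, so using it as an identifier trips up sparse and C99 builds; a minimal sketch of the problem (illustrative prototypes, not the real ones):

/* With -std=gnu99 the first prototype is rejected, the second is accepted. */
#if 0
int setup_irq_example(unsigned long mmr_offset, int restrict);	/* parse error in C99 */
#endif
int setup_irq_example(unsigned long mmr_offset, int limit);	/* OK */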
diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
index 693920b22496..1b950d151e58 100644
--- a/arch/x86/kernel/x8664_ksyms_64.c
+++ b/arch/x86/kernel/x8664_ksyms_64.c
@@ -54,7 +54,6 @@ EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(__memcpy);
EXPORT_SYMBOL(empty_zero_page);
-EXPORT_SYMBOL(init_level4_pgt);
#ifndef CONFIG_PARAVIRT
EXPORT_SYMBOL(native_load_gs_index);
#endif
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
index 782c3a362ec6..37e68fc5e24a 100644
--- a/arch/x86/kernel/xsave.c
+++ b/arch/x86/kernel/xsave.c
@@ -99,7 +99,7 @@ int save_i387_xstate(void __user *buf)
if (err)
return err;
- if (task_thread_info(tsk)->status & TS_XSAVE)
+ if (use_xsave())
err = xsave_user(buf);
else
err = fxsave_user(buf);
@@ -109,14 +109,14 @@ int save_i387_xstate(void __user *buf)
task_thread_info(tsk)->status &= ~TS_USEDFPU;
stts();
} else {
- if (__copy_to_user(buf, &tsk->thread.xstate->fxsave,
+ if (__copy_to_user(buf, &tsk->thread.fpu.state->fxsave,
xstate_size))
return -1;
}
clear_used_math(); /* trigger finit */
- if (task_thread_info(tsk)->status & TS_XSAVE) {
+ if (use_xsave()) {
struct _fpstate __user *fx = buf;
struct _xstate __user *x = buf;
u64 xstate_bv;
@@ -225,7 +225,7 @@ int restore_i387_xstate(void __user *buf)
clts();
task_thread_info(current)->status |= TS_USEDFPU;
}
- if (task_thread_info(tsk)->status & TS_XSAVE)
+ if (use_xsave())
err = restore_user_xstate(buf);
else
err = fxrstor_checking((__force struct i387_fxsave_struct *)
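The xsave.c hunks replace the open-coded TS_XSAVE thread-flag tests with a use_xsave() predicate. The sketch below is only a user-space stand-in that queries the XSAVE CPUID bit directly; the real helper lives in the FPU headers and presumably wraps the kernel's cached CPU-feature flag instead:

#include <stdint.h>

static inline int use_xsave(void)
{
	uint32_t eax, ebx, ecx, edx;

	__asm__ volatile("cpuid"
			 : "=a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx)
			 : "a"(1), "c"(0));
	(void)eax; (void)ebx; (void)edx;
	return (ecx >> 26) & 1;		/* CPUID.1:ECX.XSAVE[bit 26] */
}

int main(void) { return !use_xsave(); }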
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 4dade6ac0827..5ac0bb465ed6 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -33,6 +33,7 @@
#include <asm/kvm_emulate.h>
#include "x86.h"
+#include "tss.h"
/*
* Opcode effective-address decode tables.
@@ -50,6 +51,8 @@
#define DstReg (2<<1) /* Register operand. */
#define DstMem (3<<1) /* Memory operand. */
#define DstAcc (4<<1) /* Destination Accumulator */
+#define DstDI (5<<1) /* Destination is in ES:(E)DI */
+#define DstMem64 (6<<1) /* 64bit memory operand */
#define DstMask (7<<1)
/* Source operand type. */
#define SrcNone (0<<4) /* No source operand. */
@@ -63,6 +66,7 @@
#define SrcOne (7<<4) /* Implied '1' */
#define SrcImmUByte (8<<4) /* 8-bit unsigned immediate operand. */
#define SrcImmU (9<<4) /* Immediate operand, unsigned */
+#define SrcSI (0xa<<4) /* Source is in the DS:RSI */
#define SrcMask (0xf<<4)
/* Generic ModRM decode. */
#define ModRM (1<<8)
@@ -85,6 +89,9 @@
#define Src2ImmByte (2<<29)
#define Src2One (3<<29)
#define Src2Imm16 (4<<29)
+#define Src2Mem16 (5<<29) /* Used for Ep encoding. First argument has to be
+ in memory and second argument is located
+ immediately after the first one in memory. */
#define Src2Mask (7<<29)
enum {
@@ -147,8 +154,8 @@ static u32 opcode_table[256] = {
0, 0, 0, 0,
/* 0x68 - 0x6F */
SrcImm | Mov | Stack, 0, SrcImmByte | Mov | Stack, 0,
- SrcNone | ByteOp | ImplicitOps, SrcNone | ImplicitOps, /* insb, insw/insd */
- SrcNone | ByteOp | ImplicitOps, SrcNone | ImplicitOps, /* outsb, outsw/outsd */
+ DstDI | ByteOp | Mov | String, DstDI | Mov | String, /* insb, insw/insd */
+ SrcSI | ByteOp | ImplicitOps | String, SrcSI | ImplicitOps | String, /* outsb, outsw/outsd */
/* 0x70 - 0x77 */
SrcImmByte, SrcImmByte, SrcImmByte, SrcImmByte,
SrcImmByte, SrcImmByte, SrcImmByte, SrcImmByte,
@@ -173,12 +180,12 @@ static u32 opcode_table[256] = {
/* 0xA0 - 0xA7 */
ByteOp | DstReg | SrcMem | Mov | MemAbs, DstReg | SrcMem | Mov | MemAbs,
ByteOp | DstMem | SrcReg | Mov | MemAbs, DstMem | SrcReg | Mov | MemAbs,
- ByteOp | ImplicitOps | Mov | String, ImplicitOps | Mov | String,
- ByteOp | ImplicitOps | String, ImplicitOps | String,
+ ByteOp | SrcSI | DstDI | Mov | String, SrcSI | DstDI | Mov | String,
+ ByteOp | SrcSI | DstDI | String, SrcSI | DstDI | String,
/* 0xA8 - 0xAF */
- 0, 0, ByteOp | ImplicitOps | Mov | String, ImplicitOps | Mov | String,
- ByteOp | ImplicitOps | Mov | String, ImplicitOps | Mov | String,
- ByteOp | ImplicitOps | String, ImplicitOps | String,
+ 0, 0, ByteOp | DstDI | Mov | String, DstDI | Mov | String,
+ ByteOp | SrcSI | DstAcc | Mov | String, SrcSI | DstAcc | Mov | String,
+ ByteOp | DstDI | String, DstDI | String,
/* 0xB0 - 0xB7 */
ByteOp | DstReg | SrcImm | Mov, ByteOp | DstReg | SrcImm | Mov,
ByteOp | DstReg | SrcImm | Mov, ByteOp | DstReg | SrcImm | Mov,
@@ -204,13 +211,13 @@ static u32 opcode_table[256] = {
0, 0, 0, 0, 0, 0, 0, 0,
/* 0xE0 - 0xE7 */
0, 0, 0, 0,
- ByteOp | SrcImmUByte, SrcImmUByte,
- ByteOp | SrcImmUByte, SrcImmUByte,
+ ByteOp | SrcImmUByte | DstAcc, SrcImmUByte | DstAcc,
+ ByteOp | SrcImmUByte | DstAcc, SrcImmUByte | DstAcc,
/* 0xE8 - 0xEF */
SrcImm | Stack, SrcImm | ImplicitOps,
SrcImmU | Src2Imm16 | No64, SrcImmByte | ImplicitOps,
- SrcNone | ByteOp | ImplicitOps, SrcNone | ImplicitOps,
- SrcNone | ByteOp | ImplicitOps, SrcNone | ImplicitOps,
+ SrcNone | ByteOp | DstAcc, SrcNone | DstAcc,
+ SrcNone | ByteOp | DstAcc, SrcNone | DstAcc,
/* 0xF0 - 0xF7 */
0, 0, 0, 0,
ImplicitOps | Priv, ImplicitOps, Group | Group3_Byte, Group | Group3,
@@ -343,7 +350,8 @@ static u32 group_table[] = {
[Group5*8] =
DstMem | SrcNone | ModRM, DstMem | SrcNone | ModRM,
SrcMem | ModRM | Stack, 0,
- SrcMem | ModRM | Stack, 0, SrcMem | ModRM | Stack, 0,
+ SrcMem | ModRM | Stack, SrcMem | ModRM | Src2Mem16 | ImplicitOps,
+ SrcMem | ModRM | Stack, 0,
[Group7*8] =
0, 0, ModRM | SrcMem | Priv, ModRM | SrcMem | Priv,
SrcNone | ModRM | DstMem | Mov, 0,
@@ -353,14 +361,14 @@ static u32 group_table[] = {
DstMem | SrcImmByte | ModRM, DstMem | SrcImmByte | ModRM | Lock,
DstMem | SrcImmByte | ModRM | Lock, DstMem | SrcImmByte | ModRM | Lock,
[Group9*8] =
- 0, ImplicitOps | ModRM | Lock, 0, 0, 0, 0, 0, 0,
+ 0, DstMem64 | ModRM | Lock, 0, 0, 0, 0, 0, 0,
};
static u32 group2_table[] = {
[Group7*8] =
- SrcNone | ModRM | Priv, 0, 0, SrcNone | ModRM,
+ SrcNone | ModRM | Priv, 0, 0, SrcNone | ModRM | Priv,
SrcNone | ModRM | DstMem | Mov, 0,
- SrcMem16 | ModRM | Mov, 0,
+ SrcMem16 | ModRM | Mov | Priv, 0,
[Group9*8] =
0, 0, 0, 0, 0, 0, 0, 0,
};
@@ -562,7 +570,7 @@ static u32 group2_table[] = {
#define insn_fetch(_type, _size, _eip) \
({ unsigned long _x; \
rc = do_insn_fetch(ctxt, ops, (_eip), &_x, (_size)); \
- if (rc != 0) \
+ if (rc != X86EMUL_CONTINUE) \
goto done; \
(_eip) += (_size); \
(_type)_x; \
@@ -638,40 +646,40 @@ static unsigned long ss_base(struct x86_emulate_ctxt *ctxt)
static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops,
- unsigned long linear, u8 *dest)
+ unsigned long eip, u8 *dest)
{
struct fetch_cache *fc = &ctxt->decode.fetch;
int rc;
- int size;
+ int size, cur_size;
- if (linear < fc->start || linear >= fc->end) {
- size = min(15UL, PAGE_SIZE - offset_in_page(linear));
- rc = ops->fetch(linear, fc->data, size, ctxt->vcpu, NULL);
- if (rc)
+ if (eip == fc->end) {
+ cur_size = fc->end - fc->start;
+ size = min(15UL - cur_size, PAGE_SIZE - offset_in_page(eip));
+ rc = ops->fetch(ctxt->cs_base + eip, fc->data + cur_size,
+ size, ctxt->vcpu, NULL);
+ if (rc != X86EMUL_CONTINUE)
return rc;
- fc->start = linear;
- fc->end = linear + size;
+ fc->end += size;
}
- *dest = fc->data[linear - fc->start];
- return 0;
+ *dest = fc->data[eip - fc->start];
+ return X86EMUL_CONTINUE;
}
static int do_insn_fetch(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops,
unsigned long eip, void *dest, unsigned size)
{
- int rc = 0;
+ int rc;
/* x86 instructions are limited to 15 bytes. */
- if (eip + size - ctxt->decode.eip_orig > 15)
+ if (eip + size - ctxt->eip > 15)
return X86EMUL_UNHANDLEABLE;
- eip += ctxt->cs_base;
while (size--) {
rc = do_fetch_insn_byte(ctxt, ops, eip++, dest++);
- if (rc)
+ if (rc != X86EMUL_CONTINUE)
return rc;
}
- return 0;
+ return X86EMUL_CONTINUE;
}
/*
@@ -702,7 +710,7 @@ static int read_descriptor(struct x86_emulate_ctxt *ctxt,
*address = 0;
rc = ops->read_std((unsigned long)ptr, (unsigned long *)size, 2,
ctxt->vcpu, NULL);
- if (rc)
+ if (rc != X86EMUL_CONTINUE)
return rc;
rc = ops->read_std((unsigned long)ptr + 2, address, op_bytes,
ctxt->vcpu, NULL);
@@ -782,7 +790,7 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt,
struct decode_cache *c = &ctxt->decode;
u8 sib;
int index_reg = 0, base_reg = 0, scale;
- int rc = 0;
+ int rc = X86EMUL_CONTINUE;
if (c->rex_prefix) {
c->modrm_reg = (c->rex_prefix & 4) << 1; /* REX.R */
@@ -895,7 +903,7 @@ static int decode_abs(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops)
{
struct decode_cache *c = &ctxt->decode;
- int rc = 0;
+ int rc = X86EMUL_CONTINUE;
switch (c->ad_bytes) {
case 2:
@@ -916,14 +924,18 @@ int
x86_decode_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
{
struct decode_cache *c = &ctxt->decode;
- int rc = 0;
+ int rc = X86EMUL_CONTINUE;
int mode = ctxt->mode;
int def_op_bytes, def_ad_bytes, group;
- /* Shadow copy of register state. Committed on successful emulation. */
+ /* we cannot decode an insn before the previous rep insn completes */
+ WARN_ON(ctxt->restart);
+
+ /* Shadow copy of register state. Committed on successful emulation. */
memset(c, 0, sizeof(struct decode_cache));
- c->eip = c->eip_orig = kvm_rip_read(ctxt->vcpu);
+ c->eip = ctxt->eip;
+ c->fetch.start = c->fetch.end = c->eip;
ctxt->cs_base = seg_base(ctxt, VCPU_SREG_CS);
memcpy(c->regs, ctxt->vcpu->arch.regs, sizeof c->regs);
@@ -1015,11 +1027,6 @@ done_prefixes:
}
}
- if (mode == X86EMUL_MODE_PROT64 && (c->d & No64)) {
- kvm_report_emulation_failure(ctxt->vcpu, "invalid x86/64 instruction");
- return -1;
- }
-
if (c->d & Group) {
group = c->d & GroupMask;
c->modrm = insn_fetch(u8, 1, c->eip);
@@ -1046,7 +1053,7 @@ done_prefixes:
rc = decode_modrm(ctxt, ops);
else if (c->d & MemAbs)
rc = decode_abs(ctxt, ops);
- if (rc)
+ if (rc != X86EMUL_CONTINUE)
goto done;
if (!c->has_seg_override)
@@ -1057,6 +1064,10 @@ done_prefixes:
if (c->ad_bytes != 8)
c->modrm_ea = (u32)c->modrm_ea;
+
+ if (c->rip_relative)
+ c->modrm_ea += c->eip;
+
/*
* Decode and fetch the source operand: register, memory
* or immediate.
@@ -1091,6 +1102,8 @@ done_prefixes:
break;
}
c->src.type = OP_MEM;
+ c->src.ptr = (unsigned long *)c->modrm_ea;
+ c->src.val = 0;
break;
case SrcImm:
case SrcImmU:
@@ -1139,6 +1152,14 @@ done_prefixes:
c->src.bytes = 1;
c->src.val = 1;
break;
+ case SrcSI:
+ c->src.type = OP_MEM;
+ c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
+ c->src.ptr = (unsigned long *)
+ register_address(c, seg_override_base(ctxt, c),
+ c->regs[VCPU_REGS_RSI]);
+ c->src.val = 0;
+ break;
}
/*
@@ -1168,6 +1189,12 @@ done_prefixes:
c->src2.bytes = 1;
c->src2.val = 1;
break;
+ case Src2Mem16:
+ c->src2.type = OP_MEM;
+ c->src2.bytes = 2;
+ c->src2.ptr = (unsigned long *)(c->modrm_ea + c->src.bytes);
+ c->src2.val = 0;
+ break;
}
/* Decode and fetch the destination operand: register or memory. */
@@ -1180,6 +1207,7 @@ done_prefixes:
c->twobyte && (c->b == 0xb6 || c->b == 0xb7));
break;
case DstMem:
+ case DstMem64:
if ((c->d & ModRM) && c->modrm_mod == 3) {
c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
c->dst.type = OP_REG;
@@ -1188,12 +1216,24 @@ done_prefixes:
break;
}
c->dst.type = OP_MEM;
+ c->dst.ptr = (unsigned long *)c->modrm_ea;
+ if ((c->d & DstMask) == DstMem64)
+ c->dst.bytes = 8;
+ else
+ c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
+ c->dst.val = 0;
+ if (c->d & BitOp) {
+ unsigned long mask = ~(c->dst.bytes * 8 - 1);
+
+ c->dst.ptr = (void *)c->dst.ptr +
+ (c->src.val & mask) / 8;
+ }
break;
case DstAcc:
c->dst.type = OP_REG;
- c->dst.bytes = c->op_bytes;
+ c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
c->dst.ptr = &c->regs[VCPU_REGS_RAX];
- switch (c->op_bytes) {
+ switch (c->dst.bytes) {
case 1:
c->dst.val = *(u8 *)c->dst.ptr;
break;
@@ -1203,18 +1243,248 @@ done_prefixes:
case 4:
c->dst.val = *(u32 *)c->dst.ptr;
break;
+ case 8:
+ c->dst.val = *(u64 *)c->dst.ptr;
+ break;
}
c->dst.orig_val = c->dst.val;
break;
+ case DstDI:
+ c->dst.type = OP_MEM;
+ c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
+ c->dst.ptr = (unsigned long *)
+ register_address(c, es_base(ctxt),
+ c->regs[VCPU_REGS_RDI]);
+ c->dst.val = 0;
+ break;
}
- if (c->rip_relative)
- c->modrm_ea += c->eip;
-
done:
return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
}
+static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
+ struct x86_emulate_ops *ops,
+ unsigned int size, unsigned short port,
+ void *dest)
+{
+ struct read_cache *rc = &ctxt->decode.io_read;
+
+ if (rc->pos == rc->end) { /* refill pio read ahead */
+ struct decode_cache *c = &ctxt->decode;
+ unsigned int in_page, n;
+ unsigned int count = c->rep_prefix ?
+ address_mask(c, c->regs[VCPU_REGS_RCX]) : 1;
+ in_page = (ctxt->eflags & EFLG_DF) ?
+ offset_in_page(c->regs[VCPU_REGS_RDI]) :
+ PAGE_SIZE - offset_in_page(c->regs[VCPU_REGS_RDI]);
+ n = min(min(in_page, (unsigned int)sizeof(rc->data)) / size,
+ count);
+ if (n == 0)
+ n = 1;
+ rc->pos = rc->end = 0;
+ if (!ops->pio_in_emulated(size, port, rc->data, n, ctxt->vcpu))
+ return 0;
+ rc->end = n * size;
+ }
+
+ memcpy(dest, rc->data + rc->pos, size);
+ rc->pos += size;
+ return 1;
+}
+
+static u32 desc_limit_scaled(struct desc_struct *desc)
+{
+ u32 limit = get_desc_limit(desc);
+
+ return desc->g ? (limit << 12) | 0xfff : limit;
+}
+
+static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
+ struct x86_emulate_ops *ops,
+ u16 selector, struct desc_ptr *dt)
+{
+ if (selector & 1 << 2) {
+ struct desc_struct desc;
+ memset(dt, 0, sizeof *dt);
+ if (!ops->get_cached_descriptor(&desc, VCPU_SREG_LDTR, ctxt->vcpu))
+ return;
+
+ dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
+ dt->address = get_desc_base(&desc);
+ } else
+ ops->get_gdt(dt, ctxt->vcpu);
+}
+
+/* allowed just for 8 bytes segments */
+static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
+ struct x86_emulate_ops *ops,
+ u16 selector, struct desc_struct *desc)
+{
+ struct desc_ptr dt;
+ u16 index = selector >> 3;
+ int ret;
+ u32 err;
+ ulong addr;
+
+ get_descriptor_table_ptr(ctxt, ops, selector, &dt);
+
+ if (dt.size < index * 8 + 7) {
+ kvm_inject_gp(ctxt->vcpu, selector & 0xfffc);
+ return X86EMUL_PROPAGATE_FAULT;
+ }
+ addr = dt.address + index * 8;
+ ret = ops->read_std(addr, desc, sizeof *desc, ctxt->vcpu, &err);
+ if (ret == X86EMUL_PROPAGATE_FAULT)
+ kvm_inject_page_fault(ctxt->vcpu, addr, err);
+
+ return ret;
+}
+
+/* allowed just for 8 bytes segments */
+static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
+ struct x86_emulate_ops *ops,
+ u16 selector, struct desc_struct *desc)
+{
+ struct desc_ptr dt;
+ u16 index = selector >> 3;
+ u32 err;
+ ulong addr;
+ int ret;
+
+ get_descriptor_table_ptr(ctxt, ops, selector, &dt);
+
+ if (dt.size < index * 8 + 7) {
+ kvm_inject_gp(ctxt->vcpu, selector & 0xfffc);
+ return X86EMUL_PROPAGATE_FAULT;
+ }
+
+ addr = dt.address + index * 8;
+ ret = ops->write_std(addr, desc, sizeof *desc, ctxt->vcpu, &err);
+ if (ret == X86EMUL_PROPAGATE_FAULT)
+ kvm_inject_page_fault(ctxt->vcpu, addr, err);
+
+ return ret;
+}
+
+static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
+ struct x86_emulate_ops *ops,
+ u16 selector, int seg)
+{
+ struct desc_struct seg_desc;
+ u8 dpl, rpl, cpl;
+ unsigned err_vec = GP_VECTOR;
+ u32 err_code = 0;
+ bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
+ int ret;
+
+ memset(&seg_desc, 0, sizeof seg_desc);
+
+ if ((seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86)
+ || ctxt->mode == X86EMUL_MODE_REAL) {
+ /* set real mode segment descriptor */
+ set_desc_base(&seg_desc, selector << 4);
+ set_desc_limit(&seg_desc, 0xffff);
+ seg_desc.type = 3;
+ seg_desc.p = 1;
+ seg_desc.s = 1;
+ goto load;
+ }
+
+ /* NULL selector is not valid for TR, CS and SS */
+ if ((seg == VCPU_SREG_CS || seg == VCPU_SREG_SS || seg == VCPU_SREG_TR)
+ && null_selector)
+ goto exception;
+
+ /* TR should be in GDT only */
+ if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
+ goto exception;
+
+ if (null_selector) /* for NULL selector skip all following checks */
+ goto load;
+
+ ret = read_segment_descriptor(ctxt, ops, selector, &seg_desc);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+
+ err_code = selector & 0xfffc;
+ err_vec = GP_VECTOR;
+
+ /* can't load system descriptor into segment selector */
+ if (seg <= VCPU_SREG_GS && !seg_desc.s)
+ goto exception;
+
+ if (!seg_desc.p) {
+ err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
+ goto exception;
+ }
+
+ rpl = selector & 3;
+ dpl = seg_desc.dpl;
+ cpl = ops->cpl(ctxt->vcpu);
+
+ switch (seg) {
+ case VCPU_SREG_SS:
+ /*
+ * segment is not a writable data segment or segment
+ * selector's RPL != CPL or segment's DPL != CPL
+ */
+ if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
+ goto exception;
+ break;
+ case VCPU_SREG_CS:
+ if (!(seg_desc.type & 8))
+ goto exception;
+
+ if (seg_desc.type & 4) {
+ /* conforming */
+ if (dpl > cpl)
+ goto exception;
+ } else {
+ /* nonconforming */
+ if (rpl > cpl || dpl != cpl)
+ goto exception;
+ }
+ /* CS(RPL) <- CPL */
+ selector = (selector & 0xfffc) | cpl;
+ break;
+ case VCPU_SREG_TR:
+ if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
+ goto exception;
+ break;
+ case VCPU_SREG_LDTR:
+ if (seg_desc.s || seg_desc.type != 2)
+ goto exception;
+ break;
+ default: /* DS, ES, FS, or GS */
+ /*
+ * segment is not a data or readable code segment or
+ * ((segment is a data or nonconforming code segment)
+ * and (both RPL and CPL > DPL))
+ */
+ if ((seg_desc.type & 0xa) == 0x8 ||
+ (((seg_desc.type & 0xc) != 0xc) &&
+ (rpl > dpl && cpl > dpl)))
+ goto exception;
+ break;
+ }
+
+ if (seg_desc.s) {
+ /* mark segment as accessed */
+ seg_desc.type |= 1;
+ ret = write_segment_descriptor(ctxt, ops, selector, &seg_desc);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+ }
+load:
+ ops->set_segment_selector(selector, seg, ctxt->vcpu);
+ ops->set_cached_descriptor(&seg_desc, seg, ctxt->vcpu);
+ return X86EMUL_CONTINUE;
+exception:
+ kvm_queue_exception_e(ctxt->vcpu, err_vec, err_code);
+ return X86EMUL_PROPAGATE_FAULT;
+}
+
static inline void emulate_push(struct x86_emulate_ctxt *ctxt)
{
struct decode_cache *c = &ctxt->decode;
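desc_limit_scaled(), added in the hunk above, expands the 20-bit segment limit by the granularity bit before the task-switch code later compares it against the minimum TSS limits (0x67 for a 32-bit TSS, 0x2b for a 16-bit one). A stand-alone illustration of the same scaling rule, with example values only:

#include <stdio.h>
#include <stdint.h>

/* same rule as desc_limit_scaled(): G=1 means the limit is in 4K pages */
static uint32_t limit_scaled(uint32_t limit, int g)
{
	return g ? (limit << 12) | 0xfff : limit;
}

int main(void)
{
	printf("%#x\n", limit_scaled(0x67, 0));		/* byte-granular minimal 32-bit TSS */
	printf("%#x\n", limit_scaled(0xfffff, 1));	/* page-granular flat segment -> 0xffffffff */
	return 0;
}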
@@ -1251,7 +1521,7 @@ static int emulate_popf(struct x86_emulate_ctxt *ctxt,
int rc;
unsigned long val, change_mask;
int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
- int cpl = kvm_x86_ops->get_cpl(ctxt->vcpu);
+ int cpl = ops->cpl(ctxt->vcpu);
rc = emulate_pop(ctxt, ops, &val, len);
if (rc != X86EMUL_CONTINUE)
@@ -1306,10 +1576,10 @@ static int emulate_pop_sreg(struct x86_emulate_ctxt *ctxt,
int rc;
rc = emulate_pop(ctxt, ops, &selector, c->op_bytes);
- if (rc != 0)
+ if (rc != X86EMUL_CONTINUE)
return rc;
- rc = kvm_load_segment_descriptor(ctxt->vcpu, (u16)selector, seg);
+ rc = load_segment_descriptor(ctxt, ops, (u16)selector, seg);
return rc;
}
@@ -1332,7 +1602,7 @@ static int emulate_popa(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops)
{
struct decode_cache *c = &ctxt->decode;
- int rc = 0;
+ int rc = X86EMUL_CONTINUE;
int reg = VCPU_REGS_RDI;
while (reg >= VCPU_REGS_RAX) {
@@ -1343,7 +1613,7 @@ static int emulate_popa(struct x86_emulate_ctxt *ctxt,
}
rc = emulate_pop(ctxt, ops, &c->regs[reg], c->op_bytes);
- if (rc != 0)
+ if (rc != X86EMUL_CONTINUE)
break;
--reg;
}
@@ -1354,12 +1624,8 @@ static inline int emulate_grp1a(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops)
{
struct decode_cache *c = &ctxt->decode;
- int rc;
- rc = emulate_pop(ctxt, ops, &c->dst.val, c->dst.bytes);
- if (rc != 0)
- return rc;
- return 0;
+ return emulate_pop(ctxt, ops, &c->dst.val, c->dst.bytes);
}
static inline void emulate_grp2(struct x86_emulate_ctxt *ctxt)
@@ -1395,7 +1661,6 @@ static inline int emulate_grp3(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops)
{
struct decode_cache *c = &ctxt->decode;
- int rc = 0;
switch (c->modrm_reg) {
case 0 ... 1: /* test */
@@ -1408,11 +1673,9 @@ static inline int emulate_grp3(struct x86_emulate_ctxt *ctxt,
emulate_1op("neg", c->dst, ctxt->eflags);
break;
default:
- DPRINTF("Cannot emulate %02x\n", c->b);
- rc = X86EMUL_UNHANDLEABLE;
- break;
+ return 0;
}
- return rc;
+ return 1;
}
static inline int emulate_grp45(struct x86_emulate_ctxt *ctxt,
@@ -1442,20 +1705,14 @@ static inline int emulate_grp45(struct x86_emulate_ctxt *ctxt,
emulate_push(ctxt);
break;
}
- return 0;
+ return X86EMUL_CONTINUE;
}
static inline int emulate_grp9(struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops,
- unsigned long memop)
+ struct x86_emulate_ops *ops)
{
struct decode_cache *c = &ctxt->decode;
- u64 old, new;
- int rc;
-
- rc = ops->read_emulated(memop, &old, 8, ctxt->vcpu);
- if (rc != X86EMUL_CONTINUE)
- return rc;
+ u64 old = c->dst.orig_val;
if (((u32) (old >> 0) != (u32) c->regs[VCPU_REGS_RAX]) ||
((u32) (old >> 32) != (u32) c->regs[VCPU_REGS_RDX])) {
@@ -1463,17 +1720,13 @@ static inline int emulate_grp9(struct x86_emulate_ctxt *ctxt,
c->regs[VCPU_REGS_RAX] = (u32) (old >> 0);
c->regs[VCPU_REGS_RDX] = (u32) (old >> 32);
ctxt->eflags &= ~EFLG_ZF;
-
} else {
- new = ((u64)c->regs[VCPU_REGS_RCX] << 32) |
+ c->dst.val = ((u64)c->regs[VCPU_REGS_RCX] << 32) |
(u32) c->regs[VCPU_REGS_RBX];
- rc = ops->cmpxchg_emulated(memop, &old, &new, 8, ctxt->vcpu);
- if (rc != X86EMUL_CONTINUE)
- return rc;
ctxt->eflags |= EFLG_ZF;
}
- return 0;
+ return X86EMUL_CONTINUE;
}
static int emulate_ret_far(struct x86_emulate_ctxt *ctxt,
@@ -1484,14 +1737,14 @@ static int emulate_ret_far(struct x86_emulate_ctxt *ctxt,
unsigned long cs;
rc = emulate_pop(ctxt, ops, &c->eip, c->op_bytes);
- if (rc)
+ if (rc != X86EMUL_CONTINUE)
return rc;
if (c->op_bytes == 4)
c->eip = (u32)c->eip;
rc = emulate_pop(ctxt, ops, &cs, c->op_bytes);
- if (rc)
+ if (rc != X86EMUL_CONTINUE)
return rc;
- rc = kvm_load_segment_descriptor(ctxt->vcpu, (u16)cs, VCPU_SREG_CS);
+ rc = load_segment_descriptor(ctxt, ops, (u16)cs, VCPU_SREG_CS);
return rc;
}
@@ -1544,7 +1797,7 @@ static inline int writeback(struct x86_emulate_ctxt *ctxt,
default:
break;
}
- return 0;
+ return X86EMUL_CONTINUE;
}
static void toggle_interruptibility(struct x86_emulate_ctxt *ctxt, u32 mask)
@@ -1598,8 +1851,11 @@ emulate_syscall(struct x86_emulate_ctxt *ctxt)
u64 msr_data;
/* syscall is not available in real mode */
- if (ctxt->mode == X86EMUL_MODE_REAL || ctxt->mode == X86EMUL_MODE_VM86)
- return X86EMUL_UNHANDLEABLE;
+ if (ctxt->mode == X86EMUL_MODE_REAL ||
+ ctxt->mode == X86EMUL_MODE_VM86) {
+ kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
+ return X86EMUL_PROPAGATE_FAULT;
+ }
setup_syscalls_segments(ctxt, &cs, &ss);
@@ -1649,14 +1905,16 @@ emulate_sysenter(struct x86_emulate_ctxt *ctxt)
/* inject #GP if in real mode */
if (ctxt->mode == X86EMUL_MODE_REAL) {
kvm_inject_gp(ctxt->vcpu, 0);
- return X86EMUL_UNHANDLEABLE;
+ return X86EMUL_PROPAGATE_FAULT;
}
/* XXX sysenter/sysexit have not been tested in 64bit mode.
* Therefore, we inject an #UD.
*/
- if (ctxt->mode == X86EMUL_MODE_PROT64)
- return X86EMUL_UNHANDLEABLE;
+ if (ctxt->mode == X86EMUL_MODE_PROT64) {
+ kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
+ return X86EMUL_PROPAGATE_FAULT;
+ }
setup_syscalls_segments(ctxt, &cs, &ss);
@@ -1711,7 +1969,7 @@ emulate_sysexit(struct x86_emulate_ctxt *ctxt)
if (ctxt->mode == X86EMUL_MODE_REAL ||
ctxt->mode == X86EMUL_MODE_VM86) {
kvm_inject_gp(ctxt->vcpu, 0);
- return X86EMUL_UNHANDLEABLE;
+ return X86EMUL_PROPAGATE_FAULT;
}
setup_syscalls_segments(ctxt, &cs, &ss);
@@ -1756,7 +2014,8 @@ emulate_sysexit(struct x86_emulate_ctxt *ctxt)
return X86EMUL_CONTINUE;
}
-static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
+static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt,
+ struct x86_emulate_ops *ops)
{
int iopl;
if (ctxt->mode == X86EMUL_MODE_REAL)
@@ -1764,7 +2023,7 @@ static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
if (ctxt->mode == X86EMUL_MODE_VM86)
return true;
iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
- return kvm_x86_ops->get_cpl(ctxt->vcpu) > iopl;
+ return ops->cpl(ctxt->vcpu) > iopl;
}
static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
@@ -1801,22 +2060,419 @@ static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops,
u16 port, u16 len)
{
- if (emulator_bad_iopl(ctxt))
+ if (emulator_bad_iopl(ctxt, ops))
if (!emulator_io_port_access_allowed(ctxt, ops, port, len))
return false;
return true;
}
+static u32 get_cached_descriptor_base(struct x86_emulate_ctxt *ctxt,
+ struct x86_emulate_ops *ops,
+ int seg)
+{
+ struct desc_struct desc;
+ if (ops->get_cached_descriptor(&desc, seg, ctxt->vcpu))
+ return get_desc_base(&desc);
+ else
+ return ~0;
+}
+
+static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
+ struct x86_emulate_ops *ops,
+ struct tss_segment_16 *tss)
+{
+ struct decode_cache *c = &ctxt->decode;
+
+ tss->ip = c->eip;
+ tss->flag = ctxt->eflags;
+ tss->ax = c->regs[VCPU_REGS_RAX];
+ tss->cx = c->regs[VCPU_REGS_RCX];
+ tss->dx = c->regs[VCPU_REGS_RDX];
+ tss->bx = c->regs[VCPU_REGS_RBX];
+ tss->sp = c->regs[VCPU_REGS_RSP];
+ tss->bp = c->regs[VCPU_REGS_RBP];
+ tss->si = c->regs[VCPU_REGS_RSI];
+ tss->di = c->regs[VCPU_REGS_RDI];
+
+ tss->es = ops->get_segment_selector(VCPU_SREG_ES, ctxt->vcpu);
+ tss->cs = ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
+ tss->ss = ops->get_segment_selector(VCPU_SREG_SS, ctxt->vcpu);
+ tss->ds = ops->get_segment_selector(VCPU_SREG_DS, ctxt->vcpu);
+ tss->ldt = ops->get_segment_selector(VCPU_SREG_LDTR, ctxt->vcpu);
+}
+
+static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
+ struct x86_emulate_ops *ops,
+ struct tss_segment_16 *tss)
+{
+ struct decode_cache *c = &ctxt->decode;
+ int ret;
+
+ c->eip = tss->ip;
+ ctxt->eflags = tss->flag | 2;
+ c->regs[VCPU_REGS_RAX] = tss->ax;
+ c->regs[VCPU_REGS_RCX] = tss->cx;
+ c->regs[VCPU_REGS_RDX] = tss->dx;
+ c->regs[VCPU_REGS_RBX] = tss->bx;
+ c->regs[VCPU_REGS_RSP] = tss->sp;
+ c->regs[VCPU_REGS_RBP] = tss->bp;
+ c->regs[VCPU_REGS_RSI] = tss->si;
+ c->regs[VCPU_REGS_RDI] = tss->di;
+
+ /*
+ * SDM says that segment selectors are loaded before segment
+ * descriptors
+ */
+ ops->set_segment_selector(tss->ldt, VCPU_SREG_LDTR, ctxt->vcpu);
+ ops->set_segment_selector(tss->es, VCPU_SREG_ES, ctxt->vcpu);
+ ops->set_segment_selector(tss->cs, VCPU_SREG_CS, ctxt->vcpu);
+ ops->set_segment_selector(tss->ss, VCPU_SREG_SS, ctxt->vcpu);
+ ops->set_segment_selector(tss->ds, VCPU_SREG_DS, ctxt->vcpu);
+
+ /*
+ * Now load segment descriptors. If a fault happens at this stage
+ * it is handled in the context of the new task
+ */
+ ret = load_segment_descriptor(ctxt, ops, tss->ldt, VCPU_SREG_LDTR);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+ ret = load_segment_descriptor(ctxt, ops, tss->es, VCPU_SREG_ES);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+ ret = load_segment_descriptor(ctxt, ops, tss->cs, VCPU_SREG_CS);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+ ret = load_segment_descriptor(ctxt, ops, tss->ss, VCPU_SREG_SS);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+ ret = load_segment_descriptor(ctxt, ops, tss->ds, VCPU_SREG_DS);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+
+ return X86EMUL_CONTINUE;
+}
+
+static int task_switch_16(struct x86_emulate_ctxt *ctxt,
+ struct x86_emulate_ops *ops,
+ u16 tss_selector, u16 old_tss_sel,
+ ulong old_tss_base, struct desc_struct *new_desc)
+{
+ struct tss_segment_16 tss_seg;
+ int ret;
+ u32 err, new_tss_base = get_desc_base(new_desc);
+
+ ret = ops->read_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
+ &err);
+ if (ret == X86EMUL_PROPAGATE_FAULT) {
+ /* FIXME: need to provide precise fault address */
+ kvm_inject_page_fault(ctxt->vcpu, old_tss_base, err);
+ return ret;
+ }
+
+ save_state_to_tss16(ctxt, ops, &tss_seg);
+
+ ret = ops->write_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
+ &err);
+ if (ret == X86EMUL_PROPAGATE_FAULT) {
+ /* FIXME: need to provide precise fault address */
+ kvm_inject_page_fault(ctxt->vcpu, old_tss_base, err);
+ return ret;
+ }
+
+ ret = ops->read_std(new_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
+ &err);
+ if (ret == X86EMUL_PROPAGATE_FAULT) {
+ /* FIXME: need to provide precise fault address */
+ kvm_inject_page_fault(ctxt->vcpu, new_tss_base, err);
+ return ret;
+ }
+
+ if (old_tss_sel != 0xffff) {
+ tss_seg.prev_task_link = old_tss_sel;
+
+ ret = ops->write_std(new_tss_base,
+ &tss_seg.prev_task_link,
+ sizeof tss_seg.prev_task_link,
+ ctxt->vcpu, &err);
+ if (ret == X86EMUL_PROPAGATE_FAULT) {
+ /* FIXME: need to provide precise fault address */
+ kvm_inject_page_fault(ctxt->vcpu, new_tss_base, err);
+ return ret;
+ }
+ }
+
+ return load_state_from_tss16(ctxt, ops, &tss_seg);
+}
+
+static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
+ struct x86_emulate_ops *ops,
+ struct tss_segment_32 *tss)
+{
+ struct decode_cache *c = &ctxt->decode;
+
+ tss->cr3 = ops->get_cr(3, ctxt->vcpu);
+ tss->eip = c->eip;
+ tss->eflags = ctxt->eflags;
+ tss->eax = c->regs[VCPU_REGS_RAX];
+ tss->ecx = c->regs[VCPU_REGS_RCX];
+ tss->edx = c->regs[VCPU_REGS_RDX];
+ tss->ebx = c->regs[VCPU_REGS_RBX];
+ tss->esp = c->regs[VCPU_REGS_RSP];
+ tss->ebp = c->regs[VCPU_REGS_RBP];
+ tss->esi = c->regs[VCPU_REGS_RSI];
+ tss->edi = c->regs[VCPU_REGS_RDI];
+
+ tss->es = ops->get_segment_selector(VCPU_SREG_ES, ctxt->vcpu);
+ tss->cs = ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
+ tss->ss = ops->get_segment_selector(VCPU_SREG_SS, ctxt->vcpu);
+ tss->ds = ops->get_segment_selector(VCPU_SREG_DS, ctxt->vcpu);
+ tss->fs = ops->get_segment_selector(VCPU_SREG_FS, ctxt->vcpu);
+ tss->gs = ops->get_segment_selector(VCPU_SREG_GS, ctxt->vcpu);
+ tss->ldt_selector = ops->get_segment_selector(VCPU_SREG_LDTR, ctxt->vcpu);
+}
+
+static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
+ struct x86_emulate_ops *ops,
+ struct tss_segment_32 *tss)
+{
+ struct decode_cache *c = &ctxt->decode;
+ int ret;
+
+ ops->set_cr(3, tss->cr3, ctxt->vcpu);
+ c->eip = tss->eip;
+ ctxt->eflags = tss->eflags | 2;
+ c->regs[VCPU_REGS_RAX] = tss->eax;
+ c->regs[VCPU_REGS_RCX] = tss->ecx;
+ c->regs[VCPU_REGS_RDX] = tss->edx;
+ c->regs[VCPU_REGS_RBX] = tss->ebx;
+ c->regs[VCPU_REGS_RSP] = tss->esp;
+ c->regs[VCPU_REGS_RBP] = tss->ebp;
+ c->regs[VCPU_REGS_RSI] = tss->esi;
+ c->regs[VCPU_REGS_RDI] = tss->edi;
+
+ /*
+ * SDM says that segment selectors are loaded before segment
+ * descriptors
+ */
+ ops->set_segment_selector(tss->ldt_selector, VCPU_SREG_LDTR, ctxt->vcpu);
+ ops->set_segment_selector(tss->es, VCPU_SREG_ES, ctxt->vcpu);
+ ops->set_segment_selector(tss->cs, VCPU_SREG_CS, ctxt->vcpu);
+ ops->set_segment_selector(tss->ss, VCPU_SREG_SS, ctxt->vcpu);
+ ops->set_segment_selector(tss->ds, VCPU_SREG_DS, ctxt->vcpu);
+ ops->set_segment_selector(tss->fs, VCPU_SREG_FS, ctxt->vcpu);
+ ops->set_segment_selector(tss->gs, VCPU_SREG_GS, ctxt->vcpu);
+
+ /*
+ * Now load segment descriptors. If a fault happens at this stage
+ * it is handled in the context of the new task
+ */
+ ret = load_segment_descriptor(ctxt, ops, tss->ldt_selector, VCPU_SREG_LDTR);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+ ret = load_segment_descriptor(ctxt, ops, tss->es, VCPU_SREG_ES);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+ ret = load_segment_descriptor(ctxt, ops, tss->cs, VCPU_SREG_CS);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+ ret = load_segment_descriptor(ctxt, ops, tss->ss, VCPU_SREG_SS);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+ ret = load_segment_descriptor(ctxt, ops, tss->ds, VCPU_SREG_DS);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+ ret = load_segment_descriptor(ctxt, ops, tss->fs, VCPU_SREG_FS);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+ ret = load_segment_descriptor(ctxt, ops, tss->gs, VCPU_SREG_GS);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+
+ return X86EMUL_CONTINUE;
+}
+
+static int task_switch_32(struct x86_emulate_ctxt *ctxt,
+ struct x86_emulate_ops *ops,
+ u16 tss_selector, u16 old_tss_sel,
+ ulong old_tss_base, struct desc_struct *new_desc)
+{
+ struct tss_segment_32 tss_seg;
+ int ret;
+ u32 err, new_tss_base = get_desc_base(new_desc);
+
+ ret = ops->read_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
+ &err);
+ if (ret == X86EMUL_PROPAGATE_FAULT) {
+ /* FIXME: need to provide precise fault address */
+ kvm_inject_page_fault(ctxt->vcpu, old_tss_base, err);
+ return ret;
+ }
+
+ save_state_to_tss32(ctxt, ops, &tss_seg);
+
+ ret = ops->write_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
+ &err);
+ if (ret == X86EMUL_PROPAGATE_FAULT) {
+ /* FIXME: need to provide precise fault address */
+ kvm_inject_page_fault(ctxt->vcpu, old_tss_base, err);
+ return ret;
+ }
+
+ ret = ops->read_std(new_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
+ &err);
+ if (ret == X86EMUL_PROPAGATE_FAULT) {
+ /* FIXME: need to provide precise fault address */
+ kvm_inject_page_fault(ctxt->vcpu, new_tss_base, err);
+ return ret;
+ }
+
+ if (old_tss_sel != 0xffff) {
+ tss_seg.prev_task_link = old_tss_sel;
+
+ ret = ops->write_std(new_tss_base,
+ &tss_seg.prev_task_link,
+ sizeof tss_seg.prev_task_link,
+ ctxt->vcpu, &err);
+ if (ret == X86EMUL_PROPAGATE_FAULT) {
+ /* FIXME: need to provide precise fault address */
+ kvm_inject_page_fault(ctxt->vcpu, new_tss_base, err);
+ return ret;
+ }
+ }
+
+ return load_state_from_tss32(ctxt, ops, &tss_seg);
+}
+
+static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
+ struct x86_emulate_ops *ops,
+ u16 tss_selector, int reason,
+ bool has_error_code, u32 error_code)
+{
+ struct desc_struct curr_tss_desc, next_tss_desc;
+ int ret;
+ u16 old_tss_sel = ops->get_segment_selector(VCPU_SREG_TR, ctxt->vcpu);
+ ulong old_tss_base =
+ get_cached_descriptor_base(ctxt, ops, VCPU_SREG_TR);
+ u32 desc_limit;
+
+ /* FIXME: old_tss_base == ~0 ? */
+
+ ret = read_segment_descriptor(ctxt, ops, tss_selector, &next_tss_desc);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+ ret = read_segment_descriptor(ctxt, ops, old_tss_sel, &curr_tss_desc);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+
+ /* FIXME: check that next_tss_desc is tss */
+
+ if (reason != TASK_SWITCH_IRET) {
+ if ((tss_selector & 3) > next_tss_desc.dpl ||
+ ops->cpl(ctxt->vcpu) > next_tss_desc.dpl) {
+ kvm_inject_gp(ctxt->vcpu, 0);
+ return X86EMUL_PROPAGATE_FAULT;
+ }
+ }
+
+ desc_limit = desc_limit_scaled(&next_tss_desc);
+ if (!next_tss_desc.p ||
+ ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
+ desc_limit < 0x2b)) {
+ kvm_queue_exception_e(ctxt->vcpu, TS_VECTOR,
+ tss_selector & 0xfffc);
+ return X86EMUL_PROPAGATE_FAULT;
+ }
+
+ if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
+ curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
+ write_segment_descriptor(ctxt, ops, old_tss_sel,
+ &curr_tss_desc);
+ }
+
+ if (reason == TASK_SWITCH_IRET)
+ ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
+
+ /* set back link to prev task only if NT bit is set in eflags;
+ note that old_tss_sel is not used after this point */
+ if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
+ old_tss_sel = 0xffff;
+
+ if (next_tss_desc.type & 8)
+ ret = task_switch_32(ctxt, ops, tss_selector, old_tss_sel,
+ old_tss_base, &next_tss_desc);
+ else
+ ret = task_switch_16(ctxt, ops, tss_selector, old_tss_sel,
+ old_tss_base, &next_tss_desc);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+
+ if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
+ ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
+
+ if (reason != TASK_SWITCH_IRET) {
+ next_tss_desc.type |= (1 << 1); /* set busy flag */
+ write_segment_descriptor(ctxt, ops, tss_selector,
+ &next_tss_desc);
+ }
+
+ ops->set_cr(0, ops->get_cr(0, ctxt->vcpu) | X86_CR0_TS, ctxt->vcpu);
+ ops->set_cached_descriptor(&next_tss_desc, VCPU_SREG_TR, ctxt->vcpu);
+ ops->set_segment_selector(tss_selector, VCPU_SREG_TR, ctxt->vcpu);
+
+ if (has_error_code) {
+ struct decode_cache *c = &ctxt->decode;
+
+ c->op_bytes = c->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
+ c->lock_prefix = 0;
+ c->src.val = (unsigned long) error_code;
+ emulate_push(ctxt);
+ }
+
+ return ret;
+}
+
+int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
+ struct x86_emulate_ops *ops,
+ u16 tss_selector, int reason,
+ bool has_error_code, u32 error_code)
+{
+ struct decode_cache *c = &ctxt->decode;
+ int rc;
+
+ memset(c, 0, sizeof(struct decode_cache));
+ c->eip = ctxt->eip;
+ memcpy(c->regs, ctxt->vcpu->arch.regs, sizeof c->regs);
+ c->dst.type = OP_NONE;
+
+ rc = emulator_do_task_switch(ctxt, ops, tss_selector, reason,
+ has_error_code, error_code);
+
+ if (rc == X86EMUL_CONTINUE) {
+ memcpy(ctxt->vcpu->arch.regs, c->regs, sizeof c->regs);
+ kvm_rip_write(ctxt->vcpu, c->eip);
+ rc = writeback(ctxt, ops);
+ }
+
+ return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
+}
+
+static void string_addr_inc(struct x86_emulate_ctxt *ctxt, unsigned long base,
+ int reg, struct operand *op)
+{
+ struct decode_cache *c = &ctxt->decode;
+ int df = (ctxt->eflags & EFLG_DF) ? -1 : 1;
+
+ register_address_increment(c, &c->regs[reg], df * op->bytes);
+ op->ptr = (unsigned long *)register_address(c, base, c->regs[reg]);
+}
+
int
x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
{
- unsigned long memop = 0;
u64 msr_data;
- unsigned long saved_eip = 0;
struct decode_cache *c = &ctxt->decode;
- unsigned int port;
- int io_dir_in;
- int rc = 0;
+ int rc = X86EMUL_CONTINUE;
+ int saved_dst_type = c->dst.type;
ctxt->interruptibility = 0;
@@ -1826,26 +2482,30 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
*/
memcpy(c->regs, ctxt->vcpu->arch.regs, sizeof c->regs);
- saved_eip = c->eip;
+
+ if (ctxt->mode == X86EMUL_MODE_PROT64 && (c->d & No64)) {
+ kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
+ goto done;
+ }
/* LOCK prefix is allowed only with some instructions */
- if (c->lock_prefix && !(c->d & Lock)) {
+ if (c->lock_prefix && (!(c->d & Lock) || c->dst.type != OP_MEM)) {
kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
goto done;
}
/* Privileged instruction can be executed only in CPL=0 */
- if ((c->d & Priv) && kvm_x86_ops->get_cpl(ctxt->vcpu)) {
+ if ((c->d & Priv) && ops->cpl(ctxt->vcpu)) {
kvm_inject_gp(ctxt->vcpu, 0);
goto done;
}
- if (((c->d & ModRM) && (c->modrm_mod != 3)) || (c->d & MemAbs))
- memop = c->modrm_ea;
-
if (c->rep_prefix && (c->d & String)) {
+ ctxt->restart = true;
/* All REP prefixes have the same first termination condition */
- if (c->regs[VCPU_REGS_RCX] == 0) {
+ if (address_mask(c, c->regs[VCPU_REGS_RCX]) == 0) {
+ string_done:
+ ctxt->restart = false;
kvm_rip_write(ctxt->vcpu, c->eip);
goto done;
}
@@ -1857,25 +2517,18 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
* - if REPNE/REPNZ and ZF = 1 then done
*/
if ((c->b == 0xa6) || (c->b == 0xa7) ||
- (c->b == 0xae) || (c->b == 0xaf)) {
+ (c->b == 0xae) || (c->b == 0xaf)) {
if ((c->rep_prefix == REPE_PREFIX) &&
- ((ctxt->eflags & EFLG_ZF) == 0)) {
- kvm_rip_write(ctxt->vcpu, c->eip);
- goto done;
- }
+ ((ctxt->eflags & EFLG_ZF) == 0))
+ goto string_done;
if ((c->rep_prefix == REPNE_PREFIX) &&
- ((ctxt->eflags & EFLG_ZF) == EFLG_ZF)) {
- kvm_rip_write(ctxt->vcpu, c->eip);
- goto done;
- }
+ ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))
+ goto string_done;
}
- c->regs[VCPU_REGS_RCX]--;
- c->eip = kvm_rip_read(ctxt->vcpu);
+ c->eip = ctxt->eip;
}
if (c->src.type == OP_MEM) {
- c->src.ptr = (unsigned long *)memop;
- c->src.val = 0;
rc = ops->read_emulated((unsigned long)c->src.ptr,
&c->src.val,
c->src.bytes,
@@ -1885,29 +2538,25 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
c->src.orig_val = c->src.val;
}
+ if (c->src2.type == OP_MEM) {
+ rc = ops->read_emulated((unsigned long)c->src2.ptr,
+ &c->src2.val,
+ c->src2.bytes,
+ ctxt->vcpu);
+ if (rc != X86EMUL_CONTINUE)
+ goto done;
+ }
+
if ((c->d & DstMask) == ImplicitOps)
goto special_insn;
- if (c->dst.type == OP_MEM) {
- c->dst.ptr = (unsigned long *)memop;
- c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
- c->dst.val = 0;
- if (c->d & BitOp) {
- unsigned long mask = ~(c->dst.bytes * 8 - 1);
-
- c->dst.ptr = (void *)c->dst.ptr +
- (c->src.val & mask) / 8;
- }
- if (!(c->d & Mov)) {
- /* optimisation - avoid slow emulated read */
- rc = ops->read_emulated((unsigned long)c->dst.ptr,
- &c->dst.val,
- c->dst.bytes,
- ctxt->vcpu);
- if (rc != X86EMUL_CONTINUE)
- goto done;
- }
+ if ((c->dst.type == OP_MEM) && !(c->d & Mov)) {
+ /* optimisation - avoid slow emulated read if Mov (dst is only written) */
+ rc = ops->read_emulated((unsigned long)c->dst.ptr, &c->dst.val,
+ c->dst.bytes, ctxt->vcpu);
+ if (rc != X86EMUL_CONTINUE)
+ goto done;
}
c->dst.orig_val = c->dst.val;
@@ -1926,7 +2575,7 @@ special_insn:
break;
case 0x07: /* pop es */
rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_ES);
- if (rc != 0)
+ if (rc != X86EMUL_CONTINUE)
goto done;
break;
case 0x08 ... 0x0d:
@@ -1945,7 +2594,7 @@ special_insn:
break;
case 0x17: /* pop ss */
rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_SS);
- if (rc != 0)
+ if (rc != X86EMUL_CONTINUE)
goto done;
break;
case 0x18 ... 0x1d:
@@ -1957,7 +2606,7 @@ special_insn:
break;
case 0x1f: /* pop ds */
rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_DS);
- if (rc != 0)
+ if (rc != X86EMUL_CONTINUE)
goto done;
break;
case 0x20 ... 0x25:
@@ -1988,7 +2637,7 @@ special_insn:
case 0x58 ... 0x5f: /* pop reg */
pop_instruction:
rc = emulate_pop(ctxt, ops, &c->dst.val, c->op_bytes);
- if (rc != 0)
+ if (rc != X86EMUL_CONTINUE)
goto done;
break;
case 0x60: /* pusha */
@@ -1996,7 +2645,7 @@ special_insn:
break;
case 0x61: /* popa */
rc = emulate_popa(ctxt, ops);
- if (rc != 0)
+ if (rc != X86EMUL_CONTINUE)
goto done;
break;
case 0x63: /* movsxd */
@@ -2010,47 +2659,29 @@ special_insn:
break;
case 0x6c: /* insb */
case 0x6d: /* insw/insd */
+ c->dst.bytes = min(c->dst.bytes, 4u);
if (!emulator_io_permited(ctxt, ops, c->regs[VCPU_REGS_RDX],
- (c->d & ByteOp) ? 1 : c->op_bytes)) {
+ c->dst.bytes)) {
kvm_inject_gp(ctxt->vcpu, 0);
goto done;
}
- if (kvm_emulate_pio_string(ctxt->vcpu,
- 1,
- (c->d & ByteOp) ? 1 : c->op_bytes,
- c->rep_prefix ?
- address_mask(c, c->regs[VCPU_REGS_RCX]) : 1,
- (ctxt->eflags & EFLG_DF),
- register_address(c, es_base(ctxt),
- c->regs[VCPU_REGS_RDI]),
- c->rep_prefix,
- c->regs[VCPU_REGS_RDX]) == 0) {
- c->eip = saved_eip;
- return -1;
- }
- return 0;
+ if (!pio_in_emulated(ctxt, ops, c->dst.bytes,
+ c->regs[VCPU_REGS_RDX], &c->dst.val))
+ goto done; /* IO is needed, skip writeback */
+ break;
case 0x6e: /* outsb */
case 0x6f: /* outsw/outsd */
+ c->src.bytes = min(c->src.bytes, 4u);
if (!emulator_io_permited(ctxt, ops, c->regs[VCPU_REGS_RDX],
- (c->d & ByteOp) ? 1 : c->op_bytes)) {
+ c->src.bytes)) {
kvm_inject_gp(ctxt->vcpu, 0);
goto done;
}
- if (kvm_emulate_pio_string(ctxt->vcpu,
- 0,
- (c->d & ByteOp) ? 1 : c->op_bytes,
- c->rep_prefix ?
- address_mask(c, c->regs[VCPU_REGS_RCX]) : 1,
- (ctxt->eflags & EFLG_DF),
- register_address(c,
- seg_override_base(ctxt, c),
- c->regs[VCPU_REGS_RSI]),
- c->rep_prefix,
- c->regs[VCPU_REGS_RDX]) == 0) {
- c->eip = saved_eip;
- return -1;
- }
- return 0;
+ ops->pio_out_emulated(c->src.bytes, c->regs[VCPU_REGS_RDX],
+ &c->src.val, 1, ctxt->vcpu);
+
+ c->dst.type = OP_NONE; /* nothing to writeback */
+ break;
case 0x70 ... 0x7f: /* jcc (short) */
if (test_cc(c->b, ctxt->eflags))
jmp_rel(c, c->src.val);
@@ -2107,12 +2738,11 @@ special_insn:
case 0x8c: { /* mov r/m, sreg */
struct kvm_segment segreg;
- if (c->modrm_reg <= 5)
+ if (c->modrm_reg <= VCPU_SREG_GS)
kvm_get_segment(ctxt->vcpu, &segreg, c->modrm_reg);
else {
- printk(KERN_INFO "0x8c: Invalid segreg in modrm byte 0x%02x\n",
- c->modrm);
- goto cannot_emulate;
+ kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
+ goto done;
}
c->dst.val = segreg.selector;
break;
@@ -2132,16 +2762,16 @@ special_insn:
}
if (c->modrm_reg == VCPU_SREG_SS)
- toggle_interruptibility(ctxt, X86_SHADOW_INT_MOV_SS);
+ toggle_interruptibility(ctxt, KVM_X86_SHADOW_INT_MOV_SS);
- rc = kvm_load_segment_descriptor(ctxt->vcpu, sel, c->modrm_reg);
+ rc = load_segment_descriptor(ctxt, ops, sel, c->modrm_reg);
c->dst.type = OP_NONE; /* Disable writeback. */
break;
}
case 0x8f: /* pop (sole member of Grp1a) */
rc = emulate_grp1a(ctxt, ops);
- if (rc != 0)
+ if (rc != X86EMUL_CONTINUE)
goto done;
break;
case 0x90: /* nop / xchg r8,rax */
@@ -2175,89 +2805,16 @@ special_insn:
c->dst.val = (unsigned long)c->regs[VCPU_REGS_RAX];
break;
case 0xa4 ... 0xa5: /* movs */
- c->dst.type = OP_MEM;
- c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
- c->dst.ptr = (unsigned long *)register_address(c,
- es_base(ctxt),
- c->regs[VCPU_REGS_RDI]);
- rc = ops->read_emulated(register_address(c,
- seg_override_base(ctxt, c),
- c->regs[VCPU_REGS_RSI]),
- &c->dst.val,
- c->dst.bytes, ctxt->vcpu);
- if (rc != X86EMUL_CONTINUE)
- goto done;
- register_address_increment(c, &c->regs[VCPU_REGS_RSI],
- (ctxt->eflags & EFLG_DF) ? -c->dst.bytes
- : c->dst.bytes);
- register_address_increment(c, &c->regs[VCPU_REGS_RDI],
- (ctxt->eflags & EFLG_DF) ? -c->dst.bytes
- : c->dst.bytes);
- break;
+ goto mov;
case 0xa6 ... 0xa7: /* cmps */
- c->src.type = OP_NONE; /* Disable writeback. */
- c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
- c->src.ptr = (unsigned long *)register_address(c,
- seg_override_base(ctxt, c),
- c->regs[VCPU_REGS_RSI]);
- rc = ops->read_emulated((unsigned long)c->src.ptr,
- &c->src.val,
- c->src.bytes,
- ctxt->vcpu);
- if (rc != X86EMUL_CONTINUE)
- goto done;
-
c->dst.type = OP_NONE; /* Disable writeback. */
- c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
- c->dst.ptr = (unsigned long *)register_address(c,
- es_base(ctxt),
- c->regs[VCPU_REGS_RDI]);
- rc = ops->read_emulated((unsigned long)c->dst.ptr,
- &c->dst.val,
- c->dst.bytes,
- ctxt->vcpu);
- if (rc != X86EMUL_CONTINUE)
- goto done;
-
DPRINTF("cmps: mem1=0x%p mem2=0x%p\n", c->src.ptr, c->dst.ptr);
-
- emulate_2op_SrcV("cmp", c->src, c->dst, ctxt->eflags);
-
- register_address_increment(c, &c->regs[VCPU_REGS_RSI],
- (ctxt->eflags & EFLG_DF) ? -c->src.bytes
- : c->src.bytes);
- register_address_increment(c, &c->regs[VCPU_REGS_RDI],
- (ctxt->eflags & EFLG_DF) ? -c->dst.bytes
- : c->dst.bytes);
-
- break;
+ goto cmp;
case 0xaa ... 0xab: /* stos */
- c->dst.type = OP_MEM;
- c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
- c->dst.ptr = (unsigned long *)register_address(c,
- es_base(ctxt),
- c->regs[VCPU_REGS_RDI]);
c->dst.val = c->regs[VCPU_REGS_RAX];
- register_address_increment(c, &c->regs[VCPU_REGS_RDI],
- (ctxt->eflags & EFLG_DF) ? -c->dst.bytes
- : c->dst.bytes);
break;
case 0xac ... 0xad: /* lods */
- c->dst.type = OP_REG;
- c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
- c->dst.ptr = (unsigned long *)&c->regs[VCPU_REGS_RAX];
- rc = ops->read_emulated(register_address(c,
- seg_override_base(ctxt, c),
- c->regs[VCPU_REGS_RSI]),
- &c->dst.val,
- c->dst.bytes,
- ctxt->vcpu);
- if (rc != X86EMUL_CONTINUE)
- goto done;
- register_address_increment(c, &c->regs[VCPU_REGS_RSI],
- (ctxt->eflags & EFLG_DF) ? -c->dst.bytes
- : c->dst.bytes);
- break;
+ goto mov;
case 0xae ... 0xaf: /* scas */
DPRINTF("Urk! I don't handle SCAS.\n");
goto cannot_emulate;
@@ -2277,7 +2834,7 @@ special_insn:
break;
case 0xcb: /* ret far */
rc = emulate_ret_far(ctxt, ops);
- if (rc)
+ if (rc != X86EMUL_CONTINUE)
goto done;
break;
case 0xd0 ... 0xd1: /* Grp2 */
@@ -2290,14 +2847,10 @@ special_insn:
break;
case 0xe4: /* inb */
case 0xe5: /* in */
- port = c->src.val;
- io_dir_in = 1;
- goto do_io;
+ goto do_io_in;
case 0xe6: /* outb */
case 0xe7: /* out */
- port = c->src.val;
- io_dir_in = 0;
- goto do_io;
+ goto do_io_out;
case 0xe8: /* call (near) */ {
long int rel = c->src.val;
c->src.val = (unsigned long) c->eip;
@@ -2308,8 +2861,9 @@ special_insn:
case 0xe9: /* jmp rel */
goto jmp;
case 0xea: /* jmp far */
- if (kvm_load_segment_descriptor(ctxt->vcpu, c->src2.val,
- VCPU_SREG_CS))
+ jump_far:
+ if (load_segment_descriptor(ctxt, ops, c->src2.val,
+ VCPU_SREG_CS))
goto done;
c->eip = c->src.val;
@@ -2321,25 +2875,29 @@ special_insn:
break;
case 0xec: /* in al,dx */
case 0xed: /* in (e/r)ax,dx */
- port = c->regs[VCPU_REGS_RDX];
- io_dir_in = 1;
- goto do_io;
+ c->src.val = c->regs[VCPU_REGS_RDX];
+ do_io_in:
+ c->dst.bytes = min(c->dst.bytes, 4u);
+ if (!emulator_io_permited(ctxt, ops, c->src.val, c->dst.bytes)) {
+ kvm_inject_gp(ctxt->vcpu, 0);
+ goto done;
+ }
+ if (!pio_in_emulated(ctxt, ops, c->dst.bytes, c->src.val,
+ &c->dst.val))
+ goto done; /* IO is needed */
+ break;
case 0xee: /* out al,dx */
case 0xef: /* out (e/r)ax,dx */
- port = c->regs[VCPU_REGS_RDX];
- io_dir_in = 0;
- do_io:
- if (!emulator_io_permited(ctxt, ops, port,
- (c->d & ByteOp) ? 1 : c->op_bytes)) {
+ c->src.val = c->regs[VCPU_REGS_RDX];
+ do_io_out:
+ c->dst.bytes = min(c->dst.bytes, 4u);
+ if (!emulator_io_permited(ctxt, ops, c->src.val, c->dst.bytes)) {
kvm_inject_gp(ctxt->vcpu, 0);
goto done;
}
- if (kvm_emulate_pio(ctxt->vcpu, io_dir_in,
- (c->d & ByteOp) ? 1 : c->op_bytes,
- port) != 0) {
- c->eip = saved_eip;
- goto cannot_emulate;
- }
+ ops->pio_out_emulated(c->dst.bytes, c->src.val, &c->dst.val, 1,
+ ctxt->vcpu);
+ c->dst.type = OP_NONE; /* Disable writeback. */
break;
case 0xf4: /* hlt */
ctxt->vcpu->arch.halt_request = 1;
@@ -2350,16 +2908,15 @@ special_insn:
c->dst.type = OP_NONE; /* Disable writeback. */
break;
case 0xf6 ... 0xf7: /* Grp3 */
- rc = emulate_grp3(ctxt, ops);
- if (rc != 0)
- goto done;
+ if (!emulate_grp3(ctxt, ops))
+ goto cannot_emulate;
break;
case 0xf8: /* clc */
ctxt->eflags &= ~EFLG_CF;
c->dst.type = OP_NONE; /* Disable writeback. */
break;
case 0xfa: /* cli */
- if (emulator_bad_iopl(ctxt))
+ if (emulator_bad_iopl(ctxt, ops))
kvm_inject_gp(ctxt->vcpu, 0);
else {
ctxt->eflags &= ~X86_EFLAGS_IF;
@@ -2367,10 +2924,10 @@ special_insn:
}
break;
case 0xfb: /* sti */
- if (emulator_bad_iopl(ctxt))
+ if (emulator_bad_iopl(ctxt, ops))
kvm_inject_gp(ctxt->vcpu, 0);
else {
- toggle_interruptibility(ctxt, X86_SHADOW_INT_STI);
+ toggle_interruptibility(ctxt, KVM_X86_SHADOW_INT_STI);
ctxt->eflags |= X86_EFLAGS_IF;
c->dst.type = OP_NONE; /* Disable writeback. */
}
@@ -2383,28 +2940,55 @@ special_insn:
ctxt->eflags |= EFLG_DF;
c->dst.type = OP_NONE; /* Disable writeback. */
break;
- case 0xfe ... 0xff: /* Grp4/Grp5 */
+ case 0xfe: /* Grp4 */
+ grp45:
rc = emulate_grp45(ctxt, ops);
- if (rc != 0)
+ if (rc != X86EMUL_CONTINUE)
goto done;
break;
+ case 0xff: /* Grp5 */
+ if (c->modrm_reg == 5)
+ goto jump_far;
+ goto grp45;
}
writeback:
rc = writeback(ctxt, ops);
- if (rc != 0)
+ if (rc != X86EMUL_CONTINUE)
goto done;
+ /*
+ * restore dst type in case the decoding will be reused
+ * (happens for string instructions)
+ */
+ c->dst.type = saved_dst_type;
+
+ if ((c->d & SrcMask) == SrcSI)
+ string_addr_inc(ctxt, seg_override_base(ctxt, c), VCPU_REGS_RSI,
+ &c->src);
+
+ if ((c->d & DstMask) == DstDI)
+ string_addr_inc(ctxt, es_base(ctxt), VCPU_REGS_RDI, &c->dst);
+
+ if (c->rep_prefix && (c->d & String)) {
+ struct read_cache *rc = &ctxt->decode.io_read;
+ register_address_increment(c, &c->regs[VCPU_REGS_RCX], -1);
+ /*
+ * Re-enter the guest when the pio read-ahead buffer is empty or,
+ * if it is not used, after every 1024 iterations.
+ */
+ if ((rc->end == 0 && !(c->regs[VCPU_REGS_RCX] & 0x3ff)) ||
+ (rc->end != 0 && rc->end == rc->pos))
+ ctxt->restart = false;
+ }
+
/* Commit shadow register state. */
memcpy(ctxt->vcpu->arch.regs, c->regs, sizeof c->regs);
kvm_rip_write(ctxt->vcpu, c->eip);
+ ops->set_rflags(ctxt->vcpu, ctxt->eflags);
done:
- if (rc == X86EMUL_UNHANDLEABLE) {
- c->eip = saved_eip;
- return -1;
- }
- return 0;
+ return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
twobyte_insn:
switch (c->b) {
@@ -2418,18 +3002,18 @@ twobyte_insn:
goto cannot_emulate;
rc = kvm_fix_hypercall(ctxt->vcpu);
- if (rc)
+ if (rc != X86EMUL_CONTINUE)
goto done;
/* Let the processor re-execute the fixed hypercall */
- c->eip = kvm_rip_read(ctxt->vcpu);
+ c->eip = ctxt->eip;
/* Disable writeback. */
c->dst.type = OP_NONE;
break;
case 2: /* lgdt */
rc = read_descriptor(ctxt, ops, c->src.ptr,
&size, &address, c->op_bytes);
- if (rc)
+ if (rc != X86EMUL_CONTINUE)
goto done;
realmode_lgdt(ctxt->vcpu, size, address);
/* Disable writeback. */
@@ -2440,7 +3024,7 @@ twobyte_insn:
switch (c->modrm_rm) {
case 1:
rc = kvm_fix_hypercall(ctxt->vcpu);
- if (rc)
+ if (rc != X86EMUL_CONTINUE)
goto done;
break;
default:
@@ -2450,7 +3034,7 @@ twobyte_insn:
rc = read_descriptor(ctxt, ops, c->src.ptr,
&size, &address,
c->op_bytes);
- if (rc)
+ if (rc != X86EMUL_CONTINUE)
goto done;
realmode_lidt(ctxt->vcpu, size, address);
}
@@ -2459,15 +3043,18 @@ twobyte_insn:
break;
case 4: /* smsw */
c->dst.bytes = 2;
- c->dst.val = realmode_get_cr(ctxt->vcpu, 0);
+ c->dst.val = ops->get_cr(0, ctxt->vcpu);
break;
case 6: /* lmsw */
- realmode_lmsw(ctxt->vcpu, (u16)c->src.val,
- &ctxt->eflags);
+ ops->set_cr(0, (ops->get_cr(0, ctxt->vcpu) & ~0x0ful) |
+ (c->src.val & 0x0f), ctxt->vcpu);
c->dst.type = OP_NONE;
break;
+ case 5: /* not defined */
+ kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
+ goto done;
case 7: /* invlpg*/
- emulate_invlpg(ctxt->vcpu, memop);
+ emulate_invlpg(ctxt->vcpu, c->modrm_ea);
/* Disable writeback. */
c->dst.type = OP_NONE;
break;
@@ -2493,54 +3080,54 @@ twobyte_insn:
c->dst.type = OP_NONE;
break;
case 0x20: /* mov cr, reg */
- if (c->modrm_mod != 3)
- goto cannot_emulate;
- c->regs[c->modrm_rm] =
- realmode_get_cr(ctxt->vcpu, c->modrm_reg);
+ switch (c->modrm_reg) {
+ case 1:
+ case 5 ... 7:
+ case 9 ... 15:
+ kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
+ goto done;
+ }
+ c->regs[c->modrm_rm] = ops->get_cr(c->modrm_reg, ctxt->vcpu);
c->dst.type = OP_NONE; /* no writeback */
break;
case 0x21: /* mov from dr to reg */
- if (c->modrm_mod != 3)
- goto cannot_emulate;
- rc = emulator_get_dr(ctxt, c->modrm_reg, &c->regs[c->modrm_rm]);
- if (rc)
- goto cannot_emulate;
+ if ((ops->get_cr(4, ctxt->vcpu) & X86_CR4_DE) &&
+ (c->modrm_reg == 4 || c->modrm_reg == 5)) {
+ kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
+ goto done;
+ }
+ emulator_get_dr(ctxt, c->modrm_reg, &c->regs[c->modrm_rm]);
c->dst.type = OP_NONE; /* no writeback */
break;
case 0x22: /* mov reg, cr */
- if (c->modrm_mod != 3)
- goto cannot_emulate;
- realmode_set_cr(ctxt->vcpu,
- c->modrm_reg, c->modrm_val, &ctxt->eflags);
+ ops->set_cr(c->modrm_reg, c->modrm_val, ctxt->vcpu);
c->dst.type = OP_NONE;
break;
case 0x23: /* mov from reg to dr */
- if (c->modrm_mod != 3)
- goto cannot_emulate;
- rc = emulator_set_dr(ctxt, c->modrm_reg,
- c->regs[c->modrm_rm]);
- if (rc)
- goto cannot_emulate;
+ if ((ops->get_cr(4, ctxt->vcpu) & X86_CR4_DE) &&
+ (c->modrm_reg == 4 || c->modrm_reg == 5)) {
+ kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
+ goto done;
+ }
+ emulator_set_dr(ctxt, c->modrm_reg, c->regs[c->modrm_rm]);
c->dst.type = OP_NONE; /* no writeback */
break;
case 0x30:
/* wrmsr */
msr_data = (u32)c->regs[VCPU_REGS_RAX]
| ((u64)c->regs[VCPU_REGS_RDX] << 32);
- rc = kvm_set_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], msr_data);
- if (rc) {
+ if (kvm_set_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], msr_data)) {
kvm_inject_gp(ctxt->vcpu, 0);
- c->eip = kvm_rip_read(ctxt->vcpu);
+ goto done;
}
rc = X86EMUL_CONTINUE;
c->dst.type = OP_NONE;
break;
case 0x32:
/* rdmsr */
- rc = kvm_get_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], &msr_data);
- if (rc) {
+ if (kvm_get_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], &msr_data)) {
kvm_inject_gp(ctxt->vcpu, 0);
- c->eip = kvm_rip_read(ctxt->vcpu);
+ goto done;
} else {
c->regs[VCPU_REGS_RAX] = (u32)msr_data;
c->regs[VCPU_REGS_RDX] = msr_data >> 32;
@@ -2577,7 +3164,7 @@ twobyte_insn:
break;
case 0xa1: /* pop fs */
rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_FS);
- if (rc != 0)
+ if (rc != X86EMUL_CONTINUE)
goto done;
break;
case 0xa3:
@@ -2596,7 +3183,7 @@ twobyte_insn:
break;
case 0xa9: /* pop gs */
rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_GS);
- if (rc != 0)
+ if (rc != X86EMUL_CONTINUE)
goto done;
break;
case 0xab:
@@ -2668,16 +3255,14 @@ twobyte_insn:
(u64) c->src.val;
break;
case 0xc7: /* Grp9 (cmpxchg8b) */
- rc = emulate_grp9(ctxt, ops, memop);
- if (rc != 0)
+ rc = emulate_grp9(ctxt, ops);
+ if (rc != X86EMUL_CONTINUE)
goto done;
- c->dst.type = OP_NONE;
break;
}
goto writeback;
cannot_emulate:
DPRINTF("Cannot emulate %02x\n", c->b);
- c->eip = saved_eip;
return -1;
}
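Aside (illustrative sketch, not part of the patch; the helper name is hypothetical): the rep-prefix restart check added in the writeback path above boils down to the following condition, where pos/end are the bounds of the pio read-ahead cache in ctxt->decode.io_read.

/* When does a rep-prefixed string instruction bounce back to the guest? */
static bool reenter_guest(unsigned long rcx, unsigned long pos, unsigned long end)
{
	if (end == 0)				/* read-ahead cache unused */
		return (rcx & 0x3ff) == 0;	/* re-enter every 1024 iterations */
	return pos == end;			/* cache drained: exit to refill it */
}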
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c
index a790fa128a9f..93825ff3338f 100644
--- a/arch/x86/kvm/i8259.c
+++ b/arch/x86/kvm/i8259.c
@@ -33,6 +33,29 @@
#include <linux/kvm_host.h>
#include "trace.h"
+static void pic_lock(struct kvm_pic *s)
+ __acquires(&s->lock)
+{
+ raw_spin_lock(&s->lock);
+}
+
+static void pic_unlock(struct kvm_pic *s)
+ __releases(&s->lock)
+{
+ bool wakeup = s->wakeup_needed;
+ struct kvm_vcpu *vcpu;
+
+ s->wakeup_needed = false;
+
+ raw_spin_unlock(&s->lock);
+
+ if (wakeup) {
+ vcpu = s->kvm->bsp_vcpu;
+ if (vcpu)
+ kvm_vcpu_kick(vcpu);
+ }
+}
+
static void pic_clear_isr(struct kvm_kpic_state *s, int irq)
{
s->isr &= ~(1 << irq);
@@ -45,19 +68,19 @@ static void pic_clear_isr(struct kvm_kpic_state *s, int irq)
* Other interrupts may be delivered to the PIC while the lock is dropped, but
* it should be safe since the PIC state is already updated at this stage.
*/
- raw_spin_unlock(&s->pics_state->lock);
+ pic_unlock(s->pics_state);
kvm_notify_acked_irq(s->pics_state->kvm, SELECT_PIC(irq), irq);
- raw_spin_lock(&s->pics_state->lock);
+ pic_lock(s->pics_state);
}
void kvm_pic_clear_isr_ack(struct kvm *kvm)
{
struct kvm_pic *s = pic_irqchip(kvm);
- raw_spin_lock(&s->lock);
+ pic_lock(s);
s->pics[0].isr_ack = 0xff;
s->pics[1].isr_ack = 0xff;
- raw_spin_unlock(&s->lock);
+ pic_unlock(s);
}
/*
@@ -158,9 +181,9 @@ static void pic_update_irq(struct kvm_pic *s)
void kvm_pic_update_irq(struct kvm_pic *s)
{
- raw_spin_lock(&s->lock);
+ pic_lock(s);
pic_update_irq(s);
- raw_spin_unlock(&s->lock);
+ pic_unlock(s);
}
int kvm_pic_set_irq(void *opaque, int irq, int level)
@@ -168,14 +191,14 @@ int kvm_pic_set_irq(void *opaque, int irq, int level)
struct kvm_pic *s = opaque;
int ret = -1;
- raw_spin_lock(&s->lock);
+ pic_lock(s);
if (irq >= 0 && irq < PIC_NUM_PINS) {
ret = pic_set_irq1(&s->pics[irq >> 3], irq & 7, level);
pic_update_irq(s);
trace_kvm_pic_set_irq(irq >> 3, irq & 7, s->pics[irq >> 3].elcr,
s->pics[irq >> 3].imr, ret == 0);
}
- raw_spin_unlock(&s->lock);
+ pic_unlock(s);
return ret;
}
@@ -205,7 +228,7 @@ int kvm_pic_read_irq(struct kvm *kvm)
int irq, irq2, intno;
struct kvm_pic *s = pic_irqchip(kvm);
- raw_spin_lock(&s->lock);
+ pic_lock(s);
irq = pic_get_irq(&s->pics[0]);
if (irq >= 0) {
pic_intack(&s->pics[0], irq);
@@ -230,7 +253,7 @@ int kvm_pic_read_irq(struct kvm *kvm)
intno = s->pics[0].irq_base + irq;
}
pic_update_irq(s);
- raw_spin_unlock(&s->lock);
+ pic_unlock(s);
return intno;
}
@@ -444,7 +467,7 @@ static int picdev_write(struct kvm_io_device *this,
printk(KERN_ERR "PIC: non byte write\n");
return 0;
}
- raw_spin_lock(&s->lock);
+ pic_lock(s);
switch (addr) {
case 0x20:
case 0x21:
@@ -457,7 +480,7 @@ static int picdev_write(struct kvm_io_device *this,
elcr_ioport_write(&s->pics[addr & 1], addr, data);
break;
}
- raw_spin_unlock(&s->lock);
+ pic_unlock(s);
return 0;
}
@@ -474,7 +497,7 @@ static int picdev_read(struct kvm_io_device *this,
printk(KERN_ERR "PIC: non byte read\n");
return 0;
}
- raw_spin_lock(&s->lock);
+ pic_lock(s);
switch (addr) {
case 0x20:
case 0x21:
@@ -488,7 +511,7 @@ static int picdev_read(struct kvm_io_device *this,
break;
}
*(unsigned char *)val = data;
- raw_spin_unlock(&s->lock);
+ pic_unlock(s);
return 0;
}
@@ -505,7 +528,7 @@ static void pic_irq_request(void *opaque, int level)
s->output = level;
if (vcpu && level && (s->pics[0].isr_ack & (1 << irq))) {
s->pics[0].isr_ack &= ~(1 << irq);
- kvm_vcpu_kick(vcpu);
+ s->wakeup_needed = true;
}
}
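Aside (illustrative sketch, not part of the patch; struct and helper names are hypothetical): pic_lock()/pic_unlock() above defer the vcpu kick until the raw spinlock has been dropped, so pic_irq_request() only records that a wakeup is needed. The pattern in isolation:

struct example_dev {
	raw_spinlock_t lock;
	bool wakeup_needed;
};

static void example_unlock(struct example_dev *d)
{
	bool wakeup = d->wakeup_needed;

	d->wakeup_needed = false;
	raw_spin_unlock(&d->lock);	/* release the raw spinlock first ... */
	if (wakeup)
		kick_waiter(d);		/* ... then kick (here: kvm_vcpu_kick() on the BSP vcpu) */
}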
diff --git a/arch/x86/kvm/irq.h b/arch/x86/kvm/irq.h
index 34b15915754d..cd1f362f413d 100644
--- a/arch/x86/kvm/irq.h
+++ b/arch/x86/kvm/irq.h
@@ -63,6 +63,7 @@ struct kvm_kpic_state {
struct kvm_pic {
raw_spinlock_t lock;
+ bool wakeup_needed;
unsigned pending_acks;
struct kvm *kvm;
struct kvm_kpic_state pics[2]; /* 0 is master pic, 1 is slave pic */
diff --git a/arch/x86/kvm/kvm_timer.h b/arch/x86/kvm/kvm_timer.h
index 55c7524dda54..64bc6ea78d90 100644
--- a/arch/x86/kvm/kvm_timer.h
+++ b/arch/x86/kvm/kvm_timer.h
@@ -10,9 +10,7 @@ struct kvm_timer {
};
struct kvm_timer_ops {
- bool (*is_periodic)(struct kvm_timer *);
+ bool (*is_periodic)(struct kvm_timer *);
};
-
enum hrtimer_restart kvm_timer_fn(struct hrtimer *data);
-
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 19a8906bcaa2..81563e76e28f 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -148,7 +148,6 @@ module_param(oos_shadow, bool, 0644);
#include <trace/events/kvm.h>
-#undef TRACE_INCLUDE_FILE
#define CREATE_TRACE_POINTS
#include "mmutrace.h"
@@ -174,12 +173,7 @@ struct kvm_shadow_walk_iterator {
shadow_walk_okay(&(_walker)); \
shadow_walk_next(&(_walker)))
-
-struct kvm_unsync_walk {
- int (*entry) (struct kvm_mmu_page *sp, struct kvm_unsync_walk *walk);
-};
-
-typedef int (*mmu_parent_walk_fn) (struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp);
+typedef int (*mmu_parent_walk_fn) (struct kvm_mmu_page *sp);
static struct kmem_cache *pte_chain_cache;
static struct kmem_cache *rmap_desc_cache;
@@ -223,7 +217,7 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
-static int is_write_protection(struct kvm_vcpu *vcpu)
+static bool is_write_protection(struct kvm_vcpu *vcpu)
{
return kvm_read_cr0_bits(vcpu, X86_CR0_WP);
}
@@ -327,7 +321,6 @@ static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
page = alloc_page(GFP_KERNEL);
if (!page)
return -ENOMEM;
- set_page_private(page, 0);
cache->objects[cache->nobjs++] = page_address(page);
}
return 0;
@@ -438,9 +431,9 @@ static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
int i;
gfn = unalias_gfn(kvm, gfn);
+ slot = gfn_to_memslot_unaliased(kvm, gfn);
for (i = PT_DIRECTORY_LEVEL;
i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
- slot = gfn_to_memslot_unaliased(kvm, gfn);
write_count = slot_largepage_idx(gfn, slot, i);
*write_count -= 1;
WARN_ON(*write_count < 0);
@@ -654,7 +647,6 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
{
struct kvm_rmap_desc *desc;
- struct kvm_rmap_desc *prev_desc;
u64 *prev_spte;
int i;
@@ -666,7 +658,6 @@ static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
return NULL;
}
desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
- prev_desc = NULL;
prev_spte = NULL;
while (desc) {
for (i = 0; i < RMAP_EXT && desc->sptes[i]; ++i) {
@@ -794,7 +785,7 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
int retval = 0;
struct kvm_memslots *slots;
- slots = rcu_dereference(kvm->memslots);
+ slots = kvm_memslots(kvm);
for (i = 0; i < slots->nmemslots; i++) {
struct kvm_memory_slot *memslot = &slots->memslots[i];
@@ -925,7 +916,6 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
- INIT_LIST_HEAD(&sp->oos_link);
bitmap_zero(sp->slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
sp->multimapped = 0;
sp->parent_pte = parent_pte;
@@ -1009,8 +999,7 @@ static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
}
-static void mmu_parent_walk(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
- mmu_parent_walk_fn fn)
+static void mmu_parent_walk(struct kvm_mmu_page *sp, mmu_parent_walk_fn fn)
{
struct kvm_pte_chain *pte_chain;
struct hlist_node *node;
@@ -1019,8 +1008,8 @@ static void mmu_parent_walk(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
if (!sp->multimapped && sp->parent_pte) {
parent_sp = page_header(__pa(sp->parent_pte));
- fn(vcpu, parent_sp);
- mmu_parent_walk(vcpu, parent_sp, fn);
+ fn(parent_sp);
+ mmu_parent_walk(parent_sp, fn);
return;
}
hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
@@ -1028,8 +1017,8 @@ static void mmu_parent_walk(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
if (!pte_chain->parent_ptes[i])
break;
parent_sp = page_header(__pa(pte_chain->parent_ptes[i]));
- fn(vcpu, parent_sp);
- mmu_parent_walk(vcpu, parent_sp, fn);
+ fn(parent_sp);
+ mmu_parent_walk(parent_sp, fn);
}
}
@@ -1066,16 +1055,15 @@ static void kvm_mmu_update_parents_unsync(struct kvm_mmu_page *sp)
}
}
-static int unsync_walk_fn(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
+static int unsync_walk_fn(struct kvm_mmu_page *sp)
{
kvm_mmu_update_parents_unsync(sp);
return 1;
}
-static void kvm_mmu_mark_parents_unsync(struct kvm_vcpu *vcpu,
- struct kvm_mmu_page *sp)
+static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp)
{
- mmu_parent_walk(vcpu, sp, unsync_walk_fn);
+ mmu_parent_walk(sp, unsync_walk_fn);
kvm_mmu_update_parents_unsync(sp);
}
@@ -1201,6 +1189,7 @@ static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
WARN_ON(!sp->unsync);
+ trace_kvm_mmu_sync_page(sp);
sp->unsync = 0;
--kvm->stat.mmu_unsync;
}
@@ -1209,12 +1198,11 @@ static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp);
static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
- if (sp->role.glevels != vcpu->arch.mmu.root_level) {
+ if (sp->role.cr4_pae != !!is_pae(vcpu)) {
kvm_mmu_zap_page(vcpu->kvm, sp);
return 1;
}
- trace_kvm_mmu_sync_page(sp);
if (rmap_write_protect(vcpu->kvm, sp->gfn))
kvm_flush_remote_tlbs(vcpu->kvm);
kvm_unlink_unsync_page(vcpu->kvm, sp);
@@ -1331,6 +1319,8 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
role = vcpu->arch.mmu.base_role;
role.level = level;
role.direct = direct;
+ if (role.direct)
+ role.cr4_pae = 0;
role.access = access;
if (vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
@@ -1351,7 +1341,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
mmu_page_add_parent_pte(vcpu, sp, parent_pte);
if (sp->unsync_children) {
set_bit(KVM_REQ_MMU_SYNC, &vcpu->requests);
- kvm_mmu_mark_parents_unsync(vcpu, sp);
+ kvm_mmu_mark_parents_unsync(sp);
}
trace_kvm_mmu_get_page(sp, false);
return sp;
@@ -1573,13 +1563,14 @@ static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
r = 0;
index = kvm_page_table_hashfn(gfn);
bucket = &kvm->arch.mmu_page_hash[index];
+restart:
hlist_for_each_entry_safe(sp, node, n, bucket, hash_link)
if (sp->gfn == gfn && !sp->role.direct) {
pgprintk("%s: gfn %lx role %x\n", __func__, gfn,
sp->role.word);
r = 1;
if (kvm_mmu_zap_page(kvm, sp))
- n = bucket->first;
+ goto restart;
}
return r;
}
@@ -1593,13 +1584,14 @@ static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
index = kvm_page_table_hashfn(gfn);
bucket = &kvm->arch.mmu_page_hash[index];
+restart:
hlist_for_each_entry_safe(sp, node, nn, bucket, hash_link) {
if (sp->gfn == gfn && !sp->role.direct
&& !sp->role.invalid) {
pgprintk("%s: zap %lx %x\n",
__func__, gfn, sp->role.word);
if (kvm_mmu_zap_page(kvm, sp))
- nn = bucket->first;
+ goto restart;
}
}
}
@@ -1626,20 +1618,6 @@ static void mmu_convert_notrap(struct kvm_mmu_page *sp)
}
}
-struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
-{
- struct page *page;
-
- gpa_t gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
-
- if (gpa == UNMAPPED_GVA)
- return NULL;
-
- page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
-
- return page;
-}
-
/*
* The function is based on mtrr_type_lookup() in
* arch/x86/kernel/cpu/mtrr/generic.c
@@ -1752,7 +1730,6 @@ static int kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
struct kvm_mmu_page *s;
struct hlist_node *node, *n;
- trace_kvm_mmu_unsync_page(sp);
index = kvm_page_table_hashfn(sp->gfn);
bucket = &vcpu->kvm->arch.mmu_page_hash[index];
/* don't unsync if pagetable is shadowed with multiple roles */
@@ -1762,10 +1739,11 @@ static int kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
if (s->role.word != sp->role.word)
return 1;
}
+ trace_kvm_mmu_unsync_page(sp);
++vcpu->kvm->stat.mmu_unsync;
sp->unsync = 1;
- kvm_mmu_mark_parents_unsync(vcpu, sp);
+ kvm_mmu_mark_parents_unsync(sp);
mmu_convert_notrap(sp);
return 0;
@@ -2081,21 +2059,23 @@ static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
hpa_t root = vcpu->arch.mmu.root_hpa;
ASSERT(!VALID_PAGE(root));
- if (tdp_enabled)
- direct = 1;
if (mmu_check_root(vcpu, root_gfn))
return 1;
+ if (tdp_enabled) {
+ direct = 1;
+ root_gfn = 0;
+ }
+ spin_lock(&vcpu->kvm->mmu_lock);
sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
PT64_ROOT_LEVEL, direct,
ACC_ALL, NULL);
root = __pa(sp->spt);
++sp->root_count;
+ spin_unlock(&vcpu->kvm->mmu_lock);
vcpu->arch.mmu.root_hpa = root;
return 0;
}
direct = !is_paging(vcpu);
- if (tdp_enabled)
- direct = 1;
for (i = 0; i < 4; ++i) {
hpa_t root = vcpu->arch.mmu.pae_root[i];
@@ -2111,11 +2091,18 @@ static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
root_gfn = 0;
if (mmu_check_root(vcpu, root_gfn))
return 1;
+ if (tdp_enabled) {
+ direct = 1;
+ root_gfn = i << 30;
+ }
+ spin_lock(&vcpu->kvm->mmu_lock);
sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
PT32_ROOT_LEVEL, direct,
ACC_ALL, NULL);
root = __pa(sp->spt);
++sp->root_count;
+ spin_unlock(&vcpu->kvm->mmu_lock);
+
vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
}
vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
@@ -2299,13 +2286,19 @@ static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, int level)
/* no rsvd bits for 2 level 4K page table entries */
context->rsvd_bits_mask[0][1] = 0;
context->rsvd_bits_mask[0][0] = 0;
+ context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
+
+ if (!is_pse(vcpu)) {
+ context->rsvd_bits_mask[1][1] = 0;
+ break;
+ }
+
if (is_cpuid_PSE36())
/* 36bits PSE 4MB page */
context->rsvd_bits_mask[1][1] = rsvd_bits(17, 21);
else
/* 32 bits PSE 4MB page */
context->rsvd_bits_mask[1][1] = rsvd_bits(13, 21);
- context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[1][0];
break;
case PT32E_ROOT_LEVEL:
context->rsvd_bits_mask[0][2] =
@@ -2318,7 +2311,7 @@ static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, int level)
context->rsvd_bits_mask[1][1] = exb_bit_rsvd |
rsvd_bits(maxphyaddr, 62) |
rsvd_bits(13, 20); /* large page */
- context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[1][0];
+ context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
break;
case PT64_ROOT_LEVEL:
context->rsvd_bits_mask[0][3] = exb_bit_rsvd |
@@ -2336,7 +2329,7 @@ static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, int level)
context->rsvd_bits_mask[1][1] = exb_bit_rsvd |
rsvd_bits(maxphyaddr, 51) |
rsvd_bits(13, 20); /* large page */
- context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[1][0];
+ context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
break;
}
}
@@ -2438,7 +2431,8 @@ static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
else
r = paging32_init_context(vcpu);
- vcpu->arch.mmu.base_role.glevels = vcpu->arch.mmu.root_level;
+ vcpu->arch.mmu.base_role.cr4_pae = !!is_pae(vcpu);
+ vcpu->arch.mmu.base_role.cr0_wp = is_write_protection(vcpu);
return r;
}
@@ -2478,7 +2472,9 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
goto out;
spin_lock(&vcpu->kvm->mmu_lock);
kvm_mmu_free_some_pages(vcpu);
+ spin_unlock(&vcpu->kvm->mmu_lock);
r = mmu_alloc_roots(vcpu);
+ spin_lock(&vcpu->kvm->mmu_lock);
mmu_sync_roots(vcpu);
spin_unlock(&vcpu->kvm->mmu_lock);
if (r)
@@ -2527,7 +2523,7 @@ static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
}
++vcpu->kvm->stat.mmu_pte_updated;
- if (sp->role.glevels == PT32_ROOT_LEVEL)
+ if (!sp->role.cr4_pae)
paging32_update_pte(vcpu, sp, spte, new);
else
paging64_update_pte(vcpu, sp, spte, new);
@@ -2562,36 +2558,11 @@ static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
}
static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
- const u8 *new, int bytes)
+ u64 gpte)
{
gfn_t gfn;
- int r;
- u64 gpte = 0;
pfn_t pfn;
- if (bytes != 4 && bytes != 8)
- return;
-
- /*
- * Assume that the pte write on a page table of the same type
- * as the current vcpu paging mode. This is nearly always true
- * (might be false while changing modes). Note it is verified later
- * by update_pte().
- */
- if (is_pae(vcpu)) {
- /* Handle a 32-bit guest writing two halves of a 64-bit gpte */
- if ((bytes == 4) && (gpa % 4 == 0)) {
- r = kvm_read_guest(vcpu->kvm, gpa & ~(u64)7, &gpte, 8);
- if (r)
- return;
- memcpy((void *)&gpte + (gpa % 8), new, 4);
- } else if ((bytes == 8) && (gpa % 8 == 0)) {
- memcpy((void *)&gpte, new, 8);
- }
- } else {
- if ((bytes == 4) && (gpa % 4 == 0))
- memcpy((void *)&gpte, new, 4);
- }
if (!is_present_gpte(gpte))
return;
gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
@@ -2640,10 +2611,46 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
int flooded = 0;
int npte;
int r;
+ int invlpg_counter;
pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
- mmu_guess_page_from_pte_write(vcpu, gpa, new, bytes);
+
+ invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter);
+
+ /*
+ * Assume that the pte write is on a page table of the same type
+ * as the current vcpu paging mode. This is nearly always true
+ * (might be false while changing modes). Note it is verified later
+ * by update_pte().
+ */
+ if ((is_pae(vcpu) && bytes == 4) || !new) {
+ /* Handle a 32-bit guest writing two halves of a 64-bit gpte */
+ if (is_pae(vcpu)) {
+ gpa &= ~(gpa_t)7;
+ bytes = 8;
+ }
+ r = kvm_read_guest(vcpu->kvm, gpa, &gentry, min(bytes, 8));
+ if (r)
+ gentry = 0;
+ new = (const u8 *)&gentry;
+ }
+
+ switch (bytes) {
+ case 4:
+ gentry = *(const u32 *)new;
+ break;
+ case 8:
+ gentry = *(const u64 *)new;
+ break;
+ default:
+ gentry = 0;
+ break;
+ }
+
+ mmu_guess_page_from_pte_write(vcpu, gpa, gentry);
spin_lock(&vcpu->kvm->mmu_lock);
+ if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
+ gentry = 0;
kvm_mmu_access_page(vcpu, gfn);
kvm_mmu_free_some_pages(vcpu);
++vcpu->kvm->stat.mmu_pte_write;
@@ -2662,10 +2669,12 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
}
index = kvm_page_table_hashfn(gfn);
bucket = &vcpu->kvm->arch.mmu_page_hash[index];
+
+restart:
hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) {
if (sp->gfn != gfn || sp->role.direct || sp->role.invalid)
continue;
- pte_size = sp->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
+ pte_size = sp->role.cr4_pae ? 8 : 4;
misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
misaligned |= bytes < 4;
if (misaligned || flooded) {
@@ -2682,14 +2691,14 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
pgprintk("misaligned: gpa %llx bytes %d role %x\n",
gpa, bytes, sp->role.word);
if (kvm_mmu_zap_page(vcpu->kvm, sp))
- n = bucket->first;
+ goto restart;
++vcpu->kvm->stat.mmu_flooded;
continue;
}
page_offset = offset;
level = sp->role.level;
npte = 1;
- if (sp->role.glevels == PT32_ROOT_LEVEL) {
+ if (!sp->role.cr4_pae) {
page_offset <<= 1; /* 32->64 */
/*
* A 32-bit pde maps 4MB while the shadow pdes map
@@ -2707,20 +2716,11 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
continue;
}
spte = &sp->spt[page_offset / sizeof(*spte)];
- if ((gpa & (pte_size - 1)) || (bytes < pte_size)) {
- gentry = 0;
- r = kvm_read_guest_atomic(vcpu->kvm,
- gpa & ~(u64)(pte_size - 1),
- &gentry, pte_size);
- new = (const void *)&gentry;
- if (r < 0)
- new = NULL;
- }
while (npte--) {
entry = *spte;
mmu_pte_write_zap_pte(vcpu, sp, spte);
- if (new)
- mmu_pte_write_new_pte(vcpu, sp, spte, new);
+ if (gentry)
+ mmu_pte_write_new_pte(vcpu, sp, spte, &gentry);
mmu_pte_write_flush_tlb(vcpu, entry, *spte);
++spte;
}
@@ -2900,22 +2900,23 @@ void kvm_mmu_zap_all(struct kvm *kvm)
struct kvm_mmu_page *sp, *node;
spin_lock(&kvm->mmu_lock);
+restart:
list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link)
if (kvm_mmu_zap_page(kvm, sp))
- node = container_of(kvm->arch.active_mmu_pages.next,
- struct kvm_mmu_page, link);
+ goto restart;
+
spin_unlock(&kvm->mmu_lock);
kvm_flush_remote_tlbs(kvm);
}
-static void kvm_mmu_remove_one_alloc_mmu_page(struct kvm *kvm)
+static int kvm_mmu_remove_some_alloc_mmu_pages(struct kvm *kvm)
{
struct kvm_mmu_page *page;
page = container_of(kvm->arch.active_mmu_pages.prev,
struct kvm_mmu_page, link);
- kvm_mmu_zap_page(kvm, page);
+ return kvm_mmu_zap_page(kvm, page) + 1;
}
static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask)
@@ -2927,7 +2928,7 @@ static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask)
spin_lock(&kvm_lock);
list_for_each_entry(kvm, &vm_list, vm_list) {
- int npages, idx;
+ int npages, idx, freed_pages;
idx = srcu_read_lock(&kvm->srcu);
spin_lock(&kvm->mmu_lock);
@@ -2935,8 +2936,8 @@ static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask)
kvm->arch.n_free_mmu_pages;
cache_count += npages;
if (!kvm_freed && nr_to_scan > 0 && npages > 0) {
- kvm_mmu_remove_one_alloc_mmu_page(kvm);
- cache_count--;
+ freed_pages = kvm_mmu_remove_some_alloc_mmu_pages(kvm);
+ cache_count -= freed_pages;
kvm_freed = kvm;
}
nr_to_scan--;
@@ -3011,7 +3012,8 @@ unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
unsigned int nr_pages = 0;
struct kvm_memslots *slots;
- slots = rcu_dereference(kvm->memslots);
+ slots = kvm_memslots(kvm);
+
for (i = 0; i < slots->nmemslots; i++)
nr_pages += slots->memslots[i].npages;
@@ -3174,8 +3176,7 @@ static gva_t canonicalize(gva_t gva)
}
-typedef void (*inspect_spte_fn) (struct kvm *kvm, struct kvm_mmu_page *sp,
- u64 *sptep);
+typedef void (*inspect_spte_fn) (struct kvm *kvm, u64 *sptep);
static void __mmu_spte_walk(struct kvm *kvm, struct kvm_mmu_page *sp,
inspect_spte_fn fn)
@@ -3191,7 +3192,7 @@ static void __mmu_spte_walk(struct kvm *kvm, struct kvm_mmu_page *sp,
child = page_header(ent & PT64_BASE_ADDR_MASK);
__mmu_spte_walk(kvm, child, fn);
} else
- fn(kvm, sp, &sp->spt[i]);
+ fn(kvm, &sp->spt[i]);
}
}
}
@@ -3282,11 +3283,13 @@ static void audit_mappings(struct kvm_vcpu *vcpu)
static int count_rmaps(struct kvm_vcpu *vcpu)
{
+ struct kvm *kvm = vcpu->kvm;
+ struct kvm_memslots *slots;
int nmaps = 0;
int i, j, k, idx;
idx = srcu_read_lock(&kvm->srcu);
- slots = rcu_dereference(kvm->memslots);
+ slots = kvm_memslots(kvm);
for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
struct kvm_memory_slot *m = &slots->memslots[i];
struct kvm_rmap_desc *d;
@@ -3315,7 +3318,7 @@ static int count_rmaps(struct kvm_vcpu *vcpu)
return nmaps;
}
-void inspect_spte_has_rmap(struct kvm *kvm, struct kvm_mmu_page *sp, u64 *sptep)
+void inspect_spte_has_rmap(struct kvm *kvm, u64 *sptep)
{
unsigned long *rmapp;
struct kvm_mmu_page *rev_sp;
@@ -3331,14 +3334,14 @@ void inspect_spte_has_rmap(struct kvm *kvm, struct kvm_mmu_page *sp, u64 *sptep)
printk(KERN_ERR "%s: no memslot for gfn %ld\n",
audit_msg, gfn);
printk(KERN_ERR "%s: index %ld of sp (gfn=%lx)\n",
- audit_msg, sptep - rev_sp->spt,
+ audit_msg, (long int)(sptep - rev_sp->spt),
rev_sp->gfn);
dump_stack();
return;
}
rmapp = gfn_to_rmap(kvm, rev_sp->gfns[sptep - rev_sp->spt],
- is_large_pte(*sptep));
+ rev_sp->role.level);
if (!*rmapp) {
if (!printk_ratelimit())
return;
@@ -3373,7 +3376,7 @@ static void check_writable_mappings_rmap(struct kvm_vcpu *vcpu)
continue;
if (!(ent & PT_WRITABLE_MASK))
continue;
- inspect_spte_has_rmap(vcpu->kvm, sp, &pt[i]);
+ inspect_spte_has_rmap(vcpu->kvm, &pt[i]);
}
}
return;
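Aside (illustrative, not part of the patch; the helper is hypothetical — kvm_mmu_pte_write() open-codes this): the new invlpg_counter acts as a sequence check. The counter is sampled before the guest pte is read, and once mmu_lock is held the prefetched entry is discarded if an emulated invlpg ran in between.

static u64 gentry_if_still_valid(struct kvm *kvm, int counter_before, u64 gentry)
{
	if (atomic_read(&kvm->arch.invlpg_counter) != counter_before)
		return 0;	/* raced with invlpg: only zap, do not prefill the spte */
	return gentry;
}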
diff --git a/arch/x86/kvm/mmutrace.h b/arch/x86/kvm/mmutrace.h
index 3e4a5c6ca2a9..42f07b1bfbc9 100644
--- a/arch/x86/kvm/mmutrace.h
+++ b/arch/x86/kvm/mmutrace.h
@@ -6,14 +6,12 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvmmmu
-#define TRACE_INCLUDE_PATH .
-#define TRACE_INCLUDE_FILE mmutrace
#define KVM_MMU_PAGE_FIELDS \
__field(__u64, gfn) \
__field(__u32, role) \
__field(__u32, root_count) \
- __field(__u32, unsync)
+ __field(bool, unsync)
#define KVM_MMU_PAGE_ASSIGN(sp) \
__entry->gfn = sp->gfn; \
@@ -30,14 +28,14 @@
\
role.word = __entry->role; \
\
- trace_seq_printf(p, "sp gfn %llx %u/%u q%u%s %s%s %spge" \
+ trace_seq_printf(p, "sp gfn %llx %u%s q%u%s %s%s" \
" %snxe root %u %s%c", \
- __entry->gfn, role.level, role.glevels, \
+ __entry->gfn, role.level, \
+ role.cr4_pae ? " pae" : "", \
role.quadrant, \
role.direct ? " direct" : "", \
access_str[role.access], \
role.invalid ? " invalid" : "", \
- role.cr4_pge ? "" : "!", \
role.nxe ? "" : "!", \
__entry->root_count, \
__entry->unsync ? "unsync" : "sync", 0); \
@@ -94,15 +92,15 @@ TRACE_EVENT(
TP_printk("pte %llx level %u", __entry->pte, __entry->level)
);
-/* We set a pte accessed bit */
-TRACE_EVENT(
- kvm_mmu_set_accessed_bit,
+DECLARE_EVENT_CLASS(kvm_mmu_set_bit_class,
+
TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),
+
TP_ARGS(table_gfn, index, size),
TP_STRUCT__entry(
__field(__u64, gpa)
- ),
+ ),
TP_fast_assign(
__entry->gpa = ((u64)table_gfn << PAGE_SHIFT)
@@ -112,22 +110,20 @@ TRACE_EVENT(
TP_printk("gpa %llx", __entry->gpa)
);
-/* We set a pte dirty bit */
-TRACE_EVENT(
- kvm_mmu_set_dirty_bit,
+/* We set a pte accessed bit */
+DEFINE_EVENT(kvm_mmu_set_bit_class, kvm_mmu_set_accessed_bit,
+
TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),
- TP_ARGS(table_gfn, index, size),
- TP_STRUCT__entry(
- __field(__u64, gpa)
- ),
+ TP_ARGS(table_gfn, index, size)
+);
- TP_fast_assign(
- __entry->gpa = ((u64)table_gfn << PAGE_SHIFT)
- + index * size;
- ),
+/* We set a pte dirty bit */
+DEFINE_EVENT(kvm_mmu_set_bit_class, kvm_mmu_set_dirty_bit,
- TP_printk("gpa %llx", __entry->gpa)
+ TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),
+
+ TP_ARGS(table_gfn, index, size)
);
TRACE_EVENT(
@@ -166,55 +162,45 @@ TRACE_EVENT(
__entry->created ? "new" : "existing")
);
-TRACE_EVENT(
- kvm_mmu_sync_page,
+DECLARE_EVENT_CLASS(kvm_mmu_page_class,
+
TP_PROTO(struct kvm_mmu_page *sp),
TP_ARGS(sp),
TP_STRUCT__entry(
KVM_MMU_PAGE_FIELDS
- ),
+ ),
TP_fast_assign(
KVM_MMU_PAGE_ASSIGN(sp)
- ),
+ ),
TP_printk("%s", KVM_MMU_PAGE_PRINTK())
);
-TRACE_EVENT(
- kvm_mmu_unsync_page,
+DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_sync_page,
TP_PROTO(struct kvm_mmu_page *sp),
- TP_ARGS(sp),
-
- TP_STRUCT__entry(
- KVM_MMU_PAGE_FIELDS
- ),
- TP_fast_assign(
- KVM_MMU_PAGE_ASSIGN(sp)
- ),
-
- TP_printk("%s", KVM_MMU_PAGE_PRINTK())
+ TP_ARGS(sp)
);
-TRACE_EVENT(
- kvm_mmu_zap_page,
+DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_unsync_page,
TP_PROTO(struct kvm_mmu_page *sp),
- TP_ARGS(sp),
- TP_STRUCT__entry(
- KVM_MMU_PAGE_FIELDS
- ),
+ TP_ARGS(sp)
+);
- TP_fast_assign(
- KVM_MMU_PAGE_ASSIGN(sp)
- ),
+DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_zap_page,
+ TP_PROTO(struct kvm_mmu_page *sp),
- TP_printk("%s", KVM_MMU_PAGE_PRINTK())
+ TP_ARGS(sp)
);
-
#endif /* _TRACE_KVMMMU_H */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE mmutrace
+
/* This part must be outside protection */
#include <trace/define_trace.h>
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 81eab9a50e6a..89d66ca4d87c 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -170,7 +170,7 @@ walk:
goto access_error;
#if PTTYPE == 64
- if (fetch_fault && is_nx(vcpu) && (pte & PT64_NX_MASK))
+ if (fetch_fault && (pte & PT64_NX_MASK))
goto access_error;
#endif
@@ -190,10 +190,10 @@ walk:
if ((walker->level == PT_PAGE_TABLE_LEVEL) ||
((walker->level == PT_DIRECTORY_LEVEL) &&
- (pte & PT_PAGE_SIZE_MASK) &&
+ is_large_pte(pte) &&
(PTTYPE == 64 || is_pse(vcpu))) ||
((walker->level == PT_PDPE_LEVEL) &&
- (pte & PT_PAGE_SIZE_MASK) &&
+ is_large_pte(pte) &&
is_long_mode(vcpu))) {
int lvl = walker->level;
@@ -258,11 +258,17 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
pt_element_t gpte;
unsigned pte_access;
pfn_t pfn;
+ u64 new_spte;
gpte = *(const pt_element_t *)pte;
if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK)) {
- if (!is_present_gpte(gpte))
- __set_spte(spte, shadow_notrap_nonpresent_pte);
+ if (!is_present_gpte(gpte)) {
+ if (page->unsync)
+ new_spte = shadow_trap_nonpresent_pte;
+ else
+ new_spte = shadow_notrap_nonpresent_pte;
+ __set_spte(spte, new_spte);
+ }
return;
}
pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
@@ -457,6 +463,7 @@ out_unlock:
static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
{
struct kvm_shadow_walk_iterator iterator;
+ gpa_t pte_gpa = -1;
int level;
u64 *sptep;
int need_flush = 0;
@@ -467,9 +474,16 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
level = iterator.level;
sptep = iterator.sptep;
- if (level == PT_PAGE_TABLE_LEVEL ||
- ((level == PT_DIRECTORY_LEVEL && is_large_pte(*sptep))) ||
- ((level == PT_PDPE_LEVEL && is_large_pte(*sptep)))) {
+ if (is_last_spte(*sptep, level)) {
+ struct kvm_mmu_page *sp = page_header(__pa(sptep));
+ int offset, shift;
+
+ shift = PAGE_SHIFT -
+ (PT_LEVEL_BITS - PT64_LEVEL_BITS) * level;
+ offset = sp->role.quadrant << shift;
+
+ pte_gpa = (sp->gfn << PAGE_SHIFT) + offset;
+ pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);
if (is_shadow_present_pte(*sptep)) {
rmap_remove(vcpu->kvm, sptep);
@@ -487,7 +501,17 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
if (need_flush)
kvm_flush_remote_tlbs(vcpu->kvm);
+
+ atomic_inc(&vcpu->kvm->arch.invlpg_counter);
+
spin_unlock(&vcpu->kvm->mmu_lock);
+
+ if (pte_gpa == -1)
+ return;
+
+ if (mmu_topup_memory_caches(vcpu))
+ return;
+ kvm_mmu_pte_write(vcpu, pte_gpa, NULL, sizeof(pt_element_t), 0);
}
static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,
@@ -551,12 +575,15 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
int i, offset, nr_present;
bool reset_host_protection;
+ gpa_t first_pte_gpa;
offset = nr_present = 0;
if (PTTYPE == 32)
offset = sp->role.quadrant << PT64_LEVEL_BITS;
+ first_pte_gpa = gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t);
+
for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
unsigned pte_access;
pt_element_t gpte;
@@ -566,8 +593,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
if (!is_shadow_present_pte(sp->spt[i]))
continue;
- pte_gpa = gfn_to_gpa(sp->gfn);
- pte_gpa += (i+offset) * sizeof(pt_element_t);
+ pte_gpa = first_pte_gpa + i * sizeof(pt_element_t);
if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
sizeof(pt_element_t)))
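Aside (worked example, not part of the patch): with 32-bit guest page tables (PTTYPE == 32, PT_LEVEL_BITS == 10, PT64_LEVEL_BITS == 9) at level 1, the shift used for pte_gpa above is 12 - (10 - 9) * 1 = 11. A shadow page holds 512 sptes while the guest page holds 1024 4-byte ptes, so sp->role.quadrant selects the half: quadrant 1 starts at byte offset 1 << 11 = 0x800 (guest pte index 512), and (sptep - sp->spt) * sizeof(pt_element_t) indexes within that half.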
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 2ba58206812a..96dc232bfc56 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -44,10 +44,11 @@ MODULE_LICENSE("GPL");
#define SEG_TYPE_LDT 2
#define SEG_TYPE_BUSY_TSS16 3
-#define SVM_FEATURE_NPT (1 << 0)
-#define SVM_FEATURE_LBRV (1 << 1)
-#define SVM_FEATURE_SVML (1 << 2)
-#define SVM_FEATURE_PAUSE_FILTER (1 << 10)
+#define SVM_FEATURE_NPT (1 << 0)
+#define SVM_FEATURE_LBRV (1 << 1)
+#define SVM_FEATURE_SVML (1 << 2)
+#define SVM_FEATURE_NRIP (1 << 3)
+#define SVM_FEATURE_PAUSE_FILTER (1 << 10)
#define NESTED_EXIT_HOST 0 /* Exit handled on host level */
#define NESTED_EXIT_DONE 1 /* Exit caused nested vmexit */
@@ -70,6 +71,7 @@ struct kvm_vcpu;
struct nested_state {
struct vmcb *hsave;
u64 hsave_msr;
+ u64 vm_cr_msr;
u64 vmcb;
/* These are the merged vectors */
@@ -77,6 +79,7 @@ struct nested_state {
/* gpa pointers to the real vectors */
u64 vmcb_msrpm;
+ u64 vmcb_iopm;
/* A VMEXIT is required but not yet emulated */
bool exit_required;
@@ -91,6 +94,9 @@ struct nested_state {
};
+#define MSRPM_OFFSETS 16
+static u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
+
struct vcpu_svm {
struct kvm_vcpu vcpu;
struct vmcb *vmcb;
@@ -110,13 +116,39 @@ struct vcpu_svm {
struct nested_state nested;
bool nmi_singlestep;
+
+ unsigned int3_injected;
+ unsigned long int3_rip;
+};
+
+#define MSR_INVALID 0xffffffffU
+
+static struct svm_direct_access_msrs {
+ u32 index; /* Index of the MSR */
+ bool always; /* True if intercept is always on */
+} direct_access_msrs[] = {
+ { .index = MSR_K6_STAR, .always = true },
+ { .index = MSR_IA32_SYSENTER_CS, .always = true },
+#ifdef CONFIG_X86_64
+ { .index = MSR_GS_BASE, .always = true },
+ { .index = MSR_FS_BASE, .always = true },
+ { .index = MSR_KERNEL_GS_BASE, .always = true },
+ { .index = MSR_LSTAR, .always = true },
+ { .index = MSR_CSTAR, .always = true },
+ { .index = MSR_SYSCALL_MASK, .always = true },
+#endif
+ { .index = MSR_IA32_LASTBRANCHFROMIP, .always = false },
+ { .index = MSR_IA32_LASTBRANCHTOIP, .always = false },
+ { .index = MSR_IA32_LASTINTFROMIP, .always = false },
+ { .index = MSR_IA32_LASTINTTOIP, .always = false },
+ { .index = MSR_INVALID, .always = false },
};
/* enable NPT for AMD64 and X86 with PAE */
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
static bool npt_enabled = true;
#else
-static bool npt_enabled = false;
+static bool npt_enabled;
#endif
static int npt = 1;
@@ -129,6 +161,7 @@ static void svm_flush_tlb(struct kvm_vcpu *vcpu);
static void svm_complete_interrupts(struct vcpu_svm *svm);
static int nested_svm_exit_handled(struct vcpu_svm *svm);
+static int nested_svm_intercept(struct vcpu_svm *svm);
static int nested_svm_vmexit(struct vcpu_svm *svm);
static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
bool has_error_code, u32 error_code);
@@ -163,8 +196,8 @@ static unsigned long iopm_base;
struct kvm_ldttss_desc {
u16 limit0;
u16 base0;
- unsigned base1 : 8, type : 5, dpl : 2, p : 1;
- unsigned limit1 : 4, zero0 : 3, g : 1, base2 : 8;
+ unsigned base1:8, type:5, dpl:2, p:1;
+ unsigned limit1:4, zero0:3, g:1, base2:8;
u32 base3;
u32 zero1;
} __attribute__((packed));
@@ -194,6 +227,27 @@ static u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};
#define MSRS_RANGE_SIZE 2048
#define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)
+static u32 svm_msrpm_offset(u32 msr)
+{
+ u32 offset;
+ int i;
+
+ for (i = 0; i < NUM_MSR_MAPS; i++) {
+ if (msr < msrpm_ranges[i] ||
+ msr >= msrpm_ranges[i] + MSRS_IN_RANGE)
+ continue;
+
+ offset = (msr - msrpm_ranges[i]) / 4; /* 4 msrs per u8 */
+ offset += (i * MSRS_RANGE_SIZE); /* add range offset */
+
+ /* Now we have the u8 offset - but need the u32 offset */
+ return offset / 4;
+ }
+
+ /* MSR not in any range */
+ return MSR_INVALID;
+}
+
#define MAX_INST_SIZE 15
static inline u32 svm_has(u32 feat)
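Aside (worked example, not part of the patch): the MSR permission map keeps two intercept bits per MSR (read and write), i.e. 4 MSRs per byte and 16 per u32 word, which is what svm_msrpm_offset() above computes. For MSR_K6_STAR (0xc0000081) in range 1 (base 0xc0000000, MSRS_RANGE_SIZE == 2048):

	byte offset = (0xc0000081 - 0xc0000000) / 4 + 1 * 2048 = 0x20 + 0x800 = 0x820
	u32 offset  = 0x820 / 4 = 0x208

set_msr_interception(), added further down, then flips bits 2 * (msr & 0xf) = 2 and 2 * (msr & 0xf) + 1 = 3 of msrpm[0x208].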
@@ -213,7 +267,7 @@ static inline void stgi(void)
static inline void invlpga(unsigned long addr, u32 asid)
{
- asm volatile (__ex(SVM_INVLPGA) :: "a"(addr), "c"(asid));
+ asm volatile (__ex(SVM_INVLPGA) : : "a"(addr), "c"(asid));
}
static inline void force_new_asid(struct kvm_vcpu *vcpu)
@@ -235,23 +289,6 @@ static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
vcpu->arch.efer = efer;
}
-static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
- bool has_error_code, u32 error_code)
-{
- struct vcpu_svm *svm = to_svm(vcpu);
-
- /* If we are within a nested VM we'd better #VMEXIT and let the
- guest handle the exception */
- if (nested_svm_check_exception(svm, nr, has_error_code, error_code))
- return;
-
- svm->vmcb->control.event_inj = nr
- | SVM_EVTINJ_VALID
- | (has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
- | SVM_EVTINJ_TYPE_EXEPT;
- svm->vmcb->control.event_inj_err = error_code;
-}
-
static int is_external_interrupt(u32 info)
{
info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
@@ -264,7 +301,7 @@ static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
u32 ret = 0;
if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
- ret |= X86_SHADOW_INT_STI | X86_SHADOW_INT_MOV_SS;
+ ret |= KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS;
return ret & mask;
}
@@ -283,6 +320,9 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
+ if (svm->vmcb->control.next_rip != 0)
+ svm->next_rip = svm->vmcb->control.next_rip;
+
if (!svm->next_rip) {
if (emulate_instruction(vcpu, 0, 0, EMULTYPE_SKIP) !=
EMULATE_DONE)
@@ -297,6 +337,43 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
svm_set_interrupt_shadow(vcpu, 0);
}
+static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
+ bool has_error_code, u32 error_code,
+ bool reinject)
+{
+ struct vcpu_svm *svm = to_svm(vcpu);
+
+ /*
+ * If we are within a nested VM we'd better #VMEXIT and let the guest
+ * handle the exception
+ */
+ if (!reinject &&
+ nested_svm_check_exception(svm, nr, has_error_code, error_code))
+ return;
+
+ if (nr == BP_VECTOR && !svm_has(SVM_FEATURE_NRIP)) {
+ unsigned long rip, old_rip = kvm_rip_read(&svm->vcpu);
+
+ /*
+ * For guest debugging where we have to reinject #BP if some
+ * INT3 is guest-owned:
+ * Emulate nRIP by moving RIP forward. Will fail if injection
+ * raises a fault that is not intercepted. Still better than
+ * failing in all cases.
+ */
+ skip_emulated_instruction(&svm->vcpu);
+ rip = kvm_rip_read(&svm->vcpu);
+ svm->int3_rip = rip + svm->vmcb->save.cs.base;
+ svm->int3_injected = rip - old_rip;
+ }
+
+ svm->vmcb->control.event_inj = nr
+ | SVM_EVTINJ_VALID
+ | (has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
+ | SVM_EVTINJ_TYPE_EXEPT;
+ svm->vmcb->control.event_inj_err = error_code;
+}
+
static int has_svm(void)
{
const char *msg;
@@ -319,7 +396,7 @@ static int svm_hardware_enable(void *garbage)
struct svm_cpu_data *sd;
uint64_t efer;
- struct descriptor_table gdt_descr;
+ struct desc_ptr gdt_descr;
struct desc_struct *gdt;
int me = raw_smp_processor_id();
@@ -344,8 +421,8 @@ static int svm_hardware_enable(void *garbage)
sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
sd->next_asid = sd->max_asid + 1;
- kvm_get_gdt(&gdt_descr);
- gdt = (struct desc_struct *)gdt_descr.base;
+ native_store_gdt(&gdt_descr);
+ gdt = (struct desc_struct *)gdt_descr.address;
sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);
wrmsrl(MSR_EFER, efer | EFER_SVME);
@@ -391,42 +468,98 @@ err_1:
}
+static bool valid_msr_intercept(u32 index)
+{
+ int i;
+
+ for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++)
+ if (direct_access_msrs[i].index == index)
+ return true;
+
+ return false;
+}
+
static void set_msr_interception(u32 *msrpm, unsigned msr,
int read, int write)
{
+ u8 bit_read, bit_write;
+ unsigned long tmp;
+ u32 offset;
+
+ /*
+ * If this warning triggers extend the direct_access_msrs list at the
+ * beginning of the file
+ */
+ WARN_ON(!valid_msr_intercept(msr));
+
+ offset = svm_msrpm_offset(msr);
+ bit_read = 2 * (msr & 0x0f);
+ bit_write = 2 * (msr & 0x0f) + 1;
+ tmp = msrpm[offset];
+
+ BUG_ON(offset == MSR_INVALID);
+
+ read ? clear_bit(bit_read, &tmp) : set_bit(bit_read, &tmp);
+ write ? clear_bit(bit_write, &tmp) : set_bit(bit_write, &tmp);
+
+ msrpm[offset] = tmp;
+}
+
+static void svm_vcpu_init_msrpm(u32 *msrpm)
+{
int i;
- for (i = 0; i < NUM_MSR_MAPS; i++) {
- if (msr >= msrpm_ranges[i] &&
- msr < msrpm_ranges[i] + MSRS_IN_RANGE) {
- u32 msr_offset = (i * MSRS_IN_RANGE + msr -
- msrpm_ranges[i]) * 2;
-
- u32 *base = msrpm + (msr_offset / 32);
- u32 msr_shift = msr_offset % 32;
- u32 mask = ((write) ? 0 : 2) | ((read) ? 0 : 1);
- *base = (*base & ~(0x3 << msr_shift)) |
- (mask << msr_shift);
+ memset(msrpm, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));
+
+ for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
+ if (!direct_access_msrs[i].always)
+ continue;
+
+ set_msr_interception(msrpm, direct_access_msrs[i].index, 1, 1);
+ }
+}
+
+static void add_msr_offset(u32 offset)
+{
+ int i;
+
+ for (i = 0; i < MSRPM_OFFSETS; ++i) {
+
+ /* Offset already in list? */
+ if (msrpm_offsets[i] == offset)
return;
- }
+
+ /* Slot used by another offset? */
+ if (msrpm_offsets[i] != MSR_INVALID)
+ continue;
+
+ /* Add offset to list */
+ msrpm_offsets[i] = offset;
+
+ return;
}
+
+ /*
+ * If this BUG triggers, the msrpm_offsets table has overflowed. Just
+ * increase MSRPM_OFFSETS in this case.
+ */
BUG();
}
-static void svm_vcpu_init_msrpm(u32 *msrpm)
+static void init_msrpm_offsets(void)
{
- memset(msrpm, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));
+ int i;
-#ifdef CONFIG_X86_64
- set_msr_interception(msrpm, MSR_GS_BASE, 1, 1);
- set_msr_interception(msrpm, MSR_FS_BASE, 1, 1);
- set_msr_interception(msrpm, MSR_KERNEL_GS_BASE, 1, 1);
- set_msr_interception(msrpm, MSR_LSTAR, 1, 1);
- set_msr_interception(msrpm, MSR_CSTAR, 1, 1);
- set_msr_interception(msrpm, MSR_SYSCALL_MASK, 1, 1);
-#endif
- set_msr_interception(msrpm, MSR_K6_STAR, 1, 1);
- set_msr_interception(msrpm, MSR_IA32_SYSENTER_CS, 1, 1);
+ memset(msrpm_offsets, 0xff, sizeof(msrpm_offsets));
+
+ for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
+ u32 offset;
+
+ offset = svm_msrpm_offset(direct_access_msrs[i].index);
+ BUG_ON(offset == MSR_INVALID);
+
+ add_msr_offset(offset);
+ }
}
static void svm_enable_lbrv(struct vcpu_svm *svm)
@@ -467,6 +600,8 @@ static __init int svm_hardware_setup(void)
memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;
+ init_msrpm_offsets();
+
if (boot_cpu_has(X86_FEATURE_NX))
kvm_enable_efer_bits(EFER_NX);
@@ -523,7 +658,7 @@ static void init_seg(struct vmcb_seg *seg)
{
seg->selector = 0;
seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
- SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
+ SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
seg->limit = 0xffff;
seg->base = 0;
}
@@ -543,16 +678,16 @@ static void init_vmcb(struct vcpu_svm *svm)
svm->vcpu.fpu_active = 1;
- control->intercept_cr_read = INTERCEPT_CR0_MASK |
+ control->intercept_cr_read = INTERCEPT_CR0_MASK |
INTERCEPT_CR3_MASK |
INTERCEPT_CR4_MASK;
- control->intercept_cr_write = INTERCEPT_CR0_MASK |
+ control->intercept_cr_write = INTERCEPT_CR0_MASK |
INTERCEPT_CR3_MASK |
INTERCEPT_CR4_MASK |
INTERCEPT_CR8_MASK;
- control->intercept_dr_read = INTERCEPT_DR0_MASK |
+ control->intercept_dr_read = INTERCEPT_DR0_MASK |
INTERCEPT_DR1_MASK |
INTERCEPT_DR2_MASK |
INTERCEPT_DR3_MASK |
@@ -561,7 +696,7 @@ static void init_vmcb(struct vcpu_svm *svm)
INTERCEPT_DR6_MASK |
INTERCEPT_DR7_MASK;
- control->intercept_dr_write = INTERCEPT_DR0_MASK |
+ control->intercept_dr_write = INTERCEPT_DR0_MASK |
INTERCEPT_DR1_MASK |
INTERCEPT_DR2_MASK |
INTERCEPT_DR3_MASK |
@@ -575,7 +710,7 @@ static void init_vmcb(struct vcpu_svm *svm)
(1 << MC_VECTOR);
- control->intercept = (1ULL << INTERCEPT_INTR) |
+ control->intercept = (1ULL << INTERCEPT_INTR) |
(1ULL << INTERCEPT_NMI) |
(1ULL << INTERCEPT_SMI) |
(1ULL << INTERCEPT_SELECTIVE_CR0) |
@@ -636,7 +771,8 @@ static void init_vmcb(struct vcpu_svm *svm)
save->rip = 0x0000fff0;
svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip;
- /* This is the guest-visible cr0 value.
+ /*
+ * This is the guest-visible cr0 value.
* svm_set_cr0() sets PG and WP and clears NW and CD on save->cr0.
*/
svm->vcpu.arch.cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
@@ -729,6 +865,7 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
svm_vcpu_init_msrpm(svm->msrpm);
svm->nested.msrpm = page_address(nested_msrpm_pages);
+ svm_vcpu_init_msrpm(svm->nested.msrpm);
svm->vmcb = page_address(page);
clear_page(svm->vmcb);
@@ -882,7 +1019,8 @@ static void svm_get_segment(struct kvm_vcpu *vcpu,
var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
var->g = (s->attrib >> SVM_SELECTOR_G_SHIFT) & 1;
- /* AMD's VMCB does not have an explicit unusable field, so emulate it
+ /*
+ * AMD's VMCB does not have an explicit unusable field, so emulate it
* for cross vendor migration purposes by "not present"
*/
var->unusable = !var->present || (var->type == 0);
@@ -918,7 +1056,8 @@ static void svm_get_segment(struct kvm_vcpu *vcpu,
var->type |= 0x1;
break;
case VCPU_SREG_SS:
- /* On AMD CPUs sometimes the DB bit in the segment
+ /*
+ * On AMD CPUs sometimes the DB bit in the segment
* descriptor is left as 1, although the whole segment has
* been made unusable. Clear it here to pass an Intel VMX
* entry check when cross vendor migrating.
@@ -936,36 +1075,36 @@ static int svm_get_cpl(struct kvm_vcpu *vcpu)
return save->cpl;
}
-static void svm_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
+static void svm_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
struct vcpu_svm *svm = to_svm(vcpu);
- dt->limit = svm->vmcb->save.idtr.limit;
- dt->base = svm->vmcb->save.idtr.base;
+ dt->size = svm->vmcb->save.idtr.limit;
+ dt->address = svm->vmcb->save.idtr.base;
}
-static void svm_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
+static void svm_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
struct vcpu_svm *svm = to_svm(vcpu);
- svm->vmcb->save.idtr.limit = dt->limit;
- svm->vmcb->save.idtr.base = dt->base ;
+ svm->vmcb->save.idtr.limit = dt->size;
+ svm->vmcb->save.idtr.base = dt->address;
}
-static void svm_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
+static void svm_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
struct vcpu_svm *svm = to_svm(vcpu);
- dt->limit = svm->vmcb->save.gdtr.limit;
- dt->base = svm->vmcb->save.gdtr.base;
+ dt->size = svm->vmcb->save.gdtr.limit;
+ dt->address = svm->vmcb->save.gdtr.base;
}
-static void svm_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
+static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
struct vcpu_svm *svm = to_svm(vcpu);
- svm->vmcb->save.gdtr.limit = dt->limit;
- svm->vmcb->save.gdtr.base = dt->base ;
+ svm->vmcb->save.gdtr.limit = dt->size;
+ svm->vmcb->save.gdtr.base = dt->address;
}
static void svm_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
@@ -978,6 +1117,7 @@ static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
static void update_cr0_intercept(struct vcpu_svm *svm)
{
+ struct vmcb *vmcb = svm->vmcb;
ulong gcr0 = svm->vcpu.arch.cr0;
u64 *hcr0 = &svm->vmcb->save.cr0;
@@ -989,11 +1129,25 @@ static void update_cr0_intercept(struct vcpu_svm *svm)
if (gcr0 == *hcr0 && svm->vcpu.fpu_active) {
- svm->vmcb->control.intercept_cr_read &= ~INTERCEPT_CR0_MASK;
- svm->vmcb->control.intercept_cr_write &= ~INTERCEPT_CR0_MASK;
+ vmcb->control.intercept_cr_read &= ~INTERCEPT_CR0_MASK;
+ vmcb->control.intercept_cr_write &= ~INTERCEPT_CR0_MASK;
+ if (is_nested(svm)) {
+ struct vmcb *hsave = svm->nested.hsave;
+
+ hsave->control.intercept_cr_read &= ~INTERCEPT_CR0_MASK;
+ hsave->control.intercept_cr_write &= ~INTERCEPT_CR0_MASK;
+ vmcb->control.intercept_cr_read |= svm->nested.intercept_cr_read;
+ vmcb->control.intercept_cr_write |= svm->nested.intercept_cr_write;
+ }
} else {
svm->vmcb->control.intercept_cr_read |= INTERCEPT_CR0_MASK;
svm->vmcb->control.intercept_cr_write |= INTERCEPT_CR0_MASK;
+ if (is_nested(svm)) {
+ struct vmcb *hsave = svm->nested.hsave;
+
+ hsave->control.intercept_cr_read |= INTERCEPT_CR0_MASK;
+ hsave->control.intercept_cr_write |= INTERCEPT_CR0_MASK;
+ }
}
}
@@ -1001,6 +1155,27 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
struct vcpu_svm *svm = to_svm(vcpu);
+ if (is_nested(svm)) {
+ /*
+ * We are here because we run in nested mode: the host kvm
+ * intercepts cr0 writes but the L1 hypervisor does not.
+ * The L1 hypervisor may, however, intercept selective cr0
+ * writes, which needs to be checked here.
+ */
+ unsigned long old, new;
+
+ /* Remove bits that would trigger a real cr0 write intercept */
+ old = vcpu->arch.cr0 & SVM_CR0_SELECTIVE_MASK;
+ new = cr0 & SVM_CR0_SELECTIVE_MASK;
+
+ if (old == new) {
+ /* cr0 write with ts and mp unchanged */
+ svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE;
+ if (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE)
+ return;
+ }
+ }
+
#ifdef CONFIG_X86_64
if (vcpu->arch.efer & EFER_LME) {
if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
@@ -1134,70 +1309,11 @@ static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
svm->vmcb->control.asid = sd->next_asid++;
}
-static int svm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *dest)
+static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
{
struct vcpu_svm *svm = to_svm(vcpu);
- switch (dr) {
- case 0 ... 3:
- *dest = vcpu->arch.db[dr];
- break;
- case 4:
- if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
- return EMULATE_FAIL; /* will re-inject UD */
- /* fall through */
- case 6:
- if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
- *dest = vcpu->arch.dr6;
- else
- *dest = svm->vmcb->save.dr6;
- break;
- case 5:
- if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
- return EMULATE_FAIL; /* will re-inject UD */
- /* fall through */
- case 7:
- if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
- *dest = vcpu->arch.dr7;
- else
- *dest = svm->vmcb->save.dr7;
- break;
- }
-
- return EMULATE_DONE;
-}
-
-static int svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value)
-{
- struct vcpu_svm *svm = to_svm(vcpu);
-
- switch (dr) {
- case 0 ... 3:
- vcpu->arch.db[dr] = value;
- if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
- vcpu->arch.eff_db[dr] = value;
- break;
- case 4:
- if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
- return EMULATE_FAIL; /* will re-inject UD */
- /* fall through */
- case 6:
- vcpu->arch.dr6 = (value & DR6_VOLATILE) | DR6_FIXED_1;
- break;
- case 5:
- if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
- return EMULATE_FAIL; /* will re-inject UD */
- /* fall through */
- case 7:
- vcpu->arch.dr7 = (value & DR7_VOLATILE) | DR7_FIXED_1;
- if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
- svm->vmcb->save.dr7 = vcpu->arch.dr7;
- vcpu->arch.switch_db_regs = (value & DR7_BP_EN_MASK);
- }
- break;
- }
-
- return EMULATE_DONE;
+ svm->vmcb->save.dr7 = value;
}
static int pf_interception(struct vcpu_svm *svm)
@@ -1234,7 +1350,7 @@ static int db_interception(struct vcpu_svm *svm)
}
if (svm->vcpu.guest_debug &
- (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)){
+ (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) {
kvm_run->exit_reason = KVM_EXIT_DEBUG;
kvm_run->debug.arch.pc =
svm->vmcb->save.cs.base + svm->vmcb->save.rip;
@@ -1268,7 +1384,22 @@ static int ud_interception(struct vcpu_svm *svm)
static void svm_fpu_activate(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
- svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
+ u32 excp;
+
+ if (is_nested(svm)) {
+ u32 h_excp, n_excp;
+
+ h_excp = svm->nested.hsave->control.intercept_exceptions;
+ n_excp = svm->nested.intercept_exceptions;
+ h_excp &= ~(1 << NM_VECTOR);
+ excp = h_excp | n_excp;
+ } else {
+ excp = svm->vmcb->control.intercept_exceptions;
+ excp &= ~(1 << NM_VECTOR);
+ }
+
+ svm->vmcb->control.intercept_exceptions = excp;
+
svm->vcpu.fpu_active = 1;
update_cr0_intercept(svm);
}
@@ -1309,29 +1440,23 @@ static int shutdown_interception(struct vcpu_svm *svm)
static int io_interception(struct vcpu_svm *svm)
{
+ struct kvm_vcpu *vcpu = &svm->vcpu;
u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
int size, in, string;
unsigned port;
++svm->vcpu.stat.io_exits;
-
- svm->next_rip = svm->vmcb->control.exit_info_2;
-
string = (io_info & SVM_IOIO_STR_MASK) != 0;
-
- if (string) {
- if (emulate_instruction(&svm->vcpu,
- 0, 0, 0) == EMULATE_DO_MMIO)
- return 0;
- return 1;
- }
-
in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
+ if (string || in)
+ return !(emulate_instruction(vcpu, 0, 0, 0) == EMULATE_DO_MMIO);
+
port = io_info >> 16;
size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
-
+ svm->next_rip = svm->vmcb->control.exit_info_2;
skip_emulated_instruction(&svm->vcpu);
- return kvm_emulate_pio(&svm->vcpu, in, size, port);
+
+ return kvm_fast_pio_out(vcpu, size, port);
}
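The rewritten io_interception() pulls the direction, string flag, size and port out of exit_info_1. A self-contained sketch of that decode is below; the exact bit positions are assumptions inferred from the SVM_IOIO_* names used above, and the helper name is ours.

#include <stdint.h>
#include <stdbool.h>

struct ioio_info {
	bool in;		/* 1 = IN, 0 = OUT */
	bool string;		/* INS/OUTS */
	unsigned int size;	/* access size in bytes (1, 2 or 4) */
	uint16_t port;
};

static struct ioio_info decode_ioio(uint32_t io_info)
{
	struct ioio_info d;

	d.in     = io_info & 0x1;		/* assumed SVM_IOIO_TYPE_MASK */
	d.string = io_info & 0x4;		/* assumed SVM_IOIO_STR_MASK  */
	d.size   = (io_info >> 4) & 0x7;	/* assumed SVM_IOIO_SIZE_*    */
	d.port   = io_info >> 16;
	return d;
}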
static int nmi_interception(struct vcpu_svm *svm)
@@ -1384,6 +1509,8 @@ static int nested_svm_check_permissions(struct vcpu_svm *svm)
static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
bool has_error_code, u32 error_code)
{
+ int vmexit;
+
if (!is_nested(svm))
return 0;
@@ -1392,21 +1519,28 @@ static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
svm->vmcb->control.exit_info_1 = error_code;
svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
- return nested_svm_exit_handled(svm);
+ vmexit = nested_svm_intercept(svm);
+ if (vmexit == NESTED_EXIT_DONE)
+ svm->nested.exit_required = true;
+
+ return vmexit;
}
-static inline int nested_svm_intr(struct vcpu_svm *svm)
+/* This function returns true if it is safe to enable the irq window */
+static inline bool nested_svm_intr(struct vcpu_svm *svm)
{
if (!is_nested(svm))
- return 0;
+ return true;
if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
- return 0;
+ return true;
if (!(svm->vcpu.arch.hflags & HF_HIF_MASK))
- return 0;
+ return false;
- svm->vmcb->control.exit_code = SVM_EXIT_INTR;
+ svm->vmcb->control.exit_code = SVM_EXIT_INTR;
+ svm->vmcb->control.exit_info_1 = 0;
+ svm->vmcb->control.exit_info_2 = 0;
if (svm->nested.intercept & 1ULL) {
/*
@@ -1417,21 +1551,40 @@ static inline int nested_svm_intr(struct vcpu_svm *svm)
*/
svm->nested.exit_required = true;
trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
- return 1;
+ return false;
}
- return 0;
+ return true;
+}
+
+/* This function returns true if it is safe to enable the nmi window */
+static inline bool nested_svm_nmi(struct vcpu_svm *svm)
+{
+ if (!is_nested(svm))
+ return true;
+
+ if (!(svm->nested.intercept & (1ULL << INTERCEPT_NMI)))
+ return true;
+
+ svm->vmcb->control.exit_code = SVM_EXIT_NMI;
+ svm->nested.exit_required = true;
+
+ return false;
}
-static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, enum km_type idx)
+static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, struct page **_page)
{
struct page *page;
+ might_sleep();
+
page = gfn_to_page(svm->vcpu.kvm, gpa >> PAGE_SHIFT);
if (is_error_page(page))
goto error;
- return kmap_atomic(page, idx);
+ *_page = page;
+
+ return kmap(page);
error:
kvm_release_page_clean(page);
@@ -1440,61 +1593,55 @@ error:
return NULL;
}
-static void nested_svm_unmap(void *addr, enum km_type idx)
+static void nested_svm_unmap(struct page *page)
{
- struct page *page;
+ kunmap(page);
+ kvm_release_page_dirty(page);
+}
- if (!addr)
- return;
+static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
+{
+ unsigned port;
+ u8 val, bit;
+ u64 gpa;
- page = kmap_atomic_to_page(addr);
+ if (!(svm->nested.intercept & (1ULL << INTERCEPT_IOIO_PROT)))
+ return NESTED_EXIT_HOST;
- kunmap_atomic(addr, idx);
- kvm_release_page_dirty(page);
+ port = svm->vmcb->control.exit_info_1 >> 16;
+ gpa = svm->nested.vmcb_iopm + (port / 8);
+ bit = port % 8;
+ val = 0;
+
+ if (kvm_read_guest(svm->vcpu.kvm, gpa, &val, 1))
+ val &= (1 << bit);
+
+ return val ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}
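The IOPM lookup above uses one permission bit per I/O port. A self-contained sketch against a local buffer follows (the real helper reads the byte from guest memory with kvm_read_guest()); the sketch tests the bit after a successful read, which is how I read the intent of the lookup above.

#include <stdint.h>
#include <stdbool.h>

/* One permission bit per I/O port; a set bit means the port is intercepted. */
static bool iopm_port_intercepted(const uint8_t *iopm, unsigned int port)
{
	return iopm[port / 8] & (1u << (port % 8));
}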
-static bool nested_svm_exit_handled_msr(struct vcpu_svm *svm)
+static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
{
- u32 param = svm->vmcb->control.exit_info_1 & 1;
- u32 msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
- bool ret = false;
- u32 t0, t1;
- u8 *msrpm;
+ u32 offset, msr, value;
+ int write, mask;
if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
- return false;
+ return NESTED_EXIT_HOST;
- msrpm = nested_svm_map(svm, svm->nested.vmcb_msrpm, KM_USER0);
+ msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
+ offset = svm_msrpm_offset(msr);
+ write = svm->vmcb->control.exit_info_1 & 1;
+ mask = 1 << ((2 * (msr & 0xf)) + write);
- if (!msrpm)
- goto out;
+ if (offset == MSR_INVALID)
+ return NESTED_EXIT_DONE;
- switch (msr) {
- case 0 ... 0x1fff:
- t0 = (msr * 2) % 8;
- t1 = msr / 8;
- break;
- case 0xc0000000 ... 0xc0001fff:
- t0 = (8192 + msr - 0xc0000000) * 2;
- t1 = (t0 / 8);
- t0 %= 8;
- break;
- case 0xc0010000 ... 0xc0011fff:
- t0 = (16384 + msr - 0xc0010000) * 2;
- t1 = (t0 / 8);
- t0 %= 8;
- break;
- default:
- ret = true;
- goto out;
- }
+ /* The offset is in 32-bit units, but we need it in 8-bit (byte) units */
+ offset *= 4;
- ret = msrpm[t1] & ((1 << param) << t0);
-
-out:
- nested_svm_unmap(msrpm, KM_USER0);
+ if (kvm_read_guest(svm->vcpu.kvm, svm->nested.vmcb_msrpm + offset, &value, 4))
+ return NESTED_EXIT_DONE;
- return ret;
+ return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}
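A standalone sketch of the MSR-permission-bitmap indexing that svm_msrpm_offset() and the mask computation above rely on: two bits per MSR (read, then write), four MSRs per byte, with the three MSR ranges taken from the removed switch statement. MSRPM_INVALID and the helper names below are ours, not the kernel's.

#include <stdint.h>

#define MSRPM_INVALID 0xffffffffu

/* Bit index of the read-permission bit for 'msr', or MSRPM_INVALID. */
static uint32_t msrpm_bit_index(uint32_t msr)
{
	uint32_t idx;

	if (msr <= 0x1fff)
		idx = msr;
	else if (msr >= 0xc0000000 && msr <= 0xc0001fff)
		idx = 0x2000 + (msr - 0xc0000000);
	else if (msr >= 0xc0010000 && msr <= 0xc0011fff)
		idx = 0x4000 + (msr - 0xc0010000);
	else
		return MSRPM_INVALID;

	return idx * 2;		/* two permission bits per MSR */
}

/* write = 0 checks the read bit, write = 1 the write bit. */
static int msrpm_intercepted(const uint8_t *msrpm, uint32_t msr, int write)
{
	uint32_t bit = msrpm_bit_index(msr);

	if (bit == MSRPM_INVALID)
		return 1;	/* unmapped MSRs are treated as intercepted */

	bit += write;
	return (msrpm[bit / 8] >> (bit % 8)) & 1;
}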
static int nested_svm_exit_special(struct vcpu_svm *svm)
@@ -1504,17 +1651,21 @@ static int nested_svm_exit_special(struct vcpu_svm *svm)
switch (exit_code) {
case SVM_EXIT_INTR:
case SVM_EXIT_NMI:
+ case SVM_EXIT_EXCP_BASE + MC_VECTOR:
return NESTED_EXIT_HOST;
- /* For now we are always handling NPFs when using them */
case SVM_EXIT_NPF:
+ /* For now we are always handling NPFs when using them */
if (npt_enabled)
return NESTED_EXIT_HOST;
break;
- /* When we're shadowing, trap PFs */
case SVM_EXIT_EXCP_BASE + PF_VECTOR:
+ /* When we're shadowing, trap PFs */
if (!npt_enabled)
return NESTED_EXIT_HOST;
break;
+ case SVM_EXIT_EXCP_BASE + NM_VECTOR:
+ nm_interception(svm);
+ break;
default:
break;
}
@@ -1525,7 +1676,7 @@ static int nested_svm_exit_special(struct vcpu_svm *svm)
/*
* If this function returns true, this #vmexit was already handled
*/
-static int nested_svm_exit_handled(struct vcpu_svm *svm)
+static int nested_svm_intercept(struct vcpu_svm *svm)
{
u32 exit_code = svm->vmcb->control.exit_code;
int vmexit = NESTED_EXIT_HOST;
@@ -1534,6 +1685,9 @@ static int nested_svm_exit_handled(struct vcpu_svm *svm)
case SVM_EXIT_MSR:
vmexit = nested_svm_exit_handled_msr(svm);
break;
+ case SVM_EXIT_IOIO:
+ vmexit = nested_svm_intercept_ioio(svm);
+ break;
case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR8: {
u32 cr_bits = 1 << (exit_code - SVM_EXIT_READ_CR0);
if (svm->nested.intercept_cr_read & cr_bits)
@@ -1564,6 +1718,10 @@ static int nested_svm_exit_handled(struct vcpu_svm *svm)
vmexit = NESTED_EXIT_DONE;
break;
}
+ case SVM_EXIT_ERR: {
+ vmexit = NESTED_EXIT_DONE;
+ break;
+ }
default: {
u64 exit_bits = 1ULL << (exit_code - SVM_EXIT_INTR);
if (svm->nested.intercept & exit_bits)
@@ -1571,9 +1729,17 @@ static int nested_svm_exit_handled(struct vcpu_svm *svm)
}
}
- if (vmexit == NESTED_EXIT_DONE) {
+ return vmexit;
+}
+
+static int nested_svm_exit_handled(struct vcpu_svm *svm)
+{
+ int vmexit;
+
+ vmexit = nested_svm_intercept(svm);
+
+ if (vmexit == NESTED_EXIT_DONE)
nested_svm_vmexit(svm);
- }
return vmexit;
}
@@ -1615,6 +1781,7 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
struct vmcb *nested_vmcb;
struct vmcb *hsave = svm->nested.hsave;
struct vmcb *vmcb = svm->vmcb;
+ struct page *page;
trace_kvm_nested_vmexit_inject(vmcb->control.exit_code,
vmcb->control.exit_info_1,
@@ -1622,10 +1789,13 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
vmcb->control.exit_int_info,
vmcb->control.exit_int_info_err);
- nested_vmcb = nested_svm_map(svm, svm->nested.vmcb, KM_USER0);
+ nested_vmcb = nested_svm_map(svm, svm->nested.vmcb, &page);
if (!nested_vmcb)
return 1;
+ /* Exit nested SVM mode */
+ svm->nested.vmcb = 0;
+
/* Give the current vmcb to the guest */
disable_gif(svm);
@@ -1635,9 +1805,10 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
nested_vmcb->save.ds = vmcb->save.ds;
nested_vmcb->save.gdtr = vmcb->save.gdtr;
nested_vmcb->save.idtr = vmcb->save.idtr;
- if (npt_enabled)
- nested_vmcb->save.cr3 = vmcb->save.cr3;
+ nested_vmcb->save.cr0 = kvm_read_cr0(&svm->vcpu);
+ nested_vmcb->save.cr3 = svm->vcpu.arch.cr3;
nested_vmcb->save.cr2 = vmcb->save.cr2;
+ nested_vmcb->save.cr4 = svm->vcpu.arch.cr4;
nested_vmcb->save.rflags = vmcb->save.rflags;
nested_vmcb->save.rip = vmcb->save.rip;
nested_vmcb->save.rsp = vmcb->save.rsp;
@@ -1709,10 +1880,7 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
svm->vmcb->save.cpl = 0;
svm->vmcb->control.exit_int_info = 0;
- /* Exit nested SVM mode */
- svm->nested.vmcb = 0;
-
- nested_svm_unmap(nested_vmcb, KM_USER0);
+ nested_svm_unmap(page);
kvm_mmu_reset_context(&svm->vcpu);
kvm_mmu_load(&svm->vcpu);
@@ -1722,19 +1890,33 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
{
- u32 *nested_msrpm;
+ /*
+ * This function merges the msr permission bitmaps of kvm and the
+ * nested vmcb. It is optimized in that it only merges the parts where
+ * the kvm msr permission bitmap may contain zero bits.
+ */
int i;
- nested_msrpm = nested_svm_map(svm, svm->nested.vmcb_msrpm, KM_USER0);
- if (!nested_msrpm)
- return false;
+ if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
+ return true;
- for (i=0; i< PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER) / 4; i++)
- svm->nested.msrpm[i] = svm->msrpm[i] | nested_msrpm[i];
+ for (i = 0; i < MSRPM_OFFSETS; i++) {
+ u32 value, p;
+ u64 offset;
- svm->vmcb->control.msrpm_base_pa = __pa(svm->nested.msrpm);
+ if (msrpm_offsets[i] == 0xffffffff)
+ break;
+
+ p = msrpm_offsets[i];
+ offset = svm->nested.vmcb_msrpm + (p * 4);
+
+ if (kvm_read_guest(svm->vcpu.kvm, offset, &value, 4))
+ return false;
+
+ svm->nested.msrpm[p] = svm->msrpm[p] | value;
+ }
- nested_svm_unmap(nested_msrpm, KM_USER0);
+ svm->vmcb->control.msrpm_base_pa = __pa(svm->nested.msrpm);
return true;
}
@@ -1744,26 +1926,34 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
struct vmcb *nested_vmcb;
struct vmcb *hsave = svm->nested.hsave;
struct vmcb *vmcb = svm->vmcb;
+ struct page *page;
+ u64 vmcb_gpa;
- nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, KM_USER0);
+ vmcb_gpa = svm->vmcb->save.rax;
+
+ nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
if (!nested_vmcb)
return false;
- /* nested_vmcb is our indicator if nested SVM is activated */
- svm->nested.vmcb = svm->vmcb->save.rax;
-
- trace_kvm_nested_vmrun(svm->vmcb->save.rip - 3, svm->nested.vmcb,
+ trace_kvm_nested_vmrun(svm->vmcb->save.rip - 3, vmcb_gpa,
nested_vmcb->save.rip,
nested_vmcb->control.int_ctl,
nested_vmcb->control.event_inj,
nested_vmcb->control.nested_ctl);
+ trace_kvm_nested_intercepts(nested_vmcb->control.intercept_cr_read,
+ nested_vmcb->control.intercept_cr_write,
+ nested_vmcb->control.intercept_exceptions,
+ nested_vmcb->control.intercept);
+
/* Clear internal status */
kvm_clear_exception_queue(&svm->vcpu);
kvm_clear_interrupt_queue(&svm->vcpu);
- /* Save the old vmcb, so we don't need to pick what we save, but
- can restore everything when a VMEXIT occurs */
+ /*
+ * Save the old vmcb, so we don't need to pick what we save, but can
+ * restore everything when a VMEXIT occurs
+ */
hsave->save.es = vmcb->save.es;
hsave->save.cs = vmcb->save.cs;
hsave->save.ss = vmcb->save.ss;
@@ -1803,14 +1993,17 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
if (npt_enabled) {
svm->vmcb->save.cr3 = nested_vmcb->save.cr3;
svm->vcpu.arch.cr3 = nested_vmcb->save.cr3;
- } else {
+ } else
kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3);
- kvm_mmu_reset_context(&svm->vcpu);
- }
+
+ /* Guest paging mode is active - reset mmu */
+ kvm_mmu_reset_context(&svm->vcpu);
+
svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = nested_vmcb->save.cr2;
kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, nested_vmcb->save.rax);
kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, nested_vmcb->save.rsp);
kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, nested_vmcb->save.rip);
+
/* In case we don't even reach vcpu_run, the fields are not updated */
svm->vmcb->save.rax = nested_vmcb->save.rax;
svm->vmcb->save.rsp = nested_vmcb->save.rsp;
@@ -1819,22 +2012,8 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
svm->vmcb->save.dr6 = nested_vmcb->save.dr6;
svm->vmcb->save.cpl = nested_vmcb->save.cpl;
- /* We don't want a nested guest to be more powerful than the guest,
- so all intercepts are ORed */
- svm->vmcb->control.intercept_cr_read |=
- nested_vmcb->control.intercept_cr_read;
- svm->vmcb->control.intercept_cr_write |=
- nested_vmcb->control.intercept_cr_write;
- svm->vmcb->control.intercept_dr_read |=
- nested_vmcb->control.intercept_dr_read;
- svm->vmcb->control.intercept_dr_write |=
- nested_vmcb->control.intercept_dr_write;
- svm->vmcb->control.intercept_exceptions |=
- nested_vmcb->control.intercept_exceptions;
-
- svm->vmcb->control.intercept |= nested_vmcb->control.intercept;
-
- svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa;
+ svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa & ~0x0fffULL;
+ svm->nested.vmcb_iopm = nested_vmcb->control.iopm_base_pa & ~0x0fffULL;
/* cache intercepts */
svm->nested.intercept_cr_read = nested_vmcb->control.intercept_cr_read;
@@ -1851,13 +2030,43 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
else
svm->vcpu.arch.hflags &= ~HF_VINTR_MASK;
+ if (svm->vcpu.arch.hflags & HF_VINTR_MASK) {
+ /* We only want the cr8 intercept bits of the guest */
+ svm->vmcb->control.intercept_cr_read &= ~INTERCEPT_CR8_MASK;
+ svm->vmcb->control.intercept_cr_write &= ~INTERCEPT_CR8_MASK;
+ }
+
+ /* We don't want to see VMMCALLs from a nested guest */
+ svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMMCALL);
+
+ /*
+ * We don't want a nested guest to be more powerful than the guest, so
+ * all intercepts are ORed
+ */
+ svm->vmcb->control.intercept_cr_read |=
+ nested_vmcb->control.intercept_cr_read;
+ svm->vmcb->control.intercept_cr_write |=
+ nested_vmcb->control.intercept_cr_write;
+ svm->vmcb->control.intercept_dr_read |=
+ nested_vmcb->control.intercept_dr_read;
+ svm->vmcb->control.intercept_dr_write |=
+ nested_vmcb->control.intercept_dr_write;
+ svm->vmcb->control.intercept_exceptions |=
+ nested_vmcb->control.intercept_exceptions;
+
+ svm->vmcb->control.intercept |= nested_vmcb->control.intercept;
+
+ svm->vmcb->control.lbr_ctl = nested_vmcb->control.lbr_ctl;
svm->vmcb->control.int_vector = nested_vmcb->control.int_vector;
svm->vmcb->control.int_state = nested_vmcb->control.int_state;
svm->vmcb->control.tsc_offset += nested_vmcb->control.tsc_offset;
svm->vmcb->control.event_inj = nested_vmcb->control.event_inj;
svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err;
- nested_svm_unmap(nested_vmcb, KM_USER0);
+ nested_svm_unmap(page);
+
+ /* nested_vmcb is our indicator if nested SVM is activated */
+ svm->nested.vmcb = vmcb_gpa;
enable_gif(svm);
@@ -1883,6 +2092,7 @@ static void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
static int vmload_interception(struct vcpu_svm *svm)
{
struct vmcb *nested_vmcb;
+ struct page *page;
if (nested_svm_check_permissions(svm))
return 1;
@@ -1890,12 +2100,12 @@ static int vmload_interception(struct vcpu_svm *svm)
svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
skip_emulated_instruction(&svm->vcpu);
- nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, KM_USER0);
+ nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
if (!nested_vmcb)
return 1;
nested_svm_vmloadsave(nested_vmcb, svm->vmcb);
- nested_svm_unmap(nested_vmcb, KM_USER0);
+ nested_svm_unmap(page);
return 1;
}
@@ -1903,6 +2113,7 @@ static int vmload_interception(struct vcpu_svm *svm)
static int vmsave_interception(struct vcpu_svm *svm)
{
struct vmcb *nested_vmcb;
+ struct page *page;
if (nested_svm_check_permissions(svm))
return 1;
@@ -1910,12 +2121,12 @@ static int vmsave_interception(struct vcpu_svm *svm)
svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
skip_emulated_instruction(&svm->vcpu);
- nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, KM_USER0);
+ nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
if (!nested_vmcb)
return 1;
nested_svm_vmloadsave(svm->vmcb, nested_vmcb);
- nested_svm_unmap(nested_vmcb, KM_USER0);
+ nested_svm_unmap(page);
return 1;
}
@@ -2018,6 +2229,8 @@ static int task_switch_interception(struct vcpu_svm *svm)
svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK;
uint32_t idt_v =
svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID;
+ bool has_error_code = false;
+ u32 error_code = 0;
tss_selector = (u16)svm->vmcb->control.exit_info_1;
@@ -2038,6 +2251,12 @@ static int task_switch_interception(struct vcpu_svm *svm)
svm->vcpu.arch.nmi_injected = false;
break;
case SVM_EXITINTINFO_TYPE_EXEPT:
+ if (svm->vmcb->control.exit_info_2 &
+ (1ULL << SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE)) {
+ has_error_code = true;
+ error_code =
+ (u32)svm->vmcb->control.exit_info_2;
+ }
kvm_clear_exception_queue(&svm->vcpu);
break;
case SVM_EXITINTINFO_TYPE_INTR:
@@ -2054,7 +2273,14 @@ static int task_switch_interception(struct vcpu_svm *svm)
(int_vec == OF_VECTOR || int_vec == BP_VECTOR)))
skip_emulated_instruction(&svm->vcpu);
- return kvm_task_switch(&svm->vcpu, tss_selector, reason);
+ if (kvm_task_switch(&svm->vcpu, tss_selector, reason,
+ has_error_code, error_code) == EMULATE_FAIL) {
+ svm->vcpu.run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ svm->vcpu.run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
+ svm->vcpu.run->internal.ndata = 0;
+ return 0;
+ }
+ return 1;
}
static int cpuid_interception(struct vcpu_svm *svm)
@@ -2067,7 +2293,7 @@ static int cpuid_interception(struct vcpu_svm *svm)
static int iret_interception(struct vcpu_svm *svm)
{
++svm->vcpu.stat.nmi_window_exits;
- svm->vmcb->control.intercept &= ~(1UL << INTERCEPT_IRET);
+ svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_IRET);
svm->vcpu.arch.hflags |= HF_IRET_MASK;
return 1;
}
@@ -2145,9 +2371,11 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
case MSR_IA32_SYSENTER_ESP:
*data = svm->sysenter_esp;
break;
- /* Nobody will change the following 5 values in the VMCB so
- we can safely return them on rdmsr. They will always be 0
- until LBRV is implemented. */
+ /*
+ * Nobody will change the following 5 values in the VMCB so we can
+ * safely return them on rdmsr. They will always be 0 until LBRV is
+ * implemented.
+ */
case MSR_IA32_DEBUGCTLMSR:
*data = svm->vmcb->save.dbgctl;
break;
@@ -2167,7 +2395,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
*data = svm->nested.hsave_msr;
break;
case MSR_VM_CR:
- *data = 0;
+ *data = svm->nested.vm_cr_msr;
break;
case MSR_IA32_UCODE_REV:
*data = 0x01000065;
@@ -2197,6 +2425,31 @@ static int rdmsr_interception(struct vcpu_svm *svm)
return 1;
}
+static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data)
+{
+ struct vcpu_svm *svm = to_svm(vcpu);
+ int svm_dis, chg_mask;
+
+ if (data & ~SVM_VM_CR_VALID_MASK)
+ return 1;
+
+ chg_mask = SVM_VM_CR_VALID_MASK;
+
+ if (svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK)
+ chg_mask &= ~(SVM_VM_CR_SVM_LOCK_MASK | SVM_VM_CR_SVM_DIS_MASK);
+
+ svm->nested.vm_cr_msr &= ~chg_mask;
+ svm->nested.vm_cr_msr |= (data & chg_mask);
+
+ svm_dis = svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK;
+
+ /* check for svm_disable while efer.svme is set */
+ if (svm_dis && (vcpu->arch.efer & EFER_SVME))
+ return 1;
+
+ return 0;
+}
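svm_set_vm_cr() above implements a write mask that shrinks once SVM_DIS is latched: SVM_LOCK and SVM_DIS become read-only. A hedged user-space sketch of the same rule; the bit layout follows the SVM_VM_CR_* names used above, but the exact positions are assumptions.

#include <stdint.h>

#define VM_CR_DPD	(1u << 0)	/* assumed bit positions */
#define VM_CR_R_INIT	(1u << 1)
#define VM_CR_DIS_A20M	(1u << 2)
#define VM_CR_LOCK	(1u << 3)
#define VM_CR_SVMDIS	(1u << 4)
#define VM_CR_VALID	(VM_CR_DPD | VM_CR_R_INIT | VM_CR_DIS_A20M | \
			 VM_CR_LOCK | VM_CR_SVMDIS)

/* Returns 0 on success, 1 if the write should fault. */
static int vm_cr_write(uint32_t *vm_cr, uint32_t data, int efer_svme)
{
	uint32_t chg_mask = VM_CR_VALID;

	if (data & ~VM_CR_VALID)
		return 1;			/* reserved bits set */

	if (*vm_cr & VM_CR_SVMDIS)		/* once disabled, lock bits stick */
		chg_mask &= ~(VM_CR_LOCK | VM_CR_SVMDIS);

	*vm_cr = (*vm_cr & ~chg_mask) | (data & chg_mask);

	if ((*vm_cr & VM_CR_SVMDIS) && efer_svme)
		return 1;			/* can't disable SVM while EFER.SVME=1 */

	return 0;
}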
+
static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
{
struct vcpu_svm *svm = to_svm(vcpu);
@@ -2263,6 +2516,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
svm->nested.hsave_msr = data;
break;
case MSR_VM_CR:
+ return svm_set_vm_cr(vcpu, data);
case MSR_VM_IGNNE:
pr_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
break;
@@ -2326,16 +2580,16 @@ static int pause_interception(struct vcpu_svm *svm)
}
static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
- [SVM_EXIT_READ_CR0] = emulate_on_interception,
- [SVM_EXIT_READ_CR3] = emulate_on_interception,
- [SVM_EXIT_READ_CR4] = emulate_on_interception,
- [SVM_EXIT_READ_CR8] = emulate_on_interception,
+ [SVM_EXIT_READ_CR0] = emulate_on_interception,
+ [SVM_EXIT_READ_CR3] = emulate_on_interception,
+ [SVM_EXIT_READ_CR4] = emulate_on_interception,
+ [SVM_EXIT_READ_CR8] = emulate_on_interception,
[SVM_EXIT_CR0_SEL_WRITE] = emulate_on_interception,
- [SVM_EXIT_WRITE_CR0] = emulate_on_interception,
- [SVM_EXIT_WRITE_CR3] = emulate_on_interception,
- [SVM_EXIT_WRITE_CR4] = emulate_on_interception,
- [SVM_EXIT_WRITE_CR8] = cr8_write_interception,
- [SVM_EXIT_READ_DR0] = emulate_on_interception,
+ [SVM_EXIT_WRITE_CR0] = emulate_on_interception,
+ [SVM_EXIT_WRITE_CR3] = emulate_on_interception,
+ [SVM_EXIT_WRITE_CR4] = emulate_on_interception,
+ [SVM_EXIT_WRITE_CR8] = cr8_write_interception,
+ [SVM_EXIT_READ_DR0] = emulate_on_interception,
[SVM_EXIT_READ_DR1] = emulate_on_interception,
[SVM_EXIT_READ_DR2] = emulate_on_interception,
[SVM_EXIT_READ_DR3] = emulate_on_interception,
@@ -2354,15 +2608,14 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
[SVM_EXIT_EXCP_BASE + DB_VECTOR] = db_interception,
[SVM_EXIT_EXCP_BASE + BP_VECTOR] = bp_interception,
[SVM_EXIT_EXCP_BASE + UD_VECTOR] = ud_interception,
- [SVM_EXIT_EXCP_BASE + PF_VECTOR] = pf_interception,
- [SVM_EXIT_EXCP_BASE + NM_VECTOR] = nm_interception,
- [SVM_EXIT_EXCP_BASE + MC_VECTOR] = mc_interception,
- [SVM_EXIT_INTR] = intr_interception,
+ [SVM_EXIT_EXCP_BASE + PF_VECTOR] = pf_interception,
+ [SVM_EXIT_EXCP_BASE + NM_VECTOR] = nm_interception,
+ [SVM_EXIT_EXCP_BASE + MC_VECTOR] = mc_interception,
+ [SVM_EXIT_INTR] = intr_interception,
[SVM_EXIT_NMI] = nmi_interception,
[SVM_EXIT_SMI] = nop_on_interception,
[SVM_EXIT_INIT] = nop_on_interception,
[SVM_EXIT_VINTR] = interrupt_window_interception,
- /* [SVM_EXIT_CR0_SEL_WRITE] = emulate_on_interception, */
[SVM_EXIT_CPUID] = cpuid_interception,
[SVM_EXIT_IRET] = iret_interception,
[SVM_EXIT_INVD] = emulate_on_interception,
@@ -2370,7 +2623,7 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
[SVM_EXIT_HLT] = halt_interception,
[SVM_EXIT_INVLPG] = invlpg_interception,
[SVM_EXIT_INVLPGA] = invlpga_interception,
- [SVM_EXIT_IOIO] = io_interception,
+ [SVM_EXIT_IOIO] = io_interception,
[SVM_EXIT_MSR] = msr_interception,
[SVM_EXIT_TASK_SWITCH] = task_switch_interception,
[SVM_EXIT_SHUTDOWN] = shutdown_interception,
@@ -2393,7 +2646,12 @@ static int handle_exit(struct kvm_vcpu *vcpu)
struct kvm_run *kvm_run = vcpu->run;
u32 exit_code = svm->vmcb->control.exit_code;
- trace_kvm_exit(exit_code, svm->vmcb->save.rip);
+ trace_kvm_exit(exit_code, vcpu);
+
+ if (!(svm->vmcb->control.intercept_cr_write & INTERCEPT_CR0_MASK))
+ vcpu->arch.cr0 = svm->vmcb->save.cr0;
+ if (npt_enabled)
+ vcpu->arch.cr3 = svm->vmcb->save.cr3;
if (unlikely(svm->nested.exit_required)) {
nested_svm_vmexit(svm);
@@ -2422,11 +2680,6 @@ static int handle_exit(struct kvm_vcpu *vcpu)
svm_complete_interrupts(svm);
- if (!(svm->vmcb->control.intercept_cr_write & INTERCEPT_CR0_MASK))
- vcpu->arch.cr0 = svm->vmcb->save.cr0;
- if (npt_enabled)
- vcpu->arch.cr3 = svm->vmcb->save.cr3;
-
if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
kvm_run->fail_entry.hardware_entry_failure_reason
@@ -2479,7 +2732,7 @@ static void svm_inject_nmi(struct kvm_vcpu *vcpu)
svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
vcpu->arch.hflags |= HF_NMI_MASK;
- svm->vmcb->control.intercept |= (1UL << INTERCEPT_IRET);
+ svm->vmcb->control.intercept |= (1ULL << INTERCEPT_IRET);
++vcpu->stat.nmi_injections;
}
@@ -2511,6 +2764,9 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
{
struct vcpu_svm *svm = to_svm(vcpu);
+ if (is_nested(svm) && (vcpu->arch.hflags & HF_VINTR_MASK))
+ return;
+
if (irr == -1)
return;
@@ -2522,8 +2778,12 @@ static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
struct vmcb *vmcb = svm->vmcb;
- return !(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) &&
- !(svm->vcpu.arch.hflags & HF_NMI_MASK);
+ int ret;
+ ret = !(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) &&
+ !(svm->vcpu.arch.hflags & HF_NMI_MASK);
+ ret = ret && gif_set(svm) && nested_svm_nmi(svm);
+
+ return ret;
}
static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu)
@@ -2539,10 +2799,10 @@ static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
if (masked) {
svm->vcpu.arch.hflags |= HF_NMI_MASK;
- svm->vmcb->control.intercept |= (1UL << INTERCEPT_IRET);
+ svm->vmcb->control.intercept |= (1ULL << INTERCEPT_IRET);
} else {
svm->vcpu.arch.hflags &= ~HF_NMI_MASK;
- svm->vmcb->control.intercept &= ~(1UL << INTERCEPT_IRET);
+ svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_IRET);
}
}
@@ -2568,13 +2828,13 @@ static void enable_irq_window(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
- nested_svm_intr(svm);
-
- /* In case GIF=0 we can't rely on the CPU to tell us when
- * GIF becomes 1, because that's a separate STGI/VMRUN intercept.
- * The next time we get that intercept, this function will be
- * called again though and we'll get the vintr intercept. */
- if (gif_set(svm)) {
+ /*
+ * In case GIF=0 we can't rely on the CPU to tell us when GIF becomes
+ * 1, because that's a separate STGI/VMRUN intercept. The next time we
+ * get that intercept, this function will be called again though and
+ * we'll get the vintr intercept.
+ */
+ if (gif_set(svm) && nested_svm_intr(svm)) {
svm_set_vintr(svm);
svm_inject_irq(svm, 0x0);
}
@@ -2588,9 +2848,10 @@ static void enable_nmi_window(struct kvm_vcpu *vcpu)
== HF_NMI_MASK)
return; /* IRET will cause a vm exit */
- /* Something prevents NMI from been injected. Single step over
- possible problem (IRET or exception injection or interrupt
- shadow) */
+ /*
+ * Something prevents NMI from being injected. Single-step over the
+ * possible problem (IRET, exception injection or interrupt shadow)
+ */
svm->nmi_singlestep = true;
svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
update_db_intercept(vcpu);
@@ -2614,6 +2875,9 @@ static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
+ if (is_nested(svm) && (vcpu->arch.hflags & HF_VINTR_MASK))
+ return;
+
if (!(svm->vmcb->control.intercept_cr_write & INTERCEPT_CR8_MASK)) {
int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
kvm_set_cr8(vcpu, cr8);
@@ -2625,6 +2889,9 @@ static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
struct vcpu_svm *svm = to_svm(vcpu);
u64 cr8;
+ if (is_nested(svm) && (vcpu->arch.hflags & HF_VINTR_MASK))
+ return;
+
cr8 = kvm_get_cr8(vcpu);
svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
@@ -2635,6 +2902,9 @@ static void svm_complete_interrupts(struct vcpu_svm *svm)
u8 vector;
int type;
u32 exitintinfo = svm->vmcb->control.exit_int_info;
+ unsigned int3_injected = svm->int3_injected;
+
+ svm->int3_injected = 0;
if (svm->vcpu.arch.hflags & HF_IRET_MASK)
svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK);
@@ -2654,18 +2924,25 @@ static void svm_complete_interrupts(struct vcpu_svm *svm)
svm->vcpu.arch.nmi_injected = true;
break;
case SVM_EXITINTINFO_TYPE_EXEPT:
- /* In case of software exception do not reinject an exception
- vector, but re-execute and instruction instead */
- if (is_nested(svm))
- break;
- if (kvm_exception_is_soft(vector))
+ /*
+ * In case of software exceptions, do not reinject the vector,
+ * but re-execute the instruction instead. Rewind RIP first
+ * if we emulated INT3 before.
+ */
+ if (kvm_exception_is_soft(vector)) {
+ if (vector == BP_VECTOR && int3_injected &&
+ kvm_is_linear_rip(&svm->vcpu, svm->int3_rip))
+ kvm_rip_write(&svm->vcpu,
+ kvm_rip_read(&svm->vcpu) -
+ int3_injected);
break;
+ }
if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) {
u32 err = svm->vmcb->control.exit_int_info_err;
- kvm_queue_exception_e(&svm->vcpu, vector, err);
+ kvm_requeue_exception_e(&svm->vcpu, vector, err);
} else
- kvm_queue_exception(&svm->vcpu, vector);
+ kvm_requeue_exception(&svm->vcpu, vector);
break;
case SVM_EXITINTINFO_TYPE_INTR:
kvm_queue_interrupt(&svm->vcpu, vector, false);
@@ -2688,6 +2965,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
u16 gs_selector;
u16 ldt_selector;
+ svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
+ svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
+ svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
+
/*
* A vmexit emulation is required before the vcpu can be executed
* again.
@@ -2695,10 +2976,6 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
if (unlikely(svm->nested.exit_required))
return;
- svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
- svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
- svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
-
pre_svm_run(svm);
sync_lapic_to_cr8(vcpu);
@@ -2879,25 +3156,39 @@ static void svm_cpuid_update(struct kvm_vcpu *vcpu)
{
}
+static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
+{
+ switch (func) {
+ case 0x8000000A:
+ entry->eax = 1; /* SVM revision 1 */
+ entry->ebx = 8; /* Let's support 8 ASIDs in case we add proper
+ ASID emulation to nested SVM */
+ entry->ecx = 0; /* Reserved */
+ entry->edx = 0; /* Do not support any additional features */
+
+ break;
+ }
+}
+
static const struct trace_print_flags svm_exit_reasons_str[] = {
- { SVM_EXIT_READ_CR0, "read_cr0" },
- { SVM_EXIT_READ_CR3, "read_cr3" },
- { SVM_EXIT_READ_CR4, "read_cr4" },
- { SVM_EXIT_READ_CR8, "read_cr8" },
- { SVM_EXIT_WRITE_CR0, "write_cr0" },
- { SVM_EXIT_WRITE_CR3, "write_cr3" },
- { SVM_EXIT_WRITE_CR4, "write_cr4" },
- { SVM_EXIT_WRITE_CR8, "write_cr8" },
- { SVM_EXIT_READ_DR0, "read_dr0" },
- { SVM_EXIT_READ_DR1, "read_dr1" },
- { SVM_EXIT_READ_DR2, "read_dr2" },
- { SVM_EXIT_READ_DR3, "read_dr3" },
- { SVM_EXIT_WRITE_DR0, "write_dr0" },
- { SVM_EXIT_WRITE_DR1, "write_dr1" },
- { SVM_EXIT_WRITE_DR2, "write_dr2" },
- { SVM_EXIT_WRITE_DR3, "write_dr3" },
- { SVM_EXIT_WRITE_DR5, "write_dr5" },
- { SVM_EXIT_WRITE_DR7, "write_dr7" },
+ { SVM_EXIT_READ_CR0, "read_cr0" },
+ { SVM_EXIT_READ_CR3, "read_cr3" },
+ { SVM_EXIT_READ_CR4, "read_cr4" },
+ { SVM_EXIT_READ_CR8, "read_cr8" },
+ { SVM_EXIT_WRITE_CR0, "write_cr0" },
+ { SVM_EXIT_WRITE_CR3, "write_cr3" },
+ { SVM_EXIT_WRITE_CR4, "write_cr4" },
+ { SVM_EXIT_WRITE_CR8, "write_cr8" },
+ { SVM_EXIT_READ_DR0, "read_dr0" },
+ { SVM_EXIT_READ_DR1, "read_dr1" },
+ { SVM_EXIT_READ_DR2, "read_dr2" },
+ { SVM_EXIT_READ_DR3, "read_dr3" },
+ { SVM_EXIT_WRITE_DR0, "write_dr0" },
+ { SVM_EXIT_WRITE_DR1, "write_dr1" },
+ { SVM_EXIT_WRITE_DR2, "write_dr2" },
+ { SVM_EXIT_WRITE_DR3, "write_dr3" },
+ { SVM_EXIT_WRITE_DR5, "write_dr5" },
+ { SVM_EXIT_WRITE_DR7, "write_dr7" },
{ SVM_EXIT_EXCP_BASE + DB_VECTOR, "DB excp" },
{ SVM_EXIT_EXCP_BASE + BP_VECTOR, "BP excp" },
{ SVM_EXIT_EXCP_BASE + UD_VECTOR, "UD excp" },
@@ -2946,8 +3237,10 @@ static void svm_fpu_deactivate(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
- update_cr0_intercept(svm);
svm->vmcb->control.intercept_exceptions |= 1 << NM_VECTOR;
+ if (is_nested(svm))
+ svm->nested.hsave->control.intercept_exceptions |= 1 << NM_VECTOR;
+ update_cr0_intercept(svm);
}
static struct kvm_x86_ops svm_x86_ops = {
@@ -2986,8 +3279,7 @@ static struct kvm_x86_ops svm_x86_ops = {
.set_idt = svm_set_idt,
.get_gdt = svm_get_gdt,
.set_gdt = svm_set_gdt,
- .get_dr = svm_get_dr,
- .set_dr = svm_set_dr,
+ .set_dr7 = svm_set_dr7,
.cache_reg = svm_cache_reg,
.get_rflags = svm_get_rflags,
.set_rflags = svm_set_rflags,
@@ -3023,12 +3315,14 @@ static struct kvm_x86_ops svm_x86_ops = {
.cpuid_update = svm_cpuid_update,
.rdtscp_supported = svm_rdtscp_supported,
+
+ .set_supported_cpuid = svm_set_supported_cpuid,
};
static int __init svm_init(void)
{
return kvm_init(&svm_x86_ops, sizeof(struct vcpu_svm),
- THIS_MODULE);
+ __alignof__(struct vcpu_svm), THIS_MODULE);
}
static void __exit svm_exit(void)
diff --git a/arch/x86/kvm/timer.c b/arch/x86/kvm/timer.c
index eea40439066c..4ddadb1a5ffe 100644
--- a/arch/x86/kvm/timer.c
+++ b/arch/x86/kvm/timer.c
@@ -12,7 +12,8 @@ static int __kvm_timer_fn(struct kvm_vcpu *vcpu, struct kvm_timer *ktimer)
/*
* There is a race window between reading and incrementing, but we do
* not care about potentially losing timer events in the !reinject
- * case anyway.
+ * case anyway. Note: KVM_REQ_PENDING_TIMER is implicitly checked
+ * in vcpu_enter_guest.
*/
if (ktimer->reinject || !atomic_read(&ktimer->pending)) {
atomic_inc(&ktimer->pending);
diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
index 6ad30a29f044..a6544b8e7c0f 100644
--- a/arch/x86/kvm/trace.h
+++ b/arch/x86/kvm/trace.h
@@ -5,8 +5,6 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm
-#define TRACE_INCLUDE_PATH arch/x86/kvm
-#define TRACE_INCLUDE_FILE trace
/*
* Tracepoint for guest mode entry.
@@ -184,8 +182,8 @@ TRACE_EVENT(kvm_apic,
* Tracepoint for kvm guest exit:
*/
TRACE_EVENT(kvm_exit,
- TP_PROTO(unsigned int exit_reason, unsigned long guest_rip),
- TP_ARGS(exit_reason, guest_rip),
+ TP_PROTO(unsigned int exit_reason, struct kvm_vcpu *vcpu),
+ TP_ARGS(exit_reason, vcpu),
TP_STRUCT__entry(
__field( unsigned int, exit_reason )
@@ -194,7 +192,7 @@ TRACE_EVENT(kvm_exit,
TP_fast_assign(
__entry->exit_reason = exit_reason;
- __entry->guest_rip = guest_rip;
+ __entry->guest_rip = kvm_rip_read(vcpu);
),
TP_printk("reason %s rip 0x%lx",
@@ -221,6 +219,38 @@ TRACE_EVENT(kvm_inj_virq,
TP_printk("irq %u", __entry->irq)
);
+#define EXS(x) { x##_VECTOR, "#" #x }
+
+#define kvm_trace_sym_exc \
+ EXS(DE), EXS(DB), EXS(BP), EXS(OF), EXS(BR), EXS(UD), EXS(NM), \
+ EXS(DF), EXS(TS), EXS(NP), EXS(SS), EXS(GP), EXS(PF), \
+ EXS(MF), EXS(MC)
+
+/*
+ * Tracepoint for kvm exception injection:
+ */
+TRACE_EVENT(kvm_inj_exception,
+ TP_PROTO(unsigned exception, bool has_error, unsigned error_code),
+ TP_ARGS(exception, has_error, error_code),
+
+ TP_STRUCT__entry(
+ __field( u8, exception )
+ __field( u8, has_error )
+ __field( u32, error_code )
+ ),
+
+ TP_fast_assign(
+ __entry->exception = exception;
+ __entry->has_error = has_error;
+ __entry->error_code = error_code;
+ ),
+
+ TP_printk("%s (0x%x)",
+ __print_symbolic(__entry->exception, kvm_trace_sym_exc),
+ /* FIXME: don't print error_code if not present */
+ __entry->has_error ? __entry->error_code : 0)
+);
+
/*
* Tracepoint for page fault.
*/
@@ -413,12 +443,34 @@ TRACE_EVENT(kvm_nested_vmrun,
),
TP_printk("rip: 0x%016llx vmcb: 0x%016llx nrip: 0x%016llx int_ctl: 0x%08x "
- "event_inj: 0x%08x npt: %s\n",
+ "event_inj: 0x%08x npt: %s",
__entry->rip, __entry->vmcb, __entry->nested_rip,
__entry->int_ctl, __entry->event_inj,
__entry->npt ? "on" : "off")
);
+TRACE_EVENT(kvm_nested_intercepts,
+ TP_PROTO(__u16 cr_read, __u16 cr_write, __u32 exceptions, __u64 intercept),
+ TP_ARGS(cr_read, cr_write, exceptions, intercept),
+
+ TP_STRUCT__entry(
+ __field( __u16, cr_read )
+ __field( __u16, cr_write )
+ __field( __u32, exceptions )
+ __field( __u64, intercept )
+ ),
+
+ TP_fast_assign(
+ __entry->cr_read = cr_read;
+ __entry->cr_write = cr_write;
+ __entry->exceptions = exceptions;
+ __entry->intercept = intercept;
+ ),
+
+ TP_printk("cr_read: %04x cr_write: %04x excp: %08x intercept: %016llx",
+ __entry->cr_read, __entry->cr_write, __entry->exceptions,
+ __entry->intercept)
+);
/*
* Tracepoint for #VMEXIT while nested
*/
@@ -447,7 +499,7 @@ TRACE_EVENT(kvm_nested_vmexit,
__entry->exit_int_info_err = exit_int_info_err;
),
TP_printk("rip: 0x%016llx reason: %s ext_inf1: 0x%016llx "
- "ext_inf2: 0x%016llx ext_int: 0x%08x ext_int_err: 0x%08x\n",
+ "ext_inf2: 0x%016llx ext_int: 0x%08x ext_int_err: 0x%08x",
__entry->rip,
ftrace_print_symbols_seq(p, __entry->exit_code,
kvm_x86_ops->exit_reasons_str),
@@ -482,7 +534,7 @@ TRACE_EVENT(kvm_nested_vmexit_inject,
),
TP_printk("reason: %s ext_inf1: 0x%016llx "
- "ext_inf2: 0x%016llx ext_int: 0x%08x ext_int_err: 0x%08x\n",
+ "ext_inf2: 0x%016llx ext_int: 0x%08x ext_int_err: 0x%08x",
ftrace_print_symbols_seq(p, __entry->exit_code,
kvm_x86_ops->exit_reasons_str),
__entry->exit_info1, __entry->exit_info2,
@@ -504,7 +556,7 @@ TRACE_EVENT(kvm_nested_intr_vmexit,
__entry->rip = rip
),
- TP_printk("rip: 0x%016llx\n", __entry->rip)
+ TP_printk("rip: 0x%016llx", __entry->rip)
);
/*
@@ -526,7 +578,7 @@ TRACE_EVENT(kvm_invlpga,
__entry->address = address;
),
- TP_printk("rip: 0x%016llx asid: %d address: 0x%016llx\n",
+ TP_printk("rip: 0x%016llx asid: %d address: 0x%016llx",
__entry->rip, __entry->asid, __entry->address)
);
@@ -547,11 +599,102 @@ TRACE_EVENT(kvm_skinit,
__entry->slb = slb;
),
- TP_printk("rip: 0x%016llx slb: 0x%08x\n",
+ TP_printk("rip: 0x%016llx slb: 0x%08x",
__entry->rip, __entry->slb)
);
+#define __print_insn(insn, ilen) ({ \
+ int i; \
+ const char *ret = p->buffer + p->len; \
+ \
+ for (i = 0; i < ilen; ++i) \
+ trace_seq_printf(p, " %02x", insn[i]); \
+ trace_seq_printf(p, "%c", 0); \
+ ret; \
+ })
+
+#define KVM_EMUL_INSN_F_CR0_PE (1 << 0)
+#define KVM_EMUL_INSN_F_EFL_VM (1 << 1)
+#define KVM_EMUL_INSN_F_CS_D (1 << 2)
+#define KVM_EMUL_INSN_F_CS_L (1 << 3)
+
+#define kvm_trace_symbol_emul_flags \
+ { 0, "real" }, \
+ { KVM_EMUL_INSN_F_CR0_PE \
+ | KVM_EMUL_INSN_F_EFL_VM, "vm16" }, \
+ { KVM_EMUL_INSN_F_CR0_PE, "prot16" }, \
+ { KVM_EMUL_INSN_F_CR0_PE \
+ | KVM_EMUL_INSN_F_CS_D, "prot32" }, \
+ { KVM_EMUL_INSN_F_CR0_PE \
+ | KVM_EMUL_INSN_F_CS_L, "prot64" }
+
+#define kei_decode_mode(mode) ({ \
+ u8 flags = 0xff; \
+ switch (mode) { \
+ case X86EMUL_MODE_REAL: \
+ flags = 0; \
+ break; \
+ case X86EMUL_MODE_VM86: \
+ flags = KVM_EMUL_INSN_F_EFL_VM; \
+ break; \
+ case X86EMUL_MODE_PROT16: \
+ flags = KVM_EMUL_INSN_F_CR0_PE; \
+ break; \
+ case X86EMUL_MODE_PROT32: \
+ flags = KVM_EMUL_INSN_F_CR0_PE \
+ | KVM_EMUL_INSN_F_CS_D; \
+ break; \
+ case X86EMUL_MODE_PROT64: \
+ flags = KVM_EMUL_INSN_F_CR0_PE \
+ | KVM_EMUL_INSN_F_CS_L; \
+ break; \
+ } \
+ flags; \
+ })
+
+TRACE_EVENT(kvm_emulate_insn,
+ TP_PROTO(struct kvm_vcpu *vcpu, __u8 failed),
+ TP_ARGS(vcpu, failed),
+
+ TP_STRUCT__entry(
+ __field( __u64, rip )
+ __field( __u32, csbase )
+ __field( __u8, len )
+ __array( __u8, insn, 15 )
+ __field( __u8, flags )
+ __field( __u8, failed )
+ ),
+
+ TP_fast_assign(
+ __entry->rip = vcpu->arch.emulate_ctxt.decode.fetch.start;
+ __entry->csbase = kvm_x86_ops->get_segment_base(vcpu, VCPU_SREG_CS);
+ __entry->len = vcpu->arch.emulate_ctxt.decode.eip
+ - vcpu->arch.emulate_ctxt.decode.fetch.start;
+ memcpy(__entry->insn,
+ vcpu->arch.emulate_ctxt.decode.fetch.data,
+ 15);
+ __entry->flags = kei_decode_mode(vcpu->arch.emulate_ctxt.mode);
+ __entry->failed = failed;
+ ),
+
+ TP_printk("%x:%llx:%s (%s)%s",
+ __entry->csbase, __entry->rip,
+ __print_insn(__entry->insn, __entry->len),
+ __print_symbolic(__entry->flags,
+ kvm_trace_symbol_emul_flags),
+ __entry->failed ? " failed" : ""
+ )
+ );
+
+#define trace_kvm_emulate_insn_start(vcpu) trace_kvm_emulate_insn(vcpu, 0)
+#define trace_kvm_emulate_insn_failed(vcpu) trace_kvm_emulate_insn(vcpu, 1)
+
#endif /* _TRACE_KVM_H */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH arch/x86/kvm
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
/* This part must be outside protection */
#include <trace/define_trace.h>
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index bc933cfb4e66..859a01a07dbf 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -27,6 +27,7 @@
#include <linux/moduleparam.h>
#include <linux/ftrace_event.h>
#include <linux/slab.h>
+#include <linux/tboot.h>
#include "kvm_cache_regs.h"
#include "x86.h"
@@ -98,6 +99,8 @@ module_param(ple_gap, int, S_IRUGO);
static int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW;
module_param(ple_window, int, S_IRUGO);
+#define NR_AUTOLOAD_MSRS 1
+
struct vmcs {
u32 revision_id;
u32 abort;
@@ -125,6 +128,11 @@ struct vcpu_vmx {
u64 msr_guest_kernel_gs_base;
#endif
struct vmcs *vmcs;
+ struct msr_autoload {
+ unsigned nr;
+ struct vmx_msr_entry guest[NR_AUTOLOAD_MSRS];
+ struct vmx_msr_entry host[NR_AUTOLOAD_MSRS];
+ } msr_autoload;
struct {
int loaded;
u16 fs_sel, gs_sel, ldt_sel;
@@ -234,56 +242,56 @@ static const u32 vmx_msr_index[] = {
};
#define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index)
-static inline int is_page_fault(u32 intr_info)
+static inline bool is_page_fault(u32 intr_info)
{
return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
INTR_INFO_VALID_MASK)) ==
(INTR_TYPE_HARD_EXCEPTION | PF_VECTOR | INTR_INFO_VALID_MASK);
}
-static inline int is_no_device(u32 intr_info)
+static inline bool is_no_device(u32 intr_info)
{
return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
INTR_INFO_VALID_MASK)) ==
(INTR_TYPE_HARD_EXCEPTION | NM_VECTOR | INTR_INFO_VALID_MASK);
}
-static inline int is_invalid_opcode(u32 intr_info)
+static inline bool is_invalid_opcode(u32 intr_info)
{
return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
INTR_INFO_VALID_MASK)) ==
(INTR_TYPE_HARD_EXCEPTION | UD_VECTOR | INTR_INFO_VALID_MASK);
}
-static inline int is_external_interrupt(u32 intr_info)
+static inline bool is_external_interrupt(u32 intr_info)
{
return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
== (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
}
-static inline int is_machine_check(u32 intr_info)
+static inline bool is_machine_check(u32 intr_info)
{
return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
INTR_INFO_VALID_MASK)) ==
(INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK);
}
-static inline int cpu_has_vmx_msr_bitmap(void)
+static inline bool cpu_has_vmx_msr_bitmap(void)
{
return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS;
}
-static inline int cpu_has_vmx_tpr_shadow(void)
+static inline bool cpu_has_vmx_tpr_shadow(void)
{
return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW;
}
-static inline int vm_need_tpr_shadow(struct kvm *kvm)
+static inline bool vm_need_tpr_shadow(struct kvm *kvm)
{
return (cpu_has_vmx_tpr_shadow()) && (irqchip_in_kernel(kvm));
}
-static inline int cpu_has_secondary_exec_ctrls(void)
+static inline bool cpu_has_secondary_exec_ctrls(void)
{
return vmcs_config.cpu_based_exec_ctrl &
CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
@@ -303,80 +311,80 @@ static inline bool cpu_has_vmx_flexpriority(void)
static inline bool cpu_has_vmx_ept_execute_only(void)
{
- return !!(vmx_capability.ept & VMX_EPT_EXECUTE_ONLY_BIT);
+ return vmx_capability.ept & VMX_EPT_EXECUTE_ONLY_BIT;
}
static inline bool cpu_has_vmx_eptp_uncacheable(void)
{
- return !!(vmx_capability.ept & VMX_EPTP_UC_BIT);
+ return vmx_capability.ept & VMX_EPTP_UC_BIT;
}
static inline bool cpu_has_vmx_eptp_writeback(void)
{
- return !!(vmx_capability.ept & VMX_EPTP_WB_BIT);
+ return vmx_capability.ept & VMX_EPTP_WB_BIT;
}
static inline bool cpu_has_vmx_ept_2m_page(void)
{
- return !!(vmx_capability.ept & VMX_EPT_2MB_PAGE_BIT);
+ return vmx_capability.ept & VMX_EPT_2MB_PAGE_BIT;
}
static inline bool cpu_has_vmx_ept_1g_page(void)
{
- return !!(vmx_capability.ept & VMX_EPT_1GB_PAGE_BIT);
+ return vmx_capability.ept & VMX_EPT_1GB_PAGE_BIT;
}
-static inline int cpu_has_vmx_invept_individual_addr(void)
+static inline bool cpu_has_vmx_invept_individual_addr(void)
{
- return !!(vmx_capability.ept & VMX_EPT_EXTENT_INDIVIDUAL_BIT);
+ return vmx_capability.ept & VMX_EPT_EXTENT_INDIVIDUAL_BIT;
}
-static inline int cpu_has_vmx_invept_context(void)
+static inline bool cpu_has_vmx_invept_context(void)
{
- return !!(vmx_capability.ept & VMX_EPT_EXTENT_CONTEXT_BIT);
+ return vmx_capability.ept & VMX_EPT_EXTENT_CONTEXT_BIT;
}
-static inline int cpu_has_vmx_invept_global(void)
+static inline bool cpu_has_vmx_invept_global(void)
{
- return !!(vmx_capability.ept & VMX_EPT_EXTENT_GLOBAL_BIT);
+ return vmx_capability.ept & VMX_EPT_EXTENT_GLOBAL_BIT;
}
-static inline int cpu_has_vmx_ept(void)
+static inline bool cpu_has_vmx_ept(void)
{
return vmcs_config.cpu_based_2nd_exec_ctrl &
SECONDARY_EXEC_ENABLE_EPT;
}
-static inline int cpu_has_vmx_unrestricted_guest(void)
+static inline bool cpu_has_vmx_unrestricted_guest(void)
{
return vmcs_config.cpu_based_2nd_exec_ctrl &
SECONDARY_EXEC_UNRESTRICTED_GUEST;
}
-static inline int cpu_has_vmx_ple(void)
+static inline bool cpu_has_vmx_ple(void)
{
return vmcs_config.cpu_based_2nd_exec_ctrl &
SECONDARY_EXEC_PAUSE_LOOP_EXITING;
}
-static inline int vm_need_virtualize_apic_accesses(struct kvm *kvm)
+static inline bool vm_need_virtualize_apic_accesses(struct kvm *kvm)
{
return flexpriority_enabled && irqchip_in_kernel(kvm);
}
-static inline int cpu_has_vmx_vpid(void)
+static inline bool cpu_has_vmx_vpid(void)
{
return vmcs_config.cpu_based_2nd_exec_ctrl &
SECONDARY_EXEC_ENABLE_VPID;
}
-static inline int cpu_has_vmx_rdtscp(void)
+static inline bool cpu_has_vmx_rdtscp(void)
{
return vmcs_config.cpu_based_2nd_exec_ctrl &
SECONDARY_EXEC_RDTSCP;
}
-static inline int cpu_has_virtual_nmis(void)
+static inline bool cpu_has_virtual_nmis(void)
{
return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS;
}
@@ -595,16 +603,56 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
vmcs_write32(EXCEPTION_BITMAP, eb);
}
+static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
+{
+ unsigned i;
+ struct msr_autoload *m = &vmx->msr_autoload;
+
+ for (i = 0; i < m->nr; ++i)
+ if (m->guest[i].index == msr)
+ break;
+
+ if (i == m->nr)
+ return;
+ --m->nr;
+ m->guest[i] = m->guest[m->nr];
+ m->host[i] = m->host[m->nr];
+ vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
+ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
+}
+
+static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
+ u64 guest_val, u64 host_val)
+{
+ unsigned i;
+ struct msr_autoload *m = &vmx->msr_autoload;
+
+ for (i = 0; i < m->nr; ++i)
+ if (m->guest[i].index == msr)
+ break;
+
+ if (i == m->nr) {
+ ++m->nr;
+ vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
+ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
+ }
+
+ m->guest[i].index = msr;
+ m->guest[i].value = guest_val;
+ m->host[i].index = msr;
+ m->host[i].value = host_val;
+}
+
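add_atomic_switch_msr()/clear_atomic_switch_msr() above maintain a small, dense array with swap-with-last removal. Below is a self-contained sketch of that bookkeeping; the real code keeps separate guest/host entry arrays and mirrors the count into the VM_ENTRY/VM_EXIT MSR-load counts in the VMCS, which this sketch omits.

#include <stdint.h>

#define NR_AUTOLOAD 8		/* capacity; the patch uses NR_AUTOLOAD_MSRS = 1 */

struct autoload_entry {
	uint32_t index;
	uint64_t guest_val;
	uint64_t host_val;
};

struct autoload {
	unsigned int nr;
	struct autoload_entry e[NR_AUTOLOAD];
};

/* Add a new entry or update an existing one, as add_atomic_switch_msr() does. */
static void autoload_add(struct autoload *m, uint32_t msr, uint64_t g, uint64_t h)
{
	unsigned int i;

	for (i = 0; i < m->nr; i++)
		if (m->e[i].index == msr)
			break;

	if (i == m->nr) {
		if (m->nr == NR_AUTOLOAD)
			return;		/* full; the sketch adds this guard */
		m->nr++;
	}

	m->e[i].index = msr;
	m->e[i].guest_val = g;
	m->e[i].host_val = h;
}

/* Remove by moving the last entry into the hole, keeping the list dense. */
static void autoload_del(struct autoload *m, uint32_t msr)
{
	unsigned int i;

	for (i = 0; i < m->nr; i++)
		if (m->e[i].index == msr)
			break;
	if (i == m->nr)
		return;
	m->e[i] = m->e[--m->nr];
}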
static void reload_tss(void)
{
/*
* VT restores TR but not its size. Useless.
*/
- struct descriptor_table gdt;
+ struct desc_ptr gdt;
struct desc_struct *descs;
- kvm_get_gdt(&gdt);
- descs = (void *)gdt.base;
+ native_store_gdt(&gdt);
+ descs = (void *)gdt.address;
descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
load_TR_desc();
}
@@ -631,9 +679,57 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
guest_efer |= host_efer & ignore_bits;
vmx->guest_msrs[efer_offset].data = guest_efer;
vmx->guest_msrs[efer_offset].mask = ~ignore_bits;
+
+ clear_atomic_switch_msr(vmx, MSR_EFER);
+ /* With EPT we can't emulate NX, so we must switch EFER.NX atomically */
+ if (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX)) {
+ guest_efer = vmx->vcpu.arch.efer;
+ if (!(guest_efer & EFER_LMA))
+ guest_efer &= ~EFER_LME;
+ add_atomic_switch_msr(vmx, MSR_EFER, guest_efer, host_efer);
+ return false;
+ }
+
return true;
}
+static unsigned long segment_base(u16 selector)
+{
+ struct desc_ptr gdt;
+ struct desc_struct *d;
+ unsigned long table_base;
+ unsigned long v;
+
+ if (!(selector & ~3))
+ return 0;
+
+ native_store_gdt(&gdt);
+ table_base = gdt.address;
+
+ if (selector & 4) { /* from ldt */
+ u16 ldt_selector = kvm_read_ldt();
+
+ if (!(ldt_selector & ~3))
+ return 0;
+
+ table_base = segment_base(ldt_selector);
+ }
+ d = (struct desc_struct *)(table_base + (selector & ~7));
+ v = get_desc_base(d);
+#ifdef CONFIG_X86_64
+ if (d->s == 0 && (d->type == 2 || d->type == 9 || d->type == 11))
+ v |= ((unsigned long)((struct ldttss_desc64 *)d)->base3) << 32;
+#endif
+ return v;
+}
+
+static inline unsigned long kvm_read_tr_base(void)
+{
+ u16 tr;
+ asm("str %0" : "=g"(tr));
+ return segment_base(tr);
+}
+
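segment_base() above walks the GDT or LDT by hand; (selector & ~7) is the byte offset into the table because the low three selector bits are the RPL plus the table indicator. For reference, a sketch of pulling the 32-bit base out of a raw 8-byte descriptor, which is what get_desc_base() does; the byte offsets follow the standard x86 descriptor layout, and the extra base3 dword handled by the CONFIG_X86_64 branch above lives in the following 8 bytes of a 16-byte system descriptor.

#include <stdint.h>

/* Base bits 0-15 are in bytes 2-3, bits 16-23 in byte 4, bits 24-31 in byte 7. */
static uint32_t desc_base32(const uint8_t *d)
{
	return (uint32_t)d[2] | ((uint32_t)d[3] << 8) |
	       ((uint32_t)d[4] << 16) | ((uint32_t)d[7] << 24);
}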
static void vmx_save_host_state(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -758,7 +854,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
}
if (vcpu->cpu != cpu) {
- struct descriptor_table dt;
+ struct desc_ptr dt;
unsigned long sysenter_esp;
vcpu->cpu = cpu;
@@ -767,8 +863,8 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
* processors.
*/
vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
- kvm_get_gdt(&dt);
- vmcs_writel(HOST_GDTR_BASE, dt.base); /* 22.2.4 */
+ native_store_gdt(&dt);
+ vmcs_writel(HOST_GDTR_BASE, dt.address); /* 22.2.4 */
rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
@@ -846,9 +942,9 @@ static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
int ret = 0;
if (interruptibility & GUEST_INTR_STATE_STI)
- ret |= X86_SHADOW_INT_STI;
+ ret |= KVM_X86_SHADOW_INT_STI;
if (interruptibility & GUEST_INTR_STATE_MOV_SS)
- ret |= X86_SHADOW_INT_MOV_SS;
+ ret |= KVM_X86_SHADOW_INT_MOV_SS;
return ret & mask;
}
@@ -860,9 +956,9 @@ static void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
interruptibility &= ~(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS);
- if (mask & X86_SHADOW_INT_MOV_SS)
+ if (mask & KVM_X86_SHADOW_INT_MOV_SS)
interruptibility |= GUEST_INTR_STATE_MOV_SS;
- if (mask & X86_SHADOW_INT_STI)
+ else if (mask & KVM_X86_SHADOW_INT_STI)
interruptibility |= GUEST_INTR_STATE_STI;
if ((interruptibility != interruptibility_old))
@@ -882,7 +978,8 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
}
static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
- bool has_error_code, u32 error_code)
+ bool has_error_code, u32 error_code,
+ bool reinject)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
u32 intr_info = nr | INTR_INFO_VALID_MASK;
@@ -1176,9 +1273,16 @@ static __init int vmx_disabled_by_bios(void)
u64 msr;
rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
- return (msr & (FEATURE_CONTROL_LOCKED |
- FEATURE_CONTROL_VMXON_ENABLED))
- == FEATURE_CONTROL_LOCKED;
+ if (msr & FEATURE_CONTROL_LOCKED) {
+ if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX)
+ && tboot_enabled())
+ return 1;
+ if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX)
+ && !tboot_enabled())
+ return 1;
+ }
+
+ return 0;
/* locked but not enabled */
}
@@ -1186,21 +1290,23 @@ static int hardware_enable(void *garbage)
{
int cpu = raw_smp_processor_id();
u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
- u64 old;
+ u64 old, test_bits;
if (read_cr4() & X86_CR4_VMXE)
return -EBUSY;
INIT_LIST_HEAD(&per_cpu(vcpus_on_cpu, cpu));
rdmsrl(MSR_IA32_FEATURE_CONTROL, old);
- if ((old & (FEATURE_CONTROL_LOCKED |
- FEATURE_CONTROL_VMXON_ENABLED))
- != (FEATURE_CONTROL_LOCKED |
- FEATURE_CONTROL_VMXON_ENABLED))
+
+ test_bits = FEATURE_CONTROL_LOCKED;
+ test_bits |= FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
+ if (tboot_enabled())
+ test_bits |= FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX;
+
+ if ((old & test_bits) != test_bits) {
/* enable and lock */
- wrmsrl(MSR_IA32_FEATURE_CONTROL, old |
- FEATURE_CONTROL_LOCKED |
- FEATURE_CONTROL_VMXON_ENABLED);
+ wrmsrl(MSR_IA32_FEATURE_CONTROL, old | test_bits);
+ }
write_cr4(read_cr4() | X86_CR4_VMXE); /* FIXME: not cpu hotplug safe */
asm volatile (ASM_VMX_VMXON_RAX
: : "a"(&phys_addr), "m"(phys_addr)
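hardware_enable() above builds test_bits from the lock bit plus the VMXON-enable bit matching the launch environment (the inside-SMX bit is additionally required when tboot is active). A sketch of the same decision is below; the IA32_FEATURE_CONTROL bit positions are assumptions here, and the helper names are ours.

#include <stdint.h>
#include <stdbool.h>

#define FC_LOCKED		(1ull << 0)	/* assumed bit positions */
#define FC_VMXON_INSIDE_SMX	(1ull << 1)
#define FC_VMXON_OUTSIDE_SMX	(1ull << 2)

static uint64_t required_feature_control_bits(bool launched_by_tboot)
{
	uint64_t bits = FC_LOCKED | FC_VMXON_OUTSIDE_SMX;

	if (launched_by_tboot)
		bits |= FC_VMXON_INSIDE_SMX;
	return bits;
}

/* VMXON can proceed if the required bits are set, or if the MSR is still
 * unlocked so the missing bits can be set (and locked) before VMXON. */
static bool vmxon_allowed(uint64_t feature_control, bool launched_by_tboot)
{
	uint64_t req = required_feature_control_bits(launched_by_tboot);

	if ((feature_control & req) == req)
		return true;
	return !(feature_control & FC_LOCKED);
}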
@@ -1521,7 +1627,7 @@ static gva_t rmode_tss_base(struct kvm *kvm)
struct kvm_memslots *slots;
gfn_t base_gfn;
- slots = rcu_dereference(kvm->memslots);
+ slots = kvm_memslots(kvm);
base_gfn = kvm->memslots->memslots[0].base_gfn +
kvm->memslots->memslots[0].npages - 3;
return base_gfn << PAGE_SHIFT;
@@ -1649,6 +1755,7 @@ static void exit_lmode(struct kvm_vcpu *vcpu)
vmcs_write32(VM_ENTRY_CONTROLS,
vmcs_read32(VM_ENTRY_CONTROLS)
& ~VM_ENTRY_IA32E_MODE);
+ vmx_set_efer(vcpu, vcpu->arch.efer);
}
#endif
@@ -1934,28 +2041,28 @@ static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
*l = (ar >> 13) & 1;
}
-static void vmx_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
+static void vmx_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
- dt->limit = vmcs_read32(GUEST_IDTR_LIMIT);
- dt->base = vmcs_readl(GUEST_IDTR_BASE);
+ dt->size = vmcs_read32(GUEST_IDTR_LIMIT);
+ dt->address = vmcs_readl(GUEST_IDTR_BASE);
}
-static void vmx_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
+static void vmx_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
- vmcs_write32(GUEST_IDTR_LIMIT, dt->limit);
- vmcs_writel(GUEST_IDTR_BASE, dt->base);
+ vmcs_write32(GUEST_IDTR_LIMIT, dt->size);
+ vmcs_writel(GUEST_IDTR_BASE, dt->address);
}
-static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
+static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
- dt->limit = vmcs_read32(GUEST_GDTR_LIMIT);
- dt->base = vmcs_readl(GUEST_GDTR_BASE);
+ dt->size = vmcs_read32(GUEST_GDTR_LIMIT);
+ dt->address = vmcs_readl(GUEST_GDTR_BASE);
}
-static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
+static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
- vmcs_write32(GUEST_GDTR_LIMIT, dt->limit);
- vmcs_writel(GUEST_GDTR_BASE, dt->base);
+ vmcs_write32(GUEST_GDTR_LIMIT, dt->size);
+ vmcs_writel(GUEST_GDTR_BASE, dt->address);
}
static bool rmode_segment_valid(struct kvm_vcpu *vcpu, int seg)
@@ -2296,6 +2403,16 @@ static void allocate_vpid(struct vcpu_vmx *vmx)
spin_unlock(&vmx_vpid_lock);
}
+static void free_vpid(struct vcpu_vmx *vmx)
+{
+ if (!enable_vpid)
+ return;
+ spin_lock(&vmx_vpid_lock);
+ if (vmx->vpid != 0)
+ __clear_bit(vmx->vpid, vmx_vpid_bitmap);
+ spin_unlock(&vmx_vpid_lock);
+}
+
static void __vmx_disable_intercept_for_msr(unsigned long *msr_bitmap, u32 msr)
{
int f = sizeof(unsigned long);
@@ -2334,7 +2451,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
u32 junk;
u64 host_pat, tsc_this, tsc_base;
unsigned long a;
- struct descriptor_table dt;
+ struct desc_ptr dt;
int i;
unsigned long kvm_vmx_return;
u32 exec_control;
@@ -2415,14 +2532,16 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8); /* 22.2.4 */
- kvm_get_idt(&dt);
- vmcs_writel(HOST_IDTR_BASE, dt.base); /* 22.2.4 */
+ native_store_idt(&dt);
+ vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
+ vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
+ vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest));
rdmsr(MSR_IA32_SYSENTER_CS, host_sysenter_cs, junk);
vmcs_write32(HOST_IA32_SYSENTER_CS, host_sysenter_cs);
@@ -2703,8 +2822,7 @@ static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
return 0;
return !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
- (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS |
- GUEST_INTR_STATE_NMI));
+ (GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_NMI));
}
static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu)
@@ -2948,22 +3066,20 @@ static int handle_io(struct kvm_vcpu *vcpu)
int size, in, string;
unsigned port;
- ++vcpu->stat.io_exits;
exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
string = (exit_qualification & 16) != 0;
+ in = (exit_qualification & 8) != 0;
- if (string) {
- if (emulate_instruction(vcpu, 0, 0, 0) == EMULATE_DO_MMIO)
- return 0;
- return 1;
- }
+ ++vcpu->stat.io_exits;
- size = (exit_qualification & 7) + 1;
- in = (exit_qualification & 8) != 0;
- port = exit_qualification >> 16;
+ if (string || in)
+ return !(emulate_instruction(vcpu, 0, 0, 0) == EMULATE_DO_MMIO);
+ port = exit_qualification >> 16;
+ size = (exit_qualification & 7) + 1;
skip_emulated_instruction(vcpu);
- return kvm_emulate_pio(vcpu, in, size, port);
+
+ return kvm_fast_pio_out(vcpu, size, port);
}
static void
@@ -3054,19 +3170,9 @@ static int handle_cr(struct kvm_vcpu *vcpu)
return 0;
}
-static int check_dr_alias(struct kvm_vcpu *vcpu)
-{
- if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) {
- kvm_queue_exception(vcpu, UD_VECTOR);
- return -1;
- }
- return 0;
-}
-
static int handle_dr(struct kvm_vcpu *vcpu)
{
unsigned long exit_qualification;
- unsigned long val;
int dr, reg;
/* Do not handle if the CPL > 0, will trigger GP on re-entry */
@@ -3101,67 +3207,20 @@ static int handle_dr(struct kvm_vcpu *vcpu)
dr = exit_qualification & DEBUG_REG_ACCESS_NUM;
reg = DEBUG_REG_ACCESS_REG(exit_qualification);
if (exit_qualification & TYPE_MOV_FROM_DR) {
- switch (dr) {
- case 0 ... 3:
- val = vcpu->arch.db[dr];
- break;
- case 4:
- if (check_dr_alias(vcpu) < 0)
- return 1;
- /* fall through */
- case 6:
- val = vcpu->arch.dr6;
- break;
- case 5:
- if (check_dr_alias(vcpu) < 0)
- return 1;
- /* fall through */
- default: /* 7 */
- val = vcpu->arch.dr7;
- break;
- }
- kvm_register_write(vcpu, reg, val);
- } else {
- val = vcpu->arch.regs[reg];
- switch (dr) {
- case 0 ... 3:
- vcpu->arch.db[dr] = val;
- if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
- vcpu->arch.eff_db[dr] = val;
- break;
- case 4:
- if (check_dr_alias(vcpu) < 0)
- return 1;
- /* fall through */
- case 6:
- if (val & 0xffffffff00000000ULL) {
- kvm_inject_gp(vcpu, 0);
- return 1;
- }
- vcpu->arch.dr6 = (val & DR6_VOLATILE) | DR6_FIXED_1;
- break;
- case 5:
- if (check_dr_alias(vcpu) < 0)
- return 1;
- /* fall through */
- default: /* 7 */
- if (val & 0xffffffff00000000ULL) {
- kvm_inject_gp(vcpu, 0);
- return 1;
- }
- vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
- if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
- vmcs_writel(GUEST_DR7, vcpu->arch.dr7);
- vcpu->arch.switch_db_regs =
- (val & DR7_BP_EN_MASK);
- }
- break;
- }
- }
+ unsigned long val;
+ if (!kvm_get_dr(vcpu, dr, &val))
+ kvm_register_write(vcpu, reg, val);
+ } else
+ kvm_set_dr(vcpu, dr, vcpu->arch.regs[reg]);
skip_emulated_instruction(vcpu);
return 1;
}
+static void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val)
+{
+ vmcs_writel(GUEST_DR7, val);
+}
+
static int handle_cpuid(struct kvm_vcpu *vcpu)
{
kvm_emulate_cpuid(vcpu);
@@ -3293,6 +3352,8 @@ static int handle_task_switch(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
unsigned long exit_qualification;
+ bool has_error_code = false;
+ u32 error_code = 0;
u16 tss_selector;
int reason, type, idt_v;
@@ -3315,6 +3376,13 @@ static int handle_task_switch(struct kvm_vcpu *vcpu)
kvm_clear_interrupt_queue(vcpu);
break;
case INTR_TYPE_HARD_EXCEPTION:
+ if (vmx->idt_vectoring_info &
+ VECTORING_INFO_DELIVER_CODE_MASK) {
+ has_error_code = true;
+ error_code =
+ vmcs_read32(IDT_VECTORING_ERROR_CODE);
+ }
+ /* fall through */
case INTR_TYPE_SOFT_EXCEPTION:
kvm_clear_exception_queue(vcpu);
break;
@@ -3329,8 +3397,13 @@ static int handle_task_switch(struct kvm_vcpu *vcpu)
type != INTR_TYPE_NMI_INTR))
skip_emulated_instruction(vcpu);
- if (!kvm_task_switch(vcpu, tss_selector, reason))
+ if (kvm_task_switch(vcpu, tss_selector, reason,
+ has_error_code, error_code) == EMULATE_FAIL) {
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
+ vcpu->run->internal.ndata = 0;
return 0;
+ }
/* clear all local breakpoint enable flags */
vmcs_writel(GUEST_DR7, vmcs_readl(GUEST_DR7) & ~55);
@@ -3575,7 +3648,7 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
u32 exit_reason = vmx->exit_reason;
u32 vectoring_info = vmx->idt_vectoring_info;
- trace_kvm_exit(exit_reason, kvm_rip_read(vcpu));
+ trace_kvm_exit(exit_reason, vcpu);
/* If guest state is invalid, start emulating */
if (vmx->emulation_required && emulate_invalid_guest_state)
@@ -3660,8 +3733,11 @@ static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
/* We need to handle NMIs before interrupts are enabled */
if ((exit_intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR &&
- (exit_intr_info & INTR_INFO_VALID_MASK))
+ (exit_intr_info & INTR_INFO_VALID_MASK)) {
+ kvm_before_handle_nmi(&vmx->vcpu);
asm("int $2");
+ kvm_after_handle_nmi(&vmx->vcpu);
+ }
idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK;
@@ -3921,10 +3997,7 @@ static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
- spin_lock(&vmx_vpid_lock);
- if (vmx->vpid != 0)
- __clear_bit(vmx->vpid, vmx_vpid_bitmap);
- spin_unlock(&vmx_vpid_lock);
+ free_vpid(vmx);
vmx_free_vmcs(vcpu);
kfree(vmx->guest_msrs);
kvm_vcpu_uninit(vcpu);
@@ -3986,6 +4059,7 @@ free_msrs:
uninit_vcpu:
kvm_vcpu_uninit(&vmx->vcpu);
free_vcpu:
+ free_vpid(vmx);
kmem_cache_free(kvm_vcpu_cache, vmx);
return ERR_PTR(err);
}
@@ -4116,6 +4190,10 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
}
}
+static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
+{
+}
+
static struct kvm_x86_ops vmx_x86_ops = {
.cpu_has_kvm_support = cpu_has_kvm_support,
.disabled_by_bios = vmx_disabled_by_bios,
@@ -4152,6 +4230,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
.set_idt = vmx_set_idt,
.get_gdt = vmx_get_gdt,
.set_gdt = vmx_set_gdt,
+ .set_dr7 = vmx_set_dr7,
.cache_reg = vmx_cache_reg,
.get_rflags = vmx_get_rflags,
.set_rflags = vmx_set_rflags,
@@ -4187,6 +4266,8 @@ static struct kvm_x86_ops vmx_x86_ops = {
.cpuid_update = vmx_cpuid_update,
.rdtscp_supported = vmx_rdtscp_supported,
+
+ .set_supported_cpuid = vmx_set_supported_cpuid,
};
static int __init vmx_init(void)
@@ -4234,7 +4315,8 @@ static int __init vmx_init(void)
set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */
- r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx), THIS_MODULE);
+ r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
+ __alignof__(struct vcpu_vmx), THIS_MODULE);
if (r)
goto out3;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 3c4ca98ad27f..05d571f6f196 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -40,8 +40,9 @@
#include <linux/user-return-notifier.h>
#include <linux/srcu.h>
#include <linux/slab.h>
+#include <linux/perf_event.h>
#include <trace/events/kvm.h>
-#undef TRACE_INCLUDE_FILE
+
#define CREATE_TRACE_POINTS
#include "trace.h"
@@ -223,34 +224,6 @@ static void drop_user_return_notifiers(void *ignore)
kvm_on_user_return(&smsr->urn);
}
-unsigned long segment_base(u16 selector)
-{
- struct descriptor_table gdt;
- struct desc_struct *d;
- unsigned long table_base;
- unsigned long v;
-
- if (selector == 0)
- return 0;
-
- kvm_get_gdt(&gdt);
- table_base = gdt.base;
-
- if (selector & 4) { /* from ldt */
- u16 ldt_selector = kvm_read_ldt();
-
- table_base = segment_base(ldt_selector);
- }
- d = (struct desc_struct *)(table_base + (selector & ~7));
- v = get_desc_base(d);
-#ifdef CONFIG_X86_64
- if (d->s == 0 && (d->type == 2 || d->type == 9 || d->type == 11))
- v |= ((unsigned long)((struct ldttss_desc64 *)d)->base3) << 32;
-#endif
- return v;
-}
-EXPORT_SYMBOL_GPL(segment_base);
-
u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
{
if (irqchip_in_kernel(vcpu->kvm))
@@ -292,7 +265,8 @@ static int exception_class(int vector)
}
static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
- unsigned nr, bool has_error, u32 error_code)
+ unsigned nr, bool has_error, u32 error_code,
+ bool reinject)
{
u32 prev_nr;
int class1, class2;
@@ -303,6 +277,7 @@ static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
vcpu->arch.exception.has_error_code = has_error;
vcpu->arch.exception.nr = nr;
vcpu->arch.exception.error_code = error_code;
+ vcpu->arch.exception.reinject = reinject;
return;
}
@@ -331,10 +306,16 @@ static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
- kvm_multiple_exception(vcpu, nr, false, 0);
+ kvm_multiple_exception(vcpu, nr, false, 0, false);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception);
+void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
+{
+ kvm_multiple_exception(vcpu, nr, false, 0, true);
+}
+EXPORT_SYMBOL_GPL(kvm_requeue_exception);
+
void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long addr,
u32 error_code)
{
@@ -351,10 +332,16 @@ EXPORT_SYMBOL_GPL(kvm_inject_nmi);
void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
- kvm_multiple_exception(vcpu, nr, true, error_code);
+ kvm_multiple_exception(vcpu, nr, true, error_code, false);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception_e);
+void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
+{
+ kvm_multiple_exception(vcpu, nr, true, error_code, true);
+}
+EXPORT_SYMBOL_GPL(kvm_requeue_exception_e);
+
/*
* Checks if cpl <= required_cpl; if true, return true. Otherwise queue
* a #GP and return false.
@@ -475,7 +462,6 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
}
kvm_x86_ops->set_cr0(vcpu, cr0);
- vcpu->arch.cr0 = cr0;
kvm_mmu_reset_context(vcpu);
return;
@@ -484,7 +470,7 @@ EXPORT_SYMBOL_GPL(kvm_set_cr0);
void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
{
- kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0ful) | (msw & 0x0f));
+ kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f));
}
EXPORT_SYMBOL_GPL(kvm_lmsw);
@@ -516,7 +502,6 @@ void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
}
kvm_x86_ops->set_cr4(vcpu, cr4);
vcpu->arch.cr4 = cr4;
- vcpu->arch.mmu.base_role.cr4_pge = (cr4 & X86_CR4_PGE) && !tdp_enabled;
kvm_mmu_reset_context(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_set_cr4);
@@ -591,6 +576,80 @@ unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
}
EXPORT_SYMBOL_GPL(kvm_get_cr8);
+int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
+{
+ switch (dr) {
+ case 0 ... 3:
+ vcpu->arch.db[dr] = val;
+ if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
+ vcpu->arch.eff_db[dr] = val;
+ break;
+ case 4:
+ if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) {
+ kvm_queue_exception(vcpu, UD_VECTOR);
+ return 1;
+ }
+ /* fall through */
+ case 6:
+ if (val & 0xffffffff00000000ULL) {
+ kvm_inject_gp(vcpu, 0);
+ return 1;
+ }
+ vcpu->arch.dr6 = (val & DR6_VOLATILE) | DR6_FIXED_1;
+ break;
+ case 5:
+ if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) {
+ kvm_queue_exception(vcpu, UD_VECTOR);
+ return 1;
+ }
+ /* fall through */
+ default: /* 7 */
+ if (val & 0xffffffff00000000ULL) {
+ kvm_inject_gp(vcpu, 0);
+ return 1;
+ }
+ vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
+ if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
+ kvm_x86_ops->set_dr7(vcpu, vcpu->arch.dr7);
+ vcpu->arch.switch_db_regs = (val & DR7_BP_EN_MASK);
+ }
+ break;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_set_dr);
+
+int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
+{
+ switch (dr) {
+ case 0 ... 3:
+ *val = vcpu->arch.db[dr];
+ break;
+ case 4:
+ if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) {
+ kvm_queue_exception(vcpu, UD_VECTOR);
+ return 1;
+ }
+ /* fall through */
+ case 6:
+ *val = vcpu->arch.dr6;
+ break;
+ case 5:
+ if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) {
+ kvm_queue_exception(vcpu, UD_VECTOR);
+ return 1;
+ }
+ /* fall through */
+ default: /* 7 */
+ *val = vcpu->arch.dr7;
+ break;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_get_dr);
+
static inline u32 bit(int bitno)
{
return 1 << (bitno & 31);
@@ -605,9 +664,10 @@ static inline u32 bit(int bitno)
* kvm-specific. Those are put in the beginning of the list.
*/
-#define KVM_SAVE_MSRS_BEGIN 5
+#define KVM_SAVE_MSRS_BEGIN 7
static u32 msrs_to_save[] = {
MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
+ MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
HV_X64_MSR_APIC_ASSIST_PAGE,
MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
@@ -624,48 +684,42 @@ static u32 emulated_msrs[] = {
MSR_IA32_MISC_ENABLE,
};
-static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
+static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
- if (efer & efer_reserved_bits) {
- kvm_inject_gp(vcpu, 0);
- return;
- }
+ if (efer & efer_reserved_bits)
+ return 1;
if (is_paging(vcpu)
- && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME)) {
- kvm_inject_gp(vcpu, 0);
- return;
- }
+ && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
+ return 1;
if (efer & EFER_FFXSR) {
struct kvm_cpuid_entry2 *feat;
feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
- if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT))) {
- kvm_inject_gp(vcpu, 0);
- return;
- }
+ if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT)))
+ return 1;
}
if (efer & EFER_SVME) {
struct kvm_cpuid_entry2 *feat;
feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
- if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) {
- kvm_inject_gp(vcpu, 0);
- return;
- }
+ if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM)))
+ return 1;
}
- kvm_x86_ops->set_efer(vcpu, efer);
-
efer &= ~EFER_LMA;
efer |= vcpu->arch.efer & EFER_LMA;
+ kvm_x86_ops->set_efer(vcpu, efer);
+
vcpu->arch.efer = efer;
vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled;
kvm_mmu_reset_context(vcpu);
+
+ return 0;
}
void kvm_enable_efer_bits(u64 mask)
@@ -695,14 +749,22 @@ static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
{
- static int version;
+ int version;
+ int r;
struct pvclock_wall_clock wc;
struct timespec boot;
if (!wall_clock)
return;
- version++;
+ r = kvm_read_guest(kvm, wall_clock, &version, sizeof(version));
+ if (r)
+ return;
+
+ if (version & 1)
+ ++version; /* first time write, random junk */
+
+ ++version;
kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
@@ -795,6 +857,8 @@ static void kvm_write_guest_time(struct kvm_vcpu *v)
vcpu->hv_clock.system_time = ts.tv_nsec +
(NSEC_PER_SEC * (u64)ts.tv_sec) + v->kvm->arch.kvmclock_offset;
+ vcpu->hv_clock.flags = 0;
+
/*
* The interface expects us to write an even number signaling that the
* update is finished. Since the guest won't see the intermediate
@@ -1086,10 +1150,10 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
switch (msr) {
case MSR_EFER:
- set_efer(vcpu, data);
- break;
+ return set_efer(vcpu, data);
case MSR_K7_HWCR:
data &= ~(u64)0x40; /* ignore flush filter disable */
+ data &= ~(u64)0x100; /* ignore ignne emulation enable */
if (data != 0) {
pr_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n",
data);
@@ -1132,10 +1196,12 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
case MSR_IA32_MISC_ENABLE:
vcpu->arch.ia32_misc_enable_msr = data;
break;
+ case MSR_KVM_WALL_CLOCK_NEW:
case MSR_KVM_WALL_CLOCK:
vcpu->kvm->arch.wall_clock = data;
kvm_write_wall_clock(vcpu->kvm, data);
break;
+ case MSR_KVM_SYSTEM_TIME_NEW:
case MSR_KVM_SYSTEM_TIME: {
if (vcpu->arch.time_page) {
kvm_release_page_dirty(vcpu->arch.time_page);
@@ -1407,9 +1473,11 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
data = vcpu->arch.efer;
break;
case MSR_KVM_WALL_CLOCK:
+ case MSR_KVM_WALL_CLOCK_NEW:
data = vcpu->kvm->arch.wall_clock;
break;
case MSR_KVM_SYSTEM_TIME:
+ case MSR_KVM_SYSTEM_TIME_NEW:
data = vcpu->arch.time;
break;
case MSR_IA32_P5_MC_ADDR:
@@ -1548,6 +1616,7 @@ int kvm_dev_ioctl_check_extension(long ext)
case KVM_CAP_HYPERV_VAPIC:
case KVM_CAP_HYPERV_SPIN:
case KVM_CAP_PCI_SEGMENT:
+ case KVM_CAP_DEBUGREGS:
case KVM_CAP_X86_ROBUST_SINGLESTEP:
r = 1;
break;
@@ -1712,6 +1781,7 @@ static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
if (copy_from_user(cpuid_entries, entries,
cpuid->nent * sizeof(struct kvm_cpuid_entry)))
goto out_free;
+ vcpu_load(vcpu);
for (i = 0; i < cpuid->nent; i++) {
vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
@@ -1729,6 +1799,7 @@ static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
r = 0;
kvm_apic_set_version(vcpu);
kvm_x86_ops->cpuid_update(vcpu);
+ vcpu_put(vcpu);
out_free:
vfree(cpuid_entries);
@@ -1749,9 +1820,11 @@ static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
goto out;
+ vcpu_load(vcpu);
vcpu->arch.cpuid_nent = cpuid->nent;
kvm_apic_set_version(vcpu);
kvm_x86_ops->cpuid_update(vcpu);
+ vcpu_put(vcpu);
return 0;
out:
@@ -1764,6 +1837,7 @@ static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
{
int r;
+ vcpu_load(vcpu);
r = -E2BIG;
if (cpuid->nent < vcpu->arch.cpuid_nent)
goto out;
@@ -1775,6 +1849,7 @@ static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
out:
cpuid->nent = vcpu->arch.cpuid_nent;
+ vcpu_put(vcpu);
return r;
}
@@ -1905,6 +1980,24 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
}
break;
}
+ case KVM_CPUID_SIGNATURE: {
+ char signature[12] = "KVMKVMKVM\0\0";
+ u32 *sigptr = (u32 *)signature;
+ entry->eax = 0;
+ entry->ebx = sigptr[0];
+ entry->ecx = sigptr[1];
+ entry->edx = sigptr[2];
+ break;
+ }
+ case KVM_CPUID_FEATURES:
+ entry->eax = (1 << KVM_FEATURE_CLOCKSOURCE) |
+ (1 << KVM_FEATURE_NOP_IO_DELAY) |
+ (1 << KVM_FEATURE_CLOCKSOURCE2) |
+ (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT);
+ entry->ebx = 0;
+ entry->ecx = 0;
+ entry->edx = 0;
+ break;
case 0x80000000:
entry->eax = min(entry->eax, 0x8000001a);
break;
@@ -1913,6 +2006,9 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
entry->ecx &= kvm_supported_word6_x86_features;
break;
}
+
+ kvm_x86_ops->set_supported_cpuid(function, entry);
+
put_cpu();
}
@@ -1948,6 +2044,23 @@ static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
for (func = 0x80000001; func <= limit && nent < cpuid->nent; ++func)
do_cpuid_ent(&cpuid_entries[nent], func, 0,
&nent, cpuid->nent);
+
+
+
+ r = -E2BIG;
+ if (nent >= cpuid->nent)
+ goto out_free;
+
+ do_cpuid_ent(&cpuid_entries[nent], KVM_CPUID_SIGNATURE, 0, &nent,
+ cpuid->nent);
+
+ r = -E2BIG;
+ if (nent >= cpuid->nent)
+ goto out_free;
+
+ do_cpuid_ent(&cpuid_entries[nent], KVM_CPUID_FEATURES, 0, &nent,
+ cpuid->nent);
+
r = -E2BIG;
if (nent >= cpuid->nent)
goto out_free;
@@ -2027,6 +2140,7 @@ static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
int r;
unsigned bank_num = mcg_cap & 0xff, bank;
+ vcpu_load(vcpu);
r = -EINVAL;
if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS)
goto out;
@@ -2041,6 +2155,7 @@ static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
for (bank = 0; bank < bank_num; bank++)
vcpu->arch.mce_banks[bank*4] = ~(u64)0;
out:
+ vcpu_put(vcpu);
return r;
}
@@ -2100,14 +2215,20 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
{
vcpu_load(vcpu);
- events->exception.injected = vcpu->arch.exception.pending;
+ events->exception.injected =
+ vcpu->arch.exception.pending &&
+ !kvm_exception_is_soft(vcpu->arch.exception.nr);
events->exception.nr = vcpu->arch.exception.nr;
events->exception.has_error_code = vcpu->arch.exception.has_error_code;
events->exception.error_code = vcpu->arch.exception.error_code;
- events->interrupt.injected = vcpu->arch.interrupt.pending;
+ events->interrupt.injected =
+ vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft;
events->interrupt.nr = vcpu->arch.interrupt.nr;
- events->interrupt.soft = vcpu->arch.interrupt.soft;
+ events->interrupt.soft = 0;
+ events->interrupt.shadow =
+ kvm_x86_ops->get_interrupt_shadow(vcpu,
+ KVM_X86_SHADOW_INT_MOV_SS | KVM_X86_SHADOW_INT_STI);
events->nmi.injected = vcpu->arch.nmi_injected;
events->nmi.pending = vcpu->arch.nmi_pending;
@@ -2116,7 +2237,8 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
events->sipi_vector = vcpu->arch.sipi_vector;
events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
- | KVM_VCPUEVENT_VALID_SIPI_VECTOR);
+ | KVM_VCPUEVENT_VALID_SIPI_VECTOR
+ | KVM_VCPUEVENT_VALID_SHADOW);
vcpu_put(vcpu);
}
@@ -2125,7 +2247,8 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
struct kvm_vcpu_events *events)
{
if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING
- | KVM_VCPUEVENT_VALID_SIPI_VECTOR))
+ | KVM_VCPUEVENT_VALID_SIPI_VECTOR
+ | KVM_VCPUEVENT_VALID_SHADOW))
return -EINVAL;
vcpu_load(vcpu);
@@ -2140,6 +2263,9 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
vcpu->arch.interrupt.soft = events->interrupt.soft;
if (vcpu->arch.interrupt.pending && irqchip_in_kernel(vcpu->kvm))
kvm_pic_clear_isr_ack(vcpu->kvm);
+ if (events->flags & KVM_VCPUEVENT_VALID_SHADOW)
+ kvm_x86_ops->set_interrupt_shadow(vcpu,
+ events->interrupt.shadow);
vcpu->arch.nmi_injected = events->nmi.injected;
if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING)
@@ -2154,6 +2280,36 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
return 0;
}
+static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
+ struct kvm_debugregs *dbgregs)
+{
+ vcpu_load(vcpu);
+
+ memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db));
+ dbgregs->dr6 = vcpu->arch.dr6;
+ dbgregs->dr7 = vcpu->arch.dr7;
+ dbgregs->flags = 0;
+
+ vcpu_put(vcpu);
+}
+
+static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
+ struct kvm_debugregs *dbgregs)
+{
+ if (dbgregs->flags)
+ return -EINVAL;
+
+ vcpu_load(vcpu);
+
+ memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
+ vcpu->arch.dr6 = dbgregs->dr6;
+ vcpu->arch.dr7 = dbgregs->dr7;
+
+ vcpu_put(vcpu);
+
+ return 0;
+}
+
long kvm_arch_vcpu_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
@@ -2308,7 +2464,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
r = -EFAULT;
if (copy_from_user(&mce, argp, sizeof mce))
goto out;
+ vcpu_load(vcpu);
r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce);
+ vcpu_put(vcpu);
break;
}
case KVM_GET_VCPU_EVENTS: {
@@ -2332,6 +2490,29 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events);
break;
}
+ case KVM_GET_DEBUGREGS: {
+ struct kvm_debugregs dbgregs;
+
+ kvm_vcpu_ioctl_x86_get_debugregs(vcpu, &dbgregs);
+
+ r = -EFAULT;
+ if (copy_to_user(argp, &dbgregs,
+ sizeof(struct kvm_debugregs)))
+ break;
+ r = 0;
+ break;
+ }
+ case KVM_SET_DEBUGREGS: {
+ struct kvm_debugregs dbgregs;
+
+ r = -EFAULT;
+ if (copy_from_user(&dbgregs, argp,
+ sizeof(struct kvm_debugregs)))
+ break;
+
+ r = kvm_vcpu_ioctl_x86_set_debugregs(vcpu, &dbgregs);
+ break;
+ }
default:
r = -EINVAL;
}
@@ -2385,7 +2566,7 @@ gfn_t unalias_gfn_instantiation(struct kvm *kvm, gfn_t gfn)
struct kvm_mem_alias *alias;
struct kvm_mem_aliases *aliases;
- aliases = rcu_dereference(kvm->arch.aliases);
+ aliases = kvm_aliases(kvm);
for (i = 0; i < aliases->naliases; ++i) {
alias = &aliases->aliases[i];
@@ -2404,7 +2585,7 @@ gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
struct kvm_mem_alias *alias;
struct kvm_mem_aliases *aliases;
- aliases = rcu_dereference(kvm->arch.aliases);
+ aliases = kvm_aliases(kvm);
for (i = 0; i < aliases->naliases; ++i) {
alias = &aliases->aliases[i];
@@ -2799,11 +2980,13 @@ long kvm_arch_vm_ioctl(struct file *filp,
r = -EFAULT;
if (copy_from_user(&irq_event, argp, sizeof irq_event))
goto out;
+ r = -ENXIO;
if (irqchip_in_kernel(kvm)) {
__s32 status;
status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
irq_event.irq, irq_event.level);
if (ioctl == KVM_IRQ_LINE_STATUS) {
+ r = -EFAULT;
irq_event.status = status;
if (copy_to_user(argp, &irq_event,
sizeof irq_event))
@@ -3019,6 +3202,18 @@ static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
return kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, addr, len, v);
}
+static void kvm_set_segment(struct kvm_vcpu *vcpu,
+ struct kvm_segment *var, int seg)
+{
+ kvm_x86_ops->set_segment(vcpu, var, seg);
+}
+
+void kvm_get_segment(struct kvm_vcpu *vcpu,
+ struct kvm_segment *var, int seg)
+{
+ kvm_x86_ops->get_segment(vcpu, var, seg);
+}
+
gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
{
u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
@@ -3099,14 +3294,17 @@ static int kvm_read_guest_virt_system(gva_t addr, void *val, unsigned int bytes,
return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, error);
}
-static int kvm_write_guest_virt(gva_t addr, void *val, unsigned int bytes,
- struct kvm_vcpu *vcpu, u32 *error)
+static int kvm_write_guest_virt_system(gva_t addr, void *val,
+ unsigned int bytes,
+ struct kvm_vcpu *vcpu,
+ u32 *error)
{
void *data = val;
int r = X86EMUL_CONTINUE;
while (bytes) {
- gpa_t gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, error);
+ gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr,
+ PFERR_WRITE_MASK, error);
unsigned offset = addr & (PAGE_SIZE-1);
unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
int ret;
@@ -3129,7 +3327,6 @@ out:
return r;
}
-
static int emulator_read_emulated(unsigned long addr,
void *val,
unsigned int bytes,
@@ -3232,9 +3429,9 @@ mmio:
}
int emulator_write_emulated(unsigned long addr,
- const void *val,
- unsigned int bytes,
- struct kvm_vcpu *vcpu)
+ const void *val,
+ unsigned int bytes,
+ struct kvm_vcpu *vcpu)
{
/* Crossing a page boundary? */
if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
@@ -3252,45 +3449,150 @@ int emulator_write_emulated(unsigned long addr,
}
EXPORT_SYMBOL_GPL(emulator_write_emulated);
+#define CMPXCHG_TYPE(t, ptr, old, new) \
+ (cmpxchg((t *)(ptr), *(t *)(old), *(t *)(new)) == *(t *)(old))
+
+#ifdef CONFIG_X86_64
+# define CMPXCHG64(ptr, old, new) CMPXCHG_TYPE(u64, ptr, old, new)
+#else
+# define CMPXCHG64(ptr, old, new) \
+ (cmpxchg64((u64 *)(ptr), *(u64 *)(old), *(u64 *)(new)) == *(u64 *)(old))
+#endif
+
static int emulator_cmpxchg_emulated(unsigned long addr,
const void *old,
const void *new,
unsigned int bytes,
struct kvm_vcpu *vcpu)
{
- printk_once(KERN_WARNING "kvm: emulating exchange as write\n");
-#ifndef CONFIG_X86_64
- /* guests cmpxchg8b have to be emulated atomically */
- if (bytes == 8) {
- gpa_t gpa;
- struct page *page;
- char *kaddr;
- u64 val;
+ gpa_t gpa;
+ struct page *page;
+ char *kaddr;
+ bool exchanged;
- gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL);
+ /* guests cmpxchg8b have to be emulated atomically */
+ if (bytes > 8 || (bytes & (bytes - 1)))
+ goto emul_write;
- if (gpa == UNMAPPED_GVA ||
- (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
- goto emul_write;
+ gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL);
- if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK))
- goto emul_write;
+ if (gpa == UNMAPPED_GVA ||
+ (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
+ goto emul_write;
- val = *(u64 *)new;
+ if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK))
+ goto emul_write;
- page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
+ page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
- kaddr = kmap_atomic(page, KM_USER0);
- set_64bit((u64 *)(kaddr + offset_in_page(gpa)), val);
- kunmap_atomic(kaddr, KM_USER0);
- kvm_release_page_dirty(page);
+ kaddr = kmap_atomic(page, KM_USER0);
+ kaddr += offset_in_page(gpa);
+ switch (bytes) {
+ case 1:
+ exchanged = CMPXCHG_TYPE(u8, kaddr, old, new);
+ break;
+ case 2:
+ exchanged = CMPXCHG_TYPE(u16, kaddr, old, new);
+ break;
+ case 4:
+ exchanged = CMPXCHG_TYPE(u32, kaddr, old, new);
+ break;
+ case 8:
+ exchanged = CMPXCHG64(kaddr, old, new);
+ break;
+ default:
+ BUG();
}
+ kunmap_atomic(kaddr, KM_USER0);
+ kvm_release_page_dirty(page);
+
+ if (!exchanged)
+ return X86EMUL_CMPXCHG_FAILED;
+
+ kvm_mmu_pte_write(vcpu, gpa, new, bytes, 1);
+
+ return X86EMUL_CONTINUE;
+
emul_write:
-#endif
+ printk_once(KERN_WARNING "kvm: emulating exchange as write\n");
return emulator_write_emulated(addr, new, bytes, vcpu);
}
+static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
+{
+ /* TODO: String I/O for in kernel device */
+ int r;
+
+ if (vcpu->arch.pio.in)
+ r = kvm_io_bus_read(vcpu->kvm, KVM_PIO_BUS, vcpu->arch.pio.port,
+ vcpu->arch.pio.size, pd);
+ else
+ r = kvm_io_bus_write(vcpu->kvm, KVM_PIO_BUS,
+ vcpu->arch.pio.port, vcpu->arch.pio.size,
+ pd);
+ return r;
+}
+
+
+static int emulator_pio_in_emulated(int size, unsigned short port, void *val,
+ unsigned int count, struct kvm_vcpu *vcpu)
+{
+ if (vcpu->arch.pio.count)
+ goto data_avail;
+
+ trace_kvm_pio(1, port, size, 1);
+
+ vcpu->arch.pio.port = port;
+ vcpu->arch.pio.in = 1;
+ vcpu->arch.pio.count = count;
+ vcpu->arch.pio.size = size;
+
+ if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
+ data_avail:
+ memcpy(val, vcpu->arch.pio_data, size * count);
+ vcpu->arch.pio.count = 0;
+ return 1;
+ }
+
+ vcpu->run->exit_reason = KVM_EXIT_IO;
+ vcpu->run->io.direction = KVM_EXIT_IO_IN;
+ vcpu->run->io.size = size;
+ vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
+ vcpu->run->io.count = count;
+ vcpu->run->io.port = port;
+
+ return 0;
+}
+
+static int emulator_pio_out_emulated(int size, unsigned short port,
+ const void *val, unsigned int count,
+ struct kvm_vcpu *vcpu)
+{
+ trace_kvm_pio(0, port, size, 1);
+
+ vcpu->arch.pio.port = port;
+ vcpu->arch.pio.in = 0;
+ vcpu->arch.pio.count = count;
+ vcpu->arch.pio.size = size;
+
+ memcpy(vcpu->arch.pio_data, val, size * count);
+
+ if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
+ vcpu->arch.pio.count = 0;
+ return 1;
+ }
+
+ vcpu->run->exit_reason = KVM_EXIT_IO;
+ vcpu->run->io.direction = KVM_EXIT_IO_OUT;
+ vcpu->run->io.size = size;
+ vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
+ vcpu->run->io.count = count;
+ vcpu->run->io.port = port;
+
+ return 0;
+}
+
static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
return kvm_x86_ops->get_segment_base(vcpu, seg);
@@ -3311,14 +3613,14 @@ int emulate_clts(struct kvm_vcpu *vcpu)
int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
{
- return kvm_x86_ops->get_dr(ctxt->vcpu, dr, dest);
+ return kvm_get_dr(ctxt->vcpu, dr, dest);
}
int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
{
unsigned long mask = (ctxt->mode == X86EMUL_MODE_PROT64) ? ~0ULL : ~0U;
- return kvm_x86_ops->set_dr(ctxt->vcpu, dr, value & mask);
+ return kvm_set_dr(ctxt->vcpu, dr, value & mask);
}
void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
@@ -3339,12 +3641,167 @@ void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
}
EXPORT_SYMBOL_GPL(kvm_report_emulation_failure);
+static u64 mk_cr_64(u64 curr_cr, u32 new_val)
+{
+ return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
+}
+
+static unsigned long emulator_get_cr(int cr, struct kvm_vcpu *vcpu)
+{
+ unsigned long value;
+
+ switch (cr) {
+ case 0:
+ value = kvm_read_cr0(vcpu);
+ break;
+ case 2:
+ value = vcpu->arch.cr2;
+ break;
+ case 3:
+ value = vcpu->arch.cr3;
+ break;
+ case 4:
+ value = kvm_read_cr4(vcpu);
+ break;
+ case 8:
+ value = kvm_get_cr8(vcpu);
+ break;
+ default:
+ vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
+ return 0;
+ }
+
+ return value;
+}
+
+static void emulator_set_cr(int cr, unsigned long val, struct kvm_vcpu *vcpu)
+{
+ switch (cr) {
+ case 0:
+ kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val));
+ break;
+ case 2:
+ vcpu->arch.cr2 = val;
+ break;
+ case 3:
+ kvm_set_cr3(vcpu, val);
+ break;
+ case 4:
+ kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val));
+ break;
+ case 8:
+ kvm_set_cr8(vcpu, val & 0xfUL);
+ break;
+ default:
+ vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
+ }
+}
+
+static int emulator_get_cpl(struct kvm_vcpu *vcpu)
+{
+ return kvm_x86_ops->get_cpl(vcpu);
+}
+
+static void emulator_get_gdt(struct desc_ptr *dt, struct kvm_vcpu *vcpu)
+{
+ kvm_x86_ops->get_gdt(vcpu, dt);
+}
+
+static bool emulator_get_cached_descriptor(struct desc_struct *desc, int seg,
+ struct kvm_vcpu *vcpu)
+{
+ struct kvm_segment var;
+
+ kvm_get_segment(vcpu, &var, seg);
+
+ if (var.unusable)
+ return false;
+
+ if (var.g)
+ var.limit >>= 12;
+ set_desc_limit(desc, var.limit);
+ set_desc_base(desc, (unsigned long)var.base);
+ desc->type = var.type;
+ desc->s = var.s;
+ desc->dpl = var.dpl;
+ desc->p = var.present;
+ desc->avl = var.avl;
+ desc->l = var.l;
+ desc->d = var.db;
+ desc->g = var.g;
+
+ return true;
+}
+
+static void emulator_set_cached_descriptor(struct desc_struct *desc, int seg,
+ struct kvm_vcpu *vcpu)
+{
+ struct kvm_segment var;
+
+ /* needed to preserve selector */
+ kvm_get_segment(vcpu, &var, seg);
+
+ var.base = get_desc_base(desc);
+ var.limit = get_desc_limit(desc);
+ if (desc->g)
+ var.limit = (var.limit << 12) | 0xfff;
+ var.type = desc->type;
+ var.present = desc->p;
+ var.dpl = desc->dpl;
+ var.db = desc->d;
+ var.s = desc->s;
+ var.l = desc->l;
+ var.g = desc->g;
+ var.avl = desc->avl;
+ var.present = desc->p;
+ var.unusable = !var.present;
+ var.padding = 0;
+
+ kvm_set_segment(vcpu, &var, seg);
+ return;
+}
+
+static u16 emulator_get_segment_selector(int seg, struct kvm_vcpu *vcpu)
+{
+ struct kvm_segment kvm_seg;
+
+ kvm_get_segment(vcpu, &kvm_seg, seg);
+ return kvm_seg.selector;
+}
+
+static void emulator_set_segment_selector(u16 sel, int seg,
+ struct kvm_vcpu *vcpu)
+{
+ struct kvm_segment kvm_seg;
+
+ kvm_get_segment(vcpu, &kvm_seg, seg);
+ kvm_seg.selector = sel;
+ kvm_set_segment(vcpu, &kvm_seg, seg);
+}
+
+static void emulator_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
+{
+ kvm_x86_ops->set_rflags(vcpu, rflags);
+}
+
static struct x86_emulate_ops emulate_ops = {
.read_std = kvm_read_guest_virt_system,
+ .write_std = kvm_write_guest_virt_system,
.fetch = kvm_fetch_guest_virt,
.read_emulated = emulator_read_emulated,
.write_emulated = emulator_write_emulated,
.cmpxchg_emulated = emulator_cmpxchg_emulated,
+ .pio_in_emulated = emulator_pio_in_emulated,
+ .pio_out_emulated = emulator_pio_out_emulated,
+ .get_cached_descriptor = emulator_get_cached_descriptor,
+ .set_cached_descriptor = emulator_set_cached_descriptor,
+ .get_segment_selector = emulator_get_segment_selector,
+ .set_segment_selector = emulator_set_segment_selector,
+ .get_gdt = emulator_get_gdt,
+ .get_cr = emulator_get_cr,
+ .set_cr = emulator_set_cr,
+ .cpl = emulator_get_cpl,
+ .set_rflags = emulator_set_rflags,
};
static void cache_all_regs(struct kvm_vcpu *vcpu)
@@ -3375,14 +3832,14 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
cache_all_regs(vcpu);
vcpu->mmio_is_write = 0;
- vcpu->arch.pio.string = 0;
if (!(emulation_type & EMULTYPE_NO_DECODE)) {
int cs_db, cs_l;
kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
vcpu->arch.emulate_ctxt.vcpu = vcpu;
- vcpu->arch.emulate_ctxt.eflags = kvm_get_rflags(vcpu);
+ vcpu->arch.emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu);
+ vcpu->arch.emulate_ctxt.eip = kvm_rip_read(vcpu);
vcpu->arch.emulate_ctxt.mode =
(!is_protmode(vcpu)) ? X86EMUL_MODE_REAL :
(vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_VM)
@@ -3391,6 +3848,7 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
r = x86_decode_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
+ trace_kvm_emulate_insn_start(vcpu);
/* Only allow emulation of specific instructions on #UD
* (namely VMMCALL, sysenter, sysexit, syscall)*/
@@ -3423,6 +3881,7 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
++vcpu->stat.insn_emulation;
if (r) {
++vcpu->stat.insn_emulation_fail;
+ trace_kvm_emulate_insn_failed(vcpu);
if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
return EMULATE_DONE;
return EMULATE_FAIL;
@@ -3434,16 +3893,20 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
return EMULATE_DONE;
}
+restart:
r = x86_emulate_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
shadow_mask = vcpu->arch.emulate_ctxt.interruptibility;
if (r == 0)
kvm_x86_ops->set_interrupt_shadow(vcpu, shadow_mask);
- if (vcpu->arch.pio.string)
+ if (vcpu->arch.pio.count) {
+ if (!vcpu->arch.pio.in)
+ vcpu->arch.pio.count = 0;
return EMULATE_DO_MMIO;
+ }
- if ((r || vcpu->mmio_is_write) && run) {
+ if (r || vcpu->mmio_is_write) {
run->exit_reason = KVM_EXIT_MMIO;
run->mmio.phys_addr = vcpu->mmio_phys_addr;
memcpy(run->mmio.data, vcpu->mmio_data, 8);
@@ -3453,222 +3916,41 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
if (r) {
if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
- return EMULATE_DONE;
+ goto done;
if (!vcpu->mmio_needed) {
+ ++vcpu->stat.insn_emulation_fail;
+ trace_kvm_emulate_insn_failed(vcpu);
kvm_report_emulation_failure(vcpu, "mmio");
return EMULATE_FAIL;
}
return EMULATE_DO_MMIO;
}
- kvm_set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
-
if (vcpu->mmio_is_write) {
vcpu->mmio_needed = 0;
return EMULATE_DO_MMIO;
}
- return EMULATE_DONE;
-}
-EXPORT_SYMBOL_GPL(emulate_instruction);
-
-static int pio_copy_data(struct kvm_vcpu *vcpu)
-{
- void *p = vcpu->arch.pio_data;
- gva_t q = vcpu->arch.pio.guest_gva;
- unsigned bytes;
- int ret;
- u32 error_code;
-
- bytes = vcpu->arch.pio.size * vcpu->arch.pio.cur_count;
- if (vcpu->arch.pio.in)
- ret = kvm_write_guest_virt(q, p, bytes, vcpu, &error_code);
- else
- ret = kvm_read_guest_virt(q, p, bytes, vcpu, &error_code);
-
- if (ret == X86EMUL_PROPAGATE_FAULT)
- kvm_inject_page_fault(vcpu, q, error_code);
-
- return ret;
-}
-
-int complete_pio(struct kvm_vcpu *vcpu)
-{
- struct kvm_pio_request *io = &vcpu->arch.pio;
- long delta;
- int r;
- unsigned long val;
-
- if (!io->string) {
- if (io->in) {
- val = kvm_register_read(vcpu, VCPU_REGS_RAX);
- memcpy(&val, vcpu->arch.pio_data, io->size);
- kvm_register_write(vcpu, VCPU_REGS_RAX, val);
- }
- } else {
- if (io->in) {
- r = pio_copy_data(vcpu);
- if (r)
- goto out;
- }
-
- delta = 1;
- if (io->rep) {
- delta *= io->cur_count;
- /*
- * The size of the register should really depend on
- * current address size.
- */
- val = kvm_register_read(vcpu, VCPU_REGS_RCX);
- val -= delta;
- kvm_register_write(vcpu, VCPU_REGS_RCX, val);
- }
- if (io->down)
- delta = -delta;
- delta *= io->size;
- if (io->in) {
- val = kvm_register_read(vcpu, VCPU_REGS_RDI);
- val += delta;
- kvm_register_write(vcpu, VCPU_REGS_RDI, val);
- } else {
- val = kvm_register_read(vcpu, VCPU_REGS_RSI);
- val += delta;
- kvm_register_write(vcpu, VCPU_REGS_RSI, val);
- }
- }
-out:
- io->count -= io->cur_count;
- io->cur_count = 0;
-
- return 0;
-}
-
-static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
-{
- /* TODO: String I/O for in kernel device */
- int r;
-
- if (vcpu->arch.pio.in)
- r = kvm_io_bus_read(vcpu->kvm, KVM_PIO_BUS, vcpu->arch.pio.port,
- vcpu->arch.pio.size, pd);
- else
- r = kvm_io_bus_write(vcpu->kvm, KVM_PIO_BUS,
- vcpu->arch.pio.port, vcpu->arch.pio.size,
- pd);
- return r;
-}
-
-static int pio_string_write(struct kvm_vcpu *vcpu)
-{
- struct kvm_pio_request *io = &vcpu->arch.pio;
- void *pd = vcpu->arch.pio_data;
- int i, r = 0;
-
- for (i = 0; i < io->cur_count; i++) {
- if (kvm_io_bus_write(vcpu->kvm, KVM_PIO_BUS,
- io->port, io->size, pd)) {
- r = -EOPNOTSUPP;
- break;
- }
- pd += io->size;
- }
- return r;
-}
-
-int kvm_emulate_pio(struct kvm_vcpu *vcpu, int in, int size, unsigned port)
-{
- unsigned long val;
+done:
+ if (vcpu->arch.exception.pending)
+ vcpu->arch.emulate_ctxt.restart = false;
- trace_kvm_pio(!in, port, size, 1);
+ if (vcpu->arch.emulate_ctxt.restart)
+ goto restart;
- vcpu->run->exit_reason = KVM_EXIT_IO;
- vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
- vcpu->run->io.size = vcpu->arch.pio.size = size;
- vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
- vcpu->run->io.count = vcpu->arch.pio.count = vcpu->arch.pio.cur_count = 1;
- vcpu->run->io.port = vcpu->arch.pio.port = port;
- vcpu->arch.pio.in = in;
- vcpu->arch.pio.string = 0;
- vcpu->arch.pio.down = 0;
- vcpu->arch.pio.rep = 0;
-
- if (!vcpu->arch.pio.in) {
- val = kvm_register_read(vcpu, VCPU_REGS_RAX);
- memcpy(vcpu->arch.pio_data, &val, 4);
- }
-
- if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
- complete_pio(vcpu);
- return 1;
- }
- return 0;
+ return EMULATE_DONE;
}
-EXPORT_SYMBOL_GPL(kvm_emulate_pio);
+EXPORT_SYMBOL_GPL(emulate_instruction);
-int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, int in,
- int size, unsigned long count, int down,
- gva_t address, int rep, unsigned port)
+int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port)
{
- unsigned now, in_page;
- int ret = 0;
-
- trace_kvm_pio(!in, port, size, count);
-
- vcpu->run->exit_reason = KVM_EXIT_IO;
- vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
- vcpu->run->io.size = vcpu->arch.pio.size = size;
- vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
- vcpu->run->io.count = vcpu->arch.pio.count = vcpu->arch.pio.cur_count = count;
- vcpu->run->io.port = vcpu->arch.pio.port = port;
- vcpu->arch.pio.in = in;
- vcpu->arch.pio.string = 1;
- vcpu->arch.pio.down = down;
- vcpu->arch.pio.rep = rep;
-
- if (!count) {
- kvm_x86_ops->skip_emulated_instruction(vcpu);
- return 1;
- }
-
- if (!down)
- in_page = PAGE_SIZE - offset_in_page(address);
- else
- in_page = offset_in_page(address) + size;
- now = min(count, (unsigned long)in_page / size);
- if (!now)
- now = 1;
- if (down) {
- /*
- * String I/O in reverse. Yuck. Kill the guest, fix later.
- */
- pr_unimpl(vcpu, "guest string pio down\n");
- kvm_inject_gp(vcpu, 0);
- return 1;
- }
- vcpu->run->io.count = now;
- vcpu->arch.pio.cur_count = now;
-
- if (vcpu->arch.pio.cur_count == vcpu->arch.pio.count)
- kvm_x86_ops->skip_emulated_instruction(vcpu);
-
- vcpu->arch.pio.guest_gva = address;
-
- if (!vcpu->arch.pio.in) {
- /* string PIO write */
- ret = pio_copy_data(vcpu);
- if (ret == X86EMUL_PROPAGATE_FAULT)
- return 1;
- if (ret == 0 && !pio_string_write(vcpu)) {
- complete_pio(vcpu);
- if (vcpu->arch.pio.count == 0)
- ret = 1;
- }
- }
- /* no string PIO read support yet */
-
+ unsigned long val = kvm_register_read(vcpu, VCPU_REGS_RAX);
+ int ret = emulator_pio_out_emulated(size, port, &val, 1, vcpu);
+ /* do not return to emulator after return from userspace */
+ vcpu->arch.pio.count = 0;
return ret;
}
-EXPORT_SYMBOL_GPL(kvm_emulate_pio_string);
+EXPORT_SYMBOL_GPL(kvm_fast_pio_out);
static void bounce_off(void *info)
{
@@ -3743,6 +4025,51 @@ static void kvm_timer_init(void)
}
}
+static DEFINE_PER_CPU(struct kvm_vcpu *, current_vcpu);
+
+static int kvm_is_in_guest(void)
+{
+ return percpu_read(current_vcpu) != NULL;
+}
+
+static int kvm_is_user_mode(void)
+{
+ int user_mode = 3;
+
+ if (percpu_read(current_vcpu))
+ user_mode = kvm_x86_ops->get_cpl(percpu_read(current_vcpu));
+
+ return user_mode != 0;
+}
+
+static unsigned long kvm_get_guest_ip(void)
+{
+ unsigned long ip = 0;
+
+ if (percpu_read(current_vcpu))
+ ip = kvm_rip_read(percpu_read(current_vcpu));
+
+ return ip;
+}
+
+static struct perf_guest_info_callbacks kvm_guest_cbs = {
+ .is_in_guest = kvm_is_in_guest,
+ .is_user_mode = kvm_is_user_mode,
+ .get_guest_ip = kvm_get_guest_ip,
+};
+
+void kvm_before_handle_nmi(struct kvm_vcpu *vcpu)
+{
+ percpu_write(current_vcpu, vcpu);
+}
+EXPORT_SYMBOL_GPL(kvm_before_handle_nmi);
+
+void kvm_after_handle_nmi(struct kvm_vcpu *vcpu)
+{
+ percpu_write(current_vcpu, NULL);
+}
+EXPORT_SYMBOL_GPL(kvm_after_handle_nmi);
+
int kvm_arch_init(void *opaque)
{
int r;
@@ -3779,6 +4106,8 @@ int kvm_arch_init(void *opaque)
kvm_timer_init();
+ perf_register_guest_info_callbacks(&kvm_guest_cbs);
+
return 0;
out:
@@ -3787,6 +4116,8 @@ out:
void kvm_arch_exit(void)
{
+ perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
+
if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,
CPUFREQ_TRANSITION_NOTIFIER);
@@ -3942,85 +4273,20 @@ int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
return emulator_write_emulated(rip, instruction, 3, vcpu);
}
-static u64 mk_cr_64(u64 curr_cr, u32 new_val)
-{
- return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
-}
-
void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
{
- struct descriptor_table dt = { limit, base };
+ struct desc_ptr dt = { limit, base };
kvm_x86_ops->set_gdt(vcpu, &dt);
}
void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
{
- struct descriptor_table dt = { limit, base };
+ struct desc_ptr dt = { limit, base };
kvm_x86_ops->set_idt(vcpu, &dt);
}
-void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
- unsigned long *rflags)
-{
- kvm_lmsw(vcpu, msw);
- *rflags = kvm_get_rflags(vcpu);
-}
-
-unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
-{
- unsigned long value;
-
- switch (cr) {
- case 0:
- value = kvm_read_cr0(vcpu);
- break;
- case 2:
- value = vcpu->arch.cr2;
- break;
- case 3:
- value = vcpu->arch.cr3;
- break;
- case 4:
- value = kvm_read_cr4(vcpu);
- break;
- case 8:
- value = kvm_get_cr8(vcpu);
- break;
- default:
- vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
- return 0;
- }
-
- return value;
-}
-
-void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
- unsigned long *rflags)
-{
- switch (cr) {
- case 0:
- kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val));
- *rflags = kvm_get_rflags(vcpu);
- break;
- case 2:
- vcpu->arch.cr2 = val;
- break;
- case 3:
- kvm_set_cr3(vcpu, val);
- break;
- case 4:
- kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val));
- break;
- case 8:
- kvm_set_cr8(vcpu, val & 0xfUL);
- break;
- default:
- vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
- }
-}
-
static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
{
struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
@@ -4084,9 +4350,13 @@ int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;
+ best = kvm_find_cpuid_entry(vcpu, 0x80000000, 0);
+ if (!best || best->eax < 0x80000008)
+ goto not_found;
best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
if (best)
return best->eax & 0xff;
+not_found:
return 36;
}
@@ -4200,9 +4470,13 @@ static void inject_pending_event(struct kvm_vcpu *vcpu)
{
/* try to reinject previous events if any */
if (vcpu->arch.exception.pending) {
+ trace_kvm_inj_exception(vcpu->arch.exception.nr,
+ vcpu->arch.exception.has_error_code,
+ vcpu->arch.exception.error_code);
kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr,
vcpu->arch.exception.has_error_code,
- vcpu->arch.exception.error_code);
+ vcpu->arch.exception.error_code,
+ vcpu->arch.exception.reinject);
return;
}
@@ -4432,7 +4706,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
}
srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
- post_kvm_run_save(vcpu);
vapic_exit(vcpu);
@@ -4460,26 +4733,17 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
if (!irqchip_in_kernel(vcpu->kvm))
kvm_set_cr8(vcpu, kvm_run->cr8);
- if (vcpu->arch.pio.cur_count) {
- vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
- r = complete_pio(vcpu);
- srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
- if (r)
- goto out;
- }
- if (vcpu->mmio_needed) {
- memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
- vcpu->mmio_read_completed = 1;
- vcpu->mmio_needed = 0;
-
+ if (vcpu->arch.pio.count || vcpu->mmio_needed ||
+ vcpu->arch.emulate_ctxt.restart) {
+ if (vcpu->mmio_needed) {
+ memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
+ vcpu->mmio_read_completed = 1;
+ vcpu->mmio_needed = 0;
+ }
vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
- r = emulate_instruction(vcpu, vcpu->arch.mmio_fault_cr2, 0,
- EMULTYPE_NO_DECODE);
+ r = emulate_instruction(vcpu, 0, 0, EMULTYPE_NO_DECODE);
srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
if (r == EMULATE_DO_MMIO) {
- /*
- * Read-modify-write. Back to userspace.
- */
r = 0;
goto out;
}
@@ -4491,6 +4755,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
r = __vcpu_run(vcpu);
out:
+ post_kvm_run_save(vcpu);
if (vcpu->sigset_active)
sigprocmask(SIG_SETMASK, &sigsaved, NULL);
@@ -4562,12 +4827,6 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
return 0;
}
-void kvm_get_segment(struct kvm_vcpu *vcpu,
- struct kvm_segment *var, int seg)
-{
- kvm_x86_ops->get_segment(vcpu, var, seg);
-}
-
void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
{
struct kvm_segment cs;
@@ -4581,7 +4840,7 @@ EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
struct kvm_sregs *sregs)
{
- struct descriptor_table dt;
+ struct desc_ptr dt;
vcpu_load(vcpu);
@@ -4596,11 +4855,11 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
kvm_x86_ops->get_idt(vcpu, &dt);
- sregs->idt.limit = dt.limit;
- sregs->idt.base = dt.base;
+ sregs->idt.limit = dt.size;
+ sregs->idt.base = dt.address;
kvm_x86_ops->get_gdt(vcpu, &dt);
- sregs->gdt.limit = dt.limit;
- sregs->gdt.base = dt.base;
+ sregs->gdt.limit = dt.size;
+ sregs->gdt.base = dt.address;
sregs->cr0 = kvm_read_cr0(vcpu);
sregs->cr2 = vcpu->arch.cr2;
@@ -4639,563 +4898,33 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
return 0;
}
-static void kvm_set_segment(struct kvm_vcpu *vcpu,
- struct kvm_segment *var, int seg)
-{
- kvm_x86_ops->set_segment(vcpu, var, seg);
-}
-
-static void seg_desct_to_kvm_desct(struct desc_struct *seg_desc, u16 selector,
- struct kvm_segment *kvm_desct)
-{
- kvm_desct->base = get_desc_base(seg_desc);
- kvm_desct->limit = get_desc_limit(seg_desc);
- if (seg_desc->g) {
- kvm_desct->limit <<= 12;
- kvm_desct->limit |= 0xfff;
- }
- kvm_desct->selector = selector;
- kvm_desct->type = seg_desc->type;
- kvm_desct->present = seg_desc->p;
- kvm_desct->dpl = seg_desc->dpl;
- kvm_desct->db = seg_desc->d;
- kvm_desct->s = seg_desc->s;
- kvm_desct->l = seg_desc->l;
- kvm_desct->g = seg_desc->g;
- kvm_desct->avl = seg_desc->avl;
- if (!selector)
- kvm_desct->unusable = 1;
- else
- kvm_desct->unusable = 0;
- kvm_desct->padding = 0;
-}
-
-static void get_segment_descriptor_dtable(struct kvm_vcpu *vcpu,
- u16 selector,
- struct descriptor_table *dtable)
-{
- if (selector & 1 << 2) {
- struct kvm_segment kvm_seg;
-
- kvm_get_segment(vcpu, &kvm_seg, VCPU_SREG_LDTR);
-
- if (kvm_seg.unusable)
- dtable->limit = 0;
- else
- dtable->limit = kvm_seg.limit;
- dtable->base = kvm_seg.base;
- }
- else
- kvm_x86_ops->get_gdt(vcpu, dtable);
-}
-
-/* allowed just for 8 bytes segments */
-static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
- struct desc_struct *seg_desc)
-{
- struct descriptor_table dtable;
- u16 index = selector >> 3;
- int ret;
- u32 err;
- gva_t addr;
-
- get_segment_descriptor_dtable(vcpu, selector, &dtable);
-
- if (dtable.limit < index * 8 + 7) {
- kvm_queue_exception_e(vcpu, GP_VECTOR, selector & 0xfffc);
- return X86EMUL_PROPAGATE_FAULT;
- }
- addr = dtable.base + index * 8;
- ret = kvm_read_guest_virt_system(addr, seg_desc, sizeof(*seg_desc),
- vcpu, &err);
- if (ret == X86EMUL_PROPAGATE_FAULT)
- kvm_inject_page_fault(vcpu, addr, err);
-
- return ret;
-}
-
-/* allowed just for 8 bytes segments */
-static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
- struct desc_struct *seg_desc)
-{
- struct descriptor_table dtable;
- u16 index = selector >> 3;
-
- get_segment_descriptor_dtable(vcpu, selector, &dtable);
-
- if (dtable.limit < index * 8 + 7)
- return 1;
- return kvm_write_guest_virt(dtable.base + index*8, seg_desc, sizeof(*seg_desc), vcpu, NULL);
-}
-
-static gpa_t get_tss_base_addr_write(struct kvm_vcpu *vcpu,
- struct desc_struct *seg_desc)
-{
- u32 base_addr = get_desc_base(seg_desc);
-
- return kvm_mmu_gva_to_gpa_write(vcpu, base_addr, NULL);
-}
-
-static gpa_t get_tss_base_addr_read(struct kvm_vcpu *vcpu,
- struct desc_struct *seg_desc)
-{
- u32 base_addr = get_desc_base(seg_desc);
-
- return kvm_mmu_gva_to_gpa_read(vcpu, base_addr, NULL);
-}
-
-static u16 get_segment_selector(struct kvm_vcpu *vcpu, int seg)
-{
- struct kvm_segment kvm_seg;
-
- kvm_get_segment(vcpu, &kvm_seg, seg);
- return kvm_seg.selector;
-}
-
-static int kvm_load_realmode_segment(struct kvm_vcpu *vcpu, u16 selector, int seg)
-{
- struct kvm_segment segvar = {
- .base = selector << 4,
- .limit = 0xffff,
- .selector = selector,
- .type = 3,
- .present = 1,
- .dpl = 3,
- .db = 0,
- .s = 1,
- .l = 0,
- .g = 0,
- .avl = 0,
- .unusable = 0,
- };
- kvm_x86_ops->set_segment(vcpu, &segvar, seg);
- return X86EMUL_CONTINUE;
-}
-
-static int is_vm86_segment(struct kvm_vcpu *vcpu, int seg)
+int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason,
+ bool has_error_code, u32 error_code)
{
- return (seg != VCPU_SREG_LDTR) &&
- (seg != VCPU_SREG_TR) &&
- (kvm_get_rflags(vcpu) & X86_EFLAGS_VM);
-}
-
-int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg)
-{
- struct kvm_segment kvm_seg;
- struct desc_struct seg_desc;
- u8 dpl, rpl, cpl;
- unsigned err_vec = GP_VECTOR;
- u32 err_code = 0;
- bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
- int ret;
+ int cs_db, cs_l, ret;
+ cache_all_regs(vcpu);
- if (is_vm86_segment(vcpu, seg) || !is_protmode(vcpu))
- return kvm_load_realmode_segment(vcpu, selector, seg);
+ kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
- /* NULL selector is not valid for TR, CS and SS */
- if ((seg == VCPU_SREG_CS || seg == VCPU_SREG_SS || seg == VCPU_SREG_TR)
- && null_selector)
- goto exception;
+ vcpu->arch.emulate_ctxt.vcpu = vcpu;
+ vcpu->arch.emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu);
+ vcpu->arch.emulate_ctxt.eip = kvm_rip_read(vcpu);
+ vcpu->arch.emulate_ctxt.mode =
+ (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL :
+ (vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_VM)
+ ? X86EMUL_MODE_VM86 : cs_l
+ ? X86EMUL_MODE_PROT64 : cs_db
+ ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
- /* TR should be in GDT only */
- if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
- goto exception;
+ ret = emulator_task_switch(&vcpu->arch.emulate_ctxt, &emulate_ops,
+ tss_selector, reason, has_error_code,
+ error_code);
- ret = load_guest_segment_descriptor(vcpu, selector, &seg_desc);
if (ret)
- return ret;
-
- seg_desct_to_kvm_desct(&seg_desc, selector, &kvm_seg);
-
- if (null_selector) { /* for NULL selector skip all following checks */
- kvm_seg.unusable = 1;
- goto load;
- }
-
- err_code = selector & 0xfffc;
- err_vec = GP_VECTOR;
-
- /* can't load system descriptor into segment selecor */
- if (seg <= VCPU_SREG_GS && !kvm_seg.s)
- goto exception;
-
- if (!kvm_seg.present) {
- err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
- goto exception;
- }
-
- rpl = selector & 3;
- dpl = kvm_seg.dpl;
- cpl = kvm_x86_ops->get_cpl(vcpu);
-
- switch (seg) {
- case VCPU_SREG_SS:
- /*
- * segment is not a writable data segment or segment
- * selector's RPL != CPL or segment selector's RPL != CPL
- */
- if (rpl != cpl || (kvm_seg.type & 0xa) != 0x2 || dpl != cpl)
- goto exception;
- break;
- case VCPU_SREG_CS:
- if (!(kvm_seg.type & 8))
- goto exception;
-
- if (kvm_seg.type & 4) {
- /* conforming */
- if (dpl > cpl)
- goto exception;
- } else {
- /* nonconforming */
- if (rpl > cpl || dpl != cpl)
- goto exception;
- }
- /* CS(RPL) <- CPL */
- selector = (selector & 0xfffc) | cpl;
- break;
- case VCPU_SREG_TR:
- if (kvm_seg.s || (kvm_seg.type != 1 && kvm_seg.type != 9))
- goto exception;
- break;
- case VCPU_SREG_LDTR:
- if (kvm_seg.s || kvm_seg.type != 2)
- goto exception;
- break;
- default: /* DS, ES, FS, or GS */
- /*
- * segment is not a data or readable code segment or
- * ((segment is a data or nonconforming code segment)
- * and (both RPL and CPL > DPL))
- */
- if ((kvm_seg.type & 0xa) == 0x8 ||
- (((kvm_seg.type & 0xc) != 0xc) && (rpl > dpl && cpl > dpl)))
- goto exception;
- break;
- }
-
- if (!kvm_seg.unusable && kvm_seg.s) {
- /* mark segment as accessed */
- kvm_seg.type |= 1;
- seg_desc.type |= 1;
- save_guest_segment_descriptor(vcpu, selector, &seg_desc);
- }
-load:
- kvm_set_segment(vcpu, &kvm_seg, seg);
- return X86EMUL_CONTINUE;
-exception:
- kvm_queue_exception_e(vcpu, err_vec, err_code);
- return X86EMUL_PROPAGATE_FAULT;
-}
-
-static void save_state_to_tss32(struct kvm_vcpu *vcpu,
- struct tss_segment_32 *tss)
-{
- tss->cr3 = vcpu->arch.cr3;
- tss->eip = kvm_rip_read(vcpu);
- tss->eflags = kvm_get_rflags(vcpu);
- tss->eax = kvm_register_read(vcpu, VCPU_REGS_RAX);
- tss->ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
- tss->edx = kvm_register_read(vcpu, VCPU_REGS_RDX);
- tss->ebx = kvm_register_read(vcpu, VCPU_REGS_RBX);
- tss->esp = kvm_register_read(vcpu, VCPU_REGS_RSP);
- tss->ebp = kvm_register_read(vcpu, VCPU_REGS_RBP);
- tss->esi = kvm_register_read(vcpu, VCPU_REGS_RSI);
- tss->edi = kvm_register_read(vcpu, VCPU_REGS_RDI);
- tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
- tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
- tss->ss = get_segment_selector(vcpu, VCPU_SREG_SS);
- tss->ds = get_segment_selector(vcpu, VCPU_SREG_DS);
- tss->fs = get_segment_selector(vcpu, VCPU_SREG_FS);
- tss->gs = get_segment_selector(vcpu, VCPU_SREG_GS);
- tss->ldt_selector = get_segment_selector(vcpu, VCPU_SREG_LDTR);
-}
-
-static void kvm_load_segment_selector(struct kvm_vcpu *vcpu, u16 sel, int seg)
-{
- struct kvm_segment kvm_seg;
- kvm_get_segment(vcpu, &kvm_seg, seg);
- kvm_seg.selector = sel;
- kvm_set_segment(vcpu, &kvm_seg, seg);
-}
-
-static int load_state_from_tss32(struct kvm_vcpu *vcpu,
- struct tss_segment_32 *tss)
-{
- kvm_set_cr3(vcpu, tss->cr3);
-
- kvm_rip_write(vcpu, tss->eip);
- kvm_set_rflags(vcpu, tss->eflags | 2);
-
- kvm_register_write(vcpu, VCPU_REGS_RAX, tss->eax);
- kvm_register_write(vcpu, VCPU_REGS_RCX, tss->ecx);
- kvm_register_write(vcpu, VCPU_REGS_RDX, tss->edx);
- kvm_register_write(vcpu, VCPU_REGS_RBX, tss->ebx);
- kvm_register_write(vcpu, VCPU_REGS_RSP, tss->esp);
- kvm_register_write(vcpu, VCPU_REGS_RBP, tss->ebp);
- kvm_register_write(vcpu, VCPU_REGS_RSI, tss->esi);
- kvm_register_write(vcpu, VCPU_REGS_RDI, tss->edi);
-
- /*
- * SDM says that segment selectors are loaded before segment
- * descriptors
- */
- kvm_load_segment_selector(vcpu, tss->ldt_selector, VCPU_SREG_LDTR);
- kvm_load_segment_selector(vcpu, tss->es, VCPU_SREG_ES);
- kvm_load_segment_selector(vcpu, tss->cs, VCPU_SREG_CS);
- kvm_load_segment_selector(vcpu, tss->ss, VCPU_SREG_SS);
- kvm_load_segment_selector(vcpu, tss->ds, VCPU_SREG_DS);
- kvm_load_segment_selector(vcpu, tss->fs, VCPU_SREG_FS);
- kvm_load_segment_selector(vcpu, tss->gs, VCPU_SREG_GS);
-
- /*
- * Now load segment descriptors. If fault happenes at this stage
- * it is handled in a context of new task
- */
- if (kvm_load_segment_descriptor(vcpu, tss->ldt_selector, VCPU_SREG_LDTR))
- return 1;
-
- if (kvm_load_segment_descriptor(vcpu, tss->es, VCPU_SREG_ES))
- return 1;
-
- if (kvm_load_segment_descriptor(vcpu, tss->cs, VCPU_SREG_CS))
- return 1;
-
- if (kvm_load_segment_descriptor(vcpu, tss->ss, VCPU_SREG_SS))
- return 1;
-
- if (kvm_load_segment_descriptor(vcpu, tss->ds, VCPU_SREG_DS))
- return 1;
-
- if (kvm_load_segment_descriptor(vcpu, tss->fs, VCPU_SREG_FS))
- return 1;
-
- if (kvm_load_segment_descriptor(vcpu, tss->gs, VCPU_SREG_GS))
- return 1;
- return 0;
-}
-
-static void save_state_to_tss16(struct kvm_vcpu *vcpu,
- struct tss_segment_16 *tss)
-{
- tss->ip = kvm_rip_read(vcpu);
- tss->flag = kvm_get_rflags(vcpu);
- tss->ax = kvm_register_read(vcpu, VCPU_REGS_RAX);
- tss->cx = kvm_register_read(vcpu, VCPU_REGS_RCX);
- tss->dx = kvm_register_read(vcpu, VCPU_REGS_RDX);
- tss->bx = kvm_register_read(vcpu, VCPU_REGS_RBX);
- tss->sp = kvm_register_read(vcpu, VCPU_REGS_RSP);
- tss->bp = kvm_register_read(vcpu, VCPU_REGS_RBP);
- tss->si = kvm_register_read(vcpu, VCPU_REGS_RSI);
- tss->di = kvm_register_read(vcpu, VCPU_REGS_RDI);
-
- tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
- tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
- tss->ss = get_segment_selector(vcpu, VCPU_SREG_SS);
- tss->ds = get_segment_selector(vcpu, VCPU_SREG_DS);
- tss->ldt = get_segment_selector(vcpu, VCPU_SREG_LDTR);
-}
-
-static int load_state_from_tss16(struct kvm_vcpu *vcpu,
- struct tss_segment_16 *tss)
-{
- kvm_rip_write(vcpu, tss->ip);
- kvm_set_rflags(vcpu, tss->flag | 2);
- kvm_register_write(vcpu, VCPU_REGS_RAX, tss->ax);
- kvm_register_write(vcpu, VCPU_REGS_RCX, tss->cx);
- kvm_register_write(vcpu, VCPU_REGS_RDX, tss->dx);
- kvm_register_write(vcpu, VCPU_REGS_RBX, tss->bx);
- kvm_register_write(vcpu, VCPU_REGS_RSP, tss->sp);
- kvm_register_write(vcpu, VCPU_REGS_RBP, tss->bp);
- kvm_register_write(vcpu, VCPU_REGS_RSI, tss->si);
- kvm_register_write(vcpu, VCPU_REGS_RDI, tss->di);
-
- /*
- * SDM says that segment selectors are loaded before segment
- * descriptors
- */
- kvm_load_segment_selector(vcpu, tss->ldt, VCPU_SREG_LDTR);
- kvm_load_segment_selector(vcpu, tss->es, VCPU_SREG_ES);
- kvm_load_segment_selector(vcpu, tss->cs, VCPU_SREG_CS);
- kvm_load_segment_selector(vcpu, tss->ss, VCPU_SREG_SS);
- kvm_load_segment_selector(vcpu, tss->ds, VCPU_SREG_DS);
-
- /*
- * Now load segment descriptors. If a fault happens at this stage
- * it is handled in the context of the new task
- */
- if (kvm_load_segment_descriptor(vcpu, tss->ldt, VCPU_SREG_LDTR))
- return 1;
-
- if (kvm_load_segment_descriptor(vcpu, tss->es, VCPU_SREG_ES))
- return 1;
-
- if (kvm_load_segment_descriptor(vcpu, tss->cs, VCPU_SREG_CS))
- return 1;
-
- if (kvm_load_segment_descriptor(vcpu, tss->ss, VCPU_SREG_SS))
- return 1;
-
- if (kvm_load_segment_descriptor(vcpu, tss->ds, VCPU_SREG_DS))
- return 1;
- return 0;
-}
-
-static int kvm_task_switch_16(struct kvm_vcpu *vcpu, u16 tss_selector,
- u16 old_tss_sel, u32 old_tss_base,
- struct desc_struct *nseg_desc)
-{
- struct tss_segment_16 tss_segment_16;
- int ret = 0;
-
- if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_16,
- sizeof tss_segment_16))
- goto out;
-
- save_state_to_tss16(vcpu, &tss_segment_16);
-
- if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_16,
- sizeof tss_segment_16))
- goto out;
-
- if (kvm_read_guest(vcpu->kvm, get_tss_base_addr_read(vcpu, nseg_desc),
- &tss_segment_16, sizeof tss_segment_16))
- goto out;
-
- if (old_tss_sel != 0xffff) {
- tss_segment_16.prev_task_link = old_tss_sel;
+ return EMULATE_FAIL;
- if (kvm_write_guest(vcpu->kvm,
- get_tss_base_addr_write(vcpu, nseg_desc),
- &tss_segment_16.prev_task_link,
- sizeof tss_segment_16.prev_task_link))
- goto out;
- }
-
- if (load_state_from_tss16(vcpu, &tss_segment_16))
- goto out;
-
- ret = 1;
-out:
- return ret;
-}
-
-static int kvm_task_switch_32(struct kvm_vcpu *vcpu, u16 tss_selector,
- u16 old_tss_sel, u32 old_tss_base,
- struct desc_struct *nseg_desc)
-{
- struct tss_segment_32 tss_segment_32;
- int ret = 0;
-
- if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_32,
- sizeof tss_segment_32))
- goto out;
-
- save_state_to_tss32(vcpu, &tss_segment_32);
-
- if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_32,
- sizeof tss_segment_32))
- goto out;
-
- if (kvm_read_guest(vcpu->kvm, get_tss_base_addr_read(vcpu, nseg_desc),
- &tss_segment_32, sizeof tss_segment_32))
- goto out;
-
- if (old_tss_sel != 0xffff) {
- tss_segment_32.prev_task_link = old_tss_sel;
-
- if (kvm_write_guest(vcpu->kvm,
- get_tss_base_addr_write(vcpu, nseg_desc),
- &tss_segment_32.prev_task_link,
- sizeof tss_segment_32.prev_task_link))
- goto out;
- }
-
- if (load_state_from_tss32(vcpu, &tss_segment_32))
- goto out;
-
- ret = 1;
-out:
- return ret;
-}
-
-int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
-{
- struct kvm_segment tr_seg;
- struct desc_struct cseg_desc;
- struct desc_struct nseg_desc;
- int ret = 0;
- u32 old_tss_base = get_segment_base(vcpu, VCPU_SREG_TR);
- u16 old_tss_sel = get_segment_selector(vcpu, VCPU_SREG_TR);
- u32 desc_limit;
-
- old_tss_base = kvm_mmu_gva_to_gpa_write(vcpu, old_tss_base, NULL);
-
- /* FIXME: Handle errors. Failure to read either TSS or their
- * descriptors should generate a pagefault.
- */
- if (load_guest_segment_descriptor(vcpu, tss_selector, &nseg_desc))
- goto out;
-
- if (load_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc))
- goto out;
-
- if (reason != TASK_SWITCH_IRET) {
- int cpl;
-
- cpl = kvm_x86_ops->get_cpl(vcpu);
- if ((tss_selector & 3) > nseg_desc.dpl || cpl > nseg_desc.dpl) {
- kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
- return 1;
- }
- }
-
- desc_limit = get_desc_limit(&nseg_desc);
- if (!nseg_desc.p ||
- ((desc_limit < 0x67 && (nseg_desc.type & 8)) ||
- desc_limit < 0x2b)) {
- kvm_queue_exception_e(vcpu, TS_VECTOR, tss_selector & 0xfffc);
- return 1;
- }
-
- if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
- cseg_desc.type &= ~(1 << 1); //clear the B flag
- save_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc);
- }
-
- if (reason == TASK_SWITCH_IRET) {
- u32 eflags = kvm_get_rflags(vcpu);
- kvm_set_rflags(vcpu, eflags & ~X86_EFLAGS_NT);
- }
-
- /* set back link to prev task only if NT bit is set in eflags
- note that old_tss_sel is not used after this point */
- if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
- old_tss_sel = 0xffff;
-
- if (nseg_desc.type & 8)
- ret = kvm_task_switch_32(vcpu, tss_selector, old_tss_sel,
- old_tss_base, &nseg_desc);
- else
- ret = kvm_task_switch_16(vcpu, tss_selector, old_tss_sel,
- old_tss_base, &nseg_desc);
-
- if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE) {
- u32 eflags = kvm_get_rflags(vcpu);
- kvm_set_rflags(vcpu, eflags | X86_EFLAGS_NT);
- }
-
- if (reason != TASK_SWITCH_IRET) {
- nseg_desc.type |= (1 << 1);
- save_guest_segment_descriptor(vcpu, tss_selector,
- &nseg_desc);
- }
-
- kvm_x86_ops->set_cr0(vcpu, kvm_read_cr0(vcpu) | X86_CR0_TS);
- seg_desct_to_kvm_desct(&nseg_desc, tss_selector, &tr_seg);
- tr_seg.type = 11;
- kvm_set_segment(vcpu, &tr_seg, VCPU_SREG_TR);
-out:
- return ret;
+ kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
+ return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvm_task_switch);
@@ -5204,15 +4933,15 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
{
int mmu_reset_needed = 0;
int pending_vec, max_bits;
- struct descriptor_table dt;
+ struct desc_ptr dt;
vcpu_load(vcpu);
- dt.limit = sregs->idt.limit;
- dt.base = sregs->idt.base;
+ dt.size = sregs->idt.limit;
+ dt.address = sregs->idt.base;
kvm_x86_ops->set_idt(vcpu, &dt);
- dt.limit = sregs->gdt.limit;
- dt.base = sregs->gdt.base;
+ dt.size = sregs->gdt.limit;
+ dt.address = sregs->gdt.base;
kvm_x86_ops->set_gdt(vcpu, &dt);
vcpu->arch.cr2 = sregs->cr2;
@@ -5311,11 +5040,9 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
vcpu->arch.switch_db_regs = (vcpu->arch.dr7 & DR7_BP_EN_MASK);
}
- if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
- vcpu->arch.singlestep_cs =
- get_segment_selector(vcpu, VCPU_SREG_CS);
- vcpu->arch.singlestep_rip = kvm_rip_read(vcpu);
- }
+ if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
+ vcpu->arch.singlestep_rip = kvm_rip_read(vcpu) +
+ get_segment_base(vcpu, VCPU_SREG_CS);
/*
* Trigger an rflags update that will inject or remove the trace
@@ -5806,13 +5533,22 @@ int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
return kvm_x86_ops->interrupt_allowed(vcpu);
}
+bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip)
+{
+ unsigned long current_rip = kvm_rip_read(vcpu) +
+ get_segment_base(vcpu, VCPU_SREG_CS);
+
+ return current_rip == linear_rip;
+}
+EXPORT_SYMBOL_GPL(kvm_is_linear_rip);
+
unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu)
{
unsigned long rflags;
rflags = kvm_x86_ops->get_rflags(vcpu);
if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
- rflags &= ~(unsigned long)(X86_EFLAGS_TF | X86_EFLAGS_RF);
+ rflags &= ~X86_EFLAGS_TF;
return rflags;
}
EXPORT_SYMBOL_GPL(kvm_get_rflags);
@@ -5820,10 +5556,8 @@ EXPORT_SYMBOL_GPL(kvm_get_rflags);
void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP &&
- vcpu->arch.singlestep_cs ==
- get_segment_selector(vcpu, VCPU_SREG_CS) &&
- vcpu->arch.singlestep_rip == kvm_rip_read(vcpu))
- rflags |= X86_EFLAGS_TF | X86_EFLAGS_RF;
+ kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip))
+ rflags |= X86_EFLAGS_TF;
kvm_x86_ops->set_rflags(vcpu, rflags);
}
EXPORT_SYMBOL_GPL(kvm_set_rflags);
@@ -5839,3 +5573,4 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit_inject);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intr_vmexit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_invlpga);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_skinit);
+EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intercepts);
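The guest-debug bookkeeping above, together with the new kvm_is_linear_rip() helper, replaces the old (CS selector, RIP) pair with a single linear RIP, i.e. RIP plus the CS segment base. A minimal sketch of the resulting check, with illustrative names only:

static bool singlestep_hit(unsigned long rip, unsigned long cs_base,
			   unsigned long saved_linear_rip)
{
	/* one comparison instead of matching selector and offset separately */
	return rip + cs_base == saved_linear_rip;
}

Storing the sum also keeps the check meaningful if the guest reloads CS with a different selector that maps to the same linear address.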
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 2d101639bd8d..f4b54458285b 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -65,4 +65,14 @@ static inline int is_paging(struct kvm_vcpu *vcpu)
return kvm_read_cr0_bits(vcpu, X86_CR0_PG);
}
+static inline struct kvm_mem_aliases *kvm_aliases(struct kvm *kvm)
+{
+ return rcu_dereference_check(kvm->arch.aliases,
+ srcu_read_lock_held(&kvm->srcu)
+ || lockdep_is_held(&kvm->slots_lock));
+}
+
+void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
+void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);
+
#endif
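kvm_aliases() encodes its locking contract in rcu_dereference_check(): the caller must either be inside an SRCU read-side critical section on kvm->srcu or hold kvm->slots_lock, otherwise CONFIG_PROVE_RCU will flag the dereference. A hedged usage sketch (the surrounding function is illustrative, not kernel code):

static void walk_aliases_example(struct kvm *kvm)
{
	int idx = srcu_read_lock(&kvm->srcu);	/* satisfies srcu_read_lock_held() */
	struct kvm_mem_aliases *aliases = kvm_aliases(kvm);

	/* ... read-only use of *aliases ... */

	srcu_read_unlock(&kvm->srcu, idx);
}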
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index 2bdf628066bd..9257510b4836 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -1390,7 +1390,6 @@ __init void lguest_init(void)
#endif
#ifdef CONFIG_ACPI
acpi_disabled = 1;
- acpi_ht = 0;
#endif
/*
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index 419386c24b82..f871e04b6965 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -20,17 +20,18 @@ lib-y := delay.o
lib-y += thunk_$(BITS).o
lib-y += usercopy_$(BITS).o getuser.o putuser.o
lib-y += memcpy_$(BITS).o
-lib-$(CONFIG_KPROBES) += insn.o inat.o
+lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o
obj-y += msr.o msr-reg.o msr-reg-export.o
ifeq ($(CONFIG_X86_32),y)
obj-y += atomic64_32.o
+ lib-y += atomic64_cx8_32.o
lib-y += checksum_32.o
lib-y += strstr_32.o
lib-y += semaphore_32.o string_32.o
ifneq ($(CONFIG_X86_CMPXCHG64),y)
- lib-y += cmpxchg8b_emu.o
+ lib-y += cmpxchg8b_emu.o atomic64_386_32.o
endif
lib-$(CONFIG_X86_USE_3DNOW) += mmx_32.o
else
diff --git a/arch/x86/lib/atomic64_32.c b/arch/x86/lib/atomic64_32.c
index 824fa0be55a3..540179e8e9fa 100644
--- a/arch/x86/lib/atomic64_32.c
+++ b/arch/x86/lib/atomic64_32.c
@@ -6,225 +6,54 @@
#include <asm/cmpxchg.h>
#include <asm/atomic.h>
-static noinline u64 cmpxchg8b(u64 *ptr, u64 old, u64 new)
-{
- u32 low = new;
- u32 high = new >> 32;
-
- asm volatile(
- LOCK_PREFIX "cmpxchg8b %1\n"
- : "+A" (old), "+m" (*ptr)
- : "b" (low), "c" (high)
- );
- return old;
-}
-
-u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val)
-{
- return cmpxchg8b(&ptr->counter, old_val, new_val);
-}
-EXPORT_SYMBOL(atomic64_cmpxchg);
-
-/**
- * atomic64_xchg - xchg atomic64 variable
- * @ptr: pointer to type atomic64_t
- * @new_val: value to assign
- *
- * Atomically xchgs the value of @ptr to @new_val and returns
- * the old value.
- */
-u64 atomic64_xchg(atomic64_t *ptr, u64 new_val)
-{
- /*
- * Try first with a (possibly incorrect) assumption about
- * what we have there. We'll do two loops most likely,
- * but we'll get an ownership MESI transaction straight away
- * instead of a read transaction followed by a
- * flush-for-ownership transaction:
- */
- u64 old_val, real_val = 0;
-
- do {
- old_val = real_val;
-
- real_val = atomic64_cmpxchg(ptr, old_val, new_val);
-
- } while (real_val != old_val);
-
- return old_val;
-}
-EXPORT_SYMBOL(atomic64_xchg);
-
-/**
- * atomic64_set - set atomic64 variable
- * @ptr: pointer to type atomic64_t
- * @new_val: value to assign
- *
- * Atomically sets the value of @ptr to @new_val.
- */
-void atomic64_set(atomic64_t *ptr, u64 new_val)
-{
- atomic64_xchg(ptr, new_val);
-}
-EXPORT_SYMBOL(atomic64_set);
-
-EXPORT_SYMBOL(atomic64_read);
-
-/**
- * atomic64_add_return - add and return
- * @delta: integer value to add
- * @ptr: pointer to type atomic64_t
- *
- * Atomically adds @delta to @ptr and returns @delta + *@ptr
- */
-noinline u64 atomic64_add_return(u64 delta, atomic64_t *ptr)
-{
- /*
- * Try first with a (possibly incorrect) assumption about
- * what we have there. We'll do two loops most likely,
- * but we'll get an ownership MESI transaction straight away
- * instead of a read transaction followed by a
- * flush-for-ownership transaction:
- */
- u64 old_val, new_val, real_val = 0;
-
- do {
- old_val = real_val;
- new_val = old_val + delta;
-
- real_val = atomic64_cmpxchg(ptr, old_val, new_val);
-
- } while (real_val != old_val);
-
- return new_val;
-}
-EXPORT_SYMBOL(atomic64_add_return);
-
-u64 atomic64_sub_return(u64 delta, atomic64_t *ptr)
-{
- return atomic64_add_return(-delta, ptr);
-}
-EXPORT_SYMBOL(atomic64_sub_return);
-
-u64 atomic64_inc_return(atomic64_t *ptr)
-{
- return atomic64_add_return(1, ptr);
-}
-EXPORT_SYMBOL(atomic64_inc_return);
-
-u64 atomic64_dec_return(atomic64_t *ptr)
-{
- return atomic64_sub_return(1, ptr);
-}
-EXPORT_SYMBOL(atomic64_dec_return);
-
-/**
- * atomic64_add - add integer to atomic64 variable
- * @delta: integer value to add
- * @ptr: pointer to type atomic64_t
- *
- * Atomically adds @delta to @ptr.
- */
-void atomic64_add(u64 delta, atomic64_t *ptr)
-{
- atomic64_add_return(delta, ptr);
-}
-EXPORT_SYMBOL(atomic64_add);
-
-/**
- * atomic64_sub - subtract the atomic64 variable
- * @delta: integer value to subtract
- * @ptr: pointer to type atomic64_t
- *
- * Atomically subtracts @delta from @ptr.
- */
-void atomic64_sub(u64 delta, atomic64_t *ptr)
-{
- atomic64_add(-delta, ptr);
-}
-EXPORT_SYMBOL(atomic64_sub);
-
-/**
- * atomic64_sub_and_test - subtract value from variable and test result
- * @delta: integer value to subtract
- * @ptr: pointer to type atomic64_t
- *
- * Atomically subtracts @delta from @ptr and returns
- * true if the result is zero, or false for all
- * other cases.
- */
-int atomic64_sub_and_test(u64 delta, atomic64_t *ptr)
-{
- u64 new_val = atomic64_sub_return(delta, ptr);
-
- return new_val == 0;
-}
-EXPORT_SYMBOL(atomic64_sub_and_test);
-
-/**
- * atomic64_inc - increment atomic64 variable
- * @ptr: pointer to type atomic64_t
- *
- * Atomically increments @ptr by 1.
- */
-void atomic64_inc(atomic64_t *ptr)
-{
- atomic64_add(1, ptr);
-}
-EXPORT_SYMBOL(atomic64_inc);
-
-/**
- * atomic64_dec - decrement atomic64 variable
- * @ptr: pointer to type atomic64_t
- *
- * Atomically decrements @ptr by 1.
- */
-void atomic64_dec(atomic64_t *ptr)
-{
- atomic64_sub(1, ptr);
-}
-EXPORT_SYMBOL(atomic64_dec);
-
-/**
- * atomic64_dec_and_test - decrement and test
- * @ptr: pointer to type atomic64_t
- *
- * Atomically decrements @ptr by 1 and
- * returns true if the result is 0, or false for all other
- * cases.
- */
-int atomic64_dec_and_test(atomic64_t *ptr)
-{
- return atomic64_sub_and_test(1, ptr);
-}
-EXPORT_SYMBOL(atomic64_dec_and_test);
-
-/**
- * atomic64_inc_and_test - increment and test
- * @ptr: pointer to type atomic64_t
- *
- * Atomically increments @ptr by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-int atomic64_inc_and_test(atomic64_t *ptr)
-{
- return atomic64_sub_and_test(-1, ptr);
-}
-EXPORT_SYMBOL(atomic64_inc_and_test);
-
-/**
- * atomic64_add_negative - add and test if negative
- * @delta: integer value to add
- * @ptr: pointer to type atomic64_t
- *
- * Atomically adds @delta to @ptr and returns true
- * if the result is negative, or false when
- * result is greater than or equal to zero.
- */
-int atomic64_add_negative(u64 delta, atomic64_t *ptr)
-{
- s64 new_val = atomic64_add_return(delta, ptr);
-
- return new_val < 0;
-}
-EXPORT_SYMBOL(atomic64_add_negative);
+long long atomic64_read_cx8(long long, const atomic64_t *v);
+EXPORT_SYMBOL(atomic64_read_cx8);
+long long atomic64_set_cx8(long long, const atomic64_t *v);
+EXPORT_SYMBOL(atomic64_set_cx8);
+long long atomic64_xchg_cx8(long long, unsigned high);
+EXPORT_SYMBOL(atomic64_xchg_cx8);
+long long atomic64_add_return_cx8(long long a, atomic64_t *v);
+EXPORT_SYMBOL(atomic64_add_return_cx8);
+long long atomic64_sub_return_cx8(long long a, atomic64_t *v);
+EXPORT_SYMBOL(atomic64_sub_return_cx8);
+long long atomic64_inc_return_cx8(long long a, atomic64_t *v);
+EXPORT_SYMBOL(atomic64_inc_return_cx8);
+long long atomic64_dec_return_cx8(long long a, atomic64_t *v);
+EXPORT_SYMBOL(atomic64_dec_return_cx8);
+long long atomic64_dec_if_positive_cx8(atomic64_t *v);
+EXPORT_SYMBOL(atomic64_dec_if_positive_cx8);
+int atomic64_inc_not_zero_cx8(atomic64_t *v);
+EXPORT_SYMBOL(atomic64_inc_not_zero_cx8);
+int atomic64_add_unless_cx8(atomic64_t *v, long long a, long long u);
+EXPORT_SYMBOL(atomic64_add_unless_cx8);
+
+#ifndef CONFIG_X86_CMPXCHG64
+long long atomic64_read_386(long long, const atomic64_t *v);
+EXPORT_SYMBOL(atomic64_read_386);
+long long atomic64_set_386(long long, const atomic64_t *v);
+EXPORT_SYMBOL(atomic64_set_386);
+long long atomic64_xchg_386(long long, unsigned high);
+EXPORT_SYMBOL(atomic64_xchg_386);
+long long atomic64_add_return_386(long long a, atomic64_t *v);
+EXPORT_SYMBOL(atomic64_add_return_386);
+long long atomic64_sub_return_386(long long a, atomic64_t *v);
+EXPORT_SYMBOL(atomic64_sub_return_386);
+long long atomic64_inc_return_386(long long a, atomic64_t *v);
+EXPORT_SYMBOL(atomic64_inc_return_386);
+long long atomic64_dec_return_386(long long a, atomic64_t *v);
+EXPORT_SYMBOL(atomic64_dec_return_386);
+long long atomic64_add_386(long long a, atomic64_t *v);
+EXPORT_SYMBOL(atomic64_add_386);
+long long atomic64_sub_386(long long a, atomic64_t *v);
+EXPORT_SYMBOL(atomic64_sub_386);
+long long atomic64_inc_386(long long a, atomic64_t *v);
+EXPORT_SYMBOL(atomic64_inc_386);
+long long atomic64_dec_386(long long a, atomic64_t *v);
+EXPORT_SYMBOL(atomic64_dec_386);
+long long atomic64_dec_if_positive_386(atomic64_t *v);
+EXPORT_SYMBOL(atomic64_dec_if_positive_386);
+int atomic64_inc_not_zero_386(atomic64_t *v);
+EXPORT_SYMBOL(atomic64_inc_not_zero_386);
+int atomic64_add_unless_386(atomic64_t *v, long long a, long long u);
+EXPORT_SYMBOL(atomic64_add_unless_386);
+#endif
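The removed C helpers above and the new out-of-line _cx8/_386 routines below all implement the same compare-and-swap retry loop; only the calling convention and the instruction used change. A user-space sketch of that loop, using GCC's __sync builtins rather than the kernel's cmpxchg8b wrappers (illustrative only):

/* retry until no other CPU modified *v between the read and the CAS */
long long add_return_sketch(long long delta, long long *v)
{
	long long old, new;

	do {
		old = *v;		/* possibly stale snapshot */
		new = old + delta;
	} while (!__sync_bool_compare_and_swap(v, old, new));

	return new;
}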
diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
new file mode 100644
index 000000000000..4a5979aa6883
--- /dev/null
+++ b/arch/x86/lib/atomic64_386_32.S
@@ -0,0 +1,174 @@
+/*
+ * atomic64_t for 386/486
+ *
+ * Copyright © 2010 Luca Barbieri
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/linkage.h>
+#include <asm/alternative-asm.h>
+#include <asm/dwarf2.h>
+
+/* if you want SMP support, implement these with real spinlocks */
+.macro LOCK reg
+ pushfl
+ CFI_ADJUST_CFA_OFFSET 4
+ cli
+.endm
+
+.macro UNLOCK reg
+ popfl
+ CFI_ADJUST_CFA_OFFSET -4
+.endm
+
+.macro BEGIN func reg
+$v = \reg
+
+ENTRY(atomic64_\func\()_386)
+ CFI_STARTPROC
+ LOCK $v
+
+.macro RETURN
+ UNLOCK $v
+ ret
+.endm
+
+.macro END_
+ CFI_ENDPROC
+ENDPROC(atomic64_\func\()_386)
+.purgem RETURN
+.purgem END_
+.purgem END
+.endm
+
+.macro END
+RETURN
+END_
+.endm
+.endm
+
+BEGIN read %ecx
+ movl ($v), %eax
+ movl 4($v), %edx
+END
+
+BEGIN set %esi
+ movl %ebx, ($v)
+ movl %ecx, 4($v)
+END
+
+BEGIN xchg %esi
+ movl ($v), %eax
+ movl 4($v), %edx
+ movl %ebx, ($v)
+ movl %ecx, 4($v)
+END
+
+BEGIN add %ecx
+ addl %eax, ($v)
+ adcl %edx, 4($v)
+END
+
+BEGIN add_return %ecx
+ addl ($v), %eax
+ adcl 4($v), %edx
+ movl %eax, ($v)
+ movl %edx, 4($v)
+END
+
+BEGIN sub %ecx
+ subl %eax, ($v)
+ sbbl %edx, 4($v)
+END
+
+BEGIN sub_return %ecx
+ negl %edx
+ negl %eax
+ sbbl $0, %edx
+ addl ($v), %eax
+ adcl 4($v), %edx
+ movl %eax, ($v)
+ movl %edx, 4($v)
+END
+
+BEGIN inc %esi
+ addl $1, ($v)
+ adcl $0, 4($v)
+END
+
+BEGIN inc_return %esi
+ movl ($v), %eax
+ movl 4($v), %edx
+ addl $1, %eax
+ adcl $0, %edx
+ movl %eax, ($v)
+ movl %edx, 4($v)
+END
+
+BEGIN dec %esi
+ subl $1, ($v)
+ sbbl $0, 4($v)
+END
+
+BEGIN dec_return %esi
+ movl ($v), %eax
+ movl 4($v), %edx
+ subl $1, %eax
+ sbbl $0, %edx
+ movl %eax, ($v)
+ movl %edx, 4($v)
+END
+
+BEGIN add_unless %ecx
+ addl %eax, %esi
+ adcl %edx, %edi
+ addl ($v), %eax
+ adcl 4($v), %edx
+ cmpl %eax, %esi
+ je 3f
+1:
+ movl %eax, ($v)
+ movl %edx, 4($v)
+ movl $1, %eax
+2:
+RETURN
+3:
+ cmpl %edx, %edi
+ jne 1b
+ xorl %eax, %eax
+ jmp 2b
+END_
+
+BEGIN inc_not_zero %esi
+ movl ($v), %eax
+ movl 4($v), %edx
+ testl %eax, %eax
+ je 3f
+1:
+ addl $1, %eax
+ adcl $0, %edx
+ movl %eax, ($v)
+ movl %edx, 4($v)
+ movl $1, %eax
+2:
+RETURN
+3:
+ testl %edx, %edx
+ jne 1b
+ jmp 2b
+END_
+
+BEGIN dec_if_positive %esi
+ movl ($v), %eax
+ movl 4($v), %edx
+ subl $1, %eax
+ sbbl $0, %edx
+ js 1f
+ movl %eax, ($v)
+ movl %edx, 4($v)
+1:
+END
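As the comment at the top of the file notes, the _386 variants only need to be atomic with respect to interrupts on a uniprocessor 386/486, so LOCK/UNLOCK simply bracket the operation with pushfl/cli and popfl. A rough C equivalent of that bracketing, assuming the usual local_irq_save()/local_irq_restore() helpers:

static long long atomic64_add_return_386_sketch(long long a, long long *v)
{
	unsigned long flags;
	long long ret;

	local_irq_save(flags);		/* pushfl; cli */
	*v += a;
	ret = *v;
	local_irq_restore(flags);	/* popfl */

	return ret;
}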
diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
new file mode 100644
index 000000000000..71e080de3352
--- /dev/null
+++ b/arch/x86/lib/atomic64_cx8_32.S
@@ -0,0 +1,224 @@
+/*
+ * atomic64_t for 586+
+ *
+ * Copyright © 2010 Luca Barbieri
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/linkage.h>
+#include <asm/alternative-asm.h>
+#include <asm/dwarf2.h>
+
+.macro SAVE reg
+ pushl %\reg
+ CFI_ADJUST_CFA_OFFSET 4
+ CFI_REL_OFFSET \reg, 0
+.endm
+
+.macro RESTORE reg
+ popl %\reg
+ CFI_ADJUST_CFA_OFFSET -4
+ CFI_RESTORE \reg
+.endm
+
+.macro read64 reg
+ movl %ebx, %eax
+ movl %ecx, %edx
+/* we need LOCK_PREFIX since otherwise cmpxchg8b always does the write */
+ LOCK_PREFIX
+ cmpxchg8b (\reg)
+.endm
+
+ENTRY(atomic64_read_cx8)
+ CFI_STARTPROC
+
+ read64 %ecx
+ ret
+ CFI_ENDPROC
+ENDPROC(atomic64_read_cx8)
+
+ENTRY(atomic64_set_cx8)
+ CFI_STARTPROC
+
+1:
+/* we don't need LOCK_PREFIX since aligned 64-bit writes
+ * are atomic on 586 and newer */
+ cmpxchg8b (%esi)
+ jne 1b
+
+ ret
+ CFI_ENDPROC
+ENDPROC(atomic64_set_cx8)
+
+ENTRY(atomic64_xchg_cx8)
+ CFI_STARTPROC
+
+ movl %ebx, %eax
+ movl %ecx, %edx
+1:
+ LOCK_PREFIX
+ cmpxchg8b (%esi)
+ jne 1b
+
+ ret
+ CFI_ENDPROC
+ENDPROC(atomic64_xchg_cx8)
+
+.macro addsub_return func ins insc
+ENTRY(atomic64_\func\()_return_cx8)
+ CFI_STARTPROC
+ SAVE ebp
+ SAVE ebx
+ SAVE esi
+ SAVE edi
+
+ movl %eax, %esi
+ movl %edx, %edi
+ movl %ecx, %ebp
+
+ read64 %ebp
+1:
+ movl %eax, %ebx
+ movl %edx, %ecx
+ \ins\()l %esi, %ebx
+ \insc\()l %edi, %ecx
+ LOCK_PREFIX
+ cmpxchg8b (%ebp)
+ jne 1b
+
+10:
+ movl %ebx, %eax
+ movl %ecx, %edx
+ RESTORE edi
+ RESTORE esi
+ RESTORE ebx
+ RESTORE ebp
+ ret
+ CFI_ENDPROC
+ENDPROC(atomic64_\func\()_return_cx8)
+.endm
+
+addsub_return add add adc
+addsub_return sub sub sbb
+
+.macro incdec_return func ins insc
+ENTRY(atomic64_\func\()_return_cx8)
+ CFI_STARTPROC
+ SAVE ebx
+
+ read64 %esi
+1:
+ movl %eax, %ebx
+ movl %edx, %ecx
+ \ins\()l $1, %ebx
+ \insc\()l $0, %ecx
+ LOCK_PREFIX
+ cmpxchg8b (%esi)
+ jne 1b
+
+10:
+ movl %ebx, %eax
+ movl %ecx, %edx
+ RESTORE ebx
+ ret
+ CFI_ENDPROC
+ENDPROC(atomic64_\func\()_return_cx8)
+.endm
+
+incdec_return inc add adc
+incdec_return dec sub sbb
+
+ENTRY(atomic64_dec_if_positive_cx8)
+ CFI_STARTPROC
+ SAVE ebx
+
+ read64 %esi
+1:
+ movl %eax, %ebx
+ movl %edx, %ecx
+ subl $1, %ebx
+ sbb $0, %ecx
+ js 2f
+ LOCK_PREFIX
+ cmpxchg8b (%esi)
+ jne 1b
+
+2:
+ movl %ebx, %eax
+ movl %ecx, %edx
+ RESTORE ebx
+ ret
+ CFI_ENDPROC
+ENDPROC(atomic64_dec_if_positive_cx8)
+
+ENTRY(atomic64_add_unless_cx8)
+ CFI_STARTPROC
+ SAVE ebp
+ SAVE ebx
+/* these just push these two parameters on the stack */
+ SAVE edi
+ SAVE esi
+
+ movl %ecx, %ebp
+ movl %eax, %esi
+ movl %edx, %edi
+
+ read64 %ebp
+1:
+ cmpl %eax, 0(%esp)
+ je 4f
+2:
+ movl %eax, %ebx
+ movl %edx, %ecx
+ addl %esi, %ebx
+ adcl %edi, %ecx
+ LOCK_PREFIX
+ cmpxchg8b (%ebp)
+ jne 1b
+
+ movl $1, %eax
+3:
+ addl $8, %esp
+ CFI_ADJUST_CFA_OFFSET -8
+ RESTORE ebx
+ RESTORE ebp
+ ret
+4:
+ cmpl %edx, 4(%esp)
+ jne 2b
+ xorl %eax, %eax
+ jmp 3b
+ CFI_ENDPROC
+ENDPROC(atomic64_add_unless_cx8)
+
+ENTRY(atomic64_inc_not_zero_cx8)
+ CFI_STARTPROC
+ SAVE ebx
+
+ read64 %esi
+1:
+ testl %eax, %eax
+ je 4f
+2:
+ movl %eax, %ebx
+ movl %edx, %ecx
+ addl $1, %ebx
+ adcl $0, %ecx
+ LOCK_PREFIX
+ cmpxchg8b (%esi)
+ jne 1b
+
+ movl $1, %eax
+3:
+ RESTORE ebx
+ ret
+4:
+ testl %edx, %edx
+ jne 2b
+ jmp 3b
+ CFI_ENDPROC
+ENDPROC(atomic64_inc_not_zero_cx8)
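The read64 macro above turns cmpxchg8b into an atomic 64-bit load: it compares memory against an arbitrary expected value and offers that same value back, so the contents are left unchanged while the current value lands in edx:eax either way. The same trick expressed with a compiler builtin (sketch only):

static long long atomic64_read_sketch(long long *v)
{
	long long guess = 0;	/* arbitrary expected value */

	/* returns *v whether or not the compare succeeded; the value is unchanged */
	return __sync_val_compare_and_swap(v, guess, guess);
}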
diff --git a/arch/x86/math-emu/fpu_aux.c b/arch/x86/math-emu/fpu_aux.c
index aa0987088774..dc8adad10a2f 100644
--- a/arch/x86/math-emu/fpu_aux.c
+++ b/arch/x86/math-emu/fpu_aux.c
@@ -30,10 +30,10 @@ static void fclex(void)
}
/* Needs to be externally visible */
-void finit_task(struct task_struct *tsk)
+void finit_soft_fpu(struct i387_soft_struct *soft)
{
- struct i387_soft_struct *soft = &tsk->thread.xstate->soft;
struct address *oaddr, *iaddr;
+ memset(soft, 0, sizeof(*soft));
soft->cwd = 0x037f;
soft->swd = 0;
soft->ftop = 0; /* We don't keep top in the status word internally. */
@@ -52,7 +52,7 @@ void finit_task(struct task_struct *tsk)
void finit(void)
{
- finit_task(current);
+ finit_soft_fpu(&current->thread.fpu.state->soft);
}
/*
diff --git a/arch/x86/math-emu/fpu_entry.c b/arch/x86/math-emu/fpu_entry.c
index 5d87f586f8d7..7718541541d4 100644
--- a/arch/x86/math-emu/fpu_entry.c
+++ b/arch/x86/math-emu/fpu_entry.c
@@ -681,7 +681,7 @@ int fpregs_soft_set(struct task_struct *target,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
- struct i387_soft_struct *s387 = &target->thread.xstate->soft;
+ struct i387_soft_struct *s387 = &target->thread.fpu.state->soft;
void *space = s387->st_space;
int ret;
int offset, other, i, tags, regnr, tag, newtop;
@@ -733,7 +733,7 @@ int fpregs_soft_get(struct task_struct *target,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
- struct i387_soft_struct *s387 = &target->thread.xstate->soft;
+ struct i387_soft_struct *s387 = &target->thread.fpu.state->soft;
const void *space = s387->st_space;
int ret;
int offset = (S387->ftop & 7) * 10, other = 80 - offset;
diff --git a/arch/x86/math-emu/fpu_system.h b/arch/x86/math-emu/fpu_system.h
index 50fa0ec2c8a5..2c614410a5f3 100644
--- a/arch/x86/math-emu/fpu_system.h
+++ b/arch/x86/math-emu/fpu_system.h
@@ -31,7 +31,7 @@
#define SEG_EXPAND_DOWN(s) (((s).b & ((1 << 11) | (1 << 10))) \
== (1 << 10))
-#define I387 (current->thread.xstate)
+#define I387 (current->thread.fpu.state)
#define FPU_info (I387->soft.info)
#define FPU_CS (*(unsigned short *) &(FPU_info->regs->cs))
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index 06630d26e56d..a4c768397baa 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -6,6 +6,7 @@ nostackp := $(call cc-option, -fno-stack-protector)
CFLAGS_physaddr.o := $(nostackp)
CFLAGS_setup_nx.o := $(nostackp)
+obj-$(CONFIG_X86_PAT) += pat_rbtree.o
obj-$(CONFIG_SMP) += tlb.o
obj-$(CONFIG_X86_32) += pgtable_32.o iomap_32.o
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index 8948f47fde05..a7bcc23ef96c 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -33,9 +33,6 @@ int numa_off __initdata;
static unsigned long __initdata nodemap_addr;
static unsigned long __initdata nodemap_size;
-DEFINE_PER_CPU(int, node_number) = 0;
-EXPORT_PER_CPU_SYMBOL(node_number);
-
/*
* Map cpu index to node index
*/
@@ -809,7 +806,7 @@ void __cpuinit numa_set_node(int cpu, int node)
per_cpu(x86_cpu_to_node_map, cpu) = node;
if (node != NUMA_NO_NODE)
- per_cpu(node_number, cpu) = node;
+ set_cpu_numa_node(cpu, node);
}
void __cpuinit numa_clear_node(int cpu)
@@ -867,7 +864,7 @@ void __cpuinit numa_remove_cpu(int cpu)
numa_set_cpumask(cpu, 0);
}
-int cpu_to_node(int cpu)
+int __cpu_to_node(int cpu)
{
if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
printk(KERN_WARNING
@@ -877,7 +874,7 @@ int cpu_to_node(int cpu)
}
return per_cpu(x86_cpu_to_node_map, cpu);
}
-EXPORT_SYMBOL(cpu_to_node);
+EXPORT_SYMBOL(__cpu_to_node);
/*
* Same function as cpu_to_node() but used if called before the
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 28195c350b97..532e7933d606 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -997,7 +997,8 @@ out_err:
}
EXPORT_SYMBOL(set_memory_uc);
-int set_memory_array_uc(unsigned long *addr, int addrinarray)
+int _set_memory_array(unsigned long *addr, int addrinarray,
+ unsigned long new_type)
{
int i, j;
int ret;
@@ -1007,13 +1008,19 @@ int set_memory_array_uc(unsigned long *addr, int addrinarray)
*/
for (i = 0; i < addrinarray; i++) {
ret = reserve_memtype(__pa(addr[i]), __pa(addr[i]) + PAGE_SIZE,
- _PAGE_CACHE_UC_MINUS, NULL);
+ new_type, NULL);
if (ret)
goto out_free;
}
ret = change_page_attr_set(addr, addrinarray,
__pgprot(_PAGE_CACHE_UC_MINUS), 1);
+
+ if (!ret && new_type == _PAGE_CACHE_WC)
+ ret = change_page_attr_set_clr(addr, addrinarray,
+ __pgprot(_PAGE_CACHE_WC),
+ __pgprot(_PAGE_CACHE_MASK),
+ 0, CPA_ARRAY, NULL);
if (ret)
goto out_free;
@@ -1025,8 +1032,19 @@ out_free:
return ret;
}
+
+int set_memory_array_uc(unsigned long *addr, int addrinarray)
+{
+ return _set_memory_array(addr, addrinarray, _PAGE_CACHE_UC_MINUS);
+}
EXPORT_SYMBOL(set_memory_array_uc);
+int set_memory_array_wc(unsigned long *addr, int addrinarray)
+{
+ return _set_memory_array(addr, addrinarray, _PAGE_CACHE_WC);
+}
+EXPORT_SYMBOL(set_memory_array_wc);
+
int _set_memory_wc(unsigned long addr, int numpages)
{
int ret;
@@ -1153,26 +1171,34 @@ int set_pages_uc(struct page *page, int numpages)
}
EXPORT_SYMBOL(set_pages_uc);
-int set_pages_array_uc(struct page **pages, int addrinarray)
+static int _set_pages_array(struct page **pages, int addrinarray,
+ unsigned long new_type)
{
unsigned long start;
unsigned long end;
int i;
int free_idx;
+ int ret;
for (i = 0; i < addrinarray; i++) {
if (PageHighMem(pages[i]))
continue;
start = page_to_pfn(pages[i]) << PAGE_SHIFT;
end = start + PAGE_SIZE;
- if (reserve_memtype(start, end, _PAGE_CACHE_UC_MINUS, NULL))
+ if (reserve_memtype(start, end, new_type, NULL))
goto err_out;
}
- if (cpa_set_pages_array(pages, addrinarray,
- __pgprot(_PAGE_CACHE_UC_MINUS)) == 0) {
- return 0; /* Success */
- }
+ ret = cpa_set_pages_array(pages, addrinarray,
+ __pgprot(_PAGE_CACHE_UC_MINUS));
+ if (!ret && new_type == _PAGE_CACHE_WC)
+ ret = change_page_attr_set_clr(NULL, addrinarray,
+ __pgprot(_PAGE_CACHE_WC),
+ __pgprot(_PAGE_CACHE_MASK),
+ 0, CPA_PAGES_ARRAY, pages);
+ if (ret)
+ goto err_out;
+ return 0; /* Success */
err_out:
free_idx = i;
for (i = 0; i < free_idx; i++) {
@@ -1184,8 +1210,19 @@ err_out:
}
return -EINVAL;
}
+
+int set_pages_array_uc(struct page **pages, int addrinarray)
+{
+ return _set_pages_array(pages, addrinarray, _PAGE_CACHE_UC_MINUS);
+}
EXPORT_SYMBOL(set_pages_array_uc);
+int set_pages_array_wc(struct page **pages, int addrinarray)
+{
+ return _set_pages_array(pages, addrinarray, _PAGE_CACHE_WC);
+}
+EXPORT_SYMBOL(set_pages_array_wc);
+
int set_pages_wb(struct page *page, int numpages)
{
unsigned long addr = (unsigned long)page_address(page);
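The new set_memory_array_wc()/set_pages_array_wc() entry points mirror the existing _uc variants but reserve the range as write-combining. A hedged driver-side sketch (names illustrative, error handling trimmed), assuming the pre-existing set_pages_array_wb() is used for teardown:

static int make_pages_wc_example(struct page **pages, int npages)
{
	/* reserve and switch the whole array to write-combining */
	if (set_pages_array_wc(pages, npages))
		return -EINVAL;

	/* ... stream data through the WC mapping ... */

	/* restore the default write-back attribute when done */
	return set_pages_array_wb(pages, npages);
}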
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index edc8b95afc1a..acc15b23b743 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -30,6 +30,8 @@
#include <asm/pat.h>
#include <asm/io.h>
+#include "pat_internal.h"
+
#ifdef CONFIG_X86_PAT
int __read_mostly pat_enabled = 1;
@@ -53,19 +55,15 @@ static inline void pat_disable(const char *reason)
#endif
-static int debug_enable;
+int pat_debug_enable;
static int __init pat_debug_setup(char *str)
{
- debug_enable = 1;
+ pat_debug_enable = 1;
return 0;
}
__setup("debugpat", pat_debug_setup);
-#define dprintk(fmt, arg...) \
- do { if (debug_enable) printk(KERN_INFO fmt, ##arg); } while (0)
-
-
static u64 __read_mostly boot_pat_state;
enum {
@@ -132,84 +130,7 @@ void pat_init(void)
#undef PAT
-static char *cattr_name(unsigned long flags)
-{
- switch (flags & _PAGE_CACHE_MASK) {
- case _PAGE_CACHE_UC: return "uncached";
- case _PAGE_CACHE_UC_MINUS: return "uncached-minus";
- case _PAGE_CACHE_WB: return "write-back";
- case _PAGE_CACHE_WC: return "write-combining";
- default: return "broken";
- }
-}
-
-/*
- * The global memtype list keeps track of memory type for specific
- * physical memory areas. Conflicting memory types in different
- * mappings can cause CPU cache corruption. To avoid this we keep track.
- *
- * The list is sorted based on starting address and can contain multiple
- * entries for each address (this allows reference counting for overlapping
- * areas). All the aliases have the same cache attributes of course.
- * Zero attributes are represented as holes.
- *
- * The data structure is a list that is also organized as an rbtree
- * sorted on the start address of memtype range.
- *
- * memtype_lock protects both the linear list and rbtree.
- */
-
-struct memtype {
- u64 start;
- u64 end;
- unsigned long type;
- struct list_head nd;
- struct rb_node rb;
-};
-
-static struct rb_root memtype_rbroot = RB_ROOT;
-static LIST_HEAD(memtype_list);
-static DEFINE_SPINLOCK(memtype_lock); /* protects memtype list */
-
-static struct memtype *memtype_rb_search(struct rb_root *root, u64 start)
-{
- struct rb_node *node = root->rb_node;
- struct memtype *last_lower = NULL;
-
- while (node) {
- struct memtype *data = container_of(node, struct memtype, rb);
-
- if (data->start < start) {
- last_lower = data;
- node = node->rb_right;
- } else if (data->start > start) {
- node = node->rb_left;
- } else
- return data;
- }
-
- /* Will return NULL if there is no entry with its start <= start */
- return last_lower;
-}
-
-static void memtype_rb_insert(struct rb_root *root, struct memtype *data)
-{
- struct rb_node **new = &(root->rb_node);
- struct rb_node *parent = NULL;
-
- while (*new) {
- struct memtype *this = container_of(*new, struct memtype, rb);
-
- parent = *new;
- if (data->start <= this->start)
- new = &((*new)->rb_left);
- else if (data->start > this->start)
- new = &((*new)->rb_right);
- }
-
- rb_link_node(&data->rb, parent, new);
- rb_insert_color(&data->rb, root);
-}
+static DEFINE_SPINLOCK(memtype_lock); /* protects memtype accesses */
/*
* Does intersection of PAT memory type and MTRR memory type and returns
@@ -237,33 +158,6 @@ static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type)
return req_type;
}
-static int
-chk_conflict(struct memtype *new, struct memtype *entry, unsigned long *type)
-{
- if (new->type != entry->type) {
- if (type) {
- new->type = entry->type;
- *type = entry->type;
- } else
- goto conflict;
- }
-
- /* check overlaps with more than one entry in the list */
- list_for_each_entry_continue(entry, &memtype_list, nd) {
- if (new->end <= entry->start)
- break;
- else if (new->type != entry->type)
- goto conflict;
- }
- return 0;
-
- conflict:
- printk(KERN_INFO "%s:%d conflicting memory types "
- "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
- new->end, cattr_name(new->type), cattr_name(entry->type));
- return -EBUSY;
-}
-
static int pat_pagerange_is_ram(unsigned long start, unsigned long end)
{
int ram_page = 0, not_rampage = 0;
@@ -296,8 +190,6 @@ static int pat_pagerange_is_ram(unsigned long start, unsigned long end)
* Here we do two pass:
* - Find the memtype of all the pages in the range, look for any conflicts
* - In case of no conflicts, set the new memtype for pages in the range
- *
- * Caller must hold memtype_lock for atomicity.
*/
static int reserve_ram_pages_type(u64 start, u64 end, unsigned long req_type,
unsigned long *new_type)
@@ -364,9 +256,8 @@ static int free_ram_pages_type(u64 start, u64 end)
int reserve_memtype(u64 start, u64 end, unsigned long req_type,
unsigned long *new_type)
{
- struct memtype *new, *entry;
+ struct memtype *new;
unsigned long actual_type;
- struct list_head *where;
int is_range_ram;
int err = 0;
@@ -404,9 +295,7 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
is_range_ram = pat_pagerange_is_ram(start, end);
if (is_range_ram == 1) {
- spin_lock(&memtype_lock);
err = reserve_ram_pages_type(start, end, req_type, new_type);
- spin_unlock(&memtype_lock);
return err;
} else if (is_range_ram < 0) {
@@ -423,42 +312,7 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
spin_lock(&memtype_lock);
- /* Search for existing mapping that overlaps the current range */
- where = NULL;
- list_for_each_entry(entry, &memtype_list, nd) {
- if (end <= entry->start) {
- where = entry->nd.prev;
- break;
- } else if (start <= entry->start) { /* end > entry->start */
- err = chk_conflict(new, entry, new_type);
- if (!err) {
- dprintk("Overlap at 0x%Lx-0x%Lx\n",
- entry->start, entry->end);
- where = entry->nd.prev;
- }
- break;
- } else if (start < entry->end) { /* start > entry->start */
- err = chk_conflict(new, entry, new_type);
- if (!err) {
- dprintk("Overlap at 0x%Lx-0x%Lx\n",
- entry->start, entry->end);
-
- /*
- * Move to right position in the linked
- * list to add this new entry
- */
- list_for_each_entry_continue(entry,
- &memtype_list, nd) {
- if (start <= entry->start) {
- where = entry->nd.prev;
- break;
- }
- }
- }
- break;
- }
- }
-
+ err = rbt_memtype_check_insert(new, new_type);
if (err) {
printk(KERN_INFO "reserve_memtype failed 0x%Lx-0x%Lx, "
"track %s, req %s\n",
@@ -469,13 +323,6 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
return err;
}
- if (where)
- list_add(&new->nd, where);
- else
- list_add_tail(&new->nd, &memtype_list);
-
- memtype_rb_insert(&memtype_rbroot, new);
-
spin_unlock(&memtype_lock);
dprintk("reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s, ret %s\n",
@@ -487,9 +334,9 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
int free_memtype(u64 start, u64 end)
{
- struct memtype *entry, *saved_entry;
int err = -EINVAL;
int is_range_ram;
+ struct memtype *entry;
if (!pat_enabled)
return 0;
@@ -501,9 +348,7 @@ int free_memtype(u64 start, u64 end)
is_range_ram = pat_pagerange_is_ram(start, end);
if (is_range_ram == 1) {
- spin_lock(&memtype_lock);
err = free_ram_pages_type(start, end);
- spin_unlock(&memtype_lock);
return err;
} else if (is_range_ram < 0) {
@@ -511,56 +356,20 @@ int free_memtype(u64 start, u64 end)
}
spin_lock(&memtype_lock);
-
- entry = memtype_rb_search(&memtype_rbroot, start);
- if (unlikely(entry == NULL))
- goto unlock_ret;
-
- /*
- * Saved entry points to an entry with start same or less than what
- * we searched for. Now go through the list in both directions to look
- * for the entry that matches with both start and end, with list stored
- * in sorted start address
- */
- saved_entry = entry;
- list_for_each_entry_from(entry, &memtype_list, nd) {
- if (entry->start == start && entry->end == end) {
- rb_erase(&entry->rb, &memtype_rbroot);
- list_del(&entry->nd);
- kfree(entry);
- err = 0;
- break;
- } else if (entry->start > start) {
- break;
- }
- }
-
- if (!err)
- goto unlock_ret;
-
- entry = saved_entry;
- list_for_each_entry_reverse(entry, &memtype_list, nd) {
- if (entry->start == start && entry->end == end) {
- rb_erase(&entry->rb, &memtype_rbroot);
- list_del(&entry->nd);
- kfree(entry);
- err = 0;
- break;
- } else if (entry->start < start) {
- break;
- }
- }
-unlock_ret:
+ entry = rbt_memtype_erase(start, end);
spin_unlock(&memtype_lock);
- if (err) {
+ if (!entry) {
printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
current->comm, current->pid, start, end);
+ return -EINVAL;
}
+ kfree(entry);
+
dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);
- return err;
+ return 0;
}
@@ -583,10 +392,8 @@ static unsigned long lookup_memtype(u64 paddr)
if (pat_pagerange_is_ram(paddr, paddr + PAGE_SIZE)) {
struct page *page;
- spin_lock(&memtype_lock);
page = pfn_to_page(paddr >> PAGE_SHIFT);
rettype = get_page_memtype(page);
- spin_unlock(&memtype_lock);
/*
* -1 from get_page_memtype() implies RAM page is in its
* default state and not reserved, and hence of type WB
@@ -599,7 +406,7 @@ static unsigned long lookup_memtype(u64 paddr)
spin_lock(&memtype_lock);
- entry = memtype_rb_search(&memtype_rbroot, paddr);
+ entry = rbt_memtype_lookup(paddr);
if (entry != NULL)
rettype = entry->type;
else
@@ -936,29 +743,25 @@ EXPORT_SYMBOL_GPL(pgprot_writecombine);
#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT)
-/* get Nth element of the linked list */
static struct memtype *memtype_get_idx(loff_t pos)
{
- struct memtype *list_node, *print_entry;
- int i = 1;
+ struct memtype *print_entry;
+ int ret;
- print_entry = kmalloc(sizeof(struct memtype), GFP_KERNEL);
+ print_entry = kzalloc(sizeof(struct memtype), GFP_KERNEL);
if (!print_entry)
return NULL;
spin_lock(&memtype_lock);
- list_for_each_entry(list_node, &memtype_list, nd) {
- if (pos == i) {
- *print_entry = *list_node;
- spin_unlock(&memtype_lock);
- return print_entry;
- }
- ++i;
- }
+ ret = rbt_memtype_copy_nth_element(print_entry, pos);
spin_unlock(&memtype_lock);
- kfree(print_entry);
- return NULL;
+ if (!ret) {
+ return print_entry;
+ } else {
+ kfree(print_entry);
+ return NULL;
+ }
}
static void *memtype_seq_start(struct seq_file *seq, loff_t *pos)
diff --git a/arch/x86/mm/pat_internal.h b/arch/x86/mm/pat_internal.h
new file mode 100644
index 000000000000..77e5ba153fac
--- /dev/null
+++ b/arch/x86/mm/pat_internal.h
@@ -0,0 +1,46 @@
+#ifndef __PAT_INTERNAL_H_
+#define __PAT_INTERNAL_H_
+
+extern int pat_debug_enable;
+
+#define dprintk(fmt, arg...) \
+ do { if (pat_debug_enable) printk(KERN_INFO fmt, ##arg); } while (0)
+
+struct memtype {
+ u64 start;
+ u64 end;
+ u64 subtree_max_end;
+ unsigned long type;
+ struct rb_node rb;
+};
+
+static inline char *cattr_name(unsigned long flags)
+{
+ switch (flags & _PAGE_CACHE_MASK) {
+ case _PAGE_CACHE_UC: return "uncached";
+ case _PAGE_CACHE_UC_MINUS: return "uncached-minus";
+ case _PAGE_CACHE_WB: return "write-back";
+ case _PAGE_CACHE_WC: return "write-combining";
+ default: return "broken";
+ }
+}
+
+#ifdef CONFIG_X86_PAT
+extern int rbt_memtype_check_insert(struct memtype *new,
+ unsigned long *new_type);
+extern struct memtype *rbt_memtype_erase(u64 start, u64 end);
+extern struct memtype *rbt_memtype_lookup(u64 addr);
+extern int rbt_memtype_copy_nth_element(struct memtype *out, loff_t pos);
+#else
+static inline int rbt_memtype_check_insert(struct memtype *new,
+ unsigned long *new_type)
+{ return 0; }
+static inline struct memtype *rbt_memtype_erase(u64 start, u64 end)
+{ return NULL; }
+static inline struct memtype *rbt_memtype_lookup(u64 addr)
+{ return NULL; }
+static inline int rbt_memtype_copy_nth_element(struct memtype *out, loff_t pos)
+{ return 0; }
+#endif
+
+#endif /* __PAT_INTERNAL_H_ */
diff --git a/arch/x86/mm/pat_rbtree.c b/arch/x86/mm/pat_rbtree.c
new file mode 100644
index 000000000000..f537087bb740
--- /dev/null
+++ b/arch/x86/mm/pat_rbtree.c
@@ -0,0 +1,274 @@
+/*
+ * Handle caching attributes in page tables (PAT)
+ *
+ * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
+ * Suresh B Siddha <suresh.b.siddha@intel.com>
+ *
+ * Interval tree (augmented rbtree) used to store the PAT memory type
+ * reservations.
+ */
+
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/rbtree.h>
+#include <linux/sched.h>
+#include <linux/gfp.h>
+
+#include <asm/pgtable.h>
+#include <asm/pat.h>
+
+#include "pat_internal.h"
+
+/*
+ * The memtype tree keeps track of memory type for specific
+ * physical memory areas. Without proper tracking, conflicting memory
+ * types in different mappings can cause CPU cache corruption.
+ *
+ * The tree is an interval tree (augmented rbtree) with tree ordered
+ * on starting address. Tree can contain multiple entries for
+ * different regions which overlap. All the aliases have the same
+ * cache attributes of course.
+ *
+ * memtype_lock protects the rbtree.
+ */
+
+static void memtype_rb_augment_cb(struct rb_node *node);
+static struct rb_root memtype_rbroot = RB_AUGMENT_ROOT(&memtype_rb_augment_cb);
+
+static int is_node_overlap(struct memtype *node, u64 start, u64 end)
+{
+ if (node->start >= end || node->end <= start)
+ return 0;
+
+ return 1;
+}
+
+static u64 get_subtree_max_end(struct rb_node *node)
+{
+ u64 ret = 0;
+ if (node) {
+ struct memtype *data = container_of(node, struct memtype, rb);
+ ret = data->subtree_max_end;
+ }
+ return ret;
+}
+
+/* Update 'subtree_max_end' for a node, based on node and its children */
+static void update_node_max_end(struct rb_node *node)
+{
+ struct memtype *data;
+ u64 max_end, child_max_end;
+
+ if (!node)
+ return;
+
+ data = container_of(node, struct memtype, rb);
+ max_end = data->end;
+
+ child_max_end = get_subtree_max_end(node->rb_right);
+ if (child_max_end > max_end)
+ max_end = child_max_end;
+
+ child_max_end = get_subtree_max_end(node->rb_left);
+ if (child_max_end > max_end)
+ max_end = child_max_end;
+
+ data->subtree_max_end = max_end;
+}
+
+/* Update 'subtree_max_end' for a node and all its ancestors */
+static void update_path_max_end(struct rb_node *node)
+{
+ u64 old_max_end, new_max_end;
+
+ while (node) {
+ struct memtype *data = container_of(node, struct memtype, rb);
+
+ old_max_end = data->subtree_max_end;
+ update_node_max_end(node);
+ new_max_end = data->subtree_max_end;
+
+ if (new_max_end == old_max_end)
+ break;
+
+ node = rb_parent(node);
+ }
+}
+
+/* Find the first (lowest start addr) overlapping range from rb tree */
+static struct memtype *memtype_rb_lowest_match(struct rb_root *root,
+ u64 start, u64 end)
+{
+ struct rb_node *node = root->rb_node;
+ struct memtype *last_lower = NULL;
+
+ while (node) {
+ struct memtype *data = container_of(node, struct memtype, rb);
+
+ if (get_subtree_max_end(node->rb_left) > start) {
+ /* Lowest overlap if any must be on left side */
+ node = node->rb_left;
+ } else if (is_node_overlap(data, start, end)) {
+ last_lower = data;
+ break;
+ } else if (start >= data->start) {
+ /* Lowest overlap if any must be on right side */
+ node = node->rb_right;
+ } else {
+ break;
+ }
+ }
+ return last_lower; /* Returns NULL if there is no overlap */
+}
+
+static struct memtype *memtype_rb_exact_match(struct rb_root *root,
+ u64 start, u64 end)
+{
+ struct memtype *match;
+
+ match = memtype_rb_lowest_match(root, start, end);
+ while (match != NULL && match->start < end) {
+ struct rb_node *node;
+
+ if (match->start == start && match->end == end)
+ return match;
+
+ node = rb_next(&match->rb);
+ if (node)
+ match = container_of(node, struct memtype, rb);
+ else
+ match = NULL;
+ }
+
+ return NULL; /* Returns NULL if there is no exact match */
+}
+
+static int memtype_rb_check_conflict(struct rb_root *root,
+ u64 start, u64 end,
+ unsigned long reqtype, unsigned long *newtype)
+{
+ struct rb_node *node;
+ struct memtype *match;
+ int found_type = reqtype;
+
+ match = memtype_rb_lowest_match(&memtype_rbroot, start, end);
+ if (match == NULL)
+ goto success;
+
+ if (match->type != found_type && newtype == NULL)
+ goto failure;
+
+ dprintk("Overlap at 0x%Lx-0x%Lx\n", match->start, match->end);
+ found_type = match->type;
+
+ node = rb_next(&match->rb);
+ while (node) {
+ match = container_of(node, struct memtype, rb);
+
+ if (match->start >= end) /* Checked all possible matches */
+ goto success;
+
+ if (is_node_overlap(match, start, end) &&
+ match->type != found_type) {
+ goto failure;
+ }
+
+ node = rb_next(&match->rb);
+ }
+success:
+ if (newtype)
+ *newtype = found_type;
+
+ return 0;
+
+failure:
+ printk(KERN_INFO "%s:%d conflicting memory types "
+ "%Lx-%Lx %s<->%s\n", current->comm, current->pid, start,
+ end, cattr_name(found_type), cattr_name(match->type));
+ return -EBUSY;
+}
+
+static void memtype_rb_augment_cb(struct rb_node *node)
+{
+ if (node)
+ update_path_max_end(node);
+}
+
+static void memtype_rb_insert(struct rb_root *root, struct memtype *newdata)
+{
+ struct rb_node **node = &(root->rb_node);
+ struct rb_node *parent = NULL;
+
+ while (*node) {
+ struct memtype *data = container_of(*node, struct memtype, rb);
+
+ parent = *node;
+ if (newdata->start <= data->start)
+ node = &((*node)->rb_left);
+ else if (newdata->start > data->start)
+ node = &((*node)->rb_right);
+ }
+
+ rb_link_node(&newdata->rb, parent, node);
+ rb_insert_color(&newdata->rb, root);
+}
+
+int rbt_memtype_check_insert(struct memtype *new, unsigned long *ret_type)
+{
+ int err = 0;
+
+ err = memtype_rb_check_conflict(&memtype_rbroot, new->start, new->end,
+ new->type, ret_type);
+
+ if (!err) {
+ if (ret_type)
+ new->type = *ret_type;
+
+ memtype_rb_insert(&memtype_rbroot, new);
+ }
+ return err;
+}
+
+struct memtype *rbt_memtype_erase(u64 start, u64 end)
+{
+ struct memtype *data;
+
+ data = memtype_rb_exact_match(&memtype_rbroot, start, end);
+ if (!data)
+ goto out;
+
+ rb_erase(&data->rb, &memtype_rbroot);
+out:
+ return data;
+}
+
+struct memtype *rbt_memtype_lookup(u64 addr)
+{
+ struct memtype *data;
+ data = memtype_rb_lowest_match(&memtype_rbroot, addr, addr + PAGE_SIZE);
+ return data;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+int rbt_memtype_copy_nth_element(struct memtype *out, loff_t pos)
+{
+ struct rb_node *node;
+ int i = 1;
+
+ node = rb_first(&memtype_rbroot);
+ while (node && pos != i) {
+ node = rb_next(node);
+ i++;
+ }
+
+ if (node) { /* pos == i */
+ struct memtype *this = container_of(node, struct memtype, rb);
+ *out = *this;
+ return 0;
+ } else {
+ return 1;
+ }
+}
+#endif
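rbt_memtype_lookup() and friends rely on the subtree_max_end augmentation: every node caches the largest end address in its subtree, so the search can prune any subtree whose ranges all finish at or before the query start. A self-contained sketch of that pruning rule, mirroring memtype_rb_lowest_match() above (u64 as in <linux/types.h>):

struct ival {
	u64 start, end;
	u64 subtree_max_end;	/* max 'end' over this node and its children */
	struct ival *left, *right;
};

static struct ival *lowest_overlap(struct ival *n, u64 start, u64 end)
{
	while (n) {
		if (n->left && n->left->subtree_max_end > start)
			n = n->left;	/* lowest overlap, if any, is on the left */
		else if (start < n->end && end > n->start)
			return n;	/* this node overlaps [start, end) */
		else if (start >= n->start)
			n = n->right;	/* only the right subtree can still overlap */
		else
			break;
	}
	return NULL;
}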
diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
index df3d5c861cda..308e32570d84 100644
--- a/arch/x86/mm/pf_in.c
+++ b/arch/x86/mm/pf_in.c
@@ -34,7 +34,7 @@
/* IA32 Manual 3, 2-1 */
static unsigned char prefix_codes[] = {
0xF0, 0xF2, 0xF3, 0x2E, 0x36, 0x3E, 0x26, 0x64,
- 0x65, 0x2E, 0x3E, 0x66, 0x67
+ 0x65, 0x66, 0x67
};
/* IA32 Manual 3, 3-432*/
static unsigned int reg_rop[] = {
diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
index 792854003ed3..cac718499256 100644
--- a/arch/x86/mm/pgtable_32.c
+++ b/arch/x86/mm/pgtable_32.c
@@ -9,7 +9,6 @@
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>
-#include <linux/quicklist.h>
#include <asm/system.h>
#include <asm/pgtable.h>
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c
index 28c68762648f..f9897f7a9ef1 100644
--- a/arch/x86/mm/srat_64.c
+++ b/arch/x86/mm/srat_64.c
@@ -363,6 +363,54 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end)
for (i = 0; i < MAX_NUMNODES; i++)
cutoff_node(i, start, end);
+ /*
+ * Join together blocks on the same node, holes between
+ * which don't overlap with memory on other nodes.
+ */
+ for (i = 0; i < num_node_memblks; ++i) {
+ int j, k;
+
+ for (j = i + 1; j < num_node_memblks; ++j) {
+ unsigned long start, end;
+
+ if (memblk_nodeid[i] != memblk_nodeid[j])
+ continue;
+ start = min(node_memblk_range[i].end,
+ node_memblk_range[j].end);
+ end = max(node_memblk_range[i].start,
+ node_memblk_range[j].start);
+ for (k = 0; k < num_node_memblks; ++k) {
+ if (memblk_nodeid[i] == memblk_nodeid[k])
+ continue;
+ if (start < node_memblk_range[k].end &&
+ end > node_memblk_range[k].start)
+ break;
+ }
+ if (k < num_node_memblks)
+ continue;
+ start = min(node_memblk_range[i].start,
+ node_memblk_range[j].start);
+ end = max(node_memblk_range[i].end,
+ node_memblk_range[j].end);
+ printk(KERN_INFO "SRAT: Node %d "
+ "[%Lx,%Lx) + [%Lx,%Lx) -> [%lx,%lx)\n",
+ memblk_nodeid[i],
+ node_memblk_range[i].start,
+ node_memblk_range[i].end,
+ node_memblk_range[j].start,
+ node_memblk_range[j].end,
+ start, end);
+ node_memblk_range[i].start = start;
+ node_memblk_range[i].end = end;
+ k = --num_node_memblks - j;
+ memmove(memblk_nodeid + j, memblk_nodeid + j+1,
+ k * sizeof(*memblk_nodeid));
+ memmove(node_memblk_range + j, node_memblk_range + j+1,
+ k * sizeof(*node_memblk_range));
+ --j;
+ }
+ }
+
memnode_shift = compute_hash_shift(node_memblk_range, num_node_memblks,
memblk_nodeid);
if (memnode_shift < 0) {
@@ -461,7 +509,8 @@ void __init acpi_fake_nodes(const struct bootnode *fake_nodes, int num_nodes)
* node, it must now point to the fake node ID.
*/
for (j = 0; j < MAX_LOCAL_APIC; j++)
- if (apicid_to_node[j] == nid)
+ if (apicid_to_node[j] == nid &&
+ fake_apicid_to_node[j] == NUMA_NO_NODE)
fake_apicid_to_node[j] = i;
}
for (i = 0; i < num_nodes; i++)
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index 2c505ee71014..b28d2f1253bb 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -31,8 +31,9 @@ static struct op_x86_model_spec *model;
static DEFINE_PER_CPU(struct op_msrs, cpu_msrs);
static DEFINE_PER_CPU(unsigned long, saved_lvtpc);
-/* 0 == registered but off, 1 == registered and on */
-static int nmi_enabled = 0;
+/* must be protected with get_online_cpus()/put_online_cpus(): */
+static int nmi_enabled;
+static int ctr_running;
struct op_counter_config counter_config[OP_MAX_COUNTER];
@@ -61,12 +62,16 @@ static int profile_exceptions_notify(struct notifier_block *self,
{
struct die_args *args = (struct die_args *)data;
int ret = NOTIFY_DONE;
- int cpu = smp_processor_id();
switch (val) {
case DIE_NMI:
case DIE_NMI_IPI:
- model->check_ctrs(args->regs, &per_cpu(cpu_msrs, cpu));
+ if (ctr_running)
+ model->check_ctrs(args->regs, &__get_cpu_var(cpu_msrs));
+ else if (!nmi_enabled)
+ break;
+ else
+ model->stop(&__get_cpu_var(cpu_msrs));
ret = NOTIFY_STOP;
break;
default:
@@ -95,24 +100,36 @@ static void nmi_cpu_save_registers(struct op_msrs *msrs)
static void nmi_cpu_start(void *dummy)
{
struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
- model->start(msrs);
+ if (!msrs->controls)
+ WARN_ON_ONCE(1);
+ else
+ model->start(msrs);
}
static int nmi_start(void)
{
+ get_online_cpus();
on_each_cpu(nmi_cpu_start, NULL, 1);
+ ctr_running = 1;
+ put_online_cpus();
return 0;
}
static void nmi_cpu_stop(void *dummy)
{
struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
- model->stop(msrs);
+ if (!msrs->controls)
+ WARN_ON_ONCE(1);
+ else
+ model->stop(msrs);
}
static void nmi_stop(void)
{
+ get_online_cpus();
on_each_cpu(nmi_cpu_stop, NULL, 1);
+ ctr_running = 0;
+ put_online_cpus();
}
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
@@ -252,7 +269,10 @@ static int nmi_switch_event(void)
if (nmi_multiplex_on() < 0)
return -EINVAL; /* not necessary */
- on_each_cpu(nmi_cpu_switch, NULL, 1);
+ get_online_cpus();
+ if (ctr_running)
+ on_each_cpu(nmi_cpu_switch, NULL, 1);
+ put_online_cpus();
return 0;
}
@@ -295,6 +315,7 @@ static void free_msrs(void)
kfree(per_cpu(cpu_msrs, i).controls);
per_cpu(cpu_msrs, i).controls = NULL;
}
+ nmi_shutdown_mux();
}
static int allocate_msrs(void)
@@ -307,14 +328,21 @@ static int allocate_msrs(void)
per_cpu(cpu_msrs, i).counters = kzalloc(counters_size,
GFP_KERNEL);
if (!per_cpu(cpu_msrs, i).counters)
- return 0;
+ goto fail;
per_cpu(cpu_msrs, i).controls = kzalloc(controls_size,
GFP_KERNEL);
if (!per_cpu(cpu_msrs, i).controls)
- return 0;
+ goto fail;
}
+ if (!nmi_setup_mux())
+ goto fail;
+
return 1;
+
+fail:
+ free_msrs();
+ return 0;
}
static void nmi_cpu_setup(void *dummy)
@@ -336,49 +364,6 @@ static struct notifier_block profile_exceptions_nb = {
.priority = 2
};
-static int nmi_setup(void)
-{
- int err = 0;
- int cpu;
-
- if (!allocate_msrs())
- err = -ENOMEM;
- else if (!nmi_setup_mux())
- err = -ENOMEM;
- else
- err = register_die_notifier(&profile_exceptions_nb);
-
- if (err) {
- free_msrs();
- nmi_shutdown_mux();
- return err;
- }
-
- /* We need to serialize save and setup for HT because the subset
- * of msrs are distinct for save and setup operations
- */
-
- /* Assume saved/restored counters are the same on all CPUs */
- model->fill_in_addresses(&per_cpu(cpu_msrs, 0));
- for_each_possible_cpu(cpu) {
- if (!cpu)
- continue;
-
- memcpy(per_cpu(cpu_msrs, cpu).counters,
- per_cpu(cpu_msrs, 0).counters,
- sizeof(struct op_msr) * model->num_counters);
-
- memcpy(per_cpu(cpu_msrs, cpu).controls,
- per_cpu(cpu_msrs, 0).controls,
- sizeof(struct op_msr) * model->num_controls);
-
- mux_clone(cpu);
- }
- on_each_cpu(nmi_cpu_setup, NULL, 1);
- nmi_enabled = 1;
- return 0;
-}
-
static void nmi_cpu_restore_registers(struct op_msrs *msrs)
{
struct op_msr *counters = msrs->counters;
@@ -412,20 +397,24 @@ static void nmi_cpu_shutdown(void *dummy)
apic_write(APIC_LVTPC, per_cpu(saved_lvtpc, cpu));
apic_write(APIC_LVTERR, v);
nmi_cpu_restore_registers(msrs);
+ if (model->cpu_down)
+ model->cpu_down();
}
-static void nmi_shutdown(void)
+static void nmi_cpu_up(void *dummy)
{
- struct op_msrs *msrs;
+ if (nmi_enabled)
+ nmi_cpu_setup(dummy);
+ if (ctr_running)
+ nmi_cpu_start(dummy);
+}
- nmi_enabled = 0;
- on_each_cpu(nmi_cpu_shutdown, NULL, 1);
- unregister_die_notifier(&profile_exceptions_nb);
- nmi_shutdown_mux();
- msrs = &get_cpu_var(cpu_msrs);
- model->shutdown(msrs);
- free_msrs();
- put_cpu_var(cpu_msrs);
+static void nmi_cpu_down(void *dummy)
+{
+ if (ctr_running)
+ nmi_cpu_stop(dummy);
+ if (nmi_enabled)
+ nmi_cpu_shutdown(dummy);
}
static int nmi_create_files(struct super_block *sb, struct dentry *root)
@@ -457,7 +446,6 @@ static int nmi_create_files(struct super_block *sb, struct dentry *root)
return 0;
}
-#ifdef CONFIG_SMP
static int oprofile_cpu_notifier(struct notifier_block *b, unsigned long action,
void *data)
{
@@ -465,10 +453,10 @@ static int oprofile_cpu_notifier(struct notifier_block *b, unsigned long action,
switch (action) {
case CPU_DOWN_FAILED:
case CPU_ONLINE:
- smp_call_function_single(cpu, nmi_cpu_start, NULL, 0);
+ smp_call_function_single(cpu, nmi_cpu_up, NULL, 0);
break;
case CPU_DOWN_PREPARE:
- smp_call_function_single(cpu, nmi_cpu_stop, NULL, 1);
+ smp_call_function_single(cpu, nmi_cpu_down, NULL, 1);
break;
}
return NOTIFY_DONE;
@@ -477,7 +465,75 @@ static int oprofile_cpu_notifier(struct notifier_block *b, unsigned long action,
static struct notifier_block oprofile_cpu_nb = {
.notifier_call = oprofile_cpu_notifier
};
-#endif
+
+static int nmi_setup(void)
+{
+ int err = 0;
+ int cpu;
+
+ if (!allocate_msrs())
+ return -ENOMEM;
+
+ /* We need to serialize save and setup for HT because the subset
+ * of msrs are distinct for save and setup operations
+ */
+
+ /* Assume saved/restored counters are the same on all CPUs */
+ err = model->fill_in_addresses(&per_cpu(cpu_msrs, 0));
+ if (err)
+ goto fail;
+
+ for_each_possible_cpu(cpu) {
+ if (!cpu)
+ continue;
+
+ memcpy(per_cpu(cpu_msrs, cpu).counters,
+ per_cpu(cpu_msrs, 0).counters,
+ sizeof(struct op_msr) * model->num_counters);
+
+ memcpy(per_cpu(cpu_msrs, cpu).controls,
+ per_cpu(cpu_msrs, 0).controls,
+ sizeof(struct op_msr) * model->num_controls);
+
+ mux_clone(cpu);
+ }
+
+ nmi_enabled = 0;
+ ctr_running = 0;
+ barrier();
+ err = register_die_notifier(&profile_exceptions_nb);
+ if (err)
+ goto fail;
+
+ get_online_cpus();
+ register_cpu_notifier(&oprofile_cpu_nb);
+ on_each_cpu(nmi_cpu_setup, NULL, 1);
+ nmi_enabled = 1;
+ put_online_cpus();
+
+ return 0;
+fail:
+ free_msrs();
+ return err;
+}
+
+static void nmi_shutdown(void)
+{
+ struct op_msrs *msrs;
+
+ get_online_cpus();
+ unregister_cpu_notifier(&oprofile_cpu_nb);
+ on_each_cpu(nmi_cpu_shutdown, NULL, 1);
+ nmi_enabled = 0;
+ ctr_running = 0;
+ put_online_cpus();
+ barrier();
+ unregister_die_notifier(&profile_exceptions_nb);
+ msrs = &get_cpu_var(cpu_msrs);
+ model->shutdown(msrs);
+ free_msrs();
+ put_cpu_var(cpu_msrs);
+}
#ifdef CONFIG_PM
@@ -687,9 +743,6 @@ int __init op_nmi_init(struct oprofile_operations *ops)
return -ENODEV;
}
-#ifdef CONFIG_SMP
- register_cpu_notifier(&oprofile_cpu_nb);
-#endif
/* default values, can be overwritten by model */
ops->create_files = nmi_create_files;
ops->setup = nmi_setup;
@@ -716,12 +769,6 @@ int __init op_nmi_init(struct oprofile_operations *ops)
void op_nmi_exit(void)
{
- if (using_nmi) {
+ if (using_nmi)
exit_sysfs();
-#ifdef CONFIG_SMP
- unregister_cpu_notifier(&oprofile_cpu_nb);
-#endif
- }
- if (model->exit)
- model->exit();
}
diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
index 090cbbec7dbd..b67a6b5aa8d4 100644
--- a/arch/x86/oprofile/op_model_amd.c
+++ b/arch/x86/oprofile/op_model_amd.c
@@ -30,13 +30,10 @@
#include "op_counter.h"
#define NUM_COUNTERS 4
-#define NUM_CONTROLS 4
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
#define NUM_VIRT_COUNTERS 32
-#define NUM_VIRT_CONTROLS 32
#else
#define NUM_VIRT_COUNTERS NUM_COUNTERS
-#define NUM_VIRT_CONTROLS NUM_CONTROLS
#endif
#define OP_EVENT_MASK 0x0FFF
@@ -105,102 +102,6 @@ static u32 get_ibs_caps(void)
return ibs_caps;
}
-#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
-
-static void op_mux_switch_ctrl(struct op_x86_model_spec const *model,
- struct op_msrs const * const msrs)
-{
- u64 val;
- int i;
-
- /* enable active counters */
- for (i = 0; i < NUM_COUNTERS; ++i) {
- int virt = op_x86_phys_to_virt(i);
- if (!reset_value[virt])
- continue;
- rdmsrl(msrs->controls[i].addr, val);
- val &= model->reserved;
- val |= op_x86_get_ctrl(model, &counter_config[virt]);
- wrmsrl(msrs->controls[i].addr, val);
- }
-}
-
-#endif
-
-/* functions for op_amd_spec */
-
-static void op_amd_fill_in_addresses(struct op_msrs * const msrs)
-{
- int i;
-
- for (i = 0; i < NUM_COUNTERS; i++) {
- if (reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i))
- msrs->counters[i].addr = MSR_K7_PERFCTR0 + i;
- }
-
- for (i = 0; i < NUM_CONTROLS; i++) {
- if (reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i))
- msrs->controls[i].addr = MSR_K7_EVNTSEL0 + i;
- }
-}
-
-static void op_amd_setup_ctrs(struct op_x86_model_spec const *model,
- struct op_msrs const * const msrs)
-{
- u64 val;
- int i;
-
- /* setup reset_value */
- for (i = 0; i < NUM_VIRT_COUNTERS; ++i) {
- if (counter_config[i].enabled
- && msrs->counters[op_x86_virt_to_phys(i)].addr)
- reset_value[i] = counter_config[i].count;
- else
- reset_value[i] = 0;
- }
-
- /* clear all counters */
- for (i = 0; i < NUM_CONTROLS; ++i) {
- if (unlikely(!msrs->controls[i].addr)) {
- if (counter_config[i].enabled && !smp_processor_id())
- /*
- * counter is reserved, this is on all
- * cpus, so report only for cpu #0
- */
- op_x86_warn_reserved(i);
- continue;
- }
- rdmsrl(msrs->controls[i].addr, val);
- if (val & ARCH_PERFMON_EVENTSEL_ENABLE)
- op_x86_warn_in_use(i);
- val &= model->reserved;
- wrmsrl(msrs->controls[i].addr, val);
- }
-
- /* avoid a false detection of ctr overflows in NMI handler */
- for (i = 0; i < NUM_COUNTERS; ++i) {
- if (unlikely(!msrs->counters[i].addr))
- continue;
- wrmsrl(msrs->counters[i].addr, -1LL);
- }
-
- /* enable active counters */
- for (i = 0; i < NUM_COUNTERS; ++i) {
- int virt = op_x86_phys_to_virt(i);
- if (!reset_value[virt])
- continue;
-
- /* setup counter registers */
- wrmsrl(msrs->counters[i].addr, -(u64)reset_value[virt]);
-
- /* setup control registers */
- rdmsrl(msrs->controls[i].addr, val);
- val &= model->reserved;
- val |= op_x86_get_ctrl(model, &counter_config[virt]);
- wrmsrl(msrs->controls[i].addr, val);
- }
-}
-
/*
* 16-bit Linear Feedback Shift Register (LFSR)
*
@@ -365,6 +266,125 @@ static void op_amd_stop_ibs(void)
wrmsrl(MSR_AMD64_IBSOPCTL, 0);
}
+#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
+
+static void op_mux_switch_ctrl(struct op_x86_model_spec const *model,
+ struct op_msrs const * const msrs)
+{
+ u64 val;
+ int i;
+
+ /* enable active counters */
+ for (i = 0; i < NUM_COUNTERS; ++i) {
+ int virt = op_x86_phys_to_virt(i);
+ if (!reset_value[virt])
+ continue;
+ rdmsrl(msrs->controls[i].addr, val);
+ val &= model->reserved;
+ val |= op_x86_get_ctrl(model, &counter_config[virt]);
+ wrmsrl(msrs->controls[i].addr, val);
+ }
+}
+
+#endif
+
+/* functions for op_amd_spec */
+
+static void op_amd_shutdown(struct op_msrs const * const msrs)
+{
+ int i;
+
+ for (i = 0; i < NUM_COUNTERS; ++i) {
+ if (!msrs->counters[i].addr)
+ continue;
+ release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
+ release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
+ }
+}
+
+static int op_amd_fill_in_addresses(struct op_msrs * const msrs)
+{
+ int i;
+
+ for (i = 0; i < NUM_COUNTERS; i++) {
+ if (!reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i))
+ goto fail;
+ if (!reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i)) {
+ release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
+ goto fail;
+ }
+ /* both registers must be reserved */
+ msrs->counters[i].addr = MSR_K7_PERFCTR0 + i;
+ msrs->controls[i].addr = MSR_K7_EVNTSEL0 + i;
+ continue;
+ fail:
+ if (!counter_config[i].enabled)
+ continue;
+ op_x86_warn_reserved(i);
+ op_amd_shutdown(msrs);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static void op_amd_setup_ctrs(struct op_x86_model_spec const *model,
+ struct op_msrs const * const msrs)
+{
+ u64 val;
+ int i;
+
+ /* setup reset_value */
+ for (i = 0; i < NUM_VIRT_COUNTERS; ++i) {
+ if (counter_config[i].enabled
+ && msrs->counters[op_x86_virt_to_phys(i)].addr)
+ reset_value[i] = counter_config[i].count;
+ else
+ reset_value[i] = 0;
+ }
+
+ /* clear all counters */
+ for (i = 0; i < NUM_COUNTERS; ++i) {
+ if (!msrs->controls[i].addr)
+ continue;
+ rdmsrl(msrs->controls[i].addr, val);
+ if (val & ARCH_PERFMON_EVENTSEL_ENABLE)
+ op_x86_warn_in_use(i);
+ val &= model->reserved;
+ wrmsrl(msrs->controls[i].addr, val);
+ /*
+ * avoid a false detection of ctr overflows in NMI
+ * handler
+ */
+ wrmsrl(msrs->counters[i].addr, -1LL);
+ }
+
+ /* enable active counters */
+ for (i = 0; i < NUM_COUNTERS; ++i) {
+ int virt = op_x86_phys_to_virt(i);
+ if (!reset_value[virt])
+ continue;
+
+ /* setup counter registers */
+ wrmsrl(msrs->counters[i].addr, -(u64)reset_value[virt]);
+
+ /* setup control registers */
+ rdmsrl(msrs->controls[i].addr, val);
+ val &= model->reserved;
+ val |= op_x86_get_ctrl(model, &counter_config[virt]);
+ wrmsrl(msrs->controls[i].addr, val);
+ }
+
+ if (ibs_caps)
+ setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_NMI, 0);
+}
+
+static void op_amd_cpu_shutdown(void)
+{
+ if (ibs_caps)
+ setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_FIX, 1);
+}
+
static int op_amd_check_ctrs(struct pt_regs * const regs,
struct op_msrs const * const msrs)
{
@@ -425,42 +445,16 @@ static void op_amd_stop(struct op_msrs const * const msrs)
op_amd_stop_ibs();
}
-static void op_amd_shutdown(struct op_msrs const * const msrs)
-{
- int i;
-
- for (i = 0; i < NUM_COUNTERS; ++i) {
- if (msrs->counters[i].addr)
- release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
- }
- for (i = 0; i < NUM_CONTROLS; ++i) {
- if (msrs->controls[i].addr)
- release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
- }
-}
-
-static u8 ibs_eilvt_off;
-
-static inline void apic_init_ibs_nmi_per_cpu(void *arg)
-{
- ibs_eilvt_off = setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_NMI, 0);
-}
-
-static inline void apic_clear_ibs_nmi_per_cpu(void *arg)
-{
- setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_FIX, 1);
-}
-
-static int init_ibs_nmi(void)
+static int __init_ibs_nmi(void)
{
#define IBSCTL_LVTOFFSETVAL (1 << 8)
#define IBSCTL 0x1cc
struct pci_dev *cpu_cfg;
int nodes;
u32 value = 0;
+ u8 ibs_eilvt_off;
- /* per CPU setup */
- on_each_cpu(apic_init_ibs_nmi_per_cpu, NULL, 1);
+ ibs_eilvt_off = setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_FIX, 1);
nodes = 0;
cpu_cfg = NULL;
@@ -490,22 +484,15 @@ static int init_ibs_nmi(void)
return 0;
}
-/* uninitialize the APIC for the IBS interrupts if needed */
-static void clear_ibs_nmi(void)
-{
- if (ibs_caps)
- on_each_cpu(apic_clear_ibs_nmi_per_cpu, NULL, 1);
-}
-
/* initialize the APIC for the IBS interrupts if available */
-static void ibs_init(void)
+static void init_ibs(void)
{
ibs_caps = get_ibs_caps();
if (!ibs_caps)
return;
- if (init_ibs_nmi()) {
+ if (__init_ibs_nmi()) {
ibs_caps = 0;
return;
}
@@ -514,14 +501,6 @@ static void ibs_init(void)
(unsigned)ibs_caps);
}
-static void ibs_exit(void)
-{
- if (!ibs_caps)
- return;
-
- clear_ibs_nmi();
-}
-
static int (*create_arch_files)(struct super_block *sb, struct dentry *root);
static int setup_ibs_files(struct super_block *sb, struct dentry *root)
@@ -570,27 +549,22 @@ static int setup_ibs_files(struct super_block *sb, struct dentry *root)
static int op_amd_init(struct oprofile_operations *ops)
{
- ibs_init();
+ init_ibs();
create_arch_files = ops->create_files;
ops->create_files = setup_ibs_files;
return 0;
}
-static void op_amd_exit(void)
-{
- ibs_exit();
-}
-
struct op_x86_model_spec op_amd_spec = {
.num_counters = NUM_COUNTERS,
- .num_controls = NUM_CONTROLS,
+ .num_controls = NUM_COUNTERS,
.num_virt_counters = NUM_VIRT_COUNTERS,
.reserved = MSR_AMD_EVENTSEL_RESERVED,
.event_mask = OP_EVENT_MASK,
.init = op_amd_init,
- .exit = op_amd_exit,
.fill_in_addresses = &op_amd_fill_in_addresses,
.setup_ctrs = &op_amd_setup_ctrs,
+ .cpu_down = &op_amd_cpu_shutdown,
.check_ctrs = &op_amd_check_ctrs,
.start = &op_amd_start,
.stop = &op_amd_stop,
diff --git a/arch/x86/oprofile/op_model_p4.c b/arch/x86/oprofile/op_model_p4.c
index e6a160a4684a..182558dd5515 100644
--- a/arch/x86/oprofile/op_model_p4.c
+++ b/arch/x86/oprofile/op_model_p4.c
@@ -385,8 +385,26 @@ static unsigned int get_stagger(void)
static unsigned long reset_value[NUM_COUNTERS_NON_HT];
+static void p4_shutdown(struct op_msrs const * const msrs)
+{
+ int i;
-static void p4_fill_in_addresses(struct op_msrs * const msrs)
+ for (i = 0; i < num_counters; ++i) {
+ if (msrs->counters[i].addr)
+ release_perfctr_nmi(msrs->counters[i].addr);
+ }
+ /*
+ * some of the control registers are specially reserved in
+ * conjunction with the counter registers (hence the starting offset).
+ * This saves a few bits.
+ */
+ for (i = num_counters; i < num_controls; ++i) {
+ if (msrs->controls[i].addr)
+ release_evntsel_nmi(msrs->controls[i].addr);
+ }
+}
+
+static int p4_fill_in_addresses(struct op_msrs * const msrs)
{
unsigned int i;
unsigned int addr, cccraddr, stag;
@@ -468,6 +486,18 @@ static void p4_fill_in_addresses(struct op_msrs * const msrs)
msrs->controls[i++].addr = MSR_P4_CRU_ESCR5;
}
}
+
+ for (i = 0; i < num_counters; ++i) {
+ if (!counter_config[i].enabled)
+ continue;
+ if (msrs->controls[i].addr)
+ continue;
+ op_x86_warn_reserved(i);
+ p4_shutdown(msrs);
+ return -EBUSY;
+ }
+
+ return 0;
}
@@ -668,26 +698,6 @@ static void p4_stop(struct op_msrs const * const msrs)
}
}
-static void p4_shutdown(struct op_msrs const * const msrs)
-{
- int i;
-
- for (i = 0; i < num_counters; ++i) {
- if (msrs->counters[i].addr)
- release_perfctr_nmi(msrs->counters[i].addr);
- }
- /*
- * some of the control registers are specially reserved in
- * conjunction with the counter registers (hence the starting offset).
- * This saves a few bits.
- */
- for (i = num_counters; i < num_controls; ++i) {
- if (msrs->controls[i].addr)
- release_evntsel_nmi(msrs->controls[i].addr);
- }
-}
-
-
#ifdef CONFIG_SMP
struct op_x86_model_spec op_p4_ht2_spec = {
.num_counters = NUM_COUNTERS_HT2,
diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
index 2bf90fafa7b5..d769cda54082 100644
--- a/arch/x86/oprofile/op_model_ppro.c
+++ b/arch/x86/oprofile/op_model_ppro.c
@@ -30,19 +30,46 @@ static int counter_width = 32;
static u64 *reset_value;
-static void ppro_fill_in_addresses(struct op_msrs * const msrs)
+static void ppro_shutdown(struct op_msrs const * const msrs)
{
int i;
- for (i = 0; i < num_counters; i++) {
- if (reserve_perfctr_nmi(MSR_P6_PERFCTR0 + i))
- msrs->counters[i].addr = MSR_P6_PERFCTR0 + i;
+ for (i = 0; i < num_counters; ++i) {
+ if (!msrs->counters[i].addr)
+ continue;
+ release_perfctr_nmi(MSR_P6_PERFCTR0 + i);
+ release_evntsel_nmi(MSR_P6_EVNTSEL0 + i);
+ }
+ if (reset_value) {
+ kfree(reset_value);
+ reset_value = NULL;
}
+}
+
+static int ppro_fill_in_addresses(struct op_msrs * const msrs)
+{
+ int i;
for (i = 0; i < num_counters; i++) {
- if (reserve_evntsel_nmi(MSR_P6_EVNTSEL0 + i))
- msrs->controls[i].addr = MSR_P6_EVNTSEL0 + i;
+ if (!reserve_perfctr_nmi(MSR_P6_PERFCTR0 + i))
+ goto fail;
+ if (!reserve_evntsel_nmi(MSR_P6_EVNTSEL0 + i)) {
+ release_perfctr_nmi(MSR_P6_PERFCTR0 + i);
+ goto fail;
+ }
+ /* both registers must be reserved */
+ msrs->counters[i].addr = MSR_P6_PERFCTR0 + i;
+ msrs->controls[i].addr = MSR_P6_EVNTSEL0 + i;
+ continue;
+ fail:
+ if (!counter_config[i].enabled)
+ continue;
+ op_x86_warn_reserved(i);
+ ppro_shutdown(msrs);
+ return -EBUSY;
}
+
+ return 0;
}
@@ -78,26 +105,17 @@ static void ppro_setup_ctrs(struct op_x86_model_spec const *model,
/* clear all counters */
for (i = 0; i < num_counters; ++i) {
- if (unlikely(!msrs->controls[i].addr)) {
- if (counter_config[i].enabled && !smp_processor_id())
- /*
- * counter is reserved, this is on all
- * cpus, so report only for cpu #0
- */
- op_x86_warn_reserved(i);
+ if (!msrs->controls[i].addr)
continue;
- }
rdmsrl(msrs->controls[i].addr, val);
if (val & ARCH_PERFMON_EVENTSEL_ENABLE)
op_x86_warn_in_use(i);
val &= model->reserved;
wrmsrl(msrs->controls[i].addr, val);
- }
-
- /* avoid a false detection of ctr overflows in NMI handler */
- for (i = 0; i < num_counters; ++i) {
- if (unlikely(!msrs->counters[i].addr))
- continue;
+ /*
+ * avoid a false detection of ctr overflows in NMI
+ * handler
+ */
wrmsrl(msrs->counters[i].addr, -1LL);
}
@@ -189,25 +207,6 @@ static void ppro_stop(struct op_msrs const * const msrs)
}
}
-static void ppro_shutdown(struct op_msrs const * const msrs)
-{
- int i;
-
- for (i = 0; i < num_counters; ++i) {
- if (msrs->counters[i].addr)
- release_perfctr_nmi(MSR_P6_PERFCTR0 + i);
- }
- for (i = 0; i < num_counters; ++i) {
- if (msrs->controls[i].addr)
- release_evntsel_nmi(MSR_P6_EVNTSEL0 + i);
- }
- if (reset_value) {
- kfree(reset_value);
- reset_value = NULL;
- }
-}
-
-
struct op_x86_model_spec op_ppro_spec = {
.num_counters = 2,
.num_controls = 2,
@@ -239,11 +238,11 @@ static void arch_perfmon_setup_counters(void)
if (eax.split.version_id == 0 && current_cpu_data.x86 == 6 &&
current_cpu_data.x86_model == 15) {
eax.split.version_id = 2;
- eax.split.num_events = 2;
+ eax.split.num_counters = 2;
eax.split.bit_width = 40;
}
- num_counters = eax.split.num_events;
+ num_counters = eax.split.num_counters;
op_arch_perfmon_spec.num_counters = num_counters;
op_arch_perfmon_spec.num_controls = num_counters;
diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
index ff82a755edd4..89017fa1fd63 100644
--- a/arch/x86/oprofile/op_x86_model.h
+++ b/arch/x86/oprofile/op_x86_model.h
@@ -40,10 +40,10 @@ struct op_x86_model_spec {
u64 reserved;
u16 event_mask;
int (*init)(struct oprofile_operations *ops);
- void (*exit)(void);
- void (*fill_in_addresses)(struct op_msrs * const msrs);
+ int (*fill_in_addresses)(struct op_msrs * const msrs);
void (*setup_ctrs)(struct op_x86_model_spec const *model,
struct op_msrs const * const msrs);
+ void (*cpu_down)(void);
int (*check_ctrs)(struct pt_regs * const regs,
struct op_msrs const * const msrs);
void (*start)(struct op_msrs const * const msrs);
diff --git a/arch/x86/pci/Makefile b/arch/x86/pci/Makefile
index b110d97fb925..a0207a7fdf39 100644
--- a/arch/x86/pci/Makefile
+++ b/arch/x86/pci/Makefile
@@ -18,6 +18,8 @@ obj-$(CONFIG_X86_MRST) += mrst.o
obj-y += common.o early.o
obj-y += amd_bus.o bus_numa.o
+obj-$(CONFIG_PCI_CNB20LE_QUIRK) += broadcom_bus.o
+
ifeq ($(CONFIG_PCI_DEBUG),y)
EXTRA_CFLAGS += -DDEBUG
endif
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index 31930fd30ea9..2ec04c424a62 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -207,10 +207,9 @@ get_current_resources(struct acpi_device *device, int busnum,
if (!info.res)
goto res_alloc_fail;
- info.name = kmalloc(16, GFP_KERNEL);
+ info.name = kasprintf(GFP_KERNEL, "PCI Bus %04x:%02x", domain, busnum);
if (!info.name)
goto name_alloc_fail;
- sprintf(info.name, "PCI Bus %04x:%02x", domain, busnum);
info.res_num = 0;
acpi_walk_resources(device->handle, METHOD_NAME__CRS, setup_resource,
@@ -224,8 +223,11 @@ res_alloc_fail:
return;
}
-struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_device *device, int domain, int busnum)
+struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_pci_root *root)
{
+ struct acpi_device *device = root->device;
+ int domain = root->segment;
+ int busnum = root->secondary.start;
struct pci_bus *bus;
struct pci_sysdata *sd;
int node;
diff --git a/arch/x86/pci/broadcom_bus.c b/arch/x86/pci/broadcom_bus.c
new file mode 100644
index 000000000000..0846a5bbbfbd
--- /dev/null
+++ b/arch/x86/pci/broadcom_bus.c
@@ -0,0 +1,101 @@
+/*
+ * Read address ranges from a Broadcom CNB20LE Host Bridge
+ *
+ * Copyright (c) 2010 Ira W. Snyder <iws@ovro.caltech.edu>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/delay.h>
+#include <linux/dmi.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <asm/pci_x86.h>
+
+#include "bus_numa.h"
+
+static void __devinit cnb20le_res(struct pci_dev *dev)
+{
+ struct pci_root_info *info;
+ struct resource res;
+ u16 word1, word2;
+ u8 fbus, lbus;
+ int i;
+
+ /*
+ * The x86_pci_root_bus_res_quirks() function already refuses to use
+ * this information if ACPI _CRS was used. Therefore, we don't bother
+ * checking if ACPI is enabled, and just generate the information
+ * for both the ACPI _CRS and no ACPI cases.
+ */
+
+ info = &pci_root_info[pci_root_num];
+ pci_root_num++;
+
+ /* read the PCI bus numbers */
+ pci_read_config_byte(dev, 0x44, &fbus);
+ pci_read_config_byte(dev, 0x45, &lbus);
+ info->bus_min = fbus;
+ info->bus_max = lbus;
+
+ /*
+ * Add the legacy IDE ports on bus 0
+ *
+ * These do not exist anywhere in the bridge registers, AFAICT. I do
+ * not have the datasheet, so this is the best I can do.
+ */
+ if (fbus == 0) {
+ update_res(info, 0x01f0, 0x01f7, IORESOURCE_IO, 0);
+ update_res(info, 0x03f6, 0x03f6, IORESOURCE_IO, 0);
+ update_res(info, 0x0170, 0x0177, IORESOURCE_IO, 0);
+ update_res(info, 0x0376, 0x0376, IORESOURCE_IO, 0);
+ update_res(info, 0xffa0, 0xffaf, IORESOURCE_IO, 0);
+ }
+
+ /* read the non-prefetchable memory window */
+ pci_read_config_word(dev, 0xc0, &word1);
+ pci_read_config_word(dev, 0xc2, &word2);
+ if (word1 != word2) {
+ res.start = (word1 << 16) | 0x0000;
+ res.end = (word2 << 16) | 0xffff;
+ res.flags = IORESOURCE_MEM;
+ update_res(info, res.start, res.end, res.flags, 0);
+ }
+
+ /* read the prefetchable memory window */
+ pci_read_config_word(dev, 0xc4, &word1);
+ pci_read_config_word(dev, 0xc6, &word2);
+ if (word1 != word2) {
+ res.start = (word1 << 16) | 0x0000;
+ res.end = (word2 << 16) | 0xffff;
+ res.flags = IORESOURCE_MEM | IORESOURCE_PREFETCH;
+ update_res(info, res.start, res.end, res.flags, 0);
+ }
+
+ /* read the IO port window */
+ pci_read_config_word(dev, 0xd0, &word1);
+ pci_read_config_word(dev, 0xd2, &word2);
+ if (word1 != word2) {
+ res.start = word1;
+ res.end = word2;
+ res.flags = IORESOURCE_IO;
+ update_res(info, res.start, res.end, res.flags, 0);
+ }
+
+ /* print information about this host bridge */
+ res.start = fbus;
+ res.end = lbus;
+ res.flags = IORESOURCE_BUS;
+ dev_info(&dev->dev, "CNB20LE PCI Host Bridge (domain %04x %pR)\n",
+ pci_domain_nr(dev->bus), &res);
+
+ for (i = 0; i < info->res_num; i++)
+ dev_info(&dev->dev, "host bridge window %pR\n", &info->res[i]);
+}
+
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_LE,
+ cnb20le_res);
+
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index cf2e93869c48..215a27ae050d 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -76,7 +76,7 @@ struct pci_ops pci_root_ops = {
* This interrupt-safe spinlock protects all accesses to PCI
* configuration space.
*/
-DEFINE_SPINLOCK(pci_config_lock);
+DEFINE_RAW_SPINLOCK(pci_config_lock);
static int __devinit can_skip_ioresource_align(const struct dmi_system_id *d)
{
diff --git a/arch/x86/pci/direct.c b/arch/x86/pci/direct.c
index 347d882b3bb3..bd33620b0071 100644
--- a/arch/x86/pci/direct.c
+++ b/arch/x86/pci/direct.c
@@ -27,7 +27,7 @@ static int pci_conf1_read(unsigned int seg, unsigned int bus,
return -EINVAL;
}
- spin_lock_irqsave(&pci_config_lock, flags);
+ raw_spin_lock_irqsave(&pci_config_lock, flags);
outl(PCI_CONF1_ADDRESS(bus, devfn, reg), 0xCF8);
@@ -43,7 +43,7 @@ static int pci_conf1_read(unsigned int seg, unsigned int bus,
break;
}
- spin_unlock_irqrestore(&pci_config_lock, flags);
+ raw_spin_unlock_irqrestore(&pci_config_lock, flags);
return 0;
}
@@ -56,7 +56,7 @@ static int pci_conf1_write(unsigned int seg, unsigned int bus,
if ((bus > 255) || (devfn > 255) || (reg > 4095))
return -EINVAL;
- spin_lock_irqsave(&pci_config_lock, flags);
+ raw_spin_lock_irqsave(&pci_config_lock, flags);
outl(PCI_CONF1_ADDRESS(bus, devfn, reg), 0xCF8);
@@ -72,7 +72,7 @@ static int pci_conf1_write(unsigned int seg, unsigned int bus,
break;
}
- spin_unlock_irqrestore(&pci_config_lock, flags);
+ raw_spin_unlock_irqrestore(&pci_config_lock, flags);
return 0;
}
@@ -108,7 +108,7 @@ static int pci_conf2_read(unsigned int seg, unsigned int bus,
if (dev & 0x10)
return PCIBIOS_DEVICE_NOT_FOUND;
- spin_lock_irqsave(&pci_config_lock, flags);
+ raw_spin_lock_irqsave(&pci_config_lock, flags);
outb((u8)(0xF0 | (fn << 1)), 0xCF8);
outb((u8)bus, 0xCFA);
@@ -127,7 +127,7 @@ static int pci_conf2_read(unsigned int seg, unsigned int bus,
outb(0, 0xCF8);
- spin_unlock_irqrestore(&pci_config_lock, flags);
+ raw_spin_unlock_irqrestore(&pci_config_lock, flags);
return 0;
}
@@ -147,7 +147,7 @@ static int pci_conf2_write(unsigned int seg, unsigned int bus,
if (dev & 0x10)
return PCIBIOS_DEVICE_NOT_FOUND;
- spin_lock_irqsave(&pci_config_lock, flags);
+ raw_spin_lock_irqsave(&pci_config_lock, flags);
outb((u8)(0xF0 | (fn << 1)), 0xCF8);
outb((u8)bus, 0xCFA);
@@ -166,7 +166,7 @@ static int pci_conf2_write(unsigned int seg, unsigned int bus,
outb(0, 0xCF8);
- spin_unlock_irqrestore(&pci_config_lock, flags);
+ raw_spin_unlock_irqrestore(&pci_config_lock, flags);
return 0;
}
diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
index 5d362b5ba06f..9810a0f76c91 100644
--- a/arch/x86/pci/irq.c
+++ b/arch/x86/pci/irq.c
@@ -589,8 +589,6 @@ static __init int intel_router_probe(struct irq_router *r, struct pci_dev *route
case PCI_DEVICE_ID_INTEL_ICH10_1:
case PCI_DEVICE_ID_INTEL_ICH10_2:
case PCI_DEVICE_ID_INTEL_ICH10_3:
- case PCI_DEVICE_ID_INTEL_CPT_LPC1:
- case PCI_DEVICE_ID_INTEL_CPT_LPC2:
r->name = "PIIX/ICH";
r->get = pirq_piix_get;
r->set = pirq_piix_set;
@@ -605,6 +603,13 @@ static __init int intel_router_probe(struct irq_router *r, struct pci_dev *route
return 1;
}
+ if ((device >= PCI_DEVICE_ID_INTEL_CPT_LPC_MIN) &&
+ (device <= PCI_DEVICE_ID_INTEL_CPT_LPC_MAX)) {
+ r->name = "PIIX/ICH";
+ r->get = pirq_piix_get;
+ r->set = pirq_piix_set;
+ return 1;
+ }
return 0;
}
diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c
index 39b9ebe8f886..a918553ebc75 100644
--- a/arch/x86/pci/mmconfig-shared.c
+++ b/arch/x86/pci/mmconfig-shared.c
@@ -483,16 +483,17 @@ static void __init pci_mmcfg_reject_broken(int early)
list_for_each_entry(cfg, &pci_mmcfg_list, list) {
int valid = 0;
- if (!early && !acpi_disabled)
+ if (!early && !acpi_disabled) {
valid = is_mmconf_reserved(is_acpi_reserved, cfg, 0);
- if (valid)
- continue;
-
- if (!early)
- printk(KERN_ERR FW_BUG PREFIX
- "MMCONFIG at %pR not reserved in "
- "ACPI motherboard resources\n", &cfg->res);
+ if (valid)
+ continue;
+ else
+ printk(KERN_ERR FW_BUG PREFIX
+ "MMCONFIG at %pR not reserved in "
+ "ACPI motherboard resources\n",
+ &cfg->res);
+ }
/* Don't try to do this check unless configuration
type 1 is available. how about type 2 ?*/
diff --git a/arch/x86/pci/mmconfig_32.c b/arch/x86/pci/mmconfig_32.c
index 90d5fd476ed4..a3d9c54792ae 100644
--- a/arch/x86/pci/mmconfig_32.c
+++ b/arch/x86/pci/mmconfig_32.c
@@ -64,7 +64,7 @@ err: *value = -1;
if (!base)
goto err;
- spin_lock_irqsave(&pci_config_lock, flags);
+ raw_spin_lock_irqsave(&pci_config_lock, flags);
pci_exp_set_dev_base(base, bus, devfn);
@@ -79,7 +79,7 @@ err: *value = -1;
*value = mmio_config_readl(mmcfg_virt_addr + reg);
break;
}
- spin_unlock_irqrestore(&pci_config_lock, flags);
+ raw_spin_unlock_irqrestore(&pci_config_lock, flags);
return 0;
}
@@ -97,7 +97,7 @@ static int pci_mmcfg_write(unsigned int seg, unsigned int bus,
if (!base)
return -EINVAL;
- spin_lock_irqsave(&pci_config_lock, flags);
+ raw_spin_lock_irqsave(&pci_config_lock, flags);
pci_exp_set_dev_base(base, bus, devfn);
@@ -112,7 +112,7 @@ static int pci_mmcfg_write(unsigned int seg, unsigned int bus,
mmio_config_writel(mmcfg_virt_addr + reg, value);
break;
}
- spin_unlock_irqrestore(&pci_config_lock, flags);
+ raw_spin_unlock_irqrestore(&pci_config_lock, flags);
return 0;
}
diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
index 8bf2fcb88d04..7ef3a2735df3 100644
--- a/arch/x86/pci/mrst.c
+++ b/arch/x86/pci/mrst.c
@@ -109,7 +109,7 @@ static int pci_device_update_fixed(struct pci_bus *bus, unsigned int devfn,
decode++;
decode = ~(decode - 1);
} else {
- decode = ~0;
+ decode = 0;
}
/*
@@ -247,6 +247,10 @@ static void __devinit pci_fixed_bar_fixup(struct pci_dev *dev)
u32 size;
int i;
+ /* Must have extended configuration space */
+ if (dev->cfg_size < PCIE_CAP_OFFSET + 4)
+ return;
+
/* Fixup the BAR sizes for fixed BAR devices and make them unmoveable */
offset = fixed_bar_cap(dev->bus, dev->devfn);
if (!offset || PCI_DEVFN(2, 0) == dev->devfn ||
diff --git a/arch/x86/pci/numaq_32.c b/arch/x86/pci/numaq_32.c
index 8223738ad806..5c9e2458df4e 100644
--- a/arch/x86/pci/numaq_32.c
+++ b/arch/x86/pci/numaq_32.c
@@ -37,7 +37,7 @@ static int pci_conf1_mq_read(unsigned int seg, unsigned int bus,
if (!value || (bus >= MAX_MP_BUSSES) || (devfn > 255) || (reg > 255))
return -EINVAL;
- spin_lock_irqsave(&pci_config_lock, flags);
+ raw_spin_lock_irqsave(&pci_config_lock, flags);
write_cf8(bus, devfn, reg);
@@ -62,7 +62,7 @@ static int pci_conf1_mq_read(unsigned int seg, unsigned int bus,
break;
}
- spin_unlock_irqrestore(&pci_config_lock, flags);
+ raw_spin_unlock_irqrestore(&pci_config_lock, flags);
return 0;
}
@@ -76,7 +76,7 @@ static int pci_conf1_mq_write(unsigned int seg, unsigned int bus,
if ((bus >= MAX_MP_BUSSES) || (devfn > 255) || (reg > 255))
return -EINVAL;
- spin_lock_irqsave(&pci_config_lock, flags);
+ raw_spin_lock_irqsave(&pci_config_lock, flags);
write_cf8(bus, devfn, reg);
@@ -101,7 +101,7 @@ static int pci_conf1_mq_write(unsigned int seg, unsigned int bus,
break;
}
- spin_unlock_irqrestore(&pci_config_lock, flags);
+ raw_spin_unlock_irqrestore(&pci_config_lock, flags);
return 0;
}
diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
index 59a225c17b84..2492d165096a 100644
--- a/arch/x86/pci/pcbios.c
+++ b/arch/x86/pci/pcbios.c
@@ -162,7 +162,7 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
if (!value || (bus > 255) || (devfn > 255) || (reg > 255))
return -EINVAL;
- spin_lock_irqsave(&pci_config_lock, flags);
+ raw_spin_lock_irqsave(&pci_config_lock, flags);
switch (len) {
case 1:
@@ -213,7 +213,7 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
break;
}
- spin_unlock_irqrestore(&pci_config_lock, flags);
+ raw_spin_unlock_irqrestore(&pci_config_lock, flags);
return (int)((result & 0xff00) >> 8);
}
@@ -228,7 +228,7 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
if ((bus > 255) || (devfn > 255) || (reg > 255))
return -EINVAL;
- spin_lock_irqsave(&pci_config_lock, flags);
+ raw_spin_lock_irqsave(&pci_config_lock, flags);
switch (len) {
case 1:
@@ -269,7 +269,7 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
break;
}
- spin_unlock_irqrestore(&pci_config_lock, flags);
+ raw_spin_unlock_irqrestore(&pci_config_lock, flags);
return (int)((result & 0xff00) >> 8);
}
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index 32764b8880b5..b3c6c59ed302 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -476,6 +476,7 @@ void xen_timer_resume(void)
__init void xen_time_init(void)
{
int cpu = smp_processor_id();
+ struct timespec tp;
clocksource_register(&xen_clocksource);
@@ -487,9 +488,8 @@ __init void xen_time_init(void)
}
/* Set initial system time with full resolution */
- xen_read_wallclock(&xtime);
- set_normalized_timespec(&wall_to_monotonic,
- -xtime.tv_sec, -xtime.tv_nsec);
+ xen_read_wallclock(&tp);
+ do_settimeofday(&tp);
setup_force_cpu_cap(X86_FEATURE_TSC);
diff --git a/arch/xtensa/include/asm/atomic.h b/arch/xtensa/include/asm/atomic.h
index 22d6dde42619..a96a0619d0b7 100644
--- a/arch/xtensa/include/asm/atomic.h
+++ b/arch/xtensa/include/asm/atomic.h
@@ -46,7 +46,7 @@
*
* Atomically reads the value of @v.
*/
-#define atomic_read(v) ((v)->counter)
+#define atomic_read(v) (*(volatile int *)&(v)->counter)
/**
* atomic_set - set atomic variable
diff --git a/arch/xtensa/include/asm/cache.h b/arch/xtensa/include/asm/cache.h
index f04c9891142f..ed8cd3cbd499 100644
--- a/arch/xtensa/include/asm/cache.h
+++ b/arch/xtensa/include/asm/cache.h
@@ -29,5 +29,6 @@
# define CACHE_WAY_SIZE ICACHE_WAY_SIZE
#endif
+#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
#endif /* _XTENSA_CACHE_H */
diff --git a/arch/xtensa/include/asm/hardirq.h b/arch/xtensa/include/asm/hardirq.h
index 87cb19d1b10c..26664cef8f11 100644
--- a/arch/xtensa/include/asm/hardirq.h
+++ b/arch/xtensa/include/asm/hardirq.h
@@ -11,18 +11,9 @@
#ifndef _XTENSA_HARDIRQ_H
#define _XTENSA_HARDIRQ_H
-#include <linux/cache.h>
-#include <asm/irq.h>
-
-/* headers.S is sensitive to the offsets of these fields */
-typedef struct {
- unsigned int __softirq_pending;
- unsigned int __syscall_count;
- struct task_struct * __ksoftirqd_task; /* waitqueue is too large */
- unsigned int __nmi_count; /* arch dependent */
-} ____cacheline_aligned irq_cpustat_t;
-
void ack_bad_irq(unsigned int irq);
-#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
+#define ack_bad_irq ack_bad_irq
+
+#include <asm-generic/hardirq.h>
#endif /* _XTENSA_HARDIRQ_H */
diff --git a/arch/xtensa/include/asm/scatterlist.h b/arch/xtensa/include/asm/scatterlist.h
index 810080bb0a2b..b1f9fdc1d5ba 100644
--- a/arch/xtensa/include/asm/scatterlist.h
+++ b/arch/xtensa/include/asm/scatterlist.h
@@ -11,28 +11,7 @@
#ifndef _XTENSA_SCATTERLIST_H
#define _XTENSA_SCATTERLIST_H
-#include <asm/types.h>
-
-struct scatterlist {
-#ifdef CONFIG_DEBUG_SG
- unsigned long sg_magic;
-#endif
- unsigned long page_link;
- unsigned int offset;
- dma_addr_t dma_address;
- unsigned int length;
-};
-
-/*
- * These macros should be used after a pci_map_sg call has been done
- * to get bus addresses of each of the SG entries and their lengths.
- * You should only work with the number of sg entries pci_map_sg
- * returns, or alternatively stop on the first sg_dma_len(sg) which
- * is 0.
- */
-#define sg_dma_address(sg) ((sg)->dma_address)
-#define sg_dma_len(sg) ((sg)->length)
-
+#include <asm-generic/scatterlist.h>
#define ISA_DMA_THRESHOLD (~0UL)
diff --git a/arch/xtensa/include/asm/thread_info.h b/arch/xtensa/include/asm/thread_info.h
index 13165641cc51..7be8accb0b0c 100644
--- a/arch/xtensa/include/asm/thread_info.h
+++ b/arch/xtensa/include/asm/thread_info.h
@@ -129,7 +129,7 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
#define TIF_SINGLESTEP 3 /* restore singlestep on return to user mode */
#define TIF_IRET 4 /* return with iret */
-#define TIF_MEMDIE 5
+#define TIF_MEMDIE 5 /* is terminating due to OOM killer */
#define TIF_RESTORE_SIGMASK 6 /* restore signal mask in do_signal() */
#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */
#define TIF_FREEZE 17 /* is freezing for suspend */
diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c
index 8cd38484e130..c64a5d387de5 100644
--- a/arch/xtensa/kernel/irq.c
+++ b/arch/xtensa/kernel/irq.c
@@ -27,15 +27,6 @@ static unsigned int cached_irq_mask;
atomic_t irq_err_count;
/*
- * 'what should we do if we get a hw irq event on an illegal vector'.
- * each architecture has to answer this themselves.
- */
-void ack_bad_irq(unsigned int irq)
-{
- printk("unexpected IRQ trap at vector %02x\n", irq);
-}
-
-/*
* do_IRQ handles all normal device IRQ's (the special
* SMP cross-CPU interrupts have their own specific
* handlers).
diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c
index 19f7df30937f..19df764f6399 100644
--- a/arch/xtensa/kernel/time.c
+++ b/arch/xtensa/kernel/time.c
@@ -60,11 +60,6 @@ static struct irqaction timer_irqaction = {
void __init time_init(void)
{
- /* FIXME: xtime&wall_to_monotonic are set in timekeeping_init. */
- read_persistent_clock(&xtime);
- set_normalized_timespec(&wall_to_monotonic,
- -xtime.tv_sec, -xtime.tv_nsec);
-
#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
printk("Calibrating CPU frequency ");
platform_calibrate_ccount();
diff --git a/arch/xtensa/kernel/vectors.S b/arch/xtensa/kernel/vectors.S
index 74a7518faf16..70066e3582d0 100644
--- a/arch/xtensa/kernel/vectors.S
+++ b/arch/xtensa/kernel/vectors.S
@@ -44,14 +44,12 @@
#include <linux/linkage.h>
#include <asm/ptrace.h>
-#include <asm/ptrace.h>
#include <asm/current.h>
#include <asm/asm-offsets.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/thread_info.h>
-#include <asm/processor.h>
#define WINDOW_VECTORS_SIZE 0x180
diff --git a/block/Kconfig b/block/Kconfig
index f9e89f4d94bb..9be0b56eaee1 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -77,29 +77,6 @@ config BLK_DEV_INTEGRITY
T10/SCSI Data Integrity Field or the T13/ATA External Path
Protection. If in doubt, say N.
-config BLK_CGROUP
- tristate "Block cgroup support"
- depends on CGROUPS
- depends on CFQ_GROUP_IOSCHED
- default n
- ---help---
- Generic block IO controller cgroup interface. This is the common
- cgroup interface which should be used by various IO controlling
- policies.
-
- Currently, CFQ IO scheduler uses it to recognize task groups and
- control disk bandwidth allocation (proportional time slice allocation)
- to such task groups.
-
-config DEBUG_BLK_CGROUP
- bool
- depends on BLK_CGROUP
- default n
- ---help---
- Enable some debugging help. Currently it stores the cgroup path
- in the blk group which can be used by cfq for tracing various
- group related activity.
-
endif # BLOCK
config BLOCK_COMPAT
diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched
index fc71cf071fb2..3199b76f795d 100644
--- a/block/Kconfig.iosched
+++ b/block/Kconfig.iosched
@@ -23,7 +23,8 @@ config IOSCHED_DEADLINE
config IOSCHED_CFQ
tristate "CFQ I/O scheduler"
- select BLK_CGROUP if CFQ_GROUP_IOSCHED
+ # If BLK_CGROUP is a module, CFQ has to be built as module.
+ depends on (BLK_CGROUP=m && m) || !BLK_CGROUP || BLK_CGROUP=y
default y
---help---
The CFQ I/O scheduler tries to distribute bandwidth equally
@@ -33,22 +34,15 @@ config IOSCHED_CFQ
This is the default I/O scheduler.
+ Note: If BLK_CGROUP=m, then CFQ can be built only as module.
+
config CFQ_GROUP_IOSCHED
bool "CFQ Group Scheduling support"
- depends on IOSCHED_CFQ && CGROUPS
+ depends on IOSCHED_CFQ && BLK_CGROUP
default n
---help---
Enable group IO scheduling in CFQ.
-config DEBUG_CFQ_IOSCHED
- bool "Debug CFQ Scheduling"
- depends on CFQ_GROUP_IOSCHED
- select DEBUG_BLK_CGROUP
- default n
- ---help---
- Enable CFQ IO scheduling debugging in CFQ. Currently it makes
- blktrace output more verbose.
-
choice
prompt "Default I/O scheduler"
default DEFAULT_CFQ
diff --git a/block/Makefile b/block/Makefile
index cb2d515ebd6e..0bb499a739cd 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -5,7 +5,7 @@
obj-$(CONFIG_BLOCK) := elevator.o blk-core.o blk-tag.o blk-sysfs.o \
blk-barrier.o blk-settings.o blk-ioc.o blk-map.o \
blk-exec.o blk-merge.o blk-softirq.o blk-timeout.o \
- blk-iopoll.o ioctl.o genhd.o scsi_ioctl.o
+ blk-iopoll.o blk-lib.o ioctl.o genhd.o scsi_ioctl.o
obj-$(CONFIG_BLK_DEV_BSG) += bsg.o
obj-$(CONFIG_BLK_CGROUP) += blk-cgroup.o
diff --git a/block/blk-barrier.c b/block/blk-barrier.c
index 6d88544b677f..0d710c9d403b 100644
--- a/block/blk-barrier.c
+++ b/block/blk-barrier.c
@@ -286,26 +286,31 @@ static void bio_end_empty_barrier(struct bio *bio, int err)
set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
clear_bit(BIO_UPTODATE, &bio->bi_flags);
}
-
- complete(bio->bi_private);
+ if (bio->bi_private)
+ complete(bio->bi_private);
+ bio_put(bio);
}
/**
* blkdev_issue_flush - queue a flush
* @bdev: blockdev to issue flush for
+ * @gfp_mask: memory allocation flags (for bio_alloc)
* @error_sector: error sector
+ * @flags: BLKDEV_IFL_* flags to control behaviour
*
* Description:
* Issue a flush for the block device in question. Caller can supply
* room for storing the error offset in case of a flush error, if they
- * wish to.
+ * wish to. If the WAIT flag is not passed, the caller may only assume that
+ * the request was queued internally for later handling.
*/
-int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
+int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
+ sector_t *error_sector, unsigned long flags)
{
DECLARE_COMPLETION_ONSTACK(wait);
struct request_queue *q;
struct bio *bio;
- int ret;
+ int ret = 0;
if (bdev->bd_disk == NULL)
return -ENXIO;
@@ -314,23 +319,25 @@ int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
if (!q)
return -ENXIO;
- bio = bio_alloc(GFP_KERNEL, 0);
+ bio = bio_alloc(gfp_mask, 0);
bio->bi_end_io = bio_end_empty_barrier;
- bio->bi_private = &wait;
bio->bi_bdev = bdev;
- submit_bio(WRITE_BARRIER, bio);
-
- wait_for_completion(&wait);
+ if (test_bit(BLKDEV_WAIT, &flags))
+ bio->bi_private = &wait;
- /*
- * The driver must store the error location in ->bi_sector, if
- * it supports it. For non-stacked drivers, this should be copied
- * from blk_rq_pos(rq).
- */
- if (error_sector)
- *error_sector = bio->bi_sector;
+ bio_get(bio);
+ submit_bio(WRITE_BARRIER, bio);
+ if (test_bit(BLKDEV_WAIT, &flags)) {
+ wait_for_completion(&wait);
+ /*
+ * The driver must store the error location in ->bi_sector, if
+ * it supports it. For non-stacked drivers, this should be
+ * copied from blk_rq_pos(rq).
+ */
+ if (error_sector)
+ *error_sector = bio->bi_sector;
+ }
- ret = 0;
if (bio_flagged(bio, BIO_EOPNOTSUPP))
ret = -EOPNOTSUPP;
else if (!bio_flagged(bio, BIO_UPTODATE))
@@ -340,107 +347,3 @@ int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
return ret;
}
EXPORT_SYMBOL(blkdev_issue_flush);
-
-static void blkdev_discard_end_io(struct bio *bio, int err)
-{
- if (err) {
- if (err == -EOPNOTSUPP)
- set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
- clear_bit(BIO_UPTODATE, &bio->bi_flags);
- }
-
- if (bio->bi_private)
- complete(bio->bi_private);
- __free_page(bio_page(bio));
-
- bio_put(bio);
-}
-
-/**
- * blkdev_issue_discard - queue a discard
- * @bdev: blockdev to issue discard for
- * @sector: start sector
- * @nr_sects: number of sectors to discard
- * @gfp_mask: memory allocation flags (for bio_alloc)
- * @flags: DISCARD_FL_* flags to control behaviour
- *
- * Description:
- * Issue a discard request for the sectors in question.
- */
-int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
- sector_t nr_sects, gfp_t gfp_mask, int flags)
-{
- DECLARE_COMPLETION_ONSTACK(wait);
- struct request_queue *q = bdev_get_queue(bdev);
- int type = flags & DISCARD_FL_BARRIER ?
- DISCARD_BARRIER : DISCARD_NOBARRIER;
- struct bio *bio;
- struct page *page;
- int ret = 0;
-
- if (!q)
- return -ENXIO;
-
- if (!blk_queue_discard(q))
- return -EOPNOTSUPP;
-
- while (nr_sects && !ret) {
- unsigned int sector_size = q->limits.logical_block_size;
- unsigned int max_discard_sectors =
- min(q->limits.max_discard_sectors, UINT_MAX >> 9);
-
- bio = bio_alloc(gfp_mask, 1);
- if (!bio)
- goto out;
- bio->bi_sector = sector;
- bio->bi_end_io = blkdev_discard_end_io;
- bio->bi_bdev = bdev;
- if (flags & DISCARD_FL_WAIT)
- bio->bi_private = &wait;
-
- /*
- * Add a zeroed one-sector payload as that's what
- * our current implementations need. If we'll ever need
- * more the interface will need revisiting.
- */
- page = alloc_page(gfp_mask | __GFP_ZERO);
- if (!page)
- goto out_free_bio;
- if (bio_add_pc_page(q, bio, page, sector_size, 0) < sector_size)
- goto out_free_page;
-
- /*
- * And override the bio size - the way discard works we
- * touch many more blocks on disk than the actual payload
- * length.
- */
- if (nr_sects > max_discard_sectors) {
- bio->bi_size = max_discard_sectors << 9;
- nr_sects -= max_discard_sectors;
- sector += max_discard_sectors;
- } else {
- bio->bi_size = nr_sects << 9;
- nr_sects = 0;
- }
-
- bio_get(bio);
- submit_bio(type, bio);
-
- if (flags & DISCARD_FL_WAIT)
- wait_for_completion(&wait);
-
- if (bio_flagged(bio, BIO_EOPNOTSUPP))
- ret = -EOPNOTSUPP;
- else if (!bio_flagged(bio, BIO_UPTODATE))
- ret = -EIO;
- bio_put(bio);
- }
- return ret;
-out_free_page:
- __free_page(page);
-out_free_bio:
- bio_put(bio);
-out:
- return -ENOMEM;
-}
-EXPORT_SYMBOL(blkdev_issue_discard);
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 2cc682b860ea..a6809645d212 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -15,8 +15,12 @@
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
+#include <linux/blkdev.h>
#include <linux/slab.h>
#include "blk-cgroup.h"
+#include <linux/genhd.h>
+
+#define MAX_KEY_LEN 100
static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);
@@ -49,6 +53,32 @@ struct cgroup_subsys blkio_subsys = {
};
EXPORT_SYMBOL_GPL(blkio_subsys);
+static inline void blkio_policy_insert_node(struct blkio_cgroup *blkcg,
+ struct blkio_policy_node *pn)
+{
+ list_add(&pn->node, &blkcg->policy_list);
+}
+
+/* Must be called with blkcg->lock held */
+static inline void blkio_policy_delete_node(struct blkio_policy_node *pn)
+{
+ list_del(&pn->node);
+}
+
+/* Must be called with blkcg->lock held */
+static struct blkio_policy_node *
+blkio_policy_search_node(const struct blkio_cgroup *blkcg, dev_t dev)
+{
+ struct blkio_policy_node *pn;
+
+ list_for_each_entry(pn, &blkcg->policy_list, node) {
+ if (pn->dev == dev)
+ return pn;
+ }
+
+ return NULL;
+}
+
struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
@@ -56,13 +86,259 @@ struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);
-void blkiocg_update_blkio_group_stats(struct blkio_group *blkg,
- unsigned long time, unsigned long sectors)
+/*
+ * Add to the appropriate stat variable depending on the request type.
+ * This should be called with the blkg->stats_lock held.
+ */
+static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
+ bool sync)
+{
+ if (direction)
+ stat[BLKIO_STAT_WRITE] += add;
+ else
+ stat[BLKIO_STAT_READ] += add;
+ if (sync)
+ stat[BLKIO_STAT_SYNC] += add;
+ else
+ stat[BLKIO_STAT_ASYNC] += add;
+}
+
+/*
+ * Decrements the appropriate stat variable if non-zero depending on the
+ * request type. Panics on value being zero.
+ * This should be called with the blkg->stats_lock held.
+ */
+static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
+{
+ if (direction) {
+ BUG_ON(stat[BLKIO_STAT_WRITE] == 0);
+ stat[BLKIO_STAT_WRITE]--;
+ } else {
+ BUG_ON(stat[BLKIO_STAT_READ] == 0);
+ stat[BLKIO_STAT_READ]--;
+ }
+ if (sync) {
+ BUG_ON(stat[BLKIO_STAT_SYNC] == 0);
+ stat[BLKIO_STAT_SYNC]--;
+ } else {
+ BUG_ON(stat[BLKIO_STAT_ASYNC] == 0);
+ stat[BLKIO_STAT_ASYNC]--;
+ }
+}
+
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+/* This should be called with the blkg->stats_lock held. */
+static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
+ struct blkio_group *curr_blkg)
+{
+ if (blkio_blkg_waiting(&blkg->stats))
+ return;
+ if (blkg == curr_blkg)
+ return;
+ blkg->stats.start_group_wait_time = sched_clock();
+ blkio_mark_blkg_waiting(&blkg->stats);
+}
+
+/* This should be called with the blkg->stats_lock held. */
+static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
+{
+ unsigned long long now;
+
+ if (!blkio_blkg_waiting(stats))
+ return;
+
+ now = sched_clock();
+ if (time_after64(now, stats->start_group_wait_time))
+ stats->group_wait_time += now - stats->start_group_wait_time;
+ blkio_clear_blkg_waiting(stats);
+}
+
+/* This should be called with the blkg->stats_lock held. */
+static void blkio_end_empty_time(struct blkio_group_stats *stats)
+{
+ unsigned long long now;
+
+ if (!blkio_blkg_empty(stats))
+ return;
+
+ now = sched_clock();
+ if (time_after64(now, stats->start_empty_time))
+ stats->empty_time += now - stats->start_empty_time;
+ blkio_clear_blkg_empty(stats);
+}
+
+void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&blkg->stats_lock, flags);
+ BUG_ON(blkio_blkg_idling(&blkg->stats));
+ blkg->stats.start_idle_time = sched_clock();
+ blkio_mark_blkg_idling(&blkg->stats);
+ spin_unlock_irqrestore(&blkg->stats_lock, flags);
+}
+EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);
+
+void blkiocg_update_idle_time_stats(struct blkio_group *blkg)
+{
+ unsigned long flags;
+ unsigned long long now;
+ struct blkio_group_stats *stats;
+
+ spin_lock_irqsave(&blkg->stats_lock, flags);
+ stats = &blkg->stats;
+ if (blkio_blkg_idling(stats)) {
+ now = sched_clock();
+ if (time_after64(now, stats->start_idle_time))
+ stats->idle_time += now - stats->start_idle_time;
+ blkio_clear_blkg_idling(stats);
+ }
+ spin_unlock_irqrestore(&blkg->stats_lock, flags);
+}
+EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);
+
+void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg)
+{
+ unsigned long flags;
+ struct blkio_group_stats *stats;
+
+ spin_lock_irqsave(&blkg->stats_lock, flags);
+ stats = &blkg->stats;
+ stats->avg_queue_size_sum +=
+ stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] +
+ stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE];
+ stats->avg_queue_size_samples++;
+ blkio_update_group_wait_time(stats);
+ spin_unlock_irqrestore(&blkg->stats_lock, flags);
+}
+EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);
+
+void blkiocg_set_start_empty_time(struct blkio_group *blkg)
+{
+ unsigned long flags;
+ struct blkio_group_stats *stats;
+
+ spin_lock_irqsave(&blkg->stats_lock, flags);
+ stats = &blkg->stats;
+
+ if (stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] ||
+ stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE]) {
+ spin_unlock_irqrestore(&blkg->stats_lock, flags);
+ return;
+ }
+
+ /*
+ * group is already marked empty. This can happen if cfqq got new
+ * request in parent group and moved to this group while being added
+ * to service tree. Just ignore the event and move on.
+ */
+ if (blkio_blkg_empty(stats)) {
+ spin_unlock_irqrestore(&blkg->stats_lock, flags);
+ return;
+ }
+
+ stats->start_empty_time = sched_clock();
+ blkio_mark_blkg_empty(stats);
+ spin_unlock_irqrestore(&blkg->stats_lock, flags);
+}
+EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);
+
+void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
+ unsigned long dequeue)
+{
+ blkg->stats.dequeue += dequeue;
+}
+EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
+#else
+static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
+ struct blkio_group *curr_blkg) {}
+static inline void blkio_end_empty_time(struct blkio_group_stats *stats) {}
+#endif
+
+void blkiocg_update_io_add_stats(struct blkio_group *blkg,
+ struct blkio_group *curr_blkg, bool direction,
+ bool sync)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&blkg->stats_lock, flags);
+ blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED], 1, direction,
+ sync);
+ blkio_end_empty_time(&blkg->stats);
+ blkio_set_start_group_wait_time(blkg, curr_blkg);
+ spin_unlock_irqrestore(&blkg->stats_lock, flags);
+}
+EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);
+
+void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
+ bool direction, bool sync)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&blkg->stats_lock, flags);
+ blkio_check_and_dec_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED],
+ direction, sync);
+ spin_unlock_irqrestore(&blkg->stats_lock, flags);
+}
+EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);
+
+void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&blkg->stats_lock, flags);
+ blkg->stats.time += time;
+ spin_unlock_irqrestore(&blkg->stats_lock, flags);
+}
+EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);
+
+void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
+ uint64_t bytes, bool direction, bool sync)
{
- blkg->time += time;
- blkg->sectors += sectors;
+ struct blkio_group_stats *stats;
+ unsigned long flags;
+
+ spin_lock_irqsave(&blkg->stats_lock, flags);
+ stats = &blkg->stats;
+ stats->sectors += bytes >> 9;
+ blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICED], 1, direction,
+ sync);
+ blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_BYTES], bytes,
+ direction, sync);
+ spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
-EXPORT_SYMBOL_GPL(blkiocg_update_blkio_group_stats);
+EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);
+
+void blkiocg_update_completion_stats(struct blkio_group *blkg,
+ uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
+{
+ struct blkio_group_stats *stats;
+ unsigned long flags;
+ unsigned long long now = sched_clock();
+
+ spin_lock_irqsave(&blkg->stats_lock, flags);
+ stats = &blkg->stats;
+ if (time_after64(now, io_start_time))
+ blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_TIME],
+ now - io_start_time, direction, sync);
+ if (time_after64(io_start_time, start_time))
+ blkio_add_stat(stats->stat_arr[BLKIO_STAT_WAIT_TIME],
+ io_start_time - start_time, direction, sync);
+ spin_unlock_irqrestore(&blkg->stats_lock, flags);
+}
+EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);
+
+void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
+ bool sync)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&blkg->stats_lock, flags);
+ blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_MERGED], 1, direction,
+ sync);
+ spin_unlock_irqrestore(&blkg->stats_lock, flags);
+}
+EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);
void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
struct blkio_group *blkg, void *key, dev_t dev)
@@ -70,14 +346,13 @@ void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
unsigned long flags;
spin_lock_irqsave(&blkcg->lock, flags);
+ spin_lock_init(&blkg->stats_lock);
rcu_assign_pointer(blkg->key, key);
blkg->blkcg_id = css_id(&blkcg->css);
hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
spin_unlock_irqrestore(&blkcg->lock, flags);
-#ifdef CONFIG_DEBUG_BLK_CGROUP
/* Need to take css reference ? */
cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
-#endif
blkg->dev = dev;
}
EXPORT_SYMBOL_GPL(blkiocg_add_blkio_group);
@@ -101,17 +376,16 @@ int blkiocg_del_blkio_group(struct blkio_group *blkg)
rcu_read_lock();
css = css_lookup(&blkio_subsys, blkg->blkcg_id);
- if (!css)
- goto out;
-
- blkcg = container_of(css, struct blkio_cgroup, css);
- spin_lock_irqsave(&blkcg->lock, flags);
- if (!hlist_unhashed(&blkg->blkcg_node)) {
- __blkiocg_del_blkio_group(blkg);
- ret = 0;
+ if (css) {
+ blkcg = container_of(css, struct blkio_cgroup, css);
+ spin_lock_irqsave(&blkcg->lock, flags);
+ if (!hlist_unhashed(&blkg->blkcg_node)) {
+ __blkiocg_del_blkio_group(blkg);
+ ret = 0;
+ }
+ spin_unlock_irqrestore(&blkcg->lock, flags);
}
- spin_unlock_irqrestore(&blkcg->lock, flags);
-out:
+
rcu_read_unlock();
return ret;
}
@@ -154,6 +428,7 @@ blkiocg_weight_write(struct cgroup *cgroup, struct cftype *cftype, u64 val)
struct blkio_group *blkg;
struct hlist_node *n;
struct blkio_policy_type *blkiop;
+ struct blkio_policy_node *pn;
if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
return -EINVAL;
@@ -162,7 +437,13 @@ blkiocg_weight_write(struct cgroup *cgroup, struct cftype *cftype, u64 val)
spin_lock(&blkio_list_lock);
spin_lock_irq(&blkcg->lock);
blkcg->weight = (unsigned int)val;
+
hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
+ pn = blkio_policy_search_node(blkcg, blkg->dev);
+
+ if (pn)
+ continue;
+
list_for_each_entry(blkiop, &blkio_list, list)
blkiop->ops.blkio_update_group_weight_fn(blkg,
blkcg->weight);
@@ -172,13 +453,154 @@ blkiocg_weight_write(struct cgroup *cgroup, struct cftype *cftype, u64 val)
return 0;
}
-#define SHOW_FUNCTION_PER_GROUP(__VAR) \
+static int
+blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
+{
+ struct blkio_cgroup *blkcg;
+ struct blkio_group *blkg;
+ struct blkio_group_stats *stats;
+ struct hlist_node *n;
+ uint64_t queued[BLKIO_STAT_TOTAL];
+ int i;
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+ bool idling, waiting, empty;
+ unsigned long long now = sched_clock();
+#endif
+
+ blkcg = cgroup_to_blkio_cgroup(cgroup);
+ spin_lock_irq(&blkcg->lock);
+ hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
+ spin_lock(&blkg->stats_lock);
+ stats = &blkg->stats;
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+ idling = blkio_blkg_idling(stats);
+ waiting = blkio_blkg_waiting(stats);
+ empty = blkio_blkg_empty(stats);
+#endif
+ for (i = 0; i < BLKIO_STAT_TOTAL; i++)
+ queued[i] = stats->stat_arr[BLKIO_STAT_QUEUED][i];
+ memset(stats, 0, sizeof(struct blkio_group_stats));
+ for (i = 0; i < BLKIO_STAT_TOTAL; i++)
+ stats->stat_arr[BLKIO_STAT_QUEUED][i] = queued[i];
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+ if (idling) {
+ blkio_mark_blkg_idling(stats);
+ stats->start_idle_time = now;
+ }
+ if (waiting) {
+ blkio_mark_blkg_waiting(stats);
+ stats->start_group_wait_time = now;
+ }
+ if (empty) {
+ blkio_mark_blkg_empty(stats);
+ stats->start_empty_time = now;
+ }
+#endif
+ spin_unlock(&blkg->stats_lock);
+ }
+ spin_unlock_irq(&blkcg->lock);
+ return 0;
+}
+
+static void blkio_get_key_name(enum stat_sub_type type, dev_t dev, char *str,
+ int chars_left, bool diskname_only)
+{
+ snprintf(str, chars_left, "%d:%d", MAJOR(dev), MINOR(dev));
+ chars_left -= strlen(str);
+ if (chars_left <= 0) {
+ printk(KERN_WARNING
+ "Possibly incorrect cgroup stat display format");
+ return;
+ }
+ if (diskname_only)
+ return;
+ switch (type) {
+ case BLKIO_STAT_READ:
+ strlcat(str, " Read", chars_left);
+ break;
+ case BLKIO_STAT_WRITE:
+ strlcat(str, " Write", chars_left);
+ break;
+ case BLKIO_STAT_SYNC:
+ strlcat(str, " Sync", chars_left);
+ break;
+ case BLKIO_STAT_ASYNC:
+ strlcat(str, " Async", chars_left);
+ break;
+ case BLKIO_STAT_TOTAL:
+ strlcat(str, " Total", chars_left);
+ break;
+ default:
+ strlcat(str, " Invalid", chars_left);
+ }
+}
+
+static uint64_t blkio_fill_stat(char *str, int chars_left, uint64_t val,
+ struct cgroup_map_cb *cb, dev_t dev)
+{
+ blkio_get_key_name(0, dev, str, chars_left, true);
+ cb->fill(cb, str, val);
+ return val;
+}
+
+/* This should be called with blkg->stats_lock held */
+static uint64_t blkio_get_stat(struct blkio_group *blkg,
+ struct cgroup_map_cb *cb, dev_t dev, enum stat_type type)
+{
+ uint64_t disk_total;
+ char key_str[MAX_KEY_LEN];
+ enum stat_sub_type sub_type;
+
+ if (type == BLKIO_STAT_TIME)
+ return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
+ blkg->stats.time, cb, dev);
+ if (type == BLKIO_STAT_SECTORS)
+ return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
+ blkg->stats.sectors, cb, dev);
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+ if (type == BLKIO_STAT_AVG_QUEUE_SIZE) {
+ uint64_t sum = blkg->stats.avg_queue_size_sum;
+ uint64_t samples = blkg->stats.avg_queue_size_samples;
+ if (samples)
+ do_div(sum, samples);
+ else
+ sum = 0;
+ return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, sum, cb, dev);
+ }
+ if (type == BLKIO_STAT_GROUP_WAIT_TIME)
+ return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
+ blkg->stats.group_wait_time, cb, dev);
+ if (type == BLKIO_STAT_IDLE_TIME)
+ return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
+ blkg->stats.idle_time, cb, dev);
+ if (type == BLKIO_STAT_EMPTY_TIME)
+ return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
+ blkg->stats.empty_time, cb, dev);
+ if (type == BLKIO_STAT_DEQUEUE)
+ return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
+ blkg->stats.dequeue, cb, dev);
+#endif
+
+ for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
+ sub_type++) {
+ blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
+ cb->fill(cb, key_str, blkg->stats.stat_arr[type][sub_type]);
+ }
+ disk_total = blkg->stats.stat_arr[type][BLKIO_STAT_READ] +
+ blkg->stats.stat_arr[type][BLKIO_STAT_WRITE];
+ blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
+ cb->fill(cb, key_str, disk_total);
+ return disk_total;
+}
+
+#define SHOW_FUNCTION_PER_GROUP(__VAR, type, show_total) \
static int blkiocg_##__VAR##_read(struct cgroup *cgroup, \
- struct cftype *cftype, struct seq_file *m) \
+ struct cftype *cftype, struct cgroup_map_cb *cb) \
{ \
struct blkio_cgroup *blkcg; \
struct blkio_group *blkg; \
struct hlist_node *n; \
+ uint64_t cgroup_total = 0; \
\
if (!cgroup_lock_live_group(cgroup)) \
return -ENODEV; \
@@ -186,50 +608,293 @@ static int blkiocg_##__VAR##_read(struct cgroup *cgroup, \
blkcg = cgroup_to_blkio_cgroup(cgroup); \
rcu_read_lock(); \
hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {\
- if (blkg->dev) \
- seq_printf(m, "%u:%u %lu\n", MAJOR(blkg->dev), \
- MINOR(blkg->dev), blkg->__VAR); \
+ if (blkg->dev) { \
+ spin_lock_irq(&blkg->stats_lock); \
+ cgroup_total += blkio_get_stat(blkg, cb, \
+ blkg->dev, type); \
+ spin_unlock_irq(&blkg->stats_lock); \
+ } \
} \
+ if (show_total) \
+ cb->fill(cb, "Total", cgroup_total); \
rcu_read_unlock(); \
cgroup_unlock(); \
return 0; \
}
-SHOW_FUNCTION_PER_GROUP(time);
-SHOW_FUNCTION_PER_GROUP(sectors);
+SHOW_FUNCTION_PER_GROUP(time, BLKIO_STAT_TIME, 0);
+SHOW_FUNCTION_PER_GROUP(sectors, BLKIO_STAT_SECTORS, 0);
+SHOW_FUNCTION_PER_GROUP(io_service_bytes, BLKIO_STAT_SERVICE_BYTES, 1);
+SHOW_FUNCTION_PER_GROUP(io_serviced, BLKIO_STAT_SERVICED, 1);
+SHOW_FUNCTION_PER_GROUP(io_service_time, BLKIO_STAT_SERVICE_TIME, 1);
+SHOW_FUNCTION_PER_GROUP(io_wait_time, BLKIO_STAT_WAIT_TIME, 1);
+SHOW_FUNCTION_PER_GROUP(io_merged, BLKIO_STAT_MERGED, 1);
+SHOW_FUNCTION_PER_GROUP(io_queued, BLKIO_STAT_QUEUED, 1);
#ifdef CONFIG_DEBUG_BLK_CGROUP
-SHOW_FUNCTION_PER_GROUP(dequeue);
+SHOW_FUNCTION_PER_GROUP(dequeue, BLKIO_STAT_DEQUEUE, 0);
+SHOW_FUNCTION_PER_GROUP(avg_queue_size, BLKIO_STAT_AVG_QUEUE_SIZE, 0);
+SHOW_FUNCTION_PER_GROUP(group_wait_time, BLKIO_STAT_GROUP_WAIT_TIME, 0);
+SHOW_FUNCTION_PER_GROUP(idle_time, BLKIO_STAT_IDLE_TIME, 0);
+SHOW_FUNCTION_PER_GROUP(empty_time, BLKIO_STAT_EMPTY_TIME, 0);
#endif
#undef SHOW_FUNCTION_PER_GROUP
-#ifdef CONFIG_DEBUG_BLK_CGROUP
-void blkiocg_update_blkio_group_dequeue_stats(struct blkio_group *blkg,
- unsigned long dequeue)
+static int blkio_check_dev_num(dev_t dev)
{
- blkg->dequeue += dequeue;
+ int part = 0;
+ struct gendisk *disk;
+
+ disk = get_gendisk(dev, &part);
+ if (!disk || part)
+ return -ENODEV;
+
+ return 0;
+}
+
+static int blkio_policy_parse_and_set(char *buf,
+ struct blkio_policy_node *newpn)
+{
+ char *s[4], *p, *major_s = NULL, *minor_s = NULL;
+ int ret;
+ unsigned long major, minor, temp;
+ int i = 0;
+ dev_t dev;
+
+ memset(s, 0, sizeof(s));
+
+ while ((p = strsep(&buf, " ")) != NULL) {
+ if (!*p)
+ continue;
+
+ s[i++] = p;
+
+ /* Prevent too many fields from being input */
+ if (i == 3)
+ break;
+ }
+
+ if (i != 2)
+ return -EINVAL;
+
+ p = strsep(&s[0], ":");
+ if (p != NULL)
+ major_s = p;
+ else
+ return -EINVAL;
+
+ minor_s = s[0];
+ if (!minor_s)
+ return -EINVAL;
+
+ ret = strict_strtoul(major_s, 10, &major);
+ if (ret)
+ return -EINVAL;
+
+ ret = strict_strtoul(minor_s, 10, &minor);
+ if (ret)
+ return -EINVAL;
+
+ dev = MKDEV(major, minor);
+
+ ret = blkio_check_dev_num(dev);
+ if (ret)
+ return ret;
+
+ newpn->dev = dev;
+
+ if (s[1] == NULL)
+ return -EINVAL;
+
+ ret = strict_strtoul(s[1], 10, &temp);
+ if (ret || (temp < BLKIO_WEIGHT_MIN && temp > 0) ||
+ temp > BLKIO_WEIGHT_MAX)
+ return -EINVAL;
+
+ newpn->weight = temp;
+
+ return 0;
+}
+
+unsigned int blkcg_get_weight(struct blkio_cgroup *blkcg,
+ dev_t dev)
+{
+ struct blkio_policy_node *pn;
+
+ pn = blkio_policy_search_node(blkcg, dev);
+ if (pn)
+ return pn->weight;
+ else
+ return blkcg->weight;
+}
+EXPORT_SYMBOL_GPL(blkcg_get_weight);
+
+
+static int blkiocg_weight_device_write(struct cgroup *cgrp, struct cftype *cft,
+ const char *buffer)
+{
+ int ret = 0;
+ char *buf;
+ struct blkio_policy_node *newpn, *pn;
+ struct blkio_cgroup *blkcg;
+ struct blkio_group *blkg;
+ int keep_newpn = 0;
+ struct hlist_node *n;
+ struct blkio_policy_type *blkiop;
+
+ buf = kstrdup(buffer, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ newpn = kzalloc(sizeof(*newpn), GFP_KERNEL);
+ if (!newpn) {
+ ret = -ENOMEM;
+ goto free_buf;
+ }
+
+ ret = blkio_policy_parse_and_set(buf, newpn);
+ if (ret)
+ goto free_newpn;
+
+ blkcg = cgroup_to_blkio_cgroup(cgrp);
+
+ spin_lock_irq(&blkcg->lock);
+
+ pn = blkio_policy_search_node(blkcg, newpn->dev);
+ if (!pn) {
+ if (newpn->weight != 0) {
+ blkio_policy_insert_node(blkcg, newpn);
+ keep_newpn = 1;
+ }
+ spin_unlock_irq(&blkcg->lock);
+ goto update_io_group;
+ }
+
+ if (newpn->weight == 0) {
+ /* weight == 0 means deleting a specific weight */
+ blkio_policy_delete_node(pn);
+ spin_unlock_irq(&blkcg->lock);
+ goto update_io_group;
+ }
+ spin_unlock_irq(&blkcg->lock);
+
+ pn->weight = newpn->weight;
+
+update_io_group:
+ /* update weight for each cfqg */
+ spin_lock(&blkio_list_lock);
+ spin_lock_irq(&blkcg->lock);
+
+ hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
+ if (newpn->dev == blkg->dev) {
+ list_for_each_entry(blkiop, &blkio_list, list)
+ blkiop->ops.blkio_update_group_weight_fn(blkg,
+ newpn->weight ?
+ newpn->weight :
+ blkcg->weight);
+ }
+ }
+
+ spin_unlock_irq(&blkcg->lock);
+ spin_unlock(&blkio_list_lock);
+
+free_newpn:
+ if (!keep_newpn)
+ kfree(newpn);
+free_buf:
+ kfree(buf);
+ return ret;
+}
+
+static int blkiocg_weight_device_read(struct cgroup *cgrp, struct cftype *cft,
+ struct seq_file *m)
+{
+ struct blkio_cgroup *blkcg;
+ struct blkio_policy_node *pn;
+
+ seq_printf(m, "dev\tweight\n");
+
+ blkcg = cgroup_to_blkio_cgroup(cgrp);
+ if (!list_empty(&blkcg->policy_list)) {
+ spin_lock_irq(&blkcg->lock);
+ list_for_each_entry(pn, &blkcg->policy_list, node) {
+ seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
+ MINOR(pn->dev), pn->weight);
+ }
+ spin_unlock_irq(&blkcg->lock);
+ }
+
+ return 0;
}
-EXPORT_SYMBOL_GPL(blkiocg_update_blkio_group_dequeue_stats);
-#endif
struct cftype blkio_files[] = {
{
+ .name = "weight_device",
+ .read_seq_string = blkiocg_weight_device_read,
+ .write_string = blkiocg_weight_device_write,
+ .max_write_len = 256,
+ },
+ {
.name = "weight",
.read_u64 = blkiocg_weight_read,
.write_u64 = blkiocg_weight_write,
},
{
.name = "time",
- .read_seq_string = blkiocg_time_read,
+ .read_map = blkiocg_time_read,
},
{
.name = "sectors",
- .read_seq_string = blkiocg_sectors_read,
+ .read_map = blkiocg_sectors_read,
+ },
+ {
+ .name = "io_service_bytes",
+ .read_map = blkiocg_io_service_bytes_read,
+ },
+ {
+ .name = "io_serviced",
+ .read_map = blkiocg_io_serviced_read,
+ },
+ {
+ .name = "io_service_time",
+ .read_map = blkiocg_io_service_time_read,
+ },
+ {
+ .name = "io_wait_time",
+ .read_map = blkiocg_io_wait_time_read,
+ },
+ {
+ .name = "io_merged",
+ .read_map = blkiocg_io_merged_read,
+ },
+ {
+ .name = "io_queued",
+ .read_map = blkiocg_io_queued_read,
+ },
+ {
+ .name = "reset_stats",
+ .write_u64 = blkiocg_reset_stats,
},
#ifdef CONFIG_DEBUG_BLK_CGROUP
- {
+ {
+ .name = "avg_queue_size",
+ .read_map = blkiocg_avg_queue_size_read,
+ },
+ {
+ .name = "group_wait_time",
+ .read_map = blkiocg_group_wait_time_read,
+ },
+ {
+ .name = "idle_time",
+ .read_map = blkiocg_idle_time_read,
+ },
+ {
+ .name = "empty_time",
+ .read_map = blkiocg_empty_time_read,
+ },
+ {
.name = "dequeue",
- .read_seq_string = blkiocg_dequeue_read,
- },
+ .read_map = blkiocg_dequeue_read,
+ },
#endif
};
@@ -246,37 +911,42 @@ static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
struct blkio_group *blkg;
void *key;
struct blkio_policy_type *blkiop;
+ struct blkio_policy_node *pn, *pntmp;
rcu_read_lock();
-remove_entry:
- spin_lock_irqsave(&blkcg->lock, flags);
+ do {
+ spin_lock_irqsave(&blkcg->lock, flags);
+
+ if (hlist_empty(&blkcg->blkg_list)) {
+ spin_unlock_irqrestore(&blkcg->lock, flags);
+ break;
+ }
+
+ blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
+ blkcg_node);
+ key = rcu_dereference(blkg->key);
+ __blkiocg_del_blkio_group(blkg);
- if (hlist_empty(&blkcg->blkg_list)) {
spin_unlock_irqrestore(&blkcg->lock, flags);
- goto done;
- }
- blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
- blkcg_node);
- key = rcu_dereference(blkg->key);
- __blkiocg_del_blkio_group(blkg);
+ /*
+ * This blkio_group is being unlinked as associated cgroup is
+ * going away. Let all the IO controlling policies know about
+ * this event. Currently this is static call to one io
+ * controlling policy. Once we have more policies in place, we
+ * need some dynamic registration of callback function.
+ */
+ spin_lock(&blkio_list_lock);
+ list_for_each_entry(blkiop, &blkio_list, list)
+ blkiop->ops.blkio_unlink_group_fn(key, blkg);
+ spin_unlock(&blkio_list_lock);
+ } while (1);
- spin_unlock_irqrestore(&blkcg->lock, flags);
+ list_for_each_entry_safe(pn, pntmp, &blkcg->policy_list, node) {
+ blkio_policy_delete_node(pn);
+ kfree(pn);
+ }
- /*
- * This blkio_group is being unlinked as associated cgroup is going
- * away. Let all the IO controlling policies know about this event.
- *
- * Currently this is static call to one io controlling policy. Once
- * we have more policies in place, we need some dynamic registration
- * of callback function.
- */
- spin_lock(&blkio_list_lock);
- list_for_each_entry(blkiop, &blkio_list, list)
- blkiop->ops.blkio_unlink_group_fn(key, blkg);
- spin_unlock(&blkio_list_lock);
- goto remove_entry;
-done:
free_css_id(&blkio_subsys, &blkcg->css);
rcu_read_unlock();
if (blkcg != &blkio_root_cgroup)
@@ -307,6 +977,7 @@ done:
spin_lock_init(&blkcg->lock);
INIT_HLIST_HEAD(&blkcg->blkg_list);
+ INIT_LIST_HEAD(&blkcg->policy_list);
return &blkcg->css;
}
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index 8ccc20464dae..2b866ec1dcea 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -23,11 +23,84 @@ extern struct cgroup_subsys blkio_subsys;
#define blkio_subsys_id blkio_subsys.subsys_id
#endif
+enum stat_type {
+ /* Total time spent (in ns) between request dispatch to the driver and
+ * request completion for IOs doen by this cgroup. This may not be
+ * accurate when NCQ is turned on. */
+ BLKIO_STAT_SERVICE_TIME = 0,
+ /* Total bytes transferred */
+ BLKIO_STAT_SERVICE_BYTES,
+ /* Total IOs serviced, post merge */
+ BLKIO_STAT_SERVICED,
+ /* Total time spent waiting in scheduler queue in ns */
+ BLKIO_STAT_WAIT_TIME,
+ /* Number of IOs merged */
+ BLKIO_STAT_MERGED,
+ /* Number of IOs queued up */
+ BLKIO_STAT_QUEUED,
+ /* All the single valued stats go below this */
+ BLKIO_STAT_TIME,
+ BLKIO_STAT_SECTORS,
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+ BLKIO_STAT_AVG_QUEUE_SIZE,
+ BLKIO_STAT_IDLE_TIME,
+ BLKIO_STAT_EMPTY_TIME,
+ BLKIO_STAT_GROUP_WAIT_TIME,
+ BLKIO_STAT_DEQUEUE
+#endif
+};
+
+enum stat_sub_type {
+ BLKIO_STAT_READ = 0,
+ BLKIO_STAT_WRITE,
+ BLKIO_STAT_SYNC,
+ BLKIO_STAT_ASYNC,
+ BLKIO_STAT_TOTAL
+};
+
+/* blkg state flags */
+enum blkg_state_flags {
+ BLKG_waiting = 0,
+ BLKG_idling,
+ BLKG_empty,
+};
+
struct blkio_cgroup {
struct cgroup_subsys_state css;
unsigned int weight;
spinlock_t lock;
struct hlist_head blkg_list;
+ struct list_head policy_list; /* list of blkio_policy_node */
+};
+
+struct blkio_group_stats {
+ /* total disk time and nr sectors dispatched by this group */
+ uint64_t time;
+ uint64_t sectors;
+ uint64_t stat_arr[BLKIO_STAT_QUEUED + 1][BLKIO_STAT_TOTAL];
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+ /* Sum of number of IOs queued across all samples */
+ uint64_t avg_queue_size_sum;
+ /* Count of samples taken for average */
+ uint64_t avg_queue_size_samples;
+ /* How many times this group has been removed from service tree */
+ unsigned long dequeue;
+
+ /* Total time spent waiting for it to be assigned a timeslice. */
+ uint64_t group_wait_time;
+ uint64_t start_group_wait_time;
+
+ /* Time spent idling for this blkio_group */
+ uint64_t idle_time;
+ uint64_t start_idle_time;
+ /*
+ * Total time when we have requests queued and do not contain the
+ * current active queue.
+ */
+ uint64_t empty_time;
+ uint64_t start_empty_time;
+ uint16_t flags;
+#endif
};
struct blkio_group {
@@ -35,20 +108,25 @@ struct blkio_group {
void *key;
struct hlist_node blkcg_node;
unsigned short blkcg_id;
-#ifdef CONFIG_DEBUG_BLK_CGROUP
/* Store cgroup path */
char path[128];
- /* How many times this group has been removed from service tree */
- unsigned long dequeue;
-#endif
/* The device MKDEV(major, minor), this group has been created for */
- dev_t dev;
+ dev_t dev;
- /* total disk time and nr sectors dispatched by this group */
- unsigned long time;
- unsigned long sectors;
+ /* Need to serialize the stats in the case of reset/update */
+ spinlock_t stats_lock;
+ struct blkio_group_stats stats;
};
+struct blkio_policy_node {
+ struct list_head node;
+ dev_t dev;
+ unsigned int weight;
+};
+
+extern unsigned int blkcg_get_weight(struct blkio_cgroup *blkcg,
+ dev_t dev);
+
typedef void (blkio_unlink_group_fn) (void *key, struct blkio_group *blkg);
typedef void (blkio_update_group_weight_fn) (struct blkio_group *blkg,
unsigned int weight);
@@ -67,6 +145,11 @@ struct blkio_policy_type {
extern void blkio_policy_register(struct blkio_policy_type *);
extern void blkio_policy_unregister(struct blkio_policy_type *);
+static inline char *blkg_path(struct blkio_group *blkg)
+{
+ return blkg->path;
+}
+
#else
struct blkio_group {
@@ -78,6 +161,8 @@ struct blkio_policy_type {
static inline void blkio_policy_register(struct blkio_policy_type *blkiop) { }
static inline void blkio_policy_unregister(struct blkio_policy_type *blkiop) { }
+static inline char *blkg_path(struct blkio_group *blkg) { return NULL; }
+
#endif
#define BLKIO_WEIGHT_MIN 100
@@ -85,16 +170,42 @@ static inline void blkio_policy_unregister(struct blkio_policy_type *blkiop) { }
#define BLKIO_WEIGHT_DEFAULT 500
#ifdef CONFIG_DEBUG_BLK_CGROUP
-static inline char *blkg_path(struct blkio_group *blkg)
-{
- return blkg->path;
-}
-void blkiocg_update_blkio_group_dequeue_stats(struct blkio_group *blkg,
+void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg);
+void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
unsigned long dequeue);
+void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg);
+void blkiocg_update_idle_time_stats(struct blkio_group *blkg);
+void blkiocg_set_start_empty_time(struct blkio_group *blkg);
+
+#define BLKG_FLAG_FNS(name) \
+static inline void blkio_mark_blkg_##name( \
+ struct blkio_group_stats *stats) \
+{ \
+ stats->flags |= (1 << BLKG_##name); \
+} \
+static inline void blkio_clear_blkg_##name( \
+ struct blkio_group_stats *stats) \
+{ \
+ stats->flags &= ~(1 << BLKG_##name); \
+} \
+static inline int blkio_blkg_##name(struct blkio_group_stats *stats) \
+{ \
+ return (stats->flags & (1 << BLKG_##name)) != 0; \
+} \
+
+BLKG_FLAG_FNS(waiting)
+BLKG_FLAG_FNS(idling)
+BLKG_FLAG_FNS(empty)
+#undef BLKG_FLAG_FNS
#else
-static inline char *blkg_path(struct blkio_group *blkg) { return NULL; }
-static inline void blkiocg_update_blkio_group_dequeue_stats(
- struct blkio_group *blkg, unsigned long dequeue) {}
+static inline void blkiocg_update_avg_queue_size_stats(
+ struct blkio_group *blkg) {}
+static inline void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
+ unsigned long dequeue) {}
+static inline void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
+{}
+static inline void blkiocg_update_idle_time_stats(struct blkio_group *blkg) {}
+static inline void blkiocg_set_start_empty_time(struct blkio_group *blkg) {}
#endif
#if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE)
@@ -105,26 +216,43 @@ extern void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
extern int blkiocg_del_blkio_group(struct blkio_group *blkg);
extern struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg,
void *key);
-void blkiocg_update_blkio_group_stats(struct blkio_group *blkg,
- unsigned long time, unsigned long sectors);
+void blkiocg_update_timeslice_used(struct blkio_group *blkg,
+ unsigned long time);
+void blkiocg_update_dispatch_stats(struct blkio_group *blkg, uint64_t bytes,
+ bool direction, bool sync);
+void blkiocg_update_completion_stats(struct blkio_group *blkg,
+ uint64_t start_time, uint64_t io_start_time, bool direction, bool sync);
+void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
+ bool sync);
+void blkiocg_update_io_add_stats(struct blkio_group *blkg,
+ struct blkio_group *curr_blkg, bool direction, bool sync);
+void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
+ bool direction, bool sync);
#else
struct cgroup;
static inline struct blkio_cgroup *
cgroup_to_blkio_cgroup(struct cgroup *cgroup) { return NULL; }
static inline void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
- struct blkio_group *blkg, void *key, dev_t dev)
-{
-}
+ struct blkio_group *blkg, void *key, dev_t dev) {}
static inline int
blkiocg_del_blkio_group(struct blkio_group *blkg) { return 0; }
static inline struct blkio_group *
blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key) { return NULL; }
-static inline void blkiocg_update_blkio_group_stats(struct blkio_group *blkg,
- unsigned long time, unsigned long sectors)
-{
-}
+static inline void blkiocg_update_timeslice_used(struct blkio_group *blkg,
+ unsigned long time) {}
+static inline void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
+ uint64_t bytes, bool direction, bool sync) {}
+static inline void blkiocg_update_completion_stats(struct blkio_group *blkg,
+ uint64_t start_time, uint64_t io_start_time, bool direction,
+ bool sync) {}
+static inline void blkiocg_update_io_merged_stats(struct blkio_group *blkg,
+ bool direction, bool sync) {}
+static inline void blkiocg_update_io_add_stats(struct blkio_group *blkg,
+ struct blkio_group *curr_blkg, bool direction, bool sync) {}
+static inline void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
+ bool direction, bool sync) {}
#endif
#endif /* _BLK_CGROUP_H */
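
The BLKG_FLAG_FNS() macro in blk-cgroup.h above packs three helpers per flag into one definition. As a reference only (an illustrative hand expansion, not part of the diff), BLKG_FLAG_FNS(waiting) generates roughly the following:

    /* Approximate expansion of BLKG_FLAG_FNS(waiting) -- illustrative only. */
    static inline void blkio_mark_blkg_waiting(struct blkio_group_stats *stats)
    {
            stats->flags |= (1 << BLKG_waiting);            /* set the "waiting" bit */
    }
    static inline void blkio_clear_blkg_waiting(struct blkio_group_stats *stats)
    {
            stats->flags &= ~(1 << BLKG_waiting);           /* clear the "waiting" bit */
    }
    static inline int blkio_blkg_waiting(struct blkio_group_stats *stats)
    {
            return (stats->flags & (1 << BLKG_waiting)) != 0;   /* test the bit */
    }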
diff --git a/block/blk-core.c b/block/blk-core.c
index 9fe174dc74d1..3bc5579d6f54 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -127,6 +127,7 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
rq->tag = -1;
rq->ref_count = 1;
rq->start_time = jiffies;
+ set_start_time_ns(rq);
}
EXPORT_SYMBOL(blk_rq_init);
@@ -450,6 +451,7 @@ void blk_cleanup_queue(struct request_queue *q)
*/
blk_sync_queue(q);
+ del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
mutex_lock(&q->sysfs_lock);
queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
mutex_unlock(&q->sysfs_lock);
@@ -510,6 +512,8 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
return NULL;
}
+ setup_timer(&q->backing_dev_info.laptop_mode_wb_timer,
+ laptop_mode_timer_fn, (unsigned long) q);
init_timer(&q->unplug_timer);
setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
INIT_LIST_HEAD(&q->timeout_list);
@@ -568,6 +572,22 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
{
struct request_queue *q = blk_alloc_queue_node(GFP_KERNEL, node_id);
+ return blk_init_allocated_queue_node(q, rfn, lock, node_id);
+}
+EXPORT_SYMBOL(blk_init_queue_node);
+
+struct request_queue *
+blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
+ spinlock_t *lock)
+{
+ return blk_init_allocated_queue_node(q, rfn, lock, -1);
+}
+EXPORT_SYMBOL(blk_init_allocated_queue);
+
+struct request_queue *
+blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
+ spinlock_t *lock, int node_id)
+{
if (!q)
return NULL;
@@ -601,7 +621,7 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
blk_put_queue(q);
return NULL;
}
-EXPORT_SYMBOL(blk_init_queue_node);
+EXPORT_SYMBOL(blk_init_allocated_queue_node);
int blk_get_queue(struct request_queue *q)
{
@@ -1198,6 +1218,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
if (!blk_rq_cpu_valid(req))
req->cpu = bio->bi_comp_cpu;
drive_stat_acct(req, 0);
+ elv_bio_merged(q, req, bio);
if (!attempt_back_merge(q, req))
elv_merged_request(q, req, el_ret);
goto out;
@@ -1231,6 +1252,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
if (!blk_rq_cpu_valid(req))
req->cpu = bio->bi_comp_cpu;
drive_stat_acct(req, 0);
+ elv_bio_merged(q, req, bio);
if (!attempt_front_merge(q, req))
elv_merged_request(q, req, el_ret);
goto out;
@@ -1855,8 +1877,10 @@ void blk_dequeue_request(struct request *rq)
* and to it is freed is accounted as io that is in progress at
* the driver side.
*/
- if (blk_account_rq(rq))
+ if (blk_account_rq(rq)) {
q->in_flight[rq_is_sync(rq)]++;
+ set_io_start_time_ns(rq);
+ }
}
/**
@@ -2098,7 +2122,7 @@ static void blk_finish_request(struct request *req, int error)
BUG_ON(blk_queued_rq(req));
if (unlikely(laptop_mode) && blk_fs_request(req))
- laptop_io_completion();
+ laptop_io_completion(&req->q->backing_dev_info);
blk_delete_timer(req);
@@ -2517,4 +2541,3 @@ int __init blk_dev_init(void)
return 0;
}
-
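
To see how the two timestamps added in blk-core.c feed the new cgroup stats, the per-request timeline works out roughly as below (an illustrative summary, not code taken from the patch):

    /*
     * Illustrative request timeline:
     *
     *   start_time_ns     set in blk_rq_init()          - request enters the block layer
     *   io_start_time_ns  set in blk_dequeue_request()  - request handed to the driver
     *   now               taken at completion
     *
     * blkiocg_update_completion_stats() then accounts:
     *   wait_time    += io_start_time_ns - start_time_ns   (time spent in the scheduler)
     *   service_time += now - io_start_time_ns             (time spent at the driver/device)
     */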
diff --git a/block/blk-lib.c b/block/blk-lib.c
new file mode 100644
index 000000000000..d0216b9f22d4
--- /dev/null
+++ b/block/blk-lib.c
@@ -0,0 +1,233 @@
+/*
+ * Generic block layer helper functions
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/bio.h>
+#include <linux/blkdev.h>
+#include <linux/scatterlist.h>
+
+#include "blk.h"
+
+static void blkdev_discard_end_io(struct bio *bio, int err)
+{
+ if (err) {
+ if (err == -EOPNOTSUPP)
+ set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
+ clear_bit(BIO_UPTODATE, &bio->bi_flags);
+ }
+
+ if (bio->bi_private)
+ complete(bio->bi_private);
+ __free_page(bio_page(bio));
+
+ bio_put(bio);
+}
+
+/**
+ * blkdev_issue_discard - queue a discard
+ * @bdev: blockdev to issue discard for
+ * @sector: start sector
+ * @nr_sects: number of sectors to discard
+ * @gfp_mask: memory allocation flags (for bio_alloc)
+ * @flags: BLKDEV_IFL_* flags to control behaviour
+ *
+ * Description:
+ * Issue a discard request for the sectors in question.
+ */
+int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
+ sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
+{
+ DECLARE_COMPLETION_ONSTACK(wait);
+ struct request_queue *q = bdev_get_queue(bdev);
+ int type = flags & BLKDEV_IFL_BARRIER ?
+ DISCARD_BARRIER : DISCARD_NOBARRIER;
+ struct bio *bio;
+ struct page *page;
+ int ret = 0;
+
+ if (!q)
+ return -ENXIO;
+
+ if (!blk_queue_discard(q))
+ return -EOPNOTSUPP;
+
+ while (nr_sects && !ret) {
+ unsigned int sector_size = q->limits.logical_block_size;
+ unsigned int max_discard_sectors =
+ min(q->limits.max_discard_sectors, UINT_MAX >> 9);
+
+ bio = bio_alloc(gfp_mask, 1);
+ if (!bio)
+ goto out;
+ bio->bi_sector = sector;
+ bio->bi_end_io = blkdev_discard_end_io;
+ bio->bi_bdev = bdev;
+ if (flags & BLKDEV_IFL_WAIT)
+ bio->bi_private = &wait;
+
+ /*
+ * Add a zeroed one-sector payload as that's what
+ * our current implementations need. If we ever need
+ * more, the interface will need revisiting.
+ */
+ page = alloc_page(gfp_mask | __GFP_ZERO);
+ if (!page)
+ goto out_free_bio;
+ if (bio_add_pc_page(q, bio, page, sector_size, 0) < sector_size)
+ goto out_free_page;
+
+ /*
+ * And override the bio size - the way discard works we
+ * touch many more blocks on disk than the actual payload
+ * length.
+ */
+ if (nr_sects > max_discard_sectors) {
+ bio->bi_size = max_discard_sectors << 9;
+ nr_sects -= max_discard_sectors;
+ sector += max_discard_sectors;
+ } else {
+ bio->bi_size = nr_sects << 9;
+ nr_sects = 0;
+ }
+
+ bio_get(bio);
+ submit_bio(type, bio);
+
+ if (flags & BLKDEV_IFL_WAIT)
+ wait_for_completion(&wait);
+
+ if (bio_flagged(bio, BIO_EOPNOTSUPP))
+ ret = -EOPNOTSUPP;
+ else if (!bio_flagged(bio, BIO_UPTODATE))
+ ret = -EIO;
+ bio_put(bio);
+ }
+ return ret;
+out_free_page:
+ __free_page(page);
+out_free_bio:
+ bio_put(bio);
+out:
+ return -ENOMEM;
+}
+EXPORT_SYMBOL(blkdev_issue_discard);
+
+struct bio_batch
+{
+ atomic_t done;
+ unsigned long flags;
+ struct completion *wait;
+ bio_end_io_t *end_io;
+};
+
+static void bio_batch_end_io(struct bio *bio, int err)
+{
+ struct bio_batch *bb = bio->bi_private;
+
+ if (err) {
+ if (err == -EOPNOTSUPP)
+ set_bit(BIO_EOPNOTSUPP, &bb->flags);
+ else
+ clear_bit(BIO_UPTODATE, &bb->flags);
+ }
+ if (bb) {
+ if (bb->end_io)
+ bb->end_io(bio, err);
+ atomic_inc(&bb->done);
+ complete(bb->wait);
+ }
+ bio_put(bio);
+}
+
+/**
+ * blkdev_issue_zeroout - generate a number of zero-filled write bios
+ * @bdev: blockdev to write to
+ * @sector: start sector
+ * @nr_sects: number of sectors to write
+ * @gfp_mask: memory allocation flags (for bio_alloc)
+ * @flags: BLKDEV_IFL_* flags to control behaviour
+ *
+ * Description:
+ * Generate and issue a number of bios with zero-filled pages.
+ * Send a barrier at the beginning and at the end if requested; this guarantees
+ * correct request ordering. An empty barrier lets us avoid a post-write queue flush.
+ */
+
+int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
+ sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
+{
+ int ret = 0;
+ struct bio *bio;
+ struct bio_batch bb;
+ unsigned int sz, issued = 0;
+ DECLARE_COMPLETION_ONSTACK(wait);
+
+ atomic_set(&bb.done, 0);
+ bb.flags = 1 << BIO_UPTODATE;
+ bb.wait = &wait;
+ bb.end_io = NULL;
+
+ if (flags & BLKDEV_IFL_BARRIER) {
+ /* issue async barrier before the data */
+ ret = blkdev_issue_flush(bdev, gfp_mask, NULL, 0);
+ if (ret)
+ return ret;
+ }
+submit:
+ while (nr_sects != 0) {
+ bio = bio_alloc(gfp_mask,
+ min(nr_sects, (sector_t)BIO_MAX_PAGES));
+ if (!bio)
+ break;
+
+ bio->bi_sector = sector;
+ bio->bi_bdev = bdev;
+ bio->bi_end_io = bio_batch_end_io;
+ if (flags & BLKDEV_IFL_WAIT)
+ bio->bi_private = &bb;
+
+ while (nr_sects != 0) {
+ sz = min((sector_t) PAGE_SIZE >> 9 , nr_sects);
+ if (sz == 0)
+ /* bio has maximum size possible */
+ break;
+ ret = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0);
+ nr_sects -= ret >> 9;
+ sector += ret >> 9;
+ if (ret < (sz << 9))
+ break;
+ }
+ issued++;
+ submit_bio(WRITE, bio);
+ }
+ /*
+ * Once all data bios are in flight, send the final barrier if requested.
+ */
+ if (nr_sects == 0 && flags & BLKDEV_IFL_BARRIER)
+ ret = blkdev_issue_flush(bdev, gfp_mask, NULL,
+ flags & BLKDEV_IFL_WAIT);
+
+
+ if (flags & BLKDEV_IFL_WAIT)
+ /* Wait for bios in-flight */
+ while ( issued != atomic_read(&bb.done))
+ wait_for_completion(&wait);
+
+ if (!test_bit(BIO_UPTODATE, &bb.flags))
+ /* One of the bios in the batch completed with an error. */
+ ret = -EIO;
+
+ if (ret)
+ goto out;
+
+ if (test_bit(BIO_EOPNOTSUPP, &bb.flags)) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+ if (nr_sects != 0)
+ goto submit;
+out:
+ return ret;
+}
+EXPORT_SYMBOL(blkdev_issue_zeroout);
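
As a rough usage sketch of the two helpers above (not taken from the patch; the wrapper name and the fallback policy are made up for illustration), a caller that wants a range discarded, zeroed when discard is unsupported, and waited on could do:

    /* Hypothetical caller: discard a range, fall back to zeroing, wait for I/O. */
    static int example_trim_or_zero(struct block_device *bdev,
                                    sector_t sector, sector_t nr_sects)
    {
            int ret;

            ret = blkdev_issue_discard(bdev, sector, nr_sects, GFP_KERNEL,
                                       BLKDEV_IFL_WAIT);
            if (ret == -EOPNOTSUPP)        /* queue has no discard support */
                    ret = blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL,
                                               BLKDEV_IFL_WAIT);
            return ret;
    }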
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 5f127cfb2e92..ed897b5ef315 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -55,6 +55,7 @@ static const int cfq_hist_divisor = 4;
#define RQ_CIC(rq) \
((struct cfq_io_context *) (rq)->elevator_private)
#define RQ_CFQQ(rq) (struct cfq_queue *) ((rq)->elevator_private2)
+#define RQ_CFQG(rq) (struct cfq_group *) ((rq)->elevator_private3)
static struct kmem_cache *cfq_pool;
static struct kmem_cache *cfq_ioc_pool;
@@ -143,8 +144,6 @@ struct cfq_queue {
struct cfq_queue *new_cfqq;
struct cfq_group *cfqg;
struct cfq_group *orig_cfqg;
- /* Sectors dispatched in current dispatch round */
- unsigned long nr_sectors;
};
/*
@@ -346,7 +345,7 @@ CFQ_CFQQ_FNS(deep);
CFQ_CFQQ_FNS(wait_busy);
#undef CFQ_CFQQ_FNS
-#ifdef CONFIG_DEBUG_CFQ_IOSCHED
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
#define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \
blk_add_trace_msg((cfqd)->queue, "cfq%d%c %s " fmt, (cfqq)->pid, \
cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \
@@ -858,7 +857,7 @@ cfq_group_service_tree_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
if (!RB_EMPTY_NODE(&cfqg->rb_node))
cfq_rb_erase(&cfqg->rb_node, st);
cfqg->saved_workload_slice = 0;
- blkiocg_update_blkio_group_dequeue_stats(&cfqg->blkg, 1);
+ blkiocg_update_dequeue_stats(&cfqg->blkg, 1);
}
static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq)
@@ -884,8 +883,7 @@ static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq)
slice_used = cfqq->allocated_slice;
}
- cfq_log_cfqq(cfqq->cfqd, cfqq, "sl_used=%u sect=%lu", slice_used,
- cfqq->nr_sectors);
+ cfq_log_cfqq(cfqq->cfqd, cfqq, "sl_used=%u", slice_used);
return slice_used;
}
@@ -919,8 +917,8 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
st->min_vdisktime);
- blkiocg_update_blkio_group_stats(&cfqg->blkg, used_sl,
- cfqq->nr_sectors);
+ blkiocg_update_timeslice_used(&cfqg->blkg, used_sl);
+ blkiocg_set_start_empty_time(&cfqg->blkg);
}
#ifdef CONFIG_CFQ_GROUP_IOSCHED
@@ -961,7 +959,6 @@ cfq_find_alloc_cfqg(struct cfq_data *cfqd, struct cgroup *cgroup, int create)
if (!cfqg)
goto done;
- cfqg->weight = blkcg->weight;
for_each_cfqg_st(cfqg, i, j, st)
*st = CFQ_RB_ROOT;
RB_CLEAR_NODE(&cfqg->rb_node);
@@ -978,6 +975,7 @@ cfq_find_alloc_cfqg(struct cfq_data *cfqd, struct cgroup *cgroup, int create)
sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd,
MKDEV(major, minor));
+ cfqg->weight = blkcg_get_weight(blkcg, cfqg->blkg.dev);
/* Add group on cfqd list */
hlist_add_head(&cfqg->cfqd_node, &cfqd->cfqg_list);
@@ -1004,6 +1002,12 @@ static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd, int create)
return cfqg;
}
+static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg)
+{
+ atomic_inc(&cfqg->ref);
+ return cfqg;
+}
+
static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
{
/* Currently, all async queues are mapped to root group */
@@ -1087,6 +1091,12 @@ static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd, int create)
{
return &cfqd->root_group;
}
+
+static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg)
+{
+ return cfqg;
+}
+
static inline void
cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) {
cfqq->cfqg = cfqg;
@@ -1389,7 +1399,12 @@ static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
{
elv_rb_del(&cfqq->sort_list, rq);
cfqq->queued[rq_is_sync(rq)]--;
+ blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg, rq_data_dir(rq),
+ rq_is_sync(rq));
cfq_add_rq_rb(rq);
+ blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
+ &cfqq->cfqd->serving_group->blkg, rq_data_dir(rq),
+ rq_is_sync(rq));
}
static struct request *
@@ -1445,6 +1460,8 @@ static void cfq_remove_request(struct request *rq)
cfq_del_rq_rb(rq);
cfqq->cfqd->rq_queued--;
+ blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg, rq_data_dir(rq),
+ rq_is_sync(rq));
if (rq_is_meta(rq)) {
WARN_ON(!cfqq->meta_pending);
cfqq->meta_pending--;
@@ -1476,6 +1493,13 @@ static void cfq_merged_request(struct request_queue *q, struct request *req,
}
}
+static void cfq_bio_merged(struct request_queue *q, struct request *req,
+ struct bio *bio)
+{
+ blkiocg_update_io_merged_stats(&(RQ_CFQG(req))->blkg, bio_data_dir(bio),
+ cfq_bio_sync(bio));
+}
+
static void
cfq_merged_requests(struct request_queue *q, struct request *rq,
struct request *next)
@@ -1493,6 +1517,8 @@ cfq_merged_requests(struct request_queue *q, struct request *rq,
if (cfqq->next_rq == next)
cfqq->next_rq = rq;
cfq_remove_request(next);
+ blkiocg_update_io_merged_stats(&(RQ_CFQG(rq))->blkg, rq_data_dir(next),
+ rq_is_sync(next));
}
static int cfq_allow_merge(struct request_queue *q, struct request *rq,
@@ -1520,18 +1546,24 @@ static int cfq_allow_merge(struct request_queue *q, struct request *rq,
return cfqq == RQ_CFQQ(rq);
}
+static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+ del_timer(&cfqd->idle_slice_timer);
+ blkiocg_update_idle_time_stats(&cfqq->cfqg->blkg);
+}
+
static void __cfq_set_active_queue(struct cfq_data *cfqd,
struct cfq_queue *cfqq)
{
if (cfqq) {
cfq_log_cfqq(cfqd, cfqq, "set_active wl_prio:%d wl_type:%d",
cfqd->serving_prio, cfqd->serving_type);
+ blkiocg_update_avg_queue_size_stats(&cfqq->cfqg->blkg);
cfqq->slice_start = 0;
cfqq->dispatch_start = jiffies;
cfqq->allocated_slice = 0;
cfqq->slice_end = 0;
cfqq->slice_dispatch = 0;
- cfqq->nr_sectors = 0;
cfq_clear_cfqq_wait_request(cfqq);
cfq_clear_cfqq_must_dispatch(cfqq);
@@ -1539,7 +1571,7 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd,
cfq_clear_cfqq_fifo_expire(cfqq);
cfq_mark_cfqq_slice_new(cfqq);
- del_timer(&cfqd->idle_slice_timer);
+ cfq_del_timer(cfqd, cfqq);
}
cfqd->active_queue = cfqq;
@@ -1555,7 +1587,7 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
if (cfq_cfqq_wait_request(cfqq))
- del_timer(&cfqd->idle_slice_timer);
+ cfq_del_timer(cfqd, cfqq);
cfq_clear_cfqq_wait_request(cfqq);
cfq_clear_cfqq_wait_busy(cfqq);
@@ -1857,6 +1889,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
sl = cfqd->cfq_slice_idle;
mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
+ blkiocg_update_set_idle_time_stats(&cfqq->cfqg->blkg);
cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu", sl);
}
@@ -1876,7 +1909,8 @@ static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
elv_dispatch_sort(q, rq);
cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
- cfqq->nr_sectors += blk_rq_sectors(rq);
+ blkiocg_update_dispatch_stats(&cfqq->cfqg->blkg, blk_rq_bytes(rq),
+ rq_data_dir(rq), rq_is_sync(rq));
}
/*
@@ -3185,11 +3219,14 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
if (cfq_cfqq_wait_request(cfqq)) {
if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
cfqd->busy_queues > 1) {
- del_timer(&cfqd->idle_slice_timer);
+ cfq_del_timer(cfqd, cfqq);
cfq_clear_cfqq_wait_request(cfqq);
__blk_run_queue(cfqd->queue);
- } else
+ } else {
+ blkiocg_update_idle_time_stats(
+ &cfqq->cfqg->blkg);
cfq_mark_cfqq_must_dispatch(cfqq);
+ }
}
} else if (cfq_should_preempt(cfqd, cfqq, rq)) {
/*
@@ -3214,7 +3251,9 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq)
rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
list_add_tail(&rq->queuelist, &cfqq->fifo);
cfq_add_rq_rb(rq);
-
+ blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
+ &cfqd->serving_group->blkg, rq_data_dir(rq),
+ rq_is_sync(rq));
cfq_rq_enqueued(cfqd, cfqq, rq);
}
@@ -3300,6 +3339,9 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
WARN_ON(!cfqq->dispatched);
cfqd->rq_in_driver--;
cfqq->dispatched--;
+ blkiocg_update_completion_stats(&cfqq->cfqg->blkg, rq_start_time_ns(rq),
+ rq_io_start_time_ns(rq), rq_data_dir(rq),
+ rq_is_sync(rq));
cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
@@ -3440,6 +3482,10 @@ static void cfq_put_request(struct request *rq)
rq->elevator_private = NULL;
rq->elevator_private2 = NULL;
+ /* Put down rq reference on cfqg */
+ cfq_put_cfqg(RQ_CFQG(rq));
+ rq->elevator_private3 = NULL;
+
cfq_put_queue(cfqq);
}
}
@@ -3528,6 +3574,7 @@ new_queue:
rq->elevator_private = cic;
rq->elevator_private2 = cfqq;
+ rq->elevator_private3 = cfq_ref_get_cfqg(cfqq->cfqg);
return 0;
queue_fail:
@@ -3743,7 +3790,6 @@ static void *cfq_init_queue(struct request_queue *q)
* second, in order to have larger depth for async operations.
*/
cfqd->last_delayed_sync = jiffies - HZ;
- INIT_RCU_HEAD(&cfqd->rcu);
return cfqd;
}
@@ -3872,6 +3918,7 @@ static struct elevator_type iosched_cfq = {
.elevator_merged_fn = cfq_merged_request,
.elevator_merge_req_fn = cfq_merged_requests,
.elevator_allow_merge_fn = cfq_allow_merge,
+ .elevator_bio_merged_fn = cfq_bio_merged,
.elevator_dispatch_fn = cfq_dispatch_requests,
.elevator_add_req_fn = cfq_insert_request,
.elevator_activate_req_fn = cfq_activate_request,
diff --git a/block/elevator.c b/block/elevator.c
index 76e3702d5381..6df2b5056b51 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -539,6 +539,15 @@ void elv_merge_requests(struct request_queue *q, struct request *rq,
q->last_merge = rq;
}
+void elv_bio_merged(struct request_queue *q, struct request *rq,
+ struct bio *bio)
+{
+ struct elevator_queue *e = q->elevator;
+
+ if (e->ops->elevator_bio_merged_fn)
+ e->ops->elevator_bio_merged_fn(q, rq, bio);
+}
+
void elv_requeue_request(struct request_queue *q, struct request *rq)
{
/*
@@ -921,6 +930,7 @@ int elv_register_queue(struct request_queue *q)
}
return error;
}
+EXPORT_SYMBOL(elv_register_queue);
static void __elv_unregister_queue(struct elevator_queue *e)
{
@@ -933,6 +943,7 @@ void elv_unregister_queue(struct request_queue *q)
if (q)
__elv_unregister_queue(q->elevator);
}
+EXPORT_SYMBOL(elv_unregister_queue);
void elv_register(struct elevator_type *e)
{
diff --git a/block/genhd.c b/block/genhd.c
index d13ba76a169c..59a2db6fecef 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -596,6 +596,7 @@ struct gendisk *get_gendisk(dev_t devt, int *partno)
return disk;
}
+EXPORT_SYMBOL(get_gendisk);
/**
* bdget_disk - do bdget() by gendisk and partition number
@@ -987,7 +988,6 @@ int disk_expand_part_tbl(struct gendisk *disk, int partno)
if (!new_ptbl)
return -ENOMEM;
- INIT_RCU_HEAD(&new_ptbl->rcu_head);
new_ptbl->len = target;
for (i = 0; i < len; i++)
diff --git a/block/ioctl.c b/block/ioctl.c
index 8905d2a2a717..e8eb679f2f9b 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -126,7 +126,7 @@ static int blk_ioctl_discard(struct block_device *bdev, uint64_t start,
if (start + len > (bdev->bd_inode->i_size >> 9))
return -EINVAL;
return blkdev_issue_discard(bdev, start, len, GFP_KERNEL,
- DISCARD_FL_WAIT);
+ BLKDEV_IFL_WAIT);
}
static int put_ushort(unsigned long arg, unsigned short val)
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 403857ad06d4..9d9434f08c92 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -28,7 +28,7 @@ config CRYPTO_FIPS
This options enables the fips boot option which is
required if you want to system to operate in a FIPS 200
certification. You should say no unless you know what
- this is. Note that CRYPTO_ANSI_CPRNG is requred if this
+ this is. Note that CRYPTO_ANSI_CPRNG is required if this
option is selected
config CRYPTO_ALGAPI
diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
index fe980dae1727..98a66103f4f2 100644
--- a/crypto/ablkcipher.c
+++ b/crypto/ablkcipher.c
@@ -24,10 +24,287 @@
#include <linux/slab.h>
#include <linux/seq_file.h>
+#include <crypto/scatterwalk.h>
+
#include "internal.h"
static const char *skcipher_default_geniv __read_mostly;
+struct ablkcipher_buffer {
+ struct list_head entry;
+ struct scatter_walk dst;
+ unsigned int len;
+ void *data;
+};
+
+enum {
+ ABLKCIPHER_WALK_SLOW = 1 << 0,
+};
+
+static inline void ablkcipher_buffer_write(struct ablkcipher_buffer *p)
+{
+ scatterwalk_copychunks(p->data, &p->dst, p->len, 1);
+}
+
+void __ablkcipher_walk_complete(struct ablkcipher_walk *walk)
+{
+ struct ablkcipher_buffer *p, *tmp;
+
+ list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
+ ablkcipher_buffer_write(p);
+ list_del(&p->entry);
+ kfree(p);
+ }
+}
+EXPORT_SYMBOL_GPL(__ablkcipher_walk_complete);
+
+static inline void ablkcipher_queue_write(struct ablkcipher_walk *walk,
+ struct ablkcipher_buffer *p)
+{
+ p->dst = walk->out;
+ list_add_tail(&p->entry, &walk->buffers);
+}
+
+/* Get a spot of the specified length that does not straddle a page.
+ * The caller needs to ensure that there is enough space for this operation.
+ */
+static inline u8 *ablkcipher_get_spot(u8 *start, unsigned int len)
+{
+ u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);
+ return max(start, end_page);
+}
+
+static inline unsigned int ablkcipher_done_slow(struct ablkcipher_walk *walk,
+ unsigned int bsize)
+{
+ unsigned int n = bsize;
+
+ for (;;) {
+ unsigned int len_this_page = scatterwalk_pagelen(&walk->out);
+
+ if (len_this_page > n)
+ len_this_page = n;
+ scatterwalk_advance(&walk->out, n);
+ if (n == len_this_page)
+ break;
+ n -= len_this_page;
+ scatterwalk_start(&walk->out, scatterwalk_sg_next(walk->out.sg));
+ }
+
+ return bsize;
+}
+
+static inline unsigned int ablkcipher_done_fast(struct ablkcipher_walk *walk,
+ unsigned int n)
+{
+ scatterwalk_advance(&walk->in, n);
+ scatterwalk_advance(&walk->out, n);
+
+ return n;
+}
+
+static int ablkcipher_walk_next(struct ablkcipher_request *req,
+ struct ablkcipher_walk *walk);
+
+int ablkcipher_walk_done(struct ablkcipher_request *req,
+ struct ablkcipher_walk *walk, int err)
+{
+ struct crypto_tfm *tfm = req->base.tfm;
+ unsigned int nbytes = 0;
+
+ if (likely(err >= 0)) {
+ unsigned int n = walk->nbytes - err;
+
+ if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW)))
+ n = ablkcipher_done_fast(walk, n);
+ else if (WARN_ON(err)) {
+ err = -EINVAL;
+ goto err;
+ } else
+ n = ablkcipher_done_slow(walk, n);
+
+ nbytes = walk->total - n;
+ err = 0;
+ }
+
+ scatterwalk_done(&walk->in, 0, nbytes);
+ scatterwalk_done(&walk->out, 1, nbytes);
+
+err:
+ walk->total = nbytes;
+ walk->nbytes = nbytes;
+
+ if (nbytes) {
+ crypto_yield(req->base.flags);
+ return ablkcipher_walk_next(req, walk);
+ }
+
+ if (walk->iv != req->info)
+ memcpy(req->info, walk->iv, tfm->crt_ablkcipher.ivsize);
+ if (walk->iv_buffer)
+ kfree(walk->iv_buffer);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(ablkcipher_walk_done);
+
+static inline int ablkcipher_next_slow(struct ablkcipher_request *req,
+ struct ablkcipher_walk *walk,
+ unsigned int bsize,
+ unsigned int alignmask,
+ void **src_p, void **dst_p)
+{
+ unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);
+ struct ablkcipher_buffer *p;
+ void *src, *dst, *base;
+ unsigned int n;
+
+ n = ALIGN(sizeof(struct ablkcipher_buffer), alignmask + 1);
+ n += (aligned_bsize * 3 - (alignmask + 1) +
+ (alignmask & ~(crypto_tfm_ctx_alignment() - 1)));
+
+ p = kmalloc(n, GFP_ATOMIC);
+ if (!p)
+ ablkcipher_walk_done(req, walk, -ENOMEM);
+
+ base = p + 1;
+
+ dst = (u8 *)ALIGN((unsigned long)base, alignmask + 1);
+ src = dst = ablkcipher_get_spot(dst, bsize);
+
+ p->len = bsize;
+ p->data = dst;
+
+ scatterwalk_copychunks(src, &walk->in, bsize, 0);
+
+ ablkcipher_queue_write(walk, p);
+
+ walk->nbytes = bsize;
+ walk->flags |= ABLKCIPHER_WALK_SLOW;
+
+ *src_p = src;
+ *dst_p = dst;
+
+ return 0;
+}
+
+static inline int ablkcipher_copy_iv(struct ablkcipher_walk *walk,
+ struct crypto_tfm *tfm,
+ unsigned int alignmask)
+{
+ unsigned bs = walk->blocksize;
+ unsigned int ivsize = tfm->crt_ablkcipher.ivsize;
+ unsigned aligned_bs = ALIGN(bs, alignmask + 1);
+ unsigned int size = aligned_bs * 2 + ivsize + max(aligned_bs, ivsize) -
+ (alignmask + 1);
+ u8 *iv;
+
+ size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
+ walk->iv_buffer = kmalloc(size, GFP_ATOMIC);
+ if (!walk->iv_buffer)
+ return -ENOMEM;
+
+ iv = (u8 *)ALIGN((unsigned long)walk->iv_buffer, alignmask + 1);
+ iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
+ iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
+ iv = ablkcipher_get_spot(iv, ivsize);
+
+ walk->iv = memcpy(iv, walk->iv, ivsize);
+ return 0;
+}
+
+static inline int ablkcipher_next_fast(struct ablkcipher_request *req,
+ struct ablkcipher_walk *walk)
+{
+ walk->src.page = scatterwalk_page(&walk->in);
+ walk->src.offset = offset_in_page(walk->in.offset);
+ walk->dst.page = scatterwalk_page(&walk->out);
+ walk->dst.offset = offset_in_page(walk->out.offset);
+
+ return 0;
+}
+
+static int ablkcipher_walk_next(struct ablkcipher_request *req,
+ struct ablkcipher_walk *walk)
+{
+ struct crypto_tfm *tfm = req->base.tfm;
+ unsigned int alignmask, bsize, n;
+ void *src, *dst;
+ int err;
+
+ alignmask = crypto_tfm_alg_alignmask(tfm);
+ n = walk->total;
+ if (unlikely(n < crypto_tfm_alg_blocksize(tfm))) {
+ req->base.flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
+ return ablkcipher_walk_done(req, walk, -EINVAL);
+ }
+
+ walk->flags &= ~ABLKCIPHER_WALK_SLOW;
+ src = dst = NULL;
+
+ bsize = min(walk->blocksize, n);
+ n = scatterwalk_clamp(&walk->in, n);
+ n = scatterwalk_clamp(&walk->out, n);
+
+ if (n < bsize ||
+ !scatterwalk_aligned(&walk->in, alignmask) ||
+ !scatterwalk_aligned(&walk->out, alignmask)) {
+ err = ablkcipher_next_slow(req, walk, bsize, alignmask,
+ &src, &dst);
+ goto set_phys_lowmem;
+ }
+
+ walk->nbytes = n;
+
+ return ablkcipher_next_fast(req, walk);
+
+set_phys_lowmem:
+ if (err >= 0) {
+ walk->src.page = virt_to_page(src);
+ walk->dst.page = virt_to_page(dst);
+ walk->src.offset = ((unsigned long)src & (PAGE_SIZE - 1));
+ walk->dst.offset = ((unsigned long)dst & (PAGE_SIZE - 1));
+ }
+
+ return err;
+}
+
+static int ablkcipher_walk_first(struct ablkcipher_request *req,
+ struct ablkcipher_walk *walk)
+{
+ struct crypto_tfm *tfm = req->base.tfm;
+ unsigned int alignmask;
+
+ alignmask = crypto_tfm_alg_alignmask(tfm);
+ if (WARN_ON_ONCE(in_irq()))
+ return -EDEADLK;
+
+ walk->nbytes = walk->total;
+ if (unlikely(!walk->total))
+ return 0;
+
+ walk->iv_buffer = NULL;
+ walk->iv = req->info;
+ if (unlikely(((unsigned long)walk->iv & alignmask))) {
+ int err = ablkcipher_copy_iv(walk, tfm, alignmask);
+ if (err)
+ return err;
+ }
+
+ scatterwalk_start(&walk->in, walk->in.sg);
+ scatterwalk_start(&walk->out, walk->out.sg);
+
+ return ablkcipher_walk_next(req, walk);
+}
+
+int ablkcipher_walk_phys(struct ablkcipher_request *req,
+ struct ablkcipher_walk *walk)
+{
+ walk->blocksize = crypto_tfm_alg_blocksize(req->base.tfm);
+ return ablkcipher_walk_first(req, walk);
+}
+EXPORT_SYMBOL_GPL(ablkcipher_walk_phys);
+
static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
unsigned int keylen)
{
diff --git a/crypto/algapi.c b/crypto/algapi.c
index 76fae27ed01c..c3cf1a69a47a 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -544,7 +544,7 @@ int crypto_init_spawn2(struct crypto_spawn *spawn, struct crypto_alg *alg,
{
int err = -EINVAL;
- if (frontend && (alg->cra_flags ^ frontend->type) & frontend->maskset)
+ if ((alg->cra_flags ^ frontend->type) & frontend->maskset)
goto out;
spawn->frontend = frontend;
diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c
index f9cdf04fe7c0..7f2c00a45205 100644
--- a/crypto/async_tx/async_tx.c
+++ b/crypto/async_tx/async_tx.c
@@ -81,18 +81,13 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
struct dma_device *device = chan->device;
struct dma_async_tx_descriptor *intr_tx = (void *) ~0;
- #ifdef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
- BUG();
- #endif
-
/* first check to see if we can still append to depend_tx */
- spin_lock_bh(&depend_tx->lock);
- if (depend_tx->parent && depend_tx->chan == tx->chan) {
- tx->parent = depend_tx;
- depend_tx->next = tx;
+ txd_lock(depend_tx);
+ if (txd_parent(depend_tx) && depend_tx->chan == tx->chan) {
+ txd_chain(depend_tx, tx);
intr_tx = NULL;
}
- spin_unlock_bh(&depend_tx->lock);
+ txd_unlock(depend_tx);
/* attached dependency, flush the parent channel */
if (!intr_tx) {
@@ -111,24 +106,22 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
if (intr_tx) {
intr_tx->callback = NULL;
intr_tx->callback_param = NULL;
- tx->parent = intr_tx;
- /* safe to set ->next outside the lock since we know we are
+ /* safe to chain outside the lock since we know we are
* not submitted yet
*/
- intr_tx->next = tx;
+ txd_chain(intr_tx, tx);
/* check if we need to append */
- spin_lock_bh(&depend_tx->lock);
- if (depend_tx->parent) {
- intr_tx->parent = depend_tx;
- depend_tx->next = intr_tx;
+ txd_lock(depend_tx);
+ if (txd_parent(depend_tx)) {
+ txd_chain(depend_tx, intr_tx);
async_tx_ack(intr_tx);
intr_tx = NULL;
}
- spin_unlock_bh(&depend_tx->lock);
+ txd_unlock(depend_tx);
if (intr_tx) {
- intr_tx->parent = NULL;
+ txd_clear_parent(intr_tx);
intr_tx->tx_submit(intr_tx);
async_tx_ack(intr_tx);
}
@@ -176,21 +169,20 @@ async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
* 2/ dependencies are 1:1 i.e. two transactions can
* not depend on the same parent
*/
- BUG_ON(async_tx_test_ack(depend_tx) || depend_tx->next ||
- tx->parent);
+ BUG_ON(async_tx_test_ack(depend_tx) || txd_next(depend_tx) ||
+ txd_parent(tx));
/* the lock prevents async_tx_run_dependencies from missing
* the setting of ->next when ->parent != NULL
*/
- spin_lock_bh(&depend_tx->lock);
- if (depend_tx->parent) {
+ txd_lock(depend_tx);
+ if (txd_parent(depend_tx)) {
/* we have a parent so we can not submit directly
* if we are staying on the same channel: append
* else: channel switch
*/
if (depend_tx->chan == chan) {
- tx->parent = depend_tx;
- depend_tx->next = tx;
+ txd_chain(depend_tx, tx);
s = ASYNC_TX_SUBMITTED;
} else
s = ASYNC_TX_CHANNEL_SWITCH;
@@ -203,7 +195,7 @@ async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
else
s = ASYNC_TX_CHANNEL_SWITCH;
}
- spin_unlock_bh(&depend_tx->lock);
+ txd_unlock(depend_tx);
switch (s) {
case ASYNC_TX_SUBMITTED:
@@ -212,12 +204,12 @@ async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
async_tx_channel_switch(depend_tx, tx);
break;
case ASYNC_TX_DIRECT_SUBMIT:
- tx->parent = NULL;
+ txd_clear_parent(tx);
tx->tx_submit(tx);
break;
}
} else {
- tx->parent = NULL;
+ txd_clear_parent(tx);
tx->tx_submit(tx);
}
diff --git a/crypto/authenc.c b/crypto/authenc.c
index 05eb32e0d949..b9884ee0adb6 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -181,6 +181,7 @@ static void authenc_verify_ahash_update_done(struct crypto_async_request *areq,
struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
+ unsigned int cryptlen = req->cryptlen;
if (err)
goto out;
@@ -196,6 +197,7 @@ static void authenc_verify_ahash_update_done(struct crypto_async_request *areq,
goto out;
authsize = crypto_aead_authsize(authenc);
+ cryptlen -= authsize;
ihash = ahreq->result + authsize;
scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
authsize, 0);
@@ -209,7 +211,7 @@ static void authenc_verify_ahash_update_done(struct crypto_async_request *areq,
ablkcipher_request_set_callback(abreq, aead_request_flags(req),
req->base.complete, req->base.data);
ablkcipher_request_set_crypt(abreq, req->src, req->dst,
- req->cryptlen, req->iv);
+ cryptlen, req->iv);
err = crypto_ablkcipher_decrypt(abreq);
@@ -228,11 +230,13 @@ static void authenc_verify_ahash_done(struct crypto_async_request *areq,
struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
+ unsigned int cryptlen = req->cryptlen;
if (err)
goto out;
authsize = crypto_aead_authsize(authenc);
+ cryptlen -= authsize;
ihash = ahreq->result + authsize;
scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
authsize, 0);
@@ -246,7 +250,7 @@ static void authenc_verify_ahash_done(struct crypto_async_request *areq,
ablkcipher_request_set_callback(abreq, aead_request_flags(req),
req->base.complete, req->base.data);
ablkcipher_request_set_crypt(abreq, req->src, req->dst,
- req->cryptlen, req->iv);
+ cryptlen, req->iv);
err = crypto_ablkcipher_decrypt(abreq);
diff --git a/crypto/internal.h b/crypto/internal.h
index 2d226362e594..d4384b08ab29 100644
--- a/crypto/internal.h
+++ b/crypto/internal.h
@@ -6,7 +6,7 @@
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
+ * Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
index 80201241b698..247178cb98ec 100644
--- a/crypto/pcrypt.c
+++ b/crypto/pcrypt.c
@@ -315,16 +315,13 @@ out_free_inst:
goto out;
}
-static struct crypto_instance *pcrypt_alloc_aead(struct rtattr **tb)
+static struct crypto_instance *pcrypt_alloc_aead(struct rtattr **tb,
+ u32 type, u32 mask)
{
struct crypto_instance *inst;
struct crypto_alg *alg;
- struct crypto_attr_type *algt;
-
- algt = crypto_get_attr_type(tb);
- alg = crypto_get_attr_alg(tb, algt->type,
- (algt->mask & CRYPTO_ALG_TYPE_MASK));
+ alg = crypto_get_attr_alg(tb, type, (mask & CRYPTO_ALG_TYPE_MASK));
if (IS_ERR(alg))
return ERR_CAST(alg);
@@ -365,7 +362,7 @@ static struct crypto_instance *pcrypt_alloc(struct rtattr **tb)
switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
case CRYPTO_ALG_TYPE_AEAD:
- return pcrypt_alloc_aead(tb);
+ return pcrypt_alloc_aead(tb, algt->type, algt->mask);
}
return ERR_PTR(-EINVAL);
diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c
index 3de89a424401..41e529af0773 100644
--- a/crypto/scatterwalk.c
+++ b/crypto/scatterwalk.c
@@ -68,7 +68,7 @@ static void scatterwalk_pagedone(struct scatter_walk *walk, int out,
void scatterwalk_done(struct scatter_walk *walk, int out, int more)
{
- if (!offset_in_page(walk->offset) || !more)
+ if (!(scatterwalk_pagelen(walk) & (PAGE_SIZE - 1)) || !more)
scatterwalk_pagedone(walk, out, more);
}
EXPORT_SYMBOL_GPL(scatterwalk_done);
diff --git a/crypto/shash.c b/crypto/shash.c
index 91f7b9d83881..22fd9433141f 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -37,7 +37,7 @@ static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key,
u8 *buffer, *alignbuffer;
int err;
- absize = keylen + (alignmask & ~(CRYPTO_MINALIGN - 1));
+ absize = keylen + (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
buffer = kmalloc(absize, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index a35159947a26..3ca68f9fc14d 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -394,6 +394,17 @@ out:
return 0;
}
+static void test_hash_sg_init(struct scatterlist *sg)
+{
+ int i;
+
+ sg_init_table(sg, TVMEMSIZE);
+ for (i = 0; i < TVMEMSIZE; i++) {
+ sg_set_buf(sg + i, tvmem[i], PAGE_SIZE);
+ memset(tvmem[i], 0xff, PAGE_SIZE);
+ }
+}
+
static void test_hash_speed(const char *algo, unsigned int sec,
struct hash_speed *speed)
{
@@ -423,12 +434,7 @@ static void test_hash_speed(const char *algo, unsigned int sec,
goto out;
}
- sg_init_table(sg, TVMEMSIZE);
- for (i = 0; i < TVMEMSIZE; i++) {
- sg_set_buf(sg + i, tvmem[i], PAGE_SIZE);
- memset(tvmem[i], 0xff, PAGE_SIZE);
- }
-
+ test_hash_sg_init(sg);
for (i = 0; speed[i].blen != 0; i++) {
if (speed[i].blen > TVMEMSIZE * PAGE_SIZE) {
printk(KERN_ERR
@@ -437,6 +443,9 @@ static void test_hash_speed(const char *algo, unsigned int sec,
goto out;
}
+ if (speed[i].klen)
+ crypto_hash_setkey(tfm, tvmem[0], speed[i].klen);
+
printk(KERN_INFO "test%3u "
"(%5u byte blocks,%5u bytes per update,%4u updates): ",
i, speed[i].blen, speed[i].plen, speed[i].blen / speed[i].plen);
@@ -458,6 +467,250 @@ out:
crypto_free_hash(tfm);
}
+struct tcrypt_result {
+ struct completion completion;
+ int err;
+};
+
+static void tcrypt_complete(struct crypto_async_request *req, int err)
+{
+ struct tcrypt_result *res = req->data;
+
+ if (err == -EINPROGRESS)
+ return;
+
+ res->err = err;
+ complete(&res->completion);
+}
+
+static inline int do_one_ahash_op(struct ahash_request *req, int ret)
+{
+ if (ret == -EINPROGRESS || ret == -EBUSY) {
+ struct tcrypt_result *tr = req->base.data;
+
+ ret = wait_for_completion_interruptible(&tr->completion);
+ if (!ret)
+ ret = tr->err;
+ INIT_COMPLETION(tr->completion);
+ }
+ return ret;
+}
+
+static int test_ahash_jiffies_digest(struct ahash_request *req, int blen,
+ char *out, int sec)
+{
+ unsigned long start, end;
+ int bcount;
+ int ret;
+
+ for (start = jiffies, end = start + sec * HZ, bcount = 0;
+ time_before(jiffies, end); bcount++) {
+ ret = do_one_ahash_op(req, crypto_ahash_digest(req));
+ if (ret)
+ return ret;
+ }
+
+ printk("%6u opers/sec, %9lu bytes/sec\n",
+ bcount / sec, ((long)bcount * blen) / sec);
+
+ return 0;
+}
+
+static int test_ahash_jiffies(struct ahash_request *req, int blen,
+ int plen, char *out, int sec)
+{
+ unsigned long start, end;
+ int bcount, pcount;
+ int ret;
+
+ if (plen == blen)
+ return test_ahash_jiffies_digest(req, blen, out, sec);
+
+ for (start = jiffies, end = start + sec * HZ, bcount = 0;
+ time_before(jiffies, end); bcount++) {
+ ret = crypto_ahash_init(req);
+ if (ret)
+ return ret;
+ for (pcount = 0; pcount < blen; pcount += plen) {
+ ret = do_one_ahash_op(req, crypto_ahash_update(req));
+ if (ret)
+ return ret;
+ }
+ /* we assume there is enough space in 'out' for the result */
+ ret = do_one_ahash_op(req, crypto_ahash_final(req));
+ if (ret)
+ return ret;
+ }
+
+ pr_cont("%6u opers/sec, %9lu bytes/sec\n",
+ bcount / sec, ((long)bcount * blen) / sec);
+
+ return 0;
+}
+
+static int test_ahash_cycles_digest(struct ahash_request *req, int blen,
+ char *out)
+{
+ unsigned long cycles = 0;
+ int ret, i;
+
+ /* Warm-up run. */
+ for (i = 0; i < 4; i++) {
+ ret = do_one_ahash_op(req, crypto_ahash_digest(req));
+ if (ret)
+ goto out;
+ }
+
+ /* The real thing. */
+ for (i = 0; i < 8; i++) {
+ cycles_t start, end;
+
+ start = get_cycles();
+
+ ret = do_one_ahash_op(req, crypto_ahash_digest(req));
+ if (ret)
+ goto out;
+
+ end = get_cycles();
+
+ cycles += end - start;
+ }
+
+out:
+ if (ret)
+ return ret;
+
+ pr_cont("%6lu cycles/operation, %4lu cycles/byte\n",
+ cycles / 8, cycles / (8 * blen));
+
+ return 0;
+}
+
+static int test_ahash_cycles(struct ahash_request *req, int blen,
+ int plen, char *out)
+{
+ unsigned long cycles = 0;
+ int i, pcount, ret;
+
+ if (plen == blen)
+ return test_ahash_cycles_digest(req, blen, out);
+
+ /* Warm-up run. */
+ for (i = 0; i < 4; i++) {
+ ret = crypto_ahash_init(req);
+ if (ret)
+ goto out;
+ for (pcount = 0; pcount < blen; pcount += plen) {
+ ret = do_one_ahash_op(req, crypto_ahash_update(req));
+ if (ret)
+ goto out;
+ }
+ ret = do_one_ahash_op(req, crypto_ahash_final(req));
+ if (ret)
+ goto out;
+ }
+
+ /* The real thing. */
+ for (i = 0; i < 8; i++) {
+ cycles_t start, end;
+
+ start = get_cycles();
+
+ ret = crypto_ahash_init(req);
+ if (ret)
+ goto out;
+ for (pcount = 0; pcount < blen; pcount += plen) {
+ ret = do_one_ahash_op(req, crypto_ahash_update(req));
+ if (ret)
+ goto out;
+ }
+ ret = do_one_ahash_op(req, crypto_ahash_final(req));
+ if (ret)
+ goto out;
+
+ end = get_cycles();
+
+ cycles += end - start;
+ }
+
+out:
+ if (ret)
+ return ret;
+
+ pr_cont("%6lu cycles/operation, %4lu cycles/byte\n",
+ cycles / 8, cycles / (8 * blen));
+
+ return 0;
+}
+
+static void test_ahash_speed(const char *algo, unsigned int sec,
+ struct hash_speed *speed)
+{
+ struct scatterlist sg[TVMEMSIZE];
+ struct tcrypt_result tresult;
+ struct ahash_request *req;
+ struct crypto_ahash *tfm;
+ static char output[1024];
+ int i, ret;
+
+ printk(KERN_INFO "\ntesting speed of async %s\n", algo);
+
+ tfm = crypto_alloc_ahash(algo, 0, 0);
+ if (IS_ERR(tfm)) {
+ pr_err("failed to load transform for %s: %ld\n",
+ algo, PTR_ERR(tfm));
+ return;
+ }
+
+ if (crypto_ahash_digestsize(tfm) > sizeof(output)) {
+ pr_err("digestsize(%u) > outputbuffer(%zu)\n",
+ crypto_ahash_digestsize(tfm), sizeof(output));
+ goto out;
+ }
+
+ test_hash_sg_init(sg);
+ req = ahash_request_alloc(tfm, GFP_KERNEL);
+ if (!req) {
+ pr_err("ahash request allocation failure\n");
+ goto out;
+ }
+
+ init_completion(&tresult.completion);
+ ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ tcrypt_complete, &tresult);
+
+ for (i = 0; speed[i].blen != 0; i++) {
+ if (speed[i].blen > TVMEMSIZE * PAGE_SIZE) {
+ pr_err("template (%u) too big for tvmem (%lu)\n",
+ speed[i].blen, TVMEMSIZE * PAGE_SIZE);
+ break;
+ }
+
+ pr_info("test%3u "
+ "(%5u byte blocks,%5u bytes per update,%4u updates): ",
+ i, speed[i].blen, speed[i].plen, speed[i].blen / speed[i].plen);
+
+ ahash_request_set_crypt(req, sg, output, speed[i].plen);
+
+ if (sec)
+ ret = test_ahash_jiffies(req, speed[i].blen,
+ speed[i].plen, output, sec);
+ else
+ ret = test_ahash_cycles(req, speed[i].blen,
+ speed[i].plen, output);
+
+ if (ret) {
+ pr_err("hashing failed ret=%d\n", ret);
+ break;
+ }
+ }
+
+ ahash_request_free(req);
+
+out:
+ crypto_free_ahash(tfm);
+}
+
static void test_available(void)
{
char **name = check;
@@ -881,9 +1134,87 @@ static int do_test(int m)
test_hash_speed("rmd320", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
+ case 318:
+ test_hash_speed("ghash-generic", sec, hash_speed_template_16);
+ if (mode > 300 && mode < 400) break;
+
case 399:
break;
+ case 400:
+ /* fall through */
+
+ case 401:
+ test_ahash_speed("md4", sec, generic_hash_speed_template);
+ if (mode > 400 && mode < 500) break;
+
+ case 402:
+ test_ahash_speed("md5", sec, generic_hash_speed_template);
+ if (mode > 400 && mode < 500) break;
+
+ case 403:
+ test_ahash_speed("sha1", sec, generic_hash_speed_template);
+ if (mode > 400 && mode < 500) break;
+
+ case 404:
+ test_ahash_speed("sha256", sec, generic_hash_speed_template);
+ if (mode > 400 && mode < 500) break;
+
+ case 405:
+ test_ahash_speed("sha384", sec, generic_hash_speed_template);
+ if (mode > 400 && mode < 500) break;
+
+ case 406:
+ test_ahash_speed("sha512", sec, generic_hash_speed_template);
+ if (mode > 400 && mode < 500) break;
+
+ case 407:
+ test_ahash_speed("wp256", sec, generic_hash_speed_template);
+ if (mode > 400 && mode < 500) break;
+
+ case 408:
+ test_ahash_speed("wp384", sec, generic_hash_speed_template);
+ if (mode > 400 && mode < 500) break;
+
+ case 409:
+ test_ahash_speed("wp512", sec, generic_hash_speed_template);
+ if (mode > 400 && mode < 500) break;
+
+ case 410:
+ test_ahash_speed("tgr128", sec, generic_hash_speed_template);
+ if (mode > 400 && mode < 500) break;
+
+ case 411:
+ test_ahash_speed("tgr160", sec, generic_hash_speed_template);
+ if (mode > 400 && mode < 500) break;
+
+ case 412:
+ test_ahash_speed("tgr192", sec, generic_hash_speed_template);
+ if (mode > 400 && mode < 500) break;
+
+ case 413:
+ test_ahash_speed("sha224", sec, generic_hash_speed_template);
+ if (mode > 400 && mode < 500) break;
+
+ case 414:
+ test_ahash_speed("rmd128", sec, generic_hash_speed_template);
+ if (mode > 400 && mode < 500) break;
+
+ case 415:
+ test_ahash_speed("rmd160", sec, generic_hash_speed_template);
+ if (mode > 400 && mode < 500) break;
+
+ case 416:
+ test_ahash_speed("rmd256", sec, generic_hash_speed_template);
+ if (mode > 400 && mode < 500) break;
+
+ case 417:
+ test_ahash_speed("rmd320", sec, generic_hash_speed_template);
+ if (mode > 400 && mode < 500) break;
+
+ case 499:
+ break;
+
case 1000:
test_available();
break;
diff --git a/crypto/tcrypt.h b/crypto/tcrypt.h
index 966bbfaf95b1..10cb925132c9 100644
--- a/crypto/tcrypt.h
+++ b/crypto/tcrypt.h
@@ -25,6 +25,7 @@ struct cipher_speed_template {
struct hash_speed {
unsigned int blen; /* buffer length */
unsigned int plen; /* per-update length */
+ unsigned int klen; /* key length */
};
/*
@@ -83,4 +84,32 @@ static struct hash_speed generic_hash_speed_template[] = {
{ .blen = 0, .plen = 0, }
};
+static struct hash_speed hash_speed_template_16[] = {
+ { .blen = 16, .plen = 16, .klen = 16, },
+ { .blen = 64, .plen = 16, .klen = 16, },
+ { .blen = 64, .plen = 64, .klen = 16, },
+ { .blen = 256, .plen = 16, .klen = 16, },
+ { .blen = 256, .plen = 64, .klen = 16, },
+ { .blen = 256, .plen = 256, .klen = 16, },
+ { .blen = 1024, .plen = 16, .klen = 16, },
+ { .blen = 1024, .plen = 256, .klen = 16, },
+ { .blen = 1024, .plen = 1024, .klen = 16, },
+ { .blen = 2048, .plen = 16, .klen = 16, },
+ { .blen = 2048, .plen = 256, .klen = 16, },
+ { .blen = 2048, .plen = 1024, .klen = 16, },
+ { .blen = 2048, .plen = 2048, .klen = 16, },
+ { .blen = 4096, .plen = 16, .klen = 16, },
+ { .blen = 4096, .plen = 256, .klen = 16, },
+ { .blen = 4096, .plen = 1024, .klen = 16, },
+ { .blen = 4096, .plen = 4096, .klen = 16, },
+ { .blen = 8192, .plen = 16, .klen = 16, },
+ { .blen = 8192, .plen = 256, .klen = 16, },
+ { .blen = 8192, .plen = 1024, .klen = 16, },
+ { .blen = 8192, .plen = 4096, .klen = 16, },
+ { .blen = 8192, .plen = 8192, .klen = 16, },
+
+ /* End marker */
+ { .blen = 0, .plen = 0, .klen = 0, }
+};
+
#endif /* _CRYPTO_TCRYPT_H */
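The new klen field feeds the crypto_hash_setkey() call added to test_hash_speed(), so keyed hashes such as ghash get a key installed before the timing loop runs. A small standalone model of walking such a template (plain C, shortened table, hypothetical setkey stand-in):

	#include <stdio.h>

	struct hash_speed { unsigned int blen, plen, klen; };

	/* Shortened stand-in for hash_speed_template_16. */
	static const struct hash_speed tmpl[] = {
		{ .blen = 16,  .plen = 16, .klen = 16 },
		{ .blen = 256, .plen = 64, .klen = 16 },
		{ 0, 0, 0 },			/* end marker */
	};

	int main(void)
	{
		for (int i = 0; tmpl[i].blen != 0; i++) {
			if (tmpl[i].klen)	/* keyed algorithms get a key first */
				printf("setkey(%u bytes)\n", tmpl[i].klen);
			printf("time blen=%u plen=%u\n", tmpl[i].blen, tmpl[i].plen);
		}
		return 0;
	}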
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index c494d7610be1..5c8aaa0cb0b9 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -153,8 +153,21 @@ static void testmgr_free_buf(char *buf[XBUFSIZE])
free_page((unsigned long)buf[i]);
}
+static int do_one_async_hash_op(struct ahash_request *req,
+ struct tcrypt_result *tr,
+ int ret)
+{
+ if (ret == -EINPROGRESS || ret == -EBUSY) {
+ ret = wait_for_completion_interruptible(&tr->completion);
+ if (!ret)
+ ret = tr->err;
+ INIT_COMPLETION(tr->completion);
+ }
+ return ret;
+}
+
static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
- unsigned int tcount)
+ unsigned int tcount, bool use_digest)
{
const char *algo = crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm));
unsigned int i, j, k, temp;
@@ -206,23 +219,36 @@ static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
}
ahash_request_set_crypt(req, sg, result, template[i].psize);
- ret = crypto_ahash_digest(req);
- switch (ret) {
- case 0:
- break;
- case -EINPROGRESS:
- case -EBUSY:
- ret = wait_for_completion_interruptible(
- &tresult.completion);
- if (!ret && !(ret = tresult.err)) {
- INIT_COMPLETION(tresult.completion);
- break;
+ if (use_digest) {
+ ret = do_one_async_hash_op(req, &tresult,
+ crypto_ahash_digest(req));
+ if (ret) {
+ pr_err("alg: hash: digest failed on test %d "
+ "for %s: ret=%d\n", j, algo, -ret);
+ goto out;
+ }
+ } else {
+ ret = do_one_async_hash_op(req, &tresult,
+ crypto_ahash_init(req));
+ if (ret) {
+ pr_err("alt: hash: init failed on test %d "
+ "for %s: ret=%d\n", j, algo, -ret);
+ goto out;
+ }
+ ret = do_one_async_hash_op(req, &tresult,
+ crypto_ahash_update(req));
+ if (ret) {
+ pr_err("alt: hash: update failed on test %d "
+ "for %s: ret=%d\n", j, algo, -ret);
+ goto out;
+ }
+ ret = do_one_async_hash_op(req, &tresult,
+ crypto_ahash_final(req));
+ if (ret) {
+ pr_err("alt: hash: final failed on test %d "
+ "for %s: ret=%d\n", j, algo, -ret);
+ goto out;
}
- /* fall through */
- default:
- printk(KERN_ERR "alg: hash: digest failed on test %d "
- "for %s: ret=%d\n", j, algo, -ret);
- goto out;
}
if (memcmp(result, template[i].digest,
@@ -1402,7 +1428,11 @@ static int alg_test_hash(const struct alg_test_desc *desc, const char *driver,
return PTR_ERR(tfm);
}
- err = test_hash(tfm, desc->suite.hash.vecs, desc->suite.hash.count);
+ err = test_hash(tfm, desc->suite.hash.vecs,
+ desc->suite.hash.count, true);
+ if (!err)
+ err = test_hash(tfm, desc->suite.hash.vecs,
+ desc->suite.hash.count, false);
crypto_free_ahash(tfm);
return err;
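Running every vector twice, once through the one-shot digest path and once through init/update/final, checks that both entry points of an async hash produce the same result. A toy standalone analogue of that equivalence check (plain C; the byte-sum "hash" is only a stand-in for the real ahash API):

	#include <stdio.h>

	/* Toy stand-in for a hash: byte sum. Additive, so chunking is exact. */
	static unsigned int toy_digest(const unsigned char *p, size_t n)
	{
		unsigned int s = 0;

		while (n--)
			s += *p++;
		return s;
	}

	int main(void)
	{
		const unsigned char msg[] = "abcabcabc";
		unsigned int one_shot, chunked = 0;

		/* One-shot: the whole buffer in a single call. */
		one_shot = toy_digest(msg, sizeof(msg) - 1);

		/* Chunked: "init" (state = 0), "update" per chunk, "final" (read state). */
		for (size_t off = 0; off < sizeof(msg) - 1; off += 3)
			chunked += toy_digest(msg + off, 3);

		printf("%s\n", one_shot == chunked ? "paths agree" : "MISMATCH");
		return 0;
	}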
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index fb765173d41c..74e35377fd30 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -1669,17 +1669,73 @@ static struct hash_testvec aes_xcbc128_tv_template[] = {
}
};
-#define VMAC_AES_TEST_VECTORS 1
-static char vmac_string[128] = {'\x01', '\x01', '\x01', '\x01',
+#define VMAC_AES_TEST_VECTORS 8
+static char vmac_string1[128] = {'\x01', '\x01', '\x01', '\x01',
'\x02', '\x03', '\x02', '\x02',
'\x02', '\x04', '\x01', '\x07',
'\x04', '\x01', '\x04', '\x03',};
+static char vmac_string2[128] = {'a', 'b', 'c',};
+static char vmac_string3[128] = {'a', 'b', 'c', 'a', 'b', 'c',
+ 'a', 'b', 'c', 'a', 'b', 'c',
+ 'a', 'b', 'c', 'a', 'b', 'c',
+ 'a', 'b', 'c', 'a', 'b', 'c',
+ 'a', 'b', 'c', 'a', 'b', 'c',
+ 'a', 'b', 'c', 'a', 'b', 'c',
+ 'a', 'b', 'c', 'a', 'b', 'c',
+ 'a', 'b', 'c', 'a', 'b', 'c',
+ };
+
static struct hash_testvec aes_vmac128_tv_template[] = {
{
+ .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ .plaintext = NULL,
+ .digest = "\x07\x58\x80\x35\x77\xa4\x7b\x54",
+ .psize = 0,
+ .ksize = 16,
+ }, {
+ .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ .plaintext = vmac_string1,
+ .digest = "\xce\xf5\x3c\xd3\xae\x68\x8c\xa1",
+ .psize = 128,
+ .ksize = 16,
+ }, {
+ .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ .plaintext = vmac_string2,
+ .digest = "\xc9\x27\xb0\x73\x81\xbd\x14\x2d",
+ .psize = 128,
+ .ksize = 16,
+ }, {
.key = "\x00\x01\x02\x03\x04\x05\x06\x07"
"\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
- .plaintext = vmac_string,
- .digest = "\xcb\xd7\x8a\xfd\xb7\x33\x79\xe7",
+ .plaintext = vmac_string3,
+ .digest = "\x8d\x1a\x95\x8c\x98\x47\x0b\x19",
+ .psize = 128,
+ .ksize = 16,
+ }, {
+ .key = "abcdefghijklmnop",
+ .plaintext = NULL,
+ .digest = "\x3b\x89\xa1\x26\x9e\x55\x8f\x84",
+ .psize = 0,
+ .ksize = 16,
+ }, {
+ .key = "abcdefghijklmnop",
+ .plaintext = vmac_string1,
+ .digest = "\xab\x5e\xab\xb0\xf6\x8d\x74\xc2",
+ .psize = 128,
+ .ksize = 16,
+ }, {
+ .key = "abcdefghijklmnop",
+ .plaintext = vmac_string2,
+ .digest = "\x11\x15\x68\x42\x3d\x7b\x09\xdf",
+ .psize = 128,
+ .ksize = 16,
+ }, {
+ .key = "abcdefghijklmnop",
+ .plaintext = vmac_string3,
+ .digest = "\x8b\x32\x8f\xe1\xed\x8f\xfa\xd4",
.psize = 128,
.ksize = 16,
},
diff --git a/crypto/vmac.c b/crypto/vmac.c
index 0a9468e575de..0999274a27ac 100644
--- a/crypto/vmac.c
+++ b/crypto/vmac.c
@@ -43,6 +43,8 @@ const u64 m63 = UINT64_C(0x7fffffffffffffff); /* 63-bit mask */
const u64 m64 = UINT64_C(0xffffffffffffffff); /* 64-bit mask */
const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */
+#define pe64_to_cpup le64_to_cpup /* Prefer little endian */
+
#ifdef __LITTLE_ENDIAN
#define INDEX_HIGH 1
#define INDEX_LOW 0
@@ -110,8 +112,8 @@ const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */
int i; u64 th, tl; \
rh = rl = 0; \
for (i = 0; i < nw; i += 2) { \
- MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i], \
- le64_to_cpup((mp)+i+1)+(kp)[i+1]); \
+ MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \
+ pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \
ADD128(rh, rl, th, tl); \
} \
} while (0)
@@ -121,11 +123,11 @@ const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */
int i; u64 th, tl; \
rh1 = rl1 = rh = rl = 0; \
for (i = 0; i < nw; i += 2) { \
- MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i], \
- le64_to_cpup((mp)+i+1)+(kp)[i+1]); \
+ MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \
+ pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \
ADD128(rh, rl, th, tl); \
- MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i+2], \
- le64_to_cpup((mp)+i+1)+(kp)[i+3]); \
+ MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i+2], \
+ pe64_to_cpup((mp)+i+1)+(kp)[i+3]); \
ADD128(rh1, rl1, th, tl); \
} \
} while (0)
@@ -136,17 +138,17 @@ const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */
int i; u64 th, tl; \
rh = rl = 0; \
for (i = 0; i < nw; i += 8) { \
- MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i], \
- le64_to_cpup((mp)+i+1)+(kp)[i+1]); \
+ MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \
+ pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \
ADD128(rh, rl, th, tl); \
- MUL64(th, tl, le64_to_cpup((mp)+i+2)+(kp)[i+2], \
- le64_to_cpup((mp)+i+3)+(kp)[i+3]); \
+ MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+2], \
+ pe64_to_cpup((mp)+i+3)+(kp)[i+3]); \
ADD128(rh, rl, th, tl); \
- MUL64(th, tl, le64_to_cpup((mp)+i+4)+(kp)[i+4], \
- le64_to_cpup((mp)+i+5)+(kp)[i+5]); \
+ MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+4], \
+ pe64_to_cpup((mp)+i+5)+(kp)[i+5]); \
ADD128(rh, rl, th, tl); \
- MUL64(th, tl, le64_to_cpup((mp)+i+6)+(kp)[i+6], \
- le64_to_cpup((mp)+i+7)+(kp)[i+7]); \
+ MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+6], \
+ pe64_to_cpup((mp)+i+7)+(kp)[i+7]); \
ADD128(rh, rl, th, tl); \
} \
} while (0)
@@ -156,29 +158,29 @@ const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */
int i; u64 th, tl; \
rh1 = rl1 = rh = rl = 0; \
for (i = 0; i < nw; i += 8) { \
- MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i], \
- le64_to_cpup((mp)+i+1)+(kp)[i+1]); \
+ MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \
+ pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \
ADD128(rh, rl, th, tl); \
- MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i+2], \
- le64_to_cpup((mp)+i+1)+(kp)[i+3]); \
+ MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i+2], \
+ pe64_to_cpup((mp)+i+1)+(kp)[i+3]); \
ADD128(rh1, rl1, th, tl); \
- MUL64(th, tl, le64_to_cpup((mp)+i+2)+(kp)[i+2], \
- le64_to_cpup((mp)+i+3)+(kp)[i+3]); \
+ MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+2], \
+ pe64_to_cpup((mp)+i+3)+(kp)[i+3]); \
ADD128(rh, rl, th, tl); \
- MUL64(th, tl, le64_to_cpup((mp)+i+2)+(kp)[i+4], \
- le64_to_cpup((mp)+i+3)+(kp)[i+5]); \
+ MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+4], \
+ pe64_to_cpup((mp)+i+3)+(kp)[i+5]); \
ADD128(rh1, rl1, th, tl); \
- MUL64(th, tl, le64_to_cpup((mp)+i+4)+(kp)[i+4], \
- le64_to_cpup((mp)+i+5)+(kp)[i+5]); \
+ MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+4], \
+ pe64_to_cpup((mp)+i+5)+(kp)[i+5]); \
ADD128(rh, rl, th, tl); \
- MUL64(th, tl, le64_to_cpup((mp)+i+4)+(kp)[i+6], \
- le64_to_cpup((mp)+i+5)+(kp)[i+7]); \
+ MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+6], \
+ pe64_to_cpup((mp)+i+5)+(kp)[i+7]); \
ADD128(rh1, rl1, th, tl); \
- MUL64(th, tl, le64_to_cpup((mp)+i+6)+(kp)[i+6], \
- le64_to_cpup((mp)+i+7)+(kp)[i+7]); \
+ MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+6], \
+ pe64_to_cpup((mp)+i+7)+(kp)[i+7]); \
ADD128(rh, rl, th, tl); \
- MUL64(th, tl, le64_to_cpup((mp)+i+6)+(kp)[i+8], \
- le64_to_cpup((mp)+i+7)+(kp)[i+9]); \
+ MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+8], \
+ pe64_to_cpup((mp)+i+7)+(kp)[i+9]); \
ADD128(rh1, rl1, th, tl); \
} \
} while (0)
@@ -216,8 +218,8 @@ const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */
int i; \
rh = rl = t = 0; \
for (i = 0; i < nw; i += 2) { \
- t1 = le64_to_cpup(mp+i) + kp[i]; \
- t2 = le64_to_cpup(mp+i+1) + kp[i+1]; \
+ t1 = pe64_to_cpup(mp+i) + kp[i]; \
+ t2 = pe64_to_cpup(mp+i+1) + kp[i+1]; \
m2 = MUL32(t1 >> 32, t2); \
m1 = MUL32(t1, t2 >> 32); \
ADD128(rh, rl, MUL32(t1 >> 32, t2 >> 32), \
@@ -322,8 +324,7 @@ static void vhash_abort(struct vmac_ctx *ctx)
ctx->first_block_processed = 0;
}
-static u64 l3hash(u64 p1, u64 p2,
- u64 k1, u64 k2, u64 len)
+static u64 l3hash(u64 p1, u64 p2, u64 k1, u64 k2, u64 len)
{
u64 rh, rl, t, z = 0;
@@ -474,7 +475,7 @@ static u64 vmac(unsigned char m[], unsigned int mbytes,
}
p = be64_to_cpup(out_p + i);
h = vhash(m, mbytes, (u64 *)0, &ctx->__vmac_ctx);
- return p + h;
+ return le64_to_cpu(p + h);
}
static int vmac_set_key(unsigned char user_key[], struct vmac_ctx_t *ctx)
@@ -549,10 +550,6 @@ static int vmac_setkey(struct crypto_shash *parent,
static int vmac_init(struct shash_desc *pdesc)
{
- struct crypto_shash *parent = pdesc->tfm;
- struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);
-
- memset(&ctx->__vmac_ctx, 0, sizeof(struct vmac_ctx));
return 0;
}
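pe64_to_cpup is aliased to le64_to_cpup because VMAC's reference definition reads message words as little-endian regardless of host byte order, and the final tag is folded through le64_to_cpu for the same reason. A portable standalone little-endian 64-bit load, shown here without the kernel helpers:

	#include <stdio.h>
	#include <stdint.h>

	/* Portable little-endian 64-bit load, independent of host byte order. */
	static uint64_t load_le64(const unsigned char *p)
	{
		uint64_t v = 0;

		for (int i = 7; i >= 0; i--)
			v = (v << 8) | p[i];
		return v;
	}

	int main(void)
	{
		const unsigned char block[8] = { 0x01, 0x02, 0x03, 0x04,
						 0x05, 0x06, 0x07, 0x08 };

		printf("0x%016llx\n", (unsigned long long)load_le64(block));
		return 0;	/* prints 0x0807060504030201 on any host */
	}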
diff --git a/drivers/Makefile b/drivers/Makefile
index f42a03029b7c..91874e048552 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_PCI) += pci/
obj-$(CONFIG_PARISC) += parisc/
obj-$(CONFIG_RAPIDIO) += rapidio/
obj-y += video/
+obj-y += idle/
obj-$(CONFIG_ACPI) += acpi/
obj-$(CONFIG_SFI) += sfi/
# PnP must come after ACPI since it will eventually need to check if acpi
@@ -91,7 +92,6 @@ obj-$(CONFIG_EISA) += eisa/
obj-y += lguest/
obj-$(CONFIG_CPU_FREQ) += cpufreq/
obj-$(CONFIG_CPU_IDLE) += cpuidle/
-obj-y += idle/
obj-$(CONFIG_MMC) += mmc/
obj-$(CONFIG_MEMSTICK) += memstick/
obj-$(CONFIG_NEW_LEDS) += leds/
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 93d2c7971df6..746411518802 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -360,4 +360,13 @@ config ACPI_SBS
To compile this driver as a module, choose M here:
the modules will be called sbs and sbshc.
+config ACPI_HED
+ tristate "Hardware Error Device"
+ help
+ This driver supports the Hardware Error Device (PNP0C33),
+ which is used to report some hardware errors notified via
+ SCI, mainly the corrected errors.
+
+source "drivers/acpi/apei/Kconfig"
+
endif # ACPI
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index a8d8998dd5c5..6ee33169e1dc 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -19,7 +19,7 @@ obj-y += acpi.o \
# All the builtin files are in the "acpi." module_param namespace.
acpi-y += osl.o utils.o reboot.o
-acpi-y += hest.o
+acpi-y += atomicio.o
# sleep related files
acpi-y += wakeup.o
@@ -59,6 +59,7 @@ obj-$(CONFIG_ACPI_BATTERY) += battery.o
obj-$(CONFIG_ACPI_SBS) += sbshc.o
obj-$(CONFIG_ACPI_SBS) += sbs.o
obj-$(CONFIG_ACPI_POWER_METER) += power_meter.o
+obj-$(CONFIG_ACPI_HED) += hed.o
# processor has its own "processor." module_param namespace
processor-y := processor_driver.o processor_throttling.o
@@ -66,3 +67,5 @@ processor-y += processor_idle.o processor_thermal.o
processor-$(CONFIG_CPU_FREQ) += processor_perflib.o
obj-$(CONFIG_ACPI_PROCESSOR_AGGREGATOR) += acpi_pad.o
+
+obj-$(CONFIG_ACPI_APEI) += apei/
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index 62122134693b..d269a8f3329c 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -43,6 +43,10 @@ static DEFINE_MUTEX(isolated_cpus_lock);
#define CPUID5_ECX_EXTENSIONS_SUPPORTED (0x1)
#define CPUID5_ECX_INTERRUPT_BREAK (0x2)
static unsigned long power_saving_mwait_eax;
+
+static unsigned char tsc_detected_unstable;
+static unsigned char tsc_marked_unstable;
+
static void power_saving_mwait_init(void)
{
unsigned int eax, ebx, ecx, edx;
@@ -87,8 +91,8 @@ static void power_saving_mwait_init(void)
/*FALL THROUGH*/
default:
- /* TSC could halt in idle, so notify users */
- mark_tsc_unstable("TSC halts in idle");
+ /* TSC could halt in idle */
+ tsc_detected_unstable = 1;
}
#endif
}
@@ -168,16 +172,14 @@ static int power_saving_thread(void *data)
do_sleep = 0;
- current_thread_info()->status &= ~TS_POLLING;
- /*
- * TS_POLLING-cleared state must be visible before we test
- * NEED_RESCHED:
- */
- smp_mb();
-
expire_time = jiffies + HZ * (100 - idle_pct) / 100;
while (!need_resched()) {
+ if (tsc_detected_unstable && !tsc_marked_unstable) {
+ /* TSC could halt in idle, so notify users */
+ mark_tsc_unstable("TSC halts in idle");
+ tsc_marked_unstable = 1;
+ }
local_irq_disable();
cpu = smp_processor_id();
clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER,
@@ -200,8 +202,6 @@ static int power_saving_thread(void *data)
}
}
- current_thread_info()->status |= TS_POLLING;
-
/*
* current sched_rt has threshold for rt task running time.
* When a rt task uses 95% CPU time, the rt thread will be
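The acpi_pad hunk splits detection from action: the MWAIT probe only records that the TSC may halt in idle, and the power-saving kthread later calls mark_tsc_unstable() exactly once, guarded by a second flag. A minimal standalone model of that detect-then-act-once pattern (plain C, single-threaded for simplicity):

	#include <stdio.h>

	/* Detect in one context, act exactly once later in another. */
	static unsigned char detected;	/* set by the probe */
	static unsigned char handled;	/* set once the action has run */

	static void probe(void)
	{
		detected = 1;		/* e.g. "TSC could halt in idle" */
	}

	static void worker_iteration(void)
	{
		if (detected && !handled) {
			printf("marking TSC unstable (once)\n");
			handled = 1;
		}
	}

	int main(void)
	{
		probe();
		for (int i = 0; i < 3; i++)
			worker_iteration();	/* action fires only on the first pass */
		return 0;
	}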
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile
index 7423052ece5a..d93cc06f4bf8 100644
--- a/drivers/acpi/acpica/Makefile
+++ b/drivers/acpi/acpica/Makefile
@@ -14,12 +14,12 @@ acpi-y := dsfield.o dsmthdat.o dsopcode.o dswexec.o dswscope.o \
acpi-y += evevent.o evregion.o evsci.o evxfevnt.o \
evmisc.o evrgnini.o evxface.o evxfregn.o \
- evgpe.o evgpeblk.o
+ evgpe.o evgpeblk.o evgpeinit.o evgpeutil.o
acpi-y += exconfig.o exfield.o exnames.o exoparg6.o exresolv.o exstorob.o\
exconvrt.o exfldio.o exoparg1.o exprep.o exresop.o exsystem.o\
excreate.o exmisc.o exoparg2.o exregion.o exstore.o exutils.o \
- exdump.o exmutex.o exoparg3.o exresnte.o exstoren.o
+ exdump.o exmutex.o exoparg3.o exresnte.o exstoren.o exdebug.o
acpi-y += hwacpi.o hwgpe.o hwregs.o hwsleep.o hwxface.o hwvalid.o
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index 3e6ba99e4053..64d1e5c2d4ae 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -73,8 +73,10 @@ acpi_ev_queue_notify_request(struct acpi_namespace_node *node,
u32 notify_value);
/*
- * evgpe - GPE handling and dispatch
+ * evgpe - Low-level GPE support
*/
+u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info *gpe_xrupt_list);
+
acpi_status
acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info);
@@ -85,19 +87,13 @@ acpi_status acpi_ev_disable_gpe(struct acpi_gpe_event_info *gpe_event_info);
struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
u32 gpe_number);
+struct acpi_gpe_event_info *acpi_ev_low_get_gpe_info(u32 gpe_number,
+ struct acpi_gpe_block_info
+ *gpe_block);
+
/*
- * evgpeblk
+ * evgpeblk - Upper-level GPE block support
*/
-u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info);
-
-acpi_status
-acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback, void *context);
-
-acpi_status
-acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
- struct acpi_gpe_block_info *gpe_block,
- void *context);
-
acpi_status
acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
struct acpi_generic_address *gpe_block_address,
@@ -116,12 +112,37 @@ u32
acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info,
u32 gpe_number);
-u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info *gpe_xrupt_list);
+/*
+ * evgpeinit - GPE initialization and update
+ */
+acpi_status acpi_ev_gpe_initialize(void);
+
+void acpi_ev_update_gpes(acpi_owner_id table_owner_id);
acpi_status
-acpi_ev_check_for_wake_only_gpe(struct acpi_gpe_event_info *gpe_event_info);
+acpi_ev_match_gpe_method(acpi_handle obj_handle,
+ u32 level, void *context, void **return_value);
-acpi_status acpi_ev_gpe_initialize(void);
+acpi_status
+acpi_ev_match_prw_and_gpe(acpi_handle obj_handle,
+ u32 level, void *context, void **return_value);
+
+/*
+ * evgpeutil - GPE utilities
+ */
+acpi_status
+acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback, void *context);
+
+u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info);
+
+struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32 interrupt_number);
+
+acpi_status acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt);
+
+acpi_status
+acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
+ struct acpi_gpe_block_info *gpe_block,
+ void *context);
/*
* evregion - Address Space handling
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index f8dd8f250ac4..9070f1fe8f17 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -112,6 +112,19 @@ u8 ACPI_INIT_GLOBAL(acpi_gbl_leave_wake_gpes_disabled, TRUE);
*/
u8 ACPI_INIT_GLOBAL(acpi_gbl_use_default_register_widths, TRUE);
+/*
+ * Optionally enable output from the AML Debug Object.
+ */
+u8 ACPI_INIT_GLOBAL(acpi_gbl_enable_aml_debug_object, FALSE);
+
+/*
+ * Optionally copy the entire DSDT to local memory (instead of simply
+ * mapping it.) There are some BIOSs that corrupt or replace the original
+ * DSDT, creating the need for this option. Default is FALSE, do not copy
+ * the DSDT.
+ */
+u8 ACPI_INIT_GLOBAL(acpi_gbl_copy_dsdt_locally, FALSE);
+
/* acpi_gbl_FADT is a local copy of the FADT, converted to a common format. */
struct acpi_table_fadt acpi_gbl_FADT;
@@ -145,11 +158,10 @@ ACPI_EXTERN u32 acpi_gbl_trace_dbg_layer;
****************************************************************************/
/*
- * acpi_gbl_root_table_list is the master list of ACPI tables found in the
- * RSDT/XSDT.
- *
+ * acpi_gbl_root_table_list is the master list of ACPI tables that were
+ * found in the RSDT/XSDT.
*/
-ACPI_EXTERN struct acpi_internal_rsdt acpi_gbl_root_table_list;
+ACPI_EXTERN struct acpi_table_list acpi_gbl_root_table_list;
ACPI_EXTERN struct acpi_table_facs *acpi_gbl_FACS;
/* These addresses are calculated from the FADT Event Block addresses */
@@ -160,6 +172,11 @@ ACPI_EXTERN struct acpi_generic_address acpi_gbl_xpm1a_enable;
ACPI_EXTERN struct acpi_generic_address acpi_gbl_xpm1b_status;
ACPI_EXTERN struct acpi_generic_address acpi_gbl_xpm1b_enable;
+/* DSDT information. Used to check for DSDT corruption */
+
+ACPI_EXTERN struct acpi_table_header *acpi_gbl_DSDT;
+ACPI_EXTERN struct acpi_table_header acpi_gbl_original_dsdt_header;
+
/*
* Handle both ACPI 1.0 and ACPI 2.0 Integer widths. The integer width is
* determined by the revision of the DSDT: If the DSDT revision is less than
diff --git a/drivers/acpi/acpica/acinterp.h b/drivers/acpi/acpica/acinterp.h
index 6df3f8428168..049e203bd621 100644
--- a/drivers/acpi/acpica/acinterp.h
+++ b/drivers/acpi/acpica/acinterp.h
@@ -121,6 +121,13 @@ acpi_ex_convert_to_target_type(acpi_object_type destination_type,
struct acpi_walk_state *walk_state);
/*
+ * exdebug - AML debug object
+ */
+void
+acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
+ u32 level, u32 index);
+
+/*
* exfield - ACPI AML (p-code) execution - field manipulation
*/
acpi_status
@@ -274,7 +281,7 @@ acpi_status
acpi_ex_system_do_notify_op(union acpi_operand_object *value,
union acpi_operand_object *obj_desc);
-acpi_status acpi_ex_system_do_suspend(u64 time);
+acpi_status acpi_ex_system_do_sleep(u64 time);
acpi_status acpi_ex_system_do_stall(u32 time);
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 24b8faa5c395..147a7e6bd38f 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -213,12 +213,12 @@ struct acpi_namespace_node {
#define ANOBJ_IS_BIT_OFFSET 0x40 /* i_aSL only: Reference is a bit offset */
#define ANOBJ_IS_REFERENCED 0x80 /* i_aSL only: Object was referenced */
-/* One internal RSDT for table management */
+/* Internal ACPI table management - master table list */
-struct acpi_internal_rsdt {
- struct acpi_table_desc *tables;
- u32 count;
- u32 size;
+struct acpi_table_list {
+ struct acpi_table_desc *tables; /* Table descriptor array */
+ u32 current_table_count; /* Tables currently in the array */
+ u32 max_table_count; /* Max tables array will hold */
u8 flags;
};
@@ -427,8 +427,8 @@ struct acpi_gpe_event_info {
struct acpi_gpe_register_info *register_info; /* Backpointer to register info */
u8 flags; /* Misc info about this GPE */
u8 gpe_number; /* This GPE */
- u8 runtime_count;
- u8 wakeup_count;
+ u8 runtime_count; /* References to a run GPE */
+ u8 wakeup_count; /* References to a wake GPE */
};
/* Information about a GPE register pair, one per each status/enable pair in an array */
@@ -454,6 +454,7 @@ struct acpi_gpe_block_info {
struct acpi_gpe_event_info *event_info; /* One for each GPE */
struct acpi_generic_address block_address; /* Base address of the block */
u32 register_count; /* Number of register pairs in block */
+ u16 gpe_count; /* Number of individual GPEs in block */
u8 block_base_number; /* Base GPE number for this block */
};
@@ -469,6 +470,10 @@ struct acpi_gpe_xrupt_info {
struct acpi_gpe_walk_info {
struct acpi_namespace_node *gpe_device;
struct acpi_gpe_block_info *gpe_block;
+ u16 count;
+ acpi_owner_id owner_id;
+ u8 enable_this_gpe;
+ u8 execute_by_owner_id;
};
struct acpi_gpe_device_info {
diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h
index 8ff3b741df28..62a576e34361 100644
--- a/drivers/acpi/acpica/actables.h
+++ b/drivers/acpi/acpica/actables.h
@@ -107,6 +107,10 @@ u8 acpi_tb_checksum(u8 *buffer, u32 length);
acpi_status
acpi_tb_verify_checksum(struct acpi_table_header *table, u32 length);
+void acpi_tb_check_dsdt_header(void);
+
+struct acpi_table_header *acpi_tb_copy_dsdt(u32 table_index);
+
void
acpi_tb_install_table(acpi_physical_address address,
char *signature, u32 table_index);
diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c
index bb13817e0c31..347bee1726f1 100644
--- a/drivers/acpi/acpica/dsfield.c
+++ b/drivers/acpi/acpica/dsfield.c
@@ -323,7 +323,7 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
default:
ACPI_ERROR((AE_INFO,
- "Invalid opcode in field list: %X",
+ "Invalid opcode in field list: 0x%X",
arg->common.aml_opcode));
return_ACPI_STATUS(AE_AML_BAD_OPCODE);
}
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index 721039233aa7..2a9a561c2f01 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -225,7 +225,7 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
(walk_state->thread->current_sync_level >
obj_desc->method.mutex->mutex.sync_level)) {
ACPI_ERROR((AE_INFO,
- "Cannot acquire Mutex for method [%4.4s], current SyncLevel is too large (%d)",
+ "Cannot acquire Mutex for method [%4.4s], current SyncLevel is too large (%u)",
acpi_ut_get_node_name(method_node),
walk_state->thread->current_sync_level));
diff --git a/drivers/acpi/acpica/dsmthdat.c b/drivers/acpi/acpica/dsmthdat.c
index cc343b959540..f3d52f59250b 100644
--- a/drivers/acpi/acpica/dsmthdat.c
+++ b/drivers/acpi/acpica/dsmthdat.c
@@ -262,7 +262,7 @@ acpi_ds_method_data_get_node(u8 type,
if (index > ACPI_METHOD_MAX_LOCAL) {
ACPI_ERROR((AE_INFO,
- "Local index %d is invalid (max %d)",
+ "Local index %u is invalid (max %u)",
index, ACPI_METHOD_MAX_LOCAL));
return_ACPI_STATUS(AE_AML_INVALID_INDEX);
}
@@ -276,7 +276,7 @@ acpi_ds_method_data_get_node(u8 type,
if (index > ACPI_METHOD_MAX_ARG) {
ACPI_ERROR((AE_INFO,
- "Arg index %d is invalid (max %d)",
+ "Arg index %u is invalid (max %u)",
index, ACPI_METHOD_MAX_ARG));
return_ACPI_STATUS(AE_AML_INVALID_INDEX);
}
@@ -287,7 +287,7 @@ acpi_ds_method_data_get_node(u8 type,
break;
default:
- ACPI_ERROR((AE_INFO, "Type %d is invalid", type));
+ ACPI_ERROR((AE_INFO, "Type %u is invalid", type));
return_ACPI_STATUS(AE_TYPE);
}
@@ -424,7 +424,7 @@ acpi_ds_method_data_get_value(u8 type,
case ACPI_REFCLASS_ARG:
ACPI_ERROR((AE_INFO,
- "Uninitialized Arg[%d] at node %p",
+ "Uninitialized Arg[%u] at node %p",
index, node));
return_ACPI_STATUS(AE_AML_UNINITIALIZED_ARG);
@@ -440,7 +440,7 @@ acpi_ds_method_data_get_value(u8 type,
default:
ACPI_ERROR((AE_INFO,
- "Not a Arg/Local opcode: %X",
+ "Not a Arg/Local opcode: 0x%X",
type));
return_ACPI_STATUS(AE_AML_INTERNAL);
}
diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c
index 891e08bf560b..3607adcaf085 100644
--- a/drivers/acpi/acpica/dsobject.c
+++ b/drivers/acpi/acpica/dsobject.c
@@ -288,7 +288,7 @@ acpi_ds_build_internal_buffer_obj(struct acpi_walk_state *walk_state,
if (byte_list) {
if (byte_list->common.aml_opcode != AML_INT_BYTELIST_OP) {
ACPI_ERROR((AE_INFO,
- "Expecting bytelist, got AML opcode %X in op %p",
+ "Expecting bytelist, found AML opcode 0x%X in op %p",
byte_list->common.aml_opcode, byte_list));
acpi_ut_remove_reference(obj_desc);
@@ -511,7 +511,7 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
}
ACPI_INFO((AE_INFO,
- "Actual Package length (0x%X) is larger than NumElements field (0x%X), truncated\n",
+ "Actual Package length (%u) is larger than NumElements field (%u), truncated\n",
i, element_count));
} else if (i < element_count) {
/*
@@ -519,7 +519,7 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
* Note: this is not an error, the package is padded out with NULLs.
*/
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Package List length (0x%X) smaller than NumElements count (0x%X), padded with null elements\n",
+ "Package List length (%u) smaller than NumElements count (%u), padded with null elements\n",
i, element_count));
}
@@ -701,7 +701,7 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
default:
ACPI_ERROR((AE_INFO,
- "Unknown constant opcode %X",
+ "Unknown constant opcode 0x%X",
opcode));
status = AE_AML_OPERAND_TYPE;
break;
@@ -717,7 +717,7 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
break;
default:
- ACPI_ERROR((AE_INFO, "Unknown Integer type %X",
+ ACPI_ERROR((AE_INFO, "Unknown Integer type 0x%X",
op_info->type));
status = AE_AML_OPERAND_TYPE;
break;
@@ -806,7 +806,7 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
default:
ACPI_ERROR((AE_INFO,
- "Unimplemented reference type for AML opcode: %4.4X",
+ "Unimplemented reference type for AML opcode: 0x%4.4X",
opcode));
return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
}
@@ -816,7 +816,7 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
default:
- ACPI_ERROR((AE_INFO, "Unimplemented data type: %X",
+ ACPI_ERROR((AE_INFO, "Unimplemented data type: 0x%X",
obj_desc->common.type));
status = AE_AML_OPERAND_TYPE;
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
index bf980cadb1e8..53a7e416f33e 100644
--- a/drivers/acpi/acpica/dsopcode.c
+++ b/drivers/acpi/acpica/dsopcode.c
@@ -292,7 +292,7 @@ acpi_status acpi_ds_get_buffer_arguments(union acpi_operand_object *obj_desc)
node = obj_desc->buffer.node;
if (!node) {
ACPI_ERROR((AE_INFO,
- "No pointer back to NS node in buffer obj %p",
+ "No pointer back to namespace node in buffer object %p",
obj_desc));
return_ACPI_STATUS(AE_AML_INTERNAL);
}
@@ -336,7 +336,7 @@ acpi_status acpi_ds_get_package_arguments(union acpi_operand_object *obj_desc)
node = obj_desc->package.node;
if (!node) {
ACPI_ERROR((AE_INFO,
- "No pointer back to NS node in package %p",
+ "No pointer back to namespace node in package %p",
obj_desc));
return_ACPI_STATUS(AE_AML_INTERNAL);
}
@@ -580,7 +580,8 @@ acpi_ds_init_buffer_field(u16 aml_opcode,
default:
ACPI_ERROR((AE_INFO,
- "Unknown field creation opcode %02x", aml_opcode));
+ "Unknown field creation opcode 0x%02X",
+ aml_opcode));
status = AE_AML_BAD_OPCODE;
goto cleanup;
}
@@ -589,7 +590,7 @@ acpi_ds_init_buffer_field(u16 aml_opcode,
if ((bit_offset + bit_count) > (8 * (u32) buffer_desc->buffer.length)) {
ACPI_ERROR((AE_INFO,
- "Field [%4.4s] at %d exceeds Buffer [%4.4s] size %d (bits)",
+ "Field [%4.4s] at %u exceeds Buffer [%4.4s] size %u (bits)",
acpi_ut_get_node_name(result_desc),
bit_offset + bit_count,
acpi_ut_get_node_name(buffer_desc->buffer.node),
@@ -693,7 +694,7 @@ acpi_ds_eval_buffer_field_operands(struct acpi_walk_state *walk_state,
status = acpi_ex_resolve_operands(op->common.aml_opcode,
ACPI_WALK_OPERANDS, walk_state);
if (ACPI_FAILURE(status)) {
- ACPI_ERROR((AE_INFO, "(%s) bad operand(s) (%X)",
+ ACPI_ERROR((AE_INFO, "(%s) bad operand(s), status 0x%X",
acpi_ps_get_opcode_name(op->common.aml_opcode),
status));
@@ -1461,7 +1462,7 @@ acpi_ds_exec_end_control_op(struct acpi_walk_state * walk_state,
default:
- ACPI_ERROR((AE_INFO, "Unknown control opcode=%X Op=%p",
+ ACPI_ERROR((AE_INFO, "Unknown control opcode=0x%X Op=%p",
op->common.aml_opcode, op));
status = AE_AML_BAD_OPCODE;
diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c
index 6b76c486d784..d555b374e314 100644
--- a/drivers/acpi/acpica/dswexec.c
+++ b/drivers/acpi/acpica/dswexec.c
@@ -140,7 +140,7 @@ acpi_ds_get_predicate_value(struct acpi_walk_state *walk_state,
if (local_obj_desc->common.type != ACPI_TYPE_INTEGER) {
ACPI_ERROR((AE_INFO,
- "Bad predicate (not an integer) ObjDesc=%p State=%p Type=%X",
+ "Bad predicate (not an integer) ObjDesc=%p State=%p Type=0x%X",
obj_desc, walk_state, obj_desc->common.type));
status = AE_AML_OPERAND_TYPE;
@@ -354,7 +354,7 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
op_class = walk_state->op_info->class;
if (op_class == AML_CLASS_UNKNOWN) {
- ACPI_ERROR((AE_INFO, "Unknown opcode %X",
+ ACPI_ERROR((AE_INFO, "Unknown opcode 0x%X",
op->common.aml_opcode));
return_ACPI_STATUS(AE_NOT_IMPLEMENTED);
}
@@ -678,7 +678,7 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
default:
ACPI_ERROR((AE_INFO,
- "Unimplemented opcode, class=%X type=%X Opcode=%X Op=%p",
+ "Unimplemented opcode, class=0x%X type=0x%X Opcode=-0x%X Op=%p",
op_class, op_type, op->common.aml_opcode,
op));
diff --git a/drivers/acpi/acpica/dswstate.c b/drivers/acpi/acpica/dswstate.c
index 050df8164165..83155dd8671e 100644
--- a/drivers/acpi/acpica/dswstate.c
+++ b/drivers/acpi/acpica/dswstate.c
@@ -179,7 +179,7 @@ acpi_ds_result_push(union acpi_operand_object * object,
if (!object) {
ACPI_ERROR((AE_INFO,
- "Null Object! Obj=%p State=%p Num=%X",
+ "Null Object! Obj=%p State=%p Num=%u",
object, walk_state, walk_state->result_count));
return (AE_BAD_PARAMETER);
}
@@ -223,7 +223,7 @@ static acpi_status acpi_ds_result_stack_push(struct acpi_walk_state *walk_state)
if (((u32) walk_state->result_size + ACPI_RESULTS_FRAME_OBJ_NUM) >
ACPI_RESULTS_OBJ_NUM_MAX) {
- ACPI_ERROR((AE_INFO, "Result stack overflow: State=%p Num=%X",
+ ACPI_ERROR((AE_INFO, "Result stack overflow: State=%p Num=%u",
walk_state, walk_state->result_size));
return (AE_STACK_OVERFLOW);
}
@@ -314,7 +314,7 @@ acpi_ds_obj_stack_push(void *object, struct acpi_walk_state * walk_state)
if (walk_state->num_operands >= ACPI_OBJ_NUM_OPERANDS) {
ACPI_ERROR((AE_INFO,
- "Object stack overflow! Obj=%p State=%p #Ops=%X",
+ "Object stack overflow! Obj=%p State=%p #Ops=%u",
object, walk_state, walk_state->num_operands));
return (AE_STACK_OVERFLOW);
}
@@ -365,7 +365,7 @@ acpi_ds_obj_stack_pop(u32 pop_count, struct acpi_walk_state * walk_state)
if (walk_state->num_operands == 0) {
ACPI_ERROR((AE_INFO,
- "Object stack underflow! Count=%X State=%p #Ops=%X",
+ "Object stack underflow! Count=%X State=%p #Ops=%u",
pop_count, walk_state,
walk_state->num_operands));
return (AE_STACK_UNDERFLOW);
@@ -377,7 +377,7 @@ acpi_ds_obj_stack_pop(u32 pop_count, struct acpi_walk_state * walk_state)
walk_state->operands[walk_state->num_operands] = NULL;
}
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Count=%X State=%p #Ops=%X\n",
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Count=%X State=%p #Ops=%u\n",
pop_count, walk_state, walk_state->num_operands));
return (AE_OK);
diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c
index c1e6f472d435..f5795915a2e9 100644
--- a/drivers/acpi/acpica/evevent.c
+++ b/drivers/acpi/acpica/evevent.c
@@ -302,7 +302,7 @@ static u32 acpi_ev_fixed_event_dispatch(u32 event)
ACPI_DISABLE_EVENT);
ACPI_ERROR((AE_INFO,
- "No installed handler for fixed event [%08X]",
+ "No installed handler for fixed event [0x%08X]",
event));
return (ACPI_INTERRUPT_NOT_HANDLED);
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index 78c55508aff5..a221ad404167 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -60,7 +60,8 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context);
*
* RETURN: Status
*
- * DESCRIPTION: Updates GPE register enable masks based on the GPE type
+ * DESCRIPTION: Updates GPE register enable masks based upon whether there are
+ * references (either wake or run) to this GPE
*
******************************************************************************/
@@ -81,14 +82,20 @@ acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info)
(1 <<
(gpe_event_info->gpe_number - gpe_register_info->base_gpe_number));
+ /* Clear the wake/run bits up front */
+
ACPI_CLEAR_BIT(gpe_register_info->enable_for_wake, register_bit);
ACPI_CLEAR_BIT(gpe_register_info->enable_for_run, register_bit);
- if (gpe_event_info->runtime_count)
+ /* Set the mask bits only if there are references to this GPE */
+
+ if (gpe_event_info->runtime_count) {
ACPI_SET_BIT(gpe_register_info->enable_for_run, register_bit);
+ }
- if (gpe_event_info->wakeup_count)
+ if (gpe_event_info->wakeup_count) {
ACPI_SET_BIT(gpe_register_info->enable_for_wake, register_bit);
+ }
return_ACPI_STATUS(AE_OK);
}
@@ -101,7 +108,10 @@ acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info)
*
* RETURN: Status
*
- * DESCRIPTION: Enable a GPE based on the GPE type
+ * DESCRIPTION: Hardware-enable a GPE. Always enables the GPE, regardless
+ * of type or number of references.
+ *
+ * Note: The GPE lock should be already acquired when this function is called.
*
******************************************************************************/
@@ -109,20 +119,36 @@ acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
{
acpi_status status;
+
ACPI_FUNCTION_TRACE(ev_enable_gpe);
- /* Make sure HW enable masks are updated */
+
+ /*
+ * We will only allow a GPE to be enabled if it has either an
+ * associated method (_Lxx/_Exx) or a handler. Otherwise, the
+ * GPE will be immediately disabled by acpi_ev_gpe_dispatch the
+ * first time it fires.
+ */
+ if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK)) {
+ return_ACPI_STATUS(AE_NO_HANDLER);
+ }
+
+ /* Ensure the HW enable masks are current */
status = acpi_ev_update_gpe_enable_masks(gpe_event_info);
- if (ACPI_FAILURE(status))
+ if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
+ }
+
+ /* Clear the GPE (of stale events) */
- /* Clear the GPE (of stale events), then enable it */
status = acpi_hw_clear_gpe(gpe_event_info);
- if (ACPI_FAILURE(status))
+ if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
+ }
/* Enable the requested GPE */
+
status = acpi_hw_write_gpe_enable_reg(gpe_event_info);
return_ACPI_STATUS(status);
}
@@ -135,7 +161,10 @@ acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
*
* RETURN: Status
*
- * DESCRIPTION: Disable a GPE based on the GPE type
+ * DESCRIPTION: Hardware-disable a GPE. Always disables the requested GPE,
+ * regardless of the type or number of references.
+ *
+ * Note: The GPE lock should be already acquired when this function is called.
*
******************************************************************************/
@@ -145,24 +174,71 @@ acpi_status acpi_ev_disable_gpe(struct acpi_gpe_event_info *gpe_event_info)
ACPI_FUNCTION_TRACE(ev_disable_gpe);
- /* Make sure HW enable masks are updated */
+
+ /*
+ * Note: Always disable the GPE, even if we think that it is already
+ * disabled. It is possible that the AML or some other code has enabled
+ * the GPE behind our back.
+ */
+
+ /* Ensure the HW enable masks are current */
status = acpi_ev_update_gpe_enable_masks(gpe_event_info);
- if (ACPI_FAILURE(status))
+ if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
+ }
/*
- * Even if we don't know the GPE type, make sure that we always
- * disable it. low_disable_gpe will just clear the enable bit for this
- * GPE and write it. It will not write out the current GPE enable mask,
- * since this may inadvertently enable GPEs too early, if a rogue GPE has
- * come in during ACPICA initialization - possibly as a result of AML or
- * other code that has enabled the GPE.
+ * Always H/W disable this GPE, even if we don't know the GPE type.
+ * Simply clear the enable bit for this particular GPE, but do not
+ * write out the current GPE enable mask since this may inadvertently
+ * enable GPEs too early. An example is a rogue GPE that has arrived
+ * during ACPICA initialization - possibly because AML or other code
+ * has enabled the GPE.
*/
status = acpi_hw_low_disable_gpe(gpe_event_info);
return_ACPI_STATUS(status);
}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_low_get_gpe_info
+ *
+ * PARAMETERS: gpe_number - Raw GPE number
+ * gpe_block - A GPE info block
+ *
+ * RETURN: A GPE event_info struct. NULL if not a valid GPE (The gpe_number
+ * is not within the specified GPE block)
+ *
+ * DESCRIPTION: Returns the event_info struct associated with this GPE. This is
+ * the low-level implementation of ev_get_gpe_event_info.
+ *
+ ******************************************************************************/
+
+struct acpi_gpe_event_info *acpi_ev_low_get_gpe_info(u32 gpe_number,
+ struct acpi_gpe_block_info
+ *gpe_block)
+{
+ u32 gpe_index;
+
+ /*
+ * Validate that the gpe_number is within the specified gpe_block.
+ * (Two steps)
+ */
+ if (!gpe_block || (gpe_number < gpe_block->block_base_number)) {
+ return (NULL);
+ }
+
+ gpe_index = gpe_number - gpe_block->block_base_number;
+ if (gpe_index >= gpe_block->gpe_count) {
+ return (NULL);
+ }
+
+ return (&gpe_block->event_info[gpe_index]);
+}
+
+
/*******************************************************************************
*
* FUNCTION: acpi_ev_get_gpe_event_info
@@ -184,29 +260,23 @@ struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
u32 gpe_number)
{
union acpi_operand_object *obj_desc;
- struct acpi_gpe_block_info *gpe_block;
+ struct acpi_gpe_event_info *gpe_info;
u32 i;
ACPI_FUNCTION_ENTRY();
- /* A NULL gpe_block means use the FADT-defined GPE block(s) */
+ /* A NULL gpe_device means use the FADT-defined GPE block(s) */
if (!gpe_device) {
/* Examine GPE Block 0 and 1 (These blocks are permanent) */
for (i = 0; i < ACPI_MAX_GPE_BLOCKS; i++) {
- gpe_block = acpi_gbl_gpe_fadt_blocks[i];
- if (gpe_block) {
- if ((gpe_number >= gpe_block->block_base_number)
- && (gpe_number <
- gpe_block->block_base_number +
- (gpe_block->register_count * 8))) {
- return (&gpe_block->
- event_info[gpe_number -
- gpe_block->
- block_base_number]);
- }
+ gpe_info = acpi_ev_low_get_gpe_info(gpe_number,
+ acpi_gbl_gpe_fadt_blocks
+ [i]);
+ if (gpe_info) {
+ return (gpe_info);
}
}
@@ -223,16 +293,8 @@ struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
return (NULL);
}
- gpe_block = obj_desc->device.gpe_block;
-
- if ((gpe_number >= gpe_block->block_base_number) &&
- (gpe_number <
- gpe_block->block_base_number + (gpe_block->register_count * 8))) {
- return (&gpe_block->
- event_info[gpe_number - gpe_block->block_base_number]);
- }
-
- return (NULL);
+ return (acpi_ev_low_get_gpe_info
+ (gpe_number, obj_desc->device.gpe_block));
}
/*******************************************************************************
@@ -389,7 +451,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
return_VOID;
}
- /* Set the GPE flags for return to enabled state */
+ /* Update the GPE register masks for return to enabled state */
(void)acpi_ev_update_gpe_enable_masks(gpe_event_info);
@@ -499,7 +561,7 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
status = acpi_hw_clear_gpe(gpe_event_info);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
- "Unable to clear GPE[%2X]",
+ "Unable to clear GPE[0x%2X]",
gpe_number));
return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
}
@@ -532,7 +594,7 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
status = acpi_hw_clear_gpe(gpe_event_info);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
- "Unable to clear GPE[%2X]",
+ "Unable to clear GPE[0x%2X]",
gpe_number));
return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
}
@@ -548,7 +610,7 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
status = acpi_ev_disable_gpe(gpe_event_info);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
- "Unable to disable GPE[%2X]",
+ "Unable to disable GPE[0x%2X]",
gpe_number));
return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
}
@@ -562,27 +624,30 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
gpe_event_info);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
- "Unable to queue handler for GPE[%2X] - event disabled",
+ "Unable to queue handler for GPE[0x%2X] - event disabled",
gpe_number));
}
break;
default:
- /* No handler or method to run! */
-
+ /*
+ * No handler or method to run!
+ * 03/2010: This case should no longer be possible. We will not allow
+ * a GPE to be enabled if it has no handler or method.
+ */
ACPI_ERROR((AE_INFO,
- "No handler or method for GPE[%2X], disabling event",
+ "No handler or method for GPE[0x%2X], disabling event",
gpe_number));
/*
- * Disable the GPE. The GPE will remain disabled until the ACPICA
- * Core Subsystem is restarted, or a handler is installed.
+ * Disable the GPE. The GPE will remain disabled a handler
+ * is installed or ACPICA is restarted.
*/
status = acpi_ev_disable_gpe(gpe_event_info);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
- "Unable to disable GPE[%2X]",
+ "Unable to disable GPE[0x%2X]",
gpe_number));
return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
}
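acpi_ev_low_get_gpe_info() validates in two steps: first that gpe_number is not below the block's base (so the unsigned subtraction cannot wrap), then that the resulting index is within gpe_count. A standalone sketch of the same bounds-checked lookup (plain C, invented struct):

	#include <stdio.h>

	struct block { unsigned int base, count; };

	/* Two-step check: no unsigned wrap on (num - base), then bounds. */
	static int lookup(const struct block *b, unsigned int num)
	{
		unsigned int index;

		if (!b || num < b->base)
			return -1;

		index = num - b->base;
		if (index >= b->count)
			return -1;

		return (int)index;
	}

	int main(void)
	{
		struct block b = { .base = 0x10, .count = 16 };

		printf("%d %d %d\n", lookup(&b, 0x0f), lookup(&b, 0x10), lookup(&b, 0x20));
		return 0;	/* prints: -1 0 -1 */
	}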
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
index fef721917eaf..7c28f2d9fd35 100644
--- a/drivers/acpi/acpica/evgpeblk.c
+++ b/drivers/acpi/acpica/evgpeblk.c
@@ -51,20 +51,6 @@ ACPI_MODULE_NAME("evgpeblk")
/* Local prototypes */
static acpi_status
-acpi_ev_save_method_info(acpi_handle obj_handle,
- u32 level, void *obj_desc, void **return_value);
-
-static acpi_status
-acpi_ev_match_prw_and_gpe(acpi_handle obj_handle,
- u32 level, void *info, void **return_value);
-
-static struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32
- interrupt_number);
-
-static acpi_status
-acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt);
-
-static acpi_status
acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block,
u32 interrupt_number);
@@ -73,527 +59,6 @@ acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block);
/*******************************************************************************
*
- * FUNCTION: acpi_ev_valid_gpe_event
- *
- * PARAMETERS: gpe_event_info - Info for this GPE
- *
- * RETURN: TRUE if the gpe_event is valid
- *
- * DESCRIPTION: Validate a GPE event. DO NOT CALL FROM INTERRUPT LEVEL.
- * Should be called only when the GPE lists are semaphore locked
- * and not subject to change.
- *
- ******************************************************************************/
-
-u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info)
-{
- struct acpi_gpe_xrupt_info *gpe_xrupt_block;
- struct acpi_gpe_block_info *gpe_block;
-
- ACPI_FUNCTION_ENTRY();
-
- /* No need for spin lock since we are not changing any list elements */
-
- /* Walk the GPE interrupt levels */
-
- gpe_xrupt_block = acpi_gbl_gpe_xrupt_list_head;
- while (gpe_xrupt_block) {
- gpe_block = gpe_xrupt_block->gpe_block_list_head;
-
- /* Walk the GPE blocks on this interrupt level */
-
- while (gpe_block) {
- if ((&gpe_block->event_info[0] <= gpe_event_info) &&
- (&gpe_block->event_info[((acpi_size)
- gpe_block->
- register_count) * 8] >
- gpe_event_info)) {
- return (TRUE);
- }
-
- gpe_block = gpe_block->next;
- }
-
- gpe_xrupt_block = gpe_xrupt_block->next;
- }
-
- return (FALSE);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ev_walk_gpe_list
- *
- * PARAMETERS: gpe_walk_callback - Routine called for each GPE block
- * Context - Value passed to callback
- *
- * RETURN: Status
- *
- * DESCRIPTION: Walk the GPE lists.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback, void *context)
-{
- struct acpi_gpe_block_info *gpe_block;
- struct acpi_gpe_xrupt_info *gpe_xrupt_info;
- acpi_status status = AE_OK;
- acpi_cpu_flags flags;
-
- ACPI_FUNCTION_TRACE(ev_walk_gpe_list);
-
- flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
-
- /* Walk the interrupt level descriptor list */
-
- gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head;
- while (gpe_xrupt_info) {
-
- /* Walk all Gpe Blocks attached to this interrupt level */
-
- gpe_block = gpe_xrupt_info->gpe_block_list_head;
- while (gpe_block) {
-
- /* One callback per GPE block */
-
- status =
- gpe_walk_callback(gpe_xrupt_info, gpe_block,
- context);
- if (ACPI_FAILURE(status)) {
- if (status == AE_CTRL_END) { /* Callback abort */
- status = AE_OK;
- }
- goto unlock_and_exit;
- }
-
- gpe_block = gpe_block->next;
- }
-
- gpe_xrupt_info = gpe_xrupt_info->next;
- }
-
- unlock_and_exit:
- acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
- return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ev_delete_gpe_handlers
- *
- * PARAMETERS: gpe_xrupt_info - GPE Interrupt info
- * gpe_block - Gpe Block info
- *
- * RETURN: Status
- *
- * DESCRIPTION: Delete all Handler objects found in the GPE data structs.
- * Used only prior to termination.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
- struct acpi_gpe_block_info *gpe_block,
- void *context)
-{
- struct acpi_gpe_event_info *gpe_event_info;
- u32 i;
- u32 j;
-
- ACPI_FUNCTION_TRACE(ev_delete_gpe_handlers);
-
- /* Examine each GPE Register within the block */
-
- for (i = 0; i < gpe_block->register_count; i++) {
-
- /* Now look at the individual GPEs in this byte register */
-
- for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
- gpe_event_info = &gpe_block->event_info[((acpi_size) i *
- ACPI_GPE_REGISTER_WIDTH)
- + j];
-
- if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
- ACPI_GPE_DISPATCH_HANDLER) {
- ACPI_FREE(gpe_event_info->dispatch.handler);
- gpe_event_info->dispatch.handler = NULL;
- gpe_event_info->flags &=
- ~ACPI_GPE_DISPATCH_MASK;
- }
- }
- }
-
- return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ev_save_method_info
- *
- * PARAMETERS: Callback from walk_namespace
- *
- * RETURN: Status
- *
- * DESCRIPTION: Called from acpi_walk_namespace. Expects each object to be a
- * control method under the _GPE portion of the namespace.
- * Extract the name and GPE type from the object, saving this
- * information for quick lookup during GPE dispatch
- *
- * The name of each GPE control method is of the form:
- * "_Lxx" or "_Exx"
- * Where:
- * L - means that the GPE is level triggered
- * E - means that the GPE is edge triggered
- * xx - is the GPE number [in HEX]
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ev_save_method_info(acpi_handle obj_handle,
- u32 level, void *obj_desc, void **return_value)
-{
- struct acpi_gpe_block_info *gpe_block = (void *)obj_desc;
- struct acpi_gpe_event_info *gpe_event_info;
- u32 gpe_number;
- char name[ACPI_NAME_SIZE + 1];
- u8 type;
-
- ACPI_FUNCTION_TRACE(ev_save_method_info);
-
- /*
- * _Lxx and _Exx GPE method support
- *
- * 1) Extract the name from the object and convert to a string
- */
- ACPI_MOVE_32_TO_32(name,
- &((struct acpi_namespace_node *)obj_handle)->name.
- integer);
- name[ACPI_NAME_SIZE] = 0;
-
- /*
- * 2) Edge/Level determination is based on the 2nd character
- * of the method name
- *
- * NOTE: Default GPE type is RUNTIME. May be changed later to WAKE
- * if a _PRW object is found that points to this GPE.
- */
- switch (name[1]) {
- case 'L':
- type = ACPI_GPE_LEVEL_TRIGGERED;
- break;
-
- case 'E':
- type = ACPI_GPE_EDGE_TRIGGERED;
- break;
-
- default:
- /* Unknown method type, just ignore it! */
-
- ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
- "Ignoring unknown GPE method type: %s "
- "(name not of form _Lxx or _Exx)", name));
- return_ACPI_STATUS(AE_OK);
- }
-
- /* Convert the last two characters of the name to the GPE Number */
-
- gpe_number = ACPI_STRTOUL(&name[2], NULL, 16);
- if (gpe_number == ACPI_UINT32_MAX) {
-
- /* Conversion failed; invalid method, just ignore it */
-
- ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
- "Could not extract GPE number from name: %s "
- "(name is not of form _Lxx or _Exx)", name));
- return_ACPI_STATUS(AE_OK);
- }
-
- /* Ensure that we have a valid GPE number for this GPE block */
-
- if ((gpe_number < gpe_block->block_base_number) ||
- (gpe_number >= (gpe_block->block_base_number +
- (gpe_block->register_count * 8)))) {
- /*
- * Not valid for this GPE block, just ignore it. However, it may be
- * valid for a different GPE block, since GPE0 and GPE1 methods both
- * appear under \_GPE.
- */
- return_ACPI_STATUS(AE_OK);
- }
-
- /*
- * Now we can add this information to the gpe_event_info block for use
- * during dispatch of this GPE.
- */
- gpe_event_info =
- &gpe_block->event_info[gpe_number - gpe_block->block_base_number];
-
- gpe_event_info->flags = (u8) (type | ACPI_GPE_DISPATCH_METHOD);
-
- gpe_event_info->dispatch.method_node =
- (struct acpi_namespace_node *)obj_handle;
-
- ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
- "Registered GPE method %s as GPE number 0x%.2X\n",
- name, gpe_number));
- return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ev_match_prw_and_gpe
- *
- * PARAMETERS: Callback from walk_namespace
- *
- * RETURN: Status. NOTE: We ignore errors so that the _PRW walk is
- * not aborted on a single _PRW failure.
- *
- * DESCRIPTION: Called from acpi_walk_namespace. Expects each object to be a
- * Device. Run the _PRW method. If present, extract the GPE
- * number and mark the GPE as a WAKE GPE.
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ev_match_prw_and_gpe(acpi_handle obj_handle,
- u32 level, void *info, void **return_value)
-{
- struct acpi_gpe_walk_info *gpe_info = (void *)info;
- struct acpi_namespace_node *gpe_device;
- struct acpi_gpe_block_info *gpe_block;
- struct acpi_namespace_node *target_gpe_device;
- struct acpi_gpe_event_info *gpe_event_info;
- union acpi_operand_object *pkg_desc;
- union acpi_operand_object *obj_desc;
- u32 gpe_number;
- acpi_status status;
-
- ACPI_FUNCTION_TRACE(ev_match_prw_and_gpe);
-
- /* Check for a _PRW method under this device */
-
- status = acpi_ut_evaluate_object(obj_handle, METHOD_NAME__PRW,
- ACPI_BTYPE_PACKAGE, &pkg_desc);
- if (ACPI_FAILURE(status)) {
-
- /* Ignore all errors from _PRW, we don't want to abort the subsystem */
-
- return_ACPI_STATUS(AE_OK);
- }
-
- /* The returned _PRW package must have at least two elements */
-
- if (pkg_desc->package.count < 2) {
- goto cleanup;
- }
-
- /* Extract pointers from the input context */
-
- gpe_device = gpe_info->gpe_device;
- gpe_block = gpe_info->gpe_block;
-
- /*
- * The _PRW object must return a package, we are only interested in the
- * first element
- */
- obj_desc = pkg_desc->package.elements[0];
-
- if (obj_desc->common.type == ACPI_TYPE_INTEGER) {
-
- /* Use FADT-defined GPE device (from definition of _PRW) */
-
- target_gpe_device = acpi_gbl_fadt_gpe_device;
-
- /* Integer is the GPE number in the FADT described GPE blocks */
-
- gpe_number = (u32) obj_desc->integer.value;
- } else if (obj_desc->common.type == ACPI_TYPE_PACKAGE) {
-
- /* Package contains a GPE reference and GPE number within a GPE block */
-
- if ((obj_desc->package.count < 2) ||
- ((obj_desc->package.elements[0])->common.type !=
- ACPI_TYPE_LOCAL_REFERENCE) ||
- ((obj_desc->package.elements[1])->common.type !=
- ACPI_TYPE_INTEGER)) {
- goto cleanup;
- }
-
- /* Get GPE block reference and decode */
-
- target_gpe_device =
- obj_desc->package.elements[0]->reference.node;
- gpe_number = (u32) obj_desc->package.elements[1]->integer.value;
- } else {
- /* Unknown type, just ignore it */
-
- goto cleanup;
- }
-
- /*
- * Is this GPE within this block?
- *
- * TRUE if and only if these conditions are true:
- * 1) The GPE devices match.
- * 2) The GPE index(number) is within the range of the Gpe Block
- * associated with the GPE device.
- */
- if ((gpe_device == target_gpe_device) &&
- (gpe_number >= gpe_block->block_base_number) &&
- (gpe_number < gpe_block->block_base_number +
- (gpe_block->register_count * 8))) {
- gpe_event_info = &gpe_block->event_info[gpe_number -
- gpe_block->
- block_base_number];
-
- gpe_event_info->flags |= ACPI_GPE_CAN_WAKE;
- }
-
- cleanup:
- acpi_ut_remove_reference(pkg_desc);
- return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ev_get_gpe_xrupt_block
- *
- * PARAMETERS: interrupt_number - Interrupt for a GPE block
- *
- * RETURN: A GPE interrupt block
- *
- * DESCRIPTION: Get or Create a GPE interrupt block. There is one interrupt
- * block per unique interrupt level used for GPEs. Should be
- * called only when the GPE lists are semaphore locked and not
- * subject to change.
- *
- ******************************************************************************/
-
-static struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32
- interrupt_number)
-{
- struct acpi_gpe_xrupt_info *next_gpe_xrupt;
- struct acpi_gpe_xrupt_info *gpe_xrupt;
- acpi_status status;
- acpi_cpu_flags flags;
-
- ACPI_FUNCTION_TRACE(ev_get_gpe_xrupt_block);
-
- /* No need for lock since we are not changing any list elements here */
-
- next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
- while (next_gpe_xrupt) {
- if (next_gpe_xrupt->interrupt_number == interrupt_number) {
- return_PTR(next_gpe_xrupt);
- }
-
- next_gpe_xrupt = next_gpe_xrupt->next;
- }
-
- /* Not found, must allocate a new xrupt descriptor */
-
- gpe_xrupt = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_xrupt_info));
- if (!gpe_xrupt) {
- return_PTR(NULL);
- }
-
- gpe_xrupt->interrupt_number = interrupt_number;
-
- /* Install new interrupt descriptor with spin lock */
-
- flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
- if (acpi_gbl_gpe_xrupt_list_head) {
- next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
- while (next_gpe_xrupt->next) {
- next_gpe_xrupt = next_gpe_xrupt->next;
- }
-
- next_gpe_xrupt->next = gpe_xrupt;
- gpe_xrupt->previous = next_gpe_xrupt;
- } else {
- acpi_gbl_gpe_xrupt_list_head = gpe_xrupt;
- }
- acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
-
- /* Install new interrupt handler if not SCI_INT */
-
- if (interrupt_number != acpi_gbl_FADT.sci_interrupt) {
- status = acpi_os_install_interrupt_handler(interrupt_number,
- acpi_ev_gpe_xrupt_handler,
- gpe_xrupt);
- if (ACPI_FAILURE(status)) {
- ACPI_ERROR((AE_INFO,
- "Could not install GPE interrupt handler at level 0x%X",
- interrupt_number));
- return_PTR(NULL);
- }
- }
-
- return_PTR(gpe_xrupt);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ev_delete_gpe_xrupt
- *
- * PARAMETERS: gpe_xrupt - A GPE interrupt info block
- *
- * RETURN: Status
- *
- * DESCRIPTION: Remove and free a gpe_xrupt block. Remove an associated
- * interrupt handler if not the SCI interrupt.
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt)
-{
- acpi_status status;
- acpi_cpu_flags flags;
-
- ACPI_FUNCTION_TRACE(ev_delete_gpe_xrupt);
-
- /* We never want to remove the SCI interrupt handler */
-
- if (gpe_xrupt->interrupt_number == acpi_gbl_FADT.sci_interrupt) {
- gpe_xrupt->gpe_block_list_head = NULL;
- return_ACPI_STATUS(AE_OK);
- }
-
- /* Disable this interrupt */
-
- status =
- acpi_os_remove_interrupt_handler(gpe_xrupt->interrupt_number,
- acpi_ev_gpe_xrupt_handler);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
-
- /* Unlink the interrupt block with lock */
-
- flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
- if (gpe_xrupt->previous) {
- gpe_xrupt->previous->next = gpe_xrupt->next;
- } else {
- /* No previous, update list head */
-
- acpi_gbl_gpe_xrupt_list_head = gpe_xrupt->next;
- }
-
- if (gpe_xrupt->next) {
- gpe_xrupt->next->previous = gpe_xrupt->previous;
- }
- acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
-
- /* Free the block */
-
- ACPI_FREE(gpe_xrupt);
- return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
* FUNCTION: acpi_ev_install_gpe_block
*
* PARAMETERS: gpe_block - New GPE block
@@ -705,8 +170,7 @@ acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block)
acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
}
- acpi_current_gpe_count -=
- gpe_block->register_count * ACPI_GPE_REGISTER_WIDTH;
+ acpi_current_gpe_count -= gpe_block->gpe_count;
/* Free the gpe_block */
@@ -760,9 +224,7 @@ acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block)
* Allocate the GPE event_info block. There are eight distinct GPEs
* per register. Initialization to zeros is sufficient.
*/
- gpe_event_info = ACPI_ALLOCATE_ZEROED(((acpi_size) gpe_block->
- register_count *
- ACPI_GPE_REGISTER_WIDTH) *
+ gpe_event_info = ACPI_ALLOCATE_ZEROED((acpi_size) gpe_block->gpe_count *
sizeof(struct
acpi_gpe_event_info));
if (!gpe_event_info) {
@@ -880,6 +342,7 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
{
acpi_status status;
struct acpi_gpe_block_info *gpe_block;
+ struct acpi_gpe_walk_info walk_info;
ACPI_FUNCTION_TRACE(ev_create_gpe_block);
@@ -897,6 +360,7 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
/* Initialize the new GPE block */
gpe_block->node = gpe_device;
+ gpe_block->gpe_count = (u16)(register_count * ACPI_GPE_REGISTER_WIDTH);
gpe_block->register_count = register_count;
gpe_block->block_base_number = gpe_block_base_number;
@@ -921,12 +385,17 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
return_ACPI_STATUS(status);
}
- /* Find all GPE methods (_Lxx, _Exx) for this block */
+ /* Find all GPE methods (_Lxx or _Exx) for this block */
+
+ walk_info.gpe_block = gpe_block;
+ walk_info.gpe_device = gpe_device;
+ walk_info.enable_this_gpe = FALSE;
+ walk_info.execute_by_owner_id = FALSE;
status = acpi_ns_walk_namespace(ACPI_TYPE_METHOD, gpe_device,
ACPI_UINT32_MAX, ACPI_NS_WALK_NO_UNLOCK,
- acpi_ev_save_method_info, NULL,
- gpe_block, NULL);
+ acpi_ev_match_gpe_method, NULL,
+ &walk_info, NULL);
/* Return the new block */
@@ -938,14 +407,13 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
"GPE %02X to %02X [%4.4s] %u regs on int 0x%X\n",
(u32) gpe_block->block_base_number,
(u32) (gpe_block->block_base_number +
- ((gpe_block->register_count *
- ACPI_GPE_REGISTER_WIDTH) - 1)),
+ (gpe_block->gpe_count - 1)),
gpe_device->name.ascii, gpe_block->register_count,
interrupt_number));
/* Update global count of currently available GPEs */
- acpi_current_gpe_count += register_count * ACPI_GPE_REGISTER_WIDTH;
+ acpi_current_gpe_count += gpe_block->gpe_count;
return_ACPI_STATUS(AE_OK);
}
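
For reference, the new gpe_count field used above is simply register_count * ACPI_GPE_REGISTER_WIDTH (eight GPEs per GPE register). A minimal standalone C sketch of that arithmetic, with illustrative values that are not taken from this patch:

#include <stdio.h>

#define ACPI_GPE_REGISTER_WIDTH 8   /* eight GPEs per GPE register */

int main(void)
{
	unsigned int register_count = 2;        /* illustrative: two STS/EN register pairs */
	unsigned int block_base_number = 0x00;  /* illustrative GPE0-style base */
	unsigned int gpe_count = register_count * ACPI_GPE_REGISTER_WIDTH;

	/* Mirrors the "GPE %02X to %02X ... %u regs" debug print above */
	printf("GPE %02X to %02X, %u regs, %u GPEs\n",
	       block_base_number,
	       block_base_number + gpe_count - 1,
	       register_count, gpe_count);
	return 0;
}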
@@ -969,10 +437,13 @@ acpi_status
acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device,
struct acpi_gpe_block_info *gpe_block)
{
+ acpi_status status;
struct acpi_gpe_event_info *gpe_event_info;
- struct acpi_gpe_walk_info gpe_info;
+ struct acpi_gpe_walk_info walk_info;
u32 wake_gpe_count;
u32 gpe_enabled_count;
+ u32 gpe_index;
+ u32 gpe_number;
u32 i;
u32 j;
@@ -995,210 +466,75 @@ acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device,
* definition a wake GPE and will not be enabled while the machine
* is running.
*/
- gpe_info.gpe_block = gpe_block;
- gpe_info.gpe_device = gpe_device;
+ walk_info.gpe_block = gpe_block;
+ walk_info.gpe_device = gpe_device;
+ walk_info.execute_by_owner_id = FALSE;
- acpi_ns_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
+ status =
+ acpi_ns_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
ACPI_UINT32_MAX, ACPI_NS_WALK_UNLOCK,
acpi_ev_match_prw_and_gpe, NULL,
- &gpe_info, NULL);
+ &walk_info, NULL);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "While executing _PRW methods"));
+ }
}
/*
- * Enable all GPEs that have a corresponding method and aren't
+ * Enable all GPEs that have a corresponding method and are not
* capable of generating wakeups. Any other GPEs within this block
- * must be enabled via the acpi_enable_gpe() interface.
+ * must be enabled via the acpi_enable_gpe interface.
*/
wake_gpe_count = 0;
gpe_enabled_count = 0;
- if (gpe_device == acpi_gbl_fadt_gpe_device)
+
+ if (gpe_device == acpi_gbl_fadt_gpe_device) {
gpe_device = NULL;
+ }
for (i = 0; i < gpe_block->register_count; i++) {
for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
- acpi_status status;
- acpi_size gpe_index;
- int gpe_number;
/* Get the info block for this particular GPE */
- gpe_index = (acpi_size)i * ACPI_GPE_REGISTER_WIDTH + j;
+
+ gpe_index = (i * ACPI_GPE_REGISTER_WIDTH) + j;
gpe_event_info = &gpe_block->event_info[gpe_index];
if (gpe_event_info->flags & ACPI_GPE_CAN_WAKE) {
wake_gpe_count++;
- if (acpi_gbl_leave_wake_gpes_disabled)
+ if (acpi_gbl_leave_wake_gpes_disabled) {
continue;
+ }
}
- if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_METHOD))
+ /* Ignore GPEs that have no corresponding _Lxx/_Exx method */
+
+ if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_METHOD)) {
continue;
+ }
+
+ /* Enable this GPE */
gpe_number = gpe_index + gpe_block->block_base_number;
status = acpi_enable_gpe(gpe_device, gpe_number,
- ACPI_GPE_TYPE_RUNTIME);
- if (ACPI_FAILURE(status))
- ACPI_ERROR((AE_INFO,
- "Failed to enable GPE %02X\n",
- gpe_number));
- else
- gpe_enabled_count++;
- }
- }
-
- ACPI_DEBUG_PRINT((ACPI_DB_INIT,
- "Found %u Wake, Enabled %u Runtime GPEs in this block\n",
- wake_gpe_count, gpe_enabled_count));
-
- return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ev_gpe_initialize
- *
- * PARAMETERS: None
- *
- * RETURN: Status
- *
- * DESCRIPTION: Initialize the GPE data structures
- *
- ******************************************************************************/
-
-acpi_status acpi_ev_gpe_initialize(void)
-{
- u32 register_count0 = 0;
- u32 register_count1 = 0;
- u32 gpe_number_max = 0;
- acpi_status status;
-
- ACPI_FUNCTION_TRACE(ev_gpe_initialize);
-
- status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
-
- /*
- * Initialize the GPE Block(s) defined in the FADT
- *
- * Why the GPE register block lengths are divided by 2: From the ACPI
- * Spec, section "General-Purpose Event Registers", we have:
- *
- * "Each register block contains two registers of equal length
- * GPEx_STS and GPEx_EN (where x is 0 or 1). The length of the
- * GPE0_STS and GPE0_EN registers is equal to half the GPE0_LEN
- * The length of the GPE1_STS and GPE1_EN registers is equal to
- * half the GPE1_LEN. If a generic register block is not supported
- * then its respective block pointer and block length values in the
- * FADT table contain zeros. The GPE0_LEN and GPE1_LEN do not need
- * to be the same size."
- */
-
- /*
- * Determine the maximum GPE number for this machine.
- *
- * Note: both GPE0 and GPE1 are optional, and either can exist without
- * the other.
- *
- * If EITHER the register length OR the block address are zero, then that
- * particular block is not supported.
- */
- if (acpi_gbl_FADT.gpe0_block_length &&
- acpi_gbl_FADT.xgpe0_block.address) {
-
- /* GPE block 0 exists (has both length and address > 0) */
-
- register_count0 = (u16) (acpi_gbl_FADT.gpe0_block_length / 2);
-
- gpe_number_max =
- (register_count0 * ACPI_GPE_REGISTER_WIDTH) - 1;
-
- /* Install GPE Block 0 */
-
- status = acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
- &acpi_gbl_FADT.xgpe0_block,
- register_count0, 0,
- acpi_gbl_FADT.sci_interrupt,
- &acpi_gbl_gpe_fadt_blocks[0]);
-
- if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status,
- "Could not create GPE Block 0"));
- }
- }
-
- if (acpi_gbl_FADT.gpe1_block_length &&
- acpi_gbl_FADT.xgpe1_block.address) {
-
- /* GPE block 1 exists (has both length and address > 0) */
-
- register_count1 = (u16) (acpi_gbl_FADT.gpe1_block_length / 2);
-
- /* Check for GPE0/GPE1 overlap (if both banks exist) */
-
- if ((register_count0) &&
- (gpe_number_max >= acpi_gbl_FADT.gpe1_base)) {
- ACPI_ERROR((AE_INFO,
- "GPE0 block (GPE 0 to %d) overlaps the GPE1 block "
- "(GPE %d to %d) - Ignoring GPE1",
- gpe_number_max, acpi_gbl_FADT.gpe1_base,
- acpi_gbl_FADT.gpe1_base +
- ((register_count1 *
- ACPI_GPE_REGISTER_WIDTH) - 1)));
-
- /* Ignore GPE1 block by setting the register count to zero */
-
- register_count1 = 0;
- } else {
- /* Install GPE Block 1 */
-
- status =
- acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
- &acpi_gbl_FADT.xgpe1_block,
- register_count1,
- acpi_gbl_FADT.gpe1_base,
- acpi_gbl_FADT.
- sci_interrupt,
- &acpi_gbl_gpe_fadt_blocks
- [1]);
-
+ ACPI_GPE_TYPE_RUNTIME);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
- "Could not create GPE Block 1"));
+ "Could not enable GPE 0x%02X",
+ gpe_number));
+ continue;
}
- /*
- * GPE0 and GPE1 do not have to be contiguous in the GPE number
- * space. However, GPE0 always starts at GPE number zero.
- */
- gpe_number_max = acpi_gbl_FADT.gpe1_base +
- ((register_count1 * ACPI_GPE_REGISTER_WIDTH) - 1);
+ gpe_enabled_count++;
}
}
- /* Exit if there are no GPE registers */
-
- if ((register_count0 + register_count1) == 0) {
-
- /* GPEs are not required by ACPI, this is OK */
-
+ if (gpe_enabled_count || wake_gpe_count) {
ACPI_DEBUG_PRINT((ACPI_DB_INIT,
- "There are no GPE blocks defined in the FADT\n"));
- status = AE_OK;
- goto cleanup;
- }
-
- /* Check for Max GPE number out-of-range */
-
- if (gpe_number_max > ACPI_GPE_MAX) {
- ACPI_ERROR((AE_INFO,
- "Maximum GPE number from FADT is too large: 0x%X",
- gpe_number_max));
- status = AE_BAD_VALUE;
- goto cleanup;
+ "Enabled %u Runtime GPEs, added %u Wake GPEs in this block\n",
+ gpe_enabled_count, wake_gpe_count));
}
- cleanup:
- (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return_ACPI_STATUS(AE_OK);
}
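
The initialization loop above maps a (register, bit) pair to a GPE index and then to an absolute GPE number by adding the block base. A standalone C sketch of that mapping, with an illustrative block size and base number:

#include <stdio.h>

#define ACPI_GPE_REGISTER_WIDTH 8

int main(void)
{
	unsigned int register_count = 2;        /* illustrative block size */
	unsigned int block_base_number = 0x10;  /* illustrative GPE1-style base */
	unsigned int i, j;

	for (i = 0; i < register_count; i++) {
		for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
			/* Same arithmetic as acpi_ev_initialize_gpe_block above */
			unsigned int gpe_index = (i * ACPI_GPE_REGISTER_WIDTH) + j;
			unsigned int gpe_number = gpe_index + block_base_number;

			printf("register %u bit %u -> index %2u, GPE 0x%02X\n",
			       i, j, gpe_index, gpe_number);
		}
	}
	return 0;
}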
diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c
new file mode 100644
index 000000000000..3f6c2d26410d
--- /dev/null
+++ b/drivers/acpi/acpica/evgpeinit.c
@@ -0,0 +1,653 @@
+/******************************************************************************
+ *
+ * Module Name: evgpeinit - System GPE initialization and update
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2010, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acevents.h"
+#include "acnamesp.h"
+#include "acinterp.h"
+
+#define _COMPONENT ACPI_EVENTS
+ACPI_MODULE_NAME("evgpeinit")
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_gpe_initialize
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Initialize the GPE data structures and the FADT GPE 0/1 blocks
+ *
+ ******************************************************************************/
+acpi_status acpi_ev_gpe_initialize(void)
+{
+ u32 register_count0 = 0;
+ u32 register_count1 = 0;
+ u32 gpe_number_max = 0;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ev_gpe_initialize);
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /*
+ * Initialize the GPE Block(s) defined in the FADT
+ *
+ * Why the GPE register block lengths are divided by 2: From the ACPI
+ * Spec, section "General-Purpose Event Registers", we have:
+ *
+ * "Each register block contains two registers of equal length
+ * GPEx_STS and GPEx_EN (where x is 0 or 1). The length of the
+ * GPE0_STS and GPE0_EN registers is equal to half the GPE0_LEN
+ * The length of the GPE1_STS and GPE1_EN registers is equal to
+ * half the GPE1_LEN. If a generic register block is not supported
+ * then its respective block pointer and block length values in the
+ * FADT table contain zeros. The GPE0_LEN and GPE1_LEN do not need
+ * to be the same size."
+ */
+
+ /*
+ * Determine the maximum GPE number for this machine.
+ *
+ * Note: both GPE0 and GPE1 are optional, and either can exist without
+ * the other.
+ *
+ * If EITHER the register length OR the block address are zero, then that
+ * particular block is not supported.
+ */
+ if (acpi_gbl_FADT.gpe0_block_length &&
+ acpi_gbl_FADT.xgpe0_block.address) {
+
+ /* GPE block 0 exists (has both length and address > 0) */
+
+ register_count0 = (u16)(acpi_gbl_FADT.gpe0_block_length / 2);
+
+ gpe_number_max =
+ (register_count0 * ACPI_GPE_REGISTER_WIDTH) - 1;
+
+ /* Install GPE Block 0 */
+
+ status = acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
+ &acpi_gbl_FADT.xgpe0_block,
+ register_count0, 0,
+ acpi_gbl_FADT.sci_interrupt,
+ &acpi_gbl_gpe_fadt_blocks[0]);
+
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Could not create GPE Block 0"));
+ }
+ }
+
+ if (acpi_gbl_FADT.gpe1_block_length &&
+ acpi_gbl_FADT.xgpe1_block.address) {
+
+ /* GPE block 1 exists (has both length and address > 0) */
+
+ register_count1 = (u16)(acpi_gbl_FADT.gpe1_block_length / 2);
+
+ /* Check for GPE0/GPE1 overlap (if both banks exist) */
+
+ if ((register_count0) &&
+ (gpe_number_max >= acpi_gbl_FADT.gpe1_base)) {
+ ACPI_ERROR((AE_INFO,
+ "GPE0 block (GPE 0 to %u) overlaps the GPE1 block "
+ "(GPE %u to %u) - Ignoring GPE1",
+ gpe_number_max, acpi_gbl_FADT.gpe1_base,
+ acpi_gbl_FADT.gpe1_base +
+ ((register_count1 *
+ ACPI_GPE_REGISTER_WIDTH) - 1)));
+
+ /* Ignore GPE1 block by setting the register count to zero */
+
+ register_count1 = 0;
+ } else {
+ /* Install GPE Block 1 */
+
+ status =
+ acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
+ &acpi_gbl_FADT.xgpe1_block,
+ register_count1,
+ acpi_gbl_FADT.gpe1_base,
+ acpi_gbl_FADT.
+ sci_interrupt,
+ &acpi_gbl_gpe_fadt_blocks
+ [1]);
+
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Could not create GPE Block 1"));
+ }
+
+ /*
+ * GPE0 and GPE1 do not have to be contiguous in the GPE number
+ * space. However, GPE0 always starts at GPE number zero.
+ */
+ gpe_number_max = acpi_gbl_FADT.gpe1_base +
+ ((register_count1 * ACPI_GPE_REGISTER_WIDTH) - 1);
+ }
+ }
+
+ /* Exit if there are no GPE registers */
+
+ if ((register_count0 + register_count1) == 0) {
+
+ /* GPEs are not required by ACPI, this is OK */
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INIT,
+ "There are no GPE blocks defined in the FADT\n"));
+ status = AE_OK;
+ goto cleanup;
+ }
+
+ /* Check for Max GPE number out-of-range */
+
+ if (gpe_number_max > ACPI_GPE_MAX) {
+ ACPI_ERROR((AE_INFO,
+ "Maximum GPE number from FADT is too large: 0x%X",
+ gpe_number_max));
+ status = AE_BAD_VALUE;
+ goto cleanup;
+ }
+
+ cleanup:
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+ return_ACPI_STATUS(AE_OK);
+}
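
The block-length-divided-by-2 rule quoted in the comment above can be checked in isolation: each FADT GPE block holds equal-length STS and EN registers, and each register byte covers eight GPEs. A standalone sketch with illustrative FADT values (not from any real machine):

#include <stdio.h>

#define ACPI_GPE_REGISTER_WIDTH 8

int main(void)
{
	/* Illustrative FADT values only */
	unsigned int gpe0_block_length = 8;  /* bytes: 4-byte GPE0_STS + 4-byte GPE0_EN */
	unsigned int gpe1_block_length = 0;  /* GPE1 block not present */

	unsigned int register_count0 = gpe0_block_length / 2;
	unsigned int register_count1 = gpe1_block_length / 2;
	unsigned int gpe_number_max = 0;

	if (register_count0)
		gpe_number_max = (register_count0 * ACPI_GPE_REGISTER_WIDTH) - 1;

	printf("GPE0: %u registers, GPE 0x00 to 0x%02X\n",
	       register_count0, gpe_number_max);
	printf("GPE1: %u registers (absent)\n", register_count1);
	return 0;
}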
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_update_gpes
+ *
+ * PARAMETERS: table_owner_id - ID of the newly-loaded ACPI table
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Check for new GPE methods (_Lxx/_Exx) made available as a
+ * result of a Load() or load_table() operation. If new GPE
+ * methods have been installed, register the new methods and
+ * enable any runtime GPEs that are associated with them. Also,
+ * run any newly loaded _PRW methods in order to discover any
+ * new CAN_WAKE GPEs.
+ *
+ ******************************************************************************/
+
+void acpi_ev_update_gpes(acpi_owner_id table_owner_id)
+{
+ struct acpi_gpe_xrupt_info *gpe_xrupt_info;
+ struct acpi_gpe_block_info *gpe_block;
+ struct acpi_gpe_walk_info walk_info;
+ acpi_status status = AE_OK;
+ u32 new_wake_gpe_count = 0;
+
+ /* We will examine only _PRW/_Lxx/_Exx methods owned by this table */
+
+ walk_info.owner_id = table_owner_id;
+ walk_info.execute_by_owner_id = TRUE;
+ walk_info.count = 0;
+
+ if (acpi_gbl_leave_wake_gpes_disabled) {
+ /*
+ * 1) Run any newly-loaded _PRW methods to find any GPEs that
+ * can now be marked as CAN_WAKE GPEs. Note: We must run the
+ * _PRW methods before we process the _Lxx/_Exx methods because
+ * we will enable all runtime GPEs associated with the new
+ * _Lxx/_Exx methods at the time we process those methods.
+ *
+ * Unlock interpreter so that we can run the _PRW methods.
+ */
+ walk_info.gpe_block = NULL;
+ walk_info.gpe_device = NULL;
+
+ acpi_ex_exit_interpreter();
+
+ status =
+ acpi_ns_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
+ ACPI_UINT32_MAX,
+ ACPI_NS_WALK_NO_UNLOCK,
+ acpi_ev_match_prw_and_gpe, NULL,
+ &walk_info, NULL);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "While executing _PRW methods"));
+ }
+
+ acpi_ex_enter_interpreter();
+ new_wake_gpe_count = walk_info.count;
+ }
+
+ /*
+ * 2) Find any _Lxx/_Exx GPE methods that have just been loaded.
+ *
+ * Any GPEs that correspond to new _Lxx/_Exx methods and are not
+ * marked as CAN_WAKE are immediately enabled.
+ *
+ * Examine the namespace underneath each gpe_device within the
+ * gpe_block lists.
+ */
+ status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+ if (ACPI_FAILURE(status)) {
+ return;
+ }
+
+ walk_info.count = 0;
+ walk_info.enable_this_gpe = TRUE;
+
+ /* Walk the interrupt level descriptor list */
+
+ gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head;
+ while (gpe_xrupt_info) {
+
+ /* Walk all Gpe Blocks attached to this interrupt level */
+
+ gpe_block = gpe_xrupt_info->gpe_block_list_head;
+ while (gpe_block) {
+ walk_info.gpe_block = gpe_block;
+ walk_info.gpe_device = gpe_block->node;
+
+ status = acpi_ns_walk_namespace(ACPI_TYPE_METHOD,
+ walk_info.gpe_device,
+ ACPI_UINT32_MAX,
+ ACPI_NS_WALK_NO_UNLOCK,
+ acpi_ev_match_gpe_method,
+ NULL, &walk_info, NULL);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "While decoding _Lxx/_Exx methods"));
+ }
+
+ gpe_block = gpe_block->next;
+ }
+
+ gpe_xrupt_info = gpe_xrupt_info->next;
+ }
+
+ if (walk_info.count || new_wake_gpe_count) {
+ ACPI_INFO((AE_INFO,
+ "Enabled %u new runtime GPEs, added %u new wakeup GPEs",
+ walk_info.count, new_wake_gpe_count));
+ }
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+ return;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_match_gpe_method
+ *
+ * PARAMETERS: Callback from walk_namespace
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Called from acpi_walk_namespace. Expects each object to be a
+ * control method under the _GPE portion of the namespace.
+ * Extract the name and GPE type from the object, saving this
+ * information for quick lookup during GPE dispatch. Allows a
+ * per-owner_id evaluation if execute_by_owner_id is TRUE in the
+ * walk_info parameter block.
+ *
+ * The name of each GPE control method is of the form:
+ * "_Lxx" or "_Exx", where:
+ * L - means that the GPE is level triggered
+ * E - means that the GPE is edge triggered
+ * xx - is the GPE number [in HEX]
+ *
+ * If walk_info->execute_by_owner_id is TRUE, we only examine GPE methods
+ * with that owner.
+ * If walk_info->enable_this_gpe is TRUE, the GPE that is referred to by a GPE
+ * method is immediately enabled (Used for Load/load_table operators)
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_match_gpe_method(acpi_handle obj_handle,
+ u32 level, void *context, void **return_value)
+{
+ struct acpi_namespace_node *method_node =
+ ACPI_CAST_PTR(struct acpi_namespace_node, obj_handle);
+ struct acpi_gpe_walk_info *walk_info =
+ ACPI_CAST_PTR(struct acpi_gpe_walk_info, context);
+ struct acpi_gpe_event_info *gpe_event_info;
+ struct acpi_namespace_node *gpe_device;
+ acpi_status status;
+ u32 gpe_number;
+ char name[ACPI_NAME_SIZE + 1];
+ u8 type;
+
+ ACPI_FUNCTION_TRACE(ev_match_gpe_method);
+
+ /* Check if requested owner_id matches this owner_id */
+
+ if ((walk_info->execute_by_owner_id) &&
+ (method_node->owner_id != walk_info->owner_id)) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /*
+ * Match and decode the _Lxx and _Exx GPE method names
+ *
+ * 1) Extract the method name and null terminate it
+ */
+ ACPI_MOVE_32_TO_32(name, &method_node->name.integer);
+ name[ACPI_NAME_SIZE] = 0;
+
+ /* 2) Name must begin with an underscore */
+
+ if (name[0] != '_') {
+ return_ACPI_STATUS(AE_OK); /* Ignore this method */
+ }
+
+ /*
+ * 3) Edge/Level determination is based on the 2nd character
+ * of the method name
+ *
+ * NOTE: Default GPE type is RUNTIME only. Later, if a _PRW object is
+ * found that points to this GPE, the ACPI_GPE_CAN_WAKE flag is set.
+ */
+ switch (name[1]) {
+ case 'L':
+ type = ACPI_GPE_LEVEL_TRIGGERED;
+ break;
+
+ case 'E':
+ type = ACPI_GPE_EDGE_TRIGGERED;
+ break;
+
+ default:
+ /* Unknown method type, just ignore it */
+
+ ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
+ "Ignoring unknown GPE method type: %s "
+ "(name not of form _Lxx or _Exx)", name));
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* 4) The last two characters of the name are the hex GPE Number */
+
+ gpe_number = ACPI_STRTOUL(&name[2], NULL, 16);
+ if (gpe_number == ACPI_UINT32_MAX) {
+
+ /* Conversion failed; invalid method, just ignore it */
+
+ ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
+ "Could not extract GPE number from name: %s "
+ "(name is not of form _Lxx or _Exx)", name));
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* Ensure that we have a valid GPE number for this GPE block */
+
+ gpe_event_info =
+ acpi_ev_low_get_gpe_info(gpe_number, walk_info->gpe_block);
+ if (!gpe_event_info) {
+ /*
+ * This gpe_number is not valid for this GPE block, just ignore it.
+ * However, it may be valid for a different GPE block, since GPE0
+ * and GPE1 methods both appear under \_GPE.
+ */
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
+ ACPI_GPE_DISPATCH_HANDLER) {
+
+ /* If there is already a handler, ignore this GPE method */
+
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
+ ACPI_GPE_DISPATCH_METHOD) {
+ /*
+ * If there is already a method, ignore this method. But check
+ * for a type mismatch (if both the _Lxx AND _Exx exist)
+ */
+ if (type != (gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK)) {
+ ACPI_ERROR((AE_INFO,
+ "For GPE 0x%.2X, found both _L%2.2X and _E%2.2X methods",
+ gpe_number, gpe_number, gpe_number));
+ }
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /*
+ * Add the GPE information from above to the gpe_event_info block for
+ * use during dispatch of this GPE.
+ */
+ gpe_event_info->flags |= (u8)(type | ACPI_GPE_DISPATCH_METHOD);
+ gpe_event_info->dispatch.method_node = method_node;
+
+ /*
+ * Enable this GPE if requested. This only happens when during the
+ * execution of a Load or load_table operator. We have found a new
+ * GPE method and want to immediately enable the GPE if it is a
+ * runtime GPE.
+ */
+ if (walk_info->enable_this_gpe) {
+
+ /* Ignore GPEs that can wake the system */
+
+ if (!(gpe_event_info->flags & ACPI_GPE_CAN_WAKE) ||
+ !acpi_gbl_leave_wake_gpes_disabled) {
+ walk_info->count++;
+ gpe_device = walk_info->gpe_device;
+
+ if (gpe_device == acpi_gbl_fadt_gpe_device) {
+ gpe_device = NULL;
+ }
+
+ status = acpi_enable_gpe(gpe_device, gpe_number,
+ ACPI_GPE_TYPE_RUNTIME);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Could not enable GPE 0x%02X",
+ gpe_number));
+ }
+ }
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
+ "Registered GPE method %s as GPE number 0x%.2X\n",
+ name, gpe_number));
+ return_ACPI_STATUS(AE_OK);
+}
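
A minimal standalone sketch of the _Lxx/_Exx name decoding performed above; decode_gpe_method_name and the sample names are hypothetical, and plain strtoul stands in for ACPI_STRTOUL:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical standalone decoder mirroring the _Lxx/_Exx parsing above */
static int decode_gpe_method_name(const char *name,
				  char *trigger, unsigned long *gpe_number)
{
	char *end;

	if (strlen(name) != 4 || name[0] != '_')
		return -1;
	if (name[1] != 'L' && name[1] != 'E')
		return -1;                /* not _Lxx or _Exx */

	*trigger = name[1];               /* 'L' = level, 'E' = edge */
	*gpe_number = strtoul(&name[2], &end, 16);
	if (*end != '\0')
		return -1;                /* xx was not valid hex */
	return 0;
}

int main(void)
{
	const char *names[] = { "_L0C", "_E1F", "_Q66", "_LXY" };
	size_t i;

	for (i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
		char trigger;
		unsigned long gpe;

		if (decode_gpe_method_name(names[i], &trigger, &gpe) == 0)
			printf("%s -> %s triggered, GPE 0x%02lX\n", names[i],
			       trigger == 'L' ? "level" : "edge", gpe);
		else
			printf("%s -> ignored\n", names[i]);
	}
	return 0;
}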
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_match_prw_and_gpe
+ *
+ * PARAMETERS: Callback from walk_namespace
+ *
+ * RETURN: Status. NOTE: We ignore errors so that the _PRW walk is
+ * not aborted on a single _PRW failure.
+ *
+ * DESCRIPTION: Called from acpi_walk_namespace. Expects each object to be a
+ * Device. Run the _PRW method. If present, extract the GPE
+ * number and mark the GPE as a CAN_WAKE GPE. Allows a
+ * per-owner_id execution if execute_by_owner_id is TRUE in the
+ * walk_info parameter block.
+ *
+ * If walk_info->execute_by_owner_id is TRUE, we only execute _PRWs with that
+ * owner.
+ * If walk_info->gpe_device is NULL, we execute every _PRW found. Otherwise,
+ * we only execute _PRWs that refer to the input gpe_device.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_match_prw_and_gpe(acpi_handle obj_handle,
+ u32 level, void *context, void **return_value)
+{
+ struct acpi_gpe_walk_info *walk_info =
+ ACPI_CAST_PTR(struct acpi_gpe_walk_info, context);
+ struct acpi_namespace_node *gpe_device;
+ struct acpi_gpe_block_info *gpe_block;
+ struct acpi_namespace_node *target_gpe_device;
+ struct acpi_namespace_node *prw_node;
+ struct acpi_gpe_event_info *gpe_event_info;
+ union acpi_operand_object *pkg_desc;
+ union acpi_operand_object *obj_desc;
+ u32 gpe_number;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ev_match_prw_and_gpe);
+
+ /* Check for a _PRW method under this device */
+
+ status = acpi_ns_get_node(obj_handle, METHOD_NAME__PRW,
+ ACPI_NS_NO_UPSEARCH, &prw_node);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* Check if requested owner_id matches this owner_id */
+
+ if ((walk_info->execute_by_owner_id) &&
+ (prw_node->owner_id != walk_info->owner_id)) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* Execute the _PRW */
+
+ status = acpi_ut_evaluate_object(prw_node, NULL,
+ ACPI_BTYPE_PACKAGE, &pkg_desc);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* The returned _PRW package must have at least two elements */
+
+ if (pkg_desc->package.count < 2) {
+ goto cleanup;
+ }
+
+ /* Extract pointers from the input context */
+
+ gpe_device = walk_info->gpe_device;
+ gpe_block = walk_info->gpe_block;
+
+ /*
+ * The _PRW object must return a package, we are only interested
+ * in the first element
+ */
+ obj_desc = pkg_desc->package.elements[0];
+
+ if (obj_desc->common.type == ACPI_TYPE_INTEGER) {
+
+ /* Use FADT-defined GPE device (from definition of _PRW) */
+
+ target_gpe_device = NULL;
+ if (gpe_device) {
+ target_gpe_device = acpi_gbl_fadt_gpe_device;
+ }
+
+ /* Integer is the GPE number in the FADT described GPE blocks */
+
+ gpe_number = (u32)obj_desc->integer.value;
+ } else if (obj_desc->common.type == ACPI_TYPE_PACKAGE) {
+
+ /* Package contains a GPE reference and GPE number within a GPE block */
+
+ if ((obj_desc->package.count < 2) ||
+ ((obj_desc->package.elements[0])->common.type !=
+ ACPI_TYPE_LOCAL_REFERENCE) ||
+ ((obj_desc->package.elements[1])->common.type !=
+ ACPI_TYPE_INTEGER)) {
+ goto cleanup;
+ }
+
+ /* Get GPE block reference and decode */
+
+ target_gpe_device =
+ obj_desc->package.elements[0]->reference.node;
+ gpe_number = (u32)obj_desc->package.elements[1]->integer.value;
+ } else {
+ /* Unknown type, just ignore it */
+
+ goto cleanup;
+ }
+
+ /* Get the gpe_event_info for this GPE */
+
+ if (gpe_device) {
+ /*
+ * Is this GPE within this block?
+ *
+ * TRUE if and only if these conditions are true:
+ * 1) The GPE devices match.
+ * 2) The GPE index(number) is within the range of the Gpe Block
+ * associated with the GPE device.
+ */
+ if (gpe_device != target_gpe_device) {
+ goto cleanup;
+ }
+
+ gpe_event_info =
+ acpi_ev_low_get_gpe_info(gpe_number, gpe_block);
+ } else {
+ /* gpe_device is NULL, just match the target_device and gpe_number */
+
+ gpe_event_info =
+ acpi_ev_get_gpe_event_info(target_gpe_device, gpe_number);
+ }
+
+ if (gpe_event_info) {
+ if (!(gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) {
+
+ /* This GPE can wake the system */
+
+ gpe_event_info->flags |= ACPI_GPE_CAN_WAKE;
+ walk_info->count++;
+ }
+ }
+
+ cleanup:
+ acpi_ut_remove_reference(pkg_desc);
+ return_ACPI_STATUS(AE_OK);
+}
diff --git a/drivers/acpi/acpica/evgpeutil.c b/drivers/acpi/acpica/evgpeutil.c
new file mode 100644
index 000000000000..19a0e513ea48
--- /dev/null
+++ b/drivers/acpi/acpica/evgpeutil.c
@@ -0,0 +1,337 @@
+/******************************************************************************
+ *
+ * Module Name: evgpeutil - GPE utilities
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2010, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acevents.h"
+
+#define _COMPONENT ACPI_EVENTS
+ACPI_MODULE_NAME("evgpeutil")
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_walk_gpe_list
+ *
+ * PARAMETERS: gpe_walk_callback - Routine called for each GPE block
+ * Context - Value passed to callback
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Walk the GPE lists.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback, void *context)
+{
+ struct acpi_gpe_block_info *gpe_block;
+ struct acpi_gpe_xrupt_info *gpe_xrupt_info;
+ acpi_status status = AE_OK;
+ acpi_cpu_flags flags;
+
+ ACPI_FUNCTION_TRACE(ev_walk_gpe_list);
+
+ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+ /* Walk the interrupt level descriptor list */
+
+ gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head;
+ while (gpe_xrupt_info) {
+
+ /* Walk all Gpe Blocks attached to this interrupt level */
+
+ gpe_block = gpe_xrupt_info->gpe_block_list_head;
+ while (gpe_block) {
+
+ /* One callback per GPE block */
+
+ status =
+ gpe_walk_callback(gpe_xrupt_info, gpe_block,
+ context);
+ if (ACPI_FAILURE(status)) {
+ if (status == AE_CTRL_END) { /* Callback abort */
+ status = AE_OK;
+ }
+ goto unlock_and_exit;
+ }
+
+ gpe_block = gpe_block->next;
+ }
+
+ gpe_xrupt_info = gpe_xrupt_info->next;
+ }
+
+ unlock_and_exit:
+ acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+ return_ACPI_STATUS(status);
+}
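
A minimal sketch of a caller-supplied callback for acpi_ev_walk_gpe_list, assuming the ACPICA build environment and the headers used by this file; count_gpe_registers and the running total are illustrative, and returning AE_CTRL_END would abort the walk early:

#include <acpi/acpi.h>
#include "accommon.h"
#include "acevents.h"

/* Illustrative callback: count GPE registers across all blocks */
static acpi_status
count_gpe_registers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
		    struct acpi_gpe_block_info *gpe_block, void *context)
{
	u32 *total = context;

	*total += gpe_block->register_count;
	return (AE_OK);                   /* AE_CTRL_END would stop the walk */
}

/* Illustrative usage:
 *
 *	u32 total = 0;
 *	(void)acpi_ev_walk_gpe_list(count_gpe_registers, &total);
 */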
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_valid_gpe_event
+ *
+ * PARAMETERS: gpe_event_info - Info for this GPE
+ *
+ * RETURN: TRUE if the gpe_event is valid
+ *
+ * DESCRIPTION: Validate a GPE event. DO NOT CALL FROM INTERRUPT LEVEL.
+ * Should be called only when the GPE lists are semaphore locked
+ * and not subject to change.
+ *
+ ******************************************************************************/
+
+u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info)
+{
+ struct acpi_gpe_xrupt_info *gpe_xrupt_block;
+ struct acpi_gpe_block_info *gpe_block;
+
+ ACPI_FUNCTION_ENTRY();
+
+ /* No need for spin lock since we are not changing any list elements */
+
+ /* Walk the GPE interrupt levels */
+
+ gpe_xrupt_block = acpi_gbl_gpe_xrupt_list_head;
+ while (gpe_xrupt_block) {
+ gpe_block = gpe_xrupt_block->gpe_block_list_head;
+
+ /* Walk the GPE blocks on this interrupt level */
+
+ while (gpe_block) {
+ if ((&gpe_block->event_info[0] <= gpe_event_info) &&
+ (&gpe_block->event_info[gpe_block->gpe_count] >
+ gpe_event_info)) {
+ return (TRUE);
+ }
+
+ gpe_block = gpe_block->next;
+ }
+
+ gpe_xrupt_block = gpe_xrupt_block->next;
+ }
+
+ return (FALSE);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_get_gpe_xrupt_block
+ *
+ * PARAMETERS: interrupt_number - Interrupt for a GPE block
+ *
+ * RETURN: A GPE interrupt block
+ *
+ * DESCRIPTION: Get or Create a GPE interrupt block. There is one interrupt
+ * block per unique interrupt level used for GPEs. Should be
+ * called only when the GPE lists are semaphore locked and not
+ * subject to change.
+ *
+ ******************************************************************************/
+
+struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32 interrupt_number)
+{
+ struct acpi_gpe_xrupt_info *next_gpe_xrupt;
+ struct acpi_gpe_xrupt_info *gpe_xrupt;
+ acpi_status status;
+ acpi_cpu_flags flags;
+
+ ACPI_FUNCTION_TRACE(ev_get_gpe_xrupt_block);
+
+ /* No need for lock since we are not changing any list elements here */
+
+ next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
+ while (next_gpe_xrupt) {
+ if (next_gpe_xrupt->interrupt_number == interrupt_number) {
+ return_PTR(next_gpe_xrupt);
+ }
+
+ next_gpe_xrupt = next_gpe_xrupt->next;
+ }
+
+ /* Not found, must allocate a new xrupt descriptor */
+
+ gpe_xrupt = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_xrupt_info));
+ if (!gpe_xrupt) {
+ return_PTR(NULL);
+ }
+
+ gpe_xrupt->interrupt_number = interrupt_number;
+
+ /* Install new interrupt descriptor with spin lock */
+
+ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+ if (acpi_gbl_gpe_xrupt_list_head) {
+ next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
+ while (next_gpe_xrupt->next) {
+ next_gpe_xrupt = next_gpe_xrupt->next;
+ }
+
+ next_gpe_xrupt->next = gpe_xrupt;
+ gpe_xrupt->previous = next_gpe_xrupt;
+ } else {
+ acpi_gbl_gpe_xrupt_list_head = gpe_xrupt;
+ }
+ acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+
+ /* Install new interrupt handler if not SCI_INT */
+
+ if (interrupt_number != acpi_gbl_FADT.sci_interrupt) {
+ status = acpi_os_install_interrupt_handler(interrupt_number,
+ acpi_ev_gpe_xrupt_handler,
+ gpe_xrupt);
+ if (ACPI_FAILURE(status)) {
+ ACPI_ERROR((AE_INFO,
+ "Could not install GPE interrupt handler at level 0x%X",
+ interrupt_number));
+ return_PTR(NULL);
+ }
+ }
+
+ return_PTR(gpe_xrupt);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_delete_gpe_xrupt
+ *
+ * PARAMETERS: gpe_xrupt - A GPE interrupt info block
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Remove and free a gpe_xrupt block. Remove an associated
+ * interrupt handler if not the SCI interrupt.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt)
+{
+ acpi_status status;
+ acpi_cpu_flags flags;
+
+ ACPI_FUNCTION_TRACE(ev_delete_gpe_xrupt);
+
+ /* We never want to remove the SCI interrupt handler */
+
+ if (gpe_xrupt->interrupt_number == acpi_gbl_FADT.sci_interrupt) {
+ gpe_xrupt->gpe_block_list_head = NULL;
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* Disable this interrupt */
+
+ status =
+ acpi_os_remove_interrupt_handler(gpe_xrupt->interrupt_number,
+ acpi_ev_gpe_xrupt_handler);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Unlink the interrupt block with lock */
+
+ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+ if (gpe_xrupt->previous) {
+ gpe_xrupt->previous->next = gpe_xrupt->next;
+ } else {
+ /* No previous, update list head */
+
+ acpi_gbl_gpe_xrupt_list_head = gpe_xrupt->next;
+ }
+
+ if (gpe_xrupt->next) {
+ gpe_xrupt->next->previous = gpe_xrupt->previous;
+ }
+ acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+
+ /* Free the block */
+
+ ACPI_FREE(gpe_xrupt);
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_delete_gpe_handlers
+ *
+ * PARAMETERS: gpe_xrupt_info - GPE Interrupt info
+ * gpe_block - Gpe Block info
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Delete all Handler objects found in the GPE data structs.
+ * Used only prior to termination.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
+ struct acpi_gpe_block_info *gpe_block,
+ void *context)
+{
+ struct acpi_gpe_event_info *gpe_event_info;
+ u32 i;
+ u32 j;
+
+ ACPI_FUNCTION_TRACE(ev_delete_gpe_handlers);
+
+ /* Examine each GPE Register within the block */
+
+ for (i = 0; i < gpe_block->register_count; i++) {
+
+ /* Now look at the individual GPEs in this byte register */
+
+ for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
+ gpe_event_info = &gpe_block->event_info[((acpi_size) i *
+ ACPI_GPE_REGISTER_WIDTH)
+ + j];
+
+ if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
+ ACPI_GPE_DISPATCH_HANDLER) {
+ ACPI_FREE(gpe_event_info->dispatch.handler);
+ gpe_event_info->dispatch.handler = NULL;
+ gpe_event_info->flags &=
+ ~ACPI_GPE_DISPATCH_MASK;
+ }
+ }
+ }
+
+ return_ACPI_STATUS(AE_OK);
+}
diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c
index 9a3cb7045a32..df0aea9a8cfd 100644
--- a/drivers/acpi/acpica/evmisc.c
+++ b/drivers/acpi/acpica/evmisc.c
@@ -590,7 +590,7 @@ void acpi_ev_terminate(void)
status = acpi_disable_event(i, 0);
if (ACPI_FAILURE(status)) {
ACPI_ERROR((AE_INFO,
- "Could not disable fixed event %d",
+ "Could not disable fixed event %u",
(u32) i));
}
}
diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c
index b40757955f9b..cc825023012a 100644
--- a/drivers/acpi/acpica/evxface.c
+++ b/drivers/acpi/acpica/evxface.c
@@ -142,7 +142,7 @@ acpi_install_fixed_event_handler(u32 event,
if (ACPI_SUCCESS(status))
status = acpi_enable_event(event, 0);
if (ACPI_FAILURE(status)) {
- ACPI_WARNING((AE_INFO, "Could not enable fixed event %X",
+ ACPI_WARNING((AE_INFO, "Could not enable fixed event 0x%X",
event));
/* Remove the handler */
@@ -203,7 +203,7 @@ acpi_remove_fixed_event_handler(u32 event, acpi_event_handler handler)
if (ACPI_FAILURE(status)) {
ACPI_WARNING((AE_INFO,
- "Could not write to fixed event enable register %X",
+ "Could not write to fixed event enable register 0x%X",
event));
} else {
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Disabled fixed event %X\n",
@@ -682,14 +682,13 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
/* Parameter validation */
- if ((!address) || (type > ACPI_GPE_XRUPT_TYPE_MASK)) {
- status = AE_BAD_PARAMETER;
- goto exit;
+ if ((!address) || (type & ~ACPI_GPE_XRUPT_TYPE_MASK)) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
}
status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
if (ACPI_FAILURE(status)) {
- goto exit;
+ return_ACPI_STATUS(status);
}
/* Ensure that we have a valid GPE number */
@@ -720,6 +719,13 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
handler->context = context;
handler->method_node = gpe_event_info->dispatch.method_node;
+ /* Disable the GPE before installing the handler */
+
+ status = acpi_ev_disable_gpe(gpe_event_info);
+ if (ACPI_FAILURE (status)) {
+ goto unlock_and_exit;
+ }
+
/* Install the handler */
flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
@@ -733,12 +739,8 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
- unlock_and_exit:
+unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
- exit:
- if (ACPI_FAILURE(status))
- ACPI_EXCEPTION((AE_INFO, status,
- "Installing notify handler failed"));
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index 5ff32c78ea2d..d5a5efc043bf 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -69,7 +69,7 @@ acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
acpi_status acpi_enable(void)
{
- acpi_status status = AE_OK;
+ acpi_status status;
ACPI_FUNCTION_TRACE(acpi_enable);
@@ -84,21 +84,30 @@ acpi_status acpi_enable(void)
if (acpi_hw_get_mode() == ACPI_SYS_MODE_ACPI) {
ACPI_DEBUG_PRINT((ACPI_DB_INIT,
"System is already in ACPI mode\n"));
- } else {
- /* Transition to ACPI mode */
+ return_ACPI_STATUS(AE_OK);
+ }
- status = acpi_hw_set_mode(ACPI_SYS_MODE_ACPI);
- if (ACPI_FAILURE(status)) {
- ACPI_ERROR((AE_INFO,
- "Could not transition to ACPI mode"));
- return_ACPI_STATUS(status);
- }
+ /* Transition to ACPI mode */
- ACPI_DEBUG_PRINT((ACPI_DB_INIT,
- "Transition to ACPI mode successful\n"));
+ status = acpi_hw_set_mode(ACPI_SYS_MODE_ACPI);
+ if (ACPI_FAILURE(status)) {
+ ACPI_ERROR((AE_INFO,
+ "Could not transition to ACPI mode"));
+ return_ACPI_STATUS(status);
}
- return_ACPI_STATUS(status);
+ /* Sanity check that transition succeeded */
+
+ if (acpi_hw_get_mode() != ACPI_SYS_MODE_ACPI) {
+ ACPI_ERROR((AE_INFO,
+ "Hardware did not enter ACPI mode"));
+ return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE);
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INIT,
+ "Transition to ACPI mode successful\n"));
+
+ return_ACPI_STATUS(AE_OK);
}
ACPI_EXPORT_SYMBOL(acpi_enable)
@@ -203,21 +212,26 @@ ACPI_EXPORT_SYMBOL(acpi_enable_event)
*
* FUNCTION: acpi_set_gpe
*
- * PARAMETERS: gpe_device - Parent GPE Device
+ * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
* gpe_number - GPE level within the GPE block
- * action - Enable or disable
- * Called from ISR or not
+ * action - ACPI_GPE_ENABLE or ACPI_GPE_DISABLE
*
* RETURN: Status
*
- * DESCRIPTION: Enable or disable an ACPI event (general purpose)
+ * DESCRIPTION: Enable or disable an individual GPE. This function bypasses
+ * the reference count mechanism used in the acpi_enable_gpe and
+ * acpi_disable_gpe interfaces -- and should be used with care.
+ *
+ * Note: Typically used to disable a runtime GPE for a short period of time,
+ * then re-enable it, without disturbing the existing reference counts. This
+ * is useful, for example, in the Embedded Controller (EC) driver.
*
******************************************************************************/
acpi_status acpi_set_gpe(acpi_handle gpe_device, u32 gpe_number, u8 action)
{
- acpi_status status = AE_OK;
- acpi_cpu_flags flags;
struct acpi_gpe_event_info *gpe_event_info;
+ acpi_status status;
+ acpi_cpu_flags flags;
ACPI_FUNCTION_TRACE(acpi_set_gpe);
@@ -243,7 +257,6 @@ acpi_status acpi_set_gpe(acpi_handle gpe_device, u32 gpe_number, u8 action)
break;
default:
- ACPI_ERROR((AE_INFO, "Invalid action\n"));
status = AE_BAD_PARAMETER;
break;
}
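
A hedged usage sketch of acpi_set_gpe as documented above (EC-driver style: mask a runtime GPE briefly without touching its reference counts); the helper function and values here are illustrative, not part of this patch:

#include <acpi/acpi.h>   /* Linux ACPI/ACPICA environment assumed */

static void example_mask_gpe_briefly(acpi_handle gpe_device, u32 gpe_number)
{
	acpi_status status;

	/* Temporarily mask the GPE; reference counts are left untouched */
	status = acpi_set_gpe(gpe_device, gpe_number, ACPI_GPE_DISABLE);
	if (ACPI_FAILURE(status))
		return;

	/* ... work that must not race with this GPE's handler ... */

	/* Unmask it again */
	(void)acpi_set_gpe(gpe_device, gpe_number, ACPI_GPE_ENABLE);
}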
@@ -259,25 +272,31 @@ ACPI_EXPORT_SYMBOL(acpi_set_gpe)
*
* FUNCTION: acpi_enable_gpe
*
- * PARAMETERS: gpe_device - Parent GPE Device
+ * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
* gpe_number - GPE level within the GPE block
- * type - Purpose the GPE will be used for
+ * gpe_type - ACPI_GPE_TYPE_RUNTIME or ACPI_GPE_TYPE_WAKE
+ * or both
*
* RETURN: Status
*
- * DESCRIPTION: Take a reference to a GPE and enable it if necessary
+ * DESCRIPTION: Add a reference to a GPE. On the first reference, the GPE is
+ * hardware-enabled (for runtime GPEs), or the GPE register mask
+ * is updated (for wake GPEs).
*
******************************************************************************/
-acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 type)
+acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 gpe_type)
{
acpi_status status = AE_OK;
- acpi_cpu_flags flags;
struct acpi_gpe_event_info *gpe_event_info;
+ acpi_cpu_flags flags;
ACPI_FUNCTION_TRACE(acpi_enable_gpe);
- if (type & ~ACPI_GPE_TYPE_WAKE_RUN)
+ /* Parameter validation */
+
+ if (!gpe_type || (gpe_type & ~ACPI_GPE_TYPE_WAKE_RUN)) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
@@ -289,26 +308,43 @@ acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 type)
goto unlock_and_exit;
}
- if (type & ACPI_GPE_TYPE_RUNTIME) {
- if (++gpe_event_info->runtime_count == 1) {
+ if (gpe_type & ACPI_GPE_TYPE_RUNTIME) {
+ if (gpe_event_info->runtime_count == ACPI_UINT8_MAX) {
+ status = AE_LIMIT; /* Too many references */
+ goto unlock_and_exit;
+ }
+
+ gpe_event_info->runtime_count++;
+ if (gpe_event_info->runtime_count == 1) {
status = acpi_ev_enable_gpe(gpe_event_info);
- if (ACPI_FAILURE(status))
+ if (ACPI_FAILURE(status)) {
gpe_event_info->runtime_count--;
+ goto unlock_and_exit;
+ }
}
}
- if (type & ACPI_GPE_TYPE_WAKE) {
+ if (gpe_type & ACPI_GPE_TYPE_WAKE) {
+ /* The GPE must have the ability to wake the system */
+
if (!(gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) {
- status = AE_BAD_PARAMETER;
+ status = AE_TYPE;
+ goto unlock_and_exit;
+ }
+
+ if (gpe_event_info->wakeup_count == ACPI_UINT8_MAX) {
+ status = AE_LIMIT; /* Too many references */
goto unlock_and_exit;
}
/*
- * Wake-up GPEs are only enabled right prior to putting the
- * system into a sleep state.
+ * Update the enable mask on the first wakeup reference. Wake GPEs
+ * are only hardware-enabled just before sleeping.
*/
- if (++gpe_event_info->wakeup_count == 1)
- acpi_ev_update_gpe_enable_masks(gpe_event_info);
+ gpe_event_info->wakeup_count++;
+ if (gpe_event_info->wakeup_count == 1) {
+ (void)acpi_ev_update_gpe_enable_masks(gpe_event_info);
+ }
}
unlock_and_exit:
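Note: the rewritten body above adds explicit saturation (AE_LIMIT at ACPI_UINT8_MAX references) and returns AE_TYPE when a wake reference is requested for a GPE that lacks ACPI_GPE_CAN_WAKE. A minimal sketch of taking both reference types, assuming the caller already knows its device's GPE handle and number:

#include <acpi/acpi.h>

/* Hypothetical example: take runtime and wake references on a device GPE */
static acpi_status example_reference_gpe(acpi_handle gpe_device, u32 gpe_number)
{
	acpi_status status;

	/* First runtime reference hardware-enables the GPE */
	status = acpi_enable_gpe(gpe_device, gpe_number, ACPI_GPE_TYPE_RUNTIME);
	if (ACPI_FAILURE(status)) {
		return status;	/* AE_LIMIT: already 255 runtime references */
	}

	/* Wake references only update the sleep-time enable mask */
	status = acpi_enable_gpe(gpe_device, gpe_number, ACPI_GPE_TYPE_WAKE);
	if (status == AE_TYPE) {
		/* Not a wake-capable GPE (no ACPI_GPE_CAN_WAKE flag) */
		status = AE_OK;	/* keep the runtime reference and carry on */
	}

	return status;
}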
@@ -321,27 +357,34 @@ ACPI_EXPORT_SYMBOL(acpi_enable_gpe)
*
* FUNCTION: acpi_disable_gpe
*
- * PARAMETERS: gpe_device - Parent GPE Device
+ * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
* gpe_number - GPE level within the GPE block
- * type - Purpose the GPE won't be used for any more
+ * gpe_type - ACPI_GPE_TYPE_RUNTIME or ACPI_GPE_TYPE_WAKE
+ * or both
*
* RETURN: Status
*
- * DESCRIPTION: Release a reference to a GPE and disable it if necessary
+ * DESCRIPTION: Remove a reference to a GPE. When the last reference is
+ * removed, only then is the GPE disabled (for runtime GPEs), or
+ * the GPE mask bit disabled (for wake GPEs)
*
******************************************************************************/
-acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 type)
+acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 gpe_type)
{
acpi_status status = AE_OK;
- acpi_cpu_flags flags;
struct acpi_gpe_event_info *gpe_event_info;
+ acpi_cpu_flags flags;
ACPI_FUNCTION_TRACE(acpi_disable_gpe);
- if (type & ~ACPI_GPE_TYPE_WAKE_RUN)
+ /* Parameter validation */
+
+ if (!gpe_type || (gpe_type & ~ACPI_GPE_TYPE_WAKE_RUN)) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
/* Ensure that we have a valid GPE number */
gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
@@ -350,18 +393,39 @@ acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 type)
goto unlock_and_exit;
}
- if ((type & ACPI_GPE_TYPE_RUNTIME) && gpe_event_info->runtime_count) {
- if (--gpe_event_info->runtime_count == 0)
+ /* Hardware-disable a runtime GPE on removal of the last reference */
+
+ if (gpe_type & ACPI_GPE_TYPE_RUNTIME) {
+ if (!gpe_event_info->runtime_count) {
+ status = AE_LIMIT; /* There are no references to remove */
+ goto unlock_and_exit;
+ }
+
+ gpe_event_info->runtime_count--;
+ if (!gpe_event_info->runtime_count) {
status = acpi_ev_disable_gpe(gpe_event_info);
+ if (ACPI_FAILURE(status)) {
+ gpe_event_info->runtime_count++;
+ goto unlock_and_exit;
+ }
+ }
}
- if ((type & ACPI_GPE_TYPE_WAKE) && gpe_event_info->wakeup_count) {
- /*
- * Wake-up GPEs are not enabled after leaving system sleep
- * states, so we don't need to disable them here.
- */
- if (--gpe_event_info->wakeup_count == 0)
- acpi_ev_update_gpe_enable_masks(gpe_event_info);
+ /*
+ * Update the enable masks for a wake GPE on removal of the last reference.
+ * There is no need to hardware-disable wake GPEs here; they are not
+ * currently enabled.
+ */
+ if (gpe_type & ACPI_GPE_TYPE_WAKE) {
+ if (!gpe_event_info->wakeup_count) {
+ status = AE_LIMIT; /* There are no references to remove */
+ goto unlock_and_exit;
+ }
+
+ gpe_event_info->wakeup_count--;
+ if (!gpe_event_info->wakeup_count) {
+ (void)acpi_ev_update_gpe_enable_masks(gpe_event_info);
+ }
}
unlock_and_exit:
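Note: taken together, the two reference-counted interfaces now behave like a get/put pair: the GPE is hardware-disabled only when the last runtime reference goes away, and underflow is reported as AE_LIMIT instead of being silently ignored. A minimal balanced-usage sketch under those assumptions:

#include <acpi/acpi.h>

/* Hypothetical driver fragment: balanced GPE references across probe/remove */
static acpi_status example_probe(acpi_handle gpe_device, u32 gpe_number)
{
	/* First runtime reference enables the GPE in hardware */
	return acpi_enable_gpe(gpe_device, gpe_number, ACPI_GPE_TYPE_RUNTIME);
}

static void example_remove(acpi_handle gpe_device, u32 gpe_number)
{
	acpi_status status;

	/* Last runtime reference disables the GPE in hardware */
	status = acpi_disable_gpe(gpe_device, gpe_number, ACPI_GPE_TYPE_RUNTIME);
	if (status == AE_LIMIT) {
		/* More disables than enables: a reference-count bug in the caller */
	}
}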
@@ -465,30 +529,23 @@ ACPI_EXPORT_SYMBOL(acpi_clear_event)
*
* FUNCTION: acpi_clear_gpe
*
- * PARAMETERS: gpe_device - Parent GPE Device
+ * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
* gpe_number - GPE level within the GPE block
- * Flags - Called from an ISR or not
*
* RETURN: Status
*
* DESCRIPTION: Clear an ACPI event (general purpose)
*
******************************************************************************/
-acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number, u32 flags)
+acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number)
{
acpi_status status = AE_OK;
struct acpi_gpe_event_info *gpe_event_info;
+ acpi_cpu_flags flags;
ACPI_FUNCTION_TRACE(acpi_clear_gpe);
- /* Use semaphore lock if not executing at interrupt level */
-
- if (flags & ACPI_NOT_ISR) {
- status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
- }
+ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
/* Ensure that we have a valid GPE number */
@@ -501,9 +558,7 @@ acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number, u32 flags)
status = acpi_hw_clear_gpe(gpe_event_info);
unlock_and_exit:
- if (flags & ACPI_NOT_ISR) {
- (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
- }
+ acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
return_ACPI_STATUS(status);
}
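Note: acpi_clear_gpe() now serializes on the GPE spinlock instead of the events mutex, so the caller no longer passes an ACPI_ISR/ACPI_NOT_ISR flag. A minimal sketch of the updated two-argument call; the surrounding handler context is assumed and not part of this patch:

#include <acpi/acpi.h>

/* Hypothetical example: acknowledge a GPE status bit before re-enabling it */
static void example_ack_and_reenable_gpe(u32 gpe_number)
{
	/* Only a spinlock is taken internally, so interrupt-level use is fine */
	if (ACPI_FAILURE(acpi_clear_gpe(NULL, gpe_number)))
		return;

	(void)acpi_set_gpe(NULL, gpe_number, ACPI_GPE_ENABLE);
}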
@@ -569,9 +624,8 @@ ACPI_EXPORT_SYMBOL(acpi_get_event_status)
*
* FUNCTION: acpi_get_gpe_status
*
- * PARAMETERS: gpe_device - Parent GPE Device
+ * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
* gpe_number - GPE level within the GPE block
- * Flags - Called from an ISR or not
* event_status - Where the current status of the event will
* be returned
*
@@ -582,21 +636,15 @@ ACPI_EXPORT_SYMBOL(acpi_get_event_status)
******************************************************************************/
acpi_status
acpi_get_gpe_status(acpi_handle gpe_device,
- u32 gpe_number, u32 flags, acpi_event_status * event_status)
+ u32 gpe_number, acpi_event_status *event_status)
{
acpi_status status = AE_OK;
struct acpi_gpe_event_info *gpe_event_info;
+ acpi_cpu_flags flags;
ACPI_FUNCTION_TRACE(acpi_get_gpe_status);
- /* Use semaphore lock if not executing at interrupt level */
-
- if (flags & ACPI_NOT_ISR) {
- status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
- }
+ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
/* Ensure that we have a valid GPE number */
@@ -614,9 +662,7 @@ acpi_get_gpe_status(acpi_handle gpe_device,
*event_status |= ACPI_EVENT_FLAG_HANDLE;
unlock_and_exit:
- if (flags & ACPI_NOT_ISR) {
- (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
- }
+ acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
return_ACPI_STATUS(status);
}
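Note: acpi_get_gpe_status() gets the same treatment: the ISR flag disappears and the GPE spinlock is taken internally. A minimal sketch of the new three-argument form, assuming the ACPI_EVENT_FLAG_* bits from actypes.h:

#include <acpi/acpi.h>

/* Hypothetical example: inspect the current state of a GPE */
static void example_query_gpe(u32 gpe_number)
{
	acpi_event_status event_status = 0;

	if (ACPI_FAILURE(acpi_get_gpe_status(NULL, gpe_number, &event_status)))
		return;

	if (event_status & ACPI_EVENT_FLAG_SET) {
		/* The GPE status bit is set: the event has fired */
	}
	if (event_status & ACPI_EVENT_FLAG_HANDLE) {
		/* A handler or _Lxx/_Exx method is attached to this GPE */
	}
}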
@@ -673,20 +719,15 @@ acpi_install_gpe_block(acpi_handle gpe_device,
goto unlock_and_exit;
}
- /* Run the _PRW methods and enable the GPEs */
-
- status = acpi_ev_initialize_gpe_block(node, gpe_block);
- if (ACPI_FAILURE(status)) {
- goto unlock_and_exit;
- }
-
- /* Get the device_object attached to the node */
+ /* Install block in the device_object attached to the node */
obj_desc = acpi_ns_get_attached_object(node);
if (!obj_desc) {
- /* No object, create a new one */
-
+ /*
+ * No object, create a new one (Device nodes do not always have
+ * an attached object)
+ */
obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_DEVICE);
if (!obj_desc) {
status = AE_NO_MEMORY;
@@ -705,10 +746,14 @@ acpi_install_gpe_block(acpi_handle gpe_device,
}
}
- /* Install the GPE block in the device_object */
+ /* Now install the GPE block in the device_object */
obj_desc->device.gpe_block = gpe_block;
+ /* Run the _PRW methods and enable the runtime GPEs in the new block */
+
+ status = acpi_ev_initialize_gpe_block(node, gpe_block);
+
unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return_ACPI_STATUS(status);
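Note: the reordering above matters for callers that create their own GPE blocks: the block is attached to the device object first, and only then are its _PRW methods run and its runtime GPEs enabled. A sketch of such a caller, assuming the existing acpi_install_gpe_block() signature (only its first parameter is visible in this hunk) and an invented I/O address:

#include <acpi/acpi.h>

/* Hypothetical example: register an extra GPE block behind a device object */
static acpi_status example_add_gpe_block(acpi_handle gpe_device)
{
	struct acpi_generic_address block_address = {
		.space_id = ACPI_ADR_SPACE_SYSTEM_IO,
		.address = 0x1000,	/* illustrative I/O address only */
	};

	/* _PRW evaluation and runtime enabling now happen after installation */
	return acpi_install_gpe_block(gpe_device, &block_address,
				      4 /* registers */, 5 /* interrupt */);
}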
@@ -839,8 +884,7 @@ acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
/* Increment Index by the number of GPEs in this block */
- info->next_block_base_index +=
- (gpe_block->register_count * ACPI_GPE_REGISTER_WIDTH);
+ info->next_block_base_index += gpe_block->gpe_count;
if (info->index < info->next_block_base_index) {
/*
diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c
index 7e8b3bedc376..008621c5ad85 100644
--- a/drivers/acpi/acpica/exconfig.c
+++ b/drivers/acpi/acpica/exconfig.c
@@ -82,8 +82,9 @@ acpi_ex_add_table(u32 table_index,
struct acpi_namespace_node *parent_node,
union acpi_operand_object **ddb_handle)
{
- acpi_status status;
union acpi_operand_object *obj_desc;
+ acpi_status status;
+ acpi_owner_id owner_id;
ACPI_FUNCTION_TRACE(ex_add_table);
@@ -119,7 +120,14 @@ acpi_ex_add_table(u32 table_index,
acpi_ns_exec_module_code_list();
acpi_ex_enter_interpreter();
- return_ACPI_STATUS(status);
+ /* Update GPEs for any new _PRW or _Lxx/_Exx methods. Ignore errors */
+
+ status = acpi_tb_get_owner_id(table_index, &owner_id);
+ if (ACPI_SUCCESS(status)) {
+ acpi_ev_update_gpes(owner_id);
+ }
+
+ return_ACPI_STATUS(AE_OK);
}
/*******************************************************************************
@@ -248,10 +256,8 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state,
status = acpi_get_table_by_index(table_index, &table);
if (ACPI_SUCCESS(status)) {
- ACPI_INFO((AE_INFO,
- "Dynamic OEM Table Load - [%.4s] OemId [%.6s] OemTableId [%.8s]",
- table->signature, table->oem_id,
- table->oem_table_id));
+ ACPI_INFO((AE_INFO, "Dynamic OEM Table Load:"));
+ acpi_tb_print_table_header(0, table);
}
/* Invoke table handler if present */
@@ -525,6 +531,9 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
return_ACPI_STATUS(status);
}
+ ACPI_INFO((AE_INFO, "Dynamic OEM Table Load:"));
+ acpi_tb_print_table_header(0, table_desc.pointer);
+
/* Remove the reference added by acpi_ex_store above */
acpi_ut_remove_reference(ddb_handle);
diff --git a/drivers/acpi/acpica/exconvrt.c b/drivers/acpi/acpica/exconvrt.c
index bda7aed0404b..b73bc50c5b76 100644
--- a/drivers/acpi/acpica/exconvrt.c
+++ b/drivers/acpi/acpica/exconvrt.c
@@ -650,7 +650,7 @@ acpi_ex_convert_to_target_type(acpi_object_type destination_type,
default:
ACPI_ERROR((AE_INFO,
- "Bad destination type during conversion: %X",
+ "Bad destination type during conversion: 0x%X",
destination_type));
status = AE_AML_INTERNAL;
break;
@@ -665,7 +665,7 @@ acpi_ex_convert_to_target_type(acpi_object_type destination_type,
default:
ACPI_ERROR((AE_INFO,
- "Unknown Target type ID 0x%X AmlOpcode %X DestType %s",
+ "Unknown Target type ID 0x%X AmlOpcode 0x%X DestType %s",
GET_CURRENT_ARG_TYPE(walk_state->op_info->
runtime_args),
walk_state->opcode,
diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c
index 0aa57d938698..3c61b48c73f5 100644
--- a/drivers/acpi/acpica/excreate.c
+++ b/drivers/acpi/acpica/excreate.c
@@ -306,12 +306,12 @@ acpi_ex_create_region(u8 * aml_start,
*/
if ((region_space >= ACPI_NUM_PREDEFINED_REGIONS) &&
(region_space < ACPI_USER_REGION_BEGIN)) {
- ACPI_ERROR((AE_INFO, "Invalid AddressSpace type %X",
+ ACPI_ERROR((AE_INFO, "Invalid AddressSpace type 0x%X",
region_space));
return_ACPI_STATUS(AE_AML_INVALID_SPACE_ID);
}
- ACPI_DEBUG_PRINT((ACPI_DB_LOAD, "Region Type - %s (%X)\n",
+ ACPI_DEBUG_PRINT((ACPI_DB_LOAD, "Region Type - %s (0x%X)\n",
acpi_ut_get_region_name(region_space), region_space));
/* Create the region descriptor */
diff --git a/drivers/acpi/acpica/exdebug.c b/drivers/acpi/acpica/exdebug.c
new file mode 100644
index 000000000000..be8c98b480d7
--- /dev/null
+++ b/drivers/acpi/acpica/exdebug.c
@@ -0,0 +1,261 @@
+/******************************************************************************
+ *
+ * Module Name: exdebug - Support for stores to the AML Debug Object
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2010, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acinterp.h"
+
+#define _COMPONENT ACPI_EXECUTER
+ACPI_MODULE_NAME("exdebug")
+
+#ifndef ACPI_NO_ERROR_MESSAGES
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_do_debug_object
+ *
+ * PARAMETERS: source_desc - Object to be output to "Debug Object"
+ * Level - Indentation level (used for packages)
+ * Index - Current package element, zero if not pkg
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Handles stores to the AML Debug Object. For example:
+ * Store(INT1, Debug)
+ *
+ * This function is not compiled if ACPI_NO_ERROR_MESSAGES is set.
+ *
+ * This function is only enabled if acpi_gbl_enable_aml_debug_object is set, or
+ * if ACPI_LV_DEBUG_OBJECT is set in the acpi_dbg_level. Thus, in the normal
+ * operational case, stores to the debug object are ignored but can be easily
+ * enabled if necessary.
+ *
+ ******************************************************************************/
+void
+acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
+ u32 level, u32 index)
+{
+ u32 i;
+
+ ACPI_FUNCTION_TRACE_PTR(ex_do_debug_object, source_desc);
+
+ /* Output must be enabled via the debug_object global or the dbg_level */
+
+ if (!acpi_gbl_enable_aml_debug_object &&
+ !(acpi_dbg_level & ACPI_LV_DEBUG_OBJECT)) {
+ return_VOID;
+ }
+
+ /*
+ * Print line header as long as we are not in the middle of an
+ * object display
+ */
+ if (!((level > 0) && index == 0)) {
+ acpi_os_printf("[ACPI Debug] %*s", level, " ");
+ }
+
+ /* Display the index for package output only */
+
+ if (index > 0) {
+ acpi_os_printf("(%.2u) ", index - 1);
+ }
+
+ if (!source_desc) {
+ acpi_os_printf("[Null Object]\n");
+ return_VOID;
+ }
+
+ if (ACPI_GET_DESCRIPTOR_TYPE(source_desc) == ACPI_DESC_TYPE_OPERAND) {
+ acpi_os_printf("%s ",
+ acpi_ut_get_object_type_name(source_desc));
+
+ if (!acpi_ut_valid_internal_object(source_desc)) {
+ acpi_os_printf("%p, Invalid Internal Object!\n",
+ source_desc);
+ return_VOID;
+ }
+ } else if (ACPI_GET_DESCRIPTOR_TYPE(source_desc) ==
+ ACPI_DESC_TYPE_NAMED) {
+ acpi_os_printf("%s: %p\n",
+ acpi_ut_get_type_name(((struct
+ acpi_namespace_node *)
+ source_desc)->type),
+ source_desc);
+ return_VOID;
+ } else {
+ return_VOID;
+ }
+
+ /* source_desc is of type ACPI_DESC_TYPE_OPERAND */
+
+ switch (source_desc->common.type) {
+ case ACPI_TYPE_INTEGER:
+
+ /* Output correct integer width */
+
+ if (acpi_gbl_integer_byte_width == 4) {
+ acpi_os_printf("0x%8.8X\n",
+ (u32)source_desc->integer.value);
+ } else {
+ acpi_os_printf("0x%8.8X%8.8X\n",
+ ACPI_FORMAT_UINT64(source_desc->integer.
+ value));
+ }
+ break;
+
+ case ACPI_TYPE_BUFFER:
+
+ acpi_os_printf("[0x%.2X]\n", (u32)source_desc->buffer.length);
+ acpi_ut_dump_buffer2(source_desc->buffer.pointer,
+ (source_desc->buffer.length < 256) ?
+ source_desc->buffer.length : 256,
+ DB_BYTE_DISPLAY);
+ break;
+
+ case ACPI_TYPE_STRING:
+
+ acpi_os_printf("[0x%.2X] \"%s\"\n",
+ source_desc->string.length,
+ source_desc->string.pointer);
+ break;
+
+ case ACPI_TYPE_PACKAGE:
+
+ acpi_os_printf("[Contains 0x%.2X Elements]\n",
+ source_desc->package.count);
+
+ /* Output the entire contents of the package */
+
+ for (i = 0; i < source_desc->package.count; i++) {
+ acpi_ex_do_debug_object(source_desc->package.
+ elements[i], level + 4, i + 1);
+ }
+ break;
+
+ case ACPI_TYPE_LOCAL_REFERENCE:
+
+ acpi_os_printf("[%s] ",
+ acpi_ut_get_reference_name(source_desc));
+
+ /* Decode the reference */
+
+ switch (source_desc->reference.class) {
+ case ACPI_REFCLASS_INDEX:
+
+ acpi_os_printf("0x%X\n", source_desc->reference.value);
+ break;
+
+ case ACPI_REFCLASS_TABLE:
+
+ /* Case for ddb_handle */
+
+ acpi_os_printf("Table Index 0x%X\n",
+ source_desc->reference.value);
+ return;
+
+ default:
+ break;
+ }
+
+ acpi_os_printf(" ");
+
+ /* Check for valid node first, then valid object */
+
+ if (source_desc->reference.node) {
+ if (ACPI_GET_DESCRIPTOR_TYPE
+ (source_desc->reference.node) !=
+ ACPI_DESC_TYPE_NAMED) {
+ acpi_os_printf
+ (" %p - Not a valid namespace node\n",
+ source_desc->reference.node);
+ } else {
+ acpi_os_printf("Node %p [%4.4s] ",
+ source_desc->reference.node,
+ (source_desc->reference.node)->
+ name.ascii);
+
+ switch ((source_desc->reference.node)->type) {
+
+ /* These types have no attached object */
+
+ case ACPI_TYPE_DEVICE:
+ acpi_os_printf("Device\n");
+ break;
+
+ case ACPI_TYPE_THERMAL:
+ acpi_os_printf("Thermal Zone\n");
+ break;
+
+ default:
+ acpi_ex_do_debug_object((source_desc->
+ reference.
+ node)->object,
+ level + 4, 0);
+ break;
+ }
+ }
+ } else if (source_desc->reference.object) {
+ if (ACPI_GET_DESCRIPTOR_TYPE
+ (source_desc->reference.object) ==
+ ACPI_DESC_TYPE_NAMED) {
+ acpi_ex_do_debug_object(((struct
+ acpi_namespace_node *)
+ source_desc->reference.
+ object)->object,
+ level + 4, 0);
+ } else {
+ acpi_ex_do_debug_object(source_desc->reference.
+ object, level + 4, 0);
+ }
+ }
+ break;
+
+ default:
+
+ acpi_os_printf("%p\n", source_desc);
+ break;
+ }
+
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_EXEC, "\n"));
+ return_VOID;
+}
+#endif
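Note: the new handler only produces output when acpi_gbl_enable_aml_debug_object is set or ACPI_LV_DEBUG_OBJECT is present in acpi_dbg_level, as its header comment states. A minimal sketch of turning it on from C, assuming both globals are exported through acpixf.h as elsewhere in ACPICA (the AML side is just a plain Store(..., Debug) in firmware):

#include <acpi/acpi.h>

/* Hypothetical debug hook: make AML "Store(..., Debug)" visible in the log */
static void example_enable_aml_debug_output(void)
{
	/* Either switch is sufficient; both default to off */
	acpi_gbl_enable_aml_debug_object = TRUE;

	/* Alternatively, enable the debug-object level in the trace mask */
	acpi_dbg_level |= ACPI_LV_DEBUG_OBJECT;
}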
diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c
index 6c79fecbee42..f17d2ff0031b 100644
--- a/drivers/acpi/acpica/exfield.c
+++ b/drivers/acpi/acpica/exfield.c
@@ -281,7 +281,7 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
if (source_desc->buffer.length < length) {
ACPI_ERROR((AE_INFO,
- "SMBus or IPMI write requires Buffer of length %X, found length %X",
+ "SMBus or IPMI write requires Buffer of length %u, found length %u",
length, source_desc->buffer.length));
return_ACPI_STATUS(AE_AML_BUFFER_LIMIT);
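Note: this hunk and many below apply one mechanical convention: decimal quantities (lengths, counts, widths) switch from %X or %d to %u, while values that stay hexadecimal gain an explicit 0x prefix. A small sketch of the convention, assuming it lives inside drivers/acpi/acpica/ so the private accommon.h and ACPI_ERROR macro are visible:

#include <acpi/acpi.h>
#include "accommon.h"

#define _COMPONENT ACPI_EXECUTER
ACPI_MODULE_NAME("exexample")

/* Hypothetical helper showing the message convention used in this patch */
static void example_report_errors(u32 required, u32 found, u16 opcode)
{
	/* Decimal quantities now use %u */
	ACPI_ERROR((AE_INFO,
		    "Buffer of length %u required, found length %u",
		    required, found));

	/* Hexadecimal values keep %X but gain an explicit 0x prefix */
	ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X", opcode));
}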
diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
index f68a216168be..a6dc26f0b3be 100644
--- a/drivers/acpi/acpica/exfldio.c
+++ b/drivers/acpi/acpica/exfldio.c
@@ -94,7 +94,7 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
/* We must have a valid region */
if (rgn_desc->common.type != ACPI_TYPE_REGION) {
- ACPI_ERROR((AE_INFO, "Needed Region, found type %X (%s)",
+ ACPI_ERROR((AE_INFO, "Needed Region, found type 0x%X (%s)",
rgn_desc->common.type,
acpi_ut_get_object_type_name(rgn_desc)));
@@ -175,7 +175,7 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
* byte, and a field with Dword access specified.
*/
ACPI_ERROR((AE_INFO,
- "Field [%4.4s] access width (%d bytes) too large for region [%4.4s] (length %X)",
+ "Field [%4.4s] access width (%u bytes) too large for region [%4.4s] (length %u)",
acpi_ut_get_node_name(obj_desc->
common_field.node),
obj_desc->common_field.access_byte_width,
@@ -189,7 +189,7 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
* exceeds region length, indicate an error
*/
ACPI_ERROR((AE_INFO,
- "Field [%4.4s] Base+Offset+Width %X+%X+%X is beyond end of region [%4.4s] (length %X)",
+ "Field [%4.4s] Base+Offset+Width %u+%u+%u is beyond end of region [%4.4s] (length %u)",
acpi_ut_get_node_name(obj_desc->common_field.node),
obj_desc->common_field.base_byte_offset,
field_datum_byte_offset,
@@ -281,13 +281,13 @@ acpi_ex_access_region(union acpi_operand_object *obj_desc,
if (ACPI_FAILURE(status)) {
if (status == AE_NOT_IMPLEMENTED) {
ACPI_ERROR((AE_INFO,
- "Region %s(%X) not implemented",
+ "Region %s(0x%X) not implemented",
acpi_ut_get_region_name(rgn_desc->region.
space_id),
rgn_desc->region.space_id));
} else if (status == AE_NOT_EXIST) {
ACPI_ERROR((AE_INFO,
- "Region %s(%X) has no handler",
+ "Region %s(0x%X) has no handler",
acpi_ut_get_region_name(rgn_desc->region.
space_id),
rgn_desc->region.space_id));
@@ -525,7 +525,7 @@ acpi_ex_field_datum_io(union acpi_operand_object *obj_desc,
default:
- ACPI_ERROR((AE_INFO, "Wrong object type in field I/O %X",
+ ACPI_ERROR((AE_INFO, "Wrong object type in field I/O %u",
obj_desc->common.type));
status = AE_AML_INTERNAL;
break;
@@ -630,7 +630,7 @@ acpi_ex_write_with_update_rule(union acpi_operand_object *obj_desc,
default:
ACPI_ERROR((AE_INFO,
- "Unknown UpdateRule value: %X",
+ "Unknown UpdateRule value: 0x%X",
(obj_desc->common_field.
field_flags &
AML_FIELD_UPDATE_RULE_MASK)));
@@ -689,7 +689,7 @@ acpi_ex_extract_from_field(union acpi_operand_object *obj_desc,
if (buffer_length <
ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->common_field.bit_length)) {
ACPI_ERROR((AE_INFO,
- "Field size %X (bits) is too large for buffer (%X)",
+ "Field size %u (bits) is too large for buffer (%u)",
obj_desc->common_field.bit_length, buffer_length));
return_ACPI_STATUS(AE_BUFFER_OVERFLOW);
diff --git a/drivers/acpi/acpica/exmisc.c b/drivers/acpi/acpica/exmisc.c
index c5bb1eeed2df..95db4be0877b 100644
--- a/drivers/acpi/acpica/exmisc.c
+++ b/drivers/acpi/acpica/exmisc.c
@@ -99,7 +99,7 @@ acpi_ex_get_object_reference(union acpi_operand_object *obj_desc,
default:
- ACPI_ERROR((AE_INFO, "Unknown Reference Class %2.2X",
+ ACPI_ERROR((AE_INFO, "Unknown Reference Class 0x%2.2X",
obj_desc->reference.class));
return_ACPI_STATUS(AE_AML_INTERNAL);
}
@@ -115,7 +115,7 @@ acpi_ex_get_object_reference(union acpi_operand_object *obj_desc,
default:
- ACPI_ERROR((AE_INFO, "Invalid descriptor type %X",
+ ACPI_ERROR((AE_INFO, "Invalid descriptor type 0x%X",
ACPI_GET_DESCRIPTOR_TYPE(obj_desc)));
return_ACPI_STATUS(AE_TYPE);
}
@@ -276,7 +276,7 @@ acpi_ex_do_concatenate(union acpi_operand_object *operand0,
break;
default:
- ACPI_ERROR((AE_INFO, "Invalid object type: %X",
+ ACPI_ERROR((AE_INFO, "Invalid object type: 0x%X",
operand0->common.type));
status = AE_AML_INTERNAL;
}
@@ -378,7 +378,7 @@ acpi_ex_do_concatenate(union acpi_operand_object *operand0,
/* Invalid object type, should not happen here */
- ACPI_ERROR((AE_INFO, "Invalid object type: %X",
+ ACPI_ERROR((AE_INFO, "Invalid object type: 0x%X",
operand0->common.type));
status = AE_AML_INTERNAL;
goto cleanup;
diff --git a/drivers/acpi/acpica/exmutex.c b/drivers/acpi/acpica/exmutex.c
index 7116bc86494d..f73be97043c0 100644
--- a/drivers/acpi/acpica/exmutex.c
+++ b/drivers/acpi/acpica/exmutex.c
@@ -85,10 +85,10 @@ void acpi_ex_unlink_mutex(union acpi_operand_object *obj_desc)
(obj_desc->mutex.prev)->mutex.next = obj_desc->mutex.next;
/*
- * Migrate the previous sync level associated with this mutex to the
- * previous mutex on the list so that it may be preserved. This handles
- * the case where several mutexes have been acquired at the same level,
- * but are not released in opposite order.
+ * Migrate the previous sync level associated with this mutex to
+ * the previous mutex on the list so that it may be preserved.
+ * This handles the case where several mutexes have been acquired
+ * at the same level, but are not released in opposite order.
*/
(obj_desc->mutex.prev)->mutex.original_sync_level =
obj_desc->mutex.original_sync_level;
@@ -101,8 +101,8 @@ void acpi_ex_unlink_mutex(union acpi_operand_object *obj_desc)
*
* FUNCTION: acpi_ex_link_mutex
*
- * PARAMETERS: obj_desc - The mutex to be linked
- * Thread - Current executing thread object
+ * PARAMETERS: obj_desc - The mutex to be linked
+ * Thread - Current executing thread object
*
* RETURN: None
*
@@ -138,9 +138,9 @@ acpi_ex_link_mutex(union acpi_operand_object *obj_desc,
*
* FUNCTION: acpi_ex_acquire_mutex_object
*
- * PARAMETERS: time_desc - Timeout in milliseconds
+ * PARAMETERS: Timeout - Timeout in milliseconds
* obj_desc - Mutex object
- * Thread - Current thread state
+ * thread_id - Current thread state
*
* RETURN: Status
*
@@ -234,7 +234,7 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc,
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
- /* Must have a valid thread ID */
+ /* Must have a valid thread state struct */
if (!walk_state->thread) {
ACPI_ERROR((AE_INFO,
@@ -249,7 +249,7 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc,
*/
if (walk_state->thread->current_sync_level > obj_desc->mutex.sync_level) {
ACPI_ERROR((AE_INFO,
- "Cannot acquire Mutex [%4.4s], current SyncLevel is too large (%d)",
+ "Cannot acquire Mutex [%4.4s], current SyncLevel is too large (%u)",
acpi_ut_get_node_name(obj_desc->mutex.node),
walk_state->thread->current_sync_level));
return_ACPI_STATUS(AE_AML_MUTEX_ORDER);
@@ -359,6 +359,7 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
{
acpi_status status = AE_OK;
u8 previous_sync_level;
+ struct acpi_thread_state *owner_thread;
ACPI_FUNCTION_TRACE(ex_release_mutex);
@@ -366,9 +367,11 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
+ owner_thread = obj_desc->mutex.owner_thread;
+
/* The mutex must have been previously acquired in order to release it */
- if (!obj_desc->mutex.owner_thread) {
+ if (!owner_thread) {
ACPI_ERROR((AE_INFO,
"Cannot release Mutex [%4.4s], not acquired",
acpi_ut_get_node_name(obj_desc->mutex.node)));
@@ -387,16 +390,13 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
* The Mutex is owned, but this thread must be the owner.
* Special case for Global Lock, any thread can release
*/
- if ((obj_desc->mutex.owner_thread->thread_id !=
- walk_state->thread->thread_id)
- && (obj_desc != acpi_gbl_global_lock_mutex)) {
+ if ((owner_thread->thread_id != walk_state->thread->thread_id) &&
+ (obj_desc != acpi_gbl_global_lock_mutex)) {
ACPI_ERROR((AE_INFO,
"Thread %p cannot release Mutex [%4.4s] acquired by thread %p",
ACPI_CAST_PTR(void, walk_state->thread->thread_id),
acpi_ut_get_node_name(obj_desc->mutex.node),
- ACPI_CAST_PTR(void,
- obj_desc->mutex.owner_thread->
- thread_id)));
+ ACPI_CAST_PTR(void, owner_thread->thread_id)));
return_ACPI_STATUS(AE_AML_NOT_OWNER);
}
@@ -407,10 +407,9 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
* different level can only mean that the mutex ordering rule is being
* violated. This behavior is clarified in ACPI 4.0 specification.
*/
- if (obj_desc->mutex.sync_level !=
- walk_state->thread->current_sync_level) {
+ if (obj_desc->mutex.sync_level != owner_thread->current_sync_level) {
ACPI_ERROR((AE_INFO,
- "Cannot release Mutex [%4.4s], SyncLevel mismatch: mutex %d current %d",
+ "Cannot release Mutex [%4.4s], SyncLevel mismatch: mutex %u current %u",
acpi_ut_get_node_name(obj_desc->mutex.node),
obj_desc->mutex.sync_level,
walk_state->thread->current_sync_level));
@@ -423,7 +422,7 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
* acquired, but are not released in reverse order.
*/
previous_sync_level =
- walk_state->thread->acquired_mutex_list->mutex.original_sync_level;
+ owner_thread->acquired_mutex_list->mutex.original_sync_level;
status = acpi_ex_release_mutex_object(obj_desc);
if (ACPI_FAILURE(status)) {
@@ -434,8 +433,9 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
/* Restore the previous sync_level */
- walk_state->thread->current_sync_level = previous_sync_level;
+ owner_thread->current_sync_level = previous_sync_level;
}
+
return_ACPI_STATUS(status);
}
@@ -443,7 +443,7 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
*
* FUNCTION: acpi_ex_release_all_mutexes
*
- * PARAMETERS: Thread - Current executing thread object
+ * PARAMETERS: Thread - Current executing thread object
*
* RETURN: Status
*
diff --git a/drivers/acpi/acpica/exnames.c b/drivers/acpi/acpica/exnames.c
index 679f308c5a89..d11e539ef763 100644
--- a/drivers/acpi/acpica/exnames.c
+++ b/drivers/acpi/acpica/exnames.c
@@ -102,7 +102,7 @@ static char *acpi_ex_allocate_name_string(u32 prefix_count, u32 num_name_segs)
name_string = ACPI_ALLOCATE(size_needed);
if (!name_string) {
ACPI_ERROR((AE_INFO,
- "Could not allocate size %d", size_needed));
+ "Could not allocate size %u", size_needed));
return_PTR(NULL);
}
@@ -216,7 +216,7 @@ static acpi_status acpi_ex_name_segment(u8 ** in_aml_address, char *name_string)
*/
status = AE_AML_BAD_NAME;
ACPI_ERROR((AE_INFO,
- "Bad character %02x in name, at %p",
+ "Bad character 0x%02x in name, at %p",
*aml_address, aml_address));
}
diff --git a/drivers/acpi/acpica/exoparg1.c b/drivers/acpi/acpica/exoparg1.c
index 99adbab5acbf..84e4d185aa25 100644
--- a/drivers/acpi/acpica/exoparg1.c
+++ b/drivers/acpi/acpica/exoparg1.c
@@ -110,7 +110,7 @@ acpi_status acpi_ex_opcode_0A_0T_1R(struct acpi_walk_state *walk_state)
default: /* Unknown opcode */
- ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
+ ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
walk_state->opcode));
status = AE_AML_BAD_OPCODE;
break;
@@ -173,7 +173,7 @@ acpi_status acpi_ex_opcode_1A_0T_0R(struct acpi_walk_state *walk_state)
case AML_SLEEP_OP: /* Sleep (msec_time) */
- status = acpi_ex_system_do_suspend(operand[0]->integer.value);
+ status = acpi_ex_system_do_sleep(operand[0]->integer.value);
break;
case AML_STALL_OP: /* Stall (usec_time) */
@@ -189,7 +189,7 @@ acpi_status acpi_ex_opcode_1A_0T_0R(struct acpi_walk_state *walk_state)
default: /* Unknown opcode */
- ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
+ ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
walk_state->opcode));
status = AE_AML_BAD_OPCODE;
break;
@@ -229,7 +229,7 @@ acpi_status acpi_ex_opcode_1A_1T_0R(struct acpi_walk_state *walk_state)
default: /* Unknown opcode */
- ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
+ ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
walk_state->opcode));
status = AE_AML_BAD_OPCODE;
goto cleanup;
@@ -399,7 +399,7 @@ acpi_status acpi_ex_opcode_1A_1T_1R(struct acpi_walk_state *walk_state)
if (digit > 0) {
ACPI_ERROR((AE_INFO,
- "Integer too large to convert to BCD: %8.8X%8.8X",
+ "Integer too large to convert to BCD: 0x%8.8X%8.8X",
ACPI_FORMAT_UINT64(operand[0]->
integer.value)));
status = AE_AML_NUMERIC_OVERFLOW;
@@ -540,7 +540,7 @@ acpi_status acpi_ex_opcode_1A_1T_1R(struct acpi_walk_state *walk_state)
default: /* Unknown opcode */
- ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
+ ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
walk_state->opcode));
status = AE_AML_BAD_OPCODE;
goto cleanup;
@@ -979,7 +979,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
default:
ACPI_ERROR((AE_INFO,
- "Unknown Index TargetType %X in reference object %p",
+ "Unknown Index TargetType 0x%X in reference object %p",
operand[0]->reference.
target_type, operand[0]));
status = AE_AML_OPERAND_TYPE;
@@ -1007,7 +1007,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
default:
ACPI_ERROR((AE_INFO,
- "Unknown class in reference(%p) - %2.2X",
+ "Unknown class in reference(%p) - 0x%2.2X",
operand[0],
operand[0]->reference.class));
@@ -1019,7 +1019,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
default:
- ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
+ ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
walk_state->opcode));
status = AE_AML_BAD_OPCODE;
goto cleanup;
diff --git a/drivers/acpi/acpica/exoparg2.c b/drivers/acpi/acpica/exoparg2.c
index 22841bbbe63c..10e104cf0fb9 100644
--- a/drivers/acpi/acpica/exoparg2.c
+++ b/drivers/acpi/acpica/exoparg2.c
@@ -119,33 +119,6 @@ acpi_status acpi_ex_opcode_2A_0T_0R(struct acpi_walk_state *walk_state)
status = AE_AML_OPERAND_TYPE;
break;
}
-#ifdef ACPI_GPE_NOTIFY_CHECK
- /*
- * GPE method wake/notify check. Here, we want to ensure that we
- * don't receive any "DeviceWake" Notifies from a GPE _Lxx or _Exx
- * GPE method during system runtime. If we do, the GPE is marked
- * as "wake-only" and disabled.
- *
- * 1) Is the Notify() value == device_wake?
- * 2) Is this a GPE deferred method? (An _Lxx or _Exx method)
- * 3) Did the original GPE happen at system runtime?
- * (versus during wake)
- *
- * If all three cases are true, this is a wake-only GPE that should
- * be disabled at runtime.
- */
- if (value == 2) { /* device_wake */
- status =
- acpi_ev_check_for_wake_only_gpe(walk_state->
- gpe_event_info);
- if (ACPI_FAILURE(status)) {
-
- /* AE_WAKE_ONLY_GPE only error, means ignore this notify */
-
- return_ACPI_STATUS(AE_OK)
- }
- }
-#endif
/*
* Dispatch the notify to the appropriate handler
@@ -159,7 +132,7 @@ acpi_status acpi_ex_opcode_2A_0T_0R(struct acpi_walk_state *walk_state)
default:
- ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
+ ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
walk_state->opcode));
status = AE_AML_BAD_OPCODE;
}
@@ -224,7 +197,7 @@ acpi_status acpi_ex_opcode_2A_2T_1R(struct acpi_walk_state *walk_state)
default:
- ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
+ ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
walk_state->opcode));
status = AE_AML_BAD_OPCODE;
goto cleanup;
@@ -441,7 +414,7 @@ acpi_status acpi_ex_opcode_2A_1T_1R(struct acpi_walk_state *walk_state)
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
- "Index (%X%8.8X) is beyond end of object",
+ "Index (0x%8.8X%8.8X) is beyond end of object",
ACPI_FORMAT_UINT64(index)));
goto cleanup;
}
@@ -464,7 +437,7 @@ acpi_status acpi_ex_opcode_2A_1T_1R(struct acpi_walk_state *walk_state)
default:
- ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
+ ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
walk_state->opcode));
status = AE_AML_BAD_OPCODE;
break;
@@ -572,7 +545,7 @@ acpi_status acpi_ex_opcode_2A_0T_1R(struct acpi_walk_state *walk_state)
default:
- ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
+ ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
walk_state->opcode));
status = AE_AML_BAD_OPCODE;
goto cleanup;
diff --git a/drivers/acpi/acpica/exoparg3.c b/drivers/acpi/acpica/exoparg3.c
index 8bb1012ef44e..7a08d23befcd 100644
--- a/drivers/acpi/acpica/exoparg3.c
+++ b/drivers/acpi/acpica/exoparg3.c
@@ -119,7 +119,7 @@ acpi_status acpi_ex_opcode_3A_0T_0R(struct acpi_walk_state *walk_state)
default:
- ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
+ ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
walk_state->opcode));
status = AE_AML_BAD_OPCODE;
goto cleanup;
@@ -244,7 +244,7 @@ acpi_status acpi_ex_opcode_3A_1T_1R(struct acpi_walk_state *walk_state)
default:
- ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
+ ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
walk_state->opcode));
status = AE_AML_BAD_OPCODE;
goto cleanup;
diff --git a/drivers/acpi/acpica/exoparg6.c b/drivers/acpi/acpica/exoparg6.c
index f256b6a25f2e..4b50730cf9a0 100644
--- a/drivers/acpi/acpica/exoparg6.c
+++ b/drivers/acpi/acpica/exoparg6.c
@@ -245,7 +245,7 @@ acpi_status acpi_ex_opcode_6A_0T_1R(struct acpi_walk_state * walk_state)
index = operand[5]->integer.value;
if (index >= operand[0]->package.count) {
ACPI_ERROR((AE_INFO,
- "Index (%X%8.8X) beyond package end (%X)",
+ "Index (0x%8.8X%8.8X) beyond package end (0x%X)",
ACPI_FORMAT_UINT64(index),
operand[0]->package.count));
status = AE_AML_PACKAGE_LIMIT;
@@ -314,7 +314,7 @@ acpi_status acpi_ex_opcode_6A_0T_1R(struct acpi_walk_state * walk_state)
default:
- ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
+ ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
walk_state->opcode));
status = AE_AML_BAD_OPCODE;
goto cleanup;
diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c
index 2fbfe51fb141..25059dace0ad 100644
--- a/drivers/acpi/acpica/exprep.c
+++ b/drivers/acpi/acpica/exprep.c
@@ -275,7 +275,7 @@ acpi_ex_decode_field_access(union acpi_operand_object *obj_desc,
default:
/* Invalid field access type */
- ACPI_ERROR((AE_INFO, "Unknown field access type %X", access));
+ ACPI_ERROR((AE_INFO, "Unknown field access type 0x%X", access));
return_UINT32(0);
}
@@ -430,7 +430,7 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
type = acpi_ns_get_type(info->region_node);
if (type != ACPI_TYPE_REGION) {
ACPI_ERROR((AE_INFO,
- "Needed Region, found type %X (%s)",
+ "Needed Region, found type 0x%X (%s)",
type, acpi_ut_get_type_name(type)));
return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c
index 486b2e5661b6..531000fc77d2 100644
--- a/drivers/acpi/acpica/exregion.c
+++ b/drivers/acpi/acpica/exregion.c
@@ -105,7 +105,7 @@ acpi_ex_system_memory_space_handler(u32 function,
break;
default:
- ACPI_ERROR((AE_INFO, "Invalid SystemMemory width %d",
+ ACPI_ERROR((AE_INFO, "Invalid SystemMemory width %u",
bit_width));
return_ACPI_STATUS(AE_AML_OPERAND_VALUE);
}
@@ -173,7 +173,7 @@ acpi_ex_system_memory_space_handler(u32 function,
mem_info->mapped_logical_address = acpi_os_map_memory((acpi_physical_address) address, map_length);
if (!mem_info->mapped_logical_address) {
ACPI_ERROR((AE_INFO,
- "Could not map memory at %8.8X%8.8X, size %X",
+ "Could not map memory at 0x%8.8X%8.8X, size %u",
ACPI_FORMAT_NATIVE_UINT(address),
(u32) map_length));
mem_info->mapped_length = 0;
@@ -491,8 +491,10 @@ acpi_ex_data_table_space_handler(u32 function,
{
ACPI_FUNCTION_TRACE(ex_data_table_space_handler);
- /* Perform the memory read or write */
-
+ /*
+ * Perform the memory read or write. The bit_width was already
+ * validated.
+ */
switch (function) {
case ACPI_READ:
@@ -502,9 +504,14 @@ acpi_ex_data_table_space_handler(u32 function,
break;
case ACPI_WRITE:
+
+ ACPI_MEMCPY(ACPI_PHYSADDR_TO_PTR(address),
+ ACPI_CAST_PTR(char, value), ACPI_DIV_8(bit_width));
+ break;
+
default:
- return_ACPI_STATUS(AE_SUPPORT);
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
}
return_ACPI_STATUS(AE_OK);
diff --git a/drivers/acpi/acpica/exresnte.c b/drivers/acpi/acpica/exresnte.c
index fdc1b27999ef..1fa4289a687e 100644
--- a/drivers/acpi/acpica/exresnte.c
+++ b/drivers/acpi/acpica/exresnte.c
@@ -252,7 +252,7 @@ acpi_ex_resolve_node_to_value(struct acpi_namespace_node **object_ptr,
/* No named references are allowed here */
ACPI_ERROR((AE_INFO,
- "Unsupported Reference type %X",
+ "Unsupported Reference type 0x%X",
source_desc->reference.class));
return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
@@ -264,7 +264,7 @@ acpi_ex_resolve_node_to_value(struct acpi_namespace_node **object_ptr,
/* Default case is for unknown types */
ACPI_ERROR((AE_INFO,
- "Node %p - Unknown object type %X",
+ "Node %p - Unknown object type 0x%X",
node, entry_type));
return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
diff --git a/drivers/acpi/acpica/exresolv.c b/drivers/acpi/acpica/exresolv.c
index fdd6a7079b97..7ca35ea8acea 100644
--- a/drivers/acpi/acpica/exresolv.c
+++ b/drivers/acpi/acpica/exresolv.c
@@ -231,7 +231,7 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
/* Invalid reference object */
ACPI_ERROR((AE_INFO,
- "Unknown TargetType %X in Index/Reference object %p",
+ "Unknown TargetType 0x%X in Index/Reference object %p",
stack_desc->reference.target_type,
stack_desc));
status = AE_AML_INTERNAL;
@@ -273,8 +273,8 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
default:
ACPI_ERROR((AE_INFO,
- "Unknown Reference type %X in %p", ref_type,
- stack_desc));
+ "Unknown Reference type 0x%X in %p",
+ ref_type, stack_desc));
status = AE_AML_INTERNAL;
break;
}
@@ -403,7 +403,8 @@ acpi_ex_resolve_multiple(struct acpi_walk_state *walk_state,
if (ACPI_GET_DESCRIPTOR_TYPE(node) !=
ACPI_DESC_TYPE_NAMED) {
- ACPI_ERROR((AE_INFO, "Not a NS node %p [%s]",
+ ACPI_ERROR((AE_INFO,
+ "Not a namespace node %p [%s]",
node,
acpi_ut_get_descriptor_name(node)));
return_ACPI_STATUS(AE_AML_INTERNAL);
@@ -507,7 +508,7 @@ acpi_ex_resolve_multiple(struct acpi_walk_state *walk_state,
default:
ACPI_ERROR((AE_INFO,
- "Unknown Reference Class %2.2X",
+ "Unknown Reference Class 0x%2.2X",
obj_desc->reference.class));
return_ACPI_STATUS(AE_AML_INTERNAL);
}
diff --git a/drivers/acpi/acpica/exresop.c b/drivers/acpi/acpica/exresop.c
index c5ecd615f145..8c97cfd6a0fd 100644
--- a/drivers/acpi/acpica/exresop.c
+++ b/drivers/acpi/acpica/exresop.c
@@ -153,7 +153,7 @@ acpi_ex_resolve_operands(u16 opcode,
arg_types = op_info->runtime_args;
if (arg_types == ARGI_INVALID_OPCODE) {
- ACPI_ERROR((AE_INFO, "Unknown AML opcode %X", opcode));
+ ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X", opcode));
return_ACPI_STATUS(AE_AML_INTERNAL);
}
@@ -218,7 +218,7 @@ acpi_ex_resolve_operands(u16 opcode,
if (!acpi_ut_valid_object_type(object_type)) {
ACPI_ERROR((AE_INFO,
- "Bad operand object type [%X]",
+ "Bad operand object type [0x%X]",
object_type));
return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
@@ -253,7 +253,7 @@ acpi_ex_resolve_operands(u16 opcode,
default:
ACPI_ERROR((AE_INFO,
- "Unknown Reference Class %2.2X in %p",
+ "Unknown Reference Class 0x%2.2X in %p",
obj_desc->reference.class,
obj_desc));
@@ -665,7 +665,7 @@ acpi_ex_resolve_operands(u16 opcode,
/* Unknown type */
ACPI_ERROR((AE_INFO,
- "Internal - Unknown ARGI (required operand) type %X",
+ "Internal - Unknown ARGI (required operand) type 0x%X",
this_arg_type));
return_ACPI_STATUS(AE_BAD_PARAMETER);
diff --git a/drivers/acpi/acpica/exstore.c b/drivers/acpi/acpica/exstore.c
index 702b9ecfd44b..1624436ba4c5 100644
--- a/drivers/acpi/acpica/exstore.c
+++ b/drivers/acpi/acpica/exstore.c
@@ -1,4 +1,3 @@
-
/******************************************************************************
*
* Module Name: exstore - AML Interpreter object store support
@@ -53,10 +52,6 @@
ACPI_MODULE_NAME("exstore")
/* Local prototypes */
-static void
-acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
- u32 level, u32 index);
-
static acpi_status
acpi_ex_store_object_to_index(union acpi_operand_object *val_desc,
union acpi_operand_object *dest_desc,
@@ -64,215 +59,6 @@ acpi_ex_store_object_to_index(union acpi_operand_object *val_desc,
/*******************************************************************************
*
- * FUNCTION: acpi_ex_do_debug_object
- *
- * PARAMETERS: source_desc - Value to be stored
- * Level - Indentation level (used for packages)
- * Index - Current package element, zero if not pkg
- *
- * RETURN: None
- *
- * DESCRIPTION: Handles stores to the Debug Object.
- *
- ******************************************************************************/
-
-static void
-acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
- u32 level, u32 index)
-{
- u32 i;
-
- ACPI_FUNCTION_TRACE_PTR(ex_do_debug_object, source_desc);
-
- /* Print line header as long as we are not in the middle of an object display */
-
- if (!((level > 0) && index == 0)) {
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "[ACPI Debug] %*s",
- level, " "));
- }
-
- /* Display index for package output only */
-
- if (index > 0) {
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT,
- "(%.2u) ", index - 1));
- }
-
- if (!source_desc) {
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "[Null Object]\n"));
- return_VOID;
- }
-
- if (ACPI_GET_DESCRIPTOR_TYPE(source_desc) == ACPI_DESC_TYPE_OPERAND) {
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "%s ",
- acpi_ut_get_object_type_name
- (source_desc)));
-
- if (!acpi_ut_valid_internal_object(source_desc)) {
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT,
- "%p, Invalid Internal Object!\n",
- source_desc));
- return_VOID;
- }
- } else if (ACPI_GET_DESCRIPTOR_TYPE(source_desc) ==
- ACPI_DESC_TYPE_NAMED) {
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "%s: %p\n",
- acpi_ut_get_type_name(((struct
- acpi_namespace_node
- *)source_desc)->
- type),
- source_desc));
- return_VOID;
- } else {
- return_VOID;
- }
-
- /* source_desc is of type ACPI_DESC_TYPE_OPERAND */
-
- switch (source_desc->common.type) {
- case ACPI_TYPE_INTEGER:
-
- /* Output correct integer width */
-
- if (acpi_gbl_integer_byte_width == 4) {
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "0x%8.8X\n",
- (u32) source_desc->integer.
- value));
- } else {
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT,
- "0x%8.8X%8.8X\n",
- ACPI_FORMAT_UINT64(source_desc->
- integer.
- value)));
- }
- break;
-
- case ACPI_TYPE_BUFFER:
-
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "[0x%.2X]\n",
- (u32) source_desc->buffer.length));
- ACPI_DUMP_BUFFER(source_desc->buffer.pointer,
- (source_desc->buffer.length <
- 256) ? source_desc->buffer.length : 256);
- break;
-
- case ACPI_TYPE_STRING:
-
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "[0x%.2X] \"%s\"\n",
- source_desc->string.length,
- source_desc->string.pointer));
- break;
-
- case ACPI_TYPE_PACKAGE:
-
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT,
- "[Contains 0x%.2X Elements]\n",
- source_desc->package.count));
-
- /* Output the entire contents of the package */
-
- for (i = 0; i < source_desc->package.count; i++) {
- acpi_ex_do_debug_object(source_desc->package.
- elements[i], level + 4, i + 1);
- }
- break;
-
- case ACPI_TYPE_LOCAL_REFERENCE:
-
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "[%s] ",
- acpi_ut_get_reference_name(source_desc)));
-
- /* Decode the reference */
-
- switch (source_desc->reference.class) {
- case ACPI_REFCLASS_INDEX:
-
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "0x%X\n",
- source_desc->reference.value));
- break;
-
- case ACPI_REFCLASS_TABLE:
-
- /* Case for ddb_handle */
-
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT,
- "Table Index 0x%X\n",
- source_desc->reference.value));
- return;
-
- default:
- break;
- }
-
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, " "));
-
- /* Check for valid node first, then valid object */
-
- if (source_desc->reference.node) {
- if (ACPI_GET_DESCRIPTOR_TYPE
- (source_desc->reference.node) !=
- ACPI_DESC_TYPE_NAMED) {
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT,
- " %p - Not a valid namespace node\n",
- source_desc->reference.
- node));
- } else {
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT,
- "Node %p [%4.4s] ",
- source_desc->reference.
- node,
- (source_desc->reference.
- node)->name.ascii));
-
- switch ((source_desc->reference.node)->type) {
-
- /* These types have no attached object */
-
- case ACPI_TYPE_DEVICE:
- acpi_os_printf("Device\n");
- break;
-
- case ACPI_TYPE_THERMAL:
- acpi_os_printf("Thermal Zone\n");
- break;
-
- default:
- acpi_ex_do_debug_object((source_desc->
- reference.
- node)->object,
- level + 4, 0);
- break;
- }
- }
- } else if (source_desc->reference.object) {
- if (ACPI_GET_DESCRIPTOR_TYPE
- (source_desc->reference.object) ==
- ACPI_DESC_TYPE_NAMED) {
- acpi_ex_do_debug_object(((struct
- acpi_namespace_node *)
- source_desc->reference.
- object)->object,
- level + 4, 0);
- } else {
- acpi_ex_do_debug_object(source_desc->reference.
- object, level + 4, 0);
- }
- }
- break;
-
- default:
-
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "%p\n",
- source_desc));
- break;
- }
-
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_EXEC, "\n"));
- return_VOID;
-}
-
-/*******************************************************************************
- *
* FUNCTION: acpi_ex_store
*
* PARAMETERS: *source_desc - Value to be stored
@@ -402,12 +188,12 @@ acpi_ex_store(union acpi_operand_object *source_desc,
source_desc,
acpi_ut_get_object_type_name(source_desc)));
- acpi_ex_do_debug_object(source_desc, 0, 0);
+ ACPI_DEBUG_OBJECT(source_desc, 0, 0);
break;
default:
- ACPI_ERROR((AE_INFO, "Unknown Reference Class %2.2X",
+ ACPI_ERROR((AE_INFO, "Unknown Reference Class 0x%2.2X",
ref_desc->reference.class));
ACPI_DUMP_ENTRY(ref_desc, ACPI_LV_INFO);
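Note: the call site in acpi_ex_store() now goes through an ACPI_DEBUG_OBJECT macro rather than calling the (now external) acpi_ex_do_debug_object() directly, so the whole store-to-Debug path can compile away. A sketch of what such a macro is assumed to look like; the real definition lives in acmacros.h and is not part of the hunks shown here:

/* Assumed shape of the wrapper macro (see acmacros.h for the real one) */
#ifndef ACPI_NO_ERROR_MESSAGES
#define ACPI_DEBUG_OBJECT(obj, level, index) \
	acpi_ex_do_debug_object((obj), (level), (index))
#else
#define ACPI_DEBUG_OBJECT(obj, level, index)
#endif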
diff --git a/drivers/acpi/acpica/exsystem.c b/drivers/acpi/acpica/exsystem.c
index e11b6cb42a57..6d32e09327f1 100644
--- a/drivers/acpi/acpica/exsystem.c
+++ b/drivers/acpi/acpica/exsystem.c
@@ -170,7 +170,7 @@ acpi_status acpi_ex_system_do_stall(u32 how_long)
* (ACPI specifies 100 usec as max, but this gives some slack in
* order to support existing BIOSs)
*/
- ACPI_ERROR((AE_INFO, "Time parameter is too large (%d)",
+ ACPI_ERROR((AE_INFO, "Time parameter is too large (%u)",
how_long));
status = AE_AML_OPERAND_VALUE;
} else {
@@ -182,18 +182,18 @@ acpi_status acpi_ex_system_do_stall(u32 how_long)
/*******************************************************************************
*
- * FUNCTION: acpi_ex_system_do_suspend
+ * FUNCTION: acpi_ex_system_do_sleep
*
- * PARAMETERS: how_long - The amount of time to suspend,
+ * PARAMETERS: how_long - The amount of time to sleep,
* in milliseconds
*
* RETURN: None
*
- * DESCRIPTION: Suspend running thread for specified amount of time.
+ * DESCRIPTION: Sleep the running thread for the specified amount of time.
*
******************************************************************************/
-acpi_status acpi_ex_system_do_suspend(u64 how_long)
+acpi_status acpi_ex_system_do_sleep(u64 how_long)
{
ACPI_FUNCTION_ENTRY();
diff --git a/drivers/acpi/acpica/hwacpi.c b/drivers/acpi/acpica/hwacpi.c
index 679a112a7d26..b44274a0b62c 100644
--- a/drivers/acpi/acpica/hwacpi.c
+++ b/drivers/acpi/acpica/hwacpi.c
@@ -63,7 +63,6 @@ acpi_status acpi_hw_set_mode(u32 mode)
{
acpi_status status;
- u32 retry;
ACPI_FUNCTION_TRACE(hw_set_mode);
@@ -125,24 +124,7 @@ acpi_status acpi_hw_set_mode(u32 mode)
return_ACPI_STATUS(status);
}
- /*
- * Some hardware takes a LONG time to switch modes. Give them 3 sec to
- * do so, but allow faster systems to proceed more quickly.
- */
- retry = 3000;
- while (retry) {
- if (acpi_hw_get_mode() == mode) {
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Mode %X successfully enabled\n",
- mode));
- return_ACPI_STATUS(AE_OK);
- }
- acpi_os_stall(1000);
- retry--;
- }
-
- ACPI_ERROR((AE_INFO, "Hardware did not change modes"));
- return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE);
+ return_ACPI_STATUS(AE_OK);
}
/*******************************************************************************
diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
index ec7fc227b33f..5d1273b660ae 100644
--- a/drivers/acpi/acpica/hwregs.c
+++ b/drivers/acpi/acpica/hwregs.c
@@ -299,7 +299,7 @@ struct acpi_bit_register_info *acpi_hw_get_bit_register_info(u32 register_id)
ACPI_FUNCTION_ENTRY();
if (register_id > ACPI_BITREG_MAX) {
- ACPI_ERROR((AE_INFO, "Invalid BitRegister ID: %X",
+ ACPI_ERROR((AE_INFO, "Invalid BitRegister ID: 0x%X",
register_id));
return (NULL);
}
@@ -413,7 +413,7 @@ acpi_hw_register_read(u32 register_id, u32 * return_value)
break;
default:
- ACPI_ERROR((AE_INFO, "Unknown Register ID: %X", register_id));
+ ACPI_ERROR((AE_INFO, "Unknown Register ID: 0x%X", register_id));
status = AE_BAD_PARAMETER;
break;
}
@@ -549,7 +549,7 @@ acpi_status acpi_hw_register_write(u32 register_id, u32 value)
break;
default:
- ACPI_ERROR((AE_INFO, "Unknown Register ID: %X", register_id));
+ ACPI_ERROR((AE_INFO, "Unknown Register ID: 0x%X", register_id));
status = AE_BAD_PARAMETER;
break;
}
diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c
index 5e6d4dbb8024..36eb803dd9d0 100644
--- a/drivers/acpi/acpica/hwsleep.c
+++ b/drivers/acpi/acpica/hwsleep.c
@@ -245,7 +245,7 @@ acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state)
if ((acpi_gbl_sleep_type_a > ACPI_SLEEP_TYPE_MAX) ||
(acpi_gbl_sleep_type_b > ACPI_SLEEP_TYPE_MAX)) {
- ACPI_ERROR((AE_INFO, "Sleep values out of range: A=%X B=%X",
+ ACPI_ERROR((AE_INFO, "Sleep values out of range: A=0x%X B=0x%X",
acpi_gbl_sleep_type_a, acpi_gbl_sleep_type_b));
return_ACPI_STATUS(AE_AML_OPERAND_VALUE);
}
diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c
index e26c17d4b716..c10d587c1641 100644
--- a/drivers/acpi/acpica/hwvalid.c
+++ b/drivers/acpi/acpica/hwvalid.c
@@ -150,7 +150,7 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width)
if (last_address > ACPI_UINT16_MAX) {
ACPI_ERROR((AE_INFO,
- "Illegal I/O port address/length above 64K: 0x%p/%X",
+ "Illegal I/O port address/length above 64K: %p/0x%X",
ACPI_CAST_PTR(void, address), byte_width));
return_ACPI_STATUS(AE_LIMIT);
}
diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c
index aa2b80132d0a..3a2814676ac3 100644
--- a/drivers/acpi/acpica/nsaccess.c
+++ b/drivers/acpi/acpica/nsaccess.c
@@ -222,7 +222,7 @@ acpi_status acpi_ns_root_initialize(void)
default:
ACPI_ERROR((AE_INFO,
- "Unsupported initial type value %X",
+ "Unsupported initial type value 0x%X",
init_val->type));
acpi_ut_remove_reference(obj_desc);
obj_desc = NULL;
diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c
index 0689d36638d9..2110cc2360f0 100644
--- a/drivers/acpi/acpica/nsdump.c
+++ b/drivers/acpi/acpica/nsdump.c
@@ -205,8 +205,8 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
/* Check the node type and name */
if (type > ACPI_TYPE_LOCAL_MAX) {
- ACPI_WARNING((AE_INFO, "Invalid ACPI Object Type %08X",
- type));
+ ACPI_WARNING((AE_INFO,
+ "Invalid ACPI Object Type 0x%08X", type));
}
if (!acpi_ut_valid_acpi_name(this_node->name.integer)) {
diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c
index 959372451635..7dea0031605c 100644
--- a/drivers/acpi/acpica/nsnames.c
+++ b/drivers/acpi/acpica/nsnames.c
@@ -107,7 +107,7 @@ acpi_ns_build_external_path(struct acpi_namespace_node *node,
if (index != 0) {
ACPI_ERROR((AE_INFO,
- "Could not construct external pathname; index=%X, size=%X, Path=%s",
+ "Could not construct external pathname; index=%u, size=%u, Path=%s",
(u32) index, (u32) size, &name_buffer[size]));
return (AE_BAD_PARAMETER);
diff --git a/drivers/acpi/acpica/nssearch.c b/drivers/acpi/acpica/nssearch.c
index 08f8b3f5ccaa..a8e42b5e9463 100644
--- a/drivers/acpi/acpica/nssearch.c
+++ b/drivers/acpi/acpica/nssearch.c
@@ -311,7 +311,7 @@ acpi_ns_search_and_enter(u32 target_name,
if (!node || !target_name || !return_node) {
ACPI_ERROR((AE_INFO,
- "Null parameter: Node %p Name %X ReturnNode %p",
+ "Null parameter: Node %p Name 0x%X ReturnNode %p",
node, target_name, return_node));
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
index 24d05a87a2a3..bab559712da1 100644
--- a/drivers/acpi/acpica/nsutils.c
+++ b/drivers/acpi/acpica/nsutils.c
@@ -276,7 +276,7 @@ u32 acpi_ns_local(acpi_object_type type)
/* Type code out of range */
- ACPI_WARNING((AE_INFO, "Invalid Object Type %X", type));
+ ACPI_WARNING((AE_INFO, "Invalid Object Type 0x%X", type));
return_UINT32(ACPI_NS_NORMAL);
}
@@ -764,7 +764,7 @@ u32 acpi_ns_opens_scope(acpi_object_type type)
/* type code out of range */
- ACPI_WARNING((AE_INFO, "Invalid Object Type %X", type));
+ ACPI_WARNING((AE_INFO, "Invalid Object Type 0x%X", type));
return_UINT32(ACPI_NS_NORMAL);
}
diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c
index 00493e108a01..7df1a4c95274 100644
--- a/drivers/acpi/acpica/psargs.c
+++ b/drivers/acpi/acpica/psargs.c
@@ -460,7 +460,7 @@ acpi_ps_get_next_simple_arg(struct acpi_parse_state *parser_state,
default:
- ACPI_ERROR((AE_INFO, "Invalid ArgType %X", arg_type));
+ ACPI_ERROR((AE_INFO, "Invalid ArgType 0x%X", arg_type));
return_VOID;
}
@@ -742,7 +742,7 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state,
default:
- ACPI_ERROR((AE_INFO, "Invalid ArgType: %X", arg_type));
+ ACPI_ERROR((AE_INFO, "Invalid ArgType: 0x%X", arg_type));
status = AE_AML_OPERAND_TYPE;
break;
}
diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c
index 59aabaeab1d3..2f2e7760938c 100644
--- a/drivers/acpi/acpica/psloop.c
+++ b/drivers/acpi/acpica/psloop.c
@@ -136,7 +136,7 @@ static acpi_status acpi_ps_get_aml_opcode(struct acpi_walk_state *walk_state)
/* The opcode is unrecognized. Just skip unknown opcodes */
ACPI_ERROR((AE_INFO,
- "Found unknown opcode %X at AML address %p offset %X, ignoring",
+ "Found unknown opcode 0x%X at AML address %p offset 0x%X, ignoring",
walk_state->opcode, walk_state->parser_state.aml,
walk_state->aml_offset));
@@ -1021,7 +1021,6 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
if (status == AE_AML_NO_RETURN_VALUE) {
ACPI_EXCEPTION((AE_INFO, status,
"Invoked method did not return a value"));
-
}
ACPI_EXCEPTION((AE_INFO, status,
diff --git a/drivers/acpi/acpica/psxface.c b/drivers/acpi/acpica/psxface.c
index 6064dd4e94c2..c42f067cff9d 100644
--- a/drivers/acpi/acpica/psxface.c
+++ b/drivers/acpi/acpica/psxface.c
@@ -46,6 +46,7 @@
#include "acparser.h"
#include "acdispat.h"
#include "acinterp.h"
+#include "actables.h"
#include "amlcode.h"
#define _COMPONENT ACPI_PARSER
@@ -220,6 +221,10 @@ acpi_status acpi_ps_execute_method(struct acpi_evaluate_info *info)
ACPI_FUNCTION_TRACE(ps_execute_method);
+ /* Quick validation of DSDT header */
+
+ acpi_tb_check_dsdt_header();
+
/* Validate the Info and method Node */
if (!info || !info->resolved_node) {
diff --git a/drivers/acpi/acpica/rscreate.c b/drivers/acpi/acpica/rscreate.c
index f2ee3b548609..c80a2eea3a01 100644
--- a/drivers/acpi/acpica/rscreate.c
+++ b/drivers/acpi/acpica/rscreate.c
@@ -212,7 +212,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
if ((*top_object_list)->common.type != ACPI_TYPE_PACKAGE) {
ACPI_ERROR((AE_INFO,
- "(PRT[%X]) Need sub-package, found %s",
+ "(PRT[%u]) Need sub-package, found %s",
index,
acpi_ut_get_object_type_name
(*top_object_list)));
@@ -223,7 +223,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
if ((*top_object_list)->package.count != 4) {
ACPI_ERROR((AE_INFO,
- "(PRT[%X]) Need package of length 4, found length %d",
+ "(PRT[%u]) Need package of length 4, found length %u",
index, (*top_object_list)->package.count));
return_ACPI_STATUS(AE_AML_PACKAGE_LIMIT);
}
@@ -240,7 +240,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
obj_desc = sub_object_list[0];
if (obj_desc->common.type != ACPI_TYPE_INTEGER) {
ACPI_ERROR((AE_INFO,
- "(PRT[%X].Address) Need Integer, found %s",
+ "(PRT[%u].Address) Need Integer, found %s",
index,
acpi_ut_get_object_type_name(obj_desc)));
return_ACPI_STATUS(AE_BAD_DATA);
@@ -253,7 +253,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
obj_desc = sub_object_list[1];
if (obj_desc->common.type != ACPI_TYPE_INTEGER) {
ACPI_ERROR((AE_INFO,
- "(PRT[%X].Pin) Need Integer, found %s",
+ "(PRT[%u].Pin) Need Integer, found %s",
index,
acpi_ut_get_object_type_name(obj_desc)));
return_ACPI_STATUS(AE_BAD_DATA);
@@ -289,7 +289,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
if (obj_desc->reference.class !=
ACPI_REFCLASS_NAME) {
ACPI_ERROR((AE_INFO,
- "(PRT[%X].Source) Need name, found Reference Class %X",
+ "(PRT[%u].Source) Need name, found Reference Class 0x%X",
index,
obj_desc->reference.class));
return_ACPI_STATUS(AE_BAD_DATA);
@@ -340,7 +340,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
default:
ACPI_ERROR((AE_INFO,
- "(PRT[%X].Source) Need Ref/String/Integer, found %s",
+ "(PRT[%u].Source) Need Ref/String/Integer, found %s",
index,
acpi_ut_get_object_type_name
(obj_desc)));
@@ -358,7 +358,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
obj_desc = sub_object_list[3];
if (obj_desc->common.type != ACPI_TYPE_INTEGER) {
ACPI_ERROR((AE_INFO,
- "(PRT[%X].SourceIndex) Need Integer, found %s",
+ "(PRT[%u].SourceIndex) Need Integer, found %s",
index,
acpi_ut_get_object_type_name(obj_desc)));
return_ACPI_STATUS(AE_BAD_DATA);
diff --git a/drivers/acpi/acpica/rslist.c b/drivers/acpi/acpica/rslist.c
index fd057c72d252..7335f22aac20 100644
--- a/drivers/acpi/acpica/rslist.c
+++ b/drivers/acpi/acpica/rslist.c
@@ -94,7 +94,7 @@ acpi_rs_convert_aml_to_resources(u8 * aml,
[resource_index]);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
- "Could not convert AML resource (Type %X)",
+ "Could not convert AML resource (Type 0x%X)",
*aml));
return_ACPI_STATUS(status);
}
@@ -147,7 +147,7 @@ acpi_rs_convert_resources_to_aml(struct acpi_resource *resource,
if (resource->type > ACPI_RESOURCE_TYPE_MAX) {
ACPI_ERROR((AE_INFO,
- "Invalid descriptor type (%X) in resource list",
+ "Invalid descriptor type (0x%X) in resource list",
resource->type));
return_ACPI_STATUS(AE_BAD_DATA);
}
@@ -161,7 +161,7 @@ acpi_rs_convert_resources_to_aml(struct acpi_resource *resource,
[resource->type]);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
- "Could not convert resource (type %X) to AML",
+ "Could not convert resource (type 0x%X) to AML",
resource->type));
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/rsmisc.c b/drivers/acpi/acpica/rsmisc.c
index 07de352fa443..f8cd9e87d987 100644
--- a/drivers/acpi/acpica/rsmisc.c
+++ b/drivers/acpi/acpica/rsmisc.c
@@ -88,7 +88,7 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
/* Each internal resource struct is expected to be 32-bit aligned */
ACPI_WARNING((AE_INFO,
- "Misaligned resource pointer (get): %p Type %2.2X Len %X",
+ "Misaligned resource pointer (get): %p Type 0x%2.2X Length %u",
resource, resource->type, resource->length));
}
@@ -541,7 +541,7 @@ if (((aml->irq.flags & 0x09) == 0x00) || ((aml->irq.flags & 0x09) == 0x09)) {
* "IRQ Format"), so 0x00 and 0x09 are illegal.
*/
ACPI_ERROR((AE_INFO,
- "Invalid interrupt polarity/trigger in resource list, %X",
+ "Invalid interrupt polarity/trigger in resource list, 0x%X",
aml->irq.flags));
return_ACPI_STATUS(AE_BAD_DATA);
}
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
index f43fbe0fc3fc..1728cb9bf600 100644
--- a/drivers/acpi/acpica/tbfadt.c
+++ b/drivers/acpi/acpica/tbfadt.c
@@ -283,7 +283,7 @@ void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length)
if (length > sizeof(struct acpi_table_fadt)) {
ACPI_WARNING((AE_INFO,
"FADT (revision %u) is longer than ACPI 2.0 version, "
- "truncating length 0x%X to 0x%X",
+ "truncating length %u to %u",
table->revision, length,
(u32)sizeof(struct acpi_table_fadt)));
}
@@ -422,7 +422,7 @@ static void acpi_tb_convert_fadt(void)
if (address64->address && address32 &&
(address64->address != (u64) address32)) {
ACPI_ERROR((AE_INFO,
- "32/64X address mismatch in %s: %8.8X/%8.8X%8.8X, using 32",
+ "32/64X address mismatch in %s: 0x%8.8X/0x%8.8X%8.8X, using 32",
fadt_info_table[i].name, address32,
ACPI_FORMAT_UINT64(address64->address)));
}
@@ -481,7 +481,7 @@ static void acpi_tb_validate_fadt(void)
(acpi_gbl_FADT.Xfacs != (u64) acpi_gbl_FADT.facs)) {
ACPI_WARNING((AE_INFO,
"32/64X FACS address mismatch in FADT - "
- "%8.8X/%8.8X%8.8X, using 32",
+ "0x%8.8X/0x%8.8X%8.8X, using 32",
acpi_gbl_FADT.facs,
ACPI_FORMAT_UINT64(acpi_gbl_FADT.Xfacs)));
@@ -492,7 +492,7 @@ static void acpi_tb_validate_fadt(void)
(acpi_gbl_FADT.Xdsdt != (u64) acpi_gbl_FADT.dsdt)) {
ACPI_WARNING((AE_INFO,
"32/64X DSDT address mismatch in FADT - "
- "%8.8X/%8.8X%8.8X, using 32",
+ "0x%8.8X/0x%8.8X%8.8X, using 32",
acpi_gbl_FADT.dsdt,
ACPI_FORMAT_UINT64(acpi_gbl_FADT.Xdsdt)));
@@ -521,7 +521,7 @@ static void acpi_tb_validate_fadt(void)
if (address64->address &&
(address64->bit_width != ACPI_MUL_8(length))) {
ACPI_WARNING((AE_INFO,
- "32/64X length mismatch in %s: %d/%d",
+ "32/64X length mismatch in %s: %u/%u",
name, ACPI_MUL_8(length),
address64->bit_width));
}
@@ -534,7 +534,7 @@ static void acpi_tb_validate_fadt(void)
if (!address64->address || !length) {
ACPI_ERROR((AE_INFO,
"Required field %s has zero address and/or length:"
- " %8.8X%8.8X/%X",
+ " 0x%8.8X%8.8X/0x%X",
name,
ACPI_FORMAT_UINT64(address64->
address),
@@ -550,7 +550,7 @@ static void acpi_tb_validate_fadt(void)
(!address64->address && length)) {
ACPI_WARNING((AE_INFO,
"Optional field %s has zero address or length: "
- "%8.8X%8.8X/%X",
+ "0x%8.8X%8.8X/0x%X",
name,
ACPI_FORMAT_UINT64(address64->
address),
@@ -600,7 +600,7 @@ static void acpi_tb_setup_fadt_registers(void)
(fadt_info_table[i].default_length !=
target64->bit_width)) {
ACPI_WARNING((AE_INFO,
- "Invalid length for %s: %d, using default %d",
+ "Invalid length for %s: %u, using default %u",
fadt_info_table[i].name,
target64->bit_width,
fadt_info_table[i].
diff --git a/drivers/acpi/acpica/tbfind.c b/drivers/acpi/acpica/tbfind.c
index e252180ce61c..989d5c867864 100644
--- a/drivers/acpi/acpica/tbfind.c
+++ b/drivers/acpi/acpica/tbfind.c
@@ -83,7 +83,7 @@ acpi_tb_find_table(char *signature,
/* Search for the table */
- for (i = 0; i < acpi_gbl_root_table_list.count; ++i) {
+ for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) {
if (ACPI_MEMCMP(&(acpi_gbl_root_table_list.tables[i].signature),
header.signature, ACPI_NAME_SIZE)) {
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index 7ec02b0f69e0..83d7af8d0905 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -137,7 +137,7 @@ acpi_tb_add_table(struct acpi_table_desc *table_desc, u32 *table_index)
/* Check if table is already registered */
- for (i = 0; i < acpi_gbl_root_table_list.count; ++i) {
+ for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) {
if (!acpi_gbl_root_table_list.tables[i].pointer) {
status =
acpi_tb_verify_table(&acpi_gbl_root_table_list.
@@ -273,7 +273,7 @@ acpi_status acpi_tb_resize_root_table_list(void)
/* Increase the Table Array size */
tables = ACPI_ALLOCATE_ZEROED(((acpi_size) acpi_gbl_root_table_list.
- size +
+ max_table_count +
ACPI_ROOT_TABLE_SIZE_INCREMENT) *
sizeof(struct acpi_table_desc));
if (!tables) {
@@ -286,8 +286,8 @@ acpi_status acpi_tb_resize_root_table_list(void)
if (acpi_gbl_root_table_list.tables) {
ACPI_MEMCPY(tables, acpi_gbl_root_table_list.tables,
- (acpi_size) acpi_gbl_root_table_list.size *
- sizeof(struct acpi_table_desc));
+ (acpi_size) acpi_gbl_root_table_list.
+ max_table_count * sizeof(struct acpi_table_desc));
if (acpi_gbl_root_table_list.flags & ACPI_ROOT_ORIGIN_ALLOCATED) {
ACPI_FREE(acpi_gbl_root_table_list.tables);
@@ -295,8 +295,9 @@ acpi_status acpi_tb_resize_root_table_list(void)
}
acpi_gbl_root_table_list.tables = tables;
- acpi_gbl_root_table_list.size += ACPI_ROOT_TABLE_SIZE_INCREMENT;
- acpi_gbl_root_table_list.flags |= (u8) ACPI_ROOT_ORIGIN_ALLOCATED;
+ acpi_gbl_root_table_list.max_table_count +=
+ ACPI_ROOT_TABLE_SIZE_INCREMENT;
+ acpi_gbl_root_table_list.flags |= (u8)ACPI_ROOT_ORIGIN_ALLOCATED;
return_ACPI_STATUS(AE_OK);
}
@@ -321,38 +322,36 @@ acpi_tb_store_table(acpi_physical_address address,
struct acpi_table_header *table,
u32 length, u8 flags, u32 *table_index)
{
- acpi_status status = AE_OK;
+ acpi_status status;
+ struct acpi_table_desc *new_table;
/* Ensure that there is room for the table in the Root Table List */
- if (acpi_gbl_root_table_list.count >= acpi_gbl_root_table_list.size) {
+ if (acpi_gbl_root_table_list.current_table_count >=
+ acpi_gbl_root_table_list.max_table_count) {
status = acpi_tb_resize_root_table_list();
if (ACPI_FAILURE(status)) {
return (status);
}
}
+ new_table =
+ &acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.
+ current_table_count];
+
/* Initialize added table */
- acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.count].
- address = address;
- acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.count].
- pointer = table;
- acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.count].length =
- length;
- acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.count].
- owner_id = 0;
- acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.count].flags =
- flags;
-
- ACPI_MOVE_32_TO_32(&
- (acpi_gbl_root_table_list.
- tables[acpi_gbl_root_table_list.count].signature),
- table->signature);
-
- *table_index = acpi_gbl_root_table_list.count;
- acpi_gbl_root_table_list.count++;
- return (status);
+ new_table->address = address;
+ new_table->pointer = table;
+ new_table->length = length;
+ new_table->owner_id = 0;
+ new_table->flags = flags;
+
+ ACPI_MOVE_32_TO_32(&new_table->signature, table->signature);
+
+ *table_index = acpi_gbl_root_table_list.current_table_count;
+ acpi_gbl_root_table_list.current_table_count++;
+ return (AE_OK);
}
/*******************************************************************************
@@ -408,7 +407,7 @@ void acpi_tb_terminate(void)
/* Delete the individual tables */
- for (i = 0; i < acpi_gbl_root_table_list.count; ++i) {
+ for (i = 0; i < acpi_gbl_root_table_list.current_table_count; i++) {
acpi_tb_delete_table(&acpi_gbl_root_table_list.tables[i]);
}
@@ -422,7 +421,7 @@ void acpi_tb_terminate(void)
acpi_gbl_root_table_list.tables = NULL;
acpi_gbl_root_table_list.flags = 0;
- acpi_gbl_root_table_list.count = 0;
+ acpi_gbl_root_table_list.current_table_count = 0;
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "ACPI Tables freed\n"));
(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
@@ -452,7 +451,7 @@ acpi_status acpi_tb_delete_namespace_by_owner(u32 table_index)
return_ACPI_STATUS(status);
}
- if (table_index >= acpi_gbl_root_table_list.count) {
+ if (table_index >= acpi_gbl_root_table_list.current_table_count) {
/* The table index does not exist */
@@ -505,7 +504,7 @@ acpi_status acpi_tb_allocate_owner_id(u32 table_index)
ACPI_FUNCTION_TRACE(tb_allocate_owner_id);
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
- if (table_index < acpi_gbl_root_table_list.count) {
+ if (table_index < acpi_gbl_root_table_list.current_table_count) {
status = acpi_ut_allocate_owner_id
(&(acpi_gbl_root_table_list.tables[table_index].owner_id));
}
@@ -533,7 +532,7 @@ acpi_status acpi_tb_release_owner_id(u32 table_index)
ACPI_FUNCTION_TRACE(tb_release_owner_id);
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
- if (table_index < acpi_gbl_root_table_list.count) {
+ if (table_index < acpi_gbl_root_table_list.current_table_count) {
acpi_ut_release_owner_id(&
(acpi_gbl_root_table_list.
tables[table_index].owner_id));
@@ -564,7 +563,7 @@ acpi_status acpi_tb_get_owner_id(u32 table_index, acpi_owner_id *owner_id)
ACPI_FUNCTION_TRACE(tb_get_owner_id);
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
- if (table_index < acpi_gbl_root_table_list.count) {
+ if (table_index < acpi_gbl_root_table_list.current_table_count) {
*owner_id =
acpi_gbl_root_table_list.tables[table_index].owner_id;
status = AE_OK;
@@ -589,7 +588,7 @@ u8 acpi_tb_is_table_loaded(u32 table_index)
u8 is_loaded = FALSE;
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
- if (table_index < acpi_gbl_root_table_list.count) {
+ if (table_index < acpi_gbl_root_table_list.current_table_count) {
is_loaded = (u8)
(acpi_gbl_root_table_list.tables[table_index].flags &
ACPI_TABLE_IS_LOADED);
@@ -616,7 +615,7 @@ void acpi_tb_set_table_loaded_flag(u32 table_index, u8 is_loaded)
{
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
- if (table_index < acpi_gbl_root_table_list.count) {
+ if (table_index < acpi_gbl_root_table_list.current_table_count) {
if (is_loaded) {
acpi_gbl_root_table_list.tables[table_index].flags |=
ACPI_TABLE_IS_LOADED;
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index 02723a9fb10c..34f9c2bc5e1f 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -158,7 +158,7 @@ acpi_status acpi_tb_initialize_facs(void)
u8 acpi_tb_tables_loaded(void)
{
- if (acpi_gbl_root_table_list.count >= 3) {
+ if (acpi_gbl_root_table_list.current_table_count >= 3) {
return (TRUE);
}
@@ -309,7 +309,7 @@ acpi_status acpi_tb_verify_checksum(struct acpi_table_header *table, u32 length)
if (checksum) {
ACPI_WARNING((AE_INFO,
- "Incorrect checksum in table [%4.4s] - %2.2X, should be %2.2X",
+ "Incorrect checksum in table [%4.4s] - 0x%2.2X, should be 0x%2.2X",
table->signature, table->checksum,
(u8) (table->checksum - checksum)));
@@ -349,6 +349,84 @@ u8 acpi_tb_checksum(u8 *buffer, u32 length)
/*******************************************************************************
*
+ * FUNCTION: acpi_tb_check_dsdt_header
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Quick compare to check validity of the DSDT. This will detect
+ * if the DSDT has been replaced from outside the OS and/or if
+ * the DSDT header has been corrupted.
+ *
+ ******************************************************************************/
+
+void acpi_tb_check_dsdt_header(void)
+{
+
+ /* Compare original length and checksum to current values */
+
+ if (acpi_gbl_original_dsdt_header.length != acpi_gbl_DSDT->length ||
+ acpi_gbl_original_dsdt_header.checksum != acpi_gbl_DSDT->checksum) {
+ ACPI_ERROR((AE_INFO,
+ "The DSDT has been corrupted or replaced - old, new headers below"));
+ acpi_tb_print_table_header(0, &acpi_gbl_original_dsdt_header);
+ acpi_tb_print_table_header(0, acpi_gbl_DSDT);
+
+ ACPI_ERROR((AE_INFO,
+ "Please send DMI info to linux-acpi@vger.kernel.org\n"
+ "If system does not work as expected, please boot with acpi=copy_dsdt"));
+
+ /* Disable further error messages */
+
+ acpi_gbl_original_dsdt_header.length = acpi_gbl_DSDT->length;
+ acpi_gbl_original_dsdt_header.checksum =
+ acpi_gbl_DSDT->checksum;
+ }
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_tb_copy_dsdt
+ *
+ * PARAMETERS: table_desc - Installed table to copy
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Implements a subsystem option to copy the DSDT to local memory.
+ * Some very bad BIOSes are known to either corrupt the DSDT or
+ * install a new, bad DSDT. This copy works around the problem.
+ *
+ ******************************************************************************/
+
+struct acpi_table_header *acpi_tb_copy_dsdt(u32 table_index)
+{
+ struct acpi_table_header *new_table;
+ struct acpi_table_desc *table_desc;
+
+ table_desc = &acpi_gbl_root_table_list.tables[table_index];
+
+ new_table = ACPI_ALLOCATE(table_desc->length);
+ if (!new_table) {
+ ACPI_ERROR((AE_INFO, "Could not copy DSDT of length 0x%X",
+ table_desc->length));
+ return (NULL);
+ }
+
+ ACPI_MEMCPY(new_table, table_desc->pointer, table_desc->length);
+ acpi_tb_delete_table(table_desc);
+ table_desc->pointer = new_table;
+ table_desc->flags = ACPI_TABLE_ORIGIN_ALLOCATED;
+
+ ACPI_INFO((AE_INFO,
+ "Forced DSDT copy: length 0x%05X copied locally, original unmapped",
+ new_table->length));
+
+ return (new_table);
+}
+
+/*******************************************************************************
+ *
* FUNCTION: acpi_tb_install_table
*
* PARAMETERS: Address - Physical address of DSDT or FACS
@@ -496,7 +574,7 @@ acpi_tb_get_root_table_entry(u8 *table_entry, u32 table_entry_size)
/* Will truncate 64-bit address to 32 bits, issue warning */
ACPI_WARNING((AE_INFO,
- "64-bit Physical Address in XSDT is too large (%8.8X%8.8X),"
+ "64-bit Physical Address in XSDT is too large (0x%8.8X%8.8X),"
" truncating",
ACPI_FORMAT_UINT64(address64)));
}
@@ -629,14 +707,14 @@ acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
*/
table_entry =
ACPI_CAST_PTR(u8, table) + sizeof(struct acpi_table_header);
- acpi_gbl_root_table_list.count = 2;
+ acpi_gbl_root_table_list.current_table_count = 2;
/*
* Initialize the root table array from the RSDT/XSDT
*/
for (i = 0; i < table_count; i++) {
- if (acpi_gbl_root_table_list.count >=
- acpi_gbl_root_table_list.size) {
+ if (acpi_gbl_root_table_list.current_table_count >=
+ acpi_gbl_root_table_list.max_table_count) {
/* There is no more room in the root table array, attempt resize */
@@ -646,19 +724,20 @@ acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
"Truncating %u table entries!",
(unsigned) (table_count -
(acpi_gbl_root_table_list.
- count - 2))));
+ current_table_count -
+ 2))));
break;
}
}
/* Get the table physical address (32-bit for RSDT, 64-bit for XSDT) */
- acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.count].
- address =
+ acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.
+ current_table_count].address =
acpi_tb_get_root_table_entry(table_entry, table_entry_size);
table_entry += table_entry_size;
- acpi_gbl_root_table_list.count++;
+ acpi_gbl_root_table_list.current_table_count++;
}
/*
@@ -671,7 +750,7 @@ acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
* Complete the initialization of the root table array by examining
* the header of each table
*/
- for (i = 2; i < acpi_gbl_root_table_list.count; i++) {
+ for (i = 2; i < acpi_gbl_root_table_list.current_table_count; i++) {
acpi_tb_install_table(acpi_gbl_root_table_list.tables[i].
address, NULL, i);
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
index 5217a6159a31..4a8b9e6ea57a 100644
--- a/drivers/acpi/acpica/tbxface.c
+++ b/drivers/acpi/acpica/tbxface.c
@@ -72,7 +72,7 @@ static int no_auto_ssdt;
acpi_status acpi_allocate_root_table(u32 initial_table_count)
{
- acpi_gbl_root_table_list.size = initial_table_count;
+ acpi_gbl_root_table_list.max_table_count = initial_table_count;
acpi_gbl_root_table_list.flags = ACPI_ROOT_ALLOW_RESIZE;
return (acpi_tb_resize_root_table_list());
@@ -130,7 +130,7 @@ acpi_initialize_tables(struct acpi_table_desc * initial_table_array,
sizeof(struct acpi_table_desc));
acpi_gbl_root_table_list.tables = initial_table_array;
- acpi_gbl_root_table_list.size = initial_table_count;
+ acpi_gbl_root_table_list.max_table_count = initial_table_count;
acpi_gbl_root_table_list.flags = ACPI_ROOT_ORIGIN_UNKNOWN;
if (allow_resize) {
acpi_gbl_root_table_list.flags |=
@@ -172,6 +172,7 @@ acpi_status acpi_reallocate_root_table(void)
{
struct acpi_table_desc *tables;
acpi_size new_size;
+ acpi_size current_size;
ACPI_FUNCTION_TRACE(acpi_reallocate_root_table);
@@ -183,10 +184,17 @@ acpi_status acpi_reallocate_root_table(void)
return_ACPI_STATUS(AE_SUPPORT);
}
- new_size = ((acpi_size) acpi_gbl_root_table_list.count +
- ACPI_ROOT_TABLE_SIZE_INCREMENT) *
+ /*
+ * Get the current size of the root table and add the default
+ * increment to create the new table size.
+ */
+ current_size = (acpi_size)
+ acpi_gbl_root_table_list.current_table_count *
sizeof(struct acpi_table_desc);
+ new_size = current_size +
+ (ACPI_ROOT_TABLE_SIZE_INCREMENT * sizeof(struct acpi_table_desc));
+
/* Create new array and copy the old array */
tables = ACPI_ALLOCATE_ZEROED(new_size);
@@ -194,10 +202,17 @@ acpi_status acpi_reallocate_root_table(void)
return_ACPI_STATUS(AE_NO_MEMORY);
}
- ACPI_MEMCPY(tables, acpi_gbl_root_table_list.tables, new_size);
+ ACPI_MEMCPY(tables, acpi_gbl_root_table_list.tables, current_size);
- acpi_gbl_root_table_list.size = acpi_gbl_root_table_list.count;
+ /*
+ * Update the root table descriptor. The new size will be the current
+ * number of tables plus the increment, independent of the reserved
+ * size of the original table list.
+ */
acpi_gbl_root_table_list.tables = tables;
+ acpi_gbl_root_table_list.max_table_count =
+ acpi_gbl_root_table_list.current_table_count +
+ ACPI_ROOT_TABLE_SIZE_INCREMENT;
acpi_gbl_root_table_list.flags =
ACPI_ROOT_ORIGIN_ALLOCATED | ACPI_ROOT_ALLOW_RESIZE;
@@ -278,7 +293,8 @@ acpi_get_table_header(char *signature,
/* Walk the root table list */
- for (i = 0, j = 0; i < acpi_gbl_root_table_list.count; i++) {
+ for (i = 0, j = 0; i < acpi_gbl_root_table_list.current_table_count;
+ i++) {
if (!ACPI_COMPARE_NAME
(&(acpi_gbl_root_table_list.tables[i].signature),
signature)) {
@@ -341,7 +357,7 @@ acpi_status acpi_unload_table_id(acpi_owner_id id)
ACPI_FUNCTION_TRACE(acpi_unload_table_id);
/* Find table in the global table list */
- for (i = 0; i < acpi_gbl_root_table_list.count; ++i) {
+ for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) {
if (id != acpi_gbl_root_table_list.tables[i].owner_id) {
continue;
}
@@ -391,7 +407,8 @@ acpi_get_table_with_size(char *signature,
/* Walk the root table list */
- for (i = 0, j = 0; i < acpi_gbl_root_table_list.count; i++) {
+ for (i = 0, j = 0; i < acpi_gbl_root_table_list.current_table_count;
+ i++) {
if (!ACPI_COMPARE_NAME
(&(acpi_gbl_root_table_list.tables[i].signature),
signature)) {
@@ -459,7 +476,7 @@ acpi_get_table_by_index(u32 table_index, struct acpi_table_header **table)
/* Validate index */
- if (table_index >= acpi_gbl_root_table_list.count) {
+ if (table_index >= acpi_gbl_root_table_list.current_table_count) {
(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
@@ -500,16 +517,17 @@ static acpi_status acpi_tb_load_namespace(void)
{
acpi_status status;
u32 i;
+ struct acpi_table_header *new_dsdt;
ACPI_FUNCTION_TRACE(tb_load_namespace);
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
/*
- * Load the namespace. The DSDT is required, but any SSDT and PSDT tables
- * are optional.
+ * Load the namespace. The DSDT is required, but any SSDT and
+ * PSDT tables are optional. Verify the DSDT.
*/
- if (!acpi_gbl_root_table_list.count ||
+ if (!acpi_gbl_root_table_list.current_table_count ||
!ACPI_COMPARE_NAME(&
(acpi_gbl_root_table_list.
tables[ACPI_TABLE_INDEX_DSDT].signature),
@@ -522,17 +540,35 @@ static acpi_status acpi_tb_load_namespace(void)
goto unlock_and_exit;
}
- /* A valid DSDT is required */
-
- status =
- acpi_tb_verify_table(&acpi_gbl_root_table_list.
- tables[ACPI_TABLE_INDEX_DSDT]);
- if (ACPI_FAILURE(status)) {
+ /*
+ * Save the DSDT pointer for simple access. This is the mapped memory
+ * address. We must take care here because the address of the .Tables
+ * array can change dynamically as tables are loaded at run-time. Note:
+ * .Pointer field is not validated until after call to acpi_tb_verify_table.
+ */
+ acpi_gbl_DSDT =
+ acpi_gbl_root_table_list.tables[ACPI_TABLE_INDEX_DSDT].pointer;
- status = AE_NO_ACPI_TABLES;
- goto unlock_and_exit;
+ /*
+ * Optionally copy the entire DSDT to local memory (instead of simply
+ * mapping it). There are some BIOSes that corrupt or replace the original
+ * DSDT, creating the need for this option. Default is FALSE: do not copy
+ * the DSDT.
+ */
+ if (acpi_gbl_copy_dsdt_locally) {
+ new_dsdt = acpi_tb_copy_dsdt(ACPI_TABLE_INDEX_DSDT);
+ if (new_dsdt) {
+ acpi_gbl_DSDT = new_dsdt;
+ }
}
+ /*
+ * Save the original DSDT header for detection of table corruption
+ * and/or replacement of the DSDT from outside the OS.
+ */
+ ACPI_MEMCPY(&acpi_gbl_original_dsdt_header, acpi_gbl_DSDT,
+ sizeof(struct acpi_table_header));
+
(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
/* Load and parse tables */
@@ -545,7 +581,7 @@ static acpi_status acpi_tb_load_namespace(void)
/* Load any SSDT or PSDT tables. Note: Loop leaves tables locked */
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
- for (i = 0; i < acpi_gbl_root_table_list.count; ++i) {
+ for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) {
if ((!ACPI_COMPARE_NAME
(&(acpi_gbl_root_table_list.tables[i].signature),
ACPI_SIG_SSDT)
diff --git a/drivers/acpi/acpica/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c
index dda6e8c497d3..fd2c07d1d3ac 100644
--- a/drivers/acpi/acpica/tbxfroot.c
+++ b/drivers/acpi/acpica/tbxfroot.c
@@ -134,7 +134,7 @@ acpi_status acpi_find_root_pointer(acpi_size *table_address)
ACPI_EBDA_PTR_LENGTH);
if (!table_ptr) {
ACPI_ERROR((AE_INFO,
- "Could not map memory at %8.8X for length %X",
+ "Could not map memory at 0x%8.8X for length %u",
ACPI_EBDA_PTR_LOCATION, ACPI_EBDA_PTR_LENGTH));
return_ACPI_STATUS(AE_NO_MEMORY);
@@ -159,7 +159,7 @@ acpi_status acpi_find_root_pointer(acpi_size *table_address)
ACPI_EBDA_WINDOW_SIZE);
if (!table_ptr) {
ACPI_ERROR((AE_INFO,
- "Could not map memory at %8.8X for length %X",
+ "Could not map memory at 0x%8.8X for length %u",
physical_address, ACPI_EBDA_WINDOW_SIZE));
return_ACPI_STATUS(AE_NO_MEMORY);
@@ -191,7 +191,7 @@ acpi_status acpi_find_root_pointer(acpi_size *table_address)
if (!table_ptr) {
ACPI_ERROR((AE_INFO,
- "Could not map memory at %8.8X for length %X",
+ "Could not map memory at 0x%8.8X for length %u",
ACPI_HI_RSDP_WINDOW_BASE,
ACPI_HI_RSDP_WINDOW_SIZE));
diff --git a/drivers/acpi/acpica/utalloc.c b/drivers/acpi/acpica/utalloc.c
index 3d706b8fd449..8f0896281567 100644
--- a/drivers/acpi/acpica/utalloc.c
+++ b/drivers/acpi/acpica/utalloc.c
@@ -340,7 +340,7 @@ void *acpi_ut_allocate(acpi_size size,
/* Report allocation error */
ACPI_WARNING((module, line,
- "Could not allocate size %X", (u32) size));
+ "Could not allocate size %u", (u32) size));
return_PTR(NULL);
}
diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c
index 97ec3621e71d..6fef83f04bcd 100644
--- a/drivers/acpi/acpica/utcopy.c
+++ b/drivers/acpi/acpica/utcopy.c
@@ -677,16 +677,24 @@ acpi_ut_copy_simple_object(union acpi_operand_object *source_desc,
u16 reference_count;
union acpi_operand_object *next_object;
acpi_status status;
+ acpi_size copy_size;
/* Save fields from destination that we don't want to overwrite */
reference_count = dest_desc->common.reference_count;
next_object = dest_desc->common.next_object;
- /* Copy the entire source object over the destination object */
+ /*
+ * Copy the entire source object over the destination object.
+ * Note: Source can be either an operand object or namespace node.
+ */
+ copy_size = sizeof(union acpi_operand_object);
+ if (ACPI_GET_DESCRIPTOR_TYPE(source_desc) == ACPI_DESC_TYPE_NAMED) {
+ copy_size = sizeof(struct acpi_namespace_node);
+ }
- ACPI_MEMCPY((char *)dest_desc, (char *)source_desc,
- sizeof(union acpi_operand_object));
+ ACPI_MEMCPY(ACPI_CAST_PTR(char, dest_desc),
+ ACPI_CAST_PTR(char, source_desc), copy_size);
/* Restore the saved fields */
diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c
index 16b51c69606a..ed794cd033ea 100644
--- a/drivers/acpi/acpica/utdelete.c
+++ b/drivers/acpi/acpica/utdelete.c
@@ -434,7 +434,7 @@ acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action)
default:
- ACPI_ERROR((AE_INFO, "Unknown action (%X)", action));
+ ACPI_ERROR((AE_INFO, "Unknown action (0x%X)", action));
break;
}
@@ -444,8 +444,8 @@ acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action)
*/
if (count > ACPI_MAX_REFERENCE_COUNT) {
ACPI_WARNING((AE_INFO,
- "Large Reference Count (%X) in object %p", count,
- object));
+ "Large Reference Count (0x%X) in object %p",
+ count, object));
}
}
diff --git a/drivers/acpi/acpica/uteval.c b/drivers/acpi/acpica/uteval.c
index 7f5e734ce7f7..6dfdeb653490 100644
--- a/drivers/acpi/acpica/uteval.c
+++ b/drivers/acpi/acpica/uteval.c
@@ -307,7 +307,7 @@ acpi_ut_evaluate_object(struct acpi_namespace_node *prefix_node,
prefix_node, path, AE_TYPE);
ACPI_ERROR((AE_INFO,
- "Type returned from %s was incorrect: %s, expected Btypes: %X",
+ "Type returned from %s was incorrect: %s, expected Btypes: 0x%X",
path,
acpi_ut_get_object_type_name(info->return_object),
expected_return_btypes));
diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c
index eda3e656c4af..66116750a0f9 100644
--- a/drivers/acpi/acpica/utglobal.c
+++ b/drivers/acpi/acpica/utglobal.c
@@ -785,6 +785,7 @@ acpi_status acpi_ut_init_globals(void)
/* Miscellaneous variables */
+ acpi_gbl_DSDT = NULL;
acpi_gbl_cm_single_step = FALSE;
acpi_gbl_db_terminate_threads = FALSE;
acpi_gbl_shutdown = FALSE;
diff --git a/drivers/acpi/acpica/utmisc.c b/drivers/acpi/acpica/utmisc.c
index 32982e2ac384..e8d0724ee403 100644
--- a/drivers/acpi/acpica/utmisc.c
+++ b/drivers/acpi/acpica/utmisc.c
@@ -205,7 +205,7 @@ acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id)
/* Guard against multiple allocations of ID to the same location */
if (*owner_id) {
- ACPI_ERROR((AE_INFO, "Owner ID [%2.2X] already exists",
+ ACPI_ERROR((AE_INFO, "Owner ID [0x%2.2X] already exists",
*owner_id));
return_ACPI_STATUS(AE_ALREADY_EXISTS);
}
@@ -315,7 +315,7 @@ void acpi_ut_release_owner_id(acpi_owner_id * owner_id_ptr)
/* Zero is not a valid owner_id */
if (owner_id == 0) {
- ACPI_ERROR((AE_INFO, "Invalid OwnerId: %2.2X", owner_id));
+ ACPI_ERROR((AE_INFO, "Invalid OwnerId: 0x%2.2X", owner_id));
return_VOID;
}
@@ -341,7 +341,7 @@ void acpi_ut_release_owner_id(acpi_owner_id * owner_id_ptr)
acpi_gbl_owner_id_mask[index] ^= bit;
} else {
ACPI_ERROR((AE_INFO,
- "Release of non-allocated OwnerId: %2.2X",
+ "Release of non-allocated OwnerId: 0x%2.2X",
owner_id + 1));
}
diff --git a/drivers/acpi/acpica/utmutex.c b/drivers/acpi/acpica/utmutex.c
index 55d014ed6d55..058b3df48271 100644
--- a/drivers/acpi/acpica/utmutex.c
+++ b/drivers/acpi/acpica/utmutex.c
@@ -258,7 +258,7 @@ acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id)
acpi_gbl_mutex_info[mutex_id].thread_id = this_thread_id;
} else {
ACPI_EXCEPTION((AE_INFO, status,
- "Thread %p could not acquire Mutex [%X]",
+ "Thread %p could not acquire Mutex [0x%X]",
ACPI_CAST_PTR(void, this_thread_id), mutex_id));
}
@@ -297,7 +297,7 @@ acpi_status acpi_ut_release_mutex(acpi_mutex_handle mutex_id)
*/
if (acpi_gbl_mutex_info[mutex_id].thread_id == ACPI_MUTEX_NOT_ACQUIRED) {
ACPI_ERROR((AE_INFO,
- "Mutex [%X] is not acquired, cannot release",
+ "Mutex [0x%X] is not acquired, cannot release",
mutex_id));
return (AE_NOT_ACQUIRED);
diff --git a/drivers/acpi/acpica/utobject.c b/drivers/acpi/acpica/utobject.c
index 3356f0cb0745..fd1fa2749ea5 100644
--- a/drivers/acpi/acpica/utobject.c
+++ b/drivers/acpi/acpica/utobject.c
@@ -251,7 +251,7 @@ union acpi_operand_object *acpi_ut_create_buffer_object(acpi_size buffer_size)
buffer = ACPI_ALLOCATE_ZEROED(buffer_size);
if (!buffer) {
- ACPI_ERROR((AE_INFO, "Could not allocate size %X",
+ ACPI_ERROR((AE_INFO, "Could not allocate size %u",
(u32) buffer_size));
acpi_ut_remove_reference(buffer_desc);
return_PTR(NULL);
@@ -303,7 +303,7 @@ union acpi_operand_object *acpi_ut_create_string_object(acpi_size string_size)
*/
string = ACPI_ALLOCATE_ZEROED(string_size + 1);
if (!string) {
- ACPI_ERROR((AE_INFO, "Could not allocate size %X",
+ ACPI_ERROR((AE_INFO, "Could not allocate size %u",
(u32) string_size));
acpi_ut_remove_reference(string_desc);
return_PTR(NULL);
@@ -533,7 +533,7 @@ acpi_ut_get_simple_object_size(union acpi_operand_object *internal_object,
*/
ACPI_ERROR((AE_INFO,
"Cannot convert to external object - "
- "unsupported Reference Class [%s] %X in object %p",
+ "unsupported Reference Class [%s] 0x%X in object %p",
acpi_ut_get_reference_name(internal_object),
internal_object->reference.class,
internal_object));
@@ -545,7 +545,7 @@ acpi_ut_get_simple_object_size(union acpi_operand_object *internal_object,
default:
ACPI_ERROR((AE_INFO, "Cannot convert to external object - "
- "unsupported type [%s] %X in object %p",
+ "unsupported type [%s] 0x%X in object %p",
acpi_ut_get_object_type_name(internal_object),
internal_object->common.type, internal_object));
status = AE_TYPE;
diff --git a/drivers/acpi/apei/Kconfig b/drivers/acpi/apei/Kconfig
new file mode 100644
index 000000000000..f8c668f27b5a
--- /dev/null
+++ b/drivers/acpi/apei/Kconfig
@@ -0,0 +1,30 @@
+config ACPI_APEI
+ bool "ACPI Platform Error Interface (APEI)"
+ depends on X86
+ help
+ APEI allows the platform to report errors (for example from the
+ chipset) to the operating system. This improves NMI handling
+ especially. In addition, it supports error serialization and
+ error injection.
+
+config ACPI_APEI_GHES
+ tristate "APEI Generic Hardware Error Source"
+ depends on ACPI_APEI && X86
+ select ACPI_HED
+ help
+ Generic Hardware Error Source provides a way to report
+ platform hardware errors (such as those from the chipset). It
+ works in so-called "Firmware First" mode: hardware errors are
+ reported to the firmware first, and the firmware then reports
+ them to Linux. This way, non-standard hardware error registers
+ or non-standard hardware links can be checked by the firmware
+ to produce more valuable hardware error information for Linux.
+
+config ACPI_APEI_EINJ
+ tristate "APEI Error INJection (EINJ)"
+ depends on ACPI_APEI && DEBUG_FS
+ help
+ EINJ provides a hardware error injection mechanism. It is
+ mainly used for debugging and testing the other parts of
+ APEI and some other RAS features.
diff --git a/drivers/acpi/apei/Makefile b/drivers/acpi/apei/Makefile
new file mode 100644
index 000000000000..b13b03a17789
--- /dev/null
+++ b/drivers/acpi/apei/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_ACPI_APEI) += apei.o
+obj-$(CONFIG_ACPI_APEI_GHES) += ghes.o
+obj-$(CONFIG_ACPI_APEI_EINJ) += einj.o
+
+apei-y := apei-base.o hest.o cper.o erst.o
diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c
new file mode 100644
index 000000000000..db3946e9c66b
--- /dev/null
+++ b/drivers/acpi/apei/apei-base.c
@@ -0,0 +1,593 @@
+/*
+ * apei-base.c - ACPI Platform Error Interface (APEI) supporting
+ * infrastructure
+ *
+ * APEI allows the platform to report errors (for example from the
+ * chipset) to the operating system. This improves NMI handling
+ * especially. In addition, it supports error serialization and error
+ * injection.
+ *
+ * For more information about APEI, please refer to ACPI Specification
+ * version 4.0, chapter 17.
+ *
+ * This file contains common functions used by more than one APEI table,
+ * including the interpreter framework for ERST and EINJ and resource
+ * management for APEI registers.
+ *
+ * Copyright (C) 2009, Intel Corp.
+ * Author: Huang Ying <ying.huang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/acpi.h>
+#include <linux/io.h>
+#include <linux/kref.h>
+#include <linux/rculist.h>
+#include <linux/interrupt.h>
+#include <linux/debugfs.h>
+#include <acpi/atomicio.h>
+
+#include "apei-internal.h"
+
+#define APEI_PFX "APEI: "
+
+/*
+ * APEI ERST (Error Record Serialization Table) and EINJ (Error
+ * INJection) interpreter framework.
+ */
+
+#define APEI_EXEC_PRESERVE_REGISTER 0x1
+
+void apei_exec_ctx_init(struct apei_exec_context *ctx,
+ struct apei_exec_ins_type *ins_table,
+ u32 instructions,
+ struct acpi_whea_header *action_table,
+ u32 entries)
+{
+ ctx->ins_table = ins_table;
+ ctx->instructions = instructions;
+ ctx->action_table = action_table;
+ ctx->entries = entries;
+}
+EXPORT_SYMBOL_GPL(apei_exec_ctx_init);
+
+int __apei_exec_read_register(struct acpi_whea_header *entry, u64 *val)
+{
+ int rc;
+
+ rc = acpi_atomic_read(val, &entry->register_region);
+ if (rc)
+ return rc;
+ *val >>= entry->register_region.bit_offset;
+ *val &= entry->mask;
+
+ return 0;
+}
+
+int apei_exec_read_register(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ int rc;
+ u64 val = 0;
+
+ rc = __apei_exec_read_register(entry, &val);
+ if (rc)
+ return rc;
+ ctx->value = val;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(apei_exec_read_register);
+
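A minimal sketch (hypothetical field values, not from the patch itself) of the arithmetic __apei_exec_read_register() performs: the raw register value is shifted down by the GAR bit offset and then masked.

static inline u64 my_extract_field(u64 raw, u8 bit_offset, u64 mask)
{
        /* e.g. my_extract_field(0x1234, 8, 0xff) == 0x12 */
        return (raw >> bit_offset) & mask;
}
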
+int apei_exec_read_register_value(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ int rc;
+
+ rc = apei_exec_read_register(ctx, entry);
+ if (rc)
+ return rc;
+ ctx->value = (ctx->value == entry->value);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(apei_exec_read_register_value);
+
+int __apei_exec_write_register(struct acpi_whea_header *entry, u64 val)
+{
+ int rc;
+
+ val &= entry->mask;
+ val <<= entry->register_region.bit_offset;
+ if (entry->flags & APEI_EXEC_PRESERVE_REGISTER) {
+ u64 valr = 0;
+ rc = acpi_atomic_read(&valr, &entry->register_region);
+ if (rc)
+ return rc;
+ valr &= ~(entry->mask << entry->register_region.bit_offset);
+ val |= valr;
+ }
+ rc = acpi_atomic_write(val, &entry->register_region);
+
+ return rc;
+}
+
+int apei_exec_write_register(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ return __apei_exec_write_register(entry, ctx->value);
+}
+EXPORT_SYMBOL_GPL(apei_exec_write_register);
+
+int apei_exec_write_register_value(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ int rc;
+
+ ctx->value = entry->value;
+ rc = apei_exec_write_register(ctx, entry);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(apei_exec_write_register_value);
+
+int apei_exec_noop(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ return 0;
+}
+EXPORT_SYMBOL_GPL(apei_exec_noop);
+
+/*
+ * Interpret the specified action. Go through the whole action table and
+ * execute all instructions belonging to the action.
+ */
+int apei_exec_run(struct apei_exec_context *ctx, u8 action)
+{
+ int rc;
+ u32 i, ip;
+ struct acpi_whea_header *entry;
+ apei_exec_ins_func_t run;
+
+ ctx->ip = 0;
+
+ /*
+ * "ip" is the instruction pointer of current instruction,
+ * "ctx->ip" specifies the next instruction to executed,
+ * instruction "run" function may change the "ctx->ip" to
+ * implement "goto" semantics.
+ */
+rewind:
+ ip = 0;
+ for (i = 0; i < ctx->entries; i++) {
+ entry = &ctx->action_table[i];
+ if (entry->action != action)
+ continue;
+ if (ip == ctx->ip) {
+ if (entry->instruction >= ctx->instructions ||
+ !ctx->ins_table[entry->instruction].run) {
+ pr_warning(FW_WARN APEI_PFX
+ "Invalid action table, unknown instruction type: %d\n",
+ entry->instruction);
+ return -EINVAL;
+ }
+ run = ctx->ins_table[entry->instruction].run;
+ rc = run(ctx, entry);
+ if (rc < 0)
+ return rc;
+ else if (rc != APEI_EXEC_SET_IP)
+ ctx->ip++;
+ }
+ ip++;
+ if (ctx->ip < ip)
+ goto rewind;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(apei_exec_run);
+
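A minimal usage sketch of the interpreter above, as a hypothetical table driver might set it up; "my_ins_table", "my_run_action" and the instruction indices are placeholders (the real instruction sets come from the ERST and EINJ drivers).

static struct apei_exec_ins_type my_ins_table[] = {
        [0] = {
                .flags = APEI_EXEC_INS_ACCESS_REGISTER,
                .run   = apei_exec_read_register,
        },
        [1] = {
                .flags = APEI_EXEC_INS_ACCESS_REGISTER,
                .run   = apei_exec_write_register_value,
        },
        [2] = {
                .flags = 0,
                .run   = apei_exec_noop,
        },
};

static int my_run_action(struct acpi_whea_header *my_action_table,
                         u32 entries, u8 action)
{
        struct apei_exec_context ctx;

        apei_exec_ctx_init(&ctx, my_ins_table, ARRAY_SIZE(my_ins_table),
                           my_action_table, entries);
        apei_exec_ctx_set_input(&ctx, 0);
        return apei_exec_run(&ctx, action);
}
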
+typedef int (*apei_exec_entry_func_t)(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry,
+ void *data);
+
+static int apei_exec_for_each_entry(struct apei_exec_context *ctx,
+ apei_exec_entry_func_t func,
+ void *data,
+ int *end)
+{
+ u8 ins;
+ int i, rc;
+ struct acpi_whea_header *entry;
+ struct apei_exec_ins_type *ins_table = ctx->ins_table;
+
+ for (i = 0; i < ctx->entries; i++) {
+ entry = ctx->action_table + i;
+ ins = entry->instruction;
+ if (end)
+ *end = i;
+ if (ins >= ctx->instructions || !ins_table[ins].run) {
+ pr_warning(FW_WARN APEI_PFX
+ "Invalid action table, unknown instruction type: %d\n",
+ ins);
+ return -EINVAL;
+ }
+ rc = func(ctx, entry, data);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+static int pre_map_gar_callback(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry,
+ void *data)
+{
+ u8 ins = entry->instruction;
+
+ if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
+ return acpi_pre_map_gar(&entry->register_region);
+
+ return 0;
+}
+
+/*
+ * Pre-map all GARs in action table to make it possible to access them
+ * in NMI handler.
+ */
+int apei_exec_pre_map_gars(struct apei_exec_context *ctx)
+{
+ int rc, end;
+
+ rc = apei_exec_for_each_entry(ctx, pre_map_gar_callback,
+ NULL, &end);
+ if (rc) {
+ struct apei_exec_context ctx_unmap;
+ memcpy(&ctx_unmap, ctx, sizeof(*ctx));
+ ctx_unmap.entries = end;
+ apei_exec_post_unmap_gars(&ctx_unmap);
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(apei_exec_pre_map_gars);
+
+static int post_unmap_gar_callback(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry,
+ void *data)
+{
+ u8 ins = entry->instruction;
+
+ if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
+ acpi_post_unmap_gar(&entry->register_region);
+
+ return 0;
+}
+
+/* Post-unmap all GAR in action table. */
+int apei_exec_post_unmap_gars(struct apei_exec_context *ctx)
+{
+ return apei_exec_for_each_entry(ctx, post_unmap_gar_callback,
+ NULL, NULL);
+}
+EXPORT_SYMBOL_GPL(apei_exec_post_unmap_gars);
+
+/*
+ * Resource management for GARs in APEI
+ */
+struct apei_res {
+ struct list_head list;
+ unsigned long start;
+ unsigned long end;
+};
+
+/* Collect all resources requested, to avoid conflict */
+struct apei_resources apei_resources_all = {
+ .iomem = LIST_HEAD_INIT(apei_resources_all.iomem),
+ .ioport = LIST_HEAD_INIT(apei_resources_all.ioport),
+};
+
+static int apei_res_add(struct list_head *res_list,
+ unsigned long start, unsigned long size)
+{
+ struct apei_res *res, *resn, *res_ins = NULL;
+ unsigned long end = start + size;
+
+ if (end <= start)
+ return 0;
+repeat:
+ list_for_each_entry_safe(res, resn, res_list, list) {
+ if (res->start > end || res->end < start)
+ continue;
+ else if (end <= res->end && start >= res->start) {
+ kfree(res_ins);
+ return 0;
+ }
+ list_del(&res->list);
+ res->start = start = min(res->start, start);
+ res->end = end = max(res->end, end);
+ kfree(res_ins);
+ res_ins = res;
+ goto repeat;
+ }
+
+ if (res_ins)
+ list_add(&res_ins->list, res_list);
+ else {
+ res_ins = kmalloc(sizeof(*res), GFP_KERNEL);
+ if (!res_ins)
+ return -ENOMEM;
+ res_ins->start = start;
+ res_ins->end = end;
+ list_add(&res_ins->list, res_list);
+ }
+
+ return 0;
+}
+
+static int apei_res_sub(struct list_head *res_list1,
+ struct list_head *res_list2)
+{
+ struct apei_res *res1, *resn1, *res2, *res;
+ res1 = list_entry(res_list1->next, struct apei_res, list);
+ resn1 = list_entry(res1->list.next, struct apei_res, list);
+ while (&res1->list != res_list1) {
+ list_for_each_entry(res2, res_list2, list) {
+ if (res1->start >= res2->end ||
+ res1->end <= res2->start)
+ continue;
+ else if (res1->end <= res2->end &&
+ res1->start >= res2->start) {
+ list_del(&res1->list);
+ kfree(res1);
+ break;
+ } else if (res1->end > res2->end &&
+ res1->start < res2->start) {
+ res = kmalloc(sizeof(*res), GFP_KERNEL);
+ if (!res)
+ return -ENOMEM;
+ res->start = res2->end;
+ res->end = res1->end;
+ res1->end = res2->start;
+ list_add(&res->list, &res1->list);
+ resn1 = res;
+ } else {
+ if (res1->start < res2->start)
+ res1->end = res2->start;
+ else
+ res1->start = res2->end;
+ }
+ }
+ res1 = resn1;
+ resn1 = list_entry(resn1->list.next, struct apei_res, list);
+ }
+
+ return 0;
+}
+
+static void apei_res_clean(struct list_head *res_list)
+{
+ struct apei_res *res, *resn;
+
+ list_for_each_entry_safe(res, resn, res_list, list) {
+ list_del(&res->list);
+ kfree(res);
+ }
+}
+
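A minimal sketch with hypothetical addresses of how apei_res_add() coalesces ranges: two overlapping requests end up as a single list entry.

static void my_res_add_example(void)
{
        LIST_HEAD(my_res_list);

        apei_res_add(&my_res_list, 0x1000, 0x8);  /* [0x1000, 0x1008) */
        apei_res_add(&my_res_list, 0x1004, 0xc);  /* overlaps the first */
        /* my_res_list now holds one apei_res: start 0x1000, end 0x1010 */
        apei_res_clean(&my_res_list);
}
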
+void apei_resources_fini(struct apei_resources *resources)
+{
+ apei_res_clean(&resources->iomem);
+ apei_res_clean(&resources->ioport);
+}
+EXPORT_SYMBOL_GPL(apei_resources_fini);
+
+static int apei_resources_merge(struct apei_resources *resources1,
+ struct apei_resources *resources2)
+{
+ int rc;
+ struct apei_res *res;
+
+ list_for_each_entry(res, &resources2->iomem, list) {
+ rc = apei_res_add(&resources1->iomem, res->start,
+ res->end - res->start);
+ if (rc)
+ return rc;
+ }
+ list_for_each_entry(res, &resources2->ioport, list) {
+ rc = apei_res_add(&resources1->ioport, res->start,
+ res->end - res->start);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+/*
+ * EINJ has two groups of GARs (EINJ table entry and trigger table
+ * entry), so the common resources are subtracted from the trigger table
+ * resources before the second request.
+ */
+int apei_resources_sub(struct apei_resources *resources1,
+ struct apei_resources *resources2)
+{
+ int rc;
+
+ rc = apei_res_sub(&resources1->iomem, &resources2->iomem);
+ if (rc)
+ return rc;
+ return apei_res_sub(&resources1->ioport, &resources2->ioport);
+}
+EXPORT_SYMBOL_GPL(apei_resources_sub);
+
+/*
+ * The IO memory/port resource management mechanism is used to check
+ * whether the memory/port areas used by GARs conflict with normal memory
+ * or with the IO memory/ports of devices.
+ */
+int apei_resources_request(struct apei_resources *resources,
+ const char *desc)
+{
+ struct apei_res *res, *res_bak;
+ struct resource *r;
+
+ apei_resources_sub(resources, &apei_resources_all);
+
+ list_for_each_entry(res, &resources->iomem, list) {
+ r = request_mem_region(res->start, res->end - res->start,
+ desc);
+ if (!r) {
+ pr_err(APEI_PFX
+ "Can not request iomem region <%016llx-%016llx> for GARs.\n",
+ (unsigned long long)res->start,
+ (unsigned long long)res->end);
+ res_bak = res;
+ goto err_unmap_iomem;
+ }
+ }
+
+ list_for_each_entry(res, &resources->ioport, list) {
+ r = request_region(res->start, res->end - res->start, desc);
+ if (!r) {
+ pr_err(APEI_PFX
+ "Can not request ioport region <%016llx-%016llx> for GARs.\n",
+ (unsigned long long)res->start,
+ (unsigned long long)res->end);
+ res_bak = res;
+ goto err_unmap_ioport;
+ }
+ }
+
+ apei_resources_merge(&apei_resources_all, resources);
+
+ return 0;
+err_unmap_ioport:
+ list_for_each_entry(res, &resources->ioport, list) {
+ if (res == res_bak)
+ break;
+ release_region(res->start, res->end - res->start);
+ }
+ res_bak = NULL;
+err_unmap_iomem:
+ list_for_each_entry(res, &resources->iomem, list) {
+ if (res == res_bak)
+ break;
+ release_mem_region(res->start, res->end - res->start);
+ }
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(apei_resources_request);
+
+void apei_resources_release(struct apei_resources *resources)
+{
+ struct apei_res *res;
+
+ list_for_each_entry(res, &resources->iomem, list)
+ release_mem_region(res->start, res->end - res->start);
+ list_for_each_entry(res, &resources->ioport, list)
+ release_region(res->start, res->end - res->start);
+
+ apei_resources_sub(&apei_resources_all, resources);
+}
+EXPORT_SYMBOL_GPL(apei_resources_release);
+
+static int apei_check_gar(struct acpi_generic_address *reg, u64 *paddr)
+{
+ u32 width, space_id;
+
+ width = reg->bit_width;
+ space_id = reg->space_id;
+ /* Handle possible alignment issues */
+ memcpy(paddr, &reg->address, sizeof(*paddr));
+ if (!*paddr) {
+ pr_warning(FW_BUG APEI_PFX
+ "Invalid physical address in GAR [0x%llx/%u/%u]\n",
+ *paddr, width, space_id);
+ return -EINVAL;
+ }
+
+ if ((width != 8) && (width != 16) && (width != 32) && (width != 64)) {
+ pr_warning(FW_BUG APEI_PFX
+ "Invalid bit width in GAR [0x%llx/%u/%u]\n",
+ *paddr, width, space_id);
+ return -EINVAL;
+ }
+
+ if (space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY &&
+ space_id != ACPI_ADR_SPACE_SYSTEM_IO) {
+ pr_warning(FW_BUG APEI_PFX
+ "Invalid address space type in GAR [0x%llx/%u/%u]\n",
+ *paddr, width, space_id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int collect_res_callback(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry,
+ void *data)
+{
+ struct apei_resources *resources = data;
+ struct acpi_generic_address *reg = &entry->register_region;
+ u8 ins = entry->instruction;
+ u64 paddr;
+ int rc;
+
+ if (!(ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER))
+ return 0;
+
+ rc = apei_check_gar(reg, &paddr);
+ if (rc)
+ return rc;
+
+ switch (reg->space_id) {
+ case ACPI_ADR_SPACE_SYSTEM_MEMORY:
+ return apei_res_add(&resources->iomem, paddr,
+ reg->bit_width / 8);
+ case ACPI_ADR_SPACE_SYSTEM_IO:
+ return apei_res_add(&resources->ioport, paddr,
+ reg->bit_width / 8);
+ default:
+ return -EINVAL;
+ }
+}
+
+/*
+ * The same register may be used by multiple instructions in GARs, so all
+ * resources are collected before any are requested.
+ */
+int apei_exec_collect_resources(struct apei_exec_context *ctx,
+ struct apei_resources *resources)
+{
+ return apei_exec_for_each_entry(ctx, collect_res_callback,
+ resources, NULL);
+}
+EXPORT_SYMBOL_GPL(apei_exec_collect_resources);
+
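A minimal sketch of the intended life cycle in a hypothetical APEI table driver (EINJ and ERST follow this pattern): collect the GAR resources referenced by the action table, request them, pre-map the GARs, and undo everything in reverse order on teardown. "my_resources", "my_table_init" and "my_table_exit" are placeholders.

static struct apei_resources my_resources;

static int my_table_init(struct apei_exec_context *ctx)
{
        int rc;

        apei_resources_init(&my_resources);
        rc = apei_exec_collect_resources(ctx, &my_resources);
        if (rc)
                goto err_fini;
        rc = apei_resources_request(&my_resources, "my APEI table");
        if (rc)
                goto err_fini;
        rc = apei_exec_pre_map_gars(ctx);
        if (rc)
                goto err_release;
        return 0;
err_release:
        apei_resources_release(&my_resources);
err_fini:
        apei_resources_fini(&my_resources);
        return rc;
}

static void my_table_exit(struct apei_exec_context *ctx)
{
        apei_exec_post_unmap_gars(ctx);
        apei_resources_release(&my_resources);
        apei_resources_fini(&my_resources);
}
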
+struct dentry *apei_get_debugfs_dir(void)
+{
+ static struct dentry *dapei;
+
+ if (!dapei)
+ dapei = debugfs_create_dir("apei", NULL);
+
+ return dapei;
+}
+EXPORT_SYMBOL_GPL(apei_get_debugfs_dir);
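A minimal sketch of how a hypothetical APEI sub-driver would hang a debugfs entry off the shared directory above; "my_entry" and "my_fops" are placeholders.

static struct dentry *my_dentry;

static int my_debugfs_init(const struct file_operations *my_fops)
{
        struct dentry *dir = apei_get_debugfs_dir();

        if (!dir)
                return -ENOENT;
        my_dentry = debugfs_create_file("my_entry", S_IRUSR | S_IWUSR,
                                        dir, NULL, my_fops);
        return my_dentry ? 0 : -ENOMEM;
}
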
diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
new file mode 100644
index 000000000000..18df1e940276
--- /dev/null
+++ b/drivers/acpi/apei/apei-internal.h
@@ -0,0 +1,114 @@
+/*
+ * apei-internal.h - ACPI Platform Error Interface internal
+ * definitions.
+ */
+
+#ifndef APEI_INTERNAL_H
+#define APEI_INTERNAL_H
+
+#include <linux/cper.h>
+
+struct apei_exec_context;
+
+typedef int (*apei_exec_ins_func_t)(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry);
+
+#define APEI_EXEC_INS_ACCESS_REGISTER 0x0001
+
+struct apei_exec_ins_type {
+ u32 flags;
+ apei_exec_ins_func_t run;
+};
+
+struct apei_exec_context {
+ u32 ip;
+ u64 value;
+ u64 var1;
+ u64 var2;
+ u64 src_base;
+ u64 dst_base;
+ struct apei_exec_ins_type *ins_table;
+ u32 instructions;
+ struct acpi_whea_header *action_table;
+ u32 entries;
+};
+
+void apei_exec_ctx_init(struct apei_exec_context *ctx,
+ struct apei_exec_ins_type *ins_table,
+ u32 instructions,
+ struct acpi_whea_header *action_table,
+ u32 entries);
+
+static inline void apei_exec_ctx_set_input(struct apei_exec_context *ctx,
+ u64 input)
+{
+ ctx->value = input;
+}
+
+static inline u64 apei_exec_ctx_get_output(struct apei_exec_context *ctx)
+{
+ return ctx->value;
+}
+
+int apei_exec_run(struct apei_exec_context *ctx, u8 action);
+
+/* Common instruction implementation */
+
+/* IP has been set in instruction function */
+#define APEI_EXEC_SET_IP 1
+
+int __apei_exec_read_register(struct acpi_whea_header *entry, u64 *val);
+int __apei_exec_write_register(struct acpi_whea_header *entry, u64 val);
+int apei_exec_read_register(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry);
+int apei_exec_read_register_value(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry);
+int apei_exec_write_register(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry);
+int apei_exec_write_register_value(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry);
+int apei_exec_noop(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry);
+int apei_exec_pre_map_gars(struct apei_exec_context *ctx);
+int apei_exec_post_unmap_gars(struct apei_exec_context *ctx);
+
+struct apei_resources {
+ struct list_head iomem;
+ struct list_head ioport;
+};
+
+static inline void apei_resources_init(struct apei_resources *resources)
+{
+ INIT_LIST_HEAD(&resources->iomem);
+ INIT_LIST_HEAD(&resources->ioport);
+}
+
+void apei_resources_fini(struct apei_resources *resources);
+int apei_resources_sub(struct apei_resources *resources1,
+ struct apei_resources *resources2);
+int apei_resources_request(struct apei_resources *resources,
+ const char *desc);
+void apei_resources_release(struct apei_resources *resources);
+int apei_exec_collect_resources(struct apei_exec_context *ctx,
+ struct apei_resources *resources);
+
+struct dentry;
+struct dentry *apei_get_debugfs_dir(void);
+
+#define apei_estatus_for_each_section(estatus, section) \
+ for (section = (struct acpi_hest_generic_data *)(estatus + 1); \
+ (void *)section - (void *)estatus < estatus->data_length; \
+ section = (void *)(section+1) + section->error_data_length)
+
+static inline u32 apei_estatus_len(struct acpi_hest_generic_status *estatus)
+{
+ if (estatus->raw_data_length)
+ return estatus->raw_data_offset + \
+ estatus->raw_data_length;
+ else
+ return sizeof(*estatus) + estatus->data_length;
+}
+
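A minimal sketch (assumed caller, not from the patch itself) of walking a generic error status block with the iterator above, once the block has passed apei_estatus_check().

static inline u32 my_count_sections(struct acpi_hest_generic_status *estatus)
{
        struct acpi_hest_generic_data *section;
        u32 count = 0;

        apei_estatus_for_each_section(estatus, section)
                count++;
        return count;
}
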
+int apei_estatus_check_header(const struct acpi_hest_generic_status *estatus);
+int apei_estatus_check(const struct acpi_hest_generic_status *estatus);
+#endif
diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
new file mode 100644
index 000000000000..f4cf2fc4c8c1
--- /dev/null
+++ b/drivers/acpi/apei/cper.c
@@ -0,0 +1,84 @@
+/*
+ * UEFI Common Platform Error Record (CPER) support
+ *
+ * Copyright (C) 2010, Intel Corp.
+ * Author: Huang Ying <ying.huang@intel.com>
+ *
+ * CPER is the format used to describe platform hardware errors by
+ * various APEI tables, such as ERST, BERT and HEST.
+ *
+ * For more information about CPER, please refer to Appendix N of UEFI
+ * Specification version 2.3.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/time.h>
+#include <linux/cper.h>
+#include <linux/acpi.h>
+
+/*
+ * CPER record IDs need to be unique even across reboots, because the
+ * record ID is used as an index into ERST storage, and CPER records
+ * from multiple boots may co-exist in ERST.
+ */
+u64 cper_next_record_id(void)
+{
+ static atomic64_t seq;
+
+ if (!atomic64_read(&seq))
+ atomic64_set(&seq, ((u64)get_seconds()) << 32);
+
+ return atomic64_inc_return(&seq);
+}
+EXPORT_SYMBOL_GPL(cper_next_record_id);
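+
+/*
+ * Example of the resulting ID sequence (a sketch): if the first call
+ * after boot happens at time T (in seconds, from get_seconds()), it
+ * returns (T << 32) + 1, the next call (T << 32) + 2, and so on, so
+ * IDs generated across different boots do not collide as long as the
+ * boot-time seconds differ.
+ */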
+
+int apei_estatus_check_header(const struct acpi_hest_generic_status *estatus)
+{
+ if (estatus->data_length &&
+ estatus->data_length < sizeof(struct acpi_hest_generic_data))
+ return -EINVAL;
+ if (estatus->raw_data_length &&
+ estatus->raw_data_offset < sizeof(*estatus) + estatus->data_length)
+ return -EINVAL;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(apei_estatus_check_header);
+
+int apei_estatus_check(const struct acpi_hest_generic_status *estatus)
+{
+ struct acpi_hest_generic_data *gdata;
+ unsigned int data_len, gedata_len;
+ int rc;
+
+ rc = apei_estatus_check_header(estatus);
+ if (rc)
+ return rc;
+ data_len = estatus->data_length;
+ gdata = (struct acpi_hest_generic_data *)(estatus + 1);
+ while (data_len > sizeof(*gdata)) {
+ gedata_len = gdata->error_data_length;
+ if (gedata_len > data_len - sizeof(*gdata))
+ return -EINVAL;
+ data_len -= gedata_len + sizeof(*gdata);
+ }
+ if (data_len)
+ return -EINVAL;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(apei_estatus_check);
diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c
new file mode 100644
index 000000000000..465c885938ee
--- /dev/null
+++ b/drivers/acpi/apei/einj.c
@@ -0,0 +1,548 @@
+/*
+ * APEI Error INJection support
+ *
+ * EINJ provides a hardware error injection mechanism; it is useful
+ * for debugging and testing other APEI and RAS features.
+ *
+ * For more information about EINJ, please refer to ACPI Specification
+ * version 4.0, section 17.5.
+ *
+ * Copyright 2009-2010 Intel Corp.
+ * Author: Huang Ying <ying.huang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/nmi.h>
+#include <linux/delay.h>
+#include <acpi/acpi.h>
+
+#include "apei-internal.h"
+
+#define EINJ_PFX "EINJ: "
+
+#define SPIN_UNIT 100 /* 100ns */
+/* Firmware should respond within 1 millisecond */
+#define FIRMWARE_TIMEOUT (1 * NSEC_PER_MSEC)
+
+/*
+ * Some BIOSes allow parameters to the SET_ERROR_TYPE entries in the
+ * EINJ table through an unpublished extension. Use with caution as
+ * most will ignore the parameter and make their own choice of address
+ * for error injection.
+ */
+struct einj_parameter {
+ u64 type;
+ u64 reserved1;
+ u64 reserved2;
+ u64 param1;
+ u64 param2;
+};
+
+#define EINJ_OP_BUSY 0x1
+#define EINJ_STATUS_SUCCESS 0x0
+#define EINJ_STATUS_FAIL 0x1
+#define EINJ_STATUS_INVAL 0x2
+
+#define EINJ_TAB_ENTRY(tab) \
+ ((struct acpi_whea_header *)((char *)(tab) + \
+ sizeof(struct acpi_table_einj)))
+
+static struct acpi_table_einj *einj_tab;
+
+static struct apei_resources einj_resources;
+
+static struct apei_exec_ins_type einj_ins_type[] = {
+ [ACPI_EINJ_READ_REGISTER] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = apei_exec_read_register,
+ },
+ [ACPI_EINJ_READ_REGISTER_VALUE] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = apei_exec_read_register_value,
+ },
+ [ACPI_EINJ_WRITE_REGISTER] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = apei_exec_write_register,
+ },
+ [ACPI_EINJ_WRITE_REGISTER_VALUE] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = apei_exec_write_register_value,
+ },
+ [ACPI_EINJ_NOOP] = {
+ .flags = 0,
+ .run = apei_exec_noop,
+ },
+};
+
+/*
+ * Prevent the EINJ interpreter from running concurrently, because the
+ * corresponding firmware implementation may not work properly when
+ * invoked simultaneously.
+ */
+static DEFINE_MUTEX(einj_mutex);
+
+static struct einj_parameter *einj_param;
+
+static void einj_exec_ctx_init(struct apei_exec_context *ctx)
+{
+ apei_exec_ctx_init(ctx, einj_ins_type, ARRAY_SIZE(einj_ins_type),
+ EINJ_TAB_ENTRY(einj_tab), einj_tab->entries);
+}
+
+static int __einj_get_available_error_type(u32 *type)
+{
+ struct apei_exec_context ctx;
+ int rc;
+
+ einj_exec_ctx_init(&ctx);
+ rc = apei_exec_run(&ctx, ACPI_EINJ_GET_ERROR_TYPE);
+ if (rc)
+ return rc;
+ *type = apei_exec_ctx_get_output(&ctx);
+
+ return 0;
+}
+
+/* Get error injection capabilities of the platform */
+static int einj_get_available_error_type(u32 *type)
+{
+ int rc;
+
+ mutex_lock(&einj_mutex);
+ rc = __einj_get_available_error_type(type);
+ mutex_unlock(&einj_mutex);
+
+ return rc;
+}
+
+static int einj_timedout(u64 *t)
+{
+ if ((s64)*t < SPIN_UNIT) {
+ pr_warning(FW_WARN EINJ_PFX
+ "Firmware does not respond in time\n");
+ return 1;
+ }
+ *t -= SPIN_UNIT;
+ ndelay(SPIN_UNIT);
+ touch_nmi_watchdog();
+ return 0;
+}
+
+static u64 einj_get_parameter_address(void)
+{
+ int i;
+ u64 paddr = 0;
+ struct acpi_whea_header *entry;
+
+ entry = EINJ_TAB_ENTRY(einj_tab);
+ for (i = 0; i < einj_tab->entries; i++) {
+ if (entry->action == ACPI_EINJ_SET_ERROR_TYPE &&
+ entry->instruction == ACPI_EINJ_WRITE_REGISTER &&
+ entry->register_region.space_id ==
+ ACPI_ADR_SPACE_SYSTEM_MEMORY)
+ memcpy(&paddr, &entry->register_region.address,
+ sizeof(paddr));
+ entry++;
+ }
+
+ return paddr;
+}
+
+/* Sanity-check the trigger error action table header */
+static int einj_check_trigger_header(struct acpi_einj_trigger *trigger_tab)
+{
+ if (trigger_tab->header_size != sizeof(struct acpi_einj_trigger))
+ return -EINVAL;
+ if (trigger_tab->table_size > PAGE_SIZE ||
+ trigger_tab->table_size <= trigger_tab->header_size)
+ return -EINVAL;
+ if (trigger_tab->entry_count !=
+ (trigger_tab->table_size - trigger_tab->header_size) /
+ sizeof(struct acpi_einj_entry))
+ return -EINVAL;
+
+ return 0;
+}
+
+/* Execute instructions in trigger error action table */
+static int __einj_error_trigger(u64 trigger_paddr)
+{
+ struct acpi_einj_trigger *trigger_tab = NULL;
+ struct apei_exec_context trigger_ctx;
+ struct apei_resources trigger_resources;
+ struct acpi_whea_header *trigger_entry;
+ struct resource *r;
+ u32 table_size;
+ int rc = -EIO;
+
+ r = request_mem_region(trigger_paddr, sizeof(*trigger_tab),
+ "APEI EINJ Trigger Table");
+ if (!r) {
+ pr_err(EINJ_PFX
+ "Can not request iomem region <%016llx-%016llx> for Trigger table.\n",
+ (unsigned long long)trigger_paddr,
+ (unsigned long long)trigger_paddr+sizeof(*trigger_tab));
+ goto out;
+ }
+ trigger_tab = ioremap_cache(trigger_paddr, sizeof(*trigger_tab));
+ if (!trigger_tab) {
+ pr_err(EINJ_PFX "Failed to map trigger table!\n");
+ goto out_rel_header;
+ }
+ rc = einj_check_trigger_header(trigger_tab);
+ if (rc) {
+ pr_warning(FW_BUG EINJ_PFX
+ "The trigger error action table is invalid\n");
+ goto out_rel_header;
+ }
+ rc = -EIO;
+ table_size = trigger_tab->table_size;
+ r = request_mem_region(trigger_paddr + sizeof(*trigger_tab),
+ table_size - sizeof(*trigger_tab),
+ "APEI EINJ Trigger Table");
+ if (!r) {
+ pr_err(EINJ_PFX
+"Can not request iomem region <%016llx-%016llx> for Trigger Table Entry.\n",
+ (unsigned long long)trigger_paddr+sizeof(*trigger_tab),
+ (unsigned long long)trigger_paddr + table_size);
+ goto out_rel_header;
+ }
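+ /* The header gave us the full table size; remap the whole trigger table */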
+ iounmap(trigger_tab);
+ trigger_tab = ioremap_cache(trigger_paddr, table_size);
+ if (!trigger_tab) {
+ pr_err(EINJ_PFX "Failed to map trigger table!\n");
+ goto out_rel_entry;
+ }
+ trigger_entry = (struct acpi_whea_header *)
+ ((char *)trigger_tab + sizeof(struct acpi_einj_trigger));
+ apei_resources_init(&trigger_resources);
+ apei_exec_ctx_init(&trigger_ctx, einj_ins_type,
+ ARRAY_SIZE(einj_ins_type),
+ trigger_entry, trigger_tab->entry_count);
+ rc = apei_exec_collect_resources(&trigger_ctx, &trigger_resources);
+ if (rc)
+ goto out_fini;
+ rc = apei_resources_sub(&trigger_resources, &einj_resources);
+ if (rc)
+ goto out_fini;
+ rc = apei_resources_request(&trigger_resources, "APEI EINJ Trigger");
+ if (rc)
+ goto out_fini;
+ rc = apei_exec_pre_map_gars(&trigger_ctx);
+ if (rc)
+ goto out_release;
+
+ rc = apei_exec_run(&trigger_ctx, ACPI_EINJ_TRIGGER_ERROR);
+
+ apei_exec_post_unmap_gars(&trigger_ctx);
+out_release:
+ apei_resources_release(&trigger_resources);
+out_fini:
+ apei_resources_fini(&trigger_resources);
+out_rel_entry:
+ release_mem_region(trigger_paddr + sizeof(*trigger_tab),
+ table_size - sizeof(*trigger_tab));
+out_rel_header:
+ release_mem_region(trigger_paddr, sizeof(*trigger_tab));
+out:
+ if (trigger_tab)
+ iounmap(trigger_tab);
+
+ return rc;
+}
+
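+/*
+ * Sketch of the injection flow implemented below, in terms of the EINJ
+ * actions that are run: ACPI_EINJ_BEGIN_OPERATION, ACPI_EINJ_SET_ERROR_TYPE,
+ * the optional param1/param2 writes through the unpublished parameter
+ * extension, ACPI_EINJ_EXECUTE_OPERATION, polling
+ * ACPI_EINJ_CHECK_BUSY_STATUS until the firmware is no longer busy,
+ * ACPI_EINJ_GET_COMMAND_STATUS, ACPI_EINJ_GET_TRIGGER_TABLE followed by
+ * running the trigger table, and finally ACPI_EINJ_END_OPERATION.
+ */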
+static int __einj_error_inject(u32 type, u64 param1, u64 param2)
+{
+ struct apei_exec_context ctx;
+ u64 val, trigger_paddr, timeout = FIRMWARE_TIMEOUT;
+ int rc;
+
+ einj_exec_ctx_init(&ctx);
+
+ rc = apei_exec_run(&ctx, ACPI_EINJ_BEGIN_OPERATION);
+ if (rc)
+ return rc;
+ apei_exec_ctx_set_input(&ctx, type);
+ rc = apei_exec_run(&ctx, ACPI_EINJ_SET_ERROR_TYPE);
+ if (rc)
+ return rc;
+ if (einj_param) {
+ writeq(param1, &einj_param->param1);
+ writeq(param2, &einj_param->param2);
+ }
+ rc = apei_exec_run(&ctx, ACPI_EINJ_EXECUTE_OPERATION);
+ if (rc)
+ return rc;
+ for (;;) {
+ rc = apei_exec_run(&ctx, ACPI_EINJ_CHECK_BUSY_STATUS);
+ if (rc)
+ return rc;
+ val = apei_exec_ctx_get_output(&ctx);
+ if (!(val & EINJ_OP_BUSY))
+ break;
+ if (einj_timedout(&timeout))
+ return -EIO;
+ }
+ rc = apei_exec_run(&ctx, ACPI_EINJ_GET_COMMAND_STATUS);
+ if (rc)
+ return rc;
+ val = apei_exec_ctx_get_output(&ctx);
+ if (val != EINJ_STATUS_SUCCESS)
+ return -EBUSY;
+
+ rc = apei_exec_run(&ctx, ACPI_EINJ_GET_TRIGGER_TABLE);
+ if (rc)
+ return rc;
+ trigger_paddr = apei_exec_ctx_get_output(&ctx);
+ rc = __einj_error_trigger(trigger_paddr);
+ if (rc)
+ return rc;
+ rc = apei_exec_run(&ctx, ACPI_EINJ_END_OPERATION);
+
+ return rc;
+}
+
+/* Inject the specified hardware error */
+static int einj_error_inject(u32 type, u64 param1, u64 param2)
+{
+ int rc;
+
+ mutex_lock(&einj_mutex);
+ rc = __einj_error_inject(type, param1, param2);
+ mutex_unlock(&einj_mutex);
+
+ return rc;
+}
+
+static u32 error_type;
+static u64 error_param1;
+static u64 error_param2;
+static struct dentry *einj_debug_dir;
+
+static int available_error_type_show(struct seq_file *m, void *v)
+{
+ int rc;
+ u32 available_error_type = 0;
+
+ rc = einj_get_available_error_type(&available_error_type);
+ if (rc)
+ return rc;
+ if (available_error_type & 0x0001)
+ seq_printf(m, "0x00000001\tProcessor Correctable\n");
+ if (available_error_type & 0x0002)
+ seq_printf(m, "0x00000002\tProcessor Uncorrectable non-fatal\n");
+ if (available_error_type & 0x0004)
+ seq_printf(m, "0x00000004\tProcessor Uncorrectable fatal\n");
+ if (available_error_type & 0x0008)
+ seq_printf(m, "0x00000008\tMemory Correctable\n");
+ if (available_error_type & 0x0010)
+ seq_printf(m, "0x00000010\tMemory Uncorrectable non-fatal\n");
+ if (available_error_type & 0x0020)
+ seq_printf(m, "0x00000020\tMemory Uncorrectable fatal\n");
+ if (available_error_type & 0x0040)
+ seq_printf(m, "0x00000040\tPCI Express Correctable\n");
+ if (available_error_type & 0x0080)
+ seq_printf(m, "0x00000080\tPCI Express Uncorrectable non-fatal\n");
+ if (available_error_type & 0x0100)
+ seq_printf(m, "0x00000100\tPCI Express Uncorrectable fatal\n");
+ if (available_error_type & 0x0200)
+ seq_printf(m, "0x00000200\tPlatform Correctable\n");
+ if (available_error_type & 0x0400)
+ seq_printf(m, "0x00000400\tPlatform Uncorrectable non-fatal\n");
+ if (available_error_type & 0x0800)
+ seq_printf(m, "0x00000800\tPlatform Uncorrectable fatal\n");
+
+ return 0;
+}
+
+static int available_error_type_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, available_error_type_show, NULL);
+}
+
+static const struct file_operations available_error_type_fops = {
+ .open = available_error_type_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int error_type_get(void *data, u64 *val)
+{
+ *val = error_type;
+
+ return 0;
+}
+
+static int error_type_set(void *data, u64 val)
+{
+ int rc;
+ u32 available_error_type = 0;
+
+ /* Only one error type can be specified */
+ if (val & (val - 1))
+ return -EINVAL;
+ rc = einj_get_available_error_type(&available_error_type);
+ if (rc)
+ return rc;
+ if (!(val & available_error_type))
+ return -EINVAL;
+ error_type = val;
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(error_type_fops, error_type_get,
+ error_type_set, "0x%llx\n");
+
+static int error_inject_set(void *data, u64 val)
+{
+ if (!error_type)
+ return -EINVAL;
+
+ return einj_error_inject(error_type, error_param1, error_param2);
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(error_inject_fops, NULL,
+ error_inject_set, "%llu\n");
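+
+/*
+ * Typical usage of the debugfs interface created in einj_init() below
+ * (a sketch; it assumes debugfs is mounted at /sys/kernel/debug and
+ * that the APEI debugfs directory is named "apei"):
+ *
+ *   # cat /sys/kernel/debug/apei/einj/available_error_type
+ *   # echo 0x8 > /sys/kernel/debug/apei/einj/error_type
+ *   # echo 1 > /sys/kernel/debug/apei/einj/error_inject
+ */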
+
+static int einj_check_table(struct acpi_table_einj *einj_tab)
+{
+ if (einj_tab->header_length != sizeof(struct acpi_table_einj))
+ return -EINVAL;
+ if (einj_tab->header.length < sizeof(struct acpi_table_einj))
+ return -EINVAL;
+ if (einj_tab->entries !=
+ (einj_tab->header.length - sizeof(struct acpi_table_einj)) /
+ sizeof(struct acpi_einj_entry))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int __init einj_init(void)
+{
+ int rc;
+ u64 param_paddr;
+ acpi_status status;
+ struct dentry *fentry;
+ struct apei_exec_context ctx;
+
+ if (acpi_disabled)
+ return -ENODEV;
+
+ status = acpi_get_table(ACPI_SIG_EINJ, 0,
+ (struct acpi_table_header **)&einj_tab);
+ if (status == AE_NOT_FOUND) {
+ pr_info(EINJ_PFX "Table is not found!\n");
+ return -ENODEV;
+ } else if (ACPI_FAILURE(status)) {
+ const char *msg = acpi_format_exception(status);
+ pr_err(EINJ_PFX "Failed to get table, %s\n", msg);
+ return -EINVAL;
+ }
+
+ rc = einj_check_table(einj_tab);
+ if (rc) {
+ pr_warning(FW_BUG EINJ_PFX "EINJ table is invalid\n");
+ return -EINVAL;
+ }
+
+ rc = -ENOMEM;
+ einj_debug_dir = debugfs_create_dir("einj", apei_get_debugfs_dir());
+ if (!einj_debug_dir)
+ goto err_cleanup;
+ fentry = debugfs_create_file("available_error_type", S_IRUSR,
+ einj_debug_dir, NULL,
+ &available_error_type_fops);
+ if (!fentry)
+ goto err_cleanup;
+ fentry = debugfs_create_file("error_type", S_IRUSR | S_IWUSR,
+ einj_debug_dir, NULL, &error_type_fops);
+ if (!fentry)
+ goto err_cleanup;
+ fentry = debugfs_create_x64("param1", S_IRUSR | S_IWUSR,
+ einj_debug_dir, &error_param1);
+ if (!fentry)
+ goto err_cleanup;
+ fentry = debugfs_create_x64("param2", S_IRUSR | S_IWUSR,
+ einj_debug_dir, &error_param2);
+ if (!fentry)
+ goto err_cleanup;
+ fentry = debugfs_create_file("error_inject", S_IWUSR,
+ einj_debug_dir, NULL, &error_inject_fops);
+ if (!fentry)
+ goto err_cleanup;
+
+ apei_resources_init(&einj_resources);
+ einj_exec_ctx_init(&ctx);
+ rc = apei_exec_collect_resources(&ctx, &einj_resources);
+ if (rc)
+ goto err_fini;
+ rc = apei_resources_request(&einj_resources, "APEI EINJ");
+ if (rc)
+ goto err_fini;
+ rc = apei_exec_pre_map_gars(&ctx);
+ if (rc)
+ goto err_release;
+ param_paddr = einj_get_parameter_address();
+ if (param_paddr) {
+ einj_param = ioremap(param_paddr, sizeof(*einj_param));
+ rc = -ENOMEM;
+ if (!einj_param)
+ goto err_unmap;
+ }
+
+ pr_info(EINJ_PFX "Error INJection is initialized.\n");
+
+ return 0;
+
+err_unmap:
+ apei_exec_post_unmap_gars(&ctx);
+err_release:
+ apei_resources_release(&einj_resources);
+err_fini:
+ apei_resources_fini(&einj_resources);
+err_cleanup:
+ debugfs_remove_recursive(einj_debug_dir);
+
+ return rc;
+}
+
+static void __exit einj_exit(void)
+{
+ struct apei_exec_context ctx;
+
+ if (einj_param)
+ iounmap(einj_param);
+ einj_exec_ctx_init(&ctx);
+ apei_exec_post_unmap_gars(&ctx);
+ apei_resources_release(&einj_resources);
+ apei_resources_fini(&einj_resources);
+ debugfs_remove_recursive(einj_debug_dir);
+}
+
+module_init(einj_init);
+module_exit(einj_exit);
+
+MODULE_AUTHOR("Huang Ying");
+MODULE_DESCRIPTION("APEI Error INJection support");
+MODULE_LICENSE("GPL");
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
new file mode 100644
index 000000000000..2ebc39115507
--- /dev/null
+++ b/drivers/acpi/apei/erst.c
@@ -0,0 +1,855 @@
+/*
+ * APEI Error Record Serialization Table support
+ *
+ * ERST is a way provided by APEI to save and retrieve hardware error
+ * information to and from a persistent store.
+ *
+ * For more information about ERST, please refer to ACPI Specification
+ * version 4.0, section 17.4.
+ *
+ * Copyright 2010 Intel Corp.
+ * Author: Huang Ying <ying.huang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/acpi.h>
+#include <linux/uaccess.h>
+#include <linux/cper.h>
+#include <linux/nmi.h>
+#include <acpi/apei.h>
+
+#include "apei-internal.h"
+
+#define ERST_PFX "ERST: "
+
+/* ERST command status */
+#define ERST_STATUS_SUCCESS 0x0
+#define ERST_STATUS_NOT_ENOUGH_SPACE 0x1
+#define ERST_STATUS_HARDWARE_NOT_AVAILABLE 0x2
+#define ERST_STATUS_FAILED 0x3
+#define ERST_STATUS_RECORD_STORE_EMPTY 0x4
+#define ERST_STATUS_RECORD_NOT_FOUND 0x5
+
+#define ERST_TAB_ENTRY(tab) \
+ ((struct acpi_whea_header *)((char *)(tab) + \
+ sizeof(struct acpi_table_erst)))
+
+#define SPIN_UNIT 100 /* 100ns */
+/* Firmware should respond within 1 millisecond */
+#define FIRMWARE_TIMEOUT (1 * NSEC_PER_MSEC)
+#define FIRMWARE_MAX_STALL 50 /* 50us */
+
+int erst_disable;
+EXPORT_SYMBOL_GPL(erst_disable);
+
+static struct acpi_table_erst *erst_tab;
+
+/* ERST Error Log Address Range attributes */
+#define ERST_RANGE_RESERVED 0x0001
+#define ERST_RANGE_NVRAM 0x0002
+#define ERST_RANGE_SLOW 0x0004
+
+/*
+ * ERST Error Log Address Range, used as buffer for reading/writing
+ * error records.
+ */
+static struct erst_erange {
+ u64 base;
+ u64 size;
+ void __iomem *vaddr;
+ u32 attr;
+} erst_erange;
+
+/*
+ * Prevent the ERST interpreter from running concurrently, because the
+ * corresponding firmware implementation may not work properly when
+ * invoked simultaneously.
+ *
+ * The lock also provides exclusive access to the ERST Error Log
+ * Address Range.
+ */
+static DEFINE_SPINLOCK(erst_lock);
+
+static inline int erst_errno(int command_status)
+{
+ switch (command_status) {
+ case ERST_STATUS_SUCCESS:
+ return 0;
+ case ERST_STATUS_HARDWARE_NOT_AVAILABLE:
+ return -ENODEV;
+ case ERST_STATUS_NOT_ENOUGH_SPACE:
+ return -ENOSPC;
+ case ERST_STATUS_RECORD_STORE_EMPTY:
+ case ERST_STATUS_RECORD_NOT_FOUND:
+ return -ENOENT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int erst_timedout(u64 *t, u64 spin_unit)
+{
+ if ((s64)*t < spin_unit) {
+ pr_warning(FW_WARN ERST_PFX
+ "Firmware does not respond in time\n");
+ return 1;
+ }
+ *t -= spin_unit;
+ ndelay(spin_unit);
+ touch_nmi_watchdog();
+ return 0;
+}
+
+static int erst_exec_load_var1(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ return __apei_exec_read_register(entry, &ctx->var1);
+}
+
+static int erst_exec_load_var2(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ return __apei_exec_read_register(entry, &ctx->var2);
+}
+
+static int erst_exec_store_var1(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ return __apei_exec_write_register(entry, ctx->var1);
+}
+
+static int erst_exec_add(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ ctx->var1 += ctx->var2;
+ return 0;
+}
+
+static int erst_exec_subtract(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ ctx->var1 -= ctx->var2;
+ return 0;
+}
+
+static int erst_exec_add_value(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ int rc;
+ u64 val;
+
+ rc = __apei_exec_read_register(entry, &val);
+ if (rc)
+ return rc;
+ val += ctx->value;
+ rc = __apei_exec_write_register(entry, val);
+ return rc;
+}
+
+static int erst_exec_subtract_value(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ int rc;
+ u64 val;
+
+ rc = __apei_exec_read_register(entry, &val);
+ if (rc)
+ return rc;
+ val -= ctx->value;
+ rc = __apei_exec_write_register(entry, val);
+ return rc;
+}
+
+static int erst_exec_stall(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ u64 stall_time;
+
+ if (ctx->value > FIRMWARE_MAX_STALL) {
+ if (!in_nmi())
+ pr_warning(FW_WARN ERST_PFX
+ "Too long stall time for stall instruction: %llx.\n",
+ ctx->value);
+ stall_time = FIRMWARE_MAX_STALL;
+ } else
+ stall_time = ctx->value;
+ udelay(stall_time);
+ return 0;
+}
+
+static int erst_exec_stall_while_true(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ int rc;
+ u64 val;
+ u64 timeout = FIRMWARE_TIMEOUT;
+ u64 stall_time;
+
+ if (ctx->var1 > FIRMWARE_MAX_STALL) {
+ if (!in_nmi())
+ pr_warning(FW_WARN ERST_PFX
+ "Too long stall time for stall while true instruction: %llx.\n",
+ ctx->var1);
+ stall_time = FIRMWARE_MAX_STALL;
+ } else
+ stall_time = ctx->var1;
+
+ for (;;) {
+ rc = __apei_exec_read_register(entry, &val);
+ if (rc)
+ return rc;
+ if (val != ctx->value)
+ break;
+ if (erst_timedout(&timeout, stall_time * NSEC_PER_USEC))
+ return -EIO;
+ }
+ return 0;
+}
+
+static int erst_exec_skip_next_instruction_if_true(
+ struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ int rc;
+ u64 val;
+
+ rc = __apei_exec_read_register(entry, &val);
+ if (rc)
+ return rc;
+ if (val == ctx->value) {
+ ctx->ip += 2;
+ return APEI_EXEC_SET_IP;
+ }
+
+ return 0;
+}
+
+static int erst_exec_goto(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ ctx->ip = ctx->value;
+ return APEI_EXEC_SET_IP;
+}
+
+static int erst_exec_set_src_address_base(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ return __apei_exec_read_register(entry, &ctx->src_base);
+}
+
+static int erst_exec_set_dst_address_base(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ return __apei_exec_read_register(entry, &ctx->dst_base);
+}
+
+static int erst_exec_move_data(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ int rc;
+ u64 offset;
+
+ rc = __apei_exec_read_register(entry, &offset);
+ if (rc)
+ return rc;
+ memmove((void *)ctx->dst_base + offset,
+ (void *)ctx->src_base + offset,
+ ctx->var2);
+
+ return 0;
+}
+
+static struct apei_exec_ins_type erst_ins_type[] = {
+ [ACPI_ERST_READ_REGISTER] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = apei_exec_read_register,
+ },
+ [ACPI_ERST_READ_REGISTER_VALUE] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = apei_exec_read_register_value,
+ },
+ [ACPI_ERST_WRITE_REGISTER] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = apei_exec_write_register,
+ },
+ [ACPI_ERST_WRITE_REGISTER_VALUE] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = apei_exec_write_register_value,
+ },
+ [ACPI_ERST_NOOP] = {
+ .flags = 0,
+ .run = apei_exec_noop,
+ },
+ [ACPI_ERST_LOAD_VAR1] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = erst_exec_load_var1,
+ },
+ [ACPI_ERST_LOAD_VAR2] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = erst_exec_load_var2,
+ },
+ [ACPI_ERST_STORE_VAR1] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = erst_exec_store_var1,
+ },
+ [ACPI_ERST_ADD] = {
+ .flags = 0,
+ .run = erst_exec_add,
+ },
+ [ACPI_ERST_SUBTRACT] = {
+ .flags = 0,
+ .run = erst_exec_subtract,
+ },
+ [ACPI_ERST_ADD_VALUE] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = erst_exec_add_value,
+ },
+ [ACPI_ERST_SUBTRACT_VALUE] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = erst_exec_subtract_value,
+ },
+ [ACPI_ERST_STALL] = {
+ .flags = 0,
+ .run = erst_exec_stall,
+ },
+ [ACPI_ERST_STALL_WHILE_TRUE] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = erst_exec_stall_while_true,
+ },
+ [ACPI_ERST_SKIP_NEXT_IF_TRUE] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = erst_exec_skip_next_instruction_if_true,
+ },
+ [ACPI_ERST_GOTO] = {
+ .flags = 0,
+ .run = erst_exec_goto,
+ },
+ [ACPI_ERST_SET_SRC_ADDRESS_BASE] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = erst_exec_set_src_address_base,
+ },
+ [ACPI_ERST_SET_DST_ADDRESS_BASE] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = erst_exec_set_dst_address_base,
+ },
+ [ACPI_ERST_MOVE_DATA] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = erst_exec_move_data,
+ },
+};
+
+static inline void erst_exec_ctx_init(struct apei_exec_context *ctx)
+{
+ apei_exec_ctx_init(ctx, erst_ins_type, ARRAY_SIZE(erst_ins_type),
+ ERST_TAB_ENTRY(erst_tab), erst_tab->entries);
+}
+
+static int erst_get_erange(struct erst_erange *range)
+{
+ struct apei_exec_context ctx;
+ int rc;
+
+ erst_exec_ctx_init(&ctx);
+ rc = apei_exec_run(&ctx, ACPI_ERST_GET_ERROR_RANGE);
+ if (rc)
+ return rc;
+ range->base = apei_exec_ctx_get_output(&ctx);
+ rc = apei_exec_run(&ctx, ACPI_ERST_GET_ERROR_LENGTH);
+ if (rc)
+ return rc;
+ range->size = apei_exec_ctx_get_output(&ctx);
+ rc = apei_exec_run(&ctx, ACPI_ERST_GET_ERROR_ATTRIBUTES);
+ if (rc)
+ return rc;
+ range->attr = apei_exec_ctx_get_output(&ctx);
+
+ return 0;
+}
+
+static ssize_t __erst_get_record_count(void)
+{
+ struct apei_exec_context ctx;
+ int rc;
+
+ erst_exec_ctx_init(&ctx);
+ rc = apei_exec_run(&ctx, ACPI_ERST_GET_RECORD_COUNT);
+ if (rc)
+ return rc;
+ return apei_exec_ctx_get_output(&ctx);
+}
+
+ssize_t erst_get_record_count(void)
+{
+ ssize_t count;
+ unsigned long flags;
+
+ if (erst_disable)
+ return -ENODEV;
+
+ spin_lock_irqsave(&erst_lock, flags);
+ count = __erst_get_record_count();
+ spin_unlock_irqrestore(&erst_lock, flags);
+
+ return count;
+}
+EXPORT_SYMBOL_GPL(erst_get_record_count);
+
+static int __erst_get_next_record_id(u64 *record_id)
+{
+ struct apei_exec_context ctx;
+ int rc;
+
+ erst_exec_ctx_init(&ctx);
+ rc = apei_exec_run(&ctx, ACPI_ERST_GET_RECORD_ID);
+ if (rc)
+ return rc;
+ *record_id = apei_exec_ctx_get_output(&ctx);
+
+ return 0;
+}
+
+/*
+ * Get the record ID of an existing error record on the persistent
+ * storage. If there is no error record on the persistent storage, the
+ * returned record_id is APEI_ERST_INVALID_RECORD_ID.
+ */
+int erst_get_next_record_id(u64 *record_id)
+{
+ int rc;
+ unsigned long flags;
+
+ if (erst_disable)
+ return -ENODEV;
+
+ spin_lock_irqsave(&erst_lock, flags);
+ rc = __erst_get_next_record_id(record_id);
+ spin_unlock_irqrestore(&erst_lock, flags);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(erst_get_next_record_id);
+
+static int __erst_write_to_storage(u64 offset)
+{
+ struct apei_exec_context ctx;
+ u64 timeout = FIRMWARE_TIMEOUT;
+ u64 val;
+ int rc;
+
+ erst_exec_ctx_init(&ctx);
+ rc = apei_exec_run(&ctx, ACPI_ERST_BEGIN_WRITE);
+ if (rc)
+ return rc;
+ apei_exec_ctx_set_input(&ctx, offset);
+ rc = apei_exec_run(&ctx, ACPI_ERST_SET_RECORD_OFFSET);
+ if (rc)
+ return rc;
+ rc = apei_exec_run(&ctx, ACPI_ERST_EXECUTE_OPERATION);
+ if (rc)
+ return rc;
+ for (;;) {
+ rc = apei_exec_run(&ctx, ACPI_ERST_CHECK_BUSY_STATUS);
+ if (rc)
+ return rc;
+ val = apei_exec_ctx_get_output(&ctx);
+ if (!val)
+ break;
+ if (erst_timedout(&timeout, SPIN_UNIT))
+ return -EIO;
+ }
+ rc = apei_exec_run(&ctx, ACPI_ERST_GET_COMMAND_STATUS);
+ if (rc)
+ return rc;
+ val = apei_exec_ctx_get_output(&ctx);
+ rc = apei_exec_run(&ctx, ACPI_ERST_END);
+ if (rc)
+ return rc;
+
+ return erst_errno(val);
+}
+
+static int __erst_read_from_storage(u64 record_id, u64 offset)
+{
+ struct apei_exec_context ctx;
+ u64 timeout = FIRMWARE_TIMEOUT;
+ u64 val;
+ int rc;
+
+ erst_exec_ctx_init(&ctx);
+ rc = apei_exec_run(&ctx, ACPI_ERST_BEGIN_READ);
+ if (rc)
+ return rc;
+ apei_exec_ctx_set_input(&ctx, offset);
+ rc = apei_exec_run(&ctx, ACPI_ERST_SET_RECORD_OFFSET);
+ if (rc)
+ return rc;
+ apei_exec_ctx_set_input(&ctx, record_id);
+ rc = apei_exec_run(&ctx, ACPI_ERST_SET_RECORD_ID);
+ if (rc)
+ return rc;
+ rc = apei_exec_run(&ctx, ACPI_ERST_EXECUTE_OPERATION);
+ if (rc)
+ return rc;
+ for (;;) {
+ rc = apei_exec_run(&ctx, ACPI_ERST_CHECK_BUSY_STATUS);
+ if (rc)
+ return rc;
+ val = apei_exec_ctx_get_output(&ctx);
+ if (!val)
+ break;
+ if (erst_timedout(&timeout, SPIN_UNIT))
+ return -EIO;
+ }
+ rc = apei_exec_run(&ctx, ACPI_ERST_GET_COMMAND_STATUS);
+ if (rc)
+ return rc;
+ val = apei_exec_ctx_get_output(&ctx);
+ rc = apei_exec_run(&ctx, ACPI_ERST_END);
+ if (rc)
+ return rc;
+
+ return erst_errno(val);
+}
+
+static int __erst_clear_from_storage(u64 record_id)
+{
+ struct apei_exec_context ctx;
+ u64 timeout = FIRMWARE_TIMEOUT;
+ u64 val;
+ int rc;
+
+ erst_exec_ctx_init(&ctx);
+ rc = apei_exec_run(&ctx, ACPI_ERST_BEGIN_CLEAR);
+ if (rc)
+ return rc;
+ apei_exec_ctx_set_input(&ctx, record_id);
+ rc = apei_exec_run(&ctx, ACPI_ERST_SET_RECORD_ID);
+ if (rc)
+ return rc;
+ rc = apei_exec_run(&ctx, ACPI_ERST_EXECUTE_OPERATION);
+ if (rc)
+ return rc;
+ for (;;) {
+ rc = apei_exec_run(&ctx, ACPI_ERST_CHECK_BUSY_STATUS);
+ if (rc)
+ return rc;
+ val = apei_exec_ctx_get_output(&ctx);
+ if (!val)
+ break;
+ if (erst_timedout(&timeout, SPIN_UNIT))
+ return -EIO;
+ }
+ rc = apei_exec_run(&ctx, ACPI_ERST_GET_COMMAND_STATUS);
+ if (rc)
+ return rc;
+ val = apei_exec_ctx_get_output(&ctx);
+ rc = apei_exec_run(&ctx, ACPI_ERST_END);
+ if (rc)
+ return rc;
+
+ return erst_errno(val);
+}
+
+/* NVRAM ERST Error Log Address Range is not supported yet */
+static void pr_unimpl_nvram(void)
+{
+ if (printk_ratelimit())
+ pr_warning(ERST_PFX
+ "NVRAM ERST Log Address Range is not implemented yet\n");
+}
+
+static int __erst_write_to_nvram(const struct cper_record_header *record)
+{
+ /* do not print a message here, because printk is not NMI-safe */
+ return -ENOSYS;
+}
+
+static int __erst_read_to_erange_from_nvram(u64 record_id, u64 *offset)
+{
+ pr_unimpl_nvram();
+ return -ENOSYS;
+}
+
+static int __erst_clear_from_nvram(u64 record_id)
+{
+ pr_unimpl_nvram();
+ return -ENOSYS;
+}
+
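+/*
+ * Write an error record to the persistent store. In the non-NVRAM
+ * case (the only one implemented so far), the flow below is: copy the
+ * record into the Error Log Address Range, tag its
+ * persistence_information field with the "ER" signature, then let
+ * __erst_write_to_storage() drive the firmware action sequence that
+ * commits it.
+ */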
+int erst_write(const struct cper_record_header *record)
+{
+ int rc;
+ unsigned long flags;
+ struct cper_record_header *rcd_erange;
+
+ if (erst_disable)
+ return -ENODEV;
+
+ if (memcmp(record->signature, CPER_SIG_RECORD, CPER_SIG_SIZE))
+ return -EINVAL;
+
+ if (erst_erange.attr & ERST_RANGE_NVRAM) {
+ if (!spin_trylock_irqsave(&erst_lock, flags))
+ return -EBUSY;
+ rc = __erst_write_to_nvram(record);
+ spin_unlock_irqrestore(&erst_lock, flags);
+ return rc;
+ }
+
+ if (record->record_length > erst_erange.size)
+ return -EINVAL;
+
+ if (!spin_trylock_irqsave(&erst_lock, flags))
+ return -EBUSY;
+ memcpy(erst_erange.vaddr, record, record->record_length);
+ rcd_erange = erst_erange.vaddr;
+ /* signature for serialization system */
+ memcpy(&rcd_erange->persistence_information, "ER", 2);
+
+ rc = __erst_write_to_storage(0);
+ spin_unlock_irqrestore(&erst_lock, flags);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(erst_write);
+
+static int __erst_read_to_erange(u64 record_id, u64 *offset)
+{
+ int rc;
+
+ if (erst_erange.attr & ERST_RANGE_NVRAM)
+ return __erst_read_to_erange_from_nvram(
+ record_id, offset);
+
+ rc = __erst_read_from_storage(record_id, 0);
+ if (rc)
+ return rc;
+ *offset = 0;
+
+ return 0;
+}
+
+static ssize_t __erst_read(u64 record_id, struct cper_record_header *record,
+ size_t buflen)
+{
+ int rc;
+ u64 offset, len = 0;
+ struct cper_record_header *rcd_tmp;
+
+ rc = __erst_read_to_erange(record_id, &offset);
+ if (rc)
+ return rc;
+ rcd_tmp = erst_erange.vaddr + offset;
+ len = rcd_tmp->record_length;
+ if (len <= buflen)
+ memcpy(record, rcd_tmp, len);
+
+ return len;
+}
+
+/*
+ * If the return value is greater than buflen, the buffer is too small
+ * to hold the record; if it is negative, the read failed; otherwise it
+ * is the length of the record read.
+ */
+ssize_t erst_read(u64 record_id, struct cper_record_header *record,
+ size_t buflen)
+{
+ ssize_t len;
+ unsigned long flags;
+
+ if (erst_disable)
+ return -ENODEV;
+
+ spin_lock_irqsave(&erst_lock, flags);
+ len = __erst_read(record_id, record, buflen);
+ spin_unlock_irqrestore(&erst_lock, flags);
+ return len;
+}
+EXPORT_SYMBOL_GPL(erst_read);
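+
+/*
+ * A typical caller pattern (sketch, with a caller-supplied buffer):
+ *
+ *   len = erst_read(record_id, record, buflen);
+ *   if (len < 0)
+ *     ... the read failed, propagate len ...
+ *   else if (len > buflen)
+ *     ... buffer too small, enlarge it to len and retry ...
+ */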
+
+/*
+ * If the return value is greater than buflen, the buffer is too small
+ * to hold the record; if it is zero, there are no more records to
+ * read; if it is negative, the read failed; otherwise it is the length
+ * of the record read.
+ */
+ssize_t erst_read_next(struct cper_record_header *record, size_t buflen)
+{
+ int rc;
+ ssize_t len;
+ unsigned long flags;
+ u64 record_id;
+
+ if (erst_disable)
+ return -ENODEV;
+
+ spin_lock_irqsave(&erst_lock, flags);
+ rc = __erst_get_next_record_id(&record_id);
+ if (rc) {
+ spin_unlock_irqrestore(&erst_lock, flags);
+ return rc;
+ }
+ /* no more record */
+ if (record_id == APEI_ERST_INVALID_RECORD_ID) {
+ spin_unlock_irqrestore(&erst_lock, flags);
+ return 0;
+ }
+
+ len = __erst_read(record_id, record, buflen);
+ spin_unlock_irqrestore(&erst_lock, flags);
+
+ return len;
+}
+EXPORT_SYMBOL_GPL(erst_read_next);
+
+int erst_clear(u64 record_id)
+{
+ int rc;
+ unsigned long flags;
+
+ if (erst_disable)
+ return -ENODEV;
+
+ spin_lock_irqsave(&erst_lock, flags);
+ if (erst_erange.attr & ERST_RANGE_NVRAM)
+ rc = __erst_clear_from_nvram(record_id);
+ else
+ rc = __erst_clear_from_storage(record_id);
+ spin_unlock_irqrestore(&erst_lock, flags);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(erst_clear);
+
+static int __init setup_erst_disable(char *str)
+{
+ erst_disable = 1;
+ return 0;
+}
+
+__setup("erst_disable", setup_erst_disable);
+
+static int erst_check_table(struct acpi_table_erst *erst_tab)
+{
+ if (erst_tab->header_length != sizeof(struct acpi_table_erst))
+ return -EINVAL;
+ if (erst_tab->header.length < sizeof(struct acpi_table_erst))
+ return -EINVAL;
+ if (erst_tab->entries !=
+ (erst_tab->header.length - sizeof(struct acpi_table_erst)) /
+ sizeof(struct acpi_erst_entry))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int __init erst_init(void)
+{
+ int rc = 0;
+ acpi_status status;
+ struct apei_exec_context ctx;
+ struct apei_resources erst_resources;
+ struct resource *r;
+
+ if (acpi_disabled)
+ goto err;
+
+ if (erst_disable) {
+ pr_info(ERST_PFX
+ "Error Record Serialization Table (ERST) support is disabled.\n");
+ goto err;
+ }
+
+ status = acpi_get_table(ACPI_SIG_ERST, 0,
+ (struct acpi_table_header **)&erst_tab);
+ if (status == AE_NOT_FOUND) {
+ pr_err(ERST_PFX "Table is not found!\n");
+ goto err;
+ } else if (ACPI_FAILURE(status)) {
+ const char *msg = acpi_format_exception(status);
+ pr_err(ERST_PFX "Failed to get table, %s\n", msg);
+ rc = -EINVAL;
+ goto err;
+ }
+
+ rc = erst_check_table(erst_tab);
+ if (rc) {
+ pr_err(FW_BUG ERST_PFX "ERST table is invalid\n");
+ goto err;
+ }
+
+ apei_resources_init(&erst_resources);
+ erst_exec_ctx_init(&ctx);
+ rc = apei_exec_collect_resources(&ctx, &erst_resources);
+ if (rc)
+ goto err_fini;
+ rc = apei_resources_request(&erst_resources, "APEI ERST");
+ if (rc)
+ goto err_fini;
+ rc = apei_exec_pre_map_gars(&ctx);
+ if (rc)
+ goto err_release;
+ rc = erst_get_erange(&erst_erange);
+ if (rc) {
+ if (rc == -ENODEV)
+ pr_info(ERST_PFX
+ "The corresponding hardware device or firmware implementation "
+ "is not available.\n");
+ else
+ pr_err(ERST_PFX
+ "Failed to get Error Log Address Range.\n");
+ goto err_unmap_reg;
+ }
+
+ r = request_mem_region(erst_erange.base, erst_erange.size, "APEI ERST");
+ if (!r) {
+ pr_err(ERST_PFX
+ "Can not request iomem region <0x%16llx-0x%16llx> for ERST.\n",
+ (unsigned long long)erst_erange.base,
+ (unsigned long long)erst_erange.base + erst_erange.size);
+ rc = -EIO;
+ goto err_unmap_reg;
+ }
+ rc = -ENOMEM;
+ erst_erange.vaddr = ioremap_cache(erst_erange.base,
+ erst_erange.size);
+ if (!erst_erange.vaddr)
+ goto err_release_erange;
+
+ pr_info(ERST_PFX
+ "Error Record Serialization Table (ERST) support is initialized.\n");
+
+ return 0;
+
+err_release_erange:
+ release_mem_region(erst_erange.base, erst_erange.size);
+err_unmap_reg:
+ apei_exec_post_unmap_gars(&ctx);
+err_release:
+ apei_resources_release(&erst_resources);
+err_fini:
+ apei_resources_fini(&erst_resources);
+err:
+ erst_disable = 1;
+ return rc;
+}
+
+device_initcall(erst_init);
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
new file mode 100644
index 000000000000..fd0cc016a099
--- /dev/null
+++ b/drivers/acpi/apei/ghes.c
@@ -0,0 +1,427 @@
+/*
+ * APEI Generic Hardware Error Source support
+ *
+ * Generic Hardware Error Source provides a way to report platform
+ * hardware errors (such as those from the chipset). It works in the
+ * so-called "Firmware First" mode: hardware errors are reported to
+ * firmware first, and firmware then reports them to Linux. This way,
+ * firmware can check non-standard hardware error registers or
+ * non-standard hardware links to produce richer hardware error
+ * information for Linux.
+ *
+ * For more information about Generic Hardware Error Source, please
+ * refer to ACPI Specification version 4.0, section 17.3.2.6
+ *
+ * Currently, only the SCI notification type and memory errors are
+ * supported. More notification types and hardware error types will be
+ * added later.
+ *
+ * Copyright 2010 Intel Corp.
+ * Author: Huang Ying <ying.huang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation;
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/acpi.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/cper.h>
+#include <linux/kdebug.h>
+#include <acpi/apei.h>
+#include <acpi/atomicio.h>
+#include <acpi/hed.h>
+#include <asm/mce.h>
+
+#include "apei-internal.h"
+
+#define GHES_PFX "GHES: "
+
+#define GHES_ESTATUS_MAX_SIZE 65536
+
+/*
+ * One struct ghes is created for each generic hardware error
+ * source.
+ *
+ * It provides the context for APEI hardware error timer/IRQ/SCI/NMI
+ * handler. The handler for one generic hardware error source is only
+ * triggered after the previous invocation is done, so the handler can
+ * use struct ghes without locking.
+ *
+ * estatus: memory buffer for error status block, allocated during
+ * HEST parsing.
+ */
+#define GHES_TO_CLEAR 0x0001
+
+struct ghes {
+ struct acpi_hest_generic *generic;
+ struct acpi_hest_generic_status *estatus;
+ struct list_head list;
+ u64 buffer_paddr;
+ unsigned long flags;
+};
+
+/*
+ * Error source lists, one list per notification method. The list
+ * members are struct ghes.
+ *
+ * List members are only added during HEST parsing and deleted at
+ * module_exit time, i.e. single-threaded, so no lock is needed for
+ * that.
+ *
+ * However, mutual exclusion is needed between adding/deleting members
+ * and the timer/IRQ/SCI/NMI handlers, which may traverse the list;
+ * RCU is used for that.
+ */
+static LIST_HEAD(ghes_sci);
+
+static struct ghes *ghes_new(struct acpi_hest_generic *generic)
+{
+ struct ghes *ghes;
+ unsigned int error_block_length;
+ int rc;
+
+ ghes = kzalloc(sizeof(*ghes), GFP_KERNEL);
+ if (!ghes)
+ return ERR_PTR(-ENOMEM);
+ ghes->generic = generic;
+ INIT_LIST_HEAD(&ghes->list);
+ rc = acpi_pre_map_gar(&generic->error_status_address);
+ if (rc)
+ goto err_free;
+ error_block_length = generic->error_block_length;
+ if (error_block_length > GHES_ESTATUS_MAX_SIZE) {
+ pr_warning(FW_WARN GHES_PFX
+ "Error status block length is too long: %u for "
+ "generic hardware error source: %d.\n",
+ error_block_length, generic->header.source_id);
+ error_block_length = GHES_ESTATUS_MAX_SIZE;
+ }
+ ghes->estatus = kmalloc(error_block_length, GFP_KERNEL);
+ if (!ghes->estatus) {
+ rc = -ENOMEM;
+ goto err_unmap;
+ }
+
+ return ghes;
+
+err_unmap:
+ acpi_post_unmap_gar(&generic->error_status_address);
+err_free:
+ kfree(ghes);
+ return ERR_PTR(rc);
+}
+
+static void ghes_fini(struct ghes *ghes)
+{
+ kfree(ghes->estatus);
+ acpi_post_unmap_gar(&ghes->generic->error_status_address);
+}
+
+enum {
+ GHES_SER_NO = 0x0,
+ GHES_SER_CORRECTED = 0x1,
+ GHES_SER_RECOVERABLE = 0x2,
+ GHES_SER_PANIC = 0x3,
+};
+
+static inline int ghes_severity(int severity)
+{
+ switch (severity) {
+ case CPER_SER_INFORMATIONAL:
+ return GHES_SER_NO;
+ case CPER_SER_CORRECTED:
+ return GHES_SER_CORRECTED;
+ case CPER_SER_RECOVERABLE:
+ return GHES_SER_RECOVERABLE;
+ case CPER_SER_FATAL:
+ return GHES_SER_PANIC;
+ default:
+ /* Unknown, go panic */
+ return GHES_SER_PANIC;
+ }
+}
+
+/* The SCI handler runs in a work queue, so ioremap can be used here */
+static int ghes_copy_tofrom_phys(void *buffer, u64 paddr, u32 len,
+ int from_phys)
+{
+ void *vaddr;
+
+ vaddr = ioremap_cache(paddr, len);
+ if (!vaddr)
+ return -ENOMEM;
+ if (from_phys)
+ memcpy(buffer, vaddr, len);
+ else
+ memcpy(vaddr, buffer, len);
+ iounmap(vaddr);
+
+ return 0;
+}
+
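+/*
+ * Sketch of what the function below does: first copy only the
+ * fixed-size status block header from the firmware-provided address to
+ * learn the total length via apei_estatus_len(), then copy the
+ * remaining generic data sections and validate the whole block with
+ * apei_estatus_check().
+ */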
+static int ghes_read_estatus(struct ghes *ghes, int silent)
+{
+ struct acpi_hest_generic *g = ghes->generic;
+ u64 buf_paddr;
+ u32 len;
+ int rc;
+
+ rc = acpi_atomic_read(&buf_paddr, &g->error_status_address);
+ if (rc) {
+ if (!silent && printk_ratelimit())
+ pr_warning(FW_WARN GHES_PFX
+"Failed to read error status block address for hardware error source: %d.\n",
+ g->header.source_id);
+ return -EIO;
+ }
+ if (!buf_paddr)
+ return -ENOENT;
+
+ rc = ghes_copy_tofrom_phys(ghes->estatus, buf_paddr,
+ sizeof(*ghes->estatus), 1);
+ if (rc)
+ return rc;
+ if (!ghes->estatus->block_status)
+ return -ENOENT;
+
+ ghes->buffer_paddr = buf_paddr;
+ ghes->flags |= GHES_TO_CLEAR;
+
+ rc = -EIO;
+ len = apei_estatus_len(ghes->estatus);
+ if (len < sizeof(*ghes->estatus))
+ goto err_read_block;
+ if (len > ghes->generic->error_block_length)
+ goto err_read_block;
+ if (apei_estatus_check_header(ghes->estatus))
+ goto err_read_block;
+ rc = ghes_copy_tofrom_phys(ghes->estatus + 1,
+ buf_paddr + sizeof(*ghes->estatus),
+ len - sizeof(*ghes->estatus), 1);
+ if (rc)
+ return rc;
+ if (apei_estatus_check(ghes->estatus))
+ goto err_read_block;
+ rc = 0;
+
+err_read_block:
+ if (rc && !silent)
+ pr_warning(FW_WARN GHES_PFX
+ "Failed to read error status block!\n");
+ return rc;
+}
+
+static void ghes_clear_estatus(struct ghes *ghes)
+{
+ ghes->estatus->block_status = 0;
+ if (!(ghes->flags & GHES_TO_CLEAR))
+ return;
+ ghes_copy_tofrom_phys(ghes->estatus, ghes->buffer_paddr,
+ sizeof(ghes->estatus->block_status), 0);
+ ghes->flags &= ~GHES_TO_CLEAR;
+}
+
+static void ghes_do_proc(struct ghes *ghes)
+{
+ int ser, processed = 0;
+ struct acpi_hest_generic_data *gdata;
+
+ ser = ghes_severity(ghes->estatus->error_severity);
+ apei_estatus_for_each_section(ghes->estatus, gdata) {
+#ifdef CONFIG_X86_MCE
+ if (!uuid_le_cmp(*(uuid_le *)gdata->section_type,
+ CPER_SEC_PLATFORM_MEM)) {
+ apei_mce_report_mem_error(
+ ser == GHES_SER_CORRECTED,
+ (struct cper_sec_mem_err *)(gdata+1));
+ processed = 1;
+ }
+#endif
+ }
+
+ if (!processed && printk_ratelimit())
+ pr_warning(GHES_PFX
+ "Unknown error record from generic hardware error source: %d\n",
+ ghes->generic->header.source_id);
+}
+
+static int ghes_proc(struct ghes *ghes)
+{
+ int rc;
+
+ rc = ghes_read_estatus(ghes, 0);
+ if (rc)
+ goto out;
+ ghes_do_proc(ghes);
+
+out:
+ ghes_clear_estatus(ghes);
+ return 0;
+}
+
+static int ghes_notify_sci(struct notifier_block *this,
+ unsigned long event, void *data)
+{
+ struct ghes *ghes;
+ int ret = NOTIFY_DONE;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(ghes, &ghes_sci, list) {
+ if (!ghes_proc(ghes))
+ ret = NOTIFY_OK;
+ }
+ rcu_read_unlock();
+
+ return ret;
+}
+
+static struct notifier_block ghes_notifier_sci = {
+ .notifier_call = ghes_notify_sci,
+};
+
+static int hest_ghes_parse(struct acpi_hest_header *hest_hdr, void *data)
+{
+ struct acpi_hest_generic *generic;
+ struct ghes *ghes = NULL;
+ int rc = 0;
+
+ if (hest_hdr->type != ACPI_HEST_TYPE_GENERIC_ERROR)
+ return 0;
+
+ generic = (struct acpi_hest_generic *)hest_hdr;
+ if (!generic->enabled)
+ return 0;
+
+ if (generic->error_block_length <
+ sizeof(struct acpi_hest_generic_status)) {
+ pr_warning(FW_BUG GHES_PFX
+"Invalid error block length: %u for generic hardware error source: %d\n",
+ generic->error_block_length,
+ generic->header.source_id);
+ goto err;
+ }
+ if (generic->records_to_preallocate == 0) {
+ pr_warning(FW_BUG GHES_PFX
+"Invalid records to preallocate: %u for generic hardware error source: %d\n",
+ generic->records_to_preallocate,
+ generic->header.source_id);
+ goto err;
+ }
+ ghes = ghes_new(generic);
+ if (IS_ERR(ghes)) {
+ rc = PTR_ERR(ghes);
+ ghes = NULL;
+ goto err;
+ }
+ switch (generic->notify.type) {
+ case ACPI_HEST_NOTIFY_POLLED:
+ pr_warning(GHES_PFX
+"Generic hardware error source: %d notified via POLL is not supported!\n",
+ generic->header.source_id);
+ break;
+ case ACPI_HEST_NOTIFY_EXTERNAL:
+ case ACPI_HEST_NOTIFY_LOCAL:
+ pr_warning(GHES_PFX
+"Generic hardware error source: %d notified via IRQ is not supported!\n",
+ generic->header.source_id);
+ break;
+ case ACPI_HEST_NOTIFY_SCI:
+ if (list_empty(&ghes_sci))
+ register_acpi_hed_notifier(&ghes_notifier_sci);
+ list_add_rcu(&ghes->list, &ghes_sci);
+ break;
+ case ACPI_HEST_NOTIFY_NMI:
+ pr_warning(GHES_PFX
+"Generic hardware error source: %d notified via NMI is not supported!\n",
+ generic->header.source_id);
+ break;
+ default:
+ pr_warning(FW_WARN GHES_PFX
+ "Unknown notification type: %u for generic hardware error source: %d\n",
+ generic->notify.type, generic->header.source_id);
+ break;
+ }
+
+ return 0;
+err:
+ if (ghes)
+ ghes_fini(ghes);
+ return rc;
+}
+
+static void ghes_cleanup(void)
+{
+ struct ghes *ghes, *nghes;
+
+ if (!list_empty(&ghes_sci))
+ unregister_acpi_hed_notifier(&ghes_notifier_sci);
+
+ synchronize_rcu();
+
+ list_for_each_entry_safe(ghes, nghes, &ghes_sci, list) {
+ list_del(&ghes->list);
+ ghes_fini(ghes);
+ kfree(ghes);
+ }
+}
+
+static int __init ghes_init(void)
+{
+ int rc;
+
+ if (acpi_disabled)
+ return -ENODEV;
+
+ if (hest_disable) {
+ pr_info(GHES_PFX "HEST is not enabled!\n");
+ return -EINVAL;
+ }
+
+ rc = apei_hest_parse(hest_ghes_parse, NULL);
+ if (rc) {
+ pr_err(GHES_PFX
+ "Error during parsing HEST generic hardware error sources.\n");
+ goto err_cleanup;
+ }
+
+ if (list_empty(&ghes_sci)) {
+ pr_info(GHES_PFX
+ "No functional generic hardware error sources.\n");
+ rc = -ENODEV;
+ goto err_cleanup;
+ }
+
+ pr_info(GHES_PFX
+ "Generic Hardware Error Source support is initialized.\n");
+
+ return 0;
+err_cleanup:
+ ghes_cleanup();
+ return rc;
+}
+
+static void __exit ghes_exit(void)
+{
+ ghes_cleanup();
+}
+
+module_init(ghes_init);
+module_exit(ghes_exit);
+
+MODULE_AUTHOR("Huang Ying");
+MODULE_DESCRIPTION("APEI Generic Hardware Error Source support");
+MODULE_LICENSE("GPL");
diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c
new file mode 100644
index 000000000000..e7f40d362cb3
--- /dev/null
+++ b/drivers/acpi/apei/hest.c
@@ -0,0 +1,173 @@
+/*
+ * APEI Hardware Error Source Table support
+ *
+ * HEST describes error sources in detail and communicates operational
+ * parameters (e.g. severity levels, masking bits, and threshold
+ * values) to Linux as necessary. It also allows the BIOS to report
+ * non-standard error sources to Linux (for example, chipset-specific
+ * error registers).
+ *
+ * For more information about HEST, please refer to ACPI Specification
+ * version 4.0, section 17.3.2.
+ *
+ * Copyright 2009 Intel Corp.
+ * Author: Huang Ying <ying.huang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation;
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/acpi.h>
+#include <linux/kdebug.h>
+#include <linux/highmem.h>
+#include <linux/io.h>
+#include <acpi/apei.h>
+
+#include "apei-internal.h"
+
+#define HEST_PFX "HEST: "
+
+int hest_disable;
+EXPORT_SYMBOL_GPL(hest_disable);
+
+/* HEST table parsing */
+
+static struct acpi_table_hest *hest_tab;
+
+static int hest_void_parse(struct acpi_hest_header *hest_hdr, void *data)
+{
+ return 0;
+}
+
+static int hest_esrc_len_tab[ACPI_HEST_TYPE_RESERVED] = {
+ [ACPI_HEST_TYPE_IA32_CHECK] = -1, /* need further calculation */
+ [ACPI_HEST_TYPE_IA32_CORRECTED_CHECK] = -1,
+ [ACPI_HEST_TYPE_IA32_NMI] = sizeof(struct acpi_hest_ia_nmi),
+ [ACPI_HEST_TYPE_AER_ROOT_PORT] = sizeof(struct acpi_hest_aer_root),
+ [ACPI_HEST_TYPE_AER_ENDPOINT] = sizeof(struct acpi_hest_aer),
+ [ACPI_HEST_TYPE_AER_BRIDGE] = sizeof(struct acpi_hest_aer_bridge),
+ [ACPI_HEST_TYPE_GENERIC_ERROR] = sizeof(struct acpi_hest_generic),
+};
+
+static int hest_esrc_len(struct acpi_hest_header *hest_hdr)
+{
+ u16 hest_type = hest_hdr->type;
+ int len;
+
+ if (hest_type >= ACPI_HEST_TYPE_RESERVED)
+ return 0;
+
+ len = hest_esrc_len_tab[hest_type];
+
+ if (hest_type == ACPI_HEST_TYPE_IA32_CORRECTED_CHECK) {
+ struct acpi_hest_ia_corrected *cmc;
+ cmc = (struct acpi_hest_ia_corrected *)hest_hdr;
+ len = sizeof(*cmc) + cmc->num_hardware_banks *
+ sizeof(struct acpi_hest_ia_error_bank);
+ } else if (hest_type == ACPI_HEST_TYPE_IA32_CHECK) {
+ struct acpi_hest_ia_machine_check *mc;
+ mc = (struct acpi_hest_ia_machine_check *)hest_hdr;
+ len = sizeof(*mc) + mc->num_hardware_banks *
+ sizeof(struct acpi_hest_ia_error_bank);
+ }
+ BUG_ON(len == -1);
+
+ return len;
+}
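+
+/*
+ * Worked example (a sketch): for an ACPI_HEST_TYPE_IA32_CORRECTED_CHECK
+ * entry with num_hardware_banks == 4, the length computed above is
+ * sizeof(struct acpi_hest_ia_corrected) +
+ * 4 * sizeof(struct acpi_hest_ia_error_bank).
+ */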
+
+int apei_hest_parse(apei_hest_func_t func, void *data)
+{
+ struct acpi_hest_header *hest_hdr;
+ int i, rc, len;
+
+ if (hest_disable)
+ return -EINVAL;
+
+ hest_hdr = (struct acpi_hest_header *)(hest_tab + 1);
+ for (i = 0; i < hest_tab->error_source_count; i++) {
+ len = hest_esrc_len(hest_hdr);
+ if (!len) {
+ pr_warning(FW_WARN HEST_PFX
+ "Unknown or unused hardware error source "
+ "type: %d for hardware error source: %d.\n",
+ hest_hdr->type, hest_hdr->source_id);
+ return -EINVAL;
+ }
+ if ((void *)hest_hdr + len >
+ (void *)hest_tab + hest_tab->header.length) {
+ pr_warning(FW_BUG HEST_PFX
+ "Table contents overflow for hardware error source: %d.\n",
+ hest_hdr->source_id);
+ return -EINVAL;
+ }
+
+ rc = func(hest_hdr, data);
+ if (rc)
+ return rc;
+
+ hest_hdr = (void *)hest_hdr + len;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(apei_hest_parse);
+
+static int __init setup_hest_disable(char *str)
+{
+ hest_disable = 1;
+ return 0;
+}
+
+__setup("hest_disable", setup_hest_disable);
+
+static int __init hest_init(void)
+{
+ acpi_status status;
+ int rc = -ENODEV;
+
+ if (acpi_disabled)
+ goto err;
+
+ if (hest_disable) {
+ pr_info(HEST_PFX "HEST tabling parsing is disabled.\n");
+ goto err;
+ }
+
+ status = acpi_get_table(ACPI_SIG_HEST, 0,
+ (struct acpi_table_header **)&hest_tab);
+ if (status == AE_NOT_FOUND) {
+ pr_info(HEST_PFX "Table is not found!\n");
+ goto err;
+ } else if (ACPI_FAILURE(status)) {
+ const char *msg = acpi_format_exception(status);
+ pr_err(HEST_PFX "Failed to get table, %s\n", msg);
+ rc = -EINVAL;
+ goto err;
+ }
+
+ rc = apei_hest_parse(hest_void_parse, NULL);
+ if (rc)
+ goto err;
+
+ pr_info(HEST_PFX "HEST table parsing is initialized.\n");
+
+ return 0;
+err:
+ hest_disable = 1;
+ return rc;
+}
+
+subsys_initcall(hest_init);
diff --git a/drivers/acpi/atomicio.c b/drivers/acpi/atomicio.c
new file mode 100644
index 000000000000..814b19249616
--- /dev/null
+++ b/drivers/acpi/atomicio.c
@@ -0,0 +1,360 @@
+/*
+ * atomicio.c - ACPI IO memory pre-mapping/post-unmapping, then
+ * accessing in atomic context.
+ *
+ * This is used by the NMI handler to access IO memory areas, because
+ * ioremap/iounmap cannot be used in NMI context. The IO memory area
+ * is pre-mapped in process context and then accessed from the NMI
+ * handler.
+ *
+ * Copyright (C) 2009-2010, Intel Corp.
+ * Author: Huang Ying <ying.huang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/acpi.h>
+#include <linux/io.h>
+#include <linux/kref.h>
+#include <linux/rculist.h>
+#include <linux/interrupt.h>
+#include <acpi/atomicio.h>
+
+#define ACPI_PFX "ACPI: "
+
+static LIST_HEAD(acpi_iomaps);
+/*
+ * Used for mutual exclusion between writers of acpi_iomaps list, for
+ * synchronization between readers and writer, RCU is used.
+ */
+static DEFINE_SPINLOCK(acpi_iomaps_lock);
+
+struct acpi_iomap {
+ struct list_head list;
+ void __iomem *vaddr;
+ unsigned long size;
+ phys_addr_t paddr;
+ struct kref ref;
+};
+
+/* acpi_iomaps_lock or RCU read lock must be held before calling */
+static struct acpi_iomap *__acpi_find_iomap(phys_addr_t paddr,
+ unsigned long size)
+{
+ struct acpi_iomap *map;
+
+ list_for_each_entry_rcu(map, &acpi_iomaps, list) {
+ if (map->paddr + map->size >= paddr + size &&
+ map->paddr <= paddr)
+ return map;
+ }
+ return NULL;
+}
+
+/*
+ * Atomic "ioremap" used by NMI handler, if the specified IO memory
+ * area is not pre-mapped, NULL will be returned.
+ *
+ * acpi_iomaps_lock or RCU read lock must be held before calling
+ */
+static void __iomem *__acpi_ioremap_fast(phys_addr_t paddr,
+ unsigned long size)
+{
+ struct acpi_iomap *map;
+
+ map = __acpi_find_iomap(paddr, size);
+ if (map)
+ return map->vaddr + (paddr - map->paddr);
+ else
+ return NULL;
+}
+
+/* acpi_iomaps_lock must be held before calling */
+static void __iomem *__acpi_try_ioremap(phys_addr_t paddr,
+ unsigned long size)
+{
+ struct acpi_iomap *map;
+
+ map = __acpi_find_iomap(paddr, size);
+ if (map) {
+ kref_get(&map->ref);
+ return map->vaddr + (paddr - map->paddr);
+ } else
+ return NULL;
+}
+
+/*
+ * Used to pre-map the specified IO memory area. First check whether
+ * the area is already pre-mapped; if it is, increase the reference
+ * count (in __acpi_try_ioremap) and return. Otherwise, do the real
+ * ioremap and add the mapping to the acpi_iomaps list.
+ */
+static void __iomem *acpi_pre_map(phys_addr_t paddr,
+ unsigned long size)
+{
+ void __iomem *vaddr;
+ struct acpi_iomap *map;
+ unsigned long pg_sz, flags;
+ phys_addr_t pg_off;
+
+ spin_lock_irqsave(&acpi_iomaps_lock, flags);
+ vaddr = __acpi_try_ioremap(paddr, size);
+ spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
+ if (vaddr)
+ return vaddr;
+
+ pg_off = paddr & PAGE_MASK;
+ pg_sz = ((paddr + size + PAGE_SIZE - 1) & PAGE_MASK) - pg_off;
+ vaddr = ioremap(pg_off, pg_sz);
+ if (!vaddr)
+ return NULL;
+ map = kmalloc(sizeof(*map), GFP_KERNEL);
+ if (!map)
+ goto err_unmap;
+ INIT_LIST_HEAD(&map->list);
+ map->paddr = pg_off;
+ map->size = pg_sz;
+ map->vaddr = vaddr;
+ kref_init(&map->ref);
+
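+ /*
+ * Re-check under the lock: another caller may have mapped the same
+ * area while we were doing the ioremap above; if so, reuse that
+ * mapping and drop ours.
+ */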
+ spin_lock_irqsave(&acpi_iomaps_lock, flags);
+ vaddr = __acpi_try_ioremap(paddr, size);
+ if (vaddr) {
+ spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
+ iounmap(map->vaddr);
+ kfree(map);
+ return vaddr;
+ }
+ list_add_tail_rcu(&map->list, &acpi_iomaps);
+ spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
+
+ return vaddr + (paddr - pg_off);
+err_unmap:
+ iounmap(vaddr);
+ return NULL;
+}
+
+/* acpi_iomaps_lock must be held before calling */
+static void __acpi_kref_del_iomap(struct kref *ref)
+{
+ struct acpi_iomap *map;
+
+ map = container_of(ref, struct acpi_iomap, ref);
+ list_del_rcu(&map->list);
+}
+
+/*
+ * Used to post-unmap the specified IO memory area. The iounmap is
+ * done only when the reference count drops to zero.
+ */
+static void acpi_post_unmap(phys_addr_t paddr, unsigned long size)
+{
+ struct acpi_iomap *map;
+ unsigned long flags;
+ int del;
+
+ spin_lock_irqsave(&acpi_iomaps_lock, flags);
+ map = __acpi_find_iomap(paddr, size);
+ BUG_ON(!map);
+ del = kref_put(&map->ref, __acpi_kref_del_iomap);
+ spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
+
+ if (!del)
+ return;
+
+ synchronize_rcu();
+ iounmap(map->vaddr);
+ kfree(map);
+}
+
+/* Callers running in NMI context should pass silent = 1 */
+static int acpi_check_gar(struct acpi_generic_address *reg,
+ u64 *paddr, int silent)
+{
+ u32 width, space_id;
+
+ width = reg->bit_width;
+ space_id = reg->space_id;
+ /* Handle possible alignment issues */
+ memcpy(paddr, &reg->address, sizeof(*paddr));
+ if (!*paddr) {
+ if (!silent)
+ pr_warning(FW_BUG ACPI_PFX
+ "Invalid physical address in GAR [0x%llx/%u/%u]\n",
+ *paddr, width, space_id);
+ return -EINVAL;
+ }
+
+ if ((width != 8) && (width != 16) && (width != 32) && (width != 64)) {
+ if (!silent)
+ pr_warning(FW_BUG ACPI_PFX
+ "Invalid bit width in GAR [0x%llx/%u/%u]\n",
+ *paddr, width, space_id);
+ return -EINVAL;
+ }
+
+ if (space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY &&
+ space_id != ACPI_ADR_SPACE_SYSTEM_IO) {
+ if (!silent)
+ pr_warning(FW_BUG ACPI_PFX
+ "Invalid address space type in GAR [0x%llx/%u/%u]\n",
+ *paddr, width, space_id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Pre-map, working on GAR */
+int acpi_pre_map_gar(struct acpi_generic_address *reg)
+{
+ u64 paddr;
+ void __iomem *vaddr;
+ int rc;
+
+ if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
+ return 0;
+
+ rc = acpi_check_gar(reg, &paddr, 0);
+ if (rc)
+ return rc;
+
+ vaddr = acpi_pre_map(paddr, reg->bit_width / 8);
+ if (!vaddr)
+ return -EIO;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(acpi_pre_map_gar);
+
+/* Post-unmap, working on GAR */
+int acpi_post_unmap_gar(struct acpi_generic_address *reg)
+{
+ u64 paddr;
+ int rc;
+
+ if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
+ return 0;
+
+ rc = acpi_check_gar(reg, &paddr, 0);
+ if (rc)
+ return rc;
+
+ acpi_post_unmap(paddr, reg->bit_width / 8);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(acpi_post_unmap_gar);
+
+/*
+ * Can be used in atomic (including NMI) or process context. The RCU
+ * read lock may only be released after the IO memory access is done.
+ */
+static int acpi_atomic_read_mem(u64 paddr, u64 *val, u32 width)
+{
+ void __iomem *addr;
+
+ rcu_read_lock();
+ addr = __acpi_ioremap_fast(paddr, width);
+ switch (width) {
+ case 8:
+ *val = readb(addr);
+ break;
+ case 16:
+ *val = readw(addr);
+ break;
+ case 32:
+ *val = readl(addr);
+ break;
+ case 64:
+ *val = readq(addr);
+ break;
+ default:
+ return -EINVAL;
+ }
+ rcu_read_unlock();
+
+ return 0;
+}
+
+static int acpi_atomic_write_mem(u64 paddr, u64 val, u32 width)
+{
+ void __iomem *addr;
+
+ rcu_read_lock();
+ addr = __acpi_ioremap_fast(paddr, width);
+ switch (width) {
+ case 8:
+ writeb(val, addr);
+ break;
+ case 16:
+ writew(val, addr);
+ break;
+ case 32:
+ writel(val, addr);
+ break;
+ case 64:
+ writeq(val, addr);
+ break;
+ default:
+ return -EINVAL;
+ }
+ rcu_read_unlock();
+
+ return 0;
+}
+
+/* GAR accessing in atomic (including NMI) or process context */
+int acpi_atomic_read(u64 *val, struct acpi_generic_address *reg)
+{
+ u64 paddr;
+ int rc;
+
+ rc = acpi_check_gar(reg, &paddr, 1);
+ if (rc)
+ return rc;
+
+ *val = 0;
+ switch (reg->space_id) {
+ case ACPI_ADR_SPACE_SYSTEM_MEMORY:
+ return acpi_atomic_read_mem(paddr, val, reg->bit_width);
+ case ACPI_ADR_SPACE_SYSTEM_IO:
+ return acpi_os_read_port(paddr, (u32 *)val, reg->bit_width);
+ default:
+ return -EINVAL;
+ }
+}
+EXPORT_SYMBOL_GPL(acpi_atomic_read);
+
+int acpi_atomic_write(u64 val, struct acpi_generic_address *reg)
+{
+ u64 paddr;
+ int rc;
+
+ rc = acpi_check_gar(reg, &paddr, 1);
+ if (rc)
+ return rc;
+
+ switch (reg->space_id) {
+ case ACPI_ADR_SPACE_SYSTEM_MEMORY:
+ return acpi_atomic_write_mem(paddr, val, reg->bit_width);
+ case ACPI_ADR_SPACE_SYSTEM_IO:
+ return acpi_os_write_port(paddr, val, reg->bit_width);
+ default:
+ return -EINVAL;
+ }
+}
+EXPORT_SYMBOL_GPL(acpi_atomic_write);
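
The three entry points exported above form the whole GAR access API introduced by this file: pre-map a register in process context, access it from atomic or NMI context, and drop the mapping on teardown. A minimal usage sketch follows; example_reg, example_probe and the other names are illustrative only and not part of this patch.

#include <linux/kernel.h>
#include <linux/acpi.h>
#include <acpi/atomicio.h>

/* Illustrative consumer of the GAR API above; not part of this patch. */
static struct acpi_generic_address example_reg;	/* filled in from a firmware table */

static int example_probe(void)
{
	/* Map once, in process context, so later NMI-time accesses cannot sleep. */
	return acpi_pre_map_gar(&example_reg);
}

static void example_nmi_handler(void)
{
	u64 status;

	/* Uses only the pre-mapped, RCU-protected mapping, so this is NMI-safe. */
	if (!acpi_atomic_read(&status, &example_reg) && status)
		pr_err("example: hardware error status 0x%llx\n", status);
}

static void example_remove(void)
{
	acpi_post_unmap_gar(&example_reg);
}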
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 743576bf1bd7..c1d23cd71652 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -69,6 +69,44 @@ static struct dmi_system_id __cpuinitdata power_nocheck_dmi_table[] = {
};
+#ifdef CONFIG_X86
+static int set_copy_dsdt(const struct dmi_system_id *id)
+{
+ printk(KERN_NOTICE "%s detected - "
+ "force copy of DSDT to local memory\n", id->ident);
+ acpi_gbl_copy_dsdt_locally = 1;
+ return 0;
+}
+
+static struct dmi_system_id dsdt_dmi_table[] __initdata = {
+ /*
+ * Insyde BIOS on some TOSHIBA machines corrupt the DSDT.
+ * https://bugzilla.kernel.org/show_bug.cgi?id=14679
+ */
+ {
+ .callback = set_copy_dsdt,
+ .ident = "TOSHIBA Satellite A505",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Satellite A505"),
+ },
+ },
+ {
+ .callback = set_copy_dsdt,
+ .ident = "TOSHIBA Satellite L505D",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Satellite L505D"),
+ },
+ },
+ {}
+};
+#else
+static struct dmi_system_id dsdt_dmi_table[] __initdata = {
+ {}
+};
+#endif
+
/* --------------------------------------------------------------------------
Device Management
-------------------------------------------------------------------------- */
@@ -363,11 +401,6 @@ static void acpi_print_osc_error(acpi_handle handle,
printk("\n");
}
-static u8 hex_val(unsigned char c)
-{
- return isdigit(c) ? c - '0' : toupper(c) - 'A' + 10;
-}
-
static acpi_status acpi_str_to_uuid(char *str, u8 *uuid)
{
int i;
@@ -384,8 +417,8 @@ static acpi_status acpi_str_to_uuid(char *str, u8 *uuid)
return AE_BAD_PARAMETER;
}
for (i = 0; i < 16; i++) {
- uuid[i] = hex_val(str[opc_map_to_uuid[i]]) << 4;
- uuid[i] |= hex_val(str[opc_map_to_uuid[i] + 1]);
+ uuid[i] = hex_to_bin(str[opc_map_to_uuid[i]]) << 4;
+ uuid[i] |= hex_to_bin(str[opc_map_to_uuid[i] + 1]);
}
return AE_OK;
}
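
The hex_to_bin() helper used above is declared in <linux/kernel.h> and returns the numeric value 0-15 of a hexadecimal digit (a negative value for a non-hex character), which is why the local hex_val() can be dropped. A small illustrative sketch of the same two-nibble assembly; the function name is made up:

/* Illustrative only: build one byte from two hex digits, e.g. "1a" -> 0x1a. */
static u8 example_byte_from_hex(const char *s)
{
	return (hex_to_bin(s[0]) << 4) | hex_to_bin(s[1]);
}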
@@ -813,6 +846,12 @@ void __init acpi_early_init(void)
acpi_gbl_permanent_mmap = 1;
+ /*
+ * If the machine matches an entry in the DMI check table,
+ * the DSDT will be copied to local memory.
+ */
+ dmi_check_system(dsdt_dmi_table);
+
status = acpi_reallocate_root_table();
if (ACPI_FAILURE(status)) {
printk(KERN_ERR PREFIX
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index f2234db85da0..e61d4f8e62a5 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -1027,10 +1027,9 @@ int __init acpi_ec_ecdt_probe(void)
/* Don't trust ECDT, which comes from ASUSTek */
if (!EC_FLAGS_VALIDATE_ECDT)
goto install;
- saved_ec = kmalloc(sizeof(struct acpi_ec), GFP_KERNEL);
+ saved_ec = kmemdup(boot_ec, sizeof(struct acpi_ec), GFP_KERNEL);
if (!saved_ec)
return -ENOMEM;
- memcpy(saved_ec, boot_ec, sizeof(struct acpi_ec));
/* fall through */
}
diff --git a/drivers/acpi/hed.c b/drivers/acpi/hed.c
new file mode 100644
index 000000000000..d0c1967f7597
--- /dev/null
+++ b/drivers/acpi/hed.c
@@ -0,0 +1,112 @@
+/*
+ * ACPI Hardware Error Device (PNP0C33) Driver
+ *
+ * Copyright (C) 2010, Intel Corp.
+ * Author: Huang Ying <ying.huang@intel.com>
+ *
+ * ACPI Hardware Error Device is used to report some hardware errors
+ * notified via SCI, mainly the corrected errors.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation;
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/acpi.h>
+#include <acpi/acpi_bus.h>
+#include <acpi/acpi_drivers.h>
+#include <acpi/hed.h>
+
+static struct acpi_device_id acpi_hed_ids[] = {
+ {"PNP0C33", 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, acpi_hed_ids);
+
+static acpi_handle hed_handle;
+
+static BLOCKING_NOTIFIER_HEAD(acpi_hed_notify_list);
+
+int register_acpi_hed_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&acpi_hed_notify_list, nb);
+}
+EXPORT_SYMBOL_GPL(register_acpi_hed_notifier);
+
+void unregister_acpi_hed_notifier(struct notifier_block *nb)
+{
+ blocking_notifier_chain_unregister(&acpi_hed_notify_list, nb);
+}
+EXPORT_SYMBOL_GPL(unregister_acpi_hed_notifier);
+
+/*
+ * An SCI that reports a hardware error is forwarded to the listeners
+ * of HED; this is used by HEST Generic Hardware Error Sources with
+ * notify type SCI.
+ */
+static void acpi_hed_notify(struct acpi_device *device, u32 event)
+{
+ blocking_notifier_call_chain(&acpi_hed_notify_list, 0, NULL);
+}
+
+static int __devinit acpi_hed_add(struct acpi_device *device)
+{
+ /* Only one hardware error device */
+ if (hed_handle)
+ return -EINVAL;
+ hed_handle = device->handle;
+ return 0;
+}
+
+static int __devexit acpi_hed_remove(struct acpi_device *device, int type)
+{
+ hed_handle = NULL;
+ return 0;
+}
+
+static struct acpi_driver acpi_hed_driver = {
+ .name = "hardware_error_device",
+ .class = "hardware_error",
+ .ids = acpi_hed_ids,
+ .ops = {
+ .add = acpi_hed_add,
+ .remove = acpi_hed_remove,
+ .notify = acpi_hed_notify,
+ },
+};
+
+static int __init acpi_hed_init(void)
+{
+ if (acpi_disabled)
+ return -ENODEV;
+
+ if (acpi_bus_register_driver(&acpi_hed_driver) < 0)
+ return -ENODEV;
+
+ return 0;
+}
+
+static void __exit acpi_hed_exit(void)
+{
+ acpi_bus_unregister_driver(&acpi_hed_driver);
+}
+
+module_init(acpi_hed_init);
+module_exit(acpi_hed_exit);
+
+ACPI_MODULE_NAME("hed");
+MODULE_AUTHOR("Huang Ying");
+MODULE_DESCRIPTION("ACPI Hardware Error Device Driver");
+MODULE_LICENSE("GPL");
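
A consumer of the new HED driver only needs a notifier block; the callback runs in process context whenever the PNP0C33 device raises an SCI. The names below are illustrative and not part of this patch:

#include <linux/init.h>
#include <linux/notifier.h>
#include <acpi/hed.h>

static int example_hed_event(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	/* Poll the firmware error source here; HED passes no action or data. */
	return NOTIFY_OK;
}

static struct notifier_block example_hed_nb = {
	.notifier_call = example_hed_event,
};

static int __init example_init(void)
{
	return register_acpi_hed_notifier(&example_hed_nb);
}

static void __exit example_exit(void)
{
	unregister_acpi_hed_notifier(&example_hed_nb);
}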
diff --git a/drivers/acpi/hest.c b/drivers/acpi/hest.c
deleted file mode 100644
index 1c527a192872..000000000000
--- a/drivers/acpi/hest.c
+++ /dev/null
@@ -1,139 +0,0 @@
-#include <linux/acpi.h>
-#include <linux/pci.h>
-
-#define PREFIX "ACPI: "
-
-static inline unsigned long parse_acpi_hest_ia_machine_check(struct acpi_hest_ia_machine_check *p)
-{
- return sizeof(*p) +
- (sizeof(struct acpi_hest_ia_error_bank) * p->num_hardware_banks);
-}
-
-static inline unsigned long parse_acpi_hest_ia_corrected(struct acpi_hest_ia_corrected *p)
-{
- return sizeof(*p) +
- (sizeof(struct acpi_hest_ia_error_bank) * p->num_hardware_banks);
-}
-
-static inline unsigned long parse_acpi_hest_ia_nmi(struct acpi_hest_ia_nmi *p)
-{
- return sizeof(*p);
-}
-
-static inline unsigned long parse_acpi_hest_generic(struct acpi_hest_generic *p)
-{
- return sizeof(*p);
-}
-
-static inline unsigned int hest_match_pci(struct acpi_hest_aer_common *p, struct pci_dev *pci)
-{
- return (0 == pci_domain_nr(pci->bus) &&
- p->bus == pci->bus->number &&
- p->device == PCI_SLOT(pci->devfn) &&
- p->function == PCI_FUNC(pci->devfn));
-}
-
-static unsigned long parse_acpi_hest_aer(void *hdr, int type, struct pci_dev *pci, int *firmware_first)
-{
- struct acpi_hest_aer_common *p = hdr + sizeof(struct acpi_hest_header);
- unsigned long rc=0;
- u8 pcie_type = 0;
- u8 bridge = 0;
- switch (type) {
- case ACPI_HEST_TYPE_AER_ROOT_PORT:
- rc = sizeof(struct acpi_hest_aer_root);
- pcie_type = PCI_EXP_TYPE_ROOT_PORT;
- break;
- case ACPI_HEST_TYPE_AER_ENDPOINT:
- rc = sizeof(struct acpi_hest_aer);
- pcie_type = PCI_EXP_TYPE_ENDPOINT;
- break;
- case ACPI_HEST_TYPE_AER_BRIDGE:
- rc = sizeof(struct acpi_hest_aer_bridge);
- if ((pci->class >> 16) == PCI_BASE_CLASS_BRIDGE)
- bridge = 1;
- break;
- }
-
- if (p->flags & ACPI_HEST_GLOBAL) {
- if ((pci->is_pcie && (pci->pcie_type == pcie_type)) || bridge)
- *firmware_first = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST);
- }
- else
- if (hest_match_pci(p, pci))
- *firmware_first = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST);
- return rc;
-}
-
-static int acpi_hest_firmware_first(struct acpi_table_header *stdheader, struct pci_dev *pci)
-{
- struct acpi_table_hest *hest = (struct acpi_table_hest *)stdheader;
- void *p = (void *)hest + sizeof(*hest); /* defined by the ACPI 4.0 spec */
- struct acpi_hest_header *hdr = p;
-
- int i;
- int firmware_first = 0;
- static unsigned char printed_unused = 0;
- static unsigned char printed_reserved = 0;
-
- for (i=0, hdr=p; p < (((void *)hest) + hest->header.length) && i < hest->error_source_count; i++) {
- switch (hdr->type) {
- case ACPI_HEST_TYPE_IA32_CHECK:
- p += parse_acpi_hest_ia_machine_check(p);
- break;
- case ACPI_HEST_TYPE_IA32_CORRECTED_CHECK:
- p += parse_acpi_hest_ia_corrected(p);
- break;
- case ACPI_HEST_TYPE_IA32_NMI:
- p += parse_acpi_hest_ia_nmi(p);
- break;
- /* These three should never appear */
- case ACPI_HEST_TYPE_NOT_USED3:
- case ACPI_HEST_TYPE_NOT_USED4:
- case ACPI_HEST_TYPE_NOT_USED5:
- if (!printed_unused) {
- printk(KERN_DEBUG PREFIX
- "HEST Error Source list contains an obsolete type (%d).\n", hdr->type);
- printed_unused = 1;
- }
- break;
- case ACPI_HEST_TYPE_AER_ROOT_PORT:
- case ACPI_HEST_TYPE_AER_ENDPOINT:
- case ACPI_HEST_TYPE_AER_BRIDGE:
- p += parse_acpi_hest_aer(p, hdr->type, pci, &firmware_first);
- break;
- case ACPI_HEST_TYPE_GENERIC_ERROR:
- p += parse_acpi_hest_generic(p);
- break;
- /* These should never appear either */
- case ACPI_HEST_TYPE_RESERVED:
- default:
- if (!printed_reserved) {
- printk(KERN_DEBUG PREFIX
- "HEST Error Source list contains a reserved type (%d).\n", hdr->type);
- printed_reserved = 1;
- }
- break;
- }
- }
- return firmware_first;
-}
-
-int acpi_hest_firmware_first_pci(struct pci_dev *pci)
-{
- acpi_status status = AE_NOT_FOUND;
- struct acpi_table_header *hest = NULL;
-
- if (acpi_disabled)
- return 0;
-
- status = acpi_get_table(ACPI_SIG_HEST, 1, &hest);
-
- if (ACPI_SUCCESS(status)) {
- if (acpi_hest_firmware_first(hest, pci)) {
- return 1;
- }
- }
- return 0;
-}
-EXPORT_SYMBOL_GPL(acpi_hest_firmware_first_pci);
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 7594f65800cf..78418ce4fc78 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -1207,6 +1207,15 @@ int acpi_check_mem_region(resource_size_t start, resource_size_t n,
EXPORT_SYMBOL(acpi_check_mem_region);
/*
+ * Let drivers know whether the resource checks are effective
+ */
+int acpi_resources_are_enforced(void)
+{
+ return acpi_enforce_resources == ENFORCE_RESOURCES_STRICT;
+}
+EXPORT_SYMBOL(acpi_resources_are_enforced);
+
+/*
* Acquire a spinlock.
*
* handle is a pointer to the spinlock_t.
@@ -1406,7 +1415,7 @@ acpi_os_invalidate_address(
switch (space_id) {
case ACPI_ADR_SPACE_SYSTEM_IO:
case ACPI_ADR_SPACE_SYSTEM_MEMORY:
- /* Only interference checks against SystemIO and SytemMemory
+ /* Only interference checks against SystemIO and SystemMemory
are needed */
res.start = address;
res.end = address + length - 1;
@@ -1458,7 +1467,7 @@ acpi_os_validate_address (
switch (space_id) {
case ACPI_ADR_SPACE_SYSTEM_IO:
case ACPI_ADR_SPACE_SYSTEM_MEMORY:
- /* Only interference checks against SystemIO and SytemMemory
+ /* Only interference checks against SystemIO and SystemMemory
are needed */
res = kzalloc(sizeof(struct acpi_res_list), GFP_KERNEL);
if (!res)
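
Drivers whose I/O regions may also be claimed by ACPI AML can use the acpi_resources_are_enforced() helper added above to decide whether to back off instead of racing with the firmware. A hedged sketch, with a made-up driver name:

/* Illustrative only: skip native register access when strict ACPI
 * resource conflict enforcement is active. */
static int example_native_probe(void)
{
	if (acpi_resources_are_enforced()) {
		pr_notice("example: strict ACPI resource enforcement, using ACPI access only\n");
		return -EBUSY;
	}
	return 0;
}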
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index b0a71ecee682..e4804fb05e23 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -401,11 +401,13 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
* driver reported one, then use it. Exit in any case.
*/
if (gsi < 0) {
+ u32 dev_gsi;
dev_warn(&dev->dev, "PCI INT %c: no GSI", pin_name(pin));
/* Interrupt Line values above 0xF are forbidden */
- if (dev->irq > 0 && (dev->irq <= 0xF)) {
- printk(" - using IRQ %d\n", dev->irq);
- acpi_register_gsi(&dev->dev, dev->irq,
+ if (dev->irq > 0 && (dev->irq <= 0xF) &&
+ (acpi_isa_irq_to_gsi(dev->irq, &dev_gsi) == 0)) {
+ printk(" - using ISA IRQ %d\n", dev->irq);
+ acpi_register_gsi(&dev->dev, dev_gsi,
ACPI_LEVEL_SENSITIVE,
ACPI_ACTIVE_LOW);
return 0;
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index aefce33f2a09..4eac59393edc 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -120,7 +120,8 @@ acpi_handle acpi_get_pci_rootbridge_handle(unsigned int seg, unsigned int bus)
struct acpi_pci_root *root;
list_for_each_entry(root, &acpi_pci_roots, node)
- if ((root->segment == (u16) seg) && (root->bus_nr == (u16) bus))
+ if ((root->segment == (u16) seg) &&
+ (root->secondary.start == (u16) bus))
return root->device->handle;
return NULL;
}
@@ -154,7 +155,7 @@ EXPORT_SYMBOL_GPL(acpi_is_root_bridge);
static acpi_status
get_root_bridge_busnr_callback(struct acpi_resource *resource, void *data)
{
- int *busnr = data;
+ struct resource *res = data;
struct acpi_resource_address64 address;
if (resource->type != ACPI_RESOURCE_TYPE_ADDRESS16 &&
@@ -164,28 +165,27 @@ get_root_bridge_busnr_callback(struct acpi_resource *resource, void *data)
acpi_resource_to_address64(resource, &address);
if ((address.address_length > 0) &&
- (address.resource_type == ACPI_BUS_NUMBER_RANGE))
- *busnr = address.minimum;
+ (address.resource_type == ACPI_BUS_NUMBER_RANGE)) {
+ res->start = address.minimum;
+ res->end = address.minimum + address.address_length - 1;
+ }
return AE_OK;
}
static acpi_status try_get_root_bridge_busnr(acpi_handle handle,
- unsigned long long *bus)
+ struct resource *res)
{
acpi_status status;
- int busnum;
- busnum = -1;
+ res->start = -1;
status =
acpi_walk_resources(handle, METHOD_NAME__CRS,
- get_root_bridge_busnr_callback, &busnum);
+ get_root_bridge_busnr_callback, res);
if (ACPI_FAILURE(status))
return status;
- /* Check if we really get a bus number from _CRS */
- if (busnum == -1)
+ if (res->start == -1)
return AE_ERROR;
- *bus = busnum;
return AE_OK;
}
@@ -429,34 +429,47 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
struct acpi_device *child;
u32 flags, base_flags;
+ root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL);
+ if (!root)
+ return -ENOMEM;
+
segment = 0;
status = acpi_evaluate_integer(device->handle, METHOD_NAME__SEG, NULL,
&segment);
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
printk(KERN_ERR PREFIX "can't evaluate _SEG\n");
- return -ENODEV;
+ result = -ENODEV;
+ goto end;
}
/* Check _CRS first, then _BBN. If no _BBN, default to zero. */
- bus = 0;
- status = try_get_root_bridge_busnr(device->handle, &bus);
+ root->secondary.flags = IORESOURCE_BUS;
+ status = try_get_root_bridge_busnr(device->handle, &root->secondary);
if (ACPI_FAILURE(status)) {
+ /*
+ * We need both the start and end of the downstream bus range
+ * to interpret _CBA (MMCONFIG base address), so it really is
+ * supposed to be in _CRS. If we don't find it there, all we
+ * can do is assume [_BBN-0xFF] or [0-0xFF].
+ */
+ root->secondary.end = 0xFF;
+ printk(KERN_WARNING FW_BUG PREFIX
+ "no secondary bus range in _CRS\n");
status = acpi_evaluate_integer(device->handle, METHOD_NAME__BBN, NULL, &bus);
- if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
- printk(KERN_ERR PREFIX
- "no bus number in _CRS and can't evaluate _BBN\n");
- return -ENODEV;
+ if (ACPI_SUCCESS(status))
+ root->secondary.start = bus;
+ else if (status == AE_NOT_FOUND)
+ root->secondary.start = 0;
+ else {
+ printk(KERN_ERR PREFIX "can't evaluate _BBN\n");
+ result = -ENODEV;
+ goto end;
}
}
- root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL);
- if (!root)
- return -ENOMEM;
-
INIT_LIST_HEAD(&root->node);
root->device = device;
root->segment = segment & 0xFFFF;
- root->bus_nr = bus & 0xFF;
strcpy(acpi_device_name(device), ACPI_PCI_ROOT_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_PCI_ROOT_CLASS);
device->driver_data = root;
@@ -475,9 +488,9 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
/* TBD: Locking */
list_add_tail(&root->node, &acpi_pci_roots);
- printk(KERN_INFO PREFIX "%s [%s] (%04x:%02x)\n",
+ printk(KERN_INFO PREFIX "%s [%s] (domain %04x %pR)\n",
acpi_device_name(device), acpi_device_bid(device),
- root->segment, root->bus_nr);
+ root->segment, &root->secondary);
/*
* Scan the Root Bridge
@@ -486,11 +499,11 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
* PCI namespace does not get created until this call is made (and
* thus the root bridge's pci_dev does not exist).
*/
- root->bus = pci_acpi_scan_root(device, segment, bus);
+ root->bus = pci_acpi_scan_root(root);
if (!root->bus) {
printk(KERN_ERR PREFIX
"Bus %04x:%02x not present in PCI namespace\n",
- root->segment, root->bus_nr);
+ root->segment, (unsigned int)root->secondary.start);
result = -ENODEV;
goto end;
}
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index ddc76787b842..f74d3b31e5c9 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -172,7 +172,6 @@ static int acpi_power_get_list_state(struct acpi_handle_list *list, int *state)
return -EINVAL;
/* The state of the list is 'on' IFF all resources are 'on'. */
- /* */
for (i = 0; i < list->count; i++) {
/*
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index 5675d9747e87..b1034a9ada4e 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -616,7 +616,8 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
acpi_processor_get_limit_info(pr);
- acpi_processor_power_init(pr, device);
+ if (cpuidle_get_driver() == &acpi_idle_driver)
+ acpi_processor_power_init(pr, device);
pr->cdev = thermal_cooling_device_register("Processor", device,
&processor_cooling_ops);
@@ -920,9 +921,14 @@ static int __init acpi_processor_init(void)
if (!acpi_processor_dir)
return -ENOMEM;
#endif
- result = cpuidle_register_driver(&acpi_idle_driver);
- if (result < 0)
- goto out_proc;
+
+ if (!cpuidle_register_driver(&acpi_idle_driver)) {
+ printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n",
+ acpi_idle_driver.name);
+ } else {
+ printk(KERN_DEBUG "ACPI: acpi_idle yielding to %s",
+ cpuidle_get_driver()->name);
+ }
result = acpi_bus_register_driver(&acpi_processor_driver);
if (result < 0)
@@ -941,7 +947,6 @@ static int __init acpi_processor_init(void)
out_cpuidle:
cpuidle_unregister_driver(&acpi_idle_driver);
-out_proc:
#ifdef CONFIG_ACPI_PROCFS
remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);
#endif
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 5939e7f7d8e9..2e8c27d48f2b 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -698,7 +698,7 @@ static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
"max_cstate: C%d\n"
"maximum allowed latency: %d usec\n",
pr->power.state ? pr->power.state - pr->power.states : 0,
- max_cstate, pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY));
+ max_cstate, pm_qos_request(PM_QOS_CPU_DMA_LATENCY));
seq_puts(seq, "states:\n");
@@ -727,19 +727,9 @@ static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
break;
}
- if (pr->power.states[i].promotion.state)
- seq_printf(seq, "promotion[C%zd] ",
- (pr->power.states[i].promotion.state -
- pr->power.states));
- else
- seq_puts(seq, "promotion[--] ");
+ seq_puts(seq, "promotion[--] ");
- if (pr->power.states[i].demotion.state)
- seq_printf(seq, "demotion[C%zd] ",
- (pr->power.states[i].demotion.state -
- pr->power.states));
- else
- seq_puts(seq, "demotion[--] ");
+ seq_puts(seq, "demotion[--] ");
seq_printf(seq, "latency[%03d] usage[%08d] duration[%020llu]\n",
pr->power.states[i].latency,
@@ -869,6 +859,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
struct acpi_processor *pr;
struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
ktime_t kt1, kt2;
+ s64 idle_time_ns;
s64 idle_time;
s64 sleep_ticks = 0;
@@ -881,6 +872,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
return(acpi_idle_enter_c1(dev, state));
local_irq_disable();
+
if (cx->entry_method != ACPI_CSTATE_FFH) {
current_thread_info()->status &= ~TS_POLLING;
/*
@@ -888,12 +880,12 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
* NEED_RESCHED:
*/
smp_mb();
- }
- if (unlikely(need_resched())) {
- current_thread_info()->status |= TS_POLLING;
- local_irq_enable();
- return 0;
+ if (unlikely(need_resched())) {
+ current_thread_info()->status |= TS_POLLING;
+ local_irq_enable();
+ return 0;
+ }
}
/*
@@ -910,15 +902,18 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
sched_clock_idle_sleep_event();
acpi_idle_do_entry(cx);
kt2 = ktime_get_real();
- idle_time = ktime_to_us(ktime_sub(kt2, kt1));
+ idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1));
+ idle_time = idle_time_ns;
+ do_div(idle_time, NSEC_PER_USEC);
sleep_ticks = us_to_pm_timer_ticks(idle_time);
/* Tell the scheduler how much we idled: */
- sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);
+ sched_clock_idle_wakeup_event(idle_time_ns);
local_irq_enable();
- current_thread_info()->status |= TS_POLLING;
+ if (cx->entry_method != ACPI_CSTATE_FFH)
+ current_thread_info()->status |= TS_POLLING;
cx->usage++;
@@ -943,6 +938,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
struct acpi_processor *pr;
struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
ktime_t kt1, kt2;
+ s64 idle_time_ns;
s64 idle_time;
s64 sleep_ticks = 0;
@@ -968,6 +964,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
}
local_irq_disable();
+
if (cx->entry_method != ACPI_CSTATE_FFH) {
current_thread_info()->status &= ~TS_POLLING;
/*
@@ -975,12 +972,12 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
* NEED_RESCHED:
*/
smp_mb();
- }
- if (unlikely(need_resched())) {
- current_thread_info()->status |= TS_POLLING;
- local_irq_enable();
- return 0;
+ if (unlikely(need_resched())) {
+ current_thread_info()->status |= TS_POLLING;
+ local_irq_enable();
+ return 0;
+ }
}
acpi_unlazy_tlb(smp_processor_id());
@@ -1025,14 +1022,17 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
spin_unlock(&c3_lock);
}
kt2 = ktime_get_real();
- idle_time = ktime_to_us(ktime_sub(kt2, kt1));
+ idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1));
+ idle_time = idle_time_ns;
+ do_div(idle_time, NSEC_PER_USEC);
sleep_ticks = us_to_pm_timer_ticks(idle_time);
/* Tell the scheduler how much we idled: */
- sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);
+ sched_clock_idle_wakeup_event(idle_time_ns);
local_irq_enable();
- current_thread_info()->status |= TS_POLLING;
+ if (cx->entry_method != ACPI_CSTATE_FFH)
+ current_thread_info()->status |= TS_POLLING;
cx->usage++;
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 0338f513a010..7f2e051ed4f1 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -765,7 +765,7 @@ static void acpi_bus_set_run_wake_flags(struct acpi_device *device)
}
status = acpi_get_gpe_status(NULL, device->wakeup.gpe_number,
- ACPI_NOT_ISR, &event_status);
+ &event_status);
if (status == AE_OK)
device->wakeup.flags.run_wake =
!!(event_status & ACPI_EVENT_FLAG_HANDLE);
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index e2e992599e68..4ab2275b4461 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -80,22 +80,6 @@ static int acpi_sleep_prepare(u32 acpi_state)
#ifdef CONFIG_ACPI_SLEEP
static u32 acpi_target_sleep_state = ACPI_STATE_S0;
-/*
- * According to the ACPI specification the BIOS should make sure that ACPI is
- * enabled and SCI_EN bit is set on wake-up from S1 - S3 sleep states. Still,
- * some BIOSes don't do that and therefore we use acpi_enable() to enable ACPI
- * on such systems during resume. Unfortunately that doesn't help in
- * particularly pathological cases in which SCI_EN has to be set directly on
- * resume, although the specification states very clearly that this flag is
- * owned by the hardware. The set_sci_en_on_resume variable will be set in such
- * cases.
- */
-static bool set_sci_en_on_resume;
-
-void __init acpi_set_sci_en_on_resume(void)
-{
- set_sci_en_on_resume = true;
-}
/*
* ACPI 1.0 wants us to execute _PTS before suspending devices, so we allow the
@@ -253,11 +237,8 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
break;
}
- /* If ACPI is not enabled by the BIOS, we need to enable it here. */
- if (set_sci_en_on_resume)
- acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1);
- else
- acpi_enable();
+ /* This violates the spec but is required for bug compatibility. */
+ acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1);
/* Reprogram control registers and execute _BFS */
acpi_leave_sleep_state_prep(acpi_state);
@@ -346,12 +327,6 @@ static int __init init_old_suspend_ordering(const struct dmi_system_id *d)
return 0;
}
-static int __init init_set_sci_en_on_resume(const struct dmi_system_id *d)
-{
- set_sci_en_on_resume = true;
- return 0;
-}
-
static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
{
.callback = init_old_suspend_ordering,
@@ -370,22 +345,6 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
},
},
{
- .callback = init_set_sci_en_on_resume,
- .ident = "Apple MacBook 1,1",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Apple Computer, Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "MacBook1,1"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Apple MacMini 1,1",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Apple Computer, Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Macmini1,1"),
- },
- },
- {
.callback = init_old_suspend_ordering,
.ident = "Asus Pundit P1-AH2 (M2N8L motherboard)",
.matches = {
@@ -394,182 +353,6 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
},
},
{
- .callback = init_set_sci_en_on_resume,
- .ident = "Toshiba Satellite L300",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Satellite L300"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Hewlett-Packard HP G7000 Notebook PC",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP G7000 Notebook PC"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Hewlett-Packard HP Pavilion dv3 Notebook PC",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv3 Notebook PC"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Hewlett-Packard Pavilion dv4",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Hewlett-Packard Pavilion dv7",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv7"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Hewlett-Packard Compaq Presario C700 Notebook PC",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Compaq Presario C700 Notebook PC"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Hewlett-Packard Compaq Presario CQ40 Notebook PC",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Compaq Presario CQ40 Notebook PC"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Lenovo ThinkPad T410",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T410"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Lenovo ThinkPad T510",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T510"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Lenovo ThinkPad W510",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W510"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Lenovo ThinkPad X201",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Lenovo ThinkPad X201",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201s"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Lenovo ThinkPad T410",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T410"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Lenovo ThinkPad T510",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T510"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Lenovo ThinkPad W510",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W510"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Lenovo ThinkPad X201",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Lenovo ThinkPad X201",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201s"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Lenovo ThinkPad T410",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T410"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Lenovo ThinkPad T510",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T510"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Lenovo ThinkPad W510",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W510"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Lenovo ThinkPad X201",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Lenovo ThinkPad X201",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201s"),
- },
- },
- {
.callback = init_old_suspend_ordering,
.ident = "Panasonic CF51-2L",
.matches = {
@@ -578,30 +361,6 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"),
},
},
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Dell Studio 1558",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Studio 1558"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Dell Studio 1557",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Studio 1557"),
- },
- },
- {
- .callback = init_set_sci_en_on_resume,
- .ident = "Dell Studio 1555",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Studio 1555"),
- },
- },
{},
};
#endif /* CONFIG_SUSPEND */
diff --git a/drivers/acpi/sleep.h b/drivers/acpi/sleep.h
index 8a8f3b3382a6..25b8bd149284 100644
--- a/drivers/acpi/sleep.h
+++ b/drivers/acpi/sleep.h
@@ -1,6 +1,6 @@
extern u8 sleep_states[];
-extern int acpi_suspend (u32 state);
+extern int acpi_suspend(u32 state);
extern void acpi_enable_wakeup_device_prep(u8 sleep_state);
extern void acpi_enable_wakeup_device(u8 sleep_state);
diff --git a/drivers/acpi/system.c b/drivers/acpi/system.c
index 4aaf24976138..c79e789ed03a 100644
--- a/drivers/acpi/system.c
+++ b/drivers/acpi/system.c
@@ -71,7 +71,7 @@ struct acpi_table_attr {
struct list_head node;
};
-static ssize_t acpi_table_show(struct kobject *kobj,
+static ssize_t acpi_table_show(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t offset, size_t count)
{
@@ -303,8 +303,7 @@ static int get_status(u32 index, acpi_event_status *status, acpi_handle *handle)
"Invalid GPE 0x%x\n", index));
goto end;
}
- result = acpi_get_gpe_status(*handle, index,
- ACPI_NOT_ISR, status);
+ result = acpi_get_gpe_status(*handle, index, status);
} else if (index < (num_gpes + ACPI_NUM_FIXED_EVENTS))
result = acpi_get_event_status(index - num_gpes, status);
@@ -395,7 +394,7 @@ static ssize_t counter_set(struct kobject *kobj,
result = acpi_set_gpe(handle, index, ACPI_GPE_ENABLE);
else if (!strcmp(buf, "clear\n") &&
(status & ACPI_EVENT_FLAG_SET))
- result = acpi_clear_gpe(handle, index, ACPI_NOT_ISR);
+ result = acpi_clear_gpe(handle, index);
else
all_counters[index].count = strtoul(buf, NULL, 0);
} else if (index < num_gpes + ACPI_NUM_FIXED_EVENTS) {
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index 8a0ed2800e63..f336bca7c450 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -213,7 +213,7 @@ acpi_table_parse_entries(char *id,
unsigned long table_end;
acpi_size tbl_size;
- if (acpi_disabled && !acpi_ht)
+ if (acpi_disabled)
return -ENODEV;
if (!handler)
@@ -280,7 +280,7 @@ int __init acpi_table_parse(char *id, acpi_table_handler handler)
struct acpi_table_header *table = NULL;
acpi_size tbl_size;
- if (acpi_disabled && !acpi_ht)
+ if (acpi_disabled)
return -ENODEV;
if (!handler)
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index a0c93b321482..9865d46f49a8 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -45,6 +45,7 @@
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
#include <linux/suspend.h>
+#include <acpi/video.h>
#define PREFIX "ACPI: "
@@ -65,11 +66,6 @@
#define MAX_NAME_LEN 20
-#define ACPI_VIDEO_DISPLAY_CRT 1
-#define ACPI_VIDEO_DISPLAY_TV 2
-#define ACPI_VIDEO_DISPLAY_DVI 3
-#define ACPI_VIDEO_DISPLAY_LCD 4
-
#define _COMPONENT ACPI_VIDEO_COMPONENT
ACPI_MODULE_NAME("video");
@@ -1007,11 +1003,11 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
result = acpi_video_init_brightness(device);
if (result)
return;
- name = kzalloc(MAX_NAME_LEN, GFP_KERNEL);
+ name = kasprintf(GFP_KERNEL, "acpi_video%d", count);
if (!name)
return;
+ count++;
- sprintf(name, "acpi_video%d", count++);
memset(&props, 0, sizeof(struct backlight_properties));
props.max_brightness = device->brightness->count - 3;
device->backlight = backlight_device_register(name, NULL, device,
@@ -1067,10 +1063,10 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
if (device->cap._DCS && device->cap._DSS) {
static int count;
char *name;
- name = kzalloc(MAX_NAME_LEN, GFP_KERNEL);
+ name = kasprintf(GFP_KERNEL, "acpi_video%d", count);
if (!name)
return;
- sprintf(name, "acpi_video%d", count++);
+ count++;
device->output_dev = video_output_register(name,
NULL, device, &acpi_output_properties);
kfree(name);
@@ -1748,11 +1744,27 @@ acpi_video_get_device_attr(struct acpi_video_bus *video, unsigned long device_id
}
static int
+acpi_video_get_device_type(struct acpi_video_bus *video,
+ unsigned long device_id)
+{
+ struct acpi_video_enumerated_device *ids;
+ int i;
+
+ for (i = 0; i < video->attached_count; i++) {
+ ids = &video->attached_array[i];
+ if ((ids->value.int_val & 0xffff) == device_id)
+ return ids->value.int_val;
+ }
+
+ return 0;
+}
+
+static int
acpi_video_bus_get_one_device(struct acpi_device *device,
struct acpi_video_bus *video)
{
unsigned long long device_id;
- int status;
+ int status, device_type;
struct acpi_video_device *data;
struct acpi_video_device_attrib* attribute;
@@ -1797,8 +1809,25 @@ acpi_video_bus_get_one_device(struct acpi_device *device,
}
if(attribute->bios_can_detect)
data->flags.bios = 1;
- } else
- data->flags.unknown = 1;
+ } else {
+ /* Check for legacy IDs */
+ device_type = acpi_video_get_device_type(video,
+ device_id);
+ /* Ignore bits 16 and 18-20 */
+ switch (device_type & 0xffe2ffff) {
+ case ACPI_VIDEO_DISPLAY_LEGACY_MONITOR:
+ data->flags.crt = 1;
+ break;
+ case ACPI_VIDEO_DISPLAY_LEGACY_PANEL:
+ data->flags.lcd = 1;
+ break;
+ case ACPI_VIDEO_DISPLAY_LEGACY_TV:
+ data->flags.tvout = 1;
+ break;
+ default:
+ data->flags.unknown = 1;
+ }
+ }
acpi_video_device_bind(video, data);
acpi_video_device_find_cap(data);
@@ -2032,6 +2061,71 @@ out:
return result;
}
+int acpi_video_get_edid(struct acpi_device *device, int type, int device_id,
+ void **edid)
+{
+ struct acpi_video_bus *video;
+ struct acpi_video_device *video_device;
+ union acpi_object *buffer = NULL;
+ acpi_status status;
+ int i, length;
+
+ if (!device || !acpi_driver_data(device))
+ return -EINVAL;
+
+ video = acpi_driver_data(device);
+
+ for (i = 0; i < video->attached_count; i++) {
+ video_device = video->attached_array[i].bind_info;
+ length = 256;
+
+ if (!video_device)
+ continue;
+
+ if (type) {
+ switch (type) {
+ case ACPI_VIDEO_DISPLAY_CRT:
+ if (!video_device->flags.crt)
+ continue;
+ break;
+ case ACPI_VIDEO_DISPLAY_TV:
+ if (!video_device->flags.tvout)
+ continue;
+ break;
+ case ACPI_VIDEO_DISPLAY_DVI:
+ if (!video_device->flags.dvi)
+ continue;
+ break;
+ case ACPI_VIDEO_DISPLAY_LCD:
+ if (!video_device->flags.lcd)
+ continue;
+ break;
+ }
+ } else if (video_device->device_id != device_id) {
+ continue;
+ }
+
+ status = acpi_video_device_EDID(video_device, &buffer, length);
+
+ if (ACPI_FAILURE(status) || !buffer ||
+ buffer->type != ACPI_TYPE_BUFFER) {
+ length = 128;
+ status = acpi_video_device_EDID(video_device, &buffer,
+ length);
+ if (ACPI_FAILURE(status) || !buffer ||
+ buffer->type != ACPI_TYPE_BUFFER) {
+ continue;
+ }
+ }
+
+ *edid = buffer->buffer.pointer;
+ return length;
+ }
+
+ return -ENODEV;
+}
+EXPORT_SYMBOL(acpi_video_get_edid);
+
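
The new acpi_video_get_edid() export lets graphics drivers fall back to firmware-provided EDID data when DDC probing fails. A minimal sketch, assuming the caller already holds the struct acpi_device of the video bus; the function name is illustrative:

#include <acpi/video.h>

/* Illustrative only: fetch the LCD panel's EDID via the ACPI video bus. */
static void *example_fetch_lcd_edid(struct acpi_device *video_bus, int *len)
{
	void *edid = NULL;
	int ret;

	ret = acpi_video_get_edid(video_bus, ACPI_VIDEO_DISPLAY_LCD, 0, &edid);
	if (ret < 0)
		return NULL;

	*len = ret;	/* EDID length in bytes, 128 or 256 as returned by _DDC */
	return edid;
}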
static int
acpi_video_bus_get_devices(struct acpi_video_bus *video,
struct acpi_device *device)
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index fc2f26b9b407..c5fef01b3c95 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -250,7 +250,7 @@ static int __init acpi_backlight(char *str)
ACPI_VIDEO_BACKLIGHT_FORCE_VENDOR;
if (!strcmp("video", str))
acpi_video_support |=
- ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VIDEO;
+ ACPI_VIDEO_BACKLIGHT_FORCE_VIDEO;
}
return 1;
}
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 01c52c415bdc..73f883333a0d 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -57,6 +57,8 @@ config SATA_PMP
This option adds support for SATA Port Multipliers
(the SATA version of an ethernet hub, or SAS expander).
+comment "Controllers with non-SFF native interface"
+
config SATA_AHCI
tristate "AHCI SATA support"
depends on PCI
@@ -65,11 +67,11 @@ config SATA_AHCI
If unsure, say N.
-config SATA_SIL24
- tristate "Silicon Image 3124/3132 SATA support"
- depends on PCI
+config SATA_AHCI_PLATFORM
+ tristate "Platform AHCI SATA support"
help
- This option enables support for Silicon Image 3124/3132 Serial ATA.
+ This option enables support for Platform AHCI Serial ATA
+ controllers.
If unsure, say N.
@@ -82,6 +84,20 @@ config SATA_FSL
If unsure, say N.
+config SATA_INIC162X
+ tristate "Initio 162x SATA support"
+ depends on PCI
+ help
+ This option enables support for Initio 162x Serial ATA.
+
+config SATA_SIL24
+ tristate "Silicon Image 3124/3132 SATA support"
+ depends on PCI
+ help
+ This option enables support for Silicon Image 3124/3132 Serial ATA.
+
+ If unsure, say N.
+
config ATA_SFF
bool "ATA SFF support"
default y
@@ -102,15 +118,65 @@ config ATA_SFF
if ATA_SFF
-config SATA_SVW
- tristate "ServerWorks Frodo / Apple K2 SATA support"
+comment "SFF controllers with custom DMA interface"
+
+config PDC_ADMA
+ tristate "Pacific Digital ADMA support"
depends on PCI
help
- This option enables support for Broadcom/Serverworks/Apple K2
- SATA support.
+ This option enables support for Pacific Digital ADMA controllers
+
+ If unsure, say N.
+
+config PATA_MPC52xx
+ tristate "Freescale MPC52xx SoC internal IDE"
+ depends on PPC_MPC52xx && PPC_BESTCOMM
+ select PPC_BESTCOMM_ATA
+ help
+ This option enables support for integrated IDE controller
+ of the Freescale MPC52xx SoC.
+
+ If unsure, say N.
+
+config PATA_OCTEON_CF
+ tristate "OCTEON Boot Bus Compact Flash support"
+ depends on CPU_CAVIUM_OCTEON
+ help
+ This option enables a polled compact flash driver for use with
+ compact flash cards attached to the OCTEON boot bus.
+
+ If unsure, say N.
+
+config SATA_QSTOR
+ tristate "Pacific Digital SATA QStor support"
+ depends on PCI
+ help
+ This option enables support for Pacific Digital Serial ATA QStor.
+
+ If unsure, say N.
+
+config SATA_SX4
+ tristate "Promise SATA SX4 support (Experimental)"
+ depends on PCI && EXPERIMENTAL
+ help
+ This option enables support for Promise Serial ATA SX4.
If unsure, say N.
+config ATA_BMDMA
+ bool "ATA BMDMA support"
+ default y
+ help
+ This option adds support for SFF ATA controllers with BMDMA
+ capability.  BMDMA stands for bus-master DMA and is the
+ de-facto DMA interface for SFF controllers.
+
+ If unsure, say Y.
+
+if ATA_BMDMA
+
+comment "SATA SFF controllers with BMDMA"
+
config ATA_PIIX
tristate "Intel ESB, ICH, PIIX3, PIIX4 PATA/SATA support"
depends on PCI
@@ -138,22 +204,6 @@ config SATA_NV
If unsure, say N.
-config PDC_ADMA
- tristate "Pacific Digital ADMA support"
- depends on PCI
- help
- This option enables support for Pacific Digital ADMA controllers
-
- If unsure, say N.
-
-config SATA_QSTOR
- tristate "Pacific Digital SATA QStor support"
- depends on PCI
- help
- This option enables support for Pacific Digital Serial ATA QStor.
-
- If unsure, say N.
-
config SATA_PROMISE
tristate "Promise SATA TX2/TX4 support"
depends on PCI
@@ -162,14 +212,6 @@ config SATA_PROMISE
If unsure, say N.
-config SATA_SX4
- tristate "Promise SATA SX4 support (Experimental)"
- depends on PCI && EXPERIMENTAL
- help
- This option enables support for Promise Serial ATA SX4.
-
- If unsure, say N.
-
config SATA_SIL
tristate "Silicon Image SATA support"
depends on PCI
@@ -189,6 +231,15 @@ config SATA_SIS
enable the PATA_SIS driver in the config.
If unsure, say N.
+config SATA_SVW
+ tristate "ServerWorks Frodo / Apple K2 SATA support"
+ depends on PCI
+ help
+ This option enables support for Broadcom/Serverworks/Apple K2
+ SATA support.
+
+ If unsure, say N.
+
config SATA_ULI
tristate "ULi Electronics SATA support"
depends on PCI
@@ -213,20 +264,7 @@ config SATA_VITESSE
If unsure, say N.
-config SATA_INIC162X
- tristate "Initio 162x SATA support"
- depends on PCI
- help
- This option enables support for Initio 162x Serial ATA.
-
-config PATA_ACPI
- tristate "ACPI firmware driver for PATA"
- depends on ATA_ACPI
- help
- This option enables an ACPI method driver which drives
- motherboard PATA controller interfaces through the ACPI
- firmware in the BIOS. This driver can sometimes handle
- otherwise unsupported hardware.
+comment "PATA SFF controllers with BMDMA"
config PATA_ALI
tristate "ALi PATA support"
@@ -254,40 +292,30 @@ config PATA_ARTOP
If unsure, say N.
-config PATA_ATP867X
- tristate "ARTOP/Acard ATP867X PATA support"
+config PATA_ATIIXP
+ tristate "ATI PATA support"
depends on PCI
help
- This option enables support for ARTOP/Acard ATP867X PATA
- controllers.
-
- If unsure, say N.
-
-config PATA_AT32
- tristate "Atmel AVR32 PATA support (Experimental)"
- depends on AVR32 && PLATFORM_AT32AP && EXPERIMENTAL
- help
- This option enables support for the IDE devices on the
- Atmel AT32AP platform.
+ This option enables support for the ATI ATA interfaces
+ found on the many ATI chipsets.
If unsure, say N.
-config PATA_ATIIXP
- tristate "ATI PATA support"
+config PATA_ATP867X
+ tristate "ARTOP/Acard ATP867X PATA support"
depends on PCI
help
- This option enables support for the ATI ATA interfaces
- found on the many ATI chipsets.
+ This option enables support for ARTOP/Acard ATP867X PATA
+ controllers.
If unsure, say N.
-config PATA_CMD640_PCI
- tristate "CMD640 PCI PATA support (Experimental)"
- depends on PCI && EXPERIMENTAL
+config PATA_BF54X
+ tristate "Blackfin 54x ATAPI support"
+ depends on BF542 || BF548 || BF549
help
- This option enables support for the CMD640 PCI IDE
- interface chip. Only the primary channel is currently
- supported.
+ This option enables support for the built-in ATAPI controller on
+ Blackfin 54x family chips.
If unsure, say N.
@@ -354,15 +382,6 @@ config PATA_EFAR
If unsure, say N.
-config ATA_GENERIC
- tristate "Generic ATA support"
- depends on PCI
- help
- This option enables support for generic BIOS configured
- ATA controllers via the new ATA layer
-
- If unsure, say N.
-
config PATA_HPT366
tristate "HPT 366/368 PATA support"
depends on PCI
@@ -407,12 +426,20 @@ config PATA_HPT3X3_DMA
controllers. Enable with care as there are still some
problems with DMA on this chipset.
-config PATA_ISAPNP
- tristate "ISA Plug and Play PATA support"
- depends on ISAPNP
+config PATA_ICSIDE
+ tristate "Acorn ICS PATA support"
+ depends on ARM && ARCH_ACORN
help
- This option enables support for ISA plug & play ATA
- controllers such as those found on old soundcards.
+ On Acorn systems, say Y here if you wish to use the ICS PATA
+ interface card. This is not required for ICS partition support.
+ If you are unsure, say N to this.
+
+config PATA_IT8213
+ tristate "IT8213 PATA support (Experimental)"
+ depends on PCI && EXPERIMENTAL
+ help
+ This option enables support for the ITE 821 PATA
+ controllers via the new ATA layer.
If unsure, say N.
@@ -426,15 +453,6 @@ config PATA_IT821X
If unsure, say N.
-config PATA_IT8213
- tristate "IT8213 PATA support (Experimental)"
- depends on PCI && EXPERIMENTAL
- help
- This option enables support for the ITE 821 PATA
- controllers via the new ATA layer.
-
- If unsure, say N.
-
config PATA_JMICRON
tristate "JMicron PATA support"
depends on PCI
@@ -444,23 +462,14 @@ config PATA_JMICRON
If unsure, say N.
-config PATA_LEGACY
- tristate "Legacy ISA PATA support (Experimental)"
- depends on (ISA || PCI) && EXPERIMENTAL
- help
- This option enables support for ISA/VLB/PCI bus legacy PATA
- ports and allows them to be accessed via the new ATA layer.
-
- If unsure, say N.
-
-config PATA_TRIFLEX
- tristate "Compaq Triflex PATA support"
- depends on PCI
+config PATA_MACIO
+ tristate "Apple PowerMac/PowerBook internal 'MacIO' IDE"
+ depends on PPC_PMAC
help
- Enable support for the Compaq 'Triflex' IDE controller as found
- on many Compaq Pentium-Pro systems, via the new ATA layer.
-
- If unsure, say N.
+ Most IDE capable PowerMacs have IDE busses driven by a variant
+ of this controller which is part of the Apple chipset used on
+ most PowerMac models. Some models have multiple busses using
+ different chipsets, though generally, MacIO is one of them.
config PATA_MARVELL
tristate "Marvell PATA support via legacy mode"
@@ -473,32 +482,6 @@ config PATA_MARVELL
If unsure, say N.
-config PATA_MPC52xx
- tristate "Freescale MPC52xx SoC internal IDE"
- depends on PPC_MPC52xx && PPC_BESTCOMM
- select PPC_BESTCOMM_ATA
- help
- This option enables support for integrated IDE controller
- of the Freescale MPC52xx SoC.
-
- If unsure, say N.
-
-config PATA_MPIIX
- tristate "Intel PATA MPIIX support"
- depends on PCI
- help
- This option enables support for MPIIX PATA support.
-
- If unsure, say N.
-
-config PATA_OLDPIIX
- tristate "Intel PATA old PIIX support"
- depends on PCI
- help
- This option enables support for early PIIX PATA support.
-
- If unsure, say N.
-
config PATA_NETCELL
tristate "NETCELL Revolution RAID support"
depends on PCI
@@ -517,15 +500,6 @@ config PATA_NINJA32
If unsure, say N.
-config PATA_NS87410
- tristate "Nat Semi NS87410 PATA support"
- depends on PCI
- help
- This option enables support for the National Semiconductor
- NS87410 PCI-IDE controller.
-
- If unsure, say N.
-
config PATA_NS87415
tristate "Nat Semi NS87415 PATA support"
depends on PCI
@@ -535,12 +509,11 @@ config PATA_NS87415
If unsure, say N.
-config PATA_OPTI
- tristate "OPTI621/6215 PATA support (Very Experimental)"
- depends on PCI && EXPERIMENTAL
+config PATA_OLDPIIX
+ tristate "Intel PATA old PIIX support"
+ depends on PCI
help
- This option enables full PIO support for the early Opti ATA
- controllers found on some old motherboards.
+ This option enables support for early PIIX PATA support.
If unsure, say N.
@@ -554,24 +527,6 @@ config PATA_OPTIDMA
If unsure, say N.
-config PATA_PALMLD
- tristate "Palm LifeDrive PATA support"
- depends on MACH_PALMLD
- help
- This option enables support for Palm LifeDrive's internal ATA
- port via the new ATA layer.
-
- If unsure, say N.
-
-config PATA_PCMCIA
- tristate "PCMCIA PATA support"
- depends on PCMCIA
- help
- This option enables support for PCMCIA ATA interfaces, including
- compact flash card adapters via the new ATA layer.
-
- If unsure, say N.
-
config PATA_PDC2027X
tristate "Promise PATA 2027x support"
depends on PCI
@@ -589,12 +544,6 @@ config PATA_PDC_OLD
If unsure, say N.
-config PATA_QDI
- tristate "QDI VLB PATA support"
- depends on ISA
- help
- Support for QDI 6500 and 6580 PATA controllers on VESA local bus.
-
config PATA_RADISYS
tristate "RADISYS 82600 PATA support (Experimental)"
depends on PCI && EXPERIMENTAL
@@ -604,15 +553,6 @@ config PATA_RADISYS
If unsure, say N.
-config PATA_RB532
- tristate "RouterBoard 532 PATA CompactFlash support"
- depends on MIKROTIK_RB532
- help
- This option enables support for the RouterBoard 532
- PATA CompactFlash controller.
-
- If unsure, say N.
-
config PATA_RDC
tristate "RDC PATA support"
depends on PCI
@@ -623,21 +563,30 @@ config PATA_RDC
If unsure, say N.
-config PATA_RZ1000
- tristate "PC Tech RZ1000 PATA support"
+config PATA_SC1200
+ tristate "SC1200 PATA support"
depends on PCI
help
- This option enables basic support for the PC Tech RZ1000/1
- PATA controllers via the new ATA layer
+ This option enables support for the NatSemi/AMD SC1200 SoC
+ companion chip used with the Geode processor family.
If unsure, say N.
-config PATA_SC1200
- tristate "SC1200 PATA support"
+config PATA_SCC
+ tristate "Toshiba's Cell Reference Set IDE support"
+ depends on PCI && PPC_CELLEB
+ help
+ This option enables support for the built-in IDE controller on
+ Toshiba Cell Reference Board.
+
+ If unsure, say N.
+
+config PATA_SCH
+ tristate "Intel SCH PATA support"
depends on PCI
help
- This option enables support for the NatSemi/AMD SC1200 SoC
- companion chip used with the Geode processor family.
+ This option enables support for Intel SCH PATA on the Intel
+ SCH (US15W, US15L, UL11L) series host controllers.
If unsure, say N.
@@ -675,6 +624,15 @@ config PATA_TOSHIBA
If unsure, say N.
+config PATA_TRIFLEX
+ tristate "Compaq Triflex PATA support"
+ depends on PCI
+ help
+ Enable support for the Compaq 'Triflex' IDE controller as found
+ on many Compaq Pentium-Pro systems, via the new ATA layer.
+
+ If unsure, say N.
+
config PATA_VIA
tristate "VIA PATA support"
depends on PCI
@@ -693,12 +651,99 @@ config PATA_WINBOND
If unsure, say N.
-config PATA_WINBOND_VLB
- tristate "Winbond W83759A VLB PATA support (Experimental)"
- depends on ISA && EXPERIMENTAL
+endif # ATA_BMDMA
+
+comment "PIO-only SFF controllers"
+
+config PATA_AT32
+ tristate "Atmel AVR32 PATA support (Experimental)"
+ depends on AVR32 && PLATFORM_AT32AP && EXPERIMENTAL
help
- Support for the Winbond W83759A controller on Vesa Local Bus
- systems.
+ This option enables support for the IDE devices on the
+ Atmel AT32AP platform.
+
+ If unsure, say N.
+
+config PATA_AT91
+ tristate "PATA support for AT91SAM9260"
+ depends on ARM && ARCH_AT91
+ help
+ This option enables support for IDE devices on the Atmel AT91SAM9260 SoC.
+
+ If unsure, say N.
+
+config PATA_CMD640_PCI
+ tristate "CMD640 PCI PATA support (Experimental)"
+ depends on PCI && EXPERIMENTAL
+ help
+ This option enables support for the CMD640 PCI IDE
+ interface chip. Only the primary channel is currently
+ supported.
+
+ If unsure, say N.
+
+config PATA_ISAPNP
+ tristate "ISA Plug and Play PATA support"
+ depends on ISAPNP
+ help
+ This option enables support for ISA plug & play ATA
+ controllers such as those found on old soundcards.
+
+ If unsure, say N.
+
+config PATA_IXP4XX_CF
+ tristate "IXP4XX Compact Flash support"
+ depends on ARCH_IXP4XX
+ help
+ This option enables support for a Compact Flash connected on
+ the ixp4xx expansion bus. This driver was written with
+ Loft/Avila boards in mind but can work with others.
+
+ If unsure, say N.
+
+config PATA_MPIIX
+ tristate "Intel PATA MPIIX support"
+ depends on PCI
+ help
+ This option enables support for MPIIX PATA support.
+
+ If unsure, say N.
+
+config PATA_NS87410
+ tristate "Nat Semi NS87410 PATA support"
+ depends on PCI
+ help
+ This option enables support for the National Semiconductor
+ NS87410 PCI-IDE controller.
+
+ If unsure, say N.
+
+config PATA_OPTI
+ tristate "OPTI621/6215 PATA support (Very Experimental)"
+ depends on PCI && EXPERIMENTAL
+ help
+ This option enables full PIO support for the early Opti ATA
+ controllers found on some old motherboards.
+
+ If unsure, say N.
+
+config PATA_PALMLD
+ tristate "Palm LifeDrive PATA support"
+ depends on MACH_PALMLD
+ help
+ This option enables support for Palm LifeDrive's internal ATA
+ port via the new ATA layer.
+
+ If unsure, say N.
+
+config PATA_PCMCIA
+ tristate "PCMCIA PATA support"
+ depends on PCMCIA
+ help
+ This option enables support for PCMCIA ATA interfaces, including
+ compact flash card adapters via the new ATA layer.
+
+ If unsure, say N.
config HAVE_PATA_PLATFORM
bool
@@ -717,14 +762,6 @@ config PATA_PLATFORM
If unsure, say N.
-config PATA_AT91
- tristate "PATA support for AT91SAM9260"
- depends on ARM && ARCH_AT91
- help
- This option enables support for IDE devices on the Atmel AT91SAM9260 SoC.
-
- If unsure, say N.
-
config PATA_OF_PLATFORM
tristate "OpenFirmware platform device PATA support"
depends on PATA_PLATFORM && PPC_OF
@@ -735,69 +772,65 @@ config PATA_OF_PLATFORM
If unsure, say N.
-config PATA_ICSIDE
- tristate "Acorn ICS PATA support"
- depends on ARM && ARCH_ACORN
+config PATA_QDI
+ tristate "QDI VLB PATA support"
+ depends on ISA
help
- On Acorn systems, say Y here if you wish to use the ICS PATA
- interface card. This is not required for ICS partition support.
- If you are unsure, say N to this.
+ Support for QDI 6500 and 6580 PATA controllers on VESA local bus.
-config PATA_IXP4XX_CF
- tristate "IXP4XX Compact Flash support"
- depends on ARCH_IXP4XX
+config PATA_RB532
+ tristate "RouterBoard 532 PATA CompactFlash support"
+ depends on MIKROTIK_RB532
help
- This option enables support for a Compact Flash connected on
- the ixp4xx expansion bus. This driver had been written for
- Loft/Avila boards in mind but can work with others.
+ This option enables support for the RouterBoard 532
+ PATA CompactFlash controller.
If unsure, say N.
-config PATA_OCTEON_CF
- tristate "OCTEON Boot Bus Compact Flash support"
- depends on CPU_CAVIUM_OCTEON
+config PATA_RZ1000
+ tristate "PC Tech RZ1000 PATA support"
+ depends on PCI
help
- This option enables a polled compact flash driver for use with
- compact flash cards attached to the OCTEON boot bus.
+ This option enables basic support for the PC Tech RZ1000/1
+ PATA controllers via the new ATA layer.
If unsure, say N.
-config PATA_SCC
- tristate "Toshiba's Cell Reference Set IDE support"
- depends on PCI && PPC_CELLEB
+config PATA_WINBOND_VLB
+ tristate "Winbond W83759A VLB PATA support (Experimental)"
+ depends on ISA && EXPERIMENTAL
help
- This option enables support for the built-in IDE controller on
- Toshiba Cell Reference Board.
+ Support for the Winbond W83759A controller on VESA Local Bus
+ systems.
- If unsure, say N.
+comment "Generic fallback / legacy drivers"
-config PATA_SCH
- tristate "Intel SCH PATA support"
- depends on PCI
+config PATA_ACPI
+ tristate "ACPI firmware driver for PATA"
+ depends on ATA_ACPI && ATA_BMDMA
help
- This option enables support for Intel SCH PATA on the Intel
- SCH (US15W, US15L, UL11L) series host controllers.
-
- If unsure, say N.
+ This option enables an ACPI method driver which drives
+ motherboard PATA controller interfaces through the ACPI
+ firmware in the BIOS. This driver can sometimes handle
+ otherwise unsupported hardware.
-config PATA_BF54X
- tristate "Blackfin 54x ATAPI support"
- depends on BF542 || BF548 || BF549
+config ATA_GENERIC
+ tristate "Generic ATA support"
+ depends on PCI && ATA_BMDMA
help
- This option enables support for the built-in ATAPI controller on
- Blackfin 54x family chips.
+ This option enables support for generic BIOS-configured
+ ATA controllers via the new ATA layer.
If unsure, say N.
-config PATA_MACIO
- tristate "Apple PowerMac/PowerBook internal 'MacIO' IDE"
- depends on PPC_PMAC
+config PATA_LEGACY
+ tristate "Legacy ISA PATA support (Experimental)"
+ depends on (ISA || PCI) && EXPERIMENTAL
help
- Most IDE capable PowerMacs have IDE busses driven by a variant
- of this controller which is part of the Apple chipset used on
- most PowerMac models. Some models have multiple busses using
- different chipsets, though generally, MacIO is one of them.
+ This option enables support for ISA/VLB/PCI bus legacy PATA
+ ports and allows them to be accessed via the new ATA layer.
+ If unsure, say N.
endif # ATA_SFF
endif # ATA
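For orientation, the Kconfig hunks above split the SFF PATA drivers into three groups: BMDMA-capable controllers (kept inside the ATA_BMDMA guard), PIO-only controllers, and generic/legacy fallbacks. A rough sketch of the resulting nesting, assuming the enclosing if/menuconfig lines from the surrounding drivers/ata/Kconfig and listing only a few representative options:

	if ATA
	  if ATA_SFF
	    if ATA_BMDMA
	      # DMA-capable SFF PATA drivers, e.g. PATA_RDC .. PATA_WINBOND
	    endif # ATA_BMDMA

	    comment "PIO-only SFF controllers"
	      # e.g. PATA_AT32, PATA_AT91, PATA_CMD640_PCI .. PATA_WINBOND_VLB

	    comment "Generic fallback / legacy drivers"
	      # PATA_ACPI, ATA_GENERIC, PATA_LEGACY
	  endif # ATA_SFF
	endif # ATA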
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index fc936d4471d6..7ef89d73df63 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -1,32 +1,39 @@
obj-$(CONFIG_ATA) += libata.o
-obj-$(CONFIG_SATA_AHCI) += ahci.o
-obj-$(CONFIG_SATA_SVW) += sata_svw.o
+# non-SFF interface
+obj-$(CONFIG_SATA_AHCI) += ahci.o libahci.o
+obj-$(CONFIG_SATA_AHCI_PLATFORM) += ahci_platform.o libahci.o
+obj-$(CONFIG_SATA_FSL) += sata_fsl.o
+obj-$(CONFIG_SATA_INIC162X) += sata_inic162x.o
+obj-$(CONFIG_SATA_SIL24) += sata_sil24.o
+
+# SFF w/ custom DMA
+obj-$(CONFIG_PDC_ADMA) += pdc_adma.o
+obj-$(CONFIG_PATA_MPC52xx) += pata_mpc52xx.o
+obj-$(CONFIG_PATA_OCTEON_CF) += pata_octeon_cf.o
+obj-$(CONFIG_SATA_QSTOR) += sata_qstor.o
+obj-$(CONFIG_SATA_SX4) += sata_sx4.o
+
+# SFF SATA w/ BMDMA
obj-$(CONFIG_ATA_PIIX) += ata_piix.o
+obj-$(CONFIG_SATA_MV) += sata_mv.o
+obj-$(CONFIG_SATA_NV) += sata_nv.o
obj-$(CONFIG_SATA_PROMISE) += sata_promise.o
-obj-$(CONFIG_SATA_QSTOR) += sata_qstor.o
obj-$(CONFIG_SATA_SIL) += sata_sil.o
-obj-$(CONFIG_SATA_SIL24) += sata_sil24.o
-obj-$(CONFIG_SATA_VIA) += sata_via.o
-obj-$(CONFIG_SATA_VITESSE) += sata_vsc.o
obj-$(CONFIG_SATA_SIS) += sata_sis.o
-obj-$(CONFIG_SATA_SX4) += sata_sx4.o
-obj-$(CONFIG_SATA_NV) += sata_nv.o
+obj-$(CONFIG_SATA_SVW) += sata_svw.o
obj-$(CONFIG_SATA_ULI) += sata_uli.o
-obj-$(CONFIG_SATA_MV) += sata_mv.o
-obj-$(CONFIG_SATA_INIC162X) += sata_inic162x.o
-obj-$(CONFIG_PDC_ADMA) += pdc_adma.o
-obj-$(CONFIG_SATA_FSL) += sata_fsl.o
-obj-$(CONFIG_PATA_MACIO) += pata_macio.o
+obj-$(CONFIG_SATA_VIA) += sata_via.o
+obj-$(CONFIG_SATA_VITESSE) += sata_vsc.o
+# SFF PATA w/ BMDMA
obj-$(CONFIG_PATA_ALI) += pata_ali.o
obj-$(CONFIG_PATA_AMD) += pata_amd.o
obj-$(CONFIG_PATA_ARTOP) += pata_artop.o
-obj-$(CONFIG_PATA_ATP867X) += pata_atp867x.o
-obj-$(CONFIG_PATA_AT32) += pata_at32.o
obj-$(CONFIG_PATA_ATIIXP) += pata_atiixp.o
-obj-$(CONFIG_PATA_CMD640_PCI) += pata_cmd640.o
+obj-$(CONFIG_PATA_ATP867X) += pata_atp867x.o
+obj-$(CONFIG_PATA_BF54X) += pata_bf54x.o
obj-$(CONFIG_PATA_CMD64X) += pata_cmd64x.o
obj-$(CONFIG_PATA_CS5520) += pata_cs5520.o
obj-$(CONFIG_PATA_CS5530) += pata_cs5530.o
@@ -38,47 +45,50 @@ obj-$(CONFIG_PATA_HPT366) += pata_hpt366.o
obj-$(CONFIG_PATA_HPT37X) += pata_hpt37x.o
obj-$(CONFIG_PATA_HPT3X2N) += pata_hpt3x2n.o
obj-$(CONFIG_PATA_HPT3X3) += pata_hpt3x3.o
-obj-$(CONFIG_PATA_ISAPNP) += pata_isapnp.o
-obj-$(CONFIG_PATA_IT821X) += pata_it821x.o
+obj-$(CONFIG_PATA_ICSIDE) += pata_icside.o
obj-$(CONFIG_PATA_IT8213) += pata_it8213.o
+obj-$(CONFIG_PATA_IT821X) += pata_it821x.o
obj-$(CONFIG_PATA_JMICRON) += pata_jmicron.o
+obj-$(CONFIG_PATA_MACIO) += pata_macio.o
+obj-$(CONFIG_PATA_MARVELL) += pata_marvell.o
obj-$(CONFIG_PATA_NETCELL) += pata_netcell.o
obj-$(CONFIG_PATA_NINJA32) += pata_ninja32.o
-obj-$(CONFIG_PATA_NS87410) += pata_ns87410.o
obj-$(CONFIG_PATA_NS87415) += pata_ns87415.o
-obj-$(CONFIG_PATA_OPTI) += pata_opti.o
-obj-$(CONFIG_PATA_OPTIDMA) += pata_optidma.o
-obj-$(CONFIG_PATA_MPC52xx) += pata_mpc52xx.o
-obj-$(CONFIG_PATA_MARVELL) += pata_marvell.o
-obj-$(CONFIG_PATA_MPIIX) += pata_mpiix.o
obj-$(CONFIG_PATA_OLDPIIX) += pata_oldpiix.o
-obj-$(CONFIG_PATA_PALMLD) += pata_palmld.o
-obj-$(CONFIG_PATA_PCMCIA) += pata_pcmcia.o
+obj-$(CONFIG_PATA_OPTIDMA) += pata_optidma.o
obj-$(CONFIG_PATA_PDC2027X) += pata_pdc2027x.o
obj-$(CONFIG_PATA_PDC_OLD) += pata_pdc202xx_old.o
-obj-$(CONFIG_PATA_QDI) += pata_qdi.o
obj-$(CONFIG_PATA_RADISYS) += pata_radisys.o
-obj-$(CONFIG_PATA_RB532) += pata_rb532_cf.o
obj-$(CONFIG_PATA_RDC) += pata_rdc.o
-obj-$(CONFIG_PATA_RZ1000) += pata_rz1000.o
obj-$(CONFIG_PATA_SC1200) += pata_sc1200.o
+obj-$(CONFIG_PATA_SCC) += pata_scc.o
+obj-$(CONFIG_PATA_SCH) += pata_sch.o
obj-$(CONFIG_PATA_SERVERWORKS) += pata_serverworks.o
obj-$(CONFIG_PATA_SIL680) += pata_sil680.o
+obj-$(CONFIG_PATA_SIS) += pata_sis.o
obj-$(CONFIG_PATA_TOSHIBA) += pata_piccolo.o
+obj-$(CONFIG_PATA_TRIFLEX) += pata_triflex.o
obj-$(CONFIG_PATA_VIA) += pata_via.o
obj-$(CONFIG_PATA_WINBOND) += pata_sl82c105.o
-obj-$(CONFIG_PATA_WINBOND_VLB) += pata_winbond.o
-obj-$(CONFIG_PATA_SIS) += pata_sis.o
-obj-$(CONFIG_PATA_TRIFLEX) += pata_triflex.o
+
+# SFF PIO only
+obj-$(CONFIG_PATA_AT32) += pata_at32.o
+obj-$(CONFIG_PATA_AT91) += pata_at91.o
+obj-$(CONFIG_PATA_CMD640_PCI) += pata_cmd640.o
+obj-$(CONFIG_PATA_ISAPNP) += pata_isapnp.o
obj-$(CONFIG_PATA_IXP4XX_CF) += pata_ixp4xx_cf.o
-obj-$(CONFIG_PATA_SCC) += pata_scc.o
-obj-$(CONFIG_PATA_SCH) += pata_sch.o
-obj-$(CONFIG_PATA_BF54X) += pata_bf54x.o
-obj-$(CONFIG_PATA_OCTEON_CF) += pata_octeon_cf.o
+obj-$(CONFIG_PATA_MPIIX) += pata_mpiix.o
+obj-$(CONFIG_PATA_NS87410) += pata_ns87410.o
+obj-$(CONFIG_PATA_OPTI) += pata_opti.o
+obj-$(CONFIG_PATA_PCMCIA) += pata_pcmcia.o
+obj-$(CONFIG_PATA_PALMLD) += pata_palmld.o
obj-$(CONFIG_PATA_PLATFORM) += pata_platform.o
-obj-$(CONFIG_PATA_AT91) += pata_at91.o
obj-$(CONFIG_PATA_OF_PLATFORM) += pata_of_platform.o
-obj-$(CONFIG_PATA_ICSIDE) += pata_icside.o
+obj-$(CONFIG_PATA_QDI) += pata_qdi.o
+obj-$(CONFIG_PATA_RB532) += pata_rb532_cf.o
+obj-$(CONFIG_PATA_RZ1000) += pata_rz1000.o
+obj-$(CONFIG_PATA_WINBOND_VLB) += pata_winbond.o
+
# Should be last but two libata driver
obj-$(CONFIG_PATA_ACPI) += pata_acpi.o
# Should be last but one libata driver
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 5326af28a410..8ca16f54e1ed 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -46,403 +46,48 @@
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>
+#include "ahci.h"
#define DRV_NAME "ahci"
#define DRV_VERSION "3.0"
-/* Enclosure Management Control */
-#define EM_CTRL_MSG_TYPE 0x000f0000
-
-/* Enclosure Management LED Message Type */
-#define EM_MSG_LED_HBA_PORT 0x0000000f
-#define EM_MSG_LED_PMP_SLOT 0x0000ff00
-#define EM_MSG_LED_VALUE 0xffff0000
-#define EM_MSG_LED_VALUE_ACTIVITY 0x00070000
-#define EM_MSG_LED_VALUE_OFF 0xfff80000
-#define EM_MSG_LED_VALUE_ON 0x00010000
-
-static int ahci_skip_host_reset;
-static int ahci_ignore_sss;
-
-module_param_named(skip_host_reset, ahci_skip_host_reset, int, 0444);
-MODULE_PARM_DESC(skip_host_reset, "skip global host reset (0=don't skip, 1=skip)");
-
-module_param_named(ignore_sss, ahci_ignore_sss, int, 0444);
-MODULE_PARM_DESC(ignore_sss, "Ignore staggered spinup flag (0=don't ignore, 1=ignore)");
-
-static int ahci_enable_alpm(struct ata_port *ap,
- enum link_pm policy);
-static void ahci_disable_alpm(struct ata_port *ap);
-static ssize_t ahci_led_show(struct ata_port *ap, char *buf);
-static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
- size_t size);
-static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
- ssize_t size);
-
enum {
AHCI_PCI_BAR = 5,
- AHCI_MAX_PORTS = 32,
- AHCI_MAX_SG = 168, /* hardware max is 64K */
- AHCI_DMA_BOUNDARY = 0xffffffff,
- AHCI_MAX_CMDS = 32,
- AHCI_CMD_SZ = 32,
- AHCI_CMD_SLOT_SZ = AHCI_MAX_CMDS * AHCI_CMD_SZ,
- AHCI_RX_FIS_SZ = 256,
- AHCI_CMD_TBL_CDB = 0x40,
- AHCI_CMD_TBL_HDR_SZ = 0x80,
- AHCI_CMD_TBL_SZ = AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16),
- AHCI_CMD_TBL_AR_SZ = AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS,
- AHCI_PORT_PRIV_DMA_SZ = AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ +
- AHCI_RX_FIS_SZ,
- AHCI_PORT_PRIV_FBS_DMA_SZ = AHCI_CMD_SLOT_SZ +
- AHCI_CMD_TBL_AR_SZ +
- (AHCI_RX_FIS_SZ * 16),
- AHCI_IRQ_ON_SG = (1 << 31),
- AHCI_CMD_ATAPI = (1 << 5),
- AHCI_CMD_WRITE = (1 << 6),
- AHCI_CMD_PREFETCH = (1 << 7),
- AHCI_CMD_RESET = (1 << 8),
- AHCI_CMD_CLR_BUSY = (1 << 10),
-
- RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */
- RX_FIS_SDB = 0x58, /* offset of SDB FIS data */
- RX_FIS_UNK = 0x60, /* offset of Unknown FIS data */
-
- board_ahci = 0,
- board_ahci_vt8251 = 1,
- board_ahci_ign_iferr = 2,
- board_ahci_sb600 = 3,
- board_ahci_mv = 4,
- board_ahci_sb700 = 5, /* for SB700 and SB800 */
- board_ahci_mcp65 = 6,
- board_ahci_nopmp = 7,
- board_ahci_yesncq = 8,
- board_ahci_nosntf = 9,
-
- /* global controller registers */
- HOST_CAP = 0x00, /* host capabilities */
- HOST_CTL = 0x04, /* global host control */
- HOST_IRQ_STAT = 0x08, /* interrupt status */
- HOST_PORTS_IMPL = 0x0c, /* bitmap of implemented ports */
- HOST_VERSION = 0x10, /* AHCI spec. version compliancy */
- HOST_EM_LOC = 0x1c, /* Enclosure Management location */
- HOST_EM_CTL = 0x20, /* Enclosure Management Control */
- HOST_CAP2 = 0x24, /* host capabilities, extended */
-
- /* HOST_CTL bits */
- HOST_RESET = (1 << 0), /* reset controller; self-clear */
- HOST_IRQ_EN = (1 << 1), /* global IRQ enable */
- HOST_AHCI_EN = (1 << 31), /* AHCI enabled */
-
- /* HOST_CAP bits */
- HOST_CAP_SXS = (1 << 5), /* Supports External SATA */
- HOST_CAP_EMS = (1 << 6), /* Enclosure Management support */
- HOST_CAP_CCC = (1 << 7), /* Command Completion Coalescing */
- HOST_CAP_PART = (1 << 13), /* Partial state capable */
- HOST_CAP_SSC = (1 << 14), /* Slumber state capable */
- HOST_CAP_PIO_MULTI = (1 << 15), /* PIO multiple DRQ support */
- HOST_CAP_FBS = (1 << 16), /* FIS-based switching support */
- HOST_CAP_PMP = (1 << 17), /* Port Multiplier support */
- HOST_CAP_ONLY = (1 << 18), /* Supports AHCI mode only */
- HOST_CAP_CLO = (1 << 24), /* Command List Override support */
- HOST_CAP_LED = (1 << 25), /* Supports activity LED */
- HOST_CAP_ALPM = (1 << 26), /* Aggressive Link PM support */
- HOST_CAP_SSS = (1 << 27), /* Staggered Spin-up */
- HOST_CAP_MPS = (1 << 28), /* Mechanical presence switch */
- HOST_CAP_SNTF = (1 << 29), /* SNotification register */
- HOST_CAP_NCQ = (1 << 30), /* Native Command Queueing */
- HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */
-
- /* HOST_CAP2 bits */
- HOST_CAP2_BOH = (1 << 0), /* BIOS/OS handoff supported */
- HOST_CAP2_NVMHCI = (1 << 1), /* NVMHCI supported */
- HOST_CAP2_APST = (1 << 2), /* Automatic partial to slumber */
-
- /* registers for each SATA port */
- PORT_LST_ADDR = 0x00, /* command list DMA addr */
- PORT_LST_ADDR_HI = 0x04, /* command list DMA addr hi */
- PORT_FIS_ADDR = 0x08, /* FIS rx buf addr */
- PORT_FIS_ADDR_HI = 0x0c, /* FIS rx buf addr hi */
- PORT_IRQ_STAT = 0x10, /* interrupt status */
- PORT_IRQ_MASK = 0x14, /* interrupt enable/disable mask */
- PORT_CMD = 0x18, /* port command */
- PORT_TFDATA = 0x20, /* taskfile data */
- PORT_SIG = 0x24, /* device TF signature */
- PORT_CMD_ISSUE = 0x38, /* command issue */
- PORT_SCR_STAT = 0x28, /* SATA phy register: SStatus */
- PORT_SCR_CTL = 0x2c, /* SATA phy register: SControl */
- PORT_SCR_ERR = 0x30, /* SATA phy register: SError */
- PORT_SCR_ACT = 0x34, /* SATA phy register: SActive */
- PORT_SCR_NTF = 0x3c, /* SATA phy register: SNotification */
- PORT_FBS = 0x40, /* FIS-based Switching */
-
- /* PORT_IRQ_{STAT,MASK} bits */
- PORT_IRQ_COLD_PRES = (1 << 31), /* cold presence detect */
- PORT_IRQ_TF_ERR = (1 << 30), /* task file error */
- PORT_IRQ_HBUS_ERR = (1 << 29), /* host bus fatal error */
- PORT_IRQ_HBUS_DATA_ERR = (1 << 28), /* host bus data error */
- PORT_IRQ_IF_ERR = (1 << 27), /* interface fatal error */
- PORT_IRQ_IF_NONFATAL = (1 << 26), /* interface non-fatal error */
- PORT_IRQ_OVERFLOW = (1 << 24), /* xfer exhausted available S/G */
- PORT_IRQ_BAD_PMP = (1 << 23), /* incorrect port multiplier */
-
- PORT_IRQ_PHYRDY = (1 << 22), /* PhyRdy changed */
- PORT_IRQ_DEV_ILCK = (1 << 7), /* device interlock */
- PORT_IRQ_CONNECT = (1 << 6), /* port connect change status */
- PORT_IRQ_SG_DONE = (1 << 5), /* descriptor processed */
- PORT_IRQ_UNK_FIS = (1 << 4), /* unknown FIS rx'd */
- PORT_IRQ_SDB_FIS = (1 << 3), /* Set Device Bits FIS rx'd */
- PORT_IRQ_DMAS_FIS = (1 << 2), /* DMA Setup FIS rx'd */
- PORT_IRQ_PIOS_FIS = (1 << 1), /* PIO Setup FIS rx'd */
- PORT_IRQ_D2H_REG_FIS = (1 << 0), /* D2H Register FIS rx'd */
-
- PORT_IRQ_FREEZE = PORT_IRQ_HBUS_ERR |
- PORT_IRQ_IF_ERR |
- PORT_IRQ_CONNECT |
- PORT_IRQ_PHYRDY |
- PORT_IRQ_UNK_FIS |
- PORT_IRQ_BAD_PMP,
- PORT_IRQ_ERROR = PORT_IRQ_FREEZE |
- PORT_IRQ_TF_ERR |
- PORT_IRQ_HBUS_DATA_ERR,
- DEF_PORT_IRQ = PORT_IRQ_ERROR | PORT_IRQ_SG_DONE |
- PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS |
- PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS,
-
- /* PORT_CMD bits */
- PORT_CMD_ASP = (1 << 27), /* Aggressive Slumber/Partial */
- PORT_CMD_ALPE = (1 << 26), /* Aggressive Link PM enable */
- PORT_CMD_ATAPI = (1 << 24), /* Device is ATAPI */
- PORT_CMD_FBSCP = (1 << 22), /* FBS Capable Port */
- PORT_CMD_PMP = (1 << 17), /* PMP attached */
- PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */
- PORT_CMD_FIS_ON = (1 << 14), /* FIS DMA engine running */
- PORT_CMD_FIS_RX = (1 << 4), /* Enable FIS receive DMA engine */
- PORT_CMD_CLO = (1 << 3), /* Command list override */
- PORT_CMD_POWER_ON = (1 << 2), /* Power up device */
- PORT_CMD_SPIN_UP = (1 << 1), /* Spin up device */
- PORT_CMD_START = (1 << 0), /* Enable port DMA engine */
-
- PORT_CMD_ICC_MASK = (0xf << 28), /* i/f ICC state mask */
- PORT_CMD_ICC_ACTIVE = (0x1 << 28), /* Put i/f in active state */
- PORT_CMD_ICC_PARTIAL = (0x2 << 28), /* Put i/f in partial state */
- PORT_CMD_ICC_SLUMBER = (0x6 << 28), /* Put i/f in slumber state */
-
- PORT_FBS_DWE_OFFSET = 16, /* FBS device with error offset */
- PORT_FBS_ADO_OFFSET = 12, /* FBS active dev optimization offset */
- PORT_FBS_DEV_OFFSET = 8, /* FBS device to issue offset */
- PORT_FBS_DEV_MASK = (0xf << PORT_FBS_DEV_OFFSET), /* FBS.DEV */
- PORT_FBS_SDE = (1 << 2), /* FBS single device error */
- PORT_FBS_DEC = (1 << 1), /* FBS device error clear */
- PORT_FBS_EN = (1 << 0), /* Enable FBS */
-
- /* hpriv->flags bits */
- AHCI_HFLAG_NO_NCQ = (1 << 0),
- AHCI_HFLAG_IGN_IRQ_IF_ERR = (1 << 1), /* ignore IRQ_IF_ERR */
- AHCI_HFLAG_IGN_SERR_INTERNAL = (1 << 2), /* ignore SERR_INTERNAL */
- AHCI_HFLAG_32BIT_ONLY = (1 << 3), /* force 32bit */
- AHCI_HFLAG_MV_PATA = (1 << 4), /* PATA port */
- AHCI_HFLAG_NO_MSI = (1 << 5), /* no PCI MSI */
- AHCI_HFLAG_NO_PMP = (1 << 6), /* no PMP */
- AHCI_HFLAG_NO_HOTPLUG = (1 << 7), /* ignore PxSERR.DIAG.N */
- AHCI_HFLAG_SECT255 = (1 << 8), /* max 255 sectors */
- AHCI_HFLAG_YES_NCQ = (1 << 9), /* force NCQ cap on */
- AHCI_HFLAG_NO_SUSPEND = (1 << 10), /* don't suspend */
- AHCI_HFLAG_SRST_TOUT_IS_OFFLINE = (1 << 11), /* treat SRST timeout as
- link offline */
- AHCI_HFLAG_NO_SNTF = (1 << 12), /* no sntf */
-
- /* ap->flags bits */
-
- AHCI_FLAG_COMMON = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
- ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
- ATA_FLAG_ACPI_SATA | ATA_FLAG_AN |
- ATA_FLAG_IPM,
-
- ICH_MAP = 0x90, /* ICH MAP register */
-
- /* em constants */
- EM_MAX_SLOTS = 8,
- EM_MAX_RETRY = 5,
-
- /* em_ctl bits */
- EM_CTL_RST = (1 << 9), /* Reset */
- EM_CTL_TM = (1 << 8), /* Transmit Message */
- EM_CTL_ALHD = (1 << 26), /* Activity LED */
-};
-
-struct ahci_cmd_hdr {
- __le32 opts;
- __le32 status;
- __le32 tbl_addr;
- __le32 tbl_addr_hi;
- __le32 reserved[4];
-};
-
-struct ahci_sg {
- __le32 addr;
- __le32 addr_hi;
- __le32 reserved;
- __le32 flags_size;
-};
-
-struct ahci_em_priv {
- enum sw_activity blink_policy;
- struct timer_list timer;
- unsigned long saved_activity;
- unsigned long activity;
- unsigned long led_state;
-};
-
-struct ahci_host_priv {
- unsigned int flags; /* AHCI_HFLAG_* */
- u32 cap; /* cap to use */
- u32 cap2; /* cap2 to use */
- u32 port_map; /* port map to use */
- u32 saved_cap; /* saved initial cap */
- u32 saved_cap2; /* saved initial cap2 */
- u32 saved_port_map; /* saved initial port_map */
- u32 em_loc; /* enclosure management location */
};
-struct ahci_port_priv {
- struct ata_link *active_link;
- struct ahci_cmd_hdr *cmd_slot;
- dma_addr_t cmd_slot_dma;
- void *cmd_tbl;
- dma_addr_t cmd_tbl_dma;
- void *rx_fis;
- dma_addr_t rx_fis_dma;
- /* for NCQ spurious interrupt analysis */
- unsigned int ncq_saw_d2h:1;
- unsigned int ncq_saw_dmas:1;
- unsigned int ncq_saw_sdb:1;
- u32 intr_mask; /* interrupts to enable */
- bool fbs_supported; /* set iff FBS is supported */
- bool fbs_enabled; /* set iff FBS is enabled */
- int fbs_last_dev; /* save FBS.DEV of last FIS */
- /* enclosure management info per PM slot */
- struct ahci_em_priv em_priv[EM_MAX_SLOTS];
+enum board_ids {
+ /* board IDs by feature in alphabetical order */
+ board_ahci,
+ board_ahci_ign_iferr,
+ board_ahci_nosntf,
+
+ /* board IDs for specific chipsets in alphabetical order */
+ board_ahci_mcp65,
+ board_ahci_mcp77,
+ board_ahci_mcp89,
+ board_ahci_mv,
+ board_ahci_sb600,
+ board_ahci_sb700, /* for SB700 and SB800 */
+ board_ahci_vt8251,
+
+ /* aliases */
+ board_ahci_mcp_linux = board_ahci_mcp65,
+ board_ahci_mcp67 = board_ahci_mcp65,
+ board_ahci_mcp73 = board_ahci_mcp65,
+ board_ahci_mcp79 = board_ahci_mcp77,
};
-static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
-static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
-static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
-static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
-static int ahci_port_start(struct ata_port *ap);
-static void ahci_port_stop(struct ata_port *ap);
-static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc);
-static void ahci_qc_prep(struct ata_queued_cmd *qc);
-static void ahci_freeze(struct ata_port *ap);
-static void ahci_thaw(struct ata_port *ap);
-static void ahci_enable_fbs(struct ata_port *ap);
-static void ahci_disable_fbs(struct ata_port *ap);
-static void ahci_pmp_attach(struct ata_port *ap);
-static void ahci_pmp_detach(struct ata_port *ap);
-static int ahci_softreset(struct ata_link *link, unsigned int *class,
- unsigned long deadline);
static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
-static int ahci_hardreset(struct ata_link *link, unsigned int *class,
- unsigned long deadline);
static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
-static void ahci_postreset(struct ata_link *link, unsigned int *class);
-static void ahci_error_handler(struct ata_port *ap);
-static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
-static int ahci_port_resume(struct ata_port *ap);
-static void ahci_dev_config(struct ata_device *dev);
-static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
- u32 opts);
#ifdef CONFIG_PM
-static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
static int ahci_pci_device_resume(struct pci_dev *pdev);
#endif
-static ssize_t ahci_activity_show(struct ata_device *dev, char *buf);
-static ssize_t ahci_activity_store(struct ata_device *dev,
- enum sw_activity val);
-static void ahci_init_sw_activity(struct ata_link *link);
-
-static ssize_t ahci_show_host_caps(struct device *dev,
- struct device_attribute *attr, char *buf);
-static ssize_t ahci_show_host_cap2(struct device *dev,
- struct device_attribute *attr, char *buf);
-static ssize_t ahci_show_host_version(struct device *dev,
- struct device_attribute *attr, char *buf);
-static ssize_t ahci_show_port_cmd(struct device *dev,
- struct device_attribute *attr, char *buf);
-
-static DEVICE_ATTR(ahci_host_caps, S_IRUGO, ahci_show_host_caps, NULL);
-static DEVICE_ATTR(ahci_host_cap2, S_IRUGO, ahci_show_host_cap2, NULL);
-static DEVICE_ATTR(ahci_host_version, S_IRUGO, ahci_show_host_version, NULL);
-static DEVICE_ATTR(ahci_port_cmd, S_IRUGO, ahci_show_port_cmd, NULL);
-
-static struct device_attribute *ahci_shost_attrs[] = {
- &dev_attr_link_power_management_policy,
- &dev_attr_em_message_type,
- &dev_attr_em_message,
- &dev_attr_ahci_host_caps,
- &dev_attr_ahci_host_cap2,
- &dev_attr_ahci_host_version,
- &dev_attr_ahci_port_cmd,
- NULL
-};
-
-static struct device_attribute *ahci_sdev_attrs[] = {
- &dev_attr_sw_activity,
- &dev_attr_unload_heads,
- NULL
-};
-
-static struct scsi_host_template ahci_sht = {
- ATA_NCQ_SHT(DRV_NAME),
- .can_queue = AHCI_MAX_CMDS - 1,
- .sg_tablesize = AHCI_MAX_SG,
- .dma_boundary = AHCI_DMA_BOUNDARY,
- .shost_attrs = ahci_shost_attrs,
- .sdev_attrs = ahci_sdev_attrs,
-};
-
-static struct ata_port_operations ahci_ops = {
- .inherits = &sata_pmp_port_ops,
-
- .qc_defer = ahci_pmp_qc_defer,
- .qc_prep = ahci_qc_prep,
- .qc_issue = ahci_qc_issue,
- .qc_fill_rtf = ahci_qc_fill_rtf,
-
- .freeze = ahci_freeze,
- .thaw = ahci_thaw,
- .softreset = ahci_softreset,
- .hardreset = ahci_hardreset,
- .postreset = ahci_postreset,
- .pmp_softreset = ahci_softreset,
- .error_handler = ahci_error_handler,
- .post_internal_cmd = ahci_post_internal_cmd,
- .dev_config = ahci_dev_config,
-
- .scr_read = ahci_scr_read,
- .scr_write = ahci_scr_write,
- .pmp_attach = ahci_pmp_attach,
- .pmp_detach = ahci_pmp_detach,
-
- .enable_pm = ahci_enable_alpm,
- .disable_pm = ahci_disable_alpm,
- .em_show = ahci_led_show,
- .em_store = ahci_led_store,
- .sw_activity_show = ahci_activity_show,
- .sw_activity_store = ahci_activity_store,
-#ifdef CONFIG_PM
- .port_suspend = ahci_port_suspend,
- .port_resume = ahci_port_resume,
-#endif
- .port_start = ahci_port_start,
- .port_stop = ahci_port_stop,
-};
static struct ata_port_operations ahci_vt8251_ops = {
.inherits = &ahci_ops,
@@ -463,6 +108,7 @@ static struct ata_port_operations ahci_sb600_ops = {
#define AHCI_HFLAGS(flags) .private_data = (void *)(flags)
static const struct ata_port_info ahci_port_info[] = {
+ /* by features */
[board_ahci] =
{
.flags = AHCI_FLAG_COMMON,
@@ -470,81 +116,83 @@ static const struct ata_port_info ahci_port_info[] = {
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
- [board_ahci_vt8251] =
+ [board_ahci_ign_iferr] =
{
- AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_PMP),
+ AHCI_HFLAGS (AHCI_HFLAG_IGN_IRQ_IF_ERR),
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
- .port_ops = &ahci_vt8251_ops,
+ .port_ops = &ahci_ops,
},
- [board_ahci_ign_iferr] =
+ [board_ahci_nosntf] =
{
- AHCI_HFLAGS (AHCI_HFLAG_IGN_IRQ_IF_ERR),
+ AHCI_HFLAGS (AHCI_HFLAG_NO_SNTF),
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
- [board_ahci_sb600] =
+ /* by chipsets */
+ [board_ahci_mcp65] =
{
- AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL |
- AHCI_HFLAG_NO_MSI | AHCI_HFLAG_SECT255 |
- AHCI_HFLAG_32BIT_ONLY),
+ AHCI_HFLAGS (AHCI_HFLAG_NO_FPDMA_AA | AHCI_HFLAG_NO_PMP |
+ AHCI_HFLAG_YES_NCQ),
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
- .port_ops = &ahci_sb600_ops,
+ .port_ops = &ahci_ops,
},
- [board_ahci_mv] =
+ [board_ahci_mcp77] =
{
- AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_MSI |
- AHCI_HFLAG_MV_PATA | AHCI_HFLAG_NO_PMP),
- .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
- ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
+ AHCI_HFLAGS (AHCI_HFLAG_NO_FPDMA_AA | AHCI_HFLAG_NO_PMP),
+ .flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
- [board_ahci_sb700] = /* for SB700 and SB800 */
+ [board_ahci_mcp89] =
{
- AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL),
+ AHCI_HFLAGS (AHCI_HFLAG_NO_FPDMA_AA),
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
- .port_ops = &ahci_sb600_ops,
+ .port_ops = &ahci_ops,
},
- [board_ahci_mcp65] =
+ [board_ahci_mv] =
{
- AHCI_HFLAGS (AHCI_HFLAG_YES_NCQ),
- .flags = AHCI_FLAG_COMMON,
+ AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_MSI |
+ AHCI_HFLAG_MV_PATA | AHCI_HFLAG_NO_PMP),
+ .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+ ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
- [board_ahci_nopmp] =
+ [board_ahci_sb600] =
{
- AHCI_HFLAGS (AHCI_HFLAG_NO_PMP),
+ AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL |
+ AHCI_HFLAG_NO_MSI | AHCI_HFLAG_SECT255 |
+ AHCI_HFLAG_32BIT_ONLY),
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
- .port_ops = &ahci_ops,
+ .port_ops = &ahci_sb600_ops,
},
- [board_ahci_yesncq] =
+ [board_ahci_sb700] = /* for SB700 and SB800 */
{
- AHCI_HFLAGS (AHCI_HFLAG_YES_NCQ),
+ AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL),
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
- .port_ops = &ahci_ops,
+ .port_ops = &ahci_sb600_ops,
},
- [board_ahci_nosntf] =
+ [board_ahci_vt8251] =
{
- AHCI_HFLAGS (AHCI_HFLAG_NO_SNTF),
+ AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_PMP),
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
- .port_ops = &ahci_ops,
+ .port_ops = &ahci_vt8251_ops,
},
};
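The reworked enum board_ids above replaces the quirk-named entries (board_ahci_yesncq, board_ahci_nopmp) with per-chipset names plus aliases, so several NVIDIA device IDs in the PCI table that follows can share a single ata_port_info entry. A minimal illustration of what an alias resolves to (the lookup helper is hypothetical; the enum and array are the ones defined above):

	/* Illustration only, not part of the patch: board_ahci_mcp67 is an
	 * alias of board_ahci_mcp65, so both IDs index the same entry and
	 * MCP67 parts inherit the MCP65 quirks (NO_FPDMA_AA, NO_PMP, YES_NCQ).
	 */
	static const struct ata_port_info *board_to_info(enum board_ids board)
	{
		return &ahci_port_info[board];
	}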
@@ -629,82 +277,82 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(NVIDIA, 0x045d), board_ahci_mcp65 }, /* MCP65 */
{ PCI_VDEVICE(NVIDIA, 0x045e), board_ahci_mcp65 }, /* MCP65 */
{ PCI_VDEVICE(NVIDIA, 0x045f), board_ahci_mcp65 }, /* MCP65 */
- { PCI_VDEVICE(NVIDIA, 0x0550), board_ahci_yesncq }, /* MCP67 */
- { PCI_VDEVICE(NVIDIA, 0x0551), board_ahci_yesncq }, /* MCP67 */
- { PCI_VDEVICE(NVIDIA, 0x0552), board_ahci_yesncq }, /* MCP67 */
- { PCI_VDEVICE(NVIDIA, 0x0553), board_ahci_yesncq }, /* MCP67 */
- { PCI_VDEVICE(NVIDIA, 0x0554), board_ahci_yesncq }, /* MCP67 */
- { PCI_VDEVICE(NVIDIA, 0x0555), board_ahci_yesncq }, /* MCP67 */
- { PCI_VDEVICE(NVIDIA, 0x0556), board_ahci_yesncq }, /* MCP67 */
- { PCI_VDEVICE(NVIDIA, 0x0557), board_ahci_yesncq }, /* MCP67 */
- { PCI_VDEVICE(NVIDIA, 0x0558), board_ahci_yesncq }, /* MCP67 */
- { PCI_VDEVICE(NVIDIA, 0x0559), board_ahci_yesncq }, /* MCP67 */
- { PCI_VDEVICE(NVIDIA, 0x055a), board_ahci_yesncq }, /* MCP67 */
- { PCI_VDEVICE(NVIDIA, 0x055b), board_ahci_yesncq }, /* MCP67 */
- { PCI_VDEVICE(NVIDIA, 0x0580), board_ahci_yesncq }, /* Linux ID */
- { PCI_VDEVICE(NVIDIA, 0x0581), board_ahci_yesncq }, /* Linux ID */
- { PCI_VDEVICE(NVIDIA, 0x0582), board_ahci_yesncq }, /* Linux ID */
- { PCI_VDEVICE(NVIDIA, 0x0583), board_ahci_yesncq }, /* Linux ID */
- { PCI_VDEVICE(NVIDIA, 0x0584), board_ahci_yesncq }, /* Linux ID */
- { PCI_VDEVICE(NVIDIA, 0x0585), board_ahci_yesncq }, /* Linux ID */
- { PCI_VDEVICE(NVIDIA, 0x0586), board_ahci_yesncq }, /* Linux ID */
- { PCI_VDEVICE(NVIDIA, 0x0587), board_ahci_yesncq }, /* Linux ID */
- { PCI_VDEVICE(NVIDIA, 0x0588), board_ahci_yesncq }, /* Linux ID */
- { PCI_VDEVICE(NVIDIA, 0x0589), board_ahci_yesncq }, /* Linux ID */
- { PCI_VDEVICE(NVIDIA, 0x058a), board_ahci_yesncq }, /* Linux ID */
- { PCI_VDEVICE(NVIDIA, 0x058b), board_ahci_yesncq }, /* Linux ID */
- { PCI_VDEVICE(NVIDIA, 0x058c), board_ahci_yesncq }, /* Linux ID */
- { PCI_VDEVICE(NVIDIA, 0x058d), board_ahci_yesncq }, /* Linux ID */
- { PCI_VDEVICE(NVIDIA, 0x058e), board_ahci_yesncq }, /* Linux ID */
- { PCI_VDEVICE(NVIDIA, 0x058f), board_ahci_yesncq }, /* Linux ID */
- { PCI_VDEVICE(NVIDIA, 0x07f0), board_ahci_yesncq }, /* MCP73 */
- { PCI_VDEVICE(NVIDIA, 0x07f1), board_ahci_yesncq }, /* MCP73 */
- { PCI_VDEVICE(NVIDIA, 0x07f2), board_ahci_yesncq }, /* MCP73 */
- { PCI_VDEVICE(NVIDIA, 0x07f3), board_ahci_yesncq }, /* MCP73 */
- { PCI_VDEVICE(NVIDIA, 0x07f4), board_ahci_yesncq }, /* MCP73 */
- { PCI_VDEVICE(NVIDIA, 0x07f5), board_ahci_yesncq }, /* MCP73 */
- { PCI_VDEVICE(NVIDIA, 0x07f6), board_ahci_yesncq }, /* MCP73 */
- { PCI_VDEVICE(NVIDIA, 0x07f7), board_ahci_yesncq }, /* MCP73 */
- { PCI_VDEVICE(NVIDIA, 0x07f8), board_ahci_yesncq }, /* MCP73 */
- { PCI_VDEVICE(NVIDIA, 0x07f9), board_ahci_yesncq }, /* MCP73 */
- { PCI_VDEVICE(NVIDIA, 0x07fa), board_ahci_yesncq }, /* MCP73 */
- { PCI_VDEVICE(NVIDIA, 0x07fb), board_ahci_yesncq }, /* MCP73 */
- { PCI_VDEVICE(NVIDIA, 0x0ad0), board_ahci }, /* MCP77 */
- { PCI_VDEVICE(NVIDIA, 0x0ad1), board_ahci }, /* MCP77 */
- { PCI_VDEVICE(NVIDIA, 0x0ad2), board_ahci }, /* MCP77 */
- { PCI_VDEVICE(NVIDIA, 0x0ad3), board_ahci }, /* MCP77 */
- { PCI_VDEVICE(NVIDIA, 0x0ad4), board_ahci }, /* MCP77 */
- { PCI_VDEVICE(NVIDIA, 0x0ad5), board_ahci }, /* MCP77 */
- { PCI_VDEVICE(NVIDIA, 0x0ad6), board_ahci }, /* MCP77 */
- { PCI_VDEVICE(NVIDIA, 0x0ad7), board_ahci }, /* MCP77 */
- { PCI_VDEVICE(NVIDIA, 0x0ad8), board_ahci }, /* MCP77 */
- { PCI_VDEVICE(NVIDIA, 0x0ad9), board_ahci }, /* MCP77 */
- { PCI_VDEVICE(NVIDIA, 0x0ada), board_ahci }, /* MCP77 */
- { PCI_VDEVICE(NVIDIA, 0x0adb), board_ahci }, /* MCP77 */
- { PCI_VDEVICE(NVIDIA, 0x0ab4), board_ahci }, /* MCP79 */
- { PCI_VDEVICE(NVIDIA, 0x0ab5), board_ahci }, /* MCP79 */
- { PCI_VDEVICE(NVIDIA, 0x0ab6), board_ahci }, /* MCP79 */
- { PCI_VDEVICE(NVIDIA, 0x0ab7), board_ahci }, /* MCP79 */
- { PCI_VDEVICE(NVIDIA, 0x0ab8), board_ahci }, /* MCP79 */
- { PCI_VDEVICE(NVIDIA, 0x0ab9), board_ahci }, /* MCP79 */
- { PCI_VDEVICE(NVIDIA, 0x0aba), board_ahci }, /* MCP79 */
- { PCI_VDEVICE(NVIDIA, 0x0abb), board_ahci }, /* MCP79 */
- { PCI_VDEVICE(NVIDIA, 0x0abc), board_ahci }, /* MCP79 */
- { PCI_VDEVICE(NVIDIA, 0x0abd), board_ahci }, /* MCP79 */
- { PCI_VDEVICE(NVIDIA, 0x0abe), board_ahci }, /* MCP79 */
- { PCI_VDEVICE(NVIDIA, 0x0abf), board_ahci }, /* MCP79 */
- { PCI_VDEVICE(NVIDIA, 0x0d84), board_ahci }, /* MCP89 */
- { PCI_VDEVICE(NVIDIA, 0x0d85), board_ahci }, /* MCP89 */
- { PCI_VDEVICE(NVIDIA, 0x0d86), board_ahci }, /* MCP89 */
- { PCI_VDEVICE(NVIDIA, 0x0d87), board_ahci }, /* MCP89 */
- { PCI_VDEVICE(NVIDIA, 0x0d88), board_ahci }, /* MCP89 */
- { PCI_VDEVICE(NVIDIA, 0x0d89), board_ahci }, /* MCP89 */
- { PCI_VDEVICE(NVIDIA, 0x0d8a), board_ahci }, /* MCP89 */
- { PCI_VDEVICE(NVIDIA, 0x0d8b), board_ahci }, /* MCP89 */
- { PCI_VDEVICE(NVIDIA, 0x0d8c), board_ahci }, /* MCP89 */
- { PCI_VDEVICE(NVIDIA, 0x0d8d), board_ahci }, /* MCP89 */
- { PCI_VDEVICE(NVIDIA, 0x0d8e), board_ahci }, /* MCP89 */
- { PCI_VDEVICE(NVIDIA, 0x0d8f), board_ahci }, /* MCP89 */
+ { PCI_VDEVICE(NVIDIA, 0x0550), board_ahci_mcp67 }, /* MCP67 */
+ { PCI_VDEVICE(NVIDIA, 0x0551), board_ahci_mcp67 }, /* MCP67 */
+ { PCI_VDEVICE(NVIDIA, 0x0552), board_ahci_mcp67 }, /* MCP67 */
+ { PCI_VDEVICE(NVIDIA, 0x0553), board_ahci_mcp67 }, /* MCP67 */
+ { PCI_VDEVICE(NVIDIA, 0x0554), board_ahci_mcp67 }, /* MCP67 */
+ { PCI_VDEVICE(NVIDIA, 0x0555), board_ahci_mcp67 }, /* MCP67 */
+ { PCI_VDEVICE(NVIDIA, 0x0556), board_ahci_mcp67 }, /* MCP67 */
+ { PCI_VDEVICE(NVIDIA, 0x0557), board_ahci_mcp67 }, /* MCP67 */
+ { PCI_VDEVICE(NVIDIA, 0x0558), board_ahci_mcp67 }, /* MCP67 */
+ { PCI_VDEVICE(NVIDIA, 0x0559), board_ahci_mcp67 }, /* MCP67 */
+ { PCI_VDEVICE(NVIDIA, 0x055a), board_ahci_mcp67 }, /* MCP67 */
+ { PCI_VDEVICE(NVIDIA, 0x055b), board_ahci_mcp67 }, /* MCP67 */
+ { PCI_VDEVICE(NVIDIA, 0x0580), board_ahci_mcp_linux }, /* Linux ID */
+ { PCI_VDEVICE(NVIDIA, 0x0581), board_ahci_mcp_linux }, /* Linux ID */
+ { PCI_VDEVICE(NVIDIA, 0x0582), board_ahci_mcp_linux }, /* Linux ID */
+ { PCI_VDEVICE(NVIDIA, 0x0583), board_ahci_mcp_linux }, /* Linux ID */
+ { PCI_VDEVICE(NVIDIA, 0x0584), board_ahci_mcp_linux }, /* Linux ID */
+ { PCI_VDEVICE(NVIDIA, 0x0585), board_ahci_mcp_linux }, /* Linux ID */
+ { PCI_VDEVICE(NVIDIA, 0x0586), board_ahci_mcp_linux }, /* Linux ID */
+ { PCI_VDEVICE(NVIDIA, 0x0587), board_ahci_mcp_linux }, /* Linux ID */
+ { PCI_VDEVICE(NVIDIA, 0x0588), board_ahci_mcp_linux }, /* Linux ID */
+ { PCI_VDEVICE(NVIDIA, 0x0589), board_ahci_mcp_linux }, /* Linux ID */
+ { PCI_VDEVICE(NVIDIA, 0x058a), board_ahci_mcp_linux }, /* Linux ID */
+ { PCI_VDEVICE(NVIDIA, 0x058b), board_ahci_mcp_linux }, /* Linux ID */
+ { PCI_VDEVICE(NVIDIA, 0x058c), board_ahci_mcp_linux }, /* Linux ID */
+ { PCI_VDEVICE(NVIDIA, 0x058d), board_ahci_mcp_linux }, /* Linux ID */
+ { PCI_VDEVICE(NVIDIA, 0x058e), board_ahci_mcp_linux }, /* Linux ID */
+ { PCI_VDEVICE(NVIDIA, 0x058f), board_ahci_mcp_linux }, /* Linux ID */
+ { PCI_VDEVICE(NVIDIA, 0x07f0), board_ahci_mcp73 }, /* MCP73 */
+ { PCI_VDEVICE(NVIDIA, 0x07f1), board_ahci_mcp73 }, /* MCP73 */
+ { PCI_VDEVICE(NVIDIA, 0x07f2), board_ahci_mcp73 }, /* MCP73 */
+ { PCI_VDEVICE(NVIDIA, 0x07f3), board_ahci_mcp73 }, /* MCP73 */
+ { PCI_VDEVICE(NVIDIA, 0x07f4), board_ahci_mcp73 }, /* MCP73 */
+ { PCI_VDEVICE(NVIDIA, 0x07f5), board_ahci_mcp73 }, /* MCP73 */
+ { PCI_VDEVICE(NVIDIA, 0x07f6), board_ahci_mcp73 }, /* MCP73 */
+ { PCI_VDEVICE(NVIDIA, 0x07f7), board_ahci_mcp73 }, /* MCP73 */
+ { PCI_VDEVICE(NVIDIA, 0x07f8), board_ahci_mcp73 }, /* MCP73 */
+ { PCI_VDEVICE(NVIDIA, 0x07f9), board_ahci_mcp73 }, /* MCP73 */
+ { PCI_VDEVICE(NVIDIA, 0x07fa), board_ahci_mcp73 }, /* MCP73 */
+ { PCI_VDEVICE(NVIDIA, 0x07fb), board_ahci_mcp73 }, /* MCP73 */
+ { PCI_VDEVICE(NVIDIA, 0x0ad0), board_ahci_mcp77 }, /* MCP77 */
+ { PCI_VDEVICE(NVIDIA, 0x0ad1), board_ahci_mcp77 }, /* MCP77 */
+ { PCI_VDEVICE(NVIDIA, 0x0ad2), board_ahci_mcp77 }, /* MCP77 */
+ { PCI_VDEVICE(NVIDIA, 0x0ad3), board_ahci_mcp77 }, /* MCP77 */
+ { PCI_VDEVICE(NVIDIA, 0x0ad4), board_ahci_mcp77 }, /* MCP77 */
+ { PCI_VDEVICE(NVIDIA, 0x0ad5), board_ahci_mcp77 }, /* MCP77 */
+ { PCI_VDEVICE(NVIDIA, 0x0ad6), board_ahci_mcp77 }, /* MCP77 */
+ { PCI_VDEVICE(NVIDIA, 0x0ad7), board_ahci_mcp77 }, /* MCP77 */
+ { PCI_VDEVICE(NVIDIA, 0x0ad8), board_ahci_mcp77 }, /* MCP77 */
+ { PCI_VDEVICE(NVIDIA, 0x0ad9), board_ahci_mcp77 }, /* MCP77 */
+ { PCI_VDEVICE(NVIDIA, 0x0ada), board_ahci_mcp77 }, /* MCP77 */
+ { PCI_VDEVICE(NVIDIA, 0x0adb), board_ahci_mcp77 }, /* MCP77 */
+ { PCI_VDEVICE(NVIDIA, 0x0ab4), board_ahci_mcp79 }, /* MCP79 */
+ { PCI_VDEVICE(NVIDIA, 0x0ab5), board_ahci_mcp79 }, /* MCP79 */
+ { PCI_VDEVICE(NVIDIA, 0x0ab6), board_ahci_mcp79 }, /* MCP79 */
+ { PCI_VDEVICE(NVIDIA, 0x0ab7), board_ahci_mcp79 }, /* MCP79 */
+ { PCI_VDEVICE(NVIDIA, 0x0ab8), board_ahci_mcp79 }, /* MCP79 */
+ { PCI_VDEVICE(NVIDIA, 0x0ab9), board_ahci_mcp79 }, /* MCP79 */
+ { PCI_VDEVICE(NVIDIA, 0x0aba), board_ahci_mcp79 }, /* MCP79 */
+ { PCI_VDEVICE(NVIDIA, 0x0abb), board_ahci_mcp79 }, /* MCP79 */
+ { PCI_VDEVICE(NVIDIA, 0x0abc), board_ahci_mcp79 }, /* MCP79 */
+ { PCI_VDEVICE(NVIDIA, 0x0abd), board_ahci_mcp79 }, /* MCP79 */
+ { PCI_VDEVICE(NVIDIA, 0x0abe), board_ahci_mcp79 }, /* MCP79 */
+ { PCI_VDEVICE(NVIDIA, 0x0abf), board_ahci_mcp79 }, /* MCP79 */
+ { PCI_VDEVICE(NVIDIA, 0x0d84), board_ahci_mcp89 }, /* MCP89 */
+ { PCI_VDEVICE(NVIDIA, 0x0d85), board_ahci_mcp89 }, /* MCP89 */
+ { PCI_VDEVICE(NVIDIA, 0x0d86), board_ahci_mcp89 }, /* MCP89 */
+ { PCI_VDEVICE(NVIDIA, 0x0d87), board_ahci_mcp89 }, /* MCP89 */
+ { PCI_VDEVICE(NVIDIA, 0x0d88), board_ahci_mcp89 }, /* MCP89 */
+ { PCI_VDEVICE(NVIDIA, 0x0d89), board_ahci_mcp89 }, /* MCP89 */
+ { PCI_VDEVICE(NVIDIA, 0x0d8a), board_ahci_mcp89 }, /* MCP89 */
+ { PCI_VDEVICE(NVIDIA, 0x0d8b), board_ahci_mcp89 }, /* MCP89 */
+ { PCI_VDEVICE(NVIDIA, 0x0d8c), board_ahci_mcp89 }, /* MCP89 */
+ { PCI_VDEVICE(NVIDIA, 0x0d8d), board_ahci_mcp89 }, /* MCP89 */
+ { PCI_VDEVICE(NVIDIA, 0x0d8e), board_ahci_mcp89 }, /* MCP89 */
+ { PCI_VDEVICE(NVIDIA, 0x0d8f), board_ahci_mcp89 }, /* MCP89 */
/* SiS */
{ PCI_VDEVICE(SI, 0x1184), board_ahci }, /* SiS 966 */
@@ -737,12 +385,6 @@ static struct pci_driver ahci_pci_driver = {
#endif
};
-static int ahci_em_messages = 1;
-module_param(ahci_em_messages, int, 0444);
-/* add other LED protocol types when they become supported */
-MODULE_PARM_DESC(ahci_em_messages,
- "Set AHCI Enclosure Management Message type (0 = disabled, 1 = LED");
-
#if defined(CONFIG_PATA_MARVELL) || defined(CONFIG_PATA_MARVELL_MODULE)
static int marvell_enable;
#else
@@ -752,166 +394,15 @@ module_param(marvell_enable, int, 0644);
MODULE_PARM_DESC(marvell_enable, "Marvell SATA via AHCI (1 = enabled)");
-static inline int ahci_nr_ports(u32 cap)
-{
- return (cap & 0x1f) + 1;
-}
-
-static inline void __iomem *__ahci_port_base(struct ata_host *host,
- unsigned int port_no)
-{
- void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
-
- return mmio + 0x100 + (port_no * 0x80);
-}
-
-static inline void __iomem *ahci_port_base(struct ata_port *ap)
-{
- return __ahci_port_base(ap->host, ap->port_no);
-}
-
-static void ahci_enable_ahci(void __iomem *mmio)
-{
- int i;
- u32 tmp;
-
- /* turn on AHCI_EN */
- tmp = readl(mmio + HOST_CTL);
- if (tmp & HOST_AHCI_EN)
- return;
-
- /* Some controllers need AHCI_EN to be written multiple times.
- * Try a few times before giving up.
- */
- for (i = 0; i < 5; i++) {
- tmp |= HOST_AHCI_EN;
- writel(tmp, mmio + HOST_CTL);
- tmp = readl(mmio + HOST_CTL); /* flush && sanity check */
- if (tmp & HOST_AHCI_EN)
- return;
- msleep(10);
- }
-
- WARN_ON(1);
-}
-
-static ssize_t ahci_show_host_caps(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct ata_port *ap = ata_shost_to_port(shost);
- struct ahci_host_priv *hpriv = ap->host->private_data;
-
- return sprintf(buf, "%x\n", hpriv->cap);
-}
-
-static ssize_t ahci_show_host_cap2(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct ata_port *ap = ata_shost_to_port(shost);
- struct ahci_host_priv *hpriv = ap->host->private_data;
-
- return sprintf(buf, "%x\n", hpriv->cap2);
-}
-
-static ssize_t ahci_show_host_version(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct ata_port *ap = ata_shost_to_port(shost);
- void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
-
- return sprintf(buf, "%x\n", readl(mmio + HOST_VERSION));
-}
-
-static ssize_t ahci_show_port_cmd(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct ata_port *ap = ata_shost_to_port(shost);
- void __iomem *port_mmio = ahci_port_base(ap);
-
- return sprintf(buf, "%x\n", readl(port_mmio + PORT_CMD));
-}
-
-/**
- * ahci_save_initial_config - Save and fixup initial config values
- * @pdev: target PCI device
- * @hpriv: host private area to store config values
- *
- * Some registers containing configuration info might be setup by
- * BIOS and might be cleared on reset. This function saves the
- * initial values of those registers into @hpriv such that they
- * can be restored after controller reset.
- *
- * If inconsistent, config values are fixed up by this function.
- *
- * LOCKING:
- * None.
- */
-static void ahci_save_initial_config(struct pci_dev *pdev,
- struct ahci_host_priv *hpriv)
+static void ahci_pci_save_initial_config(struct pci_dev *pdev,
+ struct ahci_host_priv *hpriv)
{
- void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
- u32 cap, cap2, vers, port_map;
- int i;
- int mv;
-
- /* make sure AHCI mode is enabled before accessing CAP */
- ahci_enable_ahci(mmio);
+ unsigned int force_port_map = 0;
+ unsigned int mask_port_map = 0;
- /* Values prefixed with saved_ are written back to host after
- * reset. Values without are used for driver operation.
- */
- hpriv->saved_cap = cap = readl(mmio + HOST_CAP);
- hpriv->saved_port_map = port_map = readl(mmio + HOST_PORTS_IMPL);
-
- /* CAP2 register is only defined for AHCI 1.2 and later */
- vers = readl(mmio + HOST_VERSION);
- if ((vers >> 16) > 1 ||
- ((vers >> 16) == 1 && (vers & 0xFFFF) >= 0x200))
- hpriv->saved_cap2 = cap2 = readl(mmio + HOST_CAP2);
- else
- hpriv->saved_cap2 = cap2 = 0;
-
- /* some chips have errata preventing 64bit use */
- if ((cap & HOST_CAP_64) && (hpriv->flags & AHCI_HFLAG_32BIT_ONLY)) {
- dev_printk(KERN_INFO, &pdev->dev,
- "controller can't do 64bit DMA, forcing 32bit\n");
- cap &= ~HOST_CAP_64;
- }
-
- if ((cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_NO_NCQ)) {
- dev_printk(KERN_INFO, &pdev->dev,
- "controller can't do NCQ, turning off CAP_NCQ\n");
- cap &= ~HOST_CAP_NCQ;
- }
-
- if (!(cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_YES_NCQ)) {
- dev_printk(KERN_INFO, &pdev->dev,
- "controller can do NCQ, turning on CAP_NCQ\n");
- cap |= HOST_CAP_NCQ;
- }
-
- if ((cap & HOST_CAP_PMP) && (hpriv->flags & AHCI_HFLAG_NO_PMP)) {
- dev_printk(KERN_INFO, &pdev->dev,
- "controller can't do PMP, turning off CAP_PMP\n");
- cap &= ~HOST_CAP_PMP;
- }
-
- if ((cap & HOST_CAP_SNTF) && (hpriv->flags & AHCI_HFLAG_NO_SNTF)) {
- dev_printk(KERN_INFO, &pdev->dev,
- "controller can't do SNTF, turning off CAP_SNTF\n");
- cap &= ~HOST_CAP_SNTF;
- }
-
- if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361 &&
- port_map != 1) {
- dev_printk(KERN_INFO, &pdev->dev,
- "JMB361 has only one port, port_map 0x%x -> 0x%x\n",
- port_map, 1);
- port_map = 1;
+ if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361) {
+ dev_info(&pdev->dev, "JMB361 has only one port\n");
+ force_port_map = 1;
}
/*
@@ -921,469 +412,25 @@ static void ahci_save_initial_config(struct pci_dev *pdev,
*/
if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
if (pdev->device == 0x6121)
- mv = 0x3;
+ mask_port_map = 0x3;
else
- mv = 0xf;
- dev_printk(KERN_ERR, &pdev->dev,
- "MV_AHCI HACK: port_map %x -> %x\n",
- port_map,
- port_map & mv);
- dev_printk(KERN_ERR, &pdev->dev,
+ mask_port_map = 0xf;
+ dev_info(&pdev->dev,
"Disabling your PATA port. Use the boot option 'ahci.marvell_enable=0' to avoid this.\n");
-
- port_map &= mv;
}
- /* cross check port_map and cap.n_ports */
- if (port_map) {
- int map_ports = 0;
-
- for (i = 0; i < AHCI_MAX_PORTS; i++)
- if (port_map & (1 << i))
- map_ports++;
-
- /* If PI has more ports than n_ports, whine, clear
- * port_map and let it be generated from n_ports.
- */
- if (map_ports > ahci_nr_ports(cap)) {
- dev_printk(KERN_WARNING, &pdev->dev,
- "implemented port map (0x%x) contains more "
- "ports than nr_ports (%u), using nr_ports\n",
- port_map, ahci_nr_ports(cap));
- port_map = 0;
- }
- }
-
- /* fabricate port_map from cap.nr_ports */
- if (!port_map) {
- port_map = (1 << ahci_nr_ports(cap)) - 1;
- dev_printk(KERN_WARNING, &pdev->dev,
- "forcing PORTS_IMPL to 0x%x\n", port_map);
-
- /* write the fixed up value to the PI register */
- hpriv->saved_port_map = port_map;
- }
-
- /* record values to use during operation */
- hpriv->cap = cap;
- hpriv->cap2 = cap2;
- hpriv->port_map = port_map;
-}
-
-/**
- * ahci_restore_initial_config - Restore initial config
- * @host: target ATA host
- *
- * Restore initial config stored by ahci_save_initial_config().
- *
- * LOCKING:
- * None.
- */
-static void ahci_restore_initial_config(struct ata_host *host)
-{
- struct ahci_host_priv *hpriv = host->private_data;
- void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
-
- writel(hpriv->saved_cap, mmio + HOST_CAP);
- if (hpriv->saved_cap2)
- writel(hpriv->saved_cap2, mmio + HOST_CAP2);
- writel(hpriv->saved_port_map, mmio + HOST_PORTS_IMPL);
- (void) readl(mmio + HOST_PORTS_IMPL); /* flush */
-}
-
-static unsigned ahci_scr_offset(struct ata_port *ap, unsigned int sc_reg)
-{
- static const int offset[] = {
- [SCR_STATUS] = PORT_SCR_STAT,
- [SCR_CONTROL] = PORT_SCR_CTL,
- [SCR_ERROR] = PORT_SCR_ERR,
- [SCR_ACTIVE] = PORT_SCR_ACT,
- [SCR_NOTIFICATION] = PORT_SCR_NTF,
- };
- struct ahci_host_priv *hpriv = ap->host->private_data;
-
- if (sc_reg < ARRAY_SIZE(offset) &&
- (sc_reg != SCR_NOTIFICATION || (hpriv->cap & HOST_CAP_SNTF)))
- return offset[sc_reg];
- return 0;
-}
-
-static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
-{
- void __iomem *port_mmio = ahci_port_base(link->ap);
- int offset = ahci_scr_offset(link->ap, sc_reg);
-
- if (offset) {
- *val = readl(port_mmio + offset);
- return 0;
- }
- return -EINVAL;
-}
-
-static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
-{
- void __iomem *port_mmio = ahci_port_base(link->ap);
- int offset = ahci_scr_offset(link->ap, sc_reg);
-
- if (offset) {
- writel(val, port_mmio + offset);
- return 0;
- }
- return -EINVAL;
-}
-
-static void ahci_start_engine(struct ata_port *ap)
-{
- void __iomem *port_mmio = ahci_port_base(ap);
- u32 tmp;
-
- /* start DMA */
- tmp = readl(port_mmio + PORT_CMD);
- tmp |= PORT_CMD_START;
- writel(tmp, port_mmio + PORT_CMD);
- readl(port_mmio + PORT_CMD); /* flush */
-}
-
-static int ahci_stop_engine(struct ata_port *ap)
-{
- void __iomem *port_mmio = ahci_port_base(ap);
- u32 tmp;
-
- tmp = readl(port_mmio + PORT_CMD);
-
- /* check if the HBA is idle */
- if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)
- return 0;
-
- /* setting HBA to idle */
- tmp &= ~PORT_CMD_START;
- writel(tmp, port_mmio + PORT_CMD);
-
- /* wait for engine to stop. This could be as long as 500 msec */
- tmp = ata_wait_register(port_mmio + PORT_CMD,
- PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500);
- if (tmp & PORT_CMD_LIST_ON)
- return -EIO;
-
- return 0;
-}
-
-static void ahci_start_fis_rx(struct ata_port *ap)
-{
- void __iomem *port_mmio = ahci_port_base(ap);
- struct ahci_host_priv *hpriv = ap->host->private_data;
- struct ahci_port_priv *pp = ap->private_data;
- u32 tmp;
-
- /* set FIS registers */
- if (hpriv->cap & HOST_CAP_64)
- writel((pp->cmd_slot_dma >> 16) >> 16,
- port_mmio + PORT_LST_ADDR_HI);
- writel(pp->cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR);
-
- if (hpriv->cap & HOST_CAP_64)
- writel((pp->rx_fis_dma >> 16) >> 16,
- port_mmio + PORT_FIS_ADDR_HI);
- writel(pp->rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR);
-
- /* enable FIS reception */
- tmp = readl(port_mmio + PORT_CMD);
- tmp |= PORT_CMD_FIS_RX;
- writel(tmp, port_mmio + PORT_CMD);
-
- /* flush */
- readl(port_mmio + PORT_CMD);
+ ahci_save_initial_config(&pdev->dev, hpriv, force_port_map,
+ mask_port_map);
}
-static int ahci_stop_fis_rx(struct ata_port *ap)
-{
- void __iomem *port_mmio = ahci_port_base(ap);
- u32 tmp;
-
- /* disable FIS reception */
- tmp = readl(port_mmio + PORT_CMD);
- tmp &= ~PORT_CMD_FIS_RX;
- writel(tmp, port_mmio + PORT_CMD);
-
- /* wait for completion, spec says 500ms, give it 1000 */
- tmp = ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_FIS_ON,
- PORT_CMD_FIS_ON, 10, 1000);
- if (tmp & PORT_CMD_FIS_ON)
- return -EBUSY;
-
- return 0;
-}
-
-static void ahci_power_up(struct ata_port *ap)
-{
- struct ahci_host_priv *hpriv = ap->host->private_data;
- void __iomem *port_mmio = ahci_port_base(ap);
- u32 cmd;
-
- cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
-
- /* spin up device */
- if (hpriv->cap & HOST_CAP_SSS) {
- cmd |= PORT_CMD_SPIN_UP;
- writel(cmd, port_mmio + PORT_CMD);
- }
-
- /* wake up link */
- writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD);
-}
-
-static void ahci_disable_alpm(struct ata_port *ap)
-{
- struct ahci_host_priv *hpriv = ap->host->private_data;
- void __iomem *port_mmio = ahci_port_base(ap);
- u32 cmd;
- struct ahci_port_priv *pp = ap->private_data;
-
- /* IPM bits should be disabled by libata-core */
- /* get the existing command bits */
- cmd = readl(port_mmio + PORT_CMD);
-
- /* disable ALPM and ASP */
- cmd &= ~PORT_CMD_ASP;
- cmd &= ~PORT_CMD_ALPE;
-
- /* force the interface back to active */
- cmd |= PORT_CMD_ICC_ACTIVE;
-
- /* write out new cmd value */
- writel(cmd, port_mmio + PORT_CMD);
- cmd = readl(port_mmio + PORT_CMD);
-
- /* wait 10ms to be sure we've come out of any low power state */
- msleep(10);
-
- /* clear out any PhyRdy stuff from interrupt status */
- writel(PORT_IRQ_PHYRDY, port_mmio + PORT_IRQ_STAT);
-
- /* go ahead and clean out PhyRdy Change from Serror too */
- ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));
-
- /*
- * Clear flag to indicate that we should ignore all PhyRdy
- * state changes
- */
- hpriv->flags &= ~AHCI_HFLAG_NO_HOTPLUG;
-
- /*
- * Enable interrupts on Phy Ready.
- */
- pp->intr_mask |= PORT_IRQ_PHYRDY;
- writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
-
- /*
- * don't change the link pm policy - we can be called
- * just to turn of link pm temporarily
- */
-}
-
-static int ahci_enable_alpm(struct ata_port *ap,
- enum link_pm policy)
-{
- struct ahci_host_priv *hpriv = ap->host->private_data;
- void __iomem *port_mmio = ahci_port_base(ap);
- u32 cmd;
- struct ahci_port_priv *pp = ap->private_data;
- u32 asp;
-
- /* Make sure the host is capable of link power management */
- if (!(hpriv->cap & HOST_CAP_ALPM))
- return -EINVAL;
-
- switch (policy) {
- case MAX_PERFORMANCE:
- case NOT_AVAILABLE:
- /*
- * if we came here with NOT_AVAILABLE,
- * it just means this is the first time we
- * have tried to enable - default to max performance,
- * and let the user go to lower power modes on request.
- */
- ahci_disable_alpm(ap);
- return 0;
- case MIN_POWER:
- /* configure HBA to enter SLUMBER */
- asp = PORT_CMD_ASP;
- break;
- case MEDIUM_POWER:
- /* configure HBA to enter PARTIAL */
- asp = 0;
- break;
- default:
- return -EINVAL;
- }
-
- /*
- * Disable interrupts on Phy Ready. This keeps us from
- * getting woken up due to spurious phy ready interrupts
- * TBD - Hot plug should be done via polling now, is
- * that even supported?
- */
- pp->intr_mask &= ~PORT_IRQ_PHYRDY;
- writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
-
- /*
- * Set a flag to indicate that we should ignore all PhyRdy
- * state changes since these can happen now whenever we
- * change link state
- */
- hpriv->flags |= AHCI_HFLAG_NO_HOTPLUG;
-
- /* get the existing command bits */
- cmd = readl(port_mmio + PORT_CMD);
-
- /*
- * Set ASP based on Policy
- */
- cmd |= asp;
-
- /*
- * Setting this bit will instruct the HBA to aggressively
- * enter a lower power link state when it's appropriate and
- * based on the value set above for ASP
- */
- cmd |= PORT_CMD_ALPE;
-
- /* write out new cmd value */
- writel(cmd, port_mmio + PORT_CMD);
- cmd = readl(port_mmio + PORT_CMD);
-
- /* IPM bits should be set by libata-core */
- return 0;
-}
-
-#ifdef CONFIG_PM
-static void ahci_power_down(struct ata_port *ap)
-{
- struct ahci_host_priv *hpriv = ap->host->private_data;
- void __iomem *port_mmio = ahci_port_base(ap);
- u32 cmd, scontrol;
-
- if (!(hpriv->cap & HOST_CAP_SSS))
- return;
-
- /* put device into listen mode, first set PxSCTL.DET to 0 */
- scontrol = readl(port_mmio + PORT_SCR_CTL);
- scontrol &= ~0xf;
- writel(scontrol, port_mmio + PORT_SCR_CTL);
-
- /* then set PxCMD.SUD to 0 */
- cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
- cmd &= ~PORT_CMD_SPIN_UP;
- writel(cmd, port_mmio + PORT_CMD);
-}
-#endif
-
-static void ahci_start_port(struct ata_port *ap)
-{
- struct ahci_port_priv *pp = ap->private_data;
- struct ata_link *link;
- struct ahci_em_priv *emp;
- ssize_t rc;
- int i;
-
- /* enable FIS reception */
- ahci_start_fis_rx(ap);
-
- /* enable DMA */
- ahci_start_engine(ap);
-
- /* turn on LEDs */
- if (ap->flags & ATA_FLAG_EM) {
- ata_for_each_link(link, ap, EDGE) {
- emp = &pp->em_priv[link->pmp];
-
- /* EM Transmit bit maybe busy during init */
- for (i = 0; i < EM_MAX_RETRY; i++) {
- rc = ahci_transmit_led_message(ap,
- emp->led_state,
- 4);
- if (rc == -EBUSY)
- msleep(1);
- else
- break;
- }
- }
- }
-
- if (ap->flags & ATA_FLAG_SW_ACTIVITY)
- ata_for_each_link(link, ap, EDGE)
- ahci_init_sw_activity(link);
-
-}
-
-static int ahci_deinit_port(struct ata_port *ap, const char **emsg)
-{
- int rc;
-
- /* disable DMA */
- rc = ahci_stop_engine(ap);
- if (rc) {
- *emsg = "failed to stop engine";
- return rc;
- }
-
- /* disable FIS reception */
- rc = ahci_stop_fis_rx(ap);
- if (rc) {
- *emsg = "failed stop FIS RX";
- return rc;
- }
-
- return 0;
-}
-
-static int ahci_reset_controller(struct ata_host *host)
+static int ahci_pci_reset_controller(struct ata_host *host)
{
struct pci_dev *pdev = to_pci_dev(host->dev);
- struct ahci_host_priv *hpriv = host->private_data;
- void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
- u32 tmp;
- /* we must be in AHCI mode, before using anything
- * AHCI-specific, such as HOST_RESET.
- */
- ahci_enable_ahci(mmio);
-
- /* global controller reset */
- if (!ahci_skip_host_reset) {
- tmp = readl(mmio + HOST_CTL);
- if ((tmp & HOST_RESET) == 0) {
- writel(tmp | HOST_RESET, mmio + HOST_CTL);
- readl(mmio + HOST_CTL); /* flush */
- }
-
- /*
- * to perform host reset, OS should set HOST_RESET
- * and poll until this bit is read to be "0".
- * reset must complete within 1 second, or
- * the hardware should be considered fried.
- */
- tmp = ata_wait_register(mmio + HOST_CTL, HOST_RESET,
- HOST_RESET, 10, 1000);
-
- if (tmp & HOST_RESET) {
- dev_printk(KERN_ERR, host->dev,
- "controller reset failed (0x%x)\n", tmp);
- return -EIO;
- }
-
- /* turn on AHCI mode */
- ahci_enable_ahci(mmio);
-
- /* Some registers might be cleared on reset. Restore
- * initial values.
- */
- ahci_restore_initial_config(host);
- } else
- dev_printk(KERN_INFO, host->dev,
- "skipping global host reset\n");
+ ahci_reset_controller(host);
if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
+ struct ahci_host_priv *hpriv = host->private_data;
u16 tmp16;
/* configure PCS */
@@ -1397,267 +444,10 @@ static int ahci_reset_controller(struct ata_host *host)
return 0;
}
-static void ahci_sw_activity(struct ata_link *link)
-{
- struct ata_port *ap = link->ap;
- struct ahci_port_priv *pp = ap->private_data;
- struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
-
- if (!(link->flags & ATA_LFLAG_SW_ACTIVITY))
- return;
-
- emp->activity++;
- if (!timer_pending(&emp->timer))
- mod_timer(&emp->timer, jiffies + msecs_to_jiffies(10));
-}
-
-static void ahci_sw_activity_blink(unsigned long arg)
-{
- struct ata_link *link = (struct ata_link *)arg;
- struct ata_port *ap = link->ap;
- struct ahci_port_priv *pp = ap->private_data;
- struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
- unsigned long led_message = emp->led_state;
- u32 activity_led_state;
- unsigned long flags;
-
- led_message &= EM_MSG_LED_VALUE;
- led_message |= ap->port_no | (link->pmp << 8);
-
- /* check to see if we've had activity. If so,
- * toggle state of LED and reset timer. If not,
- * turn LED to desired idle state.
- */
- spin_lock_irqsave(ap->lock, flags);
- if (emp->saved_activity != emp->activity) {
- emp->saved_activity = emp->activity;
- /* get the current LED state */
- activity_led_state = led_message & EM_MSG_LED_VALUE_ON;
-
- if (activity_led_state)
- activity_led_state = 0;
- else
- activity_led_state = 1;
-
- /* clear old state */
- led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;
-
- /* toggle state */
- led_message |= (activity_led_state << 16);
- mod_timer(&emp->timer, jiffies + msecs_to_jiffies(100));
- } else {
- /* switch to idle */
- led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;
- if (emp->blink_policy == BLINK_OFF)
- led_message |= (1 << 16);
- }
- spin_unlock_irqrestore(ap->lock, flags);
- ahci_transmit_led_message(ap, led_message, 4);
-}
-
-static void ahci_init_sw_activity(struct ata_link *link)
-{
- struct ata_port *ap = link->ap;
- struct ahci_port_priv *pp = ap->private_data;
- struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
-
- /* init activity stats, setup timer */
- emp->saved_activity = emp->activity = 0;
- setup_timer(&emp->timer, ahci_sw_activity_blink, (unsigned long)link);
-
- /* check our blink policy and set flag for link if it's enabled */
- if (emp->blink_policy)
- link->flags |= ATA_LFLAG_SW_ACTIVITY;
-}
-
-static int ahci_reset_em(struct ata_host *host)
-{
- void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
- u32 em_ctl;
-
- em_ctl = readl(mmio + HOST_EM_CTL);
- if ((em_ctl & EM_CTL_TM) || (em_ctl & EM_CTL_RST))
- return -EINVAL;
-
- writel(em_ctl | EM_CTL_RST, mmio + HOST_EM_CTL);
- return 0;
-}
-
-static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
- ssize_t size)
-{
- struct ahci_host_priv *hpriv = ap->host->private_data;
- struct ahci_port_priv *pp = ap->private_data;
- void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
- u32 em_ctl;
- u32 message[] = {0, 0};
- unsigned long flags;
- int pmp;
- struct ahci_em_priv *emp;
-
- /* get the slot number from the message */
- pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
- if (pmp < EM_MAX_SLOTS)
- emp = &pp->em_priv[pmp];
- else
- return -EINVAL;
-
- spin_lock_irqsave(ap->lock, flags);
-
- /*
- * if we are still busy transmitting a previous message,
- * do not allow another one; return -EBUSY
- */
- em_ctl = readl(mmio + HOST_EM_CTL);
- if (em_ctl & EM_CTL_TM) {
- spin_unlock_irqrestore(ap->lock, flags);
- return -EBUSY;
- }
-
- /*
- * create message header - this is all zero except for
- * the message size, which is 4 bytes.
- */
- message[0] |= (4 << 8);
-
- /* ignore 0:4 of byte zero, fill in port info yourself */
- message[1] = ((state & ~EM_MSG_LED_HBA_PORT) | ap->port_no);
-
- /* write message to EM_LOC */
- writel(message[0], mmio + hpriv->em_loc);
- writel(message[1], mmio + hpriv->em_loc+4);
-
- /* save off new led state for port/slot */
- emp->led_state = state;
-
- /*
- * tell hardware to transmit the message
- */
- writel(em_ctl | EM_CTL_TM, mmio + HOST_EM_CTL);
-
- spin_unlock_irqrestore(ap->lock, flags);
- return size;
-}
-
-static ssize_t ahci_led_show(struct ata_port *ap, char *buf)
-{
- struct ahci_port_priv *pp = ap->private_data;
- struct ata_link *link;
- struct ahci_em_priv *emp;
- int rc = 0;
-
- ata_for_each_link(link, ap, EDGE) {
- emp = &pp->em_priv[link->pmp];
- rc += sprintf(buf, "%lx\n", emp->led_state);
- }
- return rc;
-}
-
-static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
- size_t size)
-{
- int state;
- int pmp;
- struct ahci_port_priv *pp = ap->private_data;
- struct ahci_em_priv *emp;
-
- state = simple_strtoul(buf, NULL, 0);
-
- /* get the slot number from the message */
- pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
- if (pmp < EM_MAX_SLOTS)
- emp = &pp->em_priv[pmp];
- else
- return -EINVAL;
-
- /* mask off the activity bits if we are in sw_activity
- * mode, user should turn off sw_activity before setting
- * activity led through em_message
- */
- if (emp->blink_policy)
- state &= ~EM_MSG_LED_VALUE_ACTIVITY;
-
- return ahci_transmit_led_message(ap, state, size);
-}
-
-static ssize_t ahci_activity_store(struct ata_device *dev, enum sw_activity val)
-{
- struct ata_link *link = dev->link;
- struct ata_port *ap = link->ap;
- struct ahci_port_priv *pp = ap->private_data;
- struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
- u32 port_led_state = emp->led_state;
-
- /* save the desired Activity LED behavior */
- if (val == OFF) {
- /* clear LFLAG */
- link->flags &= ~(ATA_LFLAG_SW_ACTIVITY);
-
- /* set the LED to OFF */
- port_led_state &= EM_MSG_LED_VALUE_OFF;
- port_led_state |= (ap->port_no | (link->pmp << 8));
- ahci_transmit_led_message(ap, port_led_state, 4);
- } else {
- link->flags |= ATA_LFLAG_SW_ACTIVITY;
- if (val == BLINK_OFF) {
- /* set LED to ON for idle */
- port_led_state &= EM_MSG_LED_VALUE_OFF;
- port_led_state |= (ap->port_no | (link->pmp << 8));
- port_led_state |= EM_MSG_LED_VALUE_ON; /* check this */
- ahci_transmit_led_message(ap, port_led_state, 4);
- }
- }
- emp->blink_policy = val;
- return 0;
-}
-
-static ssize_t ahci_activity_show(struct ata_device *dev, char *buf)
-{
- struct ata_link *link = dev->link;
- struct ata_port *ap = link->ap;
- struct ahci_port_priv *pp = ap->private_data;
- struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
-
- /* display the saved value of activity behavior for this
- * disk.
- */
- return sprintf(buf, "%d\n", emp->blink_policy);
-}
-
-static void ahci_port_init(struct pci_dev *pdev, struct ata_port *ap,
- int port_no, void __iomem *mmio,
- void __iomem *port_mmio)
-{
- const char *emsg = NULL;
- int rc;
- u32 tmp;
-
- /* make sure port is not active */
- rc = ahci_deinit_port(ap, &emsg);
- if (rc)
- dev_printk(KERN_WARNING, &pdev->dev,
- "%s (%d)\n", emsg, rc);
-
- /* clear SError */
- tmp = readl(port_mmio + PORT_SCR_ERR);
- VPRINTK("PORT_SCR_ERR 0x%x\n", tmp);
- writel(tmp, port_mmio + PORT_SCR_ERR);
-
- /* clear port IRQ */
- tmp = readl(port_mmio + PORT_IRQ_STAT);
- VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
- if (tmp)
- writel(tmp, port_mmio + PORT_IRQ_STAT);
-
- writel(1 << port_no, mmio + HOST_IRQ_STAT);
-}
-
-static void ahci_init_controller(struct ata_host *host)
+static void ahci_pci_init_controller(struct ata_host *host)
{
struct ahci_host_priv *hpriv = host->private_data;
struct pci_dev *pdev = to_pci_dev(host->dev);
- void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
- int i;
void __iomem *port_mmio;
u32 tmp;
int mv;
@@ -1678,220 +468,7 @@ static void ahci_init_controller(struct ata_host *host)
writel(tmp, port_mmio + PORT_IRQ_STAT);
}
- for (i = 0; i < host->n_ports; i++) {
- struct ata_port *ap = host->ports[i];
-
- port_mmio = ahci_port_base(ap);
- if (ata_port_is_dummy(ap))
- continue;
-
- ahci_port_init(pdev, ap, i, mmio, port_mmio);
- }
-
- tmp = readl(mmio + HOST_CTL);
- VPRINTK("HOST_CTL 0x%x\n", tmp);
- writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL);
- tmp = readl(mmio + HOST_CTL);
- VPRINTK("HOST_CTL 0x%x\n", tmp);
-}
-
-static void ahci_dev_config(struct ata_device *dev)
-{
- struct ahci_host_priv *hpriv = dev->link->ap->host->private_data;
-
- if (hpriv->flags & AHCI_HFLAG_SECT255) {
- dev->max_sectors = 255;
- ata_dev_printk(dev, KERN_INFO,
- "SB600 AHCI: limiting to 255 sectors per cmd\n");
- }
-}
-
-static unsigned int ahci_dev_classify(struct ata_port *ap)
-{
- void __iomem *port_mmio = ahci_port_base(ap);
- struct ata_taskfile tf;
- u32 tmp;
-
- tmp = readl(port_mmio + PORT_SIG);
- tf.lbah = (tmp >> 24) & 0xff;
- tf.lbam = (tmp >> 16) & 0xff;
- tf.lbal = (tmp >> 8) & 0xff;
- tf.nsect = (tmp) & 0xff;
-
- return ata_dev_classify(&tf);
-}
-
-static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
- u32 opts)
-{
- dma_addr_t cmd_tbl_dma;
-
- cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ;
-
- pp->cmd_slot[tag].opts = cpu_to_le32(opts);
- pp->cmd_slot[tag].status = 0;
- pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff);
- pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
-}
-
-static int ahci_kick_engine(struct ata_port *ap)
-{
- void __iomem *port_mmio = ahci_port_base(ap);
- struct ahci_host_priv *hpriv = ap->host->private_data;
- u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
- u32 tmp;
- int busy, rc;
-
- /* stop engine */
- rc = ahci_stop_engine(ap);
- if (rc)
- goto out_restart;
-
- /* need to do CLO?
- * always do CLO if PMP is attached (AHCI-1.3 9.2)
- */
- busy = status & (ATA_BUSY | ATA_DRQ);
- if (!busy && !sata_pmp_attached(ap)) {
- rc = 0;
- goto out_restart;
- }
-
- if (!(hpriv->cap & HOST_CAP_CLO)) {
- rc = -EOPNOTSUPP;
- goto out_restart;
- }
-
- /* perform CLO */
- tmp = readl(port_mmio + PORT_CMD);
- tmp |= PORT_CMD_CLO;
- writel(tmp, port_mmio + PORT_CMD);
-
- rc = 0;
- tmp = ata_wait_register(port_mmio + PORT_CMD,
- PORT_CMD_CLO, PORT_CMD_CLO, 1, 500);
- if (tmp & PORT_CMD_CLO)
- rc = -EIO;
-
- /* restart engine */
- out_restart:
- ahci_start_engine(ap);
- return rc;
-}
-
-static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
- struct ata_taskfile *tf, int is_cmd, u16 flags,
- unsigned long timeout_msec)
-{
- const u32 cmd_fis_len = 5; /* five dwords */
- struct ahci_port_priv *pp = ap->private_data;
- void __iomem *port_mmio = ahci_port_base(ap);
- u8 *fis = pp->cmd_tbl;
- u32 tmp;
-
- /* prep the command */
- ata_tf_to_fis(tf, pmp, is_cmd, fis);
- ahci_fill_cmd_slot(pp, 0, cmd_fis_len | flags | (pmp << 12));
-
- /* issue & wait */
- writel(1, port_mmio + PORT_CMD_ISSUE);
-
- if (timeout_msec) {
- tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1,
- 1, timeout_msec);
- if (tmp & 0x1) {
- ahci_kick_engine(ap);
- return -EBUSY;
- }
- } else
- readl(port_mmio + PORT_CMD_ISSUE); /* flush */
-
- return 0;
-}
-
-static int ahci_do_softreset(struct ata_link *link, unsigned int *class,
- int pmp, unsigned long deadline,
- int (*check_ready)(struct ata_link *link))
-{
- struct ata_port *ap = link->ap;
- struct ahci_host_priv *hpriv = ap->host->private_data;
- const char *reason = NULL;
- unsigned long now, msecs;
- struct ata_taskfile tf;
- int rc;
-
- DPRINTK("ENTER\n");
-
- /* prepare for SRST (AHCI-1.1 10.4.1) */
- rc = ahci_kick_engine(ap);
- if (rc && rc != -EOPNOTSUPP)
- ata_link_printk(link, KERN_WARNING,
- "failed to reset engine (errno=%d)\n", rc);
-
- ata_tf_init(link->device, &tf);
-
- /* issue the first D2H Register FIS */
- msecs = 0;
- now = jiffies;
- if (time_after(now, deadline))
- msecs = jiffies_to_msecs(deadline - now);
-
- tf.ctl |= ATA_SRST;
- if (ahci_exec_polled_cmd(ap, pmp, &tf, 0,
- AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY, msecs)) {
- rc = -EIO;
- reason = "1st FIS failed";
- goto fail;
- }
-
- /* spec says at least 5us, but be generous and sleep for 1ms */
- msleep(1);
-
- /* issue the second D2H Register FIS */
- tf.ctl &= ~ATA_SRST;
- ahci_exec_polled_cmd(ap, pmp, &tf, 0, 0, 0);
-
- /* wait for link to become ready */
- rc = ata_wait_after_reset(link, deadline, check_ready);
- if (rc == -EBUSY && hpriv->flags & AHCI_HFLAG_SRST_TOUT_IS_OFFLINE) {
- /*
- * Workaround for cases where link online status can't
- * be trusted. Treat device readiness timeout as link
- * offline.
- */
- ata_link_printk(link, KERN_INFO,
- "device not ready, treating as offline\n");
- *class = ATA_DEV_NONE;
- } else if (rc) {
- /* link occupied, -ENODEV too is an error */
- reason = "device not ready";
- goto fail;
- } else
- *class = ahci_dev_classify(ap);
-
- DPRINTK("EXIT, class=%u\n", *class);
- return 0;
-
- fail:
- ata_link_printk(link, KERN_ERR, "softreset failed (%s)\n", reason);
- return rc;
-}
-
-static int ahci_check_ready(struct ata_link *link)
-{
- void __iomem *port_mmio = ahci_port_base(link->ap);
- u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
-
- return ata_check_ready(status);
-}
-
-static int ahci_softreset(struct ata_link *link, unsigned int *class,
- unsigned long deadline)
-{
- int pmp = sata_srst_pmp(link);
-
- DPRINTK("ENTER\n");
-
- return ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
+ ahci_init_controller(host);
}
static int ahci_sb600_check_ready(struct ata_link *link)
@@ -1943,38 +520,6 @@ static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class,
return rc;
}
-static int ahci_hardreset(struct ata_link *link, unsigned int *class,
- unsigned long deadline)
-{
- const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
- struct ata_port *ap = link->ap;
- struct ahci_port_priv *pp = ap->private_data;
- u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
- struct ata_taskfile tf;
- bool online;
- int rc;
-
- DPRINTK("ENTER\n");
-
- ahci_stop_engine(ap);
-
- /* clear D2H reception area to properly wait for D2H FIS */
- ata_tf_init(link->device, &tf);
- tf.command = 0x80;
- ata_tf_to_fis(&tf, 0, 0, d2h_fis);
-
- rc = sata_link_hardreset(link, timing, deadline, &online,
- ahci_check_ready);
-
- ahci_start_engine(ap);
-
- if (online)
- *class = ahci_dev_classify(ap);
-
- DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
- return rc;
-}
-
static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline)
{
@@ -2043,605 +588,12 @@ static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
return rc;
}
-static void ahci_postreset(struct ata_link *link, unsigned int *class)
-{
- struct ata_port *ap = link->ap;
- void __iomem *port_mmio = ahci_port_base(ap);
- u32 new_tmp, tmp;
-
- ata_std_postreset(link, class);
-
- /* Make sure port's ATAPI bit is set appropriately */
- new_tmp = tmp = readl(port_mmio + PORT_CMD);
- if (*class == ATA_DEV_ATAPI)
- new_tmp |= PORT_CMD_ATAPI;
- else
- new_tmp &= ~PORT_CMD_ATAPI;
- if (new_tmp != tmp) {
- writel(new_tmp, port_mmio + PORT_CMD);
- readl(port_mmio + PORT_CMD); /* flush */
- }
-}
-
-static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
-{
- struct scatterlist *sg;
- struct ahci_sg *ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
- unsigned int si;
-
- VPRINTK("ENTER\n");
-
- /*
- * Next, the S/G list.
- */
- for_each_sg(qc->sg, sg, qc->n_elem, si) {
- dma_addr_t addr = sg_dma_address(sg);
- u32 sg_len = sg_dma_len(sg);
-
- ahci_sg[si].addr = cpu_to_le32(addr & 0xffffffff);
- ahci_sg[si].addr_hi = cpu_to_le32((addr >> 16) >> 16);
- ahci_sg[si].flags_size = cpu_to_le32(sg_len - 1);
- }
-
- return si;
-}
-
-static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc)
-{
- struct ata_port *ap = qc->ap;
- struct ahci_port_priv *pp = ap->private_data;
-
- if (!sata_pmp_attached(ap) || pp->fbs_enabled)
- return ata_std_qc_defer(qc);
- else
- return sata_pmp_qc_defer_cmd_switch(qc);
-}
-
-static void ahci_qc_prep(struct ata_queued_cmd *qc)
-{
- struct ata_port *ap = qc->ap;
- struct ahci_port_priv *pp = ap->private_data;
- int is_atapi = ata_is_atapi(qc->tf.protocol);
- void *cmd_tbl;
- u32 opts;
- const u32 cmd_fis_len = 5; /* five dwords */
- unsigned int n_elem;
-
- /*
- * Fill in command table information. First, the header,
- * a SATA Register - Host to Device command FIS.
- */
- cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;
-
- ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl);
- if (is_atapi) {
- memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
- memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
- }
-
- n_elem = 0;
- if (qc->flags & ATA_QCFLAG_DMAMAP)
- n_elem = ahci_fill_sg(qc, cmd_tbl);
-
- /*
- * Fill in command slot information.
- */
- opts = cmd_fis_len | n_elem << 16 | (qc->dev->link->pmp << 12);
- if (qc->tf.flags & ATA_TFLAG_WRITE)
- opts |= AHCI_CMD_WRITE;
- if (is_atapi)
- opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
-
- ahci_fill_cmd_slot(pp, qc->tag, opts);
-}
-
-static void ahci_fbs_dec_intr(struct ata_port *ap)
-{
- struct ahci_port_priv *pp = ap->private_data;
- void __iomem *port_mmio = ahci_port_base(ap);
- u32 fbs = readl(port_mmio + PORT_FBS);
- int retries = 3;
-
- DPRINTK("ENTER\n");
- BUG_ON(!pp->fbs_enabled);
-
- /* time to wait for DEC is not specified by AHCI spec,
- * add a retry loop for safety.
- */
- writel(fbs | PORT_FBS_DEC, port_mmio + PORT_FBS);
- fbs = readl(port_mmio + PORT_FBS);
- while ((fbs & PORT_FBS_DEC) && retries--) {
- udelay(1);
- fbs = readl(port_mmio + PORT_FBS);
- }
-
- if (fbs & PORT_FBS_DEC)
- dev_printk(KERN_ERR, ap->host->dev,
- "failed to clear device error\n");
-}
-
-static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
-{
- struct ahci_host_priv *hpriv = ap->host->private_data;
- struct ahci_port_priv *pp = ap->private_data;
- struct ata_eh_info *host_ehi = &ap->link.eh_info;
- struct ata_link *link = NULL;
- struct ata_queued_cmd *active_qc;
- struct ata_eh_info *active_ehi;
- bool fbs_need_dec = false;
- u32 serror;
-
- /* determine active link with error */
- if (pp->fbs_enabled) {
- void __iomem *port_mmio = ahci_port_base(ap);
- u32 fbs = readl(port_mmio + PORT_FBS);
- int pmp = fbs >> PORT_FBS_DWE_OFFSET;
-
- if ((fbs & PORT_FBS_SDE) && (pmp < ap->nr_pmp_links) &&
- ata_link_online(&ap->pmp_link[pmp])) {
- link = &ap->pmp_link[pmp];
- fbs_need_dec = true;
- }
-
- } else
- ata_for_each_link(link, ap, EDGE)
- if (ata_link_active(link))
- break;
-
- if (!link)
- link = &ap->link;
-
- active_qc = ata_qc_from_tag(ap, link->active_tag);
- active_ehi = &link->eh_info;
-
- /* record irq stat */
- ata_ehi_clear_desc(host_ehi);
- ata_ehi_push_desc(host_ehi, "irq_stat 0x%08x", irq_stat);
-
- /* AHCI needs SError cleared; otherwise, it might lock up */
- ahci_scr_read(&ap->link, SCR_ERROR, &serror);
- ahci_scr_write(&ap->link, SCR_ERROR, serror);
- host_ehi->serror |= serror;
-
- /* some controllers set IRQ_IF_ERR on device errors, ignore it */
- if (hpriv->flags & AHCI_HFLAG_IGN_IRQ_IF_ERR)
- irq_stat &= ~PORT_IRQ_IF_ERR;
-
- if (irq_stat & PORT_IRQ_TF_ERR) {
- /* If qc is active, charge it; otherwise, the active
- * link. There's no active qc on NCQ errors. It will
- * be determined by EH by reading log page 10h.
- */
- if (active_qc)
- active_qc->err_mask |= AC_ERR_DEV;
- else
- active_ehi->err_mask |= AC_ERR_DEV;
-
- if (hpriv->flags & AHCI_HFLAG_IGN_SERR_INTERNAL)
- host_ehi->serror &= ~SERR_INTERNAL;
- }
-
- if (irq_stat & PORT_IRQ_UNK_FIS) {
- u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK);
-
- active_ehi->err_mask |= AC_ERR_HSM;
- active_ehi->action |= ATA_EH_RESET;
- ata_ehi_push_desc(active_ehi,
- "unknown FIS %08x %08x %08x %08x" ,
- unk[0], unk[1], unk[2], unk[3]);
- }
-
- if (sata_pmp_attached(ap) && (irq_stat & PORT_IRQ_BAD_PMP)) {
- active_ehi->err_mask |= AC_ERR_HSM;
- active_ehi->action |= ATA_EH_RESET;
- ata_ehi_push_desc(active_ehi, "incorrect PMP");
- }
-
- if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
- host_ehi->err_mask |= AC_ERR_HOST_BUS;
- host_ehi->action |= ATA_EH_RESET;
- ata_ehi_push_desc(host_ehi, "host bus error");
- }
-
- if (irq_stat & PORT_IRQ_IF_ERR) {
- if (fbs_need_dec)
- active_ehi->err_mask |= AC_ERR_DEV;
- else {
- host_ehi->err_mask |= AC_ERR_ATA_BUS;
- host_ehi->action |= ATA_EH_RESET;
- }
-
- ata_ehi_push_desc(host_ehi, "interface fatal error");
- }
-
- if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
- ata_ehi_hotplugged(host_ehi);
- ata_ehi_push_desc(host_ehi, "%s",
- irq_stat & PORT_IRQ_CONNECT ?
- "connection status changed" : "PHY RDY changed");
- }
-
- /* okay, let's hand over to EH */
-
- if (irq_stat & PORT_IRQ_FREEZE)
- ata_port_freeze(ap);
- else if (fbs_need_dec) {
- ata_link_abort(link);
- ahci_fbs_dec_intr(ap);
- } else
- ata_port_abort(ap);
-}
-
-static void ahci_port_intr(struct ata_port *ap)
-{
- void __iomem *port_mmio = ahci_port_base(ap);
- struct ata_eh_info *ehi = &ap->link.eh_info;
- struct ahci_port_priv *pp = ap->private_data;
- struct ahci_host_priv *hpriv = ap->host->private_data;
- int resetting = !!(ap->pflags & ATA_PFLAG_RESETTING);
- u32 status, qc_active = 0;
- int rc;
-
- status = readl(port_mmio + PORT_IRQ_STAT);
- writel(status, port_mmio + PORT_IRQ_STAT);
-
- /* ignore BAD_PMP while resetting */
- if (unlikely(resetting))
- status &= ~PORT_IRQ_BAD_PMP;
-
- /* If we are getting PhyRdy, this is
- * just a power state change, we should
- * clear out this, plus the PhyRdy/Comm
- * Wake bits from Serror
- */
- if ((hpriv->flags & AHCI_HFLAG_NO_HOTPLUG) &&
- (status & PORT_IRQ_PHYRDY)) {
- status &= ~PORT_IRQ_PHYRDY;
- ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));
- }
-
- if (unlikely(status & PORT_IRQ_ERROR)) {
- ahci_error_intr(ap, status);
- return;
- }
-
- if (status & PORT_IRQ_SDB_FIS) {
- /* If SNotification is available, leave notification
- * handling to sata_async_notification(). If not,
- * emulate it by snooping SDB FIS RX area.
- *
- * Snooping FIS RX area is probably cheaper than
- * poking SNotification but some controllers which
- * implement SNotification, ICH9 for example, don't
- * store AN SDB FIS into receive area.
- */
- if (hpriv->cap & HOST_CAP_SNTF)
- sata_async_notification(ap);
- else {
- /* If the 'N' bit in word 0 of the FIS is set,
- * we just received asynchronous notification.
- * Tell libata about it.
- *
- * Lack of SNotification should not appear in
- * ahci 1.2, so the workaround is unnecessary
- * when FBS is enabled.
- */
- if (pp->fbs_enabled)
- WARN_ON_ONCE(1);
- else {
- const __le32 *f = pp->rx_fis + RX_FIS_SDB;
- u32 f0 = le32_to_cpu(f[0]);
- if (f0 & (1 << 15))
- sata_async_notification(ap);
- }
- }
- }
-
- /* pp->active_link is not reliable once FBS is enabled, both
- * PORT_SCR_ACT and PORT_CMD_ISSUE should be checked because
- * NCQ and non-NCQ commands may be in flight at the same time.
- */
- if (pp->fbs_enabled) {
- if (ap->qc_active) {
- qc_active = readl(port_mmio + PORT_SCR_ACT);
- qc_active |= readl(port_mmio + PORT_CMD_ISSUE);
- }
- } else {
- /* pp->active_link is valid iff any command is in flight */
- if (ap->qc_active && pp->active_link->sactive)
- qc_active = readl(port_mmio + PORT_SCR_ACT);
- else
- qc_active = readl(port_mmio + PORT_CMD_ISSUE);
- }
-
- rc = ata_qc_complete_multiple(ap, qc_active);
-
- /* while resetting, invalid completions are expected */
- if (unlikely(rc < 0 && !resetting)) {
- ehi->err_mask |= AC_ERR_HSM;
- ehi->action |= ATA_EH_RESET;
- ata_port_freeze(ap);
- }
-}
-
-static irqreturn_t ahci_interrupt(int irq, void *dev_instance)
-{
- struct ata_host *host = dev_instance;
- struct ahci_host_priv *hpriv;
- unsigned int i, handled = 0;
- void __iomem *mmio;
- u32 irq_stat, irq_masked;
-
- VPRINTK("ENTER\n");
-
- hpriv = host->private_data;
- mmio = host->iomap[AHCI_PCI_BAR];
-
- /* sigh. 0xffffffff is a valid return from h/w */
- irq_stat = readl(mmio + HOST_IRQ_STAT);
- if (!irq_stat)
- return IRQ_NONE;
-
- irq_masked = irq_stat & hpriv->port_map;
-
- spin_lock(&host->lock);
-
- for (i = 0; i < host->n_ports; i++) {
- struct ata_port *ap;
-
- if (!(irq_masked & (1 << i)))
- continue;
-
- ap = host->ports[i];
- if (ap) {
- ahci_port_intr(ap);
- VPRINTK("port %u\n", i);
- } else {
- VPRINTK("port %u (no irq)\n", i);
- if (ata_ratelimit())
- dev_printk(KERN_WARNING, host->dev,
- "interrupt on disabled port %u\n", i);
- }
-
- handled = 1;
- }
-
- /* HOST_IRQ_STAT behaves as level triggered latch meaning that
- * it should be cleared after all the port events are cleared;
- * otherwise, it will raise a spurious interrupt after each
- * valid one. Please read section 10.6.2 of ahci 1.1 for more
- * information.
- *
- * Also, use the unmasked value to clear interrupt as spurious
- * pending event on a dummy port might cause screaming IRQ.
- */
- writel(irq_stat, mmio + HOST_IRQ_STAT);
-
- spin_unlock(&host->lock);
-
- VPRINTK("EXIT\n");
-
- return IRQ_RETVAL(handled);
-}
-
-static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
-{
- struct ata_port *ap = qc->ap;
- void __iomem *port_mmio = ahci_port_base(ap);
- struct ahci_port_priv *pp = ap->private_data;
-
- /* Keep track of the currently active link. It will be used
- * in completion path to determine whether NCQ phase is in
- * progress.
- */
- pp->active_link = qc->dev->link;
-
- if (qc->tf.protocol == ATA_PROT_NCQ)
- writel(1 << qc->tag, port_mmio + PORT_SCR_ACT);
-
- if (pp->fbs_enabled && pp->fbs_last_dev != qc->dev->link->pmp) {
- u32 fbs = readl(port_mmio + PORT_FBS);
- fbs &= ~(PORT_FBS_DEV_MASK | PORT_FBS_DEC);
- fbs |= qc->dev->link->pmp << PORT_FBS_DEV_OFFSET;
- writel(fbs, port_mmio + PORT_FBS);
- pp->fbs_last_dev = qc->dev->link->pmp;
- }
-
- writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE);
-
- ahci_sw_activity(qc->dev->link);
-
- return 0;
-}
-
-static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
-{
- struct ahci_port_priv *pp = qc->ap->private_data;
- u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
-
- if (pp->fbs_enabled)
- d2h_fis += qc->dev->link->pmp * AHCI_RX_FIS_SZ;
-
- ata_tf_from_fis(d2h_fis, &qc->result_tf);
- return true;
-}
-
-static void ahci_freeze(struct ata_port *ap)
-{
- void __iomem *port_mmio = ahci_port_base(ap);
-
- /* turn IRQ off */
- writel(0, port_mmio + PORT_IRQ_MASK);
-}
-
-static void ahci_thaw(struct ata_port *ap)
-{
- void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
- void __iomem *port_mmio = ahci_port_base(ap);
- u32 tmp;
- struct ahci_port_priv *pp = ap->private_data;
-
- /* clear IRQ */
- tmp = readl(port_mmio + PORT_IRQ_STAT);
- writel(tmp, port_mmio + PORT_IRQ_STAT);
- writel(1 << ap->port_no, mmio + HOST_IRQ_STAT);
-
- /* turn IRQ back on */
- writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
-}
-
-static void ahci_error_handler(struct ata_port *ap)
-{
- if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
- /* restart engine */
- ahci_stop_engine(ap);
- ahci_start_engine(ap);
- }
-
- sata_pmp_error_handler(ap);
-}
-
-static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
-{
- struct ata_port *ap = qc->ap;
-
- /* make DMA engine forget about the failed command */
- if (qc->flags & ATA_QCFLAG_FAILED)
- ahci_kick_engine(ap);
-}
-
-static void ahci_enable_fbs(struct ata_port *ap)
-{
- struct ahci_port_priv *pp = ap->private_data;
- void __iomem *port_mmio = ahci_port_base(ap);
- u32 fbs;
- int rc;
-
- if (!pp->fbs_supported)
- return;
-
- fbs = readl(port_mmio + PORT_FBS);
- if (fbs & PORT_FBS_EN) {
- pp->fbs_enabled = true;
- pp->fbs_last_dev = -1; /* initialization */
- return;
- }
-
- rc = ahci_stop_engine(ap);
- if (rc)
- return;
-
- writel(fbs | PORT_FBS_EN, port_mmio + PORT_FBS);
- fbs = readl(port_mmio + PORT_FBS);
- if (fbs & PORT_FBS_EN) {
- dev_printk(KERN_INFO, ap->host->dev, "FBS is enabled.\n");
- pp->fbs_enabled = true;
- pp->fbs_last_dev = -1; /* initialization */
- } else
- dev_printk(KERN_ERR, ap->host->dev, "Failed to enable FBS\n");
-
- ahci_start_engine(ap);
-}
-
-static void ahci_disable_fbs(struct ata_port *ap)
-{
- struct ahci_port_priv *pp = ap->private_data;
- void __iomem *port_mmio = ahci_port_base(ap);
- u32 fbs;
- int rc;
-
- if (!pp->fbs_supported)
- return;
-
- fbs = readl(port_mmio + PORT_FBS);
- if ((fbs & PORT_FBS_EN) == 0) {
- pp->fbs_enabled = false;
- return;
- }
-
- rc = ahci_stop_engine(ap);
- if (rc)
- return;
-
- writel(fbs & ~PORT_FBS_EN, port_mmio + PORT_FBS);
- fbs = readl(port_mmio + PORT_FBS);
- if (fbs & PORT_FBS_EN)
- dev_printk(KERN_ERR, ap->host->dev, "Failed to disable FBS\n");
- else {
- dev_printk(KERN_INFO, ap->host->dev, "FBS is disabled.\n");
- pp->fbs_enabled = false;
- }
-
- ahci_start_engine(ap);
-}
-
-static void ahci_pmp_attach(struct ata_port *ap)
-{
- void __iomem *port_mmio = ahci_port_base(ap);
- struct ahci_port_priv *pp = ap->private_data;
- u32 cmd;
-
- cmd = readl(port_mmio + PORT_CMD);
- cmd |= PORT_CMD_PMP;
- writel(cmd, port_mmio + PORT_CMD);
-
- ahci_enable_fbs(ap);
-
- pp->intr_mask |= PORT_IRQ_BAD_PMP;
- writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
-}
-
-static void ahci_pmp_detach(struct ata_port *ap)
-{
- void __iomem *port_mmio = ahci_port_base(ap);
- struct ahci_port_priv *pp = ap->private_data;
- u32 cmd;
-
- ahci_disable_fbs(ap);
-
- cmd = readl(port_mmio + PORT_CMD);
- cmd &= ~PORT_CMD_PMP;
- writel(cmd, port_mmio + PORT_CMD);
-
- pp->intr_mask &= ~PORT_IRQ_BAD_PMP;
- writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
-}
-
-static int ahci_port_resume(struct ata_port *ap)
-{
- ahci_power_up(ap);
- ahci_start_port(ap);
-
- if (sata_pmp_attached(ap))
- ahci_pmp_attach(ap);
- else
- ahci_pmp_detach(ap);
-
- return 0;
-}
-
#ifdef CONFIG_PM
-static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
-{
- const char *emsg = NULL;
- int rc;
-
- rc = ahci_deinit_port(ap, &emsg);
- if (rc == 0)
- ahci_power_down(ap);
- else {
- ata_port_printk(ap, KERN_ERR, "%s (%d)\n", emsg, rc);
- ahci_start_port(ap);
- }
-
- return rc;
-}
-
static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
struct ata_host *host = dev_get_drvdata(&pdev->dev);
struct ahci_host_priv *hpriv = host->private_data;
- void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
+ void __iomem *mmio = hpriv->mmio;
u32 ctl;
if (mesg.event & PM_EVENT_SUSPEND &&
@@ -2675,11 +627,11 @@ static int ahci_pci_device_resume(struct pci_dev *pdev)
return rc;
if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
- rc = ahci_reset_controller(host);
+ rc = ahci_pci_reset_controller(host);
if (rc)
return rc;
- ahci_init_controller(host);
+ ahci_pci_init_controller(host);
}
ata_host_resume(host);
@@ -2688,92 +640,6 @@ static int ahci_pci_device_resume(struct pci_dev *pdev)
}
#endif
-static int ahci_port_start(struct ata_port *ap)
-{
- struct ahci_host_priv *hpriv = ap->host->private_data;
- struct device *dev = ap->host->dev;
- struct ahci_port_priv *pp;
- void *mem;
- dma_addr_t mem_dma;
- size_t dma_sz, rx_fis_sz;
-
- pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
- if (!pp)
- return -ENOMEM;
-
- /* check FBS capability */
- if ((hpriv->cap & HOST_CAP_FBS) && sata_pmp_supported(ap)) {
- void __iomem *port_mmio = ahci_port_base(ap);
- u32 cmd = readl(port_mmio + PORT_CMD);
- if (cmd & PORT_CMD_FBSCP)
- pp->fbs_supported = true;
- else
- dev_printk(KERN_WARNING, dev,
- "The port is not capable of FBS\n");
- }
-
- if (pp->fbs_supported) {
- dma_sz = AHCI_PORT_PRIV_FBS_DMA_SZ;
- rx_fis_sz = AHCI_RX_FIS_SZ * 16;
- } else {
- dma_sz = AHCI_PORT_PRIV_DMA_SZ;
- rx_fis_sz = AHCI_RX_FIS_SZ;
- }
-
- mem = dmam_alloc_coherent(dev, dma_sz, &mem_dma, GFP_KERNEL);
- if (!mem)
- return -ENOMEM;
- memset(mem, 0, dma_sz);
-
- /*
- * First item in chunk of DMA memory: 32-slot command table,
- * 32 bytes each in size
- */
- pp->cmd_slot = mem;
- pp->cmd_slot_dma = mem_dma;
-
- mem += AHCI_CMD_SLOT_SZ;
- mem_dma += AHCI_CMD_SLOT_SZ;
-
- /*
- * Second item: Received-FIS area
- */
- pp->rx_fis = mem;
- pp->rx_fis_dma = mem_dma;
-
- mem += rx_fis_sz;
- mem_dma += rx_fis_sz;
-
- /*
- * Third item: data area for storing a single command
- * and its scatter-gather table
- */
- pp->cmd_tbl = mem;
- pp->cmd_tbl_dma = mem_dma;
-
- /*
- * Save off initial list of interrupts to be enabled.
- * This could be changed later
- */
- pp->intr_mask = DEF_PORT_IRQ;
-
- ap->private_data = pp;
-
- /* engage engines, captain */
- return ahci_port_resume(ap);
-}
-
-static void ahci_port_stop(struct ata_port *ap)
-{
- const char *emsg = NULL;
- int rc;
-
- /* de-initialize port */
- rc = ahci_deinit_port(ap, &emsg);
- if (rc)
- ata_port_printk(ap, KERN_WARNING, "%s (%d)\n", emsg, rc);
-}
-
static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
{
int rc;
@@ -2806,31 +672,12 @@ static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
return 0;
}
-static void ahci_print_info(struct ata_host *host)
+static void ahci_pci_print_info(struct ata_host *host)
{
- struct ahci_host_priv *hpriv = host->private_data;
struct pci_dev *pdev = to_pci_dev(host->dev);
- void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
- u32 vers, cap, cap2, impl, speed;
- const char *speed_s;
u16 cc;
const char *scc_s;
- vers = readl(mmio + HOST_VERSION);
- cap = hpriv->cap;
- cap2 = hpriv->cap2;
- impl = hpriv->port_map;
-
- speed = (cap >> 20) & 0xf;
- if (speed == 1)
- speed_s = "1.5";
- else if (speed == 2)
- speed_s = "3";
- else if (speed == 3)
- speed_s = "6";
- else
- speed_s = "?";
-
pci_read_config_word(pdev, 0x0a, &cc);
if (cc == PCI_CLASS_STORAGE_IDE)
scc_s = "IDE";
@@ -2841,50 +688,7 @@ static void ahci_print_info(struct ata_host *host)
else
scc_s = "unknown";
- dev_printk(KERN_INFO, &pdev->dev,
- "AHCI %02x%02x.%02x%02x "
- "%u slots %u ports %s Gbps 0x%x impl %s mode\n"
- ,
-
- (vers >> 24) & 0xff,
- (vers >> 16) & 0xff,
- (vers >> 8) & 0xff,
- vers & 0xff,
-
- ((cap >> 8) & 0x1f) + 1,
- (cap & 0x1f) + 1,
- speed_s,
- impl,
- scc_s);
-
- dev_printk(KERN_INFO, &pdev->dev,
- "flags: "
- "%s%s%s%s%s%s%s"
- "%s%s%s%s%s%s%s"
- "%s%s%s%s%s%s\n"
- ,
-
- cap & HOST_CAP_64 ? "64bit " : "",
- cap & HOST_CAP_NCQ ? "ncq " : "",
- cap & HOST_CAP_SNTF ? "sntf " : "",
- cap & HOST_CAP_MPS ? "ilck " : "",
- cap & HOST_CAP_SSS ? "stag " : "",
- cap & HOST_CAP_ALPM ? "pm " : "",
- cap & HOST_CAP_LED ? "led " : "",
- cap & HOST_CAP_CLO ? "clo " : "",
- cap & HOST_CAP_ONLY ? "only " : "",
- cap & HOST_CAP_PMP ? "pmp " : "",
- cap & HOST_CAP_FBS ? "fbs " : "",
- cap & HOST_CAP_PIO_MULTI ? "pio " : "",
- cap & HOST_CAP_SSC ? "slum " : "",
- cap & HOST_CAP_PART ? "part " : "",
- cap & HOST_CAP_CCC ? "ccc " : "",
- cap & HOST_CAP_EMS ? "ems " : "",
- cap & HOST_CAP_SXS ? "sxs " : "",
- cap2 & HOST_CAP2_APST ? "apst " : "",
- cap2 & HOST_CAP2_NVMHCI ? "nvmp " : "",
- cap2 & HOST_CAP2_BOH ? "boh " : ""
- );
+ ahci_print_info(host, scc_s);
}
/* On ASUS P5W DH Deluxe, the second port of PCI device 00:1f.2 is
@@ -3308,41 +1112,28 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if ((hpriv->flags & AHCI_HFLAG_NO_MSI) || pci_enable_msi(pdev))
pci_intx(pdev, 1);
+ hpriv->mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
+
/* save initial config */
- ahci_save_initial_config(pdev, hpriv);
+ ahci_pci_save_initial_config(pdev, hpriv);
/* prepare host */
if (hpriv->cap & HOST_CAP_NCQ) {
pi.flags |= ATA_FLAG_NCQ;
- /* Auto-activate optimization is supposed to be supported on
- all AHCI controllers indicating NCQ support, but it seems
- to be broken at least on some NVIDIA MCP79 chipsets.
- Until we get info on which NVIDIA chipsets don't have this
- issue, if any, disable AA on all NVIDIA AHCIs. */
- if (pdev->vendor != PCI_VENDOR_ID_NVIDIA)
+ /*
+ * Auto-activate optimization is supposed to be
+ * supported on all AHCI controllers indicating NCQ
+ * capability, but it seems to be broken on some
+ * chipsets including NVIDIAs.
+ */
+ if (!(hpriv->flags & AHCI_HFLAG_NO_FPDMA_AA))
pi.flags |= ATA_FLAG_FPDMA_AA;
}
if (hpriv->cap & HOST_CAP_PMP)
pi.flags |= ATA_FLAG_PMP;
- if (ahci_em_messages && (hpriv->cap & HOST_CAP_EMS)) {
- u8 messages;
- void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
- u32 em_loc = readl(mmio + HOST_EM_LOC);
- u32 em_ctl = readl(mmio + HOST_EM_CTL);
-
- messages = (em_ctl & EM_CTRL_MSG_TYPE) >> 16;
-
- /* we only support LED message type right now */
- if ((messages & 0x01) && (ahci_em_messages == 1)) {
- /* store em_loc */
- hpriv->em_loc = ((em_loc >> 16) * 4);
- pi.flags |= ATA_FLAG_EM;
- if (!(em_ctl & EM_CTL_ALHD))
- pi.flags |= ATA_FLAG_SW_ACTIVITY;
- }
- }
+ ahci_set_em_messages(hpriv, &pi);
if (ahci_broken_system_poweroff(pdev)) {
pi.flags |= ATA_FLAG_NO_POWEROFF_SPINDOWN;
@@ -3372,7 +1163,6 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
if (!host)
return -ENOMEM;
- host->iomap = pcim_iomap_table(pdev);
host->private_data = hpriv;
if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
@@ -3395,7 +1185,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* set enclosure management message type */
if (ap->flags & ATA_FLAG_EM)
- ap->em_message_type = ahci_em_messages;
+ ap->em_message_type = hpriv->em_msg_type;
/* disabled/not-implemented port */
@@ -3414,12 +1204,12 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
return rc;
- rc = ahci_reset_controller(host);
+ rc = ahci_pci_reset_controller(host);
if (rc)
return rc;
- ahci_init_controller(host);
- ahci_print_info(host);
+ ahci_pci_init_controller(host);
+ ahci_pci_print_info(host);
pci_set_master(pdev);
return ata_host_activate(host, pdev->irq, ahci_interrupt, IRQF_SHARED,
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
new file mode 100644
index 000000000000..7113c5724471
--- /dev/null
+++ b/drivers/ata/ahci.h
@@ -0,0 +1,343 @@
+/*
+ * ahci.h - Common AHCI SATA definitions and declarations
+ *
+ * Maintained by: Jeff Garzik <jgarzik@pobox.com>
+ * Please ALWAYS copy linux-ide@vger.kernel.org
+ * on emails.
+ *
+ * Copyright 2004-2005 Red Hat, Inc.
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * libata documentation is available via 'make {ps|pdf}docs',
+ * as Documentation/DocBook/libata.*
+ *
+ * AHCI hardware documentation:
+ * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
+ * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
+ *
+ */
+
+#ifndef _AHCI_H
+#define _AHCI_H
+
+#include <linux/libata.h>
+
+/* Enclosure Management Control */
+#define EM_CTRL_MSG_TYPE 0x000f0000
+
+/* Enclosure Management LED Message Type */
+#define EM_MSG_LED_HBA_PORT 0x0000000f
+#define EM_MSG_LED_PMP_SLOT 0x0000ff00
+#define EM_MSG_LED_VALUE 0xffff0000
+#define EM_MSG_LED_VALUE_ACTIVITY 0x00070000
+#define EM_MSG_LED_VALUE_OFF 0xfff80000
+#define EM_MSG_LED_VALUE_ON 0x00010000
+
+enum {
+ AHCI_MAX_PORTS = 32,
+ AHCI_MAX_SG = 168, /* hardware max is 64K */
+ AHCI_DMA_BOUNDARY = 0xffffffff,
+ AHCI_MAX_CMDS = 32,
+ AHCI_CMD_SZ = 32,
+ AHCI_CMD_SLOT_SZ = AHCI_MAX_CMDS * AHCI_CMD_SZ,
+ AHCI_RX_FIS_SZ = 256,
+ AHCI_CMD_TBL_CDB = 0x40,
+ AHCI_CMD_TBL_HDR_SZ = 0x80,
+ AHCI_CMD_TBL_SZ = AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16),
+ AHCI_CMD_TBL_AR_SZ = AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS,
+ AHCI_PORT_PRIV_DMA_SZ = AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ +
+ AHCI_RX_FIS_SZ,
+ AHCI_PORT_PRIV_FBS_DMA_SZ = AHCI_CMD_SLOT_SZ +
+ AHCI_CMD_TBL_AR_SZ +
+ (AHCI_RX_FIS_SZ * 16),
+ AHCI_IRQ_ON_SG = (1 << 31),
+ AHCI_CMD_ATAPI = (1 << 5),
+ AHCI_CMD_WRITE = (1 << 6),
+ AHCI_CMD_PREFETCH = (1 << 7),
+ AHCI_CMD_RESET = (1 << 8),
+ AHCI_CMD_CLR_BUSY = (1 << 10),
+
+ RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */
+ RX_FIS_SDB = 0x58, /* offset of SDB FIS data */
+ RX_FIS_UNK = 0x60, /* offset of Unknown FIS data */
+
+ /* global controller registers */
+ HOST_CAP = 0x00, /* host capabilities */
+ HOST_CTL = 0x04, /* global host control */
+ HOST_IRQ_STAT = 0x08, /* interrupt status */
+ HOST_PORTS_IMPL = 0x0c, /* bitmap of implemented ports */
+ HOST_VERSION = 0x10, /* AHCI spec. version compliancy */
+ HOST_EM_LOC = 0x1c, /* Enclosure Management location */
+ HOST_EM_CTL = 0x20, /* Enclosure Management Control */
+ HOST_CAP2 = 0x24, /* host capabilities, extended */
+
+ /* HOST_CTL bits */
+ HOST_RESET = (1 << 0), /* reset controller; self-clear */
+ HOST_IRQ_EN = (1 << 1), /* global IRQ enable */
+ HOST_AHCI_EN = (1 << 31), /* AHCI enabled */
+
+ /* HOST_CAP bits */
+ HOST_CAP_SXS = (1 << 5), /* Supports External SATA */
+ HOST_CAP_EMS = (1 << 6), /* Enclosure Management support */
+ HOST_CAP_CCC = (1 << 7), /* Command Completion Coalescing */
+ HOST_CAP_PART = (1 << 13), /* Partial state capable */
+ HOST_CAP_SSC = (1 << 14), /* Slumber state capable */
+ HOST_CAP_PIO_MULTI = (1 << 15), /* PIO multiple DRQ support */
+ HOST_CAP_FBS = (1 << 16), /* FIS-based switching support */
+ HOST_CAP_PMP = (1 << 17), /* Port Multiplier support */
+ HOST_CAP_ONLY = (1 << 18), /* Supports AHCI mode only */
+ HOST_CAP_CLO = (1 << 24), /* Command List Override support */
+ HOST_CAP_LED = (1 << 25), /* Supports activity LED */
+ HOST_CAP_ALPM = (1 << 26), /* Aggressive Link PM support */
+ HOST_CAP_SSS = (1 << 27), /* Staggered Spin-up */
+ HOST_CAP_MPS = (1 << 28), /* Mechanical presence switch */
+ HOST_CAP_SNTF = (1 << 29), /* SNotification register */
+ HOST_CAP_NCQ = (1 << 30), /* Native Command Queueing */
+ HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */
+
+ /* HOST_CAP2 bits */
+ HOST_CAP2_BOH = (1 << 0), /* BIOS/OS handoff supported */
+ HOST_CAP2_NVMHCI = (1 << 1), /* NVMHCI supported */
+ HOST_CAP2_APST = (1 << 2), /* Automatic partial to slumber */
+
+ /* registers for each SATA port */
+ PORT_LST_ADDR = 0x00, /* command list DMA addr */
+ PORT_LST_ADDR_HI = 0x04, /* command list DMA addr hi */
+ PORT_FIS_ADDR = 0x08, /* FIS rx buf addr */
+ PORT_FIS_ADDR_HI = 0x0c, /* FIS rx buf addr hi */
+ PORT_IRQ_STAT = 0x10, /* interrupt status */
+ PORT_IRQ_MASK = 0x14, /* interrupt enable/disable mask */
+ PORT_CMD = 0x18, /* port command */
+ PORT_TFDATA = 0x20, /* taskfile data */
+ PORT_SIG = 0x24, /* device TF signature */
+ PORT_CMD_ISSUE = 0x38, /* command issue */
+ PORT_SCR_STAT = 0x28, /* SATA phy register: SStatus */
+ PORT_SCR_CTL = 0x2c, /* SATA phy register: SControl */
+ PORT_SCR_ERR = 0x30, /* SATA phy register: SError */
+ PORT_SCR_ACT = 0x34, /* SATA phy register: SActive */
+ PORT_SCR_NTF = 0x3c, /* SATA phy register: SNotification */
+ PORT_FBS = 0x40, /* FIS-based Switching */
+
+ /* PORT_IRQ_{STAT,MASK} bits */
+ PORT_IRQ_COLD_PRES = (1 << 31), /* cold presence detect */
+ PORT_IRQ_TF_ERR = (1 << 30), /* task file error */
+ PORT_IRQ_HBUS_ERR = (1 << 29), /* host bus fatal error */
+ PORT_IRQ_HBUS_DATA_ERR = (1 << 28), /* host bus data error */
+ PORT_IRQ_IF_ERR = (1 << 27), /* interface fatal error */
+ PORT_IRQ_IF_NONFATAL = (1 << 26), /* interface non-fatal error */
+ PORT_IRQ_OVERFLOW = (1 << 24), /* xfer exhausted available S/G */
+ PORT_IRQ_BAD_PMP = (1 << 23), /* incorrect port multiplier */
+
+ PORT_IRQ_PHYRDY = (1 << 22), /* PhyRdy changed */
+ PORT_IRQ_DEV_ILCK = (1 << 7), /* device interlock */
+ PORT_IRQ_CONNECT = (1 << 6), /* port connect change status */
+ PORT_IRQ_SG_DONE = (1 << 5), /* descriptor processed */
+ PORT_IRQ_UNK_FIS = (1 << 4), /* unknown FIS rx'd */
+ PORT_IRQ_SDB_FIS = (1 << 3), /* Set Device Bits FIS rx'd */
+ PORT_IRQ_DMAS_FIS = (1 << 2), /* DMA Setup FIS rx'd */
+ PORT_IRQ_PIOS_FIS = (1 << 1), /* PIO Setup FIS rx'd */
+ PORT_IRQ_D2H_REG_FIS = (1 << 0), /* D2H Register FIS rx'd */
+
+ PORT_IRQ_FREEZE = PORT_IRQ_HBUS_ERR |
+ PORT_IRQ_IF_ERR |
+ PORT_IRQ_CONNECT |
+ PORT_IRQ_PHYRDY |
+ PORT_IRQ_UNK_FIS |
+ PORT_IRQ_BAD_PMP,
+ PORT_IRQ_ERROR = PORT_IRQ_FREEZE |
+ PORT_IRQ_TF_ERR |
+ PORT_IRQ_HBUS_DATA_ERR,
+ DEF_PORT_IRQ = PORT_IRQ_ERROR | PORT_IRQ_SG_DONE |
+ PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS |
+ PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS,
+
+ /* PORT_CMD bits */
+ PORT_CMD_ASP = (1 << 27), /* Aggressive Slumber/Partial */
+ PORT_CMD_ALPE = (1 << 26), /* Aggressive Link PM enable */
+ PORT_CMD_ATAPI = (1 << 24), /* Device is ATAPI */
+ PORT_CMD_FBSCP = (1 << 22), /* FBS Capable Port */
+ PORT_CMD_PMP = (1 << 17), /* PMP attached */
+ PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */
+ PORT_CMD_FIS_ON = (1 << 14), /* FIS DMA engine running */
+ PORT_CMD_FIS_RX = (1 << 4), /* Enable FIS receive DMA engine */
+ PORT_CMD_CLO = (1 << 3), /* Command list override */
+ PORT_CMD_POWER_ON = (1 << 2), /* Power up device */
+ PORT_CMD_SPIN_UP = (1 << 1), /* Spin up device */
+ PORT_CMD_START = (1 << 0), /* Enable port DMA engine */
+
+ PORT_CMD_ICC_MASK = (0xf << 28), /* i/f ICC state mask */
+ PORT_CMD_ICC_ACTIVE = (0x1 << 28), /* Put i/f in active state */
+ PORT_CMD_ICC_PARTIAL = (0x2 << 28), /* Put i/f in partial state */
+ PORT_CMD_ICC_SLUMBER = (0x6 << 28), /* Put i/f in slumber state */
+
+ PORT_FBS_DWE_OFFSET = 16, /* FBS device with error offset */
+ PORT_FBS_ADO_OFFSET = 12, /* FBS active dev optimization offset */
+ PORT_FBS_DEV_OFFSET = 8, /* FBS device to issue offset */
+ PORT_FBS_DEV_MASK = (0xf << PORT_FBS_DEV_OFFSET), /* FBS.DEV */
+ PORT_FBS_SDE = (1 << 2), /* FBS single device error */
+ PORT_FBS_DEC = (1 << 1), /* FBS device error clear */
+ PORT_FBS_EN = (1 << 0), /* Enable FBS */
+
+ /* hpriv->flags bits */
+ AHCI_HFLAG_NO_NCQ = (1 << 0),
+ AHCI_HFLAG_IGN_IRQ_IF_ERR = (1 << 1), /* ignore IRQ_IF_ERR */
+ AHCI_HFLAG_IGN_SERR_INTERNAL = (1 << 2), /* ignore SERR_INTERNAL */
+ AHCI_HFLAG_32BIT_ONLY = (1 << 3), /* force 32bit */
+ AHCI_HFLAG_MV_PATA = (1 << 4), /* PATA port */
+ AHCI_HFLAG_NO_MSI = (1 << 5), /* no PCI MSI */
+ AHCI_HFLAG_NO_PMP = (1 << 6), /* no PMP */
+ AHCI_HFLAG_NO_HOTPLUG = (1 << 7), /* ignore PxSERR.DIAG.N */
+ AHCI_HFLAG_SECT255 = (1 << 8), /* max 255 sectors */
+ AHCI_HFLAG_YES_NCQ = (1 << 9), /* force NCQ cap on */
+ AHCI_HFLAG_NO_SUSPEND = (1 << 10), /* don't suspend */
+ AHCI_HFLAG_SRST_TOUT_IS_OFFLINE = (1 << 11), /* treat SRST timeout as
+ link offline */
+ AHCI_HFLAG_NO_SNTF = (1 << 12), /* no sntf */
+ AHCI_HFLAG_NO_FPDMA_AA = (1 << 13), /* no FPDMA AA */
+
+ /* ap->flags bits */
+
+ AHCI_FLAG_COMMON = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+ ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
+ ATA_FLAG_ACPI_SATA | ATA_FLAG_AN |
+ ATA_FLAG_IPM,
+
+ ICH_MAP = 0x90, /* ICH MAP register */
+
+ /* em constants */
+ EM_MAX_SLOTS = 8,
+ EM_MAX_RETRY = 5,
+
+ /* em_ctl bits */
+ EM_CTL_RST = (1 << 9), /* Reset */
+ EM_CTL_TM = (1 << 8), /* Transmit Message */
+ EM_CTL_MR = (1 << 0), /* Message Received */
+ EM_CTL_ALHD = (1 << 26), /* Activity LED */
+ EM_CTL_XMT = (1 << 25), /* Transmit Only */
+ EM_CTL_SMB = (1 << 24), /* Single Message Buffer */
+
+ /* em message type */
+ EM_MSG_TYPE_LED = (1 << 0), /* LED */
+ EM_MSG_TYPE_SAFTE = (1 << 1), /* SAF-TE */
+ EM_MSG_TYPE_SES2 = (1 << 2), /* SES-2 */
+ EM_MSG_TYPE_SGPIO = (1 << 3), /* SGPIO */
+};
+
+struct ahci_cmd_hdr {
+ __le32 opts;
+ __le32 status;
+ __le32 tbl_addr;
+ __le32 tbl_addr_hi;
+ __le32 reserved[4];
+};
+
+struct ahci_sg {
+ __le32 addr;
+ __le32 addr_hi;
+ __le32 reserved;
+ __le32 flags_size;
+};
+
+struct ahci_em_priv {
+ enum sw_activity blink_policy;
+ struct timer_list timer;
+ unsigned long saved_activity;
+ unsigned long activity;
+ unsigned long led_state;
+};
+
+struct ahci_port_priv {
+ struct ata_link *active_link;
+ struct ahci_cmd_hdr *cmd_slot;
+ dma_addr_t cmd_slot_dma;
+ void *cmd_tbl;
+ dma_addr_t cmd_tbl_dma;
+ void *rx_fis;
+ dma_addr_t rx_fis_dma;
+ /* for NCQ spurious interrupt analysis */
+ unsigned int ncq_saw_d2h:1;
+ unsigned int ncq_saw_dmas:1;
+ unsigned int ncq_saw_sdb:1;
+ u32 intr_mask; /* interrupts to enable */
+ bool fbs_supported; /* set iff FBS is supported */
+ bool fbs_enabled; /* set iff FBS is enabled */
+ int fbs_last_dev; /* save FBS.DEV of last FIS */
+ /* enclosure management info per PM slot */
+ struct ahci_em_priv em_priv[EM_MAX_SLOTS];
+};
+
+struct ahci_host_priv {
+ void __iomem *mmio; /* bus-independent mem map */
+ unsigned int flags; /* AHCI_HFLAG_* */
+ u32 cap; /* cap to use */
+ u32 cap2; /* cap2 to use */
+ u32 port_map; /* port map to use */
+ u32 saved_cap; /* saved initial cap */
+ u32 saved_cap2; /* saved initial cap2 */
+ u32 saved_port_map; /* saved initial port_map */
+ u32 em_loc; /* enclosure management location */
+ u32 em_buf_sz; /* EM buffer size in byte */
+ u32 em_msg_type; /* EM message type */
+};
+
+extern int ahci_ignore_sss;
+
+extern struct scsi_host_template ahci_sht;
+extern struct ata_port_operations ahci_ops;
+
+void ahci_save_initial_config(struct device *dev,
+ struct ahci_host_priv *hpriv,
+ unsigned int force_port_map,
+ unsigned int mask_port_map);
+void ahci_init_controller(struct ata_host *host);
+int ahci_reset_controller(struct ata_host *host);
+
+int ahci_do_softreset(struct ata_link *link, unsigned int *class,
+ int pmp, unsigned long deadline,
+ int (*check_ready)(struct ata_link *link));
+
+int ahci_stop_engine(struct ata_port *ap);
+void ahci_start_engine(struct ata_port *ap);
+int ahci_check_ready(struct ata_link *link);
+int ahci_kick_engine(struct ata_port *ap);
+void ahci_set_em_messages(struct ahci_host_priv *hpriv,
+ struct ata_port_info *pi);
+int ahci_reset_em(struct ata_host *host);
+irqreturn_t ahci_interrupt(int irq, void *dev_instance);
+void ahci_print_info(struct ata_host *host, const char *scc_s);
+
+static inline void __iomem *__ahci_port_base(struct ata_host *host,
+ unsigned int port_no)
+{
+ struct ahci_host_priv *hpriv = host->private_data;
+ void __iomem *mmio = hpriv->mmio;
+
+ return mmio + 0x100 + (port_no * 0x80);
+}
+
+static inline void __iomem *ahci_port_base(struct ata_port *ap)
+{
+ return __ahci_port_base(ap->host, ap->port_no);
+}
+
+static inline int ahci_nr_ports(u32 cap)
+{
+ return (cap & 0x1f) + 1;
+}
+
+#endif /* _AHCI_H */
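
The header above is the entire contract between the shared AHCI core and its bus glue: a driver maps the controller registers into hpriv->mmio and then works through the exported ahci_* helpers and the inline port accessors. The following is a minimal sketch, not part of the patch; the function name and the register dump are purely illustrative of how glue code might walk the implemented ports with those helpers.

#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/libata.h>
#include "ahci.h"

/* Illustrative only: print PxCMD for every implemented port. */
static void example_dump_port_cmd(struct ata_host *host)
{
	struct ahci_host_priv *hpriv = host->private_data;
	int n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));
	int i;

	for (i = 0; i < n_ports; i++) {
		void __iomem *port_mmio = __ahci_port_base(host, i);

		if (!(hpriv->port_map & (1 << i)))
			continue;	/* disabled/not-implemented port */

		dev_info(host->dev, "port %d PxCMD 0x%08x\n",
			 i, readl(port_mmio + PORT_CMD));
	}
}
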
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
new file mode 100644
index 000000000000..5e11b160f247
--- /dev/null
+++ b/drivers/ata/ahci_platform.c
@@ -0,0 +1,192 @@
+/*
+ * AHCI SATA platform driver
+ *
+ * Copyright 2004-2005 Red Hat, Inc.
+ * Jeff Garzik <jgarzik@pobox.com>
+ * Copyright 2010 MontaVista Software, LLC.
+ * Anton Vorontsov <avorontsov@ru.mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/gfp.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/libata.h>
+#include <linux/ahci_platform.h>
+#include "ahci.h"
+
+static int __init ahci_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ahci_platform_data *pdata = dev->platform_data;
+ struct ata_port_info pi = {
+ .flags = AHCI_FLAG_COMMON,
+ .pio_mask = ATA_PIO4,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_ops,
+ };
+ const struct ata_port_info *ppi[] = { &pi, NULL };
+ struct ahci_host_priv *hpriv;
+ struct ata_host *host;
+ struct resource *mem;
+ int irq;
+ int n_ports;
+ int i;
+ int rc;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem) {
+ dev_err(dev, "no mmio space\n");
+ return -EINVAL;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+ dev_err(dev, "no irq\n");
+ return -EINVAL;
+ }
+
+ if (pdata && pdata->init) {
+ rc = pdata->init(dev);
+ if (rc)
+ return rc;
+ }
+
+ if (pdata && pdata->ata_port_info)
+ pi = *pdata->ata_port_info;
+
+ hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
+ if (!hpriv) {
+ rc = -ENOMEM;
+ goto err0;
+ }
+
+ hpriv->flags |= (unsigned long)pi.private_data;
+
+ hpriv->mmio = devm_ioremap(dev, mem->start, resource_size(mem));
+ if (!hpriv->mmio) {
+ dev_err(dev, "can't map %pR\n", mem);
+ rc = -ENOMEM;
+ goto err0;
+ }
+
+ ahci_save_initial_config(dev, hpriv,
+ pdata ? pdata->force_port_map : 0,
+ pdata ? pdata->mask_port_map : 0);
+
+ /* prepare host */
+ if (hpriv->cap & HOST_CAP_NCQ)
+ pi.flags |= ATA_FLAG_NCQ;
+
+ if (hpriv->cap & HOST_CAP_PMP)
+ pi.flags |= ATA_FLAG_PMP;
+
+ ahci_set_em_messages(hpriv, &pi);
+
+ /* CAP.NP sometimes indicates the index of the last enabled
+ * port, at other times, that of the last possible port, so
+ * determining the maximum port number requires looking at
+ * both CAP.NP and port_map.
+ */
+ n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));
+
+ host = ata_host_alloc_pinfo(dev, ppi, n_ports);
+ if (!host) {
+ rc = -ENOMEM;
+ goto err0;
+ }
+
+ host->private_data = hpriv;
+
+ if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
+ host->flags |= ATA_HOST_PARALLEL_SCAN;
+ else
+ printk(KERN_INFO "ahci: SSS flag set, parallel bus scan disabled\n");
+
+ if (pi.flags & ATA_FLAG_EM)
+ ahci_reset_em(host);
+
+ for (i = 0; i < host->n_ports; i++) {
+ struct ata_port *ap = host->ports[i];
+
+ ata_port_desc(ap, "mmio %pR", mem);
+ ata_port_desc(ap, "port 0x%x", 0x100 + ap->port_no * 0x80);
+
+ /* set initial link pm policy */
+ ap->pm_policy = NOT_AVAILABLE;
+
+ /* set enclosure management message type */
+ if (ap->flags & ATA_FLAG_EM)
+ ap->em_message_type = hpriv->em_msg_type;
+
+ /* disabled/not-implemented port */
+ if (!(hpriv->port_map & (1 << i)))
+ ap->ops = &ata_dummy_port_ops;
+ }
+
+ rc = ahci_reset_controller(host);
+ if (rc)
+ goto err0;
+
+ ahci_init_controller(host);
+ ahci_print_info(host, "platform");
+
+ rc = ata_host_activate(host, irq, ahci_interrupt, IRQF_SHARED,
+ &ahci_sht);
+ if (rc)
+ goto err0;
+
+ return 0;
+err0:
+ if (pdata && pdata->exit)
+ pdata->exit(dev);
+ return rc;
+}
+
+static int __devexit ahci_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ahci_platform_data *pdata = dev->platform_data;
+ struct ata_host *host = dev_get_drvdata(dev);
+
+ ata_host_detach(host);
+
+ if (pdata && pdata->exit)
+ pdata->exit(dev);
+
+ return 0;
+}
+
+static struct platform_driver ahci_driver = {
+ .probe = ahci_probe,
+ .remove = __devexit_p(ahci_remove),
+ .driver = {
+ .name = "ahci",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init ahci_init(void)
+{
+ return platform_driver_probe(&ahci_driver, ahci_probe);
+}
+module_init(ahci_init);
+
+static void __exit ahci_exit(void)
+{
+ platform_driver_unregister(&ahci_driver);
+}
+module_exit(ahci_exit);
+
+MODULE_DESCRIPTION("AHCI SATA platform driver");
+MODULE_AUTHOR("Anton Vorontsov <avorontsov@ru.mvista.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:ahci");
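
With the probe routine above, wiring AHCI up on a new SoC reduces to registering a platform device named "ahci" with one MMIO resource, one IRQ, and optionally an ahci_platform_data carrying init/exit hooks and port-map overrides. Below is a hedged sketch of such board code; the mysoc_* names, base address, and IRQ number are invented for illustration, and the init hook only marks where clock or PHY setup would go.

#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/ahci_platform.h>

#define MYSOC_SATA_BASE	0xfff08000	/* invented address */
#define MYSOC_SATA_IRQ	42		/* invented IRQ */

static int mysoc_sata_init(struct device *dev)
{
	/* hypothetical hook: enable SATA clocks / PHY here */
	return 0;
}

static struct ahci_platform_data mysoc_sata_pdata = {
	.init	= mysoc_sata_init,
};

static struct resource mysoc_sata_resources[] = {
	{
		.start	= MYSOC_SATA_BASE,
		.end	= MYSOC_SATA_BASE + 0x1000 - 1,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= MYSOC_SATA_IRQ,
		.end	= MYSOC_SATA_IRQ,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device mysoc_sata_device = {
	.name		= "ahci",	/* must match ahci_driver.driver.name */
	.id		= -1,
	.resource	= mysoc_sata_resources,
	.num_resources	= ARRAY_SIZE(mysoc_sata_resources),
	.dev = {
		.platform_data	= &mysoc_sata_pdata,
	},
};

/* board init code would then call platform_device_register(&mysoc_sata_device); */
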
diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c
index 33fb614f9784..573158a9668d 100644
--- a/drivers/ata/ata_generic.c
+++ b/drivers/ata/ata_generic.c
@@ -155,7 +155,7 @@ static int ata_generic_init_one(struct pci_dev *dev, const struct pci_device_id
return rc;
pcim_pin_device(dev);
}
- return ata_pci_sff_init_one(dev, ppi, &generic_sht, NULL, 0);
+ return ata_pci_bmdma_init_one(dev, ppi, &generic_sht, NULL, 0);
}
static struct pci_device_id ata_generic[] = {
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 83bc49fac9bb..7409f98d2ae6 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -43,7 +43,7 @@
* driver the list of errata that are relevant is below, going back to
* PIIX4. Older device documentation is now a bit tricky to find.
*
- * The chipsets all follow very much the same design. The orginal Triton
+ * The chipsets all follow very much the same design. The original Triton
* series chipsets do _not_ support independent device timings, but this
* is fixed in Triton II. With the odd mobile exception the chips then
* change little except in gaining more modes until SATA arrives. This
@@ -1589,7 +1589,7 @@ static int __devinit piix_init_one(struct pci_dev *pdev,
hpriv->map = piix_init_sata_map(pdev, port_info,
piix_map_db_table[ent->driver_data]);
- rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
+ rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
if (rc)
return rc;
host->private_data = hpriv;
@@ -1626,7 +1626,7 @@ static int __devinit piix_init_one(struct pci_dev *pdev,
host->flags |= ATA_HOST_PARALLEL_SCAN;
pci_set_master(pdev);
- return ata_pci_sff_activate_host(host, ata_sff_interrupt, &piix_sht);
+ return ata_pci_sff_activate_host(host, ata_bmdma_interrupt, &piix_sht);
}
static void piix_remove_one(struct pci_dev *pdev)
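
The ata_generic and ata_piix hunks above follow one mechanical pattern: BMDMA-capable PCI drivers switch from the generic SFF helpers to their BMDMA-specific counterparts (ata_pci_bmdma_prepare_host, ata_pci_bmdma_init_one, ata_bmdma_interrupt). For a driver with no chip-specific setup the whole probe can collapse into a single call; the sketch below is illustrative only (my_init_one, my_sht, and the "my_pata" name are not from the patch) and assumes the BMDMA helpers exported by libata-sff.

#include <linux/pci.h>
#include <linux/libata.h>
#include <scsi/scsi_host.h>

static struct scsi_host_template my_sht = {
	ATA_BMDMA_SHT("my_pata"),	/* hypothetical driver name */
};

static int my_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static const struct ata_port_info info = {
		.flags		= ATA_FLAG_SLAVE_POSS,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &ata_bmdma_port_ops,
	};
	const struct ata_port_info *ppi[] = { &info, NULL };

	/* was ata_pci_sff_init_one(); BMDMA drivers now use the bmdma variant */
	return ata_pci_bmdma_init_one(pdev, ppi, &my_sht, NULL, 0);
}
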
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
new file mode 100644
index 000000000000..1984a6e89e84
--- /dev/null
+++ b/drivers/ata/libahci.c
@@ -0,0 +1,2216 @@
+/*
+ * libahci.c - Common AHCI SATA low-level routines
+ *
+ * Maintained by: Jeff Garzik <jgarzik@pobox.com>
+ * Please ALWAYS copy linux-ide@vger.kernel.org
+ * on emails.
+ *
+ * Copyright 2004-2005 Red Hat, Inc.
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * libata documentation is available via 'make {ps|pdf}docs',
+ * as Documentation/DocBook/libata.*
+ *
+ * AHCI hardware documentation:
+ * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
+ * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/gfp.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_cmnd.h>
+#include <linux/libata.h>
+#include "ahci.h"
+
+static int ahci_skip_host_reset;
+int ahci_ignore_sss;
+EXPORT_SYMBOL_GPL(ahci_ignore_sss);
+
+module_param_named(skip_host_reset, ahci_skip_host_reset, int, 0444);
+MODULE_PARM_DESC(skip_host_reset, "skip global host reset (0=don't skip, 1=skip)");
+
+module_param_named(ignore_sss, ahci_ignore_sss, int, 0444);
+MODULE_PARM_DESC(ignore_sss, "Ignore staggered spinup flag (0=don't ignore, 1=ignore)");
+
+static int ahci_enable_alpm(struct ata_port *ap,
+ enum link_pm policy);
+static void ahci_disable_alpm(struct ata_port *ap);
+static ssize_t ahci_led_show(struct ata_port *ap, char *buf);
+static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
+ size_t size);
+static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
+ ssize_t size);
+
+
+
+static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
+static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
+static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
+static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
+static int ahci_port_start(struct ata_port *ap);
+static void ahci_port_stop(struct ata_port *ap);
+static void ahci_qc_prep(struct ata_queued_cmd *qc);
+static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc);
+static void ahci_freeze(struct ata_port *ap);
+static void ahci_thaw(struct ata_port *ap);
+static void ahci_enable_fbs(struct ata_port *ap);
+static void ahci_disable_fbs(struct ata_port *ap);
+static void ahci_pmp_attach(struct ata_port *ap);
+static void ahci_pmp_detach(struct ata_port *ap);
+static int ahci_softreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline);
+static int ahci_hardreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline);
+static void ahci_postreset(struct ata_link *link, unsigned int *class);
+static void ahci_error_handler(struct ata_port *ap);
+static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
+static int ahci_port_resume(struct ata_port *ap);
+static void ahci_dev_config(struct ata_device *dev);
+static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
+ u32 opts);
+#ifdef CONFIG_PM
+static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg);
+#endif
+static ssize_t ahci_activity_show(struct ata_device *dev, char *buf);
+static ssize_t ahci_activity_store(struct ata_device *dev,
+ enum sw_activity val);
+static void ahci_init_sw_activity(struct ata_link *link);
+
+static ssize_t ahci_show_host_caps(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t ahci_show_host_cap2(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t ahci_show_host_version(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t ahci_show_port_cmd(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t ahci_read_em_buffer(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t ahci_store_em_buffer(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size);
+
+static DEVICE_ATTR(ahci_host_caps, S_IRUGO, ahci_show_host_caps, NULL);
+static DEVICE_ATTR(ahci_host_cap2, S_IRUGO, ahci_show_host_cap2, NULL);
+static DEVICE_ATTR(ahci_host_version, S_IRUGO, ahci_show_host_version, NULL);
+static DEVICE_ATTR(ahci_port_cmd, S_IRUGO, ahci_show_port_cmd, NULL);
+static DEVICE_ATTR(em_buffer, S_IWUSR | S_IRUGO,
+ ahci_read_em_buffer, ahci_store_em_buffer);
+
+static struct device_attribute *ahci_shost_attrs[] = {
+ &dev_attr_link_power_management_policy,
+ &dev_attr_em_message_type,
+ &dev_attr_em_message,
+ &dev_attr_ahci_host_caps,
+ &dev_attr_ahci_host_cap2,
+ &dev_attr_ahci_host_version,
+ &dev_attr_ahci_port_cmd,
+ &dev_attr_em_buffer,
+ NULL
+};
+
+static struct device_attribute *ahci_sdev_attrs[] = {
+ &dev_attr_sw_activity,
+ &dev_attr_unload_heads,
+ NULL
+};
+
+struct scsi_host_template ahci_sht = {
+ ATA_NCQ_SHT("ahci"),
+ .can_queue = AHCI_MAX_CMDS - 1,
+ .sg_tablesize = AHCI_MAX_SG,
+ .dma_boundary = AHCI_DMA_BOUNDARY,
+ .shost_attrs = ahci_shost_attrs,
+ .sdev_attrs = ahci_sdev_attrs,
+};
+EXPORT_SYMBOL_GPL(ahci_sht);
+
+struct ata_port_operations ahci_ops = {
+ .inherits = &sata_pmp_port_ops,
+
+ .qc_defer = ahci_pmp_qc_defer,
+ .qc_prep = ahci_qc_prep,
+ .qc_issue = ahci_qc_issue,
+ .qc_fill_rtf = ahci_qc_fill_rtf,
+
+ .freeze = ahci_freeze,
+ .thaw = ahci_thaw,
+ .softreset = ahci_softreset,
+ .hardreset = ahci_hardreset,
+ .postreset = ahci_postreset,
+ .pmp_softreset = ahci_softreset,
+ .error_handler = ahci_error_handler,
+ .post_internal_cmd = ahci_post_internal_cmd,
+ .dev_config = ahci_dev_config,
+
+ .scr_read = ahci_scr_read,
+ .scr_write = ahci_scr_write,
+ .pmp_attach = ahci_pmp_attach,
+ .pmp_detach = ahci_pmp_detach,
+
+ .enable_pm = ahci_enable_alpm,
+ .disable_pm = ahci_disable_alpm,
+ .em_show = ahci_led_show,
+ .em_store = ahci_led_store,
+ .sw_activity_show = ahci_activity_show,
+ .sw_activity_store = ahci_activity_store,
+#ifdef CONFIG_PM
+ .port_suspend = ahci_port_suspend,
+ .port_resume = ahci_port_resume,
+#endif
+ .port_start = ahci_port_start,
+ .port_stop = ahci_port_stop,
+};
+EXPORT_SYMBOL_GPL(ahci_ops);
+
+int ahci_em_messages = 1;
+EXPORT_SYMBOL_GPL(ahci_em_messages);
+module_param(ahci_em_messages, int, 0444);
+/* add other LED protocol types when they become supported */
+MODULE_PARM_DESC(ahci_em_messages,
+ "AHCI Enclosure Management Message control (0 = off, 1 = on)");
+
+static void ahci_enable_ahci(void __iomem *mmio)
+{
+ int i;
+ u32 tmp;
+
+ /* turn on AHCI_EN */
+ tmp = readl(mmio + HOST_CTL);
+ if (tmp & HOST_AHCI_EN)
+ return;
+
+ /* Some controllers need AHCI_EN to be written multiple times.
+ * Try a few times before giving up.
+ */
+ for (i = 0; i < 5; i++) {
+ tmp |= HOST_AHCI_EN;
+ writel(tmp, mmio + HOST_CTL);
+ tmp = readl(mmio + HOST_CTL); /* flush && sanity check */
+ if (tmp & HOST_AHCI_EN)
+ return;
+ msleep(10);
+ }
+
+ WARN_ON(1);
+}
+
+static ssize_t ahci_show_host_caps(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ata_port *ap = ata_shost_to_port(shost);
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+
+ return sprintf(buf, "%x\n", hpriv->cap);
+}
+
+static ssize_t ahci_show_host_cap2(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ata_port *ap = ata_shost_to_port(shost);
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+
+ return sprintf(buf, "%x\n", hpriv->cap2);
+}
+
+static ssize_t ahci_show_host_version(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ata_port *ap = ata_shost_to_port(shost);
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ void __iomem *mmio = hpriv->mmio;
+
+ return sprintf(buf, "%x\n", readl(mmio + HOST_VERSION));
+}
+
+static ssize_t ahci_show_port_cmd(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ata_port *ap = ata_shost_to_port(shost);
+ void __iomem *port_mmio = ahci_port_base(ap);
+
+ return sprintf(buf, "%x\n", readl(port_mmio + PORT_CMD));
+}
+
+static ssize_t ahci_read_em_buffer(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ata_port *ap = ata_shost_to_port(shost);
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ void __iomem *mmio = hpriv->mmio;
+ void __iomem *em_mmio = mmio + hpriv->em_loc;
+ u32 em_ctl, msg;
+ unsigned long flags;
+ size_t count;
+ int i;
+
+ spin_lock_irqsave(ap->lock, flags);
+
+ em_ctl = readl(mmio + HOST_EM_CTL);
+ if (!(ap->flags & ATA_FLAG_EM) || em_ctl & EM_CTL_XMT ||
+ !(hpriv->em_msg_type & EM_MSG_TYPE_SGPIO)) {
+ spin_unlock_irqrestore(ap->lock, flags);
+ return -EINVAL;
+ }
+
+ if (!(em_ctl & EM_CTL_MR)) {
+ spin_unlock_irqrestore(ap->lock, flags);
+ return -EAGAIN;
+ }
+
+ if (!(em_ctl & EM_CTL_SMB))
+ em_mmio += hpriv->em_buf_sz;
+
+ count = hpriv->em_buf_sz;
+
+ /* the count should not be larger than PAGE_SIZE */
+ if (count > PAGE_SIZE) {
+ if (printk_ratelimit())
+ ata_port_printk(ap, KERN_WARNING,
+ "EM read buffer size too large: "
+ "buffer size %u, page size %lu\n",
+ hpriv->em_buf_sz, PAGE_SIZE);
+ count = PAGE_SIZE;
+ }
+
+ for (i = 0; i < count; i += 4) {
+ msg = readl(em_mmio + i);
+ buf[i] = msg & 0xff;
+ buf[i + 1] = (msg >> 8) & 0xff;
+ buf[i + 2] = (msg >> 16) & 0xff;
+ buf[i + 3] = (msg >> 24) & 0xff;
+ }
+
+ spin_unlock_irqrestore(ap->lock, flags);
+
+ return i;
+}
+
+static ssize_t ahci_store_em_buffer(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ata_port *ap = ata_shost_to_port(shost);
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ void __iomem *mmio = hpriv->mmio;
+ void __iomem *em_mmio = mmio + hpriv->em_loc;
+ u32 em_ctl, msg;
+ unsigned long flags;
+ int i;
+
+ /* check size validity */
+ if (!(ap->flags & ATA_FLAG_EM) ||
+ !(hpriv->em_msg_type & EM_MSG_TYPE_SGPIO) ||
+ size % 4 || size > hpriv->em_buf_sz)
+ return -EINVAL;
+
+ spin_lock_irqsave(ap->lock, flags);
+
+ em_ctl = readl(mmio + HOST_EM_CTL);
+ if (em_ctl & EM_CTL_TM) {
+ spin_unlock_irqrestore(ap->lock, flags);
+ return -EBUSY;
+ }
+
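+ /* pack four message bytes into each 32-bit write, lowest byte in bits 7:0 */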
+ for (i = 0; i < size; i += 4) {
+ msg = buf[i] | buf[i + 1] << 8 |
+ buf[i + 2] << 16 | buf[i + 3] << 24;
+ writel(msg, em_mmio + i);
+ }
+
+ writel(em_ctl | EM_CTL_TM, mmio + HOST_EM_CTL);
+
+ spin_unlock_irqrestore(ap->lock, flags);
+
+ return size;
+}
+
+/**
+ * ahci_save_initial_config - Save and fixup initial config values
+ * @dev: target AHCI device
+ * @hpriv: host private area to store config values
+ * @force_port_map: force port map to a specified value
+ * @mask_port_map: mask out particular bits from port map
+ *
+ * Some registers containing configuration info might be setup by
+ * BIOS and might be cleared on reset. This function saves the
+ * initial values of those registers into @hpriv such that they
+ * can be restored after controller reset.
+ *
+ * If inconsistent, config values are fixed up by this function.
+ *
+ * LOCKING:
+ * None.
+ */
+void ahci_save_initial_config(struct device *dev,
+ struct ahci_host_priv *hpriv,
+ unsigned int force_port_map,
+ unsigned int mask_port_map)
+{
+ void __iomem *mmio = hpriv->mmio;
+ u32 cap, cap2, vers, port_map;
+ int i;
+
+ /* make sure AHCI mode is enabled before accessing CAP */
+ ahci_enable_ahci(mmio);
+
+ /* Values prefixed with saved_ are written back to host after
+ * reset. Values without the prefix are used for driver operation.
+ */
+ hpriv->saved_cap = cap = readl(mmio + HOST_CAP);
+ hpriv->saved_port_map = port_map = readl(mmio + HOST_PORTS_IMPL);
+
+ /* CAP2 register is only defined for AHCI 1.2 and later */
+ vers = readl(mmio + HOST_VERSION);
+ if ((vers >> 16) > 1 ||
+ ((vers >> 16) == 1 && (vers & 0xFFFF) >= 0x200))
+ hpriv->saved_cap2 = cap2 = readl(mmio + HOST_CAP2);
+ else
+ hpriv->saved_cap2 = cap2 = 0;
+
+ /* some chips have errata preventing 64bit use */
+ if ((cap & HOST_CAP_64) && (hpriv->flags & AHCI_HFLAG_32BIT_ONLY)) {
+ dev_printk(KERN_INFO, dev,
+ "controller can't do 64bit DMA, forcing 32bit\n");
+ cap &= ~HOST_CAP_64;
+ }
+
+ if ((cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_NO_NCQ)) {
+ dev_printk(KERN_INFO, dev,
+ "controller can't do NCQ, turning off CAP_NCQ\n");
+ cap &= ~HOST_CAP_NCQ;
+ }
+
+ if (!(cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_YES_NCQ)) {
+ dev_printk(KERN_INFO, dev,
+ "controller can do NCQ, turning on CAP_NCQ\n");
+ cap |= HOST_CAP_NCQ;
+ }
+
+ if ((cap & HOST_CAP_PMP) && (hpriv->flags & AHCI_HFLAG_NO_PMP)) {
+ dev_printk(KERN_INFO, dev,
+ "controller can't do PMP, turning off CAP_PMP\n");
+ cap &= ~HOST_CAP_PMP;
+ }
+
+ if ((cap & HOST_CAP_SNTF) && (hpriv->flags & AHCI_HFLAG_NO_SNTF)) {
+ dev_printk(KERN_INFO, dev,
+ "controller can't do SNTF, turning off CAP_SNTF\n");
+ cap &= ~HOST_CAP_SNTF;
+ }
+
+ if (force_port_map && port_map != force_port_map) {
+ dev_printk(KERN_INFO, dev, "forcing port_map 0x%x -> 0x%x\n",
+ port_map, force_port_map);
+ port_map = force_port_map;
+ }
+
+ if (mask_port_map) {
+ dev_printk(KERN_ERR, dev, "masking port_map 0x%x -> 0x%x\n",
+ port_map,
+ port_map & mask_port_map);
+ port_map &= mask_port_map;
+ }
+
+ /* cross check port_map and cap.n_ports */
+ if (port_map) {
+ int map_ports = 0;
+
+ for (i = 0; i < AHCI_MAX_PORTS; i++)
+ if (port_map & (1 << i))
+ map_ports++;
+
+ /* If PI has more ports than n_ports, whine, clear
+ * port_map and let it be generated from n_ports.
+ */
+ if (map_ports > ahci_nr_ports(cap)) {
+ dev_printk(KERN_WARNING, dev,
+ "implemented port map (0x%x) contains more "
+ "ports than nr_ports (%u), using nr_ports\n",
+ port_map, ahci_nr_ports(cap));
+ port_map = 0;
+ }
+ }
+
+ /* fabricate port_map from cap.nr_ports */
+ if (!port_map) {
+ port_map = (1 << ahci_nr_ports(cap)) - 1;
+ dev_printk(KERN_WARNING, dev,
+ "forcing PORTS_IMPL to 0x%x\n", port_map);
+
+ /* write the fixed up value to the PI register */
+ hpriv->saved_port_map = port_map;
+ }
+
+ /* record values to use during operation */
+ hpriv->cap = cap;
+ hpriv->cap2 = cap2;
+ hpriv->port_map = port_map;
+}
+EXPORT_SYMBOL_GPL(ahci_save_initial_config);
+
+/**
+ * ahci_restore_initial_config - Restore initial config
+ * @host: target ATA host
+ *
+ * Restore initial config stored by ahci_save_initial_config().
+ *
+ * LOCKING:
+ * None.
+ */
+static void ahci_restore_initial_config(struct ata_host *host)
+{
+ struct ahci_host_priv *hpriv = host->private_data;
+ void __iomem *mmio = hpriv->mmio;
+
+ writel(hpriv->saved_cap, mmio + HOST_CAP);
+ if (hpriv->saved_cap2)
+ writel(hpriv->saved_cap2, mmio + HOST_CAP2);
+ writel(hpriv->saved_port_map, mmio + HOST_PORTS_IMPL);
+ (void) readl(mmio + HOST_PORTS_IMPL); /* flush */
+}
+
+static unsigned ahci_scr_offset(struct ata_port *ap, unsigned int sc_reg)
+{
+ static const int offset[] = {
+ [SCR_STATUS] = PORT_SCR_STAT,
+ [SCR_CONTROL] = PORT_SCR_CTL,
+ [SCR_ERROR] = PORT_SCR_ERR,
+ [SCR_ACTIVE] = PORT_SCR_ACT,
+ [SCR_NOTIFICATION] = PORT_SCR_NTF,
+ };
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+
+ if (sc_reg < ARRAY_SIZE(offset) &&
+ (sc_reg != SCR_NOTIFICATION || (hpriv->cap & HOST_CAP_SNTF)))
+ return offset[sc_reg];
+ return 0;
+}
+
+static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
+{
+ void __iomem *port_mmio = ahci_port_base(link->ap);
+ int offset = ahci_scr_offset(link->ap, sc_reg);
+
+ if (offset) {
+ *val = readl(port_mmio + offset);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
+{
+ void __iomem *port_mmio = ahci_port_base(link->ap);
+ int offset = ahci_scr_offset(link->ap, sc_reg);
+
+ if (offset) {
+ writel(val, port_mmio + offset);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int ahci_is_device_present(void __iomem *port_mmio)
+{
+ u8 status = readl(port_mmio + PORT_TFDATA) & 0xff;
+
+ /* Make sure PxTFD.STS.BSY and PxTFD.STS.DRQ are 0 */
+ if (status & (ATA_BUSY | ATA_DRQ))
+ return 0;
+
+ /* Make sure PxSSTS.DET is 3h */
+ status = readl(port_mmio + PORT_SCR_STAT) & 0xf;
+ if (status != 3)
+ return 0;
+ return 1;
+}
+
+void ahci_start_engine(struct ata_port *ap)
+{
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 tmp;
+
+ if (!ahci_is_device_present(port_mmio))
+ return;
+
+ /* start DMA */
+ tmp = readl(port_mmio + PORT_CMD);
+ tmp |= PORT_CMD_START;
+ writel(tmp, port_mmio + PORT_CMD);
+ readl(port_mmio + PORT_CMD); /* flush */
+}
+EXPORT_SYMBOL_GPL(ahci_start_engine);
+
+int ahci_stop_engine(struct ata_port *ap)
+{
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 tmp;
+
+ tmp = readl(port_mmio + PORT_CMD);
+
+ /* check if the HBA is idle */
+ if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)
+ return 0;
+
+ /* setting HBA to idle */
+ tmp &= ~PORT_CMD_START;
+ writel(tmp, port_mmio + PORT_CMD);
+
+ /* wait for engine to stop. This could be as long as 500 msec */
+ tmp = ata_wait_register(port_mmio + PORT_CMD,
+ PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500);
+ if (tmp & PORT_CMD_LIST_ON)
+ return -EIO;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ahci_stop_engine);
+
+static void ahci_start_fis_rx(struct ata_port *ap)
+{
+ void __iomem *port_mmio = ahci_port_base(ap);
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ struct ahci_port_priv *pp = ap->private_data;
+ u32 tmp;
+
+ /* set FIS registers */
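+ /* the high dword is written as (x >> 16) >> 16 rather than x >> 32 so
+ * the shift stays well-defined when dma_addr_t is only 32 bits wide
+ */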
+ if (hpriv->cap & HOST_CAP_64)
+ writel((pp->cmd_slot_dma >> 16) >> 16,
+ port_mmio + PORT_LST_ADDR_HI);
+ writel(pp->cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR);
+
+ if (hpriv->cap & HOST_CAP_64)
+ writel((pp->rx_fis_dma >> 16) >> 16,
+ port_mmio + PORT_FIS_ADDR_HI);
+ writel(pp->rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR);
+
+ /* enable FIS reception */
+ tmp = readl(port_mmio + PORT_CMD);
+ tmp |= PORT_CMD_FIS_RX;
+ writel(tmp, port_mmio + PORT_CMD);
+
+ /* flush */
+ readl(port_mmio + PORT_CMD);
+}
+
+static int ahci_stop_fis_rx(struct ata_port *ap)
+{
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 tmp;
+
+ /* disable FIS reception */
+ tmp = readl(port_mmio + PORT_CMD);
+ tmp &= ~PORT_CMD_FIS_RX;
+ writel(tmp, port_mmio + PORT_CMD);
+
+ /* wait for completion, spec says 500ms, give it 1000 */
+ tmp = ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_FIS_ON,
+ PORT_CMD_FIS_ON, 10, 1000);
+ if (tmp & PORT_CMD_FIS_ON)
+ return -EBUSY;
+
+ return 0;
+}
+
+static void ahci_power_up(struct ata_port *ap)
+{
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 cmd;
+
+ cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
+
+ /* spin up device */
+ if (hpriv->cap & HOST_CAP_SSS) {
+ cmd |= PORT_CMD_SPIN_UP;
+ writel(cmd, port_mmio + PORT_CMD);
+ }
+
+ /* wake up link */
+ writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD);
+}
+
+static void ahci_disable_alpm(struct ata_port *ap)
+{
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 cmd;
+ struct ahci_port_priv *pp = ap->private_data;
+
+ /* IPM bits should be disabled by libata-core */
+ /* get the existing command bits */
+ cmd = readl(port_mmio + PORT_CMD);
+
+ /* disable ALPM and ASP */
+ cmd &= ~PORT_CMD_ASP;
+ cmd &= ~PORT_CMD_ALPE;
+
+ /* force the interface back to active */
+ cmd |= PORT_CMD_ICC_ACTIVE;
+
+ /* write out new cmd value */
+ writel(cmd, port_mmio + PORT_CMD);
+ cmd = readl(port_mmio + PORT_CMD);
+
+ /* wait 10ms to be sure we've come out of any low power state */
+ msleep(10);
+
+ /* clear out any PhyRdy stuff from interrupt status */
+ writel(PORT_IRQ_PHYRDY, port_mmio + PORT_IRQ_STAT);
+
+ /* go ahead and clean out PhyRdy Change from Serror too */
+ ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));
+
+ /*
+ * Clear flag to indicate that we should ignore all PhyRdy
+ * state changes
+ */
+ hpriv->flags &= ~AHCI_HFLAG_NO_HOTPLUG;
+
+ /*
+ * Enable interrupts on Phy Ready.
+ */
+ pp->intr_mask |= PORT_IRQ_PHYRDY;
+ writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
+
+ /*
+ * don't change the link pm policy - we can be called
+ * just to turn off link pm temporarily
+ */
+}
+
+static int ahci_enable_alpm(struct ata_port *ap,
+ enum link_pm policy)
+{
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 cmd;
+ struct ahci_port_priv *pp = ap->private_data;
+ u32 asp;
+
+ /* Make sure the host is capable of link power management */
+ if (!(hpriv->cap & HOST_CAP_ALPM))
+ return -EINVAL;
+
+ switch (policy) {
+ case MAX_PERFORMANCE:
+ case NOT_AVAILABLE:
+ /*
+ * if we came here with NOT_AVAILABLE,
+ * it just means this is the first time we
+ * have tried to enable - default to max performance,
+ * and let the user go to lower power modes on request.
+ */
+ ahci_disable_alpm(ap);
+ return 0;
+ case MIN_POWER:
+ /* configure HBA to enter SLUMBER */
+ asp = PORT_CMD_ASP;
+ break;
+ case MEDIUM_POWER:
+ /* configure HBA to enter PARTIAL */
+ asp = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /*
+ * Disable interrupts on Phy Ready. This keeps us from
+ * getting woken up due to spurious phy ready interrupts
+ * TBD - Hot plug should be done via polling now, is
+ * that even supported?
+ */
+ pp->intr_mask &= ~PORT_IRQ_PHYRDY;
+ writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
+
+ /*
+ * Set a flag to indicate that we should ignore all PhyRdy
+ * state changes since these can happen now whenever we
+ * change link state
+ */
+ hpriv->flags |= AHCI_HFLAG_NO_HOTPLUG;
+
+ /* get the existing command bits */
+ cmd = readl(port_mmio + PORT_CMD);
+
+ /*
+ * Set ASP based on Policy
+ */
+ cmd |= asp;
+
+ /*
+ * Setting this bit will instruct the HBA to aggressively
+ * enter a lower power link state when it's appropriate and
+ * based on the value set above for ASP
+ */
+ cmd |= PORT_CMD_ALPE;
+
+ /* write out new cmd value */
+ writel(cmd, port_mmio + PORT_CMD);
+ cmd = readl(port_mmio + PORT_CMD);
+
+ /* IPM bits should be set by libata-core */
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static void ahci_power_down(struct ata_port *ap)
+{
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 cmd, scontrol;
+
+ if (!(hpriv->cap & HOST_CAP_SSS))
+ return;
+
+ /* put device into listen mode, first set PxSCTL.DET to 0 */
+ scontrol = readl(port_mmio + PORT_SCR_CTL);
+ scontrol &= ~0xf;
+ writel(scontrol, port_mmio + PORT_SCR_CTL);
+
+ /* then set PxCMD.SUD to 0 */
+ cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
+ cmd &= ~PORT_CMD_SPIN_UP;
+ writel(cmd, port_mmio + PORT_CMD);
+}
+#endif
+
+static void ahci_start_port(struct ata_port *ap)
+{
+ struct ahci_port_priv *pp = ap->private_data;
+ struct ata_link *link;
+ struct ahci_em_priv *emp;
+ ssize_t rc;
+ int i;
+
+ /* enable FIS reception */
+ ahci_start_fis_rx(ap);
+
+ /* enable DMA */
+ ahci_start_engine(ap);
+
+ /* turn on LEDs */
+ if (ap->flags & ATA_FLAG_EM) {
+ ata_for_each_link(link, ap, EDGE) {
+ emp = &pp->em_priv[link->pmp];
+
+ /* EM Transmit bit may be busy during init */
+ for (i = 0; i < EM_MAX_RETRY; i++) {
+ rc = ahci_transmit_led_message(ap,
+ emp->led_state,
+ 4);
+ if (rc == -EBUSY)
+ msleep(1);
+ else
+ break;
+ }
+ }
+ }
+
+ if (ap->flags & ATA_FLAG_SW_ACTIVITY)
+ ata_for_each_link(link, ap, EDGE)
+ ahci_init_sw_activity(link);
+
+}
+
+static int ahci_deinit_port(struct ata_port *ap, const char **emsg)
+{
+ int rc;
+
+ /* disable DMA */
+ rc = ahci_stop_engine(ap);
+ if (rc) {
+ *emsg = "failed to stop engine";
+ return rc;
+ }
+
+ /* disable FIS reception */
+ rc = ahci_stop_fis_rx(ap);
+ if (rc) {
+ *emsg = "failed stop FIS RX";
+ return rc;
+ }
+
+ return 0;
+}
+
+int ahci_reset_controller(struct ata_host *host)
+{
+ struct ahci_host_priv *hpriv = host->private_data;
+ void __iomem *mmio = hpriv->mmio;
+ u32 tmp;
+
+ /* we must be in AHCI mode, before using anything
+ * AHCI-specific, such as HOST_RESET.
+ */
+ ahci_enable_ahci(mmio);
+
+ /* global controller reset */
+ if (!ahci_skip_host_reset) {
+ tmp = readl(mmio + HOST_CTL);
+ if ((tmp & HOST_RESET) == 0) {
+ writel(tmp | HOST_RESET, mmio + HOST_CTL);
+ readl(mmio + HOST_CTL); /* flush */
+ }
+
+ /*
+ * to perform host reset, OS should set HOST_RESET
+ * and poll until this bit is read to be "0".
+ * reset must complete within 1 second, or
+ * the hardware should be considered fried.
+ */
+ tmp = ata_wait_register(mmio + HOST_CTL, HOST_RESET,
+ HOST_RESET, 10, 1000);
+
+ if (tmp & HOST_RESET) {
+ dev_printk(KERN_ERR, host->dev,
+ "controller reset failed (0x%x)\n", tmp);
+ return -EIO;
+ }
+
+ /* turn on AHCI mode */
+ ahci_enable_ahci(mmio);
+
+ /* Some registers might be cleared on reset. Restore
+ * initial values.
+ */
+ ahci_restore_initial_config(host);
+ } else
+ dev_printk(KERN_INFO, host->dev,
+ "skipping global host reset\n");
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ahci_reset_controller);
+
+static void ahci_sw_activity(struct ata_link *link)
+{
+ struct ata_port *ap = link->ap;
+ struct ahci_port_priv *pp = ap->private_data;
+ struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
+
+ if (!(link->flags & ATA_LFLAG_SW_ACTIVITY))
+ return;
+
+ emp->activity++;
+ if (!timer_pending(&emp->timer))
+ mod_timer(&emp->timer, jiffies + msecs_to_jiffies(10));
+}
+
+static void ahci_sw_activity_blink(unsigned long arg)
+{
+ struct ata_link *link = (struct ata_link *)arg;
+ struct ata_port *ap = link->ap;
+ struct ahci_port_priv *pp = ap->private_data;
+ struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
+ unsigned long led_message = emp->led_state;
+ u32 activity_led_state;
+ unsigned long flags;
+
+ led_message &= EM_MSG_LED_VALUE;
+ led_message |= ap->port_no | (link->pmp << 8);
+
+ /* check to see if we've had activity. If so,
+ * toggle state of LED and reset timer. If not,
+ * turn LED to desired idle state.
+ */
+ spin_lock_irqsave(ap->lock, flags);
+ if (emp->saved_activity != emp->activity) {
+ emp->saved_activity = emp->activity;
+ /* get the current LED state */
+ activity_led_state = led_message & EM_MSG_LED_VALUE_ON;
+
+ if (activity_led_state)
+ activity_led_state = 0;
+ else
+ activity_led_state = 1;
+
+ /* clear old state */
+ led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;
+
+ /* toggle state */
+ led_message |= (activity_led_state << 16);
+ mod_timer(&emp->timer, jiffies + msecs_to_jiffies(100));
+ } else {
+ /* switch to idle */
+ led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;
+ if (emp->blink_policy == BLINK_OFF)
+ led_message |= (1 << 16);
+ }
+ spin_unlock_irqrestore(ap->lock, flags);
+ ahci_transmit_led_message(ap, led_message, 4);
+}
+
+static void ahci_init_sw_activity(struct ata_link *link)
+{
+ struct ata_port *ap = link->ap;
+ struct ahci_port_priv *pp = ap->private_data;
+ struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
+
+ /* init activity stats, setup timer */
+ emp->saved_activity = emp->activity = 0;
+ setup_timer(&emp->timer, ahci_sw_activity_blink, (unsigned long)link);
+
+ /* check our blink policy and set flag for link if it's enabled */
+ if (emp->blink_policy)
+ link->flags |= ATA_LFLAG_SW_ACTIVITY;
+}
+
+int ahci_reset_em(struct ata_host *host)
+{
+ struct ahci_host_priv *hpriv = host->private_data;
+ void __iomem *mmio = hpriv->mmio;
+ u32 em_ctl;
+
+ em_ctl = readl(mmio + HOST_EM_CTL);
+ if ((em_ctl & EM_CTL_TM) || (em_ctl & EM_CTL_RST))
+ return -EINVAL;
+
+ writel(em_ctl | EM_CTL_RST, mmio + HOST_EM_CTL);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ahci_reset_em);
+
+static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
+ ssize_t size)
+{
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ struct ahci_port_priv *pp = ap->private_data;
+ void __iomem *mmio = hpriv->mmio;
+ u32 em_ctl;
+ u32 message[] = {0, 0};
+ unsigned long flags;
+ int pmp;
+ struct ahci_em_priv *emp;
+
+ /* get the slot number from the message */
+ pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
+ if (pmp < EM_MAX_SLOTS)
+ emp = &pp->em_priv[pmp];
+ else
+ return -EINVAL;
+
+ spin_lock_irqsave(ap->lock, flags);
+
+ /*
+ * if we are still busy transmitting a previous message,
+ * do not allow a new one to be sent
+ */
+ em_ctl = readl(mmio + HOST_EM_CTL);
+ if (em_ctl & EM_CTL_TM) {
+ spin_unlock_irqrestore(ap->lock, flags);
+ return -EBUSY;
+ }
+
+ if (hpriv->em_msg_type & EM_MSG_TYPE_LED) {
+ /*
+ * create message header - this is all zero except for
+ * the message size, which is 4 bytes.
+ */
+ message[0] |= (4 << 8);
+
+ /* ignore 0:4 of byte zero, fill in port info yourself */
+ message[1] = ((state & ~EM_MSG_LED_HBA_PORT) | ap->port_no);
+
+ /* write message to EM_LOC */
+ writel(message[0], mmio + hpriv->em_loc);
+ writel(message[1], mmio + hpriv->em_loc+4);
+
+ /*
+ * tell hardware to transmit the message
+ */
+ writel(em_ctl | EM_CTL_TM, mmio + HOST_EM_CTL);
+ }
+
+ /* save off new led state for port/slot */
+ emp->led_state = state;
+
+ spin_unlock_irqrestore(ap->lock, flags);
+ return size;
+}
+
+static ssize_t ahci_led_show(struct ata_port *ap, char *buf)
+{
+ struct ahci_port_priv *pp = ap->private_data;
+ struct ata_link *link;
+ struct ahci_em_priv *emp;
+ int rc = 0;
+
+ ata_for_each_link(link, ap, EDGE) {
+ emp = &pp->em_priv[link->pmp];
+ rc += sprintf(buf, "%lx\n", emp->led_state);
+ }
+ return rc;
+}
+
+static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
+ size_t size)
+{
+ int state;
+ int pmp;
+ struct ahci_port_priv *pp = ap->private_data;
+ struct ahci_em_priv *emp;
+
+ state = simple_strtoul(buf, NULL, 0);
+
+ /* get the slot number from the message */
+ pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
+ if (pmp < EM_MAX_SLOTS)
+ emp = &pp->em_priv[pmp];
+ else
+ return -EINVAL;
+
+ /* mask off the activity bits if we are in sw_activity
+ * mode, user should turn off sw_activity before setting
+ * activity led through em_message
+ */
+ if (emp->blink_policy)
+ state &= ~EM_MSG_LED_VALUE_ACTIVITY;
+
+ return ahci_transmit_led_message(ap, state, size);
+}
+
+static ssize_t ahci_activity_store(struct ata_device *dev, enum sw_activity val)
+{
+ struct ata_link *link = dev->link;
+ struct ata_port *ap = link->ap;
+ struct ahci_port_priv *pp = ap->private_data;
+ struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
+ u32 port_led_state = emp->led_state;
+
+ /* save the desired Activity LED behavior */
+ if (val == OFF) {
+ /* clear LFLAG */
+ link->flags &= ~(ATA_LFLAG_SW_ACTIVITY);
+
+ /* set the LED to OFF */
+ port_led_state &= EM_MSG_LED_VALUE_OFF;
+ port_led_state |= (ap->port_no | (link->pmp << 8));
+ ahci_transmit_led_message(ap, port_led_state, 4);
+ } else {
+ link->flags |= ATA_LFLAG_SW_ACTIVITY;
+ if (val == BLINK_OFF) {
+ /* set LED to ON for idle */
+ port_led_state &= EM_MSG_LED_VALUE_OFF;
+ port_led_state |= (ap->port_no | (link->pmp << 8));
+ port_led_state |= EM_MSG_LED_VALUE_ON; /* check this */
+ ahci_transmit_led_message(ap, port_led_state, 4);
+ }
+ }
+ emp->blink_policy = val;
+ return 0;
+}
+
+static ssize_t ahci_activity_show(struct ata_device *dev, char *buf)
+{
+ struct ata_link *link = dev->link;
+ struct ata_port *ap = link->ap;
+ struct ahci_port_priv *pp = ap->private_data;
+ struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
+
+ /* display the saved value of activity behavior for this
+ * disk.
+ */
+ return sprintf(buf, "%d\n", emp->blink_policy);
+}
+
+static void ahci_port_init(struct device *dev, struct ata_port *ap,
+ int port_no, void __iomem *mmio,
+ void __iomem *port_mmio)
+{
+ const char *emsg = NULL;
+ int rc;
+ u32 tmp;
+
+ /* make sure port is not active */
+ rc = ahci_deinit_port(ap, &emsg);
+ if (rc)
+ dev_warn(dev, "%s (%d)\n", emsg, rc);
+
+ /* clear SError */
+ tmp = readl(port_mmio + PORT_SCR_ERR);
+ VPRINTK("PORT_SCR_ERR 0x%x\n", tmp);
+ writel(tmp, port_mmio + PORT_SCR_ERR);
+
+ /* clear port IRQ */
+ tmp = readl(port_mmio + PORT_IRQ_STAT);
+ VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
+ if (tmp)
+ writel(tmp, port_mmio + PORT_IRQ_STAT);
+
+ writel(1 << port_no, mmio + HOST_IRQ_STAT);
+}
+
+void ahci_init_controller(struct ata_host *host)
+{
+ struct ahci_host_priv *hpriv = host->private_data;
+ void __iomem *mmio = hpriv->mmio;
+ int i;
+ void __iomem *port_mmio;
+ u32 tmp;
+
+ for (i = 0; i < host->n_ports; i++) {
+ struct ata_port *ap = host->ports[i];
+
+ port_mmio = ahci_port_base(ap);
+ if (ata_port_is_dummy(ap))
+ continue;
+
+ ahci_port_init(host->dev, ap, i, mmio, port_mmio);
+ }
+
+ tmp = readl(mmio + HOST_CTL);
+ VPRINTK("HOST_CTL 0x%x\n", tmp);
+ writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL);
+ tmp = readl(mmio + HOST_CTL);
+ VPRINTK("HOST_CTL 0x%x\n", tmp);
+}
+EXPORT_SYMBOL_GPL(ahci_init_controller);
+
+static void ahci_dev_config(struct ata_device *dev)
+{
+ struct ahci_host_priv *hpriv = dev->link->ap->host->private_data;
+
+ if (hpriv->flags & AHCI_HFLAG_SECT255) {
+ dev->max_sectors = 255;
+ ata_dev_printk(dev, KERN_INFO,
+ "SB600 AHCI: limiting to 255 sectors per cmd\n");
+ }
+}
+
+static unsigned int ahci_dev_classify(struct ata_port *ap)
+{
+ void __iomem *port_mmio = ahci_port_base(ap);
+ struct ata_taskfile tf;
+ u32 tmp;
+
+ tmp = readl(port_mmio + PORT_SIG);
+ tf.lbah = (tmp >> 24) & 0xff;
+ tf.lbam = (tmp >> 16) & 0xff;
+ tf.lbal = (tmp >> 8) & 0xff;
+ tf.nsect = (tmp) & 0xff;
+
+ return ata_dev_classify(&tf);
+}
+
+static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
+ u32 opts)
+{
+ dma_addr_t cmd_tbl_dma;
+
+ cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ;
+
+ pp->cmd_slot[tag].opts = cpu_to_le32(opts);
+ pp->cmd_slot[tag].status = 0;
+ pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff);
+ pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
+}
+
+int ahci_kick_engine(struct ata_port *ap)
+{
+ void __iomem *port_mmio = ahci_port_base(ap);
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
+ u32 tmp;
+ int busy, rc;
+
+ /* stop engine */
+ rc = ahci_stop_engine(ap);
+ if (rc)
+ goto out_restart;
+
+ /* need to do CLO?
+ * always do CLO if PMP is attached (AHCI-1.3 9.2)
+ */
+ busy = status & (ATA_BUSY | ATA_DRQ);
+ if (!busy && !sata_pmp_attached(ap)) {
+ rc = 0;
+ goto out_restart;
+ }
+
+ if (!(hpriv->cap & HOST_CAP_CLO)) {
+ rc = -EOPNOTSUPP;
+ goto out_restart;
+ }
+
+ /* perform CLO */
+ tmp = readl(port_mmio + PORT_CMD);
+ tmp |= PORT_CMD_CLO;
+ writel(tmp, port_mmio + PORT_CMD);
+
+ rc = 0;
+ tmp = ata_wait_register(port_mmio + PORT_CMD,
+ PORT_CMD_CLO, PORT_CMD_CLO, 1, 500);
+ if (tmp & PORT_CMD_CLO)
+ rc = -EIO;
+
+ /* restart engine */
+ out_restart:
+ ahci_start_engine(ap);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(ahci_kick_engine);
+
+static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
+ struct ata_taskfile *tf, int is_cmd, u16 flags,
+ unsigned long timeout_msec)
+{
+ const u32 cmd_fis_len = 5; /* five dwords */
+ struct ahci_port_priv *pp = ap->private_data;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u8 *fis = pp->cmd_tbl;
+ u32 tmp;
+
+ /* prep the command */
+ ata_tf_to_fis(tf, pmp, is_cmd, fis);
+ ahci_fill_cmd_slot(pp, 0, cmd_fis_len | flags | (pmp << 12));
+
+ /* issue & wait */
+ writel(1, port_mmio + PORT_CMD_ISSUE);
+
+ if (timeout_msec) {
+ tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1,
+ 1, timeout_msec);
+ if (tmp & 0x1) {
+ ahci_kick_engine(ap);
+ return -EBUSY;
+ }
+ } else
+ readl(port_mmio + PORT_CMD_ISSUE); /* flush */
+
+ return 0;
+}
+
+int ahci_do_softreset(struct ata_link *link, unsigned int *class,
+ int pmp, unsigned long deadline,
+ int (*check_ready)(struct ata_link *link))
+{
+ struct ata_port *ap = link->ap;
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ const char *reason = NULL;
+ unsigned long now, msecs;
+ struct ata_taskfile tf;
+ int rc;
+
+ DPRINTK("ENTER\n");
+
+ /* prepare for SRST (AHCI-1.1 10.4.1) */
+ rc = ahci_kick_engine(ap);
+ if (rc && rc != -EOPNOTSUPP)
+ ata_link_printk(link, KERN_WARNING,
+ "failed to reset engine (errno=%d)\n", rc);
+
+ ata_tf_init(link->device, &tf);
+
+ /* issue the first D2H Register FIS */
+ msecs = 0;
+ now = jiffies;
+ if (time_after(now, deadline))
+ msecs = jiffies_to_msecs(deadline - now);
+
+ tf.ctl |= ATA_SRST;
+ if (ahci_exec_polled_cmd(ap, pmp, &tf, 0,
+ AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY, msecs)) {
+ rc = -EIO;
+ reason = "1st FIS failed";
+ goto fail;
+ }
+
+ /* spec says at least 5us, but be generous and sleep for 1ms */
+ msleep(1);
+
+ /* issue the second D2H Register FIS */
+ tf.ctl &= ~ATA_SRST;
+ ahci_exec_polled_cmd(ap, pmp, &tf, 0, 0, 0);
+
+ /* wait for link to become ready */
+ rc = ata_wait_after_reset(link, deadline, check_ready);
+ if (rc == -EBUSY && hpriv->flags & AHCI_HFLAG_SRST_TOUT_IS_OFFLINE) {
+ /*
+ * Workaround for cases where link online status can't
+ * be trusted. Treat device readiness timeout as link
+ * offline.
+ */
+ ata_link_printk(link, KERN_INFO,
+ "device not ready, treating as offline\n");
+ *class = ATA_DEV_NONE;
+ } else if (rc) {
+ /* link occupied, -ENODEV too is an error */
+ reason = "device not ready";
+ goto fail;
+ } else
+ *class = ahci_dev_classify(ap);
+
+ DPRINTK("EXIT, class=%u\n", *class);
+ return 0;
+
+ fail:
+ ata_link_printk(link, KERN_ERR, "softreset failed (%s)\n", reason);
+ return rc;
+}
+
+int ahci_check_ready(struct ata_link *link)
+{
+ void __iomem *port_mmio = ahci_port_base(link->ap);
+ u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
+
+ return ata_check_ready(status);
+}
+EXPORT_SYMBOL_GPL(ahci_check_ready);
+
+static int ahci_softreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline)
+{
+ int pmp = sata_srst_pmp(link);
+
+ DPRINTK("ENTER\n");
+
+ return ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
+}
+EXPORT_SYMBOL_GPL(ahci_do_softreset);
+
+static int ahci_hardreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline)
+{
+ const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
+ struct ata_port *ap = link->ap;
+ struct ahci_port_priv *pp = ap->private_data;
+ u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
+ struct ata_taskfile tf;
+ bool online;
+ int rc;
+
+ DPRINTK("ENTER\n");
+
+ ahci_stop_engine(ap);
+
+ /* clear D2H reception area to properly wait for D2H FIS */
+ ata_tf_init(link->device, &tf);
+ tf.command = 0x80;
+ ata_tf_to_fis(&tf, 0, 0, d2h_fis);
+
+ rc = sata_link_hardreset(link, timing, deadline, &online,
+ ahci_check_ready);
+
+ ahci_start_engine(ap);
+
+ if (online)
+ *class = ahci_dev_classify(ap);
+
+ DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
+ return rc;
+}
+
+static void ahci_postreset(struct ata_link *link, unsigned int *class)
+{
+ struct ata_port *ap = link->ap;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 new_tmp, tmp;
+
+ ata_std_postreset(link, class);
+
+ /* Make sure port's ATAPI bit is set appropriately */
+ new_tmp = tmp = readl(port_mmio + PORT_CMD);
+ if (*class == ATA_DEV_ATAPI)
+ new_tmp |= PORT_CMD_ATAPI;
+ else
+ new_tmp &= ~PORT_CMD_ATAPI;
+ if (new_tmp != tmp) {
+ writel(new_tmp, port_mmio + PORT_CMD);
+ readl(port_mmio + PORT_CMD); /* flush */
+ }
+}
+
+static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
+{
+ struct scatterlist *sg;
+ struct ahci_sg *ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
+ unsigned int si;
+
+ VPRINTK("ENTER\n");
+
+ /*
+ * Next, the S/G list.
+ */
+ for_each_sg(qc->sg, sg, qc->n_elem, si) {
+ dma_addr_t addr = sg_dma_address(sg);
+ u32 sg_len = sg_dma_len(sg);
+
+ ahci_sg[si].addr = cpu_to_le32(addr & 0xffffffff);
+ ahci_sg[si].addr_hi = cpu_to_le32((addr >> 16) >> 16);
+ ahci_sg[si].flags_size = cpu_to_le32(sg_len - 1);
+ }
+
+ return si;
+}
+
+static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct ahci_port_priv *pp = ap->private_data;
+
+ if (!sata_pmp_attached(ap) || pp->fbs_enabled)
+ return ata_std_qc_defer(qc);
+ else
+ return sata_pmp_qc_defer_cmd_switch(qc);
+}
+
+static void ahci_qc_prep(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct ahci_port_priv *pp = ap->private_data;
+ int is_atapi = ata_is_atapi(qc->tf.protocol);
+ void *cmd_tbl;
+ u32 opts;
+ const u32 cmd_fis_len = 5; /* five dwords */
+ unsigned int n_elem;
+
+ /*
+ * Fill in command table information. First, the header,
+ * a SATA Register - Host to Device command FIS.
+ */
+ cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;
+
+ ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl);
+ if (is_atapi) {
+ memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
+ memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
+ }
+
+ n_elem = 0;
+ if (qc->flags & ATA_QCFLAG_DMAMAP)
+ n_elem = ahci_fill_sg(qc, cmd_tbl);
+
+ /*
+ * Fill in command slot information.
+ */
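+ /* command header DW0: bits 4:0 hold the CFIS length in dwords, bits
+ * 15:12 the PMP port number and bits 31:16 the PRDT entry count; the
+ * write/ATAPI/prefetch flag bits are OR'd in below
+ */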
+ opts = cmd_fis_len | n_elem << 16 | (qc->dev->link->pmp << 12);
+ if (qc->tf.flags & ATA_TFLAG_WRITE)
+ opts |= AHCI_CMD_WRITE;
+ if (is_atapi)
+ opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
+
+ ahci_fill_cmd_slot(pp, qc->tag, opts);
+}
+
+static void ahci_fbs_dec_intr(struct ata_port *ap)
+{
+ struct ahci_port_priv *pp = ap->private_data;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 fbs = readl(port_mmio + PORT_FBS);
+ int retries = 3;
+
+ DPRINTK("ENTER\n");
+ BUG_ON(!pp->fbs_enabled);
+
+ /* time to wait for DEC is not specified by AHCI spec,
+ * add a retry loop for safety.
+ */
+ writel(fbs | PORT_FBS_DEC, port_mmio + PORT_FBS);
+ fbs = readl(port_mmio + PORT_FBS);
+ while ((fbs & PORT_FBS_DEC) && retries--) {
+ udelay(1);
+ fbs = readl(port_mmio + PORT_FBS);
+ }
+
+ if (fbs & PORT_FBS_DEC)
+ dev_printk(KERN_ERR, ap->host->dev,
+ "failed to clear device error\n");
+}
+
+static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
+{
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ struct ahci_port_priv *pp = ap->private_data;
+ struct ata_eh_info *host_ehi = &ap->link.eh_info;
+ struct ata_link *link = NULL;
+ struct ata_queued_cmd *active_qc;
+ struct ata_eh_info *active_ehi;
+ bool fbs_need_dec = false;
+ u32 serror;
+
+ /* determine active link with error */
+ if (pp->fbs_enabled) {
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 fbs = readl(port_mmio + PORT_FBS);
+ int pmp = fbs >> PORT_FBS_DWE_OFFSET;
+
+ if ((fbs & PORT_FBS_SDE) && (pmp < ap->nr_pmp_links) &&
+ ata_link_online(&ap->pmp_link[pmp])) {
+ link = &ap->pmp_link[pmp];
+ fbs_need_dec = true;
+ }
+
+ } else
+ ata_for_each_link(link, ap, EDGE)
+ if (ata_link_active(link))
+ break;
+
+ if (!link)
+ link = &ap->link;
+
+ active_qc = ata_qc_from_tag(ap, link->active_tag);
+ active_ehi = &link->eh_info;
+
+ /* record irq stat */
+ ata_ehi_clear_desc(host_ehi);
+ ata_ehi_push_desc(host_ehi, "irq_stat 0x%08x", irq_stat);
+
+ /* AHCI needs SError cleared; otherwise, it might lock up */
+ ahci_scr_read(&ap->link, SCR_ERROR, &serror);
+ ahci_scr_write(&ap->link, SCR_ERROR, serror);
+ host_ehi->serror |= serror;
+
+ /* some controllers set IRQ_IF_ERR on device errors, ignore it */
+ if (hpriv->flags & AHCI_HFLAG_IGN_IRQ_IF_ERR)
+ irq_stat &= ~PORT_IRQ_IF_ERR;
+
+ if (irq_stat & PORT_IRQ_TF_ERR) {
+ /* If qc is active, charge it; otherwise, the active
+ * link. There's no active qc on NCQ errors. It will
+ * be determined by EH by reading log page 10h.
+ */
+ if (active_qc)
+ active_qc->err_mask |= AC_ERR_DEV;
+ else
+ active_ehi->err_mask |= AC_ERR_DEV;
+
+ if (hpriv->flags & AHCI_HFLAG_IGN_SERR_INTERNAL)
+ host_ehi->serror &= ~SERR_INTERNAL;
+ }
+
+ if (irq_stat & PORT_IRQ_UNK_FIS) {
+ u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK);
+
+ active_ehi->err_mask |= AC_ERR_HSM;
+ active_ehi->action |= ATA_EH_RESET;
+ ata_ehi_push_desc(active_ehi,
+ "unknown FIS %08x %08x %08x %08x" ,
+ unk[0], unk[1], unk[2], unk[3]);
+ }
+
+ if (sata_pmp_attached(ap) && (irq_stat & PORT_IRQ_BAD_PMP)) {
+ active_ehi->err_mask |= AC_ERR_HSM;
+ active_ehi->action |= ATA_EH_RESET;
+ ata_ehi_push_desc(active_ehi, "incorrect PMP");
+ }
+
+ if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
+ host_ehi->err_mask |= AC_ERR_HOST_BUS;
+ host_ehi->action |= ATA_EH_RESET;
+ ata_ehi_push_desc(host_ehi, "host bus error");
+ }
+
+ if (irq_stat & PORT_IRQ_IF_ERR) {
+ if (fbs_need_dec)
+ active_ehi->err_mask |= AC_ERR_DEV;
+ else {
+ host_ehi->err_mask |= AC_ERR_ATA_BUS;
+ host_ehi->action |= ATA_EH_RESET;
+ }
+
+ ata_ehi_push_desc(host_ehi, "interface fatal error");
+ }
+
+ if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
+ ata_ehi_hotplugged(host_ehi);
+ ata_ehi_push_desc(host_ehi, "%s",
+ irq_stat & PORT_IRQ_CONNECT ?
+ "connection status changed" : "PHY RDY changed");
+ }
+
+ /* okay, let's hand over to EH */
+
+ if (irq_stat & PORT_IRQ_FREEZE)
+ ata_port_freeze(ap);
+ else if (fbs_need_dec) {
+ ata_link_abort(link);
+ ahci_fbs_dec_intr(ap);
+ } else
+ ata_port_abort(ap);
+}
+
+static void ahci_port_intr(struct ata_port *ap)
+{
+ void __iomem *port_mmio = ahci_port_base(ap);
+ struct ata_eh_info *ehi = &ap->link.eh_info;
+ struct ahci_port_priv *pp = ap->private_data;
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ int resetting = !!(ap->pflags & ATA_PFLAG_RESETTING);
+ u32 status, qc_active = 0;
+ int rc;
+
+ status = readl(port_mmio + PORT_IRQ_STAT);
+ writel(status, port_mmio + PORT_IRQ_STAT);
+
+ /* ignore BAD_PMP while resetting */
+ if (unlikely(resetting))
+ status &= ~PORT_IRQ_BAD_PMP;
+
+ /* If we are getting PhyRdy, this is
+ * just a power state change, we should
+ * clear out this, plus the PhyRdy/Comm
+ * Wake bits from Serror
+ */
+ if ((hpriv->flags & AHCI_HFLAG_NO_HOTPLUG) &&
+ (status & PORT_IRQ_PHYRDY)) {
+ status &= ~PORT_IRQ_PHYRDY;
+ ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));
+ }
+
+ if (unlikely(status & PORT_IRQ_ERROR)) {
+ ahci_error_intr(ap, status);
+ return;
+ }
+
+ if (status & PORT_IRQ_SDB_FIS) {
+ /* If SNotification is available, leave notification
+ * handling to sata_async_notification(). If not,
+ * emulate it by snooping SDB FIS RX area.
+ *
+ * Snooping FIS RX area is probably cheaper than
+ * poking SNotification but some controllers which
+ * implement SNotification, ICH9 for example, don't
+ * store AN SDB FIS into receive area.
+ */
+ if (hpriv->cap & HOST_CAP_SNTF)
+ sata_async_notification(ap);
+ else {
+ /* If the 'N' bit in word 0 of the FIS is set,
+ * we just received asynchronous notification.
+ * Tell libata about it.
+ *
+ * Lack of SNotification should not appear in
+ * ahci 1.2, so the workaround is unnecessary
+ * when FBS is enabled.
+ */
+ if (pp->fbs_enabled)
+ WARN_ON_ONCE(1);
+ else {
+ const __le32 *f = pp->rx_fis + RX_FIS_SDB;
+ u32 f0 = le32_to_cpu(f[0]);
+ if (f0 & (1 << 15))
+ sata_async_notification(ap);
+ }
+ }
+ }
+
+ /* pp->active_link is not reliable once FBS is enabled, both
+ * PORT_SCR_ACT and PORT_CMD_ISSUE should be checked because
+ * NCQ and non-NCQ commands may be in flight at the same time.
+ */
+ if (pp->fbs_enabled) {
+ if (ap->qc_active) {
+ qc_active = readl(port_mmio + PORT_SCR_ACT);
+ qc_active |= readl(port_mmio + PORT_CMD_ISSUE);
+ }
+ } else {
+ /* pp->active_link is valid iff any command is in flight */
+ if (ap->qc_active && pp->active_link->sactive)
+ qc_active = readl(port_mmio + PORT_SCR_ACT);
+ else
+ qc_active = readl(port_mmio + PORT_CMD_ISSUE);
+ }
+
+
+ rc = ata_qc_complete_multiple(ap, qc_active);
+
+ /* while resetting, invalid completions are expected */
+ if (unlikely(rc < 0 && !resetting)) {
+ ehi->err_mask |= AC_ERR_HSM;
+ ehi->action |= ATA_EH_RESET;
+ ata_port_freeze(ap);
+ }
+}
+
+irqreturn_t ahci_interrupt(int irq, void *dev_instance)
+{
+ struct ata_host *host = dev_instance;
+ struct ahci_host_priv *hpriv;
+ unsigned int i, handled = 0;
+ void __iomem *mmio;
+ u32 irq_stat, irq_masked;
+
+ VPRINTK("ENTER\n");
+
+ hpriv = host->private_data;
+ mmio = hpriv->mmio;
+
+ /* sigh. 0xffffffff is a valid return from h/w */
+ irq_stat = readl(mmio + HOST_IRQ_STAT);
+ if (!irq_stat)
+ return IRQ_NONE;
+
+ irq_masked = irq_stat & hpriv->port_map;
+
+ spin_lock(&host->lock);
+
+ for (i = 0; i < host->n_ports; i++) {
+ struct ata_port *ap;
+
+ if (!(irq_masked & (1 << i)))
+ continue;
+
+ ap = host->ports[i];
+ if (ap) {
+ ahci_port_intr(ap);
+ VPRINTK("port %u\n", i);
+ } else {
+ VPRINTK("port %u (no irq)\n", i);
+ if (ata_ratelimit())
+ dev_printk(KERN_WARNING, host->dev,
+ "interrupt on disabled port %u\n", i);
+ }
+
+ handled = 1;
+ }
+
+ /* HOST_IRQ_STAT behaves as level triggered latch meaning that
+ * it should be cleared after all the port events are cleared;
+ * otherwise, it will raise a spurious interrupt after each
+ * valid one. Please read section 10.6.2 of ahci 1.1 for more
+ * information.
+ *
+ * Also, use the unmasked value to clear the interrupt, as a spurious
+ * pending event on a dummy port might cause a screaming IRQ.
+ */
+ writel(irq_stat, mmio + HOST_IRQ_STAT);
+
+ spin_unlock(&host->lock);
+
+ VPRINTK("EXIT\n");
+
+ return IRQ_RETVAL(handled);
+}
+EXPORT_SYMBOL_GPL(ahci_interrupt);
+
+static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ struct ahci_port_priv *pp = ap->private_data;
+
+ /* Keep track of the currently active link. It will be used
+ * in completion path to determine whether NCQ phase is in
+ * progress.
+ */
+ pp->active_link = qc->dev->link;
+
+ if (qc->tf.protocol == ATA_PROT_NCQ)
+ writel(1 << qc->tag, port_mmio + PORT_SCR_ACT);
+
+ if (pp->fbs_enabled && pp->fbs_last_dev != qc->dev->link->pmp) {
+ u32 fbs = readl(port_mmio + PORT_FBS);
+ fbs &= ~(PORT_FBS_DEV_MASK | PORT_FBS_DEC);
+ fbs |= qc->dev->link->pmp << PORT_FBS_DEV_OFFSET;
+ writel(fbs, port_mmio + PORT_FBS);
+ pp->fbs_last_dev = qc->dev->link->pmp;
+ }
+
+ writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE);
+
+ ahci_sw_activity(qc->dev->link);
+
+ return 0;
+}
+
+static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
+{
+ struct ahci_port_priv *pp = qc->ap->private_data;
+ u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
+
+ if (pp->fbs_enabled)
+ d2h_fis += qc->dev->link->pmp * AHCI_RX_FIS_SZ;
+
+ ata_tf_from_fis(d2h_fis, &qc->result_tf);
+ return true;
+}
+
+static void ahci_freeze(struct ata_port *ap)
+{
+ void __iomem *port_mmio = ahci_port_base(ap);
+
+ /* turn IRQ off */
+ writel(0, port_mmio + PORT_IRQ_MASK);
+}
+
+static void ahci_thaw(struct ata_port *ap)
+{
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ void __iomem *mmio = hpriv->mmio;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 tmp;
+ struct ahci_port_priv *pp = ap->private_data;
+
+ /* clear IRQ */
+ tmp = readl(port_mmio + PORT_IRQ_STAT);
+ writel(tmp, port_mmio + PORT_IRQ_STAT);
+ writel(1 << ap->port_no, mmio + HOST_IRQ_STAT);
+
+ /* turn IRQ back on */
+ writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
+}
+
+static void ahci_error_handler(struct ata_port *ap)
+{
+ if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
+ /* restart engine */
+ ahci_stop_engine(ap);
+ ahci_start_engine(ap);
+ }
+
+ sata_pmp_error_handler(ap);
+}
+
+static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+
+ /* make DMA engine forget about the failed command */
+ if (qc->flags & ATA_QCFLAG_FAILED)
+ ahci_kick_engine(ap);
+}
+
+static void ahci_enable_fbs(struct ata_port *ap)
+{
+ struct ahci_port_priv *pp = ap->private_data;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 fbs;
+ int rc;
+
+ if (!pp->fbs_supported)
+ return;
+
+ fbs = readl(port_mmio + PORT_FBS);
+ if (fbs & PORT_FBS_EN) {
+ pp->fbs_enabled = true;
+ pp->fbs_last_dev = -1; /* initialization */
+ return;
+ }
+
+ rc = ahci_stop_engine(ap);
+ if (rc)
+ return;
+
+ writel(fbs | PORT_FBS_EN, port_mmio + PORT_FBS);
+ fbs = readl(port_mmio + PORT_FBS);
+ if (fbs & PORT_FBS_EN) {
+ dev_printk(KERN_INFO, ap->host->dev, "FBS is enabled.\n");
+ pp->fbs_enabled = true;
+ pp->fbs_last_dev = -1; /* initialization */
+ } else
+ dev_printk(KERN_ERR, ap->host->dev, "Failed to enable FBS\n");
+
+ ahci_start_engine(ap);
+}
+
+static void ahci_disable_fbs(struct ata_port *ap)
+{
+ struct ahci_port_priv *pp = ap->private_data;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 fbs;
+ int rc;
+
+ if (!pp->fbs_supported)
+ return;
+
+ fbs = readl(port_mmio + PORT_FBS);
+ if ((fbs & PORT_FBS_EN) == 0) {
+ pp->fbs_enabled = false;
+ return;
+ }
+
+ rc = ahci_stop_engine(ap);
+ if (rc)
+ return;
+
+ writel(fbs & ~PORT_FBS_EN, port_mmio + PORT_FBS);
+ fbs = readl(port_mmio + PORT_FBS);
+ if (fbs & PORT_FBS_EN)
+ dev_printk(KERN_ERR, ap->host->dev, "Failed to disable FBS\n");
+ else {
+ dev_printk(KERN_INFO, ap->host->dev, "FBS is disabled.\n");
+ pp->fbs_enabled = false;
+ }
+
+ ahci_start_engine(ap);
+}
+
+static void ahci_pmp_attach(struct ata_port *ap)
+{
+ void __iomem *port_mmio = ahci_port_base(ap);
+ struct ahci_port_priv *pp = ap->private_data;
+ u32 cmd;
+
+ cmd = readl(port_mmio + PORT_CMD);
+ cmd |= PORT_CMD_PMP;
+ writel(cmd, port_mmio + PORT_CMD);
+
+ ahci_enable_fbs(ap);
+
+ pp->intr_mask |= PORT_IRQ_BAD_PMP;
+ writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
+}
+
+static void ahci_pmp_detach(struct ata_port *ap)
+{
+ void __iomem *port_mmio = ahci_port_base(ap);
+ struct ahci_port_priv *pp = ap->private_data;
+ u32 cmd;
+
+ ahci_disable_fbs(ap);
+
+ cmd = readl(port_mmio + PORT_CMD);
+ cmd &= ~PORT_CMD_PMP;
+ writel(cmd, port_mmio + PORT_CMD);
+
+ pp->intr_mask &= ~PORT_IRQ_BAD_PMP;
+ writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
+}
+
+static int ahci_port_resume(struct ata_port *ap)
+{
+ ahci_power_up(ap);
+ ahci_start_port(ap);
+
+ if (sata_pmp_attached(ap))
+ ahci_pmp_attach(ap);
+ else
+ ahci_pmp_detach(ap);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
+{
+ const char *emsg = NULL;
+ int rc;
+
+ rc = ahci_deinit_port(ap, &emsg);
+ if (rc == 0)
+ ahci_power_down(ap);
+ else {
+ ata_port_printk(ap, KERN_ERR, "%s (%d)\n", emsg, rc);
+ ahci_start_port(ap);
+ }
+
+ return rc;
+}
+#endif
+
+static int ahci_port_start(struct ata_port *ap)
+{
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ struct device *dev = ap->host->dev;
+ struct ahci_port_priv *pp;
+ void *mem;
+ dma_addr_t mem_dma;
+ size_t dma_sz, rx_fis_sz;
+
+ pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
+ if (!pp)
+ return -ENOMEM;
+
+ /* check FBS capability */
+ if ((hpriv->cap & HOST_CAP_FBS) && sata_pmp_supported(ap)) {
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 cmd = readl(port_mmio + PORT_CMD);
+ if (cmd & PORT_CMD_FBSCP)
+ pp->fbs_supported = true;
+ else
+ dev_printk(KERN_WARNING, dev,
+ "The port is not capable of FBS\n");
+ }
+
+ if (pp->fbs_supported) {
+ dma_sz = AHCI_PORT_PRIV_FBS_DMA_SZ;
+ rx_fis_sz = AHCI_RX_FIS_SZ * 16;
+ } else {
+ dma_sz = AHCI_PORT_PRIV_DMA_SZ;
+ rx_fis_sz = AHCI_RX_FIS_SZ;
+ }
+
+ mem = dmam_alloc_coherent(dev, dma_sz, &mem_dma, GFP_KERNEL);
+ if (!mem)
+ return -ENOMEM;
+ memset(mem, 0, dma_sz);
+
+ /*
+ * First item in chunk of DMA memory: 32-slot command table,
+ * 32 bytes each in size
+ */
+ pp->cmd_slot = mem;
+ pp->cmd_slot_dma = mem_dma;
+
+ mem += AHCI_CMD_SLOT_SZ;
+ mem_dma += AHCI_CMD_SLOT_SZ;
+
+ /*
+ * Second item: Received-FIS area
+ */
+ pp->rx_fis = mem;
+ pp->rx_fis_dma = mem_dma;
+
+ mem += rx_fis_sz;
+ mem_dma += rx_fis_sz;
+
+ /*
+ * Third item: data area for storing a single command
+ * and its scatter-gather table
+ */
+ pp->cmd_tbl = mem;
+ pp->cmd_tbl_dma = mem_dma;
+
+ /*
+ * Save off initial list of interrupts to be enabled.
+ * This could be changed later
+ */
+ pp->intr_mask = DEF_PORT_IRQ;
+
+ ap->private_data = pp;
+
+ /* engage engines, captain */
+ return ahci_port_resume(ap);
+}
+
+static void ahci_port_stop(struct ata_port *ap)
+{
+ const char *emsg = NULL;
+ int rc;
+
+ /* de-initialize port */
+ rc = ahci_deinit_port(ap, &emsg);
+ if (rc)
+ ata_port_printk(ap, KERN_WARNING, "%s (%d)\n", emsg, rc);
+}
+
+void ahci_print_info(struct ata_host *host, const char *scc_s)
+{
+ struct ahci_host_priv *hpriv = host->private_data;
+ void __iomem *mmio = hpriv->mmio;
+ u32 vers, cap, cap2, impl, speed;
+ const char *speed_s;
+
+ vers = readl(mmio + HOST_VERSION);
+ cap = hpriv->cap;
+ cap2 = hpriv->cap2;
+ impl = hpriv->port_map;
+
+ speed = (cap >> 20) & 0xf;
+ if (speed == 1)
+ speed_s = "1.5";
+ else if (speed == 2)
+ speed_s = "3";
+ else if (speed == 3)
+ speed_s = "6";
+ else
+ speed_s = "?";
+
+ dev_info(host->dev,
+ "AHCI %02x%02x.%02x%02x "
+ "%u slots %u ports %s Gbps 0x%x impl %s mode\n"
+ ,
+
+ (vers >> 24) & 0xff,
+ (vers >> 16) & 0xff,
+ (vers >> 8) & 0xff,
+ vers & 0xff,
+
+ ((cap >> 8) & 0x1f) + 1,
+ (cap & 0x1f) + 1,
+ speed_s,
+ impl,
+ scc_s);
+
+ dev_info(host->dev,
+ "flags: "
+ "%s%s%s%s%s%s%s"
+ "%s%s%s%s%s%s%s"
+ "%s%s%s%s%s%s\n"
+ ,
+
+ cap & HOST_CAP_64 ? "64bit " : "",
+ cap & HOST_CAP_NCQ ? "ncq " : "",
+ cap & HOST_CAP_SNTF ? "sntf " : "",
+ cap & HOST_CAP_MPS ? "ilck " : "",
+ cap & HOST_CAP_SSS ? "stag " : "",
+ cap & HOST_CAP_ALPM ? "pm " : "",
+ cap & HOST_CAP_LED ? "led " : "",
+ cap & HOST_CAP_CLO ? "clo " : "",
+ cap & HOST_CAP_ONLY ? "only " : "",
+ cap & HOST_CAP_PMP ? "pmp " : "",
+ cap & HOST_CAP_FBS ? "fbs " : "",
+ cap & HOST_CAP_PIO_MULTI ? "pio " : "",
+ cap & HOST_CAP_SSC ? "slum " : "",
+ cap & HOST_CAP_PART ? "part " : "",
+ cap & HOST_CAP_CCC ? "ccc " : "",
+ cap & HOST_CAP_EMS ? "ems " : "",
+ cap & HOST_CAP_SXS ? "sxs " : "",
+ cap2 & HOST_CAP2_APST ? "apst " : "",
+ cap2 & HOST_CAP2_NVMHCI ? "nvmp " : "",
+ cap2 & HOST_CAP2_BOH ? "boh " : ""
+ );
+}
+EXPORT_SYMBOL_GPL(ahci_print_info);
+
+void ahci_set_em_messages(struct ahci_host_priv *hpriv,
+ struct ata_port_info *pi)
+{
+ u8 messages;
+ void __iomem *mmio = hpriv->mmio;
+ u32 em_loc = readl(mmio + HOST_EM_LOC);
+ u32 em_ctl = readl(mmio + HOST_EM_CTL);
+
+ if (!ahci_em_messages || !(hpriv->cap & HOST_CAP_EMS))
+ return;
+
+ messages = (em_ctl & EM_CTRL_MSG_TYPE) >> 16;
+
+ if (messages) {
+ /* store em_loc */
+ hpriv->em_loc = ((em_loc >> 16) * 4);
+ hpriv->em_buf_sz = ((em_loc & 0xff) * 4);
+ hpriv->em_msg_type = messages;
+ pi->flags |= ATA_FLAG_EM;
+ if (!(em_ctl & EM_CTL_ALHD))
+ pi->flags |= ATA_FLAG_SW_ACTIVITY;
+ }
+}
+EXPORT_SYMBOL_GPL(ahci_set_em_messages);
+
+MODULE_AUTHOR("Jeff Garzik");
+MODULE_DESCRIPTION("Common AHCI SATA low-level routines");
+MODULE_LICENSE("GPL");
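
For readers following the ahci_port_start() hunk above: the function carves one coherent DMA allocation into three consecutive regions (the 32-entry command-slot list, the received-FIS area, and the command table), and the received-FIS area is sixteen times larger when FBS is in use. The sketch below is illustrative only and is not part of the patch; the struct and helper names are hypothetical, and the real sizes come from the AHCI_* constants.

#include <stddef.h>

/* Hypothetical mirror of the layout set up by ahci_port_start(). */
struct port_dma_layout {
	size_t cmd_slot_off;	/* 32 command headers, 32 bytes each */
	size_t rx_fis_off;	/* received-FIS area (x16 with FBS) */
	size_t cmd_tbl_off;	/* command table + scatter/gather entries */
	size_t total;
};

static struct port_dma_layout port_dma_layout(size_t cmd_slot_sz,
					       size_t rx_fis_sz,
					       size_t cmd_tbl_sz)
{
	struct port_dma_layout l;

	l.cmd_slot_off = 0;
	l.rx_fis_off = cmd_slot_sz;		/* FIS area follows the slots */
	l.cmd_tbl_off = cmd_slot_sz + rx_fis_sz;
	l.total = l.cmd_tbl_off + cmd_tbl_sz;
	return l;
}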
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 49cffb6094a3..06b7e49e039c 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -65,6 +65,7 @@
#include <linux/libata.h>
#include <asm/byteorder.h>
#include <linux/cdrom.h>
+#include <linux/ratelimit.h>
#include "libata.h"
@@ -96,7 +97,6 @@ static void ata_dev_xfermask(struct ata_device *dev);
static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
unsigned int ata_print_id = 1;
-static struct workqueue_struct *ata_wq;
struct workqueue_struct *ata_aux_wq;
@@ -160,6 +160,10 @@ int libata_allow_tpm = 0;
module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");
+static int atapi_an;
+module_param(atapi_an, int, 0444);
+MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=off [default], 1=on)");
+
MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
@@ -1685,52 +1689,6 @@ unsigned long ata_id_xfermask(const u16 *id)
return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}
-/**
- * ata_pio_queue_task - Queue port_task
- * @ap: The ata_port to queue port_task for
- * @data: data for @fn to use
- * @delay: delay time in msecs for workqueue function
- *
- * Schedule @fn(@data) for execution after @delay jiffies using
- * port_task. There is one port_task per port and it's the
- * user(low level driver)'s responsibility to make sure that only
- * one task is active at any given time.
- *
- * libata core layer takes care of synchronization between
- * port_task and EH. ata_pio_queue_task() may be ignored for EH
- * synchronization.
- *
- * LOCKING:
- * Inherited from caller.
- */
-void ata_pio_queue_task(struct ata_port *ap, void *data, unsigned long delay)
-{
- ap->port_task_data = data;
-
- /* may fail if ata_port_flush_task() in progress */
- queue_delayed_work(ata_wq, &ap->port_task, msecs_to_jiffies(delay));
-}
-
-/**
- * ata_port_flush_task - Flush port_task
- * @ap: The ata_port to flush port_task for
- *
- * After this function completes, port_task is guranteed not to
- * be running or scheduled.
- *
- * LOCKING:
- * Kernel thread context (may sleep)
- */
-void ata_port_flush_task(struct ata_port *ap)
-{
- DPRINTK("ENTER\n");
-
- cancel_rearming_delayed_work(&ap->port_task);
-
- if (ata_msg_ctl(ap))
- ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __func__);
-}
-
static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
{
struct completion *waiting = qc->private_data;
@@ -1852,7 +1810,7 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));
- ata_port_flush_task(ap);
+ ata_sff_flush_pio_task(ap);
if (!rc) {
spin_lock_irqsave(ap->lock, flags);
@@ -1906,22 +1864,6 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
ap->qc_active = preempted_qc_active;
ap->nr_active_links = preempted_nr_active_links;
- /* XXX - Some LLDDs (sata_mv) disable port on command failure.
- * Until those drivers are fixed, we detect the condition
- * here, fail the command with AC_ERR_SYSTEM and reenable the
- * port.
- *
- * Note that this doesn't change any behavior as internal
- * command failure results in disabling the device in the
- * higher layer for LLDDs without new reset/EH callbacks.
- *
- * Kill the following code as soon as those drivers are fixed.
- */
- if (ap->flags & ATA_FLAG_DISABLED) {
- err_mask |= AC_ERR_SYSTEM;
- ata_port_probe(ap);
- }
-
spin_unlock_irqrestore(ap->lock, flags);
if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
@@ -2184,6 +2126,14 @@ retry:
goto err_out;
}
+ if (dev->horkage & ATA_HORKAGE_DUMP_ID) {
+ ata_dev_printk(dev, KERN_DEBUG, "dumping IDENTIFY data, "
+ "class=%d may_fallback=%d tried_spinup=%d\n",
+ class, may_fallback, tried_spinup);
+ print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET,
+ 16, 2, id, ATA_ID_WORDS * sizeof(*id), true);
+ }
+
/* Falling back doesn't make sense if ID data was read
* successfully at least once.
*/
@@ -2572,7 +2522,8 @@ int ata_dev_configure(struct ata_device *dev)
* to enable ATAPI AN to discern between PHY status
* changed notifications and ATAPI ANs.
*/
- if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
+ if (atapi_an &&
+ (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
(!sata_pmp_attached(ap) ||
sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
unsigned int err_mask;
@@ -2767,8 +2718,6 @@ int ata_bus_probe(struct ata_port *ap)
int rc;
struct ata_device *dev;
- ata_port_probe(ap);
-
ata_for_each_dev(dev, &ap->link, ALL)
tries[dev->devno] = ATA_PROBE_MAX_TRIES;
@@ -2796,8 +2745,7 @@ int ata_bus_probe(struct ata_port *ap)
ap->ops->phy_reset(ap);
ata_for_each_dev(dev, &ap->link, ALL) {
- if (!(ap->flags & ATA_FLAG_DISABLED) &&
- dev->class != ATA_DEV_UNKNOWN)
+ if (dev->class != ATA_DEV_UNKNOWN)
classes[dev->devno] = dev->class;
else
classes[dev->devno] = ATA_DEV_NONE;
@@ -2805,8 +2753,6 @@ int ata_bus_probe(struct ata_port *ap)
dev->class = ATA_DEV_UNKNOWN;
}
- ata_port_probe(ap);
-
/* read IDENTIFY page and configure devices. We have to do the identify
specific sequence bass-ackwards so that PDIAG- is released by
the slave device */
@@ -2856,8 +2802,6 @@ int ata_bus_probe(struct ata_port *ap)
ata_for_each_dev(dev, &ap->link, ENABLED)
return 0;
- /* no device present, disable port */
- ata_port_disable(ap);
return -ENODEV;
fail:
@@ -2889,22 +2833,6 @@ int ata_bus_probe(struct ata_port *ap)
}
/**
- * ata_port_probe - Mark port as enabled
- * @ap: Port for which we indicate enablement
- *
- * Modify @ap data structure such that the system
- * thinks that the entire port is enabled.
- *
- * LOCKING: host lock, or some other form of
- * serialization.
- */
-
-void ata_port_probe(struct ata_port *ap)
-{
- ap->flags &= ~ATA_FLAG_DISABLED;
-}
-
-/**
* sata_print_link_status - Print SATA link status
* @link: SATA link to printk link status about
*
@@ -2951,26 +2879,6 @@ struct ata_device *ata_dev_pair(struct ata_device *adev)
}
/**
- * ata_port_disable - Disable port.
- * @ap: Port to be disabled.
- *
- * Modify @ap data structure such that the system
- * thinks that the entire port is disabled, and should
- * never attempt to probe or communicate with devices
- * on this port.
- *
- * LOCKING: host lock, or some other form of
- * serialization.
- */
-
-void ata_port_disable(struct ata_port *ap)
-{
- ap->link.device[0].class = ATA_DEV_NONE;
- ap->link.device[1].class = ATA_DEV_NONE;
- ap->flags |= ATA_FLAG_DISABLED;
-}
-
-/**
* sata_down_spd_limit - adjust SATA spd limit downward
* @link: Link to adjust SATA spd limit for
* @spd_limit: Additional limit
@@ -3631,9 +3539,15 @@ int ata_wait_ready(struct ata_link *link, unsigned long deadline,
int (*check_ready)(struct ata_link *link))
{
unsigned long start = jiffies;
- unsigned long nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);
+ unsigned long nodev_deadline;
int warned = 0;
+ /* choose which 0xff timeout to use, read comment in libata.h */
+ if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN)
+ nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT_LONG);
+ else
+ nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);
+
/* Slave readiness can't be tested separately from master. On
* M/S emulation configuration, this function should be called
* only on the master and it will handle both master and slave.
@@ -3651,12 +3565,12 @@ int ata_wait_ready(struct ata_link *link, unsigned long deadline,
if (ready > 0)
return 0;
- /* -ENODEV could be transient. Ignore -ENODEV if link
+ /*
+ * -ENODEV could be transient. Ignore -ENODEV if link
* is online. Also, some SATA devices take a long
- * time to clear 0xff after reset. For example,
- * HHD424020F7SV00 iVDR needs >= 800ms while Quantum
- * GoVault needs even more than that. Wait for
- * ATA_TMOUT_FF_WAIT on -ENODEV if link isn't offline.
+ * time to clear 0xff after reset. Wait for
+ * ATA_TMOUT_FF_WAIT[_LONG] on -ENODEV if link isn't
+ * offline.
*
* Note that some PATA controllers (pata_ali) explode
* if status register is read more than once when
@@ -5558,30 +5472,6 @@ void ata_host_resume(struct ata_host *host)
#endif
/**
- * ata_port_start - Set port up for dma.
- * @ap: Port to initialize
- *
- * Called just after data structures for each port are
- * initialized. Allocates space for PRD table.
- *
- * May be used as the port_start() entry in ata_port_operations.
- *
- * LOCKING:
- * Inherited from caller.
- */
-int ata_port_start(struct ata_port *ap)
-{
- struct device *dev = ap->dev;
-
- ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
- GFP_KERNEL);
- if (!ap->prd)
- return -ENOMEM;
-
- return 0;
-}
-
-/**
* ata_dev_init - Initialize an ata_device structure
* @dev: Device structure to initialize
*
@@ -5709,12 +5599,9 @@ struct ata_port *ata_port_alloc(struct ata_host *host)
ap->pflags |= ATA_PFLAG_INITIALIZING;
ap->lock = &host->lock;
- ap->flags = ATA_FLAG_DISABLED;
ap->print_id = -1;
- ap->ctl = ATA_DEVCTL_OBS;
ap->host = host;
ap->dev = host->dev;
- ap->last_ctl = 0xFF;
#if defined(ATA_VERBOSE_DEBUG)
/* turn on all debugging levels */
@@ -5725,11 +5612,6 @@ struct ata_port *ata_port_alloc(struct ata_host *host)
ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
#endif
-#ifdef CONFIG_ATA_SFF
- INIT_DELAYED_WORK(&ap->port_task, ata_pio_task);
-#else
- INIT_DELAYED_WORK(&ap->port_task, NULL);
-#endif
INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
INIT_LIST_HEAD(&ap->eh_done_q);
@@ -5747,6 +5629,8 @@ struct ata_port *ata_port_alloc(struct ata_host *host)
ap->stats.unhandled_irq = 1;
ap->stats.idle_irq = 1;
#endif
+ ata_sff_port_init(ap);
+
return ap;
}
@@ -6138,8 +6022,6 @@ static void async_port_probe(void *data, async_cookie_t cookie)
struct ata_eh_info *ehi = &ap->link.eh_info;
unsigned long flags;
- ata_port_probe(ap);
-
/* kick EH for boot probing */
spin_lock_irqsave(ap->lock, flags);
@@ -6503,6 +6385,7 @@ static int __init ata_parse_force_one(char **cur,
{ "3.0Gbps", .spd_limit = 2 },
{ "noncq", .horkage_on = ATA_HORKAGE_NONCQ },
{ "ncq", .horkage_off = ATA_HORKAGE_NONCQ },
+ { "dump_id", .horkage_on = ATA_HORKAGE_DUMP_ID },
{ "pio0", .xfer_mask = 1 << (ATA_SHIFT_PIO + 0) },
{ "pio1", .xfer_mask = 1 << (ATA_SHIFT_PIO + 1) },
{ "pio2", .xfer_mask = 1 << (ATA_SHIFT_PIO + 2) },
@@ -6663,62 +6546,43 @@ static void __init ata_parse_force_param(void)
static int __init ata_init(void)
{
- ata_parse_force_param();
+ int rc = -ENOMEM;
- /*
- * FIXME: In UP case, there is only one workqueue thread and if you
- * have more than one PIO device, latency is bloody awful, with
- * occasional multi-second "hiccups" as one PIO device waits for
- * another. It's an ugly wart that users DO occasionally complain
- * about; luckily most users have at most one PIO polled device.
- */
- ata_wq = create_workqueue("ata");
- if (!ata_wq)
- goto free_force_tbl;
+ ata_parse_force_param();
ata_aux_wq = create_singlethread_workqueue("ata_aux");
if (!ata_aux_wq)
- goto free_wq;
+ goto fail;
+
+ rc = ata_sff_init();
+ if (rc)
+ goto fail;
printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
return 0;
-free_wq:
- destroy_workqueue(ata_wq);
-free_force_tbl:
+fail:
kfree(ata_force_tbl);
- return -ENOMEM;
+ if (ata_aux_wq)
+ destroy_workqueue(ata_aux_wq);
+ return rc;
}
static void __exit ata_exit(void)
{
+ ata_sff_exit();
kfree(ata_force_tbl);
- destroy_workqueue(ata_wq);
destroy_workqueue(ata_aux_wq);
}
subsys_initcall(ata_init);
module_exit(ata_exit);
-static unsigned long ratelimit_time;
-static DEFINE_SPINLOCK(ata_ratelimit_lock);
+static DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1);
int ata_ratelimit(void)
{
- int rc;
- unsigned long flags;
-
- spin_lock_irqsave(&ata_ratelimit_lock, flags);
-
- if (time_after(jiffies, ratelimit_time)) {
- rc = 1;
- ratelimit_time = jiffies + (HZ/5);
- } else
- rc = 0;
-
- spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
-
- return rc;
+ return __ratelimit(&ratelimit);
}
/**
@@ -6826,11 +6690,9 @@ EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
EXPORT_SYMBOL_GPL(ata_mode_string);
EXPORT_SYMBOL_GPL(ata_id_xfermask);
-EXPORT_SYMBOL_GPL(ata_port_start);
EXPORT_SYMBOL_GPL(ata_do_set_mode);
EXPORT_SYMBOL_GPL(ata_std_qc_defer);
EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
-EXPORT_SYMBOL_GPL(ata_port_probe);
EXPORT_SYMBOL_GPL(ata_dev_disable);
EXPORT_SYMBOL_GPL(sata_set_spd);
EXPORT_SYMBOL_GPL(ata_wait_after_reset);
@@ -6842,7 +6704,6 @@ EXPORT_SYMBOL_GPL(sata_std_hardreset);
EXPORT_SYMBOL_GPL(ata_std_postreset);
EXPORT_SYMBOL_GPL(ata_dev_classify);
EXPORT_SYMBOL_GPL(ata_dev_pair);
-EXPORT_SYMBOL_GPL(ata_port_disable);
EXPORT_SYMBOL_GPL(ata_ratelimit);
EXPORT_SYMBOL_GPL(ata_wait_register);
EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
@@ -6864,7 +6725,6 @@ EXPORT_SYMBOL_GPL(ata_id_c_string);
EXPORT_SYMBOL_GPL(ata_do_dev_read_id);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);
-EXPORT_SYMBOL_GPL(ata_pio_queue_task);
EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
EXPORT_SYMBOL_GPL(ata_timing_find_mode);
EXPORT_SYMBOL_GPL(ata_timing_compute);
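
The last libata-core.c hunk above replaces the hand-rolled jiffies/spinlock rate limiter with the generic ratelimit helper. For context, here is a minimal sketch of the pattern being adopted, assuming a caller that wants at most one message per 200 ms; the names are illustrative and not from the patch.

#include <linux/ratelimit.h>
#include <linux/printk.h>

/* At most one message every HZ/5 jiffies (200 ms). */
static DEFINE_RATELIMIT_STATE(demo_rs, HZ / 5, 1);

static void demo_noisy_event(void)
{
	/* __ratelimit() returns nonzero while we are under the limit */
	if (__ratelimit(&demo_rs))
		pr_warn("demo: noisy event happened\n");
}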
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 228740f356c9..f77a67303f8b 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -550,8 +550,8 @@ void ata_scsi_error(struct Scsi_Host *host)
DPRINTK("ENTER\n");
- /* synchronize with port task */
- ata_port_flush_task(ap);
+ /* make sure sff pio task is not running */
+ ata_sff_flush_pio_task(ap);
/* synchronize with host lock and sort out timeouts */
@@ -3684,7 +3684,7 @@ void ata_std_error_handler(struct ata_port *ap)
ata_reset_fn_t hardreset = ops->hardreset;
/* ignore built-in hardreset if SCR access is not available */
- if (ata_is_builtin_hardreset(hardreset) && !sata_scr_valid(&ap->link))
+ if (hardreset == sata_std_hardreset && !sata_scr_valid(&ap->link))
hardreset = NULL;
ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset);
diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
index 00305f41ed86..224faabd7b7e 100644
--- a/drivers/ata/libata-pmp.c
+++ b/drivers/ata/libata-pmp.c
@@ -231,10 +231,14 @@ static const char *sata_pmp_spec_rev_str(const u32 *gscr)
return "<unknown>";
}
+#define PMP_GSCR_SII_POL 129
+
static int sata_pmp_configure(struct ata_device *dev, int print_info)
{
struct ata_port *ap = dev->link->ap;
u32 *gscr = dev->gscr;
+ u16 vendor = sata_pmp_gscr_vendor(gscr);
+ u16 devid = sata_pmp_gscr_devid(gscr);
unsigned int err_mask = 0;
const char *reason;
int nr_ports, rc;
@@ -260,12 +264,34 @@ static int sata_pmp_configure(struct ata_device *dev, int print_info)
goto fail;
}
+ /* Disable sending Early R_OK.
+ * With "cached read" HDD testing and multiple ports busy on a SATA
+ * host controller, the 3726 PMP will very rarely drop a deferred
+ * R_OK that was intended for the host. The symptom is that all
+ * five drives under test time out, get reset, and recover.
+ */
+ if (vendor == 0x1095 && devid == 0x3726) {
+ u32 reg;
+
+ err_mask = sata_pmp_read(&ap->link, PMP_GSCR_SII_POL, &reg);
+ if (err_mask) {
+ rc = -EIO;
+ reason = "failed to read Sil3726 Private Register";
+ goto fail;
+ }
+ reg &= ~0x1;
+ err_mask = sata_pmp_write(&ap->link, PMP_GSCR_SII_POL, reg);
+ if (err_mask) {
+ rc = -EIO;
+ reason = "failed to write Sil3726 Private Register";
+ goto fail;
+ }
+ }
+
if (print_info) {
ata_dev_printk(dev, KERN_INFO, "Port Multiplier %s, "
"0x%04x:0x%04x r%d, %d ports, feat 0x%x/0x%x\n",
- sata_pmp_spec_rev_str(gscr),
- sata_pmp_gscr_vendor(gscr),
- sata_pmp_gscr_devid(gscr),
+ sata_pmp_spec_rev_str(gscr), vendor, devid,
sata_pmp_gscr_rev(gscr),
nr_ports, gscr[SATA_PMP_GSCR_FEAT_EN],
gscr[SATA_PMP_GSCR_FEAT]);
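
The Sil3726 quirk above is a plain read-modify-write of a vendor-private GSCR register: read PMP_GSCR_SII_POL, clear bit 0 (which, per the comment, enables Early R_OK), write the value back, and fail configuration if either access errors out. A generic sketch of that pattern follows; the accessor callbacks are hypothetical stand-ins for sata_pmp_read()/sata_pmp_write(), and the bit meaning is taken from the patch, not from a datasheet.

/* Illustrative only; not part of the patch. */
typedef int (*gscr_read_fn)(int reg, unsigned int *val);
typedef int (*gscr_write_fn)(int reg, unsigned int val);

static int clear_early_r_ok(gscr_read_fn rd, gscr_write_fn wr, int reg)
{
	unsigned int val;

	if (rd(reg, &val))
		return -1;	/* read failed */
	val &= ~0x1u;		/* bit 0 = Early R_OK enable (assumed) */
	if (wr(reg, val))
		return -1;	/* write failed */
	return 0;
}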
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 0088cdeb0b1e..cfa9dd3d7253 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -3345,9 +3345,6 @@ void ata_scsi_scan_host(struct ata_port *ap, int sync)
struct ata_link *link;
struct ata_device *dev;
- if (ap->flags & ATA_FLAG_DISABLED)
- return;
-
repeat:
ata_for_each_link(link, ap, EDGE) {
ata_for_each_dev(dev, link, ENABLED) {
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index e3877b6843c9..efa4a18cfb9d 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -40,10 +40,12 @@
#include "libata.h"
+static struct workqueue_struct *ata_sff_wq;
+
const struct ata_port_operations ata_sff_port_ops = {
.inherits = &ata_base_port_ops,
- .qc_prep = ata_sff_qc_prep,
+ .qc_prep = ata_noop_qc_prep,
.qc_issue = ata_sff_qc_issue,
.qc_fill_rtf = ata_sff_qc_fill_rtf,
@@ -53,9 +55,7 @@ const struct ata_port_operations ata_sff_port_ops = {
.softreset = ata_sff_softreset,
.hardreset = sata_sff_hardreset,
.postreset = ata_sff_postreset,
- .drain_fifo = ata_sff_drain_fifo,
.error_handler = ata_sff_error_handler,
- .post_internal_cmd = ata_sff_post_internal_cmd,
.sff_dev_select = ata_sff_dev_select,
.sff_check_status = ata_sff_check_status,
@@ -63,178 +63,12 @@ const struct ata_port_operations ata_sff_port_ops = {
.sff_tf_read = ata_sff_tf_read,
.sff_exec_command = ata_sff_exec_command,
.sff_data_xfer = ata_sff_data_xfer,
- .sff_irq_on = ata_sff_irq_on,
- .sff_irq_clear = ata_sff_irq_clear,
+ .sff_drain_fifo = ata_sff_drain_fifo,
.lost_interrupt = ata_sff_lost_interrupt,
-
- .port_start = ata_sff_port_start,
};
EXPORT_SYMBOL_GPL(ata_sff_port_ops);
-const struct ata_port_operations ata_bmdma_port_ops = {
- .inherits = &ata_sff_port_ops,
-
- .mode_filter = ata_bmdma_mode_filter,
-
- .bmdma_setup = ata_bmdma_setup,
- .bmdma_start = ata_bmdma_start,
- .bmdma_stop = ata_bmdma_stop,
- .bmdma_status = ata_bmdma_status,
-};
-EXPORT_SYMBOL_GPL(ata_bmdma_port_ops);
-
-const struct ata_port_operations ata_bmdma32_port_ops = {
- .inherits = &ata_bmdma_port_ops,
-
- .sff_data_xfer = ata_sff_data_xfer32,
- .port_start = ata_sff_port_start32,
-};
-EXPORT_SYMBOL_GPL(ata_bmdma32_port_ops);
-
-/**
- * ata_fill_sg - Fill PCI IDE PRD table
- * @qc: Metadata associated with taskfile to be transferred
- *
- * Fill PCI IDE PRD (scatter-gather) table with segments
- * associated with the current disk command.
- *
- * LOCKING:
- * spin_lock_irqsave(host lock)
- *
- */
-static void ata_fill_sg(struct ata_queued_cmd *qc)
-{
- struct ata_port *ap = qc->ap;
- struct scatterlist *sg;
- unsigned int si, pi;
-
- pi = 0;
- for_each_sg(qc->sg, sg, qc->n_elem, si) {
- u32 addr, offset;
- u32 sg_len, len;
-
- /* determine if physical DMA addr spans 64K boundary.
- * Note h/w doesn't support 64-bit, so we unconditionally
- * truncate dma_addr_t to u32.
- */
- addr = (u32) sg_dma_address(sg);
- sg_len = sg_dma_len(sg);
-
- while (sg_len) {
- offset = addr & 0xffff;
- len = sg_len;
- if ((offset + sg_len) > 0x10000)
- len = 0x10000 - offset;
-
- ap->prd[pi].addr = cpu_to_le32(addr);
- ap->prd[pi].flags_len = cpu_to_le32(len & 0xffff);
- VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
-
- pi++;
- sg_len -= len;
- addr += len;
- }
- }
-
- ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
-}
-
-/**
- * ata_fill_sg_dumb - Fill PCI IDE PRD table
- * @qc: Metadata associated with taskfile to be transferred
- *
- * Fill PCI IDE PRD (scatter-gather) table with segments
- * associated with the current disk command. Perform the fill
- * so that we avoid writing any length 64K records for
- * controllers that don't follow the spec.
- *
- * LOCKING:
- * spin_lock_irqsave(host lock)
- *
- */
-static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
-{
- struct ata_port *ap = qc->ap;
- struct scatterlist *sg;
- unsigned int si, pi;
-
- pi = 0;
- for_each_sg(qc->sg, sg, qc->n_elem, si) {
- u32 addr, offset;
- u32 sg_len, len, blen;
-
- /* determine if physical DMA addr spans 64K boundary.
- * Note h/w doesn't support 64-bit, so we unconditionally
- * truncate dma_addr_t to u32.
- */
- addr = (u32) sg_dma_address(sg);
- sg_len = sg_dma_len(sg);
-
- while (sg_len) {
- offset = addr & 0xffff;
- len = sg_len;
- if ((offset + sg_len) > 0x10000)
- len = 0x10000 - offset;
-
- blen = len & 0xffff;
- ap->prd[pi].addr = cpu_to_le32(addr);
- if (blen == 0) {
- /* Some PATA chipsets like the CS5530 can't
- cope with 0x0000 meaning 64K as the spec
- says */
- ap->prd[pi].flags_len = cpu_to_le32(0x8000);
- blen = 0x8000;
- ap->prd[++pi].addr = cpu_to_le32(addr + 0x8000);
- }
- ap->prd[pi].flags_len = cpu_to_le32(blen);
- VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
-
- pi++;
- sg_len -= len;
- addr += len;
- }
- }
-
- ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
-}
-
-/**
- * ata_sff_qc_prep - Prepare taskfile for submission
- * @qc: Metadata associated with taskfile to be prepared
- *
- * Prepare ATA taskfile for submission.
- *
- * LOCKING:
- * spin_lock_irqsave(host lock)
- */
-void ata_sff_qc_prep(struct ata_queued_cmd *qc)
-{
- if (!(qc->flags & ATA_QCFLAG_DMAMAP))
- return;
-
- ata_fill_sg(qc);
-}
-EXPORT_SYMBOL_GPL(ata_sff_qc_prep);
-
-/**
- * ata_sff_dumb_qc_prep - Prepare taskfile for submission
- * @qc: Metadata associated with taskfile to be prepared
- *
- * Prepare ATA taskfile for submission.
- *
- * LOCKING:
- * spin_lock_irqsave(host lock)
- */
-void ata_sff_dumb_qc_prep(struct ata_queued_cmd *qc)
-{
- if (!(qc->flags & ATA_QCFLAG_DMAMAP))
- return;
-
- ata_fill_sg_dumb(qc);
-}
-EXPORT_SYMBOL_GPL(ata_sff_dumb_qc_prep);
-
/**
* ata_sff_check_status - Read device status reg & clear interrupt
* @ap: port where the device is
@@ -446,6 +280,27 @@ int ata_sff_wait_ready(struct ata_link *link, unsigned long deadline)
EXPORT_SYMBOL_GPL(ata_sff_wait_ready);
/**
+ * ata_sff_set_devctl - Write device control reg
+ * @ap: port where the device is
+ * @ctl: value to write
+ *
+ * Writes ATA taskfile device control register.
+ *
+ * Note: may NOT be used as the sff_set_devctl() entry in
+ * ata_port_operations.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+static void ata_sff_set_devctl(struct ata_port *ap, u8 ctl)
+{
+ if (ap->ops->sff_set_devctl)
+ ap->ops->sff_set_devctl(ap, ctl);
+ else
+ iowrite8(ctl, ap->ioaddr.ctl_addr);
+}
+
+/**
* ata_sff_dev_select - Select device 0/1 on ATA bus
* @ap: ATA channel to manipulate
* @device: ATA device (numbered from zero) to select
@@ -491,7 +346,7 @@ EXPORT_SYMBOL_GPL(ata_sff_dev_select);
* LOCKING:
* caller.
*/
-void ata_dev_select(struct ata_port *ap, unsigned int device,
+static void ata_dev_select(struct ata_port *ap, unsigned int device,
unsigned int wait, unsigned int can_sleep)
{
if (ata_msg_probe(ap))
@@ -517,50 +372,34 @@ void ata_dev_select(struct ata_port *ap, unsigned int device,
* Enable interrupts on a legacy IDE device using MMIO or PIO,
* wait for idle, clear any pending interrupts.
*
+ * Note: may NOT be used as the sff_irq_on() entry in
+ * ata_port_operations.
+ *
* LOCKING:
* Inherited from caller.
*/
-u8 ata_sff_irq_on(struct ata_port *ap)
+void ata_sff_irq_on(struct ata_port *ap)
{
struct ata_ioports *ioaddr = &ap->ioaddr;
- u8 tmp;
+
+ if (ap->ops->sff_irq_on) {
+ ap->ops->sff_irq_on(ap);
+ return;
+ }
ap->ctl &= ~ATA_NIEN;
ap->last_ctl = ap->ctl;
- if (ioaddr->ctl_addr)
- iowrite8(ap->ctl, ioaddr->ctl_addr);
- tmp = ata_wait_idle(ap);
-
- ap->ops->sff_irq_clear(ap);
+ if (ap->ops->sff_set_devctl || ioaddr->ctl_addr)
+ ata_sff_set_devctl(ap, ap->ctl);
+ ata_wait_idle(ap);
- return tmp;
+ if (ap->ops->sff_irq_clear)
+ ap->ops->sff_irq_clear(ap);
}
EXPORT_SYMBOL_GPL(ata_sff_irq_on);
/**
- * ata_sff_irq_clear - Clear PCI IDE BMDMA interrupt.
- * @ap: Port associated with this ATA transaction.
- *
- * Clear interrupt and error flags in DMA status register.
- *
- * May be used as the irq_clear() entry in ata_port_operations.
- *
- * LOCKING:
- * spin_lock_irqsave(host lock)
- */
-void ata_sff_irq_clear(struct ata_port *ap)
-{
- void __iomem *mmio = ap->ioaddr.bmdma_addr;
-
- if (!mmio)
- return;
-
- iowrite8(ioread8(mmio + ATA_DMA_STATUS), mmio + ATA_DMA_STATUS);
-}
-EXPORT_SYMBOL_GPL(ata_sff_irq_clear);
-
-/**
* ata_sff_tf_load - send taskfile registers to host controller
* @ap: Port to which output is sent
* @tf: ATA taskfile register set
@@ -579,7 +418,6 @@ void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
if (ioaddr->ctl_addr)
iowrite8(tf->ctl, ioaddr->ctl_addr);
ap->last_ctl = tf->ctl;
- ata_wait_idle(ap);
}
if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
@@ -615,8 +453,6 @@ void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
iowrite8(tf->device, ioaddr->device_addr);
VPRINTK("device 0x%X\n", tf->device);
}
-
- ata_wait_idle(ap);
}
EXPORT_SYMBOL_GPL(ata_sff_tf_load);
@@ -894,7 +730,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
do_write);
}
- if (!do_write)
+ if (!do_write && !PageSlab(page))
flush_dcache_page(page);
qc->curbytes += qc->sect_size;
@@ -962,11 +798,15 @@ static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
case ATAPI_PROT_NODATA:
ap->hsm_task_state = HSM_ST_LAST;
break;
+#ifdef CONFIG_ATA_BMDMA
case ATAPI_PROT_DMA:
ap->hsm_task_state = HSM_ST_LAST;
/* initiate bmdma */
ap->ops->bmdma_start(qc);
break;
+#endif /* CONFIG_ATA_BMDMA */
+ default:
+ BUG();
}
}
@@ -1165,7 +1005,7 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
qc = ata_qc_from_tag(ap, qc->tag);
if (qc) {
if (likely(!(qc->err_mask & AC_ERR_HSM))) {
- ap->ops->sff_irq_on(ap);
+ ata_sff_irq_on(ap);
ata_qc_complete(qc);
} else
ata_port_freeze(ap);
@@ -1181,7 +1021,7 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
} else {
if (in_wq) {
spin_lock_irqsave(ap->lock, flags);
- ap->ops->sff_irq_on(ap);
+ ata_sff_irq_on(ap);
ata_qc_complete(qc);
spin_unlock_irqrestore(ap->lock, flags);
} else
@@ -1293,7 +1133,7 @@ fsm_start:
if (in_wq)
spin_unlock_irqrestore(ap->lock, flags);
- /* if polling, ata_pio_task() handles the rest.
+ /* if polling, ata_sff_pio_task() handles the rest.
* otherwise, interrupt handler takes over from here.
*/
break;
@@ -1458,14 +1298,38 @@ fsm_start:
}
EXPORT_SYMBOL_GPL(ata_sff_hsm_move);
-void ata_pio_task(struct work_struct *work)
+void ata_sff_queue_pio_task(struct ata_port *ap, unsigned long delay)
+{
+ /* may fail if ata_sff_flush_pio_task() in progress */
+ queue_delayed_work(ata_sff_wq, &ap->sff_pio_task,
+ msecs_to_jiffies(delay));
+}
+EXPORT_SYMBOL_GPL(ata_sff_queue_pio_task);
+
+void ata_sff_flush_pio_task(struct ata_port *ap)
+{
+ DPRINTK("ENTER\n");
+
+ cancel_rearming_delayed_work(&ap->sff_pio_task);
+ ap->hsm_task_state = HSM_ST_IDLE;
+
+ if (ata_msg_ctl(ap))
+ ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __func__);
+}
+
+static void ata_sff_pio_task(struct work_struct *work)
{
struct ata_port *ap =
- container_of(work, struct ata_port, port_task.work);
- struct ata_queued_cmd *qc = ap->port_task_data;
+ container_of(work, struct ata_port, sff_pio_task.work);
+ struct ata_queued_cmd *qc;
u8 status;
int poll_next;
+ /* qc can be NULL if timeout occurred */
+ qc = ata_qc_from_tag(ap, ap->link.active_tag);
+ if (!qc)
+ return;
+
fsm_start:
WARN_ON_ONCE(ap->hsm_task_state == HSM_ST_IDLE);
@@ -1481,7 +1345,7 @@ fsm_start:
msleep(2);
status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
if (status & ATA_BUSY) {
- ata_pio_queue_task(ap, qc, ATA_SHORT_PAUSE);
+ ata_sff_queue_pio_task(ap, ATA_SHORT_PAUSE);
return;
}
}
@@ -1497,15 +1361,11 @@ fsm_start:
}
/**
- * ata_sff_qc_issue - issue taskfile to device in proto-dependent manner
+ * ata_sff_qc_issue - issue taskfile to a SFF controller
* @qc: command to issue to device
*
- * Using various libata functions and hooks, this function
- * starts an ATA command. ATA commands are grouped into
- * classes called "protocols", and issuing each type of protocol
- * is slightly different.
- *
- * May be used as the qc_issue() entry in ata_port_operations.
+ * This function issues a PIO or NODATA command to a SFF
+ * controller.
*
* LOCKING:
* spin_lock_irqsave(host lock)
@@ -1520,23 +1380,8 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
/* Use polling pio if the LLD doesn't handle
* interrupt driven pio and atapi CDB interrupt.
*/
- if (ap->flags & ATA_FLAG_PIO_POLLING) {
- switch (qc->tf.protocol) {
- case ATA_PROT_PIO:
- case ATA_PROT_NODATA:
- case ATAPI_PROT_PIO:
- case ATAPI_PROT_NODATA:
- qc->tf.flags |= ATA_TFLAG_POLLING;
- break;
- case ATAPI_PROT_DMA:
- if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
- /* see ata_dma_blacklisted() */
- BUG();
- break;
- default:
- break;
- }
- }
+ if (ap->flags & ATA_FLAG_PIO_POLLING)
+ qc->tf.flags |= ATA_TFLAG_POLLING;
/* select the device */
ata_dev_select(ap, qc->dev->devno, 1, 0);
@@ -1551,17 +1396,8 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
ap->hsm_task_state = HSM_ST_LAST;
if (qc->tf.flags & ATA_TFLAG_POLLING)
- ata_pio_queue_task(ap, qc, 0);
-
- break;
+ ata_sff_queue_pio_task(ap, 0);
- case ATA_PROT_DMA:
- WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);
-
- ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */
- ap->ops->bmdma_setup(qc); /* set up bmdma */
- ap->ops->bmdma_start(qc); /* initiate bmdma */
- ap->hsm_task_state = HSM_ST_LAST;
break;
case ATA_PROT_PIO:
@@ -1573,20 +1409,21 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
if (qc->tf.flags & ATA_TFLAG_WRITE) {
/* PIO data out protocol */
ap->hsm_task_state = HSM_ST_FIRST;
- ata_pio_queue_task(ap, qc, 0);
+ ata_sff_queue_pio_task(ap, 0);
- /* always send first data block using
- * the ata_pio_task() codepath.
+ /* always send first data block using the
+ * ata_sff_pio_task() codepath.
*/
} else {
/* PIO data in protocol */
ap->hsm_task_state = HSM_ST;
if (qc->tf.flags & ATA_TFLAG_POLLING)
- ata_pio_queue_task(ap, qc, 0);
+ ata_sff_queue_pio_task(ap, 0);
- /* if polling, ata_pio_task() handles the rest.
- * otherwise, interrupt handler takes over from here.
+ /* if polling, ata_sff_pio_task() handles the
+ * rest. otherwise, interrupt handler takes
+ * over from here.
*/
}
@@ -1604,19 +1441,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
/* send cdb by polling if no cdb interrupt */
if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
(qc->tf.flags & ATA_TFLAG_POLLING))
- ata_pio_queue_task(ap, qc, 0);
- break;
-
- case ATAPI_PROT_DMA:
- WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);
-
- ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */
- ap->ops->bmdma_setup(qc); /* set up bmdma */
- ap->hsm_task_state = HSM_ST_FIRST;
-
- /* send cdb by polling if no cdb interrupt */
- if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
- ata_pio_queue_task(ap, qc, 0);
+ ata_sff_queue_pio_task(ap, 0);
break;
default:
@@ -1648,27 +1473,27 @@ bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc)
}
EXPORT_SYMBOL_GPL(ata_sff_qc_fill_rtf);
-/**
- * ata_sff_host_intr - Handle host interrupt for given (port, task)
- * @ap: Port on which interrupt arrived (possibly...)
- * @qc: Taskfile currently active in engine
- *
- * Handle host interrupt for given queued command. Currently,
- * only DMA interrupts are handled. All other commands are
- * handled via polling with interrupts disabled (nIEN bit).
- *
- * LOCKING:
- * spin_lock_irqsave(host lock)
- *
- * RETURNS:
- * One if interrupt was handled, zero if not (shared irq).
- */
-unsigned int ata_sff_host_intr(struct ata_port *ap,
- struct ata_queued_cmd *qc)
+static unsigned int ata_sff_idle_irq(struct ata_port *ap)
{
- struct ata_eh_info *ehi = &ap->link.eh_info;
- u8 status, host_stat = 0;
- bool bmdma_stopped = false;
+ ap->stats.idle_irq++;
+
+#ifdef ATA_IRQ_TRAP
+ if ((ap->stats.idle_irq % 1000) == 0) {
+ ap->ops->sff_check_status(ap);
+ if (ap->ops->sff_irq_clear)
+ ap->ops->sff_irq_clear(ap);
+ ata_port_printk(ap, KERN_WARNING, "irq trap\n");
+ return 1;
+ }
+#endif
+ return 0; /* irq not handled */
+}
+
+static unsigned int __ata_sff_port_intr(struct ata_port *ap,
+ struct ata_queued_cmd *qc,
+ bool hsmv_on_idle)
+{
+ u8 status;
VPRINTK("ata%u: protocol %d task_state %d\n",
ap->print_id, qc->tf.protocol, ap->hsm_task_state);
@@ -1685,90 +1510,56 @@ unsigned int ata_sff_host_intr(struct ata_port *ap,
* need to check ata_is_atapi(qc->tf.protocol) again.
*/
if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
- goto idle_irq;
- break;
- case HSM_ST_LAST:
- if (qc->tf.protocol == ATA_PROT_DMA ||
- qc->tf.protocol == ATAPI_PROT_DMA) {
- /* check status of DMA engine */
- host_stat = ap->ops->bmdma_status(ap);
- VPRINTK("ata%u: host_stat 0x%X\n",
- ap->print_id, host_stat);
-
- /* if it's not our irq... */
- if (!(host_stat & ATA_DMA_INTR))
- goto idle_irq;
-
- /* before we do anything else, clear DMA-Start bit */
- ap->ops->bmdma_stop(qc);
- bmdma_stopped = true;
-
- if (unlikely(host_stat & ATA_DMA_ERR)) {
- /* error when transfering data to/from memory */
- qc->err_mask |= AC_ERR_HOST_BUS;
- ap->hsm_task_state = HSM_ST_ERR;
- }
- }
+ return ata_sff_idle_irq(ap);
break;
case HSM_ST:
+ case HSM_ST_LAST:
break;
default:
- goto idle_irq;
+ return ata_sff_idle_irq(ap);
}
-
/* check main status, clearing INTRQ if needed */
status = ata_sff_irq_status(ap);
if (status & ATA_BUSY) {
- if (bmdma_stopped) {
+ if (hsmv_on_idle) {
/* BMDMA engine is already stopped, we're screwed */
qc->err_mask |= AC_ERR_HSM;
ap->hsm_task_state = HSM_ST_ERR;
} else
- goto idle_irq;
+ return ata_sff_idle_irq(ap);
}
- /* ack bmdma irq events */
- ap->ops->sff_irq_clear(ap);
+ /* clear irq events */
+ if (ap->ops->sff_irq_clear)
+ ap->ops->sff_irq_clear(ap);
ata_sff_hsm_move(ap, qc, status, 0);
- if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
- qc->tf.protocol == ATAPI_PROT_DMA))
- ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
-
return 1; /* irq handled */
-
-idle_irq:
- ap->stats.idle_irq++;
-
-#ifdef ATA_IRQ_TRAP
- if ((ap->stats.idle_irq % 1000) == 0) {
- ap->ops->sff_check_status(ap);
- ap->ops->sff_irq_clear(ap);
- ata_port_printk(ap, KERN_WARNING, "irq trap\n");
- return 1;
- }
-#endif
- return 0; /* irq not handled */
}
-EXPORT_SYMBOL_GPL(ata_sff_host_intr);
/**
- * ata_sff_interrupt - Default ATA host interrupt handler
- * @irq: irq line (unused)
- * @dev_instance: pointer to our ata_host information structure
+ * ata_sff_port_intr - Handle SFF port interrupt
+ * @ap: Port on which interrupt arrived (possibly...)
+ * @qc: Taskfile currently active in engine
*
- * Default interrupt handler for PCI IDE devices. Calls
- * ata_sff_host_intr() for each port that is not disabled.
+ * Handle port interrupt for given queued command.
*
* LOCKING:
- * Obtains host lock during operation.
+ * spin_lock_irqsave(host lock)
*
* RETURNS:
- * IRQ_NONE or IRQ_HANDLED.
+ * One if interrupt was handled, zero if not (shared irq).
*/
-irqreturn_t ata_sff_interrupt(int irq, void *dev_instance)
+unsigned int ata_sff_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
+{
+ return __ata_sff_port_intr(ap, qc, false);
+}
+EXPORT_SYMBOL_GPL(ata_sff_port_intr);
+
+static inline irqreturn_t __ata_sff_interrupt(int irq, void *dev_instance,
+ unsigned int (*port_intr)(struct ata_port *, struct ata_queued_cmd *))
{
struct ata_host *host = dev_instance;
bool retried = false;
@@ -1785,13 +1576,10 @@ retry:
struct ata_port *ap = host->ports[i];
struct ata_queued_cmd *qc;
- if (unlikely(ap->flags & ATA_FLAG_DISABLED))
- continue;
-
qc = ata_qc_from_tag(ap, ap->link.active_tag);
if (qc) {
if (!(qc->tf.flags & ATA_TFLAG_POLLING))
- handled |= ata_sff_host_intr(ap, qc);
+ handled |= port_intr(ap, qc);
else
polling |= 1 << i;
} else
@@ -1818,7 +1606,8 @@ retry:
if (idle & (1 << i)) {
ap->ops->sff_check_status(ap);
- ap->ops->sff_irq_clear(ap);
+ if (ap->ops->sff_irq_clear)
+ ap->ops->sff_irq_clear(ap);
} else {
/* clear INTRQ and check if BUSY cleared */
if (!(ap->ops->sff_check_status(ap) & ATA_BUSY))
@@ -1840,6 +1629,25 @@ retry:
return IRQ_RETVAL(handled);
}
+
+/**
+ * ata_sff_interrupt - Default SFF ATA host interrupt handler
+ * @irq: irq line (unused)
+ * @dev_instance: pointer to our ata_host information structure
+ *
+ * Default interrupt handler for PCI IDE devices. Calls
+ * ata_sff_port_intr() for each port that is not disabled.
+ *
+ * LOCKING:
+ * Obtains host lock during operation.
+ *
+ * RETURNS:
+ * IRQ_NONE or IRQ_HANDLED.
+ */
+irqreturn_t ata_sff_interrupt(int irq, void *dev_instance)
+{
+ return __ata_sff_interrupt(irq, dev_instance, ata_sff_port_intr);
+}
EXPORT_SYMBOL_GPL(ata_sff_interrupt);
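
The refactoring above folds the old per-host loop into __ata_sff_interrupt(), which takes the per-port service routine as a function pointer so the SFF and (later in this patch) BMDMA interrupt entry points can share the dispatch, polling, and spurious-IRQ handling. Below is a stripped-down sketch of that shape with the locking and retry logic omitted; names other than the libata ones are hypothetical, and this is not the code the patch adds.

#include <linux/libata.h>
#include <linux/interrupt.h>

typedef unsigned int (*port_intr_fn)(struct ata_port *ap,
				     struct ata_queued_cmd *qc);

/* Walk the host's ports and hand active, non-polled commands to port_intr. */
static irqreturn_t demo_dispatch(struct ata_host *host, port_intr_fn port_intr)
{
	unsigned int handled = 0;
	int i;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct ata_queued_cmd *qc;

		qc = ata_qc_from_tag(ap, ap->link.active_tag);
		if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING))
			handled |= port_intr(ap, qc);
	}

	return IRQ_RETVAL(handled);
}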
/**
@@ -1862,11 +1670,8 @@ void ata_sff_lost_interrupt(struct ata_port *ap)
/* Only one outstanding command per SFF channel */
qc = ata_qc_from_tag(ap, ap->link.active_tag);
- /* Check we have a live one.. */
- if (qc == NULL || !(qc->flags & ATA_QCFLAG_ACTIVE))
- return;
- /* We cannot lose an interrupt on a polled command */
- if (qc->tf.flags & ATA_TFLAG_POLLING)
+ /* We cannot lose an interrupt on a non-existent or polled command */
+ if (!qc || qc->tf.flags & ATA_TFLAG_POLLING)
return;
/* See if the controller thinks it is still busy - if so the command
isn't a lost IRQ but is still in progress */
@@ -1880,7 +1685,7 @@ void ata_sff_lost_interrupt(struct ata_port *ap)
status);
/* Run the host interrupt logic as if the interrupt had not been
lost */
- ata_sff_host_intr(ap, qc);
+ ata_sff_port_intr(ap, qc);
}
EXPORT_SYMBOL_GPL(ata_sff_lost_interrupt);
@@ -1888,20 +1693,18 @@ EXPORT_SYMBOL_GPL(ata_sff_lost_interrupt);
* ata_sff_freeze - Freeze SFF controller port
* @ap: port to freeze
*
- * Freeze BMDMA controller port.
+ * Freeze SFF controller port.
*
* LOCKING:
* Inherited from caller.
*/
void ata_sff_freeze(struct ata_port *ap)
{
- struct ata_ioports *ioaddr = &ap->ioaddr;
-
ap->ctl |= ATA_NIEN;
ap->last_ctl = ap->ctl;
- if (ioaddr->ctl_addr)
- iowrite8(ap->ctl, ioaddr->ctl_addr);
+ if (ap->ops->sff_set_devctl || ap->ioaddr.ctl_addr)
+ ata_sff_set_devctl(ap, ap->ctl);
/* Under certain circumstances, some controllers raise IRQ on
* ATA_NIEN manipulation. Also, many controllers fail to mask
@@ -1909,7 +1712,8 @@ void ata_sff_freeze(struct ata_port *ap)
*/
ap->ops->sff_check_status(ap);
- ap->ops->sff_irq_clear(ap);
+ if (ap->ops->sff_irq_clear)
+ ap->ops->sff_irq_clear(ap);
}
EXPORT_SYMBOL_GPL(ata_sff_freeze);
@@ -1926,8 +1730,9 @@ void ata_sff_thaw(struct ata_port *ap)
{
/* clear & re-enable interrupts */
ap->ops->sff_check_status(ap);
- ap->ops->sff_irq_clear(ap);
- ap->ops->sff_irq_on(ap);
+ if (ap->ops->sff_irq_clear)
+ ap->ops->sff_irq_clear(ap);
+ ata_sff_irq_on(ap);
}
EXPORT_SYMBOL_GPL(ata_sff_thaw);
@@ -2301,8 +2106,8 @@ void ata_sff_postreset(struct ata_link *link, unsigned int *classes)
}
/* set up device control */
- if (ap->ioaddr.ctl_addr) {
- iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
+ if (ap->ops->sff_set_devctl || ap->ioaddr.ctl_addr) {
+ ata_sff_set_devctl(ap, ap->ctl);
ap->last_ctl = ap->ctl;
}
}
@@ -2342,7 +2147,7 @@ void ata_sff_drain_fifo(struct ata_queued_cmd *qc)
EXPORT_SYMBOL_GPL(ata_sff_drain_fifo);
/**
- * ata_sff_error_handler - Stock error handler for BMDMA controller
+ * ata_sff_error_handler - Stock error handler for SFF controller
* @ap: port to handle error for
*
* Stock error handler for SFF controller. It can handle both
@@ -2359,62 +2164,32 @@ void ata_sff_error_handler(struct ata_port *ap)
ata_reset_fn_t hardreset = ap->ops->hardreset;
struct ata_queued_cmd *qc;
unsigned long flags;
- int thaw = 0;
qc = __ata_qc_from_tag(ap, ap->link.active_tag);
if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
qc = NULL;
- /* reset PIO HSM and stop DMA engine */
spin_lock_irqsave(ap->lock, flags);
- ap->hsm_task_state = HSM_ST_IDLE;
-
- if (ap->ioaddr.bmdma_addr &&
- qc && (qc->tf.protocol == ATA_PROT_DMA ||
- qc->tf.protocol == ATAPI_PROT_DMA)) {
- u8 host_stat;
-
- host_stat = ap->ops->bmdma_status(ap);
-
- /* BMDMA controllers indicate host bus error by
- * setting DMA_ERR bit and timing out. As it wasn't
- * really a timeout event, adjust error mask and
- * cancel frozen state.
- */
- if (qc->err_mask == AC_ERR_TIMEOUT
- && (host_stat & ATA_DMA_ERR)) {
- qc->err_mask = AC_ERR_HOST_BUS;
- thaw = 1;
- }
-
- ap->ops->bmdma_stop(qc);
- }
-
- ata_sff_sync(ap); /* FIXME: We don't need this */
- ap->ops->sff_check_status(ap);
- ap->ops->sff_irq_clear(ap);
- /* We *MUST* do FIFO draining before we issue a reset as several
- * devices helpfully clear their internal state and will lock solid
- * if we touch the data port post reset. Pass qc in case anyone wants
- * to do different PIO/DMA recovery or has per command fixups
+ /*
+ * We *MUST* do FIFO draining before we issue a reset as
+ * several devices helpfully clear their internal state and
+ * will lock solid if we touch the data port post reset. Pass
+ * qc in case anyone wants to do different PIO/DMA recovery or
+ * has per command fixups
*/
- if (ap->ops->drain_fifo)
- ap->ops->drain_fifo(qc);
+ if (ap->ops->sff_drain_fifo)
+ ap->ops->sff_drain_fifo(qc);
spin_unlock_irqrestore(ap->lock, flags);
- if (thaw)
- ata_eh_thaw_port(ap);
-
- /* PIO and DMA engines have been stopped, perform recovery */
-
- /* Ignore ata_sff_softreset if ctl isn't accessible and
- * built-in hardresets if SCR access isn't available.
- */
+ /* ignore ata_sff_softreset if ctl isn't accessible */
if (softreset == ata_sff_softreset && !ap->ioaddr.ctl_addr)
softreset = NULL;
- if (ata_is_builtin_hardreset(hardreset) && !sata_scr_valid(&ap->link))
+
+ /* ignore built-in hardresets if SCR access is not available */
+ if ((hardreset == sata_std_hardreset ||
+ hardreset == sata_sff_hardreset) && !sata_scr_valid(&ap->link))
hardreset = NULL;
ata_do_eh(ap, ap->ops->prereset, softreset, hardreset,
@@ -2423,73 +2198,6 @@ void ata_sff_error_handler(struct ata_port *ap)
EXPORT_SYMBOL_GPL(ata_sff_error_handler);
/**
- * ata_sff_post_internal_cmd - Stock post_internal_cmd for SFF controller
- * @qc: internal command to clean up
- *
- * LOCKING:
- * Kernel thread context (may sleep)
- */
-void ata_sff_post_internal_cmd(struct ata_queued_cmd *qc)
-{
- struct ata_port *ap = qc->ap;
- unsigned long flags;
-
- spin_lock_irqsave(ap->lock, flags);
-
- ap->hsm_task_state = HSM_ST_IDLE;
-
- if (ap->ioaddr.bmdma_addr)
- ap->ops->bmdma_stop(qc);
-
- spin_unlock_irqrestore(ap->lock, flags);
-}
-EXPORT_SYMBOL_GPL(ata_sff_post_internal_cmd);
-
-/**
- * ata_sff_port_start - Set port up for dma.
- * @ap: Port to initialize
- *
- * Called just after data structures for each port are
- * initialized. Allocates space for PRD table if the device
- * is DMA capable SFF.
- *
- * May be used as the port_start() entry in ata_port_operations.
- *
- * LOCKING:
- * Inherited from caller.
- */
-int ata_sff_port_start(struct ata_port *ap)
-{
- if (ap->ioaddr.bmdma_addr)
- return ata_port_start(ap);
- return 0;
-}
-EXPORT_SYMBOL_GPL(ata_sff_port_start);
-
-/**
- * ata_sff_port_start32 - Set port up for dma.
- * @ap: Port to initialize
- *
- * Called just after data structures for each port are
- * initialized. Allocates space for PRD table if the device
- * is DMA capable SFF.
- *
- * May be used as the port_start() entry in ata_port_operations for
- * devices that are capable of 32bit PIO.
- *
- * LOCKING:
- * Inherited from caller.
- */
-int ata_sff_port_start32(struct ata_port *ap)
-{
- ap->pflags |= ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE;
- if (ap->ioaddr.bmdma_addr)
- return ata_port_start(ap);
- return 0;
-}
-EXPORT_SYMBOL_GPL(ata_sff_port_start32);
-
-/**
* ata_sff_std_ports - initialize ioaddr with standard port offsets.
* @ioaddr: IO address structure to be initialized
*
@@ -2515,302 +2223,8 @@ void ata_sff_std_ports(struct ata_ioports *ioaddr)
}
EXPORT_SYMBOL_GPL(ata_sff_std_ports);
-unsigned long ata_bmdma_mode_filter(struct ata_device *adev,
- unsigned long xfer_mask)
-{
- /* Filter out DMA modes if the device has been configured by
- the BIOS as PIO only */
-
- if (adev->link->ap->ioaddr.bmdma_addr == NULL)
- xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
- return xfer_mask;
-}
-EXPORT_SYMBOL_GPL(ata_bmdma_mode_filter);
-
-/**
- * ata_bmdma_setup - Set up PCI IDE BMDMA transaction
- * @qc: Info associated with this ATA transaction.
- *
- * LOCKING:
- * spin_lock_irqsave(host lock)
- */
-void ata_bmdma_setup(struct ata_queued_cmd *qc)
-{
- struct ata_port *ap = qc->ap;
- unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
- u8 dmactl;
-
- /* load PRD table addr. */
- mb(); /* make sure PRD table writes are visible to controller */
- iowrite32(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
-
- /* specify data direction, triple-check start bit is clear */
- dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
- dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
- if (!rw)
- dmactl |= ATA_DMA_WR;
- iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
-
- /* issue r/w command */
- ap->ops->sff_exec_command(ap, &qc->tf);
-}
-EXPORT_SYMBOL_GPL(ata_bmdma_setup);
-
-/**
- * ata_bmdma_start - Start a PCI IDE BMDMA transaction
- * @qc: Info associated with this ATA transaction.
- *
- * LOCKING:
- * spin_lock_irqsave(host lock)
- */
-void ata_bmdma_start(struct ata_queued_cmd *qc)
-{
- struct ata_port *ap = qc->ap;
- u8 dmactl;
-
- /* start host DMA transaction */
- dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
- iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
-
- /* Strictly, one may wish to issue an ioread8() here, to
- * flush the mmio write. However, control also passes
- * to the hardware at this point, and it will interrupt
- * us when we are to resume control. So, in effect,
- * we don't care when the mmio write flushes.
- * Further, a read of the DMA status register _immediately_
- * following the write may not be what certain flaky hardware
- * is expected, so I think it is best to not add a readb()
- * without first all the MMIO ATA cards/mobos.
- * Or maybe I'm just being paranoid.
- *
- * FIXME: The posting of this write means I/O starts are
- * unneccessarily delayed for MMIO
- */
-}
-EXPORT_SYMBOL_GPL(ata_bmdma_start);
-
-/**
- * ata_bmdma_stop - Stop PCI IDE BMDMA transfer
- * @qc: Command we are ending DMA for
- *
- * Clears the ATA_DMA_START flag in the dma control register
- *
- * May be used as the bmdma_stop() entry in ata_port_operations.
- *
- * LOCKING:
- * spin_lock_irqsave(host lock)
- */
-void ata_bmdma_stop(struct ata_queued_cmd *qc)
-{
- struct ata_port *ap = qc->ap;
- void __iomem *mmio = ap->ioaddr.bmdma_addr;
-
- /* clear start/stop bit */
- iowrite8(ioread8(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
- mmio + ATA_DMA_CMD);
-
- /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
- ata_sff_dma_pause(ap);
-}
-EXPORT_SYMBOL_GPL(ata_bmdma_stop);
-
-/**
- * ata_bmdma_status - Read PCI IDE BMDMA status
- * @ap: Port associated with this ATA transaction.
- *
- * Read and return BMDMA status register.
- *
- * May be used as the bmdma_status() entry in ata_port_operations.
- *
- * LOCKING:
- * spin_lock_irqsave(host lock)
- */
-u8 ata_bmdma_status(struct ata_port *ap)
-{
- return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
-}
-EXPORT_SYMBOL_GPL(ata_bmdma_status);
-
-/**
- * ata_bus_reset - reset host port and associated ATA channel
- * @ap: port to reset
- *
- * This is typically the first time we actually start issuing
- * commands to the ATA channel. We wait for BSY to clear, then
- * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
- * result. Determine what devices, if any, are on the channel
- * by looking at the device 0/1 error register. Look at the signature
- * stored in each device's taskfile registers, to determine if
- * the device is ATA or ATAPI.
- *
- * LOCKING:
- * PCI/etc. bus probe sem.
- * Obtains host lock.
- *
- * SIDE EFFECTS:
- * Sets ATA_FLAG_DISABLED if bus reset fails.
- *
- * DEPRECATED:
- * This function is only for drivers which still use old EH and
- * will be removed soon.
- */
-void ata_bus_reset(struct ata_port *ap)
-{
- struct ata_device *device = ap->link.device;
- struct ata_ioports *ioaddr = &ap->ioaddr;
- unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
- u8 err;
- unsigned int dev0, dev1 = 0, devmask = 0;
- int rc;
-
- DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);
-
- /* determine if device 0/1 are present */
- if (ap->flags & ATA_FLAG_SATA_RESET)
- dev0 = 1;
- else {
- dev0 = ata_devchk(ap, 0);
- if (slave_possible)
- dev1 = ata_devchk(ap, 1);
- }
-
- if (dev0)
- devmask |= (1 << 0);
- if (dev1)
- devmask |= (1 << 1);
-
- /* select device 0 again */
- ap->ops->sff_dev_select(ap, 0);
-
- /* issue bus reset */
- if (ap->flags & ATA_FLAG_SRST) {
- rc = ata_bus_softreset(ap, devmask,
- ata_deadline(jiffies, 40000));
- if (rc && rc != -ENODEV)
- goto err_out;
- }
-
- /*
- * determine by signature whether we have ATA or ATAPI devices
- */
- device[0].class = ata_sff_dev_classify(&device[0], dev0, &err);
- if ((slave_possible) && (err != 0x81))
- device[1].class = ata_sff_dev_classify(&device[1], dev1, &err);
-
- /* is double-select really necessary? */
- if (device[1].class != ATA_DEV_NONE)
- ap->ops->sff_dev_select(ap, 1);
- if (device[0].class != ATA_DEV_NONE)
- ap->ops->sff_dev_select(ap, 0);
-
- /* if no devices were detected, disable this port */
- if ((device[0].class == ATA_DEV_NONE) &&
- (device[1].class == ATA_DEV_NONE))
- goto err_out;
-
- if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
- /* set up device control for ATA_FLAG_SATA_RESET */
- iowrite8(ap->ctl, ioaddr->ctl_addr);
- ap->last_ctl = ap->ctl;
- }
-
- DPRINTK("EXIT\n");
- return;
-
-err_out:
- ata_port_printk(ap, KERN_ERR, "disabling port\n");
- ata_port_disable(ap);
-
- DPRINTK("EXIT\n");
-}
-EXPORT_SYMBOL_GPL(ata_bus_reset);
-
#ifdef CONFIG_PCI
-/**
- * ata_pci_bmdma_clear_simplex - attempt to kick device out of simplex
- * @pdev: PCI device
- *
- * Some PCI ATA devices report simplex mode but in fact can be told to
- * enter non simplex mode. This implements the necessary logic to
- * perform the task on such devices. Calling it on other devices will
- * have -undefined- behaviour.
- */
-int ata_pci_bmdma_clear_simplex(struct pci_dev *pdev)
-{
- unsigned long bmdma = pci_resource_start(pdev, 4);
- u8 simplex;
-
- if (bmdma == 0)
- return -ENOENT;
-
- simplex = inb(bmdma + 0x02);
- outb(simplex & 0x60, bmdma + 0x02);
- simplex = inb(bmdma + 0x02);
- if (simplex & 0x80)
- return -EOPNOTSUPP;
- return 0;
-}
-EXPORT_SYMBOL_GPL(ata_pci_bmdma_clear_simplex);
-
-/**
- * ata_pci_bmdma_init - acquire PCI BMDMA resources and init ATA host
- * @host: target ATA host
- *
- * Acquire PCI BMDMA resources and initialize @host accordingly.
- *
- * LOCKING:
- * Inherited from calling layer (may sleep).
- *
- * RETURNS:
- * 0 on success, -errno otherwise.
- */
-int ata_pci_bmdma_init(struct ata_host *host)
-{
- struct device *gdev = host->dev;
- struct pci_dev *pdev = to_pci_dev(gdev);
- int i, rc;
-
- /* No BAR4 allocation: No DMA */
- if (pci_resource_start(pdev, 4) == 0)
- return 0;
-
- /* TODO: If we get no DMA mask we should fall back to PIO */
- rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
- if (rc)
- return rc;
- rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
- if (rc)
- return rc;
-
- /* request and iomap DMA region */
- rc = pcim_iomap_regions(pdev, 1 << 4, dev_driver_string(gdev));
- if (rc) {
- dev_printk(KERN_ERR, gdev, "failed to request/iomap BAR4\n");
- return -ENOMEM;
- }
- host->iomap = pcim_iomap_table(pdev);
-
- for (i = 0; i < 2; i++) {
- struct ata_port *ap = host->ports[i];
- void __iomem *bmdma = host->iomap[4] + 8 * i;
-
- if (ata_port_is_dummy(ap))
- continue;
-
- ap->ioaddr.bmdma_addr = bmdma;
- if ((!(ap->flags & ATA_FLAG_IGN_SIMPLEX)) &&
- (ioread8(bmdma + 2) & 0x80))
- host->flags |= ATA_HOST_SIMPLEX;
-
- ata_port_desc(ap, "bmdma 0x%llx",
- (unsigned long long)pci_resource_start(pdev, 4) + 8 * i);
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(ata_pci_bmdma_init);
-
static int ata_resources_present(struct pci_dev *pdev, int port)
{
int i;
@@ -2905,13 +2319,13 @@ int ata_pci_sff_init_host(struct ata_host *host)
EXPORT_SYMBOL_GPL(ata_pci_sff_init_host);
/**
- * ata_pci_sff_prepare_host - helper to prepare native PCI ATA host
+ * ata_pci_sff_prepare_host - helper to prepare PCI PIO-only SFF ATA host
* @pdev: target PCI device
* @ppi: array of port_info, must be enough for two ports
* @r_host: out argument for the initialized ATA host
*
- * Helper to allocate ATA host for @pdev, acquire all native PCI
- * resources and initialize it accordingly in one go.
+ * Helper to allocate PIO-only SFF ATA host for @pdev, acquire
+ * all PCI resources and initialize it accordingly in one go.
*
* LOCKING:
* Inherited from calling layer (may sleep).
@@ -2941,22 +2355,10 @@ int ata_pci_sff_prepare_host(struct pci_dev *pdev,
if (rc)
goto err_out;
- /* init DMA related stuff */
- rc = ata_pci_bmdma_init(host);
- if (rc)
- goto err_bmdma;
-
devres_remove_group(&pdev->dev, NULL);
*r_host = host;
return 0;
-err_bmdma:
- /* This is necessary because PCI and iomap resources are
- * merged and releasing the top group won't release the
- * acquired resources if some of those have been acquired
- * before entering this function.
- */
- pcim_iounmap_regions(pdev, 0xf);
err_out:
devres_release_group(&pdev->dev, NULL);
return rc;
@@ -3057,8 +2459,21 @@ out:
}
EXPORT_SYMBOL_GPL(ata_pci_sff_activate_host);
+static const struct ata_port_info *ata_sff_find_valid_pi(
+ const struct ata_port_info * const *ppi)
+{
+ int i;
+
+ /* look up the first valid port_info */
+ for (i = 0; i < 2 && ppi[i]; i++)
+ if (ppi[i]->port_ops != &ata_dummy_port_ops)
+ return ppi[i];
+
+ return NULL;
+}
+
/**
- * ata_pci_sff_init_one - Initialize/register PCI IDE host controller
+ * ata_pci_sff_init_one - Initialize/register PIO-only PCI IDE controller
* @pdev: Controller to be initialized
* @ppi: array of port_info, must be enough for two ports
* @sht: scsi_host_template to use when registering the host
@@ -3067,11 +2482,7 @@ EXPORT_SYMBOL_GPL(ata_pci_sff_activate_host);
*
* This is a helper function which can be called from a driver's
* xxx_init_one() probe function if the hardware uses traditional
- * IDE taskfile registers.
- *
- * This function calls pci_enable_device(), reserves its register
- * regions, sets the dma mask, enables bus master mode, and calls
- * ata_device_add()
+ * IDE taskfile registers and is PIO only.
*
* ASSUMPTION:
* Nobody makes a single channel controller that appears solely as
@@ -3088,20 +2499,13 @@ int ata_pci_sff_init_one(struct pci_dev *pdev,
struct scsi_host_template *sht, void *host_priv, int hflag)
{
struct device *dev = &pdev->dev;
- const struct ata_port_info *pi = NULL;
+ const struct ata_port_info *pi;
struct ata_host *host = NULL;
- int i, rc;
+ int rc;
DPRINTK("ENTER\n");
- /* look up the first valid port_info */
- for (i = 0; i < 2 && ppi[i]; i++) {
- if (ppi[i]->port_ops != &ata_dummy_port_ops) {
- pi = ppi[i];
- break;
- }
- }
-
+ pi = ata_sff_find_valid_pi(ppi);
if (!pi) {
dev_printk(KERN_ERR, &pdev->dev,
"no valid port_info specified\n");
@@ -3122,7 +2526,6 @@ int ata_pci_sff_init_one(struct pci_dev *pdev,
host->private_data = host_priv;
host->flags |= hflag;
- pci_set_master(pdev);
rc = ata_pci_sff_activate_host(host, ata_sff_interrupt, sht);
out:
if (rc == 0)
@@ -3135,3 +2538,801 @@ out:
EXPORT_SYMBOL_GPL(ata_pci_sff_init_one);
#endif /* CONFIG_PCI */
+
+/*
+ * BMDMA support
+ */
+
+#ifdef CONFIG_ATA_BMDMA
+
+const struct ata_port_operations ata_bmdma_port_ops = {
+ .inherits = &ata_sff_port_ops,
+
+ .error_handler = ata_bmdma_error_handler,
+ .post_internal_cmd = ata_bmdma_post_internal_cmd,
+
+ .qc_prep = ata_bmdma_qc_prep,
+ .qc_issue = ata_bmdma_qc_issue,
+
+ .sff_irq_clear = ata_bmdma_irq_clear,
+ .bmdma_setup = ata_bmdma_setup,
+ .bmdma_start = ata_bmdma_start,
+ .bmdma_stop = ata_bmdma_stop,
+ .bmdma_status = ata_bmdma_status,
+
+ .port_start = ata_bmdma_port_start,
+};
+EXPORT_SYMBOL_GPL(ata_bmdma_port_ops);
+
+const struct ata_port_operations ata_bmdma32_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+
+ .sff_data_xfer = ata_sff_data_xfer32,
+ .port_start = ata_bmdma_port_start32,
+};
+EXPORT_SYMBOL_GPL(ata_bmdma32_port_ops);
+
+/**
+ * ata_bmdma_fill_sg - Fill PCI IDE PRD table
+ * @qc: Metadata associated with taskfile to be transferred
+ *
+ * Fill PCI IDE PRD (scatter-gather) table with segments
+ * associated with the current disk command.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ *
+ */
+static void ata_bmdma_fill_sg(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct ata_bmdma_prd *prd = ap->bmdma_prd;
+ struct scatterlist *sg;
+ unsigned int si, pi;
+
+ pi = 0;
+ for_each_sg(qc->sg, sg, qc->n_elem, si) {
+ u32 addr, offset;
+ u32 sg_len, len;
+
+ /* determine if physical DMA addr spans 64K boundary.
+ * Note h/w doesn't support 64-bit, so we unconditionally
+ * truncate dma_addr_t to u32.
+ */
+ addr = (u32) sg_dma_address(sg);
+ sg_len = sg_dma_len(sg);
+
+ while (sg_len) {
+ offset = addr & 0xffff;
+ len = sg_len;
+ if ((offset + sg_len) > 0x10000)
+ len = 0x10000 - offset;
+
+ prd[pi].addr = cpu_to_le32(addr);
+ prd[pi].flags_len = cpu_to_le32(len & 0xffff);
+ VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
+
+ pi++;
+ sg_len -= len;
+ addr += len;
+ }
+ }
+
+ prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
+}
+
+/**
+ * ata_bmdma_fill_sg_dumb - Fill PCI IDE PRD table
+ * @qc: Metadata associated with taskfile to be transferred
+ *
+ * Fill PCI IDE PRD (scatter-gather) table with segments
+ * associated with the current disk command. Perform the fill
+ * so that we avoid writing any length 64K records for
+ * controllers that don't follow the spec.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ *
+ */
+static void ata_bmdma_fill_sg_dumb(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct ata_bmdma_prd *prd = ap->bmdma_prd;
+ struct scatterlist *sg;
+ unsigned int si, pi;
+
+ pi = 0;
+ for_each_sg(qc->sg, sg, qc->n_elem, si) {
+ u32 addr, offset;
+ u32 sg_len, len, blen;
+
+ /* determine if physical DMA addr spans 64K boundary.
+ * Note h/w doesn't support 64-bit, so we unconditionally
+ * truncate dma_addr_t to u32.
+ */
+ addr = (u32) sg_dma_address(sg);
+ sg_len = sg_dma_len(sg);
+
+ while (sg_len) {
+ offset = addr & 0xffff;
+ len = sg_len;
+ if ((offset + sg_len) > 0x10000)
+ len = 0x10000 - offset;
+
+ blen = len & 0xffff;
+ prd[pi].addr = cpu_to_le32(addr);
+ if (blen == 0) {
+ /* Some PATA chipsets like the CS5530 can't
+ cope with 0x0000 meaning 64K as the spec
+ says */
+ prd[pi].flags_len = cpu_to_le32(0x8000);
+ blen = 0x8000;
+ prd[++pi].addr = cpu_to_le32(addr + 0x8000);
+ }
+ prd[pi].flags_len = cpu_to_le32(blen);
+ VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
+
+ pi++;
+ sg_len -= len;
+ addr += len;
+ }
+ }
+
+ prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
+}
+
+/**
+ * ata_bmdma_qc_prep - Prepare taskfile for submission
+ * @qc: Metadata associated with taskfile to be prepared
+ *
+ * Prepare ATA taskfile for submission.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+void ata_bmdma_qc_prep(struct ata_queued_cmd *qc)
+{
+ if (!(qc->flags & ATA_QCFLAG_DMAMAP))
+ return;
+
+ ata_bmdma_fill_sg(qc);
+}
+EXPORT_SYMBOL_GPL(ata_bmdma_qc_prep);
+
+/**
+ * ata_bmdma_dumb_qc_prep - Prepare taskfile for submission
+ * @qc: Metadata associated with taskfile to be prepared
+ *
+ * Prepare ATA taskfile for submission.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+void ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc)
+{
+ if (!(qc->flags & ATA_QCFLAG_DMAMAP))
+ return;
+
+ ata_bmdma_fill_sg_dumb(qc);
+}
+EXPORT_SYMBOL_GPL(ata_bmdma_dumb_qc_prep);
+
+/**
+ * ata_bmdma_qc_issue - issue taskfile to a BMDMA controller
+ * @qc: command to issue to device
+ *
+ * This function issues a PIO, NODATA or DMA command to a
+ * SFF/BMDMA controller. PIO and NODATA are handled by
+ * ata_sff_qc_issue().
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ *
+ * RETURNS:
+ * Zero on success, AC_ERR_* mask on failure
+ */
+unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+
+ /* see ata_dma_blacklisted() */
+ BUG_ON((ap->flags & ATA_FLAG_PIO_POLLING) &&
+ qc->tf.protocol == ATAPI_PROT_DMA);
+
+ /* defer PIO handling to sff_qc_issue */
+ if (!ata_is_dma(qc->tf.protocol))
+ return ata_sff_qc_issue(qc);
+
+ /* select the device */
+ ata_dev_select(ap, qc->dev->devno, 1, 0);
+
+ /* start the command */
+ switch (qc->tf.protocol) {
+ case ATA_PROT_DMA:
+ WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);
+
+ ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */
+ ap->ops->bmdma_setup(qc); /* set up bmdma */
+ ap->ops->bmdma_start(qc); /* initiate bmdma */
+ ap->hsm_task_state = HSM_ST_LAST;
+ break;
+
+ case ATAPI_PROT_DMA:
+ WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);
+
+ ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */
+ ap->ops->bmdma_setup(qc); /* set up bmdma */
+ ap->hsm_task_state = HSM_ST_FIRST;
+
+ /* send cdb by polling if no cdb interrupt */
+ if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
+ ata_sff_queue_pio_task(ap, 0);
+ break;
+
+ default:
+ WARN_ON(1);
+ return AC_ERR_SYSTEM;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ata_bmdma_qc_issue);
+
+/**
+ * ata_bmdma_port_intr - Handle BMDMA port interrupt
+ * @ap: Port on which interrupt arrived (possibly...)
+ * @qc: Taskfile currently active in engine
+ *
+ * Handle port interrupt for given queued command.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ *
+ * RETURNS:
+ * One if interrupt was handled, zero if not (shared irq).
+ */
+unsigned int ata_bmdma_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
+{
+ struct ata_eh_info *ehi = &ap->link.eh_info;
+ u8 host_stat = 0;
+ bool bmdma_stopped = false;
+ unsigned int handled;
+
+ if (ap->hsm_task_state == HSM_ST_LAST && ata_is_dma(qc->tf.protocol)) {
+ /* check status of DMA engine */
+ host_stat = ap->ops->bmdma_status(ap);
+ VPRINTK("ata%u: host_stat 0x%X\n", ap->print_id, host_stat);
+
+ /* if it's not our irq... */
+ if (!(host_stat & ATA_DMA_INTR))
+ return ata_sff_idle_irq(ap);
+
+ /* before we do anything else, clear DMA-Start bit */
+ ap->ops->bmdma_stop(qc);
+ bmdma_stopped = true;
+
+ if (unlikely(host_stat & ATA_DMA_ERR)) {
+ /* error when transferring data to/from memory */
+ qc->err_mask |= AC_ERR_HOST_BUS;
+ ap->hsm_task_state = HSM_ST_ERR;
+ }
+ }
+
+ handled = __ata_sff_port_intr(ap, qc, bmdma_stopped);
+
+ if (unlikely(qc->err_mask) && ata_is_dma(qc->tf.protocol))
+ ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
+
+ return handled;
+}
+EXPORT_SYMBOL_GPL(ata_bmdma_port_intr);
+
+/**
+ * ata_bmdma_interrupt - Default BMDMA ATA host interrupt handler
+ * @irq: irq line (unused)
+ * @dev_instance: pointer to our ata_host information structure
+ *
+ * Default interrupt handler for PCI IDE devices. Calls
+ * ata_bmdma_port_intr() for each port that is not disabled.
+ *
+ * LOCKING:
+ * Obtains host lock during operation.
+ *
+ * RETURNS:
+ * IRQ_NONE or IRQ_HANDLED.
+ */
+irqreturn_t ata_bmdma_interrupt(int irq, void *dev_instance)
+{
+ return __ata_sff_interrupt(irq, dev_instance, ata_bmdma_port_intr);
+}
+EXPORT_SYMBOL_GPL(ata_bmdma_interrupt);
+
+/**
+ * ata_bmdma_error_handler - Stock error handler for BMDMA controller
+ * @ap: port to handle error for
+ *
+ * Stock error handler for BMDMA controller. It can handle both
+ * PATA and SATA controllers. Most BMDMA controllers should be
+ * able to use this EH as-is or with some added handling before
+ * and after.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep)
+ */
+void ata_bmdma_error_handler(struct ata_port *ap)
+{
+ struct ata_queued_cmd *qc;
+ unsigned long flags;
+ bool thaw = false;
+
+ qc = __ata_qc_from_tag(ap, ap->link.active_tag);
+ if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
+ qc = NULL;
+
+ /* reset PIO HSM and stop DMA engine */
+ spin_lock_irqsave(ap->lock, flags);
+
+ if (qc && ata_is_dma(qc->tf.protocol)) {
+ u8 host_stat;
+
+ host_stat = ap->ops->bmdma_status(ap);
+
+ /* BMDMA controllers indicate host bus error by
+ * setting DMA_ERR bit and timing out. As it wasn't
+ * really a timeout event, adjust error mask and
+ * cancel frozen state.
+ */
+ if (qc->err_mask == AC_ERR_TIMEOUT && (host_stat & ATA_DMA_ERR)) {
+ qc->err_mask = AC_ERR_HOST_BUS;
+ thaw = true;
+ }
+
+ ap->ops->bmdma_stop(qc);
+
+ /* if we're gonna thaw, make sure IRQ is clear */
+ if (thaw) {
+ ap->ops->sff_check_status(ap);
+ if (ap->ops->sff_irq_clear)
+ ap->ops->sff_irq_clear(ap);
+ }
+ }
+
+ spin_unlock_irqrestore(ap->lock, flags);
+
+ if (thaw)
+ ata_eh_thaw_port(ap);
+
+ ata_sff_error_handler(ap);
+}
+EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
+
+/**
+ * ata_bmdma_post_internal_cmd - Stock post_internal_cmd for BMDMA
+ * @qc: internal command to clean up
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep)
+ */
+void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ unsigned long flags;
+
+ if (ata_is_dma(qc->tf.protocol)) {
+ spin_lock_irqsave(ap->lock, flags);
+ ap->ops->bmdma_stop(qc);
+ spin_unlock_irqrestore(ap->lock, flags);
+ }
+}
+EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
+
+/**
+ * ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt.
+ * @ap: Port associated with this ATA transaction.
+ *
+ * Clear interrupt and error flags in DMA status register.
+ *
+ * May be used as the irq_clear() entry in ata_port_operations.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+void ata_bmdma_irq_clear(struct ata_port *ap)
+{
+ void __iomem *mmio = ap->ioaddr.bmdma_addr;
+
+ if (!mmio)
+ return;
+
+ iowrite8(ioread8(mmio + ATA_DMA_STATUS), mmio + ATA_DMA_STATUS);
+}
+EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
+
+/**
+ * ata_bmdma_setup - Set up PCI IDE BMDMA transaction
+ * @qc: Info associated with this ATA transaction.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+void ata_bmdma_setup(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
+ u8 dmactl;
+
+ /* load PRD table addr. */
+ mb(); /* make sure PRD table writes are visible to controller */
+ iowrite32(ap->bmdma_prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
+
+ /* specify data direction, triple-check start bit is clear */
+ dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+ dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
+ if (!rw)
+ dmactl |= ATA_DMA_WR;
+ iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+
+ /* issue r/w command */
+ ap->ops->sff_exec_command(ap, &qc->tf);
+}
+EXPORT_SYMBOL_GPL(ata_bmdma_setup);
+
+/**
+ * ata_bmdma_start - Start a PCI IDE BMDMA transaction
+ * @qc: Info associated with this ATA transaction.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+void ata_bmdma_start(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ u8 dmactl;
+
+ /* start host DMA transaction */
+ dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+ iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+
+ /* Strictly, one may wish to issue an ioread8() here, to
+ * flush the mmio write. However, control also passes
+ * to the hardware at this point, and it will interrupt
+ * us when we are to resume control. So, in effect,
+ * we don't care when the mmio write flushes.
+ * Further, a read of the DMA status register _immediately_
+ * following the write may not be what certain flaky hardware
+ * is expecting, so I think it is best to not add a readb()
+ * without first checking all the MMIO ATA cards/mobos.
+ * Or maybe I'm just being paranoid.
+ *
+ * FIXME: The posting of this write means I/O starts are
+ * unnecessarily delayed for MMIO
+ */
+}
+EXPORT_SYMBOL_GPL(ata_bmdma_start);
+
+/**
+ * ata_bmdma_stop - Stop PCI IDE BMDMA transfer
+ * @qc: Command we are ending DMA for
+ *
+ * Clears the ATA_DMA_START flag in the dma control register
+ *
+ * May be used as the bmdma_stop() entry in ata_port_operations.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+void ata_bmdma_stop(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ void __iomem *mmio = ap->ioaddr.bmdma_addr;
+
+ /* clear start/stop bit */
+ iowrite8(ioread8(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
+ mmio + ATA_DMA_CMD);
+
+ /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
+ ata_sff_dma_pause(ap);
+}
+EXPORT_SYMBOL_GPL(ata_bmdma_stop);
+
+/**
+ * ata_bmdma_status - Read PCI IDE BMDMA status
+ * @ap: Port associated with this ATA transaction.
+ *
+ * Read and return BMDMA status register.
+ *
+ * May be used as the bmdma_status() entry in ata_port_operations.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+u8 ata_bmdma_status(struct ata_port *ap)
+{
+ return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
+}
+EXPORT_SYMBOL_GPL(ata_bmdma_status);
+
+
+/**
+ * ata_bmdma_port_start - Set port up for bmdma.
+ * @ap: Port to initialize
+ *
+ * Called just after data structures for each port are
+ * initialized. Allocates space for PRD table.
+ *
+ * May be used as the port_start() entry in ata_port_operations.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+int ata_bmdma_port_start(struct ata_port *ap)
+{
+ if (ap->mwdma_mask || ap->udma_mask) {
+ ap->bmdma_prd =
+ dmam_alloc_coherent(ap->host->dev, ATA_PRD_TBL_SZ,
+ &ap->bmdma_prd_dma, GFP_KERNEL);
+ if (!ap->bmdma_prd)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ata_bmdma_port_start);
+
+/**
+ * ata_bmdma_port_start32 - Set port up for dma.
+ * @ap: Port to initialize
+ *
+ * Called just after data structures for each port are
+ * initialized. Enables 32bit PIO and allocates space for PRD
+ * table.
+ *
+ * May be used as the port_start() entry in ata_port_operations for
+ * devices that are capable of 32bit PIO.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+int ata_bmdma_port_start32(struct ata_port *ap)
+{
+ ap->pflags |= ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE;
+ return ata_bmdma_port_start(ap);
+}
+EXPORT_SYMBOL_GPL(ata_bmdma_port_start32);
+
+#ifdef CONFIG_PCI
+
+/**
+ * ata_pci_bmdma_clear_simplex - attempt to kick device out of simplex
+ * @pdev: PCI device
+ *
+ * Some PCI ATA devices report simplex mode but in fact can be told to
+ * enter non-simplex mode. This implements the necessary logic to
+ * perform the task on such devices. Calling it on other devices will
+ * have -undefined- behaviour.
+ */
+int ata_pci_bmdma_clear_simplex(struct pci_dev *pdev)
+{
+ unsigned long bmdma = pci_resource_start(pdev, 4);
+ u8 simplex;
+
+ if (bmdma == 0)
+ return -ENOENT;
+
+ simplex = inb(bmdma + 0x02);
+ outb(simplex & 0x60, bmdma + 0x02);
+ simplex = inb(bmdma + 0x02);
+ if (simplex & 0x80)
+ return -EOPNOTSUPP;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ata_pci_bmdma_clear_simplex);
+
+static void ata_bmdma_nodma(struct ata_host *host, const char *reason)
+{
+ int i;
+
+ dev_printk(KERN_ERR, host->dev, "BMDMA: %s, falling back to PIO\n",
+ reason);
+
+ for (i = 0; i < 2; i++) {
+ host->ports[i]->mwdma_mask = 0;
+ host->ports[i]->udma_mask = 0;
+ }
+}
+
+/**
+ * ata_pci_bmdma_init - acquire PCI BMDMA resources and init ATA host
+ * @host: target ATA host
+ *
+ * Acquire PCI BMDMA resources and initialize @host accordingly.
+ *
+ * LOCKING:
+ * Inherited from calling layer (may sleep).
+ */
+void ata_pci_bmdma_init(struct ata_host *host)
+{
+ struct device *gdev = host->dev;
+ struct pci_dev *pdev = to_pci_dev(gdev);
+ int i, rc;
+
+ /* No BAR4 allocation: No DMA */
+ if (pci_resource_start(pdev, 4) == 0) {
+ ata_bmdma_nodma(host, "BAR4 is zero");
+ return;
+ }
+
+ /*
+ * Some controllers require BMDMA region to be initialized
+ * even if DMA is not in use to clear IRQ status via
+ * ->sff_irq_clear method. Try to initialize bmdma_addr
+ * regardless of dma masks.
+ */
+ rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
+ if (rc)
+ ata_bmdma_nodma(host, "failed to set dma mask");
+ if (!rc) {
+ rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
+ if (rc)
+ ata_bmdma_nodma(host,
+ "failed to set consistent dma mask");
+ }
+
+ /* request and iomap DMA region */
+ rc = pcim_iomap_regions(pdev, 1 << 4, dev_driver_string(gdev));
+ if (rc) {
+ ata_bmdma_nodma(host, "failed to request/iomap BAR4");
+ return;
+ }
+ host->iomap = pcim_iomap_table(pdev);
+
+ for (i = 0; i < 2; i++) {
+ struct ata_port *ap = host->ports[i];
+ void __iomem *bmdma = host->iomap[4] + 8 * i;
+
+ if (ata_port_is_dummy(ap))
+ continue;
+
+ ap->ioaddr.bmdma_addr = bmdma;
+ if ((!(ap->flags & ATA_FLAG_IGN_SIMPLEX)) &&
+ (ioread8(bmdma + 2) & 0x80))
+ host->flags |= ATA_HOST_SIMPLEX;
+
+ ata_port_desc(ap, "bmdma 0x%llx",
+ (unsigned long long)pci_resource_start(pdev, 4) + 8 * i);
+ }
+}
+EXPORT_SYMBOL_GPL(ata_pci_bmdma_init);
+
+/**
+ * ata_pci_bmdma_prepare_host - helper to prepare PCI BMDMA ATA host
+ * @pdev: target PCI device
+ * @ppi: array of port_info, must be enough for two ports
+ * @r_host: out argument for the initialized ATA host
+ *
+ * Helper to allocate BMDMA ATA host for @pdev, acquire all PCI
+ * resources and initialize it accordingly in one go.
+ *
+ * LOCKING:
+ * Inherited from calling layer (may sleep).
+ *
+ * RETURNS:
+ * 0 on success, -errno otherwise.
+ */
+int ata_pci_bmdma_prepare_host(struct pci_dev *pdev,
+ const struct ata_port_info * const * ppi,
+ struct ata_host **r_host)
+{
+ int rc;
+
+ rc = ata_pci_sff_prepare_host(pdev, ppi, r_host);
+ if (rc)
+ return rc;
+
+ ata_pci_bmdma_init(*r_host);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ata_pci_bmdma_prepare_host);
+
+/**
+ * ata_pci_bmdma_init_one - Initialize/register BMDMA PCI IDE controller
+ * @pdev: Controller to be initialized
+ * @ppi: array of port_info, must be enough for two ports
+ * @sht: scsi_host_template to use when registering the host
+ * @host_priv: host private_data
+ * @hflags: host flags
+ *
+ * This function is similar to ata_pci_sff_init_one() but also
+ * takes care of BMDMA initialization.
+ *
+ * LOCKING:
+ * Inherited from PCI layer (may sleep).
+ *
+ * RETURNS:
+ * Zero on success, negative errno-based value on error.
+ */
+int ata_pci_bmdma_init_one(struct pci_dev *pdev,
+ const struct ata_port_info * const * ppi,
+ struct scsi_host_template *sht, void *host_priv,
+ int hflags)
+{
+ struct device *dev = &pdev->dev;
+ const struct ata_port_info *pi;
+ struct ata_host *host = NULL;
+ int rc;
+
+ DPRINTK("ENTER\n");
+
+ pi = ata_sff_find_valid_pi(ppi);
+ if (!pi) {
+ dev_printk(KERN_ERR, &pdev->dev,
+ "no valid port_info specified\n");
+ return -EINVAL;
+ }
+
+ if (!devres_open_group(dev, NULL, GFP_KERNEL))
+ return -ENOMEM;
+
+ rc = pcim_enable_device(pdev);
+ if (rc)
+ goto out;
+
+ /* prepare and activate BMDMA host */
+ rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
+ if (rc)
+ goto out;
+ host->private_data = host_priv;
+ host->flags |= hflags;
+
+ pci_set_master(pdev);
+ rc = ata_pci_sff_activate_host(host, ata_bmdma_interrupt, sht);
+ out:
+ if (rc == 0)
+ devres_remove_group(&pdev->dev, NULL);
+ else
+ devres_release_group(&pdev->dev, NULL);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(ata_pci_bmdma_init_one);
+
+#endif /* CONFIG_PCI */
+#endif /* CONFIG_ATA_BMDMA */
+
+/**
+ * ata_sff_port_init - Initialize SFF/BMDMA ATA port
+ * @ap: Port to initialize
+ *
+ * Called on port allocation to initialize SFF/BMDMA specific
+ * fields.
+ *
+ * LOCKING:
+ * None.
+ */
+void ata_sff_port_init(struct ata_port *ap)
+{
+ INIT_DELAYED_WORK(&ap->sff_pio_task, ata_sff_pio_task);
+ ap->ctl = ATA_DEVCTL_OBS;
+ ap->last_ctl = 0xFF;
+}
+
+int __init ata_sff_init(void)
+{
+ /*
+ * FIXME: In UP case, there is only one workqueue thread and if you
+ * have more than one PIO device, latency is bloody awful, with
+ * occasional multi-second "hiccups" as one PIO device waits for
+ * another. It's an ugly wart that users DO occasionally complain
+ * about; luckily most users have at most one PIO polled device.
+ */
+ ata_sff_wq = create_workqueue("ata_sff");
+ if (!ata_sff_wq)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void __exit ata_sff_exit(void)
+{
+ destroy_workqueue(ata_sff_wq);
+}
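
For context, a minimal sketch of how a driver's probe routine is expected to use the new ata_pci_bmdma_init_one() helper exported above. The "pata_foo" name, its port_info values and the absence of private data are illustrative assumptions, not part of this patch:

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/libata.h>
#include <scsi/scsi_host.h>

static const struct ata_port_info foo_port_info = {
	.flags		= ATA_FLAG_SLAVE_POSS,
	.pio_mask	= ATA_PIO4,
	.mwdma_mask	= ATA_MWDMA2,
	.udma_mask	= ATA_UDMA5,
	.port_ops	= &ata_bmdma_port_ops,	/* defined earlier in this patch */
};

static struct scsi_host_template foo_sht = {
	ATA_BMDMA_SHT("pata_foo"),
};

static int foo_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	const struct ata_port_info *ppi[] = { &foo_port_info, NULL };

	/* enables the device, maps BAR4, sets the DMA masks, registers the
	 * host and requests ata_bmdma_interrupt() as the IRQ handler */
	return ata_pci_bmdma_init_one(pdev, ppi, &foo_sht, NULL, 0);
}

The per-driver conversions further down follow exactly this pattern, replacing their previous ata_pci_sff_init_one() calls.
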
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 823e63096362..4b84ed60324a 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -38,17 +38,6 @@ struct ata_scsi_args {
void (*done)(struct scsi_cmnd *);
};
-static inline int ata_is_builtin_hardreset(ata_reset_fn_t reset)
-{
- if (reset == sata_std_hardreset)
- return 1;
-#ifdef CONFIG_ATA_SFF
- if (reset == sata_sff_hardreset)
- return 1;
-#endif
- return 0;
-}
-
/* libata-core.c */
enum {
/* flags for ata_dev_read_id() */
@@ -79,7 +68,6 @@ extern int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
u64 block, u32 n_block, unsigned int tf_flags,
unsigned int tag);
extern u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev);
-extern void ata_port_flush_task(struct ata_port *ap);
extern unsigned ata_exec_internal(struct ata_device *dev,
struct ata_taskfile *tf, const u8 *cdb,
int dma_dir, void *buf, unsigned int buflen,
@@ -202,10 +190,19 @@ static inline int sata_pmp_attach(struct ata_device *dev)
/* libata-sff.c */
#ifdef CONFIG_ATA_SFF
-extern void ata_dev_select(struct ata_port *ap, unsigned int device,
- unsigned int wait, unsigned int can_sleep);
-extern u8 ata_irq_on(struct ata_port *ap);
-extern void ata_pio_task(struct work_struct *work);
+extern void ata_sff_flush_pio_task(struct ata_port *ap);
+extern void ata_sff_port_init(struct ata_port *ap);
+extern int ata_sff_init(void);
+extern void ata_sff_exit(void);
+#else /* CONFIG_ATA_SFF */
+static inline void ata_sff_flush_pio_task(struct ata_port *ap)
+{ }
+static inline void ata_sff_port_init(struct ata_port *ap)
+{ }
+static inline int ata_sff_init(void)
+{ return 0; }
+static inline void ata_sff_exit(void)
+{ }
#endif /* CONFIG_ATA_SFF */
#endif /* __LIBATA_H__ */
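
The new ata_sff_init()/ata_sff_exit() and ata_sff_port_init() declarations come with empty inline stubs for !CONFIG_ATA_SFF builds, so the callers in libata-core.c can stay unconditional. A rough sketch of the intended call sites (assumed for illustration; the libata-core.c hunks are not shown here):

static int __init ata_init(void)
{
	int rc;

	rc = ata_sff_init();	/* creates the "ata_sff" PIO workqueue; no-op stub without CONFIG_ATA_SFF */
	if (rc)
		return rc;

	/* ... register transport classes etc. ... */
	return 0;
}

static void __exit ata_exit(void)
{
	/* ... unregister everything set up in ata_init() ... */
	ata_sff_exit();		/* destroys the workqueue; no-op stub otherwise */
}
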
diff --git a/drivers/ata/pata_acpi.c b/drivers/ata/pata_acpi.c
index 1ea2be0f4b94..c8d47034d5e9 100644
--- a/drivers/ata/pata_acpi.c
+++ b/drivers/ata/pata_acpi.c
@@ -101,7 +101,7 @@ static unsigned long pacpi_discover_modes(struct ata_port *ap, struct ata_device
static unsigned long pacpi_mode_filter(struct ata_device *adev, unsigned long mask)
{
struct pata_acpi *acpi = adev->link->ap->private_data;
- return ata_bmdma_mode_filter(adev, mask & acpi->mask[adev->devno]);
+ return mask & acpi->mask[adev->devno];
}
/**
@@ -172,7 +172,7 @@ static unsigned int pacpi_qc_issue(struct ata_queued_cmd *qc)
struct pata_acpi *acpi = ap->private_data;
if (acpi->gtm.flags & 0x10)
- return ata_sff_qc_issue(qc);
+ return ata_bmdma_qc_issue(qc);
if (adev != acpi->last) {
pacpi_set_piomode(ap, adev);
@@ -180,7 +180,7 @@ static unsigned int pacpi_qc_issue(struct ata_queued_cmd *qc)
pacpi_set_dmamode(ap, adev);
acpi->last = adev;
}
- return ata_sff_qc_issue(qc);
+ return ata_bmdma_qc_issue(qc);
}
/**
@@ -205,7 +205,7 @@ static int pacpi_port_start(struct ata_port *ap)
return -ENOMEM;
acpi->mask[0] = pacpi_discover_modes(ap, &ap->link.device[0]);
acpi->mask[1] = pacpi_discover_modes(ap, &ap->link.device[1]);
- ret = ata_sff_port_start(ap);
+ ret = ata_bmdma_port_start(ap);
if (ret < 0)
return ret;
@@ -260,7 +260,7 @@ static int pacpi_init_one (struct pci_dev *pdev, const struct pci_device_id *id)
return rc;
pcim_pin_device(pdev);
}
- return ata_pci_sff_init_one(pdev, ppi, &pacpi_sht, NULL, 0);
+ return ata_pci_bmdma_init_one(pdev, ppi, &pacpi_sht, NULL, 0);
}
static const struct pci_device_id pacpi_pci_tbl[] = {
diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c
index dc61b72f751c..794ec6e3275d 100644
--- a/drivers/ata/pata_ali.c
+++ b/drivers/ata/pata_ali.c
@@ -124,7 +124,7 @@ static unsigned long ali_20_filter(struct ata_device *adev, unsigned long mask)
ata_id_c_string(adev->id, model_num, ATA_ID_PROD, sizeof(model_num));
if (strstr(model_num, "WDC"))
return mask &= ~ATA_MASK_UDMA;
- return ata_bmdma_mode_filter(adev, mask);
+ return mask;
}
/**
@@ -583,7 +583,10 @@ static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
ppi[0] = &info_20_udma;
}
- return ata_pci_sff_init_one(pdev, ppi, &ali_sht, NULL, 0);
+ if (!ppi[0]->mwdma_mask && !ppi[0]->udma_mask)
+ return ata_pci_sff_init_one(pdev, ppi, &ali_sht, NULL, 0);
+ else
+ return ata_pci_bmdma_init_one(pdev, ppi, &ali_sht, NULL, 0);
}
#ifdef CONFIG_PM
diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
index d95eca9c547e..620a07cabe31 100644
--- a/drivers/ata/pata_amd.c
+++ b/drivers/ata/pata_amd.c
@@ -574,7 +574,7 @@ static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
}
/* And fire it up */
- return ata_pci_sff_init_one(pdev, ppi, &amd_sht, hpriv, 0);
+ return ata_pci_bmdma_init_one(pdev, ppi, &amd_sht, hpriv, 0);
}
#ifdef CONFIG_PM
diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c
index 4d066d6c30fa..ba43f0f8c880 100644
--- a/drivers/ata/pata_artop.c
+++ b/drivers/ata/pata_artop.c
@@ -421,7 +421,7 @@ static int artop_init_one (struct pci_dev *pdev, const struct pci_device_id *id)
BUG_ON(ppi[0] == NULL);
- return ata_pci_sff_init_one(pdev, ppi, &artop_sht, NULL, 0);
+ return ata_pci_bmdma_init_one(pdev, ppi, &artop_sht, NULL, 0);
}
static const struct pci_device_id artop_pci_tbl[] = {
diff --git a/drivers/ata/pata_at91.c b/drivers/ata/pata_at91.c
index c6a946aa252c..0da0dcc7dd08 100644
--- a/drivers/ata/pata_at91.c
+++ b/drivers/ata/pata_at91.c
@@ -202,7 +202,6 @@ static struct ata_port_operations pata_at91_port_ops = {
.sff_data_xfer = pata_at91_data_xfer_noirq,
.set_piomode = pata_at91_set_piomode,
.cable_detect = ata_cable_40wire,
- .port_start = ATA_OP_NULL,
};
static int __devinit pata_at91_probe(struct platform_device *pdev)
diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
index cbaf2eddac6b..43755616dc5a 100644
--- a/drivers/ata/pata_atiixp.c
+++ b/drivers/ata/pata_atiixp.c
@@ -217,7 +217,7 @@ static struct scsi_host_template atiixp_sht = {
static struct ata_port_operations atiixp_port_ops = {
.inherits = &ata_bmdma_port_ops,
- .qc_prep = ata_sff_dumb_qc_prep,
+ .qc_prep = ata_bmdma_dumb_qc_prep,
.bmdma_start = atiixp_bmdma_start,
.bmdma_stop = atiixp_bmdma_stop,
@@ -246,8 +246,8 @@ static int atiixp_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
if (!pci_test_config_bits(pdev, &atiixp_enable_bits[i]))
ppi[i] = &ata_dummy_port_info;
- return ata_pci_sff_init_one(pdev, ppi, &atiixp_sht, NULL,
- ATA_HOST_PARALLEL_SCAN);
+ return ata_pci_bmdma_init_one(pdev, ppi, &atiixp_sht, NULL,
+ ATA_HOST_PARALLEL_SCAN);
}
static const struct pci_device_id atiixp[] = {
diff --git a/drivers/ata/pata_atp867x.c b/drivers/ata/pata_atp867x.c
index bb6e0746e07d..95295935dd95 100644
--- a/drivers/ata/pata_atp867x.c
+++ b/drivers/ata/pata_atp867x.c
@@ -525,7 +525,7 @@ static int atp867x_init_one(struct pci_dev *pdev,
pci_set_master(pdev);
- rc = ata_host_activate(host, pdev->irq, ata_sff_interrupt,
+ rc = ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
IRQF_SHARED, &atp867x_sht);
if (rc)
dev_printk(KERN_ERR, &pdev->dev, "failed to activate host\n");
diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c
index 02c81f12c702..9cae65de750e 100644
--- a/drivers/ata/pata_bf54x.c
+++ b/drivers/ata/pata_bf54x.c
@@ -821,6 +821,18 @@ static void bfin_dev_select(struct ata_port *ap, unsigned int device)
}
/**
+ * bfin_set_devctl - Write device control reg
+ * @ap: port where the device is
+ * @ctl: value to write
+ */
+
+static void bfin_set_devctl(struct ata_port *ap, u8 ctl)
+{
+ void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
+ write_atapi_register(base, ATA_REG_CTRL, ctl);
+}
+
+/**
* bfin_bmdma_setup - Set up IDE DMA transaction
* @qc: Info associated with this ATA transaction.
*
@@ -1202,7 +1214,7 @@ static unsigned int bfin_data_xfer(struct ata_device *dev, unsigned char *buf,
* bfin_irq_clear - Clear ATAPI interrupt.
* @ap: Port associated with this ATA transaction.
*
- * Note: Original code is ata_sff_irq_clear().
+ * Note: Original code is ata_bmdma_irq_clear().
*/
static void bfin_irq_clear(struct ata_port *ap)
@@ -1216,56 +1228,6 @@ static void bfin_irq_clear(struct ata_port *ap)
}
/**
- * bfin_irq_on - Enable interrupts on a port.
- * @ap: Port on which interrupts are enabled.
- *
- * Note: Original code is ata_sff_irq_on().
- */
-
-static unsigned char bfin_irq_on(struct ata_port *ap)
-{
- void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
- u8 tmp;
-
- dev_dbg(ap->dev, "in atapi irq on\n");
- ap->ctl &= ~ATA_NIEN;
- ap->last_ctl = ap->ctl;
-
- write_atapi_register(base, ATA_REG_CTRL, ap->ctl);
- tmp = ata_wait_idle(ap);
-
- bfin_irq_clear(ap);
-
- return tmp;
-}
-
-/**
- * bfin_freeze - Freeze DMA controller port
- * @ap: port to freeze
- *
- * Note: Original code is ata_sff_freeze().
- */
-
-static void bfin_freeze(struct ata_port *ap)
-{
- void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
-
- dev_dbg(ap->dev, "in atapi dma freeze\n");
- ap->ctl |= ATA_NIEN;
- ap->last_ctl = ap->ctl;
-
- write_atapi_register(base, ATA_REG_CTRL, ap->ctl);
-
- /* Under certain circumstances, some controllers raise IRQ on
- * ATA_NIEN manipulation. Also, many controllers fail to mask
- * previously pending IRQ on ATA_NIEN assertion. Clear it.
- */
- ap->ops->sff_check_status(ap);
-
- bfin_irq_clear(ap);
-}
-
-/**
* bfin_thaw - Thaw DMA controller port
* @ap: port to thaw
*
@@ -1276,7 +1238,7 @@ void bfin_thaw(struct ata_port *ap)
{
dev_dbg(ap->dev, "in atapi dma thaw\n");
bfin_check_status(ap);
- bfin_irq_on(ap);
+ ata_sff_irq_on(ap);
}
/**
@@ -1293,7 +1255,7 @@ static void bfin_postreset(struct ata_link *link, unsigned int *classes)
void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
/* re-enable interrupts */
- bfin_irq_on(ap);
+ ata_sff_irq_on(ap);
/* is double-select really necessary? */
if (classes[0] != ATA_DEV_NONE)
@@ -1438,18 +1400,12 @@ static irqreturn_t bfin_ata_interrupt(int irq, void *dev_instance)
spin_lock_irqsave(&host->lock, flags);
for (i = 0; i < host->n_ports; i++) {
- struct ata_port *ap;
+ struct ata_port *ap = host->ports[i];
+ struct ata_queued_cmd *qc;
- ap = host->ports[i];
- if (ap &&
- !(ap->flags & ATA_FLAG_DISABLED)) {
- struct ata_queued_cmd *qc;
-
- qc = ata_qc_from_tag(ap, ap->link.active_tag);
- if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
- (qc->flags & ATA_QCFLAG_ACTIVE))
- handled |= bfin_ata_host_intr(ap, qc);
- }
+ qc = ata_qc_from_tag(ap, ap->link.active_tag);
+ if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
+ handled |= bfin_ata_host_intr(ap, qc);
}
spin_unlock_irqrestore(&host->lock, flags);
@@ -1465,7 +1421,7 @@ static struct scsi_host_template bfin_sht = {
};
static struct ata_port_operations bfin_pata_ops = {
- .inherits = &ata_sff_port_ops,
+ .inherits = &ata_bmdma_port_ops,
.set_piomode = bfin_set_piomode,
.set_dmamode = bfin_set_dmamode,
@@ -1476,6 +1432,7 @@ static struct ata_port_operations bfin_pata_ops = {
.sff_check_status = bfin_check_status,
.sff_check_altstatus = bfin_check_altstatus,
.sff_dev_select = bfin_dev_select,
+ .sff_set_devctl = bfin_set_devctl,
.bmdma_setup = bfin_bmdma_setup,
.bmdma_start = bfin_bmdma_start,
@@ -1485,13 +1442,11 @@ static struct ata_port_operations bfin_pata_ops = {
.qc_prep = ata_noop_qc_prep,
- .freeze = bfin_freeze,
.thaw = bfin_thaw,
.softreset = bfin_softreset,
.postreset = bfin_postreset,
.sff_irq_clear = bfin_irq_clear,
- .sff_irq_on = bfin_irq_on,
.port_start = bfin_port_start,
.port_stop = bfin_port_stop,
diff --git a/drivers/ata/pata_cmd640.c b/drivers/ata/pata_cmd640.c
index 45896b3c6538..e5f289f59ca3 100644
--- a/drivers/ata/pata_cmd640.c
+++ b/drivers/ata/pata_cmd640.c
@@ -153,24 +153,20 @@ static int cmd640_port_start(struct ata_port *ap)
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
struct cmd640_reg *timing;
- int ret = ata_sff_port_start(ap);
- if (ret < 0)
- return ret;
-
timing = devm_kzalloc(&pdev->dev, sizeof(struct cmd640_reg), GFP_KERNEL);
if (timing == NULL)
return -ENOMEM;
timing->last = -1; /* Force a load */
ap->private_data = timing;
- return ret;
+ return 0;
}
static struct scsi_host_template cmd640_sht = {
- ATA_BMDMA_SHT(DRV_NAME),
+ ATA_PIO_SHT(DRV_NAME),
};
static struct ata_port_operations cmd640_port_ops = {
- .inherits = &ata_bmdma_port_ops,
+ .inherits = &ata_sff_port_ops,
/* In theory xfer_noirq is not needed once we kill the prefetcher */
.sff_data_xfer = ata_sff_data_xfer_noirq,
.qc_issue = cmd640_qc_issue,
@@ -181,13 +177,10 @@ static struct ata_port_operations cmd640_port_ops = {
static void cmd640_hardware_init(struct pci_dev *pdev)
{
- u8 r;
u8 ctrl;
/* CMD640 detected, commiserations */
pci_write_config_byte(pdev, 0x5B, 0x00);
- /* Get version info */
- pci_read_config_byte(pdev, CFR, &r);
/* PIO0 command cycles */
pci_write_config_byte(pdev, CMDTIM, 0);
/* 512 byte bursts (sector) */
diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c
index 4c81a71b8877..9f5da1c7454b 100644
--- a/drivers/ata/pata_cmd64x.c
+++ b/drivers/ata/pata_cmd64x.c
@@ -367,7 +367,7 @@ static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
pci_write_config_byte(pdev, UDIDETCR0, 0xF0);
#endif
- return ata_pci_sff_init_one(pdev, ppi, &cmd64x_sht, NULL, 0);
+ return ata_pci_bmdma_init_one(pdev, ppi, &cmd64x_sht, NULL, 0);
}
#ifdef CONFIG_PM
diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c
index 95ebdac517f2..030952f1f97c 100644
--- a/drivers/ata/pata_cs5520.c
+++ b/drivers/ata/pata_cs5520.c
@@ -110,7 +110,7 @@ static struct scsi_host_template cs5520_sht = {
static struct ata_port_operations cs5520_port_ops = {
.inherits = &ata_bmdma_port_ops,
- .qc_prep = ata_sff_dumb_qc_prep,
+ .qc_prep = ata_bmdma_dumb_qc_prep,
.cable_detect = ata_cable_40wire,
.set_piomode = cs5520_set_piomode,
};
@@ -221,7 +221,7 @@ static int __devinit cs5520_init_one(struct pci_dev *pdev, const struct pci_devi
continue;
rc = devm_request_irq(&pdev->dev, irq[ap->port_no],
- ata_sff_interrupt, 0, DRV_NAME, host);
+ ata_bmdma_interrupt, 0, DRV_NAME, host);
if (rc)
return rc;
diff --git a/drivers/ata/pata_cs5530.c b/drivers/ata/pata_cs5530.c
index 738ad2e14a97..f792330f0d8e 100644
--- a/drivers/ata/pata_cs5530.c
+++ b/drivers/ata/pata_cs5530.c
@@ -156,7 +156,7 @@ static unsigned int cs5530_qc_issue(struct ata_queued_cmd *qc)
cs5530_set_dmamode(ap, adev);
}
- return ata_sff_qc_issue(qc);
+ return ata_bmdma_qc_issue(qc);
}
static struct scsi_host_template cs5530_sht = {
@@ -167,7 +167,7 @@ static struct scsi_host_template cs5530_sht = {
static struct ata_port_operations cs5530_port_ops = {
.inherits = &ata_bmdma_port_ops,
- .qc_prep = ata_sff_dumb_qc_prep,
+ .qc_prep = ata_bmdma_dumb_qc_prep,
.qc_issue = cs5530_qc_issue,
.cable_detect = ata_cable_40wire,
@@ -324,7 +324,7 @@ static int cs5530_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
ppi[1] = &info_palmax_secondary;
/* Now kick off ATA set up */
- return ata_pci_sff_init_one(pdev, ppi, &cs5530_sht, NULL, 0);
+ return ata_pci_bmdma_init_one(pdev, ppi, &cs5530_sht, NULL, 0);
}
#ifdef CONFIG_PM
diff --git a/drivers/ata/pata_cs5535.c b/drivers/ata/pata_cs5535.c
index a02e6459fdcc..03a93186aa19 100644
--- a/drivers/ata/pata_cs5535.c
+++ b/drivers/ata/pata_cs5535.c
@@ -198,7 +198,7 @@ static int cs5535_init_one(struct pci_dev *dev, const struct pci_device_id *id)
rdmsr(ATAC_CH0D1_PIO, timings, dummy);
if (CS5535_BAD_PIO(timings))
wrmsr(ATAC_CH0D1_PIO, 0xF7F4F7F4UL, 0);
- return ata_pci_sff_init_one(dev, ppi, &cs5535_sht, NULL, 0);
+ return ata_pci_bmdma_init_one(dev, ppi, &cs5535_sht, NULL, 0);
}
static const struct pci_device_id cs5535[] = {
diff --git a/drivers/ata/pata_cs5536.c b/drivers/ata/pata_cs5536.c
index 914ae3506ff5..21ee23f89e88 100644
--- a/drivers/ata/pata_cs5536.c
+++ b/drivers/ata/pata_cs5536.c
@@ -260,7 +260,7 @@ static int cs5536_init_one(struct pci_dev *dev, const struct pci_device_id *id)
return -ENODEV;
}
- return ata_pci_sff_init_one(dev, ppi, &cs5536_sht, NULL, 0);
+ return ata_pci_bmdma_init_one(dev, ppi, &cs5536_sht, NULL, 0);
}
static const struct pci_device_id cs5536[] = {
diff --git a/drivers/ata/pata_cypress.c b/drivers/ata/pata_cypress.c
index 0fcc096b8dac..6d915b063d93 100644
--- a/drivers/ata/pata_cypress.c
+++ b/drivers/ata/pata_cypress.c
@@ -138,7 +138,7 @@ static int cy82c693_init_one(struct pci_dev *pdev, const struct pci_device_id *i
if (PCI_FUNC(pdev->devfn) != 1)
return -ENODEV;
- return ata_pci_sff_init_one(pdev, ppi, &cy82c693_sht, NULL, 0);
+ return ata_pci_bmdma_init_one(pdev, ppi, &cy82c693_sht, NULL, 0);
}
static const struct pci_device_id cy82c693[] = {
diff --git a/drivers/ata/pata_efar.c b/drivers/ata/pata_efar.c
index 3bac0e079691..a08834758ea2 100644
--- a/drivers/ata/pata_efar.c
+++ b/drivers/ata/pata_efar.c
@@ -277,8 +277,8 @@ static int efar_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
dev_printk(KERN_DEBUG, &pdev->dev,
"version " DRV_VERSION "\n");
- return ata_pci_sff_init_one(pdev, ppi, &efar_sht, NULL,
- ATA_HOST_PARALLEL_SCAN);
+ return ata_pci_bmdma_init_one(pdev, ppi, &efar_sht, NULL,
+ ATA_HOST_PARALLEL_SCAN);
}
static const struct pci_device_id efar_pci_tbl[] = {
diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
index af49bfb57247..7688868557b9 100644
--- a/drivers/ata/pata_hpt366.c
+++ b/drivers/ata/pata_hpt366.c
@@ -182,7 +182,7 @@ static unsigned long hpt366_filter(struct ata_device *adev, unsigned long mask)
} else if (adev->class == ATA_DEV_ATAPI)
mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
- return ata_bmdma_mode_filter(adev, mask);
+ return mask;
}
static int hpt36x_cable_detect(struct ata_port *ap)
@@ -361,7 +361,7 @@ static int hpt36x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
break;
}
/* Now kick off ATA set up */
- return ata_pci_sff_init_one(dev, ppi, &hpt36x_sht, hpriv, 0);
+ return ata_pci_bmdma_init_one(dev, ppi, &hpt36x_sht, hpriv, 0);
}
#ifdef CONFIG_PM
diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
index 8839307a64cf..9ae4c0830577 100644
--- a/drivers/ata/pata_hpt37x.c
+++ b/drivers/ata/pata_hpt37x.c
@@ -282,7 +282,7 @@ static unsigned long hpt370_filter(struct ata_device *adev, unsigned long mask)
if (hpt_dma_blacklisted(adev, "UDMA100", bad_ata100_5))
mask &= ~(0xE0 << ATA_SHIFT_UDMA);
}
- return ata_bmdma_mode_filter(adev, mask);
+ return mask;
}
/**
@@ -298,7 +298,7 @@ static unsigned long hpt370a_filter(struct ata_device *adev, unsigned long mask)
if (hpt_dma_blacklisted(adev, "UDMA100", bad_ata100_5))
mask &= ~(0xE0 << ATA_SHIFT_UDMA);
}
- return ata_bmdma_mode_filter(adev, mask);
+ return mask;
}
/**
@@ -987,7 +987,7 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
}
/* Now kick off ATA set up */
- return ata_pci_sff_init_one(dev, ppi, &hpt37x_sht, private_data, 0);
+ return ata_pci_bmdma_init_one(dev, ppi, &hpt37x_sht, private_data, 0);
}
static const struct pci_device_id hpt37x[] = {
diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c
index 01457b266f3d..32f3463216b8 100644
--- a/drivers/ata/pata_hpt3x2n.c
+++ b/drivers/ata/pata_hpt3x2n.c
@@ -320,7 +320,7 @@ static unsigned int hpt3x2n_qc_issue(struct ata_queued_cmd *qc)
hpt3x2n_set_clock(ap, dpll ? 0x21 : 0x23);
}
- return ata_sff_qc_issue(qc);
+ return ata_bmdma_qc_issue(qc);
}
static struct scsi_host_template hpt3x2n_sht = {
@@ -548,7 +548,7 @@ static int hpt3x2n_init_one(struct pci_dev *dev, const struct pci_device_id *id)
outb(inb(iobase + 0x9c) | 0x04, iobase + 0x9c);
/* Now kick off ATA set up */
- return ata_pci_sff_init_one(dev, ppi, &hpt3x2n_sht, hpriv, 0);
+ return ata_pci_bmdma_init_one(dev, ppi, &hpt3x2n_sht, hpriv, 0);
}
static const struct pci_device_id hpt3x2n[] = {
diff --git a/drivers/ata/pata_hpt3x3.c b/drivers/ata/pata_hpt3x3.c
index 727a81ce4c9f..b63d5e2d4628 100644
--- a/drivers/ata/pata_hpt3x3.c
+++ b/drivers/ata/pata_hpt3x3.c
@@ -248,7 +248,7 @@ static int hpt3x3_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
ata_port_pbar_desc(ap, 4, offset_cmd[i], "cmd");
}
pci_set_master(pdev);
- return ata_host_activate(host, pdev->irq, ata_sff_interrupt,
+ return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
IRQF_SHARED, &hpt3x3_sht);
}
diff --git a/drivers/ata/pata_icside.c b/drivers/ata/pata_icside.c
index fa812e206eeb..9f2889fe43b2 100644
--- a/drivers/ata/pata_icside.c
+++ b/drivers/ata/pata_icside.c
@@ -321,7 +321,7 @@ static void pata_icside_postreset(struct ata_link *link, unsigned int *classes)
}
static struct ata_port_operations pata_icside_port_ops = {
- .inherits = &ata_sff_port_ops,
+ .inherits = &ata_bmdma_port_ops,
/* no need to build any PRD tables for DMA */
.qc_prep = ata_noop_qc_prep,
.sff_data_xfer = ata_sff_data_xfer_noirq,
@@ -333,7 +333,8 @@ static struct ata_port_operations pata_icside_port_ops = {
.cable_detect = ata_cable_40wire,
.set_dmamode = pata_icside_set_dmamode,
.postreset = pata_icside_postreset,
- .post_internal_cmd = pata_icside_bmdma_stop,
+
+ .port_start = ATA_OP_NULL, /* don't need PRD table */
};
static void __devinit
@@ -469,7 +470,7 @@ static int __devinit pata_icside_add_ports(struct pata_icside_info *info)
pata_icside_setup_ioaddr(ap, info->base, info, info->port[i]);
}
- return ata_host_activate(host, ec->irq, ata_sff_interrupt, 0,
+ return ata_host_activate(host, ec->irq, ata_bmdma_interrupt, 0,
&pata_icside_sht);
}
diff --git a/drivers/ata/pata_it8213.c b/drivers/ata/pata_it8213.c
index f971f0de88e6..4d142a2ab8fd 100644
--- a/drivers/ata/pata_it8213.c
+++ b/drivers/ata/pata_it8213.c
@@ -273,7 +273,7 @@ static int it8213_init_one (struct pci_dev *pdev, const struct pci_device_id *en
dev_printk(KERN_DEBUG, &pdev->dev,
"version " DRV_VERSION "\n");
- return ata_pci_sff_init_one(pdev, ppi, &it8213_sht, NULL, 0);
+ return ata_pci_bmdma_init_one(pdev, ppi, &it8213_sht, NULL, 0);
}
static const struct pci_device_id it8213_pci_tbl[] = {
diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c
index 5cb286fd839e..bf88f71a21f4 100644
--- a/drivers/ata/pata_it821x.c
+++ b/drivers/ata/pata_it821x.c
@@ -430,7 +430,7 @@ static unsigned int it821x_smart_qc_issue(struct ata_queued_cmd *qc)
case 0xFC: /* Internal 'report rebuild state' */
/* Arguably should just no-op this one */
case ATA_CMD_SET_FEATURES:
- return ata_sff_qc_issue(qc);
+ return ata_bmdma_qc_issue(qc);
}
printk(KERN_DEBUG "it821x: can't process command 0x%02X\n", qc->tf.command);
return AC_ERR_DEV;
@@ -448,7 +448,7 @@ static unsigned int it821x_smart_qc_issue(struct ata_queued_cmd *qc)
static unsigned int it821x_passthru_qc_issue(struct ata_queued_cmd *qc)
{
it821x_passthru_dev_select(qc->ap, qc->dev->devno);
- return ata_sff_qc_issue(qc);
+ return ata_bmdma_qc_issue(qc);
}
/**
@@ -739,7 +739,7 @@ static int it821x_port_start(struct ata_port *ap)
struct it821x_dev *itdev;
u8 conf;
- int ret = ata_sff_port_start(ap);
+ int ret = ata_bmdma_port_start(ap);
if (ret < 0)
return ret;
@@ -933,7 +933,7 @@ static int it821x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
else
ppi[0] = &info_smart;
}
- return ata_pci_sff_init_one(pdev, ppi, &it821x_sht, NULL, 0);
+ return ata_pci_bmdma_init_one(pdev, ppi, &it821x_sht, NULL, 0);
}
#ifdef CONFIG_PM
diff --git a/drivers/ata/pata_jmicron.c b/drivers/ata/pata_jmicron.c
index 565e01e6ac7c..cb3babbb7035 100644
--- a/drivers/ata/pata_jmicron.c
+++ b/drivers/ata/pata_jmicron.c
@@ -144,7 +144,7 @@ static int jmicron_init_one (struct pci_dev *pdev, const struct pci_device_id *i
};
const struct ata_port_info *ppi[] = { &info, NULL };
- return ata_pci_sff_init_one(pdev, ppi, &jmicron_sht, NULL, 0);
+ return ata_pci_bmdma_init_one(pdev, ppi, &jmicron_sht, NULL, 0);
}
static const struct pci_device_id jmicron_pci_tbl[] = {
diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c
index 211b6438b3a0..76640ac76888 100644
--- a/drivers/ata/pata_macio.c
+++ b/drivers/ata/pata_macio.c
@@ -720,6 +720,8 @@ static int pata_macio_port_start(struct ata_port *ap)
if (priv->dma_table_cpu == NULL) {
dev_err(priv->dev, "Unable to allocate DMA command list\n");
ap->ioaddr.bmdma_addr = NULL;
+ ap->mwdma_mask = 0;
+ ap->udma_mask = 0;
}
return 0;
}
@@ -917,7 +919,7 @@ static struct scsi_host_template pata_macio_sht = {
};
static struct ata_port_operations pata_macio_ops = {
- .inherits = &ata_sff_port_ops,
+ .inherits = &ata_bmdma_port_ops,
.freeze = pata_macio_freeze,
.set_piomode = pata_macio_set_timings,
@@ -925,7 +927,6 @@ static struct ata_port_operations pata_macio_ops = {
.cable_detect = pata_macio_cable_detect,
.sff_dev_select = pata_macio_dev_select,
.qc_prep = pata_macio_qc_prep,
- .mode_filter = ata_bmdma_mode_filter,
.bmdma_setup = pata_macio_bmdma_setup,
.bmdma_start = pata_macio_bmdma_start,
.bmdma_stop = pata_macio_bmdma_stop,
@@ -1109,7 +1110,7 @@ static int __devinit pata_macio_common_init(struct pata_macio_priv *priv,
/* Start it up */
priv->irq = irq;
- return ata_host_activate(priv->host, irq, ata_sff_interrupt, 0,
+ return ata_host_activate(priv->host, irq, ata_bmdma_interrupt, 0,
&pata_macio_sht);
}
@@ -1139,7 +1140,7 @@ static int __devinit pata_macio_attach(struct macio_dev *mdev,
"Failed to allocate private memory\n");
return -ENOMEM;
}
- priv->node = of_node_get(mdev->ofdev.node);
+ priv->node = of_node_get(mdev->ofdev.dev.of_node);
priv->mdev = mdev;
priv->dev = &mdev->ofdev.dev;
diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c
index e8ca02e5a71d..dd38083dcbeb 100644
--- a/drivers/ata/pata_marvell.c
+++ b/drivers/ata/pata_marvell.c
@@ -153,7 +153,7 @@ static int marvell_init_one (struct pci_dev *pdev, const struct pci_device_id *i
return -ENODEV;
}
#endif
- return ata_pci_sff_init_one(pdev, ppi, &marvell_sht, NULL, 0);
+ return ata_pci_bmdma_init_one(pdev, ppi, &marvell_sht, NULL, 0);
}
static const struct pci_device_id marvell_pci_tbl[] = {
diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
index 9f5b053611dd..f087ab55b1df 100644
--- a/drivers/ata/pata_mpc52xx.c
+++ b/drivers/ata/pata_mpc52xx.c
@@ -64,13 +64,13 @@ struct mpc52xx_ata_priv {
/* ATAPI-4 PIO specs (in ns) */
-static const int ataspec_t0[5] = {600, 383, 240, 180, 120};
-static const int ataspec_t1[5] = { 70, 50, 30, 30, 25};
-static const int ataspec_t2_8[5] = {290, 290, 290, 80, 70};
-static const int ataspec_t2_16[5] = {165, 125, 100, 80, 70};
-static const int ataspec_t2i[5] = { 0, 0, 0, 70, 25};
-static const int ataspec_t4[5] = { 30, 20, 15, 10, 10};
-static const int ataspec_ta[5] = { 35, 35, 35, 35, 35};
+static const u16 ataspec_t0[5] = {600, 383, 240, 180, 120};
+static const u16 ataspec_t1[5] = { 70, 50, 30, 30, 25};
+static const u16 ataspec_t2_8[5] = {290, 290, 290, 80, 70};
+static const u16 ataspec_t2_16[5] = {165, 125, 100, 80, 70};
+static const u16 ataspec_t2i[5] = { 0, 0, 0, 70, 25};
+static const u16 ataspec_t4[5] = { 30, 20, 15, 10, 10};
+static const u16 ataspec_ta[5] = { 35, 35, 35, 35, 35};
#define CALC_CLKCYC(c,v) ((((v)+(c)-1)/(c)))
@@ -78,13 +78,13 @@ static const int ataspec_ta[5] = { 35, 35, 35, 35, 35};
/* ATAPI-4 MDMA specs (in clocks) */
struct mdmaspec {
- u32 t0M;
- u32 td;
- u32 th;
- u32 tj;
- u32 tkw;
- u32 tm;
- u32 tn;
+ u8 t0M;
+ u8 td;
+ u8 th;
+ u8 tj;
+ u8 tkw;
+ u8 tm;
+ u8 tn;
};
static const struct mdmaspec mdmaspec66[3] = {
@@ -101,23 +101,23 @@ static const struct mdmaspec mdmaspec132[3] = {
/* ATAPI-4 UDMA specs (in clocks) */
struct udmaspec {
- u32 tcyc;
- u32 t2cyc;
- u32 tds;
- u32 tdh;
- u32 tdvs;
- u32 tdvh;
- u32 tfs;
- u32 tli;
- u32 tmli;
- u32 taz;
- u32 tzah;
- u32 tenv;
- u32 tsr;
- u32 trfs;
- u32 trp;
- u32 tack;
- u32 tss;
+ u8 tcyc;
+ u8 t2cyc;
+ u8 tds;
+ u8 tdh;
+ u8 tdvs;
+ u8 tdvh;
+ u8 tfs;
+ u8 tli;
+ u8 tmli;
+ u8 taz;
+ u8 tzah;
+ u8 tenv;
+ u8 tsr;
+ u8 trfs;
+ u8 trp;
+ u8 tack;
+ u8 tss;
};
static const struct udmaspec udmaspec66[6] = {
@@ -270,7 +270,7 @@ mpc52xx_ata_compute_pio_timings(struct mpc52xx_ata_priv *priv, int dev, int pio)
{
struct mpc52xx_ata_timings *timing = &priv->timings[dev];
unsigned int ipb_period = priv->ipb_period;
- unsigned int t0, t1, t2_8, t2_16, t2i, t4, ta;
+ u32 t0, t1, t2_8, t2_16, t2i, t4, ta;
if ((pio < 0) || (pio > 4))
return -EINVAL;
@@ -299,8 +299,8 @@ mpc52xx_ata_compute_mdma_timings(struct mpc52xx_ata_priv *priv, int dev,
if (speed < 0 || speed > 2)
return -EINVAL;
- t->mdma1 = (s->t0M << 24) | (s->td << 16) | (s->tkw << 8) | (s->tm);
- t->mdma2 = (s->th << 24) | (s->tj << 16) | (s->tn << 8);
+ t->mdma1 = ((u32)s->t0M << 24) | ((u32)s->td << 16) | ((u32)s->tkw << 8) | s->tm;
+ t->mdma2 = ((u32)s->th << 24) | ((u32)s->tj << 16) | ((u32)s->tn << 8);
t->using_udma = 0;
return 0;
@@ -316,11 +316,11 @@ mpc52xx_ata_compute_udma_timings(struct mpc52xx_ata_priv *priv, int dev,
if (speed < 0 || speed > 2)
return -EINVAL;
- t->udma1 = (s->t2cyc << 24) | (s->tcyc << 16) | (s->tds << 8) | s->tdh;
- t->udma2 = (s->tdvs << 24) | (s->tdvh << 16) | (s->tfs << 8) | s->tli;
- t->udma3 = (s->tmli << 24) | (s->taz << 16) | (s->tenv << 8) | s->tsr;
- t->udma4 = (s->tss << 24) | (s->trfs << 16) | (s->trp << 8) | s->tack;
- t->udma5 = (s->tzah << 24);
+ t->udma1 = ((u32)s->t2cyc << 24) | ((u32)s->tcyc << 16) | ((u32)s->tds << 8) | s->tdh;
+ t->udma2 = ((u32)s->tdvs << 24) | ((u32)s->tdvh << 16) | ((u32)s->tfs << 8) | s->tli;
+ t->udma3 = ((u32)s->tmli << 24) | ((u32)s->taz << 16) | ((u32)s->tenv << 8) | s->tsr;
+ t->udma4 = ((u32)s->tss << 24) | ((u32)s->trfs << 16) | ((u32)s->trp << 8) | s->tack;
+ t->udma5 = (u32)s->tzah << 24;
t->using_udma = 1;
return 0;
@@ -659,7 +659,7 @@ mpc52xx_ata_init_one(struct device *dev, struct mpc52xx_ata_priv *priv,
ata_port_desc(ap, "ata_regs 0x%lx", raw_ata_regs);
/* activate host */
- return ata_host_activate(host, priv->ata_irq, ata_sff_interrupt, 0,
+ return ata_host_activate(host, priv->ata_irq, ata_bmdma_interrupt, 0,
&mpc52xx_ata_sht);
}
@@ -694,7 +694,7 @@ mpc52xx_ata_probe(struct of_device *op, const struct of_device_id *match)
struct bcom_task *dmatsk = NULL;
/* Get ipb frequency */
- ipb_freq = mpc5xxx_get_bus_frequency(op->node);
+ ipb_freq = mpc5xxx_get_bus_frequency(op->dev.of_node);
if (!ipb_freq) {
dev_err(&op->dev, "could not determine IPB bus frequency\n");
return -ENODEV;
@@ -702,7 +702,7 @@ mpc52xx_ata_probe(struct of_device *op, const struct of_device_id *match)
/* Get device base address from device tree, request the region
* and ioremap it. */
- rv = of_address_to_resource(op->node, 0, &res_mem);
+ rv = of_address_to_resource(op->dev.of_node, 0, &res_mem);
if (rv) {
dev_err(&op->dev, "could not determine device base address\n");
return rv;
@@ -735,14 +735,14 @@ mpc52xx_ata_probe(struct of_device *op, const struct of_device_id *match)
* The MPC5200 ATA controller supports MWDMA modes 0, 1 and 2 and
* UDMA modes 0, 1 and 2.
*/
- prop = of_get_property(op->node, "mwdma-mode", &proplen);
+ prop = of_get_property(op->dev.of_node, "mwdma-mode", &proplen);
if ((prop) && (proplen >= 4))
mwdma_mask = ATA_MWDMA2 & ((1 << (*prop + 1)) - 1);
- prop = of_get_property(op->node, "udma-mode", &proplen);
+ prop = of_get_property(op->dev.of_node, "udma-mode", &proplen);
if ((prop) && (proplen >= 4))
udma_mask = ATA_UDMA2 & ((1 << (*prop + 1)) - 1);
- ata_irq = irq_of_parse_and_map(op->node, 0);
+ ata_irq = irq_of_parse_and_map(op->dev.of_node, 0);
if (ata_irq == NO_IRQ) {
dev_err(&op->dev, "error mapping irq\n");
return -EINVAL;
@@ -884,9 +884,6 @@ static struct of_device_id mpc52xx_ata_of_match[] = {
static struct of_platform_driver mpc52xx_ata_of_platform_driver = {
- .owner = THIS_MODULE,
- .name = DRV_NAME,
- .match_table = mpc52xx_ata_of_match,
.probe = mpc52xx_ata_probe,
.remove = mpc52xx_ata_remove,
#ifdef CONFIG_PM
@@ -896,6 +893,7 @@ static struct of_platform_driver mpc52xx_ata_of_platform_driver = {
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
+ .of_match_table = mpc52xx_ata_of_match,
},
};
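
One note on the mpc52xx timing changes above: shrinking the mdmaspec/udmaspec fields from u32 to u8 is what makes the explicit (u32) casts in front of the shifts necessary. A small illustration (my reading of the change, not code from this patch):

/* A u8 operand is promoted to signed int before <<, so shifting a value
 * with its top bit set by 24 lands in the sign bit: formally undefined
 * behaviour for signed int. Casting to u32 first keeps the shift unsigned.
 */
u8  t0M  = 0xF0;
u32 bad  = t0M << 24;		/* 240 << 24 does not fit in a signed int */
u32 good = (u32)t0M << 24;	/* well-defined: 0xF0000000 */

With the old u32 fields the shifts were already unsigned, which is why no casts were needed before.
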
diff --git a/drivers/ata/pata_netcell.c b/drivers/ata/pata_netcell.c
index 94f979a7f4f7..3eb921c746a1 100644
--- a/drivers/ata/pata_netcell.c
+++ b/drivers/ata/pata_netcell.c
@@ -82,7 +82,7 @@ static int netcell_init_one (struct pci_dev *pdev, const struct pci_device_id *e
ata_pci_bmdma_clear_simplex(pdev);
/* And let the library code do the work */
- return ata_pci_sff_init_one(pdev, port_info, &netcell_sht, NULL, 0);
+ return ata_pci_bmdma_init_one(pdev, port_info, &netcell_sht, NULL, 0);
}
static const struct pci_device_id netcell_pci_tbl[] = {
diff --git a/drivers/ata/pata_ninja32.c b/drivers/ata/pata_ninja32.c
index dd53a66b19e3..cc50bd09aa26 100644
--- a/drivers/ata/pata_ninja32.c
+++ b/drivers/ata/pata_ninja32.c
@@ -149,7 +149,7 @@ static int ninja32_init_one(struct pci_dev *dev, const struct pci_device_id *id)
ninja32_program(base);
/* FIXME: Should we disable them at remove ? */
- return ata_host_activate(host, dev->irq, ata_sff_interrupt,
+ return ata_host_activate(host, dev->irq, ata_bmdma_interrupt,
IRQF_SHARED, &ninja32_sht);
}
diff --git a/drivers/ata/pata_ns87415.c b/drivers/ata/pata_ns87415.c
index 830431f036a1..605f198f958c 100644
--- a/drivers/ata/pata_ns87415.c
+++ b/drivers/ata/pata_ns87415.c
@@ -126,7 +126,7 @@ static void ns87415_bmdma_setup(struct ata_queued_cmd *qc)
/* load PRD table addr. */
mb(); /* make sure PRD table writes are visible to controller */
- iowrite32(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
+ iowrite32(ap->bmdma_prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
/* specify data direction, triple-check start bit is clear */
dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
@@ -380,7 +380,7 @@ static int ns87415_init_one (struct pci_dev *pdev, const struct pci_device_id *e
ns87415_fixup(pdev);
- return ata_pci_sff_init_one(pdev, ppi, &ns87415_sht, NULL, 0);
+ return ata_pci_bmdma_init_one(pdev, ppi, &ns87415_sht, NULL, 0);
}
static const struct pci_device_id ns87415_pci_tbl[] = {
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index 005a44483a7b..06ddd91ffeda 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -489,9 +489,8 @@ static void octeon_cf_exec_command16(struct ata_port *ap,
ata_wait_idle(ap);
}
-static u8 octeon_cf_irq_on(struct ata_port *ap)
+static void octeon_cf_irq_on(struct ata_port *ap)
{
- return 0;
}
static void octeon_cf_irq_clear(struct ata_port *ap)
@@ -655,9 +654,6 @@ static irqreturn_t octeon_cf_interrupt(int irq, void *dev_instance)
ap = host->ports[i];
ocd = ap->dev->platform_data;
- if (ap->flags & ATA_FLAG_DISABLED)
- continue;
-
ocd = ap->dev->platform_data;
cf_port = ap->private_data;
dma_int.u64 =
@@ -667,8 +663,7 @@ static irqreturn_t octeon_cf_interrupt(int irq, void *dev_instance)
qc = ata_qc_from_tag(ap, ap->link.active_tag);
- if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
- (qc->flags & ATA_QCFLAG_ACTIVE)) {
+ if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING)) {
if (dma_int.s.done && !dma_cfg.s.en) {
if (!sg_is_last(qc->cursg)) {
qc->cursg = sg_next(qc->cursg);
@@ -738,8 +733,7 @@ static void octeon_cf_delayed_finish(struct work_struct *work)
goto out;
}
qc = ata_qc_from_tag(ap, ap->link.active_tag);
- if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
- (qc->flags & ATA_QCFLAG_ACTIVE))
+ if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING))
octeon_cf_dma_finished(ap, qc);
out:
spin_unlock_irqrestore(&host->lock, flags);
@@ -756,20 +750,6 @@ static void octeon_cf_dev_config(struct ata_device *dev)
}
/*
- * Trap if driver tries to do standard bmdma commands. They are not
- * supported.
- */
-static void unreachable_qc(struct ata_queued_cmd *qc)
-{
- BUG();
-}
-
-static u8 unreachable_port(struct ata_port *ap)
-{
- BUG();
-}
-
-/*
* We don't do ATAPI DMA so return 0.
*/
static int octeon_cf_check_atapi_dma(struct ata_queued_cmd *qc)
@@ -810,10 +790,6 @@ static struct ata_port_operations octeon_cf_ops = {
.sff_dev_select = octeon_cf_dev_select,
.sff_irq_on = octeon_cf_irq_on,
.sff_irq_clear = octeon_cf_irq_clear,
- .bmdma_setup = unreachable_qc,
- .bmdma_start = unreachable_qc,
- .bmdma_stop = unreachable_qc,
- .bmdma_status = unreachable_port,
.cable_detect = ata_cable_40wire,
.set_piomode = octeon_cf_set_piomode,
.set_dmamode = octeon_cf_set_dmamode,
diff --git a/drivers/ata/pata_of_platform.c b/drivers/ata/pata_of_platform.c
index 1f18ad9e4fe1..5a1b82c08be9 100644
--- a/drivers/ata/pata_of_platform.c
+++ b/drivers/ata/pata_of_platform.c
@@ -18,7 +18,7 @@ static int __devinit pata_of_platform_probe(struct of_device *ofdev,
const struct of_device_id *match)
{
int ret;
- struct device_node *dn = ofdev->node;
+ struct device_node *dn = ofdev->dev.of_node;
struct resource io_res;
struct resource ctl_res;
struct resource irq_res;
@@ -91,8 +91,11 @@ static struct of_device_id pata_of_platform_match[] = {
MODULE_DEVICE_TABLE(of, pata_of_platform_match);
static struct of_platform_driver pata_of_platform_driver = {
- .name = "pata_of_platform",
- .match_table = pata_of_platform_match,
+ .driver = {
+ .name = "pata_of_platform",
+ .owner = THIS_MODULE,
+ .of_match_table = pata_of_platform_match,
+ },
.probe = pata_of_platform_probe,
.remove = __devexit_p(pata_of_platform_remove),
};
diff --git a/drivers/ata/pata_oldpiix.c b/drivers/ata/pata_oldpiix.c
index 5f6aba7eb0dd..b811c1636204 100644
--- a/drivers/ata/pata_oldpiix.c
+++ b/drivers/ata/pata_oldpiix.c
@@ -200,7 +200,7 @@ static unsigned int oldpiix_qc_issue(struct ata_queued_cmd *qc)
if (ata_dma_enabled(adev))
oldpiix_set_dmamode(ap, adev);
}
- return ata_sff_qc_issue(qc);
+ return ata_bmdma_qc_issue(qc);
}
@@ -248,7 +248,7 @@ static int oldpiix_init_one (struct pci_dev *pdev, const struct pci_device_id *e
dev_printk(KERN_DEBUG, &pdev->dev,
"version " DRV_VERSION "\n");
- return ata_pci_sff_init_one(pdev, ppi, &oldpiix_sht, NULL, 0);
+ return ata_pci_bmdma_init_one(pdev, ppi, &oldpiix_sht, NULL, 0);
}
static const struct pci_device_id oldpiix_pci_tbl[] = {
diff --git a/drivers/ata/pata_optidma.c b/drivers/ata/pata_optidma.c
index 76b7d12b1e8d..0852cd07de08 100644
--- a/drivers/ata/pata_optidma.c
+++ b/drivers/ata/pata_optidma.c
@@ -429,7 +429,7 @@ static int optidma_init_one(struct pci_dev *dev, const struct pci_device_id *id)
if (optiplus_with_udma(dev))
ppi[0] = &info_82c700_udma;
- return ata_pci_sff_init_one(dev, ppi, &optidma_sht, NULL, 0);
+ return ata_pci_bmdma_init_one(dev, ppi, &optidma_sht, NULL, 0);
}
static const struct pci_device_id optidma[] = {
diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
index d94b8f0bd743..118c28e8abaf 100644
--- a/drivers/ata/pata_pcmcia.c
+++ b/drivers/ata/pata_pcmcia.c
@@ -45,16 +45,6 @@
#define DRV_NAME "pata_pcmcia"
#define DRV_VERSION "0.3.5"
-/*
- * Private data structure to glue stuff together
- */
-
-struct ata_pcmcia_info {
- struct pcmcia_device *pdev;
- int ndev;
- dev_node_t node;
-};
-
/**
* pcmcia_set_mode - PCMCIA specific mode setup
* @link: link
@@ -175,7 +165,7 @@ static struct ata_port_operations pcmcia_8bit_port_ops = {
.sff_data_xfer = ata_data_xfer_8bit,
.cable_detect = ata_cable_40wire,
.set_mode = pcmcia_set_mode_8bit,
- .drain_fifo = pcmcia_8bit_drain_fifo,
+ .sff_drain_fifo = pcmcia_8bit_drain_fifo,
};
@@ -248,7 +238,6 @@ static int pcmcia_init_one(struct pcmcia_device *pdev)
{
struct ata_host *host;
struct ata_port *ap;
- struct ata_pcmcia_info *info;
struct pcmcia_config_check *stk = NULL;
int is_kme = 0, ret = -ENOMEM, p;
unsigned long io_base, ctl_base;
@@ -256,19 +245,10 @@ static int pcmcia_init_one(struct pcmcia_device *pdev)
int n_ports = 1;
struct ata_port_operations *ops = &pcmcia_port_ops;
- info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (info == NULL)
- return -ENOMEM;
-
- /* Glue stuff together. FIXME: We may be able to get rid of info with care */
- info->pdev = pdev;
- pdev->priv = info;
-
/* Set up attributes in order to probe card and get resources */
pdev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
pdev->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
pdev->io.IOAddrLines = 3;
- pdev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
pdev->conf.Attributes = CONF_ENABLE_IRQ;
pdev->conf.IntType = INT_MEMORY_AND_IO;
@@ -293,8 +273,7 @@ static int pcmcia_init_one(struct pcmcia_device *pdev)
}
io_base = pdev->io.BasePort1;
ctl_base = stk->ctl_base;
- ret = pcmcia_request_irq(pdev, &pdev->irq);
- if (ret)
+ if (!pdev->irq)
goto failed;
ret = pcmcia_request_configuration(pdev, &pdev->conf);
@@ -344,21 +323,19 @@ static int pcmcia_init_one(struct pcmcia_device *pdev)
}
/* activate */
- ret = ata_host_activate(host, pdev->irq.AssignedIRQ, ata_sff_interrupt,
+ ret = ata_host_activate(host, pdev->irq, ata_sff_interrupt,
IRQF_SHARED, &pcmcia_sht);
if (ret)
goto failed;
- info->ndev = 1;
+ pdev->priv = host;
kfree(stk);
return 0;
failed:
kfree(stk);
- info->ndev = 0;
pcmcia_disable_device(pdev);
out1:
- kfree(info);
return ret;
}
@@ -372,20 +349,12 @@ out1:
static void pcmcia_remove_one(struct pcmcia_device *pdev)
{
- struct ata_pcmcia_info *info = pdev->priv;
- struct device *dev = &pdev->dev;
-
- if (info != NULL) {
- /* If we have attached the device to the ATA layer, detach it */
- if (info->ndev) {
- struct ata_host *host = dev_get_drvdata(dev);
- ata_host_detach(host);
- }
- info->ndev = 0;
- pdev->priv = NULL;
- }
+ struct ata_host *host = pdev->priv;
+
+ if (host)
+ ata_host_detach(host);
+
pcmcia_disable_device(pdev);
- kfree(info);
}
static struct pcmcia_device_id pcmcia_devices[] = {
diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
index ca5cad0fd80b..b18351122525 100644
--- a/drivers/ata/pata_pdc2027x.c
+++ b/drivers/ata/pata_pdc2027x.c
@@ -265,7 +265,7 @@ static unsigned long pdc2027x_mode_filter(struct ata_device *adev, unsigned long
struct ata_device *pair = ata_dev_pair(adev);
if (adev->class != ATA_DEV_ATA || adev->devno == 0 || pair == NULL)
- return ata_bmdma_mode_filter(adev, mask);
+ return mask;
/* Check for slave of a Maxtor at UDMA6 */
ata_id_c_string(pair->id, model_num, ATA_ID_PROD,
@@ -274,7 +274,7 @@ static unsigned long pdc2027x_mode_filter(struct ata_device *adev, unsigned long
if (strstr(model_num, "Maxtor") == NULL && pair->dma_mode == XFER_UDMA_6)
mask &= ~ (1 << (6 + ATA_SHIFT_UDMA));
- return ata_bmdma_mode_filter(adev, mask);
+ return mask;
}
/**
@@ -754,7 +754,7 @@ static int __devinit pdc2027x_init_one(struct pci_dev *pdev, const struct pci_de
return -EIO;
pci_set_master(pdev);
- return ata_host_activate(host, pdev->irq, ata_sff_interrupt,
+ return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
IRQF_SHARED, &pdc2027x_sht);
}
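
The many one-line ata_sff_* to ata_bmdma_* substitutions in the surrounding hunks follow a single pattern: drivers that depend on the BMDMA helpers switch to the bmdma-specific init, interrupt and qc entry points, leaving the plain ata_sff_* paths to PIO-only hardware. Below is a minimal sketch of the converted PCI init path, not part of the patch; "foo" names are hypothetical and the sketch assumes ata_pci_bmdma_init_one() keeps the same five-argument form as the ata_pci_sff_init_one() calls it replaces above.

/* Illustrative sketch only -- not part of the patch above or below. */
#include <linux/pci.h>
#include <linux/libata.h>

static struct scsi_host_template foo_sht = {
	ATA_BMDMA_SHT("pata_foo"),
};

static struct ata_port_operations foo_port_ops = {
	.inherits	= &ata_bmdma_port_ops,
	.cable_detect	= ata_cable_40wire,
};

static int foo_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	static const struct ata_port_info info = {
		.flags		= ATA_FLAG_SLAVE_POSS,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA5,
		.port_ops	= &foo_port_ops,
	};
	const struct ata_port_info *ppi[] = { &info, NULL };

	/* enables the PCI device, allocates the BMDMA resources and
	 * registers the host with ata_bmdma_interrupt() as the handler */
	return ata_pci_bmdma_init_one(pdev, ppi, &foo_sht, NULL, 0);
}
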
diff --git a/drivers/ata/pata_pdc202xx_old.c b/drivers/ata/pata_pdc202xx_old.c
index 9ac0897cf8b0..c39f213e1bbc 100644
--- a/drivers/ata/pata_pdc202xx_old.c
+++ b/drivers/ata/pata_pdc202xx_old.c
@@ -249,7 +249,7 @@ static int pdc2026x_port_start(struct ata_port *ap)
u8 burst = ioread8(bmdma + 0x1f);
iowrite8(burst | 0x01, bmdma + 0x1f);
}
- return ata_sff_port_start(ap);
+ return ata_bmdma_port_start(ap);
}
/**
@@ -337,7 +337,7 @@ static int pdc202xx_init_one(struct pci_dev *dev, const struct pci_device_id *id
return -ENODEV;
}
}
- return ata_pci_sff_init_one(dev, ppi, &pdc202xx_sht, NULL, 0);
+ return ata_pci_bmdma_init_one(dev, ppi, &pdc202xx_sht, NULL, 0);
}
static const struct pci_device_id pdc202xx[] = {
diff --git a/drivers/ata/pata_piccolo.c b/drivers/ata/pata_piccolo.c
index 981615414849..cb01bf9496fe 100644
--- a/drivers/ata/pata_piccolo.c
+++ b/drivers/ata/pata_piccolo.c
@@ -95,7 +95,7 @@ static int ata_tosh_init_one(struct pci_dev *dev, const struct pci_device_id *id
};
const struct ata_port_info *ppi[] = { &info, &ata_dummy_port_info };
/* Just one port for the moment */
- return ata_pci_sff_init_one(dev, ppi, &tosh_sht, NULL, 0);
+ return ata_pci_bmdma_init_one(dev, ppi, &tosh_sht, NULL, 0);
}
static struct pci_device_id ata_tosh[] = {
diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c
index 3f6ebc6c665a..50400fa120fe 100644
--- a/drivers/ata/pata_platform.c
+++ b/drivers/ata/pata_platform.c
@@ -53,7 +53,6 @@ static struct ata_port_operations pata_platform_port_ops = {
.sff_data_xfer = ata_sff_data_xfer_noirq,
.cable_detect = ata_cable_unknown,
.set_mode = pata_platform_set_mode,
- .port_start = ATA_OP_NULL,
};
static void pata_platform_setup_port(struct ata_ioports *ioaddr,
diff --git a/drivers/ata/pata_radisys.c b/drivers/ata/pata_radisys.c
index fc9602229acb..8574b31f1773 100644
--- a/drivers/ata/pata_radisys.c
+++ b/drivers/ata/pata_radisys.c
@@ -179,7 +179,7 @@ static unsigned int radisys_qc_issue(struct ata_queued_cmd *qc)
radisys_set_piomode(ap, adev);
}
}
- return ata_sff_qc_issue(qc);
+ return ata_bmdma_qc_issue(qc);
}
@@ -227,7 +227,7 @@ static int radisys_init_one (struct pci_dev *pdev, const struct pci_device_id *e
dev_printk(KERN_DEBUG, &pdev->dev,
"version " DRV_VERSION "\n");
- return ata_pci_sff_init_one(pdev, ppi, &radisys_sht, NULL, 0);
+ return ata_pci_bmdma_init_one(pdev, ppi, &radisys_sht, NULL, 0);
}
static const struct pci_device_id radisys_pci_tbl[] = {
diff --git a/drivers/ata/pata_rdc.c b/drivers/ata/pata_rdc.c
index 37092cfd7bc6..5fbe9b166c69 100644
--- a/drivers/ata/pata_rdc.c
+++ b/drivers/ata/pata_rdc.c
@@ -344,7 +344,7 @@ static int __devinit rdc_init_one(struct pci_dev *pdev,
*/
pci_read_config_dword(pdev, 0x54, &hpriv->saved_iocfg);
- rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
+ rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
if (rc)
return rc;
host->private_data = hpriv;
@@ -354,7 +354,7 @@ static int __devinit rdc_init_one(struct pci_dev *pdev,
host->flags |= ATA_HOST_PARALLEL_SCAN;
pci_set_master(pdev);
- return ata_pci_sff_activate_host(host, ata_sff_interrupt, &rdc_sht);
+ return ata_pci_sff_activate_host(host, ata_bmdma_interrupt, &rdc_sht);
}
static void rdc_remove_one(struct pci_dev *pdev)
diff --git a/drivers/ata/pata_sc1200.c b/drivers/ata/pata_sc1200.c
index dfecc6f964b0..e2c18257adff 100644
--- a/drivers/ata/pata_sc1200.c
+++ b/drivers/ata/pata_sc1200.c
@@ -174,7 +174,7 @@ static unsigned int sc1200_qc_issue(struct ata_queued_cmd *qc)
sc1200_set_dmamode(ap, adev);
}
- return ata_sff_qc_issue(qc);
+ return ata_bmdma_qc_issue(qc);
}
/**
@@ -209,7 +209,7 @@ static struct scsi_host_template sc1200_sht = {
static struct ata_port_operations sc1200_port_ops = {
.inherits = &ata_bmdma_port_ops,
- .qc_prep = ata_sff_dumb_qc_prep,
+ .qc_prep = ata_bmdma_dumb_qc_prep,
.qc_issue = sc1200_qc_issue,
.qc_defer = sc1200_qc_defer,
.cable_detect = ata_cable_40wire,
@@ -237,7 +237,7 @@ static int sc1200_init_one(struct pci_dev *dev, const struct pci_device_id *id)
};
const struct ata_port_info *ppi[] = { &info, NULL };
- return ata_pci_sff_init_one(dev, ppi, &sc1200_sht, NULL, 0);
+ return ata_pci_bmdma_init_one(dev, ppi, &sc1200_sht, NULL, 0);
}
static const struct pci_device_id sc1200[] = {
diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c
index 4257d6b40af4..d9db3f8d60ef 100644
--- a/drivers/ata/pata_scc.c
+++ b/drivers/ata/pata_scc.c
@@ -265,7 +265,7 @@ unsigned long scc_mode_filter(struct ata_device *adev, unsigned long mask)
printk(KERN_INFO "%s: limit ATAPI UDMA to UDMA4\n", DRV_NAME);
mask &= ~(0xE0 << ATA_SHIFT_UDMA);
}
- return ata_bmdma_mode_filter(adev, mask);
+ return mask;
}
/**
@@ -416,6 +416,17 @@ static void scc_dev_select (struct ata_port *ap, unsigned int device)
}
/**
+ * scc_set_devctl - Write device control reg
+ * @ap: port where the device is
+ * @ctl: value to write
+ */
+
+static void scc_set_devctl(struct ata_port *ap, u8 ctl)
+{
+ out_be32(ap->ioaddr.ctl_addr, ctl);
+}
+
+/**
* scc_bmdma_setup - Set up PCI IDE BMDMA transaction
* @qc: Info associated with this ATA transaction.
*
@@ -430,7 +441,7 @@ static void scc_bmdma_setup (struct ata_queued_cmd *qc)
void __iomem *mmio = ap->ioaddr.bmdma_addr;
/* load PRD table addr */
- out_be32(mmio + SCC_DMA_TABLE_OFS, ap->prd_dma);
+ out_be32(mmio + SCC_DMA_TABLE_OFS, ap->bmdma_prd_dma);
/* specify data direction, triple-check start bit is clear */
dmactl = in_be32(mmio + SCC_DMA_CMD);
@@ -501,8 +512,8 @@ static unsigned int scc_devchk (struct ata_port *ap,
* Note: Original code is ata_sff_wait_after_reset
*/
-int scc_wait_after_reset(struct ata_link *link, unsigned int devmask,
- unsigned long deadline)
+static int scc_wait_after_reset(struct ata_link *link, unsigned int devmask,
+ unsigned long deadline)
{
struct ata_port *ap = link->ap;
struct ata_ioports *ioaddr = &ap->ioaddr;
@@ -817,54 +828,6 @@ static unsigned int scc_data_xfer (struct ata_device *dev, unsigned char *buf,
}
/**
- * scc_irq_on - Enable interrupts on a port.
- * @ap: Port on which interrupts are enabled.
- *
- * Note: Original code is ata_sff_irq_on().
- */
-
-static u8 scc_irq_on (struct ata_port *ap)
-{
- struct ata_ioports *ioaddr = &ap->ioaddr;
- u8 tmp;
-
- ap->ctl &= ~ATA_NIEN;
- ap->last_ctl = ap->ctl;
-
- out_be32(ioaddr->ctl_addr, ap->ctl);
- tmp = ata_wait_idle(ap);
-
- ap->ops->sff_irq_clear(ap);
-
- return tmp;
-}
-
-/**
- * scc_freeze - Freeze BMDMA controller port
- * @ap: port to freeze
- *
- * Note: Original code is ata_sff_freeze().
- */
-
-static void scc_freeze (struct ata_port *ap)
-{
- struct ata_ioports *ioaddr = &ap->ioaddr;
-
- ap->ctl |= ATA_NIEN;
- ap->last_ctl = ap->ctl;
-
- out_be32(ioaddr->ctl_addr, ap->ctl);
-
- /* Under certain circumstances, some controllers raise IRQ on
- * ATA_NIEN manipulation. Also, many controllers fail to mask
- * previously pending IRQ on ATA_NIEN assertion. Clear it.
- */
- ap->ops->sff_check_status(ap);
-
- ap->ops->sff_irq_clear(ap);
-}
-
-/**
* scc_pata_prereset - prepare for reset
* @ap: ATA port to be reset
* @deadline: deadline jiffies for the operation
@@ -903,8 +866,7 @@ static void scc_postreset(struct ata_link *link, unsigned int *classes)
}
/* set up device control */
- if (ap->ioaddr.ctl_addr)
- out_be32(ap->ioaddr.ctl_addr, ap->ctl);
+ out_be32(ap->ioaddr.ctl_addr, ap->ctl);
DPRINTK("EXIT\n");
}
@@ -913,7 +875,7 @@ static void scc_postreset(struct ata_link *link, unsigned int *classes)
* scc_irq_clear - Clear PCI IDE BMDMA interrupt.
* @ap: Port associated with this ATA transaction.
*
- * Note: Original code is ata_sff_irq_clear().
+ * Note: Original code is ata_bmdma_irq_clear().
*/
static void scc_irq_clear (struct ata_port *ap)
@@ -930,7 +892,7 @@ static void scc_irq_clear (struct ata_port *ap)
* scc_port_start - Set port up for dma.
* @ap: Port to initialize
*
- * Allocate space for PRD table using ata_port_start().
+ * Allocate space for PRD table using ata_bmdma_port_start().
* Set PRD table address for PTERADD. (PRD Transfer End Read)
*/
@@ -939,11 +901,11 @@ static int scc_port_start (struct ata_port *ap)
void __iomem *mmio = ap->ioaddr.bmdma_addr;
int rc;
- rc = ata_port_start(ap);
+ rc = ata_bmdma_port_start(ap);
if (rc)
return rc;
- out_be32(mmio + SCC_DMA_PTERADD, ap->prd_dma);
+ out_be32(mmio + SCC_DMA_PTERADD, ap->bmdma_prd_dma);
return 0;
}
@@ -978,6 +940,7 @@ static struct ata_port_operations scc_pata_ops = {
.sff_check_status = scc_check_status,
.sff_check_altstatus = scc_check_altstatus,
.sff_dev_select = scc_dev_select,
+ .sff_set_devctl = scc_set_devctl,
.bmdma_setup = scc_bmdma_setup,
.bmdma_start = scc_bmdma_start,
@@ -985,14 +948,11 @@ static struct ata_port_operations scc_pata_ops = {
.bmdma_status = scc_bmdma_status,
.sff_data_xfer = scc_data_xfer,
- .freeze = scc_freeze,
.prereset = scc_pata_prereset,
.softreset = scc_softreset,
.postreset = scc_postreset,
- .post_internal_cmd = scc_bmdma_stop,
.sff_irq_clear = scc_irq_clear,
- .sff_irq_on = scc_irq_on,
.port_start = scc_port_start,
.port_stop = scc_port_stop,
@@ -1145,7 +1105,7 @@ static int scc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
return rc;
- return ata_host_activate(host, pdev->irq, ata_sff_interrupt,
+ return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
IRQF_SHARED, &scc_sht);
}
diff --git a/drivers/ata/pata_sch.c b/drivers/ata/pata_sch.c
index 99cceb458e2a..e97b32f03a6e 100644
--- a/drivers/ata/pata_sch.c
+++ b/drivers/ata/pata_sch.c
@@ -174,22 +174,12 @@ static int __devinit sch_init_one(struct pci_dev *pdev,
{
static int printed_version;
const struct ata_port_info *ppi[] = { &sch_port_info, NULL };
- struct ata_host *host;
- int rc;
if (!printed_version++)
dev_printk(KERN_DEBUG, &pdev->dev,
"version " DRV_VERSION "\n");
- /* enable device and prepare host */
- rc = pcim_enable_device(pdev);
- if (rc)
- return rc;
- rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
- if (rc)
- return rc;
- pci_set_master(pdev);
- return ata_pci_sff_activate_host(host, ata_sff_interrupt, &sch_sht);
+ return ata_pci_bmdma_init_one(pdev, ppi, &sch_sht, NULL, 0);
}
static int __init sch_init(void)
diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c
index 9524d54035f7..86dd714e3e1d 100644
--- a/drivers/ata/pata_serverworks.c
+++ b/drivers/ata/pata_serverworks.c
@@ -198,7 +198,7 @@ static unsigned long serverworks_osb4_filter(struct ata_device *adev, unsigned l
{
if (adev->class == ATA_DEV_ATA)
mask &= ~ATA_MASK_UDMA;
- return ata_bmdma_mode_filter(adev, mask);
+ return mask;
}
@@ -218,7 +218,7 @@ static unsigned long serverworks_csb_filter(struct ata_device *adev, unsigned lo
/* Disk, UDMA */
if (adev->class != ATA_DEV_ATA)
- return ata_bmdma_mode_filter(adev, mask);
+ return mask;
/* Actually do need to check */
ata_id_c_string(adev->id, model_num, ATA_ID_PROD, sizeof(model_num));
@@ -227,7 +227,7 @@ static unsigned long serverworks_csb_filter(struct ata_device *adev, unsigned lo
if (!strcmp(p, model_num))
mask &= ~(0xE0 << ATA_SHIFT_UDMA);
}
- return ata_bmdma_mode_filter(adev, mask);
+ return mask;
}
/**
@@ -460,7 +460,7 @@ static int serverworks_init_one(struct pci_dev *pdev, const struct pci_device_id
if (pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB5IDE)
ata_pci_bmdma_clear_simplex(pdev);
- return ata_pci_sff_init_one(pdev, ppi, &serverworks_sht, NULL, 0);
+ return ata_pci_bmdma_init_one(pdev, ppi, &serverworks_sht, NULL, 0);
}
#ifdef CONFIG_PM
diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c
index c6c589c23ffc..d3190d7ec304 100644
--- a/drivers/ata/pata_sil680.c
+++ b/drivers/ata/pata_sil680.c
@@ -190,15 +190,37 @@ static void sil680_set_dmamode(struct ata_port *ap, struct ata_device *adev)
pci_write_config_word(pdev, ua, ultra);
}
+/**
+ * sil680_sff_exec_command - issue ATA command to host controller
+ * @ap: port to which command is being issued
+ * @tf: ATA taskfile register set
+ *
+ * Issues ATA command, with proper synchronization with interrupt
+ * handler / other threads. Use our MMIO space for PCI posting to avoid
+ * a hideously slow cycle all the way to the device.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+void sil680_sff_exec_command(struct ata_port *ap,
+ const struct ata_taskfile *tf)
+{
+ DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command);
+ iowrite8(tf->command, ap->ioaddr.command_addr);
+ ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+}
+
static struct scsi_host_template sil680_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
+
static struct ata_port_operations sil680_port_ops = {
- .inherits = &ata_bmdma32_port_ops,
- .cable_detect = sil680_cable_detect,
- .set_piomode = sil680_set_piomode,
- .set_dmamode = sil680_set_dmamode,
+ .inherits = &ata_bmdma32_port_ops,
+ .sff_exec_command = sil680_sff_exec_command,
+ .cable_detect = sil680_cable_detect,
+ .set_piomode = sil680_set_piomode,
+ .set_dmamode = sil680_set_dmamode,
};
/**
@@ -352,11 +374,11 @@ static int __devinit sil680_init_one(struct pci_dev *pdev,
ata_sff_std_ports(&host->ports[1]->ioaddr);
/* Register & activate */
- return ata_host_activate(host, pdev->irq, ata_sff_interrupt,
+ return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
IRQF_SHARED, &sil680_sht);
use_ioports:
- return ata_pci_sff_init_one(pdev, ppi, &sil680_sht, NULL, 0);
+ return ata_pci_bmdma_init_one(pdev, ppi, &sil680_sht, NULL, 0);
}
#ifdef CONFIG_PM
diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c
index b6708032f321..60cea13cccce 100644
--- a/drivers/ata/pata_sis.c
+++ b/drivers/ata/pata_sis.c
@@ -826,7 +826,7 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
sis_fixup(pdev, chipset);
- return ata_pci_sff_init_one(pdev, ppi, &sis_sht, chipset, 0);
+ return ata_pci_bmdma_init_one(pdev, ppi, &sis_sht, chipset, 0);
}
#ifdef CONFIG_PM
diff --git a/drivers/ata/pata_sl82c105.c b/drivers/ata/pata_sl82c105.c
index 733b042a7469..98548f640c8e 100644
--- a/drivers/ata/pata_sl82c105.c
+++ b/drivers/ata/pata_sl82c105.c
@@ -316,7 +316,7 @@ static int sl82c105_init_one(struct pci_dev *dev, const struct pci_device_id *id
val |= CTRL_P0EN | CTRL_P0F16 | CTRL_P1F16;
pci_write_config_dword(dev, 0x40, val);
- return ata_pci_sff_init_one(dev, ppi, &sl82c105_sht, NULL, 0);
+ return ata_pci_bmdma_init_one(dev, ppi, &sl82c105_sht, NULL, 0);
}
static const struct pci_device_id sl82c105[] = {
diff --git a/drivers/ata/pata_triflex.c b/drivers/ata/pata_triflex.c
index 48f50600ed2a..0d1f89e571dd 100644
--- a/drivers/ata/pata_triflex.c
+++ b/drivers/ata/pata_triflex.c
@@ -201,7 +201,7 @@ static int triflex_init_one(struct pci_dev *dev, const struct pci_device_id *id)
if (!printed_version++)
dev_printk(KERN_DEBUG, &dev->dev, "version " DRV_VERSION "\n");
- return ata_pci_sff_init_one(dev, ppi, &triflex_sht, NULL, 0);
+ return ata_pci_bmdma_init_one(dev, ppi, &triflex_sht, NULL, 0);
}
static const struct pci_device_id triflex[] = {
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
index 741e7cb69d8c..5e659885de16 100644
--- a/drivers/ata/pata_via.c
+++ b/drivers/ata/pata_via.c
@@ -355,7 +355,7 @@ static unsigned long via_mode_filter(struct ata_device *dev, unsigned long mask)
mask &= ~ ATA_MASK_UDMA;
}
}
- return ata_bmdma_mode_filter(dev, mask);
+ return mask;
}
/**
@@ -417,8 +417,6 @@ static void via_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
tf->lbam,
tf->lbah);
}
-
- ata_wait_idle(ap);
}
static int via_port_start(struct ata_port *ap)
@@ -426,7 +424,7 @@ static int via_port_start(struct ata_port *ap)
struct via_port *vp;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
- int ret = ata_sff_port_start(ap);
+ int ret = ata_bmdma_port_start(ap);
if (ret < 0)
return ret;
@@ -629,7 +627,7 @@ static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
}
/* We have established the device type, now fire it up */
- return ata_pci_sff_init_one(pdev, ppi, &via_sht, (void *)config, 0);
+ return ata_pci_bmdma_init_one(pdev, ppi, &via_sht, (void *)config, 0);
}
#ifdef CONFIG_PM
diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c
index 5904cfdb8dbe..adbe0426c8f0 100644
--- a/drivers/ata/pdc_adma.c
+++ b/drivers/ata/pdc_adma.c
@@ -324,10 +324,8 @@ static void adma_qc_prep(struct ata_queued_cmd *qc)
VPRINTK("ENTER\n");
adma_enter_reg_mode(qc->ap);
- if (qc->tf.protocol != ATA_PROT_DMA) {
- ata_sff_qc_prep(qc);
+ if (qc->tf.protocol != ATA_PROT_DMA)
return;
- }
buf[i++] = 0; /* Response flags */
buf[i++] = 0; /* reserved */
@@ -442,8 +440,6 @@ static inline unsigned int adma_intr_pkt(struct ata_host *host)
continue;
handled = 1;
adma_enter_reg_mode(ap);
- if (ap->flags & ATA_FLAG_DISABLED)
- continue;
pp = ap->private_data;
if (!pp || pp->state != adma_state_pkt)
continue;
@@ -484,42 +480,38 @@ static inline unsigned int adma_intr_mmio(struct ata_host *host)
unsigned int handled = 0, port_no;
for (port_no = 0; port_no < host->n_ports; ++port_no) {
- struct ata_port *ap;
- ap = host->ports[port_no];
- if (ap && (!(ap->flags & ATA_FLAG_DISABLED))) {
- struct ata_queued_cmd *qc;
- struct adma_port_priv *pp = ap->private_data;
- if (!pp || pp->state != adma_state_mmio)
+ struct ata_port *ap = host->ports[port_no];
+ struct adma_port_priv *pp = ap->private_data;
+ struct ata_queued_cmd *qc;
+
+ if (!pp || pp->state != adma_state_mmio)
+ continue;
+ qc = ata_qc_from_tag(ap, ap->link.active_tag);
+ if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
+
+ /* check main status, clearing INTRQ */
+ u8 status = ata_sff_check_status(ap);
+ if ((status & ATA_BUSY))
continue;
- qc = ata_qc_from_tag(ap, ap->link.active_tag);
- if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
-
- /* check main status, clearing INTRQ */
- u8 status = ata_sff_check_status(ap);
- if ((status & ATA_BUSY))
- continue;
- DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
- ap->print_id, qc->tf.protocol, status);
-
- /* complete taskfile transaction */
- pp->state = adma_state_idle;
- qc->err_mask |= ac_err_mask(status);
- if (!qc->err_mask)
- ata_qc_complete(qc);
- else {
- struct ata_eh_info *ehi =
- &ap->link.eh_info;
- ata_ehi_clear_desc(ehi);
- ata_ehi_push_desc(ehi,
- "status 0x%02X", status);
-
- if (qc->err_mask == AC_ERR_DEV)
- ata_port_abort(ap);
- else
- ata_port_freeze(ap);
- }
- handled = 1;
+ DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
+ ap->print_id, qc->tf.protocol, status);
+
+ /* complete taskfile transaction */
+ pp->state = adma_state_idle;
+ qc->err_mask |= ac_err_mask(status);
+ if (!qc->err_mask)
+ ata_qc_complete(qc);
+ else {
+ struct ata_eh_info *ehi = &ap->link.eh_info;
+ ata_ehi_clear_desc(ehi);
+ ata_ehi_push_desc(ehi, "status 0x%02X", status);
+
+ if (qc->err_mask == AC_ERR_DEV)
+ ata_port_abort(ap);
+ else
+ ata_port_freeze(ap);
}
+ handled = 1;
}
}
return handled;
@@ -562,11 +554,7 @@ static int adma_port_start(struct ata_port *ap)
{
struct device *dev = ap->host->dev;
struct adma_port_priv *pp;
- int rc;
- rc = ata_port_start(ap);
- if (rc)
- return rc;
adma_enter_reg_mode(ap);
pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
if (!pp)
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index a69192b38b43..61c89b54ea23 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -1313,7 +1313,7 @@ static int sata_fsl_probe(struct of_device *ofdev,
dev_printk(KERN_INFO, &ofdev->dev,
"Sata FSL Platform/CSB Driver init\n");
- hcr_base = of_iomap(ofdev->node, 0);
+ hcr_base = of_iomap(ofdev->dev.of_node, 0);
if (!hcr_base)
goto error_exit_with_cleanup;
@@ -1332,7 +1332,7 @@ static int sata_fsl_probe(struct of_device *ofdev,
host_priv->ssr_base = ssr_base;
host_priv->csr_base = csr_base;
- irq = irq_of_parse_and_map(ofdev->node, 0);
+ irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
if (irq < 0) {
dev_printk(KERN_ERR, &ofdev->dev, "invalid irq from platform\n");
goto error_exit_with_cleanup;
@@ -1427,8 +1427,11 @@ static struct of_device_id fsl_sata_match[] = {
MODULE_DEVICE_TABLE(of, fsl_sata_match);
static struct of_platform_driver fsl_sata_driver = {
- .name = "fsl-sata",
- .match_table = fsl_sata_match,
+ .driver = {
+ .name = "fsl-sata",
+ .owner = THIS_MODULE,
+ .of_match_table = fsl_sata_match,
+ },
.probe = sata_fsl_probe,
.remove = sata_fsl_remove,
#ifdef CONFIG_PM
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
index 27dc6c86a4cd..a36149ebf4a2 100644
--- a/drivers/ata/sata_inic162x.c
+++ b/drivers/ata/sata_inic162x.c
@@ -415,22 +415,11 @@ static irqreturn_t inic_interrupt(int irq, void *dev_instance)
spin_lock(&host->lock);
- for (i = 0; i < NR_PORTS; i++) {
- struct ata_port *ap = host->ports[i];
-
- if (!(host_irq_stat & (HIRQ_PORT0 << i)))
- continue;
-
- if (likely(ap && !(ap->flags & ATA_FLAG_DISABLED))) {
- inic_host_intr(ap);
+ for (i = 0; i < NR_PORTS; i++)
+ if (host_irq_stat & (HIRQ_PORT0 << i)) {
+ inic_host_intr(host->ports[i]);
handled++;
- } else {
- if (ata_ratelimit())
- dev_printk(KERN_ERR, host->dev, "interrupt "
- "from disabled port %d (0x%x)\n",
- i, host_irq_stat);
}
- }
spin_unlock(&host->lock);
@@ -679,8 +668,7 @@ static void init_port(struct ata_port *ap)
memset(pp->pkt, 0, sizeof(struct inic_pkt));
memset(pp->cpb_tbl, 0, IDMA_CPB_TBL_SIZE);
- /* setup PRD and CPB lookup table addresses */
- writel(ap->prd_dma, port_base + PORT_PRD_ADDR);
+ /* setup CPB lookup table addresses */
writel(pp->cpb_tbl_dma, port_base + PORT_CPB_CPBLAR);
}
@@ -694,7 +682,6 @@ static int inic_port_start(struct ata_port *ap)
{
struct device *dev = ap->host->dev;
struct inic_port_priv *pp;
- int rc;
/* alloc and initialize private data */
pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
@@ -703,10 +690,6 @@ static int inic_port_start(struct ata_port *ap)
ap->private_data = pp;
/* Alloc resources */
- rc = ata_port_start(ap);
- if (rc)
- return rc;
-
pp->pkt = dmam_alloc_coherent(dev, sizeof(struct inic_pkt),
&pp->pkt_dma, GFP_KERNEL);
if (!pp->pkt)
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 71cc0d42f9e1..a476cd99b95d 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -675,8 +675,6 @@ static struct ata_port_operations mv5_ops = {
.freeze = mv_eh_freeze,
.thaw = mv_eh_thaw,
.hardreset = mv_hardreset,
- .error_handler = ata_std_error_handler, /* avoid SFF EH */
- .post_internal_cmd = ATA_OP_NULL,
.scr_read = mv5_scr_read,
.scr_write = mv5_scr_write,
@@ -686,16 +684,27 @@ static struct ata_port_operations mv5_ops = {
};
static struct ata_port_operations mv6_ops = {
- .inherits = &mv5_ops,
+ .inherits = &ata_bmdma_port_ops,
+
+ .lost_interrupt = ATA_OP_NULL,
+
+ .qc_defer = mv_qc_defer,
+ .qc_prep = mv_qc_prep,
+ .qc_issue = mv_qc_issue,
+
.dev_config = mv6_dev_config,
- .scr_read = mv_scr_read,
- .scr_write = mv_scr_write,
+ .freeze = mv_eh_freeze,
+ .thaw = mv_eh_thaw,
+ .hardreset = mv_hardreset,
+ .softreset = mv_softreset,
.pmp_hardreset = mv_pmp_hardreset,
.pmp_softreset = mv_softreset,
- .softreset = mv_softreset,
.error_handler = mv_pmp_error_handler,
+ .scr_read = mv_scr_read,
+ .scr_write = mv_scr_write,
+
.sff_check_status = mv_sff_check_status,
.sff_irq_clear = mv_sff_irq_clear,
.check_atapi_dma = mv_check_atapi_dma,
@@ -703,6 +712,9 @@ static struct ata_port_operations mv6_ops = {
.bmdma_start = mv_bmdma_start,
.bmdma_stop = mv_bmdma_stop,
.bmdma_status = mv_bmdma_status,
+
+ .port_start = mv_port_start,
+ .port_stop = mv_port_stop,
};
static struct ata_port_operations mv_iie_ops = {
@@ -2248,7 +2260,7 @@ static unsigned int mv_qc_issue_fis(struct ata_queued_cmd *qc)
}
if (qc->tf.flags & ATA_TFLAG_POLLING)
- ata_pio_queue_task(ap, qc, 0);
+ ata_sff_queue_pio_task(ap, 0);
return 0;
}
@@ -2344,7 +2356,7 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
if (IS_GEN_II(hpriv))
return mv_qc_issue_fis(qc);
}
- return ata_sff_qc_issue(qc);
+ return ata_bmdma_qc_issue(qc);
}
static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap)
@@ -2355,13 +2367,9 @@ static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap)
if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
return NULL;
qc = ata_qc_from_tag(ap, ap->link.active_tag);
- if (qc) {
- if (qc->tf.flags & ATA_TFLAG_POLLING)
- qc = NULL;
- else if (!(qc->flags & ATA_QCFLAG_ACTIVE))
- qc = NULL;
- }
- return qc;
+ if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING))
+ return qc;
+ return NULL;
}
static void mv_pmp_error_handler(struct ata_port *ap)
@@ -2546,9 +2554,7 @@ static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled)
char *when = "idle";
ata_ehi_clear_desc(ehi);
- if (ap->flags & ATA_FLAG_DISABLED) {
- when = "disabled";
- } else if (edma_was_enabled) {
+ if (edma_was_enabled) {
when = "EDMA enabled";
} else {
struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
@@ -2782,10 +2788,6 @@ static void mv_port_intr(struct ata_port *ap, u32 port_cause)
struct mv_port_priv *pp;
int edma_was_enabled;
- if (ap->flags & ATA_FLAG_DISABLED) {
- mv_unexpected_intr(ap, 0);
- return;
- }
/*
* Grab a snapshot of the EDMA_EN flag setting,
* so that we have a consistent view for this port,
@@ -2809,7 +2811,7 @@ static void mv_port_intr(struct ata_port *ap, u32 port_cause)
} else if (!edma_was_enabled) {
struct ata_queued_cmd *qc = mv_get_active_qc(ap);
if (qc)
- ata_sff_host_intr(ap, qc);
+ ata_bmdma_port_intr(ap, qc);
else
mv_unexpected_intr(ap, edma_was_enabled);
}
@@ -3656,9 +3658,6 @@ static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
/* special case: control/altstatus doesn't have ATA_REG_ address */
port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST;
- /* unused: */
- port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
-
/* Clear any currently outstanding port interrupt conditions */
serr = port_mmio + mv_scr_offset(SCR_ERROR);
writelfl(readl(serr), serr);
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index 2a98b09ab735..6fd114784116 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -272,7 +272,7 @@ enum ncq_saw_flag_list {
};
struct nv_swncq_port_priv {
- struct ata_prd *prd; /* our SG list */
+ struct ata_bmdma_prd *prd; /* our SG list */
dma_addr_t prd_dma; /* and its DMA mapping */
void __iomem *sactive_block;
void __iomem *irq_block;
@@ -920,7 +920,7 @@ static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
}
/* handle interrupt */
- return ata_sff_host_intr(ap, qc);
+ return ata_bmdma_port_intr(ap, qc);
}
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
@@ -933,107 +933,108 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
+ struct nv_adma_port_priv *pp = ap->private_data;
+ void __iomem *mmio = pp->ctl_block;
+ u16 status;
+ u32 gen_ctl;
+ u32 notifier, notifier_error;
+
notifier_clears[i] = 0;
- if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
- struct nv_adma_port_priv *pp = ap->private_data;
- void __iomem *mmio = pp->ctl_block;
- u16 status;
- u32 gen_ctl;
- u32 notifier, notifier_error;
-
- /* if ADMA is disabled, use standard ata interrupt handler */
- if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
- u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
- >> (NV_INT_PORT_SHIFT * i);
- handled += nv_host_intr(ap, irq_stat);
- continue;
- }
+ /* if ADMA is disabled, use standard ata interrupt handler */
+ if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
+ u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
+ >> (NV_INT_PORT_SHIFT * i);
+ handled += nv_host_intr(ap, irq_stat);
+ continue;
+ }
- /* if in ATA register mode, check for standard interrupts */
- if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
- u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
- >> (NV_INT_PORT_SHIFT * i);
- if (ata_tag_valid(ap->link.active_tag))
- /** NV_INT_DEV indication seems unreliable at times
- at least in ADMA mode. Force it on always when a
- command is active, to prevent losing interrupts. */
- irq_stat |= NV_INT_DEV;
- handled += nv_host_intr(ap, irq_stat);
- }
+ /* if in ATA register mode, check for standard interrupts */
+ if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
+ u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
+ >> (NV_INT_PORT_SHIFT * i);
+ if (ata_tag_valid(ap->link.active_tag))
+ /** NV_INT_DEV indication seems unreliable
+ at times at least in ADMA mode. Force it
+ on always when a command is active, to
+ prevent losing interrupts. */
+ irq_stat |= NV_INT_DEV;
+ handled += nv_host_intr(ap, irq_stat);
+ }
+
+ notifier = readl(mmio + NV_ADMA_NOTIFIER);
+ notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
+ notifier_clears[i] = notifier | notifier_error;
+
+ gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
+
+ if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
+ !notifier_error)
+ /* Nothing to do */
+ continue;
+
+ status = readw(mmio + NV_ADMA_STAT);
+
+ /*
+ * Clear status. Ensure the controller sees the
+ * clearing before we start looking at any of the CPB
+ * statuses, so that any CPB completions after this
+ * point in the handler will raise another interrupt.
+ */
+ writew(status, mmio + NV_ADMA_STAT);
+ readw(mmio + NV_ADMA_STAT); /* flush posted write */
+ rmb();
+
+ handled++; /* irq handled if we got here */
- notifier = readl(mmio + NV_ADMA_NOTIFIER);
- notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
- notifier_clears[i] = notifier | notifier_error;
-
- gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
-
- if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
- !notifier_error)
- /* Nothing to do */
- continue;
-
- status = readw(mmio + NV_ADMA_STAT);
-
- /* Clear status. Ensure the controller sees the clearing before we start
- looking at any of the CPB statuses, so that any CPB completions after
- this point in the handler will raise another interrupt. */
- writew(status, mmio + NV_ADMA_STAT);
- readw(mmio + NV_ADMA_STAT); /* flush posted write */
- rmb();
-
- handled++; /* irq handled if we got here */
-
- /* freeze if hotplugged or controller error */
- if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
- NV_ADMA_STAT_HOTUNPLUG |
- NV_ADMA_STAT_TIMEOUT |
- NV_ADMA_STAT_SERROR))) {
- struct ata_eh_info *ehi = &ap->link.eh_info;
-
- ata_ehi_clear_desc(ehi);
- __ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
- if (status & NV_ADMA_STAT_TIMEOUT) {
- ehi->err_mask |= AC_ERR_SYSTEM;
- ata_ehi_push_desc(ehi, "timeout");
- } else if (status & NV_ADMA_STAT_HOTPLUG) {
- ata_ehi_hotplugged(ehi);
- ata_ehi_push_desc(ehi, "hotplug");
- } else if (status & NV_ADMA_STAT_HOTUNPLUG) {
- ata_ehi_hotplugged(ehi);
- ata_ehi_push_desc(ehi, "hot unplug");
- } else if (status & NV_ADMA_STAT_SERROR) {
- /* let libata analyze SError and figure out the cause */
- ata_ehi_push_desc(ehi, "SError");
- } else
- ata_ehi_push_desc(ehi, "unknown");
- ata_port_freeze(ap);
- continue;
+ /* freeze if hotplugged or controller error */
+ if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
+ NV_ADMA_STAT_HOTUNPLUG |
+ NV_ADMA_STAT_TIMEOUT |
+ NV_ADMA_STAT_SERROR))) {
+ struct ata_eh_info *ehi = &ap->link.eh_info;
+
+ ata_ehi_clear_desc(ehi);
+ __ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
+ if (status & NV_ADMA_STAT_TIMEOUT) {
+ ehi->err_mask |= AC_ERR_SYSTEM;
+ ata_ehi_push_desc(ehi, "timeout");
+ } else if (status & NV_ADMA_STAT_HOTPLUG) {
+ ata_ehi_hotplugged(ehi);
+ ata_ehi_push_desc(ehi, "hotplug");
+ } else if (status & NV_ADMA_STAT_HOTUNPLUG) {
+ ata_ehi_hotplugged(ehi);
+ ata_ehi_push_desc(ehi, "hot unplug");
+ } else if (status & NV_ADMA_STAT_SERROR) {
+ /* let EH analyze SError and figure out cause */
+ ata_ehi_push_desc(ehi, "SError");
+ } else
+ ata_ehi_push_desc(ehi, "unknown");
+ ata_port_freeze(ap);
+ continue;
+ }
+
+ if (status & (NV_ADMA_STAT_DONE |
+ NV_ADMA_STAT_CPBERR |
+ NV_ADMA_STAT_CMD_COMPLETE)) {
+ u32 check_commands = notifier_clears[i];
+ int pos, error = 0;
+
+ if (status & NV_ADMA_STAT_CPBERR) {
+ /* check all active commands */
+ if (ata_tag_valid(ap->link.active_tag))
+ check_commands = 1 <<
+ ap->link.active_tag;
+ else
+ check_commands = ap->link.sactive;
}
- if (status & (NV_ADMA_STAT_DONE |
- NV_ADMA_STAT_CPBERR |
- NV_ADMA_STAT_CMD_COMPLETE)) {
- u32 check_commands = notifier_clears[i];
- int pos, error = 0;
-
- if (status & NV_ADMA_STAT_CPBERR) {
- /* Check all active commands */
- if (ata_tag_valid(ap->link.active_tag))
- check_commands = 1 <<
- ap->link.active_tag;
- else
- check_commands = ap->
- link.sactive;
- }
-
- /** Check CPBs for completed commands */
- while ((pos = ffs(check_commands)) && !error) {
- pos--;
- error = nv_adma_check_cpb(ap, pos,
+ /* check CPBs for completed commands */
+ while ((pos = ffs(check_commands)) && !error) {
+ pos--;
+ error = nv_adma_check_cpb(ap, pos,
notifier_error & (1 << pos));
- check_commands &= ~(1 << pos);
- }
+ check_commands &= ~(1 << pos);
}
}
}
@@ -1099,7 +1100,7 @@ static void nv_adma_irq_clear(struct ata_port *ap)
u32 notifier_clears[2];
if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
- ata_sff_irq_clear(ap);
+ ata_bmdma_irq_clear(ap);
return;
}
@@ -1130,7 +1131,7 @@ static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
struct nv_adma_port_priv *pp = qc->ap->private_data;
if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
- ata_sff_post_internal_cmd(qc);
+ ata_bmdma_post_internal_cmd(qc);
}
static int nv_adma_port_start(struct ata_port *ap)
@@ -1155,7 +1156,8 @@ static int nv_adma_port_start(struct ata_port *ap)
if (rc)
return rc;
- rc = ata_port_start(ap);
+ /* we might fallback to bmdma, allocate bmdma resources */
+ rc = ata_bmdma_port_start(ap);
if (rc)
return rc;
@@ -1407,7 +1409,7 @@ static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
(qc->flags & ATA_QCFLAG_DMAMAP));
nv_adma_register_mode(qc->ap);
- ata_sff_qc_prep(qc);
+ ata_bmdma_qc_prep(qc);
return;
}
@@ -1466,7 +1468,7 @@ static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
(qc->flags & ATA_QCFLAG_DMAMAP));
nv_adma_register_mode(qc->ap);
- return ata_sff_qc_issue(qc);
+ return ata_bmdma_qc_issue(qc);
} else
nv_adma_mode(qc->ap);
@@ -1498,22 +1500,19 @@ static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
spin_lock_irqsave(&host->lock, flags);
for (i = 0; i < host->n_ports; i++) {
- struct ata_port *ap;
-
- ap = host->ports[i];
- if (ap &&
- !(ap->flags & ATA_FLAG_DISABLED)) {
- struct ata_queued_cmd *qc;
+ struct ata_port *ap = host->ports[i];
+ struct ata_queued_cmd *qc;
- qc = ata_qc_from_tag(ap, ap->link.active_tag);
- if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
- handled += ata_sff_host_intr(ap, qc);
- else
- // No request pending? Clear interrupt status
- // anyway, in case there's one pending.
- ap->ops->sff_check_status(ap);
+ qc = ata_qc_from_tag(ap, ap->link.active_tag);
+ if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
+ handled += ata_bmdma_port_intr(ap, qc);
+ } else {
+ /*
+ * No request pending? Clear interrupt status
+ * anyway, in case there's one pending.
+ */
+ ap->ops->sff_check_status(ap);
}
-
}
spin_unlock_irqrestore(&host->lock, flags);
@@ -1526,11 +1525,7 @@ static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
int i, handled = 0;
for (i = 0; i < host->n_ports; i++) {
- struct ata_port *ap = host->ports[i];
-
- if (ap && !(ap->flags & ATA_FLAG_DISABLED))
- handled += nv_host_intr(ap, irq_stat);
-
+ handled += nv_host_intr(host->ports[i], irq_stat);
irq_stat >>= NV_INT_PORT_SHIFT;
}
@@ -1744,7 +1739,7 @@ static void nv_adma_error_handler(struct ata_port *ap)
readw(mmio + NV_ADMA_CTL); /* flush posted write */
}
- ata_sff_error_handler(ap);
+ ata_bmdma_error_handler(ap);
}
static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
@@ -1870,7 +1865,7 @@ static void nv_swncq_error_handler(struct ata_port *ap)
ehc->i.action |= ATA_EH_RESET;
}
- ata_sff_error_handler(ap);
+ ata_bmdma_error_handler(ap);
}
#ifdef CONFIG_PM
@@ -1991,7 +1986,8 @@ static int nv_swncq_port_start(struct ata_port *ap)
struct nv_swncq_port_priv *pp;
int rc;
- rc = ata_port_start(ap);
+ /* we might fallback to bmdma, allocate bmdma resources */
+ rc = ata_bmdma_port_start(ap);
if (rc)
return rc;
@@ -2016,7 +2012,7 @@ static int nv_swncq_port_start(struct ata_port *ap)
static void nv_swncq_qc_prep(struct ata_queued_cmd *qc)
{
if (qc->tf.protocol != ATA_PROT_NCQ) {
- ata_sff_qc_prep(qc);
+ ata_bmdma_qc_prep(qc);
return;
}
@@ -2031,7 +2027,7 @@ static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
struct ata_port *ap = qc->ap;
struct scatterlist *sg;
struct nv_swncq_port_priv *pp = ap->private_data;
- struct ata_prd *prd;
+ struct ata_bmdma_prd *prd;
unsigned int si, idx;
prd = pp->prd + ATA_MAX_PRD * qc->tag;
@@ -2092,7 +2088,7 @@ static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
struct nv_swncq_port_priv *pp = ap->private_data;
if (qc->tf.protocol != ATA_PROT_NCQ)
- return ata_sff_qc_issue(qc);
+ return ata_bmdma_qc_issue(qc);
DPRINTK("Enter\n");
@@ -2380,16 +2376,14 @@ static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance)
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
- if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
- if (ap->link.sactive) {
- nv_swncq_host_interrupt(ap, (u16)irq_stat);
- handled = 1;
- } else {
- if (irq_stat) /* reserve Hotplug */
- nv_swncq_irq_clear(ap, 0xfff0);
+ if (ap->link.sactive) {
+ nv_swncq_host_interrupt(ap, (u16)irq_stat);
+ handled = 1;
+ } else {
+ if (irq_stat) /* reserve Hotplug */
+ nv_swncq_irq_clear(ap, 0xfff0);
- handled += nv_host_intr(ap, (u8)irq_stat);
- }
+ handled += nv_host_intr(ap, (u8)irq_stat);
}
irq_stat >>= NV_INT_PORT_SHIFT_MCP55;
}
@@ -2436,7 +2430,7 @@ static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
ppi[0] = &nv_port_info[type];
ipriv = ppi[0]->private_data;
- rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
+ rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
if (rc)
return rc;
@@ -2479,8 +2473,7 @@ static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
pci_set_master(pdev);
- return ata_host_activate(host, pdev->irq, ipriv->irq_handler,
- IRQF_SHARED, ipriv->sht);
+ return ata_pci_sff_activate_host(host, ipriv->irq_handler, ipriv->sht);
}
#ifdef CONFIG_PM
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
index 5356ec00d2b4..f03ad48273ff 100644
--- a/drivers/ata/sata_promise.c
+++ b/drivers/ata/sata_promise.c
@@ -333,7 +333,8 @@ static int pdc_common_port_start(struct ata_port *ap)
struct pdc_port_priv *pp;
int rc;
- rc = ata_port_start(ap);
+ /* we use the same prd table as bmdma, allocate it */
+ rc = ata_bmdma_port_start(ap);
if (rc)
return rc;
@@ -499,7 +500,7 @@ static int pdc_sata_scr_write(struct ata_link *link,
static void pdc_atapi_pkt(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
- dma_addr_t sg_table = ap->prd_dma;
+ dma_addr_t sg_table = ap->bmdma_prd_dma;
unsigned int cdb_len = qc->dev->cdb_len;
u8 *cdb = qc->cdb;
struct pdc_port_priv *pp = ap->private_data;
@@ -587,6 +588,7 @@ static void pdc_atapi_pkt(struct ata_queued_cmd *qc)
static void pdc_fill_sg(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
+ struct ata_bmdma_prd *prd = ap->bmdma_prd;
struct scatterlist *sg;
const u32 SG_COUNT_ASIC_BUG = 41*4;
unsigned int si, idx;
@@ -613,8 +615,8 @@ static void pdc_fill_sg(struct ata_queued_cmd *qc)
if ((offset + sg_len) > 0x10000)
len = 0x10000 - offset;
- ap->prd[idx].addr = cpu_to_le32(addr);
- ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
+ prd[idx].addr = cpu_to_le32(addr);
+ prd[idx].flags_len = cpu_to_le32(len & 0xffff);
VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
idx++;
@@ -623,27 +625,27 @@ static void pdc_fill_sg(struct ata_queued_cmd *qc)
}
}
- len = le32_to_cpu(ap->prd[idx - 1].flags_len);
+ len = le32_to_cpu(prd[idx - 1].flags_len);
if (len > SG_COUNT_ASIC_BUG) {
u32 addr;
VPRINTK("Splitting last PRD.\n");
- addr = le32_to_cpu(ap->prd[idx - 1].addr);
- ap->prd[idx - 1].flags_len = cpu_to_le32(len - SG_COUNT_ASIC_BUG);
+ addr = le32_to_cpu(prd[idx - 1].addr);
+ prd[idx - 1].flags_len = cpu_to_le32(len - SG_COUNT_ASIC_BUG);
VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx - 1, addr, SG_COUNT_ASIC_BUG);
addr = addr + len - SG_COUNT_ASIC_BUG;
len = SG_COUNT_ASIC_BUG;
- ap->prd[idx].addr = cpu_to_le32(addr);
- ap->prd[idx].flags_len = cpu_to_le32(len);
+ prd[idx].addr = cpu_to_le32(addr);
+ prd[idx].flags_len = cpu_to_le32(len);
VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
idx++;
}
- ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
+ prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
static void pdc_qc_prep(struct ata_queued_cmd *qc)
@@ -658,7 +660,7 @@ static void pdc_qc_prep(struct ata_queued_cmd *qc)
pdc_fill_sg(qc);
/*FALLTHROUGH*/
case ATA_PROT_NODATA:
- i = pdc_pkt_header(&qc->tf, qc->ap->prd_dma,
+ i = pdc_pkt_header(&qc->tf, qc->ap->bmdma_prd_dma,
qc->dev->devno, pp->pkt);
if (qc->tf.flags & ATA_TFLAG_LBA48)
i = pdc_prep_lba48(&qc->tf, pp->pkt, i);
@@ -838,7 +840,7 @@ static void pdc_error_handler(struct ata_port *ap)
if (!(ap->pflags & ATA_PFLAG_FROZEN))
pdc_reset_port(ap);
- ata_std_error_handler(ap);
+ ata_sff_error_handler(ap);
}
static void pdc_post_internal_cmd(struct ata_queued_cmd *qc)
@@ -984,8 +986,7 @@ static irqreturn_t pdc_interrupt(int irq, void *dev_instance)
/* check for a plug or unplug event */
ata_no = pdc_port_no_to_ata_no(i, is_sataii_tx4);
tmp = hotplug_status & (0x11 << ata_no);
- if (tmp && ap &&
- !(ap->flags & ATA_FLAG_DISABLED)) {
+ if (tmp) {
struct ata_eh_info *ehi = &ap->link.eh_info;
ata_ehi_clear_desc(ehi);
ata_ehi_hotplugged(ehi);
@@ -997,8 +998,7 @@ static irqreturn_t pdc_interrupt(int irq, void *dev_instance)
/* check for a packet interrupt */
tmp = mask & (1 << (i + 1));
- if (tmp && ap &&
- !(ap->flags & ATA_FLAG_DISABLED)) {
+ if (tmp) {
struct ata_queued_cmd *qc;
qc = ata_qc_from_tag(ap, ap->link.active_tag);
diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c
index 92ba45e6689b..daeebf19a6a9 100644
--- a/drivers/ata/sata_qstor.c
+++ b/drivers/ata/sata_qstor.c
@@ -120,8 +120,6 @@ static void qs_host_stop(struct ata_host *host);
static void qs_qc_prep(struct ata_queued_cmd *qc);
static unsigned int qs_qc_issue(struct ata_queued_cmd *qc);
static int qs_check_atapi_dma(struct ata_queued_cmd *qc);
-static void qs_bmdma_stop(struct ata_queued_cmd *qc);
-static u8 qs_bmdma_status(struct ata_port *ap);
static void qs_freeze(struct ata_port *ap);
static void qs_thaw(struct ata_port *ap);
static int qs_prereset(struct ata_link *link, unsigned long deadline);
@@ -137,8 +135,6 @@ static struct ata_port_operations qs_ata_ops = {
.inherits = &ata_sff_port_ops,
.check_atapi_dma = qs_check_atapi_dma,
- .bmdma_stop = qs_bmdma_stop,
- .bmdma_status = qs_bmdma_status,
.qc_prep = qs_qc_prep,
.qc_issue = qs_qc_issue,
@@ -147,7 +143,6 @@ static struct ata_port_operations qs_ata_ops = {
.prereset = qs_prereset,
.softreset = ATA_OP_NULL,
.error_handler = qs_error_handler,
- .post_internal_cmd = ATA_OP_NULL,
.lost_interrupt = ATA_OP_NULL,
.scr_read = qs_scr_read,
@@ -191,16 +186,6 @@ static int qs_check_atapi_dma(struct ata_queued_cmd *qc)
return 1; /* ATAPI DMA not supported */
}
-static void qs_bmdma_stop(struct ata_queued_cmd *qc)
-{
- /* nothing */
-}
-
-static u8 qs_bmdma_status(struct ata_port *ap)
-{
- return 0;
-}
-
static inline void qs_enter_reg_mode(struct ata_port *ap)
{
u8 __iomem *chan = qs_mmio_base(ap->host) + (ap->port_no * 0x4000);
@@ -255,7 +240,7 @@ static int qs_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
static void qs_error_handler(struct ata_port *ap)
{
qs_enter_reg_mode(ap);
- ata_std_error_handler(ap);
+ ata_sff_error_handler(ap);
}
static int qs_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
@@ -304,10 +289,8 @@ static void qs_qc_prep(struct ata_queued_cmd *qc)
VPRINTK("ENTER\n");
qs_enter_reg_mode(qc->ap);
- if (qc->tf.protocol != ATA_PROT_DMA) {
- ata_sff_qc_prep(qc);
+ if (qc->tf.protocol != ATA_PROT_DMA)
return;
- }
nelem = qs_fill_sg(qc);
@@ -404,26 +387,24 @@ static inline unsigned int qs_intr_pkt(struct ata_host *host)
u8 sHST = sff1 & 0x3f; /* host status */
unsigned int port_no = (sff1 >> 8) & 0x03;
struct ata_port *ap = host->ports[port_no];
+ struct qs_port_priv *pp = ap->private_data;
+ struct ata_queued_cmd *qc;
DPRINTK("SFF=%08x%08x: sCHAN=%u sHST=%d sDST=%02x\n",
sff1, sff0, port_no, sHST, sDST);
handled = 1;
- if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
- struct ata_queued_cmd *qc;
- struct qs_port_priv *pp = ap->private_data;
- if (!pp || pp->state != qs_state_pkt)
- continue;
- qc = ata_qc_from_tag(ap, ap->link.active_tag);
- if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
- switch (sHST) {
- case 0: /* successful CPB */
- case 3: /* device error */
- qs_enter_reg_mode(qc->ap);
- qs_do_or_die(qc, sDST);
- break;
- default:
- break;
- }
+ if (!pp || pp->state != qs_state_pkt)
+ continue;
+ qc = ata_qc_from_tag(ap, ap->link.active_tag);
+ if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
+ switch (sHST) {
+ case 0: /* successful CPB */
+ case 3: /* device error */
+ qs_enter_reg_mode(qc->ap);
+ qs_do_or_die(qc, sDST);
+ break;
+ default:
+ break;
}
}
}
@@ -436,33 +417,30 @@ static inline unsigned int qs_intr_mmio(struct ata_host *host)
unsigned int handled = 0, port_no;
for (port_no = 0; port_no < host->n_ports; ++port_no) {
- struct ata_port *ap;
- ap = host->ports[port_no];
- if (ap &&
- !(ap->flags & ATA_FLAG_DISABLED)) {
- struct ata_queued_cmd *qc;
- struct qs_port_priv *pp;
- qc = ata_qc_from_tag(ap, ap->link.active_tag);
- if (!qc || !(qc->flags & ATA_QCFLAG_ACTIVE)) {
- /*
- * The qstor hardware generates spurious
- * interrupts from time to time when switching
- * in and out of packet mode.
- * There's no obvious way to know if we're
- * here now due to that, so just ack the irq
- * and pretend we knew it was ours.. (ugh).
- * This does not affect packet mode.
- */
- ata_sff_check_status(ap);
- handled = 1;
- continue;
- }
- pp = ap->private_data;
- if (!pp || pp->state != qs_state_mmio)
- continue;
- if (!(qc->tf.flags & ATA_TFLAG_POLLING))
- handled |= ata_sff_host_intr(ap, qc);
+ struct ata_port *ap = host->ports[port_no];
+ struct qs_port_priv *pp = ap->private_data;
+ struct ata_queued_cmd *qc;
+
+ qc = ata_qc_from_tag(ap, ap->link.active_tag);
+ if (!qc) {
+ /*
+ * The qstor hardware generates spurious
+ * interrupts from time to time when switching
+ * in and out of packet mode. There's no
+ * obvious way to know if we're here now due
+ * to that, so just ack the irq and pretend we
+ * knew it was ours.. (ugh). This does not
+ * affect packet mode.
+ */
+ ata_sff_check_status(ap);
+ handled = 1;
+ continue;
}
+
+ if (!pp || pp->state != qs_state_mmio)
+ continue;
+ if (!(qc->tf.flags & ATA_TFLAG_POLLING))
+ handled |= ata_sff_port_intr(ap, qc);
}
return handled;
}
@@ -509,11 +487,7 @@ static int qs_port_start(struct ata_port *ap)
void __iomem *mmio_base = qs_mmio_base(ap->host);
void __iomem *chan = mmio_base + (ap->port_no * 0x4000);
u64 addr;
- int rc;
- rc = ata_port_start(ap);
- if (rc)
- return rc;
pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
if (!pp)
return -ENOMEM;
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index 3cb69d5fb817..3a4f84219719 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -284,7 +284,7 @@ static void sil_bmdma_setup(struct ata_queued_cmd *qc)
void __iomem *bmdma = ap->ioaddr.bmdma_addr;
/* load PRD table addr. */
- iowrite32(ap->prd_dma, bmdma + ATA_DMA_TABLE_OFS);
+ iowrite32(ap->bmdma_prd_dma, bmdma + ATA_DMA_TABLE_OFS);
/* issue r/w command */
ap->ops->sff_exec_command(ap, &qc->tf);
@@ -311,10 +311,10 @@ static void sil_fill_sg(struct ata_queued_cmd *qc)
{
struct scatterlist *sg;
struct ata_port *ap = qc->ap;
- struct ata_prd *prd, *last_prd = NULL;
+ struct ata_bmdma_prd *prd, *last_prd = NULL;
unsigned int si;
- prd = &ap->prd[0];
+ prd = &ap->bmdma_prd[0];
for_each_sg(qc->sg, sg, qc->n_elem, si) {
/* Note h/w doesn't support 64-bit, so we unconditionally
* truncate dma_addr_t to u32.
@@ -503,7 +503,7 @@ static void sil_host_intr(struct ata_port *ap, u32 bmdma2)
goto err_hsm;
/* ack bmdma irq events */
- ata_sff_irq_clear(ap);
+ ata_bmdma_irq_clear(ap);
/* kick HSM in the ass */
ata_sff_hsm_move(ap, qc, status, 0);
@@ -532,9 +532,6 @@ static irqreturn_t sil_interrupt(int irq, void *dev_instance)
struct ata_port *ap = host->ports[i];
u32 bmdma2 = readl(mmio_base + sil_port[ap->port_no].bmdma2);
- if (unlikely(ap->flags & ATA_FLAG_DISABLED))
- continue;
-
/* turn off SATA_IRQ if not supported */
if (ap->flags & SIL_FLAG_NO_SATA_IRQ)
bmdma2 &= ~SIL_DMA_SATA_IRQ;
@@ -587,7 +584,7 @@ static void sil_thaw(struct ata_port *ap)
/* clear IRQ */
ap->ops->sff_check_status(ap);
- ata_sff_irq_clear(ap);
+ ata_bmdma_irq_clear(ap);
/* turn on SATA IRQ if supported */
if (!(ap->flags & SIL_FLAG_NO_SATA_IRQ))
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index 433b6b89c795..e9250514734b 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -1160,13 +1160,8 @@ static irqreturn_t sil24_interrupt(int irq, void *dev_instance)
for (i = 0; i < host->n_ports; i++)
if (status & (1 << i)) {
- struct ata_port *ap = host->ports[i];
- if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
- sil24_host_intr(ap);
- handled++;
- } else
- printk(KERN_ERR DRV_NAME
- ": interrupt from disabled port %d\n", i);
+ sil24_host_intr(host->ports[i]);
+ handled++;
}
spin_unlock(&host->lock);
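
The sata_sil, sata_sil24 and sata_vsc hunks around here all drop the same ATA_FLAG_DISABLED test: with the flag gone, every entry in host->ports[] is a valid, enabled port, so shared-IRQ handlers can dispatch unconditionally. A sketch of the simplified loop follows; it is not part of the patch, and foo_host_intr() is a hypothetical per-port service routine.

/* Illustrative sketch only -- not part of the patch above or below. */
#include <linux/interrupt.h>
#include <linux/libata.h>

static unsigned int foo_host_intr(struct ata_port *ap)
{
	/* hypothetical per-port service routine; ap is always valid now
	 * that ATA_FLAG_DISABLED no longer exists */
	return 0;
}

static irqreturn_t foo_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i, handled = 0;

	spin_lock(&host->lock);
	for (i = 0; i < host->n_ports; i++)
		handled += foo_host_intr(host->ports[i]);
	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
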
diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
index f8a91bfd66a8..2bfe3ae03976 100644
--- a/drivers/ata/sata_sis.c
+++ b/drivers/ata/sata_sis.c
@@ -279,7 +279,7 @@ static int sis_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
break;
}
- rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
+ rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
if (rc)
return rc;
@@ -308,7 +308,7 @@ static int sis_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_master(pdev);
pci_intx(pdev, 1);
- return ata_host_activate(host, pdev->irq, ata_sff_interrupt,
+ return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
IRQF_SHARED, &sis_sht);
}
diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c
index 7257f2d5c52c..7d9db4aaf07e 100644
--- a/drivers/ata/sata_svw.c
+++ b/drivers/ata/sata_svw.c
@@ -224,7 +224,7 @@ static void k2_bmdma_setup_mmio(struct ata_queued_cmd *qc)
/* load PRD table addr. */
mb(); /* make sure PRD table writes are visible to controller */
- writel(ap->prd_dma, mmio + ATA_DMA_TABLE_OFS);
+ writel(ap->bmdma_prd_dma, mmio + ATA_DMA_TABLE_OFS);
/* specify data direction, triple-check start bit is clear */
dmactl = readb(mmio + ATA_DMA_CMD);
@@ -502,7 +502,7 @@ static int k2_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *en
writel(0x0, mmio_base + K2_SATA_SIM_OFFSET);
pci_set_master(pdev);
- return ata_host_activate(host, pdev->irq, ata_sff_interrupt,
+ return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
IRQF_SHARED, &k2_sata_sht);
}
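
The sil_fill_sg, k2_bmdma_setup_mmio and pdc_fill_sg hunks above also show the PRD rename that runs through this series: drivers building their own PRD tables now reach them through ap->bmdma_prd / ap->bmdma_prd_dma, with elements of type struct ata_bmdma_prd, instead of ap->prd / ap->prd_dma. The sketch below illustrates the new access pattern only; it is not part of the patch, "foo" names are hypothetical, and real code must additionally split segments that violate the BMDMA 64KB limits, which this sketch skips.

/* Illustrative sketch only -- not part of the patch above or below. */
#include <linux/io.h>
#include <linux/scatterlist.h>
#include <linux/libata.h>

static void foo_fill_sg(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_bmdma_prd *prd = ap->bmdma_prd;	/* was ap->prd */
	struct scatterlist *sg;
	unsigned int si, idx = 0;

	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		prd[idx].addr = cpu_to_le32(sg_dma_address(sg));
		prd[idx].flags_len = cpu_to_le32(sg_dma_len(sg) & 0xffff);
		idx++;
	}
	prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}

static void foo_load_prd(struct ata_port *ap)
{
	/* the table's DMA address moved along with the pointer */
	iowrite32(ap->bmdma_prd_dma,
		  ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
}
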
diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
index 232468f2ea90..bedd5188e5b0 100644
--- a/drivers/ata/sata_sx4.c
+++ b/drivers/ata/sata_sx4.c
@@ -302,11 +302,6 @@ static int pdc_port_start(struct ata_port *ap)
{
struct device *dev = ap->host->dev;
struct pdc_port_priv *pp;
- int rc;
-
- rc = ata_port_start(ap);
- if (rc)
- return rc;
pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
if (!pp)
@@ -840,8 +835,7 @@ static irqreturn_t pdc20621_interrupt(int irq, void *dev_instance)
ap = host->ports[port_no];
tmp = mask & (1 << i);
VPRINTK("seq %u, port_no %u, ap %p, tmp %x\n", i, port_no, ap, tmp);
- if (tmp && ap &&
- !(ap->flags & ATA_FLAG_DISABLED)) {
+ if (tmp && ap) {
struct ata_queued_cmd *qc;
qc = ata_qc_from_tag(ap, ap->link.active_tag);
@@ -927,7 +921,7 @@ static void pdc_error_handler(struct ata_port *ap)
if (!(ap->pflags & ATA_PFLAG_FROZEN))
pdc_reset_port(ap);
- ata_std_error_handler(ap);
+ ata_sff_error_handler(ap);
}
static void pdc_post_internal_cmd(struct ata_queued_cmd *qc)
diff --git a/drivers/ata/sata_uli.c b/drivers/ata/sata_uli.c
index 011e098590d1..b8578c32d344 100644
--- a/drivers/ata/sata_uli.c
+++ b/drivers/ata/sata_uli.c
@@ -181,9 +181,7 @@ static int uli_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
return rc;
- rc = ata_pci_bmdma_init(host);
- if (rc)
- return rc;
+ ata_pci_bmdma_init(host);
iomap = host->iomap;
@@ -244,7 +242,7 @@ static int uli_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_master(pdev);
pci_intx(pdev, 1);
- return ata_host_activate(host, pdev->irq, ata_sff_interrupt,
+ return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
IRQF_SHARED, &uli_sht);
}
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
index 08f65492cc81..101d8c219caf 100644
--- a/drivers/ata/sata_via.c
+++ b/drivers/ata/sata_via.c
@@ -308,7 +308,7 @@ static void svia_noop_freeze(struct ata_port *ap)
* certain way. Leave it alone and just clear pending IRQ.
*/
ap->ops->sff_check_status(ap);
- ata_sff_irq_clear(ap);
+ ata_bmdma_irq_clear(ap);
}
/**
@@ -463,7 +463,7 @@ static int vt6420_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
struct ata_host *host;
int rc;
- rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
+ rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
if (rc)
return rc;
*r_host = host;
@@ -520,7 +520,7 @@ static int vt8251_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
struct ata_host *host;
int i, rc;
- rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
+ rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
if (rc)
return rc;
*r_host = host;
@@ -628,7 +628,7 @@ static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
svia_configure(pdev);
pci_set_master(pdev);
- return ata_host_activate(host, pdev->irq, ata_sff_interrupt,
+ return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
IRQF_SHARED, &svia_sht);
}
diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c
index 8b2a278b2547..b777176ff494 100644
--- a/drivers/ata/sata_vsc.c
+++ b/drivers/ata/sata_vsc.c
@@ -245,7 +245,7 @@ static void vsc_port_intr(u8 port_status, struct ata_port *ap)
qc = ata_qc_from_tag(ap, ap->link.active_tag);
if (qc && likely(!(qc->tf.flags & ATA_TFLAG_POLLING)))
- handled = ata_sff_host_intr(ap, qc);
+ handled = ata_bmdma_port_intr(ap, qc);
/* We received an interrupt during a polled command,
* or some other spurious condition. Interrupt reporting
@@ -284,14 +284,8 @@ static irqreturn_t vsc_sata_interrupt(int irq, void *dev_instance)
for (i = 0; i < host->n_ports; i++) {
u8 port_status = (status >> (8 * i)) & 0xff;
if (port_status) {
- struct ata_port *ap = host->ports[i];
-
- if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
- vsc_port_intr(port_status, ap);
- handled++;
- } else
- dev_printk(KERN_ERR, host->dev,
- "interrupt from disabled port %d\n", i);
+ vsc_port_intr(port_status, host->ports[i]);
+ handled++;
}
}
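
Both sata_sil24 and sata_vsc above drop the ATA_FLAG_DISABLED test from their interrupt handlers; with that flag removed from libata, every port whose status bit is set is simply serviced. A stripped-down sketch of the resulting dispatch loop (the per-port helper name is invented):

#include <linux/interrupt.h>
#include <linux/libata.h>

static void example_port_intr(struct ata_port *ap)
{
        /* per-port servicing, e.g. via ata_bmdma_port_intr() on SFF hardware */
}

static irqreturn_t example_dispatch(struct ata_host *host, u32 status)
{
        unsigned int i, handled = 0;

        for (i = 0; i < host->n_ports; i++)
                if (status & (1 << i)) {
                        example_port_intr(host->ports[i]);
                        handled++;
                }

        return IRQ_RETVAL(handled);
}
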
diff --git a/drivers/atm/Kconfig b/drivers/atm/Kconfig
index 191b85e857e0..f1a0a00b3b07 100644
--- a/drivers/atm/Kconfig
+++ b/drivers/atm/Kconfig
@@ -394,6 +394,7 @@ config ATM_HE_USE_SUNI
config ATM_SOLOS
tristate "Solos ADSL2+ PCI Multiport card driver"
depends on PCI
+ select FW_LOADER
help
Support for the Solos multiport ADSL2+ card.
diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
index b86712167eb8..b9101818b47b 100644
--- a/drivers/atm/atmtcp.c
+++ b/drivers/atm/atmtcp.c
@@ -68,7 +68,7 @@ static int atmtcp_send_control(struct atm_vcc *vcc,int type,
*(struct atm_vcc **) &new_msg->vcc = vcc;
old_test = test_bit(flag,&vcc->flags);
out_vcc->push(out_vcc,skb);
- add_wait_queue(sk_atm(vcc)->sk_sleep, &wait);
+ add_wait_queue(sk_sleep(sk_atm(vcc)), &wait);
while (test_bit(flag,&vcc->flags) == old_test) {
mb();
out_vcc = PRIV(vcc->dev) ? PRIV(vcc->dev)->vcc : NULL;
@@ -80,7 +80,7 @@ static int atmtcp_send_control(struct atm_vcc *vcc,int type,
schedule();
}
set_current_state(TASK_RUNNING);
- remove_wait_queue(sk_atm(vcc)->sk_sleep, &wait);
+ remove_wait_queue(sk_sleep(sk_atm(vcc)), &wait);
return error;
}
@@ -105,7 +105,7 @@ static int atmtcp_recv_control(const struct atmtcp_control *msg)
msg->type);
return -EINVAL;
}
- wake_up(sk_atm(vcc)->sk_sleep);
+ wake_up(sk_sleep(sk_atm(vcc)));
return 0;
}
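
The atmtcp hunks are part of a tree-wide move to the sk_sleep() accessor, so callers no longer dereference sk->sk_sleep directly. The same sleep-on-socket pattern, reduced to an invented flag-waiting helper:

#include <net/sock.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/bitops.h>

static void example_wait_for_bit_clear(struct sock *sk, unsigned long *flags, int bit)
{
        DECLARE_WAITQUEUE(wait, current);

        add_wait_queue(sk_sleep(sk), &wait);            /* was: sk->sk_sleep */
        for (;;) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                if (!test_bit(bit, flags))
                        break;
                schedule();
        }
        set_current_state(TASK_RUNNING);
        remove_wait_queue(sk_sleep(sk), &wait);         /* was: sk->sk_sleep */
}
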
diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
index 719ec5a0dca5..90a5a7cac740 100644
--- a/drivers/atm/eni.c
+++ b/drivers/atm/eni.c
@@ -1131,7 +1131,7 @@ DPRINTK("doing direct send\n"); /* @@@ well, this doesn't work anyway */
if (i == -1)
put_dma(tx->index,eni_dev->dma,&j,(unsigned long)
skb->data,
- skb->len - skb->data_len);
+ skb_headlen(skb));
else
put_dma(tx->index,eni_dev->dma,&j,(unsigned long)
skb_shinfo(skb)->frags[i].page + skb_shinfo(skb)->frags[i].page_offset,
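
The eni change (and the he.c one further down) replaces the open-coded skb->len - skb->data_len with skb_headlen(), which is defined in <linux/skbuff.h> as exactly that difference, i.e. the number of bytes in the skb's linear area. Trivial illustration:

#include <linux/skbuff.h>

static unsigned int example_linear_bytes(const struct sk_buff *skb)
{
        /* bytes in the linear (non-paged) part of the skb */
        return skb_headlen(skb);        /* == skb->len - skb->data_len */
}
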
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index f7d6ebaa0418..da8f176c051e 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -789,7 +789,7 @@ static int __init fore200e_sba_map(struct fore200e *fore200e)
fore200e->bus->write(0x02, fore200e->regs.sba.isr); /* XXX hardwired interrupt level */
/* get the supported DVMA burst sizes */
- bursts = of_getintprop_default(op->node->parent, "burst-sizes", 0x00);
+ bursts = of_getintprop_default(op->dev.of_node->parent, "burst-sizes", 0x00);
if (sbus_can_dma_64bit())
sbus_set_sbus64(&op->dev, bursts);
@@ -820,18 +820,20 @@ static int __init fore200e_sba_prom_read(struct fore200e *fore200e, struct prom_
const u8 *prop;
int len;
- prop = of_get_property(op->node, "madaddrlo2", &len);
+ prop = of_get_property(op->dev.of_node, "madaddrlo2", &len);
if (!prop)
return -ENODEV;
memcpy(&prom->mac_addr[4], prop, 4);
- prop = of_get_property(op->node, "madaddrhi4", &len);
+ prop = of_get_property(op->dev.of_node, "madaddrhi4", &len);
if (!prop)
return -ENODEV;
memcpy(&prom->mac_addr[2], prop, 4);
- prom->serial_number = of_getintprop_default(op->node, "serialnumber", 0);
- prom->hw_revision = of_getintprop_default(op->node, "promversion", 0);
+ prom->serial_number = of_getintprop_default(op->dev.of_node,
+ "serialnumber", 0);
+ prom->hw_revision = of_getintprop_default(op->dev.of_node,
+ "promversion", 0);
return 0;
}
@@ -841,10 +843,10 @@ static int fore200e_sba_proc_read(struct fore200e *fore200e, char *page)
struct of_device *op = fore200e->bus_dev;
const struct linux_prom_registers *regs;
- regs = of_get_property(op->node, "reg", NULL);
+ regs = of_get_property(op->dev.of_node, "reg", NULL);
return sprintf(page, " SBUS slot/device:\t\t%d/'%s'\n",
- (regs ? regs->which_io : 0), op->node->name);
+ (regs ? regs->which_io : 0), op->dev.of_node->name);
}
#endif /* CONFIG_SBUS */
@@ -2693,8 +2695,11 @@ static const struct of_device_id fore200e_sba_match[] = {
MODULE_DEVICE_TABLE(of, fore200e_sba_match);
static struct of_platform_driver fore200e_sba_driver = {
- .name = "fore_200e",
- .match_table = fore200e_sba_match,
+ .driver = {
+ .name = "fore_200e",
+ .owner = THIS_MODULE,
+ .of_match_table = fore200e_sba_match,
+ },
.probe = fore200e_sba_probe,
.remove = __devexit_p(fore200e_sba_remove),
};
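
The fore200e hunks track this cycle's device-tree rework: the OF node is reached through op->dev.of_node instead of the removed op->node, and the driver name and match table move into the embedded struct device_driver. A registration skeleton in the same style, with invented names and trivial probe/remove bodies:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>

static const struct of_device_id example_match[] = {
        { .name = "example" },
        {},
};
MODULE_DEVICE_TABLE(of, example_match);

static int __devinit example_probe(struct of_device *op,
                                   const struct of_device_id *match)
{
        const u8 *prop = of_get_property(op->dev.of_node, "example-prop", NULL);

        return prop ? 0 : -ENODEV;
}

static int __devexit example_remove(struct of_device *op)
{
        return 0;
}

static struct of_platform_driver example_driver = {
        .driver = {
                .name           = "example",
                .owner          = THIS_MODULE,
                .of_match_table = example_match,
        },
        .probe  = example_probe,
        .remove = __devexit_p(example_remove),
};
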
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index c213e0da0343..56c2e99e458f 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -2664,8 +2664,8 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
#ifdef USE_SCATTERGATHER
tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev, skb->data,
- skb->len - skb->data_len, PCI_DMA_TODEVICE);
- tpd->iovec[slot].len = skb->len - skb->data_len;
+ skb_headlen(skb), PCI_DMA_TODEVICE);
+ tpd->iovec[slot].len = skb_headlen(skb);
++slot;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
diff --git a/drivers/auxdisplay/cfag12864bfb.c b/drivers/auxdisplay/cfag12864bfb.c
index 3fecfb446d90..5ad3bad2b0a5 100644
--- a/drivers/auxdisplay/cfag12864bfb.c
+++ b/drivers/auxdisplay/cfag12864bfb.c
@@ -37,7 +37,7 @@
#define CFAG12864BFB_NAME "cfag12864bfb"
-static struct fb_fix_screeninfo cfag12864bfb_fix __initdata = {
+static struct fb_fix_screeninfo cfag12864bfb_fix __devinitdata = {
.id = "cfag12864b",
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_MONO10,
@@ -48,7 +48,7 @@ static struct fb_fix_screeninfo cfag12864bfb_fix __initdata = {
.accel = FB_ACCEL_NONE,
};
-static struct fb_var_screeninfo cfag12864bfb_var __initdata = {
+static struct fb_var_screeninfo cfag12864bfb_var __devinitdata = {
.xres = CFAG12864B_WIDTH,
.yres = CFAG12864B_HEIGHT,
.xres_virtual = CFAG12864B_WIDTH,
@@ -114,7 +114,7 @@ none:
return ret;
}
-static int cfag12864bfb_remove(struct platform_device *device)
+static int __devexit cfag12864bfb_remove(struct platform_device *device)
{
struct fb_info *info = platform_get_drvdata(device);
@@ -128,7 +128,7 @@ static int cfag12864bfb_remove(struct platform_device *device)
static struct platform_driver cfag12864bfb_driver = {
.probe = cfag12864bfb_probe,
- .remove = cfag12864bfb_remove,
+ .remove = __devexit_p(cfag12864bfb_remove),
.driver = {
.name = CFAG12864BFB_NAME,
},
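
The cfag12864bfb annotations change because the data is consumed from a probe() routine, not from boot-time init code: __initdata is discarded once the kernel has finished booting, while __devinitdata stays around for as long as device probing can still happen, and __devexit_p() lets the remove hook compile away when the removal path is discarded. Sketch of the idiom on an invented driver:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/platform_device.h>

/* wrong for probe()-time data: discarded after boot
 *      static int example_cfg __initdata = 42;
 * correct: kept while probing remains possible */
static int example_cfg __devinitdata = 42;

static int __devinit example_probe(struct platform_device *pdev)
{
        return example_cfg ? 0 : -ENODEV;
}

static int __devexit example_remove(struct platform_device *pdev)
{
        return 0;
}

static struct platform_driver example_driver = {
        .probe  = example_probe,
        .remove = __devexit_p(example_remove),
        .driver = { .name = "example" },
};
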
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index fd52c48ee762..ef38aff737eb 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -18,9 +18,9 @@ config UEVENT_HELPER_PATH
config DEVTMPFS
bool "Maintain a devtmpfs filesystem to mount at /dev"
- depends on HOTPLUG && SHMEM && TMPFS
+ depends on HOTPLUG
help
- This creates a tmpfs filesystem instance early at bootup.
+ This creates a tmpfs/ramfs filesystem instance early at bootup.
In this filesystem, the kernel driver core maintains device
nodes with their default names and permissions for all
registered devices with an assigned major/minor number.
@@ -33,6 +33,9 @@ config DEVTMPFS
functional /dev without any further help. It also allows simple
rescue systems, and reliably handles dynamic major/minor numbers.
+ Notice: if CONFIG_TMPFS isn't enabled, the simpler ramfs
+ file system will be used instead.
+
config DEVTMPFS_MOUNT
bool "Automount devtmpfs at /dev, after the kernel mounted the rootfs"
depends on DEVTMPFS
diff --git a/drivers/base/class.c b/drivers/base/class.c
index 9c6a0d6408e7..8e231d05b400 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -63,6 +63,14 @@ static void class_release(struct kobject *kobj)
kfree(cp);
}
+static const struct kobj_ns_type_operations *class_child_ns_type(struct kobject *kobj)
+{
+ struct class_private *cp = to_class(kobj);
+ struct class *class = cp->class;
+
+ return class->ns_type;
+}
+
static const struct sysfs_ops class_sysfs_ops = {
.show = class_attr_show,
.store = class_attr_store,
@@ -71,6 +79,7 @@ static const struct sysfs_ops class_sysfs_ops = {
static struct kobj_type class_ktype = {
.sysfs_ops = &class_sysfs_ops,
.release = class_release,
+ .child_ns_type = class_child_ns_type,
};
/* Hotplug events for classes go to the class class_subsys */
diff --git a/drivers/base/core.c b/drivers/base/core.c
index b56a0ba31d4a..9630fbdf4e6c 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -20,7 +20,6 @@
#include <linux/notifier.h>
#include <linux/genhd.h>
#include <linux/kallsyms.h>
-#include <linux/semaphore.h>
#include <linux/mutex.h>
#include <linux/async.h>
@@ -132,9 +131,21 @@ static void device_release(struct kobject *kobj)
kfree(p);
}
+static const void *device_namespace(struct kobject *kobj)
+{
+ struct device *dev = to_dev(kobj);
+ const void *ns = NULL;
+
+ if (dev->class && dev->class->ns_type)
+ ns = dev->class->namespace(dev);
+
+ return ns;
+}
+
static struct kobj_type device_ktype = {
.release = device_release,
.sysfs_ops = &dev_sysfs_ops,
+ .namespace = device_namespace,
};
@@ -559,10 +570,10 @@ void device_initialize(struct device *dev)
dev->kobj.kset = devices_kset;
kobject_init(&dev->kobj, &device_ktype);
INIT_LIST_HEAD(&dev->dma_pools);
- init_MUTEX(&dev->sem);
+ mutex_init(&dev->mutex);
+ lockdep_set_novalidate_class(&dev->mutex);
spin_lock_init(&dev->devres_lock);
INIT_LIST_HEAD(&dev->devres_head);
- device_init_wakeup(dev, 0);
device_pm_init(dev);
set_dev_node(dev, -1);
}
@@ -596,11 +607,59 @@ static struct kobject *virtual_device_parent(struct device *dev)
return virtual_dir;
}
-static struct kobject *get_device_parent(struct device *dev,
- struct device *parent)
+struct class_dir {
+ struct kobject kobj;
+ struct class *class;
+};
+
+#define to_class_dir(obj) container_of(obj, struct class_dir, kobj)
+
+static void class_dir_release(struct kobject *kobj)
+{
+ struct class_dir *dir = to_class_dir(kobj);
+ kfree(dir);
+}
+
+static const
+struct kobj_ns_type_operations *class_dir_child_ns_type(struct kobject *kobj)
{
+ struct class_dir *dir = to_class_dir(kobj);
+ return dir->class->ns_type;
+}
+
+static struct kobj_type class_dir_ktype = {
+ .release = class_dir_release,
+ .sysfs_ops = &kobj_sysfs_ops,
+ .child_ns_type = class_dir_child_ns_type
+};
+
+static struct kobject *
+class_dir_create_and_add(struct class *class, struct kobject *parent_kobj)
+{
+ struct class_dir *dir;
int retval;
+ dir = kzalloc(sizeof(*dir), GFP_KERNEL);
+ if (!dir)
+ return NULL;
+
+ dir->class = class;
+ kobject_init(&dir->kobj, &class_dir_ktype);
+
+ dir->kobj.kset = &class->p->class_dirs;
+
+ retval = kobject_add(&dir->kobj, parent_kobj, "%s", class->name);
+ if (retval < 0) {
+ kobject_put(&dir->kobj);
+ return NULL;
+ }
+ return &dir->kobj;
+}
+
+
+static struct kobject *get_device_parent(struct device *dev,
+ struct device *parent)
+{
if (dev->class) {
static DEFINE_MUTEX(gdp_mutex);
struct kobject *kobj = NULL;
@@ -635,18 +694,7 @@ static struct kobject *get_device_parent(struct device *dev,
}
/* or create a new class-directory at the parent device */
- k = kobject_create();
- if (!k) {
- mutex_unlock(&gdp_mutex);
- return NULL;
- }
- k->kset = &dev->class->p->class_dirs;
- retval = kobject_add(k, parent_kobj, "%s", dev->class->name);
- if (retval < 0) {
- mutex_unlock(&gdp_mutex);
- kobject_put(k);
- return NULL;
- }
+ k = class_dir_create_and_add(dev->class, parent_kobj);
/* do not emit an uevent for this simple "glue" directory */
mutex_unlock(&gdp_mutex);
return k;
@@ -738,7 +786,7 @@ out_device:
out_busid:
if (dev->kobj.parent != &dev->class->p->class_subsys.kobj &&
device_is_not_partition(dev))
- sysfs_remove_link(&dev->class->p->class_subsys.kobj,
+ sysfs_delete_link(&dev->class->p->class_subsys.kobj, &dev->kobj,
dev_name(dev));
#else
/* link in the class directory pointing to the device */
@@ -756,7 +804,7 @@ out_busid:
return 0;
out_busid:
- sysfs_remove_link(&dev->class->p->class_subsys.kobj, dev_name(dev));
+ sysfs_delete_link(&dev->class->p->class_subsys.kobj, &dev->kobj, dev_name(dev));
#endif
out_subsys:
@@ -784,13 +832,13 @@ static void device_remove_class_symlinks(struct device *dev)
if (dev->kobj.parent != &dev->class->p->class_subsys.kobj &&
device_is_not_partition(dev))
- sysfs_remove_link(&dev->class->p->class_subsys.kobj,
+ sysfs_delete_link(&dev->class->p->class_subsys.kobj, &dev->kobj,
dev_name(dev));
#else
if (dev->parent && device_is_not_partition(dev))
sysfs_remove_link(&dev->kobj, "device");
- sysfs_remove_link(&dev->class->p->class_subsys.kobj, dev_name(dev));
+ sysfs_delete_link(&dev->class->p->class_subsys.kobj, &dev->kobj, dev_name(dev));
#endif
sysfs_remove_link(&dev->kobj, "subsystem");
@@ -1372,7 +1420,7 @@ struct device *__root_device_register(const char *name, struct module *owner)
return ERR_PTR(err);
}
-#ifdef CONFIG_MODULE /* gotta find a "cleaner" way to do this */
+#ifdef CONFIG_MODULES /* gotta find a "cleaner" way to do this */
if (owner) {
struct module_kobject *mk = &owner->mkobj;
@@ -1576,6 +1624,14 @@ int device_rename(struct device *dev, char *new_name)
goto out;
}
+#ifndef CONFIG_SYSFS_DEPRECATED
+ if (dev->class) {
+ error = sysfs_rename_link(&dev->class->p->class_subsys.kobj,
+ &dev->kobj, old_device_name, new_name);
+ if (error)
+ goto out;
+ }
+#endif
error = kobject_rename(&dev->kobj, new_name);
if (error)
goto out;
@@ -1590,11 +1646,6 @@ int device_rename(struct device *dev, char *new_name)
new_class_name);
}
}
-#else
- if (dev->class) {
- error = sysfs_rename_link(&dev->class->p->class_subsys.kobj,
- &dev->kobj, old_device_name, new_name);
- }
#endif
out:
@@ -1735,10 +1786,25 @@ EXPORT_SYMBOL_GPL(device_move);
*/
void device_shutdown(void)
{
- struct device *dev, *devn;
+ struct device *dev;
+
+ spin_lock(&devices_kset->list_lock);
+ /*
+ * Walk the devices list backward, shutting down each in turn.
+ * Beware that device unplug events may also start pulling
+ * devices offline, even as the system is shutting down.
+ */
+ while (!list_empty(&devices_kset->list)) {
+ dev = list_entry(devices_kset->list.prev, struct device,
+ kobj.entry);
+ get_device(dev);
+ /*
+ * Make sure the device is off the kset list, in the
+ * event that dev->*->shutdown() doesn't remove it.
+ */
+ list_del_init(&dev->kobj.entry);
+ spin_unlock(&devices_kset->list_lock);
- list_for_each_entry_safe_reverse(dev, devn, &devices_kset->list,
- kobj.entry) {
if (dev->bus && dev->bus->shutdown) {
dev_dbg(dev, "shutdown\n");
dev->bus->shutdown(dev);
@@ -1746,6 +1812,10 @@ void device_shutdown(void)
dev_dbg(dev, "shutdown\n");
dev->driver->shutdown(dev);
}
+ put_device(dev);
+
+ spin_lock(&devices_kset->list_lock);
}
+ spin_unlock(&devices_kset->list_lock);
async_synchronize_full();
}
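
Among the core.c changes above, the per-device lock switches from a semaphore to a mutex, with lockdep validation opted out via lockdep_set_novalidate_class() because the device lock is taken in too many distinct contexts to annotate cleanly. The conversion pattern, sketched on an invented structure:

#include <linux/mutex.h>

struct example_obj {
        struct mutex lock;              /* was: struct semaphore sem */
};

static void example_obj_init(struct example_obj *obj)
{
        mutex_init(&obj->lock);         /* was: init_MUTEX(&obj->sem) */
}

static void example_obj_use(struct example_obj *obj)
{
        mutex_lock(&obj->lock);         /* was: down(&obj->sem) */
        /* ... touch obj ... */
        mutex_unlock(&obj->lock);       /* was: up(&obj->sem) */
}
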
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index f35719aab3c1..251acea3d359 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -186,7 +186,7 @@ static ssize_t print_cpus_offline(struct sysdev_class *class,
/* display offline cpus < nr_cpu_ids */
if (!alloc_cpumask_var(&offline, GFP_KERNEL))
return -ENOMEM;
- cpumask_complement(offline, cpu_online_mask);
+ cpumask_andnot(offline, cpu_possible_mask, cpu_online_mask);
n = cpulist_scnprintf(buf, len, offline);
free_cpumask_var(offline);
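
The cpu.c fix matters because cpumask_complement() sets every bit up to nr_cpumask_bits, so CPU numbers this machine can never have would be listed as "offline"; masking the complement of the online mask with cpu_possible_mask keeps the result within the CPUs that could actually exist. Equivalent sketch:

#include <linux/cpumask.h>

static void example_offline_mask(struct cpumask *offline)
{
        /* offline = possible & ~online */
        cpumask_andnot(offline, cpu_possible_mask, cpu_online_mask);
}
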
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index c89291f8a16b..503c2620bbcc 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -40,11 +40,11 @@ static void driver_bound(struct device *dev)
pr_debug("driver: '%s': %s: bound to device '%s'\n", dev_name(dev),
__func__, dev->driver->name);
+ klist_add_tail(&dev->p->knode_driver, &dev->driver->p->klist_devices);
+
if (dev->bus)
blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
BUS_NOTIFY_BOUND_DRIVER, dev);
-
- klist_add_tail(&dev->p->knode_driver, &dev->driver->p->klist_devices);
}
static int driver_sysfs_add(struct device *dev)
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index 057cf11326bf..af0600143d1c 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -20,6 +20,7 @@
#include <linux/namei.h>
#include <linux/fs.h>
#include <linux/shmem_fs.h>
+#include <linux/ramfs.h>
#include <linux/cred.h>
#include <linux/sched.h>
#include <linux/init_task.h>
@@ -45,7 +46,11 @@ __setup("devtmpfs.mount=", mount_param);
static int dev_get_sb(struct file_system_type *fs_type, int flags,
const char *dev_name, void *data, struct vfsmount *mnt)
{
+#ifdef CONFIG_TMPFS
return get_sb_single(fs_type, flags, data, shmem_fill_super, mnt);
+#else
+ return get_sb_single(fs_type, flags, data, ramfs_fill_super, mnt);
+#endif
}
static struct file_system_type dev_fs_type = {
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 985da11174e7..3f093b0dd217 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -27,6 +27,52 @@ MODULE_AUTHOR("Manuel Estrada Sainz");
MODULE_DESCRIPTION("Multi purpose firmware loading support");
MODULE_LICENSE("GPL");
+/* Builtin firmware support */
+
+#ifdef CONFIG_FW_LOADER
+
+extern struct builtin_fw __start_builtin_fw[];
+extern struct builtin_fw __end_builtin_fw[];
+
+static bool fw_get_builtin_firmware(struct firmware *fw, const char *name)
+{
+ struct builtin_fw *b_fw;
+
+ for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++) {
+ if (strcmp(name, b_fw->name) == 0) {
+ fw->size = b_fw->size;
+ fw->data = b_fw->data;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static bool fw_is_builtin_firmware(const struct firmware *fw)
+{
+ struct builtin_fw *b_fw;
+
+ for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++)
+ if (fw->data == b_fw->data)
+ return true;
+
+ return false;
+}
+
+#else /* Module case - no builtin firmware support */
+
+static inline bool fw_get_builtin_firmware(struct firmware *fw, const char *name)
+{
+ return false;
+}
+
+static inline bool fw_is_builtin_firmware(const struct firmware *fw)
+{
+ return false;
+}
+#endif
+
enum {
FW_STATUS_LOADING,
FW_STATUS_DONE,
@@ -40,7 +86,6 @@ static int loading_timeout = 60; /* In seconds */
static DEFINE_MUTEX(fw_lock);
struct firmware_priv {
- char *fw_id;
struct completion completion;
struct bin_attribute attr_data;
struct firmware *fw;
@@ -48,18 +93,11 @@ struct firmware_priv {
struct page **pages;
int nr_pages;
int page_array_size;
- const char *vdata;
struct timer_list timeout;
+ bool nowait;
+ char fw_id[];
};
-#ifdef CONFIG_FW_LOADER
-extern struct builtin_fw __start_builtin_fw[];
-extern struct builtin_fw __end_builtin_fw[];
-#else /* Module case. Avoid ifdefs later; it'll all optimise out */
-static struct builtin_fw *__start_builtin_fw;
-static struct builtin_fw *__end_builtin_fw;
-#endif
-
static void
fw_load_abort(struct firmware_priv *fw_priv)
{
@@ -100,9 +138,25 @@ firmware_timeout_store(struct class *class,
return count;
}
-static CLASS_ATTR(timeout, 0644, firmware_timeout_show, firmware_timeout_store);
+static struct class_attribute firmware_class_attrs[] = {
+ __ATTR(timeout, S_IWUSR | S_IRUGO,
+ firmware_timeout_show, firmware_timeout_store),
+ __ATTR_NULL
+};
+
+static void fw_dev_release(struct device *dev)
+{
+ struct firmware_priv *fw_priv = dev_get_drvdata(dev);
+ int i;
+
+ for (i = 0; i < fw_priv->nr_pages; i++)
+ __free_page(fw_priv->pages[i]);
+ kfree(fw_priv->pages);
+ kfree(fw_priv);
+ kfree(dev);
-static void fw_dev_release(struct device *dev);
+ module_put(THIS_MODULE);
+}
static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
{
@@ -112,12 +166,15 @@ static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
return -ENOMEM;
if (add_uevent_var(env, "TIMEOUT=%i", loading_timeout))
return -ENOMEM;
+ if (add_uevent_var(env, "ASYNC=%d", fw_priv->nowait))
+ return -ENOMEM;
return 0;
}
static struct class firmware_class = {
.name = "firmware",
+ .class_attrs = firmware_class_attrs,
.dev_uevent = firmware_uevent,
.dev_release = fw_dev_release,
};
@@ -130,6 +187,17 @@ static ssize_t firmware_loading_show(struct device *dev,
return sprintf(buf, "%d\n", loading);
}
+static void firmware_free_data(const struct firmware *fw)
+{
+ int i;
+ vunmap(fw->data);
+ if (fw->pages) {
+ for (i = 0; i < PFN_UP(fw->size); i++)
+ __free_page(fw->pages[i]);
+ kfree(fw->pages);
+ }
+}
+
/* Some architectures don't have PAGE_KERNEL_RO */
#ifndef PAGE_KERNEL_RO
#define PAGE_KERNEL_RO PAGE_KERNEL
@@ -162,21 +230,21 @@ static ssize_t firmware_loading_store(struct device *dev,
mutex_unlock(&fw_lock);
break;
}
- vfree(fw_priv->fw->data);
- fw_priv->fw->data = NULL;
+ firmware_free_data(fw_priv->fw);
+ memset(fw_priv->fw, 0, sizeof(struct firmware));
+ /* If the pages are not owned by 'struct firmware' */
for (i = 0; i < fw_priv->nr_pages; i++)
__free_page(fw_priv->pages[i]);
kfree(fw_priv->pages);
fw_priv->pages = NULL;
fw_priv->page_array_size = 0;
fw_priv->nr_pages = 0;
- fw_priv->fw->size = 0;
set_bit(FW_STATUS_LOADING, &fw_priv->status);
mutex_unlock(&fw_lock);
break;
case 0:
if (test_bit(FW_STATUS_LOADING, &fw_priv->status)) {
- vfree(fw_priv->fw->data);
+ vunmap(fw_priv->fw->data);
fw_priv->fw->data = vmap(fw_priv->pages,
fw_priv->nr_pages,
0, PAGE_KERNEL_RO);
@@ -184,7 +252,10 @@ static ssize_t firmware_loading_store(struct device *dev,
dev_err(dev, "%s: vmap() failed\n", __func__);
goto err;
}
- /* Pages will be freed by vfree() */
+ /* Pages are now owned by 'struct firmware' */
+ fw_priv->fw->pages = fw_priv->pages;
+ fw_priv->pages = NULL;
+
fw_priv->page_array_size = 0;
fw_priv->nr_pages = 0;
complete(&fw_priv->completion);
@@ -207,8 +278,9 @@ static ssize_t firmware_loading_store(struct device *dev,
static DEVICE_ATTR(loading, 0644, firmware_loading_show, firmware_loading_store);
static ssize_t
-firmware_data_read(struct kobject *kobj, struct bin_attribute *bin_attr,
- char *buffer, loff_t offset, size_t count)
+firmware_data_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buffer, loff_t offset,
+ size_t count)
{
struct device *dev = to_dev(kobj);
struct firmware_priv *fw_priv = dev_get_drvdata(dev);
@@ -291,6 +363,7 @@ fw_realloc_buffer(struct firmware_priv *fw_priv, int min_size)
/**
* firmware_data_write - write method for firmware
+ * @filp: open sysfs file
* @kobj: kobject for the device
* @bin_attr: bin_attr structure
* @buffer: buffer being written
@@ -301,8 +374,9 @@ fw_realloc_buffer(struct firmware_priv *fw_priv, int min_size)
* the driver as a firmware image.
**/
static ssize_t
-firmware_data_write(struct kobject *kobj, struct bin_attribute *bin_attr,
- char *buffer, loff_t offset, size_t count)
+firmware_data_write(struct file* filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buffer,
+ loff_t offset, size_t count)
{
struct device *dev = to_dev(kobj);
struct firmware_priv *fw_priv = dev_get_drvdata(dev);
@@ -353,21 +427,6 @@ static struct bin_attribute firmware_attr_data_tmpl = {
.write = firmware_data_write,
};
-static void fw_dev_release(struct device *dev)
-{
- struct firmware_priv *fw_priv = dev_get_drvdata(dev);
- int i;
-
- for (i = 0; i < fw_priv->nr_pages; i++)
- __free_page(fw_priv->pages[i]);
- kfree(fw_priv->pages);
- kfree(fw_priv->fw_id);
- kfree(fw_priv);
- kfree(dev);
-
- module_put(THIS_MODULE);
-}
-
static void
firmware_class_timeout(u_long data)
{
@@ -379,8 +438,8 @@ static int fw_register_device(struct device **dev_p, const char *fw_name,
struct device *device)
{
int retval;
- struct firmware_priv *fw_priv = kzalloc(sizeof(*fw_priv),
- GFP_KERNEL);
+ struct firmware_priv *fw_priv =
+ kzalloc(sizeof(*fw_priv) + strlen(fw_name) + 1, GFP_KERNEL);
struct device *f_dev = kzalloc(sizeof(*f_dev), GFP_KERNEL);
*dev_p = NULL;
@@ -391,16 +450,9 @@ static int fw_register_device(struct device **dev_p, const char *fw_name,
goto error_kfree;
}
+ strcpy(fw_priv->fw_id, fw_name);
init_completion(&fw_priv->completion);
fw_priv->attr_data = firmware_attr_data_tmpl;
- fw_priv->fw_id = kstrdup(fw_name, GFP_KERNEL);
- if (!fw_priv->fw_id) {
- dev_err(device, "%s: Firmware name allocation failed\n",
- __func__);
- retval = -ENOMEM;
- goto error_kfree;
- }
-
fw_priv->timeout.function = firmware_class_timeout;
fw_priv->timeout.data = (u_long) fw_priv;
init_timer(&fw_priv->timeout);
@@ -427,7 +479,7 @@ error_kfree:
static int fw_setup_device(struct firmware *fw, struct device **dev_p,
const char *fw_name, struct device *device,
- int uevent)
+ int uevent, bool nowait)
{
struct device *f_dev;
struct firmware_priv *fw_priv;
@@ -443,6 +495,8 @@ static int fw_setup_device(struct firmware *fw, struct device **dev_p,
fw_priv = dev_get_drvdata(f_dev);
+ fw_priv->nowait = nowait;
+
fw_priv->fw = fw;
sysfs_bin_attr_init(&fw_priv->attr_data);
retval = sysfs_create_bin_file(&f_dev->kobj, &fw_priv->attr_data);
@@ -470,12 +524,11 @@ out:
static int
_request_firmware(const struct firmware **firmware_p, const char *name,
- struct device *device, int uevent)
+ struct device *device, int uevent, bool nowait)
{
struct device *f_dev;
struct firmware_priv *fw_priv;
struct firmware *firmware;
- struct builtin_fw *builtin;
int retval;
if (!firmware_p)
@@ -489,21 +542,16 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
goto out;
}
- for (builtin = __start_builtin_fw; builtin != __end_builtin_fw;
- builtin++) {
- if (strcmp(name, builtin->name))
- continue;
- dev_info(device, "firmware: using built-in firmware %s\n",
- name);
- firmware->size = builtin->size;
- firmware->data = builtin->data;
+ if (fw_get_builtin_firmware(firmware, name)) {
+ dev_dbg(device, "firmware: using built-in firmware %s\n", name);
return 0;
}
if (uevent)
- dev_info(device, "firmware: requesting %s\n", name);
+ dev_dbg(device, "firmware: requesting %s\n", name);
- retval = fw_setup_device(firmware, &f_dev, name, device, uevent);
+ retval = fw_setup_device(firmware, &f_dev, name, device,
+ uevent, nowait);
if (retval)
goto error_kfree_fw;
@@ -560,26 +608,18 @@ request_firmware(const struct firmware **firmware_p, const char *name,
struct device *device)
{
int uevent = 1;
- return _request_firmware(firmware_p, name, device, uevent);
+ return _request_firmware(firmware_p, name, device, uevent, false);
}
/**
* release_firmware: - release the resource associated with a firmware image
* @fw: firmware resource to release
**/
-void
-release_firmware(const struct firmware *fw)
+void release_firmware(const struct firmware *fw)
{
- struct builtin_fw *builtin;
-
if (fw) {
- for (builtin = __start_builtin_fw; builtin != __end_builtin_fw;
- builtin++) {
- if (fw->data == builtin->data)
- goto free_fw;
- }
- vfree(fw->data);
- free_fw:
+ if (!fw_is_builtin_firmware(fw))
+ firmware_free_data(fw);
kfree(fw);
}
}
@@ -606,7 +646,7 @@ request_firmware_work_func(void *arg)
return 0;
}
ret = _request_firmware(&fw, fw_work->name, fw_work->device,
- fw_work->uevent);
+ fw_work->uevent, true);
fw_work->cont(fw, fw_work->context);
@@ -670,26 +710,12 @@ request_firmware_nowait(
return 0;
}
-static int __init
-firmware_class_init(void)
+static int __init firmware_class_init(void)
{
- int error;
- error = class_register(&firmware_class);
- if (error) {
- printk(KERN_ERR "%s: class_register failed\n", __func__);
- return error;
- }
- error = class_create_file(&firmware_class, &class_attr_timeout);
- if (error) {
- printk(KERN_ERR "%s: class_create_file failed\n",
- __func__);
- class_unregister(&firmware_class);
- }
- return error;
-
+ return class_register(&firmware_class);
}
-static void __exit
-firmware_class_exit(void)
+
+static void __exit firmware_class_exit(void)
{
class_unregister(&firmware_class);
}
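
One part of the firmware_class rework above stores the firmware name in a trailing flexible array member, so a single kzalloc() covers both the structure and the string, and the separate kstrdup() with its error path disappears. The allocation idiom, reduced to an invented structure:

#include <linux/slab.h>
#include <linux/string.h>

struct example_priv {
        int status;
        char id[];                      /* flexible array member, must be last */
};

static struct example_priv *example_priv_alloc(const char *name)
{
        struct example_priv *p;

        p = kzalloc(sizeof(*p) + strlen(name) + 1, GFP_KERNEL);
        if (!p)
                return NULL;

        strcpy(p->id, name);            /* fits: space was reserved above */
        return p;
}
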
diff --git a/drivers/base/module.c b/drivers/base/module.c
index f32f2f9b7be5..db930d3ee312 100644
--- a/drivers/base/module.c
+++ b/drivers/base/module.c
@@ -15,12 +15,10 @@ static char *make_driver_name(struct device_driver *drv)
{
char *driver_name;
- driver_name = kmalloc(strlen(drv->name) + strlen(drv->bus->name) + 2,
- GFP_KERNEL);
+ driver_name = kasprintf(GFP_KERNEL, "%s:%s", drv->bus->name, drv->name);
if (!driver_name)
return NULL;
- sprintf(driver_name, "%s:%s", drv->bus->name, drv->name);
return driver_name;
}
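
make_driver_name() above is a textbook kasprintf() conversion: allocate and format in one call, NULL on allocation failure, and the caller still kfree()s the result. Minimal sketch:

#include <linux/kernel.h>
#include <linux/slab.h>

static char *example_make_name(const char *bus, const char *drv)
{
        return kasprintf(GFP_KERNEL, "%s:%s", bus, drv);        /* kfree() when done */
}
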
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 057979a19eea..2bdd8a94ec94 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -9,6 +9,7 @@
#include <linux/memory.h>
#include <linux/node.h>
#include <linux/hugetlb.h>
+#include <linux/compaction.h>
#include <linux/cpumask.h>
#include <linux/topology.h>
#include <linux/nodemask.h>
@@ -246,6 +247,8 @@ int register_node(struct node *node, int num, struct node *parent)
scan_unevictable_register_node(node);
hugetlb_register_node(node);
+
+ compaction_register_node(node);
}
return error;
}
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 4b4b565c835f..4d99c8bdfedc 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -187,7 +187,7 @@ EXPORT_SYMBOL_GPL(platform_device_alloc);
* released.
*/
int platform_device_add_resources(struct platform_device *pdev,
- struct resource *res, unsigned int num)
+ const struct resource *res, unsigned int num)
{
struct resource *r;
@@ -367,7 +367,7 @@ EXPORT_SYMBOL_GPL(platform_device_unregister);
*/
struct platform_device *platform_device_register_simple(const char *name,
int id,
- struct resource *res,
+ const struct resource *res,
unsigned int num)
{
struct platform_device *pdev;
@@ -735,7 +735,7 @@ static void platform_pm_complete(struct device *dev)
#ifdef CONFIG_SUSPEND
-static int platform_pm_suspend(struct device *dev)
+int __weak platform_pm_suspend(struct device *dev)
{
struct device_driver *drv = dev->driver;
int ret = 0;
@@ -753,7 +753,7 @@ static int platform_pm_suspend(struct device *dev)
return ret;
}
-static int platform_pm_suspend_noirq(struct device *dev)
+int __weak platform_pm_suspend_noirq(struct device *dev)
{
struct device_driver *drv = dev->driver;
int ret = 0;
@@ -769,7 +769,7 @@ static int platform_pm_suspend_noirq(struct device *dev)
return ret;
}
-static int platform_pm_resume(struct device *dev)
+int __weak platform_pm_resume(struct device *dev)
{
struct device_driver *drv = dev->driver;
int ret = 0;
@@ -787,7 +787,7 @@ static int platform_pm_resume(struct device *dev)
return ret;
}
-static int platform_pm_resume_noirq(struct device *dev)
+int __weak platform_pm_resume_noirq(struct device *dev)
{
struct device_driver *drv = dev->driver;
int ret = 0;
@@ -967,17 +967,17 @@ static int platform_pm_restore_noirq(struct device *dev)
int __weak platform_pm_runtime_suspend(struct device *dev)
{
- return -ENOSYS;
+ return pm_generic_runtime_suspend(dev);
};
int __weak platform_pm_runtime_resume(struct device *dev)
{
- return -ENOSYS;
+ return pm_generic_runtime_resume(dev);
};
int __weak platform_pm_runtime_idle(struct device *dev)
{
- return -ENOSYS;
+ return pm_generic_runtime_idle(dev);
};
#else /* !CONFIG_PM_RUNTIME */
@@ -1254,6 +1254,26 @@ static int __init early_platform_driver_probe_id(char *class_str,
}
if (match) {
+ /*
+ * Set up a sensible init_name to enable
+ * dev_name() and others to be used before the
+ * rest of the driver core is initialized.
+ */
+ if (!match->dev.init_name && slab_is_available()) {
+ if (match->id != -1)
+ match->dev.init_name =
+ kasprintf(GFP_KERNEL, "%s.%d",
+ match->name,
+ match->id);
+ else
+ match->dev.init_name =
+ kasprintf(GFP_KERNEL, "%s",
+ match->name);
+
+ if (!match->dev.init_name)
+ return -ENOMEM;
+ }
+
if (epdrv->pdrv->probe(match))
pr_warning("%s: unable to probe %s early.\n",
class_str, match->name);
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 626dd147b75f..b0ec0e9f27e9 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -229,14 +229,16 @@ int __pm_runtime_suspend(struct device *dev, bool from_wq)
if (retval) {
dev->power.runtime_status = RPM_ACTIVE;
- pm_runtime_cancel_pending(dev);
-
if (retval == -EAGAIN || retval == -EBUSY) {
- notify = true;
+ if (dev->power.timer_expires == 0)
+ notify = true;
dev->power.runtime_error = 0;
+ } else {
+ pm_runtime_cancel_pending(dev);
}
} else {
dev->power.runtime_status = RPM_SUSPENDED;
+ pm_runtime_deactivate_timer(dev);
if (dev->parent) {
parent = dev->parent;
@@ -659,8 +661,6 @@ int pm_schedule_suspend(struct device *dev, unsigned int delay)
if (dev->power.runtime_status == RPM_SUSPENDED)
retval = 1;
- else if (dev->power.runtime_status == RPM_SUSPENDING)
- retval = -EINPROGRESS;
else if (atomic_read(&dev->power.usage_count) > 0
|| dev->power.disable_depth > 0)
retval = -EAGAIN;
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index 86fd9373447e..a4c33bc51257 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -5,6 +5,7 @@
#include <linux/device.h>
#include <linux/string.h>
#include <linux/pm_runtime.h>
+#include <asm/atomic.h>
#include "power.h"
/*
@@ -143,7 +144,59 @@ wake_store(struct device * dev, struct device_attribute *attr,
static DEVICE_ATTR(wakeup, 0644, wake_show, wake_store);
-#ifdef CONFIG_PM_SLEEP_ADVANCED_DEBUG
+#ifdef CONFIG_PM_ADVANCED_DEBUG
+#ifdef CONFIG_PM_RUNTIME
+
+static ssize_t rtpm_usagecount_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", atomic_read(&dev->power.usage_count));
+}
+
+static ssize_t rtpm_children_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", dev->power.ignore_children ?
+ 0 : atomic_read(&dev->power.child_count));
+}
+
+static ssize_t rtpm_enabled_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ if ((dev->power.disable_depth) && (dev->power.runtime_auto == false))
+ return sprintf(buf, "disabled & forbidden\n");
+ else if (dev->power.disable_depth)
+ return sprintf(buf, "disabled\n");
+ else if (dev->power.runtime_auto == false)
+ return sprintf(buf, "forbidden\n");
+ return sprintf(buf, "enabled\n");
+}
+
+static ssize_t rtpm_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ if (dev->power.runtime_error)
+ return sprintf(buf, "error\n");
+ switch (dev->power.runtime_status) {
+ case RPM_SUSPENDED:
+ return sprintf(buf, "suspended\n");
+ case RPM_SUSPENDING:
+ return sprintf(buf, "suspending\n");
+ case RPM_RESUMING:
+ return sprintf(buf, "resuming\n");
+ case RPM_ACTIVE:
+ return sprintf(buf, "active\n");
+ }
+ return -EIO;
+}
+
+static DEVICE_ATTR(runtime_usage, 0444, rtpm_usagecount_show, NULL);
+static DEVICE_ATTR(runtime_active_kids, 0444, rtpm_children_show, NULL);
+static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL);
+static DEVICE_ATTR(runtime_enabled, 0444, rtpm_enabled_show, NULL);
+
+#endif
+
static ssize_t async_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -170,15 +223,21 @@ static ssize_t async_store(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR(async, 0644, async_show, async_store);
-#endif /* CONFIG_PM_SLEEP_ADVANCED_DEBUG */
+#endif /* CONFIG_PM_ADVANCED_DEBUG */
static struct attribute * power_attrs[] = {
#ifdef CONFIG_PM_RUNTIME
&dev_attr_control.attr,
#endif
&dev_attr_wakeup.attr,
-#ifdef CONFIG_PM_SLEEP_ADVANCED_DEBUG
+#ifdef CONFIG_PM_ADVANCED_DEBUG
&dev_attr_async.attr,
+#ifdef CONFIG_PM_RUNTIME
+ &dev_attr_runtime_usage.attr,
+ &dev_attr_runtime_active_kids.attr,
+ &dev_attr_runtime_status.attr,
+ &dev_attr_runtime_enabled.attr,
+#endif
#endif
NULL,
};
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index bf6b13206d00..9fc630ce1ddb 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -162,7 +162,7 @@ static int __cpuinit topology_cpu_callback(struct notifier_block *nfb,
topology_remove_dev(cpu);
break;
}
- return rc ? NOTIFY_BAD : NOTIFY_OK;
+ return notifier_from_errno(rc);
}
static int __cpuinit topology_sysfs_init(void)
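
topology_cpu_callback() above adopts notifier_from_errno(), which turns 0 into NOTIFY_OK and a negative errno into a stop-flagged result that still carries the error value, so callers can get it back with notifier_to_errno(). Illustrative:

#include <linux/notifier.h>

static int example_callback_result(int rc)
{
        /* 0 -> NOTIFY_OK; -Exxx -> stop-flagged value carrying the errno */
        return notifier_from_errno(rc);
}
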
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 77bfce52e9ca..de277689da61 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -76,6 +76,17 @@ config BLK_DEV_XD
It's pretty unlikely that you have one of these: say N.
+config GDROM
+ tristate "SEGA Dreamcast GD-ROM drive"
+ depends on SH_DREAMCAST
+ help
+ A standard SEGA Dreamcast comes with a modified CD ROM drive called a
+ "GD-ROM" by SEGA to signify it is capable of reading special disks
+ with up to 1 GB of data. This drive will also read standard CD ROM
+ disks. Select this option to access any disks in your GD ROM drive.
+ Most users will want to say "Y" here.
+ You can also build this as a module which will be called gdrom.
+
config PARIDE
tristate "Parallel port IDE device support"
depends on PARPORT_PC
@@ -103,17 +114,6 @@ config PARIDE
"MicroSolutions backpack protocol", "DataStor Commuter protocol"
etc.).
-config GDROM
- tristate "SEGA Dreamcast GD-ROM drive"
- depends on SH_DREAMCAST
- help
- A standard SEGA Dreamcast comes with a modified CD ROM drive called a
- "GD-ROM" by SEGA to signify it is capable of reading special disks
- with up to 1 GB of data. This drive will also read standard CD ROM
- disks. Select this option to access any disks in your GD ROM drive.
- Most users will want to say "Y" here.
- You can also build this as a module which will be called gdrom.
-
source "drivers/block/paride/Kconfig"
config BLK_CPQ_DA
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 0182a22c423a..832798aa14f6 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -66,6 +66,7 @@
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/interrupt.h>
+#include <linux/platform_device.h>
#include <asm/setup.h>
#include <asm/uaccess.h>
@@ -1696,34 +1697,18 @@ static struct kobject *floppy_find(dev_t dev, int *part, void *data)
return get_disk(unit[drive].gendisk);
}
-static int __init amiga_floppy_init(void)
+static int __init amiga_floppy_probe(struct platform_device *pdev)
{
int i, ret;
- if (!MACH_IS_AMIGA)
- return -ENODEV;
-
- if (!AMIGAHW_PRESENT(AMI_FLOPPY))
- return -ENODEV;
-
if (register_blkdev(FLOPPY_MAJOR,"fd"))
return -EBUSY;
- /*
- * We request DSKPTR, DSKLEN and DSKDATA only, because the other
- * floppy registers are too spreaded over the custom register space
- */
- ret = -EBUSY;
- if (!request_mem_region(CUSTOM_PHYSADDR+0x20, 8, "amiflop [Paula]")) {
- printk("fd: cannot get floppy registers\n");
- goto out_blkdev;
- }
-
ret = -ENOMEM;
if ((raw_buf = (char *)amiga_chip_alloc (RAW_BUF_SIZE, "Floppy")) ==
NULL) {
printk("fd: cannot get chip mem buffer\n");
- goto out_memregion;
+ goto out_blkdev;
}
ret = -EBUSY;
@@ -1792,18 +1777,13 @@ out_irq2:
free_irq(IRQ_AMIGA_DSKBLK, NULL);
out_irq:
amiga_chip_free(raw_buf);
-out_memregion:
- release_mem_region(CUSTOM_PHYSADDR+0x20, 8);
out_blkdev:
unregister_blkdev(FLOPPY_MAJOR,"fd");
return ret;
}
-module_init(amiga_floppy_init);
-#ifdef MODULE
-
#if 0 /* not safe to unload */
-void cleanup_module(void)
+static int __exit amiga_floppy_remove(struct platform_device *pdev)
{
int i;
@@ -1820,12 +1800,25 @@ void cleanup_module(void)
custom.dmacon = DMAF_DISK; /* disable DMA */
amiga_chip_free(raw_buf);
blk_cleanup_queue(floppy_queue);
- release_mem_region(CUSTOM_PHYSADDR+0x20, 8);
unregister_blkdev(FLOPPY_MAJOR, "fd");
}
#endif
-#else
+static struct platform_driver amiga_floppy_driver = {
+ .driver = {
+ .name = "amiga-floppy",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init amiga_floppy_init(void)
+{
+ return platform_driver_probe(&amiga_floppy_driver, amiga_floppy_probe);
+}
+
+module_init(amiga_floppy_init);
+
+#ifndef MODULE
static int __init amiga_floppy_setup (char *str)
{
int n;
@@ -1840,3 +1833,5 @@ static int __init amiga_floppy_setup (char *str)
__setup("floppy=", amiga_floppy_setup);
#endif
+
+MODULE_ALIAS("platform:amiga-floppy");
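
The amiflop conversion above uses platform_driver_probe(), the registration form intended for devices that cannot appear later: the probe routine is passed separately, so it may live in __init memory, and is never stored in the platform_driver. A skeletal example with invented names:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int __init example_probe(struct platform_device *pdev)
{
        return 0;               /* set up the one device known at boot */
}

static struct platform_driver example_driver = {
        .driver = {
                .name   = "example-dev",
                .owner  = THIS_MODULE,
        },
};

static int __init example_init(void)
{
        /* returns -ENODEV if no matching device had been registered */
        return platform_driver_probe(&example_driver, example_probe);
}
module_init(example_init);

MODULE_ALIAS("platform:example-dev");
MODULE_LICENSE("GPL");
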
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index eb5ff0531cfb..51ceaee98f9f 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -1588,7 +1588,6 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
c->Request = ioc->Request;
if (ioc->buf_size > 0) {
- int i;
for (i = 0; i < sg_used; i++) {
temp64.val =
pci_map_single(host->pdev, buff[i],
@@ -2434,7 +2433,7 @@ static int deregister_disk(ctlr_info_t *h, int drv_index,
/* if it was the last disk, find the new hightest lun */
if (clear_all && recalculate_highest_lun) {
- int i, newhighest = -1;
+ int newhighest = -1;
for (i = 0; i <= h->highest_lun; i++) {
/* if the disk has size > 0, it is available */
if (h->drv[i] && h->drv[i]->heads)
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index 3390716898d5..e3f88d6e1412 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -84,6 +84,9 @@ struct drbd_bitmap {
#define BM_MD_IO_ERROR 1
#define BM_P_VMALLOCED 2
+static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
+ unsigned long e, int val, const enum km_type km);
+
static int bm_is_locked(struct drbd_bitmap *b)
{
return test_bit(BM_LOCKED, &b->bm_flags);
@@ -441,7 +444,7 @@ static void bm_memset(struct drbd_bitmap *b, size_t offset, int c, size_t len)
* In case this is actually a resize, we copy the old bitmap into the new one.
* Otherwise, the bitmap is initialized to all bits set.
*/
-int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity)
+int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits)
{
struct drbd_bitmap *b = mdev->bitmap;
unsigned long bits, words, owords, obits, *p_addr, *bm;
@@ -516,7 +519,7 @@ int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity)
obits = b->bm_bits;
growing = bits > obits;
- if (opages)
+ if (opages && growing && set_new_bits)
bm_set_surplus(b);
b->bm_pages = npages;
@@ -526,8 +529,12 @@ int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity)
b->bm_dev_capacity = capacity;
if (growing) {
- bm_memset(b, owords, 0xff, words-owords);
- b->bm_set += bits - obits;
+ if (set_new_bits) {
+ bm_memset(b, owords, 0xff, words-owords);
+ b->bm_set += bits - obits;
+ } else
+ bm_memset(b, owords, 0x00, words-owords);
+
}
if (want < have) {
@@ -773,7 +780,7 @@ static void bm_page_io_async(struct drbd_conf *mdev, struct drbd_bitmap *b, int
/* nothing to do, on disk == in memory */
# define bm_cpu_to_lel(x) ((void)0)
# else
-void bm_cpu_to_lel(struct drbd_bitmap *b)
+static void bm_cpu_to_lel(struct drbd_bitmap *b)
{
/* need to cpu_to_lel all the pages ...
* this may be optimized by using
@@ -1015,7 +1022,7 @@ unsigned long _drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_f
* wants bitnr, not sector.
* expected to be called for only a few bits (e - s about BITS_PER_LONG).
* Must hold bitmap lock already. */
-int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
+static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
unsigned long e, int val, const enum km_type km)
{
struct drbd_bitmap *b = mdev->bitmap;
@@ -1053,7 +1060,7 @@ int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
* for val != 0, we change 0 -> 1, return code positive
* for val == 0, we change 1 -> 0, return code negative
* wants bitnr, not sector */
-int bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
+static int bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
const unsigned long e, int val)
{
unsigned long flags;
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index e5e86a781820..e9654c8d5b62 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -132,6 +132,7 @@ enum {
DRBD_FAULT_DT_RA = 6, /* data read ahead */
DRBD_FAULT_BM_ALLOC = 7, /* bitmap allocation */
DRBD_FAULT_AL_EE = 8, /* alloc ee */
+ DRBD_FAULT_RECEIVE = 9, /* Changes some bytes upon receiving a [rs]data block */
DRBD_FAULT_MAX,
};
@@ -208,8 +209,11 @@ enum drbd_packets {
P_RS_IS_IN_SYNC = 0x22, /* meta socket */
P_SYNC_PARAM89 = 0x23, /* data socket, protocol version 89 replacement for P_SYNC_PARAM */
P_COMPRESSED_BITMAP = 0x24, /* compressed or otherwise encoded bitmap transfer */
+ /* P_CKPT_FENCE_REQ = 0x25, * currently reserved for protocol D */
+ /* P_CKPT_DISABLE_REQ = 0x26, * currently reserved for protocol D */
+ P_DELAY_PROBE = 0x27, /* is used on BOTH sockets */
- P_MAX_CMD = 0x25,
+ P_MAX_CMD = 0x28,
P_MAY_IGNORE = 0x100, /* Flag to test if (cmd > P_MAY_IGNORE) ... */
P_MAX_OPT_CMD = 0x101,
@@ -264,6 +268,7 @@ static inline const char *cmdname(enum drbd_packets cmd)
[P_CSUM_RS_REQUEST] = "CsumRSRequest",
[P_RS_IS_IN_SYNC] = "CsumRSIsInSync",
[P_COMPRESSED_BITMAP] = "CBitmap",
+ [P_DELAY_PROBE] = "DelayProbe",
[P_MAX_CMD] = NULL,
};
@@ -481,7 +486,8 @@ struct p_sizes {
u64 u_size; /* user requested size */
u64 c_size; /* current exported size */
u32 max_segment_size; /* Maximal size of a BIO */
- u32 queue_order_type;
u16 queue_order_type; /* not yet implemented in DRBD */
+ u16 dds_flags; /* use enum dds_flags here. */
} __packed;
struct p_state {
@@ -538,6 +544,18 @@ struct p_compressed_bm {
u8 code[0];
} __packed;
+struct p_delay_probe {
+ struct p_header head;
+ u32 seq_num; /* sequence number to match the two probe packets */
+ u32 offset; /* usecs the probe got sent after the reference time point */
+} __packed;
+
+struct delay_probe {
+ struct list_head list;
+ unsigned int seq_num;
+ struct timeval time;
+};
+
/* DCBP: Drbd Compressed Bitmap Packet ... */
static inline enum drbd_bitmap_code
DCBP_get_code(struct p_compressed_bm *p)
@@ -722,22 +740,6 @@ enum epoch_event {
EV_CLEANUP = 32, /* used as flag */
};
-struct drbd_epoch_entry {
- struct drbd_work w;
- struct drbd_conf *mdev;
- struct bio *private_bio;
- struct hlist_node colision;
- sector_t sector;
- unsigned int size;
- struct drbd_epoch *epoch;
-
- /* up to here, the struct layout is identical to drbd_request;
- * we might be able to use that to our advantage... */
-
- unsigned int flags;
- u64 block_id;
-};
-
struct drbd_wq_barrier {
struct drbd_work w;
struct completion done;
@@ -748,17 +750,49 @@ struct digest_info {
void *digest;
};
-/* ee flag bits */
+struct drbd_epoch_entry {
+ struct drbd_work w;
+ struct hlist_node colision;
+ struct drbd_epoch *epoch;
+ struct drbd_conf *mdev;
+ struct page *pages;
+ atomic_t pending_bios;
+ unsigned int size;
+ /* see comments on ee flag bits below */
+ unsigned long flags;
+ sector_t sector;
+ u64 block_id;
+};
+
+/* ee flag bits.
+ * While corresponding bios are in flight, the only modification will be
+ * set_bit WAS_ERROR, which has to be atomic.
+ * If no bios are in flight yet, or all have been completed,
+ * non-atomic modification to ee->flags is ok.
+ */
enum {
__EE_CALL_AL_COMPLETE_IO,
- __EE_CONFLICT_PENDING,
__EE_MAY_SET_IN_SYNC,
+
+ /* This epoch entry closes an epoch using a barrier.
* On successful completion, the epoch is released,
* and the P_BARRIER_ACK sent. */
__EE_IS_BARRIER,
+
+ /* In case a barrier failed,
+ * we need to resubmit without the barrier flag. */
+ __EE_RESUBMITTED,
+
+ /* we may have several bios per epoch entry.
+ * if any of those fail, we set this flag atomically
+ * from the endio callback */
+ __EE_WAS_ERROR,
};
#define EE_CALL_AL_COMPLETE_IO (1<<__EE_CALL_AL_COMPLETE_IO)
-#define EE_CONFLICT_PENDING (1<<__EE_CONFLICT_PENDING)
#define EE_MAY_SET_IN_SYNC (1<<__EE_MAY_SET_IN_SYNC)
#define EE_IS_BARRIER (1<<__EE_IS_BARRIER)
+#define EE_RESUBMITTED (1<<__EE_RESUBMITTED)
+#define EE_WAS_ERROR (1<<__EE_WAS_ERROR)
/* global flag bits */
enum {
@@ -908,9 +942,12 @@ struct drbd_conf {
unsigned int ko_count;
struct drbd_work resync_work,
unplug_work,
- md_sync_work;
+ md_sync_work,
+ delay_probe_work,
+ uuid_work;
struct timer_list resync_timer;
struct timer_list md_sync_timer;
+ struct timer_list delay_probe_timer;
/* Used after attach while negotiating new disk state. */
union drbd_state new_state_tmp;
@@ -1026,6 +1063,13 @@ struct drbd_conf {
u64 ed_uuid; /* UUID of the exposed data */
struct mutex state_mutex;
char congestion_reason; /* Why we where congested... */
+ struct list_head delay_probes; /* protected by peer_seq_lock */
+ int data_delay; /* Delay of packets on the data-sock behind meta-sock */
+ unsigned int delay_seq; /* To generate sequence numbers of delay probes */
+ struct timeval dps_time; /* delay-probes-start-time */
+ unsigned int dp_volume_last; /* send_cnt of last delay probe */
+ int c_sync_rate; /* current resync rate after delay_probe magic */
+ atomic_t new_c_uuid;
};
static inline struct drbd_conf *minor_to_mdev(unsigned int minor)
@@ -1081,6 +1125,11 @@ enum chg_state_flags {
CS_ORDERED = CS_WAIT_COMPLETE + CS_SERIALIZE,
};
+enum dds_flags {
+ DDSF_FORCED = 1,
+ DDSF_NO_RESYNC = 2, /* Do not run a resync for the new space */
+};
+
extern void drbd_init_set_defaults(struct drbd_conf *mdev);
extern int drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f,
union drbd_state mask, union drbd_state val);
@@ -1113,7 +1162,7 @@ extern int drbd_send_protocol(struct drbd_conf *mdev);
extern int drbd_send_uuids(struct drbd_conf *mdev);
extern int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev);
extern int drbd_send_sync_uuid(struct drbd_conf *mdev, u64 val);
-extern int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply);
+extern int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags);
extern int _drbd_send_state(struct drbd_conf *mdev);
extern int drbd_send_state(struct drbd_conf *mdev);
extern int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock,
@@ -1311,7 +1360,7 @@ struct bm_extent {
#define APP_R_HSIZE 15
extern int drbd_bm_init(struct drbd_conf *mdev);
-extern int drbd_bm_resize(struct drbd_conf *mdev, sector_t sectors);
+extern int drbd_bm_resize(struct drbd_conf *mdev, sector_t sectors, int set_new_bits);
extern void drbd_bm_cleanup(struct drbd_conf *mdev);
extern void drbd_bm_set_all(struct drbd_conf *mdev);
extern void drbd_bm_clear_all(struct drbd_conf *mdev);
@@ -1383,7 +1432,7 @@ extern void drbd_resume_io(struct drbd_conf *mdev);
extern char *ppsize(char *buf, unsigned long long size);
extern sector_t drbd_new_dev_size(struct drbd_conf *, struct drbd_backing_dev *, int);
enum determine_dev_size { dev_size_error = -1, unchanged = 0, shrunk = 1, grew = 2 };
-extern enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *, int force) __must_hold(local);
+extern enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *, enum dds_flags) __must_hold(local);
extern void resync_after_online_grow(struct drbd_conf *);
extern void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int) __must_hold(local);
extern int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role,
@@ -1414,7 +1463,8 @@ static inline void ov_oos_print(struct drbd_conf *mdev)
}
-extern void drbd_csum(struct drbd_conf *, struct crypto_hash *, struct bio *, void *);
+extern void drbd_csum_bio(struct drbd_conf *, struct crypto_hash *, struct bio *, void *);
+extern void drbd_csum_ee(struct drbd_conf *, struct crypto_hash *, struct drbd_epoch_entry *, void *);
/* worker callbacks */
extern int w_req_cancel_conflict(struct drbd_conf *, struct drbd_work *, int);
extern int w_read_retry_remote(struct drbd_conf *, struct drbd_work *, int);
@@ -1438,6 +1488,8 @@ extern int w_e_reissue(struct drbd_conf *, struct drbd_work *, int);
extern void resync_timer_fn(unsigned long data);
/* drbd_receiver.c */
+extern int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
+ const unsigned rw, const int fault_type);
extern int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list);
extern struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
u64 id,
@@ -1593,6 +1645,41 @@ void drbd_bcast_ee(struct drbd_conf *mdev,
* inline helper functions
*************************/
+/* see also page_chain_add and friends in drbd_receiver.c */
+static inline struct page *page_chain_next(struct page *page)
+{
+ return (struct page *)page_private(page);
+}
+#define page_chain_for_each(page) \
+ for (; page && ({ prefetch(page_chain_next(page)); 1; }); \
+ page = page_chain_next(page))
+#define page_chain_for_each_safe(page, n) \
+ for (; page && ({ n = page_chain_next(page); 1; }); page = n)
+
+static inline int drbd_bio_has_active_page(struct bio *bio)
+{
+ struct bio_vec *bvec;
+ int i;
+
+ __bio_for_each_segment(bvec, bio, i, 0) {
+ if (page_count(bvec->bv_page) > 1)
+ return 1;
+ }
+
+ return 0;
+}
+
+static inline int drbd_ee_has_active_page(struct drbd_epoch_entry *e)
+{
+ struct page *page = e->pages;
+ page_chain_for_each(page) {
+ if (page_count(page) > 1)
+ return 1;
+ }
+ return 0;
+}
+
+
static inline void drbd_state_lock(struct drbd_conf *mdev)
{
wait_event(mdev->misc_wait,
@@ -2132,13 +2219,15 @@ static inline int __inc_ap_bio_cond(struct drbd_conf *mdev)
return 0;
if (test_bit(BITMAP_IO, &mdev->flags))
return 0;
+ if (atomic_read(&mdev->new_c_uuid))
+ return 0;
return 1;
}
/* I'd like to use wait_event_lock_irq,
* but I'm not sure when it got introduced,
* and not sure when it has 3 or 4 arguments */
-static inline void inc_ap_bio(struct drbd_conf *mdev, int one_or_two)
+static inline void inc_ap_bio(struct drbd_conf *mdev, int count)
{
/* compare with after_state_ch,
* os.conn != C_WF_BITMAP_S && ns.conn == C_WF_BITMAP_S */
@@ -2152,6 +2241,9 @@ static inline void inc_ap_bio(struct drbd_conf *mdev, int one_or_two)
* to avoid races with the reconnect code,
* we need to atomic_inc within the spinlock. */
+ if (atomic_read(&mdev->new_c_uuid) && atomic_add_unless(&mdev->new_c_uuid, -1, 1))
+ drbd_queue_work_front(&mdev->data.work, &mdev->uuid_work);
+
spin_lock_irq(&mdev->req_lock);
while (!__inc_ap_bio_cond(mdev)) {
prepare_to_wait(&mdev->misc_wait, &wait, TASK_UNINTERRUPTIBLE);
@@ -2160,7 +2252,7 @@ static inline void inc_ap_bio(struct drbd_conf *mdev, int one_or_two)
finish_wait(&mdev->misc_wait, &wait);
spin_lock_irq(&mdev->req_lock);
}
- atomic_add(one_or_two, &mdev->ap_bio_cnt);
+ atomic_add(count, &mdev->ap_bio_cnt);
spin_unlock_irq(&mdev->req_lock);
}
@@ -2251,7 +2343,8 @@ static inline void drbd_md_flush(struct drbd_conf *mdev)
if (test_bit(MD_NO_BARRIER, &mdev->flags))
return;
- r = blkdev_issue_flush(mdev->ldev->md_bdev, NULL);
+ r = blkdev_issue_flush(mdev->ldev->md_bdev, GFP_KERNEL, NULL,
+ BLKDEV_IFL_WAIT);
if (r) {
set_bit(MD_NO_BARRIER, &mdev->flags);
dev_err(DEV, "meta data flush failed with status %d, disabling md-flushes\n", r);
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 93d1f9b469d4..be2d2da9cdba 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -684,6 +684,9 @@ static int is_valid_state(struct drbd_conf *mdev, union drbd_state ns)
else if (ns.conn > C_CONNECTED && ns.pdsk < D_INCONSISTENT)
rv = SS_NO_REMOTE_DISK;
+ else if (ns.conn > C_CONNECTED && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE)
+ rv = SS_NO_UP_TO_DATE_DISK;
+
else if ((ns.conn == C_CONNECTED ||
ns.conn == C_WF_BITMAP_S ||
ns.conn == C_SYNC_SOURCE ||
@@ -840,7 +843,12 @@ static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state
break;
case C_WF_BITMAP_S:
case C_PAUSED_SYNC_S:
- ns.pdsk = D_OUTDATED;
+ /* remap any consistent state to D_OUTDATED,
+ * but do not "upgrade" states that are not even consistent.
+ */
+ ns.pdsk =
+ (D_DISKLESS < os.pdsk && os.pdsk < D_OUTDATED)
+ ? os.pdsk : D_OUTDATED;
break;
case C_SYNC_SOURCE:
ns.pdsk = D_INCONSISTENT;
@@ -1205,21 +1213,20 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
&& (ns.pdsk < D_INCONSISTENT ||
ns.pdsk == D_UNKNOWN ||
ns.pdsk == D_OUTDATED)) {
- kfree(mdev->p_uuid);
- mdev->p_uuid = NULL;
if (get_ldev(mdev)) {
if ((ns.role == R_PRIMARY || ns.peer == R_PRIMARY) &&
- mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
- drbd_uuid_new_current(mdev);
- drbd_send_uuids(mdev);
- }
+ mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE &&
+ !atomic_read(&mdev->new_c_uuid))
+ atomic_set(&mdev->new_c_uuid, 2);
put_ldev(mdev);
}
}
if (ns.pdsk < D_INCONSISTENT && get_ldev(mdev)) {
- if (ns.peer == R_PRIMARY && mdev->ldev->md.uuid[UI_BITMAP] == 0)
- drbd_uuid_new_current(mdev);
+ /* Diskless peer becomes primary, or we got connected to a diskless, primary peer. */
+ if (ns.peer == R_PRIMARY && mdev->ldev->md.uuid[UI_BITMAP] == 0 &&
+ !atomic_read(&mdev->new_c_uuid))
+ atomic_set(&mdev->new_c_uuid, 2);
/* D_DISKLESS Peer becomes secondary */
if (os.peer == R_PRIMARY && ns.peer == R_SECONDARY)
@@ -1232,7 +1239,7 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
os.disk == D_ATTACHING && ns.disk == D_NEGOTIATING) {
kfree(mdev->p_uuid); /* We expect to receive up-to-date UUIDs soon. */
mdev->p_uuid = NULL; /* ...to not use the old ones in the mean time */
- drbd_send_sizes(mdev, 0); /* to start sync... */
+ drbd_send_sizes(mdev, 0, 0); /* to start sync... */
drbd_send_uuids(mdev);
drbd_send_state(mdev);
}
@@ -1343,6 +1350,24 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
drbd_md_sync(mdev);
}
+static int w_new_current_uuid(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
+{
+ if (get_ldev(mdev)) {
+ if (mdev->ldev->md.uuid[UI_BITMAP] == 0) {
+ drbd_uuid_new_current(mdev);
+ if (get_net_conf(mdev)) {
+ drbd_send_uuids(mdev);
+ put_net_conf(mdev);
+ }
+ drbd_md_sync(mdev);
+ }
+ put_ldev(mdev);
+ }
+ atomic_dec(&mdev->new_c_uuid);
+ wake_up(&mdev->misc_wait);
+
+ return 1;
+}
static int drbd_thread_setup(void *arg)
{
@@ -1755,7 +1780,7 @@ int drbd_send_sync_uuid(struct drbd_conf *mdev, u64 val)
(struct p_header *)&p, sizeof(p));
}
-int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply)
+int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags)
{
struct p_sizes p;
sector_t d_size, u_size;
@@ -1767,7 +1792,6 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply)
d_size = drbd_get_max_capacity(mdev->ldev);
u_size = mdev->ldev->dc.disk_size;
q_order_type = drbd_queue_order_type(mdev);
- p.queue_order_type = cpu_to_be32(drbd_queue_order_type(mdev));
put_ldev(mdev);
} else {
d_size = 0;
@@ -1779,7 +1803,8 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply)
p.u_size = cpu_to_be64(u_size);
p.c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(mdev->this_bdev));
p.max_segment_size = cpu_to_be32(queue_max_segment_size(mdev->rq_queue));
- p.queue_order_type = cpu_to_be32(q_order_type);
+ p.queue_order_type = cpu_to_be16(q_order_type);
+ p.dds_flags = cpu_to_be16(flags);
ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SIZES,
(struct p_header *)&p, sizeof(p));
@@ -2180,6 +2205,43 @@ int drbd_send_ov_request(struct drbd_conf *mdev, sector_t sector, int size)
return ok;
}
+static int drbd_send_delay_probe(struct drbd_conf *mdev, struct drbd_socket *ds)
+{
+ struct p_delay_probe dp;
+ int offset, ok = 0;
+ struct timeval now;
+
+ mutex_lock(&ds->mutex);
+ if (likely(ds->socket)) {
+ do_gettimeofday(&now);
+ offset = now.tv_usec - mdev->dps_time.tv_usec +
+ (now.tv_sec - mdev->dps_time.tv_sec) * 1000000;
+ dp.seq_num = cpu_to_be32(mdev->delay_seq);
+ dp.offset = cpu_to_be32(offset);
+
+ ok = _drbd_send_cmd(mdev, ds->socket, P_DELAY_PROBE,
+ (struct p_header *)&dp, sizeof(dp), 0);
+ }
+ mutex_unlock(&ds->mutex);
+
+ return ok;
+}
+
+static int drbd_send_delay_probes(struct drbd_conf *mdev)
+{
+ int ok;
+
+ mdev->delay_seq++;
+ do_gettimeofday(&mdev->dps_time);
+ ok = drbd_send_delay_probe(mdev, &mdev->meta);
+ ok = ok && drbd_send_delay_probe(mdev, &mdev->data);
+
+ mdev->dp_volume_last = mdev->send_cnt;
+ mod_timer(&mdev->delay_probe_timer, jiffies + mdev->sync_conf.dp_interval * HZ / 10);
+
+ return ok;
+}
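For illustration, with assumed numbers that are not in the patch: drbd_send_delay_probes() records dps_time once, then stamps one probe per socket with its own offset from that reference, both carrying the same delay_seq.

#include <stdio.h>

struct stamp { long sec; long usec; };

/* offset in microseconds, same formula as in drbd_send_delay_probe() */
static int probe_offset(struct stamp now, struct stamp dps)
{
        return now.usec - dps.usec + (now.sec - dps.sec) * 1000000;
}

int main(void)
{
        struct stamp dps  = { 10, 0 };          /* taken once in drbd_send_delay_probes() */
        struct stamp meta = { 10, 250 };        /* meta-socket probe leaves almost at once */
        struct stamp data = { 10, 4250 };       /* data-socket probe queues behind bulk data */

        printf("meta offset: %d us\n", probe_offset(meta, dps));        /* 250 */
        printf("data offset: %d us\n", probe_offset(data, dps));        /* 4250 */
        return 0;
}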
+
/* called on sndtimeo
* returns FALSE if we should retry,
* TRUE if we think connection is dead
@@ -2309,6 +2371,44 @@ static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
return 1;
}
+static int _drbd_send_zc_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e)
+{
+ struct page *page = e->pages;
+ unsigned len = e->size;
+ page_chain_for_each(page) {
+ unsigned l = min_t(unsigned, len, PAGE_SIZE);
+ if (!_drbd_send_page(mdev, page, 0, l))
+ return 0;
+ len -= l;
+ }
+ return 1;
+}
+
+static void consider_delay_probes(struct drbd_conf *mdev)
+{
+ if (mdev->state.conn != C_SYNC_SOURCE || mdev->agreed_pro_version < 93)
+ return;
+
+ if (mdev->dp_volume_last + mdev->sync_conf.dp_volume * 2 < mdev->send_cnt)
+ drbd_send_delay_probes(mdev);
+}
+
+static int w_delay_probes(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
+{
+ if (!cancel && mdev->state.conn == C_SYNC_SOURCE)
+ drbd_send_delay_probes(mdev);
+
+ return 1;
+}
+
+static void delay_probe_timer_fn(unsigned long data)
+{
+ struct drbd_conf *mdev = (struct drbd_conf *) data;
+
+ if (list_empty(&mdev->delay_probe_work.list))
+ drbd_queue_work(&mdev->data.work, &mdev->delay_probe_work);
+}
+
/* Used to send write requests
* R_PRIMARY -> Peer (P_DATA)
*/
@@ -2360,7 +2460,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
drbd_send(mdev, mdev->data.socket, &p, sizeof(p), MSG_MORE));
if (ok && dgs) {
dgb = mdev->int_dig_out;
- drbd_csum(mdev, mdev->integrity_w_tfm, req->master_bio, dgb);
+ drbd_csum_bio(mdev, mdev->integrity_w_tfm, req->master_bio, dgb);
ok = drbd_send(mdev, mdev->data.socket, dgb, dgs, MSG_MORE);
}
if (ok) {
@@ -2371,6 +2471,10 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
}
drbd_put_data_sock(mdev);
+
+ if (ok)
+ consider_delay_probes(mdev);
+
return ok;
}
@@ -2409,13 +2513,17 @@ int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd,
sizeof(p), MSG_MORE);
if (ok && dgs) {
dgb = mdev->int_dig_out;
- drbd_csum(mdev, mdev->integrity_w_tfm, e->private_bio, dgb);
+ drbd_csum_ee(mdev, mdev->integrity_w_tfm, e, dgb);
ok = drbd_send(mdev, mdev->data.socket, dgb, dgs, MSG_MORE);
}
if (ok)
- ok = _drbd_send_zc_bio(mdev, e->private_bio);
+ ok = _drbd_send_zc_ee(mdev, e);
drbd_put_data_sock(mdev);
+
+ if (ok)
+ consider_delay_probes(mdev);
+
return ok;
}
@@ -2600,6 +2708,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
atomic_set(&mdev->net_cnt, 0);
atomic_set(&mdev->packet_seq, 0);
atomic_set(&mdev->pp_in_use, 0);
+ atomic_set(&mdev->new_c_uuid, 0);
mutex_init(&mdev->md_io_mutex);
mutex_init(&mdev->data.mutex);
@@ -2628,16 +2737,26 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
INIT_LIST_HEAD(&mdev->unplug_work.list);
INIT_LIST_HEAD(&mdev->md_sync_work.list);
INIT_LIST_HEAD(&mdev->bm_io_work.w.list);
+ INIT_LIST_HEAD(&mdev->delay_probes);
+ INIT_LIST_HEAD(&mdev->delay_probe_work.list);
+ INIT_LIST_HEAD(&mdev->uuid_work.list);
+
mdev->resync_work.cb = w_resync_inactive;
mdev->unplug_work.cb = w_send_write_hint;
mdev->md_sync_work.cb = w_md_sync;
mdev->bm_io_work.w.cb = w_bitmap_io;
+ mdev->delay_probe_work.cb = w_delay_probes;
+ mdev->uuid_work.cb = w_new_current_uuid;
init_timer(&mdev->resync_timer);
init_timer(&mdev->md_sync_timer);
+ init_timer(&mdev->delay_probe_timer);
mdev->resync_timer.function = resync_timer_fn;
mdev->resync_timer.data = (unsigned long) mdev;
mdev->md_sync_timer.function = md_sync_timer_fn;
mdev->md_sync_timer.data = (unsigned long) mdev;
+ mdev->delay_probe_timer.function = delay_probe_timer_fn;
+ mdev->delay_probe_timer.data = (unsigned long) mdev;
+
init_waitqueue_head(&mdev->misc_wait);
init_waitqueue_head(&mdev->state_wait);
@@ -2680,7 +2799,7 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
drbd_set_my_capacity(mdev, 0);
if (mdev->bitmap) {
/* maybe never allocated. */
- drbd_bm_resize(mdev, 0);
+ drbd_bm_resize(mdev, 0, 1);
drbd_bm_cleanup(mdev);
}
@@ -3129,7 +3248,7 @@ int __init drbd_init(void)
if (err)
goto Enomem;
- drbd_proc = proc_create("drbd", S_IFREG | S_IRUGO , NULL, &drbd_proc_fops);
+ drbd_proc = proc_create_data("drbd", S_IFREG | S_IRUGO , NULL, &drbd_proc_fops, NULL);
if (!drbd_proc) {
printk(KERN_ERR "drbd: unable to register proc file\n");
goto Enomem;
@@ -3660,7 +3779,8 @@ _drbd_fault_str(unsigned int type) {
[DRBD_FAULT_DT_RD] = "Data read",
[DRBD_FAULT_DT_RA] = "Data read ahead",
[DRBD_FAULT_BM_ALLOC] = "BM allocation",
- [DRBD_FAULT_AL_EE] = "EE allocation"
+ [DRBD_FAULT_AL_EE] = "EE allocation",
+ [DRBD_FAULT_RECEIVE] = "receive data corruption",
};
return (type < DRBD_FAULT_MAX) ? _faults[type] : "**Unknown**";
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 6429d2b19e06..632e3245d1bb 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -510,7 +510,7 @@ void drbd_resume_io(struct drbd_conf *mdev)
* Returns 0 on success, negative return values indicate errors.
* You should call drbd_md_sync() after calling this function.
*/
-enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *mdev, int force) __must_hold(local)
+enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *mdev, enum dds_flags flags) __must_hold(local)
{
sector_t prev_first_sect, prev_size; /* previous meta location */
sector_t la_size;
@@ -541,12 +541,12 @@ enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *mdev, int force
/* TODO: should only be some assert here, not (re)init... */
drbd_md_set_sector_offsets(mdev, mdev->ldev);
- size = drbd_new_dev_size(mdev, mdev->ldev, force);
+ size = drbd_new_dev_size(mdev, mdev->ldev, flags & DDSF_FORCED);
if (drbd_get_capacity(mdev->this_bdev) != size ||
drbd_bm_capacity(mdev) != size) {
int err;
- err = drbd_bm_resize(mdev, size);
+ err = drbd_bm_resize(mdev, size, !(flags & DDSF_NO_RESYNC));
if (unlikely(err)) {
/* currently there is only one error: ENOMEM! */
size = drbd_bm_capacity(mdev)>>1;
@@ -704,9 +704,6 @@ void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_seg_s) __mu
struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;
int max_segments = mdev->ldev->dc.max_bio_bvecs;
- if (b->merge_bvec_fn && !mdev->ldev->dc.use_bmbv)
- max_seg_s = PAGE_SIZE;
-
max_seg_s = min(queue_max_sectors(b) * queue_logical_block_size(b), max_seg_s);
blk_queue_max_hw_sectors(q, max_seg_s >> 9);
@@ -1199,13 +1196,12 @@ static int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
}
/* allocation not in the IO path, cqueue thread context */
- new_conf = kmalloc(sizeof(struct net_conf), GFP_KERNEL);
+ new_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
if (!new_conf) {
retcode = ERR_NOMEM;
goto fail;
}
- memset(new_conf, 0, sizeof(struct net_conf));
new_conf->timeout = DRBD_TIMEOUT_DEF;
new_conf->try_connect_int = DRBD_CONNECT_INT_DEF;
new_conf->ping_int = DRBD_PING_INT_DEF;
@@ -1477,8 +1473,8 @@ static int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
{
struct resize rs;
int retcode = NO_ERROR;
- int ldsc = 0; /* local disk size changed */
enum determine_dev_size dd;
+ enum dds_flags ddsf;
memset(&rs, 0, sizeof(struct resize));
if (!resize_from_tags(mdev, nlp->tag_list, &rs)) {
@@ -1502,13 +1498,17 @@ static int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
goto fail;
}
- if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) {
- mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
- ldsc = 1;
+ if (rs.no_resync && mdev->agreed_pro_version < 93) {
+ retcode = ERR_NEED_APV_93;
+ goto fail;
}
+ if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev))
+ mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
+
mdev->ldev->dc.disk_size = (sector_t)rs.resize_size;
- dd = drbd_determin_dev_size(mdev, rs.resize_force);
+ ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
+ dd = drbd_determin_dev_size(mdev, ddsf);
drbd_md_sync(mdev);
put_ldev(mdev);
if (dd == dev_size_error) {
@@ -1516,12 +1516,12 @@ static int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
goto fail;
}
- if (mdev->state.conn == C_CONNECTED && (dd != unchanged || ldsc)) {
+ if (mdev->state.conn == C_CONNECTED) {
if (dd == grew)
set_bit(RESIZE_PENDING, &mdev->flags);
drbd_send_uuids(mdev);
- drbd_send_sizes(mdev, 1);
+ drbd_send_sizes(mdev, 1, ddsf);
}
fail:
@@ -1551,6 +1551,10 @@ static int drbd_nl_syncer_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *n
sc.rate = DRBD_RATE_DEF;
sc.after = DRBD_AFTER_DEF;
sc.al_extents = DRBD_AL_EXTENTS_DEF;
+ sc.dp_volume = DRBD_DP_VOLUME_DEF;
+ sc.dp_interval = DRBD_DP_INTERVAL_DEF;
+ sc.throttle_th = DRBD_RS_THROTTLE_TH_DEF;
+ sc.hold_off_th = DRBD_RS_HOLD_OFF_TH_DEF;
} else
memcpy(&sc, &mdev->sync_conf, sizeof(struct syncer_conf));
@@ -2207,9 +2211,9 @@ void drbd_bcast_ee(struct drbd_conf *mdev,
{
struct cn_msg *cn_reply;
struct drbd_nl_cfg_reply *reply;
- struct bio_vec *bvec;
unsigned short *tl;
- int i;
+ struct page *page;
+ unsigned len;
if (!e)
return;
@@ -2247,11 +2251,15 @@ void drbd_bcast_ee(struct drbd_conf *mdev,
put_unaligned(T_ee_data, tl++);
put_unaligned(e->size, tl++);
- __bio_for_each_segment(bvec, e->private_bio, i, 0) {
- void *d = kmap(bvec->bv_page);
- memcpy(tl, d + bvec->bv_offset, bvec->bv_len);
- kunmap(bvec->bv_page);
- tl=(unsigned short*)((char*)tl + bvec->bv_len);
+ len = e->size;
+ page = e->pages;
+ page_chain_for_each(page) {
+ void *d = kmap_atomic(page, KM_USER0);
+ unsigned l = min_t(unsigned, len, PAGE_SIZE);
+ memcpy(tl, d, l);
+ kunmap_atomic(d, KM_USER0);
+ tl = (unsigned short*)((char*)tl + l);
+ len -= l;
}
put_unaligned(TT_END, tl++); /* Close the tag list */
diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c
index be3374b68460..d0f1767ea4c3 100644
--- a/drivers/block/drbd/drbd_proc.c
+++ b/drivers/block/drbd/drbd_proc.c
@@ -73,14 +73,21 @@ static void drbd_syncer_progress(struct drbd_conf *mdev, struct seq_file *seq)
seq_printf(seq, "sync'ed:%3u.%u%% ", res / 10, res % 10);
/* if more than 1 GB display in MB */
if (mdev->rs_total > 0x100000L)
- seq_printf(seq, "(%lu/%lu)M\n\t",
+ seq_printf(seq, "(%lu/%lu)M",
(unsigned long) Bit2KB(rs_left >> 10),
(unsigned long) Bit2KB(mdev->rs_total >> 10));
else
- seq_printf(seq, "(%lu/%lu)K\n\t",
+ seq_printf(seq, "(%lu/%lu)K",
(unsigned long) Bit2KB(rs_left),
(unsigned long) Bit2KB(mdev->rs_total));
+ if (mdev->state.conn == C_SYNC_TARGET)
+ seq_printf(seq, " queue_delay: %d.%d ms\n\t",
+ mdev->data_delay / 1000,
+ (mdev->data_delay % 1000) / 100);
+ else if (mdev->state.conn == C_SYNC_SOURCE)
+ seq_printf(seq, " delay_probe: %u\n\t", mdev->delay_seq);
+
/* see drivers/md/md.c
* We do not want to overflow, so the order of operands and
* the * 100 / 100 trick are important. We do a +1 to be
@@ -128,6 +135,14 @@ static void drbd_syncer_progress(struct drbd_conf *mdev, struct seq_file *seq)
else
seq_printf(seq, " (%ld)", dbdt);
+ if (mdev->state.conn == C_SYNC_TARGET) {
+ if (mdev->c_sync_rate > 1000)
+ seq_printf(seq, " want: %d,%03d",
+ mdev->c_sync_rate / 1000, mdev->c_sync_rate % 1000);
+ else
+ seq_printf(seq, " want: %d", mdev->c_sync_rate);
+ }
+
seq_printf(seq, " K/sec\n");
}
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 3f096e7959b4..bc9ab7fb2cc7 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -80,30 +80,128 @@ static struct drbd_epoch *previous_epoch(struct drbd_conf *mdev, struct drbd_epo
#define GFP_TRY (__GFP_HIGHMEM | __GFP_NOWARN)
-static struct page *drbd_pp_first_page_or_try_alloc(struct drbd_conf *mdev)
+/*
+ * some helper functions to deal with singly linked page lists,
+ * page->private being our "next" pointer.
+ */
+
+/* If at least n pages are linked at head, get n pages off.
+ * Otherwise, don't modify head, and return NULL.
+ * Locking is the responsibility of the caller.
+ */
+static struct page *page_chain_del(struct page **head, int n)
+{
+ struct page *page;
+ struct page *tmp;
+
+ BUG_ON(!n);
+ BUG_ON(!head);
+
+ page = *head;
+
+ if (!page)
+ return NULL;
+
+ while (page) {
+ tmp = page_chain_next(page);
+ if (--n == 0)
+ break; /* found sufficient pages */
+ if (tmp == NULL)
+ /* insufficient pages, don't use any of them. */
+ return NULL;
+ page = tmp;
+ }
+
+ /* add end of list marker for the returned list */
+ set_page_private(page, 0);
+ /* actual return value, and adjustment of head */
+ page = *head;
+ *head = tmp;
+ return page;
+}
+
+/* may be used outside of locks to find the tail of a (usually short)
+ * "private" page chain, before adding it back to a global chain head
+ * with page_chain_add() under a spinlock. */
+static struct page *page_chain_tail(struct page *page, int *len)
+{
+ struct page *tmp;
+ int i = 1;
+ while ((tmp = page_chain_next(page)))
+ ++i, page = tmp;
+ if (len)
+ *len = i;
+ return page;
+}
+
+static int page_chain_free(struct page *page)
+{
+ struct page *tmp;
+ int i = 0;
+ page_chain_for_each_safe(page, tmp) {
+ put_page(page);
+ ++i;
+ }
+ return i;
+}
+
+static void page_chain_add(struct page **head,
+ struct page *chain_first, struct page *chain_last)
+{
+#if 1
+ struct page *tmp;
+ tmp = page_chain_tail(chain_first, NULL);
+ BUG_ON(tmp != chain_last);
+#endif
+
+ /* add chain to head */
+ set_page_private(chain_last, (unsigned long)*head);
+ *head = chain_first;
+}
+
+static struct page *drbd_pp_first_pages_or_try_alloc(struct drbd_conf *mdev, int number)
{
struct page *page = NULL;
+ struct page *tmp = NULL;
+ int i = 0;
/* Yes, testing drbd_pp_vacant outside the lock is racy.
* So what. It saves a spin_lock. */
- if (drbd_pp_vacant > 0) {
+ if (drbd_pp_vacant >= number) {
spin_lock(&drbd_pp_lock);
- page = drbd_pp_pool;
- if (page) {
- drbd_pp_pool = (struct page *)page_private(page);
- set_page_private(page, 0); /* just to be polite */
- drbd_pp_vacant--;
- }
+ page = page_chain_del(&drbd_pp_pool, number);
+ if (page)
+ drbd_pp_vacant -= number;
spin_unlock(&drbd_pp_lock);
+ if (page)
+ return page;
}
+
/* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
* "criss-cross" setup, that might cause write-out on some other DRBD,
* which in turn might block on the other node at this very place. */
- if (!page)
- page = alloc_page(GFP_TRY);
- if (page)
- atomic_inc(&mdev->pp_in_use);
- return page;
+ for (i = 0; i < number; i++) {
+ tmp = alloc_page(GFP_TRY);
+ if (!tmp)
+ break;
+ set_page_private(tmp, (unsigned long)page);
+ page = tmp;
+ }
+
+ if (i == number)
+ return page;
+
+ /* Not enough pages immediately available this time.
+ * No need to jump around here, drbd_pp_alloc will retry this
+ * function "soon". */
+ if (page) {
+ tmp = page_chain_tail(page, NULL);
+ spin_lock(&drbd_pp_lock);
+ page_chain_add(&drbd_pp_pool, page, tmp);
+ drbd_pp_vacant += i;
+ spin_unlock(&drbd_pp_lock);
+ }
+ return NULL;
}
/* kick lower level device, if we have more than (arbitrary number)
@@ -127,7 +225,7 @@ static void reclaim_net_ee(struct drbd_conf *mdev, struct list_head *to_be_freed
list_for_each_safe(le, tle, &mdev->net_ee) {
e = list_entry(le, struct drbd_epoch_entry, w.list);
- if (drbd_bio_has_active_page(e->private_bio))
+ if (drbd_ee_has_active_page(e))
break;
list_move(le, to_be_freed);
}
@@ -148,32 +246,34 @@ static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
}
/**
- * drbd_pp_alloc() - Returns a page, fails only if a signal comes in
+ * drbd_pp_alloc() - Returns @number pages, retries forever (or until signalled)
* @mdev: DRBD device.
- * @retry: whether or not to retry allocation forever (or until signalled)
+ * @number: number of pages requested
+ * @retry: whether to retry, if not enough pages are available right now
+ *
+ * Tries to allocate @number pages, first from our own page pool, then from
+ * the kernel, unless this allocation would exceed the max_buffers setting.
+ * Possibly retry until DRBD frees sufficient pages somewhere else.
*
- * Tries to allocate a page, first from our own page pool, then from the
- * kernel, unless this allocation would exceed the max_buffers setting.
- * If @retry is non-zero, retry until DRBD frees a page somewhere else.
+ * Returns a page chain linked via page->private.
*/
-static struct page *drbd_pp_alloc(struct drbd_conf *mdev, int retry)
+static struct page *drbd_pp_alloc(struct drbd_conf *mdev, unsigned number, bool retry)
{
struct page *page = NULL;
DEFINE_WAIT(wait);
- if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers) {
- page = drbd_pp_first_page_or_try_alloc(mdev);
- if (page)
- return page;
- }
+ /* Yes, we may run up to @number over max_buffers. If we
+ * follow it strictly, the admin will get it wrong anyways. */
+ if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers)
+ page = drbd_pp_first_pages_or_try_alloc(mdev, number);
- for (;;) {
+ while (page == NULL) {
prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);
drbd_kick_lo_and_reclaim_net(mdev);
if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers) {
- page = drbd_pp_first_page_or_try_alloc(mdev);
+ page = drbd_pp_first_pages_or_try_alloc(mdev, number);
if (page)
break;
}
@@ -190,62 +290,32 @@ static struct page *drbd_pp_alloc(struct drbd_conf *mdev, int retry)
}
finish_wait(&drbd_pp_wait, &wait);
+ if (page)
+ atomic_add(number, &mdev->pp_in_use);
return page;
}
/* Must not be used from irq, as that may deadlock: see drbd_pp_alloc.
- * Is also used from inside an other spin_lock_irq(&mdev->req_lock) */
+ * Is also used from inside another spin_lock_irq(&mdev->req_lock);
+ * Either links the page chain back to the global pool,
+ * or returns all pages to the system. */
static void drbd_pp_free(struct drbd_conf *mdev, struct page *page)
{
- int free_it;
-
- spin_lock(&drbd_pp_lock);
- if (drbd_pp_vacant > (DRBD_MAX_SEGMENT_SIZE/PAGE_SIZE)*minor_count) {
- free_it = 1;
- } else {
- set_page_private(page, (unsigned long)drbd_pp_pool);
- drbd_pp_pool = page;
- drbd_pp_vacant++;
- free_it = 0;
- }
- spin_unlock(&drbd_pp_lock);
-
- atomic_dec(&mdev->pp_in_use);
-
- if (free_it)
- __free_page(page);
-
- wake_up(&drbd_pp_wait);
-}
-
-static void drbd_pp_free_bio_pages(struct drbd_conf *mdev, struct bio *bio)
-{
- struct page *p_to_be_freed = NULL;
- struct page *page;
- struct bio_vec *bvec;
int i;
-
- spin_lock(&drbd_pp_lock);
- __bio_for_each_segment(bvec, bio, i, 0) {
- if (drbd_pp_vacant > (DRBD_MAX_SEGMENT_SIZE/PAGE_SIZE)*minor_count) {
- set_page_private(bvec->bv_page, (unsigned long)p_to_be_freed);
- p_to_be_freed = bvec->bv_page;
- } else {
- set_page_private(bvec->bv_page, (unsigned long)drbd_pp_pool);
- drbd_pp_pool = bvec->bv_page;
- drbd_pp_vacant++;
- }
- }
- spin_unlock(&drbd_pp_lock);
- atomic_sub(bio->bi_vcnt, &mdev->pp_in_use);
-
- while (p_to_be_freed) {
- page = p_to_be_freed;
- p_to_be_freed = (struct page *)page_private(page);
- set_page_private(page, 0); /* just to be polite */
- put_page(page);
+ if (drbd_pp_vacant > (DRBD_MAX_SEGMENT_SIZE/PAGE_SIZE)*minor_count)
+ i = page_chain_free(page);
+ else {
+ struct page *tmp;
+ tmp = page_chain_tail(page, &i);
+ spin_lock(&drbd_pp_lock);
+ page_chain_add(&drbd_pp_pool, page, tmp);
+ drbd_pp_vacant += i;
+ spin_unlock(&drbd_pp_lock);
}
-
+ atomic_sub(i, &mdev->pp_in_use);
+ i = atomic_read(&mdev->pp_in_use);
+ if (i < 0)
+ dev_warn(DEV, "ASSERTION FAILED: pp_in_use: %d < 0\n", i);
wake_up(&drbd_pp_wait);
}
@@ -270,11 +340,9 @@ struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
unsigned int data_size,
gfp_t gfp_mask) __must_hold(local)
{
- struct request_queue *q;
struct drbd_epoch_entry *e;
struct page *page;
- struct bio *bio;
- unsigned int ds;
+ unsigned nr_pages = (data_size + PAGE_SIZE -1) >> PAGE_SHIFT;
if (FAULT_ACTIVE(mdev, DRBD_FAULT_AL_EE))
return NULL;
@@ -286,84 +354,32 @@ struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
return NULL;
}
- bio = bio_alloc(gfp_mask & ~__GFP_HIGHMEM, div_ceil(data_size, PAGE_SIZE));
- if (!bio) {
- if (!(gfp_mask & __GFP_NOWARN))
- dev_err(DEV, "alloc_ee: Allocation of a bio failed\n");
- goto fail1;
- }
-
- bio->bi_bdev = mdev->ldev->backing_bdev;
- bio->bi_sector = sector;
-
- ds = data_size;
- while (ds) {
- page = drbd_pp_alloc(mdev, (gfp_mask & __GFP_WAIT));
- if (!page) {
- if (!(gfp_mask & __GFP_NOWARN))
- dev_err(DEV, "alloc_ee: Allocation of a page failed\n");
- goto fail2;
- }
- if (!bio_add_page(bio, page, min_t(int, ds, PAGE_SIZE), 0)) {
- drbd_pp_free(mdev, page);
- dev_err(DEV, "alloc_ee: bio_add_page(s=%llu,"
- "data_size=%u,ds=%u) failed\n",
- (unsigned long long)sector, data_size, ds);
-
- q = bdev_get_queue(bio->bi_bdev);
- if (q->merge_bvec_fn) {
- struct bvec_merge_data bvm = {
- .bi_bdev = bio->bi_bdev,
- .bi_sector = bio->bi_sector,
- .bi_size = bio->bi_size,
- .bi_rw = bio->bi_rw,
- };
- int l = q->merge_bvec_fn(q, &bvm,
- &bio->bi_io_vec[bio->bi_vcnt]);
- dev_err(DEV, "merge_bvec_fn() = %d\n", l);
- }
-
- /* dump more of the bio. */
- dev_err(DEV, "bio->bi_max_vecs = %d\n", bio->bi_max_vecs);
- dev_err(DEV, "bio->bi_vcnt = %d\n", bio->bi_vcnt);
- dev_err(DEV, "bio->bi_size = %d\n", bio->bi_size);
- dev_err(DEV, "bio->bi_phys_segments = %d\n", bio->bi_phys_segments);
-
- goto fail2;
- break;
- }
- ds -= min_t(int, ds, PAGE_SIZE);
- }
-
- D_ASSERT(data_size == bio->bi_size);
-
- bio->bi_private = e;
- e->mdev = mdev;
- e->sector = sector;
- e->size = bio->bi_size;
+ page = drbd_pp_alloc(mdev, nr_pages, (gfp_mask & __GFP_WAIT));
+ if (!page)
+ goto fail;
- e->private_bio = bio;
- e->block_id = id;
INIT_HLIST_NODE(&e->colision);
e->epoch = NULL;
+ e->mdev = mdev;
+ e->pages = page;
+ atomic_set(&e->pending_bios, 0);
+ e->size = data_size;
e->flags = 0;
+ e->sector = sector;
+ e->block_id = id;
return e;
- fail2:
- drbd_pp_free_bio_pages(mdev, bio);
- bio_put(bio);
- fail1:
+ fail:
mempool_free(e, drbd_ee_mempool);
-
return NULL;
}
void drbd_free_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e)
{
- struct bio *bio = e->private_bio;
- drbd_pp_free_bio_pages(mdev, bio);
- bio_put(bio);
+ drbd_pp_free(mdev, e->pages);
+ D_ASSERT(atomic_read(&e->pending_bios) == 0);
D_ASSERT(hlist_unhashed(&e->colision));
mempool_free(e, drbd_ee_mempool);
}
@@ -902,7 +918,7 @@ retry:
if (!drbd_send_protocol(mdev))
return -1;
drbd_send_sync_param(mdev, &mdev->sync_conf);
- drbd_send_sizes(mdev, 0);
+ drbd_send_sizes(mdev, 0, 0);
drbd_send_uuids(mdev);
drbd_send_state(mdev);
clear_bit(USE_DEGR_WFC_T, &mdev->flags);
@@ -946,7 +962,8 @@ static enum finish_epoch drbd_flush_after_epoch(struct drbd_conf *mdev, struct d
int rv;
if (mdev->write_ordering >= WO_bdev_flush && get_ldev(mdev)) {
- rv = blkdev_issue_flush(mdev->ldev->backing_bdev, NULL);
+ rv = blkdev_issue_flush(mdev->ldev->backing_bdev, GFP_KERNEL,
+ NULL, BLKDEV_IFL_WAIT);
if (rv) {
dev_err(DEV, "local disk flush failed with status %d\n", rv);
/* would rather check on EOPNOTSUPP, but that is not reliable.
@@ -1120,6 +1137,101 @@ void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo)
}
/**
+ * drbd_submit_ee() - submit an epoch entry's page chain in one or more bios
+ * @mdev: DRBD device.
+ * @e: epoch entry
+ * @rw: flag field, see bio->bi_rw
+ */
+/* TODO allocate from our own bio_set. */
+int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
+ const unsigned rw, const int fault_type)
+{
+ struct bio *bios = NULL;
+ struct bio *bio;
+ struct page *page = e->pages;
+ sector_t sector = e->sector;
+ unsigned ds = e->size;
+ unsigned n_bios = 0;
+ unsigned nr_pages = (ds + PAGE_SIZE -1) >> PAGE_SHIFT;
+
+ if (atomic_read(&mdev->new_c_uuid)) {
+ if (atomic_add_unless(&mdev->new_c_uuid, -1, 1)) {
+ drbd_uuid_new_current(mdev);
+ drbd_md_sync(mdev);
+
+ atomic_dec(&mdev->new_c_uuid);
+ wake_up(&mdev->misc_wait);
+ }
+ wait_event(mdev->misc_wait, !atomic_read(&mdev->new_c_uuid));
+ }
+
+ /* In most cases, we will only need one bio. But in case the lower
+ * level restrictions happen to be different at this offset on this
+ * side than those of the sending peer, we may need to submit the
+ * request in more than one bio. */
+next_bio:
+ bio = bio_alloc(GFP_NOIO, nr_pages);
+ if (!bio) {
+ dev_err(DEV, "submit_ee: Allocation of a bio failed\n");
+ goto fail;
+ }
+ /* > e->sector, unless this is the first bio */
+ bio->bi_sector = sector;
+ bio->bi_bdev = mdev->ldev->backing_bdev;
+ /* we special case some flags in the multi-bio case, see below
+ * (BIO_RW_UNPLUG, BIO_RW_BARRIER) */
+ bio->bi_rw = rw;
+ bio->bi_private = e;
+ bio->bi_end_io = drbd_endio_sec;
+
+ bio->bi_next = bios;
+ bios = bio;
+ ++n_bios;
+
+ page_chain_for_each(page) {
+ unsigned len = min_t(unsigned, ds, PAGE_SIZE);
+ if (!bio_add_page(bio, page, len, 0)) {
+ /* a single page must always be possible! */
+ BUG_ON(bio->bi_vcnt == 0);
+ goto next_bio;
+ }
+ ds -= len;
+ sector += len >> 9;
+ --nr_pages;
+ }
+ D_ASSERT(page == NULL);
+ D_ASSERT(ds == 0);
+
+ atomic_set(&e->pending_bios, n_bios);
+ do {
+ bio = bios;
+ bios = bios->bi_next;
+ bio->bi_next = NULL;
+
+ /* strip off BIO_RW_UNPLUG unless it is the last bio */
+ if (bios)
+ bio->bi_rw &= ~(1<<BIO_RW_UNPLUG);
+
+ drbd_generic_make_request(mdev, fault_type, bio);
+
+ /* strip off BIO_RW_BARRIER,
+ * unless it is the first or last bio */
+ if (bios && bios->bi_next)
+ bios->bi_rw &= ~(1<<BIO_RW_BARRIER);
+ } while (bios);
+ maybe_kick_lo(mdev);
+ return 0;
+
+fail:
+ while (bios) {
+ bio = bios;
+ bios = bios->bi_next;
+ bio_put(bio);
+ }
+ return -ENOMEM;
+}
+
+/**
* w_e_reissue() - Worker callback; Resubmit a bio, without BIO_RW_BARRIER set
* @mdev: DRBD device.
* @w: work object.
@@ -1128,8 +1240,6 @@ void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo)
int w_e_reissue(struct drbd_conf *mdev, struct drbd_work *w, int cancel) __releases(local)
{
struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
- struct bio *bio = e->private_bio;
-
/* We leave DE_CONTAINS_A_BARRIER and EE_IS_BARRIER in place,
(and DE_BARRIER_IN_NEXT_EPOCH_ISSUED in the previous Epoch)
so that we can finish that epoch in drbd_may_finish_epoch().
@@ -1143,33 +1253,17 @@ int w_e_reissue(struct drbd_conf *mdev, struct drbd_work *w, int cancel) __relea
if (previous_epoch(mdev, e->epoch))
dev_warn(DEV, "Write ordering was not enforced (one time event)\n");
- /* prepare bio for re-submit,
- * re-init volatile members */
/* we still have a local reference,
* get_ldev was done in receive_Data. */
- bio->bi_bdev = mdev->ldev->backing_bdev;
- bio->bi_sector = e->sector;
- bio->bi_size = e->size;
- bio->bi_idx = 0;
-
- bio->bi_flags &= ~(BIO_POOL_MASK - 1);
- bio->bi_flags |= 1 << BIO_UPTODATE;
-
- /* don't know whether this is necessary: */
- bio->bi_phys_segments = 0;
- bio->bi_next = NULL;
-
- /* these should be unchanged: */
- /* bio->bi_end_io = drbd_endio_write_sec; */
- /* bio->bi_vcnt = whatever; */
e->w.cb = e_end_block;
-
- /* This is no longer a barrier request. */
- bio->bi_rw &= ~(1UL << BIO_RW_BARRIER);
-
- drbd_generic_make_request(mdev, DRBD_FAULT_DT_WR, bio);
-
+ if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_DT_WR) != 0) {
+ /* drbd_submit_ee fails for one reason only:
+ * it was not able to allocate sufficient bios.
+ * requeue, try again later. */
+ e->w.cb = w_e_reissue;
+ drbd_queue_work(&mdev->data.work, &e->w);
+ }
return 1;
}
@@ -1261,13 +1355,13 @@ static int receive_Barrier(struct drbd_conf *mdev, struct p_header *h)
static struct drbd_epoch_entry *
read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __must_hold(local)
{
+ const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
struct drbd_epoch_entry *e;
- struct bio_vec *bvec;
struct page *page;
- struct bio *bio;
- int dgs, ds, i, rr;
+ int dgs, ds, rr;
void *dig_in = mdev->int_dig_in;
void *dig_vv = mdev->int_dig_vv;
+ unsigned long *data;
dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;
@@ -1286,29 +1380,44 @@ read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __
ERR_IF(data_size & 0x1ff) return NULL;
ERR_IF(data_size > DRBD_MAX_SEGMENT_SIZE) return NULL;
+ /* even though we trust our peer,
+ * we sometimes have to double check. */
+ if (sector + (data_size>>9) > capacity) {
+ dev_err(DEV, "capacity: %llus < sector: %llus + size: %u\n",
+ (unsigned long long)capacity,
+ (unsigned long long)sector, data_size);
+ return NULL;
+ }
+
/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
* "criss-cross" setup, that might cause write-out on some other DRBD,
* which in turn might block on the other node at this very place. */
e = drbd_alloc_ee(mdev, id, sector, data_size, GFP_NOIO);
if (!e)
return NULL;
- bio = e->private_bio;
+
ds = data_size;
- bio_for_each_segment(bvec, bio, i) {
- page = bvec->bv_page;
- rr = drbd_recv(mdev, kmap(page), min_t(int, ds, PAGE_SIZE));
+ page = e->pages;
+ page_chain_for_each(page) {
+ unsigned len = min_t(int, ds, PAGE_SIZE);
+ data = kmap(page);
+ rr = drbd_recv(mdev, data, len);
+ if (FAULT_ACTIVE(mdev, DRBD_FAULT_RECEIVE)) {
+ dev_err(DEV, "Fault injection: Corrupting data on receive\n");
+ data[0] = data[0] ^ (unsigned long)-1;
+ }
kunmap(page);
- if (rr != min_t(int, ds, PAGE_SIZE)) {
+ if (rr != len) {
drbd_free_ee(mdev, e);
dev_warn(DEV, "short read receiving data: read %d expected %d\n",
- rr, min_t(int, ds, PAGE_SIZE));
+ rr, len);
return NULL;
}
ds -= rr;
}
if (dgs) {
- drbd_csum(mdev, mdev->integrity_r_tfm, bio, dig_vv);
+ drbd_csum_ee(mdev, mdev->integrity_r_tfm, e, dig_vv);
if (memcmp(dig_in, dig_vv, dgs)) {
dev_err(DEV, "Digest integrity check FAILED.\n");
drbd_bcast_ee(mdev, "digest failed",
@@ -1330,7 +1439,10 @@ static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
int rr, rv = 1;
void *data;
- page = drbd_pp_alloc(mdev, 1);
+ if (!data_size)
+ return TRUE;
+
+ page = drbd_pp_alloc(mdev, 1, 1);
data = kmap(page);
while (data_size) {
@@ -1394,7 +1506,7 @@ static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
}
if (dgs) {
- drbd_csum(mdev, mdev->integrity_r_tfm, bio, dig_vv);
+ drbd_csum_bio(mdev, mdev->integrity_r_tfm, bio, dig_vv);
if (memcmp(dig_in, dig_vv, dgs)) {
dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n");
return 0;
@@ -1415,7 +1527,7 @@ static int e_end_resync_block(struct drbd_conf *mdev, struct drbd_work *w, int u
D_ASSERT(hlist_unhashed(&e->colision));
- if (likely(drbd_bio_uptodate(e->private_bio))) {
+ if (likely((e->flags & EE_WAS_ERROR) == 0)) {
drbd_set_in_sync(mdev, sector, e->size);
ok = drbd_send_ack(mdev, P_RS_WRITE_ACK, e);
} else {
@@ -1434,30 +1546,28 @@ static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_si
struct drbd_epoch_entry *e;
e = read_in_block(mdev, ID_SYNCER, sector, data_size);
- if (!e) {
- put_ldev(mdev);
- return FALSE;
- }
+ if (!e)
+ goto fail;
dec_rs_pending(mdev);
- e->private_bio->bi_end_io = drbd_endio_write_sec;
- e->private_bio->bi_rw = WRITE;
- e->w.cb = e_end_resync_block;
-
inc_unacked(mdev);
/* corresponding dec_unacked() in e_end_resync_block()
* respective _drbd_clear_done_ee */
+ e->w.cb = e_end_resync_block;
+
spin_lock_irq(&mdev->req_lock);
list_add(&e->w.list, &mdev->sync_ee);
spin_unlock_irq(&mdev->req_lock);
- drbd_generic_make_request(mdev, DRBD_FAULT_RS_WR, e->private_bio);
- /* accounting done in endio */
+ if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_RS_WR) == 0)
+ return TRUE;
- maybe_kick_lo(mdev);
- return TRUE;
+ drbd_free_ee(mdev, e);
+fail:
+ put_ldev(mdev);
+ return FALSE;
}
static int receive_DataReply(struct drbd_conf *mdev, struct p_header *h)
@@ -1552,7 +1662,7 @@ static int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
}
if (mdev->net_conf->wire_protocol == DRBD_PROT_C) {
- if (likely(drbd_bio_uptodate(e->private_bio))) {
+ if (likely((e->flags & EE_WAS_ERROR) == 0)) {
pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
mdev->state.conn <= C_PAUSED_SYNC_T &&
e->flags & EE_MAY_SET_IN_SYNC) ?
@@ -1698,7 +1808,6 @@ static int receive_Data(struct drbd_conf *mdev, struct p_header *h)
return FALSE;
}
- e->private_bio->bi_end_io = drbd_endio_write_sec;
e->w.cb = e_end_block;
spin_lock(&mdev->epoch_lock);
@@ -1894,12 +2003,8 @@ static int receive_Data(struct drbd_conf *mdev, struct p_header *h)
drbd_al_begin_io(mdev, e->sector);
}
- e->private_bio->bi_rw = rw;
- drbd_generic_make_request(mdev, DRBD_FAULT_DT_WR, e->private_bio);
- /* accounting done in endio */
-
- maybe_kick_lo(mdev);
- return TRUE;
+ if (drbd_submit_ee(mdev, e, rw, DRBD_FAULT_DT_WR) == 0)
+ return TRUE;
out_interrupted:
/* yes, the epoch_size now is imbalanced.
@@ -1945,7 +2050,7 @@ static int receive_DataRequest(struct drbd_conf *mdev, struct p_header *h)
"no local data.\n");
drbd_send_ack_rp(mdev, h->command == P_DATA_REQUEST ? P_NEG_DREPLY :
P_NEG_RS_DREPLY , p);
- return TRUE;
+ return drbd_drain_block(mdev, h->length - brps);
}
/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
@@ -1957,9 +2062,6 @@ static int receive_DataRequest(struct drbd_conf *mdev, struct p_header *h)
return FALSE;
}
- e->private_bio->bi_rw = READ;
- e->private_bio->bi_end_io = drbd_endio_read_sec;
-
switch (h->command) {
case P_DATA_REQUEST:
e->w.cb = w_e_end_data_req;
@@ -2053,10 +2155,8 @@ static int receive_DataRequest(struct drbd_conf *mdev, struct p_header *h)
inc_unacked(mdev);
- drbd_generic_make_request(mdev, fault_type, e->private_bio);
- maybe_kick_lo(mdev);
-
- return TRUE;
+ if (drbd_submit_ee(mdev, e, READ, fault_type) == 0)
+ return TRUE;
out_free_e:
kfree(di);
@@ -2473,6 +2573,9 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_rol
hg > 0 ? "source" : "target");
}
+ if (abs(hg) == 100)
+ drbd_khelper(mdev, "initial-split-brain");
+
if (hg == 100 || (hg == -100 && mdev->net_conf->always_asbp)) {
int pcount = (mdev->state.role == R_PRIMARY)
+ (peer_role == R_PRIMARY);
@@ -2518,7 +2621,7 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_rol
* after an attempted attach on a diskless node.
* We just refuse to attach -- well, we drop the "connection"
* to that disk, in a way... */
- dev_alert(DEV, "Split-Brain detected, dropping connection!\n");
+ dev_alert(DEV, "Split-Brain detected but unresolved, dropping connection!\n");
drbd_khelper(mdev, "split-brain");
return C_MASK;
}
@@ -2849,7 +2952,7 @@ static int receive_sizes(struct drbd_conf *mdev, struct p_header *h)
unsigned int max_seg_s;
sector_t p_size, p_usize, my_usize;
int ldsc = 0; /* local disk size changed */
- enum drbd_conns nconn;
+ enum dds_flags ddsf;
ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE;
if (drbd_recv(mdev, h->payload, h->length) != h->length)
@@ -2905,8 +3008,9 @@ static int receive_sizes(struct drbd_conf *mdev, struct p_header *h)
}
#undef min_not_zero
+ ddsf = be16_to_cpu(p->dds_flags);
if (get_ldev(mdev)) {
- dd = drbd_determin_dev_size(mdev, 0);
+ dd = drbd_determin_dev_size(mdev, ddsf);
put_ldev(mdev);
if (dd == dev_size_error)
return FALSE;
@@ -2916,33 +3020,21 @@ static int receive_sizes(struct drbd_conf *mdev, struct p_header *h)
drbd_set_my_capacity(mdev, p_size);
}
- if (mdev->p_uuid && mdev->state.conn <= C_CONNECTED && get_ldev(mdev)) {
- nconn = drbd_sync_handshake(mdev,
- mdev->state.peer, mdev->state.pdsk);
- put_ldev(mdev);
-
- if (nconn == C_MASK) {
- drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
- return FALSE;
- }
-
- if (drbd_request_state(mdev, NS(conn, nconn)) < SS_SUCCESS) {
- drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
- return FALSE;
- }
- }
-
if (get_ldev(mdev)) {
if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) {
mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
ldsc = 1;
}
- max_seg_s = be32_to_cpu(p->max_segment_size);
+ if (mdev->agreed_pro_version < 94)
+ max_seg_s = be32_to_cpu(p->max_segment_size);
+ else /* drbd 8.3.8 onwards */
+ max_seg_s = DRBD_MAX_SEGMENT_SIZE;
+
if (max_seg_s != queue_max_segment_size(mdev->rq_queue))
drbd_setup_queue_param(mdev, max_seg_s);
- drbd_setup_order_type(mdev, be32_to_cpu(p->queue_order_type));
+ drbd_setup_order_type(mdev, be16_to_cpu(p->queue_order_type));
put_ldev(mdev);
}
@@ -2951,14 +3043,17 @@ static int receive_sizes(struct drbd_conf *mdev, struct p_header *h)
drbd_get_capacity(mdev->this_bdev) || ldsc) {
/* we have different sizes, probably peer
* needs to know my new size... */
- drbd_send_sizes(mdev, 0);
+ drbd_send_sizes(mdev, 0, ddsf);
}
if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) ||
(dd == grew && mdev->state.conn == C_CONNECTED)) {
if (mdev->state.pdsk >= D_INCONSISTENT &&
- mdev->state.disk >= D_INCONSISTENT)
- resync_after_online_grow(mdev);
- else
+ mdev->state.disk >= D_INCONSISTENT) {
+ if (ddsf & DDSF_NO_RESYNC)
+ dev_info(DEV, "Resync of new storage suppressed with --assume-clean\n");
+ else
+ resync_after_online_grow(mdev);
+ } else
set_bit(RESYNC_AFTER_NEG, &mdev->flags);
}
}
@@ -3490,6 +3585,92 @@ static int receive_UnplugRemote(struct drbd_conf *mdev, struct p_header *h)
return TRUE;
}
+static void timeval_sub_us(struct timeval* tv, unsigned int us)
+{
+ tv->tv_sec -= us / 1000000;
+ us = us % 1000000;
+ if (tv->tv_usec < us) { /* need to borrow from tv_sec */
+ tv->tv_usec += 1000000;
+ tv->tv_sec--;
+ }
+ tv->tv_usec -= us;
+}
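A quick user-space check of the borrow above (using the condition written as tv_usec < us): subtracting 1500 us from 5 s 200 us must borrow one second.

#include <stdio.h>

struct my_timeval { long tv_sec; long tv_usec; };

static void my_timeval_sub_us(struct my_timeval *tv, unsigned int us)
{
        tv->tv_sec -= us / 1000000;
        us %= 1000000;
        if (tv->tv_usec < (long)us) {           /* borrow one second */
                tv->tv_usec += 1000000;
                tv->tv_sec--;
        }
        tv->tv_usec -= us;
}

int main(void)
{
        struct my_timeval tv = { 5, 200 };
        my_timeval_sub_us(&tv, 1500);
        printf("%ld.%06ld\n", tv.tv_sec, tv.tv_usec);   /* 4.998700 */
        return 0;
}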
+
+static void got_delay_probe(struct drbd_conf *mdev, int from, struct p_delay_probe *p)
+{
+ struct delay_probe *dp;
+ struct list_head *le;
+ struct timeval now;
+ int seq_num;
+ int offset;
+ int data_delay;
+
+ seq_num = be32_to_cpu(p->seq_num);
+ offset = be32_to_cpu(p->offset);
+
+ spin_lock(&mdev->peer_seq_lock);
+ if (!list_empty(&mdev->delay_probes)) {
+ if (from == USE_DATA_SOCKET)
+ le = mdev->delay_probes.next;
+ else
+ le = mdev->delay_probes.prev;
+
+ dp = list_entry(le, struct delay_probe, list);
+
+ if (dp->seq_num == seq_num) {
+ list_del(le);
+ spin_unlock(&mdev->peer_seq_lock);
+ do_gettimeofday(&now);
+ timeval_sub_us(&now, offset);
+ data_delay =
+ now.tv_usec - dp->time.tv_usec +
+ (now.tv_sec - dp->time.tv_sec) * 1000000;
+
+ if (data_delay > 0)
+ mdev->data_delay = data_delay;
+
+ kfree(dp);
+ return;
+ }
+
+ if (dp->seq_num > seq_num) {
+ spin_unlock(&mdev->peer_seq_lock);
+ dev_warn(DEV, "Previous allocation failure of struct delay_probe?\n");
+ return; /* Do not allocate a struct delay_probe. */
+ }
+ }
+ spin_unlock(&mdev->peer_seq_lock);
+
+ dp = kmalloc(sizeof(struct delay_probe), GFP_NOIO);
+ if (!dp) {
+ dev_warn(DEV, "Failed to allocate a struct delay_probe, do not worry.\n");
+ return;
+ }
+
+ dp->seq_num = seq_num;
+ do_gettimeofday(&dp->time);
+ timeval_sub_us(&dp->time, offset);
+
+ spin_lock(&mdev->peer_seq_lock);
+ if (from == USE_DATA_SOCKET)
+ list_add(&dp->list, &mdev->delay_probes);
+ else
+ list_add_tail(&dp->list, &mdev->delay_probes);
+ spin_unlock(&mdev->peer_seq_lock);
+}
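Continuing the assumed numbers from the send side, for illustration only: got_delay_probe() stores arrival time minus offset for the first probe of a sequence number; when the matching probe arrives on the other socket, the difference of the two corrected times is the data_delay, roughly the extra queueing on the data socket (negative results are discarded above).

#include <stdio.h>

int main(void)
{
        /* microseconds on the receiver's clock, assumed numbers */
        long meta_arrival = 1200, meta_offset = 250;    /* probe seen on the meta socket */
        long data_arrival = 9300, data_offset = 4250;   /* matching probe on the data socket */

        long ref   = meta_arrival - meta_offset;        /* kept as dp->time */
        long delay = (data_arrival - data_offset) - ref;

        printf("data_delay = %ld us\n", delay);         /* 4100 */
        return 0;
}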
+
+static int receive_delay_probe(struct drbd_conf *mdev, struct p_header *h)
+{
+ struct p_delay_probe *p = (struct p_delay_probe *)h;
+
+ ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE;
+ if (drbd_recv(mdev, h->payload, h->length) != h->length)
+ return FALSE;
+
+ got_delay_probe(mdev, USE_DATA_SOCKET, p);
+ return TRUE;
+}
+
typedef int (*drbd_cmd_handler_f)(struct drbd_conf *, struct p_header *);
static drbd_cmd_handler_f drbd_default_handler[] = {
@@ -3513,6 +3694,7 @@ static drbd_cmd_handler_f drbd_default_handler[] = {
[P_OV_REQUEST] = receive_DataRequest,
[P_OV_REPLY] = receive_DataRequest,
[P_CSUM_RS_REQUEST] = receive_DataRequest,
+ [P_DELAY_PROBE] = receive_delay_probe,
/* anything missing from this table is in
* the asender_tbl, see get_asender_cmd */
[P_MAX_CMD] = NULL,
@@ -3739,7 +3921,7 @@ static void drbd_disconnect(struct drbd_conf *mdev)
dev_info(DEV, "net_ee not empty, killed %u entries\n", i);
i = atomic_read(&mdev->pp_in_use);
if (i)
- dev_info(DEV, "pp_in_use = %u, expected 0\n", i);
+ dev_info(DEV, "pp_in_use = %d, expected 0\n", i);
D_ASSERT(list_empty(&mdev->read_ee));
D_ASSERT(list_empty(&mdev->active_ee));
@@ -4232,7 +4414,6 @@ static int got_NegRSDReply(struct drbd_conf *mdev, struct p_header *h)
sector = be64_to_cpu(p->sector);
size = be32_to_cpu(p->blksize);
- D_ASSERT(p->block_id == ID_SYNCER);
update_peer_seq(mdev, be32_to_cpu(p->seq_num));
@@ -4290,6 +4471,14 @@ static int got_OVResult(struct drbd_conf *mdev, struct p_header *h)
return TRUE;
}
+static int got_delay_probe_m(struct drbd_conf *mdev, struct p_header *h)
+{
+ struct p_delay_probe *p = (struct p_delay_probe *)h;
+
+ got_delay_probe(mdev, USE_META_SOCKET, p);
+ return TRUE;
+}
+
struct asender_cmd {
size_t pkt_size;
int (*process)(struct drbd_conf *mdev, struct p_header *h);
@@ -4314,6 +4503,7 @@ static struct asender_cmd *get_asender_cmd(int cmd)
[P_BARRIER_ACK] = { sizeof(struct p_barrier_ack), got_BarrierAck },
[P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
[P_RS_IS_IN_SYNC] = { sizeof(struct p_block_ack), got_IsInSync },
+ [P_DELAY_PROBE] = { sizeof(struct p_delay_probe), got_delay_probe_m },
[P_MAX_CMD] = { 0, NULL },
};
if (cmd > P_MAX_CMD || asender_tbl[cmd].process == NULL)
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index de81ab7b4627..3397f11d0ba9 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -722,6 +722,7 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio)
struct drbd_request *req;
int local, remote;
int err = -EIO;
+ int ret = 0;
/* allocate outside of all locks; */
req = drbd_req_new(mdev, bio);
@@ -784,7 +785,7 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio)
(mdev->state.pdsk == D_INCONSISTENT &&
mdev->state.conn >= C_CONNECTED));
- if (!(local || remote)) {
+ if (!(local || remote) && !mdev->state.susp) {
dev_err(DEV, "IO ERROR: neither local nor remote disk\n");
goto fail_free_complete;
}
@@ -810,6 +811,16 @@ allocate_barrier:
/* GOOD, everything prepared, grab the spin_lock */
spin_lock_irq(&mdev->req_lock);
+ if (mdev->state.susp) {
+ /* If we got suspended, use the retry mechanism of
+ generic_make_request() to restart processing of this
+ bio. In the next call to drbd_make_request_26
+ we sleep in inc_ap_bio() */
+ ret = 1;
+ spin_unlock_irq(&mdev->req_lock);
+ goto fail_free_complete;
+ }
+
if (remote) {
remote = (mdev->state.pdsk == D_UP_TO_DATE ||
(mdev->state.pdsk == D_INCONSISTENT &&
@@ -947,12 +958,14 @@ fail_and_free_req:
req->private_bio = NULL;
put_ldev(mdev);
}
- bio_endio(bio, err);
+ if (!ret)
+ bio_endio(bio, err);
+
drbd_req_free(req);
dec_ap_bio(mdev);
kfree(b);
- return 0;
+ return ret;
}
/* helper function for drbd_make_request
@@ -962,11 +975,6 @@ fail_and_free_req:
*/
static int drbd_fail_request_early(struct drbd_conf *mdev, int is_write)
{
- /* Unconfigured */
- if (mdev->state.conn == C_DISCONNECTING &&
- mdev->state.disk == D_DISKLESS)
- return 1;
-
if (mdev->state.role != R_PRIMARY &&
(!allow_oos || is_write)) {
if (__ratelimit(&drbd_ratelimit_state)) {
@@ -1070,15 +1078,21 @@ int drbd_make_request_26(struct request_queue *q, struct bio *bio)
/* we need to get a "reference count" (ap_bio_cnt)
* to avoid races with the disconnect/reconnect/suspend code.
- * In case we need to split the bio here, we need to get two references
+ * In case we need to split the bio here, we need to get three references
* atomically, otherwise we might deadlock when trying to submit the
* second one! */
- inc_ap_bio(mdev, 2);
+ inc_ap_bio(mdev, 3);
D_ASSERT(e_enr == s_enr + 1);
- drbd_make_request_common(mdev, &bp->bio1);
- drbd_make_request_common(mdev, &bp->bio2);
+ while (drbd_make_request_common(mdev, &bp->bio1))
+ inc_ap_bio(mdev, 1);
+
+ while (drbd_make_request_common(mdev, &bp->bio2))
+ inc_ap_bio(mdev, 1);
+
+ dec_ap_bio(mdev);
+
bio_pair_release(bp);
}
return 0;
@@ -1115,7 +1129,7 @@ int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct
} else if (limit && get_ldev(mdev)) {
struct request_queue * const b =
mdev->ldev->backing_bdev->bd_disk->queue;
- if (b->merge_bvec_fn && mdev->ldev->dc.use_bmbv) {
+ if (b->merge_bvec_fn) {
backing_limit = b->merge_bvec_fn(b, bvm, bvec);
limit = min(limit, backing_limit);
}
diff --git a/drivers/block/drbd/drbd_strings.c b/drivers/block/drbd/drbd_strings.c
index 76863e3f05be..85179e1fb50a 100644
--- a/drivers/block/drbd/drbd_strings.c
+++ b/drivers/block/drbd/drbd_strings.c
@@ -70,7 +70,7 @@ static const char *drbd_disk_s_names[] = {
static const char *drbd_state_sw_errors[] = {
[-SS_TWO_PRIMARIES] = "Multiple primaries not allowed by config",
- [-SS_NO_UP_TO_DATE_DISK] = "Refusing to be Primary without at least one UpToDate disk",
+ [-SS_NO_UP_TO_DATE_DISK] = "Need access to UpToDate data",
[-SS_NO_LOCAL_DISK] = "Can not resync without local disk",
[-SS_NO_REMOTE_DISK] = "Can not resync without remote disk",
[-SS_CONNECTED_OUTDATES] = "Refusing to be Outdated while Connected",
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index d48a1dfd7b24..727ff6339754 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -47,8 +47,7 @@ static int w_make_ov_request(struct drbd_conf *mdev, struct drbd_work *w, int ca
/* defined here:
drbd_md_io_complete
- drbd_endio_write_sec
- drbd_endio_read_sec
+ drbd_endio_sec
drbd_endio_pri
* more endio handlers:
@@ -85,27 +84,10 @@ void drbd_md_io_complete(struct bio *bio, int error)
/* reads on behalf of the partner,
* "submitted" by the receiver
*/
-void drbd_endio_read_sec(struct bio *bio, int error) __releases(local)
+void drbd_endio_read_sec_final(struct drbd_epoch_entry *e) __releases(local)
{
unsigned long flags = 0;
- struct drbd_epoch_entry *e = NULL;
- struct drbd_conf *mdev;
- int uptodate = bio_flagged(bio, BIO_UPTODATE);
-
- e = bio->bi_private;
- mdev = e->mdev;
-
- if (error)
- dev_warn(DEV, "read: error=%d s=%llus\n", error,
- (unsigned long long)e->sector);
- if (!error && !uptodate) {
- dev_warn(DEV, "read: setting error to -EIO s=%llus\n",
- (unsigned long long)e->sector);
- /* strange behavior of some lower level drivers...
- * fail the request by clearing the uptodate flag,
- * but do not return any error?! */
- error = -EIO;
- }
+ struct drbd_conf *mdev = e->mdev;
D_ASSERT(e->block_id != ID_VACANT);
@@ -114,49 +96,38 @@ void drbd_endio_read_sec(struct bio *bio, int error) __releases(local)
list_del(&e->w.list);
if (list_empty(&mdev->read_ee))
wake_up(&mdev->ee_wait);
+ if (test_bit(__EE_WAS_ERROR, &e->flags))
+ __drbd_chk_io_error(mdev, FALSE);
spin_unlock_irqrestore(&mdev->req_lock, flags);
- drbd_chk_io_error(mdev, error, FALSE);
drbd_queue_work(&mdev->data.work, &e->w);
put_ldev(mdev);
}
+static int is_failed_barrier(int ee_flags)
+{
+ return (ee_flags & (EE_IS_BARRIER|EE_WAS_ERROR|EE_RESUBMITTED))
+ == (EE_IS_BARRIER|EE_WAS_ERROR);
+}
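A small check of the flag test above, for illustration; the bit values are placeholders, not the driver's: it matches entries that were barriers and failed, but have not been resubmitted yet.

#include <stdio.h>

#define EE_IS_BARRIER   (1 << 0)
#define EE_WAS_ERROR    (1 << 1)
#define EE_RESUBMITTED  (1 << 2)

static int is_failed_barrier(int ee_flags)
{
        return (ee_flags & (EE_IS_BARRIER|EE_WAS_ERROR|EE_RESUBMITTED))
                == (EE_IS_BARRIER|EE_WAS_ERROR);
}

int main(void)
{
        printf("%d\n", is_failed_barrier(EE_IS_BARRIER|EE_WAS_ERROR));                  /* 1 */
        printf("%d\n", is_failed_barrier(EE_IS_BARRIER|EE_WAS_ERROR|EE_RESUBMITTED));   /* 0 */
        printf("%d\n", is_failed_barrier(EE_WAS_ERROR));                                /* 0 */
        return 0;
}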
+
/* writes on behalf of the partner, or resync writes,
- * "submitted" by the receiver.
- */
-void drbd_endio_write_sec(struct bio *bio, int error) __releases(local)
+ * "submitted" by the receiver, final stage. */
+static void drbd_endio_write_sec_final(struct drbd_epoch_entry *e) __releases(local)
{
unsigned long flags = 0;
- struct drbd_epoch_entry *e = NULL;
- struct drbd_conf *mdev;
+ struct drbd_conf *mdev = e->mdev;
sector_t e_sector;
int do_wake;
int is_syncer_req;
int do_al_complete_io;
- int uptodate = bio_flagged(bio, BIO_UPTODATE);
- int is_barrier = bio_rw_flagged(bio, BIO_RW_BARRIER);
-
- e = bio->bi_private;
- mdev = e->mdev;
- if (error)
- dev_warn(DEV, "write: error=%d s=%llus\n", error,
- (unsigned long long)e->sector);
- if (!error && !uptodate) {
- dev_warn(DEV, "write: setting error to -EIO s=%llus\n",
- (unsigned long long)e->sector);
- /* strange behavior of some lower level drivers...
- * fail the request by clearing the uptodate flag,
- * but do not return any error?! */
- error = -EIO;
- }
-
- /* error == -ENOTSUPP would be a better test,
- * alas it is not reliable */
- if (error && is_barrier && e->flags & EE_IS_BARRIER) {
+ /* if this is a failed barrier request, disable use of barriers,
+ * and schedule for resubmission */
+ if (is_failed_barrier(e->flags)) {
drbd_bump_write_ordering(mdev, WO_bdev_flush);
spin_lock_irqsave(&mdev->req_lock, flags);
list_del(&e->w.list);
+ e->flags = (e->flags & ~EE_WAS_ERROR) | EE_RESUBMITTED;
e->w.cb = w_e_reissue;
/* put_ldev actually happens below, once we come here again. */
__release(local);
@@ -167,17 +138,16 @@ void drbd_endio_write_sec(struct bio *bio, int error) __releases(local)
D_ASSERT(e->block_id != ID_VACANT);
- spin_lock_irqsave(&mdev->req_lock, flags);
- mdev->writ_cnt += e->size >> 9;
- is_syncer_req = is_syncer_block_id(e->block_id);
-
/* after we moved e to done_ee,
* we may no longer access it,
* it may be freed/reused already!
* (as soon as we release the req_lock) */
e_sector = e->sector;
do_al_complete_io = e->flags & EE_CALL_AL_COMPLETE_IO;
+ is_syncer_req = is_syncer_block_id(e->block_id);
+ spin_lock_irqsave(&mdev->req_lock, flags);
+ mdev->writ_cnt += e->size >> 9;
list_del(&e->w.list); /* has been on active_ee or sync_ee */
list_add_tail(&e->w.list, &mdev->done_ee);
@@ -190,7 +160,7 @@ void drbd_endio_write_sec(struct bio *bio, int error) __releases(local)
? list_empty(&mdev->sync_ee)
: list_empty(&mdev->active_ee);
- if (error)
+ if (test_bit(__EE_WAS_ERROR, &e->flags))
__drbd_chk_io_error(mdev, FALSE);
spin_unlock_irqrestore(&mdev->req_lock, flags);
@@ -205,7 +175,42 @@ void drbd_endio_write_sec(struct bio *bio, int error) __releases(local)
wake_asender(mdev);
put_ldev(mdev);
+}
+
+/* writes on behalf of the partner, or resync writes,
+ * "submitted" by the receiver.
+ */
+void drbd_endio_sec(struct bio *bio, int error)
+{
+ struct drbd_epoch_entry *e = bio->bi_private;
+ struct drbd_conf *mdev = e->mdev;
+ int uptodate = bio_flagged(bio, BIO_UPTODATE);
+ int is_write = bio_data_dir(bio) == WRITE;
+
+ if (error)
+ dev_warn(DEV, "%s: error=%d s=%llus\n",
+ is_write ? "write" : "read", error,
+ (unsigned long long)e->sector);
+ if (!error && !uptodate) {
+ dev_warn(DEV, "%s: setting error to -EIO s=%llus\n",
+ is_write ? "write" : "read",
+ (unsigned long long)e->sector);
+ /* strange behavior of some lower level drivers...
+ * fail the request by clearing the uptodate flag,
+ * but do not return any error?! */
+ error = -EIO;
+ }
+
+ if (error)
+ set_bit(__EE_WAS_ERROR, &e->flags);
+ bio_put(bio); /* no need for the bio anymore */
+ if (atomic_dec_and_test(&e->pending_bios)) {
+ if (is_write)
+ drbd_endio_write_sec_final(e);
+ else
+ drbd_endio_read_sec_final(e);
+ }
}
/* read, readA or write requests on R_PRIMARY coming from drbd_make_request
@@ -295,7 +300,34 @@ int w_resync_inactive(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
return 1; /* Simply ignore this! */
}
-void drbd_csum(struct drbd_conf *mdev, struct crypto_hash *tfm, struct bio *bio, void *digest)
+void drbd_csum_ee(struct drbd_conf *mdev, struct crypto_hash *tfm, struct drbd_epoch_entry *e, void *digest)
+{
+ struct hash_desc desc;
+ struct scatterlist sg;
+ struct page *page = e->pages;
+ struct page *tmp;
+ unsigned len;
+
+ desc.tfm = tfm;
+ desc.flags = 0;
+
+ sg_init_table(&sg, 1);
+ crypto_hash_init(&desc);
+
+ while ((tmp = page_chain_next(page))) {
+ /* all but the last page will be fully used */
+ sg_set_page(&sg, page, PAGE_SIZE, 0);
+ crypto_hash_update(&desc, &sg, sg.length);
+ page = tmp;
+ }
+ /* and now the last, possibly only partially used page */
+ len = e->size & (PAGE_SIZE - 1);
+ sg_set_page(&sg, page, len ?: PAGE_SIZE, 0);
+ crypto_hash_update(&desc, &sg, sg.length);
+ crypto_hash_final(&desc, digest);
+}
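Worked example for the length of the last page hashed above, assuming PAGE_SIZE is 4096: e->size & (PAGE_SIZE - 1) is the remainder, and the "len ?: PAGE_SIZE" fallback makes an exact multiple of the page size hash a full final page.

#include <stdio.h>

#define PAGE_SIZE 4096u

int main(void)
{
        unsigned sizes[] = { 4608, 8192 };      /* one page + 512 bytes; exactly two pages */

        for (int i = 0; i < 2; i++) {
                unsigned len = sizes[i] & (PAGE_SIZE - 1);
                printf("e->size=%u -> last page hashes %u bytes\n",
                       sizes[i], len ? len : PAGE_SIZE);        /* 512, then 4096 */
        }
        return 0;
}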
+
+void drbd_csum_bio(struct drbd_conf *mdev, struct crypto_hash *tfm, struct bio *bio, void *digest)
{
struct hash_desc desc;
struct scatterlist sg;
@@ -329,11 +361,11 @@ static int w_e_send_csum(struct drbd_conf *mdev, struct drbd_work *w, int cancel
return 1;
}
- if (likely(drbd_bio_uptodate(e->private_bio))) {
+ if (likely((e->flags & EE_WAS_ERROR) == 0)) {
digest_size = crypto_hash_digestsize(mdev->csums_tfm);
digest = kmalloc(digest_size, GFP_NOIO);
if (digest) {
- drbd_csum(mdev, mdev->csums_tfm, e->private_bio, digest);
+ drbd_csum_ee(mdev, mdev->csums_tfm, e, digest);
inc_rs_pending(mdev);
ok = drbd_send_drequest_csum(mdev,
@@ -369,23 +401,21 @@ static int read_for_csum(struct drbd_conf *mdev, sector_t sector, int size)
/* GFP_TRY, because if there is no memory available right now, this may
* be rescheduled for later. It is "only" background resync, after all. */
e = drbd_alloc_ee(mdev, DRBD_MAGIC+0xbeef, sector, size, GFP_TRY);
- if (!e) {
- put_ldev(mdev);
- return 2;
- }
+ if (!e)
+ goto fail;
spin_lock_irq(&mdev->req_lock);
list_add(&e->w.list, &mdev->read_ee);
spin_unlock_irq(&mdev->req_lock);
- e->private_bio->bi_end_io = drbd_endio_read_sec;
- e->private_bio->bi_rw = READ;
e->w.cb = w_e_send_csum;
+ if (drbd_submit_ee(mdev, e, READ, DRBD_FAULT_RS_RD) == 0)
+ return 1;
- mdev->read_cnt += size >> 9;
- drbd_generic_make_request(mdev, DRBD_FAULT_RS_RD, e->private_bio);
-
- return 1;
+ drbd_free_ee(mdev, e);
+fail:
+ put_ldev(mdev);
+ return 2;
}
void resync_timer_fn(unsigned long data)
@@ -414,13 +444,25 @@ void resync_timer_fn(unsigned long data)
drbd_queue_work(&mdev->data.work, &mdev->resync_work);
}
+static int calc_resync_rate(struct drbd_conf *mdev)
+{
+ int d = mdev->data_delay / 1000; /* us -> ms */
+ int td = mdev->sync_conf.throttle_th * 100; /* 0.1s -> ms */
+ int hd = mdev->sync_conf.hold_off_th * 100; /* 0.1s -> ms */
+ int cr = mdev->sync_conf.rate;
+
+ return d <= td ? cr :
+ d >= hd ? 0 :
+ cr + (cr * (td - d) / (hd - td));
+}
+
int w_make_resync_request(struct drbd_conf *mdev,
struct drbd_work *w, int cancel)
{
unsigned long bit;
sector_t sector;
const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
- int max_segment_size = queue_max_segment_size(mdev->rq_queue);
+ int max_segment_size;
int number, i, size, pe, mx;
int align, queued, sndbuf;
@@ -446,7 +488,13 @@ int w_make_resync_request(struct drbd_conf *mdev,
return 1;
}
- number = SLEEP_TIME * mdev->sync_conf.rate / ((BM_BLOCK_SIZE/1024)*HZ);
+ /* starting with drbd 8.3.8, we can handle multi-bio EEs,
+ * if it should be necessary */
+ max_segment_size = mdev->agreed_pro_version < 94 ?
+ queue_max_segment_size(mdev->rq_queue) : DRBD_MAX_SEGMENT_SIZE;
+
+ mdev->c_sync_rate = calc_resync_rate(mdev);
+ number = SLEEP_TIME * mdev->c_sync_rate / ((BM_BLOCK_SIZE / 1024) * HZ);
pe = atomic_read(&mdev->rs_pending_cnt);
mutex_lock(&mdev->data.mutex);
@@ -509,12 +557,6 @@ next_sector:
*
* Additionally always align bigger requests, in order to
* be prepared for all stripe sizes of software RAIDs.
- *
- * we _do_ care about the agreed-upon q->max_segment_size
- * here, as splitting up the requests on the other side is more
- * difficult. the consequence is, that on lvm and md and other
- * "indirect" devices, this is dead code, since
- * q->max_segment_size will be PAGE_SIZE.
*/
align = 1;
for (;;) {
@@ -806,7 +848,7 @@ out:
/* helper */
static void move_to_net_ee_or_free(struct drbd_conf *mdev, struct drbd_epoch_entry *e)
{
- if (drbd_bio_has_active_page(e->private_bio)) {
+ if (drbd_ee_has_active_page(e)) {
/* This might happen if sendpage() has not finished */
spin_lock_irq(&mdev->req_lock);
list_add_tail(&e->w.list, &mdev->net_ee);
@@ -832,7 +874,7 @@ int w_e_end_data_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
return 1;
}
- if (likely(drbd_bio_uptodate(e->private_bio))) {
+ if (likely((e->flags & EE_WAS_ERROR) == 0)) {
ok = drbd_send_block(mdev, P_DATA_REPLY, e);
} else {
if (__ratelimit(&drbd_ratelimit_state))
@@ -873,7 +915,7 @@ int w_e_end_rsdata_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
put_ldev(mdev);
}
- if (likely(drbd_bio_uptodate(e->private_bio))) {
+ if (likely((e->flags & EE_WAS_ERROR) == 0)) {
if (likely(mdev->state.pdsk >= D_INCONSISTENT)) {
inc_rs_pending(mdev);
ok = drbd_send_block(mdev, P_RS_DATA_REPLY, e);
@@ -921,7 +963,7 @@ int w_e_end_csum_rs_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
di = (struct digest_info *)(unsigned long)e->block_id;
- if (likely(drbd_bio_uptodate(e->private_bio))) {
+ if (likely((e->flags & EE_WAS_ERROR) == 0)) {
/* quick hack to try to avoid a race against reconfiguration.
* a real fix would be much more involved,
* introducing more locking mechanisms */
@@ -931,7 +973,7 @@ int w_e_end_csum_rs_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
digest = kmalloc(digest_size, GFP_NOIO);
}
if (digest) {
- drbd_csum(mdev, mdev->csums_tfm, e->private_bio, digest);
+ drbd_csum_ee(mdev, mdev->csums_tfm, e, digest);
eq = !memcmp(digest, di->digest, digest_size);
kfree(digest);
}
@@ -973,14 +1015,14 @@ int w_e_end_ov_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
if (unlikely(cancel))
goto out;
- if (unlikely(!drbd_bio_uptodate(e->private_bio)))
+ if (unlikely((e->flags & EE_WAS_ERROR) != 0))
goto out;
digest_size = crypto_hash_digestsize(mdev->verify_tfm);
/* FIXME if this allocation fails, online verify will not terminate! */
digest = kmalloc(digest_size, GFP_NOIO);
if (digest) {
- drbd_csum(mdev, mdev->verify_tfm, e->private_bio, digest);
+ drbd_csum_ee(mdev, mdev->verify_tfm, e, digest);
inc_rs_pending(mdev);
ok = drbd_send_drequest_csum(mdev, e->sector, e->size,
digest, digest_size, P_OV_REPLY);
@@ -1029,11 +1071,11 @@ int w_e_end_ov_reply(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
di = (struct digest_info *)(unsigned long)e->block_id;
- if (likely(drbd_bio_uptodate(e->private_bio))) {
+ if (likely((e->flags & EE_WAS_ERROR) == 0)) {
digest_size = crypto_hash_digestsize(mdev->verify_tfm);
digest = kmalloc(digest_size, GFP_NOIO);
if (digest) {
- drbd_csum(mdev, mdev->verify_tfm, e->private_bio, digest);
+ drbd_csum_ee(mdev, mdev->verify_tfm, e, digest);
D_ASSERT(digest_size == di->digest_size);
eq = !memcmp(digest, di->digest, digest_size);
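
The calc_resync_rate() helper added in the drbd_worker.c hunks above throttles the configured resync rate linearly once the measured data_delay exceeds the throttle threshold, dropping to zero at the hold-off threshold. As a sanity check of that interpolation, here is a minimal user-space sketch of the same arithmetic; the helper name and the sample numbers are illustrative only and are not part of the patch.

#include <stdio.h>

/* Same interpolation as calc_resync_rate(), but with all inputs already
 * converted to milliseconds.  The thresholds below are made-up examples. */
static int throttled_rate(int delay_ms, int throttle_ms, int holdoff_ms, int conf_rate)
{
	if (delay_ms <= throttle_ms)
		return conf_rate;		/* below threshold: full rate */
	if (delay_ms >= holdoff_ms)
		return 0;			/* beyond hold-off: stop resync */
	/* in between: scale linearly from conf_rate down to 0 */
	return conf_rate + conf_rate * (throttle_ms - delay_ms)
			   / (holdoff_ms - throttle_ms);
}

int main(void)
{
	int d;

	/* throttle above 100 ms, hold off at 500 ms, configured rate 250 */
	for (d = 0; d <= 600; d += 100)
		printf("delay %3d ms -> rate %d\n",
		       d, throttled_rate(d, 100, 500, 250));
	return 0;
}
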
diff --git a/drivers/block/drbd/drbd_wrappers.h b/drivers/block/drbd/drbd_wrappers.h
index f93fa111ce50..defdb5013ea3 100644
--- a/drivers/block/drbd/drbd_wrappers.h
+++ b/drivers/block/drbd/drbd_wrappers.h
@@ -18,23 +18,9 @@ static inline void drbd_set_my_capacity(struct drbd_conf *mdev,
#define drbd_bio_uptodate(bio) bio_flagged(bio, BIO_UPTODATE)
-static inline int drbd_bio_has_active_page(struct bio *bio)
-{
- struct bio_vec *bvec;
- int i;
-
- __bio_for_each_segment(bvec, bio, i, 0) {
- if (page_count(bvec->bv_page) > 1)
- return 1;
- }
-
- return 0;
-}
-
/* bi_end_io handlers */
extern void drbd_md_io_complete(struct bio *bio, int error);
-extern void drbd_endio_read_sec(struct bio *bio, int error);
-extern void drbd_endio_write_sec(struct bio *bio, int error);
+extern void drbd_endio_sec(struct bio *bio, int error);
extern void drbd_endio_pri(struct bio *bio, int error);
/*
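
The single drbd_endio_sec() completion handler declared above works because each epoch entry carries a pending-bio counter: every bio completion records any error and drops the counter, and only the last completion runs the read or write finalizer. A reduced user-space sketch of that pattern, using hypothetical names and C11 atomics in place of the kernel's atomic_t and set_bit():

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for an epoch entry: one logical request that may
 * have been split into several lower-level I/O units. */
struct epoch_entry {
	atomic_int pending;	/* bios still in flight */
	bool is_write;
	bool was_error;		/* the kernel uses set_bit(__EE_WAS_ERROR, ...) */
};

/* Called once per completed bio; only the last completion finalizes. */
static void endio_sec(struct epoch_entry *e, int error)
{
	if (error)
		e->was_error = true;	/* remembered, reported once at the end */

	if (atomic_fetch_sub(&e->pending, 1) == 1)
		printf("%s finalized, error=%d\n",
		       e->is_write ? "write" : "read", e->was_error);
}

int main(void)
{
	struct epoch_entry e = { .is_write = true };

	atomic_store(&e.pending, 3);	/* pretend the request used three bios */
	endio_sec(&e, 0);
	endio_sec(&e, -5);		/* one bio failed */
	endio_sec(&e, 0);		/* last completion reports the error */
	return 0;
}
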
diff --git a/drivers/block/hd.c b/drivers/block/hd.c
index 034e6dfc878c..81c78b3ce2df 100644
--- a/drivers/block/hd.c
+++ b/drivers/block/hd.c
@@ -164,12 +164,12 @@ unsigned long read_timer(void)
unsigned long t, flags;
int i;
- spin_lock_irqsave(&i8253_lock, flags);
+ raw_spin_lock_irqsave(&i8253_lock, flags);
t = jiffies * 11932;
outb_p(0, 0x43);
i = inb_p(0x40);
i |= inb(0x40) << 8;
- spin_unlock_irqrestore(&i8253_lock, flags);
+ raw_spin_unlock_irqrestore(&i8253_lock, flags);
return(t - i);
}
#endif
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 8546d123b9a7..6120922f459f 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -485,7 +485,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
goto out;
}
- ret = vfs_fsync(file, file->f_path.dentry, 0);
+ ret = vfs_fsync(file, 0);
if (unlikely(ret)) {
ret = -EIO;
goto out;
@@ -495,7 +495,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
ret = lo_send(lo, bio, pos);
if (barrier && !ret) {
- ret = vfs_fsync(file, file->f_path.dentry, 0);
+ ret = vfs_fsync(file, 0);
if (unlikely(ret))
ret = -EIO;
}
@@ -835,6 +835,8 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
set_capacity(lo->lo_disk, size);
bd_set_size(bdev, size << 9);
+ /* let user-space know about the new size */
+ kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
set_blocksize(bdev, lo_blocksize);
@@ -858,6 +860,7 @@ out_clr:
set_capacity(lo->lo_disk, 0);
invalidate_bdev(bdev);
bd_set_size(bdev, 0);
+ kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
mapping_set_gfp_mask(mapping, lo->old_gfp_mask);
lo->lo_state = Lo_unbound;
out_putf:
@@ -944,8 +947,11 @@ static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev)
if (bdev)
invalidate_bdev(bdev);
set_capacity(lo->lo_disk, 0);
- if (bdev)
+ if (bdev) {
bd_set_size(bdev, 0);
+ /* let user-space know about this change */
+ kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
+ }
mapping_set_gfp_mask(filp->f_mapping, gfp);
lo->lo_state = Lo_unbound;
/* This is safe: open() is still holding a reference. */
@@ -1189,6 +1195,8 @@ static int loop_set_capacity(struct loop_device *lo, struct block_device *bdev)
sz <<= 9;
mutex_lock(&bdev->bd_mutex);
bd_set_size(bdev, sz);
+ /* let user-space know about the new size */
+ kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
mutex_unlock(&bdev->bd_mutex);
out:
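
The loop.c hunks above add a KOBJ_CHANGE uevent every time the loop device's size changes, so udev and other listeners learn about resizes without polling. From user space the resize itself is still requested with LOOP_SET_CAPACITY and the new size read back with BLKGETSIZE64; a small sketch, assuming /dev/loop0 is already bound to a backing file and the caller has the needed privileges:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>	/* BLKGETSIZE64 */
#include <linux/loop.h>	/* LOOP_SET_CAPACITY */

int main(void)
{
	const char *dev = "/dev/loop0";	/* hypothetical, must be bound already */
	uint64_t bytes;
	int fd = open(dev, O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* ask the driver to pick up the backing file's current size ... */
	if (ioctl(fd, LOOP_SET_CAPACITY, 0) < 0)
		perror("LOOP_SET_CAPACITY");
	/* ... then read the (possibly new) capacity in bytes */
	if (ioctl(fd, BLKGETSIZE64, &bytes) == 0)
		printf("%s: %llu bytes\n", dev, (unsigned long long)bytes);
	close(fd);
	return 0;
}
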
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index 59ca2b77b574..52f2d11bc7b9 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -1004,7 +1004,7 @@ static const struct block_device_operations floppy_fops = {
static int swim3_add_device(struct macio_dev *mdev, int index)
{
- struct device_node *swim = mdev->ofdev.node;
+ struct device_node *swim = mdev->ofdev.dev.of_node;
struct floppy_state *fs = &floppy_states[index];
int rc = -EBUSY;
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 2138a7ae050c..83fa09a836ca 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -50,7 +50,7 @@ static void blk_done(struct virtqueue *vq)
unsigned long flags;
spin_lock_irqsave(&vblk->lock, flags);
- while ((vbr = vblk->vq->vq_ops->get_buf(vblk->vq, &len)) != NULL) {
+ while ((vbr = virtqueue_get_buf(vblk->vq, &len)) != NULL) {
int error;
switch (vbr->status) {
@@ -70,6 +70,8 @@ static void blk_done(struct virtqueue *vq)
vbr->req->sense_len = vbr->in_hdr.sense_len;
vbr->req->errors = vbr->in_hdr.errors;
}
+ if (blk_special_request(vbr->req))
+ vbr->req->errors = (error != 0);
__blk_end_request_all(vbr->req, error);
list_del(&vbr->list);
@@ -103,6 +105,11 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
vbr->out_hdr.sector = 0;
vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
break;
+ case REQ_TYPE_SPECIAL:
+ vbr->out_hdr.type = VIRTIO_BLK_T_GET_ID;
+ vbr->out_hdr.sector = 0;
+ vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
+ break;
case REQ_TYPE_LINUX_BLOCK:
if (req->cmd[0] == REQ_LB_OP_FLUSH) {
vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH;
@@ -151,7 +158,7 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
}
}
- if (vblk->vq->vq_ops->add_buf(vblk->vq, vblk->sg, out, in, vbr) < 0) {
+ if (virtqueue_add_buf(vblk->vq, vblk->sg, out, in, vbr) < 0) {
mempool_free(vbr, vblk->pool);
return false;
}
@@ -180,7 +187,7 @@ static void do_virtblk_request(struct request_queue *q)
}
if (issued)
- vblk->vq->vq_ops->kick(vblk->vq);
+ virtqueue_kick(vblk->vq);
}
static void virtblk_prepare_flush(struct request_queue *q, struct request *req)
@@ -189,12 +196,45 @@ static void virtblk_prepare_flush(struct request_queue *q, struct request *req)
req->cmd[0] = REQ_LB_OP_FLUSH;
}
+/* return id (s/n) string for *disk to *id_str
+ */
+static int virtblk_get_id(struct gendisk *disk, char *id_str)
+{
+ struct virtio_blk *vblk = disk->private_data;
+ struct request *req;
+ struct bio *bio;
+
+ bio = bio_map_kern(vblk->disk->queue, id_str, VIRTIO_BLK_ID_BYTES,
+ GFP_KERNEL);
+ if (IS_ERR(bio))
+ return PTR_ERR(bio);
+
+ req = blk_make_request(vblk->disk->queue, bio, GFP_KERNEL);
+ if (IS_ERR(req)) {
+ bio_put(bio);
+ return PTR_ERR(req);
+ }
+
+ req->cmd_type = REQ_TYPE_SPECIAL;
+ return blk_execute_rq(vblk->disk->queue, vblk->disk, req, false);
+}
+
static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
unsigned cmd, unsigned long data)
{
struct gendisk *disk = bdev->bd_disk;
struct virtio_blk *vblk = disk->private_data;
+ if (cmd == 0x56424944) { /* 'VBID' */
+ void __user *usr_data = (void __user *)data;
+ char id_str[VIRTIO_BLK_ID_BYTES];
+ int err;
+
+ err = virtblk_get_id(disk, id_str);
+ if (!err && copy_to_user(usr_data, id_str, VIRTIO_BLK_ID_BYTES))
+ err = -EFAULT;
+ return err;
+ }
/*
* Only allow the generic SCSI ioctls if the host can support it.
*/
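
The virtio_blk.c hunks above add a VIRTIO_BLK_T_GET_ID request and expose it through a driver-private ioctl whose number, 0x56424944 ('VBID'), comes straight from the patch. A user-space sketch of calling it; the device path is hypothetical, the buffer is sized generously, and the driver copies exactly VIRTIO_BLK_ID_BYTES bytes (assumed to be 20 here, worth checking against the header):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>

#define VBID_IOCTL	0x56424944	/* 'VBID', as defined in the patch above */

int main(void)
{
	char id[64];				/* >= VIRTIO_BLK_ID_BYTES (assumed 20) */
	int fd = open("/dev/vda", O_RDONLY);	/* hypothetical virtio disk */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	memset(id, 0, sizeof(id));
	if (ioctl(fd, VBID_IOCTL, id) == 0)
		printf("serial: %.20s\n", id);	/* may not be NUL-terminated */
	else
		perror("VBID ioctl");
	close(fd);
	return 0;
}
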
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index e1c95e208a66..a7b83c0a7eb5 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -1198,10 +1198,10 @@ ace_of_probe(struct of_device *op, const struct of_device_id *match)
dev_dbg(&op->dev, "ace_of_probe(%p, %p)\n", op, match);
/* device id */
- id = of_get_property(op->node, "port-number", NULL);
+ id = of_get_property(op->dev.of_node, "port-number", NULL);
/* physaddr */
- rc = of_address_to_resource(op->node, 0, &res);
+ rc = of_address_to_resource(op->dev.of_node, 0, &res);
if (rc) {
dev_err(&op->dev, "invalid address\n");
return rc;
@@ -1209,11 +1209,11 @@ ace_of_probe(struct of_device *op, const struct of_device_id *match)
physaddr = res.start;
/* irq */
- irq = irq_of_parse_and_map(op->node, 0);
+ irq = irq_of_parse_and_map(op->dev.of_node, 0);
/* bus width */
bus_width = ACE_BUS_WIDTH_16;
- if (of_find_property(op->node, "8-bit", NULL))
+ if (of_find_property(op->dev.of_node, "8-bit", NULL))
bus_width = ACE_BUS_WIDTH_8;
/* Call the bus-independant setup code */
@@ -1237,13 +1237,12 @@ static const struct of_device_id ace_of_match[] __devinitconst = {
MODULE_DEVICE_TABLE(of, ace_of_match);
static struct of_platform_driver ace_of_driver = {
- .owner = THIS_MODULE,
- .name = "xsysace",
- .match_table = ace_of_match,
.probe = ace_of_probe,
.remove = __devexit_p(ace_of_remove),
.driver = {
.name = "xsysace",
+ .owner = THIS_MODULE,
+ .of_match_table = ace_of_match,
},
};
diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c
index d9bf87ca9e83..6f907ebed2d5 100644
--- a/drivers/bluetooth/bluecard_cs.c
+++ b/drivers/bluetooth/bluecard_cs.c
@@ -65,7 +65,6 @@ MODULE_LICENSE("GPL");
typedef struct bluecard_info_t {
struct pcmcia_device *p_dev;
- dev_node_t node;
struct hci_dev *hdev;
@@ -869,9 +868,6 @@ static int bluecard_probe(struct pcmcia_device *link)
link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
link->io.NumPorts1 = 8;
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
-
- link->irq.Handler = bluecard_interrupt;
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
@@ -908,9 +904,9 @@ static int bluecard_config(struct pcmcia_device *link)
if (i != 0)
goto failed;
- i = pcmcia_request_irq(link, &link->irq);
+ i = pcmcia_request_irq(link, bluecard_interrupt);
if (i != 0)
- link->irq.AssignedIRQ = 0;
+ goto failed;
i = pcmcia_request_configuration(link, &link->conf);
if (i != 0)
@@ -919,9 +915,6 @@ static int bluecard_config(struct pcmcia_device *link)
if (bluecard_open(info) != 0)
goto failed;
- strcpy(info->node.dev_name, info->hdev->name);
- link->dev_node = &info->node;
-
return 0;
failed:
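
The bluecard_cs.c change above (and the matching bt3c_cs, btuart_cs and dtl1_cs changes further down) drops the old link->irq descriptor, passes the handler straight to pcmcia_request_irq(), and treats a failed IRQ request as a hard configuration error instead of silently continuing without an interrupt. A compile-level sketch of the resulting pattern for an in-kernel PCMCIA driver of that era; the example_ names are hypothetical and not part of the patch:

#include <linux/interrupt.h>
#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/ds.h>

static irqreturn_t example_interrupt(int irq, void *dev_inst)
{
	/* acknowledge and handle the card interrupt here */
	return IRQ_HANDLED;
}

static int example_config(struct pcmcia_device *link)
{
	int ret;

	/* new style: handler passed directly, no link->irq bookkeeping */
	ret = pcmcia_request_irq(link, example_interrupt);
	if (ret)
		goto failed;

	ret = pcmcia_request_configuration(link, &link->conf);
	if (ret)
		goto failed;

	return 0;

failed:
	pcmcia_disable_device(link);
	return -ENODEV;
}
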
diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c
index 027cb8bf650f..21e05fdc9121 100644
--- a/drivers/bluetooth/bt3c_cs.c
+++ b/drivers/bluetooth/bt3c_cs.c
@@ -72,7 +72,6 @@ MODULE_FIRMWARE("BT3CPCC.bin");
typedef struct bt3c_info_t {
struct pcmcia_device *p_dev;
- dev_node_t node;
struct hci_dev *hdev;
@@ -661,9 +660,6 @@ static int bt3c_probe(struct pcmcia_device *link)
link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
link->io.NumPorts1 = 8;
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
-
- link->irq.Handler = bt3c_interrupt;
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
@@ -743,9 +739,9 @@ static int bt3c_config(struct pcmcia_device *link)
goto failed;
found_port:
- i = pcmcia_request_irq(link, &link->irq);
+ i = pcmcia_request_irq(link, &bt3c_interrupt);
if (i != 0)
- link->irq.AssignedIRQ = 0;
+ goto failed;
i = pcmcia_request_configuration(link, &link->conf);
if (i != 0)
@@ -754,9 +750,6 @@ found_port:
if (bt3c_open(info) != 0)
goto failed;
- strcpy(info->node.dev_name, info->hdev->name);
- link->dev_node = &info->node;
-
return 0;
failed:
diff --git a/drivers/bluetooth/btmrvl_drv.h b/drivers/bluetooth/btmrvl_drv.h
index 204727586ee9..bed0ba630235 100644
--- a/drivers/bluetooth/btmrvl_drv.h
+++ b/drivers/bluetooth/btmrvl_drv.h
@@ -42,6 +42,8 @@ struct btmrvl_device {
void *card;
struct hci_dev *hcidev;
+ u8 dev_type;
+
u8 tx_dnld_rdy;
u8 psmode;
@@ -88,8 +90,11 @@ struct btmrvl_private {
#define BT_CMD_HOST_SLEEP_ENABLE 0x5A
#define BT_CMD_MODULE_CFG_REQ 0x5B
-/* Sub-commands: Module Bringup/Shutdown Request */
+/* Sub-commands: Module Bringup/Shutdown Request/Response */
#define MODULE_BRINGUP_REQ 0xF1
+#define MODULE_BROUGHT_UP 0x00
+#define MODULE_ALREADY_UP 0x0C
+
#define MODULE_SHUTDOWN_REQ 0xF2
#define BT_EVENT_POWER_STATE 0x20
@@ -123,6 +128,7 @@ struct btmrvl_event {
/* Prototype of global function */
+int btmrvl_register_hdev(struct btmrvl_private *priv);
struct btmrvl_private *btmrvl_add_card(void *card);
int btmrvl_remove_card(struct btmrvl_private *priv);
diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
index 53a43adf2e21..ee37ef0caee2 100644
--- a/drivers/bluetooth/btmrvl_main.c
+++ b/drivers/bluetooth/btmrvl_main.c
@@ -66,7 +66,7 @@ int btmrvl_process_event(struct btmrvl_private *priv, struct sk_buff *skb)
{
struct btmrvl_adapter *adapter = priv->adapter;
struct btmrvl_event *event;
- u8 ret = 0;
+ int ret = 0;
event = (struct btmrvl_event *) skb->data;
if (event->ec != 0xff) {
@@ -112,8 +112,17 @@ int btmrvl_process_event(struct btmrvl_private *priv, struct sk_buff *skb)
case BT_CMD_MODULE_CFG_REQ:
if (priv->btmrvl_dev.sendcmdflag &&
event->data[1] == MODULE_BRINGUP_REQ) {
- BT_DBG("EVENT:%s", (event->data[2]) ?
- "Bring-up failed" : "Bring-up succeed");
+ BT_DBG("EVENT:%s",
+ ((event->data[2] == MODULE_BROUGHT_UP) ||
+ (event->data[2] == MODULE_ALREADY_UP)) ?
+ "Bring-up succeed" : "Bring-up failed");
+
+ if (event->length > 3)
+ priv->btmrvl_dev.dev_type = event->data[3];
+ else
+ priv->btmrvl_dev.dev_type = HCI_BREDR;
+
+ BT_DBG("dev_type: %d", priv->btmrvl_dev.dev_type);
} else if (priv->btmrvl_dev.sendcmdflag &&
event->data[1] == MODULE_SHUTDOWN_REQ) {
BT_DBG("EVENT:%s", (event->data[2]) ?
@@ -522,47 +531,20 @@ static int btmrvl_service_main_thread(void *data)
return 0;
}
-struct btmrvl_private *btmrvl_add_card(void *card)
+int btmrvl_register_hdev(struct btmrvl_private *priv)
{
struct hci_dev *hdev = NULL;
- struct btmrvl_private *priv;
int ret;
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- BT_ERR("Can not allocate priv");
- goto err_priv;
- }
-
- priv->adapter = kzalloc(sizeof(*priv->adapter), GFP_KERNEL);
- if (!priv->adapter) {
- BT_ERR("Allocate buffer for btmrvl_adapter failed!");
- goto err_adapter;
- }
-
- btmrvl_init_adapter(priv);
-
hdev = hci_alloc_dev();
if (!hdev) {
BT_ERR("Can not allocate HCI device");
goto err_hdev;
}
- BT_DBG("Starting kthread...");
- priv->main_thread.priv = priv;
- spin_lock_init(&priv->driver_lock);
-
- init_waitqueue_head(&priv->main_thread.wait_q);
- priv->main_thread.task = kthread_run(btmrvl_service_main_thread,
- &priv->main_thread, "btmrvl_main_service");
-
priv->btmrvl_dev.hcidev = hdev;
- priv->btmrvl_dev.card = card;
-
hdev->driver_data = priv;
- priv->btmrvl_dev.tx_dnld_rdy = true;
-
hdev->bus = HCI_SDIO;
hdev->open = btmrvl_open;
hdev->close = btmrvl_close;
@@ -572,6 +554,10 @@ struct btmrvl_private *btmrvl_add_card(void *card)
hdev->ioctl = btmrvl_ioctl;
hdev->owner = THIS_MODULE;
+ btmrvl_send_module_cfg_cmd(priv, MODULE_BRINGUP_REQ);
+
+ hdev->dev_type = priv->btmrvl_dev.dev_type;
+
ret = hci_register_dev(hdev);
if (ret < 0) {
BT_ERR("Can not register HCI device");
@@ -582,16 +568,52 @@ struct btmrvl_private *btmrvl_add_card(void *card)
btmrvl_debugfs_init(hdev);
#endif
- return priv;
+ return 0;
err_hci_register_dev:
- /* Stop the thread servicing the interrupts */
- kthread_stop(priv->main_thread.task);
-
hci_free_dev(hdev);
err_hdev:
+ /* Stop the thread servicing the interrupts */
+ kthread_stop(priv->main_thread.task);
+
btmrvl_free_adapter(priv);
+ kfree(priv);
+
+ return -ENOMEM;
+}
+EXPORT_SYMBOL_GPL(btmrvl_register_hdev);
+
+struct btmrvl_private *btmrvl_add_card(void *card)
+{
+ struct btmrvl_private *priv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ BT_ERR("Can not allocate priv");
+ goto err_priv;
+ }
+
+ priv->adapter = kzalloc(sizeof(*priv->adapter), GFP_KERNEL);
+ if (!priv->adapter) {
+ BT_ERR("Allocate buffer for btmrvl_adapter failed!");
+ goto err_adapter;
+ }
+
+ btmrvl_init_adapter(priv);
+
+ BT_DBG("Starting kthread...");
+ priv->main_thread.priv = priv;
+ spin_lock_init(&priv->driver_lock);
+
+ init_waitqueue_head(&priv->main_thread.wait_q);
+ priv->main_thread.task = kthread_run(btmrvl_service_main_thread,
+ &priv->main_thread, "btmrvl_main_service");
+
+ priv->btmrvl_dev.card = card;
+ priv->btmrvl_dev.tx_dnld_rdy = true;
+
+ return priv;
err_adapter:
kfree(priv);
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index 0dba76aa2232..df0773ebd9e4 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -931,7 +931,12 @@ static int btmrvl_sdio_probe(struct sdio_func *func,
priv->hw_host_to_card = btmrvl_sdio_host_to_card;
priv->hw_wakeup_firmware = btmrvl_sdio_wakeup_fw;
- btmrvl_send_module_cfg_cmd(priv, MODULE_BRINGUP_REQ);
+ if (btmrvl_register_hdev(priv)) {
+ BT_ERR("Register hdev failed!");
+ ret = -ENODEV;
+ goto disable_host_int;
+ }
+
priv->btmrvl_dev.psmode = 1;
btmrvl_enable_ps(priv);
diff --git a/drivers/bluetooth/btuart_cs.c b/drivers/bluetooth/btuart_cs.c
index 60c0953d7d00..4ed7288f99db 100644
--- a/drivers/bluetooth/btuart_cs.c
+++ b/drivers/bluetooth/btuart_cs.c
@@ -67,7 +67,6 @@ MODULE_LICENSE("GPL");
typedef struct btuart_info_t {
struct pcmcia_device *p_dev;
- dev_node_t node;
struct hci_dev *hdev;
@@ -590,9 +589,6 @@ static int btuart_probe(struct pcmcia_device *link)
link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
link->io.NumPorts1 = 8;
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
-
- link->irq.Handler = btuart_interrupt;
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
@@ -672,9 +668,9 @@ static int btuart_config(struct pcmcia_device *link)
goto failed;
found_port:
- i = pcmcia_request_irq(link, &link->irq);
+ i = pcmcia_request_irq(link, btuart_interrupt);
if (i != 0)
- link->irq.AssignedIRQ = 0;
+ goto failed;
i = pcmcia_request_configuration(link, &link->conf);
if (i != 0)
@@ -683,9 +679,6 @@ found_port:
if (btuart_open(info) != 0)
goto failed;
- strcpy(info->node.dev_name, info->hdev->name);
- link->dev_node = &info->node;
-
return 0;
failed:
diff --git a/drivers/bluetooth/dtl1_cs.c b/drivers/bluetooth/dtl1_cs.c
index 17788317c51a..ef044d55cb25 100644
--- a/drivers/bluetooth/dtl1_cs.c
+++ b/drivers/bluetooth/dtl1_cs.c
@@ -67,7 +67,6 @@ MODULE_LICENSE("GPL");
typedef struct dtl1_info_t {
struct pcmcia_device *p_dev;
- dev_node_t node;
struct hci_dev *hdev;
@@ -575,9 +574,6 @@ static int dtl1_probe(struct pcmcia_device *link)
link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
link->io.NumPorts1 = 8;
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
-
- link->irq.Handler = dtl1_interrupt;
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
@@ -621,9 +617,9 @@ static int dtl1_config(struct pcmcia_device *link)
if (pcmcia_loop_config(link, dtl1_confcheck, NULL) < 0)
goto failed;
- i = pcmcia_request_irq(link, &link->irq);
+ i = pcmcia_request_irq(link, dtl1_interrupt);
if (i != 0)
- link->irq.AssignedIRQ = 0;
+ goto failed;
i = pcmcia_request_configuration(link, &link->conf);
if (i != 0)
@@ -632,9 +628,6 @@ static int dtl1_config(struct pcmcia_device *link)
if (dtl1_open(info) != 0)
goto failed;
- strcpy(info->node.dev_name, info->hdev->name);
- link->dev_node = &info->node;
-
return 0;
failed:
diff --git a/drivers/bluetooth/hci_h4.c b/drivers/bluetooth/hci_h4.c
index c0ce8134814e..3f038f5308a4 100644
--- a/drivers/bluetooth/hci_h4.c
+++ b/drivers/bluetooth/hci_h4.c
@@ -246,7 +246,7 @@ static int h4_recv(struct hci_uart *hu, void *data, int count)
BT_ERR("Can't allocate mem for new packet");
h4->rx_state = H4_W4_PACKET_TYPE;
h4->rx_count = 0;
- return 0;
+ return -ENOMEM;
}
h4->rx_skb->dev = (void *) hu->hdev;
diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c
index 5c65014635be..fb8445c7365e 100644
--- a/drivers/bluetooth/hci_ll.c
+++ b/drivers/bluetooth/hci_ll.c
@@ -402,7 +402,7 @@ static int ll_recv(struct hci_uart *hu, void *data, int count)
continue;
case HCILL_W4_EVENT_HDR:
- eh = (struct hci_event_hdr *) ll->rx_skb->data;
+ eh = hci_event_hdr(ll->rx_skb);
BT_DBG("Event header: evt 0x%2.2x plen %d", eh->evt, eh->plen);
@@ -410,7 +410,7 @@ static int ll_recv(struct hci_uart *hu, void *data, int count)
continue;
case HCILL_W4_ACL_HDR:
- ah = (struct hci_acl_hdr *) ll->rx_skb->data;
+ ah = hci_acl_hdr(ll->rx_skb);
dlen = __le16_to_cpu(ah->dlen);
BT_DBG("ACL header: dlen %d", dlen);
@@ -419,7 +419,7 @@ static int ll_recv(struct hci_uart *hu, void *data, int count)
continue;
case HCILL_W4_SCO_HDR:
- sh = (struct hci_sco_hdr *) ll->rx_skb->data;
+ sh = hci_sco_hdr(ll->rx_skb);
BT_DBG("SCO header: dlen %d", sh->dlen);
@@ -491,7 +491,7 @@ static int ll_recv(struct hci_uart *hu, void *data, int count)
BT_ERR("Can't allocate mem for new packet");
ll->rx_state = HCILL_W4_PACKET_TYPE;
ll->rx_count = 0;
- return 0;
+ return -ENOMEM;
}
ll->rx_skb->dev = (void *) hu->hdev;
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index bb0aefdb4267..3aa7b2a54b6f 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -157,7 +157,7 @@ static inline ssize_t vhci_put_user(struct vhci_data *data,
break;
case HCI_SCODATA_PKT:
- data->hdev->stat.cmd_tx++;
+ data->hdev->stat.sco_tx++;
break;
};
diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c
index cc435be0bc13..451cd7071b1d 100644
--- a/drivers/cdrom/viocd.c
+++ b/drivers/cdrom/viocd.c
@@ -567,7 +567,7 @@ static int viocd_probe(struct vio_dev *vdev, const struct vio_device_id *id)
struct disk_info *d;
struct cdrom_device_info *c;
struct request_queue *q;
- struct device_node *node = vdev->dev.archdata.of_node;
+ struct device_node *node = vdev->dev.of_node;
deviceno = vdev->unit_address;
if (deviceno >= VIOCD_MAX_CD)
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 3141dd3b6e53..f09fc0e2062d 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -276,11 +276,19 @@ config N_HDLC
Allows synchronous HDLC communications with tty device drivers that
support synchronous HDLC such as the Microgate SyncLink adapter.
- This driver can only be built as a module ( = code which can be
+ This driver can be built as a module ( = code which can be
inserted in and removed from the running kernel whenever you want).
The module will be called n_hdlc. If you want to do that, say M
here.
+config N_GSM
+ tristate "GSM MUX line discipline support (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+ depends on NET
+ help
+ This line discipline provides support for the GSM MUX protocol and
+ presents the mux as a set of 61 individual tty devices.
+
config RISCOM8
tristate "SDL RISCom/8 card support"
depends on SERIAL_NONSTANDARD
@@ -1113,5 +1121,12 @@ config DEVPORT
source "drivers/s390/char/Kconfig"
+config RAMOOPS
+ tristate "Log panic/oops to a RAM buffer"
+ default n
+ help
+ This enables panic and oops messages to be logged to a circular
+ buffer in RAM where it can be read back at some later point.
+
endmenu
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index f957edf7e45d..88d6eac69754 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -40,6 +40,7 @@ obj-$(CONFIG_SYNCLINK) += synclink.o
obj-$(CONFIG_SYNCLINKMP) += synclinkmp.o
obj-$(CONFIG_SYNCLINK_GT) += synclink_gt.o
obj-$(CONFIG_N_HDLC) += n_hdlc.o
+obj-$(CONFIG_N_GSM) += n_gsm.o
obj-$(CONFIG_AMIGA_BUILTIN_SERIAL) += amiserial.o
obj-$(CONFIG_SX) += sx.o generic_serial.o
obj-$(CONFIG_RIO) += rio/ generic_serial.o
@@ -107,6 +108,7 @@ obj-$(CONFIG_HANGCHECK_TIMER) += hangcheck-timer.o
obj-$(CONFIG_TCG_TPM) += tpm/
obj-$(CONFIG_PS3_FLASH) += ps3flash.o
+obj-$(CONFIG_RAMOOPS) += ramoops.o
obj-$(CONFIG_JS_RTC) += js-rtc.o
js-rtc-y = rtc.o
diff --git a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h
index 870f12cfed93..120490949997 100644
--- a/drivers/char/agp/agp.h
+++ b/drivers/char/agp/agp.h
@@ -178,86 +178,6 @@ struct agp_bridge_data {
#define PGE_EMPTY(b, p) (!(p) || (p) == (unsigned long) (b)->scratch_page)
-/* Intel registers */
-#define INTEL_APSIZE 0xb4
-#define INTEL_ATTBASE 0xb8
-#define INTEL_AGPCTRL 0xb0
-#define INTEL_NBXCFG 0x50
-#define INTEL_ERRSTS 0x91
-
-/* Intel i830 registers */
-#define I830_GMCH_CTRL 0x52
-#define I830_GMCH_ENABLED 0x4
-#define I830_GMCH_MEM_MASK 0x1
-#define I830_GMCH_MEM_64M 0x1
-#define I830_GMCH_MEM_128M 0
-#define I830_GMCH_GMS_MASK 0x70
-#define I830_GMCH_GMS_DISABLED 0x00
-#define I830_GMCH_GMS_LOCAL 0x10
-#define I830_GMCH_GMS_STOLEN_512 0x20
-#define I830_GMCH_GMS_STOLEN_1024 0x30
-#define I830_GMCH_GMS_STOLEN_8192 0x40
-#define I830_RDRAM_CHANNEL_TYPE 0x03010
-#define I830_RDRAM_ND(x) (((x) & 0x20) >> 5)
-#define I830_RDRAM_DDT(x) (((x) & 0x18) >> 3)
-
-/* This one is for I830MP w. an external graphic card */
-#define INTEL_I830_ERRSTS 0x92
-
-/* Intel 855GM/852GM registers */
-#define I855_GMCH_GMS_MASK 0xF0
-#define I855_GMCH_GMS_STOLEN_0M 0x0
-#define I855_GMCH_GMS_STOLEN_1M (0x1 << 4)
-#define I855_GMCH_GMS_STOLEN_4M (0x2 << 4)
-#define I855_GMCH_GMS_STOLEN_8M (0x3 << 4)
-#define I855_GMCH_GMS_STOLEN_16M (0x4 << 4)
-#define I855_GMCH_GMS_STOLEN_32M (0x5 << 4)
-#define I85X_CAPID 0x44
-#define I85X_VARIANT_MASK 0x7
-#define I85X_VARIANT_SHIFT 5
-#define I855_GME 0x0
-#define I855_GM 0x4
-#define I852_GME 0x2
-#define I852_GM 0x5
-
-/* Intel i845 registers */
-#define INTEL_I845_AGPM 0x51
-#define INTEL_I845_ERRSTS 0xc8
-
-/* Intel i860 registers */
-#define INTEL_I860_MCHCFG 0x50
-#define INTEL_I860_ERRSTS 0xc8
-
-/* Intel i810 registers */
-#define I810_GMADDR 0x10
-#define I810_MMADDR 0x14
-#define I810_PTE_BASE 0x10000
-#define I810_PTE_MAIN_UNCACHED 0x00000000
-#define I810_PTE_LOCAL 0x00000002
-#define I810_PTE_VALID 0x00000001
-#define I830_PTE_SYSTEM_CACHED 0x00000006
-#define I810_SMRAM_MISCC 0x70
-#define I810_GFX_MEM_WIN_SIZE 0x00010000
-#define I810_GFX_MEM_WIN_32M 0x00010000
-#define I810_GMS 0x000000c0
-#define I810_GMS_DISABLE 0x00000000
-#define I810_PGETBL_CTL 0x2020
-#define I810_PGETBL_ENABLED 0x00000001
-#define I965_PGETBL_SIZE_MASK 0x0000000e
-#define I965_PGETBL_SIZE_512KB (0 << 1)
-#define I965_PGETBL_SIZE_256KB (1 << 1)
-#define I965_PGETBL_SIZE_128KB (2 << 1)
-#define I965_PGETBL_SIZE_1MB (3 << 1)
-#define I965_PGETBL_SIZE_2MB (4 << 1)
-#define I965_PGETBL_SIZE_1_5MB (5 << 1)
-#define G33_PGETBL_SIZE_MASK (3 << 8)
-#define G33_PGETBL_SIZE_1M (1 << 8)
-#define G33_PGETBL_SIZE_2M (2 << 8)
-
-#define I810_DRAM_CTL 0x3000
-#define I810_DRAM_ROW_0 0x00000001
-#define I810_DRAM_ROW_0_SDRAM 0x00000001
-
struct agp_device_ids {
unsigned short device_id; /* first, to make table easier to read */
enum chipset_type chipset;
diff --git a/drivers/char/agp/ali-agp.c b/drivers/char/agp/ali-agp.c
index d2ce68f27e4b..fd793519ea2b 100644
--- a/drivers/char/agp/ali-agp.c
+++ b/drivers/char/agp/ali-agp.c
@@ -204,6 +204,7 @@ static const struct agp_bridge_driver ali_generic_bridge = {
.aperture_sizes = ali_generic_sizes,
.size_type = U32_APER_SIZE,
.num_aperture_sizes = 7,
+ .needs_scratch_page = true,
.configure = ali_configure,
.fetch_size = ali_fetch_size,
.cleanup = ali_cleanup,
diff --git a/drivers/char/agp/amd-k7-agp.c b/drivers/char/agp/amd-k7-agp.c
index a7637d72cef6..b6b1568314c8 100644
--- a/drivers/char/agp/amd-k7-agp.c
+++ b/drivers/char/agp/amd-k7-agp.c
@@ -142,6 +142,7 @@ static int amd_create_gatt_table(struct agp_bridge_data *bridge)
{
struct aper_size_info_lvl2 *value;
struct amd_page_map page_dir;
+ unsigned long __iomem *cur_gatt;
unsigned long addr;
int retval;
u32 temp;
@@ -178,6 +179,13 @@ static int amd_create_gatt_table(struct agp_bridge_data *bridge)
readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr)); /* PCI Posting. */
}
+ for (i = 0; i < value->num_entries; i++) {
+ addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr;
+ cur_gatt = GET_GATT(addr);
+ writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr));
+ readl(cur_gatt+GET_GATT_OFF(addr)); /* PCI Posting. */
+ }
+
return 0;
}
@@ -375,6 +383,7 @@ static const struct agp_bridge_driver amd_irongate_driver = {
.aperture_sizes = amd_irongate_sizes,
.size_type = LVL2_APER_SIZE,
.num_aperture_sizes = 7,
+ .needs_scratch_page = true,
.configure = amd_irongate_configure,
.fetch_size = amd_irongate_fetch_size,
.cleanup = amd_irongate_cleanup,
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index fd50ead59c79..70312da4c968 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -210,6 +210,7 @@ static const struct agp_bridge_driver amd_8151_driver = {
.aperture_sizes = amd_8151_sizes,
.size_type = U32_APER_SIZE,
.num_aperture_sizes = 7,
+ .needs_scratch_page = true,
.configure = amd_8151_configure,
.fetch_size = amd64_fetch_size,
.cleanup = amd64_cleanup,
@@ -383,7 +384,7 @@ static int __devinit uli_agp_init(struct pci_dev *pdev)
{
u32 httfea,baseaddr,enuscr;
struct pci_dev *dev1;
- int i;
+ int i, ret;
unsigned size = amd64_fetch_size();
dev_info(&pdev->dev, "setting up ULi AGP\n");
@@ -399,15 +400,18 @@ static int __devinit uli_agp_init(struct pci_dev *pdev)
if (i == ARRAY_SIZE(uli_sizes)) {
dev_info(&pdev->dev, "no ULi size found for %d\n", size);
- return -ENODEV;
+ ret = -ENODEV;
+ goto put;
}
/* shadow x86-64 registers into ULi registers */
pci_read_config_dword (k8_northbridges[0], AMD64_GARTAPERTUREBASE, &httfea);
/* if x86-64 aperture base is beyond 4G, exit here */
- if ((httfea & 0x7fff) >> (32 - 25))
- return -ENODEV;
+ if ((httfea & 0x7fff) >> (32 - 25)) {
+ ret = -ENODEV;
+ goto put;
+ }
httfea = (httfea& 0x7fff) << 25;
@@ -419,9 +423,10 @@ static int __devinit uli_agp_init(struct pci_dev *pdev)
enuscr= httfea+ (size * 1024 * 1024) - 1;
pci_write_config_dword(dev1, ULI_X86_64_HTT_FEA_REG, httfea);
pci_write_config_dword(dev1, ULI_X86_64_ENU_SCR_REG, enuscr);
-
+ ret = 0;
+put:
pci_dev_put(dev1);
- return 0;
+ return ret;
}
@@ -440,7 +445,7 @@ static int nforce3_agp_init(struct pci_dev *pdev)
{
u32 tmp, apbase, apbar, aplimit;
struct pci_dev *dev1;
- int i;
+ int i, ret;
unsigned size = amd64_fetch_size();
dev_info(&pdev->dev, "setting up Nforce3 AGP\n");
@@ -457,7 +462,8 @@ static int nforce3_agp_init(struct pci_dev *pdev)
if (i == ARRAY_SIZE(nforce3_sizes)) {
dev_info(&pdev->dev, "no NForce3 size found for %d\n", size);
- return -ENODEV;
+ ret = -ENODEV;
+ goto put;
}
pci_read_config_dword(dev1, NVIDIA_X86_64_1_APSIZE, &tmp);
@@ -471,7 +477,8 @@ static int nforce3_agp_init(struct pci_dev *pdev)
/* if x86-64 aperture base is beyond 4G, exit here */
if ( (apbase & 0x7fff) >> (32 - 25) ) {
dev_info(&pdev->dev, "aperture base > 4G\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto put;
}
apbase = (apbase & 0x7fff) << 25;
@@ -487,9 +494,11 @@ static int nforce3_agp_init(struct pci_dev *pdev)
pci_write_config_dword(dev1, NVIDIA_X86_64_1_APBASE2, apbase);
pci_write_config_dword(dev1, NVIDIA_X86_64_1_APLIMIT2, aplimit);
+ ret = 0;
+put:
pci_dev_put(dev1);
- return 0;
+ return ret;
}
static int __devinit agp_amd64_probe(struct pci_dev *pdev,
@@ -499,6 +508,10 @@ static int __devinit agp_amd64_probe(struct pci_dev *pdev,
u8 cap_ptr;
int err;
+ /* The Highlander principle */
+ if (agp_bridges_found)
+ return -ENODEV;
+
cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
if (!cap_ptr)
return -ENODEV;
@@ -562,6 +575,8 @@ static void __devexit agp_amd64_remove(struct pci_dev *pdev)
amd64_aperture_sizes[bridge->aperture_size_idx].size);
agp_remove_bridge(bridge);
agp_put_bridge(bridge);
+
+ agp_bridges_found--;
}
#ifdef CONFIG_PM
@@ -709,6 +724,11 @@ static struct pci_device_id agp_amd64_pci_table[] = {
MODULE_DEVICE_TABLE(pci, agp_amd64_pci_table);
+static DEFINE_PCI_DEVICE_TABLE(agp_amd64_pci_promisc_table) = {
+ { PCI_DEVICE_CLASS(0, 0) },
+ { }
+};
+
static struct pci_driver agp_amd64_pci_driver = {
.name = "agpgart-amd64",
.id_table = agp_amd64_pci_table,
@@ -734,7 +754,6 @@ int __init agp_amd64_init(void)
return err;
if (agp_bridges_found == 0) {
- struct pci_dev *dev;
if (!agp_try_unsupported && !agp_try_unsupported_boot) {
printk(KERN_INFO PFX "No supported AGP bridge found.\n");
#ifdef MODULE
@@ -750,17 +769,10 @@ int __init agp_amd64_init(void)
return -ENODEV;
/* Look for any AGP bridge */
- dev = NULL;
- err = -ENODEV;
- for_each_pci_dev(dev) {
- if (!pci_find_capability(dev, PCI_CAP_ID_AGP))
- continue;
- /* Only one bridge supported right now */
- if (agp_amd64_probe(dev, NULL) == 0) {
- err = 0;
- break;
- }
- }
+ agp_amd64_pci_driver.id_table = agp_amd64_pci_promisc_table;
+ err = driver_attach(&agp_amd64_pci_driver.driver);
+ if (err == 0 && agp_bridges_found == 0)
+ err = -ENODEV;
}
return err;
}
diff --git a/drivers/char/agp/ati-agp.c b/drivers/char/agp/ati-agp.c
index 3b2ecbe86ebe..dc30e2243494 100644
--- a/drivers/char/agp/ati-agp.c
+++ b/drivers/char/agp/ati-agp.c
@@ -341,6 +341,7 @@ static int ati_create_gatt_table(struct agp_bridge_data *bridge)
{
struct aper_size_info_lvl2 *value;
struct ati_page_map page_dir;
+ unsigned long __iomem *cur_gatt;
unsigned long addr;
int retval;
u32 temp;
@@ -395,6 +396,12 @@ static int ati_create_gatt_table(struct agp_bridge_data *bridge)
readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr)); /* PCI Posting. */
}
+ for (i = 0; i < value->num_entries; i++) {
+ addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr;
+ cur_gatt = GET_GATT(addr);
+ writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr));
+ }
+
return 0;
}
@@ -415,6 +422,7 @@ static const struct agp_bridge_driver ati_generic_bridge = {
.aperture_sizes = ati_generic_sizes,
.size_type = LVL2_APER_SIZE,
.num_aperture_sizes = 7,
+ .needs_scratch_page = true,
.configure = ati_configure,
.fetch_size = ati_fetch_size,
.cleanup = ati_cleanup,
diff --git a/drivers/char/agp/efficeon-agp.c b/drivers/char/agp/efficeon-agp.c
index 793f39ea9618..aa109cbe0e6e 100644
--- a/drivers/char/agp/efficeon-agp.c
+++ b/drivers/char/agp/efficeon-agp.c
@@ -28,6 +28,7 @@
#include <linux/page-flags.h>
#include <linux/mm.h>
#include "agp.h"
+#include "intel-agp.h"
/*
* The real differences to the generic AGP code is
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index fb86708e47ed..4b51982fd23a 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -1214,7 +1214,7 @@ struct agp_memory *agp_generic_alloc_user(size_t page_count, int type)
return NULL;
for (i = 0; i < page_count; i++)
- new->pages[i] = 0;
+ new->pages[i] = NULL;
new->page_count = 0;
new->type = type;
new->num_scratch_pages = pages;
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index aa4248efc5d8..d836a71bf06d 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -11,1531 +11,13 @@
#include <linux/agp_backend.h>
#include <asm/smp.h>
#include "agp.h"
+#include "intel-agp.h"
+
+#include "intel-gtt.c"
int intel_agp_enabled;
EXPORT_SYMBOL(intel_agp_enabled);
-/*
- * If we have Intel graphics, we're not going to have anything other than
- * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
- * on the Intel IOMMU support (CONFIG_DMAR).
- * Only newer chipsets need to bother with this, of course.
- */
-#ifdef CONFIG_DMAR
-#define USE_PCI_DMA_API 1
-#endif
-
-#define PCI_DEVICE_ID_INTEL_E7221_HB 0x2588
-#define PCI_DEVICE_ID_INTEL_E7221_IG 0x258a
-#define PCI_DEVICE_ID_INTEL_82946GZ_HB 0x2970
-#define PCI_DEVICE_ID_INTEL_82946GZ_IG 0x2972
-#define PCI_DEVICE_ID_INTEL_82G35_HB 0x2980
-#define PCI_DEVICE_ID_INTEL_82G35_IG 0x2982
-#define PCI_DEVICE_ID_INTEL_82965Q_HB 0x2990
-#define PCI_DEVICE_ID_INTEL_82965Q_IG 0x2992
-#define PCI_DEVICE_ID_INTEL_82965G_HB 0x29A0
-#define PCI_DEVICE_ID_INTEL_82965G_IG 0x29A2
-#define PCI_DEVICE_ID_INTEL_82965GM_HB 0x2A00
-#define PCI_DEVICE_ID_INTEL_82965GM_IG 0x2A02
-#define PCI_DEVICE_ID_INTEL_82965GME_HB 0x2A10
-#define PCI_DEVICE_ID_INTEL_82965GME_IG 0x2A12
-#define PCI_DEVICE_ID_INTEL_82945GME_HB 0x27AC
-#define PCI_DEVICE_ID_INTEL_82945GME_IG 0x27AE
-#define PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB 0xA010
-#define PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG 0xA011
-#define PCI_DEVICE_ID_INTEL_PINEVIEW_HB 0xA000
-#define PCI_DEVICE_ID_INTEL_PINEVIEW_IG 0xA001
-#define PCI_DEVICE_ID_INTEL_G33_HB 0x29C0
-#define PCI_DEVICE_ID_INTEL_G33_IG 0x29C2
-#define PCI_DEVICE_ID_INTEL_Q35_HB 0x29B0
-#define PCI_DEVICE_ID_INTEL_Q35_IG 0x29B2
-#define PCI_DEVICE_ID_INTEL_Q33_HB 0x29D0
-#define PCI_DEVICE_ID_INTEL_Q33_IG 0x29D2
-#define PCI_DEVICE_ID_INTEL_B43_HB 0x2E40
-#define PCI_DEVICE_ID_INTEL_B43_IG 0x2E42
-#define PCI_DEVICE_ID_INTEL_GM45_HB 0x2A40
-#define PCI_DEVICE_ID_INTEL_GM45_IG 0x2A42
-#define PCI_DEVICE_ID_INTEL_EAGLELAKE_HB 0x2E00
-#define PCI_DEVICE_ID_INTEL_EAGLELAKE_IG 0x2E02
-#define PCI_DEVICE_ID_INTEL_Q45_HB 0x2E10
-#define PCI_DEVICE_ID_INTEL_Q45_IG 0x2E12
-#define PCI_DEVICE_ID_INTEL_G45_HB 0x2E20
-#define PCI_DEVICE_ID_INTEL_G45_IG 0x2E22
-#define PCI_DEVICE_ID_INTEL_G41_HB 0x2E30
-#define PCI_DEVICE_ID_INTEL_G41_IG 0x2E32
-#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB 0x0040
-#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG 0x0042
-#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB 0x0044
-#define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB 0x0062
-#define PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB 0x006a
-#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG 0x0046
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB 0x0100
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG 0x0102
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB 0x0104
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG 0x0106
-
-/* cover 915 and 945 variants */
-#define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945G_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GM_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GME_HB)
-
-#define IS_I965 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82946GZ_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82G35_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965Q_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965G_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GM_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GME_HB)
-
-#define IS_G33 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G33_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q35_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q33_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB)
-
-#define IS_PINEVIEW (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB)
-
-#define IS_SNB (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB)
-
-#define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_EAGLELAKE_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G45_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G41_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_B43_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB || \
- IS_SNB)
-
-extern int agp_memory_reserved;
-
-
-/* Intel 815 register */
-#define INTEL_815_APCONT 0x51
-#define INTEL_815_ATTBASE_MASK ~0x1FFFFFFF
-
-/* Intel i820 registers */
-#define INTEL_I820_RDCR 0x51
-#define INTEL_I820_ERRSTS 0xc8
-
-/* Intel i840 registers */
-#define INTEL_I840_MCHCFG 0x50
-#define INTEL_I840_ERRSTS 0xc8
-
-/* Intel i850 registers */
-#define INTEL_I850_MCHCFG 0x50
-#define INTEL_I850_ERRSTS 0xc8
-
-/* intel 915G registers */
-#define I915_GMADDR 0x18
-#define I915_MMADDR 0x10
-#define I915_PTEADDR 0x1C
-#define I915_GMCH_GMS_STOLEN_48M (0x6 << 4)
-#define I915_GMCH_GMS_STOLEN_64M (0x7 << 4)
-#define G33_GMCH_GMS_STOLEN_128M (0x8 << 4)
-#define G33_GMCH_GMS_STOLEN_256M (0x9 << 4)
-#define INTEL_GMCH_GMS_STOLEN_96M (0xa << 4)
-#define INTEL_GMCH_GMS_STOLEN_160M (0xb << 4)
-#define INTEL_GMCH_GMS_STOLEN_224M (0xc << 4)
-#define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4)
-
-#define I915_IFPADDR 0x60
-
-/* Intel 965G registers */
-#define I965_MSAC 0x62
-#define I965_IFPADDR 0x70
-
-/* Intel 7505 registers */
-#define INTEL_I7505_APSIZE 0x74
-#define INTEL_I7505_NCAPID 0x60
-#define INTEL_I7505_NISTAT 0x6c
-#define INTEL_I7505_ATTBASE 0x78
-#define INTEL_I7505_ERRSTS 0x42
-#define INTEL_I7505_AGPCTRL 0x70
-#define INTEL_I7505_MCHCFG 0x50
-
-#define SNB_GMCH_CTRL 0x50
-#define SNB_GMCH_GMS_STOLEN_MASK 0xF8
-#define SNB_GMCH_GMS_STOLEN_32M (1 << 3)
-#define SNB_GMCH_GMS_STOLEN_64M (2 << 3)
-#define SNB_GMCH_GMS_STOLEN_96M (3 << 3)
-#define SNB_GMCH_GMS_STOLEN_128M (4 << 3)
-#define SNB_GMCH_GMS_STOLEN_160M (5 << 3)
-#define SNB_GMCH_GMS_STOLEN_192M (6 << 3)
-#define SNB_GMCH_GMS_STOLEN_224M (7 << 3)
-#define SNB_GMCH_GMS_STOLEN_256M (8 << 3)
-#define SNB_GMCH_GMS_STOLEN_288M (9 << 3)
-#define SNB_GMCH_GMS_STOLEN_320M (0xa << 3)
-#define SNB_GMCH_GMS_STOLEN_352M (0xb << 3)
-#define SNB_GMCH_GMS_STOLEN_384M (0xc << 3)
-#define SNB_GMCH_GMS_STOLEN_416M (0xd << 3)
-#define SNB_GMCH_GMS_STOLEN_448M (0xe << 3)
-#define SNB_GMCH_GMS_STOLEN_480M (0xf << 3)
-#define SNB_GMCH_GMS_STOLEN_512M (0x10 << 3)
-#define SNB_GTT_SIZE_0M (0 << 8)
-#define SNB_GTT_SIZE_1M (1 << 8)
-#define SNB_GTT_SIZE_2M (2 << 8)
-#define SNB_GTT_SIZE_MASK (3 << 8)
-
-static const struct aper_size_info_fixed intel_i810_sizes[] =
-{
- {64, 16384, 4},
- /* The 32M mode still requires a 64k gatt */
- {32, 8192, 4}
-};
-
-#define AGP_DCACHE_MEMORY 1
-#define AGP_PHYS_MEMORY 2
-#define INTEL_AGP_CACHED_MEMORY 3
-
-static struct gatt_mask intel_i810_masks[] =
-{
- {.mask = I810_PTE_VALID, .type = 0},
- {.mask = (I810_PTE_VALID | I810_PTE_LOCAL), .type = AGP_DCACHE_MEMORY},
- {.mask = I810_PTE_VALID, .type = 0},
- {.mask = I810_PTE_VALID | I830_PTE_SYSTEM_CACHED,
- .type = INTEL_AGP_CACHED_MEMORY}
-};
-
-static struct _intel_private {
- struct pci_dev *pcidev; /* device one */
- u8 __iomem *registers;
- u32 __iomem *gtt; /* I915G */
- int num_dcache_entries;
- /* gtt_entries is the number of gtt entries that are already mapped
- * to stolen memory. Stolen memory is larger than the memory mapped
- * through gtt_entries, as it includes some reserved space for the BIOS
- * popup and for the GTT.
- */
- int gtt_entries; /* i830+ */
- int gtt_total_size;
- union {
- void __iomem *i9xx_flush_page;
- void *i8xx_flush_page;
- };
- struct page *i8xx_page;
- struct resource ifp_resource;
- int resource_valid;
-} intel_private;
-
-#ifdef USE_PCI_DMA_API
-static int intel_agp_map_page(struct page *page, dma_addr_t *ret)
-{
- *ret = pci_map_page(intel_private.pcidev, page, 0,
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(intel_private.pcidev, *ret))
- return -EINVAL;
- return 0;
-}
-
-static void intel_agp_unmap_page(struct page *page, dma_addr_t dma)
-{
- pci_unmap_page(intel_private.pcidev, dma,
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-}
-
-static void intel_agp_free_sglist(struct agp_memory *mem)
-{
- struct sg_table st;
-
- st.sgl = mem->sg_list;
- st.orig_nents = st.nents = mem->page_count;
-
- sg_free_table(&st);
-
- mem->sg_list = NULL;
- mem->num_sg = 0;
-}
-
-static int intel_agp_map_memory(struct agp_memory *mem)
-{
- struct sg_table st;
- struct scatterlist *sg;
- int i;
-
- DBG("try mapping %lu pages\n", (unsigned long)mem->page_count);
-
- if (sg_alloc_table(&st, mem->page_count, GFP_KERNEL))
- return -ENOMEM;
-
- mem->sg_list = sg = st.sgl;
-
- for (i = 0 ; i < mem->page_count; i++, sg = sg_next(sg))
- sg_set_page(sg, mem->pages[i], PAGE_SIZE, 0);
-
- mem->num_sg = pci_map_sg(intel_private.pcidev, mem->sg_list,
- mem->page_count, PCI_DMA_BIDIRECTIONAL);
- if (unlikely(!mem->num_sg)) {
- intel_agp_free_sglist(mem);
- return -ENOMEM;
- }
- return 0;
-}
-
-static void intel_agp_unmap_memory(struct agp_memory *mem)
-{
- DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count);
-
- pci_unmap_sg(intel_private.pcidev, mem->sg_list,
- mem->page_count, PCI_DMA_BIDIRECTIONAL);
- intel_agp_free_sglist(mem);
-}
-
-static void intel_agp_insert_sg_entries(struct agp_memory *mem,
- off_t pg_start, int mask_type)
-{
- struct scatterlist *sg;
- int i, j;
-
- j = pg_start;
-
- WARN_ON(!mem->num_sg);
-
- if (mem->num_sg == mem->page_count) {
- for_each_sg(mem->sg_list, sg, mem->page_count, i) {
- writel(agp_bridge->driver->mask_memory(agp_bridge,
- sg_dma_address(sg), mask_type),
- intel_private.gtt+j);
- j++;
- }
- } else {
- /* sg may merge pages, but we have to separate
- * per-page addr for GTT */
- unsigned int len, m;
-
- for_each_sg(mem->sg_list, sg, mem->num_sg, i) {
- len = sg_dma_len(sg) / PAGE_SIZE;
- for (m = 0; m < len; m++) {
- writel(agp_bridge->driver->mask_memory(agp_bridge,
- sg_dma_address(sg) + m * PAGE_SIZE,
- mask_type),
- intel_private.gtt+j);
- j++;
- }
- }
- }
- readl(intel_private.gtt+j-1);
-}
-
-#else
-
-static void intel_agp_insert_sg_entries(struct agp_memory *mem,
- off_t pg_start, int mask_type)
-{
- int i, j;
- u32 cache_bits = 0;
-
- if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB)
- {
- cache_bits = I830_PTE_SYSTEM_CACHED;
- }
-
- for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
- writel(agp_bridge->driver->mask_memory(agp_bridge,
- page_to_phys(mem->pages[i]), mask_type),
- intel_private.gtt+j);
- }
-
- readl(intel_private.gtt+j-1);
-}
-
-#endif
-
-static int intel_i810_fetch_size(void)
-{
- u32 smram_miscc;
- struct aper_size_info_fixed *values;
-
- pci_read_config_dword(agp_bridge->dev, I810_SMRAM_MISCC, &smram_miscc);
- values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
-
- if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) {
- dev_warn(&agp_bridge->dev->dev, "i810 is disabled\n");
- return 0;
- }
- if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) {
- agp_bridge->previous_size =
- agp_bridge->current_size = (void *) (values + 1);
- agp_bridge->aperture_size_idx = 1;
- return values[1].size;
- } else {
- agp_bridge->previous_size =
- agp_bridge->current_size = (void *) (values);
- agp_bridge->aperture_size_idx = 0;
- return values[0].size;
- }
-
- return 0;
-}
-
-static int intel_i810_configure(void)
-{
- struct aper_size_info_fixed *current_size;
- u32 temp;
- int i;
-
- current_size = A_SIZE_FIX(agp_bridge->current_size);
-
- if (!intel_private.registers) {
- pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
- temp &= 0xfff80000;
-
- intel_private.registers = ioremap(temp, 128 * 4096);
- if (!intel_private.registers) {
- dev_err(&intel_private.pcidev->dev,
- "can't remap memory\n");
- return -ENOMEM;
- }
- }
-
- if ((readl(intel_private.registers+I810_DRAM_CTL)
- & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
- /* This will need to be dynamically assigned */
- dev_info(&intel_private.pcidev->dev,
- "detected 4MB dedicated video ram\n");
- intel_private.num_dcache_entries = 1024;
- }
- pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
- agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
- writel(agp_bridge->gatt_bus_addr | I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
- readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
-
- if (agp_bridge->driver->needs_scratch_page) {
- for (i = 0; i < current_size->num_entries; i++) {
- writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
- }
- readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI posting. */
- }
- global_cache_flush();
- return 0;
-}
-
-static void intel_i810_cleanup(void)
-{
- writel(0, intel_private.registers+I810_PGETBL_CTL);
- readl(intel_private.registers); /* PCI Posting. */
- iounmap(intel_private.registers);
-}
-
-static void intel_i810_tlbflush(struct agp_memory *mem)
-{
- return;
-}
-
-static void intel_i810_agp_enable(struct agp_bridge_data *bridge, u32 mode)
-{
- return;
-}
-
-/* Exists to support ARGB cursors */
-static struct page *i8xx_alloc_pages(void)
-{
- struct page *page;
-
- page = alloc_pages(GFP_KERNEL | GFP_DMA32, 2);
- if (page == NULL)
- return NULL;
-
- if (set_pages_uc(page, 4) < 0) {
- set_pages_wb(page, 4);
- __free_pages(page, 2);
- return NULL;
- }
- get_page(page);
- atomic_inc(&agp_bridge->current_memory_agp);
- return page;
-}
-
-static void i8xx_destroy_pages(struct page *page)
-{
- if (page == NULL)
- return;
-
- set_pages_wb(page, 4);
- put_page(page);
- __free_pages(page, 2);
- atomic_dec(&agp_bridge->current_memory_agp);
-}
-
-static int intel_i830_type_to_mask_type(struct agp_bridge_data *bridge,
- int type)
-{
- if (type < AGP_USER_TYPES)
- return type;
- else if (type == AGP_USER_CACHED_MEMORY)
- return INTEL_AGP_CACHED_MEMORY;
- else
- return 0;
-}
-
-static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
- int type)
-{
- int i, j, num_entries;
- void *temp;
- int ret = -EINVAL;
- int mask_type;
-
- if (mem->page_count == 0)
- goto out;
-
- temp = agp_bridge->current_size;
- num_entries = A_SIZE_FIX(temp)->num_entries;
-
- if ((pg_start + mem->page_count) > num_entries)
- goto out_err;
-
-
- for (j = pg_start; j < (pg_start + mem->page_count); j++) {
- if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j))) {
- ret = -EBUSY;
- goto out_err;
- }
- }
-
- if (type != mem->type)
- goto out_err;
-
- mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
-
- switch (mask_type) {
- case AGP_DCACHE_MEMORY:
- if (!mem->is_flushed)
- global_cache_flush();
- for (i = pg_start; i < (pg_start + mem->page_count); i++) {
- writel((i*4096)|I810_PTE_LOCAL|I810_PTE_VALID,
- intel_private.registers+I810_PTE_BASE+(i*4));
- }
- readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
- break;
- case AGP_PHYS_MEMORY:
- case AGP_NORMAL_MEMORY:
- if (!mem->is_flushed)
- global_cache_flush();
- for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
- writel(agp_bridge->driver->mask_memory(agp_bridge,
- page_to_phys(mem->pages[i]), mask_type),
- intel_private.registers+I810_PTE_BASE+(j*4));
- }
- readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
- break;
- default:
- goto out_err;
- }
-
- agp_bridge->driver->tlb_flush(mem);
-out:
- ret = 0;
-out_err:
- mem->is_flushed = true;
- return ret;
-}
-
-static int intel_i810_remove_entries(struct agp_memory *mem, off_t pg_start,
- int type)
-{
- int i;
-
- if (mem->page_count == 0)
- return 0;
-
- for (i = pg_start; i < (mem->page_count + pg_start); i++) {
- writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
- }
- readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
-
- agp_bridge->driver->tlb_flush(mem);
- return 0;
-}
-
-/*
- * The i810/i830 requires a physical address to program its mouse
- * pointer into hardware.
- * However the Xserver still writes to it through the agp aperture.
- */
-static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
-{
- struct agp_memory *new;
- struct page *page;
-
- switch (pg_count) {
- case 1: page = agp_bridge->driver->agp_alloc_page(agp_bridge);
- break;
- case 4:
- /* kludge to get 4 physical pages for ARGB cursor */
- page = i8xx_alloc_pages();
- break;
- default:
- return NULL;
- }
-
- if (page == NULL)
- return NULL;
-
- new = agp_create_memory(pg_count);
- if (new == NULL)
- return NULL;
-
- new->pages[0] = page;
- if (pg_count == 4) {
- /* kludge to get 4 physical pages for ARGB cursor */
- new->pages[1] = new->pages[0] + 1;
- new->pages[2] = new->pages[1] + 1;
- new->pages[3] = new->pages[2] + 1;
- }
- new->page_count = pg_count;
- new->num_scratch_pages = pg_count;
- new->type = AGP_PHYS_MEMORY;
- new->physical = page_to_phys(new->pages[0]);
- return new;
-}
-
-static struct agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type)
-{
- struct agp_memory *new;
-
- if (type == AGP_DCACHE_MEMORY) {
- if (pg_count != intel_private.num_dcache_entries)
- return NULL;
-
- new = agp_create_memory(1);
- if (new == NULL)
- return NULL;
-
- new->type = AGP_DCACHE_MEMORY;
- new->page_count = pg_count;
- new->num_scratch_pages = 0;
- agp_free_page_array(new);
- return new;
- }
- if (type == AGP_PHYS_MEMORY)
- return alloc_agpphysmem_i8xx(pg_count, type);
- return NULL;
-}
-
-static void intel_i810_free_by_type(struct agp_memory *curr)
-{
- agp_free_key(curr->key);
- if (curr->type == AGP_PHYS_MEMORY) {
- if (curr->page_count == 4)
- i8xx_destroy_pages(curr->pages[0]);
- else {
- agp_bridge->driver->agp_destroy_page(curr->pages[0],
- AGP_PAGE_DESTROY_UNMAP);
- agp_bridge->driver->agp_destroy_page(curr->pages[0],
- AGP_PAGE_DESTROY_FREE);
- }
- agp_free_page_array(curr);
- }
- kfree(curr);
-}
-
-static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge,
- dma_addr_t addr, int type)
-{
- /* Type checking must be done elsewhere */
- return addr | bridge->driver->masks[type].mask;
-}
-
-static struct aper_size_info_fixed intel_i830_sizes[] =
-{
- {128, 32768, 5},
- /* The 64M mode still requires a 128k gatt */
- {64, 16384, 5},
- {256, 65536, 6},
- {512, 131072, 7},
-};
-
-static void intel_i830_init_gtt_entries(void)
-{
- u16 gmch_ctrl;
- int gtt_entries = 0;
- u8 rdct;
- int local = 0;
- static const int ddt[4] = { 0, 16, 32, 64 };
- int size; /* reserved space (in kb) at the top of stolen memory */
-
- pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
-
- if (IS_I965) {
- u32 pgetbl_ctl;
- pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
-
- /* The 965 has a field telling us the size of the GTT,
- * which may be larger than what is necessary to map the
- * aperture.
- */
- switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
- case I965_PGETBL_SIZE_128KB:
- size = 128;
- break;
- case I965_PGETBL_SIZE_256KB:
- size = 256;
- break;
- case I965_PGETBL_SIZE_512KB:
- size = 512;
- break;
- case I965_PGETBL_SIZE_1MB:
- size = 1024;
- break;
- case I965_PGETBL_SIZE_2MB:
- size = 2048;
- break;
- case I965_PGETBL_SIZE_1_5MB:
- size = 1024 + 512;
- break;
- default:
- dev_info(&intel_private.pcidev->dev,
- "unknown page table size, assuming 512KB\n");
- size = 512;
- }
- size += 4; /* add in BIOS popup space */
- } else if (IS_G33 && !IS_PINEVIEW) {
- /* G33's GTT size defined in gmch_ctrl */
- switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) {
- case G33_PGETBL_SIZE_1M:
- size = 1024;
- break;
- case G33_PGETBL_SIZE_2M:
- size = 2048;
- break;
- default:
- dev_info(&agp_bridge->dev->dev,
- "unknown page table size 0x%x, assuming 512KB\n",
- (gmch_ctrl & G33_PGETBL_SIZE_MASK));
- size = 512;
- }
- size += 4;
- } else if (IS_G4X || IS_PINEVIEW) {
- /* On 4 series hardware, GTT stolen is separate from graphics
- * stolen, so ignore it when counting stolen gtt entries. However,
- * 4KB of the stolen memory doesn't get mapped to the GTT.
- */
- size = 4;
- } else {
- /* On previous hardware, the GTT size was just what was
- * required to map the aperture.
- */
- size = agp_bridge->driver->fetch_size() + 4;
- }
-
- if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
- switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
- case I830_GMCH_GMS_STOLEN_512:
- gtt_entries = KB(512) - KB(size);
- break;
- case I830_GMCH_GMS_STOLEN_1024:
- gtt_entries = MB(1) - KB(size);
- break;
- case I830_GMCH_GMS_STOLEN_8192:
- gtt_entries = MB(8) - KB(size);
- break;
- case I830_GMCH_GMS_LOCAL:
- rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE);
- gtt_entries = (I830_RDRAM_ND(rdct) + 1) *
- MB(ddt[I830_RDRAM_DDT(rdct)]);
- local = 1;
- break;
- default:
- gtt_entries = 0;
- break;
- }
- } else if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) {
- /*
- * SandyBridge has new memory control reg at 0x50.w
- */
- u16 snb_gmch_ctl;
- pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
- switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
- case SNB_GMCH_GMS_STOLEN_32M:
- gtt_entries = MB(32) - KB(size);
- break;
- case SNB_GMCH_GMS_STOLEN_64M:
- gtt_entries = MB(64) - KB(size);
- break;
- case SNB_GMCH_GMS_STOLEN_96M:
- gtt_entries = MB(96) - KB(size);
- break;
- case SNB_GMCH_GMS_STOLEN_128M:
- gtt_entries = MB(128) - KB(size);
- break;
- case SNB_GMCH_GMS_STOLEN_160M:
- gtt_entries = MB(160) - KB(size);
- break;
- case SNB_GMCH_GMS_STOLEN_192M:
- gtt_entries = MB(192) - KB(size);
- break;
- case SNB_GMCH_GMS_STOLEN_224M:
- gtt_entries = MB(224) - KB(size);
- break;
- case SNB_GMCH_GMS_STOLEN_256M:
- gtt_entries = MB(256) - KB(size);
- break;
- case SNB_GMCH_GMS_STOLEN_288M:
- gtt_entries = MB(288) - KB(size);
- break;
- case SNB_GMCH_GMS_STOLEN_320M:
- gtt_entries = MB(320) - KB(size);
- break;
- case SNB_GMCH_GMS_STOLEN_352M:
- gtt_entries = MB(352) - KB(size);
- break;
- case SNB_GMCH_GMS_STOLEN_384M:
- gtt_entries = MB(384) - KB(size);
- break;
- case SNB_GMCH_GMS_STOLEN_416M:
- gtt_entries = MB(416) - KB(size);
- break;
- case SNB_GMCH_GMS_STOLEN_448M:
- gtt_entries = MB(448) - KB(size);
- break;
- case SNB_GMCH_GMS_STOLEN_480M:
- gtt_entries = MB(480) - KB(size);
- break;
- case SNB_GMCH_GMS_STOLEN_512M:
- gtt_entries = MB(512) - KB(size);
- break;
- }
- } else {
- switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
- case I855_GMCH_GMS_STOLEN_1M:
- gtt_entries = MB(1) - KB(size);
- break;
- case I855_GMCH_GMS_STOLEN_4M:
- gtt_entries = MB(4) - KB(size);
- break;
- case I855_GMCH_GMS_STOLEN_8M:
- gtt_entries = MB(8) - KB(size);
- break;
- case I855_GMCH_GMS_STOLEN_16M:
- gtt_entries = MB(16) - KB(size);
- break;
- case I855_GMCH_GMS_STOLEN_32M:
- gtt_entries = MB(32) - KB(size);
- break;
- case I915_GMCH_GMS_STOLEN_48M:
- /* Check it's really I915G */
- if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
- gtt_entries = MB(48) - KB(size);
- else
- gtt_entries = 0;
- break;
- case I915_GMCH_GMS_STOLEN_64M:
- /* Check it's really I915G */
- if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
- gtt_entries = MB(64) - KB(size);
- else
- gtt_entries = 0;
- break;
- case G33_GMCH_GMS_STOLEN_128M:
- if (IS_G33 || IS_I965 || IS_G4X)
- gtt_entries = MB(128) - KB(size);
- else
- gtt_entries = 0;
- break;
- case G33_GMCH_GMS_STOLEN_256M:
- if (IS_G33 || IS_I965 || IS_G4X)
- gtt_entries = MB(256) - KB(size);
- else
- gtt_entries = 0;
- break;
- case INTEL_GMCH_GMS_STOLEN_96M:
- if (IS_I965 || IS_G4X)
- gtt_entries = MB(96) - KB(size);
- else
- gtt_entries = 0;
- break;
- case INTEL_GMCH_GMS_STOLEN_160M:
- if (IS_I965 || IS_G4X)
- gtt_entries = MB(160) - KB(size);
- else
- gtt_entries = 0;
- break;
- case INTEL_GMCH_GMS_STOLEN_224M:
- if (IS_I965 || IS_G4X)
- gtt_entries = MB(224) - KB(size);
- else
- gtt_entries = 0;
- break;
- case INTEL_GMCH_GMS_STOLEN_352M:
- if (IS_I965 || IS_G4X)
- gtt_entries = MB(352) - KB(size);
- else
- gtt_entries = 0;
- break;
- default:
- gtt_entries = 0;
- break;
- }
- }
- if (gtt_entries > 0) {
- dev_info(&agp_bridge->dev->dev, "detected %dK %s memory\n",
- gtt_entries / KB(1), local ? "local" : "stolen");
- gtt_entries /= KB(4);
- } else {
- dev_info(&agp_bridge->dev->dev,
- "no pre-allocated video memory detected\n");
- gtt_entries = 0;
- }
-
- intel_private.gtt_entries = gtt_entries;
-}
-
-static void intel_i830_fini_flush(void)
-{
- kunmap(intel_private.i8xx_page);
- intel_private.i8xx_flush_page = NULL;
- unmap_page_from_agp(intel_private.i8xx_page);
-
- __free_page(intel_private.i8xx_page);
- intel_private.i8xx_page = NULL;
-}
-
-static void intel_i830_setup_flush(void)
-{
- /* return if we've already set the flush mechanism up */
- if (intel_private.i8xx_page)
- return;
-
- intel_private.i8xx_page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
- if (!intel_private.i8xx_page)
- return;
-
- intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
- if (!intel_private.i8xx_flush_page)
- intel_i830_fini_flush();
-}
-
-/* The chipset_flush interface needs to get data that has already been
- * flushed out of the CPU all the way out to main memory, because the GPU
- * doesn't snoop those buffers.
- *
- * The 8xx series doesn't have the same lovely interface for flushing the
- * chipset write buffers that the later chips do. According to the 865
- * specs, it's 64 octwords, or 1KB. So, to get those previous things in
- * that buffer out, we just fill 1KB and clflush it out, on the assumption
- * that it'll push whatever was in there out. It appears to work.
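- * (64 octwords x 16 bytes per octword = 1024 bytes, hence the 1KB
- * memset and clflush range below.)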
- */
-static void intel_i830_chipset_flush(struct agp_bridge_data *bridge)
-{
- unsigned int *pg = intel_private.i8xx_flush_page;
-
- memset(pg, 0, 1024);
-
- if (cpu_has_clflush)
- clflush_cache_range(pg, 1024);
- else if (wbinvd_on_all_cpus() != 0)
- printk(KERN_ERR "Timed out waiting for cache flush.\n");
-}
-
-/* The intel i830 automatically initializes the agp aperture during POST.
- * Use the memory already set aside for it in the GTT.
- */
-static int intel_i830_create_gatt_table(struct agp_bridge_data *bridge)
-{
- int page_order;
- struct aper_size_info_fixed *size;
- int num_entries;
- u32 temp;
-
- size = agp_bridge->current_size;
- page_order = size->page_order;
- num_entries = size->num_entries;
- agp_bridge->gatt_table_real = NULL;
-
- pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
- temp &= 0xfff80000;
-
- intel_private.registers = ioremap(temp, 128 * 4096);
- if (!intel_private.registers)
- return -ENOMEM;
-
- temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
- global_cache_flush(); /* FIXME: ?? */
-
- /* we have to call this as early as possible after the MMIO base address is known */
- intel_i830_init_gtt_entries();
-
- agp_bridge->gatt_table = NULL;
-
- agp_bridge->gatt_bus_addr = temp;
-
- return 0;
-}
-
-/* Return the gatt table to a sane state. Use the top of stolen
- * memory for the GTT.
- */
-static int intel_i830_free_gatt_table(struct agp_bridge_data *bridge)
-{
- return 0;
-}
-
-static int intel_i830_fetch_size(void)
-{
- u16 gmch_ctrl;
- struct aper_size_info_fixed *values;
-
- values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
-
- if (agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82830_HB &&
- agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82845G_HB) {
- /* 855GM/852GM/865G has 128MB aperture size */
- agp_bridge->previous_size = agp_bridge->current_size = (void *) values;
- agp_bridge->aperture_size_idx = 0;
- return values[0].size;
- }
-
- pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
-
- if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_128M) {
- agp_bridge->previous_size = agp_bridge->current_size = (void *) values;
- agp_bridge->aperture_size_idx = 0;
- return values[0].size;
- } else {
- agp_bridge->previous_size = agp_bridge->current_size = (void *) (values + 1);
- agp_bridge->aperture_size_idx = 1;
- return values[1].size;
- }
-
- return 0;
-}
-
-static int intel_i830_configure(void)
-{
- struct aper_size_info_fixed *current_size;
- u32 temp;
- u16 gmch_ctrl;
- int i;
-
- current_size = A_SIZE_FIX(agp_bridge->current_size);
-
- pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
- agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
-
- pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
- gmch_ctrl |= I830_GMCH_ENABLED;
- pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl);
-
- writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
- readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
-
- if (agp_bridge->driver->needs_scratch_page) {
- for (i = intel_private.gtt_entries; i < current_size->num_entries; i++) {
- writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
- }
- readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI Posting. */
- }
-
- global_cache_flush();
-
- intel_i830_setup_flush();
- return 0;
-}
-
-static void intel_i830_cleanup(void)
-{
- iounmap(intel_private.registers);
-}
-
-static int intel_i830_insert_entries(struct agp_memory *mem, off_t pg_start,
- int type)
-{
- int i, j, num_entries;
- void *temp;
- int ret = -EINVAL;
- int mask_type;
-
- if (mem->page_count == 0)
- goto out;
-
- temp = agp_bridge->current_size;
- num_entries = A_SIZE_FIX(temp)->num_entries;
-
- if (pg_start < intel_private.gtt_entries) {
- dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
- "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
- pg_start, intel_private.gtt_entries);
-
- dev_info(&intel_private.pcidev->dev,
- "trying to insert into local/stolen memory\n");
- goto out_err;
- }
-
- if ((pg_start + mem->page_count) > num_entries)
- goto out_err;
-
- /* The i830 can't check the GTT for entries since it's read only;
- * depend on the caller to make the correct offset decisions.
- */
-
- if (type != mem->type)
- goto out_err;
-
- mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
-
- if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
- mask_type != INTEL_AGP_CACHED_MEMORY)
- goto out_err;
-
- if (!mem->is_flushed)
- global_cache_flush();
-
- for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
- writel(agp_bridge->driver->mask_memory(agp_bridge,
- page_to_phys(mem->pages[i]), mask_type),
- intel_private.registers+I810_PTE_BASE+(j*4));
- }
- readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
- agp_bridge->driver->tlb_flush(mem);
-
-out:
- ret = 0;
-out_err:
- mem->is_flushed = true;
- return ret;
-}
-
-static int intel_i830_remove_entries(struct agp_memory *mem, off_t pg_start,
- int type)
-{
- int i;
-
- if (mem->page_count == 0)
- return 0;
-
- if (pg_start < intel_private.gtt_entries) {
- dev_info(&intel_private.pcidev->dev,
- "trying to disable local/stolen memory\n");
- return -EINVAL;
- }
-
- for (i = pg_start; i < (mem->page_count + pg_start); i++) {
- writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
- }
- readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
-
- agp_bridge->driver->tlb_flush(mem);
- return 0;
-}
-
-static struct agp_memory *intel_i830_alloc_by_type(size_t pg_count, int type)
-{
- if (type == AGP_PHYS_MEMORY)
- return alloc_agpphysmem_i8xx(pg_count, type);
- /* always return NULL for other allocation types for now */
- return NULL;
-}
-
-static int intel_alloc_chipset_flush_resource(void)
-{
- int ret;
- ret = pci_bus_alloc_resource(agp_bridge->dev->bus, &intel_private.ifp_resource, PAGE_SIZE,
- PAGE_SIZE, PCIBIOS_MIN_MEM, 0,
- pcibios_align_resource, agp_bridge->dev);
-
- return ret;
-}
-
-static void intel_i915_setup_chipset_flush(void)
-{
- int ret;
- u32 temp;
-
- pci_read_config_dword(agp_bridge->dev, I915_IFPADDR, &temp);
- if (!(temp & 0x1)) {
- intel_alloc_chipset_flush_resource();
- intel_private.resource_valid = 1;
- pci_write_config_dword(agp_bridge->dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
- } else {
- temp &= ~1;
-
- intel_private.resource_valid = 1;
- intel_private.ifp_resource.start = temp;
- intel_private.ifp_resource.end = temp + PAGE_SIZE;
- ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
- /* some BIOSes reserve this area as a PnP resource, some don't */
- if (ret)
- intel_private.resource_valid = 0;
- }
-}
-
-static void intel_i965_g33_setup_chipset_flush(void)
-{
- u32 temp_hi, temp_lo;
- int ret;
-
- pci_read_config_dword(agp_bridge->dev, I965_IFPADDR + 4, &temp_hi);
- pci_read_config_dword(agp_bridge->dev, I965_IFPADDR, &temp_lo);
-
- if (!(temp_lo & 0x1)) {
-
- intel_alloc_chipset_flush_resource();
-
- intel_private.resource_valid = 1;
- pci_write_config_dword(agp_bridge->dev, I965_IFPADDR + 4,
- upper_32_bits(intel_private.ifp_resource.start));
- pci_write_config_dword(agp_bridge->dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
- } else {
- u64 l64;
-
- temp_lo &= ~0x1;
- l64 = ((u64)temp_hi << 32) | temp_lo;
-
- intel_private.resource_valid = 1;
- intel_private.ifp_resource.start = l64;
- intel_private.ifp_resource.end = l64 + PAGE_SIZE;
- ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
- /* some BIOSes reserve this area as a PnP resource, some don't */
- if (ret)
- intel_private.resource_valid = 0;
- }
-}
-
-static void intel_i9xx_setup_flush(void)
-{
- /* return if already configured */
- if (intel_private.ifp_resource.start)
- return;
-
- if (IS_SNB)
- return;
-
- /* setup a resource for this object */
- intel_private.ifp_resource.name = "Intel Flush Page";
- intel_private.ifp_resource.flags = IORESOURCE_MEM;
-
- /* Setup chipset flush for 915 */
- if (IS_I965 || IS_G33 || IS_G4X) {
- intel_i965_g33_setup_chipset_flush();
- } else {
- intel_i915_setup_chipset_flush();
- }
-
- if (intel_private.ifp_resource.start) {
- intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE);
- if (!intel_private.i9xx_flush_page)
- dev_info(&intel_private.pcidev->dev, "can't ioremap flush page - no chipset flushing");
- }
-}
-
-static int intel_i915_configure(void)
-{
- struct aper_size_info_fixed *current_size;
- u32 temp;
- u16 gmch_ctrl;
- int i;
-
- current_size = A_SIZE_FIX(agp_bridge->current_size);
-
- pci_read_config_dword(intel_private.pcidev, I915_GMADDR, &temp);
-
- agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
-
- pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
- gmch_ctrl |= I830_GMCH_ENABLED;
- pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl);
-
- writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
- readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
-
- if (agp_bridge->driver->needs_scratch_page) {
- for (i = intel_private.gtt_entries; i < intel_private.gtt_total_size; i++) {
- writel(agp_bridge->scratch_page, intel_private.gtt+i);
- }
- readl(intel_private.gtt+i-1); /* PCI Posting. */
- }
-
- global_cache_flush();
-
- intel_i9xx_setup_flush();
-
- return 0;
-}
-
-static void intel_i915_cleanup(void)
-{
- if (intel_private.i9xx_flush_page)
- iounmap(intel_private.i9xx_flush_page);
- if (intel_private.resource_valid)
- release_resource(&intel_private.ifp_resource);
- intel_private.ifp_resource.start = 0;
- intel_private.resource_valid = 0;
- iounmap(intel_private.gtt);
- iounmap(intel_private.registers);
-}
-
-static void intel_i915_chipset_flush(struct agp_bridge_data *bridge)
-{
- if (intel_private.i9xx_flush_page)
- writel(1, intel_private.i9xx_flush_page);
-}
-
-static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start,
- int type)
-{
- int num_entries;
- void *temp;
- int ret = -EINVAL;
- int mask_type;
-
- if (mem->page_count == 0)
- goto out;
-
- temp = agp_bridge->current_size;
- num_entries = A_SIZE_FIX(temp)->num_entries;
-
- if (pg_start < intel_private.gtt_entries) {
- dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
- "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
- pg_start, intel_private.gtt_entries);
-
- dev_info(&intel_private.pcidev->dev,
- "trying to insert into local/stolen memory\n");
- goto out_err;
- }
-
- if ((pg_start + mem->page_count) > num_entries)
- goto out_err;
-
- /* The i915 can't check the GTT for entries since it's read only;
- * depend on the caller to make the correct offset decisions.
- */
-
- if (type != mem->type)
- goto out_err;
-
- mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
-
- if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
- mask_type != INTEL_AGP_CACHED_MEMORY)
- goto out_err;
-
- if (!mem->is_flushed)
- global_cache_flush();
-
- intel_agp_insert_sg_entries(mem, pg_start, mask_type);
- agp_bridge->driver->tlb_flush(mem);
-
- out:
- ret = 0;
- out_err:
- mem->is_flushed = true;
- return ret;
-}
-
-static int intel_i915_remove_entries(struct agp_memory *mem, off_t pg_start,
- int type)
-{
- int i;
-
- if (mem->page_count == 0)
- return 0;
-
- if (pg_start < intel_private.gtt_entries) {
- dev_info(&intel_private.pcidev->dev,
- "trying to disable local/stolen memory\n");
- return -EINVAL;
- }
-
- for (i = pg_start; i < (mem->page_count + pg_start); i++)
- writel(agp_bridge->scratch_page, intel_private.gtt+i);
-
- readl(intel_private.gtt+i-1);
-
- agp_bridge->driver->tlb_flush(mem);
- return 0;
-}
-
-/* Return the aperture size by just checking the resource length. The effect
- * of the MSAC registers described in the spec is just to change the
- * resource size.
- */
-static int intel_i9xx_fetch_size(void)
-{
- int num_sizes = ARRAY_SIZE(intel_i830_sizes);
- int aper_size; /* size in megabytes */
- int i;
-
- aper_size = pci_resource_len(intel_private.pcidev, 2) / MB(1);
-
- for (i = 0; i < num_sizes; i++) {
- if (aper_size == intel_i830_sizes[i].size) {
- agp_bridge->current_size = intel_i830_sizes + i;
- agp_bridge->previous_size = agp_bridge->current_size;
- return aper_size;
- }
- }
-
- return 0;
-}
-
-/* The intel i915 automatically initializes the agp aperture during POST.
- * Use the memory already set aside for it in the GTT.
- */
-static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge)
-{
- int page_order;
- struct aper_size_info_fixed *size;
- int num_entries;
- u32 temp, temp2;
- int gtt_map_size = 256 * 1024;
-
- size = agp_bridge->current_size;
- page_order = size->page_order;
- num_entries = size->num_entries;
- agp_bridge->gatt_table_real = NULL;
-
- pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);
- pci_read_config_dword(intel_private.pcidev, I915_PTEADDR, &temp2);
-
- if (IS_G33)
- gtt_map_size = 1024 * 1024; /* 1M on G33 */
- intel_private.gtt = ioremap(temp2, gtt_map_size);
- if (!intel_private.gtt)
- return -ENOMEM;
-
- intel_private.gtt_total_size = gtt_map_size / 4;
-
- temp &= 0xfff80000;
-
- intel_private.registers = ioremap(temp, 128 * 4096);
- if (!intel_private.registers) {
- iounmap(intel_private.gtt);
- return -ENOMEM;
- }
-
- temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
- global_cache_flush(); /* FIXME: ? */
-
- /* we have to call this as early as possible after the MMIO base address is known */
- intel_i830_init_gtt_entries();
-
- agp_bridge->gatt_table = NULL;
-
- agp_bridge->gatt_bus_addr = temp;
-
- return 0;
-}
-
-/*
- * The i965 supports 36-bit physical addresses, but to keep
- * the format of the GTT the same, the bits that don't fit
- * in a 32-bit word are shifted down to bits 4..7.
- *
- * Gcc is smart enough to notice that "(addr >> 28) & 0xf0"
- * is always zero on 32-bit architectures, so no need to make
- * this conditional.
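- *
- * E.g. for a 36-bit address 0x323456000, bits 35:32 are 0x3; the shift
- * folds them into bits 7:4, so the 32-bit PTE written out is 0x23456030
- * before the type/valid mask is OR'ed in.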
- */
-static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge,
- dma_addr_t addr, int type)
-{
- /* Shift high bits down */
- addr |= (addr >> 28) & 0xf0;
-
- /* Type checking must be done elsewhere */
- return addr | bridge->driver->masks[type].mask;
-}
-
-static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
-{
- u16 snb_gmch_ctl;
-
- switch (agp_bridge->dev->device) {
- case PCI_DEVICE_ID_INTEL_GM45_HB:
- case PCI_DEVICE_ID_INTEL_EAGLELAKE_HB:
- case PCI_DEVICE_ID_INTEL_Q45_HB:
- case PCI_DEVICE_ID_INTEL_G45_HB:
- case PCI_DEVICE_ID_INTEL_G41_HB:
- case PCI_DEVICE_ID_INTEL_B43_HB:
- case PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB:
- case PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB:
- case PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB:
- case PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB:
- *gtt_offset = *gtt_size = MB(2);
- break;
- case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB:
- case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB:
- *gtt_offset = MB(2);
-
- pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
- switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
- default:
- case SNB_GTT_SIZE_0M:
- printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
- *gtt_size = MB(0);
- break;
- case SNB_GTT_SIZE_1M:
- *gtt_size = MB(1);
- break;
- case SNB_GTT_SIZE_2M:
- *gtt_size = MB(2);
- break;
- }
- break;
- default:
- *gtt_offset = *gtt_size = KB(512);
- }
-}
-
-/* The intel i965 automatically initializes the agp aperture during POST.
- * Use the memory already set aside for it in the GTT.
- */
-static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge)
-{
- int page_order;
- struct aper_size_info_fixed *size;
- int num_entries;
- u32 temp;
- int gtt_offset, gtt_size;
-
- size = agp_bridge->current_size;
- page_order = size->page_order;
- num_entries = size->num_entries;
- agp_bridge->gatt_table_real = NULL;
-
- pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);
-
- temp &= 0xfff00000;
-
- intel_i965_get_gtt_range(&gtt_offset, &gtt_size);
-
- intel_private.gtt = ioremap((temp + gtt_offset) , gtt_size);
-
- if (!intel_private.gtt)
- return -ENOMEM;
-
- intel_private.gtt_total_size = gtt_size / 4;
-
- intel_private.registers = ioremap(temp, 128 * 4096);
- if (!intel_private.registers) {
- iounmap(intel_private.gtt);
- return -ENOMEM;
- }
-
- temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
- global_cache_flush(); /* FIXME: ? */
-
- /* we have to call this as early as possible after the MMIO base address is known */
- intel_i830_init_gtt_entries();
-
- agp_bridge->gatt_table = NULL;
-
- agp_bridge->gatt_bus_addr = temp;
-
- return 0;
-}
-
-
static int intel_fetch_size(void)
{
int i;
@@ -1982,6 +464,7 @@ static const struct agp_bridge_driver intel_generic_driver = {
.aperture_sizes = intel_generic_sizes,
.size_type = U16_APER_SIZE,
.num_aperture_sizes = 7,
+ .needs_scratch_page = true,
.configure = intel_configure,
.fetch_size = intel_fetch_size,
.cleanup = intel_cleanup,
@@ -2003,38 +486,12 @@ static const struct agp_bridge_driver intel_generic_driver = {
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
-static const struct agp_bridge_driver intel_810_driver = {
- .owner = THIS_MODULE,
- .aperture_sizes = intel_i810_sizes,
- .size_type = FIXED_APER_SIZE,
- .num_aperture_sizes = 2,
- .needs_scratch_page = true,
- .configure = intel_i810_configure,
- .fetch_size = intel_i810_fetch_size,
- .cleanup = intel_i810_cleanup,
- .tlb_flush = intel_i810_tlbflush,
- .mask_memory = intel_i810_mask_memory,
- .masks = intel_i810_masks,
- .agp_enable = intel_i810_agp_enable,
- .cache_flush = global_cache_flush,
- .create_gatt_table = agp_generic_create_gatt_table,
- .free_gatt_table = agp_generic_free_gatt_table,
- .insert_memory = intel_i810_insert_entries,
- .remove_memory = intel_i810_remove_entries,
- .alloc_by_type = intel_i810_alloc_by_type,
- .free_by_type = intel_i810_free_by_type,
- .agp_alloc_page = agp_generic_alloc_page,
- .agp_alloc_pages = agp_generic_alloc_pages,
- .agp_destroy_page = agp_generic_destroy_page,
- .agp_destroy_pages = agp_generic_destroy_pages,
- .agp_type_to_mask_type = agp_generic_type_to_mask_type,
-};
-
static const struct agp_bridge_driver intel_815_driver = {
.owner = THIS_MODULE,
.aperture_sizes = intel_815_sizes,
.size_type = U8_APER_SIZE,
.num_aperture_sizes = 2,
+ .needs_scratch_page = true,
.configure = intel_815_configure,
.fetch_size = intel_815_fetch_size,
.cleanup = intel_8xx_cleanup,
@@ -2056,39 +513,12 @@ static const struct agp_bridge_driver intel_815_driver = {
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
-static const struct agp_bridge_driver intel_830_driver = {
- .owner = THIS_MODULE,
- .aperture_sizes = intel_i830_sizes,
- .size_type = FIXED_APER_SIZE,
- .num_aperture_sizes = 4,
- .needs_scratch_page = true,
- .configure = intel_i830_configure,
- .fetch_size = intel_i830_fetch_size,
- .cleanup = intel_i830_cleanup,
- .tlb_flush = intel_i810_tlbflush,
- .mask_memory = intel_i810_mask_memory,
- .masks = intel_i810_masks,
- .agp_enable = intel_i810_agp_enable,
- .cache_flush = global_cache_flush,
- .create_gatt_table = intel_i830_create_gatt_table,
- .free_gatt_table = intel_i830_free_gatt_table,
- .insert_memory = intel_i830_insert_entries,
- .remove_memory = intel_i830_remove_entries,
- .alloc_by_type = intel_i830_alloc_by_type,
- .free_by_type = intel_i810_free_by_type,
- .agp_alloc_page = agp_generic_alloc_page,
- .agp_alloc_pages = agp_generic_alloc_pages,
- .agp_destroy_page = agp_generic_destroy_page,
- .agp_destroy_pages = agp_generic_destroy_pages,
- .agp_type_to_mask_type = intel_i830_type_to_mask_type,
- .chipset_flush = intel_i830_chipset_flush,
-};
-
static const struct agp_bridge_driver intel_820_driver = {
.owner = THIS_MODULE,
.aperture_sizes = intel_8xx_sizes,
.size_type = U8_APER_SIZE,
.num_aperture_sizes = 7,
+ .needs_scratch_page = true,
.configure = intel_820_configure,
.fetch_size = intel_8xx_fetch_size,
.cleanup = intel_820_cleanup,
@@ -2115,6 +545,7 @@ static const struct agp_bridge_driver intel_830mp_driver = {
.aperture_sizes = intel_830mp_sizes,
.size_type = U8_APER_SIZE,
.num_aperture_sizes = 4,
+ .needs_scratch_page = true,
.configure = intel_830mp_configure,
.fetch_size = intel_8xx_fetch_size,
.cleanup = intel_8xx_cleanup,
@@ -2141,6 +572,7 @@ static const struct agp_bridge_driver intel_840_driver = {
.aperture_sizes = intel_8xx_sizes,
.size_type = U8_APER_SIZE,
.num_aperture_sizes = 7,
+ .needs_scratch_page = true,
.configure = intel_840_configure,
.fetch_size = intel_8xx_fetch_size,
.cleanup = intel_8xx_cleanup,
@@ -2167,6 +599,7 @@ static const struct agp_bridge_driver intel_845_driver = {
.aperture_sizes = intel_8xx_sizes,
.size_type = U8_APER_SIZE,
.num_aperture_sizes = 7,
+ .needs_scratch_page = true,
.configure = intel_845_configure,
.fetch_size = intel_8xx_fetch_size,
.cleanup = intel_8xx_cleanup,
@@ -2193,6 +626,7 @@ static const struct agp_bridge_driver intel_850_driver = {
.aperture_sizes = intel_8xx_sizes,
.size_type = U8_APER_SIZE,
.num_aperture_sizes = 7,
+ .needs_scratch_page = true,
.configure = intel_850_configure,
.fetch_size = intel_8xx_fetch_size,
.cleanup = intel_8xx_cleanup,
@@ -2219,6 +653,7 @@ static const struct agp_bridge_driver intel_860_driver = {
.aperture_sizes = intel_8xx_sizes,
.size_type = U8_APER_SIZE,
.num_aperture_sizes = 7,
+ .needs_scratch_page = true,
.configure = intel_860_configure,
.fetch_size = intel_8xx_fetch_size,
.cleanup = intel_8xx_cleanup,
@@ -2240,79 +675,12 @@ static const struct agp_bridge_driver intel_860_driver = {
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
-static const struct agp_bridge_driver intel_915_driver = {
- .owner = THIS_MODULE,
- .aperture_sizes = intel_i830_sizes,
- .size_type = FIXED_APER_SIZE,
- .num_aperture_sizes = 4,
- .needs_scratch_page = true,
- .configure = intel_i915_configure,
- .fetch_size = intel_i9xx_fetch_size,
- .cleanup = intel_i915_cleanup,
- .tlb_flush = intel_i810_tlbflush,
- .mask_memory = intel_i810_mask_memory,
- .masks = intel_i810_masks,
- .agp_enable = intel_i810_agp_enable,
- .cache_flush = global_cache_flush,
- .create_gatt_table = intel_i915_create_gatt_table,
- .free_gatt_table = intel_i830_free_gatt_table,
- .insert_memory = intel_i915_insert_entries,
- .remove_memory = intel_i915_remove_entries,
- .alloc_by_type = intel_i830_alloc_by_type,
- .free_by_type = intel_i810_free_by_type,
- .agp_alloc_page = agp_generic_alloc_page,
- .agp_alloc_pages = agp_generic_alloc_pages,
- .agp_destroy_page = agp_generic_destroy_page,
- .agp_destroy_pages = agp_generic_destroy_pages,
- .agp_type_to_mask_type = intel_i830_type_to_mask_type,
- .chipset_flush = intel_i915_chipset_flush,
-#ifdef USE_PCI_DMA_API
- .agp_map_page = intel_agp_map_page,
- .agp_unmap_page = intel_agp_unmap_page,
- .agp_map_memory = intel_agp_map_memory,
- .agp_unmap_memory = intel_agp_unmap_memory,
-#endif
-};
-
-static const struct agp_bridge_driver intel_i965_driver = {
- .owner = THIS_MODULE,
- .aperture_sizes = intel_i830_sizes,
- .size_type = FIXED_APER_SIZE,
- .num_aperture_sizes = 4,
- .needs_scratch_page = true,
- .configure = intel_i915_configure,
- .fetch_size = intel_i9xx_fetch_size,
- .cleanup = intel_i915_cleanup,
- .tlb_flush = intel_i810_tlbflush,
- .mask_memory = intel_i965_mask_memory,
- .masks = intel_i810_masks,
- .agp_enable = intel_i810_agp_enable,
- .cache_flush = global_cache_flush,
- .create_gatt_table = intel_i965_create_gatt_table,
- .free_gatt_table = intel_i830_free_gatt_table,
- .insert_memory = intel_i915_insert_entries,
- .remove_memory = intel_i915_remove_entries,
- .alloc_by_type = intel_i830_alloc_by_type,
- .free_by_type = intel_i810_free_by_type,
- .agp_alloc_page = agp_generic_alloc_page,
- .agp_alloc_pages = agp_generic_alloc_pages,
- .agp_destroy_page = agp_generic_destroy_page,
- .agp_destroy_pages = agp_generic_destroy_pages,
- .agp_type_to_mask_type = intel_i830_type_to_mask_type,
- .chipset_flush = intel_i915_chipset_flush,
-#ifdef USE_PCI_DMA_API
- .agp_map_page = intel_agp_map_page,
- .agp_unmap_page = intel_agp_unmap_page,
- .agp_map_memory = intel_agp_map_memory,
- .agp_unmap_memory = intel_agp_unmap_memory,
-#endif
-};
-
static const struct agp_bridge_driver intel_7505_driver = {
.owner = THIS_MODULE,
.aperture_sizes = intel_8xx_sizes,
.size_type = U8_APER_SIZE,
.num_aperture_sizes = 7,
+ .needs_scratch_page = true,
.configure = intel_7505_configure,
.fetch_size = intel_8xx_fetch_size,
.cleanup = intel_8xx_cleanup,
@@ -2334,40 +702,6 @@ static const struct agp_bridge_driver intel_7505_driver = {
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
-static const struct agp_bridge_driver intel_g33_driver = {
- .owner = THIS_MODULE,
- .aperture_sizes = intel_i830_sizes,
- .size_type = FIXED_APER_SIZE,
- .num_aperture_sizes = 4,
- .needs_scratch_page = true,
- .configure = intel_i915_configure,
- .fetch_size = intel_i9xx_fetch_size,
- .cleanup = intel_i915_cleanup,
- .tlb_flush = intel_i810_tlbflush,
- .mask_memory = intel_i965_mask_memory,
- .masks = intel_i810_masks,
- .agp_enable = intel_i810_agp_enable,
- .cache_flush = global_cache_flush,
- .create_gatt_table = intel_i915_create_gatt_table,
- .free_gatt_table = intel_i830_free_gatt_table,
- .insert_memory = intel_i915_insert_entries,
- .remove_memory = intel_i915_remove_entries,
- .alloc_by_type = intel_i830_alloc_by_type,
- .free_by_type = intel_i810_free_by_type,
- .agp_alloc_page = agp_generic_alloc_page,
- .agp_alloc_pages = agp_generic_alloc_pages,
- .agp_destroy_page = agp_generic_destroy_page,
- .agp_destroy_pages = agp_generic_destroy_pages,
- .agp_type_to_mask_type = intel_i830_type_to_mask_type,
- .chipset_flush = intel_i915_chipset_flush,
-#ifdef USE_PCI_DMA_API
- .agp_map_page = intel_agp_map_page,
- .agp_unmap_page = intel_agp_unmap_page,
- .agp_map_memory = intel_agp_map_memory,
- .agp_unmap_memory = intel_agp_unmap_memory,
-#endif
-};
-
static int find_gmch(u16 device)
{
struct pci_dev *gmch_device;
@@ -2392,103 +726,137 @@ static int find_gmch(u16 device)
static const struct intel_driver_description {
unsigned int chip_id;
unsigned int gmch_chip_id;
- unsigned int multi_gmch_chip; /* if we have more gfx chip type on this HB. */
char *name;
const struct agp_bridge_driver *driver;
const struct agp_bridge_driver *gmch_driver;
} intel_agp_chipsets[] = {
- { PCI_DEVICE_ID_INTEL_82443LX_0, 0, 0, "440LX", &intel_generic_driver, NULL },
- { PCI_DEVICE_ID_INTEL_82443BX_0, 0, 0, "440BX", &intel_generic_driver, NULL },
- { PCI_DEVICE_ID_INTEL_82443GX_0, 0, 0, "440GX", &intel_generic_driver, NULL },
- { PCI_DEVICE_ID_INTEL_82810_MC1, PCI_DEVICE_ID_INTEL_82810_IG1, 0, "i810",
+ { PCI_DEVICE_ID_INTEL_82443LX_0, 0, "440LX", &intel_generic_driver, NULL },
+ { PCI_DEVICE_ID_INTEL_82443BX_0, 0, "440BX", &intel_generic_driver, NULL },
+ { PCI_DEVICE_ID_INTEL_82443GX_0, 0, "440GX", &intel_generic_driver, NULL },
+ { PCI_DEVICE_ID_INTEL_82810_MC1, PCI_DEVICE_ID_INTEL_82810_IG1, "i810",
NULL, &intel_810_driver },
- { PCI_DEVICE_ID_INTEL_82810_MC3, PCI_DEVICE_ID_INTEL_82810_IG3, 0, "i810",
+ { PCI_DEVICE_ID_INTEL_82810_MC3, PCI_DEVICE_ID_INTEL_82810_IG3, "i810",
NULL, &intel_810_driver },
- { PCI_DEVICE_ID_INTEL_82810E_MC, PCI_DEVICE_ID_INTEL_82810E_IG, 0, "i810",
+ { PCI_DEVICE_ID_INTEL_82810E_MC, PCI_DEVICE_ID_INTEL_82810E_IG, "i810",
NULL, &intel_810_driver },
- { PCI_DEVICE_ID_INTEL_82815_MC, PCI_DEVICE_ID_INTEL_82815_CGC, 0, "i815",
+ { PCI_DEVICE_ID_INTEL_82815_MC, PCI_DEVICE_ID_INTEL_82815_CGC, "i815",
&intel_815_driver, &intel_810_driver },
- { PCI_DEVICE_ID_INTEL_82820_HB, 0, 0, "i820", &intel_820_driver, NULL },
- { PCI_DEVICE_ID_INTEL_82820_UP_HB, 0, 0, "i820", &intel_820_driver, NULL },
- { PCI_DEVICE_ID_INTEL_82830_HB, PCI_DEVICE_ID_INTEL_82830_CGC, 0, "830M",
+ { PCI_DEVICE_ID_INTEL_82820_HB, 0, "i820", &intel_820_driver, NULL },
+ { PCI_DEVICE_ID_INTEL_82820_UP_HB, 0, "i820", &intel_820_driver, NULL },
+ { PCI_DEVICE_ID_INTEL_82830_HB, PCI_DEVICE_ID_INTEL_82830_CGC, "830M",
&intel_830mp_driver, &intel_830_driver },
- { PCI_DEVICE_ID_INTEL_82840_HB, 0, 0, "i840", &intel_840_driver, NULL },
- { PCI_DEVICE_ID_INTEL_82845_HB, 0, 0, "845G", &intel_845_driver, NULL },
- { PCI_DEVICE_ID_INTEL_82845G_HB, PCI_DEVICE_ID_INTEL_82845G_IG, 0, "830M",
+ { PCI_DEVICE_ID_INTEL_82840_HB, 0, "i840", &intel_840_driver, NULL },
+ { PCI_DEVICE_ID_INTEL_82845_HB, 0, "845G", &intel_845_driver, NULL },
+ { PCI_DEVICE_ID_INTEL_82845G_HB, PCI_DEVICE_ID_INTEL_82845G_IG, "830M",
&intel_845_driver, &intel_830_driver },
- { PCI_DEVICE_ID_INTEL_82850_HB, 0, 0, "i850", &intel_850_driver, NULL },
- { PCI_DEVICE_ID_INTEL_82854_HB, PCI_DEVICE_ID_INTEL_82854_IG, 0, "854",
+ { PCI_DEVICE_ID_INTEL_82850_HB, 0, "i850", &intel_850_driver, NULL },
+ { PCI_DEVICE_ID_INTEL_82854_HB, PCI_DEVICE_ID_INTEL_82854_IG, "854",
&intel_845_driver, &intel_830_driver },
- { PCI_DEVICE_ID_INTEL_82855PM_HB, 0, 0, "855PM", &intel_845_driver, NULL },
- { PCI_DEVICE_ID_INTEL_82855GM_HB, PCI_DEVICE_ID_INTEL_82855GM_IG, 0, "855GM",
+ { PCI_DEVICE_ID_INTEL_82855PM_HB, 0, "855PM", &intel_845_driver, NULL },
+ { PCI_DEVICE_ID_INTEL_82855GM_HB, PCI_DEVICE_ID_INTEL_82855GM_IG, "855GM",
&intel_845_driver, &intel_830_driver },
- { PCI_DEVICE_ID_INTEL_82860_HB, 0, 0, "i860", &intel_860_driver, NULL },
- { PCI_DEVICE_ID_INTEL_82865_HB, PCI_DEVICE_ID_INTEL_82865_IG, 0, "865",
+ { PCI_DEVICE_ID_INTEL_82860_HB, 0, "i860", &intel_860_driver, NULL },
+ { PCI_DEVICE_ID_INTEL_82865_HB, PCI_DEVICE_ID_INTEL_82865_IG, "865",
&intel_845_driver, &intel_830_driver },
- { PCI_DEVICE_ID_INTEL_82875_HB, 0, 0, "i875", &intel_845_driver, NULL },
- { PCI_DEVICE_ID_INTEL_E7221_HB, PCI_DEVICE_ID_INTEL_E7221_IG, 0, "E7221 (i915)",
+ { PCI_DEVICE_ID_INTEL_82875_HB, 0, "i875", &intel_845_driver, NULL },
+ { PCI_DEVICE_ID_INTEL_E7221_HB, PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)",
NULL, &intel_915_driver },
- { PCI_DEVICE_ID_INTEL_82915G_HB, PCI_DEVICE_ID_INTEL_82915G_IG, 0, "915G",
+ { PCI_DEVICE_ID_INTEL_82915G_HB, PCI_DEVICE_ID_INTEL_82915G_IG, "915G",
NULL, &intel_915_driver },
- { PCI_DEVICE_ID_INTEL_82915GM_HB, PCI_DEVICE_ID_INTEL_82915GM_IG, 0, "915GM",
+ { PCI_DEVICE_ID_INTEL_82915GM_HB, PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM",
NULL, &intel_915_driver },
- { PCI_DEVICE_ID_INTEL_82945G_HB, PCI_DEVICE_ID_INTEL_82945G_IG, 0, "945G",
+ { PCI_DEVICE_ID_INTEL_82945G_HB, PCI_DEVICE_ID_INTEL_82945G_IG, "945G",
NULL, &intel_915_driver },
- { PCI_DEVICE_ID_INTEL_82945GM_HB, PCI_DEVICE_ID_INTEL_82945GM_IG, 0, "945GM",
+ { PCI_DEVICE_ID_INTEL_82945GM_HB, PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM",
NULL, &intel_915_driver },
- { PCI_DEVICE_ID_INTEL_82945GME_HB, PCI_DEVICE_ID_INTEL_82945GME_IG, 0, "945GME",
+ { PCI_DEVICE_ID_INTEL_82945GME_HB, PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME",
NULL, &intel_915_driver },
- { PCI_DEVICE_ID_INTEL_82946GZ_HB, PCI_DEVICE_ID_INTEL_82946GZ_IG, 0, "946GZ",
+ { PCI_DEVICE_ID_INTEL_82946GZ_HB, PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ",
NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_82G35_HB, PCI_DEVICE_ID_INTEL_82G35_IG, 0, "G35",
+ { PCI_DEVICE_ID_INTEL_82G35_HB, PCI_DEVICE_ID_INTEL_82G35_IG, "G35",
NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_82965Q_HB, PCI_DEVICE_ID_INTEL_82965Q_IG, 0, "965Q",
+ { PCI_DEVICE_ID_INTEL_82965Q_HB, PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q",
NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_82965G_HB, PCI_DEVICE_ID_INTEL_82965G_IG, 0, "965G",
+ { PCI_DEVICE_ID_INTEL_82965G_HB, PCI_DEVICE_ID_INTEL_82965G_IG, "965G",
NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_82965GM_HB, PCI_DEVICE_ID_INTEL_82965GM_IG, 0, "965GM",
+ { PCI_DEVICE_ID_INTEL_82965GM_HB, PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM",
NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_82965GME_HB, PCI_DEVICE_ID_INTEL_82965GME_IG, 0, "965GME/GLE",
+ { PCI_DEVICE_ID_INTEL_82965GME_HB, PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE",
NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_7505_0, 0, 0, "E7505", &intel_7505_driver, NULL },
- { PCI_DEVICE_ID_INTEL_7205_0, 0, 0, "E7205", &intel_7505_driver, NULL },
- { PCI_DEVICE_ID_INTEL_G33_HB, PCI_DEVICE_ID_INTEL_G33_IG, 0, "G33",
+ { PCI_DEVICE_ID_INTEL_7505_0, 0, "E7505", &intel_7505_driver, NULL },
+ { PCI_DEVICE_ID_INTEL_7205_0, 0, "E7205", &intel_7505_driver, NULL },
+ { PCI_DEVICE_ID_INTEL_G33_HB, PCI_DEVICE_ID_INTEL_G33_IG, "G33",
NULL, &intel_g33_driver },
- { PCI_DEVICE_ID_INTEL_Q35_HB, PCI_DEVICE_ID_INTEL_Q35_IG, 0, "Q35",
+ { PCI_DEVICE_ID_INTEL_Q35_HB, PCI_DEVICE_ID_INTEL_Q35_IG, "Q35",
NULL, &intel_g33_driver },
- { PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, 0, "Q33",
+ { PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, "Q33",
NULL, &intel_g33_driver },
- { PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, 0, "GMA3150",
+ { PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150",
NULL, &intel_g33_driver },
- { PCI_DEVICE_ID_INTEL_PINEVIEW_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_IG, 0, "GMA3150",
+ { PCI_DEVICE_ID_INTEL_PINEVIEW_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150",
NULL, &intel_g33_driver },
- { PCI_DEVICE_ID_INTEL_GM45_HB, PCI_DEVICE_ID_INTEL_GM45_IG, 0,
+ { PCI_DEVICE_ID_INTEL_GM45_HB, PCI_DEVICE_ID_INTEL_GM45_IG,
"GM45", NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_EAGLELAKE_HB, PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, 0,
+ { PCI_DEVICE_ID_INTEL_EAGLELAKE_HB, PCI_DEVICE_ID_INTEL_EAGLELAKE_IG,
"Eaglelake", NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_Q45_HB, PCI_DEVICE_ID_INTEL_Q45_IG, 0,
+ { PCI_DEVICE_ID_INTEL_Q45_HB, PCI_DEVICE_ID_INTEL_Q45_IG,
"Q45/Q43", NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_G45_HB, PCI_DEVICE_ID_INTEL_G45_IG, 0,
+ { PCI_DEVICE_ID_INTEL_G45_HB, PCI_DEVICE_ID_INTEL_G45_IG,
"G45/G43", NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_B43_HB, PCI_DEVICE_ID_INTEL_B43_IG, 0,
+ { PCI_DEVICE_ID_INTEL_B43_HB, PCI_DEVICE_ID_INTEL_B43_IG,
"B43", NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG, 0,
+ { PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG,
"G41", NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG, 0,
+ { PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
"HD Graphics", NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0,
+ { PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
"HD Graphics", NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0,
+ { PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
"HD Graphics", NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0,
+ { PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
"HD Graphics", NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG, 0,
+ { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG,
"Sandybridge", NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG, 0,
+ { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG,
"Sandybridge", NULL, &intel_i965_driver },
- { 0, 0, 0, NULL, NULL, NULL }
+ { 0, 0, NULL, NULL, NULL }
};
+static int __devinit intel_gmch_probe(struct pci_dev *pdev,
+ struct agp_bridge_data *bridge)
+{
+ int i;
+ bridge->driver = NULL;
+
+ for (i = 0; intel_agp_chipsets[i].name != NULL; i++) {
+ if ((intel_agp_chipsets[i].gmch_chip_id != 0) &&
+ find_gmch(intel_agp_chipsets[i].gmch_chip_id)) {
+ bridge->driver =
+ intel_agp_chipsets[i].gmch_driver;
+ break;
+ }
+ }
+
+ if (!bridge->driver)
+ return 0;
+
+ bridge->dev_private_data = &intel_private;
+ bridge->dev = pdev;
+
+ dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name);
+
+ if (bridge->driver->mask_memory == intel_i965_mask_memory) {
+ if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(36)))
+ dev_err(&intel_private.pcidev->dev,
+ "set gfx device dma mask 36bit failed!\n");
+ else
+ pci_set_consistent_dma_mask(intel_private.pcidev,
+ DMA_BIT_MASK(36));
+ }
+
+ return 1;
+}
+
static int __devinit agp_intel_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -2503,22 +871,18 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
if (!bridge)
return -ENOMEM;
+ bridge->capndx = cap_ptr;
+
+ if (intel_gmch_probe(pdev, bridge))
+ goto found_gmch;
+
for (i = 0; intel_agp_chipsets[i].name != NULL; i++) {
 /* In case multiple models of gfx chip sit
 on the same host bridge type, this makes
 sure we detect the right IGD. */
if (pdev->device == intel_agp_chipsets[i].chip_id) {
- if ((intel_agp_chipsets[i].gmch_chip_id != 0) &&
- find_gmch(intel_agp_chipsets[i].gmch_chip_id)) {
- bridge->driver =
- intel_agp_chipsets[i].gmch_driver;
- break;
- } else if (intel_agp_chipsets[i].multi_gmch_chip) {
- continue;
- } else {
- bridge->driver = intel_agp_chipsets[i].driver;
- break;
- }
+ bridge->driver = intel_agp_chipsets[i].driver;
+ break;
}
}
@@ -2530,18 +894,16 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
return -ENODEV;
}
- if (bridge->driver == NULL) {
- /* bridge has no AGP and no IGD detected */
+ if (!bridge->driver) {
if (cap_ptr)
dev_warn(&pdev->dev, "can't find bridge device (chip_id: %04x)\n",
- intel_agp_chipsets[i].gmch_chip_id);
+ intel_agp_chipsets[i].gmch_chip_id);
agp_put_bridge(bridge);
return -ENODEV;
}
bridge->dev = pdev;
- bridge->capndx = cap_ptr;
- bridge->dev_private_data = &intel_private;
+ bridge->dev_private_data = NULL;
dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name);
@@ -2577,15 +939,7 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
&bridge->mode);
}
- if (bridge->driver->mask_memory == intel_i965_mask_memory) {
- if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(36)))
- dev_err(&intel_private.pcidev->dev,
- "set gfx device dma mask 36bit failed!\n");
- else
- pci_set_consistent_dma_mask(intel_private.pcidev,
- DMA_BIT_MASK(36));
- }
-
+found_gmch:
pci_set_drvdata(pdev, bridge);
err = agp_add_bridge(bridge);
if (!err)
@@ -2611,22 +965,7 @@ static int agp_intel_resume(struct pci_dev *pdev)
struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
int ret_val;
- if (bridge->driver == &intel_generic_driver)
- intel_configure();
- else if (bridge->driver == &intel_850_driver)
- intel_850_configure();
- else if (bridge->driver == &intel_845_driver)
- intel_845_configure();
- else if (bridge->driver == &intel_830mp_driver)
- intel_830mp_configure();
- else if (bridge->driver == &intel_915_driver)
- intel_i915_configure();
- else if (bridge->driver == &intel_830_driver)
- intel_i830_configure();
- else if (bridge->driver == &intel_810_driver)
- intel_i810_configure();
- else if (bridge->driver == &intel_i965_driver)
- intel_i915_configure();
+ bridge->driver->configure();
ret_val = agp_rebind_memory();
if (ret_val != 0)
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
new file mode 100644
index 000000000000..2547465d4658
--- /dev/null
+++ b/drivers/char/agp/intel-agp.h
@@ -0,0 +1,239 @@
+/*
+ * Common Intel AGPGART and GTT definitions.
+ */
+
+/* Intel registers */
+#define INTEL_APSIZE 0xb4
+#define INTEL_ATTBASE 0xb8
+#define INTEL_AGPCTRL 0xb0
+#define INTEL_NBXCFG 0x50
+#define INTEL_ERRSTS 0x91
+
+/* Intel i830 registers */
+#define I830_GMCH_CTRL 0x52
+#define I830_GMCH_ENABLED 0x4
+#define I830_GMCH_MEM_MASK 0x1
+#define I830_GMCH_MEM_64M 0x1
+#define I830_GMCH_MEM_128M 0
+#define I830_GMCH_GMS_MASK 0x70
+#define I830_GMCH_GMS_DISABLED 0x00
+#define I830_GMCH_GMS_LOCAL 0x10
+#define I830_GMCH_GMS_STOLEN_512 0x20
+#define I830_GMCH_GMS_STOLEN_1024 0x30
+#define I830_GMCH_GMS_STOLEN_8192 0x40
+#define I830_RDRAM_CHANNEL_TYPE 0x03010
+#define I830_RDRAM_ND(x) (((x) & 0x20) >> 5)
+#define I830_RDRAM_DDT(x) (((x) & 0x18) >> 3)
+
+/* This one is for the I830MP with an external graphics card */
+#define INTEL_I830_ERRSTS 0x92
+
+/* Intel 855GM/852GM registers */
+#define I855_GMCH_GMS_MASK 0xF0
+#define I855_GMCH_GMS_STOLEN_0M 0x0
+#define I855_GMCH_GMS_STOLEN_1M (0x1 << 4)
+#define I855_GMCH_GMS_STOLEN_4M (0x2 << 4)
+#define I855_GMCH_GMS_STOLEN_8M (0x3 << 4)
+#define I855_GMCH_GMS_STOLEN_16M (0x4 << 4)
+#define I855_GMCH_GMS_STOLEN_32M (0x5 << 4)
+#define I85X_CAPID 0x44
+#define I85X_VARIANT_MASK 0x7
+#define I85X_VARIANT_SHIFT 5
+#define I855_GME 0x0
+#define I855_GM 0x4
+#define I852_GME 0x2
+#define I852_GM 0x5
+
+/* Intel i845 registers */
+#define INTEL_I845_AGPM 0x51
+#define INTEL_I845_ERRSTS 0xc8
+
+/* Intel i860 registers */
+#define INTEL_I860_MCHCFG 0x50
+#define INTEL_I860_ERRSTS 0xc8
+
+/* Intel i810 registers */
+#define I810_GMADDR 0x10
+#define I810_MMADDR 0x14
+#define I810_PTE_BASE 0x10000
+#define I810_PTE_MAIN_UNCACHED 0x00000000
+#define I810_PTE_LOCAL 0x00000002
+#define I810_PTE_VALID 0x00000001
+#define I830_PTE_SYSTEM_CACHED 0x00000006
+#define I810_SMRAM_MISCC 0x70
+#define I810_GFX_MEM_WIN_SIZE 0x00010000
+#define I810_GFX_MEM_WIN_32M 0x00010000
+#define I810_GMS 0x000000c0
+#define I810_GMS_DISABLE 0x00000000
+#define I810_PGETBL_CTL 0x2020
+#define I810_PGETBL_ENABLED 0x00000001
+#define I965_PGETBL_SIZE_MASK 0x0000000e
+#define I965_PGETBL_SIZE_512KB (0 << 1)
+#define I965_PGETBL_SIZE_256KB (1 << 1)
+#define I965_PGETBL_SIZE_128KB (2 << 1)
+#define I965_PGETBL_SIZE_1MB (3 << 1)
+#define I965_PGETBL_SIZE_2MB (4 << 1)
+#define I965_PGETBL_SIZE_1_5MB (5 << 1)
+#define G33_PGETBL_SIZE_MASK (3 << 8)
+#define G33_PGETBL_SIZE_1M (1 << 8)
+#define G33_PGETBL_SIZE_2M (2 << 8)
+
+#define I810_DRAM_CTL 0x3000
+#define I810_DRAM_ROW_0 0x00000001
+#define I810_DRAM_ROW_0_SDRAM 0x00000001
+
+/* Intel 815 register */
+#define INTEL_815_APCONT 0x51
+#define INTEL_815_ATTBASE_MASK ~0x1FFFFFFF
+
+/* Intel i820 registers */
+#define INTEL_I820_RDCR 0x51
+#define INTEL_I820_ERRSTS 0xc8
+
+/* Intel i840 registers */
+#define INTEL_I840_MCHCFG 0x50
+#define INTEL_I840_ERRSTS 0xc8
+
+/* Intel i850 registers */
+#define INTEL_I850_MCHCFG 0x50
+#define INTEL_I850_ERRSTS 0xc8
+
+/* intel 915G registers */
+#define I915_GMADDR 0x18
+#define I915_MMADDR 0x10
+#define I915_PTEADDR 0x1C
+#define I915_GMCH_GMS_STOLEN_48M (0x6 << 4)
+#define I915_GMCH_GMS_STOLEN_64M (0x7 << 4)
+#define G33_GMCH_GMS_STOLEN_128M (0x8 << 4)
+#define G33_GMCH_GMS_STOLEN_256M (0x9 << 4)
+#define INTEL_GMCH_GMS_STOLEN_96M (0xa << 4)
+#define INTEL_GMCH_GMS_STOLEN_160M (0xb << 4)
+#define INTEL_GMCH_GMS_STOLEN_224M (0xc << 4)
+#define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4)
+
+#define I915_IFPADDR 0x60
+
+/* Intel 965G registers */
+#define I965_MSAC 0x62
+#define I965_IFPADDR 0x70
+
+/* Intel 7505 registers */
+#define INTEL_I7505_APSIZE 0x74
+#define INTEL_I7505_NCAPID 0x60
+#define INTEL_I7505_NISTAT 0x6c
+#define INTEL_I7505_ATTBASE 0x78
+#define INTEL_I7505_ERRSTS 0x42
+#define INTEL_I7505_AGPCTRL 0x70
+#define INTEL_I7505_MCHCFG 0x50
+
+#define SNB_GMCH_CTRL 0x50
+#define SNB_GMCH_GMS_STOLEN_MASK 0xF8
+#define SNB_GMCH_GMS_STOLEN_32M (1 << 3)
+#define SNB_GMCH_GMS_STOLEN_64M (2 << 3)
+#define SNB_GMCH_GMS_STOLEN_96M (3 << 3)
+#define SNB_GMCH_GMS_STOLEN_128M (4 << 3)
+#define SNB_GMCH_GMS_STOLEN_160M (5 << 3)
+#define SNB_GMCH_GMS_STOLEN_192M (6 << 3)
+#define SNB_GMCH_GMS_STOLEN_224M (7 << 3)
+#define SNB_GMCH_GMS_STOLEN_256M (8 << 3)
+#define SNB_GMCH_GMS_STOLEN_288M (9 << 3)
+#define SNB_GMCH_GMS_STOLEN_320M (0xa << 3)
+#define SNB_GMCH_GMS_STOLEN_352M (0xb << 3)
+#define SNB_GMCH_GMS_STOLEN_384M (0xc << 3)
+#define SNB_GMCH_GMS_STOLEN_416M (0xd << 3)
+#define SNB_GMCH_GMS_STOLEN_448M (0xe << 3)
+#define SNB_GMCH_GMS_STOLEN_480M (0xf << 3)
+#define SNB_GMCH_GMS_STOLEN_512M (0x10 << 3)
+#define SNB_GTT_SIZE_0M (0 << 8)
+#define SNB_GTT_SIZE_1M (1 << 8)
+#define SNB_GTT_SIZE_2M (2 << 8)
+#define SNB_GTT_SIZE_MASK (3 << 8)
+
+/* pci devices ids */
+#define PCI_DEVICE_ID_INTEL_E7221_HB 0x2588
+#define PCI_DEVICE_ID_INTEL_E7221_IG 0x258a
+#define PCI_DEVICE_ID_INTEL_82946GZ_HB 0x2970
+#define PCI_DEVICE_ID_INTEL_82946GZ_IG 0x2972
+#define PCI_DEVICE_ID_INTEL_82G35_HB 0x2980
+#define PCI_DEVICE_ID_INTEL_82G35_IG 0x2982
+#define PCI_DEVICE_ID_INTEL_82965Q_HB 0x2990
+#define PCI_DEVICE_ID_INTEL_82965Q_IG 0x2992
+#define PCI_DEVICE_ID_INTEL_82965G_HB 0x29A0
+#define PCI_DEVICE_ID_INTEL_82965G_IG 0x29A2
+#define PCI_DEVICE_ID_INTEL_82965GM_HB 0x2A00
+#define PCI_DEVICE_ID_INTEL_82965GM_IG 0x2A02
+#define PCI_DEVICE_ID_INTEL_82965GME_HB 0x2A10
+#define PCI_DEVICE_ID_INTEL_82965GME_IG 0x2A12
+#define PCI_DEVICE_ID_INTEL_82945GME_HB 0x27AC
+#define PCI_DEVICE_ID_INTEL_82945GME_IG 0x27AE
+#define PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB 0xA010
+#define PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG 0xA011
+#define PCI_DEVICE_ID_INTEL_PINEVIEW_HB 0xA000
+#define PCI_DEVICE_ID_INTEL_PINEVIEW_IG 0xA001
+#define PCI_DEVICE_ID_INTEL_G33_HB 0x29C0
+#define PCI_DEVICE_ID_INTEL_G33_IG 0x29C2
+#define PCI_DEVICE_ID_INTEL_Q35_HB 0x29B0
+#define PCI_DEVICE_ID_INTEL_Q35_IG 0x29B2
+#define PCI_DEVICE_ID_INTEL_Q33_HB 0x29D0
+#define PCI_DEVICE_ID_INTEL_Q33_IG 0x29D2
+#define PCI_DEVICE_ID_INTEL_B43_HB 0x2E40
+#define PCI_DEVICE_ID_INTEL_B43_IG 0x2E42
+#define PCI_DEVICE_ID_INTEL_GM45_HB 0x2A40
+#define PCI_DEVICE_ID_INTEL_GM45_IG 0x2A42
+#define PCI_DEVICE_ID_INTEL_EAGLELAKE_HB 0x2E00
+#define PCI_DEVICE_ID_INTEL_EAGLELAKE_IG 0x2E02
+#define PCI_DEVICE_ID_INTEL_Q45_HB 0x2E10
+#define PCI_DEVICE_ID_INTEL_Q45_IG 0x2E12
+#define PCI_DEVICE_ID_INTEL_G45_HB 0x2E20
+#define PCI_DEVICE_ID_INTEL_G45_IG 0x2E22
+#define PCI_DEVICE_ID_INTEL_G41_HB 0x2E30
+#define PCI_DEVICE_ID_INTEL_G41_IG 0x2E32
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB 0x0040
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG 0x0042
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB 0x0044
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB 0x0062
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB 0x006a
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG 0x0046
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB 0x0100
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG 0x0102
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB 0x0104
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG 0x0106
+
+/* cover 915 and 945 variants */
+#define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945G_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GM_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GME_HB)
+
+#define IS_I965 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82946GZ_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82G35_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965Q_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965G_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GM_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GME_HB)
+
+#define IS_G33 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G33_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q35_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q33_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB)
+
+#define IS_PINEVIEW (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB)
+
+#define IS_SNB (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB)
+
+#define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_EAGLELAKE_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G45_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G41_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_B43_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB || \
+ IS_SNB)
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
new file mode 100644
index 000000000000..e8ea6825822c
--- /dev/null
+++ b/drivers/char/agp/intel-gtt.c
@@ -0,0 +1,1516 @@
+/*
+ * Intel GTT (Graphics Translation Table) routines
+ *
+ * Caveat: This driver implements the linux agp interface, but this is far from
+ * an agp driver! GTT support ended up here for purely historical reasons: The
+ * old userspace intel graphics drivers needed an interface to map memory into
+ * the GTT. And the drm provides a default interface for graphic devices sitting
+ * on an agp port. So it made sense to fake the GTT support as an agp port to
+ * avoid having to create a new api.
+ *
+ * With gem this does not make much sense anymore, just needlessly complicates
+ * the code. But as long as the old graphics stack is still support, it's stuck
+ * here.
+ *
+ * /fairy-tale-mode off
+ */
+
+/*
+ * If we have Intel graphics, we're not going to have anything other than
+ * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
+ * on the Intel IOMMU support (CONFIG_DMAR).
+ * Only newer chipsets need to bother with this, of course.
+ */
+#ifdef CONFIG_DMAR
+#define USE_PCI_DMA_API 1
+#endif
+
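+/*
+ * Interpretation of the aper_size_info_fixed tables in this file (assumed
+ * from how they are used, not an original comment): each entry is
+ * {aperture size in MB, number of GTT entries, GATT page order}.  E.g.
+ * 16384 four-byte PTEs map 16384 * 4KB = 64MB of aperture and fit in a
+ * 64KB (order-4) GATT.
+ */
+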
+static const struct aper_size_info_fixed intel_i810_sizes[] =
+{
+ {64, 16384, 4},
+ /* The 32M mode still requires a 64k gatt */
+ {32, 8192, 4}
+};
+
+#define AGP_DCACHE_MEMORY 1
+#define AGP_PHYS_MEMORY 2
+#define INTEL_AGP_CACHED_MEMORY 3
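+/*
+ * Note (assumed from the code below, not an original comment): these type
+ * values double as indices into intel_i810_masks[], so mask_memory() can
+ * look up the matching PTE bits directly via masks[type].mask.
+ */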
+
+static struct gatt_mask intel_i810_masks[] =
+{
+ {.mask = I810_PTE_VALID, .type = 0},
+ {.mask = (I810_PTE_VALID | I810_PTE_LOCAL), .type = AGP_DCACHE_MEMORY},
+ {.mask = I810_PTE_VALID, .type = 0},
+ {.mask = I810_PTE_VALID | I830_PTE_SYSTEM_CACHED,
+ .type = INTEL_AGP_CACHED_MEMORY}
+};
+
+static struct _intel_private {
+ struct pci_dev *pcidev; /* device one */
+ u8 __iomem *registers;
+ u32 __iomem *gtt; /* I915G */
+ int num_dcache_entries;
+ /* gtt_entries is the number of gtt entries that are already mapped
+ * to stolen memory. Stolen memory is larger than the memory mapped
+ * through gtt_entries, as it includes some reserved space for the BIOS
+ * popup and for the GTT.
+ */
+ int gtt_entries; /* i830+ */
+ int gtt_total_size;
+ union {
+ void __iomem *i9xx_flush_page;
+ void *i8xx_flush_page;
+ };
+ struct page *i8xx_page;
+ struct resource ifp_resource;
+ int resource_valid;
+} intel_private;
+
+#ifdef USE_PCI_DMA_API
+static int intel_agp_map_page(struct page *page, dma_addr_t *ret)
+{
+ *ret = pci_map_page(intel_private.pcidev, page, 0,
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ if (pci_dma_mapping_error(intel_private.pcidev, *ret))
+ return -EINVAL;
+ return 0;
+}
+
+static void intel_agp_unmap_page(struct page *page, dma_addr_t dma)
+{
+ pci_unmap_page(intel_private.pcidev, dma,
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+}
+
+static void intel_agp_free_sglist(struct agp_memory *mem)
+{
+ struct sg_table st;
+
+ st.sgl = mem->sg_list;
+ st.orig_nents = st.nents = mem->page_count;
+
+ sg_free_table(&st);
+
+ mem->sg_list = NULL;
+ mem->num_sg = 0;
+}
+
+static int intel_agp_map_memory(struct agp_memory *mem)
+{
+ struct sg_table st;
+ struct scatterlist *sg;
+ int i;
+
+ DBG("try mapping %lu pages\n", (unsigned long)mem->page_count);
+
+ if (sg_alloc_table(&st, mem->page_count, GFP_KERNEL))
+ return -ENOMEM;
+
+ mem->sg_list = sg = st.sgl;
+
+ for (i = 0; i < mem->page_count; i++, sg = sg_next(sg))
+ sg_set_page(sg, mem->pages[i], PAGE_SIZE, 0);
+
+ mem->num_sg = pci_map_sg(intel_private.pcidev, mem->sg_list,
+ mem->page_count, PCI_DMA_BIDIRECTIONAL);
+ if (unlikely(!mem->num_sg)) {
+ intel_agp_free_sglist(mem);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static void intel_agp_unmap_memory(struct agp_memory *mem)
+{
+ DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count);
+
+ pci_unmap_sg(intel_private.pcidev, mem->sg_list,
+ mem->page_count, PCI_DMA_BIDIRECTIONAL);
+ intel_agp_free_sglist(mem);
+}
+
+static void intel_agp_insert_sg_entries(struct agp_memory *mem,
+ off_t pg_start, int mask_type)
+{
+ struct scatterlist *sg;
+ int i, j;
+
+ j = pg_start;
+
+ WARN_ON(!mem->num_sg);
+
+ if (mem->num_sg == mem->page_count) {
+ for_each_sg(mem->sg_list, sg, mem->page_count, i) {
+ writel(agp_bridge->driver->mask_memory(agp_bridge,
+ sg_dma_address(sg), mask_type),
+ intel_private.gtt+j);
+ j++;
+ }
+ } else {
+ /* sg may merge pages, but we have to program a separate
+ * per-page address into the GTT */
+ unsigned int len, m;
+
+ for_each_sg(mem->sg_list, sg, mem->num_sg, i) {
+ len = sg_dma_len(sg) / PAGE_SIZE;
+ for (m = 0; m < len; m++) {
+ writel(agp_bridge->driver->mask_memory(agp_bridge,
+ sg_dma_address(sg) + m * PAGE_SIZE,
+ mask_type),
+ intel_private.gtt+j);
+ j++;
+ }
+ }
+ }
+ readl(intel_private.gtt+j-1);
+}
+
+#else
+
+static void intel_agp_insert_sg_entries(struct agp_memory *mem,
+ off_t pg_start, int mask_type)
+{
+ int i, j;
+ u32 cache_bits = 0;
+
+ if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB)
+ {
+ cache_bits = I830_PTE_SYSTEM_CACHED;
+ }
+
+ for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
+ writel(agp_bridge->driver->mask_memory(agp_bridge,
+ page_to_phys(mem->pages[i]), mask_type),
+ intel_private.gtt+j);
+ }
+
+ readl(intel_private.gtt+j-1);
+}
+
+#endif
+
+static int intel_i810_fetch_size(void)
+{
+ u32 smram_miscc;
+ struct aper_size_info_fixed *values;
+
+ pci_read_config_dword(agp_bridge->dev, I810_SMRAM_MISCC, &smram_miscc);
+ values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
+
+ if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) {
+ dev_warn(&agp_bridge->dev->dev, "i810 is disabled\n");
+ return 0;
+ }
+ if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) {
+ agp_bridge->current_size = (void *) (values + 1);
+ agp_bridge->aperture_size_idx = 1;
+ return values[1].size;
+ } else {
+ agp_bridge->current_size = (void *) (values);
+ agp_bridge->aperture_size_idx = 0;
+ return values[0].size;
+ }
+
+ return 0;
+}
+
+static int intel_i810_configure(void)
+{
+ struct aper_size_info_fixed *current_size;
+ u32 temp;
+ int i;
+
+ current_size = A_SIZE_FIX(agp_bridge->current_size);
+
+ if (!intel_private.registers) {
+ pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
+ temp &= 0xfff80000;
+
+ intel_private.registers = ioremap(temp, 128 * 4096);
+ if (!intel_private.registers) {
+ dev_err(&intel_private.pcidev->dev,
+ "can't remap memory\n");
+ return -ENOMEM;
+ }
+ }
+
+ if ((readl(intel_private.registers+I810_DRAM_CTL)
+ & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
+ /* This will need to be dynamically assigned */
+ dev_info(&intel_private.pcidev->dev,
+ "detected 4MB dedicated video ram\n");
+ intel_private.num_dcache_entries = 1024;
+ }
+ pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
+ agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+ writel(agp_bridge->gatt_bus_addr | I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
+ readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
+
+ if (agp_bridge->driver->needs_scratch_page) {
+ for (i = 0; i < current_size->num_entries; i++) {
+ writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
+ }
+ readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI posting. */
+ }
+ global_cache_flush();
+ return 0;
+}
+
+static void intel_i810_cleanup(void)
+{
+ writel(0, intel_private.registers+I810_PGETBL_CTL);
+ readl(intel_private.registers); /* PCI Posting. */
+ iounmap(intel_private.registers);
+}
+
+static void intel_i810_agp_enable(struct agp_bridge_data *bridge, u32 mode)
+{
+ return;
+}
+
+/* Exists to support ARGB cursors */
+static struct page *i8xx_alloc_pages(void)
+{
+ struct page *page;
+
+ page = alloc_pages(GFP_KERNEL | GFP_DMA32, 2);
+ if (page == NULL)
+ return NULL;
+
+ if (set_pages_uc(page, 4) < 0) {
+ set_pages_wb(page, 4);
+ __free_pages(page, 2);
+ return NULL;
+ }
+ get_page(page);
+ atomic_inc(&agp_bridge->current_memory_agp);
+ return page;
+}
+
+static void i8xx_destroy_pages(struct page *page)
+{
+ if (page == NULL)
+ return;
+
+ set_pages_wb(page, 4);
+ put_page(page);
+ __free_pages(page, 2);
+ atomic_dec(&agp_bridge->current_memory_agp);
+}
+
+static int intel_i830_type_to_mask_type(struct agp_bridge_data *bridge,
+ int type)
+{
+ if (type < AGP_USER_TYPES)
+ return type;
+ else if (type == AGP_USER_CACHED_MEMORY)
+ return INTEL_AGP_CACHED_MEMORY;
+ else
+ return 0;
+}
+
+static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
+ int type)
+{
+ int i, j, num_entries;
+ void *temp;
+ int ret = -EINVAL;
+ int mask_type;
+
+ if (mem->page_count == 0)
+ goto out;
+
+ temp = agp_bridge->current_size;
+ num_entries = A_SIZE_FIX(temp)->num_entries;
+
+ if ((pg_start + mem->page_count) > num_entries)
+ goto out_err;
+
+
+ for (j = pg_start; j < (pg_start + mem->page_count); j++) {
+ if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j))) {
+ ret = -EBUSY;
+ goto out_err;
+ }
+ }
+
+ if (type != mem->type)
+ goto out_err;
+
+ mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
+
+ switch (mask_type) {
+ case AGP_DCACHE_MEMORY:
+ if (!mem->is_flushed)
+ global_cache_flush();
+ for (i = pg_start; i < (pg_start + mem->page_count); i++) {
+ writel((i*4096)|I810_PTE_LOCAL|I810_PTE_VALID,
+ intel_private.registers+I810_PTE_BASE+(i*4));
+ }
+ readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
+ break;
+ case AGP_PHYS_MEMORY:
+ case AGP_NORMAL_MEMORY:
+ if (!mem->is_flushed)
+ global_cache_flush();
+ for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
+ writel(agp_bridge->driver->mask_memory(agp_bridge,
+ page_to_phys(mem->pages[i]), mask_type),
+ intel_private.registers+I810_PTE_BASE+(j*4));
+ }
+ readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
+ break;
+ default:
+ goto out_err;
+ }
+
+out:
+ ret = 0;
+out_err:
+ mem->is_flushed = true;
+ return ret;
+}
+
+static int intel_i810_remove_entries(struct agp_memory *mem, off_t pg_start,
+ int type)
+{
+ int i;
+
+ if (mem->page_count == 0)
+ return 0;
+
+ for (i = pg_start; i < (mem->page_count + pg_start); i++) {
+ writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
+ }
+ readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
+
+ return 0;
+}
+
+/*
+ * The i810/i830 requires a physical address to program its mouse
+ * pointer into hardware.
+ * However, the Xserver still writes to it through the agp aperture.
+ */
+static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
+{
+ struct agp_memory *new;
+ struct page *page;
+
+ switch (pg_count) {
+ case 1: page = agp_bridge->driver->agp_alloc_page(agp_bridge);
+ break;
+ case 4:
+ /* kludge to get 4 physical pages for ARGB cursor */
+ page = i8xx_alloc_pages();
+ break;
+ default:
+ return NULL;
+ }
+
+ if (page == NULL)
+ return NULL;
+
+ new = agp_create_memory(pg_count);
+ if (new == NULL)
+ return NULL;
+
+ new->pages[0] = page;
+ if (pg_count == 4) {
+ /* kludge to get 4 physical pages for ARGB cursor */
+ new->pages[1] = new->pages[0] + 1;
+ new->pages[2] = new->pages[1] + 1;
+ new->pages[3] = new->pages[2] + 1;
+ }
+ new->page_count = pg_count;
+ new->num_scratch_pages = pg_count;
+ new->type = AGP_PHYS_MEMORY;
+ new->physical = page_to_phys(new->pages[0]);
+ return new;
+}
+
+static struct agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type)
+{
+ struct agp_memory *new;
+
+ if (type == AGP_DCACHE_MEMORY) {
+ if (pg_count != intel_private.num_dcache_entries)
+ return NULL;
+
+ new = agp_create_memory(1);
+ if (new == NULL)
+ return NULL;
+
+ new->type = AGP_DCACHE_MEMORY;
+ new->page_count = pg_count;
+ new->num_scratch_pages = 0;
+ agp_free_page_array(new);
+ return new;
+ }
+ if (type == AGP_PHYS_MEMORY)
+ return alloc_agpphysmem_i8xx(pg_count, type);
+ return NULL;
+}
+
+static void intel_i810_free_by_type(struct agp_memory *curr)
+{
+ agp_free_key(curr->key);
+ if (curr->type == AGP_PHYS_MEMORY) {
+ if (curr->page_count == 4)
+ i8xx_destroy_pages(curr->pages[0]);
+ else {
+ agp_bridge->driver->agp_destroy_page(curr->pages[0],
+ AGP_PAGE_DESTROY_UNMAP);
+ agp_bridge->driver->agp_destroy_page(curr->pages[0],
+ AGP_PAGE_DESTROY_FREE);
+ }
+ agp_free_page_array(curr);
+ }
+ kfree(curr);
+}
+
+static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge,
+ dma_addr_t addr, int type)
+{
+ /* Type checking must be done elsewhere */
+ return addr | bridge->driver->masks[type].mask;
+}
+
+static struct aper_size_info_fixed intel_i830_sizes[] =
+{
+ {128, 32768, 5},
+ /* The 64M mode still requires a 128k gatt */
+ {64, 16384, 5},
+ {256, 65536, 6},
+ {512, 131072, 7},
+};
+
+static void intel_i830_init_gtt_entries(void)
+{
+ u16 gmch_ctrl;
+ int gtt_entries = 0;
+ u8 rdct;
+ int local = 0;
+ static const int ddt[4] = { 0, 16, 32, 64 };
+ int size; /* reserved space (in kb) at the top of stolen memory */
+
+ pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
+
+ if (IS_I965) {
+ u32 pgetbl_ctl;
+ pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
+
+ /* The 965 has a field telling us the size of the GTT,
+ * which may be larger than what is necessary to map the
+ * aperture.
+ */
+ switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
+ case I965_PGETBL_SIZE_128KB:
+ size = 128;
+ break;
+ case I965_PGETBL_SIZE_256KB:
+ size = 256;
+ break;
+ case I965_PGETBL_SIZE_512KB:
+ size = 512;
+ break;
+ case I965_PGETBL_SIZE_1MB:
+ size = 1024;
+ break;
+ case I965_PGETBL_SIZE_2MB:
+ size = 2048;
+ break;
+ case I965_PGETBL_SIZE_1_5MB:
+ size = 1024 + 512;
+ break;
+ default:
+ dev_info(&intel_private.pcidev->dev,
+ "unknown page table size, assuming 512KB\n");
+ size = 512;
+ }
+ size += 4; /* add in BIOS popup space */
+ } else if (IS_G33 && !IS_PINEVIEW) {
+ /* G33's GTT size defined in gmch_ctrl */
+ switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) {
+ case G33_PGETBL_SIZE_1M:
+ size = 1024;
+ break;
+ case G33_PGETBL_SIZE_2M:
+ size = 2048;
+ break;
+ default:
+ dev_info(&agp_bridge->dev->dev,
+ "unknown page table size 0x%x, assuming 512KB\n",
+ (gmch_ctrl & G33_PGETBL_SIZE_MASK));
+ size = 512;
+ }
+ size += 4;
+ } else if (IS_G4X || IS_PINEVIEW) {
+ /* On 4 series hardware, GTT stolen is separate from graphics
+ * stolen, so ignore it when counting stolen gtt entries. However,
+ * 4KB of the stolen memory doesn't get mapped to the GTT.
+ */
+ size = 4;
+ } else {
+ /* On previous hardware, the GTT size was just what was
+ * required to map the aperture.
+ */
+ size = agp_bridge->driver->fetch_size() + 4;
+ }
+
+ if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
+ switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
+ case I830_GMCH_GMS_STOLEN_512:
+ gtt_entries = KB(512) - KB(size);
+ break;
+ case I830_GMCH_GMS_STOLEN_1024:
+ gtt_entries = MB(1) - KB(size);
+ break;
+ case I830_GMCH_GMS_STOLEN_8192:
+ gtt_entries = MB(8) - KB(size);
+ break;
+ case I830_GMCH_GMS_LOCAL:
+ rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE);
+ gtt_entries = (I830_RDRAM_ND(rdct) + 1) *
+ MB(ddt[I830_RDRAM_DDT(rdct)]);
+ local = 1;
+ break;
+ default:
+ gtt_entries = 0;
+ break;
+ }
+ } else if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) {
+ /*
+ * SandyBridge has a new memory control register at 0x50.w
+ */
+ u16 snb_gmch_ctl;
+ pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
+ switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
+ case SNB_GMCH_GMS_STOLEN_32M:
+ gtt_entries = MB(32) - KB(size);
+ break;
+ case SNB_GMCH_GMS_STOLEN_64M:
+ gtt_entries = MB(64) - KB(size);
+ break;
+ case SNB_GMCH_GMS_STOLEN_96M:
+ gtt_entries = MB(96) - KB(size);
+ break;
+ case SNB_GMCH_GMS_STOLEN_128M:
+ gtt_entries = MB(128) - KB(size);
+ break;
+ case SNB_GMCH_GMS_STOLEN_160M:
+ gtt_entries = MB(160) - KB(size);
+ break;
+ case SNB_GMCH_GMS_STOLEN_192M:
+ gtt_entries = MB(192) - KB(size);
+ break;
+ case SNB_GMCH_GMS_STOLEN_224M:
+ gtt_entries = MB(224) - KB(size);
+ break;
+ case SNB_GMCH_GMS_STOLEN_256M:
+ gtt_entries = MB(256) - KB(size);
+ break;
+ case SNB_GMCH_GMS_STOLEN_288M:
+ gtt_entries = MB(288) - KB(size);
+ break;
+ case SNB_GMCH_GMS_STOLEN_320M:
+ gtt_entries = MB(320) - KB(size);
+ break;
+ case SNB_GMCH_GMS_STOLEN_352M:
+ gtt_entries = MB(352) - KB(size);
+ break;
+ case SNB_GMCH_GMS_STOLEN_384M:
+ gtt_entries = MB(384) - KB(size);
+ break;
+ case SNB_GMCH_GMS_STOLEN_416M:
+ gtt_entries = MB(416) - KB(size);
+ break;
+ case SNB_GMCH_GMS_STOLEN_448M:
+ gtt_entries = MB(448) - KB(size);
+ break;
+ case SNB_GMCH_GMS_STOLEN_480M:
+ gtt_entries = MB(480) - KB(size);
+ break;
+ case SNB_GMCH_GMS_STOLEN_512M:
+ gtt_entries = MB(512) - KB(size);
+ break;
+ }
+ } else {
+ switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
+ case I855_GMCH_GMS_STOLEN_1M:
+ gtt_entries = MB(1) - KB(size);
+ break;
+ case I855_GMCH_GMS_STOLEN_4M:
+ gtt_entries = MB(4) - KB(size);
+ break;
+ case I855_GMCH_GMS_STOLEN_8M:
+ gtt_entries = MB(8) - KB(size);
+ break;
+ case I855_GMCH_GMS_STOLEN_16M:
+ gtt_entries = MB(16) - KB(size);
+ break;
+ case I855_GMCH_GMS_STOLEN_32M:
+ gtt_entries = MB(32) - KB(size);
+ break;
+ case I915_GMCH_GMS_STOLEN_48M:
+ /* Check it's really I915G */
+ if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
+ gtt_entries = MB(48) - KB(size);
+ else
+ gtt_entries = 0;
+ break;
+ case I915_GMCH_GMS_STOLEN_64M:
+ /* Check it's really I915G */
+ if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
+ gtt_entries = MB(64) - KB(size);
+ else
+ gtt_entries = 0;
+ break;
+ case G33_GMCH_GMS_STOLEN_128M:
+ if (IS_G33 || IS_I965 || IS_G4X)
+ gtt_entries = MB(128) - KB(size);
+ else
+ gtt_entries = 0;
+ break;
+ case G33_GMCH_GMS_STOLEN_256M:
+ if (IS_G33 || IS_I965 || IS_G4X)
+ gtt_entries = MB(256) - KB(size);
+ else
+ gtt_entries = 0;
+ break;
+ case INTEL_GMCH_GMS_STOLEN_96M:
+ if (IS_I965 || IS_G4X)
+ gtt_entries = MB(96) - KB(size);
+ else
+ gtt_entries = 0;
+ break;
+ case INTEL_GMCH_GMS_STOLEN_160M:
+ if (IS_I965 || IS_G4X)
+ gtt_entries = MB(160) - KB(size);
+ else
+ gtt_entries = 0;
+ break;
+ case INTEL_GMCH_GMS_STOLEN_224M:
+ if (IS_I965 || IS_G4X)
+ gtt_entries = MB(224) - KB(size);
+ else
+ gtt_entries = 0;
+ break;
+ case INTEL_GMCH_GMS_STOLEN_352M:
+ if (IS_I965 || IS_G4X)
+ gtt_entries = MB(352) - KB(size);
+ else
+ gtt_entries = 0;
+ break;
+ default:
+ gtt_entries = 0;
+ break;
+ }
+ }
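+ /* Illustrative example (values assumed, not from the original source):
+ * 64MB of stolen memory with size == 132 (a 128KB GTT plus the 4KB BIOS
+ * popup) leaves 67108864 - 135168 = 66973696 bytes, which the division
+ * below turns into 16351 usable 4KB GTT entries. */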
+ if (gtt_entries > 0) {
+ dev_info(&agp_bridge->dev->dev, "detected %dK %s memory\n",
+ gtt_entries / KB(1), local ? "local" : "stolen");
+ gtt_entries /= KB(4);
+ } else {
+ dev_info(&agp_bridge->dev->dev,
+ "no pre-allocated video memory detected\n");
+ gtt_entries = 0;
+ }
+
+ intel_private.gtt_entries = gtt_entries;
+}
+
+static void intel_i830_fini_flush(void)
+{
+ kunmap(intel_private.i8xx_page);
+ intel_private.i8xx_flush_page = NULL;
+ unmap_page_from_agp(intel_private.i8xx_page);
+
+ __free_page(intel_private.i8xx_page);
+ intel_private.i8xx_page = NULL;
+}
+
+static void intel_i830_setup_flush(void)
+{
+ /* return if we've already set the flush mechanism up */
+ if (intel_private.i8xx_page)
+ return;
+
+ intel_private.i8xx_page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
+ if (!intel_private.i8xx_page)
+ return;
+
+ intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
+ if (!intel_private.i8xx_flush_page)
+ intel_i830_fini_flush();
+}
+
+/* The chipset_flush interface needs to get data that has already been
+ * flushed out of the CPU all the way out to main memory, because the GPU
+ * doesn't snoop those buffers.
+ *
+ * The 8xx series doesn't have the same lovely interface for flushing the
+ * chipset write buffers that the later chips do. According to the 865
+ * specs, that buffer is 64 octwords, or 1KB. So, to flush out whatever was
+ * previously sitting in it, we just fill 1KB and clflush it, on the
+ * assumption that this pushes the old contents out. It appears to work.
+ */
+static void intel_i830_chipset_flush(struct agp_bridge_data *bridge)
+{
+ unsigned int *pg = intel_private.i8xx_flush_page;
+
+ memset(pg, 0, 1024);
+
+ if (cpu_has_clflush)
+ clflush_cache_range(pg, 1024);
+ else if (wbinvd_on_all_cpus() != 0)
+ printk(KERN_ERR "Timed out waiting for cache flush.\n");
+}
+
+/* The intel i830 automatically initializes the agp aperture during POST.
+ * Use the memory already set aside for the GTT.
+ */
+static int intel_i830_create_gatt_table(struct agp_bridge_data *bridge)
+{
+ int page_order;
+ struct aper_size_info_fixed *size;
+ int num_entries;
+ u32 temp;
+
+ size = agp_bridge->current_size;
+ page_order = size->page_order;
+ num_entries = size->num_entries;
+ agp_bridge->gatt_table_real = NULL;
+
+ pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
+ temp &= 0xfff80000;
+
+ intel_private.registers = ioremap(temp, 128 * 4096);
+ if (!intel_private.registers)
+ return -ENOMEM;
+
+ temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
+ global_cache_flush(); /* FIXME: ?? */
+
+ /* we have to call this as early as possible after the MMIO base address is known */
+ intel_i830_init_gtt_entries();
+
+ agp_bridge->gatt_table = NULL;
+
+ agp_bridge->gatt_bus_addr = temp;
+
+ return 0;
+}
+
+/* Return the gatt table to a sane state. Use the top of stolen
+ * memory for the GTT.
+ */
+static int intel_i830_free_gatt_table(struct agp_bridge_data *bridge)
+{
+ return 0;
+}
+
+static int intel_i830_fetch_size(void)
+{
+ u16 gmch_ctrl;
+ struct aper_size_info_fixed *values;
+
+ values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
+
+ if (agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82830_HB &&
+ agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82845G_HB) {
+ /* 855GM/852GM/865G has 128MB aperture size */
+ agp_bridge->current_size = (void *) values;
+ agp_bridge->aperture_size_idx = 0;
+ return values[0].size;
+ }
+
+ pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
+
+ if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_128M) {
+ agp_bridge->current_size = (void *) values;
+ agp_bridge->aperture_size_idx = 0;
+ return values[0].size;
+ } else {
+ agp_bridge->current_size = (void *) (values + 1);
+ agp_bridge->aperture_size_idx = 1;
+ return values[1].size;
+ }
+
+ return 0;
+}
+
+static int intel_i830_configure(void)
+{
+ struct aper_size_info_fixed *current_size;
+ u32 temp;
+ u16 gmch_ctrl;
+ int i;
+
+ current_size = A_SIZE_FIX(agp_bridge->current_size);
+
+ pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
+ agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+ pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
+ gmch_ctrl |= I830_GMCH_ENABLED;
+ pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl);
+
+ writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
+ readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
+
+ if (agp_bridge->driver->needs_scratch_page) {
+ for (i = intel_private.gtt_entries; i < current_size->num_entries; i++) {
+ writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
+ }
+ readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI Posting. */
+ }
+
+ global_cache_flush();
+
+ intel_i830_setup_flush();
+ return 0;
+}
+
+static void intel_i830_cleanup(void)
+{
+ iounmap(intel_private.registers);
+}
+
+static int intel_i830_insert_entries(struct agp_memory *mem, off_t pg_start,
+ int type)
+{
+ int i, j, num_entries;
+ void *temp;
+ int ret = -EINVAL;
+ int mask_type;
+
+ if (mem->page_count == 0)
+ goto out;
+
+ temp = agp_bridge->current_size;
+ num_entries = A_SIZE_FIX(temp)->num_entries;
+
+ if (pg_start < intel_private.gtt_entries) {
+ dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
+ "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
+ pg_start, intel_private.gtt_entries);
+
+ dev_info(&intel_private.pcidev->dev,
+ "trying to insert into local/stolen memory\n");
+ goto out_err;
+ }
+
+ if ((pg_start + mem->page_count) > num_entries)
+ goto out_err;
+
+ /* The i830 can't check the GTT for entries since it's read only;
+ * depend on the caller to make the correct offset decisions.
+ */
+
+ if (type != mem->type)
+ goto out_err;
+
+ mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
+
+ if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
+ mask_type != INTEL_AGP_CACHED_MEMORY)
+ goto out_err;
+
+ if (!mem->is_flushed)
+ global_cache_flush();
+
+ for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
+ writel(agp_bridge->driver->mask_memory(agp_bridge,
+ page_to_phys(mem->pages[i]), mask_type),
+ intel_private.registers+I810_PTE_BASE+(j*4));
+ }
+ readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
+
+out:
+ ret = 0;
+out_err:
+ mem->is_flushed = true;
+ return ret;
+}
+
+static int intel_i830_remove_entries(struct agp_memory *mem, off_t pg_start,
+ int type)
+{
+ int i;
+
+ if (mem->page_count == 0)
+ return 0;
+
+ if (pg_start < intel_private.gtt_entries) {
+ dev_info(&intel_private.pcidev->dev,
+ "trying to disable local/stolen memory\n");
+ return -EINVAL;
+ }
+
+ for (i = pg_start; i < (mem->page_count + pg_start); i++) {
+ writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
+ }
+ readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
+
+ return 0;
+}
+
+static struct agp_memory *intel_i830_alloc_by_type(size_t pg_count, int type)
+{
+ if (type == AGP_PHYS_MEMORY)
+ return alloc_agpphysmem_i8xx(pg_count, type);
+ /* always return NULL for other allocation types for now */
+ return NULL;
+}
+
+static int intel_alloc_chipset_flush_resource(void)
+{
+ int ret;
+ ret = pci_bus_alloc_resource(agp_bridge->dev->bus, &intel_private.ifp_resource, PAGE_SIZE,
+ PAGE_SIZE, PCIBIOS_MIN_MEM, 0,
+ pcibios_align_resource, agp_bridge->dev);
+
+ return ret;
+}
+
+static void intel_i915_setup_chipset_flush(void)
+{
+ int ret;
+ u32 temp;
+
+ pci_read_config_dword(agp_bridge->dev, I915_IFPADDR, &temp);
+ if (!(temp & 0x1)) {
+ intel_alloc_chipset_flush_resource();
+ intel_private.resource_valid = 1;
+ pci_write_config_dword(agp_bridge->dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
+ } else {
+ temp &= ~1;
+
+ intel_private.resource_valid = 1;
+ intel_private.ifp_resource.start = temp;
+ intel_private.ifp_resource.end = temp + PAGE_SIZE;
+ ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
+ /* some BIOSes reserve this area in a PnP resource, some don't */
+ if (ret)
+ intel_private.resource_valid = 0;
+ }
+}
+
+static void intel_i965_g33_setup_chipset_flush(void)
+{
+ u32 temp_hi, temp_lo;
+ int ret;
+
+ pci_read_config_dword(agp_bridge->dev, I965_IFPADDR + 4, &temp_hi);
+ pci_read_config_dword(agp_bridge->dev, I965_IFPADDR, &temp_lo);
+
+ if (!(temp_lo & 0x1)) {
+
+ intel_alloc_chipset_flush_resource();
+
+ intel_private.resource_valid = 1;
+ pci_write_config_dword(agp_bridge->dev, I965_IFPADDR + 4,
+ upper_32_bits(intel_private.ifp_resource.start));
+ pci_write_config_dword(agp_bridge->dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
+ } else {
+ u64 l64;
+
+ temp_lo &= ~0x1;
+ l64 = ((u64)temp_hi << 32) | temp_lo;
+
+ intel_private.resource_valid = 1;
+ intel_private.ifp_resource.start = l64;
+ intel_private.ifp_resource.end = l64 + PAGE_SIZE;
+ ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
+ /* some BIOSes reserve this area in a PnP resource, some don't */
+ if (ret)
+ intel_private.resource_valid = 0;
+ }
+}
+
+static void intel_i9xx_setup_flush(void)
+{
+ /* return if already configured */
+ if (intel_private.ifp_resource.start)
+ return;
+
+ if (IS_SNB)
+ return;
+
+ /* setup a resource for this object */
+ intel_private.ifp_resource.name = "Intel Flush Page";
+ intel_private.ifp_resource.flags = IORESOURCE_MEM;
+
+ /* Set up chipset flush; the 965/G33/G4X path differs from the 915 one */
+ if (IS_I965 || IS_G33 || IS_G4X) {
+ intel_i965_g33_setup_chipset_flush();
+ } else {
+ intel_i915_setup_chipset_flush();
+ }
+
+ if (intel_private.ifp_resource.start) {
+ intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE);
+ if (!intel_private.i9xx_flush_page)
+ dev_info(&intel_private.pcidev->dev, "can't ioremap flush page - no chipset flushing");
+ }
+}
+
+static int intel_i915_configure(void)
+{
+ struct aper_size_info_fixed *current_size;
+ u32 temp;
+ u16 gmch_ctrl;
+ int i;
+
+ current_size = A_SIZE_FIX(agp_bridge->current_size);
+
+ pci_read_config_dword(intel_private.pcidev, I915_GMADDR, &temp);
+
+ agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+ pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
+ gmch_ctrl |= I830_GMCH_ENABLED;
+ pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl);
+
+ writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
+ readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
+
+ if (agp_bridge->driver->needs_scratch_page) {
+ for (i = intel_private.gtt_entries; i < intel_private.gtt_total_size; i++) {
+ writel(agp_bridge->scratch_page, intel_private.gtt+i);
+ }
+ readl(intel_private.gtt+i-1); /* PCI Posting. */
+ }
+
+ global_cache_flush();
+
+ intel_i9xx_setup_flush();
+
+ return 0;
+}
+
+static void intel_i915_cleanup(void)
+{
+ if (intel_private.i9xx_flush_page)
+ iounmap(intel_private.i9xx_flush_page);
+ if (intel_private.resource_valid)
+ release_resource(&intel_private.ifp_resource);
+ intel_private.ifp_resource.start = 0;
+ intel_private.resource_valid = 0;
+ iounmap(intel_private.gtt);
+ iounmap(intel_private.registers);
+}
+
+static void intel_i915_chipset_flush(struct agp_bridge_data *bridge)
+{
+ if (intel_private.i9xx_flush_page)
+ writel(1, intel_private.i9xx_flush_page);
+}
+
+static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start,
+ int type)
+{
+ int num_entries;
+ void *temp;
+ int ret = -EINVAL;
+ int mask_type;
+
+ if (mem->page_count == 0)
+ goto out;
+
+ temp = agp_bridge->current_size;
+ num_entries = A_SIZE_FIX(temp)->num_entries;
+
+ if (pg_start < intel_private.gtt_entries) {
+ dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
+ "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
+ pg_start, intel_private.gtt_entries);
+
+ dev_info(&intel_private.pcidev->dev,
+ "trying to insert into local/stolen memory\n");
+ goto out_err;
+ }
+
+ if ((pg_start + mem->page_count) > num_entries)
+ goto out_err;
+
+ /* The i915 can't check the GTT for entries since it's read only;
+ * depend on the caller to make the correct offset decisions.
+ */
+
+ if (type != mem->type)
+ goto out_err;
+
+ mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
+
+ if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
+ mask_type != INTEL_AGP_CACHED_MEMORY)
+ goto out_err;
+
+ if (!mem->is_flushed)
+ global_cache_flush();
+
+ intel_agp_insert_sg_entries(mem, pg_start, mask_type);
+
+ out:
+ ret = 0;
+ out_err:
+ mem->is_flushed = true;
+ return ret;
+}
+
+static int intel_i915_remove_entries(struct agp_memory *mem, off_t pg_start,
+ int type)
+{
+ int i;
+
+ if (mem->page_count == 0)
+ return 0;
+
+ if (pg_start < intel_private.gtt_entries) {
+ dev_info(&intel_private.pcidev->dev,
+ "trying to disable local/stolen memory\n");
+ return -EINVAL;
+ }
+
+ for (i = pg_start; i < (mem->page_count + pg_start); i++)
+ writel(agp_bridge->scratch_page, intel_private.gtt+i);
+
+ readl(intel_private.gtt+i-1);
+
+ return 0;
+}
+
+/* Return the aperture size by just checking the resource length. The only
+ * effect the MSAC registers have, according to the spec, is to change the
+ * resource size.
+ */
+static int intel_i9xx_fetch_size(void)
+{
+ int num_sizes = ARRAY_SIZE(intel_i830_sizes);
+ int aper_size; /* size in megabytes */
+ int i;
+
+ aper_size = pci_resource_len(intel_private.pcidev, 2) / MB(1);
+
+ for (i = 0; i < num_sizes; i++) {
+ if (aper_size == intel_i830_sizes[i].size) {
+ agp_bridge->current_size = intel_i830_sizes + i;
+ return aper_size;
+ }
+ }
+
+ return 0;
+}
+
+/* The intel i915 automatically initializes the agp aperture during POST.
+ * Use the memory already set aside for the GTT.
+ */
+static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge)
+{
+ int page_order;
+ struct aper_size_info_fixed *size;
+ int num_entries;
+ u32 temp, temp2;
+ int gtt_map_size = 256 * 1024;
+
+ size = agp_bridge->current_size;
+ page_order = size->page_order;
+ num_entries = size->num_entries;
+ agp_bridge->gatt_table_real = NULL;
+
+ pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);
+ pci_read_config_dword(intel_private.pcidev, I915_PTEADDR, &temp2);
+
+ if (IS_G33)
+ gtt_map_size = 1024 * 1024; /* 1M on G33 */
+ intel_private.gtt = ioremap(temp2, gtt_map_size);
+ if (!intel_private.gtt)
+ return -ENOMEM;
+
+ intel_private.gtt_total_size = gtt_map_size / 4;
+
+ temp &= 0xfff80000;
+
+ intel_private.registers = ioremap(temp, 128 * 4096);
+ if (!intel_private.registers) {
+ iounmap(intel_private.gtt);
+ return -ENOMEM;
+ }
+
+ temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
+ global_cache_flush(); /* FIXME: ? */
+
+ /* we have to call this as early as possible after the MMIO base address is known */
+ intel_i830_init_gtt_entries();
+
+ agp_bridge->gatt_table = NULL;
+
+ agp_bridge->gatt_bus_addr = temp;
+
+ return 0;
+}
+
+/*
+ * The i965 supports 36-bit physical addresses, but to keep
+ * the format of the GTT the same, the bits that don't fit
+ * in a 32-bit word are shifted down to bits 4..7.
+ *
+ * Gcc is smart enough to notice that "(addr >> 28) & 0xf0"
+ * is always zero on 32-bit architectures, so no need to make
+ * this conditional.
+ */
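+/*
+ * Worked example (illustrative addition, not part of the original comment):
+ * for the 36-bit address 0x3_1234_5000, "(addr >> 28) & 0xf0" yields 0x30,
+ * so the 32-bit PTE value becomes 0x12345030 (before the type mask is ORed
+ * in), carrying physical address bits 32..35 in PTE bits 4..7.
+ */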
+static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge,
+ dma_addr_t addr, int type)
+{
+ /* Shift high bits down */
+ addr |= (addr >> 28) & 0xf0;
+
+ /* Type checking must be done elsewhere */
+ return addr | bridge->driver->masks[type].mask;
+}
+
+static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
+{
+ u16 snb_gmch_ctl;
+
+ switch (agp_bridge->dev->device) {
+ case PCI_DEVICE_ID_INTEL_GM45_HB:
+ case PCI_DEVICE_ID_INTEL_EAGLELAKE_HB:
+ case PCI_DEVICE_ID_INTEL_Q45_HB:
+ case PCI_DEVICE_ID_INTEL_G45_HB:
+ case PCI_DEVICE_ID_INTEL_G41_HB:
+ case PCI_DEVICE_ID_INTEL_B43_HB:
+ case PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB:
+ case PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB:
+ case PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB:
+ case PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB:
+ *gtt_offset = *gtt_size = MB(2);
+ break;
+ case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB:
+ case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB:
+ *gtt_offset = MB(2);
+
+ pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
+ switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
+ default:
+ case SNB_GTT_SIZE_0M:
+ printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
+ *gtt_size = MB(0);
+ break;
+ case SNB_GTT_SIZE_1M:
+ *gtt_size = MB(1);
+ break;
+ case SNB_GTT_SIZE_2M:
+ *gtt_size = MB(2);
+ break;
+ }
+ break;
+ default:
+ *gtt_offset = *gtt_size = KB(512);
+ }
+}
+
+/* The intel i965 automatically initializes the agp aperture during POST.
+ * Use the memory already set aside for the GTT.
+ */
+static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge)
+{
+ int page_order;
+ struct aper_size_info_fixed *size;
+ int num_entries;
+ u32 temp;
+ int gtt_offset, gtt_size;
+
+ size = agp_bridge->current_size;
+ page_order = size->page_order;
+ num_entries = size->num_entries;
+ agp_bridge->gatt_table_real = NULL;
+
+ pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);
+
+ temp &= 0xfff00000;
+
+ intel_i965_get_gtt_range(&gtt_offset, &gtt_size);
+
+ intel_private.gtt = ioremap((temp + gtt_offset), gtt_size);
+
+ if (!intel_private.gtt)
+ return -ENOMEM;
+
+ intel_private.gtt_total_size = gtt_size / 4;
+
+ intel_private.registers = ioremap(temp, 128 * 4096);
+ if (!intel_private.registers) {
+ iounmap(intel_private.gtt);
+ return -ENOMEM;
+ }
+
+ temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
+ global_cache_flush(); /* FIXME: ? */
+
+ /* we have to call this as early as possible after the MMIO base address is known */
+ intel_i830_init_gtt_entries();
+
+ agp_bridge->gatt_table = NULL;
+
+ agp_bridge->gatt_bus_addr = temp;
+
+ return 0;
+}
+
+static const struct agp_bridge_driver intel_810_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = intel_i810_sizes,
+ .size_type = FIXED_APER_SIZE,
+ .num_aperture_sizes = 2,
+ .needs_scratch_page = true,
+ .configure = intel_i810_configure,
+ .fetch_size = intel_i810_fetch_size,
+ .cleanup = intel_i810_cleanup,
+ .mask_memory = intel_i810_mask_memory,
+ .masks = intel_i810_masks,
+ .agp_enable = intel_i810_agp_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = agp_generic_create_gatt_table,
+ .free_gatt_table = agp_generic_free_gatt_table,
+ .insert_memory = intel_i810_insert_entries,
+ .remove_memory = intel_i810_remove_entries,
+ .alloc_by_type = intel_i810_alloc_by_type,
+ .free_by_type = intel_i810_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_alloc_pages = agp_generic_alloc_pages,
+ .agp_destroy_page = agp_generic_destroy_page,
+ .agp_destroy_pages = agp_generic_destroy_pages,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
+};
+
+static const struct agp_bridge_driver intel_830_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = intel_i830_sizes,
+ .size_type = FIXED_APER_SIZE,
+ .num_aperture_sizes = 4,
+ .needs_scratch_page = true,
+ .configure = intel_i830_configure,
+ .fetch_size = intel_i830_fetch_size,
+ .cleanup = intel_i830_cleanup,
+ .mask_memory = intel_i810_mask_memory,
+ .masks = intel_i810_masks,
+ .agp_enable = intel_i810_agp_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = intel_i830_create_gatt_table,
+ .free_gatt_table = intel_i830_free_gatt_table,
+ .insert_memory = intel_i830_insert_entries,
+ .remove_memory = intel_i830_remove_entries,
+ .alloc_by_type = intel_i830_alloc_by_type,
+ .free_by_type = intel_i810_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_alloc_pages = agp_generic_alloc_pages,
+ .agp_destroy_page = agp_generic_destroy_page,
+ .agp_destroy_pages = agp_generic_destroy_pages,
+ .agp_type_to_mask_type = intel_i830_type_to_mask_type,
+ .chipset_flush = intel_i830_chipset_flush,
+};
+
+static const struct agp_bridge_driver intel_915_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = intel_i830_sizes,
+ .size_type = FIXED_APER_SIZE,
+ .num_aperture_sizes = 4,
+ .needs_scratch_page = true,
+ .configure = intel_i915_configure,
+ .fetch_size = intel_i9xx_fetch_size,
+ .cleanup = intel_i915_cleanup,
+ .mask_memory = intel_i810_mask_memory,
+ .masks = intel_i810_masks,
+ .agp_enable = intel_i810_agp_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = intel_i915_create_gatt_table,
+ .free_gatt_table = intel_i830_free_gatt_table,
+ .insert_memory = intel_i915_insert_entries,
+ .remove_memory = intel_i915_remove_entries,
+ .alloc_by_type = intel_i830_alloc_by_type,
+ .free_by_type = intel_i810_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_alloc_pages = agp_generic_alloc_pages,
+ .agp_destroy_page = agp_generic_destroy_page,
+ .agp_destroy_pages = agp_generic_destroy_pages,
+ .agp_type_to_mask_type = intel_i830_type_to_mask_type,
+ .chipset_flush = intel_i915_chipset_flush,
+#ifdef USE_PCI_DMA_API
+ .agp_map_page = intel_agp_map_page,
+ .agp_unmap_page = intel_agp_unmap_page,
+ .agp_map_memory = intel_agp_map_memory,
+ .agp_unmap_memory = intel_agp_unmap_memory,
+#endif
+};
+
+static const struct agp_bridge_driver intel_i965_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = intel_i830_sizes,
+ .size_type = FIXED_APER_SIZE,
+ .num_aperture_sizes = 4,
+ .needs_scratch_page = true,
+ .configure = intel_i915_configure,
+ .fetch_size = intel_i9xx_fetch_size,
+ .cleanup = intel_i915_cleanup,
+ .mask_memory = intel_i965_mask_memory,
+ .masks = intel_i810_masks,
+ .agp_enable = intel_i810_agp_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = intel_i965_create_gatt_table,
+ .free_gatt_table = intel_i830_free_gatt_table,
+ .insert_memory = intel_i915_insert_entries,
+ .remove_memory = intel_i915_remove_entries,
+ .alloc_by_type = intel_i830_alloc_by_type,
+ .free_by_type = intel_i810_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_alloc_pages = agp_generic_alloc_pages,
+ .agp_destroy_page = agp_generic_destroy_page,
+ .agp_destroy_pages = agp_generic_destroy_pages,
+ .agp_type_to_mask_type = intel_i830_type_to_mask_type,
+ .chipset_flush = intel_i915_chipset_flush,
+#ifdef USE_PCI_DMA_API
+ .agp_map_page = intel_agp_map_page,
+ .agp_unmap_page = intel_agp_unmap_page,
+ .agp_map_memory = intel_agp_map_memory,
+ .agp_unmap_memory = intel_agp_unmap_memory,
+#endif
+};
+
+static const struct agp_bridge_driver intel_g33_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = intel_i830_sizes,
+ .size_type = FIXED_APER_SIZE,
+ .num_aperture_sizes = 4,
+ .needs_scratch_page = true,
+ .configure = intel_i915_configure,
+ .fetch_size = intel_i9xx_fetch_size,
+ .cleanup = intel_i915_cleanup,
+ .mask_memory = intel_i965_mask_memory,
+ .masks = intel_i810_masks,
+ .agp_enable = intel_i810_agp_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = intel_i915_create_gatt_table,
+ .free_gatt_table = intel_i830_free_gatt_table,
+ .insert_memory = intel_i915_insert_entries,
+ .remove_memory = intel_i915_remove_entries,
+ .alloc_by_type = intel_i830_alloc_by_type,
+ .free_by_type = intel_i810_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_alloc_pages = agp_generic_alloc_pages,
+ .agp_destroy_page = agp_generic_destroy_page,
+ .agp_destroy_pages = agp_generic_destroy_pages,
+ .agp_type_to_mask_type = intel_i830_type_to_mask_type,
+ .chipset_flush = intel_i915_chipset_flush,
+#ifdef USE_PCI_DMA_API
+ .agp_map_page = intel_agp_map_page,
+ .agp_unmap_page = intel_agp_unmap_page,
+ .agp_map_memory = intel_agp_map_memory,
+ .agp_unmap_memory = intel_agp_unmap_memory,
+#endif
+};
diff --git a/drivers/char/agp/nvidia-agp.c b/drivers/char/agp/nvidia-agp.c
index 10f24e349a26..b9734a978186 100644
--- a/drivers/char/agp/nvidia-agp.c
+++ b/drivers/char/agp/nvidia-agp.c
@@ -310,6 +310,7 @@ static const struct agp_bridge_driver nvidia_driver = {
.aperture_sizes = nvidia_generic_sizes,
.size_type = U8_APER_SIZE,
.num_aperture_sizes = 5,
+ .needs_scratch_page = true,
.configure = nvidia_configure,
.fetch_size = nvidia_fetch_size,
.cleanup = nvidia_cleanup,
diff --git a/drivers/char/agp/sis-agp.c b/drivers/char/agp/sis-agp.c
index 6c3837a0184d..29aacd81de78 100644
--- a/drivers/char/agp/sis-agp.c
+++ b/drivers/char/agp/sis-agp.c
@@ -125,6 +125,7 @@ static struct agp_bridge_driver sis_driver = {
.aperture_sizes = sis_generic_sizes,
.size_type = U8_APER_SIZE,
.num_aperture_sizes = 7,
+ .needs_scratch_page = true,
.configure = sis_configure,
.fetch_size = sis_fetch_size,
.cleanup = sis_cleanup,
@@ -415,14 +416,6 @@ static struct pci_device_id agp_sis_pci_table[] = {
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
- {
- .class = (PCI_CLASS_BRIDGE_HOST << 8),
- .class_mask = ~0,
- .vendor = PCI_VENDOR_ID_SI,
- .device = PCI_DEVICE_ID_SI_760,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- },
{ }
};
diff --git a/drivers/char/agp/uninorth-agp.c b/drivers/char/agp/uninorth-agp.c
index 6f48931ac1ce..95db71360d24 100644
--- a/drivers/char/agp/uninorth-agp.c
+++ b/drivers/char/agp/uninorth-agp.c
@@ -28,6 +28,7 @@
*/
static int uninorth_rev;
static int is_u3;
+static u32 scratch_value;
#define DEFAULT_APERTURE_SIZE 256
#define DEFAULT_APERTURE_STRING "256"
@@ -172,7 +173,7 @@ static int uninorth_insert_memory(struct agp_memory *mem, off_t pg_start, int ty
gp = (u32 *) &agp_bridge->gatt_table[pg_start];
for (i = 0; i < mem->page_count; ++i) {
- if (gp[i]) {
+ if (gp[i] != scratch_value) {
dev_info(&agp_bridge->dev->dev,
"uninorth_insert_memory: entry 0x%x occupied (%x)\n",
i, gp[i]);
@@ -214,8 +215,9 @@ int uninorth_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
return 0;
gp = (u32 *) &agp_bridge->gatt_table[pg_start];
- for (i = 0; i < mem->page_count; ++i)
- gp[i] = 0;
+ for (i = 0; i < mem->page_count; ++i) {
+ gp[i] = scratch_value;
+ }
mb();
uninorth_tlbflush(mem);
@@ -421,8 +423,13 @@ static int uninorth_create_gatt_table(struct agp_bridge_data *bridge)
bridge->gatt_bus_addr = virt_to_phys(table);
+ if (is_u3)
+ scratch_value = (page_to_phys(agp_bridge->scratch_page_page) >> PAGE_SHIFT) | 0x80000000UL;
+ else
+ scratch_value = cpu_to_le32((page_to_phys(agp_bridge->scratch_page_page) & 0xFFFFF000UL) |
+ 0x1UL);
for (i = 0; i < num_entries; i++)
- bridge->gatt_table[i] = 0;
+ bridge->gatt_table[i] = scratch_value;
return 0;
@@ -519,6 +526,7 @@ const struct agp_bridge_driver uninorth_agp_driver = {
.agp_destroy_pages = agp_generic_destroy_pages,
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
.cant_use_aperture = true,
+ .needs_scratch_page = true,
};
const struct agp_bridge_driver u3_agp_driver = {
diff --git a/drivers/char/agp/via-agp.c b/drivers/char/agp/via-agp.c
index d3bd243867fc..df67e80019d2 100644
--- a/drivers/char/agp/via-agp.c
+++ b/drivers/char/agp/via-agp.c
@@ -175,6 +175,7 @@ static const struct agp_bridge_driver via_agp3_driver = {
.aperture_sizes = agp3_generic_sizes,
.size_type = U8_APER_SIZE,
.num_aperture_sizes = 10,
+ .needs_scratch_page = true,
.configure = via_configure_agp3,
.fetch_size = via_fetch_size_agp3,
.cleanup = via_cleanup_agp3,
@@ -201,6 +202,7 @@ static const struct agp_bridge_driver via_driver = {
.aperture_sizes = via_generic_sizes,
.size_type = U8_APER_SIZE,
.num_aperture_sizes = 9,
+ .needs_scratch_page = true,
.configure = via_configure,
.fetch_size = via_fetch_size,
.cleanup = via_cleanup,
diff --git a/drivers/char/amiserial.c b/drivers/char/amiserial.c
index 56b27671adc4..4f8d60c25a98 100644
--- a/drivers/char/amiserial.c
+++ b/drivers/char/amiserial.c
@@ -84,6 +84,7 @@ static char *serial_version = "4.30";
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/bitops.h>
+#include <linux/platform_device.h>
#include <asm/setup.h>
@@ -1954,29 +1955,16 @@ static const struct tty_operations serial_ops = {
/*
* The serial driver boot-time initialization code!
*/
-static int __init rs_init(void)
+static int __init amiga_serial_probe(struct platform_device *pdev)
{
unsigned long flags;
struct serial_state * state;
int error;
- if (!MACH_IS_AMIGA || !AMIGAHW_PRESENT(AMI_SERIAL))
- return -ENODEV;
-
serial_driver = alloc_tty_driver(1);
if (!serial_driver)
return -ENOMEM;
- /*
- * We request SERDAT and SERPER only, because the serial registers are
- * too spreaded over the custom register space
- */
- if (!request_mem_region(CUSTOM_PHYSADDR+0x30, 4,
- "amiserial [Paula]")) {
- error = -EBUSY;
- goto fail_put_tty_driver;
- }
-
IRQ_ports = NULL;
show_serial_version();
@@ -1998,7 +1986,7 @@ static int __init rs_init(void)
error = tty_register_driver(serial_driver);
if (error)
- goto fail_release_mem_region;
+ goto fail_put_tty_driver;
state = rs_table;
state->magic = SSTATE_MAGIC;
@@ -2050,23 +2038,24 @@ static int __init rs_init(void)
ciab.ddra |= (SER_DTR | SER_RTS); /* outputs */
ciab.ddra &= ~(SER_DCD | SER_CTS | SER_DSR); /* inputs */
+ platform_set_drvdata(pdev, state);
+
return 0;
fail_free_irq:
free_irq(IRQ_AMIGA_TBE, state);
fail_unregister:
tty_unregister_driver(serial_driver);
-fail_release_mem_region:
- release_mem_region(CUSTOM_PHYSADDR+0x30, 4);
fail_put_tty_driver:
put_tty_driver(serial_driver);
return error;
}
-static __exit void rs_exit(void)
+static int __exit amiga_serial_remove(struct platform_device *pdev)
{
int error;
- struct async_struct *info = rs_table[0].info;
+ struct serial_state *state = platform_get_drvdata(pdev);
+ struct async_struct *info = state->info;
/* printk("Unloading %s: version %s\n", serial_name, serial_version); */
tasklet_kill(&info->tlet);
@@ -2075,19 +2064,38 @@ static __exit void rs_exit(void)
error);
put_tty_driver(serial_driver);
- if (info) {
- rs_table[0].info = NULL;
- kfree(info);
- }
+ rs_table[0].info = NULL;
+ kfree(info);
free_irq(IRQ_AMIGA_TBE, rs_table);
free_irq(IRQ_AMIGA_RBF, rs_table);
- release_mem_region(CUSTOM_PHYSADDR+0x30, 4);
+ platform_set_drvdata(pdev, NULL);
+
+ return error;
+}
+
+static struct platform_driver amiga_serial_driver = {
+ .remove = __exit_p(amiga_serial_remove),
+ .driver = {
+ .name = "amiga-serial",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init amiga_serial_init(void)
+{
+ return platform_driver_probe(&amiga_serial_driver, amiga_serial_probe);
+}
+
+module_init(amiga_serial_init);
+
+static void __exit amiga_serial_exit(void)
+{
+ platform_driver_unregister(&amiga_serial_driver);
}
-module_init(rs_init)
-module_exit(rs_exit)
+module_exit(amiga_serial_exit);
#if defined(CONFIG_SERIAL_CONSOLE) && !defined(MODULE)
@@ -2154,3 +2162,4 @@ console_initcall(amiserial_console_init);
#endif /* CONFIG_SERIAL_CONSOLE && !MODULE */
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:amiga-serial");
diff --git a/drivers/char/apm-emulation.c b/drivers/char/apm-emulation.c
index 4f568cb9af3f..033e1505fca9 100644
--- a/drivers/char/apm-emulation.c
+++ b/drivers/char/apm-emulation.c
@@ -265,8 +265,8 @@ static unsigned int apm_poll(struct file *fp, poll_table * wait)
* Only when everyone who has opened /dev/apm_bios with write permission
* has acknowledged does the actual suspend happen.
*/
-static int
-apm_ioctl(struct inode * inode, struct file *filp, u_int cmd, u_long arg)
+static long
+apm_ioctl(struct file *filp, u_int cmd, u_long arg)
{
struct apm_user *as = filp->private_data;
int err = -EINVAL;
@@ -274,6 +274,7 @@ apm_ioctl(struct inode * inode, struct file *filp, u_int cmd, u_long arg)
if (!as->suser || !as->writer)
return -EPERM;
+ lock_kernel();
switch (cmd) {
case APM_IOC_SUSPEND:
mutex_lock(&state_lock);
@@ -334,6 +335,7 @@ apm_ioctl(struct inode * inode, struct file *filp, u_int cmd, u_long arg)
mutex_unlock(&state_lock);
break;
}
+ unlock_kernel();
return err;
}
@@ -397,7 +399,7 @@ static const struct file_operations apm_bios_fops = {
.owner = THIS_MODULE,
.read = apm_read,
.poll = apm_poll,
- .ioctl = apm_ioctl,
+ .unlocked_ioctl = apm_ioctl,
.open = apm_open,
.release = apm_release,
};
diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c
index a7424bf7eacf..f4ae0e0fb631 100644
--- a/drivers/char/applicom.c
+++ b/drivers/char/applicom.c
@@ -26,6 +26,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
+#include <linux/smp_lock.h>
#include <linux/miscdevice.h>
#include <linux/pci.h>
#include <linux/wait.h>
@@ -106,8 +107,7 @@ static unsigned int DeviceErrorCount; /* number of device error */
static ssize_t ac_read (struct file *, char __user *, size_t, loff_t *);
static ssize_t ac_write (struct file *, const char __user *, size_t, loff_t *);
-static int ac_ioctl(struct inode *, struct file *, unsigned int,
- unsigned long);
+static long ac_ioctl(struct file *, unsigned int, unsigned long);
static irqreturn_t ac_interrupt(int, void *);
static const struct file_operations ac_fops = {
@@ -115,7 +115,7 @@ static const struct file_operations ac_fops = {
.llseek = no_llseek,
.read = ac_read,
.write = ac_write,
- .ioctl = ac_ioctl,
+ .unlocked_ioctl = ac_ioctl,
};
static struct miscdevice ac_miscdev = {
@@ -689,7 +689,7 @@ static irqreturn_t ac_interrupt(int vec, void *dev_instance)
-static int ac_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
+static long ac_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{ /* @ ADG or ATO depending on the case */
int i;
@@ -703,15 +703,11 @@ static int ac_ioctl(struct inode *inode, struct file *file, unsigned int cmd, un
/* In general, the device is only openable by root anyway, so we're not
particularly concerned that bogus ioctls can flood the console. */
- adgl = kmalloc(sizeof(struct st_ram_io), GFP_KERNEL);
- if (!adgl)
- return -ENOMEM;
+ adgl = memdup_user(argp, sizeof(struct st_ram_io));
+ if (IS_ERR(adgl))
+ return PTR_ERR(adgl);
- if (copy_from_user(adgl, argp, sizeof(struct st_ram_io))) {
- kfree(adgl);
- return -EFAULT;
- }
-
+ lock_kernel();
IndexCard = adgl->num_card-1;
if(cmd != 6 && ((IndexCard >= MAX_BOARD) || !apbs[IndexCard].RamIO)) {
@@ -721,6 +717,7 @@ static int ac_ioctl(struct inode *inode, struct file *file, unsigned int cmd, un
warncount--;
}
kfree(adgl);
+ unlock_kernel();
return -EINVAL;
}
@@ -838,6 +835,7 @@ static int ac_ioctl(struct inode *inode, struct file *file, unsigned int cmd, un
}
Dummy = readb(apbs[IndexCard].RamIO + VERS);
kfree(adgl);
+ unlock_kernel();
return 0;
}
diff --git a/drivers/char/bsr.c b/drivers/char/bsr.c
index 7fef305774de..89d871ef8c2f 100644
--- a/drivers/char/bsr.c
+++ b/drivers/char/bsr.c
@@ -253,7 +253,7 @@ static int bsr_add_node(struct device_node *bn)
cur->bsr_device = device_create(bsr_class, NULL, cur->bsr_dev,
cur, cur->bsr_name);
- if (!cur->bsr_device) {
+ if (IS_ERR(cur->bsr_device)) {
printk(KERN_ERR "device_create failed for %s\n",
cur->bsr_name);
cdev_del(&cur->bsr_cdev);
diff --git a/drivers/char/ds1620.c b/drivers/char/ds1620.c
index 61f0146e215d..dbee8688f75c 100644
--- a/drivers/char/ds1620.c
+++ b/drivers/char/ds1620.c
@@ -232,7 +232,7 @@ ds1620_read(struct file *file, char __user *buf, size_t count, loff_t *ptr)
}
static int
-ds1620_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
+ds1620_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct therm therm;
union {
@@ -316,6 +316,18 @@ ds1620_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned
return 0;
}
+static long
+ds1620_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ int ret;
+
+ lock_kernel();
+ ret = ds1620_ioctl(file, cmd, arg);
+ unlock_kernel();
+
+ return ret;
+}
+
#ifdef THERM_USE_PROC
static int
proc_therm_ds1620_read(char *buf, char **start, off_t offset,
@@ -344,7 +356,7 @@ static const struct file_operations ds1620_fops = {
.owner = THIS_MODULE,
.open = ds1620_open,
.read = ds1620_read,
- .ioctl = ds1620_ioctl,
+ .unlocked_ioctl = ds1620_unlocked_ioctl,
};
static struct miscdevice ds1620_miscdev = {
diff --git a/drivers/char/dtlk.c b/drivers/char/dtlk.c
index 045c930e6320..e3859d4eaead 100644
--- a/drivers/char/dtlk.c
+++ b/drivers/char/dtlk.c
@@ -93,8 +93,8 @@ static ssize_t dtlk_write(struct file *, const char __user *,
static unsigned int dtlk_poll(struct file *, poll_table *);
static int dtlk_open(struct inode *, struct file *);
static int dtlk_release(struct inode *, struct file *);
-static int dtlk_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg);
+static long dtlk_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg);
static const struct file_operations dtlk_fops =
{
@@ -102,7 +102,7 @@ static const struct file_operations dtlk_fops =
.read = dtlk_read,
.write = dtlk_write,
.poll = dtlk_poll,
- .ioctl = dtlk_ioctl,
+ .unlocked_ioctl = dtlk_ioctl,
.open = dtlk_open,
.release = dtlk_release,
};
@@ -263,10 +263,9 @@ static void dtlk_timer_tick(unsigned long data)
wake_up_interruptible(&dtlk_process_list);
}
-static int dtlk_ioctl(struct inode *inode,
- struct file *file,
- unsigned int cmd,
- unsigned long arg)
+static long dtlk_ioctl(struct file *file,
+ unsigned int cmd,
+ unsigned long arg)
{
char __user *argp = (char __user *)arg;
struct dtlk_settings *sp;
@@ -276,7 +275,9 @@ static int dtlk_ioctl(struct inode *inode,
switch (cmd) {
case DTLK_INTERROGATE:
+ lock_kernel();
sp = dtlk_interrogate();
+ unlock_kernel();
if (copy_to_user(argp, sp, sizeof(struct dtlk_settings)))
return -EINVAL;
return 0;
diff --git a/drivers/char/generic_nvram.c b/drivers/char/generic_nvram.c
index fda4181b5e67..82b5a88a82d7 100644
--- a/drivers/char/generic_nvram.c
+++ b/drivers/char/generic_nvram.c
@@ -19,6 +19,7 @@
#include <linux/miscdevice.h>
#include <linux/fcntl.h>
#include <linux/init.h>
+#include <linux/smp_lock.h>
#include <asm/uaccess.h>
#include <asm/nvram.h>
#ifdef CONFIG_PPC_PMAC
@@ -84,8 +85,7 @@ static ssize_t write_nvram(struct file *file, const char __user *buf,
return p - buf;
}
-static int nvram_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg)
+static int nvram_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
switch(cmd) {
#ifdef CONFIG_PPC_PMAC
@@ -116,12 +116,23 @@ static int nvram_ioctl(struct inode *inode, struct file *file,
return 0;
}
+static long nvram_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ int ret;
+
+ lock_kernel();
+ ret = nvram_ioctl(file, cmd, arg);
+ unlock_kernel();
+
+ return ret;
+}
+
const struct file_operations nvram_fops = {
.owner = THIS_MODULE,
.llseek = nvram_llseek,
.read = read_nvram,
.write = write_nvram,
- .ioctl = nvram_ioctl,
+ .unlocked_ioctl = nvram_unlocked_ioctl,
};
static struct miscdevice nvram_dev = {
diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
index 31e7c91c2d9d..b6c2cc167c11 100644
--- a/drivers/char/genrtc.c
+++ b/drivers/char/genrtc.c
@@ -262,7 +262,7 @@ static inline int gen_set_rtc_irq_bit(unsigned char bit)
#endif
}
-static int gen_rtc_ioctl(struct inode *inode, struct file *file,
+static int gen_rtc_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
struct rtc_time wtime;
@@ -332,6 +332,18 @@ static int gen_rtc_ioctl(struct inode *inode, struct file *file,
return -EINVAL;
}
+static long gen_rtc_unlocked_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ int ret;
+
+ lock_kernel();
+ ret = gen_rtc_ioctl(file, cmd, arg);
+ unlock_kernel();
+
+ return ret;
+}
+
/*
* We enforce only one user at a time here with the open/close.
* Also clear the previous interrupt data on an open, and clean
@@ -482,7 +494,7 @@ static const struct file_operations gen_rtc_fops = {
.read = gen_rtc_read,
.poll = gen_rtc_poll,
#endif
- .ioctl = gen_rtc_ioctl,
+ .unlocked_ioctl = gen_rtc_unlocked_ioctl,
.open = gen_rtc_open,
.release = gen_rtc_release,
};
diff --git a/drivers/char/hangcheck-timer.c b/drivers/char/hangcheck-timer.c
index 712d9f271aa6..e0249722d25f 100644
--- a/drivers/char/hangcheck-timer.c
+++ b/drivers/char/hangcheck-timer.c
@@ -49,8 +49,9 @@
#include <asm/uaccess.h>
#include <linux/sysrq.h>
#include <linux/timer.h>
+#include <linux/time.h>
-#define VERSION_STR "0.9.0"
+#define VERSION_STR "0.9.1"
#define DEFAULT_IOFENCE_MARGIN 60 /* Default fudge factor, in seconds */
#define DEFAULT_IOFENCE_TICK 180 /* Default timer timeout, in seconds */
@@ -119,10 +120,8 @@ __setup("hcheck_dump_tasks", hangcheck_parse_dump_tasks);
#if defined(CONFIG_S390)
# define HAVE_MONOTONIC
# define TIMER_FREQ 1000000000ULL
-#elif defined(CONFIG_IA64)
-# define TIMER_FREQ ((unsigned long long)local_cpu_data->itc_freq)
#else
-# define TIMER_FREQ (HZ*loops_per_jiffy)
+# define TIMER_FREQ 1000000000ULL
#endif
#ifdef HAVE_MONOTONIC
@@ -130,7 +129,9 @@ extern unsigned long long monotonic_clock(void);
#else
static inline unsigned long long monotonic_clock(void)
{
- return get_cycles();
+ struct timespec ts;
+ getrawmonotonic(&ts);
+ return timespec_to_ns(&ts);
}
#endif /* HAVE_MONOTONIC */
@@ -168,6 +169,13 @@ static void hangcheck_fire(unsigned long data)
printk(KERN_CRIT "Hangcheck: hangcheck value past margin!\n");
}
}
+#if 0
+ /*
+ * Enable to investigate delays in detail
+ */
+ printk("Hangcheck: called %Ld ns since last time (%Ld ns overshoot)\n",
+ tsc_diff, tsc_diff - hangcheck_tick*TIMER_FREQ);
+#endif
mod_timer(&hangcheck_ticktock, jiffies + (hangcheck_tick*HZ));
hangcheck_tsc = monotonic_clock();
}
@@ -180,7 +188,7 @@ static int __init hangcheck_init(void)
#if defined (HAVE_MONOTONIC)
printk("Hangcheck: Using monotonic_clock().\n");
#else
- printk("Hangcheck: Using get_cycles().\n");
+ printk("Hangcheck: Using getrawmonotonic().\n");
#endif /* HAVE_MONOTONIC */
hangcheck_tsc_margin =
(unsigned long long)(hangcheck_margin + hangcheck_tick);
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index 9ded667625ac..a0a1829d3198 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -431,14 +431,18 @@ static int hpet_release(struct inode *inode, struct file *file)
static int hpet_ioctl_common(struct hpet_dev *, int, unsigned long, int);
-static int
-hpet_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
- unsigned long arg)
+static long hpet_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
{
struct hpet_dev *devp;
+ int ret;
devp = file->private_data;
- return hpet_ioctl_common(devp, cmd, arg, 0);
+ lock_kernel();
+ ret = hpet_ioctl_common(devp, cmd, arg, 0);
+ unlock_kernel();
+
+ return ret;
}
static int hpet_ioctl_ieon(struct hpet_dev *devp)
@@ -654,7 +658,7 @@ static const struct file_operations hpet_fops = {
.llseek = no_llseek,
.read = hpet_read,
.poll = hpet_poll,
- .ioctl = hpet_ioctl,
+ .unlocked_ioctl = hpet_ioctl,
.open = hpet_open,
.release = hpet_release,
.fasync = hpet_fasync,
diff --git a/drivers/char/hvsi.c b/drivers/char/hvsi.c
index 793b236c9266..d4b14ff1c4c1 100644
--- a/drivers/char/hvsi.c
+++ b/drivers/char/hvsi.c
@@ -194,10 +194,8 @@ static inline void print_state(struct hvsi_struct *hp)
"HVSI_WAIT_FOR_MCTRL_RESPONSE",
"HVSI_FSP_DIED",
};
- const char *name = state_names[hp->state];
-
- if (hp->state > ARRAY_SIZE(state_names))
- name = "UNKNOWN";
+ const char *name = (hp->state < ARRAY_SIZE(state_names))
+ ? state_names[hp->state] : "UNKNOWN";
pr_debug("hvsi%i: state = %s\n", hp->index, name);
#endif /* DEBUG */
diff --git a/drivers/char/hw_random/n2-drv.c b/drivers/char/hw_random/n2-drv.c
index 10f868eefaa6..0f9cbf1aaf15 100644
--- a/drivers/char/hw_random/n2-drv.c
+++ b/drivers/char/hw_random/n2-drv.c
@@ -660,7 +660,7 @@ static int __devinit n2rng_probe(struct of_device *op,
np->hvapi_major);
goto out_hvapi_unregister;
}
- np->num_units = of_getintprop_default(op->node,
+ np->num_units = of_getintprop_default(op->dev.of_node,
"rng-#units", 0);
if (!np->num_units) {
dev_err(&op->dev, "VF RNG lacks rng-#units property\n");
@@ -751,8 +751,11 @@ static const struct of_device_id n2rng_match[] = {
MODULE_DEVICE_TABLE(of, n2rng_match);
static struct of_platform_driver n2rng_driver = {
- .name = "n2rng",
- .match_table = n2rng_match,
+ .driver = {
+ .name = "n2rng",
+ .owner = THIS_MODULE,
+ .of_match_table = n2rng_match,
+ },
.probe = n2rng_probe,
.remove = __devexit_p(n2rng_remove),
};
diff --git a/drivers/char/hw_random/nomadik-rng.c b/drivers/char/hw_random/nomadik-rng.c
index a8b4c4010144..a348c7e9aa0b 100644
--- a/drivers/char/hw_random/nomadik-rng.c
+++ b/drivers/char/hw_random/nomadik-rng.c
@@ -15,6 +15,10 @@
#include <linux/amba/bus.h>
#include <linux/hw_random.h>
#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+
+static struct clk *rng_clk;
static int nmk_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
@@ -40,6 +44,15 @@ static int nmk_rng_probe(struct amba_device *dev, struct amba_id *id)
void __iomem *base;
int ret;
+ rng_clk = clk_get(&dev->dev, NULL);
+ if (IS_ERR(rng_clk)) {
+ dev_err(&dev->dev, "could not get rng clock\n");
+ ret = PTR_ERR(rng_clk);
+ return ret;
+ }
+
+ clk_enable(rng_clk);
+
ret = amba_request_regions(dev, dev->dev.init_name);
if (ret)
return ret;
@@ -57,6 +70,8 @@ out_unmap:
iounmap(base);
out_release:
amba_release_regions(dev);
+ clk_disable(rng_clk);
+ clk_put(rng_clk);
return ret;
}
@@ -66,6 +81,8 @@ static int nmk_rng_remove(struct amba_device *dev)
hwrng_unregister(&nmk_rng);
iounmap(base);
amba_release_regions(dev);
+ clk_disable(rng_clk);
+ clk_put(rng_clk);
return 0;
}
diff --git a/drivers/char/hw_random/pasemi-rng.c b/drivers/char/hw_random/pasemi-rng.c
index 7fa61dd1d9d9..261ba8f22b8b 100644
--- a/drivers/char/hw_random/pasemi-rng.c
+++ b/drivers/char/hw_random/pasemi-rng.c
@@ -98,7 +98,7 @@ static int __devinit rng_probe(struct of_device *ofdev,
const struct of_device_id *match)
{
void __iomem *rng_regs;
- struct device_node *rng_np = ofdev->node;
+ struct device_node *rng_np = ofdev->dev.of_node;
struct resource res;
int err = 0;
@@ -140,8 +140,11 @@ static struct of_device_id rng_match[] = {
};
static struct of_platform_driver rng_driver = {
- .name = "pasemi-rng",
- .match_table = rng_match,
+ .driver = {
+ .name = "pasemi-rng",
+ .owner = THIS_MODULE,
+ .of_match_table = rng_match,
+ },
.probe = rng_probe,
.remove = rng_remove,
};
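The n2-drv.c and pasemi-rng.c hunks above (and the ipmi_si_intf.c hunk further down) share another recurring change: of_platform drivers stop carrying their own .name/.match_table fields and instead fill in the embedded struct device_driver, using .of_match_table, while device-tree lookups move from op->node to op->dev.of_node. Below is a short sketch of the converted form, with illustrative "example" names that are not taken from the patch.

#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>

static int example_probe(struct of_device *op,
			 const struct of_device_id *match)
{
	struct device_node *np = op->dev.of_node;	/* was: op->node */

	(void)np;
	return 0;
}

static int example_remove(struct of_device *op)
{
	return 0;
}

static const struct of_device_id example_match[] = {
	{ .compatible = "example,device" },
	{},
};

static struct of_platform_driver example_driver = {
	.driver = {
		.name		= "example",
		.owner		= THIS_MODULE,
		.of_match_table	= example_match,	/* was: .match_table */
	},
	.probe	= example_probe,
	.remove	= example_remove,
};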
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
index 64fe0a793efd..75f1cbd61c17 100644
--- a/drivers/char/hw_random/virtio-rng.c
+++ b/drivers/char/hw_random/virtio-rng.c
@@ -32,7 +32,7 @@ static bool busy;
static void random_recv_done(struct virtqueue *vq)
{
/* We can get spurious callbacks, e.g. shared IRQs + virtio_pci. */
- if (!vq->vq_ops->get_buf(vq, &data_avail))
+ if (!virtqueue_get_buf(vq, &data_avail))
return;
complete(&have_data);
@@ -46,10 +46,10 @@ static void register_buffer(u8 *buf, size_t size)
sg_init_one(&sg, buf, size);
/* There should always be room for one buffer. */
- if (vq->vq_ops->add_buf(vq, &sg, 0, 1, buf) < 0)
+ if (virtqueue_add_buf(vq, &sg, 0, 1, buf) < 0)
BUG();
- vq->vq_ops->kick(vq);
+ virtqueue_kick(vq);
}
static int virtio_read(struct hwrng *rng, void *buf, size_t size, bool wait)
diff --git a/drivers/char/i8k.c b/drivers/char/i8k.c
index fc8cf7ac7f2b..4cd8b227c11f 100644
--- a/drivers/char/i8k.c
+++ b/drivers/char/i8k.c
@@ -23,6 +23,7 @@
#include <linux/seq_file.h>
#include <linux/dmi.h>
#include <linux/capability.h>
+#include <linux/smp_lock.h>
#include <asm/uaccess.h>
#include <asm/io.h>
@@ -82,8 +83,7 @@ module_param(fan_mult, int, 0);
MODULE_PARM_DESC(fan_mult, "Factor to multiply fan speed with");
static int i8k_open_fs(struct inode *inode, struct file *file);
-static int i8k_ioctl(struct inode *, struct file *, unsigned int,
- unsigned long);
+static long i8k_ioctl(struct file *, unsigned int, unsigned long);
static const struct file_operations i8k_fops = {
.owner = THIS_MODULE,
@@ -91,7 +91,7 @@ static const struct file_operations i8k_fops = {
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
- .ioctl = i8k_ioctl,
+ .unlocked_ioctl = i8k_ioctl,
};
struct smm_regs {
@@ -307,8 +307,8 @@ static int i8k_get_dell_signature(int req_fn)
return regs.eax == 1145651527 && regs.edx == 1145392204 ? 0 : -1;
}
-static int i8k_ioctl(struct inode *ip, struct file *fp, unsigned int cmd,
- unsigned long arg)
+static int
+i8k_ioctl_unlocked(struct file *fp, unsigned int cmd, unsigned long arg)
{
int val = 0;
int speed;
@@ -395,6 +395,17 @@ static int i8k_ioctl(struct inode *ip, struct file *fp, unsigned int cmd,
return 0;
}
+static long i8k_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
+{
+ long ret;
+
+ lock_kernel();
+ ret = i8k_ioctl_unlocked(fp, cmd, arg);
+ unlock_kernel();
+
+ return ret;
+}
+
/*
* Print the information for /proc/i8k.
*/
diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c
index 65545de3dbf4..d8ec92a38980 100644
--- a/drivers/char/ipmi/ipmi_devintf.c
+++ b/drivers/char/ipmi/ipmi_devintf.c
@@ -228,8 +228,7 @@ static int handle_send_req(ipmi_user_t user,
return rv;
}
-static int ipmi_ioctl(struct inode *inode,
- struct file *file,
+static int ipmi_ioctl(struct file *file,
unsigned int cmd,
unsigned long data)
{
@@ -630,6 +629,23 @@ static int ipmi_ioctl(struct inode *inode,
return rv;
}
+/*
+ * Note: it doesn't make sense to take the BKL here but
+ * not in compat_ipmi_ioctl. -arnd
+ */
+static long ipmi_unlocked_ioctl(struct file *file,
+ unsigned int cmd,
+ unsigned long data)
+{
+ int ret;
+
+ lock_kernel();
+ ret = ipmi_ioctl(file, cmd, data);
+ unlock_kernel();
+
+ return ret;
+}
+
#ifdef CONFIG_COMPAT
/*
@@ -802,7 +818,7 @@ static long compat_ipmi_ioctl(struct file *filep, unsigned int cmd,
if (copy_to_user(precv64, &recv64, sizeof(recv64)))
return -EFAULT;
- rc = ipmi_ioctl(filep->f_path.dentry->d_inode, filep,
+ rc = ipmi_ioctl(filep,
((cmd == COMPAT_IPMICTL_RECEIVE_MSG)
? IPMICTL_RECEIVE_MSG
: IPMICTL_RECEIVE_MSG_TRUNC),
@@ -819,14 +835,14 @@ static long compat_ipmi_ioctl(struct file *filep, unsigned int cmd,
return rc;
}
default:
- return ipmi_ioctl(filep->f_path.dentry->d_inode, filep, cmd, arg);
+ return ipmi_ioctl(filep, cmd, arg);
}
}
#endif
static const struct file_operations ipmi_fops = {
.owner = THIS_MODULE,
- .ioctl = ipmi_ioctl,
+ .unlocked_ioctl = ipmi_unlocked_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = compat_ipmi_ioctl,
#endif
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index c6ad4234378d..4f3f8c9ec262 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -2505,12 +2505,11 @@ static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum,
return rv;
}
- printk(KERN_INFO
- "ipmi: Found new BMC (man_id: 0x%6.6x, "
- " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
- bmc->id.manufacturer_id,
- bmc->id.product_id,
- bmc->id.device_id);
+ dev_info(intf->si_dev, "Found new BMC (man_id: 0x%6.6x, "
+ "prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
+ bmc->id.manufacturer_id,
+ bmc->id.product_id,
+ bmc->id.device_id);
}
/*
@@ -4037,8 +4036,8 @@ static void ipmi_request_event(void)
static struct timer_list ipmi_timer;
-/* Call every ~100 ms. */
-#define IPMI_TIMEOUT_TIME 100
+/* Call every ~1000 ms. */
+#define IPMI_TIMEOUT_TIME 1000
/* How many jiffies does it take to get to the timeout time. */
#define IPMI_TIMEOUT_JIFFIES ((IPMI_TIMEOUT_TIME * HZ) / 1000)
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 4462b113ba3f..35603dd4e6c5 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -107,6 +107,14 @@ enum si_type {
};
static char *si_to_str[] = { "kcs", "smic", "bt" };
+enum ipmi_addr_src {
+ SI_INVALID = 0, SI_HOTMOD, SI_HARDCODED, SI_SPMI, SI_ACPI, SI_SMBIOS,
+ SI_PCI, SI_DEVICETREE, SI_DEFAULT
+};
+static char *ipmi_addr_src_to_str[] = { NULL, "hotmod", "hardcoded", "SPMI",
+ "ACPI", "SMBIOS", "PCI",
+ "device-tree", "default" };
+
#define DEVICE_NAME "ipmi_si"
static struct platform_driver ipmi_driver = {
@@ -188,7 +196,7 @@ struct smi_info {
int (*irq_setup)(struct smi_info *info);
void (*irq_cleanup)(struct smi_info *info);
unsigned int io_size;
- char *addr_source; /* ACPI, PCI, SMBIOS, hardcode, default. */
+ enum ipmi_addr_src addr_source; /* ACPI, PCI, SMBIOS, hardcode, etc. */
void (*addr_source_cleanup)(struct smi_info *info);
void *addr_source_data;
@@ -300,6 +308,7 @@ static int num_max_busy_us;
static int unload_when_empty = 1;
+static int add_smi(struct smi_info *smi);
static int try_smi_init(struct smi_info *smi);
static void cleanup_one_si(struct smi_info *to_clean);
@@ -314,9 +323,14 @@ static void deliver_recv_msg(struct smi_info *smi_info,
{
/* Deliver the message to the upper layer with the lock
released. */
- spin_unlock(&(smi_info->si_lock));
- ipmi_smi_msg_received(smi_info->intf, msg);
- spin_lock(&(smi_info->si_lock));
+
+ if (smi_info->run_to_completion) {
+ ipmi_smi_msg_received(smi_info->intf, msg);
+ } else {
+ spin_unlock(&(smi_info->si_lock));
+ ipmi_smi_msg_received(smi_info->intf, msg);
+ spin_lock(&(smi_info->si_lock));
+ }
}
static void return_hosed_msg(struct smi_info *smi_info, int cCode)
@@ -445,6 +459,9 @@ static inline void disable_si_irq(struct smi_info *smi_info)
if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
start_disable_irq(smi_info);
smi_info->interrupt_disabled = 1;
+ if (!atomic_read(&smi_info->stop_operation))
+ mod_timer(&smi_info->si_timer,
+ jiffies + SI_TIMEOUT_JIFFIES);
}
}
@@ -576,9 +593,8 @@ static void handle_transaction_done(struct smi_info *smi_info)
smi_info->handlers->get_result(smi_info->si_sm, msg, 3);
if (msg[2] != 0) {
/* Error clearing flags */
- printk(KERN_WARNING
- "ipmi_si: Error clearing flags: %2.2x\n",
- msg[2]);
+ dev_warn(smi_info->dev,
+ "Error clearing flags: %2.2x\n", msg[2]);
}
if (smi_info->si_state == SI_CLEARING_FLAGS_THEN_SET_IRQ)
start_enable_irq(smi_info);
@@ -670,9 +686,8 @@ static void handle_transaction_done(struct smi_info *smi_info)
/* We got the flags from the SMI, now handle them. */
smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
if (msg[2] != 0) {
- printk(KERN_WARNING
- "ipmi_si: Could not enable interrupts"
- ", failed get, using polled mode.\n");
+ dev_warn(smi_info->dev, "Could not enable interrupts"
+ ", failed get, using polled mode.\n");
smi_info->si_state = SI_NORMAL;
} else {
msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
@@ -693,11 +708,11 @@ static void handle_transaction_done(struct smi_info *smi_info)
/* We got the flags from the SMI, now handle them. */
smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
- if (msg[2] != 0) {
- printk(KERN_WARNING
- "ipmi_si: Could not enable interrupts"
- ", failed set, using polled mode.\n");
- }
+ if (msg[2] != 0)
+ dev_warn(smi_info->dev, "Could not enable interrupts"
+ ", failed set, using polled mode.\n");
+ else
+ smi_info->interrupt_disabled = 0;
smi_info->si_state = SI_NORMAL;
break;
}
@@ -709,9 +724,8 @@ static void handle_transaction_done(struct smi_info *smi_info)
/* We got the flags from the SMI, now handle them. */
smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
if (msg[2] != 0) {
- printk(KERN_WARNING
- "ipmi_si: Could not disable interrupts"
- ", failed get.\n");
+ dev_warn(smi_info->dev, "Could not disable interrupts"
+ ", failed get.\n");
smi_info->si_state = SI_NORMAL;
} else {
msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
@@ -733,9 +747,8 @@ static void handle_transaction_done(struct smi_info *smi_info)
/* We got the flags from the SMI, now handle them. */
smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
if (msg[2] != 0) {
- printk(KERN_WARNING
- "ipmi_si: Could not disable interrupts"
- ", failed set.\n");
+ dev_warn(smi_info->dev, "Could not disable interrupts"
+ ", failed set.\n");
}
smi_info->si_state = SI_NORMAL;
break;
@@ -877,6 +890,11 @@ static void sender(void *send_info,
printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec);
#endif
+ mod_timer(&smi_info->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
+
+ if (smi_info->thread)
+ wake_up_process(smi_info->thread);
+
if (smi_info->run_to_completion) {
/*
* If we are running to completion, then throw it in
@@ -997,6 +1015,8 @@ static int ipmi_thread(void *data)
; /* do nothing */
else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait)
schedule();
+ else if (smi_result == SI_SM_IDLE)
+ schedule_timeout_interruptible(100);
else
schedule_timeout_interruptible(0);
}
@@ -1039,6 +1059,7 @@ static void smi_timeout(unsigned long data)
unsigned long flags;
unsigned long jiffies_now;
long time_diff;
+ long timeout;
#ifdef DEBUG_TIMING
struct timeval t;
#endif
@@ -1059,9 +1080,9 @@ static void smi_timeout(unsigned long data)
if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
/* Running with interrupts, only do long timeouts. */
- smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
+ timeout = jiffies + SI_TIMEOUT_JIFFIES;
smi_inc_stat(smi_info, long_timeouts);
- goto do_add_timer;
+ goto do_mod_timer;
}
/*
@@ -1070,14 +1091,15 @@ static void smi_timeout(unsigned long data)
*/
if (smi_result == SI_SM_CALL_WITH_DELAY) {
smi_inc_stat(smi_info, short_timeouts);
- smi_info->si_timer.expires = jiffies + 1;
+ timeout = jiffies + 1;
} else {
smi_inc_stat(smi_info, long_timeouts);
- smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
+ timeout = jiffies + SI_TIMEOUT_JIFFIES;
}
- do_add_timer:
- add_timer(&(smi_info->si_timer));
+ do_mod_timer:
+ if (smi_result != SI_SM_IDLE)
+ mod_timer(&(smi_info->si_timer), timeout);
}
static irqreturn_t si_irq_handler(int irq, void *data)
@@ -1144,10 +1166,10 @@ static int smi_start_processing(void *send_info,
new_smi->thread = kthread_run(ipmi_thread, new_smi,
"kipmi%d", new_smi->intf_num);
if (IS_ERR(new_smi->thread)) {
- printk(KERN_NOTICE "ipmi_si_intf: Could not start"
- " kernel thread due to error %ld, only using"
- " timers to drive the interface\n",
- PTR_ERR(new_smi->thread));
+ dev_notice(new_smi->dev, "Could not start"
+ " kernel thread due to error %ld, only using"
+ " timers to drive the interface\n",
+ PTR_ERR(new_smi->thread));
new_smi->thread = NULL;
}
}
@@ -1308,14 +1330,13 @@ static int std_irq_setup(struct smi_info *info)
DEVICE_NAME,
info);
if (rv) {
- printk(KERN_WARNING
- "ipmi_si: %s unable to claim interrupt %d,"
- " running polled\n",
- DEVICE_NAME, info->irq);
+ dev_warn(info->dev, "%s unable to claim interrupt %d,"
+ " running polled\n",
+ DEVICE_NAME, info->irq);
info->irq = 0;
} else {
info->irq_cleanup = std_irq_cleanup;
- printk(" Using irq %d\n", info->irq);
+ dev_info(info->dev, "Using irq %d\n", info->irq);
}
return rv;
@@ -1406,8 +1427,8 @@ static int port_setup(struct smi_info *info)
info->io.outputb = port_outl;
break;
default:
- printk(KERN_WARNING "ipmi_si: Invalid register size: %d\n",
- info->io.regsize);
+ dev_warn(info->dev, "Invalid register size: %d\n",
+ info->io.regsize);
return -EINVAL;
}
@@ -1529,8 +1550,8 @@ static int mem_setup(struct smi_info *info)
break;
#endif
default:
- printk(KERN_WARNING "ipmi_si: Invalid register size: %d\n",
- info->io.regsize);
+ dev_warn(info->dev, "Invalid register size: %d\n",
+ info->io.regsize);
return -EINVAL;
}
@@ -1755,7 +1776,7 @@ static int hotmod_handler(const char *val, struct kernel_param *kp)
goto out;
}
- info->addr_source = "hotmod";
+ info->addr_source = SI_HOTMOD;
info->si_type = si_type;
info->io.addr_data = addr;
info->io.addr_type = addr_space;
@@ -1777,7 +1798,9 @@ static int hotmod_handler(const char *val, struct kernel_param *kp)
info->irq_setup = std_irq_setup;
info->slave_addr = ipmb;
- try_smi_init(info);
+ if (!add_smi(info))
+ if (try_smi_init(info))
+ cleanup_one_si(info);
} else {
/* remove */
struct smi_info *e, *tmp_e;
@@ -1813,7 +1836,8 @@ static __devinit void hardcode_find_bmc(void)
if (!info)
return;
- info->addr_source = "hardcoded";
+ info->addr_source = SI_HARDCODED;
+ printk(KERN_INFO PFX "probing via hardcoded address\n");
if (!si_type[i] || strcmp(si_type[i], "kcs") == 0) {
info->si_type = SI_KCS;
@@ -1822,8 +1846,7 @@ static __devinit void hardcode_find_bmc(void)
} else if (strcmp(si_type[i], "bt") == 0) {
info->si_type = SI_BT;
} else {
- printk(KERN_WARNING
- "ipmi_si: Interface type specified "
+ printk(KERN_WARNING PFX "Interface type specified "
"for interface %d, was invalid: %s\n",
i, si_type[i]);
kfree(info);
@@ -1841,11 +1864,9 @@ static __devinit void hardcode_find_bmc(void)
info->io.addr_data = addrs[i];
info->io.addr_type = IPMI_MEM_ADDR_SPACE;
} else {
- printk(KERN_WARNING
- "ipmi_si: Interface type specified "
- "for interface %d, "
- "but port and address were not set or "
- "set to zero.\n", i);
+ printk(KERN_WARNING PFX "Interface type specified "
+ "for interface %d, but port and address were "
+ "not set or set to zero.\n", i);
kfree(info);
continue;
}
@@ -1863,7 +1884,9 @@ static __devinit void hardcode_find_bmc(void)
info->irq_setup = std_irq_setup;
info->slave_addr = slave_addrs[i];
- try_smi_init(info);
+ if (!add_smi(info))
+ if (try_smi_init(info))
+ cleanup_one_si(info);
}
}
@@ -1923,15 +1946,13 @@ static int acpi_gpe_irq_setup(struct smi_info *info)
&ipmi_acpi_gpe,
info);
if (status != AE_OK) {
- printk(KERN_WARNING
- "ipmi_si: %s unable to claim ACPI GPE %d,"
- " running polled\n",
- DEVICE_NAME, info->irq);
+ dev_warn(info->dev, "%s unable to claim ACPI GPE %d,"
+ " running polled\n", DEVICE_NAME, info->irq);
info->irq = 0;
return -EINVAL;
} else {
info->irq_cleanup = acpi_gpe_irq_cleanup;
- printk(" Using ACPI GPE %d\n", info->irq);
+ dev_info(info->dev, "Using ACPI GPE %d\n", info->irq);
return 0;
}
}
@@ -1989,8 +2010,8 @@ static __devinit int try_init_spmi(struct SPMITable *spmi)
u8 addr_space;
if (spmi->IPMIlegacy != 1) {
- printk(KERN_INFO "IPMI: Bad SPMI legacy %d\n", spmi->IPMIlegacy);
- return -ENODEV;
+ printk(KERN_INFO PFX "Bad SPMI legacy %d\n", spmi->IPMIlegacy);
+ return -ENODEV;
}
if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
@@ -2000,11 +2021,12 @@ static __devinit int try_init_spmi(struct SPMITable *spmi)
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info) {
- printk(KERN_ERR "ipmi_si: Could not allocate SI data (3)\n");
+ printk(KERN_ERR PFX "Could not allocate SI data (3)\n");
return -ENOMEM;
}
- info->addr_source = "SPMI";
+ info->addr_source = SI_SPMI;
+ printk(KERN_INFO PFX "probing via SPMI\n");
/* Figure out the interface type. */
switch (spmi->InterfaceType) {
@@ -2018,8 +2040,8 @@ static __devinit int try_init_spmi(struct SPMITable *spmi)
info->si_type = SI_BT;
break;
default:
- printk(KERN_INFO "ipmi_si: Unknown ACPI/SPMI SI type %d\n",
- spmi->InterfaceType);
+ printk(KERN_INFO PFX "Unknown ACPI/SPMI SI type %d\n",
+ spmi->InterfaceType);
kfree(info);
return -EIO;
}
@@ -2055,13 +2077,12 @@ static __devinit int try_init_spmi(struct SPMITable *spmi)
info->io.addr_type = IPMI_IO_ADDR_SPACE;
} else {
kfree(info);
- printk(KERN_WARNING
- "ipmi_si: Unknown ACPI I/O Address type\n");
+ printk(KERN_WARNING PFX "Unknown ACPI I/O Address type\n");
return -EIO;
}
info->io.addr_data = spmi->addr.address;
- try_smi_init(info);
+ add_smi(info);
return 0;
}
@@ -2093,6 +2114,7 @@ static int __devinit ipmi_pnp_probe(struct pnp_dev *dev,
{
struct acpi_device *acpi_dev;
struct smi_info *info;
+ struct resource *res;
acpi_handle handle;
acpi_status status;
unsigned long long tmp;
@@ -2105,7 +2127,8 @@ static int __devinit ipmi_pnp_probe(struct pnp_dev *dev,
if (!info)
return -ENOMEM;
- info->addr_source = "ACPI";
+ info->addr_source = SI_ACPI;
+ printk(KERN_INFO PFX "probing via ACPI\n");
handle = acpi_dev->handle;
@@ -2125,22 +2148,26 @@ static int __devinit ipmi_pnp_probe(struct pnp_dev *dev,
info->si_type = SI_BT;
break;
default:
- dev_info(&dev->dev, "unknown interface type %lld\n", tmp);
+ dev_info(&dev->dev, "unknown IPMI type %lld\n", tmp);
goto err_free;
}
- if (pnp_port_valid(dev, 0)) {
+ res = pnp_get_resource(dev, IORESOURCE_IO, 0);
+ if (res) {
info->io_setup = port_setup;
info->io.addr_type = IPMI_IO_ADDR_SPACE;
- info->io.addr_data = pnp_port_start(dev, 0);
- } else if (pnp_mem_valid(dev, 0)) {
- info->io_setup = mem_setup;
- info->io.addr_type = IPMI_MEM_ADDR_SPACE;
- info->io.addr_data = pnp_mem_start(dev, 0);
} else {
+ res = pnp_get_resource(dev, IORESOURCE_MEM, 0);
+ if (res) {
+ info->io_setup = mem_setup;
+ info->io.addr_type = IPMI_MEM_ADDR_SPACE;
+ }
+ }
+ if (!res) {
dev_err(&dev->dev, "no I/O or memory address\n");
goto err_free;
}
+ info->io.addr_data = res->start;
info->io.regspacing = DEFAULT_REGSPACING;
info->io.regsize = DEFAULT_REGSPACING;
@@ -2156,10 +2183,14 @@ static int __devinit ipmi_pnp_probe(struct pnp_dev *dev,
info->irq_setup = std_irq_setup;
}
- info->dev = &acpi_dev->dev;
+ info->dev = &dev->dev;
pnp_set_drvdata(dev, info);
- return try_smi_init(info);
+ dev_info(info->dev, "%pR regsize %d spacing %d irq %d\n",
+ res, info->io.regsize, info->io.regspacing,
+ info->irq);
+
+ return add_smi(info);
err_free:
kfree(info);
@@ -2264,12 +2295,12 @@ static __devinit void try_init_dmi(struct dmi_ipmi_data *ipmi_data)
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info) {
- printk(KERN_ERR
- "ipmi_si: Could not allocate SI data\n");
+ printk(KERN_ERR PFX "Could not allocate SI data\n");
return;
}
- info->addr_source = "SMBIOS";
+ info->addr_source = SI_SMBIOS;
+ printk(KERN_INFO PFX "probing via SMBIOS\n");
switch (ipmi_data->type) {
case 0x01: /* KCS */
@@ -2299,8 +2330,7 @@ static __devinit void try_init_dmi(struct dmi_ipmi_data *ipmi_data)
default:
kfree(info);
- printk(KERN_WARNING
- "ipmi_si: Unknown SMBIOS I/O Address type: %d.\n",
+ printk(KERN_WARNING PFX "Unknown SMBIOS I/O Address type: %d\n",
ipmi_data->addr_space);
return;
}
@@ -2318,7 +2348,7 @@ static __devinit void try_init_dmi(struct dmi_ipmi_data *ipmi_data)
if (info->irq)
info->irq_setup = std_irq_setup;
- try_smi_init(info);
+ add_smi(info);
}
static void __devinit dmi_find_bmc(void)
@@ -2368,7 +2398,8 @@ static int __devinit ipmi_pci_probe(struct pci_dev *pdev,
if (!info)
return -ENOMEM;
- info->addr_source = "PCI";
+ info->addr_source = SI_PCI;
+ dev_info(&pdev->dev, "probing via PCI");
switch (class_type) {
case PCI_ERMC_CLASSCODE_TYPE_SMIC:
@@ -2385,15 +2416,13 @@ static int __devinit ipmi_pci_probe(struct pci_dev *pdev,
default:
kfree(info);
- printk(KERN_INFO "ipmi_si: %s: Unknown IPMI type: %d\n",
- pci_name(pdev), class_type);
+ dev_info(&pdev->dev, "Unknown IPMI type: %d\n", class_type);
return -ENOMEM;
}
rv = pci_enable_device(pdev);
if (rv) {
- printk(KERN_ERR "ipmi_si: %s: couldn't enable PCI device\n",
- pci_name(pdev));
+ dev_err(&pdev->dev, "couldn't enable PCI device\n");
kfree(info);
return rv;
}
@@ -2421,7 +2450,11 @@ static int __devinit ipmi_pci_probe(struct pci_dev *pdev,
info->dev = &pdev->dev;
pci_set_drvdata(pdev, info);
- return try_smi_init(info);
+ dev_info(&pdev->dev, "%pR regsize %d spacing %d irq %d\n",
+ &pdev->resource[0], info->io.regsize, info->io.regspacing,
+ info->irq);
+
+ return add_smi(info);
}
static void __devexit ipmi_pci_remove(struct pci_dev *pdev)
@@ -2469,11 +2502,11 @@ static int __devinit ipmi_of_probe(struct of_device *dev,
struct smi_info *info;
struct resource resource;
const int *regsize, *regspacing, *regshift;
- struct device_node *np = dev->node;
+ struct device_node *np = dev->dev.of_node;
int ret;
int proplen;
- dev_info(&dev->dev, PFX "probing via device tree\n");
+ dev_info(&dev->dev, "probing via device tree\n");
ret = of_address_to_resource(np, 0, &resource);
if (ret) {
@@ -2503,12 +2536,12 @@ static int __devinit ipmi_of_probe(struct of_device *dev,
if (!info) {
dev_err(&dev->dev,
- PFX "could not allocate memory for OF probe\n");
+ "could not allocate memory for OF probe\n");
return -ENOMEM;
}
info->si_type = (enum si_type) match->data;
- info->addr_source = "device-tree";
+ info->addr_source = SI_DEVICETREE;
info->irq_setup = std_irq_setup;
if (resource.flags & IORESOURCE_IO) {
@@ -2525,16 +2558,16 @@ static int __devinit ipmi_of_probe(struct of_device *dev,
info->io.regspacing = regspacing ? *regspacing : DEFAULT_REGSPACING;
info->io.regshift = regshift ? *regshift : 0;
- info->irq = irq_of_parse_and_map(dev->node, 0);
+ info->irq = irq_of_parse_and_map(dev->dev.of_node, 0);
info->dev = &dev->dev;
- dev_dbg(&dev->dev, "addr 0x%lx regsize %d spacing %d irq %x\n",
+ dev_dbg(&dev->dev, "addr 0x%lx regsize %d spacing %d irq %d\n",
info->io.addr_data, info->io.regsize, info->io.regspacing,
info->irq);
dev_set_drvdata(&dev->dev, info);
- return try_smi_init(info);
+ return add_smi(info);
}
static int __devexit ipmi_of_remove(struct of_device *dev)
@@ -2555,8 +2588,11 @@ static struct of_device_id ipmi_match[] =
};
static struct of_platform_driver ipmi_of_platform_driver = {
- .name = "ipmi",
- .match_table = ipmi_match,
+ .driver = {
+ .name = "ipmi",
+ .owner = THIS_MODULE,
+ .of_match_table = ipmi_match,
+ },
.probe = ipmi_of_probe,
.remove = __devexit_p(ipmi_of_remove),
};
@@ -2640,9 +2676,8 @@ static int try_enable_event_buffer(struct smi_info *smi_info)
rv = wait_for_msg_done(smi_info);
if (rv) {
- printk(KERN_WARNING
- "ipmi_si: Error getting response from get global,"
- " enables command, the event buffer is not"
+ printk(KERN_WARNING PFX "Error getting response from get"
+ " global enables command, the event buffer is not"
" enabled.\n");
goto out;
}
@@ -2654,10 +2689,8 @@ static int try_enable_event_buffer(struct smi_info *smi_info)
resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD ||
resp[2] != 0) {
- printk(KERN_WARNING
- "ipmi_si: Invalid return from get global"
- " enables command, cannot enable the event"
- " buffer.\n");
+ printk(KERN_WARNING PFX "Invalid return from get global"
+ " enables command, cannot enable the event buffer.\n");
rv = -EINVAL;
goto out;
}
@@ -2673,9 +2706,8 @@ static int try_enable_event_buffer(struct smi_info *smi_info)
rv = wait_for_msg_done(smi_info);
if (rv) {
- printk(KERN_WARNING
- "ipmi_si: Error getting response from set global,"
- " enables command, the event buffer is not"
+ printk(KERN_WARNING PFX "Error getting response from set"
+ " global, enables command, the event buffer is not"
" enabled.\n");
goto out;
}
@@ -2686,10 +2718,8 @@ static int try_enable_event_buffer(struct smi_info *smi_info)
if (resp_len < 3 ||
resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) {
- printk(KERN_WARNING
- "ipmi_si: Invalid return from get global,"
- "enables command, not enable the event"
- " buffer.\n");
+ printk(KERN_WARNING PFX "Invalid return from get global,"
+ "enables command, not enable the event buffer.\n");
rv = -EINVAL;
goto out;
}
@@ -2948,7 +2978,7 @@ static __devinit void default_find_bmc(void)
if (!info)
return;
- info->addr_source = NULL;
+ info->addr_source = SI_DEFAULT;
info->si_type = ipmi_defaults[i].type;
info->io_setup = port_setup;
@@ -2960,14 +2990,16 @@ static __devinit void default_find_bmc(void)
info->io.regsize = DEFAULT_REGSPACING;
info->io.regshift = 0;
- if (try_smi_init(info) == 0) {
- /* Found one... */
- printk(KERN_INFO "ipmi_si: Found default %s state"
- " machine at %s address 0x%lx\n",
- si_to_str[info->si_type],
- addr_space_to_str[info->io.addr_type],
- info->io.addr_data);
- return;
+ if (add_smi(info) == 0) {
+ if ((try_smi_init(info)) == 0) {
+ /* Found one... */
+ printk(KERN_INFO PFX "Found default %s"
+ " state machine at %s address 0x%lx\n",
+ si_to_str[info->si_type],
+ addr_space_to_str[info->io.addr_type],
+ info->io.addr_data);
+ } else
+ cleanup_one_si(info);
}
}
}
@@ -2986,34 +3018,48 @@ static int is_new_interface(struct smi_info *info)
return 1;
}
-static int try_smi_init(struct smi_info *new_smi)
+static int add_smi(struct smi_info *new_smi)
{
- int rv;
- int i;
-
- if (new_smi->addr_source) {
- printk(KERN_INFO "ipmi_si: Trying %s-specified %s state"
- " machine at %s address 0x%lx, slave address 0x%x,"
- " irq %d\n",
- new_smi->addr_source,
- si_to_str[new_smi->si_type],
- addr_space_to_str[new_smi->io.addr_type],
- new_smi->io.addr_data,
- new_smi->slave_addr, new_smi->irq);
- }
+ int rv = 0;
+ printk(KERN_INFO PFX "Adding %s-specified %s state machine",
+ ipmi_addr_src_to_str[new_smi->addr_source],
+ si_to_str[new_smi->si_type]);
mutex_lock(&smi_infos_lock);
if (!is_new_interface(new_smi)) {
- printk(KERN_WARNING "ipmi_si: duplicate interface\n");
+ printk(KERN_CONT PFX "duplicate interface\n");
rv = -EBUSY;
goto out_err;
}
+ printk(KERN_CONT "\n");
+
/* So we know not to free it unless we have allocated one. */
new_smi->intf = NULL;
new_smi->si_sm = NULL;
new_smi->handlers = NULL;
+ list_add_tail(&new_smi->link, &smi_infos);
+
+out_err:
+ mutex_unlock(&smi_infos_lock);
+ return rv;
+}
+
+static int try_smi_init(struct smi_info *new_smi)
+{
+ int rv = 0;
+ int i;
+
+ printk(KERN_INFO PFX "Trying %s-specified %s state"
+ " machine at %s address 0x%lx, slave address 0x%x,"
+ " irq %d\n",
+ ipmi_addr_src_to_str[new_smi->addr_source],
+ si_to_str[new_smi->si_type],
+ addr_space_to_str[new_smi->io.addr_type],
+ new_smi->io.addr_data,
+ new_smi->slave_addr, new_smi->irq);
+
switch (new_smi->si_type) {
case SI_KCS:
new_smi->handlers = &kcs_smi_handlers;
@@ -3036,7 +3082,8 @@ static int try_smi_init(struct smi_info *new_smi)
/* Allocate the state machine's data and initialize it. */
new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
if (!new_smi->si_sm) {
- printk(KERN_ERR "Could not allocate state machine memory\n");
+ printk(KERN_ERR PFX
+ "Could not allocate state machine memory\n");
rv = -ENOMEM;
goto out_err;
}
@@ -3046,7 +3093,7 @@ static int try_smi_init(struct smi_info *new_smi)
/* Now that we know the I/O size, we can set up the I/O. */
rv = new_smi->io_setup(new_smi);
if (rv) {
- printk(KERN_ERR "Could not set up I/O space\n");
+ printk(KERN_ERR PFX "Could not set up I/O space\n");
goto out_err;
}
@@ -3056,8 +3103,7 @@ static int try_smi_init(struct smi_info *new_smi)
/* Do low-level detection first. */
if (new_smi->handlers->detect(new_smi->si_sm)) {
if (new_smi->addr_source)
- printk(KERN_INFO "ipmi_si: Interface detection"
- " failed\n");
+ printk(KERN_INFO PFX "Interface detection failed\n");
rv = -ENODEV;
goto out_err;
}
@@ -3069,7 +3115,7 @@ static int try_smi_init(struct smi_info *new_smi)
rv = try_get_dev_id(new_smi);
if (rv) {
if (new_smi->addr_source)
- printk(KERN_INFO "ipmi_si: There appears to be no BMC"
+ printk(KERN_INFO PFX "There appears to be no BMC"
" at this location\n");
goto out_err;
}
@@ -3085,7 +3131,7 @@ static int try_smi_init(struct smi_info *new_smi)
for (i = 0; i < SI_NUM_STATS; i++)
atomic_set(&new_smi->stats[i], 0);
- new_smi->interrupt_disabled = 0;
+ new_smi->interrupt_disabled = 1;
atomic_set(&new_smi->stop_operation, 0);
new_smi->intf_num = smi_num;
smi_num++;
@@ -3111,9 +3157,8 @@ static int try_smi_init(struct smi_info *new_smi)
new_smi->pdev = platform_device_alloc("ipmi_si",
new_smi->intf_num);
if (!new_smi->pdev) {
- printk(KERN_ERR
- "ipmi_si_intf:"
- " Unable to allocate platform device\n");
+ printk(KERN_ERR PFX
+ "Unable to allocate platform device\n");
goto out_err;
}
new_smi->dev = &new_smi->pdev->dev;
@@ -3121,9 +3166,8 @@ static int try_smi_init(struct smi_info *new_smi)
rv = platform_device_add(new_smi->pdev);
if (rv) {
- printk(KERN_ERR
- "ipmi_si_intf:"
- " Unable to register system interface device:"
+ printk(KERN_ERR PFX
+ "Unable to register system interface device:"
" %d\n",
rv);
goto out_err;
@@ -3138,9 +3182,8 @@ static int try_smi_init(struct smi_info *new_smi)
"bmc",
new_smi->slave_addr);
if (rv) {
- printk(KERN_ERR
- "ipmi_si: Unable to register device: error %d\n",
- rv);
+ dev_err(new_smi->dev, "Unable to register device: error %d\n",
+ rv);
goto out_err_stop_timer;
}
@@ -3148,9 +3191,7 @@ static int try_smi_init(struct smi_info *new_smi)
type_file_read_proc,
new_smi);
if (rv) {
- printk(KERN_ERR
- "ipmi_si: Unable to create proc entry: %d\n",
- rv);
+ dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv);
goto out_err_stop_timer;
}
@@ -3158,9 +3199,7 @@ static int try_smi_init(struct smi_info *new_smi)
stat_file_read_proc,
new_smi);
if (rv) {
- printk(KERN_ERR
- "ipmi_si: Unable to create proc entry: %d\n",
- rv);
+ dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv);
goto out_err_stop_timer;
}
@@ -3168,18 +3207,12 @@ static int try_smi_init(struct smi_info *new_smi)
param_read_proc,
new_smi);
if (rv) {
- printk(KERN_ERR
- "ipmi_si: Unable to create proc entry: %d\n",
- rv);
+ dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv);
goto out_err_stop_timer;
}
- list_add_tail(&new_smi->link, &smi_infos);
-
- mutex_unlock(&smi_infos_lock);
-
- printk(KERN_INFO "IPMI %s interface initialized\n",
- si_to_str[new_smi->si_type]);
+ dev_info(new_smi->dev, "IPMI %s interface initialized\n",
+ si_to_str[new_smi->si_type]);
return 0;
@@ -3188,11 +3221,17 @@ static int try_smi_init(struct smi_info *new_smi)
wait_for_timer_and_thread(new_smi);
out_err:
- if (new_smi->intf)
+ new_smi->interrupt_disabled = 1;
+
+ if (new_smi->intf) {
ipmi_unregister_smi(new_smi->intf);
+ new_smi->intf = NULL;
+ }
- if (new_smi->irq_cleanup)
+ if (new_smi->irq_cleanup) {
new_smi->irq_cleanup(new_smi);
+ new_smi->irq_cleanup = NULL;
+ }
/*
* Wait until we know that we are out of any interrupt
@@ -3205,18 +3244,21 @@ static int try_smi_init(struct smi_info *new_smi)
if (new_smi->handlers)
new_smi->handlers->cleanup(new_smi->si_sm);
kfree(new_smi->si_sm);
+ new_smi->si_sm = NULL;
}
- if (new_smi->addr_source_cleanup)
+ if (new_smi->addr_source_cleanup) {
new_smi->addr_source_cleanup(new_smi);
- if (new_smi->io_cleanup)
+ new_smi->addr_source_cleanup = NULL;
+ }
+ if (new_smi->io_cleanup) {
new_smi->io_cleanup(new_smi);
+ new_smi->io_cleanup = NULL;
+ }
- if (new_smi->dev_registered)
+ if (new_smi->dev_registered) {
platform_device_unregister(new_smi->pdev);
-
- kfree(new_smi);
-
- mutex_unlock(&smi_infos_lock);
+ new_smi->dev_registered = 0;
+ }
return rv;
}
@@ -3226,6 +3268,8 @@ static __devinit int init_ipmi_si(void)
int i;
char *str;
int rv;
+ struct smi_info *e;
+ enum ipmi_addr_src type = SI_INVALID;
if (initialized)
return 0;
@@ -3234,9 +3278,7 @@ static __devinit int init_ipmi_si(void)
/* Register the device drivers. */
rv = driver_register(&ipmi_driver.driver);
if (rv) {
- printk(KERN_ERR
- "init_ipmi_si: Unable to register driver: %d\n",
- rv);
+ printk(KERN_ERR PFX "Unable to register driver: %d\n", rv);
return rv;
}
@@ -3260,38 +3302,81 @@ static __devinit int init_ipmi_si(void)
hardcode_find_bmc();
-#ifdef CONFIG_DMI
- dmi_find_bmc();
-#endif
+ /* If the user gave us a device, they presumably want us to use it */
+ mutex_lock(&smi_infos_lock);
+ if (!list_empty(&smi_infos)) {
+ mutex_unlock(&smi_infos_lock);
+ return 0;
+ }
+ mutex_unlock(&smi_infos_lock);
-#ifdef CONFIG_ACPI
- spmi_find_bmc();
+#ifdef CONFIG_PCI
+ rv = pci_register_driver(&ipmi_pci_driver);
+ if (rv)
+ printk(KERN_ERR PFX "Unable to register PCI driver: %d\n", rv);
#endif
+
#ifdef CONFIG_ACPI
pnp_register_driver(&ipmi_pnp_driver);
#endif
-#ifdef CONFIG_PCI
- rv = pci_register_driver(&ipmi_pci_driver);
- if (rv)
- printk(KERN_ERR
- "init_ipmi_si: Unable to register PCI driver: %d\n",
- rv);
+#ifdef CONFIG_DMI
+ dmi_find_bmc();
+#endif
+
+#ifdef CONFIG_ACPI
+ spmi_find_bmc();
#endif
#ifdef CONFIG_PPC_OF
of_register_platform_driver(&ipmi_of_platform_driver);
#endif
+ /* We prefer devices with interrupts, but in the case of a machine
+ with multiple BMCs we assume that there will be several instances
+ of a given type so if we succeed in registering a type then also
+ try to register everything else of the same type */
+
+ mutex_lock(&smi_infos_lock);
+ list_for_each_entry(e, &smi_infos, link) {
+ /* Try to register a device if it has an IRQ and we either
+ haven't successfully registered a device yet or this
+ device has the same type as one we successfully registered */
+ if (e->irq && (!type || e->addr_source == type)) {
+ if (!try_smi_init(e)) {
+ type = e->addr_source;
+ }
+ }
+ }
+
+ /* type will only have been set if we successfully registered an si */
+ if (type) {
+ mutex_unlock(&smi_infos_lock);
+ return 0;
+ }
+
+ /* Fall back to the preferred device */
+
+ list_for_each_entry(e, &smi_infos, link) {
+ if (!e->irq && (!type || e->addr_source == type)) {
+ if (!try_smi_init(e)) {
+ type = e->addr_source;
+ }
+ }
+ }
+ mutex_unlock(&smi_infos_lock);
+
+ if (type)
+ return 0;
+
if (si_trydefaults) {
mutex_lock(&smi_infos_lock);
if (list_empty(&smi_infos)) {
/* No BMC was found, try defaults. */
mutex_unlock(&smi_infos_lock);
default_find_bmc();
- } else {
+ } else
mutex_unlock(&smi_infos_lock);
- }
}
mutex_lock(&smi_infos_lock);
@@ -3305,8 +3390,8 @@ static __devinit int init_ipmi_si(void)
of_unregister_platform_driver(&ipmi_of_platform_driver);
#endif
driver_unregister(&ipmi_driver.driver);
- printk(KERN_WARNING
- "ipmi_si: Unable to find any System Interface(s)\n");
+ printk(KERN_WARNING PFX
+ "Unable to find any System Interface(s)\n");
return -ENODEV;
} else {
mutex_unlock(&smi_infos_lock);
@@ -3317,7 +3402,7 @@ module_init(init_ipmi_si);
static void cleanup_one_si(struct smi_info *to_clean)
{
- int rv;
+ int rv = 0;
unsigned long flags;
if (!to_clean)
@@ -3361,14 +3446,16 @@ static void cleanup_one_si(struct smi_info *to_clean)
schedule_timeout_uninterruptible(1);
}
- rv = ipmi_unregister_smi(to_clean->intf);
+ if (to_clean->intf)
+ rv = ipmi_unregister_smi(to_clean->intf);
+
if (rv) {
- printk(KERN_ERR
- "ipmi_si: Unable to unregister device: errno=%d\n",
+ printk(KERN_ERR PFX "Unable to unregister device: errno=%d\n",
rv);
}
- to_clean->handlers->cleanup(to_clean->si_sm);
+ if (to_clean->handlers)
+ to_clean->handlers->cleanup(to_clean->si_sm);
kfree(to_clean->si_sm);
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index a4d57e31f713..82bcdb262a3a 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -659,7 +659,7 @@ static struct watchdog_info ident = {
.identity = "IPMI"
};
-static int ipmi_ioctl(struct inode *inode, struct file *file,
+static int ipmi_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
void __user *argp = (void __user *)arg;
@@ -730,6 +730,19 @@ static int ipmi_ioctl(struct inode *inode, struct file *file,
}
}
+static long ipmi_unlocked_ioctl(struct file *file,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ int ret;
+
+ lock_kernel();
+ ret = ipmi_ioctl(file, cmd, arg);
+ unlock_kernel();
+
+ return ret;
+}
+
static ssize_t ipmi_write(struct file *file,
const char __user *buf,
size_t len,
@@ -880,7 +893,7 @@ static const struct file_operations ipmi_wdog_fops = {
.read = ipmi_read,
.poll = ipmi_poll,
.write = ipmi_write,
- .ioctl = ipmi_ioctl,
+ .unlocked_ioctl = ipmi_unlocked_ioctl,
.open = ipmi_open,
.release = ipmi_close,
.fasync = ipmi_fasync,
diff --git a/drivers/char/isicom.c b/drivers/char/isicom.c
index c1ab303455cf..98310e1aae30 100644
--- a/drivers/char/isicom.c
+++ b/drivers/char/isicom.c
@@ -1573,11 +1573,16 @@ static int __devinit isicom_probe(struct pci_dev *pdev,
dev_info(&pdev->dev, "ISI PCI Card(Device ID 0x%x)\n", ent->device);
/* allot the first empty slot in the array */
- for (index = 0; index < BOARD_COUNT; index++)
+ for (index = 0; index < BOARD_COUNT; index++) {
if (isi_card[index].base == 0) {
board = &isi_card[index];
break;
}
+ }
+ if (index == BOARD_COUNT) {
+ retval = -ENODEV;
+ goto err_disable;
+ }
board->index = index;
board->base = pci_resource_start(pdev, 3);
@@ -1624,6 +1629,7 @@ errunrr:
errdec:
board->base = 0;
card_count--;
+err_disable:
pci_disable_device(pdev);
err:
return retval;
diff --git a/drivers/char/keyboard.c b/drivers/char/keyboard.c
index ada25bb8941e..54109dc9240c 100644
--- a/drivers/char/keyboard.c
+++ b/drivers/char/keyboard.c
@@ -24,6 +24,8 @@
* 21-08-02: Converted to input API, major cleanup. (Vojtech Pavlik)
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/consolemap.h>
#include <linux/module.h>
#include <linux/sched.h>
@@ -38,7 +40,6 @@
#include <linux/kbd_kern.h>
#include <linux/kbd_diacr.h>
#include <linux/vt_kern.h>
-#include <linux/sysrq.h>
#include <linux/input.h>
#include <linux/reboot.h>
#include <linux/notifier.h>
@@ -82,8 +83,7 @@ void compute_shiftstate(void);
typedef void (k_handler_fn)(struct vc_data *vc, unsigned char value,
char up_flag);
static k_handler_fn K_HANDLERS;
-k_handler_fn *k_handler[16] = { K_HANDLERS };
-EXPORT_SYMBOL_GPL(k_handler);
+static k_handler_fn *k_handler[16] = { K_HANDLERS };
#define FN_HANDLERS\
fn_null, fn_enter, fn_show_ptregs, fn_show_mem,\
@@ -133,7 +133,7 @@ static struct input_handler kbd_handler;
static DEFINE_SPINLOCK(kbd_event_lock);
static unsigned long key_down[BITS_TO_LONGS(KEY_CNT)]; /* keyboard key bitmap */
static unsigned char shift_down[NR_SHIFT]; /* shift state counters.. */
-static int dead_key_next;
+static bool dead_key_next;
static int npadch = -1; /* -1 or number assembled on pad */
static unsigned int diacr;
static char rep; /* flag telling character repeat */
@@ -147,22 +147,6 @@ static struct ledptr {
unsigned char valid:1;
} ledptrs[3];
-/* Simple translation table for the SysRq keys */
-
-#ifdef CONFIG_MAGIC_SYSRQ
-unsigned char kbd_sysrq_xlate[KEY_MAX + 1] =
- "\000\0331234567890-=\177\t" /* 0x00 - 0x0f */
- "qwertyuiop[]\r\000as" /* 0x10 - 0x1f */
- "dfghjkl;'`\000\\zxcv" /* 0x20 - 0x2f */
- "bnm,./\000*\000 \000\201\202\203\204\205" /* 0x30 - 0x3f */
- "\206\207\210\211\212\000\000789-456+1" /* 0x40 - 0x4f */
- "230\177\000\000\213\214\000\000\000\000\000\000\000\000\000\000" /* 0x50 - 0x5f */
- "\r\000/"; /* 0x60 - 0x6f */
-static int sysrq_down;
-static int sysrq_alt_use;
-#endif
-static int sysrq_alt;
-
/*
* Notifier list for console keyboard events
*/
@@ -361,8 +345,8 @@ static void to_utf8(struct vc_data *vc, uint c)
/* 110***** 10****** */
put_queue(vc, 0xc0 | (c >> 6));
put_queue(vc, 0x80 | (c & 0x3f));
- } else if (c < 0x10000) {
- if (c >= 0xD800 && c < 0xE000)
+ } else if (c < 0x10000) {
+ if (c >= 0xD800 && c < 0xE000)
return;
if (c == 0xFFFF)
return;
@@ -370,7 +354,7 @@ static void to_utf8(struct vc_data *vc, uint c)
put_queue(vc, 0xe0 | (c >> 12));
put_queue(vc, 0x80 | ((c >> 6) & 0x3f));
put_queue(vc, 0x80 | (c & 0x3f));
- } else if (c < 0x110000) {
+ } else if (c < 0x110000) {
/* 11110*** 10****** 10****** 10****** */
put_queue(vc, 0xf0 | (c >> 18));
put_queue(vc, 0x80 | ((c >> 12) & 0x3f));
@@ -469,6 +453,7 @@ static void fn_enter(struct vc_data *vc)
}
diacr = 0;
}
+
put_queue(vc, 13);
if (vc_kbd_mode(kbd, VC_CRLF))
put_queue(vc, 10);
@@ -478,6 +463,7 @@ static void fn_caps_toggle(struct vc_data *vc)
{
if (rep)
return;
+
chg_vc_kbd_led(kbd, VC_CAPSLOCK);
}
@@ -485,12 +471,14 @@ static void fn_caps_on(struct vc_data *vc)
{
if (rep)
return;
+
set_vc_kbd_led(kbd, VC_CAPSLOCK);
}
static void fn_show_ptregs(struct vc_data *vc)
{
struct pt_regs *regs = get_irq_regs();
+
if (regs)
show_regs(regs);
}
@@ -515,7 +503,7 @@ static void fn_hold(struct vc_data *vc)
static void fn_num(struct vc_data *vc)
{
- if (vc_kbd_mode(kbd,VC_APPLIC))
+ if (vc_kbd_mode(kbd, VC_APPLIC))
applkey(vc, 'P', 1);
else
fn_bare_num(vc);
@@ -610,7 +598,7 @@ static void fn_boot_it(struct vc_data *vc)
static void fn_compose(struct vc_data *vc)
{
- dead_key_next = 1;
+ dead_key_next = true;
}
static void fn_spawn_con(struct vc_data *vc)
@@ -657,7 +645,7 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
static void k_lowercase(struct vc_data *vc, unsigned char value, char up_flag)
{
- printk(KERN_ERR "keyboard.c: k_lowercase was called - impossible\n");
+ pr_err("k_lowercase was called - impossible\n");
}
static void k_unicode(struct vc_data *vc, unsigned int value, char up_flag)
@@ -669,7 +657,7 @@ static void k_unicode(struct vc_data *vc, unsigned int value, char up_flag)
value = handle_diacr(vc, value);
if (dead_key_next) {
- dead_key_next = 0;
+ dead_key_next = false;
diacr = value;
return;
}
@@ -691,6 +679,7 @@ static void k_deadunicode(struct vc_data *vc, unsigned int value, char up_flag)
{
if (up_flag)
return;
+
diacr = (diacr ? handle_diacr(vc, value) : value);
}
@@ -710,29 +699,28 @@ static void k_dead2(struct vc_data *vc, unsigned char value, char up_flag)
static void k_dead(struct vc_data *vc, unsigned char value, char up_flag)
{
static const unsigned char ret_diacr[NR_DEAD] = {'`', '\'', '^', '~', '"', ',' };
- value = ret_diacr[value];
- k_deadunicode(vc, value, up_flag);
+
+ k_deadunicode(vc, ret_diacr[value], up_flag);
}
static void k_cons(struct vc_data *vc, unsigned char value, char up_flag)
{
if (up_flag)
return;
+
set_console(value);
}
static void k_fn(struct vc_data *vc, unsigned char value, char up_flag)
{
- unsigned v;
-
if (up_flag)
return;
- v = value;
- if (v < ARRAY_SIZE(func_table)) {
+
+ if ((unsigned)value < ARRAY_SIZE(func_table)) {
if (func_table[value])
puts_queue(vc, func_table[value]);
} else
- printk(KERN_ERR "k_fn called with value=%d\n", value);
+ pr_err("k_fn called with value=%d\n", value);
}
static void k_cur(struct vc_data *vc, unsigned char value, char up_flag)
@@ -741,6 +729,7 @@ static void k_cur(struct vc_data *vc, unsigned char value, char up_flag)
if (up_flag)
return;
+
applkey(vc, cur_chars[value], vc_kbd_mode(kbd, VC_CKMODE));
}
@@ -758,43 +747,45 @@ static void k_pad(struct vc_data *vc, unsigned char value, char up_flag)
return;
}
- if (!vc_kbd_led(kbd, VC_NUMLOCK))
+ if (!vc_kbd_led(kbd, VC_NUMLOCK)) {
+
switch (value) {
- case KVAL(K_PCOMMA):
- case KVAL(K_PDOT):
- k_fn(vc, KVAL(K_REMOVE), 0);
- return;
- case KVAL(K_P0):
- k_fn(vc, KVAL(K_INSERT), 0);
- return;
- case KVAL(K_P1):
- k_fn(vc, KVAL(K_SELECT), 0);
- return;
- case KVAL(K_P2):
- k_cur(vc, KVAL(K_DOWN), 0);
- return;
- case KVAL(K_P3):
- k_fn(vc, KVAL(K_PGDN), 0);
- return;
- case KVAL(K_P4):
- k_cur(vc, KVAL(K_LEFT), 0);
- return;
- case KVAL(K_P6):
- k_cur(vc, KVAL(K_RIGHT), 0);
- return;
- case KVAL(K_P7):
- k_fn(vc, KVAL(K_FIND), 0);
- return;
- case KVAL(K_P8):
- k_cur(vc, KVAL(K_UP), 0);
- return;
- case KVAL(K_P9):
- k_fn(vc, KVAL(K_PGUP), 0);
- return;
- case KVAL(K_P5):
- applkey(vc, 'G', vc_kbd_mode(kbd, VC_APPLIC));
- return;
+ case KVAL(K_PCOMMA):
+ case KVAL(K_PDOT):
+ k_fn(vc, KVAL(K_REMOVE), 0);
+ return;
+ case KVAL(K_P0):
+ k_fn(vc, KVAL(K_INSERT), 0);
+ return;
+ case KVAL(K_P1):
+ k_fn(vc, KVAL(K_SELECT), 0);
+ return;
+ case KVAL(K_P2):
+ k_cur(vc, KVAL(K_DOWN), 0);
+ return;
+ case KVAL(K_P3):
+ k_fn(vc, KVAL(K_PGDN), 0);
+ return;
+ case KVAL(K_P4):
+ k_cur(vc, KVAL(K_LEFT), 0);
+ return;
+ case KVAL(K_P6):
+ k_cur(vc, KVAL(K_RIGHT), 0);
+ return;
+ case KVAL(K_P7):
+ k_fn(vc, KVAL(K_FIND), 0);
+ return;
+ case KVAL(K_P8):
+ k_cur(vc, KVAL(K_UP), 0);
+ return;
+ case KVAL(K_P9):
+ k_fn(vc, KVAL(K_PGUP), 0);
+ return;
+ case KVAL(K_P5):
+ applkey(vc, 'G', vc_kbd_mode(kbd, VC_APPLIC));
+ return;
}
+ }
put_queue(vc, pad_chars[value]);
if (value == KVAL(K_PENTER) && vc_kbd_mode(kbd, VC_CRLF))
@@ -880,6 +871,7 @@ static void k_lock(struct vc_data *vc, unsigned char value, char up_flag)
{
if (up_flag || rep)
return;
+
chg_vc_kbd_lock(kbd, value);
}
@@ -888,6 +880,7 @@ static void k_slock(struct vc_data *vc, unsigned char value, char up_flag)
k_shift(vc, value, up_flag);
if (up_flag || rep)
return;
+
chg_vc_kbd_slock(kbd, value);
/* try to make Alt, oops, AltGr and such work */
if (!key_maps[kbd->lockstate ^ kbd->slockstate]) {
@@ -925,12 +918,12 @@ static void k_brlcommit(struct vc_data *vc, unsigned int pattern, char up_flag)
static void k_brl(struct vc_data *vc, unsigned char value, char up_flag)
{
- static unsigned pressed,committing;
+ static unsigned pressed, committing;
static unsigned long releasestart;
if (kbd->kbdmode != VC_UNICODE) {
if (!up_flag)
- printk("keyboard mode must be unicode for braille patterns\n");
+ pr_warning("keyboard mode must be unicode for braille patterns\n");
return;
}
@@ -942,32 +935,28 @@ static void k_brl(struct vc_data *vc, unsigned char value, char up_flag)
if (value > 8)
return;
- if (up_flag) {
- if (brl_timeout) {
- if (!committing ||
- time_after(jiffies,
- releasestart + msecs_to_jiffies(brl_timeout))) {
- committing = pressed;
- releasestart = jiffies;
- }
- pressed &= ~(1 << (value - 1));
- if (!pressed) {
- if (committing) {
- k_brlcommit(vc, committing, 0);
- committing = 0;
- }
- }
- } else {
- if (committing) {
- k_brlcommit(vc, committing, 0);
- committing = 0;
- }
- pressed &= ~(1 << (value - 1));
- }
- } else {
+ if (!up_flag) {
pressed |= 1 << (value - 1);
if (!brl_timeout)
committing = pressed;
+ } else if (brl_timeout) {
+ if (!committing ||
+ time_after(jiffies,
+ releasestart + msecs_to_jiffies(brl_timeout))) {
+ committing = pressed;
+ releasestart = jiffies;
+ }
+ pressed &= ~(1 << (value - 1));
+ if (!pressed && committing) {
+ k_brlcommit(vc, committing, 0);
+ committing = 0;
+ }
+ } else {
+ if (committing) {
+ k_brlcommit(vc, committing, 0);
+ committing = 0;
+ }
+ pressed &= ~(1 << (value - 1));
}
}
@@ -988,6 +977,7 @@ void setledstate(struct kbd_struct *kbd, unsigned int led)
kbd->ledmode = LED_SHOW_IOCTL;
} else
kbd->ledmode = LED_SHOW_FLAGS;
+
set_leds();
}
@@ -1075,7 +1065,7 @@ static const unsigned short x86_keycodes[256] =
332,340,365,342,343,344,345,346,356,270,341,368,369,370,371,372 };
#ifdef CONFIG_SPARC
-static int sparc_l1_a_state = 0;
+static int sparc_l1_a_state;
extern void sun_do_break(void);
#endif
@@ -1085,52 +1075,54 @@ static int emulate_raw(struct vc_data *vc, unsigned int keycode,
int code;
switch (keycode) {
- case KEY_PAUSE:
- put_queue(vc, 0xe1);
- put_queue(vc, 0x1d | up_flag);
- put_queue(vc, 0x45 | up_flag);
- break;
- case KEY_HANGEUL:
- if (!up_flag)
- put_queue(vc, 0xf2);
- break;
+ case KEY_PAUSE:
+ put_queue(vc, 0xe1);
+ put_queue(vc, 0x1d | up_flag);
+ put_queue(vc, 0x45 | up_flag);
+ break;
- case KEY_HANJA:
- if (!up_flag)
- put_queue(vc, 0xf1);
- break;
+ case KEY_HANGEUL:
+ if (!up_flag)
+ put_queue(vc, 0xf2);
+ break;
- case KEY_SYSRQ:
- /*
- * Real AT keyboards (that's what we're trying
- * to emulate here emit 0xe0 0x2a 0xe0 0x37 when
- * pressing PrtSc/SysRq alone, but simply 0x54
- * when pressing Alt+PrtSc/SysRq.
- */
- if (sysrq_alt) {
- put_queue(vc, 0x54 | up_flag);
- } else {
- put_queue(vc, 0xe0);
- put_queue(vc, 0x2a | up_flag);
- put_queue(vc, 0xe0);
- put_queue(vc, 0x37 | up_flag);
- }
- break;
+ case KEY_HANJA:
+ if (!up_flag)
+ put_queue(vc, 0xf1);
+ break;
- default:
- if (keycode > 255)
- return -1;
+ case KEY_SYSRQ:
+ /*
+ * Real AT keyboards (that's what we're trying
+ * to emulate here emit 0xe0 0x2a 0xe0 0x37 when
+ * pressing PrtSc/SysRq alone, but simply 0x54
+ * when pressing Alt+PrtSc/SysRq.
+ */
+ if (test_bit(KEY_LEFTALT, key_down) ||
+ test_bit(KEY_RIGHTALT, key_down)) {
+ put_queue(vc, 0x54 | up_flag);
+ } else {
+ put_queue(vc, 0xe0);
+ put_queue(vc, 0x2a | up_flag);
+ put_queue(vc, 0xe0);
+ put_queue(vc, 0x37 | up_flag);
+ }
+ break;
- code = x86_keycodes[keycode];
- if (!code)
- return -1;
+ default:
+ if (keycode > 255)
+ return -1;
- if (code & 0x100)
- put_queue(vc, 0xe0);
- put_queue(vc, (code & 0x7f) | up_flag);
+ code = x86_keycodes[keycode];
+ if (!code)
+ return -1;
- break;
+ if (code & 0x100)
+ put_queue(vc, 0xe0);
+ put_queue(vc, (code & 0x7f) | up_flag);
+
+ break;
}
return 0;
@@ -1153,6 +1145,7 @@ static int emulate_raw(struct vc_data *vc, unsigned int keycode, unsigned char u
static void kbd_rawcode(unsigned char data)
{
struct vc_data *vc = vc_cons[fg_console].d;
+
kbd = kbd_table + vc->vc_num;
if (kbd->kbdmode == VC_RAW)
put_queue(vc, data);
@@ -1162,10 +1155,12 @@ static void kbd_keycode(unsigned int keycode, int down, int hw_raw)
{
struct vc_data *vc = vc_cons[fg_console].d;
unsigned short keysym, *key_map;
- unsigned char type, raw_mode;
+ unsigned char type;
+ bool raw_mode;
struct tty_struct *tty;
int shift_final;
struct keyboard_notifier_param param = { .vc = vc, .value = keycode, .down = down };
+ int rc;
tty = vc->vc_tty;
@@ -1176,8 +1171,6 @@ static void kbd_keycode(unsigned int keycode, int down, int hw_raw)
kbd = kbd_table + vc->vc_num;
- if (keycode == KEY_LEFTALT || keycode == KEY_RIGHTALT)
- sysrq_alt = down ? keycode : 0;
#ifdef CONFIG_SPARC
if (keycode == KEY_STOP)
sparc_l1_a_state = down;
@@ -1185,29 +1178,16 @@ static void kbd_keycode(unsigned int keycode, int down, int hw_raw)
rep = (down == 2);
- if ((raw_mode = (kbd->kbdmode == VC_RAW)) && !hw_raw)
+ raw_mode = (kbd->kbdmode == VC_RAW);
+ if (raw_mode && !hw_raw)
if (emulate_raw(vc, keycode, !down << 7))
if (keycode < BTN_MISC && printk_ratelimit())
- printk(KERN_WARNING "keyboard.c: can't emulate rawmode for keycode %d\n", keycode);
+ pr_warning("can't emulate rawmode for keycode %d\n",
+ keycode);
-#ifdef CONFIG_MAGIC_SYSRQ /* Handle the SysRq Hack */
- if (keycode == KEY_SYSRQ && (sysrq_down || (down == 1 && sysrq_alt))) {
- if (!sysrq_down) {
- sysrq_down = down;
- sysrq_alt_use = sysrq_alt;
- }
- return;
- }
- if (sysrq_down && !down && keycode == sysrq_alt_use)
- sysrq_down = 0;
- if (sysrq_down && down && !rep) {
- handle_sysrq(kbd_sysrq_xlate[keycode], tty);
- return;
- }
-#endif
#ifdef CONFIG_SPARC
if (keycode == KEY_A && sparc_l1_a_state) {
- sparc_l1_a_state = 0;
+ sparc_l1_a_state = false;
sun_do_break();
}
#endif
@@ -1229,7 +1209,7 @@ static void kbd_keycode(unsigned int keycode, int down, int hw_raw)
put_queue(vc, (keycode >> 7) | 0x80);
put_queue(vc, keycode | 0x80);
}
- raw_mode = 1;
+ raw_mode = true;
}
if (down)
@@ -1252,29 +1232,32 @@ static void kbd_keycode(unsigned int keycode, int down, int hw_raw)
param.ledstate = kbd->ledflagstate;
key_map = key_maps[shift_final];
- if (atomic_notifier_call_chain(&keyboard_notifier_list, KBD_KEYCODE, &param) == NOTIFY_STOP || !key_map) {
- atomic_notifier_call_chain(&keyboard_notifier_list, KBD_UNBOUND_KEYCODE, &param);
+ rc = atomic_notifier_call_chain(&keyboard_notifier_list,
+ KBD_KEYCODE, &param);
+ if (rc == NOTIFY_STOP || !key_map) {
+ atomic_notifier_call_chain(&keyboard_notifier_list,
+ KBD_UNBOUND_KEYCODE, &param);
compute_shiftstate();
kbd->slockstate = 0;
return;
}
- if (keycode >= NR_KEYS)
- if (keycode >= KEY_BRL_DOT1 && keycode <= KEY_BRL_DOT8)
- keysym = U(K(KT_BRL, keycode - KEY_BRL_DOT1 + 1));
- else
- return;
- else
+ if (keycode < NR_KEYS)
keysym = key_map[keycode];
+ else if (keycode >= KEY_BRL_DOT1 && keycode <= KEY_BRL_DOT8)
+ keysym = U(K(KT_BRL, keycode - KEY_BRL_DOT1 + 1));
+ else
+ return;
type = KTYP(keysym);
if (type < 0xf0) {
param.value = keysym;
- if (atomic_notifier_call_chain(&keyboard_notifier_list, KBD_UNICODE, &param) == NOTIFY_STOP)
- return;
- if (down && !raw_mode)
- to_utf8(vc, keysym);
+ rc = atomic_notifier_call_chain(&keyboard_notifier_list,
+ KBD_UNICODE, &param);
+ if (rc != NOTIFY_STOP)
+ if (down && !raw_mode)
+ to_utf8(vc, keysym);
return;
}
@@ -1288,9 +1271,11 @@ static void kbd_keycode(unsigned int keycode, int down, int hw_raw)
keysym = key_map[keycode];
}
}
- param.value = keysym;
- if (atomic_notifier_call_chain(&keyboard_notifier_list, KBD_KEYSYM, &param) == NOTIFY_STOP)
+ param.value = keysym;
+ rc = atomic_notifier_call_chain(&keyboard_notifier_list,
+ KBD_KEYSYM, &param);
+ if (rc == NOTIFY_STOP)
return;
if (raw_mode && type != KT_SPEC && type != KT_SHIFT)
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
index 92ab03d28294..cd650ca8c679 100644
--- a/drivers/char/misc.c
+++ b/drivers/char/misc.c
@@ -144,6 +144,7 @@ static int misc_open(struct inode * inode, struct file * file)
old_fops = file->f_op;
file->f_op = new_fops;
if (file->f_op->open) {
+ file->private_data = c;
err=file->f_op->open(inode,file);
if (err) {
fops_put(file->f_op);
diff --git a/drivers/char/n_gsm.c b/drivers/char/n_gsm.c
new file mode 100644
index 000000000000..c4161d5e053d
--- /dev/null
+++ b/drivers/char/n_gsm.c
@@ -0,0 +1,2763 @@
+/*
+ * n_gsm.c GSM 0710 tty multiplexor
+ * Copyright (c) 2009/10 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * * THIS IS A DEVELOPMENT SNAPSHOT IT IS NOT A FINAL RELEASE *
+ *
+ * TO DO:
+ * Mostly done: ioctls for setting modes/timing
+ * Partly done: hooks so you can pull off frames to non tty devs
+ * Restart DLCI 0 when it closes ?
+ * Test basic encoding
+ * Improve the tx engine
+ * Resolve tx side locking by adding a queue_head and routing
+ * all control traffic via it
+ * General tidy/document
+ * Review the locking/move to refcounts more (mux now moved to an
+ * alloc/free model ready)
+ * Use newest tty open/close port helpers and install hooks
+ * What to do about power functions ?
+ * Termios setting and negotiation
+ * Do we need a 'which mux are you' ioctl to correlate mux and tty sets
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/major.h>
+#include <linux/errno.h>
+#include <linux/signal.h>
+#include <linux/fcntl.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/tty.h>
+#include <linux/timer.h>
+#include <linux/ctype.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/bitops.h>
+#include <linux/file.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/tty_flip.h>
+#include <linux/tty_driver.h>
+#include <linux/serial.h>
+#include <linux/kfifo.h>
+#include <linux/skbuff.h>
+#include <linux/gsmmux.h>
+
+static int debug;
+module_param(debug, int, 0600);
+
+#define T1 (HZ/10)
+#define T2 (HZ/3)
+#define N2 3
+
+/* Use long timers for testing at low speed with debug on */
+#ifdef DEBUG_TIMING
+#define T1 HZ
+#define T2 (2 * HZ)
+#endif
+
+/* Semi-arbitrary buffer size limits. 0710 is normally run with 32-64 byte
+ limits so this is plenty */
+#define MAX_MRU 512
+#define MAX_MTU 512
+
+/*
+ * Each block of data we have queued to go out is in the form of
+ * a gsm_msg which holds everything we need in a link layer independent
+ * format
+ */
+
+struct gsm_msg {
+ struct gsm_msg *next;
+ u8 addr; /* DLCI address + flags */
+ u8 ctrl; /* Control byte + flags */
+ unsigned int len; /* Length of data block (can be zero) */
+ unsigned char *data; /* Points into buffer but not at the start */
+ unsigned char buffer[0];
+};
+
+/*
+ * Each active data link has a gsm_dlci structure associated which ties
+ * the link layer to an optional tty (if the tty side is open). To avoid
+ * complexity right now these are only ever freed up when the mux is
+ * shut down.
+ *
+ * At the moment we don't free DLCI objects until the mux is torn down;
+ * this avoids object lifetime issues but might be worth reviewing later.
+ */
+
+struct gsm_dlci {
+ struct gsm_mux *gsm;
+ int addr;
+ int state;
+#define DLCI_CLOSED 0
+#define DLCI_OPENING 1 /* Sending SABM not seen UA */
+#define DLCI_OPEN 2 /* SABM/UA complete */
+#define DLCI_CLOSING 3 /* Sending DISC not seen UA/DM */
+
+ /* Link layer */
+ spinlock_t lock; /* Protects the internal state */
+ struct timer_list t1; /* Retransmit timer for SABM and UA */
+ int retries;
+ /* Uplink tty if active */
+ struct tty_port port; /* The tty bound to this DLCI if there is one */
+ struct kfifo *fifo; /* Queue fifo for the DLCI */
+ struct kfifo _fifo; /* For new fifo API porting only */
+ int adaption; /* Adaption layer in use */
+ u32 modem_rx; /* Our incoming virtual modem lines */
+ u32 modem_tx; /* Our outgoing modem lines */
+ int dead; /* Refuse re-open */
+ /* Flow control */
+ int throttled; /* Private copy of throttle state */
+ int constipated; /* Throttle status for outgoing */
+ /* Packetised I/O */
+ struct sk_buff *skb; /* Frame being sent */
+ struct sk_buff_head skb_list; /* Queued frames */
+ /* Data handling callback */
+ void (*data)(struct gsm_dlci *dlci, u8 *data, int len);
+};
+
+/* DLCI 0, 62/63 are special or reserved; see gsmtty_open */
+
+#define NUM_DLCI 64
+
+/*
+ * DLCI 0 is used to pass control blocks out of band of the data
+ * flow (and with a higher link priority). One command can be outstanding
+ * at a time and we use this structure to manage them. They are created
+ * and destroyed by the user context, and updated by the receive paths
+ * and timers
+ */
+
+struct gsm_control {
+ u8 cmd; /* Command we are issuing */
+ u8 *data; /* Data for the command in case we retransmit */
+ int len; /* Length of block for retransmission */
+ int done; /* Done flag */
+ int error; /* Error if any */
+};
+
+/*
+ * Each GSM mux we have is represented by this structure. If we are
+ * operating as an ldisc then we use this structure as our ldisc
+ * state. We need to sort out lifetimes and locking with respect
+ * to the gsm mux array. For now we don't free DLCI objects that
+ * have been instantiated until the mux itself is terminated.
+ *
+ * To consider further: tty open versus mux shutdown.
+ */
+
+struct gsm_mux {
+ struct tty_struct *tty; /* The tty our ldisc is bound to */
+ spinlock_t lock;
+
+ /* Events on the GSM channel */
+ wait_queue_head_t event;
+
+ /* Bits for GSM mode decoding */
+
+ /* Framing Layer */
+ unsigned char *buf;
+ int state;
+#define GSM_SEARCH 0
+#define GSM_START 1
+#define GSM_ADDRESS 2
+#define GSM_CONTROL 3
+#define GSM_LEN 4
+#define GSM_DATA 5
+#define GSM_FCS 6
+#define GSM_OVERRUN 7
+ unsigned int len;
+ unsigned int address;
+ unsigned int count;
+ int escape;
+ int encoding;
+ u8 control;
+ u8 fcs;
+ u8 *txframe; /* TX framing buffer */
+
+ /* Methods for the receiver side */
+ void (*receive)(struct gsm_mux *gsm, u8 ch);
+ void (*error)(struct gsm_mux *gsm, u8 ch, u8 flag);
+ /* And transmit side */
+ int (*output)(struct gsm_mux *mux, u8 *data, int len);
+
+ /* Link Layer */
+ unsigned int mru;
+ unsigned int mtu;
+ int initiator; /* Did we initiate connection */
+ int dead; /* Has the mux been shut down */
+ struct gsm_dlci *dlci[NUM_DLCI];
+ int constipated; /* Asked by remote to shut up */
+
+ spinlock_t tx_lock;
+ unsigned int tx_bytes; /* TX data outstanding */
+#define TX_THRESH_HI 8192
+#define TX_THRESH_LO 2048
+ struct gsm_msg *tx_head; /* Pending data packets */
+ struct gsm_msg *tx_tail;
+
+ /* Control messages */
+ struct timer_list t2_timer; /* Retransmit timer for commands */
+ int cretries; /* Command retry counter */
+ struct gsm_control *pending_cmd;/* Our current pending command */
+ spinlock_t control_lock; /* Protects the pending command */
+
+ /* Configuration */
+ int adaption; /* 1 or 2 supported */
+ u8 ftype; /* UI or UIH */
+ int t1, t2; /* Timers in 1/100th of a sec */
+ int n2; /* Retry count */
+
+ /* Statistics (not currently exposed) */
+ unsigned long bad_fcs;
+ unsigned long malformed;
+ unsigned long io_error;
+ unsigned long bad_size;
+ unsigned long unsupported;
+};
+
+
+/*
+ * Mux objects - needed so that we can translate a tty index into the
+ * relevant mux and DLCI.
+ */
+
+#define MAX_MUX 4 /* 256 minors */
+static struct gsm_mux *gsm_mux[MAX_MUX]; /* GSM muxes */
+static spinlock_t gsm_mux_lock;
+
+/*
+ * This section of the driver logic implements the GSM encodings
+ * both the basic and the 'advanced'. Reliable transport is not
+ * supported.
+ */
+
+#define CR 0x02
+#define EA 0x01
+#define PF 0x10
+
+/* I is special: the rest are... */
+#define RR 0x01
+#define UI 0x03
+#define RNR 0x05
+#define REJ 0x09
+#define DM 0x0F
+#define SABM 0x2F
+#define DISC 0x43
+#define UA 0x63
+#define UIH 0xEF
+
+/* Channel commands */
+#define CMD_NSC 0x09
+#define CMD_TEST 0x11
+#define CMD_PSC 0x21
+#define CMD_RLS 0x29
+#define CMD_FCOFF 0x31
+#define CMD_PN 0x41
+#define CMD_RPN 0x49
+#define CMD_FCON 0x51
+#define CMD_CLD 0x61
+#define CMD_SNC 0x69
+#define CMD_MSC 0x71
+
+/* Virtual modem bits */
+#define MDM_FC 0x01
+#define MDM_RTC 0x02
+#define MDM_RTR 0x04
+#define MDM_IC 0x20
+#define MDM_DV 0x40
+
+#define GSM0_SOF 0xF9
+#define GSM1_SOF 0x7E
+#define GSM1_ESCAPE 0x7D
+#define GSM1_ESCAPE_BITS 0x20
+#define XON 0x11
+#define XOFF 0x13
+
+static const struct tty_port_operations gsm_port_ops;
+
+/*
+ * CRC table for GSM 0710
+ */
+
+static const u8 gsm_fcs8[256] = {
+ 0x00, 0x91, 0xE3, 0x72, 0x07, 0x96, 0xE4, 0x75,
+ 0x0E, 0x9F, 0xED, 0x7C, 0x09, 0x98, 0xEA, 0x7B,
+ 0x1C, 0x8D, 0xFF, 0x6E, 0x1B, 0x8A, 0xF8, 0x69,
+ 0x12, 0x83, 0xF1, 0x60, 0x15, 0x84, 0xF6, 0x67,
+ 0x38, 0xA9, 0xDB, 0x4A, 0x3F, 0xAE, 0xDC, 0x4D,
+ 0x36, 0xA7, 0xD5, 0x44, 0x31, 0xA0, 0xD2, 0x43,
+ 0x24, 0xB5, 0xC7, 0x56, 0x23, 0xB2, 0xC0, 0x51,
+ 0x2A, 0xBB, 0xC9, 0x58, 0x2D, 0xBC, 0xCE, 0x5F,
+ 0x70, 0xE1, 0x93, 0x02, 0x77, 0xE6, 0x94, 0x05,
+ 0x7E, 0xEF, 0x9D, 0x0C, 0x79, 0xE8, 0x9A, 0x0B,
+ 0x6C, 0xFD, 0x8F, 0x1E, 0x6B, 0xFA, 0x88, 0x19,
+ 0x62, 0xF3, 0x81, 0x10, 0x65, 0xF4, 0x86, 0x17,
+ 0x48, 0xD9, 0xAB, 0x3A, 0x4F, 0xDE, 0xAC, 0x3D,
+ 0x46, 0xD7, 0xA5, 0x34, 0x41, 0xD0, 0xA2, 0x33,
+ 0x54, 0xC5, 0xB7, 0x26, 0x53, 0xC2, 0xB0, 0x21,
+ 0x5A, 0xCB, 0xB9, 0x28, 0x5D, 0xCC, 0xBE, 0x2F,
+ 0xE0, 0x71, 0x03, 0x92, 0xE7, 0x76, 0x04, 0x95,
+ 0xEE, 0x7F, 0x0D, 0x9C, 0xE9, 0x78, 0x0A, 0x9B,
+ 0xFC, 0x6D, 0x1F, 0x8E, 0xFB, 0x6A, 0x18, 0x89,
+ 0xF2, 0x63, 0x11, 0x80, 0xF5, 0x64, 0x16, 0x87,
+ 0xD8, 0x49, 0x3B, 0xAA, 0xDF, 0x4E, 0x3C, 0xAD,
+ 0xD6, 0x47, 0x35, 0xA4, 0xD1, 0x40, 0x32, 0xA3,
+ 0xC4, 0x55, 0x27, 0xB6, 0xC3, 0x52, 0x20, 0xB1,
+ 0xCA, 0x5B, 0x29, 0xB8, 0xCD, 0x5C, 0x2E, 0xBF,
+ 0x90, 0x01, 0x73, 0xE2, 0x97, 0x06, 0x74, 0xE5,
+ 0x9E, 0x0F, 0x7D, 0xEC, 0x99, 0x08, 0x7A, 0xEB,
+ 0x8C, 0x1D, 0x6F, 0xFE, 0x8B, 0x1A, 0x68, 0xF9,
+ 0x82, 0x13, 0x61, 0xF0, 0x85, 0x14, 0x66, 0xF7,
+ 0xA8, 0x39, 0x4B, 0xDA, 0xAF, 0x3E, 0x4C, 0xDD,
+ 0xA6, 0x37, 0x45, 0xD4, 0xA1, 0x30, 0x42, 0xD3,
+ 0xB4, 0x25, 0x57, 0xC6, 0xB3, 0x22, 0x50, 0xC1,
+ 0xBA, 0x2B, 0x59, 0xC8, 0xBD, 0x2C, 0x5E, 0xCF
+};
+
+#define INIT_FCS 0xFF
+#define GOOD_FCS 0xCF
+
+/**
+ * gsm_fcs_add - update FCS
+ * @fcs: Current FCS
+ * @c: Next data
+ *
+ * Update the FCS to include c. Uses the algorithm in the specification
+ * notes.
+ */
+
+static inline u8 gsm_fcs_add(u8 fcs, u8 c)
+{
+ return gsm_fcs8[fcs ^ c];
+}
+
+/**
+ * gsm_fcs_add_block - update FCS for a block
+ * @fcs: Current FCS
+ * @c: buffer of data
+ * @len: length of buffer
+ *
+ * Update the FCS to include c. Uses the algorithm in the specification
+ * notes.
+ */
+
+static inline u8 gsm_fcs_add_block(u8 fcs, u8 *c, int len)
+{
+ while (len--)
+ fcs = gsm_fcs8[fcs ^ *c++];
+ return fcs;
+}
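+
+/*
+ * Worked example (illustration only, derived from the table and helpers
+ * above; not part of the driver logic): for the header bytes 0x03 0x3F
+ * 0x01, gsm_fcs_add_block(INIT_FCS, hdr, 3) yields 0xE3, so the check
+ * byte put on the wire is 0xFF - 0xE3 = 0x1C.  A receiver that folds
+ * that check byte back into its running FCS gets
+ * gsm_fcs8[0xE3 ^ 0x1C] = gsm_fcs8[0xFF] = 0xCF, i.e. GOOD_FCS, which
+ * is the test applied in gsm_queue() below.
+ */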
+
+/**
+ * gsm_read_ea - read a byte into an EA
+ * @val: variable holding value
+ * @c: byte going into the EA
+ *
+ * Processes one byte of an EA. Updates the passed variable
+ * and returns 1 if the EA is now completely read
+ */
+
+static int gsm_read_ea(unsigned int *val, u8 c)
+{
+ /* Add the next 7 bits into the value */
+ *val <<= 7;
+ *val |= c >> 1;
+ /* Was this the last byte of the EA? 1 = yes */
+ return c & EA;
+}
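+
+/*
+ * Worked example (illustration only): decoding a two byte EA field with
+ * this helper.  Starting from val = 0, feeding 0x08 (EA bit clear) gives
+ * val = 0x08 >> 1 = 4 and a return of 0, so the caller keeps reading;
+ * feeding 0x05 (EA bit set) then gives val = (4 << 7) | (0x05 >> 1) = 514
+ * and a return of 1.  A single byte field such as 0x0B decodes to 5 in
+ * one call.  Callers loop in the style used elsewhere in this file:
+ *
+ *	while (gsm_read_ea(&addr, *dp++) == 0) {
+ *		len--;
+ *		if (len == 0)
+ *			return;
+ *	}
+ */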
+
+/**
+ * gsm_encode_modem - encode modem data bits
+ * @dlci: DLCI to encode from
+ *
+ * Returns the correct GSM encoded modem status bits (6 bit field) for
+ * the current status of the DLCI and attached tty object
+ */
+
+static u8 gsm_encode_modem(const struct gsm_dlci *dlci)
+{
+ u8 modembits = 0;
+ /* FC is true flow control not modem bits */
+ if (dlci->throttled)
+ modembits |= MDM_FC;
+ if (dlci->modem_tx & TIOCM_DTR)
+ modembits |= MDM_RTC;
+ if (dlci->modem_tx & TIOCM_RTS)
+ modembits |= MDM_RTR;
+ if (dlci->modem_tx & TIOCM_RI)
+ modembits |= MDM_IC;
+ if (dlci->modem_tx & TIOCM_CD)
+ modembits |= MDM_DV;
+ return modembits;
+}
+
+/**
+ * gsm_print_packet - display a frame for debug
+ * @hdr: header to print before decode
+ * @addr: address EA from the frame
+ * @cr: C/R bit from the frame
+ * @control: control including PF bit
+ * @data: following data bytes
+ * @dlen: length of data
+ *
+ * Displays a packet in human readable format for debugging purposes. The
+ * style is based on amateur radio LAP-B dump display.
+ */
+
+static void gsm_print_packet(const char *hdr, int addr, int cr,
+ u8 control, const u8 *data, int dlen)
+{
+ if (!(debug & 1))
+ return;
+
+ printk(KERN_INFO "%s %d) %c: ", hdr, addr, "RC"[cr]);
+
+ switch (control & ~PF) {
+ case SABM:
+ printk(KERN_CONT "SABM");
+ break;
+ case UA:
+ printk(KERN_CONT "UA");
+ break;
+ case DISC:
+ printk(KERN_CONT "DISC");
+ break;
+ case DM:
+ printk(KERN_CONT "DM");
+ break;
+ case UI:
+ printk(KERN_CONT "UI");
+ break;
+ case UIH:
+ printk(KERN_CONT "UIH");
+ break;
+ default:
+ if (!(control & 0x01)) {
+ printk(KERN_CONT "I N(S)%d N(R)%d",
+ (control & 0x0E) >> 1, (control & 0xE0) >> 5);
+ } else switch (control & 0x0F) {
+ case RR:
+ printk("RR(%d)", (control & 0xE0) >> 5);
+ break;
+ case RNR:
+ printk("RNR(%d)", (control & 0xE0) >> 5);
+ break;
+ case REJ:
+ printk("REJ(%d)", (control & 0xE0) >> 5);
+ break;
+ default:
+ printk(KERN_CONT "[%02X]", control);
+ }
+ }
+
+ if (control & PF)
+ printk(KERN_CONT "(P)");
+ else
+ printk(KERN_CONT "(F)");
+
+ if (dlen) {
+ int ct = 0;
+ while (dlen--) {
+ if (ct % 8 == 0)
+ printk(KERN_CONT "\n ");
+ printk(KERN_CONT "%02X ", *data++);
+ ct++;
+ }
+ }
+ printk(KERN_CONT "\n");
+}
+
+
+/*
+ * Link level transmission side
+ */
+
+/**
+ * gsm_stuff_frame - bytestuff a frame
+ * @input: input buffer
+ * @output: output buffer
+ * @len: length of input
+ *
+ * Expand a buffer by bytestuffing it. The worst case size change
+ * is doubling and the caller is responsible for handing out
+ * suitably sized buffers.
+ */
+
+static int gsm_stuff_frame(const u8 *input, u8 *output, int len)
+{
+ int olen = 0;
+ while (len--) {
+ if (*input == GSM1_SOF || *input == GSM1_ESCAPE
+ || *input == XON || *input == XOFF) {
+ *output++ = GSM1_ESCAPE;
+ *output++ = *input++ ^ GSM1_ESCAPE_BITS;
+ olen++;
+ } else
+ *output++ = *input++;
+ olen++;
+ }
+ return olen;
+}
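+
+/*
+ * Worked example (illustration only): stuffing the three input bytes
+ * 7E 45 11 produces the five output bytes 7D 5E 45 7D 31.  0x7E and the
+ * XON character 0x11 are reserved, so each is replaced by GSM1_ESCAPE
+ * followed by the byte XORed with GSM1_ESCAPE_BITS; 0x45 passes through
+ * unchanged.  0x7D and XOFF (0x13) are escaped in the same way.
+ */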
+
+static void hex_packet(const unsigned char *p, int len)
+{
+ int i;
+ for (i = 0; i < len; i++) {
+ if (i && (i % 16) == 0)
+ printk("\n");
+ printk("%02X ", *p++);
+ }
+ printk("\n");
+}
+
+/**
+ * gsm_send - send a control frame
+ * @gsm: our GSM mux
+ * @addr: address for control frame
+ * @cr: command/response bit
+ * @control: control byte including PF bit
+ *
+ * Format up and transmit a control frame. These do not go via the
+ * queueing logic as they should be transmitted ahead of data when
+ * they are needed.
+ *
+ * FIXME: Lock versus data TX path
+ */
+
+static void gsm_send(struct gsm_mux *gsm, int addr, int cr, int control)
+{
+ int len;
+ u8 cbuf[10];
+ u8 ibuf[3];
+
+ switch (gsm->encoding) {
+ case 0:
+ cbuf[0] = GSM0_SOF;
+ cbuf[1] = (addr << 2) | (cr << 1) | EA;
+ cbuf[2] = control;
+ cbuf[3] = EA; /* Length of data = 0 */
+ cbuf[4] = 0xFF - gsm_fcs_add_block(INIT_FCS, cbuf + 1, 3);
+ cbuf[5] = GSM0_SOF;
+ len = 6;
+ break;
+ case 1:
+ case 2:
+ /* Control frame + packing (but not frame stuffing) in mode 1 */
+ ibuf[0] = (addr << 2) | (cr << 1) | EA;
+ ibuf[1] = control;
+ ibuf[2] = 0xFF - gsm_fcs_add_block(INIT_FCS, ibuf, 2);
+ /* Stuffing may double the size worst case */
+ len = gsm_stuff_frame(ibuf, cbuf + 1, 3);
+ /* Now add the SOF markers */
+ cbuf[0] = GSM1_SOF;
+ cbuf[len + 1] = GSM1_SOF;
+ /* FIXME: we can omit the lead one in many cases */
+ len += 2;
+ break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+ gsm->output(gsm, cbuf, len);
+ gsm_print_packet("-->", addr, cr, control, NULL, 0);
+}
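+
+/*
+ * Worked example (illustration only): with encoding 0, a call such as
+ * gsm_command(gsm, 0, SABM|PF) - the open request issued by
+ * gsm_dlci_begin_open() for DLCI 0 - emits the six byte frame
+ *
+ *	F9 03 3F 01 1C F9
+ *
+ * that is SOF, address (0 << 2) | (1 << 1) | EA = 0x03, control
+ * SABM|PF = 0x3F, a zero length field (0x01), the check byte
+ * 0xFF - FCS = 0x1C and the closing SOF.
+ */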
+
+/**
+ * gsm_response - send a control response
+ * @gsm: our GSM mux
+ * @addr: address for control frame
+ * @control: control byte including PF bit
+ *
+ * Format up and transmit a link level response frame.
+ */
+
+static inline void gsm_response(struct gsm_mux *gsm, int addr, int control)
+{
+ gsm_send(gsm, addr, 0, control);
+}
+
+/**
+ * gsm_command - send a control command
+ * @gsm: our GSM mux
+ * @addr: address for control frame
+ * @control: control byte including PF bit
+ *
+ * Format up and transmit a link level command frame.
+ */
+
+static inline void gsm_command(struct gsm_mux *gsm, int addr, int control)
+{
+ gsm_send(gsm, addr, 1, control);
+}
+
+/* Data transmission */
+
+#define HDR_LEN 6 /* ADDR CTRL [LEN.2] DATA FCS */
+
+/**
+ * gsm_data_alloc - allocate data frame
+ * @gsm: GSM mux
+ * @addr: DLCI address
+ * @len: length excluding header and FCS
+ * @ctrl: control byte
+ *
+ * Allocate a new data buffer for sending frames with data. Space is left
+ * at the front for header bytes but that is treated as an implementation
+ * detail and not for the high level code to use
+ */
+
+static struct gsm_msg *gsm_data_alloc(struct gsm_mux *gsm, u8 addr, int len,
+ u8 ctrl)
+{
+ struct gsm_msg *m = kmalloc(sizeof(struct gsm_msg) + len + HDR_LEN,
+ GFP_ATOMIC);
+ if (m == NULL)
+ return NULL;
+ m->data = m->buffer + HDR_LEN - 1; /* Allow for FCS */
+ m->len = len;
+ m->addr = addr;
+ m->ctrl = ctrl;
+ m->next = NULL;
+ return m;
+}
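+
+/*
+ * Layout sketch (illustration only): for a message of len data bytes the
+ * buffer is len + HDR_LEN bytes long and data points HDR_LEN - 1 (five)
+ * bytes in.  That leaves headroom for the address, control and one or two
+ * length bytes which __gsm_data_queue() writes backwards in front of the
+ * payload, while the final byte of the buffer is reserved for the FCS
+ * appended after the payload.
+ */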
+
+/**
+ * gsm_data_kick - poke the queue
+ * @gsm: GSM Mux
+ *
+ * The tty device has called us to indicate that room has appeared in
+ * the transmit queue. Ram more data into the pipe if we have any
+ *
+ * FIXME: lock against link layer control transmissions
+ */
+
+static void gsm_data_kick(struct gsm_mux *gsm)
+{
+ struct gsm_msg *msg = gsm->tx_head;
+ int len;
+ int skip_sof = 0;
+
+ /* FIXME: We need to apply this solely to data messages */
+ if (gsm->constipated)
+ return;
+
+ while (gsm->tx_head != NULL) {
+ msg = gsm->tx_head;
+ if (gsm->encoding != 0) {
+ gsm->txframe[0] = GSM1_SOF;
+ len = gsm_stuff_frame(msg->data,
+ gsm->txframe + 1, msg->len);
+ gsm->txframe[len + 1] = GSM1_SOF;
+ len += 2;
+ } else {
+ gsm->txframe[0] = GSM0_SOF;
+ memcpy(gsm->txframe + 1, msg->data, msg->len);
+ gsm->txframe[msg->len + 1] = GSM0_SOF;
+ len = msg->len + 2;
+ }
+
+ if (debug & 4) {
+ printk("gsm_data_kick: \n");
+ hex_packet(gsm->txframe, len);
+ }
+
+ if (gsm->output(gsm, gsm->txframe + skip_sof,
+ len - skip_sof) < 0)
+ break;
+ /* FIXME: Can eliminate one SOF in many more cases */
+ gsm->tx_head = msg->next;
+ if (gsm->tx_head == NULL)
+ gsm->tx_tail = NULL;
+ gsm->tx_bytes -= msg->len;
+ kfree(msg);
+ /* For a burst of frames skip the extra SOF within the
+ burst */
+ skip_sof = 1;
+ }
+}
+
+/**
+ * __gsm_data_queue - queue a UI or UIH frame
+ * @dlci: DLCI sending the data
+ * @msg: message queued
+ *
+ * Add data to the transmit queue and try and get stuff moving
+ * out of the mux tty if not already doing so. The Caller must hold
+ * the gsm tx lock.
+ */
+
+static void __gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg)
+{
+ struct gsm_mux *gsm = dlci->gsm;
+ u8 *dp = msg->data;
+ u8 *fcs = dp + msg->len;
+
+ /* Fill in the header */
+ if (gsm->encoding == 0) {
+ if (msg->len < 128)
+ *--dp = (msg->len << 1) | EA;
+ else {
+ *--dp = (msg->len >> 6) | EA;
+ *--dp = (msg->len & 127) << 1;
+ }
+ }
+
+ *--dp = msg->ctrl;
+ if (gsm->initiator)
+ *--dp = (msg->addr << 2) | 2 | EA;
+ else
+ *--dp = (msg->addr << 2) | EA;
+ *fcs = gsm_fcs_add_block(INIT_FCS, dp, msg->data - dp);
+ /* Ugly protocol layering violation */
+ if (msg->ctrl == UI || msg->ctrl == (UI|PF))
+ *fcs = gsm_fcs_add_block(*fcs, msg->data, msg->len);
+ *fcs = 0xFF - *fcs;
+
+ gsm_print_packet("Q> ", msg->addr, gsm->initiator, msg->ctrl,
+ msg->data, msg->len);
+
+ /* Move the header back and adjust the length, also allow for the FCS
+ now tacked on the end */
+ msg->len += (msg->data - dp) + 1;
+ msg->data = dp;
+
+ /* Add to the actual output queue */
+ if (gsm->tx_tail)
+ gsm->tx_tail->next = msg;
+ else
+ gsm->tx_head = msg;
+ gsm->tx_tail = msg;
+ gsm->tx_bytes += msg->len;
+ gsm_data_kick(gsm);
+}
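+
+/*
+ * Worked example (illustration only): queueing five data bytes as a UIH
+ * frame on DLCI 1 from the initiator side with encoding 0.  Walking dp
+ * backwards writes the length EA (5 << 1) | EA = 0x0B, the control byte
+ * UIH = 0xEF and the address (1 << 2) | 2 | EA = 0x07; the FCS is
+ * computed over those three header bytes only (UIH does not cover the
+ * payload) and stored after the data, so msg->len grows from 5 to 9.
+ * The SOF flags around the frame are added later by gsm_data_kick().
+ */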
+
+/**
+ * gsm_data_queue - queue a UI or UIH frame
+ * @dlci: DLCI sending the data
+ * @msg: message queued
+ *
+ * Add data to the transmit queue and try and get stuff moving
+ * out of the mux tty if not already doing so. Takes the
+ * gsm tx lock and dlci lock.
+ */
+
+static void gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&dlci->gsm->tx_lock, flags);
+ __gsm_data_queue(dlci, msg);
+ spin_unlock_irqrestore(&dlci->gsm->tx_lock, flags);
+}
+
+/**
+ * gsm_dlci_data_output - try and push data out of a DLCI
+ * @gsm: mux
+ * @dlci: the DLCI to pull data from
+ *
+ * Pull data from a DLCI and send it into the transmit queue if there
+ * is data. Keep to the MRU of the mux. This path handles the usual tty
+ * interface which is a byte stream with optional modem data.
+ *
+ * Caller must hold the tx_lock of the mux.
+ */
+
+static int gsm_dlci_data_output(struct gsm_mux *gsm, struct gsm_dlci *dlci)
+{
+ struct gsm_msg *msg;
+ u8 *dp;
+ int len, size;
+ int h = dlci->adaption - 1;
+
+ len = kfifo_len(dlci->fifo);
+ if (len == 0)
+ return 0;
+
+ /* MTU/MRU count only the data bits */
+ if (len > gsm->mtu)
+ len = gsm->mtu;
+
+ size = len + h;
+
+ msg = gsm_data_alloc(gsm, dlci->addr, size, gsm->ftype);
+ /* FIXME: need a timer or something to kick this so it can't
+ get stuck with no work outstanding and no buffer free */
+ if (msg == NULL)
+ return -ENOMEM;
+ dp = msg->data;
+ switch (dlci->adaption) {
+ case 1: /* Unstructured */
+ break;
+ case 2: /* Unstructured with modem bits. Always one byte as we never
+ send inline break data */
+ *dp++ = gsm_encode_modem(dlci) << 1 | EA;
+ len--;
+ break;
+ }
+ WARN_ON(kfifo_out_locked(dlci->fifo, dp, len, &dlci->lock) != len);
+ __gsm_data_queue(dlci, msg);
+ /* Bytes of data we used up */
+ return size;
+}
+
+/**
+ * gsm_dlci_data_output_framed - try and push data out of a DLCI
+ * @gsm: mux
+ * @dlci: the DLCI to pull data from
+ *
+ * Pull data from a DLCI and send it into the transmit queue if there
+ * is data. Keep to the MRU of the mux. This path handles framed data
+ * queued as skbuffs to the DLCI.
+ *
+ * Caller must hold the tx_lock of the mux.
+ */
+
+static int gsm_dlci_data_output_framed(struct gsm_mux *gsm,
+ struct gsm_dlci *dlci)
+{
+ struct gsm_msg *msg;
+ u8 *dp;
+ int len, size;
+ int last = 0, first = 0;
+ int overhead = 0;
+
+ /* One byte per frame is used for B/F flags */
+ if (dlci->adaption == 4)
+ overhead = 1;
+
+ /* dlci->skb is locked by tx_lock */
+ if (dlci->skb == NULL) {
+ dlci->skb = skb_dequeue(&dlci->skb_list);
+ if (dlci->skb == NULL)
+ return 0;
+ first = 1;
+ }
+ len = dlci->skb->len + overhead;
+
+ /* MTU/MRU count only the data bits */
+ if (len > gsm->mtu) {
+ if (dlci->adaption == 3) {
+ /* Over long frame, bin it */
+ kfree_skb(dlci->skb);
+ dlci->skb = NULL;
+ return 0;
+ }
+ len = gsm->mtu;
+ } else
+ last = 1;
+
+ size = len + overhead;
+ msg = gsm_data_alloc(gsm, dlci->addr, size, gsm->ftype);
+
+ /* FIXME: need a timer or something to kick this so it can't
+ get stuck with no work outstanding and no buffer free */
+ if (msg == NULL)
+ return -ENOMEM;
+ dp = msg->data;
+
+ if (dlci->adaption == 4) { /* Interruptible framed (Packetised Data) */
+ /* Flag byte to carry the start/end info */
+ *dp++ = last << 7 | first << 6 | 1; /* EA */
+ len--;
+ }
+ memcpy(dp, skb_pull(dlci->skb, len), len);
+ __gsm_data_queue(dlci, msg);
+ if (last)
+ dlci->skb = NULL;
+ return size;
+}
+
+/**
+ * gsm_dlci_data_sweep - look for data to send
+ * @gsm: the GSM mux
+ *
+ * Sweep the GSM mux channels in priority order looking for ones with
+ * data to send. We could do with optimising this scan a bit. We aim
+ * to fill the queue totally or up to TX_THRESH_HI bytes. Once we hit
+ * TX_THRESH_LO we get called again
+ *
+ * FIXME: We should round robin between groups and in theory you can
+ * renegotiate DLCI priorities with optional stuff. Needs optimising.
+ */
+
+static void gsm_dlci_data_sweep(struct gsm_mux *gsm)
+{
+ int len;
+ /* Priority ordering: We should do priority with RR of the groups */
+ int i = 1;
+ unsigned long flags;
+
+ spin_lock_irqsave(&gsm->tx_lock, flags);
+ while (i < NUM_DLCI) {
+ struct gsm_dlci *dlci;
+
+ if (gsm->tx_bytes > TX_THRESH_HI)
+ break;
+ dlci = gsm->dlci[i];
+ if (dlci == NULL || dlci->constipated) {
+ i++;
+ continue;
+ }
+ if (dlci->adaption < 3)
+ len = gsm_dlci_data_output(gsm, dlci);
+ else
+ len = gsm_dlci_data_output_framed(gsm, dlci);
+ if (len < 0)
+ return;
+ /* DLCI empty - try the next */
+ if (len == 0)
+ i++;
+ }
+ spin_unlock_irqrestore(&gsm->tx_lock, flags);
+}
+
+/**
+ * gsm_dlci_data_kick - transmit if possible
+ * @dlci: DLCI to kick
+ *
+ * Transmit data from this DLCI if the queue is empty. We can't rely on
+ * a tty wakeup except when we filled the pipe so we need to fire off
+ * new data ourselves in other cases.
+ */
+
+static void gsm_dlci_data_kick(struct gsm_dlci *dlci)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dlci->gsm->tx_lock, flags);
+ /* If we have nothing running then we need to fire up */
+ if (dlci->gsm->tx_bytes == 0)
+ gsm_dlci_data_output(dlci->gsm, dlci);
+ else if (dlci->gsm->tx_bytes < TX_THRESH_LO)
+ gsm_dlci_data_sweep(dlci->gsm);
+ spin_unlock_irqrestore(&dlci->gsm->tx_lock, flags);
+}
+
+/*
+ * Control message processing
+ */
+
+
+/**
+ * gsm_control_reply - send a response frame to a control
+ * @gsm: gsm channel
+ * @cmd: the command to use
+ * @data: data to follow encoded info
+ * @dlen: length of data
+ *
+ * Encode up and queue a UI/UIH frame containing our response.
+ */
+
+static void gsm_control_reply(struct gsm_mux *gsm, int cmd, u8 *data,
+ int dlen)
+{
+ struct gsm_msg *msg;
+ msg = gsm_data_alloc(gsm, 0, dlen + 2, gsm->ftype);
+ if (msg == NULL)
+ return;
+ msg->data[0] = (cmd & 0xFE) << 1 | EA; /* Clear C/R */
+ msg->data[1] = (dlen << 1) | EA;
+ memcpy(msg->data + 2, data, dlen);
+ gsm_data_queue(gsm->dlci[0], msg);
+}
+
+/**
+ * gsm_process_modem - process received modem status
+ * @tty: virtual tty bound to the DLCI
+ * @dlci: DLCI to affect
+ * @modem: modem bits (full EA)
+ *
+ * Used when a modem control message, or line state sent inline in
+ * adaption layer 2, is processed. Sorts out the local modem state and throttling.
+ */
+
+static void gsm_process_modem(struct tty_struct *tty, struct gsm_dlci *dlci,
+ u32 modem)
+{
+ int mlines = 0;
+ u8 brk = modem >> 6;
+
+ /* Flow control/ready to communicate */
+ if (modem & MDM_FC) {
+ /* Need to throttle our output on this device */
+ dlci->constipated = 1;
+ }
+ if (modem & MDM_RTC) {
+ mlines |= TIOCM_DSR | TIOCM_DTR;
+ dlci->constipated = 0;
+ gsm_dlci_data_kick(dlci);
+ }
+ /* Map modem bits */
+ if (modem & MDM_RTR)
+ mlines |= TIOCM_RTS | TIOCM_CTS;
+ if (modem & MDM_IC)
+ mlines |= TIOCM_RI;
+ if (modem & MDM_DV)
+ mlines |= TIOCM_CD;
+
+ /* Carrier drop -> hangup */
+ if (tty) {
+ if ((mlines & TIOCM_CD) == 0 && (dlci->modem_rx & TIOCM_CD))
+ if (!(tty->termios->c_cflag & CLOCAL))
+ tty_hangup(tty);
+ if (brk & 0x01)
+ tty_insert_flip_char(tty, 0, TTY_BREAK);
+ }
+ dlci->modem_rx = mlines;
+}
+
+/**
+ * gsm_control_modem - modem status received
+ * @gsm: GSM channel
+ * @data: data following command
+ * @clen: command length
+ *
+ * We have received a modem status control message. This is used by
+ * the GSM mux protocol to pass virtual modem line status and optionally
+ * to indicate break signals. Unpack it, convert to Linux representation
+ * and if need be stuff a break message down the tty.
+ */
+
+static void gsm_control_modem(struct gsm_mux *gsm, u8 *data, int clen)
+{
+ unsigned int addr = 0;
+ unsigned int modem = 0;
+ struct gsm_dlci *dlci;
+ int len = clen;
+ u8 *dp = data;
+ struct tty_struct *tty;
+
+ while (gsm_read_ea(&addr, *dp++) == 0) {
+ len--;
+ if (len == 0)
+ return;
+ }
+ /* Must be at least one byte following the EA */
+ len--;
+ if (len <= 0)
+ return;
+
+ addr >>= 1;
+ /* Closed port, or invalid ? */
+ if (addr == 0 || addr >= NUM_DLCI || gsm->dlci[addr] == NULL)
+ return;
+ dlci = gsm->dlci[addr];
+
+ while (gsm_read_ea(&modem, *dp++) == 0) {
+ len--;
+ if (len == 0)
+ return;
+ }
+ tty = tty_port_tty_get(&dlci->port);
+ gsm_process_modem(tty, dlci, modem);
+ if (tty) {
+ tty_wakeup(tty);
+ tty_kref_put(tty);
+ }
+ gsm_control_reply(gsm, CMD_MSC, data, clen);
+}
+
+/**
+ * gsm_control_rls - remote line status
+ * @gsm: GSM channel
+ * @data: data bytes
+ * @clen: data length
+ *
+ * The modem sends us a two byte message on the control channel whenever
+ * it wishes to send us an error state from the virtual link. Stuff
+ * this into the uplink tty if present
+ */
+
+static void gsm_control_rls(struct gsm_mux *gsm, u8 *data, int clen)
+{
+ struct tty_struct *tty;
+ unsigned int addr = 0;
+ u8 bits;
+ int len = clen;
+ u8 *dp = data;
+
+ while (gsm_read_ea(&addr, *dp++) == 0) {
+ len--;
+ if (len == 0)
+ return;
+ }
+ /* Must be at least one byte following ea */
+ len--;
+ if (len <= 0)
+ return;
+ addr >>= 1;
+ /* Closed port, or invalid ? */
+ if (addr == 0 || addr >= NUM_DLCI || gsm->dlci[addr] == NULL)
+ return;
+ /* No error ? */
+ bits = *dp;
+ if ((bits & 1) == 0)
+ return;
+ /* See if we have an uplink tty */
+ tty = tty_port_tty_get(&gsm->dlci[addr]->port);
+
+ if (tty) {
+ if (bits & 2)
+ tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ if (bits & 4)
+ tty_insert_flip_char(tty, 0, TTY_PARITY);
+ if (bits & 8)
+ tty_insert_flip_char(tty, 0, TTY_FRAME);
+ tty_flip_buffer_push(tty);
+ tty_kref_put(tty);
+ }
+ gsm_control_reply(gsm, CMD_RLS, data, clen);
+}
+
+static void gsm_dlci_begin_close(struct gsm_dlci *dlci);
+
+/**
+ * gsm_control_message - DLCI 0 control processing
+ * @gsm: our GSM mux
+ * @command: the command EA
+ * @data: data beyond the command/length EAs
+ * @clen: length
+ *
+ * Input processor for control messages from the other end of the link.
+ * Processes the incoming request and queues a response frame or an
+ * NSC response if not supported
+ */
+
+static void gsm_control_message(struct gsm_mux *gsm, unsigned int command,
+ u8 *data, int clen)
+{
+ u8 buf[1];
+ switch (command) {
+ case CMD_CLD: {
+ struct gsm_dlci *dlci = gsm->dlci[0];
+ /* Modem wishes to close down */
+ if (dlci) {
+ dlci->dead = 1;
+ gsm->dead = 1;
+ gsm_dlci_begin_close(dlci);
+ }
+ }
+ break;
+ case CMD_TEST:
+ /* Modem wishes to test, reply with the data */
+ gsm_control_reply(gsm, CMD_TEST, data, clen);
+ break;
+ case CMD_FCON:
+ /* Modem wants us to STFU */
+ gsm->constipated = 1;
+ gsm_control_reply(gsm, CMD_FCON, NULL, 0);
+ break;
+ case CMD_FCOFF:
+ /* Modem can accept data again */
+ gsm->constipated = 0;
+ gsm_control_reply(gsm, CMD_FCOFF, NULL, 0);
+ /* Kick the link in case it is idling */
+ gsm_data_kick(gsm);
+ break;
+ case CMD_MSC:
+ /* Out of band modem line change indicator for a DLCI */
+ gsm_control_modem(gsm, data, clen);
+ break;
+ case CMD_RLS:
+ /* Out of band error reception for a DLCI */
+ gsm_control_rls(gsm, data, clen);
+ break;
+ case CMD_PSC:
+ /* Modem wishes to enter power saving state */
+ gsm_control_reply(gsm, CMD_PSC, NULL, 0);
+ break;
+ /* Optional unsupported commands */
+ case CMD_PN: /* Parameter negotiation */
+ case CMD_RPN: /* Remote port negotiation */
+ case CMD_SNC: /* Service negotiation command */
+ default:
+ /* Reply to bad commands with an NSC */
+ buf[0] = command;
+ gsm_control_reply(gsm, CMD_NSC, buf, 1);
+ break;
+ }
+}
+
+/**
+ * gsm_control_response - process a response to our control
+ * @gsm: our GSM mux
+ * @command: the command (response) EA
+ * @data: data beyond the command/length EA
+ * @clen: length
+ *
+ * Process a response to an outstanding command. We only allow a single
+ * control message in flight so this is fairly easy. All the clean up
+ * is done by the caller, we just update the fields, flag it as done
+ * and return
+ */
+
+static void gsm_control_response(struct gsm_mux *gsm, unsigned int command,
+ u8 *data, int clen)
+{
+ struct gsm_control *ctrl;
+ unsigned long flags;
+
+ spin_lock_irqsave(&gsm->control_lock, flags);
+
+ ctrl = gsm->pending_cmd;
+ /* Does the reply match our command */
+ command |= 1;
+ if (ctrl != NULL && (command == ctrl->cmd || command == CMD_NSC)) {
+ /* Our command was replied to, kill the retry timer */
+ del_timer(&gsm->t2_timer);
+ gsm->pending_cmd = NULL;
+ /* Rejected by the other end */
+ if (command == CMD_NSC)
+ ctrl->error = -EOPNOTSUPP;
+ ctrl->done = 1;
+ wake_up(&gsm->event);
+ }
+ spin_unlock_irqrestore(&gsm->control_lock, flags);
+}
+
+/**
+ * gsm_control_transmit - send control packet
+ * @gsm: gsm mux
+ * @ctrl: frame to send
+ *
+ * Send out a pending control command (called under control lock)
+ */
+
+static void gsm_control_transmit(struct gsm_mux *gsm, struct gsm_control *ctrl)
+{
+ struct gsm_msg *msg = gsm_data_alloc(gsm, 0, ctrl->len + 1,
+ gsm->ftype|PF);
+ if (msg == NULL)
+ return;
+ msg->data[0] = (ctrl->cmd << 1) | 2 | EA; /* command */
+ memcpy(msg->data + 1, ctrl->data, ctrl->len);
+ gsm_data_queue(gsm->dlci[0], msg);
+}
+
+/**
+ * gsm_control_retransmit - retransmit a control frame
+ * @data: pointer to our gsm object
+ *
+ * Called off the T2 timer expiry in order to retransmit control frames
+ * that have been lost in the system somewhere. The control_lock protects
+ * us from colliding with another sender or a receive completion event.
+ * In that situation the timer may still occur in a small window but
+ * gsm->pending_cmd will be NULL and we just let the timer expire.
+ */
+
+static void gsm_control_retransmit(unsigned long data)
+{
+ struct gsm_mux *gsm = (struct gsm_mux *)data;
+ struct gsm_control *ctrl;
+ unsigned long flags;
+ spin_lock_irqsave(&gsm->control_lock, flags);
+ ctrl = gsm->pending_cmd;
+ if (ctrl) {
+ gsm->cretries--;
+ if (gsm->cretries == 0) {
+ gsm->pending_cmd = NULL;
+ ctrl->error = -ETIMEDOUT;
+ ctrl->done = 1;
+ spin_unlock_irqrestore(&gsm->control_lock, flags);
+ wake_up(&gsm->event);
+ return;
+ }
+ gsm_control_transmit(gsm, ctrl);
+ mod_timer(&gsm->t2_timer, jiffies + gsm->t2 * HZ / 100);
+ }
+ spin_unlock_irqrestore(&gsm->control_lock, flags);
+}
+
+/**
+ * gsm_control_send - send a control frame on DLCI 0
+ * @gsm: the GSM channel
+ * @command: command to send including CR bit
+ * @data: bytes of data (must be kmalloced)
+ * @clen: length of the block to send
+ *
+ * Queue and dispatch a control command. Only one command can be
+ * active at a time. In theory more can be outstanding but the matching
+ * gets really complicated so for now stick to one outstanding.
+ */
+
+static struct gsm_control *gsm_control_send(struct gsm_mux *gsm,
+ unsigned int command, u8 *data, int clen)
+{
+ struct gsm_control *ctrl = kzalloc(sizeof(struct gsm_control),
+ GFP_KERNEL);
+ unsigned long flags;
+ if (ctrl == NULL)
+ return NULL;
+retry:
+ wait_event(gsm->event, gsm->pending_cmd == NULL);
+ spin_lock_irqsave(&gsm->control_lock, flags);
+ if (gsm->pending_cmd != NULL) {
+ spin_unlock_irqrestore(&gsm->control_lock, flags);
+ goto retry;
+ }
+ ctrl->cmd = command;
+ ctrl->data = data;
+ ctrl->len = clen;
+ gsm->pending_cmd = ctrl;
+ gsm->cretries = gsm->n2;
+ mod_timer(&gsm->t2_timer, jiffies + gsm->t2 * HZ / 100);
+ gsm_control_transmit(gsm, ctrl);
+ spin_unlock_irqrestore(&gsm->control_lock, flags);
+ return ctrl;
+}
+
+/**
+ * gsm_control_wait - wait for a control to finish
+ * @gsm: GSM mux
+ * @control: control we are waiting on
+ *
+ * Waits for the control to complete or time out. Frees any used
+ * resources and returns 0 for success, or an error if the remote
+ * rejected or ignored the request.
+ */
+
+static int gsm_control_wait(struct gsm_mux *gsm, struct gsm_control *control)
+{
+ int err;
+ wait_event(gsm->event, control->done == 1);
+ err = control->error;
+ kfree(control);
+ return err;
+}
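+
+/*
+ * Usage sketch (illustration only, not part of this patch): sending a
+ * TEST command on DLCI 0 and waiting for the echo.  The payload must be
+ * kmalloc'd because it may be retransmitted off the T2 timer, and
+ * gsm_control_wait() frees the control block and returns 0 on success,
+ * -EOPNOTSUPP if the modem rejected the command or -ETIMEDOUT if it
+ * never answered.  The gsm_test_link() name and the "ping" payload are
+ * made up for the example.
+ *
+ *	static int gsm_test_link(struct gsm_mux *gsm)
+ *	{
+ *		struct gsm_control *ctrl;
+ *		u8 *data = kmemdup("ping", 4, GFP_KERNEL);
+ *		int ret;
+ *
+ *		if (data == NULL)
+ *			return -ENOMEM;
+ *		ctrl = gsm_control_send(gsm, CMD_TEST, data, 4);
+ *		if (ctrl == NULL) {
+ *			kfree(data);
+ *			return -ENOMEM;
+ *		}
+ *		ret = gsm_control_wait(gsm, ctrl);
+ *		kfree(data);
+ *		return ret;
+ *	}
+ */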
+
+
+/*
+ * DLCI level handling: Needs krefs
+ */
+
+/*
+ * State transitions and timers
+ */
+
+/**
+ * gsm_dlci_close - a DLCI has closed
+ * @dlci: DLCI that closed
+ *
+ * Perform processing when moving a DLCI into closed state. If there
+ * is an attached tty this is hung up
+ */
+
+static void gsm_dlci_close(struct gsm_dlci *dlci)
+{
+ del_timer(&dlci->t1);
+ if (debug & 8)
+ printk("DLCI %d goes closed.\n", dlci->addr);
+ dlci->state = DLCI_CLOSED;
+ if (dlci->addr != 0) {
+ struct tty_struct *tty = tty_port_tty_get(&dlci->port);
+ if (tty) {
+ tty_hangup(tty);
+ tty_kref_put(tty);
+ }
+ kfifo_reset(dlci->fifo);
+ } else
+ dlci->gsm->dead = 1;
+ wake_up(&dlci->gsm->event);
+ /* A DLCI 0 close is a MUX termination so we need to kick that
+ back to userspace somehow */
+}
+
+/**
+ * gsm_dlci_open - a DLCI has opened
+ * @dlci: DLCI that opened
+ *
+ * Perform processing when moving a DLCI into open state.
+ */
+
+static void gsm_dlci_open(struct gsm_dlci *dlci)
+{
+ /* Note that SABM UA .. SABM UA first UA lost can mean that we go
+ open -> open */
+ del_timer(&dlci->t1);
+ /* This will let a tty open continue */
+ dlci->state = DLCI_OPEN;
+ if (debug & 8)
+ printk("DLCI %d goes open.\n", dlci->addr);
+ wake_up(&dlci->gsm->event);
+}
+
+/**
+ * gsm_dlci_t1 - T1 timer expiry
+ * @dlci: DLCI whose timer has expired
+ *
+ * The T1 timer handles retransmits of control frames (essentially of
+ * SABM and DISC). We resend the command until the retry count runs out
+ * in which case an opening port goes back to closed and a closing port
+ * is simply put into closed state (any further frames from the other
+ * end will get a DM response)
+ */
+
+static void gsm_dlci_t1(unsigned long data)
+{
+ struct gsm_dlci *dlci = (struct gsm_dlci *)data;
+ struct gsm_mux *gsm = dlci->gsm;
+
+ switch (dlci->state) {
+ case DLCI_OPENING:
+ dlci->retries--;
+ if (dlci->retries) {
+ gsm_command(dlci->gsm, dlci->addr, SABM|PF);
+ mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100);
+ } else
+ gsm_dlci_close(dlci);
+ break;
+ case DLCI_CLOSING:
+ dlci->retries--;
+ if (dlci->retries) {
+ gsm_command(dlci->gsm, dlci->addr, DISC|PF);
+ mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100);
+ } else
+ gsm_dlci_close(dlci);
+ break;
+ }
+}
+
+/**
+ * gsm_dlci_begin_open - start channel open procedure
+ * @dlci: DLCI to open
+ *
+ * Commence opening a DLCI from the Linux side. We issue SABM messages
+ * to the modem which should then reply with a UA, at which point we
+ * will move into open state. Opening is done asynchronously with retry
+ * running off timers and the responses.
+ */
+
+static void gsm_dlci_begin_open(struct gsm_dlci *dlci)
+{
+ struct gsm_mux *gsm = dlci->gsm;
+ if (dlci->state == DLCI_OPEN || dlci->state == DLCI_OPENING)
+ return;
+ dlci->retries = gsm->n2;
+ dlci->state = DLCI_OPENING;
+ gsm_command(dlci->gsm, dlci->addr, SABM|PF);
+ mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100);
+}
+
+/**
+ * gsm_dlci_begin_close - start channel close procedure
+ * @dlci: DLCI to close
+ *
+ * Commence closing a DLCI from the Linux side. We issue DISC messages
+ * to the modem which should then reply with a UA, at which point we
+ * will move into closed state. Closing is done asynchronously with retry
+ * off timers. We may also receive a DM reply from the other end which
+ * indicates the channel was already closed.
+ */
+
+static void gsm_dlci_begin_close(struct gsm_dlci *dlci)
+{
+ struct gsm_mux *gsm = dlci->gsm;
+ if (dlci->state == DLCI_CLOSED || dlci->state == DLCI_CLOSING)
+ return;
+ dlci->retries = gsm->n2;
+ dlci->state = DLCI_CLOSING;
+ gsm_command(dlci->gsm, dlci->addr, DISC|PF);
+ mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100);
+}
+
+/**
+ * gsm_dlci_data - data arrived
+ * @dlci: channel
+ * @data: block of bytes received
+ * @len: length of received block
+ *
+ * A UI or UIH frame has arrived which contains data for a channel
+ * other than the control channel. If the relevant virtual tty is
+ * open we shovel the bits down it, if not we drop them.
+ */
+
+static void gsm_dlci_data(struct gsm_dlci *dlci, u8 *data, int len)
+{
+ /* krefs .. */
+ struct tty_port *port = &dlci->port;
+ struct tty_struct *tty = tty_port_tty_get(port);
+ unsigned int modem = 0;
+
+ if (debug & 16)
+ printk("%d bytes for tty %p\n", len, tty);
+ if (tty) {
+ switch (dlci->adaption) {
+ /* Unsupported types */
+ /* Packetised interruptible data */
+ case 4:
+ break;
+ /* Packetised uninterruptible voice/data */
+ case 3:
+ break;
+ /* Asynchronous serial with line state in each frame */
+ case 2:
+ while (gsm_read_ea(&modem, *data++) == 0) {
+ len--;
+ if (len == 0)
+ return;
+ }
+ gsm_process_modem(tty, dlci, modem);
+ /* Line state will go via DLCI 0 controls only */
+ case 1:
+ default:
+ tty_insert_flip_string(tty, data, len);
+ tty_flip_buffer_push(tty);
+ }
+ tty_kref_put(tty);
+ }
+}
+
+/**
+ * gsm_dlci_command - data arrived on the control channel
+ * @dlci: channel
+ * @data: block of bytes received
+ * @len: length of received block
+ *
+ * A UI or UIH frame has arrived which contains data for DLCI 0 the
+ * control channel. This should contain a command EA followed by
+ * control data bytes. The command EA contains a command/response bit
+ * and we divide up the work accordingly.
+ */
+
+static void gsm_dlci_command(struct gsm_dlci *dlci, u8 *data, int len)
+{
+ /* See what command is involved */
+ unsigned int command = 0;
+ while (len-- > 0) {
+ if (gsm_read_ea(&command, *data++) == 1) {
+ int clen = *data++;
+ len--;
+ /* FIXME: this is properly an EA */
+ clen >>= 1;
+ /* Malformed command ? */
+ if (clen > len)
+ return;
+ if (command & 1)
+ gsm_control_message(dlci->gsm, command,
+ data, clen);
+ else
+ gsm_control_response(dlci->gsm, command,
+ data, clen);
+ return;
+ }
+ }
+}
+
+/*
+ * Allocate/Free DLCI channels
+ */
+
+/**
+ * gsm_dlci_alloc - allocate a DLCI
+ * @gsm: GSM mux
+ * @addr: address of the DLCI
+ *
+ * Allocate and install a new DLCI object into the GSM mux.
+ *
+ * FIXME: review locking races
+ */
+
+static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
+{
+ struct gsm_dlci *dlci = kzalloc(sizeof(struct gsm_dlci), GFP_ATOMIC);
+ if (dlci == NULL)
+ return NULL;
+ spin_lock_init(&dlci->lock);
+ dlci->fifo = &dlci->_fifo;
+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
+ kfree(dlci);
+ return NULL;
+ }
+
+ skb_queue_head_init(&dlci->skb_list);
+ init_timer(&dlci->t1);
+ dlci->t1.function = gsm_dlci_t1;
+ dlci->t1.data = (unsigned long)dlci;
+ tty_port_init(&dlci->port);
+ dlci->port.ops = &gsm_port_ops;
+ dlci->gsm = gsm;
+ dlci->addr = addr;
+ dlci->adaption = gsm->adaption;
+ dlci->state = DLCI_CLOSED;
+ if (addr)
+ dlci->data = gsm_dlci_data;
+ else
+ dlci->data = gsm_dlci_command;
+ gsm->dlci[addr] = dlci;
+ return dlci;
+}
+
+/**
+ * gsm_dlci_free - release DLCI
+ * @dlci: DLCI to destroy
+ *
+ * Free up a DLCI. Currently to keep the lifetime rules sane we only
+ * clean up DLCI objects when the MUX closes rather than as the port
+ * is closed down on both the tty and mux levels.
+ *
+ * Can sleep.
+ */
+static void gsm_dlci_free(struct gsm_dlci *dlci)
+{
+ struct tty_struct *tty = tty_port_tty_get(&dlci->port);
+ if (tty) {
+ tty_vhangup(tty);
+ tty_kref_put(tty);
+ }
+ del_timer_sync(&dlci->t1);
+ dlci->gsm->dlci[dlci->addr] = NULL;
+ kfifo_free(dlci->fifo);
+ kfree(dlci);
+}
+
+
+/*
+ * LAPBish link layer logic
+ */
+
+/**
+ * gsm_queue - a GSM frame is ready to process
+ * @gsm: pointer to our gsm mux
+ *
+ * At this point in time a frame has arrived and been demangled from
+ * the line encoding. All the differences between the encodings have
+ * been handled below us and the frame is unpacked into the structures.
+ * The fcs holds the header FCS but any data FCS must be added here.
+ */
+
+static void gsm_queue(struct gsm_mux *gsm)
+{
+ struct gsm_dlci *dlci;
+ u8 cr;
+ int address;
+ /* We have to sneak a look at the packet body to do the FCS.
+ Something of a layering violation in the spec */
+
+ if ((gsm->control & ~PF) == UI)
+ gsm->fcs = gsm_fcs_add_block(gsm->fcs, gsm->buf, gsm->len);
+ if (gsm->fcs != GOOD_FCS) {
+ gsm->bad_fcs++;
+ if (debug & 4)
+ printk("BAD FCS %02x\n", gsm->fcs);
+ return;
+ }
+ address = gsm->address >> 1;
+ if (address >= NUM_DLCI)
+ goto invalid;
+
+ cr = gsm->address & 1; /* C/R bit */
+
+ gsm_print_packet("<--", address, cr, gsm->control, gsm->buf, gsm->len);
+
+ cr ^= 1 - gsm->initiator; /* Flip so 1 always means command */
+ dlci = gsm->dlci[address];
+
+ switch (gsm->control) {
+ case SABM|PF:
+ if (cr == 0)
+ goto invalid;
+ if (dlci == NULL)
+ dlci = gsm_dlci_alloc(gsm, address);
+ if (dlci == NULL)
+ return;
+ if (dlci->dead)
+ gsm_response(gsm, address, DM);
+ else {
+ gsm_response(gsm, address, UA);
+ gsm_dlci_open(dlci);
+ }
+ break;
+ case DISC|PF:
+ if (cr == 0)
+ goto invalid;
+ if (dlci == NULL || dlci->state == DLCI_CLOSED) {
+ gsm_response(gsm, address, DM);
+ return;
+ }
+ /* Real close complete */
+ gsm_response(gsm, address, UA);
+ gsm_dlci_close(dlci);
+ break;
+ case UA:
+ case UA|PF:
+ if (cr == 0 || dlci == NULL)
+ break;
+ switch (dlci->state) {
+ case DLCI_CLOSING:
+ gsm_dlci_close(dlci);
+ break;
+ case DLCI_OPENING:
+ gsm_dlci_open(dlci);
+ break;
+ }
+ break;
+ case DM: /* DM can be valid unsolicited */
+ case DM|PF:
+ if (cr)
+ goto invalid;
+ if (dlci == NULL)
+ return;
+ gsm_dlci_close(dlci);
+ break;
+ case UI:
+ case UI|PF:
+ case UIH:
+ case UIH|PF:
+#if 0
+ if (cr)
+ goto invalid;
+#endif
+ if (dlci == NULL || dlci->state != DLCI_OPEN) {
+ gsm_command(gsm, address, DM|PF);
+ return;
+ }
+ dlci->data(dlci, gsm->buf, gsm->len);
+ break;
+ default:
+ goto invalid;
+ }
+ return;
+invalid:
+ gsm->malformed++;
+ return;
+}
+
+
+/**
+ * gsm0_receive - perform processing for non-transparency
+ * @gsm: gsm data for this ldisc instance
+ * @c: character
+ *
+ * Receive bytes in gsm mode 0
+ */
+
+static void gsm0_receive(struct gsm_mux *gsm, unsigned char c)
+{
+ switch (gsm->state) {
+ case GSM_SEARCH: /* SOF marker */
+ if (c == GSM0_SOF) {
+ gsm->state = GSM_ADDRESS;
+ gsm->address = 0;
+ gsm->len = 0;
+ gsm->fcs = INIT_FCS;
+ }
+ break;
+ case GSM_ADDRESS: /* Address EA */
+ gsm->fcs = gsm_fcs_add(gsm->fcs, c);
+ if (gsm_read_ea(&gsm->address, c))
+ gsm->state = GSM_CONTROL;
+ break;
+ case GSM_CONTROL: /* Control Byte */
+ gsm->fcs = gsm_fcs_add(gsm->fcs, c);
+ gsm->control = c;
+ gsm->state = GSM_LEN;
+ break;
+ case GSM_LEN: /* Length EA */
+ gsm->fcs = gsm_fcs_add(gsm->fcs, c);
+ if (gsm_read_ea(&gsm->len, c)) {
+ if (gsm->len > gsm->mru) {
+ gsm->bad_size++;
+ gsm->state = GSM_SEARCH;
+ break;
+ }
+ gsm->count = 0;
+ gsm->state = GSM_DATA;
+ }
+ break;
+ case GSM_DATA: /* Data */
+ gsm->buf[gsm->count++] = c;
+ if (gsm->count == gsm->len)
+ gsm->state = GSM_FCS;
+ break;
+ case GSM_FCS: /* FCS follows the packet */
+ gsm->fcs = c;
+ gsm_queue(gsm);
+ /* And then back for the next frame */
+ gsm->state = GSM_SEARCH;
+ break;
+ }
+}
+
+/**
+ * gsm1_receive - perform processing for the advanced option
+ * @gsm: gsm data for this ldisc instance
+ * @c: character
+ *
+ * Receive bytes in mode 1 (Advanced option)
+ */
+
+static void gsm1_receive(struct gsm_mux *gsm, unsigned char c)
+{
+ if (c == GSM1_SOF) {
+ /* A SOF here marks end of frame: it is only valid once we have
+ reached the data state and received at least one byte (the FCS) */
+ if (gsm->state == GSM_DATA && gsm->count) {
+ /* Extract the FCS */
+ gsm->count--;
+ gsm->fcs = gsm_fcs_add(gsm->fcs, gsm->buf[gsm->count]);
+ gsm->len = gsm->count;
+ gsm_queue(gsm);
+ gsm->state = GSM_START;
+ return;
+ }
+ /* Any partial frame was a runt so go back to start */
+ if (gsm->state != GSM_START) {
+ gsm->malformed++;
+ gsm->state = GSM_START;
+ }
+ /* A SOF in GSM_START means we are still reading idling or
+ framing bytes */
+ return;
+ }
+
+ if (c == GSM1_ESCAPE) {
+ gsm->escape = 1;
+ return;
+ }
+
+ /* Only an unescaped SOF gets us out of GSM search */
+ if (gsm->state == GSM_SEARCH)
+ return;
+
+ if (gsm->escape) {
+ c ^= GSM1_ESCAPE_BITS;
+ gsm->escape = 0;
+ }
+ switch (gsm->state) {
+ case GSM_START: /* First byte after SOF */
+ gsm->address = 0;
+ gsm->state = GSM_ADDRESS;
+ gsm->fcs = INIT_FCS;
+ /* Drop through */
+ case GSM_ADDRESS: /* Address continuation */
+ gsm->fcs = gsm_fcs_add(gsm->fcs, c);
+ if (gsm_read_ea(&gsm->address, c))
+ gsm->state = GSM_CONTROL;
+ break;
+ case GSM_CONTROL: /* Control Byte */
+ gsm->fcs = gsm_fcs_add(gsm->fcs, c);
+ gsm->control = c;
+ gsm->count = 0;
+ gsm->state = GSM_DATA;
+ break;
+ case GSM_DATA: /* Data */
+ if (gsm->count > gsm->mru) { /* Allow one for the FCS */
+ gsm->state = GSM_OVERRUN;
+ gsm->bad_size++;
+ } else
+ gsm->buf[gsm->count++] = c;
+ break;
+ case GSM_OVERRUN: /* Over-long - eg a dropped SOF */
+ break;
+ }
+}
+
+/**
+ * gsm_error - handle tty error
+ * @gsm: ldisc data
+ * @data: byte received (may be invalid)
+ * @flag: error received
+ *
+ * Handle an error in the receipt of data for a frame. Currently we just
+ * go back to hunting for a SOF.
+ *
+ * FIXME: better diagnostics ?
+ */
+
+static void gsm_error(struct gsm_mux *gsm,
+ unsigned char data, unsigned char flag)
+{
+ gsm->state = GSM_SEARCH;
+ gsm->io_error++;
+}
+
+/**
+ * gsm_cleanup_mux - generic GSM protocol cleanup
+ * @gsm: our mux
+ *
+ * Clean up the bits of the mux which are the same for all framing
+ * protocols. Remove the mux from the mux table, stop all the timers
+ * and then shut down each device hanging up the channels as we go.
+ */
+
+void gsm_cleanup_mux(struct gsm_mux *gsm)
+{
+ int i;
+ struct gsm_dlci *dlci = gsm->dlci[0];
+ struct gsm_msg *txq;
+
+ gsm->dead = 1;
+
+ spin_lock(&gsm_mux_lock);
+ for (i = 0; i < MAX_MUX; i++) {
+ if (gsm_mux[i] == gsm) {
+ gsm_mux[i] = NULL;
+ break;
+ }
+ }
+ spin_unlock(&gsm_mux_lock);
+ WARN_ON(i == MAX_MUX);
+
+ del_timer_sync(&gsm->t2_timer);
+ /* Now we are sure T2 has stopped */
+ if (dlci) {
+ dlci->dead = 1;
+ gsm_dlci_begin_close(dlci);
+ wait_event_interruptible(gsm->event,
+ dlci->state == DLCI_CLOSED);
+ }
+ /* Free up any link layer users */
+ for (i = 0; i < NUM_DLCI; i++)
+ if (gsm->dlci[i])
+ gsm_dlci_free(gsm->dlci[i]);
+ /* Now wipe the queues */
+ for (txq = gsm->tx_head; txq != NULL; txq = gsm->tx_head) {
+ gsm->tx_head = txq->next;
+ kfree(txq);
+ }
+ gsm->tx_tail = NULL;
+}
+EXPORT_SYMBOL_GPL(gsm_cleanup_mux);
+
+/**
+ * gsm_activate_mux - generic GSM setup
+ * @gsm: our mux
+ *
+ * Set up the bits of the mux which are the same for all framing
+ * protocols. Add the mux to the mux table so it can be opened and
+ * finally kick off connecting to DLCI 0 on the modem.
+ */
+
+int gsm_activate_mux(struct gsm_mux *gsm)
+{
+ struct gsm_dlci *dlci;
+ int i = 0;
+
+ init_timer(&gsm->t2_timer);
+ gsm->t2_timer.function = gsm_control_retransmit;
+ gsm->t2_timer.data = (unsigned long)gsm;
+ init_waitqueue_head(&gsm->event);
+ spin_lock_init(&gsm->control_lock);
+ spin_lock_init(&gsm->tx_lock);
+
+ if (gsm->encoding == 0)
+ gsm->receive = gsm0_receive;
+ else
+ gsm->receive = gsm1_receive;
+ gsm->error = gsm_error;
+
+ spin_lock(&gsm_mux_lock);
+ for (i = 0; i < MAX_MUX; i++) {
+ if (gsm_mux[i] == NULL) {
+ gsm_mux[i] = gsm;
+ break;
+ }
+ }
+ spin_unlock(&gsm_mux_lock);
+ if (i == MAX_MUX)
+ return -EBUSY;
+
+ dlci = gsm_dlci_alloc(gsm, 0);
+ if (dlci == NULL)
+ return -ENOMEM;
+ gsm->dead = 0; /* Tty opens are now permissible */
+ return 0;
+}
+EXPORT_SYMBOL_GPL(gsm_activate_mux);
+
+/**
+ * gsm_free_mux - free up a mux
+ * @gsm: mux to free
+ *
+ * Dispose of allocated resources for a dead mux. No refcounting
+ * at present so the mux must be truly dead.
+ */
+void gsm_free_mux(struct gsm_mux *gsm)
+{
+ kfree(gsm->txframe);
+ kfree(gsm->buf);
+ kfree(gsm);
+}
+EXPORT_SYMBOL_GPL(gsm_free_mux);
+
+/**
+ * gsm_alloc_mux - allocate a mux
+ *
+ * Creates a new mux ready for activation.
+ */
+
+struct gsm_mux *gsm_alloc_mux(void)
+{
+ struct gsm_mux *gsm = kzalloc(sizeof(struct gsm_mux), GFP_KERNEL);
+ if (gsm == NULL)
+ return NULL;
+ gsm->buf = kmalloc(MAX_MRU + 1, GFP_KERNEL);
+ if (gsm->buf == NULL) {
+ kfree(gsm);
+ return NULL;
+ }
+ gsm->txframe = kmalloc(2 * MAX_MRU + 2, GFP_KERNEL);
+ if (gsm->txframe == NULL) {
+ kfree(gsm->buf);
+ kfree(gsm);
+ return NULL;
+ }
+ spin_lock_init(&gsm->lock);
+
+ gsm->t1 = T1;
+ gsm->t2 = T2;
+ gsm->n2 = N2;
+ gsm->ftype = UIH;
+ gsm->initiator = 0;
+ gsm->adaption = 1;
+ gsm->encoding = 1;
+ gsm->mru = 64; /* Default to encoding 1 so these should be 64 */
+ gsm->mtu = 64;
+ gsm->dead = 1; /* Avoid early tty opens */
+
+ return gsm;
+}
+EXPORT_SYMBOL_GPL(gsm_alloc_mux);
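+
+/*
+ * Sketch of the expected call sequence for the exported helpers above
+ * (this is how the line discipline below drives them; another transport
+ * embedding the mux would look much the same):
+ *
+ *	gsm = gsm_alloc_mux();		allocate, defaults set
+ *	...adjust t1/t2/n2, encoding, adaption, mru/mtu if required...
+ *	gsm->tty = ...; gsm->output = ...;
+ *	gsm_activate_mux(gsm);		timers, DLCI 0, mux table slot
+ *	...feed received bytes to gsm->receive(), queue data on DLCIs...
+ *	gsm_cleanup_mux(gsm);		close DLCIs, flush the tx queue
+ *	gsm_free_mux(gsm);		release the buffers and the mux
+ */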
+
+
+
+
+/**
+ * gsmld_output - write to link
+ * @gsm: our mux
+ * @data: bytes to output
+ * @len: size
+ *
+ * Write a block of data from the GSM mux to the data channel. This
+ * will eventually be serialized from above but at the moment isn't.
+ */
+
+static int gsmld_output(struct gsm_mux *gsm, u8 *data, int len)
+{
+ if (tty_write_room(gsm->tty) < len) {
+ set_bit(TTY_DO_WRITE_WAKEUP, &gsm->tty->flags);
+ return -ENOSPC;
+ }
+ if (debug & 4) {
+ printk("-->%d bytes out\n", len);
+ hex_packet(data, len);
+ }
+ gsm->tty->ops->write(gsm->tty, data, len);
+ return len;
+}
+
+/**
+ * gsmld_attach_gsm - mode set up
+ * @tty: our tty structure
+ * @gsm: our mux
+ *
+ * Set up the MUX for basic mode and commence connecting to the
+ * modem. Currently called from the line discipline set up but
+ * will need moving to an ioctl path.
+ */
+
+static int gsmld_attach_gsm(struct tty_struct *tty, struct gsm_mux *gsm)
+{
+ int ret;
+
+ gsm->tty = tty_kref_get(tty);
+ gsm->output = gsmld_output;
+ ret = gsm_activate_mux(gsm);
+ if (ret != 0)
+ tty_kref_put(gsm->tty);
+ return ret;
+}
+
+
+/**
+ * gsmld_detach_gsm - stop doing 0710 mux
+ * @tty: tty attached to the mux
+ * @gsm: mux
+ *
+ * Shutdown and then clean up the resources used by the line discipline
+ */
+
+static void gsmld_detach_gsm(struct tty_struct *tty, struct gsm_mux *gsm)
+{
+ WARN_ON(tty != gsm->tty);
+ gsm_cleanup_mux(gsm);
+ tty_kref_put(gsm->tty);
+ gsm->tty = NULL;
+}
+
+static void gsmld_receive_buf(struct tty_struct *tty, const unsigned char *cp,
+ char *fp, int count)
+{
+ struct gsm_mux *gsm = tty->disc_data;
+ const unsigned char *dp;
+ char *f;
+ int i;
+ char buf[64];
+ char flags;
+
+ if (debug & 4) {
+ printk("Inbytes %dd\n", count);
+ hex_packet(cp, count);
+ }
+
+ for (i = count, dp = cp, f = fp; i; i--, dp++) {
+ flags = *f++;
+ switch (flags) {
+ case TTY_NORMAL:
+ gsm->receive(gsm, *dp);
+ break;
+ case TTY_OVERRUN:
+ case TTY_BREAK:
+ case TTY_PARITY:
+ case TTY_FRAME:
+ gsm->error(gsm, *dp, flags);
+ break;
+ default:
+ printk(KERN_ERR "%s: unknown flag %d\n",
+ tty_name(tty, buf), flags);
+ break;
+ }
+ }
+ /* FASYNC if needed ? */
+ /* If clogged call tty_throttle(tty); */
+}
+
+/**
+ * gsmld_chars_in_buffer - report available bytes
+ * @tty: tty device
+ *
+ * Report the number of characters buffered to be delivered to user
+ * at this instant in time.
+ *
+ * Locking: gsm lock
+ */
+
+static ssize_t gsmld_chars_in_buffer(struct tty_struct *tty)
+{
+ return 0;
+}
+
+/**
+ * gsmld_flush_buffer - clean input queue
+ * @tty: terminal device
+ *
+ * Flush the input buffer. Called when the line discipline is
+ * being closed, when the tty layer wants the buffer flushed (eg
+ * at hangup).
+ */
+
+static void gsmld_flush_buffer(struct tty_struct *tty)
+{
+}
+
+/**
+ * gsmld_close - close the ldisc for this tty
+ * @tty: device
+ *
+ * Called from the terminal layer when this line discipline is
+ * being shut down, either because of a close or because of a
+ * discipline change. The function will not be called while other
+ * ldisc methods are in progress.
+ */
+
+static void gsmld_close(struct tty_struct *tty)
+{
+ struct gsm_mux *gsm = tty->disc_data;
+
+ gsmld_detach_gsm(tty, gsm);
+
+ gsmld_flush_buffer(tty);
+ /* Do other clean up here */
+ gsm_free_mux(gsm);
+}
+
+/**
+ * gsmld_open - open an ldisc
+ * @tty: terminal to open
+ *
+ * Called when this line discipline is being attached to the
+ * terminal device. Can sleep. Called serialized so that no
+ * other events will occur in parallel. No further open will occur
+ * until a close.
+ */
+
+static int gsmld_open(struct tty_struct *tty)
+{
+ struct gsm_mux *gsm;
+
+ if (tty->ops->write == NULL)
+ return -EINVAL;
+
+ /* Attach our ldisc data */
+ gsm = gsm_alloc_mux();
+ if (gsm == NULL)
+ return -ENOMEM;
+
+ tty->disc_data = gsm;
+ tty->receive_room = 65536;
+
+ /* Attach the initial passive connection */
+ gsm->encoding = 1;
+ return gsmld_attach_gsm(tty, gsm);
+}
+
+/**
+ * gsmld_write_wakeup - asynchronous I/O notifier
+ * @tty: tty device
+ *
+ * Required for the ptys, serial driver etc. since processes
+ * that attach themselves to the master and rely on ASYNC
+ * IO must be woken up
+ */
+
+static void gsmld_write_wakeup(struct tty_struct *tty)
+{
+ struct gsm_mux *gsm = tty->disc_data;
+
+ /* Queue poll */
+ clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+ gsm_data_kick(gsm);
+ if (gsm->tx_bytes < TX_THRESH_LO)
+ gsm_dlci_data_sweep(gsm);
+}
+
+/**
+ * gsmld_read - read function for tty
+ * @tty: tty device
+ * @file: file object
+ * @buf: userspace buffer pointer
+ * @nr: size of I/O
+ *
+ * Perform reads for the line discipline. We are guaranteed that the
+ * line discipline will not be closed under us but we may get multiple
+ * parallel readers and must handle this ourselves. We may also get
+ * a hangup. Always called in user context, may sleep.
+ *
+ * This code must be sure never to sleep through a hangup.
+ */
+
+static ssize_t gsmld_read(struct tty_struct *tty, struct file *file,
+ unsigned char __user *buf, size_t nr)
+{
+ return -EOPNOTSUPP;
+}
+
+/**
+ * gsmld_write - write function for tty
+ * @tty: tty device
+ * @file: file object
+ * @buf: userspace buffer pointer
+ * @nr: size of I/O
+ *
+ * Called when the owner of the device wants to send a frame
+ * itself (or some other control data). The data is transferred
+ * as-is and must be properly framed and checksummed as appropriate
+ * by userspace. Frames are either sent whole or not at all as this
+ * avoids pain on the user side.
+ */
+
+static ssize_t gsmld_write(struct tty_struct *tty, struct file *file,
+ const unsigned char *buf, size_t nr)
+{
+ int space = tty_write_room(tty);
+ if (space >= nr)
+ return tty->ops->write(tty, buf, nr);
+ set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+ return -ENOBUFS;
+}
+
+/**
+ * gsmld_poll - poll method for N_GSM0710
+ * @tty: terminal device
+ * @file: file accessing it
+ * @wait: poll table
+ *
+ * Called when the line discipline is asked to poll() for data or
+ * for special events. This code is not serialized with respect to
+ * other events save open/close.
+ *
+ * This code must be sure never to sleep through a hangup.
+ * Called without the kernel lock held - fine
+ */
+
+static unsigned int gsmld_poll(struct tty_struct *tty, struct file *file,
+ poll_table *wait)
+{
+ unsigned int mask = 0;
+ struct gsm_mux *gsm = tty->disc_data;
+
+ poll_wait(file, &tty->read_wait, wait);
+ poll_wait(file, &tty->write_wait, wait);
+ if (tty_hung_up_p(file))
+ mask |= POLLHUP;
+ if (!tty_is_writelocked(tty) && tty_write_room(tty) > 0)
+ mask |= POLLOUT | POLLWRNORM;
+ if (gsm->dead)
+ mask |= POLLHUP;
+ return mask;
+}
+
+static int gsmld_config(struct tty_struct *tty, struct gsm_mux *gsm,
+ struct gsm_config *c)
+{
+ int need_close = 0;
+ int need_restart = 0;
+
+ /* Stuff we don't support yet - UI or I frame transport, windowing */
+ if ((c->adaption !=1 && c->adaption != 2) || c->k)
+ return -EOPNOTSUPP;
+ /* Check the MRU/MTU range looks sane */
+ if (c->mru > MAX_MRU || c->mtu > MAX_MTU || c->mru < 8 || c->mtu < 8)
+ return -EINVAL;
+ if (c->n2 < 3)
+ return -EINVAL;
+ if (c->encapsulation > 1) /* Basic, advanced, no I */
+ return -EINVAL;
+ if (c->initiator > 1)
+ return -EINVAL;
+ if (c->i == 0 || c->i > 2) /* UIH and UI only */
+ return -EINVAL;
+ /*
+ * See what is needed for reconfiguration
+ */
+
+ /* Timing fields */
+ if (c->t1 != 0 && c->t1 != gsm->t1)
+ need_restart = 1;
+ if (c->t2 != 0 && c->t2 != gsm->t2)
+ need_restart = 1;
+ if (c->encapsulation != gsm->encoding)
+ need_restart = 1;
+ if (c->adaption != gsm->adaption)
+ need_restart = 1;
+ /* Requires care */
+ if (c->initiator != gsm->initiator)
+ need_close = 1;
+ if (c->mru != gsm->mru)
+ need_restart = 1;
+ if (c->mtu != gsm->mtu)
+ need_restart = 1;
+
+ /*
+ * Close down what is needed, restart and initiate the new
+ * configuration
+ */
+
+ if (need_close || need_restart) {
+ gsm_dlci_begin_close(gsm->dlci[0]);
+ /* This will timeout if the link is down due to N2 expiring */
+ wait_event_interruptible(gsm->event,
+ gsm->dlci[0]->state == DLCI_CLOSED);
+ if (signal_pending(current))
+ return -EINTR;
+ }
+ if (need_restart)
+ gsm_cleanup_mux(gsm);
+
+ gsm->initiator = c->initiator;
+ gsm->mru = c->mru;
+ gsm->encoding = c->encapsulation;
+ gsm->adaption = c->adaption;
+
+ if (c->i == 1)
+ gsm->ftype = UIH;
+ else if (c->i == 2)
+ gsm->ftype = UI;
+
+ if (c->t1)
+ gsm->t1 = c->t1;
+ if (c->t2)
+ gsm->t2 = c->t2;
+
+ /* FIXME: We need to separate activation/deactivation from adding
+ and removing from the mux array */
+ if (need_restart)
+ gsm_activate_mux(gsm);
+ if (gsm->initiator && need_close)
+ gsm_dlci_begin_open(gsm->dlci[0]);
+ return 0;
+}
+
+static int gsmld_ioctl(struct tty_struct *tty, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ struct gsm_config c;
+ struct gsm_mux *gsm = tty->disc_data;
+
+ switch (cmd) {
+ case GSMIOC_GETCONF:
+ memset(&c, 0, sizeof(c));
+ c.adaption = gsm->adaption;
+ c.encapsulation = gsm->encoding;
+ c.initiator = gsm->initiator;
+ c.t1 = gsm->t1;
+ c.t2 = gsm->t2;
+ c.t3 = 0; /* Not supported */
+ c.n2 = gsm->n2;
+ if (gsm->ftype == UIH)
+ c.i = 1;
+ else
+ c.i = 2;
+ printk("Ftype %d i %d\n", gsm->ftype, c.i);
+ c.mru = gsm->mru;
+ c.mtu = gsm->mtu;
+ c.k = 0;
+ if (copy_to_user((void *)arg, &c, sizeof(c)))
+ return -EFAULT;
+ return 0;
+ case GSMIOC_SETCONF:
+ if (copy_from_user(&c, (void *)arg, sizeof(c)))
+ return -EFAULT;
+ return gsmld_config(tty, gsm, &c);
+ default:
+ return n_tty_ioctl_helper(tty, file, cmd, arg);
+ }
+}
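[Editorial aside, not part of the patch: a minimal userspace sketch of how the GSMIOC_GETCONF/GSMIOC_SETCONF path above is expected to be driven. It assumes struct gsm_config and the GSMIOC_* ioctls are exported to userspace (the gsmmux.h header added elsewhere in this series) and that N_GSM0710 is the ldisc number registered by gsm_init(); the value 21 is an assumption. The modem tty is assumed to be already configured for raw I/O.]

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <linux/gsmmux.h>

	#define N_GSM0710	21	/* GSM 07.10 mux ldisc number (assumption) */

	int attach_gsm_mux(const char *dev)
	{
		struct gsm_config c;
		int ldisc = N_GSM0710;
		int fd = open(dev, O_RDWR | O_NOCTTY);

		if (fd < 0)
			return -1;
		/* gsmld_open() runs here and allocates the mux */
		if (ioctl(fd, TIOCSETD, &ldisc) < 0)
			return -1;
		/* read back the defaults set by gsm_alloc_mux() */
		if (ioctl(fd, GSMIOC_GETCONF, &c) < 0)
			return -1;
		c.initiator = 1;		/* we open DLCI 0 ourselves */
		c.encapsulation = 0;		/* basic option framing */
		c.mru = c.mtu = 127;
		/* gsmld_config() validates and applies the new settings */
		if (ioctl(fd, GSMIOC_SETCONF, &c) < 0)
			return -1;
		return fd;			/* keep open; gsmtty nodes now usable */
	}

[Opening the per-channel tty nodes then goes through gsmtty_open() below; closing or hanging up the base tty tears the whole mux down via gsmld_close().]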
+
+
+/* Line discipline for real tty */
+struct tty_ldisc_ops tty_ldisc_packet = {
+ .owner = THIS_MODULE,
+ .magic = TTY_LDISC_MAGIC,
+ .name = "n_gsm",
+ .open = gsmld_open,
+ .close = gsmld_close,
+ .flush_buffer = gsmld_flush_buffer,
+ .chars_in_buffer = gsmld_chars_in_buffer,
+ .read = gsmld_read,
+ .write = gsmld_write,
+ .ioctl = gsmld_ioctl,
+ .poll = gsmld_poll,
+ .receive_buf = gsmld_receive_buf,
+ .write_wakeup = gsmld_write_wakeup
+};
+
+/*
+ * Virtual tty side
+ */
+
+#define TX_SIZE 512
+
+static int gsmtty_modem_update(struct gsm_dlci *dlci, u8 brk)
+{
+ u8 modembits[5];
+ struct gsm_control *ctrl;
+ int len = 2;
+
+ if (brk)
+ len++;
+
+ modembits[0] = len << 1 | EA; /* Data bytes */
+ modembits[1] = dlci->addr << 2 | 3; /* DLCI, EA, 1 */
+ modembits[2] = gsm_encode_modem(dlci) << 1 | EA;
+ if (brk)
+ modembits[3] = brk << 4 | 2 | EA; /* Valid, EA */
+ ctrl = gsm_control_send(dlci->gsm, CMD_MSC, modembits, len + 1);
+ if (ctrl == NULL)
+ return -ENOMEM;
+ return gsm_control_wait(dlci->gsm, ctrl);
+}
+
+static int gsm_carrier_raised(struct tty_port *port)
+{
+ struct gsm_dlci *dlci = container_of(port, struct gsm_dlci, port);
+ /* Not yet open so no carrier info */
+ if (dlci->state != DLCI_OPEN)
+ return 0;
+ if (debug & 2)
+ return 1;
+ return dlci->modem_rx & TIOCM_CD;
+}
+
+static void gsm_dtr_rts(struct tty_port *port, int onoff)
+{
+ struct gsm_dlci *dlci = container_of(port, struct gsm_dlci, port);
+ unsigned int modem_tx = dlci->modem_tx;
+ if (onoff)
+ modem_tx |= TIOCM_DTR | TIOCM_RTS;
+ else
+ modem_tx &= ~(TIOCM_DTR | TIOCM_RTS);
+ if (modem_tx != dlci->modem_tx) {
+ dlci->modem_tx = modem_tx;
+ gsmtty_modem_update(dlci, 0);
+ }
+}
+
+static const struct tty_port_operations gsm_port_ops = {
+ .carrier_raised = gsm_carrier_raised,
+ .dtr_rts = gsm_dtr_rts,
+};
+
+
+static int gsmtty_open(struct tty_struct *tty, struct file *filp)
+{
+ struct gsm_mux *gsm;
+ struct gsm_dlci *dlci;
+ struct tty_port *port;
+ unsigned int line = tty->index;
+ unsigned int mux = line >> 6;
+
+ line = line & 0x3F;
+
+ if (mux >= MAX_MUX)
+ return -ENXIO;
+ /* FIXME: we need to lock gsm_mux for lifetimes of ttys eventually */
+ if (gsm_mux[mux] == NULL)
+ return -EUNATCH;
+ if (line == 0 || line > 61) /* 62/63 reserved */
+ return -ECHRNG;
+ gsm = gsm_mux[mux];
+ if (gsm->dead)
+ return -EL2HLT;
+ dlci = gsm->dlci[line];
+ if (dlci == NULL)
+ dlci = gsm_dlci_alloc(gsm, line);
+ if (dlci == NULL)
+ return -ENOMEM;
+ port = &dlci->port;
+ port->count++;
+ tty->driver_data = dlci;
+ tty_port_tty_set(port, tty);
+
+ dlci->modem_rx = 0;
+ /* We could in theory open and close before we wait - eg if we get
+ a DM straight back. This is ok as that will have caused a hangup */
+ set_bit(ASYNCB_INITIALIZED, &port->flags);
+ /* Start sending off SABM messages */
+ gsm_dlci_begin_open(dlci);
+ /* And wait for virtual carrier */
+ return tty_port_block_til_ready(port, tty, filp);
+}
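[Editorial note, not part of the patch: the index arithmetic at the top of gsmtty_open() gives each mux a block of 64 tty minors. A hypothetical helper (names illustrative only) makes the split explicit:]

	/* Hypothetical helper, for illustration only. */
	static inline void gsmtty_decode_minor(unsigned int index,
					       unsigned int *mux, unsigned int *dlci)
	{
		*mux  = index >> 6;	/* which gsm_mux[] slot owns this tty */
		*dlci = index & 0x3F;	/* DLCI within that mux; 1..61 usable,
					   0 is control, 62/63 reserved */
	}

[Under these assumptions, tty index 65 maps to DLCI 1 on the second mux.]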
+
+static void gsmtty_close(struct tty_struct *tty, struct file *filp)
+{
+ struct gsm_dlci *dlci = tty->driver_data;
+ if (dlci == NULL)
+ return;
+ if (tty_port_close_start(&dlci->port, tty, filp) == 0)
+ return;
+ gsm_dlci_begin_close(dlci);
+ tty_port_close_end(&dlci->port, tty);
+ tty_port_tty_set(&dlci->port, NULL);
+}
+
+static void gsmtty_hangup(struct tty_struct *tty)
+{
+ struct gsm_dlci *dlci = tty->driver_data;
+ tty_port_hangup(&dlci->port);
+ gsm_dlci_begin_close(dlci);
+}
+
+static int gsmtty_write(struct tty_struct *tty, const unsigned char *buf,
+ int len)
+{
+ struct gsm_dlci *dlci = tty->driver_data;
+ /* Stuff the bytes into the fifo queue */
+ int sent = kfifo_in_locked(dlci->fifo, buf, len, &dlci->lock);
+ /* Need to kick the channel */
+ gsm_dlci_data_kick(dlci);
+ return sent;
+}
+
+static int gsmtty_write_room(struct tty_struct *tty)
+{
+ struct gsm_dlci *dlci = tty->driver_data;
+ return TX_SIZE - kfifo_len(dlci->fifo);
+}
+
+static int gsmtty_chars_in_buffer(struct tty_struct *tty)
+{
+ struct gsm_dlci *dlci = tty->driver_data;
+ return kfifo_len(dlci->fifo);
+}
+
+static void gsmtty_flush_buffer(struct tty_struct *tty)
+{
+ struct gsm_dlci *dlci = tty->driver_data;
+ /* Caution needed: If we implement reliable transport classes
+ then the data being transmitted can't simply be junked once
+ it has first hit the stack. Until then we can just blow it
+ away */
+ kfifo_reset(dlci->fifo);
+ /* Need to unhook this DLCI from the transmit queue logic */
+}
+
+static void gsmtty_wait_until_sent(struct tty_struct *tty, int timeout)
+{
+ /* The FIFO handles the queue so the kernel will do the right
+ thing waiting on chars_in_buffer before calling us. No work
+ to do here */
+}
+
+static int gsmtty_tiocmget(struct tty_struct *tty, struct file *filp)
+{
+ struct gsm_dlci *dlci = tty->driver_data;
+ return dlci->modem_rx;
+}
+
+static int gsmtty_tiocmset(struct tty_struct *tty, struct file *filp,
+ unsigned int set, unsigned int clear)
+{
+ struct gsm_dlci *dlci = tty->driver_data;
+ unsigned int modem_tx = dlci->modem_tx;
+
+ modem_tx &= clear;
+ modem_tx |= set;
+
+ if (modem_tx != dlci->modem_tx) {
+ dlci->modem_tx = modem_tx;
+ return gsmtty_modem_update(dlci, 0);
+ }
+ return 0;
+}
+
+
+static int gsmtty_ioctl(struct tty_struct *tty, struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ return -ENOIOCTLCMD;
+}
+
+static void gsmtty_set_termios(struct tty_struct *tty, struct ktermios *old)
+{
+ /* For the moment it's fixed. In actual fact the speed information
+ for the virtual channel can be propagated in both directions by
+ the RPN control message. This however rapidly gets nasty as we
+ then have to remap modem signals each way according to whether
+ our virtual cable is null modem etc .. */
+ tty_termios_copy_hw(tty->termios, old);
+}
+
+static void gsmtty_throttle(struct tty_struct *tty)
+{
+ struct gsm_dlci *dlci = tty->driver_data;
+ if (tty->termios->c_cflag & CRTSCTS)
+ dlci->modem_tx &= ~TIOCM_DTR;
+ dlci->throttled = 1;
+ /* Send an MSC with DTR cleared */
+ gsmtty_modem_update(dlci, 0);
+}
+
+static void gsmtty_unthrottle(struct tty_struct *tty)
+{
+ struct gsm_dlci *dlci = tty->driver_data;
+ if (tty->termios->c_cflag & CRTSCTS)
+ dlci->modem_tx |= TIOCM_DTR;
+ dlci->throttled = 0;
+ /* Send an MSC with DTR set */
+ gsmtty_modem_update(dlci, 0);
+}
+
+static int gsmtty_break_ctl(struct tty_struct *tty, int state)
+{
+ struct gsm_dlci *dlci = tty->driver_data;
+ int encode = 0; /* Off */
+
+ if (state == -1) /* "On indefinitely" - we can't encode this
+ properly */
+ encode = 0x0F;
+ else if (state > 0) {
+ encode = state / 200; /* mS to encoding */
+ if (encode > 0x0F)
+ encode = 0x0F; /* Best effort */
+ }
+ return gsmtty_modem_update(dlci, encode);
+}
+
+static struct tty_driver *gsm_tty_driver;
+
+/* Virtual ttys for the demux */
+static const struct tty_operations gsmtty_ops = {
+ .open = gsmtty_open,
+ .close = gsmtty_close,
+ .write = gsmtty_write,
+ .write_room = gsmtty_write_room,
+ .chars_in_buffer = gsmtty_chars_in_buffer,
+ .flush_buffer = gsmtty_flush_buffer,
+ .ioctl = gsmtty_ioctl,
+ .throttle = gsmtty_throttle,
+ .unthrottle = gsmtty_unthrottle,
+ .set_termios = gsmtty_set_termios,
+ .hangup = gsmtty_hangup,
+ .wait_until_sent = gsmtty_wait_until_sent,
+ .tiocmget = gsmtty_tiocmget,
+ .tiocmset = gsmtty_tiocmset,
+ .break_ctl = gsmtty_break_ctl,
+};
+
+
+
+static int __init gsm_init(void)
+{
+ /* Fill in our line protocol discipline, and register it */
+ int status = tty_register_ldisc(N_GSM0710, &tty_ldisc_packet);
+ if (status != 0) {
+ printk(KERN_ERR "n_gsm: can't register line discipline (err = %d)\n", status);
+ return status;
+ }
+
+ gsm_tty_driver = alloc_tty_driver(256);
+ if (!gsm_tty_driver) {
+ tty_unregister_ldisc(N_GSM0710);
+ printk(KERN_ERR "gsm_init: tty allocation failed.\n");
+ return -EINVAL;
+ }
+ gsm_tty_driver->owner = THIS_MODULE;
+ gsm_tty_driver->driver_name = "gsmtty";
+ gsm_tty_driver->name = "gsmtty";
+ gsm_tty_driver->major = 0; /* Dynamic */
+ gsm_tty_driver->minor_start = 0;
+ gsm_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
+ gsm_tty_driver->subtype = SERIAL_TYPE_NORMAL;
+ gsm_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV
+ | TTY_DRIVER_HARDWARE_BREAK;
+ gsm_tty_driver->init_termios = tty_std_termios;
+ /* Fixme */
+ gsm_tty_driver->init_termios.c_lflag &= ~ECHO;
+ tty_set_operations(gsm_tty_driver, &gsmtty_ops);
+
+ spin_lock_init(&gsm_mux_lock);
+
+ if (tty_register_driver(gsm_tty_driver)) {
+ put_tty_driver(gsm_tty_driver);
+ tty_unregister_ldisc(N_GSM0710);
+ printk(KERN_ERR "gsm_init: tty registration failed.\n");
+ return -EBUSY;
+ }
+ printk(KERN_INFO "gsm_init: loaded as %d,%d.\n", gsm_tty_driver->major, gsm_tty_driver->minor_start);
+ return 0;
+}
+
+static void __exit gsm_exit(void)
+{
+ int status = tty_unregister_ldisc(N_GSM0710);
+ if (status != 0)
+ printk(KERN_ERR "n_gsm: can't unregister line discipline (err = %d)\n", status);
+ tty_unregister_driver(gsm_tty_driver);
+ put_tty_driver(gsm_tty_driver);
+ printk(KERN_INFO "gsm_init: unloaded.\n");
+}
+
+module_init(gsm_init);
+module_exit(gsm_exit);
+
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_LDISC(N_GSM0710);
diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
index 47e8f7b0e4c1..66d2917b003f 100644
--- a/drivers/char/nvram.c
+++ b/drivers/char/nvram.c
@@ -296,8 +296,8 @@ checksum_err:
return -EIO;
}
-static int nvram_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg)
+static long nvram_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
{
int i;
@@ -308,6 +308,7 @@ static int nvram_ioctl(struct inode *inode, struct file *file,
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
+ lock_kernel();
spin_lock_irq(&rtc_lock);
for (i = 0; i < NVRAM_BYTES; ++i)
@@ -315,6 +316,7 @@ static int nvram_ioctl(struct inode *inode, struct file *file,
__nvram_set_checksum();
spin_unlock_irq(&rtc_lock);
+ unlock_kernel();
return 0;
case NVRAM_SETCKS:
@@ -323,9 +325,11 @@ static int nvram_ioctl(struct inode *inode, struct file *file,
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
+ lock_kernel();
spin_lock_irq(&rtc_lock);
__nvram_set_checksum();
spin_unlock_irq(&rtc_lock);
+ unlock_kernel();
return 0;
default:
@@ -422,7 +426,7 @@ static const struct file_operations nvram_fops = {
.llseek = nvram_llseek,
.read = nvram_read,
.write = nvram_write,
- .ioctl = nvram_ioctl,
+ .unlocked_ioctl = nvram_ioctl,
.open = nvram_open,
.release = nvram_release,
};
diff --git a/drivers/char/nwflash.c b/drivers/char/nwflash.c
index f80810901db6..043a1c7b86be 100644
--- a/drivers/char/nwflash.c
+++ b/drivers/char/nwflash.c
@@ -94,8 +94,9 @@ static int get_flash_id(void)
return c2;
}
-static int flash_ioctl(struct inode *inodep, struct file *filep, unsigned int cmd, unsigned long arg)
+static long flash_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
+ lock_kernel();
switch (cmd) {
case CMD_WRITE_DISABLE:
gbWriteBase64Enable = 0;
@@ -113,8 +114,10 @@ static int flash_ioctl(struct inode *inodep, struct file *filep, unsigned int cm
default:
gbWriteBase64Enable = 0;
gbWriteEnable = 0;
+ unlock_kernel();
return -EINVAL;
}
+ unlock_kernel();
return 0;
}
@@ -631,7 +634,7 @@ static const struct file_operations flash_fops =
.llseek = flash_llseek,
.read = flash_read,
.write = flash_write,
- .ioctl = flash_ioctl,
+ .unlocked_ioctl = flash_ioctl,
};
static struct miscdevice flash_miscdev =
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index 90b199f97bec..e7956acf2ad6 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -106,7 +106,6 @@ static int major; /* major number we get from the kernel */
struct cm4000_dev {
struct pcmcia_device *p_dev;
- dev_node_t node; /* OS node (major,minor) */
unsigned char atr[MAX_ATR];
unsigned char rbuf[512];
@@ -884,8 +883,7 @@ static void monitor_card(unsigned long p)
/* slow down warning, but prompt immediately after insertion */
if (dev->cwarn == 0 || dev->cwarn == 10) {
set_bit(IS_BAD_CARD, &dev->flags);
- printk(KERN_WARNING MODULE_NAME ": device %s: ",
- dev->node.dev_name);
+ dev_warn(&dev->p_dev->dev, MODULE_NAME ": ");
if (test_bit(IS_BAD_CSUM, &dev->flags)) {
DEBUGP(4, dev, "ATR checksum (0x%.2x, should "
"be zero) failed\n", dev->atr_csum);
@@ -1781,11 +1779,6 @@ static int cm4000_config(struct pcmcia_device * link, int devno)
goto cs_release;
dev = link->priv;
- sprintf(dev->node.dev_name, DEVICE_NAME "%d", devno);
- dev->node.major = major;
- dev->node.minor = devno;
- dev->node.next = NULL;
- link->dev_node = &dev->node;
return 0;
diff --git a/drivers/char/pcmcia/cm4040_cs.c b/drivers/char/pcmcia/cm4040_cs.c
index a6a70e476bea..c0775c844e08 100644
--- a/drivers/char/pcmcia/cm4040_cs.c
+++ b/drivers/char/pcmcia/cm4040_cs.c
@@ -72,7 +72,6 @@ static struct class *cmx_class;
struct reader_dev {
struct pcmcia_device *p_dev;
- dev_node_t node;
wait_queue_head_t devq;
wait_queue_head_t poll_wait;
wait_queue_head_t read_wait;
@@ -568,10 +567,6 @@ static int reader_config(struct pcmcia_device *link, int devno)
}
dev = link->priv;
- sprintf(dev->node.dev_name, DEVICE_NAME "%d", devno);
- dev->node.major = major;
- dev->node.minor = devno;
- dev->node.next = &dev->node;
DEBUGP(2, dev, "device " DEVICE_NAME "%d at 0x%.4x-0x%.4x\n", devno,
link->io.BasePort1, link->io.BasePort1+link->io.NumPorts1);
diff --git a/drivers/char/pcmcia/ipwireless/main.c b/drivers/char/pcmcia/ipwireless/main.c
index dff24dae1485..63c32e3f23ba 100644
--- a/drivers/char/pcmcia/ipwireless/main.c
+++ b/drivers/char/pcmcia/ipwireless/main.c
@@ -195,9 +195,6 @@ static int config_ipwireless(struct ipw_dev *ipw)
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
- link->irq.Handler = ipwireless_interrupt;
-
INIT_WORK(&ipw->work_reboot, signalled_reboot_work);
ipwireless_init_hardware_v1(ipw->hardware, link->io.BasePort1,
@@ -205,8 +202,7 @@ static int config_ipwireless(struct ipw_dev *ipw)
ipw->is_v2_card, signalled_reboot_callback,
ipw);
- ret = pcmcia_request_irq(link, &link->irq);
-
+ ret = pcmcia_request_irq(link, ipwireless_interrupt);
if (ret != 0)
goto exit;
@@ -217,7 +213,7 @@ static int config_ipwireless(struct ipw_dev *ipw)
(unsigned int) link->io.BasePort1,
(unsigned int) (link->io.BasePort1 +
link->io.NumPorts1 - 1),
- (unsigned int) link->irq.AssignedIRQ);
+ (unsigned int) link->irq);
if (ipw->attr_memory && ipw->common_memory)
printk(KERN_INFO IPWIRELESS_PCCARD_NAME
": attr memory 0x%08lx-0x%08lx, common memory 0x%08lx-0x%08lx\n",
@@ -232,8 +228,7 @@ static int config_ipwireless(struct ipw_dev *ipw)
if (!ipw->network)
goto exit;
- ipw->tty = ipwireless_tty_create(ipw->hardware, ipw->network,
- ipw->nodes);
+ ipw->tty = ipwireless_tty_create(ipw->hardware, ipw->network);
if (!ipw->tty)
goto exit;
@@ -248,8 +243,6 @@ static int config_ipwireless(struct ipw_dev *ipw)
if (ret != 0)
goto exit;
- link->dev_node = &ipw->nodes[0];
-
return 0;
exit:
@@ -271,8 +264,6 @@ exit:
static void release_ipwireless(struct ipw_dev *ipw)
{
- pcmcia_disable_device(ipw->link);
-
if (ipw->common_memory) {
release_mem_region(ipw->request_common_memory.Base,
ipw->request_common_memory.Size);
@@ -288,7 +279,6 @@ static void release_ipwireless(struct ipw_dev *ipw)
if (ipw->attr_memory)
pcmcia_release_window(ipw->link, ipw->handle_attr_memory);
- /* Break the link with Card Services */
pcmcia_disable_device(ipw->link);
}
@@ -313,9 +303,6 @@ static int ipwireless_attach(struct pcmcia_device *link)
ipw->link = link;
link->priv = ipw;
- /* Link this device into our device list. */
- link->dev_node = &ipw->nodes[0];
-
ipw->hardware = ipwireless_hardware_create();
if (!ipw->hardware) {
kfree(ipw);
diff --git a/drivers/char/pcmcia/ipwireless/main.h b/drivers/char/pcmcia/ipwireless/main.h
index 0e0363af9ab2..96d0ef31b172 100644
--- a/drivers/char/pcmcia/ipwireless/main.h
+++ b/drivers/char/pcmcia/ipwireless/main.h
@@ -54,7 +54,6 @@ struct ipw_dev {
void __iomem *common_memory;
win_req_t request_common_memory;
- dev_node_t nodes[2];
/* Reference to attribute memory, containing CIS data */
void *attribute_memory;
diff --git a/drivers/char/pcmcia/ipwireless/tty.c b/drivers/char/pcmcia/ipwireless/tty.c
index 2bb7874a6899..1a2c2c3b068f 100644
--- a/drivers/char/pcmcia/ipwireless/tty.c
+++ b/drivers/char/pcmcia/ipwireless/tty.c
@@ -487,7 +487,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty, struct file *file,
return tty_mode_ioctl(linux_tty, file, cmd , arg);
}
-static int add_tty(dev_node_t *nodesp, int j,
+static int add_tty(int j,
struct ipw_hardware *hardware,
struct ipw_network *network, int channel_idx,
int secondary_channel_idx, int tty_type)
@@ -510,19 +510,13 @@ static int add_tty(dev_node_t *nodesp, int j,
ipwireless_associate_network_tty(network,
secondary_channel_idx,
ttys[j]);
- if (nodesp != NULL) {
- sprintf(nodesp->dev_name, "ttyIPWp%d", j);
- nodesp->major = ipw_tty_driver->major;
- nodesp->minor = j + ipw_tty_driver->minor_start;
- }
if (get_tty(j + ipw_tty_driver->minor_start) == ttys[j])
report_registering(ttys[j]);
return 0;
}
struct ipw_tty *ipwireless_tty_create(struct ipw_hardware *hardware,
- struct ipw_network *network,
- dev_node_t *nodes)
+ struct ipw_network *network)
{
int i, j;
@@ -539,26 +533,23 @@ struct ipw_tty *ipwireless_tty_create(struct ipw_hardware *hardware,
if (allfree) {
j = i;
- if (add_tty(&nodes[0], j, hardware, network,
+ if (add_tty(j, hardware, network,
IPW_CHANNEL_DIALLER, IPW_CHANNEL_RAS,
TTYTYPE_MODEM))
return NULL;
j += IPWIRELESS_PCMCIA_MINOR_RANGE;
- if (add_tty(&nodes[1], j, hardware, network,
+ if (add_tty(j, hardware, network,
IPW_CHANNEL_DIALLER, -1,
TTYTYPE_MONITOR))
return NULL;
j += IPWIRELESS_PCMCIA_MINOR_RANGE;
- if (add_tty(NULL, j, hardware, network,
+ if (add_tty(j, hardware, network,
IPW_CHANNEL_RAS, -1,
TTYTYPE_RAS_RAW))
return NULL;
- nodes[0].next = &nodes[1];
- nodes[1].next = NULL;
-
return ttys[i];
}
}
diff --git a/drivers/char/pcmcia/ipwireless/tty.h b/drivers/char/pcmcia/ipwireless/tty.h
index b0deb9168b6b..4da6c201f727 100644
--- a/drivers/char/pcmcia/ipwireless/tty.h
+++ b/drivers/char/pcmcia/ipwireless/tty.h
@@ -34,8 +34,7 @@ int ipwireless_tty_init(void);
void ipwireless_tty_release(void);
struct ipw_tty *ipwireless_tty_create(struct ipw_hardware *hw,
- struct ipw_network *net,
- dev_node_t *nodes);
+ struct ipw_network *net);
void ipwireless_tty_free(struct ipw_tty *tty);
void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
unsigned int length);
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index c31a0d913d37..308903ec8bf8 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -220,7 +220,6 @@ typedef struct _mgslpc_info {
/* PCMCIA support */
struct pcmcia_device *p_dev;
- dev_node_t node;
int stop;
/* SPPP/Cisco HDLC device parts */
@@ -552,10 +551,6 @@ static int mgslpc_probe(struct pcmcia_device *link)
/* Initialize the struct pcmcia_device structure */
- /* Interrupt setup */
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
- link->irq.Handler = NULL;
-
link->conf.Attributes = 0;
link->conf.IntType = INT_MEMORY_AND_IO;
@@ -608,9 +603,7 @@ static int mgslpc_config(struct pcmcia_device *link)
link->conf.ConfigIndex = 8;
link->conf.Present = PRESENT_OPTION;
- link->irq.Handler = mgslpc_isr;
-
- ret = pcmcia_request_irq(link, &link->irq);
+ ret = pcmcia_request_irq(link, mgslpc_isr);
if (ret)
goto failed;
ret = pcmcia_request_configuration(link, &link->conf);
@@ -618,17 +611,12 @@ static int mgslpc_config(struct pcmcia_device *link)
goto failed;
info->io_base = link->io.BasePort1;
- info->irq_level = link->irq.AssignedIRQ;
-
- /* add to linked list of devices */
- sprintf(info->node.dev_name, "mgslpc0");
- info->node.major = info->node.minor = 0;
- link->dev_node = &info->node;
+ info->irq_level = link->irq;
- printk(KERN_INFO "%s: index 0x%02x:",
- info->node.dev_name, link->conf.ConfigIndex);
+ dev_info(&link->dev, "index 0x%02x:",
+ link->conf.ConfigIndex);
if (link->conf.Attributes & CONF_ENABLE_IRQ)
- printk(", irq %d", link->irq.AssignedIRQ);
+ printk(", irq %d", link->irq);
if (link->io.NumPorts1)
printk(", io 0x%04x-0x%04x", link->io.BasePort1,
link->io.BasePort1+link->io.NumPorts1-1);
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
index fdd37543aa79..02abfddce45a 100644
--- a/drivers/char/ppdev.c
+++ b/drivers/char/ppdev.c
@@ -287,12 +287,10 @@ static int register_device (int minor, struct pp_struct *pp)
char *name;
int fl;
- name = kmalloc (strlen (CHRDEV) + 3, GFP_KERNEL);
+ name = kasprintf(GFP_KERNEL, CHRDEV "%x", minor);
if (name == NULL)
return -ENOMEM;
- sprintf (name, CHRDEV "%x", minor);
-
port = parport_find_number (minor);
if (!port) {
printk (KERN_WARNING "%s: no associated port!\n", name);
diff --git a/drivers/char/ps3flash.c b/drivers/char/ps3flash.c
index 606048b72bcf..85c004a518ee 100644
--- a/drivers/char/ps3flash.c
+++ b/drivers/char/ps3flash.c
@@ -305,8 +305,7 @@ static int ps3flash_flush(struct file *file, fl_owner_t id)
return ps3flash_writeback(ps3flash_dev);
}
-static int ps3flash_fsync(struct file *file, struct dentry *dentry,
- int datasync)
+static int ps3flash_fsync(struct file *file, int datasync)
{
return ps3flash_writeback(ps3flash_dev);
}
diff --git a/drivers/char/ramoops.c b/drivers/char/ramoops.c
new file mode 100644
index 000000000000..74f00b5ffa36
--- /dev/null
+++ b/drivers/char/ramoops.c
@@ -0,0 +1,162 @@
+/*
+ * RAM Oops/Panic logger
+ *
+ * Copyright (C) 2010 Marco Stornelli <marco.stornelli@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/kmsg_dump.h>
+#include <linux/time.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+
+#define RAMOOPS_KERNMSG_HDR "===="
+#define RAMOOPS_HEADER_SIZE (5 + sizeof(struct timeval))
+
+#define RECORD_SIZE 4096
+
+static ulong mem_address;
+module_param(mem_address, ulong, 0400);
+MODULE_PARM_DESC(mem_address,
+ "start of reserved RAM used to store oops/panic logs");
+
+static ulong mem_size;
+module_param(mem_size, ulong, 0400);
+MODULE_PARM_DESC(mem_size,
+ "size of reserved RAM used to store oops/panic logs");
+
+static int dump_oops = 1;
+module_param(dump_oops, int, 0600);
+MODULE_PARM_DESC(dump_oops,
+ "set to 1 to dump oopses, 0 to only dump panics (default 1)");
+
+static struct ramoops_context {
+ struct kmsg_dumper dump;
+ void *virt_addr;
+ phys_addr_t phys_addr;
+ unsigned long size;
+ int count;
+ int max_count;
+} oops_cxt;
+
+static void ramoops_do_dump(struct kmsg_dumper *dumper,
+ enum kmsg_dump_reason reason, const char *s1, unsigned long l1,
+ const char *s2, unsigned long l2)
+{
+ struct ramoops_context *cxt = container_of(dumper,
+ struct ramoops_context, dump);
+ unsigned long s1_start, s2_start;
+ unsigned long l1_cpy, l2_cpy;
+ int res;
+ char *buf;
+ struct timeval timestamp;
+
+ /* Only dump oopses if dump_oops is set */
+ if (reason == KMSG_DUMP_OOPS && !dump_oops)
+ return;
+
+ buf = (char *)(cxt->virt_addr + (cxt->count * RECORD_SIZE));
+ memset(buf, '\0', RECORD_SIZE);
+ res = sprintf(buf, "%s", RAMOOPS_KERNMSG_HDR);
+ buf += res;
+ do_gettimeofday(&timestamp);
+ res = sprintf(buf, "%lu.%lu\n", (long)timestamp.tv_sec, (long)timestamp.tv_usec);
+ buf += res;
+
+ l2_cpy = min(l2, (unsigned long)(RECORD_SIZE - RAMOOPS_HEADER_SIZE));
+ l1_cpy = min(l1, (unsigned long)(RECORD_SIZE - RAMOOPS_HEADER_SIZE) - l2_cpy);
+
+ s2_start = l2 - l2_cpy;
+ s1_start = l1 - l1_cpy;
+
+ memcpy(buf, s1 + s1_start, l1_cpy);
+ memcpy(buf + l1_cpy, s2 + s2_start, l2_cpy);
+
+ cxt->count = (cxt->count + 1) % cxt->max_count;
+}
+
+static int __init ramoops_init(void)
+{
+ struct ramoops_context *cxt = &oops_cxt;
+ int err = -EINVAL;
+
+ if (!mem_size) {
+ printk(KERN_ERR "ramoops: invalid size specification");
+ goto fail3;
+ }
+
+ mem_size = rounddown_pow_of_two(mem_size);
+
+ if (mem_size < RECORD_SIZE) {
+ printk(KERN_ERR "ramoops: size too small");
+ goto fail3;
+ }
+
+ cxt->max_count = mem_size / RECORD_SIZE;
+ cxt->count = 0;
+ cxt->size = mem_size;
+ cxt->phys_addr = mem_address;
+
+ if (!request_mem_region(cxt->phys_addr, cxt->size, "ramoops")) {
+ printk(KERN_ERR "ramoops: request mem region failed");
+ err = -EINVAL;
+ goto fail3;
+ }
+
+ cxt->virt_addr = ioremap(cxt->phys_addr, cxt->size);
+ if (!cxt->virt_addr) {
+ printk(KERN_ERR "ramoops: ioremap failed");
+ goto fail2;
+ }
+
+ cxt->dump.dump = ramoops_do_dump;
+ err = kmsg_dump_register(&cxt->dump);
+ if (err) {
+ printk(KERN_ERR "ramoops: registering kmsg dumper failed");
+ goto fail1;
+ }
+
+ return 0;
+
+fail1:
+ iounmap(cxt->virt_addr);
+fail2:
+ release_mem_region(cxt->phys_addr, cxt->size);
+fail3:
+ return err;
+}
+
+static void __exit ramoops_exit(void)
+{
+ struct ramoops_context *cxt = &oops_cxt;
+
+ if (kmsg_dump_unregister(&cxt->dump) < 0)
+ printk(KERN_WARNING "ramoops: could not unregister kmsg_dumper");
+
+ iounmap(cxt->virt_addr);
+ release_mem_region(cxt->phys_addr, cxt->size);
+}
+
+
+module_init(ramoops_init);
+module_exit(ramoops_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Marco Stornelli <marco.stornelli@gmail.com>");
+MODULE_DESCRIPTION("RAM Oops/Panic logger/driver");
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 2fd3d39995d5..8d85587b6d4f 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -257,6 +257,7 @@
#define INPUT_POOL_WORDS 128
#define OUTPUT_POOL_WORDS 32
#define SEC_XFER_SIZE 512
+#define EXTRACT_SIZE 10
/*
* The minimum number of bits of entropy before we wake up a read on
@@ -414,7 +415,7 @@ struct entropy_store {
unsigned add_ptr;
int entropy_count;
int input_rotate;
- __u8 *last_data;
+ __u8 last_data[EXTRACT_SIZE];
};
static __u32 input_pool_data[INPUT_POOL_WORDS];
@@ -714,8 +715,6 @@ void add_disk_randomness(struct gendisk *disk)
}
#endif
-#define EXTRACT_SIZE 10
-
/*********************************************************************
*
* Entropy extraction routines
@@ -862,7 +861,7 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
while (nbytes) {
extract_buf(r, tmp);
- if (r->last_data) {
+ if (fips_enabled) {
spin_lock_irqsave(&r->lock, flags);
if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
panic("Hardware RNG duplicated output!\n");
@@ -951,9 +950,6 @@ static void init_std_data(struct entropy_store *r)
now = ktime_get_real();
mix_pool_bytes(r, &now, sizeof(now));
mix_pool_bytes(r, utsname(), sizeof(*(utsname())));
- /* Enable continuous test in fips mode */
- if (fips_enabled)
- r->last_data = kmalloc(EXTRACT_SIZE, GFP_KERNEL);
}
static int rand_initialize(void)
diff --git a/drivers/char/raw.c b/drivers/char/raw.c
index 8756ab0daa8b..b38942f6bf31 100644
--- a/drivers/char/raw.c
+++ b/drivers/char/raw.c
@@ -121,13 +121,17 @@ static int raw_release(struct inode *inode, struct file *filp)
/*
* Forward ioctls to the underlying block device.
*/
-static int
-raw_ioctl(struct inode *inode, struct file *filp,
- unsigned int command, unsigned long arg)
+static long
+raw_ioctl(struct file *filp, unsigned int command, unsigned long arg)
{
struct block_device *bdev = filp->private_data;
+ int ret;
+
+ lock_kernel();
+ ret = blkdev_ioctl(bdev, 0, command, arg);
+ unlock_kernel();
- return blkdev_ioctl(bdev, 0, command, arg);
+ return ret;
}
static void bind_device(struct raw_config_request *rq)
@@ -141,13 +145,14 @@ static void bind_device(struct raw_config_request *rq)
* Deal with ioctls against the raw-device control interface, to bind
* and unbind other raw devices.
*/
-static int raw_ctl_ioctl(struct inode *inode, struct file *filp,
- unsigned int command, unsigned long arg)
+static long raw_ctl_ioctl(struct file *filp, unsigned int command,
+ unsigned long arg)
{
struct raw_config_request rq;
struct raw_device_data *rawdev;
int err = 0;
+ lock_kernel();
switch (command) {
case RAW_SETBIND:
case RAW_GETBIND:
@@ -240,25 +245,26 @@ static int raw_ctl_ioctl(struct inode *inode, struct file *filp,
break;
}
out:
+ unlock_kernel();
return err;
}
static const struct file_operations raw_fops = {
- .read = do_sync_read,
- .aio_read = generic_file_aio_read,
- .write = do_sync_write,
- .aio_write = blkdev_aio_write,
- .fsync = blkdev_fsync,
- .open = raw_open,
- .release= raw_release,
- .ioctl = raw_ioctl,
- .owner = THIS_MODULE,
+ .read = do_sync_read,
+ .aio_read = generic_file_aio_read,
+ .write = do_sync_write,
+ .aio_write = blkdev_aio_write,
+ .fsync = blkdev_fsync,
+ .open = raw_open,
+ .release = raw_release,
+ .unlocked_ioctl = raw_ioctl,
+ .owner = THIS_MODULE,
};
static const struct file_operations raw_ctl_fops = {
- .ioctl = raw_ctl_ioctl,
- .open = raw_open,
- .owner = THIS_MODULE,
+ .unlocked_ioctl = raw_ctl_ioctl,
+ .open = raw_open,
+ .owner = THIS_MODULE,
};
static struct cdev raw_cdev;
diff --git a/drivers/char/serial167.c b/drivers/char/serial167.c
index 8dfd24721a82..ecbe479c7d68 100644
--- a/drivers/char/serial167.c
+++ b/drivers/char/serial167.c
@@ -176,23 +176,6 @@ static void config_setup(struct cyclades_port *);
static void show_status(int);
#endif
-#ifdef CONFIG_REMOTE_DEBUG
-static void debug_setup(void);
-void queueDebugChar(int c);
-int getDebugChar(void);
-
-#define DEBUG_PORT 1
-#define DEBUG_LEN 256
-
-typedef struct {
- int in;
- int out;
- unsigned char buf[DEBUG_LEN];
-} debugq;
-
-debugq debugiq;
-#endif
-
/*
* I have my own version of udelay(), as it is needed when initialising
* the chip, before the delay loop has been calibrated. Should probably
@@ -515,11 +498,6 @@ static irqreturn_t cd2401_tx_interrupt(int irq, void *dev_id)
/* determine the channel and change to that context */
channel = (u_short) (base_addr[CyLICR] >> 2);
-#ifdef CONFIG_REMOTE_DEBUG
- if (channel == DEBUG_PORT) {
- panic("TxInt on debug port!!!");
- }
-#endif
/* validate the port number (as configured and open) */
if ((channel < 0) || (NR_PORTS <= channel)) {
base_addr[CyIER] &= ~(CyTxMpty | CyTxRdy);
@@ -627,7 +605,6 @@ static irqreturn_t cd2401_rx_interrupt(int irq, void *dev_id)
char data;
int char_count;
int save_cnt;
- int len;
/* determine the channel and change to that context */
channel = (u_short) (base_addr[CyLICR] >> 2);
@@ -635,14 +612,6 @@ static irqreturn_t cd2401_rx_interrupt(int irq, void *dev_id)
info->last_active = jiffies;
save_cnt = char_count = base_addr[CyRFOC];
-#ifdef CONFIG_REMOTE_DEBUG
- if (channel == DEBUG_PORT) {
- while (char_count--) {
- data = base_addr[CyRDR];
- queueDebugChar(data);
- }
- } else
-#endif
/* if there is nowhere to put the data, discard it */
if (info->tty == 0) {
while (char_count--) {
@@ -1528,7 +1497,6 @@ static int
cy_ioctl(struct tty_struct *tty, struct file *file,
unsigned int cmd, unsigned long arg)
{
- unsigned long val;
struct cyclades_port *info = tty->driver_data;
int ret_val = 0;
void __user *argp = (void __user *)arg;
@@ -2197,9 +2165,7 @@ static int __init serial167_init(void)
port_num++;
info++;
}
-#ifdef CONFIG_REMOTE_DEBUG
- debug_setup();
-#endif
+
ret = request_irq(MVME167_IRQ_SER_ERR, cd2401_rxerr_interrupt, 0,
"cd2401_errors", cd2401_rxerr_interrupt);
if (ret) {
@@ -2520,193 +2486,4 @@ static int __init serial167_console_init(void)
console_initcall(serial167_console_init);
-#ifdef CONFIG_REMOTE_DEBUG
-void putDebugChar(int c)
-{
- volatile unsigned char *base_addr = (u_char *) BASE_ADDR;
- unsigned long flags;
- volatile u_char sink;
- u_char ier;
- int port;
-
- local_irq_save(flags);
-
- /* Ensure transmitter is enabled! */
-
- port = DEBUG_PORT;
- base_addr[CyCAR] = (u_char) port;
- while (base_addr[CyCCR])
- ;
- base_addr[CyCCR] = CyENB_XMTR;
-
- ier = base_addr[CyIER];
- base_addr[CyIER] = CyTxMpty;
-
- while (1) {
- if (pcc2chip[PccSCCTICR] & 0x20) {
- /* We have a Tx int. Acknowledge it */
- sink = pcc2chip[PccTPIACKR];
- if ((base_addr[CyLICR] >> 2) == port) {
- base_addr[CyTDR] = c;
- base_addr[CyTEOIR] = 0;
- break;
- } else
- base_addr[CyTEOIR] = CyNOTRANS;
- }
- }
-
- base_addr[CyIER] = ier;
-
- local_irq_restore(flags);
-}
-
-int getDebugChar()
-{
- volatile unsigned char *base_addr = (u_char *) BASE_ADDR;
- unsigned long flags;
- volatile u_char sink;
- u_char ier;
- int port;
- int i, c;
-
- i = debugiq.out;
- if (i != debugiq.in) {
- c = debugiq.buf[i];
- if (++i == DEBUG_LEN)
- i = 0;
- debugiq.out = i;
- return c;
- }
- /* OK, nothing in queue, wait in poll loop */
-
- local_irq_save(flags);
-
- /* Ensure receiver is enabled! */
-
- port = DEBUG_PORT;
- base_addr[CyCAR] = (u_char) port;
-#if 0
- while (base_addr[CyCCR])
- ;
- base_addr[CyCCR] = CyENB_RCVR;
-#endif
- ier = base_addr[CyIER];
- base_addr[CyIER] = CyRxData;
-
- while (1) {
- if (pcc2chip[PccSCCRICR] & 0x20) {
- /* We have a Rx int. Acknowledge it */
- sink = pcc2chip[PccRPIACKR];
- if ((base_addr[CyLICR] >> 2) == port) {
- int cnt = base_addr[CyRFOC];
- while (cnt-- > 0) {
- c = base_addr[CyRDR];
- if (c == 0)
- printk
- ("!! debug char is null (cnt=%d) !!",
- cnt);
- else
- queueDebugChar(c);
- }
- base_addr[CyREOIR] = 0;
- i = debugiq.out;
- if (i == debugiq.in)
- panic("Debug input queue empty!");
- c = debugiq.buf[i];
- if (++i == DEBUG_LEN)
- i = 0;
- debugiq.out = i;
- break;
- } else
- base_addr[CyREOIR] = CyNOTRANS;
- }
- }
-
- base_addr[CyIER] = ier;
-
- local_irq_restore(flags);
-
- return (c);
-}
-
-void queueDebugChar(int c)
-{
- int i;
-
- i = debugiq.in;
- debugiq.buf[i] = c;
- if (++i == DEBUG_LEN)
- i = 0;
- if (i != debugiq.out)
- debugiq.in = i;
-}
-
-static void debug_setup()
-{
- unsigned long flags;
- volatile unsigned char *base_addr = (u_char *) BASE_ADDR;
- int i, cflag;
-
- cflag = B19200;
-
- local_irq_save(flags);
-
- for (i = 0; i < 4; i++) {
- base_addr[CyCAR] = i;
- base_addr[CyLICR] = i << 2;
- }
-
- debugiq.in = debugiq.out = 0;
-
- base_addr[CyCAR] = DEBUG_PORT;
-
- /* baud rate */
- i = cflag & CBAUD;
-
- base_addr[CyIER] = 0;
-
- base_addr[CyCMR] = CyASYNC;
- base_addr[CyLICR] = DEBUG_PORT << 2;
- base_addr[CyLIVR] = 0x5c;
-
- /* tx and rx baud rate */
-
- base_addr[CyTCOR] = baud_co[i];
- base_addr[CyTBPR] = baud_bpr[i];
- base_addr[CyRCOR] = baud_co[i] >> 5;
- base_addr[CyRBPR] = baud_bpr[i];
-
- /* set line characteristics according configuration */
-
- base_addr[CySCHR1] = 0;
- base_addr[CySCHR2] = 0;
- base_addr[CySCRL] = 0;
- base_addr[CySCRH] = 0;
- base_addr[CyCOR1] = Cy_8_BITS | CyPARITY_NONE;
- base_addr[CyCOR2] = 0;
- base_addr[CyCOR3] = Cy_1_STOP;
- base_addr[CyCOR4] = baud_cor4[i];
- base_addr[CyCOR5] = 0;
- base_addr[CyCOR6] = 0;
- base_addr[CyCOR7] = 0;
-
- write_cy_cmd(base_addr, CyINIT_CHAN);
- write_cy_cmd(base_addr, CyENB_RCVR);
-
- base_addr[CyCAR] = DEBUG_PORT; /* !!! Is this needed? */
-
- base_addr[CyRTPRL] = 2;
- base_addr[CyRTPRH] = 0;
-
- base_addr[CyMSVR1] = CyRTS;
- base_addr[CyMSVR2] = CyDTR;
-
- base_addr[CyIER] = CyRxData;
-
- local_irq_restore(flags);
-
-} /* debug_setup */
-
-#endif
-
MODULE_LICENSE("GPL");
diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c
index 59de2525d303..5d15630a5830 100644
--- a/drivers/char/sysrq.c
+++ b/drivers/char/sysrq.c
@@ -1,7 +1,4 @@
-/* -*- linux-c -*-
- *
- * $Id: sysrq.c,v 1.15 1998/08/23 14:56:41 mj Exp $
- *
+/*
* Linux Magic System Request Key Hacks
*
* (c) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz>
@@ -10,8 +7,13 @@
* (c) 2000 Crutcher Dunnavant <crutcher+kernel@datastacks.com>
* overhauled to use key registration
* based upon discusions in irc://irc.openprojects.net/#kernelnewbies
+ *
+ * Copyright (c) 2010 Dmitry Torokhov
+ * Input handler conversion
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
@@ -39,33 +41,34 @@
#include <linux/hrtimer.h>
#include <linux/oom.h>
#include <linux/slab.h>
+#include <linux/input.h>
#include <asm/ptrace.h>
#include <asm/irq_regs.h>
/* Whether we react on sysrq keys or just ignore them */
-int __read_mostly __sysrq_enabled = 1;
-
-static int __read_mostly sysrq_always_enabled;
+static int __read_mostly sysrq_enabled = 1;
+static bool __read_mostly sysrq_always_enabled;
-int sysrq_on(void)
+static bool sysrq_on(void)
{
- return __sysrq_enabled || sysrq_always_enabled;
+ return sysrq_enabled || sysrq_always_enabled;
}
/*
* A value of 1 means 'all', other nonzero values are an op mask:
*/
-static inline int sysrq_on_mask(int mask)
+static bool sysrq_on_mask(int mask)
{
- return sysrq_always_enabled || __sysrq_enabled == 1 ||
- (__sysrq_enabled & mask);
+ return sysrq_always_enabled ||
+ sysrq_enabled == 1 ||
+ (sysrq_enabled & mask);
}
static int __init sysrq_always_enabled_setup(char *str)
{
- sysrq_always_enabled = 1;
- printk(KERN_INFO "debug: sysrq always enabled.\n");
+ sysrq_always_enabled = true;
+ pr_info("sysrq always enabled.\n");
return 1;
}
@@ -76,6 +79,7 @@ __setup("sysrq_always_enabled", sysrq_always_enabled_setup);
static void sysrq_handle_loglevel(int key, struct tty_struct *tty)
{
int i;
+
i = key - '0';
console_loglevel = 7;
printk("Loglevel set to %d\n", i);
@@ -101,7 +105,7 @@ static struct sysrq_key_op sysrq_SAK_op = {
.enable_mask = SYSRQ_ENABLE_KEYBOARD,
};
#else
-#define sysrq_SAK_op (*(struct sysrq_key_op *)0)
+#define sysrq_SAK_op (*(struct sysrq_key_op *)NULL)
#endif
#ifdef CONFIG_VT
@@ -119,7 +123,7 @@ static struct sysrq_key_op sysrq_unraw_op = {
.enable_mask = SYSRQ_ENABLE_KEYBOARD,
};
#else
-#define sysrq_unraw_op (*(struct sysrq_key_op *)0)
+#define sysrq_unraw_op (*(struct sysrq_key_op *)NULL)
#endif /* CONFIG_VT */
static void sysrq_handle_crash(int key, struct tty_struct *tty)
@@ -195,7 +199,7 @@ static struct sysrq_key_op sysrq_showlocks_op = {
.action_msg = "Show Locks Held",
};
#else
-#define sysrq_showlocks_op (*(struct sysrq_key_op *)0)
+#define sysrq_showlocks_op (*(struct sysrq_key_op *)NULL)
#endif
#ifdef CONFIG_SMP
@@ -289,7 +293,7 @@ static struct sysrq_key_op sysrq_showstate_blocked_op = {
static void sysrq_ftrace_dump(int key, struct tty_struct *tty)
{
- ftrace_dump();
+ ftrace_dump(DUMP_ALL);
}
static struct sysrq_key_op sysrq_ftrace_dump_op = {
.handler = sysrq_ftrace_dump,
@@ -298,7 +302,7 @@ static struct sysrq_key_op sysrq_ftrace_dump_op = {
.enable_mask = SYSRQ_ENABLE_DUMP,
};
#else
-#define sysrq_ftrace_dump_op (*(struct sysrq_key_op *)0)
+#define sysrq_ftrace_dump_op (*(struct sysrq_key_op *)NULL)
#endif
static void sysrq_handle_showmem(int key, struct tty_struct *tty)
@@ -477,6 +481,7 @@ struct sysrq_key_op *__sysrq_get_key_op(int key)
i = sysrq_key_table_key2index(key);
if (i != -1)
op_p = sysrq_key_table[i];
+
return op_p;
}
@@ -488,11 +493,7 @@ static void __sysrq_put_key_op(int key, struct sysrq_key_op *op_p)
sysrq_key_table[i] = op_p;
}
-/*
- * This is the non-locking version of handle_sysrq. It must/can only be called
- * by sysrq key handlers, as they are inside of the lock
- */
-void __handle_sysrq(int key, struct tty_struct *tty, int check_mask)
+static void __handle_sysrq(int key, struct tty_struct *tty, int check_mask)
{
struct sysrq_key_op *op_p;
int orig_log_level;
@@ -544,10 +545,6 @@ void __handle_sysrq(int key, struct tty_struct *tty, int check_mask)
spin_unlock_irqrestore(&sysrq_key_table_lock, flags);
}
-/*
- * This function is called by the keyboard handler when SysRq is pressed
- * and any other keycode arrives.
- */
void handle_sysrq(int key, struct tty_struct *tty)
{
if (sysrq_on())
@@ -555,10 +552,177 @@ void handle_sysrq(int key, struct tty_struct *tty)
}
EXPORT_SYMBOL(handle_sysrq);
+#ifdef CONFIG_INPUT
+
+/* Simple translation table for the SysRq keys */
+static const unsigned char sysrq_xlate[KEY_MAX + 1] =
+ "\000\0331234567890-=\177\t" /* 0x00 - 0x0f */
+ "qwertyuiop[]\r\000as" /* 0x10 - 0x1f */
+ "dfghjkl;'`\000\\zxcv" /* 0x20 - 0x2f */
+ "bnm,./\000*\000 \000\201\202\203\204\205" /* 0x30 - 0x3f */
+ "\206\207\210\211\212\000\000789-456+1" /* 0x40 - 0x4f */
+ "230\177\000\000\213\214\000\000\000\000\000\000\000\000\000\000" /* 0x50 - 0x5f */
+ "\r\000/"; /* 0x60 - 0x6f */
+
+static bool sysrq_down;
+static int sysrq_alt_use;
+static int sysrq_alt;
+
+static bool sysrq_filter(struct input_handle *handle, unsigned int type,
+ unsigned int code, int value)
+{
+ if (type != EV_KEY)
+ goto out;
+
+ switch (code) {
+
+ case KEY_LEFTALT:
+ case KEY_RIGHTALT:
+ if (value)
+ sysrq_alt = code;
+ else if (sysrq_down && code == sysrq_alt_use)
+ sysrq_down = false;
+ break;
+
+ case KEY_SYSRQ:
+ if (value == 1 && sysrq_alt) {
+ sysrq_down = true;
+ sysrq_alt_use = sysrq_alt;
+ }
+ break;
+
+ default:
+ if (sysrq_down && value && value != 2)
+ __handle_sysrq(sysrq_xlate[code], NULL, 1);
+ break;
+ }
+
+out:
+ return sysrq_down;
+}
+
+static int sysrq_connect(struct input_handler *handler,
+ struct input_dev *dev,
+ const struct input_device_id *id)
+{
+ struct input_handle *handle;
+ int error;
+
+ sysrq_down = false;
+ sysrq_alt = 0;
+
+ handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
+ if (!handle)
+ return -ENOMEM;
+
+ handle->dev = dev;
+ handle->handler = handler;
+ handle->name = "sysrq";
+
+ error = input_register_handle(handle);
+ if (error) {
+ pr_err("Failed to register input sysrq handler, error %d\n",
+ error);
+ goto err_free;
+ }
+
+ error = input_open_device(handle);
+ if (error) {
+ pr_err("Failed to open input device, error %d\n", error);
+ goto err_unregister;
+ }
+
+ return 0;
+
+ err_unregister:
+ input_unregister_handle(handle);
+ err_free:
+ kfree(handle);
+ return error;
+}
+
+static void sysrq_disconnect(struct input_handle *handle)
+{
+ input_close_device(handle);
+ input_unregister_handle(handle);
+ kfree(handle);
+}
+
+/*
+ * We are matching on KEY_LEFTALT instead of KEY_SYSRQ because not all
+ * keyboards have the SysRq key predefined, so a user may add it to the
+ * keymap later, but we expect all such keyboards to have a left alt key.
+ */
+static const struct input_device_id sysrq_ids[] = {
+ {
+ .flags = INPUT_DEVICE_ID_MATCH_EVBIT |
+ INPUT_DEVICE_ID_MATCH_KEYBIT,
+ .evbit = { BIT_MASK(EV_KEY) },
+ .keybit = { BIT_MASK(KEY_LEFTALT) },
+ },
+ { },
+};
+
+static struct input_handler sysrq_handler = {
+ .filter = sysrq_filter,
+ .connect = sysrq_connect,
+ .disconnect = sysrq_disconnect,
+ .name = "sysrq",
+ .id_table = sysrq_ids,
+};
+
+static bool sysrq_handler_registered;
+
+static inline void sysrq_register_handler(void)
+{
+ int error;
+
+ error = input_register_handler(&sysrq_handler);
+ if (error)
+ pr_err("Failed to register input handler, error %d", error);
+ else
+ sysrq_handler_registered = true;
+}
+
+static inline void sysrq_unregister_handler(void)
+{
+ if (sysrq_handler_registered) {
+ input_unregister_handler(&sysrq_handler);
+ sysrq_handler_registered = false;
+ }
+}
+
+#else
+
+static inline void sysrq_register_handler(void)
+{
+}
+
+static inline void sysrq_unregister_handler(void)
+{
+}
+
+#endif /* CONFIG_INPUT */
+
+int sysrq_toggle_support(int enable_mask)
+{
+ bool was_enabled = sysrq_on();
+
+ sysrq_enabled = enable_mask;
+
+ if (was_enabled != sysrq_on()) {
+ if (sysrq_on())
+ sysrq_register_handler();
+ else
+ sysrq_unregister_handler();
+ }
+
+ return 0;
+}
+
static int __sysrq_swap_key_ops(int key, struct sysrq_key_op *insert_op_p,
struct sysrq_key_op *remove_op_p)
{
-
int retval;
unsigned long flags;
@@ -599,6 +763,7 @@ static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
return -EFAULT;
__handle_sysrq(c, NULL, 0);
}
+
return count;
}
@@ -606,10 +771,28 @@ static const struct file_operations proc_sysrq_trigger_operations = {
.write = write_sysrq_trigger,
};
+static void sysrq_init_procfs(void)
+{
+ if (!proc_create("sysrq-trigger", S_IWUSR, NULL,
+ &proc_sysrq_trigger_operations))
+ pr_err("Failed to register proc interface\n");
+}
+
+#else
+
+static inline void sysrq_init_procfs(void)
+{
+}
+
+#endif /* CONFIG_PROC_FS */
+
static int __init sysrq_init(void)
{
- proc_create("sysrq-trigger", S_IWUSR, NULL, &proc_sysrq_trigger_operations);
+ sysrq_init_procfs();
+
+ if (sysrq_on())
+ sysrq_register_handler();
+
return 0;
}
module_init(sysrq_init);
-#endif
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index f5fc64f89c5c..4dc338f3d1aa 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -17,14 +17,16 @@ menuconfig TCG_TPM
obtained at: <http://sourceforge.net/projects/trousers>. To
compile this driver as a module, choose M here; the module
will be called tpm. If unsure, say N.
- Note: For more TPM drivers enable CONFIG_PNP, CONFIG_ACPI
+ Notes:
+ 1) For more TPM drivers enable CONFIG_PNP, CONFIG_ACPI
and CONFIG_PNPACPI.
+ 2) Without ACPI enabled, the BIOS event log won't be accessible,
+ which is required to validate the PCR 0-7 values.
if TCG_TPM
config TCG_TIS
tristate "TPM Interface Specification 1.2 Interface"
- depends on PNP
---help---
If you have a TPM security chip that is compliant with the
TCG TIS 1.2 TPM specification say Yes and it will be accessible
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
index 068c816e6942..05ad4a17a28f 100644
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm.c
@@ -1068,6 +1068,27 @@ void tpm_remove_hardware(struct device *dev)
}
EXPORT_SYMBOL_GPL(tpm_remove_hardware);
+#define TPM_ORD_SAVESTATE cpu_to_be32(152)
+#define SAVESTATE_RESULT_SIZE 10
+
+static struct tpm_input_header savestate_header = {
+ .tag = TPM_TAG_RQU_COMMAND,
+ .length = cpu_to_be32(10),
+ .ordinal = TPM_ORD_SAVESTATE
+};
+
+/* Bug workaround - some TPMs don't flush the most
+ * recently changed pcr on suspend, so force the flush
+ * with an extend to the selected _unused_ non-volatile pcr.
+ */
+static int tpm_suspend_pcr;
+static int __init tpm_suspend_setup(char *str)
+{
+ get_option(&str, &tpm_suspend_pcr);
+ return 1;
+}
+__setup("tpm_suspend_pcr=", tpm_suspend_setup);
+
/*
* We are about to suspend. Save the TPM state
* so that it can be restored.
@@ -1075,17 +1096,29 @@ EXPORT_SYMBOL_GPL(tpm_remove_hardware);
int tpm_pm_suspend(struct device *dev, pm_message_t pm_state)
{
struct tpm_chip *chip = dev_get_drvdata(dev);
- u8 savestate[] = {
- 0, 193, /* TPM_TAG_RQU_COMMAND */
- 0, 0, 0, 10, /* blob length (in bytes) */
- 0, 0, 0, 152 /* TPM_ORD_SaveState */
- };
+ struct tpm_cmd_t cmd;
+ int rc;
+
+ u8 dummy_hash[TPM_DIGEST_SIZE] = { 0 };
if (chip == NULL)
return -ENODEV;
- tpm_transmit(chip, savestate, sizeof(savestate));
- return 0;
+ /* for buggy tpm, flush pcrs with extend to selected dummy */
+ if (tpm_suspend_pcr) {
+ cmd.header.in = pcrextend_header;
+ cmd.params.pcrextend_in.pcr_idx = cpu_to_be32(tpm_suspend_pcr);
+ memcpy(cmd.params.pcrextend_in.hash, dummy_hash,
+ TPM_DIGEST_SIZE);
+ rc = transmit_cmd(chip, &cmd, EXTEND_PCR_RESULT_SIZE,
+ "extending dummy pcr before suspend");
+ }
+
+ /* now do the actual savestate */
+ cmd.header.in = savestate_header;
+ rc = transmit_cmd(chip, &cmd, SAVESTATE_RESULT_SIZE,
+ "sending savestate before suspend");
+ return rc;
}
EXPORT_SYMBOL_GPL(tpm_pm_suspend);
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index 94345994f8a6..24314a9cffe8 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -598,7 +598,7 @@ out_err:
tpm_remove_hardware(chip->dev);
return rc;
}
-
+#ifdef CONFIG_PNP
static int __devinit tpm_tis_pnp_init(struct pnp_dev *pnp_dev,
const struct pnp_device_id *pnp_id)
{
@@ -663,7 +663,7 @@ static struct pnp_driver tis_pnp_driver = {
module_param_string(hid, tpm_pnp_tbl[TIS_HID_USR_IDX].id,
sizeof(tpm_pnp_tbl[TIS_HID_USR_IDX].id), 0444);
MODULE_PARM_DESC(hid, "Set additional specific HID for this driver to probe");
-
+#endif
static int tpm_tis_suspend(struct platform_device *dev, pm_message_t msg)
{
return tpm_pm_suspend(&dev->dev, msg);
@@ -690,21 +690,21 @@ MODULE_PARM_DESC(force, "Force device probe rather than using ACPI entry");
static int __init init_tis(void)
{
int rc;
+#ifdef CONFIG_PNP
+ if (!force)
+ return pnp_register_driver(&tis_pnp_driver);
+#endif
- if (force) {
- rc = platform_driver_register(&tis_drv);
- if (rc < 0)
- return rc;
- if (IS_ERR(pdev=platform_device_register_simple("tpm_tis", -1, NULL, 0)))
- return PTR_ERR(pdev);
- if((rc=tpm_tis_init(&pdev->dev, TIS_MEM_BASE, TIS_MEM_LEN, 0)) != 0) {
- platform_device_unregister(pdev);
- platform_driver_unregister(&tis_drv);
- }
+ rc = platform_driver_register(&tis_drv);
+ if (rc < 0)
return rc;
+ if (IS_ERR(pdev=platform_device_register_simple("tpm_tis", -1, NULL, 0)))
+ return PTR_ERR(pdev);
+ if((rc=tpm_tis_init(&pdev->dev, TIS_MEM_BASE, TIS_MEM_LEN, 0)) != 0) {
+ platform_device_unregister(pdev);
+ platform_driver_unregister(&tis_drv);
}
-
- return pnp_register_driver(&tis_pnp_driver);
+ return rc;
}
static void __exit cleanup_tis(void)
@@ -728,12 +728,14 @@ static void __exit cleanup_tis(void)
list_del(&i->list);
}
spin_unlock(&tis_lock);
-
- if (force) {
- platform_device_unregister(pdev);
- platform_driver_unregister(&tis_drv);
- } else
+#ifdef CONFIG_PNP
+ if (!force) {
pnp_unregister_driver(&tis_pnp_driver);
+ return;
+ }
+#endif
+ platform_device_unregister(pdev);
+ platform_driver_unregister(&tis_drv);
}
module_init(init_tis);
diff --git a/drivers/char/tty_buffer.c b/drivers/char/tty_buffer.c
index 7ee52164d474..cc1e9850d655 100644
--- a/drivers/char/tty_buffer.c
+++ b/drivers/char/tty_buffer.c
@@ -238,7 +238,7 @@ EXPORT_SYMBOL_GPL(tty_buffer_request_room);
* @size: size
*
* Queue a series of bytes to the tty buffering. All the characters
- * passed are marked as without error. Returns the number added.
+ * passed are marked with the supplied flag. Returns the number added.
*
* Locking: Called functions may take tty->buf.lock
*/
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index 6da962c9b21c..d71f0fc34b46 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -1875,6 +1875,7 @@ got_driver:
*/
if (filp->f_op == &hung_up_tty_fops)
filp->f_op = &tty_fops;
+ unlock_kernel();
goto retry_open;
}
unlock_kernel();
diff --git a/drivers/char/viotape.c b/drivers/char/viotape.c
index 1144a04cda6e..42f7fa442ff8 100644
--- a/drivers/char/viotape.c
+++ b/drivers/char/viotape.c
@@ -866,7 +866,7 @@ static int viotape_probe(struct vio_dev *vdev, const struct vio_device_id *id)
{
int i = vdev->unit_address;
int j;
- struct device_node *node = vdev->dev.archdata.of_node;
+ struct device_node *node = vdev->dev.of_node;
if (i >= VIOTAPE_MAX_TAPE)
return -ENODEV;
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 196428c2287a..8c99bf1b5e9f 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -33,35 +33,6 @@
#include <linux/workqueue.h>
#include "hvc_console.h"
-/* Moved here from .h file in order to disable MULTIPORT. */
-#define VIRTIO_CONSOLE_F_MULTIPORT 1 /* Does host provide multiple ports? */
-
-struct virtio_console_multiport_conf {
- struct virtio_console_config config;
- /* max. number of ports this device can hold */
- __u32 max_nr_ports;
- /* number of ports added so far */
- __u32 nr_ports;
-} __attribute__((packed));
-
-/*
- * A message that's passed between the Host and the Guest for a
- * particular port.
- */
-struct virtio_console_control {
- __u32 id; /* Port number */
- __u16 event; /* The kind of control event (see below) */
- __u16 value; /* Extra information for the key */
-};
-
-/* Some events for control messages */
-#define VIRTIO_CONSOLE_PORT_READY 0
-#define VIRTIO_CONSOLE_CONSOLE_PORT 1
-#define VIRTIO_CONSOLE_RESIZE 2
-#define VIRTIO_CONSOLE_PORT_OPEN 3
-#define VIRTIO_CONSOLE_PORT_NAME 4
-#define VIRTIO_CONSOLE_PORT_REMOVE 5
-
/*
* This is a global struct for storing common data for all the devices
* this driver handles.
@@ -107,6 +78,9 @@ struct console {
/* The hvc device associated with this console port */
struct hvc_struct *hvc;
+ /* The size of the console */
+ struct winsize ws;
+
/*
* This number identifies the number that we used to register
* with hvc in hvc_instantiate() and hvc_alloc(); this is the
@@ -139,7 +113,6 @@ struct ports_device {
* notification
*/
struct work_struct control_work;
- struct work_struct config_work;
struct list_head ports;
@@ -150,7 +123,7 @@ struct ports_device {
spinlock_t cvq_lock;
/* The current config space is stored here */
- struct virtio_console_multiport_conf config;
+ struct virtio_console_config config;
/* The virtio device we're associated with */
struct virtio_device *vdev;
@@ -189,6 +162,9 @@ struct port {
*/
spinlock_t inbuf_lock;
+ /* Protect the operations on the out_vq. */
+ spinlock_t outvq_lock;
+
/* The IO vqs for this port */
struct virtqueue *in_vq, *out_vq;
@@ -214,6 +190,8 @@ struct port {
/* The 'id' to identify the port with the Host */
u32 id;
+ bool outvq_full;
+
/* Is the host device open */
bool host_connected;
@@ -328,7 +306,7 @@ static void *get_inbuf(struct port *port)
unsigned int len;
vq = port->in_vq;
- buf = vq->vq_ops->get_buf(vq, &len);
+ buf = virtqueue_get_buf(vq, &len);
if (buf) {
buf->len = len;
buf->offset = 0;
@@ -349,8 +327,8 @@ static int add_inbuf(struct virtqueue *vq, struct port_buffer *buf)
sg_init_one(sg, buf->buf, buf->size);
- ret = vq->vq_ops->add_buf(vq, sg, 0, 1, buf);
- vq->vq_ops->kick(vq);
+ ret = virtqueue_add_buf(vq, sg, 0, 1, buf);
+ virtqueue_kick(vq);
return ret;
}
@@ -366,7 +344,7 @@ static void discard_port_data(struct port *port)
if (port->inbuf)
buf = port->inbuf;
else
- buf = vq->vq_ops->get_buf(vq, &len);
+ buf = virtqueue_get_buf(vq, &len);
ret = 0;
while (buf) {
@@ -374,7 +352,7 @@ static void discard_port_data(struct port *port)
ret++;
free_buf(buf);
}
- buf = vq->vq_ops->get_buf(vq, &len);
+ buf = virtqueue_get_buf(vq, &len);
}
port->inbuf = NULL;
if (ret)
@@ -403,57 +381,96 @@ out:
return ret;
}
-static ssize_t send_control_msg(struct port *port, unsigned int event,
- unsigned int value)
+static ssize_t __send_control_msg(struct ports_device *portdev, u32 port_id,
+ unsigned int event, unsigned int value)
{
struct scatterlist sg[1];
struct virtio_console_control cpkt;
struct virtqueue *vq;
unsigned int len;
- if (!use_multiport(port->portdev))
+ if (!use_multiport(portdev))
return 0;
- cpkt.id = port->id;
+ cpkt.id = port_id;
cpkt.event = event;
cpkt.value = value;
- vq = port->portdev->c_ovq;
+ vq = portdev->c_ovq;
sg_init_one(sg, &cpkt, sizeof(cpkt));
- if (vq->vq_ops->add_buf(vq, sg, 1, 0, &cpkt) >= 0) {
- vq->vq_ops->kick(vq);
- while (!vq->vq_ops->get_buf(vq, &len))
+ if (virtqueue_add_buf(vq, sg, 1, 0, &cpkt) >= 0) {
+ virtqueue_kick(vq);
+ while (!virtqueue_get_buf(vq, &len))
cpu_relax();
}
return 0;
}
-static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count)
+static ssize_t send_control_msg(struct port *port, unsigned int event,
+ unsigned int value)
+{
+ return __send_control_msg(port->portdev, port->id, event, value);
+}
+
+/* Callers must take the port->outvq_lock */
+static void reclaim_consumed_buffers(struct port *port)
+{
+ void *buf;
+ unsigned int len;
+
+ while ((buf = virtqueue_get_buf(port->out_vq, &len))) {
+ kfree(buf);
+ port->outvq_full = false;
+ }
+}
+
+static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count,
+ bool nonblock)
{
struct scatterlist sg[1];
struct virtqueue *out_vq;
ssize_t ret;
+ unsigned long flags;
unsigned int len;
out_vq = port->out_vq;
+ spin_lock_irqsave(&port->outvq_lock, flags);
+
+ reclaim_consumed_buffers(port);
+
sg_init_one(sg, in_buf, in_count);
- ret = out_vq->vq_ops->add_buf(out_vq, sg, 1, 0, in_buf);
+ ret = virtqueue_add_buf(out_vq, sg, 1, 0, in_buf);
/* Tell Host to go! */
- out_vq->vq_ops->kick(out_vq);
+ virtqueue_kick(out_vq);
if (ret < 0) {
in_count = 0;
- goto fail;
+ goto done;
}
- /* Wait till the host acknowledges it pushed out the data we sent. */
- while (!out_vq->vq_ops->get_buf(out_vq, &len))
+ if (ret == 0)
+ port->outvq_full = true;
+
+ if (nonblock)
+ goto done;
+
+ /*
+ * Wait till the host acknowledges it pushed out the data we
+ * sent. This is done for ports in blocking mode or for data
+ * from the hvc_console; the tty operations are performed with
+ * spinlocks held so we can't sleep here.
+ */
+ while (!virtqueue_get_buf(out_vq, &len))
cpu_relax();
-fail:
- /* We're expected to return the amount of data we wrote */
+done:
+ spin_unlock_irqrestore(&port->outvq_lock, flags);
+ /*
+ * We're expected to return the amount of data we wrote -- all
+ * of it
+ */
return in_count;
}
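
The rewritten write path above follows a reclaim-then-check pattern: free whatever the host has already consumed, and only then decide whether the output queue is full and the caller must block (or see -EAGAIN). A small user-space model of that decision — the queue depth and helper names below are made up for illustration:

/* Self-contained sketch of the reclaim-then-check idea; not driver code. */
#include <stdbool.h>
#include <stdio.h>

#define QUEUE_DEPTH 4           /* made-up virtqueue size */

struct out_queue {
        int inflight;           /* buffers queued to the host, not reclaimed */
        int host_done;          /* buffers the host has finished with */
};

/* Model of reclaim_consumed_buffers(): drop buffers the host is done with. */
static void reclaim(struct out_queue *q)
{
        q->inflight -= q->host_done;
        q->host_done = 0;
}

/* Model of will_write_block(): reclaim first, only then test for space. */
static bool will_block(struct out_queue *q)
{
        reclaim(q);
        return q->inflight >= QUEUE_DEPTH;
}

int main(void)
{
        struct out_queue q = { 0, 0 };

        for (int i = 0; i < 6; i++) {
                if (will_block(&q)) {
                        printf("write %d: would block (queue full)\n", i);
                        q.host_done = 2;   /* pretend the host catches up */
                        continue;
                }
                q.inflight++;              /* model of a successful add_buf */
                printf("write %d: queued (%d in flight)\n", i, q.inflight);
        }
        return 0;
}
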
@@ -503,9 +520,28 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
}
/* The condition that must be true for polling to end */
-static bool wait_is_over(struct port *port)
+static bool will_read_block(struct port *port)
+{
+ return !port_has_data(port) && port->host_connected;
+}
+
+static bool will_write_block(struct port *port)
{
- return port_has_data(port) || !port->host_connected;
+ bool ret;
+
+ if (!port->host_connected)
+ return true;
+
+ spin_lock_irq(&port->outvq_lock);
+ /*
+ * Check if the Host has consumed any buffers since we last
+ * sent data (this is only applicable for nonblocking ports).
+ */
+ reclaim_consumed_buffers(port);
+ ret = port->outvq_full;
+ spin_unlock_irq(&port->outvq_lock);
+
+ return ret;
}
static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
@@ -528,7 +564,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
return -EAGAIN;
ret = wait_event_interruptible(port->waitqueue,
- wait_is_over(port));
+ !will_read_block(port));
if (ret < 0)
return ret;
}
@@ -554,9 +590,22 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
struct port *port;
char *buf;
ssize_t ret;
+ bool nonblock;
port = filp->private_data;
+ nonblock = filp->f_flags & O_NONBLOCK;
+
+ if (will_write_block(port)) {
+ if (nonblock)
+ return -EAGAIN;
+
+ ret = wait_event_interruptible(port->waitqueue,
+ !will_write_block(port));
+ if (ret < 0)
+ return ret;
+ }
+
count = min((size_t)(32 * 1024), count);
buf = kmalloc(count, GFP_KERNEL);
@@ -569,9 +618,14 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
goto free_buf;
}
- ret = send_buf(port, buf, count);
+ ret = send_buf(port, buf, count, nonblock);
+
+ if (nonblock && ret > 0)
+ goto out;
+
free_buf:
kfree(buf);
+out:
return ret;
}
@@ -586,7 +640,7 @@ static unsigned int port_fops_poll(struct file *filp, poll_table *wait)
ret = 0;
if (port->inbuf)
ret |= POLLIN | POLLRDNORM;
- if (port->host_connected)
+ if (!will_write_block(port))
ret |= POLLOUT;
if (!port->host_connected)
ret |= POLLHUP;
@@ -610,6 +664,10 @@ static int port_fops_release(struct inode *inode, struct file *filp)
spin_unlock_irq(&port->inbuf_lock);
+ spin_lock_irq(&port->outvq_lock);
+ reclaim_consumed_buffers(port);
+ spin_unlock_irq(&port->outvq_lock);
+
return 0;
}
@@ -638,6 +696,15 @@ static int port_fops_open(struct inode *inode, struct file *filp)
port->guest_connected = true;
spin_unlock_irq(&port->inbuf_lock);
+ spin_lock_irq(&port->outvq_lock);
+ /*
+ * There might be a chance that we missed reclaiming a few
+ * buffers in the window of the port getting previously closed
+ * and opening now.
+ */
+ reclaim_consumed_buffers(port);
+ spin_unlock_irq(&port->outvq_lock);
+
/* Notify host of port being opened */
send_control_msg(filp->private_data, VIRTIO_CONSOLE_PORT_OPEN, 1);
@@ -676,9 +743,9 @@ static int put_chars(u32 vtermno, const char *buf, int count)
port = find_port_by_vtermno(vtermno);
if (!port)
- return 0;
+ return -EPIPE;
- return send_buf(port, (void *)buf, count);
+ return send_buf(port, (void *)buf, count, false);
}
/*
@@ -692,9 +759,13 @@ static int get_chars(u32 vtermno, char *buf, int count)
{
struct port *port;
+ /* If we've not set up the port yet, we have no input to give. */
+ if (unlikely(early_put_chars))
+ return 0;
+
port = find_port_by_vtermno(vtermno);
if (!port)
- return 0;
+ return -EPIPE;
/* If we don't have an input queue yet, we can't get input. */
BUG_ON(!port->in_vq);
@@ -705,22 +776,14 @@ static int get_chars(u32 vtermno, char *buf, int count)
static void resize_console(struct port *port)
{
struct virtio_device *vdev;
- struct winsize ws;
/* The port could have been hot-unplugged */
- if (!port)
+ if (!port || !is_console_port(port))
return;
vdev = port->portdev->vdev;
- if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE)) {
- vdev->config->get(vdev,
- offsetof(struct virtio_console_config, cols),
- &ws.ws_col, sizeof(u16));
- vdev->config->get(vdev,
- offsetof(struct virtio_console_config, rows),
- &ws.ws_row, sizeof(u16));
- hvc_resize(port->cons.hvc, ws);
- }
+ if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE))
+ hvc_resize(port->cons.hvc, port->cons.ws);
}
/* We set the configuration at this point, since we now have a tty */
@@ -804,6 +867,13 @@ int init_port_console(struct port *port)
spin_unlock_irq(&pdrvdata_lock);
port->guest_connected = true;
+ /*
+ * Start using the new console output if this is the first
+ * console to come up.
+ */
+ if (early_put_chars)
+ early_put_chars = NULL;
+
/* Notify host of port being opened */
send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 1);
@@ -859,6 +929,8 @@ static ssize_t debugfs_read(struct file *filp, char __user *ubuf,
out_offset += snprintf(buf + out_offset, out_count - out_offset,
"host_connected: %d\n", port->host_connected);
out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "outvq_full: %d\n", port->outvq_full);
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
"is_console: %s\n",
is_console_port(port) ? "yes" : "no");
out_offset += snprintf(buf + out_offset, out_count - out_offset,
@@ -875,6 +947,153 @@ static const struct file_operations port_debugfs_ops = {
.read = debugfs_read,
};
+static void set_console_size(struct port *port, u16 rows, u16 cols)
+{
+ if (!port || !is_console_port(port))
+ return;
+
+ port->cons.ws.ws_row = rows;
+ port->cons.ws.ws_col = cols;
+}
+
+static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock)
+{
+ struct port_buffer *buf;
+ unsigned int nr_added_bufs;
+ int ret;
+
+ nr_added_bufs = 0;
+ do {
+ buf = alloc_buf(PAGE_SIZE);
+ if (!buf)
+ break;
+
+ spin_lock_irq(lock);
+ ret = add_inbuf(vq, buf);
+ if (ret < 0) {
+ spin_unlock_irq(lock);
+ free_buf(buf);
+ break;
+ }
+ nr_added_bufs++;
+ spin_unlock_irq(lock);
+ } while (ret > 0);
+
+ return nr_added_bufs;
+}
+
+static int add_port(struct ports_device *portdev, u32 id)
+{
+ char debugfs_name[16];
+ struct port *port;
+ struct port_buffer *buf;
+ dev_t devt;
+ unsigned int nr_added_bufs;
+ int err;
+
+ port = kmalloc(sizeof(*port), GFP_KERNEL);
+ if (!port) {
+ err = -ENOMEM;
+ goto fail;
+ }
+
+ port->portdev = portdev;
+ port->id = id;
+
+ port->name = NULL;
+ port->inbuf = NULL;
+ port->cons.hvc = NULL;
+
+ port->cons.ws.ws_row = port->cons.ws.ws_col = 0;
+
+ port->host_connected = port->guest_connected = false;
+
+ port->outvq_full = false;
+
+ port->in_vq = portdev->in_vqs[port->id];
+ port->out_vq = portdev->out_vqs[port->id];
+
+ cdev_init(&port->cdev, &port_fops);
+
+ devt = MKDEV(portdev->chr_major, id);
+ err = cdev_add(&port->cdev, devt, 1);
+ if (err < 0) {
+ dev_err(&port->portdev->vdev->dev,
+ "Error %d adding cdev for port %u\n", err, id);
+ goto free_port;
+ }
+ port->dev = device_create(pdrvdata.class, &port->portdev->vdev->dev,
+ devt, port, "vport%up%u",
+ port->portdev->drv_index, id);
+ if (IS_ERR(port->dev)) {
+ err = PTR_ERR(port->dev);
+ dev_err(&port->portdev->vdev->dev,
+ "Error %d creating device for port %u\n",
+ err, id);
+ goto free_cdev;
+ }
+
+ spin_lock_init(&port->inbuf_lock);
+ spin_lock_init(&port->outvq_lock);
+ init_waitqueue_head(&port->waitqueue);
+
+ /* Fill the in_vq with buffers so the host can send us data. */
+ nr_added_bufs = fill_queue(port->in_vq, &port->inbuf_lock);
+ if (!nr_added_bufs) {
+ dev_err(port->dev, "Error allocating inbufs\n");
+ err = -ENOMEM;
+ goto free_device;
+ }
+
+ /*
+ * If we're not using multiport support, this has to be a console port
+ */
+ if (!use_multiport(port->portdev)) {
+ err = init_port_console(port);
+ if (err)
+ goto free_inbufs;
+ }
+
+ spin_lock_irq(&portdev->ports_lock);
+ list_add_tail(&port->list, &port->portdev->ports);
+ spin_unlock_irq(&portdev->ports_lock);
+
+ /*
+ * Tell the Host we're set so that it can send us various
+ * configuration parameters for this port (eg, port name,
+ * caching, whether this is a console port, etc.)
+ */
+ send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);
+
+ if (pdrvdata.debugfs_dir) {
+ /*
+ * Finally, create the debugfs file that we can use to
+ * inspect a port's state at any time
+ */
+ sprintf(debugfs_name, "vport%up%u",
+ port->portdev->drv_index, id);
+ port->debugfs_file = debugfs_create_file(debugfs_name, 0444,
+ pdrvdata.debugfs_dir,
+ port,
+ &port_debugfs_ops);
+ }
+ return 0;
+
+free_inbufs:
+ while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
+ free_buf(buf);
+free_device:
+ device_destroy(pdrvdata.class, port->dev->devt);
+free_cdev:
+ cdev_del(&port->cdev);
+free_port:
+ kfree(port);
+fail:
+ /* The host might want to notify management sw about port add failure */
+ __send_control_msg(portdev, id, VIRTIO_CONSOLE_PORT_READY, 0);
+ return err;
+}
+
/* Remove all port-specific data. */
static int remove_port(struct port *port)
{
@@ -888,7 +1107,18 @@ static int remove_port(struct port *port)
spin_lock_irq(&pdrvdata_lock);
list_del(&port->cons.list);
spin_unlock_irq(&pdrvdata_lock);
+#if 0
+ /*
+ * hvc_remove() not called as removing one hvc port
+ * results in other hvc ports getting frozen.
+ *
+ * Once this is resolved in hvc, this functionality
+ * will be enabled. Till that is done, the -EPIPE
+ * return from get_chars() above will help
+ * hvc_console.c to clean up on ports we remove here.
+ */
hvc_remove(port->cons.hvc);
+#endif
}
if (port->guest_connected)
send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 0);
@@ -900,8 +1130,10 @@ static int remove_port(struct port *port)
/* Remove unused data this port might have received. */
discard_port_data(port);
+ reclaim_consumed_buffers(port);
+
/* Remove buffers we queued up for the Host to send us data in. */
- while ((buf = port->in_vq->vq_ops->detach_unused_buf(port->in_vq)))
+ while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
free_buf(buf);
kfree(port->name);
@@ -924,7 +1156,7 @@ static void handle_control_message(struct ports_device *portdev,
cpkt = (struct virtio_console_control *)(buf->buf + buf->offset);
port = find_port_by_id(portdev, cpkt->id);
- if (!port) {
+ if (!port && cpkt->event != VIRTIO_CONSOLE_PORT_ADD) {
/* No valid header at start of buffer. Drop it. */
dev_dbg(&portdev->vdev->dev,
"Invalid index %u in control packet\n", cpkt->id);
@@ -932,6 +1164,24 @@ static void handle_control_message(struct ports_device *portdev,
}
switch (cpkt->event) {
+ case VIRTIO_CONSOLE_PORT_ADD:
+ if (port) {
+ dev_dbg(&portdev->vdev->dev,
+ "Port %u already added\n", port->id);
+ send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);
+ break;
+ }
+ if (cpkt->id >= portdev->config.max_nr_ports) {
+ dev_warn(&portdev->vdev->dev,
+ "Request for adding port with out-of-bound id %u, max. supported id: %u\n",
+ cpkt->id, portdev->config.max_nr_ports - 1);
+ break;
+ }
+ add_port(portdev, cpkt->id);
+ break;
+ case VIRTIO_CONSOLE_PORT_REMOVE:
+ remove_port(port);
+ break;
case VIRTIO_CONSOLE_CONSOLE_PORT:
if (!cpkt->value)
break;
@@ -944,15 +1194,34 @@ static void handle_control_message(struct ports_device *portdev,
* have to notify the host first.
*/
break;
- case VIRTIO_CONSOLE_RESIZE:
+ case VIRTIO_CONSOLE_RESIZE: {
+ struct {
+ __u16 rows;
+ __u16 cols;
+ } size;
+
if (!is_console_port(port))
break;
+
+ memcpy(&size, buf->buf + buf->offset + sizeof(*cpkt),
+ sizeof(size));
+ set_console_size(port, size.rows, size.cols);
+
port->cons.hvc->irq_requested = 1;
resize_console(port);
break;
+ }
case VIRTIO_CONSOLE_PORT_OPEN:
port->host_connected = cpkt->value;
wake_up_interruptible(&port->waitqueue);
+ /*
+ * If the host port got closed and the host had any
+ * unconsumed buffers, we'll be able to reclaim them
+ * now.
+ */
+ spin_lock_irq(&port->outvq_lock);
+ reclaim_consumed_buffers(port);
+ spin_unlock_irq(&port->outvq_lock);
break;
case VIRTIO_CONSOLE_PORT_NAME:
/*
@@ -990,32 +1259,6 @@ static void handle_control_message(struct ports_device *portdev,
kobject_uevent(&port->dev->kobj, KOBJ_CHANGE);
}
break;
- case VIRTIO_CONSOLE_PORT_REMOVE:
- /*
- * Hot unplug the port. We don't decrement nr_ports
- * since we don't want to deal with extra complexities
- * of using the lowest-available port id: We can just
- * pick up the nr_ports number as the id and not have
- * userspace send it to us. This helps us in two
- * ways:
- *
- * - We don't need to have a 'port_id' field in the
- * config space when a port is hot-added. This is a
- * good thing as we might queue up multiple hotplug
- * requests issued in our workqueue.
- *
- * - Another way to deal with this would have been to
- * use a bitmap of the active ports and select the
- * lowest non-active port from that map. That
- * bloats the already tight config space and we
- * would end up artificially limiting the
- * max. number of ports to sizeof(bitmap). Right
- * now we can support 2^32 ports (as the port id is
- * stored in a u32 type).
- *
- */
- remove_port(port);
- break;
}
}
@@ -1030,7 +1273,7 @@ static void control_work_handler(struct work_struct *work)
vq = portdev->c_ivq;
spin_lock(&portdev->cvq_lock);
- while ((buf = vq->vq_ops->get_buf(vq, &len))) {
+ while ((buf = virtqueue_get_buf(vq, &len))) {
spin_unlock(&portdev->cvq_lock);
buf->len = len;
@@ -1092,204 +1335,29 @@ static void config_intr(struct virtio_device *vdev)
struct ports_device *portdev;
portdev = vdev->priv;
- if (use_multiport(portdev)) {
- /* Handle port hot-add */
- schedule_work(&portdev->config_work);
- }
- /*
- * We'll use this way of resizing only for legacy support.
- * For newer userspace (VIRTIO_CONSOLE_F_MULTPORT+), use
- * control messages to indicate console size changes so that
- * it can be done per-port
- */
- resize_console(find_port_by_id(portdev, 0));
-}
-
-static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock)
-{
- struct port_buffer *buf;
- unsigned int nr_added_bufs;
- int ret;
-
- nr_added_bufs = 0;
- do {
- buf = alloc_buf(PAGE_SIZE);
- if (!buf)
- break;
-
- spin_lock_irq(lock);
- ret = add_inbuf(vq, buf);
- if (ret < 0) {
- spin_unlock_irq(lock);
- free_buf(buf);
- break;
- }
- nr_added_bufs++;
- spin_unlock_irq(lock);
- } while (ret > 0);
-
- return nr_added_bufs;
-}
-
-static int add_port(struct ports_device *portdev, u32 id)
-{
- char debugfs_name[16];
- struct port *port;
- struct port_buffer *buf;
- dev_t devt;
- unsigned int nr_added_bufs;
- int err;
-
- port = kmalloc(sizeof(*port), GFP_KERNEL);
- if (!port) {
- err = -ENOMEM;
- goto fail;
- }
-
- port->portdev = portdev;
- port->id = id;
-
- port->name = NULL;
- port->inbuf = NULL;
- port->cons.hvc = NULL;
-
- port->host_connected = port->guest_connected = false;
-
- port->in_vq = portdev->in_vqs[port->id];
- port->out_vq = portdev->out_vqs[port->id];
-
- cdev_init(&port->cdev, &port_fops);
-
- devt = MKDEV(portdev->chr_major, id);
- err = cdev_add(&port->cdev, devt, 1);
- if (err < 0) {
- dev_err(&port->portdev->vdev->dev,
- "Error %d adding cdev for port %u\n", err, id);
- goto free_port;
- }
- port->dev = device_create(pdrvdata.class, &port->portdev->vdev->dev,
- devt, port, "vport%up%u",
- port->portdev->drv_index, id);
- if (IS_ERR(port->dev)) {
- err = PTR_ERR(port->dev);
- dev_err(&port->portdev->vdev->dev,
- "Error %d creating device for port %u\n",
- err, id);
- goto free_cdev;
- }
-
- spin_lock_init(&port->inbuf_lock);
- init_waitqueue_head(&port->waitqueue);
-
- /* Fill the in_vq with buffers so the host can send us data. */
- nr_added_bufs = fill_queue(port->in_vq, &port->inbuf_lock);
- if (!nr_added_bufs) {
- dev_err(port->dev, "Error allocating inbufs\n");
- err = -ENOMEM;
- goto free_device;
- }
-
- /*
- * If we're not using multiport support, this has to be a console port
- */
- if (!use_multiport(port->portdev)) {
- err = init_port_console(port);
- if (err)
- goto free_inbufs;
- }
-
- spin_lock_irq(&portdev->ports_lock);
- list_add_tail(&port->list, &port->portdev->ports);
- spin_unlock_irq(&portdev->ports_lock);
-
- /*
- * Tell the Host we're set so that it can send us various
- * configuration parameters for this port (eg, port name,
- * caching, whether this is a console port, etc.)
- */
- send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);
-
- if (pdrvdata.debugfs_dir) {
- /*
- * Finally, create the debugfs file that we can use to
- * inspect a port's state at any time
- */
- sprintf(debugfs_name, "vport%up%u",
- port->portdev->drv_index, id);
- port->debugfs_file = debugfs_create_file(debugfs_name, 0444,
- pdrvdata.debugfs_dir,
- port,
- &port_debugfs_ops);
- }
- return 0;
-
-free_inbufs:
- while ((buf = port->in_vq->vq_ops->detach_unused_buf(port->in_vq)))
- free_buf(buf);
-free_device:
- device_destroy(pdrvdata.class, port->dev->devt);
-free_cdev:
- cdev_del(&port->cdev);
-free_port:
- kfree(port);
-fail:
- return err;
-}
-/*
- * The workhandler for config-space updates.
- *
- * This is called when ports are hot-added.
- */
-static void config_work_handler(struct work_struct *work)
-{
- struct virtio_console_multiport_conf virtconconf;
- struct ports_device *portdev;
- struct virtio_device *vdev;
- int err;
+ if (!use_multiport(portdev)) {
+ struct port *port;
+ u16 rows, cols;
- portdev = container_of(work, struct ports_device, config_work);
+ vdev->config->get(vdev,
+ offsetof(struct virtio_console_config, cols),
+ &cols, sizeof(u16));
+ vdev->config->get(vdev,
+ offsetof(struct virtio_console_config, rows),
+ &rows, sizeof(u16));
- vdev = portdev->vdev;
- vdev->config->get(vdev,
- offsetof(struct virtio_console_multiport_conf,
- nr_ports),
- &virtconconf.nr_ports,
- sizeof(virtconconf.nr_ports));
+ port = find_port_by_id(portdev, 0);
+ set_console_size(port, rows, cols);
- if (portdev->config.nr_ports == virtconconf.nr_ports) {
/*
- * Port 0 got hot-added. Since we already did all the
- * other initialisation for it, just tell the Host
- * that the port is ready if we find the port. In
- * case the port was hot-removed earlier, we call
- * add_port to add the port.
+ * We'll use this way of resizing only for legacy
+ * support. For newer userspace
+ * (VIRTIO_CONSOLE_F_MULTIPORT+), use control messages
+ * to indicate console size changes so that it can be
+ * done per-port.
*/
- struct port *port;
-
- port = find_port_by_id(portdev, 0);
- if (!port)
- add_port(portdev, 0);
- else
- send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);
- return;
- }
- if (virtconconf.nr_ports > portdev->config.max_nr_ports) {
- dev_warn(&vdev->dev,
- "More ports specified (%u) than allowed (%u)",
- portdev->config.nr_ports + 1,
- portdev->config.max_nr_ports);
- return;
- }
- if (virtconconf.nr_ports < portdev->config.nr_ports)
- return;
-
- /* Hot-add ports */
- while (virtconconf.nr_ports - portdev->config.nr_ports) {
- err = add_port(portdev, portdev->config.nr_ports);
- if (err)
- break;
- portdev->config.nr_ports++;
+ resize_console(port);
}
}
@@ -1414,7 +1482,6 @@ static const struct file_operations portdev_fops = {
static int __devinit virtcons_probe(struct virtio_device *vdev)
{
struct ports_device *portdev;
- u32 i;
int err;
bool multiport;
@@ -1443,37 +1510,19 @@ static int __devinit virtcons_probe(struct virtio_device *vdev)
}
multiport = false;
- portdev->config.nr_ports = 1;
portdev->config.max_nr_ports = 1;
-#if 0 /* Multiport is not quite ready yet --RR */
if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_MULTIPORT)) {
multiport = true;
vdev->features[0] |= 1 << VIRTIO_CONSOLE_F_MULTIPORT;
- vdev->config->get(vdev,
- offsetof(struct virtio_console_multiport_conf,
- nr_ports),
- &portdev->config.nr_ports,
- sizeof(portdev->config.nr_ports));
- vdev->config->get(vdev,
- offsetof(struct virtio_console_multiport_conf,
- max_nr_ports),
+ vdev->config->get(vdev, offsetof(struct virtio_console_config,
+ max_nr_ports),
&portdev->config.max_nr_ports,
sizeof(portdev->config.max_nr_ports));
- if (portdev->config.nr_ports > portdev->config.max_nr_ports) {
- dev_warn(&vdev->dev,
- "More ports (%u) specified than allowed (%u). Will init %u ports.",
- portdev->config.nr_ports,
- portdev->config.max_nr_ports,
- portdev->config.max_nr_ports);
-
- portdev->config.nr_ports = portdev->config.max_nr_ports;
- }
}
/* Let the Host know we support multiple ports.*/
vdev->config->finalize_features(vdev);
-#endif
err = init_vqs(portdev);
if (err < 0) {
@@ -1489,7 +1538,6 @@ static int __devinit virtcons_probe(struct virtio_device *vdev)
spin_lock_init(&portdev->cvq_lock);
INIT_WORK(&portdev->control_work, &control_work_handler);
- INIT_WORK(&portdev->config_work, &config_work_handler);
nr_added_bufs = fill_queue(portdev->c_ivq, &portdev->cvq_lock);
if (!nr_added_bufs) {
@@ -1498,16 +1546,22 @@ static int __devinit virtcons_probe(struct virtio_device *vdev)
err = -ENOMEM;
goto free_vqs;
}
+ } else {
+ /*
+ * For backward compatibility: Create a console port
+ * if we're running on older host.
+ */
+ add_port(portdev, 0);
}
- for (i = 0; i < portdev->config.nr_ports; i++)
- add_port(portdev, i);
-
- /* Start using the new console output. */
- early_put_chars = NULL;
+ __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID,
+ VIRTIO_CONSOLE_DEVICE_READY, 1);
return 0;
free_vqs:
+ /* The host might want to notify mgmt sw about device add failure */
+ __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID,
+ VIRTIO_CONSOLE_DEVICE_READY, 0);
vdev->config->del_vqs(vdev);
kfree(portdev->in_vqs);
kfree(portdev->out_vqs);
@@ -1529,17 +1583,16 @@ static void virtcons_remove(struct virtio_device *vdev)
portdev = vdev->priv;
cancel_work_sync(&portdev->control_work);
- cancel_work_sync(&portdev->config_work);
list_for_each_entry_safe(port, port2, &portdev->ports, list)
remove_port(port);
unregister_chrdev(portdev->chr_major, "virtio-portsdev");
- while ((buf = portdev->c_ivq->vq_ops->get_buf(portdev->c_ivq, &len)))
+ while ((buf = virtqueue_get_buf(portdev->c_ivq, &len)))
free_buf(buf);
- while ((buf = portdev->c_ivq->vq_ops->detach_unused_buf(portdev->c_ivq)))
+ while ((buf = virtqueue_detach_unused_buf(portdev->c_ivq)))
free_buf(buf);
vdev->config->del_vqs(vdev);
@@ -1556,6 +1609,7 @@ static struct virtio_device_id id_table[] = {
static unsigned int features[] = {
VIRTIO_CONSOLE_F_SIZE,
+ VIRTIO_CONSOLE_F_MULTIPORT,
};
static struct virtio_driver virtio_console = {
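
One detail worth spelling out from the handle_control_message() changes earlier in this file: a VIRTIO_CONSOLE_RESIZE packet carries its rows/cols payload immediately after the fixed control header, which is why the hunk copies from buf->offset + sizeof(*cpkt). A user-space sketch of that layout — the type names are stand-ins, and only the RESIZE event value (2) comes from the removed defines:

/* Illustrative sketch (user space, not driver code) of header + payload. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct control_hdr {            /* models struct virtio_console_control */
        uint32_t id;
        uint16_t event;
        uint16_t value;
};

struct console_size {           /* payload appended for RESIZE events */
        uint16_t rows;
        uint16_t cols;
};

int main(void)
{
        /* a buffer as it would arrive from the host: header then payload */
        uint8_t buf[sizeof(struct control_hdr) + sizeof(struct console_size)];
        struct control_hdr hdr = { .id = 1, .event = 2 /* RESIZE */, .value = 0 };
        struct console_size in = { .rows = 25, .cols = 80 }, out;

        memcpy(buf, &hdr, sizeof(hdr));
        memcpy(buf + sizeof(hdr), &in, sizeof(in));

        /* guest side: payload starts right after the control header */
        memcpy(&out, buf + sizeof(struct control_hdr), sizeof(out));
        printf("resize to %ux%u\n", out.rows, out.cols);
        return 0;
}
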
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index bd1d1164fec5..7cdb6ee569cd 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -3967,13 +3967,9 @@ static int con_font_set(struct vc_data *vc, struct console_font_op *op)
font.charcount = op->charcount;
font.height = op->height;
font.width = op->width;
- font.data = kmalloc(size, GFP_KERNEL);
- if (!font.data)
- return -ENOMEM;
- if (copy_from_user(font.data, op->data, size)) {
- kfree(font.data);
- return -EFAULT;
- }
+ font.data = memdup_user(op->data, size);
+ if (IS_ERR(font.data))
+ return PTR_ERR(font.data);
acquire_console_sem();
if (vc->vc_sw->con_font_set)
rc = vc->vc_sw->con_font_set(vc, &font, op->flags);
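
memdup_user() bundles the allocate/copy/error-unwind sequence the removed lines open-coded, so callers only need the IS_ERR()/PTR_ERR() check. A rough user-space approximation of what the helper does — this stand-in returns NULL rather than an ERR_PTR(), so it is a model, not the kernel API:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

/* User-space stand-in: the real memdup_user() copies from user space and
 * reports failure with an ERR_PTR(), not with NULL/errno as done here. */
static void *memdup_user_model(const void *src, size_t len)
{
        void *p = malloc(len);

        if (!p) {
                errno = ENOMEM;
                return NULL;
        }
        memcpy(p, src, len);    /* stands in for copy_from_user() */
        return p;
}

int main(void)
{
        const char font_bytes[] = { 0x10, 0x20, 0x30 };
        void *copy = memdup_user_model(font_bytes, sizeof(font_bytes));

        if (!copy) {
                perror("memdup_user_model");
                return 1;
        }
        printf("copied %zu bytes\n", sizeof(font_bytes));
        free(copy);
        return 0;
}
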
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
index 7261b8d9087c..ed8a9cec2a05 100644
--- a/drivers/char/xilinx_hwicap/xilinx_hwicap.c
+++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
@@ -772,18 +772,18 @@ hwicap_of_probe(struct of_device *op, const struct of_device_id *match)
dev_dbg(&op->dev, "hwicap_of_probe(%p, %p)\n", op, match);
- rc = of_address_to_resource(op->node, 0, &res);
+ rc = of_address_to_resource(op->dev.of_node, 0, &res);
if (rc) {
dev_err(&op->dev, "invalid address\n");
return rc;
}
- id = of_get_property(op->node, "port-number", NULL);
+ id = of_get_property(op->dev.of_node, "port-number", NULL);
/* It's most likely that we're using V4, if the family is not
specified */
regs = &v4_config_registers;
- family = of_get_property(op->node, "xlnx,family", NULL);
+ family = of_get_property(op->dev.of_node, "xlnx,family", NULL);
if (family) {
if (!strcmp(family, "virtex2p")) {
@@ -812,13 +812,12 @@ static const struct of_device_id __devinitconst hwicap_of_match[] = {
MODULE_DEVICE_TABLE(of, hwicap_of_match);
static struct of_platform_driver hwicap_of_driver = {
- .owner = THIS_MODULE,
- .name = DRIVER_NAME,
- .match_table = hwicap_of_match,
.probe = hwicap_of_probe,
.remove = __devexit_p(hwicap_of_remove),
.driver = {
.name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = hwicap_of_match,
},
};
diff --git a/drivers/clocksource/cs5535-clockevt.c b/drivers/clocksource/cs5535-clockevt.c
index b314a999aabe..d7be69f13154 100644
--- a/drivers/clocksource/cs5535-clockevt.c
+++ b/drivers/clocksource/cs5535-clockevt.c
@@ -154,14 +154,14 @@ static int __init cs5535_mfgpt_init(void)
if (cs5535_mfgpt_setup_irq(timer, MFGPT_CMP2, &timer_irq)) {
printk(KERN_ERR DRV_NAME ": Could not set up IRQ %d\n",
timer_irq);
- return -EIO;
+ goto err_timer;
}
/* And register it with the kernel */
ret = setup_irq(timer_irq, &mfgptirq);
if (ret) {
printk(KERN_ERR DRV_NAME ": Unable to set up the interrupt.\n");
- goto err;
+ goto err_irq;
}
/* Set the clock scale and enable the event mode for CMP2 */
@@ -184,8 +184,10 @@ static int __init cs5535_mfgpt_init(void)
return 0;
-err:
+err_irq:
cs5535_mfgpt_release_irq(cs5535_event_clock, MFGPT_CMP2, &timer_irq);
+err_timer:
+ cs5535_mfgpt_free_timer(cs5535_event_clock);
printk(KERN_ERR DRV_NAME ": Unable to set up the MFGPT clock source\n");
return -EIO;
}
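
The relabelled error paths above use the usual goto-unwind convention: each later failure jumps to a label that releases everything acquired before it, in reverse order, so nothing is leaked and nothing is freed twice. A self-contained sketch of the same shape, with stand-in resource names:

/* Sketch only; the acquire/release pairs below are invented placeholders. */
#include <stdbool.h>
#include <stdio.h>

static bool get_timer(void)   { return true;  }
static void put_timer(void)   { puts("timer released"); }
static bool get_irq(void)     { return false; /* pretend this step fails */ }
static void put_irq(void)     { puts("irq released"); }
static bool start_clock(void) { return true;  }

static int setup(void)
{
        if (!get_timer())
                return -1;
        if (!get_irq())
                goto err_timer;         /* only the timer needs undoing */
        if (!start_clock())
                goto err_irq;           /* undo the irq, then the timer */
        return 0;

err_irq:
        put_irq();
err_timer:
        put_timer();
        return -1;
}

int main(void)
{
        printf("setup: %d\n", setup());
        return 0;
}
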
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index 744f748cc84b..f6677cb19789 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -150,13 +150,12 @@ static void sh_cmt_start_stop_ch(struct sh_cmt_priv *p, int start)
static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)
{
- struct sh_timer_config *cfg = p->pdev->dev.platform_data;
int ret;
/* enable clock */
ret = clk_enable(p->clk);
if (ret) {
- pr_err("sh_cmt: cannot enable clock \"%s\"\n", cfg->clk);
+ dev_err(&p->pdev->dev, "cannot enable clock\n");
return ret;
}
@@ -279,7 +278,7 @@ static void sh_cmt_clock_event_program_verify(struct sh_cmt_priv *p,
delay = 1;
if (!delay)
- pr_warning("sh_cmt: too long delay\n");
+ dev_warn(&p->pdev->dev, "too long delay\n");
} while (delay);
}
@@ -289,7 +288,7 @@ static void sh_cmt_set_next(struct sh_cmt_priv *p, unsigned long delta)
unsigned long flags;
if (delta > p->max_match_value)
- pr_warning("sh_cmt: delta out of range\n");
+ dev_warn(&p->pdev->dev, "delta out of range\n");
spin_lock_irqsave(&p->lock, flags);
p->next_match_value = delta;
@@ -451,7 +450,7 @@ static int sh_cmt_register_clocksource(struct sh_cmt_priv *p,
cs->resume = sh_cmt_clocksource_resume;
cs->mask = CLOCKSOURCE_MASK(sizeof(unsigned long) * 8);
cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
- pr_info("sh_cmt: %s used as clock source\n", cs->name);
+ dev_info(&p->pdev->dev, "used as clock source\n");
clocksource_register(cs);
return 0;
}
@@ -497,13 +496,11 @@ static void sh_cmt_clock_event_mode(enum clock_event_mode mode,
switch (mode) {
case CLOCK_EVT_MODE_PERIODIC:
- pr_info("sh_cmt: %s used for periodic clock events\n",
- ced->name);
+ dev_info(&p->pdev->dev, "used for periodic clock events\n");
sh_cmt_clock_event_start(p, 1);
break;
case CLOCK_EVT_MODE_ONESHOT:
- pr_info("sh_cmt: %s used for oneshot clock events\n",
- ced->name);
+ dev_info(&p->pdev->dev, "used for oneshot clock events\n");
sh_cmt_clock_event_start(p, 0);
break;
case CLOCK_EVT_MODE_SHUTDOWN:
@@ -544,7 +541,7 @@ static void sh_cmt_register_clockevent(struct sh_cmt_priv *p,
ced->set_next_event = sh_cmt_clock_event_next;
ced->set_mode = sh_cmt_clock_event_mode;
- pr_info("sh_cmt: %s used for clock events\n", ced->name);
+ dev_info(&p->pdev->dev, "used for clock events\n");
clockevents_register_device(ced);
}
@@ -601,22 +598,27 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
/* map memory, let mapbase point to our channel */
p->mapbase = ioremap_nocache(res->start, resource_size(res));
if (p->mapbase == NULL) {
- pr_err("sh_cmt: failed to remap I/O memory\n");
+ dev_err(&p->pdev->dev, "failed to remap I/O memory\n");
goto err0;
}
/* request irq using setup_irq() (too early for request_irq()) */
- p->irqaction.name = cfg->name;
+ p->irqaction.name = dev_name(&p->pdev->dev);
p->irqaction.handler = sh_cmt_interrupt;
p->irqaction.dev_id = p;
- p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL;
+ p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | \
+ IRQF_IRQPOLL | IRQF_NOBALANCING;
/* get hold of clock */
- p->clk = clk_get(&p->pdev->dev, cfg->clk);
+ p->clk = clk_get(&p->pdev->dev, "cmt_fck");
if (IS_ERR(p->clk)) {
- pr_err("sh_cmt: cannot get clock \"%s\"\n", cfg->clk);
- ret = PTR_ERR(p->clk);
- goto err1;
+ dev_warn(&p->pdev->dev, "using deprecated clock lookup\n");
+ p->clk = clk_get(&p->pdev->dev, cfg->clk);
+ if (IS_ERR(p->clk)) {
+ dev_err(&p->pdev->dev, "cannot get clock\n");
+ ret = PTR_ERR(p->clk);
+ goto err1;
+ }
}
if (resource_size(res) == 6) {
@@ -629,17 +631,17 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
p->clear_bits = ~0xc000;
}
- ret = sh_cmt_register(p, cfg->name,
+ ret = sh_cmt_register(p, (char *)dev_name(&p->pdev->dev),
cfg->clockevent_rating,
cfg->clocksource_rating);
if (ret) {
- pr_err("sh_cmt: registration failed\n");
+ dev_err(&p->pdev->dev, "registration failed\n");
goto err1;
}
ret = setup_irq(irq, &p->irqaction);
if (ret) {
- pr_err("sh_cmt: failed to request irq %d\n", irq);
+ dev_err(&p->pdev->dev, "failed to request irq %d\n", irq);
goto err1;
}
@@ -654,11 +656,10 @@ err0:
static int __devinit sh_cmt_probe(struct platform_device *pdev)
{
struct sh_cmt_priv *p = platform_get_drvdata(pdev);
- struct sh_timer_config *cfg = pdev->dev.platform_data;
int ret;
if (p) {
- pr_info("sh_cmt: %s kept as earlytimer\n", cfg->name);
+ dev_info(&pdev->dev, "kept as earlytimer\n");
return 0;
}
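
The clock lookup change above (repeated for sh_mtu2 and sh_tmu below) asks for the new connection id first and only falls back to the deprecated platform-data name, with a warning. A user-space approximation of that fallback — the stub below only pretends to be clk_get(), and the clock names are illustrative:

#include <stdio.h>
#include <string.h>

/* Stand-in lookup: pretend only the board's legacy clock name is registered. */
static const char *clk_get(const char *con_id)
{
        return strcmp(con_id, "old_board_clk") == 0 ? "timer clock" : NULL;
}

static const char *get_timer_clock(const char *legacy_name)
{
        const char *clk = clk_get("cmt_fck");   /* preferred lookup */

        if (!clk) {
                fprintf(stderr, "using deprecated clock lookup\n");
                clk = clk_get(legacy_name);     /* platform-data name */
        }
        return clk;
}

int main(void)
{
        const char *clk = get_timer_clock("old_board_clk");

        printf("clock: %s\n", clk ? clk : "not found");
        return 0;
}
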
diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c
index 5fb78bfd73bb..ef7a5be8a09f 100644
--- a/drivers/clocksource/sh_mtu2.c
+++ b/drivers/clocksource/sh_mtu2.c
@@ -119,13 +119,12 @@ static void sh_mtu2_start_stop_ch(struct sh_mtu2_priv *p, int start)
static int sh_mtu2_enable(struct sh_mtu2_priv *p)
{
- struct sh_timer_config *cfg = p->pdev->dev.platform_data;
int ret;
/* enable clock */
ret = clk_enable(p->clk);
if (ret) {
- pr_err("sh_mtu2: cannot enable clock \"%s\"\n", cfg->clk);
+ dev_err(&p->pdev->dev, "cannot enable clock\n");
return ret;
}
@@ -194,8 +193,7 @@ static void sh_mtu2_clock_event_mode(enum clock_event_mode mode,
switch (mode) {
case CLOCK_EVT_MODE_PERIODIC:
- pr_info("sh_mtu2: %s used for periodic clock events\n",
- ced->name);
+ dev_info(&p->pdev->dev, "used for periodic clock events\n");
sh_mtu2_enable(p);
break;
case CLOCK_EVT_MODE_UNUSED:
@@ -222,13 +220,13 @@ static void sh_mtu2_register_clockevent(struct sh_mtu2_priv *p,
ced->cpumask = cpumask_of(0);
ced->set_mode = sh_mtu2_clock_event_mode;
- pr_info("sh_mtu2: %s used for clock events\n", ced->name);
+ dev_info(&p->pdev->dev, "used for clock events\n");
clockevents_register_device(ced);
ret = setup_irq(p->irqaction.irq, &p->irqaction);
if (ret) {
- pr_err("sh_mtu2: failed to request irq %d\n",
- p->irqaction.irq);
+ dev_err(&p->pdev->dev, "failed to request irq %d\n",
+ p->irqaction.irq);
return;
}
}
@@ -274,26 +272,32 @@ static int sh_mtu2_setup(struct sh_mtu2_priv *p, struct platform_device *pdev)
/* map memory, let mapbase point to our channel */
p->mapbase = ioremap_nocache(res->start, resource_size(res));
if (p->mapbase == NULL) {
- pr_err("sh_mtu2: failed to remap I/O memory\n");
+ dev_err(&p->pdev->dev, "failed to remap I/O memory\n");
goto err0;
}
/* setup data for setup_irq() (too early for request_irq()) */
- p->irqaction.name = cfg->name;
+ p->irqaction.name = dev_name(&p->pdev->dev);
p->irqaction.handler = sh_mtu2_interrupt;
p->irqaction.dev_id = p;
p->irqaction.irq = irq;
- p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL;
+ p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | \
+ IRQF_IRQPOLL | IRQF_NOBALANCING;
/* get hold of clock */
- p->clk = clk_get(&p->pdev->dev, cfg->clk);
+ p->clk = clk_get(&p->pdev->dev, "mtu2_fck");
if (IS_ERR(p->clk)) {
- pr_err("sh_mtu2: cannot get clock \"%s\"\n", cfg->clk);
- ret = PTR_ERR(p->clk);
- goto err1;
+ dev_warn(&p->pdev->dev, "using deprecated clock lookup\n");
+ p->clk = clk_get(&p->pdev->dev, cfg->clk);
+ if (IS_ERR(p->clk)) {
+ dev_err(&p->pdev->dev, "cannot get clock\n");
+ ret = PTR_ERR(p->clk);
+ goto err1;
+ }
}
- return sh_mtu2_register(p, cfg->name, cfg->clockevent_rating);
+ return sh_mtu2_register(p, (char *)dev_name(&p->pdev->dev),
+ cfg->clockevent_rating);
err1:
iounmap(p->mapbase);
err0:
@@ -303,11 +307,10 @@ static int sh_mtu2_setup(struct sh_mtu2_priv *p, struct platform_device *pdev)
static int __devinit sh_mtu2_probe(struct platform_device *pdev)
{
struct sh_mtu2_priv *p = platform_get_drvdata(pdev);
- struct sh_timer_config *cfg = pdev->dev.platform_data;
int ret;
if (p) {
- pr_info("sh_mtu2: %s kept as earlytimer\n", cfg->name);
+ dev_info(&pdev->dev, "kept as earlytimer\n");
return 0;
}
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index fc9ff1e5b770..8e44e14ec4c2 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -107,13 +107,12 @@ static void sh_tmu_start_stop_ch(struct sh_tmu_priv *p, int start)
static int sh_tmu_enable(struct sh_tmu_priv *p)
{
- struct sh_timer_config *cfg = p->pdev->dev.platform_data;
int ret;
/* enable clock */
ret = clk_enable(p->clk);
if (ret) {
- pr_err("sh_tmu: cannot enable clock \"%s\"\n", cfg->clk);
+ dev_err(&p->pdev->dev, "cannot enable clock\n");
return ret;
}
@@ -229,7 +228,7 @@ static int sh_tmu_register_clocksource(struct sh_tmu_priv *p,
cs->disable = sh_tmu_clocksource_disable;
cs->mask = CLOCKSOURCE_MASK(32);
cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
- pr_info("sh_tmu: %s used as clock source\n", cs->name);
+ dev_info(&p->pdev->dev, "used as clock source\n");
clocksource_register(cs);
return 0;
}
@@ -277,13 +276,11 @@ static void sh_tmu_clock_event_mode(enum clock_event_mode mode,
switch (mode) {
case CLOCK_EVT_MODE_PERIODIC:
- pr_info("sh_tmu: %s used for periodic clock events\n",
- ced->name);
+ dev_info(&p->pdev->dev, "used for periodic clock events\n");
sh_tmu_clock_event_start(p, 1);
break;
case CLOCK_EVT_MODE_ONESHOT:
- pr_info("sh_tmu: %s used for oneshot clock events\n",
- ced->name);
+ dev_info(&p->pdev->dev, "used for oneshot clock events\n");
sh_tmu_clock_event_start(p, 0);
break;
case CLOCK_EVT_MODE_UNUSED:
@@ -324,13 +321,13 @@ static void sh_tmu_register_clockevent(struct sh_tmu_priv *p,
ced->set_next_event = sh_tmu_clock_event_next;
ced->set_mode = sh_tmu_clock_event_mode;
- pr_info("sh_tmu: %s used for clock events\n", ced->name);
+ dev_info(&p->pdev->dev, "used for clock events\n");
clockevents_register_device(ced);
ret = setup_irq(p->irqaction.irq, &p->irqaction);
if (ret) {
- pr_err("sh_tmu: failed to request irq %d\n",
- p->irqaction.irq);
+ dev_err(&p->pdev->dev, "failed to request irq %d\n",
+ p->irqaction.irq);
return;
}
}
@@ -379,26 +376,31 @@ static int sh_tmu_setup(struct sh_tmu_priv *p, struct platform_device *pdev)
/* map memory, let mapbase point to our channel */
p->mapbase = ioremap_nocache(res->start, resource_size(res));
if (p->mapbase == NULL) {
- pr_err("sh_tmu: failed to remap I/O memory\n");
+ dev_err(&p->pdev->dev, "failed to remap I/O memory\n");
goto err0;
}
/* setup data for setup_irq() (too early for request_irq()) */
- p->irqaction.name = cfg->name;
+ p->irqaction.name = dev_name(&p->pdev->dev);
p->irqaction.handler = sh_tmu_interrupt;
p->irqaction.dev_id = p;
p->irqaction.irq = irq;
- p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL;
+ p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | \
+ IRQF_IRQPOLL | IRQF_NOBALANCING;
/* get hold of clock */
- p->clk = clk_get(&p->pdev->dev, cfg->clk);
+ p->clk = clk_get(&p->pdev->dev, "tmu_fck");
if (IS_ERR(p->clk)) {
- pr_err("sh_tmu: cannot get clock \"%s\"\n", cfg->clk);
- ret = PTR_ERR(p->clk);
- goto err1;
+ dev_warn(&p->pdev->dev, "using deprecated clock lookup\n");
+ p->clk = clk_get(&p->pdev->dev, cfg->clk);
+ if (IS_ERR(p->clk)) {
+ dev_err(&p->pdev->dev, "cannot get clock\n");
+ ret = PTR_ERR(p->clk);
+ goto err1;
+ }
}
- return sh_tmu_register(p, cfg->name,
+ return sh_tmu_register(p, (char *)dev_name(&p->pdev->dev),
cfg->clockevent_rating,
cfg->clocksource_rating);
err1:
@@ -410,11 +412,10 @@ static int sh_tmu_setup(struct sh_tmu_priv *p, struct platform_device *pdev)
static int __devinit sh_tmu_probe(struct platform_device *pdev)
{
struct sh_tmu_priv *p = platform_get_drvdata(pdev);
- struct sh_timer_config *cfg = pdev->dev.platform_data;
int ret;
if (p) {
- pr_info("sh_tmu: %s kept as earlytimer\n", cfg->name);
+ dev_info(&pdev->dev, "kept as earlytimer\n");
return 0;
}
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 75d293eeb3ee..063b2184caf5 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -662,32 +662,20 @@ static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
}
-#define define_one_ro(_name) \
-static struct freq_attr _name = \
-__ATTR(_name, 0444, show_##_name, NULL)
-
-#define define_one_ro0400(_name) \
-static struct freq_attr _name = \
-__ATTR(_name, 0400, show_##_name, NULL)
-
-#define define_one_rw(_name) \
-static struct freq_attr _name = \
-__ATTR(_name, 0644, show_##_name, store_##_name)
-
-define_one_ro0400(cpuinfo_cur_freq);
-define_one_ro(cpuinfo_min_freq);
-define_one_ro(cpuinfo_max_freq);
-define_one_ro(cpuinfo_transition_latency);
-define_one_ro(scaling_available_governors);
-define_one_ro(scaling_driver);
-define_one_ro(scaling_cur_freq);
-define_one_ro(bios_limit);
-define_one_ro(related_cpus);
-define_one_ro(affected_cpus);
-define_one_rw(scaling_min_freq);
-define_one_rw(scaling_max_freq);
-define_one_rw(scaling_governor);
-define_one_rw(scaling_setspeed);
+cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
+cpufreq_freq_attr_ro(cpuinfo_min_freq);
+cpufreq_freq_attr_ro(cpuinfo_max_freq);
+cpufreq_freq_attr_ro(cpuinfo_transition_latency);
+cpufreq_freq_attr_ro(scaling_available_governors);
+cpufreq_freq_attr_ro(scaling_driver);
+cpufreq_freq_attr_ro(scaling_cur_freq);
+cpufreq_freq_attr_ro(bios_limit);
+cpufreq_freq_attr_ro(related_cpus);
+cpufreq_freq_attr_ro(affected_cpus);
+cpufreq_freq_attr_rw(scaling_min_freq);
+cpufreq_freq_attr_rw(scaling_max_freq);
+cpufreq_freq_attr_rw(scaling_governor);
+cpufreq_freq_attr_rw(scaling_setspeed);
static struct attribute *default_attrs[] = {
&cpuinfo_min_freq.attr,
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 3a147874a465..526bfbf69611 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -178,12 +178,8 @@ static ssize_t show_sampling_rate_min(struct kobject *kobj,
return sprintf(buf, "%u\n", min_sampling_rate);
}
-#define define_one_ro(_name) \
-static struct global_attr _name = \
-__ATTR(_name, 0444, show_##_name, NULL)
-
-define_one_ro(sampling_rate_max);
-define_one_ro(sampling_rate_min);
+define_one_global_ro(sampling_rate_max);
+define_one_global_ro(sampling_rate_min);
/* cpufreq_conservative Governor Tunables */
#define show_one(file_name, object) \
@@ -221,12 +217,8 @@ show_one_old(freq_step);
show_one_old(sampling_rate_min);
show_one_old(sampling_rate_max);
-#define define_one_ro_old(object, _name) \
-static struct freq_attr object = \
-__ATTR(_name, 0444, show_##_name##_old, NULL)
-
-define_one_ro_old(sampling_rate_min_old, sampling_rate_min);
-define_one_ro_old(sampling_rate_max_old, sampling_rate_max);
+cpufreq_freq_attr_ro_old(sampling_rate_min);
+cpufreq_freq_attr_ro_old(sampling_rate_max);
/*** delete after deprecation time ***/
@@ -364,16 +356,12 @@ static ssize_t store_freq_step(struct kobject *a, struct attribute *b,
return count;
}
-#define define_one_rw(_name) \
-static struct global_attr _name = \
-__ATTR(_name, 0644, show_##_name, store_##_name)
-
-define_one_rw(sampling_rate);
-define_one_rw(sampling_down_factor);
-define_one_rw(up_threshold);
-define_one_rw(down_threshold);
-define_one_rw(ignore_nice_load);
-define_one_rw(freq_step);
+define_one_global_rw(sampling_rate);
+define_one_global_rw(sampling_down_factor);
+define_one_global_rw(up_threshold);
+define_one_global_rw(down_threshold);
+define_one_global_rw(ignore_nice_load);
+define_one_global_rw(freq_step);
static struct attribute *dbs_attributes[] = {
&sampling_rate_max.attr,
@@ -409,16 +397,12 @@ write_one_old(down_threshold);
write_one_old(ignore_nice_load);
write_one_old(freq_step);
-#define define_one_rw_old(object, _name) \
-static struct freq_attr object = \
-__ATTR(_name, 0644, show_##_name##_old, store_##_name##_old)
-
-define_one_rw_old(sampling_rate_old, sampling_rate);
-define_one_rw_old(sampling_down_factor_old, sampling_down_factor);
-define_one_rw_old(up_threshold_old, up_threshold);
-define_one_rw_old(down_threshold_old, down_threshold);
-define_one_rw_old(ignore_nice_load_old, ignore_nice_load);
-define_one_rw_old(freq_step_old, freq_step);
+cpufreq_freq_attr_rw_old(sampling_rate);
+cpufreq_freq_attr_rw_old(sampling_down_factor);
+cpufreq_freq_attr_rw_old(up_threshold);
+cpufreq_freq_attr_rw_old(down_threshold);
+cpufreq_freq_attr_rw_old(ignore_nice_load);
+cpufreq_freq_attr_rw_old(freq_step);
static struct attribute *dbs_attributes_old[] = {
&sampling_rate_max_old.attr,
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index bd444dc93cf2..e1314212d8d4 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -73,6 +73,7 @@ enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};
struct cpu_dbs_info_s {
cputime64_t prev_cpu_idle;
+ cputime64_t prev_cpu_iowait;
cputime64_t prev_cpu_wall;
cputime64_t prev_cpu_nice;
struct cpufreq_policy *cur_policy;
@@ -108,6 +109,7 @@ static struct dbs_tuners {
unsigned int down_differential;
unsigned int ignore_nice;
unsigned int powersave_bias;
+ unsigned int io_is_busy;
} dbs_tuners_ins = {
.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
.down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL,
@@ -148,6 +150,16 @@ static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
return idle_time;
}
+static inline cputime64_t get_cpu_iowait_time(unsigned int cpu, cputime64_t *wall)
+{
+ u64 iowait_time = get_cpu_iowait_time_us(cpu, wall);
+
+ if (iowait_time == -1ULL)
+ return 0;
+
+ return iowait_time;
+}
+
/*
* Find right freq to be set now with powersave_bias on.
* Returns the freq_hi to be used right now and will set freq_hi_jiffies,
@@ -234,12 +246,8 @@ static ssize_t show_sampling_rate_min(struct kobject *kobj,
return sprintf(buf, "%u\n", min_sampling_rate);
}
-#define define_one_ro(_name) \
-static struct global_attr _name = \
-__ATTR(_name, 0444, show_##_name, NULL)
-
-define_one_ro(sampling_rate_max);
-define_one_ro(sampling_rate_min);
+define_one_global_ro(sampling_rate_max);
+define_one_global_ro(sampling_rate_min);
/* cpufreq_ondemand Governor Tunables */
#define show_one(file_name, object) \
@@ -249,6 +257,7 @@ static ssize_t show_##file_name \
return sprintf(buf, "%u\n", dbs_tuners_ins.object); \
}
show_one(sampling_rate, sampling_rate);
+show_one(io_is_busy, io_is_busy);
show_one(up_threshold, up_threshold);
show_one(ignore_nice_load, ignore_nice);
show_one(powersave_bias, powersave_bias);
@@ -274,12 +283,8 @@ show_one_old(powersave_bias);
show_one_old(sampling_rate_min);
show_one_old(sampling_rate_max);
-#define define_one_ro_old(object, _name) \
-static struct freq_attr object = \
-__ATTR(_name, 0444, show_##_name##_old, NULL)
-
-define_one_ro_old(sampling_rate_min_old, sampling_rate_min);
-define_one_ro_old(sampling_rate_max_old, sampling_rate_max);
+cpufreq_freq_attr_ro_old(sampling_rate_min);
+cpufreq_freq_attr_ro_old(sampling_rate_max);
/*** delete after deprecation time ***/
@@ -299,6 +304,23 @@ static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
return count;
}
+static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b,
+ const char *buf, size_t count)
+{
+ unsigned int input;
+ int ret;
+
+ ret = sscanf(buf, "%u", &input);
+ if (ret != 1)
+ return -EINVAL;
+
+ mutex_lock(&dbs_mutex);
+ dbs_tuners_ins.io_is_busy = !!input;
+ mutex_unlock(&dbs_mutex);
+
+ return count;
+}
+
static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
const char *buf, size_t count)
{
@@ -376,14 +398,11 @@ static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b,
return count;
}
-#define define_one_rw(_name) \
-static struct global_attr _name = \
-__ATTR(_name, 0644, show_##_name, store_##_name)
-
-define_one_rw(sampling_rate);
-define_one_rw(up_threshold);
-define_one_rw(ignore_nice_load);
-define_one_rw(powersave_bias);
+define_one_global_rw(sampling_rate);
+define_one_global_rw(io_is_busy);
+define_one_global_rw(up_threshold);
+define_one_global_rw(ignore_nice_load);
+define_one_global_rw(powersave_bias);
static struct attribute *dbs_attributes[] = {
&sampling_rate_max.attr,
@@ -392,6 +411,7 @@ static struct attribute *dbs_attributes[] = {
&up_threshold.attr,
&ignore_nice_load.attr,
&powersave_bias.attr,
+ &io_is_busy.attr,
NULL
};
@@ -415,14 +435,10 @@ write_one_old(up_threshold);
write_one_old(ignore_nice_load);
write_one_old(powersave_bias);
-#define define_one_rw_old(object, _name) \
-static struct freq_attr object = \
-__ATTR(_name, 0644, show_##_name##_old, store_##_name##_old)
-
-define_one_rw_old(sampling_rate_old, sampling_rate);
-define_one_rw_old(up_threshold_old, up_threshold);
-define_one_rw_old(ignore_nice_load_old, ignore_nice_load);
-define_one_rw_old(powersave_bias_old, powersave_bias);
+cpufreq_freq_attr_rw_old(sampling_rate);
+cpufreq_freq_attr_rw_old(up_threshold);
+cpufreq_freq_attr_rw_old(ignore_nice_load);
+cpufreq_freq_attr_rw_old(powersave_bias);
static struct attribute *dbs_attributes_old[] = {
&sampling_rate_max_old.attr,
@@ -470,14 +486,15 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
for_each_cpu(j, policy->cpus) {
struct cpu_dbs_info_s *j_dbs_info;
- cputime64_t cur_wall_time, cur_idle_time;
- unsigned int idle_time, wall_time;
+ cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time;
+ unsigned int idle_time, wall_time, iowait_time;
unsigned int load, load_freq;
int freq_avg;
j_dbs_info = &per_cpu(od_cpu_dbs_info, j);
cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
+ cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time);
wall_time = (unsigned int) cputime64_sub(cur_wall_time,
j_dbs_info->prev_cpu_wall);
@@ -487,6 +504,10 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
j_dbs_info->prev_cpu_idle);
j_dbs_info->prev_cpu_idle = cur_idle_time;
+ iowait_time = (unsigned int) cputime64_sub(cur_iowait_time,
+ j_dbs_info->prev_cpu_iowait);
+ j_dbs_info->prev_cpu_iowait = cur_iowait_time;
+
if (dbs_tuners_ins.ignore_nice) {
cputime64_t cur_nice;
unsigned long cur_nice_jiffies;
@@ -504,6 +525,16 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
idle_time += jiffies_to_usecs(cur_nice_jiffies);
}
+ /*
+ * For the purpose of ondemand, waiting for disk IO is an
+ * indication that you're performance critical, and not that
+ * the system is actually idle. So subtract the iowait time
+ * from the cpu idle time.
+ */
+
+ if (dbs_tuners_ins.io_is_busy && idle_time >= iowait_time)
+ idle_time -= iowait_time;
+
if (unlikely(!wall_time || wall_time < idle_time))
continue;
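
To make the io_is_busy accounting concrete: when the flag is set, iowait time is moved from the idle column to the busy column before the load percentage is computed, so a disk-bound CPU looks busy rather than idle. A small worked model with made-up numbers:

/* Self-contained sketch of the load arithmetic; not governor code. */
#include <stdio.h>
#include <stdbool.h>

static unsigned int load_percent(unsigned int wall, unsigned int idle,
                                 unsigned int iowait, bool io_is_busy)
{
        /* mirrors the hunk above: iowait is subtracted from idle time */
        if (io_is_busy && idle >= iowait)
                idle -= iowait;
        if (!wall || wall < idle)
                return 0;
        return 100 * (wall - idle) / wall;
}

int main(void)
{
        /* over a 100 ms window: 60 ms idle, 40 ms of that waiting on disk */
        printf("io_is_busy=0: load %u%%\n", load_percent(100, 60, 40, false));
        printf("io_is_busy=1: load %u%%\n", load_percent(100, 60, 40, true));
        return 0;
}
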
@@ -617,6 +648,29 @@ static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
cancel_delayed_work_sync(&dbs_info->work);
}
+/*
+ * Not all CPUs want IO time to be accounted as busy; this depends on how
+ * efficient idling at a higher frequency/voltage is.
+ * Pavel Machek says this is not so for various generations of AMD and old
+ * Intel systems.
+ * Mike Chan (android.com) claims this is also not true for ARM.
+ * Because of this, whitelist specific known series of CPUs by default, and
+ * leave all others up to the user.
+ */
+static int should_io_be_busy(void)
+{
+#if defined(CONFIG_X86)
+ /*
+ * For Intel, Core 2 (model 15) and later have an efficient idle.
+ */
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
+ boot_cpu_data.x86 == 6 &&
+ boot_cpu_data.x86_model >= 15)
+ return 1;
+#endif
+ return 0;
+}
+
static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
unsigned int event)
{
@@ -679,6 +733,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
dbs_tuners_ins.sampling_rate =
max(min_sampling_rate,
latency * LATENCY_MULTIPLIER);
+ dbs_tuners_ins.io_is_busy = should_io_be_busy();
}
mutex_unlock(&dbs_mutex);
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 12fdd3987a36..199488576a05 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -156,7 +156,7 @@ int cpuidle_enable_device(struct cpuidle_device *dev)
if (dev->enabled)
return 0;
- if (!cpuidle_curr_driver || !cpuidle_curr_governor)
+ if (!cpuidle_get_driver() || !cpuidle_curr_governor)
return -EIO;
if (!dev->state_count)
return -EINVAL;
@@ -207,7 +207,7 @@ void cpuidle_disable_device(struct cpuidle_device *dev)
{
if (!dev->enabled)
return;
- if (!cpuidle_curr_driver || !cpuidle_curr_governor)
+ if (!cpuidle_get_driver() || !cpuidle_curr_governor)
return;
dev->enabled = 0;
@@ -271,10 +271,11 @@ static int __cpuidle_register_device(struct cpuidle_device *dev)
{
int ret;
struct sys_device *sys_dev = get_cpu_sysdev((unsigned long)dev->cpu);
+ struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver();
if (!sys_dev)
return -EINVAL;
- if (!try_module_get(cpuidle_curr_driver->owner))
+ if (!try_module_get(cpuidle_driver->owner))
return -EINVAL;
init_completion(&dev->kobj_unregister);
@@ -284,7 +285,7 @@ static int __cpuidle_register_device(struct cpuidle_device *dev)
per_cpu(cpuidle_devices, dev->cpu) = dev;
list_add(&dev->device_list, &cpuidle_detected_devices);
if ((ret = cpuidle_add_sysfs(sys_dev))) {
- module_put(cpuidle_curr_driver->owner);
+ module_put(cpuidle_driver->owner);
return ret;
}
@@ -325,6 +326,7 @@ EXPORT_SYMBOL_GPL(cpuidle_register_device);
void cpuidle_unregister_device(struct cpuidle_device *dev)
{
struct sys_device *sys_dev = get_cpu_sysdev((unsigned long)dev->cpu);
+ struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver();
if (dev->registered == 0)
return;
@@ -340,7 +342,7 @@ void cpuidle_unregister_device(struct cpuidle_device *dev)
cpuidle_resume_and_unlock();
- module_put(cpuidle_curr_driver->owner);
+ module_put(cpuidle_driver->owner);
}
EXPORT_SYMBOL_GPL(cpuidle_unregister_device);
diff --git a/drivers/cpuidle/cpuidle.h b/drivers/cpuidle/cpuidle.h
index 9476ba33ee2c..33e50d556f17 100644
--- a/drivers/cpuidle/cpuidle.h
+++ b/drivers/cpuidle/cpuidle.h
@@ -9,7 +9,6 @@
/* For internal use only */
extern struct cpuidle_governor *cpuidle_curr_governor;
-extern struct cpuidle_driver *cpuidle_curr_driver;
extern struct list_head cpuidle_governors;
extern struct list_head cpuidle_detected_devices;
extern struct mutex cpuidle_lock;
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index 2257004fe33d..fd1601e3d125 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -14,7 +14,7 @@
#include "cpuidle.h"
-struct cpuidle_driver *cpuidle_curr_driver;
+static struct cpuidle_driver *cpuidle_curr_driver;
DEFINE_SPINLOCK(cpuidle_driver_lock);
/**
@@ -40,13 +40,25 @@ int cpuidle_register_driver(struct cpuidle_driver *drv)
EXPORT_SYMBOL_GPL(cpuidle_register_driver);
/**
+ * cpuidle_get_driver - return the current driver
+ */
+struct cpuidle_driver *cpuidle_get_driver(void)
+{
+ return cpuidle_curr_driver;
+}
+EXPORT_SYMBOL_GPL(cpuidle_get_driver);
+
+/**
* cpuidle_unregister_driver - unregisters a driver
* @drv: the driver
*/
void cpuidle_unregister_driver(struct cpuidle_driver *drv)
{
- if (!drv)
+ if (drv != cpuidle_curr_driver) {
+ WARN(1, "invalid cpuidle_unregister_driver(%s)\n",
+ drv->name);
return;
+ }
spin_lock(&cpuidle_driver_lock);
cpuidle_curr_driver = NULL;
diff --git a/drivers/cpuidle/governors/ladder.c b/drivers/cpuidle/governors/ladder.c
index 1c1ceb4f218f..12c98900dcf8 100644
--- a/drivers/cpuidle/governors/ladder.c
+++ b/drivers/cpuidle/governors/ladder.c
@@ -67,7 +67,7 @@ static int ladder_select_state(struct cpuidle_device *dev)
struct ladder_device *ldev = &__get_cpu_var(ladder_devices);
struct ladder_device_state *last_state;
int last_residency, last_idx = ldev->last_state_idx;
- int latency_req = pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY);
+ int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
/* Special case when user has set very strict latency requirement */
if (unlikely(latency_req == 0)) {
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index f8e57c6303f2..52ff8aa63f84 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -21,9 +21,12 @@
#include <linux/math64.h>
#define BUCKETS 12
+#define INTERVALS 8
#define RESOLUTION 1024
-#define DECAY 4
+#define DECAY 8
#define MAX_INTERESTING 50000
+#define STDDEV_THRESH 400
+
/*
* Concepts and ideas behind the menu governor
@@ -64,6 +67,16 @@
* indexed based on the magnitude of the expected duration as well as the
* "is IO outstanding" property.
*
+ * Repeatable-interval-detector
+ * ----------------------------
+ * There are some cases where "next timer" is a completely unusable predictor:
+ * Those cases where the interval is fixed, for example due to hardware
+ * interrupt mitigation, but also due to fixed transfer rate devices such as
+ * mice.
+ * For this, we use a different predictor: We track the duration of the last 8
+ * intervals and if the standard deviation of these 8 intervals is below a
+ * threshold value, we use the average of these intervals as prediction.
+ *
* Limiting Performance Impact
* ---------------------------
* C states, especially those with large exit latencies, can have a real
@@ -104,6 +117,8 @@ struct menu_device {
unsigned int exit_us;
unsigned int bucket;
u64 correction_factor[BUCKETS];
+ u32 intervals[INTERVALS];
+ int interval_ptr;
};
@@ -175,6 +190,42 @@ static u64 div_round64(u64 dividend, u32 divisor)
return div_u64(dividend + (divisor / 2), divisor);
}
+/*
+ * Try detecting repeating patterns by keeping track of the last 8
+ * intervals, and checking if the standard deviation of that set
+ * of points is below a threshold. If it is... then use the
+ * average of these 8 points as the estimated value.
+ */
+static void detect_repeating_patterns(struct menu_device *data)
+{
+ int i;
+ uint64_t avg = 0;
+ uint64_t stddev = 0; /* contains the square of the std deviation */
+
+ /* first calculate average and standard deviation of the past */
+ for (i = 0; i < INTERVALS; i++)
+ avg += data->intervals[i];
+ avg = avg / INTERVALS;
+
+ /* if the avg is beyond the known next tick, it's worthless */
+ if (avg > data->expected_us)
+ return;
+
+ for (i = 0; i < INTERVALS; i++)
+ stddev += (data->intervals[i] - avg) *
+ (data->intervals[i] - avg);
+
+ stddev = stddev / INTERVALS;
+
+ /*
+ * now.. if stddev is small.. then assume we have a
+ * repeating pattern and predict we keep doing this.
+ */
+
+ if (avg && stddev < STDDEV_THRESH)
+ data->predicted_us = avg;
+}
+
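
(Illustration only, not part of the patch: a standalone run of the same averaging/variance test as detect_repeating_patterns() above. STDDEV_THRESH matches the value introduced here; the interval samples and expected_us are hypothetical.)

#include <stdio.h>
#include <stdint.h>

#define INTERVALS      8
#define STDDEV_THRESH  400

/* Average the last 8 idle intervals and accept the average as the
 * prediction only when the variance (stddev squared) is below the
 * threshold and the average does not exceed the next known timer.
 */
static uint64_t predict(const uint32_t intervals[INTERVALS],
                        uint64_t expected_us, uint64_t predicted_us)
{
        uint64_t avg = 0, stddev = 0;
        int i;

        for (i = 0; i < INTERVALS; i++)
                avg += intervals[i];
        avg /= INTERVALS;

        if (avg > expected_us)          /* beyond the known next tick */
                return predicted_us;

        for (i = 0; i < INTERVALS; i++) {
                int64_t diff = (int64_t)intervals[i] - (int64_t)avg;
                stddev += (uint64_t)(diff * diff);
        }
        stddev /= INTERVALS;

        if (avg && stddev < STDDEV_THRESH)
                return avg;             /* stable pattern: trust the average */
        return predicted_us;
}

int main(void)
{
        /* e.g. a device interrupting every ~1 ms vs. erratic wakeups */
        uint32_t stable[INTERVALS]   = { 990, 1005, 1010, 995, 1000, 1008, 992, 1000 };
        uint32_t unstable[INTERVALS] = { 100, 4000, 250, 9000, 50, 7000, 300, 1200 };

        printf("stable:   predicted %llu us\n",
               (unsigned long long)predict(stable, 4000, 4000));   /* 1000 */
        printf("unstable: predicted %llu us\n",
               (unsigned long long)predict(unstable, 4000, 4000)); /* unchanged */
        return 0;
}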
/**
* menu_select - selects the next idle state to enter
* @dev: the CPU
@@ -182,7 +233,7 @@ static u64 div_round64(u64 dividend, u32 divisor)
static int menu_select(struct cpuidle_device *dev)
{
struct menu_device *data = &__get_cpu_var(menu_devices);
- int latency_req = pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY);
+ int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
int i;
int multiplier;
@@ -218,6 +269,8 @@ static int menu_select(struct cpuidle_device *dev)
data->predicted_us = div_round64(data->expected_us * data->correction_factor[data->bucket],
RESOLUTION * DECAY);
+ detect_repeating_patterns(data);
+
/*
* We want to default to C1 (hlt), not to busy polling
* unless the timer is happening really really soon.
@@ -310,6 +363,11 @@ static void menu_update(struct cpuidle_device *dev)
new_factor = 1;
data->correction_factor[data->bucket] = new_factor;
+
+ /* update the repeating-pattern data */
+ data->intervals[data->interval_ptr++] = last_idle_us;
+ if (data->interval_ptr >= INTERVALS)
+ data->interval_ptr = 0;
}
/**
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
index 0ba9c8b8ee74..0310ffaec9df 100644
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
@@ -47,10 +47,11 @@ static ssize_t show_current_driver(struct sysdev_class *class,
char *buf)
{
ssize_t ret;
+ struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver();
spin_lock(&cpuidle_driver_lock);
- if (cpuidle_curr_driver)
- ret = sprintf(buf, "%s\n", cpuidle_curr_driver->name);
+ if (cpuidle_driver)
+ ret = sprintf(buf, "%s\n", cpuidle_driver->name);
else
ret = sprintf(buf, "none\n");
spin_unlock(&cpuidle_driver_lock);
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index b08403d7d1ca..fbf94cf496f0 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -170,6 +170,18 @@ config CRYPTO_DEV_MV_CESA
Currently the driver supports AES in ECB and CBC mode without DMA.
+config CRYPTO_DEV_NIAGARA2
+ tristate "Niagara2 Stream Processing Unit driver"
+ select CRYPTO_ALGAPI
+ depends on SPARC64
+ help
+ Each core of a Niagara2 processor contains a Stream
+ Processing Unit, which itself contains several cryptographic
+ sub-units. One set provides the Modular Arithmetic Unit,
+ used for SSL offload. The other set provides the Cipher
+ Group, which can perform encryption, decryption, hashing,
+ checksumming, and raw copies.
+
config CRYPTO_DEV_HIFN_795X
tristate "Driver HIFN 795x crypto accelerator chips"
select CRYPTO_DES
@@ -222,4 +234,13 @@ config CRYPTO_DEV_PPC4XX
help
This option allows you to have support for AMCC crypto acceleration.
+config CRYPTO_DEV_OMAP_SHAM
+ tristate "Support for OMAP SHA1/MD5 hw accelerator"
+ depends on ARCH_OMAP2 || ARCH_OMAP3
+ select CRYPTO_SHA1
+ select CRYPTO_MD5
+ help
+ OMAP processors have a SHA1/MD5 hw accelerator. Select this if you
+ want to use the OMAP module for SHA1/MD5 algorithms.
+
endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 6ffcb3f7f942..6dbbe00c4524 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -1,8 +1,12 @@
obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o
obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
+obj-$(CONFIG_CRYPTO_DEV_NIAGARA2) += n2_crypto.o
+n2_crypto-objs := n2_core.o n2_asm.o
obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
obj-$(CONFIG_CRYPTO_DEV_MV_CESA) += mv_cesa.o
obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
+obj-$(CONFIG_CRYPTO_DEV_OMAP_SHAM) += omap-sham.o
+
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 6c4c8b7ce3aa..9d65b371de64 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -1281,8 +1281,11 @@ static const struct of_device_id crypto4xx_match[] = {
};
static struct of_platform_driver crypto4xx_driver = {
- .name = "crypto4xx",
- .match_table = crypto4xx_match,
+ .driver = {
+ .name = "crypto4xx",
+ .owner = THIS_MODULE,
+ .of_match_table = crypto4xx_match,
+ },
.probe = crypto4xx_probe,
.remove = crypto4xx_remove,
};
diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c
index c7a5a43ba691..09389dd2f96b 100644
--- a/drivers/crypto/geode-aes.c
+++ b/drivers/crypto/geode-aes.c
@@ -15,14 +15,14 @@
#include <crypto/algapi.h>
#include <crypto/aes.h>
-#include <asm/io.h>
-#include <asm/delay.h>
+#include <linux/io.h>
+#include <linux/delay.h>
#include "geode-aes.h"
/* Static structures */
-static void __iomem * _iobase;
+static void __iomem *_iobase;
static spinlock_t lock;
/* Write a 128 bit field (either a writable key or IV) */
@@ -30,7 +30,7 @@ static inline void
_writefield(u32 offset, void *value)
{
int i;
- for(i = 0; i < 4; i++)
+ for (i = 0; i < 4; i++)
iowrite32(((u32 *) value)[i], _iobase + offset + (i * 4));
}
@@ -39,7 +39,7 @@ static inline void
_readfield(u32 offset, void *value)
{
int i;
- for(i = 0; i < 4; i++)
+ for (i = 0; i < 4; i++)
((u32 *) value)[i] = ioread32(_iobase + offset + (i * 4));
}
@@ -59,7 +59,7 @@ do_crypt(void *src, void *dst, int len, u32 flags)
do {
status = ioread32(_iobase + AES_INTR_REG);
cpu_relax();
- } while(!(status & AES_INTRA_PENDING) && --counter);
+ } while (!(status & AES_INTRA_PENDING) && --counter);
/* Clear the event */
iowrite32((status & 0xFF) | AES_INTRA_PENDING, _iobase + AES_INTR_REG);
@@ -317,7 +317,7 @@ geode_cbc_decrypt(struct blkcipher_desc *desc,
err = blkcipher_walk_virt(desc, &walk);
op->iv = walk.iv;
- while((nbytes = walk.nbytes)) {
+ while ((nbytes = walk.nbytes)) {
op->src = walk.src.virt.addr,
op->dst = walk.dst.virt.addr;
op->mode = AES_MODE_CBC;
@@ -349,7 +349,7 @@ geode_cbc_encrypt(struct blkcipher_desc *desc,
err = blkcipher_walk_virt(desc, &walk);
op->iv = walk.iv;
- while((nbytes = walk.nbytes)) {
+ while ((nbytes = walk.nbytes)) {
op->src = walk.src.virt.addr,
op->dst = walk.dst.virt.addr;
op->mode = AES_MODE_CBC;
@@ -429,7 +429,7 @@ geode_ecb_decrypt(struct blkcipher_desc *desc,
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
- while((nbytes = walk.nbytes)) {
+ while ((nbytes = walk.nbytes)) {
op->src = walk.src.virt.addr,
op->dst = walk.dst.virt.addr;
op->mode = AES_MODE_ECB;
@@ -459,7 +459,7 @@ geode_ecb_encrypt(struct blkcipher_desc *desc,
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
- while((nbytes = walk.nbytes)) {
+ while ((nbytes = walk.nbytes)) {
op->src = walk.src.virt.addr,
op->dst = walk.dst.virt.addr;
op->mode = AES_MODE_ECB;
@@ -518,11 +518,12 @@ static int __devinit
geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
int ret;
-
- if ((ret = pci_enable_device(dev)))
+ ret = pci_enable_device(dev);
+ if (ret)
return ret;
- if ((ret = pci_request_regions(dev, "geode-aes")))
+ ret = pci_request_regions(dev, "geode-aes");
+ if (ret)
goto eenable;
_iobase = pci_iomap(dev, 0, 0);
@@ -537,13 +538,16 @@ geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
/* Clear any pending activity */
iowrite32(AES_INTR_PENDING | AES_INTR_MASK, _iobase + AES_INTR_REG);
- if ((ret = crypto_register_alg(&geode_alg)))
+ ret = crypto_register_alg(&geode_alg);
+ if (ret)
goto eiomap;
- if ((ret = crypto_register_alg(&geode_ecb_alg)))
+ ret = crypto_register_alg(&geode_ecb_alg);
+ if (ret)
goto ealg;
- if ((ret = crypto_register_alg(&geode_cbc_alg)))
+ ret = crypto_register_alg(&geode_cbc_alg);
+ if (ret)
goto eecb;
printk(KERN_NOTICE "geode-aes: GEODE AES engine enabled.\n");
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index 73e8b1713b54..16fce3aadf4d 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -638,7 +638,7 @@ struct hifn_crypto_alg
#define ASYNC_FLAGS_MISALIGNED (1<<0)
-struct ablkcipher_walk
+struct hifn_cipher_walk
{
struct scatterlist cache[ASYNC_SCATTERLIST_CACHE];
u32 flags;
@@ -657,7 +657,7 @@ struct hifn_request_context
u8 *iv;
unsigned int ivsize;
u8 op, type, mode, unused;
- struct ablkcipher_walk walk;
+ struct hifn_cipher_walk walk;
};
#define crypto_alg_to_hifn(a) container_of(a, struct hifn_crypto_alg, alg)
@@ -1417,7 +1417,7 @@ static int hifn_setup_dma(struct hifn_device *dev,
return 0;
}
-static int ablkcipher_walk_init(struct ablkcipher_walk *w,
+static int hifn_cipher_walk_init(struct hifn_cipher_walk *w,
int num, gfp_t gfp_flags)
{
int i;
@@ -1442,7 +1442,7 @@ static int ablkcipher_walk_init(struct ablkcipher_walk *w,
return i;
}
-static void ablkcipher_walk_exit(struct ablkcipher_walk *w)
+static void hifn_cipher_walk_exit(struct hifn_cipher_walk *w)
{
int i;
@@ -1486,8 +1486,8 @@ static int ablkcipher_add(unsigned int *drestp, struct scatterlist *dst,
return idx;
}
-static int ablkcipher_walk(struct ablkcipher_request *req,
- struct ablkcipher_walk *w)
+static int hifn_cipher_walk(struct ablkcipher_request *req,
+ struct hifn_cipher_walk *w)
{
struct scatterlist *dst, *t;
unsigned int nbytes = req->nbytes, offset, copy, diff;
@@ -1600,12 +1600,12 @@ static int hifn_setup_session(struct ablkcipher_request *req)
}
if (rctx->walk.flags & ASYNC_FLAGS_MISALIGNED) {
- err = ablkcipher_walk_init(&rctx->walk, idx, GFP_ATOMIC);
+ err = hifn_cipher_walk_init(&rctx->walk, idx, GFP_ATOMIC);
if (err < 0)
return err;
}
- sg_num = ablkcipher_walk(req, &rctx->walk);
+ sg_num = hifn_cipher_walk(req, &rctx->walk);
if (sg_num < 0) {
err = sg_num;
goto err_out_exit;
@@ -1806,7 +1806,7 @@ static void hifn_process_ready(struct ablkcipher_request *req, int error)
kunmap_atomic(saddr, KM_SOFTIRQ0);
}
- ablkcipher_walk_exit(&rctx->walk);
+ hifn_cipher_walk_exit(&rctx->walk);
}
req->base.complete(&req->base, error);
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
index 6f29012bcc43..e095422b58dd 100644
--- a/drivers/crypto/mv_cesa.c
+++ b/drivers/crypto/mv_cesa.c
@@ -15,8 +15,14 @@
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
+#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
#include "mv_cesa.h"
+
+#define MV_CESA "MV-CESA:"
+#define MAX_HW_HASH_SIZE 0xFFFF
+
/*
* STM:
* /---------------------------------------\
@@ -39,10 +45,12 @@ enum engine_status {
* @dst_sg_it: sg iterator for dst
* @sg_src_left: bytes left in src to process (scatter list)
* @src_start: offset to add to src start position (scatter list)
- * @crypt_len: length of current crypt process
+ * @crypt_len: length of current hw crypt/hash process
+ * @hw_nbytes: total bytes to process in hw for this request
+ * @copy_back: whether to copy data back (crypt) or not (hash)
* @sg_dst_left: bytes left dst to process in this scatter list
* @dst_start: offset to add to dst start position (scatter list)
- * @total_req_bytes: total number of bytes processed (request).
+ * @hw_processed_bytes: number of bytes processed by hw (request).
*
 * sg helpers are used to iterate over the scatterlist. Since the size of the
 * SRAM may be less than the scatter size, this struct is used to keep
@@ -51,15 +59,19 @@ enum engine_status {
struct req_progress {
struct sg_mapping_iter src_sg_it;
struct sg_mapping_iter dst_sg_it;
+ void (*complete) (void);
+ void (*process) (int is_first);
/* src mostly */
int sg_src_left;
int src_start;
int crypt_len;
+ int hw_nbytes;
/* dst mostly */
+ int copy_back;
int sg_dst_left;
int dst_start;
- int total_req_bytes;
+ int hw_processed_bytes;
};
struct crypto_priv {
@@ -72,10 +84,12 @@ struct crypto_priv {
spinlock_t lock;
struct crypto_queue queue;
enum engine_status eng_st;
- struct ablkcipher_request *cur_req;
+ struct crypto_async_request *cur_req;
struct req_progress p;
int max_req_size;
int sram_size;
+ int has_sha1;
+ int has_hmac_sha1;
};
static struct crypto_priv *cpg;
@@ -97,6 +111,31 @@ struct mv_req_ctx {
int decrypt;
};
+enum hash_op {
+ COP_SHA1,
+ COP_HMAC_SHA1
+};
+
+struct mv_tfm_hash_ctx {
+ struct crypto_shash *fallback;
+ struct crypto_shash *base_hash;
+ u32 ivs[2 * SHA1_DIGEST_SIZE / 4];
+ int count_add;
+ enum hash_op op;
+};
+
+struct mv_req_hash_ctx {
+ u64 count;
+ u32 state[SHA1_DIGEST_SIZE / 4];
+ u8 buffer[SHA1_BLOCK_SIZE];
+ int first_hash; /* marks that we don't have previous state */
+ int last_chunk; /* marks that this is the 'final' request */
+ int extra_bytes; /* unprocessed bytes in buffer */
+ enum hash_op op;
+ int count_add;
+ struct scatterlist dummysg;
+};
+
static void compute_aes_dec_key(struct mv_ctx *ctx)
{
struct crypto_aes_ctx gen_aes_key;
@@ -144,32 +183,51 @@ static int mv_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key,
return 0;
}
-static void setup_data_in(struct ablkcipher_request *req)
+static void copy_src_to_buf(struct req_progress *p, char *dbuf, int len)
{
int ret;
- void *buf;
+ void *sbuf;
+ int copied = 0;
- if (!cpg->p.sg_src_left) {
- ret = sg_miter_next(&cpg->p.src_sg_it);
- BUG_ON(!ret);
- cpg->p.sg_src_left = cpg->p.src_sg_it.length;
- cpg->p.src_start = 0;
- }
-
- cpg->p.crypt_len = min(cpg->p.sg_src_left, cpg->max_req_size);
-
- buf = cpg->p.src_sg_it.addr;
- buf += cpg->p.src_start;
+ while (1) {
+ if (!p->sg_src_left) {
+ ret = sg_miter_next(&p->src_sg_it);
+ BUG_ON(!ret);
+ p->sg_src_left = p->src_sg_it.length;
+ p->src_start = 0;
+ }
- memcpy(cpg->sram + SRAM_DATA_IN_START, buf, cpg->p.crypt_len);
+ sbuf = p->src_sg_it.addr + p->src_start;
+
+ if (p->sg_src_left <= len - copied) {
+ memcpy(dbuf + copied, sbuf, p->sg_src_left);
+ copied += p->sg_src_left;
+ p->sg_src_left = 0;
+ if (copied >= len)
+ break;
+ } else {
+ int copy_len = len - copied;
+ memcpy(dbuf + copied, sbuf, copy_len);
+ p->src_start += copy_len;
+ p->sg_src_left -= copy_len;
+ break;
+ }
+ }
+}
- cpg->p.sg_src_left -= cpg->p.crypt_len;
- cpg->p.src_start += cpg->p.crypt_len;
+static void setup_data_in(void)
+{
+ struct req_progress *p = &cpg->p;
+ int data_in_sram =
+ min(p->hw_nbytes - p->hw_processed_bytes, cpg->max_req_size);
+ copy_src_to_buf(p, cpg->sram + SRAM_DATA_IN_START + p->crypt_len,
+ data_in_sram - p->crypt_len);
+ p->crypt_len = data_in_sram;
}
static void mv_process_current_q(int first_block)
{
- struct ablkcipher_request *req = cpg->cur_req;
+ struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
struct sec_accel_config op;
@@ -179,6 +237,7 @@ static void mv_process_current_q(int first_block)
op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_ECB;
break;
case COP_AES_CBC:
+ default:
op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_CBC;
op.enc_iv = ENC_IV_POINT(SRAM_DATA_IV) |
ENC_IV_BUF_POINT(SRAM_DATA_IV_BUF);
@@ -211,7 +270,7 @@ static void mv_process_current_q(int first_block)
ENC_P_DST(SRAM_DATA_OUT_START);
op.enc_key_p = SRAM_DATA_KEY_P;
- setup_data_in(req);
+ setup_data_in();
op.enc_len = cpg->p.crypt_len;
memcpy(cpg->sram + SRAM_CONFIG, &op,
sizeof(struct sec_accel_config));
@@ -228,91 +287,294 @@ static void mv_process_current_q(int first_block)
static void mv_crypto_algo_completion(void)
{
- struct ablkcipher_request *req = cpg->cur_req;
+ struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+ sg_miter_stop(&cpg->p.src_sg_it);
+ sg_miter_stop(&cpg->p.dst_sg_it);
+
if (req_ctx->op != COP_AES_CBC)
return ;
memcpy(req->info, cpg->sram + SRAM_DATA_IV_BUF, 16);
}
+static void mv_process_hash_current(int first_block)
+{
+ struct ahash_request *req = ahash_request_cast(cpg->cur_req);
+ struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req);
+ struct req_progress *p = &cpg->p;
+ struct sec_accel_config op = { 0 };
+ int is_last;
+
+ switch (req_ctx->op) {
+ case COP_SHA1:
+ default:
+ op.config = CFG_OP_MAC_ONLY | CFG_MACM_SHA1;
+ break;
+ case COP_HMAC_SHA1:
+ op.config = CFG_OP_MAC_ONLY | CFG_MACM_HMAC_SHA1;
+ break;
+ }
+
+ op.mac_src_p = MAC_SRC_DATA_P(SRAM_DATA_IN_START) |
+ MAC_SRC_TOTAL_LEN((u32)req_ctx->count);
+
+ setup_data_in();
+
+ op.mac_digest =
+ MAC_DIGEST_P(SRAM_DIGEST_BUF) | MAC_FRAG_LEN(p->crypt_len);
+ op.mac_iv =
+ MAC_INNER_IV_P(SRAM_HMAC_IV_IN) |
+ MAC_OUTER_IV_P(SRAM_HMAC_IV_OUT);
+
+ is_last = req_ctx->last_chunk
+ && (p->hw_processed_bytes + p->crypt_len >= p->hw_nbytes)
+ && (req_ctx->count <= MAX_HW_HASH_SIZE);
+ if (req_ctx->first_hash) {
+ if (is_last)
+ op.config |= CFG_NOT_FRAG;
+ else
+ op.config |= CFG_FIRST_FRAG;
+
+ req_ctx->first_hash = 0;
+ } else {
+ if (is_last)
+ op.config |= CFG_LAST_FRAG;
+ else
+ op.config |= CFG_MID_FRAG;
+ }
+
+ memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config));
+
+ writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);
+ /* GO */
+ writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);
+
+ /*
+ * XXX: add timer if the interrupt does not occur for some mystery
+ * reason
+ */
+}
+
+static inline int mv_hash_import_sha1_ctx(const struct mv_req_hash_ctx *ctx,
+ struct shash_desc *desc)
+{
+ int i;
+ struct sha1_state shash_state;
+
+ shash_state.count = ctx->count + ctx->count_add;
+ for (i = 0; i < 5; i++)
+ shash_state.state[i] = ctx->state[i];
+ memcpy(shash_state.buffer, ctx->buffer, sizeof(shash_state.buffer));
+ return crypto_shash_import(desc, &shash_state);
+}
+
+static int mv_hash_final_fallback(struct ahash_request *req)
+{
+ const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
+ struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req);
+ struct {
+ struct shash_desc shash;
+ char ctx[crypto_shash_descsize(tfm_ctx->fallback)];
+ } desc;
+ int rc;
+
+ desc.shash.tfm = tfm_ctx->fallback;
+ desc.shash.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+ if (unlikely(req_ctx->first_hash)) {
+ crypto_shash_init(&desc.shash);
+ crypto_shash_update(&desc.shash, req_ctx->buffer,
+ req_ctx->extra_bytes);
+ } else {
+ /* only SHA1 for now....
+ */
+ rc = mv_hash_import_sha1_ctx(req_ctx, &desc.shash);
+ if (rc)
+ goto out;
+ }
+ rc = crypto_shash_final(&desc.shash, req->result);
+out:
+ return rc;
+}
+
+static void mv_hash_algo_completion(void)
+{
+ struct ahash_request *req = ahash_request_cast(cpg->cur_req);
+ struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
+
+ if (ctx->extra_bytes)
+ copy_src_to_buf(&cpg->p, ctx->buffer, ctx->extra_bytes);
+ sg_miter_stop(&cpg->p.src_sg_it);
+
+ ctx->state[0] = readl(cpg->reg + DIGEST_INITIAL_VAL_A);
+ ctx->state[1] = readl(cpg->reg + DIGEST_INITIAL_VAL_B);
+ ctx->state[2] = readl(cpg->reg + DIGEST_INITIAL_VAL_C);
+ ctx->state[3] = readl(cpg->reg + DIGEST_INITIAL_VAL_D);
+ ctx->state[4] = readl(cpg->reg + DIGEST_INITIAL_VAL_E);
+
+ if (likely(ctx->last_chunk)) {
+ if (likely(ctx->count <= MAX_HW_HASH_SIZE)) {
+ memcpy(req->result, cpg->sram + SRAM_DIGEST_BUF,
+ crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
+ } else
+ mv_hash_final_fallback(req);
+ }
+}
+
static void dequeue_complete_req(void)
{
- struct ablkcipher_request *req = cpg->cur_req;
+ struct crypto_async_request *req = cpg->cur_req;
void *buf;
int ret;
+ cpg->p.hw_processed_bytes += cpg->p.crypt_len;
+ if (cpg->p.copy_back) {
+ int need_copy_len = cpg->p.crypt_len;
+ int sram_offset = 0;
+ do {
+ int dst_copy;
+
+ if (!cpg->p.sg_dst_left) {
+ ret = sg_miter_next(&cpg->p.dst_sg_it);
+ BUG_ON(!ret);
+ cpg->p.sg_dst_left = cpg->p.dst_sg_it.length;
+ cpg->p.dst_start = 0;
+ }
- cpg->p.total_req_bytes += cpg->p.crypt_len;
- do {
- int dst_copy;
-
- if (!cpg->p.sg_dst_left) {
- ret = sg_miter_next(&cpg->p.dst_sg_it);
- BUG_ON(!ret);
- cpg->p.sg_dst_left = cpg->p.dst_sg_it.length;
- cpg->p.dst_start = 0;
- }
-
- buf = cpg->p.dst_sg_it.addr;
- buf += cpg->p.dst_start;
+ buf = cpg->p.dst_sg_it.addr;
+ buf += cpg->p.dst_start;
- dst_copy = min(cpg->p.crypt_len, cpg->p.sg_dst_left);
+ dst_copy = min(need_copy_len, cpg->p.sg_dst_left);
- memcpy(buf, cpg->sram + SRAM_DATA_OUT_START, dst_copy);
+ memcpy(buf,
+ cpg->sram + SRAM_DATA_OUT_START + sram_offset,
+ dst_copy);
+ sram_offset += dst_copy;
+ cpg->p.sg_dst_left -= dst_copy;
+ need_copy_len -= dst_copy;
+ cpg->p.dst_start += dst_copy;
+ } while (need_copy_len > 0);
+ }
- cpg->p.sg_dst_left -= dst_copy;
- cpg->p.crypt_len -= dst_copy;
- cpg->p.dst_start += dst_copy;
- } while (cpg->p.crypt_len > 0);
+ cpg->p.crypt_len = 0;
BUG_ON(cpg->eng_st != ENGINE_W_DEQUEUE);
- if (cpg->p.total_req_bytes < req->nbytes) {
+ if (cpg->p.hw_processed_bytes < cpg->p.hw_nbytes) {
/* process next scatter list entry */
cpg->eng_st = ENGINE_BUSY;
- mv_process_current_q(0);
+ cpg->p.process(0);
} else {
- sg_miter_stop(&cpg->p.src_sg_it);
- sg_miter_stop(&cpg->p.dst_sg_it);
- mv_crypto_algo_completion();
+ cpg->p.complete();
cpg->eng_st = ENGINE_IDLE;
- req->base.complete(&req->base, 0);
+ local_bh_disable();
+ req->complete(req, 0);
+ local_bh_enable();
}
}
static int count_sgs(struct scatterlist *sl, unsigned int total_bytes)
{
int i = 0;
-
- do {
- total_bytes -= sl[i].length;
- i++;
-
- } while (total_bytes > 0);
+ size_t cur_len;
+
+ while (1) {
+ cur_len = sl[i].length;
+ ++i;
+ if (total_bytes > cur_len)
+ total_bytes -= cur_len;
+ else
+ break;
+ }
return i;
}
-static void mv_enqueue_new_req(struct ablkcipher_request *req)
+static void mv_start_new_crypt_req(struct ablkcipher_request *req)
{
+ struct req_progress *p = &cpg->p;
int num_sgs;
- cpg->cur_req = req;
- memset(&cpg->p, 0, sizeof(struct req_progress));
+ cpg->cur_req = &req->base;
+ memset(p, 0, sizeof(struct req_progress));
+ p->hw_nbytes = req->nbytes;
+ p->complete = mv_crypto_algo_completion;
+ p->process = mv_process_current_q;
+ p->copy_back = 1;
num_sgs = count_sgs(req->src, req->nbytes);
- sg_miter_start(&cpg->p.src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);
+ sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);
num_sgs = count_sgs(req->dst, req->nbytes);
- sg_miter_start(&cpg->p.dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG);
+ sg_miter_start(&p->dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG);
+
mv_process_current_q(1);
}
+static void mv_start_new_hash_req(struct ahash_request *req)
+{
+ struct req_progress *p = &cpg->p;
+ struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
+ const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
+ int num_sgs, hw_bytes, old_extra_bytes, rc;
+ cpg->cur_req = &req->base;
+ memset(p, 0, sizeof(struct req_progress));
+ hw_bytes = req->nbytes + ctx->extra_bytes;
+ old_extra_bytes = ctx->extra_bytes;
+
+ if (unlikely(ctx->extra_bytes)) {
+ memcpy(cpg->sram + SRAM_DATA_IN_START, ctx->buffer,
+ ctx->extra_bytes);
+ p->crypt_len = ctx->extra_bytes;
+ }
+
+ memcpy(cpg->sram + SRAM_HMAC_IV_IN, tfm_ctx->ivs, sizeof(tfm_ctx->ivs));
+
+ if (unlikely(!ctx->first_hash)) {
+ writel(ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A);
+ writel(ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B);
+ writel(ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C);
+ writel(ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D);
+ writel(ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E);
+ }
+
+ ctx->extra_bytes = hw_bytes % SHA1_BLOCK_SIZE;
+ if (ctx->extra_bytes != 0
+ && (!ctx->last_chunk || ctx->count > MAX_HW_HASH_SIZE))
+ hw_bytes -= ctx->extra_bytes;
+ else
+ ctx->extra_bytes = 0;
+
+ num_sgs = count_sgs(req->src, req->nbytes);
+ sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);
+
+ if (hw_bytes) {
+ p->hw_nbytes = hw_bytes;
+ p->complete = mv_hash_algo_completion;
+ p->process = mv_process_hash_current;
+
+ mv_process_hash_current(1);
+ } else {
+ copy_src_to_buf(p, ctx->buffer + old_extra_bytes,
+ ctx->extra_bytes - old_extra_bytes);
+ sg_miter_stop(&p->src_sg_it);
+ if (ctx->last_chunk)
+ rc = mv_hash_final_fallback(req);
+ else
+ rc = 0;
+ cpg->eng_st = ENGINE_IDLE;
+ local_bh_disable();
+ req->base.complete(&req->base, rc);
+ local_bh_enable();
+ }
+}
+
static int queue_manag(void *data)
{
cpg->eng_st = ENGINE_IDLE;
do {
- struct ablkcipher_request *req;
struct crypto_async_request *async_req = NULL;
struct crypto_async_request *backlog;
@@ -338,9 +600,18 @@ static int queue_manag(void *data)
}
if (async_req) {
- req = container_of(async_req,
- struct ablkcipher_request, base);
- mv_enqueue_new_req(req);
+ if (async_req->tfm->__crt_alg->cra_type !=
+ &crypto_ahash_type) {
+ struct ablkcipher_request *req =
+ container_of(async_req,
+ struct ablkcipher_request,
+ base);
+ mv_start_new_crypt_req(req);
+ } else {
+ struct ahash_request *req =
+ ahash_request_cast(async_req);
+ mv_start_new_hash_req(req);
+ }
async_req = NULL;
}
@@ -350,13 +621,13 @@ static int queue_manag(void *data)
return 0;
}
-static int mv_handle_req(struct ablkcipher_request *req)
+static int mv_handle_req(struct crypto_async_request *req)
{
unsigned long flags;
int ret;
spin_lock_irqsave(&cpg->lock, flags);
- ret = ablkcipher_enqueue_request(&cpg->queue, req);
+ ret = crypto_enqueue_request(&cpg->queue, req);
spin_unlock_irqrestore(&cpg->lock, flags);
wake_up_process(cpg->queue_th);
return ret;
@@ -369,7 +640,7 @@ static int mv_enc_aes_ecb(struct ablkcipher_request *req)
req_ctx->op = COP_AES_ECB;
req_ctx->decrypt = 0;
- return mv_handle_req(req);
+ return mv_handle_req(&req->base);
}
static int mv_dec_aes_ecb(struct ablkcipher_request *req)
@@ -381,7 +652,7 @@ static int mv_dec_aes_ecb(struct ablkcipher_request *req)
req_ctx->decrypt = 1;
compute_aes_dec_key(ctx);
- return mv_handle_req(req);
+ return mv_handle_req(&req->base);
}
static int mv_enc_aes_cbc(struct ablkcipher_request *req)
@@ -391,7 +662,7 @@ static int mv_enc_aes_cbc(struct ablkcipher_request *req)
req_ctx->op = COP_AES_CBC;
req_ctx->decrypt = 0;
- return mv_handle_req(req);
+ return mv_handle_req(&req->base);
}
static int mv_dec_aes_cbc(struct ablkcipher_request *req)
@@ -403,7 +674,7 @@ static int mv_dec_aes_cbc(struct ablkcipher_request *req)
req_ctx->decrypt = 1;
compute_aes_dec_key(ctx);
- return mv_handle_req(req);
+ return mv_handle_req(&req->base);
}
static int mv_cra_init(struct crypto_tfm *tfm)
@@ -412,6 +683,215 @@ static int mv_cra_init(struct crypto_tfm *tfm)
return 0;
}
+static void mv_init_hash_req_ctx(struct mv_req_hash_ctx *ctx, int op,
+ int is_last, unsigned int req_len,
+ int count_add)
+{
+ memset(ctx, 0, sizeof(*ctx));
+ ctx->op = op;
+ ctx->count = req_len;
+ ctx->first_hash = 1;
+ ctx->last_chunk = is_last;
+ ctx->count_add = count_add;
+}
+
+static void mv_update_hash_req_ctx(struct mv_req_hash_ctx *ctx, int is_last,
+ unsigned req_len)
+{
+ ctx->last_chunk = is_last;
+ ctx->count += req_len;
+}
+
+static int mv_hash_init(struct ahash_request *req)
+{
+ const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
+ mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 0, 0,
+ tfm_ctx->count_add);
+ return 0;
+}
+
+static int mv_hash_update(struct ahash_request *req)
+{
+ if (!req->nbytes)
+ return 0;
+
+ mv_update_hash_req_ctx(ahash_request_ctx(req), 0, req->nbytes);
+ return mv_handle_req(&req->base);
+}
+
+static int mv_hash_final(struct ahash_request *req)
+{
+ struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
+ /* dummy buffer of 4 bytes */
+ sg_init_one(&ctx->dummysg, ctx->buffer, 4);
+ /* I think I'm allowed to do that... */
+ ahash_request_set_crypt(req, &ctx->dummysg, req->result, 0);
+ mv_update_hash_req_ctx(ctx, 1, 0);
+ return mv_handle_req(&req->base);
+}
+
+static int mv_hash_finup(struct ahash_request *req)
+{
+ if (!req->nbytes)
+ return mv_hash_final(req);
+
+ mv_update_hash_req_ctx(ahash_request_ctx(req), 1, req->nbytes);
+ return mv_handle_req(&req->base);
+}
+
+static int mv_hash_digest(struct ahash_request *req)
+{
+ const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
+ mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 1,
+ req->nbytes, tfm_ctx->count_add);
+ return mv_handle_req(&req->base);
+}
+
+static void mv_hash_init_ivs(struct mv_tfm_hash_ctx *ctx, const void *istate,
+ const void *ostate)
+{
+ const struct sha1_state *isha1_state = istate, *osha1_state = ostate;
+ int i;
+ for (i = 0; i < 5; i++) {
+ ctx->ivs[i] = cpu_to_be32(isha1_state->state[i]);
+ ctx->ivs[i + 5] = cpu_to_be32(osha1_state->state[i]);
+ }
+}
+
+static int mv_hash_setkey(struct crypto_ahash *tfm, const u8 * key,
+ unsigned int keylen)
+{
+ int rc;
+ struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(&tfm->base);
+ int bs, ds, ss;
+
+ if (!ctx->base_hash)
+ return 0;
+
+ rc = crypto_shash_setkey(ctx->fallback, key, keylen);
+ if (rc)
+ return rc;
+
+ /* Can't see a way to extract the ipad/opad from the fallback tfm
+ so I'm basically copying code from the hmac module */
+ bs = crypto_shash_blocksize(ctx->base_hash);
+ ds = crypto_shash_digestsize(ctx->base_hash);
+ ss = crypto_shash_statesize(ctx->base_hash);
+
+ {
+ struct {
+ struct shash_desc shash;
+ char ctx[crypto_shash_descsize(ctx->base_hash)];
+ } desc;
+ unsigned int i;
+ char ipad[ss];
+ char opad[ss];
+
+ desc.shash.tfm = ctx->base_hash;
+ desc.shash.flags = crypto_shash_get_flags(ctx->base_hash) &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ if (keylen > bs) {
+ int err;
+
+ err =
+ crypto_shash_digest(&desc.shash, key, keylen, ipad);
+ if (err)
+ return err;
+
+ keylen = ds;
+ } else
+ memcpy(ipad, key, keylen);
+
+ memset(ipad + keylen, 0, bs - keylen);
+ memcpy(opad, ipad, bs);
+
+ for (i = 0; i < bs; i++) {
+ ipad[i] ^= 0x36;
+ opad[i] ^= 0x5c;
+ }
+
+ rc = crypto_shash_init(&desc.shash) ? :
+ crypto_shash_update(&desc.shash, ipad, bs) ? :
+ crypto_shash_export(&desc.shash, ipad) ? :
+ crypto_shash_init(&desc.shash) ? :
+ crypto_shash_update(&desc.shash, opad, bs) ? :
+ crypto_shash_export(&desc.shash, opad);
+
+ if (rc == 0)
+ mv_hash_init_ivs(ctx, ipad, opad);
+
+ return rc;
+ }
+}
+
+static int mv_cra_hash_init(struct crypto_tfm *tfm, const char *base_hash_name,
+ enum hash_op op, int count_add)
+{
+ const char *fallback_driver_name = tfm->__crt_alg->cra_name;
+ struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_shash *fallback_tfm = NULL;
+ struct crypto_shash *base_hash = NULL;
+ int err = -ENOMEM;
+
+ ctx->op = op;
+ ctx->count_add = count_add;
+
+ /* Allocate a fallback and abort if it failed. */
+ fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(fallback_tfm)) {
+ printk(KERN_WARNING MV_CESA
+ "Fallback driver '%s' could not be loaded!\n",
+ fallback_driver_name);
+ err = PTR_ERR(fallback_tfm);
+ goto out;
+ }
+ ctx->fallback = fallback_tfm;
+
+ if (base_hash_name) {
+ /* Allocate a hash to compute the ipad/opad of hmac. */
+ base_hash = crypto_alloc_shash(base_hash_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(base_hash)) {
+ printk(KERN_WARNING MV_CESA
+ "Base driver '%s' could not be loaded!\n",
+ base_hash_name);
+ err = PTR_ERR(fallback_tfm);
+ goto err_bad_base;
+ }
+ }
+ ctx->base_hash = base_hash;
+
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct mv_req_hash_ctx) +
+ crypto_shash_descsize(ctx->fallback));
+ return 0;
+err_bad_base:
+ crypto_free_shash(fallback_tfm);
+out:
+ return err;
+}
+
+static void mv_cra_hash_exit(struct crypto_tfm *tfm)
+{
+ struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ crypto_free_shash(ctx->fallback);
+ if (ctx->base_hash)
+ crypto_free_shash(ctx->base_hash);
+}
+
+static int mv_cra_hash_sha1_init(struct crypto_tfm *tfm)
+{
+ return mv_cra_hash_init(tfm, NULL, COP_SHA1, 0);
+}
+
+static int mv_cra_hash_hmac_sha1_init(struct crypto_tfm *tfm)
+{
+ return mv_cra_hash_init(tfm, "sha1", COP_HMAC_SHA1, SHA1_BLOCK_SIZE);
+}
+
irqreturn_t crypto_int(int irq, void *priv)
{
u32 val;
@@ -474,6 +954,53 @@ struct crypto_alg mv_aes_alg_cbc = {
},
};
+struct ahash_alg mv_sha1_alg = {
+ .init = mv_hash_init,
+ .update = mv_hash_update,
+ .final = mv_hash_final,
+ .finup = mv_hash_finup,
+ .digest = mv_hash_digest,
+ .halg = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ .base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "mv-sha1",
+ .cra_priority = 300,
+ .cra_flags =
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct mv_tfm_hash_ctx),
+ .cra_init = mv_cra_hash_sha1_init,
+ .cra_exit = mv_cra_hash_exit,
+ .cra_module = THIS_MODULE,
+ }
+ }
+};
+
+struct ahash_alg mv_hmac_sha1_alg = {
+ .init = mv_hash_init,
+ .update = mv_hash_update,
+ .final = mv_hash_final,
+ .finup = mv_hash_finup,
+ .digest = mv_hash_digest,
+ .setkey = mv_hash_setkey,
+ .halg = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ .base = {
+ .cra_name = "hmac(sha1)",
+ .cra_driver_name = "mv-hmac-sha1",
+ .cra_priority = 300,
+ .cra_flags =
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct mv_tfm_hash_ctx),
+ .cra_init = mv_cra_hash_hmac_sha1_init,
+ .cra_exit = mv_cra_hash_exit,
+ .cra_module = THIS_MODULE,
+ }
+ }
+};
+
static int mv_probe(struct platform_device *pdev)
{
struct crypto_priv *cp;
@@ -482,7 +1009,7 @@ static int mv_probe(struct platform_device *pdev)
int ret;
if (cpg) {
- printk(KERN_ERR "Second crypto dev?\n");
+ printk(KERN_ERR MV_CESA "Second crypto dev?\n");
return -EEXIST;
}
@@ -496,7 +1023,7 @@ static int mv_probe(struct platform_device *pdev)
spin_lock_init(&cp->lock);
crypto_init_queue(&cp->queue, 50);
- cp->reg = ioremap(res->start, res->end - res->start + 1);
+ cp->reg = ioremap(res->start, resource_size(res));
if (!cp->reg) {
ret = -ENOMEM;
goto err;
@@ -507,7 +1034,7 @@ static int mv_probe(struct platform_device *pdev)
ret = -ENXIO;
goto err_unmap_reg;
}
- cp->sram_size = res->end - res->start + 1;
+ cp->sram_size = resource_size(res);
cp->max_req_size = cp->sram_size - SRAM_CFG_SPACE;
cp->sram = ioremap(res->start, cp->sram_size);
if (!cp->sram) {
@@ -546,6 +1073,21 @@ static int mv_probe(struct platform_device *pdev)
ret = crypto_register_alg(&mv_aes_alg_cbc);
if (ret)
goto err_unreg_ecb;
+
+ ret = crypto_register_ahash(&mv_sha1_alg);
+ if (ret == 0)
+ cpg->has_sha1 = 1;
+ else
+ printk(KERN_WARNING MV_CESA "Could not register sha1 driver\n");
+
+ ret = crypto_register_ahash(&mv_hmac_sha1_alg);
+ if (ret == 0) {
+ cpg->has_hmac_sha1 = 1;
+ } else {
+ printk(KERN_WARNING MV_CESA
+ "Could not register hmac-sha1 driver\n");
+ }
+
return 0;
err_unreg_ecb:
crypto_unregister_alg(&mv_aes_alg_ecb);
@@ -570,6 +1112,10 @@ static int mv_remove(struct platform_device *pdev)
crypto_unregister_alg(&mv_aes_alg_ecb);
crypto_unregister_alg(&mv_aes_alg_cbc);
+ if (cp->has_sha1)
+ crypto_unregister_ahash(&mv_sha1_alg);
+ if (cp->has_hmac_sha1)
+ crypto_unregister_ahash(&mv_hmac_sha1_alg);
kthread_stop(cp->queue_th);
free_irq(cp->irq, cp);
memset(cp->sram, 0, cp->sram_size);
diff --git a/drivers/crypto/mv_cesa.h b/drivers/crypto/mv_cesa.h
index c3e25d3bb171..08fcb1116d90 100644
--- a/drivers/crypto/mv_cesa.h
+++ b/drivers/crypto/mv_cesa.h
@@ -1,6 +1,10 @@
#ifndef __MV_CRYPTO_H__
#define DIGEST_INITIAL_VAL_A 0xdd00
+#define DIGEST_INITIAL_VAL_B 0xdd04
+#define DIGEST_INITIAL_VAL_C 0xdd08
+#define DIGEST_INITIAL_VAL_D 0xdd0c
+#define DIGEST_INITIAL_VAL_E 0xdd10
#define DES_CMD_REG 0xdd58
#define SEC_ACCEL_CMD 0xde00
@@ -70,6 +74,10 @@ struct sec_accel_config {
#define CFG_AES_LEN_128 (0 << 24)
#define CFG_AES_LEN_192 (1 << 24)
#define CFG_AES_LEN_256 (2 << 24)
+#define CFG_NOT_FRAG (0 << 30)
+#define CFG_FIRST_FRAG (1 << 30)
+#define CFG_LAST_FRAG (2 << 30)
+#define CFG_MID_FRAG (3 << 30)
u32 enc_p;
#define ENC_P_SRC(x) (x)
@@ -90,7 +98,11 @@ struct sec_accel_config {
#define MAC_SRC_TOTAL_LEN(x) ((x) << 16)
u32 mac_digest;
+#define MAC_DIGEST_P(x) (x)
+#define MAC_FRAG_LEN(x) ((x) << 16)
u32 mac_iv;
+#define MAC_INNER_IV_P(x) (x)
+#define MAC_OUTER_IV_P(x) ((x) << 16)
}__attribute__ ((packed));
/*
* /-----------\ 0
@@ -101,19 +113,37 @@ struct sec_accel_config {
* | IV IN | 4 * 4
* |-----------| 0x40 (inplace)
* | IV BUF | 4 * 4
- * |-----------| 0x50
+ * |-----------| 0x80
* | DATA IN | 16 * x (max ->max_req_size)
- * |-----------| 0x50 (inplace operation)
+ * |-----------| 0x80 (inplace operation)
* | DATA OUT | 16 * x (max ->max_req_size)
* \-----------/ SRAM size
*/
+
+ /* Hashing memory map:
+ * /-----------\ 0
+ * | ACCEL CFG | 4 * 8
+ * |-----------| 0x20
+ * | Inner IV | 5 * 4
+ * |-----------| 0x34
+ * | Outer IV | 5 * 4
+ * |-----------| 0x48
+ * | Output BUF| 5 * 4
+ * |-----------| 0x80
+ * | DATA IN | 64 * x (max ->max_req_size)
+ * \-----------/ SRAM size
+ */
#define SRAM_CONFIG 0x00
#define SRAM_DATA_KEY_P 0x20
#define SRAM_DATA_IV 0x40
#define SRAM_DATA_IV_BUF 0x40
-#define SRAM_DATA_IN_START 0x50
-#define SRAM_DATA_OUT_START 0x50
+#define SRAM_DATA_IN_START 0x80
+#define SRAM_DATA_OUT_START 0x80
+
+#define SRAM_HMAC_IV_IN 0x20
+#define SRAM_HMAC_IV_OUT 0x34
+#define SRAM_DIGEST_BUF 0x48
-#define SRAM_CFG_SPACE 0x50
+#define SRAM_CFG_SPACE 0x80
#endif
diff --git a/drivers/crypto/n2_asm.S b/drivers/crypto/n2_asm.S
new file mode 100644
index 000000000000..f7c793745a1e
--- /dev/null
+++ b/drivers/crypto/n2_asm.S
@@ -0,0 +1,95 @@
+/* n2_asm.S: Hypervisor calls for NCS support.
+ *
+ * Copyright (C) 2009 David S. Miller <davem@davemloft.net>
+ */
+
+#include <linux/linkage.h>
+#include <asm/hypervisor.h>
+#include "n2_core.h"
+
+ /* o0: queue type
+ * o1: RA of queue
+ * o2: num entries in queue
+ * o3: address of queue handle return
+ */
+ENTRY(sun4v_ncs_qconf)
+ mov HV_FAST_NCS_QCONF, %o5
+ ta HV_FAST_TRAP
+ stx %o1, [%o3]
+ retl
+ nop
+ENDPROC(sun4v_ncs_qconf)
+
+ /* %o0: queue handle
+ * %o1: address of queue type return
+ * %o2: address of queue base address return
+ * %o3: address of queue num entries return
+ */
+ENTRY(sun4v_ncs_qinfo)
+ mov %o1, %g1
+ mov %o2, %g2
+ mov %o3, %g3
+ mov HV_FAST_NCS_QINFO, %o5
+ ta HV_FAST_TRAP
+ stx %o1, [%g1]
+ stx %o2, [%g2]
+ stx %o3, [%g3]
+ retl
+ nop
+ENDPROC(sun4v_ncs_qinfo)
+
+ /* %o0: queue handle
+ * %o1: address of head offset return
+ */
+ENTRY(sun4v_ncs_gethead)
+ mov %o1, %o2
+ mov HV_FAST_NCS_GETHEAD, %o5
+ ta HV_FAST_TRAP
+ stx %o1, [%o2]
+ retl
+ nop
+ENDPROC(sun4v_ncs_gethead)
+
+ /* %o0: queue handle
+ * %o1: address of tail offset return
+ */
+ENTRY(sun4v_ncs_gettail)
+ mov %o1, %o2
+ mov HV_FAST_NCS_GETTAIL, %o5
+ ta HV_FAST_TRAP
+ stx %o1, [%o2]
+ retl
+ nop
+ENDPROC(sun4v_ncs_gettail)
+
+ /* %o0: queue handle
+ * %o1: new tail offset
+ */
+ENTRY(sun4v_ncs_settail)
+ mov HV_FAST_NCS_SETTAIL, %o5
+ ta HV_FAST_TRAP
+ retl
+ nop
+ENDPROC(sun4v_ncs_settail)
+
+ /* %o0: queue handle
+ * %o1: address of devino return
+ */
+ENTRY(sun4v_ncs_qhandle_to_devino)
+ mov %o1, %o2
+ mov HV_FAST_NCS_QHANDLE_TO_DEVINO, %o5
+ ta HV_FAST_TRAP
+ stx %o1, [%o2]
+ retl
+ nop
+ENDPROC(sun4v_ncs_qhandle_to_devino)
+
+ /* %o0: queue handle
+ * %o1: new head offset
+ */
+ENTRY(sun4v_ncs_sethead_marker)
+ mov HV_FAST_NCS_SETHEAD_MARKER, %o5
+ ta HV_FAST_TRAP
+ retl
+ nop
+ENDPROC(sun4v_ncs_sethead_marker)
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
new file mode 100644
index 000000000000..8566be832f51
--- /dev/null
+++ b/drivers/crypto/n2_core.c
@@ -0,0 +1,2083 @@
+/* n2_core.c: Niagara2 Stream Processing Unit (SPU) crypto support.
+ *
+ * Copyright (C) 2010 David S. Miller <davem@davemloft.net>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/cpumask.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/crypto.h>
+#include <crypto/md5.h>
+#include <crypto/sha.h>
+#include <crypto/aes.h>
+#include <crypto/des.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+
+#include <crypto/internal/hash.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/algapi.h>
+
+#include <asm/hypervisor.h>
+#include <asm/mdesc.h>
+
+#include "n2_core.h"
+
+#define DRV_MODULE_NAME "n2_crypto"
+#define DRV_MODULE_VERSION "0.1"
+#define DRV_MODULE_RELDATE "April 29, 2010"
+
+static char version[] __devinitdata =
+ DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
+
+MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
+MODULE_DESCRIPTION("Niagara2 Crypto driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_MODULE_VERSION);
+
+#define N2_CRA_PRIORITY 300
+
+static DEFINE_MUTEX(spu_lock);
+
+struct spu_queue {
+ cpumask_t sharing;
+ unsigned long qhandle;
+
+ spinlock_t lock;
+ u8 q_type;
+ void *q;
+ unsigned long head;
+ unsigned long tail;
+ struct list_head jobs;
+
+ unsigned long devino;
+
+ char irq_name[32];
+ unsigned int irq;
+
+ struct list_head list;
+};
+
+static struct spu_queue **cpu_to_cwq;
+static struct spu_queue **cpu_to_mau;
+
+static unsigned long spu_next_offset(struct spu_queue *q, unsigned long off)
+{
+ if (q->q_type == HV_NCS_QTYPE_MAU) {
+ off += MAU_ENTRY_SIZE;
+ if (off == (MAU_ENTRY_SIZE * MAU_NUM_ENTRIES))
+ off = 0;
+ } else {
+ off += CWQ_ENTRY_SIZE;
+ if (off == (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES))
+ off = 0;
+ }
+ return off;
+}
+
+struct n2_request_common {
+ struct list_head entry;
+ unsigned int offset;
+};
+#define OFFSET_NOT_RUNNING (~(unsigned int)0)
+
+/* An async job request records the final tail value it used in
+ * n2_request_common->offset; test to see if that offset is in
+ * the range (old_head, new_head], i.e. inclusive of new_head.
+ */
+static inline bool job_finished(struct spu_queue *q, unsigned int offset,
+ unsigned long old_head, unsigned long new_head)
+{
+ if (old_head <= new_head) {
+ if (offset > old_head && offset <= new_head)
+ return true;
+ } else {
+ if (offset > old_head || offset <= new_head)
+ return true;
+ }
+ return false;
+}
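
(Illustration only, not part of the patch: the same wrap-around interval test as job_finished() above, exercised with made-up byte offsets into the circular CWQ.)

#include <assert.h>
#include <stdbool.h>

/* offset is finished when it lies in (old_head, new_head], taking
 * wrap-around of the circular queue into account.
 */
static bool finished(unsigned int offset,
                     unsigned long old_head, unsigned long new_head)
{
        if (old_head <= new_head)
                return offset > old_head && offset <= new_head;
        return offset > old_head || offset <= new_head;
}

int main(void)
{
        /* no wrap: HEAD advanced from 0x100 to 0x300 */
        assert( finished(0x200, 0x100, 0x300));
        assert(!finished(0x380, 0x100, 0x300));

        /* wrap: HEAD advanced from 0x300 past the end back to 0x100 */
        assert( finished(0x380, 0x300, 0x100));
        assert( finished(0x080, 0x300, 0x100));
        assert(!finished(0x200, 0x300, 0x100));
        return 0;
}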
+
+/* When the HEAD marker is unequal to the actual HEAD, we get
+ * a virtual device INO interrupt. We should process the
+ * completed CWQ entries and adjust the HEAD marker to clear
+ * the IRQ.
+ */
+static irqreturn_t cwq_intr(int irq, void *dev_id)
+{
+ unsigned long off, new_head, hv_ret;
+ struct spu_queue *q = dev_id;
+
+ pr_err("CPU[%d]: Got CWQ interrupt for qhdl[%lx]\n",
+ smp_processor_id(), q->qhandle);
+
+ spin_lock(&q->lock);
+
+ hv_ret = sun4v_ncs_gethead(q->qhandle, &new_head);
+
+ pr_err("CPU[%d]: CWQ gethead[%lx] hv_ret[%lu]\n",
+ smp_processor_id(), new_head, hv_ret);
+
+ for (off = q->head; off != new_head; off = spu_next_offset(q, off)) {
+ /* XXX ... XXX */
+ }
+
+ hv_ret = sun4v_ncs_sethead_marker(q->qhandle, new_head);
+ if (hv_ret == HV_EOK)
+ q->head = new_head;
+
+ spin_unlock(&q->lock);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mau_intr(int irq, void *dev_id)
+{
+ struct spu_queue *q = dev_id;
+ unsigned long head, hv_ret;
+
+ spin_lock(&q->lock);
+
+ pr_err("CPU[%d]: Got MAU interrupt for qhdl[%lx]\n",
+ smp_processor_id(), q->qhandle);
+
+ hv_ret = sun4v_ncs_gethead(q->qhandle, &head);
+
+ pr_err("CPU[%d]: MAU gethead[%lx] hv_ret[%lu]\n",
+ smp_processor_id(), head, hv_ret);
+
+ sun4v_ncs_sethead_marker(q->qhandle, head);
+
+ spin_unlock(&q->lock);
+
+ return IRQ_HANDLED;
+}
+
+static void *spu_queue_next(struct spu_queue *q, void *cur)
+{
+ return q->q + spu_next_offset(q, cur - q->q);
+}
+
+static int spu_queue_num_free(struct spu_queue *q)
+{
+ unsigned long head = q->head;
+ unsigned long tail = q->tail;
+ unsigned long end = (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES);
+ unsigned long diff;
+
+ if (head > tail)
+ diff = head - tail;
+ else
+ diff = (end - tail) + head;
+
+ return (diff / CWQ_ENTRY_SIZE) - 1;
+}
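
(Illustration only, not part of the patch: the free-count computation above always reserves one slot so a full queue can be told apart from an empty one. The entry/queue sizes below are hypothetical; the driver's real values come from n2_core.h.)

#include <stdio.h>

#define ENTRY_SIZE   64   /* hypothetical, for illustration only */
#define NUM_ENTRIES  64   /* hypothetical, for illustration only */

static int num_free(unsigned long head, unsigned long tail)
{
        unsigned long end = ENTRY_SIZE * NUM_ENTRIES;
        unsigned long diff = (head > tail) ? head - tail : (end - tail) + head;

        return (int)(diff / ENTRY_SIZE) - 1;
}

int main(void)
{
        printf("empty queue:  %d free\n", num_free(0, 0));          /* 63, not 64 */
        printf("one pending:  %d free\n", num_free(0, ENTRY_SIZE)); /* 62 */
        return 0;
}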
+
+static void *spu_queue_alloc(struct spu_queue *q, int num_entries)
+{
+ int avail = spu_queue_num_free(q);
+
+ if (avail >= num_entries)
+ return q->q + q->tail;
+
+ return NULL;
+}
+
+static unsigned long spu_queue_submit(struct spu_queue *q, void *last)
+{
+ unsigned long hv_ret, new_tail;
+
+ new_tail = spu_next_offset(q, last - q->q);
+
+ hv_ret = sun4v_ncs_settail(q->qhandle, new_tail);
+ if (hv_ret == HV_EOK)
+ q->tail = new_tail;
+ return hv_ret;
+}
+
+static u64 control_word_base(unsigned int len, unsigned int hmac_key_len,
+ int enc_type, int auth_type,
+ unsigned int hash_len,
+ bool sfas, bool sob, bool eob, bool encrypt,
+ int opcode)
+{
+ u64 word = (len - 1) & CONTROL_LEN;
+
+ word |= ((u64) opcode << CONTROL_OPCODE_SHIFT);
+ word |= ((u64) enc_type << CONTROL_ENC_TYPE_SHIFT);
+ word |= ((u64) auth_type << CONTROL_AUTH_TYPE_SHIFT);
+ if (sfas)
+ word |= CONTROL_STORE_FINAL_AUTH_STATE;
+ if (sob)
+ word |= CONTROL_START_OF_BLOCK;
+ if (eob)
+ word |= CONTROL_END_OF_BLOCK;
+ if (encrypt)
+ word |= CONTROL_ENCRYPT;
+ if (hmac_key_len)
+ word |= ((u64) (hmac_key_len - 1)) << CONTROL_HMAC_KEY_LEN_SHIFT;
+ if (hash_len)
+ word |= ((u64) (hash_len - 1)) << CONTROL_HASH_LEN_SHIFT;
+
+ return word;
+}
+
+#if 0
+static inline bool n2_should_run_async(struct spu_queue *qp, int this_len)
+{
+ if (this_len >= 64 ||
+ qp->head != qp->tail)
+ return true;
+ return false;
+}
+#endif
+
+struct n2_base_ctx {
+ struct list_head list;
+};
+
+static void n2_base_ctx_init(struct n2_base_ctx *ctx)
+{
+ INIT_LIST_HEAD(&ctx->list);
+}
+
+struct n2_hash_ctx {
+ struct n2_base_ctx base;
+
+ struct crypto_ahash *fallback;
+
+ /* These next three members must match the layout created by
+ * crypto_init_shash_ops_async. This allows us to properly
+ * plumb requests we can't do in hardware down to the fallback
+ * operation, providing all of the data structures and layouts
+ * expected by those paths.
+ */
+ struct ahash_request fallback_req;
+ struct shash_desc fallback_desc;
+ union {
+ struct md5_state md5;
+ struct sha1_state sha1;
+ struct sha256_state sha256;
+ } u;
+
+ unsigned char hash_key[64];
+ unsigned char keyed_zero_hash[32];
+};
+
+static int n2_hash_async_init(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback);
+ ctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ return crypto_ahash_init(&ctx->fallback_req);
+}
+
+static int n2_hash_async_update(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback);
+ ctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+ ctx->fallback_req.nbytes = req->nbytes;
+ ctx->fallback_req.src = req->src;
+
+ return crypto_ahash_update(&ctx->fallback_req);
+}
+
+static int n2_hash_async_final(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback);
+ ctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+ ctx->fallback_req.result = req->result;
+
+ return crypto_ahash_final(&ctx->fallback_req);
+}
+
+static int n2_hash_async_finup(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback);
+ ctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+ ctx->fallback_req.nbytes = req->nbytes;
+ ctx->fallback_req.src = req->src;
+ ctx->fallback_req.result = req->result;
+
+ return crypto_ahash_finup(&ctx->fallback_req);
+}
+
+static int n2_hash_cra_init(struct crypto_tfm *tfm)
+{
+ const char *fallback_driver_name = tfm->__crt_alg->cra_name;
+ struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
+ struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct crypto_ahash *fallback_tfm;
+ int err;
+
+ fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(fallback_tfm)) {
+ pr_warning("Fallback driver '%s' could not be loaded!\n",
+ fallback_driver_name);
+ err = PTR_ERR(fallback_tfm);
+ goto out;
+ }
+
+ ctx->fallback = fallback_tfm;
+ return 0;
+
+out:
+ return err;
+}
+
+static void n2_hash_cra_exit(struct crypto_tfm *tfm)
+{
+ struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
+ struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+
+ crypto_free_ahash(ctx->fallback);
+}
+
+static unsigned long wait_for_tail(struct spu_queue *qp)
+{
+ unsigned long head, hv_ret;
+
+ do {
+ hv_ret = sun4v_ncs_gethead(qp->qhandle, &head);
+ if (hv_ret != HV_EOK) {
+ pr_err("Hypervisor error on gethead\n");
+ break;
+ }
+ if (head == qp->tail) {
+ qp->head = head;
+ break;
+ }
+ } while (1);
+ return hv_ret;
+}
+
+static unsigned long submit_and_wait_for_tail(struct spu_queue *qp,
+ struct cwq_initial_entry *ent)
+{
+ unsigned long hv_ret = spu_queue_submit(qp, ent);
+
+ if (hv_ret == HV_EOK)
+ hv_ret = wait_for_tail(qp);
+
+ return hv_ret;
+}
+
+static int n2_hash_async_digest(struct ahash_request *req,
+ unsigned int auth_type, unsigned int digest_size,
+ unsigned int result_size, void *hash_loc)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct cwq_initial_entry *ent;
+ struct crypto_hash_walk walk;
+ struct spu_queue *qp;
+ unsigned long flags;
+ int err = -ENODEV;
+ int nbytes, cpu;
+
+ /* The total effective length of the operation may not
+ * exceed 2^16.
+ */
+ if (unlikely(req->nbytes > (1 << 16))) {
+ ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback);
+ ctx->fallback_req.base.flags =
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+ ctx->fallback_req.nbytes = req->nbytes;
+ ctx->fallback_req.src = req->src;
+ ctx->fallback_req.result = req->result;
+
+ return crypto_ahash_digest(&ctx->fallback_req);
+ }
+
+ n2_base_ctx_init(&ctx->base);
+
+ nbytes = crypto_hash_walk_first(req, &walk);
+
+ cpu = get_cpu();
+ qp = cpu_to_cwq[cpu];
+ if (!qp)
+ goto out;
+
+ spin_lock_irqsave(&qp->lock, flags);
+
+ /* XXX can do better, improve this later by doing a by-hand scatterlist
+ * XXX walk, etc.
+ */
+ ent = qp->q + qp->tail;
+
+ ent->control = control_word_base(nbytes, 0, 0,
+ auth_type, digest_size,
+ false, true, false, false,
+ OPCODE_INPLACE_BIT |
+ OPCODE_AUTH_MAC);
+ ent->src_addr = __pa(walk.data);
+ ent->auth_key_addr = 0UL;
+ ent->auth_iv_addr = __pa(hash_loc);
+ ent->final_auth_state_addr = 0UL;
+ ent->enc_key_addr = 0UL;
+ ent->enc_iv_addr = 0UL;
+ ent->dest_addr = __pa(hash_loc);
+
+ nbytes = crypto_hash_walk_done(&walk, 0);
+ while (nbytes > 0) {
+ ent = spu_queue_next(qp, ent);
+
+ ent->control = (nbytes - 1);
+ ent->src_addr = __pa(walk.data);
+ ent->auth_key_addr = 0UL;
+ ent->auth_iv_addr = 0UL;
+ ent->final_auth_state_addr = 0UL;
+ ent->enc_key_addr = 0UL;
+ ent->enc_iv_addr = 0UL;
+ ent->dest_addr = 0UL;
+
+ nbytes = crypto_hash_walk_done(&walk, 0);
+ }
+ ent->control |= CONTROL_END_OF_BLOCK;
+
+ if (submit_and_wait_for_tail(qp, ent) != HV_EOK)
+ err = -EINVAL;
+ else
+ err = 0;
+
+ spin_unlock_irqrestore(&qp->lock, flags);
+
+ if (!err)
+ memcpy(req->result, hash_loc, result_size);
+out:
+ put_cpu();
+
+ return err;
+}
+
+static int n2_md5_async_digest(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct md5_state *m = &ctx->u.md5;
+
+ if (unlikely(req->nbytes == 0)) {
+ static const char md5_zero[MD5_DIGEST_SIZE] = {
+ 0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04,
+ 0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e,
+ };
+
+ memcpy(req->result, md5_zero, MD5_DIGEST_SIZE);
+ return 0;
+ }
+ m->hash[0] = cpu_to_le32(0x67452301);
+ m->hash[1] = cpu_to_le32(0xefcdab89);
+ m->hash[2] = cpu_to_le32(0x98badcfe);
+ m->hash[3] = cpu_to_le32(0x10325476);
+
+ return n2_hash_async_digest(req, AUTH_TYPE_MD5,
+ MD5_DIGEST_SIZE, MD5_DIGEST_SIZE,
+ m->hash);
+}
+
+static int n2_sha1_async_digest(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct sha1_state *s = &ctx->u.sha1;
+
+ if (unlikely(req->nbytes == 0)) {
+ static const char sha1_zero[SHA1_DIGEST_SIZE] = {
+ 0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, 0x32,
+ 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, 0xaf, 0xd8,
+ 0x07, 0x09
+ };
+
+ memcpy(req->result, sha1_zero, SHA1_DIGEST_SIZE);
+ return 0;
+ }
+ s->state[0] = SHA1_H0;
+ s->state[1] = SHA1_H1;
+ s->state[2] = SHA1_H2;
+ s->state[3] = SHA1_H3;
+ s->state[4] = SHA1_H4;
+
+ return n2_hash_async_digest(req, AUTH_TYPE_SHA1,
+ SHA1_DIGEST_SIZE, SHA1_DIGEST_SIZE,
+ s->state);
+}
+
+static int n2_sha256_async_digest(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct sha256_state *s = &ctx->u.sha256;
+
+ if (req->nbytes == 0) {
+ static const char sha256_zero[SHA256_DIGEST_SIZE] = {
+ 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a,
+ 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae,
+ 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99,
+ 0x1b, 0x78, 0x52, 0xb8, 0x55
+ };
+
+ memcpy(req->result, sha256_zero, SHA256_DIGEST_SIZE);
+ return 0;
+ }
+ s->state[0] = SHA256_H0;
+ s->state[1] = SHA256_H1;
+ s->state[2] = SHA256_H2;
+ s->state[3] = SHA256_H3;
+ s->state[4] = SHA256_H4;
+ s->state[5] = SHA256_H5;
+ s->state[6] = SHA256_H6;
+ s->state[7] = SHA256_H7;
+
+ return n2_hash_async_digest(req, AUTH_TYPE_SHA256,
+ SHA256_DIGEST_SIZE, SHA256_DIGEST_SIZE,
+ s->state);
+}
+
+static int n2_sha224_async_digest(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct sha256_state *s = &ctx->u.sha256;
+
+ if (req->nbytes == 0) {
+ static const char sha224_zero[SHA224_DIGEST_SIZE] = {
+ 0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9, 0x47,
+ 0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4, 0x15, 0xa2,
+ 0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a, 0xc5, 0xb3, 0xe4,
+ 0x2f
+ };
+
+ memcpy(req->result, sha224_zero, SHA224_DIGEST_SIZE);
+ return 0;
+ }
+ s->state[0] = SHA224_H0;
+ s->state[1] = SHA224_H1;
+ s->state[2] = SHA224_H2;
+ s->state[3] = SHA224_H3;
+ s->state[4] = SHA224_H4;
+ s->state[5] = SHA224_H5;
+ s->state[6] = SHA224_H6;
+ s->state[7] = SHA224_H7;
+
+ return n2_hash_async_digest(req, AUTH_TYPE_SHA256,
+ SHA256_DIGEST_SIZE, SHA224_DIGEST_SIZE,
+ s->state);
+}
+
+struct n2_cipher_context {
+ int key_len;
+ int enc_type;
+ union {
+ u8 aes[AES_MAX_KEY_SIZE];
+ u8 des[DES_KEY_SIZE];
+ u8 des3[3 * DES_KEY_SIZE];
+ u8 arc4[258]; /* S-box, X, Y */
+ } key;
+};
+
+#define N2_CHUNK_ARR_LEN 16
+
+struct n2_crypto_chunk {
+ struct list_head entry;
+ unsigned long iv_paddr : 44;
+ unsigned long arr_len : 20;
+ unsigned long dest_paddr;
+ unsigned long dest_final;
+ struct {
+ unsigned long src_paddr : 44;
+ unsigned long src_len : 20;
+ } arr[N2_CHUNK_ARR_LEN];
+};
+
+struct n2_request_context {
+ struct ablkcipher_walk walk;
+ struct list_head chunk_list;
+ struct n2_crypto_chunk chunk;
+ u8 temp_iv[16];
+};
+
+/* The SPU allows some level of flexibility for partial cipher blocks
+ * being specified in a descriptor.
+ *
+ * It merely requires that every descriptor's length field is at least
+ * as large as the cipher block size. This means that a cipher block
+ * can span at most 2 descriptors. However, this does not allow a
+ * partial block to span into the final descriptor as that would
+ * violate the rule (since every descriptor's length must be at least
+ * the block size). So, for example, assuming an 8 byte block size:
+ *
+ * 0xe --> 0xa --> 0x8
+ *
+ * is a valid length sequence, whereas:
+ *
+ * 0xe --> 0xb --> 0x7
+ *
+ * is not a valid sequence.
+ */
+
+struct n2_cipher_alg {
+ struct list_head entry;
+ u8 enc_type;
+ struct crypto_alg alg;
+};
+
+static inline struct n2_cipher_alg *n2_cipher_alg(struct crypto_tfm *tfm)
+{
+ struct crypto_alg *alg = tfm->__crt_alg;
+
+ return container_of(alg, struct n2_cipher_alg, alg);
+}
+
+struct n2_cipher_request_context {
+ struct ablkcipher_walk walk;
+};
+
+static int n2_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
+ struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
+
+ ctx->enc_type = (n2alg->enc_type & ENC_TYPE_CHAINING_MASK);
+
+ switch (keylen) {
+ case AES_KEYSIZE_128:
+ ctx->enc_type |= ENC_TYPE_ALG_AES128;
+ break;
+ case AES_KEYSIZE_192:
+ ctx->enc_type |= ENC_TYPE_ALG_AES192;
+ break;
+ case AES_KEYSIZE_256:
+ ctx->enc_type |= ENC_TYPE_ALG_AES256;
+ break;
+ default:
+ crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ ctx->key_len = keylen;
+ memcpy(ctx->key.aes, key, keylen);
+ return 0;
+}
+
+static int n2_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
+ struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
+ u32 tmp[DES_EXPKEY_WORDS];
+ int err;
+
+ ctx->enc_type = n2alg->enc_type;
+
+ if (keylen != DES_KEY_SIZE) {
+ crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ err = des_ekey(tmp, key);
+ if (err == 0 && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
+ return -EINVAL;
+ }
+
+ ctx->key_len = keylen;
+ memcpy(ctx->key.des, key, keylen);
+ return 0;
+}
+
+static int n2_3des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
+ struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
+
+ ctx->enc_type = n2alg->enc_type;
+
+ if (keylen != (3 * DES_KEY_SIZE)) {
+ crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+ ctx->key_len = keylen;
+ memcpy(ctx->key.des3, key, keylen);
+ return 0;
+}
+
+static int n2_arc4_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
+ struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
+ u8 *s = ctx->key.arc4;
+ u8 *x = s + 256;
+ u8 *y = x + 1;
+ int i, j, k;
+
+ ctx->enc_type = n2alg->enc_type;
+
+ j = k = 0;
+ *x = 0;
+ *y = 0;
+ for (i = 0; i < 256; i++)
+ s[i] = i;
+ for (i = 0; i < 256; i++) {
+ u8 a = s[i];
+ j = (j + key[k] + a) & 0xff;
+ s[i] = s[j];
+ s[j] = a;
+ if (++k >= keylen)
+ k = 0;
+ }
+
+ return 0;
+}
+
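+/* Trim a descriptor's length to a whole number of cipher blocks, capped
+ * at the 2^16 byte hardware limit, so the partial-block rule described
+ * above is never violated.
+ */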
+static inline int cipher_descriptor_len(int nbytes, unsigned int block_size)
+{
+ int this_len = nbytes;
+
+ this_len -= (nbytes & (block_size - 1));
+ return this_len > (1 << 16) ? (1 << 16) : this_len;
+}
+
+static int __n2_crypt_chunk(struct crypto_tfm *tfm, struct n2_crypto_chunk *cp,
+ struct spu_queue *qp, bool encrypt)
+{
+ struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
+ struct cwq_initial_entry *ent;
+ bool in_place;
+ int i;
+
+ ent = spu_queue_alloc(qp, cp->arr_len);
+ if (!ent) {
+ pr_info("queue_alloc() of %d fails\n",
+ cp->arr_len);
+ return -EBUSY;
+ }
+
+ in_place = (cp->dest_paddr == cp->arr[0].src_paddr);
+
+ ent->control = control_word_base(cp->arr[0].src_len,
+ 0, ctx->enc_type, 0, 0,
+ false, true, false, encrypt,
+ OPCODE_ENCRYPT |
+ (in_place ? OPCODE_INPLACE_BIT : 0));
+ ent->src_addr = cp->arr[0].src_paddr;
+ ent->auth_key_addr = 0UL;
+ ent->auth_iv_addr = 0UL;
+ ent->final_auth_state_addr = 0UL;
+ ent->enc_key_addr = __pa(&ctx->key);
+ ent->enc_iv_addr = cp->iv_paddr;
+ ent->dest_addr = (in_place ? 0UL : cp->dest_paddr);
+
+ for (i = 1; i < cp->arr_len; i++) {
+ ent = spu_queue_next(qp, ent);
+
+ ent->control = cp->arr[i].src_len - 1;
+ ent->src_addr = cp->arr[i].src_paddr;
+ ent->auth_key_addr = 0UL;
+ ent->auth_iv_addr = 0UL;
+ ent->final_auth_state_addr = 0UL;
+ ent->enc_key_addr = 0UL;
+ ent->enc_iv_addr = 0UL;
+ ent->dest_addr = 0UL;
+ }
+ ent->control |= CONTROL_END_OF_BLOCK;
+
+ return (spu_queue_submit(qp, ent) != HV_EOK) ? -EINVAL : 0;
+}
+
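+/* Walk the request's scatterlists and group the physical segments into
+ * n2_crypto_chunk submission units.  A new chunk is started whenever the
+ * in-place property changes, an out-of-place destination stops being
+ * contiguous with the previous one, the per-chunk descriptor array fills
+ * up, or the running length would exceed the 2^16 byte limit.  The first
+ * chunk lives in the request context; further chunks are allocated with
+ * GFP_ATOMIC and released as they are consumed or in n2_chunk_complete().
+ */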
+static int n2_compute_chunks(struct ablkcipher_request *req)
+{
+ struct n2_request_context *rctx = ablkcipher_request_ctx(req);
+ struct ablkcipher_walk *walk = &rctx->walk;
+ struct n2_crypto_chunk *chunk;
+ unsigned long dest_prev;
+ unsigned int tot_len;
+ bool prev_in_place;
+ int err, nbytes;
+
+ ablkcipher_walk_init(walk, req->dst, req->src, req->nbytes);
+ err = ablkcipher_walk_phys(req, walk);
+ if (err)
+ return err;
+
+ INIT_LIST_HEAD(&rctx->chunk_list);
+
+ chunk = &rctx->chunk;
+ INIT_LIST_HEAD(&chunk->entry);
+
+ chunk->iv_paddr = 0UL;
+ chunk->arr_len = 0;
+ chunk->dest_paddr = 0UL;
+
+ prev_in_place = false;
+ dest_prev = ~0UL;
+ tot_len = 0;
+
+ while ((nbytes = walk->nbytes) != 0) {
+ unsigned long dest_paddr, src_paddr;
+ bool in_place;
+ int this_len;
+
+ src_paddr = (page_to_phys(walk->src.page) +
+ walk->src.offset);
+ dest_paddr = (page_to_phys(walk->dst.page) +
+ walk->dst.offset);
+ in_place = (src_paddr == dest_paddr);
+ this_len = cipher_descriptor_len(nbytes, walk->blocksize);
+
+ if (chunk->arr_len != 0) {
+ if (in_place != prev_in_place ||
+ (!prev_in_place &&
+ dest_paddr != dest_prev) ||
+ chunk->arr_len == N2_CHUNK_ARR_LEN ||
+ tot_len + this_len > (1 << 16)) {
+ chunk->dest_final = dest_prev;
+ list_add_tail(&chunk->entry,
+ &rctx->chunk_list);
+ chunk = kzalloc(sizeof(*chunk), GFP_ATOMIC);
+ if (!chunk) {
+ err = -ENOMEM;
+ break;
+ }
+ INIT_LIST_HEAD(&chunk->entry);
+ }
+ }
+ if (chunk->arr_len == 0) {
+ chunk->dest_paddr = dest_paddr;
+ tot_len = 0;
+ }
+ chunk->arr[chunk->arr_len].src_paddr = src_paddr;
+ chunk->arr[chunk->arr_len].src_len = this_len;
+ chunk->arr_len++;
+
+ dest_prev = dest_paddr + this_len;
+ prev_in_place = in_place;
+ tot_len += this_len;
+
+ err = ablkcipher_walk_done(req, walk, nbytes - this_len);
+ if (err)
+ break;
+ }
+ if (!err && chunk->arr_len != 0) {
+ chunk->dest_final = dest_prev;
+ list_add_tail(&chunk->entry, &rctx->chunk_list);
+ }
+
+ return err;
+}
+
+static void n2_chunk_complete(struct ablkcipher_request *req, void *final_iv)
+{
+ struct n2_request_context *rctx = ablkcipher_request_ctx(req);
+ struct n2_crypto_chunk *c, *tmp;
+
+ if (final_iv)
+ memcpy(rctx->walk.iv, final_iv, rctx->walk.blocksize);
+
+ ablkcipher_walk_complete(&rctx->walk);
+ list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
+ list_del(&c->entry);
+ if (unlikely(c != &rctx->chunk))
+ kfree(c);
+ }
+}
+
+static int n2_do_ecb(struct ablkcipher_request *req, bool encrypt)
+{
+ struct n2_request_context *rctx = ablkcipher_request_ctx(req);
+ struct crypto_tfm *tfm = req->base.tfm;
+ int err = n2_compute_chunks(req);
+ struct n2_crypto_chunk *c, *tmp;
+ unsigned long flags, hv_ret;
+ struct spu_queue *qp;
+
+ if (err)
+ return err;
+
+ qp = cpu_to_cwq[get_cpu()];
+ err = -ENODEV;
+ if (!qp)
+ goto out;
+
+ spin_lock_irqsave(&qp->lock, flags);
+
+ list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
+ err = __n2_crypt_chunk(tfm, c, qp, encrypt);
+ if (err)
+ break;
+ list_del(&c->entry);
+ if (unlikely(c != &rctx->chunk))
+ kfree(c);
+ }
+ if (!err) {
+ hv_ret = wait_for_tail(qp);
+ if (hv_ret != HV_EOK)
+ err = -EINVAL;
+ }
+
+ spin_unlock_irqrestore(&qp->lock, flags);
+
+out:
+ put_cpu();
+
+ n2_chunk_complete(req, NULL);
+ return err;
+}
+
+static int n2_encrypt_ecb(struct ablkcipher_request *req)
+{
+ return n2_do_ecb(req, true);
+}
+
+static int n2_decrypt_ecb(struct ablkcipher_request *req)
+{
+ return n2_do_ecb(req, false);
+}
+
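+/* Chained-mode (CBC/CFB/CTR) submission.  For encryption the chunks are
+ * processed in order and each chunk's IV is the last output block of the
+ * previous chunk (c->dest_final - blocksize).  For decryption they are
+ * walked in reverse so that each chunk's IV, the last ciphertext block of
+ * the chunk before it, is still intact when an in-place operation would
+ * otherwise have overwritten it; the final ciphertext block is copied
+ * into temp_iv first so it can be reported back as the ending IV.
+ */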
+static int n2_do_chaining(struct ablkcipher_request *req, bool encrypt)
+{
+ struct n2_request_context *rctx = ablkcipher_request_ctx(req);
+ struct crypto_tfm *tfm = req->base.tfm;
+ unsigned long flags, hv_ret, iv_paddr;
+ int err = n2_compute_chunks(req);
+ struct n2_crypto_chunk *c, *tmp;
+ struct spu_queue *qp;
+ void *final_iv_addr;
+
+ final_iv_addr = NULL;
+
+ if (err)
+ return err;
+
+ qp = cpu_to_cwq[get_cpu()];
+ err = -ENODEV;
+ if (!qp)
+ goto out;
+
+ spin_lock_irqsave(&qp->lock, flags);
+
+ if (encrypt) {
+ iv_paddr = __pa(rctx->walk.iv);
+ list_for_each_entry_safe(c, tmp, &rctx->chunk_list,
+ entry) {
+ c->iv_paddr = iv_paddr;
+ err = __n2_crypt_chunk(tfm, c, qp, true);
+ if (err)
+ break;
+ iv_paddr = c->dest_final - rctx->walk.blocksize;
+ list_del(&c->entry);
+ if (unlikely(c != &rctx->chunk))
+ kfree(c);
+ }
+ final_iv_addr = __va(iv_paddr);
+ } else {
+ list_for_each_entry_safe_reverse(c, tmp, &rctx->chunk_list,
+ entry) {
+ if (c == &rctx->chunk) {
+ iv_paddr = __pa(rctx->walk.iv);
+ } else {
+ iv_paddr = (tmp->arr[tmp->arr_len-1].src_paddr +
+ tmp->arr[tmp->arr_len-1].src_len -
+ rctx->walk.blocksize);
+ }
+ if (!final_iv_addr) {
+ unsigned long pa;
+
+ pa = (c->arr[c->arr_len-1].src_paddr +
+ c->arr[c->arr_len-1].src_len -
+ rctx->walk.blocksize);
+ final_iv_addr = rctx->temp_iv;
+ memcpy(rctx->temp_iv, __va(pa),
+ rctx->walk.blocksize);
+ }
+ c->iv_paddr = iv_paddr;
+ err = __n2_crypt_chunk(tfm, c, qp, false);
+ if (err)
+ break;
+ list_del(&c->entry);
+ if (unlikely(c != &rctx->chunk))
+ kfree(c);
+ }
+ }
+ if (!err) {
+ hv_ret = wait_for_tail(qp);
+ if (hv_ret != HV_EOK)
+ err = -EINVAL;
+ }
+
+ spin_unlock_irqrestore(&qp->lock, flags);
+
+out:
+ put_cpu();
+
+ n2_chunk_complete(req, err ? NULL : final_iv_addr);
+ return err;
+}
+
+static int n2_encrypt_chaining(struct ablkcipher_request *req)
+{
+ return n2_do_chaining(req, true);
+}
+
+static int n2_decrypt_chaining(struct ablkcipher_request *req)
+{
+ return n2_do_chaining(req, false);
+}
+
+struct n2_cipher_tmpl {
+ const char *name;
+ const char *drv_name;
+ u8 block_size;
+ u8 enc_type;
+ struct ablkcipher_alg ablkcipher;
+};
+
+static const struct n2_cipher_tmpl cipher_tmpls[] = {
+ /* ARC4: only ECB is supported (chaining bits ignored) */
+ { .name = "ecb(arc4)",
+ .drv_name = "ecb-arc4",
+ .block_size = 1,
+ .enc_type = (ENC_TYPE_ALG_RC4_STREAM |
+ ENC_TYPE_CHAINING_ECB),
+ .ablkcipher = {
+ .min_keysize = 1,
+ .max_keysize = 256,
+ .setkey = n2_arc4_setkey,
+ .encrypt = n2_encrypt_ecb,
+ .decrypt = n2_decrypt_ecb,
+ },
+ },
+
+ /* DES: ECB, CBC and CFB are supported */
+ { .name = "ecb(des)",
+ .drv_name = "ecb-des",
+ .block_size = DES_BLOCK_SIZE,
+ .enc_type = (ENC_TYPE_ALG_DES |
+ ENC_TYPE_CHAINING_ECB),
+ .ablkcipher = {
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = n2_des_setkey,
+ .encrypt = n2_encrypt_ecb,
+ .decrypt = n2_decrypt_ecb,
+ },
+ },
+ { .name = "cbc(des)",
+ .drv_name = "cbc-des",
+ .block_size = DES_BLOCK_SIZE,
+ .enc_type = (ENC_TYPE_ALG_DES |
+ ENC_TYPE_CHAINING_CBC),
+ .ablkcipher = {
+ .ivsize = DES_BLOCK_SIZE,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = n2_des_setkey,
+ .encrypt = n2_encrypt_chaining,
+ .decrypt = n2_decrypt_chaining,
+ },
+ },
+ { .name = "cfb(des)",
+ .drv_name = "cfb-des",
+ .block_size = DES_BLOCK_SIZE,
+ .enc_type = (ENC_TYPE_ALG_DES |
+ ENC_TYPE_CHAINING_CFB),
+ .ablkcipher = {
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = n2_des_setkey,
+ .encrypt = n2_encrypt_chaining,
+ .decrypt = n2_decrypt_chaining,
+ },
+ },
+
+ /* 3DES: ECB, CBC and CFB are supported */
+ { .name = "ecb(des3_ede)",
+ .drv_name = "ecb-3des",
+ .block_size = DES_BLOCK_SIZE,
+ .enc_type = (ENC_TYPE_ALG_3DES |
+ ENC_TYPE_CHAINING_ECB),
+ .ablkcipher = {
+ .min_keysize = 3 * DES_KEY_SIZE,
+ .max_keysize = 3 * DES_KEY_SIZE,
+ .setkey = n2_3des_setkey,
+ .encrypt = n2_encrypt_ecb,
+ .decrypt = n2_decrypt_ecb,
+ },
+ },
+ { .name = "cbc(des3_ede)",
+ .drv_name = "cbc-3des",
+ .block_size = DES_BLOCK_SIZE,
+ .enc_type = (ENC_TYPE_ALG_3DES |
+ ENC_TYPE_CHAINING_CBC),
+ .ablkcipher = {
+ .ivsize = DES_BLOCK_SIZE,
+ .min_keysize = 3 * DES_KEY_SIZE,
+ .max_keysize = 3 * DES_KEY_SIZE,
+ .setkey = n2_3des_setkey,
+ .encrypt = n2_encrypt_chaining,
+ .decrypt = n2_decrypt_chaining,
+ },
+ },
+ { .name = "cfb(des3_ede)",
+ .drv_name = "cfb-3des",
+ .block_size = DES_BLOCK_SIZE,
+ .enc_type = (ENC_TYPE_ALG_3DES |
+ ENC_TYPE_CHAINING_CFB),
+ .ablkcipher = {
+ .min_keysize = 3 * DES_KEY_SIZE,
+ .max_keysize = 3 * DES_KEY_SIZE,
+ .setkey = n2_3des_setkey,
+ .encrypt = n2_encrypt_chaining,
+ .decrypt = n2_decrypt_chaining,
+ },
+ },
+ /* AES: ECB, CBC and CTR are supported */
+ { .name = "ecb(aes)",
+ .drv_name = "ecb-aes",
+ .block_size = AES_BLOCK_SIZE,
+ .enc_type = (ENC_TYPE_ALG_AES128 |
+ ENC_TYPE_CHAINING_ECB),
+ .ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = n2_aes_setkey,
+ .encrypt = n2_encrypt_ecb,
+ .decrypt = n2_decrypt_ecb,
+ },
+ },
+ { .name = "cbc(aes)",
+ .drv_name = "cbc-aes",
+ .block_size = AES_BLOCK_SIZE,
+ .enc_type = (ENC_TYPE_ALG_AES128 |
+ ENC_TYPE_CHAINING_CBC),
+ .ablkcipher = {
+ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = n2_aes_setkey,
+ .encrypt = n2_encrypt_chaining,
+ .decrypt = n2_decrypt_chaining,
+ },
+ },
+ { .name = "ctr(aes)",
+ .drv_name = "ctr-aes",
+ .block_size = AES_BLOCK_SIZE,
+ .enc_type = (ENC_TYPE_ALG_AES128 |
+ ENC_TYPE_CHAINING_COUNTER),
+ .ablkcipher = {
+ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = n2_aes_setkey,
+ .encrypt = n2_encrypt_chaining,
+ .decrypt = n2_encrypt_chaining,
+ },
+ },
+
+};
+#define NUM_CIPHER_TMPLS ARRAY_SIZE(cipher_tmpls)
+
+static LIST_HEAD(cipher_algs);
+
+struct n2_hash_tmpl {
+ const char *name;
+ int (*digest)(struct ahash_request *req);
+ u8 digest_size;
+ u8 block_size;
+};
+static const struct n2_hash_tmpl hash_tmpls[] = {
+ { .name = "md5",
+ .digest = n2_md5_async_digest,
+ .digest_size = MD5_DIGEST_SIZE,
+ .block_size = MD5_HMAC_BLOCK_SIZE },
+ { .name = "sha1",
+ .digest = n2_sha1_async_digest,
+ .digest_size = SHA1_DIGEST_SIZE,
+ .block_size = SHA1_BLOCK_SIZE },
+ { .name = "sha256",
+ .digest = n2_sha256_async_digest,
+ .digest_size = SHA256_DIGEST_SIZE,
+ .block_size = SHA256_BLOCK_SIZE },
+ { .name = "sha224",
+ .digest = n2_sha224_async_digest,
+ .digest_size = SHA224_DIGEST_SIZE,
+ .block_size = SHA224_BLOCK_SIZE },
+};
+#define NUM_HASH_TMPLS ARRAY_SIZE(hash_tmpls)
+
+struct n2_ahash_alg {
+ struct list_head entry;
+ struct ahash_alg alg;
+};
+static LIST_HEAD(ahash_algs);
+
+static int algs_registered;
+
+static void __n2_unregister_algs(void)
+{
+ struct n2_cipher_alg *cipher, *cipher_tmp;
+ struct n2_ahash_alg *alg, *alg_tmp;
+
+ list_for_each_entry_safe(cipher, cipher_tmp, &cipher_algs, entry) {
+ crypto_unregister_alg(&cipher->alg);
+ list_del(&cipher->entry);
+ kfree(cipher);
+ }
+ list_for_each_entry_safe(alg, alg_tmp, &ahash_algs, entry) {
+ crypto_unregister_ahash(&alg->alg);
+ list_del(&alg->entry);
+ kfree(alg);
+ }
+}
+
+static int n2_cipher_cra_init(struct crypto_tfm *tfm)
+{
+ tfm->crt_ablkcipher.reqsize = sizeof(struct n2_request_context);
+ return 0;
+}
+
+static int __devinit __n2_register_one_cipher(const struct n2_cipher_tmpl *tmpl)
+{
+ struct n2_cipher_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
+ struct crypto_alg *alg;
+ int err;
+
+ if (!p)
+ return -ENOMEM;
+
+ alg = &p->alg;
+
+ snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
+ snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->drv_name);
+ alg->cra_priority = N2_CRA_PRIORITY;
+ alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
+ alg->cra_blocksize = tmpl->block_size;
+ p->enc_type = tmpl->enc_type;
+ alg->cra_ctxsize = sizeof(struct n2_cipher_context);
+ alg->cra_type = &crypto_ablkcipher_type;
+ alg->cra_u.ablkcipher = tmpl->ablkcipher;
+ alg->cra_init = n2_cipher_cra_init;
+ alg->cra_module = THIS_MODULE;
+
+ list_add(&p->entry, &cipher_algs);
+ err = crypto_register_alg(alg);
+ if (err) {
+ list_del(&p->entry);
+ kfree(p);
+ }
+ return err;
+}
+
+static int __devinit __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl)
+{
+ struct n2_ahash_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
+ struct hash_alg_common *halg;
+ struct crypto_alg *base;
+ struct ahash_alg *ahash;
+ int err;
+
+ if (!p)
+ return -ENOMEM;
+
+ ahash = &p->alg;
+ ahash->init = n2_hash_async_init;
+ ahash->update = n2_hash_async_update;
+ ahash->final = n2_hash_async_final;
+ ahash->finup = n2_hash_async_finup;
+ ahash->digest = tmpl->digest;
+
+ halg = &ahash->halg;
+ halg->digestsize = tmpl->digest_size;
+
+ base = &halg->base;
+ snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
+ snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->name);
+ base->cra_priority = N2_CRA_PRIORITY;
+ base->cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_NEED_FALLBACK;
+ base->cra_blocksize = tmpl->block_size;
+ base->cra_ctxsize = sizeof(struct n2_hash_ctx);
+ base->cra_module = THIS_MODULE;
+ base->cra_init = n2_hash_cra_init;
+ base->cra_exit = n2_hash_cra_exit;
+
+ list_add(&p->entry, &ahash_algs);
+ err = crypto_register_ahash(ahash);
+ if (err) {
+ list_del(&p->entry);
+ kfree(p);
+ }
+ return err;
+}
+
+static int __devinit n2_register_algs(void)
+{
+ int i, err = 0;
+
+ mutex_lock(&spu_lock);
+ if (algs_registered++)
+ goto out;
+
+ for (i = 0; i < NUM_HASH_TMPLS; i++) {
+ err = __n2_register_one_ahash(&hash_tmpls[i]);
+ if (err) {
+ __n2_unregister_algs();
+ goto out;
+ }
+ }
+ for (i = 0; i < NUM_CIPHER_TMPLS; i++) {
+ err = __n2_register_one_cipher(&cipher_tmpls[i]);
+ if (err) {
+ __n2_unregister_algs();
+ goto out;
+ }
+ }
+
+out:
+ mutex_unlock(&spu_lock);
+ return err;
+}
+
+static void __exit n2_unregister_algs(void)
+{
+ mutex_lock(&spu_lock);
+ if (!--algs_registered)
+ __n2_unregister_algs();
+ mutex_unlock(&spu_lock);
+}
+
+/* To map CWQ queues to interrupt sources, the hypervisor API provides
+ * a devino. This isn't very useful to us because all of the
+ * interrupts listed in the of_device node have been translated to
+ * Linux virtual IRQ cookie numbers.
+ *
+ * So we have to back-translate, going through the 'intr' and 'ino'
+ * property tables of the n2cp MDESC node, matching it with the OF
+ * 'interrupts' property entries, in order to figure out which
+ * devino goes to which already-translated IRQ.
+ */
+static int find_devino_index(struct of_device *dev, struct spu_mdesc_info *ip,
+ unsigned long dev_ino)
+{
+ const unsigned int *dev_intrs;
+ unsigned int intr;
+ int i;
+
+ for (i = 0; i < ip->num_intrs; i++) {
+ if (ip->ino_table[i].ino == dev_ino)
+ break;
+ }
+ if (i == ip->num_intrs)
+ return -ENODEV;
+
+ intr = ip->ino_table[i].intr;
+
+ dev_intrs = of_get_property(dev->node, "interrupts", NULL);
+ if (!dev_intrs)
+ return -ENODEV;
+
+ for (i = 0; i < dev->num_irqs; i++) {
+ if (dev_intrs[i] == intr)
+ return i;
+ }
+
+ return -ENODEV;
+}
+
+static int spu_map_ino(struct of_device *dev, struct spu_mdesc_info *ip,
+ const char *irq_name, struct spu_queue *p,
+ irq_handler_t handler)
+{
+ unsigned long herr;
+ int index;
+
+ herr = sun4v_ncs_qhandle_to_devino(p->qhandle, &p->devino);
+ if (herr)
+ return -EINVAL;
+
+ index = find_devino_index(dev, ip, p->devino);
+ if (index < 0)
+ return index;
+
+ p->irq = dev->irqs[index];
+
+ sprintf(p->irq_name, "%s-%d", irq_name, index);
+
+ return request_irq(p->irq, handler, IRQF_SAMPLE_RANDOM,
+ p->irq_name, p);
+}
+
+static struct kmem_cache *queue_cache[2];
+
+static void *new_queue(unsigned long q_type)
+{
+ return kmem_cache_zalloc(queue_cache[q_type - 1], GFP_KERNEL);
+}
+
+static void free_queue(void *p, unsigned long q_type)
+{
+ return kmem_cache_free(queue_cache[q_type - 1], p);
+}
+
+static int queue_cache_init(void)
+{
+ if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
+ queue_cache[HV_NCS_QTYPE_MAU - 1] =
+ kmem_cache_create("cwq_queue",
+ (MAU_NUM_ENTRIES *
+ MAU_ENTRY_SIZE),
+ MAU_ENTRY_SIZE, 0, NULL);
+ if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
+ return -ENOMEM;
+
+ if (!queue_cache[HV_NCS_QTYPE_CWQ - 1])
+ queue_cache[HV_NCS_QTYPE_CWQ - 1] =
+ kmem_cache_create("cwq_queue",
+ (CWQ_NUM_ENTRIES *
+ CWQ_ENTRY_SIZE),
+ CWQ_ENTRY_SIZE, 0, NULL);
+ if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) {
+ kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static void queue_cache_destroy(void)
+{
+ kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
+ kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]);
+}
+
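+/* Register a queue with the hypervisor.  The current thread is
+ * temporarily bound to the queue's sharing cpumask so that the qconf
+ * hypercall is issued from one of the CPUs that can reach the unit.
+ */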
+static int spu_queue_register(struct spu_queue *p, unsigned long q_type)
+{
+ cpumask_var_t old_allowed;
+ unsigned long hv_ret;
+
+ if (cpumask_empty(&p->sharing))
+ return -EINVAL;
+
+ if (!alloc_cpumask_var(&old_allowed, GFP_KERNEL))
+ return -ENOMEM;
+
+ cpumask_copy(old_allowed, &current->cpus_allowed);
+
+ set_cpus_allowed_ptr(current, &p->sharing);
+
+ hv_ret = sun4v_ncs_qconf(q_type, __pa(p->q),
+ CWQ_NUM_ENTRIES, &p->qhandle);
+ if (!hv_ret)
+ sun4v_ncs_sethead_marker(p->qhandle, 0);
+
+ set_cpus_allowed_ptr(current, old_allowed);
+
+ free_cpumask_var(old_allowed);
+
+ return (hv_ret ? -EINVAL : 0);
+}
+
+static int spu_queue_setup(struct spu_queue *p)
+{
+ int err;
+
+ p->q = new_queue(p->q_type);
+ if (!p->q)
+ return -ENOMEM;
+
+ err = spu_queue_register(p, p->q_type);
+ if (err) {
+ free_queue(p->q, p->q_type);
+ p->q = NULL;
+ }
+
+ return err;
+}
+
+static void spu_queue_destroy(struct spu_queue *p)
+{
+ unsigned long hv_ret;
+
+ if (!p->q)
+ return;
+
+ hv_ret = sun4v_ncs_qconf(p->q_type, p->qhandle, 0, &p->qhandle);
+
+ if (!hv_ret)
+ free_queue(p->q, p->q_type);
+}
+
+static void spu_list_destroy(struct list_head *list)
+{
+ struct spu_queue *p, *n;
+
+ list_for_each_entry_safe(p, n, list, list) {
+ int i;
+
+ for (i = 0; i < NR_CPUS; i++) {
+ if (cpu_to_cwq[i] == p)
+ cpu_to_cwq[i] = NULL;
+ }
+
+ if (p->irq) {
+ free_irq(p->irq, p);
+ p->irq = 0;
+ }
+ spu_queue_destroy(p);
+ list_del(&p->list);
+ kfree(p);
+ }
+}
+
+/* Walk the backward arcs of a CWQ 'exec-unit' node,
+ * gathering cpu membership information.
+ */
+static int spu_mdesc_walk_arcs(struct mdesc_handle *mdesc,
+ struct of_device *dev,
+ u64 node, struct spu_queue *p,
+ struct spu_queue **table)
+{
+ u64 arc;
+
+ mdesc_for_each_arc(arc, mdesc, node, MDESC_ARC_TYPE_BACK) {
+ u64 tgt = mdesc_arc_target(mdesc, arc);
+ const char *name = mdesc_node_name(mdesc, tgt);
+ const u64 *id;
+
+ if (strcmp(name, "cpu"))
+ continue;
+ id = mdesc_get_property(mdesc, tgt, "id", NULL);
+ if (table[*id] != NULL) {
+ dev_err(&dev->dev, "%s: SPU cpu slot already set.\n",
+ dev->node->full_name);
+ return -EINVAL;
+ }
+ cpu_set(*id, p->sharing);
+ table[*id] = p;
+ }
+ return 0;
+}
+
+/* Process an 'exec-unit' MDESC node of type 'cwq'. */
+static int handle_exec_unit(struct spu_mdesc_info *ip, struct list_head *list,
+ struct of_device *dev, struct mdesc_handle *mdesc,
+ u64 node, const char *iname, unsigned long q_type,
+ irq_handler_t handler, struct spu_queue **table)
+{
+ struct spu_queue *p;
+ int err;
+
+ p = kzalloc(sizeof(struct spu_queue), GFP_KERNEL);
+ if (!p) {
+ dev_err(&dev->dev, "%s: Could not allocate SPU queue.\n",
+ dev->node->full_name);
+ return -ENOMEM;
+ }
+
+ cpus_clear(p->sharing);
+ spin_lock_init(&p->lock);
+ p->q_type = q_type;
+ INIT_LIST_HEAD(&p->jobs);
+ list_add(&p->list, list);
+
+ err = spu_mdesc_walk_arcs(mdesc, dev, node, p, table);
+ if (err)
+ return err;
+
+ err = spu_queue_setup(p);
+ if (err)
+ return err;
+
+ return spu_map_ino(dev, ip, iname, p, handler);
+}
+
+static int spu_mdesc_scan(struct mdesc_handle *mdesc, struct of_device *dev,
+ struct spu_mdesc_info *ip, struct list_head *list,
+ const char *exec_name, unsigned long q_type,
+ irq_handler_t handler, struct spu_queue **table)
+{
+ int err = 0;
+ u64 node;
+
+ mdesc_for_each_node_by_name(mdesc, node, "exec-unit") {
+ const char *type;
+
+ type = mdesc_get_property(mdesc, node, "type", NULL);
+ if (!type || strcmp(type, exec_name))
+ continue;
+
+ err = handle_exec_unit(ip, list, dev, mdesc, node,
+ exec_name, q_type, handler, table);
+ if (err) {
+ spu_list_destroy(list);
+ break;
+ }
+ }
+
+ return err;
+}
+
+static int __devinit get_irq_props(struct mdesc_handle *mdesc, u64 node,
+ struct spu_mdesc_info *ip)
+{
+ const u64 *intr, *ino;
+ int intr_len, ino_len;
+ int i;
+
+ intr = mdesc_get_property(mdesc, node, "intr", &intr_len);
+ if (!intr)
+ return -ENODEV;
+
+ ino = mdesc_get_property(mdesc, node, "ino", &ino_len);
+ if (!ino)
+ return -ENODEV;
+
+ if (intr_len != ino_len)
+ return -EINVAL;
+
+ ip->num_intrs = intr_len / sizeof(u64);
+ ip->ino_table = kzalloc((sizeof(struct ino_blob) *
+ ip->num_intrs),
+ GFP_KERNEL);
+ if (!ip->ino_table)
+ return -ENOMEM;
+
+ for (i = 0; i < ip->num_intrs; i++) {
+ struct ino_blob *b = &ip->ino_table[i];
+ b->intr = intr[i];
+ b->ino = ino[i];
+ }
+
+ return 0;
+}
+
+static int __devinit grab_mdesc_irq_props(struct mdesc_handle *mdesc,
+ struct of_device *dev,
+ struct spu_mdesc_info *ip,
+ const char *node_name)
+{
+ const unsigned int *reg;
+ u64 node;
+
+ reg = of_get_property(dev->node, "reg", NULL);
+ if (!reg)
+ return -ENODEV;
+
+ mdesc_for_each_node_by_name(mdesc, node, "virtual-device") {
+ const char *name;
+ const u64 *chdl;
+
+ name = mdesc_get_property(mdesc, node, "name", NULL);
+ if (!name || strcmp(name, node_name))
+ continue;
+ chdl = mdesc_get_property(mdesc, node, "cfg-handle", NULL);
+ if (!chdl || (*chdl != *reg))
+ continue;
+ ip->cfg_handle = *chdl;
+ return get_irq_props(mdesc, node, ip);
+ }
+
+ return -ENODEV;
+}
+
+static unsigned long n2_spu_hvapi_major;
+static unsigned long n2_spu_hvapi_minor;
+
+static int __devinit n2_spu_hvapi_register(void)
+{
+ int err;
+
+ n2_spu_hvapi_major = 2;
+ n2_spu_hvapi_minor = 0;
+
+ err = sun4v_hvapi_register(HV_GRP_NCS,
+ n2_spu_hvapi_major,
+ &n2_spu_hvapi_minor);
+
+ if (!err)
+ pr_info("Registered NCS HVAPI version %lu.%lu\n",
+ n2_spu_hvapi_major,
+ n2_spu_hvapi_minor);
+
+ return err;
+}
+
+static void n2_spu_hvapi_unregister(void)
+{
+ sun4v_hvapi_unregister(HV_GRP_NCS);
+}
+
+static int global_ref;
+
+static int __devinit grab_global_resources(void)
+{
+ int err = 0;
+
+ mutex_lock(&spu_lock);
+
+ if (global_ref++)
+ goto out;
+
+ err = n2_spu_hvapi_register();
+ if (err)
+ goto out;
+
+ err = queue_cache_init();
+ if (err)
+ goto out_hvapi_release;
+
+ err = -ENOMEM;
+ cpu_to_cwq = kzalloc(sizeof(struct spu_queue *) * NR_CPUS,
+ GFP_KERNEL);
+ if (!cpu_to_cwq)
+ goto out_queue_cache_destroy;
+
+ cpu_to_mau = kzalloc(sizeof(struct spu_queue *) * NR_CPUS,
+ GFP_KERNEL);
+ if (!cpu_to_mau)
+ goto out_free_cwq_table;
+
+ err = 0;
+
+out:
+ if (err)
+ global_ref--;
+ mutex_unlock(&spu_lock);
+ return err;
+
+out_free_cwq_table:
+ kfree(cpu_to_cwq);
+ cpu_to_cwq = NULL;
+
+out_queue_cache_destroy:
+ queue_cache_destroy();
+
+out_hvapi_release:
+ n2_spu_hvapi_unregister();
+ goto out;
+}
+
+static void release_global_resources(void)
+{
+ mutex_lock(&spu_lock);
+ if (!--global_ref) {
+ kfree(cpu_to_cwq);
+ cpu_to_cwq = NULL;
+
+ kfree(cpu_to_mau);
+ cpu_to_mau = NULL;
+
+ queue_cache_destroy();
+ n2_spu_hvapi_unregister();
+ }
+ mutex_unlock(&spu_lock);
+}
+
+static struct n2_crypto * __devinit alloc_n2cp(void)
+{
+ struct n2_crypto *np = kzalloc(sizeof(struct n2_crypto), GFP_KERNEL);
+
+ if (np)
+ INIT_LIST_HEAD(&np->cwq_list);
+
+ return np;
+}
+
+static void free_n2cp(struct n2_crypto *np)
+{
+ if (np->cwq_info.ino_table) {
+ kfree(np->cwq_info.ino_table);
+ np->cwq_info.ino_table = NULL;
+ }
+
+ kfree(np);
+}
+
+static void __devinit n2_spu_driver_version(void)
+{
+ static int n2_spu_version_printed;
+
+ if (n2_spu_version_printed++ == 0)
+ pr_info("%s", version);
+}
+
+static int __devinit n2_crypto_probe(struct of_device *dev,
+ const struct of_device_id *match)
+{
+ struct mdesc_handle *mdesc;
+ const char *full_name;
+ struct n2_crypto *np;
+ int err;
+
+ n2_spu_driver_version();
+
+ full_name = dev->node->full_name;
+ pr_info("Found N2CP at %s\n", full_name);
+
+ np = alloc_n2cp();
+ if (!np) {
+ dev_err(&dev->dev, "%s: Unable to allocate n2cp.\n",
+ full_name);
+ return -ENOMEM;
+ }
+
+ err = grab_global_resources();
+ if (err) {
+ dev_err(&dev->dev, "%s: Unable to grab "
+ "global resources.\n", full_name);
+ goto out_free_n2cp;
+ }
+
+ mdesc = mdesc_grab();
+
+ if (!mdesc) {
+ dev_err(&dev->dev, "%s: Unable to grab MDESC.\n",
+ full_name);
+ err = -ENODEV;
+ goto out_free_global;
+ }
+ err = grab_mdesc_irq_props(mdesc, dev, &np->cwq_info, "n2cp");
+ if (err) {
+ dev_err(&dev->dev, "%s: Unable to grab IRQ props.\n",
+ full_name);
+ mdesc_release(mdesc);
+ goto out_free_global;
+ }
+
+ err = spu_mdesc_scan(mdesc, dev, &np->cwq_info, &np->cwq_list,
+ "cwq", HV_NCS_QTYPE_CWQ, cwq_intr,
+ cpu_to_cwq);
+ mdesc_release(mdesc);
+
+ if (err) {
+ dev_err(&dev->dev, "%s: CWQ MDESC scan failed.\n",
+ full_name);
+ goto out_free_global;
+ }
+
+ err = n2_register_algs();
+ if (err) {
+ dev_err(&dev->dev, "%s: Unable to register algorithms.\n",
+ full_name);
+ goto out_free_spu_list;
+ }
+
+ dev_set_drvdata(&dev->dev, np);
+
+ return 0;
+
+out_free_spu_list:
+ spu_list_destroy(&np->cwq_list);
+
+out_free_global:
+ release_global_resources();
+
+out_free_n2cp:
+ free_n2cp(np);
+
+ return err;
+}
+
+static int __devexit n2_crypto_remove(struct of_device *dev)
+{
+ struct n2_crypto *np = dev_get_drvdata(&dev->dev);
+
+ n2_unregister_algs();
+
+ spu_list_destroy(&np->cwq_list);
+
+ release_global_resources();
+
+ free_n2cp(np);
+
+ return 0;
+}
+
+static struct n2_mau * __devinit alloc_ncp(void)
+{
+ struct n2_mau *mp = kzalloc(sizeof(struct n2_mau), GFP_KERNEL);
+
+ if (mp)
+ INIT_LIST_HEAD(&mp->mau_list);
+
+ return mp;
+}
+
+static void free_ncp(struct n2_mau *mp)
+{
+ if (mp->mau_info.ino_table) {
+ kfree(mp->mau_info.ino_table);
+ mp->mau_info.ino_table = NULL;
+ }
+
+ kfree(mp);
+}
+
+static int __devinit n2_mau_probe(struct of_device *dev,
+ const struct of_device_id *match)
+{
+ struct mdesc_handle *mdesc;
+ const char *full_name;
+ struct n2_mau *mp;
+ int err;
+
+ n2_spu_driver_version();
+
+ full_name = dev->node->full_name;
+ pr_info("Found NCP at %s\n", full_name);
+
+ mp = alloc_ncp();
+ if (!mp) {
+ dev_err(&dev->dev, "%s: Unable to allocate ncp.\n",
+ full_name);
+ return -ENOMEM;
+ }
+
+ err = grab_global_resources();
+ if (err) {
+ dev_err(&dev->dev, "%s: Unable to grab "
+ "global resources.\n", full_name);
+ goto out_free_ncp;
+ }
+
+ mdesc = mdesc_grab();
+
+ if (!mdesc) {
+ dev_err(&dev->dev, "%s: Unable to grab MDESC.\n",
+ full_name);
+ err = -ENODEV;
+ goto out_free_global;
+ }
+
+ err = grab_mdesc_irq_props(mdesc, dev, &mp->mau_info, "ncp");
+ if (err) {
+ dev_err(&dev->dev, "%s: Unable to grab IRQ props.\n",
+ full_name);
+ mdesc_release(mdesc);
+ goto out_free_global;
+ }
+
+ err = spu_mdesc_scan(mdesc, dev, &mp->mau_info, &mp->mau_list,
+ "mau", HV_NCS_QTYPE_MAU, mau_intr,
+ cpu_to_mau);
+ mdesc_release(mdesc);
+
+ if (err) {
+ dev_err(&dev->dev, "%s: MAU MDESC scan failed.\n",
+ full_name);
+ goto out_free_global;
+ }
+
+ dev_set_drvdata(&dev->dev, mp);
+
+ return 0;
+
+out_free_global:
+ release_global_resources();
+
+out_free_ncp:
+ free_ncp(mp);
+
+ return err;
+}
+
+static int __devexit n2_mau_remove(struct of_device *dev)
+{
+ struct n2_mau *mp = dev_get_drvdata(&dev->dev);
+
+ spu_list_destroy(&mp->mau_list);
+
+ release_global_resources();
+
+ free_ncp(mp);
+
+ return 0;
+}
+
+static struct of_device_id n2_crypto_match[] = {
+ {
+ .name = "n2cp",
+ .compatible = "SUNW,n2-cwq",
+ },
+ {
+ .name = "n2cp",
+ .compatible = "SUNW,vf-cwq",
+ },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, n2_crypto_match);
+
+static struct of_platform_driver n2_crypto_driver = {
+ .name = "n2cp",
+ .match_table = n2_crypto_match,
+ .probe = n2_crypto_probe,
+ .remove = __devexit_p(n2_crypto_remove),
+};
+
+static struct of_device_id n2_mau_match[] = {
+ {
+ .name = "ncp",
+ .compatible = "SUNW,n2-mau",
+ },
+ {
+ .name = "ncp",
+ .compatible = "SUNW,vf-mau",
+ },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, n2_mau_match);
+
+static struct of_platform_driver n2_mau_driver = {
+ .name = "ncp",
+ .match_table = n2_mau_match,
+ .probe = n2_mau_probe,
+ .remove = __devexit_p(n2_mau_remove),
+};
+
+static int __init n2_init(void)
+{
+ int err = of_register_driver(&n2_crypto_driver, &of_bus_type);
+
+ if (!err) {
+ err = of_register_driver(&n2_mau_driver, &of_bus_type);
+ if (err)
+ of_unregister_driver(&n2_crypto_driver);
+ }
+ return err;
+}
+
+static void __exit n2_exit(void)
+{
+ of_unregister_driver(&n2_mau_driver);
+ of_unregister_driver(&n2_crypto_driver);
+}
+
+module_init(n2_init);
+module_exit(n2_exit);
diff --git a/drivers/crypto/n2_core.h b/drivers/crypto/n2_core.h
new file mode 100644
index 000000000000..4bcbbeae98f5
--- /dev/null
+++ b/drivers/crypto/n2_core.h
@@ -0,0 +1,231 @@
+#ifndef _N2_CORE_H
+#define _N2_CORE_H
+
+#ifndef __ASSEMBLY__
+
+struct ino_blob {
+ u64 intr;
+ u64 ino;
+};
+
+struct spu_mdesc_info {
+ u64 cfg_handle;
+ struct ino_blob *ino_table;
+ int num_intrs;
+};
+
+struct n2_crypto {
+ struct spu_mdesc_info cwq_info;
+ struct list_head cwq_list;
+};
+
+struct n2_mau {
+ struct spu_mdesc_info mau_info;
+ struct list_head mau_list;
+};
+
+#define CWQ_ENTRY_SIZE 64
+#define CWQ_NUM_ENTRIES 64
+
+#define MAU_ENTRY_SIZE 64
+#define MAU_NUM_ENTRIES 64
+
+struct cwq_initial_entry {
+ u64 control;
+ u64 src_addr;
+ u64 auth_key_addr;
+ u64 auth_iv_addr;
+ u64 final_auth_state_addr;
+ u64 enc_key_addr;
+ u64 enc_iv_addr;
+ u64 dest_addr;
+};
+
+struct cwq_ext_entry {
+ u64 len;
+ u64 src_addr;
+ u64 resv1;
+ u64 resv2;
+ u64 resv3;
+ u64 resv4;
+ u64 resv5;
+ u64 resv6;
+};
+
+struct cwq_final_entry {
+ u64 control;
+ u64 src_addr;
+ u64 resv1;
+ u64 resv2;
+ u64 resv3;
+ u64 resv4;
+ u64 resv5;
+ u64 resv6;
+};
+
+#define CONTROL_LEN 0x000000000000ffffULL
+#define CONTROL_LEN_SHIFT 0
+#define CONTROL_HMAC_KEY_LEN 0x0000000000ff0000ULL
+#define CONTROL_HMAC_KEY_LEN_SHIFT 16
+#define CONTROL_ENC_TYPE 0x00000000ff000000ULL
+#define CONTROL_ENC_TYPE_SHIFT 24
+#define ENC_TYPE_ALG_RC4_STREAM 0x00ULL
+#define ENC_TYPE_ALG_RC4_NOSTREAM 0x04ULL
+#define ENC_TYPE_ALG_DES 0x08ULL
+#define ENC_TYPE_ALG_3DES 0x0cULL
+#define ENC_TYPE_ALG_AES128 0x10ULL
+#define ENC_TYPE_ALG_AES192 0x14ULL
+#define ENC_TYPE_ALG_AES256 0x18ULL
+#define ENC_TYPE_ALG_RESERVED 0x1cULL
+#define ENC_TYPE_ALG_MASK 0x1cULL
+#define ENC_TYPE_CHAINING_ECB 0x00ULL
+#define ENC_TYPE_CHAINING_CBC 0x01ULL
+#define ENC_TYPE_CHAINING_CFB 0x02ULL
+#define ENC_TYPE_CHAINING_COUNTER 0x03ULL
+#define ENC_TYPE_CHAINING_MASK 0x03ULL
+#define CONTROL_AUTH_TYPE 0x0000001f00000000ULL
+#define CONTROL_AUTH_TYPE_SHIFT 32
+#define AUTH_TYPE_RESERVED 0x00ULL
+#define AUTH_TYPE_MD5 0x01ULL
+#define AUTH_TYPE_SHA1 0x02ULL
+#define AUTH_TYPE_SHA256 0x03ULL
+#define AUTH_TYPE_CRC32 0x04ULL
+#define AUTH_TYPE_HMAC_MD5 0x05ULL
+#define AUTH_TYPE_HMAC_SHA1 0x06ULL
+#define AUTH_TYPE_HMAC_SHA256 0x07ULL
+#define AUTH_TYPE_TCP_CHECKSUM 0x08ULL
+#define AUTH_TYPE_SSL_HMAC_MD5 0x09ULL
+#define AUTH_TYPE_SSL_HMAC_SHA1 0x0aULL
+#define AUTH_TYPE_SSL_HMAC_SHA256 0x0bULL
+#define CONTROL_STRAND 0x000000e000000000ULL
+#define CONTROL_STRAND_SHIFT 37
+#define CONTROL_HASH_LEN 0x0000ff0000000000ULL
+#define CONTROL_HASH_LEN_SHIFT 40
+#define CONTROL_INTERRUPT 0x0001000000000000ULL
+#define CONTROL_STORE_FINAL_AUTH_STATE 0x0002000000000000ULL
+#define CONTROL_RESERVED 0x001c000000000000ULL
+#define CONTROL_HV_DONE 0x0004000000000000ULL
+#define CONTROL_HV_PROTOCOL_ERROR 0x0008000000000000ULL
+#define CONTROL_HV_HARDWARE_ERROR 0x0010000000000000ULL
+#define CONTROL_END_OF_BLOCK 0x0020000000000000ULL
+#define CONTROL_START_OF_BLOCK 0x0040000000000000ULL
+#define CONTROL_ENCRYPT 0x0080000000000000ULL
+#define CONTROL_OPCODE 0xff00000000000000ULL
+#define CONTROL_OPCODE_SHIFT 56
+#define OPCODE_INPLACE_BIT 0x80ULL
+#define OPCODE_SSL_KEYBLOCK 0x10ULL
+#define OPCODE_COPY 0x20ULL
+#define OPCODE_ENCRYPT 0x40ULL
+#define OPCODE_AUTH_MAC 0x41ULL
+
+#endif /* !(__ASSEMBLY__) */
+
+/* NCS v2.0 hypervisor interfaces */
+#define HV_NCS_QTYPE_MAU 0x01
+#define HV_NCS_QTYPE_CWQ 0x02
+
+/* ncs_qconf()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_NCS_QCONF
+ * ARG0: Queue type (HV_NCS_QTYPE_{MAU,CWQ})
+ * ARG1: Real address of queue, or handle for unconfigure
+ * ARG2: Number of entries in queue, zero for unconfigure
+ * RET0: status
+ * RET1: queue handle
+ *
+ * Configure a queue in the stream processing unit.
+ *
+ * The real address given as the base must be 64-byte
+ * aligned.
+ *
+ * The queue size can range from a minimum of 2 to a maximum
+ * of 64. The queue size must be a power of two.
+ *
+ * To unconfigure a queue, specify a length of zero and place
+ * the queue handle into ARG1.
+ *
+ * On configure success the hypervisor will set the FIRST, HEAD,
+ * and TAIL registers to the address of the first entry in the
+ * queue. The LAST register will be set to point to the last
+ * entry in the queue.
+ */
+#define HV_FAST_NCS_QCONF 0x111
+
+/* ncs_qinfo()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_NCS_QINFO
+ * ARG0: Queue handle
+ * RET0: status
+ * RET1: Queue type (HV_NCS_QTYPE_{MAU,CWQ})
+ * RET2: Queue base address
+ * RET3: Number of entries
+ */
+#define HV_FAST_NCS_QINFO 0x112
+
+/* ncs_gethead()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_NCS_GETHEAD
+ * ARG0: Queue handle
+ * RET0: status
+ * RET1: queue head offset
+ */
+#define HV_FAST_NCS_GETHEAD 0x113
+
+/* ncs_gettail()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_NCS_GETTAIL
+ * ARG0: Queue handle
+ * RET0: status
+ * RET1: queue tail offset
+ */
+#define HV_FAST_NCS_GETTAIL 0x114
+
+/* ncs_settail()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_NCS_SETTAIL
+ * ARG0: Queue handle
+ * ARG1: New tail offset
+ * RET0: status
+ */
+#define HV_FAST_NCS_SETTAIL 0x115
+
+/* ncs_qhandle_to_devino()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_NCS_QHANDLE_TO_DEVINO
+ * ARG0: Queue handle
+ * RET0: status
+ * RET1: devino
+ */
+#define HV_FAST_NCS_QHANDLE_TO_DEVINO 0x116
+
+/* ncs_sethead_marker()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_NCS_SETHEAD_MARKER
+ * ARG0: Queue handle
+ * ARG1: New head offset
+ * RET0: status
+ */
+#define HV_FAST_NCS_SETHEAD_MARKER 0x117
+
+#ifndef __ASSEMBLY__
+extern unsigned long sun4v_ncs_qconf(unsigned long queue_type,
+ unsigned long queue_ra,
+ unsigned long num_entries,
+ unsigned long *qhandle);
+extern unsigned long sun4v_ncs_qinfo(unsigned long qhandle,
+ unsigned long *queue_type,
+ unsigned long *queue_ra,
+ unsigned long *num_entries);
+extern unsigned long sun4v_ncs_gethead(unsigned long qhandle,
+ unsigned long *head);
+extern unsigned long sun4v_ncs_gettail(unsigned long qhandle,
+ unsigned long *tail);
+extern unsigned long sun4v_ncs_settail(unsigned long qhandle,
+ unsigned long tail);
+extern unsigned long sun4v_ncs_qhandle_to_devino(unsigned long qhandle,
+ unsigned long *devino);
+extern unsigned long sun4v_ncs_sethead_marker(unsigned long qhandle,
+ unsigned long head);
+#endif /* !(__ASSEMBLY__) */
+
+#endif /* _N2_CORE_H */
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
new file mode 100644
index 000000000000..8b034337793f
--- /dev/null
+++ b/drivers/crypto/omap-sham.c
@@ -0,0 +1,1259 @@
+/*
+ * Cryptographic API.
+ *
+ * Support for OMAP SHA1/MD5 HW acceleration.
+ *
+ * Copyright (c) 2010 Nokia Corporation
+ * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Some ideas are from old omap-sha1-md5.c driver.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/version.h>
+#include <linux/err.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/crypto.h>
+#include <linux/cryptohash.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/algapi.h>
+#include <crypto/sha.h>
+#include <crypto/hash.h>
+#include <crypto/internal/hash.h>
+
+#include <plat/cpu.h>
+#include <plat/dma.h>
+#include <mach/irqs.h>
+
+#define SHA_REG_DIGEST(x) (0x00 + ((x) * 0x04))
+#define SHA_REG_DIN(x) (0x1C + ((x) * 0x04))
+
+#define SHA1_MD5_BLOCK_SIZE SHA1_BLOCK_SIZE
+#define MD5_DIGEST_SIZE 16
+
+#define SHA_REG_DIGCNT 0x14
+
+#define SHA_REG_CTRL 0x18
+#define SHA_REG_CTRL_LENGTH (0xFFFFFFFF << 5)
+#define SHA_REG_CTRL_CLOSE_HASH (1 << 4)
+#define SHA_REG_CTRL_ALGO_CONST (1 << 3)
+#define SHA_REG_CTRL_ALGO (1 << 2)
+#define SHA_REG_CTRL_INPUT_READY (1 << 1)
+#define SHA_REG_CTRL_OUTPUT_READY (1 << 0)
+
+#define SHA_REG_REV 0x5C
+#define SHA_REG_REV_MAJOR 0xF0
+#define SHA_REG_REV_MINOR 0x0F
+
+#define SHA_REG_MASK 0x60
+#define SHA_REG_MASK_DMA_EN (1 << 3)
+#define SHA_REG_MASK_IT_EN (1 << 2)
+#define SHA_REG_MASK_SOFTRESET (1 << 1)
+#define SHA_REG_AUTOIDLE (1 << 0)
+
+#define SHA_REG_SYSSTATUS 0x64
+#define SHA_REG_SYSSTATUS_RESETDONE (1 << 0)
+
+#define DEFAULT_TIMEOUT_INTERVAL HZ
+
+#define FLAGS_FIRST 0x0001
+#define FLAGS_FINUP 0x0002
+#define FLAGS_FINAL 0x0004
+#define FLAGS_FAST 0x0008
+#define FLAGS_SHA1 0x0010
+#define FLAGS_DMA_ACTIVE 0x0020
+#define FLAGS_OUTPUT_READY 0x0040
+#define FLAGS_CLEAN 0x0080
+#define FLAGS_INIT 0x0100
+#define FLAGS_CPU 0x0200
+#define FLAGS_HMAC 0x0400
+
+/* 3rd byte */
+#define FLAGS_BUSY 16
+
+#define OP_UPDATE 1
+#define OP_FINAL 2
+
+struct omap_sham_dev;
+
+struct omap_sham_reqctx {
+ struct omap_sham_dev *dd;
+ unsigned long flags;
+ unsigned long op;
+
+ size_t digcnt;
+ u8 *buffer;
+ size_t bufcnt;
+ size_t buflen;
+ dma_addr_t dma_addr;
+
+ /* walk state */
+ struct scatterlist *sg;
+ unsigned int offset; /* offset in current sg */
+ unsigned int total; /* total request */
+};
+
+struct omap_sham_hmac_ctx {
+ struct crypto_shash *shash;
+ u8 ipad[SHA1_MD5_BLOCK_SIZE];
+ u8 opad[SHA1_MD5_BLOCK_SIZE];
+};
+
+struct omap_sham_ctx {
+ struct omap_sham_dev *dd;
+
+ unsigned long flags;
+
+ /* fallback stuff */
+ struct crypto_shash *fallback;
+
+ struct omap_sham_hmac_ctx base[0];
+};
+
+#define OMAP_SHAM_QUEUE_LENGTH 1
+
+struct omap_sham_dev {
+ struct list_head list;
+ unsigned long phys_base;
+ struct device *dev;
+ void __iomem *io_base;
+ int irq;
+ struct clk *iclk;
+ spinlock_t lock;
+ int dma;
+ int dma_lch;
+ struct tasklet_struct done_task;
+ struct tasklet_struct queue_task;
+
+ unsigned long flags;
+ struct crypto_queue queue;
+ struct ahash_request *req;
+};
+
+struct omap_sham_drv {
+ struct list_head dev_list;
+ spinlock_t lock;
+ unsigned long flags;
+};
+
+static struct omap_sham_drv sham = {
+ .dev_list = LIST_HEAD_INIT(sham.dev_list),
+ .lock = __SPIN_LOCK_UNLOCKED(sham.lock),
+};
+
+static inline u32 omap_sham_read(struct omap_sham_dev *dd, u32 offset)
+{
+ return __raw_readl(dd->io_base + offset);
+}
+
+static inline void omap_sham_write(struct omap_sham_dev *dd,
+ u32 offset, u32 value)
+{
+ __raw_writel(value, dd->io_base + offset);
+}
+
+static inline void omap_sham_write_mask(struct omap_sham_dev *dd, u32 address,
+ u32 value, u32 mask)
+{
+ u32 val;
+
+ val = omap_sham_read(dd, address);
+ val &= ~mask;
+ val |= value;
+ omap_sham_write(dd, address, val);
+}
+
+static inline int omap_sham_wait(struct omap_sham_dev *dd, u32 offset, u32 bit)
+{
+ unsigned long timeout = jiffies + DEFAULT_TIMEOUT_INTERVAL;
+
+ while (!(omap_sham_read(dd, offset) & bit)) {
+ if (time_is_before_jiffies(timeout))
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void omap_sham_copy_hash(struct ahash_request *req, int out)
+{
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+ u32 *hash = (u32 *)req->result;
+ int i;
+
+ if (likely(ctx->flags & FLAGS_SHA1)) {
+ /* SHA1 results are in big endian */
+ for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++)
+ if (out)
+ hash[i] = be32_to_cpu(omap_sham_read(ctx->dd,
+ SHA_REG_DIGEST(i)));
+ else
+ omap_sham_write(ctx->dd, SHA_REG_DIGEST(i),
+ cpu_to_be32(hash[i]));
+ } else {
+ /* MD5 results are in little endian */
+ for (i = 0; i < MD5_DIGEST_SIZE / sizeof(u32); i++)
+ if (out)
+ hash[i] = le32_to_cpu(omap_sham_read(ctx->dd,
+ SHA_REG_DIGEST(i)));
+ else
+ omap_sham_write(ctx->dd, SHA_REG_DIGEST(i),
+ cpu_to_le32(hash[i]));
+ }
+}
+
+static int omap_sham_write_ctrl(struct omap_sham_dev *dd, size_t length,
+ int final, int dma)
+{
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
+ u32 val = length << 5, mask;
+
+ if (unlikely(!ctx->digcnt)) {
+
+ clk_enable(dd->iclk);
+
+ if (!(dd->flags & FLAGS_INIT)) {
+ omap_sham_write_mask(dd, SHA_REG_MASK,
+ SHA_REG_MASK_SOFTRESET, SHA_REG_MASK_SOFTRESET);
+
+ if (omap_sham_wait(dd, SHA_REG_SYSSTATUS,
+ SHA_REG_SYSSTATUS_RESETDONE))
+ return -ETIMEDOUT;
+
+ dd->flags |= FLAGS_INIT;
+ }
+ } else {
+ omap_sham_write(dd, SHA_REG_DIGCNT, ctx->digcnt);
+ }
+
+ omap_sham_write_mask(dd, SHA_REG_MASK,
+ SHA_REG_MASK_IT_EN | (dma ? SHA_REG_MASK_DMA_EN : 0),
+ SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN);
+ /*
+ * Setting ALGO_CONST only for the first iteration
+ * and CLOSE_HASH only for the last one.
+ */
+ if (ctx->flags & FLAGS_SHA1)
+ val |= SHA_REG_CTRL_ALGO;
+ if (!ctx->digcnt)
+ val |= SHA_REG_CTRL_ALGO_CONST;
+ if (final)
+ val |= SHA_REG_CTRL_CLOSE_HASH;
+
+ mask = SHA_REG_CTRL_ALGO_CONST | SHA_REG_CTRL_CLOSE_HASH |
+ SHA_REG_CTRL_ALGO | SHA_REG_CTRL_LENGTH;
+
+ omap_sham_write_mask(dd, SHA_REG_CTRL, val, mask);
+
+ return 0;
+}
+
+static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf,
+ size_t length, int final)
+{
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
+ int err, count, len32;
+ const u32 *buffer = (const u32 *)buf;
+
+ dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n",
+ ctx->digcnt, length, final);
+
+ err = omap_sham_write_ctrl(dd, length, final, 0);
+ if (err)
+ return err;
+
+ if (omap_sham_wait(dd, SHA_REG_CTRL, SHA_REG_CTRL_INPUT_READY))
+ return -ETIMEDOUT;
+
+ ctx->digcnt += length;
+
+ if (final)
+ ctx->flags |= FLAGS_FINAL; /* catch last interrupt */
+
+ len32 = DIV_ROUND_UP(length, sizeof(u32));
+
+ for (count = 0; count < len32; count++)
+ omap_sham_write(dd, SHA_REG_DIN(count), buffer[count]);
+
+ return -EINPROGRESS;
+}
+
+static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr,
+ size_t length, int final)
+{
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
+ int err, len32;
+
+ dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n",
+ ctx->digcnt, length, final);
+
+ /* flush cache entries related to our page */
+ if (dma_addr == ctx->dma_addr)
+ dma_sync_single_for_device(dd->dev, dma_addr, length,
+ DMA_TO_DEVICE);
+
+ len32 = DIV_ROUND_UP(length, sizeof(u32));
+
+ omap_set_dma_transfer_params(dd->dma_lch, OMAP_DMA_DATA_TYPE_S32, len32,
+ 1, OMAP_DMA_SYNC_PACKET, dd->dma, OMAP_DMA_DST_SYNC);
+
+ omap_set_dma_src_params(dd->dma_lch, 0, OMAP_DMA_AMODE_POST_INC,
+ dma_addr, 0, 0);
+
+ err = omap_sham_write_ctrl(dd, length, final, 1);
+ if (err)
+ return err;
+
+ ctx->digcnt += length;
+
+ if (final)
+ ctx->flags |= FLAGS_FINAL; /* catch last interrupt */
+
+ dd->flags |= FLAGS_DMA_ACTIVE;
+
+ omap_start_dma(dd->dma_lch);
+
+ return -EINPROGRESS;
+}
+
+static size_t omap_sham_append_buffer(struct omap_sham_reqctx *ctx,
+ const u8 *data, size_t length)
+{
+ size_t count = min(length, ctx->buflen - ctx->bufcnt);
+
+ count = min(count, ctx->total);
+ if (count <= 0)
+ return 0;
+ memcpy(ctx->buffer + ctx->bufcnt, data, count);
+ ctx->bufcnt += count;
+
+ return count;
+}
+
+static size_t omap_sham_append_sg(struct omap_sham_reqctx *ctx)
+{
+ size_t count;
+
+ while (ctx->sg) {
+ count = omap_sham_append_buffer(ctx,
+ sg_virt(ctx->sg) + ctx->offset,
+ ctx->sg->length - ctx->offset);
+ if (!count)
+ break;
+ ctx->offset += count;
+ ctx->total -= count;
+ if (ctx->offset == ctx->sg->length) {
+ ctx->sg = sg_next(ctx->sg);
+ if (ctx->sg)
+ ctx->offset = 0;
+ else
+ ctx->total = 0;
+ }
+ }
+
+ return 0;
+}
+
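+/* Slow DMA path: input is accumulated into the pre-mapped bounce buffer
+ * via omap_sham_append_sg(), and a DMA transfer is kicked off only once
+ * the buffer is full or this is the last piece of a finup/final request.
+ */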
+static int omap_sham_update_dma_slow(struct omap_sham_dev *dd)
+{
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
+ unsigned int final;
+ size_t count;
+
+ if (!ctx->total)
+ return 0;
+
+ omap_sham_append_sg(ctx);
+
+ final = (ctx->flags & FLAGS_FINUP) && !ctx->total;
+
+ dev_dbg(dd->dev, "slow: bufcnt: %u, digcnt: %d, final: %d\n",
+ ctx->bufcnt, ctx->digcnt, final);
+
+ if (final || (ctx->bufcnt == ctx->buflen && ctx->total)) {
+ count = ctx->bufcnt;
+ ctx->bufcnt = 0;
+ return omap_sham_xmit_dma(dd, ctx->dma_addr, count, final);
+ }
+
+ return 0;
+}
+
+static int omap_sham_update_dma_fast(struct omap_sham_dev *dd)
+{
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
+ unsigned int length;
+
+ ctx->flags |= FLAGS_FAST;
+
+ length = min(ctx->total, sg_dma_len(ctx->sg));
+ ctx->total = length;
+
+ if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
+ dev_err(dd->dev, "dma_map_sg error\n");
+ return -EINVAL;
+ }
+
+ ctx->total -= length;
+
+ return omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, 1);
+}
+
+static int omap_sham_update_cpu(struct omap_sham_dev *dd)
+{
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
+ int bufcnt;
+
+ omap_sham_append_sg(ctx);
+ bufcnt = ctx->bufcnt;
+ ctx->bufcnt = 0;
+
+ return omap_sham_xmit_cpu(dd, ctx->buffer, bufcnt, 1);
+}
+
+static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
+{
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
+
+ omap_stop_dma(dd->dma_lch);
+ if (ctx->flags & FLAGS_FAST)
+ dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
+
+ return 0;
+}
+
+static void omap_sham_cleanup(struct ahash_request *req)
+{
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+ struct omap_sham_dev *dd = ctx->dd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dd->lock, flags);
+ if (ctx->flags & FLAGS_CLEAN) {
+ spin_unlock_irqrestore(&dd->lock, flags);
+ return;
+ }
+ ctx->flags |= FLAGS_CLEAN;
+ spin_unlock_irqrestore(&dd->lock, flags);
+
+ if (ctx->digcnt)
+ clk_disable(dd->iclk);
+
+ if (ctx->dma_addr)
+ dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen,
+ DMA_TO_DEVICE);
+
+ if (ctx->buffer)
+ free_page((unsigned long)ctx->buffer);
+
+ dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt, ctx->bufcnt);
+}
+
+static int omap_sham_init(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+ struct omap_sham_dev *dd = NULL, *tmp;
+
+ spin_lock_bh(&sham.lock);
+ if (!tctx->dd) {
+ list_for_each_entry(tmp, &sham.dev_list, list) {
+ dd = tmp;
+ break;
+ }
+ tctx->dd = dd;
+ } else {
+ dd = tctx->dd;
+ }
+ spin_unlock_bh(&sham.lock);
+
+ ctx->dd = dd;
+
+ ctx->flags = 0;
+
+ ctx->flags |= FLAGS_FIRST;
+
+ dev_dbg(dd->dev, "init: digest size: %d\n",
+ crypto_ahash_digestsize(tfm));
+
+ if (crypto_ahash_digestsize(tfm) == SHA1_DIGEST_SIZE)
+ ctx->flags |= FLAGS_SHA1;
+
+ ctx->bufcnt = 0;
+ ctx->digcnt = 0;
+
+ ctx->buflen = PAGE_SIZE;
+ ctx->buffer = (void *)__get_free_page(
+ (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC);
+ if (!ctx->buffer)
+ return -ENOMEM;
+
+ ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, ctx->buflen,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
+ dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen);
+ free_page((unsigned long)ctx->buffer);
+ return -EINVAL;
+ }
+
+ if (tctx->flags & FLAGS_HMAC) {
+ struct omap_sham_hmac_ctx *bctx = tctx->base;
+
+ memcpy(ctx->buffer, bctx->ipad, SHA1_MD5_BLOCK_SIZE);
+ ctx->bufcnt = SHA1_MD5_BLOCK_SIZE;
+ ctx->flags |= FLAGS_HMAC;
+ }
+
+ return 0;
+
+}
+
+static int omap_sham_update_req(struct omap_sham_dev *dd)
+{
+ struct ahash_request *req = dd->req;
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+ int err;
+
+ dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, finup: %d\n",
+ ctx->total, ctx->digcnt, (ctx->flags & FLAGS_FINUP) != 0);
+
+ if (ctx->flags & FLAGS_CPU)
+ err = omap_sham_update_cpu(dd);
+ else if (ctx->flags & FLAGS_FAST)
+ err = omap_sham_update_dma_fast(dd);
+ else
+ err = omap_sham_update_dma_slow(dd);
+
+ /* wait for DMA completion before we can take more data */
+ dev_dbg(dd->dev, "update: err: %d, digcnt: %d\n", err, ctx->digcnt);
+
+ return err;
+}
+
+static int omap_sham_final_req(struct omap_sham_dev *dd)
+{
+ struct ahash_request *req = dd->req;
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+ int err = 0, use_dma = 1;
+
+ if (ctx->bufcnt <= 64)
+ /* faster to handle last block with cpu */
+ use_dma = 0;
+
+ if (use_dma)
+ err = omap_sham_xmit_dma(dd, ctx->dma_addr, ctx->bufcnt, 1);
+ else
+ err = omap_sham_xmit_cpu(dd, ctx->buffer, ctx->bufcnt, 1);
+
+ ctx->bufcnt = 0;
+
+ if (err != -EINPROGRESS)
+ omap_sham_cleanup(req);
+
+ dev_dbg(dd->dev, "final_req: err: %d\n", err);
+
+ return err;
+}
+
+static int omap_sham_finish_req_hmac(struct ahash_request *req)
+{
+ struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
+ struct omap_sham_hmac_ctx *bctx = tctx->base;
+ int bs = crypto_shash_blocksize(bctx->shash);
+ int ds = crypto_shash_digestsize(bctx->shash);
+ struct {
+ struct shash_desc shash;
+ char ctx[crypto_shash_descsize(bctx->shash)];
+ } desc;
+
+ desc.shash.tfm = bctx->shash;
+ desc.shash.flags = 0; /* not CRYPTO_TFM_REQ_MAY_SLEEP */
+
+ return crypto_shash_init(&desc.shash) ?:
+ crypto_shash_update(&desc.shash, bctx->opad, bs) ?:
+ crypto_shash_finup(&desc.shash, req->result, ds, req->result);
+}
+
+static void omap_sham_finish_req(struct ahash_request *req, int err)
+{
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+
+ if (!err) {
+ omap_sham_copy_hash(ctx->dd->req, 1);
+ if (ctx->flags & FLAGS_HMAC)
+ err = omap_sham_finish_req_hmac(req);
+ }
+
+ if (ctx->flags & FLAGS_FINAL)
+ omap_sham_cleanup(req);
+
+ clear_bit(FLAGS_BUSY, &ctx->dd->flags);
+
+ if (req->base.complete)
+ req->base.complete(&req->base, err);
+}
+
+static int omap_sham_handle_queue(struct omap_sham_dev *dd)
+{
+ struct crypto_async_request *async_req, *backlog;
+ struct omap_sham_reqctx *ctx;
+ struct ahash_request *req, *prev_req;
+ unsigned long flags;
+ int err = 0;
+
+ if (test_and_set_bit(FLAGS_BUSY, &dd->flags))
+ return 0;
+
+ spin_lock_irqsave(&dd->lock, flags);
+ backlog = crypto_get_backlog(&dd->queue);
+ async_req = crypto_dequeue_request(&dd->queue);
+ if (!async_req)
+ clear_bit(FLAGS_BUSY, &dd->flags);
+ spin_unlock_irqrestore(&dd->lock, flags);
+
+ if (!async_req)
+ return 0;
+
+ if (backlog)
+ backlog->complete(backlog, -EINPROGRESS);
+
+ req = ahash_request_cast(async_req);
+
+ prev_req = dd->req;
+ dd->req = req;
+
+ ctx = ahash_request_ctx(req);
+
+ dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
+ ctx->op, req->nbytes);
+
+ if (req != prev_req && ctx->digcnt)
+ /* request has changed - restore hash */
+ omap_sham_copy_hash(req, 0);
+
+ if (ctx->op == OP_UPDATE) {
+ err = omap_sham_update_req(dd);
+ if (err != -EINPROGRESS && (ctx->flags & FLAGS_FINUP))
+ /* no final() after finup() */
+ err = omap_sham_final_req(dd);
+ } else if (ctx->op == OP_FINAL) {
+ err = omap_sham_final_req(dd);
+ }
+
+ if (err != -EINPROGRESS) {
+ /* done_task will not finish it, so do it here */
+ omap_sham_finish_req(req, err);
+ tasklet_schedule(&dd->queue_task);
+ }
+
+ dev_dbg(dd->dev, "exit, err: %d\n", err);
+
+ return err;
+}
+
+static int omap_sham_enqueue(struct ahash_request *req, unsigned int op)
+{
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+ struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
+ struct omap_sham_dev *dd = tctx->dd;
+ unsigned long flags;
+ int err;
+
+ ctx->op = op;
+
+ spin_lock_irqsave(&dd->lock, flags);
+ err = ahash_enqueue_request(&dd->queue, req);
+ spin_unlock_irqrestore(&dd->lock, flags);
+
+ omap_sham_handle_queue(dd);
+
+ return err;
+}
+
+static int omap_sham_update(struct ahash_request *req)
+{
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+
+ if (!req->nbytes)
+ return 0;
+
+ ctx->total = req->nbytes;
+ ctx->sg = req->src;
+ ctx->offset = 0;
+
+ if (ctx->flags & FLAGS_FINUP) {
+ if ((ctx->digcnt + ctx->bufcnt + ctx->total) < 9) {
+ /*
+ * OMAP HW accel works only with buffers >= 9;
+ * will switch to software bypass in final(),
+ * which has the same request and data
+ */
+ omap_sham_append_sg(ctx);
+ return 0;
+ } else if (ctx->bufcnt + ctx->total <= 64) {
+ ctx->flags |= FLAGS_CPU;
+ } else if (!ctx->bufcnt && sg_is_last(ctx->sg)) {
+ /* maybe we can use faster functions */
+ int aligned = IS_ALIGNED((u32)ctx->sg->offset,
+ sizeof(u32));
+
+ if (aligned && (ctx->flags & FLAGS_FIRST))
+ /* digest: first and final */
+ ctx->flags |= FLAGS_FAST;
+
+ ctx->flags &= ~FLAGS_FIRST;
+ }
+ } else if (ctx->bufcnt + ctx->total <= ctx->buflen) {
+ /* if not finup -> not fast */
+ omap_sham_append_sg(ctx);
+ return 0;
+ }
+
+ return omap_sham_enqueue(req, OP_UPDATE);
+}
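
For illustration only (not part of the patch), a hedged summary of the path selection above, using made-up request sizes; buflen is PAGE_SIZE as set in omap_sham_init().

/* Assuming a finup() sequence (FLAGS_FINUP set) and an empty buffer:
 *   total = 4    -> digcnt + bufcnt + total < 9: just append to ctx->buffer;
 *                   final() will use the software fallback.
 *   total = 40   -> bufcnt + total <= 64: FLAGS_CPU, PIO into SHA_REG_DIN.
 *   total = 4096, single word-aligned SG entry, first update
 *                -> FLAGS_FAST, DMA straight from the scatterlist.
 * Without FLAGS_FINUP, anything that still fits in ctx->buffer (buflen =
 * PAGE_SIZE) is only appended and hashed later. */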
+
+static int omap_sham_shash_digest(struct crypto_shash *shash, u32 flags,
+ const u8 *data, unsigned int len, u8 *out)
+{
+ struct {
+ struct shash_desc shash;
+ char ctx[crypto_shash_descsize(shash)];
+ } desc;
+
+ desc.shash.tfm = shash;
+ desc.shash.flags = flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ return crypto_shash_digest(&desc.shash, data, len, out);
+}
+
+static int omap_sham_final_shash(struct ahash_request *req)
+{
+ struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+
+ return omap_sham_shash_digest(tctx->fallback, req->base.flags,
+ ctx->buffer, ctx->bufcnt, req->result);
+}
+
+static int omap_sham_final(struct ahash_request *req)
+{
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+ int err = 0;
+
+ ctx->flags |= FLAGS_FINUP;
+
+ /* OMAP HW accel works only with buffers >= 9 */
+ /* HMAC is always >= 9 because of ipad */
+ if ((ctx->digcnt + ctx->bufcnt) < 9)
+ err = omap_sham_final_shash(req);
+ else if (ctx->bufcnt)
+ return omap_sham_enqueue(req, OP_FINAL);
+
+ omap_sham_cleanup(req);
+
+ return err;
+}
+
+static int omap_sham_finup(struct ahash_request *req)
+{
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+ int err1, err2;
+
+ ctx->flags |= FLAGS_FINUP;
+
+ err1 = omap_sham_update(req);
+ if (err1 == -EINPROGRESS)
+ return err1;
+ /*
+ * final() always has to be called to clean up resources,
+ * even if update() failed, except for EINPROGRESS
+ */
+ err2 = omap_sham_final(req);
+
+ return err1 ?: err2;
+}
+
+static int omap_sham_digest(struct ahash_request *req)
+{
+ return omap_sham_init(req) ?: omap_sham_finup(req);
+}
+
+static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
+ struct omap_sham_hmac_ctx *bctx = tctx->base;
+ int bs = crypto_shash_blocksize(bctx->shash);
+ int ds = crypto_shash_digestsize(bctx->shash);
+ int err, i;
+ err = crypto_shash_setkey(tctx->fallback, key, keylen);
+ if (err)
+ return err;
+
+ if (keylen > bs) {
+ err = omap_sham_shash_digest(bctx->shash,
+ crypto_shash_get_flags(bctx->shash),
+ key, keylen, bctx->ipad);
+ if (err)
+ return err;
+ keylen = ds;
+ } else {
+ memcpy(bctx->ipad, key, keylen);
+ }
+
+ memset(bctx->ipad + keylen, 0, bs - keylen);
+ memcpy(bctx->opad, bctx->ipad, bs);
+
+ for (i = 0; i < bs; i++) {
+ bctx->ipad[i] ^= 0x36;
+ bctx->opad[i] ^= 0x5c;
+ }
+
+ return err;
+}
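
For reference, the setkey path above follows the standard RFC 2104 HMAC key schedule: a key longer than one block is first hashed down to the digest size via the fallback shash, then zero-padded to the block size and XORed with 0x36/0x5c to form ipad/opad. A minimal standalone sketch of that derivation, not driver code; the helper name is hypothetical and it assumes the key already fits in one block:

#include <stdint.h>
#include <string.h>

#define HMAC_BLOCK_SIZE 64	/* SHA1/MD5 block size, as in the driver */

/* Hedged sketch of the RFC 2104 pad derivation used by omap_sham_setkey().
 * Keys longer than HMAC_BLOCK_SIZE must be hashed down first (elided). */
static void hmac_derive_pads(const uint8_t *key, size_t keylen,
			     uint8_t ipad[HMAC_BLOCK_SIZE],
			     uint8_t opad[HMAC_BLOCK_SIZE])
{
	size_t i;

	memcpy(ipad, key, keylen);
	memset(ipad + keylen, 0, HMAC_BLOCK_SIZE - keylen);
	memcpy(opad, ipad, HMAC_BLOCK_SIZE);

	for (i = 0; i < HMAC_BLOCK_SIZE; i++) {
		ipad[i] ^= 0x36;	/* inner pad */
		opad[i] ^= 0x5c;	/* outer pad */
	}
}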
+
+static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
+{
+ struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);
+ const char *alg_name = crypto_tfm_alg_name(tfm);
+
+ /* Allocate a fallback and abort if it failed. */
+ tctx->fallback = crypto_alloc_shash(alg_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(tctx->fallback)) {
+ pr_err("omap-sham: fallback driver '%s' "
+ "could not be loaded.\n", alg_name);
+ return PTR_ERR(tctx->fallback);
+ }
+
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct omap_sham_reqctx));
+
+ if (alg_base) {
+ struct omap_sham_hmac_ctx *bctx = tctx->base;
+ tctx->flags |= FLAGS_HMAC;
+ bctx->shash = crypto_alloc_shash(alg_base, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(bctx->shash)) {
+ pr_err("omap-sham: base driver '%s' "
+ "could not be loaded.\n", alg_base);
+ crypto_free_shash(tctx->fallback);
+ return PTR_ERR(bctx->shash);
+ }
+
+ }
+
+ return 0;
+}
+
+static int omap_sham_cra_init(struct crypto_tfm *tfm)
+{
+ return omap_sham_cra_init_alg(tfm, NULL);
+}
+
+static int omap_sham_cra_sha1_init(struct crypto_tfm *tfm)
+{
+ return omap_sham_cra_init_alg(tfm, "sha1");
+}
+
+static int omap_sham_cra_md5_init(struct crypto_tfm *tfm)
+{
+ return omap_sham_cra_init_alg(tfm, "md5");
+}
+
+static void omap_sham_cra_exit(struct crypto_tfm *tfm)
+{
+ struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);
+
+ crypto_free_shash(tctx->fallback);
+ tctx->fallback = NULL;
+
+ if (tctx->flags & FLAGS_HMAC) {
+ struct omap_sham_hmac_ctx *bctx = tctx->base;
+ crypto_free_shash(bctx->shash);
+ }
+}
+
+static struct ahash_alg algs[] = {
+{
+ .init = omap_sham_init,
+ .update = omap_sham_update,
+ .final = omap_sham_final,
+ .finup = omap_sham_finup,
+ .digest = omap_sham_digest,
+ .halg.digestsize = SHA1_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "omap-sha1",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct omap_sham_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = omap_sham_cra_init,
+ .cra_exit = omap_sham_cra_exit,
+ }
+},
+{
+ .init = omap_sham_init,
+ .update = omap_sham_update,
+ .final = omap_sham_final,
+ .finup = omap_sham_finup,
+ .digest = omap_sham_digest,
+ .halg.digestsize = MD5_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "md5",
+ .cra_driver_name = "omap-md5",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct omap_sham_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = omap_sham_cra_init,
+ .cra_exit = omap_sham_cra_exit,
+ }
+},
+{
+ .init = omap_sham_init,
+ .update = omap_sham_update,
+ .final = omap_sham_final,
+ .finup = omap_sham_finup,
+ .digest = omap_sham_digest,
+ .setkey = omap_sham_setkey,
+ .halg.digestsize = SHA1_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "hmac(sha1)",
+ .cra_driver_name = "omap-hmac-sha1",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct omap_sham_ctx) +
+ sizeof(struct omap_sham_hmac_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = omap_sham_cra_sha1_init,
+ .cra_exit = omap_sham_cra_exit,
+ }
+},
+{
+ .init = omap_sham_init,
+ .update = omap_sham_update,
+ .final = omap_sham_final,
+ .finup = omap_sham_finup,
+ .digest = omap_sham_digest,
+ .setkey = omap_sham_setkey,
+ .halg.digestsize = MD5_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "hmac(md5)",
+ .cra_driver_name = "omap-hmac-md5",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct omap_sham_ctx) +
+ sizeof(struct omap_sham_hmac_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = omap_sham_cra_md5_init,
+ .cra_exit = omap_sham_cra_exit,
+ }
+}
+};
+
+static void omap_sham_done_task(unsigned long data)
+{
+ struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
+ struct ahash_request *req = dd->req;
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+ int ready = 1;
+
+ if (ctx->flags & FLAGS_OUTPUT_READY) {
+ ctx->flags &= ~FLAGS_OUTPUT_READY;
+ ready = 1;
+ }
+
+ if (dd->flags & FLAGS_DMA_ACTIVE) {
+ dd->flags &= ~FLAGS_DMA_ACTIVE;
+ omap_sham_update_dma_stop(dd);
+ omap_sham_update_dma_slow(dd);
+ }
+
+ if (ready && !(dd->flags & FLAGS_DMA_ACTIVE)) {
+ dev_dbg(dd->dev, "update done\n");
+ /* finish current request */
+ omap_sham_finish_req(req, 0);
+ /* start new request */
+ omap_sham_handle_queue(dd);
+ }
+}
+
+static void omap_sham_queue_task(unsigned long data)
+{
+ struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
+
+ omap_sham_handle_queue(dd);
+}
+
+static irqreturn_t omap_sham_irq(int irq, void *dev_id)
+{
+ struct omap_sham_dev *dd = dev_id;
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
+
+ if (!ctx) {
+ dev_err(dd->dev, "unknown interrupt.\n");
+ return IRQ_HANDLED;
+ }
+
+ if (unlikely(ctx->flags & FLAGS_FINAL))
+ /* final -> allow device to go to power-saving mode */
+ omap_sham_write_mask(dd, SHA_REG_CTRL, 0, SHA_REG_CTRL_LENGTH);
+
+ omap_sham_write_mask(dd, SHA_REG_CTRL, SHA_REG_CTRL_OUTPUT_READY,
+ SHA_REG_CTRL_OUTPUT_READY);
+ omap_sham_read(dd, SHA_REG_CTRL);
+
+ ctx->flags |= FLAGS_OUTPUT_READY;
+ tasklet_schedule(&dd->done_task);
+
+ return IRQ_HANDLED;
+}
+
+static void omap_sham_dma_callback(int lch, u16 ch_status, void *data)
+{
+ struct omap_sham_dev *dd = data;
+
+ if (likely(lch == dd->dma_lch))
+ tasklet_schedule(&dd->done_task);
+}
+
+static int omap_sham_dma_init(struct omap_sham_dev *dd)
+{
+ int err;
+
+ dd->dma_lch = -1;
+
+ err = omap_request_dma(dd->dma, dev_name(dd->dev),
+ omap_sham_dma_callback, dd, &dd->dma_lch);
+ if (err) {
+ dev_err(dd->dev, "Unable to request DMA channel\n");
+ return err;
+ }
+ omap_set_dma_dest_params(dd->dma_lch, 0,
+ OMAP_DMA_AMODE_CONSTANT,
+ dd->phys_base + SHA_REG_DIN(0), 0, 16);
+
+ omap_set_dma_dest_burst_mode(dd->dma_lch,
+ OMAP_DMA_DATA_BURST_16);
+
+ return 0;
+}
+
+static void omap_sham_dma_cleanup(struct omap_sham_dev *dd)
+{
+ if (dd->dma_lch >= 0) {
+ omap_free_dma(dd->dma_lch);
+ dd->dma_lch = -1;
+ }
+}
+
+static int __devinit omap_sham_probe(struct platform_device *pdev)
+{
+ struct omap_sham_dev *dd;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ int err, i, j;
+
+ dd = kzalloc(sizeof(struct omap_sham_dev), GFP_KERNEL);
+ if (dd == NULL) {
+ dev_err(dev, "unable to alloc data struct.\n");
+ err = -ENOMEM;
+ goto data_err;
+ }
+ dd->dev = dev;
+ platform_set_drvdata(pdev, dd);
+
+ INIT_LIST_HEAD(&dd->list);
+ spin_lock_init(&dd->lock);
+ tasklet_init(&dd->done_task, omap_sham_done_task, (unsigned long)dd);
+ tasklet_init(&dd->queue_task, omap_sham_queue_task, (unsigned long)dd);
+ crypto_init_queue(&dd->queue, OMAP_SHAM_QUEUE_LENGTH);
+
+ dd->irq = -1;
+
+ /* Get the base address */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "no MEM resource info\n");
+ err = -ENODEV;
+ goto res_err;
+ }
+ dd->phys_base = res->start;
+
+ /* Get the DMA */
+ res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+ if (!res) {
+ dev_err(dev, "no DMA resource info\n");
+ err = -ENODEV;
+ goto res_err;
+ }
+ dd->dma = res->start;
+
+ /* Get the IRQ */
+ dd->irq = platform_get_irq(pdev, 0);
+ if (dd->irq < 0) {
+ dev_err(dev, "no IRQ resource info\n");
+ err = dd->irq;
+ goto res_err;
+ }
+
+ err = request_irq(dd->irq, omap_sham_irq,
+ IRQF_TRIGGER_LOW, dev_name(dev), dd);
+ if (err) {
+ dev_err(dev, "unable to request irq.\n");
+ goto res_err;
+ }
+
+ err = omap_sham_dma_init(dd);
+ if (err)
+ goto dma_err;
+
+ /* Initializing the clock */
+ dd->iclk = clk_get(dev, "ick");
+ if (!dd->iclk) {
+ dev_err(dev, "clock initialization failed.\n");
+ err = -ENODEV;
+ goto clk_err;
+ }
+
+ dd->io_base = ioremap(dd->phys_base, SZ_4K);
+ if (!dd->io_base) {
+ dev_err(dev, "can't ioremap\n");
+ err = -ENOMEM;
+ goto io_err;
+ }
+
+ clk_enable(dd->iclk);
+ dev_info(dev, "hw accel on OMAP rev %u.%u\n",
+ (omap_sham_read(dd, SHA_REG_REV) & SHA_REG_REV_MAJOR) >> 4,
+ omap_sham_read(dd, SHA_REG_REV) & SHA_REG_REV_MINOR);
+ clk_disable(dd->iclk);
+
+ spin_lock(&sham.lock);
+ list_add_tail(&dd->list, &sham.dev_list);
+ spin_unlock(&sham.lock);
+
+ for (i = 0; i < ARRAY_SIZE(algs); i++) {
+ err = crypto_register_ahash(&algs[i]);
+ if (err)
+ goto err_algs;
+ }
+
+ return 0;
+
+err_algs:
+ for (j = 0; j < i; j++)
+ crypto_unregister_ahash(&algs[j]);
+ iounmap(dd->io_base);
+io_err:
+ clk_put(dd->iclk);
+clk_err:
+ omap_sham_dma_cleanup(dd);
+dma_err:
+ if (dd->irq >= 0)
+ free_irq(dd->irq, dd);
+res_err:
+ kfree(dd);
+ dd = NULL;
+data_err:
+ dev_err(dev, "initialization failed.\n");
+
+ return err;
+}
+
+static int __devexit omap_sham_remove(struct platform_device *pdev)
+{
+ static struct omap_sham_dev *dd;
+ int i;
+
+ dd = platform_get_drvdata(pdev);
+ if (!dd)
+ return -ENODEV;
+ spin_lock(&sham.lock);
+ list_del(&dd->list);
+ spin_unlock(&sham.lock);
+ for (i = 0; i < ARRAY_SIZE(algs); i++)
+ crypto_unregister_ahash(&algs[i]);
+ tasklet_kill(&dd->done_task);
+ tasklet_kill(&dd->queue_task);
+ iounmap(dd->io_base);
+ clk_put(dd->iclk);
+ omap_sham_dma_cleanup(dd);
+ if (dd->irq >= 0)
+ free_irq(dd->irq, dd);
+ kfree(dd);
+ dd = NULL;
+
+ return 0;
+}
+
+static struct platform_driver omap_sham_driver = {
+ .probe = omap_sham_probe,
+ .remove = omap_sham_remove,
+ .driver = {
+ .name = "omap-sham",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init omap_sham_mod_init(void)
+{
+ pr_info("loading %s driver\n", "omap-sham");
+
+ if (!cpu_class_is_omap2() ||
+ omap_type() != OMAP2_DEVICE_TYPE_SEC) {
+ pr_err("Unsupported cpu\n");
+ return -ENODEV;
+ }
+
+ return platform_driver_register(&omap_sham_driver);
+}
+
+static void __exit omap_sham_mod_exit(void)
+{
+ platform_driver_unregister(&omap_sham_driver);
+}
+
+module_init(omap_sham_mod_init);
+module_exit(omap_sham_mod_exit);
+
+MODULE_DESCRIPTION("OMAP SHA1/MD5 hw acceleration support.");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Dmitry Kasatkin");
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index dc558a097311..637c105f53d2 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -1,7 +1,7 @@
/*
* talitos - Freescale Integrated Security Engine (SEC) device driver
*
- * Copyright (c) 2008 Freescale Semiconductor, Inc.
+ * Copyright (c) 2008-2010 Freescale Semiconductor, Inc.
*
* Scatterlist Crypto API glue code copied from files with the following:
* Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
@@ -43,9 +43,12 @@
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/sha.h>
+#include <crypto/md5.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <crypto/skcipher.h>
+#include <crypto/hash.h>
+#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include "talitos.h"
@@ -65,6 +68,13 @@ struct talitos_ptr {
__be32 ptr; /* address */
};
+static const struct talitos_ptr zero_entry = {
+ .len = 0,
+ .j_extent = 0,
+ .eptr = 0,
+ .ptr = 0
+};
+
/* descriptor */
struct talitos_desc {
__be32 hdr; /* header high bits */
@@ -146,6 +156,7 @@ struct talitos_private {
/* .features flag */
#define TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT 0x00000001
#define TALITOS_FTR_HW_AUTH_CHECK 0x00000002
+#define TALITOS_FTR_SHA224_HWINIT 0x00000004
static void to_talitos_ptr(struct talitos_ptr *talitos_ptr, dma_addr_t dma_addr)
{
@@ -692,7 +703,7 @@ static void talitos_unregister_rng(struct device *dev)
#define TALITOS_MAX_KEY_SIZE 64
#define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
-#define MD5_DIGEST_SIZE 16
+#define MD5_BLOCK_SIZE 64
struct talitos_ctx {
struct device *dev;
@@ -705,6 +716,23 @@ struct talitos_ctx {
unsigned int authsize;
};
+#define HASH_MAX_BLOCK_SIZE SHA512_BLOCK_SIZE
+#define TALITOS_MDEU_MAX_CONTEXT_SIZE TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512
+
+struct talitos_ahash_req_ctx {
+ u64 count;
+ u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
+ unsigned int hw_context_size;
+ u8 buf[HASH_MAX_BLOCK_SIZE];
+ u8 bufnext[HASH_MAX_BLOCK_SIZE];
+ unsigned int swinit;
+ unsigned int first;
+ unsigned int last;
+ unsigned int to_hash_later;
+ struct scatterlist bufsl[2];
+ struct scatterlist *psrc;
+};
+
static int aead_setauthsize(struct crypto_aead *authenc,
unsigned int authsize)
{
@@ -821,10 +849,14 @@ static void talitos_sg_unmap(struct device *dev,
else
dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
- if (edesc->dst_is_chained)
- talitos_unmap_sg_chain(dev, dst, DMA_FROM_DEVICE);
- else
- dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
+ if (dst) {
+ if (edesc->dst_is_chained)
+ talitos_unmap_sg_chain(dev, dst,
+ DMA_FROM_DEVICE);
+ else
+ dma_unmap_sg(dev, dst, dst_nents,
+ DMA_FROM_DEVICE);
+ }
} else
if (edesc->src_is_chained)
talitos_unmap_sg_chain(dev, src, DMA_BIDIRECTIONAL);
@@ -1114,12 +1146,67 @@ static int sg_count(struct scatterlist *sg_list, int nbytes, int *chained)
return sg_nents;
}
+/**
+ * sg_copy_end_to_buffer - Copy end data from SG list to a linear buffer
+ * @sgl: The SG list
+ * @nents: Number of SG entries
+ * @buf: Where to copy to
+ * @buflen: The number of bytes to copy
+ * @skip: The number of bytes to skip before copying.
+ * Note: skip + buflen should equal SG total size.
+ *
+ * Returns the number of copied bytes.
+ *
+ **/
+static size_t sg_copy_end_to_buffer(struct scatterlist *sgl, unsigned int nents,
+ void *buf, size_t buflen, unsigned int skip)
+{
+ unsigned int offset = 0;
+ unsigned int boffset = 0;
+ struct sg_mapping_iter miter;
+ unsigned long flags;
+ unsigned int sg_flags = SG_MITER_ATOMIC;
+ size_t total_buffer = buflen + skip;
+
+ sg_flags |= SG_MITER_FROM_SG;
+
+ sg_miter_start(&miter, sgl, nents, sg_flags);
+
+ local_irq_save(flags);
+
+ while (sg_miter_next(&miter) && offset < total_buffer) {
+ unsigned int len;
+ unsigned int ignore;
+
+ if ((offset + miter.length) > skip) {
+ if (offset < skip) {
+ /* Copy part of this segment */
+ ignore = skip - offset;
+ len = miter.length - ignore;
+ memcpy(buf + boffset, miter.addr + ignore, len);
+ } else {
+ /* Copy all of this segment */
+ len = miter.length;
+ memcpy(buf + boffset, miter.addr, len);
+ }
+ boffset += len;
+ }
+ offset += miter.length;
+ }
+
+ sg_miter_stop(&miter);
+
+ local_irq_restore(flags);
+ return boffset;
+}
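
As a hedged aside (not part of the patch): ahash_process_req() below calls this helper with skip = nbytes - to_hash_later, so only the trailing partial block is saved. For a single flat segment the contract reduces to the following sketch, with made-up sizes:

#include <assert.h>
#include <string.h>

int main(void)
{
	unsigned char src[100] = { 0 }, tail[12];
	size_t nbytes = sizeof(src), to_hash_later = sizeof(tail);
	size_t skip = nbytes - to_hash_later;	/* bytes hashed now */

	/* Equivalent of sg_copy_end_to_buffer(sgl, nents, tail,
	 *                                     to_hash_later, skip)
	 * when the scatterlist is a single flat segment. */
	memcpy(tail, src + skip, to_hash_later);
	assert(skip + to_hash_later == nbytes);	/* skip + buflen == SG total */
	return 0;
}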
+
/*
* allocate and map the extended descriptor
*/
static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
struct scatterlist *src,
struct scatterlist *dst,
+ int hash_result,
unsigned int cryptlen,
unsigned int authsize,
int icv_stashing,
@@ -1139,11 +1226,16 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
src_nents = sg_count(src, cryptlen + authsize, &src_chained);
src_nents = (src_nents == 1) ? 0 : src_nents;
- if (dst == src) {
- dst_nents = src_nents;
+ if (hash_result) {
+ dst_nents = 0;
} else {
- dst_nents = sg_count(dst, cryptlen + authsize, &dst_chained);
- dst_nents = (dst_nents == 1) ? 0 : dst_nents;
+ if (dst == src) {
+ dst_nents = src_nents;
+ } else {
+ dst_nents = sg_count(dst, cryptlen + authsize,
+ &dst_chained);
+ dst_nents = (dst_nents == 1) ? 0 : dst_nents;
+ }
}
/*
@@ -1172,8 +1264,10 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
edesc->src_is_chained = src_chained;
edesc->dst_is_chained = dst_chained;
edesc->dma_len = dma_len;
- edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
- edesc->dma_len, DMA_BIDIRECTIONAL);
+ if (dma_len)
+ edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
+ edesc->dma_len,
+ DMA_BIDIRECTIONAL);
return edesc;
}
@@ -1184,7 +1278,7 @@ static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq,
struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
- return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
+ return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, 0,
areq->cryptlen, ctx->authsize, icv_stashing,
areq->base.flags);
}
@@ -1441,8 +1535,8 @@ static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
- return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, areq->nbytes,
- 0, 0, areq->base.flags);
+ return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, 0,
+ areq->nbytes, 0, 0, areq->base.flags);
}
static int ablkcipher_encrypt(struct ablkcipher_request *areq)
@@ -1478,15 +1572,329 @@ static int ablkcipher_decrypt(struct ablkcipher_request *areq)
return common_nonsnoop(edesc, areq, NULL, ablkcipher_done);
}
+static void common_nonsnoop_hash_unmap(struct device *dev,
+ struct talitos_edesc *edesc,
+ struct ahash_request *areq)
+{
+ struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+
+ unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
+
+ /* When using hashctx-in, must unmap it. */
+ if (edesc->desc.ptr[1].len)
+ unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
+ DMA_TO_DEVICE);
+
+ if (edesc->desc.ptr[2].len)
+ unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2],
+ DMA_TO_DEVICE);
+
+ talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL);
+
+ if (edesc->dma_len)
+ dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
+ DMA_BIDIRECTIONAL);
+
+}
+
+static void ahash_done(struct device *dev,
+ struct talitos_desc *desc, void *context,
+ int err)
+{
+ struct ahash_request *areq = context;
+ struct talitos_edesc *edesc =
+ container_of(desc, struct talitos_edesc, desc);
+ struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+
+ if (!req_ctx->last && req_ctx->to_hash_later) {
+ /* Position any partial block for next update/final/finup */
+ memcpy(req_ctx->buf, req_ctx->bufnext, req_ctx->to_hash_later);
+ }
+ common_nonsnoop_hash_unmap(dev, edesc, areq);
+
+ kfree(edesc);
+
+ areq->base.complete(&areq->base, err);
+}
+
+static int common_nonsnoop_hash(struct talitos_edesc *edesc,
+ struct ahash_request *areq, unsigned int length,
+ void (*callback) (struct device *dev,
+ struct talitos_desc *desc,
+ void *context, int error))
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+ struct device *dev = ctx->dev;
+ struct talitos_desc *desc = &edesc->desc;
+ int sg_count, ret;
+
+ /* first DWORD empty */
+ desc->ptr[0] = zero_entry;
+
+ /* hash context in */
+ if (!req_ctx->first || req_ctx->swinit) {
+ map_single_talitos_ptr(dev, &desc->ptr[1],
+ req_ctx->hw_context_size,
+ (char *)req_ctx->hw_context, 0,
+ DMA_TO_DEVICE);
+ req_ctx->swinit = 0;
+ } else {
+ desc->ptr[1] = zero_entry;
+ /* Indicate next op is not the first. */
+ req_ctx->first = 0;
+ }
+
+ /* HMAC key */
+ if (ctx->keylen)
+ map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
+ (char *)&ctx->key, 0, DMA_TO_DEVICE);
+ else
+ desc->ptr[2] = zero_entry;
+
+ /*
+ * data in
+ */
+ desc->ptr[3].len = cpu_to_be16(length);
+ desc->ptr[3].j_extent = 0;
+
+ sg_count = talitos_map_sg(dev, req_ctx->psrc,
+ edesc->src_nents ? : 1,
+ DMA_TO_DEVICE,
+ edesc->src_is_chained);
+
+ if (sg_count == 1) {
+ to_talitos_ptr(&desc->ptr[3], sg_dma_address(req_ctx->psrc));
+ } else {
+ sg_count = sg_to_link_tbl(req_ctx->psrc, sg_count, length,
+ &edesc->link_tbl[0]);
+ if (sg_count > 1) {
+ desc->ptr[3].j_extent |= DESC_PTR_LNKTBL_JUMP;
+ to_talitos_ptr(&desc->ptr[3], edesc->dma_link_tbl);
+ dma_sync_single_for_device(ctx->dev,
+ edesc->dma_link_tbl,
+ edesc->dma_len,
+ DMA_BIDIRECTIONAL);
+ } else {
+ /* Only one segment now, so no link tbl needed */
+ to_talitos_ptr(&desc->ptr[3],
+ sg_dma_address(req_ctx->psrc));
+ }
+ }
+
+ /* fifth DWORD empty */
+ desc->ptr[4] = zero_entry;
+
+ /* hash/HMAC out -or- hash context out */
+ if (req_ctx->last)
+ map_single_talitos_ptr(dev, &desc->ptr[5],
+ crypto_ahash_digestsize(tfm),
+ areq->result, 0, DMA_FROM_DEVICE);
+ else
+ map_single_talitos_ptr(dev, &desc->ptr[5],
+ req_ctx->hw_context_size,
+ req_ctx->hw_context, 0, DMA_FROM_DEVICE);
+
+ /* last DWORD empty */
+ desc->ptr[6] = zero_entry;
+
+ ret = talitos_submit(dev, desc, callback, areq);
+ if (ret != -EINPROGRESS) {
+ common_nonsnoop_hash_unmap(dev, edesc, areq);
+ kfree(edesc);
+ }
+ return ret;
+}
+
+static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
+ unsigned int nbytes)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+
+ return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, 1,
+ nbytes, 0, 0, areq->base.flags);
+}
+
+static int ahash_init(struct ahash_request *areq)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+
+ /* Initialize the context */
+ req_ctx->count = 0;
+ req_ctx->first = 1; /* first indicates h/w must init its context */
+ req_ctx->swinit = 0; /* assume h/w init of context */
+ req_ctx->hw_context_size =
+ (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
+ ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
+ : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
+
+ return 0;
+}
+
+/*
+ * on h/w without explicit sha224 support, we initialize h/w context
+ * manually with sha224 constants, and tell it to run sha256.
+ */
+static int ahash_init_sha224_swinit(struct ahash_request *areq)
+{
+ struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+
+ ahash_init(areq);
+ req_ctx->swinit = 1; /* prevent h/w initializing context with sha256 values */
+
+ req_ctx->hw_context[0] = cpu_to_be32(SHA224_H0);
+ req_ctx->hw_context[1] = cpu_to_be32(SHA224_H1);
+ req_ctx->hw_context[2] = cpu_to_be32(SHA224_H2);
+ req_ctx->hw_context[3] = cpu_to_be32(SHA224_H3);
+ req_ctx->hw_context[4] = cpu_to_be32(SHA224_H4);
+ req_ctx->hw_context[5] = cpu_to_be32(SHA224_H5);
+ req_ctx->hw_context[6] = cpu_to_be32(SHA224_H6);
+ req_ctx->hw_context[7] = cpu_to_be32(SHA224_H7);
+
+ /* init 64-bit count */
+ req_ctx->hw_context[8] = 0;
+ req_ctx->hw_context[9] = 0;
+
+ return 0;
+}
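
Background on the swinit trick above, stated as a hedged relation rather than patch content:

/* SHA-224 is the SHA-256 compression function started from a different
 * initial vector, with the final digest truncated to 28 bytes:
 *
 *	SHA224(M) = truncate_224( SHA256_core(M) with H0..H7 = SHA224_H0..H7 )
 *
 * Hence loading SHA224_H0..SHA224_H7 into hw_context[0..7], zeroing the
 * 64-bit length in hw_context[8..9] and running the MDEU in SHA-256 mode
 * yields a correct SHA-224 result once 28 bytes are taken. */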
+
+static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+ struct talitos_edesc *edesc;
+ unsigned int blocksize =
+ crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
+ unsigned int nbytes_to_hash;
+ unsigned int to_hash_later;
+ unsigned int index;
+ int chained;
+
+ index = req_ctx->count & (blocksize - 1);
+ req_ctx->count += nbytes;
+
+ if (!req_ctx->last && (index + nbytes) < blocksize) {
+ /* Buffer the partial block */
+ sg_copy_to_buffer(areq->src,
+ sg_count(areq->src, nbytes, &chained),
+ req_ctx->buf + index, nbytes);
+ return 0;
+ }
+
+ if (index) {
+ /* partial block from previous update; chain it in. */
+ sg_init_table(req_ctx->bufsl, (nbytes) ? 2 : 1);
+ sg_set_buf(req_ctx->bufsl, req_ctx->buf, index);
+ if (nbytes)
+ scatterwalk_sg_chain(req_ctx->bufsl, 2,
+ areq->src);
+ req_ctx->psrc = req_ctx->bufsl;
+ } else {
+ req_ctx->psrc = areq->src;
+ }
+ nbytes_to_hash = index + nbytes;
+ if (!req_ctx->last) {
+ to_hash_later = (nbytes_to_hash & (blocksize - 1));
+ if (to_hash_later) {
+ int nents;
+ /* Must copy to_hash_later bytes from the end
+ * to bufnext (a partial block) for later.
+ */
+ nents = sg_count(areq->src, nbytes, &chained);
+ sg_copy_end_to_buffer(areq->src, nents,
+ req_ctx->bufnext,
+ to_hash_later,
+ nbytes - to_hash_later);
+
+ /* Adjust count for what will be hashed now */
+ nbytes_to_hash -= to_hash_later;
+ }
+ req_ctx->to_hash_later = to_hash_later;
+ }
+
+ /* allocate extended descriptor */
+ edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
+ if (IS_ERR(edesc))
+ return PTR_ERR(edesc);
+
+ edesc->desc.hdr = ctx->desc_hdr_template;
+
+ /* On last one, request SEC to pad; otherwise continue */
+ if (req_ctx->last)
+ edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
+ else
+ edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;
+
+ /* request SEC to INIT hash. */
+ if (req_ctx->first && !req_ctx->swinit)
+ edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
+
+ /* When the tfm context has a keylen, it's an HMAC.
+ * A first or last (ie. not middle) descriptor must request HMAC.
+ */
+ if (ctx->keylen && (req_ctx->first || req_ctx->last))
+ edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
+
+ return common_nonsnoop_hash(edesc, areq, nbytes_to_hash,
+ ahash_done);
+}
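
A hedged worked example of the partial-block bookkeeping above, assuming a 64-byte block size (MD5/SHA-1/SHA-256) and made-up request sizes:

/* Suppose 10 bytes are already buffered (count = 10) and update() is
 * called with nbytes = 150, last = 0:
 *
 *	index          = count & (blocksize - 1)          = 10
 *	nbytes_to_hash = index + nbytes                   = 160
 *	to_hash_later  = nbytes_to_hash & (blocksize - 1) = 32
 *
 * so 160 - 32 = 128 bytes (two full blocks) go to the SEC now, and
 * sg_copy_end_to_buffer() saves the trailing 32 bytes of areq->src
 * (skip = 150 - 32 = 118) into bufnext for the next update()/final(). */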
+
+static int ahash_update(struct ahash_request *areq)
+{
+ struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+
+ req_ctx->last = 0;
+
+ return ahash_process_req(areq, areq->nbytes);
+}
+
+static int ahash_final(struct ahash_request *areq)
+{
+ struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+
+ req_ctx->last = 1;
+
+ return ahash_process_req(areq, 0);
+}
+
+static int ahash_finup(struct ahash_request *areq)
+{
+ struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+
+ req_ctx->last = 1;
+
+ return ahash_process_req(areq, areq->nbytes);
+}
+
+static int ahash_digest(struct ahash_request *areq)
+{
+ struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
+
+ ahash->init(areq);
+ req_ctx->last = 1;
+
+ return ahash_process_req(areq, areq->nbytes);
+}
+
struct talitos_alg_template {
- struct crypto_alg alg;
+ u32 type;
+ union {
+ struct crypto_alg crypto;
+ struct ahash_alg hash;
+ } alg;
__be32 desc_hdr_template;
};
static struct talitos_alg_template driver_algs[] = {
/* AEAD algorithms. These use a single-pass ipsec_esp descriptor */
- {
- .alg = {
+ { .type = CRYPTO_ALG_TYPE_AEAD,
+ .alg.crypto = {
.cra_name = "authenc(hmac(sha1),cbc(aes))",
.cra_driver_name = "authenc-hmac-sha1-cbc-aes-talitos",
.cra_blocksize = AES_BLOCK_SIZE,
@@ -1511,8 +1919,8 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEU_PAD |
DESC_HDR_MODE1_MDEU_SHA1_HMAC,
},
- {
- .alg = {
+ { .type = CRYPTO_ALG_TYPE_AEAD,
+ .alg.crypto = {
.cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
.cra_driver_name = "authenc-hmac-sha1-cbc-3des-talitos",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
@@ -1538,8 +1946,8 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEU_PAD |
DESC_HDR_MODE1_MDEU_SHA1_HMAC,
},
- {
- .alg = {
+ { .type = CRYPTO_ALG_TYPE_AEAD,
+ .alg.crypto = {
.cra_name = "authenc(hmac(sha256),cbc(aes))",
.cra_driver_name = "authenc-hmac-sha256-cbc-aes-talitos",
.cra_blocksize = AES_BLOCK_SIZE,
@@ -1564,8 +1972,8 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEU_PAD |
DESC_HDR_MODE1_MDEU_SHA256_HMAC,
},
- {
- .alg = {
+ { .type = CRYPTO_ALG_TYPE_AEAD,
+ .alg.crypto = {
.cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
.cra_driver_name = "authenc-hmac-sha256-cbc-3des-talitos",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
@@ -1591,8 +1999,8 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEU_PAD |
DESC_HDR_MODE1_MDEU_SHA256_HMAC,
},
- {
- .alg = {
+ { .type = CRYPTO_ALG_TYPE_AEAD,
+ .alg.crypto = {
.cra_name = "authenc(hmac(md5),cbc(aes))",
.cra_driver_name = "authenc-hmac-md5-cbc-aes-talitos",
.cra_blocksize = AES_BLOCK_SIZE,
@@ -1617,8 +2025,8 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEU_PAD |
DESC_HDR_MODE1_MDEU_MD5_HMAC,
},
- {
- .alg = {
+ { .type = CRYPTO_ALG_TYPE_AEAD,
+ .alg.crypto = {
.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
.cra_driver_name = "authenc-hmac-md5-cbc-3des-talitos",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
@@ -1645,8 +2053,8 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEU_MD5_HMAC,
},
/* ABLKCIPHER algorithms. */
- {
- .alg = {
+ { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .alg.crypto = {
.cra_name = "cbc(aes)",
.cra_driver_name = "cbc-aes-talitos",
.cra_blocksize = AES_BLOCK_SIZE,
@@ -1667,8 +2075,8 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_SEL0_AESU |
DESC_HDR_MODE0_AESU_CBC,
},
- {
- .alg = {
+ { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .alg.crypto = {
.cra_name = "cbc(des3_ede)",
.cra_driver_name = "cbc-3des-talitos",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
@@ -1689,14 +2097,140 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_SEL0_DEU |
DESC_HDR_MODE0_DEU_CBC |
DESC_HDR_MODE0_DEU_3DES,
- }
+ },
+ /* AHASH algorithms. */
+ { .type = CRYPTO_ALG_TYPE_AHASH,
+ .alg.hash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .halg.digestsize = MD5_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "md5",
+ .cra_driver_name = "md5-talitos",
+ .cra_blocksize = MD5_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC,
+ .cra_type = &crypto_ahash_type
+ }
+ },
+ .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+ DESC_HDR_SEL0_MDEUA |
+ DESC_HDR_MODE0_MDEU_MD5,
+ },
+ { .type = CRYPTO_ALG_TYPE_AHASH,
+ .alg.hash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .halg.digestsize = SHA1_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "sha1-talitos",
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC,
+ .cra_type = &crypto_ahash_type
+ }
+ },
+ .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+ DESC_HDR_SEL0_MDEUA |
+ DESC_HDR_MODE0_MDEU_SHA1,
+ },
+ { .type = CRYPTO_ALG_TYPE_AHASH,
+ .alg.hash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .halg.digestsize = SHA224_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "sha224",
+ .cra_driver_name = "sha224-talitos",
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC,
+ .cra_type = &crypto_ahash_type
+ }
+ },
+ .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+ DESC_HDR_SEL0_MDEUA |
+ DESC_HDR_MODE0_MDEU_SHA224,
+ },
+ { .type = CRYPTO_ALG_TYPE_AHASH,
+ .alg.hash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .halg.digestsize = SHA256_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "sha256-talitos",
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC,
+ .cra_type = &crypto_ahash_type
+ }
+ },
+ .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+ DESC_HDR_SEL0_MDEUA |
+ DESC_HDR_MODE0_MDEU_SHA256,
+ },
+ { .type = CRYPTO_ALG_TYPE_AHASH,
+ .alg.hash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .halg.digestsize = SHA384_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "sha384",
+ .cra_driver_name = "sha384-talitos",
+ .cra_blocksize = SHA384_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC,
+ .cra_type = &crypto_ahash_type
+ }
+ },
+ .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+ DESC_HDR_SEL0_MDEUB |
+ DESC_HDR_MODE0_MDEUB_SHA384,
+ },
+ { .type = CRYPTO_ALG_TYPE_AHASH,
+ .alg.hash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .halg.digestsize = SHA512_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "sha512",
+ .cra_driver_name = "sha512-talitos",
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC,
+ .cra_type = &crypto_ahash_type
+ }
+ },
+ .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+ DESC_HDR_SEL0_MDEUB |
+ DESC_HDR_MODE0_MDEUB_SHA512,
+ },
};
struct talitos_crypto_alg {
struct list_head entry;
struct device *dev;
- __be32 desc_hdr_template;
- struct crypto_alg crypto_alg;
+ struct talitos_alg_template algt;
};
static int talitos_cra_init(struct crypto_tfm *tfm)
@@ -1705,13 +2239,28 @@ static int talitos_cra_init(struct crypto_tfm *tfm)
struct talitos_crypto_alg *talitos_alg;
struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
- talitos_alg = container_of(alg, struct talitos_crypto_alg, crypto_alg);
+ if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
+ talitos_alg = container_of(__crypto_ahash_alg(alg),
+ struct talitos_crypto_alg,
+ algt.alg.hash);
+ else
+ talitos_alg = container_of(alg, struct talitos_crypto_alg,
+ algt.alg.crypto);
/* update context with ptr to dev */
ctx->dev = talitos_alg->dev;
/* copy descriptor header template value */
- ctx->desc_hdr_template = talitos_alg->desc_hdr_template;
+ ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;
+
+ return 0;
+}
+
+static int talitos_cra_init_aead(struct crypto_tfm *tfm)
+{
+ struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ talitos_cra_init(tfm);
/* random first IV */
get_random_bytes(ctx->iv, TALITOS_MAX_IV_LENGTH);
@@ -1719,6 +2268,19 @@ static int talitos_cra_init(struct crypto_tfm *tfm)
return 0;
}
+static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
+{
+ struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ talitos_cra_init(tfm);
+
+ ctx->keylen = 0;
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct talitos_ahash_req_ctx));
+
+ return 0;
+}
+
/*
* given the alg's descriptor header template, determine whether descriptor
* type and primary/secondary execution units required match the hw
@@ -1747,7 +2309,15 @@ static int talitos_remove(struct of_device *ofdev)
int i;
list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
- crypto_unregister_alg(&t_alg->crypto_alg);
+ switch (t_alg->algt.type) {
+ case CRYPTO_ALG_TYPE_ABLKCIPHER:
+ case CRYPTO_ALG_TYPE_AEAD:
+ crypto_unregister_alg(&t_alg->algt.alg.crypto);
+ break;
+ case CRYPTO_ALG_TYPE_AHASH:
+ crypto_unregister_ahash(&t_alg->algt.alg.hash);
+ break;
+ }
list_del(&t_alg->entry);
kfree(t_alg);
}
@@ -1781,6 +2351,7 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
struct talitos_alg_template
*template)
{
+ struct talitos_private *priv = dev_get_drvdata(dev);
struct talitos_crypto_alg *t_alg;
struct crypto_alg *alg;
@@ -1788,16 +2359,36 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
if (!t_alg)
return ERR_PTR(-ENOMEM);
- alg = &t_alg->crypto_alg;
- *alg = template->alg;
+ t_alg->algt = *template;
+
+ switch (t_alg->algt.type) {
+ case CRYPTO_ALG_TYPE_ABLKCIPHER:
+ alg = &t_alg->algt.alg.crypto;
+ alg->cra_init = talitos_cra_init;
+ break;
+ case CRYPTO_ALG_TYPE_AEAD:
+ alg = &t_alg->algt.alg.crypto;
+ alg->cra_init = talitos_cra_init_aead;
+ break;
+ case CRYPTO_ALG_TYPE_AHASH:
+ alg = &t_alg->algt.alg.hash.halg.base;
+ alg->cra_init = talitos_cra_init_ahash;
+ if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
+ !strcmp(alg->cra_name, "sha224")) {
+ t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
+ t_alg->algt.desc_hdr_template =
+ DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+ DESC_HDR_SEL0_MDEUA |
+ DESC_HDR_MODE0_MDEU_SHA256;
+ }
+ break;
+ }
alg->cra_module = THIS_MODULE;
- alg->cra_init = talitos_cra_init;
alg->cra_priority = TALITOS_CRA_PRIORITY;
alg->cra_alignmask = 0;
alg->cra_ctxsize = sizeof(struct talitos_ctx);
- t_alg->desc_hdr_template = template->desc_hdr_template;
t_alg->dev = dev;
return t_alg;
@@ -1807,7 +2398,7 @@ static int talitos_probe(struct of_device *ofdev,
const struct of_device_id *match)
{
struct device *dev = &ofdev->dev;
- struct device_node *np = ofdev->node;
+ struct device_node *np = ofdev->dev.of_node;
struct talitos_private *priv;
const unsigned int *prop;
int i, err;
@@ -1877,7 +2468,8 @@ static int talitos_probe(struct of_device *ofdev,
priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;
if (of_device_is_compatible(np, "fsl,sec2.1"))
- priv->features |= TALITOS_FTR_HW_AUTH_CHECK;
+ priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
+ TALITOS_FTR_SHA224_HWINIT;
priv->chan = kzalloc(sizeof(struct talitos_channel) *
priv->num_channels, GFP_KERNEL);
@@ -1931,6 +2523,7 @@ static int talitos_probe(struct of_device *ofdev,
for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
struct talitos_crypto_alg *t_alg;
+ char *name = NULL;
t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
if (IS_ERR(t_alg)) {
@@ -1938,15 +2531,27 @@ static int talitos_probe(struct of_device *ofdev,
goto err_out;
}
- err = crypto_register_alg(&t_alg->crypto_alg);
+ switch (t_alg->algt.type) {
+ case CRYPTO_ALG_TYPE_ABLKCIPHER:
+ case CRYPTO_ALG_TYPE_AEAD:
+ err = crypto_register_alg(
+ &t_alg->algt.alg.crypto);
+ name = t_alg->algt.alg.crypto.cra_driver_name;
+ break;
+ case CRYPTO_ALG_TYPE_AHASH:
+ err = crypto_register_ahash(
+ &t_alg->algt.alg.hash);
+ name =
+ t_alg->algt.alg.hash.halg.base.cra_driver_name;
+ break;
+ }
if (err) {
dev_err(dev, "%s alg registration failed\n",
- t_alg->crypto_alg.cra_driver_name);
+ name);
kfree(t_alg);
} else {
list_add_tail(&t_alg->entry, &priv->alg_list);
- dev_info(dev, "%s\n",
- t_alg->crypto_alg.cra_driver_name);
+ dev_info(dev, "%s\n", name);
}
}
}
@@ -1968,8 +2573,11 @@ static const struct of_device_id talitos_match[] = {
MODULE_DEVICE_TABLE(of, talitos_match);
static struct of_platform_driver talitos_driver = {
- .name = "talitos",
- .match_table = talitos_match,
+ .driver = {
+ .name = "talitos",
+ .owner = THIS_MODULE,
+ .of_match_table = talitos_match,
+ },
.probe = talitos_probe,
.remove = talitos_remove,
};
diff --git a/drivers/crypto/talitos.h b/drivers/crypto/talitos.h
index ff5a1450e145..0b746aca4587 100644
--- a/drivers/crypto/talitos.h
+++ b/drivers/crypto/talitos.h
@@ -1,7 +1,7 @@
/*
* Freescale SEC (talitos) device register and descriptor header defines
*
- * Copyright (c) 2006-2008 Freescale Semiconductor, Inc.
+ * Copyright (c) 2006-2010 Freescale Semiconductor, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -130,6 +130,9 @@
#define TALITOS_CRCUISR 0xf030 /* cyclic redundancy check unit*/
#define TALITOS_CRCUISR_LO 0xf034
+#define TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256 0x28
+#define TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512 0x48
+
/*
* talitos descriptor header (hdr) bits
*/
@@ -157,12 +160,16 @@
#define DESC_HDR_MODE0_AESU_CBC cpu_to_be32(0x00200000)
#define DESC_HDR_MODE0_DEU_CBC cpu_to_be32(0x00400000)
#define DESC_HDR_MODE0_DEU_3DES cpu_to_be32(0x00200000)
+#define DESC_HDR_MODE0_MDEU_CONT cpu_to_be32(0x08000000)
#define DESC_HDR_MODE0_MDEU_INIT cpu_to_be32(0x01000000)
#define DESC_HDR_MODE0_MDEU_HMAC cpu_to_be32(0x00800000)
#define DESC_HDR_MODE0_MDEU_PAD cpu_to_be32(0x00400000)
+#define DESC_HDR_MODE0_MDEU_SHA224 cpu_to_be32(0x00300000)
#define DESC_HDR_MODE0_MDEU_MD5 cpu_to_be32(0x00200000)
#define DESC_HDR_MODE0_MDEU_SHA256 cpu_to_be32(0x00100000)
#define DESC_HDR_MODE0_MDEU_SHA1 cpu_to_be32(0x00000000)
+#define DESC_HDR_MODE0_MDEUB_SHA384 cpu_to_be32(0x00000000)
+#define DESC_HDR_MODE0_MDEUB_SHA512 cpu_to_be32(0x00200000)
#define DESC_HDR_MODE0_MDEU_MD5_HMAC (DESC_HDR_MODE0_MDEU_MD5 | \
DESC_HDR_MODE0_MDEU_HMAC)
#define DESC_HDR_MODE0_MDEU_SHA256_HMAC (DESC_HDR_MODE0_MDEU_SHA256 | \
@@ -181,9 +188,12 @@
#define DESC_HDR_MODE1_MDEU_INIT cpu_to_be32(0x00001000)
#define DESC_HDR_MODE1_MDEU_HMAC cpu_to_be32(0x00000800)
#define DESC_HDR_MODE1_MDEU_PAD cpu_to_be32(0x00000400)
+#define DESC_HDR_MODE1_MDEU_SHA224 cpu_to_be32(0x00000300)
#define DESC_HDR_MODE1_MDEU_MD5 cpu_to_be32(0x00000200)
#define DESC_HDR_MODE1_MDEU_SHA256 cpu_to_be32(0x00000100)
#define DESC_HDR_MODE1_MDEU_SHA1 cpu_to_be32(0x00000000)
+#define DESC_HDR_MODE1_MDEUB_SHA384 cpu_to_be32(0x00000000)
+#define DESC_HDR_MODE1_MDEUB_SHA512 cpu_to_be32(0x00000200)
#define DESC_HDR_MODE1_MDEU_MD5_HMAC (DESC_HDR_MODE1_MDEU_MD5 | \
DESC_HDR_MODE1_MDEU_HMAC)
#define DESC_HDR_MODE1_MDEU_SHA256_HMAC (DESC_HDR_MODE1_MDEU_SHA256 | \
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index c27f80e5d531..9e01e96fee94 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -141,6 +141,13 @@ config COH901318
help
Enable support for ST-Ericsson COH 901 318 DMA.
+config STE_DMA40
+ bool "ST-Ericsson DMA40 support"
+ depends on ARCH_U8500
+ select DMA_ENGINE
+ help
+ Support for ST-Ericsson DMA40 controller
+
config AMCC_PPC440SPE_ADMA
tristate "AMCC PPC440SPe ADMA support"
depends on 440SPe || 440SP
@@ -149,9 +156,25 @@ config AMCC_PPC440SPE_ADMA
help
Enable support for the AMCC PPC440SPe RAID engines.
+config TIMB_DMA
+ tristate "Timberdale FPGA DMA support"
+ depends on MFD_TIMBERDALE || HAS_IOMEM
+ select DMA_ENGINE
+ help
+ Enable support for the Timberdale FPGA DMA engine.
+
config ARCH_HAS_ASYNC_TX_FIND_CHANNEL
bool
+config PL330_DMA
+ tristate "DMA API Driver for PL330"
+ select DMA_ENGINE
+ depends on PL330
+ help
+ Select if your platform has one or more PL330 DMACs.
+ You need to provide platform specific settings via
+ platform_data for a dma-pl330 device.
+
config DMA_ENGINE
bool
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 22bba3d5e2b6..0fe5ebbfda5d 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -20,3 +20,6 @@ obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
obj-$(CONFIG_SH_DMAE) += shdma.o
obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o
obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
+obj-$(CONFIG_TIMB_DMA) += timb_dma.o
+obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
+obj-$(CONFIG_PL330_DMA) += pl330.o
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 278cf5bceef2..bd5250e8c00c 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -760,13 +760,18 @@ err_desc_get:
return NULL;
}
-static void atc_terminate_all(struct dma_chan *chan)
+static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
{
struct at_dma_chan *atchan = to_at_dma_chan(chan);
struct at_dma *atdma = to_at_dma(chan->device);
struct at_desc *desc, *_desc;
LIST_HEAD(list);
+ /* Only supports DMA_TERMINATE_ALL */
+ if (cmd != DMA_TERMINATE_ALL)
+ return -ENXIO;
+
/*
* This is only called when something went wrong elsewhere, so
* we don't really care about the data. Just disable the
@@ -790,32 +795,30 @@ static void atc_terminate_all(struct dma_chan *chan)
/* Flush all pending and queued descriptors */
list_for_each_entry_safe(desc, _desc, &list, desc_node)
atc_chain_complete(atchan, desc);
+
+ return 0;
}
/**
- * atc_is_tx_complete - poll for transaction completion
+ * atc_tx_status - poll for transaction completion
* @chan: DMA channel
* @cookie: transaction identifier to check status of
- * @done: if not %NULL, updated with last completed transaction
- * @used: if not %NULL, updated with last used transaction
+ * @txstate: if not %NULL updated with transaction state
*
- * If @done and @used are passed in, upon return they reflect the driver
+ * If @txstate is passed in, upon return it reflects the driver
* internal state and can be used with dma_async_is_complete() to check
* the status of multiple cookies without re-checking hardware state.
*/
static enum dma_status
-atc_is_tx_complete(struct dma_chan *chan,
+atc_tx_status(struct dma_chan *chan,
dma_cookie_t cookie,
- dma_cookie_t *done, dma_cookie_t *used)
+ struct dma_tx_state *txstate)
{
struct at_dma_chan *atchan = to_at_dma_chan(chan);
dma_cookie_t last_used;
dma_cookie_t last_complete;
enum dma_status ret;
- dev_vdbg(chan2dev(chan), "is_tx_complete: %d (d%d, u%d)\n",
- cookie, done ? *done : 0, used ? *used : 0);
-
spin_lock_bh(&atchan->lock);
last_complete = atchan->completed_cookie;
@@ -833,10 +836,10 @@ atc_is_tx_complete(struct dma_chan *chan,
spin_unlock_bh(&atchan->lock);
- if (done)
- *done = last_complete;
- if (used)
- *used = last_used;
+ dma_set_tx_state(txstate, last_complete, last_used, 0);
+ dev_vdbg(chan2dev(chan), "tx_status: %d (d%d, u%d)\n",
+ cookie, last_complete ? last_complete : 0,
+ last_used ? last_used : 0);
return ret;
}
@@ -1082,7 +1085,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
/* set base routines */
atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources;
atdma->dma_common.device_free_chan_resources = atc_free_chan_resources;
- atdma->dma_common.device_is_tx_complete = atc_is_tx_complete;
+ atdma->dma_common.device_tx_status = atc_tx_status;
atdma->dma_common.device_issue_pending = atc_issue_pending;
atdma->dma_common.dev = &pdev->dev;
@@ -1092,7 +1095,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
- atdma->dma_common.device_terminate_all = atc_terminate_all;
+ atdma->dma_common.device_control = atc_control;
}
dma_writel(atdma, EN, AT_DMA_ENABLE);
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index 1656fdcdb6c2..a724e6be1b4d 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -37,7 +37,7 @@ struct coh901318_desc {
struct list_head node;
struct scatterlist *sg;
unsigned int sg_len;
- struct coh901318_lli *data;
+ struct coh901318_lli *lli;
enum dma_data_direction dir;
unsigned long flags;
};
@@ -283,7 +283,7 @@ static int coh901318_start(struct coh901318_chan *cohc)
}
static int coh901318_prep_linked_list(struct coh901318_chan *cohc,
- struct coh901318_lli *data)
+ struct coh901318_lli *lli)
{
int channel = cohc->id;
void __iomem *virtbase = cohc->base->virtbase;
@@ -292,18 +292,18 @@ static int coh901318_prep_linked_list(struct coh901318_chan *cohc,
COH901318_CX_STAT_SPACING*channel) &
COH901318_CX_STAT_ACTIVE);
- writel(data->src_addr,
+ writel(lli->src_addr,
virtbase + COH901318_CX_SRC_ADDR +
COH901318_CX_SRC_ADDR_SPACING * channel);
- writel(data->dst_addr, virtbase +
+ writel(lli->dst_addr, virtbase +
COH901318_CX_DST_ADDR +
COH901318_CX_DST_ADDR_SPACING * channel);
- writel(data->link_addr, virtbase + COH901318_CX_LNK_ADDR +
+ writel(lli->link_addr, virtbase + COH901318_CX_LNK_ADDR +
COH901318_CX_LNK_ADDR_SPACING * channel);
- writel(data->control, virtbase + COH901318_CX_CTRL +
+ writel(lli->control, virtbase + COH901318_CX_CTRL +
COH901318_CX_CTRL_SPACING * channel);
return 0;
@@ -408,33 +408,107 @@ coh901318_first_queued(struct coh901318_chan *cohc)
return d;
}
+static inline u32 coh901318_get_bytes_in_lli(struct coh901318_lli *in_lli)
+{
+ struct coh901318_lli *lli = in_lli;
+ u32 bytes = 0;
+
+ while (lli) {
+ bytes += lli->control & COH901318_CX_CTRL_TC_VALUE_MASK;
+ lli = lli->virt_link_addr;
+ }
+ return bytes;
+}
+
/*
- * DMA start/stop controls
+ * Get the number of bytes left to transfer on this channel,
+ * it is unwise to call this before stopping the channel for
+ * absolute measures, but for a rough guess you can still call
+ * it.
*/
-u32 coh901318_get_bytes_left(struct dma_chan *chan)
+static u32 coh901318_get_bytes_left(struct dma_chan *chan)
{
- unsigned long flags;
- u32 ret;
struct coh901318_chan *cohc = to_coh901318_chan(chan);
+ struct coh901318_desc *cohd;
+ struct list_head *pos;
+ unsigned long flags;
+ u32 left = 0;
+ int i = 0;
spin_lock_irqsave(&cohc->lock, flags);
- /* Read transfer count value */
- ret = readl(cohc->base->virtbase +
- COH901318_CX_CTRL+COH901318_CX_CTRL_SPACING *
- cohc->id) & COH901318_CX_CTRL_TC_VALUE_MASK;
+ /*
+ * If there are many queued jobs, we iterate and add the
+ * size of them all. We take a special look at the first
+ * job though, since it is probably active.
+ */
+ list_for_each(pos, &cohc->active) {
+ /*
+ * The first job in the list will be working on the
+ * hardware. The job can be stopped but still active,
+ * so that the transfer counter is somewhere inside
+ * the buffer.
+ */
+ cohd = list_entry(pos, struct coh901318_desc, node);
+
+ if (i == 0) {
+ struct coh901318_lli *lli;
+ dma_addr_t ladd;
+
+ /* Read current transfer count value */
+ left = readl(cohc->base->virtbase +
+ COH901318_CX_CTRL +
+ COH901318_CX_CTRL_SPACING * cohc->id) &
+ COH901318_CX_CTRL_TC_VALUE_MASK;
+
+ /* See if the transfer is linked... */
+ ladd = readl(cohc->base->virtbase +
+ COH901318_CX_LNK_ADDR +
+ COH901318_CX_LNK_ADDR_SPACING *
+ cohc->id) &
+ ~COH901318_CX_LNK_LINK_IMMEDIATE;
+ /* Single transaction */
+ if (!ladd)
+ continue;
+
+ /*
+ * Linked transaction, follow the lli, find the
+ * currently processing lli, and proceed to the next
+ */
+ lli = cohd->lli;
+ while (lli && lli->link_addr != ladd)
+ lli = lli->virt_link_addr;
+
+ if (lli)
+ lli = lli->virt_link_addr;
+
+ /*
+ * Follow remaining lli links around to count the total
+ * number of bytes left
+ */
+ left += coh901318_get_bytes_in_lli(lli);
+ } else {
+ left += coh901318_get_bytes_in_lli(cohd->lli);
+ }
+ i++;
+ }
+
+ /* Also count bytes in the queued jobs */
+ list_for_each(pos, &cohc->queue) {
+ cohd = list_entry(pos, struct coh901318_desc, node);
+ left += coh901318_get_bytes_in_lli(cohd->lli);
+ }
spin_unlock_irqrestore(&cohc->lock, flags);
- return ret;
+ return left;
}
-EXPORT_SYMBOL(coh901318_get_bytes_left);
-
-/* Stops a transfer without losing data. Enables power save.
- Use this function in conjunction with coh901318_continue(..)
-*/
-void coh901318_stop(struct dma_chan *chan)
+/*
+ * Pauses a transfer without losing data. Enables power save.
+ * Use this function in conjunction with coh901318_resume.
+ */
+static void coh901318_pause(struct dma_chan *chan)
{
u32 val;
unsigned long flags;
@@ -475,12 +549,11 @@ void coh901318_stop(struct dma_chan *chan)
spin_unlock_irqrestore(&cohc->lock, flags);
}
-EXPORT_SYMBOL(coh901318_stop);
-/* Continues a transfer that has been stopped via 300_dma_stop(..).
+/* Resumes a transfer that has been stopped via 300_dma_stop(..).
Power save is handled.
*/
-void coh901318_continue(struct dma_chan *chan)
+static void coh901318_resume(struct dma_chan *chan)
{
u32 val;
unsigned long flags;
@@ -506,7 +579,6 @@ void coh901318_continue(struct dma_chan *chan)
spin_unlock_irqrestore(&cohc->lock, flags);
}
-EXPORT_SYMBOL(coh901318_continue);
bool coh901318_filter_id(struct dma_chan *chan, void *chan_id)
{
@@ -565,29 +637,30 @@ static int coh901318_config(struct coh901318_chan *cohc,
*/
static struct coh901318_desc *coh901318_queue_start(struct coh901318_chan *cohc)
{
- struct coh901318_desc *cohd_que;
+ struct coh901318_desc *cohd;
- /* start queued jobs, if any
+ /*
+ * start queued jobs, if any
* TODO: transmit all queued jobs in one go
*/
- cohd_que = coh901318_first_queued(cohc);
+ cohd = coh901318_first_queued(cohc);
- if (cohd_que != NULL) {
+ if (cohd != NULL) {
/* Remove from queue */
- coh901318_desc_remove(cohd_que);
+ coh901318_desc_remove(cohd);
/* initiate DMA job */
cohc->busy = 1;
- coh901318_desc_submit(cohc, cohd_que);
+ coh901318_desc_submit(cohc, cohd);
- coh901318_prep_linked_list(cohc, cohd_que->data);
+ coh901318_prep_linked_list(cohc, cohd->lli);
- /* start dma job */
+ /* start dma job on this channel */
coh901318_start(cohc);
}
- return cohd_que;
+ return cohd;
}
/*
@@ -622,7 +695,7 @@ static void dma_tasklet(unsigned long data)
cohc->completed = cohd_fin->desc.cookie;
/* release the lli allocation and remove the descriptor */
- coh901318_lli_free(&cohc->base->pool, &cohd_fin->data);
+ coh901318_lli_free(&cohc->base->pool, &cohd_fin->lli);
/* return desc to free-list */
coh901318_desc_remove(cohd_fin);
@@ -666,23 +739,44 @@ static void dma_tasklet(unsigned long data)
/* called from interrupt context */
static void dma_tc_handle(struct coh901318_chan *cohc)
{
- BUG_ON(!cohc->allocated && (list_empty(&cohc->active) ||
- list_empty(&cohc->queue)));
-
- if (!cohc->allocated)
+ /*
+ * If the channel is not allocated, then we shouldn't have
+ * any TC interrupts on it.
+ */
+ if (!cohc->allocated) {
+ dev_err(COHC_2_DEV(cohc), "spurious interrupt from "
+ "unallocated channel\n");
return;
+ }
spin_lock(&cohc->lock);
+ /*
+ * When we reach this point, at least one queue item
+ * should have been moved over from cohc->queue to
+ * cohc->active and run to completion; that is why we are
+ * getting a terminal count interrupt in the first place.
+ * If you get this BUG() the most probable cause is that
+ * the individual nodes in the lli chain have IRQ enabled,
+ * so check your platform config for lli chain ctrl.
+ */
+ BUG_ON(list_empty(&cohc->active));
+
cohc->nbr_active_done++;
+ /*
+ * This attempts to take a job from cohc->queue, put it
+ * into cohc->active and start it.
+ */
if (coh901318_queue_start(cohc) == NULL)
cohc->busy = 0;
- BUG_ON(list_empty(&cohc->active));
-
spin_unlock(&cohc->lock);
+ /*
+ * This tasklet will remove items from cohc->active
+ * and thus terminate them.
+ */
if (cohc_chan_conf(cohc)->priority_high)
tasklet_hi_schedule(&cohc->tasklet);
else
@@ -809,6 +903,7 @@ static irqreturn_t dma_irq_handler(int irq, void *dev_id)
static int coh901318_alloc_chan_resources(struct dma_chan *chan)
{
struct coh901318_chan *cohc = to_coh901318_chan(chan);
+ unsigned long flags;
dev_vdbg(COHC_2_DEV(cohc), "[%s] DMA channel %d\n",
__func__, cohc->id);
@@ -816,11 +911,15 @@ static int coh901318_alloc_chan_resources(struct dma_chan *chan)
if (chan->client_count > 1)
return -EBUSY;
+ spin_lock_irqsave(&cohc->lock, flags);
+
coh901318_config(cohc, NULL);
cohc->allocated = 1;
cohc->completed = chan->cookie = 1;
+ spin_unlock_irqrestore(&cohc->lock, flags);
+
return 1;
}
@@ -843,7 +942,7 @@ coh901318_free_chan_resources(struct dma_chan *chan)
spin_unlock_irqrestore(&cohc->lock, flags);
- chan->device->device_terminate_all(chan);
+ chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
}
@@ -870,7 +969,7 @@ static struct dma_async_tx_descriptor *
coh901318_prep_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
size_t size, unsigned long flags)
{
- struct coh901318_lli *data;
+ struct coh901318_lli *lli;
struct coh901318_desc *cohd;
unsigned long flg;
struct coh901318_chan *cohc = to_coh901318_chan(chan);
@@ -892,23 +991,23 @@ coh901318_prep_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
if ((lli_len << MAX_DMA_PACKET_SIZE_SHIFT) < size)
lli_len++;
- data = coh901318_lli_alloc(&cohc->base->pool, lli_len);
+ lli = coh901318_lli_alloc(&cohc->base->pool, lli_len);
- if (data == NULL)
+ if (lli == NULL)
goto err;
ret = coh901318_lli_fill_memcpy(
- &cohc->base->pool, data, src, size, dest,
+ &cohc->base->pool, lli, src, size, dest,
cohc_chan_param(cohc)->ctrl_lli_chained,
ctrl_last);
if (ret)
goto err;
- COH_DBG(coh901318_list_print(cohc, data));
+ COH_DBG(coh901318_list_print(cohc, lli));
/* Pick a descriptor to handle this transfer */
cohd = coh901318_desc_get(cohc);
- cohd->data = data;
+ cohd->lli = lli;
cohd->flags = flags;
cohd->desc.tx_submit = coh901318_tx_submit;
@@ -926,7 +1025,7 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
unsigned long flags)
{
struct coh901318_chan *cohc = to_coh901318_chan(chan);
- struct coh901318_lli *data;
+ struct coh901318_lli *lli;
struct coh901318_desc *cohd;
const struct coh901318_params *params;
struct scatterlist *sg;
@@ -999,13 +1098,13 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
}
pr_debug("Allocate %d lli:s for this transfer\n", len);
- data = coh901318_lli_alloc(&cohc->base->pool, len);
+ lli = coh901318_lli_alloc(&cohc->base->pool, len);
- if (data == NULL)
+ if (lli == NULL)
goto err_dma_alloc;
- /* initiate allocated data list */
- ret = coh901318_lli_fill_sg(&cohc->base->pool, data, sgl, sg_len,
+ /* initiate allocated lli list */
+ ret = coh901318_lli_fill_sg(&cohc->base->pool, lli, sgl, sg_len,
cohc_dev_addr(cohc),
ctrl_chained,
ctrl,
@@ -1014,14 +1113,14 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
if (ret)
goto err_lli_fill;
- COH_DBG(coh901318_list_print(cohc, data));
+ COH_DBG(coh901318_list_print(cohc, lli));
/* Pick a descriptor to handle this transfer */
cohd = coh901318_desc_get(cohc);
cohd->dir = direction;
cohd->flags = flags;
cohd->desc.tx_submit = coh901318_tx_submit;
- cohd->data = data;
+ cohd->lli = lli;
spin_unlock_irqrestore(&cohc->lock, flg);
@@ -1035,9 +1134,8 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
}
static enum dma_status
-coh901318_is_tx_complete(struct dma_chan *chan,
- dma_cookie_t cookie, dma_cookie_t *done,
- dma_cookie_t *used)
+coh901318_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
{
struct coh901318_chan *cohc = to_coh901318_chan(chan);
dma_cookie_t last_used;
@@ -1049,10 +1147,10 @@ coh901318_is_tx_complete(struct dma_chan *chan,
ret = dma_async_is_complete(cookie, last_complete, last_used);
- if (done)
- *done = last_complete;
- if (used)
- *used = last_used;
+ dma_set_tx_state(txstate, last_complete, last_used,
+ coh901318_get_bytes_left(chan));
+ if (ret == DMA_IN_PROGRESS && cohc->stopped)
+ ret = DMA_PAUSED;
return ret;
}
@@ -1065,23 +1163,42 @@ coh901318_issue_pending(struct dma_chan *chan)
spin_lock_irqsave(&cohc->lock, flags);
- /* Busy means that pending jobs are already being processed */
+ /*
+ * Busy means that pending jobs are already being processed,
+ * so there is no point in starting the queue: the
+ * terminal count interrupt on the channel will take the next
+ * job on the queue and execute it anyway.
+ */
if (!cohc->busy)
coh901318_queue_start(cohc);
spin_unlock_irqrestore(&cohc->lock, flags);
}
-static void
-coh901318_terminate_all(struct dma_chan *chan)
+static int
+coh901318_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
{
unsigned long flags;
struct coh901318_chan *cohc = to_coh901318_chan(chan);
struct coh901318_desc *cohd;
void __iomem *virtbase = cohc->base->virtbase;
- coh901318_stop(chan);
+ if (cmd == DMA_PAUSE) {
+ coh901318_pause(chan);
+ return 0;
+ }
+
+ if (cmd == DMA_RESUME) {
+ coh901318_resume(chan);
+ return 0;
+ }
+ if (cmd != DMA_TERMINATE_ALL)
+ return -ENXIO;
+
+ /* The remainder of this function terminates the transfer */
+ coh901318_pause(chan);
spin_lock_irqsave(&cohc->lock, flags);
/* Clear any pending BE or TC interrupt */
@@ -1099,7 +1216,7 @@ coh901318_terminate_all(struct dma_chan *chan)
while ((cohd = coh901318_first_active_get(cohc))) {
/* release the lli allocation*/
- coh901318_lli_free(&cohc->base->pool, &cohd->data);
+ coh901318_lli_free(&cohc->base->pool, &cohd->lli);
/* return desc to free-list */
coh901318_desc_remove(cohd);
@@ -1108,7 +1225,7 @@ coh901318_terminate_all(struct dma_chan *chan)
while ((cohd = coh901318_first_queued(cohc))) {
/* release the lli allocation*/
- coh901318_lli_free(&cohc->base->pool, &cohd->data);
+ coh901318_lli_free(&cohc->base->pool, &cohd->lli);
/* return desc to free-list */
coh901318_desc_remove(cohd);
@@ -1120,6 +1237,8 @@ coh901318_terminate_all(struct dma_chan *chan)
cohc->busy = 0;
spin_unlock_irqrestore(&cohc->lock, flags);
+
+ return 0;
}
void coh901318_base_init(struct dma_device *dma, const int *pick_chans,
struct coh901318_base *base)
@@ -1235,9 +1354,9 @@ static int __init coh901318_probe(struct platform_device *pdev)
base->dma_slave.device_alloc_chan_resources = coh901318_alloc_chan_resources;
base->dma_slave.device_free_chan_resources = coh901318_free_chan_resources;
base->dma_slave.device_prep_slave_sg = coh901318_prep_slave_sg;
- base->dma_slave.device_is_tx_complete = coh901318_is_tx_complete;
+ base->dma_slave.device_tx_status = coh901318_tx_status;
base->dma_slave.device_issue_pending = coh901318_issue_pending;
- base->dma_slave.device_terminate_all = coh901318_terminate_all;
+ base->dma_slave.device_control = coh901318_control;
base->dma_slave.dev = &pdev->dev;
err = dma_async_device_register(&base->dma_slave);
@@ -1255,9 +1374,9 @@ static int __init coh901318_probe(struct platform_device *pdev)
base->dma_memcpy.device_alloc_chan_resources = coh901318_alloc_chan_resources;
base->dma_memcpy.device_free_chan_resources = coh901318_free_chan_resources;
base->dma_memcpy.device_prep_dma_memcpy = coh901318_prep_memcpy;
- base->dma_memcpy.device_is_tx_complete = coh901318_is_tx_complete;
+ base->dma_memcpy.device_tx_status = coh901318_tx_status;
base->dma_memcpy.device_issue_pending = coh901318_issue_pending;
- base->dma_memcpy.device_terminate_all = coh901318_terminate_all;
+ base->dma_memcpy.device_control = coh901318_control;
base->dma_memcpy.dev = &pdev->dev;
/*
* This controller can only access address at even 32bit boundaries,
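For orientation, the coh901318 changes above replace the exported stop/continue helpers with the generic device_control() callback. A hypothetical client-side sketch (the channel, the function name and the error handling are illustrative assumptions, not part of this patch) of how pause and resume would now be driven:

	/* chan is assumed to come from dma_request_channel() elsewhere */
	static int my_pause_and_resume(struct dma_chan *chan)
	{
		int ret;

		/* stop the transfer without discarding in-flight data */
		ret = chan->device->device_control(chan, DMA_PAUSE, 0);
		if (ret)
			return ret; /* e.g. -ENXIO if only DMA_TERMINATE_ALL is supported */

		/* ... reprogram the peripheral ... */

		return chan->device->device_control(chan, DMA_RESUME, 0);
	}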
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index d18b5d069d7e..9d31d5eb95c1 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -515,7 +515,6 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v
break;
if (--device->privatecnt == 0)
dma_cap_clear(DMA_PRIVATE, device->cap_mask);
- chan->private = NULL;
chan = NULL;
}
}
@@ -537,7 +536,6 @@ void dma_release_channel(struct dma_chan *chan)
/* drop PRIVATE cap enabled by __dma_request_channel() */
if (--chan->device->privatecnt == 0)
dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
- chan->private = NULL;
mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL_GPL(dma_release_channel);
@@ -695,11 +693,11 @@ int dma_async_device_register(struct dma_device *device)
BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
!device->device_prep_slave_sg);
BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
- !device->device_terminate_all);
+ !device->device_control);
BUG_ON(!device->device_alloc_chan_resources);
BUG_ON(!device->device_free_chan_resources);
- BUG_ON(!device->device_is_tx_complete);
+ BUG_ON(!device->device_tx_status);
BUG_ON(!device->device_issue_pending);
BUG_ON(!device->dev);
@@ -978,7 +976,9 @@ void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
struct dma_chan *chan)
{
tx->chan = chan;
+ #ifndef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
spin_lock_init(&tx->lock);
+ #endif
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);
@@ -1011,7 +1011,7 @@ EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
*/
void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
{
- struct dma_async_tx_descriptor *dep = tx->next;
+ struct dma_async_tx_descriptor *dep = txd_next(tx);
struct dma_async_tx_descriptor *dep_next;
struct dma_chan *chan;
@@ -1019,7 +1019,7 @@ void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
return;
/* we'll submit tx->next now, so clear the link */
- tx->next = NULL;
+ txd_clear_next(tx);
chan = dep->chan;
/* keep submitting up until a channel switch is detected
@@ -1027,14 +1027,14 @@ void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
* processing the interrupt from async_tx_channel_switch
*/
for (; dep; dep = dep_next) {
- spin_lock_bh(&dep->lock);
- dep->parent = NULL;
- dep_next = dep->next;
+ txd_lock(dep);
+ txd_clear_parent(dep);
+ dep_next = txd_next(dep);
if (dep_next && dep_next->chan == chan)
- dep->next = NULL; /* ->next will be submitted */
+ txd_clear_next(dep); /* ->next will be submitted */
else
dep_next = NULL; /* submit current dep and terminate */
- spin_unlock_bh(&dep->lock);
+ txd_unlock(dep);
dep->tx_submit(dep);
}
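The dmaengine.c checks above now require device_tx_status instead of device_is_tx_complete, and the status callback fills a struct dma_tx_state that carries a residue field. A rough client-side sketch of consuming it (the helper name and the pr_debug are illustrative, not from this patch):

	/* illustrative poll; chan and cookie are assumed to exist elsewhere */
	static bool my_transfer_pending(struct dma_chan *chan, dma_cookie_t cookie)
	{
		struct dma_tx_state state;
		enum dma_status status;

		status = chan->device->device_tx_status(chan, cookie, &state);
		if (status == DMA_SUCCESS)
			return false;

		/* residue is the number of bytes still outstanding; coh901318
		 * above fills it in, several other drivers still report 0 */
		pr_debug("cookie %d: %u bytes left\n", cookie, state.residue);
		return true;
	}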
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index d28369f7afd2..a3991ab0d67e 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -781,13 +781,18 @@ err_desc_get:
return NULL;
}
-static void dwc_terminate_all(struct dma_chan *chan)
+static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
{
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
struct dw_dma *dw = to_dw_dma(chan->device);
struct dw_desc *desc, *_desc;
LIST_HEAD(list);
+ /* Only supports DMA_TERMINATE_ALL */
+ if (cmd != DMA_TERMINATE_ALL)
+ return -ENXIO;
+
/*
* This is only called when something went wrong elsewhere, so
* we don't really care about the data. Just disable the
@@ -810,12 +815,14 @@ static void dwc_terminate_all(struct dma_chan *chan)
/* Flush all pending and queued descriptors */
list_for_each_entry_safe(desc, _desc, &list, desc_node)
dwc_descriptor_complete(dwc, desc);
+
+ return 0;
}
static enum dma_status
-dwc_is_tx_complete(struct dma_chan *chan,
- dma_cookie_t cookie,
- dma_cookie_t *done, dma_cookie_t *used)
+dwc_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
{
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
dma_cookie_t last_used;
@@ -835,10 +842,7 @@ dwc_is_tx_complete(struct dma_chan *chan,
ret = dma_async_is_complete(cookie, last_complete, last_used);
}
- if (done)
- *done = last_complete;
- if (used)
- *used = last_used;
+ dma_set_tx_state(txstate, last_complete, last_used, 0);
return ret;
}
@@ -1338,9 +1342,9 @@ static int __init dw_probe(struct platform_device *pdev)
dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;
dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;
- dw->dma.device_terminate_all = dwc_terminate_all;
+ dw->dma.device_control = dwc_control;
- dw->dma.device_is_tx_complete = dwc_is_tx_complete;
+ dw->dma.device_tx_status = dwc_tx_status;
dw->dma.device_issue_pending = dwc_issue_pending;
dma_writel(dw, CFG, DW_CFG_DMA_EN);
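The dw_dmac conversion above shows the pattern the rest of this series follows for controllers that only know how to terminate: the old terminate_all callback becomes a device_control callback that rejects every other command. A minimal, hypothetical skeleton (the foo_ name is invented here):

	static int foo_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			       unsigned long arg)
	{
		/* this controller implements no pause/resume */
		if (cmd != DMA_TERMINATE_ALL)
			return -ENXIO;

		/* ... disable the channel and flush its descriptors ... */
		return 0;
	}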
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 88f470f0d820..8088b14ba5f7 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -775,13 +775,18 @@ fail:
return NULL;
}
-static void fsl_dma_device_terminate_all(struct dma_chan *dchan)
+static int fsl_dma_device_control(struct dma_chan *dchan,
+ enum dma_ctrl_cmd cmd, unsigned long arg)
{
struct fsldma_chan *chan;
unsigned long flags;
+ /* Only supports DMA_TERMINATE_ALL */
+ if (cmd != DMA_TERMINATE_ALL)
+ return -ENXIO;
+
if (!dchan)
- return;
+ return -EINVAL;
chan = to_fsl_chan(dchan);
@@ -795,6 +800,8 @@ static void fsl_dma_device_terminate_all(struct dma_chan *dchan)
fsldma_free_desc_list(chan, &chan->ld_running);
spin_unlock_irqrestore(&chan->desc_lock, flags);
+
+ return 0;
}
/**
@@ -965,13 +972,12 @@ static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan)
}
/**
- * fsl_dma_is_complete - Determine the DMA status
+ * fsl_tx_status - Determine the DMA status
* @chan : Freescale DMA channel
*/
-static enum dma_status fsl_dma_is_complete(struct dma_chan *dchan,
+static enum dma_status fsl_tx_status(struct dma_chan *dchan,
dma_cookie_t cookie,
- dma_cookie_t *done,
- dma_cookie_t *used)
+ struct dma_tx_state *txstate)
{
struct fsldma_chan *chan = to_fsl_chan(dchan);
dma_cookie_t last_used;
@@ -982,11 +988,7 @@ static enum dma_status fsl_dma_is_complete(struct dma_chan *dchan,
last_used = dchan->cookie;
last_complete = chan->completed_cookie;
- if (done)
- *done = last_complete;
-
- if (used)
- *used = last_used;
+ dma_set_tx_state(txstate, last_complete, last_used, 0);
return dma_async_is_complete(cookie, last_complete, last_used);
}
@@ -1313,7 +1315,7 @@ static int __devinit fsldma_of_probe(struct of_device *op,
INIT_LIST_HEAD(&fdev->common.channels);
/* ioremap the registers for use */
- fdev->regs = of_iomap(op->node, 0);
+ fdev->regs = of_iomap(op->dev.of_node, 0);
if (!fdev->regs) {
dev_err(&op->dev, "unable to ioremap registers\n");
err = -ENOMEM;
@@ -1321,7 +1323,7 @@ static int __devinit fsldma_of_probe(struct of_device *op,
}
/* map the channel IRQ if it exists, but don't hookup the handler yet */
- fdev->irq = irq_of_parse_and_map(op->node, 0);
+ fdev->irq = irq_of_parse_and_map(op->dev.of_node, 0);
dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask);
@@ -1330,10 +1332,10 @@ static int __devinit fsldma_of_probe(struct of_device *op,
fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt;
fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
- fdev->common.device_is_tx_complete = fsl_dma_is_complete;
+ fdev->common.device_tx_status = fsl_tx_status;
fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg;
- fdev->common.device_terminate_all = fsl_dma_device_terminate_all;
+ fdev->common.device_control = fsl_dma_device_control;
fdev->common.dev = &op->dev;
dev_set_drvdata(&op->dev, fdev);
@@ -1343,7 +1345,7 @@ static int __devinit fsldma_of_probe(struct of_device *op,
* of_platform_bus_remove(). Instead, we manually instantiate every DMA
* channel object.
*/
- for_each_child_of_node(op->node, child) {
+ for_each_child_of_node(op->dev.of_node, child) {
if (of_device_is_compatible(child, "fsl,eloplus-dma-channel")) {
fsl_dma_chan_probe(fdev, child,
FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN,
@@ -1409,10 +1411,13 @@ static const struct of_device_id fsldma_of_ids[] = {
};
static struct of_platform_driver fsldma_of_driver = {
- .name = "fsl-elo-dma",
- .match_table = fsldma_of_ids,
- .probe = fsldma_of_probe,
- .remove = fsldma_of_remove,
+ .driver = {
+ .name = "fsl-elo-dma",
+ .owner = THIS_MODULE,
+ .of_match_table = fsldma_of_ids,
+ },
+ .probe = fsldma_of_probe,
+ .remove = fsldma_of_remove,
};
/*----------------------------------------------------------------------------*/
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 3e5a8005c62b..c9213ead4a26 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -727,18 +727,18 @@ static void ioat1_timer_event(unsigned long data)
}
enum dma_status
-ioat_is_dma_complete(struct dma_chan *c, dma_cookie_t cookie,
- dma_cookie_t *done, dma_cookie_t *used)
+ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
{
struct ioat_chan_common *chan = to_chan_common(c);
struct ioatdma_device *device = chan->device;
- if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS)
+ if (ioat_tx_status(c, cookie, txstate) == DMA_SUCCESS)
return DMA_SUCCESS;
device->cleanup_fn((unsigned long) c);
- return ioat_is_complete(c, cookie, done, used);
+ return ioat_tx_status(c, cookie, txstate);
}
static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat)
@@ -858,7 +858,7 @@ int __devinit ioat_dma_self_test(struct ioatdma_device *device)
tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
if (tmo == 0 ||
- dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL)
+ dma->device_tx_status(dma_chan, cookie, NULL)
!= DMA_SUCCESS) {
dev_err(dev, "Self-test copy timed out, disabling\n");
err = -ENODEV;
@@ -1199,7 +1199,7 @@ int __devinit ioat1_dma_probe(struct ioatdma_device *device, int dca)
dma->device_issue_pending = ioat1_dma_memcpy_issue_pending;
dma->device_alloc_chan_resources = ioat1_dma_alloc_chan_resources;
dma->device_free_chan_resources = ioat1_dma_free_chan_resources;
- dma->device_is_tx_complete = ioat_is_dma_complete;
+ dma->device_tx_status = ioat_dma_tx_status;
err = ioat_probe(device);
if (err)
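The ioat self-test above passes a NULL txstate to device_tx_status(); that is safe because the helper the drivers use to fill the state tolerates a NULL pointer. Its definition is not part of this excerpt, but it is presumably a small inline along these lines (shown here only as a sketch):

	static inline void dma_set_tx_state(struct dma_tx_state *st,
			dma_cookie_t last, dma_cookie_t used, u32 residue)
	{
		if (st) {
			st->last = last;
			st->used = used;
			st->residue = residue;
		}
	}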
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 86b97ac8774e..6d3a73b57e54 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -96,6 +96,7 @@ struct ioat_chan_common {
#define IOAT_COMPLETION_ACK 1
#define IOAT_RESET_PENDING 2
#define IOAT_KOBJ_INIT_FAIL 3
+ #define IOAT_RESHAPE_PENDING 4
struct timer_list timer;
#define COMPLETION_TIMEOUT msecs_to_jiffies(100)
#define IDLE_TIMEOUT msecs_to_jiffies(2000)
@@ -142,15 +143,14 @@ static inline struct ioat_dma_chan *to_ioat_chan(struct dma_chan *c)
}
/**
- * ioat_is_complete - poll the status of an ioat transaction
+ * ioat_tx_status - poll the status of an ioat transaction
* @c: channel handle
* @cookie: transaction identifier
- * @done: if set, updated with last completed transaction
- * @used: if set, updated with last used transaction
+ * @txstate: if set, updated with the transaction state
*/
static inline enum dma_status
-ioat_is_complete(struct dma_chan *c, dma_cookie_t cookie,
- dma_cookie_t *done, dma_cookie_t *used)
+ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
{
struct ioat_chan_common *chan = to_chan_common(c);
dma_cookie_t last_used;
@@ -159,10 +159,7 @@ ioat_is_complete(struct dma_chan *c, dma_cookie_t cookie,
last_used = c->cookie;
last_complete = chan->completed_cookie;
- if (done)
- *done = last_complete;
- if (used)
- *used = last_used;
+ dma_set_tx_state(txstate, last_complete, last_used, 0);
return dma_async_is_complete(cookie, last_complete, last_used);
}
@@ -338,8 +335,8 @@ struct dca_provider * __devinit ioat_dca_init(struct pci_dev *pdev,
unsigned long ioat_get_current_completion(struct ioat_chan_common *chan);
void ioat_init_channel(struct ioatdma_device *device,
struct ioat_chan_common *chan, int idx);
-enum dma_status ioat_is_dma_complete(struct dma_chan *c, dma_cookie_t cookie,
- dma_cookie_t *done, dma_cookie_t *used);
+enum dma_status ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
+ struct dma_tx_state *txstate);
void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
size_t len, struct ioat_dma_descriptor *hw);
bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index b5ae56c211e6..3c8b32a83794 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -56,8 +56,6 @@ void __ioat2_issue_pending(struct ioat2_dma_chan *ioat)
ioat->dmacount += ioat2_ring_pending(ioat);
ioat->issued = ioat->head;
- /* make descriptor updates globally visible before notifying channel */
- wmb();
writew(ioat->dmacount, chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
dev_dbg(to_dev(chan),
"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
@@ -69,9 +67,9 @@ void ioat2_issue_pending(struct dma_chan *c)
struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
if (ioat2_ring_pending(ioat)) {
- spin_lock_bh(&ioat->ring_lock);
+ spin_lock_bh(&ioat->prep_lock);
__ioat2_issue_pending(ioat);
- spin_unlock_bh(&ioat->ring_lock);
+ spin_unlock_bh(&ioat->prep_lock);
}
}
@@ -80,7 +78,7 @@ void ioat2_issue_pending(struct dma_chan *c)
* @ioat: ioat2+ channel
*
* Check if the number of unsubmitted descriptors has exceeded the
- * watermark. Called with ring_lock held
+ * watermark. Called with prep_lock held
*/
static void ioat2_update_pending(struct ioat2_dma_chan *ioat)
{
@@ -92,7 +90,6 @@ static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
{
struct ioat_ring_ent *desc;
struct ioat_dma_descriptor *hw;
- int idx;
if (ioat2_ring_space(ioat) < 1) {
dev_err(to_dev(&ioat->base),
@@ -102,8 +99,7 @@ static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
dev_dbg(to_dev(&ioat->base), "%s: head: %#x tail: %#x issued: %#x\n",
__func__, ioat->head, ioat->tail, ioat->issued);
- idx = ioat2_desc_alloc(ioat, 1);
- desc = ioat2_get_ring_ent(ioat, idx);
+ desc = ioat2_get_ring_ent(ioat, ioat->head);
hw = desc->hw;
hw->ctl = 0;
@@ -117,14 +113,16 @@ static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
async_tx_ack(&desc->txd);
ioat2_set_chainaddr(ioat, desc->txd.phys);
dump_desc_dbg(ioat, desc);
+ wmb();
+ ioat->head += 1;
__ioat2_issue_pending(ioat);
}
static void ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
{
- spin_lock_bh(&ioat->ring_lock);
+ spin_lock_bh(&ioat->prep_lock);
__ioat2_start_null_desc(ioat);
- spin_unlock_bh(&ioat->ring_lock);
+ spin_unlock_bh(&ioat->prep_lock);
}
static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
@@ -134,15 +132,16 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
struct ioat_ring_ent *desc;
bool seen_current = false;
u16 active;
- int i;
+ int idx = ioat->tail, i;
dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n",
__func__, ioat->head, ioat->tail, ioat->issued);
active = ioat2_ring_active(ioat);
for (i = 0; i < active && !seen_current; i++) {
- prefetch(ioat2_get_ring_ent(ioat, ioat->tail + i + 1));
- desc = ioat2_get_ring_ent(ioat, ioat->tail + i);
+ smp_read_barrier_depends();
+ prefetch(ioat2_get_ring_ent(ioat, idx + i + 1));
+ desc = ioat2_get_ring_ent(ioat, idx + i);
tx = &desc->txd;
dump_desc_dbg(ioat, desc);
if (tx->cookie) {
@@ -158,11 +157,12 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
if (tx->phys == phys_complete)
seen_current = true;
}
- ioat->tail += i;
+ smp_mb(); /* finish all descriptor reads before incrementing tail */
+ ioat->tail = idx + i;
BUG_ON(active && !seen_current); /* no active descs have written a completion? */
chan->last_completion = phys_complete;
- if (ioat->head == ioat->tail) {
+ if (active - i == 0) {
dev_dbg(to_dev(chan), "%s: cancel completion timeout\n",
__func__);
clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
@@ -179,24 +179,9 @@ static void ioat2_cleanup(struct ioat2_dma_chan *ioat)
struct ioat_chan_common *chan = &ioat->base;
unsigned long phys_complete;
- prefetch(chan->completion);
-
- if (!spin_trylock_bh(&chan->cleanup_lock))
- return;
-
- if (!ioat_cleanup_preamble(chan, &phys_complete)) {
- spin_unlock_bh(&chan->cleanup_lock);
- return;
- }
-
- if (!spin_trylock_bh(&ioat->ring_lock)) {
- spin_unlock_bh(&chan->cleanup_lock);
- return;
- }
-
- __cleanup(ioat, phys_complete);
-
- spin_unlock_bh(&ioat->ring_lock);
+ spin_lock_bh(&chan->cleanup_lock);
+ if (ioat_cleanup_preamble(chan, &phys_complete))
+ __cleanup(ioat, phys_complete);
spin_unlock_bh(&chan->cleanup_lock);
}
@@ -287,12 +272,10 @@ void ioat2_timer_event(unsigned long data)
struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
struct ioat_chan_common *chan = &ioat->base;
- spin_lock_bh(&chan->cleanup_lock);
if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
unsigned long phys_complete;
u64 status;
- spin_lock_bh(&ioat->ring_lock);
status = ioat_chansts(chan);
/* when halted due to errors check for channel
@@ -311,26 +294,31 @@ void ioat2_timer_event(unsigned long data)
* acknowledged a pending completion once, then be more
* forceful with a restart
*/
- if (ioat_cleanup_preamble(chan, &phys_complete))
+ spin_lock_bh(&chan->cleanup_lock);
+ if (ioat_cleanup_preamble(chan, &phys_complete)) {
__cleanup(ioat, phys_complete);
- else if (test_bit(IOAT_COMPLETION_ACK, &chan->state))
+ } else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) {
+ spin_lock_bh(&ioat->prep_lock);
ioat2_restart_channel(ioat);
- else {
+ spin_unlock_bh(&ioat->prep_lock);
+ } else {
set_bit(IOAT_COMPLETION_ACK, &chan->state);
mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
}
- spin_unlock_bh(&ioat->ring_lock);
+ spin_unlock_bh(&chan->cleanup_lock);
} else {
u16 active;
/* if the ring is idle, empty, and oversized try to step
* down the size
*/
- spin_lock_bh(&ioat->ring_lock);
+ spin_lock_bh(&chan->cleanup_lock);
+ spin_lock_bh(&ioat->prep_lock);
active = ioat2_ring_active(ioat);
if (active == 0 && ioat->alloc_order > ioat_get_alloc_order())
reshape_ring(ioat, ioat->alloc_order-1);
- spin_unlock_bh(&ioat->ring_lock);
+ spin_unlock_bh(&ioat->prep_lock);
+ spin_unlock_bh(&chan->cleanup_lock);
/* keep shrinking until we get back to our minimum
* default size
@@ -338,7 +326,6 @@ void ioat2_timer_event(unsigned long data)
if (ioat->alloc_order > ioat_get_alloc_order())
mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
}
- spin_unlock_bh(&chan->cleanup_lock);
}
static int ioat2_reset_hw(struct ioat_chan_common *chan)
@@ -392,7 +379,7 @@ int ioat2_enumerate_channels(struct ioatdma_device *device)
ioat_init_channel(device, &ioat->base, i);
ioat->xfercap_log = xfercap_log;
- spin_lock_init(&ioat->ring_lock);
+ spin_lock_init(&ioat->prep_lock);
if (device->reset_hw(&ioat->base)) {
i = 0;
break;
@@ -418,8 +405,17 @@ static dma_cookie_t ioat2_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state))
mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
+
+ /* make descriptor updates visible before advancing ioat->head;
+ * this is purposefully not smp_wmb() since we are also
+ * publishing the descriptor updates to a dma device
+ */
+ wmb();
+
+ ioat->head += ioat->produce;
+
ioat2_update_pending(ioat);
- spin_unlock_bh(&ioat->ring_lock);
+ spin_unlock_bh(&ioat->prep_lock);
return cookie;
}
@@ -531,13 +527,15 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
if (!ring)
return -ENOMEM;
- spin_lock_bh(&ioat->ring_lock);
+ spin_lock_bh(&chan->cleanup_lock);
+ spin_lock_bh(&ioat->prep_lock);
ioat->ring = ring;
ioat->head = 0;
ioat->issued = 0;
ioat->tail = 0;
ioat->alloc_order = order;
- spin_unlock_bh(&ioat->ring_lock);
+ spin_unlock_bh(&ioat->prep_lock);
+ spin_unlock_bh(&chan->cleanup_lock);
tasklet_enable(&chan->cleanup_task);
ioat2_start_null_desc(ioat);
@@ -553,7 +551,7 @@ bool reshape_ring(struct ioat2_dma_chan *ioat, int order)
*/
struct ioat_chan_common *chan = &ioat->base;
struct dma_chan *c = &chan->common;
- const u16 curr_size = ioat2_ring_mask(ioat) + 1;
+ const u16 curr_size = ioat2_ring_size(ioat);
const u16 active = ioat2_ring_active(ioat);
const u16 new_size = 1 << order;
struct ioat_ring_ent **ring;
@@ -653,54 +651,61 @@ bool reshape_ring(struct ioat2_dma_chan *ioat, int order)
}
/**
- * ioat2_alloc_and_lock - common descriptor alloc boilerplate for ioat2,3 ops
- * @idx: gets starting descriptor index on successful allocation
+ * ioat2_check_space_lock - verify space and grab ring producer lock
* @ioat: ioat2,3 channel (ring) to operate on
* @num_descs: allocation length
*/
-int ioat2_alloc_and_lock(u16 *idx, struct ioat2_dma_chan *ioat, int num_descs)
+int ioat2_check_space_lock(struct ioat2_dma_chan *ioat, int num_descs)
{
struct ioat_chan_common *chan = &ioat->base;
+ bool retry;
- spin_lock_bh(&ioat->ring_lock);
+ retry:
+ spin_lock_bh(&ioat->prep_lock);
/* never allow the last descriptor to be consumed, we need at
* least one free at all times to allow for on-the-fly ring
* resizing.
*/
- while (unlikely(ioat2_ring_space(ioat) <= num_descs)) {
- if (reshape_ring(ioat, ioat->alloc_order + 1) &&
- ioat2_ring_space(ioat) > num_descs)
- break;
-
- if (printk_ratelimit())
- dev_dbg(to_dev(chan),
- "%s: ring full! num_descs: %d (%x:%x:%x)\n",
- __func__, num_descs, ioat->head, ioat->tail,
- ioat->issued);
- spin_unlock_bh(&ioat->ring_lock);
-
- /* progress reclaim in the allocation failure case we
- * may be called under bh_disabled so we need to trigger
- * the timer event directly
- */
- spin_lock_bh(&chan->cleanup_lock);
- if (jiffies > chan->timer.expires &&
- timer_pending(&chan->timer)) {
- struct ioatdma_device *device = chan->device;
-
- mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
- spin_unlock_bh(&chan->cleanup_lock);
- device->timer_fn((unsigned long) &chan->common);
- } else
- spin_unlock_bh(&chan->cleanup_lock);
- return -ENOMEM;
+ if (likely(ioat2_ring_space(ioat) > num_descs)) {
+ dev_dbg(to_dev(chan), "%s: num_descs: %d (%x:%x:%x)\n",
+ __func__, num_descs, ioat->head, ioat->tail, ioat->issued);
+ ioat->produce = num_descs;
+ return 0; /* with ioat->prep_lock held */
}
+ retry = test_and_set_bit(IOAT_RESHAPE_PENDING, &chan->state);
+ spin_unlock_bh(&ioat->prep_lock);
- dev_dbg(to_dev(chan), "%s: num_descs: %d (%x:%x:%x)\n",
- __func__, num_descs, ioat->head, ioat->tail, ioat->issued);
+ /* is another cpu already trying to expand the ring? */
+ if (retry)
+ goto retry;
- *idx = ioat2_desc_alloc(ioat, num_descs);
- return 0; /* with ioat->ring_lock held */
+ spin_lock_bh(&chan->cleanup_lock);
+ spin_lock_bh(&ioat->prep_lock);
+ retry = reshape_ring(ioat, ioat->alloc_order + 1);
+ clear_bit(IOAT_RESHAPE_PENDING, &chan->state);
+ spin_unlock_bh(&ioat->prep_lock);
+ spin_unlock_bh(&chan->cleanup_lock);
+
+ /* if we were able to expand the ring, retry the allocation */
+ if (retry)
+ goto retry;
+
+ if (printk_ratelimit())
+ dev_dbg(to_dev(chan), "%s: ring full! num_descs: %d (%x:%x:%x)\n",
+ __func__, num_descs, ioat->head, ioat->tail, ioat->issued);
+
+ /* progress reclaim in the allocation failure case; we may be
+ * called under bh_disabled so we need to trigger the timer
+ * event directly
+ */
+ if (jiffies > chan->timer.expires && timer_pending(&chan->timer)) {
+ struct ioatdma_device *device = chan->device;
+
+ mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
+ device->timer_fn((unsigned long) &chan->common);
+ }
+
+ return -ENOMEM;
}
struct dma_async_tx_descriptor *
@@ -713,14 +718,11 @@ ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
dma_addr_t dst = dma_dest;
dma_addr_t src = dma_src;
size_t total_len = len;
- int num_descs;
- u16 idx;
- int i;
+ int num_descs, idx, i;
num_descs = ioat2_xferlen_to_descs(ioat, len);
- if (likely(num_descs) &&
- ioat2_alloc_and_lock(&idx, ioat, num_descs) == 0)
- /* pass */;
+ if (likely(num_descs) && ioat2_check_space_lock(ioat, num_descs) == 0)
+ idx = ioat->head;
else
return NULL;
i = 0;
@@ -777,7 +779,8 @@ void ioat2_free_chan_resources(struct dma_chan *c)
device->cleanup_fn((unsigned long) c);
device->reset_hw(chan);
- spin_lock_bh(&ioat->ring_lock);
+ spin_lock_bh(&chan->cleanup_lock);
+ spin_lock_bh(&ioat->prep_lock);
descs = ioat2_ring_space(ioat);
dev_dbg(to_dev(chan), "freeing %d idle descriptors\n", descs);
for (i = 0; i < descs; i++) {
@@ -800,7 +803,8 @@ void ioat2_free_chan_resources(struct dma_chan *c)
ioat->alloc_order = 0;
pci_pool_free(device->completion_pool, chan->completion,
chan->completion_dma);
- spin_unlock_bh(&ioat->ring_lock);
+ spin_unlock_bh(&ioat->prep_lock);
+ spin_unlock_bh(&chan->cleanup_lock);
chan->last_completion = 0;
chan->completion_dma = 0;
@@ -855,7 +859,7 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca)
dma->device_issue_pending = ioat2_issue_pending;
dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
dma->device_free_chan_resources = ioat2_free_chan_resources;
- dma->device_is_tx_complete = ioat_is_dma_complete;
+ dma->device_tx_status = ioat_tx_status;
err = ioat_probe(device);
if (err)
diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h
index ef2871fd7868..a2c413b2b8d8 100644
--- a/drivers/dma/ioat/dma_v2.h
+++ b/drivers/dma/ioat/dma_v2.h
@@ -22,6 +22,7 @@
#define IOATDMA_V2_H
#include <linux/dmaengine.h>
+#include <linux/circ_buf.h>
#include "dma.h"
#include "hw.h"
@@ -49,8 +50,9 @@ extern int ioat_ring_alloc_order;
* @tail: cleanup index
* @dmacount: identical to 'head' except for occasionally resetting to zero
* @alloc_order: log2 of the number of allocated descriptors
+ * @produce: number of descriptors to produce at submit time
* @ring: software ring buffer implementation of hardware ring
- * @ring_lock: protects ring attributes
+ * @prep_lock: serializes descriptor preparation (producers)
*/
struct ioat2_dma_chan {
struct ioat_chan_common base;
@@ -60,8 +62,9 @@ struct ioat2_dma_chan {
u16 tail;
u16 dmacount;
u16 alloc_order;
+ u16 produce;
struct ioat_ring_ent **ring;
- spinlock_t ring_lock;
+ spinlock_t prep_lock;
};
static inline struct ioat2_dma_chan *to_ioat2_chan(struct dma_chan *c)
@@ -71,38 +74,26 @@ static inline struct ioat2_dma_chan *to_ioat2_chan(struct dma_chan *c)
return container_of(chan, struct ioat2_dma_chan, base);
}
-static inline u16 ioat2_ring_mask(struct ioat2_dma_chan *ioat)
+static inline u16 ioat2_ring_size(struct ioat2_dma_chan *ioat)
{
- return (1 << ioat->alloc_order) - 1;
+ return 1 << ioat->alloc_order;
}
/* count of descriptors in flight with the engine */
static inline u16 ioat2_ring_active(struct ioat2_dma_chan *ioat)
{
- return (ioat->head - ioat->tail) & ioat2_ring_mask(ioat);
+ return CIRC_CNT(ioat->head, ioat->tail, ioat2_ring_size(ioat));
}
/* count of descriptors pending submission to hardware */
static inline u16 ioat2_ring_pending(struct ioat2_dma_chan *ioat)
{
- return (ioat->head - ioat->issued) & ioat2_ring_mask(ioat);
+ return CIRC_CNT(ioat->head, ioat->issued, ioat2_ring_size(ioat));
}
static inline u16 ioat2_ring_space(struct ioat2_dma_chan *ioat)
{
- u16 num_descs = ioat2_ring_mask(ioat) + 1;
- u16 active = ioat2_ring_active(ioat);
-
- BUG_ON(active > num_descs);
-
- return num_descs - active;
-}
-
-/* assumes caller already checked space */
-static inline u16 ioat2_desc_alloc(struct ioat2_dma_chan *ioat, u16 len)
-{
- ioat->head += len;
- return ioat->head - len;
+ return ioat2_ring_size(ioat) - ioat2_ring_active(ioat);
}
static inline u16 ioat2_xferlen_to_descs(struct ioat2_dma_chan *ioat, size_t len)
@@ -151,7 +142,7 @@ struct ioat_ring_ent {
static inline struct ioat_ring_ent *
ioat2_get_ring_ent(struct ioat2_dma_chan *ioat, u16 idx)
{
- return ioat->ring[idx & ioat2_ring_mask(ioat)];
+ return ioat->ring[idx & (ioat2_ring_size(ioat) - 1)];
}
static inline void ioat2_set_chainaddr(struct ioat2_dma_chan *ioat, u64 addr)
@@ -168,7 +159,7 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *dev, int dca);
int __devinit ioat3_dma_probe(struct ioatdma_device *dev, int dca);
struct dca_provider * __devinit ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase);
struct dca_provider * __devinit ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase);
-int ioat2_alloc_and_lock(u16 *idx, struct ioat2_dma_chan *ioat, int num_descs);
+int ioat2_check_space_lock(struct ioat2_dma_chan *ioat, int num_descs);
int ioat2_enumerate_channels(struct ioatdma_device *device);
struct dma_async_tx_descriptor *
ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
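The dma_v2.h hunks above drop the mask-based helpers in favour of CIRC_CNT() over a power-of-two ring whose head/tail counters free-run and are only masked when used as indices. A standalone sketch of the resulting accounting (the helper name and values are illustrative):

	#include <linux/circ_buf.h>

	/* illustrative only, mirrors ioat2_ring_space() above */
	static u16 example_ring_space(u16 head, u16 tail, u16 alloc_order)
	{
		u16 size = 1 << alloc_order;

		/* CIRC_CNT(head, tail, size) == (head - tail) & (size - 1),
		 * so wrap-around of the free-running counters is harmless */
		return size - CIRC_CNT(head, tail, size);
	}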
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index 6740e319c9cf..1cdd22e1051b 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -260,8 +260,8 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
struct ioat_chan_common *chan = &ioat->base;
struct ioat_ring_ent *desc;
bool seen_current = false;
+ int idx = ioat->tail, i;
u16 active;
- int i;
dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n",
__func__, ioat->head, ioat->tail, ioat->issued);
@@ -270,13 +270,14 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
for (i = 0; i < active && !seen_current; i++) {
struct dma_async_tx_descriptor *tx;
- prefetch(ioat2_get_ring_ent(ioat, ioat->tail + i + 1));
- desc = ioat2_get_ring_ent(ioat, ioat->tail + i);
+ smp_read_barrier_depends();
+ prefetch(ioat2_get_ring_ent(ioat, idx + i + 1));
+ desc = ioat2_get_ring_ent(ioat, idx + i);
dump_desc_dbg(ioat, desc);
tx = &desc->txd;
if (tx->cookie) {
chan->completed_cookie = tx->cookie;
- ioat3_dma_unmap(ioat, desc, ioat->tail + i);
+ ioat3_dma_unmap(ioat, desc, idx + i);
tx->cookie = 0;
if (tx->callback) {
tx->callback(tx->callback_param);
@@ -293,69 +294,30 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
i++;
}
}
- ioat->tail += i;
+ smp_mb(); /* finish all descriptor reads before incrementing tail */
+ ioat->tail = idx + i;
BUG_ON(active && !seen_current); /* no active descs have written a completion? */
chan->last_completion = phys_complete;
- active = ioat2_ring_active(ioat);
- if (active == 0) {
+ if (active - i == 0) {
dev_dbg(to_dev(chan), "%s: cancel completion timeout\n",
__func__);
clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
}
/* 5 microsecond delay per pending descriptor */
- writew(min((5 * active), IOAT_INTRDELAY_MASK),
+ writew(min((5 * (active - i)), IOAT_INTRDELAY_MASK),
chan->device->reg_base + IOAT_INTRDELAY_OFFSET);
}
-/* try to cleanup, but yield (via spin_trylock) to incoming submissions
- * with the expectation that we will immediately poll again shortly
- */
-static void ioat3_cleanup_poll(struct ioat2_dma_chan *ioat)
+static void ioat3_cleanup(struct ioat2_dma_chan *ioat)
{
struct ioat_chan_common *chan = &ioat->base;
unsigned long phys_complete;
- prefetch(chan->completion);
-
- if (!spin_trylock_bh(&chan->cleanup_lock))
- return;
-
- if (!ioat_cleanup_preamble(chan, &phys_complete)) {
- spin_unlock_bh(&chan->cleanup_lock);
- return;
- }
-
- if (!spin_trylock_bh(&ioat->ring_lock)) {
- spin_unlock_bh(&chan->cleanup_lock);
- return;
- }
-
- __cleanup(ioat, phys_complete);
-
- spin_unlock_bh(&ioat->ring_lock);
- spin_unlock_bh(&chan->cleanup_lock);
-}
-
-/* run cleanup now because we already delayed the interrupt via INTRDELAY */
-static void ioat3_cleanup_sync(struct ioat2_dma_chan *ioat)
-{
- struct ioat_chan_common *chan = &ioat->base;
- unsigned long phys_complete;
-
- prefetch(chan->completion);
-
spin_lock_bh(&chan->cleanup_lock);
- if (!ioat_cleanup_preamble(chan, &phys_complete)) {
- spin_unlock_bh(&chan->cleanup_lock);
- return;
- }
- spin_lock_bh(&ioat->ring_lock);
-
- __cleanup(ioat, phys_complete);
-
- spin_unlock_bh(&ioat->ring_lock);
+ if (ioat_cleanup_preamble(chan, &phys_complete))
+ __cleanup(ioat, phys_complete);
spin_unlock_bh(&chan->cleanup_lock);
}
@@ -363,7 +325,7 @@ static void ioat3_cleanup_event(unsigned long data)
{
struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
- ioat3_cleanup_sync(ioat);
+ ioat3_cleanup(ioat);
writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
}
@@ -384,12 +346,10 @@ static void ioat3_timer_event(unsigned long data)
struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
struct ioat_chan_common *chan = &ioat->base;
- spin_lock_bh(&chan->cleanup_lock);
if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
unsigned long phys_complete;
u64 status;
- spin_lock_bh(&ioat->ring_lock);
status = ioat_chansts(chan);
/* when halted due to errors check for channel
@@ -408,26 +368,31 @@ static void ioat3_timer_event(unsigned long data)
* acknowledged a pending completion once, then be more
* forceful with a restart
*/
+ spin_lock_bh(&chan->cleanup_lock);
if (ioat_cleanup_preamble(chan, &phys_complete))
__cleanup(ioat, phys_complete);
- else if (test_bit(IOAT_COMPLETION_ACK, &chan->state))
+ else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) {
+ spin_lock_bh(&ioat->prep_lock);
ioat3_restart_channel(ioat);
- else {
+ spin_unlock_bh(&ioat->prep_lock);
+ } else {
set_bit(IOAT_COMPLETION_ACK, &chan->state);
mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
}
- spin_unlock_bh(&ioat->ring_lock);
+ spin_unlock_bh(&chan->cleanup_lock);
} else {
u16 active;
/* if the ring is idle, empty, and oversized try to step
* down the size
*/
- spin_lock_bh(&ioat->ring_lock);
+ spin_lock_bh(&chan->cleanup_lock);
+ spin_lock_bh(&ioat->prep_lock);
active = ioat2_ring_active(ioat);
if (active == 0 && ioat->alloc_order > ioat_get_alloc_order())
reshape_ring(ioat, ioat->alloc_order-1);
- spin_unlock_bh(&ioat->ring_lock);
+ spin_unlock_bh(&ioat->prep_lock);
+ spin_unlock_bh(&chan->cleanup_lock);
/* keep shrinking until we get back to our minimum
* default size
@@ -435,21 +400,20 @@ static void ioat3_timer_event(unsigned long data)
if (ioat->alloc_order > ioat_get_alloc_order())
mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
}
- spin_unlock_bh(&chan->cleanup_lock);
}
static enum dma_status
-ioat3_is_complete(struct dma_chan *c, dma_cookie_t cookie,
- dma_cookie_t *done, dma_cookie_t *used)
+ioat3_tx_status(struct dma_chan *c, dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
{
struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
- if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS)
+ if (ioat_tx_status(c, cookie, txstate) == DMA_SUCCESS)
return DMA_SUCCESS;
- ioat3_cleanup_poll(ioat);
+ ioat3_cleanup(ioat);
- return ioat_is_complete(c, cookie, done, used);
+ return ioat_tx_status(c, cookie, txstate);
}
static struct dma_async_tx_descriptor *
@@ -460,15 +424,12 @@ ioat3_prep_memset_lock(struct dma_chan *c, dma_addr_t dest, int value,
struct ioat_ring_ent *desc;
size_t total_len = len;
struct ioat_fill_descriptor *fill;
- int num_descs;
u64 src_data = (0x0101010101010101ULL) * (value & 0xff);
- u16 idx;
- int i;
+ int num_descs, idx, i;
num_descs = ioat2_xferlen_to_descs(ioat, len);
- if (likely(num_descs) &&
- ioat2_alloc_and_lock(&idx, ioat, num_descs) == 0)
- /* pass */;
+ if (likely(num_descs) && ioat2_check_space_lock(ioat, num_descs) == 0)
+ idx = ioat->head;
else
return NULL;
i = 0;
@@ -513,11 +474,8 @@ __ioat3_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result,
struct ioat_xor_descriptor *xor;
struct ioat_xor_ext_descriptor *xor_ex = NULL;
struct ioat_dma_descriptor *hw;
+ int num_descs, with_ext, idx, i;
u32 offset = 0;
- int num_descs;
- int with_ext;
- int i;
- u16 idx;
u8 op = result ? IOAT_OP_XOR_VAL : IOAT_OP_XOR;
BUG_ON(src_cnt < 2);
@@ -537,9 +495,8 @@ __ioat3_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result,
* (legacy) descriptor to ensure all completion writes arrive in
* order.
*/
- if (likely(num_descs) &&
- ioat2_alloc_and_lock(&idx, ioat, num_descs+1) == 0)
- /* pass */;
+ if (likely(num_descs) && ioat2_check_space_lock(ioat, num_descs+1) == 0)
+ idx = ioat->head;
else
return NULL;
i = 0;
@@ -657,11 +614,8 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
struct ioat_pq_ext_descriptor *pq_ex = NULL;
struct ioat_dma_descriptor *hw;
u32 offset = 0;
- int num_descs;
- int with_ext;
- int i, s;
- u16 idx;
u8 op = result ? IOAT_OP_PQ_VAL : IOAT_OP_PQ;
+ int i, s, idx, with_ext, num_descs;
dev_dbg(to_dev(chan), "%s\n", __func__);
/* the engine requires at least two sources (we provide
@@ -687,8 +641,8 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
* order.
*/
if (likely(num_descs) &&
- ioat2_alloc_and_lock(&idx, ioat, num_descs+1) == 0)
- /* pass */;
+ ioat2_check_space_lock(ioat, num_descs+1) == 0)
+ idx = ioat->head;
else
return NULL;
i = 0;
@@ -851,10 +805,9 @@ ioat3_prep_interrupt_lock(struct dma_chan *c, unsigned long flags)
struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
struct ioat_ring_ent *desc;
struct ioat_dma_descriptor *hw;
- u16 idx;
- if (ioat2_alloc_and_lock(&idx, ioat, 1) == 0)
- desc = ioat2_get_ring_ent(ioat, idx);
+ if (ioat2_check_space_lock(ioat, 1) == 0)
+ desc = ioat2_get_ring_ent(ioat, ioat->head);
else
return NULL;
@@ -977,7 +930,7 @@ static int __devinit ioat_xor_val_self_test(struct ioatdma_device *device)
tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
- if (dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
+ if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
dev_err(dev, "Self-test xor timed out\n");
err = -ENODEV;
goto free_resources;
@@ -1031,7 +984,7 @@ static int __devinit ioat_xor_val_self_test(struct ioatdma_device *device)
tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
- if (dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
+ if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
dev_err(dev, "Self-test validate timed out\n");
err = -ENODEV;
goto free_resources;
@@ -1072,7 +1025,7 @@ static int __devinit ioat_xor_val_self_test(struct ioatdma_device *device)
tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
- if (dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
+ if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
dev_err(dev, "Self-test memset timed out\n");
err = -ENODEV;
goto free_resources;
@@ -1115,7 +1068,7 @@ static int __devinit ioat_xor_val_self_test(struct ioatdma_device *device)
tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
- if (dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
+ if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
dev_err(dev, "Self-test 2nd validate timed out\n");
err = -ENODEV;
goto free_resources;
@@ -1222,7 +1175,7 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
if (cap & IOAT_CAP_XOR) {
is_raid_device = true;
dma->max_xor = 8;
- dma->xor_align = 2;
+ dma->xor_align = 6;
dma_cap_set(DMA_XOR, dma->cap_mask);
dma->device_prep_dma_xor = ioat3_prep_xor;
@@ -1233,7 +1186,7 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
if (cap & IOAT_CAP_PQ) {
is_raid_device = true;
dma_set_maxpq(dma, 8, 0);
- dma->pq_align = 2;
+ dma->pq_align = 6;
dma_cap_set(DMA_PQ, dma->cap_mask);
dma->device_prep_dma_pq = ioat3_prep_pq;
@@ -1243,7 +1196,7 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
if (!(cap & IOAT_CAP_XOR)) {
dma->max_xor = 8;
- dma->xor_align = 2;
+ dma->xor_align = 6;
dma_cap_set(DMA_XOR, dma->cap_mask);
dma->device_prep_dma_xor = ioat3_prep_pqxor;
@@ -1259,11 +1212,11 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
if (is_raid_device) {
- dma->device_is_tx_complete = ioat3_is_complete;
+ dma->device_tx_status = ioat3_tx_status;
device->cleanup_fn = ioat3_cleanup_event;
device->timer_fn = ioat3_timer_event;
} else {
- dma->device_is_tx_complete = ioat_is_dma_complete;
+ dma->device_tx_status = ioat_dma_tx_status;
device->cleanup_fn = ioat2_cleanup_event;
device->timer_fn = ioat2_timer_event;
}
diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c
index 99ec26725bae..fab37d1cf48d 100644
--- a/drivers/dma/ioat/pci.c
+++ b/drivers/dma/ioat/pci.c
@@ -138,15 +138,10 @@ static int __devinit ioat_pci_probe(struct pci_dev *pdev, const struct pci_devic
if (err)
return err;
- device = devm_kzalloc(dev, sizeof(*device), GFP_KERNEL);
- if (!device)
- return -ENOMEM;
-
- pci_set_master(pdev);
-
device = alloc_ioatdma(pdev, iomap[IOAT_MMIO_BAR]);
if (!device)
return -ENOMEM;
+ pci_set_master(pdev);
pci_set_drvdata(pdev, device);
device->version = readb(device->reg_base + IOAT_VER_OFFSET);
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index 1ebc801678b0..161c452923b8 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -894,14 +894,14 @@ static void iop_adma_free_chan_resources(struct dma_chan *chan)
}
/**
- * iop_adma_is_complete - poll the status of an ADMA transaction
+ * iop_adma_status - poll the status of an ADMA transaction
* @chan: ADMA channel handle
* @cookie: ADMA transaction identifier
+ * @txstate: a holder for the current state of the channel or NULL
*/
-static enum dma_status iop_adma_is_complete(struct dma_chan *chan,
+static enum dma_status iop_adma_status(struct dma_chan *chan,
dma_cookie_t cookie,
- dma_cookie_t *done,
- dma_cookie_t *used)
+ struct dma_tx_state *txstate)
{
struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
dma_cookie_t last_used;
@@ -910,12 +910,7 @@ static enum dma_status iop_adma_is_complete(struct dma_chan *chan,
last_used = chan->cookie;
last_complete = iop_chan->completed_cookie;
-
- if (done)
- *done = last_complete;
- if (used)
- *used = last_used;
-
+ dma_set_tx_state(txstate, last_complete, last_used, 0);
ret = dma_async_is_complete(cookie, last_complete, last_used);
if (ret == DMA_SUCCESS)
return ret;
@@ -924,11 +919,7 @@ static enum dma_status iop_adma_is_complete(struct dma_chan *chan,
last_used = chan->cookie;
last_complete = iop_chan->completed_cookie;
-
- if (done)
- *done = last_complete;
- if (used)
- *used = last_used;
+ dma_set_tx_state(txstate, last_complete, last_used, 0);
return dma_async_is_complete(cookie, last_complete, last_used);
}
@@ -1043,7 +1034,7 @@ static int __devinit iop_adma_memcpy_self_test(struct iop_adma_device *device)
iop_adma_issue_pending(dma_chan);
msleep(1);
- if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) !=
+ if (iop_adma_status(dma_chan, cookie, NULL) !=
DMA_SUCCESS) {
dev_printk(KERN_ERR, dma_chan->device->dev,
"Self-test copy timed out, disabling\n");
@@ -1143,7 +1134,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
iop_adma_issue_pending(dma_chan);
msleep(8);
- if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) !=
+ if (iop_adma_status(dma_chan, cookie, NULL) !=
DMA_SUCCESS) {
dev_printk(KERN_ERR, dma_chan->device->dev,
"Self-test xor timed out, disabling\n");
@@ -1190,7 +1181,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
iop_adma_issue_pending(dma_chan);
msleep(8);
- if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
+ if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
dev_printk(KERN_ERR, dma_chan->device->dev,
"Self-test zero sum timed out, disabling\n");
err = -ENODEV;
@@ -1214,7 +1205,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
iop_adma_issue_pending(dma_chan);
msleep(8);
- if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
+ if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
dev_printk(KERN_ERR, dma_chan->device->dev,
"Self-test memset timed out, disabling\n");
err = -ENODEV;
@@ -1246,7 +1237,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
iop_adma_issue_pending(dma_chan);
msleep(8);
- if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
+ if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
dev_printk(KERN_ERR, dma_chan->device->dev,
"Self-test non-zero sum timed out, disabling\n");
err = -ENODEV;
@@ -1341,7 +1332,7 @@ iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device)
iop_adma_issue_pending(dma_chan);
msleep(8);
- if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) !=
+ if (iop_adma_status(dma_chan, cookie, NULL) !=
DMA_SUCCESS) {
dev_err(dev, "Self-test pq timed out, disabling\n");
err = -ENODEV;
@@ -1378,7 +1369,7 @@ iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device)
iop_adma_issue_pending(dma_chan);
msleep(8);
- if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) !=
+ if (iop_adma_status(dma_chan, cookie, NULL) !=
DMA_SUCCESS) {
dev_err(dev, "Self-test pq-zero-sum timed out, disabling\n");
err = -ENODEV;
@@ -1410,7 +1401,7 @@ iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device)
iop_adma_issue_pending(dma_chan);
msleep(8);
- if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) !=
+ if (iop_adma_status(dma_chan, cookie, NULL) !=
DMA_SUCCESS) {
dev_err(dev, "Self-test !pq-zero-sum timed out, disabling\n");
err = -ENODEV;
@@ -1508,7 +1499,7 @@ static int __devinit iop_adma_probe(struct platform_device *pdev)
/* set base routines */
dma_dev->device_alloc_chan_resources = iop_adma_alloc_chan_resources;
dma_dev->device_free_chan_resources = iop_adma_free_chan_resources;
- dma_dev->device_is_tx_complete = iop_adma_is_complete;
+ dma_dev->device_tx_status = iop_adma_status;
dma_dev->device_issue_pending = iop_adma_issue_pending;
dma_dev->dev = &pdev->dev;
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index 2a446397c884..cb26ee9773d6 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -1472,13 +1472,18 @@ static void idmac_issue_pending(struct dma_chan *chan)
*/
}
-static void __idmac_terminate_all(struct dma_chan *chan)
+static int __idmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
{
struct idmac_channel *ichan = to_idmac_chan(chan);
struct idmac *idmac = to_idmac(chan->device);
unsigned long flags;
int i;
+ /* Only supports DMA_TERMINATE_ALL */
+ if (cmd != DMA_TERMINATE_ALL)
+ return -ENXIO;
+
ipu_disable_channel(idmac, ichan,
ichan->status >= IPU_CHANNEL_ENABLED);
@@ -1505,17 +1510,23 @@ static void __idmac_terminate_all(struct dma_chan *chan)
tasklet_enable(&to_ipu(idmac)->tasklet);
ichan->status = IPU_CHANNEL_INITIALIZED;
+
+ return 0;
}
-static void idmac_terminate_all(struct dma_chan *chan)
+static int idmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
{
struct idmac_channel *ichan = to_idmac_chan(chan);
+ int ret;
mutex_lock(&ichan->chan_mutex);
- __idmac_terminate_all(chan);
+ ret = __idmac_control(chan, cmd, arg);
mutex_unlock(&ichan->chan_mutex);
+
+ return ret;
}
#ifdef DEBUG
@@ -1607,7 +1618,7 @@ static void idmac_free_chan_resources(struct dma_chan *chan)
mutex_lock(&ichan->chan_mutex);
- __idmac_terminate_all(chan);
+ __idmac_control(chan, DMA_TERMINATE_ALL, 0);
if (ichan->status > IPU_CHANNEL_FREE) {
#ifdef DEBUG
@@ -1637,15 +1648,12 @@ static void idmac_free_chan_resources(struct dma_chan *chan)
tasklet_schedule(&to_ipu(idmac)->tasklet);
}
-static enum dma_status idmac_is_tx_complete(struct dma_chan *chan,
- dma_cookie_t cookie, dma_cookie_t *done, dma_cookie_t *used)
+static enum dma_status idmac_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie, struct dma_tx_state *txstate)
{
struct idmac_channel *ichan = to_idmac_chan(chan);
- if (done)
- *done = ichan->completed;
- if (used)
- *used = chan->cookie;
+ dma_set_tx_state(txstate, ichan->completed, chan->cookie, 0);
if (cookie != chan->cookie)
return DMA_ERROR;
return DMA_SUCCESS;
@@ -1664,12 +1672,12 @@ static int __init ipu_idmac_init(struct ipu *ipu)
dma->dev = ipu->dev;
dma->device_alloc_chan_resources = idmac_alloc_chan_resources;
dma->device_free_chan_resources = idmac_free_chan_resources;
- dma->device_is_tx_complete = idmac_is_tx_complete;
+ dma->device_tx_status = idmac_tx_status;
dma->device_issue_pending = idmac_issue_pending;
/* Compulsory for DMA_SLAVE fields */
dma->device_prep_slave_sg = idmac_prep_slave_sg;
- dma->device_terminate_all = idmac_terminate_all;
+ dma->device_control = idmac_control;
INIT_LIST_HEAD(&dma->channels);
for (i = 0; i < IPU_CHANNELS_NUM; i++) {
@@ -1703,7 +1711,7 @@ static void __exit ipu_idmac_exit(struct ipu *ipu)
for (i = 0; i < IPU_CHANNELS_NUM; i++) {
struct idmac_channel *ichan = ipu->channel + i;
- idmac_terminate_all(&ichan->dma_chan);
+ idmac_control(&ichan->dma_chan, DMA_TERMINATE_ALL, 0);
idmac_prep_slave_sg(&ichan->dma_chan, NULL, 0, DMA_NONE, 0);
}
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index bbbd58566625..201e6e19c344 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -541,8 +541,8 @@ static void mpc_dma_issue_pending(struct dma_chan *chan)
/* Check request completion status */
static enum dma_status
-mpc_dma_is_tx_complete(struct dma_chan *chan, dma_cookie_t cookie,
- dma_cookie_t *done, dma_cookie_t *used)
+mpc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
{
struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
unsigned long flags;
@@ -554,12 +554,7 @@ mpc_dma_is_tx_complete(struct dma_chan *chan, dma_cookie_t cookie,
last_complete = mchan->completed_cookie;
spin_unlock_irqrestore(&mchan->lock, flags);
- if (done)
- *done = last_complete;
-
- if (used)
- *used = last_used;
-
+ dma_set_tx_state(txstate, last_complete, last_used, 0);
return dma_async_is_complete(cookie, last_complete, last_used);
}
@@ -663,7 +658,7 @@ static int __devinit mpc_dma_probe(struct of_device *op,
}
regs_start = res.start;
- regs_size = res.end - res.start + 1;
+ regs_size = resource_size(&res);
if (!devm_request_mem_region(dev, regs_start, regs_size, DRV_NAME)) {
dev_err(dev, "Error requesting memory region!\n");
@@ -694,7 +689,7 @@ static int __devinit mpc_dma_probe(struct of_device *op,
dma->device_alloc_chan_resources = mpc_dma_alloc_chan_resources;
dma->device_free_chan_resources = mpc_dma_free_chan_resources;
dma->device_issue_pending = mpc_dma_issue_pending;
- dma->device_is_tx_complete = mpc_dma_is_tx_complete;
+ dma->device_tx_status = mpc_dma_tx_status;
dma->device_prep_dma_memcpy = mpc_dma_prep_memcpy;
INIT_LIST_HEAD(&dma->channels);
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index e2fd34da64f2..86c5ae9fde34 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -810,14 +810,14 @@ static void mv_xor_free_chan_resources(struct dma_chan *chan)
}
/**
- * mv_xor_is_complete - poll the status of an XOR transaction
+ * mv_xor_status - poll the status of an XOR transaction
* @chan: XOR channel handle
* @cookie: XOR transaction identifier
+ * @txstate: XOR transaction's state holder (or NULL)
*/
-static enum dma_status mv_xor_is_complete(struct dma_chan *chan,
+static enum dma_status mv_xor_status(struct dma_chan *chan,
dma_cookie_t cookie,
- dma_cookie_t *done,
- dma_cookie_t *used)
+ struct dma_tx_state *txstate)
{
struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
dma_cookie_t last_used;
@@ -827,10 +827,7 @@ static enum dma_status mv_xor_is_complete(struct dma_chan *chan,
last_used = chan->cookie;
last_complete = mv_chan->completed_cookie;
mv_chan->is_complete_cookie = cookie;
- if (done)
- *done = last_complete;
- if (used)
- *used = last_used;
+ dma_set_tx_state(txstate, last_complete, last_used, 0);
ret = dma_async_is_complete(cookie, last_complete, last_used);
if (ret == DMA_SUCCESS) {
@@ -842,11 +839,7 @@ static enum dma_status mv_xor_is_complete(struct dma_chan *chan,
last_used = chan->cookie;
last_complete = mv_chan->completed_cookie;
- if (done)
- *done = last_complete;
- if (used)
- *used = last_used;
-
+ dma_set_tx_state(txstate, last_complete, last_used, 0);
return dma_async_is_complete(cookie, last_complete, last_used);
}
@@ -975,7 +968,7 @@ static int __devinit mv_xor_memcpy_self_test(struct mv_xor_device *device)
async_tx_ack(tx);
msleep(1);
- if (mv_xor_is_complete(dma_chan, cookie, NULL, NULL) !=
+ if (mv_xor_status(dma_chan, cookie, NULL) !=
DMA_SUCCESS) {
dev_printk(KERN_ERR, dma_chan->device->dev,
"Self-test copy timed out, disabling\n");
@@ -1073,7 +1066,7 @@ mv_xor_xor_self_test(struct mv_xor_device *device)
async_tx_ack(tx);
msleep(8);
- if (mv_xor_is_complete(dma_chan, cookie, NULL, NULL) !=
+ if (mv_xor_status(dma_chan, cookie, NULL) !=
DMA_SUCCESS) {
dev_printk(KERN_ERR, dma_chan->device->dev,
"Self-test xor timed out, disabling\n");
@@ -1168,7 +1161,7 @@ static int __devinit mv_xor_probe(struct platform_device *pdev)
/* set base routines */
dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
- dma_dev->device_is_tx_complete = mv_xor_is_complete;
+ dma_dev->device_tx_status = mv_xor_status;
dma_dev->device_issue_pending = mv_xor_issue_pending;
dma_dev->dev = &pdev->dev;
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
new file mode 100644
index 000000000000..7c50f6dfd3f4
--- /dev/null
+++ b/drivers/dma/pl330.c
@@ -0,0 +1,866 @@
+/* linux/drivers/dma/pl330.c
+ *
+ * Copyright (C) 2010 Samsung Electronics Co. Ltd.
+ * Jaswinder Singh <jassi.brar@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+#include <linux/amba/bus.h>
+#include <linux/amba/pl330.h>
+
+#define NR_DEFAULT_DESC 16
+
+enum desc_status {
+ /* In the DMAC pool */
+ FREE,
+ /*
+ * Allocated to some channel during prep_xxx
+ * Also may be sitting on the work_list.
+ */
+ PREP,
+ /*
+ * Sitting on the work_list and already submitted
+ * to the PL330 core. Not more than two descriptors
+ * of a channel can be BUSY at any time.
+ */
+ BUSY,
+ /*
+ * Sitting on the channel work_list but xfer done
+ * by PL330 core
+ */
+ DONE,
+};
+
+struct dma_pl330_chan {
+ /* Schedule desc completion */
+ struct tasklet_struct task;
+
+ /* DMA-Engine Channel */
+ struct dma_chan chan;
+
+ /* Last completed cookie */
+ dma_cookie_t completed;
+
+ /* List of to be xfered descriptors */
+ struct list_head work_list;
+
+ /* Pointer to the DMAC that manages this channel,
+ * NULL if the channel is available to be acquired.
+ * As the parent, this DMAC also provides descriptors
+ * to the channel.
+ */
+ struct dma_pl330_dmac *dmac;
+
+ /* To protect channel manipulation */
+ spinlock_t lock;
+
+ /* Token of a hardware channel thread of PL330 DMAC
+ * NULL if the channel is available to be acquired.
+ */
+ void *pl330_chid;
+};
+
+struct dma_pl330_dmac {
+ struct pl330_info pif;
+
+ /* DMA-Engine Device */
+ struct dma_device ddma;
+
+ /* Pool of descriptors available for the DMAC's channels */
+ struct list_head desc_pool;
+ /* To protect desc_pool manipulation */
+ spinlock_t pool_lock;
+
+ /* Peripheral channels connected to this DMAC */
+ struct dma_pl330_chan peripherals[0]; /* keep at end */
+};
+
+struct dma_pl330_desc {
+ /* To attach to a queue as child */
+ struct list_head node;
+
+ /* Descriptor for the DMA Engine API */
+ struct dma_async_tx_descriptor txd;
+
+ /* Xfer for PL330 core */
+ struct pl330_xfer px;
+
+ struct pl330_reqcfg rqcfg;
+ struct pl330_req req;
+
+ enum desc_status status;
+
+ /* The channel which currently holds this desc */
+ struct dma_pl330_chan *pchan;
+};
+
+static inline struct dma_pl330_chan *
+to_pchan(struct dma_chan *ch)
+{
+ if (!ch)
+ return NULL;
+
+ return container_of(ch, struct dma_pl330_chan, chan);
+}
+
+static inline struct dma_pl330_desc *
+to_desc(struct dma_async_tx_descriptor *tx)
+{
+ return container_of(tx, struct dma_pl330_desc, txd);
+}
+
+static inline void free_desc_list(struct list_head *list)
+{
+ struct dma_pl330_dmac *pdmac;
+ struct dma_pl330_desc *desc;
+ struct dma_pl330_chan *pch;
+ unsigned long flags;
+
+ if (list_empty(list))
+ return;
+
+ /* Finish off the work list */
+ list_for_each_entry(desc, list, node) {
+ dma_async_tx_callback callback;
+ void *param;
+
+ /* All descs in a list belong to the same channel */
+ pch = desc->pchan;
+ callback = desc->txd.callback;
+ param = desc->txd.callback_param;
+
+ if (callback)
+ callback(param);
+
+ desc->pchan = NULL;
+ }
+
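+ /* pch is valid here: the list was checked non-empty above and every
+ * desc on it belongs to the same channel */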
+ pdmac = pch->dmac;
+
+ spin_lock_irqsave(&pdmac->pool_lock, flags);
+ list_splice_tail_init(list, &pdmac->desc_pool);
+ spin_unlock_irqrestore(&pdmac->pool_lock, flags);
+}
+
+static inline void fill_queue(struct dma_pl330_chan *pch)
+{
+ struct dma_pl330_desc *desc;
+ int ret;
+
+ list_for_each_entry(desc, &pch->work_list, node) {
+
+ /* If already submitted */
+ if (desc->status == BUSY)
+ break;
+
+ ret = pl330_submit_req(pch->pl330_chid,
+ &desc->req);
+ if (!ret) {
+ desc->status = BUSY;
+ break;
+ } else if (ret == -EAGAIN) {
+ /* QFull or DMAC Dying */
+ break;
+ } else {
+ /* Unacceptable request */
+ desc->status = DONE;
+ dev_err(pch->dmac->pif.dev, "%s:%d Bad Desc(%d)\n",
+ __func__, __LINE__, desc->txd.cookie);
+ tasklet_schedule(&pch->task);
+ }
+ }
+}
+
+static void pl330_tasklet(unsigned long data)
+{
+ struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data;
+ struct dma_pl330_desc *desc, *_dt;
+ unsigned long flags;
+ LIST_HEAD(list);
+
+ spin_lock_irqsave(&pch->lock, flags);
+
+ /* Pick up ripe tomatoes */
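+ /* i.e. collect every DONE descriptor off the work list, recording
+ * the last completed cookie as we go */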
+ list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
+ if (desc->status == DONE) {
+ pch->completed = desc->txd.cookie;
+ list_move_tail(&desc->node, &list);
+ }
+
+ /* Try to submit a req imm. next to the last completed cookie */
+ fill_queue(pch);
+
+ /* Make sure the PL330 Channel thread is active */
+ pl330_chan_ctrl(pch->pl330_chid, PL330_OP_START);
+
+ spin_unlock_irqrestore(&pch->lock, flags);
+
+ free_desc_list(&list);
+}
+
+static void dma_pl330_rqcb(void *token, enum pl330_op_err err)
+{
+ struct dma_pl330_desc *desc = token;
+ struct dma_pl330_chan *pch = desc->pchan;
+ unsigned long flags;
+
+ /* If desc aborted */
+ if (!pch)
+ return;
+
+ spin_lock_irqsave(&pch->lock, flags);
+
+ desc->status = DONE;
+
+ spin_unlock_irqrestore(&pch->lock, flags);
+
+ tasklet_schedule(&pch->task);
+}
+
+static int pl330_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct dma_pl330_chan *pch = to_pchan(chan);
+ struct dma_pl330_dmac *pdmac = pch->dmac;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pch->lock, flags);
+
+ pch->completed = chan->cookie = 1;
+
+ pch->pl330_chid = pl330_request_channel(&pdmac->pif);
+ if (!pch->pl330_chid) {
+ spin_unlock_irqrestore(&pch->lock, flags);
+ return 0;
+ }
+
+ tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch);
+
+ spin_unlock_irqrestore(&pch->lock, flags);
+
+ return 1;
+}
+
+static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg)
+{
+ struct dma_pl330_chan *pch = to_pchan(chan);
+ struct dma_pl330_desc *desc;
+ unsigned long flags;
+
+ /* Only supports DMA_TERMINATE_ALL */
+ if (cmd != DMA_TERMINATE_ALL)
+ return -ENXIO;
+
+ spin_lock_irqsave(&pch->lock, flags);
+
+ /* FLUSH the PL330 Channel thread */
+ pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);
+
+ /* Mark all desc done */
+ list_for_each_entry(desc, &pch->work_list, node)
+ desc->status = DONE;
+
+ spin_unlock_irqrestore(&pch->lock, flags);
+
+ pl330_tasklet((unsigned long) pch);
+
+ return 0;
+}
+
+static void pl330_free_chan_resources(struct dma_chan *chan)
+{
+ struct dma_pl330_chan *pch = to_pchan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&pch->lock, flags);
+
+ tasklet_kill(&pch->task);
+
+ pl330_release_channel(pch->pl330_chid);
+ pch->pl330_chid = NULL;
+
+ spin_unlock_irqrestore(&pch->lock, flags);
+}
+
+static enum dma_status
+pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct dma_pl330_chan *pch = to_pchan(chan);
+ dma_cookie_t last_done, last_used;
+ int ret;
+
+ last_done = pch->completed;
+ last_used = chan->cookie;
+
+ ret = dma_async_is_complete(cookie, last_done, last_used);
+
+ dma_set_tx_state(txstate, last_done, last_used, 0);
+
+ return ret;
+}
+
+static void pl330_issue_pending(struct dma_chan *chan)
+{
+ pl330_tasklet((unsigned long) to_pchan(chan));
+}
+
+/*
+ * prep_xxx returned the last descriptor of the circular list, so the
+ * argument to submit corresponds to the last descriptor of the list.
+ */
+static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct dma_pl330_desc *desc, *last = to_desc(tx);
+ struct dma_pl330_chan *pch = to_pchan(tx->chan);
+ dma_cookie_t cookie;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pch->lock, flags);
+
+ /* Assign cookies to all nodes */
+ cookie = tx->chan->cookie;
+
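+ /* prep_xxx chained any extra descriptors onto last->node; give each
+ * one a cookie and move it to the channel's work list, keeping the
+ * last descriptor for the end */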
+ while (!list_empty(&last->node)) {
+ desc = list_entry(last->node.next, struct dma_pl330_desc, node);
+
+ if (++cookie < 0)
+ cookie = 1;
+ desc->txd.cookie = cookie;
+
+ list_move_tail(&desc->node, &pch->work_list);
+ }
+
+ if (++cookie < 0)
+ cookie = 1;
+ last->txd.cookie = cookie;
+
+ list_add_tail(&last->node, &pch->work_list);
+
+ tx->chan->cookie = cookie;
+
+ spin_unlock_irqrestore(&pch->lock, flags);
+
+ return cookie;
+}
+
+static inline void _init_desc(struct dma_pl330_desc *desc)
+{
+ desc->pchan = NULL;
+ desc->req.x = &desc->px;
+ desc->req.token = desc;
+ desc->rqcfg.swap = SWAP_NO;
+ desc->rqcfg.privileged = 0;
+ desc->rqcfg.insnaccess = 0;
+ desc->rqcfg.scctl = SCCTRL0;
+ desc->rqcfg.dcctl = DCCTRL0;
+ desc->req.cfg = &desc->rqcfg;
+ desc->req.xfer_cb = dma_pl330_rqcb;
+ desc->txd.tx_submit = pl330_tx_submit;
+
+ INIT_LIST_HEAD(&desc->node);
+}
+
+/* Returns the number of descriptors added to the DMAC pool */
+int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count)
+{
+ struct dma_pl330_desc *desc;
+ unsigned long flags;
+ int i;
+
+ if (!pdmac)
+ return 0;
+
+ desc = kmalloc(count * sizeof(*desc), flg);
+ if (!desc)
+ return 0;
+
+ spin_lock_irqsave(&pdmac->pool_lock, flags);
+
+ for (i = 0; i < count; i++) {
+ _init_desc(&desc[i]);
+ list_add_tail(&desc[i].node, &pdmac->desc_pool);
+ }
+
+ spin_unlock_irqrestore(&pdmac->pool_lock, flags);
+
+ return count;
+}
+
+static struct dma_pl330_desc *
+pluck_desc(struct dma_pl330_dmac *pdmac)
+{
+ struct dma_pl330_desc *desc = NULL;
+ unsigned long flags;
+
+ if (!pdmac)
+ return NULL;
+
+ spin_lock_irqsave(&pdmac->pool_lock, flags);
+
+ if (!list_empty(&pdmac->desc_pool)) {
+ desc = list_entry(pdmac->desc_pool.next,
+ struct dma_pl330_desc, node);
+
+ list_del_init(&desc->node);
+
+ desc->status = PREP;
+ desc->txd.callback = NULL;
+ }
+
+ spin_unlock_irqrestore(&pdmac->pool_lock, flags);
+
+ return desc;
+}
+
+static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
+{
+ struct dma_pl330_dmac *pdmac = pch->dmac;
+ struct dma_pl330_peri *peri = pch->chan.private;
+ struct dma_pl330_desc *desc;
+
+ /* Pluck one desc from the pool of DMAC */
+ desc = pluck_desc(pdmac);
+
+ /* If the DMAC pool is empty, alloc new */
+ if (!desc) {
+ if (!add_desc(pdmac, GFP_ATOMIC, 1))
+ return NULL;
+
+ /* Try again */
+ desc = pluck_desc(pdmac);
+ if (!desc) {
+ dev_err(pch->dmac->pif.dev,
+ "%s:%d ALERT!\n", __func__, __LINE__);
+ return NULL;
+ }
+ }
+
+ /* Initialize the descriptor */
+ desc->pchan = pch;
+ desc->txd.cookie = 0;
+ async_tx_ack(&desc->txd);
+
+ desc->req.rqtype = peri->rqtype;
+ desc->req.peri = peri->peri_id;
+
+ dma_async_tx_descriptor_init(&desc->txd, &pch->chan);
+
+ return desc;
+}
+
+static inline void fill_px(struct pl330_xfer *px,
+ dma_addr_t dst, dma_addr_t src, size_t len)
+{
+ px->next = NULL;
+ px->bytes = len;
+ px->dst_addr = dst;
+ px->src_addr = src;
+}
+
+static struct dma_pl330_desc *
+__pl330_prep_dma_memcpy(struct dma_pl330_chan *pch, dma_addr_t dst,
+ dma_addr_t src, size_t len)
+{
+ struct dma_pl330_desc *desc = pl330_get_desc(pch);
+
+ if (!desc) {
+ dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
+ __func__, __LINE__);
+ return NULL;
+ }
+
+ /*
+ * Ideally we should look out for reqs bigger than
+ * those that can be programmed with 256 bytes of
+ * MC buffer, but considering a req size is seldom
+ * going to be word-unaligned and more than 200MB,
+ * we take it easy.
+ * Also, should the limit be reached we'd rather
+ * have the platform increase the MC buffer size than
+ * complicate this API driver.
+ */
+ fill_px(&desc->px, dst, src, len);
+
+ return desc;
+}
+
+/* Call after fixing burst size */
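+/*
+ * burst_len starts as the number of burst-sized units that fit in the
+ * DMAC data buffer, is capped at the hardware maximum of 16, and is
+ * then reduced until the transfer length is a whole number of bursts.
+ */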
+static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
+{
+ struct dma_pl330_chan *pch = desc->pchan;
+ struct pl330_info *pi = &pch->dmac->pif;
+ int burst_len;
+
+ burst_len = pi->pcfg.data_bus_width / 8;
+ burst_len *= pi->pcfg.data_buf_dep;
+ burst_len >>= desc->rqcfg.brst_size;
+
+ /* src/dst_burst_len can't be more than 16 */
+ if (burst_len > 16)
+ burst_len = 16;
+
+ while (burst_len > 1) {
+ if (!(len % (burst_len << desc->rqcfg.brst_size)))
+ break;
+ burst_len--;
+ }
+
+ return burst_len;
+}
+
+static struct dma_async_tx_descriptor *
+pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
+ dma_addr_t src, size_t len, unsigned long flags)
+{
+ struct dma_pl330_desc *desc;
+ struct dma_pl330_chan *pch = to_pchan(chan);
+ struct dma_pl330_peri *peri = chan->private;
+ struct pl330_info *pi;
+ int burst;
+
+ if (unlikely(!pch || !len || !peri))
+ return NULL;
+
+ if (peri->rqtype != MEMTOMEM)
+ return NULL;
+
+ pi = &pch->dmac->pif;
+
+ desc = __pl330_prep_dma_memcpy(pch, dst, src, len);
+ if (!desc)
+ return NULL;
+
+ desc->rqcfg.src_inc = 1;
+ desc->rqcfg.dst_inc = 1;
+
+ /* Select max possible burst size */
+ burst = pi->pcfg.data_bus_width / 8;
+
+ while (burst > 1) {
+ if (!(len % burst))
+ break;
+ burst /= 2;
+ }
+
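+ /* brst_size is the log2 of the chosen burst width in bytes */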
+ desc->rqcfg.brst_size = 0;
+ while (burst != (1 << desc->rqcfg.brst_size))
+ desc->rqcfg.brst_size++;
+
+ desc->rqcfg.brst_len = get_burst_len(desc, len);
+
+ desc->txd.flags = flags;
+
+ return &desc->txd;
+}
+
+static struct dma_async_tx_descriptor *
+pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_data_direction direction,
+ unsigned long flg)
+{
+ struct dma_pl330_desc *first, *desc = NULL;
+ struct dma_pl330_chan *pch = to_pchan(chan);
+ struct dma_pl330_peri *peri = chan->private;
+ struct scatterlist *sg;
+ unsigned long flags;
+ int i, burst_size;
+ dma_addr_t addr;
+
+ if (unlikely(!pch || !sgl || !sg_len))
+ return NULL;
+
+ /* Make sure the direction is consistent */
+ if ((direction == DMA_TO_DEVICE &&
+ peri->rqtype != MEMTODEV) ||
+ (direction == DMA_FROM_DEVICE &&
+ peri->rqtype != DEVTOMEM)) {
+ dev_err(pch->dmac->pif.dev, "%s:%d Invalid Direction\n",
+ __func__, __LINE__);
+ return NULL;
+ }
+
+ addr = peri->fifo_addr;
+ burst_size = peri->burst_sz;
+
+ first = NULL;
+
+ for_each_sg(sgl, sg, sg_len, i) {
+
+ desc = pl330_get_desc(pch);
+ if (!desc) {
+ struct dma_pl330_dmac *pdmac = pch->dmac;
+
+ dev_err(pch->dmac->pif.dev,
+ "%s:%d Unable to fetch desc\n",
+ __func__, __LINE__);
+ if (!first)
+ return NULL;
+
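+ /* Roll back: return the partially built chain to the DMAC pool */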
+ spin_lock_irqsave(&pdmac->pool_lock, flags);
+
+ while (!list_empty(&first->node)) {
+ desc = list_entry(first->node.next,
+ struct dma_pl330_desc, node);
+ list_move_tail(&desc->node, &pdmac->desc_pool);
+ }
+
+ list_move_tail(&first->node, &pdmac->desc_pool);
+
+ spin_unlock_irqrestore(&pdmac->pool_lock, flags);
+
+ return NULL;
+ }
+
+ if (!first)
+ first = desc;
+ else
+ list_add_tail(&desc->node, &first->node);
+
+ if (direction == DMA_TO_DEVICE) {
+ desc->rqcfg.src_inc = 1;
+ desc->rqcfg.dst_inc = 0;
+ fill_px(&desc->px,
+ addr, sg_dma_address(sg), sg_dma_len(sg));
+ } else {
+ desc->rqcfg.src_inc = 0;
+ desc->rqcfg.dst_inc = 1;
+ fill_px(&desc->px,
+ sg_dma_address(sg), addr, sg_dma_len(sg));
+ }
+
+ desc->rqcfg.brst_size = burst_size;
+ desc->rqcfg.brst_len = 1;
+ }
+
+ /* Return the last desc in the chain */
+ desc->txd.flags = flg;
+ return &desc->txd;
+}
+
+static irqreturn_t pl330_irq_handler(int irq, void *data)
+{
+ if (pl330_update(data))
+ return IRQ_HANDLED;
+ else
+ return IRQ_NONE;
+}
+
+static int __devinit
+pl330_probe(struct amba_device *adev, struct amba_id *id)
+{
+ struct dma_pl330_platdata *pdat;
+ struct dma_pl330_dmac *pdmac;
+ struct dma_pl330_chan *pch;
+ struct pl330_info *pi;
+ struct dma_device *pd;
+ struct resource *res;
+ int i, ret, irq;
+
+ pdat = adev->dev.platform_data;
+
+ if (!pdat || !pdat->nr_valid_peri) {
+ dev_err(&adev->dev, "platform data missing\n");
+ return -ENODEV;
+ }
+
+ /* Allocate a new DMAC and its Channels */
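+ /* The channels live in the peripherals[] array at the end of
+ * struct dma_pl330_dmac, so one allocation covers both. */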
+ pdmac = kzalloc(pdat->nr_valid_peri * sizeof(*pch)
+ + sizeof(*pdmac), GFP_KERNEL);
+ if (!pdmac) {
+ dev_err(&adev->dev, "unable to allocate mem\n");
+ return -ENOMEM;
+ }
+
+ pi = &pdmac->pif;
+ pi->dev = &adev->dev;
+ pi->pl330_data = NULL;
+ pi->mcbufsz = pdat->mcbuf_sz;
+
+ res = &adev->res;
+ request_mem_region(res->start, resource_size(res), "dma-pl330");
+
+ pi->base = ioremap(res->start, resource_size(res));
+ if (!pi->base) {
+ ret = -ENXIO;
+ goto probe_err1;
+ }
+
+ irq = adev->irq[0];
+ ret = request_irq(irq, pl330_irq_handler, 0,
+ dev_name(&adev->dev), pi);
+ if (ret)
+ goto probe_err2;
+
+ ret = pl330_add(pi);
+ if (ret)
+ goto probe_err3;
+
+ INIT_LIST_HEAD(&pdmac->desc_pool);
+ spin_lock_init(&pdmac->pool_lock);
+
+ /* Create a descriptor pool of default size */
+ if (!add_desc(pdmac, GFP_KERNEL, NR_DEFAULT_DESC))
+ dev_warn(&adev->dev, "unable to allocate desc\n");
+
+ pd = &pdmac->ddma;
+ INIT_LIST_HEAD(&pd->channels);
+
+ /* Initialize channel parameters */
+ for (i = 0; i < pdat->nr_valid_peri; i++) {
+ struct dma_pl330_peri *peri = &pdat->peri[i];
+ pch = &pdmac->peripherals[i];
+
+ switch (peri->rqtype) {
+ case MEMTOMEM:
+ dma_cap_set(DMA_MEMCPY, pd->cap_mask);
+ break;
+ case MEMTODEV:
+ case DEVTOMEM:
+ dma_cap_set(DMA_SLAVE, pd->cap_mask);
+ break;
+ default:
+ dev_err(&adev->dev, "DEVTODEV Not Supported\n");
+ continue;
+ }
+
+ INIT_LIST_HEAD(&pch->work_list);
+ spin_lock_init(&pch->lock);
+ pch->pl330_chid = NULL;
+ pch->chan.private = peri;
+ pch->chan.device = pd;
+ pch->chan.chan_id = i;
+ pch->dmac = pdmac;
+
+ /* Add the channel to the DMAC list */
+ pd->chancnt++;
+ list_add_tail(&pch->chan.device_node, &pd->channels);
+ }
+
+ pd->dev = &adev->dev;
+
+ pd->device_alloc_chan_resources = pl330_alloc_chan_resources;
+ pd->device_free_chan_resources = pl330_free_chan_resources;
+ pd->device_prep_dma_memcpy = pl330_prep_dma_memcpy;
+ pd->device_tx_status = pl330_tx_status;
+ pd->device_prep_slave_sg = pl330_prep_slave_sg;
+ pd->device_control = pl330_control;
+ pd->device_issue_pending = pl330_issue_pending;
+
+ ret = dma_async_device_register(pd);
+ if (ret) {
+ dev_err(&adev->dev, "unable to register DMAC\n");
+ goto probe_err4;
+ }
+
+ amba_set_drvdata(adev, pdmac);
+
+ dev_info(&adev->dev,
+ "Loaded driver for PL330 DMAC-%d\n", adev->periphid);
+ dev_info(&adev->dev,
+ "\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n",
+ pi->pcfg.data_buf_dep,
+ pi->pcfg.data_bus_width / 8, pi->pcfg.num_chan,
+ pi->pcfg.num_peri, pi->pcfg.num_events);
+
+ return 0;
+
+probe_err4:
+ pl330_del(pi);
+probe_err3:
+ free_irq(irq, pi);
+probe_err2:
+ iounmap(pi->base);
+probe_err1:
+ release_mem_region(res->start, resource_size(res));
+ kfree(pdmac);
+
+ return ret;
+}
+
+static int __devexit pl330_remove(struct amba_device *adev)
+{
+ struct dma_pl330_dmac *pdmac = amba_get_drvdata(adev);
+ struct dma_pl330_chan *pch, *_p;
+ struct pl330_info *pi;
+ struct resource *res;
+ int irq;
+
+ if (!pdmac)
+ return 0;
+
+ amba_set_drvdata(adev, NULL);
+
+ /* Idle the DMAC */
+ list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels,
+ chan.device_node) {
+
+ /* Remove the channel */
+ list_del(&pch->chan.device_node);
+
+ /* Flush the channel */
+ pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0);
+ pl330_free_chan_resources(&pch->chan);
+ }
+
+ pi = &pdmac->pif;
+
+ pl330_del(pi);
+
+ irq = adev->irq[0];
+ free_irq(irq, pi);
+
+ iounmap(pi->base);
+
+ res = &adev->res;
+ release_mem_region(res->start, resource_size(res));
+
+ kfree(pdmac);
+
+ return 0;
+}
+
+static struct amba_id pl330_ids[] = {
+ {
+ .id = 0x00041330,
+ .mask = 0x000fffff,
+ },
+ { 0, 0 },
+};
+
+static struct amba_driver pl330_driver = {
+ .drv = {
+ .owner = THIS_MODULE,
+ .name = "dma-pl330",
+ },
+ .id_table = pl330_ids,
+ .probe = pl330_probe,
+ .remove = pl330_remove,
+};
+
+static int __init pl330_init(void)
+{
+ return amba_driver_register(&pl330_driver);
+}
+module_init(pl330_init);
+
+static void __exit pl330_exit(void)
+{
+ amba_driver_unregister(&pl330_driver);
+ return;
+}
+module_exit(pl330_exit);
+
+MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
+MODULE_DESCRIPTION("API Driver for PL330 DMAC");
+MODULE_LICENSE("GPL");
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index d44626fa35ad..fa98abe4686f 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -3935,12 +3935,13 @@ static void ppc440spe_adma_free_chan_resources(struct dma_chan *chan)
}
/**
- * ppc440spe_adma_is_complete - poll the status of an ADMA transaction
+ * ppc440spe_adma_tx_status - poll the status of an ADMA transaction
* @chan: ADMA channel handle
* @cookie: ADMA transaction identifier
+ * @txstate: a holder for the current state of the channel
*/
-static enum dma_status ppc440spe_adma_is_complete(struct dma_chan *chan,
- dma_cookie_t cookie, dma_cookie_t *done, dma_cookie_t *used)
+static enum dma_status ppc440spe_adma_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie, struct dma_tx_state *txstate)
{
struct ppc440spe_adma_chan *ppc440spe_chan;
dma_cookie_t last_used;
@@ -3951,10 +3952,7 @@ static enum dma_status ppc440spe_adma_is_complete(struct dma_chan *chan,
last_used = chan->cookie;
last_complete = ppc440spe_chan->completed_cookie;
- if (done)
- *done = last_complete;
- if (used)
- *used = last_used;
+ dma_set_tx_state(txstate, last_complete, last_used, 0);
ret = dma_async_is_complete(cookie, last_complete, last_used);
if (ret == DMA_SUCCESS)
@@ -3965,10 +3963,7 @@ static enum dma_status ppc440spe_adma_is_complete(struct dma_chan *chan,
last_used = chan->cookie;
last_complete = ppc440spe_chan->completed_cookie;
- if (done)
- *done = last_complete;
- if (used)
- *used = last_used;
+ dma_set_tx_state(txstate, last_complete, last_used, 0);
return dma_async_is_complete(cookie, last_complete, last_used);
}
@@ -4180,7 +4175,7 @@ static void ppc440spe_adma_init_capabilities(struct ppc440spe_adma_device *adev)
ppc440spe_adma_alloc_chan_resources;
adev->common.device_free_chan_resources =
ppc440spe_adma_free_chan_resources;
- adev->common.device_is_tx_complete = ppc440spe_adma_is_complete;
+ adev->common.device_tx_status = ppc440spe_adma_tx_status;
adev->common.device_issue_pending = ppc440spe_adma_issue_pending;
/* Set prep routines based on capability */
@@ -4949,12 +4944,12 @@ static const struct of_device_id ppc440spe_adma_of_match[] __devinitconst = {
MODULE_DEVICE_TABLE(of, ppc440spe_adma_of_match);
static struct of_platform_driver ppc440spe_adma_driver = {
- .match_table = ppc440spe_adma_of_match,
.probe = ppc440spe_adma_probe,
.remove = __devexit_p(ppc440spe_adma_remove),
.driver = {
.name = "PPC440SP(E)-ADMA",
.owner = THIS_MODULE,
+ .of_match_table = ppc440spe_adma_of_match,
},
};
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index 6f25a20de99f..a2a519fd2a24 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -26,8 +26,7 @@
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
-
-#include <asm/dmaengine.h>
+#include <linux/sh_dma.h>
#include "shdma.h"
@@ -45,7 +44,7 @@ enum sh_dmae_desc_status {
#define LOG2_DEFAULT_XFER_SIZE 2
/* A bitmask with bits enough for enum sh_dmae_slave_chan_id */
-static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SHDMA_SLAVE_NUMBER)];
+static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SH_DMA_SLAVE_NUMBER)];
static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all);
@@ -190,7 +189,7 @@ static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
struct sh_dmae_device *shdev = container_of(sh_chan->common.device,
struct sh_dmae_device, common);
struct sh_dmae_pdata *pdata = shdev->pdata;
- struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id];
+ const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id];
u16 __iomem *addr = shdev->dmars + chan_pdata->dmars / sizeof(u16);
int shift = chan_pdata->dmars_bit;
@@ -266,8 +265,8 @@ static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan)
return NULL;
}
-static struct sh_dmae_slave_config *sh_dmae_find_slave(
- struct sh_dmae_chan *sh_chan, enum sh_dmae_slave_chan_id slave_id)
+static const struct sh_dmae_slave_config *sh_dmae_find_slave(
+ struct sh_dmae_chan *sh_chan, struct sh_dmae_slave *param)
{
struct dma_device *dma_dev = sh_chan->common.device;
struct sh_dmae_device *shdev = container_of(dma_dev,
@@ -275,11 +274,11 @@ static struct sh_dmae_slave_config *sh_dmae_find_slave(
struct sh_dmae_pdata *pdata = shdev->pdata;
int i;
- if ((unsigned)slave_id >= SHDMA_SLAVE_NUMBER)
+ if (param->slave_id >= SH_DMA_SLAVE_NUMBER)
return NULL;
for (i = 0; i < pdata->slave_num; i++)
- if (pdata->slave[i].slave_id == slave_id)
+ if (pdata->slave[i].slave_id == param->slave_id)
return pdata->slave + i;
return NULL;
@@ -299,9 +298,9 @@ static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
* never runs concurrently with itself or free_chan_resources.
*/
if (param) {
- struct sh_dmae_slave_config *cfg;
+ const struct sh_dmae_slave_config *cfg;
- cfg = sh_dmae_find_slave(sh_chan, param->slave_id);
+ cfg = sh_dmae_find_slave(sh_chan, param);
if (!cfg) {
ret = -EINVAL;
goto efindslave;
@@ -574,12 +573,14 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
{
struct sh_dmae_slave *param;
struct sh_dmae_chan *sh_chan;
+ dma_addr_t slave_addr;
if (!chan)
return NULL;
sh_chan = to_sh_chan(chan);
param = chan->private;
+ slave_addr = param->config->addr;
/* Someone calling slave DMA on a public channel? */
if (!param || !sg_len) {
@@ -592,16 +593,21 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
* if (param != NULL), this is a successfully requested slave channel,
* therefore param->config != NULL too.
*/
- return sh_dmae_prep_sg(sh_chan, sgl, sg_len, &param->config->addr,
+ return sh_dmae_prep_sg(sh_chan, sgl, sg_len, &slave_addr,
direction, flags);
}
-static void sh_dmae_terminate_all(struct dma_chan *chan)
+static int sh_dmae_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
{
struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
+ /* Only supports DMA_TERMINATE_ALL */
+ if (cmd != DMA_TERMINATE_ALL)
+ return -ENXIO;
+
if (!chan)
- return;
+ return -EINVAL;
dmae_halt(sh_chan);
@@ -617,6 +623,8 @@ static void sh_dmae_terminate_all(struct dma_chan *chan)
spin_unlock_bh(&sh_chan->desc_lock);
sh_dmae_chan_ld_cleanup(sh_chan, true);
+
+ return 0;
}
static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
@@ -714,6 +722,10 @@ static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
{
while (__ld_cleanup(sh_chan, all))
;
+
+ if (all)
+ /* Terminating - forgive uncompleted cookies */
+ sh_chan->completed_cookie = sh_chan->common.cookie;
}
static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
@@ -748,10 +760,9 @@ static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan)
sh_chan_xfer_ld_queue(sh_chan);
}
-static enum dma_status sh_dmae_is_complete(struct dma_chan *chan,
+static enum dma_status sh_dmae_tx_status(struct dma_chan *chan,
dma_cookie_t cookie,
- dma_cookie_t *done,
- dma_cookie_t *used)
+ struct dma_tx_state *txstate)
{
struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
dma_cookie_t last_used;
@@ -763,12 +774,7 @@ static enum dma_status sh_dmae_is_complete(struct dma_chan *chan,
last_used = chan->cookie;
last_complete = sh_chan->completed_cookie;
BUG_ON(last_complete < 0);
-
- if (done)
- *done = last_complete;
-
- if (used)
- *used = last_used;
+ dma_set_tx_state(txstate, last_complete, last_used, 0);
spin_lock_bh(&sh_chan->desc_lock);
@@ -873,7 +879,7 @@ static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
int irq, unsigned long flags)
{
int err;
- struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
+ const struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
struct platform_device *pdev = to_platform_device(shdev->common.dev);
struct sh_dmae_chan *new_sh_chan;
@@ -1040,12 +1046,12 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
= sh_dmae_alloc_chan_resources;
shdev->common.device_free_chan_resources = sh_dmae_free_chan_resources;
shdev->common.device_prep_dma_memcpy = sh_dmae_prep_memcpy;
- shdev->common.device_is_tx_complete = sh_dmae_is_complete;
+ shdev->common.device_tx_status = sh_dmae_tx_status;
shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending;
/* Compulsory for DMA_SLAVE fields */
shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg;
- shdev->common.device_terminate_all = sh_dmae_terminate_all;
+ shdev->common.device_control = sh_dmae_control;
shdev->common.dev = &pdev->dev;
/* Default transfer size of 32 bytes requires 32-byte alignment */
@@ -1186,6 +1192,7 @@ static struct platform_driver sh_dmae_driver = {
.remove = __exit_p(sh_dmae_remove),
.shutdown = sh_dmae_shutdown,
.driver = {
+ .owner = THIS_MODULE,
.name = "sh-dma-engine",
},
};
diff --git a/drivers/dma/shdma.h b/drivers/dma/shdma.h
index 153609a1e96c..4021275a0a43 100644
--- a/drivers/dma/shdma.h
+++ b/drivers/dma/shdma.h
@@ -17,8 +17,8 @@
#include <linux/interrupt.h>
#include <linux/list.h>
-#include <asm/dmaengine.h>
-
+#define SH_DMAC_MAX_CHANNELS 6
+#define SH_DMA_SLAVE_NUMBER 256
#define SH_DMA_TCR_MAX 0x00FFFFFF /* 16MB */
struct device;
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
new file mode 100644
index 000000000000..c426829f6ab8
--- /dev/null
+++ b/drivers/dma/ste_dma40.c
@@ -0,0 +1,2657 @@
+/*
+ * drivers/dma/ste_dma40.c
+ *
+ * Copyright (C) ST-Ericsson 2007-2010
+ * License terms: GNU General Public License (GPL) version 2
+ * Author: Per Friden <per.friden@stericsson.com>
+ * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/dmaengine.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+
+#include <plat/ste_dma40.h>
+
+#include "ste_dma40_ll.h"
+
+#define D40_NAME "dma40"
+
+#define D40_PHY_CHAN -1
+
+/* For masking out/in 2 bit channel positions */
+#define D40_CHAN_POS(chan) (2 * (chan / 2))
+#define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan))
+
+/* Maximum iterations taken before giving up suspending a channel */
+#define D40_SUSPEND_MAX_IT 500
+
+#define D40_ALLOC_FREE (1 << 31)
+#define D40_ALLOC_PHY (1 << 30)
+#define D40_ALLOC_LOG_FREE 0
+
+/* The number of free d40_desc to keep in memory before starting
+ * to kfree() them */
+#define D40_DESC_CACHE_SIZE 50
+
+/* Hardware designer of the block */
+#define D40_PERIPHID2_DESIGNER 0x8
+
+/**
+ * enum d40_command - The different commands and/or statuses.
+ *
+ * @D40_DMA_STOP: DMA channel command STOP or status STOPPED.
+ * @D40_DMA_RUN: The DMA channel is RUNNING or the command is RUN.
+ * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible.
+ * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED.
+ */
+enum d40_command {
+ D40_DMA_STOP = 0,
+ D40_DMA_RUN = 1,
+ D40_DMA_SUSPEND_REQ = 2,
+ D40_DMA_SUSPENDED = 3
+};
+
+/**
+ * struct d40_lli_pool - Structure for keeping LLIs in memory
+ *
+ * @base: Pointer to memory area when the pre_alloc_lli's are not large
+ * enough, i.e. bigger than the most common case, 1 dst and 1 src. NULL if
+ * pre_alloc_lli is used.
+ * @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
+ * @pre_alloc_lli: Pre-allocated area for the most common case of transfers,
+ * one buffer to one buffer.
+ */
+struct d40_lli_pool {
+ void *base;
+ int size;
+ /* Space for dst and src, plus an extra for padding */
+ u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
+};
+
+/**
+ * struct d40_desc - A descriptor is one DMA job.
+ *
+ * @lli_phy: LLI settings for physical channel. Both src and dst
+ * point into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if
+ * lli_len equals one.
+ * @lli_log: Same as above but for logical channels.
+ * @lli_pool: The pool with two entries pre-allocated.
+ * @lli_len: Number of LLIs in lli_pool
+ * @lli_tcount: Number of LLIs processed in the transfer. When equals lli_len
+ * then this transfer job is done.
+ * @txd: DMA engine struct. Used, among other things, for communication
+ * during a transfer.
+ * @node: List entry.
+ * @dir: The transfer direction of this job.
+ * @is_in_client_list: true if the client owns this descriptor.
+ *
+ * This descriptor is used for both logical and physical transfers.
+ */
+
+struct d40_desc {
+ /* LLI physical */
+ struct d40_phy_lli_bidir lli_phy;
+ /* LLI logical */
+ struct d40_log_lli_bidir lli_log;
+
+ struct d40_lli_pool lli_pool;
+ u32 lli_len;
+ u32 lli_tcount;
+
+ struct dma_async_tx_descriptor txd;
+ struct list_head node;
+
+ enum dma_data_direction dir;
+ bool is_in_client_list;
+};
+
+/**
+ * struct d40_lcla_pool - LCLA pool settings and data.
+ *
+ * @base: The virtual address of LCLA.
+ * @phy: Physical base address of LCLA.
+ * @base_size: size of lcla.
+ * @lock: Lock to protect the content in this struct.
+ * @alloc_map: Mapping between physical channel and LCLA entries.
+ * @num_blocks: The number of entries in alloc_map. Equals the
+ * number of physical channels.
+ */
+struct d40_lcla_pool {
+ void *base;
+ dma_addr_t phy;
+ resource_size_t base_size;
+ spinlock_t lock;
+ u32 *alloc_map;
+ int num_blocks;
+};
+
+/**
+ * struct d40_phy_res - struct for handling eventlines mapped to physical
+ * channels.
+ *
+ * @lock: A lock protecting this entity.
+ * @num: The physical channel number of this entity.
+ * @allocated_src: Bit map showing which src event lines are mapped to
+ * this physical channel. Can also be free or physically allocated.
+ * @allocated_dst: Same as for src but for dst.
+ * allocated_dst and allocated_src use the D40_ALLOC* defines as well as
+ * the event line number. allocated_src and allocated_dst cannot both be
+ * allocated to a physical channel, since the interrupt handler would then
+ * have no way of figuring out which one the interrupt belongs to.
+ */
+struct d40_phy_res {
+ spinlock_t lock;
+ int num;
+ u32 allocated_src;
+ u32 allocated_dst;
+};
+
+struct d40_base;
+
+/**
+ * struct d40_chan - Struct that describes a channel.
+ *
+ * @lock: A spinlock to protect this struct.
+ * @log_num: The logical number, if any of this channel.
+ * @completed: Starts at 1; after the first interrupt it is set to the dma
+ * engine's current cookie.
+ * @pending_tx: The number of pending transfers. Used between interrupt handler
+ * and tasklet.
+ * @busy: Set to true when transfer is ongoing on this channel.
+ * @phy_chan: Pointer to physical channel which this instance runs on.
+ * @chan: DMA engine handle.
+ * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
+ * transfer and call client callback.
+ * @client: Client owned descriptor list.
+ * @active: Active descriptor.
+ * @queue: Queued jobs.
+ * @free: List of free descriptors, ready to be reused.
+ * @free_len: Number of descriptors in the free list.
+ * @dma_cfg: The client configuration of this dma channel.
+ * @base: Pointer to the device instance struct.
+ * @src_def_cfg: Default cfg register setting for src.
+ * @dst_def_cfg: Default cfg register setting for dst.
+ * @log_def: Default logical channel settings.
+ * @lcla: Space for one dst src pair for logical channel transfers.
+ * @lcpa: Pointer to dst and src lcpa settings.
+ *
+ * This struct can either "be" a logical or a physical channel.
+ */
+struct d40_chan {
+ spinlock_t lock;
+ int log_num;
+ /* ID of the most recent completed transfer */
+ int completed;
+ int pending_tx;
+ bool busy;
+ struct d40_phy_res *phy_chan;
+ struct dma_chan chan;
+ struct tasklet_struct tasklet;
+ struct list_head client;
+ struct list_head active;
+ struct list_head queue;
+ struct list_head free;
+ int free_len;
+ struct stedma40_chan_cfg dma_cfg;
+ struct d40_base *base;
+ /* Default register configurations */
+ u32 src_def_cfg;
+ u32 dst_def_cfg;
+ struct d40_def_lcsp log_def;
+ struct d40_lcla_elem lcla;
+ struct d40_log_lli_full *lcpa;
+};
+
+/**
+ * struct d40_base - The big global struct, one for each probe'd instance.
+ *
+ * @interrupt_lock: Lock used to make sure one interrupt is handled at a time.
+ * @execmd_lock: Lock for execute command usage since several channels share
+ * the same physical register.
+ * @dev: The device structure.
+ * @virtbase: The virtual base address of the DMA's register.
+ * @clk: Pointer to the DMA clock structure.
+ * @phy_start: Physical memory start of the DMA registers.
+ * @phy_size: Size of the DMA register map.
+ * @irq: The IRQ number.
+ * @num_phy_chans: The number of physical channels. Read from HW. This
+ * is the number of available channels for this driver, not counting "Secure
+ * mode" allocated physical channels.
+ * @num_log_chans: The number of logical channels. Calculated from
+ * num_phy_chans.
+ * @dma_both: dma_device channels that can do both memcpy and slave transfers.
+ * @dma_slave: dma_device channels that can only do slave transfers.
+ * @dma_memcpy: dma_device channels that can only do memcpy transfers.
+ * @phy_chans: Room for all possible physical channels in system.
+ * @log_chans: Room for all possible logical channels in system.
+ * @lookup_log_chans: Used to map interrupt number to logical channel. Points
+ * to log_chans entries.
+ * @lookup_phy_chans: Used to map interrupt number to physical channel. Points
+ * to phy_chans entries.
+ * @plat_data: Pointer to provided platform_data which is the driver
+ * configuration.
+ * @phy_res: Vector containing all physical channels.
+ * @lcla_pool: lcla pool settings and data.
+ * @lcpa_base: The virtual mapped address of LCPA.
+ * @phy_lcpa: The physical address of the LCPA.
+ * @lcpa_size: The size of the LCPA area.
+ */
+struct d40_base {
+ spinlock_t interrupt_lock;
+ spinlock_t execmd_lock;
+ struct device *dev;
+ void __iomem *virtbase;
+ struct clk *clk;
+ phys_addr_t phy_start;
+ resource_size_t phy_size;
+ int irq;
+ int num_phy_chans;
+ int num_log_chans;
+ struct dma_device dma_both;
+ struct dma_device dma_slave;
+ struct dma_device dma_memcpy;
+ struct d40_chan *phy_chans;
+ struct d40_chan *log_chans;
+ struct d40_chan **lookup_log_chans;
+ struct d40_chan **lookup_phy_chans;
+ struct stedma40_platform_data *plat_data;
+ /* Physical half channels */
+ struct d40_phy_res *phy_res;
+ struct d40_lcla_pool lcla_pool;
+ void *lcpa_base;
+ dma_addr_t phy_lcpa;
+ resource_size_t lcpa_size;
+};
+
+/**
+ * struct d40_interrupt_lookup - lookup table for interrupt handler
+ *
+ * @src: Interrupt mask register.
+ * @clr: Interrupt clear register.
+ * @is_error: true if this is an error interrupt.
+ * @offset: start delta in the lookup_log_chans in d40_base. If equal to
+ * D40_PHY_CHAN, lookup_phy_chans shall be used instead.
+ */
+struct d40_interrupt_lookup {
+ u32 src;
+ u32 clr;
+ bool is_error;
+ int offset;
+};
+
+/**
+ * struct d40_reg_val - simple lookup struct
+ *
+ * @reg: The register.
+ * @val: The value that belongs to the register in reg.
+ */
+struct d40_reg_val {
+ unsigned int reg;
+ unsigned int val;
+};
+
+static int d40_pool_lli_alloc(struct d40_desc *d40d,
+ int lli_len, bool is_log)
+{
+ u32 align;
+ void *base;
+
+ if (is_log)
+ align = sizeof(struct d40_log_lli);
+ else
+ align = sizeof(struct d40_phy_lli);
+
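+ /*
+ * A single src/dst LLI pair fits in the descriptor's pre-allocated
+ * area; longer chains get a dedicated allocation.
+ */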
+ if (lli_len == 1) {
+ base = d40d->lli_pool.pre_alloc_lli;
+ d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
+ d40d->lli_pool.base = NULL;
+ } else {
+ d40d->lli_pool.size = ALIGN(lli_len * 2 * align, align);
+
+ base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
+ d40d->lli_pool.base = base;
+
+ if (d40d->lli_pool.base == NULL)
+ return -ENOMEM;
+ }
+
+ if (is_log) {
+ d40d->lli_log.src = PTR_ALIGN((struct d40_log_lli *) base,
+ align);
+ d40d->lli_log.dst = PTR_ALIGN(d40d->lli_log.src + lli_len,
+ align);
+ } else {
+ d40d->lli_phy.src = PTR_ALIGN((struct d40_phy_lli *)base,
+ align);
+ d40d->lli_phy.dst = PTR_ALIGN(d40d->lli_phy.src + lli_len,
+ align);
+
+ d40d->lli_phy.src_addr = virt_to_phys(d40d->lli_phy.src);
+ d40d->lli_phy.dst_addr = virt_to_phys(d40d->lli_phy.dst);
+ }
+
+ return 0;
+}
+
+static void d40_pool_lli_free(struct d40_desc *d40d)
+{
+ kfree(d40d->lli_pool.base);
+ d40d->lli_pool.base = NULL;
+ d40d->lli_pool.size = 0;
+ d40d->lli_log.src = NULL;
+ d40d->lli_log.dst = NULL;
+ d40d->lli_phy.src = NULL;
+ d40d->lli_phy.dst = NULL;
+ d40d->lli_phy.src_addr = 0;
+ d40d->lli_phy.dst_addr = 0;
+}
+
+static dma_cookie_t d40_assign_cookie(struct d40_chan *d40c,
+ struct d40_desc *desc)
+{
+ dma_cookie_t cookie = d40c->chan.cookie;
+
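+ /* Cookies are kept positive; wrap back to 1 on signed overflow */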
+ if (++cookie < 0)
+ cookie = 1;
+
+ d40c->chan.cookie = cookie;
+ desc->txd.cookie = cookie;
+
+ return cookie;
+}
+
+static void d40_desc_reset(struct d40_desc *d40d)
+{
+ d40d->lli_tcount = 0;
+}
+
+static void d40_desc_remove(struct d40_desc *d40d)
+{
+ list_del(&d40d->node);
+}
+
+static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
+{
+ struct d40_desc *desc;
+ struct d40_desc *d;
+ struct d40_desc *_d;
+
+ if (!list_empty(&d40c->client)) {
+ list_for_each_entry_safe(d, _d, &d40c->client, node)
+ if (async_tx_test_ack(&d->txd)) {
+ d40_pool_lli_free(d);
+ d40_desc_remove(d);
+ desc = d;
+ goto out;
+ }
+ }
+
+ if (list_empty(&d40c->free)) {
+ /* Allocate a new desc because the free list is empty */
+ desc = kzalloc(sizeof(struct d40_desc), GFP_NOWAIT);
+ if (desc == NULL)
+ goto out;
+ INIT_LIST_HEAD(&desc->node);
+ } else {
+ /* Reuse an old desc. */
+ desc = list_first_entry(&d40c->free,
+ struct d40_desc,
+ node);
+ list_del(&desc->node);
+ d40c->free_len--;
+ }
+out:
+ return desc;
+}
+
+static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
+{
+ if (d40c->free_len < D40_DESC_CACHE_SIZE) {
+ list_add_tail(&d40d->node, &d40c->free);
+ d40c->free_len++;
+ } else
+ kfree(d40d);
+}
+
+static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
+{
+ list_add_tail(&desc->node, &d40c->active);
+}
+
+static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
+{
+ struct d40_desc *d;
+
+ if (list_empty(&d40c->active))
+ return NULL;
+
+ d = list_first_entry(&d40c->active,
+ struct d40_desc,
+ node);
+ return d;
+}
+
+static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
+{
+ list_add_tail(&desc->node, &d40c->queue);
+}
+
+static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
+{
+ struct d40_desc *d;
+
+ if (list_empty(&d40c->queue))
+ return NULL;
+
+ d = list_first_entry(&d40c->queue,
+ struct d40_desc,
+ node);
+ return d;
+}
+
+/* Support functions for logical channels */
+
+static int d40_lcla_id_get(struct d40_chan *d40c,
+ struct d40_lcla_pool *pool)
+{
+ int src_id = 0;
+ int dst_id = 0;
+ struct d40_log_lli *lcla_lidx_base =
+ pool->base + d40c->phy_chan->num * 1024;
+ int i;
+ int lli_per_log = d40c->base->plat_data->llis_per_log;
+
+ if (d40c->lcla.src_id >= 0 && d40c->lcla.dst_id >= 0)
+ return 0;
+
+ if (pool->num_blocks > 32)
+ return -EINVAL;
+
+ spin_lock(&pool->lock);
+
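+ /*
+ * Find two free entries in this physical channel's alloc_map:
+ * the first found becomes the src block, the search then
+ * continues from there for a dst block.
+ */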
+ for (i = 0; i < pool->num_blocks; i++) {
+ if (!(pool->alloc_map[d40c->phy_chan->num] & (0x1 << i))) {
+ pool->alloc_map[d40c->phy_chan->num] |= (0x1 << i);
+ break;
+ }
+ }
+ src_id = i;
+ if (src_id >= pool->num_blocks)
+ goto err;
+
+ for (; i < pool->num_blocks; i++) {
+ if (!(pool->alloc_map[d40c->phy_chan->num] & (0x1 << i))) {
+ pool->alloc_map[d40c->phy_chan->num] |= (0x1 << i);
+ break;
+ }
+ }
+
+ dst_id = i;
+ if (dst_id == src_id)
+ goto err;
+
+ d40c->lcla.src_id = src_id;
+ d40c->lcla.dst_id = dst_id;
+ d40c->lcla.dst = lcla_lidx_base + dst_id * lli_per_log + 1;
+ d40c->lcla.src = lcla_lidx_base + src_id * lli_per_log + 1;
+
+
+ spin_unlock(&pool->lock);
+ return 0;
+err:
+ spin_unlock(&pool->lock);
+ return -EINVAL;
+}
+
+static void d40_lcla_id_put(struct d40_chan *d40c,
+ struct d40_lcla_pool *pool,
+ int id)
+{
+ if (id < 0)
+ return;
+
+ d40c->lcla.src_id = -1;
+ d40c->lcla.dst_id = -1;
+
+ spin_lock(&pool->lock);
+ pool->alloc_map[d40c->phy_chan->num] &= (~(0x1 << id));
+ spin_unlock(&pool->lock);
+}
+
+static int d40_channel_execute_command(struct d40_chan *d40c,
+ enum d40_command command)
+{
+ int status, i;
+ void __iomem *active_reg;
+ int ret = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&d40c->base->execmd_lock, flags);
+
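+ /*
+ * Each physical channel owns a 2-bit command/status field;
+ * even-numbered channels live in ACTIVE, odd ones in ACTIVO.
+ */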
+ if (d40c->phy_chan->num % 2 == 0)
+ active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
+ else
+ active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
+
+ if (command == D40_DMA_SUSPEND_REQ) {
+ status = (readl(active_reg) &
+ D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
+ D40_CHAN_POS(d40c->phy_chan->num);
+
+ if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
+ goto done;
+ }
+
+ writel(command << D40_CHAN_POS(d40c->phy_chan->num), active_reg);
+
+ if (command == D40_DMA_SUSPEND_REQ) {
+
+ for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) {
+ status = (readl(active_reg) &
+ D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
+ D40_CHAN_POS(d40c->phy_chan->num);
+
+ cpu_relax();
+ /*
+ * Reduce the number of bus accesses while
+ * waiting for the DMA to suspend.
+ */
+ udelay(3);
+
+ if (status == D40_DMA_STOP ||
+ status == D40_DMA_SUSPENDED)
+ break;
+ }
+
+ if (i == D40_SUSPEND_MAX_IT) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s]: unable to suspend the chl %d (log: %d) status %x\n",
+ __func__, d40c->phy_chan->num, d40c->log_num,
+ status);
+ dump_stack();
+ ret = -EBUSY;
+ }
+
+ }
+done:
+ spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
+ return ret;
+}
+
+static void d40_term_all(struct d40_chan *d40c)
+{
+ struct d40_desc *d40d;
+ struct d40_desc *d;
+ struct d40_desc *_d;
+
+ /* Release active descriptors */
+ while ((d40d = d40_first_active_get(d40c))) {
+ d40_desc_remove(d40d);
+
+ /* Return desc to free-list */
+ d40_desc_free(d40c, d40d);
+ }
+
+ /* Release queued descriptors waiting for transfer */
+ while ((d40d = d40_first_queued(d40c))) {
+ d40_desc_remove(d40d);
+
+ /* Return desc to free-list */
+ d40_desc_free(d40c, d40d);
+ }
+
+ /* Release client owned descriptors */
+ if (!list_empty(&d40c->client))
+ list_for_each_entry_safe(d, _d, &d40c->client, node) {
+ d40_pool_lli_free(d);
+ d40_desc_remove(d);
+ /* Return desc to free-list */
+ d40_desc_free(d40c, d);
+ }
+
+ d40_lcla_id_put(d40c, &d40c->base->lcla_pool,
+ d40c->lcla.src_id);
+ d40_lcla_id_put(d40c, &d40c->base->lcla_pool,
+ d40c->lcla.dst_id);
+
+ d40c->pending_tx = 0;
+ d40c->busy = false;
+}
+
+static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
+{
+ u32 val;
+ unsigned long flags;
+
+ if (do_enable)
+ val = D40_ACTIVATE_EVENTLINE;
+ else
+ val = D40_DEACTIVATE_EVENTLINE;
+
+ spin_lock_irqsave(&d40c->phy_chan->lock, flags);
+
+ /* Enable event line connected to device (or memcpy) */
+ if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
+ (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) {
+ u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
+
+ writel((val << D40_EVENTLINE_POS(event)) |
+ ~D40_EVENTLINE_MASK(event),
+ d40c->base->virtbase + D40_DREG_PCBASE +
+ d40c->phy_chan->num * D40_DREG_PCDELTA +
+ D40_CHAN_REG_SSLNK);
+ }
+ if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) {
+ u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
+
+ writel((val << D40_EVENTLINE_POS(event)) |
+ ~D40_EVENTLINE_MASK(event),
+ d40c->base->virtbase + D40_DREG_PCBASE +
+ d40c->phy_chan->num * D40_DREG_PCDELTA +
+ D40_CHAN_REG_SDLNK);
+ }
+
+ spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
+}
+
+static u32 d40_chan_has_events(struct d40_chan *d40c)
+{
+ u32 val = 0;
+
+ /* If SSLNK or SDLNK is zero all events are disabled */
+ if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
+ (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
+ val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
+ d40c->phy_chan->num * D40_DREG_PCDELTA +
+ D40_CHAN_REG_SSLNK);
+
+ if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM)
+ val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
+ d40c->phy_chan->num * D40_DREG_PCDELTA +
+ D40_CHAN_REG_SDLNK);
+ return val;
+}
+
+static void d40_config_enable_lidx(struct d40_chan *d40c)
+{
+ /* Set LIDX for lcla */
+ writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
+ D40_SREG_ELEM_LOG_LIDX_MASK,
+ d40c->base->virtbase + D40_DREG_PCBASE +
+ d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SDELT);
+
+ writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
+ D40_SREG_ELEM_LOG_LIDX_MASK,
+ d40c->base->virtbase + D40_DREG_PCBASE +
+ d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SSELT);
+}
+
+static int d40_config_write(struct d40_chan *d40c)
+{
+ u32 addr_base;
+ u32 var;
+ int res;
+
+ res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
+ if (res)
+ return res;
+
+ /* Odd addresses are even addresses + 4 */
+ addr_base = (d40c->phy_chan->num % 2) * 4;
+ /* Setup channel mode to logical or physical */
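+ /* 1 selects physical mode, 2 selects logical mode */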
+ var = ((u32)(d40c->log_num != D40_PHY_CHAN) + 1) <<
+ D40_CHAN_POS(d40c->phy_chan->num);
+ writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);
+
+ /* Setup operational mode option register */
+ var = ((d40c->dma_cfg.channel_type >> STEDMA40_INFO_CH_MODE_OPT_POS) &
+ 0x3) << D40_CHAN_POS(d40c->phy_chan->num);
+
+ writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);
+
+ if (d40c->log_num != D40_PHY_CHAN) {
+ /* Set default config for CFG reg */
+ writel(d40c->src_def_cfg,
+ d40c->base->virtbase + D40_DREG_PCBASE +
+ d40c->phy_chan->num * D40_DREG_PCDELTA +
+ D40_CHAN_REG_SSCFG);
+ writel(d40c->dst_def_cfg,
+ d40c->base->virtbase + D40_DREG_PCBASE +
+ d40c->phy_chan->num * D40_DREG_PCDELTA +
+ D40_CHAN_REG_SDCFG);
+
+ d40_config_enable_lidx(d40c);
+ }
+ return res;
+}
+
+static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
+{
+
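+ /*
+ * Physical transfers are written to the channel registers in one
+ * go; logical transfers are fed at most llis_per_log entries per
+ * round via the LCPA/LCLA area, with lli_tcount tracking progress.
+ */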
+ if (d40d->lli_phy.dst && d40d->lli_phy.src) {
+ d40_phy_lli_write(d40c->base->virtbase,
+ d40c->phy_chan->num,
+ d40d->lli_phy.dst,
+ d40d->lli_phy.src);
+ d40d->lli_tcount = d40d->lli_len;
+ } else if (d40d->lli_log.dst && d40d->lli_log.src) {
+ u32 lli_len;
+ struct d40_log_lli *src = d40d->lli_log.src;
+ struct d40_log_lli *dst = d40d->lli_log.dst;
+
+ src += d40d->lli_tcount;
+ dst += d40d->lli_tcount;
+
+ if (d40d->lli_len <= d40c->base->plat_data->llis_per_log)
+ lli_len = d40d->lli_len;
+ else
+ lli_len = d40c->base->plat_data->llis_per_log;
+ d40d->lli_tcount += lli_len;
+ d40_log_lli_write(d40c->lcpa, d40c->lcla.src,
+ d40c->lcla.dst,
+ dst, src,
+ d40c->base->plat_data->llis_per_log);
+ }
+}
+
+static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct d40_chan *d40c = container_of(tx->chan,
+ struct d40_chan,
+ chan);
+ struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
+ unsigned long flags;
+
+ spin_lock_irqsave(&d40c->lock, flags);
+
+ tx->cookie = d40_assign_cookie(d40c, d40d);
+
+ d40_desc_queue(d40c, d40d);
+
+ spin_unlock_irqrestore(&d40c->lock, flags);
+
+ return tx->cookie;
+}
+
+static int d40_start(struct d40_chan *d40c)
+{
+ int err;
+
+ if (d40c->log_num != D40_PHY_CHAN) {
+ err = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
+ if (err)
+ return err;
+ d40_config_set_event(d40c, true);
+ }
+
+ err = d40_channel_execute_command(d40c, D40_DMA_RUN);
+
+ return err;
+}
+
+static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
+{
+ struct d40_desc *d40d;
+ int err;
+
+ /* Start queued jobs, if any */
+ d40d = d40_first_queued(d40c);
+
+ if (d40d != NULL) {
+ d40c->busy = true;
+
+ /* Remove from queue */
+ d40_desc_remove(d40d);
+
+ /* Add to active queue */
+ d40_desc_submit(d40c, d40d);
+
+ /* Initiate DMA job */
+ d40_desc_load(d40c, d40d);
+
+ /* Start dma job */
+ err = d40_start(d40c);
+
+ if (err)
+ return NULL;
+ }
+
+ return d40d;
+}
+
+/* Called from interrupt context when a channel signals terminal count */
+static void dma_tc_handle(struct d40_chan *d40c)
+{
+ struct d40_desc *d40d;
+
+ if (!d40c->phy_chan)
+ return;
+
+ /* Get first active entry from list */
+ d40d = d40_first_active_get(d40c);
+
+ if (d40d == NULL)
+ return;
+
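+	/* If the descriptor is not fully loaded yet, load and start the next part */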
+ if (d40d->lli_tcount < d40d->lli_len) {
+
+ d40_desc_load(d40c, d40d);
+ /* Start dma job */
+ (void) d40_start(d40c);
+ return;
+ }
+
+ if (d40_queue_start(d40c) == NULL)
+ d40c->busy = false;
+
+ d40c->pending_tx++;
+ tasklet_schedule(&d40c->tasklet);
+
+}
+
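+/* Bottom half: report completed jobs to the client and recycle descriptors */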
+static void dma_tasklet(unsigned long data)
+{
+ struct d40_chan *d40c = (struct d40_chan *) data;
+ struct d40_desc *d40d_fin;
+ unsigned long flags;
+ dma_async_tx_callback callback;
+ void *callback_param;
+
+ spin_lock_irqsave(&d40c->lock, flags);
+
+ /* Get first active entry from list */
+ d40d_fin = d40_first_active_get(d40c);
+
+ if (d40d_fin == NULL)
+ goto err;
+
+ d40c->completed = d40d_fin->txd.cookie;
+
+	/*
+	 * When a channel is being terminated, pending_tx is set to zero.
+	 * This prevents any finished active jobs from being returned to the
+	 * client.
+	 */
+ if (d40c->pending_tx == 0) {
+ spin_unlock_irqrestore(&d40c->lock, flags);
+ return;
+ }
+
+ /* Callback to client */
+ callback = d40d_fin->txd.callback;
+ callback_param = d40d_fin->txd.callback_param;
+
+ if (async_tx_test_ack(&d40d_fin->txd)) {
+ d40_pool_lli_free(d40d_fin);
+ d40_desc_remove(d40d_fin);
+ /* Return desc to free-list */
+ d40_desc_free(d40c, d40d_fin);
+ } else {
+ d40_desc_reset(d40d_fin);
+ if (!d40d_fin->is_in_client_list) {
+ d40_desc_remove(d40d_fin);
+ list_add_tail(&d40d_fin->node, &d40c->client);
+ d40d_fin->is_in_client_list = true;
+ }
+ }
+
+ d40c->pending_tx--;
+
+ if (d40c->pending_tx)
+ tasklet_schedule(&d40c->tasklet);
+
+ spin_unlock_irqrestore(&d40c->lock, flags);
+
+ if (callback)
+ callback(callback_param);
+
+ return;
+
+ err:
+	/* Rescue manoeuvre in case of double interrupts */
+ if (d40c->pending_tx > 0)
+ d40c->pending_tx--;
+ spin_unlock_irqrestore(&d40c->lock, flags);
+}
+
+static irqreturn_t d40_handle_interrupt(int irq, void *data)
+{
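+	/*
+	 * Map each interrupt status/clear register pair to its channel range:
+	 * four pairs for logical channel terminal-count interrupts, four for
+	 * logical channel errors, and one pair each for physical channel
+	 * terminal count and errors (marked with offset D40_PHY_CHAN).
+	 */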
+ static const struct d40_interrupt_lookup il[] = {
+ {D40_DREG_LCTIS0, D40_DREG_LCICR0, false, 0},
+ {D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
+ {D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
+ {D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
+ {D40_DREG_LCEIS0, D40_DREG_LCICR0, true, 0},
+ {D40_DREG_LCEIS1, D40_DREG_LCICR1, true, 32},
+ {D40_DREG_LCEIS2, D40_DREG_LCICR2, true, 64},
+ {D40_DREG_LCEIS3, D40_DREG_LCICR3, true, 96},
+ {D40_DREG_PCTIS, D40_DREG_PCICR, false, D40_PHY_CHAN},
+ {D40_DREG_PCEIS, D40_DREG_PCICR, true, D40_PHY_CHAN},
+ };
+
+ int i;
+ u32 regs[ARRAY_SIZE(il)];
+ u32 tmp;
+ u32 idx;
+ u32 row;
+ long chan = -1;
+ struct d40_chan *d40c;
+ unsigned long flags;
+ struct d40_base *base = data;
+
+ spin_lock_irqsave(&base->interrupt_lock, flags);
+
+ /* Read interrupt status of both logical and physical channels */
+ for (i = 0; i < ARRAY_SIZE(il); i++)
+ regs[i] = readl(base->virtbase + il[i].src);
+
+ for (;;) {
+
+ chan = find_next_bit((unsigned long *)regs,
+ BITS_PER_LONG * ARRAY_SIZE(il), chan + 1);
+
+ /* No more set bits found? */
+ if (chan == BITS_PER_LONG * ARRAY_SIZE(il))
+ break;
+
+ row = chan / BITS_PER_LONG;
+ idx = chan & (BITS_PER_LONG - 1);
+
+ /* ACK interrupt */
+ tmp = readl(base->virtbase + il[row].clr);
+ tmp |= 1 << idx;
+ writel(tmp, base->virtbase + il[row].clr);
+
+ if (il[row].offset == D40_PHY_CHAN)
+ d40c = base->lookup_phy_chans[idx];
+ else
+ d40c = base->lookup_log_chans[il[row].offset + idx];
+ spin_lock(&d40c->lock);
+
+ if (!il[row].is_error)
+ dma_tc_handle(d40c);
+ else
+ dev_err(base->dev, "[%s] IRQ chan: %ld offset %d idx %d\n",
+ __func__, chan, il[row].offset, idx);
+
+ spin_unlock(&d40c->lock);
+ }
+
+ spin_unlock_irqrestore(&base->interrupt_lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+
+static int d40_validate_conf(struct d40_chan *d40c,
+ struct stedma40_chan_cfg *conf)
+{
+ int res = 0;
+ u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type);
+ u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type);
+ bool is_log = (conf->channel_type & STEDMA40_CHANNEL_IN_OPER_MODE)
+ == STEDMA40_CHANNEL_IN_LOG_MODE;
+
+ if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH &&
+ dst_event_group == STEDMA40_DEV_DST_MEMORY) {
+ dev_err(&d40c->chan.dev->device, "[%s] Invalid dst\n",
+ __func__);
+ res = -EINVAL;
+ }
+
+ if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM &&
+ src_event_group == STEDMA40_DEV_SRC_MEMORY) {
+ dev_err(&d40c->chan.dev->device, "[%s] Invalid src\n",
+ __func__);
+ res = -EINVAL;
+ }
+
+ if (src_event_group == STEDMA40_DEV_SRC_MEMORY &&
+ dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] No event line\n", __func__);
+ res = -EINVAL;
+ }
+
+ if (conf->dir == STEDMA40_PERIPH_TO_PERIPH &&
+ (src_event_group != dst_event_group)) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Invalid event group\n", __func__);
+ res = -EINVAL;
+ }
+
+ if (conf->dir == STEDMA40_PERIPH_TO_PERIPH) {
+ /*
+		 * The DMAC hardware supports it, but support will only be
+		 * added to this driver if a DMA client requires it.
+ */
+ dev_err(&d40c->chan.dev->device,
+ "[%s] periph to periph not supported\n",
+ __func__);
+ res = -EINVAL;
+ }
+
+ return res;
+}
+
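+/*
+ * Try to claim a physical channel, or an event line on it for a logical
+ * channel. Returns true and records the allocation in phy->allocated_src/dst
+ * on success, false if the resource is already taken.
+ */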
+static bool d40_alloc_mask_set(struct d40_phy_res *phy, bool is_src,
+ int log_event_line, bool is_log)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&phy->lock, flags);
+ if (!is_log) {
+ /* Physical interrupts are masked per physical full channel */
+ if (phy->allocated_src == D40_ALLOC_FREE &&
+ phy->allocated_dst == D40_ALLOC_FREE) {
+ phy->allocated_dst = D40_ALLOC_PHY;
+ phy->allocated_src = D40_ALLOC_PHY;
+ goto found;
+ } else
+ goto not_found;
+ }
+
+ /* Logical channel */
+ if (is_src) {
+ if (phy->allocated_src == D40_ALLOC_PHY)
+ goto not_found;
+
+ if (phy->allocated_src == D40_ALLOC_FREE)
+ phy->allocated_src = D40_ALLOC_LOG_FREE;
+
+ if (!(phy->allocated_src & (1 << log_event_line))) {
+ phy->allocated_src |= 1 << log_event_line;
+ goto found;
+ } else
+ goto not_found;
+ } else {
+ if (phy->allocated_dst == D40_ALLOC_PHY)
+ goto not_found;
+
+ if (phy->allocated_dst == D40_ALLOC_FREE)
+ phy->allocated_dst = D40_ALLOC_LOG_FREE;
+
+ if (!(phy->allocated_dst & (1 << log_event_line))) {
+ phy->allocated_dst |= 1 << log_event_line;
+ goto found;
+ } else
+ goto not_found;
+ }
+
+not_found:
+ spin_unlock_irqrestore(&phy->lock, flags);
+ return false;
+found:
+ spin_unlock_irqrestore(&phy->lock, flags);
+ return true;
+}
+
+static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
+ int log_event_line)
+{
+ unsigned long flags;
+ bool is_free = false;
+
+ spin_lock_irqsave(&phy->lock, flags);
+ if (!log_event_line) {
+ /* Physical interrupts are masked per physical full channel */
+ phy->allocated_dst = D40_ALLOC_FREE;
+ phy->allocated_src = D40_ALLOC_FREE;
+ is_free = true;
+ goto out;
+ }
+
+ /* Logical channel */
+ if (is_src) {
+ phy->allocated_src &= ~(1 << log_event_line);
+ if (phy->allocated_src == D40_ALLOC_LOG_FREE)
+ phy->allocated_src = D40_ALLOC_FREE;
+ } else {
+ phy->allocated_dst &= ~(1 << log_event_line);
+ if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
+ phy->allocated_dst = D40_ALLOC_FREE;
+ }
+
+ is_free = ((phy->allocated_src | phy->allocated_dst) ==
+ D40_ALLOC_FREE);
+
+out:
+ spin_unlock_irqrestore(&phy->lock, flags);
+
+ return is_free;
+}
+
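+/* Find and reserve a physical channel, plus an event line for logical channels */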
+static int d40_allocate_channel(struct d40_chan *d40c)
+{
+ int dev_type;
+ int event_group;
+ int event_line;
+ struct d40_phy_res *phys;
+ int i;
+ int j;
+ int log_num;
+ bool is_src;
+ bool is_log = (d40c->dma_cfg.channel_type & STEDMA40_CHANNEL_IN_OPER_MODE)
+ == STEDMA40_CHANNEL_IN_LOG_MODE;
+
+
+ phys = d40c->base->phy_res;
+
+ if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
+ dev_type = d40c->dma_cfg.src_dev_type;
+ log_num = 2 * dev_type;
+ is_src = true;
+ } else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
+ d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
+ /* dst event lines are used for logical memcpy */
+ dev_type = d40c->dma_cfg.dst_dev_type;
+ log_num = 2 * dev_type + 1;
+ is_src = false;
+ } else
+ return -EINVAL;
+
+ event_group = D40_TYPE_TO_GROUP(dev_type);
+ event_line = D40_TYPE_TO_EVENT(dev_type);
+
+ if (!is_log) {
+ if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
+ /* Find physical half channel */
+ for (i = 0; i < d40c->base->num_phy_chans; i++) {
+
+ if (d40_alloc_mask_set(&phys[i], is_src,
+ 0, is_log))
+ goto found_phy;
+ }
+ } else
+ for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
+ int phy_num = j + event_group * 2;
+ for (i = phy_num; i < phy_num + 2; i++) {
+ if (d40_alloc_mask_set(&phys[i], is_src,
+ 0, is_log))
+ goto found_phy;
+ }
+ }
+ return -EINVAL;
+found_phy:
+ d40c->phy_chan = &phys[i];
+ d40c->log_num = D40_PHY_CHAN;
+ goto out;
+ }
+ if (dev_type == -1)
+ return -EINVAL;
+
+ /* Find logical channel */
+ for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
+ int phy_num = j + event_group * 2;
+ /*
+		 * Spread logical channels across all available physical
+		 * channels rather than packing every logical channel onto
+		 * the first available one.
+ */
+ if (is_src) {
+ for (i = phy_num; i < phy_num + 2; i++) {
+ if (d40_alloc_mask_set(&phys[i], is_src,
+ event_line, is_log))
+ goto found_log;
+ }
+ } else {
+ for (i = phy_num + 1; i >= phy_num; i--) {
+ if (d40_alloc_mask_set(&phys[i], is_src,
+ event_line, is_log))
+ goto found_log;
+ }
+ }
+ }
+ return -EINVAL;
+
+found_log:
+ d40c->phy_chan = &phys[i];
+ d40c->log_num = log_num;
+out:
+
+ if (is_log)
+ d40c->base->lookup_log_chans[d40c->log_num] = d40c;
+ else
+ d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c;
+
+ return 0;
+
+}
+
+static int d40_config_chan(struct d40_chan *d40c,
+ struct stedma40_chan_cfg *info)
+{
+
+ /* Fill in basic CFG register values */
+ d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
+ &d40c->dst_def_cfg, d40c->log_num != D40_PHY_CHAN);
+
+ if (d40c->log_num != D40_PHY_CHAN) {
+ d40_log_cfg(&d40c->dma_cfg,
+ &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
+
+ if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
+ d40c->lcpa = d40c->base->lcpa_base +
+ d40c->dma_cfg.src_dev_type * 32;
+ else
+ d40c->lcpa = d40c->base->lcpa_base +
+ d40c->dma_cfg.dst_dev_type * 32 + 16;
+ }
+
+ /* Write channel configuration to the DMA */
+ return d40_config_write(d40c);
+}
+
+static int d40_config_memcpy(struct d40_chan *d40c)
+{
+ dma_cap_mask_t cap = d40c->chan.device->cap_mask;
+
+ if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) {
+ d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_log;
+ d40c->dma_cfg.src_dev_type = STEDMA40_DEV_SRC_MEMORY;
+ d40c->dma_cfg.dst_dev_type = d40c->base->plat_data->
+ memcpy[d40c->chan.chan_id];
+
+ } else if (dma_has_cap(DMA_MEMCPY, cap) &&
+ dma_has_cap(DMA_SLAVE, cap)) {
+ d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy;
+ } else {
+ dev_err(&d40c->chan.dev->device, "[%s] No memcpy\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+
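+/*
+ * Release the channel: terminate all jobs, free the event line for logical
+ * channels and release the physical channel when no allocations remain.
+ */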
+static int d40_free_dma(struct d40_chan *d40c)
+{
+
+ int res = 0;
+ u32 event, dir;
+ struct d40_phy_res *phy = d40c->phy_chan;
+ bool is_src;
+
+ /* Terminate all queued and active transfers */
+ d40_term_all(d40c);
+
+ if (phy == NULL) {
+ dev_err(&d40c->chan.dev->device, "[%s] phy == null\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (phy->allocated_src == D40_ALLOC_FREE &&
+ phy->allocated_dst == D40_ALLOC_FREE) {
+ dev_err(&d40c->chan.dev->device, "[%s] channel already free\n",
+ __func__);
+ return -EINVAL;
+ }
+
+
+ res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
+ if (res) {
+ dev_err(&d40c->chan.dev->device, "[%s] suspend\n",
+ __func__);
+ return res;
+ }
+
+ if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
+ d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
+ event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
+ dir = D40_CHAN_REG_SDLNK;
+ is_src = false;
+ } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
+ event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
+ dir = D40_CHAN_REG_SSLNK;
+ is_src = true;
+ } else {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Unknown direction\n", __func__);
+ return -EINVAL;
+ }
+
+ if (d40c->log_num != D40_PHY_CHAN) {
+ /*
+		 * Release the logical channel: deactivate its event line
+		 * while the physical resource is suspended.
+ */
+ writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event)) &
+ D40_EVENTLINE_MASK(event),
+ d40c->base->virtbase + D40_DREG_PCBASE +
+ phy->num * D40_DREG_PCDELTA + dir);
+
+ d40c->base->lookup_log_chans[d40c->log_num] = NULL;
+
+ /*
+		 * Check if there are more logical allocations
+		 * on this physical channel.
+ */
+ if (!d40_alloc_mask_free(phy, is_src, event)) {
+ /* Resume the other logical channels if any */
+ if (d40_chan_has_events(d40c)) {
+ res = d40_channel_execute_command(d40c,
+ D40_DMA_RUN);
+ if (res) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Executing RUN command\n",
+ __func__);
+ return res;
+ }
+ }
+ return 0;
+ }
+ } else
+ d40_alloc_mask_free(phy, is_src, 0);
+
+ /* Release physical channel */
+ res = d40_channel_execute_command(d40c, D40_DMA_STOP);
+ if (res) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Failed to stop channel\n", __func__);
+ return res;
+ }
+ d40c->phy_chan = NULL;
+ /* Invalidate channel type */
+ d40c->dma_cfg.channel_type = 0;
+ d40c->base->lookup_phy_chans[phy->num] = NULL;
+
+ return 0;
+
+
+}
+
+static int d40_pause(struct dma_chan *chan)
+{
+ struct d40_chan *d40c =
+ container_of(chan, struct d40_chan, chan);
+ int res;
+
+ unsigned long flags;
+
+ spin_lock_irqsave(&d40c->lock, flags);
+
+ res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
+ if (res == 0) {
+ if (d40c->log_num != D40_PHY_CHAN) {
+ d40_config_set_event(d40c, false);
+ /* Resume the other logical channels if any */
+ if (d40_chan_has_events(d40c))
+ res = d40_channel_execute_command(d40c,
+ D40_DMA_RUN);
+ }
+ }
+
+ spin_unlock_irqrestore(&d40c->lock, flags);
+ return res;
+}
+
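+/* Report whether the channel (or its event line, for logical channels) is not running */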
+static bool d40_is_paused(struct d40_chan *d40c)
+{
+ bool is_paused = false;
+ unsigned long flags;
+ void __iomem *active_reg;
+ u32 status;
+ u32 event;
+ int res;
+
+ spin_lock_irqsave(&d40c->lock, flags);
+
+ if (d40c->log_num == D40_PHY_CHAN) {
+ if (d40c->phy_chan->num % 2 == 0)
+ active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
+ else
+ active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
+
+ status = (readl(active_reg) &
+ D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
+ D40_CHAN_POS(d40c->phy_chan->num);
+ if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
+ is_paused = true;
+
+ goto _exit;
+ }
+
+ res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
+ if (res != 0)
+ goto _exit;
+
+ if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
+ d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM)
+ event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
+ else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
+ event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
+ else {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Unknown direction\n", __func__);
+ goto _exit;
+ }
+ status = d40_chan_has_events(d40c);
+ status = (status & D40_EVENTLINE_MASK(event)) >>
+ D40_EVENTLINE_POS(event);
+
+ if (status != D40_DMA_RUN)
+ is_paused = true;
+
+ /* Resume the other logical channels if any */
+ if (d40_chan_has_events(d40c))
+ res = d40_channel_execute_command(d40c,
+ D40_DMA_RUN);
+
+_exit:
+ spin_unlock_irqrestore(&d40c->lock, flags);
+ return is_paused;
+
+}
+
+
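+/* Check whether the hardware has a next link set up for this channel */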
+static bool d40_tx_is_linked(struct d40_chan *d40c)
+{
+ bool is_link;
+
+ if (d40c->log_num != D40_PHY_CHAN)
+ is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
+ else
+ is_link = readl(d40c->base->virtbase + D40_DREG_PCBASE +
+ d40c->phy_chan->num * D40_DREG_PCDELTA +
+ D40_CHAN_REG_SDLNK) &
+ D40_SREG_LNK_PHYS_LNK_MASK;
+ return is_link;
+}
+
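+/* Bytes left of the current transfer, based on the remaining element count */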
+static u32 d40_residue(struct d40_chan *d40c)
+{
+ u32 num_elt;
+
+ if (d40c->log_num != D40_PHY_CHAN)
+ num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
+ >> D40_MEM_LCSP2_ECNT_POS;
+ else
+ num_elt = (readl(d40c->base->virtbase + D40_DREG_PCBASE +
+ d40c->phy_chan->num * D40_DREG_PCDELTA +
+ D40_CHAN_REG_SDELT) &
+ D40_SREG_ELEM_PHY_ECNT_MASK) >> D40_SREG_ELEM_PHY_ECNT_POS;
+ return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
+}
+
+static int d40_resume(struct dma_chan *chan)
+{
+ struct d40_chan *d40c =
+ container_of(chan, struct d40_chan, chan);
+ int res = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&d40c->lock, flags);
+
+ if (d40c->log_num != D40_PHY_CHAN) {
+ res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
+ if (res)
+ goto out;
+
+ /* If bytes left to transfer or linked tx resume job */
+ if (d40_residue(d40c) || d40_tx_is_linked(d40c)) {
+ d40_config_set_event(d40c, true);
+ res = d40_channel_execute_command(d40c, D40_DMA_RUN);
+ }
+ } else if (d40_residue(d40c) || d40_tx_is_linked(d40c))
+ res = d40_channel_execute_command(d40c, D40_DMA_RUN);
+
+out:
+ spin_unlock_irqrestore(&d40c->lock, flags);
+ return res;
+}
+
+static u32 stedma40_residue(struct dma_chan *chan)
+{
+ struct d40_chan *d40c =
+ container_of(chan, struct d40_chan, chan);
+ u32 bytes_left;
+ unsigned long flags;
+
+ spin_lock_irqsave(&d40c->lock, flags);
+ bytes_left = d40_residue(d40c);
+ spin_unlock_irqrestore(&d40c->lock, flags);
+
+ return bytes_left;
+}
+
+/* Public DMA functions in addition to the DMA engine framework */
+
+int stedma40_set_psize(struct dma_chan *chan,
+ int src_psize,
+ int dst_psize)
+{
+ struct d40_chan *d40c =
+ container_of(chan, struct d40_chan, chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&d40c->lock, flags);
+
+ if (d40c->log_num != D40_PHY_CHAN) {
+ d40c->log_def.lcsp1 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK;
+ d40c->log_def.lcsp3 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK;
+ d40c->log_def.lcsp1 |= src_psize << D40_MEM_LCSP1_SCFG_PSIZE_POS;
+ d40c->log_def.lcsp3 |= dst_psize << D40_MEM_LCSP1_SCFG_PSIZE_POS;
+ goto out;
+ }
+
+ if (src_psize == STEDMA40_PSIZE_PHY_1)
+ d40c->src_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS);
+ else {
+ d40c->src_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS;
+ d40c->src_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 <<
+ D40_SREG_CFG_PSIZE_POS);
+ d40c->src_def_cfg |= src_psize << D40_SREG_CFG_PSIZE_POS;
+ }
+
+ if (dst_psize == STEDMA40_PSIZE_PHY_1)
+ d40c->dst_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS);
+ else {
+ d40c->dst_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS;
+ d40c->dst_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 <<
+ D40_SREG_CFG_PSIZE_POS);
+ d40c->dst_def_cfg |= dst_psize << D40_SREG_CFG_PSIZE_POS;
+ }
+out:
+ spin_unlock_irqrestore(&d40c->lock, flags);
+ return 0;
+}
+EXPORT_SYMBOL(stedma40_set_psize);
+
+struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
+ struct scatterlist *sgl_dst,
+ struct scatterlist *sgl_src,
+ unsigned int sgl_len,
+ unsigned long flags)
+{
+ int res;
+ struct d40_desc *d40d;
+ struct d40_chan *d40c = container_of(chan, struct d40_chan,
+ chan);
+ unsigned long flg;
+ int lli_max = d40c->base->plat_data->llis_per_log;
+
+
+ spin_lock_irqsave(&d40c->lock, flg);
+ d40d = d40_desc_get(d40c);
+
+ if (d40d == NULL)
+ goto err;
+
+ memset(d40d, 0, sizeof(struct d40_desc));
+ d40d->lli_len = sgl_len;
+
+ d40d->txd.flags = flags;
+
+ if (d40c->log_num != D40_PHY_CHAN) {
+ if (sgl_len > 1)
+ /*
+			 * Check if there is space available in lcla. If not,
+			 * split the list into one-entry pieces and run only
+			 * in lcpa space.
+ */
+ if (d40_lcla_id_get(d40c,
+ &d40c->base->lcla_pool) != 0)
+ lli_max = 1;
+
+ if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Out of memory\n", __func__);
+ goto err;
+ }
+
+ (void) d40_log_sg_to_lli(d40c->lcla.src_id,
+ sgl_src,
+ sgl_len,
+ d40d->lli_log.src,
+ d40c->log_def.lcsp1,
+ d40c->dma_cfg.src_info.data_width,
+ flags & DMA_PREP_INTERRUPT, lli_max,
+ d40c->base->plat_data->llis_per_log);
+
+ (void) d40_log_sg_to_lli(d40c->lcla.dst_id,
+ sgl_dst,
+ sgl_len,
+ d40d->lli_log.dst,
+ d40c->log_def.lcsp3,
+ d40c->dma_cfg.dst_info.data_width,
+ flags & DMA_PREP_INTERRUPT, lli_max,
+ d40c->base->plat_data->llis_per_log);
+
+
+ } else {
+ if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Out of memory\n", __func__);
+ goto err;
+ }
+
+ res = d40_phy_sg_to_lli(sgl_src,
+ sgl_len,
+ 0,
+ d40d->lli_phy.src,
+ d40d->lli_phy.src_addr,
+ d40c->src_def_cfg,
+ d40c->dma_cfg.src_info.data_width,
+ d40c->dma_cfg.src_info.psize,
+ true);
+
+ if (res < 0)
+ goto err;
+
+ res = d40_phy_sg_to_lli(sgl_dst,
+ sgl_len,
+ 0,
+ d40d->lli_phy.dst,
+ d40d->lli_phy.dst_addr,
+ d40c->dst_def_cfg,
+ d40c->dma_cfg.dst_info.data_width,
+ d40c->dma_cfg.dst_info.psize,
+ true);
+
+ if (res < 0)
+ goto err;
+
+ (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
+ d40d->lli_pool.size, DMA_TO_DEVICE);
+ }
+
+ dma_async_tx_descriptor_init(&d40d->txd, chan);
+
+ d40d->txd.tx_submit = d40_tx_submit;
+
+ spin_unlock_irqrestore(&d40c->lock, flg);
+
+ return &d40d->txd;
+err:
+ spin_unlock_irqrestore(&d40c->lock, flg);
+ return NULL;
+}
+EXPORT_SYMBOL(stedma40_memcpy_sg);
+
+bool stedma40_filter(struct dma_chan *chan, void *data)
+{
+ struct stedma40_chan_cfg *info = data;
+ struct d40_chan *d40c =
+ container_of(chan, struct d40_chan, chan);
+ int err;
+
+ if (data) {
+ err = d40_validate_conf(d40c, info);
+ if (!err)
+ d40c->dma_cfg = *info;
+ } else
+ err = d40_config_memcpy(d40c);
+
+ return err == 0;
+}
+EXPORT_SYMBOL(stedma40_filter);
+
+/* DMA ENGINE functions */
+static int d40_alloc_chan_resources(struct dma_chan *chan)
+{
+ int err;
+ unsigned long flags;
+ struct d40_chan *d40c =
+ container_of(chan, struct d40_chan, chan);
+
+ spin_lock_irqsave(&d40c->lock, flags);
+
+ d40c->completed = chan->cookie = 1;
+
+ /*
+	 * If no dma configuration is set (channel_type == 0),
+	 * use the default memcpy configuration.
+ */
+ if (d40c->dma_cfg.channel_type == 0) {
+ err = d40_config_memcpy(d40c);
+ if (err)
+ goto err_alloc;
+ }
+
+ err = d40_allocate_channel(d40c);
+ if (err) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Failed to allocate channel\n", __func__);
+ goto err_alloc;
+ }
+
+ err = d40_config_chan(d40c, &d40c->dma_cfg);
+ if (err) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Failed to configure channel\n",
+ __func__);
+ goto err_config;
+ }
+
+ spin_unlock_irqrestore(&d40c->lock, flags);
+ return 0;
+
+ err_config:
+ (void) d40_free_dma(d40c);
+ err_alloc:
+ spin_unlock_irqrestore(&d40c->lock, flags);
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Channel allocation failed\n", __func__);
+ return -EINVAL;
+}
+
+static void d40_free_chan_resources(struct dma_chan *chan)
+{
+ struct d40_chan *d40c =
+ container_of(chan, struct d40_chan, chan);
+ int err;
+ unsigned long flags;
+
+ spin_lock_irqsave(&d40c->lock, flags);
+
+ err = d40_free_dma(d40c);
+
+ if (err)
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Failed to free channel\n", __func__);
+ spin_unlock_irqrestore(&d40c->lock, flags);
+}
+
+static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
+ dma_addr_t dst,
+ dma_addr_t src,
+ size_t size,
+ unsigned long flags)
+{
+ struct d40_desc *d40d;
+ struct d40_chan *d40c = container_of(chan, struct d40_chan,
+ chan);
+ unsigned long flg;
+ int err = 0;
+
+ spin_lock_irqsave(&d40c->lock, flg);
+ d40d = d40_desc_get(d40c);
+
+ if (d40d == NULL) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Descriptor is NULL\n", __func__);
+ goto err;
+ }
+
+ memset(d40d, 0, sizeof(struct d40_desc));
+
+ d40d->txd.flags = flags;
+
+ dma_async_tx_descriptor_init(&d40d->txd, chan);
+
+ d40d->txd.tx_submit = d40_tx_submit;
+
+ if (d40c->log_num != D40_PHY_CHAN) {
+
+ if (d40_pool_lli_alloc(d40d, 1, true) < 0) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Out of memory\n", __func__);
+ goto err;
+ }
+ d40d->lli_len = 1;
+
+ d40_log_fill_lli(d40d->lli_log.src,
+ src,
+ size,
+ 0,
+ d40c->log_def.lcsp1,
+ d40c->dma_cfg.src_info.data_width,
+ true, true);
+
+ d40_log_fill_lli(d40d->lli_log.dst,
+ dst,
+ size,
+ 0,
+ d40c->log_def.lcsp3,
+ d40c->dma_cfg.dst_info.data_width,
+ true, true);
+
+ } else {
+
+ if (d40_pool_lli_alloc(d40d, 1, false) < 0) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Out of memory\n", __func__);
+ goto err;
+ }
+
+ err = d40_phy_fill_lli(d40d->lli_phy.src,
+ src,
+ size,
+ d40c->dma_cfg.src_info.psize,
+ 0,
+ d40c->src_def_cfg,
+ true,
+ d40c->dma_cfg.src_info.data_width,
+ false);
+ if (err)
+ goto err_fill_lli;
+
+ err = d40_phy_fill_lli(d40d->lli_phy.dst,
+ dst,
+ size,
+ d40c->dma_cfg.dst_info.psize,
+ 0,
+ d40c->dst_def_cfg,
+ true,
+ d40c->dma_cfg.dst_info.data_width,
+ false);
+
+ if (err)
+ goto err_fill_lli;
+
+ (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
+ d40d->lli_pool.size, DMA_TO_DEVICE);
+ }
+
+ spin_unlock_irqrestore(&d40c->lock, flg);
+ return &d40d->txd;
+
+err_fill_lli:
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Failed filling in PHY LLI\n", __func__);
+ d40_pool_lli_free(d40d);
+err:
+ spin_unlock_irqrestore(&d40c->lock, flg);
+ return NULL;
+}
+
+static int d40_prep_slave_sg_log(struct d40_desc *d40d,
+ struct d40_chan *d40c,
+ struct scatterlist *sgl,
+ unsigned int sg_len,
+ enum dma_data_direction direction,
+ unsigned long flags)
+{
+ dma_addr_t dev_addr = 0;
+ int total_size;
+ int lli_max = d40c->base->plat_data->llis_per_log;
+
+ if (d40_pool_lli_alloc(d40d, sg_len, true) < 0) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Out of memory\n", __func__);
+ return -ENOMEM;
+ }
+
+ d40d->lli_len = sg_len;
+ d40d->lli_tcount = 0;
+
+ if (sg_len > 1)
+ /*
+ * Check if there is space available in lcla.
+		 * If not, split the list into one-entry pieces and run
+		 * only in lcpa space.
+ */
+ if (d40_lcla_id_get(d40c, &d40c->base->lcla_pool) != 0)
+ lli_max = 1;
+
+ if (direction == DMA_FROM_DEVICE) {
+ dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
+ total_size = d40_log_sg_to_dev(&d40c->lcla,
+ sgl, sg_len,
+ &d40d->lli_log,
+ &d40c->log_def,
+ d40c->dma_cfg.src_info.data_width,
+ d40c->dma_cfg.dst_info.data_width,
+ direction,
+ flags & DMA_PREP_INTERRUPT,
+ dev_addr, lli_max,
+ d40c->base->plat_data->llis_per_log);
+ } else if (direction == DMA_TO_DEVICE) {
+ dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
+ total_size = d40_log_sg_to_dev(&d40c->lcla,
+ sgl, sg_len,
+ &d40d->lli_log,
+ &d40c->log_def,
+ d40c->dma_cfg.src_info.data_width,
+ d40c->dma_cfg.dst_info.data_width,
+ direction,
+ flags & DMA_PREP_INTERRUPT,
+ dev_addr, lli_max,
+ d40c->base->plat_data->llis_per_log);
+ } else
+ return -EINVAL;
+ if (total_size < 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
+ struct d40_chan *d40c,
+ struct scatterlist *sgl,
+ unsigned int sgl_len,
+ enum dma_data_direction direction,
+ unsigned long flags)
+{
+ dma_addr_t src_dev_addr;
+ dma_addr_t dst_dev_addr;
+ int res;
+
+ if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Out of memory\n", __func__);
+ return -ENOMEM;
+ }
+
+ d40d->lli_len = sgl_len;
+ d40d->lli_tcount = 0;
+
+ if (direction == DMA_FROM_DEVICE) {
+ dst_dev_addr = 0;
+ src_dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
+ } else if (direction == DMA_TO_DEVICE) {
+ dst_dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
+ src_dev_addr = 0;
+ } else
+ return -EINVAL;
+
+ res = d40_phy_sg_to_lli(sgl,
+ sgl_len,
+ src_dev_addr,
+ d40d->lli_phy.src,
+ d40d->lli_phy.src_addr,
+ d40c->src_def_cfg,
+ d40c->dma_cfg.src_info.data_width,
+ d40c->dma_cfg.src_info.psize,
+ true);
+ if (res < 0)
+ return res;
+
+ res = d40_phy_sg_to_lli(sgl,
+ sgl_len,
+ dst_dev_addr,
+ d40d->lli_phy.dst,
+ d40d->lli_phy.dst_addr,
+ d40c->dst_def_cfg,
+ d40c->dma_cfg.dst_info.data_width,
+ d40c->dma_cfg.dst_info.psize,
+ true);
+ if (res < 0)
+ return res;
+
+ (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
+ d40d->lli_pool.size, DMA_TO_DEVICE);
+ return 0;
+}
+
+static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
+ struct scatterlist *sgl,
+ unsigned int sg_len,
+ enum dma_data_direction direction,
+ unsigned long flags)
+{
+ struct d40_desc *d40d;
+ struct d40_chan *d40c = container_of(chan, struct d40_chan,
+ chan);
+ unsigned long flg;
+ int err;
+
+ if (d40c->dma_cfg.pre_transfer)
+ d40c->dma_cfg.pre_transfer(chan,
+ d40c->dma_cfg.pre_transfer_data,
+ sg_dma_len(sgl));
+
+ spin_lock_irqsave(&d40c->lock, flg);
+ d40d = d40_desc_get(d40c);
+ spin_unlock_irqrestore(&d40c->lock, flg);
+
+ if (d40d == NULL)
+ return NULL;
+
+ memset(d40d, 0, sizeof(struct d40_desc));
+
+ if (d40c->log_num != D40_PHY_CHAN)
+ err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len,
+ direction, flags);
+ else
+ err = d40_prep_slave_sg_phy(d40d, d40c, sgl, sg_len,
+ direction, flags);
+ if (err) {
+ dev_err(&d40c->chan.dev->device,
+ "[%s] Failed to prepare %s slave sg job: %d\n",
+ __func__,
+ d40c->log_num != D40_PHY_CHAN ? "log" : "phy", err);
+ return NULL;
+ }
+
+ d40d->txd.flags = flags;
+
+ dma_async_tx_descriptor_init(&d40d->txd, chan);
+
+ d40d->txd.tx_submit = d40_tx_submit;
+
+ return &d40d->txd;
+}
+
+static enum dma_status d40_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
+ dma_cookie_t last_used;
+ dma_cookie_t last_complete;
+ int ret;
+
+ last_complete = d40c->completed;
+ last_used = chan->cookie;
+
+ if (d40_is_paused(d40c))
+ ret = DMA_PAUSED;
+ else
+ ret = dma_async_is_complete(cookie, last_complete, last_used);
+
+ dma_set_tx_state(txstate, last_complete, last_used,
+ stedma40_residue(chan));
+
+ return ret;
+}
+
+static void d40_issue_pending(struct dma_chan *chan)
+{
+ struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&d40c->lock, flags);
+
+ /* Busy means that pending jobs are already being processed */
+ if (!d40c->busy)
+ (void) d40_queue_start(d40c);
+
+ spin_unlock_irqrestore(&d40c->lock, flags);
+}
+
+static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ unsigned long flags;
+ struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
+
+ switch (cmd) {
+ case DMA_TERMINATE_ALL:
+ spin_lock_irqsave(&d40c->lock, flags);
+ d40_term_all(d40c);
+ spin_unlock_irqrestore(&d40c->lock, flags);
+ return 0;
+ case DMA_PAUSE:
+ return d40_pause(chan);
+ case DMA_RESUME:
+ return d40_resume(chan);
+ }
+
+ /* Other commands are unimplemented */
+ return -ENXIO;
+}
+
+/* Initialization functions */
+
+static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
+ struct d40_chan *chans, int offset,
+ int num_chans)
+{
+ int i = 0;
+ struct d40_chan *d40c;
+
+ INIT_LIST_HEAD(&dma->channels);
+
+ for (i = offset; i < offset + num_chans; i++) {
+ d40c = &chans[i];
+ d40c->base = base;
+ d40c->chan.device = dma;
+
+ /* Invalidate lcla element */
+ d40c->lcla.src_id = -1;
+ d40c->lcla.dst_id = -1;
+
+ spin_lock_init(&d40c->lock);
+
+ d40c->log_num = D40_PHY_CHAN;
+
+ INIT_LIST_HEAD(&d40c->free);
+ INIT_LIST_HEAD(&d40c->active);
+ INIT_LIST_HEAD(&d40c->queue);
+ INIT_LIST_HEAD(&d40c->client);
+
+ d40c->free_len = 0;
+
+ tasklet_init(&d40c->tasklet, dma_tasklet,
+ (unsigned long) d40c);
+
+ list_add_tail(&d40c->chan.device_node,
+ &dma->channels);
+ }
+}
+
+static int __init d40_dmaengine_init(struct d40_base *base,
+ int num_reserved_chans)
+{
+	int err;
+
+ d40_chan_init(base, &base->dma_slave, base->log_chans,
+ 0, base->num_log_chans);
+
+ dma_cap_zero(base->dma_slave.cap_mask);
+ dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);
+
+ base->dma_slave.device_alloc_chan_resources = d40_alloc_chan_resources;
+ base->dma_slave.device_free_chan_resources = d40_free_chan_resources;
+ base->dma_slave.device_prep_dma_memcpy = d40_prep_memcpy;
+ base->dma_slave.device_prep_slave_sg = d40_prep_slave_sg;
+ base->dma_slave.device_tx_status = d40_tx_status;
+ base->dma_slave.device_issue_pending = d40_issue_pending;
+ base->dma_slave.device_control = d40_control;
+ base->dma_slave.dev = base->dev;
+
+ err = dma_async_device_register(&base->dma_slave);
+
+ if (err) {
+ dev_err(base->dev,
+ "[%s] Failed to register slave channels\n",
+ __func__);
+ goto failure1;
+ }
+
+ d40_chan_init(base, &base->dma_memcpy, base->log_chans,
+ base->num_log_chans, base->plat_data->memcpy_len);
+
+ dma_cap_zero(base->dma_memcpy.cap_mask);
+ dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
+
+ base->dma_memcpy.device_alloc_chan_resources = d40_alloc_chan_resources;
+ base->dma_memcpy.device_free_chan_resources = d40_free_chan_resources;
+ base->dma_memcpy.device_prep_dma_memcpy = d40_prep_memcpy;
+ base->dma_memcpy.device_prep_slave_sg = d40_prep_slave_sg;
+ base->dma_memcpy.device_tx_status = d40_tx_status;
+ base->dma_memcpy.device_issue_pending = d40_issue_pending;
+ base->dma_memcpy.device_control = d40_control;
+ base->dma_memcpy.dev = base->dev;
+ /*
+	 * This controller can only access addresses at even
+	 * 32-bit (2^2 byte) boundaries.
+ */
+ base->dma_memcpy.copy_align = 2;
+
+ err = dma_async_device_register(&base->dma_memcpy);
+
+ if (err) {
+ dev_err(base->dev,
+			"[%s] Failed to register memcpy-only channels\n",
+ __func__);
+ goto failure2;
+ }
+
+ d40_chan_init(base, &base->dma_both, base->phy_chans,
+ 0, num_reserved_chans);
+
+ dma_cap_zero(base->dma_both.cap_mask);
+ dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
+ dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
+
+ base->dma_both.device_alloc_chan_resources = d40_alloc_chan_resources;
+ base->dma_both.device_free_chan_resources = d40_free_chan_resources;
+ base->dma_both.device_prep_dma_memcpy = d40_prep_memcpy;
+ base->dma_both.device_prep_slave_sg = d40_prep_slave_sg;
+ base->dma_both.device_tx_status = d40_tx_status;
+ base->dma_both.device_issue_pending = d40_issue_pending;
+ base->dma_both.device_control = d40_control;
+ base->dma_both.dev = base->dev;
+ base->dma_both.copy_align = 2;
+ err = dma_async_device_register(&base->dma_both);
+
+ if (err) {
+ dev_err(base->dev,
+ "[%s] Failed to register logical and physical capable channels\n",
+ __func__);
+ goto failure3;
+ }
+ return 0;
+failure3:
+ dma_async_device_unregister(&base->dma_memcpy);
+failure2:
+ dma_async_device_unregister(&base->dma_slave);
+failure1:
+ return err;
+}
+
+/* Initialization functions. */
+
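+/*
+ * Mark security-only physical channels as occupied, initialise the
+ * per-channel locks and return the number of channels available to
+ * this driver.
+ */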
+static int __init d40_phy_res_init(struct d40_base *base)
+{
+ int i;
+ int num_phy_chans_avail = 0;
+ u32 val[2];
+ int odd_even_bit = -2;
+
+ val[0] = readl(base->virtbase + D40_DREG_PRSME);
+ val[1] = readl(base->virtbase + D40_DREG_PRSMO);
+
+ for (i = 0; i < base->num_phy_chans; i++) {
+ base->phy_res[i].num = i;
+ odd_even_bit += 2 * ((i % 2) == 0);
+ if (((val[i % 2] >> odd_even_bit) & 3) == 1) {
+			/* Mark security-only channels as occupied */
+ base->phy_res[i].allocated_src = D40_ALLOC_PHY;
+ base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
+ } else {
+ base->phy_res[i].allocated_src = D40_ALLOC_FREE;
+ base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
+ num_phy_chans_avail++;
+ }
+ spin_lock_init(&base->phy_res[i].lock);
+ }
+ dev_info(base->dev, "%d of %d physical DMA channels available\n",
+ num_phy_chans_avail, base->num_phy_chans);
+
+ /* Verify settings extended vs standard */
+ val[0] = readl(base->virtbase + D40_DREG_PRTYP);
+
+ for (i = 0; i < base->num_phy_chans; i++) {
+
+ if (base->phy_res[i].allocated_src == D40_ALLOC_FREE &&
+ (val[0] & 0x3) != 1)
+ dev_info(base->dev,
+ "[%s] INFO: channel %d is misconfigured (%d)\n",
+ __func__, i, val[0] & 0x3);
+
+ val[0] = val[0] >> 2;
+ }
+
+ return num_phy_chans_avail;
+}
+
+static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
+{
+ static const struct d40_reg_val dma_id_regs[] = {
+ /* Peripheral Id */
+ { .reg = D40_DREG_PERIPHID0, .val = 0x0040},
+ { .reg = D40_DREG_PERIPHID1, .val = 0x0000},
+ /*
+ * D40_DREG_PERIPHID2 Depends on HW revision:
+ * MOP500/HREF ED has 0x0008,
+ * ? has 0x0018,
+ * HREF V1 has 0x0028
+ */
+ { .reg = D40_DREG_PERIPHID3, .val = 0x0000},
+
+ /* PCell Id */
+ { .reg = D40_DREG_CELLID0, .val = 0x000d},
+ { .reg = D40_DREG_CELLID1, .val = 0x00f0},
+ { .reg = D40_DREG_CELLID2, .val = 0x0005},
+ { .reg = D40_DREG_CELLID3, .val = 0x00b1}
+ };
+ struct stedma40_platform_data *plat_data;
+ struct clk *clk = NULL;
+ void __iomem *virtbase = NULL;
+ struct resource *res = NULL;
+ struct d40_base *base = NULL;
+ int num_log_chans = 0;
+ int num_phy_chans;
+ int i;
+
+ clk = clk_get(&pdev->dev, NULL);
+
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "[%s] No matching clock found\n",
+ __func__);
+ goto failure;
+ }
+
+ clk_enable(clk);
+
+ /* Get IO for DMAC base address */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
+ if (!res)
+ goto failure;
+
+ if (request_mem_region(res->start, resource_size(res),
+ D40_NAME " I/O base") == NULL)
+ goto failure;
+
+ virtbase = ioremap(res->start, resource_size(res));
+ if (!virtbase)
+ goto failure;
+
+ /* HW version check */
+ for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) {
+ if (dma_id_regs[i].val !=
+ readl(virtbase + dma_id_regs[i].reg)) {
+ dev_err(&pdev->dev,
+ "[%s] Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n",
+ __func__,
+ dma_id_regs[i].val,
+ dma_id_regs[i].reg,
+ readl(virtbase + dma_id_regs[i].reg));
+ goto failure;
+ }
+ }
+
+ i = readl(virtbase + D40_DREG_PERIPHID2);
+
+ if ((i & 0xf) != D40_PERIPHID2_DESIGNER) {
+ dev_err(&pdev->dev,
+ "[%s] Unknown designer! Got %x wanted %x\n",
+ __func__, i & 0xf, D40_PERIPHID2_DESIGNER);
+ goto failure;
+ }
+
+ /* The number of physical channels on this HW */
+ num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
+
+ dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n",
+ (i >> 4) & 0xf, res->start);
+
+ plat_data = pdev->dev.platform_data;
+
+ /* Count the number of logical channels in use */
+ for (i = 0; i < plat_data->dev_len; i++)
+ if (plat_data->dev_rx[i] != 0)
+ num_log_chans++;
+
+ for (i = 0; i < plat_data->dev_len; i++)
+ if (plat_data->dev_tx[i] != 0)
+ num_log_chans++;
+
+ base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
+ (num_phy_chans + num_log_chans + plat_data->memcpy_len) *
+ sizeof(struct d40_chan), GFP_KERNEL);
+
+ if (base == NULL) {
+ dev_err(&pdev->dev, "[%s] Out of memory\n", __func__);
+ goto failure;
+ }
+
+ base->clk = clk;
+ base->num_phy_chans = num_phy_chans;
+ base->num_log_chans = num_log_chans;
+ base->phy_start = res->start;
+ base->phy_size = resource_size(res);
+ base->virtbase = virtbase;
+ base->plat_data = plat_data;
+ base->dev = &pdev->dev;
+ base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
+ base->log_chans = &base->phy_chans[num_phy_chans];
+
+ base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res),
+ GFP_KERNEL);
+ if (!base->phy_res)
+ goto failure;
+
+ base->lookup_phy_chans = kzalloc(num_phy_chans *
+ sizeof(struct d40_chan *),
+ GFP_KERNEL);
+ if (!base->lookup_phy_chans)
+ goto failure;
+
+ if (num_log_chans + plat_data->memcpy_len) {
+ /*
+		 * The maximum number of logical channels equals the number
+		 * of event lines for all src and dst devices.
+ */
+ base->lookup_log_chans = kzalloc(plat_data->dev_len * 2 *
+ sizeof(struct d40_chan *),
+ GFP_KERNEL);
+ if (!base->lookup_log_chans)
+ goto failure;
+ }
+ base->lcla_pool.alloc_map = kzalloc(num_phy_chans * sizeof(u32),
+ GFP_KERNEL);
+ if (!base->lcla_pool.alloc_map)
+ goto failure;
+
+ return base;
+
+failure:
+ if (clk) {
+ clk_disable(clk);
+ clk_put(clk);
+ }
+	if (virtbase)
+		iounmap(virtbase);
+	if (res)
+		release_mem_region(res->start,
+				   resource_size(res));
+
+ if (base) {
+ kfree(base->lcla_pool.alloc_map);
+ kfree(base->lookup_log_chans);
+ kfree(base->lookup_phy_chans);
+ kfree(base->phy_res);
+ kfree(base);
+ }
+
+ return NULL;
+}
+
+static void __init d40_hw_init(struct d40_base *base)
+{
+
+ static const struct d40_reg_val dma_init_reg[] = {
+ /* Clock every part of the DMA block from start */
+ { .reg = D40_DREG_GCC, .val = 0x0000ff01},
+
+ /* Interrupts on all logical channels */
+ { .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
+ { .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
+ };
+ int i;
+ u32 prmseo[2] = {0, 0};
+ u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
+ u32 pcmis = 0;
+ u32 pcicr = 0;
+
+ for (i = 0; i < ARRAY_SIZE(dma_init_reg); i++)
+ writel(dma_init_reg[i].val,
+ base->virtbase + dma_init_reg[i].reg);
+
+ /* Configure all our dma channels to default settings */
+ for (i = 0; i < base->num_phy_chans; i++) {
+
+ activeo[i % 2] = activeo[i % 2] << 2;
+
+ if (base->phy_res[base->num_phy_chans - i - 1].allocated_src
+ == D40_ALLOC_PHY) {
+ activeo[i % 2] |= 3;
+ continue;
+ }
+
+		/* Enable interrupt for this channel */
+ pcmis = (pcmis << 1) | 1;
+
+		/* Clear interrupt for this channel */
+ pcicr = (pcicr << 1) | 1;
+
+ /* Set channel to physical mode */
+ prmseo[i % 2] = prmseo[i % 2] << 2;
+ prmseo[i % 2] |= 1;
+
+ }
+
+ writel(prmseo[1], base->virtbase + D40_DREG_PRMSE);
+ writel(prmseo[0], base->virtbase + D40_DREG_PRMSO);
+ writel(activeo[1], base->virtbase + D40_DREG_ACTIVE);
+ writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);
+
+ /* Write which interrupt to enable */
+ writel(pcmis, base->virtbase + D40_DREG_PCMIS);
+
+ /* Write which interrupt to clear */
+ writel(pcicr, base->virtbase + D40_DREG_PCICR);
+
+}
+
+static int __init d40_probe(struct platform_device *pdev)
+{
+ int err;
+ int ret = -ENOENT;
+ struct d40_base *base;
+ struct resource *res = NULL;
+ int num_reserved_chans;
+ u32 val;
+
+ base = d40_hw_detect_init(pdev);
+
+ if (!base)
+ goto failure;
+
+ num_reserved_chans = d40_phy_res_init(base);
+
+ platform_set_drvdata(pdev, base);
+
+ spin_lock_init(&base->interrupt_lock);
+ spin_lock_init(&base->execmd_lock);
+
+ /* Get IO for logical channel parameter address */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
+ if (!res) {
+ ret = -ENOENT;
+ dev_err(&pdev->dev,
+ "[%s] No \"lcpa\" memory resource\n",
+ __func__);
+ goto failure;
+ }
+ base->lcpa_size = resource_size(res);
+ base->phy_lcpa = res->start;
+
+ if (request_mem_region(res->start, resource_size(res),
+ D40_NAME " I/O lcpa") == NULL) {
+ ret = -EBUSY;
+ dev_err(&pdev->dev,
+ "[%s] Failed to request LCPA region 0x%x-0x%x\n",
+ __func__, res->start, res->end);
+ goto failure;
+ }
+
+ /* We make use of ESRAM memory for this. */
+ val = readl(base->virtbase + D40_DREG_LCPA);
+ if (res->start != val && val != 0) {
+ dev_warn(&pdev->dev,
+ "[%s] Mismatch LCPA dma 0x%x, def 0x%x\n",
+ __func__, val, res->start);
+ } else
+ writel(res->start, base->virtbase + D40_DREG_LCPA);
+
+ base->lcpa_base = ioremap(res->start, resource_size(res));
+ if (!base->lcpa_base) {
+ ret = -ENOMEM;
+ dev_err(&pdev->dev,
+ "[%s] Failed to ioremap LCPA region\n",
+ __func__);
+ goto failure;
+ }
+ /* Get IO for logical channel link address */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcla");
+ if (!res) {
+ ret = -ENOENT;
+ dev_err(&pdev->dev,
+ "[%s] No \"lcla\" resource defined\n",
+ __func__);
+ goto failure;
+ }
+
+ base->lcla_pool.base_size = resource_size(res);
+ base->lcla_pool.phy = res->start;
+
+ if (request_mem_region(res->start, resource_size(res),
+ D40_NAME " I/O lcla") == NULL) {
+ ret = -EBUSY;
+ dev_err(&pdev->dev,
+ "[%s] Failed to request LCLA region 0x%x-0x%x\n",
+ __func__, res->start, res->end);
+ goto failure;
+ }
+ val = readl(base->virtbase + D40_DREG_LCLA);
+ if (res->start != val && val != 0) {
+ dev_warn(&pdev->dev,
+ "[%s] Mismatch LCLA dma 0x%x, def 0x%x\n",
+ __func__, val, res->start);
+ } else
+ writel(res->start, base->virtbase + D40_DREG_LCLA);
+
+ base->lcla_pool.base = ioremap(res->start, resource_size(res));
+ if (!base->lcla_pool.base) {
+ ret = -ENOMEM;
+ dev_err(&pdev->dev,
+ "[%s] Failed to ioremap LCLA 0x%x-0x%x\n",
+ __func__, res->start, res->end);
+ goto failure;
+ }
+
+ spin_lock_init(&base->lcla_pool.lock);
+
+ base->lcla_pool.num_blocks = base->num_phy_chans;
+
+ base->irq = platform_get_irq(pdev, 0);
+
+ ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
+
+ if (ret) {
+ dev_err(&pdev->dev, "[%s] No IRQ defined\n", __func__);
+ goto failure;
+ }
+
+ err = d40_dmaengine_init(base, num_reserved_chans);
+ if (err)
+ goto failure;
+
+ d40_hw_init(base);
+
+ dev_info(base->dev, "initialized\n");
+ return 0;
+
+failure:
+ if (base) {
+ if (base->virtbase)
+ iounmap(base->virtbase);
+ if (base->lcla_pool.phy)
+ release_mem_region(base->lcla_pool.phy,
+ base->lcla_pool.base_size);
+ if (base->phy_lcpa)
+ release_mem_region(base->phy_lcpa,
+ base->lcpa_size);
+ if (base->phy_start)
+ release_mem_region(base->phy_start,
+ base->phy_size);
+ if (base->clk) {
+ clk_disable(base->clk);
+ clk_put(base->clk);
+ }
+
+ kfree(base->lcla_pool.alloc_map);
+ kfree(base->lookup_log_chans);
+ kfree(base->lookup_phy_chans);
+ kfree(base->phy_res);
+ kfree(base);
+ }
+
+ dev_err(&pdev->dev, "[%s] probe failed\n", __func__);
+ return ret;
+}
+
+static struct platform_driver d40_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = D40_NAME,
+ },
+};
+
+int __init stedma40_init(void)
+{
+ return platform_driver_probe(&d40_driver, d40_probe);
+}
+arch_initcall(stedma40_init);
diff --git a/drivers/dma/ste_dma40_ll.c b/drivers/dma/ste_dma40_ll.c
new file mode 100644
index 000000000000..561fdd8a80c1
--- /dev/null
+++ b/drivers/dma/ste_dma40_ll.c
@@ -0,0 +1,454 @@
+/*
+ * drivers/dma/ste_dma40_ll.c
+ *
+ * Copyright (C) ST-Ericsson 2007-2010
+ * License terms: GNU General Public License (GPL) version 2
+ * Author: Per Friden <per.friden@stericsson.com>
+ * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
+ */
+
+#include <linux/kernel.h>
+#include <plat/ste_dma40.h>
+
+#include "ste_dma40_ll.h"
+
+/* Sets up proper LCSP1 and LCSP3 register for a logical channel */
+void d40_log_cfg(struct stedma40_chan_cfg *cfg,
+ u32 *lcsp1, u32 *lcsp3)
+{
+ u32 l3 = 0; /* dst */
+ u32 l1 = 0; /* src */
+
+ /* src is mem? -> increase address pos */
+ if (cfg->dir == STEDMA40_MEM_TO_PERIPH ||
+ cfg->dir == STEDMA40_MEM_TO_MEM)
+ l1 |= 1 << D40_MEM_LCSP1_SCFG_INCR_POS;
+
+ /* dst is mem? -> increase address pos */
+ if (cfg->dir == STEDMA40_PERIPH_TO_MEM ||
+ cfg->dir == STEDMA40_MEM_TO_MEM)
+ l3 |= 1 << D40_MEM_LCSP3_DCFG_INCR_POS;
+
+ /* src is hw? -> master port 1 */
+ if (cfg->dir == STEDMA40_PERIPH_TO_MEM ||
+ cfg->dir == STEDMA40_PERIPH_TO_PERIPH)
+ l1 |= 1 << D40_MEM_LCSP1_SCFG_MST_POS;
+
+ /* dst is hw? -> master port 1 */
+ if (cfg->dir == STEDMA40_MEM_TO_PERIPH ||
+ cfg->dir == STEDMA40_PERIPH_TO_PERIPH)
+ l3 |= 1 << D40_MEM_LCSP3_DCFG_MST_POS;
+
+ l3 |= 1 << D40_MEM_LCSP3_DCFG_TIM_POS;
+ l3 |= 1 << D40_MEM_LCSP3_DCFG_EIM_POS;
+ l3 |= cfg->dst_info.psize << D40_MEM_LCSP3_DCFG_PSIZE_POS;
+ l3 |= cfg->dst_info.data_width << D40_MEM_LCSP3_DCFG_ESIZE_POS;
+ l3 |= 1 << D40_MEM_LCSP3_DTCP_POS;
+
+ l1 |= 1 << D40_MEM_LCSP1_SCFG_EIM_POS;
+ l1 |= cfg->src_info.psize << D40_MEM_LCSP1_SCFG_PSIZE_POS;
+ l1 |= cfg->src_info.data_width << D40_MEM_LCSP1_SCFG_ESIZE_POS;
+ l1 |= 1 << D40_MEM_LCSP1_STCP_POS;
+
+ *lcsp1 = l1;
+ *lcsp3 = l3;
+
+}
+
+/* Sets up SRC and DST CFG register for both logical and physical channels */
+void d40_phy_cfg(struct stedma40_chan_cfg *cfg,
+ u32 *src_cfg, u32 *dst_cfg, bool is_log)
+{
+ u32 src = 0;
+ u32 dst = 0;
+
+ if (!is_log) {
+ /* Physical channel */
+ if ((cfg->dir == STEDMA40_PERIPH_TO_MEM) ||
+ (cfg->dir == STEDMA40_PERIPH_TO_PERIPH)) {
+ /* Set master port to 1 */
+ src |= 1 << D40_SREG_CFG_MST_POS;
+ src |= D40_TYPE_TO_EVENT(cfg->src_dev_type);
+
+ if (cfg->src_info.flow_ctrl == STEDMA40_NO_FLOW_CTRL)
+ src |= 1 << D40_SREG_CFG_PHY_TM_POS;
+ else
+ src |= 3 << D40_SREG_CFG_PHY_TM_POS;
+ }
+ if ((cfg->dir == STEDMA40_MEM_TO_PERIPH) ||
+ (cfg->dir == STEDMA40_PERIPH_TO_PERIPH)) {
+ /* Set master port to 1 */
+ dst |= 1 << D40_SREG_CFG_MST_POS;
+ dst |= D40_TYPE_TO_EVENT(cfg->dst_dev_type);
+
+ if (cfg->dst_info.flow_ctrl == STEDMA40_NO_FLOW_CTRL)
+ dst |= 1 << D40_SREG_CFG_PHY_TM_POS;
+ else
+ dst |= 3 << D40_SREG_CFG_PHY_TM_POS;
+ }
+ /* Interrupt on end of transfer for destination */
+ dst |= 1 << D40_SREG_CFG_TIM_POS;
+
+ /* Generate interrupt on error */
+ src |= 1 << D40_SREG_CFG_EIM_POS;
+ dst |= 1 << D40_SREG_CFG_EIM_POS;
+
+ /* PSIZE */
+ if (cfg->src_info.psize != STEDMA40_PSIZE_PHY_1) {
+ src |= 1 << D40_SREG_CFG_PHY_PEN_POS;
+ src |= cfg->src_info.psize << D40_SREG_CFG_PSIZE_POS;
+ }
+ if (cfg->dst_info.psize != STEDMA40_PSIZE_PHY_1) {
+ dst |= 1 << D40_SREG_CFG_PHY_PEN_POS;
+ dst |= cfg->dst_info.psize << D40_SREG_CFG_PSIZE_POS;
+ }
+
+ /* Element size */
+ src |= cfg->src_info.data_width << D40_SREG_CFG_ESIZE_POS;
+ dst |= cfg->dst_info.data_width << D40_SREG_CFG_ESIZE_POS;
+
+ } else {
+ /* Logical channel */
+ dst |= 1 << D40_SREG_CFG_LOG_GIM_POS;
+ src |= 1 << D40_SREG_CFG_LOG_GIM_POS;
+ }
+
+ if (cfg->channel_type & STEDMA40_HIGH_PRIORITY_CHANNEL) {
+ src |= 1 << D40_SREG_CFG_PRI_POS;
+ dst |= 1 << D40_SREG_CFG_PRI_POS;
+ }
+
+ src |= cfg->src_info.endianess << D40_SREG_CFG_LBE_POS;
+ dst |= cfg->dst_info.endianess << D40_SREG_CFG_LBE_POS;
+
+ *src_cfg = src;
+ *dst_cfg = dst;
+}
+
+int d40_phy_fill_lli(struct d40_phy_lli *lli,
+ dma_addr_t data,
+ u32 data_size,
+ int psize,
+ dma_addr_t next_lli,
+ u32 reg_cfg,
+ bool term_int,
+ u32 data_width,
+ bool is_device)
+{
+ int num_elems;
+
+ if (psize == STEDMA40_PSIZE_PHY_1)
+ num_elems = 1;
+ else
+ num_elems = 2 << psize;
+
+ /*
+	 * Size is 16 bit. data_width is 8, 16, 32 or 64 bit.
+	 * Blocks larger than 64 KiB must be split.
+ */
+ if (data_size > (0xffff << data_width))
+ return -EINVAL;
+
+ /* Must be aligned */
+ if (!IS_ALIGNED(data, 0x1 << data_width))
+ return -EINVAL;
+
+	/* Transfer size can't be smaller than (num_elems * elem_size) */
+ if (data_size < num_elems * (0x1 << data_width))
+ return -EINVAL;
+
+	/* The number of elements, i.e. how many chunks */
+ lli->reg_elt = (data_size >> data_width) << D40_SREG_ELEM_PHY_ECNT_POS;
+
+ /*
+ * Distance to next element sized entry.
+ * Usually the size of the element unless you want gaps.
+ */
+ if (!is_device)
+ lli->reg_elt |= (0x1 << data_width) <<
+ D40_SREG_ELEM_PHY_EIDX_POS;
+
+ /* Where the data is */
+ lli->reg_ptr = data;
+ lli->reg_cfg = reg_cfg;
+
+ /* If this scatter list entry is the last one, no next link */
+ if (next_lli == 0)
+ lli->reg_lnk = 0x1 << D40_SREG_LNK_PHY_TCP_POS;
+ else
+ lli->reg_lnk = next_lli;
+
+	/* Set/clear interrupt generation on this link item. */
+ if (term_int)
+ lli->reg_cfg |= 0x1 << D40_SREG_CFG_TIM_POS;
+ else
+ lli->reg_cfg &= ~(0x1 << D40_SREG_CFG_TIM_POS);
+
+ /* Post link */
+ lli->reg_lnk |= 0 << D40_SREG_LNK_PHY_PRE_POS;
+
+ return 0;
+}
+
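+/*
+ * Convert a scatterlist into a chain of physical LLIs located at lli_phys.
+ * If target is set it is used as a fixed device address, otherwise the
+ * scatterlist entry address is used. Returns the total transfer size.
+ */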
+int d40_phy_sg_to_lli(struct scatterlist *sg,
+ int sg_len,
+ dma_addr_t target,
+ struct d40_phy_lli *lli,
+ dma_addr_t lli_phys,
+ u32 reg_cfg,
+ u32 data_width,
+ int psize,
+ bool term_int)
+{
+ int total_size = 0;
+ int i;
+ struct scatterlist *current_sg = sg;
+ dma_addr_t next_lli_phys;
+ dma_addr_t dst;
+ int err = 0;
+
+ for_each_sg(sg, current_sg, sg_len, i) {
+
+ total_size += sg_dma_len(current_sg);
+
+ /* If this scatter list entry is the last one, no next link */
+ if (sg_len - 1 == i)
+ next_lli_phys = 0;
+ else
+ next_lli_phys = ALIGN(lli_phys + (i + 1) *
+ sizeof(struct d40_phy_lli),
+ D40_LLI_ALIGN);
+
+ if (target)
+ dst = target;
+ else
+ dst = sg_phys(current_sg);
+
+ err = d40_phy_fill_lli(&lli[i],
+ dst,
+ sg_dma_len(current_sg),
+ psize,
+ next_lli_phys,
+ reg_cfg,
+ !next_lli_phys,
+ data_width,
+ target == dst);
+ if (err)
+ goto err;
+ }
+
+ return total_size;
+ err:
+ return err;
+}
+
+
+void d40_phy_lli_write(void __iomem *virtbase,
+ u32 phy_chan_num,
+ struct d40_phy_lli *lli_dst,
+ struct d40_phy_lli *lli_src)
+{
+
+ writel(lli_src->reg_cfg, virtbase + D40_DREG_PCBASE +
+ phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSCFG);
+ writel(lli_src->reg_elt, virtbase + D40_DREG_PCBASE +
+ phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSELT);
+ writel(lli_src->reg_ptr, virtbase + D40_DREG_PCBASE +
+ phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSPTR);
+ writel(lli_src->reg_lnk, virtbase + D40_DREG_PCBASE +
+ phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSLNK);
+
+ writel(lli_dst->reg_cfg, virtbase + D40_DREG_PCBASE +
+ phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDCFG);
+ writel(lli_dst->reg_elt, virtbase + D40_DREG_PCBASE +
+ phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDELT);
+ writel(lli_dst->reg_ptr, virtbase + D40_DREG_PCBASE +
+ phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDPTR);
+ writel(lli_dst->reg_lnk, virtbase + D40_DREG_PCBASE +
+ phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDLNK);
+
+}
+
+/* DMA logical lli operations */
+
+void d40_log_fill_lli(struct d40_log_lli *lli,
+ dma_addr_t data, u32 data_size,
+ u32 lli_next_off, u32 reg_cfg,
+ u32 data_width,
+ bool term_int, bool addr_inc)
+{
+ lli->lcsp13 = reg_cfg;
+
+ /* The number of elements to transfer */
+ lli->lcsp02 = ((data_size >> data_width) <<
+ D40_MEM_LCSP0_ECNT_POS) & D40_MEM_LCSP0_ECNT_MASK;
+ /* 16 LSBs address of the current element */
+ lli->lcsp02 |= data & D40_MEM_LCSP0_SPTR_MASK;
+ /* 16 MSBs address of the current element */
+ lli->lcsp13 |= data & D40_MEM_LCSP1_SPTR_MASK;
+
+ if (addr_inc)
+ lli->lcsp13 |= D40_MEM_LCSP1_SCFG_INCR_MASK;
+
+ lli->lcsp13 |= D40_MEM_LCSP3_DTCP_MASK;
+ /* If this scatter list entry is the last one, no next link */
+ lli->lcsp13 |= (lli_next_off << D40_MEM_LCSP1_SLOS_POS) &
+ D40_MEM_LCSP1_SLOS_MASK;
+
+ if (term_int)
+ lli->lcsp13 |= D40_MEM_LCSP1_SCFG_TIM_MASK;
+ else
+ lli->lcsp13 &= ~D40_MEM_LCSP1_SCFG_TIM_MASK;
+}
+
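+/*
+ * Build the source and destination logical LLI lists for a device transfer:
+ * one side walks the scatterlist, the other uses the fixed device address.
+ * Entries are linked through LCLA in runs of at most max_len items.
+ */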
+int d40_log_sg_to_dev(struct d40_lcla_elem *lcla,
+ struct scatterlist *sg,
+ int sg_len,
+ struct d40_log_lli_bidir *lli,
+ struct d40_def_lcsp *lcsp,
+ u32 src_data_width,
+ u32 dst_data_width,
+ enum dma_data_direction direction,
+ bool term_int, dma_addr_t dev_addr, int max_len,
+ int llis_per_log)
+{
+ int total_size = 0;
+ struct scatterlist *current_sg = sg;
+ int i;
+ u32 next_lli_off_dst;
+ u32 next_lli_off_src;
+
+ next_lli_off_src = 0;
+ next_lli_off_dst = 0;
+
+ for_each_sg(sg, current_sg, sg_len, i) {
+ total_size += sg_dma_len(current_sg);
+
+ /*
+ * If this scatter list entry is the last one or
+ * max length, terminate link.
+ */
+ if (sg_len - 1 == i || ((i+1) % max_len == 0)) {
+ next_lli_off_src = 0;
+ next_lli_off_dst = 0;
+ } else {
+ if (next_lli_off_dst == 0 &&
+ next_lli_off_src == 0) {
+ /* The first lli will be at next_lli_off */
+ next_lli_off_dst = (lcla->dst_id *
+ llis_per_log + 1);
+ next_lli_off_src = (lcla->src_id *
+ llis_per_log + 1);
+ } else {
+ next_lli_off_dst++;
+ next_lli_off_src++;
+ }
+ }
+
+ if (direction == DMA_TO_DEVICE) {
+ d40_log_fill_lli(&lli->src[i],
+ sg_phys(current_sg),
+ sg_dma_len(current_sg),
+ next_lli_off_src,
+ lcsp->lcsp1, src_data_width,
+ term_int && !next_lli_off_src,
+ true);
+ d40_log_fill_lli(&lli->dst[i],
+ dev_addr,
+ sg_dma_len(current_sg),
+ next_lli_off_dst,
+ lcsp->lcsp3, dst_data_width,
+ /* No next == terminal interrupt */
+ term_int && !next_lli_off_dst,
+ false);
+ } else {
+ d40_log_fill_lli(&lli->dst[i],
+ sg_phys(current_sg),
+ sg_dma_len(current_sg),
+ next_lli_off_dst,
+ lcsp->lcsp3, dst_data_width,
+ /* No next == terminal interrupt */
+ term_int && !next_lli_off_dst,
+ true);
+ d40_log_fill_lli(&lli->src[i],
+ dev_addr,
+ sg_dma_len(current_sg),
+ next_lli_off_src,
+ lcsp->lcsp1, src_data_width,
+ term_int && !next_lli_off_src,
+ false);
+ }
+ }
+ return total_size;
+}
+
+int d40_log_sg_to_lli(int lcla_id,
+ struct scatterlist *sg,
+ int sg_len,
+ struct d40_log_lli *lli_sg,
+ u32 lcsp13, /* src or dst*/
+ u32 data_width,
+ bool term_int, int max_len, int llis_per_log)
+{
+ int total_size = 0;
+ struct scatterlist *current_sg = sg;
+ int i;
+ u32 next_lli_off = 0;
+
+ for_each_sg(sg, current_sg, sg_len, i) {
+ total_size += sg_dma_len(current_sg);
+
+ /*
+ * If this scatter list entry is the last one or
+ * max length, terminate link.
+ */
+ if (sg_len - 1 == i || ((i+1) % max_len == 0))
+ next_lli_off = 0;
+ else {
+ if (next_lli_off == 0)
+ /* The first lli will be at next_lli_off */
+ next_lli_off = lcla_id * llis_per_log + 1;
+ else
+ next_lli_off++;
+ }
+
+ d40_log_fill_lli(&lli_sg[i],
+ sg_phys(current_sg),
+ sg_dma_len(current_sg),
+ next_lli_off,
+ lcsp13, data_width,
+ term_int && !next_lli_off,
+ true);
+ }
+ return total_size;
+}
+
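+/*
+ * Write a prepared logical LLI chain to hardware: the first src/dst pair
+ * goes into the LCPA (lcsp0..lcsp3), and as long as both the SLOS and
+ * DLOS link fields are non-zero the following pairs are written into the
+ * LCLA for the hardware to reload, up to llis_per_log entries.
+ */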
+void d40_log_lli_write(struct d40_log_lli_full *lcpa,
+ struct d40_log_lli *lcla_src,
+ struct d40_log_lli *lcla_dst,
+ struct d40_log_lli *lli_dst,
+ struct d40_log_lli *lli_src,
+ int llis_per_log)
+{
+ u32 slos = 0;
+ u32 dlos = 0;
+ int i;
+
+ lcpa->lcsp0 = lli_src->lcsp02;
+ lcpa->lcsp1 = lli_src->lcsp13;
+ lcpa->lcsp2 = lli_dst->lcsp02;
+ lcpa->lcsp3 = lli_dst->lcsp13;
+
+ slos = lli_src->lcsp13 & D40_MEM_LCSP1_SLOS_MASK;
+ dlos = lli_dst->lcsp13 & D40_MEM_LCSP3_DLOS_MASK;
+
+ for (i = 0; (i < llis_per_log) && slos && dlos; i++) {
+ writel(lli_src[i+1].lcsp02, &lcla_src[i].lcsp02);
+ writel(lli_src[i+1].lcsp13, &lcla_src[i].lcsp13);
+ writel(lli_dst[i+1].lcsp02, &lcla_dst[i].lcsp02);
+ writel(lli_dst[i+1].lcsp13, &lcla_dst[i].lcsp13);
+
+ slos = lli_src[i+1].lcsp13 & D40_MEM_LCSP1_SLOS_MASK;
+ dlos = lli_dst[i+1].lcsp13 & D40_MEM_LCSP3_DLOS_MASK;
+ }
+}
diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h
new file mode 100644
index 000000000000..2029280cb332
--- /dev/null
+++ b/drivers/dma/ste_dma40_ll.h
@@ -0,0 +1,354 @@
+/*
+ * drivers/dma/ste_dma40_ll.h
+ *
+ * Copyright (C) ST-Ericsson 2007-2010
+ * License terms: GNU General Public License (GPL) version 2
+ * Author: Per Friden <per.friden@stericsson.com>
+ * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
+ */
+#ifndef STE_DMA40_LL_H
+#define STE_DMA40_LL_H
+
+#define D40_DREG_PCBASE 0x400
+#define D40_DREG_PCDELTA (8 * 4)
+#define D40_LLI_ALIGN 16 /* LLI alignment must be 16 bytes. */
+
+#define D40_TYPE_TO_GROUP(type) (type / 16)
+#define D40_TYPE_TO_EVENT(type) (type % 16)
+
+/* Most bits of the CFG register are the same in logical as in physical mode */
+#define D40_SREG_CFG_MST_POS 15
+#define D40_SREG_CFG_TIM_POS 14
+#define D40_SREG_CFG_EIM_POS 13
+#define D40_SREG_CFG_LOG_INCR_POS 12
+#define D40_SREG_CFG_PHY_PEN_POS 12
+#define D40_SREG_CFG_PSIZE_POS 10
+#define D40_SREG_CFG_ESIZE_POS 8
+#define D40_SREG_CFG_PRI_POS 7
+#define D40_SREG_CFG_LBE_POS 6
+#define D40_SREG_CFG_LOG_GIM_POS 5
+#define D40_SREG_CFG_LOG_MFU_POS 4
+#define D40_SREG_CFG_PHY_TM_POS 4
+#define D40_SREG_CFG_PHY_EVTL_POS 0
+
+
+/* Standard channel parameters - basic mode (element register) */
+#define D40_SREG_ELEM_PHY_ECNT_POS 16
+#define D40_SREG_ELEM_PHY_EIDX_POS 0
+
+#define D40_SREG_ELEM_PHY_ECNT_MASK (0xFFFF << D40_SREG_ELEM_PHY_ECNT_POS)
+
+/* Standard channel parameters - basic mode (Link register) */
+#define D40_SREG_LNK_PHY_TCP_POS 0
+#define D40_SREG_LNK_PHY_LMP_POS 1
+#define D40_SREG_LNK_PHY_PRE_POS 2
+/*
+ * Source/destination link address. Contains the 29-bit,
+ * word-aligned byte address of the reload area.
+ */
+#define D40_SREG_LNK_PHYS_LNK_MASK 0xFFFFFFF8UL
+
+/* Standard basic channel logical mode */
+
+/* Element register */
+#define D40_SREG_ELEM_LOG_ECNT_POS 16
+#define D40_SREG_ELEM_LOG_LIDX_POS 8
+#define D40_SREG_ELEM_LOG_LOS_POS 1
+#define D40_SREG_ELEM_LOG_TCP_POS 0
+
+#define D40_SREG_ELEM_LOG_LIDX_MASK (0xFF << D40_SREG_ELEM_LOG_LIDX_POS)
+
+/* Link register */
+#define D40_DEACTIVATE_EVENTLINE 0x0
+#define D40_ACTIVATE_EVENTLINE 0x1
+#define D40_EVENTLINE_POS(i) (2 * i)
+#define D40_EVENTLINE_MASK(i) (0x3 << D40_EVENTLINE_POS(i))
+
+/* Standard basic channel logical params in memory */
+
+/* LCSP0 */
+#define D40_MEM_LCSP0_ECNT_POS 16
+#define D40_MEM_LCSP0_SPTR_POS 0
+
+#define D40_MEM_LCSP0_ECNT_MASK (0xFFFF << D40_MEM_LCSP0_ECNT_POS)
+#define D40_MEM_LCSP0_SPTR_MASK (0xFFFF << D40_MEM_LCSP0_SPTR_POS)
+
+/* LCSP1 */
+#define D40_MEM_LCSP1_SPTR_POS 16
+#define D40_MEM_LCSP1_SCFG_MST_POS 15
+#define D40_MEM_LCSP1_SCFG_TIM_POS 14
+#define D40_MEM_LCSP1_SCFG_EIM_POS 13
+#define D40_MEM_LCSP1_SCFG_INCR_POS 12
+#define D40_MEM_LCSP1_SCFG_PSIZE_POS 10
+#define D40_MEM_LCSP1_SCFG_ESIZE_POS 8
+#define D40_MEM_LCSP1_SLOS_POS 1
+#define D40_MEM_LCSP1_STCP_POS 0
+
+#define D40_MEM_LCSP1_SPTR_MASK (0xFFFF << D40_MEM_LCSP1_SPTR_POS)
+#define D40_MEM_LCSP1_SCFG_TIM_MASK (0x1 << D40_MEM_LCSP1_SCFG_TIM_POS)
+#define D40_MEM_LCSP1_SCFG_INCR_MASK (0x1 << D40_MEM_LCSP1_SCFG_INCR_POS)
+#define D40_MEM_LCSP1_SCFG_PSIZE_MASK (0x3 << D40_MEM_LCSP1_SCFG_PSIZE_POS)
+#define D40_MEM_LCSP1_SLOS_MASK (0x7F << D40_MEM_LCSP1_SLOS_POS)
+#define D40_MEM_LCSP1_STCP_MASK (0x1 << D40_MEM_LCSP1_STCP_POS)
+
+/* LCSP2 */
+#define D40_MEM_LCSP2_ECNT_POS 16
+
+#define D40_MEM_LCSP2_ECNT_MASK (0xFFFF << D40_MEM_LCSP2_ECNT_POS)
+
+/* LCSP3 */
+#define D40_MEM_LCSP3_DCFG_MST_POS 15
+#define D40_MEM_LCSP3_DCFG_TIM_POS 14
+#define D40_MEM_LCSP3_DCFG_EIM_POS 13
+#define D40_MEM_LCSP3_DCFG_INCR_POS 12
+#define D40_MEM_LCSP3_DCFG_PSIZE_POS 10
+#define D40_MEM_LCSP3_DCFG_ESIZE_POS 8
+#define D40_MEM_LCSP3_DLOS_POS 1
+#define D40_MEM_LCSP3_DTCP_POS 0
+
+#define D40_MEM_LCSP3_DLOS_MASK (0x7F << D40_MEM_LCSP3_DLOS_POS)
+#define D40_MEM_LCSP3_DTCP_MASK (0x1 << D40_MEM_LCSP3_DTCP_POS)
+
+
+/* Standard channel parameter register offsets */
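+/*
+ * Each physical channel owns a block of eight 32-bit registers located at
+ * virtbase + D40_DREG_PCBASE + phy_chan_num * D40_DREG_PCDELTA; the
+ * D40_CHAN_REG_* offsets below are relative to the start of that block.
+ */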
+#define D40_CHAN_REG_SSCFG 0x00
+#define D40_CHAN_REG_SSELT 0x04
+#define D40_CHAN_REG_SSPTR 0x08
+#define D40_CHAN_REG_SSLNK 0x0C
+#define D40_CHAN_REG_SDCFG 0x10
+#define D40_CHAN_REG_SDELT 0x14
+#define D40_CHAN_REG_SDPTR 0x18
+#define D40_CHAN_REG_SDLNK 0x1C
+
+/* DMA Register Offsets */
+#define D40_DREG_GCC 0x000
+#define D40_DREG_PRTYP 0x004
+#define D40_DREG_PRSME 0x008
+#define D40_DREG_PRSMO 0x00C
+#define D40_DREG_PRMSE 0x010
+#define D40_DREG_PRMSO 0x014
+#define D40_DREG_PRMOE 0x018
+#define D40_DREG_PRMOO 0x01C
+#define D40_DREG_LCPA 0x020
+#define D40_DREG_LCLA 0x024
+#define D40_DREG_ACTIVE 0x050
+#define D40_DREG_ACTIVO 0x054
+#define D40_DREG_FSEB1 0x058
+#define D40_DREG_FSEB2 0x05C
+#define D40_DREG_PCMIS 0x060
+#define D40_DREG_PCICR 0x064
+#define D40_DREG_PCTIS 0x068
+#define D40_DREG_PCEIS 0x06C
+#define D40_DREG_LCMIS0 0x080
+#define D40_DREG_LCMIS1 0x084
+#define D40_DREG_LCMIS2 0x088
+#define D40_DREG_LCMIS3 0x08C
+#define D40_DREG_LCICR0 0x090
+#define D40_DREG_LCICR1 0x094
+#define D40_DREG_LCICR2 0x098
+#define D40_DREG_LCICR3 0x09C
+#define D40_DREG_LCTIS0 0x0A0
+#define D40_DREG_LCTIS1 0x0A4
+#define D40_DREG_LCTIS2 0x0A8
+#define D40_DREG_LCTIS3 0x0AC
+#define D40_DREG_LCEIS0 0x0B0
+#define D40_DREG_LCEIS1 0x0B4
+#define D40_DREG_LCEIS2 0x0B8
+#define D40_DREG_LCEIS3 0x0BC
+#define D40_DREG_STFU 0xFC8
+#define D40_DREG_ICFG 0xFCC
+#define D40_DREG_PERIPHID0 0xFE0
+#define D40_DREG_PERIPHID1 0xFE4
+#define D40_DREG_PERIPHID2 0xFE8
+#define D40_DREG_PERIPHID3 0xFEC
+#define D40_DREG_CELLID0 0xFF0
+#define D40_DREG_CELLID1 0xFF4
+#define D40_DREG_CELLID2 0xFF8
+#define D40_DREG_CELLID3 0xFFC
+
+/* LLI related structures */
+
+/**
+ * struct d40_phy_lli - The basic configuration registers for each
+ * physical channel.
+ *
+ * @reg_cfg: The configuration register.
+ * @reg_elt: The element register.
+ * @reg_ptr: The pointer register.
+ * @reg_lnk: The link register.
+ *
+ * These registers are set up for both physical and logical transfers.
+ * Note that the bits in each register have different meanings in logical
+ * and physical (standard) mode.
+ *
+ * This struct must be 16-byte aligned and contain only hardware registers,
+ * since it is accessed directly by the DMA.
+ */
+struct d40_phy_lli {
+ u32 reg_cfg;
+ u32 reg_elt;
+ u32 reg_ptr;
+ u32 reg_lnk;
+};
+
+/**
+ * struct d40_phy_lli_bidir - struct for a transfer.
+ *
+ * @src: Register settings for src channel.
+ * @dst: Register settings for dst channel.
+ * @dst_addr: Physical destination address.
+ * @src_addr: Physical source address.
+ *
+ * All DMA transfers have a source and a destination.
+ */
+
+struct d40_phy_lli_bidir {
+ struct d40_phy_lli *src;
+ struct d40_phy_lli *dst;
+ dma_addr_t dst_addr;
+ dma_addr_t src_addr;
+};
+
+
+/**
+ * struct d40_log_lli - logical lli configuration
+ *
+ * @lcsp02: Either maps to register lcsp0 if src or lcsp2 if dst.
+ * @lcsp13: Either maps to register lcsp1 if src or lcsp3 if dst.
+ *
+ * This struct must be 8-byte aligned since it is accessed directly by
+ * the DMA. Never add any fields that are not hardware-mapped registers.
+ */
+
+struct d40_log_lli {
+ u32 lcsp02;
+ u32 lcsp13;
+};
+
+/**
+ * struct d40_log_lli_bidir - For both src and dst
+ *
+ * @src: pointer to src lli configuration.
+ * @dst: pointer to dst lli configuration.
+ *
+ * You always have a src and a dst when doing DMA transfers.
+ */
+
+struct d40_log_lli_bidir {
+ struct d40_log_lli *src;
+ struct d40_log_lli *dst;
+};
+
+/**
+ * struct d40_log_lli_full - LCPA layout
+ *
+ * @lcsp0: Logical Channel Standard Param 0 - Src.
+ * @lcsp1: Logical Channel Standard Param 1 - Src.
+ * @lcsp2: Logical Channel Standard Param 2 - Dst.
+ * @lcsp3: Logical Channel Standard Param 3 - Dst.
+ *
+ * This struct maps to the LCPA physical memory layout and must match
+ * the hardware exactly.
+ */
+struct d40_log_lli_full {
+ u32 lcsp0;
+ u32 lcsp1;
+ u32 lcsp2;
+ u32 lcsp3;
+};
+
+/**
+ * struct d40_def_lcsp - Default LCSP1 and LCSP3 settings
+ *
+ * @lcsp3: The default configuration for dst.
+ * @lcsp1: The default configuration for src.
+ */
+struct d40_def_lcsp {
+ u32 lcsp3;
+ u32 lcsp1;
+};
+
+/**
+ * struct d40_lcla_elem - Info for one LCLA element.
+ *
+ * @src_id: logical channel src id
+ * @dst_id: logical channel dst id
+ * @src: LCPA formatted src parameters
+ * @dst: LCPA formatted dst parameters
+ *
+ */
+struct d40_lcla_elem {
+ int src_id;
+ int dst_id;
+ struct d40_log_lli *src;
+ struct d40_log_lli *dst;
+};
+
+/* Physical channels */
+
+void d40_phy_cfg(struct stedma40_chan_cfg *cfg,
+ u32 *src_cfg, u32 *dst_cfg, bool is_log);
+
+void d40_log_cfg(struct stedma40_chan_cfg *cfg,
+ u32 *lcsp1, u32 *lcsp2);
+
+int d40_phy_sg_to_lli(struct scatterlist *sg,
+ int sg_len,
+ dma_addr_t target,
+ struct d40_phy_lli *lli,
+ dma_addr_t lli_phys,
+ u32 reg_cfg,
+ u32 data_width,
+ int psize,
+ bool term_int);
+
+int d40_phy_fill_lli(struct d40_phy_lli *lli,
+ dma_addr_t data,
+ u32 data_size,
+ int psize,
+ dma_addr_t next_lli,
+ u32 reg_cfg,
+ bool term_int,
+ u32 data_width,
+ bool is_device);
+
+void d40_phy_lli_write(void __iomem *virtbase,
+ u32 phy_chan_num,
+ struct d40_phy_lli *lli_dst,
+ struct d40_phy_lli *lli_src);
+
+/* Logical channels */
+
+void d40_log_fill_lli(struct d40_log_lli *lli,
+ dma_addr_t data, u32 data_size,
+ u32 lli_next_off, u32 reg_cfg,
+ u32 data_width,
+ bool term_int, bool addr_inc);
+
+int d40_log_sg_to_dev(struct d40_lcla_elem *lcla,
+ struct scatterlist *sg,
+ int sg_len,
+ struct d40_log_lli_bidir *lli,
+ struct d40_def_lcsp *lcsp,
+ u32 src_data_width,
+ u32 dst_data_width,
+ enum dma_data_direction direction,
+ bool term_int, dma_addr_t dev_addr, int max_len,
+ int llis_per_log);
+
+void d40_log_lli_write(struct d40_log_lli_full *lcpa,
+ struct d40_log_lli *lcla_src,
+ struct d40_log_lli *lcla_dst,
+ struct d40_log_lli *lli_dst,
+ struct d40_log_lli *lli_src,
+ int llis_per_log);
+
+int d40_log_sg_to_lli(int lcla_id,
+ struct scatterlist *sg,
+ int sg_len,
+ struct d40_log_lli *lli_sg,
+ u32 lcsp13, /* src or dst*/
+ u32 data_width,
+ bool term_int, int max_len, int llis_per_log);
+
+#endif /* STE_DMA40_LL_H */
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
new file mode 100644
index 000000000000..a1bf77c1993f
--- /dev/null
+++ b/drivers/dma/timb_dma.c
@@ -0,0 +1,860 @@
+/*
+ * timb_dma.c timberdale FPGA DMA driver
+ * Copyright (c) 2010 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/* Supports:
+ * Timberdale FPGA DMA engine
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <linux/timb_dma.h>
+
+#define DRIVER_NAME "timb-dma"
+
+/* Global DMA registers */
+#define TIMBDMA_ACR 0x34
+#define TIMBDMA_32BIT_ADDR 0x01
+
+#define TIMBDMA_ISR 0x080000
+#define TIMBDMA_IPR 0x080004
+#define TIMBDMA_IER 0x080008
+
+/* Channel specific registers */
+/* RX instances base addresses are 0x00, 0x40, 0x80 ...
+ * TX instances base addresses are 0x18, 0x58, 0x98 ...
+ */
+#define TIMBDMA_INSTANCE_OFFSET 0x40
+#define TIMBDMA_INSTANCE_TX_OFFSET 0x18
+
+/* RX registers, relative the instance base */
+#define TIMBDMA_OFFS_RX_DHAR 0x00
+#define TIMBDMA_OFFS_RX_DLAR 0x04
+#define TIMBDMA_OFFS_RX_LR 0x0C
+#define TIMBDMA_OFFS_RX_BLR 0x10
+#define TIMBDMA_OFFS_RX_ER 0x14
+#define TIMBDMA_RX_EN 0x01
+/* Bytes per row; a video-specific register
+ * placed after the TX registers.
+ */
+#define TIMBDMA_OFFS_RX_BPRR 0x30
+
+/* TX registers, relative the instance base */
+#define TIMBDMA_OFFS_TX_DHAR 0x00
+#define TIMBDMA_OFFS_TX_DLAR 0x04
+#define TIMBDMA_OFFS_TX_BLR 0x0C
+#define TIMBDMA_OFFS_TX_LR 0x14
+
+
+#define TIMB_DMA_DESC_SIZE 8
+
+struct timb_dma_desc {
+ struct list_head desc_node;
+ struct dma_async_tx_descriptor txd;
+ u8 *desc_list;
+ unsigned int desc_list_len;
+ bool interrupt;
+};
+
+struct timb_dma_chan {
+ struct dma_chan chan;
+ void __iomem *membase;
+ spinlock_t lock; /* Used to protect data structures,
+ especially the lists and descriptors,
+ from races between the tasklet and calls
+ from above */
+ dma_cookie_t last_completed_cookie;
+ bool ongoing;
+ struct list_head active_list;
+ struct list_head queue;
+ struct list_head free_list;
+ unsigned int bytes_per_line;
+ enum dma_data_direction direction;
+ unsigned int descs; /* Descriptors to allocate */
+ unsigned int desc_elems; /* number of elems per descriptor */
+};
+
+struct timb_dma {
+ struct dma_device dma;
+ void __iomem *membase;
+ struct tasklet_struct tasklet;
+ struct timb_dma_chan channels[0];
+};
+
+static struct device *chan2dev(struct dma_chan *chan)
+{
+ return &chan->dev->device;
+}
+static struct device *chan2dmadev(struct dma_chan *chan)
+{
+ return chan2dev(chan)->parent->parent;
+}
+
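+/*
+ * Recover the containing struct timb_dma from a channel pointer: the
+ * channels[] array is laid out directly after struct timb_dma, so channel
+ * 'id' sits at offset sizeof(struct timb_dma) + id *
+ * sizeof(struct timb_dma_chan) from the start of the device structure.
+ */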
+static struct timb_dma *tdchantotd(struct timb_dma_chan *td_chan)
+{
+ int id = td_chan->chan.chan_id;
+ return (struct timb_dma *)((u8 *)td_chan -
+ id * sizeof(struct timb_dma_chan) - sizeof(struct timb_dma));
+}
+
+/* Must be called with the spinlock held */
+static void __td_enable_chan_irq(struct timb_dma_chan *td_chan)
+{
+ int id = td_chan->chan.chan_id;
+ struct timb_dma *td = tdchantotd(td_chan);
+ u32 ier;
+
+ /* enable interrupt for this channel */
+ ier = ioread32(td->membase + TIMBDMA_IER);
+ ier |= 1 << id;
+ dev_dbg(chan2dev(&td_chan->chan), "Enabling irq: %d, IER: 0x%x\n", id,
+ ier);
+ iowrite32(ier, td->membase + TIMBDMA_IER);
+}
+
+/* Should be called with the spinlock held */
+static bool __td_dma_done_ack(struct timb_dma_chan *td_chan)
+{
+ int id = td_chan->chan.chan_id;
+ struct timb_dma *td = (struct timb_dma *)((u8 *)td_chan -
+ id * sizeof(struct timb_dma_chan) - sizeof(struct timb_dma));
+ u32 isr;
+ bool done = false;
+
+ dev_dbg(chan2dev(&td_chan->chan), "Checking irq: %d, td: %p\n", id, td);
+
+ isr = ioread32(td->membase + TIMBDMA_ISR) & (1 << id);
+ if (isr) {
+ iowrite32(isr, td->membase + TIMBDMA_ISR);
+ done = true;
+ }
+
+ return done;
+}
+
+static void __td_unmap_desc(struct timb_dma_chan *td_chan, const u8 *dma_desc,
+ bool single)
+{
+ dma_addr_t addr;
+ int len;
+
+ addr = (dma_desc[7] << 24) | (dma_desc[6] << 16) | (dma_desc[5] << 8) |
+ dma_desc[4];
+
+ len = (dma_desc[3] << 8) | dma_desc[2];
+
+ if (single)
+ dma_unmap_single(chan2dev(&td_chan->chan), addr, len,
+ td_chan->direction);
+ else
+ dma_unmap_page(chan2dev(&td_chan->chan), addr, len,
+ td_chan->direction);
+}
+
+static void __td_unmap_descs(struct timb_dma_desc *td_desc, bool single)
+{
+ struct timb_dma_chan *td_chan = container_of(td_desc->txd.chan,
+ struct timb_dma_chan, chan);
+ u8 *descs;
+
+ for (descs = td_desc->desc_list; ; descs += TIMB_DMA_DESC_SIZE) {
+ __td_unmap_desc(td_chan, descs, single);
+ if (descs[0] & 0x02)
+ break;
+ }
+}
+
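+/*
+ * Fill one 8-byte hardware descriptor: bytes 4-7 hold the DMA address
+ * (LSB first), bytes 2-3 the length in bytes (LSB first), byte 1 is
+ * reserved, and byte 0 carries the control flags (0x21 = transfer +
+ * valid, with bit 0x02 marking the last descriptor of a chain).
+ */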
+static int td_fill_desc(struct timb_dma_chan *td_chan, u8 *dma_desc,
+ struct scatterlist *sg, bool last)
+{
+ if (sg_dma_len(sg) > USHRT_MAX) {
+ dev_err(chan2dev(&td_chan->chan), "Too big sg element\n");
+ return -EINVAL;
+ }
+
+ /* length must be word aligned */
+ if (sg_dma_len(sg) % sizeof(u32)) {
+ dev_err(chan2dev(&td_chan->chan), "Incorrect length: %d\n",
+ sg_dma_len(sg));
+ return -EINVAL;
+ }
+
+ dev_dbg(chan2dev(&td_chan->chan), "desc: %p, addr: %p\n",
+ dma_desc, (void *)sg_dma_address(sg));
+
+ dma_desc[7] = (sg_dma_address(sg) >> 24) & 0xff;
+ dma_desc[6] = (sg_dma_address(sg) >> 16) & 0xff;
+ dma_desc[5] = (sg_dma_address(sg) >> 8) & 0xff;
+ dma_desc[4] = (sg_dma_address(sg) >> 0) & 0xff;
+
+ dma_desc[3] = (sg_dma_len(sg) >> 8) & 0xff;
+ dma_desc[2] = (sg_dma_len(sg) >> 0) & 0xff;
+
+ dma_desc[1] = 0x00;
+ dma_desc[0] = 0x21 | (last ? 0x02 : 0); /* tran, valid */
+
+ return 0;
+}
+
+/* Must be called with the spinlock held */
+static void __td_start_dma(struct timb_dma_chan *td_chan)
+{
+ struct timb_dma_desc *td_desc;
+
+ if (td_chan->ongoing) {
+ dev_err(chan2dev(&td_chan->chan),
+ "Transfer already ongoing\n");
+ return;
+ }
+
+ td_desc = list_entry(td_chan->active_list.next, struct timb_dma_desc,
+ desc_node);
+
+ dev_dbg(chan2dev(&td_chan->chan),
+ "td_chan: %p, chan: %d, membase: %p\n",
+ td_chan, td_chan->chan.chan_id, td_chan->membase);
+
+ if (td_chan->direction == DMA_FROM_DEVICE) {
+
+ /* descriptor address */
+ iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_DHAR);
+ iowrite32(td_desc->txd.phys, td_chan->membase +
+ TIMBDMA_OFFS_RX_DLAR);
+ /* Bytes per line */
+ iowrite32(td_chan->bytes_per_line, td_chan->membase +
+ TIMBDMA_OFFS_RX_BPRR);
+ /* enable RX */
+ iowrite32(TIMBDMA_RX_EN, td_chan->membase + TIMBDMA_OFFS_RX_ER);
+ } else {
+ /* address high */
+ iowrite32(0, td_chan->membase + TIMBDMA_OFFS_TX_DHAR);
+ iowrite32(td_desc->txd.phys, td_chan->membase +
+ TIMBDMA_OFFS_TX_DLAR);
+ }
+
+ td_chan->ongoing = true;
+
+ if (td_desc->interrupt)
+ __td_enable_chan_irq(td_chan);
+}
+
+static void __td_finish(struct timb_dma_chan *td_chan)
+{
+ dma_async_tx_callback callback;
+ void *param;
+ struct dma_async_tx_descriptor *txd;
+ struct timb_dma_desc *td_desc;
+
+ /* can happen if the descriptor is canceled */
+ if (list_empty(&td_chan->active_list))
+ return;
+
+ td_desc = list_entry(td_chan->active_list.next, struct timb_dma_desc,
+ desc_node);
+ txd = &td_desc->txd;
+
+ dev_dbg(chan2dev(&td_chan->chan), "descriptor %u complete\n",
+ txd->cookie);
+
+ /* make sure to stop the transfer */
+ if (td_chan->direction == DMA_FROM_DEVICE)
+ iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_ER);
+/* Currently no support for stopping DMA transfers
+ else
+ iowrite32(0, td_chan->membase + TIMBDMA_OFFS_TX_DLAR);
+*/
+ td_chan->last_completed_cookie = txd->cookie;
+ td_chan->ongoing = false;
+
+ callback = txd->callback;
+ param = txd->callback_param;
+
+ list_move(&td_desc->desc_node, &td_chan->free_list);
+
+ if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP))
+ __td_unmap_descs(td_desc,
+ txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE);
+
+ /*
+ * The API requires that no submissions are done from a
+ * callback, so we don't need to drop the lock here
+ */
+ if (callback)
+ callback(param);
+}
+
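+/*
+ * Build the interrupt enable mask: set the bit for every channel that has
+ * an ongoing transfer whose active descriptor requested an interrupt.
+ */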
+static u32 __td_ier_mask(struct timb_dma *td)
+{
+ int i;
+ u32 ret = 0;
+
+ for (i = 0; i < td->dma.chancnt; i++) {
+ struct timb_dma_chan *td_chan = td->channels + i;
+ if (td_chan->ongoing) {
+ struct timb_dma_desc *td_desc =
+ list_entry(td_chan->active_list.next,
+ struct timb_dma_desc, desc_node);
+ if (td_desc->interrupt)
+ ret |= 1 << i;
+ }
+ }
+
+ return ret;
+}
+
+static void __td_start_next(struct timb_dma_chan *td_chan)
+{
+ struct timb_dma_desc *td_desc;
+
+ BUG_ON(list_empty(&td_chan->queue));
+ BUG_ON(td_chan->ongoing);
+
+ td_desc = list_entry(td_chan->queue.next, struct timb_dma_desc,
+ desc_node);
+
+ dev_dbg(chan2dev(&td_chan->chan), "%s: started %u\n",
+ __func__, td_desc->txd.cookie);
+
+ list_move(&td_desc->desc_node, &td_chan->active_list);
+ __td_start_dma(td_chan);
+}
+
+static dma_cookie_t td_tx_submit(struct dma_async_tx_descriptor *txd)
+{
+ struct timb_dma_desc *td_desc = container_of(txd, struct timb_dma_desc,
+ txd);
+ struct timb_dma_chan *td_chan = container_of(txd->chan,
+ struct timb_dma_chan, chan);
+ dma_cookie_t cookie;
+
+ spin_lock_bh(&td_chan->lock);
+
+ cookie = txd->chan->cookie;
+ if (++cookie < 0)
+ cookie = 1;
+ txd->chan->cookie = cookie;
+ txd->cookie = cookie;
+
+ if (list_empty(&td_chan->active_list)) {
+ dev_dbg(chan2dev(txd->chan), "%s: started %u\n", __func__,
+ txd->cookie);
+ list_add_tail(&td_desc->desc_node, &td_chan->active_list);
+ __td_start_dma(td_chan);
+ } else {
+ dev_dbg(chan2dev(txd->chan), "tx_submit: queued %u\n",
+ txd->cookie);
+
+ list_add_tail(&td_desc->desc_node, &td_chan->queue);
+ }
+
+ spin_unlock_bh(&td_chan->lock);
+
+ return cookie;
+}
+
+static struct timb_dma_desc *td_alloc_init_desc(struct timb_dma_chan *td_chan)
+{
+ struct dma_chan *chan = &td_chan->chan;
+ struct timb_dma_desc *td_desc;
+ int err;
+
+ td_desc = kzalloc(sizeof(struct timb_dma_desc), GFP_KERNEL);
+ if (!td_desc) {
+ dev_err(chan2dev(chan), "Failed to alloc descriptor\n");
+ /* nothing else allocated yet, so bail out directly */
+ return NULL;
+ }
+
+ td_desc->desc_list_len = td_chan->desc_elems * TIMB_DMA_DESC_SIZE;
+
+ td_desc->desc_list = kzalloc(td_desc->desc_list_len, GFP_KERNEL);
+ if (!td_desc->desc_list) {
+ dev_err(chan2dev(chan), "Failed to alloc descriptor\n");
+ goto err;
+ }
+
+ dma_async_tx_descriptor_init(&td_desc->txd, chan);
+ td_desc->txd.tx_submit = td_tx_submit;
+ td_desc->txd.flags = DMA_CTRL_ACK;
+
+ td_desc->txd.phys = dma_map_single(chan2dmadev(chan),
+ td_desc->desc_list, td_desc->desc_list_len, DMA_TO_DEVICE);
+
+ err = dma_mapping_error(chan2dmadev(chan), td_desc->txd.phys);
+ if (err) {
+ dev_err(chan2dev(chan), "DMA mapping error: %d\n", err);
+ goto err;
+ }
+
+ return td_desc;
+err:
+ kfree(td_desc->desc_list);
+ kfree(td_desc);
+
+ return NULL;
+
+}
+
+static void td_free_desc(struct timb_dma_desc *td_desc)
+{
+ dev_dbg(chan2dev(td_desc->txd.chan), "Freeing desc: %p\n", td_desc);
+ dma_unmap_single(chan2dmadev(td_desc->txd.chan), td_desc->txd.phys,
+ td_desc->desc_list_len, DMA_TO_DEVICE);
+
+ kfree(td_desc->desc_list);
+ kfree(td_desc);
+}
+
+static void td_desc_put(struct timb_dma_chan *td_chan,
+ struct timb_dma_desc *td_desc)
+{
+ dev_dbg(chan2dev(&td_chan->chan), "Putting desc: %p\n", td_desc);
+
+ spin_lock_bh(&td_chan->lock);
+ list_add(&td_desc->desc_node, &td_chan->free_list);
+ spin_unlock_bh(&td_chan->lock);
+}
+
+static struct timb_dma_desc *td_desc_get(struct timb_dma_chan *td_chan)
+{
+ struct timb_dma_desc *td_desc, *_td_desc;
+ struct timb_dma_desc *ret = NULL;
+
+ spin_lock_bh(&td_chan->lock);
+ list_for_each_entry_safe(td_desc, _td_desc, &td_chan->free_list,
+ desc_node) {
+ if (async_tx_test_ack(&td_desc->txd)) {
+ list_del(&td_desc->desc_node);
+ ret = td_desc;
+ break;
+ }
+ dev_dbg(chan2dev(&td_chan->chan), "desc %p not ACKed\n",
+ td_desc);
+ }
+ spin_unlock_bh(&td_chan->lock);
+
+ return ret;
+}
+
+static int td_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct timb_dma_chan *td_chan =
+ container_of(chan, struct timb_dma_chan, chan);
+ int i;
+
+ dev_dbg(chan2dev(chan), "%s: entry\n", __func__);
+
+ BUG_ON(!list_empty(&td_chan->free_list));
+ for (i = 0; i < td_chan->descs; i++) {
+ struct timb_dma_desc *td_desc = td_alloc_init_desc(td_chan);
+ if (!td_desc) {
+ if (i)
+ break;
+ else {
+ dev_err(chan2dev(chan),
+ "Couldnt allocate any descriptors\n");
+ return -ENOMEM;
+ }
+ }
+
+ td_desc_put(td_chan, td_desc);
+ }
+
+ spin_lock_bh(&td_chan->lock);
+ td_chan->last_completed_cookie = 1;
+ chan->cookie = 1;
+ spin_unlock_bh(&td_chan->lock);
+
+ return 0;
+}
+
+static void td_free_chan_resources(struct dma_chan *chan)
+{
+ struct timb_dma_chan *td_chan =
+ container_of(chan, struct timb_dma_chan, chan);
+ struct timb_dma_desc *td_desc, *_td_desc;
+ LIST_HEAD(list);
+
+ dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);
+
+ /* check that all descriptors are free */
+ BUG_ON(!list_empty(&td_chan->active_list));
+ BUG_ON(!list_empty(&td_chan->queue));
+
+ spin_lock_bh(&td_chan->lock);
+ list_splice_init(&td_chan->free_list, &list);
+ spin_unlock_bh(&td_chan->lock);
+
+ list_for_each_entry_safe(td_desc, _td_desc, &list, desc_node) {
+ dev_dbg(chan2dev(chan), "%s: Freeing desc: %p\n", __func__,
+ td_desc);
+ td_free_desc(td_desc);
+ }
+}
+
+static enum dma_status td_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct timb_dma_chan *td_chan =
+ container_of(chan, struct timb_dma_chan, chan);
+ dma_cookie_t last_used;
+ dma_cookie_t last_complete;
+ int ret;
+
+ dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);
+
+ last_complete = td_chan->last_completed_cookie;
+ last_used = chan->cookie;
+
+ ret = dma_async_is_complete(cookie, last_complete, last_used);
+
+ dma_set_tx_state(txstate, last_complete, last_used, 0);
+
+ dev_dbg(chan2dev(chan),
+ "%s: exit, ret: %d, last_complete: %d, last_used: %d\n",
+ __func__, ret, last_complete, last_used);
+
+ return ret;
+}
+
+static void td_issue_pending(struct dma_chan *chan)
+{
+ struct timb_dma_chan *td_chan =
+ container_of(chan, struct timb_dma_chan, chan);
+
+ dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);
+ spin_lock_bh(&td_chan->lock);
+
+ if (!list_empty(&td_chan->active_list))
+ /* transfer ongoing */
+ if (__td_dma_done_ack(td_chan))
+ __td_finish(td_chan);
+
+ if (list_empty(&td_chan->active_list) && !list_empty(&td_chan->queue))
+ __td_start_next(td_chan);
+
+ spin_unlock_bh(&td_chan->lock);
+}
+
+static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan,
+ struct scatterlist *sgl, unsigned int sg_len,
+ enum dma_data_direction direction, unsigned long flags)
+{
+ struct timb_dma_chan *td_chan =
+ container_of(chan, struct timb_dma_chan, chan);
+ struct timb_dma_desc *td_desc;
+ struct scatterlist *sg;
+ unsigned int i;
+ unsigned int desc_usage = 0;
+
+ if (!sgl || !sg_len) {
+ dev_err(chan2dev(chan), "%s: No SG list\n", __func__);
+ return NULL;
+ }
+
+ /* even channels are for RX, odd for TX */
+ if (td_chan->direction != direction) {
+ dev_err(chan2dev(chan),
+ "Requesting channel in wrong direction\n");
+ return NULL;
+ }
+
+ td_desc = td_desc_get(td_chan);
+ if (!td_desc) {
+ dev_err(chan2dev(chan), "Not enough descriptors available\n");
+ return NULL;
+ }
+
+ td_desc->interrupt = (flags & DMA_PREP_INTERRUPT) != 0;
+
+ for_each_sg(sgl, sg, sg_len, i) {
+ int err;
+ if (desc_usage >= td_desc->desc_list_len) {
+ dev_err(chan2dev(chan), "No descriptor space\n");
+ td_desc_put(td_chan, td_desc);
+ return NULL;
+ }
+
+ err = td_fill_desc(td_chan, td_desc->desc_list + desc_usage, sg,
+ i == (sg_len - 1));
+ if (err) {
+ dev_err(chan2dev(chan), "Failed to update desc: %d\n",
+ err);
+ td_desc_put(td_chan, td_desc);
+ return NULL;
+ }
+ desc_usage += TIMB_DMA_DESC_SIZE;
+ }
+
+ dma_sync_single_for_device(chan2dmadev(chan), td_desc->txd.phys,
+ td_desc->desc_list_len, DMA_TO_DEVICE);
+
+ return &td_desc->txd;
+}
+
+static int td_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ struct timb_dma_chan *td_chan =
+ container_of(chan, struct timb_dma_chan, chan);
+ struct timb_dma_desc *td_desc, *_td_desc;
+
+ dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);
+
+ if (cmd != DMA_TERMINATE_ALL)
+ return -ENXIO;
+
+ /* first the easy part, put the queue into the free list */
+ spin_lock_bh(&td_chan->lock);
+ list_for_each_entry_safe(td_desc, _td_desc, &td_chan->queue,
+ desc_node)
+ list_move(&td_desc->desc_node, &td_chan->free_list);
+
+ /* now tear down the running transfer */
+ __td_finish(td_chan);
+ spin_unlock_bh(&td_chan->lock);
+
+ return 0;
+}
+
+static void td_tasklet(unsigned long data)
+{
+ struct timb_dma *td = (struct timb_dma *)data;
+ u32 isr;
+ u32 ipr;
+ u32 ier;
+ int i;
+
+ isr = ioread32(td->membase + TIMBDMA_ISR);
+ ipr = isr & __td_ier_mask(td);
+
+ /* ack the interrupts */
+ iowrite32(ipr, td->membase + TIMBDMA_ISR);
+
+ for (i = 0; i < td->dma.chancnt; i++)
+ if (ipr & (1 << i)) {
+ struct timb_dma_chan *td_chan = td->channels + i;
+ spin_lock(&td_chan->lock);
+ __td_finish(td_chan);
+ if (!list_empty(&td_chan->queue))
+ __td_start_next(td_chan);
+ spin_unlock(&td_chan->lock);
+ }
+
+ ier = __td_ier_mask(td);
+ iowrite32(ier, td->membase + TIMBDMA_IER);
+}
+
+
+static irqreturn_t td_irq(int irq, void *devid)
+{
+ struct timb_dma *td = devid;
+ u32 ipr = ioread32(td->membase + TIMBDMA_IPR);
+
+ if (ipr) {
+ /* disable interrupts, will be re-enabled in tasklet */
+ iowrite32(0, td->membase + TIMBDMA_IER);
+
+ tasklet_schedule(&td->tasklet);
+
+ return IRQ_HANDLED;
+ } else
+ return IRQ_NONE;
+}
+
+
+static int __devinit td_probe(struct platform_device *pdev)
+{
+ struct timb_dma_platform_data *pdata = pdev->dev.platform_data;
+ struct timb_dma *td;
+ struct resource *iomem;
+ int irq;
+ int err;
+ int i;
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "No platform data\n");
+ return -EINVAL;
+ }
+
+ iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!iomem)
+ return -EINVAL;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ if (!request_mem_region(iomem->start, resource_size(iomem),
+ DRIVER_NAME))
+ return -EBUSY;
+
+ td = kzalloc(sizeof(struct timb_dma) +
+ sizeof(struct timb_dma_chan) * pdata->nr_channels, GFP_KERNEL);
+ if (!td) {
+ err = -ENOMEM;
+ goto err_release_region;
+ }
+
+ dev_dbg(&pdev->dev, "Allocated TD: %p\n", td);
+
+ td->membase = ioremap(iomem->start, resource_size(iomem));
+ if (!td->membase) {
+ dev_err(&pdev->dev, "Failed to remap I/O memory\n");
+ err = -ENOMEM;
+ goto err_free_mem;
+ }
+
+ /* 32bit addressing */
+ iowrite32(TIMBDMA_32BIT_ADDR, td->membase + TIMBDMA_ACR);
+
+ /* disable and clear any interrupts */
+ iowrite32(0x0, td->membase + TIMBDMA_IER);
+ iowrite32(0xFFFFFFFF, td->membase + TIMBDMA_ISR);
+
+ tasklet_init(&td->tasklet, td_tasklet, (unsigned long)td);
+
+ err = request_irq(irq, td_irq, IRQF_SHARED, DRIVER_NAME, td);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to request IRQ\n");
+ goto err_tasklet_kill;
+ }
+
+ td->dma.device_alloc_chan_resources = td_alloc_chan_resources;
+ td->dma.device_free_chan_resources = td_free_chan_resources;
+ td->dma.device_tx_status = td_tx_status;
+ td->dma.device_issue_pending = td_issue_pending;
+
+ dma_cap_set(DMA_SLAVE, td->dma.cap_mask);
+ dma_cap_set(DMA_PRIVATE, td->dma.cap_mask);
+ td->dma.device_prep_slave_sg = td_prep_slave_sg;
+ td->dma.device_control = td_control;
+
+ td->dma.dev = &pdev->dev;
+
+ INIT_LIST_HEAD(&td->dma.channels);
+
+ for (i = 0; i < pdata->nr_channels; i++, td->dma.chancnt++) {
+ struct timb_dma_chan *td_chan = &td->channels[i];
+ struct timb_dma_platform_data_channel *pchan =
+ pdata->channels + i;
+
+ /* even channels are RX, odd are TX */
+ if (((i % 2) && pchan->rx) || (!(i % 2) && !pchan->rx)) {
+ dev_err(&pdev->dev, "Wrong channel configuration\n");
+ err = -EINVAL;
+ goto err_tasklet_kill;
+ }
+
+ td_chan->chan.device = &td->dma;
+ td_chan->chan.cookie = 1;
+ td_chan->chan.chan_id = i;
+ spin_lock_init(&td_chan->lock);
+ INIT_LIST_HEAD(&td_chan->active_list);
+ INIT_LIST_HEAD(&td_chan->queue);
+ INIT_LIST_HEAD(&td_chan->free_list);
+
+ td_chan->descs = pchan->descriptors;
+ td_chan->desc_elems = pchan->descriptor_elements;
+ td_chan->bytes_per_line = pchan->bytes_per_line;
+ td_chan->direction = pchan->rx ? DMA_FROM_DEVICE :
+ DMA_TO_DEVICE;
+
+ td_chan->membase = td->membase +
+ (i / 2) * TIMBDMA_INSTANCE_OFFSET +
+ (pchan->rx ? 0 : TIMBDMA_INSTANCE_TX_OFFSET);
+
+ dev_dbg(&pdev->dev, "Chan: %d, membase: %p\n",
+ i, td_chan->membase);
+
+ list_add_tail(&td_chan->chan.device_node, &td->dma.channels);
+ }
+
+ err = dma_async_device_register(&td->dma);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to register async device\n");
+ goto err_free_irq;
+ }
+
+ platform_set_drvdata(pdev, td);
+
+ dev_dbg(&pdev->dev, "Probe result: %d\n", err);
+ return err;
+
+err_free_irq:
+ free_irq(irq, td);
+err_tasklet_kill:
+ tasklet_kill(&td->tasklet);
+ iounmap(td->membase);
+err_free_mem:
+ kfree(td);
+err_release_region:
+ release_mem_region(iomem->start, resource_size(iomem));
+
+ return err;
+
+}
+
+static int __devexit td_remove(struct platform_device *pdev)
+{
+ struct timb_dma *td = platform_get_drvdata(pdev);
+ struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ int irq = platform_get_irq(pdev, 0);
+
+ dma_async_device_unregister(&td->dma);
+ free_irq(irq, td);
+ tasklet_kill(&td->tasklet);
+ iounmap(td->membase);
+ kfree(td);
+ release_mem_region(iomem->start, resource_size(iomem));
+
+ platform_set_drvdata(pdev, NULL);
+
+ dev_dbg(&pdev->dev, "Removed...\n");
+ return 0;
+}
+
+static struct platform_driver td_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = td_probe,
+ .remove = __devexit_p(td_remove),
+};
+
+static int __init td_init(void)
+{
+ return platform_driver_register(&td_driver);
+}
+module_init(td_init);
+
+static void __exit td_exit(void)
+{
+ platform_driver_unregister(&td_driver);
+}
+module_exit(td_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Timberdale DMA controller driver");
+MODULE_AUTHOR("Pelagicore AB <info@pelagicore.com>");
+MODULE_ALIAS("platform:"DRIVER_NAME);
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c
index 75fcf1ac8bb7..cbd83e362b5e 100644
--- a/drivers/dma/txx9dmac.c
+++ b/drivers/dma/txx9dmac.c
@@ -938,12 +938,17 @@ txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
return &first->txd;
}
-static void txx9dmac_terminate_all(struct dma_chan *chan)
+static int txx9dmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
{
struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
struct txx9dmac_desc *desc, *_desc;
LIST_HEAD(list);
+ /* Only supports DMA_TERMINATE_ALL */
+ if (cmd != DMA_TERMINATE_ALL)
+ return -EINVAL;
+
dev_vdbg(chan2dev(chan), "terminate_all\n");
spin_lock_bh(&dc->lock);
@@ -958,12 +963,13 @@ static void txx9dmac_terminate_all(struct dma_chan *chan)
/* Flush all pending and queued descriptors */
list_for_each_entry_safe(desc, _desc, &list, desc_node)
txx9dmac_descriptor_complete(dc, desc);
+
+ return 0;
}
static enum dma_status
-txx9dmac_is_tx_complete(struct dma_chan *chan,
- dma_cookie_t cookie,
- dma_cookie_t *done, dma_cookie_t *used)
+txx9dmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
{
struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
dma_cookie_t last_used;
@@ -985,10 +991,7 @@ txx9dmac_is_tx_complete(struct dma_chan *chan,
ret = dma_async_is_complete(cookie, last_complete, last_used);
}
- if (done)
- *done = last_complete;
- if (used)
- *used = last_used;
+ dma_set_tx_state(txstate, last_complete, last_used, 0);
return ret;
}
@@ -1153,8 +1156,8 @@ static int __init txx9dmac_chan_probe(struct platform_device *pdev)
dc->dma.dev = &pdev->dev;
dc->dma.device_alloc_chan_resources = txx9dmac_alloc_chan_resources;
dc->dma.device_free_chan_resources = txx9dmac_free_chan_resources;
- dc->dma.device_terminate_all = txx9dmac_terminate_all;
- dc->dma.device_is_tx_complete = txx9dmac_is_tx_complete;
+ dc->dma.device_control = txx9dmac_control;
+ dc->dma.device_tx_status = txx9dmac_tx_status;
dc->dma.device_issue_pending = txx9dmac_issue_pending;
if (pdata && pdata->memcpy_chan == ch) {
dc->dma.device_prep_dma_memcpy = txx9dmac_prep_dma_memcpy;
diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
index f2330f81cb5e..cace0a7b707a 100644
--- a/drivers/edac/amd76x_edac.c
+++ b/drivers/edac/amd76x_edac.c
@@ -294,7 +294,7 @@ static int __devinit amd76x_init_one(struct pci_dev *pdev,
{
debugf0("%s()\n", __func__);
- /* don't need to call pci_device_enable() */
+ /* don't need to call pci_enable_device() */
return amd76x_probe1(pdev, ent->driver_data);
}
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
index adc10a2ac5f6..996c1bdb5a34 100644
--- a/drivers/edac/i5000_edac.c
+++ b/drivers/edac/i5000_edac.c
@@ -774,7 +774,7 @@ static void i5000_clear_error(struct mem_ctl_info *mci)
static void i5000_check_error(struct mem_ctl_info *mci)
{
struct i5000_error_info info;
- debugf4("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__);
+ debugf4("MC%d: %s: %s()\n", mci->mc_idx, __FILE__, __func__);
i5000_get_error_info(mci, &info);
i5000_process_error_info(mci, &info, 1);
}
@@ -1353,8 +1353,8 @@ static int i5000_probe1(struct pci_dev *pdev, int dev_idx)
int num_dimms_per_channel;
int num_csrows;
- debugf0("MC: " __FILE__ ": %s(), pdev bus %u dev=0x%x fn=0x%x\n",
- __func__,
+ debugf0("MC: %s: %s(), pdev bus %u dev=0x%x fn=0x%x\n",
+ __FILE__, __func__,
pdev->bus->number,
PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
@@ -1389,7 +1389,7 @@ static int i5000_probe1(struct pci_dev *pdev, int dev_idx)
return -ENOMEM;
kobject_get(&mci->edac_mci_kobj);
- debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci);
+ debugf0("MC: %s: %s(): mci = %p\n", __FILE__, __func__, mci);
mci->dev = &pdev->dev; /* record ptr to the generic device */
@@ -1432,8 +1432,8 @@ static int i5000_probe1(struct pci_dev *pdev, int dev_idx)
/* add this new MC control structure to EDAC's list of MCs */
if (edac_mc_add_mc(mci)) {
- debugf0("MC: " __FILE__
- ": %s(): failed edac_mc_add_mc()\n", __func__);
+ debugf0("MC: %s: %s(): failed edac_mc_add_mc()\n",
+ __FILE__, __func__);
/* FIXME: perhaps some code should go here that disables error
* reporting if we just enabled it
*/
@@ -1478,7 +1478,7 @@ static int __devinit i5000_init_one(struct pci_dev *pdev,
{
int rc;
- debugf0("MC: " __FILE__ ": %s()\n", __func__);
+ debugf0("MC: %s: %s()\n", __FILE__, __func__);
/* wake up device */
rc = pci_enable_device(pdev);
@@ -1497,7 +1497,7 @@ static void __devexit i5000_remove_one(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
- debugf0(__FILE__ ": %s()\n", __func__);
+ debugf0("%s: %s()\n", __FILE__, __func__);
if (i5000_pci)
edac_pci_release_generic_ctl(i5000_pci);
@@ -1544,7 +1544,7 @@ static int __init i5000_init(void)
{
int pci_rc;
- debugf2("MC: " __FILE__ ": %s()\n", __func__);
+ debugf2("MC: %s: %s()\n", __FILE__, __func__);
/* Ensure that the OPSTATE is set correctly for POLL or NMI */
opstate_init();
@@ -1560,7 +1560,7 @@ static int __init i5000_init(void)
*/
static void __exit i5000_exit(void)
{
- debugf2("MC: " __FILE__ ": %s()\n", __func__);
+ debugf2("MC: %s: %s()\n", __FILE__, __func__);
pci_unregister_driver(&i5000_driver);
}
diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
index f99d10655ed4..010c1d6526f5 100644
--- a/drivers/edac/i5400_edac.c
+++ b/drivers/edac/i5400_edac.c
@@ -694,7 +694,7 @@ static void i5400_clear_error(struct mem_ctl_info *mci)
static void i5400_check_error(struct mem_ctl_info *mci)
{
struct i5400_error_info info;
- debugf4("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__);
+ debugf4("MC%d: %s: %s()\n", mci->mc_idx, __FILE__, __func__);
i5400_get_error_info(mci, &info);
i5400_process_error_info(mci, &info);
}
@@ -1227,8 +1227,8 @@ static int i5400_probe1(struct pci_dev *pdev, int dev_idx)
if (dev_idx >= ARRAY_SIZE(i5400_devs))
return -EINVAL;
- debugf0("MC: " __FILE__ ": %s(), pdev bus %u dev=0x%x fn=0x%x\n",
- __func__,
+ debugf0("MC: %s: %s(), pdev bus %u dev=0x%x fn=0x%x\n",
+ __FILE__, __func__,
pdev->bus->number,
PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
@@ -1256,7 +1256,7 @@ static int i5400_probe1(struct pci_dev *pdev, int dev_idx)
if (mci == NULL)
return -ENOMEM;
- debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci);
+ debugf0("MC: %s: %s(): mci = %p\n", __FILE__, __func__, mci);
mci->dev = &pdev->dev; /* record ptr to the generic device */
@@ -1299,8 +1299,8 @@ static int i5400_probe1(struct pci_dev *pdev, int dev_idx)
/* add this new MC control structure to EDAC's list of MCs */
if (edac_mc_add_mc(mci)) {
- debugf0("MC: " __FILE__
- ": %s(): failed edac_mc_add_mc()\n", __func__);
+ debugf0("MC: %s: %s(): failed edac_mc_add_mc()\n",
+ __FILE__, __func__);
/* FIXME: perhaps some code should go here that disables error
* reporting if we just enabled it
*/
@@ -1344,7 +1344,7 @@ static int __devinit i5400_init_one(struct pci_dev *pdev,
{
int rc;
- debugf0("MC: " __FILE__ ": %s()\n", __func__);
+ debugf0("MC: %s: %s()\n", __FILE__, __func__);
/* wake up device */
rc = pci_enable_device(pdev);
@@ -1363,7 +1363,7 @@ static void __devexit i5400_remove_one(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
- debugf0(__FILE__ ": %s()\n", __func__);
+ debugf0("%s: %s()\n", __FILE__, __func__);
if (i5400_pci)
edac_pci_release_generic_ctl(i5400_pci);
@@ -1409,7 +1409,7 @@ static int __init i5400_init(void)
{
int pci_rc;
- debugf2("MC: " __FILE__ ": %s()\n", __func__);
+ debugf2("MC: %s: %s()\n", __FILE__, __func__);
/* Ensure that the OPSTATE is set correctly for POLL or NMI */
opstate_init();
@@ -1425,7 +1425,7 @@ static int __init i5400_init(void)
*/
static void __exit i5400_exit(void)
{
- debugf2("MC: " __FILE__ ": %s()\n", __func__);
+ debugf2("MC: %s: %s()\n", __FILE__, __func__);
pci_unregister_driver(&i5400_driver);
}
diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
index 7f3884fcbd46..a2fa1feed724 100644
--- a/drivers/edac/i82443bxgx_edac.c
+++ b/drivers/edac/i82443bxgx_edac.c
@@ -178,7 +178,7 @@ static void i82443bxgx_edacmc_check(struct mem_ctl_info *mci)
{
struct i82443bxgx_edacmc_error_info info;
- debugf1("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__);
+ debugf1("MC%d: %s: %s()\n", mci->mc_idx, __FILE__, __func__);
i82443bxgx_edacmc_get_error_info(mci, &info);
i82443bxgx_edacmc_process_error_info(mci, &info, 1);
}
@@ -198,13 +198,13 @@ static void i82443bxgx_init_csrows(struct mem_ctl_info *mci,
for (index = 0; index < mci->nr_csrows; index++) {
csrow = &mci->csrows[index];
pci_read_config_byte(pdev, I82443BXGX_DRB + index, &drbar);
- debugf1("MC%d: " __FILE__ ": %s() Row=%d DRB = %#0x\n",
- mci->mc_idx, __func__, index, drbar);
+ debugf1("MC%d: %s: %s() Row=%d DRB = %#0x\n",
+ mci->mc_idx, __FILE__, __func__, index, drbar);
row_high_limit = ((u32) drbar << 23);
/* find the DRAM Chip Select Base address and mask */
- debugf1("MC%d: " __FILE__ ": %s() Row=%d, "
- "Boundry Address=%#0x, Last = %#0x \n",
- mci->mc_idx, __func__, index, row_high_limit,
+ debugf1("MC%d: %s: %s() Row=%d, "
+ "Boundry Address=%#0x, Last = %#0x\n",
+ mci->mc_idx, __FILE__, __func__, index, row_high_limit,
row_high_limit_last);
/* 440GX goes to 2GB, represented with a DRB of 0. */
@@ -237,7 +237,7 @@ static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx)
enum mem_type mtype;
enum edac_type edac_mode;
- debugf0("MC: " __FILE__ ": %s()\n", __func__);
+ debugf0("MC: %s: %s()\n", __FILE__, __func__);
/* Something is really hosed if PCI config space reads from
* the MC aren't working.
@@ -250,7 +250,7 @@ static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx)
if (mci == NULL)
return -ENOMEM;
- debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci);
+ debugf0("MC: %s: %s(): mci = %p\n", __FILE__, __func__, mci);
mci->dev = &pdev->dev;
mci->mtype_cap = MEM_FLAG_EDO | MEM_FLAG_SDR | MEM_FLAG_RDR;
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
@@ -336,7 +336,7 @@ static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx)
__func__);
}
- debugf3("MC: " __FILE__ ": %s(): success\n", __func__);
+ debugf3("MC: %s: %s(): success\n", __FILE__, __func__);
return 0;
fail:
@@ -352,9 +352,9 @@ static int __devinit i82443bxgx_edacmc_init_one(struct pci_dev *pdev,
{
int rc;
- debugf0("MC: " __FILE__ ": %s()\n", __func__);
+ debugf0("MC: %s: %s()\n", __FILE__, __func__);
- /* don't need to call pci_device_enable() */
+ /* don't need to call pci_enable_device() */
rc = i82443bxgx_edacmc_probe1(pdev, ent->driver_data);
if (mci_pdev == NULL)
@@ -367,7 +367,7 @@ static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
- debugf0(__FILE__ ": %s()\n", __func__);
+ debugf0("%s: %s()\n", __FILE__, __func__);
if (i82443bxgx_pci)
edac_pci_release_generic_ctl(i82443bxgx_pci);
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index 4471647b4807..6c1886b497ff 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -338,15 +338,13 @@ static struct of_device_id mpc85xx_pci_err_of_match[] = {
};
static struct of_platform_driver mpc85xx_pci_err_driver = {
- .owner = THIS_MODULE,
- .name = "mpc85xx_pci_err",
- .match_table = mpc85xx_pci_err_of_match,
.probe = mpc85xx_pci_err_probe,
.remove = __devexit_p(mpc85xx_pci_err_remove),
.driver = {
- .name = "mpc85xx_pci_err",
- .owner = THIS_MODULE,
- },
+ .name = "mpc85xx_pci_err",
+ .owner = THIS_MODULE,
+ .of_match_table = mpc85xx_pci_err_of_match,
+ },
};
#endif /* CONFIG_PCI */
@@ -654,15 +652,13 @@ static struct of_device_id mpc85xx_l2_err_of_match[] = {
};
static struct of_platform_driver mpc85xx_l2_err_driver = {
- .owner = THIS_MODULE,
- .name = "mpc85xx_l2_err",
- .match_table = mpc85xx_l2_err_of_match,
.probe = mpc85xx_l2_err_probe,
.remove = mpc85xx_l2_err_remove,
.driver = {
- .name = "mpc85xx_l2_err",
- .owner = THIS_MODULE,
- },
+ .name = "mpc85xx_l2_err",
+ .owner = THIS_MODULE,
+ .of_match_table = mpc85xx_l2_err_of_match,
+ },
};
/**************************** MC Err device ***************************/
@@ -1131,15 +1127,13 @@ static struct of_device_id mpc85xx_mc_err_of_match[] = {
};
static struct of_platform_driver mpc85xx_mc_err_driver = {
- .owner = THIS_MODULE,
- .name = "mpc85xx_mc_err",
- .match_table = mpc85xx_mc_err_of_match,
.probe = mpc85xx_mc_err_probe,
.remove = mpc85xx_mc_err_remove,
.driver = {
- .name = "mpc85xx_mc_err",
- .owner = THIS_MODULE,
- },
+ .name = "mpc85xx_mc_err",
+ .owner = THIS_MODULE,
+ .of_match_table = mpc85xx_mc_err_of_match,
+ },
};
#ifdef CONFIG_MPC85xx
diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c
index 11f2172aa1e6..9d6f6783328c 100644
--- a/drivers/edac/ppc4xx_edac.c
+++ b/drivers/edac/ppc4xx_edac.c
@@ -202,13 +202,13 @@ static struct of_device_id ppc4xx_edac_match[] = {
};
static struct of_platform_driver ppc4xx_edac_driver = {
- .match_table = ppc4xx_edac_match,
.probe = ppc4xx_edac_probe,
.remove = ppc4xx_edac_remove,
- .driver = {
- .owner = THIS_MODULE,
- .name = PPC4XX_EDAC_MODULE_NAME
- }
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = PPC4XX_EDAC_MODULE_NAME,
+ .of_match_table = ppc4xx_edac_match,
+ },
};
/*
diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
index d55f8e9de788..6a822c631ef5 100644
--- a/drivers/edac/r82600_edac.c
+++ b/drivers/edac/r82600_edac.c
@@ -354,7 +354,7 @@ static int __devinit r82600_init_one(struct pci_dev *pdev,
{
debugf0("%s()\n", __func__);
- /* don't need to call pci_device_enable() */
+ /* don't need to call pci_enable_device() */
return r82600_probe1(pdev, ent->driver_data);
}
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
index 5045156c5313..9dcb30466ec0 100644
--- a/drivers/firewire/core-card.c
+++ b/drivers/firewire/core-card.c
@@ -30,7 +30,6 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
-#include <linux/timer.h>
#include <linux/workqueue.h>
#include <asm/atomic.h>
@@ -63,7 +62,7 @@ static size_t config_rom_length = 1 + 4 + 1 + 1;
#define BIB_CRC(v) ((v) << 0)
#define BIB_CRC_LENGTH(v) ((v) << 16)
#define BIB_INFO_LENGTH(v) ((v) << 24)
-
+#define BIB_BUS_NAME 0x31333934 /* "1394" */
#define BIB_LINK_SPEED(v) ((v) << 0)
#define BIB_GENERATION(v) ((v) << 4)
#define BIB_MAX_ROM(v) ((v) << 8)
@@ -73,7 +72,8 @@ static size_t config_rom_length = 1 + 4 + 1 + 1;
#define BIB_BMC ((1) << 28)
#define BIB_ISC ((1) << 29)
#define BIB_CMC ((1) << 30)
-#define BIB_IMC ((1) << 31)
+#define BIB_IRMC ((1) << 31)
+#define NODE_CAPABILITIES 0x0c0083c0 /* per IEEE 1394 clause 8.3.2.6.5.2 */
static void generate_config_rom(struct fw_card *card, __be32 *config_rom)
{
@@ -91,18 +91,18 @@ static void generate_config_rom(struct fw_card *card, __be32 *config_rom)
config_rom[0] = cpu_to_be32(
BIB_CRC_LENGTH(4) | BIB_INFO_LENGTH(4) | BIB_CRC(0));
- config_rom[1] = cpu_to_be32(0x31333934);
+ config_rom[1] = cpu_to_be32(BIB_BUS_NAME);
config_rom[2] = cpu_to_be32(
BIB_LINK_SPEED(card->link_speed) |
BIB_GENERATION(card->config_rom_generation++ % 14 + 2) |
BIB_MAX_ROM(2) |
BIB_MAX_RECEIVE(card->max_receive) |
- BIB_BMC | BIB_ISC | BIB_CMC | BIB_IMC);
+ BIB_BMC | BIB_ISC | BIB_CMC | BIB_IRMC);
config_rom[3] = cpu_to_be32(card->guid >> 32);
config_rom[4] = cpu_to_be32(card->guid);
/* Generate root directory. */
- config_rom[6] = cpu_to_be32(0x0c0083c0); /* node capabilities */
+ config_rom[6] = cpu_to_be32(NODE_CAPABILITIES);
i = 7;
j = 7 + descriptor_count;
@@ -407,13 +407,6 @@ static void fw_card_bm_work(struct work_struct *work)
fw_card_put(card);
}
-static void flush_timer_callback(unsigned long data)
-{
- struct fw_card *card = (struct fw_card *)data;
-
- fw_flush_transactions(card);
-}
-
void fw_card_initialize(struct fw_card *card,
const struct fw_card_driver *driver,
struct device *device)
@@ -432,8 +425,6 @@ void fw_card_initialize(struct fw_card *card,
init_completion(&card->done);
INIT_LIST_HEAD(&card->transaction_list);
spin_lock_init(&card->lock);
- setup_timer(&card->flush_timer,
- flush_timer_callback, (unsigned long)card);
card->local_node = NULL;
@@ -558,7 +549,6 @@ void fw_core_remove_card(struct fw_card *card)
wait_for_completion(&card->done);
WARN_ON(!list_empty(&card->transaction_list));
- del_timer_sync(&card->flush_timer);
}
EXPORT_SYMBOL(fw_core_remove_card);
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index 14a34d99eea2..5bf106b9d791 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -227,7 +227,7 @@ static int fw_device_op_open(struct inode *inode, struct file *file)
list_add_tail(&client->link, &device->client_list);
mutex_unlock(&device->client_list_mutex);
- return 0;
+ return nonseekable_open(inode, file);
}
static void queue_event(struct client *client, struct event *event,
@@ -1496,13 +1496,13 @@ static unsigned int fw_device_op_poll(struct file *file, poll_table * pt)
const struct file_operations fw_device_ops = {
.owner = THIS_MODULE,
+ .llseek = no_llseek,
.open = fw_device_op_open,
.read = fw_device_op_read,
.unlocked_ioctl = fw_device_op_ioctl,
- .poll = fw_device_op_poll,
- .release = fw_device_op_release,
.mmap = fw_device_op_mmap,
-
+ .release = fw_device_op_release,
+ .poll = fw_device_op_poll,
#ifdef CONFIG_COMPAT
.compat_ioctl = fw_device_op_compat_ioctl,
#endif
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index 673b03f8b4ec..fdc33ff06dc1 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -81,7 +81,7 @@ static int close_transaction(struct fw_transaction *transaction,
spin_lock_irqsave(&card->lock, flags);
list_for_each_entry(t, &card->transaction_list, link) {
if (t == transaction) {
- list_del(&t->link);
+ list_del_init(&t->link);
card->tlabel_mask &= ~(1ULL << t->tlabel);
break;
}
@@ -89,6 +89,7 @@ static int close_transaction(struct fw_transaction *transaction,
spin_unlock_irqrestore(&card->lock, flags);
if (&t->link != &card->transaction_list) {
+ del_timer_sync(&t->split_timeout_timer);
t->callback(card, rcode, NULL, 0, t->callback_data);
return 0;
}
@@ -121,6 +122,31 @@ int fw_cancel_transaction(struct fw_card *card,
}
EXPORT_SYMBOL(fw_cancel_transaction);
+static void split_transaction_timeout_callback(unsigned long data)
+{
+ struct fw_transaction *t = (struct fw_transaction *)data;
+ struct fw_card *card = t->card;
+ unsigned long flags;
+
+ spin_lock_irqsave(&card->lock, flags);
+ if (list_empty(&t->link)) {
+ spin_unlock_irqrestore(&card->lock, flags);
+ return;
+ }
+ list_del(&t->link);
+ card->tlabel_mask &= ~(1ULL << t->tlabel);
+ spin_unlock_irqrestore(&card->lock, flags);
+
+ card->driver->cancel_packet(card, &t->packet);
+
+ /*
+ * At this point cancel_packet will never call the transaction
+ * callback, since we just took the transaction out of the list.
+ * So do it here.
+ */
+ t->callback(card, RCODE_CANCELLED, NULL, 0, t->callback_data);
+}
+
static void transmit_complete_callback(struct fw_packet *packet,
struct fw_card *card, int status)
{
@@ -229,6 +255,23 @@ static void fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
packet->payload_mapped = false;
}
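+
+/*
+ * Allocate a transaction label: scan the 64-entry tlabel bitmap starting
+ * at card->current_tlabel, wrapping modulo 64; return -EBUSY when every
+ * label is in use.
+ */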
+static int allocate_tlabel(struct fw_card *card)
+{
+ int tlabel;
+
+ tlabel = card->current_tlabel;
+ while (card->tlabel_mask & (1ULL << tlabel)) {
+ tlabel = (tlabel + 1) & 0x3f;
+ if (tlabel == card->current_tlabel)
+ return -EBUSY;
+ }
+
+ card->current_tlabel = (tlabel + 1) & 0x3f;
+ card->tlabel_mask |= 1ULL << tlabel;
+
+ return tlabel;
+}
+
/**
* This function provides low-level access to the IEEE1394 transaction
* logic. Most C programs would use either fw_read(), fw_write() or
@@ -277,31 +320,26 @@ void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode,
int tlabel;
/*
- * Bump the flush timer up 100ms first of all so we
- * don't race with a flush timer callback.
- */
-
- mod_timer(&card->flush_timer, jiffies + DIV_ROUND_UP(HZ, 10));
-
- /*
* Allocate tlabel from the bitmap and put the transaction on
* the list while holding the card spinlock.
*/
spin_lock_irqsave(&card->lock, flags);
- tlabel = card->current_tlabel;
- if (card->tlabel_mask & (1ULL << tlabel)) {
+ tlabel = allocate_tlabel(card);
+ if (tlabel < 0) {
spin_unlock_irqrestore(&card->lock, flags);
callback(card, RCODE_SEND_ERROR, NULL, 0, callback_data);
return;
}
- card->current_tlabel = (card->current_tlabel + 1) & 0x3f;
- card->tlabel_mask |= (1ULL << tlabel);
-
t->node_id = destination_id;
t->tlabel = tlabel;
+ t->card = card;
+ setup_timer(&t->split_timeout_timer,
+ split_transaction_timeout_callback, (unsigned long)t);
+ /* FIXME: start this timer later, relative to t->timestamp */
+ mod_timer(&t->split_timeout_timer, jiffies + DIV_ROUND_UP(HZ, 10));
t->callback = callback;
t->callback_data = callback_data;
@@ -347,11 +385,13 @@ int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
struct transaction_callback_data d;
struct fw_transaction t;
+ init_timer_on_stack(&t.split_timeout_timer);
init_completion(&d.done);
d.payload = payload;
fw_send_request(card, &t, tcode, destination_id, generation, speed,
offset, payload, length, transaction_callback, &d);
wait_for_completion(&d.done);
+ destroy_timer_on_stack(&t.split_timeout_timer);
return d.rcode;
}
@@ -394,30 +434,6 @@ void fw_send_phy_config(struct fw_card *card,
mutex_unlock(&phy_config_mutex);
}
-void fw_flush_transactions(struct fw_card *card)
-{
- struct fw_transaction *t, *next;
- struct list_head list;
- unsigned long flags;
-
- INIT_LIST_HEAD(&list);
- spin_lock_irqsave(&card->lock, flags);
- list_splice_init(&card->transaction_list, &list);
- card->tlabel_mask = 0;
- spin_unlock_irqrestore(&card->lock, flags);
-
- list_for_each_entry_safe(t, next, &list, link) {
- card->driver->cancel_packet(card, &t->packet);
-
- /*
- * At this point cancel_packet will never call the
- * transaction callback, since we just took all the
- * transactions out of the list. So do it here.
- */
- t->callback(card, RCODE_CANCELLED, NULL, 0, t->callback_data);
- }
-}
-
static struct fw_address_handler *lookup_overlapping_address_handler(
struct list_head *list, unsigned long long offset, size_t length)
{
@@ -827,8 +843,8 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
spin_lock_irqsave(&card->lock, flags);
list_for_each_entry(t, &card->transaction_list, link) {
if (t->node_id == source && t->tlabel == tlabel) {
- list_del(&t->link);
- card->tlabel_mask &= ~(1 << t->tlabel);
+ list_del_init(&t->link);
+ card->tlabel_mask &= ~(1ULL << t->tlabel);
break;
}
}
@@ -869,6 +885,8 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
break;
}
+ del_timer_sync(&t->split_timeout_timer);
+
/*
* The response handler may be executed while the request handler
* is still pending. Cancel the request handler.
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
index fb0321300cce..0ecfcd95f4c5 100644
--- a/drivers/firewire/core.h
+++ b/drivers/firewire/core.h
@@ -27,7 +27,12 @@ struct fw_packet;
#define PHY_LINK_ACTIVE 0x80
#define PHY_CONTENDER 0x40
#define PHY_BUS_RESET 0x40
+#define PHY_EXTENDED_REGISTERS 0xe0
#define PHY_BUS_SHORT_RESET 0x40
+#define PHY_INT_STATUS_BITS 0x3c
+#define PHY_ENABLE_ACCEL 0x02
+#define PHY_ENABLE_MULTI 0x01
+#define PHY_PAGE_SELECT 0xe0
#define BANDWIDTH_AVAILABLE_INITIAL 4915
#define BROADCAST_CHANNEL_INITIAL (1 << 31 | 31)
@@ -215,7 +220,6 @@ void fw_core_handle_request(struct fw_card *card, struct fw_packet *request);
void fw_core_handle_response(struct fw_card *card, struct fw_packet *packet);
void fw_fill_response(struct fw_packet *response, u32 *request_header,
int rcode, void *payload, size_t length);
-void fw_flush_transactions(struct fw_card *card);
void fw_send_phy_config(struct fw_card *card,
int node_id, int generation, int gap_count);
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 94b16e0340ae..9f627e758cfc 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -236,13 +236,15 @@ static char ohci_driver_name[] = KBUILD_MODNAME;
#define QUIRK_CYCLE_TIMER 1
#define QUIRK_RESET_PACKET 2
#define QUIRK_BE_HEADERS 4
+#define QUIRK_NO_1394A 8
/* In case of multiple matches in ohci_quirks[], only the first one is used. */
static const struct {
unsigned short vendor, device, flags;
} ohci_quirks[] = {
{PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV22, QUIRK_CYCLE_TIMER |
- QUIRK_RESET_PACKET},
+ QUIRK_RESET_PACKET |
+ QUIRK_NO_1394A},
{PCI_VENDOR_ID_TI, PCI_ANY_ID, QUIRK_RESET_PACKET},
{PCI_VENDOR_ID_AL, PCI_ANY_ID, QUIRK_CYCLE_TIMER},
{PCI_VENDOR_ID_NEC, PCI_ANY_ID, QUIRK_CYCLE_TIMER},
@@ -257,15 +259,16 @@ MODULE_PARM_DESC(quirks, "Chip quirks (default = 0"
", nonatomic cycle timer = " __stringify(QUIRK_CYCLE_TIMER)
", reset packet generation = " __stringify(QUIRK_RESET_PACKET)
", AR/selfID endianess = " __stringify(QUIRK_BE_HEADERS)
+ ", no 1394a enhancements = " __stringify(QUIRK_NO_1394A)
")");
-#ifdef CONFIG_FIREWIRE_OHCI_DEBUG
-
#define OHCI_PARAM_DEBUG_AT_AR 1
#define OHCI_PARAM_DEBUG_SELFIDS 2
#define OHCI_PARAM_DEBUG_IRQS 4
#define OHCI_PARAM_DEBUG_BUSRESETS 8 /* only effective before chip init */
+#ifdef CONFIG_FIREWIRE_OHCI_DEBUG
+
static int param_debug;
module_param_named(debug, param_debug, int, 0644);
MODULE_PARM_DESC(debug, "Verbose logging (default = 0"
@@ -438,9 +441,10 @@ static void log_ar_at_event(char dir, int speed, u32 *header, int evt)
#else
-#define log_irqs(evt)
-#define log_selfids(node_id, generation, self_id_count, sid)
-#define log_ar_at_event(dir, speed, header, evt)
+#define param_debug 0
+static inline void log_irqs(u32 evt) {}
+static inline void log_selfids(int node_id, int generation, int self_id_count, u32 *s) {}
+static inline void log_ar_at_event(char dir, int speed, u32 *header, int evt) {}
#endif /* CONFIG_FIREWIRE_OHCI_DEBUG */
@@ -460,27 +464,71 @@ static inline void flush_writes(const struct fw_ohci *ohci)
reg_read(ohci, OHCI1394_Version);
}
-static int ohci_update_phy_reg(struct fw_card *card, int addr,
- int clear_bits, int set_bits)
+static int read_phy_reg(struct fw_ohci *ohci, int addr)
{
- struct fw_ohci *ohci = fw_ohci(card);
- u32 val, old;
+ u32 val;
+ int i;
reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr));
- flush_writes(ohci);
- msleep(2);
- val = reg_read(ohci, OHCI1394_PhyControl);
- if ((val & OHCI1394_PhyControl_ReadDone) == 0) {
- fw_error("failed to set phy reg bits.\n");
- return -EBUSY;
+ for (i = 0; i < 10; i++) {
+ val = reg_read(ohci, OHCI1394_PhyControl);
+ if (val & OHCI1394_PhyControl_ReadDone)
+ return OHCI1394_PhyControl_ReadData(val);
+
+ msleep(1);
}
+ fw_error("failed to read phy reg\n");
+
+ return -EBUSY;
+}
+
+static int write_phy_reg(const struct fw_ohci *ohci, int addr, u32 val)
+{
+ int i;
- old = OHCI1394_PhyControl_ReadData(val);
- old = (old & ~clear_bits) | set_bits;
reg_write(ohci, OHCI1394_PhyControl,
- OHCI1394_PhyControl_Write(addr, old));
+ OHCI1394_PhyControl_Write(addr, val));
+ for (i = 0; i < 100; i++) {
+ val = reg_read(ohci, OHCI1394_PhyControl);
+ if (!(val & OHCI1394_PhyControl_WritePending))
+ return 0;
- return 0;
+ msleep(1);
+ }
+ fw_error("failed to write phy reg\n");
+
+ return -EBUSY;
+}
+
+static int ohci_update_phy_reg(struct fw_card *card, int addr,
+ int clear_bits, int set_bits)
+{
+ struct fw_ohci *ohci = fw_ohci(card);
+ int ret;
+
+ ret = read_phy_reg(ohci, addr);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * The interrupt status bits are cleared by writing a one bit.
+ * Avoid clearing them unless explicitly requested in set_bits.
+ */
+ if (addr == 5)
+ clear_bits |= PHY_INT_STATUS_BITS;
+
+ return write_phy_reg(ohci, addr, (ret & ~clear_bits) | set_bits);
+}
+
+static int read_paged_phy_reg(struct fw_ohci *ohci, int page, int addr)
+{
+ int ret;
+
+ ret = ohci_update_phy_reg(&ohci->card, 7, PHY_PAGE_SELECT, page << 5);
+ if (ret < 0)
+ return ret;
+
+ return read_phy_reg(ohci, addr);
}
static int ar_context_add_page(struct ar_context *ctx)
@@ -1351,7 +1399,7 @@ static void bus_reset_tasklet(unsigned long data)
* was set up before this reset, the old one is now no longer
* in use and we can free it. Update the config rom pointers
* to point to the current config rom and clear the
- * next_config_rom pointer so a new udpate can take place.
+ * next_config_rom pointer so a new update can take place.
*/
if (ohci->next_config_rom != NULL) {
@@ -1495,13 +1543,64 @@ static void copy_config_rom(__be32 *dest, const __be32 *src, size_t length)
memset(&dest[length], 0, CONFIG_ROM_SIZE - size);
}
+static int configure_1394a_enhancements(struct fw_ohci *ohci)
+{
+ bool enable_1394a;
+ int ret, clear, set, offset;
+
+ /* Check if the driver should configure link and PHY. */
+ if (!(reg_read(ohci, OHCI1394_HCControlSet) &
+ OHCI1394_HCControl_programPhyEnable))
+ return 0;
+
+ /* Paranoia: check whether the PHY supports 1394a, too. */
+ enable_1394a = false;
+ ret = read_phy_reg(ohci, 2);
+ if (ret < 0)
+ return ret;
+ if ((ret & PHY_EXTENDED_REGISTERS) == PHY_EXTENDED_REGISTERS) {
+ ret = read_paged_phy_reg(ohci, 1, 8);
+ if (ret < 0)
+ return ret;
+ if (ret >= 1)
+ enable_1394a = true;
+ }
+
+ if (ohci->quirks & QUIRK_NO_1394A)
+ enable_1394a = false;
+
+ /* Configure PHY and link consistently. */
+ if (enable_1394a) {
+ clear = 0;
+ set = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI;
+ } else {
+ clear = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI;
+ set = 0;
+ }
+ ret = ohci_update_phy_reg(&ohci->card, 5, clear, set);
+ if (ret < 0)
+ return ret;
+
+ if (enable_1394a)
+ offset = OHCI1394_HCControlSet;
+ else
+ offset = OHCI1394_HCControlClear;
+ reg_write(ohci, offset, OHCI1394_HCControl_aPhyEnhanceEnable);
+
+ /* Clean up: configuration has been taken care of. */
+ reg_write(ohci, OHCI1394_HCControlClear,
+ OHCI1394_HCControl_programPhyEnable);
+
+ return 0;
+}
+
static int ohci_enable(struct fw_card *card,
const __be32 *config_rom, size_t length)
{
struct fw_ohci *ohci = fw_ohci(card);
struct pci_dev *dev = to_pci_dev(card->device);
u32 lps;
- int i;
+ int i, ret;
if (software_reset(ohci)) {
fw_error("Failed to reset ohci card.\n");
@@ -1565,10 +1664,14 @@ static int ohci_enable(struct fw_card *card,
if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS)
reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
+ ret = configure_1394a_enhancements(ohci);
+ if (ret < 0)
+ return ret;
+
/* Activate link_on bit and contender bit in our self ID packets.*/
- if (ohci_update_phy_reg(card, 4, 0,
- PHY_LINK_ACTIVE | PHY_CONTENDER) < 0)
- return -EIO;
+ ret = ohci_update_phy_reg(card, 4, 0, PHY_LINK_ACTIVE | PHY_CONTENDER);
+ if (ret < 0)
+ return ret;
/*
* When the link is not yet enabled, the atomic config rom
@@ -2304,7 +2407,7 @@ static const struct fw_card_driver ohci_driver = {
};
#ifdef CONFIG_PPC_PMAC
-static void ohci_pmac_on(struct pci_dev *dev)
+static void pmac_ohci_on(struct pci_dev *dev)
{
if (machine_is(powermac)) {
struct device_node *ofn = pci_device_to_OF_node(dev);
@@ -2316,7 +2419,7 @@ static void ohci_pmac_on(struct pci_dev *dev)
}
}
-static void ohci_pmac_off(struct pci_dev *dev)
+static void pmac_ohci_off(struct pci_dev *dev)
{
if (machine_is(powermac)) {
struct device_node *ofn = pci_device_to_OF_node(dev);
@@ -2328,15 +2431,15 @@ static void ohci_pmac_off(struct pci_dev *dev)
}
}
#else
-#define ohci_pmac_on(dev)
-#define ohci_pmac_off(dev)
+static inline void pmac_ohci_on(struct pci_dev *dev) {}
+static inline void pmac_ohci_off(struct pci_dev *dev) {}
#endif /* CONFIG_PPC_PMAC */
static int __devinit pci_probe(struct pci_dev *dev,
const struct pci_device_id *ent)
{
struct fw_ohci *ohci;
- u32 bus_options, max_receive, link_speed, version;
+ u32 bus_options, max_receive, link_speed, version, link_enh;
u64 guid;
int i, err, n_ir, n_it;
size_t size;
@@ -2349,7 +2452,7 @@ static int __devinit pci_probe(struct pci_dev *dev,
fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev);
- ohci_pmac_on(dev);
+ pmac_ohci_on(dev);
err = pci_enable_device(dev);
if (err) {
@@ -2389,6 +2492,23 @@ static int __devinit pci_probe(struct pci_dev *dev,
if (param_quirks)
ohci->quirks = param_quirks;
+ /* TI OHCI-Lynx and compatible: set recommended configuration bits. */
+ if (dev->vendor == PCI_VENDOR_ID_TI) {
+ pci_read_config_dword(dev, PCI_CFG_TI_LinkEnh, &link_enh);
+
+ /* adjust latency of ATx FIFO: use 1.7 KB threshold */
+ link_enh &= ~TI_LinkEnh_atx_thresh_mask;
+ link_enh |= TI_LinkEnh_atx_thresh_1_7K;
+
+ /* use priority arbitration for asynchronous responses */
+ link_enh |= TI_LinkEnh_enab_unfair;
+
+ /* required for aPhyEnhanceEnable to work */
+ link_enh |= TI_LinkEnh_enab_accel;
+
+ pci_write_config_dword(dev, PCI_CFG_TI_LinkEnh, link_enh);
+ }
+
ar_context_init(&ohci->ar_request_ctx, ohci,
OHCI1394_AsReqRcvContextControlSet);
@@ -2466,7 +2586,7 @@ static int __devinit pci_probe(struct pci_dev *dev,
pci_disable_device(dev);
fail_free:
kfree(&ohci->card);
- ohci_pmac_off(dev);
+ pmac_ohci_off(dev);
fail:
if (err == -ENOMEM)
fw_error("Out of memory\n");
@@ -2509,7 +2629,7 @@ static void pci_remove(struct pci_dev *dev)
pci_release_region(dev, 0);
pci_disable_device(dev);
kfree(&ohci->card);
- ohci_pmac_off(dev);
+ pmac_ohci_off(dev);
fw_notify("Removed fw-ohci device.\n");
}
@@ -2530,7 +2650,7 @@ static int pci_suspend(struct pci_dev *dev, pm_message_t state)
err = pci_set_power_state(dev, pci_choose_state(dev, state));
if (err)
fw_error("pci_set_power_state failed with %d\n", err);
- ohci_pmac_off(dev);
+ pmac_ohci_off(dev);
return 0;
}
@@ -2540,7 +2660,7 @@ static int pci_resume(struct pci_dev *dev)
struct fw_ohci *ohci = pci_get_drvdata(dev);
int err;
- ohci_pmac_on(dev);
+ pmac_ohci_on(dev);
pci_set_power_state(dev, PCI_D0);
pci_restore_state(dev);
err = pci_enable_device(dev);
diff --git a/drivers/firewire/ohci.h b/drivers/firewire/ohci.h
index ba492d85c516..3bc9a5d744eb 100644
--- a/drivers/firewire/ohci.h
+++ b/drivers/firewire/ohci.h
@@ -67,7 +67,7 @@
#define OHCI1394_PhyControl_ReadDone 0x80000000
#define OHCI1394_PhyControl_ReadData(r) (((r) & 0x00ff0000) >> 16)
#define OHCI1394_PhyControl_Write(addr, data) (((addr) << 8) | (data) | 0x00004000)
-#define OHCI1394_PhyControl_WriteDone 0x00004000
+#define OHCI1394_PhyControl_WritePending 0x00004000
#define OHCI1394_IsochronousCycleTimer 0x0F0
#define OHCI1394_AsReqFilterHiSet 0x100
#define OHCI1394_AsReqFilterHiClear 0x104
@@ -154,4 +154,12 @@
#define OHCI1394_phy_tcode 0xe
+/* TI extensions */
+
+#define PCI_CFG_TI_LinkEnh 0xf4
+#define TI_LinkEnh_enab_accel 0x00000002
+#define TI_LinkEnh_enab_unfair 0x00000080
+#define TI_LinkEnh_atx_thresh_mask 0x00003000
+#define TI_LinkEnh_atx_thresh_1_7K 0x00001000
+
#endif /* _FIREWIRE_OHCI_H */
diff --git a/drivers/firmware/dcdbas.c b/drivers/firmware/dcdbas.c
index fb09bb3c0ad6..aa9bc9e980e1 100644
--- a/drivers/firmware/dcdbas.c
+++ b/drivers/firmware/dcdbas.c
@@ -149,7 +149,7 @@ static ssize_t smi_data_buf_size_store(struct device *dev,
return count;
}
-static ssize_t smi_data_read(struct kobject *kobj,
+static ssize_t smi_data_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t count)
{
@@ -162,7 +162,7 @@ static ssize_t smi_data_read(struct kobject *kobj,
return ret;
}
-static ssize_t smi_data_write(struct kobject *kobj,
+static ssize_t smi_data_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t count)
{
diff --git a/drivers/firmware/dell_rbu.c b/drivers/firmware/dell_rbu.c
index 3a4460265b10..2f452f1f7c8a 100644
--- a/drivers/firmware/dell_rbu.c
+++ b/drivers/firmware/dell_rbu.c
@@ -522,7 +522,7 @@ static ssize_t read_rbu_mono_data(char *buffer, loff_t pos, size_t count)
rbu_data.image_update_buffer, rbu_data.bios_image_size);
}
-static ssize_t read_rbu_data(struct kobject *kobj,
+static ssize_t read_rbu_data(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buffer, loff_t pos, size_t count)
{
@@ -576,7 +576,7 @@ static void callbackfn_rbu(const struct firmware *fw, void *context)
release_firmware(fw);
}
-static ssize_t read_rbu_image_type(struct kobject *kobj,
+static ssize_t read_rbu_image_type(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buffer, loff_t pos, size_t count)
{
@@ -586,7 +586,7 @@ static ssize_t read_rbu_image_type(struct kobject *kobj,
return size;
}
-static ssize_t write_rbu_image_type(struct kobject *kobj,
+static ssize_t write_rbu_image_type(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buffer, loff_t pos, size_t count)
{
@@ -647,7 +647,7 @@ static ssize_t write_rbu_image_type(struct kobject *kobj,
return rc;
}
-static ssize_t read_rbu_packet_size(struct kobject *kobj,
+static ssize_t read_rbu_packet_size(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buffer, loff_t pos, size_t count)
{
@@ -660,7 +660,7 @@ static ssize_t read_rbu_packet_size(struct kobject *kobj,
return size;
}
-static ssize_t write_rbu_packet_size(struct kobject *kobj,
+static ssize_t write_rbu_packet_size(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buffer, loff_t pos, size_t count)
{
diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
index 81b70bd07586..2a62ec6390e0 100644
--- a/drivers/firmware/efivars.c
+++ b/drivers/firmware/efivars.c
@@ -402,7 +402,7 @@ efivar_unregister(struct efivar_entry *var)
}
-static ssize_t efivar_create(struct kobject *kobj,
+static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t count)
{
@@ -461,7 +461,7 @@ static ssize_t efivar_create(struct kobject *kobj,
return count;
}
-static ssize_t efivar_delete(struct kobject *kobj,
+static ssize_t efivar_delete(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t count)
{
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index fee678f74a19..724038dab4ca 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -139,6 +139,13 @@ config GPIO_MAX732X
Board setup code must specify the model to use, and the start
number for these GPIOs.
+config GPIO_MAX732X_IRQ
+ bool "Interrupt controller support for MAX732x"
+ depends on GPIO_MAX732X=y && GENERIC_HARDIRQS
+ help
+ Say yes here to enable the max732x to be used as an interrupt
+ controller. It requires the driver to be built into the kernel.
+
config GPIO_PCA953X
tristate "PCA953x, PCA955x, TCA64xx, and MAX7310 I/O ports"
depends on I2C
@@ -188,6 +195,13 @@ config GPIO_PCF857X
This driver provides an in-kernel interface to those GPIOs using
platform-neutral GPIO calls.
+config GPIO_TC35892
+ bool "TC35892 GPIOs"
+ depends on MFD_TC35892
+ help
+ This enables support for the GPIOs found on the TC35892
+ I/O Expander.
+
config GPIO_TWL4030
tristate "TWL4030, TWL5030, and TPS659x0 GPIOs"
depends on TWL4030_CORE
@@ -264,10 +278,10 @@ config GPIO_BT8XX
If unsure, say N.
config GPIO_LANGWELL
- bool "Intel Moorestown Platform Langwell GPIO support"
+ bool "Intel Langwell/Penwell GPIO support"
depends on PCI
help
- Say Y here to support Intel Moorestown platform GPIO.
+ Say Y here to support Intel Langwell/Penwell GPIO.
config GPIO_TIMBERDALE
bool "Support for timberdale GPIO IP"
@@ -275,6 +289,15 @@ config GPIO_TIMBERDALE
---help---
Add support for the GPIO IP in the timberdale FPGA.
+config GPIO_RDC321X
+ tristate "RDC R-321x GPIO support"
+ depends on PCI && GPIOLIB
+ select MFD_CORE
+ select MFD_RDC321X
+ help
+ Support for the RDC R321x SoC GPIOs over southbridge
+ PCI configuration space.
+
comment "SPI GPIO expanders:"
config GPIO_MAX7301
@@ -310,4 +333,14 @@ config GPIO_UCB1400
To compile this driver as a module, choose M here: the
module will be called ucb1400_gpio.
+comment "MODULbus GPIO expanders:"
+
+config GPIO_JANZ_TTL
+ tristate "Janz VMOD-TTL Digital IO Module"
+ depends on MFD_JANZ_CMODIO
+ help
+ This enables support for the Janz VMOD-TTL Digital IO module.
+ This driver provides support for driving the pins in output
+ mode only. Input mode is not supported.
+
endif
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 10f3f8d958b1..51c3cdd41b5a 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_GPIO_MCP23S08) += mcp23s08.o
obj-$(CONFIG_GPIO_PCA953X) += pca953x.o
obj-$(CONFIG_GPIO_PCF857X) += pcf857x.o
obj-$(CONFIG_GPIO_PL061) += pl061.o
+obj-$(CONFIG_GPIO_TC35892) += tc35892-gpio.o
obj-$(CONFIG_GPIO_TIMBERDALE) += timbgpio.o
obj-$(CONFIG_GPIO_TWL4030) += twl4030-gpio.o
obj-$(CONFIG_GPIO_UCB1400) += ucb1400_gpio.o
@@ -27,4 +28,6 @@ obj-$(CONFIG_GPIO_VR41XX) += vr41xx_giu.o
obj-$(CONFIG_GPIO_WM831X) += wm831x-gpio.o
obj-$(CONFIG_GPIO_WM8350) += wm8350-gpiolib.o
obj-$(CONFIG_GPIO_WM8994) += wm8994-gpio.o
-obj-$(CONFIG_GPIO_SCH) += sch_gpio.o
\ No newline at end of file
+obj-$(CONFIG_GPIO_SCH) += sch_gpio.o
+obj-$(CONFIG_GPIO_RDC321X) += rdc321x-gpio.o
+obj-$(CONFIG_GPIO_JANZ_TTL) += janz-ttl.o
diff --git a/drivers/gpio/cs5535-gpio.c b/drivers/gpio/cs5535-gpio.c
index 0c3c498f2260..f73a1555e49d 100644
--- a/drivers/gpio/cs5535-gpio.c
+++ b/drivers/gpio/cs5535-gpio.c
@@ -197,7 +197,7 @@ static int chip_direction_output(struct gpio_chip *c, unsigned offset, int val)
return 0;
}
-static char *cs5535_gpio_names[] = {
+static const char * const cs5535_gpio_names[] = {
"GPIO0", "GPIO1", "GPIO2", "GPIO3",
"GPIO4", "GPIO5", "GPIO6", "GPIO7",
"GPIO8", "GPIO9", "GPIO10", "GPIO11",
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index eb0c3fe44b29..3ca36542e338 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -399,7 +399,7 @@ static int gpio_setup_irq(struct gpio_desc *desc, struct device *dev,
goto free_id;
}
- pdesc->value_sd = sysfs_get_dirent(dev->kobj.sd, "value");
+ pdesc->value_sd = sysfs_get_dirent(dev->kobj.sd, NULL, "value");
if (!pdesc->value_sd) {
ret = -ENODEV;
goto free_id;
@@ -722,7 +722,7 @@ int gpio_export(unsigned gpio, bool direction_may_change)
unsigned long flags;
struct gpio_desc *desc;
int status = -EINVAL;
- char *ioname = NULL;
+ const char *ioname = NULL;
/* can't export until sysfs is available ... */
if (!gpio_class.p) {
@@ -753,7 +753,7 @@ int gpio_export(unsigned gpio, bool direction_may_change)
struct device *dev;
dev = device_create(&gpio_class, desc->chip->dev, MKDEV(0, 0),
- desc, ioname ? ioname : "gpio%d", gpio);
+ desc, ioname ? ioname : "gpio%u", gpio);
if (!IS_ERR(dev)) {
status = sysfs_create_group(&dev->kobj,
&gpio_attr_group);
@@ -1106,7 +1106,7 @@ unlock:
fail:
/* failures here can mean systems won't boot... */
if (status)
- pr_err("gpiochip_add: gpios %d..%d (%s) not registered\n",
+ pr_err("gpiochip_add: gpios %d..%d (%s) failed to register\n",
chip->base, chip->base + chip->ngpio - 1,
chip->label ? : "generic");
return status;
@@ -1447,6 +1447,49 @@ fail:
}
EXPORT_SYMBOL_GPL(gpio_direction_output);
+/**
+ * gpio_set_debounce - sets @debounce time for a @gpio
+ * @gpio: the gpio to set debounce time
+ * @debounce: debounce time in microseconds
+ */
+int gpio_set_debounce(unsigned gpio, unsigned debounce)
+{
+ unsigned long flags;
+ struct gpio_chip *chip;
+ struct gpio_desc *desc = &gpio_desc[gpio];
+ int status = -EINVAL;
+
+ spin_lock_irqsave(&gpio_lock, flags);
+
+ if (!gpio_is_valid(gpio))
+ goto fail;
+ chip = desc->chip;
+ if (!chip || !chip->set || !chip->set_debounce)
+ goto fail;
+ gpio -= chip->base;
+ if (gpio >= chip->ngpio)
+ goto fail;
+ status = gpio_ensure_requested(desc, gpio);
+ if (status < 0)
+ goto fail;
+
+ /* now we know the gpio is valid and chip won't vanish */
+
+ spin_unlock_irqrestore(&gpio_lock, flags);
+
+ might_sleep_if(extra_checks && chip->can_sleep);
+
+ return chip->set_debounce(chip, gpio, debounce);
+
+fail:
+ spin_unlock_irqrestore(&gpio_lock, flags);
+ if (status)
+ pr_debug("%s: gpio-%d status %d\n",
+ __func__, gpio, status);
+
+ return status;
+}
+EXPORT_SYMBOL_GPL(gpio_set_debounce);
/* I/O calls are only valid after configuration completed; the relevant
* "is this a valid GPIO" error checks should already have been done.
diff --git a/drivers/gpio/it8761e_gpio.c b/drivers/gpio/it8761e_gpio.c
index 753219cf993a..48fc43c4bdd1 100644
--- a/drivers/gpio/it8761e_gpio.c
+++ b/drivers/gpio/it8761e_gpio.c
@@ -80,8 +80,8 @@ static int it8761e_gpio_get(struct gpio_chip *gc, unsigned gpio_num)
u16 reg;
u8 bit;
- bit = gpio_num % 7;
- reg = (gpio_num >= 7) ? gpio_ba + 1 : gpio_ba;
+ bit = gpio_num % 8;
+ reg = (gpio_num >= 8) ? gpio_ba + 1 : gpio_ba;
return !!(inb(reg) & (1 << bit));
}
@@ -91,8 +91,8 @@ static int it8761e_gpio_direction_in(struct gpio_chip *gc, unsigned gpio_num)
u8 curr_dirs;
u8 io_reg, bit;
- bit = gpio_num % 7;
- io_reg = (gpio_num >= 7) ? GPIO2X_IO : GPIO1X_IO;
+ bit = gpio_num % 8;
+ io_reg = (gpio_num >= 8) ? GPIO2X_IO : GPIO1X_IO;
spin_lock(&sio_lock);
@@ -116,8 +116,8 @@ static void it8761e_gpio_set(struct gpio_chip *gc,
u8 curr_vals, bit;
u16 reg;
- bit = gpio_num % 7;
- reg = (gpio_num >= 7) ? gpio_ba + 1 : gpio_ba;
+ bit = gpio_num % 8;
+ reg = (gpio_num >= 8) ? gpio_ba + 1 : gpio_ba;
spin_lock(&sio_lock);
@@ -135,8 +135,8 @@ static int it8761e_gpio_direction_out(struct gpio_chip *gc,
{
u8 curr_dirs, io_reg, bit;
- bit = gpio_num % 7;
- io_reg = (gpio_num >= 7) ? GPIO2X_IO : GPIO1X_IO;
+ bit = gpio_num % 8;
+ io_reg = (gpio_num >= 8) ? GPIO2X_IO : GPIO1X_IO;
it8761e_gpio_set(gc, gpio_num, val);
@@ -200,7 +200,7 @@ static int __init it8761e_gpio_init(void)
return -EBUSY;
it8761e_gpio_chip.base = -1;
- it8761e_gpio_chip.ngpio = 14;
+ it8761e_gpio_chip.ngpio = 16;
err = gpiochip_add(&it8761e_gpio_chip);
if (err < 0)
@@ -217,7 +217,10 @@ gpiochip_add_err:
static void __exit it8761e_gpio_exit(void)
{
if (gpio_ba) {
- gpiochip_remove(&it8761e_gpio_chip);
+ int ret = gpiochip_remove(&it8761e_gpio_chip);
+
+ WARN(ret, "%s(): gpiochip_remove() failed, ret=%d\n",
+ __func__, ret);
release_region(gpio_ba, GPIO_IOSIZE);
gpio_ba = 0;
diff --git a/drivers/gpio/janz-ttl.c b/drivers/gpio/janz-ttl.c
new file mode 100644
index 000000000000..813ac077e5d7
--- /dev/null
+++ b/drivers/gpio/janz-ttl.c
@@ -0,0 +1,258 @@
+/*
+ * Janz MODULbus VMOD-TTL GPIO Driver
+ *
+ * Copyright (c) 2010 Ira W. Snyder <iws@ovro.caltech.edu>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/slab.h>
+
+#include <linux/mfd/janz.h>
+
+#define DRV_NAME "janz-ttl"
+
+#define PORTA_DIRECTION 0x23
+#define PORTB_DIRECTION 0x2B
+#define PORTC_DIRECTION 0x06
+#define PORTA_IOCTL 0x24
+#define PORTB_IOCTL 0x2C
+#define PORTC_IOCTL 0x07
+
+#define MASTER_INT_CTL 0x00
+#define MASTER_CONF_CTL 0x01
+
+#define CONF_PAE (1 << 2)
+#define CONF_PBE (1 << 7)
+#define CONF_PCE (1 << 4)
+
+struct ttl_control_regs {
+ __be16 portc;
+ __be16 portb;
+ __be16 porta;
+ __be16 control;
+};
+
+struct ttl_module {
+ struct gpio_chip gpio;
+
+ /* base address of registers */
+ struct ttl_control_regs __iomem *regs;
+
+ u8 portc_shadow;
+ u8 portb_shadow;
+ u8 porta_shadow;
+
+ spinlock_t lock;
+};
+
+static int ttl_get_value(struct gpio_chip *gpio, unsigned offset)
+{
+ struct ttl_module *mod = dev_get_drvdata(gpio->dev);
+ u8 *shadow;
+ int ret;
+
+ if (offset < 8) {
+ shadow = &mod->porta_shadow;
+ } else if (offset < 16) {
+ shadow = &mod->portb_shadow;
+ offset -= 8;
+ } else {
+ shadow = &mod->portc_shadow;
+ offset -= 16;
+ }
+
+ spin_lock(&mod->lock);
+ ret = *shadow & (1 << offset);
+ spin_unlock(&mod->lock);
+ return ret;
+}
+
+static void ttl_set_value(struct gpio_chip *gpio, unsigned offset, int value)
+{
+ struct ttl_module *mod = dev_get_drvdata(gpio->dev);
+ void __iomem *port;
+ u8 *shadow;
+
+ if (offset < 8) {
+ port = &mod->regs->porta;
+ shadow = &mod->porta_shadow;
+ } else if (offset < 16) {
+ port = &mod->regs->portb;
+ shadow = &mod->portb_shadow;
+ offset -= 8;
+ } else {
+ port = &mod->regs->portc;
+ shadow = &mod->portc_shadow;
+ offset -= 16;
+ }
+
+ spin_lock(&mod->lock);
+ if (value)
+ *shadow |= (1 << offset);
+ else
+ *shadow &= ~(1 << offset);
+
+ iowrite16be(*shadow, port);
+ spin_unlock(&mod->lock);
+}
+
+static void __devinit ttl_write_reg(struct ttl_module *mod, u8 reg, u16 val)
+{
+ iowrite16be(reg, &mod->regs->control);
+ iowrite16be(val, &mod->regs->control);
+}
+
+static void __devinit ttl_setup_device(struct ttl_module *mod)
+{
+ /* reset the device to a known state */
+ iowrite16be(0x0000, &mod->regs->control);
+ iowrite16be(0x0001, &mod->regs->control);
+ iowrite16be(0x0000, &mod->regs->control);
+
+ /* put all ports in open-drain mode */
+ ttl_write_reg(mod, PORTA_IOCTL, 0x00ff);
+ ttl_write_reg(mod, PORTB_IOCTL, 0x00ff);
+ ttl_write_reg(mod, PORTC_IOCTL, 0x000f);
+
+ /* set all ports as outputs */
+ ttl_write_reg(mod, PORTA_DIRECTION, 0x0000);
+ ttl_write_reg(mod, PORTB_DIRECTION, 0x0000);
+ ttl_write_reg(mod, PORTC_DIRECTION, 0x0000);
+
+ /* set all ports to drive zeroes */
+ iowrite16be(0x0000, &mod->regs->porta);
+ iowrite16be(0x0000, &mod->regs->portb);
+ iowrite16be(0x0000, &mod->regs->portc);
+
+ /* enable all ports */
+ ttl_write_reg(mod, MASTER_CONF_CTL, CONF_PAE | CONF_PBE | CONF_PCE);
+}
+
+static int __devinit ttl_probe(struct platform_device *pdev)
+{
+ struct janz_platform_data *pdata;
+ struct device *dev = &pdev->dev;
+ struct ttl_module *mod;
+ struct gpio_chip *gpio;
+ struct resource *res;
+ int ret;
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ dev_err(dev, "no platform data\n");
+ ret = -ENXIO;
+ goto out_return;
+ }
+
+ mod = kzalloc(sizeof(*mod), GFP_KERNEL);
+ if (!mod) {
+ dev_err(dev, "unable to allocate private data\n");
+ ret = -ENOMEM;
+ goto out_return;
+ }
+
+ platform_set_drvdata(pdev, mod);
+ spin_lock_init(&mod->lock);
+
+ /* get access to the MODULbus registers for this module */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "MODULbus registers not found\n");
+ ret = -ENODEV;
+ goto out_free_mod;
+ }
+
+ mod->regs = ioremap(res->start, resource_size(res));
+ if (!mod->regs) {
+ dev_err(dev, "MODULbus registers not ioremap\n");
+ ret = -ENOMEM;
+ goto out_free_mod;
+ }
+
+ ttl_setup_device(mod);
+
+ /* Initialize the GPIO data structures */
+ gpio = &mod->gpio;
+ gpio->dev = &pdev->dev;
+ gpio->label = pdev->name;
+ gpio->get = ttl_get_value;
+ gpio->set = ttl_set_value;
+ gpio->owner = THIS_MODULE;
+
+ /* request dynamic allocation */
+ gpio->base = -1;
+ gpio->ngpio = 20;
+
+ ret = gpiochip_add(gpio);
+ if (ret) {
+ dev_err(dev, "unable to add GPIO chip\n");
+ goto out_iounmap_regs;
+ }
+
+ dev_info(&pdev->dev, "module %d: registered GPIO device\n",
+ pdata->modno);
+ return 0;
+
+out_iounmap_regs:
+ iounmap(mod->regs);
+out_free_mod:
+ kfree(mod);
+out_return:
+ return ret;
+}
+
+static int __devexit ttl_remove(struct platform_device *pdev)
+{
+ struct ttl_module *mod = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ ret = gpiochip_remove(&mod->gpio);
+ if (ret) {
+ dev_err(dev, "unable to remove GPIO chip\n");
+ return ret;
+ }
+
+ iounmap(mod->regs);
+ kfree(mod);
+ return 0;
+}
+
+static struct platform_driver ttl_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = ttl_probe,
+ .remove = __devexit_p(ttl_remove),
+};
+
+static int __init ttl_init(void)
+{
+ return platform_driver_register(&ttl_driver);
+}
+
+static void __exit ttl_exit(void)
+{
+ platform_driver_unregister(&ttl_driver);
+}
+
+MODULE_AUTHOR("Ira W. Snyder <iws@ovro.caltech.edu>");
+MODULE_DESCRIPTION("Janz MODULbus VMOD-TTL Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:janz-ttl");
+
+module_init(ttl_init);
+module_exit(ttl_exit);
diff --git a/drivers/gpio/langwell_gpio.c b/drivers/gpio/langwell_gpio.c
index 00c3a14127af..8383a8d7f994 100644
--- a/drivers/gpio/langwell_gpio.c
+++ b/drivers/gpio/langwell_gpio.c
@@ -17,6 +17,7 @@
/* Supports:
* Moorestown platform Langwell chip.
+ * Medfield platform Penwell chip.
*/
#include <linux/module.h>
@@ -31,44 +32,65 @@
#include <linux/gpio.h>
#include <linux/slab.h>
-struct lnw_gpio_register {
- u32 GPLR[2];
- u32 GPDR[2];
- u32 GPSR[2];
- u32 GPCR[2];
- u32 GRER[2];
- u32 GFER[2];
- u32 GEDR[2];
+/*
+ * The Langwell chip has 64 pins, so there are 2 32-bit registers to control
+ * each feature, while the Penwell chip has 96 pins per block and needs 3
+ * 32-bit registers to control them. So we only define the register order here
+ * instead of a structure, to get the bit offset for a pin (using GPDR as an
+ * example):
+ *
+ * nreg = ngpio / 32;
+ * reg = offset / 32;
+ * bit = offset % 32;
+ * reg_addr = reg_base + GPDR * nreg * 4 + reg * 4;
+ *
+ * so that bit of reg_addr controls the GPDR feature of the pin at 'offset'
+*/
+
+enum GPIO_REG {
+ GPLR = 0, /* pin level read-only */
+ GPDR, /* pin direction */
+ GPSR, /* pin set */
+ GPCR, /* pin clear */
+ GRER, /* rising edge detect */
+ GFER, /* falling edge detect */
+ GEDR, /* edge detect result */
};
struct lnw_gpio {
struct gpio_chip chip;
- struct lnw_gpio_register *reg_base;
+ void *reg_base;
spinlock_t lock;
unsigned irq_base;
};
-static int lnw_gpio_get(struct gpio_chip *chip, unsigned offset)
+static void __iomem *gpio_reg(struct gpio_chip *chip, unsigned offset,
+ enum GPIO_REG reg_type)
{
struct lnw_gpio *lnw = container_of(chip, struct lnw_gpio, chip);
+ unsigned nreg = chip->ngpio / 32;
u8 reg = offset / 32;
- void __iomem *gplr;
+ void __iomem *ptr;
+
+ ptr = (void __iomem *)(lnw->reg_base + reg_type * nreg * 4 + reg * 4);
+ return ptr;
+}
+
+static int lnw_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ void __iomem *gplr = gpio_reg(chip, offset, GPLR);
- gplr = (void __iomem *)(&lnw->reg_base->GPLR[reg]);
return readl(gplr) & BIT(offset % 32);
}
static void lnw_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
- struct lnw_gpio *lnw = container_of(chip, struct lnw_gpio, chip);
- u8 reg = offset / 32;
void __iomem *gpsr, *gpcr;
if (value) {
- gpsr = (void __iomem *)(&lnw->reg_base->GPSR[reg]);
+ gpsr = gpio_reg(chip, offset, GPSR);
writel(BIT(offset % 32), gpsr);
} else {
- gpcr = (void __iomem *)(&lnw->reg_base->GPCR[reg]);
+ gpcr = gpio_reg(chip, offset, GPCR);
writel(BIT(offset % 32), gpcr);
}
}
@@ -76,12 +98,10 @@ static void lnw_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
static int lnw_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
{
struct lnw_gpio *lnw = container_of(chip, struct lnw_gpio, chip);
- u8 reg = offset / 32;
+ void __iomem *gpdr = gpio_reg(chip, offset, GPDR);
u32 value;
unsigned long flags;
- void __iomem *gpdr;
- gpdr = (void __iomem *)(&lnw->reg_base->GPDR[reg]);
spin_lock_irqsave(&lnw->lock, flags);
value = readl(gpdr);
value &= ~BIT(offset % 32);
@@ -94,12 +114,10 @@ static int lnw_gpio_direction_output(struct gpio_chip *chip,
unsigned offset, int value)
{
struct lnw_gpio *lnw = container_of(chip, struct lnw_gpio, chip);
- u8 reg = offset / 32;
+ void __iomem *gpdr = gpio_reg(chip, offset, GPDR);
unsigned long flags;
- void __iomem *gpdr;
lnw_gpio_set(chip, offset, value);
- gpdr = (void __iomem *)(&lnw->reg_base->GPDR[reg]);
spin_lock_irqsave(&lnw->lock, flags);
value = readl(gpdr);
value |= BIT(offset % 32);;
@@ -118,11 +136,10 @@ static int lnw_irq_type(unsigned irq, unsigned type)
{
struct lnw_gpio *lnw = get_irq_chip_data(irq);
u32 gpio = irq - lnw->irq_base;
- u8 reg = gpio / 32;
unsigned long flags;
u32 value;
- void __iomem *grer = (void __iomem *)(&lnw->reg_base->GRER[reg]);
- void __iomem *gfer = (void __iomem *)(&lnw->reg_base->GFER[reg]);
+ void __iomem *grer = gpio_reg(&lnw->chip, gpio, GRER);
+ void __iomem *gfer = gpio_reg(&lnw->chip, gpio, GFER);
if (gpio >= lnw->chip.ngpio)
return -EINVAL;
@@ -158,8 +175,10 @@ static struct irq_chip lnw_irqchip = {
.set_type = lnw_irq_type,
};
-static struct pci_device_id lnw_gpio_ids[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080f) },
+static DEFINE_PCI_DEVICE_TABLE(lnw_gpio_ids) = { /* pin number */
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080f), .driver_data = 64 },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081f), .driver_data = 96 },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081a), .driver_data = 96 },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, lnw_gpio_ids);
@@ -167,17 +186,17 @@ MODULE_DEVICE_TABLE(pci, lnw_gpio_ids);
static void lnw_irq_handler(unsigned irq, struct irq_desc *desc)
{
struct lnw_gpio *lnw = (struct lnw_gpio *)get_irq_data(irq);
- u32 reg, gpio;
+ u32 base, gpio;
void __iomem *gedr;
u32 gedr_v;
/* check GPIO controller to check which pin triggered the interrupt */
- for (reg = 0; reg < lnw->chip.ngpio / 32; reg++) {
- gedr = (void __iomem *)(&lnw->reg_base->GEDR[reg]);
+ for (base = 0; base < lnw->chip.ngpio; base += 32) {
+ gedr = gpio_reg(&lnw->chip, base, GEDR);
gedr_v = readl(gedr);
if (!gedr_v)
continue;
- for (gpio = reg*32; gpio < reg*32+32; gpio++)
+ for (gpio = base; gpio < base + 32; gpio++)
if (gedr_v & BIT(gpio % 32)) {
pr_debug("pin %d triggered\n", gpio);
generic_handle_irq(lnw->irq_base + gpio);
@@ -245,7 +264,7 @@ static int __devinit lnw_gpio_probe(struct pci_dev *pdev,
lnw->chip.set = lnw_gpio_set;
lnw->chip.to_irq = lnw_gpio_to_irq;
lnw->chip.base = gpio_base;
- lnw->chip.ngpio = 64;
+ lnw->chip.ngpio = id->driver_data;
lnw->chip.can_sleep = 0;
pci_set_drvdata(pdev, lnw);
retval = gpiochip_add(&lnw->chip);
diff --git a/drivers/gpio/max732x.c b/drivers/gpio/max732x.c
index f7868243af89..9cad60f9e962 100644
--- a/drivers/gpio/max732x.c
+++ b/drivers/gpio/max732x.c
@@ -17,7 +17,8 @@
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/gpio.h>
-
+#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/i2c.h>
#include <linux/i2c/max732x.h>
@@ -31,7 +32,8 @@
* - Open Drain I/O
*
* designated by 'O', 'I' and 'P' individually according to MAXIM's
- * datasheets.
+ * datasheets. 'I' and 'P' ports are interrupt capable, some with
+ * a dedicated interrupt mask.
*
* There are two groups of I/O ports, each group usually includes
* up to 8 I/O ports, and is accessed by a specific I2C address:
@@ -44,7 +46,8 @@
*
* Within each group of ports, there are five known combinations of
* I/O ports: 4I4O, 4P4O, 8I, 8P, 8O, see the definitions below for
- * the detailed organization of these ports.
+ * the detailed organization of these ports. Only Group A is interrupt
+ * capable.
*
* GPIO numbers start from 'gpio_base + 0' to 'gpio_base + 8/16',
* and GPIOs from GROUP_A are numbered before those from GROUP_B
@@ -68,16 +71,47 @@
#define GROUP_A(x) ((x) & 0xffff) /* I2C Addr: 0b'110xxxx */
#define GROUP_B(x) ((x) << 16) /* I2C Addr: 0b'101xxxx */
+#define INT_NONE 0x0 /* No interrupt capability */
+#define INT_NO_MASK 0x1 /* Has interrupts, no mask */
+#define INT_INDEP_MASK 0x2 /* Has interrupts, independent mask */
+#define INT_MERGED_MASK 0x3 /* Has interrupts, merged mask */
+
+#define INT_CAPS(x) (((uint64_t)(x)) << 32)
+
+enum {
+ MAX7319,
+ MAX7320,
+ MAX7321,
+ MAX7322,
+ MAX7323,
+ MAX7324,
+ MAX7325,
+ MAX7326,
+ MAX7327,
+};
+
+static uint64_t max732x_features[] = {
+ [MAX7319] = GROUP_A(IO_8I) | INT_CAPS(INT_MERGED_MASK),
+ [MAX7320] = GROUP_B(IO_8O),
+ [MAX7321] = GROUP_A(IO_8P) | INT_CAPS(INT_NO_MASK),
+ [MAX7322] = GROUP_A(IO_4I4O) | INT_CAPS(INT_MERGED_MASK),
+ [MAX7323] = GROUP_A(IO_4P4O) | INT_CAPS(INT_INDEP_MASK),
+ [MAX7324] = GROUP_A(IO_8I) | GROUP_B(IO_8O) | INT_CAPS(INT_MERGED_MASK),
+ [MAX7325] = GROUP_A(IO_8P) | GROUP_B(IO_8O) | INT_CAPS(INT_NO_MASK),
+ [MAX7326] = GROUP_A(IO_4I4O) | GROUP_B(IO_8O) | INT_CAPS(INT_MERGED_MASK),
+ [MAX7327] = GROUP_A(IO_4P4O) | GROUP_B(IO_8O) | INT_CAPS(INT_NO_MASK),
+};
+
static const struct i2c_device_id max732x_id[] = {
- { "max7319", GROUP_A(IO_8I) },
- { "max7320", GROUP_B(IO_8O) },
- { "max7321", GROUP_A(IO_8P) },
- { "max7322", GROUP_A(IO_4I4O) },
- { "max7323", GROUP_A(IO_4P4O) },
- { "max7324", GROUP_A(IO_8I) | GROUP_B(IO_8O) },
- { "max7325", GROUP_A(IO_8P) | GROUP_B(IO_8O) },
- { "max7326", GROUP_A(IO_4I4O) | GROUP_B(IO_8O) },
- { "max7327", GROUP_A(IO_4P4O) | GROUP_B(IO_8O) },
+ { "max7319", MAX7319 },
+ { "max7320", MAX7320 },
+ { "max7321", MAX7321 },
+ { "max7322", MAX7322 },
+ { "max7323", MAX7323 },
+ { "max7324", MAX7324 },
+ { "max7325", MAX7325 },
+ { "max7326", MAX7326 },
+ { "max7327", MAX7327 },
{ },
};
MODULE_DEVICE_TABLE(i2c, max732x_id);
@@ -96,9 +130,19 @@ struct max732x_chip {
struct mutex lock;
uint8_t reg_out[2];
+
+#ifdef CONFIG_GPIO_MAX732X_IRQ
+ struct mutex irq_lock;
+ int irq_base;
+ uint8_t irq_mask;
+ uint8_t irq_mask_cur;
+ uint8_t irq_trig_raise;
+ uint8_t irq_trig_fall;
+ uint8_t irq_features;
+#endif
};
-static int max732x_write(struct max732x_chip *chip, int group_a, uint8_t val)
+static int max732x_writeb(struct max732x_chip *chip, int group_a, uint8_t val)
{
struct i2c_client *client;
int ret;
@@ -113,7 +157,7 @@ static int max732x_write(struct max732x_chip *chip, int group_a, uint8_t val)
return 0;
}
-static int max732x_read(struct max732x_chip *chip, int group_a, uint8_t *val)
+static int max732x_readb(struct max732x_chip *chip, int group_a, uint8_t *val)
{
struct i2c_client *client;
int ret;
@@ -142,7 +186,7 @@ static int max732x_gpio_get_value(struct gpio_chip *gc, unsigned off)
chip = container_of(gc, struct max732x_chip, gpio_chip);
- ret = max732x_read(chip, is_group_a(chip, off), &reg_val);
+ ret = max732x_readb(chip, is_group_a(chip, off), &reg_val);
if (ret < 0)
return 0;
@@ -162,7 +206,7 @@ static void max732x_gpio_set_value(struct gpio_chip *gc, unsigned off, int val)
reg_out = (off > 7) ? chip->reg_out[1] : chip->reg_out[0];
reg_out = (val) ? reg_out | mask : reg_out & ~mask;
- ret = max732x_write(chip, is_group_a(chip, off), reg_out);
+ ret = max732x_writeb(chip, is_group_a(chip, off), reg_out);
if (ret < 0)
goto out;
@@ -188,6 +232,13 @@ static int max732x_gpio_direction_input(struct gpio_chip *gc, unsigned off)
return -EACCES;
}
+ /*
+ * Open-drain pins must be set to high impedance (which is
+ * equivalent to output-high) to be turned into an input.
+ */
+ if ((mask & chip->dir_output))
+ max732x_gpio_set_value(gc, off, 1);
+
return 0;
}
@@ -209,12 +260,278 @@ static int max732x_gpio_direction_output(struct gpio_chip *gc,
return 0;
}
+#ifdef CONFIG_GPIO_MAX732X_IRQ
+static int max732x_writew(struct max732x_chip *chip, uint16_t val)
+{
+ int ret;
+
+ val = cpu_to_le16(val);
+
+ ret = i2c_master_send(chip->client_group_a, (char *)&val, 2);
+ if (ret < 0) {
+ dev_err(&chip->client_group_a->dev, "failed writing\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int max732x_readw(struct max732x_chip *chip, uint16_t *val)
+{
+ int ret;
+
+ ret = i2c_master_recv(chip->client_group_a, (char *)val, 2);
+ if (ret < 0) {
+ dev_err(&chip->client_group_a->dev, "failed reading\n");
+ return ret;
+ }
+
+ *val = le16_to_cpu(*val);
+ return 0;
+}
+
+static void max732x_irq_update_mask(struct max732x_chip *chip)
+{
+ uint16_t msg;
+
+ if (chip->irq_mask == chip->irq_mask_cur)
+ return;
+
+ chip->irq_mask = chip->irq_mask_cur;
+
+ if (chip->irq_features == INT_NO_MASK)
+ return;
+
+ mutex_lock(&chip->lock);
+
+ switch (chip->irq_features) {
+ case INT_INDEP_MASK:
+ msg = (chip->irq_mask << 8) | chip->reg_out[0];
+ max732x_writew(chip, msg);
+ break;
+
+ case INT_MERGED_MASK:
+ msg = chip->irq_mask | chip->reg_out[0];
+ max732x_writeb(chip, 1, (uint8_t)msg);
+ break;
+ }
+
+ mutex_unlock(&chip->lock);
+}
+
+static int max732x_gpio_to_irq(struct gpio_chip *gc, unsigned off)
+{
+ struct max732x_chip *chip;
+
+ chip = container_of(gc, struct max732x_chip, gpio_chip);
+ return chip->irq_base + off;
+}
+
+static void max732x_irq_mask(unsigned int irq)
+{
+ struct max732x_chip *chip = get_irq_chip_data(irq);
+
+ chip->irq_mask_cur &= ~(1 << (irq - chip->irq_base));
+}
+
+static void max732x_irq_unmask(unsigned int irq)
+{
+ struct max732x_chip *chip = get_irq_chip_data(irq);
+
+ chip->irq_mask_cur |= 1 << (irq - chip->irq_base);
+}
+
+static void max732x_irq_bus_lock(unsigned int irq)
+{
+ struct max732x_chip *chip = get_irq_chip_data(irq);
+
+ mutex_lock(&chip->irq_lock);
+ chip->irq_mask_cur = chip->irq_mask;
+}
+
+static void max732x_irq_bus_sync_unlock(unsigned int irq)
+{
+ struct max732x_chip *chip = get_irq_chip_data(irq);
+
+ max732x_irq_update_mask(chip);
+ mutex_unlock(&chip->irq_lock);
+}
+
+static int max732x_irq_set_type(unsigned int irq, unsigned int type)
+{
+ struct max732x_chip *chip = get_irq_chip_data(irq);
+ uint16_t off = irq - chip->irq_base;
+ uint16_t mask = 1 << off;
+
+ if (!(mask & chip->dir_input)) {
+ dev_dbg(&chip->client->dev, "%s port %d is output only\n",
+ chip->client->name, off);
+ return -EACCES;
+ }
+
+ if (!(type & IRQ_TYPE_EDGE_BOTH)) {
+ dev_err(&chip->client->dev, "irq %d: unsupported type %d\n",
+ irq, type);
+ return -EINVAL;
+ }
+
+ if (type & IRQ_TYPE_EDGE_FALLING)
+ chip->irq_trig_fall |= mask;
+ else
+ chip->irq_trig_fall &= ~mask;
+
+ if (type & IRQ_TYPE_EDGE_RISING)
+ chip->irq_trig_raise |= mask;
+ else
+ chip->irq_trig_raise &= ~mask;
+
+ return max732x_gpio_direction_input(&chip->gpio_chip, off);
+}
+
+static struct irq_chip max732x_irq_chip = {
+ .name = "max732x",
+ .mask = max732x_irq_mask,
+ .unmask = max732x_irq_unmask,
+ .bus_lock = max732x_irq_bus_lock,
+ .bus_sync_unlock = max732x_irq_bus_sync_unlock,
+ .set_type = max732x_irq_set_type,
+};
+
+static uint8_t max732x_irq_pending(struct max732x_chip *chip)
+{
+ uint8_t cur_stat;
+ uint8_t old_stat;
+ uint8_t trigger;
+ uint8_t pending;
+ uint16_t status;
+ int ret;
+
+ ret = max732x_readw(chip, &status);
+ if (ret)
+ return 0;
+
+ trigger = status >> 8;
+ trigger &= chip->irq_mask;
+
+ if (!trigger)
+ return 0;
+
+ cur_stat = status & 0xFF;
+ cur_stat &= chip->irq_mask;
+
+ old_stat = cur_stat ^ trigger;
+
+ pending = (old_stat & chip->irq_trig_fall) |
+ (cur_stat & chip->irq_trig_raise);
+ pending &= trigger;
+
+ return pending;
+}
+
+static irqreturn_t max732x_irq_handler(int irq, void *devid)
+{
+ struct max732x_chip *chip = devid;
+ uint8_t pending;
+ uint8_t level;
+
+ pending = max732x_irq_pending(chip);
+
+ if (!pending)
+ return IRQ_HANDLED;
+
+ do {
+ level = __ffs(pending);
+ handle_nested_irq(level + chip->irq_base);
+
+ pending &= ~(1 << level);
+ } while (pending);
+
+ return IRQ_HANDLED;
+}
+
+static int max732x_irq_setup(struct max732x_chip *chip,
+ const struct i2c_device_id *id)
+{
+ struct i2c_client *client = chip->client;
+ struct max732x_platform_data *pdata = client->dev.platform_data;
+ int has_irq = max732x_features[id->driver_data] >> 32;
+ int ret;
+
+ if (pdata->irq_base && has_irq != INT_NONE) {
+ int lvl;
+
+ chip->irq_base = pdata->irq_base;
+ chip->irq_features = has_irq;
+ mutex_init(&chip->irq_lock);
+
+ for (lvl = 0; lvl < chip->gpio_chip.ngpio; lvl++) {
+ int irq = lvl + chip->irq_base;
+
+ if (!(chip->dir_input & (1 << lvl)))
+ continue;
+
+ set_irq_chip_data(irq, chip);
+ set_irq_chip_and_handler(irq, &max732x_irq_chip,
+ handle_edge_irq);
+ set_irq_nested_thread(irq, 1);
+#ifdef CONFIG_ARM
+ set_irq_flags(irq, IRQF_VALID);
+#else
+ set_irq_noprobe(irq);
+#endif
+ }
+
+ ret = request_threaded_irq(client->irq,
+ NULL,
+ max732x_irq_handler,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ dev_name(&client->dev), chip);
+ if (ret) {
+ dev_err(&client->dev, "failed to request irq %d\n",
+ client->irq);
+ goto out_failed;
+ }
+
+ chip->gpio_chip.to_irq = max732x_gpio_to_irq;
+ }
+
+ return 0;
+
+out_failed:
+ chip->irq_base = 0;
+ return ret;
+}
+
+static void max732x_irq_teardown(struct max732x_chip *chip)
+{
+ if (chip->irq_base)
+ free_irq(chip->client->irq, chip);
+}
+#else /* CONFIG_GPIO_MAX732X_IRQ */
+static int max732x_irq_setup(struct max732x_chip *chip,
+ const struct i2c_device_id *id)
+{
+ struct i2c_client *client = chip->client;
+ struct max732x_platform_data *pdata = client->dev.platform_data;
+ int has_irq = max732x_features[id->driver_data] >> 32;
+
+ if (pdata->irq_base && has_irq != INT_NONE)
+ dev_warn(&client->dev, "interrupt support not compiled in\n");
+
+ return 0;
+}
+
+static void max732x_irq_teardown(struct max732x_chip *chip)
+{
+}
+#endif
+
static int __devinit max732x_setup_gpio(struct max732x_chip *chip,
const struct i2c_device_id *id,
unsigned gpio_start)
{
struct gpio_chip *gc = &chip->gpio_chip;
- uint32_t id_data = id->driver_data;
+ uint32_t id_data = (uint32_t)max732x_features[id->driver_data];
int i, port = 0;
for (i = 0; i < 16; i++, id_data >>= 2) {
@@ -285,14 +602,14 @@ static int __devinit max732x_probe(struct i2c_client *client,
switch (client->addr & 0x70) {
case 0x60:
chip->client_group_a = client;
- if (nr_port > 7) {
+ if (nr_port > 8) {
c = i2c_new_dummy(client->adapter, addr_b);
chip->client_group_b = chip->client_dummy = c;
}
break;
case 0x50:
chip->client_group_b = client;
- if (nr_port > 7) {
+ if (nr_port > 8) {
c = i2c_new_dummy(client->adapter, addr_a);
chip->client_group_a = chip->client_dummy = c;
}
@@ -306,9 +623,13 @@ static int __devinit max732x_probe(struct i2c_client *client,
mutex_init(&chip->lock);
- max732x_read(chip, is_group_a(chip, 0), &chip->reg_out[0]);
- if (nr_port > 7)
- max732x_read(chip, is_group_a(chip, 8), &chip->reg_out[1]);
+ max732x_readb(chip, is_group_a(chip, 0), &chip->reg_out[0]);
+ if (nr_port > 8)
+ max732x_readb(chip, is_group_a(chip, 8), &chip->reg_out[1]);
+
+ ret = max732x_irq_setup(chip, id);
+ if (ret)
+ goto out_failed;
ret = gpiochip_add(&chip->gpio_chip);
if (ret)
@@ -325,6 +646,7 @@ static int __devinit max732x_probe(struct i2c_client *client,
return 0;
out_failed:
+ max732x_irq_teardown(chip);
kfree(chip);
return ret;
}
@@ -352,6 +674,8 @@ static int __devexit max732x_remove(struct i2c_client *client)
return ret;
}
+ max732x_irq_teardown(chip);
+
/* unregister any dummy i2c_client */
if (chip->client_dummy)
i2c_unregister_device(chip->client_dummy);
diff --git a/drivers/gpio/pca953x.c b/drivers/gpio/pca953x.c
index b827c976dc62..a2b12aa1f2b9 100644
--- a/drivers/gpio/pca953x.c
+++ b/drivers/gpio/pca953x.c
@@ -73,7 +73,7 @@ struct pca953x_chip {
struct i2c_client *client;
struct pca953x_platform_data *dyn_pdata;
struct gpio_chip gpio_chip;
- char **names;
+ const char *const *names;
};
static int pca953x_write_reg(struct pca953x_chip *chip, int reg, uint16_t val)
@@ -449,7 +449,7 @@ pca953x_get_alt_pdata(struct i2c_client *client)
struct device_node *node;
const uint16_t *val;
- node = dev_archdata_get_node(&client->dev.archdata);
+ node = client->dev.of_node;
if (node == NULL)
return NULL;
diff --git a/drivers/gpio/pl061.c b/drivers/gpio/pl061.c
index 105701a1f05b..ee568c8fcbd0 100644
--- a/drivers/gpio/pl061.c
+++ b/drivers/gpio/pl061.c
@@ -164,7 +164,7 @@ static int pl061_irq_type(unsigned irq, unsigned trigger)
unsigned long flags;
u8 gpiois, gpioibe, gpioiev;
- if (offset < 0 || offset > PL061_GPIO_NR)
+ if (offset < 0 || offset >= PL061_GPIO_NR)
return -EINVAL;
spin_lock_irqsave(&chip->irq_lock, flags);
diff --git a/drivers/gpio/rdc321x-gpio.c b/drivers/gpio/rdc321x-gpio.c
new file mode 100644
index 000000000000..2762698e0204
--- /dev/null
+++ b/drivers/gpio/rdc321x-gpio.c
@@ -0,0 +1,246 @@
+/*
+ * RDC321x GPIO driver
+ *
+ * Copyright (C) 2008, Volker Weiss <dev@tintuc.de>
+ * Copyright (C) 2007-2010 Florian Fainelli <florian@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/platform_device.h>
+#include <linux/pci.h>
+#include <linux/gpio.h>
+#include <linux/mfd/rdc321x.h>
+#include <linux/slab.h>
+
+struct rdc321x_gpio {
+ spinlock_t lock;
+ struct pci_dev *sb_pdev;
+ u32 data_reg[2];
+ int reg1_ctrl_base;
+ int reg1_data_base;
+ int reg2_ctrl_base;
+ int reg2_data_base;
+ struct gpio_chip chip;
+};
+
+/* read GPIO pin */
+static int rdc_gpio_get_value(struct gpio_chip *chip, unsigned gpio)
+{
+ struct rdc321x_gpio *gpch;
+ u32 value = 0;
+ int reg;
+
+ gpch = container_of(chip, struct rdc321x_gpio, chip);
+ reg = gpio < 32 ? gpch->reg1_data_base : gpch->reg2_data_base;
+
+ spin_lock(&gpch->lock);
+ pci_write_config_dword(gpch->sb_pdev, reg,
+ gpch->data_reg[gpio < 32 ? 0 : 1]);
+ pci_read_config_dword(gpch->sb_pdev, reg, &value);
+ spin_unlock(&gpch->lock);
+
+ return (1 << (gpio & 0x1f)) & value ? 1 : 0;
+}
+
+static void rdc_gpio_set_value_impl(struct gpio_chip *chip,
+ unsigned gpio, int value)
+{
+ struct rdc321x_gpio *gpch;
+ int reg = (gpio < 32) ? 0 : 1;
+
+ gpch = container_of(chip, struct rdc321x_gpio, chip);
+
+ if (value)
+ gpch->data_reg[reg] |= 1 << (gpio & 0x1f);
+ else
+ gpch->data_reg[reg] &= ~(1 << (gpio & 0x1f));
+
+ pci_write_config_dword(gpch->sb_pdev,
+ reg ? gpch->reg2_data_base : gpch->reg1_data_base,
+ gpch->data_reg[reg]);
+}
+
+/* set GPIO pin to value */
+static void rdc_gpio_set_value(struct gpio_chip *chip,
+ unsigned gpio, int value)
+{
+ struct rdc321x_gpio *gpch;
+
+ gpch = container_of(chip, struct rdc321x_gpio, chip);
+ spin_lock(&gpch->lock);
+ rdc_gpio_set_value_impl(chip, gpio, value);
+ spin_unlock(&gpch->lock);
+}
+
+static int rdc_gpio_config(struct gpio_chip *chip,
+ unsigned gpio, int value)
+{
+ struct rdc321x_gpio *gpch;
+ int err;
+ u32 reg;
+
+ gpch = container_of(chip, struct rdc321x_gpio, chip);
+
+ spin_lock(&gpch->lock);
+ err = pci_read_config_dword(gpch->sb_pdev, gpio < 32 ?
+ gpch->reg1_ctrl_base : gpch->reg2_ctrl_base, &reg);
+ if (err)
+ goto unlock;
+
+ reg |= 1 << (gpio & 0x1f);
+
+ err = pci_write_config_dword(gpch->sb_pdev, gpio < 32 ?
+ gpch->reg1_ctrl_base : gpch->reg2_ctrl_base, reg);
+ if (err)
+ goto unlock;
+
+ rdc_gpio_set_value_impl(chip, gpio, value);
+
+unlock:
+ spin_unlock(&gpch->lock);
+
+ return err;
+}
+
+/* configure GPIO pin as input */
+static int rdc_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
+{
+ return rdc_gpio_config(chip, gpio, 1);
+}
+
+/*
+ * Cache the initial value of both GPIO data registers
+ */
+static int __devinit rdc321x_gpio_probe(struct platform_device *pdev)
+{
+ int err;
+ struct resource *r;
+ struct rdc321x_gpio *rdc321x_gpio_dev;
+ struct rdc321x_gpio_pdata *pdata;
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ dev_err(&pdev->dev, "no platform data supplied\n");
+ return -ENODEV;
+ }
+
+ rdc321x_gpio_dev = kzalloc(sizeof(struct rdc321x_gpio), GFP_KERNEL);
+ if (!rdc321x_gpio_dev) {
+ dev_err(&pdev->dev, "failed to allocate private data\n");
+ return -ENOMEM;
+ }
+
+ r = platform_get_resource_byname(pdev, IORESOURCE_IO, "gpio-reg1");
+ if (!r) {
+ dev_err(&pdev->dev, "failed to get gpio-reg1 resource\n");
+ err = -ENODEV;
+ goto out_free;
+ }
+
+ spin_lock_init(&rdc321x_gpio_dev->lock);
+ rdc321x_gpio_dev->sb_pdev = pdata->sb_pdev;
+ rdc321x_gpio_dev->reg1_ctrl_base = r->start;
+ rdc321x_gpio_dev->reg1_data_base = r->start + 0x4;
+
+ r = platform_get_resource_byname(pdev, IORESOURCE_IO, "gpio-reg2");
+ if (!r) {
+ dev_err(&pdev->dev, "failed to get gpio-reg2 resource\n");
+ err = -ENODEV;
+ goto out_free;
+ }
+
+ rdc321x_gpio_dev->reg2_ctrl_base = r->start;
+ rdc321x_gpio_dev->reg2_data_base = r->start + 0x4;
+
+ rdc321x_gpio_dev->chip.label = "rdc321x-gpio";
+ rdc321x_gpio_dev->chip.direction_input = rdc_gpio_direction_input;
+ rdc321x_gpio_dev->chip.direction_output = rdc_gpio_config;
+ rdc321x_gpio_dev->chip.get = rdc_gpio_get_value;
+ rdc321x_gpio_dev->chip.set = rdc_gpio_set_value;
+ rdc321x_gpio_dev->chip.base = 0;
+ rdc321x_gpio_dev->chip.ngpio = pdata->max_gpios;
+
+ platform_set_drvdata(pdev, rdc321x_gpio_dev);
+
+ /* This might not be what others (BIOS, bootloader, etc.)
+ wrote to these registers before, but it's a good guess. Still
+ better than just using 0xffffffff. */
+ err = pci_read_config_dword(rdc321x_gpio_dev->sb_pdev,
+ rdc321x_gpio_dev->reg1_data_base,
+ &rdc321x_gpio_dev->data_reg[0]);
+ if (err)
+ goto out_drvdata;
+
+ err = pci_read_config_dword(rdc321x_gpio_dev->sb_pdev,
+ rdc321x_gpio_dev->reg2_data_base,
+ &rdc321x_gpio_dev->data_reg[1]);
+ if (err)
+ goto out_drvdata;
+
+ dev_info(&pdev->dev, "registering %d GPIOs\n",
+ rdc321x_gpio_dev->chip.ngpio);
+ return gpiochip_add(&rdc321x_gpio_dev->chip);
+
+out_drvdata:
+ platform_set_drvdata(pdev, NULL);
+out_free:
+ kfree(rdc321x_gpio_dev);
+ return err;
+}
+
+static int __devexit rdc321x_gpio_remove(struct platform_device *pdev)
+{
+ int ret;
+ struct rdc321x_gpio *rdc321x_gpio_dev = platform_get_drvdata(pdev);
+
+ ret = gpiochip_remove(&rdc321x_gpio_dev->chip);
+ if (ret)
+ dev_err(&pdev->dev, "failed to unregister chip\n");
+
+ kfree(rdc321x_gpio_dev);
+ platform_set_drvdata(pdev, NULL);
+
+ return ret;
+}
+
+static struct platform_driver rdc321x_gpio_driver = {
+ .driver.name = "rdc321x-gpio",
+ .driver.owner = THIS_MODULE,
+ .probe = rdc321x_gpio_probe,
+ .remove = __devexit_p(rdc321x_gpio_remove),
+};
+
+static int __init rdc321x_gpio_init(void)
+{
+ return platform_driver_register(&rdc321x_gpio_driver);
+}
+
+static void __exit rdc321x_gpio_exit(void)
+{
+ platform_driver_unregister(&rdc321x_gpio_driver);
+}
+
+module_init(rdc321x_gpio_init);
+module_exit(rdc321x_gpio_exit);
+
+MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
+MODULE_DESCRIPTION("RDC321x GPIO driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:rdc321x-gpio");
diff --git a/drivers/gpio/tc35892-gpio.c b/drivers/gpio/tc35892-gpio.c
new file mode 100644
index 000000000000..1be6288780de
--- /dev/null
+++ b/drivers/gpio/tc35892-gpio.c
@@ -0,0 +1,381 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License Terms: GNU General Public License, version 2
+ * Author: Hanumath Prasad <hanumath.prasad@stericsson.com> for ST-Ericsson
+ * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/tc35892.h>
+
+/*
+ * These registers are modified under the irq bus lock and cached to avoid
+ * unnecessary writes in bus_sync_unlock.
+ */
+enum { REG_IBE, REG_IEV, REG_IS, REG_IE };
+
+#define CACHE_NR_REGS 4
+#define CACHE_NR_BANKS 3
+
+struct tc35892_gpio {
+ struct gpio_chip chip;
+ struct tc35892 *tc35892;
+ struct device *dev;
+ struct mutex irq_lock;
+
+ int irq_base;
+
+ /* Caches of interrupt control registers for bus_lock */
+ u8 regs[CACHE_NR_REGS][CACHE_NR_BANKS];
+ u8 oldregs[CACHE_NR_REGS][CACHE_NR_BANKS];
+};
+
+static inline struct tc35892_gpio *to_tc35892_gpio(struct gpio_chip *chip)
+{
+ return container_of(chip, struct tc35892_gpio, chip);
+}
+
+static int tc35892_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ struct tc35892_gpio *tc35892_gpio = to_tc35892_gpio(chip);
+ struct tc35892 *tc35892 = tc35892_gpio->tc35892;
+ u8 reg = TC35892_GPIODATA0 + (offset / 8) * 2;
+ u8 mask = 1 << (offset % 8);
+ int ret;
+
+ ret = tc35892_reg_read(tc35892, reg);
+ if (ret < 0)
+ return ret;
+
+ return ret & mask;
+}
+
+static void tc35892_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
+{
+ struct tc35892_gpio *tc35892_gpio = to_tc35892_gpio(chip);
+ struct tc35892 *tc35892 = tc35892_gpio->tc35892;
+ u8 reg = TC35892_GPIODATA0 + (offset / 8) * 2;
+ unsigned pos = offset % 8;
+ u8 data[] = {!!val << pos, 1 << pos};
+
+ tc35892_block_write(tc35892, reg, ARRAY_SIZE(data), data);
+}
+
+static int tc35892_gpio_direction_output(struct gpio_chip *chip,
+ unsigned offset, int val)
+{
+ struct tc35892_gpio *tc35892_gpio = to_tc35892_gpio(chip);
+ struct tc35892 *tc35892 = tc35892_gpio->tc35892;
+ u8 reg = TC35892_GPIODIR0 + offset / 8;
+ unsigned pos = offset % 8;
+
+ tc35892_gpio_set(chip, offset, val);
+
+ return tc35892_set_bits(tc35892, reg, 1 << pos, 1 << pos);
+}
+
+static int tc35892_gpio_direction_input(struct gpio_chip *chip,
+ unsigned offset)
+{
+ struct tc35892_gpio *tc35892_gpio = to_tc35892_gpio(chip);
+ struct tc35892 *tc35892 = tc35892_gpio->tc35892;
+ u8 reg = TC35892_GPIODIR0 + offset / 8;
+ unsigned pos = offset % 8;
+
+ return tc35892_set_bits(tc35892, reg, 1 << pos, 0);
+}
+
+static int tc35892_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+ struct tc35892_gpio *tc35892_gpio = to_tc35892_gpio(chip);
+
+ return tc35892_gpio->irq_base + offset;
+}
+
+static struct gpio_chip template_chip = {
+ .label = "tc35892",
+ .owner = THIS_MODULE,
+ .direction_input = tc35892_gpio_direction_input,
+ .get = tc35892_gpio_get,
+ .direction_output = tc35892_gpio_direction_output,
+ .set = tc35892_gpio_set,
+ .to_irq = tc35892_gpio_to_irq,
+ .can_sleep = 1,
+};
+
+static int tc35892_gpio_irq_set_type(unsigned int irq, unsigned int type)
+{
+ struct tc35892_gpio *tc35892_gpio = get_irq_chip_data(irq);
+ int offset = irq - tc35892_gpio->irq_base;
+ int regoffset = offset / 8;
+ int mask = 1 << (offset % 8);
+
+ if (type == IRQ_TYPE_EDGE_BOTH) {
+ tc35892_gpio->regs[REG_IBE][regoffset] |= mask;
+ return 0;
+ }
+
+ tc35892_gpio->regs[REG_IBE][regoffset] &= ~mask;
+
+ if (type == IRQ_TYPE_LEVEL_LOW || type == IRQ_TYPE_LEVEL_HIGH)
+ tc35892_gpio->regs[REG_IS][regoffset] |= mask;
+ else
+ tc35892_gpio->regs[REG_IS][regoffset] &= ~mask;
+
+ if (type == IRQ_TYPE_EDGE_RISING || type == IRQ_TYPE_LEVEL_HIGH)
+ tc35892_gpio->regs[REG_IEV][regoffset] |= mask;
+ else
+ tc35892_gpio->regs[REG_IEV][regoffset] &= ~mask;
+
+ return 0;
+}
+
+static void tc35892_gpio_irq_lock(unsigned int irq)
+{
+ struct tc35892_gpio *tc35892_gpio = get_irq_chip_data(irq);
+
+ mutex_lock(&tc35892_gpio->irq_lock);
+}
+
+static void tc35892_gpio_irq_sync_unlock(unsigned int irq)
+{
+ struct tc35892_gpio *tc35892_gpio = get_irq_chip_data(irq);
+ struct tc35892 *tc35892 = tc35892_gpio->tc35892;
+ static const u8 regmap[] = {
+ [REG_IBE] = TC35892_GPIOIBE0,
+ [REG_IEV] = TC35892_GPIOIEV0,
+ [REG_IS] = TC35892_GPIOIS0,
+ [REG_IE] = TC35892_GPIOIE0,
+ };
+ int i, j;
+
+ for (i = 0; i < CACHE_NR_REGS; i++) {
+ for (j = 0; j < CACHE_NR_BANKS; j++) {
+ u8 old = tc35892_gpio->oldregs[i][j];
+ u8 new = tc35892_gpio->regs[i][j];
+
+ if (new == old)
+ continue;
+
+ tc35892_gpio->oldregs[i][j] = new;
+ tc35892_reg_write(tc35892, regmap[i] + j * 8, new);
+ }
+ }
+
+ mutex_unlock(&tc35892_gpio->irq_lock);
+}
+
+static void tc35892_gpio_irq_mask(unsigned int irq)
+{
+ struct tc35892_gpio *tc35892_gpio = get_irq_chip_data(irq);
+ int offset = irq - tc35892_gpio->irq_base;
+ int regoffset = offset / 8;
+ int mask = 1 << (offset % 8);
+
+ tc35892_gpio->regs[REG_IE][regoffset] &= ~mask;
+}
+
+static void tc35892_gpio_irq_unmask(unsigned int irq)
+{
+ struct tc35892_gpio *tc35892_gpio = get_irq_chip_data(irq);
+ int offset = irq - tc35892_gpio->irq_base;
+ int regoffset = offset / 8;
+ int mask = 1 << (offset % 8);
+
+ tc35892_gpio->regs[REG_IE][regoffset] |= mask;
+}
+
+static struct irq_chip tc35892_gpio_irq_chip = {
+ .name = "tc35892-gpio",
+ .bus_lock = tc35892_gpio_irq_lock,
+ .bus_sync_unlock = tc35892_gpio_irq_sync_unlock,
+ .mask = tc35892_gpio_irq_mask,
+ .unmask = tc35892_gpio_irq_unmask,
+ .set_type = tc35892_gpio_irq_set_type,
+};
+
+static irqreturn_t tc35892_gpio_irq(int irq, void *dev)
+{
+ struct tc35892_gpio *tc35892_gpio = dev;
+ struct tc35892 *tc35892 = tc35892_gpio->tc35892;
+ u8 status[CACHE_NR_BANKS];
+ int ret;
+ int i;
+
+ ret = tc35892_block_read(tc35892, TC35892_GPIOMIS0,
+ ARRAY_SIZE(status), status);
+ if (ret < 0)
+ return IRQ_NONE;
+
+ for (i = 0; i < ARRAY_SIZE(status); i++) {
+ unsigned int stat = status[i];
+ if (!stat)
+ continue;
+
+ while (stat) {
+ int bit = __ffs(stat);
+ int line = i * 8 + bit;
+
+ handle_nested_irq(tc35892_gpio->irq_base + line);
+ stat &= ~(1 << bit);
+ }
+
+ tc35892_reg_write(tc35892, TC35892_GPIOIC0 + i, status[i]);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int tc35892_gpio_irq_init(struct tc35892_gpio *tc35892_gpio)
+{
+ int base = tc35892_gpio->irq_base;
+ int irq;
+
+ for (irq = base; irq < base + tc35892_gpio->chip.ngpio; irq++) {
+ set_irq_chip_data(irq, tc35892_gpio);
+ set_irq_chip_and_handler(irq, &tc35892_gpio_irq_chip,
+ handle_simple_irq);
+ set_irq_nested_thread(irq, 1);
+#ifdef CONFIG_ARM
+ set_irq_flags(irq, IRQF_VALID);
+#else
+ set_irq_noprobe(irq);
+#endif
+ }
+
+ return 0;
+}
+
+static void tc35892_gpio_irq_remove(struct tc35892_gpio *tc35892_gpio)
+{
+ int base = tc35892_gpio->irq_base;
+ int irq;
+
+ for (irq = base; irq < base + tc35892_gpio->chip.ngpio; irq++) {
+#ifdef CONFIG_ARM
+ set_irq_flags(irq, 0);
+#endif
+ set_irq_chip_and_handler(irq, NULL, NULL);
+ set_irq_chip_data(irq, NULL);
+ }
+}
+
+static int __devinit tc35892_gpio_probe(struct platform_device *pdev)
+{
+ struct tc35892 *tc35892 = dev_get_drvdata(pdev->dev.parent);
+ struct tc35892_gpio_platform_data *pdata;
+ struct tc35892_gpio *tc35892_gpio;
+ int ret;
+ int irq;
+
+ pdata = tc35892->pdata->gpio;
+ if (!pdata)
+ return -ENODEV;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ tc35892_gpio = kzalloc(sizeof(struct tc35892_gpio), GFP_KERNEL);
+ if (!tc35892_gpio)
+ return -ENOMEM;
+
+ mutex_init(&tc35892_gpio->irq_lock);
+
+ tc35892_gpio->dev = &pdev->dev;
+ tc35892_gpio->tc35892 = tc35892;
+
+ tc35892_gpio->chip = template_chip;
+ tc35892_gpio->chip.ngpio = tc35892->num_gpio;
+ tc35892_gpio->chip.dev = &pdev->dev;
+ tc35892_gpio->chip.base = pdata->gpio_base;
+
+ tc35892_gpio->irq_base = tc35892->irq_base + TC35892_INT_GPIO(0);
+
+ /* Bring the GPIO module out of reset */
+ ret = tc35892_set_bits(tc35892, TC35892_RSTCTRL,
+ TC35892_RSTCTRL_GPIRST, 0);
+ if (ret < 0)
+ goto out_free;
+
+ ret = tc35892_gpio_irq_init(tc35892_gpio);
+ if (ret)
+ goto out_free;
+
+ ret = request_threaded_irq(irq, NULL, tc35892_gpio_irq, IRQF_ONESHOT,
+ "tc35892-gpio", tc35892_gpio);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to get irq: %d\n", ret);
+ goto out_removeirq;
+ }
+
+ ret = gpiochip_add(&tc35892_gpio->chip);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to add gpiochip: %d\n", ret);
+ goto out_freeirq;
+ }
+
+ platform_set_drvdata(pdev, tc35892_gpio);
+
+ return 0;
+
+out_freeirq:
+ free_irq(irq, tc35892_gpio);
+out_removeirq:
+ tc35892_gpio_irq_remove(tc35892_gpio);
+out_free:
+ kfree(tc35892_gpio);
+ return ret;
+}
+
+static int __devexit tc35892_gpio_remove(struct platform_device *pdev)
+{
+ struct tc35892_gpio *tc35892_gpio = platform_get_drvdata(pdev);
+ int irq = platform_get_irq(pdev, 0);
+ int ret;
+
+ ret = gpiochip_remove(&tc35892_gpio->chip);
+ if (ret < 0) {
+ dev_err(tc35892_gpio->dev,
+ "unable to remove gpiochip: %d\n", ret);
+ return ret;
+ }
+
+ free_irq(irq, tc35892_gpio);
+ tc35892_gpio_irq_remove(tc35892_gpio);
+
+ platform_set_drvdata(pdev, NULL);
+ kfree(tc35892_gpio);
+
+ return 0;
+}
+
+static struct platform_driver tc35892_gpio_driver = {
+ .driver.name = "tc35892-gpio",
+ .driver.owner = THIS_MODULE,
+ .probe = tc35892_gpio_probe,
+ .remove = __devexit_p(tc35892_gpio_remove),
+};
+
+static int __init tc35892_gpio_init(void)
+{
+ return platform_driver_register(&tc35892_gpio_driver);
+}
+subsys_initcall(tc35892_gpio_init);
+
+static void __exit tc35892_gpio_exit(void)
+{
+ platform_driver_unregister(&tc35892_gpio_driver);
+}
+module_exit(tc35892_gpio_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TC35892 GPIO driver");
+MODULE_AUTHOR("Hanumath Prasad, Rabin Vincent");
diff --git a/drivers/gpio/wm8994-gpio.c b/drivers/gpio/wm8994-gpio.c
index 7607cc61e1dd..2ac9a16d3daa 100644
--- a/drivers/gpio/wm8994-gpio.c
+++ b/drivers/gpio/wm8994-gpio.c
@@ -81,6 +81,18 @@ static void wm8994_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
wm8994_set_bits(wm8994, WM8994_GPIO_1 + offset, WM8994_GPN_LVL, value);
}
+static int wm8994_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+ struct wm8994_gpio *wm8994_gpio = to_wm8994_gpio(chip);
+ struct wm8994 *wm8994 = wm8994_gpio->wm8994;
+
+ if (!wm8994->irq_base)
+ return -EINVAL;
+
+ return wm8994->irq_base + offset;
+}
+
+
#ifdef CONFIG_DEBUG_FS
static void wm8994_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
{
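With the ->to_irq hook added above, machine-level code can resolve a WM8994 GPIO to its interrupt through gpiolib instead of computing wm8994->irq_base + offset by hand. A hedged sketch; wire_wm8994_gpio_irq and its arguments are placeholders:

#include <linux/gpio.h>
#include <linux/interrupt.h>

static int wire_wm8994_gpio_irq(unsigned gpio, irq_handler_t thread_fn,
				void *data)
{
	int irq = gpio_to_irq(gpio);	/* -EINVAL if the codec has no irq_base */

	if (irq < 0)
		return irq;

	return request_threaded_irq(irq, NULL, thread_fn,
				    IRQF_TRIGGER_RISING | IRQF_ONESHOT,
				    "wm8994-gpio", data);
}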
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 305c59003963..88910e5a2c77 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -9,6 +9,7 @@ menuconfig DRM
depends on (AGP || AGP=n) && PCI && !EMULATED_CMPXCHG && MMU
select I2C
select I2C_ALGOBIT
+ select SLOW_WORK
help
Kernel-level support for the Direct Rendering Infrastructure (DRI)
introduced in XFree86 4.0. If you say Y here, you need to select
@@ -59,6 +60,7 @@ config DRM_RADEON
select FW_LOADER
select DRM_KMS_HELPER
select DRM_TTM
+ select POWER_SUPPLY
help
Choose this option if you have an ATI Radeon graphics card. There
are both PCI and AGP versions. You don't need to choose this to
diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
index 932b5aa96a67..3f46772f0cb2 100644
--- a/drivers/gpu/drm/drm_auth.c
+++ b/drivers/gpu/drm/drm_auth.c
@@ -79,10 +79,9 @@ static int drm_add_magic(struct drm_master *master, struct drm_file *priv,
struct drm_device *dev = master->minor->dev;
DRM_DEBUG("%d\n", magic);
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;
- memset(entry, 0, sizeof(*entry));
entry->priv = priv;
entry->hash_item.key = (unsigned long)magic;
mutex_lock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index f7ba82ebf65a..2092e7bb788f 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -961,7 +961,7 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
dma->buflist[i + dma->buf_count] = &entry->buflist[i];
}
- /* No allocations failed, so now we can replace the orginal pagelist
+ /* No allocations failed, so now we can replace the original pagelist
* with the new one.
*/
if (dma->page_count) {
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 61b9bcfdf040..994d23beeb1d 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -34,6 +34,7 @@
#include "drm.h"
#include "drmP.h"
#include "drm_crtc.h"
+#include "drm_edid.h"
struct drm_prop_enum_list {
int type;
@@ -494,7 +495,6 @@ void drm_connector_cleanup(struct drm_connector *connector)
list_for_each_entry_safe(mode, t, &connector->user_modes, head)
drm_mode_remove(connector, mode);
- kfree(connector->fb_helper_private);
mutex_lock(&dev->mode_config.mutex);
drm_mode_object_put(dev, &connector->base);
list_del(&connector->head);
@@ -858,7 +858,6 @@ void drm_mode_config_init(struct drm_device *dev)
mutex_init(&dev->mode_config.mutex);
mutex_init(&dev->mode_config.idr_mutex);
INIT_LIST_HEAD(&dev->mode_config.fb_list);
- INIT_LIST_HEAD(&dev->mode_config.fb_kernel_list);
INIT_LIST_HEAD(&dev->mode_config.crtc_list);
INIT_LIST_HEAD(&dev->mode_config.connector_list);
INIT_LIST_HEAD(&dev->mode_config.encoder_list);
@@ -2350,7 +2349,7 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
struct edid *edid)
{
struct drm_device *dev = connector->dev;
- int ret = 0;
+ int ret = 0, size;
if (connector->edid_blob_ptr)
drm_property_destroy_blob(dev, connector->edid_blob_ptr);
@@ -2362,7 +2361,9 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
return ret;
}
- connector->edid_blob_ptr = drm_property_create_blob(connector->dev, 128, edid);
+ size = EDID_LENGTH * (1 + edid->extensions);
+ connector->edid_blob_ptr = drm_property_create_blob(connector->dev,
+ size, edid);
ret = drm_connector_property_set_value(connector,
dev->mode_config.edid_property,
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 51103aa469f8..764401951041 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -55,7 +55,7 @@ static void drm_mode_validate_flag(struct drm_connector *connector,
}
/**
- * drm_helper_probe_connector_modes - get complete set of display modes
+ * drm_helper_probe_single_connector_modes - get complete set of display modes
* @dev: DRM device
* @maxX: max width for modes
* @maxY: max height for modes
@@ -154,21 +154,6 @@ prune:
}
EXPORT_SYMBOL(drm_helper_probe_single_connector_modes);
-int drm_helper_probe_connector_modes(struct drm_device *dev, uint32_t maxX,
- uint32_t maxY)
-{
- struct drm_connector *connector;
- int count = 0;
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- count += drm_helper_probe_single_connector_modes(connector,
- maxX, maxY);
- }
-
- return count;
-}
-EXPORT_SYMBOL(drm_helper_probe_connector_modes);
-
/**
* drm_helper_encoder_in_use - check if a given encoder is in use
* @encoder: encoder to check
@@ -263,302 +248,6 @@ void drm_helper_disable_unused_functions(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_helper_disable_unused_functions);
-static struct drm_display_mode *drm_has_preferred_mode(struct drm_connector *connector, int width, int height)
-{
- struct drm_display_mode *mode;
-
- list_for_each_entry(mode, &connector->modes, head) {
- if (drm_mode_width(mode) > width ||
- drm_mode_height(mode) > height)
- continue;
- if (mode->type & DRM_MODE_TYPE_PREFERRED)
- return mode;
- }
- return NULL;
-}
-
-static bool drm_has_cmdline_mode(struct drm_connector *connector)
-{
- struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private;
- struct drm_fb_helper_cmdline_mode *cmdline_mode;
-
- if (!fb_help_conn)
- return false;
-
- cmdline_mode = &fb_help_conn->cmdline_mode;
- return cmdline_mode->specified;
-}
-
-static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_connector *connector, int width, int height)
-{
- struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private;
- struct drm_fb_helper_cmdline_mode *cmdline_mode;
- struct drm_display_mode *mode = NULL;
-
- if (!fb_help_conn)
- return mode;
-
- cmdline_mode = &fb_help_conn->cmdline_mode;
- if (cmdline_mode->specified == false)
- return mode;
-
- /* attempt to find a matching mode in the list of modes
- * we have gotten so far, if not add a CVT mode that conforms
- */
- if (cmdline_mode->rb || cmdline_mode->margins)
- goto create_mode;
-
- list_for_each_entry(mode, &connector->modes, head) {
- /* check width/height */
- if (mode->hdisplay != cmdline_mode->xres ||
- mode->vdisplay != cmdline_mode->yres)
- continue;
-
- if (cmdline_mode->refresh_specified) {
- if (mode->vrefresh != cmdline_mode->refresh)
- continue;
- }
-
- if (cmdline_mode->interlace) {
- if (!(mode->flags & DRM_MODE_FLAG_INTERLACE))
- continue;
- }
- return mode;
- }
-
-create_mode:
- mode = drm_cvt_mode(connector->dev, cmdline_mode->xres,
- cmdline_mode->yres,
- cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60,
- cmdline_mode->rb, cmdline_mode->interlace,
- cmdline_mode->margins);
- drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
- list_add(&mode->head, &connector->modes);
- return mode;
-}
-
-static bool drm_connector_enabled(struct drm_connector *connector, bool strict)
-{
- bool enable;
-
- if (strict) {
- enable = connector->status == connector_status_connected;
- } else {
- enable = connector->status != connector_status_disconnected;
- }
- return enable;
-}
-
-static void drm_enable_connectors(struct drm_device *dev, bool *enabled)
-{
- bool any_enabled = false;
- struct drm_connector *connector;
- int i = 0;
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- enabled[i] = drm_connector_enabled(connector, true);
- DRM_DEBUG_KMS("connector %d enabled? %s\n", connector->base.id,
- enabled[i] ? "yes" : "no");
- any_enabled |= enabled[i];
- i++;
- }
-
- if (any_enabled)
- return;
-
- i = 0;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- enabled[i] = drm_connector_enabled(connector, false);
- i++;
- }
-}
-
-static bool drm_target_preferred(struct drm_device *dev,
- struct drm_display_mode **modes,
- bool *enabled, int width, int height)
-{
- struct drm_connector *connector;
- int i = 0;
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-
- if (enabled[i] == false) {
- i++;
- continue;
- }
-
- DRM_DEBUG_KMS("looking for cmdline mode on connector %d\n",
- connector->base.id);
-
- /* got for command line mode first */
- modes[i] = drm_pick_cmdline_mode(connector, width, height);
- if (!modes[i]) {
- DRM_DEBUG_KMS("looking for preferred mode on connector %d\n",
- connector->base.id);
- modes[i] = drm_has_preferred_mode(connector, width, height);
- }
- /* No preferred modes, pick one off the list */
- if (!modes[i] && !list_empty(&connector->modes)) {
- list_for_each_entry(modes[i], &connector->modes, head)
- break;
- }
- DRM_DEBUG_KMS("found mode %s\n", modes[i] ? modes[i]->name :
- "none");
- i++;
- }
- return true;
-}
-
-static int drm_pick_crtcs(struct drm_device *dev,
- struct drm_crtc **best_crtcs,
- struct drm_display_mode **modes,
- int n, int width, int height)
-{
- int c, o;
- struct drm_connector *connector;
- struct drm_connector_helper_funcs *connector_funcs;
- struct drm_encoder *encoder;
- struct drm_crtc *best_crtc;
- int my_score, best_score, score;
- struct drm_crtc **crtcs, *crtc;
-
- if (n == dev->mode_config.num_connector)
- return 0;
- c = 0;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- if (c == n)
- break;
- c++;
- }
-
- best_crtcs[n] = NULL;
- best_crtc = NULL;
- best_score = drm_pick_crtcs(dev, best_crtcs, modes, n+1, width, height);
- if (modes[n] == NULL)
- return best_score;
-
- crtcs = kmalloc(dev->mode_config.num_connector *
- sizeof(struct drm_crtc *), GFP_KERNEL);
- if (!crtcs)
- return best_score;
-
- my_score = 1;
- if (connector->status == connector_status_connected)
- my_score++;
- if (drm_has_cmdline_mode(connector))
- my_score++;
- if (drm_has_preferred_mode(connector, width, height))
- my_score++;
-
- connector_funcs = connector->helper_private;
- encoder = connector_funcs->best_encoder(connector);
- if (!encoder)
- goto out;
-
- connector->encoder = encoder;
-
- /* select a crtc for this connector and then attempt to configure
- remaining connectors */
- c = 0;
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-
- if ((encoder->possible_crtcs & (1 << c)) == 0) {
- c++;
- continue;
- }
-
- for (o = 0; o < n; o++)
- if (best_crtcs[o] == crtc)
- break;
-
- if (o < n) {
- /* ignore cloning for now */
- c++;
- continue;
- }
-
- crtcs[n] = crtc;
- memcpy(crtcs, best_crtcs, n * sizeof(struct drm_crtc *));
- score = my_score + drm_pick_crtcs(dev, crtcs, modes, n + 1,
- width, height);
- if (score > best_score) {
- best_crtc = crtc;
- best_score = score;
- memcpy(best_crtcs, crtcs,
- dev->mode_config.num_connector *
- sizeof(struct drm_crtc *));
- }
- c++;
- }
-out:
- kfree(crtcs);
- return best_score;
-}
-
-static void drm_setup_crtcs(struct drm_device *dev)
-{
- struct drm_crtc **crtcs;
- struct drm_display_mode **modes;
- struct drm_encoder *encoder;
- struct drm_connector *connector;
- bool *enabled;
- int width, height;
- int i, ret;
-
- DRM_DEBUG_KMS("\n");
-
- width = dev->mode_config.max_width;
- height = dev->mode_config.max_height;
-
- /* clean out all the encoder/crtc combos */
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- encoder->crtc = NULL;
- }
-
- crtcs = kcalloc(dev->mode_config.num_connector,
- sizeof(struct drm_crtc *), GFP_KERNEL);
- modes = kcalloc(dev->mode_config.num_connector,
- sizeof(struct drm_display_mode *), GFP_KERNEL);
- enabled = kcalloc(dev->mode_config.num_connector,
- sizeof(bool), GFP_KERNEL);
-
- drm_enable_connectors(dev, enabled);
-
- ret = drm_target_preferred(dev, modes, enabled, width, height);
- if (!ret)
- DRM_ERROR("Unable to find initial modes\n");
-
- DRM_DEBUG_KMS("picking CRTCs for %dx%d config\n", width, height);
-
- drm_pick_crtcs(dev, crtcs, modes, 0, width, height);
-
- i = 0;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- struct drm_display_mode *mode = modes[i];
- struct drm_crtc *crtc = crtcs[i];
-
- if (connector->encoder == NULL) {
- i++;
- continue;
- }
-
- if (mode && crtc) {
- DRM_DEBUG_KMS("desired mode %s set on crtc %d\n",
- mode->name, crtc->base.id);
- crtc->desired_mode = mode;
- connector->encoder->crtc = crtc;
- } else {
- connector->encoder->crtc = NULL;
- connector->encoder = NULL;
- }
- i++;
- }
-
- kfree(crtcs);
- kfree(modes);
- kfree(enabled);
-}
-
/**
* drm_encoder_crtc_ok - can a given crtc drive a given encoder?
* @encoder: encoder to test
@@ -936,10 +625,6 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
ret = -EINVAL;
goto fail;
}
- /* TODO are these needed? */
- set->crtc->desired_x = set->x;
- set->crtc->desired_y = set->y;
- set->crtc->desired_mode = set->mode;
}
drm_helper_disable_unused_functions(dev);
} else if (fb_changed) {
@@ -984,63 +669,6 @@ fail:
}
EXPORT_SYMBOL(drm_crtc_helper_set_config);
-bool drm_helper_plugged_event(struct drm_device *dev)
-{
- DRM_DEBUG_KMS("\n");
-
- drm_helper_probe_connector_modes(dev, dev->mode_config.max_width,
- dev->mode_config.max_height);
-
- drm_setup_crtcs(dev);
-
- /* alert the driver fb layer */
- dev->mode_config.funcs->fb_changed(dev);
-
- /* FIXME: send hotplug event */
- return true;
-}
-/**
- * drm_initial_config - setup a sane initial connector configuration
- * @dev: DRM device
- *
- * LOCKING:
- * Called at init time, must take mode config lock.
- *
- * Scan the CRTCs and connectors and try to put together an initial setup.
- * At the moment, this is a cloned configuration across all heads with
- * a new framebuffer object as the backing store.
- *
- * RETURNS:
- * Zero if everything went ok, nonzero otherwise.
- */
-bool drm_helper_initial_config(struct drm_device *dev)
-{
- int count = 0;
-
- /* disable all the possible outputs/crtcs before entering KMS mode */
- drm_helper_disable_unused_functions(dev);
-
- drm_fb_helper_parse_command_line(dev);
-
- count = drm_helper_probe_connector_modes(dev,
- dev->mode_config.max_width,
- dev->mode_config.max_height);
-
- /*
- * we shouldn't end up with no modes here.
- */
- if (count == 0)
- printk(KERN_INFO "No connectors reported connected with modes\n");
-
- drm_setup_crtcs(dev);
-
- /* alert the driver fb layer */
- dev->mode_config.funcs->fb_changed(dev);
-
- return 0;
-}
-EXPORT_SYMBOL(drm_helper_initial_config);
-
static int drm_helper_choose_encoder_dpms(struct drm_encoder *encoder)
{
int dpms = DRM_MODE_DPMS_OFF;
@@ -1123,27 +751,6 @@ void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
}
EXPORT_SYMBOL(drm_helper_connector_dpms);
-/**
- * drm_hotplug_stage_two
- * @dev DRM device
- * @connector hotpluged connector
- *
- * LOCKING.
- * Caller must hold mode config lock, function might grab struct lock.
- *
- * Stage two of a hotplug.
- *
- * RETURNS:
- * Zero on success, errno on failure.
- */
-int drm_helper_hotplug_stage_two(struct drm_device *dev)
-{
- drm_helper_plugged_event(dev);
-
- return 0;
-}
-EXPORT_SYMBOL(drm_helper_hotplug_stage_two);
-
int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
struct drm_mode_fb_cmd *mode_cmd)
{
@@ -1200,3 +807,98 @@ int drm_helper_resume_force_mode(struct drm_device *dev)
return 0;
}
EXPORT_SYMBOL(drm_helper_resume_force_mode);
+
+static struct slow_work_ops output_poll_ops;
+
+#define DRM_OUTPUT_POLL_PERIOD (10*HZ)
+static void output_poll_execute(struct slow_work *work)
+{
+ struct delayed_slow_work *delayed_work = container_of(work, struct delayed_slow_work, work);
+ struct drm_device *dev = container_of(delayed_work, struct drm_device, mode_config.output_poll_slow_work);
+ struct drm_connector *connector;
+ enum drm_connector_status old_status, status;
+ bool repoll = false, changed = false;
+ int ret;
+
+ mutex_lock(&dev->mode_config.mutex);
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+
+		/* skip connectors that are not flagged for polling at all -
+		   TV out, for instance */
+ if (!connector->polled)
+ continue;
+
+ else if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT))
+ repoll = true;
+
+ old_status = connector->status;
+ /* if we are connected and don't want to poll for disconnect
+ skip it */
+ if (old_status == connector_status_connected &&
+ !(connector->polled & DRM_CONNECTOR_POLL_DISCONNECT) &&
+ !(connector->polled & DRM_CONNECTOR_POLL_HPD))
+ continue;
+
+ status = connector->funcs->detect(connector);
+ if (old_status != status)
+ changed = true;
+ }
+
+ mutex_unlock(&dev->mode_config.mutex);
+
+ if (changed) {
+ /* send a uevent + call fbdev */
+ drm_sysfs_hotplug_event(dev);
+ if (dev->mode_config.funcs->output_poll_changed)
+ dev->mode_config.funcs->output_poll_changed(dev);
+ }
+
+ if (repoll) {
+ ret = delayed_slow_work_enqueue(delayed_work, DRM_OUTPUT_POLL_PERIOD);
+ if (ret)
+ DRM_ERROR("delayed enqueue failed %d\n", ret);
+ }
+}
+
+void drm_kms_helper_poll_init(struct drm_device *dev)
+{
+ struct drm_connector *connector;
+ bool poll = false;
+ int ret;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (connector->polled)
+ poll = true;
+ }
+ slow_work_register_user(THIS_MODULE);
+ delayed_slow_work_init(&dev->mode_config.output_poll_slow_work,
+ &output_poll_ops);
+
+ if (poll) {
+ ret = delayed_slow_work_enqueue(&dev->mode_config.output_poll_slow_work, DRM_OUTPUT_POLL_PERIOD);
+ if (ret)
+ DRM_ERROR("delayed enqueue failed %d\n", ret);
+ }
+}
+EXPORT_SYMBOL(drm_kms_helper_poll_init);
+
+void drm_kms_helper_poll_fini(struct drm_device *dev)
+{
+ delayed_slow_work_cancel(&dev->mode_config.output_poll_slow_work);
+ slow_work_unregister_user(THIS_MODULE);
+}
+EXPORT_SYMBOL(drm_kms_helper_poll_fini);
+
+void drm_helper_hpd_irq_event(struct drm_device *dev)
+{
+ if (!dev->mode_config.poll_enabled)
+ return;
+ delayed_slow_work_cancel(&dev->mode_config.output_poll_slow_work);
+ /* schedule a slow work asap */
+ delayed_slow_work_enqueue(&dev->mode_config.output_poll_slow_work, 0);
+}
+EXPORT_SYMBOL(drm_helper_hpd_irq_event);
+
+static struct slow_work_ops output_poll_ops = {
+ .execute = output_poll_execute,
+};
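The polling code above replaces the removed drm_helper_initial_config()/hotplug plumbing with an opt-in model: a driver flags the connectors that lack reliable hotplug, starts the poller at load time, and forwards real HPD interrupts to drm_helper_hpd_irq_event(). A hedged driver-side sketch; the mydrv_* names and the vga_connector argument are placeholders, and only the relevant mode_config callback is shown:

#include "drmP.h"
#include "drm_crtc_helper.h"

static void mydrv_output_poll_changed(struct drm_device *dev)
{
	/* typically kicks the fbdev emulation so it can repick modes */
}

static struct drm_mode_config_funcs mydrv_mode_funcs = {
	/* .fb_create and friends omitted */
	.output_poll_changed = mydrv_output_poll_changed,
};

static int mydrv_modeset_init(struct drm_device *dev,
			      struct drm_connector *vga_connector)
{
	dev->mode_config.funcs = &mydrv_mode_funcs;

	/* analog outputs without hotplug ask to be polled both ways */
	vga_connector->polled = DRM_CONNECTOR_POLL_CONNECT |
				DRM_CONNECTOR_POLL_DISCONNECT;

	/* schedules the delayed slow work if any connector set ->polled */
	drm_kms_helper_poll_init(dev);
	return 0;
}

static void mydrv_modeset_fini(struct drm_device *dev)
{
	drm_kms_helper_poll_fini(dev);
}

/* called from the driver's hotplug interrupt handler */
static void mydrv_hotplug_irq(struct drm_device *dev)
{
	drm_helper_hpd_irq_event(dev);
}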
diff --git a/drivers/gpu/drm/drm_dma.c b/drivers/gpu/drm/drm_dma.c
index 13f1537413fb..252cbd74df0e 100644
--- a/drivers/gpu/drm/drm_dma.c
+++ b/drivers/gpu/drm/drm_dma.c
@@ -47,12 +47,10 @@ int drm_dma_setup(struct drm_device *dev)
{
int i;
- dev->dma = kmalloc(sizeof(*dev->dma), GFP_KERNEL);
+ dev->dma = kzalloc(sizeof(*dev->dma), GFP_KERNEL);
if (!dev->dma)
return -ENOMEM;
- memset(dev->dma, 0, sizeof(*dev->dma));
-
for (i = 0; i <= DRM_MAX_ORDER; i++)
memset(&dev->dma->bufs[i], 0, sizeof(dev->dma->bufs[0]));
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 18f41d7061f0..c1981861bbbd 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -2,6 +2,7 @@
* Copyright (c) 2006 Luc Verhaegen (quirks list)
* Copyright (c) 2007-2008 Intel Corporation
* Jesse Barnes <jesse.barnes@intel.com>
+ * Copyright 2010 Red Hat, Inc.
*
* DDC probing routines (drm_ddc_read & drm_do_probe_ddc_edid) originally from
* FB layer.
@@ -33,10 +34,9 @@
#include "drmP.h"
#include "drm_edid.h"
-/*
- * TODO:
- * - support EDID 1.4 (incl. CE blocks)
- */
+#define EDID_EST_TIMINGS 16
+#define EDID_STD_TIMINGS 8
+#define EDID_DETAILED_TIMINGS 4
/*
* EDID blocks out in the wild have a variety of bugs, try to collect
@@ -65,7 +65,8 @@
#define LEVEL_DMT 0
#define LEVEL_GTF 1
-#define LEVEL_CVT 2
+#define LEVEL_GTF2 2
+#define LEVEL_CVT 3
static struct edid_quirk {
char *vendor;
@@ -109,51 +110,64 @@ static struct edid_quirk {
{ "SAM", 638, EDID_QUIRK_PREFER_LARGE_60 },
};
+/*** DDC fetch and block validation ***/
-/* Valid EDID header has these bytes */
static const u8 edid_header[] = {
0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
};
-/**
- * drm_edid_is_valid - sanity check EDID data
- * @edid: EDID data
- *
- * Sanity check the EDID block by looking at the header, the version number
- * and the checksum. Return 0 if the EDID doesn't check out, or 1 if it's
- * valid.
+/*
+ * Sanity check the EDID block (base or extension). Return 0 if the block
+ * doesn't check out, or 1 if it's valid.
*/
-bool drm_edid_is_valid(struct edid *edid)
+static bool
+drm_edid_block_valid(u8 *raw_edid)
{
- int i, score = 0;
+ int i;
u8 csum = 0;
- u8 *raw_edid = (u8 *)edid;
+ struct edid *edid = (struct edid *)raw_edid;
+
+ if (raw_edid[0] == 0x00) {
+ int score = 0;
- for (i = 0; i < sizeof(edid_header); i++)
- if (raw_edid[i] == edid_header[i])
- score++;
+ for (i = 0; i < sizeof(edid_header); i++)
+ if (raw_edid[i] == edid_header[i])
+ score++;
- if (score == 8) ;
- else if (score >= 6) {
- DRM_DEBUG("Fixing EDID header, your hardware may be failing\n");
- memcpy(raw_edid, edid_header, sizeof(edid_header));
- } else
- goto bad;
+ if (score == 8) ;
+ else if (score >= 6) {
+ DRM_DEBUG("Fixing EDID header, your hardware may be failing\n");
+ memcpy(raw_edid, edid_header, sizeof(edid_header));
+ } else {
+ goto bad;
+ }
+ }
for (i = 0; i < EDID_LENGTH; i++)
csum += raw_edid[i];
if (csum) {
DRM_ERROR("EDID checksum is invalid, remainder is %d\n", csum);
- goto bad;
- }
- if (edid->version != 1) {
- DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version);
- goto bad;
+ /* allow CEA to slide through, switches mangle this */
+ if (raw_edid[0] != 0x02)
+ goto bad;
}
- if (edid->revision > 4)
- DRM_DEBUG("EDID minor > 4, assuming backward compatibility\n");
+ /* per-block-type checks */
+ switch (raw_edid[0]) {
+ case 0: /* base */
+ if (edid->version != 1) {
+ DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version);
+ goto bad;
+ }
+
+ if (edid->revision > 4)
+ DRM_DEBUG("EDID minor > 4, assuming backward compatibility\n");
+ break;
+
+ default:
+ break;
+ }
return 1;
@@ -165,8 +179,158 @@ bad:
}
return 0;
}
+
+/**
+ * drm_edid_is_valid - sanity check EDID data
+ * @edid: EDID data
+ *
+ * Sanity-check an entire EDID record (including extensions)
+ */
+bool drm_edid_is_valid(struct edid *edid)
+{
+ int i;
+ u8 *raw = (u8 *)edid;
+
+ if (!edid)
+ return false;
+
+ for (i = 0; i <= edid->extensions; i++)
+ if (!drm_edid_block_valid(raw + i * EDID_LENGTH))
+ return false;
+
+ return true;
+}
EXPORT_SYMBOL(drm_edid_is_valid);
+#define DDC_ADDR 0x50
+#define DDC_SEGMENT_ADDR 0x30
+/**
+ * Get EDID information via I2C.
+ *
+ * \param adapter : i2c device adaptor
+ * \param buf : EDID data buffer to be filled
+ * \param len : EDID data buffer length
+ * \return 0 on success or -1 on failure.
+ *
+ * Try to fetch EDID information by calling i2c driver function.
+ */
+static int
+drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
+ int block, int len)
+{
+ unsigned char start = block * EDID_LENGTH;
+ struct i2c_msg msgs[] = {
+ {
+ .addr = DDC_ADDR,
+ .flags = 0,
+ .len = 1,
+ .buf = &start,
+ }, {
+ .addr = DDC_ADDR,
+ .flags = I2C_M_RD,
+ .len = len,
+ .buf = buf + start,
+ }
+ };
+
+ if (i2c_transfer(adapter, msgs, 2) == 2)
+ return 0;
+
+ return -1;
+}
+
+static u8 *
+drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
+{
+ int i, j = 0;
+ u8 *block, *new;
+
+ if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
+ return NULL;
+
+ /* base block fetch */
+ for (i = 0; i < 4; i++) {
+ if (drm_do_probe_ddc_edid(adapter, block, 0, EDID_LENGTH))
+ goto out;
+ if (drm_edid_block_valid(block))
+ break;
+ }
+ if (i == 4)
+ goto carp;
+
+ /* if there's no extensions, we're done */
+ if (block[0x7e] == 0)
+ return block;
+
+ new = krealloc(block, (block[0x7e] + 1) * EDID_LENGTH, GFP_KERNEL);
+ if (!new)
+ goto out;
+ block = new;
+
+ for (j = 1; j <= block[0x7e]; j++) {
+ for (i = 0; i < 4; i++) {
+ if (drm_do_probe_ddc_edid(adapter, block, j,
+ EDID_LENGTH))
+ goto out;
+ if (drm_edid_block_valid(block + j * EDID_LENGTH))
+ break;
+ }
+ if (i == 4)
+ goto carp;
+ }
+
+ return block;
+
+carp:
+ dev_warn(&connector->dev->pdev->dev, "%s: EDID block %d invalid.\n",
+ drm_get_connector_name(connector), j);
+
+out:
+ kfree(block);
+ return NULL;
+}
+
+/**
+ * Probe DDC presence.
+ *
+ * \param adapter : i2c device adaptor
+ * \return 1 on success
+ */
+static bool
+drm_probe_ddc(struct i2c_adapter *adapter)
+{
+ unsigned char out;
+
+ return (drm_do_probe_ddc_edid(adapter, &out, 0, 1) == 0);
+}
+
+/**
+ * drm_get_edid - get EDID data, if available
+ * @connector: connector we're probing
+ * @adapter: i2c adapter to use for DDC
+ *
+ * Poke the given i2c channel to grab EDID data if possible. If found,
+ * attach it to the connector.
+ *
+ * Return edid data or NULL if we couldn't find any.
+ */
+struct edid *drm_get_edid(struct drm_connector *connector,
+ struct i2c_adapter *adapter)
+{
+ struct edid *edid = NULL;
+
+ if (drm_probe_ddc(adapter))
+ edid = (struct edid *)drm_do_get_edid(connector, adapter);
+
+ connector->display_info.raw_edid = (char *)edid;
+
+ return edid;
+
+}
+EXPORT_SYMBOL(drm_get_edid);
+
+/*** EDID parsing ***/
+
/**
* edid_vendor - match a string against EDID's obfuscated vendor field
* @edid: EDID to match
@@ -335,7 +499,7 @@ static struct drm_display_mode drm_dmt_modes[] = {
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1024x768@85Hz */
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 94500, 1024, 1072,
- 1072, 1376, 0, 768, 769, 772, 808, 0,
+ 1168, 1376, 0, 768, 769, 772, 808, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1152x864@75Hz */
{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
@@ -426,7 +590,7 @@ static struct drm_display_mode drm_dmt_modes[] = {
1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1600x1200@75Hz */
- { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 2025000, 1600, 1664,
+ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 202500, 1600, 1664,
1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1600x1200@85Hz */
@@ -497,8 +661,8 @@ static struct drm_display_mode drm_dmt_modes[] = {
static const int drm_num_dmt_modes =
sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
-static struct drm_display_mode *drm_find_dmt(struct drm_device *dev,
- int hsize, int vsize, int fresh)
+struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
+ int hsize, int vsize, int fresh)
{
int i;
struct drm_display_mode *ptr, *mode;
@@ -516,6 +680,111 @@ static struct drm_display_mode *drm_find_dmt(struct drm_device *dev,
}
return mode;
}
+EXPORT_SYMBOL(drm_mode_find_dmt);
+
+typedef void detailed_cb(struct detailed_timing *timing, void *closure);
+
+static void
+drm_for_each_detailed_block(u8 *raw_edid, detailed_cb *cb, void *closure)
+{
+ int i;
+ struct edid *edid = (struct edid *)raw_edid;
+
+ if (edid == NULL)
+ return;
+
+ for (i = 0; i < EDID_DETAILED_TIMINGS; i++)
+ cb(&(edid->detailed_timings[i]), closure);
+
+ /* XXX extension block walk */
+}
+
+static void
+is_rb(struct detailed_timing *t, void *data)
+{
+ u8 *r = (u8 *)t;
+ if (r[3] == EDID_DETAIL_MONITOR_RANGE)
+ if (r[15] & 0x10)
+ *(bool *)data = true;
+}
+
+/* EDID 1.4 defines this explicitly. For EDID 1.3, we guess, badly. */
+static bool
+drm_monitor_supports_rb(struct edid *edid)
+{
+ if (edid->revision >= 4) {
+ bool ret;
+ drm_for_each_detailed_block((u8 *)edid, is_rb, &ret);
+ return ret;
+ }
+
+ return ((edid->input & DRM_EDID_INPUT_DIGITAL) != 0);
+}
+
+static void
+find_gtf2(struct detailed_timing *t, void *data)
+{
+ u8 *r = (u8 *)t;
+ if (r[3] == EDID_DETAIL_MONITOR_RANGE && r[10] == 0x02)
+ *(u8 **)data = r;
+}
+
+/* Secondary GTF curve kicks in above some break frequency */
+static int
+drm_gtf2_hbreak(struct edid *edid)
+{
+ u8 *r = NULL;
+ drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
+ return r ? (r[12] * 2) : 0;
+}
+
+static int
+drm_gtf2_2c(struct edid *edid)
+{
+ u8 *r = NULL;
+ drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
+ return r ? r[13] : 0;
+}
+
+static int
+drm_gtf2_m(struct edid *edid)
+{
+ u8 *r = NULL;
+ drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
+ return r ? (r[15] << 8) + r[14] : 0;
+}
+
+static int
+drm_gtf2_k(struct edid *edid)
+{
+ u8 *r = NULL;
+ drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
+ return r ? r[16] : 0;
+}
+
+static int
+drm_gtf2_2j(struct edid *edid)
+{
+ u8 *r = NULL;
+ drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
+ return r ? r[17] : 0;
+}
+
+/**
+ * standard_timing_level - get std. timing level(CVT/GTF/DMT)
+ * @edid: EDID block to scan
+ */
+static int standard_timing_level(struct edid *edid)
+{
+ if (edid->revision >= 2) {
+ if (edid->revision >= 4 && (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF))
+ return LEVEL_CVT;
+ if (drm_gtf2_hbreak(edid))
+ return LEVEL_GTF2;
+ return LEVEL_GTF;
+ }
+ return LEVEL_DMT;
+}
/*
* 0 is reserved. The spec says 0x01 fill for unused timings. Some old
@@ -536,22 +805,20 @@ bad_std_timing(u8 a, u8 b)
*
* Take the standard timing params (in this case width, aspect, and refresh)
* and convert them into a real mode using CVT/GTF/DMT.
- *
- * Punts for now, but should eventually use the FB layer's CVT based mode
- * generation code.
*/
-struct drm_display_mode *drm_mode_std(struct drm_device *dev,
- struct std_timing *t,
- int revision,
- int timing_level)
+static struct drm_display_mode *
+drm_mode_std(struct drm_connector *connector, struct edid *edid,
+ struct std_timing *t, int revision)
{
- struct drm_display_mode *mode;
+ struct drm_device *dev = connector->dev;
+ struct drm_display_mode *m, *mode = NULL;
int hsize, vsize;
int vrefresh_rate;
unsigned aspect_ratio = (t->vfreq_aspect & EDID_TIMING_ASPECT_MASK)
>> EDID_TIMING_ASPECT_SHIFT;
unsigned vfreq = (t->vfreq_aspect & EDID_TIMING_VFREQ_MASK)
>> EDID_TIMING_VFREQ_SHIFT;
+ int timing_level = standard_timing_level(edid);
if (bad_std_timing(t->hsize, t->vfreq_aspect))
return NULL;
@@ -572,18 +839,38 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev,
vsize = (hsize * 4) / 5;
else
vsize = (hsize * 9) / 16;
- /* HDTV hack */
- if (hsize == 1360 && vsize == 765 && vrefresh_rate == 60) {
- mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0,
+
+ /* HDTV hack, part 1 */
+ if (vrefresh_rate == 60 &&
+ ((hsize == 1360 && vsize == 765) ||
+ (hsize == 1368 && vsize == 769))) {
+ hsize = 1366;
+ vsize = 768;
+ }
+
+ /*
+ * If this connector already has a mode for this size and refresh
+ * rate (because it came from detailed or CVT info), use that
+ * instead. This way we don't have to guess at interlace or
+ * reduced blanking.
+ */
+ list_for_each_entry(m, &connector->probed_modes, head)
+ if (m->hdisplay == hsize && m->vdisplay == vsize &&
+ drm_mode_vrefresh(m) == vrefresh_rate)
+ return NULL;
+
+ /* HDTV hack, part 2 */
+ if (hsize == 1366 && vsize == 768 && vrefresh_rate == 60) {
+ mode = drm_cvt_mode(dev, 1366, 768, vrefresh_rate, 0, 0,
false);
mode->hdisplay = 1366;
mode->vsync_start = mode->vsync_start - 1;
mode->vsync_end = mode->vsync_end - 1;
return mode;
}
- mode = NULL;
+
/* check whether it can be found in default mode table */
- mode = drm_find_dmt(dev, hsize, vsize, vrefresh_rate);
+ mode = drm_mode_find_dmt(dev, hsize, vsize, vrefresh_rate);
if (mode)
return mode;
@@ -593,6 +880,23 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev,
case LEVEL_GTF:
mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
break;
+ case LEVEL_GTF2:
+ /*
+ * This is potentially wrong if there's ever a monitor with
+ * more than one ranges section, each claiming a different
+ * secondary GTF curve. Please don't do that.
+ */
+ mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
+ if (drm_mode_hsync(mode) > drm_gtf2_hbreak(edid)) {
+ kfree(mode);
+ mode = drm_gtf_mode_complex(dev, hsize, vsize,
+ vrefresh_rate, 0, 0,
+ drm_gtf2_m(edid),
+ drm_gtf2_2c(edid),
+ drm_gtf2_k(edid),
+ drm_gtf2_2j(edid));
+ }
+ break;
case LEVEL_CVT:
mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0,
false);
@@ -716,10 +1020,10 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
if (mode->vsync_end > mode->vtotal)
mode->vtotal = mode->vsync_end + 1;
- drm_mode_set_name(mode);
-
drm_mode_do_interlace_quirk(mode, pt);
+ drm_mode_set_name(mode);
+
if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE;
}
@@ -802,10 +1106,6 @@ static struct drm_display_mode edid_est_modes[] = {
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1152x864@75Hz */
};
-#define EDID_EST_TIMINGS 16
-#define EDID_STD_TIMINGS 8
-#define EDID_DETAILED_TIMINGS 4
-
/**
* add_established_modes - get est. modes from EDID and add them
* @edid: EDID block to scan
@@ -833,19 +1133,6 @@ static int add_established_modes(struct drm_connector *connector, struct edid *e
return modes;
}
-/**
- * stanard_timing_level - get std. timing level(CVT/GTF/DMT)
- * @edid: EDID block to scan
- */
-static int standard_timing_level(struct edid *edid)
-{
- if (edid->revision >= 2) {
- if (edid->revision >= 4 && (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF))
- return LEVEL_CVT;
- return LEVEL_GTF;
- }
- return LEVEL_DMT;
-}
/**
* add_standard_modes - get std. modes from EDID and add them
@@ -856,22 +1143,14 @@ static int standard_timing_level(struct edid *edid)
*/
static int add_standard_modes(struct drm_connector *connector, struct edid *edid)
{
- struct drm_device *dev = connector->dev;
int i, modes = 0;
- int timing_level;
-
- timing_level = standard_timing_level(edid);
for (i = 0; i < EDID_STD_TIMINGS; i++) {
- struct std_timing *t = &edid->standard_timings[i];
struct drm_display_mode *newmode;
- /* If std timings bytes are 1, 1 it's empty */
- if (t->hsize == 1 && t->vfreq_aspect == 1)
- continue;
-
- newmode = drm_mode_std(dev, &edid->standard_timings[i],
- edid->revision, timing_level);
+ newmode = drm_mode_std(connector, edid,
+ &edid->standard_timings[i],
+ edid->revision);
if (newmode) {
drm_mode_probed_add(connector, newmode);
modes++;
@@ -881,36 +1160,86 @@ static int add_standard_modes(struct drm_connector *connector, struct edid *edid
return modes;
}
-/*
- * XXX fix this for:
- * - GTF secondary curve formula
- * - EDID 1.4 range offsets
- * - CVT extended bits
- */
static bool
-mode_in_range(struct drm_display_mode *mode, struct detailed_timing *timing)
+mode_is_rb(struct drm_display_mode *mode)
{
- struct detailed_data_monitor_range *range;
- int hsync, vrefresh;
-
- range = &timing->data.other_data.data.range;
+ return (mode->htotal - mode->hdisplay == 160) &&
+ (mode->hsync_end - mode->hdisplay == 80) &&
+ (mode->hsync_end - mode->hsync_start == 32) &&
+ (mode->vsync_start - mode->vdisplay == 3);
+}
+static bool
+mode_in_hsync_range(struct drm_display_mode *mode, struct edid *edid, u8 *t)
+{
+ int hsync, hmin, hmax;
+
+ hmin = t[7];
+ if (edid->revision >= 4)
+ hmin += ((t[4] & 0x04) ? 255 : 0);
+ hmax = t[8];
+ if (edid->revision >= 4)
+ hmax += ((t[4] & 0x08) ? 255 : 0);
hsync = drm_mode_hsync(mode);
- vrefresh = drm_mode_vrefresh(mode);
- if (hsync < range->min_hfreq_khz || hsync > range->max_hfreq_khz)
+ return (hsync <= hmax && hsync >= hmin);
+}
+
+static bool
+mode_in_vsync_range(struct drm_display_mode *mode, struct edid *edid, u8 *t)
+{
+ int vsync, vmin, vmax;
+
+ vmin = t[5];
+ if (edid->revision >= 4)
+ vmin += ((t[4] & 0x01) ? 255 : 0);
+ vmax = t[6];
+ if (edid->revision >= 4)
+ vmax += ((t[4] & 0x02) ? 255 : 0);
+ vsync = drm_mode_vrefresh(mode);
+
+ return (vsync <= vmax && vsync >= vmin);
+}
+
+static u32
+range_pixel_clock(struct edid *edid, u8 *t)
+{
+ /* unspecified */
+ if (t[9] == 0 || t[9] == 255)
+ return 0;
+
+ /* 1.4 with CVT support gives us real precision, yay */
+ if (edid->revision >= 4 && t[10] == 0x04)
+ return (t[9] * 10000) - ((t[12] >> 2) * 250);
+
+ /* 1.3 is pathetic, so fuzz up a bit */
+ return t[9] * 10000 + 5001;
+}
+
+static bool
+mode_in_range(struct drm_display_mode *mode, struct edid *edid,
+ struct detailed_timing *timing)
+{
+ u32 max_clock;
+ u8 *t = (u8 *)timing;
+
+ if (!mode_in_hsync_range(mode, edid, t))
return false;
- if (vrefresh < range->min_vfreq || vrefresh > range->max_vfreq)
+ if (!mode_in_vsync_range(mode, edid, t))
return false;
- if (range->pixel_clock_mhz && range->pixel_clock_mhz != 0xff) {
- /* be forgiving since it's in units of 10MHz */
- int max_clock = range->pixel_clock_mhz * 10 + 9;
- max_clock *= 1000;
+ if ((max_clock = range_pixel_clock(edid, t)))
if (mode->clock > max_clock)
return false;
- }
+
+ /* 1.4 max horizontal check */
+ if (edid->revision >= 4 && t[10] == 0x04)
+ if (t[13] && mode->hdisplay > 8 * (t[13] + (256 * (t[12]&0x3))))
+ return false;
+
+ if (mode_is_rb(mode) && !drm_monitor_supports_rb(edid))
+ return false;
return true;
}
@@ -919,15 +1248,16 @@ mode_in_range(struct drm_display_mode *mode, struct detailed_timing *timing)
* XXX If drm_dmt_modes ever regrows the CVT-R modes (and it will) this will
* need to account for them.
*/
-static int drm_gtf_modes_for_range(struct drm_connector *connector,
- struct detailed_timing *timing)
+static int
+drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid,
+ struct detailed_timing *timing)
{
int i, modes = 0;
struct drm_display_mode *newmode;
struct drm_device *dev = connector->dev;
for (i = 0; i < drm_num_dmt_modes; i++) {
- if (mode_in_range(drm_dmt_modes + i, timing)) {
+ if (mode_in_range(drm_dmt_modes + i, edid, timing)) {
newmode = drm_mode_duplicate(dev, &drm_dmt_modes[i]);
if (newmode) {
drm_mode_probed_add(connector, newmode);
@@ -988,13 +1318,100 @@ static int drm_cvt_modes(struct drm_connector *connector,
return modes;
}
+static const struct {
+ short w;
+ short h;
+ short r;
+ short rb;
+} est3_modes[] = {
+ /* byte 6 */
+ { 640, 350, 85, 0 },
+ { 640, 400, 85, 0 },
+ { 720, 400, 85, 0 },
+ { 640, 480, 85, 0 },
+ { 848, 480, 60, 0 },
+ { 800, 600, 85, 0 },
+ { 1024, 768, 85, 0 },
+ { 1152, 864, 75, 0 },
+ /* byte 7 */
+ { 1280, 768, 60, 1 },
+ { 1280, 768, 60, 0 },
+ { 1280, 768, 75, 0 },
+ { 1280, 768, 85, 0 },
+ { 1280, 960, 60, 0 },
+ { 1280, 960, 85, 0 },
+ { 1280, 1024, 60, 0 },
+ { 1280, 1024, 85, 0 },
+ /* byte 8 */
+ { 1360, 768, 60, 0 },
+ { 1440, 900, 60, 1 },
+ { 1440, 900, 60, 0 },
+ { 1440, 900, 75, 0 },
+ { 1440, 900, 85, 0 },
+ { 1400, 1050, 60, 1 },
+ { 1400, 1050, 60, 0 },
+ { 1400, 1050, 75, 0 },
+ /* byte 9 */
+ { 1400, 1050, 85, 0 },
+ { 1680, 1050, 60, 1 },
+ { 1680, 1050, 60, 0 },
+ { 1680, 1050, 75, 0 },
+ { 1680, 1050, 85, 0 },
+ { 1600, 1200, 60, 0 },
+ { 1600, 1200, 65, 0 },
+ { 1600, 1200, 70, 0 },
+ /* byte 10 */
+ { 1600, 1200, 75, 0 },
+ { 1600, 1200, 85, 0 },
+ { 1792, 1344, 60, 0 },
+ { 1792, 1344, 85, 0 },
+ { 1856, 1392, 60, 0 },
+ { 1856, 1392, 75, 0 },
+ { 1920, 1200, 60, 1 },
+ { 1920, 1200, 60, 0 },
+ /* byte 11 */
+ { 1920, 1200, 75, 0 },
+ { 1920, 1200, 85, 0 },
+ { 1920, 1440, 60, 0 },
+ { 1920, 1440, 75, 0 },
+};
+static const int num_est3_modes = sizeof(est3_modes) / sizeof(est3_modes[0]);
+
+static int
+drm_est3_modes(struct drm_connector *connector, struct detailed_timing *timing)
+{
+ int i, j, m, modes = 0;
+ struct drm_display_mode *mode;
+ u8 *est = ((u8 *)timing) + 5;
+
+ for (i = 0; i < 6; i++) {
+ for (j = 7; j > 0; j--) {
+ m = (i * 8) + (7 - j);
+ if (m >= num_est3_modes)
+ break;
+ if (est[i] & (1 << j)) {
+ mode = drm_mode_find_dmt(connector->dev,
+ est3_modes[m].w,
+ est3_modes[m].h,
+ est3_modes[m].r
+ /*, est3_modes[m].rb */);
+ if (mode) {
+ drm_mode_probed_add(connector, mode);
+ modes++;
+ }
+ }
+ }
+ }
+
+ return modes;
+}
+
static int add_detailed_modes(struct drm_connector *connector,
struct detailed_timing *timing,
struct edid *edid, u32 quirks, int preferred)
{
int i, modes = 0;
struct detailed_non_pixel *data = &timing->data.other_data;
- int timing_level = standard_timing_level(edid);
int gtf = (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF);
struct drm_display_mode *newmode;
struct drm_device *dev = connector->dev;
@@ -1015,7 +1432,8 @@ static int add_detailed_modes(struct drm_connector *connector,
switch (data->type) {
case EDID_DETAIL_MONITOR_RANGE:
if (gtf)
- modes += drm_gtf_modes_for_range(connector, timing);
+ modes += drm_gtf_modes_for_range(connector, edid,
+ timing);
break;
case EDID_DETAIL_STD_MODES:
/* Six modes per detailed section */
@@ -1024,8 +1442,8 @@ static int add_detailed_modes(struct drm_connector *connector,
struct drm_display_mode *newmode;
std = &data->data.timings[i];
- newmode = drm_mode_std(dev, std, edid->revision,
- timing_level);
+ newmode = drm_mode_std(connector, edid, std,
+ edid->revision);
if (newmode) {
drm_mode_probed_add(connector, newmode);
modes++;
@@ -1035,6 +1453,9 @@ static int add_detailed_modes(struct drm_connector *connector,
case EDID_DETAIL_CVT_3BYTE:
modes += drm_cvt_modes(connector, timing);
break;
+ case EDID_DETAIL_EST_TIMINGS:
+ modes += drm_est3_modes(connector, timing);
+ break;
default:
break;
}
@@ -1058,7 +1479,10 @@ static int add_detailed_info(struct drm_connector *connector,
for (i = 0; i < EDID_DETAILED_TIMINGS; i++) {
struct detailed_timing *timing = &edid->detailed_timings[i];
- int preferred = (i == 0) && (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING);
+ int preferred = (i == 0);
+
+ if (preferred && edid->version == 1 && edid->revision < 4)
+ preferred = (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING);
/* In 1.0, only timings are allowed */
if (!timing->pixel_clock && edid->version == 1 &&
@@ -1088,39 +1512,22 @@ static int add_detailed_info_eedid(struct drm_connector *connector,
int i, modes = 0;
char *edid_ext = NULL;
struct detailed_timing *timing;
- int edid_ext_num;
int start_offset, end_offset;
- int timing_level;
- if (edid->version == 1 && edid->revision < 3) {
- /* If the EDID version is less than 1.3, there is no
- * extension EDID.
- */
+ if (edid->version == 1 && edid->revision < 3)
return 0;
- }
- if (!edid->extensions) {
- /* if there is no extension EDID, it is unnecessary to
- * parse the E-EDID to get detailed info
- */
+ if (!edid->extensions)
return 0;
- }
-
- /* Chose real EDID extension number */
- edid_ext_num = edid->extensions > DRM_MAX_EDID_EXT_NUM ?
- DRM_MAX_EDID_EXT_NUM : edid->extensions;
/* Find CEA extension */
- for (i = 0; i < edid_ext_num; i++) {
+ for (i = 0; i < edid->extensions; i++) {
edid_ext = (char *)edid + EDID_LENGTH * (i + 1);
- /* This block is CEA extension */
if (edid_ext[0] == 0x02)
break;
}
- if (i == edid_ext_num) {
- /* if there is no additional timing EDID block, return */
+ if (i == edid->extensions)
return 0;
- }
/* Get the start offset of detailed timing block */
start_offset = edid_ext[2];
@@ -1132,7 +1539,6 @@ static int add_detailed_info_eedid(struct drm_connector *connector,
return 0;
}
- timing_level = standard_timing_level(edid);
end_offset = EDID_LENGTH;
end_offset -= sizeof(struct detailed_timing);
for (i = start_offset; i < end_offset;
@@ -1144,123 +1550,6 @@ static int add_detailed_info_eedid(struct drm_connector *connector,
return modes;
}
-#define DDC_ADDR 0x50
-/**
- * Get EDID information via I2C.
- *
- * \param adapter : i2c device adaptor
- * \param buf : EDID data buffer to be filled
- * \param len : EDID data buffer length
- * \return 0 on success or -1 on failure.
- *
- * Try to fetch EDID information by calling i2c driver function.
- */
-int drm_do_probe_ddc_edid(struct i2c_adapter *adapter,
- unsigned char *buf, int len)
-{
- unsigned char start = 0x0;
- struct i2c_msg msgs[] = {
- {
- .addr = DDC_ADDR,
- .flags = 0,
- .len = 1,
- .buf = &start,
- }, {
- .addr = DDC_ADDR,
- .flags = I2C_M_RD,
- .len = len,
- .buf = buf,
- }
- };
-
- if (i2c_transfer(adapter, msgs, 2) == 2)
- return 0;
-
- return -1;
-}
-EXPORT_SYMBOL(drm_do_probe_ddc_edid);
-
-static int drm_ddc_read_edid(struct drm_connector *connector,
- struct i2c_adapter *adapter,
- char *buf, int len)
-{
- int i;
-
- for (i = 0; i < 4; i++) {
- if (drm_do_probe_ddc_edid(adapter, buf, len))
- return -1;
- if (drm_edid_is_valid((struct edid *)buf))
- return 0;
- }
-
- /* repeated checksum failures; warn, but carry on */
- dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n",
- drm_get_connector_name(connector));
- return -1;
-}
-
-/**
- * drm_get_edid - get EDID data, if available
- * @connector: connector we're probing
- * @adapter: i2c adapter to use for DDC
- *
- * Poke the given connector's i2c channel to grab EDID data if possible.
- *
- * Return edid data or NULL if we couldn't find any.
- */
-struct edid *drm_get_edid(struct drm_connector *connector,
- struct i2c_adapter *adapter)
-{
- int ret;
- struct edid *edid;
-
- edid = kmalloc(EDID_LENGTH * (DRM_MAX_EDID_EXT_NUM + 1),
- GFP_KERNEL);
- if (edid == NULL) {
- dev_warn(&connector->dev->pdev->dev,
- "Failed to allocate EDID\n");
- goto end;
- }
-
- /* Read first EDID block */
- ret = drm_ddc_read_edid(connector, adapter,
- (unsigned char *)edid, EDID_LENGTH);
- if (ret != 0)
- goto clean_up;
-
- /* There are EDID extensions to be read */
- if (edid->extensions != 0) {
- int edid_ext_num = edid->extensions;
-
- if (edid_ext_num > DRM_MAX_EDID_EXT_NUM) {
- dev_warn(&connector->dev->pdev->dev,
- "The number of extension(%d) is "
- "over max (%d), actually read number (%d)\n",
- edid_ext_num, DRM_MAX_EDID_EXT_NUM,
- DRM_MAX_EDID_EXT_NUM);
- /* Reset EDID extension number to be read */
- edid_ext_num = DRM_MAX_EDID_EXT_NUM;
- }
- /* Read EDID including extensions too */
- ret = drm_ddc_read_edid(connector, adapter, (char *)edid,
- EDID_LENGTH * (edid_ext_num + 1));
- if (ret != 0)
- goto clean_up;
-
- }
-
- connector->display_info.raw_edid = (char *)edid;
- goto end;
-
-clean_up:
- kfree(edid);
- edid = NULL;
-end:
- return edid;
-
-}
-EXPORT_SYMBOL(drm_get_edid);
-
#define HDMI_IDENTIFIER 0x000C03
#define VENDOR_BLOCK 0x03
/**
@@ -1273,7 +1562,7 @@ EXPORT_SYMBOL(drm_get_edid);
bool drm_detect_hdmi_monitor(struct edid *edid)
{
char *edid_ext = NULL;
- int i, hdmi_id, edid_ext_num;
+ int i, hdmi_id;
int start_offset, end_offset;
bool is_hdmi = false;
@@ -1281,19 +1570,15 @@ bool drm_detect_hdmi_monitor(struct edid *edid)
if (edid == NULL || edid->extensions == 0)
goto end;
- /* Chose real EDID extension number */
- edid_ext_num = edid->extensions > DRM_MAX_EDID_EXT_NUM ?
- DRM_MAX_EDID_EXT_NUM : edid->extensions;
-
/* Find CEA extension */
- for (i = 0; i < edid_ext_num; i++) {
+ for (i = 0; i < edid->extensions; i++) {
edid_ext = (char *)edid + EDID_LENGTH * (i + 1);
/* This block is CEA extension */
if (edid_ext[0] == 0x02)
break;
}
- if (i == edid_ext_num)
+ if (i == edid->extensions)
goto end;
/* Data block offset in CEA extension block */
@@ -1348,10 +1633,24 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
quirks = edid_get_quirks(edid);
- num_modes += add_established_modes(connector, edid);
- num_modes += add_standard_modes(connector, edid);
+ /*
+ * EDID spec says modes should be preferred in this order:
+ * - preferred detailed mode
+ * - other detailed modes from base block
+ * - detailed modes from extension blocks
+ * - CVT 3-byte code modes
+ * - standard timing codes
+ * - established timing codes
+ * - modes inferred from GTF or CVT range information
+ *
+ * We don't quite implement this yet, but we're close.
+ *
+ * XXX order for additional mode types in extension blocks?
+ */
num_modes += add_detailed_info(connector, edid, quirks);
num_modes += add_detailed_info_eedid(connector, edid, quirks);
+ num_modes += add_standard_modes(connector, edid);
+ num_modes += add_established_modes(connector, edid);
if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
edid_fixup_preferred(connector, quirks);
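On the driver side, the fetch-and-parse path above is normally consumed from a connector's ->get_modes() hook. A hedged sketch; mydrv_get_ddc_adapter() is a placeholder for however a driver locates the DDC i2c bus for this output:

#include <linux/slab.h>
#include "drmP.h"
#include "drm_edid.h"
#include "drm_crtc_helper.h"

struct i2c_adapter *mydrv_get_ddc_adapter(struct drm_connector *connector); /* hypothetical */

static int mydrv_connector_get_modes(struct drm_connector *connector)
{
	struct i2c_adapter *ddc = mydrv_get_ddc_adapter(connector);
	struct edid *edid;
	int count = 0;

	edid = drm_get_edid(connector, ddc);
	if (edid) {
		/* expose the full EDID (base block plus extensions) to userspace */
		drm_mode_connector_update_edid_property(connector, edid);
		/* detailed, standard, established and range-inferred modes */
		count = drm_add_edid_modes(connector, edid);
		/* drm_get_edid() cached the pointer; drop it before freeing */
		connector->display_info.raw_edid = NULL;
		kfree(edid);
	}

	return count;
}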
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 288ea2f32772..b3779d243aef 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -42,15 +42,33 @@ MODULE_LICENSE("GPL and additional rights");
static LIST_HEAD(kernel_fb_helper_list);
-int drm_fb_helper_add_connector(struct drm_connector *connector)
+/* simple single crtc case helper function */
+int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
{
- connector->fb_helper_private = kzalloc(sizeof(struct drm_fb_helper_connector), GFP_KERNEL);
- if (!connector->fb_helper_private)
- return -ENOMEM;
+ struct drm_device *dev = fb_helper->dev;
+ struct drm_connector *connector;
+ int i;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct drm_fb_helper_connector *fb_helper_connector;
+
+ fb_helper_connector = kzalloc(sizeof(struct drm_fb_helper_connector), GFP_KERNEL);
+ if (!fb_helper_connector)
+ goto fail;
+ fb_helper_connector->connector = connector;
+ fb_helper->connector_info[fb_helper->connector_count++] = fb_helper_connector;
+ }
return 0;
+fail:
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ kfree(fb_helper->connector_info[i]);
+ fb_helper->connector_info[i] = NULL;
+ }
+ fb_helper->connector_count = 0;
+ return -ENOMEM;
}
-EXPORT_SYMBOL(drm_fb_helper_add_connector);
+EXPORT_SYMBOL(drm_fb_helper_single_add_all_connectors);
/**
* drm_fb_helper_connector_parse_command_line - parse command line for connector
@@ -65,7 +83,7 @@ EXPORT_SYMBOL(drm_fb_helper_add_connector);
*
* enable/enable Digital/disable bit at the end
*/
-static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *connector,
+static bool drm_fb_helper_connector_parse_command_line(struct drm_fb_helper_connector *fb_helper_conn,
const char *mode_option)
{
const char *name;
@@ -75,13 +93,13 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *con
int yres_specified = 0, cvt = 0, rb = 0, interlace = 0, margins = 0;
int i;
enum drm_connector_force force = DRM_FORCE_UNSPECIFIED;
- struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private;
struct drm_fb_helper_cmdline_mode *cmdline_mode;
+ struct drm_connector *connector = fb_helper_conn->connector;
- if (!fb_help_conn)
+ if (!fb_helper_conn)
return false;
- cmdline_mode = &fb_help_conn->cmdline_mode;
+ cmdline_mode = &fb_helper_conn->cmdline_mode;
if (!mode_option)
mode_option = fb_mode_option;
@@ -204,18 +222,21 @@ done:
return true;
}
-int drm_fb_helper_parse_command_line(struct drm_device *dev)
+static int drm_fb_helper_parse_command_line(struct drm_fb_helper *fb_helper)
{
- struct drm_connector *connector;
+ struct drm_fb_helper_connector *fb_helper_conn;
+ int i;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ for (i = 0; i < fb_helper->connector_count; i++) {
char *option = NULL;
+ fb_helper_conn = fb_helper->connector_info[i];
+
/* do something on return - turn off connector maybe */
- if (fb_get_options(drm_get_connector_name(connector), &option))
+ if (fb_get_options(drm_get_connector_name(fb_helper_conn->connector), &option))
continue;
- drm_fb_helper_connector_parse_command_line(connector, option);
+ drm_fb_helper_connector_parse_command_line(fb_helper_conn, option);
}
return 0;
}
@@ -293,6 +314,7 @@ static void drm_fb_helper_on(struct fb_info *info)
struct drm_fb_helper *fb_helper = info->par;
struct drm_device *dev = fb_helper->dev;
struct drm_crtc *crtc;
+ struct drm_crtc_helper_funcs *crtc_funcs;
struct drm_encoder *encoder;
int i;
@@ -300,33 +322,28 @@ static void drm_fb_helper_on(struct fb_info *info)
* For each CRTC in this fb, turn the crtc on then,
* find all associated encoders and turn them on.
*/
+ mutex_lock(&dev->mode_config.mutex);
for (i = 0; i < fb_helper->crtc_count; i++) {
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct drm_crtc_helper_funcs *crtc_funcs =
- crtc->helper_private;
+ crtc = fb_helper->crtc_info[i].mode_set.crtc;
+ crtc_funcs = crtc->helper_private;
- /* Only mess with CRTCs in this fb */
- if (crtc->base.id != fb_helper->crtc_info[i].crtc_id ||
- !crtc->enabled)
- continue;
+ if (!crtc->enabled)
+ continue;
- mutex_lock(&dev->mode_config.mutex);
- crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
- mutex_unlock(&dev->mode_config.mutex);
+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
- /* Found a CRTC on this fb, now find encoders */
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- if (encoder->crtc == crtc) {
- struct drm_encoder_helper_funcs *encoder_funcs;
- encoder_funcs = encoder->helper_private;
- mutex_lock(&dev->mode_config.mutex);
- encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
- mutex_unlock(&dev->mode_config.mutex);
- }
+ /* Found a CRTC on this fb, now find encoders */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->crtc == crtc) {
+ struct drm_encoder_helper_funcs *encoder_funcs;
+
+ encoder_funcs = encoder->helper_private;
+ encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
}
}
}
+ mutex_unlock(&dev->mode_config.mutex);
}
static void drm_fb_helper_off(struct fb_info *info, int dpms_mode)
@@ -334,6 +351,7 @@ static void drm_fb_helper_off(struct fb_info *info, int dpms_mode)
struct drm_fb_helper *fb_helper = info->par;
struct drm_device *dev = fb_helper->dev;
struct drm_crtc *crtc;
+ struct drm_crtc_helper_funcs *crtc_funcs;
struct drm_encoder *encoder;
int i;
@@ -341,32 +359,26 @@ static void drm_fb_helper_off(struct fb_info *info, int dpms_mode)
* For each CRTC in this fb, find all associated encoders
* and turn them off, then turn off the CRTC.
*/
+ mutex_lock(&dev->mode_config.mutex);
for (i = 0; i < fb_helper->crtc_count; i++) {
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct drm_crtc_helper_funcs *crtc_funcs =
- crtc->helper_private;
+ crtc = fb_helper->crtc_info[i].mode_set.crtc;
+ crtc_funcs = crtc->helper_private;
- /* Only mess with CRTCs in this fb */
- if (crtc->base.id != fb_helper->crtc_info[i].crtc_id ||
- !crtc->enabled)
- continue;
+ if (!crtc->enabled)
+ continue;
- /* Found a CRTC on this fb, now find encoders */
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- if (encoder->crtc == crtc) {
- struct drm_encoder_helper_funcs *encoder_funcs;
+ /* Found a CRTC on this fb, now find encoders */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->crtc == crtc) {
+ struct drm_encoder_helper_funcs *encoder_funcs;
- encoder_funcs = encoder->helper_private;
- mutex_lock(&dev->mode_config.mutex);
- encoder_funcs->dpms(encoder, dpms_mode);
- mutex_unlock(&dev->mode_config.mutex);
- }
+ encoder_funcs = encoder->helper_private;
+ encoder_funcs->dpms(encoder, dpms_mode);
}
- mutex_lock(&dev->mode_config.mutex);
- crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
- mutex_unlock(&dev->mode_config.mutex);
}
+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
}
+ mutex_unlock(&dev->mode_config.mutex);
}
int drm_fb_helper_blank(int blank, struct fb_info *info)
@@ -401,50 +413,81 @@ static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper)
{
int i;
+ for (i = 0; i < helper->connector_count; i++)
+ kfree(helper->connector_info[i]);
+ kfree(helper->connector_info);
for (i = 0; i < helper->crtc_count; i++)
kfree(helper->crtc_info[i].mode_set.connectors);
kfree(helper->crtc_info);
}
-int drm_fb_helper_init_crtc_count(struct drm_fb_helper *helper, int crtc_count, int max_conn_count)
+int drm_fb_helper_init(struct drm_device *dev,
+ struct drm_fb_helper *fb_helper,
+ int crtc_count, int max_conn_count)
{
- struct drm_device *dev = helper->dev;
struct drm_crtc *crtc;
int ret = 0;
int i;
- helper->crtc_info = kcalloc(crtc_count, sizeof(struct drm_fb_helper_crtc), GFP_KERNEL);
- if (!helper->crtc_info)
+ fb_helper->dev = dev;
+
+ INIT_LIST_HEAD(&fb_helper->kernel_fb_list);
+
+ fb_helper->crtc_info = kcalloc(crtc_count, sizeof(struct drm_fb_helper_crtc), GFP_KERNEL);
+ if (!fb_helper->crtc_info)
return -ENOMEM;
- helper->crtc_count = crtc_count;
+ fb_helper->crtc_count = crtc_count;
+ fb_helper->connector_info = kcalloc(dev->mode_config.num_connector, sizeof(struct drm_fb_helper_connector *), GFP_KERNEL);
+ if (!fb_helper->connector_info) {
+ kfree(fb_helper->crtc_info);
+ return -ENOMEM;
+ }
+ fb_helper->connector_count = 0;
for (i = 0; i < crtc_count; i++) {
- helper->crtc_info[i].mode_set.connectors =
+ fb_helper->crtc_info[i].mode_set.connectors =
kcalloc(max_conn_count,
sizeof(struct drm_connector *),
GFP_KERNEL);
- if (!helper->crtc_info[i].mode_set.connectors) {
+ if (!fb_helper->crtc_info[i].mode_set.connectors) {
ret = -ENOMEM;
goto out_free;
}
- helper->crtc_info[i].mode_set.num_connectors = 0;
+ fb_helper->crtc_info[i].mode_set.num_connectors = 0;
}
i = 0;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- helper->crtc_info[i].crtc_id = crtc->base.id;
- helper->crtc_info[i].mode_set.crtc = crtc;
+ fb_helper->crtc_info[i].crtc_id = crtc->base.id;
+ fb_helper->crtc_info[i].mode_set.crtc = crtc;
i++;
}
- helper->conn_limit = max_conn_count;
+ fb_helper->conn_limit = max_conn_count;
return 0;
out_free:
- drm_fb_helper_crtc_free(helper);
+ drm_fb_helper_crtc_free(fb_helper);
return -ENOMEM;
}
-EXPORT_SYMBOL(drm_fb_helper_init_crtc_count);
+EXPORT_SYMBOL(drm_fb_helper_init);
+
+void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
+{
+ if (!list_empty(&fb_helper->kernel_fb_list)) {
+ list_del(&fb_helper->kernel_fb_list);
+ if (list_empty(&kernel_fb_helper_list)) {
+ printk(KERN_INFO "drm: unregistered panic notifier\n");
+ atomic_notifier_chain_unregister(&panic_notifier_list,
+ &paniced);
+ unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
+ }
+ }
+
+ drm_fb_helper_crtc_free(fb_helper);
+
+}
+EXPORT_SYMBOL(drm_fb_helper_fini);
static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, u16 regno, struct fb_info *info)
@@ -508,20 +551,15 @@ static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
- struct drm_device *dev = fb_helper->dev;
+ struct drm_crtc_helper_funcs *crtc_funcs;
u16 *red, *green, *blue, *transp;
struct drm_crtc *crtc;
int i, rc = 0;
int start;
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
- for (i = 0; i < fb_helper->crtc_count; i++) {
- if (crtc->base.id == fb_helper->crtc_info[i].crtc_id)
- break;
- }
- if (i == fb_helper->crtc_count)
- continue;
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ crtc = fb_helper->crtc_info[i].mode_set.crtc;
+ crtc_funcs = crtc->helper_private;
red = cmap->red;
green = cmap->green;
@@ -549,41 +587,6 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
}
EXPORT_SYMBOL(drm_fb_helper_setcmap);
-int drm_fb_helper_setcolreg(unsigned regno,
- unsigned red,
- unsigned green,
- unsigned blue,
- unsigned transp,
- struct fb_info *info)
-{
- struct drm_fb_helper *fb_helper = info->par;
- struct drm_device *dev = fb_helper->dev;
- struct drm_crtc *crtc;
- int i;
- int ret;
-
- if (regno > 255)
- return 1;
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
- for (i = 0; i < fb_helper->crtc_count; i++) {
- if (crtc->base.id == fb_helper->crtc_info[i].crtc_id)
- break;
- }
- if (i == fb_helper->crtc_count)
- continue;
-
- ret = setcolreg(crtc, red, green, blue, regno, info);
- if (ret)
- return ret;
-
- crtc_funcs->load_lut(crtc);
- }
- return 0;
-}
-EXPORT_SYMBOL(drm_fb_helper_setcolreg);
-
int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
struct fb_info *info)
{
@@ -687,23 +690,21 @@ int drm_fb_helper_set_par(struct fb_info *info)
return -EINVAL;
}
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-
- for (i = 0; i < fb_helper->crtc_count; i++) {
- if (crtc->base.id == fb_helper->crtc_info[i].crtc_id)
- break;
- }
- if (i == fb_helper->crtc_count)
- continue;
-
- if (crtc->fb == fb_helper->crtc_info[i].mode_set.fb) {
- mutex_lock(&dev->mode_config.mutex);
- ret = crtc->funcs->set_config(&fb_helper->crtc_info[i].mode_set);
+ mutex_lock(&dev->mode_config.mutex);
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ crtc = fb_helper->crtc_info[i].mode_set.crtc;
+ ret = crtc->funcs->set_config(&fb_helper->crtc_info[i].mode_set);
+ if (ret) {
mutex_unlock(&dev->mode_config.mutex);
- if (ret)
- return ret;
+ return ret;
}
}
+ mutex_unlock(&dev->mode_config.mutex);
+
+ if (fb_helper->delayed_hotplug) {
+ fb_helper->delayed_hotplug = false;
+ drm_fb_helper_hotplug_event(fb_helper);
+ }
return 0;
}
EXPORT_SYMBOL(drm_fb_helper_set_par);
@@ -718,14 +719,9 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
int ret = 0;
int i;
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- for (i = 0; i < fb_helper->crtc_count; i++) {
- if (crtc->base.id == fb_helper->crtc_info[i].crtc_id)
- break;
- }
-
- if (i == fb_helper->crtc_count)
- continue;
+ mutex_lock(&dev->mode_config.mutex);
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ crtc = fb_helper->crtc_info[i].mode_set.crtc;
modeset = &fb_helper->crtc_info[i].mode_set;
@@ -733,209 +729,138 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
modeset->y = var->yoffset;
if (modeset->num_connectors) {
- mutex_lock(&dev->mode_config.mutex);
ret = crtc->funcs->set_config(modeset);
- mutex_unlock(&dev->mode_config.mutex);
if (!ret) {
info->var.xoffset = var->xoffset;
info->var.yoffset = var->yoffset;
}
}
}
+ mutex_unlock(&dev->mode_config.mutex);
return ret;
}
EXPORT_SYMBOL(drm_fb_helper_pan_display);
-int drm_fb_helper_single_fb_probe(struct drm_device *dev,
- int preferred_bpp,
- int (*fb_create)(struct drm_device *dev,
- uint32_t fb_width,
- uint32_t fb_height,
- uint32_t surface_width,
- uint32_t surface_height,
- uint32_t surface_depth,
- uint32_t surface_bpp,
- struct drm_framebuffer **fb_ptr))
+int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
+ int preferred_bpp)
{
- struct drm_crtc *crtc;
- struct drm_connector *connector;
- unsigned int fb_width = (unsigned)-1, fb_height = (unsigned)-1;
- unsigned int surface_width = 0, surface_height = 0;
int new_fb = 0;
int crtc_count = 0;
- int ret, i, conn_count = 0;
+ int i;
struct fb_info *info;
- struct drm_framebuffer *fb;
- struct drm_mode_set *modeset = NULL;
- struct drm_fb_helper *fb_helper;
- uint32_t surface_depth = 24, surface_bpp = 32;
+ struct drm_fb_helper_surface_size sizes;
+ int gamma_size = 0;
+
+ memset(&sizes, 0, sizeof(struct drm_fb_helper_surface_size));
+ sizes.surface_depth = 24;
+ sizes.surface_bpp = 32;
+ sizes.fb_width = (unsigned)-1;
+ sizes.fb_height = (unsigned)-1;
/* if driver picks 8 or 16 by default use that
for both depth/bpp */
- if (preferred_bpp != surface_bpp) {
- surface_depth = surface_bpp = preferred_bpp;
+ if (preferred_bpp != sizes.surface_bpp) {
+ sizes.surface_depth = sizes.surface_bpp = preferred_bpp;
}
/* first up get a count of crtcs now in use and new min/maxes width/heights */
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private;
-
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ struct drm_fb_helper_connector *fb_helper_conn = fb_helper->connector_info[i];
struct drm_fb_helper_cmdline_mode *cmdline_mode;
- if (!fb_help_conn)
- continue;
-
- cmdline_mode = &fb_help_conn->cmdline_mode;
+ cmdline_mode = &fb_helper_conn->cmdline_mode;
if (cmdline_mode->bpp_specified) {
switch (cmdline_mode->bpp) {
case 8:
- surface_depth = surface_bpp = 8;
+ sizes.surface_depth = sizes.surface_bpp = 8;
break;
case 15:
- surface_depth = 15;
- surface_bpp = 16;
+ sizes.surface_depth = 15;
+ sizes.surface_bpp = 16;
break;
case 16:
- surface_depth = surface_bpp = 16;
+ sizes.surface_depth = sizes.surface_bpp = 16;
break;
case 24:
- surface_depth = surface_bpp = 24;
+ sizes.surface_depth = sizes.surface_bpp = 24;
break;
case 32:
- surface_depth = 24;
- surface_bpp = 32;
+ sizes.surface_depth = 24;
+ sizes.surface_bpp = 32;
break;
}
break;
}
}
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- if (drm_helper_crtc_in_use(crtc)) {
- if (crtc->desired_mode) {
- if (crtc->desired_mode->hdisplay < fb_width)
- fb_width = crtc->desired_mode->hdisplay;
-
- if (crtc->desired_mode->vdisplay < fb_height)
- fb_height = crtc->desired_mode->vdisplay;
-
- if (crtc->desired_mode->hdisplay > surface_width)
- surface_width = crtc->desired_mode->hdisplay;
-
- if (crtc->desired_mode->vdisplay > surface_height)
- surface_height = crtc->desired_mode->vdisplay;
- }
+ crtc_count = 0;
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ struct drm_display_mode *desired_mode;
+ desired_mode = fb_helper->crtc_info[i].desired_mode;
+
+ if (desired_mode) {
+ if (gamma_size == 0)
+ gamma_size = fb_helper->crtc_info[i].mode_set.crtc->gamma_size;
+ if (desired_mode->hdisplay < sizes.fb_width)
+ sizes.fb_width = desired_mode->hdisplay;
+ if (desired_mode->vdisplay < sizes.fb_height)
+ sizes.fb_height = desired_mode->vdisplay;
+ if (desired_mode->hdisplay > sizes.surface_width)
+ sizes.surface_width = desired_mode->hdisplay;
+ if (desired_mode->vdisplay > sizes.surface_height)
+ sizes.surface_height = desired_mode->vdisplay;
crtc_count++;
}
}
- if (crtc_count == 0 || fb_width == -1 || fb_height == -1) {
+ if (crtc_count == 0 || sizes.fb_width == -1 || sizes.fb_height == -1) {
/* hmm everyone went away - assume VGA cable just fell out
and will come back later. */
- return 0;
+ DRM_INFO("Cannot find any crtc or sizes - going 1024x768\n");
+ sizes.fb_width = sizes.surface_width = 1024;
+ sizes.fb_height = sizes.surface_height = 768;
}
- /* do we have an fb already? */
- if (list_empty(&dev->mode_config.fb_kernel_list)) {
- ret = (*fb_create)(dev, fb_width, fb_height, surface_width,
- surface_height, surface_depth, surface_bpp,
- &fb);
- if (ret)
- return -EINVAL;
- new_fb = 1;
- } else {
- fb = list_first_entry(&dev->mode_config.fb_kernel_list,
- struct drm_framebuffer, filp_head);
-
- /* if someone hotplugs something bigger than we have already allocated, we are pwned.
- As really we can't resize an fbdev that is in the wild currently due to fbdev
- not really being designed for the lower layers moving stuff around under it.
- - so in the grand style of things - punt. */
- if ((fb->width < surface_width) ||
- (fb->height < surface_height)) {
- DRM_ERROR("Framebuffer not large enough to scale console onto.\n");
- return -EINVAL;
- }
- }
-
- info = fb->fbdev;
- fb_helper = info->par;
-
- crtc_count = 0;
- /* okay we need to setup new connector sets in the crtcs */
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- modeset = &fb_helper->crtc_info[crtc_count].mode_set;
- modeset->fb = fb;
- conn_count = 0;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- if (connector->encoder)
- if (connector->encoder->crtc == modeset->crtc) {
- modeset->connectors[conn_count] = connector;
- conn_count++;
- if (conn_count > fb_helper->conn_limit)
- BUG();
- }
- }
-
- for (i = conn_count; i < fb_helper->conn_limit; i++)
- modeset->connectors[i] = NULL;
+ /* push down into drivers */
+ new_fb = (*fb_helper->funcs->fb_probe)(fb_helper, &sizes);
+ if (new_fb < 0)
+ return new_fb;
- modeset->crtc = crtc;
- crtc_count++;
+ info = fb_helper->fbdev;
- modeset->num_connectors = conn_count;
- if (modeset->crtc->desired_mode) {
- if (modeset->mode)
- drm_mode_destroy(dev, modeset->mode);
- modeset->mode = drm_mode_duplicate(dev,
- modeset->crtc->desired_mode);
- }
+ /* set the fb pointer */
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ fb_helper->crtc_info[i].mode_set.fb = fb_helper->fb;
}
- fb_helper->crtc_count = crtc_count;
- fb_helper->fb = fb;
if (new_fb) {
info->var.pixclock = 0;
- ret = fb_alloc_cmap(&info->cmap, modeset->crtc->gamma_size, 0);
- if (ret)
- return ret;
if (register_framebuffer(info) < 0) {
- fb_dealloc_cmap(&info->cmap);
return -EINVAL;
}
+
+ printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
+ info->fix.id);
+
} else {
drm_fb_helper_set_par(info);
}
- printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
- info->fix.id);
/* Switch back to kernel console on panic */
/* multi card linked list maybe */
if (list_empty(&kernel_fb_helper_list)) {
- printk(KERN_INFO "registered panic notifier\n");
+ printk(KERN_INFO "drm: registered panic notifier\n");
atomic_notifier_chain_register(&panic_notifier_list,
&paniced);
register_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
}
- list_add(&fb_helper->kernel_fb_list, &kernel_fb_helper_list);
+ if (new_fb)
+ list_add(&fb_helper->kernel_fb_list, &kernel_fb_helper_list);
+
return 0;
}
EXPORT_SYMBOL(drm_fb_helper_single_fb_probe);
-void drm_fb_helper_free(struct drm_fb_helper *helper)
-{
- list_del(&helper->kernel_fb_list);
- if (list_empty(&kernel_fb_helper_list)) {
- printk(KERN_INFO "unregistered panic notifier\n");
- atomic_notifier_chain_unregister(&panic_notifier_list,
- &paniced);
- unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
- }
- drm_fb_helper_crtc_free(helper);
- fb_dealloc_cmap(&helper->fb->fbdev->cmap);
-}
-EXPORT_SYMBOL(drm_fb_helper_free);
-
void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
uint32_t depth)
{
@@ -954,10 +879,11 @@ void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
}
EXPORT_SYMBOL(drm_fb_helper_fill_fix);
-void drm_fb_helper_fill_var(struct fb_info *info, struct drm_framebuffer *fb,
+void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper,
uint32_t fb_width, uint32_t fb_height)
{
- info->pseudo_palette = fb->pseudo_palette;
+ struct drm_framebuffer *fb = fb_helper->fb;
+ info->pseudo_palette = fb_helper->pseudo_palette;
info->var.xres_virtual = fb->width;
info->var.yres_virtual = fb->height;
info->var.bits_per_pixel = fb->bits_per_pixel;
@@ -1025,3 +951,457 @@ void drm_fb_helper_fill_var(struct fb_info *info, struct drm_framebuffer *fb,
info->var.yres = fb_height;
}
EXPORT_SYMBOL(drm_fb_helper_fill_var);
+
+static int drm_fb_helper_probe_connector_modes(struct drm_fb_helper *fb_helper,
+ uint32_t maxX,
+ uint32_t maxY)
+{
+ struct drm_connector *connector;
+ int count = 0;
+ int i;
+
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ connector = fb_helper->connector_info[i]->connector;
+ count += connector->funcs->fill_modes(connector, maxX, maxY);
+ }
+
+ return count;
+}
+
+static struct drm_display_mode *drm_has_preferred_mode(struct drm_fb_helper_connector *fb_connector, int width, int height)
+{
+ struct drm_display_mode *mode;
+
+ list_for_each_entry(mode, &fb_connector->connector->modes, head) {
+ if (drm_mode_width(mode) > width ||
+ drm_mode_height(mode) > height)
+ continue;
+ if (mode->type & DRM_MODE_TYPE_PREFERRED)
+ return mode;
+ }
+ return NULL;
+}
+
+static bool drm_has_cmdline_mode(struct drm_fb_helper_connector *fb_connector)
+{
+ struct drm_fb_helper_cmdline_mode *cmdline_mode;
+ cmdline_mode = &fb_connector->cmdline_mode;
+ return cmdline_mode->specified;
+}
+
+static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn,
+ int width, int height)
+{
+ struct drm_fb_helper_cmdline_mode *cmdline_mode;
+ struct drm_display_mode *mode = NULL;
+
+ cmdline_mode = &fb_helper_conn->cmdline_mode;
+ if (cmdline_mode->specified == false)
+ return mode;
+
+ /* attempt to find a matching mode in the list of modes
+ * we have gotten so far, if not add a CVT mode that conforms
+ */
+ if (cmdline_mode->rb || cmdline_mode->margins)
+ goto create_mode;
+
+ list_for_each_entry(mode, &fb_helper_conn->connector->modes, head) {
+ /* check width/height */
+ if (mode->hdisplay != cmdline_mode->xres ||
+ mode->vdisplay != cmdline_mode->yres)
+ continue;
+
+ if (cmdline_mode->refresh_specified) {
+ if (mode->vrefresh != cmdline_mode->refresh)
+ continue;
+ }
+
+ if (cmdline_mode->interlace) {
+ if (!(mode->flags & DRM_MODE_FLAG_INTERLACE))
+ continue;
+ }
+ return mode;
+ }
+
+create_mode:
+ mode = drm_cvt_mode(fb_helper_conn->connector->dev, cmdline_mode->xres,
+ cmdline_mode->yres,
+ cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60,
+ cmdline_mode->rb, cmdline_mode->interlace,
+ cmdline_mode->margins);
+ drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
+ list_add(&mode->head, &fb_helper_conn->connector->modes);
+ return mode;
+}
+
+static bool drm_connector_enabled(struct drm_connector *connector, bool strict)
+{
+ bool enable;
+
+ if (strict) {
+ enable = connector->status == connector_status_connected;
+ } else {
+ enable = connector->status != connector_status_disconnected;
+ }
+ return enable;
+}
+
+static void drm_enable_connectors(struct drm_fb_helper *fb_helper,
+ bool *enabled)
+{
+ bool any_enabled = false;
+ struct drm_connector *connector;
+ int i = 0;
+
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ connector = fb_helper->connector_info[i]->connector;
+ enabled[i] = drm_connector_enabled(connector, true);
+ DRM_DEBUG_KMS("connector %d enabled? %s\n", connector->base.id,
+ enabled[i] ? "yes" : "no");
+ any_enabled |= enabled[i];
+ }
+
+ if (any_enabled)
+ return;
+
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ connector = fb_helper->connector_info[i]->connector;
+ enabled[i] = drm_connector_enabled(connector, false);
+ }
+}
+
+static bool drm_target_cloned(struct drm_fb_helper *fb_helper,
+ struct drm_display_mode **modes,
+ bool *enabled, int width, int height)
+{
+ int count, i, j;
+ bool can_clone = false;
+ struct drm_fb_helper_connector *fb_helper_conn;
+ struct drm_display_mode *dmt_mode, *mode;
+
+ /* only contemplate cloning in the single crtc case */
+ if (fb_helper->crtc_count > 1)
+ return false;
+
+ count = 0;
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ if (enabled[i])
+ count++;
+ }
+
+ /* only contemplate cloning if more than one connector is enabled */
+ if (count <= 1)
+ return false;
+
+ /* check the command line or if nothing common pick 1024x768 */
+ can_clone = true;
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ if (!enabled[i])
+ continue;
+ fb_helper_conn = fb_helper->connector_info[i];
+ modes[i] = drm_pick_cmdline_mode(fb_helper_conn, width, height);
+ if (!modes[i]) {
+ can_clone = false;
+ break;
+ }
+ for (j = 0; j < i; j++) {
+ if (!enabled[j])
+ continue;
+ if (!drm_mode_equal(modes[j], modes[i]))
+ can_clone = false;
+ }
+ }
+
+ if (can_clone) {
+ DRM_DEBUG_KMS("can clone using command line\n");
+ return true;
+ }
+
+ /* try and find a 1024x768 mode on each connector */
+ can_clone = true;
+ dmt_mode = drm_mode_find_dmt(fb_helper->dev, 1024, 768, 60);
+
+ for (i = 0; i < fb_helper->connector_count; i++) {
+
+ if (!enabled[i])
+ continue;
+
+ fb_helper_conn = fb_helper->connector_info[i];
+ list_for_each_entry(mode, &fb_helper_conn->connector->modes, head) {
+ if (drm_mode_equal(mode, dmt_mode))
+ modes[i] = mode;
+ }
+ if (!modes[i])
+ can_clone = false;
+ }
+
+ if (can_clone) {
+ DRM_DEBUG_KMS("can clone using 1024x768\n");
+ return true;
+ }
+ DRM_INFO("kms: can't enable cloning when we probably wanted to.\n");
+ return false;
+}
+
+static bool drm_target_preferred(struct drm_fb_helper *fb_helper,
+ struct drm_display_mode **modes,
+ bool *enabled, int width, int height)
+{
+ struct drm_fb_helper_connector *fb_helper_conn;
+ int i;
+
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ fb_helper_conn = fb_helper->connector_info[i];
+
+ if (enabled[i] == false)
+ continue;
+
+ DRM_DEBUG_KMS("looking for cmdline mode on connector %d\n",
+ fb_helper_conn->connector->base.id);
+
+		/* look for a command line mode first */
+ modes[i] = drm_pick_cmdline_mode(fb_helper_conn, width, height);
+ if (!modes[i]) {
+ DRM_DEBUG_KMS("looking for preferred mode on connector %d\n",
+ fb_helper_conn->connector->base.id);
+ modes[i] = drm_has_preferred_mode(fb_helper_conn, width, height);
+ }
+ /* No preferred modes, pick one off the list */
+ if (!modes[i] && !list_empty(&fb_helper_conn->connector->modes)) {
+ list_for_each_entry(modes[i], &fb_helper_conn->connector->modes, head)
+ break;
+ }
+ DRM_DEBUG_KMS("found mode %s\n", modes[i] ? modes[i]->name :
+ "none");
+ }
+ return true;
+}
+
+static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
+ struct drm_fb_helper_crtc **best_crtcs,
+ struct drm_display_mode **modes,
+ int n, int width, int height)
+{
+ int c, o;
+ struct drm_device *dev = fb_helper->dev;
+ struct drm_connector *connector;
+ struct drm_connector_helper_funcs *connector_funcs;
+ struct drm_encoder *encoder;
+ struct drm_fb_helper_crtc *best_crtc;
+ int my_score, best_score, score;
+ struct drm_fb_helper_crtc **crtcs, *crtc;
+ struct drm_fb_helper_connector *fb_helper_conn;
+
+ if (n == fb_helper->connector_count)
+ return 0;
+
+ fb_helper_conn = fb_helper->connector_info[n];
+ connector = fb_helper_conn->connector;
+
+ best_crtcs[n] = NULL;
+ best_crtc = NULL;
+ best_score = drm_pick_crtcs(fb_helper, best_crtcs, modes, n+1, width, height);
+ if (modes[n] == NULL)
+ return best_score;
+
+ crtcs = kzalloc(dev->mode_config.num_connector *
+ sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL);
+ if (!crtcs)
+ return best_score;
+
+ my_score = 1;
+ if (connector->status == connector_status_connected)
+ my_score++;
+ if (drm_has_cmdline_mode(fb_helper_conn))
+ my_score++;
+ if (drm_has_preferred_mode(fb_helper_conn, width, height))
+ my_score++;
+
+ connector_funcs = connector->helper_private;
+ encoder = connector_funcs->best_encoder(connector);
+ if (!encoder)
+ goto out;
+
+ /* select a crtc for this connector and then attempt to configure
+ remaining connectors */
+ for (c = 0; c < fb_helper->crtc_count; c++) {
+ crtc = &fb_helper->crtc_info[c];
+
+ if ((encoder->possible_crtcs & (1 << c)) == 0) {
+ continue;
+ }
+
+ for (o = 0; o < n; o++)
+ if (best_crtcs[o] == crtc)
+ break;
+
+ if (o < n) {
+ /* ignore cloning unless only a single crtc */
+ if (fb_helper->crtc_count > 1)
+ continue;
+
+ if (!drm_mode_equal(modes[o], modes[n]))
+ continue;
+ }
+
+ crtcs[n] = crtc;
+ memcpy(crtcs, best_crtcs, n * sizeof(struct drm_fb_helper_crtc *));
+ score = my_score + drm_pick_crtcs(fb_helper, crtcs, modes, n + 1,
+ width, height);
+ if (score > best_score) {
+ best_crtc = crtc;
+ best_score = score;
+ memcpy(best_crtcs, crtcs,
+ dev->mode_config.num_connector *
+ sizeof(struct drm_fb_helper_crtc *));
+ }
+ }
+out:
+ kfree(crtcs);
+ return best_score;
+}
+
+static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
+{
+ struct drm_device *dev = fb_helper->dev;
+ struct drm_fb_helper_crtc **crtcs;
+ struct drm_display_mode **modes;
+ struct drm_encoder *encoder;
+ struct drm_mode_set *modeset;
+ bool *enabled;
+ int width, height;
+ int i, ret;
+
+ DRM_DEBUG_KMS("\n");
+
+ width = dev->mode_config.max_width;
+ height = dev->mode_config.max_height;
+
+ /* clean out all the encoder/crtc combos */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ encoder->crtc = NULL;
+ }
+
+ crtcs = kcalloc(dev->mode_config.num_connector,
+ sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL);
+ modes = kcalloc(dev->mode_config.num_connector,
+ sizeof(struct drm_display_mode *), GFP_KERNEL);
+ enabled = kcalloc(dev->mode_config.num_connector,
+ sizeof(bool), GFP_KERNEL);
+
+ drm_enable_connectors(fb_helper, enabled);
+
+ ret = drm_target_cloned(fb_helper, modes, enabled, width, height);
+ if (!ret) {
+ ret = drm_target_preferred(fb_helper, modes, enabled, width, height);
+ if (!ret)
+ DRM_ERROR("Unable to find initial modes\n");
+ }
+
+ DRM_DEBUG_KMS("picking CRTCs for %dx%d config\n", width, height);
+
+ drm_pick_crtcs(fb_helper, crtcs, modes, 0, width, height);
+
+ /* need to set the modesets up here for use later */
+ /* fill out the connector<->crtc mappings into the modesets */
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ modeset = &fb_helper->crtc_info[i].mode_set;
+ modeset->num_connectors = 0;
+ }
+
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ struct drm_display_mode *mode = modes[i];
+ struct drm_fb_helper_crtc *fb_crtc = crtcs[i];
+ modeset = &fb_crtc->mode_set;
+
+ if (mode && fb_crtc) {
+ DRM_DEBUG_KMS("desired mode %s set on crtc %d\n",
+ mode->name, fb_crtc->mode_set.crtc->base.id);
+ fb_crtc->desired_mode = mode;
+ if (modeset->mode)
+ drm_mode_destroy(dev, modeset->mode);
+ modeset->mode = drm_mode_duplicate(dev,
+ fb_crtc->desired_mode);
+ modeset->connectors[modeset->num_connectors++] = fb_helper->connector_info[i]->connector;
+ }
+ }
+
+ kfree(crtcs);
+ kfree(modes);
+ kfree(enabled);
+}
+
+/**
+ * drm_fb_helper_initial_config - setup a sane initial connector configuration
+ * @fb_helper: fb_helper device struct
+ * @bpp_sel: bpp value to use for the framebuffer configuration
+ * LOCKING:
+ * Called at init time, must take mode config lock.
+ *
+ * Scan the CRTCs and connectors and try to put together an initial setup.
+ * At the moment, this is a cloned configuration across all heads with
+ * a new framebuffer object as the backing store.
+ *
+ * RETURNS:
+ * Zero if everything went ok, nonzero otherwise.
+ */
+bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel)
+{
+ struct drm_device *dev = fb_helper->dev;
+ int count = 0;
+
+ /* disable all the possible outputs/crtcs before entering KMS mode */
+ drm_helper_disable_unused_functions(fb_helper->dev);
+
+ drm_fb_helper_parse_command_line(fb_helper);
+
+ count = drm_fb_helper_probe_connector_modes(fb_helper,
+ dev->mode_config.max_width,
+ dev->mode_config.max_height);
+ /*
+ * we shouldn't end up with no modes here.
+ */
+ if (count == 0) {
+ printk(KERN_INFO "No connectors reported connected with modes\n");
+ }
+ drm_setup_crtcs(fb_helper);
+
+ return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
+}
+EXPORT_SYMBOL(drm_fb_helper_initial_config);
+
+bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
+{
+ int count = 0;
+ u32 max_width, max_height, bpp_sel;
+ bool bound = false, crtcs_bound = false;
+ struct drm_crtc *crtc;
+
+ if (!fb_helper->fb)
+ return false;
+
+ list_for_each_entry(crtc, &fb_helper->dev->mode_config.crtc_list, head) {
+ if (crtc->fb)
+ crtcs_bound = true;
+ if (crtc->fb == fb_helper->fb)
+ bound = true;
+ }
+
+ if (!bound && crtcs_bound) {
+ fb_helper->delayed_hotplug = true;
+ return false;
+ }
+ DRM_DEBUG_KMS("\n");
+
+ max_width = fb_helper->fb->width;
+ max_height = fb_helper->fb->height;
+ bpp_sel = fb_helper->fb->bits_per_pixel;
+
+ count = drm_fb_helper_probe_connector_modes(fb_helper, max_width,
+ max_height);
+ drm_setup_crtcs(fb_helper);
+
+ return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
+}
+EXPORT_SYMBOL(drm_fb_helper_hotplug_event);
+
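
The drm_fb_helper rework above turns the fbdev helper into a per-driver object: the driver embeds a struct drm_fb_helper, registers its connectors, and lets the helper pick and program an initial configuration. The sketch below shows roughly how a driver might wire this up; the foo_* names and the crtc/connector counts are invented for illustration, and the drm_fb_helper_funcs table layout is assumed — only the drm_fb_helper_* entry points and the fb_probe callback actually appear in the hunks above.

/*
 * Sketch of driver-side usage of the reworked fbdev helper.
 * foo_* names are hypothetical; the counts are arbitrary examples.
 */
#include <linux/slab.h>
#include "drmP.h"
#include "drm_fb_helper.h"

struct foo_fbdev {
	struct drm_fb_helper helper;	/* embedded helper state */
	/* driver-private framebuffer bookkeeping would follow */
};

static int foo_fb_probe(struct drm_fb_helper *helper,
			struct drm_fb_helper_surface_size *sizes)
{
	/*
	 * Allocate a backing object and an fb_info matching *sizes, set
	 * helper->fb and helper->fbdev, fill them in with
	 * drm_fb_helper_fill_fix()/drm_fb_helper_fill_var(), then return
	 * 1 for a newly created framebuffer or 0 if an existing one is reused.
	 */
	return 1;
}

static struct drm_fb_helper_funcs foo_fb_helper_funcs = {
	.fb_probe = foo_fb_probe,	/* called from drm_fb_helper_single_fb_probe() */
};

int foo_fbdev_init(struct drm_device *dev)
{
	struct foo_fbdev *fbdev;
	int ret;

	fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
	if (!fbdev)
		return -ENOMEM;
	fbdev->helper.funcs = &foo_fb_helper_funcs;

	ret = drm_fb_helper_init(dev, &fbdev->helper,
				 2 /* crtc_count */, 4 /* max_conn_count */);
	if (ret) {
		kfree(fbdev);
		return ret;
	}

	drm_fb_helper_single_add_all_connectors(&fbdev->helper);
	drm_fb_helper_initial_config(&fbdev->helper, 32 /* bpp_sel */);
	return 0;
}

void foo_fbdev_fini(struct drm_device *dev, struct foo_fbdev *fbdev)
{
	drm_fb_helper_fini(&fbdev->helper);
	kfree(fbdev);
}
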
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 9d532d7fdf59..e7aace20981f 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -243,11 +243,10 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor_id);
- priv = kmalloc(sizeof(*priv), GFP_KERNEL);
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- memset(priv, 0, sizeof(*priv));
filp->private_data = priv;
priv->filp = filp;
priv->uid = current_euid();
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index aa89d4b0b4c4..33dad3fa6043 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -124,6 +124,31 @@ drm_gem_destroy(struct drm_device *dev)
}
/**
+ * Initialize an already allocated GEM object of the specified size with
+ * shmfs backing store.
+ */
+int drm_gem_object_init(struct drm_device *dev,
+ struct drm_gem_object *obj, size_t size)
+{
+ BUG_ON((size & (PAGE_SIZE - 1)) != 0);
+
+ obj->dev = dev;
+ obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
+ if (IS_ERR(obj->filp))
+ return -ENOMEM;
+
+ kref_init(&obj->refcount);
+ kref_init(&obj->handlecount);
+ obj->size = size;
+
+ atomic_inc(&dev->object_count);
+ atomic_add(obj->size, &dev->object_memory);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_gem_object_init);
+
+/**
* Allocate a GEM object of the specified size with shmfs backing store
*/
struct drm_gem_object *
@@ -131,28 +156,22 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size)
{
struct drm_gem_object *obj;
- BUG_ON((size & (PAGE_SIZE - 1)) != 0);
-
obj = kzalloc(sizeof(*obj), GFP_KERNEL);
if (!obj)
goto free;
- obj->dev = dev;
- obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
- if (IS_ERR(obj->filp))
+ if (drm_gem_object_init(dev, obj, size) != 0)
goto free;
- kref_init(&obj->refcount);
- kref_init(&obj->handlecount);
- obj->size = size;
if (dev->driver->gem_init_object != NULL &&
dev->driver->gem_init_object(obj) != 0) {
goto fput;
}
- atomic_inc(&dev->object_count);
- atomic_add(obj->size, &dev->object_memory);
return obj;
fput:
+ /* Object_init mangles the global counters - readjust them. */
+ atomic_dec(&dev->object_count);
+ atomic_sub(obj->size, &dev->object_memory);
fput(obj->filp);
free:
kfree(obj);
@@ -403,15 +422,15 @@ drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
idr_destroy(&file_private->object_idr);
}
-static void
-drm_gem_object_free_common(struct drm_gem_object *obj)
+void
+drm_gem_object_release(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
fput(obj->filp);
atomic_dec(&dev->object_count);
atomic_sub(obj->size, &dev->object_memory);
- kfree(obj);
}
+EXPORT_SYMBOL(drm_gem_object_release);
/**
* Called after the last reference to the object has been lost.
@@ -429,8 +448,6 @@ drm_gem_object_free(struct kref *kref)
if (dev->driver->gem_free_object != NULL)
dev->driver->gem_free_object(obj);
-
- drm_gem_object_free_common(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);
@@ -453,8 +470,6 @@ drm_gem_object_free_unlocked(struct kref *kref)
dev->driver->gem_free_object(obj);
mutex_unlock(&dev->struct_mutex);
}
-
- drm_gem_object_free_common(obj);
}
EXPORT_SYMBOL(drm_gem_object_free_unlocked);
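
With kfree() moved out of the GEM free path and drm_gem_object_init()/drm_gem_object_release() exported, a driver can embed struct drm_gem_object inside its own object instead of allocating it separately — which is exactly what the i915 hunks below do with obj_priv->base. A rough sketch, assuming a hypothetical foo_obj wrapper:

/*
 * Sketch only: foo_obj and its alloc/free paths are invented for
 * illustration; drm_gem_object_init()/drm_gem_object_release() are the
 * helpers added above.  The size must be page aligned, since
 * drm_gem_object_init() BUG_ONs on unaligned sizes.
 */
#include <linux/slab.h>
#include "drmP.h"

struct foo_obj {
	struct drm_gem_object base;	/* embedded, no separate allocation */
	/* driver-private state follows */
};

static struct foo_obj *foo_obj_create(struct drm_device *dev, size_t size)
{
	struct foo_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;
	if (drm_gem_object_init(dev, &obj->base, size) != 0) {
		kfree(obj);
		return NULL;
	}
	return obj;
}

/* typically wired up as the driver's gem_free_object callback */
static void foo_obj_free(struct drm_gem_object *gem)
{
	struct foo_obj *obj = container_of(gem, struct foo_obj, base);

	drm_gem_object_release(gem);	/* drops filp and the global counters */
	kfree(obj);			/* the driver now frees its own wrapper */
}
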
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 76d63394c776..f1f473ea97d3 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -258,8 +258,10 @@ struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
drm_mode->clock -= drm_mode->clock % CVT_CLOCK_STEP;
/* 18/16. Find actual vertical frame frequency */
/* ignore - just set the mode flag for interlaced */
- if (interlaced)
+ if (interlaced) {
drm_mode->vtotal *= 2;
+ drm_mode->flags |= DRM_MODE_FLAG_INTERLACE;
+ }
/* Fill the mode line name */
drm_mode_set_name(drm_mode);
if (reduced)
@@ -268,43 +270,35 @@ struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
else
drm_mode->flags |= (DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_NHSYNC);
- if (interlaced)
- drm_mode->flags |= DRM_MODE_FLAG_INTERLACE;
- return drm_mode;
+ return drm_mode;
}
EXPORT_SYMBOL(drm_cvt_mode);
/**
- * drm_gtf_mode - create the modeline based on GTF algorithm
+ * drm_gtf_mode_complex - create the modeline based on full GTF algorithm
*
* @dev :drm device
* @hdisplay :hdisplay size
* @vdisplay :vdisplay size
* @vrefresh :vrefresh rate.
* @interlaced :whether the interlace is supported
- * @margins :whether the margin is supported
+ * @margins :desired margin size
+ * @GTF_[MCKJ] :extended GTF formula parameters
*
* LOCKING.
* none.
*
- * return the modeline based on GTF algorithm
- *
- * This function is to create the modeline based on the GTF algorithm.
- * Generalized Timing Formula is derived from:
- * GTF Spreadsheet by Andy Morrish (1/5/97)
- * available at http://www.vesa.org
+ * return the modeline based on full GTF algorithm.
*
- * And it is copied from the file of xserver/hw/xfree86/modes/xf86gtf.c.
- * What I have done is to translate it by using integer calculation.
- * I also refer to the function of fb_get_mode in the file of
- * drivers/video/fbmon.c
+ * GTF feature blocks specify C and J in multiples of 0.5, so we pass them
+ * in here multiplied by two. For a C of 40, pass in 80.
*/
-struct drm_display_mode *drm_gtf_mode(struct drm_device *dev, int hdisplay,
- int vdisplay, int vrefresh,
- bool interlaced, int margins)
-{
- /* 1) top/bottom margin size (% of height) - default: 1.8, */
+struct drm_display_mode *
+drm_gtf_mode_complex(struct drm_device *dev, int hdisplay, int vdisplay,
+ int vrefresh, bool interlaced, int margins,
+ int GTF_M, int GTF_2C, int GTF_K, int GTF_2J)
+{ /* 1) top/bottom margin size (% of height) - default: 1.8, */
#define GTF_MARGIN_PERCENTAGE 18
/* 2) character cell horizontal granularity (pixels) - default 8 */
#define GTF_CELL_GRAN 8
@@ -316,17 +310,9 @@ struct drm_display_mode *drm_gtf_mode(struct drm_device *dev, int hdisplay,
#define H_SYNC_PERCENT 8
/* min time of vsync + back porch (microsec) */
#define MIN_VSYNC_PLUS_BP 550
- /* blanking formula gradient */
-#define GTF_M 600
- /* blanking formula offset */
-#define GTF_C 40
- /* blanking formula scaling factor */
-#define GTF_K 128
- /* blanking formula scaling factor */
-#define GTF_J 20
/* C' and M' are part of the Blanking Duty Cycle computation */
-#define GTF_C_PRIME (((GTF_C - GTF_J) * GTF_K / 256) + GTF_J)
-#define GTF_M_PRIME (GTF_K * GTF_M / 256)
+#define GTF_C_PRIME ((((GTF_2C - GTF_2J) * GTF_K / 256) + GTF_2J) / 2)
+#define GTF_M_PRIME (GTF_K * GTF_M / 256)
struct drm_display_mode *drm_mode;
unsigned int hdisplay_rnd, vdisplay_rnd, vfieldrate_rqd;
int top_margin, bottom_margin;
@@ -460,17 +446,61 @@ struct drm_display_mode *drm_gtf_mode(struct drm_device *dev, int hdisplay,
drm_mode->clock = pixel_freq;
- drm_mode_set_name(drm_mode);
- drm_mode->flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC;
-
if (interlaced) {
drm_mode->vtotal *= 2;
drm_mode->flags |= DRM_MODE_FLAG_INTERLACE;
}
+ drm_mode_set_name(drm_mode);
+ if (GTF_M == 600 && GTF_2C == 80 && GTF_K == 128 && GTF_2J == 40)
+ drm_mode->flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC;
+ else
+ drm_mode->flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC;
+
return drm_mode;
}
+EXPORT_SYMBOL(drm_gtf_mode_complex);
+
+/**
+ * drm_gtf_mode - create the modeline based on GTF algorithm
+ *
+ * @dev :drm device
+ * @hdisplay :hdisplay size
+ * @vdisplay :vdisplay size
+ * @vrefresh :vrefresh rate.
+ * @interlaced :whether the interlace is supported
+ * @margins :whether the margin is supported
+ *
+ * LOCKING.
+ * none.
+ *
+ * return the modeline based on GTF algorithm
+ *
+ * This function is to create the modeline based on the GTF algorithm.
+ * Generalized Timing Formula is derived from:
+ * GTF Spreadsheet by Andy Morrish (1/5/97)
+ * available at http://www.vesa.org
+ *
+ * It is adapted from the file xserver/hw/xfree86/modes/xf86gtf.c,
+ * translated to use integer arithmetic.  The fb_get_mode() function in
+ * drivers/video/fbmon.c was also used as a reference for the
+ * implementation.
+ *
+ * Standard GTF parameters:
+ * M = 600
+ * C = 40
+ * K = 128
+ * J = 20
+ */
+struct drm_display_mode *
+drm_gtf_mode(struct drm_device *dev, int hdisplay, int vdisplay, int vrefresh,
+ bool lace, int margins)
+{
+ return drm_gtf_mode_complex(dev, hdisplay, vdisplay, vrefresh, lace,
+ margins, 600, 40 * 2, 128, 20 * 2);
+}
EXPORT_SYMBOL(drm_gtf_mode);
+
/**
* drm_mode_set_name - set the name on a mode
* @mode: name will be set in this mode
@@ -482,8 +512,11 @@ EXPORT_SYMBOL(drm_gtf_mode);
*/
void drm_mode_set_name(struct drm_display_mode *mode)
{
- snprintf(mode->name, DRM_DISPLAY_MODE_LEN, "%dx%d", mode->hdisplay,
- mode->vdisplay);
+ bool interlaced = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
+
+ snprintf(mode->name, DRM_DISPLAY_MODE_LEN, "%dx%d%s",
+ mode->hdisplay, mode->vdisplay,
+ interlaced ? "i" : "");
}
EXPORT_SYMBOL(drm_mode_set_name);
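
drm_gtf_mode_complex() takes the GTF blanking-formula parameters directly, with C and J doubled because GTF feature blocks specify them in steps of 0.5; the drm_gtf_mode() wrapper shows the standard curve (M=600, C=40, K=128, J=20 passed as 600, 80, 128, 40). Below is a hedged sketch of feeding a secondary curve through the new entry point — the numeric m/c/k/j values and the foo_add_range_mode() helper are made up for illustration; only the argument order and the doubling of C and J come from the code above.

/*
 * Sketch: hypothetical secondary-GTF-curve parameters, e.g. as carried in
 * an EDID range-limits descriptor.  Only the doubling of C and J is
 * dictated by the drm_gtf_mode_complex() interface.
 */
static void foo_add_range_mode(struct drm_connector *connector)
{
	struct drm_display_mode *mode;
	int m = 300, c = 30, k = 128, j = 20;	/* hypothetical panel data */

	mode = drm_gtf_mode_complex(connector->dev, 1366, 768, 60,
				    false, 0, m, c * 2, k, j * 2);
	if (mode)
		list_add(&mode->head, &connector->modes);
}
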
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 25bbd30ed7af..101d381e9d86 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -193,8 +193,9 @@ static ssize_t enabled_show(struct device *device,
"disabled");
}
-static ssize_t edid_show(struct kobject *kobj, struct bin_attribute *attr,
- char *buf, loff_t off, size_t count)
+static ssize_t edid_show(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf, loff_t off,
+ size_t count)
{
struct device *connector_dev = container_of(kobj, struct device, kobj);
struct drm_connector *connector = to_drm_connector(connector_dev);
@@ -333,7 +334,7 @@ static struct device_attribute connector_attrs_opt1[] = {
static struct bin_attribute edid_attr = {
.attr.name = "edid",
.attr.mode = 0444,
- .size = 128,
+ .size = 0,
.read = edid_show,
};
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 9929f84ec3e1..95639017bdbe 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -33,3 +33,5 @@ i915-$(CONFIG_ACPI) += i915_opregion.o
i915-$(CONFIG_COMPAT) += i915_ioc32.o
obj-$(CONFIG_DRM_I915) += i915.o
+
+CFLAGS_i915_trace_points.o := -I$(src)
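
The extra -I$(src) is the usual arrangement for self-defining tracepoints: the translation unit that defines CREATE_TRACE_POINTS includes the trace header by name, so that header must be findable relative to the source directory. A sketch of what such a file conventionally looks like — the actual contents of i915_trace_points.c are not part of this diff and are assumed here:

/* i915_trace_points.c -- conventional tracepoint instantiation (sketch) */
#include "i915_drv.h"

#define CREATE_TRACE_POINTS
#include "i915_trace.h"		/* resolved thanks to the -I$(src) above */
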
diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h
index 288fc50627e2..0d6ff640e1c6 100644
--- a/drivers/gpu/drm/i915/dvo.h
+++ b/drivers/gpu/drm/i915/dvo.h
@@ -70,16 +70,6 @@ struct intel_dvo_dev_ops {
void (*dpms)(struct intel_dvo_device *dvo, int mode);
/*
- * Saves the output's state for restoration on VT switch.
- */
- void (*save)(struct intel_dvo_device *dvo);
-
- /*
- * Restore's the output's state at VT switch.
- */
- void (*restore)(struct intel_dvo_device *dvo);
-
- /*
* Callback for testing a video mode for a given output.
*
* This function should only check for cases where a mode can't
diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
index 1184c14ba87d..14d59804acd7 100644
--- a/drivers/gpu/drm/i915/dvo_ch7017.c
+++ b/drivers/gpu/drm/i915/dvo_ch7017.c
@@ -159,16 +159,7 @@
#define CH7017_BANG_LIMIT_CONTROL 0x7f
struct ch7017_priv {
- uint8_t save_hapi;
- uint8_t save_vali;
- uint8_t save_valo;
- uint8_t save_ailo;
- uint8_t save_lvds_pll_vco;
- uint8_t save_feedback_div;
- uint8_t save_lvds_control_2;
- uint8_t save_outputs_enable;
- uint8_t save_lvds_power_down;
- uint8_t save_power_management;
+ uint8_t dummy;
};
static void ch7017_dump_regs(struct intel_dvo_device *dvo);
@@ -401,39 +392,6 @@ do { \
DUMP(CH7017_LVDS_POWER_DOWN);
}
-static void ch7017_save(struct intel_dvo_device *dvo)
-{
- struct ch7017_priv *priv = dvo->dev_priv;
-
- ch7017_read(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT, &priv->save_hapi);
- ch7017_read(dvo, CH7017_VERTICAL_ACTIVE_LINE_OUTPUT, &priv->save_valo);
- ch7017_read(dvo, CH7017_ACTIVE_INPUT_LINE_OUTPUT, &priv->save_ailo);
- ch7017_read(dvo, CH7017_LVDS_PLL_VCO_CONTROL, &priv->save_lvds_pll_vco);
- ch7017_read(dvo, CH7017_LVDS_PLL_FEEDBACK_DIV, &priv->save_feedback_div);
- ch7017_read(dvo, CH7017_LVDS_CONTROL_2, &priv->save_lvds_control_2);
- ch7017_read(dvo, CH7017_OUTPUTS_ENABLE, &priv->save_outputs_enable);
- ch7017_read(dvo, CH7017_LVDS_POWER_DOWN, &priv->save_lvds_power_down);
- ch7017_read(dvo, CH7017_POWER_MANAGEMENT, &priv->save_power_management);
-}
-
-static void ch7017_restore(struct intel_dvo_device *dvo)
-{
- struct ch7017_priv *priv = dvo->dev_priv;
-
- /* Power down before changing mode */
- ch7017_dpms(dvo, DRM_MODE_DPMS_OFF);
-
- ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT, priv->save_hapi);
- ch7017_write(dvo, CH7017_VERTICAL_ACTIVE_LINE_OUTPUT, priv->save_valo);
- ch7017_write(dvo, CH7017_ACTIVE_INPUT_LINE_OUTPUT, priv->save_ailo);
- ch7017_write(dvo, CH7017_LVDS_PLL_VCO_CONTROL, priv->save_lvds_pll_vco);
- ch7017_write(dvo, CH7017_LVDS_PLL_FEEDBACK_DIV, priv->save_feedback_div);
- ch7017_write(dvo, CH7017_LVDS_CONTROL_2, priv->save_lvds_control_2);
- ch7017_write(dvo, CH7017_OUTPUTS_ENABLE, priv->save_outputs_enable);
- ch7017_write(dvo, CH7017_LVDS_POWER_DOWN, priv->save_lvds_power_down);
- ch7017_write(dvo, CH7017_POWER_MANAGEMENT, priv->save_power_management);
-}
-
static void ch7017_destroy(struct intel_dvo_device *dvo)
{
struct ch7017_priv *priv = dvo->dev_priv;
@@ -451,7 +409,5 @@ struct intel_dvo_dev_ops ch7017_ops = {
.mode_set = ch7017_mode_set,
.dpms = ch7017_dpms,
.dump_regs = ch7017_dump_regs,
- .save = ch7017_save,
- .restore = ch7017_restore,
.destroy = ch7017_destroy,
};
diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
index d56ff5cc22b2..6f1944b24441 100644
--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
@@ -92,21 +92,10 @@ static struct ch7xxx_id_struct {
{ CH7301_VID, "CH7301" },
};
-struct ch7xxx_reg_state {
- uint8_t regs[CH7xxx_NUM_REGS];
-};
-
struct ch7xxx_priv {
bool quiet;
-
- struct ch7xxx_reg_state save_reg;
- struct ch7xxx_reg_state mode_reg;
- uint8_t save_TCTL, save_TPCP, save_TPD, save_TPVT;
- uint8_t save_TLPF, save_TCT, save_PM, save_IDF;
};
-static void ch7xxx_save(struct intel_dvo_device *dvo);
-
static char *ch7xxx_get_id(uint8_t vid)
{
int i;
@@ -312,42 +301,17 @@ static void ch7xxx_dpms(struct intel_dvo_device *dvo, int mode)
static void ch7xxx_dump_regs(struct intel_dvo_device *dvo)
{
- struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
int i;
for (i = 0; i < CH7xxx_NUM_REGS; i++) {
+ uint8_t val;
if ((i % 8) == 0 )
DRM_LOG_KMS("\n %02X: ", i);
- DRM_LOG_KMS("%02X ", ch7xxx->mode_reg.regs[i]);
+ ch7xxx_readb(dvo, i, &val);
+ DRM_LOG_KMS("%02X ", val);
}
}
-static void ch7xxx_save(struct intel_dvo_device *dvo)
-{
- struct ch7xxx_priv *ch7xxx= dvo->dev_priv;
-
- ch7xxx_readb(dvo, CH7xxx_TCTL, &ch7xxx->save_TCTL);
- ch7xxx_readb(dvo, CH7xxx_TPCP, &ch7xxx->save_TPCP);
- ch7xxx_readb(dvo, CH7xxx_TPD, &ch7xxx->save_TPD);
- ch7xxx_readb(dvo, CH7xxx_TPVT, &ch7xxx->save_TPVT);
- ch7xxx_readb(dvo, CH7xxx_TLPF, &ch7xxx->save_TLPF);
- ch7xxx_readb(dvo, CH7xxx_PM, &ch7xxx->save_PM);
- ch7xxx_readb(dvo, CH7xxx_IDF, &ch7xxx->save_IDF);
-}
-
-static void ch7xxx_restore(struct intel_dvo_device *dvo)
-{
- struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
-
- ch7xxx_writeb(dvo, CH7xxx_TCTL, ch7xxx->save_TCTL);
- ch7xxx_writeb(dvo, CH7xxx_TPCP, ch7xxx->save_TPCP);
- ch7xxx_writeb(dvo, CH7xxx_TPD, ch7xxx->save_TPD);
- ch7xxx_writeb(dvo, CH7xxx_TPVT, ch7xxx->save_TPVT);
- ch7xxx_writeb(dvo, CH7xxx_TLPF, ch7xxx->save_TLPF);
- ch7xxx_writeb(dvo, CH7xxx_IDF, ch7xxx->save_IDF);
- ch7xxx_writeb(dvo, CH7xxx_PM, ch7xxx->save_PM);
-}
-
static void ch7xxx_destroy(struct intel_dvo_device *dvo)
{
struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
@@ -365,7 +329,5 @@ struct intel_dvo_dev_ops ch7xxx_ops = {
.mode_set = ch7xxx_mode_set,
.dpms = ch7xxx_dpms,
.dump_regs = ch7xxx_dump_regs,
- .save = ch7xxx_save,
- .restore = ch7xxx_restore,
.destroy = ch7xxx_destroy,
};
diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
index 24169e528f0f..a2ec3f487202 100644
--- a/drivers/gpu/drm/i915/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/dvo_ivch.c
@@ -153,9 +153,6 @@ struct ivch_priv {
bool quiet;
uint16_t width, height;
-
- uint16_t save_VR01;
- uint16_t save_VR40;
};
@@ -405,22 +402,6 @@ static void ivch_dump_regs(struct intel_dvo_device *dvo)
DRM_LOG_KMS("VR8F: 0x%04x\n", val);
}
-static void ivch_save(struct intel_dvo_device *dvo)
-{
- struct ivch_priv *priv = dvo->dev_priv;
-
- ivch_read(dvo, VR01, &priv->save_VR01);
- ivch_read(dvo, VR40, &priv->save_VR40);
-}
-
-static void ivch_restore(struct intel_dvo_device *dvo)
-{
- struct ivch_priv *priv = dvo->dev_priv;
-
- ivch_write(dvo, VR01, priv->save_VR01);
- ivch_write(dvo, VR40, priv->save_VR40);
-}
-
static void ivch_destroy(struct intel_dvo_device *dvo)
{
struct ivch_priv *priv = dvo->dev_priv;
@@ -434,8 +415,6 @@ static void ivch_destroy(struct intel_dvo_device *dvo)
struct intel_dvo_dev_ops ivch_ops= {
.init = ivch_init,
.dpms = ivch_dpms,
- .save = ivch_save,
- .restore = ivch_restore,
.mode_valid = ivch_mode_valid,
.mode_set = ivch_mode_set,
.detect = ivch_detect,
diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
index 0001c13f0a80..9b8e6765cf26 100644
--- a/drivers/gpu/drm/i915/dvo_sil164.c
+++ b/drivers/gpu/drm/i915/dvo_sil164.c
@@ -58,17 +58,9 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#define SIL164_REGC 0x0c
-struct sil164_save_rec {
- uint8_t reg8;
- uint8_t reg9;
- uint8_t regc;
-};
-
struct sil164_priv {
//I2CDevRec d;
bool quiet;
- struct sil164_save_rec save_regs;
- struct sil164_save_rec mode_regs;
};
#define SILPTR(d) ((SIL164Ptr)(d->DriverPrivate.ptr))
@@ -252,34 +244,6 @@ static void sil164_dump_regs(struct intel_dvo_device *dvo)
DRM_LOG_KMS("SIL164_REGC: 0x%02x\n", val);
}
-static void sil164_save(struct intel_dvo_device *dvo)
-{
- struct sil164_priv *sil= dvo->dev_priv;
-
- if (!sil164_readb(dvo, SIL164_REG8, &sil->save_regs.reg8))
- return;
-
- if (!sil164_readb(dvo, SIL164_REG9, &sil->save_regs.reg9))
- return;
-
- if (!sil164_readb(dvo, SIL164_REGC, &sil->save_regs.regc))
- return;
-
- return;
-}
-
-static void sil164_restore(struct intel_dvo_device *dvo)
-{
- struct sil164_priv *sil = dvo->dev_priv;
-
- /* Restore it powered down initially */
- sil164_writeb(dvo, SIL164_REG8, sil->save_regs.reg8 & ~0x1);
-
- sil164_writeb(dvo, SIL164_REG9, sil->save_regs.reg9);
- sil164_writeb(dvo, SIL164_REGC, sil->save_regs.regc);
- sil164_writeb(dvo, SIL164_REG8, sil->save_regs.reg8);
-}
-
static void sil164_destroy(struct intel_dvo_device *dvo)
{
struct sil164_priv *sil = dvo->dev_priv;
@@ -297,7 +261,5 @@ struct intel_dvo_dev_ops sil164_ops = {
.mode_set = sil164_mode_set,
.dpms = sil164_dpms,
.dump_regs = sil164_dump_regs,
- .save = sil164_save,
- .restore = sil164_restore,
.destroy = sil164_destroy,
};
diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
index c7c391bc116a..66c697bc9b22 100644
--- a/drivers/gpu/drm/i915/dvo_tfp410.c
+++ b/drivers/gpu/drm/i915/dvo_tfp410.c
@@ -86,16 +86,8 @@
#define TFP410_V_RES_LO 0x3C
#define TFP410_V_RES_HI 0x3D
-struct tfp410_save_rec {
- uint8_t ctl1;
- uint8_t ctl2;
-};
-
struct tfp410_priv {
bool quiet;
-
- struct tfp410_save_rec saved_reg;
- struct tfp410_save_rec mode_reg;
};
static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
@@ -293,28 +285,6 @@ static void tfp410_dump_regs(struct intel_dvo_device *dvo)
DRM_LOG_KMS("TFP410_V_RES: 0x%02X%02X\n", val2, val);
}
-static void tfp410_save(struct intel_dvo_device *dvo)
-{
- struct tfp410_priv *tfp = dvo->dev_priv;
-
- if (!tfp410_readb(dvo, TFP410_CTL_1, &tfp->saved_reg.ctl1))
- return;
-
- if (!tfp410_readb(dvo, TFP410_CTL_2, &tfp->saved_reg.ctl2))
- return;
-}
-
-static void tfp410_restore(struct intel_dvo_device *dvo)
-{
- struct tfp410_priv *tfp = dvo->dev_priv;
-
- /* Restore it powered down initially */
- tfp410_writeb(dvo, TFP410_CTL_1, tfp->saved_reg.ctl1 & ~0x1);
-
- tfp410_writeb(dvo, TFP410_CTL_2, tfp->saved_reg.ctl2);
- tfp410_writeb(dvo, TFP410_CTL_1, tfp->saved_reg.ctl1);
-}
-
static void tfp410_destroy(struct intel_dvo_device *dvo)
{
struct tfp410_priv *tfp = dvo->dev_priv;
@@ -332,7 +302,5 @@ struct intel_dvo_dev_ops tfp410_ops = {
.mode_set = tfp410_mode_set,
.dpms = tfp410_dpms,
.dump_regs = tfp410_dump_regs,
- .save = tfp410_save,
- .restore = tfp410_restore,
.destroy = tfp410_destroy,
};
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index a0b8447b06e7..322070c0c631 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -96,19 +96,18 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
spin_lock(lock);
list_for_each_entry(obj_priv, head, list)
{
- struct drm_gem_object *obj = obj_priv->obj;
-
seq_printf(m, " %p: %s %8zd %08x %08x %d%s%s",
- obj,
+ &obj_priv->base,
get_pin_flag(obj_priv),
- obj->size,
- obj->read_domains, obj->write_domain,
+ obj_priv->base.size,
+ obj_priv->base.read_domains,
+ obj_priv->base.write_domain,
obj_priv->last_rendering_seqno,
obj_priv->dirty ? " dirty" : "",
obj_priv->madv == I915_MADV_DONTNEED ? " purgeable" : "");
- if (obj->name)
- seq_printf(m, " (name: %d)", obj->name);
+ if (obj_priv->base.name)
+ seq_printf(m, " (name: %d)", obj_priv->base.name);
if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
seq_printf(m, " (fence: %d)", obj_priv->fence_reg);
if (obj_priv->gtt_space != NULL)
@@ -289,7 +288,7 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data)
spin_lock(&dev_priv->mm.active_list_lock);
list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
- obj = obj_priv->obj;
+ obj = &obj_priv->base;
if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
ret = i915_gem_object_get_pages(obj, 0);
if (ret) {
@@ -567,23 +566,14 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_crtc *crtc;
drm_i915_private_t *dev_priv = dev->dev_private;
- bool fbc_enabled = false;
- if (!dev_priv->display.fbc_enabled) {
+ if (!I915_HAS_FBC(dev)) {
seq_printf(m, "FBC unsupported on this chipset\n");
return 0;
}
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- if (!crtc->enabled)
- continue;
- if (dev_priv->display.fbc_enabled(crtc))
- fbc_enabled = true;
- }
-
- if (fbc_enabled) {
+ if (intel_fbc_enabled(dev)) {
seq_printf(m, "FBC enabled\n");
} else {
seq_printf(m, "FBC disabled: ");
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index c3cfafcbfe7d..2a6b5de5ae5d 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1357,13 +1357,12 @@ static void i915_setup_compression(struct drm_device *dev, int size)
dev_priv->cfb_size = size;
+ intel_disable_fbc(dev);
dev_priv->compressed_fb = compressed_fb;
if (IS_GM45(dev)) {
- g4x_disable_fbc(dev);
I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
} else {
- i8xx_disable_fbc(dev);
I915_WRITE(FBC_CFB_BASE, cfb_base);
I915_WRITE(FBC_LL_BASE, ll_base);
dev_priv->compressed_llb = compressed_llb;
@@ -1504,8 +1503,8 @@ static int i915_load_modeset_init(struct drm_device *dev,
I915_WRITE(INSTPM, (1 << 5) | (1 << 21));
- drm_helper_initial_config(dev);
-
+ intel_fbdev_init(dev);
+ drm_kms_helper_poll_init(dev);
return 0;
destroy_ringbuffer:
@@ -1591,7 +1590,7 @@ static void i915_get_mem_freq(struct drm_device *dev)
*/
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv;
resource_size_t base, size;
int ret = 0, mmio_bar;
uint32_t agp_size, prealloc_size, prealloc_start;
@@ -1723,6 +1722,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
/* Start out suspended */
dev_priv->mm.suspended = 1;
+ intel_detect_pch(dev);
+
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
ret = i915_load_modeset_init(dev, prealloc_start,
prealloc_size, agp_size);
@@ -1769,6 +1770,8 @@ int i915_driver_unload(struct drm_device *dev)
}
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ intel_modeset_cleanup(dev);
+
/*
* free the memory space allocated for the child device
* config parsed from VBT
@@ -1792,8 +1795,6 @@ int i915_driver_unload(struct drm_device *dev)
intel_opregion_free(dev, 0);
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- intel_modeset_cleanup(dev);
-
i915_gem_free_all_phys_object(dev);
mutex_lock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index cc03537bb883..5c51e45ab68d 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -188,6 +188,35 @@ const static struct pci_device_id pciidlist[] = {
MODULE_DEVICE_TABLE(pci, pciidlist);
#endif
+#define INTEL_PCH_DEVICE_ID_MASK 0xff00
+#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
+
+void intel_detect_pch (struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct pci_dev *pch;
+
+ /*
+ * The reason to probe the ISA bridge instead of Dev31:Fun0 is to
+ * make graphics device passthrough easier for the VMM, which only
+ * needs to expose the ISA bridge to let the driver know the real
+ * hardware underneath. This is a requirement from the virtualization team.
+ */
+ pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
+ if (pch) {
+ if (pch->vendor == PCI_VENDOR_ID_INTEL) {
+ int id;
+ id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
+
+ if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
+ dev_priv->pch_type = PCH_CPT;
+ DRM_DEBUG_KMS("Found CougarPoint PCH\n");
+ }
+ }
+ pci_dev_put(pch);
+ }
+}
+
static int i915_drm_freeze(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
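A minimal standalone C sketch (not part of the patch above; the 0x1c46 device ID is a made-up example) of how the intel_detect_pch() hunk classifies the PCH by masking the high byte of the ISA bridge's PCI device ID:

#include <stdio.h>

#define INTEL_PCH_DEVICE_ID_MASK	0xff00
#define INTEL_PCH_CPT_DEVICE_ID_TYPE	0x1c00

int main(void)
{
	unsigned short device = 0x1c46;	/* hypothetical ISA bridge PCI device ID */
	unsigned short id = device & INTEL_PCH_DEVICE_ID_MASK;

	/* only the high byte identifies the PCH generation */
	if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE)
		printf("CougarPoint PCH (id=0x%04x)\n", id);
	else
		printf("unrecognised PCH (id=0x%04x)\n", id);
	return 0;
}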
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 6e4790065d9e..7f797ef1ab39 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -128,6 +128,7 @@ struct drm_i915_master_private {
struct drm_i915_fence_reg {
struct drm_gem_object *obj;
+ struct list_head lru_list;
};
struct sdvo_device_mapping {
@@ -135,6 +136,7 @@ struct sdvo_device_mapping {
u8 slave_addr;
u8 dvo_wiring;
u8 initialized;
+ u8 ddc_pin;
};
struct drm_i915_error_state {
@@ -175,7 +177,7 @@ struct drm_i915_error_state {
struct drm_i915_display_funcs {
void (*dpms)(struct drm_crtc *crtc, int mode);
- bool (*fbc_enabled)(struct drm_crtc *crtc);
+ bool (*fbc_enabled)(struct drm_device *dev);
void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval);
void (*disable_fbc)(struct drm_device *dev);
int (*get_display_clock_speed)(struct drm_device *dev);
@@ -222,6 +224,13 @@ enum no_fbc_reason {
FBC_NOT_TILED, /* buffer not tiled */
};
+enum intel_pch {
+ PCH_IBX, /* Ibexpeak PCH */
+ PCH_CPT, /* Cougarpoint PCH */
+};
+
+struct intel_fbdev;
+
typedef struct drm_i915_private {
struct drm_device *dev;
@@ -335,6 +344,9 @@ typedef struct drm_i915_private {
/* Display functions */
struct drm_i915_display_funcs display;
+ /* PCH chipset type */
+ enum intel_pch pch_type;
+
/* Register state */
bool modeset_on_lid;
u8 saveLBB;
@@ -637,11 +649,14 @@ typedef struct drm_i915_private {
struct drm_mm_node *compressed_fb;
struct drm_mm_node *compressed_llb;
+
+ /* fbdev registered on this device */
+ struct intel_fbdev *fbdev;
} drm_i915_private_t;
/** driver private structure attached to each drm_gem_object */
struct drm_i915_gem_object {
- struct drm_gem_object *obj;
+ struct drm_gem_object base;
/** Current space allocated to this object in the GTT, if any. */
struct drm_mm_node *gtt_space;
@@ -651,9 +666,6 @@ struct drm_i915_gem_object {
/** This object's place on GPU write list */
struct list_head gpu_write_list;
- /** This object's place on the fenced object LRU */
- struct list_head fence_list;
-
/**
* This is set if the object is on the active or flushing lists
* (has pending rendering), and is not set if it's on inactive (ready
@@ -740,7 +752,7 @@ struct drm_i915_gem_object {
atomic_t pending_flip;
};
-#define to_intel_bo(x) ((struct drm_i915_gem_object *) (x)->driver_private)
+#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
/**
* Request queue structure.
@@ -902,6 +914,8 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void i915_gem_load(struct drm_device *dev);
int i915_gem_init_object(struct drm_gem_object *obj);
+struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
+ size_t size);
void i915_gem_free_object(struct drm_gem_object *obj);
int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment);
void i915_gem_object_unpin(struct drm_gem_object *obj);
@@ -998,6 +1012,12 @@ extern void intel_modeset_cleanup(struct drm_device *dev);
extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
extern void i8xx_disable_fbc(struct drm_device *dev);
extern void g4x_disable_fbc(struct drm_device *dev);
+extern void intel_disable_fbc(struct drm_device *dev);
+extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval);
+extern bool intel_fbc_enabled(struct drm_device *dev);
+
+extern void intel_detect_pch (struct drm_device *dev);
+extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
/**
* Lock test for when it's just for synchronization of ring access.
@@ -1130,7 +1150,8 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
#define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev))
#define SUPPORTS_TV(dev) (IS_I9XX(dev) && IS_MOBILE(dev) && \
- !IS_IRONLAKE(dev) && !IS_PINEVIEW(dev))
+ !IS_IRONLAKE(dev) && !IS_PINEVIEW(dev) && \
+ !IS_GEN6(dev))
#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
/* dsparb controlled by hw only */
#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
@@ -1144,6 +1165,9 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
IS_GEN6(dev))
#define HAS_PIPE_CONTROL(dev) (IS_IRONLAKE(dev) || IS_GEN6(dev))
+#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
+#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
+
#define PRIMARY_RINGBUFFER_SIZE (128*1024)
#endif
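A minimal standalone sketch (simplified struct names, userspace C, not part of the patch) of the embedding pattern the new to_intel_bo() relies on: the driver object embeds the GEM object as a member named base, and container_of() recovers the wrapper from a pointer to that member, so the separate driver_private pointer is no longer needed.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct gem_object { size_t size; };

struct i915_gem_object_sketch {
	struct gem_object base;		/* embedded, replaces driver_private */
	int fence_reg;
};

int main(void)
{
	struct i915_gem_object_sketch obj = { .base = { .size = 4096 }, .fence_reg = -1 };
	struct gem_object *gem = &obj.base;

	/* recover the wrapper from the embedded base, as to_intel_bo() does */
	struct i915_gem_object_sketch *bo =
		container_of(gem, struct i915_gem_object_sketch, base);

	printf("size=%zu fence=%d\n", bo->base.size, bo->fence_reg);
	return 0;
}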
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index ef3d91dda71a..112699f71fa4 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -124,7 +124,7 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
args->size = roundup(args->size, PAGE_SIZE);
/* Allocate the new object */
- obj = drm_gem_object_alloc(dev, args->size);
+ obj = i915_gem_alloc_object(dev, args->size);
if (obj == NULL)
return -ENOMEM;
@@ -1051,7 +1051,9 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
* about to occur.
*/
if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
- list_move_tail(&obj_priv->fence_list,
+ struct drm_i915_fence_reg *reg =
+ &dev_priv->fence_regs[obj_priv->fence_reg];
+ list_move_tail(&reg->lru_list,
&dev_priv->mm.fence_list);
}
@@ -1566,7 +1568,7 @@ i915_gem_process_flushing_list(struct drm_device *dev,
list_for_each_entry_safe(obj_priv, next,
&dev_priv->mm.gpu_write_list,
gpu_write_list) {
- struct drm_gem_object *obj = obj_priv->obj;
+ struct drm_gem_object *obj = &obj_priv->base;
if ((obj->write_domain & flush_domains) ==
obj->write_domain) {
@@ -1577,9 +1579,12 @@ i915_gem_process_flushing_list(struct drm_device *dev,
i915_gem_object_move_to_active(obj, seqno);
/* update the fence lru list */
- if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
- list_move_tail(&obj_priv->fence_list,
+ if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
+ struct drm_i915_fence_reg *reg =
+ &dev_priv->fence_regs[obj_priv->fence_reg];
+ list_move_tail(&reg->lru_list,
&dev_priv->mm.fence_list);
+ }
trace_i915_gem_object_change_domain(obj,
obj->read_domains,
@@ -1745,7 +1750,7 @@ i915_gem_retire_request(struct drm_device *dev,
obj_priv = list_first_entry(&dev_priv->mm.active_list,
struct drm_i915_gem_object,
list);
- obj = obj_priv->obj;
+ obj = &obj_priv->base;
/* If the seqno being retired doesn't match the oldest in the
* list, then the oldest in the list must still be newer than
@@ -2119,7 +2124,7 @@ i915_gem_find_inactive_object(struct drm_device *dev, int min_size)
/* Try to find the smallest clean object */
list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
- struct drm_gem_object *obj = obj_priv->obj;
+ struct drm_gem_object *obj = &obj_priv->base;
if (obj->size >= min_size) {
if ((!obj_priv->dirty ||
i915_gem_object_is_purgeable(obj_priv)) &&
@@ -2253,7 +2258,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
/* Find an object that we can immediately reuse */
list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
- obj = obj_priv->obj;
+ obj = &obj_priv->base;
if (obj->size >= min_size)
break;
@@ -2485,9 +2490,10 @@ static int i915_find_fence_reg(struct drm_device *dev)
/* None available, try to steal one or wait for a user to finish */
i = I915_FENCE_REG_NONE;
- list_for_each_entry(obj_priv, &dev_priv->mm.fence_list,
- fence_list) {
- obj = obj_priv->obj;
+ list_for_each_entry(reg, &dev_priv->mm.fence_list,
+ lru_list) {
+ obj = reg->obj;
+ obj_priv = to_intel_bo(obj);
if (obj_priv->pin_count)
continue;
@@ -2536,7 +2542,8 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
/* Just update our place in the LRU if our fence is getting used. */
if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
- list_move_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list);
+ reg = &dev_priv->fence_regs[obj_priv->fence_reg];
+ list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
return 0;
}
@@ -2566,7 +2573,7 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
obj_priv->fence_reg = ret;
reg = &dev_priv->fence_regs[obj_priv->fence_reg];
- list_add_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list);
+ list_add_tail(&reg->lru_list, &dev_priv->mm.fence_list);
reg->obj = obj;
@@ -2598,6 +2605,8 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj)
struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ struct drm_i915_fence_reg *reg =
+ &dev_priv->fence_regs[obj_priv->fence_reg];
if (IS_GEN6(dev)) {
I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 +
@@ -2616,9 +2625,9 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj)
I915_WRITE(fence_reg, 0);
}
- dev_priv->fence_regs[obj_priv->fence_reg].obj = NULL;
+ reg->obj = NULL;
obj_priv->fence_reg = I915_FENCE_REG_NONE;
- list_del_init(&obj_priv->fence_list);
+ list_del_init(&reg->lru_list);
}
/**
@@ -4471,34 +4480,38 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
return 0;
}
-int i915_gem_init_object(struct drm_gem_object *obj)
+struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
+ size_t size)
{
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
- obj_priv = kzalloc(sizeof(*obj_priv), GFP_KERNEL);
- if (obj_priv == NULL)
- return -ENOMEM;
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (obj == NULL)
+ return NULL;
- /*
- * We've just allocated pages from the kernel,
- * so they've just been written by the CPU with
- * zeros. They'll need to be clflushed before we
- * use them with the GPU.
- */
- obj->write_domain = I915_GEM_DOMAIN_CPU;
- obj->read_domains = I915_GEM_DOMAIN_CPU;
+ if (drm_gem_object_init(dev, &obj->base, size) != 0) {
+ kfree(obj);
+ return NULL;
+ }
- obj_priv->agp_type = AGP_USER_MEMORY;
+ obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+ obj->base.read_domains = I915_GEM_DOMAIN_CPU;
- obj->driver_private = obj_priv;
- obj_priv->obj = obj;
- obj_priv->fence_reg = I915_FENCE_REG_NONE;
- INIT_LIST_HEAD(&obj_priv->list);
- INIT_LIST_HEAD(&obj_priv->gpu_write_list);
- INIT_LIST_HEAD(&obj_priv->fence_list);
- obj_priv->madv = I915_MADV_WILLNEED;
+ obj->agp_type = AGP_USER_MEMORY;
+ obj->base.driver_private = NULL;
+ obj->fence_reg = I915_FENCE_REG_NONE;
+ INIT_LIST_HEAD(&obj->list);
+ INIT_LIST_HEAD(&obj->gpu_write_list);
+ obj->madv = I915_MADV_WILLNEED;
- trace_i915_gem_object_create(obj);
+ trace_i915_gem_object_create(&obj->base);
+
+ return &obj->base;
+}
+
+int i915_gem_init_object(struct drm_gem_object *obj)
+{
+ BUG();
return 0;
}
@@ -4521,9 +4534,11 @@ void i915_gem_free_object(struct drm_gem_object *obj)
if (obj_priv->mmap_offset)
i915_gem_free_mmap_offset(obj);
+ drm_gem_object_release(obj);
+
kfree(obj_priv->page_cpu_valid);
kfree(obj_priv->bit_17);
- kfree(obj->driver_private);
+ kfree(obj_priv);
}
/** Unbinds all inactive objects. */
@@ -4536,9 +4551,9 @@ i915_gem_evict_from_inactive_list(struct drm_device *dev)
struct drm_gem_object *obj;
int ret;
- obj = list_first_entry(&dev_priv->mm.inactive_list,
- struct drm_i915_gem_object,
- list)->obj;
+ obj = &list_first_entry(&dev_priv->mm.inactive_list,
+ struct drm_i915_gem_object,
+ list)->base;
ret = i915_gem_object_unbind(obj);
if (ret != 0) {
@@ -4608,7 +4623,7 @@ i915_gem_init_pipe_control(struct drm_device *dev)
struct drm_i915_gem_object *obj_priv;
int ret;
- obj = drm_gem_object_alloc(dev, 4096);
+ obj = i915_gem_alloc_object(dev, 4096);
if (obj == NULL) {
DRM_ERROR("Failed to allocate seqno page\n");
ret = -ENOMEM;
@@ -4653,7 +4668,7 @@ i915_gem_init_hws(struct drm_device *dev)
if (!I915_NEED_GFX_HWS(dev))
return 0;
- obj = drm_gem_object_alloc(dev, 4096);
+ obj = i915_gem_alloc_object(dev, 4096);
if (obj == NULL) {
DRM_ERROR("Failed to allocate status page\n");
ret = -ENOMEM;
@@ -4764,7 +4779,7 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
if (ret != 0)
return ret;
- obj = drm_gem_object_alloc(dev, 128 * 1024);
+ obj = i915_gem_alloc_object(dev, 128 * 1024);
if (obj == NULL) {
DRM_ERROR("Failed to allocate ringbuffer\n");
i915_gem_cleanup_hws(dev);
@@ -4957,6 +4972,8 @@ i915_gem_load(struct drm_device *dev)
INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
INIT_LIST_HEAD(&dev_priv->mm.request_list);
INIT_LIST_HEAD(&dev_priv->mm.fence_list);
+ for (i = 0; i < 16; i++)
+ INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
i915_gem_retire_work_handler);
dev_priv->mm.next_gem_seqno = 1;
@@ -5185,6 +5202,20 @@ void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv)
}
static int
+i915_gpu_is_active(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int lists_empty;
+
+ spin_lock(&dev_priv->mm.active_list_lock);
+ lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
+ list_empty(&dev_priv->mm.active_list);
+ spin_unlock(&dev_priv->mm.active_list_lock);
+
+ return !lists_empty;
+}
+
+static int
i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
{
drm_i915_private_t *dev_priv, *next_dev;
@@ -5213,6 +5244,7 @@ i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
spin_lock(&shrink_list_lock);
+rescan:
/* first scan for clean buffers */
list_for_each_entry_safe(dev_priv, next_dev,
&shrink_list, mm.shrink_list) {
@@ -5229,7 +5261,7 @@ i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
&dev_priv->mm.inactive_list,
list) {
if (i915_gem_object_is_purgeable(obj_priv)) {
- i915_gem_object_unbind(obj_priv->obj);
+ i915_gem_object_unbind(&obj_priv->base);
if (--nr_to_scan <= 0)
break;
}
@@ -5258,7 +5290,7 @@ i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
&dev_priv->mm.inactive_list,
list) {
if (nr_to_scan > 0) {
- i915_gem_object_unbind(obj_priv->obj);
+ i915_gem_object_unbind(&obj_priv->base);
nr_to_scan--;
} else
cnt++;
@@ -5270,6 +5302,36 @@ i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
would_deadlock = 0;
}
+ if (nr_to_scan) {
+ int active = 0;
+
+ /*
+ * We are desperate for pages, so as a last resort, wait
+ * for the GPU to finish and discard whatever we can.
+ * This dramatically reduces the number of OOM-killer
+ * events whilst running the GPU aggressively.
+ */
+ list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
+ struct drm_device *dev = dev_priv->dev;
+
+ if (!mutex_trylock(&dev->struct_mutex))
+ continue;
+
+ spin_unlock(&shrink_list_lock);
+
+ if (i915_gpu_is_active(dev)) {
+ i915_gpu_idle(dev);
+ active++;
+ }
+
+ spin_lock(&shrink_list_lock);
+ mutex_unlock(&dev->struct_mutex);
+ }
+
+ if (active)
+ goto rescan;
+ }
+
spin_unlock(&shrink_list_lock);
if (would_deadlock)
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
index 35507cf53fa3..80f380b1d951 100644
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -39,7 +39,7 @@ i915_verify_inactive(struct drm_device *dev, char *file, int line)
struct drm_i915_gem_object *obj_priv;
list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
- obj = obj_priv->obj;
+ obj = &obj_priv->base;
if (obj_priv->pin_count || obj_priv->active ||
(obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
I915_GEM_DOMAIN_GTT)))
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 4bdccefcf2cf..4b7c49d4257d 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -283,6 +283,11 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
return -EINVAL;
}
+ if (obj_priv->pin_count) {
+ drm_gem_object_unreference_unlocked(obj);
+ return -EBUSY;
+ }
+
if (args->tiling_mode == I915_TILING_NONE) {
args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
args->stride = 0;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 2b8b969d0c15..8c3f0802686d 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -169,9 +169,13 @@ void intel_enable_asle (struct drm_device *dev)
if (HAS_PCH_SPLIT(dev))
ironlake_enable_display_irq(dev_priv, DE_GSE);
- else
+ else {
i915_enable_pipestat(dev_priv, 1,
I915_LEGACY_BLC_EVENT_ENABLE);
+ if (IS_I965G(dev))
+ i915_enable_pipestat(dev_priv, 0,
+ I915_LEGACY_BLC_EVENT_ENABLE);
+ }
}
/**
@@ -256,18 +260,18 @@ static void i915_hotplug_work_func(struct work_struct *work)
hotplug_work);
struct drm_device *dev = dev_priv->dev;
struct drm_mode_config *mode_config = &dev->mode_config;
- struct drm_connector *connector;
+ struct drm_encoder *encoder;
- if (mode_config->num_connector) {
- list_for_each_entry(connector, &mode_config->connector_list, head) {
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ if (mode_config->num_encoder) {
+ list_for_each_entry(encoder, &mode_config->encoder_list, head) {
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
if (intel_encoder->hot_plug)
(*intel_encoder->hot_plug) (intel_encoder);
}
}
/* Just fire off a uevent and let userspace tell us what to do */
- drm_sysfs_hotplug_event(dev);
+ drm_helper_hpd_irq_event(dev);
}
static void i915_handle_rps_change(struct drm_device *dev)
@@ -456,11 +460,15 @@ i915_error_object_create(struct drm_device *dev,
for (page = 0; page < page_count; page++) {
void *s, *d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
+ unsigned long flags;
+
if (d == NULL)
goto unwind;
- s = kmap_atomic(src_priv->pages[page], KM_USER0);
+ local_irq_save(flags);
+ s = kmap_atomic(src_priv->pages[page], KM_IRQ0);
memcpy(d, s, PAGE_SIZE);
- kunmap_atomic(s, KM_USER0);
+ kunmap_atomic(s, KM_IRQ0);
+ local_irq_restore(flags);
dst->pages[page] = d;
}
dst->page_count = page_count;
@@ -608,7 +616,7 @@ static void i915_capture_error_state(struct drm_device *dev)
batchbuffer[1] = NULL;
count = 0;
list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
- struct drm_gem_object *obj = obj_priv->obj;
+ struct drm_gem_object *obj = &obj_priv->base;
if (batchbuffer[0] == NULL &&
bbaddr >= obj_priv->gtt_offset &&
@@ -644,7 +652,7 @@ static void i915_capture_error_state(struct drm_device *dev)
if (error->active_bo) {
int i = 0;
list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
- struct drm_gem_object *obj = obj_priv->obj;
+ struct drm_gem_object *obj = &obj_priv->base;
error->active_bo[i].size = obj->size;
error->active_bo[i].name = obj->name;
@@ -946,7 +954,8 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
intel_finish_page_flip(dev, 1);
}
- if ((pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) ||
+ if ((pipea_stats & I915_LEGACY_BLC_EVENT_STATUS) ||
+ (pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) ||
(iir & I915_ASLE_INTERRUPT))
opregion_asle_intr(dev);
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 4cbc5210fd30..f3e39cc46f0d 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -1764,6 +1764,14 @@
#define DP_LINK_TRAIN_MASK (3 << 28)
#define DP_LINK_TRAIN_SHIFT 28
+/* CPT Link training mode */
+#define DP_LINK_TRAIN_PAT_1_CPT (0 << 8)
+#define DP_LINK_TRAIN_PAT_2_CPT (1 << 8)
+#define DP_LINK_TRAIN_PAT_IDLE_CPT (2 << 8)
+#define DP_LINK_TRAIN_OFF_CPT (3 << 8)
+#define DP_LINK_TRAIN_MASK_CPT (7 << 8)
+#define DP_LINK_TRAIN_SHIFT_CPT 8
+
/* Signal voltages. These are mostly controlled by the other end */
#define DP_VOLTAGE_0_4 (0 << 25)
#define DP_VOLTAGE_0_6 (1 << 25)
@@ -1924,7 +1932,10 @@
/* Display & cursor control */
/* dithering flag on Ironlake */
-#define PIPE_ENABLE_DITHER (1 << 4)
+#define PIPE_ENABLE_DITHER (1 << 4)
+#define PIPE_DITHER_TYPE_MASK (3 << 2)
+#define PIPE_DITHER_TYPE_SPATIAL (0 << 2)
+#define PIPE_DITHER_TYPE_ST01 (1 << 2)
/* Pipe A */
#define PIPEADSL 0x70000
#define PIPEACONF 0x70008
@@ -1988,15 +1999,24 @@
#define DSPFW1 0x70034
#define DSPFW_SR_SHIFT 23
+#define DSPFW_SR_MASK (0x1ff<<23)
#define DSPFW_CURSORB_SHIFT 16
+#define DSPFW_CURSORB_MASK (0x3f<<16)
#define DSPFW_PLANEB_SHIFT 8
+#define DSPFW_PLANEB_MASK (0x7f<<8)
+#define DSPFW_PLANEA_MASK (0x7f)
#define DSPFW2 0x70038
#define DSPFW_CURSORA_MASK 0x00003f00
#define DSPFW_CURSORA_SHIFT 8
+#define DSPFW_PLANEC_MASK (0x7f)
#define DSPFW3 0x7003c
#define DSPFW_HPLL_SR_EN (1<<31)
#define DSPFW_CURSOR_SR_SHIFT 24
#define PINEVIEW_SELF_REFRESH_EN (1<<30)
+#define DSPFW_CURSOR_SR_MASK (0x3f<<24)
+#define DSPFW_HPLL_CURSOR_SHIFT 16
+#define DSPFW_HPLL_CURSOR_MASK (0x3f<<16)
+#define DSPFW_HPLL_SR_MASK (0x1ff)
/* FIFO watermark sizes etc */
#define G4X_FIFO_LINE_SIZE 64
@@ -2023,6 +2043,43 @@
#define PINEVIEW_CURSOR_DFT_WM 0
#define PINEVIEW_CURSOR_GUARD_WM 5
+
+/* define the Watermark register on Ironlake */
+#define WM0_PIPEA_ILK 0x45100
+#define WM0_PIPE_PLANE_MASK (0x7f<<16)
+#define WM0_PIPE_PLANE_SHIFT 16
+#define WM0_PIPE_SPRITE_MASK (0x3f<<8)
+#define WM0_PIPE_SPRITE_SHIFT 8
+#define WM0_PIPE_CURSOR_MASK (0x1f)
+
+#define WM0_PIPEB_ILK 0x45104
+#define WM1_LP_ILK 0x45108
+#define WM1_LP_SR_EN (1<<31)
+#define WM1_LP_LATENCY_SHIFT 24
+#define WM1_LP_LATENCY_MASK (0x7f<<24)
+#define WM1_LP_SR_MASK (0x1ff<<8)
+#define WM1_LP_SR_SHIFT 8
+#define WM1_LP_CURSOR_MASK (0x3f)
+
+/* Memory latency timer register */
+#define MLTR_ILK 0x11222
+/* the unit of memory self-refresh latency time is 0.5us */
+#define ILK_SRLT_MASK 0x3f
+
+/* define the fifo size on Ironlake */
+#define ILK_DISPLAY_FIFO 128
+#define ILK_DISPLAY_MAXWM 64
+#define ILK_DISPLAY_DFTWM 8
+
+#define ILK_DISPLAY_SR_FIFO 512
+#define ILK_DISPLAY_MAX_SRWM 0x1ff
+#define ILK_DISPLAY_DFT_SRWM 0x3f
+#define ILK_CURSOR_SR_FIFO 64
+#define ILK_CURSOR_MAX_SRWM 0x3f
+#define ILK_CURSOR_DFT_SRWM 8
+
+#define ILK_FIFO_LINE_SIZE 64
+
/*
* The two pipe frame counter registers are not synchronized, so
* reading a stable value is somewhat tricky. The following code
@@ -2304,8 +2361,15 @@
#define GTIIR 0x44018
#define GTIER 0x4401c
+#define ILK_DISPLAY_CHICKEN2 0x42004
+#define ILK_DPARB_GATE (1<<22)
+#define ILK_VSDPFD_FULL (1<<21)
+#define ILK_DSPCLK_GATE 0x42020
+#define ILK_DPARB_CLK_GATE (1<<5)
+
#define DISP_ARB_CTL 0x45000
#define DISP_TILE_SURFACE_SWIZZLING (1<<13)
+#define DISP_FBC_WM_DIS (1<<15)
/* PCH */
@@ -2316,6 +2380,11 @@
#define SDE_PORTB_HOTPLUG (1 << 8)
#define SDE_SDVOB_HOTPLUG (1 << 6)
#define SDE_HOTPLUG_MASK (0xf << 8)
+/* CPT */
+#define SDE_CRT_HOTPLUG_CPT (1 << 19)
+#define SDE_PORTD_HOTPLUG_CPT (1 << 23)
+#define SDE_PORTC_HOTPLUG_CPT (1 << 22)
+#define SDE_PORTB_HOTPLUG_CPT (1 << 21)
#define SDEISR 0xc4000
#define SDEIMR 0xc4004
@@ -2407,6 +2476,17 @@
#define PCH_SSC4_PARMS 0xc6210
#define PCH_SSC4_AUX_PARMS 0xc6214
+#define PCH_DPLL_SEL 0xc7000
+#define TRANSA_DPLL_ENABLE (1<<3)
+#define TRANSA_DPLLB_SEL (1<<0)
+#define TRANSA_DPLLA_SEL 0
+#define TRANSB_DPLL_ENABLE (1<<7)
+#define TRANSB_DPLLB_SEL (1<<4)
+#define TRANSB_DPLLA_SEL (0)
+#define TRANSC_DPLL_ENABLE (1<<11)
+#define TRANSC_DPLLB_SEL (1<<8)
+#define TRANSC_DPLLA_SEL (0)
+
/* transcoder */
#define TRANS_HTOTAL_A 0xe0000
@@ -2493,6 +2573,19 @@
#define FDI_LINK_TRAIN_PRE_EMPHASIS_1_5X (1<<22)
#define FDI_LINK_TRAIN_PRE_EMPHASIS_2X (2<<22)
#define FDI_LINK_TRAIN_PRE_EMPHASIS_3X (3<<22)
+/* ILK always uses 400mV 0dB for voltage swing and pre-emphasis level.
+ SNB has different settings. */
+/* SNB A-stepping */
+#define FDI_LINK_TRAIN_400MV_0DB_SNB_A (0x38<<22)
+#define FDI_LINK_TRAIN_400MV_6DB_SNB_A (0x02<<22)
+#define FDI_LINK_TRAIN_600MV_3_5DB_SNB_A (0x01<<22)
+#define FDI_LINK_TRAIN_800MV_0DB_SNB_A (0x0<<22)
+/* SNB B-stepping */
+#define FDI_LINK_TRAIN_400MV_0DB_SNB_B (0x0<<22)
+#define FDI_LINK_TRAIN_400MV_6DB_SNB_B (0x3a<<22)
+#define FDI_LINK_TRAIN_600MV_3_5DB_SNB_B (0x39<<22)
+#define FDI_LINK_TRAIN_800MV_0DB_SNB_B (0x38<<22)
+#define FDI_LINK_TRAIN_VOL_EMP_MASK (0x3f<<22)
#define FDI_DP_PORT_WIDTH_X1 (0<<19)
#define FDI_DP_PORT_WIDTH_X2 (1<<19)
#define FDI_DP_PORT_WIDTH_X3 (2<<19)
@@ -2525,6 +2618,13 @@
#define FDI_RX_ENHANCE_FRAME_ENABLE (1<<6)
#define FDI_SEL_RAWCLK (0<<4)
#define FDI_SEL_PCDCLK (1<<4)
+/* CPT */
+#define FDI_AUTO_TRAINING (1<<10)
+#define FDI_LINK_TRAIN_PATTERN_1_CPT (0<<8)
+#define FDI_LINK_TRAIN_PATTERN_2_CPT (1<<8)
+#define FDI_LINK_TRAIN_PATTERN_IDLE_CPT (2<<8)
+#define FDI_LINK_TRAIN_NORMAL_CPT (3<<8)
+#define FDI_LINK_TRAIN_PATTERN_MASK_CPT (3<<8)
#define FDI_RXA_MISC 0xf0010
#define FDI_RXB_MISC 0xf1010
@@ -2596,6 +2696,9 @@
#define HSYNC_ACTIVE_HIGH (1 << 3)
#define PORT_DETECTED (1 << 2)
+/* PCH SDVOB is multiplexed with HDMIB */
+#define PCH_SDVOB HDMIB
+
#define HDMIC 0xe1150
#define HDMID 0xe1160
@@ -2653,4 +2756,42 @@
#define PCH_DPD_AUX_CH_DATA4 0xe4320
#define PCH_DPD_AUX_CH_DATA5 0xe4324
+/* CPT */
+#define PORT_TRANS_A_SEL_CPT 0
+#define PORT_TRANS_B_SEL_CPT (1<<29)
+#define PORT_TRANS_C_SEL_CPT (2<<29)
+#define PORT_TRANS_SEL_MASK (3<<29)
+
+#define TRANS_DP_CTL_A 0xe0300
+#define TRANS_DP_CTL_B 0xe1300
+#define TRANS_DP_CTL_C 0xe2300
+#define TRANS_DP_OUTPUT_ENABLE (1<<31)
+#define TRANS_DP_PORT_SEL_B (0<<29)
+#define TRANS_DP_PORT_SEL_C (1<<29)
+#define TRANS_DP_PORT_SEL_D (2<<29)
+#define TRANS_DP_PORT_SEL_MASK (3<<29)
+#define TRANS_DP_AUDIO_ONLY (1<<26)
+#define TRANS_DP_ENH_FRAMING (1<<18)
+#define TRANS_DP_8BPC (0<<9)
+#define TRANS_DP_10BPC (1<<9)
+#define TRANS_DP_6BPC (2<<9)
+#define TRANS_DP_12BPC (3<<9)
+#define TRANS_DP_VSYNC_ACTIVE_HIGH (1<<4)
+#define TRANS_DP_VSYNC_ACTIVE_LOW 0
+#define TRANS_DP_HSYNC_ACTIVE_HIGH (1<<3)
+#define TRANS_DP_HSYNC_ACTIVE_LOW 0
+
+/* SNB eDP training params */
+/* SNB A-stepping */
+#define EDP_LINK_TRAIN_400MV_0DB_SNB_A (0x38<<22)
+#define EDP_LINK_TRAIN_400MV_6DB_SNB_A (0x02<<22)
+#define EDP_LINK_TRAIN_600MV_3_5DB_SNB_A (0x01<<22)
+#define EDP_LINK_TRAIN_800MV_0DB_SNB_A (0x0<<22)
+/* SNB B-stepping */
+#define EDP_LINK_TRAIN_400MV_0DB_SNB_B (0x0<<22)
+#define EDP_LINK_TRAIN_400MV_6DB_SNB_B (0x3a<<22)
+#define EDP_LINK_TRAIN_600MV_3_5DB_SNB_B (0x39<<22)
+#define EDP_LINK_TRAIN_800MV_0DB_SNB_B (0x38<<22)
+#define EDP_LINK_TRAIN_VOL_EMP_MASK_SNB (0x3f<<22)
+
#endif /* _I915_REG_H_ */
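The new *_MASK definitions pair with the existing *_SHIFT values so watermark fields can be updated read-modify-write. A small standalone sketch (made-up register value; the header spells the mask as 0x1ff<<23, written with an unsigned literal here to keep the shift well defined) using the DSPFW1 self-refresh field as the example:

#include <stdio.h>
#include <stdint.h>

#define DSPFW_SR_SHIFT	23
#define DSPFW_SR_MASK	(0x1ffu << 23)

int main(void)
{
	uint32_t dspfw1 = 0x12345678;	/* made-up register contents */
	uint32_t sr_wm = 42;		/* new self-refresh watermark value */

	/* clear the field, then insert the new value at its shift */
	dspfw1 = (dspfw1 & ~DSPFW_SR_MASK) | (sr_wm << DSPFW_SR_SHIFT);

	printf("SR watermark now %u\n",
	       (unsigned)((dspfw1 & DSPFW_SR_MASK) >> DSPFW_SR_SHIFT));
	return 0;
}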
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index ac0d1a73ac22..60a5800fba6e 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -600,14 +600,16 @@ void i915_save_display(struct drm_device *dev)
}
/* FIXME: save TV & SDVO state */
- /* FBC state */
- if (IS_GM45(dev)) {
- dev_priv->saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE);
- } else {
- dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
- dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
- dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
- dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL);
+ /* Only save FBC state on platforms that support FBC */
+ if (I915_HAS_FBC(dev)) {
+ if (IS_GM45(dev)) {
+ dev_priv->saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE);
+ } else {
+ dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
+ dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
+ dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
+ dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL);
+ }
}
/* VGA state */
@@ -702,18 +704,19 @@ void i915_restore_display(struct drm_device *dev)
}
/* FIXME: restore TV & SDVO state */
- /* FBC info */
- if (IS_GM45(dev)) {
- g4x_disable_fbc(dev);
- I915_WRITE(DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE);
- } else {
- i8xx_disable_fbc(dev);
- I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE);
- I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE);
- I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2);
- I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL);
+ /* Only restore FBC state on platforms that support FBC */
+ if (I915_HAS_FBC(dev)) {
+ if (IS_GM45(dev)) {
+ g4x_disable_fbc(dev);
+ I915_WRITE(DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE);
+ } else {
+ i8xx_disable_fbc(dev);
+ I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE);
+ I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE);
+ I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2);
+ I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL);
+ }
}
-
/* VGA state */
if (IS_IRONLAKE(dev))
I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL);
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 01840d9bc38f..9e4c45f68d6e 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -115,7 +115,7 @@ TRACE_EVENT(i915_gem_object_get_fence,
__entry->obj, __entry->fence, __entry->tiling_mode)
);
-TRACE_EVENT(i915_gem_object_unbind,
+DECLARE_EVENT_CLASS(i915_gem_object,
TP_PROTO(struct drm_gem_object *obj),
@@ -132,21 +132,18 @@ TRACE_EVENT(i915_gem_object_unbind,
TP_printk("obj=%p", __entry->obj)
);
-TRACE_EVENT(i915_gem_object_destroy,
+DEFINE_EVENT(i915_gem_object, i915_gem_object_unbind,
TP_PROTO(struct drm_gem_object *obj),
- TP_ARGS(obj),
+ TP_ARGS(obj)
+);
- TP_STRUCT__entry(
- __field(struct drm_gem_object *, obj)
- ),
+DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy,
- TP_fast_assign(
- __entry->obj = obj;
- ),
+ TP_PROTO(struct drm_gem_object *obj),
- TP_printk("obj=%p", __entry->obj)
+ TP_ARGS(obj)
);
/* batch tracing */
@@ -197,8 +194,7 @@ TRACE_EVENT(i915_gem_request_flush,
__entry->flush_domains, __entry->invalidate_domains)
);
-
-TRACE_EVENT(i915_gem_request_complete,
+DECLARE_EVENT_CLASS(i915_gem_request,
TP_PROTO(struct drm_device *dev, u32 seqno),
@@ -217,64 +213,35 @@ TRACE_EVENT(i915_gem_request_complete,
TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
);
-TRACE_EVENT(i915_gem_request_retire,
+DEFINE_EVENT(i915_gem_request, i915_gem_request_complete,
TP_PROTO(struct drm_device *dev, u32 seqno),
- TP_ARGS(dev, seqno),
-
- TP_STRUCT__entry(
- __field(u32, dev)
- __field(u32, seqno)
- ),
-
- TP_fast_assign(
- __entry->dev = dev->primary->index;
- __entry->seqno = seqno;
- ),
-
- TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
+ TP_ARGS(dev, seqno)
);
-TRACE_EVENT(i915_gem_request_wait_begin,
+DEFINE_EVENT(i915_gem_request, i915_gem_request_retire,
TP_PROTO(struct drm_device *dev, u32 seqno),
- TP_ARGS(dev, seqno),
-
- TP_STRUCT__entry(
- __field(u32, dev)
- __field(u32, seqno)
- ),
-
- TP_fast_assign(
- __entry->dev = dev->primary->index;
- __entry->seqno = seqno;
- ),
-
- TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
+ TP_ARGS(dev, seqno)
);
-TRACE_EVENT(i915_gem_request_wait_end,
+DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_begin,
TP_PROTO(struct drm_device *dev, u32 seqno),
- TP_ARGS(dev, seqno),
+ TP_ARGS(dev, seqno)
+);
- TP_STRUCT__entry(
- __field(u32, dev)
- __field(u32, seqno)
- ),
+DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end,
- TP_fast_assign(
- __entry->dev = dev->primary->index;
- __entry->seqno = seqno;
- ),
+ TP_PROTO(struct drm_device *dev, u32 seqno),
- TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
+ TP_ARGS(dev, seqno)
);
-TRACE_EVENT(i915_ring_wait_begin,
+DECLARE_EVENT_CLASS(i915_ring,
TP_PROTO(struct drm_device *dev),
@@ -291,26 +258,23 @@ TRACE_EVENT(i915_ring_wait_begin,
TP_printk("dev=%u", __entry->dev)
);
-TRACE_EVENT(i915_ring_wait_end,
+DEFINE_EVENT(i915_ring, i915_ring_wait_begin,
TP_PROTO(struct drm_device *dev),
- TP_ARGS(dev),
+ TP_ARGS(dev)
+);
- TP_STRUCT__entry(
- __field(u32, dev)
- ),
+DEFINE_EVENT(i915_ring, i915_ring_wait_end,
- TP_fast_assign(
- __entry->dev = dev->primary->index;
- ),
+ TP_PROTO(struct drm_device *dev),
- TP_printk("dev=%u", __entry->dev)
+ TP_ARGS(dev)
);
#endif /* _I915_TRACE_H_ */
/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/i915
+#define TRACE_INCLUDE_PATH .
#include <trace/define_trace.h>
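The hunks above fold four near-identical TRACE_EVENT definitions into one DECLARE_EVENT_CLASS plus thin DEFINE_EVENT instances. As a loose, standalone analogy (plain C macros, not the real tracepoint machinery), the idea is "define the body once, stamp out many names":

#include <stdio.h>

/* crude stand-in for DECLARE_EVENT_CLASS: one body shared by many events */
#define DECLARE_CLASS(class)						\
	static void class##_template(const char *event, unsigned dev,	\
				     unsigned seqno)			\
	{								\
		printf("%s: dev=%u, seqno=%u\n", event, dev, seqno);	\
	}

/* crude stand-in for DEFINE_EVENT: a thin wrapper reusing the class body */
#define DEFINE_CLASS_EVENT(class, name)					\
	static void trace_##name(unsigned dev, unsigned seqno)		\
	{								\
		class##_template(#name, dev, seqno);			\
	}

DECLARE_CLASS(i915_gem_request)
DEFINE_CLASS_EVENT(i915_gem_request, i915_gem_request_complete)
DEFINE_CLASS_EVENT(i915_gem_request, i915_gem_request_retire)

int main(void)
{
	trace_i915_gem_request_complete(0, 42);
	trace_i915_gem_request_retire(0, 42);
	return 0;
}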
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index f9ba452f0cbf..4c748d8f73d6 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -366,6 +366,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
p_mapping->dvo_port = p_child->dvo_port;
p_mapping->slave_addr = p_child->slave_addr;
p_mapping->dvo_wiring = p_child->dvo_wiring;
+ p_mapping->ddc_pin = p_child->ddc_pin;
p_mapping->initialized = 1;
} else {
DRM_DEBUG_KMS("Maybe one SDVO port is shared by "
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 759c2ef72eff..e16ac5a28c3c 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -136,11 +136,17 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
adpa |= ADPA_VSYNC_ACTIVE_HIGH;
if (intel_crtc->pipe == 0) {
- adpa |= ADPA_PIPE_A_SELECT;
+ if (HAS_PCH_CPT(dev))
+ adpa |= PORT_TRANS_A_SEL_CPT;
+ else
+ adpa |= ADPA_PIPE_A_SELECT;
if (!HAS_PCH_SPLIT(dev))
I915_WRITE(BCLRPAT_A, 0);
} else {
- adpa |= ADPA_PIPE_B_SELECT;
+ if (HAS_PCH_CPT(dev))
+ adpa |= PORT_TRANS_B_SEL_CPT;
+ else
+ adpa |= ADPA_PIPE_B_SELECT;
if (!HAS_PCH_SPLIT(dev))
I915_WRITE(BCLRPAT_B, 0);
}
@@ -152,15 +158,21 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 adpa;
+ u32 adpa, temp;
bool ret;
- adpa = I915_READ(PCH_ADPA);
+ temp = adpa = I915_READ(PCH_ADPA);
- adpa &= ~ADPA_CRT_HOTPLUG_MASK;
- /* disable HPD first */
- I915_WRITE(PCH_ADPA, adpa);
- (void)I915_READ(PCH_ADPA);
+ if (HAS_PCH_CPT(dev)) {
+ /* Disable DAC before force detect */
+ I915_WRITE(PCH_ADPA, adpa & ~ADPA_DAC_ENABLE);
+ (void)I915_READ(PCH_ADPA);
+ } else {
+ adpa &= ~ADPA_CRT_HOTPLUG_MASK;
+ /* disable HPD first */
+ I915_WRITE(PCH_ADPA, adpa);
+ (void)I915_READ(PCH_ADPA);
+ }
adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 |
ADPA_CRT_HOTPLUG_WARMUP_10MS |
@@ -176,6 +188,11 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
while ((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) != 0)
;
+ if (HAS_PCH_CPT(dev)) {
+ I915_WRITE(PCH_ADPA, temp);
+ (void)I915_READ(PCH_ADPA);
+ }
+
/* Check the status to see if both blue and green are on now */
adpa = I915_READ(PCH_ADPA);
adpa &= ADPA_CRT_HOTPLUG_MONITOR_MASK;
@@ -245,9 +262,9 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
return false;
}
-static bool intel_crt_detect_ddc(struct drm_connector *connector)
+static bool intel_crt_detect_ddc(struct drm_encoder *encoder)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
/* CRT should always be at 0, but check anyway */
if (intel_encoder->type != INTEL_OUTPUT_ANALOG)
@@ -387,8 +404,8 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder
static enum drm_connector_status intel_crt_detect(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct drm_encoder *encoder = &intel_encoder->enc;
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct drm_crtc *crtc;
int dpms_mode;
enum drm_connector_status status;
@@ -400,18 +417,19 @@ static enum drm_connector_status intel_crt_detect(struct drm_connector *connecto
return connector_status_disconnected;
}
- if (intel_crt_detect_ddc(connector))
+ if (intel_crt_detect_ddc(encoder))
return connector_status_connected;
/* for pre-945g platforms use load detect */
if (encoder->crtc && encoder->crtc->enabled) {
status = intel_crt_load_detect(encoder->crtc, intel_encoder);
} else {
- crtc = intel_get_load_detect_pipe(intel_encoder,
+ crtc = intel_get_load_detect_pipe(intel_encoder, connector,
NULL, &dpms_mode);
if (crtc) {
status = intel_crt_load_detect(crtc, intel_encoder);
- intel_release_load_detect_pipe(intel_encoder, dpms_mode);
+ intel_release_load_detect_pipe(intel_encoder,
+ connector, dpms_mode);
} else
status = connector_status_unknown;
}
@@ -421,9 +439,6 @@ static enum drm_connector_status intel_crt_detect(struct drm_connector *connecto
static void intel_crt_destroy(struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
-
- intel_i2c_destroy(intel_encoder->ddc_bus);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(connector);
@@ -432,29 +447,27 @@ static void intel_crt_destroy(struct drm_connector *connector)
static int intel_crt_get_modes(struct drm_connector *connector)
{
int ret;
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct i2c_adapter *ddcbus;
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct i2c_adapter *ddc_bus;
struct drm_device *dev = connector->dev;
- ret = intel_ddc_get_modes(intel_encoder);
+ ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
if (ret || !IS_G4X(dev))
goto end;
- ddcbus = intel_encoder->ddc_bus;
/* Try to probe digital port for output in DVI-I -> VGA mode. */
- intel_encoder->ddc_bus =
- intel_i2c_create(connector->dev, GPIOD, "CRTDDC_D");
+ ddc_bus = intel_i2c_create(connector->dev, GPIOD, "CRTDDC_D");
- if (!intel_encoder->ddc_bus) {
- intel_encoder->ddc_bus = ddcbus;
+ if (!ddc_bus) {
dev_printk(KERN_ERR, &connector->dev->pdev->dev,
"DDC bus registration failed for CRTDDC_D.\n");
goto end;
}
/* Try to get modes by GPIOD port */
- ret = intel_ddc_get_modes(intel_encoder);
- intel_i2c_destroy(ddcbus);
+ ret = intel_ddc_get_modes(connector, ddc_bus);
+ intel_i2c_destroy(ddc_bus);
end:
return ret;
@@ -491,12 +504,16 @@ static const struct drm_connector_funcs intel_crt_connector_funcs = {
static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs = {
.mode_valid = intel_crt_mode_valid,
.get_modes = intel_crt_get_modes,
- .best_encoder = intel_best_encoder,
+ .best_encoder = intel_attached_encoder,
};
static void intel_crt_enc_destroy(struct drm_encoder *encoder)
{
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+
+ intel_i2c_destroy(intel_encoder->ddc_bus);
drm_encoder_cleanup(encoder);
+ kfree(intel_encoder);
}
static const struct drm_encoder_funcs intel_crt_enc_funcs = {
@@ -507,6 +524,7 @@ void intel_crt_init(struct drm_device *dev)
{
struct drm_connector *connector;
struct intel_encoder *intel_encoder;
+ struct intel_connector *intel_connector;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 i2c_reg;
@@ -514,14 +532,20 @@ void intel_crt_init(struct drm_device *dev)
if (!intel_encoder)
return;
- connector = &intel_encoder->base;
- drm_connector_init(dev, &intel_encoder->base,
+ intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+ if (!intel_connector) {
+ kfree(intel_encoder);
+ return;
+ }
+
+ connector = &intel_connector->base;
+ drm_connector_init(dev, &intel_connector->base,
&intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
drm_encoder_init(dev, &intel_encoder->enc, &intel_crt_enc_funcs,
DRM_MODE_ENCODER_DAC);
- drm_mode_connector_attach_encoder(&intel_encoder->base,
+ drm_mode_connector_attach_encoder(&intel_connector->base,
&intel_encoder->enc);
/* Set up the DDC bus. */
@@ -553,5 +577,10 @@ void intel_crt_init(struct drm_device *dev)
drm_sysfs_connector_add(connector);
+ if (I915_HAS_HOTPLUG(dev))
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+ else
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+
dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS;
}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index c7502b6b1600..f469a84cacfd 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -742,12 +742,11 @@ bool intel_pipe_has_type (struct drm_crtc *crtc, int type)
{
struct drm_device *dev = crtc->dev;
struct drm_mode_config *mode_config = &dev->mode_config;
- struct drm_connector *l_entry;
+ struct drm_encoder *l_entry;
- list_for_each_entry(l_entry, &mode_config->connector_list, head) {
- if (l_entry->encoder &&
- l_entry->encoder->crtc == crtc) {
- struct intel_encoder *intel_encoder = to_intel_encoder(l_entry);
+ list_for_each_entry(l_entry, &mode_config->encoder_list, head) {
+ if (l_entry && l_entry->crtc == crtc) {
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(l_entry);
if (intel_encoder->type == type)
return true;
}
@@ -755,23 +754,6 @@ bool intel_pipe_has_type (struct drm_crtc *crtc, int type)
return false;
}
-static struct drm_connector *
-intel_pipe_get_connector (struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_mode_config *mode_config = &dev->mode_config;
- struct drm_connector *l_entry, *ret = NULL;
-
- list_for_each_entry(l_entry, &mode_config->connector_list, head) {
- if (l_entry->encoder &&
- l_entry->encoder->crtc == crtc) {
- ret = l_entry;
- break;
- }
- }
- return ret;
-}
-
#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)
/**
* Returns whether the given set of divisors are valid for a given refclk with
@@ -905,9 +887,9 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
memset(best_clock, 0, sizeof(*best_clock));
max_n = limit->n.max;
- /* based on hardware requriment prefer smaller n to precision */
+ /* based on hardware requirement, prefer smaller n to precision */
for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
- /* based on hardware requirment prefere larger m1,m2 */
+ /* based on hardware requirement, prefer larger m1,m2 */
for (clock.m1 = limit->m1.max;
clock.m1 >= limit->m1.min; clock.m1--) {
for (clock.m2 = limit->m2.max;
@@ -1066,9 +1048,8 @@ void i8xx_disable_fbc(struct drm_device *dev)
DRM_DEBUG_KMS("disabled FBC\n");
}
-static bool i8xx_fbc_enabled(struct drm_crtc *crtc)
+static bool i8xx_fbc_enabled(struct drm_device *dev)
{
- struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
@@ -1125,14 +1106,43 @@ void g4x_disable_fbc(struct drm_device *dev)
DRM_DEBUG_KMS("disabled FBC\n");
}
-static bool g4x_fbc_enabled(struct drm_crtc *crtc)
+static bool g4x_fbc_enabled(struct drm_device *dev)
{
- struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
}
+bool intel_fbc_enabled(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!dev_priv->display.fbc_enabled)
+ return false;
+
+ return dev_priv->display.fbc_enabled(dev);
+}
+
+void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+{
+ struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+
+ if (!dev_priv->display.enable_fbc)
+ return;
+
+ dev_priv->display.enable_fbc(crtc, interval);
+}
+
+void intel_disable_fbc(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!dev_priv->display.disable_fbc)
+ return;
+
+ dev_priv->display.disable_fbc(dev);
+}
+
/**
* intel_update_fbc - enable/disable FBC as needed
* @crtc: CRTC to point the compressor at
@@ -1167,9 +1177,7 @@ static void intel_update_fbc(struct drm_crtc *crtc,
if (!i915_powersave)
return;
- if (!dev_priv->display.fbc_enabled ||
- !dev_priv->display.enable_fbc ||
- !dev_priv->display.disable_fbc)
+ if (!I915_HAS_FBC(dev))
return;
if (!crtc->fb)
@@ -1216,28 +1224,25 @@ static void intel_update_fbc(struct drm_crtc *crtc,
goto out_disable;
}
- if (dev_priv->display.fbc_enabled(crtc)) {
+ if (intel_fbc_enabled(dev)) {
/* We can re-enable it in this case, but need to update pitch */
- if (fb->pitch > dev_priv->cfb_pitch)
- dev_priv->display.disable_fbc(dev);
- if (obj_priv->fence_reg != dev_priv->cfb_fence)
- dev_priv->display.disable_fbc(dev);
- if (plane != dev_priv->cfb_plane)
- dev_priv->display.disable_fbc(dev);
+ if ((fb->pitch > dev_priv->cfb_pitch) ||
+ (obj_priv->fence_reg != dev_priv->cfb_fence) ||
+ (plane != dev_priv->cfb_plane))
+ intel_disable_fbc(dev);
}
- if (!dev_priv->display.fbc_enabled(crtc)) {
- /* Now try to turn it back on if possible */
- dev_priv->display.enable_fbc(crtc, 500);
- }
+ /* Now try to turn it back on if possible */
+ if (!intel_fbc_enabled(dev))
+ intel_enable_fbc(crtc, 500);
return;
out_disable:
DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
/* Multiple disables should be harmless */
- if (dev_priv->display.fbc_enabled(crtc))
- dev_priv->display.disable_fbc(dev);
+ if (intel_fbc_enabled(dev))
+ intel_disable_fbc(dev);
}
static int
@@ -1510,6 +1515,219 @@ static void ironlake_set_pll_edp (struct drm_crtc *crtc, int clock)
udelay(500);
}
+/* The FDI link training functions for ILK/Ibexpeak. */
+static void ironlake_fdi_link_train(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
+ int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
+ int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR;
+ int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR;
+ u32 temp, tries = 0;
+
+ /* enable CPU FDI TX and PCH FDI RX */
+ temp = I915_READ(fdi_tx_reg);
+ temp |= FDI_TX_ENABLE;
+ temp &= ~(7 << 19);
+ temp |= (intel_crtc->fdi_lanes - 1) << 19;
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ I915_WRITE(fdi_tx_reg, temp);
+ I915_READ(fdi_tx_reg);
+
+ temp = I915_READ(fdi_rx_reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE);
+ I915_READ(fdi_rx_reg);
+ udelay(150);
+
+ /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
+ for train result */
+ temp = I915_READ(fdi_rx_imr_reg);
+ temp &= ~FDI_RX_SYMBOL_LOCK;
+ temp &= ~FDI_RX_BIT_LOCK;
+ I915_WRITE(fdi_rx_imr_reg, temp);
+ I915_READ(fdi_rx_imr_reg);
+ udelay(150);
+
+ for (;;) {
+ temp = I915_READ(fdi_rx_iir_reg);
+ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+
+ if ((temp & FDI_RX_BIT_LOCK)) {
+ DRM_DEBUG_KMS("FDI train 1 done.\n");
+ I915_WRITE(fdi_rx_iir_reg,
+ temp | FDI_RX_BIT_LOCK);
+ break;
+ }
+
+ tries++;
+
+ if (tries > 5) {
+ DRM_DEBUG_KMS("FDI train 1 fail!\n");
+ break;
+ }
+ }
+
+ /* Train 2 */
+ temp = I915_READ(fdi_tx_reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_2;
+ I915_WRITE(fdi_tx_reg, temp);
+
+ temp = I915_READ(fdi_rx_reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_2;
+ I915_WRITE(fdi_rx_reg, temp);
+ udelay(150);
+
+ tries = 0;
+
+ for (;;) {
+ temp = I915_READ(fdi_rx_iir_reg);
+ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+
+ if (temp & FDI_RX_SYMBOL_LOCK) {
+ I915_WRITE(fdi_rx_iir_reg,
+ temp | FDI_RX_SYMBOL_LOCK);
+ DRM_DEBUG_KMS("FDI train 2 done.\n");
+ break;
+ }
+
+ tries++;
+
+ if (tries > 5) {
+ DRM_DEBUG_KMS("FDI train 2 fail!\n");
+ break;
+ }
+ }
+
+ DRM_DEBUG_KMS("FDI train done\n");
+}
+
+static int snb_b_fdi_train_param [] = {
+ FDI_LINK_TRAIN_400MV_0DB_SNB_B,
+ FDI_LINK_TRAIN_400MV_6DB_SNB_B,
+ FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
+ FDI_LINK_TRAIN_800MV_0DB_SNB_B,
+};
+
+/* The FDI link training functions for SNB/Cougarpoint. */
+static void gen6_fdi_link_train(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
+ int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
+ int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR;
+ int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR;
+ u32 temp, i;
+
+ /* enable CPU FDI TX and PCH FDI RX */
+ temp = I915_READ(fdi_tx_reg);
+ temp |= FDI_TX_ENABLE;
+ temp &= ~(7 << 19);
+ temp |= (intel_crtc->fdi_lanes - 1) << 19;
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ /* SNB-B */
+ temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
+ I915_WRITE(fdi_tx_reg, temp);
+ I915_READ(fdi_tx_reg);
+
+ temp = I915_READ(fdi_rx_reg);
+ if (HAS_PCH_CPT(dev)) {
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+ } else {
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ }
+ I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE);
+ I915_READ(fdi_rx_reg);
+ udelay(150);
+
+ /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
+ for train result */
+ temp = I915_READ(fdi_rx_imr_reg);
+ temp &= ~FDI_RX_SYMBOL_LOCK;
+ temp &= ~FDI_RX_BIT_LOCK;
+ I915_WRITE(fdi_rx_imr_reg, temp);
+ I915_READ(fdi_rx_imr_reg);
+ udelay(150);
+
+ for (i = 0; i < 4; i++ ) {
+ temp = I915_READ(fdi_tx_reg);
+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ temp |= snb_b_fdi_train_param[i];
+ I915_WRITE(fdi_tx_reg, temp);
+ udelay(500);
+
+ temp = I915_READ(fdi_rx_iir_reg);
+ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+
+ if (temp & FDI_RX_BIT_LOCK) {
+ I915_WRITE(fdi_rx_iir_reg,
+ temp | FDI_RX_BIT_LOCK);
+ DRM_DEBUG_KMS("FDI train 1 done.\n");
+ break;
+ }
+ }
+ if (i == 4)
+ DRM_DEBUG_KMS("FDI train 1 fail!\n");
+
+ /* Train 2 */
+ temp = I915_READ(fdi_tx_reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_2;
+ if (IS_GEN6(dev)) {
+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ /* SNB-B */
+ temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
+ }
+ I915_WRITE(fdi_tx_reg, temp);
+
+ temp = I915_READ(fdi_rx_reg);
+ if (HAS_PCH_CPT(dev)) {
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
+ } else {
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_2;
+ }
+ I915_WRITE(fdi_rx_reg, temp);
+ udelay(150);
+
+ for (i = 0; i < 4; i++ ) {
+ temp = I915_READ(fdi_tx_reg);
+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ temp |= snb_b_fdi_train_param[i];
+ I915_WRITE(fdi_tx_reg, temp);
+ udelay(500);
+
+ temp = I915_READ(fdi_rx_iir_reg);
+ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+
+ if (temp & FDI_RX_SYMBOL_LOCK) {
+ I915_WRITE(fdi_rx_iir_reg,
+ temp | FDI_RX_SYMBOL_LOCK);
+ DRM_DEBUG_KMS("FDI train 2 done.\n");
+ break;
+ }
+ }
+ if (i == 4)
+ DRM_DEBUG_KMS("FDI train 2 fail!\n");
+
+ DRM_DEBUG_KMS("FDI train done.\n");
+}
+
static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct drm_device *dev = crtc->dev;
@@ -1523,8 +1741,6 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
int dspbase_reg = (plane == 0) ? DSPAADDR : DSPBADDR;
int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
- int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR;
- int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR;
int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF;
int pf_ctl_reg = (pipe == 0) ? PFA_CTL_1 : PFB_CTL_1;
int pf_win_size = (pipe == 0) ? PFA_WIN_SZ : PFB_WIN_SZ;
@@ -1541,8 +1757,9 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
int trans_vtot_reg = (pipe == 0) ? TRANS_VTOTAL_A : TRANS_VTOTAL_B;
int trans_vblank_reg = (pipe == 0) ? TRANS_VBLANK_A : TRANS_VBLANK_B;
int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B;
+ int trans_dpll_sel = (pipe == 0) ? 0 : 1;
u32 temp;
- int tries = 5, j, n;
+ int n;
u32 pipe_bpc;
temp = I915_READ(pipeconf_reg);
@@ -1569,12 +1786,6 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
/* enable eDP PLL */
ironlake_enable_pll_edp(crtc);
} else {
- /* enable PCH DPLL */
- temp = I915_READ(pch_dpll_reg);
- if ((temp & DPLL_VCO_ENABLE) == 0) {
- I915_WRITE(pch_dpll_reg, temp | DPLL_VCO_ENABLE);
- I915_READ(pch_dpll_reg);
- }
/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
temp = I915_READ(fdi_rx_reg);
@@ -1584,9 +1795,15 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
*/
temp &= ~(0x7 << 16);
temp |= (pipe_bpc << 11);
- I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE |
- FDI_SEL_PCDCLK |
- FDI_DP_PORT_WIDTH_X4); /* default 4 lanes */
+ temp &= ~(7 << 19);
+ temp |= (intel_crtc->fdi_lanes - 1) << 19;
+ I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE);
+ I915_READ(fdi_rx_reg);
+ udelay(200);
+
+ /* Switch from Rawclk to PCDclk */
+ temp = I915_READ(fdi_rx_reg);
+ I915_WRITE(fdi_rx_reg, temp | FDI_SEL_PCDCLK);
I915_READ(fdi_rx_reg);
udelay(200);
@@ -1629,91 +1846,32 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
}
if (!HAS_eDP) {
- /* enable CPU FDI TX and PCH FDI RX */
- temp = I915_READ(fdi_tx_reg);
- temp |= FDI_TX_ENABLE;
- temp |= FDI_DP_PORT_WIDTH_X4; /* default */
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_1;
- I915_WRITE(fdi_tx_reg, temp);
- I915_READ(fdi_tx_reg);
-
- temp = I915_READ(fdi_rx_reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_1;
- I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE);
- I915_READ(fdi_rx_reg);
-
- udelay(150);
-
- /* Train FDI. */
- /* umask FDI RX Interrupt symbol_lock and bit_lock bit
- for train result */
- temp = I915_READ(fdi_rx_imr_reg);
- temp &= ~FDI_RX_SYMBOL_LOCK;
- temp &= ~FDI_RX_BIT_LOCK;
- I915_WRITE(fdi_rx_imr_reg, temp);
- I915_READ(fdi_rx_imr_reg);
- udelay(150);
+ /* For PCH output, train the FDI link */
+ if (IS_GEN6(dev))
+ gen6_fdi_link_train(crtc);
+ else
+ ironlake_fdi_link_train(crtc);
- temp = I915_READ(fdi_rx_iir_reg);
- DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
-
- if ((temp & FDI_RX_BIT_LOCK) == 0) {
- for (j = 0; j < tries; j++) {
- temp = I915_READ(fdi_rx_iir_reg);
- DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n",
- temp);
- if (temp & FDI_RX_BIT_LOCK)
- break;
- udelay(200);
- }
- if (j != tries)
- I915_WRITE(fdi_rx_iir_reg,
- temp | FDI_RX_BIT_LOCK);
- else
- DRM_DEBUG_KMS("train 1 fail\n");
- } else {
- I915_WRITE(fdi_rx_iir_reg,
- temp | FDI_RX_BIT_LOCK);
- DRM_DEBUG_KMS("train 1 ok 2!\n");
+ /* enable PCH DPLL */
+ temp = I915_READ(pch_dpll_reg);
+ if ((temp & DPLL_VCO_ENABLE) == 0) {
+ I915_WRITE(pch_dpll_reg, temp | DPLL_VCO_ENABLE);
+ I915_READ(pch_dpll_reg);
}
- temp = I915_READ(fdi_tx_reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_2;
- I915_WRITE(fdi_tx_reg, temp);
-
- temp = I915_READ(fdi_rx_reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_2;
- I915_WRITE(fdi_rx_reg, temp);
-
- udelay(150);
+ udelay(200);
- temp = I915_READ(fdi_rx_iir_reg);
- DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
-
- if ((temp & FDI_RX_SYMBOL_LOCK) == 0) {
- for (j = 0; j < tries; j++) {
- temp = I915_READ(fdi_rx_iir_reg);
- DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n",
- temp);
- if (temp & FDI_RX_SYMBOL_LOCK)
- break;
- udelay(200);
- }
- if (j != tries) {
- I915_WRITE(fdi_rx_iir_reg,
- temp | FDI_RX_SYMBOL_LOCK);
- DRM_DEBUG_KMS("train 2 ok 1!\n");
- } else
- DRM_DEBUG_KMS("train 2 fail\n");
- } else {
- I915_WRITE(fdi_rx_iir_reg,
- temp | FDI_RX_SYMBOL_LOCK);
- DRM_DEBUG_KMS("train 2 ok 2!\n");
+ if (HAS_PCH_CPT(dev)) {
+ /* Be sure PCH DPLL SEL is set */
+ temp = I915_READ(PCH_DPLL_SEL);
+ if (trans_dpll_sel == 0 &&
+ (temp & TRANSA_DPLL_ENABLE) == 0)
+ temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
+ else if (trans_dpll_sel == 1 &&
+ (temp & TRANSB_DPLL_ENABLE) == 0)
+ temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
+ I915_WRITE(PCH_DPLL_SEL, temp);
+ I915_READ(PCH_DPLL_SEL);
}
- DRM_DEBUG_KMS("train done\n");
/* set transcoder timing */
I915_WRITE(trans_htot_reg, I915_READ(cpu_htot_reg));
@@ -1724,6 +1882,60 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
I915_WRITE(trans_vblank_reg, I915_READ(cpu_vblank_reg));
I915_WRITE(trans_vsync_reg, I915_READ(cpu_vsync_reg));
+ /* enable normal train */
+ temp = I915_READ(fdi_tx_reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ I915_WRITE(fdi_tx_reg, temp | FDI_LINK_TRAIN_NONE |
+ FDI_TX_ENHANCE_FRAME_ENABLE);
+ I915_READ(fdi_tx_reg);
+
+ temp = I915_READ(fdi_rx_reg);
+ if (HAS_PCH_CPT(dev)) {
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_NORMAL_CPT;
+ } else {
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_NONE;
+ }
+ I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
+ I915_READ(fdi_rx_reg);
+
+ /* wait one idle pattern time */
+ udelay(100);
+
+ /* For PCH DP, enable TRANS_DP_CTL */
+ if (HAS_PCH_CPT(dev) &&
+ intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
+ int trans_dp_ctl = (pipe == 0) ? TRANS_DP_CTL_A : TRANS_DP_CTL_B;
+ int reg;
+
+ reg = I915_READ(trans_dp_ctl);
+ reg &= ~TRANS_DP_PORT_SEL_MASK;
+ reg = TRANS_DP_OUTPUT_ENABLE |
+ TRANS_DP_ENH_FRAMING |
+ TRANS_DP_VSYNC_ACTIVE_HIGH |
+ TRANS_DP_HSYNC_ACTIVE_HIGH;
+
+ switch (intel_trans_dp_port_sel(crtc)) {
+ case PCH_DP_B:
+ reg |= TRANS_DP_PORT_SEL_B;
+ break;
+ case PCH_DP_C:
+ reg |= TRANS_DP_PORT_SEL_C;
+ break;
+ case PCH_DP_D:
+ reg |= TRANS_DP_PORT_SEL_D;
+ break;
+ default:
+ DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n");
+ reg |= TRANS_DP_PORT_SEL_B;
+ break;
+ }
+
+ I915_WRITE(trans_dp_ctl, reg);
+ POSTING_READ(trans_dp_ctl);
+ }
+
/* enable PCH transcoder */
temp = I915_READ(transconf_reg);
/*
@@ -1738,23 +1950,6 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
while ((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) == 0)
;
- /* enable normal */
-
- temp = I915_READ(fdi_tx_reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- I915_WRITE(fdi_tx_reg, temp | FDI_LINK_TRAIN_NONE |
- FDI_TX_ENHANCE_FRAME_ENABLE);
- I915_READ(fdi_tx_reg);
-
- temp = I915_READ(fdi_rx_reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- I915_WRITE(fdi_rx_reg, temp | FDI_LINK_TRAIN_NONE |
- FDI_RX_ENHANCE_FRAME_ENABLE);
- I915_READ(fdi_rx_reg);
-
- /* wait one idle pattern time */
- udelay(100);
-
}
intel_crtc_load_lut(crtc);
@@ -1805,6 +2000,8 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
I915_READ(pf_ctl_reg);
}
I915_WRITE(pf_win_size, 0);
+ POSTING_READ(pf_win_size);
+
/* disable CPU FDI tx and PCH FDI rx */
temp = I915_READ(fdi_tx_reg);
@@ -1825,11 +2022,18 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_1;
I915_WRITE(fdi_tx_reg, temp);
+ POSTING_READ(fdi_tx_reg);
temp = I915_READ(fdi_rx_reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_1;
+ if (HAS_PCH_CPT(dev)) {
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+ } else {
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ }
I915_WRITE(fdi_rx_reg, temp);
+ POSTING_READ(fdi_rx_reg);
udelay(100);
@@ -1859,6 +2063,7 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
}
}
}
+
temp = I915_READ(transconf_reg);
/* BPC in transcoder is consistent with that in pipeconf */
temp &= ~PIPE_BPC_MASK;
@@ -1867,35 +2072,53 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
I915_READ(transconf_reg);
udelay(100);
+ if (HAS_PCH_CPT(dev)) {
+ /* disable TRANS_DP_CTL */
+ int trans_dp_ctl = (pipe == 0) ? TRANS_DP_CTL_A : TRANS_DP_CTL_B;
+ int reg;
+
+ reg = I915_READ(trans_dp_ctl);
+ reg &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK);
+ I915_WRITE(trans_dp_ctl, reg);
+ POSTING_READ(trans_dp_ctl);
+
+ /* disable DPLL_SEL */
+ temp = I915_READ(PCH_DPLL_SEL);
+ if (trans_dpll_sel == 0)
+ temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL);
+ else
+ temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
+ I915_WRITE(PCH_DPLL_SEL, temp);
+ I915_READ(PCH_DPLL_SEL);
+
+ }
+
/* disable PCH DPLL */
temp = I915_READ(pch_dpll_reg);
- if ((temp & DPLL_VCO_ENABLE) != 0) {
- I915_WRITE(pch_dpll_reg, temp & ~DPLL_VCO_ENABLE);
- I915_READ(pch_dpll_reg);
- }
+ I915_WRITE(pch_dpll_reg, temp & ~DPLL_VCO_ENABLE);
+ I915_READ(pch_dpll_reg);
if (HAS_eDP) {
ironlake_disable_pll_edp(crtc);
}
+ /* Switch from PCDclk to Rawclk */
temp = I915_READ(fdi_rx_reg);
temp &= ~FDI_SEL_PCDCLK;
I915_WRITE(fdi_rx_reg, temp);
I915_READ(fdi_rx_reg);
+ /* Disable CPU FDI TX PLL */
+ temp = I915_READ(fdi_tx_reg);
+ I915_WRITE(fdi_tx_reg, temp & ~FDI_TX_PLL_ENABLE);
+ I915_READ(fdi_tx_reg);
+ udelay(100);
+
temp = I915_READ(fdi_rx_reg);
temp &= ~FDI_RX_PLL_ENABLE;
I915_WRITE(fdi_rx_reg, temp);
I915_READ(fdi_rx_reg);
- /* Disable CPU FDI TX PLL */
- temp = I915_READ(fdi_tx_reg);
- if ((temp & FDI_TX_PLL_ENABLE) != 0) {
- I915_WRITE(fdi_tx_reg, temp & ~FDI_TX_PLL_ENABLE);
- I915_READ(fdi_tx_reg);
- udelay(100);
- }
-
/* Wait for the clocks to turn off. */
udelay(100);
break;
@@ -2331,6 +2554,30 @@ static struct intel_watermark_params i830_wm_info = {
I830_FIFO_LINE_SIZE
};
+static struct intel_watermark_params ironlake_display_wm_info = {
+ ILK_DISPLAY_FIFO,
+ ILK_DISPLAY_MAXWM,
+ ILK_DISPLAY_DFTWM,
+ 2,
+ ILK_FIFO_LINE_SIZE
+};
+
+static struct intel_watermark_params ironlake_display_srwm_info = {
+ ILK_DISPLAY_SR_FIFO,
+ ILK_DISPLAY_MAX_SRWM,
+ ILK_DISPLAY_DFT_SRWM,
+ 2,
+ ILK_FIFO_LINE_SIZE
+};
+
+static struct intel_watermark_params ironlake_cursor_srwm_info = {
+ ILK_CURSOR_SR_FIFO,
+ ILK_CURSOR_MAX_SRWM,
+ ILK_CURSOR_DFT_SRWM,
+ 2,
+ ILK_FIFO_LINE_SIZE
+};
+
/**
* intel_calculate_wm - calculate watermark level
* @clock_in_khz: pixel clock
@@ -2449,66 +2696,6 @@ static void pineview_disable_cxsr(struct drm_device *dev)
DRM_INFO("Big FIFO is disabled\n");
}
-static void pineview_enable_cxsr(struct drm_device *dev, unsigned long clock,
- int pixel_size)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 reg;
- unsigned long wm;
- struct cxsr_latency *latency;
-
- latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->fsb_freq,
- dev_priv->mem_freq);
- if (!latency) {
- DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
- pineview_disable_cxsr(dev);
- return;
- }
-
- /* Display SR */
- wm = intel_calculate_wm(clock, &pineview_display_wm, pixel_size,
- latency->display_sr);
- reg = I915_READ(DSPFW1);
- reg &= 0x7fffff;
- reg |= wm << 23;
- I915_WRITE(DSPFW1, reg);
- DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
-
- /* cursor SR */
- wm = intel_calculate_wm(clock, &pineview_cursor_wm, pixel_size,
- latency->cursor_sr);
- reg = I915_READ(DSPFW3);
- reg &= ~(0x3f << 24);
- reg |= (wm & 0x3f) << 24;
- I915_WRITE(DSPFW3, reg);
-
- /* Display HPLL off SR */
- wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
- latency->display_hpll_disable, I915_FIFO_LINE_SIZE);
- reg = I915_READ(DSPFW3);
- reg &= 0xfffffe00;
- reg |= wm & 0x1ff;
- I915_WRITE(DSPFW3, reg);
-
- /* cursor HPLL off SR */
- wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm, pixel_size,
- latency->cursor_hpll_disable);
- reg = I915_READ(DSPFW3);
- reg &= ~(0x3f << 16);
- reg |= (wm & 0x3f) << 16;
- I915_WRITE(DSPFW3, reg);
- DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
-
- /* activate cxsr */
- reg = I915_READ(DSPFW3);
- reg |= PINEVIEW_SELF_REFRESH_EN;
- I915_WRITE(DSPFW3, reg);
-
- DRM_INFO("Big FIFO is enabled\n");
-
- return;
-}
-
/*
* Latency for FIFO fetches is dependent on several factors:
* - memory configuration (speed, channels)
@@ -2593,6 +2780,71 @@ static int i830_get_fifo_size(struct drm_device *dev, int plane)
return size;
}
+static void pineview_update_wm(struct drm_device *dev, int planea_clock,
+ int planeb_clock, int sr_hdisplay, int pixel_size)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 reg;
+ unsigned long wm;
+ struct cxsr_latency *latency;
+ int sr_clock;
+
+ latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->fsb_freq,
+ dev_priv->mem_freq);
+ if (!latency) {
+ DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
+ pineview_disable_cxsr(dev);
+ return;
+ }
+
+ if (!planea_clock || !planeb_clock) {
+ sr_clock = planea_clock ? planea_clock : planeb_clock;
+
+ /* Display SR */
+ wm = intel_calculate_wm(sr_clock, &pineview_display_wm,
+ pixel_size, latency->display_sr);
+ reg = I915_READ(DSPFW1);
+ reg &= ~DSPFW_SR_MASK;
+ reg |= wm << DSPFW_SR_SHIFT;
+ I915_WRITE(DSPFW1, reg);
+ DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
+
+ /* cursor SR */
+ wm = intel_calculate_wm(sr_clock, &pineview_cursor_wm,
+ pixel_size, latency->cursor_sr);
+ reg = I915_READ(DSPFW3);
+ reg &= ~DSPFW_CURSOR_SR_MASK;
+ reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
+ I915_WRITE(DSPFW3, reg);
+
+ /* Display HPLL off SR */
+ wm = intel_calculate_wm(sr_clock, &pineview_display_hplloff_wm,
+ pixel_size, latency->display_hpll_disable);
+ reg = I915_READ(DSPFW3);
+ reg &= ~DSPFW_HPLL_SR_MASK;
+ reg |= wm & DSPFW_HPLL_SR_MASK;
+ I915_WRITE(DSPFW3, reg);
+
+ /* cursor HPLL off SR */
+ wm = intel_calculate_wm(sr_clock, &pineview_cursor_hplloff_wm,
+ pixel_size, latency->cursor_hpll_disable);
+ reg = I915_READ(DSPFW3);
+ reg &= ~DSPFW_HPLL_CURSOR_MASK;
+ reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
+ I915_WRITE(DSPFW3, reg);
+ DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
+
+ /* activate cxsr */
+ reg = I915_READ(DSPFW3);
+ reg |= PINEVIEW_SELF_REFRESH_EN;
+ I915_WRITE(DSPFW3, reg);
+ DRM_DEBUG_KMS("Self-refresh is enabled\n");
+ } else {
+ pineview_disable_cxsr(dev);
+ DRM_DEBUG_KMS("Self-refresh is disabled\n");
+ }
+}
+
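
Aside: every DSPFW update in pineview_update_wm() above follows the same read-modify-write shape — clear the field's mask, then OR in the new watermark shifted into place. Below is a minimal standalone sketch of that pattern; the mask and shift are hypothetical examples, not the real DSPFW1/DSPFW3 layout.

#include <stdio.h>
#include <stdint.h>

/* Hypothetical field layout; the real DSPFW masks/shifts live in i915_reg.h. */
#define EXAMPLE_SR_SHIFT	23
#define EXAMPLE_SR_MASK		(0x1ffu << EXAMPLE_SR_SHIFT)

/* Clear the field, then insert the new watermark, clamped to the field width. */
static uint32_t update_wm_field(uint32_t reg, uint32_t mask,
				unsigned int shift, uint32_t wm)
{
	reg &= ~mask;
	reg |= (wm << shift) & mask;
	return reg;
}

int main(void)
{
	uint32_t dspfw = 0x12345678;	/* pretend register readback */
	uint32_t wm = 0x2a;		/* value from an intel_calculate_wm()-style helper */

	dspfw = update_wm_field(dspfw, EXAMPLE_SR_MASK, EXAMPLE_SR_SHIFT, wm);
	printf("DSPFW-style register is now 0x%08x\n", (unsigned int)dspfw);
	return 0;
}
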
static void g4x_update_wm(struct drm_device *dev, int planea_clock,
int planeb_clock, int sr_hdisplay, int pixel_size)
{
@@ -2813,6 +3065,108 @@ static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused,
I915_WRITE(FW_BLC, fwater_lo);
}
+#define ILK_LP0_PLANE_LATENCY 700
+
+static void ironlake_update_wm(struct drm_device *dev, int planea_clock,
+ int planeb_clock, int sr_hdisplay, int pixel_size)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
+ int sr_wm, cursor_wm;
+ unsigned long line_time_us;
+ int sr_clock, entries_required;
+ u32 reg_value;
+
+ /* Calculate and update the watermark for plane A */
+ if (planea_clock) {
+ entries_required = ((planea_clock / 1000) * pixel_size *
+ ILK_LP0_PLANE_LATENCY) / 1000;
+ entries_required = DIV_ROUND_UP(entries_required,
+ ironlake_display_wm_info.cacheline_size);
+ planea_wm = entries_required +
+ ironlake_display_wm_info.guard_size;
+
+ if (planea_wm > (int)ironlake_display_wm_info.max_wm)
+ planea_wm = ironlake_display_wm_info.max_wm;
+
+ cursora_wm = 16;
+ reg_value = I915_READ(WM0_PIPEA_ILK);
+ reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+ reg_value |= (planea_wm << WM0_PIPE_PLANE_SHIFT) |
+ (cursora_wm & WM0_PIPE_CURSOR_MASK);
+ I915_WRITE(WM0_PIPEA_ILK, reg_value);
+ DRM_DEBUG_KMS("FIFO watermarks for pipe A - plane %d, "
+ "cursor: %d\n", planea_wm, cursora_wm);
+ }
+ /* Calculate and update the watermark for plane B */
+ if (planeb_clock) {
+ entries_required = ((planeb_clock / 1000) * pixel_size *
+ ILK_LP0_PLANE_LATENCY) / 1000;
+ entries_required = DIV_ROUND_UP(entries_required,
+ ironlake_display_wm_info.cacheline_size);
+ planeb_wm = entries_required +
+ ironlake_display_wm_info.guard_size;
+
+ if (planeb_wm > (int)ironlake_display_wm_info.max_wm)
+ planeb_wm = ironlake_display_wm_info.max_wm;
+
+ cursorb_wm = 16;
+ reg_value = I915_READ(WM0_PIPEB_ILK);
+ reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+ reg_value |= (planeb_wm << WM0_PIPE_PLANE_SHIFT) |
+ (cursorb_wm & WM0_PIPE_CURSOR_MASK);
+ I915_WRITE(WM0_PIPEB_ILK, reg_value);
+ DRM_DEBUG_KMS("FIFO watermarks for pipe B - plane %d, "
+ "cursor: %d\n", planeb_wm, cursorb_wm);
+ }
+
+ /*
+ * Calculate and update the self-refresh watermark only when one
+ * display plane is used.
+ */
+ if (!planea_clock || !planeb_clock) {
+ int line_count;
+ /* Read the self-refresh latency. The unit is 0.5us */
+ int ilk_sr_latency = I915_READ(MLTR_ILK) & ILK_SRLT_MASK;
+
+ sr_clock = planea_clock ? planea_clock : planeb_clock;
+ line_time_us = ((sr_hdisplay * 1000) / sr_clock);
+
+ /* Use ns/us then divide to preserve precision */
+ line_count = ((ilk_sr_latency * 500) / line_time_us + 1000)
+ / 1000;
+
+ /* calculate the self-refresh watermark for display plane */
+ entries_required = line_count * sr_hdisplay * pixel_size;
+ entries_required = DIV_ROUND_UP(entries_required,
+ ironlake_display_srwm_info.cacheline_size);
+ sr_wm = entries_required +
+ ironlake_display_srwm_info.guard_size;
+
+ /* calculate the self-refresh watermark for display cursor */
+ entries_required = line_count * pixel_size * 64;
+ entries_required = DIV_ROUND_UP(entries_required,
+ ironlake_cursor_srwm_info.cacheline_size);
+ cursor_wm = entries_required +
+ ironlake_cursor_srwm_info.guard_size;
+
+ /* configure watermark and enable self-refresh */
+ reg_value = I915_READ(WM1_LP_ILK);
+ reg_value &= ~(WM1_LP_LATENCY_MASK | WM1_LP_SR_MASK |
+ WM1_LP_CURSOR_MASK);
+ reg_value |= WM1_LP_SR_EN |
+ (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) |
+ (sr_wm << WM1_LP_SR_SHIFT) | cursor_wm;
+
+ I915_WRITE(WM1_LP_ILK, reg_value);
+ DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
+ "cursor %d\n", sr_wm, cursor_wm);
+
+ } else {
+ /* Turn off self refresh if both pipes are enabled */
+ I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);
+ }
+}
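
Aside: the arithmetic in ironlake_update_wm() above reduces to two small formulas — FIFO entries drained during the LP0 latency window for the per-pipe watermark, and a latency-to-scanline conversion for the self-refresh watermark. The standalone sketch below reruns both with made-up inputs; the 700 ns latency, 64-byte cache line and guard size of 2 mirror the values this diff introduces, while the clock, mode and MLTR field are hypothetical.

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define ILK_LP0_PLANE_LATENCY	700	/* ns, as defined above */

int main(void)
{
	/* Hypothetical single-pipe configuration. */
	int plane_clock = 148500;	/* pixel clock in kHz */
	int pixel_size = 4;		/* bytes per pixel */
	int hdisplay = 1920;		/* active width in pixels */
	int cacheline = 64;		/* ILK_FIFO_LINE_SIZE */
	int guard = 2;			/* guard entries from the wm_info tables */

	/* LP0 plane watermark: bytes fetched during the latency window,
	 * rounded up to cache lines, plus the guard. */
	int entries = ((plane_clock / 1000) * pixel_size * ILK_LP0_PLANE_LATENCY) / 1000;
	int plane_wm = DIV_ROUND_UP(entries, cacheline) + guard;

	/* Self-refresh: turn the MLTR latency (0.5 us units) into scanlines. */
	int ilk_sr_latency = 8;		/* hypothetical MLTR_ILK field value */
	long line_time_us = (hdisplay * 1000L) / plane_clock;
	long line_count = ((ilk_sr_latency * 500) / line_time_us + 1000) / 1000;

	printf("LP0 plane watermark = %d, SR latency covers %ld scanline(s)\n",
	       plane_wm, line_count);
	return 0;
}
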
/**
* intel_update_watermarks - update FIFO watermark values based on current modes
*
@@ -2882,12 +3236,6 @@ static void intel_update_watermarks(struct drm_device *dev)
if (enabled <= 0)
return;
- /* Single plane configs can enable self refresh */
- if (enabled == 1 && IS_PINEVIEW(dev))
- pineview_enable_cxsr(dev, sr_clock, pixel_size);
- else if (IS_PINEVIEW(dev))
- pineview_disable_cxsr(dev);
-
dev_priv->display.update_wm(dev, planea_clock, planeb_clock,
sr_hdisplay, pixel_size);
}
@@ -2924,7 +3272,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
bool is_edp = false;
struct drm_mode_config *mode_config = &dev->mode_config;
- struct drm_connector *connector;
+ struct drm_encoder *encoder;
+ struct intel_encoder *intel_encoder = NULL;
const intel_limit_t *limit;
int ret;
struct fdi_m_n m_n = {0};
@@ -2935,6 +3284,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
int pch_fp_reg = (pipe == 0) ? PCH_FPA0 : PCH_FPB0;
int pch_dpll_reg = (pipe == 0) ? PCH_DPLL_A : PCH_DPLL_B;
int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
+ int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
+ int trans_dpll_sel = (pipe == 0) ? 0 : 1;
int lvds_reg = LVDS;
u32 temp;
int sdvo_pixel_multiply;
@@ -2942,12 +3293,13 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
drm_vblank_pre_modeset(dev, pipe);
- list_for_each_entry(connector, &mode_config->connector_list, head) {
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ list_for_each_entry(encoder, &mode_config->encoder_list, head) {
- if (!connector->encoder || connector->encoder->crtc != crtc)
+ if (!encoder || encoder->crtc != crtc)
continue;
+ intel_encoder = enc_to_intel_encoder(encoder);
+
switch (intel_encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
@@ -3043,14 +3395,12 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
/* FDI link */
if (HAS_PCH_SPLIT(dev)) {
- int lane, link_bw, bpp;
+ int lane = 0, link_bw, bpp;
/* eDP doesn't require FDI link, so just set DP M/N
according to current link config */
if (is_edp) {
- struct drm_connector *edp;
target_clock = mode->clock;
- edp = intel_pipe_get_connector(crtc);
- intel_edp_link_config(to_intel_encoder(edp),
+ intel_edp_link_config(intel_encoder,
&lane, &link_bw);
} else {
/* DP over FDI requires target mode clock
@@ -3059,7 +3409,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
target_clock = mode->clock;
else
target_clock = adjusted_mode->clock;
- lane = 4;
link_bw = 270000;
}
@@ -3111,6 +3460,18 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
bpp = 24;
}
+ if (!lane) {
+ /*
+ * Account for spread spectrum to avoid
+ * oversubscribing the link. Max center spread
+ * is 2.5%; use 5% for safety's sake.
+ */
+ u32 bps = target_clock * bpp * 21 / 20;
+ lane = bps / (link_bw * 8) + 1;
+ }
+
+ intel_crtc->fdi_lanes = lane;
+
ironlake_compute_m_n(bpp, lane, target_clock, link_bw, &m_n);
}
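
Aside: the lane fallback just above sizes the FDI link from bandwidth — pad the pixel bandwidth by 5% to cover spread-spectrum clocking, then divide by what one lane carries (link_bw symbols/s x 8 bits). A standalone sketch of that integer math with a hypothetical 154 MHz, 24 bpp mode on the fixed 270000 kHz FDI link used above:

#include <stdio.h>

int main(void)
{
	/* Hypothetical mode; the link rate matches the fixed 270000 above. */
	unsigned int target_clock = 154000;	/* pixel clock in kHz */
	unsigned int bpp = 24;			/* bits per pixel */
	unsigned int link_bw = 270000;		/* FDI symbol rate in kHz */

	/* Max center spread is 2.5%; pad by 5% (x 21/20) for safety. */
	unsigned int bps = target_clock * bpp * 21 / 20;

	/* Each lane carries link_bw * 8 kbit/s; the +1 absorbs the truncating divide. */
	unsigned int lane = bps / (link_bw * 8) + 1;

	printf("padded bandwidth %u kbit/s -> %u FDI lane(s)\n", bps, lane);
	return 0;
}
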
@@ -3265,11 +3626,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
pipeconf &= ~PIPEACONF_DOUBLE_WIDE;
}
- dspcntr |= DISPLAY_PLANE_ENABLE;
- pipeconf |= PIPEACONF_ENABLE;
- dpll |= DPLL_VCO_ENABLE;
-
-
/* Disable the panel fitter if it was on our pipe */
if (!HAS_PCH_SPLIT(dev) && intel_panel_fitter_pipe(dev) == pipe)
I915_WRITE(PFIT_CONTROL, 0);
@@ -3292,6 +3648,18 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
udelay(150);
}
+ /* enable transcoder DPLL */
+ if (HAS_PCH_CPT(dev)) {
+ temp = I915_READ(PCH_DPLL_SEL);
+ if (trans_dpll_sel == 0)
+ temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
+ else
+ temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
+ I915_WRITE(PCH_DPLL_SEL, temp);
+ I915_READ(PCH_DPLL_SEL);
+ udelay(150);
+ }
+
/* The LVDS pin pair needs to be on before the DPLLs are enabled.
* This is an exception to the general rule that mode_set doesn't turn
* things on.
@@ -3303,7 +3671,18 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
lvds_reg = PCH_LVDS;
lvds = I915_READ(lvds_reg);
- lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP | LVDS_PIPEB_SELECT;
+ lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
+ if (pipe == 1) {
+ if (HAS_PCH_CPT(dev))
+ lvds |= PORT_TRANS_B_SEL_CPT;
+ else
+ lvds |= LVDS_PIPEB_SELECT;
+ } else {
+ if (HAS_PCH_CPT(dev))
+ lvds &= ~PORT_TRANS_SEL_MASK;
+ else
+ lvds &= ~LVDS_PIPEB_SELECT;
+ }
/* set the corresponding LVDS_BORDER bit */
lvds |= dev_priv->lvds_border_bits;
/* Set the B0-B3 data pairs corresponding to whether we're going to
@@ -3321,14 +3700,16 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
/* set the dithering flag */
if (IS_I965G(dev)) {
if (dev_priv->lvds_dither) {
- if (HAS_PCH_SPLIT(dev))
+ if (HAS_PCH_SPLIT(dev)) {
pipeconf |= PIPE_ENABLE_DITHER;
- else
+ pipeconf |= PIPE_DITHER_TYPE_ST01;
+ } else
lvds |= LVDS_ENABLE_DITHER;
} else {
- if (HAS_PCH_SPLIT(dev))
+ if (HAS_PCH_SPLIT(dev)) {
pipeconf &= ~PIPE_ENABLE_DITHER;
- else
+ pipeconf &= ~PIPE_DITHER_TYPE_MASK;
+ } else
lvds &= ~LVDS_ENABLE_DITHER;
}
}
@@ -3337,6 +3718,20 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
}
if (is_dp)
intel_dp_set_m_n(crtc, mode, adjusted_mode);
+ else if (HAS_PCH_SPLIT(dev)) {
+ /* For non-DP output, clear any trans DP clock recovery setting.*/
+ if (pipe == 0) {
+ I915_WRITE(TRANSA_DATA_M1, 0);
+ I915_WRITE(TRANSA_DATA_N1, 0);
+ I915_WRITE(TRANSA_DP_LINK_M1, 0);
+ I915_WRITE(TRANSA_DP_LINK_N1, 0);
+ } else {
+ I915_WRITE(TRANSB_DATA_M1, 0);
+ I915_WRITE(TRANSB_DATA_N1, 0);
+ I915_WRITE(TRANSB_DP_LINK_M1, 0);
+ I915_WRITE(TRANSB_DP_LINK_N1, 0);
+ }
+ }
if (!is_edp) {
I915_WRITE(fp_reg, fp);
@@ -3411,6 +3806,18 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
/* enable FDI RX PLL too */
temp = I915_READ(fdi_rx_reg);
I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE);
+ I915_READ(fdi_rx_reg);
+ udelay(200);
+
+ /* enable FDI TX PLL too */
+ temp = I915_READ(fdi_tx_reg);
+ I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE);
+ I915_READ(fdi_tx_reg);
+
+ /* enable FDI RX PCDCLK */
+ temp = I915_READ(fdi_rx_reg);
+ I915_WRITE(fdi_rx_reg, temp | FDI_SEL_PCDCLK);
+ I915_READ(fdi_rx_reg);
udelay(200);
}
}
@@ -3671,6 +4078,7 @@ static struct drm_display_mode load_detect_mode = {
};
struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
+ struct drm_connector *connector,
struct drm_display_mode *mode,
int *dpms_mode)
{
@@ -3729,7 +4137,7 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
}
encoder->crtc = crtc;
- intel_encoder->base.encoder = encoder;
+ connector->encoder = encoder;
intel_encoder->load_detect_temp = true;
intel_crtc = to_intel_crtc(crtc);
@@ -3755,7 +4163,8 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
return crtc;
}
-void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, int dpms_mode)
+void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
+ struct drm_connector *connector, int dpms_mode)
{
struct drm_encoder *encoder = &intel_encoder->enc;
struct drm_device *dev = encoder->dev;
@@ -3765,7 +4174,7 @@ void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, int dpm
if (intel_encoder->load_detect_temp) {
encoder->crtc = NULL;
- intel_encoder->base.encoder = NULL;
+ connector->encoder = NULL;
intel_encoder->load_detect_temp = false;
crtc->enabled = drm_helper_crtc_in_use(crtc);
drm_helper_disable_unused_functions(dev);
@@ -4392,14 +4801,14 @@ struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe)
return crtc;
}
-static int intel_connector_clones(struct drm_device *dev, int type_mask)
+static int intel_encoder_clones(struct drm_device *dev, int type_mask)
{
int index_mask = 0;
- struct drm_connector *connector;
+ struct drm_encoder *encoder;
int entry = 0;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
if (type_mask & intel_encoder->clone_mask)
index_mask |= (1 << entry);
entry++;
@@ -4411,7 +4820,7 @@ static int intel_connector_clones(struct drm_device *dev, int type_mask)
static void intel_setup_outputs(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_connector *connector;
+ struct drm_encoder *encoder;
intel_crt_init(dev);
@@ -4426,9 +4835,8 @@ static void intel_setup_outputs(struct drm_device *dev)
intel_dp_init(dev, DP_A);
if (I915_READ(HDMIB) & PORT_DETECTED) {
- /* check SDVOB */
- /* found = intel_sdvo_init(dev, HDMIB); */
- found = 0;
+ /* PCH SDVOB multiplex with HDMIB */
+ found = intel_sdvo_init(dev, PCH_SDVOB);
if (!found)
intel_hdmi_init(dev, HDMIB);
if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
@@ -4494,12 +4902,11 @@ static void intel_setup_outputs(struct drm_device *dev)
if (SUPPORTS_TV(dev))
intel_tv_init(dev);
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct drm_encoder *encoder = &intel_encoder->enc;
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
encoder->possible_crtcs = intel_encoder->crtc_mask;
- encoder->possible_clones = intel_connector_clones(dev,
+ encoder->possible_clones = intel_encoder_clones(dev,
intel_encoder->clone_mask);
}
}
@@ -4507,10 +4914,6 @@ static void intel_setup_outputs(struct drm_device *dev)
static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct drm_device *dev = fb->dev;
-
- if (fb->fbdev)
- intelfb_remove(dev, fb);
drm_framebuffer_cleanup(fb);
drm_gem_object_unreference_unlocked(intel_fb->obj);
@@ -4533,18 +4936,13 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = {
.create_handle = intel_user_framebuffer_create_handle,
};
-int intel_framebuffer_create(struct drm_device *dev,
- struct drm_mode_fb_cmd *mode_cmd,
- struct drm_framebuffer **fb,
- struct drm_gem_object *obj)
+int intel_framebuffer_init(struct drm_device *dev,
+ struct intel_framebuffer *intel_fb,
+ struct drm_mode_fb_cmd *mode_cmd,
+ struct drm_gem_object *obj)
{
- struct intel_framebuffer *intel_fb;
int ret;
- intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
- if (!intel_fb)
- return -ENOMEM;
-
ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
if (ret) {
DRM_ERROR("framebuffer init failed %d\n", ret);
@@ -4552,40 +4950,41 @@ int intel_framebuffer_create(struct drm_device *dev,
}
drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
-
intel_fb->obj = obj;
-
- *fb = &intel_fb->base;
-
return 0;
}
-
static struct drm_framebuffer *
intel_user_framebuffer_create(struct drm_device *dev,
struct drm_file *filp,
struct drm_mode_fb_cmd *mode_cmd)
{
struct drm_gem_object *obj;
- struct drm_framebuffer *fb;
+ struct intel_framebuffer *intel_fb;
int ret;
obj = drm_gem_object_lookup(dev, filp, mode_cmd->handle);
if (!obj)
return NULL;
- ret = intel_framebuffer_create(dev, mode_cmd, &fb, obj);
+ intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
+ if (!intel_fb)
+ return NULL;
+
+ ret = intel_framebuffer_init(dev, intel_fb,
+ mode_cmd, obj);
if (ret) {
drm_gem_object_unreference_unlocked(obj);
+ kfree(intel_fb);
return NULL;
}
- return fb;
+ return &intel_fb->base;
}
static const struct drm_mode_config_funcs intel_mode_funcs = {
.fb_create = intel_user_framebuffer_create,
- .fb_changed = intelfb_probe,
+ .output_poll_changed = intel_fb_output_poll_changed,
};
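
Aside: the intel_framebuffer_init() refactor above moves allocation of the wrapper out to the caller, so intel_user_framebuffer_create() owns both the GEM reference and the freshly allocated struct until init succeeds. A toy model of that error-handling order follows; every name and type is a hypothetical stand-in, not the DRM API.

#include <stdio.h>
#include <stdlib.h>

struct toy_object { int refcount; };
struct toy_framebuffer { struct toy_object *obj; };

/* Drop one reference; free the object when the last one goes away. */
static void toy_object_unreference(struct toy_object *obj)
{
	if (--obj->refcount == 0)
		free(obj);
}

/* Stand-in for intel_framebuffer_init(): may fail before taking ownership. */
static int toy_framebuffer_init(struct toy_framebuffer *fb,
				struct toy_object *obj, int fail)
{
	if (fail)
		return -1;
	fb->obj = obj;
	return 0;
}

/* Caller allocates the wrapper and must clean up both pieces on failure. */
static struct toy_framebuffer *toy_framebuffer_create(struct toy_object *obj,
						      int fail)
{
	struct toy_framebuffer *fb = calloc(1, sizeof(*fb));

	if (!fb)
		return NULL;
	if (toy_framebuffer_init(fb, obj, fail)) {
		toy_object_unreference(obj);	/* drop the lookup reference */
		free(fb);			/* and the half-built wrapper */
		return NULL;
	}
	return fb;
}

int main(void)
{
	struct toy_object *obj = calloc(1, sizeof(*obj));

	obj->refcount = 1;
	printf("create: %s\n", toy_framebuffer_create(obj, 1) ? "ok" : "failed");
	return 0;
}
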
static struct drm_gem_object *
@@ -4594,7 +4993,7 @@ intel_alloc_power_context(struct drm_device *dev)
struct drm_gem_object *pwrctx;
int ret;
- pwrctx = drm_gem_object_alloc(dev, 4096);
+ pwrctx = i915_gem_alloc_object(dev, 4096);
if (!pwrctx) {
DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
return NULL;
@@ -4732,6 +5131,25 @@ void intel_init_clock_gating(struct drm_device *dev)
}
I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
+
+ /*
+ * According to the spec the following bits should be set in
+ * order to enable memory self-refresh
+ * The bit 22/21 of 0x42004
+ * The bit 5 of 0x42020
+ * The bit 15 of 0x45000
+ */
+ if (IS_IRONLAKE(dev)) {
+ I915_WRITE(ILK_DISPLAY_CHICKEN2,
+ (I915_READ(ILK_DISPLAY_CHICKEN2) |
+ ILK_DPARB_GATE | ILK_VSDPFD_FULL));
+ I915_WRITE(ILK_DSPCLK_GATE,
+ (I915_READ(ILK_DSPCLK_GATE) |
+ ILK_DPARB_CLK_GATE));
+ I915_WRITE(DISP_ARB_CTL,
+ (I915_READ(DISP_ARB_CTL) |
+ DISP_FBC_WM_DIS));
+ }
return;
} else if (IS_G4X(dev)) {
uint32_t dspclk_gate;
@@ -4809,8 +5227,7 @@ static void intel_init_display(struct drm_device *dev)
else
dev_priv->display.dpms = i9xx_crtc_dpms;
- /* Only mobile has FBC, leave pointers NULL for other chips */
- if (IS_MOBILE(dev)) {
+ if (I915_HAS_FBC(dev)) {
if (IS_GM45(dev)) {
dev_priv->display.fbc_enabled = g4x_fbc_enabled;
dev_priv->display.enable_fbc = g4x_enable_fbc;
@@ -4847,9 +5264,31 @@ static void intel_init_display(struct drm_device *dev)
i830_get_display_clock_speed;
/* For FIFO watermark updates */
- if (HAS_PCH_SPLIT(dev))
- dev_priv->display.update_wm = NULL;
- else if (IS_G4X(dev))
+ if (HAS_PCH_SPLIT(dev)) {
+ if (IS_IRONLAKE(dev)) {
+ if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
+ dev_priv->display.update_wm = ironlake_update_wm;
+ else {
+ DRM_DEBUG_KMS("Failed to get proper latency. "
+ "Disable CxSR\n");
+ dev_priv->display.update_wm = NULL;
+ }
+ } else
+ dev_priv->display.update_wm = NULL;
+ } else if (IS_PINEVIEW(dev)) {
+ if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
+ dev_priv->fsb_freq,
+ dev_priv->mem_freq)) {
+ DRM_INFO("failed to find known CxSR latency "
+ "(found fsb freq %d, mem freq %d), "
+ "disabling CxSR\n",
+ dev_priv->fsb_freq, dev_priv->mem_freq);
+ /* Disable CxSR and never update its watermark again */
+ pineview_disable_cxsr(dev);
+ dev_priv->display.update_wm = NULL;
+ } else
+ dev_priv->display.update_wm = pineview_update_wm;
+ } else if (IS_G4X(dev))
dev_priv->display.update_wm = g4x_update_wm;
else if (IS_I965G(dev))
dev_priv->display.update_wm = i965_update_wm;
@@ -4923,13 +5362,6 @@ void intel_modeset_init(struct drm_device *dev)
(unsigned long)dev);
intel_setup_overlay(dev);
-
- if (IS_PINEVIEW(dev) && !intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
- dev_priv->fsb_freq,
- dev_priv->mem_freq))
- DRM_INFO("failed to find known CxSR latency "
- "(found fsb freq %d, mem freq %d), disabling CxSR\n",
- dev_priv->fsb_freq, dev_priv->mem_freq);
}
void intel_modeset_cleanup(struct drm_device *dev)
@@ -4940,6 +5372,9 @@ void intel_modeset_cleanup(struct drm_device *dev)
mutex_lock(&dev->struct_mutex);
+ drm_kms_helper_poll_fini(dev);
+ intel_fbdev_fini(dev);
+
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
/* Skip inactive CRTCs */
if (!crtc->fb)
@@ -4974,14 +5409,29 @@ void intel_modeset_cleanup(struct drm_device *dev)
}
-/* current intel driver doesn't take advantage of encoders
- always give back the encoder for the connector
-*/
-struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
+/*
+ * Return which encoder is currently attached to the connector.
+ */
+struct drm_encoder *intel_attached_encoder (struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_mode_object *obj;
+ struct drm_encoder *encoder;
+ int i;
- return &intel_encoder->enc;
+ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+ if (connector->encoder_ids[i] == 0)
+ break;
+
+ obj = drm_mode_object_find(connector->dev,
+ connector->encoder_ids[i],
+ DRM_MODE_OBJECT_ENCODER);
+ if (!obj)
+ continue;
+
+ encoder = obj_to_encoder(obj);
+ return encoder;
+ }
+ return NULL;
}
/*
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 77e40cfcf216..6b1c9a27c27a 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -48,8 +48,6 @@ struct intel_dp_priv {
uint32_t output_reg;
uint32_t DP;
uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
- uint32_t save_DP;
- uint8_t save_link_configuration[DP_LINK_CONFIGURATION_SIZE];
bool has_audio;
int dpms_mode;
uint8_t link_bw;
@@ -141,7 +139,8 @@ static int
intel_dp_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_encoder));
int max_lanes = intel_dp_max_lane_count(intel_encoder);
@@ -215,7 +214,7 @@ intel_dp_aux_ch(struct intel_encoder *intel_encoder,
{
struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
uint32_t output_reg = dp_priv->output_reg;
- struct drm_device *dev = intel_encoder->base.dev;
+ struct drm_device *dev = intel_encoder->enc.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t ch_ctl = output_reg + 0x10;
uint32_t ch_data = ch_ctl + 4;
@@ -224,19 +223,27 @@ intel_dp_aux_ch(struct intel_encoder *intel_encoder,
uint32_t ctl;
uint32_t status;
uint32_t aux_clock_divider;
- int try;
+ int try, precharge;
/* The clock divider is based off the hrawclk,
* and would like to run at 2MHz. So, take the
* hrawclk value and divide by 2 and use that
*/
- if (IS_eDP(intel_encoder))
- aux_clock_divider = 225; /* eDP input clock at 450Mhz */
- else if (HAS_PCH_SPLIT(dev))
+ if (IS_eDP(intel_encoder)) {
+ if (IS_GEN6(dev))
+ aux_clock_divider = 200; /* SNB eDP input clock at 400Mhz */
+ else
+ aux_clock_divider = 225; /* eDP input clock at 450Mhz */
+ } else if (HAS_PCH_SPLIT(dev))
aux_clock_divider = 62; /* IRL input clock fixed at 125Mhz */
else
aux_clock_divider = intel_hrawclk(dev) / 2;
+ if (IS_GEN6(dev))
+ precharge = 3;
+ else
+ precharge = 5;
+
/* Must try at least 3 times according to DP spec */
for (try = 0; try < 5; try++) {
/* Load the send data into the aux channel data registers */
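
Aside: the divider selection above just aims the AUX channel at a roughly 2 MHz bit clock, so each branch is input-clock / 2 — 450 MHz for Ironlake eDP, 400 MHz for Sandy Bridge eDP, the fixed 125 MHz PCH rawclk, or hrawclk on older parts. A trivial sketch; the g4x hrawclk figure is a made-up example.

#include <stdio.h>

/* The AUX channel wants roughly a 2 MHz bit clock: divider = input MHz / 2. */
static int aux_clock_divider(int input_clock_mhz)
{
	return input_clock_mhz / 2;
}

int main(void)
{
	printf("Ironlake eDP: %d\n", aux_clock_divider(450));	/* 225 */
	printf("SNB eDP:      %d\n", aux_clock_divider(400));	/* 200 */
	printf("PCH rawclk:   %d\n", aux_clock_divider(125));	/* 62 */
	printf("g4x hrawclk:  %d\n", aux_clock_divider(200));	/* hypothetical */
	return 0;
}
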
@@ -249,7 +256,7 @@ intel_dp_aux_ch(struct intel_encoder *intel_encoder,
ctl = (DP_AUX_CH_CTL_SEND_BUSY |
DP_AUX_CH_CTL_TIME_OUT_400us |
(send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
- (5 << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
+ (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
(aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
DP_AUX_CH_CTL_DONE |
DP_AUX_CH_CTL_TIME_OUT_ERROR |
@@ -465,7 +472,8 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
}
static int
-intel_dp_i2c_init(struct intel_encoder *intel_encoder, const char *name)
+intel_dp_i2c_init(struct intel_encoder *intel_encoder,
+ struct intel_connector *intel_connector, const char *name)
{
struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
@@ -480,7 +488,7 @@ intel_dp_i2c_init(struct intel_encoder *intel_encoder, const char *name)
strncpy (dp_priv->adapter.name, name, sizeof(dp_priv->adapter.name) - 1);
dp_priv->adapter.name[sizeof(dp_priv->adapter.name) - 1] = '\0';
dp_priv->adapter.algo_data = &dp_priv->algo;
- dp_priv->adapter.dev.parent = &intel_encoder->base.kdev;
+ dp_priv->adapter.dev.parent = &intel_connector->base.kdev;
return i2c_dp_aux_add_bus(&dp_priv->adapter);
}
@@ -555,7 +563,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
{
struct drm_device *dev = crtc->dev;
struct drm_mode_config *mode_config = &dev->mode_config;
- struct drm_connector *connector;
+ struct drm_encoder *encoder;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int lane_count = 4;
@@ -564,13 +572,16 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
/*
* Find the lane count in the intel_encoder private
*/
- list_for_each_entry(connector, &mode_config->connector_list, head) {
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
+ list_for_each_entry(encoder, &mode_config->encoder_list, head) {
+ struct intel_encoder *intel_encoder;
+ struct intel_dp_priv *dp_priv;
- if (!connector->encoder || connector->encoder->crtc != crtc)
+ if (!encoder || encoder->crtc != crtc)
continue;
+ intel_encoder = enc_to_intel_encoder(encoder);
+ dp_priv = intel_encoder->dev_priv;
+
if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
lane_count = dp_priv->lane_count;
break;
@@ -626,16 +637,24 @@ static void
intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
+ struct drm_device *dev = encoder->dev;
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
struct drm_crtc *crtc = intel_encoder->enc.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- dp_priv->DP = (DP_LINK_TRAIN_OFF |
- DP_VOLTAGE_0_4 |
- DP_PRE_EMPHASIS_0 |
- DP_SYNC_VS_HIGH |
- DP_SYNC_HS_HIGH);
+ dp_priv->DP = (DP_VOLTAGE_0_4 |
+ DP_PRE_EMPHASIS_0);
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ dp_priv->DP |= DP_SYNC_HS_HIGH;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ dp_priv->DP |= DP_SYNC_VS_HIGH;
+
+ if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
+ dp_priv->DP |= DP_LINK_TRAIN_OFF_CPT;
+ else
+ dp_priv->DP |= DP_LINK_TRAIN_OFF;
switch (dp_priv->lane_count) {
case 1:
@@ -664,7 +683,8 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
dp_priv->DP |= DP_ENHANCED_FRAMING;
}
- if (intel_crtc->pipe == 1)
+ /* CPT DP's pipe select is decided in TRANS_DP_CTL */
+ if (intel_crtc->pipe == 1 && !HAS_PCH_CPT(dev))
dp_priv->DP |= DP_PIPEB_SELECT;
if (IS_eDP(intel_encoder)) {
@@ -704,7 +724,7 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
{
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
- struct drm_device *dev = intel_encoder->base.dev;
+ struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t dp_reg = I915_READ(dp_priv->output_reg);
@@ -749,20 +769,6 @@ intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
return link_status[r - DP_LANE0_1_STATUS];
}
-static void
-intel_dp_save(struct drm_connector *connector)
-{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct drm_device *dev = intel_encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
-
- dp_priv->save_DP = I915_READ(dp_priv->output_reg);
- intel_dp_aux_native_read(intel_encoder, DP_LINK_BW_SET,
- dp_priv->save_link_configuration,
- sizeof (dp_priv->save_link_configuration));
-}
-
static uint8_t
intel_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE],
int lane)
@@ -892,6 +898,25 @@ intel_dp_signal_levels(uint8_t train_set, int lane_count)
return signal_levels;
}
+/* Gen6's DP voltage swing and pre-emphasis control */
+static uint32_t
+intel_gen6_edp_signal_levels(uint8_t train_set)
+{
+ switch (train_set & (DP_TRAIN_VOLTAGE_SWING_MASK|DP_TRAIN_PRE_EMPHASIS_MASK)) {
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
+ return EDP_LINK_TRAIN_400MV_0DB_SNB_B;
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
+ return EDP_LINK_TRAIN_400MV_6DB_SNB_B;
+ case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ return EDP_LINK_TRAIN_600MV_3_5DB_SNB_B;
+ case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
+ return EDP_LINK_TRAIN_800MV_0DB_SNB_B;
+ default:
+ DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level\n");
+ return EDP_LINK_TRAIN_400MV_0DB_SNB_B;
+ }
+}
+
static uint8_t
intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
int lane)
@@ -948,7 +973,7 @@ intel_dp_set_link_train(struct intel_encoder *intel_encoder,
uint8_t train_set[4],
bool first)
{
- struct drm_device *dev = intel_encoder->base.dev;
+ struct drm_device *dev = intel_encoder->enc.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
int ret;
@@ -974,7 +999,7 @@ static void
intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE])
{
- struct drm_device *dev = intel_encoder->base.dev;
+ struct drm_device *dev = intel_encoder->enc.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
uint8_t train_set[4];
@@ -985,23 +1010,38 @@ intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
bool channel_eq = false;
bool first = true;
int tries;
+ u32 reg;
/* Write the link configuration data */
- intel_dp_aux_native_write(intel_encoder, 0x100,
+ intel_dp_aux_native_write(intel_encoder, DP_LINK_BW_SET,
link_configuration, DP_LINK_CONFIGURATION_SIZE);
DP |= DP_PORT_EN;
- DP &= ~DP_LINK_TRAIN_MASK;
+ if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
+ DP &= ~DP_LINK_TRAIN_MASK_CPT;
+ else
+ DP &= ~DP_LINK_TRAIN_MASK;
memset(train_set, 0, 4);
voltage = 0xff;
tries = 0;
clock_recovery = false;
for (;;) {
/* Use train_set[0] to set the voltage and pre emphasis values */
- uint32_t signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count);
- DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
+ uint32_t signal_levels;
+ if (IS_GEN6(dev) && IS_eDP(intel_encoder)) {
+ signal_levels = intel_gen6_edp_signal_levels(train_set[0]);
+ DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
+ } else {
+ signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count);
+ DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
+ }
- if (!intel_dp_set_link_train(intel_encoder, DP | DP_LINK_TRAIN_PAT_1,
+ if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
+ reg = DP | DP_LINK_TRAIN_PAT_1_CPT;
+ else
+ reg = DP | DP_LINK_TRAIN_PAT_1;
+
+ if (!intel_dp_set_link_train(intel_encoder, reg,
DP_TRAINING_PATTERN_1, train_set, first))
break;
first = false;
@@ -1041,11 +1081,23 @@ intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
channel_eq = false;
for (;;) {
/* Use train_set[0] to set the voltage and pre emphasis values */
- uint32_t signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count);
- DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
+ uint32_t signal_levels;
+
+ if (IS_GEN6(dev) && IS_eDP(intel_encoder)) {
+ signal_levels = intel_gen6_edp_signal_levels(train_set[0]);
+ DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
+ } else {
+ signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count);
+ DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
+ }
+
+ if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
+ reg = DP | DP_LINK_TRAIN_PAT_2_CPT;
+ else
+ reg = DP | DP_LINK_TRAIN_PAT_2;
/* channel eq pattern */
- if (!intel_dp_set_link_train(intel_encoder, DP | DP_LINK_TRAIN_PAT_2,
+ if (!intel_dp_set_link_train(intel_encoder, reg,
DP_TRAINING_PATTERN_2, train_set,
false))
break;
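
Aside: structurally the two loops above are the standard DisplayPort training sequence — drive training pattern 1 until the sink reports clock recovery (raising voltage swing / pre-emphasis as it asks), then pattern 2 until channel equalization holds, then drop back to normal pixels. A toy model of that control flow with faked sink responses; it only shows the phase ordering, not the driver's real DPCD handling.

#include <stdbool.h>
#include <stdio.h>

/* Faked sink responses so the toy terminates; real code reads DPCD status. */
static bool fake_clock_recovery_ok(int vswing) { return vswing >= 2; }
static bool fake_channel_eq_ok(int tries)      { return tries >= 1; }

int main(void)
{
	int vswing = 0;
	int tries;

	/* Phase 1: clock recovery with training pattern 1. */
	while (!fake_clock_recovery_ok(vswing)) {
		printf("pattern 1, voltage swing level %d\n", vswing);
		vswing++;	/* follow the sink's adjust request */
	}

	/* Phase 2: channel equalization with training pattern 2. */
	for (tries = 0; !fake_channel_eq_ok(tries); tries++)
		printf("pattern 2, try %d\n", tries);

	printf("training done, set link back to normal\n");
	return 0;
}
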
@@ -1068,7 +1120,12 @@ intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
++tries;
}
- I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_OFF);
+ if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
+ reg = DP | DP_LINK_TRAIN_OFF_CPT;
+ else
+ reg = DP | DP_LINK_TRAIN_OFF;
+
+ I915_WRITE(dp_priv->output_reg, reg);
POSTING_READ(dp_priv->output_reg);
intel_dp_aux_native_write_1(intel_encoder,
DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE);
@@ -1077,7 +1134,7 @@ intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
static void
intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP)
{
- struct drm_device *dev = intel_encoder->base.dev;
+ struct drm_device *dev = intel_encoder->enc.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
@@ -1090,9 +1147,15 @@ intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP)
udelay(100);
}
- DP &= ~DP_LINK_TRAIN_MASK;
- I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
- POSTING_READ(dp_priv->output_reg);
+ if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder)) {
+ DP &= ~DP_LINK_TRAIN_MASK_CPT;
+ I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
+ POSTING_READ(dp_priv->output_reg);
+ } else {
+ DP &= ~DP_LINK_TRAIN_MASK;
+ I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
+ POSTING_READ(dp_priv->output_reg);
+ }
udelay(17000);
@@ -1102,18 +1165,6 @@ intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP)
POSTING_READ(dp_priv->output_reg);
}
-static void
-intel_dp_restore(struct drm_connector *connector)
-{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
-
- if (dp_priv->save_DP & DP_PORT_EN)
- intel_dp_link_train(intel_encoder, dp_priv->save_DP, dp_priv->save_link_configuration);
- else
- intel_dp_link_down(intel_encoder, dp_priv->save_DP);
-}
-
/*
* According to DP spec
* 5.1.2:
@@ -1144,7 +1195,8 @@ intel_dp_check_link_status(struct intel_encoder *intel_encoder)
static enum drm_connector_status
ironlake_dp_detect(struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
enum drm_connector_status status;
@@ -1168,8 +1220,9 @@ ironlake_dp_detect(struct drm_connector *connector)
static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct drm_device *dev = intel_encoder->base.dev;
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct drm_device *dev = intel_encoder->enc.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
uint32_t temp, bit;
@@ -1180,16 +1233,6 @@ intel_dp_detect(struct drm_connector *connector)
if (HAS_PCH_SPLIT(dev))
return ironlake_dp_detect(connector);
- temp = I915_READ(PORT_HOTPLUG_EN);
-
- I915_WRITE(PORT_HOTPLUG_EN,
- temp |
- DPB_HOTPLUG_INT_EN |
- DPC_HOTPLUG_INT_EN |
- DPD_HOTPLUG_INT_EN);
-
- POSTING_READ(PORT_HOTPLUG_EN);
-
switch (dp_priv->output_reg) {
case DP_B:
bit = DPB_HOTPLUG_INT_STATUS;
@@ -1222,15 +1265,16 @@ intel_dp_detect(struct drm_connector *connector)
static int intel_dp_get_modes(struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct drm_device *dev = intel_encoder->base.dev;
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct drm_device *dev = intel_encoder->enc.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
/* We should parse the EDID data and find out if it has an audio sink
*/
- ret = intel_ddc_get_modes(intel_encoder);
+ ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
if (ret)
return ret;
@@ -1249,13 +1293,9 @@ static int intel_dp_get_modes(struct drm_connector *connector)
static void
intel_dp_destroy (struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
-
- if (intel_encoder->i2c_bus)
- intel_i2c_destroy(intel_encoder->i2c_bus);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
- kfree(intel_encoder);
+ kfree(connector);
}
static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
@@ -1268,8 +1308,6 @@ static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
static const struct drm_connector_funcs intel_dp_connector_funcs = {
.dpms = drm_helper_connector_dpms,
- .save = intel_dp_save,
- .restore = intel_dp_restore,
.detect = intel_dp_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = intel_dp_destroy,
@@ -1278,12 +1316,17 @@ static const struct drm_connector_funcs intel_dp_connector_funcs = {
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
.get_modes = intel_dp_get_modes,
.mode_valid = intel_dp_mode_valid,
- .best_encoder = intel_best_encoder,
+ .best_encoder = intel_attached_encoder,
};
static void intel_dp_enc_destroy(struct drm_encoder *encoder)
{
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+
+ if (intel_encoder->i2c_bus)
+ intel_i2c_destroy(intel_encoder->i2c_bus);
drm_encoder_cleanup(encoder);
+ kfree(intel_encoder);
}
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
@@ -1299,12 +1342,35 @@ intel_dp_hot_plug(struct intel_encoder *intel_encoder)
intel_dp_check_link_status(intel_encoder);
}
+/* Return which DP Port should be selected for Transcoder DP control */
+int
+intel_trans_dp_port_sel (struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct drm_encoder *encoder;
+ struct intel_encoder *intel_encoder = NULL;
+
+ list_for_each_entry(encoder, &mode_config->encoder_list, head) {
+ if (!encoder || encoder->crtc != crtc)
+ continue;
+
+ intel_encoder = enc_to_intel_encoder(encoder);
+ if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
+ return dp_priv->output_reg;
+ }
+ }
+ return -1;
+}
+
void
intel_dp_init(struct drm_device *dev, int output_reg)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_connector *connector;
struct intel_encoder *intel_encoder;
+ struct intel_connector *intel_connector;
struct intel_dp_priv *dp_priv;
const char *name = NULL;
@@ -1313,13 +1379,21 @@ intel_dp_init(struct drm_device *dev, int output_reg)
if (!intel_encoder)
return;
+ intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+ if (!intel_connector) {
+ kfree(intel_encoder);
+ return;
+ }
+
dp_priv = (struct intel_dp_priv *)(intel_encoder + 1);
- connector = &intel_encoder->base;
+ connector = &intel_connector->base;
drm_connector_init(dev, connector, &intel_dp_connector_funcs,
DRM_MODE_CONNECTOR_DisplayPort);
drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+
if (output_reg == DP_A)
intel_encoder->type = INTEL_OUTPUT_EDP;
else
@@ -1349,7 +1423,7 @@ intel_dp_init(struct drm_device *dev, int output_reg)
DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(&intel_encoder->enc, &intel_dp_helper_funcs);
- drm_mode_connector_attach_encoder(&intel_encoder->base,
+ drm_mode_connector_attach_encoder(&intel_connector->base,
&intel_encoder->enc);
drm_sysfs_connector_add(connector);
@@ -1378,7 +1452,7 @@ intel_dp_init(struct drm_device *dev, int output_reg)
break;
}
- intel_dp_i2c_init(intel_encoder, name);
+ intel_dp_i2c_init(intel_encoder, intel_connector, name);
intel_encoder->ddc_bus = &dp_priv->adapter;
intel_encoder->hot_plug = intel_dp_hot_plug;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index e30253755f12..df931f787665 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -96,8 +96,6 @@ struct intel_framebuffer {
struct intel_encoder {
- struct drm_connector base;
-
struct drm_encoder enc;
int type;
struct i2c_adapter *i2c_bus;
@@ -110,6 +108,11 @@ struct intel_encoder {
int clone_mask;
};
+struct intel_connector {
+ struct drm_connector base;
+ void *dev_priv;
+};
+
struct intel_crtc;
struct intel_overlay {
struct drm_device *dev;
@@ -149,17 +152,18 @@ struct intel_crtc {
bool lowfreq_avail;
struct intel_overlay *overlay;
struct intel_unpin_work *unpin_work;
+ int fdi_lanes;
};
#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
-#define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
+#define to_intel_connector(x) container_of(x, struct intel_connector, base)
#define enc_to_intel_encoder(x) container_of(x, struct intel_encoder, enc)
#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg,
const char *name);
void intel_i2c_destroy(struct i2c_adapter *adapter);
-int intel_ddc_get_modes(struct intel_encoder *intel_encoder);
+int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
extern bool intel_ddc_probe(struct intel_encoder *intel_encoder);
void intel_i2c_quirk_set(struct drm_device *dev, bool enable);
void intel_i2c_reset_gmbus(struct drm_device *dev);
@@ -183,7 +187,7 @@ extern void intel_crtc_load_lut(struct drm_crtc *crtc);
extern void intel_encoder_prepare (struct drm_encoder *encoder);
extern void intel_encoder_commit (struct drm_encoder *encoder);
-extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
+extern struct drm_encoder *intel_attached_encoder(struct drm_connector *connector);
extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
struct drm_crtc *crtc);
@@ -192,17 +196,16 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
extern void intel_wait_for_vblank(struct drm_device *dev);
extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe);
extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
+ struct drm_connector *connector,
struct drm_display_mode *mode,
int *dpms_mode);
extern void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
+ struct drm_connector *connector,
int dpms_mode);
extern struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB);
extern int intel_sdvo_supports_hotplug(struct drm_connector *connector);
extern void intel_sdvo_set_hotplug(struct drm_connector *connector, int enable);
-extern int intelfb_probe(struct drm_device *dev);
-extern int intelfb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
-extern int intelfb_resize(struct drm_device *dev, struct drm_crtc *crtc);
extern void intelfb_restore(void);
extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, int regno);
@@ -212,10 +215,12 @@ extern void intel_init_clock_gating(struct drm_device *dev);
extern void ironlake_enable_drps(struct drm_device *dev);
extern void ironlake_disable_drps(struct drm_device *dev);
-extern int intel_framebuffer_create(struct drm_device *dev,
- struct drm_mode_fb_cmd *mode_cmd,
- struct drm_framebuffer **fb,
- struct drm_gem_object *obj);
+extern int intel_framebuffer_init(struct drm_device *dev,
+ struct intel_framebuffer *ifb,
+ struct drm_mode_fb_cmd *mode_cmd,
+ struct drm_gem_object *obj);
+extern int intel_fbdev_init(struct drm_device *dev);
+extern void intel_fbdev_fini(struct drm_device *dev);
extern void intel_prepare_page_flip(struct drm_device *dev, int plane);
extern void intel_finish_page_flip(struct drm_device *dev, int pipe);
@@ -229,4 +234,6 @@ extern int intel_overlay_put_image(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int intel_overlay_attrs(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+
+extern void intel_fb_output_poll_changed(struct drm_device *dev);
#endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index ebf213c96b9c..227feca7cf8d 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -96,39 +96,11 @@ static void intel_dvo_dpms(struct drm_encoder *encoder, int mode)
}
}
-static void intel_dvo_save(struct drm_connector *connector)
-{
- struct drm_i915_private *dev_priv = connector->dev->dev_private;
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct intel_dvo_device *dvo = intel_encoder->dev_priv;
-
- /* Each output should probably just save the registers it touches,
- * but for now, use more overkill.
- */
- dev_priv->saveDVOA = I915_READ(DVOA);
- dev_priv->saveDVOB = I915_READ(DVOB);
- dev_priv->saveDVOC = I915_READ(DVOC);
-
- dvo->dev_ops->save(dvo);
-}
-
-static void intel_dvo_restore(struct drm_connector *connector)
-{
- struct drm_i915_private *dev_priv = connector->dev->dev_private;
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct intel_dvo_device *dvo = intel_encoder->dev_priv;
-
- dvo->dev_ops->restore(dvo);
-
- I915_WRITE(DVOA, dev_priv->saveDVOA);
- I915_WRITE(DVOB, dev_priv->saveDVOB);
- I915_WRITE(DVOC, dev_priv->saveDVOC);
-}
-
static int intel_dvo_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_dvo_device *dvo = intel_encoder->dev_priv;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
@@ -241,7 +213,8 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
*/
static enum drm_connector_status intel_dvo_detect(struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_dvo_device *dvo = intel_encoder->dev_priv;
return dvo->dev_ops->detect(dvo);
@@ -249,7 +222,8 @@ static enum drm_connector_status intel_dvo_detect(struct drm_connector *connecto
static int intel_dvo_get_modes(struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_dvo_device *dvo = intel_encoder->dev_priv;
/* We should probably have an i2c driver get_modes function for those
@@ -257,7 +231,7 @@ static int intel_dvo_get_modes(struct drm_connector *connector)
* (TV-out, for example), but for now with just TMDS and LVDS,
* that's not the case.
*/
- intel_ddc_get_modes(intel_encoder);
+ intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
if (!list_empty(&connector->probed_modes))
return 1;
@@ -275,38 +249,10 @@ static int intel_dvo_get_modes(struct drm_connector *connector)
static void intel_dvo_destroy (struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct intel_dvo_device *dvo = intel_encoder->dev_priv;
-
- if (dvo) {
- if (dvo->dev_ops->destroy)
- dvo->dev_ops->destroy(dvo);
- if (dvo->panel_fixed_mode)
- kfree(dvo->panel_fixed_mode);
- /* no need, in i830_dvoices[] now */
- //kfree(dvo);
- }
- if (intel_encoder->i2c_bus)
- intel_i2c_destroy(intel_encoder->i2c_bus);
- if (intel_encoder->ddc_bus)
- intel_i2c_destroy(intel_encoder->ddc_bus);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
- kfree(intel_encoder);
-}
-
-#ifdef RANDR_GET_CRTC_INTERFACE
-static struct drm_crtc *intel_dvo_get_crtc(struct drm_connector *connector)
-{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct intel_dvo_device *dvo = intel_encoder->dev_priv;
- int pipe = !!(I915_READ(dvo->dvo_reg) & SDVO_PIPE_B_SELECT);
-
- return intel_pipe_to_crtc(pScrn, pipe);
+ kfree(connector);
}
-#endif
static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = {
.dpms = intel_dvo_dpms,
@@ -318,8 +264,6 @@ static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = {
static const struct drm_connector_funcs intel_dvo_connector_funcs = {
.dpms = drm_helper_connector_dpms,
- .save = intel_dvo_save,
- .restore = intel_dvo_restore,
.detect = intel_dvo_detect,
.destroy = intel_dvo_destroy,
.fill_modes = drm_helper_probe_single_connector_modes,
@@ -328,12 +272,26 @@ static const struct drm_connector_funcs intel_dvo_connector_funcs = {
static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs = {
.mode_valid = intel_dvo_mode_valid,
.get_modes = intel_dvo_get_modes,
- .best_encoder = intel_best_encoder,
+ .best_encoder = intel_attached_encoder,
};
static void intel_dvo_enc_destroy(struct drm_encoder *encoder)
{
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_dvo_device *dvo = intel_encoder->dev_priv;
+
+ if (dvo) {
+ if (dvo->dev_ops->destroy)
+ dvo->dev_ops->destroy(dvo);
+ if (dvo->panel_fixed_mode)
+ kfree(dvo->panel_fixed_mode);
+ }
+ if (intel_encoder->i2c_bus)
+ intel_i2c_destroy(intel_encoder->i2c_bus);
+ if (intel_encoder->ddc_bus)
+ intel_i2c_destroy(intel_encoder->ddc_bus);
drm_encoder_cleanup(encoder);
+ kfree(intel_encoder);
}
static const struct drm_encoder_funcs intel_dvo_enc_funcs = {
@@ -352,7 +310,8 @@ intel_dvo_get_current_mode (struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_dvo_device *dvo = intel_encoder->dev_priv;
uint32_t dvo_reg = dvo->dvo_reg;
uint32_t dvo_val = I915_READ(dvo_reg);
@@ -384,6 +343,7 @@ intel_dvo_get_current_mode (struct drm_connector *connector)
void intel_dvo_init(struct drm_device *dev)
{
struct intel_encoder *intel_encoder;
+ struct intel_connector *intel_connector;
struct intel_dvo_device *dvo;
struct i2c_adapter *i2cbus = NULL;
int ret = 0;
@@ -393,6 +353,12 @@ void intel_dvo_init(struct drm_device *dev)
if (!intel_encoder)
return;
+ intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+ if (!intel_connector) {
+ kfree(intel_encoder);
+ return;
+ }
+
/* Set up the DDC bus */
intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOD, "DVODDC_D");
if (!intel_encoder->ddc_bus)
@@ -400,7 +366,7 @@ void intel_dvo_init(struct drm_device *dev)
/* Now, try to find a controller */
for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) {
- struct drm_connector *connector = &intel_encoder->base;
+ struct drm_connector *connector = &intel_connector->base;
int gpio;
dvo = &intel_dvo_devices[i];
@@ -471,7 +437,7 @@ void intel_dvo_init(struct drm_device *dev)
drm_encoder_helper_add(&intel_encoder->enc,
&intel_dvo_helper_funcs);
- drm_mode_connector_attach_encoder(&intel_encoder->base,
+ drm_mode_connector_attach_encoder(&intel_connector->base,
&intel_encoder->enc);
if (dvo->type == INTEL_DVO_CHIP_LVDS) {
/* For our LVDS chipsets, we should hopefully be able
@@ -496,4 +462,5 @@ void intel_dvo_init(struct drm_device *dev)
intel_i2c_destroy(i2cbus);
free_intel:
kfree(intel_encoder);
+ kfree(intel_connector);
}
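
[Annotation] The DVO hunks above show the pattern this patch repeats for every output type: the connector no longer embeds its encoder, so connector callbacks look the encoder up through the connector, and i2c bus teardown moves from connector destroy into encoder destroy. A minimal sketch of the lookup side of that pattern, using only helpers the patch itself relies on (intel_attached_encoder(), enc_to_intel_encoder()); the callback name is illustrative:

        static enum drm_connector_status
        example_detect(struct drm_connector *connector)
        {
                /* connector -> attached drm_encoder -> wrapping intel_encoder */
                struct drm_encoder *encoder = intel_attached_encoder(connector);
                struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
                struct intel_dvo_device *dvo = intel_encoder->dev_priv;

                /* device-specific probe via the encoder's private data */
                return dvo->dev_ops->detect(dvo);
        }
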
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 8a0b3bcdc7b1..6f53cf7fbc50 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -44,9 +44,10 @@
#include "i915_drm.h"
#include "i915_drv.h"
-struct intelfb_par {
+struct intel_fbdev {
struct drm_fb_helper helper;
- struct intel_framebuffer *intel_fb;
+ struct intel_framebuffer ifb;
+ struct list_head fbdev_list;
struct drm_display_mode *our_mode;
};
@@ -54,7 +55,6 @@ static struct fb_ops intelfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
- .fb_setcolreg = drm_fb_helper_setcolreg,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
@@ -63,62 +63,12 @@ static struct fb_ops intelfb_ops = {
.fb_setcmap = drm_fb_helper_setcmap,
};
-static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
- .gamma_set = intel_crtc_fb_gamma_set,
- .gamma_get = intel_crtc_fb_gamma_get,
-};
-
-
-/**
- * Currently it is assumed that the old framebuffer is reused.
- *
- * LOCKING
- * caller should hold the mode config lock.
- *
- */
-int intelfb_resize(struct drm_device *dev, struct drm_crtc *crtc)
-{
- struct fb_info *info;
- struct drm_framebuffer *fb;
- struct drm_display_mode *mode = crtc->desired_mode;
-
- fb = crtc->fb;
- if (!fb)
- return 1;
-
- info = fb->fbdev;
- if (!info)
- return 1;
-
- if (!mode)
- return 1;
-
- info->var.xres = mode->hdisplay;
- info->var.right_margin = mode->hsync_start - mode->hdisplay;
- info->var.hsync_len = mode->hsync_end - mode->hsync_start;
- info->var.left_margin = mode->htotal - mode->hsync_end;
- info->var.yres = mode->vdisplay;
- info->var.lower_margin = mode->vsync_start - mode->vdisplay;
- info->var.vsync_len = mode->vsync_end - mode->vsync_start;
- info->var.upper_margin = mode->vtotal - mode->vsync_end;
- info->var.pixclock = 10000000 / mode->htotal * 1000 / mode->vtotal * 100;
- /* avoid overflow */
- info->var.pixclock = info->var.pixclock * 1000 / mode->vrefresh;
-
- return 0;
-}
-EXPORT_SYMBOL(intelfb_resize);
-
-static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
- uint32_t fb_height, uint32_t surface_width,
- uint32_t surface_height,
- uint32_t surface_depth, uint32_t surface_bpp,
- struct drm_framebuffer **fb_p)
+static int intelfb_create(struct intel_fbdev *ifbdev,
+ struct drm_fb_helper_surface_size *sizes)
{
+ struct drm_device *dev = ifbdev->helper.dev;
struct fb_info *info;
- struct intelfb_par *par;
struct drm_framebuffer *fb;
- struct intel_framebuffer *intel_fb;
struct drm_mode_fb_cmd mode_cmd;
struct drm_gem_object *fbo = NULL;
struct drm_i915_gem_object *obj_priv;
@@ -126,19 +76,19 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
int size, ret, mmio_bar = IS_I9XX(dev) ? 0 : 1;
/* we don't do packed 24bpp */
- if (surface_bpp == 24)
- surface_bpp = 32;
+ if (sizes->surface_bpp == 24)
+ sizes->surface_bpp = 32;
- mode_cmd.width = surface_width;
- mode_cmd.height = surface_height;
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
- mode_cmd.bpp = surface_bpp;
+ mode_cmd.bpp = sizes->surface_bpp;
mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 1) / 8), 64);
- mode_cmd.depth = surface_depth;
+ mode_cmd.depth = sizes->surface_depth;
size = mode_cmd.pitch * mode_cmd.height;
size = ALIGN(size, PAGE_SIZE);
- fbo = drm_gem_object_alloc(dev, size);
+ fbo = i915_gem_alloc_object(dev, size);
if (!fbo) {
DRM_ERROR("failed to allocate framebuffer\n");
ret = -ENOMEM;
@@ -157,45 +107,37 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
/* Flush everything out, we'll be doing GTT only from now on */
i915_gem_object_set_to_gtt_domain(fbo, 1);
- ret = intel_framebuffer_create(dev, &mode_cmd, &fb, fbo);
- if (ret) {
- DRM_ERROR("failed to allocate fb.\n");
- goto out_unpin;
- }
-
- list_add(&fb->filp_head, &dev->mode_config.fb_kernel_list);
-
- intel_fb = to_intel_framebuffer(fb);
- *fb_p = fb;
-
- info = framebuffer_alloc(sizeof(struct intelfb_par), device);
+ info = framebuffer_alloc(0, device);
if (!info) {
ret = -ENOMEM;
goto out_unpin;
}
- par = info->par;
+ info->par = ifbdev;
- par->helper.funcs = &intel_fb_helper_funcs;
- par->helper.dev = dev;
- ret = drm_fb_helper_init_crtc_count(&par->helper, 2,
- INTELFB_CONN_LIMIT);
- if (ret)
- goto out_unref;
+ intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, fbo);
+
+ fb = &ifbdev->ifb.base;
+
+ ifbdev->helper.fb = fb;
+ ifbdev->helper.fbdev = info;
strcpy(info->fix.id, "inteldrmfb");
info->flags = FBINFO_DEFAULT;
-
info->fbops = &intelfb_ops;
-
/* setup aperture base/size for vesafb takeover */
- info->aperture_base = dev->mode_config.fb_base;
+ info->apertures = alloc_apertures(1);
+ if (!info->apertures) {
+ ret = -ENOMEM;
+ goto out_unpin;
+ }
+ info->apertures->ranges[0].base = dev->mode_config.fb_base;
if (IS_I9XX(dev))
- info->aperture_size = pci_resource_len(dev->pdev, 2);
+ info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 2);
else
- info->aperture_size = pci_resource_len(dev->pdev, 0);
+ info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0);
info->fix.smem_start = dev->mode_config.fb_base + obj_priv->gtt_offset;
info->fix.smem_len = size;
@@ -208,12 +150,18 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
ret = -ENOSPC;
goto out_unpin;
}
+
+ ret = fb_alloc_cmap(&info->cmap, 256, 0);
+ if (ret) {
+ ret = -ENOMEM;
+ goto out_unpin;
+ }
info->screen_size = size;
// memset(info->screen_base, 0, size);
drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
- drm_fb_helper_fill_var(info, fb, fb_width, fb_height);
+ drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);
/* FIXME: we really shouldn't expose mmio space at all */
info->fix.mmio_start = pci_resource_start(dev->pdev, mmio_bar);
@@ -225,14 +173,10 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
info->pixmap.flags = FB_PIXMAP_SYSTEM;
info->pixmap.scan_align = 1;
- fb->fbdev = info;
-
- par->intel_fb = intel_fb;
-
- /* To allow resizeing without swapping buffers */
DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
- intel_fb->base.width, intel_fb->base.height,
- obj_priv->gtt_offset, fbo);
+ fb->width, fb->height,
+ obj_priv->gtt_offset, fbo);
+
mutex_unlock(&dev->struct_mutex);
vga_switcheroo_client_fb_set(dev->pdev, info);
@@ -247,35 +191,86 @@ out:
return ret;
}
-int intelfb_probe(struct drm_device *dev)
+static int intel_fb_find_or_create_single(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
{
+ struct intel_fbdev *ifbdev = (struct intel_fbdev *)helper;
+ int new_fb = 0;
int ret;
- DRM_DEBUG_KMS("\n");
- ret = drm_fb_helper_single_fb_probe(dev, 32, intelfb_create);
- return ret;
+ if (!helper->fb) {
+ ret = intelfb_create(ifbdev, sizes);
+ if (ret)
+ return ret;
+ new_fb = 1;
+ }
+ return new_fb;
}
-EXPORT_SYMBOL(intelfb_probe);
-int intelfb_remove(struct drm_device *dev, struct drm_framebuffer *fb)
+static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
+ .gamma_set = intel_crtc_fb_gamma_set,
+ .gamma_get = intel_crtc_fb_gamma_get,
+ .fb_probe = intel_fb_find_or_create_single,
+};
+
+int intel_fbdev_destroy(struct drm_device *dev,
+ struct intel_fbdev *ifbdev)
{
struct fb_info *info;
+ struct intel_framebuffer *ifb = &ifbdev->ifb;
- if (!fb)
- return -EINVAL;
-
- info = fb->fbdev;
-
- if (info) {
- struct intelfb_par *par = info->par;
+ if (ifbdev->helper.fbdev) {
+ info = ifbdev->helper.fbdev;
unregister_framebuffer(info);
iounmap(info->screen_base);
- if (info->par)
- drm_fb_helper_free(&par->helper);
+ if (info->cmap.len)
+ fb_dealloc_cmap(&info->cmap);
framebuffer_release(info);
}
+ drm_fb_helper_fini(&ifbdev->helper);
+
+ drm_framebuffer_cleanup(&ifb->base);
+ if (ifb->obj)
+ drm_gem_object_unreference_unlocked(ifb->obj);
+
+ return 0;
+}
+
+int intel_fbdev_init(struct drm_device *dev)
+{
+ struct intel_fbdev *ifbdev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL);
+ if (!ifbdev)
+ return -ENOMEM;
+
+ dev_priv->fbdev = ifbdev;
+ ifbdev->helper.funcs = &intel_fb_helper_funcs;
+
+ drm_fb_helper_init(dev, &ifbdev->helper, 2,
+ INTELFB_CONN_LIMIT);
+
+ drm_fb_helper_single_add_all_connectors(&ifbdev->helper);
+ drm_fb_helper_initial_config(&ifbdev->helper, 32);
return 0;
}
-EXPORT_SYMBOL(intelfb_remove);
+
+void intel_fbdev_fini(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ if (!dev_priv->fbdev)
+ return;
+
+ intel_fbdev_destroy(dev, dev_priv->fbdev);
+ kfree(dev_priv->fbdev);
+ dev_priv->fbdev = NULL;
+}
MODULE_LICENSE("GPL and additional rights");
+
+void intel_fb_output_poll_changed(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper);
+}
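
[Annotation] The intel_fb.c rewrite replaces the per-framebuffer intelfb_par with a long-lived struct intel_fbdev owned by dev_priv and driven through drm_fb_helper's fb_probe hook. A hedged sketch of where the new entry points would sit in a driver's load and unload paths; example_load()/example_unload() are hypothetical wrappers, and the assumption is that outputs are already registered before intel_fbdev_init() runs so drm_fb_helper_single_add_all_connectors() can find them:

        static int example_load(struct drm_device *dev)
        {
                /* assumes connectors/encoders were registered earlier in load */
                return intel_fbdev_init(dev);
        }

        static void example_unload(struct drm_device *dev)
        {
                /* unregisters the fbdev and frees dev_priv->fbdev */
                intel_fbdev_fini(dev);
        }
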
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 48cade0cf7b1..65727f0a79a3 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -39,7 +39,6 @@
struct intel_hdmi_priv {
u32 sdvox_reg;
- u32 save_SDVOX;
bool has_hdmi_sink;
};
@@ -63,8 +62,12 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
if (hdmi_priv->has_hdmi_sink)
sdvox |= SDVO_AUDIO_ENABLE;
- if (intel_crtc->pipe == 1)
- sdvox |= SDVO_PIPE_B_SELECT;
+ if (intel_crtc->pipe == 1) {
+ if (HAS_PCH_CPT(dev))
+ sdvox |= PORT_TRANS_B_SEL_CPT;
+ else
+ sdvox |= SDVO_PIPE_B_SELECT;
+ }
I915_WRITE(hdmi_priv->sdvox_reg, sdvox);
POSTING_READ(hdmi_priv->sdvox_reg);
@@ -106,27 +109,6 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
}
}
-static void intel_hdmi_save(struct drm_connector *connector)
-{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
-
- hdmi_priv->save_SDVOX = I915_READ(hdmi_priv->sdvox_reg);
-}
-
-static void intel_hdmi_restore(struct drm_connector *connector)
-{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
-
- I915_WRITE(hdmi_priv->sdvox_reg, hdmi_priv->save_SDVOX);
- POSTING_READ(hdmi_priv->sdvox_reg);
-}
-
static int intel_hdmi_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
@@ -151,13 +133,14 @@ static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
static enum drm_connector_status
intel_hdmi_detect(struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
struct edid *edid = NULL;
enum drm_connector_status status = connector_status_disconnected;
hdmi_priv->has_hdmi_sink = false;
- edid = drm_get_edid(&intel_encoder->base,
+ edid = drm_get_edid(connector,
intel_encoder->ddc_bus);
if (edid) {
@@ -165,7 +148,7 @@ intel_hdmi_detect(struct drm_connector *connector)
status = connector_status_connected;
hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
}
- intel_encoder->base.display_info.raw_edid = NULL;
+ connector->display_info.raw_edid = NULL;
kfree(edid);
}
@@ -174,24 +157,21 @@ intel_hdmi_detect(struct drm_connector *connector)
static int intel_hdmi_get_modes(struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
/* We should parse the EDID data and find out if it's an HDMI sink so
* we can send audio to it.
*/
- return intel_ddc_get_modes(intel_encoder);
+ return intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
}
static void intel_hdmi_destroy(struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
-
- if (intel_encoder->i2c_bus)
- intel_i2c_destroy(intel_encoder->i2c_bus);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
- kfree(intel_encoder);
+ kfree(connector);
}
static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
@@ -204,8 +184,6 @@ static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
.dpms = drm_helper_connector_dpms,
- .save = intel_hdmi_save,
- .restore = intel_hdmi_restore,
.detect = intel_hdmi_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = intel_hdmi_destroy,
@@ -214,12 +192,17 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = {
.get_modes = intel_hdmi_get_modes,
.mode_valid = intel_hdmi_mode_valid,
- .best_encoder = intel_best_encoder,
+ .best_encoder = intel_attached_encoder,
};
static void intel_hdmi_enc_destroy(struct drm_encoder *encoder)
{
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+
+ if (intel_encoder->i2c_bus)
+ intel_i2c_destroy(intel_encoder->i2c_bus);
drm_encoder_cleanup(encoder);
+ kfree(intel_encoder);
}
static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
@@ -231,21 +214,30 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_connector *connector;
struct intel_encoder *intel_encoder;
+ struct intel_connector *intel_connector;
struct intel_hdmi_priv *hdmi_priv;
intel_encoder = kcalloc(sizeof(struct intel_encoder) +
sizeof(struct intel_hdmi_priv), 1, GFP_KERNEL);
if (!intel_encoder)
return;
+
+ intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+ if (!intel_connector) {
+ kfree(intel_encoder);
+ return;
+ }
+
hdmi_priv = (struct intel_hdmi_priv *)(intel_encoder + 1);
- connector = &intel_encoder->base;
+ connector = &intel_connector->base;
drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
DRM_MODE_CONNECTOR_HDMIA);
drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs);
intel_encoder->type = INTEL_OUTPUT_HDMI;
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
connector->interlace_allowed = 0;
connector->doublescan_allowed = 0;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
@@ -285,7 +277,7 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(&intel_encoder->enc, &intel_hdmi_helper_funcs);
- drm_mode_connector_attach_encoder(&intel_encoder->base,
+ drm_mode_connector_attach_encoder(&intel_connector->base,
&intel_encoder->enc);
drm_sysfs_connector_add(connector);
@@ -303,6 +295,7 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
err_connector:
drm_connector_cleanup(connector);
kfree(intel_encoder);
+ kfree(intel_connector);
return;
}
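
[Annotation] Besides the connector split, the HDMI hunks above add a PCH-split (CPT) case to pipe-B selection: on CPT parts the SDVOX register routes the port to a transcoder rather than directly to a pipe, so PORT_TRANS_B_SEL_CPT is used instead of SDVO_PIPE_B_SELECT. A one-line sketch of that choice; has_pch_cpt stands in for the HAS_PCH_CPT(dev) test used in the patch:

        static u32 example_pipe_b_select_bit(bool has_pch_cpt)
        {
                /* CPT routes the port through a transcoder, not a raw pipe bit */
                return has_pch_cpt ? PORT_TRANS_B_SEL_CPT : SDVO_PIPE_B_SELECT;
        }
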
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index b66806a37d37..6a1accd83aec 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -139,75 +139,6 @@ static void intel_lvds_dpms(struct drm_encoder *encoder, int mode)
/* XXX: We never power down the LVDS pairs. */
}
-static void intel_lvds_save(struct drm_connector *connector)
-{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg;
- u32 pwm_ctl_reg;
-
- if (HAS_PCH_SPLIT(dev)) {
- pp_on_reg = PCH_PP_ON_DELAYS;
- pp_off_reg = PCH_PP_OFF_DELAYS;
- pp_ctl_reg = PCH_PP_CONTROL;
- pp_div_reg = PCH_PP_DIVISOR;
- pwm_ctl_reg = BLC_PWM_CPU_CTL;
- } else {
- pp_on_reg = PP_ON_DELAYS;
- pp_off_reg = PP_OFF_DELAYS;
- pp_ctl_reg = PP_CONTROL;
- pp_div_reg = PP_DIVISOR;
- pwm_ctl_reg = BLC_PWM_CTL;
- }
-
- dev_priv->savePP_ON = I915_READ(pp_on_reg);
- dev_priv->savePP_OFF = I915_READ(pp_off_reg);
- dev_priv->savePP_CONTROL = I915_READ(pp_ctl_reg);
- dev_priv->savePP_DIVISOR = I915_READ(pp_div_reg);
- dev_priv->saveBLC_PWM_CTL = I915_READ(pwm_ctl_reg);
- dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL &
- BACKLIGHT_DUTY_CYCLE_MASK);
-
- /*
- * If the light is off at server startup, just make it full brightness
- */
- if (dev_priv->backlight_duty_cycle == 0)
- dev_priv->backlight_duty_cycle =
- intel_lvds_get_max_backlight(dev);
-}
-
-static void intel_lvds_restore(struct drm_connector *connector)
-{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg;
- u32 pwm_ctl_reg;
-
- if (HAS_PCH_SPLIT(dev)) {
- pp_on_reg = PCH_PP_ON_DELAYS;
- pp_off_reg = PCH_PP_OFF_DELAYS;
- pp_ctl_reg = PCH_PP_CONTROL;
- pp_div_reg = PCH_PP_DIVISOR;
- pwm_ctl_reg = BLC_PWM_CPU_CTL;
- } else {
- pp_on_reg = PP_ON_DELAYS;
- pp_off_reg = PP_OFF_DELAYS;
- pp_ctl_reg = PP_CONTROL;
- pp_div_reg = PP_DIVISOR;
- pwm_ctl_reg = BLC_PWM_CTL;
- }
-
- I915_WRITE(pwm_ctl_reg, dev_priv->saveBLC_PWM_CTL);
- I915_WRITE(pp_on_reg, dev_priv->savePP_ON);
- I915_WRITE(pp_off_reg, dev_priv->savePP_OFF);
- I915_WRITE(pp_div_reg, dev_priv->savePP_DIVISOR);
- I915_WRITE(pp_ctl_reg, dev_priv->savePP_CONTROL);
- if (dev_priv->savePP_CONTROL & POWER_TARGET_ON)
- intel_lvds_set_power(dev, true);
- else
- intel_lvds_set_power(dev, false);
-}
-
static int intel_lvds_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
@@ -635,12 +566,13 @@ static enum drm_connector_status intel_lvds_detect(struct drm_connector *connect
static int intel_lvds_get_modes(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct drm_i915_private *dev_priv = dev->dev_private;
int ret = 0;
if (dev_priv->lvds_edid_good) {
- ret = intel_ddc_get_modes(intel_encoder);
+ ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
if (ret)
return ret;
@@ -717,11 +649,8 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
static void intel_lvds_destroy(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
struct drm_i915_private *dev_priv = dev->dev_private;
- if (intel_encoder->ddc_bus)
- intel_i2c_destroy(intel_encoder->ddc_bus);
if (dev_priv->lid_notifier.notifier_call)
acpi_lid_notifier_unregister(&dev_priv->lid_notifier);
drm_sysfs_connector_remove(connector);
@@ -734,13 +663,14 @@ static int intel_lvds_set_property(struct drm_connector *connector,
uint64_t value)
{
struct drm_device *dev = connector->dev;
- struct intel_encoder *intel_encoder =
- to_intel_encoder(connector);
if (property == dev->mode_config.scaling_mode_property &&
connector->encoder) {
struct drm_crtc *crtc = connector->encoder->crtc;
+ struct drm_encoder *encoder = connector->encoder;
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv;
+
if (value == DRM_MODE_SCALE_NONE) {
DRM_DEBUG_KMS("no scaling not supported\n");
return 0;
@@ -774,13 +704,11 @@ static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = {
static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = {
.get_modes = intel_lvds_get_modes,
.mode_valid = intel_lvds_mode_valid,
- .best_encoder = intel_best_encoder,
+ .best_encoder = intel_attached_encoder,
};
static const struct drm_connector_funcs intel_lvds_connector_funcs = {
.dpms = drm_helper_connector_dpms,
- .save = intel_lvds_save,
- .restore = intel_lvds_restore,
.detect = intel_lvds_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_lvds_set_property,
@@ -790,7 +718,12 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = {
static void intel_lvds_enc_destroy(struct drm_encoder *encoder)
{
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+
+ if (intel_encoder->ddc_bus)
+ intel_i2c_destroy(intel_encoder->ddc_bus);
drm_encoder_cleanup(encoder);
+ kfree(intel_encoder);
}
static const struct drm_encoder_funcs intel_lvds_enc_funcs = {
@@ -979,6 +912,7 @@ void intel_lvds_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *intel_encoder;
+ struct intel_connector *intel_connector;
struct drm_connector *connector;
struct drm_encoder *encoder;
struct drm_display_mode *scan; /* *modes, *bios_mode; */
@@ -1012,19 +946,27 @@ void intel_lvds_init(struct drm_device *dev)
return;
}
- connector = &intel_encoder->base;
+ intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+ if (!intel_connector) {
+ kfree(intel_encoder);
+ return;
+ }
+
+ connector = &intel_connector->base;
encoder = &intel_encoder->enc;
- drm_connector_init(dev, &intel_encoder->base, &intel_lvds_connector_funcs,
+ drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
drm_encoder_init(dev, &intel_encoder->enc, &intel_lvds_enc_funcs,
DRM_MODE_ENCODER_LVDS);
- drm_mode_connector_attach_encoder(&intel_encoder->base, &intel_encoder->enc);
+ drm_mode_connector_attach_encoder(&intel_connector->base, &intel_encoder->enc);
intel_encoder->type = INTEL_OUTPUT_LVDS;
intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT);
intel_encoder->crtc_mask = (1 << 1);
+ if (IS_I965G(dev))
+ intel_encoder->crtc_mask |= (1 << 0);
drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs);
drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs);
connector->display_info.subpixel_order = SubPixelHorizontalRGB;
@@ -1039,7 +981,7 @@ void intel_lvds_init(struct drm_device *dev)
* the initial panel fitting mode will be FULL_SCREEN.
*/
- drm_connector_attach_property(&intel_encoder->base,
+ drm_connector_attach_property(&intel_connector->base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
lvds_priv->fitting_mode = DRM_MODE_SCALE_FULLSCREEN;
@@ -1067,7 +1009,7 @@ void intel_lvds_init(struct drm_device *dev)
*/
dev_priv->lvds_edid_good = true;
- if (!intel_ddc_get_modes(intel_encoder))
+ if (!intel_ddc_get_modes(connector, intel_encoder->ddc_bus))
dev_priv->lvds_edid_good = false;
list_for_each_entry(scan, &connector->probed_modes, head) {
@@ -1151,4 +1093,5 @@ failed:
drm_connector_cleanup(connector);
drm_encoder_cleanup(encoder);
kfree(intel_encoder);
+ kfree(intel_connector);
}
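
[Annotation] As with DVO and HDMI, the LVDS init path now allocates the encoder and the connector separately and must unwind both on failure. A compressed sketch of that error handling with the registration steps elided; example_output_init() is a hypothetical name:

        static int example_output_init(struct drm_device *dev)
        {
                struct intel_encoder *intel_encoder;
                struct intel_connector *intel_connector;

                intel_encoder = kzalloc(sizeof(*intel_encoder), GFP_KERNEL);
                if (!intel_encoder)
                        return -ENOMEM;

                intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
                if (!intel_connector) {
                        kfree(intel_encoder);
                        return -ENOMEM;
                }

                /* ... init and attach connector/encoder, or free both on error ... */
                return 0;
        }
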
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index 8e5c83b2d120..4b1fd3d9c73c 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -54,9 +54,9 @@ bool intel_ddc_probe(struct intel_encoder *intel_encoder)
}
};
- intel_i2c_quirk_set(intel_encoder->base.dev, true);
+ intel_i2c_quirk_set(intel_encoder->enc.dev, true);
ret = i2c_transfer(intel_encoder->ddc_bus, msgs, 2);
- intel_i2c_quirk_set(intel_encoder->base.dev, false);
+ intel_i2c_quirk_set(intel_encoder->enc.dev, false);
if (ret == 2)
return true;
@@ -66,22 +66,23 @@ bool intel_ddc_probe(struct intel_encoder *intel_encoder)
/**
* intel_ddc_get_modes - get modelist from monitor
* @connector: DRM connector device to use
+ * @adapter: i2c adapter
*
* Fetch the EDID information from @connector using the DDC bus.
*/
-int intel_ddc_get_modes(struct intel_encoder *intel_encoder)
+int intel_ddc_get_modes(struct drm_connector *connector,
+ struct i2c_adapter *adapter)
{
struct edid *edid;
int ret = 0;
- intel_i2c_quirk_set(intel_encoder->base.dev, true);
- edid = drm_get_edid(&intel_encoder->base, intel_encoder->ddc_bus);
- intel_i2c_quirk_set(intel_encoder->base.dev, false);
+ intel_i2c_quirk_set(connector->dev, true);
+ edid = drm_get_edid(connector, adapter);
+ intel_i2c_quirk_set(connector->dev, false);
if (edid) {
- drm_mode_connector_update_edid_property(&intel_encoder->base,
- edid);
- ret = drm_add_edid_modes(&intel_encoder->base, edid);
- intel_encoder->base.display_info.raw_edid = NULL;
+ drm_mode_connector_update_edid_property(connector, edid);
+ ret = drm_add_edid_modes(connector, edid);
+ connector->display_info.raw_edid = NULL;
kfree(edid);
}
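
[Annotation] The intel_modes.c change is the API pivot the rest of the patch depends on: intel_ddc_get_modes() now takes the connector plus an explicit i2c adapter, which is what allows SDVO to retry EDID on its analog DDC bus. A minimal caller in the new style, mirroring the converted get_modes hooks above:

        static int example_get_modes(struct drm_connector *connector)
        {
                struct drm_encoder *encoder = intel_attached_encoder(connector);
                struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);

                /* probe whichever adapter the caller chooses; here the digital DDC bus */
                return intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
        }
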
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 6d524a1fc271..b0e17b06eb6e 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -373,7 +373,7 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay)
/* never have the overlay hw on without showing a frame */
BUG_ON(!overlay->vid_bo);
- obj = overlay->vid_bo->obj;
+ obj = &overlay->vid_bo->base;
i915_gem_object_unpin(obj);
drm_gem_object_unreference(obj);
@@ -411,7 +411,7 @@ int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
switch (overlay->hw_wedged) {
case RELEASE_OLD_VID:
- obj = overlay->old_vid_bo->obj;
+ obj = &overlay->old_vid_bo->base;
i915_gem_object_unpin(obj);
drm_gem_object_unreference(obj);
overlay->old_vid_bo = NULL;
@@ -467,7 +467,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
if (ret != 0)
return ret;
- obj = overlay->old_vid_bo->obj;
+ obj = &overlay->old_vid_bo->base;
i915_gem_object_unpin(obj);
drm_gem_object_unreference(obj);
overlay->old_vid_bo = NULL;
@@ -1341,7 +1341,7 @@ void intel_setup_overlay(struct drm_device *dev)
return;
overlay->dev = dev;
- reg_bo = drm_gem_object_alloc(dev, PAGE_SIZE);
+ reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE);
if (!reg_bo)
goto out_free;
overlay->reg_bo = to_intel_bo(reg_bo);
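
[Annotation] The overlay hunks track a parallel i915 change: drm_i915_gem_object now embeds its struct drm_gem_object as .base, so allocation goes through i915_gem_alloc_object() and conversions use &obj->base and to_intel_bo() instead of a stored pointer. A small sketch of that pairing under those assumptions; example_alloc_release() is a hypothetical helper:

        static void example_alloc_release(struct drm_device *dev)
        {
                struct drm_gem_object *gem = i915_gem_alloc_object(dev, PAGE_SIZE);
                struct drm_i915_gem_object *obj;

                if (!gem)
                        return;

                obj = to_intel_bo(gem);         /* wrapper around the embedded base */
                /* ... program the object ... */
                drm_gem_object_unreference_unlocked(&obj->base);
        }
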
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 87d953664cb0..aba72c489a2f 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -36,7 +36,18 @@
#include "i915_drm.h"
#include "i915_drv.h"
#include "intel_sdvo_regs.h"
-#include <linux/dmi.h>
+
+#define SDVO_TMDS_MASK (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)
+#define SDVO_RGB_MASK (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1)
+#define SDVO_LVDS_MASK (SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1)
+#define SDVO_TV_MASK (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_SVID0)
+
+#define SDVO_OUTPUT_MASK (SDVO_TMDS_MASK | SDVO_RGB_MASK | SDVO_LVDS_MASK |\
+ SDVO_TV_MASK)
+
+#define IS_TV(c) (c->output_flag & SDVO_TV_MASK)
+#define IS_LVDS(c) (c->output_flag & SDVO_LVDS_MASK)
+
static char *tv_format_names[] = {
"NTSC_M" , "NTSC_J" , "NTSC_443",
@@ -86,12 +97,6 @@ struct intel_sdvo_priv {
/* This is for current tv format name */
char *tv_format_name;
- /* This contains all current supported TV format */
- char *tv_format_supported[TV_FORMAT_NUM];
- int format_supported_num;
- struct drm_property *tv_format_property;
- struct drm_property *tv_format_name_property[TV_FORMAT_NUM];
-
/**
* This is set if we treat the device as HDMI, instead of DVI.
*/
@@ -112,12 +117,6 @@ struct intel_sdvo_priv {
*/
struct drm_display_mode *sdvo_lvds_fixed_mode;
- /**
- * Returned SDTV resolutions allowed for the current format, if the
- * device reported it.
- */
- struct intel_sdvo_sdtv_resolution_reply sdtv_resolutions;
-
/*
* supported encoding mode, used to determine whether HDMI is
* supported
@@ -130,11 +129,24 @@ struct intel_sdvo_priv {
/* Mac mini hack -- use the same DDC as the analog connector */
struct i2c_adapter *analog_ddc_bus;
- int save_sdvo_mult;
- u16 save_active_outputs;
- struct intel_sdvo_dtd save_input_dtd_1, save_input_dtd_2;
- struct intel_sdvo_dtd save_output_dtd[16];
- u32 save_SDVOX;
+};
+
+struct intel_sdvo_connector {
+ /* Mark the type of connector */
+ uint16_t output_flag;
+
+ /* This contains all current supported TV format */
+ char *tv_format_supported[TV_FORMAT_NUM];
+ int format_supported_num;
+ struct drm_property *tv_format_property;
+ struct drm_property *tv_format_name_property[TV_FORMAT_NUM];
+
+ /**
+ * Returned SDTV resolutions allowed for the current format, if the
+ * device reported it.
+ */
+ struct intel_sdvo_sdtv_resolution_reply sdtv_resolutions;
+
/* add the property for the SDVO-TV */
struct drm_property *left_property;
struct drm_property *right_property;
@@ -162,7 +174,12 @@ struct intel_sdvo_priv {
};
static bool
-intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags);
+intel_sdvo_output_setup(struct intel_encoder *intel_encoder,
+ uint16_t flags);
+static void
+intel_sdvo_tv_create_property(struct drm_connector *connector, int type);
+static void
+intel_sdvo_create_enhance_property(struct drm_connector *connector);
/**
* Writes the SDVOB or SDVOC with the given value, but always writes both
@@ -171,12 +188,18 @@ intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags);
*/
static void intel_sdvo_write_sdvox(struct intel_encoder *intel_encoder, u32 val)
{
- struct drm_device *dev = intel_encoder->base.dev;
+ struct drm_device *dev = intel_encoder->enc.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
u32 bval = val, cval = val;
int i;
+ if (sdvo_priv->sdvo_reg == PCH_SDVOB) {
+ I915_WRITE(sdvo_priv->sdvo_reg, val);
+ I915_READ(sdvo_priv->sdvo_reg);
+ return;
+ }
+
if (sdvo_priv->sdvo_reg == SDVOB) {
cval = I915_READ(SDVOC);
} else {
@@ -353,7 +376,8 @@ static const struct _sdvo_cmd_name {
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA),
};
-#define SDVO_NAME(dev_priv) ((dev_priv)->sdvo_reg == SDVOB ? "SDVOB" : "SDVOC")
+#define IS_SDVOB(reg) (reg == SDVOB || reg == PCH_SDVOB)
+#define SDVO_NAME(dev_priv) (IS_SDVOB((dev_priv)->sdvo_reg) ? "SDVOB" : "SDVOC")
#define SDVO_PRIV(encoder) ((struct intel_sdvo_priv *) (encoder)->dev_priv)
static void intel_sdvo_debug_write(struct intel_encoder *intel_encoder, u8 cmd,
@@ -563,17 +587,6 @@ static bool intel_sdvo_get_trained_inputs(struct intel_encoder *intel_encoder, b
return true;
}
-static bool intel_sdvo_get_active_outputs(struct intel_encoder *intel_encoder,
- u16 *outputs)
-{
- u8 status;
-
- intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_ACTIVE_OUTPUTS, NULL, 0);
- status = intel_sdvo_read_response(intel_encoder, outputs, sizeof(*outputs));
-
- return (status == SDVO_CMD_STATUS_SUCCESS);
-}
-
static bool intel_sdvo_set_active_outputs(struct intel_encoder *intel_encoder,
u16 outputs)
{
@@ -646,40 +659,6 @@ static bool intel_sdvo_set_target_output(struct intel_encoder *intel_encoder,
return (status == SDVO_CMD_STATUS_SUCCESS);
}
-static bool intel_sdvo_get_timing(struct intel_encoder *intel_encoder, u8 cmd,
- struct intel_sdvo_dtd *dtd)
-{
- u8 status;
-
- intel_sdvo_write_cmd(intel_encoder, cmd, NULL, 0);
- status = intel_sdvo_read_response(intel_encoder, &dtd->part1,
- sizeof(dtd->part1));
- if (status != SDVO_CMD_STATUS_SUCCESS)
- return false;
-
- intel_sdvo_write_cmd(intel_encoder, cmd + 1, NULL, 0);
- status = intel_sdvo_read_response(intel_encoder, &dtd->part2,
- sizeof(dtd->part2));
- if (status != SDVO_CMD_STATUS_SUCCESS)
- return false;
-
- return true;
-}
-
-static bool intel_sdvo_get_input_timing(struct intel_encoder *intel_encoder,
- struct intel_sdvo_dtd *dtd)
-{
- return intel_sdvo_get_timing(intel_encoder,
- SDVO_CMD_GET_INPUT_TIMINGS_PART1, dtd);
-}
-
-static bool intel_sdvo_get_output_timing(struct intel_encoder *intel_encoder,
- struct intel_sdvo_dtd *dtd)
-{
- return intel_sdvo_get_timing(intel_encoder,
- SDVO_CMD_GET_OUTPUT_TIMINGS_PART1, dtd);
-}
-
static bool intel_sdvo_set_timing(struct intel_encoder *intel_encoder, u8 cmd,
struct intel_sdvo_dtd *dtd)
{
@@ -767,23 +746,6 @@ static bool intel_sdvo_get_preferred_input_timing(struct intel_encoder *intel_en
return false;
}
-static int intel_sdvo_get_clock_rate_mult(struct intel_encoder *intel_encoder)
-{
- u8 response, status;
-
- intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_CLOCK_RATE_MULT, NULL, 0);
- status = intel_sdvo_read_response(intel_encoder, &response, 1);
-
- if (status != SDVO_CMD_STATUS_SUCCESS) {
- DRM_DEBUG_KMS("Couldn't get SDVO clock rate multiplier\n");
- return SDVO_CLOCK_RATE_MULT_1X;
- } else {
- DRM_DEBUG_KMS("Current clock rate multiplier: %d\n", response);
- }
-
- return response;
-}
-
static bool intel_sdvo_set_clock_rate_mult(struct intel_encoder *intel_encoder, u8 val)
{
u8 status;
@@ -1071,7 +1033,7 @@ static void intel_sdvo_set_tv_format(struct intel_encoder *intel_encoder)
memcpy(&format, &format_map, sizeof(format_map) > sizeof(format) ?
sizeof(format) : sizeof(format_map));
- intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TV_FORMAT, &format_map,
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TV_FORMAT, &format,
sizeof(format));
status = intel_sdvo_read_response(intel_encoder, NULL, 0);
@@ -1101,7 +1063,7 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
/* Set output timings */
intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
intel_sdvo_set_target_output(intel_encoder,
- dev_priv->controlled_output);
+ dev_priv->attached_output);
intel_sdvo_set_output_timing(intel_encoder, &output_dtd);
/* Set the input timing to the screen. Assume always input 0. */
@@ -1139,7 +1101,7 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
dev_priv->sdvo_lvds_fixed_mode);
intel_sdvo_set_target_output(intel_encoder,
- dev_priv->controlled_output);
+ dev_priv->attached_output);
intel_sdvo_set_output_timing(intel_encoder, &output_dtd);
/* Set the input timing to the screen. Assume always input 0. */
@@ -1204,7 +1166,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
* channel on the motherboard. In a two-input device, the first input
* will be SDVOB and the second SDVOC.
*/
- in_out.in0 = sdvo_priv->controlled_output;
+ in_out.in0 = sdvo_priv->attached_output;
in_out.in1 = 0;
intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_IN_OUT_MAP,
@@ -1230,7 +1192,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
if (!sdvo_priv->is_tv && !sdvo_priv->is_lvds) {
/* Set the output timing to the screen */
intel_sdvo_set_target_output(intel_encoder,
- sdvo_priv->controlled_output);
+ sdvo_priv->attached_output);
intel_sdvo_set_output_timing(intel_encoder, &input_dtd);
}
@@ -1352,107 +1314,16 @@ static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
if (0)
intel_sdvo_set_encoder_power_state(intel_encoder, mode);
- intel_sdvo_set_active_outputs(intel_encoder, sdvo_priv->controlled_output);
+ intel_sdvo_set_active_outputs(intel_encoder, sdvo_priv->attached_output);
}
return;
}
-static void intel_sdvo_save(struct drm_connector *connector)
-{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
- int o;
-
- sdvo_priv->save_sdvo_mult = intel_sdvo_get_clock_rate_mult(intel_encoder);
- intel_sdvo_get_active_outputs(intel_encoder, &sdvo_priv->save_active_outputs);
-
- if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) {
- intel_sdvo_set_target_input(intel_encoder, true, false);
- intel_sdvo_get_input_timing(intel_encoder,
- &sdvo_priv->save_input_dtd_1);
- }
-
- if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) {
- intel_sdvo_set_target_input(intel_encoder, false, true);
- intel_sdvo_get_input_timing(intel_encoder,
- &sdvo_priv->save_input_dtd_2);
- }
-
- for (o = SDVO_OUTPUT_FIRST; o <= SDVO_OUTPUT_LAST; o++)
- {
- u16 this_output = (1 << o);
- if (sdvo_priv->caps.output_flags & this_output)
- {
- intel_sdvo_set_target_output(intel_encoder, this_output);
- intel_sdvo_get_output_timing(intel_encoder,
- &sdvo_priv->save_output_dtd[o]);
- }
- }
- if (sdvo_priv->is_tv) {
- /* XXX: Save TV format/enhancements. */
- }
-
- sdvo_priv->save_SDVOX = I915_READ(sdvo_priv->sdvo_reg);
-}
-
-static void intel_sdvo_restore(struct drm_connector *connector)
-{
- struct drm_device *dev = connector->dev;
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
- int o;
- int i;
- bool input1, input2;
- u8 status;
-
- intel_sdvo_set_active_outputs(intel_encoder, 0);
-
- for (o = SDVO_OUTPUT_FIRST; o <= SDVO_OUTPUT_LAST; o++)
- {
- u16 this_output = (1 << o);
- if (sdvo_priv->caps.output_flags & this_output) {
- intel_sdvo_set_target_output(intel_encoder, this_output);
- intel_sdvo_set_output_timing(intel_encoder, &sdvo_priv->save_output_dtd[o]);
- }
- }
-
- if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) {
- intel_sdvo_set_target_input(intel_encoder, true, false);
- intel_sdvo_set_input_timing(intel_encoder, &sdvo_priv->save_input_dtd_1);
- }
-
- if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) {
- intel_sdvo_set_target_input(intel_encoder, false, true);
- intel_sdvo_set_input_timing(intel_encoder, &sdvo_priv->save_input_dtd_2);
- }
-
- intel_sdvo_set_clock_rate_mult(intel_encoder, sdvo_priv->save_sdvo_mult);
-
- if (sdvo_priv->is_tv) {
- /* XXX: Restore TV format/enhancements. */
- }
-
- intel_sdvo_write_sdvox(intel_encoder, sdvo_priv->save_SDVOX);
-
- if (sdvo_priv->save_SDVOX & SDVO_ENABLE)
- {
- for (i = 0; i < 2; i++)
- intel_wait_for_vblank(dev);
- status = intel_sdvo_get_trained_inputs(intel_encoder, &input1, &input2);
- if (status == SDVO_CMD_STATUS_SUCCESS && !input1)
- DRM_DEBUG_KMS("First %s output reported failure to "
- "sync\n", SDVO_NAME(sdvo_priv));
- }
-
- intel_sdvo_set_active_outputs(intel_encoder, sdvo_priv->save_active_outputs);
-}
-
static int intel_sdvo_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
@@ -1490,6 +1361,8 @@ static bool intel_sdvo_get_capabilities(struct intel_encoder *intel_encoder, str
return true;
}
+/* No use! */
+#if 0
struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB)
{
struct drm_connector *connector = NULL;
@@ -1560,6 +1433,7 @@ void intel_sdvo_set_hotplug(struct drm_connector *connector, int on)
intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
intel_sdvo_read_response(intel_encoder, &response, 2);
}
+#endif
static bool
intel_sdvo_multifunc_encoder(struct intel_encoder *intel_encoder)
@@ -1598,12 +1472,17 @@ static struct drm_connector *
intel_find_analog_connector(struct drm_device *dev)
{
struct drm_connector *connector;
+ struct drm_encoder *encoder;
struct intel_encoder *intel_encoder;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- intel_encoder = to_intel_encoder(connector);
- if (intel_encoder->type == INTEL_OUTPUT_ANALOG)
- return connector;
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ intel_encoder = enc_to_intel_encoder(encoder);
+ if (intel_encoder->type == INTEL_OUTPUT_ANALOG) {
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (connector && encoder == intel_attached_encoder(connector))
+ return connector;
+ }
+ }
}
return NULL;
}
@@ -1625,15 +1504,17 @@ intel_analog_is_connected(struct drm_device *dev)
}
enum drm_connector_status
-intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response)
+intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
enum drm_connector_status status = connector_status_connected;
struct edid *edid = NULL;
- edid = drm_get_edid(&intel_encoder->base,
- intel_encoder->ddc_bus);
+ edid = drm_get_edid(connector, intel_encoder->ddc_bus);
/* This is only applied to SDVO cards with multiple outputs */
if (edid == NULL && intel_sdvo_multifunc_encoder(intel_encoder)) {
@@ -1646,8 +1527,7 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response)
*/
while(temp_ddc > 1) {
sdvo_priv->ddc_bus = temp_ddc;
- edid = drm_get_edid(&intel_encoder->base,
- intel_encoder->ddc_bus);
+ edid = drm_get_edid(connector, intel_encoder->ddc_bus);
if (edid) {
/*
* When we can get the EDID, maybe it is the
@@ -1664,28 +1544,25 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response)
/* when there is no edid and no monitor is connected with VGA
* port, try to use the CRT ddc to read the EDID for DVI-connector
*/
- if (edid == NULL &&
- sdvo_priv->analog_ddc_bus &&
- !intel_analog_is_connected(intel_encoder->base.dev))
- edid = drm_get_edid(&intel_encoder->base,
- sdvo_priv->analog_ddc_bus);
+ if (edid == NULL && sdvo_priv->analog_ddc_bus &&
+ !intel_analog_is_connected(connector->dev))
+ edid = drm_get_edid(connector, sdvo_priv->analog_ddc_bus);
+
if (edid != NULL) {
- /* Don't report the output as connected if it's a DVI-I
- * connector with a non-digital EDID coming out.
- */
- if (response & (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) {
- if (edid->input & DRM_EDID_INPUT_DIGITAL)
- sdvo_priv->is_hdmi =
- drm_detect_hdmi_monitor(edid);
- else
- status = connector_status_disconnected;
- }
+ bool is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
+ bool need_digital = !!(sdvo_connector->output_flag & SDVO_TMDS_MASK);
- kfree(edid);
- intel_encoder->base.display_info.raw_edid = NULL;
+ /* DDC bus is shared, match EDID to connector type */
+ if (is_digital && need_digital)
+ sdvo_priv->is_hdmi = drm_detect_hdmi_monitor(edid);
+ else if (is_digital != need_digital)
+ status = connector_status_disconnected;
- } else if (response & (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1))
+ connector->display_info.raw_edid = NULL;
+ } else
status = connector_status_disconnected;
+
+ kfree(edid);
return status;
}
@@ -1694,8 +1571,12 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect
{
uint16_t response;
u8 status;
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_connector *intel_connector = to_intel_connector(connector);
struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+ struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
+ enum drm_connector_status ret;
intel_sdvo_write_cmd(intel_encoder,
SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0);
@@ -1713,24 +1594,41 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect
if (response == 0)
return connector_status_disconnected;
- if (intel_sdvo_multifunc_encoder(intel_encoder) &&
- sdvo_priv->attached_output != response) {
- if (sdvo_priv->controlled_output != response &&
- intel_sdvo_output_setup(intel_encoder, response) != true)
- return connector_status_unknown;
- sdvo_priv->attached_output = response;
+ sdvo_priv->attached_output = response;
+
+ if ((sdvo_connector->output_flag & response) == 0)
+ ret = connector_status_disconnected;
+ else if (response & SDVO_TMDS_MASK)
+ ret = intel_sdvo_hdmi_sink_detect(connector);
+ else
+ ret = connector_status_connected;
+
+ /* May update encoder flag for like clock for SDVO TV, etc.*/
+ if (ret == connector_status_connected) {
+ sdvo_priv->is_tv = false;
+ sdvo_priv->is_lvds = false;
+ intel_encoder->needs_tv_clock = false;
+
+ if (response & SDVO_TV_MASK) {
+ sdvo_priv->is_tv = true;
+ intel_encoder->needs_tv_clock = true;
+ }
+ if (response & SDVO_LVDS_MASK)
+ sdvo_priv->is_lvds = true;
}
- return intel_sdvo_hdmi_sink_detect(connector, response);
+
+ return ret;
}
static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
int num_modes;
/* set the bus switch and get the modes */
- num_modes = intel_ddc_get_modes(intel_encoder);
+ num_modes = intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
/*
* Mac mini hack. On this device, the DVI-I connector shares one DDC
@@ -1740,17 +1638,10 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
*/
if (num_modes == 0 &&
sdvo_priv->analog_ddc_bus &&
- !intel_analog_is_connected(intel_encoder->base.dev)) {
- struct i2c_adapter *digital_ddc_bus;
-
+ !intel_analog_is_connected(connector->dev)) {
/* Switch to the analog ddc bus and try that
*/
- digital_ddc_bus = intel_encoder->ddc_bus;
- intel_encoder->ddc_bus = sdvo_priv->analog_ddc_bus;
-
- (void) intel_ddc_get_modes(intel_encoder);
-
- intel_encoder->ddc_bus = digital_ddc_bus;
+ (void) intel_ddc_get_modes(connector, sdvo_priv->analog_ddc_bus);
}
}
@@ -1821,8 +1712,9 @@ struct drm_display_mode sdvo_tv_modes[] = {
static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
{
- struct intel_encoder *output = to_intel_encoder(connector);
- struct intel_sdvo_priv *sdvo_priv = output->dev_priv;
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
struct intel_sdvo_sdtv_resolution_request tv_res;
uint32_t reply = 0, format_map = 0;
int i;
@@ -1842,11 +1734,11 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
sizeof(format_map) ? sizeof(format_map) :
sizeof(struct intel_sdvo_sdtv_resolution_request));
- intel_sdvo_set_target_output(output, sdvo_priv->controlled_output);
+ intel_sdvo_set_target_output(intel_encoder, sdvo_priv->attached_output);
- intel_sdvo_write_cmd(output, SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT,
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT,
&tv_res, sizeof(tv_res));
- status = intel_sdvo_read_response(output, &reply, 3);
+ status = intel_sdvo_read_response(intel_encoder, &reply, 3);
if (status != SDVO_CMD_STATUS_SUCCESS)
return;
@@ -1863,7 +1755,8 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct drm_i915_private *dev_priv = connector->dev->dev_private;
struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
struct drm_display_mode *newmode;
@@ -1873,7 +1766,7 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
* Assume that the preferred modes are
* arranged in priority order.
*/
- intel_ddc_get_modes(intel_encoder);
+ intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
if (list_empty(&connector->probed_modes) == false)
goto end;
@@ -1902,12 +1795,12 @@ end:
static int intel_sdvo_get_modes(struct drm_connector *connector)
{
- struct intel_encoder *output = to_intel_encoder(connector);
- struct intel_sdvo_priv *sdvo_priv = output->dev_priv;
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
- if (sdvo_priv->is_tv)
+ if (IS_TV(sdvo_connector))
intel_sdvo_get_tv_modes(connector);
- else if (sdvo_priv->is_lvds == true)
+ else if (IS_LVDS(sdvo_connector))
intel_sdvo_get_lvds_modes(connector);
else
intel_sdvo_get_ddc_modes(connector);
@@ -1920,11 +1813,11 @@ static int intel_sdvo_get_modes(struct drm_connector *connector)
static
void intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_sdvo_connector *sdvo_priv = intel_connector->dev_priv;
struct drm_device *dev = connector->dev;
- if (sdvo_priv->is_tv) {
+ if (IS_TV(sdvo_priv)) {
if (sdvo_priv->left_property)
drm_property_destroy(dev, sdvo_priv->left_property);
if (sdvo_priv->right_property)
@@ -1937,8 +1830,6 @@ void intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
drm_property_destroy(dev, sdvo_priv->hpos_property);
if (sdvo_priv->vpos_property)
drm_property_destroy(dev, sdvo_priv->vpos_property);
- }
- if (sdvo_priv->is_tv) {
if (sdvo_priv->saturation_property)
drm_property_destroy(dev,
sdvo_priv->saturation_property);
@@ -1948,7 +1839,7 @@ void intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
if (sdvo_priv->hue_property)
drm_property_destroy(dev, sdvo_priv->hue_property);
}
- if (sdvo_priv->is_tv || sdvo_priv->is_lvds) {
+ if (IS_TV(sdvo_priv) || IS_LVDS(sdvo_priv)) {
if (sdvo_priv->brightness_property)
drm_property_destroy(dev,
sdvo_priv->brightness_property);
@@ -1958,31 +1849,17 @@ void intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
static void intel_sdvo_destroy(struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
-
- if (intel_encoder->i2c_bus)
- intel_i2c_destroy(intel_encoder->i2c_bus);
- if (intel_encoder->ddc_bus)
- intel_i2c_destroy(intel_encoder->ddc_bus);
- if (sdvo_priv->analog_ddc_bus)
- intel_i2c_destroy(sdvo_priv->analog_ddc_bus);
-
- if (sdvo_priv->sdvo_lvds_fixed_mode != NULL)
- drm_mode_destroy(connector->dev,
- sdvo_priv->sdvo_lvds_fixed_mode);
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
- if (sdvo_priv->tv_format_property)
+ if (sdvo_connector->tv_format_property)
drm_property_destroy(connector->dev,
- sdvo_priv->tv_format_property);
-
- if (sdvo_priv->is_tv || sdvo_priv->is_lvds)
- intel_sdvo_destroy_enhance_property(connector);
+ sdvo_connector->tv_format_property);
+ intel_sdvo_destroy_enhance_property(connector);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
-
- kfree(intel_encoder);
+ kfree(connector);
}
static int
@@ -1990,9 +1867,11 @@ intel_sdvo_set_property(struct drm_connector *connector,
struct drm_property *property,
uint64_t val)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
- struct drm_encoder *encoder = &intel_encoder->enc;
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
struct drm_crtc *crtc = encoder->crtc;
int ret = 0;
bool changed = false;
@@ -2003,101 +1882,101 @@ intel_sdvo_set_property(struct drm_connector *connector,
if (ret < 0)
goto out;
- if (property == sdvo_priv->tv_format_property) {
+ if (property == sdvo_connector->tv_format_property) {
if (val >= TV_FORMAT_NUM) {
ret = -EINVAL;
goto out;
}
if (sdvo_priv->tv_format_name ==
- sdvo_priv->tv_format_supported[val])
+ sdvo_connector->tv_format_supported[val])
goto out;
- sdvo_priv->tv_format_name = sdvo_priv->tv_format_supported[val];
+ sdvo_priv->tv_format_name = sdvo_connector->tv_format_supported[val];
changed = true;
}
- if (sdvo_priv->is_tv || sdvo_priv->is_lvds) {
+ if (IS_TV(sdvo_connector) || IS_LVDS(sdvo_connector)) {
cmd = 0;
temp_value = val;
- if (sdvo_priv->left_property == property) {
+ if (sdvo_connector->left_property == property) {
drm_connector_property_set_value(connector,
- sdvo_priv->right_property, val);
- if (sdvo_priv->left_margin == temp_value)
+ sdvo_connector->right_property, val);
+ if (sdvo_connector->left_margin == temp_value)
goto out;
- sdvo_priv->left_margin = temp_value;
- sdvo_priv->right_margin = temp_value;
- temp_value = sdvo_priv->max_hscan -
- sdvo_priv->left_margin;
+ sdvo_connector->left_margin = temp_value;
+ sdvo_connector->right_margin = temp_value;
+ temp_value = sdvo_connector->max_hscan -
+ sdvo_connector->left_margin;
cmd = SDVO_CMD_SET_OVERSCAN_H;
- } else if (sdvo_priv->right_property == property) {
+ } else if (sdvo_connector->right_property == property) {
drm_connector_property_set_value(connector,
- sdvo_priv->left_property, val);
- if (sdvo_priv->right_margin == temp_value)
+ sdvo_connector->left_property, val);
+ if (sdvo_connector->right_margin == temp_value)
goto out;
- sdvo_priv->left_margin = temp_value;
- sdvo_priv->right_margin = temp_value;
- temp_value = sdvo_priv->max_hscan -
- sdvo_priv->left_margin;
+ sdvo_connector->left_margin = temp_value;
+ sdvo_connector->right_margin = temp_value;
+ temp_value = sdvo_connector->max_hscan -
+ sdvo_connector->left_margin;
cmd = SDVO_CMD_SET_OVERSCAN_H;
- } else if (sdvo_priv->top_property == property) {
+ } else if (sdvo_connector->top_property == property) {
drm_connector_property_set_value(connector,
- sdvo_priv->bottom_property, val);
- if (sdvo_priv->top_margin == temp_value)
+ sdvo_connector->bottom_property, val);
+ if (sdvo_connector->top_margin == temp_value)
goto out;
- sdvo_priv->top_margin = temp_value;
- sdvo_priv->bottom_margin = temp_value;
- temp_value = sdvo_priv->max_vscan -
- sdvo_priv->top_margin;
+ sdvo_connector->top_margin = temp_value;
+ sdvo_connector->bottom_margin = temp_value;
+ temp_value = sdvo_connector->max_vscan -
+ sdvo_connector->top_margin;
cmd = SDVO_CMD_SET_OVERSCAN_V;
- } else if (sdvo_priv->bottom_property == property) {
+ } else if (sdvo_connector->bottom_property == property) {
drm_connector_property_set_value(connector,
- sdvo_priv->top_property, val);
- if (sdvo_priv->bottom_margin == temp_value)
+ sdvo_connector->top_property, val);
+ if (sdvo_connector->bottom_margin == temp_value)
goto out;
- sdvo_priv->top_margin = temp_value;
- sdvo_priv->bottom_margin = temp_value;
- temp_value = sdvo_priv->max_vscan -
- sdvo_priv->top_margin;
+ sdvo_connector->top_margin = temp_value;
+ sdvo_connector->bottom_margin = temp_value;
+ temp_value = sdvo_connector->max_vscan -
+ sdvo_connector->top_margin;
cmd = SDVO_CMD_SET_OVERSCAN_V;
- } else if (sdvo_priv->hpos_property == property) {
- if (sdvo_priv->cur_hpos == temp_value)
+ } else if (sdvo_connector->hpos_property == property) {
+ if (sdvo_connector->cur_hpos == temp_value)
goto out;
cmd = SDVO_CMD_SET_POSITION_H;
- sdvo_priv->cur_hpos = temp_value;
- } else if (sdvo_priv->vpos_property == property) {
- if (sdvo_priv->cur_vpos == temp_value)
+ sdvo_connector->cur_hpos = temp_value;
+ } else if (sdvo_connector->vpos_property == property) {
+ if (sdvo_connector->cur_vpos == temp_value)
goto out;
cmd = SDVO_CMD_SET_POSITION_V;
- sdvo_priv->cur_vpos = temp_value;
- } else if (sdvo_priv->saturation_property == property) {
- if (sdvo_priv->cur_saturation == temp_value)
+ sdvo_connector->cur_vpos = temp_value;
+ } else if (sdvo_connector->saturation_property == property) {
+ if (sdvo_connector->cur_saturation == temp_value)
goto out;
cmd = SDVO_CMD_SET_SATURATION;
- sdvo_priv->cur_saturation = temp_value;
- } else if (sdvo_priv->contrast_property == property) {
- if (sdvo_priv->cur_contrast == temp_value)
+ sdvo_connector->cur_saturation = temp_value;
+ } else if (sdvo_connector->contrast_property == property) {
+ if (sdvo_connector->cur_contrast == temp_value)
goto out;
cmd = SDVO_CMD_SET_CONTRAST;
- sdvo_priv->cur_contrast = temp_value;
- } else if (sdvo_priv->hue_property == property) {
- if (sdvo_priv->cur_hue == temp_value)
+ sdvo_connector->cur_contrast = temp_value;
+ } else if (sdvo_connector->hue_property == property) {
+ if (sdvo_connector->cur_hue == temp_value)
goto out;
cmd = SDVO_CMD_SET_HUE;
- sdvo_priv->cur_hue = temp_value;
- } else if (sdvo_priv->brightness_property == property) {
- if (sdvo_priv->cur_brightness == temp_value)
+ sdvo_connector->cur_hue = temp_value;
+ } else if (sdvo_connector->brightness_property == property) {
+ if (sdvo_connector->cur_brightness == temp_value)
goto out;
cmd = SDVO_CMD_SET_BRIGHTNESS;
- sdvo_priv->cur_brightness = temp_value;
+ sdvo_connector->cur_brightness = temp_value;
}
if (cmd) {
intel_sdvo_write_cmd(intel_encoder, cmd, &temp_value, 2);
@@ -2127,8 +2006,6 @@ static const struct drm_encoder_helper_funcs intel_sdvo_helper_funcs = {
static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
.dpms = drm_helper_connector_dpms,
- .save = intel_sdvo_save,
- .restore = intel_sdvo_restore,
.detect = intel_sdvo_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_sdvo_set_property,
@@ -2138,12 +2015,27 @@ static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs = {
.get_modes = intel_sdvo_get_modes,
.mode_valid = intel_sdvo_mode_valid,
- .best_encoder = intel_best_encoder,
+ .best_encoder = intel_attached_encoder,
};
static void intel_sdvo_enc_destroy(struct drm_encoder *encoder)
{
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+
+ if (intel_encoder->i2c_bus)
+ intel_i2c_destroy(intel_encoder->i2c_bus);
+ if (intel_encoder->ddc_bus)
+ intel_i2c_destroy(intel_encoder->ddc_bus);
+ if (sdvo_priv->analog_ddc_bus)
+ intel_i2c_destroy(sdvo_priv->analog_ddc_bus);
+
+ if (sdvo_priv->sdvo_lvds_fixed_mode != NULL)
+ drm_mode_destroy(encoder->dev,
+ sdvo_priv->sdvo_lvds_fixed_mode);
+
drm_encoder_cleanup(encoder);
+ kfree(intel_encoder);
}
static const struct drm_encoder_funcs intel_sdvo_enc_funcs = {
@@ -2159,49 +2051,29 @@ static const struct drm_encoder_funcs intel_sdvo_enc_funcs = {
* outputs, then LVDS outputs.
*/
static void
-intel_sdvo_select_ddc_bus(struct intel_sdvo_priv *dev_priv)
+intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv,
+ struct intel_sdvo_priv *sdvo, u32 reg)
{
- uint16_t mask = 0;
- unsigned int num_bits;
-
- /* Make a mask of outputs less than or equal to our own priority in the
- * list.
- */
- switch (dev_priv->controlled_output) {
- case SDVO_OUTPUT_LVDS1:
- mask |= SDVO_OUTPUT_LVDS1;
- case SDVO_OUTPUT_LVDS0:
- mask |= SDVO_OUTPUT_LVDS0;
- case SDVO_OUTPUT_TMDS1:
- mask |= SDVO_OUTPUT_TMDS1;
- case SDVO_OUTPUT_TMDS0:
- mask |= SDVO_OUTPUT_TMDS0;
- case SDVO_OUTPUT_RGB1:
- mask |= SDVO_OUTPUT_RGB1;
- case SDVO_OUTPUT_RGB0:
- mask |= SDVO_OUTPUT_RGB0;
- break;
- }
+ struct sdvo_device_mapping *mapping;
- /* Count bits to find what number we are in the priority list. */
- mask &= dev_priv->caps.output_flags;
- num_bits = hweight16(mask);
- if (num_bits > 3) {
- /* if more than 3 outputs, default to DDC bus 3 for now */
- num_bits = 3;
- }
+ if (IS_SDVOB(reg))
+ mapping = &(dev_priv->sdvo_mappings[0]);
+ else
+ mapping = &(dev_priv->sdvo_mappings[1]);
- /* Corresponds to SDVO_CONTROL_BUS_DDCx */
- dev_priv->ddc_bus = 1 << num_bits;
+ sdvo->ddc_bus = 1 << ((mapping->ddc_pin & 0xf0) >> 4);
}
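
The reworked intel_sdvo_select_ddc_bus() now takes the DDC bus straight from the VBIOS SDVO device mapping instead of counting output-priority bits: the high nibble of ddc_pin selects the bus, stored as a one-hot value. A minimal stand-alone sketch of that decoding; the mapping struct and pin value below are illustrative, not real VBIOS data.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for one sdvo_device_mapping entry. */
struct example_sdvo_mapping {
	uint8_t ddc_pin;	/* high nibble = DDC bus number */
};

int main(void)
{
	struct example_sdvo_mapping map = { .ddc_pin = 0x25 };	/* made-up pin value */
	uint16_t ddc_bus = 1 << ((map.ddc_pin & 0xf0) >> 4);

	/* 0x25 -> high nibble 2 -> one-hot bus mask 1 << 2 = 0x04 */
	printf("ddc_bus mask = 0x%02x\n", ddc_bus);
	return 0;
}
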
static bool
-intel_sdvo_get_digital_encoding_mode(struct intel_encoder *output)
+intel_sdvo_get_digital_encoding_mode(struct intel_encoder *output, int device)
{
struct intel_sdvo_priv *sdvo_priv = output->dev_priv;
uint8_t status;
- intel_sdvo_set_target_output(output, sdvo_priv->controlled_output);
+ if (device == 0)
+ intel_sdvo_set_target_output(output, SDVO_OUTPUT_TMDS0);
+ else
+ intel_sdvo_set_target_output(output, SDVO_OUTPUT_TMDS1);
intel_sdvo_write_cmd(output, SDVO_CMD_GET_ENCODE, NULL, 0);
status = intel_sdvo_read_response(output, &sdvo_priv->is_hdmi, 1);
@@ -2214,15 +2086,13 @@ static struct intel_encoder *
intel_sdvo_chan_to_intel_encoder(struct intel_i2c_chan *chan)
{
struct drm_device *dev = chan->drm_dev;
- struct drm_connector *connector;
+ struct drm_encoder *encoder;
struct intel_encoder *intel_encoder = NULL;
- list_for_each_entry(connector,
- &dev->mode_config.connector_list, head) {
- if (to_intel_encoder(connector)->ddc_bus == &chan->adapter) {
- intel_encoder = to_intel_encoder(connector);
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ intel_encoder = enc_to_intel_encoder(encoder);
+ if (intel_encoder->ddc_bus == &chan->adapter)
break;
- }
}
return intel_encoder;
}
@@ -2259,7 +2129,7 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
struct drm_i915_private *dev_priv = dev->dev_private;
struct sdvo_device_mapping *my_mapping, *other_mapping;
- if (sdvo_reg == SDVOB) {
+ if (IS_SDVOB(sdvo_reg)) {
my_mapping = &dev_priv->sdvo_mappings[0];
other_mapping = &dev_priv->sdvo_mappings[1];
} else {
@@ -2284,120 +2154,237 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
/* No SDVO device info is found for another DVO port,
* so use mapping assumption we had before BIOS parsing.
*/
- if (sdvo_reg == SDVOB)
+ if (IS_SDVOB(sdvo_reg))
return 0x70;
else
return 0x72;
}
-static int intel_sdvo_bad_tv_callback(const struct dmi_system_id *id)
+static bool
+intel_sdvo_connector_alloc (struct intel_connector **ret)
{
- DRM_DEBUG_KMS("Ignoring bad SDVO TV connector for %s\n", id->ident);
- return 1;
+ struct intel_connector *intel_connector;
+ struct intel_sdvo_connector *sdvo_connector;
+
+ *ret = kzalloc(sizeof(*intel_connector) +
+ sizeof(*sdvo_connector), GFP_KERNEL);
+ if (!*ret)
+ return false;
+
+ intel_connector = *ret;
+ sdvo_connector = (struct intel_sdvo_connector *)(intel_connector + 1);
+ intel_connector->dev_priv = sdvo_connector;
+
+ return true;
}
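
intel_sdvo_connector_alloc() above leans on a single kzalloc() covering both structures: the SDVO-specific connector state lives immediately after the generic intel_connector, and dev_priv is just a pointer into the tail of the same allocation, which is why the destroy paths earlier in this diff can release everything with one kfree() on the connector. A hedged, stand-alone illustration of the layout trick; the struct names are simplified placeholders, not the driver's real types.

#include <stdbool.h>
#include <stdlib.h>

struct example_connector { void *dev_priv; /* ...generic fields... */ };
struct example_sdvo_state { int output_flag; /* ...SDVO-specific fields... */ };

static bool example_connector_alloc(struct example_connector **ret)
{
	struct example_connector *conn;

	/* one allocation: [example_connector][example_sdvo_state] */
	conn = calloc(1, sizeof(*conn) + sizeof(struct example_sdvo_state));
	if (!conn)
		return false;

	conn->dev_priv = conn + 1;	/* private state starts right past the base struct */
	*ret = conn;
	return true;
}
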
-static struct dmi_system_id intel_sdvo_bad_tv[] = {
- {
- .callback = intel_sdvo_bad_tv_callback,
- .ident = "IntelG45/ICH10R/DME1737",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "IBM CORPORATION"),
- DMI_MATCH(DMI_PRODUCT_NAME, "4800784"),
- },
- },
+static void
+intel_sdvo_connector_create (struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ drm_connector_init(encoder->dev, connector, &intel_sdvo_connector_funcs,
+ connector->connector_type);
- { } /* terminating entry */
-};
+ drm_connector_helper_add(connector, &intel_sdvo_connector_helper_funcs);
+
+ connector->interlace_allowed = 0;
+ connector->doublescan_allowed = 0;
+ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+
+ drm_mode_connector_attach_encoder(connector, encoder);
+ drm_sysfs_connector_add(connector);
+}
static bool
-intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags)
+intel_sdvo_dvi_init(struct intel_encoder *intel_encoder, int device)
{
- struct drm_connector *connector = &intel_encoder->base;
struct drm_encoder *encoder = &intel_encoder->enc;
struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
- bool ret = true, registered = false;
+ struct drm_connector *connector;
+ struct intel_connector *intel_connector;
+ struct intel_sdvo_connector *sdvo_connector;
+
+ if (!intel_sdvo_connector_alloc(&intel_connector))
+ return false;
+
+ sdvo_connector = intel_connector->dev_priv;
+
+ if (device == 0) {
+ sdvo_priv->controlled_output |= SDVO_OUTPUT_TMDS0;
+ sdvo_connector->output_flag = SDVO_OUTPUT_TMDS0;
+ } else if (device == 1) {
+ sdvo_priv->controlled_output |= SDVO_OUTPUT_TMDS1;
+ sdvo_connector->output_flag = SDVO_OUTPUT_TMDS1;
+ }
+
+ connector = &intel_connector->base;
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
+ encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
+ connector->connector_type = DRM_MODE_CONNECTOR_DVID;
+
+ if (intel_sdvo_get_supp_encode(intel_encoder, &sdvo_priv->encode)
+ && intel_sdvo_get_digital_encoding_mode(intel_encoder, device)
+ && sdvo_priv->is_hdmi) {
+ /* enable hdmi encoding mode if supported */
+ intel_sdvo_set_encode(intel_encoder, SDVO_ENCODE_HDMI);
+ intel_sdvo_set_colorimetry(intel_encoder,
+ SDVO_COLORIMETRY_RGB256);
+ connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
+ }
+ intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
+ (1 << INTEL_ANALOG_CLONE_BIT);
+
+ intel_sdvo_connector_create(encoder, connector);
+
+ return true;
+}
+
+static bool
+intel_sdvo_tv_init(struct intel_encoder *intel_encoder, int type)
+{
+ struct drm_encoder *encoder = &intel_encoder->enc;
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+ struct drm_connector *connector;
+ struct intel_connector *intel_connector;
+ struct intel_sdvo_connector *sdvo_connector;
+
+ if (!intel_sdvo_connector_alloc(&intel_connector))
+ return false;
+
+ connector = &intel_connector->base;
+ encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
+ connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
+ sdvo_connector = intel_connector->dev_priv;
+
+ sdvo_priv->controlled_output |= type;
+ sdvo_connector->output_flag = type;
+
+ sdvo_priv->is_tv = true;
+ intel_encoder->needs_tv_clock = true;
+ intel_encoder->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
+
+ intel_sdvo_connector_create(encoder, connector);
+
+ intel_sdvo_tv_create_property(connector, type);
+
+ intel_sdvo_create_enhance_property(connector);
+
+ return true;
+}
+
+static bool
+intel_sdvo_analog_init(struct intel_encoder *intel_encoder, int device)
+{
+ struct drm_encoder *encoder = &intel_encoder->enc;
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+ struct drm_connector *connector;
+ struct intel_connector *intel_connector;
+ struct intel_sdvo_connector *sdvo_connector;
+
+ if (!intel_sdvo_connector_alloc(&intel_connector))
+ return false;
+
+ connector = &intel_connector->base;
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ encoder->encoder_type = DRM_MODE_ENCODER_DAC;
+ connector->connector_type = DRM_MODE_CONNECTOR_VGA;
+ sdvo_connector = intel_connector->dev_priv;
+
+ if (device == 0) {
+ sdvo_priv->controlled_output |= SDVO_OUTPUT_RGB0;
+ sdvo_connector->output_flag = SDVO_OUTPUT_RGB0;
+ } else if (device == 1) {
+ sdvo_priv->controlled_output |= SDVO_OUTPUT_RGB1;
+ sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
+ }
+
+ intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
+ (1 << INTEL_ANALOG_CLONE_BIT);
+
+ intel_sdvo_connector_create(encoder, connector);
+ return true;
+}
+
+static bool
+intel_sdvo_lvds_init(struct intel_encoder *intel_encoder, int device)
+{
+ struct drm_encoder *encoder = &intel_encoder->enc;
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+ struct drm_connector *connector;
+ struct intel_connector *intel_connector;
+ struct intel_sdvo_connector *sdvo_connector;
+
+ if (!intel_sdvo_connector_alloc(&intel_connector))
+ return false;
+
+ connector = &intel_connector->base;
+ encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
+ connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
+ sdvo_connector = intel_connector->dev_priv;
+
+ sdvo_priv->is_lvds = true;
+
+ if (device == 0) {
+ sdvo_priv->controlled_output |= SDVO_OUTPUT_LVDS0;
+ sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0;
+ } else if (device == 1) {
+ sdvo_priv->controlled_output |= SDVO_OUTPUT_LVDS1;
+ sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
+ }
+
+ intel_encoder->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) |
+ (1 << INTEL_SDVO_LVDS_CLONE_BIT);
+
+ intel_sdvo_connector_create(encoder, connector);
+ intel_sdvo_create_enhance_property(connector);
+ return true;
+}
+
+static bool
+intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags)
+{
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
sdvo_priv->is_tv = false;
intel_encoder->needs_tv_clock = false;
sdvo_priv->is_lvds = false;
- if (device_is_registered(&connector->kdev)) {
- drm_sysfs_connector_remove(connector);
- registered = true;
- }
+ /* An SDVO device may only report an XXX1 output if it also has the matching XXX0 output. */
- if (flags &
- (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) {
- if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS0)
- sdvo_priv->controlled_output = SDVO_OUTPUT_TMDS0;
- else
- sdvo_priv->controlled_output = SDVO_OUTPUT_TMDS1;
-
- encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
- connector->connector_type = DRM_MODE_CONNECTOR_DVID;
-
- if (intel_sdvo_get_supp_encode(intel_encoder,
- &sdvo_priv->encode) &&
- intel_sdvo_get_digital_encoding_mode(intel_encoder) &&
- sdvo_priv->is_hdmi) {
- /* enable hdmi encoding mode if supported */
- intel_sdvo_set_encode(intel_encoder, SDVO_ENCODE_HDMI);
- intel_sdvo_set_colorimetry(intel_encoder,
- SDVO_COLORIMETRY_RGB256);
- connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
- intel_encoder->clone_mask =
- (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
- (1 << INTEL_ANALOG_CLONE_BIT);
- }
- } else if ((flags & SDVO_OUTPUT_SVID0) &&
- !dmi_check_system(intel_sdvo_bad_tv)) {
-
- sdvo_priv->controlled_output = SDVO_OUTPUT_SVID0;
- encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
- connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
- sdvo_priv->is_tv = true;
- intel_encoder->needs_tv_clock = true;
- intel_encoder->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
- } else if (flags & SDVO_OUTPUT_RGB0) {
-
- sdvo_priv->controlled_output = SDVO_OUTPUT_RGB0;
- encoder->encoder_type = DRM_MODE_ENCODER_DAC;
- connector->connector_type = DRM_MODE_CONNECTOR_VGA;
- intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
- (1 << INTEL_ANALOG_CLONE_BIT);
- } else if (flags & SDVO_OUTPUT_RGB1) {
-
- sdvo_priv->controlled_output = SDVO_OUTPUT_RGB1;
- encoder->encoder_type = DRM_MODE_ENCODER_DAC;
- connector->connector_type = DRM_MODE_CONNECTOR_VGA;
- intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
- (1 << INTEL_ANALOG_CLONE_BIT);
- } else if (flags & SDVO_OUTPUT_CVBS0) {
-
- sdvo_priv->controlled_output = SDVO_OUTPUT_CVBS0;
- encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
- connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
- sdvo_priv->is_tv = true;
- intel_encoder->needs_tv_clock = true;
- intel_encoder->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
- } else if (flags & SDVO_OUTPUT_LVDS0) {
-
- sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0;
- encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
- connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
- sdvo_priv->is_lvds = true;
- intel_encoder->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) |
- (1 << INTEL_SDVO_LVDS_CLONE_BIT);
- } else if (flags & SDVO_OUTPUT_LVDS1) {
-
- sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS1;
- encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
- connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
- sdvo_priv->is_lvds = true;
- intel_encoder->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) |
- (1 << INTEL_SDVO_LVDS_CLONE_BIT);
- } else {
+ if (flags & SDVO_OUTPUT_TMDS0)
+ if (!intel_sdvo_dvi_init(intel_encoder, 0))
+ return false;
+
+ if ((flags & SDVO_TMDS_MASK) == SDVO_TMDS_MASK)
+ if (!intel_sdvo_dvi_init(intel_encoder, 1))
+ return false;
+
+ /* TV has no XXX1 function block */
+ if (flags & SDVO_OUTPUT_SVID0)
+ if (!intel_sdvo_tv_init(intel_encoder, SDVO_OUTPUT_SVID0))
+ return false;
+
+ if (flags & SDVO_OUTPUT_CVBS0)
+ if (!intel_sdvo_tv_init(intel_encoder, SDVO_OUTPUT_CVBS0))
+ return false;
+
+ if (flags & SDVO_OUTPUT_RGB0)
+ if (!intel_sdvo_analog_init(intel_encoder, 0))
+ return false;
+
+ if ((flags & SDVO_RGB_MASK) == SDVO_RGB_MASK)
+ if (!intel_sdvo_analog_init(intel_encoder, 1))
+ return false;
+
+ if (flags & SDVO_OUTPUT_LVDS0)
+ if (!intel_sdvo_lvds_init(intel_encoder, 0))
+ return false;
+ if ((flags & SDVO_LVDS_MASK) == SDVO_LVDS_MASK)
+ if (!intel_sdvo_lvds_init(intel_encoder, 1))
+ return false;
+
+ if ((flags & SDVO_OUTPUT_MASK) == 0) {
unsigned char bytes[2];
sdvo_priv->controlled_output = 0;
@@ -2405,28 +2392,25 @@ intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags)
DRM_DEBUG_KMS("%s: Unknown SDVO output type (0x%02x%02x)\n",
SDVO_NAME(sdvo_priv),
bytes[0], bytes[1]);
- ret = false;
+ return false;
}
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
- if (ret && registered)
- ret = drm_sysfs_connector_add(connector) == 0 ? true : false;
-
-
- return ret;
-
+ return true;
}
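
The rewritten intel_sdvo_output_setup() registers one connector per reported output and only brings up the XXX1 variant of a pair when both bits are present, via the (flags & MASK) == MASK tests. A small hedged sketch of that pairing logic; the flag values below are invented for illustration, and the real SDVO_TMDS_MASK/SDVO_RGB_MASK/SDVO_LVDS_MASK definitions live elsewhere in the driver.

#include <stdint.h>
#include <stdio.h>

#define EX_OUTPUT_TMDS0	(1 << 0)	/* illustrative bit layout only */
#define EX_OUTPUT_TMDS1	(1 << 1)
#define EX_TMDS_MASK	(EX_OUTPUT_TMDS0 | EX_OUTPUT_TMDS1)

int main(void)
{
	uint16_t flags = EX_OUTPUT_TMDS0 | EX_OUTPUT_TMDS1;

	if (flags & EX_OUTPUT_TMDS0)
		printf("init DVI/HDMI connector for device 0\n");

	/* device 1 is only initialised when both TMDS outputs are advertised */
	if ((flags & EX_TMDS_MASK) == EX_TMDS_MASK)
		printf("init DVI/HDMI connector for device 1\n");

	return 0;
}
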
-static void intel_sdvo_tv_create_property(struct drm_connector *connector)
+static void intel_sdvo_tv_create_property(struct drm_connector *connector, int type)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
struct intel_sdvo_tv_format format;
uint32_t format_map, i;
uint8_t status;
- intel_sdvo_set_target_output(intel_encoder,
- sdvo_priv->controlled_output);
+ intel_sdvo_set_target_output(intel_encoder, type);
intel_sdvo_write_cmd(intel_encoder,
SDVO_CMD_GET_SUPPORTED_TV_FORMATS, NULL, 0);
@@ -2441,35 +2425,37 @@ static void intel_sdvo_tv_create_property(struct drm_connector *connector)
if (format_map == 0)
return;
- sdvo_priv->format_supported_num = 0;
+ sdvo_connector->format_supported_num = 0;
for (i = 0 ; i < TV_FORMAT_NUM; i++)
if (format_map & (1 << i)) {
- sdvo_priv->tv_format_supported
- [sdvo_priv->format_supported_num++] =
+ sdvo_connector->tv_format_supported
+ [sdvo_connector->format_supported_num++] =
tv_format_names[i];
}
- sdvo_priv->tv_format_property =
+ sdvo_connector->tv_format_property =
drm_property_create(
connector->dev, DRM_MODE_PROP_ENUM,
- "mode", sdvo_priv->format_supported_num);
+ "mode", sdvo_connector->format_supported_num);
- for (i = 0; i < sdvo_priv->format_supported_num; i++)
+ for (i = 0; i < sdvo_connector->format_supported_num; i++)
drm_property_add_enum(
- sdvo_priv->tv_format_property, i,
- i, sdvo_priv->tv_format_supported[i]);
+ sdvo_connector->tv_format_property, i,
+ i, sdvo_connector->tv_format_supported[i]);
- sdvo_priv->tv_format_name = sdvo_priv->tv_format_supported[0];
+ sdvo_priv->tv_format_name = sdvo_connector->tv_format_supported[0];
drm_connector_attach_property(
- connector, sdvo_priv->tv_format_property, 0);
+ connector, sdvo_connector->tv_format_property, 0);
}
static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_sdvo_connector *sdvo_priv = intel_connector->dev_priv;
struct intel_sdvo_enhancements_reply sdvo_data;
struct drm_device *dev = connector->dev;
uint8_t status;
@@ -2488,7 +2474,7 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
DRM_DEBUG_KMS("No enhancement is supported\n");
return;
}
- if (sdvo_priv->is_tv) {
+ if (IS_TV(sdvo_priv)) {
/* when horizontal overscan is supported, Add the left/right
* property
*/
@@ -2636,8 +2622,6 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
"default %d, current %d\n",
data_value[0], data_value[1], response);
}
- }
- if (sdvo_priv->is_tv) {
if (sdvo_data.saturation) {
intel_sdvo_write_cmd(intel_encoder,
SDVO_CMD_GET_MAX_SATURATION, NULL, 0);
@@ -2733,7 +2717,7 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
data_value[0], data_value[1], response);
}
}
- if (sdvo_priv->is_tv || sdvo_priv->is_lvds) {
+ if (IS_TV(sdvo_priv) || IS_LVDS(sdvo_priv)) {
if (sdvo_data.brightness) {
intel_sdvo_write_cmd(intel_encoder,
SDVO_CMD_GET_MAX_BRIGHTNESS, NULL, 0);
@@ -2773,12 +2757,11 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_connector *connector;
struct intel_encoder *intel_encoder;
struct intel_sdvo_priv *sdvo_priv;
-
u8 ch[0x40];
int i;
+ u32 i2c_reg, ddc_reg, analog_ddc_reg;
intel_encoder = kcalloc(sizeof(struct intel_encoder)+sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL);
if (!intel_encoder) {
@@ -2791,11 +2774,21 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
intel_encoder->dev_priv = sdvo_priv;
intel_encoder->type = INTEL_OUTPUT_SDVO;
+ if (HAS_PCH_SPLIT(dev)) {
+ i2c_reg = PCH_GPIOE;
+ ddc_reg = PCH_GPIOE;
+ analog_ddc_reg = PCH_GPIOA;
+ } else {
+ i2c_reg = GPIOE;
+ ddc_reg = GPIOE;
+ analog_ddc_reg = GPIOA;
+ }
+
/* setup the DDC bus. */
- if (sdvo_reg == SDVOB)
- intel_encoder->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB");
+ if (IS_SDVOB(sdvo_reg))
+ intel_encoder->i2c_bus = intel_i2c_create(dev, i2c_reg, "SDVOCTRL_E for SDVOB");
else
- intel_encoder->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC");
+ intel_encoder->i2c_bus = intel_i2c_create(dev, i2c_reg, "SDVOCTRL_E for SDVOC");
if (!intel_encoder->i2c_bus)
goto err_inteloutput;
@@ -2809,20 +2802,20 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
for (i = 0; i < 0x40; i++) {
if (!intel_sdvo_read_byte(intel_encoder, i, &ch[i])) {
DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n",
- sdvo_reg == SDVOB ? 'B' : 'C');
+ IS_SDVOB(sdvo_reg) ? 'B' : 'C');
goto err_i2c;
}
}
/* setup the DDC bus. */
- if (sdvo_reg == SDVOB) {
- intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS");
- sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA,
+ if (IS_SDVOB(sdvo_reg)) {
+ intel_encoder->ddc_bus = intel_i2c_create(dev, ddc_reg, "SDVOB DDC BUS");
+ sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg,
"SDVOB/VGA DDC BUS");
dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS;
} else {
- intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS");
- sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA,
+ intel_encoder->ddc_bus = intel_i2c_create(dev, ddc_reg, "SDVOC DDC BUS");
+ sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg,
"SDVOC/VGA DDC BUS");
dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;
}
@@ -2833,41 +2826,21 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
/* Wrap with our custom algo which switches to DDC mode */
intel_encoder->ddc_bus->algo = &intel_sdvo_i2c_bit_algo;
+ /* encoder type will be decided later */
+ drm_encoder_init(dev, &intel_encoder->enc, &intel_sdvo_enc_funcs, 0);
+ drm_encoder_helper_add(&intel_encoder->enc, &intel_sdvo_helper_funcs);
+
/* In default case sdvo lvds is false */
intel_sdvo_get_capabilities(intel_encoder, &sdvo_priv->caps);
if (intel_sdvo_output_setup(intel_encoder,
sdvo_priv->caps.output_flags) != true) {
DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n",
- sdvo_reg == SDVOB ? 'B' : 'C');
+ IS_SDVOB(sdvo_reg) ? 'B' : 'C');
goto err_i2c;
}
-
- connector = &intel_encoder->base;
- drm_connector_init(dev, connector, &intel_sdvo_connector_funcs,
- connector->connector_type);
-
- drm_connector_helper_add(connector, &intel_sdvo_connector_helper_funcs);
- connector->interlace_allowed = 0;
- connector->doublescan_allowed = 0;
- connector->display_info.subpixel_order = SubPixelHorizontalRGB;
-
- drm_encoder_init(dev, &intel_encoder->enc,
- &intel_sdvo_enc_funcs, intel_encoder->enc.encoder_type);
-
- drm_encoder_helper_add(&intel_encoder->enc, &intel_sdvo_helper_funcs);
-
- drm_mode_connector_attach_encoder(&intel_encoder->base, &intel_encoder->enc);
- if (sdvo_priv->is_tv)
- intel_sdvo_tv_create_property(connector);
-
- if (sdvo_priv->is_tv || sdvo_priv->is_lvds)
- intel_sdvo_create_enhance_property(connector);
-
- drm_sysfs_connector_add(connector);
-
- intel_sdvo_select_ddc_bus(sdvo_priv);
+ intel_sdvo_select_ddc_bus(dev_priv, sdvo_priv, sdvo_reg);
/* Set the input timing to the screen. Assume always input 0. */
intel_sdvo_set_target_input(intel_encoder, true, false);
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index d7d39b2327df..6d553c29d106 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -916,143 +916,6 @@ intel_tv_dpms(struct drm_encoder *encoder, int mode)
}
}
-static void
-intel_tv_save(struct drm_connector *connector)
-{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
- int i;
-
- tv_priv->save_TV_H_CTL_1 = I915_READ(TV_H_CTL_1);
- tv_priv->save_TV_H_CTL_2 = I915_READ(TV_H_CTL_2);
- tv_priv->save_TV_H_CTL_3 = I915_READ(TV_H_CTL_3);
- tv_priv->save_TV_V_CTL_1 = I915_READ(TV_V_CTL_1);
- tv_priv->save_TV_V_CTL_2 = I915_READ(TV_V_CTL_2);
- tv_priv->save_TV_V_CTL_3 = I915_READ(TV_V_CTL_3);
- tv_priv->save_TV_V_CTL_4 = I915_READ(TV_V_CTL_4);
- tv_priv->save_TV_V_CTL_5 = I915_READ(TV_V_CTL_5);
- tv_priv->save_TV_V_CTL_6 = I915_READ(TV_V_CTL_6);
- tv_priv->save_TV_V_CTL_7 = I915_READ(TV_V_CTL_7);
- tv_priv->save_TV_SC_CTL_1 = I915_READ(TV_SC_CTL_1);
- tv_priv->save_TV_SC_CTL_2 = I915_READ(TV_SC_CTL_2);
- tv_priv->save_TV_SC_CTL_3 = I915_READ(TV_SC_CTL_3);
-
- tv_priv->save_TV_CSC_Y = I915_READ(TV_CSC_Y);
- tv_priv->save_TV_CSC_Y2 = I915_READ(TV_CSC_Y2);
- tv_priv->save_TV_CSC_U = I915_READ(TV_CSC_U);
- tv_priv->save_TV_CSC_U2 = I915_READ(TV_CSC_U2);
- tv_priv->save_TV_CSC_V = I915_READ(TV_CSC_V);
- tv_priv->save_TV_CSC_V2 = I915_READ(TV_CSC_V2);
- tv_priv->save_TV_CLR_KNOBS = I915_READ(TV_CLR_KNOBS);
- tv_priv->save_TV_CLR_LEVEL = I915_READ(TV_CLR_LEVEL);
- tv_priv->save_TV_WIN_POS = I915_READ(TV_WIN_POS);
- tv_priv->save_TV_WIN_SIZE = I915_READ(TV_WIN_SIZE);
- tv_priv->save_TV_FILTER_CTL_1 = I915_READ(TV_FILTER_CTL_1);
- tv_priv->save_TV_FILTER_CTL_2 = I915_READ(TV_FILTER_CTL_2);
- tv_priv->save_TV_FILTER_CTL_3 = I915_READ(TV_FILTER_CTL_3);
-
- for (i = 0; i < 60; i++)
- tv_priv->save_TV_H_LUMA[i] = I915_READ(TV_H_LUMA_0 + (i <<2));
- for (i = 0; i < 60; i++)
- tv_priv->save_TV_H_CHROMA[i] = I915_READ(TV_H_CHROMA_0 + (i <<2));
- for (i = 0; i < 43; i++)
- tv_priv->save_TV_V_LUMA[i] = I915_READ(TV_V_LUMA_0 + (i <<2));
- for (i = 0; i < 43; i++)
- tv_priv->save_TV_V_CHROMA[i] = I915_READ(TV_V_CHROMA_0 + (i <<2));
-
- tv_priv->save_TV_DAC = I915_READ(TV_DAC);
- tv_priv->save_TV_CTL = I915_READ(TV_CTL);
-}
-
-static void
-intel_tv_restore(struct drm_connector *connector)
-{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
- struct drm_crtc *crtc = connector->encoder->crtc;
- struct intel_crtc *intel_crtc;
- int i;
-
- /* FIXME: No CRTC? */
- if (!crtc)
- return;
-
- intel_crtc = to_intel_crtc(crtc);
- I915_WRITE(TV_H_CTL_1, tv_priv->save_TV_H_CTL_1);
- I915_WRITE(TV_H_CTL_2, tv_priv->save_TV_H_CTL_2);
- I915_WRITE(TV_H_CTL_3, tv_priv->save_TV_H_CTL_3);
- I915_WRITE(TV_V_CTL_1, tv_priv->save_TV_V_CTL_1);
- I915_WRITE(TV_V_CTL_2, tv_priv->save_TV_V_CTL_2);
- I915_WRITE(TV_V_CTL_3, tv_priv->save_TV_V_CTL_3);
- I915_WRITE(TV_V_CTL_4, tv_priv->save_TV_V_CTL_4);
- I915_WRITE(TV_V_CTL_5, tv_priv->save_TV_V_CTL_5);
- I915_WRITE(TV_V_CTL_6, tv_priv->save_TV_V_CTL_6);
- I915_WRITE(TV_V_CTL_7, tv_priv->save_TV_V_CTL_7);
- I915_WRITE(TV_SC_CTL_1, tv_priv->save_TV_SC_CTL_1);
- I915_WRITE(TV_SC_CTL_2, tv_priv->save_TV_SC_CTL_2);
- I915_WRITE(TV_SC_CTL_3, tv_priv->save_TV_SC_CTL_3);
-
- I915_WRITE(TV_CSC_Y, tv_priv->save_TV_CSC_Y);
- I915_WRITE(TV_CSC_Y2, tv_priv->save_TV_CSC_Y2);
- I915_WRITE(TV_CSC_U, tv_priv->save_TV_CSC_U);
- I915_WRITE(TV_CSC_U2, tv_priv->save_TV_CSC_U2);
- I915_WRITE(TV_CSC_V, tv_priv->save_TV_CSC_V);
- I915_WRITE(TV_CSC_V2, tv_priv->save_TV_CSC_V2);
- I915_WRITE(TV_CLR_KNOBS, tv_priv->save_TV_CLR_KNOBS);
- I915_WRITE(TV_CLR_LEVEL, tv_priv->save_TV_CLR_LEVEL);
-
- {
- int pipeconf_reg = (intel_crtc->pipe == 0) ?
- PIPEACONF : PIPEBCONF;
- int dspcntr_reg = (intel_crtc->plane == 0) ?
- DSPACNTR : DSPBCNTR;
- int pipeconf = I915_READ(pipeconf_reg);
- int dspcntr = I915_READ(dspcntr_reg);
- int dspbase_reg = (intel_crtc->plane == 0) ?
- DSPAADDR : DSPBADDR;
- /* Pipe must be off here */
- I915_WRITE(dspcntr_reg, dspcntr & ~DISPLAY_PLANE_ENABLE);
- /* Flush the plane changes */
- I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
-
- if (!IS_I9XX(dev)) {
- /* Wait for vblank for the disable to take effect */
- intel_wait_for_vblank(dev);
- }
-
- I915_WRITE(pipeconf_reg, pipeconf & ~PIPEACONF_ENABLE);
- /* Wait for vblank for the disable to take effect. */
- intel_wait_for_vblank(dev);
-
- /* Filter ctl must be set before TV_WIN_SIZE */
- I915_WRITE(TV_FILTER_CTL_1, tv_priv->save_TV_FILTER_CTL_1);
- I915_WRITE(TV_FILTER_CTL_2, tv_priv->save_TV_FILTER_CTL_2);
- I915_WRITE(TV_FILTER_CTL_3, tv_priv->save_TV_FILTER_CTL_3);
- I915_WRITE(TV_WIN_POS, tv_priv->save_TV_WIN_POS);
- I915_WRITE(TV_WIN_SIZE, tv_priv->save_TV_WIN_SIZE);
- I915_WRITE(pipeconf_reg, pipeconf);
- I915_WRITE(dspcntr_reg, dspcntr);
- /* Flush the plane changes */
- I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
- }
-
- for (i = 0; i < 60; i++)
- I915_WRITE(TV_H_LUMA_0 + (i <<2), tv_priv->save_TV_H_LUMA[i]);
- for (i = 0; i < 60; i++)
- I915_WRITE(TV_H_CHROMA_0 + (i <<2), tv_priv->save_TV_H_CHROMA[i]);
- for (i = 0; i < 43; i++)
- I915_WRITE(TV_V_LUMA_0 + (i <<2), tv_priv->save_TV_V_LUMA[i]);
- for (i = 0; i < 43; i++)
- I915_WRITE(TV_V_CHROMA_0 + (i <<2), tv_priv->save_TV_V_CHROMA[i]);
-
- I915_WRITE(TV_DAC, tv_priv->save_TV_DAC);
- I915_WRITE(TV_CTL, tv_priv->save_TV_CTL);
-}
-
static const struct tv_mode *
intel_tv_mode_lookup (char *tv_format)
{
@@ -1078,7 +941,8 @@ intel_tv_mode_find (struct intel_encoder *intel_encoder)
static enum drm_mode_status
intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
/* Ensure TV refresh is close to desired refresh */
@@ -1441,7 +1305,8 @@ intel_tv_detect_type (struct drm_crtc *crtc, struct intel_encoder *intel_encoder
*/
static void intel_tv_find_better_format(struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
int i;
@@ -1475,9 +1340,9 @@ intel_tv_detect(struct drm_connector *connector)
{
struct drm_crtc *crtc;
struct drm_display_mode mode;
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
- struct drm_encoder *encoder = &intel_encoder->enc;
int dpms_mode;
int type = tv_priv->type;
@@ -1487,10 +1352,12 @@ intel_tv_detect(struct drm_connector *connector)
if (encoder->crtc && encoder->crtc->enabled) {
type = intel_tv_detect_type(encoder->crtc, intel_encoder);
} else {
- crtc = intel_get_load_detect_pipe(intel_encoder, &mode, &dpms_mode);
+ crtc = intel_get_load_detect_pipe(intel_encoder, connector,
+ &mode, &dpms_mode);
if (crtc) {
type = intel_tv_detect_type(crtc, intel_encoder);
- intel_release_load_detect_pipe(intel_encoder, dpms_mode);
+ intel_release_load_detect_pipe(intel_encoder, connector,
+ dpms_mode);
} else
type = -1;
}
@@ -1525,7 +1392,8 @@ static void
intel_tv_chose_preferred_modes(struct drm_connector *connector,
struct drm_display_mode *mode_ptr)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
if (tv_mode->nbr_end < 480 && mode_ptr->vdisplay == 480)
@@ -1550,7 +1418,8 @@ static int
intel_tv_get_modes(struct drm_connector *connector)
{
struct drm_display_mode *mode_ptr;
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
int j, count = 0;
u64 tmp;
@@ -1604,11 +1473,9 @@ intel_tv_get_modes(struct drm_connector *connector)
static void
intel_tv_destroy (struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
-
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
- kfree(intel_encoder);
+ kfree(connector);
}
@@ -1617,9 +1484,9 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop
uint64_t val)
{
struct drm_device *dev = connector->dev;
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
- struct drm_encoder *encoder = &intel_encoder->enc;
struct drm_crtc *crtc = encoder->crtc;
int ret = 0;
bool changed = false;
@@ -1676,8 +1543,6 @@ static const struct drm_encoder_helper_funcs intel_tv_helper_funcs = {
static const struct drm_connector_funcs intel_tv_connector_funcs = {
.dpms = drm_helper_connector_dpms,
- .save = intel_tv_save,
- .restore = intel_tv_restore,
.detect = intel_tv_detect,
.destroy = intel_tv_destroy,
.set_property = intel_tv_set_property,
@@ -1687,12 +1552,15 @@ static const struct drm_connector_funcs intel_tv_connector_funcs = {
static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs = {
.mode_valid = intel_tv_mode_valid,
.get_modes = intel_tv_get_modes,
- .best_encoder = intel_best_encoder,
+ .best_encoder = intel_attached_encoder,
};
static void intel_tv_enc_destroy(struct drm_encoder *encoder)
{
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+
drm_encoder_cleanup(encoder);
+ kfree(intel_encoder);
}
static const struct drm_encoder_funcs intel_tv_enc_funcs = {
@@ -1741,6 +1609,7 @@ intel_tv_init(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_connector *connector;
struct intel_encoder *intel_encoder;
+ struct intel_connector *intel_connector;
struct intel_tv_priv *tv_priv;
u32 tv_dac_on, tv_dac_off, save_tv_dac;
char **tv_format_names;
@@ -1786,7 +1655,13 @@ intel_tv_init(struct drm_device *dev)
return;
}
- connector = &intel_encoder->base;
+ intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+ if (!intel_connector) {
+ kfree(intel_encoder);
+ return;
+ }
+
+ connector = &intel_connector->base;
drm_connector_init(dev, connector, &intel_tv_connector_funcs,
DRM_MODE_CONNECTOR_SVIDEO);
@@ -1794,7 +1669,7 @@ intel_tv_init(struct drm_device *dev)
drm_encoder_init(dev, &intel_encoder->enc, &intel_tv_enc_funcs,
DRM_MODE_ENCODER_TVDAC);
- drm_mode_connector_attach_encoder(&intel_encoder->base, &intel_encoder->enc);
+ drm_mode_connector_attach_encoder(&intel_connector->base, &intel_encoder->enc);
tv_priv = (struct intel_tv_priv *)(intel_encoder + 1);
intel_encoder->type = INTEL_OUTPUT_TVOUT;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index 453df3f6053f..acd31ed861ef 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -22,7 +22,8 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
nv50_cursor.o nv50_display.o nv50_fbcon.o \
nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \
nv04_crtc.o nv04_display.o nv04_cursor.o nv04_fbcon.o \
- nv17_gpio.o nv50_gpio.o
+ nv17_gpio.o nv50_gpio.o \
+ nv50_calc.o
nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o
nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index abc382a9918b..e7e69ccce5c9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -26,6 +26,7 @@
#define NV_DEBUG_NOTRACE
#include "nouveau_drv.h"
#include "nouveau_hw.h"
+#include "nouveau_encoder.h"
/* these defines are made up */
#define NV_CIO_CRE_44_HEADA 0x0
@@ -256,6 +257,11 @@ static bool NVShadowVBIOS(struct drm_device *dev, uint8_t *data)
struct init_tbl_entry {
char *name;
uint8_t id;
+ /* Return:
+ * > 0: success, length of opcode
+ * 0: success, but abort further parsing of table (INIT_DONE etc)
+ * < 0: failure, table parsing will be aborted
+ */
int (*handler)(struct nvbios *, uint16_t, struct init_exec *);
};
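
The comment added to struct init_tbl_entry spells out the handler contract that the reworked parse_init_table() further down depends on: a positive return value is the opcode length to advance by, zero stops parsing cleanly (INIT_DONE and friends), and a negative value aborts with an error. A hedged sketch of a handler written against that contract; the 3-byte opcode layout is invented purely for illustration.

#include <errno.h>
#include <stdint.h>

/* Simplified stand-ins for the real nvbios / init_exec types. */
struct toy_bios { const uint8_t *data; };
struct toy_exec { int execute; };

/* Hypothetical opcode: [id][reg_index][value], 3 bytes total. */
static int toy_handler(struct toy_bios *bios, uint16_t offset, struct toy_exec *iexec)
{
	const int len = 3;

	if (!iexec->execute)
		return len;		/* consume the opcode, skip its effect */

	if (bios->data[offset + 1] > 0x7f)
		return -EINVAL;		/* < 0: abort table parsing */

	/* ... act on data[offset + 1] and data[offset + 2] here ... */

	return len;			/* > 0: advance to the next opcode */
}
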
@@ -709,6 +715,83 @@ static int dcb_entry_idx_from_crtchead(struct drm_device *dev)
return dcb_entry;
}
+static int
+read_dcb_i2c_entry(struct drm_device *dev, int dcb_version, uint8_t *i2ctable, int index, struct dcb_i2c_entry *i2c)
+{
+ uint8_t dcb_i2c_ver = dcb_version, headerlen = 0, entry_len = 4;
+ int i2c_entries = DCB_MAX_NUM_I2C_ENTRIES;
+ int recordoffset = 0, rdofs = 1, wrofs = 0;
+ uint8_t port_type = 0;
+
+ if (!i2ctable)
+ return -EINVAL;
+
+ if (dcb_version >= 0x30) {
+ if (i2ctable[0] != dcb_version) /* necessary? */
+ NV_WARN(dev,
+ "DCB I2C table version mismatch (%02X vs %02X)\n",
+ i2ctable[0], dcb_version);
+ dcb_i2c_ver = i2ctable[0];
+ headerlen = i2ctable[1];
+ if (i2ctable[2] <= DCB_MAX_NUM_I2C_ENTRIES)
+ i2c_entries = i2ctable[2];
+ else
+ NV_WARN(dev,
+ "DCB I2C table has more entries than indexable "
+ "(%d entries, max %d)\n", i2ctable[2],
+ DCB_MAX_NUM_I2C_ENTRIES);
+ entry_len = i2ctable[3];
+ /* [4] is i2c_default_indices, read in parse_dcb_table() */
+ }
+ /*
+ * It's your own fault if you call this function on a DCB 1.1 BIOS --
+ * the test below is for DCB 1.2
+ */
+ if (dcb_version < 0x14) {
+ recordoffset = 2;
+ rdofs = 0;
+ wrofs = 1;
+ }
+
+ if (index == 0xf)
+ return 0;
+ if (index >= i2c_entries) {
+ NV_ERROR(dev, "DCB I2C index too big (%d >= %d)\n",
+ index, i2ctable[2]);
+ return -ENOENT;
+ }
+ if (i2ctable[headerlen + entry_len * index + 3] == 0xff) {
+ NV_ERROR(dev, "DCB I2C entry invalid\n");
+ return -EINVAL;
+ }
+
+ if (dcb_i2c_ver >= 0x30) {
+ port_type = i2ctable[headerlen + recordoffset + 3 + entry_len * index];
+
+ /*
+ * Fixup for chips using same address offset for read and
+ * write.
+ */
+ if (port_type == 4) /* seen on C51 */
+ rdofs = wrofs = 1;
+ if (port_type >= 5) /* G80+ */
+ rdofs = wrofs = 0;
+ }
+
+ if (dcb_i2c_ver >= 0x40) {
+ if (port_type != 5 && port_type != 6)
+ NV_WARN(dev, "DCB I2C table has port type %d\n", port_type);
+
+ i2c->entry = ROM32(i2ctable[headerlen + recordoffset + entry_len * index]);
+ }
+
+ i2c->port_type = port_type;
+ i2c->read = i2ctable[headerlen + recordoffset + rdofs + entry_len * index];
+ i2c->write = i2ctable[headerlen + recordoffset + wrofs + entry_len * index];
+
+ return 0;
+}
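
read_dcb_i2c_entry(), moved up here so init_i2c_device_find() can call it, walks a packed table: headerlen bytes of header, then fixed-size records of entry_len bytes, with recordoffset/rdofs/wrofs picking individual bytes inside a record depending on the DCB version. A stand-alone sketch of the addressing math, using an invented 4-byte record layout purely for illustration.

#include <stdint.h>
#include <stdio.h>

/* Toy table: 4-byte header followed by 4-byte records (contents made up). */
static const uint8_t table[] = {
	0x30, 0x04, 0x02, 0x04,		/* version, headerlen, entry count, entry_len */
	0x51, 0x50, 0x00, 0x05,		/* record 0: wr, rd, pad, port_type */
	0x37, 0x36, 0x00, 0x05,		/* record 1 */
};

int main(void)
{
	int headerlen = table[1], entry_len = table[3];
	int recordoffset = 0, rdofs = 1, wrofs = 0;	/* DCB >= 1.2 layout */
	int index = 1;

	uint8_t rd = table[headerlen + recordoffset + rdofs + entry_len * index];
	uint8_t wr = table[headerlen + recordoffset + wrofs + entry_len * index];

	printf("entry %d: read 0x%02x write 0x%02x\n", index, rd, wr);
	return 0;
}
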
+
static struct nouveau_i2c_chan *
init_i2c_device_find(struct drm_device *dev, int i2c_index)
{
@@ -727,6 +810,20 @@ init_i2c_device_find(struct drm_device *dev, int i2c_index)
}
if (i2c_index == 0x80) /* g80+ */
i2c_index = dcb->i2c_default_indices & 0xf;
+ else
+ if (i2c_index == 0x81)
+ i2c_index = (dcb->i2c_default_indices & 0xf0) >> 4;
+
+ if (i2c_index > DCB_MAX_NUM_I2C_ENTRIES) {
+ NV_ERROR(dev, "invalid i2c_index 0x%x\n", i2c_index);
+ return NULL;
+ }
+
+ /* Make sure i2c table entry has been parsed, it may not
+ * have been if this is a bus not referenced by a DCB encoder
+ */
+ read_dcb_i2c_entry(dev, dcb->version, dcb->i2c_table,
+ i2c_index, &dcb->i2c[i2c_index]);
return nouveau_i2c_find(dev, i2c_index);
}
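
In init_i2c_device_find(), index 0x80 and the newly handled 0x81 both mean "use a default bus": the low and high nibbles of dcb->i2c_default_indices respectively. A tiny sketch of the nibble selection; the default-indices byte below is a made-up example value.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t i2c_default_indices = 0x31;	/* example: low nibble 1, high nibble 3 */
	int bus_for_0x80 = i2c_default_indices & 0xf;		/* -> bus 1 */
	int bus_for_0x81 = (i2c_default_indices & 0xf0) >> 4;	/* -> bus 3 */

	printf("0x80 -> %d, 0x81 -> %d\n", bus_for_0x80, bus_for_0x81);
	return 0;
}
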
@@ -818,7 +915,7 @@ init_io_restrict_prog(struct nvbios *bios, uint16_t offset,
NV_ERROR(bios->dev,
"0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n",
offset, config, count);
- return 0;
+ return -EINVAL;
}
configval = ROM32(bios->data[offset + 11 + config * 4]);
@@ -920,7 +1017,7 @@ init_io_restrict_pll(struct nvbios *bios, uint16_t offset,
NV_ERROR(bios->dev,
"0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n",
offset, config, count);
- return 0;
+ return -EINVAL;
}
freq = ROM16(bios->data[offset + 12 + config * 2]);
@@ -1067,6 +1164,126 @@ init_io_flag_condition(struct nvbios *bios, uint16_t offset,
}
static int
+init_dp_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_DP_CONDITION opcode: 0x3A ('')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (8 bit): "sub" opcode
+ * offset + 2 (8 bit): unknown
+ *
+ */
+
+ struct bit_displayport_encoder_table *dpe = NULL;
+ struct dcb_entry *dcb = bios->display.output;
+ struct drm_device *dev = bios->dev;
+ uint8_t cond = bios->data[offset + 1];
+ int dummy;
+
+ BIOSLOG(bios, "0x%04X: subop 0x%02X\n", offset, cond);
+
+ if (!iexec->execute)
+ return 3;
+
+ dpe = nouveau_bios_dp_table(dev, dcb, &dummy);
+ if (!dpe) {
+ NV_ERROR(dev, "0x%04X: INIT_3A: no encoder table!!\n", offset);
+ return -EINVAL;
+ }
+
+ switch (cond) {
+ case 0:
+ {
+ struct dcb_connector_table_entry *ent =
+ &bios->dcb.connector.entry[dcb->connector];
+
+ if (ent->type != DCB_CONNECTOR_eDP)
+ iexec->execute = false;
+ }
+ break;
+ case 1:
+ case 2:
+ if (!(dpe->unknown & cond))
+ iexec->execute = false;
+ break;
+ case 5:
+ {
+ struct nouveau_i2c_chan *auxch;
+ int ret;
+
+ auxch = nouveau_i2c_find(dev, bios->display.output->i2c_index);
+ if (!auxch)
+ return -ENODEV;
+
+ ret = nouveau_dp_auxch(auxch, 9, 0xd, &cond, 1);
+ if (ret)
+ return ret;
+
+ if (cond & 1)
+ iexec->execute = false;
+ }
+ break;
+ default:
+ NV_WARN(dev, "0x%04X: unknown INIT_3A op: %d\n", offset, cond);
+ break;
+ }
+
+ if (iexec->execute)
+ BIOSLOG(bios, "0x%04X: continuing to execute\n", offset);
+ else
+ BIOSLOG(bios, "0x%04X: skipping following commands\n", offset);
+
+ return 3;
+}
+
+static int
+init_op_3b(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_3B opcode: 0x3B ('')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (8 bit): crtc index
+ *
+ */
+
+ uint8_t or = ffs(bios->display.output->or) - 1;
+ uint8_t index = bios->data[offset + 1];
+ uint8_t data;
+
+ if (!iexec->execute)
+ return 2;
+
+ data = bios_idxprt_rd(bios, 0x3d4, index);
+ bios_idxprt_wr(bios, 0x3d4, index, data & ~(1 << or));
+ return 2;
+}
+
+static int
+init_op_3c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_3C opcode: 0x3C ('')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (8 bit): crtc index
+ *
+ */
+
+ uint8_t or = ffs(bios->display.output->or) - 1;
+ uint8_t index = bios->data[offset + 1];
+ uint8_t data;
+
+ if (!iexec->execute)
+ return 2;
+
+ data = bios_idxprt_rd(bios, 0x3d4, index);
+ bios_idxprt_wr(bios, 0x3d4, index, data | (1 << or));
+ return 2;
+}
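
INIT_3B and INIT_3C are mirror operations on the same indexed CRTC register: both derive the output-resource bit from ffs(or) - 1, then 3B clears it and 3C sets it with a read-modify-write through the 0x3d4 index port. A minimal sketch of just the bit arithmetic; the register value is a placeholder and there is no hardware access here.

#include <stdint.h>
#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	uint8_t or_mask = 0x4;			/* output routing mask from the DCB entry */
	int or_bit = ffs(or_mask) - 1;		/* -> bit index 2 */
	uint8_t reg = 0xff;			/* pretend value read via the 0x3d4 index port */

	uint8_t after_3b = reg & (uint8_t)~(1 << or_bit);	/* INIT_3B: clear the OR bit */
	uint8_t after_3c = reg | (1 << or_bit);			/* INIT_3C: set the OR bit */

	printf("3B -> 0x%02x, 3C -> 0x%02x\n", after_3b, after_3c);
	return 0;
}
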
+
+static int
init_idx_addr_latched(struct nvbios *bios, uint16_t offset,
struct init_exec *iexec)
{
@@ -1170,7 +1387,7 @@ init_io_restrict_pll2(struct nvbios *bios, uint16_t offset,
NV_ERROR(bios->dev,
"0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n",
offset, config, count);
- return 0;
+ return -EINVAL;
}
freq = ROM32(bios->data[offset + 11 + config * 4]);
@@ -1231,12 +1448,11 @@ init_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
*/
uint8_t i2c_index = bios->data[offset + 1];
- uint8_t i2c_address = bios->data[offset + 2];
+ uint8_t i2c_address = bios->data[offset + 2] >> 1;
uint8_t count = bios->data[offset + 3];
- int len = 4 + count * 3;
struct nouveau_i2c_chan *chan;
- struct i2c_msg msg;
- int i;
+ int len = 4 + count * 3;
+ int ret, i;
if (!iexec->execute)
return len;
@@ -1247,35 +1463,34 @@ init_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
chan = init_i2c_device_find(bios->dev, i2c_index);
if (!chan)
- return 0;
+ return -ENODEV;
for (i = 0; i < count; i++) {
- uint8_t i2c_reg = bios->data[offset + 4 + i * 3];
+ uint8_t reg = bios->data[offset + 4 + i * 3];
uint8_t mask = bios->data[offset + 5 + i * 3];
uint8_t data = bios->data[offset + 6 + i * 3];
- uint8_t value;
+ union i2c_smbus_data val;
- msg.addr = i2c_address;
- msg.flags = I2C_M_RD;
- msg.len = 1;
- msg.buf = &value;
- if (i2c_transfer(&chan->adapter, &msg, 1) != 1)
- return 0;
+ ret = i2c_smbus_xfer(&chan->adapter, i2c_address, 0,
+ I2C_SMBUS_READ, reg,
+ I2C_SMBUS_BYTE_DATA, &val);
+ if (ret < 0)
+ return ret;
BIOSLOG(bios, "0x%04X: I2CReg: 0x%02X, Value: 0x%02X, "
"Mask: 0x%02X, Data: 0x%02X\n",
- offset, i2c_reg, value, mask, data);
+ offset, reg, val.byte, mask, data);
- value = (value & mask) | data;
+ if (!bios->execute)
+ continue;
- if (bios->execute) {
- msg.addr = i2c_address;
- msg.flags = 0;
- msg.len = 1;
- msg.buf = &value;
- if (i2c_transfer(&chan->adapter, &msg, 1) != 1)
- return 0;
- }
+ val.byte &= mask;
+ val.byte |= data;
+ ret = i2c_smbus_xfer(&chan->adapter, i2c_address, 0,
+ I2C_SMBUS_WRITE, reg,
+ I2C_SMBUS_BYTE_DATA, &val);
+ if (ret < 0)
+ return ret;
}
return len;
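
Two separate changes meet in init_i2c_byte(): the script's I2C address is stored in 8-bit (write-address) form and is shifted right once to get the 7-bit address the Linux I2C core expects, and the raw i2c_transfer() read/write pair becomes i2c_smbus_xfer() byte-data accesses that do the mask-and-set in place. A hedged sketch of just the address conversion and read-modify-write step, with no bus access and made-up values.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t vbios_addr = 0xA0;		/* 8-bit form as stored in the script */
	uint8_t linux_addr = vbios_addr >> 1;	/* 7-bit address: 0x50 */

	uint8_t current = 0x3C;			/* pretend this came back from the SMBus read */
	uint8_t mask = 0xF0, data = 0x05;
	uint8_t next = (current & mask) | data;	/* keep masked bits, OR in the new data */

	printf("addr 0x%02x: 0x%02x -> 0x%02x\n", linux_addr, current, next);
	return 0;
}
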
@@ -1301,12 +1516,11 @@ init_zm_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
*/
uint8_t i2c_index = bios->data[offset + 1];
- uint8_t i2c_address = bios->data[offset + 2];
+ uint8_t i2c_address = bios->data[offset + 2] >> 1;
uint8_t count = bios->data[offset + 3];
- int len = 4 + count * 2;
struct nouveau_i2c_chan *chan;
- struct i2c_msg msg;
- int i;
+ int len = 4 + count * 2;
+ int ret, i;
if (!iexec->execute)
return len;
@@ -1317,23 +1531,25 @@ init_zm_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
chan = init_i2c_device_find(bios->dev, i2c_index);
if (!chan)
- return 0;
+ return -ENODEV;
for (i = 0; i < count; i++) {
- uint8_t i2c_reg = bios->data[offset + 4 + i * 2];
- uint8_t data = bios->data[offset + 5 + i * 2];
+ uint8_t reg = bios->data[offset + 4 + i * 2];
+ union i2c_smbus_data val;
+
+ val.byte = bios->data[offset + 5 + i * 2];
BIOSLOG(bios, "0x%04X: I2CReg: 0x%02X, Data: 0x%02X\n",
- offset, i2c_reg, data);
-
- if (bios->execute) {
- msg.addr = i2c_address;
- msg.flags = 0;
- msg.len = 1;
- msg.buf = &data;
- if (i2c_transfer(&chan->adapter, &msg, 1) != 1)
- return 0;
- }
+ offset, reg, val.byte);
+
+ if (!bios->execute)
+ continue;
+
+ ret = i2c_smbus_xfer(&chan->adapter, i2c_address, 0,
+ I2C_SMBUS_WRITE, reg,
+ I2C_SMBUS_BYTE_DATA, &val);
+ if (ret < 0)
+ return ret;
}
return len;
@@ -1357,7 +1573,7 @@ init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
*/
uint8_t i2c_index = bios->data[offset + 1];
- uint8_t i2c_address = bios->data[offset + 2];
+ uint8_t i2c_address = bios->data[offset + 2] >> 1;
uint8_t count = bios->data[offset + 3];
int len = 4 + count;
struct nouveau_i2c_chan *chan;
@@ -1374,7 +1590,7 @@ init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
chan = init_i2c_device_find(bios->dev, i2c_index);
if (!chan)
- return 0;
+ return -ENODEV;
for (i = 0; i < count; i++) {
data[i] = bios->data[offset + 4 + i];
@@ -1388,7 +1604,7 @@ init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
msg.len = count;
msg.buf = data;
if (i2c_transfer(&chan->adapter, &msg, 1) != 1)
- return 0;
+ return -EIO;
}
return len;
@@ -1427,7 +1643,7 @@ init_tmds(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
reg = get_tmds_index_reg(bios->dev, mlv);
if (!reg)
- return 0;
+ return -EINVAL;
bios_wr32(bios, reg,
tmdsaddr | NV_PRAMDAC_FP_TMDS_CONTROL_WRITE_DISABLE);
@@ -1471,7 +1687,7 @@ init_zm_tmds_group(struct nvbios *bios, uint16_t offset,
reg = get_tmds_index_reg(bios->dev, mlv);
if (!reg)
- return 0;
+ return -EINVAL;
for (i = 0; i < count; i++) {
uint8_t tmdsaddr = bios->data[offset + 3 + i * 2];
@@ -1946,7 +2162,7 @@ init_configure_mem(struct nvbios *bios, uint16_t offset,
uint32_t reg, data;
if (bios->major_version > 2)
- return 0;
+ return -ENODEV;
bios_idxprt_wr(bios, NV_VIO_SRX, NV_VIO_SR_CLOCK_INDEX, bios_idxprt_rd(
bios, NV_VIO_SRX, NV_VIO_SR_CLOCK_INDEX) | 0x20);
@@ -2001,7 +2217,7 @@ init_configure_clk(struct nvbios *bios, uint16_t offset,
int clock;
if (bios->major_version > 2)
- return 0;
+ return -ENODEV;
clock = ROM16(bios->data[meminitoffs + 4]) * 10;
setPLL(bios, NV_PRAMDAC_NVPLL_COEFF, clock);
@@ -2034,7 +2250,7 @@ init_configure_preinit(struct nvbios *bios, uint16_t offset,
uint8_t cr3c = ((straps << 2) & 0xf0) | (straps & (1 << 6));
if (bios->major_version > 2)
- return 0;
+ return -ENODEV;
bios_idxprt_wr(bios, NV_CIO_CRX__COLOR,
NV_CIO_CRE_SCRATCH4__INDEX, cr3c);
@@ -2656,7 +2872,7 @@ init_ram_restrict_zm_reg_group(struct nvbios *bios, uint16_t offset,
NV_ERROR(bios->dev,
"0x%04X: Zero block length - has the M table "
"been parsed?\n", offset);
- return 0;
+ return -EINVAL;
}
strap_ramcfg = (bios_rd32(bios, NV_PEXTDEV_BOOT_0) >> 2) & 0xf;
@@ -2840,14 +3056,14 @@ init_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
if (!bios->display.output) {
NV_ERROR(dev, "INIT_AUXCH: no active output\n");
- return 0;
+ return -EINVAL;
}
auxch = init_i2c_device_find(dev, bios->display.output->i2c_index);
if (!auxch) {
NV_ERROR(dev, "INIT_AUXCH: couldn't get auxch %d\n",
bios->display.output->i2c_index);
- return 0;
+ return -ENODEV;
}
if (!iexec->execute)
@@ -2860,7 +3076,7 @@ init_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
ret = nouveau_dp_auxch(auxch, 9, addr, &data, 1);
if (ret) {
NV_ERROR(dev, "INIT_AUXCH: rd auxch fail %d\n", ret);
- return 0;
+ return ret;
}
data &= bios->data[offset + 0];
@@ -2869,7 +3085,7 @@ init_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
ret = nouveau_dp_auxch(auxch, 8, addr, &data, 1);
if (ret) {
NV_ERROR(dev, "INIT_AUXCH: wr auxch fail %d\n", ret);
- return 0;
+ return ret;
}
}
@@ -2899,14 +3115,14 @@ init_zm_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
if (!bios->display.output) {
NV_ERROR(dev, "INIT_ZM_AUXCH: no active output\n");
- return 0;
+ return -EINVAL;
}
auxch = init_i2c_device_find(dev, bios->display.output->i2c_index);
if (!auxch) {
NV_ERROR(dev, "INIT_ZM_AUXCH: couldn't get auxch %d\n",
bios->display.output->i2c_index);
- return 0;
+ return -ENODEV;
}
if (!iexec->execute)
@@ -2917,7 +3133,7 @@ init_zm_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
ret = nouveau_dp_auxch(auxch, 8, addr, &bios->data[offset], 1);
if (ret) {
NV_ERROR(dev, "INIT_ZM_AUXCH: wr auxch fail %d\n", ret);
- return 0;
+ return ret;
}
}
@@ -2934,6 +3150,9 @@ static struct init_tbl_entry itbl_entry[] = {
{ "INIT_COPY" , 0x37, init_copy },
{ "INIT_NOT" , 0x38, init_not },
{ "INIT_IO_FLAG_CONDITION" , 0x39, init_io_flag_condition },
+ { "INIT_DP_CONDITION" , 0x3A, init_dp_condition },
+ { "INIT_OP_3B" , 0x3B, init_op_3b },
+ { "INIT_OP_3C" , 0x3C, init_op_3c },
{ "INIT_INDEX_ADDRESS_LATCHED" , 0x49, init_idx_addr_latched },
{ "INIT_IO_RESTRICT_PLL2" , 0x4A, init_io_restrict_pll2 },
{ "INIT_PLL2" , 0x4B, init_pll2 },
@@ -3001,7 +3220,7 @@ parse_init_table(struct nvbios *bios, unsigned int offset,
* is changed back to EXECUTE.
*/
- int count = 0, i, res;
+ int count = 0, i, ret;
uint8_t id;
/*
@@ -3016,26 +3235,33 @@ parse_init_table(struct nvbios *bios, unsigned int offset,
for (i = 0; itbl_entry[i].name && (itbl_entry[i].id != id); i++)
;
- if (itbl_entry[i].name) {
- BIOSLOG(bios, "0x%04X: [ (0x%02X) - %s ]\n",
- offset, itbl_entry[i].id, itbl_entry[i].name);
-
- /* execute eventual command handler */
- res = (*itbl_entry[i].handler)(bios, offset, iexec);
- if (!res)
- break;
- /*
- * Add the offset of the current command including all data
- * of that command. The offset will then be pointing on the
- * next op code.
- */
- offset += res;
- } else {
+ if (!itbl_entry[i].name) {
NV_ERROR(bios->dev,
"0x%04X: Init table command not found: "
"0x%02X\n", offset, id);
return -ENOENT;
}
+
+ BIOSLOG(bios, "0x%04X: [ (0x%02X) - %s ]\n", offset,
+ itbl_entry[i].id, itbl_entry[i].name);
+
+ /* execute eventual command handler */
+ ret = (*itbl_entry[i].handler)(bios, offset, iexec);
+ if (ret < 0) {
+ NV_ERROR(bios->dev, "0x%04X: Failed parsing init "
+ "table opcode: %s %d\n", offset,
+ itbl_entry[i].name, ret);
+ }
+
+ if (ret <= 0)
+ break;
+
+ /*
+ * Add the offset of the current command including all data
+ * of that command. The offset will then be pointing on the
+ * next op code.
+ */
+ offset += ret;
}
if (offset >= bios->length)
@@ -4285,31 +4511,32 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims
break;
}
-#if 0 /* for easy debugging */
- ErrorF("pll.vco1.minfreq: %d\n", pll_lim->vco1.minfreq);
- ErrorF("pll.vco1.maxfreq: %d\n", pll_lim->vco1.maxfreq);
- ErrorF("pll.vco2.minfreq: %d\n", pll_lim->vco2.minfreq);
- ErrorF("pll.vco2.maxfreq: %d\n", pll_lim->vco2.maxfreq);
-
- ErrorF("pll.vco1.min_inputfreq: %d\n", pll_lim->vco1.min_inputfreq);
- ErrorF("pll.vco1.max_inputfreq: %d\n", pll_lim->vco1.max_inputfreq);
- ErrorF("pll.vco2.min_inputfreq: %d\n", pll_lim->vco2.min_inputfreq);
- ErrorF("pll.vco2.max_inputfreq: %d\n", pll_lim->vco2.max_inputfreq);
-
- ErrorF("pll.vco1.min_n: %d\n", pll_lim->vco1.min_n);
- ErrorF("pll.vco1.max_n: %d\n", pll_lim->vco1.max_n);
- ErrorF("pll.vco1.min_m: %d\n", pll_lim->vco1.min_m);
- ErrorF("pll.vco1.max_m: %d\n", pll_lim->vco1.max_m);
- ErrorF("pll.vco2.min_n: %d\n", pll_lim->vco2.min_n);
- ErrorF("pll.vco2.max_n: %d\n", pll_lim->vco2.max_n);
- ErrorF("pll.vco2.min_m: %d\n", pll_lim->vco2.min_m);
- ErrorF("pll.vco2.max_m: %d\n", pll_lim->vco2.max_m);
-
- ErrorF("pll.max_log2p: %d\n", pll_lim->max_log2p);
- ErrorF("pll.log2p_bias: %d\n", pll_lim->log2p_bias);
-
- ErrorF("pll.refclk: %d\n", pll_lim->refclk);
-#endif
+ NV_DEBUG(dev, "pll.vco1.minfreq: %d\n", pll_lim->vco1.minfreq);
+ NV_DEBUG(dev, "pll.vco1.maxfreq: %d\n", pll_lim->vco1.maxfreq);
+ NV_DEBUG(dev, "pll.vco1.min_inputfreq: %d\n", pll_lim->vco1.min_inputfreq);
+ NV_DEBUG(dev, "pll.vco1.max_inputfreq: %d\n", pll_lim->vco1.max_inputfreq);
+ NV_DEBUG(dev, "pll.vco1.min_n: %d\n", pll_lim->vco1.min_n);
+ NV_DEBUG(dev, "pll.vco1.max_n: %d\n", pll_lim->vco1.max_n);
+ NV_DEBUG(dev, "pll.vco1.min_m: %d\n", pll_lim->vco1.min_m);
+ NV_DEBUG(dev, "pll.vco1.max_m: %d\n", pll_lim->vco1.max_m);
+ if (pll_lim->vco2.maxfreq) {
+ NV_DEBUG(dev, "pll.vco2.minfreq: %d\n", pll_lim->vco2.minfreq);
+ NV_DEBUG(dev, "pll.vco2.maxfreq: %d\n", pll_lim->vco2.maxfreq);
+ NV_DEBUG(dev, "pll.vco2.min_inputfreq: %d\n", pll_lim->vco2.min_inputfreq);
+ NV_DEBUG(dev, "pll.vco2.max_inputfreq: %d\n", pll_lim->vco2.max_inputfreq);
+ NV_DEBUG(dev, "pll.vco2.min_n: %d\n", pll_lim->vco2.min_n);
+ NV_DEBUG(dev, "pll.vco2.max_n: %d\n", pll_lim->vco2.max_n);
+ NV_DEBUG(dev, "pll.vco2.min_m: %d\n", pll_lim->vco2.min_m);
+ NV_DEBUG(dev, "pll.vco2.max_m: %d\n", pll_lim->vco2.max_m);
+ }
+ if (!pll_lim->max_p) {
+ NV_DEBUG(dev, "pll.max_log2p: %d\n", pll_lim->max_log2p);
+ NV_DEBUG(dev, "pll.log2p_bias: %d\n", pll_lim->log2p_bias);
+ } else {
+ NV_DEBUG(dev, "pll.min_p: %d\n", pll_lim->min_p);
+ NV_DEBUG(dev, "pll.max_p: %d\n", pll_lim->max_p);
+ }
+ NV_DEBUG(dev, "pll.refclk: %d\n", pll_lim->refclk);
return 0;
}
@@ -4953,79 +5180,6 @@ static uint16_t findstr(uint8_t *data, int n, const uint8_t *str, int len)
return 0;
}
-static int
-read_dcb_i2c_entry(struct drm_device *dev, int dcb_version, uint8_t *i2ctable, int index, struct dcb_i2c_entry *i2c)
-{
- uint8_t dcb_i2c_ver = dcb_version, headerlen = 0, entry_len = 4;
- int i2c_entries = DCB_MAX_NUM_I2C_ENTRIES;
- int recordoffset = 0, rdofs = 1, wrofs = 0;
- uint8_t port_type = 0;
-
- if (!i2ctable)
- return -EINVAL;
-
- if (dcb_version >= 0x30) {
- if (i2ctable[0] != dcb_version) /* necessary? */
- NV_WARN(dev,
- "DCB I2C table version mismatch (%02X vs %02X)\n",
- i2ctable[0], dcb_version);
- dcb_i2c_ver = i2ctable[0];
- headerlen = i2ctable[1];
- if (i2ctable[2] <= DCB_MAX_NUM_I2C_ENTRIES)
- i2c_entries = i2ctable[2];
- else
- NV_WARN(dev,
- "DCB I2C table has more entries than indexable "
- "(%d entries, max %d)\n", i2ctable[2],
- DCB_MAX_NUM_I2C_ENTRIES);
- entry_len = i2ctable[3];
- /* [4] is i2c_default_indices, read in parse_dcb_table() */
- }
- /*
- * It's your own fault if you call this function on a DCB 1.1 BIOS --
- * the test below is for DCB 1.2
- */
- if (dcb_version < 0x14) {
- recordoffset = 2;
- rdofs = 0;
- wrofs = 1;
- }
-
- if (index == 0xf)
- return 0;
- if (index >= i2c_entries) {
- NV_ERROR(dev, "DCB I2C index too big (%d >= %d)\n",
- index, i2ctable[2]);
- return -ENOENT;
- }
- if (i2ctable[headerlen + entry_len * index + 3] == 0xff) {
- NV_ERROR(dev, "DCB I2C entry invalid\n");
- return -EINVAL;
- }
-
- if (dcb_i2c_ver >= 0x30) {
- port_type = i2ctable[headerlen + recordoffset + 3 + entry_len * index];
-
- /*
- * Fixup for chips using same address offset for read and
- * write.
- */
- if (port_type == 4) /* seen on C51 */
- rdofs = wrofs = 1;
- if (port_type >= 5) /* G80+ */
- rdofs = wrofs = 0;
- }
-
- if (dcb_i2c_ver >= 0x40 && port_type != 5 && port_type != 6)
- NV_WARN(dev, "DCB I2C table has port type %d\n", port_type);
-
- i2c->port_type = port_type;
- i2c->read = i2ctable[headerlen + recordoffset + rdofs + entry_len * index];
- i2c->write = i2ctable[headerlen + recordoffset + wrofs + entry_len * index];
-
- return 0;
-}
-
static struct dcb_gpio_entry *
new_gpio_entry(struct nvbios *bios)
{
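/*
 * Minimal sketch (not part of this patch) of the handler convention the
 * parse_init_table() hunk above switches to: an init-table handler returns
 * the number of bytes it consumed so the parser can advance to the next
 * opcode, 0 to stop parsing, or a negative errno on failure (which is now
 * logged and aborts the table).  The opcode below and its 3-byte length
 * are invented purely for illustration.
 */
static int
init_example_op(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
	uint8_t arg = bios->data[offset + 1];

	if (!iexec->execute)
		return 3;		/* skip, but still consume opcode + operands */

	if (arg > 5)
		return -EINVAL;		/* parse_init_table() logs and bails out */

	/* ... program hardware based on arg ... */
	return 3;			/* opcode byte + 2 operand bytes */
}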
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index c0d7b0a3ece0..adf4ec2d06c0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -35,6 +35,7 @@
#define DCB_LOC_ON_CHIP 0
struct dcb_i2c_entry {
+ uint32_t entry;
uint8_t port_type;
uint8_t read, write;
struct nouveau_i2c_chan *chan;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 957d17629840..6f3c19522377 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -160,11 +160,11 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
ttm_bo_type_device, &nvbo->placement, align, 0,
false, NULL, size, nouveau_bo_del_ttm);
- nvbo->channel = NULL;
if (ret) {
/* ttm will call nouveau_bo_del_ttm if it fails.. */
return ret;
}
+ nvbo->channel = NULL;
spin_lock(&dev_priv->ttm.bo_list_lock);
list_add_tail(&nvbo->head, &dev_priv->ttm.bo_list);
@@ -225,7 +225,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
nouveau_bo_placement_set(nvbo, memtype, 0);
- ret = ttm_bo_validate(bo, &nvbo->placement, false, false);
+ ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false);
if (ret == 0) {
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
@@ -261,7 +261,7 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);
- ret = ttm_bo_validate(bo, &nvbo->placement, false, false);
+ ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false);
if (ret == 0) {
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
@@ -391,25 +391,16 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
break;
case TTM_PL_VRAM:
man->flags = TTM_MEMTYPE_FLAG_FIXED |
- TTM_MEMTYPE_FLAG_MAPPABLE |
- TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
+ TTM_MEMTYPE_FLAG_MAPPABLE;
man->available_caching = TTM_PL_FLAG_UNCACHED |
TTM_PL_FLAG_WC;
man->default_caching = TTM_PL_FLAG_WC;
-
- man->io_addr = NULL;
- man->io_offset = drm_get_resource_start(dev, 1);
- man->io_size = drm_get_resource_len(dev, 1);
- if (man->io_size > dev_priv->vram_size)
- man->io_size = dev_priv->vram_size;
-
man->gpu_offset = dev_priv->vm_vram_base;
break;
case TTM_PL_TT:
switch (dev_priv->gart_info.type) {
case NOUVEAU_GART_AGP:
- man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
- TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
man->available_caching = TTM_PL_FLAG_UNCACHED;
man->default_caching = TTM_PL_FLAG_UNCACHED;
break;
@@ -424,10 +415,6 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
dev_priv->gart_info.type);
return -EINVAL;
}
-
- man->io_offset = dev_priv->gart_info.aper_base;
- man->io_size = dev_priv->gart_info.aper_size;
- man->io_addr = NULL;
man->gpu_offset = dev_priv->vm_gart_base;
break;
default:
@@ -462,7 +449,8 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
static int
nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
- struct nouveau_bo *nvbo, bool evict, bool no_wait,
+ struct nouveau_bo *nvbo, bool evict,
+ bool no_wait_reserve, bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
struct nouveau_fence *fence = NULL;
@@ -473,7 +461,7 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
return ret;
ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL,
- evict, no_wait, new_mem);
+ evict, no_wait_reserve, no_wait_gpu, new_mem);
if (nvbo->channel && nvbo->channel != chan)
ret = nouveau_fence_wait(fence, NULL, false, false);
nouveau_fence_unref((void *)&fence);
@@ -497,7 +485,8 @@ nouveau_bo_mem_ctxdma(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
- int no_wait, struct ttm_mem_reg *new_mem)
+ bool no_wait_reserve, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem)
{
struct nouveau_bo *nvbo = nouveau_bo(bo);
struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
@@ -575,12 +564,13 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
dst_offset += (PAGE_SIZE * line_count);
}
- return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait, new_mem);
+ return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait_reserve, no_wait_gpu, new_mem);
}
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
- bool no_wait, struct ttm_mem_reg *new_mem)
+ bool no_wait_reserve, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem)
{
u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
struct ttm_placement placement;
@@ -593,7 +583,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
- ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait);
+ ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
if (ret)
return ret;
@@ -601,11 +591,11 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
if (ret)
goto out;
- ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait, &tmp_mem);
+ ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);
if (ret)
goto out;
- ret = ttm_bo_move_ttm(bo, evict, no_wait, new_mem);
+ ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
out:
if (tmp_mem.mm_node) {
spin_lock(&bo->bdev->glob->lru_lock);
@@ -618,7 +608,8 @@ out:
static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
- bool no_wait, struct ttm_mem_reg *new_mem)
+ bool no_wait_reserve, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem)
{
u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
struct ttm_placement placement;
@@ -631,15 +622,15 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
- ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait);
+ ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
if (ret)
return ret;
- ret = ttm_bo_move_ttm(bo, evict, no_wait, &tmp_mem);
+ ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, &tmp_mem);
if (ret)
goto out;
- ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait, new_mem);
+ ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
if (ret)
goto out;
@@ -706,7 +697,8 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
- bool no_wait, struct ttm_mem_reg *new_mem)
+ bool no_wait_reserve, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem)
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
struct nouveau_bo *nvbo = nouveau_bo(bo);
@@ -721,7 +713,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
/* Software copy if the card isn't up and running yet. */
if (dev_priv->init_state != NOUVEAU_CARD_INIT_DONE ||
!dev_priv->channel) {
- ret = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+ ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
goto out;
}
@@ -735,17 +727,17 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
/* Hardware assisted copy. */
if (new_mem->mem_type == TTM_PL_SYSTEM)
- ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait, new_mem);
+ ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
else if (old_mem->mem_type == TTM_PL_SYSTEM)
- ret = nouveau_bo_move_flips(bo, evict, intr, no_wait, new_mem);
+ ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
else
- ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait, new_mem);
+ ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
if (!ret)
goto out;
/* Fallback to software copy. */
- ret = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+ ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
out:
if (ret)
@@ -762,6 +754,55 @@ nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
return 0;
}
+static int
+nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+ struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
+ struct drm_device *dev = dev_priv->dev;
+
+ mem->bus.addr = NULL;
+ mem->bus.offset = 0;
+ mem->bus.size = mem->num_pages << PAGE_SHIFT;
+ mem->bus.base = 0;
+ mem->bus.is_iomem = false;
+ if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+ return -EINVAL;
+ switch (mem->mem_type) {
+ case TTM_PL_SYSTEM:
+ /* System memory */
+ return 0;
+ case TTM_PL_TT:
+#if __OS_HAS_AGP
+ if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
+ mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+ mem->bus.base = dev_priv->gart_info.aper_base;
+ mem->bus.is_iomem = true;
+ }
+#endif
+ break;
+ case TTM_PL_VRAM:
+ mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+ mem->bus.base = drm_get_resource_start(dev, 1);
+ mem->bus.is_iomem = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void
+nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+}
+
+static int
+nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
+{
+ return 0;
+}
+
struct ttm_bo_driver nouveau_bo_driver = {
.create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
.invalidate_caches = nouveau_bo_invalidate_caches,
@@ -774,5 +815,8 @@ struct ttm_bo_driver nouveau_bo_driver = {
.sync_obj_flush = nouveau_fence_flush,
.sync_obj_unref = nouveau_fence_unref,
.sync_obj_ref = nouveau_fence_ref,
+ .fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
+ .io_mem_reserve = &nouveau_ttm_io_mem_reserve,
+ .io_mem_free = &nouveau_ttm_io_mem_free,
};
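/*
 * Rough sketch (not from this patch) of how the ttm_mem_reg bus fields that
 * nouveau_ttm_io_mem_reserve() fills in above are typically consumed: for an
 * io-mapped placement the CPU-visible physical address is bus.base +
 * bus.offset, while system placements keep is_iomem == false and are mapped
 * through ordinary kernel page mappings instead.  The helper name is invented.
 */
static void __iomem *
example_map_bo_aperture(struct ttm_mem_reg *mem)
{
	if (!mem->bus.is_iomem)
		return NULL;	/* plain system pages: use kmap()/vmap() paths */

	return ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
}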
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 14afe1e47e57..266b0ff441af 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -241,7 +241,8 @@ nouveau_connector_detect(struct drm_connector *connector)
if (nv_encoder && nv_connector->native_mode) {
unsigned status = connector_status_connected;
-#ifdef CONFIG_ACPI
+#if defined(CONFIG_ACPI_BUTTON) || \
+ (defined(CONFIG_ACPI_BUTTON_MODULE) && defined(MODULE))
if (!nouveau_ignorelid && !acpi_lid_open())
status = connector_status_unknown;
#endif
@@ -843,6 +844,7 @@ nouveau_connector_create(struct drm_device *dev,
switch (dcb->type) {
case DCB_CONNECTOR_VGA:
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
if (dev_priv->card_type >= NV_50) {
drm_connector_attach_property(connector,
dev->mode_config.scaling_mode_property,
@@ -854,6 +856,17 @@ nouveau_connector_create(struct drm_device *dev,
case DCB_CONNECTOR_TV_3:
nv_connector->scaling_mode = DRM_MODE_SCALE_NONE;
break;
+ case DCB_CONNECTOR_DP:
+ case DCB_CONNECTOR_eDP:
+ case DCB_CONNECTOR_HDMI_0:
+ case DCB_CONNECTOR_HDMI_1:
+ case DCB_CONNECTOR_DVI_I:
+ case DCB_CONNECTOR_DVI_D:
+ if (dev_priv->card_type >= NV_50)
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+ else
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ /* fall-through */
default:
nv_connector->scaling_mode = DRM_MODE_SCALE_FULLSCREEN;
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
index a251886a0ce6..7933de4aff2e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -33,6 +33,8 @@
#include "drmP.h"
#include "nouveau_drv.h"
+#include <ttm/ttm_page_alloc.h>
+
static int
nouveau_debugfs_channel_info(struct seq_file *m, void *data)
{
@@ -159,6 +161,7 @@ static struct drm_info_list nouveau_debugfs_list[] = {
{ "chipset", nouveau_debugfs_chipset_info, 0, NULL },
{ "memory", nouveau_debugfs_memory_info, 0, NULL },
{ "vbios.rom", nouveau_debugfs_vbios_image, 0, NULL },
+ { "ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL },
};
#define NOUVEAU_DEBUGFS_ENTRIES ARRAY_SIZE(nouveau_debugfs_list)
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index cf1c5c0a0abe..74e6b4ed12c0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -34,10 +34,6 @@ static void
nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
{
struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
- struct drm_device *dev = drm_fb->dev;
-
- if (drm_fb->fbdev)
- nouveau_fbcon_remove(dev, drm_fb);
if (fb->nvbo)
drm_gem_object_unreference_unlocked(fb->nvbo->gem);
@@ -61,27 +57,20 @@ static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = {
.create_handle = nouveau_user_framebuffer_create_handle,
};
-struct drm_framebuffer *
-nouveau_framebuffer_create(struct drm_device *dev, struct nouveau_bo *nvbo,
- struct drm_mode_fb_cmd *mode_cmd)
+int
+nouveau_framebuffer_init(struct drm_device *dev, struct nouveau_framebuffer *nouveau_fb,
+ struct drm_mode_fb_cmd *mode_cmd, struct nouveau_bo *nvbo)
{
- struct nouveau_framebuffer *fb;
int ret;
- fb = kzalloc(sizeof(struct nouveau_framebuffer), GFP_KERNEL);
- if (!fb)
- return NULL;
-
- ret = drm_framebuffer_init(dev, &fb->base, &nouveau_framebuffer_funcs);
+ ret = drm_framebuffer_init(dev, &nouveau_fb->base, &nouveau_framebuffer_funcs);
if (ret) {
- kfree(fb);
- return NULL;
+ return ret;
}
- drm_helper_mode_fill_fb_struct(&fb->base, mode_cmd);
-
- fb->nvbo = nvbo;
- return &fb->base;
+ drm_helper_mode_fill_fb_struct(&nouveau_fb->base, mode_cmd);
+ nouveau_fb->nvbo = nvbo;
+ return 0;
}
static struct drm_framebuffer *
@@ -89,24 +78,29 @@ nouveau_user_framebuffer_create(struct drm_device *dev,
struct drm_file *file_priv,
struct drm_mode_fb_cmd *mode_cmd)
{
- struct drm_framebuffer *fb;
+ struct nouveau_framebuffer *nouveau_fb;
struct drm_gem_object *gem;
+ int ret;
gem = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
if (!gem)
return NULL;
- fb = nouveau_framebuffer_create(dev, nouveau_gem_object(gem), mode_cmd);
- if (!fb) {
+ nouveau_fb = kzalloc(sizeof(struct nouveau_framebuffer), GFP_KERNEL);
+ if (!nouveau_fb)
+ return NULL;
+
+ ret = nouveau_framebuffer_init(dev, nouveau_fb, mode_cmd, nouveau_gem_object(gem));
+ if (ret) {
drm_gem_object_unreference(gem);
return NULL;
}
- return fb;
+ return &nouveau_fb->base;
}
const struct drm_mode_config_funcs nouveau_mode_config_funcs = {
.fb_create = nouveau_user_framebuffer_create,
- .fb_changed = nouveau_fbcon_probe,
+ .output_poll_changed = nouveau_fbcon_output_poll_changed,
};
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index 1de974acbc65..c6079e36669d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -153,7 +153,6 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
struct nouveau_channel *chan;
struct drm_crtc *crtc;
- uint32_t fbdev_flags;
int ret, i;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
@@ -163,8 +162,7 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
return 0;
NV_INFO(dev, "Disabling fbcon acceleration...\n");
- fbdev_flags = dev_priv->fbdev_info->flags;
- dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED;
+ nouveau_fbcon_save_disable_accel(dev);
NV_INFO(dev, "Unpinning framebuffer(s)...\n");
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -230,9 +228,9 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
}
acquire_console_sem();
- fb_set_suspend(dev_priv->fbdev_info, 1);
+ nouveau_fbcon_set_suspend(dev, 1);
release_console_sem();
- dev_priv->fbdev_info->flags = fbdev_flags;
+ nouveau_fbcon_restore_accel(dev);
return 0;
out_abort:
@@ -250,14 +248,12 @@ nouveau_pci_resume(struct pci_dev *pdev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_engine *engine = &dev_priv->engine;
struct drm_crtc *crtc;
- uint32_t fbdev_flags;
int ret, i;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
- fbdev_flags = dev_priv->fbdev_info->flags;
- dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED;
+ nouveau_fbcon_save_disable_accel(dev);
NV_INFO(dev, "We're back, enabling device...\n");
pci_set_power_state(pdev, PCI_D0);
@@ -332,13 +328,14 @@ nouveau_pci_resume(struct pci_dev *pdev)
}
acquire_console_sem();
- fb_set_suspend(dev_priv->fbdev_info, 0);
+ nouveau_fbcon_set_suspend(dev, 0);
release_console_sem();
- nouveau_fbcon_zfill(dev);
+ nouveau_fbcon_zfill_all(dev);
drm_helper_resume_force_mode(dev);
- dev_priv->fbdev_info->flags = fbdev_flags;
+
+ nouveau_fbcon_restore_accel(dev);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index ace630aa89e1..5b134438effe 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -535,6 +535,7 @@ struct drm_nouveau_private {
struct fb_info *fbdev_info;
+ int fifo_alloc_count;
struct nouveau_channel *fifos[NOUVEAU_MAX_CHANNEL_NR];
struct nouveau_engine engine;
@@ -621,6 +622,9 @@ struct drm_nouveau_private {
struct {
struct dentry *channel_root;
} debugfs;
+
+ struct nouveau_fbdev *nfbdev;
+ struct apertures_struct *apertures;
};
static inline struct drm_nouveau_private *
@@ -1166,6 +1170,12 @@ int nv17_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
int nv50_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
int nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
+/* nv50_calc. */
+int nv50_calc_pll(struct drm_device *, struct pll_lims *, int clk,
+ int *N1, int *M1, int *N2, int *M2, int *P);
+int nv50_calc_pll2(struct drm_device *, struct pll_lims *,
+ int clk, int *N, int *fN, int *M, int *P);
+
#ifndef ioread32_native
#ifdef __BIG_ENDIAN
#define ioread16_native ioread16be
diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h
index 9f28b94e479b..e1df8209cd0f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_encoder.h
+++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h
@@ -48,6 +48,8 @@ struct nouveau_encoder {
union {
struct {
int mc_unknown;
+ uint32_t unk0;
+ uint32_t unk1;
int dpcd_version;
int link_nr;
int link_bw;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fb.h b/drivers/gpu/drm/nouveau/nouveau_fb.h
index 4a3f31aa1949..d432134b71e0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fb.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fb.h
@@ -40,8 +40,6 @@ nouveau_framebuffer(struct drm_framebuffer *fb)
extern const struct drm_mode_config_funcs nouveau_mode_config_funcs;
-struct drm_framebuffer *
-nouveau_framebuffer_create(struct drm_device *, struct nouveau_bo *,
- struct drm_mode_fb_cmd *);
-
+int nouveau_framebuffer_init(struct drm_device *dev, struct nouveau_framebuffer *nouveau_fb,
+ struct drm_mode_fb_cmd *mode_cmd, struct nouveau_bo *nvbo);
#endif /* __NOUVEAU_FB_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 8e7dc1d4912a..fd4a2df715e9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -52,8 +52,8 @@
static int
nouveau_fbcon_sync(struct fb_info *info)
{
- struct nouveau_fbcon_par *par = info->par;
- struct drm_device *dev = par->dev;
+ struct nouveau_fbdev *nfbdev = info->par;
+ struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
int ret, i;
@@ -97,7 +97,6 @@ static struct fb_ops nouveau_fbcon_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
- .fb_setcolreg = drm_fb_helper_setcolreg,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
@@ -111,7 +110,6 @@ static struct fb_ops nv04_fbcon_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
- .fb_setcolreg = drm_fb_helper_setcolreg,
.fb_fillrect = nv04_fbcon_fillrect,
.fb_copyarea = nv04_fbcon_copyarea,
.fb_imageblit = nv04_fbcon_imageblit,
@@ -125,7 +123,6 @@ static struct fb_ops nv50_fbcon_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
- .fb_setcolreg = drm_fb_helper_setcolreg,
.fb_fillrect = nv50_fbcon_fillrect,
.fb_copyarea = nv50_fbcon_copyarea,
.fb_imageblit = nv50_fbcon_imageblit,
@@ -155,54 +152,10 @@ static void nouveau_fbcon_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
*blue = nv_crtc->lut.b[regno];
}
-static struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = {
- .gamma_set = nouveau_fbcon_gamma_set,
- .gamma_get = nouveau_fbcon_gamma_get
-};
-
-#if defined(__i386__) || defined(__x86_64__)
-static bool
-nouveau_fbcon_has_vesafb_or_efifb(struct drm_device *dev)
-{
- struct pci_dev *pdev = dev->pdev;
- int ramin;
-
- if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB &&
- screen_info.orig_video_isVGA != VIDEO_TYPE_EFI)
- return false;
-
- if (screen_info.lfb_base < pci_resource_start(pdev, 1))
- goto not_fb;
-
- if (screen_info.lfb_base + screen_info.lfb_size >=
- pci_resource_start(pdev, 1) + pci_resource_len(pdev, 1))
- goto not_fb;
-
- return true;
-not_fb:
- ramin = 2;
- if (pci_resource_len(pdev, ramin) == 0) {
- ramin = 3;
- if (pci_resource_len(pdev, ramin) == 0)
- return false;
- }
-
- if (screen_info.lfb_base < pci_resource_start(pdev, ramin))
- return false;
-
- if (screen_info.lfb_base + screen_info.lfb_size >=
- pci_resource_start(pdev, ramin) + pci_resource_len(pdev, ramin))
- return false;
-
- return true;
-}
-#endif
-
-void
-nouveau_fbcon_zfill(struct drm_device *dev)
+static void
+nouveau_fbcon_zfill(struct drm_device *dev, struct nouveau_fbdev *nfbdev)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct fb_info *info = dev_priv->fbdev_info;
+ struct fb_info *info = nfbdev->helper.fbdev;
struct fb_fillrect rect;
/* Clear the entire fbcon. The drm will program every connector
@@ -218,28 +171,27 @@ nouveau_fbcon_zfill(struct drm_device *dev)
}
static int
-nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
- uint32_t fb_height, uint32_t surface_width,
- uint32_t surface_height, uint32_t surface_depth,
- uint32_t surface_bpp, struct drm_framebuffer **pfb)
+nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
+ struct drm_fb_helper_surface_size *sizes)
{
+ struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct fb_info *info;
- struct nouveau_fbcon_par *par;
struct drm_framebuffer *fb;
struct nouveau_framebuffer *nouveau_fb;
struct nouveau_bo *nvbo;
struct drm_mode_fb_cmd mode_cmd;
- struct device *device = &dev->pdev->dev;
+ struct pci_dev *pdev = dev->pdev;
+ struct device *device = &pdev->dev;
int size, ret;
- mode_cmd.width = surface_width;
- mode_cmd.height = surface_height;
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
- mode_cmd.bpp = surface_bpp;
+ mode_cmd.bpp = sizes->surface_bpp;
mode_cmd.pitch = mode_cmd.width * (mode_cmd.bpp >> 3);
mode_cmd.pitch = roundup(mode_cmd.pitch, 256);
- mode_cmd.depth = surface_depth;
+ mode_cmd.depth = sizes->surface_depth;
size = mode_cmd.pitch * mode_cmd.height;
size = roundup(size, PAGE_SIZE);
@@ -268,31 +220,28 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
mutex_lock(&dev->struct_mutex);
- fb = nouveau_framebuffer_create(dev, nvbo, &mode_cmd);
- if (!fb) {
+ info = framebuffer_alloc(0, device);
+ if (!info) {
ret = -ENOMEM;
- NV_ERROR(dev, "failed to allocate fb.\n");
goto out_unref;
}
- list_add(&fb->filp_head, &dev->mode_config.fb_kernel_list);
-
- nouveau_fb = nouveau_framebuffer(fb);
- *pfb = fb;
-
- info = framebuffer_alloc(sizeof(struct nouveau_fbcon_par), device);
- if (!info) {
+ ret = fb_alloc_cmap(&info->cmap, 256, 0);
+ if (ret) {
ret = -ENOMEM;
goto out_unref;
}
- par = info->par;
- par->helper.funcs = &nouveau_fbcon_helper_funcs;
- par->helper.dev = dev;
- ret = drm_fb_helper_init_crtc_count(&par->helper, 2, 4);
- if (ret)
- goto out_unref;
- dev_priv->fbdev_info = info;
+ info->par = nfbdev;
+
+ nouveau_framebuffer_init(dev, &nfbdev->nouveau_fb, &mode_cmd, nvbo);
+
+ nouveau_fb = &nfbdev->nouveau_fb;
+ fb = &nouveau_fb->base;
+
+ /* setup helper */
+ nfbdev->helper.fb = fb;
+ nfbdev->helper.fbdev = info;
strcpy(info->fix.id, "nouveaufb");
if (nouveau_nofbaccel)
@@ -310,31 +259,17 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
info->screen_size = size;
drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
- drm_fb_helper_fill_var(info, fb, fb_width, fb_height);
+ drm_fb_helper_fill_var(info, &nfbdev->helper, sizes->fb_width, sizes->fb_height);
/* FIXME: we really shouldn't expose mmio space at all */
- info->fix.mmio_start = pci_resource_start(dev->pdev, 1);
- info->fix.mmio_len = pci_resource_len(dev->pdev, 1);
+ info->fix.mmio_start = pci_resource_start(pdev, 1);
+ info->fix.mmio_len = pci_resource_len(pdev, 1);
/* Set aperture base/size for vesafb takeover */
-#if defined(__i386__) || defined(__x86_64__)
- if (nouveau_fbcon_has_vesafb_or_efifb(dev)) {
- /* Some NVIDIA VBIOS' are stupid and decide to put the
- * framebuffer in the middle of the PRAMIN BAR for
- * whatever reason. We need to know the exact lfb_base
- * to get vesafb kicked off, and the only reliable way
- * we have left is to find out lfb_base the same way
- * vesafb did.
- */
- info->aperture_base = screen_info.lfb_base;
- info->aperture_size = screen_info.lfb_size;
- if (screen_info.orig_video_isVGA == VIDEO_TYPE_VLFB)
- info->aperture_size *= 65536;
- } else
-#endif
- {
- info->aperture_base = info->fix.mmio_start;
- info->aperture_size = info->fix.mmio_len;
+ info->apertures = dev_priv->apertures;
+ if (!info->apertures) {
+ ret = -ENOMEM;
+ goto out_unref;
}
info->pixmap.size = 64*1024;
@@ -343,11 +278,6 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
info->pixmap.flags = FB_PIXMAP_SYSTEM;
info->pixmap.scan_align = 1;
- fb->fbdev = info;
-
- par->nouveau_fb = nouveau_fb;
- par->dev = dev;
-
if (dev_priv->channel && !nouveau_nofbaccel) {
switch (dev_priv->card_type) {
case NV_50:
@@ -361,7 +291,7 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
};
}
- nouveau_fbcon_zfill(dev);
+ nouveau_fbcon_zfill(dev, nfbdev);
	/* To allow resizing without swapping buffers */
NV_INFO(dev, "allocated %dx%d fb: 0x%lx, bo %p\n",
@@ -379,44 +309,123 @@ out:
return ret;
}
-int
-nouveau_fbcon_probe(struct drm_device *dev)
+static int
+nouveau_fbcon_find_or_create_single(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
{
- NV_DEBUG_KMS(dev, "\n");
+ struct nouveau_fbdev *nfbdev = (struct nouveau_fbdev *)helper;
+ int new_fb = 0;
+ int ret;
+
+ if (!helper->fb) {
+ ret = nouveau_fbcon_create(nfbdev, sizes);
+ if (ret)
+ return ret;
+ new_fb = 1;
+ }
+ return new_fb;
+}
- return drm_fb_helper_single_fb_probe(dev, 32, nouveau_fbcon_create);
+void
+nouveau_fbcon_output_poll_changed(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ drm_fb_helper_hotplug_event(&dev_priv->nfbdev->helper);
}
int
-nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb)
+nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *nfbdev)
{
- struct nouveau_framebuffer *nouveau_fb = nouveau_framebuffer(fb);
+ struct nouveau_framebuffer *nouveau_fb = &nfbdev->nouveau_fb;
struct fb_info *info;
- if (!fb)
- return -EINVAL;
-
- info = fb->fbdev;
- if (info) {
- struct nouveau_fbcon_par *par = info->par;
-
+ if (nfbdev->helper.fbdev) {
+ info = nfbdev->helper.fbdev;
unregister_framebuffer(info);
+ if (info->cmap.len)
+ fb_dealloc_cmap(&info->cmap);
+ framebuffer_release(info);
+ }
+
+ if (nouveau_fb->nvbo) {
nouveau_bo_unmap(nouveau_fb->nvbo);
drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem);
nouveau_fb->nvbo = NULL;
- if (par)
- drm_fb_helper_free(&par->helper);
- framebuffer_release(info);
}
-
+ drm_fb_helper_fini(&nfbdev->helper);
+ drm_framebuffer_cleanup(&nouveau_fb->base);
return 0;
}
void nouveau_fbcon_gpu_lockup(struct fb_info *info)
{
- struct nouveau_fbcon_par *par = info->par;
- struct drm_device *dev = par->dev;
+ struct nouveau_fbdev *nfbdev = info->par;
+ struct drm_device *dev = nfbdev->dev;
NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
info->flags |= FBINFO_HWACCEL_DISABLED;
}
+
+static struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = {
+ .gamma_set = nouveau_fbcon_gamma_set,
+ .gamma_get = nouveau_fbcon_gamma_get,
+ .fb_probe = nouveau_fbcon_find_or_create_single,
+};
+
+
+int nouveau_fbcon_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fbdev *nfbdev;
+
+ nfbdev = kzalloc(sizeof(struct nouveau_fbdev), GFP_KERNEL);
+ if (!nfbdev)
+ return -ENOMEM;
+
+ nfbdev->dev = dev;
+ dev_priv->nfbdev = nfbdev;
+ nfbdev->helper.funcs = &nouveau_fbcon_helper_funcs;
+
+ drm_fb_helper_init(dev, &nfbdev->helper, 2, 4);
+ drm_fb_helper_single_add_all_connectors(&nfbdev->helper);
+ drm_fb_helper_initial_config(&nfbdev->helper, 32);
+ return 0;
+}
+
+void nouveau_fbcon_fini(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (!dev_priv->nfbdev)
+ return;
+
+ nouveau_fbcon_destroy(dev, dev_priv->nfbdev);
+ kfree(dev_priv->nfbdev);
+ dev_priv->nfbdev = NULL;
+}
+
+void nouveau_fbcon_save_disable_accel(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ dev_priv->nfbdev->saved_flags = dev_priv->nfbdev->helper.fbdev->flags;
+ dev_priv->nfbdev->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED;
+}
+
+void nouveau_fbcon_restore_accel(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ dev_priv->nfbdev->helper.fbdev->flags = dev_priv->nfbdev->saved_flags;
+}
+
+void nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ fb_set_suspend(dev_priv->nfbdev->helper.fbdev, state);
+}
+
+void nouveau_fbcon_zfill_all(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ nouveau_fbcon_zfill(dev, dev_priv->nfbdev);
+}
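/*
 * Usage sketch mirroring the nouveau_drv.c hunks earlier in this patch: the
 * new helpers replace the direct dev_priv->fbdev_info flag juggling in the
 * suspend path (resume is symmetric, with nouveau_fbcon_set_suspend(dev, 0)
 * and nouveau_fbcon_zfill_all(dev) before re-enabling acceleration).
 */
static void example_fbcon_suspend(struct drm_device *dev)
{
	nouveau_fbcon_save_disable_accel(dev);	/* stash flags, disable accel */

	/* ... unpin framebuffers, idle channels, save display state ... */

	acquire_console_sem();
	nouveau_fbcon_set_suspend(dev, 1);
	release_console_sem();

	nouveau_fbcon_restore_accel(dev);
}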
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
index f9c34e1a8c11..e7e12684c37e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
@@ -29,16 +29,16 @@
#include "drm_fb_helper.h"
-struct nouveau_fbcon_par {
+#include "nouveau_fb.h"
+struct nouveau_fbdev {
struct drm_fb_helper helper;
+ struct nouveau_framebuffer nouveau_fb;
+ struct list_head fbdev_list;
struct drm_device *dev;
- struct nouveau_framebuffer *nouveau_fb;
+ unsigned int saved_flags;
};
-int nouveau_fbcon_probe(struct drm_device *dev);
-int nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb);
void nouveau_fbcon_restore(void);
-void nouveau_fbcon_zfill(struct drm_device *dev);
void nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
void nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
@@ -50,5 +50,14 @@ void nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
int nv50_fbcon_accel_init(struct fb_info *info);
void nouveau_fbcon_gpu_lockup(struct fb_info *info);
+
+int nouveau_fbcon_init(struct drm_device *dev);
+void nouveau_fbcon_fini(struct drm_device *dev);
+void nouveau_fbcon_set_suspend(struct drm_device *dev, int state);
+void nouveau_fbcon_zfill_all(struct drm_device *dev);
+void nouveau_fbcon_save_disable_accel(struct drm_device *dev);
+void nouveau_fbcon_restore_accel(struct drm_device *dev);
+
+void nouveau_fbcon_output_poll_changed(struct drm_device *dev);
#endif /* __NV50_FBCON_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 1bc0b38a5167..69c76cf93407 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -57,6 +57,9 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
}
ttm_bo_unref(&bo);
+
+ drm_gem_object_release(gem);
+ kfree(gem);
}
int
@@ -382,7 +385,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
nvbo->channel = chan;
ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
- false, false);
+ false, false, false);
nvbo->channel = NULL;
if (unlikely(ret)) {
NV_ERROR(dev, "fail ttm_validate\n");
diff --git a/drivers/gpu/drm/nouveau/nouveau_grctx.c b/drivers/gpu/drm/nouveau/nouveau_grctx.c
index 32f0e495464c..f731c5f60536 100644
--- a/drivers/gpu/drm/nouveau/nouveau_grctx.c
+++ b/drivers/gpu/drm/nouveau/nouveau_grctx.c
@@ -68,13 +68,12 @@ nouveau_grctx_prog_load(struct drm_device *dev)
return ret;
}
- pgraph->ctxprog = kmalloc(fw->size, GFP_KERNEL);
+ pgraph->ctxprog = kmemdup(fw->data, fw->size, GFP_KERNEL);
if (!pgraph->ctxprog) {
NV_ERROR(dev, "OOM copying ctxprog\n");
release_firmware(fw);
return -ENOMEM;
}
- memcpy(pgraph->ctxprog, fw->data, fw->size);
cp = pgraph->ctxprog;
if (le32_to_cpu(cp->signature) != 0x5043564e ||
@@ -97,14 +96,13 @@ nouveau_grctx_prog_load(struct drm_device *dev)
return ret;
}
- pgraph->ctxvals = kmalloc(fw->size, GFP_KERNEL);
+ pgraph->ctxvals = kmemdup(fw->data, fw->size, GFP_KERNEL);
if (!pgraph->ctxvals) {
NV_ERROR(dev, "OOM copying ctxvals\n");
release_firmware(fw);
nouveau_grctx_fini(dev);
return -ENOMEM;
}
- memcpy(pgraph->ctxvals, fw->data, fw->size);
cv = (void *)pgraph->ctxvals;
if (le32_to_cpu(cv->signature) != 0x5643564e ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c
index 88583e7bf651..316a3c7e6eb4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_i2c.c
+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c
@@ -254,16 +254,27 @@ struct nouveau_i2c_chan *
nouveau_i2c_find(struct drm_device *dev, int index)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->vbios;
+ struct dcb_i2c_entry *i2c = &dev_priv->vbios.dcb.i2c[index];
if (index >= DCB_MAX_NUM_I2C_ENTRIES)
return NULL;
- if (!bios->dcb.i2c[index].chan) {
- if (nouveau_i2c_init(dev, &bios->dcb.i2c[index], index))
- return NULL;
+ if (dev_priv->chipset >= NV_50 && (i2c->entry & 0x00000100)) {
+ uint32_t reg = 0xe500, val;
+
+ if (i2c->port_type == 6) {
+ reg += i2c->read * 0x50;
+ val = 0x2002;
+ } else {
+ reg += ((i2c->entry & 0x1e00) >> 9) * 0x50;
+ val = 0xe001;
+ }
+
+ nv_wr32(dev, reg, (nv_rd32(dev, reg) & ~0xf003) | val);
}
- return bios->dcb.i2c[index].chan;
+ if (!i2c->chan && nouveau_i2c_init(dev, i2c, index))
+ return NULL;
+ return i2c->chan;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
index 13e73cee4c44..53360f156063 100644
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -1204,7 +1204,7 @@ nouveau_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *)arg;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t status, fbdev_flags = 0;
+ uint32_t status;
unsigned long flags;
status = nv_rd32(dev, NV03_PMC_INTR_0);
@@ -1213,11 +1213,6 @@ nouveau_irq_handler(DRM_IRQ_ARGS)
spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
- if (dev_priv->fbdev_info) {
- fbdev_flags = dev_priv->fbdev_info->flags;
- dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED;
- }
-
if (status & NV_PMC_INTR_0_PFIFO_PENDING) {
nouveau_fifo_irq_handler(dev);
status &= ~NV_PMC_INTR_0_PFIFO_PENDING;
@@ -1247,9 +1242,6 @@ nouveau_irq_handler(DRM_IRQ_ARGS)
if (status)
NV_ERROR(dev, "Unhandled PMC INTR status bits 0x%08x\n", status);
- if (dev_priv->fbdev_info)
- dev_priv->fbdev_info->flags = fbdev_flags;
-
spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
return IRQ_HANDLED;
diff --git a/drivers/gpu/drm/nouveau/nouveau_reg.h b/drivers/gpu/drm/nouveau/nouveau_reg.h
index aa9b310e41be..6ca80a3fe70d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_reg.h
+++ b/drivers/gpu/drm/nouveau/nouveau_reg.h
@@ -826,6 +826,7 @@
#define NV50_SOR_DP_CTRL_TRAINING_PATTERN_2 0x02000000
#define NV50_SOR_DP_UNK118(i,l) (0x0061c118 + (i) * 0x800 + (l) * 0x80)
#define NV50_SOR_DP_UNK120(i,l) (0x0061c120 + (i) * 0x800 + (l) * 0x80)
+#define NV50_SOR_DP_UNK128(i,l) (0x0061c128 + (i) * 0x800 + (l) * 0x80)
#define NV50_SOR_DP_UNK130(i,l) (0x0061c130 + (i) * 0x800 + (l) * 0x80)
#define NV50_PDISPLAY_USER(i) ((i) * 0x1000 + 0x00640000)
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index e1710640a278..e632339c323e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -34,6 +34,7 @@
#include "nouveau_drv.h"
#include "nouveau_drm.h"
+#include "nouveau_fbcon.h"
#include "nv50_display.h"
static void nouveau_stub_takedown(struct drm_device *dev) {}
@@ -515,8 +516,10 @@ nouveau_card_init(struct drm_device *dev)
dev_priv->init_state = NOUVEAU_CARD_INIT_DONE;
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- drm_helper_initial_config(dev);
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ nouveau_fbcon_init(dev);
+ drm_kms_helper_poll_init(dev);
+ }
return 0;
@@ -563,6 +566,7 @@ static void nouveau_card_takedown(struct drm_device *dev)
NV_DEBUG(dev, "prev state = %d\n", dev_priv->init_state);
if (dev_priv->init_state != NOUVEAU_CARD_INIT_DOWN) {
+
nouveau_backlight_exit(dev);
if (dev_priv->channel) {
@@ -637,6 +641,48 @@ static void nouveau_OF_copy_vbios_to_ramin(struct drm_device *dev)
#endif
}
+static struct apertures_struct *nouveau_get_apertures(struct drm_device *dev)
+{
+ struct pci_dev *pdev = dev->pdev;
+ struct apertures_struct *aper = alloc_apertures(3);
+ if (!aper)
+ return NULL;
+
+ aper->ranges[0].base = pci_resource_start(pdev, 1);
+ aper->ranges[0].size = pci_resource_len(pdev, 1);
+ aper->count = 1;
+
+ if (pci_resource_len(pdev, 2)) {
+ aper->ranges[aper->count].base = pci_resource_start(pdev, 2);
+ aper->ranges[aper->count].size = pci_resource_len(pdev, 2);
+ aper->count++;
+ }
+
+ if (pci_resource_len(pdev, 3)) {
+ aper->ranges[aper->count].base = pci_resource_start(pdev, 3);
+ aper->ranges[aper->count].size = pci_resource_len(pdev, 3);
+ aper->count++;
+ }
+
+ return aper;
+}
+
+static int nouveau_remove_conflicting_drivers(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ bool primary = false;
+ dev_priv->apertures = nouveau_get_apertures(dev);
+ if (!dev_priv->apertures)
+ return -ENOMEM;
+
+#ifdef CONFIG_X86
+ primary = dev->pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
+#endif
+
+ remove_conflicting_framebuffers(dev_priv->apertures, "nouveaufb", primary);
+ return 0;
+}
+
int nouveau_load(struct drm_device *dev, unsigned long flags)
{
struct drm_nouveau_private *dev_priv;
@@ -724,6 +770,12 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
NV_INFO(dev, "Detected an NV%2x generation card (0x%08x)\n",
dev_priv->card_type, reg0);
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ int ret = nouveau_remove_conflicting_drivers(dev);
+ if (ret)
+ return ret;
+ }
+
/* map larger RAMIN aperture on NV40 cards */
dev_priv->ramin = NULL;
if (dev_priv->card_type >= NV_40) {
@@ -794,6 +846,8 @@ int nouveau_unload(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ drm_kms_helper_poll_fini(dev);
+ nouveau_fbcon_fini(dev);
if (dev_priv->card_type >= NV_50)
nv50_display_destroy(dev);
else
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index 813b25cec726..1eeac4fae73d 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -30,8 +30,8 @@
void
nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
- struct nouveau_fbcon_par *par = info->par;
- struct drm_device *dev = par->dev;
+ struct nouveau_fbdev *nfbdev = info->par;
+ struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
@@ -57,8 +57,8 @@ nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
void
nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
- struct nouveau_fbcon_par *par = info->par;
- struct drm_device *dev = par->dev;
+ struct nouveau_fbdev *nfbdev = info->par;
+ struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
@@ -91,8 +91,8 @@ nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
void
nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
{
- struct nouveau_fbcon_par *par = info->par;
- struct drm_device *dev = par->dev;
+ struct nouveau_fbdev *nfbdev = info->par;
+ struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
uint32_t fg;
@@ -179,8 +179,8 @@ nv04_fbcon_grobj_new(struct drm_device *dev, int class, uint32_t handle)
int
nv04_fbcon_accel_init(struct fb_info *info)
{
- struct nouveau_fbcon_par *par = info->par;
- struct drm_device *dev = par->dev;
+ struct nouveau_fbdev *nfbdev = info->par;
+ struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
const int sub = NvSubCtxSurf2D;
@@ -236,7 +236,7 @@ nv04_fbcon_accel_init(struct fb_info *info)
if (ret)
return ret;
- ret = nv04_fbcon_grobj_new(dev, dev_priv->card_type >= NV_10 ?
+ ret = nv04_fbcon_grobj_new(dev, dev_priv->chipset >= 0x11 ?
0x009f : 0x005f, NvImageBlit);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
index e260986ea65a..618355e9cdd5 100644
--- a/drivers/gpu/drm/nouveau/nv04_graph.c
+++ b/drivers/gpu/drm/nouveau/nv04_graph.c
@@ -532,9 +532,82 @@ nv04_graph_mthd_set_ref(struct nouveau_channel *chan, int grclass,
return 0;
}
-static int
-nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass,
- int mthd, uint32_t data)
+/*
+ * Software methods, why they are needed, and how they all work:
+ *
+ * NV04 and NV05 keep most of the state in PGRAPH context itself, but some
+ * 2d engine settings are kept inside the grobjs themselves. The grobjs are
+ * 3 words long on both. grobj format on NV04 is:
+ *
+ * word 0:
+ * - bits 0-7: class
+ * - bit 12: color key active
+ * - bit 13: clip rect active
+ * - bit 14: if set, destination surface is swizzled and taken from buffer 5
+ * [set by NV04_SWIZZLED_SURFACE], otherwise it's linear and taken
+ * from buffer 0 [set by NV04_CONTEXT_SURFACES_2D or
+ * NV03_CONTEXT_SURFACE_DST].
+ * - bits 15-17: 2d operation [aka patch config]
+ * - bit 24: patch valid [enables rendering using this object]
+ * - bit 25: surf3d valid [for tex_tri and multitex_tri only]
+ * word 1:
+ * - bits 0-1: mono format
+ * - bits 8-13: color format
+ * - bits 16-31: DMA_NOTIFY instance
+ * word 2:
+ * - bits 0-15: DMA_A instance
+ * - bits 16-31: DMA_B instance
+ *
+ * On NV05 it's:
+ *
+ * word 0:
+ * - bits 0-7: class
+ * - bit 12: color key active
+ * - bit 13: clip rect active
+ * - bit 14: if set, destination surface is swizzled and taken from buffer 5
+ * [set by NV04_SWIZZLED_SURFACE], otherwise it's linear and taken
+ * from buffer 0 [set by NV04_CONTEXT_SURFACES_2D or
+ * NV03_CONTEXT_SURFACE_DST].
+ * - bits 15-17: 2d operation [aka patch config]
+ * - bits 20-22: dither mode
+ * - bit 24: patch valid [enables rendering using this object]
+ * - bit 25: surface_dst/surface_color/surf2d/surf3d valid
+ * - bit 26: surface_src/surface_zeta valid
+ * - bit 27: pattern valid
+ * - bit 28: rop valid
+ * - bit 29: beta1 valid
+ * - bit 30: beta4 valid
+ * word 1:
+ * - bits 0-1: mono format
+ * - bits 8-13: color format
+ * - bits 16-31: DMA_NOTIFY instance
+ * word 2:
+ * - bits 0-15: DMA_A instance
+ * - bits 16-31: DMA_B instance
+ *
+ * NV05 will set/unset the relevant valid bits when you poke the relevant
+ * object-binding methods with object of the proper type, or with the NULL
+ * type. It'll only allow rendering using the grobj if all needed objects
+ * are bound. The needed set of objects depends on selected operation: for
+ * example rop object is needed by ROP_AND, but not by SRCCOPY_AND.
+ *
+ * NV04 doesn't have these methods implemented at all, and doesn't have the
+ * relevant bits in grobj. Instead, it'll allow rendering whenever bit 24
+ * is set. So we have to emulate them in software, internally keeping the
+ * same bits as NV05 does. Since grobjs are aligned to 16 bytes on nv04,
+ * but the last word isn't actually used for anything, we abuse it for this
+ * purpose.
+ *
+ * Actually, NV05 can optionally check bit 24 too, but we disable this since
+ * there's no use for it.
+ *
+ * For unknown reasons, NV04 implements surf3d binding in hardware as an
+ * exception. Also for unknown reasons, NV04 doesn't implement the clipping
+ * methods on the surf3d object, so we have to emulate them too.
+ */
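/*
 * Illustrative decode of the NV04 grobj word-0 layout documented above.
 * These helper macros are invented for clarity; the code below manipulates
 * the raw words directly via nv04_graph_set_ctx1()/nv04_graph_set_ctx_val().
 */
#define NV04_GROBJ_CLASS(w0)		((w0) & 0xff)
#define NV04_GROBJ_CHROMA_ACTIVE(w0)	(((w0) >> 12) & 1)	/* color key */
#define NV04_GROBJ_CLIP_ACTIVE(w0)	(((w0) >> 13) & 1)	/* clip rect */
#define NV04_GROBJ_SWZSURF(w0)		(((w0) >> 14) & 1)	/* swizzled dst */
#define NV04_GROBJ_OPERATION(w0)	(((w0) >> 15) & 7)	/* patch config */
#define NV04_GROBJ_PATCH_VALID(w0)	(((w0) >> 24) & 1)
#define NV04_GROBJ_SURF3D_VALID(w0)	(((w0) >> 25) & 1)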
+
+static void
+nv04_graph_set_ctx1(struct nouveau_channel *chan, uint32_t mask, uint32_t value)
{
struct drm_device *dev = chan->dev;
uint32_t instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4;
@@ -542,42 +615,509 @@ nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass,
uint32_t tmp;
tmp = nv_ri32(dev, instance);
- tmp &= ~0x00038000;
- tmp |= ((data & 7) << 15);
+ tmp &= ~mask;
+ tmp |= value;
nv_wi32(dev, instance, tmp);
nv_wr32(dev, NV04_PGRAPH_CTX_SWITCH1, tmp);
nv_wr32(dev, NV04_PGRAPH_CTX_CACHE1 + (subc<<2), tmp);
+}
+
+static void
+nv04_graph_set_ctx_val(struct nouveau_channel *chan, uint32_t mask, uint32_t value)
+{
+ struct drm_device *dev = chan->dev;
+ uint32_t instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4;
+ uint32_t tmp, ctx1;
+ int class, op, valid = 1;
+
+ ctx1 = nv_ri32(dev, instance);
+ class = ctx1 & 0xff;
+ op = (ctx1 >> 15) & 7;
+ tmp = nv_ri32(dev, instance + 0xc);
+ tmp &= ~mask;
+ tmp |= value;
+ nv_wi32(dev, instance + 0xc, tmp);
+
+ /* check for valid surf2d/surf_dst/surf_color */
+ if (!(tmp & 0x02000000))
+ valid = 0;
+ /* check for valid surf_src/surf_zeta */
+ if ((class == 0x1f || class == 0x48) && !(tmp & 0x04000000))
+ valid = 0;
+
+ switch (op) {
+ /* SRCCOPY_AND, SRCCOPY: no extra objects required */
+ case 0:
+ case 3:
+ break;
+ /* ROP_AND: requires pattern and rop */
+ case 1:
+ if (!(tmp & 0x18000000))
+ valid = 0;
+ break;
+ /* BLEND_AND: requires beta1 */
+ case 2:
+ if (!(tmp & 0x20000000))
+ valid = 0;
+ break;
+ /* SRCCOPY_PREMULT, BLEND_PREMULT: beta4 required */
+ case 4:
+ case 5:
+ if (!(tmp & 0x40000000))
+ valid = 0;
+ break;
+ }
+
+ nv04_graph_set_ctx1(chan, 0x01000000, valid << 24);
+}
+
+static int
+nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ if (data > 5)
+ return 1;
+	/* Old versions of the objects only accept the first three operations. */
+ if (data > 2 && grclass < 0x40)
+ return 1;
+ nv04_graph_set_ctx1(chan, 0x00038000, data << 15);
+ /* changing operation changes set of objects needed for validation */
+ nv04_graph_set_ctx_val(chan, 0, 0);
+ return 0;
+}
+
+static int
+nv04_graph_mthd_surf3d_clip_h(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ uint32_t min = data & 0xffff, max;
+ uint32_t w = data >> 16;
+ if (min & 0x8000)
+ /* too large */
+ return 1;
+ if (w & 0x8000)
+ /* yes, it accepts negative for some reason. */
+ w |= 0xffff0000;
+ max = min + w;
+ max &= 0x3ffff;
+ nv_wr32(chan->dev, 0x40053c, min);
+ nv_wr32(chan->dev, 0x400544, max);
+ return 0;
+}
+
+static int
+nv04_graph_mthd_surf3d_clip_v(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ uint32_t min = data & 0xffff, max;
+ uint32_t w = data >> 16;
+ if (min & 0x8000)
+ /* too large */
+ return 1;
+ if (w & 0x8000)
+ /* yes, it accepts negative for some reason. */
+ w |= 0xffff0000;
+ max = min + w;
+ max &= 0x3ffff;
+ nv_wr32(chan->dev, 0x400540, min);
+ nv_wr32(chan->dev, 0x400548, max);
return 0;
}
+static int
+nv04_graph_mthd_bind_surf2d(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+ case 0x30:
+ nv04_graph_set_ctx1(chan, 0x00004000, 0);
+ nv04_graph_set_ctx_val(chan, 0x02000000, 0);
+ return 0;
+ case 0x42:
+ nv04_graph_set_ctx1(chan, 0x00004000, 0);
+ nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000);
+ return 0;
+ }
+ return 1;
+}
+
+static int
+nv04_graph_mthd_bind_surf2d_swzsurf(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+ case 0x30:
+ nv04_graph_set_ctx1(chan, 0x00004000, 0);
+ nv04_graph_set_ctx_val(chan, 0x02000000, 0);
+ return 0;
+ case 0x42:
+ nv04_graph_set_ctx1(chan, 0x00004000, 0);
+ nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000);
+ return 0;
+ case 0x52:
+ nv04_graph_set_ctx1(chan, 0x00004000, 0x00004000);
+ nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000);
+ return 0;
+ }
+ return 1;
+}
+
+static int
+nv04_graph_mthd_bind_nv01_patt(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+ case 0x30:
+ nv04_graph_set_ctx_val(chan, 0x08000000, 0);
+ return 0;
+ case 0x18:
+ nv04_graph_set_ctx_val(chan, 0x08000000, 0x08000000);
+ return 0;
+ }
+ return 1;
+}
+
+static int
+nv04_graph_mthd_bind_nv04_patt(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+ case 0x30:
+ nv04_graph_set_ctx_val(chan, 0x08000000, 0);
+ return 0;
+ case 0x44:
+ nv04_graph_set_ctx_val(chan, 0x08000000, 0x08000000);
+ return 0;
+ }
+ return 1;
+}
+
+static int
+nv04_graph_mthd_bind_rop(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+ case 0x30:
+ nv04_graph_set_ctx_val(chan, 0x10000000, 0);
+ return 0;
+ case 0x43:
+ nv04_graph_set_ctx_val(chan, 0x10000000, 0x10000000);
+ return 0;
+ }
+ return 1;
+}
+
+static int
+nv04_graph_mthd_bind_beta1(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+ case 0x30:
+ nv04_graph_set_ctx_val(chan, 0x20000000, 0);
+ return 0;
+ case 0x12:
+ nv04_graph_set_ctx_val(chan, 0x20000000, 0x20000000);
+ return 0;
+ }
+ return 1;
+}
+
+static int
+nv04_graph_mthd_bind_beta4(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+ case 0x30:
+ nv04_graph_set_ctx_val(chan, 0x40000000, 0);
+ return 0;
+ case 0x72:
+ nv04_graph_set_ctx_val(chan, 0x40000000, 0x40000000);
+ return 0;
+ }
+ return 1;
+}
+
+static int
+nv04_graph_mthd_bind_surf_dst(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+ case 0x30:
+ nv04_graph_set_ctx_val(chan, 0x02000000, 0);
+ return 0;
+ case 0x58:
+ nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000);
+ return 0;
+ }
+ return 1;
+}
+
+static int
+nv04_graph_mthd_bind_surf_src(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+ case 0x30:
+ nv04_graph_set_ctx_val(chan, 0x04000000, 0);
+ return 0;
+ case 0x59:
+ nv04_graph_set_ctx_val(chan, 0x04000000, 0x04000000);
+ return 0;
+ }
+ return 1;
+}
+
+static int
+nv04_graph_mthd_bind_surf_color(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+ case 0x30:
+ nv04_graph_set_ctx_val(chan, 0x02000000, 0);
+ return 0;
+ case 0x5a:
+ nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000);
+ return 0;
+ }
+ return 1;
+}
+
+static int
+nv04_graph_mthd_bind_surf_zeta(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+ case 0x30:
+ nv04_graph_set_ctx_val(chan, 0x04000000, 0);
+ return 0;
+ case 0x5b:
+ nv04_graph_set_ctx_val(chan, 0x04000000, 0x04000000);
+ return 0;
+ }
+ return 1;
+}
+
+static int
+nv04_graph_mthd_bind_clip(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+ case 0x30:
+ nv04_graph_set_ctx1(chan, 0x2000, 0);
+ return 0;
+ case 0x19:
+ nv04_graph_set_ctx1(chan, 0x2000, 0x2000);
+ return 0;
+ }
+ return 1;
+}
+
+static int
+nv04_graph_mthd_bind_chroma(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+ case 0x30:
+ nv04_graph_set_ctx1(chan, 0x1000, 0);
+ return 0;
+ /* Yes, for some reason even the old versions of objects
+ * accept 0x57 and not 0x17. Consistency be damned.
+ */
+ case 0x57:
+ nv04_graph_set_ctx1(chan, 0x1000, 0x1000);
+ return 0;
+ }
+ return 1;
+}
+
static struct nouveau_pgraph_object_method nv04_graph_mthds_sw[] = {
{ 0x0150, nv04_graph_mthd_set_ref },
{}
};
-static struct nouveau_pgraph_object_method nv04_graph_mthds_set_operation[] = {
+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_gdirect[] = {
+ { 0x0184, nv04_graph_mthd_bind_nv01_patt },
+ { 0x0188, nv04_graph_mthd_bind_rop },
+ { 0x018c, nv04_graph_mthd_bind_beta1 },
+ { 0x0190, nv04_graph_mthd_bind_surf_dst },
+ { 0x02fc, nv04_graph_mthd_set_operation },
+ {},
+};
+
+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_gdirect[] = {
+ { 0x0188, nv04_graph_mthd_bind_nv04_patt },
+ { 0x018c, nv04_graph_mthd_bind_rop },
+ { 0x0190, nv04_graph_mthd_bind_beta1 },
+ { 0x0194, nv04_graph_mthd_bind_beta4 },
+ { 0x0198, nv04_graph_mthd_bind_surf2d },
+ { 0x02fc, nv04_graph_mthd_set_operation },
+ {},
+};
+
+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv01_imageblit[] = {
+ { 0x0184, nv04_graph_mthd_bind_chroma },
+ { 0x0188, nv04_graph_mthd_bind_clip },
+ { 0x018c, nv04_graph_mthd_bind_nv01_patt },
+ { 0x0190, nv04_graph_mthd_bind_rop },
+ { 0x0194, nv04_graph_mthd_bind_beta1 },
+ { 0x0198, nv04_graph_mthd_bind_surf_dst },
+ { 0x019c, nv04_graph_mthd_bind_surf_src },
+ { 0x02fc, nv04_graph_mthd_set_operation },
+ {},
+};
+
+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_imageblit_ifc[] = {
+ { 0x0184, nv04_graph_mthd_bind_chroma },
+ { 0x0188, nv04_graph_mthd_bind_clip },
+ { 0x018c, nv04_graph_mthd_bind_nv04_patt },
+ { 0x0190, nv04_graph_mthd_bind_rop },
+ { 0x0194, nv04_graph_mthd_bind_beta1 },
+ { 0x0198, nv04_graph_mthd_bind_beta4 },
+ { 0x019c, nv04_graph_mthd_bind_surf2d },
+ { 0x02fc, nv04_graph_mthd_set_operation },
+ {},
+};
+
+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_iifc[] = {
+ { 0x0188, nv04_graph_mthd_bind_chroma },
+ { 0x018c, nv04_graph_mthd_bind_clip },
+ { 0x0190, nv04_graph_mthd_bind_nv04_patt },
+ { 0x0194, nv04_graph_mthd_bind_rop },
+ { 0x0198, nv04_graph_mthd_bind_beta1 },
+ { 0x019c, nv04_graph_mthd_bind_beta4 },
+ { 0x01a0, nv04_graph_mthd_bind_surf2d_swzsurf },
+ { 0x03e4, nv04_graph_mthd_set_operation },
+ {},
+};
+
+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv01_ifc[] = {
+ { 0x0184, nv04_graph_mthd_bind_chroma },
+ { 0x0188, nv04_graph_mthd_bind_clip },
+ { 0x018c, nv04_graph_mthd_bind_nv01_patt },
+ { 0x0190, nv04_graph_mthd_bind_rop },
+ { 0x0194, nv04_graph_mthd_bind_beta1 },
+ { 0x0198, nv04_graph_mthd_bind_surf_dst },
+ { 0x02fc, nv04_graph_mthd_set_operation },
+ {},
+};
+
+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_sifc[] = {
+ { 0x0184, nv04_graph_mthd_bind_chroma },
+ { 0x0188, nv04_graph_mthd_bind_nv01_patt },
+ { 0x018c, nv04_graph_mthd_bind_rop },
+ { 0x0190, nv04_graph_mthd_bind_beta1 },
+ { 0x0194, nv04_graph_mthd_bind_surf_dst },
{ 0x02fc, nv04_graph_mthd_set_operation },
{},
};
+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_sifc[] = {
+ { 0x0184, nv04_graph_mthd_bind_chroma },
+ { 0x0188, nv04_graph_mthd_bind_nv04_patt },
+ { 0x018c, nv04_graph_mthd_bind_rop },
+ { 0x0190, nv04_graph_mthd_bind_beta1 },
+ { 0x0194, nv04_graph_mthd_bind_beta4 },
+ { 0x0198, nv04_graph_mthd_bind_surf2d },
+ { 0x02fc, nv04_graph_mthd_set_operation },
+ {},
+};
+
+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_sifm[] = {
+ { 0x0188, nv04_graph_mthd_bind_nv01_patt },
+ { 0x018c, nv04_graph_mthd_bind_rop },
+ { 0x0190, nv04_graph_mthd_bind_beta1 },
+ { 0x0194, nv04_graph_mthd_bind_surf_dst },
+ { 0x0304, nv04_graph_mthd_set_operation },
+ {},
+};
+
+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_sifm[] = {
+ { 0x0188, nv04_graph_mthd_bind_nv04_patt },
+ { 0x018c, nv04_graph_mthd_bind_rop },
+ { 0x0190, nv04_graph_mthd_bind_beta1 },
+ { 0x0194, nv04_graph_mthd_bind_beta4 },
+ { 0x0198, nv04_graph_mthd_bind_surf2d_swzsurf },
+ { 0x0304, nv04_graph_mthd_set_operation },
+ {},
+};
+
+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv01_shape[] = {
+ { 0x0184, nv04_graph_mthd_bind_clip },
+ { 0x0188, nv04_graph_mthd_bind_nv01_patt },
+ { 0x018c, nv04_graph_mthd_bind_rop },
+ { 0x0190, nv04_graph_mthd_bind_beta1 },
+ { 0x0194, nv04_graph_mthd_bind_surf_dst },
+ { 0x02fc, nv04_graph_mthd_set_operation },
+ {},
+};
+
+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_shape[] = {
+ { 0x0184, nv04_graph_mthd_bind_clip },
+ { 0x0188, nv04_graph_mthd_bind_nv04_patt },
+ { 0x018c, nv04_graph_mthd_bind_rop },
+ { 0x0190, nv04_graph_mthd_bind_beta1 },
+ { 0x0194, nv04_graph_mthd_bind_beta4 },
+ { 0x0198, nv04_graph_mthd_bind_surf2d },
+ { 0x02fc, nv04_graph_mthd_set_operation },
+ {},
+};
+
+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_tex_tri[] = {
+ { 0x0188, nv04_graph_mthd_bind_clip },
+ { 0x018c, nv04_graph_mthd_bind_surf_color },
+ { 0x0190, nv04_graph_mthd_bind_surf_zeta },
+ {},
+};
+
+static struct nouveau_pgraph_object_method nv04_graph_mthds_surf3d[] = {
+ { 0x02f8, nv04_graph_mthd_surf3d_clip_h },
+ { 0x02fc, nv04_graph_mthd_surf3d_clip_v },
+ {},
+};
+
struct nouveau_pgraph_object_class nv04_graph_grclass[] = {
- { 0x0039, false, NULL },
- { 0x004a, false, nv04_graph_mthds_set_operation }, /* gdirect */
- { 0x005f, false, nv04_graph_mthds_set_operation }, /* imageblit */
- { 0x0061, false, nv04_graph_mthds_set_operation }, /* ifc */
- { 0x0077, false, nv04_graph_mthds_set_operation }, /* sifm */
+ { 0x0038, false, NULL }, /* dvd subpicture */
+ { 0x0039, false, NULL }, /* m2mf */
+ { 0x004b, false, nv04_graph_mthds_nv03_gdirect }, /* nv03 gdirect */
+ { 0x004a, false, nv04_graph_mthds_nv04_gdirect }, /* nv04 gdirect */
+ { 0x001f, false, nv04_graph_mthds_nv01_imageblit }, /* nv01 imageblit */
+ { 0x005f, false, nv04_graph_mthds_nv04_imageblit_ifc }, /* nv04 imageblit */
+ { 0x0060, false, nv04_graph_mthds_nv04_iifc }, /* nv04 iifc */
+ { 0x0064, false, NULL }, /* nv05 iifc */
+ { 0x0021, false, nv04_graph_mthds_nv01_ifc }, /* nv01 ifc */
+ { 0x0061, false, nv04_graph_mthds_nv04_imageblit_ifc }, /* nv04 ifc */
+ { 0x0065, false, NULL }, /* nv05 ifc */
+ { 0x0036, false, nv04_graph_mthds_nv03_sifc }, /* nv03 sifc */
+ { 0x0076, false, nv04_graph_mthds_nv04_sifc }, /* nv04 sifc */
+ { 0x0066, false, NULL }, /* nv05 sifc */
+ { 0x0037, false, nv04_graph_mthds_nv03_sifm }, /* nv03 sifm */
+ { 0x0077, false, nv04_graph_mthds_nv04_sifm }, /* nv04 sifm */
{ 0x0030, false, NULL }, /* null */
{ 0x0042, false, NULL }, /* surf2d */
{ 0x0043, false, NULL }, /* rop */
{ 0x0012, false, NULL }, /* beta1 */
{ 0x0072, false, NULL }, /* beta4 */
{ 0x0019, false, NULL }, /* cliprect */
- { 0x0044, false, NULL }, /* pattern */
+ { 0x0018, false, NULL }, /* nv01 pattern */
+ { 0x0044, false, NULL }, /* nv04 pattern */
{ 0x0052, false, NULL }, /* swzsurf */
- { 0x0053, false, NULL }, /* surf3d */
+ { 0x0053, false, nv04_graph_mthds_surf3d }, /* surf3d */
+ { 0x0048, false, nv04_graph_mthds_nv03_tex_tri }, /* nv03 tex_tri */
{ 0x0054, false, NULL }, /* tex_tri */
{ 0x0055, false, NULL }, /* multitex_tri */
+ { 0x0017, false, NULL }, /* nv01 chroma */
+ { 0x0057, false, NULL }, /* nv04 chroma */
+ { 0x0058, false, NULL }, /* surf_dst */
+ { 0x0059, false, NULL }, /* surf_src */
+ { 0x005a, false, NULL }, /* surf_color */
+ { 0x005b, false, NULL }, /* surf_zeta */
+ { 0x001c, false, nv04_graph_mthds_nv01_shape }, /* nv01 line */
+ { 0x005c, false, nv04_graph_mthds_nv04_shape }, /* nv04 line */
+ { 0x001d, false, nv04_graph_mthds_nv01_shape }, /* nv01 tri */
+ { 0x005d, false, nv04_graph_mthds_nv04_shape }, /* nv04 tri */
+ { 0x001e, false, nv04_graph_mthds_nv01_shape }, /* nv01 rect */
+ { 0x005e, false, nv04_graph_mthds_nv04_shape }, /* nv04 rect */
{ 0x506e, true, nv04_graph_mthds_sw },
{}
};
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
index 0616c96e4b67..704a25d04ac9 100644
--- a/drivers/gpu/drm/nouveau/nv40_graph.c
+++ b/drivers/gpu/drm/nouveau/nv40_graph.c
@@ -253,7 +253,11 @@ nv40_graph_init(struct drm_device *dev)
if (!dev_priv->engine.graph.ctxprog) {
struct nouveau_grctx ctx = {};
- uint32_t cp[256];
+ uint32_t *cp;
+
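+	/* build the ctxprog in a heap buffer rather than a 1KiB on-stack array */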
+ cp = kmalloc(sizeof(*cp) * 256, GFP_KERNEL);
+ if (!cp)
+ return -ENOMEM;
ctx.dev = dev;
ctx.mode = NOUVEAU_GRCTX_PROG;
@@ -265,6 +269,8 @@ nv40_graph_init(struct drm_device *dev)
nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
for (i = 0; i < ctx.ctxprog_len; i++)
nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp[i]);
+
+ kfree(cp);
}
/* No context present currently */
diff --git a/drivers/gpu/drm/nouveau/nv40_grctx.c b/drivers/gpu/drm/nouveau/nv40_grctx.c
index 11b11c31f543..9b5c97469588 100644
--- a/drivers/gpu/drm/nouveau/nv40_grctx.c
+++ b/drivers/gpu/drm/nouveau/nv40_grctx.c
@@ -115,11 +115,6 @@
/* TODO:
* - get vs count from 0x1540
- * - document unimplemented bits compared to nvidia
- * - nsource handling
- * - R0 & 0x0200 handling
- * - single-vs handling
- * - 400314 bit 0
*/
static int
diff --git a/drivers/gpu/drm/nouveau/nv50_calc.c b/drivers/gpu/drm/nouveau/nv50_calc.c
new file mode 100644
index 000000000000..2cdc2bfe7179
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_calc.c
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "drm_fixed.h"
+#include "nouveau_drv.h"
+#include "nouveau_hw.h"
+
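+/* Compute N1/M1, N2/M2 and the log2 post-divider for a two-stage PLL by
+ * delegating to the shared nouveau_calc_pll_mnp() helper.
+ */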
+int
+nv50_calc_pll(struct drm_device *dev, struct pll_lims *pll, int clk,
+ int *N1, int *M1, int *N2, int *M2, int *P)
+{
+ struct nouveau_pll_vals pll_vals;
+ int ret;
+
+ ret = nouveau_calc_pll_mnp(dev, pll, clk, &pll_vals);
+ if (ret <= 0)
+ return ret;
+
+ *N1 = pll_vals.N1;
+ *M1 = pll_vals.M1;
+ *N2 = pll_vals.N2;
+ *M2 = pll_vals.M2;
+ *P = pll_vals.log2P;
+ return ret;
+}
+
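+/* Compute N, the fractional feedback offset fN, M and P for a single-stage
+ * fractional-N PLL using 20.12 fixed-point arithmetic.
+ */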
+int
+nv50_calc_pll2(struct drm_device *dev, struct pll_lims *pll, int clk,
+ int *N, int *fN, int *M, int *P)
+{
+ fixed20_12 fb_div, a, b;
+
+ *P = pll->vco1.maxfreq / clk;
+ if (*P > pll->max_p)
+ *P = pll->max_p;
+ if (*P < pll->min_p)
+ *P = pll->min_p;
+
+ /* *M = ceil(refclk / pll->vco1.max_inputfreq); */
+ a.full = dfixed_const(pll->refclk);
+ b.full = dfixed_const(pll->vco1.max_inputfreq);
+ a.full = dfixed_div(a, b);
+ a.full = dfixed_ceil(a);
+ *M = dfixed_trunc(a);
+
+ /* fb_div = (vco * *M) / refclk; */
+ fb_div.full = dfixed_const(clk * *P);
+ fb_div.full = dfixed_mul(fb_div, a);
+ a.full = dfixed_const(pll->refclk);
+ fb_div.full = dfixed_div(fb_div, a);
+
+ /* *N = floor(fb_div); */
+ a.full = dfixed_floor(fb_div);
+ *N = dfixed_trunc(fb_div);
+
+ /* *fN = (fmod(fb_div, 1.0) * 8192) - 4096; */
+ b.full = dfixed_const(8192);
+ a.full = dfixed_mul(a, b);
+ fb_div.full = dfixed_mul(fb_div, b);
+ fb_div.full = fb_div.full - a.full;
+ *fN = dfixed_trunc(fb_div) - 4096;
+ *fN &= 0xffff;
+
+ return clk;
+}
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index cfabeb974a56..b4e4a3b05eae 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -264,32 +264,40 @@ nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, int scaling_mode, bool update)
int
nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk)
{
- uint32_t pll_reg = NV50_PDISPLAY_CRTC_CLK_CTRL1(head);
- struct nouveau_pll_vals pll;
- struct pll_lims limits;
+ uint32_t reg = NV50_PDISPLAY_CRTC_CLK_CTRL1(head);
+ struct pll_lims pll;
uint32_t reg1, reg2;
- int ret;
+ int ret, N1, M1, N2, M2, P;
- ret = get_pll_limits(dev, pll_reg, &limits);
+ ret = get_pll_limits(dev, reg, &pll);
if (ret)
return ret;
- ret = nouveau_calc_pll_mnp(dev, &limits, pclk, &pll);
- if (ret <= 0)
- return ret;
+ if (pll.vco2.maxfreq) {
+ ret = nv50_calc_pll(dev, &pll, pclk, &N1, &M1, &N2, &M2, &P);
+ if (ret <= 0)
+ return 0;
+
+ NV_DEBUG(dev, "pclk %d out %d NM1 %d %d NM2 %d %d P %d\n",
+ pclk, ret, N1, M1, N2, M2, P);
- if (limits.vco2.maxfreq) {
- reg1 = nv_rd32(dev, pll_reg + 4) & 0xff00ff00;
- reg2 = nv_rd32(dev, pll_reg + 8) & 0x8000ff00;
- nv_wr32(dev, pll_reg, 0x10000611);
- nv_wr32(dev, pll_reg + 4, reg1 | (pll.M1 << 16) | pll.N1);
- nv_wr32(dev, pll_reg + 8,
- reg2 | (pll.log2P << 28) | (pll.M2 << 16) | pll.N2);
+ reg1 = nv_rd32(dev, reg + 4) & 0xff00ff00;
+ reg2 = nv_rd32(dev, reg + 8) & 0x8000ff00;
+ nv_wr32(dev, reg, 0x10000611);
+ nv_wr32(dev, reg + 4, reg1 | (M1 << 16) | N1);
+ nv_wr32(dev, reg + 8, reg2 | (P << 28) | (M2 << 16) | N2);
} else {
- reg1 = nv_rd32(dev, pll_reg + 4) & 0xffc00000;
- nv_wr32(dev, pll_reg, 0x50000610);
- nv_wr32(dev, pll_reg + 4, reg1 |
- (pll.log2P << 16) | (pll.M1 << 8) | pll.N1);
+ ret = nv50_calc_pll2(dev, &pll, pclk, &N1, &N2, &M1, &P);
+ if (ret <= 0)
+ return 0;
+
+ NV_DEBUG(dev, "pclk %d out %d N %d fN 0x%04x M %d P %d\n",
+ pclk, ret, N1, N2, M1, P);
+
+ reg1 = nv_rd32(dev, reg + 4) & 0xffc00000;
+ nv_wr32(dev, reg, 0x50000610);
+ nv_wr32(dev, reg + 4, reg1 | (P << 16) | (M1 << 8) | N1);
+ nv_wr32(dev, reg + 8, N2);
}
return 0;
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 649db4c1b690..580a5d10be93 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -29,6 +29,7 @@
#include "nouveau_encoder.h"
#include "nouveau_connector.h"
#include "nouveau_fb.h"
+#include "nouveau_fbcon.h"
#include "drm_crtc_helper.h"
static void
@@ -783,6 +784,37 @@ ack:
}
static void
+nv50_display_unk20_dp_hack(struct drm_device *dev, struct dcb_entry *dcb)
+{
+ int or = ffs(dcb->or) - 1, link = !(dcb->dpconf.sor.link & 1);
+ struct drm_encoder *encoder;
+ uint32_t tmp, unk0 = 0, unk1 = 0;
+
+ if (dcb->type != OUTPUT_DP)
+ return;
+
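+	/* find the encoder driving this DCB entry and fetch the DP register
+	 * bits that were captured when the encoder was created */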
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+
+ if (nv_encoder->dcb == dcb) {
+ unk0 = nv_encoder->dp.unk0;
+ unk1 = nv_encoder->dp.unk1;
+ break;
+ }
+ }
+
+ if (unk0 || unk1) {
+ tmp = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link));
+ tmp &= 0xfffffe03;
+ nv_wr32(dev, NV50_SOR_DP_CTRL(or, link), tmp | unk0);
+
+ tmp = nv_rd32(dev, NV50_SOR_DP_UNK128(or, link));
+ tmp &= 0xfef080c0;
+ nv_wr32(dev, NV50_SOR_DP_UNK128(or, link), tmp | unk1);
+ }
+}
+
+static void
nv50_display_unk20_handler(struct drm_device *dev)
{
struct dcb_entry *dcbent;
@@ -805,6 +837,8 @@ nv50_display_unk20_handler(struct drm_device *dev)
nouveau_bios_run_display_table(dev, dcbent, script, pclk);
+ nv50_display_unk20_dp_hack(dev, dcbent);
+
tmp = nv_rd32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(head));
tmp &= ~0x000000f;
nv_wr32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(head), tmp);
@@ -945,6 +979,8 @@ nv50_display_irq_hotplug_bh(struct work_struct *work)
nv_wr32(dev, 0xe054, nv_rd32(dev, 0xe054));
if (dev_priv->chipset >= 0x90)
nv_wr32(dev, 0xe074, nv_rd32(dev, 0xe074));
+
+ drm_helper_hpd_irq_event(dev);
}
void
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index a8c70e7e9184..6bf025c6fc6f 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -6,8 +6,8 @@
void
nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
- struct nouveau_fbcon_par *par = info->par;
- struct drm_device *dev = par->dev;
+ struct nouveau_fbdev *nfbdev = info->par;
+ struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
@@ -49,8 +49,8 @@ nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
void
nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
- struct nouveau_fbcon_par *par = info->par;
- struct drm_device *dev = par->dev;
+ struct nouveau_fbdev *nfbdev = info->par;
+ struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
@@ -84,8 +84,8 @@ nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
void
nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
{
- struct nouveau_fbcon_par *par = info->par;
- struct drm_device *dev = par->dev;
+ struct nouveau_fbdev *nfbdev = info->par;
+ struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
uint32_t width, dwords, *data = (uint32_t *)image->data;
@@ -152,8 +152,8 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
int
nv50_fbcon_accel_init(struct fb_info *info)
{
- struct nouveau_fbcon_par *par = info->par;
- struct drm_device *dev = par->dev;
+ struct nouveau_fbdev *nfbdev = info->par;
+ struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
struct nouveau_gpuobj *eng2d = NULL;
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
index 0c68698f23df..b11eaf9c5c7c 100644
--- a/drivers/gpu/drm/nouveau/nv50_sor.c
+++ b/drivers/gpu/drm/nouveau/nv50_sor.c
@@ -321,18 +321,23 @@ nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry)
encoder->possible_clones = 0;
if (nv_encoder->dcb->type == OUTPUT_DP) {
- uint32_t mc, or = nv_encoder->or;
+ int or = nv_encoder->or, link = !(entry->dpconf.sor.link & 1);
+ uint32_t tmp;
if (dev_priv->chipset < 0x90 ||
dev_priv->chipset == 0x92 || dev_priv->chipset == 0xa0)
- mc = nv_rd32(dev, NV50_PDISPLAY_SOR_MODE_CTRL_C(or));
+ tmp = nv_rd32(dev, NV50_PDISPLAY_SOR_MODE_CTRL_C(or));
else
- mc = nv_rd32(dev, NV90_PDISPLAY_SOR_MODE_CTRL_C(or));
+ tmp = nv_rd32(dev, NV90_PDISPLAY_SOR_MODE_CTRL_C(or));
- switch ((mc & 0x00000f00) >> 8) {
+ switch ((tmp & 0x00000f00) >> 8) {
case 8:
case 9:
- nv_encoder->dp.mc_unknown = (mc & 0x000f0000) >> 16;
+ nv_encoder->dp.mc_unknown = (tmp & 0x000f0000) >> 16;
+ tmp = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link));
+ nv_encoder->dp.unk0 = tmp & 0x000001fc;
+ tmp = nv_rd32(dev, NV50_SOR_DP_UNK128(or, link));
+ nv_encoder->dp.unk1 = tmp & 0x010f7f3f;
break;
default:
break;
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
index 27e2c715be11..1bc72c3190a9 100644
--- a/drivers/gpu/drm/radeon/atombios.h
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -3780,7 +3780,7 @@ typedef struct _ATOM_ASIC_SS_ASSIGNMENT
UCHAR ucReserved[2];
}ATOM_ASIC_SS_ASSIGNMENT;
-//Define ucClockIndication, SW uses the IDs below to search if the SS is requried/enabled on a clock branch/signal type.
+//Define ucClockIndication, SW uses the IDs below to search if the SS is required/enabled on a clock branch/signal type.
//SS is not required or enabled if a match is not found.
#define ASIC_INTERNAL_MEMORY_SS 1
#define ASIC_INTERNAL_ENGINE_SS 2
@@ -5742,6 +5742,9 @@ typedef struct _ATOM_PPLIB_THERMALCONTROLLER
#define ATOM_PP_THERMALCONTROLLER_RV6xx 7
#define ATOM_PP_THERMALCONTROLLER_RV770 8
#define ATOM_PP_THERMALCONTROLLER_ADT7473 9
+#define ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO 11
+#define ATOM_PP_THERMALCONTROLLER_EVERGREEN 12
+#define ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL 0x89 // ADT7473 Fan Control + Internal Thermal Controller
typedef struct _ATOM_PPLIB_STATE
{
@@ -5749,6 +5752,26 @@ typedef struct _ATOM_PPLIB_STATE
UCHAR ucClockStateIndices[1]; // variable-sized
} ATOM_PPLIB_STATE;
+typedef struct _ATOM_PPLIB_FANTABLE
+{
+ UCHAR ucFanTableFormat; // Bump this whenever the table format or version changes and the other fields no longer mean the same thing.
+ UCHAR ucTHyst; // Temperature hysteresis. Integer.
+ USHORT usTMin; // The temperature, in 0.01 centigrades, below which we just run at a minimal PWM.
+ USHORT usTMed; // The middle temperature where we change slopes.
+ USHORT usTHigh; // The high point above TMed for adjusting the second slope.
+ USHORT usPWMMin; // The minimum PWM value in percent (0.01% increments).
+ USHORT usPWMMed; // The PWM value (in percent) at TMed.
+ USHORT usPWMHigh; // The PWM value at THigh.
+} ATOM_PPLIB_FANTABLE;
+
+typedef struct _ATOM_PPLIB_EXTENDEDHEADER
+{
+ USHORT usSize;
+ ULONG ulMaxEngineClock; // For Overdrive.
+ ULONG ulMaxMemoryClock; // For Overdrive.
+ // Add extra system parameters here, always adjust size to include all fields.
+} ATOM_PPLIB_EXTENDEDHEADER;
+
//// ATOM_PPLIB_POWERPLAYTABLE::ulPlatformCaps
#define ATOM_PP_PLATFORM_CAP_BACKBIAS 1
#define ATOM_PP_PLATFORM_CAP_POWERPLAY 2
@@ -5762,6 +5785,12 @@ typedef struct _ATOM_PPLIB_STATE
#define ATOM_PP_PLATFORM_CAP_SIDEPORTCONTROL 512
#define ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1 1024
#define ATOM_PP_PLATFORM_CAP_HTLINKCONTROL 2048
+#define ATOM_PP_PLATFORM_CAP_MVDDCONTROL 4096
+#define ATOM_PP_PLATFORM_CAP_GOTO_BOOT_ON_ALERT 0x2000 // Go to boot state on alerts, e.g. on an AC->DC transition.
+#define ATOM_PP_PLATFORM_CAP_DONT_WAIT_FOR_VBLANK_ON_ALERT 0x4000 // Do NOT wait for VBLANK during an alert (e.g. AC->DC transition).
+#define ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL 0x8000 // Whether the driver controls VDDCI independently of VDDC.
+#define ATOM_PP_PLATFORM_CAP_REGULATOR_HOT 0x00010000 // Enable the 'regulator hot' feature.
+#define ATOM_PP_PLATFORM_CAP_BACO 0x00020000 // Whether the driver supports the BACO state.
typedef struct _ATOM_PPLIB_POWERPLAYTABLE
{
@@ -5797,6 +5826,21 @@ typedef struct _ATOM_PPLIB_POWERPLAYTABLE
} ATOM_PPLIB_POWERPLAYTABLE;
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE2
+{
+ ATOM_PPLIB_POWERPLAYTABLE basicTable;
+ UCHAR ucNumCustomThermalPolicy;
+ USHORT usCustomThermalPolicyArrayOffset;
+}ATOM_PPLIB_POWERPLAYTABLE2, *LPATOM_PPLIB_POWERPLAYTABLE2;
+
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE3
+{
+ ATOM_PPLIB_POWERPLAYTABLE2 basicTable2;
+ USHORT usFormatID; // To be used ONLY by PPGen.
+ USHORT usFanTableOffset;
+ USHORT usExtendendedHeaderOffset;
+} ATOM_PPLIB_POWERPLAYTABLE3, *LPATOM_PPLIB_POWERPLAYTABLE3;
+
//// ATOM_PPLIB_NONCLOCK_INFO::usClassification
#define ATOM_PPLIB_CLASSIFICATION_UI_MASK 0x0007
#define ATOM_PPLIB_CLASSIFICATION_UI_SHIFT 0
@@ -5816,7 +5860,9 @@ typedef struct _ATOM_PPLIB_POWERPLAYTABLE
#define ATOM_PPLIB_CLASSIFICATION_UVDSTATE 0x0400
#define ATOM_PPLIB_CLASSIFICATION_3DLOW 0x0800
#define ATOM_PPLIB_CLASSIFICATION_ACPI 0x1000
-// remaining 3 bits are reserved
+#define ATOM_PPLIB_CLASSIFICATION_HD2STATE 0x2000
+#define ATOM_PPLIB_CLASSIFICATION_HDSTATE 0x4000
+#define ATOM_PPLIB_CLASSIFICATION_SDSTATE 0x8000
//// ATOM_PPLIB_NONCLOCK_INFO::ulCapsAndSettings
#define ATOM_PPLIB_SINGLE_DISPLAY_ONLY 0x00000001
@@ -5840,9 +5886,15 @@ typedef struct _ATOM_PPLIB_POWERPLAYTABLE
#define ATOM_PPLIB_SOFTWARE_DISABLE_LOADBALANCING 0x00001000
#define ATOM_PPLIB_SOFTWARE_ENABLE_SLEEP_FOR_TIMESTAMPS 0x00002000
+#define ATOM_PPLIB_DISALLOW_ON_DC 0x00004000
#define ATOM_PPLIB_ENABLE_VARIBRIGHT 0x00008000
-#define ATOM_PPLIB_DISALLOW_ON_DC 0x00004000
+//memory related flags
+#define ATOM_PPLIB_SWSTATE_MEMORY_DLL_OFF 0x000010000
+
+//M3 Arb //2bits, current 3 sets of parameters in total
+#define ATOM_PPLIB_M3ARB_MASK 0x00060000
+#define ATOM_PPLIB_M3ARB_SHIFT 17
// Contained in an array starting at the offset
// in ATOM_PPLIB_POWERPLAYTABLE::usNonClockInfoArrayOffset.
@@ -5860,6 +5912,9 @@ typedef struct _ATOM_PPLIB_NONCLOCK_INFO
// Contained in an array starting at the offset
// in ATOM_PPLIB_POWERPLAYTABLE::usClockInfoArrayOffset.
// referenced from ATOM_PPLIB_STATE::ucClockStateIndices
+#define ATOM_PPLIB_NONCLOCKINFO_VER1 12
+#define ATOM_PPLIB_NONCLOCKINFO_VER2 24
+
typedef struct _ATOM_PPLIB_R600_CLOCK_INFO
{
USHORT usEngineClockLow;
@@ -5882,6 +5937,23 @@ typedef struct _ATOM_PPLIB_R600_CLOCK_INFO
#define ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE 4
#define ATOM_PPLIB_R600_FLAGS_MEMORY_ODT_OFF 8
#define ATOM_PPLIB_R600_FLAGS_MEMORY_DLL_OFF 16
+#define ATOM_PPLIB_R600_FLAGS_LOWPOWER 32 // On the RV770 use 'low power' setting (sequencer S0).
+
+typedef struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO
+{
+ USHORT usEngineClockLow;
+ UCHAR ucEngineClockHigh;
+
+ USHORT usMemoryClockLow;
+ UCHAR ucMemoryClockHigh;
+
+ USHORT usVDDC;
+ USHORT usVDDCI;
+ USHORT usUnused;
+
+ ULONG ulFlags; // ATOM_PPLIB_R600_FLAGS_*
+
+} ATOM_PPLIB_EVERGREEN_CLOCK_INFO;
typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
@@ -5895,7 +5967,7 @@ typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
UCHAR ucPadding; // For proper alignment and size.
USHORT usVDDC; // For the 780, use: None, Low, High, Variable
UCHAR ucMaxHTLinkWidth; // From SBIOS - {2, 4, 8, 16}
- UCHAR ucMinHTLinkWidth; // From SBIOS - {2, 4, 8, 16}. Effective only if CDLW enabled. Minimum down stream width could be bigger as display BW requriement.
+ UCHAR ucMinHTLinkWidth; // From SBIOS - {2, 4, 8, 16}. Effective only if CDLW enabled. Minimum down stream width could be bigger as display BW requirement.
USHORT usHTLinkFreq; // See definition ATOM_PPLIB_RS780_HTLINKFREQ_xxx or in MHz(>=200).
ULONG ulFlags;
} ATOM_PPLIB_RS780_CLOCK_INFO;
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index a87990b3ae84..f3f2827017ef 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -26,7 +26,7 @@
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/radeon_drm.h>
-#include "radeon_fixed.h"
+#include <drm/drm_fixed.h>
#include "radeon.h"
#include "atom.h"
#include "atom-bits.h"
@@ -245,25 +245,27 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
switch (mode) {
case DRM_MODE_DPMS_ON:
+ radeon_crtc->enabled = true;
+ /* adjust pm to dpms changes BEFORE enabling crtcs */
+ radeon_pm_compute_clocks(rdev);
atombios_enable_crtc(crtc, ATOM_ENABLE);
if (ASIC_IS_DCE3(rdev))
atombios_enable_crtc_memreq(crtc, ATOM_ENABLE);
atombios_blank_crtc(crtc, ATOM_DISABLE);
- /* XXX re-enable when interrupt support is added */
- if (!ASIC_IS_DCE4(rdev))
- drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
+ drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
radeon_crtc_load_lut(crtc);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
- /* XXX re-enable when interrupt support is added */
- if (!ASIC_IS_DCE4(rdev))
- drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
+ drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
atombios_blank_crtc(crtc, ATOM_ENABLE);
if (ASIC_IS_DCE3(rdev))
atombios_enable_crtc_memreq(crtc, ATOM_DISABLE);
atombios_enable_crtc(crtc, ATOM_DISABLE);
+ radeon_crtc->enabled = false;
+ /* adjust pm to dpms changes AFTER disabling crtcs */
+ radeon_pm_compute_clocks(rdev);
break;
}
}
@@ -705,6 +707,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
break;
case ATOM_DCPLL:
case ATOM_PPLL_INVALID:
+ default:
pll = &rdev->clock.dcpll;
break;
}
@@ -1160,6 +1163,12 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+
+ /* adjust pm to upcoming mode change */
+ radeon_pm_compute_clocks(rdev);
+
if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
return false;
return true;
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 28b31c64f48d..abffb1499e22 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -351,7 +351,7 @@ retry:
args.v1.ucChannelID = chan->rec.i2c_id;
args.v1.ucDelay = delay / 10;
if (ASIC_IS_DCE4(rdev))
- args.v2.ucHPD_ID = chan->rec.hpd_id;
+ args.v2.ucHPD_ID = chan->rec.hpd;
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index e8f447e20507..8c8e4d3cbaa3 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -28,39 +28,235 @@
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_drm.h"
-#include "rv770d.h"
+#include "evergreend.h"
#include "atom.h"
#include "avivod.h"
#include "evergreen_reg.h"
+#define EVERGREEN_PFP_UCODE_SIZE 1120
+#define EVERGREEN_PM4_UCODE_SIZE 1376
+
static void evergreen_gpu_init(struct radeon_device *rdev);
void evergreen_fini(struct radeon_device *rdev);
+void evergreen_pm_misc(struct radeon_device *rdev)
+{
+
+}
+
+void evergreen_pm_prepare(struct radeon_device *rdev)
+{
+ struct drm_device *ddev = rdev->ddev;
+ struct drm_crtc *crtc;
+ struct radeon_crtc *radeon_crtc;
+ u32 tmp;
+
+ /* disable any active CRTCs */
+ list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
+ radeon_crtc = to_radeon_crtc(crtc);
+ if (radeon_crtc->enabled) {
+ tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset);
+ tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
+ WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
+ }
+ }
+}
+
+void evergreen_pm_finish(struct radeon_device *rdev)
+{
+ struct drm_device *ddev = rdev->ddev;
+ struct drm_crtc *crtc;
+ struct radeon_crtc *radeon_crtc;
+ u32 tmp;
+
+ /* enable any active CRTCs */
+ list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
+ radeon_crtc = to_radeon_crtc(crtc);
+ if (radeon_crtc->enabled) {
+ tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset);
+ tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
+ WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
+ }
+ }
+}
+
bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
bool connected = false;
- /* XXX */
+
+ switch (hpd) {
+ case RADEON_HPD_1:
+ if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
+ connected = true;
+ break;
+ case RADEON_HPD_2:
+ if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
+ connected = true;
+ break;
+ case RADEON_HPD_3:
+ if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
+ connected = true;
+ break;
+ case RADEON_HPD_4:
+ if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
+ connected = true;
+ break;
+ case RADEON_HPD_5:
+ if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
+ connected = true;
+ break;
+ case RADEON_HPD_6:
+ if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
+ connected = true;
+ break;
+ default:
+ break;
+ }
+
return connected;
}
void evergreen_hpd_set_polarity(struct radeon_device *rdev,
enum radeon_hpd_id hpd)
{
- /* XXX */
+ u32 tmp;
+ bool connected = evergreen_hpd_sense(rdev, hpd);
+
+ switch (hpd) {
+ case RADEON_HPD_1:
+ tmp = RREG32(DC_HPD1_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HPDx_INT_POLARITY;
+ else
+ tmp |= DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD1_INT_CONTROL, tmp);
+ break;
+ case RADEON_HPD_2:
+ tmp = RREG32(DC_HPD2_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HPDx_INT_POLARITY;
+ else
+ tmp |= DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD2_INT_CONTROL, tmp);
+ break;
+ case RADEON_HPD_3:
+ tmp = RREG32(DC_HPD3_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HPDx_INT_POLARITY;
+ else
+ tmp |= DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD3_INT_CONTROL, tmp);
+ break;
+ case RADEON_HPD_4:
+ tmp = RREG32(DC_HPD4_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HPDx_INT_POLARITY;
+ else
+ tmp |= DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD4_INT_CONTROL, tmp);
+ break;
+ case RADEON_HPD_5:
+ tmp = RREG32(DC_HPD5_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HPDx_INT_POLARITY;
+ else
+ tmp |= DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD5_INT_CONTROL, tmp);
+ break;
+ case RADEON_HPD_6:
+ tmp = RREG32(DC_HPD6_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HPDx_INT_POLARITY;
+ else
+ tmp |= DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD6_INT_CONTROL, tmp);
+ break;
+ default:
+ break;
+ }
}
void evergreen_hpd_init(struct radeon_device *rdev)
{
- /* XXX */
+ struct drm_device *dev = rdev->ddev;
+ struct drm_connector *connector;
+ u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) |
+ DC_HPDx_RX_INT_TIMER(0xfa) | DC_HPDx_EN;
+
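+	/* program the hpd timers/enable bit for each connector and flag its
+	 * interrupt source so evergreen_irq_set() can enable it */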
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ switch (radeon_connector->hpd.hpd) {
+ case RADEON_HPD_1:
+ WREG32(DC_HPD1_CONTROL, tmp);
+ rdev->irq.hpd[0] = true;
+ break;
+ case RADEON_HPD_2:
+ WREG32(DC_HPD2_CONTROL, tmp);
+ rdev->irq.hpd[1] = true;
+ break;
+ case RADEON_HPD_3:
+ WREG32(DC_HPD3_CONTROL, tmp);
+ rdev->irq.hpd[2] = true;
+ break;
+ case RADEON_HPD_4:
+ WREG32(DC_HPD4_CONTROL, tmp);
+ rdev->irq.hpd[3] = true;
+ break;
+ case RADEON_HPD_5:
+ WREG32(DC_HPD5_CONTROL, tmp);
+ rdev->irq.hpd[4] = true;
+ break;
+ case RADEON_HPD_6:
+ WREG32(DC_HPD6_CONTROL, tmp);
+ rdev->irq.hpd[5] = true;
+ break;
+ default:
+ break;
+ }
+ }
+ if (rdev->irq.installed)
+ evergreen_irq_set(rdev);
}
-
-void evergreen_bandwidth_update(struct radeon_device *rdev)
+void evergreen_hpd_fini(struct radeon_device *rdev)
{
- /* XXX */
+ struct drm_device *dev = rdev->ddev;
+ struct drm_connector *connector;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ switch (radeon_connector->hpd.hpd) {
+ case RADEON_HPD_1:
+ WREG32(DC_HPD1_CONTROL, 0);
+ rdev->irq.hpd[0] = false;
+ break;
+ case RADEON_HPD_2:
+ WREG32(DC_HPD2_CONTROL, 0);
+ rdev->irq.hpd[1] = false;
+ break;
+ case RADEON_HPD_3:
+ WREG32(DC_HPD3_CONTROL, 0);
+ rdev->irq.hpd[2] = false;
+ break;
+ case RADEON_HPD_4:
+ WREG32(DC_HPD4_CONTROL, 0);
+ rdev->irq.hpd[3] = false;
+ break;
+ case RADEON_HPD_5:
+ WREG32(DC_HPD5_CONTROL, 0);
+ rdev->irq.hpd[4] = false;
+ break;
+ case RADEON_HPD_6:
+ WREG32(DC_HPD6_CONTROL, 0);
+ rdev->irq.hpd[5] = false;
+ break;
+ default:
+ break;
+ }
+ }
}
-void evergreen_hpd_fini(struct radeon_device *rdev)
+void evergreen_bandwidth_update(struct radeon_device *rdev)
{
/* XXX */
}
@@ -83,10 +279,31 @@ static int evergreen_mc_wait_for_idle(struct radeon_device *rdev)
/*
* GART
*/
+void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev)
+{
+ unsigned i;
+ u32 tmp;
+
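+	/* request a VM context 0 TLB flush and poll the response register
+	 * until the request completes (or an error response is reported) */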
+ WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ /* read the flush request response */
+ tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
+ tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
+ if (tmp == 2) {
+ printk(KERN_WARNING "[drm] evergreen flush TLB failed\n");
+ return;
+ }
+ if (tmp) {
+ return;
+ }
+ udelay(1);
+ }
+}
+
int evergreen_pcie_gart_enable(struct radeon_device *rdev)
{
u32 tmp;
- int r, i;
+ int r;
if (rdev->gart.table.vram.robj == NULL) {
dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
@@ -121,10 +338,9 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev)
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
(u32)(rdev->dummy_page.addr >> 12));
- for (i = 1; i < 7; i++)
- WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
+ WREG32(VM_CONTEXT1_CNTL, 0);
- r600_pcie_gart_tlb_flush(rdev);
+ evergreen_pcie_gart_tlb_flush(rdev);
rdev->gart.ready = true;
return 0;
}
@@ -132,11 +348,11 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev)
void evergreen_pcie_gart_disable(struct radeon_device *rdev)
{
u32 tmp;
- int i, r;
+ int r;
/* Disable all tables */
- for (i = 0; i < 7; i++)
- WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
+ WREG32(VM_CONTEXT0_CNTL, 0);
+ WREG32(VM_CONTEXT1_CNTL, 0);
/* Setup L2 cache */
WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
@@ -173,7 +389,6 @@ void evergreen_pcie_gart_fini(struct radeon_device *rdev)
void evergreen_agp_enable(struct radeon_device *rdev)
{
u32 tmp;
- int i;
/* Setup L2 cache */
WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
@@ -193,8 +408,8 @@ void evergreen_agp_enable(struct radeon_device *rdev)
WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
- for (i = 0; i < 7; i++)
- WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
+ WREG32(VM_CONTEXT0_CNTL, 0);
+ WREG32(VM_CONTEXT1_CNTL, 0);
}
static void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
@@ -400,40 +615,656 @@ static void evergreen_mc_program(struct radeon_device *rdev)
rv515_vga_render_disable(rdev);
}
-#if 0
/*
* CP.
*/
-static void evergreen_cp_stop(struct radeon_device *rdev)
-{
- /* XXX */
-}
-
static int evergreen_cp_load_microcode(struct radeon_device *rdev)
{
- /* XXX */
+ const __be32 *fw_data;
+ int i;
+ if (!rdev->me_fw || !rdev->pfp_fw)
+ return -EINVAL;
+
+ r700_cp_stop(rdev);
+ WREG32(CP_RB_CNTL, RB_NO_UPDATE | (15 << 8) | (3 << 0));
+
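+	/* upload the PFP and ME microcode one dword at a time */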
+ fw_data = (const __be32 *)rdev->pfp_fw->data;
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+ for (i = 0; i < EVERGREEN_PFP_UCODE_SIZE; i++)
+ WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+
+ fw_data = (const __be32 *)rdev->me_fw->data;
+ WREG32(CP_ME_RAM_WADDR, 0);
+ for (i = 0; i < EVERGREEN_PM4_UCODE_SIZE; i++)
+ WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
+
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+ WREG32(CP_ME_RAM_WADDR, 0);
+ WREG32(CP_ME_RAM_RADDR, 0);
return 0;
}
+int evergreen_cp_resume(struct radeon_device *rdev)
+{
+ u32 tmp;
+ u32 rb_bufsz;
+ int r;
+
+ /* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
+ WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
+ SOFT_RESET_PA |
+ SOFT_RESET_SH |
+ SOFT_RESET_VGT |
+ SOFT_RESET_SX));
+ RREG32(GRBM_SOFT_RESET);
+ mdelay(15);
+ WREG32(GRBM_SOFT_RESET, 0);
+ RREG32(GRBM_SOFT_RESET);
+
+ /* Set ring buffer size */
+ rb_bufsz = drm_order(rdev->cp.ring_size / 8);
+ tmp = RB_NO_UPDATE | (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+#ifdef __BIG_ENDIAN
+ tmp |= BUF_SWAP_32BIT;
+#endif
+ WREG32(CP_RB_CNTL, tmp);
+ WREG32(CP_SEM_WAIT_TIMER, 0x4);
+
+ /* Set the write pointer delay */
+ WREG32(CP_RB_WPTR_DELAY, 0);
+
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
+ WREG32(CP_RB_RPTR_WR, 0);
+ WREG32(CP_RB_WPTR, 0);
+ WREG32(CP_RB_RPTR_ADDR, rdev->cp.gpu_addr & 0xFFFFFFFF);
+ WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->cp.gpu_addr));
+ mdelay(1);
+ WREG32(CP_RB_CNTL, tmp);
+
+ WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
+ WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
+
+ rdev->cp.rptr = RREG32(CP_RB_RPTR);
+ rdev->cp.wptr = RREG32(CP_RB_WPTR);
+
+ r600_cp_start(rdev);
+ rdev->cp.ready = true;
+ r = radeon_ring_test(rdev);
+ if (r) {
+ rdev->cp.ready = false;
+ return r;
+ }
+ return 0;
+}
/*
* Core functions
*/
-static u32 evergreen_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
+static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
+ u32 num_tile_pipes,
u32 num_backends,
u32 backend_disable_mask)
{
u32 backend_map = 0;
+ u32 enabled_backends_mask = 0;
+ u32 enabled_backends_count = 0;
+ u32 cur_pipe;
+ u32 swizzle_pipe[EVERGREEN_MAX_PIPES];
+ u32 cur_backend = 0;
+ u32 i;
+ bool force_no_swizzle;
+
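+	/* clamp the pipe/backend counts, pick a per-family pipe swizzle order,
+	 * then assign each pipe an enabled backend (4 bits per pipe) */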
+ if (num_tile_pipes > EVERGREEN_MAX_PIPES)
+ num_tile_pipes = EVERGREEN_MAX_PIPES;
+ if (num_tile_pipes < 1)
+ num_tile_pipes = 1;
+ if (num_backends > EVERGREEN_MAX_BACKENDS)
+ num_backends = EVERGREEN_MAX_BACKENDS;
+ if (num_backends < 1)
+ num_backends = 1;
+
+ for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) {
+ if (((backend_disable_mask >> i) & 1) == 0) {
+ enabled_backends_mask |= (1 << i);
+ ++enabled_backends_count;
+ }
+ if (enabled_backends_count == num_backends)
+ break;
+ }
+
+ if (enabled_backends_count == 0) {
+ enabled_backends_mask = 1;
+ enabled_backends_count = 1;
+ }
+
+ if (enabled_backends_count != num_backends)
+ num_backends = enabled_backends_count;
+
+ memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * EVERGREEN_MAX_PIPES);
+ switch (rdev->family) {
+ case CHIP_CEDAR:
+ case CHIP_REDWOOD:
+ force_no_swizzle = false;
+ break;
+ case CHIP_CYPRESS:
+ case CHIP_HEMLOCK:
+ case CHIP_JUNIPER:
+ default:
+ force_no_swizzle = true;
+ break;
+ }
+ if (force_no_swizzle) {
+ bool last_backend_enabled = false;
+
+ force_no_swizzle = false;
+ for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) {
+ if (((enabled_backends_mask >> i) & 1) == 1) {
+ if (last_backend_enabled)
+ force_no_swizzle = true;
+ last_backend_enabled = true;
+ } else
+ last_backend_enabled = false;
+ }
+ }
+
+ switch (num_tile_pipes) {
+ case 1:
+ case 3:
+ case 5:
+ case 7:
+ DRM_ERROR("odd number of pipes!\n");
+ break;
+ case 2:
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ break;
+ case 4:
+ if (force_no_swizzle) {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ swizzle_pipe[2] = 2;
+ swizzle_pipe[3] = 3;
+ } else {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 2;
+ swizzle_pipe[2] = 1;
+ swizzle_pipe[3] = 3;
+ }
+ break;
+ case 6:
+ if (force_no_swizzle) {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ swizzle_pipe[2] = 2;
+ swizzle_pipe[3] = 3;
+ swizzle_pipe[4] = 4;
+ swizzle_pipe[5] = 5;
+ } else {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 2;
+ swizzle_pipe[2] = 4;
+ swizzle_pipe[3] = 1;
+ swizzle_pipe[4] = 3;
+ swizzle_pipe[5] = 5;
+ }
+ break;
+ case 8:
+ if (force_no_swizzle) {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ swizzle_pipe[2] = 2;
+ swizzle_pipe[3] = 3;
+ swizzle_pipe[4] = 4;
+ swizzle_pipe[5] = 5;
+ swizzle_pipe[6] = 6;
+ swizzle_pipe[7] = 7;
+ } else {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 2;
+ swizzle_pipe[2] = 4;
+ swizzle_pipe[3] = 6;
+ swizzle_pipe[4] = 1;
+ swizzle_pipe[5] = 3;
+ swizzle_pipe[6] = 5;
+ swizzle_pipe[7] = 7;
+ }
+ break;
+ }
+
+ for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
+ while (((1 << cur_backend) & enabled_backends_mask) == 0)
+ cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS;
+
+ backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4)));
+
+ cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS;
+ }
return backend_map;
}
-#endif
static void evergreen_gpu_init(struct radeon_device *rdev)
{
- /* XXX */
+ u32 cc_rb_backend_disable = 0;
+ u32 cc_gc_shader_pipe_config;
+ u32 gb_addr_config = 0;
+ u32 mc_shared_chmap, mc_arb_ramcfg;
+ u32 gb_backend_map;
+ u32 grbm_gfx_index;
+ u32 sx_debug_1;
+ u32 smx_dc_ctl0;
+ u32 sq_config;
+ u32 sq_lds_resource_mgmt;
+ u32 sq_gpr_resource_mgmt_1;
+ u32 sq_gpr_resource_mgmt_2;
+ u32 sq_gpr_resource_mgmt_3;
+ u32 sq_thread_resource_mgmt;
+ u32 sq_thread_resource_mgmt_2;
+ u32 sq_stack_resource_mgmt_1;
+ u32 sq_stack_resource_mgmt_2;
+ u32 sq_stack_resource_mgmt_3;
+ u32 vgt_cache_invalidation;
+ u32 hdp_host_path_cntl;
+ int i, j, num_shader_engines, ps_thread_count;
+
+ switch (rdev->family) {
+ case CHIP_CYPRESS:
+ case CHIP_HEMLOCK:
+ rdev->config.evergreen.num_ses = 2;
+ rdev->config.evergreen.max_pipes = 4;
+ rdev->config.evergreen.max_tile_pipes = 8;
+ rdev->config.evergreen.max_simds = 10;
+ rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
+ rdev->config.evergreen.max_gprs = 256;
+ rdev->config.evergreen.max_threads = 248;
+ rdev->config.evergreen.max_gs_threads = 32;
+ rdev->config.evergreen.max_stack_entries = 512;
+ rdev->config.evergreen.sx_num_of_sets = 4;
+ rdev->config.evergreen.sx_max_export_size = 256;
+ rdev->config.evergreen.sx_max_export_pos_size = 64;
+ rdev->config.evergreen.sx_max_export_smx_size = 192;
+ rdev->config.evergreen.max_hw_contexts = 8;
+ rdev->config.evergreen.sq_num_cf_insts = 2;
+
+ rdev->config.evergreen.sc_prim_fifo_size = 0x100;
+ rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+ rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+ break;
+ case CHIP_JUNIPER:
+ rdev->config.evergreen.num_ses = 1;
+ rdev->config.evergreen.max_pipes = 4;
+ rdev->config.evergreen.max_tile_pipes = 4;
+ rdev->config.evergreen.max_simds = 10;
+ rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
+ rdev->config.evergreen.max_gprs = 256;
+ rdev->config.evergreen.max_threads = 248;
+ rdev->config.evergreen.max_gs_threads = 32;
+ rdev->config.evergreen.max_stack_entries = 512;
+ rdev->config.evergreen.sx_num_of_sets = 4;
+ rdev->config.evergreen.sx_max_export_size = 256;
+ rdev->config.evergreen.sx_max_export_pos_size = 64;
+ rdev->config.evergreen.sx_max_export_smx_size = 192;
+ rdev->config.evergreen.max_hw_contexts = 8;
+ rdev->config.evergreen.sq_num_cf_insts = 2;
+
+ rdev->config.evergreen.sc_prim_fifo_size = 0x100;
+ rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+ rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+ break;
+ case CHIP_REDWOOD:
+ rdev->config.evergreen.num_ses = 1;
+ rdev->config.evergreen.max_pipes = 4;
+ rdev->config.evergreen.max_tile_pipes = 4;
+ rdev->config.evergreen.max_simds = 5;
+ rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
+ rdev->config.evergreen.max_gprs = 256;
+ rdev->config.evergreen.max_threads = 248;
+ rdev->config.evergreen.max_gs_threads = 32;
+ rdev->config.evergreen.max_stack_entries = 256;
+ rdev->config.evergreen.sx_num_of_sets = 4;
+ rdev->config.evergreen.sx_max_export_size = 256;
+ rdev->config.evergreen.sx_max_export_pos_size = 64;
+ rdev->config.evergreen.sx_max_export_smx_size = 192;
+ rdev->config.evergreen.max_hw_contexts = 8;
+ rdev->config.evergreen.sq_num_cf_insts = 2;
+
+ rdev->config.evergreen.sc_prim_fifo_size = 0x100;
+ rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+ rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+ break;
+ case CHIP_CEDAR:
+ default:
+ rdev->config.evergreen.num_ses = 1;
+ rdev->config.evergreen.max_pipes = 2;
+ rdev->config.evergreen.max_tile_pipes = 2;
+ rdev->config.evergreen.max_simds = 2;
+ rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
+ rdev->config.evergreen.max_gprs = 256;
+ rdev->config.evergreen.max_threads = 192;
+ rdev->config.evergreen.max_gs_threads = 16;
+ rdev->config.evergreen.max_stack_entries = 256;
+ rdev->config.evergreen.sx_num_of_sets = 4;
+ rdev->config.evergreen.sx_max_export_size = 128;
+ rdev->config.evergreen.sx_max_export_pos_size = 32;
+ rdev->config.evergreen.sx_max_export_smx_size = 96;
+ rdev->config.evergreen.max_hw_contexts = 4;
+ rdev->config.evergreen.sq_num_cf_insts = 1;
+
+ rdev->config.evergreen.sc_prim_fifo_size = 0x40;
+ rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+ rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+ break;
+ }
+
+ /* Initialize HDP */
+ for (i = 0, j = 0; i < 32; i++, j += 0x18) {
+ WREG32((0x2c14 + j), 0x00000000);
+ WREG32((0x2c18 + j), 0x00000000);
+ WREG32((0x2c1c + j), 0x00000000);
+ WREG32((0x2c20 + j), 0x00000000);
+ WREG32((0x2c24 + j), 0x00000000);
+ }
+
+ WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
+
+ cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & ~2;
+
+ cc_gc_shader_pipe_config |=
+ INACTIVE_QD_PIPES((EVERGREEN_MAX_PIPES_MASK << rdev->config.evergreen.max_pipes)
+ & EVERGREEN_MAX_PIPES_MASK);
+ cc_gc_shader_pipe_config |=
+ INACTIVE_SIMDS((EVERGREEN_MAX_SIMDS_MASK << rdev->config.evergreen.max_simds)
+ & EVERGREEN_MAX_SIMDS_MASK);
+
+ cc_rb_backend_disable =
+ BACKEND_DISABLE((EVERGREEN_MAX_BACKENDS_MASK << rdev->config.evergreen.max_backends)
+ & EVERGREEN_MAX_BACKENDS_MASK);
+
+
+ mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
+ mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
+
+ switch (rdev->config.evergreen.max_tile_pipes) {
+ case 1:
+ default:
+ gb_addr_config |= NUM_PIPES(0);
+ break;
+ case 2:
+ gb_addr_config |= NUM_PIPES(1);
+ break;
+ case 4:
+ gb_addr_config |= NUM_PIPES(2);
+ break;
+ case 8:
+ gb_addr_config |= NUM_PIPES(3);
+ break;
+ }
+
+ gb_addr_config |= PIPE_INTERLEAVE_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
+ gb_addr_config |= BANK_INTERLEAVE_SIZE(0);
+ gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.evergreen.num_ses - 1);
+ gb_addr_config |= SHADER_ENGINE_TILE_SIZE(1);
+ gb_addr_config |= NUM_GPUS(0); /* Hemlock? */
+ gb_addr_config |= MULTI_GPU_TILE_SIZE(2);
+
+ if (((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT) > 2)
+ gb_addr_config |= ROW_SIZE(2);
+ else
+ gb_addr_config |= ROW_SIZE((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT);
+
+ if (rdev->ddev->pdev->device == 0x689e) {
+ u32 efuse_straps_4;
+ u32 efuse_straps_3;
+ u8 efuse_box_bit_131_124;
+
+ WREG32(RCU_IND_INDEX, 0x204);
+ efuse_straps_4 = RREG32(RCU_IND_DATA);
+ WREG32(RCU_IND_INDEX, 0x203);
+ efuse_straps_3 = RREG32(RCU_IND_DATA);
+ efuse_box_bit_131_124 = (u8)(((efuse_straps_4 & 0xf) << 4) | ((efuse_straps_3 & 0xf0000000) >> 28));
+
+ switch(efuse_box_bit_131_124) {
+ case 0x00:
+ gb_backend_map = 0x76543210;
+ break;
+ case 0x55:
+ gb_backend_map = 0x77553311;
+ break;
+ case 0x56:
+ gb_backend_map = 0x77553300;
+ break;
+ case 0x59:
+ gb_backend_map = 0x77552211;
+ break;
+ case 0x66:
+ gb_backend_map = 0x77443300;
+ break;
+ case 0x99:
+ gb_backend_map = 0x66552211;
+ break;
+ case 0x5a:
+ gb_backend_map = 0x77552200;
+ break;
+ case 0xaa:
+ gb_backend_map = 0x66442200;
+ break;
+ case 0x95:
+ gb_backend_map = 0x66553311;
+ break;
+ default:
+ DRM_ERROR("bad backend map, using default\n");
+ gb_backend_map =
+ evergreen_get_tile_pipe_to_backend_map(rdev,
+ rdev->config.evergreen.max_tile_pipes,
+ rdev->config.evergreen.max_backends,
+ ((EVERGREEN_MAX_BACKENDS_MASK <<
+ rdev->config.evergreen.max_backends) &
+ EVERGREEN_MAX_BACKENDS_MASK));
+ break;
+ }
+ } else if (rdev->ddev->pdev->device == 0x68b9) {
+ u32 efuse_straps_3;
+ u8 efuse_box_bit_127_124;
+
+ WREG32(RCU_IND_INDEX, 0x203);
+ efuse_straps_3 = RREG32(RCU_IND_DATA);
+ efuse_box_bit_127_124 = (u8)((efuse_straps_3 & 0xF0000000) >> 28);
+
+ switch(efuse_box_bit_127_124) {
+ case 0x0:
+ gb_backend_map = 0x00003210;
+ break;
+ case 0x5:
+ case 0x6:
+ case 0x9:
+ case 0xa:
+ gb_backend_map = 0x00003311;
+ break;
+ default:
+ DRM_ERROR("bad backend map, using default\n");
+ gb_backend_map =
+ evergreen_get_tile_pipe_to_backend_map(rdev,
+ rdev->config.evergreen.max_tile_pipes,
+ rdev->config.evergreen.max_backends,
+ ((EVERGREEN_MAX_BACKENDS_MASK <<
+ rdev->config.evergreen.max_backends) &
+ EVERGREEN_MAX_BACKENDS_MASK));
+ break;
+ }
+ } else
+ gb_backend_map =
+ evergreen_get_tile_pipe_to_backend_map(rdev,
+ rdev->config.evergreen.max_tile_pipes,
+ rdev->config.evergreen.max_backends,
+ ((EVERGREEN_MAX_BACKENDS_MASK <<
+ rdev->config.evergreen.max_backends) &
+ EVERGREEN_MAX_BACKENDS_MASK));
+
+ WREG32(GB_BACKEND_MAP, gb_backend_map);
+ WREG32(GB_ADDR_CONFIG, gb_addr_config);
+ WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
+ WREG32(HDP_ADDR_CONFIG, gb_addr_config);
+
+ num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1;
+ grbm_gfx_index = INSTANCE_BROADCAST_WRITES;
+
+ for (i = 0; i < rdev->config.evergreen.num_ses; i++) {
+ u32 rb = cc_rb_backend_disable | (0xf0 << 16);
+ u32 sp = cc_gc_shader_pipe_config;
+ u32 gfx = grbm_gfx_index | SE_INDEX(i);
+
+ if (i == num_shader_engines) {
+ rb |= BACKEND_DISABLE(EVERGREEN_MAX_BACKENDS_MASK);
+ sp |= INACTIVE_SIMDS(EVERGREEN_MAX_SIMDS_MASK);
+ }
+
+ WREG32(GRBM_GFX_INDEX, gfx);
+ WREG32(RLC_GFX_INDEX, gfx);
+
+ WREG32(CC_RB_BACKEND_DISABLE, rb);
+ WREG32(CC_SYS_RB_BACKEND_DISABLE, rb);
+ WREG32(GC_USER_RB_BACKEND_DISABLE, rb);
+ WREG32(CC_GC_SHADER_PIPE_CONFIG, sp);
+ }
+
+ grbm_gfx_index |= SE_BROADCAST_WRITES;
+ WREG32(GRBM_GFX_INDEX, grbm_gfx_index);
+ WREG32(RLC_GFX_INDEX, grbm_gfx_index);
+
+ WREG32(CGTS_SYS_TCC_DISABLE, 0);
+ WREG32(CGTS_TCC_DISABLE, 0);
+ WREG32(CGTS_USER_SYS_TCC_DISABLE, 0);
+ WREG32(CGTS_USER_TCC_DISABLE, 0);
+
+ /* set HW defaults for 3D engine */
+ WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
+ ROQ_IB2_START(0x2b)));
+
+ WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30));
+
+ WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO |
+ SYNC_GRADIENT |
+ SYNC_WALKER |
+ SYNC_ALIGNER));
+
+ sx_debug_1 = RREG32(SX_DEBUG_1);
+ sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
+ WREG32(SX_DEBUG_1, sx_debug_1);
+
+
+ smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
+ smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff);
+ smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.evergreen.sx_num_of_sets);
+ WREG32(SMX_DC_CTL0, smx_dc_ctl0);
+
+ WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_size / 4) - 1) |
+ POSITION_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_pos_size / 4) - 1) |
+ SMX_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_smx_size / 4) - 1)));
+
+ WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.evergreen.sc_prim_fifo_size) |
+ SC_HIZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_hiz_tile_fifo_size) |
+ SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_earlyz_tile_fifo_size)));
+
+ WREG32(VGT_NUM_INSTANCES, 1);
+ WREG32(SPI_CONFIG_CNTL, 0);
+ WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
+ WREG32(CP_PERFMON_CNTL, 0);
+
+ WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.evergreen.sq_num_cf_insts) |
+ FETCH_FIFO_HIWATER(0x4) |
+ DONE_FIFO_HIWATER(0xe0) |
+ ALU_UPDATE_FIFO_HIWATER(0x8)));
+
+ sq_config = RREG32(SQ_CONFIG);
+ sq_config &= ~(PS_PRIO(3) |
+ VS_PRIO(3) |
+ GS_PRIO(3) |
+ ES_PRIO(3));
+ sq_config |= (VC_ENABLE |
+ EXPORT_SRC_C |
+ PS_PRIO(0) |
+ VS_PRIO(1) |
+ GS_PRIO(2) |
+ ES_PRIO(3));
+
+ if (rdev->family == CHIP_CEDAR)
+ /* no vertex cache */
+ sq_config &= ~VC_ENABLE;
+
+ sq_lds_resource_mgmt = RREG32(SQ_LDS_RESOURCE_MGMT);
+
+ sq_gpr_resource_mgmt_1 = NUM_PS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2))* 12 / 32);
+ sq_gpr_resource_mgmt_1 |= NUM_VS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 6 / 32);
+ sq_gpr_resource_mgmt_1 |= NUM_CLAUSE_TEMP_GPRS(4);
+ sq_gpr_resource_mgmt_2 = NUM_GS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32);
+ sq_gpr_resource_mgmt_2 |= NUM_ES_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32);
+ sq_gpr_resource_mgmt_3 = NUM_HS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
+ sq_gpr_resource_mgmt_3 |= NUM_LS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
+
+ if (rdev->family == CHIP_CEDAR)
+ ps_thread_count = 96;
+ else
+ ps_thread_count = 128;
+
+ sq_thread_resource_mgmt = NUM_PS_THREADS(ps_thread_count);
+ sq_thread_resource_mgmt |= NUM_VS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8;
+ sq_thread_resource_mgmt |= NUM_GS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8;
+ sq_thread_resource_mgmt |= NUM_ES_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8;
+ sq_thread_resource_mgmt_2 = NUM_HS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8;
+ sq_thread_resource_mgmt_2 |= NUM_LS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8;
+
+ sq_stack_resource_mgmt_1 = NUM_PS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
+ sq_stack_resource_mgmt_1 |= NUM_VS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
+ sq_stack_resource_mgmt_2 = NUM_GS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
+ sq_stack_resource_mgmt_2 |= NUM_ES_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
+ sq_stack_resource_mgmt_3 = NUM_HS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
+ sq_stack_resource_mgmt_3 |= NUM_LS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
+
+ WREG32(SQ_CONFIG, sq_config);
+ WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
+ WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
+ WREG32(SQ_GPR_RESOURCE_MGMT_3, sq_gpr_resource_mgmt_3);
+ WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
+ WREG32(SQ_THREAD_RESOURCE_MGMT_2, sq_thread_resource_mgmt_2);
+ WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
+ WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);
+ WREG32(SQ_STACK_RESOURCE_MGMT_3, sq_stack_resource_mgmt_3);
+ WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, 0);
+ WREG32(SQ_LDS_RESOURCE_MGMT, sq_lds_resource_mgmt);
+
+ WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
+ FORCE_EOV_MAX_REZ_CNT(255)));
+
+ if (rdev->family == CHIP_CEDAR)
+ vgt_cache_invalidation = CACHE_INVALIDATION(TC_ONLY);
+ else
+ vgt_cache_invalidation = CACHE_INVALIDATION(VC_AND_TC);
+ vgt_cache_invalidation |= AUTO_INVLD_EN(ES_AND_GS_AUTO);
+ WREG32(VGT_CACHE_INVALIDATION, vgt_cache_invalidation);
+
+ WREG32(VGT_GS_VERTEX_REUSE, 16);
+ WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
+
+ WREG32(CB_PERF_CTR0_SEL_0, 0);
+ WREG32(CB_PERF_CTR0_SEL_1, 0);
+ WREG32(CB_PERF_CTR1_SEL_0, 0);
+ WREG32(CB_PERF_CTR1_SEL_1, 0);
+ WREG32(CB_PERF_CTR2_SEL_0, 0);
+ WREG32(CB_PERF_CTR2_SEL_1, 0);
+ WREG32(CB_PERF_CTR3_SEL_0, 0);
+ WREG32(CB_PERF_CTR3_SEL_1, 0);
+
+ hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
+ WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
+
+ WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
+
+ udelay(50);
+
}
int evergreen_mc_init(struct radeon_device *rdev)
@@ -476,26 +1307,627 @@ int evergreen_mc_init(struct radeon_device *rdev)
rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
rdev->mc.visible_vram_size = rdev->mc.aper_size;
- /* FIXME remove this once we support unmappable VRAM */
- if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
- rdev->mc.mc_vram_size = rdev->mc.aper_size;
- rdev->mc.real_vram_size = rdev->mc.aper_size;
- }
r600_vram_gtt_location(rdev, &rdev->mc);
radeon_update_bandwidth_info(rdev);
return 0;
}
-int evergreen_gpu_reset(struct radeon_device *rdev)
+bool evergreen_gpu_is_lockup(struct radeon_device *rdev)
{
/* FIXME: implement for evergreen */
+ return false;
+}
+
+static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
+{
+ struct evergreen_mc_save save;
+ u32 srbm_reset = 0;
+ u32 grbm_reset = 0;
+
+	dev_info(rdev->dev, "GPU softreset\n");
+ dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
+ RREG32(GRBM_STATUS));
+ dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
+ RREG32(GRBM_STATUS_SE0));
+ dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
+ RREG32(GRBM_STATUS_SE1));
+ dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
+ RREG32(SRBM_STATUS));
+ evergreen_mc_stop(rdev, &save);
+ if (evergreen_mc_wait_for_idle(rdev)) {
+		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
+ }
+ /* Disable CP parsing/prefetching */
+ WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
+
+ /* reset all the gfx blocks */
+ grbm_reset = (SOFT_RESET_CP |
+ SOFT_RESET_CB |
+ SOFT_RESET_DB |
+ SOFT_RESET_PA |
+ SOFT_RESET_SC |
+ SOFT_RESET_SPI |
+ SOFT_RESET_SH |
+ SOFT_RESET_SX |
+ SOFT_RESET_TC |
+ SOFT_RESET_TA |
+ SOFT_RESET_VC |
+ SOFT_RESET_VGT);
+
+ dev_info(rdev->dev, " GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
+ WREG32(GRBM_SOFT_RESET, grbm_reset);
+ (void)RREG32(GRBM_SOFT_RESET);
+ udelay(50);
+ WREG32(GRBM_SOFT_RESET, 0);
+ (void)RREG32(GRBM_SOFT_RESET);
+
+ /* reset all the system blocks */
+ srbm_reset = SRBM_SOFT_RESET_ALL_MASK;
+
+ dev_info(rdev->dev, " SRBM_SOFT_RESET=0x%08X\n", srbm_reset);
+ WREG32(SRBM_SOFT_RESET, srbm_reset);
+ (void)RREG32(SRBM_SOFT_RESET);
+ udelay(50);
+ WREG32(SRBM_SOFT_RESET, 0);
+ (void)RREG32(SRBM_SOFT_RESET);
+ /* Wait a little for things to settle down */
+ udelay(50);
+ dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
+ RREG32(GRBM_STATUS));
+ dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
+ RREG32(GRBM_STATUS_SE0));
+ dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
+ RREG32(GRBM_STATUS_SE1));
+ dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
+ RREG32(SRBM_STATUS));
+	/* After a reset we need to reinit the asic as the GPU often ends up in
+	 * an incoherent state.
+	 */
+ atom_asic_init(rdev->mode_info.atom_context);
+ evergreen_mc_resume(rdev, &save);
+ return 0;
+}
+
+int evergreen_asic_reset(struct radeon_device *rdev)
+{
+ return evergreen_gpu_soft_reset(rdev);
+}
+
+/* Interrupts */
+
+u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc)
+{
+ switch (crtc) {
+ case 0:
+ return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC0_REGISTER_OFFSET);
+ case 1:
+ return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC1_REGISTER_OFFSET);
+ case 2:
+ return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC2_REGISTER_OFFSET);
+ case 3:
+ return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC3_REGISTER_OFFSET);
+ case 4:
+ return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC4_REGISTER_OFFSET);
+ case 5:
+ return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC5_REGISTER_OFFSET);
+ default:
+ return 0;
+ }
+}
+
+void evergreen_disable_interrupt_state(struct radeon_device *rdev)
+{
+ u32 tmp;
+
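+	/* mask every CP, GRBM, CRTC vblank/vline, pageflip, DAC and HPD interrupt source */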
+ WREG32(CP_INT_CNTL, 0);
+ WREG32(GRBM_INT_CNTL, 0);
+ WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
+ WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
+ WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+ WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+ WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+ WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+
+ WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
+ WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
+
+ tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD1_INT_CONTROL, tmp);
+ tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD2_INT_CONTROL, tmp);
+ tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD3_INT_CONTROL, tmp);
+ tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD4_INT_CONTROL, tmp);
+ tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD5_INT_CONTROL, tmp);
+ tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD6_INT_CONTROL, tmp);
+
+}
+
+int evergreen_irq_set(struct radeon_device *rdev)
+{
+ u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
+ u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
+ u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
+ u32 grbm_int_cntl = 0;
+
+ if (!rdev->irq.installed) {
+ WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
+ return -EINVAL;
+ }
+ /* don't enable anything if the ih is disabled */
+ if (!rdev->ih.enabled) {
+ r600_disable_interrupts(rdev);
+ /* force the active interrupt state to all disabled */
+ evergreen_disable_interrupt_state(rdev);
+ return 0;
+ }
+
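+	/* start from the current HPD control values with the enable bit cleared */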
+ hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
+
+ if (rdev->irq.sw_int) {
+ DRM_DEBUG("evergreen_irq_set: sw int\n");
+ cp_int_cntl |= RB_INT_ENABLE;
+ }
+ if (rdev->irq.crtc_vblank_int[0]) {
+ DRM_DEBUG("evergreen_irq_set: vblank 0\n");
+ crtc1 |= VBLANK_INT_MASK;
+ }
+ if (rdev->irq.crtc_vblank_int[1]) {
+ DRM_DEBUG("evergreen_irq_set: vblank 1\n");
+ crtc2 |= VBLANK_INT_MASK;
+ }
+ if (rdev->irq.crtc_vblank_int[2]) {
+ DRM_DEBUG("evergreen_irq_set: vblank 2\n");
+ crtc3 |= VBLANK_INT_MASK;
+ }
+ if (rdev->irq.crtc_vblank_int[3]) {
+ DRM_DEBUG("evergreen_irq_set: vblank 3\n");
+ crtc4 |= VBLANK_INT_MASK;
+ }
+ if (rdev->irq.crtc_vblank_int[4]) {
+ DRM_DEBUG("evergreen_irq_set: vblank 4\n");
+ crtc5 |= VBLANK_INT_MASK;
+ }
+ if (rdev->irq.crtc_vblank_int[5]) {
+ DRM_DEBUG("evergreen_irq_set: vblank 5\n");
+ crtc6 |= VBLANK_INT_MASK;
+ }
+ if (rdev->irq.hpd[0]) {
+ DRM_DEBUG("evergreen_irq_set: hpd 1\n");
+ hpd1 |= DC_HPDx_INT_EN;
+ }
+ if (rdev->irq.hpd[1]) {
+ DRM_DEBUG("evergreen_irq_set: hpd 2\n");
+ hpd2 |= DC_HPDx_INT_EN;
+ }
+ if (rdev->irq.hpd[2]) {
+ DRM_DEBUG("evergreen_irq_set: hpd 3\n");
+ hpd3 |= DC_HPDx_INT_EN;
+ }
+ if (rdev->irq.hpd[3]) {
+ DRM_DEBUG("evergreen_irq_set: hpd 4\n");
+ hpd4 |= DC_HPDx_INT_EN;
+ }
+ if (rdev->irq.hpd[4]) {
+ DRM_DEBUG("evergreen_irq_set: hpd 5\n");
+ hpd5 |= DC_HPDx_INT_EN;
+ }
+ if (rdev->irq.hpd[5]) {
+ DRM_DEBUG("evergreen_irq_set: hpd 6\n");
+ hpd6 |= DC_HPDx_INT_EN;
+ }
+ if (rdev->irq.gui_idle) {
+ DRM_DEBUG("gui idle\n");
+ grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
+ }
+
+ WREG32(CP_INT_CNTL, cp_int_cntl);
+ WREG32(GRBM_INT_CNTL, grbm_int_cntl);
+
+ WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
+ WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
+ WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
+ WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
+ WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5);
+ WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
+
+ WREG32(DC_HPD1_INT_CONTROL, hpd1);
+ WREG32(DC_HPD2_INT_CONTROL, hpd2);
+ WREG32(DC_HPD3_INT_CONTROL, hpd3);
+ WREG32(DC_HPD4_INT_CONTROL, hpd4);
+ WREG32(DC_HPD5_INT_CONTROL, hpd5);
+ WREG32(DC_HPD6_INT_CONTROL, hpd6);
+
return 0;
}
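+
+/* read back the display interrupt status registers and acknowledge any
+ * pending vblank, vline and hot plug interrupts
+ */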
+static inline void evergreen_irq_ack(struct radeon_device *rdev,
+ u32 *disp_int,
+ u32 *disp_int_cont,
+ u32 *disp_int_cont2,
+ u32 *disp_int_cont3,
+ u32 *disp_int_cont4,
+ u32 *disp_int_cont5)
+{
+ u32 tmp;
+
+ *disp_int = RREG32(DISP_INTERRUPT_STATUS);
+ *disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
+ *disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
+ *disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3);
+ *disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4);
+ *disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
+
+ if (*disp_int & LB_D1_VBLANK_INTERRUPT)
+ WREG32(VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK);
+ if (*disp_int & LB_D1_VLINE_INTERRUPT)
+ WREG32(VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK);
+
+ if (*disp_int_cont & LB_D2_VBLANK_INTERRUPT)
+ WREG32(VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK);
+ if (*disp_int_cont & LB_D2_VLINE_INTERRUPT)
+ WREG32(VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK);
+
+ if (*disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
+ WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
+ if (*disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
+ WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK);
+
+ if (*disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
+ WREG32(VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK);
+ if (*disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
+ WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK);
+
+ if (*disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
+ WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
+ if (*disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
+ WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK);
+
+ if (*disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
+ WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK);
+ if (*disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
+ WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK);
+
+ if (*disp_int & DC_HPD1_INTERRUPT) {
+ tmp = RREG32(DC_HPD1_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HPD1_INT_CONTROL, tmp);
+ }
+ if (*disp_int_cont & DC_HPD2_INTERRUPT) {
+ tmp = RREG32(DC_HPD2_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HPD2_INT_CONTROL, tmp);
+ }
+ if (*disp_int_cont2 & DC_HPD3_INTERRUPT) {
+ tmp = RREG32(DC_HPD3_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HPD3_INT_CONTROL, tmp);
+ }
+ if (*disp_int_cont3 & DC_HPD4_INTERRUPT) {
+ tmp = RREG32(DC_HPD4_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HPD4_INT_CONTROL, tmp);
+ }
+ if (*disp_int_cont4 & DC_HPD5_INTERRUPT) {
+ tmp = RREG32(DC_HPD5_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HPD5_INT_CONTROL, tmp);
+ }
+ if (*disp_int_cont5 & DC_HPD6_INTERRUPT) {
+		tmp = RREG32(DC_HPD6_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HPD6_INT_CONTROL, tmp);
+ }
+}
+
+void evergreen_irq_disable(struct radeon_device *rdev)
+{
+ u32 disp_int, disp_int_cont, disp_int_cont2;
+ u32 disp_int_cont3, disp_int_cont4, disp_int_cont5;
+
+ r600_disable_interrupts(rdev);
+ /* Wait and acknowledge irq */
+ mdelay(1);
+ evergreen_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2,
+ &disp_int_cont3, &disp_int_cont4, &disp_int_cont5);
+ evergreen_disable_interrupt_state(rdev);
+}
+
+static void evergreen_irq_suspend(struct radeon_device *rdev)
+{
+ evergreen_irq_disable(rdev);
+ r600_rlc_stop(rdev);
+}
+
+static inline u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
+{
+ u32 wptr, tmp;
+
+ /* XXX use writeback */
+ wptr = RREG32(IH_RB_WPTR);
+
+ if (wptr & RB_OVERFLOW) {
+		/* When a ring buffer overflow happens, start parsing interrupts
+		 * from the last vector that was not overwritten (wptr + 16).
+		 * Hopefully this lets us catch up.
+		 */
+		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
+			wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
+ rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
+ tmp = RREG32(IH_RB_CNTL);
+ tmp |= IH_WPTR_OVERFLOW_CLEAR;
+ WREG32(IH_RB_CNTL, tmp);
+ }
+ return (wptr & rdev->ih.ptr_mask);
+}
+
+int evergreen_irq_process(struct radeon_device *rdev)
+{
+ u32 wptr = evergreen_get_ih_wptr(rdev);
+ u32 rptr = rdev->ih.rptr;
+ u32 src_id, src_data;
+ u32 ring_index;
+ u32 disp_int, disp_int_cont, disp_int_cont2;
+ u32 disp_int_cont3, disp_int_cont4, disp_int_cont5;
+ unsigned long flags;
+ bool queue_hotplug = false;
+
+	DRM_DEBUG("evergreen_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
+ if (!rdev->ih.enabled)
+ return IRQ_NONE;
+
+ spin_lock_irqsave(&rdev->ih.lock, flags);
+
+ if (rptr == wptr) {
+ spin_unlock_irqrestore(&rdev->ih.lock, flags);
+ return IRQ_NONE;
+ }
+ if (rdev->shutdown) {
+ spin_unlock_irqrestore(&rdev->ih.lock, flags);
+ return IRQ_NONE;
+ }
+
+restart_ih:
+ /* display interrupts */
+ evergreen_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2,
+ &disp_int_cont3, &disp_int_cont4, &disp_int_cont5);
+
+ rdev->ih.wptr = wptr;
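+	/* each IH ring entry is 16 bytes (four dwords): dword 0 carries the
+	 * source id, dword 1 the source data
+	 */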
+ while (rptr != wptr) {
+ /* wptr/rptr are in bytes! */
+ ring_index = rptr / 4;
+ src_id = rdev->ih.ring[ring_index] & 0xff;
+ src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff;
+
+ switch (src_id) {
+ case 1: /* D1 vblank/vline */
+ switch (src_data) {
+ case 0: /* D1 vblank */
+ if (disp_int & LB_D1_VBLANK_INTERRUPT) {
+ drm_handle_vblank(rdev->ddev, 0);
+ wake_up(&rdev->irq.vblank_queue);
+ disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D1 vblank\n");
+ }
+ break;
+ case 1: /* D1 vline */
+ if (disp_int & LB_D1_VLINE_INTERRUPT) {
+ disp_int &= ~LB_D1_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D1 vline\n");
+ }
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+ break;
+ case 2: /* D2 vblank/vline */
+ switch (src_data) {
+ case 0: /* D2 vblank */
+ if (disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
+ drm_handle_vblank(rdev->ddev, 1);
+ wake_up(&rdev->irq.vblank_queue);
+ disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D2 vblank\n");
+ }
+ break;
+ case 1: /* D2 vline */
+ if (disp_int_cont & LB_D2_VLINE_INTERRUPT) {
+ disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D2 vline\n");
+ }
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+ break;
+ case 3: /* D3 vblank/vline */
+ switch (src_data) {
+ case 0: /* D3 vblank */
+ if (disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
+ drm_handle_vblank(rdev->ddev, 2);
+ wake_up(&rdev->irq.vblank_queue);
+ disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D3 vblank\n");
+ }
+ break;
+ case 1: /* D3 vline */
+ if (disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
+ disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D3 vline\n");
+ }
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+ break;
+ case 4: /* D4 vblank/vline */
+ switch (src_data) {
+ case 0: /* D4 vblank */
+ if (disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
+ drm_handle_vblank(rdev->ddev, 3);
+ wake_up(&rdev->irq.vblank_queue);
+ disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D4 vblank\n");
+ }
+ break;
+ case 1: /* D4 vline */
+ if (disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
+ disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D4 vline\n");
+ }
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+ break;
+ case 5: /* D5 vblank/vline */
+ switch (src_data) {
+ case 0: /* D5 vblank */
+ if (disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
+ drm_handle_vblank(rdev->ddev, 4);
+ wake_up(&rdev->irq.vblank_queue);
+ disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D5 vblank\n");
+ }
+ break;
+ case 1: /* D5 vline */
+ if (disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
+ disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D5 vline\n");
+ }
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+ break;
+ case 6: /* D6 vblank/vline */
+ switch (src_data) {
+ case 0: /* D6 vblank */
+ if (disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
+ drm_handle_vblank(rdev->ddev, 5);
+ wake_up(&rdev->irq.vblank_queue);
+ disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D6 vblank\n");
+ }
+ break;
+ case 1: /* D6 vline */
+ if (disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
+ disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D6 vline\n");
+ }
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+ break;
+ case 42: /* HPD hotplug */
+ switch (src_data) {
+ case 0:
+ if (disp_int & DC_HPD1_INTERRUPT) {
+ disp_int &= ~DC_HPD1_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD1\n");
+ }
+ break;
+ case 1:
+ if (disp_int_cont & DC_HPD2_INTERRUPT) {
+ disp_int_cont &= ~DC_HPD2_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD2\n");
+ }
+ break;
+ case 2:
+ if (disp_int_cont2 & DC_HPD3_INTERRUPT) {
+ disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD3\n");
+ }
+ break;
+ case 3:
+ if (disp_int_cont3 & DC_HPD4_INTERRUPT) {
+ disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD4\n");
+ }
+ break;
+ case 4:
+ if (disp_int_cont4 & DC_HPD5_INTERRUPT) {
+ disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD5\n");
+ }
+ break;
+ case 5:
+ if (disp_int_cont5 & DC_HPD6_INTERRUPT) {
+ disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD6\n");
+ }
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+ break;
+ case 176: /* CP_INT in ring buffer */
+ case 177: /* CP_INT in IB1 */
+ case 178: /* CP_INT in IB2 */
+ DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
+ radeon_fence_process(rdev);
+ break;
+ case 181: /* CP EOP event */
+ DRM_DEBUG("IH: CP EOP\n");
+ break;
+ case 233: /* GUI IDLE */
+			DRM_DEBUG("IH: GUI idle\n");
+ rdev->pm.gui_idle = true;
+ wake_up(&rdev->irq.idle_queue);
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+
+ /* wptr/rptr are in bytes! */
+ rptr += 16;
+ rptr &= rdev->ih.ptr_mask;
+ }
+ /* make sure wptr hasn't changed while processing */
+ wptr = evergreen_get_ih_wptr(rdev);
+ if (wptr != rdev->ih.wptr)
+ goto restart_ih;
+ if (queue_hotplug)
+ queue_work(rdev->wq, &rdev->hotplug_work);
+ rdev->ih.rptr = rptr;
+ WREG32(IH_RB_RPTR, rdev->ih.rptr);
+ spin_unlock_irqrestore(&rdev->ih.lock, flags);
+ return IRQ_HANDLED;
+}
+
static int evergreen_startup(struct radeon_device *rdev)
{
-#if 0
int r;
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
@@ -505,17 +1937,15 @@ static int evergreen_startup(struct radeon_device *rdev)
return r;
}
}
-#endif
+
evergreen_mc_program(rdev);
-#if 0
if (rdev->flags & RADEON_IS_AGP) {
- evergreem_agp_enable(rdev);
+ evergreen_agp_enable(rdev);
} else {
r = evergreen_pcie_gart_enable(rdev);
if (r)
return r;
}
-#endif
evergreen_gpu_init(rdev);
#if 0
if (!rdev->r600_blit.shader_obj) {
@@ -536,6 +1966,7 @@ static int evergreen_startup(struct radeon_device *rdev)
DRM_ERROR("failed to pin blit object %d\n", r);
return r;
}
+#endif
/* Enable IRQ */
r = r600_irq_init(rdev);
@@ -544,7 +1975,7 @@ static int evergreen_startup(struct radeon_device *rdev)
radeon_irq_kms_fini(rdev);
return r;
}
- r600_irq_set(rdev);
+ evergreen_irq_set(rdev);
r = radeon_ring_init(rdev, rdev->cp.ring_size);
if (r)
@@ -552,12 +1983,12 @@ static int evergreen_startup(struct radeon_device *rdev)
r = evergreen_cp_load_microcode(rdev);
if (r)
return r;
- r = r600_cp_resume(rdev);
+ r = evergreen_cp_resume(rdev);
if (r)
return r;
/* write back buffer are not vital so don't worry about failure */
r600_wb_enable(rdev);
-#endif
+
return 0;
}
@@ -582,13 +2013,13 @@ int evergreen_resume(struct radeon_device *rdev)
DRM_ERROR("r600 startup failed on resume\n");
return r;
}
-#if 0
+
r = r600_ib_test(rdev);
if (r) {
		DRM_ERROR("radeon: failed testing IB (%d).\n", r);
return r;
}
-#endif
+
return r;
}
@@ -597,12 +2028,14 @@ int evergreen_suspend(struct radeon_device *rdev)
{
#if 0
int r;
-
+#endif
/* FIXME: we should wait for ring to be empty */
r700_cp_stop(rdev);
rdev->cp.ready = false;
+ evergreen_irq_suspend(rdev);
r600_wb_disable(rdev);
evergreen_pcie_gart_disable(rdev);
+#if 0
/* unpin shaders bo */
r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
if (likely(r == 0)) {
@@ -682,8 +2115,6 @@ int evergreen_init(struct radeon_device *rdev)
r = radeon_clocks_init(rdev);
if (r)
return r;
- /* Initialize power management */
- radeon_pm_init(rdev);
/* Fence driver */
r = radeon_fence_driver_init(rdev);
if (r)
@@ -702,7 +2133,7 @@ int evergreen_init(struct radeon_device *rdev)
r = radeon_bo_init(rdev);
if (r)
return r;
-#if 0
+
r = radeon_irq_kms_init(rdev);
if (r)
return r;
@@ -716,14 +2147,16 @@ int evergreen_init(struct radeon_device *rdev)
r = r600_pcie_gart_init(rdev);
if (r)
return r;
-#endif
+
rdev->accel_working = false;
r = evergreen_startup(rdev);
if (r) {
- evergreen_suspend(rdev);
- /*r600_wb_fini(rdev);*/
- /*radeon_ring_fini(rdev);*/
- /*evergreen_pcie_gart_fini(rdev);*/
+ dev_err(rdev->dev, "disabling GPU acceleration\n");
+ r700_cp_fini(rdev);
+ r600_wb_fini(rdev);
+ r600_irq_fini(rdev);
+ radeon_irq_kms_fini(rdev);
+ evergreen_pcie_gart_fini(rdev);
rdev->accel_working = false;
}
if (rdev->accel_working) {
@@ -743,16 +2176,12 @@ int evergreen_init(struct radeon_device *rdev)
void evergreen_fini(struct radeon_device *rdev)
{
- radeon_pm_fini(rdev);
- evergreen_suspend(rdev);
-#if 0
- r600_blit_fini(rdev);
+ /*r600_blit_fini(rdev);*/
+ r700_cp_fini(rdev);
+ r600_wb_fini(rdev);
r600_irq_fini(rdev);
radeon_irq_kms_fini(rdev);
- radeon_ring_fini(rdev);
- r600_wb_fini(rdev);
evergreen_pcie_gart_fini(rdev);
-#endif
radeon_gem_fini(rdev);
radeon_fence_driver_fini(rdev);
radeon_clocks_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index f7c7c9643433..af86af836f13 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -164,8 +164,12 @@
#define EVERGREEN_CRTC5_REGISTER_OFFSET (0x129f0 - 0x6df0)
/* CRTC blocks at 0x6df0, 0x79f0, 0x105f0, 0x111f0, 0x11df0, 0x129f0 */
+#define EVERGREEN_CRTC_V_BLANK_START_END 0x6e34
#define EVERGREEN_CRTC_CONTROL 0x6e70
# define EVERGREEN_CRTC_MASTER_EN (1 << 0)
+# define EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24)
+#define EVERGREEN_CRTC_STATUS 0x6e8c
+#define EVERGREEN_CRTC_STATUS_POSITION 0x6e90
#define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4
#define EVERGREEN_DC_GPIO_HPD_MASK 0x64b0
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
new file mode 100644
index 000000000000..93e9e17ad54a
--- /dev/null
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -0,0 +1,556 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#ifndef EVERGREEND_H
+#define EVERGREEND_H
+
+#define EVERGREEN_MAX_SH_GPRS 256
+#define EVERGREEN_MAX_TEMP_GPRS 16
+#define EVERGREEN_MAX_SH_THREADS 256
+#define EVERGREEN_MAX_SH_STACK_ENTRIES 4096
+#define EVERGREEN_MAX_FRC_EOV_CNT 16384
+#define EVERGREEN_MAX_BACKENDS 8
+#define EVERGREEN_MAX_BACKENDS_MASK 0xFF
+#define EVERGREEN_MAX_SIMDS 16
+#define EVERGREEN_MAX_SIMDS_MASK 0xFFFF
+#define EVERGREEN_MAX_PIPES 8
+#define EVERGREEN_MAX_PIPES_MASK 0xFF
+#define EVERGREEN_MAX_LDS_NUM 0xFFFF
+
+/* Registers */
+
+#define RCU_IND_INDEX 0x100
+#define RCU_IND_DATA 0x104
+
+#define GRBM_GFX_INDEX 0x802C
+#define INSTANCE_INDEX(x) ((x) << 0)
+#define SE_INDEX(x) ((x) << 16)
+#define INSTANCE_BROADCAST_WRITES (1 << 30)
+#define SE_BROADCAST_WRITES (1 << 31)
+#define RLC_GFX_INDEX 0x3fC4
+#define CC_GC_SHADER_PIPE_CONFIG 0x8950
+#define WRITE_DIS (1 << 0)
+#define CC_RB_BACKEND_DISABLE 0x98F4
+#define BACKEND_DISABLE(x) ((x) << 16)
+#define GB_ADDR_CONFIG 0x98F8
+#define NUM_PIPES(x) ((x) << 0)
+#define PIPE_INTERLEAVE_SIZE(x) ((x) << 4)
+#define BANK_INTERLEAVE_SIZE(x) ((x) << 8)
+#define NUM_SHADER_ENGINES(x) ((x) << 12)
+#define SHADER_ENGINE_TILE_SIZE(x) ((x) << 16)
+#define NUM_GPUS(x) ((x) << 20)
+#define MULTI_GPU_TILE_SIZE(x) ((x) << 24)
+#define ROW_SIZE(x) ((x) << 28)
+#define GB_BACKEND_MAP 0x98FC
+#define DMIF_ADDR_CONFIG 0xBD4
+#define HDP_ADDR_CONFIG 0x2F48
+
+#define CC_SYS_RB_BACKEND_DISABLE 0x3F88
+#define GC_USER_RB_BACKEND_DISABLE 0x9B7C
+
+#define CGTS_SYS_TCC_DISABLE 0x3F90
+#define CGTS_TCC_DISABLE 0x9148
+#define CGTS_USER_SYS_TCC_DISABLE 0x3F94
+#define CGTS_USER_TCC_DISABLE 0x914C
+
+#define CONFIG_MEMSIZE 0x5428
+
+#define CP_ME_CNTL 0x86D8
+#define CP_ME_HALT (1 << 28)
+#define CP_PFP_HALT (1 << 26)
+#define CP_ME_RAM_DATA 0xC160
+#define CP_ME_RAM_RADDR 0xC158
+#define CP_ME_RAM_WADDR 0xC15C
+#define CP_MEQ_THRESHOLDS 0x8764
+#define STQ_SPLIT(x) ((x) << 0)
+#define CP_PERFMON_CNTL 0x87FC
+#define CP_PFP_UCODE_ADDR 0xC150
+#define CP_PFP_UCODE_DATA 0xC154
+#define CP_QUEUE_THRESHOLDS 0x8760
+#define ROQ_IB1_START(x) ((x) << 0)
+#define ROQ_IB2_START(x) ((x) << 8)
+#define CP_RB_BASE 0xC100
+#define CP_RB_CNTL 0xC104
+#define RB_BUFSZ(x) ((x) << 0)
+#define RB_BLKSZ(x) ((x) << 8)
+#define RB_NO_UPDATE (1 << 27)
+#define RB_RPTR_WR_ENA (1 << 31)
+#define BUF_SWAP_32BIT (2 << 16)
+#define CP_RB_RPTR 0x8700
+#define CP_RB_RPTR_ADDR 0xC10C
+#define CP_RB_RPTR_ADDR_HI 0xC110
+#define CP_RB_RPTR_WR 0xC108
+#define CP_RB_WPTR 0xC114
+#define CP_RB_WPTR_ADDR 0xC118
+#define CP_RB_WPTR_ADDR_HI 0xC11C
+#define CP_RB_WPTR_DELAY 0x8704
+#define CP_SEM_WAIT_TIMER 0x85BC
+#define CP_DEBUG 0xC1FC
+
+
+#define GC_USER_SHADER_PIPE_CONFIG 0x8954
+#define INACTIVE_QD_PIPES(x) ((x) << 8)
+#define INACTIVE_QD_PIPES_MASK 0x0000FF00
+#define INACTIVE_SIMDS(x) ((x) << 16)
+#define INACTIVE_SIMDS_MASK 0x00FF0000
+
+#define GRBM_CNTL 0x8000
+#define GRBM_READ_TIMEOUT(x) ((x) << 0)
+#define GRBM_SOFT_RESET 0x8020
+#define SOFT_RESET_CP (1 << 0)
+#define SOFT_RESET_CB (1 << 1)
+#define SOFT_RESET_DB (1 << 3)
+#define SOFT_RESET_PA (1 << 5)
+#define SOFT_RESET_SC (1 << 6)
+#define SOFT_RESET_SPI (1 << 8)
+#define SOFT_RESET_SH (1 << 9)
+#define SOFT_RESET_SX (1 << 10)
+#define SOFT_RESET_TC (1 << 11)
+#define SOFT_RESET_TA (1 << 12)
+#define SOFT_RESET_VC (1 << 13)
+#define SOFT_RESET_VGT (1 << 14)
+
+#define GRBM_STATUS 0x8010
+#define CMDFIFO_AVAIL_MASK 0x0000000F
+#define SRBM_RQ_PENDING (1 << 5)
+#define CF_RQ_PENDING (1 << 7)
+#define PF_RQ_PENDING (1 << 8)
+#define GRBM_EE_BUSY (1 << 10)
+#define SX_CLEAN (1 << 11)
+#define DB_CLEAN (1 << 12)
+#define CB_CLEAN (1 << 13)
+#define TA_BUSY (1 << 14)
+#define VGT_BUSY_NO_DMA (1 << 16)
+#define VGT_BUSY (1 << 17)
+#define SX_BUSY (1 << 20)
+#define SH_BUSY (1 << 21)
+#define SPI_BUSY (1 << 22)
+#define SC_BUSY (1 << 24)
+#define PA_BUSY (1 << 25)
+#define DB_BUSY (1 << 26)
+#define CP_COHERENCY_BUSY (1 << 28)
+#define CP_BUSY (1 << 29)
+#define CB_BUSY (1 << 30)
+#define GUI_ACTIVE (1 << 31)
+#define GRBM_STATUS_SE0 0x8014
+#define GRBM_STATUS_SE1 0x8018
+#define SE_SX_CLEAN (1 << 0)
+#define SE_DB_CLEAN (1 << 1)
+#define SE_CB_CLEAN (1 << 2)
+#define SE_TA_BUSY (1 << 25)
+#define SE_SX_BUSY (1 << 26)
+#define SE_SPI_BUSY (1 << 27)
+#define SE_SH_BUSY (1 << 28)
+#define SE_SC_BUSY (1 << 29)
+#define SE_DB_BUSY (1 << 30)
+#define SE_CB_BUSY (1 << 31)
+
+#define HDP_HOST_PATH_CNTL 0x2C00
+#define HDP_NONSURFACE_BASE 0x2C04
+#define HDP_NONSURFACE_INFO 0x2C08
+#define HDP_NONSURFACE_SIZE 0x2C0C
+#define HDP_REG_COHERENCY_FLUSH_CNTL 0x54A0
+#define HDP_TILING_CONFIG 0x2F3C
+
+#define MC_SHARED_CHMAP 0x2004
+#define NOOFCHAN_SHIFT 12
+#define NOOFCHAN_MASK 0x00003000
+
+#define MC_ARB_RAMCFG 0x2760
+#define NOOFBANK_SHIFT 0
+#define NOOFBANK_MASK 0x00000003
+#define NOOFRANK_SHIFT 2
+#define NOOFRANK_MASK 0x00000004
+#define NOOFROWS_SHIFT 3
+#define NOOFROWS_MASK 0x00000038
+#define NOOFCOLS_SHIFT 6
+#define NOOFCOLS_MASK 0x000000C0
+#define CHANSIZE_SHIFT 8
+#define CHANSIZE_MASK 0x00000100
+#define BURSTLENGTH_SHIFT 9
+#define BURSTLENGTH_MASK 0x00000200
+#define CHANSIZE_OVERRIDE (1 << 11)
+#define MC_VM_AGP_TOP 0x2028
+#define MC_VM_AGP_BOT 0x202C
+#define MC_VM_AGP_BASE 0x2030
+#define MC_VM_FB_LOCATION 0x2024
+#define MC_VM_MB_L1_TLB0_CNTL 0x2234
+#define MC_VM_MB_L1_TLB1_CNTL 0x2238
+#define MC_VM_MB_L1_TLB2_CNTL 0x223C
+#define MC_VM_MB_L1_TLB3_CNTL 0x2240
+#define ENABLE_L1_TLB (1 << 0)
+#define ENABLE_L1_FRAGMENT_PROCESSING (1 << 1)
+#define SYSTEM_ACCESS_MODE_PA_ONLY (0 << 3)
+#define SYSTEM_ACCESS_MODE_USE_SYS_MAP (1 << 3)
+#define SYSTEM_ACCESS_MODE_IN_SYS (2 << 3)
+#define SYSTEM_ACCESS_MODE_NOT_IN_SYS (3 << 3)
+#define SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU (0 << 5)
+#define EFFECTIVE_L1_TLB_SIZE(x) ((x)<<15)
+#define EFFECTIVE_L1_QUEUE_SIZE(x) ((x)<<18)
+#define MC_VM_MD_L1_TLB0_CNTL 0x2654
+#define MC_VM_MD_L1_TLB1_CNTL 0x2658
+#define MC_VM_MD_L1_TLB2_CNTL 0x265C
+#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x203C
+#define MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2038
+#define MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2034
+
+#define PA_CL_ENHANCE 0x8A14
+#define CLIP_VTX_REORDER_ENA (1 << 0)
+#define NUM_CLIP_SEQ(x) ((x) << 1)
+#define PA_SC_AA_CONFIG 0x28C04
+#define PA_SC_CLIPRECT_RULE 0x2820C
+#define PA_SC_EDGERULE 0x28230
+#define PA_SC_FIFO_SIZE 0x8BCC
+#define SC_PRIM_FIFO_SIZE(x) ((x) << 0)
+#define SC_HIZ_TILE_FIFO_SIZE(x) ((x) << 12)
+#define SC_EARLYZ_TILE_FIFO_SIZE(x) ((x) << 20)
+#define PA_SC_FORCE_EOV_MAX_CNTS 0x8B24
+#define FORCE_EOV_MAX_CLK_CNT(x) ((x) << 0)
+#define FORCE_EOV_MAX_REZ_CNT(x) ((x) << 16)
+#define PA_SC_LINE_STIPPLE 0x28A0C
+#define PA_SC_LINE_STIPPLE_STATE 0x8B10
+
+#define SCRATCH_REG0 0x8500
+#define SCRATCH_REG1 0x8504
+#define SCRATCH_REG2 0x8508
+#define SCRATCH_REG3 0x850C
+#define SCRATCH_REG4 0x8510
+#define SCRATCH_REG5 0x8514
+#define SCRATCH_REG6 0x8518
+#define SCRATCH_REG7 0x851C
+#define SCRATCH_UMSK 0x8540
+#define SCRATCH_ADDR 0x8544
+
+#define SMX_DC_CTL0 0xA020
+#define USE_HASH_FUNCTION (1 << 0)
+#define NUMBER_OF_SETS(x) ((x) << 1)
+#define FLUSH_ALL_ON_EVENT (1 << 10)
+#define STALL_ON_EVENT (1 << 11)
+#define SMX_EVENT_CTL 0xA02C
+#define ES_FLUSH_CTL(x) ((x) << 0)
+#define GS_FLUSH_CTL(x) ((x) << 3)
+#define ACK_FLUSH_CTL(x) ((x) << 6)
+#define SYNC_FLUSH_CTL (1 << 8)
+
+#define SPI_CONFIG_CNTL 0x9100
+#define GPR_WRITE_PRIORITY(x) ((x) << 0)
+#define SPI_CONFIG_CNTL_1 0x913C
+#define VTX_DONE_DELAY(x) ((x) << 0)
+#define INTERP_ONE_PRIM_PER_ROW (1 << 4)
+#define SPI_INPUT_Z 0x286D8
+#define SPI_PS_IN_CONTROL_0 0x286CC
+#define NUM_INTERP(x) ((x)<<0)
+#define POSITION_ENA (1<<8)
+#define POSITION_CENTROID (1<<9)
+#define POSITION_ADDR(x) ((x)<<10)
+#define PARAM_GEN(x) ((x)<<15)
+#define PARAM_GEN_ADDR(x) ((x)<<19)
+#define BARYC_SAMPLE_CNTL(x) ((x)<<26)
+#define PERSP_GRADIENT_ENA (1<<28)
+#define LINEAR_GRADIENT_ENA (1<<29)
+#define POSITION_SAMPLE (1<<30)
+#define BARYC_AT_SAMPLE_ENA (1<<31)
+
+#define SQ_CONFIG 0x8C00
+#define VC_ENABLE (1 << 0)
+#define EXPORT_SRC_C (1 << 1)
+#define CS_PRIO(x) ((x) << 18)
+#define LS_PRIO(x) ((x) << 20)
+#define HS_PRIO(x) ((x) << 22)
+#define PS_PRIO(x) ((x) << 24)
+#define VS_PRIO(x) ((x) << 26)
+#define GS_PRIO(x) ((x) << 28)
+#define ES_PRIO(x) ((x) << 30)
+#define SQ_GPR_RESOURCE_MGMT_1 0x8C04
+#define NUM_PS_GPRS(x) ((x) << 0)
+#define NUM_VS_GPRS(x) ((x) << 16)
+#define NUM_CLAUSE_TEMP_GPRS(x) ((x) << 28)
+#define SQ_GPR_RESOURCE_MGMT_2 0x8C08
+#define NUM_GS_GPRS(x) ((x) << 0)
+#define NUM_ES_GPRS(x) ((x) << 16)
+#define SQ_GPR_RESOURCE_MGMT_3 0x8C0C
+#define NUM_HS_GPRS(x) ((x) << 0)
+#define NUM_LS_GPRS(x) ((x) << 16)
+#define SQ_THREAD_RESOURCE_MGMT 0x8C18
+#define NUM_PS_THREADS(x) ((x) << 0)
+#define NUM_VS_THREADS(x) ((x) << 8)
+#define NUM_GS_THREADS(x) ((x) << 16)
+#define NUM_ES_THREADS(x) ((x) << 24)
+#define SQ_THREAD_RESOURCE_MGMT_2 0x8C1C
+#define NUM_HS_THREADS(x) ((x) << 0)
+#define NUM_LS_THREADS(x) ((x) << 8)
+#define SQ_STACK_RESOURCE_MGMT_1 0x8C20
+#define NUM_PS_STACK_ENTRIES(x) ((x) << 0)
+#define NUM_VS_STACK_ENTRIES(x) ((x) << 16)
+#define SQ_STACK_RESOURCE_MGMT_2 0x8C24
+#define NUM_GS_STACK_ENTRIES(x) ((x) << 0)
+#define NUM_ES_STACK_ENTRIES(x) ((x) << 16)
+#define SQ_STACK_RESOURCE_MGMT_3 0x8C28
+#define NUM_HS_STACK_ENTRIES(x) ((x) << 0)
+#define NUM_LS_STACK_ENTRIES(x) ((x) << 16)
+#define SQ_DYN_GPR_CNTL_PS_FLUSH_REQ 0x8D8C
+#define SQ_LDS_RESOURCE_MGMT 0x8E2C
+
+#define SQ_MS_FIFO_SIZES 0x8CF0
+#define CACHE_FIFO_SIZE(x) ((x) << 0)
+#define FETCH_FIFO_HIWATER(x) ((x) << 8)
+#define DONE_FIFO_HIWATER(x) ((x) << 16)
+#define ALU_UPDATE_FIFO_HIWATER(x) ((x) << 24)
+
+#define SX_DEBUG_1 0x9058
+#define ENABLE_NEW_SMX_ADDRESS (1 << 16)
+#define SX_EXPORT_BUFFER_SIZES 0x900C
+#define COLOR_BUFFER_SIZE(x) ((x) << 0)
+#define POSITION_BUFFER_SIZE(x) ((x) << 8)
+#define SMX_BUFFER_SIZE(x) ((x) << 16)
+#define SX_MISC 0x28350
+
+#define CB_PERF_CTR0_SEL_0 0x9A20
+#define CB_PERF_CTR0_SEL_1 0x9A24
+#define CB_PERF_CTR1_SEL_0 0x9A28
+#define CB_PERF_CTR1_SEL_1 0x9A2C
+#define CB_PERF_CTR2_SEL_0 0x9A30
+#define CB_PERF_CTR2_SEL_1 0x9A34
+#define CB_PERF_CTR3_SEL_0 0x9A38
+#define CB_PERF_CTR3_SEL_1 0x9A3C
+
+#define TA_CNTL_AUX 0x9508
+#define DISABLE_CUBE_WRAP (1 << 0)
+#define DISABLE_CUBE_ANISO (1 << 1)
+#define SYNC_GRADIENT (1 << 24)
+#define SYNC_WALKER (1 << 25)
+#define SYNC_ALIGNER (1 << 26)
+
+#define VGT_CACHE_INVALIDATION 0x88C4
+#define CACHE_INVALIDATION(x) ((x) << 0)
+#define VC_ONLY 0
+#define TC_ONLY 1
+#define VC_AND_TC 2
+#define AUTO_INVLD_EN(x) ((x) << 6)
+#define NO_AUTO 0
+#define ES_AUTO 1
+#define GS_AUTO 2
+#define ES_AND_GS_AUTO 3
+#define VGT_GS_VERTEX_REUSE 0x88D4
+#define VGT_NUM_INSTANCES 0x8974
+#define VGT_OUT_DEALLOC_CNTL 0x28C5C
+#define DEALLOC_DIST_MASK 0x0000007F
+#define VGT_VERTEX_REUSE_BLOCK_CNTL 0x28C58
+#define VTX_REUSE_DEPTH_MASK 0x000000FF
+
+#define VM_CONTEXT0_CNTL 0x1410
+#define ENABLE_CONTEXT (1 << 0)
+#define PAGE_TABLE_DEPTH(x) (((x) & 3) << 1)
+#define RANGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 4)
+#define VM_CONTEXT1_CNTL 0x1414
+#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR 0x153C
+#define VM_CONTEXT0_PAGE_TABLE_END_ADDR 0x157C
+#define VM_CONTEXT0_PAGE_TABLE_START_ADDR 0x155C
+#define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR 0x1518
+#define VM_CONTEXT0_REQUEST_RESPONSE 0x1470
+#define REQUEST_TYPE(x) (((x) & 0xf) << 0)
+#define RESPONSE_TYPE_MASK 0x000000F0
+#define RESPONSE_TYPE_SHIFT 4
+#define VM_L2_CNTL 0x1400
+#define ENABLE_L2_CACHE (1 << 0)
+#define ENABLE_L2_FRAGMENT_PROCESSING (1 << 1)
+#define ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE (1 << 9)
+#define EFFECTIVE_L2_QUEUE_SIZE(x) (((x) & 7) << 14)
+#define VM_L2_CNTL2 0x1404
+#define INVALIDATE_ALL_L1_TLBS (1 << 0)
+#define INVALIDATE_L2_CACHE (1 << 1)
+#define VM_L2_CNTL3 0x1408
+#define BANK_SELECT(x) ((x) << 0)
+#define CACHE_UPDATE_MODE(x) ((x) << 6)
+#define VM_L2_STATUS 0x140C
+#define L2_BUSY (1 << 0)
+
+#define WAIT_UNTIL 0x8040
+
+#define SRBM_STATUS 0x0E50
+#define SRBM_SOFT_RESET 0x0E60
+#define SRBM_SOFT_RESET_ALL_MASK 0x00FEEFA6
+#define SOFT_RESET_BIF (1 << 1)
+#define SOFT_RESET_CG (1 << 2)
+#define SOFT_RESET_DC (1 << 5)
+#define SOFT_RESET_GRBM (1 << 8)
+#define SOFT_RESET_HDP (1 << 9)
+#define SOFT_RESET_IH (1 << 10)
+#define SOFT_RESET_MC (1 << 11)
+#define SOFT_RESET_RLC (1 << 13)
+#define SOFT_RESET_ROM (1 << 14)
+#define SOFT_RESET_SEM (1 << 15)
+#define SOFT_RESET_VMC (1 << 17)
+#define SOFT_RESET_TST (1 << 21)
+#define SOFT_RESET_REGBB (1 << 22)
+#define SOFT_RESET_ORB (1 << 23)
+
+#define IH_RB_CNTL 0x3e00
+# define IH_RB_ENABLE (1 << 0)
+# define IH_IB_SIZE(x) ((x) << 1) /* log2 */
+# define IH_RB_FULL_DRAIN_ENABLE (1 << 6)
+# define IH_WPTR_WRITEBACK_ENABLE (1 << 8)
+# define IH_WPTR_WRITEBACK_TIMER(x) ((x) << 9) /* log2 */
+# define IH_WPTR_OVERFLOW_ENABLE (1 << 16)
+# define IH_WPTR_OVERFLOW_CLEAR (1 << 31)
+#define IH_RB_BASE 0x3e04
+#define IH_RB_RPTR 0x3e08
+#define IH_RB_WPTR 0x3e0c
+# define RB_OVERFLOW (1 << 0)
+# define WPTR_OFFSET_MASK 0x3fffc
+#define IH_RB_WPTR_ADDR_HI 0x3e10
+#define IH_RB_WPTR_ADDR_LO 0x3e14
+#define IH_CNTL 0x3e18
+# define ENABLE_INTR (1 << 0)
+# define IH_MC_SWAP(x) ((x) << 2)
+# define IH_MC_SWAP_NONE 0
+# define IH_MC_SWAP_16BIT 1
+# define IH_MC_SWAP_32BIT 2
+# define IH_MC_SWAP_64BIT 3
+# define RPTR_REARM (1 << 4)
+# define MC_WRREQ_CREDIT(x) ((x) << 15)
+# define MC_WR_CLEAN_CNT(x) ((x) << 20)
+
+#define CP_INT_CNTL 0xc124
+# define CNTX_BUSY_INT_ENABLE (1 << 19)
+# define CNTX_EMPTY_INT_ENABLE (1 << 20)
+# define SCRATCH_INT_ENABLE (1 << 25)
+# define TIME_STAMP_INT_ENABLE (1 << 26)
+# define IB2_INT_ENABLE (1 << 29)
+# define IB1_INT_ENABLE (1 << 30)
+# define RB_INT_ENABLE (1 << 31)
+#define CP_INT_STATUS 0xc128
+# define SCRATCH_INT_STAT (1 << 25)
+# define TIME_STAMP_INT_STAT (1 << 26)
+# define IB2_INT_STAT (1 << 29)
+# define IB1_INT_STAT (1 << 30)
+# define RB_INT_STAT (1 << 31)
+
+#define GRBM_INT_CNTL 0x8060
+# define RDERR_INT_ENABLE (1 << 0)
+# define GUI_IDLE_INT_ENABLE (1 << 19)
+
+/* 0x6e98, 0x7a98, 0x10698, 0x11298, 0x11e98, 0x12a98 */
+#define CRTC_STATUS_FRAME_COUNT 0x6e98
+
+/* 0x6bb8, 0x77b8, 0x103b8, 0x10fb8, 0x11bb8, 0x127b8 */
+#define VLINE_STATUS 0x6bb8
+# define VLINE_OCCURRED (1 << 0)
+# define VLINE_ACK (1 << 4)
+# define VLINE_STAT (1 << 12)
+# define VLINE_INTERRUPT (1 << 16)
+# define VLINE_INTERRUPT_TYPE (1 << 17)
+/* 0x6bbc, 0x77bc, 0x103bc, 0x10fbc, 0x11bbc, 0x127bc */
+#define VBLANK_STATUS 0x6bbc
+# define VBLANK_OCCURRED (1 << 0)
+# define VBLANK_ACK (1 << 4)
+# define VBLANK_STAT (1 << 12)
+# define VBLANK_INTERRUPT (1 << 16)
+# define VBLANK_INTERRUPT_TYPE (1 << 17)
+
+/* 0x6b40, 0x7740, 0x10340, 0x10f40, 0x11b40, 0x12740 */
+#define INT_MASK 0x6b40
+# define VBLANK_INT_MASK (1 << 0)
+# define VLINE_INT_MASK (1 << 4)
+
+#define DISP_INTERRUPT_STATUS 0x60f4
+# define LB_D1_VLINE_INTERRUPT (1 << 2)
+# define LB_D1_VBLANK_INTERRUPT (1 << 3)
+# define DC_HPD1_INTERRUPT (1 << 17)
+# define DC_HPD1_RX_INTERRUPT (1 << 18)
+# define DACA_AUTODETECT_INTERRUPT (1 << 22)
+# define DACB_AUTODETECT_INTERRUPT (1 << 23)
+# define DC_I2C_SW_DONE_INTERRUPT (1 << 24)
+# define DC_I2C_HW_DONE_INTERRUPT (1 << 25)
+#define DISP_INTERRUPT_STATUS_CONTINUE 0x60f8
+# define LB_D2_VLINE_INTERRUPT (1 << 2)
+# define LB_D2_VBLANK_INTERRUPT (1 << 3)
+# define DC_HPD2_INTERRUPT (1 << 17)
+# define DC_HPD2_RX_INTERRUPT (1 << 18)
+# define DISP_TIMER_INTERRUPT (1 << 24)
+#define DISP_INTERRUPT_STATUS_CONTINUE2 0x60fc
+# define LB_D3_VLINE_INTERRUPT (1 << 2)
+# define LB_D3_VBLANK_INTERRUPT (1 << 3)
+# define DC_HPD3_INTERRUPT (1 << 17)
+# define DC_HPD3_RX_INTERRUPT (1 << 18)
+#define DISP_INTERRUPT_STATUS_CONTINUE3 0x6100
+# define LB_D4_VLINE_INTERRUPT (1 << 2)
+# define LB_D4_VBLANK_INTERRUPT (1 << 3)
+# define DC_HPD4_INTERRUPT (1 << 17)
+# define DC_HPD4_RX_INTERRUPT (1 << 18)
+#define DISP_INTERRUPT_STATUS_CONTINUE4 0x614c
+# define LB_D5_VLINE_INTERRUPT (1 << 2)
+# define LB_D5_VBLANK_INTERRUPT (1 << 3)
+# define DC_HPD5_INTERRUPT (1 << 17)
+# define DC_HPD5_RX_INTERRUPT (1 << 18)
+#define DISP_INTERRUPT_STATUS_CONTINUE5 0x6150
+# define LB_D6_VLINE_INTERRUPT (1 << 2)
+# define LB_D6_VBLANK_INTERRUPT (1 << 3)
+# define DC_HPD6_INTERRUPT (1 << 17)
+# define DC_HPD6_RX_INTERRUPT (1 << 18)
+
+/* 0x6858, 0x7458, 0x10058, 0x10c58, 0x11858, 0x12458 */
+#define GRPH_INT_STATUS 0x6858
+# define GRPH_PFLIP_INT_OCCURRED (1 << 0)
+# define GRPH_PFLIP_INT_CLEAR (1 << 8)
+/* 0x685c, 0x745c, 0x1005c, 0x10c5c, 0x1185c, 0x1245c */
+#define GRPH_INT_CONTROL 0x685c
+# define GRPH_PFLIP_INT_MASK (1 << 0)
+# define GRPH_PFLIP_INT_TYPE (1 << 8)
+
+#define DACA_AUTODETECT_INT_CONTROL 0x66c8
+#define DACB_AUTODETECT_INT_CONTROL 0x67c8
+
+#define DC_HPD1_INT_STATUS 0x601c
+#define DC_HPD2_INT_STATUS 0x6028
+#define DC_HPD3_INT_STATUS 0x6034
+#define DC_HPD4_INT_STATUS 0x6040
+#define DC_HPD5_INT_STATUS 0x604c
+#define DC_HPD6_INT_STATUS 0x6058
+# define DC_HPDx_INT_STATUS (1 << 0)
+# define DC_HPDx_SENSE (1 << 1)
+# define DC_HPDx_RX_INT_STATUS (1 << 8)
+
+#define DC_HPD1_INT_CONTROL 0x6020
+#define DC_HPD2_INT_CONTROL 0x602c
+#define DC_HPD3_INT_CONTROL 0x6038
+#define DC_HPD4_INT_CONTROL 0x6044
+#define DC_HPD5_INT_CONTROL 0x6050
+#define DC_HPD6_INT_CONTROL 0x605c
+# define DC_HPDx_INT_ACK (1 << 0)
+# define DC_HPDx_INT_POLARITY (1 << 8)
+# define DC_HPDx_INT_EN (1 << 16)
+# define DC_HPDx_RX_INT_ACK (1 << 20)
+# define DC_HPDx_RX_INT_EN (1 << 24)
+
+#define DC_HPD1_CONTROL 0x6024
+#define DC_HPD2_CONTROL 0x6030
+#define DC_HPD3_CONTROL 0x603c
+#define DC_HPD4_CONTROL 0x6048
+#define DC_HPD5_CONTROL 0x6054
+#define DC_HPD6_CONTROL 0x6060
+# define DC_HPDx_CONNECTION_TIMER(x) ((x) << 0)
+# define DC_HPDx_RX_INT_TIMER(x) ((x) << 16)
+# define DC_HPDx_EN (1 << 28)
+
+#endif
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index cf60c0b3ef15..cc004b05d63e 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -37,6 +37,7 @@
#include "rs100d.h"
#include "rv200d.h"
#include "rv250d.h"
+#include "atom.h"
#include <linux/firmware.h>
#include <linux/platform_device.h>
@@ -67,6 +68,264 @@ MODULE_FIRMWARE(FIRMWARE_R520);
* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
*/
+void r100_pm_get_dynpm_state(struct radeon_device *rdev)
+{
+ int i;
+ rdev->pm.dynpm_can_upclock = true;
+ rdev->pm.dynpm_can_downclock = true;
+
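+	/* pick the requested power state index according to the planned dynpm action */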
+ switch (rdev->pm.dynpm_planned_action) {
+ case DYNPM_ACTION_MINIMUM:
+ rdev->pm.requested_power_state_index = 0;
+ rdev->pm.dynpm_can_downclock = false;
+ break;
+ case DYNPM_ACTION_DOWNCLOCK:
+ if (rdev->pm.current_power_state_index == 0) {
+ rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
+ rdev->pm.dynpm_can_downclock = false;
+ } else {
+ if (rdev->pm.active_crtc_count > 1) {
+ for (i = 0; i < rdev->pm.num_power_states; i++) {
+ if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
+ continue;
+ else if (i >= rdev->pm.current_power_state_index) {
+ rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
+ break;
+ } else {
+ rdev->pm.requested_power_state_index = i;
+ break;
+ }
+ }
+ } else
+ rdev->pm.requested_power_state_index =
+ rdev->pm.current_power_state_index - 1;
+ }
+ /* don't use the power state if crtcs are active and no display flag is set */
+ if ((rdev->pm.active_crtc_count > 0) &&
+ (rdev->pm.power_state[rdev->pm.requested_power_state_index].clock_info[0].flags &
+ RADEON_PM_MODE_NO_DISPLAY)) {
+ rdev->pm.requested_power_state_index++;
+ }
+ break;
+ case DYNPM_ACTION_UPCLOCK:
+ if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
+ rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
+ rdev->pm.dynpm_can_upclock = false;
+ } else {
+ if (rdev->pm.active_crtc_count > 1) {
+ for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
+ if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
+ continue;
+ else if (i <= rdev->pm.current_power_state_index) {
+ rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
+ break;
+ } else {
+ rdev->pm.requested_power_state_index = i;
+ break;
+ }
+ }
+ } else
+ rdev->pm.requested_power_state_index =
+ rdev->pm.current_power_state_index + 1;
+ }
+ break;
+ case DYNPM_ACTION_DEFAULT:
+ rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
+ rdev->pm.dynpm_can_upclock = false;
+ break;
+ case DYNPM_ACTION_NONE:
+ default:
+		DRM_ERROR("Requested mode for undefined action\n");
+ return;
+ }
+ /* only one clock mode per power state */
+ rdev->pm.requested_clock_mode_index = 0;
+
+ DRM_DEBUG("Requested: e: %d m: %d p: %d\n",
+ rdev->pm.power_state[rdev->pm.requested_power_state_index].
+ clock_info[rdev->pm.requested_clock_mode_index].sclk,
+ rdev->pm.power_state[rdev->pm.requested_power_state_index].
+ clock_info[rdev->pm.requested_clock_mode_index].mclk,
+ rdev->pm.power_state[rdev->pm.requested_power_state_index].
+ pcie_lanes);
+}
+
+void r100_pm_init_profile(struct radeon_device *rdev)
+{
+ /* default */
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
+ /* low sh */
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+ /* high sh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
+ /* low mh */
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+ /* high mh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
+}
+
+void r100_pm_misc(struct radeon_device *rdev)
+{
+ int requested_index = rdev->pm.requested_power_state_index;
+ struct radeon_power_state *ps = &rdev->pm.power_state[requested_index];
+ struct radeon_voltage *voltage = &ps->clock_info[0].voltage;
+ u32 tmp, sclk_cntl, sclk_cntl2, sclk_more_cntl;
+
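+	/* drive the voltage GPIO according to whether this state asks for a voltage drop */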
+ if ((voltage->type == VOLTAGE_GPIO) && (voltage->gpio.valid)) {
+ if (ps->misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
+ tmp = RREG32(voltage->gpio.reg);
+ if (voltage->active_high)
+ tmp |= voltage->gpio.mask;
+ else
+ tmp &= ~(voltage->gpio.mask);
+ WREG32(voltage->gpio.reg, tmp);
+ if (voltage->delay)
+ udelay(voltage->delay);
+ } else {
+ tmp = RREG32(voltage->gpio.reg);
+ if (voltage->active_high)
+ tmp &= ~voltage->gpio.mask;
+ else
+ tmp |= voltage->gpio.mask;
+ WREG32(voltage->gpio.reg, tmp);
+ if (voltage->delay)
+ udelay(voltage->delay);
+ }
+ }
+
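+	/* program the reduced-speed SCLK, voltage-drop and dynamic HDP behaviour
+	 * requested by this power state
+	 */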
+ sclk_cntl = RREG32_PLL(SCLK_CNTL);
+ sclk_cntl2 = RREG32_PLL(SCLK_CNTL2);
+ sclk_cntl2 &= ~REDUCED_SPEED_SCLK_SEL(3);
+ sclk_more_cntl = RREG32_PLL(SCLK_MORE_CNTL);
+ sclk_more_cntl &= ~VOLTAGE_DELAY_SEL(3);
+ if (ps->misc & ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN) {
+ sclk_more_cntl |= REDUCED_SPEED_SCLK_EN;
+ if (ps->misc & ATOM_PM_MISCINFO_DYN_CLK_3D_IDLE)
+ sclk_cntl2 |= REDUCED_SPEED_SCLK_MODE;
+ else
+ sclk_cntl2 &= ~REDUCED_SPEED_SCLK_MODE;
+ if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2)
+ sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(0);
+ else if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4)
+ sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(2);
+ } else
+ sclk_more_cntl &= ~REDUCED_SPEED_SCLK_EN;
+
+ if (ps->misc & ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN) {
+ sclk_more_cntl |= IO_CG_VOLTAGE_DROP;
+ if (voltage->delay) {
+ sclk_more_cntl |= VOLTAGE_DROP_SYNC;
+ switch (voltage->delay) {
+ case 33:
+ sclk_more_cntl |= VOLTAGE_DELAY_SEL(0);
+ break;
+ case 66:
+ sclk_more_cntl |= VOLTAGE_DELAY_SEL(1);
+ break;
+ case 99:
+ sclk_more_cntl |= VOLTAGE_DELAY_SEL(2);
+ break;
+ case 132:
+ sclk_more_cntl |= VOLTAGE_DELAY_SEL(3);
+ break;
+ }
+ } else
+ sclk_more_cntl &= ~VOLTAGE_DROP_SYNC;
+ } else
+ sclk_more_cntl &= ~IO_CG_VOLTAGE_DROP;
+
+ if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN)
+ sclk_cntl &= ~FORCE_HDP;
+ else
+ sclk_cntl |= FORCE_HDP;
+
+ WREG32_PLL(SCLK_CNTL, sclk_cntl);
+ WREG32_PLL(SCLK_CNTL2, sclk_cntl2);
+ WREG32_PLL(SCLK_MORE_CNTL, sclk_more_cntl);
+
+ /* set pcie lanes */
+ if ((rdev->flags & RADEON_IS_PCIE) &&
+ !(rdev->flags & RADEON_IS_IGP) &&
+ rdev->asic->set_pcie_lanes &&
+ (ps->pcie_lanes !=
+ rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) {
+ radeon_set_pcie_lanes(rdev,
+ ps->pcie_lanes);
+ DRM_DEBUG("Setting: p: %d\n", ps->pcie_lanes);
+ }
+}
+
+void r100_pm_prepare(struct radeon_device *rdev)
+{
+ struct drm_device *ddev = rdev->ddev;
+ struct drm_crtc *crtc;
+ struct radeon_crtc *radeon_crtc;
+ u32 tmp;
+
+ /* disable any active CRTCs */
+ list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
+ radeon_crtc = to_radeon_crtc(crtc);
+ if (radeon_crtc->enabled) {
+ if (radeon_crtc->crtc_id) {
+ tmp = RREG32(RADEON_CRTC2_GEN_CNTL);
+ tmp |= RADEON_CRTC2_DISP_REQ_EN_B;
+ WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
+ } else {
+ tmp = RREG32(RADEON_CRTC_GEN_CNTL);
+ tmp |= RADEON_CRTC_DISP_REQ_EN_B;
+ WREG32(RADEON_CRTC_GEN_CNTL, tmp);
+ }
+ }
+ }
+}
+
+void r100_pm_finish(struct radeon_device *rdev)
+{
+ struct drm_device *ddev = rdev->ddev;
+ struct drm_crtc *crtc;
+ struct radeon_crtc *radeon_crtc;
+ u32 tmp;
+
+ /* enable any active CRTCs */
+ list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
+ radeon_crtc = to_radeon_crtc(crtc);
+ if (radeon_crtc->enabled) {
+ if (radeon_crtc->crtc_id) {
+ tmp = RREG32(RADEON_CRTC2_GEN_CNTL);
+ tmp &= ~RADEON_CRTC2_DISP_REQ_EN_B;
+ WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
+ } else {
+ tmp = RREG32(RADEON_CRTC_GEN_CNTL);
+ tmp &= ~RADEON_CRTC_DISP_REQ_EN_B;
+ WREG32(RADEON_CRTC_GEN_CNTL, tmp);
+ }
+ }
+ }
+}
+
+bool r100_gui_idle(struct radeon_device *rdev)
+{
+ if (RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_ACTIVE)
+ return false;
+ else
+ return true;
+}
+
/* hpd for digital panel detect/disconnect */
bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
@@ -254,6 +513,9 @@ int r100_irq_set(struct radeon_device *rdev)
if (rdev->irq.sw_int) {
tmp |= RADEON_SW_INT_ENABLE;
}
+ if (rdev->irq.gui_idle) {
+ tmp |= RADEON_GUI_IDLE_MASK;
+ }
if (rdev->irq.crtc_vblank_int[0]) {
tmp |= RADEON_CRTC_VBLANK_MASK;
}
@@ -288,6 +550,12 @@ static inline uint32_t r100_irq_ack(struct radeon_device *rdev)
RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT |
RADEON_FP_DETECT_STAT | RADEON_FP2_DETECT_STAT;
+ /* the interrupt works, but the status bit is permanently asserted */
+ if (rdev->irq.gui_idle && radeon_gui_idle(rdev)) {
+ if (!rdev->irq.gui_idle_acked)
+ irq_mask |= RADEON_GUI_IDLE_STAT;
+ }
+
if (irqs) {
WREG32(RADEON_GEN_INT_STATUS, irqs);
}
@@ -299,6 +567,9 @@ int r100_irq_process(struct radeon_device *rdev)
uint32_t status, msi_rearm;
bool queue_hotplug = false;
+ /* reset gui idle ack. the status bit is broken */
+ rdev->irq.gui_idle_acked = false;
+
status = r100_irq_ack(rdev);
if (!status) {
return IRQ_NONE;
@@ -311,6 +582,12 @@ int r100_irq_process(struct radeon_device *rdev)
if (status & RADEON_SW_INT_TEST) {
radeon_fence_process(rdev);
}
+ /* gui idle interrupt */
+ if (status & RADEON_GUI_IDLE_STAT) {
+ rdev->irq.gui_idle_acked = true;
+ rdev->pm.gui_idle = true;
+ wake_up(&rdev->irq.idle_queue);
+ }
/* Vertical blank interrupts */
if (status & RADEON_CRTC_VBLANK_STAT) {
drm_handle_vblank(rdev->ddev, 0);
@@ -332,6 +609,8 @@ int r100_irq_process(struct radeon_device *rdev)
}
status = r100_irq_ack(rdev);
}
+ /* reset gui idle ack. the status bit is broken */
+ rdev->irq.gui_idle_acked = false;
if (queue_hotplug)
queue_work(rdev->wq, &rdev->hotplug_work);
if (rdev->msi_enabled) {
@@ -663,26 +942,6 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
if (r100_debugfs_cp_init(rdev)) {
DRM_ERROR("Failed to register debugfs file for CP !\n");
}
- /* Reset CP */
- tmp = RREG32(RADEON_CP_CSQ_STAT);
- if ((tmp & (1 << 31))) {
- DRM_INFO("radeon: cp busy (0x%08X) resetting\n", tmp);
- WREG32(RADEON_CP_CSQ_MODE, 0);
- WREG32(RADEON_CP_CSQ_CNTL, 0);
- WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_CP);
- tmp = RREG32(RADEON_RBBM_SOFT_RESET);
- mdelay(2);
- WREG32(RADEON_RBBM_SOFT_RESET, 0);
- tmp = RREG32(RADEON_RBBM_SOFT_RESET);
- mdelay(2);
- tmp = RREG32(RADEON_CP_CSQ_STAT);
- if ((tmp & (1 << 31))) {
- DRM_INFO("radeon: cp reset failed (0x%08X)\n", tmp);
- }
- } else {
- DRM_INFO("radeon: cp idle (0x%08X)\n", tmp);
- }
-
if (!rdev->me_fw) {
r = r100_cp_init_microcode(rdev);
if (r) {
@@ -787,39 +1046,6 @@ void r100_cp_disable(struct radeon_device *rdev)
}
}
-int r100_cp_reset(struct radeon_device *rdev)
-{
- uint32_t tmp;
- bool reinit_cp;
- int i;
-
- reinit_cp = rdev->cp.ready;
- rdev->cp.ready = false;
- WREG32(RADEON_CP_CSQ_MODE, 0);
- WREG32(RADEON_CP_CSQ_CNTL, 0);
- WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_CP);
- (void)RREG32(RADEON_RBBM_SOFT_RESET);
- udelay(200);
- WREG32(RADEON_RBBM_SOFT_RESET, 0);
- /* Wait to prevent race in RBBM_STATUS */
- mdelay(1);
- for (i = 0; i < rdev->usec_timeout; i++) {
- tmp = RREG32(RADEON_RBBM_STATUS);
- if (!(tmp & (1 << 16))) {
- DRM_INFO("CP reset succeed (RBBM_STATUS=0x%08X)\n",
- tmp);
- if (reinit_cp) {
- return r100_cp_init(rdev, rdev->cp.ring_size);
- }
- return 0;
- }
- DRM_UDELAY(1);
- }
- tmp = RREG32(RADEON_RBBM_STATUS);
- DRM_ERROR("Failed to reset CP (RBBM_STATUS=0x%08X)!\n", tmp);
- return -1;
-}
-
void r100_cp_commit(struct radeon_device *rdev)
{
WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr);
@@ -1733,76 +1959,163 @@ int r100_mc_wait_for_idle(struct radeon_device *rdev)
return -1;
}
-void r100_gpu_init(struct radeon_device *rdev)
+void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_cp *cp)
{
- /* TODO: anythings to do here ? pipes ? */
- r100_hdp_reset(rdev);
+ lockup->last_cp_rptr = cp->rptr;
+ lockup->last_jiffies = jiffies;
+}
+
+/**
+ * r100_gpu_cp_is_lockup() - check whether the CP is locked up by tracking its progress
+ * @rdev: radeon device structure
+ * @lockup: r100_gpu_lockup structure holding CP lockup tracking information
+ * @cp: radeon_cp structure holding CP information
+ *
+ * We don't need to initialize the lockup tracking information: either the CP
+ * rptr will have moved to a different value, or jiffies will have wrapped
+ * around, and both cases force the tracking information to be reinitialized.
+ *
+ * A false positive is possible if we get called again after a long while and
+ * last_cp_rptr happens to equal the current CP rptr; although unlikely, it can
+ * happen. To avoid this, if the elapsed time since the last call is larger
+ * than 3 seconds we return false and update the tracking information. As a
+ * consequence the caller must call r100_gpu_cp_is_lockup several times within
+ * less than 3 seconds for a lockup to be reported; the fencing code should be
+ * cautious about that.
+ *
+ * The caller should write to the ring to force the CP to do something, so we
+ * don't get a false positive when the CP simply has nothing to do.
+ */
+bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *lockup, struct radeon_cp *cp)
+{
+ unsigned long cjiffies, elapsed;
+
+ cjiffies = jiffies;
+ if (!time_after(cjiffies, lockup->last_jiffies)) {
+ /* likely a wrap around */
+ lockup->last_cp_rptr = cp->rptr;
+ lockup->last_jiffies = jiffies;
+ return false;
+ }
+ if (cp->rptr != lockup->last_cp_rptr) {
+ /* CP is still working no lockup */
+ lockup->last_cp_rptr = cp->rptr;
+ lockup->last_jiffies = jiffies;
+ return false;
+ }
+ elapsed = jiffies_to_msecs(cjiffies - lockup->last_jiffies);
+ if (elapsed >= 3000) {
+		/* Very likely the improbable case where the current rptr equals
+		 * an rptr recorded a while ago. Treat it as a false positive and
+		 * update the tracking information, which forces us to be called
+		 * again at a later point.
+		 */
+ lockup->last_cp_rptr = cp->rptr;
+ lockup->last_jiffies = jiffies;
+ return false;
+ }
+ if (elapsed >= 1000) {
+		dev_err(rdev->dev, "GPU lockup CP stall for more than %lu msec\n", elapsed);
+ return true;
+ }
+ /* give a chance to the GPU ... */
+ return false;
}
-void r100_hdp_reset(struct radeon_device *rdev)
+bool r100_gpu_is_lockup(struct radeon_device *rdev)
{
- uint32_t tmp;
+ u32 rbbm_status;
+ int r;
- tmp = RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL;
- tmp |= (7 << 28);
- WREG32(RADEON_HOST_PATH_CNTL, tmp | RADEON_HDP_SOFT_RESET | RADEON_HDP_READ_BUFFER_INVALIDATE);
- (void)RREG32(RADEON_HOST_PATH_CNTL);
- udelay(200);
- WREG32(RADEON_RBBM_SOFT_RESET, 0);
- WREG32(RADEON_HOST_PATH_CNTL, tmp);
- (void)RREG32(RADEON_HOST_PATH_CNTL);
+ rbbm_status = RREG32(R_000E40_RBBM_STATUS);
+ if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
+ r100_gpu_lockup_update(&rdev->config.r100.lockup, &rdev->cp);
+ return false;
+ }
+ /* force CP activities */
+ r = radeon_ring_lock(rdev, 2);
+ if (!r) {
+ /* PACKET2 NOP */
+ radeon_ring_write(rdev, 0x80000000);
+ radeon_ring_write(rdev, 0x80000000);
+ radeon_ring_unlock_commit(rdev);
+ }
+ rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
+ return r100_gpu_cp_is_lockup(rdev, &rdev->config.r100.lockup, &rdev->cp);
}
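
A minimal sketch of how a fence-timeout path might use the check above, assuming a per-asic radeon_gpu_is_lockup() dispatch wrapper plus the existing radeon_fence_signaled() and radeon_asic_reset() helpers (the function name and error handling below are illustrative only, not part of this diff):

/* Poll the lockup detector while a fence is overdue; the detector only
 * reports a lockup after it has seen the CP rptr stall across calls,
 * and a full asic reset is attempted only at that point.
 */
static int example_wait_for_fence(struct radeon_device *rdev,
				  struct radeon_fence *fence)
{
	while (!radeon_fence_signaled(fence)) {
		if (radeon_gpu_is_lockup(rdev)) {
			/* assumed dispatch to r100_gpu_is_lockup() */
			if (radeon_asic_reset(rdev))
				return -EDEADLK; /* reset failed */
		}
		msleep(10);
	}
	return 0;
}
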
-int r100_rb2d_reset(struct radeon_device *rdev)
+void r100_bm_disable(struct radeon_device *rdev)
{
- uint32_t tmp;
- int i;
+ u32 tmp;
- WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_E2);
- (void)RREG32(RADEON_RBBM_SOFT_RESET);
- udelay(200);
- WREG32(RADEON_RBBM_SOFT_RESET, 0);
- /* Wait to prevent race in RBBM_STATUS */
+ /* disable bus mastering */
+ tmp = RREG32(R_000030_BUS_CNTL);
+ WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000044);
+ mdelay(1);
+ WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000042);
+ mdelay(1);
+ WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000040);
+ tmp = RREG32(RADEON_BUS_CNTL);
+ mdelay(1);
+ pci_read_config_word(rdev->pdev, 0x4, (u16*)&tmp);
+ pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB);
mdelay(1);
- for (i = 0; i < rdev->usec_timeout; i++) {
- tmp = RREG32(RADEON_RBBM_STATUS);
- if (!(tmp & (1 << 26))) {
- DRM_INFO("RB2D reset succeed (RBBM_STATUS=0x%08X)\n",
- tmp);
- return 0;
- }
- DRM_UDELAY(1);
- }
- tmp = RREG32(RADEON_RBBM_STATUS);
- DRM_ERROR("Failed to reset RB2D (RBBM_STATUS=0x%08X)!\n", tmp);
- return -1;
}
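
The config-space write above clears bit 2 (Bus Master Enable) of the PCI command register at offset 0x4; the 0xFFFB mask is ~PCI_COMMAND_MASTER. A standalone sketch of the same operation using the generic PCI constants, shown only as an illustration of what the raw write does rather than what the patch itself uses:

#include <linux/pci.h>

/* Stop the device from issuing DMA by clearing Bus Master Enable in the
 * PCI command register; equivalent to the raw 0xFFFB mask used above.
 */
static void example_disable_bus_mastering(struct pci_dev *pdev)
{
	u16 cmd;

	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
	pci_write_config_word(pdev, PCI_COMMAND, cmd & ~PCI_COMMAND_MASTER);
	/* pci_clear_master(pdev) would achieve the same effect */
}
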
-int r100_gpu_reset(struct radeon_device *rdev)
+int r100_asic_reset(struct radeon_device *rdev)
{
- uint32_t status;
+ struct r100_mc_save save;
+ u32 status, tmp;
- /* reset order likely matter */
- status = RREG32(RADEON_RBBM_STATUS);
- /* reset HDP */
- r100_hdp_reset(rdev);
- /* reset rb2d */
- if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
- r100_rb2d_reset(rdev);
+ r100_mc_stop(rdev, &save);
+ status = RREG32(R_000E40_RBBM_STATUS);
+ if (!G_000E40_GUI_ACTIVE(status)) {
+ return 0;
}
- /* TODO: reset 3D engine */
+ status = RREG32(R_000E40_RBBM_STATUS);
+ dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+ /* stop CP */
+ WREG32(RADEON_CP_CSQ_CNTL, 0);
+ tmp = RREG32(RADEON_CP_RB_CNTL);
+ WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
+ WREG32(RADEON_CP_RB_RPTR_WR, 0);
+ WREG32(RADEON_CP_RB_WPTR, 0);
+ WREG32(RADEON_CP_RB_CNTL, tmp);
+ /* save PCI state */
+ pci_save_state(rdev->pdev);
+ /* disable bus mastering */
+ r100_bm_disable(rdev);
+ WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_SE(1) |
+ S_0000F0_SOFT_RESET_RE(1) |
+ S_0000F0_SOFT_RESET_PP(1) |
+ S_0000F0_SOFT_RESET_RB(1));
+ RREG32(R_0000F0_RBBM_SOFT_RESET);
+ mdelay(500);
+ WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+ mdelay(1);
+ status = RREG32(R_000E40_RBBM_STATUS);
+ dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
/* reset CP */
- status = RREG32(RADEON_RBBM_STATUS);
- if (status & (1 << 16)) {
- r100_cp_reset(rdev);
- }
+ WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
+ RREG32(R_0000F0_RBBM_SOFT_RESET);
+ mdelay(500);
+ WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+ mdelay(1);
+ status = RREG32(R_000E40_RBBM_STATUS);
+ dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+ /* restore PCI & busmastering */
+ pci_restore_state(rdev->pdev);
+ r100_enable_bm(rdev);
/* Check if GPU is idle */
- status = RREG32(RADEON_RBBM_STATUS);
- if (status & RADEON_RBBM_ACTIVE) {
- DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
+ if (G_000E40_SE_BUSY(status) || G_000E40_RE_BUSY(status) ||
+ G_000E40_TAM_BUSY(status) || G_000E40_PB_BUSY(status)) {
+ dev_err(rdev->dev, "failed to reset GPU\n");
+ rdev->gpu_lockup = true;
return -1;
}
- DRM_INFO("GPU reset succeed (RBBM_STATUS=0x%08X)\n", status);
+ r100_mc_resume(rdev, &save);
+ dev_info(rdev->dev, "GPU reset succeed\n");
return 0;
}
@@ -2002,11 +2315,6 @@ void r100_vram_init_sizes(struct radeon_device *rdev)
else
rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
}
- /* FIXME remove this once we support unmappable VRAM */
- if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
- rdev->mc.mc_vram_size = rdev->mc.aper_size;
- rdev->mc.real_vram_size = rdev->mc.aper_size;
- }
}
void r100_vga_set_state(struct radeon_device *rdev, bool state)
@@ -2335,53 +2643,53 @@ void r100_bandwidth_update(struct radeon_device *rdev)
fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff, crit_point_ff;
uint32_t temp, data, mem_trcd, mem_trp, mem_tras;
fixed20_12 memtcas_ff[8] = {
- fixed_init(1),
- fixed_init(2),
- fixed_init(3),
- fixed_init(0),
- fixed_init_half(1),
- fixed_init_half(2),
- fixed_init(0),
+ dfixed_init(1),
+ dfixed_init(2),
+ dfixed_init(3),
+ dfixed_init(0),
+ dfixed_init_half(1),
+ dfixed_init_half(2),
+ dfixed_init(0),
};
fixed20_12 memtcas_rs480_ff[8] = {
- fixed_init(0),
- fixed_init(1),
- fixed_init(2),
- fixed_init(3),
- fixed_init(0),
- fixed_init_half(1),
- fixed_init_half(2),
- fixed_init_half(3),
+ dfixed_init(0),
+ dfixed_init(1),
+ dfixed_init(2),
+ dfixed_init(3),
+ dfixed_init(0),
+ dfixed_init_half(1),
+ dfixed_init_half(2),
+ dfixed_init_half(3),
};
fixed20_12 memtcas2_ff[8] = {
- fixed_init(0),
- fixed_init(1),
- fixed_init(2),
- fixed_init(3),
- fixed_init(4),
- fixed_init(5),
- fixed_init(6),
- fixed_init(7),
+ dfixed_init(0),
+ dfixed_init(1),
+ dfixed_init(2),
+ dfixed_init(3),
+ dfixed_init(4),
+ dfixed_init(5),
+ dfixed_init(6),
+ dfixed_init(7),
};
fixed20_12 memtrbs[8] = {
- fixed_init(1),
- fixed_init_half(1),
- fixed_init(2),
- fixed_init_half(2),
- fixed_init(3),
- fixed_init_half(3),
- fixed_init(4),
- fixed_init_half(4)
+ dfixed_init(1),
+ dfixed_init_half(1),
+ dfixed_init(2),
+ dfixed_init_half(2),
+ dfixed_init(3),
+ dfixed_init_half(3),
+ dfixed_init(4),
+ dfixed_init_half(4)
};
fixed20_12 memtrbs_r4xx[8] = {
- fixed_init(4),
- fixed_init(5),
- fixed_init(6),
- fixed_init(7),
- fixed_init(8),
- fixed_init(9),
- fixed_init(10),
- fixed_init(11)
+ dfixed_init(4),
+ dfixed_init(5),
+ dfixed_init(6),
+ dfixed_init(7),
+ dfixed_init(8),
+ dfixed_init(9),
+ dfixed_init(10),
+ dfixed_init(11)
};
fixed20_12 min_mem_eff;
fixed20_12 mc_latency_sclk, mc_latency_mclk, k1;
@@ -2412,7 +2720,7 @@ void r100_bandwidth_update(struct radeon_device *rdev)
}
}
- min_mem_eff.full = rfixed_const_8(0);
+ min_mem_eff.full = dfixed_const_8(0);
/* get modes */
if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) {
uint32_t mc_init_misc_lat_timer = RREG32(R300_MC_INIT_MISC_LAT_TIMER);
@@ -2433,28 +2741,28 @@ void r100_bandwidth_update(struct radeon_device *rdev)
mclk_ff = rdev->pm.mclk;
temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1);
- temp_ff.full = rfixed_const(temp);
- mem_bw.full = rfixed_mul(mclk_ff, temp_ff);
+ temp_ff.full = dfixed_const(temp);
+ mem_bw.full = dfixed_mul(mclk_ff, temp_ff);
pix_clk.full = 0;
pix_clk2.full = 0;
peak_disp_bw.full = 0;
if (mode1) {
- temp_ff.full = rfixed_const(1000);
- pix_clk.full = rfixed_const(mode1->clock); /* convert to fixed point */
- pix_clk.full = rfixed_div(pix_clk, temp_ff);
- temp_ff.full = rfixed_const(pixel_bytes1);
- peak_disp_bw.full += rfixed_mul(pix_clk, temp_ff);
+ temp_ff.full = dfixed_const(1000);
+ pix_clk.full = dfixed_const(mode1->clock); /* convert to fixed point */
+ pix_clk.full = dfixed_div(pix_clk, temp_ff);
+ temp_ff.full = dfixed_const(pixel_bytes1);
+ peak_disp_bw.full += dfixed_mul(pix_clk, temp_ff);
}
if (mode2) {
- temp_ff.full = rfixed_const(1000);
- pix_clk2.full = rfixed_const(mode2->clock); /* convert to fixed point */
- pix_clk2.full = rfixed_div(pix_clk2, temp_ff);
- temp_ff.full = rfixed_const(pixel_bytes2);
- peak_disp_bw.full += rfixed_mul(pix_clk2, temp_ff);
+ temp_ff.full = dfixed_const(1000);
+ pix_clk2.full = dfixed_const(mode2->clock); /* convert to fixed point */
+ pix_clk2.full = dfixed_div(pix_clk2, temp_ff);
+ temp_ff.full = dfixed_const(pixel_bytes2);
+ peak_disp_bw.full += dfixed_mul(pix_clk2, temp_ff);
}
- mem_bw.full = rfixed_mul(mem_bw, min_mem_eff);
+ mem_bw.full = dfixed_mul(mem_bw, min_mem_eff);
if (peak_disp_bw.full >= mem_bw.full) {
DRM_ERROR("You may not have enough display bandwidth for current mode\n"
"If you have flickering problem, try to lower resolution, refresh rate, or color depth\n");
@@ -2496,9 +2804,9 @@ void r100_bandwidth_update(struct radeon_device *rdev)
mem_tras = ((temp >> 12) & 0xf) + 4;
}
/* convert to FF */
- trcd_ff.full = rfixed_const(mem_trcd);
- trp_ff.full = rfixed_const(mem_trp);
- tras_ff.full = rfixed_const(mem_tras);
+ trcd_ff.full = dfixed_const(mem_trcd);
+ trp_ff.full = dfixed_const(mem_trp);
+ tras_ff.full = dfixed_const(mem_tras);
/* Get values from the MEM_SDRAM_MODE_REG register...converting its */
temp = RREG32(RADEON_MEM_SDRAM_MODE_REG);
@@ -2516,7 +2824,7 @@ void r100_bandwidth_update(struct radeon_device *rdev)
/* extra cas latency stored in bits 23-25 0-4 clocks */
data = (temp >> 23) & 0x7;
if (data < 5)
- tcas_ff.full += rfixed_const(data);
+ tcas_ff.full += dfixed_const(data);
}
if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) {
@@ -2553,72 +2861,72 @@ void r100_bandwidth_update(struct radeon_device *rdev)
if (rdev->flags & RADEON_IS_AGP) {
fixed20_12 agpmode_ff;
- agpmode_ff.full = rfixed_const(radeon_agpmode);
- temp_ff.full = rfixed_const_666(16);
- sclk_eff_ff.full -= rfixed_mul(agpmode_ff, temp_ff);
+ agpmode_ff.full = dfixed_const(radeon_agpmode);
+ temp_ff.full = dfixed_const_666(16);
+ sclk_eff_ff.full -= dfixed_mul(agpmode_ff, temp_ff);
}
/* TODO PCIE lanes may affect this - agpmode == 16?? */
if (ASIC_IS_R300(rdev)) {
- sclk_delay_ff.full = rfixed_const(250);
+ sclk_delay_ff.full = dfixed_const(250);
} else {
if ((rdev->family == CHIP_RV100) ||
rdev->flags & RADEON_IS_IGP) {
if (rdev->mc.vram_is_ddr)
- sclk_delay_ff.full = rfixed_const(41);
+ sclk_delay_ff.full = dfixed_const(41);
else
- sclk_delay_ff.full = rfixed_const(33);
+ sclk_delay_ff.full = dfixed_const(33);
} else {
if (rdev->mc.vram_width == 128)
- sclk_delay_ff.full = rfixed_const(57);
+ sclk_delay_ff.full = dfixed_const(57);
else
- sclk_delay_ff.full = rfixed_const(41);
+ sclk_delay_ff.full = dfixed_const(41);
}
}
- mc_latency_sclk.full = rfixed_div(sclk_delay_ff, sclk_eff_ff);
+ mc_latency_sclk.full = dfixed_div(sclk_delay_ff, sclk_eff_ff);
if (rdev->mc.vram_is_ddr) {
if (rdev->mc.vram_width == 32) {
- k1.full = rfixed_const(40);
+ k1.full = dfixed_const(40);
c = 3;
} else {
- k1.full = rfixed_const(20);
+ k1.full = dfixed_const(20);
c = 1;
}
} else {
- k1.full = rfixed_const(40);
+ k1.full = dfixed_const(40);
c = 3;
}
- temp_ff.full = rfixed_const(2);
- mc_latency_mclk.full = rfixed_mul(trcd_ff, temp_ff);
- temp_ff.full = rfixed_const(c);
- mc_latency_mclk.full += rfixed_mul(tcas_ff, temp_ff);
- temp_ff.full = rfixed_const(4);
- mc_latency_mclk.full += rfixed_mul(tras_ff, temp_ff);
- mc_latency_mclk.full += rfixed_mul(trp_ff, temp_ff);
+ temp_ff.full = dfixed_const(2);
+ mc_latency_mclk.full = dfixed_mul(trcd_ff, temp_ff);
+ temp_ff.full = dfixed_const(c);
+ mc_latency_mclk.full += dfixed_mul(tcas_ff, temp_ff);
+ temp_ff.full = dfixed_const(4);
+ mc_latency_mclk.full += dfixed_mul(tras_ff, temp_ff);
+ mc_latency_mclk.full += dfixed_mul(trp_ff, temp_ff);
mc_latency_mclk.full += k1.full;
- mc_latency_mclk.full = rfixed_div(mc_latency_mclk, mclk_ff);
- mc_latency_mclk.full += rfixed_div(temp_ff, sclk_eff_ff);
+ mc_latency_mclk.full = dfixed_div(mc_latency_mclk, mclk_ff);
+ mc_latency_mclk.full += dfixed_div(temp_ff, sclk_eff_ff);
/*
HW cursor time assuming worst case of full size colour cursor.
*/
- temp_ff.full = rfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1))));
+ temp_ff.full = dfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1))));
temp_ff.full += trcd_ff.full;
if (temp_ff.full < tras_ff.full)
temp_ff.full = tras_ff.full;
- cur_latency_mclk.full = rfixed_div(temp_ff, mclk_ff);
+ cur_latency_mclk.full = dfixed_div(temp_ff, mclk_ff);
- temp_ff.full = rfixed_const(cur_size);
- cur_latency_sclk.full = rfixed_div(temp_ff, sclk_eff_ff);
+ temp_ff.full = dfixed_const(cur_size);
+ cur_latency_sclk.full = dfixed_div(temp_ff, sclk_eff_ff);
/*
Find the total latency for the display data.
*/
- disp_latency_overhead.full = rfixed_const(8);
- disp_latency_overhead.full = rfixed_div(disp_latency_overhead, sclk_ff);
+ disp_latency_overhead.full = dfixed_const(8);
+ disp_latency_overhead.full = dfixed_div(disp_latency_overhead, sclk_ff);
mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full;
mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full;
@@ -2646,16 +2954,16 @@ void r100_bandwidth_update(struct radeon_device *rdev)
/*
Find the drain rate of the display buffer.
*/
- temp_ff.full = rfixed_const((16/pixel_bytes1));
- disp_drain_rate.full = rfixed_div(pix_clk, temp_ff);
+ temp_ff.full = dfixed_const((16/pixel_bytes1));
+ disp_drain_rate.full = dfixed_div(pix_clk, temp_ff);
/*
Find the critical point of the display buffer.
*/
- crit_point_ff.full = rfixed_mul(disp_drain_rate, disp_latency);
- crit_point_ff.full += rfixed_const_half(0);
+ crit_point_ff.full = dfixed_mul(disp_drain_rate, disp_latency);
+ crit_point_ff.full += dfixed_const_half(0);
- critical_point = rfixed_trunc(crit_point_ff);
+ critical_point = dfixed_trunc(crit_point_ff);
if (rdev->disp_priority == 2) {
critical_point = 0;
@@ -2726,8 +3034,8 @@ void r100_bandwidth_update(struct radeon_device *rdev)
/*
Find the drain rate of the display buffer.
*/
- temp_ff.full = rfixed_const((16/pixel_bytes2));
- disp_drain_rate2.full = rfixed_div(pix_clk2, temp_ff);
+ temp_ff.full = dfixed_const((16/pixel_bytes2));
+ disp_drain_rate2.full = dfixed_div(pix_clk2, temp_ff);
grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL);
grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK);
@@ -2748,8 +3056,8 @@ void r100_bandwidth_update(struct radeon_device *rdev)
critical_point2 = 0;
else {
temp = (rdev->mc.vram_width * rdev->mc.vram_is_ddr + 1)/128;
- temp_ff.full = rfixed_const(temp);
- temp_ff.full = rfixed_mul(mclk_ff, temp_ff);
+ temp_ff.full = dfixed_const(temp);
+ temp_ff.full = dfixed_mul(mclk_ff, temp_ff);
if (sclk_ff.full < temp_ff.full)
temp_ff.full = sclk_ff.full;
@@ -2757,15 +3065,15 @@ void r100_bandwidth_update(struct radeon_device *rdev)
if (mode1) {
temp_ff.full = read_return_rate.full - disp_drain_rate.full;
- time_disp1_drop_priority.full = rfixed_div(crit_point_ff, temp_ff);
+ time_disp1_drop_priority.full = dfixed_div(crit_point_ff, temp_ff);
} else {
time_disp1_drop_priority.full = 0;
}
crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full;
- crit_point_ff.full = rfixed_mul(crit_point_ff, disp_drain_rate2);
- crit_point_ff.full += rfixed_const_half(0);
+ crit_point_ff.full = dfixed_mul(crit_point_ff, disp_drain_rate2);
+ crit_point_ff.full += dfixed_const_half(0);
- critical_point2 = rfixed_trunc(crit_point_ff);
+ critical_point2 = dfixed_trunc(crit_point_ff);
if (rdev->disp_priority == 2) {
critical_point2 = 0;
@@ -3399,7 +3707,7 @@ static int r100_startup(struct radeon_device *rdev)
/* Resume clock */
r100_clock_startup(rdev);
/* Initialize GPU configuration (# pipes, ...) */
- r100_gpu_init(rdev);
+// r100_gpu_init(rdev);
/* Initialize GART (initialize after TTM so we can allocate
* memory through TTM but finalize after TTM) */
r100_enable_bm(rdev);
@@ -3436,7 +3744,7 @@ int r100_resume(struct radeon_device *rdev)
/* Resume clock before doing reset */
r100_clock_startup(rdev);
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
RREG32(R_0007C0_CP_STAT));
@@ -3462,7 +3770,6 @@ int r100_suspend(struct radeon_device *rdev)
void r100_fini(struct radeon_device *rdev)
{
- radeon_pm_fini(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
@@ -3505,7 +3812,7 @@ int r100_init(struct radeon_device *rdev)
return r;
}
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev,
"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
@@ -3518,8 +3825,6 @@ int r100_init(struct radeon_device *rdev)
r100_errata(rdev);
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
- /* Initialize power management */
- radeon_pm_init(rdev);
/* initialize AGP */
if (rdev->flags & RADEON_IS_AGP) {
r = radeon_agp_init(rdev);
diff --git a/drivers/gpu/drm/radeon/r100d.h b/drivers/gpu/drm/radeon/r100d.h
index df29a630c466..d016b16fa116 100644
--- a/drivers/gpu/drm/radeon/r100d.h
+++ b/drivers/gpu/drm/radeon/r100d.h
@@ -74,6 +74,134 @@
#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
/* Registers */
+#define R_0000F0_RBBM_SOFT_RESET 0x0000F0
+#define S_0000F0_SOFT_RESET_CP(x) (((x) & 0x1) << 0)
+#define G_0000F0_SOFT_RESET_CP(x) (((x) >> 0) & 0x1)
+#define C_0000F0_SOFT_RESET_CP 0xFFFFFFFE
+#define S_0000F0_SOFT_RESET_HI(x) (((x) & 0x1) << 1)
+#define G_0000F0_SOFT_RESET_HI(x) (((x) >> 1) & 0x1)
+#define C_0000F0_SOFT_RESET_HI 0xFFFFFFFD
+#define S_0000F0_SOFT_RESET_SE(x) (((x) & 0x1) << 2)
+#define G_0000F0_SOFT_RESET_SE(x) (((x) >> 2) & 0x1)
+#define C_0000F0_SOFT_RESET_SE 0xFFFFFFFB
+#define S_0000F0_SOFT_RESET_RE(x) (((x) & 0x1) << 3)
+#define G_0000F0_SOFT_RESET_RE(x) (((x) >> 3) & 0x1)
+#define C_0000F0_SOFT_RESET_RE 0xFFFFFFF7
+#define S_0000F0_SOFT_RESET_PP(x) (((x) & 0x1) << 4)
+#define G_0000F0_SOFT_RESET_PP(x) (((x) >> 4) & 0x1)
+#define C_0000F0_SOFT_RESET_PP 0xFFFFFFEF
+#define S_0000F0_SOFT_RESET_E2(x) (((x) & 0x1) << 5)
+#define G_0000F0_SOFT_RESET_E2(x) (((x) >> 5) & 0x1)
+#define C_0000F0_SOFT_RESET_E2 0xFFFFFFDF
+#define S_0000F0_SOFT_RESET_RB(x) (((x) & 0x1) << 6)
+#define G_0000F0_SOFT_RESET_RB(x) (((x) >> 6) & 0x1)
+#define C_0000F0_SOFT_RESET_RB 0xFFFFFFBF
+#define S_0000F0_SOFT_RESET_HDP(x) (((x) & 0x1) << 7)
+#define G_0000F0_SOFT_RESET_HDP(x) (((x) >> 7) & 0x1)
+#define C_0000F0_SOFT_RESET_HDP 0xFFFFFF7F
+#define S_0000F0_SOFT_RESET_MC(x) (((x) & 0x1) << 8)
+#define G_0000F0_SOFT_RESET_MC(x) (((x) >> 8) & 0x1)
+#define C_0000F0_SOFT_RESET_MC 0xFFFFFEFF
+#define S_0000F0_SOFT_RESET_AIC(x) (((x) & 0x1) << 9)
+#define G_0000F0_SOFT_RESET_AIC(x) (((x) >> 9) & 0x1)
+#define C_0000F0_SOFT_RESET_AIC 0xFFFFFDFF
+#define S_0000F0_SOFT_RESET_VIP(x) (((x) & 0x1) << 10)
+#define G_0000F0_SOFT_RESET_VIP(x) (((x) >> 10) & 0x1)
+#define C_0000F0_SOFT_RESET_VIP 0xFFFFFBFF
+#define S_0000F0_SOFT_RESET_DISP(x) (((x) & 0x1) << 11)
+#define G_0000F0_SOFT_RESET_DISP(x) (((x) >> 11) & 0x1)
+#define C_0000F0_SOFT_RESET_DISP 0xFFFFF7FF
+#define S_0000F0_SOFT_RESET_CG(x) (((x) & 0x1) << 12)
+#define G_0000F0_SOFT_RESET_CG(x) (((x) >> 12) & 0x1)
+#define C_0000F0_SOFT_RESET_CG 0xFFFFEFFF
+#define R_000030_BUS_CNTL 0x000030
+#define S_000030_BUS_DBL_RESYNC(x) (((x) & 0x1) << 0)
+#define G_000030_BUS_DBL_RESYNC(x) (((x) >> 0) & 0x1)
+#define C_000030_BUS_DBL_RESYNC 0xFFFFFFFE
+#define S_000030_BUS_MSTR_RESET(x) (((x) & 0x1) << 1)
+#define G_000030_BUS_MSTR_RESET(x) (((x) >> 1) & 0x1)
+#define C_000030_BUS_MSTR_RESET 0xFFFFFFFD
+#define S_000030_BUS_FLUSH_BUF(x) (((x) & 0x1) << 2)
+#define G_000030_BUS_FLUSH_BUF(x) (((x) >> 2) & 0x1)
+#define C_000030_BUS_FLUSH_BUF 0xFFFFFFFB
+#define S_000030_BUS_STOP_REQ_DIS(x) (((x) & 0x1) << 3)
+#define G_000030_BUS_STOP_REQ_DIS(x) (((x) >> 3) & 0x1)
+#define C_000030_BUS_STOP_REQ_DIS 0xFFFFFFF7
+#define S_000030_BUS_PM4_READ_COMBINE_EN(x) (((x) & 0x1) << 4)
+#define G_000030_BUS_PM4_READ_COMBINE_EN(x) (((x) >> 4) & 0x1)
+#define C_000030_BUS_PM4_READ_COMBINE_EN 0xFFFFFFEF
+#define S_000030_BUS_WRT_COMBINE_EN(x) (((x) & 0x1) << 5)
+#define G_000030_BUS_WRT_COMBINE_EN(x) (((x) >> 5) & 0x1)
+#define C_000030_BUS_WRT_COMBINE_EN 0xFFFFFFDF
+#define S_000030_BUS_MASTER_DIS(x) (((x) & 0x1) << 6)
+#define G_000030_BUS_MASTER_DIS(x) (((x) >> 6) & 0x1)
+#define C_000030_BUS_MASTER_DIS 0xFFFFFFBF
+#define S_000030_BIOS_ROM_WRT_EN(x) (((x) & 0x1) << 7)
+#define G_000030_BIOS_ROM_WRT_EN(x) (((x) >> 7) & 0x1)
+#define C_000030_BIOS_ROM_WRT_EN 0xFFFFFF7F
+#define S_000030_BM_DAC_CRIPPLE(x) (((x) & 0x1) << 8)
+#define G_000030_BM_DAC_CRIPPLE(x) (((x) >> 8) & 0x1)
+#define C_000030_BM_DAC_CRIPPLE 0xFFFFFEFF
+#define S_000030_BUS_NON_PM4_READ_COMBINE_EN(x) (((x) & 0x1) << 9)
+#define G_000030_BUS_NON_PM4_READ_COMBINE_EN(x) (((x) >> 9) & 0x1)
+#define C_000030_BUS_NON_PM4_READ_COMBINE_EN 0xFFFFFDFF
+#define S_000030_BUS_XFERD_DISCARD_EN(x) (((x) & 0x1) << 10)
+#define G_000030_BUS_XFERD_DISCARD_EN(x) (((x) >> 10) & 0x1)
+#define C_000030_BUS_XFERD_DISCARD_EN 0xFFFFFBFF
+#define S_000030_BUS_SGL_READ_DISABLE(x) (((x) & 0x1) << 11)
+#define G_000030_BUS_SGL_READ_DISABLE(x) (((x) >> 11) & 0x1)
+#define C_000030_BUS_SGL_READ_DISABLE 0xFFFFF7FF
+#define S_000030_BIOS_DIS_ROM(x) (((x) & 0x1) << 12)
+#define G_000030_BIOS_DIS_ROM(x) (((x) >> 12) & 0x1)
+#define C_000030_BIOS_DIS_ROM 0xFFFFEFFF
+#define S_000030_BUS_PCI_READ_RETRY_EN(x) (((x) & 0x1) << 13)
+#define G_000030_BUS_PCI_READ_RETRY_EN(x) (((x) >> 13) & 0x1)
+#define C_000030_BUS_PCI_READ_RETRY_EN 0xFFFFDFFF
+#define S_000030_BUS_AGP_AD_STEPPING_EN(x) (((x) & 0x1) << 14)
+#define G_000030_BUS_AGP_AD_STEPPING_EN(x) (((x) >> 14) & 0x1)
+#define C_000030_BUS_AGP_AD_STEPPING_EN 0xFFFFBFFF
+#define S_000030_BUS_PCI_WRT_RETRY_EN(x) (((x) & 0x1) << 15)
+#define G_000030_BUS_PCI_WRT_RETRY_EN(x) (((x) >> 15) & 0x1)
+#define C_000030_BUS_PCI_WRT_RETRY_EN 0xFFFF7FFF
+#define S_000030_BUS_RETRY_WS(x) (((x) & 0xF) << 16)
+#define G_000030_BUS_RETRY_WS(x) (((x) >> 16) & 0xF)
+#define C_000030_BUS_RETRY_WS 0xFFF0FFFF
+#define S_000030_BUS_MSTR_RD_MULT(x) (((x) & 0x1) << 20)
+#define G_000030_BUS_MSTR_RD_MULT(x) (((x) >> 20) & 0x1)
+#define C_000030_BUS_MSTR_RD_MULT 0xFFEFFFFF
+#define S_000030_BUS_MSTR_RD_LINE(x) (((x) & 0x1) << 21)
+#define G_000030_BUS_MSTR_RD_LINE(x) (((x) >> 21) & 0x1)
+#define C_000030_BUS_MSTR_RD_LINE 0xFFDFFFFF
+#define S_000030_BUS_SUSPEND(x) (((x) & 0x1) << 22)
+#define G_000030_BUS_SUSPEND(x) (((x) >> 22) & 0x1)
+#define C_000030_BUS_SUSPEND 0xFFBFFFFF
+#define S_000030_LAT_16X(x) (((x) & 0x1) << 23)
+#define G_000030_LAT_16X(x) (((x) >> 23) & 0x1)
+#define C_000030_LAT_16X 0xFF7FFFFF
+#define S_000030_BUS_RD_DISCARD_EN(x) (((x) & 0x1) << 24)
+#define G_000030_BUS_RD_DISCARD_EN(x) (((x) >> 24) & 0x1)
+#define C_000030_BUS_RD_DISCARD_EN 0xFEFFFFFF
+#define S_000030_ENFRCWRDY(x) (((x) & 0x1) << 25)
+#define G_000030_ENFRCWRDY(x) (((x) >> 25) & 0x1)
+#define C_000030_ENFRCWRDY 0xFDFFFFFF
+#define S_000030_BUS_MSTR_WS(x) (((x) & 0x1) << 26)
+#define G_000030_BUS_MSTR_WS(x) (((x) >> 26) & 0x1)
+#define C_000030_BUS_MSTR_WS 0xFBFFFFFF
+#define S_000030_BUS_PARKING_DIS(x) (((x) & 0x1) << 27)
+#define G_000030_BUS_PARKING_DIS(x) (((x) >> 27) & 0x1)
+#define C_000030_BUS_PARKING_DIS 0xF7FFFFFF
+#define S_000030_BUS_MSTR_DISCONNECT_EN(x) (((x) & 0x1) << 28)
+#define G_000030_BUS_MSTR_DISCONNECT_EN(x) (((x) >> 28) & 0x1)
+#define C_000030_BUS_MSTR_DISCONNECT_EN 0xEFFFFFFF
+#define S_000030_SERR_EN(x) (((x) & 0x1) << 29)
+#define G_000030_SERR_EN(x) (((x) >> 29) & 0x1)
+#define C_000030_SERR_EN 0xDFFFFFFF
+#define S_000030_BUS_READ_BURST(x) (((x) & 0x1) << 30)
+#define G_000030_BUS_READ_BURST(x) (((x) >> 30) & 0x1)
+#define C_000030_BUS_READ_BURST 0xBFFFFFFF
+#define S_000030_BUS_RDY_READ_DLY(x) (((x) & 0x1) << 31)
+#define G_000030_BUS_RDY_READ_DLY(x) (((x) >> 31) & 0x1)
+#define C_000030_BUS_RDY_READ_DLY 0x7FFFFFFF
#define R_000040_GEN_INT_CNTL 0x000040
#define S_000040_CRTC_VBLANK(x) (((x) & 0x1) << 0)
#define G_000040_CRTC_VBLANK(x) (((x) >> 0) & 0x1)
@@ -710,5 +838,41 @@
#define G_00000D_FORCE_RB(x) (((x) >> 28) & 0x1)
#define C_00000D_FORCE_RB 0xEFFFFFFF
+/* PLL regs */
+#define SCLK_CNTL 0xd
+#define FORCE_HDP (1 << 17)
+#define CLK_PWRMGT_CNTL 0x14
+#define GLOBAL_PMAN_EN (1 << 10)
+#define DISP_PM (1 << 20)
+#define PLL_PWRMGT_CNTL 0x15
+#define MPLL_TURNOFF (1 << 0)
+#define SPLL_TURNOFF (1 << 1)
+#define PPLL_TURNOFF (1 << 2)
+#define P2PLL_TURNOFF (1 << 3)
+#define TVPLL_TURNOFF (1 << 4)
+#define MOBILE_SU (1 << 16)
+#define SU_SCLK_USE_BCLK (1 << 17)
+#define SCLK_CNTL2 0x1e
+#define REDUCED_SPEED_SCLK_MODE (1 << 16)
+#define REDUCED_SPEED_SCLK_SEL(x) ((x) << 17)
+#define MCLK_MISC 0x1f
+#define EN_MCLK_TRISTATE_IN_SUSPEND (1 << 18)
+#define SCLK_MORE_CNTL 0x35
+#define REDUCED_SPEED_SCLK_EN (1 << 16)
+#define IO_CG_VOLTAGE_DROP (1 << 17)
+#define VOLTAGE_DELAY_SEL(x) ((x) << 20)
+#define VOLTAGE_DROP_SYNC (1 << 19)
+
+/* mmreg */
+#define DISP_PWR_MAN 0xd08
+#define DISP_D3_GRPH_RST (1 << 18)
+#define DISP_D3_SUBPIC_RST (1 << 19)
+#define DISP_D3_OV0_RST (1 << 20)
+#define DISP_D1D2_GRPH_RST (1 << 21)
+#define DISP_D1D2_SUBPIC_RST (1 << 22)
+#define DISP_D1D2_OV0_RST (1 << 23)
+#define DISP_DVO_ENABLE_RST (1 << 24)
+#define TV_ENABLE_RST (1 << 25)
+#define AUTO_PWRUP_EN (1 << 26)
#endif
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index a5ff8076b423..b2f9efe2897c 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -27,8 +27,9 @@
*/
#include <linux/seq_file.h>
#include <linux/slab.h>
-#include "drmP.h"
-#include "drm.h"
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/drm_crtc_helper.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_asic.h"
@@ -151,6 +152,10 @@ void rv370_pcie_gart_disable(struct radeon_device *rdev)
u32 tmp;
int r;
+ WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, 0);
+ WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, 0);
+ WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
+ WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN);
@@ -323,7 +328,6 @@ void r300_gpu_init(struct radeon_device *rdev)
{
uint32_t gb_tile_config, tmp;
- r100_hdp_reset(rdev);
if ((rdev->family == CHIP_R300 && rdev->pdev->device != 0x4144) ||
(rdev->family == CHIP_R350 && rdev->pdev->device != 0x4148)) {
/* r300,r350 */
@@ -375,89 +379,85 @@ void r300_gpu_init(struct radeon_device *rdev)
rdev->num_gb_pipes, rdev->num_z_pipes);
}
-int r300_ga_reset(struct radeon_device *rdev)
+bool r300_gpu_is_lockup(struct radeon_device *rdev)
{
- uint32_t tmp;
- bool reinit_cp;
- int i;
+ u32 rbbm_status;
+ int r;
- reinit_cp = rdev->cp.ready;
- rdev->cp.ready = false;
- for (i = 0; i < rdev->usec_timeout; i++) {
- WREG32(RADEON_CP_CSQ_MODE, 0);
- WREG32(RADEON_CP_CSQ_CNTL, 0);
- WREG32(RADEON_RBBM_SOFT_RESET, 0x32005);
- (void)RREG32(RADEON_RBBM_SOFT_RESET);
- udelay(200);
- WREG32(RADEON_RBBM_SOFT_RESET, 0);
- /* Wait to prevent race in RBBM_STATUS */
- mdelay(1);
- tmp = RREG32(RADEON_RBBM_STATUS);
- if (tmp & ((1 << 20) | (1 << 26))) {
- DRM_ERROR("VAP & CP still busy (RBBM_STATUS=0x%08X)", tmp);
- /* GA still busy soft reset it */
- WREG32(0x429C, 0x200);
- WREG32(R300_VAP_PVS_STATE_FLUSH_REG, 0);
- WREG32(R300_RE_SCISSORS_TL, 0);
- WREG32(R300_RE_SCISSORS_BR, 0);
- WREG32(0x24AC, 0);
- }
- /* Wait to prevent race in RBBM_STATUS */
- mdelay(1);
- tmp = RREG32(RADEON_RBBM_STATUS);
- if (!(tmp & ((1 << 20) | (1 << 26)))) {
- break;
- }
+ rbbm_status = RREG32(R_000E40_RBBM_STATUS);
+ if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
+ r100_gpu_lockup_update(&rdev->config.r300.lockup, &rdev->cp);
+ return false;
}
- for (i = 0; i < rdev->usec_timeout; i++) {
- tmp = RREG32(RADEON_RBBM_STATUS);
- if (!(tmp & ((1 << 20) | (1 << 26)))) {
- DRM_INFO("GA reset succeed (RBBM_STATUS=0x%08X)\n",
- tmp);
- if (reinit_cp) {
- return r100_cp_init(rdev, rdev->cp.ring_size);
- }
- return 0;
- }
- DRM_UDELAY(1);
+ /* force CP activities */
+ r = radeon_ring_lock(rdev, 2);
+ if (!r) {
+ /* PACKET2 NOP */
+ radeon_ring_write(rdev, 0x80000000);
+ radeon_ring_write(rdev, 0x80000000);
+ radeon_ring_unlock_commit(rdev);
}
- tmp = RREG32(RADEON_RBBM_STATUS);
- DRM_ERROR("Failed to reset GA ! (RBBM_STATUS=0x%08X)\n", tmp);
- return -1;
+ rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
+ return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, &rdev->cp);
}
-int r300_gpu_reset(struct radeon_device *rdev)
+int r300_asic_reset(struct radeon_device *rdev)
{
- uint32_t status;
-
- /* reset order likely matter */
- status = RREG32(RADEON_RBBM_STATUS);
- /* reset HDP */
- r100_hdp_reset(rdev);
- /* reset rb2d */
- if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
- r100_rb2d_reset(rdev);
- }
- /* reset GA */
- if (status & ((1 << 20) | (1 << 26))) {
- r300_ga_reset(rdev);
- }
- /* reset CP */
- status = RREG32(RADEON_RBBM_STATUS);
- if (status & (1 << 16)) {
- r100_cp_reset(rdev);
+ struct r100_mc_save save;
+ u32 status, tmp;
+
+ r100_mc_stop(rdev, &save);
+ status = RREG32(R_000E40_RBBM_STATUS);
+ if (!G_000E40_GUI_ACTIVE(status)) {
+ return 0;
}
+ status = RREG32(R_000E40_RBBM_STATUS);
+ dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+ /* stop CP */
+ WREG32(RADEON_CP_CSQ_CNTL, 0);
+ tmp = RREG32(RADEON_CP_RB_CNTL);
+ WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
+ WREG32(RADEON_CP_RB_RPTR_WR, 0);
+ WREG32(RADEON_CP_RB_WPTR, 0);
+ WREG32(RADEON_CP_RB_CNTL, tmp);
+ /* save PCI state */
+ pci_save_state(rdev->pdev);
+ /* disable bus mastering */
+ r100_bm_disable(rdev);
+ WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) |
+ S_0000F0_SOFT_RESET_GA(1));
+ RREG32(R_0000F0_RBBM_SOFT_RESET);
+ mdelay(500);
+ WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+ mdelay(1);
+ status = RREG32(R_000E40_RBBM_STATUS);
+ dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+ /* Resetting the CP seems to be problematic: sometimes it ends up
+ * hard-locking the machine, but it is necessary for a successful
+ * reset. More testing and experimentation is needed on R3XX/R4XX
+ * to find a reliable solution (if one exists).
+ */
+ WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
+ RREG32(R_0000F0_RBBM_SOFT_RESET);
+ mdelay(500);
+ WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+ mdelay(1);
+ status = RREG32(R_000E40_RBBM_STATUS);
+ dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+ /* restore PCI & busmastering */
+ pci_restore_state(rdev->pdev);
+ r100_enable_bm(rdev);
/* Check if GPU is idle */
- status = RREG32(RADEON_RBBM_STATUS);
- if (status & RADEON_RBBM_ACTIVE) {
- DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
+ if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
+ dev_err(rdev->dev, "failed to reset GPU\n");
+ rdev->gpu_lockup = true;
return -1;
}
- DRM_INFO("GPU reset succeed (RBBM_STATUS=0x%08X)\n", status);
+ r100_mc_resume(rdev, &save);
+ dev_info(rdev->dev, "GPU reset succeed\n");
return 0;
}
-
/*
* r300,r350,rv350,rv380 VRAM info
*/
@@ -1316,7 +1316,7 @@ int r300_resume(struct radeon_device *rdev)
/* Resume clock before doing reset */
r300_clock_startup(rdev);
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
RREG32(R_0007C0_CP_STAT));
@@ -1344,7 +1344,6 @@ int r300_suspend(struct radeon_device *rdev)
void r300_fini(struct radeon_device *rdev)
{
- radeon_pm_fini(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
@@ -1387,7 +1386,7 @@ int r300_init(struct radeon_device *rdev)
return r;
}
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev,
"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
@@ -1400,8 +1399,6 @@ int r300_init(struct radeon_device *rdev)
r300_errata(rdev);
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
- /* Initialize power management */
- radeon_pm_init(rdev);
/* initialize AGP */
if (rdev->flags & RADEON_IS_AGP) {
r = radeon_agp_init(rdev);
diff --git a/drivers/gpu/drm/radeon/r300d.h b/drivers/gpu/drm/radeon/r300d.h
index 4c73114f0de9..968a33317fbf 100644
--- a/drivers/gpu/drm/radeon/r300d.h
+++ b/drivers/gpu/drm/radeon/r300d.h
@@ -209,7 +209,52 @@
#define S_000E40_GUI_ACTIVE(x) (((x) & 0x1) << 31)
#define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1)
#define C_000E40_GUI_ACTIVE 0x7FFFFFFF
-
+#define R_0000F0_RBBM_SOFT_RESET 0x0000F0
+#define S_0000F0_SOFT_RESET_CP(x) (((x) & 0x1) << 0)
+#define G_0000F0_SOFT_RESET_CP(x) (((x) >> 0) & 0x1)
+#define C_0000F0_SOFT_RESET_CP 0xFFFFFFFE
+#define S_0000F0_SOFT_RESET_HI(x) (((x) & 0x1) << 1)
+#define G_0000F0_SOFT_RESET_HI(x) (((x) >> 1) & 0x1)
+#define C_0000F0_SOFT_RESET_HI 0xFFFFFFFD
+#define S_0000F0_SOFT_RESET_VAP(x) (((x) & 0x1) << 2)
+#define G_0000F0_SOFT_RESET_VAP(x) (((x) >> 2) & 0x1)
+#define C_0000F0_SOFT_RESET_VAP 0xFFFFFFFB
+#define S_0000F0_SOFT_RESET_RE(x) (((x) & 0x1) << 3)
+#define G_0000F0_SOFT_RESET_RE(x) (((x) >> 3) & 0x1)
+#define C_0000F0_SOFT_RESET_RE 0xFFFFFFF7
+#define S_0000F0_SOFT_RESET_PP(x) (((x) & 0x1) << 4)
+#define G_0000F0_SOFT_RESET_PP(x) (((x) >> 4) & 0x1)
+#define C_0000F0_SOFT_RESET_PP 0xFFFFFFEF
+#define S_0000F0_SOFT_RESET_E2(x) (((x) & 0x1) << 5)
+#define G_0000F0_SOFT_RESET_E2(x) (((x) >> 5) & 0x1)
+#define C_0000F0_SOFT_RESET_E2 0xFFFFFFDF
+#define S_0000F0_SOFT_RESET_RB(x) (((x) & 0x1) << 6)
+#define G_0000F0_SOFT_RESET_RB(x) (((x) >> 6) & 0x1)
+#define C_0000F0_SOFT_RESET_RB 0xFFFFFFBF
+#define S_0000F0_SOFT_RESET_HDP(x) (((x) & 0x1) << 7)
+#define G_0000F0_SOFT_RESET_HDP(x) (((x) >> 7) & 0x1)
+#define C_0000F0_SOFT_RESET_HDP 0xFFFFFF7F
+#define S_0000F0_SOFT_RESET_MC(x) (((x) & 0x1) << 8)
+#define G_0000F0_SOFT_RESET_MC(x) (((x) >> 8) & 0x1)
+#define C_0000F0_SOFT_RESET_MC 0xFFFFFEFF
+#define S_0000F0_SOFT_RESET_AIC(x) (((x) & 0x1) << 9)
+#define G_0000F0_SOFT_RESET_AIC(x) (((x) >> 9) & 0x1)
+#define C_0000F0_SOFT_RESET_AIC 0xFFFFFDFF
+#define S_0000F0_SOFT_RESET_VIP(x) (((x) & 0x1) << 10)
+#define G_0000F0_SOFT_RESET_VIP(x) (((x) >> 10) & 0x1)
+#define C_0000F0_SOFT_RESET_VIP 0xFFFFFBFF
+#define S_0000F0_SOFT_RESET_DISP(x) (((x) & 0x1) << 11)
+#define G_0000F0_SOFT_RESET_DISP(x) (((x) >> 11) & 0x1)
+#define C_0000F0_SOFT_RESET_DISP 0xFFFFF7FF
+#define S_0000F0_SOFT_RESET_CG(x) (((x) & 0x1) << 12)
+#define G_0000F0_SOFT_RESET_CG(x) (((x) >> 12) & 0x1)
+#define C_0000F0_SOFT_RESET_CG 0xFFFFEFFF
+#define S_0000F0_SOFT_RESET_GA(x) (((x) & 0x1) << 13)
+#define G_0000F0_SOFT_RESET_GA(x) (((x) >> 13) & 0x1)
+#define C_0000F0_SOFT_RESET_GA 0xFFFFDFFF
+#define S_0000F0_SOFT_RESET_IDCT(x) (((x) & 0x1) << 14)
+#define G_0000F0_SOFT_RESET_IDCT(x) (((x) >> 14) & 0x1)
+#define C_0000F0_SOFT_RESET_IDCT 0xFFFFBFFF
#define R_00000D_SCLK_CNTL 0x00000D
#define S_00000D_SCLK_SRC_SEL(x) (((x) & 0x7) << 0)
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index c2bda4ad62e7..4415a5ee5871 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -36,6 +36,35 @@
#include "r420d.h"
#include "r420_reg_safe.h"
+void r420_pm_init_profile(struct radeon_device *rdev)
+{
+ /* default */
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
+ /* low sh */
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+ /* high sh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
+ /* low mh */
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+ /* high mh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
+}
+
static void r420_set_reg_safe(struct radeon_device *rdev)
{
rdev->config.r300.reg_safe_bm = r420_reg_safe_bm;
@@ -241,7 +270,7 @@ int r420_resume(struct radeon_device *rdev)
/* Resume clock before doing reset */
r420_clock_resume(rdev);
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
RREG32(R_0007C0_CP_STAT));
@@ -274,7 +303,6 @@ int r420_suspend(struct radeon_device *rdev)
void r420_fini(struct radeon_device *rdev)
{
- radeon_pm_fini(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
@@ -322,7 +350,7 @@ int r420_init(struct radeon_device *rdev)
}
}
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev,
"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
@@ -334,8 +362,6 @@ int r420_init(struct radeon_device *rdev)
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
- /* Initialize power management */
- radeon_pm_init(rdev);
/* initialize AGP */
if (rdev->flags & RADEON_IS_AGP) {
r = radeon_agp_init(rdev);
diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h
index 0cf2ad2a5585..93c9a2bbccf8 100644
--- a/drivers/gpu/drm/radeon/r500_reg.h
+++ b/drivers/gpu/drm/radeon/r500_reg.h
@@ -347,9 +347,11 @@
#define AVIVO_D1CRTC_CONTROL 0x6080
# define AVIVO_CRTC_EN (1 << 0)
+# define AVIVO_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24)
#define AVIVO_D1CRTC_BLANK_CONTROL 0x6084
#define AVIVO_D1CRTC_INTERLACE_CONTROL 0x6088
#define AVIVO_D1CRTC_INTERLACE_STATUS 0x608c
+#define AVIVO_D1CRTC_STATUS_POSITION 0x60a0
#define AVIVO_D1CRTC_FRAME_COUNT 0x60a4
#define AVIVO_D1CRTC_STEREO_CONTROL 0x60c4
@@ -488,6 +490,7 @@
#define AVIVO_D2CRTC_BLANK_CONTROL 0x6884
#define AVIVO_D2CRTC_INTERLACE_CONTROL 0x6888
#define AVIVO_D2CRTC_INTERLACE_STATUS 0x688c
+#define AVIVO_D2CRTC_STATUS_POSITION 0x68a0
#define AVIVO_D2CRTC_FRAME_COUNT 0x68a4
#define AVIVO_D2CRTC_STEREO_CONTROL 0x68c4
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index 3c44b8d39318..34330df28483 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -53,7 +53,6 @@ static void r520_gpu_init(struct radeon_device *rdev)
{
unsigned pipe_select_current, gb_pipe_select, tmp;
- r100_hdp_reset(rdev);
rv515_vga_render_disable(rdev);
/*
* DST_PIPE_CONFIG 0x170C
@@ -209,7 +208,7 @@ int r520_resume(struct radeon_device *rdev)
/* Resume clock before doing reset */
rv515_clock_startup(rdev);
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
RREG32(R_0007C0_CP_STAT));
@@ -246,7 +245,7 @@ int r520_init(struct radeon_device *rdev)
return -EINVAL;
}
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev,
"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
@@ -262,8 +261,6 @@ int r520_init(struct radeon_device *rdev)
}
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
- /* Initialize power management */
- radeon_pm_init(rdev);
/* initialize AGP */
if (rdev->flags & RADEON_IS_AGP) {
r = radeon_agp_init(rdev);
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 8f3454e2056a..44e96a2ae25a 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -44,6 +44,9 @@
#define R700_PFP_UCODE_SIZE 848
#define R700_PM4_UCODE_SIZE 1360
#define R700_RLC_UCODE_SIZE 1024
+#define EVERGREEN_PFP_UCODE_SIZE 1120
+#define EVERGREEN_PM4_UCODE_SIZE 1376
+#define EVERGREEN_RLC_UCODE_SIZE 768
/* Firmware Names */
MODULE_FIRMWARE("radeon/R600_pfp.bin");
@@ -68,6 +71,18 @@ MODULE_FIRMWARE("radeon/RV710_pfp.bin");
MODULE_FIRMWARE("radeon/RV710_me.bin");
MODULE_FIRMWARE("radeon/R600_rlc.bin");
MODULE_FIRMWARE("radeon/R700_rlc.bin");
+MODULE_FIRMWARE("radeon/CEDAR_pfp.bin");
+MODULE_FIRMWARE("radeon/CEDAR_me.bin");
+MODULE_FIRMWARE("radeon/CEDAR_rlc.bin");
+MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin");
+MODULE_FIRMWARE("radeon/REDWOOD_me.bin");
+MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin");
+MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin");
+MODULE_FIRMWARE("radeon/JUNIPER_me.bin");
+MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin");
+MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
+MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
+MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
int r600_debugfs_mc_info_init(struct radeon_device *rdev);
@@ -75,6 +90,401 @@ int r600_debugfs_mc_info_init(struct radeon_device *rdev);
int r600_mc_wait_for_idle(struct radeon_device *rdev);
void r600_gpu_init(struct radeon_device *rdev);
void r600_fini(struct radeon_device *rdev);
+void r600_irq_disable(struct radeon_device *rdev);
+
+void r600_pm_get_dynpm_state(struct radeon_device *rdev)
+{
+ int i;
+
+ rdev->pm.dynpm_can_upclock = true;
+ rdev->pm.dynpm_can_downclock = true;
+
+ /* power state array is low to high, default is first */
+ if ((rdev->flags & RADEON_IS_IGP) || (rdev->family == CHIP_R600)) {
+ int min_power_state_index = 0;
+
+ if (rdev->pm.num_power_states > 2)
+ min_power_state_index = 1;
+
+ switch (rdev->pm.dynpm_planned_action) {
+ case DYNPM_ACTION_MINIMUM:
+ rdev->pm.requested_power_state_index = min_power_state_index;
+ rdev->pm.requested_clock_mode_index = 0;
+ rdev->pm.dynpm_can_downclock = false;
+ break;
+ case DYNPM_ACTION_DOWNCLOCK:
+ if (rdev->pm.current_power_state_index == min_power_state_index) {
+ rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
+ rdev->pm.dynpm_can_downclock = false;
+ } else {
+ if (rdev->pm.active_crtc_count > 1) {
+ for (i = 0; i < rdev->pm.num_power_states; i++) {
+ if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
+ continue;
+ else if (i >= rdev->pm.current_power_state_index) {
+ rdev->pm.requested_power_state_index =
+ rdev->pm.current_power_state_index;
+ break;
+ } else {
+ rdev->pm.requested_power_state_index = i;
+ break;
+ }
+ }
+ } else
+ rdev->pm.requested_power_state_index =
+ rdev->pm.current_power_state_index - 1;
+ }
+ rdev->pm.requested_clock_mode_index = 0;
+ /* don't use the power state if crtcs are active and no display flag is set */
+ if ((rdev->pm.active_crtc_count > 0) &&
+ (rdev->pm.power_state[rdev->pm.requested_power_state_index].
+ clock_info[rdev->pm.requested_clock_mode_index].flags &
+ RADEON_PM_MODE_NO_DISPLAY)) {
+ rdev->pm.requested_power_state_index++;
+ }
+ break;
+ case DYNPM_ACTION_UPCLOCK:
+ if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
+ rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
+ rdev->pm.dynpm_can_upclock = false;
+ } else {
+ if (rdev->pm.active_crtc_count > 1) {
+ for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
+ if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
+ continue;
+ else if (i <= rdev->pm.current_power_state_index) {
+ rdev->pm.requested_power_state_index =
+ rdev->pm.current_power_state_index;
+ break;
+ } else {
+ rdev->pm.requested_power_state_index = i;
+ break;
+ }
+ }
+ } else
+ rdev->pm.requested_power_state_index =
+ rdev->pm.current_power_state_index + 1;
+ }
+ rdev->pm.requested_clock_mode_index = 0;
+ break;
+ case DYNPM_ACTION_DEFAULT:
+ rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
+ rdev->pm.requested_clock_mode_index = 0;
+ rdev->pm.dynpm_can_upclock = false;
+ break;
+ case DYNPM_ACTION_NONE:
+ default:
+ DRM_ERROR("Requested mode for not defined action\n");
+ return;
+ }
+ } else {
+ /* XXX select a power state based on AC/DC, single/dualhead, etc. */
+ /* for now just select the first power state and switch between clock modes */
+ /* power state array is low to high, default is first (0) */
+ if (rdev->pm.active_crtc_count > 1) {
+ rdev->pm.requested_power_state_index = -1;
+ /* start at 1 as we don't want the default mode */
+ for (i = 1; i < rdev->pm.num_power_states; i++) {
+ if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
+ continue;
+ else if ((rdev->pm.power_state[i].type == POWER_STATE_TYPE_PERFORMANCE) ||
+ (rdev->pm.power_state[i].type == POWER_STATE_TYPE_BATTERY)) {
+ rdev->pm.requested_power_state_index = i;
+ break;
+ }
+ }
+ /* if nothing selected, grab the default state. */
+ if (rdev->pm.requested_power_state_index == -1)
+ rdev->pm.requested_power_state_index = 0;
+ } else
+ rdev->pm.requested_power_state_index = 1;
+
+ switch (rdev->pm.dynpm_planned_action) {
+ case DYNPM_ACTION_MINIMUM:
+ rdev->pm.requested_clock_mode_index = 0;
+ rdev->pm.dynpm_can_downclock = false;
+ break;
+ case DYNPM_ACTION_DOWNCLOCK:
+ if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
+ if (rdev->pm.current_clock_mode_index == 0) {
+ rdev->pm.requested_clock_mode_index = 0;
+ rdev->pm.dynpm_can_downclock = false;
+ } else
+ rdev->pm.requested_clock_mode_index =
+ rdev->pm.current_clock_mode_index - 1;
+ } else {
+ rdev->pm.requested_clock_mode_index = 0;
+ rdev->pm.dynpm_can_downclock = false;
+ }
+ /* don't use the power state if crtcs are active and no display flag is set */
+ if ((rdev->pm.active_crtc_count > 0) &&
+ (rdev->pm.power_state[rdev->pm.requested_power_state_index].
+ clock_info[rdev->pm.requested_clock_mode_index].flags &
+ RADEON_PM_MODE_NO_DISPLAY)) {
+ rdev->pm.requested_clock_mode_index++;
+ }
+ break;
+ case DYNPM_ACTION_UPCLOCK:
+ if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
+ if (rdev->pm.current_clock_mode_index ==
+ (rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1)) {
+ rdev->pm.requested_clock_mode_index = rdev->pm.current_clock_mode_index;
+ rdev->pm.dynpm_can_upclock = false;
+ } else
+ rdev->pm.requested_clock_mode_index =
+ rdev->pm.current_clock_mode_index + 1;
+ } else {
+ rdev->pm.requested_clock_mode_index =
+ rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1;
+ rdev->pm.dynpm_can_upclock = false;
+ }
+ break;
+ case DYNPM_ACTION_DEFAULT:
+ rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
+ rdev->pm.requested_clock_mode_index = 0;
+ rdev->pm.dynpm_can_upclock = false;
+ break;
+ case DYNPM_ACTION_NONE:
+ default:
+ DRM_ERROR("Requested mode for not defined action\n");
+ return;
+ }
+ }
+
+ DRM_DEBUG("Requested: e: %d m: %d p: %d\n",
+ rdev->pm.power_state[rdev->pm.requested_power_state_index].
+ clock_info[rdev->pm.requested_clock_mode_index].sclk,
+ rdev->pm.power_state[rdev->pm.requested_power_state_index].
+ clock_info[rdev->pm.requested_clock_mode_index].mclk,
+ rdev->pm.power_state[rdev->pm.requested_power_state_index].
+ pcie_lanes);
+}
+
+static int r600_pm_get_type_index(struct radeon_device *rdev,
+ enum radeon_pm_state_type ps_type,
+ int instance)
+{
+ int i;
+ int found_instance = -1;
+
+ for (i = 0; i < rdev->pm.num_power_states; i++) {
+ if (rdev->pm.power_state[i].type == ps_type) {
+ found_instance++;
+ if (found_instance == instance)
+ return i;
+ }
+ }
+ /* return default if no match */
+ return rdev->pm.default_power_state_index;
+}
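
r600_pm_get_type_index() returns the index of the Nth power state of the requested type, falling back to the default index when the BIOS tables expose no match; the profile setup below relies on that fallback. A brief usage sketch (values purely illustrative):

/* First battery-type state if present, otherwise the default index. */
int battery_idx = r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
/* Second performance-type state, e.g. for the high multi-head profile. */
int perf_idx = r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
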
+
+void rs780_pm_init_profile(struct radeon_device *rdev)
+{
+ if (rdev->pm.num_power_states == 2) {
+ /* default */
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
+ /* low sh */
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+ /* high sh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
+ /* low mh */
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+ /* high mh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
+ } else if (rdev->pm.num_power_states == 3) {
+ /* default */
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
+ /* low sh */
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+ /* high sh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 2;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
+ /* low mh */
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+ /* high mh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
+ } else {
+ /* default */
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
+ /* low sh */
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 2;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 2;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+ /* high sh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 2;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 3;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
+ /* low mh */
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+ /* high mh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 3;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
+ }
+}
+
+void r600_pm_init_profile(struct radeon_device *rdev)
+{
+ if (rdev->family == CHIP_R600) {
+ /* XXX */
+ /* default */
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
+ /* low sh */
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+ /* high sh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
+ /* low mh */
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+ /* high mh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
+ } else {
+ if (rdev->pm.num_power_states < 4) {
+ /* default */
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
+ /* low sh */
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 1;
+ /* high sh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
+ /* low mh */
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 2;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 1;
+ /* high mh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
+ } else {
+ /* default */
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
+ /* low sh */
+ if (rdev->flags & RADEON_IS_MOBILITY) {
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 1;
+ } else {
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 1;
+ }
+ /* high sh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
+ /* low mh */
+ if (rdev->flags & RADEON_IS_MOBILITY) {
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 2;
+ } else {
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 1;
+ }
+ /* high mh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
+ }
+ }
+}
+
+void r600_pm_misc(struct radeon_device *rdev)
+{
+
+}
+
+bool r600_gui_idle(struct radeon_device *rdev)
+{
+ if (RREG32(GRBM_STATUS) & GUI_ACTIVE)
+ return false;
+ else
+ return true;
+}
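A minimal sketch, not part of this patch, of how the profile table built above is presumably consumed: the PM_PROFILE_*_IDX entry for the active profile supplies the power-state/clock-mode pair for the current DPMS state. The field and index names come from the radeon.h changes further down in this diff; the helper name itself is illustrative.

	static void radeon_pm_apply_profile_sketch(struct radeon_device *rdev, bool dpms_on)
	{
		/* profile_index is one of the PM_PROFILE_*_IDX slots filled in above */
		struct radeon_pm_profile *p = &rdev->pm.profiles[rdev->pm.profile_index];

		/* pick the power state / clock mode pair for the current DPMS state */
		rdev->pm.requested_power_state_index =
			dpms_on ? p->dpms_on_ps_idx : p->dpms_off_ps_idx;
		rdev->pm.requested_clock_mode_index =
			dpms_on ? p->dpms_on_cm_idx : p->dpms_off_cm_idx;
	}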
/* hpd for digital panel detect/disconnect */
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
@@ -714,11 +1124,6 @@ int r600_mc_init(struct radeon_device *rdev)
rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
rdev->mc.visible_vram_size = rdev->mc.aper_size;
- /* FIXME remove this once we support unmappable VRAM */
- if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
- rdev->mc.mc_vram_size = rdev->mc.aper_size;
- rdev->mc.real_vram_size = rdev->mc.aper_size;
- }
r600_vram_gtt_location(rdev, &rdev->mc);
if (rdev->flags & RADEON_IS_IGP)
@@ -750,7 +1155,6 @@ int r600_gpu_soft_reset(struct radeon_device *rdev)
S_008014_DB2_BUSY(1) | S_008014_DB3_BUSY(1) |
S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) |
S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
- u32 srbm_reset = 0;
u32 tmp;
dev_info(rdev->dev, "GPU softreset \n");
@@ -765,7 +1169,7 @@ int r600_gpu_soft_reset(struct radeon_device *rdev)
dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
}
/* Disable CP parsing/prefetching */
- WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(0xff));
+ WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
/* Check if any of the rendering block is busy and reset it */
if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) ||
(RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) {
@@ -784,72 +1188,56 @@ int r600_gpu_soft_reset(struct radeon_device *rdev)
S_008020_SOFT_RESET_VGT(1);
dev_info(rdev->dev, " R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
WREG32(R_008020_GRBM_SOFT_RESET, tmp);
- (void)RREG32(R_008020_GRBM_SOFT_RESET);
- udelay(50);
+ RREG32(R_008020_GRBM_SOFT_RESET);
+ mdelay(15);
WREG32(R_008020_GRBM_SOFT_RESET, 0);
- (void)RREG32(R_008020_GRBM_SOFT_RESET);
}
/* Reset CP (we always reset CP) */
tmp = S_008020_SOFT_RESET_CP(1);
dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
WREG32(R_008020_GRBM_SOFT_RESET, tmp);
- (void)RREG32(R_008020_GRBM_SOFT_RESET);
- udelay(50);
+ RREG32(R_008020_GRBM_SOFT_RESET);
+ mdelay(15);
WREG32(R_008020_GRBM_SOFT_RESET, 0);
- (void)RREG32(R_008020_GRBM_SOFT_RESET);
- /* Reset others GPU block if necessary */
- if (G_000E50_RLC_BUSY(RREG32(R_000E50_SRBM_STATUS)))
- srbm_reset |= S_000E60_SOFT_RESET_RLC(1);
- if (G_000E50_GRBM_RQ_PENDING(RREG32(R_000E50_SRBM_STATUS)))
- srbm_reset |= S_000E60_SOFT_RESET_GRBM(1);
- if (G_000E50_HI_RQ_PENDING(RREG32(R_000E50_SRBM_STATUS)))
- srbm_reset |= S_000E60_SOFT_RESET_IH(1);
- if (G_000E50_VMC_BUSY(RREG32(R_000E50_SRBM_STATUS)))
- srbm_reset |= S_000E60_SOFT_RESET_VMC(1);
- if (G_000E50_MCB_BUSY(RREG32(R_000E50_SRBM_STATUS)))
- srbm_reset |= S_000E60_SOFT_RESET_MC(1);
- if (G_000E50_MCDZ_BUSY(RREG32(R_000E50_SRBM_STATUS)))
- srbm_reset |= S_000E60_SOFT_RESET_MC(1);
- if (G_000E50_MCDY_BUSY(RREG32(R_000E50_SRBM_STATUS)))
- srbm_reset |= S_000E60_SOFT_RESET_MC(1);
- if (G_000E50_MCDX_BUSY(RREG32(R_000E50_SRBM_STATUS)))
- srbm_reset |= S_000E60_SOFT_RESET_MC(1);
- if (G_000E50_MCDW_BUSY(RREG32(R_000E50_SRBM_STATUS)))
- srbm_reset |= S_000E60_SOFT_RESET_MC(1);
- if (G_000E50_RLC_BUSY(RREG32(R_000E50_SRBM_STATUS)))
- srbm_reset |= S_000E60_SOFT_RESET_RLC(1);
- if (G_000E50_SEM_BUSY(RREG32(R_000E50_SRBM_STATUS)))
- srbm_reset |= S_000E60_SOFT_RESET_SEM(1);
- if (G_000E50_BIF_BUSY(RREG32(R_000E50_SRBM_STATUS)))
- srbm_reset |= S_000E60_SOFT_RESET_BIF(1);
- dev_info(rdev->dev, " R_000E60_SRBM_SOFT_RESET=0x%08X\n", srbm_reset);
- WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset);
- (void)RREG32(R_000E60_SRBM_SOFT_RESET);
- udelay(50);
- WREG32(R_000E60_SRBM_SOFT_RESET, 0);
- (void)RREG32(R_000E60_SRBM_SOFT_RESET);
- WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset);
- (void)RREG32(R_000E60_SRBM_SOFT_RESET);
- udelay(50);
- WREG32(R_000E60_SRBM_SOFT_RESET, 0);
- (void)RREG32(R_000E60_SRBM_SOFT_RESET);
/* Wait a little for things to settle down */
- udelay(50);
+ mdelay(1);
dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n",
RREG32(R_008010_GRBM_STATUS));
dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n",
RREG32(R_008014_GRBM_STATUS2));
dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n",
RREG32(R_000E50_SRBM_STATUS));
- /* After reset we need to reinit the asic as GPU often endup in an
- * incoherent state.
- */
- atom_asic_init(rdev->mode_info.atom_context);
rv515_mc_resume(rdev, &save);
return 0;
}
-int r600_gpu_reset(struct radeon_device *rdev)
+bool r600_gpu_is_lockup(struct radeon_device *rdev)
+{
+ u32 srbm_status;
+ u32 grbm_status;
+ u32 grbm_status2;
+ int r;
+
+ srbm_status = RREG32(R_000E50_SRBM_STATUS);
+ grbm_status = RREG32(R_008010_GRBM_STATUS);
+ grbm_status2 = RREG32(R_008014_GRBM_STATUS2);
+ if (!G_008010_GUI_ACTIVE(grbm_status)) {
+ r100_gpu_lockup_update(&rdev->config.r300.lockup, &rdev->cp);
+ return false;
+ }
+ /* force CP activities */
+ r = radeon_ring_lock(rdev, 2);
+ if (!r) {
+ /* PACKET2 NOP */
+ radeon_ring_write(rdev, 0x80000000);
+ radeon_ring_write(rdev, 0x80000000);
+ radeon_ring_unlock_commit(rdev);
+ }
+ rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
+ return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, &rdev->cp);
+}
+
+int r600_asic_reset(struct radeon_device *rdev)
{
return r600_gpu_soft_reset(rdev);
}
@@ -1467,10 +1855,31 @@ int r600_init_microcode(struct radeon_device *rdev)
chip_name = "RV710";
rlc_chip_name = "R700";
break;
+ case CHIP_CEDAR:
+ chip_name = "CEDAR";
+ rlc_chip_name = "CEDAR";
+ break;
+ case CHIP_REDWOOD:
+ chip_name = "REDWOOD";
+ rlc_chip_name = "REDWOOD";
+ break;
+ case CHIP_JUNIPER:
+ chip_name = "JUNIPER";
+ rlc_chip_name = "JUNIPER";
+ break;
+ case CHIP_CYPRESS:
+ case CHIP_HEMLOCK:
+ chip_name = "CYPRESS";
+ rlc_chip_name = "CYPRESS";
+ break;
default: BUG();
}
- if (rdev->family >= CHIP_RV770) {
+ if (rdev->family >= CHIP_CEDAR) {
+ pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
+ me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
+ rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
+ } else if (rdev->family >= CHIP_RV770) {
pfp_req_size = R700_PFP_UCODE_SIZE * 4;
me_req_size = R700_PM4_UCODE_SIZE * 4;
rlc_req_size = R700_RLC_UCODE_SIZE * 4;
@@ -1584,12 +1993,15 @@ int r600_cp_start(struct radeon_device *rdev)
}
radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
radeon_ring_write(rdev, 0x1);
- if (rdev->family < CHIP_RV770) {
- radeon_ring_write(rdev, 0x3);
- radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1);
- } else {
+ if (rdev->family >= CHIP_CEDAR) {
+ radeon_ring_write(rdev, 0x0);
+ radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1);
+ } else if (rdev->family >= CHIP_RV770) {
radeon_ring_write(rdev, 0x0);
radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1);
+ } else {
+ radeon_ring_write(rdev, 0x3);
+ radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1);
}
radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
radeon_ring_write(rdev, 0);
@@ -2051,8 +2463,6 @@ int r600_init(struct radeon_device *rdev)
r = radeon_clocks_init(rdev);
if (r)
return r;
- /* Initialize power management */
- radeon_pm_init(rdev);
/* Fence driver */
r = radeon_fence_driver_init(rdev);
if (r)
@@ -2117,7 +2527,6 @@ int r600_init(struct radeon_device *rdev)
void r600_fini(struct radeon_device *rdev)
{
- radeon_pm_fini(rdev);
r600_audio_fini(rdev);
r600_blit_fini(rdev);
r600_cp_fini(rdev);
@@ -2290,10 +2699,11 @@ static void r600_ih_ring_fini(struct radeon_device *rdev)
}
}
-static void r600_rlc_stop(struct radeon_device *rdev)
+void r600_rlc_stop(struct radeon_device *rdev)
{
- if (rdev->family >= CHIP_RV770) {
+ if ((rdev->family >= CHIP_RV770) &&
+ (rdev->family <= CHIP_RV740)) {
/* r7xx asics need to soft reset RLC before halting */
WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
RREG32(SRBM_SOFT_RESET);
@@ -2330,7 +2740,12 @@ static int r600_rlc_init(struct radeon_device *rdev)
WREG32(RLC_UCODE_CNTL, 0);
fw_data = (const __be32 *)rdev->rlc_fw->data;
- if (rdev->family >= CHIP_RV770) {
+ if (rdev->family >= CHIP_CEDAR) {
+ for (i = 0; i < EVERGREEN_RLC_UCODE_SIZE; i++) {
+ WREG32(RLC_UCODE_ADDR, i);
+ WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
+ }
+ } else if (rdev->family >= CHIP_RV770) {
for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
WREG32(RLC_UCODE_ADDR, i);
WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
@@ -2360,7 +2775,7 @@ static void r600_enable_interrupts(struct radeon_device *rdev)
rdev->ih.enabled = true;
}
-static void r600_disable_interrupts(struct radeon_device *rdev)
+void r600_disable_interrupts(struct radeon_device *rdev)
{
u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
u32 ih_cntl = RREG32(IH_CNTL);
@@ -2475,7 +2890,10 @@ int r600_irq_init(struct radeon_device *rdev)
WREG32(IH_CNTL, ih_cntl);
/* force the active interrupt state to all disabled */
- r600_disable_interrupt_state(rdev);
+ if (rdev->family >= CHIP_CEDAR)
+ evergreen_disable_interrupt_state(rdev);
+ else
+ r600_disable_interrupt_state(rdev);
/* enable irqs */
r600_enable_interrupts(rdev);
@@ -2485,7 +2903,7 @@ int r600_irq_init(struct radeon_device *rdev)
void r600_irq_suspend(struct radeon_device *rdev)
{
- r600_disable_interrupts(rdev);
+ r600_irq_disable(rdev);
r600_rlc_stop(rdev);
}
@@ -2500,6 +2918,8 @@ int r600_irq_set(struct radeon_device *rdev)
u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
u32 mode_int = 0;
u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
+ u32 grbm_int_cntl = 0;
+ u32 hdmi1, hdmi2;
if (!rdev->irq.installed) {
WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
@@ -2513,7 +2933,9 @@ int r600_irq_set(struct radeon_device *rdev)
return 0;
}
+ hdmi1 = RREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
if (ASIC_IS_DCE3(rdev)) {
+ hdmi2 = RREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
@@ -2523,6 +2945,7 @@ int r600_irq_set(struct radeon_device *rdev)
hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
}
} else {
+ hdmi2 = RREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
@@ -2564,10 +2987,25 @@ int r600_irq_set(struct radeon_device *rdev)
DRM_DEBUG("r600_irq_set: hpd 6\n");
hpd6 |= DC_HPDx_INT_EN;
}
+ if (rdev->irq.hdmi[0]) {
+ DRM_DEBUG("r600_irq_set: hdmi 1\n");
+ hdmi1 |= R600_HDMI_INT_EN;
+ }
+ if (rdev->irq.hdmi[1]) {
+ DRM_DEBUG("r600_irq_set: hdmi 2\n");
+ hdmi2 |= R600_HDMI_INT_EN;
+ }
+ if (rdev->irq.gui_idle) {
+ DRM_DEBUG("gui idle\n");
+ grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
+ }
WREG32(CP_INT_CNTL, cp_int_cntl);
WREG32(DxMODE_INT_MASK, mode_int);
+ WREG32(GRBM_INT_CNTL, grbm_int_cntl);
+ WREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, hdmi1);
if (ASIC_IS_DCE3(rdev)) {
+ WREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, hdmi2);
WREG32(DC_HPD1_INT_CONTROL, hpd1);
WREG32(DC_HPD2_INT_CONTROL, hpd2);
WREG32(DC_HPD3_INT_CONTROL, hpd3);
@@ -2577,6 +3015,7 @@ int r600_irq_set(struct radeon_device *rdev)
WREG32(DC_HPD6_INT_CONTROL, hpd6);
}
} else {
+ WREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, hdmi2);
WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
@@ -2660,6 +3099,18 @@ static inline void r600_irq_ack(struct radeon_device *rdev,
WREG32(DC_HPD6_INT_CONTROL, tmp);
}
}
+ if (RREG32(R600_HDMI_BLOCK1 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
+ WREG32_P(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
+ }
+ if (ASIC_IS_DCE3(rdev)) {
+ if (RREG32(R600_HDMI_BLOCK3 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
+ WREG32_P(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
+ }
+ } else {
+ if (RREG32(R600_HDMI_BLOCK2 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
+ WREG32_P(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
+ }
+ }
}
void r600_irq_disable(struct radeon_device *rdev)
@@ -2713,6 +3164,8 @@ static inline u32 r600_get_ih_wptr(struct radeon_device *rdev)
* 19 1 FP Hot plug detection B
* 19 2 DAC A auto-detection
* 19 3 DAC B auto-detection
+ * 21 4 HDMI block A
+ * 21 5 HDMI block B
* 176 - CP_INT RB
* 177 - CP_INT IB1
* 178 - CP_INT IB2
@@ -2852,6 +3305,10 @@ restart_ih:
break;
}
break;
+ case 21: /* HDMI */
+ DRM_DEBUG("IH: HDMI: 0x%x\n", src_data);
+ r600_audio_schedule_polling(rdev);
+ break;
case 176: /* CP_INT in ring buffer */
case 177: /* CP_INT in IB1 */
case 178: /* CP_INT in IB2 */
@@ -2861,6 +3318,11 @@ restart_ih:
case 181: /* CP EOP event */
DRM_DEBUG("IH: CP EOP\n");
break;
+ case 233: /* GUI IDLE */
+ DRM_DEBUG("IH: CP EOP\n");
+ rdev->pm.gui_idle = true;
+ wake_up(&rdev->irq.idle_queue);
+ break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
break;
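A minimal sketch, not part of this patch, of a consumer for the GUI_IDLE interrupt wired up above (IH source 233 sets pm.gui_idle and wakes irq.idle_queue). It assumes the irq/pm fields and the radeon_gui_idle()/radeon_irq_set() hooks from the radeon.h changes below; the 100 ms timeout and the function name are illustrative.

	static void radeon_wait_for_gui_idle_sketch(struct radeon_device *rdev)
	{
		if (radeon_gui_idle(rdev))
			return;

		/* arm the GUI_IDLE interrupt handled as IH source 233 above */
		rdev->pm.gui_idle = false;
		rdev->irq.gui_idle = true;
		radeon_irq_set(rdev);

		/* sleep until r600_irq_process() wakes us, or give up after 100 ms */
		wait_event_timeout(rdev->irq.idle_queue, rdev->pm.gui_idle,
				   msecs_to_jiffies(100));

		rdev->irq.gui_idle = false;
		radeon_irq_set(rdev);
	}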
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
index 1d898051c631..2b26553c352c 100644
--- a/drivers/gpu/drm/radeon/r600_audio.c
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -44,7 +44,7 @@ static int r600_audio_chipset_supported(struct radeon_device *rdev)
/*
* current number of channels
*/
-static int r600_audio_channels(struct radeon_device *rdev)
+int r600_audio_channels(struct radeon_device *rdev)
{
return (RREG32(R600_AUDIO_RATE_BPS_CHANNEL) & 0x7) + 1;
}
@@ -52,7 +52,7 @@ static int r600_audio_channels(struct radeon_device *rdev)
/*
* current bits per sample
*/
-static int r600_audio_bits_per_sample(struct radeon_device *rdev)
+int r600_audio_bits_per_sample(struct radeon_device *rdev)
{
uint32_t value = (RREG32(R600_AUDIO_RATE_BPS_CHANNEL) & 0xF0) >> 4;
switch (value) {
@@ -71,7 +71,7 @@ static int r600_audio_bits_per_sample(struct radeon_device *rdev)
/*
* current sampling rate in HZ
*/
-static int r600_audio_rate(struct radeon_device *rdev)
+int r600_audio_rate(struct radeon_device *rdev)
{
uint32_t value = RREG32(R600_AUDIO_RATE_BPS_CHANNEL);
uint32_t result;
@@ -90,7 +90,7 @@ static int r600_audio_rate(struct radeon_device *rdev)
/*
* iec 60958 status bits
*/
-static uint8_t r600_audio_status_bits(struct radeon_device *rdev)
+uint8_t r600_audio_status_bits(struct radeon_device *rdev)
{
return RREG32(R600_AUDIO_STATUS_BITS) & 0xff;
}
@@ -98,12 +98,21 @@ static uint8_t r600_audio_status_bits(struct radeon_device *rdev)
/*
* iec 60958 category code
*/
-static uint8_t r600_audio_category_code(struct radeon_device *rdev)
+uint8_t r600_audio_category_code(struct radeon_device *rdev)
{
return (RREG32(R600_AUDIO_STATUS_BITS) >> 8) & 0xff;
}
/*
+ * schedule next audio update event
+ */
+void r600_audio_schedule_polling(struct radeon_device *rdev)
+{
+ mod_timer(&rdev->audio_timer,
+ jiffies + msecs_to_jiffies(AUDIO_TIMER_INTERVALL));
+}
+
+/*
* update all hdmi interfaces with current audio parameters
*/
static void r600_audio_update_hdmi(unsigned long param)
@@ -118,7 +127,7 @@ static void r600_audio_update_hdmi(unsigned long param)
uint8_t category_code = r600_audio_category_code(rdev);
struct drm_encoder *encoder;
- int changes = 0;
+ int changes = 0, still_going = 0;
changes |= channels != rdev->audio_channels;
changes |= rate != rdev->audio_rate;
@@ -135,15 +144,13 @@ static void r600_audio_update_hdmi(unsigned long param)
}
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ still_going |= radeon_encoder->audio_polling_active;
if (changes || r600_hdmi_buffer_status_changed(encoder))
- r600_hdmi_update_audio_settings(
- encoder, channels,
- rate, bps, status_bits,
- category_code);
+ r600_hdmi_update_audio_settings(encoder);
}
- mod_timer(&rdev->audio_timer,
- jiffies + msecs_to_jiffies(AUDIO_TIMER_INTERVALL));
+ if (still_going) r600_audio_schedule_polling(rdev);
}
/*
@@ -176,9 +183,34 @@ int r600_audio_init(struct radeon_device *rdev)
r600_audio_update_hdmi,
(unsigned long)rdev);
+ return 0;
+}
+
+/*
+ * enable the polling timer, to check for status changes
+ */
+void r600_audio_enable_polling(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+
+ DRM_DEBUG("r600_audio_enable_polling: %d", radeon_encoder->audio_polling_active);
+ if (radeon_encoder->audio_polling_active)
+ return;
+
+ radeon_encoder->audio_polling_active = 1;
mod_timer(&rdev->audio_timer, jiffies + 1);
+}
- return 0;
+/*
+ * disable the polling timer, so we get no more status updates
+ */
+void r600_audio_disable_polling(struct drm_encoder *encoder)
+{
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ DRM_DEBUG("r600_audio_disable_polling: %d", radeon_encoder->audio_polling_active);
+ radeon_encoder->audio_polling_active = 0;
}
/*
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index f6c6c77db7e0..d13622ae74e9 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -447,6 +447,9 @@ int r600_blit_init(struct radeon_device *rdev)
u32 packet2s[16];
int num_packet2s = 0;
+ /* don't reinitialize blit */
+ if (rdev->r600_blit.shader_obj)
+ return 0;
mutex_init(&rdev->r600_blit.mutex);
rdev->r600_blit.state_offset = 0;
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index 2616b822ba68..26b4bc9d89a5 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -290,17 +290,15 @@ void r600_hdmi_audio_workaround(struct drm_encoder *encoder)
if (!offset)
return;
- if (r600_hdmi_is_audio_buffer_filled(encoder)) {
- /* disable audio workaround and start delivering of audio frames */
- WREG32_P(offset+R600_HDMI_CNTL, 0x00000001, ~0x00001001);
+ if (!radeon_encoder->hdmi_audio_workaround ||
+ r600_hdmi_is_audio_buffer_filled(encoder)) {
- } else if (radeon_encoder->hdmi_audio_workaround) {
- /* enable audio workaround and start delivering of audio frames */
- WREG32_P(offset+R600_HDMI_CNTL, 0x00001001, ~0x00001001);
+ /* disable audio workaround */
+ WREG32_P(offset+R600_HDMI_CNTL, 0x00000001, ~0x00001001);
} else {
- /* disable audio workaround and stop delivering of audio frames */
- WREG32_P(offset+R600_HDMI_CNTL, 0x00000000, ~0x00001001);
+ /* enable audio workaround */
+ WREG32_P(offset+R600_HDMI_CNTL, 0x00001001, ~0x00001001);
}
}
@@ -345,25 +343,23 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
/* audio packets per line, does anyone know how to calc this ? */
WREG32_P(offset+R600_HDMI_CNTL, 0x00040000, ~0x001F0000);
-
- /* update? reset? don't realy know */
- WREG32_P(offset+R600_HDMI_CNTL, 0x14000000, ~0x14000000);
}
/*
* update settings with current parameters from audio engine
*/
-void r600_hdmi_update_audio_settings(struct drm_encoder *encoder,
- int channels,
- int rate,
- int bps,
- uint8_t status_bits,
- uint8_t category_code)
+void r600_hdmi_update_audio_settings(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
+ int channels = r600_audio_channels(rdev);
+ int rate = r600_audio_rate(rdev);
+ int bps = r600_audio_bits_per_sample(rdev);
+ uint8_t status_bits = r600_audio_status_bits(rdev);
+ uint8_t category_code = r600_audio_category_code(rdev);
+
uint32_t iec;
if (!offset)
@@ -415,9 +411,6 @@ void r600_hdmi_update_audio_settings(struct drm_encoder *encoder,
r600_hdmi_audioinfoframe(encoder, channels-1, 0, 0, 0, 0, 0, 0, 0);
r600_hdmi_audio_workaround(encoder);
-
- /* update? reset? don't realy know */
- WREG32_P(offset+R600_HDMI_CNTL, 0x04000000, ~0x04000000);
}
static int r600_hdmi_find_free_block(struct drm_device *dev)
@@ -486,6 +479,7 @@ void r600_hdmi_enable(struct drm_encoder *encoder)
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ uint32_t offset;
if (ASIC_IS_DCE4(rdev))
return;
@@ -499,10 +493,10 @@ void r600_hdmi_enable(struct drm_encoder *encoder)
}
}
+ offset = radeon_encoder->hdmi_offset;
if (ASIC_IS_DCE32(rdev) && !ASIC_IS_DCE4(rdev)) {
WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0x1, ~0x1);
} else if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
- int offset = radeon_encoder->hdmi_offset;
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
WREG32_P(AVIVO_TMDSA_CNTL, 0x4, ~0x4);
@@ -518,6 +512,21 @@ void r600_hdmi_enable(struct drm_encoder *encoder)
}
}
+ if (rdev->irq.installed
+ && rdev->family != CHIP_RS600
+ && rdev->family != CHIP_RS690
+ && rdev->family != CHIP_RS740) {
+
+ /* if irq is available use it */
+ rdev->irq.hdmi[offset == R600_HDMI_BLOCK1 ? 0 : 1] = true;
+ radeon_irq_set(rdev);
+
+ r600_audio_disable_polling(encoder);
+ } else {
+ /* if not, fall back to polling */
+ r600_audio_enable_polling(encoder);
+ }
+
DRM_DEBUG("Enabling HDMI interface @ 0x%04X for encoder 0x%x\n",
radeon_encoder->hdmi_offset, radeon_encoder->encoder_id);
}
@@ -530,22 +539,30 @@ void r600_hdmi_disable(struct drm_encoder *encoder)
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ uint32_t offset;
if (ASIC_IS_DCE4(rdev))
return;
- if (!radeon_encoder->hdmi_offset) {
+ offset = radeon_encoder->hdmi_offset;
+ if (!offset) {
dev_err(rdev->dev, "Disabling not enabled HDMI\n");
return;
}
DRM_DEBUG("Disabling HDMI interface @ 0x%04X for encoder 0x%x\n",
- radeon_encoder->hdmi_offset, radeon_encoder->encoder_id);
+ offset, radeon_encoder->encoder_id);
+
+ /* disable irq */
+ rdev->irq.hdmi[offset == R600_HDMI_BLOCK1 ? 0 : 1] = false;
+ radeon_irq_set(rdev);
+
+ /* disable polling */
+ r600_audio_disable_polling(encoder);
if (ASIC_IS_DCE32(rdev) && !ASIC_IS_DCE4(rdev)) {
WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0, ~0x1);
} else if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
- int offset = radeon_encoder->hdmi_offset;
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
WREG32_P(AVIVO_TMDSA_CNTL, 0, ~0x4);
diff --git a/drivers/gpu/drm/radeon/r600_reg.h b/drivers/gpu/drm/radeon/r600_reg.h
index 7b1d22370f6e..d84612ae47e0 100644
--- a/drivers/gpu/drm/radeon/r600_reg.h
+++ b/drivers/gpu/drm/radeon/r600_reg.h
@@ -157,33 +157,36 @@
#define R600_HDMI_BLOCK3 0x7800
/* HDMI registers */
-#define R600_HDMI_ENABLE 0x00
-#define R600_HDMI_STATUS 0x04
-#define R600_HDMI_CNTL 0x08
-#define R600_HDMI_UNKNOWN_0 0x0C
-#define R600_HDMI_AUDIOCNTL 0x10
-#define R600_HDMI_VIDEOCNTL 0x14
-#define R600_HDMI_VERSION 0x18
-#define R600_HDMI_UNKNOWN_1 0x28
-#define R600_HDMI_VIDEOINFOFRAME_0 0x54
-#define R600_HDMI_VIDEOINFOFRAME_1 0x58
-#define R600_HDMI_VIDEOINFOFRAME_2 0x5c
-#define R600_HDMI_VIDEOINFOFRAME_3 0x60
-#define R600_HDMI_32kHz_CTS 0xac
-#define R600_HDMI_32kHz_N 0xb0
-#define R600_HDMI_44_1kHz_CTS 0xb4
-#define R600_HDMI_44_1kHz_N 0xb8
-#define R600_HDMI_48kHz_CTS 0xbc
-#define R600_HDMI_48kHz_N 0xc0
-#define R600_HDMI_AUDIOINFOFRAME_0 0xcc
-#define R600_HDMI_AUDIOINFOFRAME_1 0xd0
-#define R600_HDMI_IEC60958_1 0xd4
-#define R600_HDMI_IEC60958_2 0xd8
-#define R600_HDMI_UNKNOWN_2 0xdc
-#define R600_HDMI_AUDIO_DEBUG_0 0xe0
-#define R600_HDMI_AUDIO_DEBUG_1 0xe4
-#define R600_HDMI_AUDIO_DEBUG_2 0xe8
-#define R600_HDMI_AUDIO_DEBUG_3 0xec
+#define R600_HDMI_ENABLE 0x00
+#define R600_HDMI_STATUS 0x04
+# define R600_HDMI_INT_PENDING (1 << 29)
+#define R600_HDMI_CNTL 0x08
+# define R600_HDMI_INT_EN (1 << 28)
+# define R600_HDMI_INT_ACK (1 << 29)
+#define R600_HDMI_UNKNOWN_0 0x0C
+#define R600_HDMI_AUDIOCNTL 0x10
+#define R600_HDMI_VIDEOCNTL 0x14
+#define R600_HDMI_VERSION 0x18
+#define R600_HDMI_UNKNOWN_1 0x28
+#define R600_HDMI_VIDEOINFOFRAME_0 0x54
+#define R600_HDMI_VIDEOINFOFRAME_1 0x58
+#define R600_HDMI_VIDEOINFOFRAME_2 0x5c
+#define R600_HDMI_VIDEOINFOFRAME_3 0x60
+#define R600_HDMI_32kHz_CTS 0xac
+#define R600_HDMI_32kHz_N 0xb0
+#define R600_HDMI_44_1kHz_CTS 0xb4
+#define R600_HDMI_44_1kHz_N 0xb8
+#define R600_HDMI_48kHz_CTS 0xbc
+#define R600_HDMI_48kHz_N 0xc0
+#define R600_HDMI_AUDIOINFOFRAME_0 0xcc
+#define R600_HDMI_AUDIOINFOFRAME_1 0xd0
+#define R600_HDMI_IEC60958_1 0xd4
+#define R600_HDMI_IEC60958_2 0xd8
+#define R600_HDMI_UNKNOWN_2 0xdc
+#define R600_HDMI_AUDIO_DEBUG_0 0xe0
+#define R600_HDMI_AUDIO_DEBUG_1 0xe4
+#define R600_HDMI_AUDIO_DEBUG_2 0xe8
+#define R600_HDMI_AUDIO_DEBUG_3 0xec
/* HDMI additional config base register addresses */
#define R600_HDMI_CONFIG1 0x7600
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 034218c3dbbb..669feb689bfc 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -89,7 +89,6 @@ extern int radeon_testing;
extern int radeon_connector_table;
extern int radeon_tv;
extern int radeon_new_pll;
-extern int radeon_dynpm;
extern int radeon_audio;
extern int radeon_disp_priority;
extern int radeon_hw_i2c;
@@ -99,6 +98,7 @@ extern int radeon_hw_i2c;
* symbol;
*/
#define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */
+#define RADEON_FENCE_JIFFIES_TIMEOUT (HZ / 2)
/* RADEON_IB_POOL_SIZE must be a power of 2 */
#define RADEON_IB_POOL_SIZE 16
#define RADEON_DEBUGFS_MAX_NUM_FILES 32
@@ -172,6 +172,8 @@ struct radeon_clock {
int radeon_pm_init(struct radeon_device *rdev);
void radeon_pm_fini(struct radeon_device *rdev);
void radeon_pm_compute_clocks(struct radeon_device *rdev);
+void radeon_pm_suspend(struct radeon_device *rdev);
+void radeon_pm_resume(struct radeon_device *rdev);
void radeon_combios_get_power_modes(struct radeon_device *rdev);
void radeon_atombios_get_power_modes(struct radeon_device *rdev);
@@ -182,7 +184,8 @@ struct radeon_fence_driver {
uint32_t scratch_reg;
atomic_t seq;
uint32_t last_seq;
- unsigned long count_timeout;
+ unsigned long last_jiffies;
+ unsigned long last_timeout;
wait_queue_head_t queue;
rwlock_t lock;
struct list_head created;
@@ -197,7 +200,6 @@ struct radeon_fence {
struct list_head list;
/* protected by radeon_fence.lock */
uint32_t seq;
- unsigned long timeout;
bool emited;
bool signaled;
};
@@ -259,6 +261,7 @@ struct radeon_bo_list {
unsigned rdomain;
unsigned wdomain;
u32 tiling_flags;
+ bool reserved;
};
/*
@@ -371,10 +374,15 @@ struct radeon_irq {
bool installed;
bool sw_int;
/* FIXME: use a define max crtc rather than hardcode it */
- bool crtc_vblank_int[2];
+ bool crtc_vblank_int[6];
wait_queue_head_t vblank_queue;
/* FIXME: use defines for max hpd/dacs */
bool hpd[6];
+ bool gui_idle;
+ bool gui_idle_acked;
+ wait_queue_head_t idle_queue;
+ /* FIXME: use defines for max HDMI blocks */
+ bool hdmi[2];
spinlock_t sw_lock;
int sw_refcount;
};
@@ -462,7 +470,9 @@ int radeon_ib_test(struct radeon_device *rdev);
extern void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib);
/* Ring access between begin & end cannot sleep */
void radeon_ring_free_size(struct radeon_device *rdev);
+int radeon_ring_alloc(struct radeon_device *rdev, unsigned ndw);
int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw);
+void radeon_ring_commit(struct radeon_device *rdev);
void radeon_ring_unlock_commit(struct radeon_device *rdev);
void radeon_ring_unlock_undo(struct radeon_device *rdev);
int radeon_ring_test(struct radeon_device *rdev);
@@ -566,6 +576,7 @@ typedef int (*radeon_packet3_check_t)(struct radeon_cs_parser *p,
*/
int radeon_agp_init(struct radeon_device *rdev);
void radeon_agp_resume(struct radeon_device *rdev);
+void radeon_agp_suspend(struct radeon_device *rdev);
void radeon_agp_fini(struct radeon_device *rdev);
@@ -597,17 +608,24 @@ struct radeon_wb {
* Equation between gpu/memory clock and available bandwidth is hw dependent
* (type of memory, bus size, efficiency, ...)
*/
-enum radeon_pm_state {
- PM_STATE_DISABLED,
- PM_STATE_MINIMUM,
- PM_STATE_PAUSED,
- PM_STATE_ACTIVE
+
+enum radeon_pm_method {
+ PM_METHOD_PROFILE,
+ PM_METHOD_DYNPM,
+};
+
+enum radeon_dynpm_state {
+ DYNPM_STATE_DISABLED,
+ DYNPM_STATE_MINIMUM,
+ DYNPM_STATE_PAUSED,
+ DYNPM_STATE_ACTIVE
};
-enum radeon_pm_action {
- PM_ACTION_NONE,
- PM_ACTION_MINIMUM,
- PM_ACTION_DOWNCLOCK,
- PM_ACTION_UPCLOCK
+enum radeon_dynpm_action {
+ DYNPM_ACTION_NONE,
+ DYNPM_ACTION_MINIMUM,
+ DYNPM_ACTION_DOWNCLOCK,
+ DYNPM_ACTION_UPCLOCK,
+ DYNPM_ACTION_DEFAULT
};
enum radeon_voltage_type {
@@ -625,11 +643,25 @@ enum radeon_pm_state_type {
POWER_STATE_TYPE_PERFORMANCE,
};
-enum radeon_pm_clock_mode_type {
- POWER_MODE_TYPE_DEFAULT,
- POWER_MODE_TYPE_LOW,
- POWER_MODE_TYPE_MID,
- POWER_MODE_TYPE_HIGH,
+enum radeon_pm_profile_type {
+ PM_PROFILE_DEFAULT,
+ PM_PROFILE_AUTO,
+ PM_PROFILE_LOW,
+ PM_PROFILE_HIGH,
+};
+
+#define PM_PROFILE_DEFAULT_IDX 0
+#define PM_PROFILE_LOW_SH_IDX 1
+#define PM_PROFILE_HIGH_SH_IDX 2
+#define PM_PROFILE_LOW_MH_IDX 3
+#define PM_PROFILE_HIGH_MH_IDX 4
+#define PM_PROFILE_MAX 5
+
+struct radeon_pm_profile {
+ int dpms_off_ps_idx;
+ int dpms_on_ps_idx;
+ int dpms_off_cm_idx;
+ int dpms_on_cm_idx;
};
struct radeon_voltage {
@@ -646,12 +678,8 @@ struct radeon_voltage {
u32 voltage;
};
-struct radeon_pm_non_clock_info {
- /* pcie lanes */
- int pcie_lanes;
- /* standardized non-clock flags */
- u32 flags;
-};
+/* clock mode flags */
+#define RADEON_PM_MODE_NO_DISPLAY (1 << 0)
struct radeon_pm_clock_info {
/* memory clock */
@@ -660,10 +688,13 @@ struct radeon_pm_clock_info {
u32 sclk;
/* voltage info */
struct radeon_voltage voltage;
- /* standardized clock flags - not sure we'll need these */
+ /* standardized clock flags */
u32 flags;
};
+/* state flags */
+#define RADEON_PM_STATE_SINGLE_DISPLAY_ONLY (1 << 0)
+
struct radeon_power_state {
enum radeon_pm_state_type type;
/* XXX: use a define for num clock modes */
@@ -671,9 +702,11 @@ struct radeon_power_state {
/* number of valid clock modes in this power state */
int num_clock_modes;
struct radeon_pm_clock_info *default_clock_mode;
- /* non clock info about this state */
- struct radeon_pm_non_clock_info non_clock_info;
- bool voltage_drop_active;
+ /* standardized state flags */
+ u32 flags;
+ u32 misc; /* vbios specific flags */
+ u32 misc2; /* vbios specific flags */
+ int pcie_lanes; /* pcie lanes */
};
/*
@@ -683,14 +716,11 @@ struct radeon_power_state {
struct radeon_pm {
struct mutex mutex;
- struct delayed_work idle_work;
- enum radeon_pm_state state;
- enum radeon_pm_action planned_action;
- unsigned long action_timeout;
- bool downclocked;
- int active_crtcs;
+ u32 active_crtcs;
+ int active_crtc_count;
int req_vblank;
bool vblank_sync;
+ bool gui_idle;
fixed20_12 max_bandwidth;
fixed20_12 igp_sideport_mclk;
fixed20_12 igp_system_mclk;
@@ -707,12 +737,27 @@ struct radeon_pm {
struct radeon_power_state power_state[8];
/* number of valid power states */
int num_power_states;
- struct radeon_power_state *current_power_state;
- struct radeon_pm_clock_info *current_clock_mode;
- struct radeon_power_state *requested_power_state;
- struct radeon_pm_clock_info *requested_clock_mode;
- struct radeon_power_state *default_power_state;
+ int current_power_state_index;
+ int current_clock_mode_index;
+ int requested_power_state_index;
+ int requested_clock_mode_index;
+ int default_power_state_index;
+ u32 current_sclk;
+ u32 current_mclk;
struct radeon_i2c_chan *i2c_bus;
+ /* selected pm method */
+ enum radeon_pm_method pm_method;
+ /* dynpm power management */
+ struct delayed_work dynpm_idle_work;
+ enum radeon_dynpm_state dynpm_state;
+ enum radeon_dynpm_action dynpm_planned_action;
+ unsigned long dynpm_action_timeout;
+ bool dynpm_can_upclock;
+ bool dynpm_can_downclock;
+ /* profile-based power management */
+ enum radeon_pm_profile_type profile;
+ int profile_index;
+ struct radeon_pm_profile profiles[PM_PROFILE_MAX];
};
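A minimal sketch, not part of this patch, of how the new pm_method field could separate the two paths: the profile path does a static lookup in profiles[] (as in the earlier sketch), while the dynpm path defers to the per-asic pm_get_dynpm_state hook declared further down in this header. The wrapper name is illustrative.

	static void radeon_pm_select_state_sketch(struct radeon_device *rdev)
	{
		switch (rdev->pm.pm_method) {
		case PM_METHOD_PROFILE:
			/* static lookup in the profiles[] table built by pm_init_profile() */
			radeon_pm_apply_profile_sketch(rdev, true);
			break;
		case PM_METHOD_DYNPM:
			/* let the asic pick a state from dynpm_planned_action / load */
			radeon_pm_get_dynpm_state(rdev);
			break;
		}
	}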
@@ -746,7 +791,8 @@ struct radeon_asic {
int (*resume)(struct radeon_device *rdev);
int (*suspend)(struct radeon_device *rdev);
void (*vga_set_state)(struct radeon_device *rdev, bool state);
- int (*gpu_reset)(struct radeon_device *rdev);
+ bool (*gpu_is_lockup)(struct radeon_device *rdev);
+ int (*asic_reset)(struct radeon_device *rdev);
void (*gart_tlb_flush)(struct radeon_device *rdev);
int (*gart_set_page)(struct radeon_device *rdev, int i, uint64_t addr);
int (*cp_init)(struct radeon_device *rdev, unsigned ring_size);
@@ -799,44 +845,84 @@ struct radeon_asic {
* through ring.
*/
void (*ioctl_wait_idle)(struct radeon_device *rdev, struct radeon_bo *bo);
+ bool (*gui_idle)(struct radeon_device *rdev);
+ /* power management */
+ void (*pm_misc)(struct radeon_device *rdev);
+ void (*pm_prepare)(struct radeon_device *rdev);
+ void (*pm_finish)(struct radeon_device *rdev);
+ void (*pm_init_profile)(struct radeon_device *rdev);
+ void (*pm_get_dynpm_state)(struct radeon_device *rdev);
};
/*
* Asic structures
*/
+struct r100_gpu_lockup {
+ unsigned long last_jiffies;
+ u32 last_cp_rptr;
+};
+
struct r100_asic {
- const unsigned *reg_safe_bm;
- unsigned reg_safe_bm_size;
- u32 hdp_cntl;
+ const unsigned *reg_safe_bm;
+ unsigned reg_safe_bm_size;
+ u32 hdp_cntl;
+ struct r100_gpu_lockup lockup;
};
struct r300_asic {
- const unsigned *reg_safe_bm;
- unsigned reg_safe_bm_size;
- u32 resync_scratch;
- u32 hdp_cntl;
+ const unsigned *reg_safe_bm;
+ unsigned reg_safe_bm_size;
+ u32 resync_scratch;
+ u32 hdp_cntl;
+ struct r100_gpu_lockup lockup;
};
struct r600_asic {
- unsigned max_pipes;
- unsigned max_tile_pipes;
- unsigned max_simds;
- unsigned max_backends;
- unsigned max_gprs;
- unsigned max_threads;
- unsigned max_stack_entries;
- unsigned max_hw_contexts;
- unsigned max_gs_threads;
- unsigned sx_max_export_size;
- unsigned sx_max_export_pos_size;
- unsigned sx_max_export_smx_size;
- unsigned sq_num_cf_insts;
- unsigned tiling_nbanks;
- unsigned tiling_npipes;
- unsigned tiling_group_size;
+ unsigned max_pipes;
+ unsigned max_tile_pipes;
+ unsigned max_simds;
+ unsigned max_backends;
+ unsigned max_gprs;
+ unsigned max_threads;
+ unsigned max_stack_entries;
+ unsigned max_hw_contexts;
+ unsigned max_gs_threads;
+ unsigned sx_max_export_size;
+ unsigned sx_max_export_pos_size;
+ unsigned sx_max_export_smx_size;
+ unsigned sq_num_cf_insts;
+ unsigned tiling_nbanks;
+ unsigned tiling_npipes;
+ unsigned tiling_group_size;
+ struct r100_gpu_lockup lockup;
};
struct rv770_asic {
+ unsigned max_pipes;
+ unsigned max_tile_pipes;
+ unsigned max_simds;
+ unsigned max_backends;
+ unsigned max_gprs;
+ unsigned max_threads;
+ unsigned max_stack_entries;
+ unsigned max_hw_contexts;
+ unsigned max_gs_threads;
+ unsigned sx_max_export_size;
+ unsigned sx_max_export_pos_size;
+ unsigned sx_max_export_smx_size;
+ unsigned sq_num_cf_insts;
+ unsigned sx_num_of_sets;
+ unsigned sc_prim_fifo_size;
+ unsigned sc_hiz_tile_fifo_size;
+ unsigned sc_earlyz_tile_fifo_fize;
+ unsigned tiling_nbanks;
+ unsigned tiling_npipes;
+ unsigned tiling_group_size;
+ struct r100_gpu_lockup lockup;
+};
+
+struct evergreen_asic {
+ unsigned num_ses;
unsigned max_pipes;
unsigned max_tile_pipes;
unsigned max_simds;
@@ -853,7 +939,7 @@ struct rv770_asic {
unsigned sx_num_of_sets;
unsigned sc_prim_fifo_size;
unsigned sc_hiz_tile_fifo_size;
- unsigned sc_earlyz_tile_fifo_fize;
+ unsigned sc_earlyz_tile_fifo_size;
unsigned tiling_nbanks;
unsigned tiling_npipes;
unsigned tiling_group_size;
@@ -864,6 +950,7 @@ union radeon_asic_config {
struct r100_asic r100;
struct r600_asic r600;
struct rv770_asic rv770;
+ struct evergreen_asic evergreen;
};
/*
@@ -927,9 +1014,6 @@ struct radeon_device {
bool is_atom_bios;
uint16_t bios_header_start;
struct radeon_bo *stollen_vga_memory;
- struct fb_info *fbdev_info;
- struct radeon_bo *fbdev_rbo;
- struct radeon_framebuffer *fbdev_rfb;
/* Register mmio */
resource_size_t rmmio_base;
resource_size_t rmmio_size;
@@ -974,6 +1058,7 @@ struct radeon_device {
struct work_struct hotplug_work;
int num_crtc; /* number of crtcs */
struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */
+ struct mutex vram_mutex;
/* audio stuff */
struct timer_list audio_timer;
@@ -984,6 +1069,7 @@ struct radeon_device {
uint8_t audio_category_code;
bool powered_down;
+ struct notifier_block acpi_nb;
};
int radeon_device_init(struct radeon_device *rdev,
@@ -1145,7 +1231,8 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
#define radeon_suspend(rdev) (rdev)->asic->suspend((rdev))
#define radeon_cs_parse(p) rdev->asic->cs_parse((p))
#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
-#define radeon_gpu_reset(rdev) (rdev)->asic->gpu_reset((rdev))
+#define radeon_gpu_is_lockup(rdev) (rdev)->asic->gpu_is_lockup((rdev))
+#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart_tlb_flush((rdev))
#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart_set_page((rdev), (i), (p))
#define radeon_cp_commit(rdev) (rdev)->asic->cp_commit((rdev))
@@ -1173,9 +1260,16 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
#define radeon_hpd_fini(rdev) (rdev)->asic->hpd_fini((rdev))
#define radeon_hpd_sense(rdev, hpd) (rdev)->asic->hpd_sense((rdev), (hpd))
#define radeon_hpd_set_polarity(rdev, hpd) (rdev)->asic->hpd_set_polarity((rdev), (hpd))
+#define radeon_gui_idle(rdev) (rdev)->asic->gui_idle((rdev))
+#define radeon_pm_misc(rdev) (rdev)->asic->pm_misc((rdev))
+#define radeon_pm_prepare(rdev) (rdev)->asic->pm_prepare((rdev))
+#define radeon_pm_finish(rdev) (rdev)->asic->pm_finish((rdev))
+#define radeon_pm_init_profile(rdev) (rdev)->asic->pm_init_profile((rdev))
+#define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm_get_dynpm_state((rdev))
/* Common functions */
/* AGP */
+extern int radeon_gpu_reset(struct radeon_device *rdev);
extern void radeon_agp_disable(struct radeon_device *rdev);
extern int radeon_gart_table_vram_pin(struct radeon_device *rdev);
extern void radeon_gart_restore(struct radeon_device *rdev);
@@ -1200,6 +1294,8 @@ extern int radeon_resume_kms(struct drm_device *dev);
extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
/* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */
+extern void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_cp *cp);
+extern bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *lockup, struct radeon_cp *cp);
/* rv200,rv250,rv280 */
extern void r200_set_safe_registers(struct radeon_device *rdev);
@@ -1260,6 +1356,7 @@ extern void rs690_line_buffer_adjust(struct radeon_device *rdev,
extern void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
extern bool r600_card_posted(struct radeon_device *rdev);
extern void r600_cp_stop(struct radeon_device *rdev);
+extern int r600_cp_start(struct radeon_device *rdev);
extern void r600_ring_init(struct radeon_device *rdev, unsigned ring_size);
extern int r600_cp_resume(struct radeon_device *rdev);
extern void r600_cp_fini(struct radeon_device *rdev);
@@ -1276,29 +1373,39 @@ extern void r600_scratch_init(struct radeon_device *rdev);
extern int r600_blit_init(struct radeon_device *rdev);
extern void r600_blit_fini(struct radeon_device *rdev);
extern int r600_init_microcode(struct radeon_device *rdev);
-extern int r600_gpu_reset(struct radeon_device *rdev);
+extern int r600_asic_reset(struct radeon_device *rdev);
/* r600 irq */
extern int r600_irq_init(struct radeon_device *rdev);
extern void r600_irq_fini(struct radeon_device *rdev);
extern void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size);
extern int r600_irq_set(struct radeon_device *rdev);
extern void r600_irq_suspend(struct radeon_device *rdev);
+extern void r600_disable_interrupts(struct radeon_device *rdev);
+extern void r600_rlc_stop(struct radeon_device *rdev);
/* r600 audio */
extern int r600_audio_init(struct radeon_device *rdev);
extern int r600_audio_tmds_index(struct drm_encoder *encoder);
extern void r600_audio_set_clock(struct drm_encoder *encoder, int clock);
+extern int r600_audio_channels(struct radeon_device *rdev);
+extern int r600_audio_bits_per_sample(struct radeon_device *rdev);
+extern int r600_audio_rate(struct radeon_device *rdev);
+extern uint8_t r600_audio_status_bits(struct radeon_device *rdev);
+extern uint8_t r600_audio_category_code(struct radeon_device *rdev);
+extern void r600_audio_schedule_polling(struct radeon_device *rdev);
+extern void r600_audio_enable_polling(struct drm_encoder *encoder);
+extern void r600_audio_disable_polling(struct drm_encoder *encoder);
extern void r600_audio_fini(struct radeon_device *rdev);
extern void r600_hdmi_init(struct drm_encoder *encoder);
extern void r600_hdmi_enable(struct drm_encoder *encoder);
extern void r600_hdmi_disable(struct drm_encoder *encoder);
extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
extern int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
-extern void r600_hdmi_update_audio_settings(struct drm_encoder *encoder,
- int channels,
- int rate,
- int bps,
- uint8_t status_bits,
- uint8_t category_code);
+extern void r600_hdmi_update_audio_settings(struct drm_encoder *encoder);
+
+extern void r700_cp_stop(struct radeon_device *rdev);
+extern void r700_cp_fini(struct radeon_device *rdev);
+extern void evergreen_disable_interrupt_state(struct radeon_device *rdev);
+extern int evergreen_irq_set(struct radeon_device *rdev);
/* evergreen */
struct evergreen_mc_save {
diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c
index 28e473f1f56f..f40dfb77f9b1 100644
--- a/drivers/gpu/drm/radeon/radeon_agp.c
+++ b/drivers/gpu/drm/radeon/radeon_agp.c
@@ -270,3 +270,8 @@ void radeon_agp_fini(struct radeon_device *rdev)
}
#endif
}
+
+void radeon_agp_suspend(struct radeon_device *rdev)
+{
+ radeon_agp_fini(rdev);
+}
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index a4b4bc9fa322..e57df08d4aeb 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -134,7 +134,8 @@ static struct radeon_asic r100_asic = {
.suspend = &r100_suspend,
.resume = &r100_resume,
.vga_set_state = &r100_vga_set_state,
- .gpu_reset = &r100_gpu_reset,
+ .gpu_is_lockup = &r100_gpu_is_lockup,
+ .asic_reset = &r100_asic_reset,
.gart_tlb_flush = &r100_pci_gart_tlb_flush,
.gart_set_page = &r100_pci_gart_set_page,
.cp_commit = &r100_cp_commit,
@@ -164,6 +165,12 @@ static struct radeon_asic r100_asic = {
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
.ioctl_wait_idle = NULL,
+ .gui_idle = &r100_gui_idle,
+ .pm_misc = &r100_pm_misc,
+ .pm_prepare = &r100_pm_prepare,
+ .pm_finish = &r100_pm_finish,
+ .pm_init_profile = &r100_pm_init_profile,
+ .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
};
static struct radeon_asic r200_asic = {
@@ -172,7 +179,8 @@ static struct radeon_asic r200_asic = {
.suspend = &r100_suspend,
.resume = &r100_resume,
.vga_set_state = &r100_vga_set_state,
- .gpu_reset = &r100_gpu_reset,
+ .gpu_is_lockup = &r100_gpu_is_lockup,
+ .asic_reset = &r100_asic_reset,
.gart_tlb_flush = &r100_pci_gart_tlb_flush,
.gart_set_page = &r100_pci_gart_set_page,
.cp_commit = &r100_cp_commit,
@@ -201,6 +209,12 @@ static struct radeon_asic r200_asic = {
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
.ioctl_wait_idle = NULL,
+ .gui_idle = &r100_gui_idle,
+ .pm_misc = &r100_pm_misc,
+ .pm_prepare = &r100_pm_prepare,
+ .pm_finish = &r100_pm_finish,
+ .pm_init_profile = &r100_pm_init_profile,
+ .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
};
static struct radeon_asic r300_asic = {
@@ -209,7 +223,8 @@ static struct radeon_asic r300_asic = {
.suspend = &r300_suspend,
.resume = &r300_resume,
.vga_set_state = &r100_vga_set_state,
- .gpu_reset = &r300_gpu_reset,
+ .gpu_is_lockup = &r300_gpu_is_lockup,
+ .asic_reset = &r300_asic_reset,
.gart_tlb_flush = &r100_pci_gart_tlb_flush,
.gart_set_page = &r100_pci_gart_set_page,
.cp_commit = &r100_cp_commit,
@@ -239,6 +254,12 @@ static struct radeon_asic r300_asic = {
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
.ioctl_wait_idle = NULL,
+ .gui_idle = &r100_gui_idle,
+ .pm_misc = &r100_pm_misc,
+ .pm_prepare = &r100_pm_prepare,
+ .pm_finish = &r100_pm_finish,
+ .pm_init_profile = &r100_pm_init_profile,
+ .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
};
static struct radeon_asic r300_asic_pcie = {
@@ -247,7 +268,8 @@ static struct radeon_asic r300_asic_pcie = {
.suspend = &r300_suspend,
.resume = &r300_resume,
.vga_set_state = &r100_vga_set_state,
- .gpu_reset = &r300_gpu_reset,
+ .gpu_is_lockup = &r300_gpu_is_lockup,
+ .asic_reset = &r300_asic_reset,
.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
.gart_set_page = &rv370_pcie_gart_set_page,
.cp_commit = &r100_cp_commit,
@@ -276,6 +298,12 @@ static struct radeon_asic r300_asic_pcie = {
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
.ioctl_wait_idle = NULL,
+ .gui_idle = &r100_gui_idle,
+ .pm_misc = &r100_pm_misc,
+ .pm_prepare = &r100_pm_prepare,
+ .pm_finish = &r100_pm_finish,
+ .pm_init_profile = &r100_pm_init_profile,
+ .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
};
static struct radeon_asic r420_asic = {
@@ -284,7 +312,8 @@ static struct radeon_asic r420_asic = {
.suspend = &r420_suspend,
.resume = &r420_resume,
.vga_set_state = &r100_vga_set_state,
- .gpu_reset = &r300_gpu_reset,
+ .gpu_is_lockup = &r300_gpu_is_lockup,
+ .asic_reset = &r300_asic_reset,
.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
.gart_set_page = &rv370_pcie_gart_set_page,
.cp_commit = &r100_cp_commit,
@@ -314,6 +343,12 @@ static struct radeon_asic r420_asic = {
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
.ioctl_wait_idle = NULL,
+ .gui_idle = &r100_gui_idle,
+ .pm_misc = &r100_pm_misc,
+ .pm_prepare = &r100_pm_prepare,
+ .pm_finish = &r100_pm_finish,
+ .pm_init_profile = &r420_pm_init_profile,
+ .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
};
static struct radeon_asic rs400_asic = {
@@ -322,7 +357,8 @@ static struct radeon_asic rs400_asic = {
.suspend = &rs400_suspend,
.resume = &rs400_resume,
.vga_set_state = &r100_vga_set_state,
- .gpu_reset = &r300_gpu_reset,
+ .gpu_is_lockup = &r300_gpu_is_lockup,
+ .asic_reset = &r300_asic_reset,
.gart_tlb_flush = &rs400_gart_tlb_flush,
.gart_set_page = &rs400_gart_set_page,
.cp_commit = &r100_cp_commit,
@@ -352,6 +388,12 @@ static struct radeon_asic rs400_asic = {
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
.ioctl_wait_idle = NULL,
+ .gui_idle = &r100_gui_idle,
+ .pm_misc = &r100_pm_misc,
+ .pm_prepare = &r100_pm_prepare,
+ .pm_finish = &r100_pm_finish,
+ .pm_init_profile = &r100_pm_init_profile,
+ .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
};
static struct radeon_asic rs600_asic = {
@@ -360,7 +402,8 @@ static struct radeon_asic rs600_asic = {
.suspend = &rs600_suspend,
.resume = &rs600_resume,
.vga_set_state = &r100_vga_set_state,
- .gpu_reset = &r300_gpu_reset,
+ .gpu_is_lockup = &r300_gpu_is_lockup,
+ .asic_reset = &rs600_asic_reset,
.gart_tlb_flush = &rs600_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
.cp_commit = &r100_cp_commit,
@@ -390,6 +433,12 @@ static struct radeon_asic rs600_asic = {
.hpd_sense = &rs600_hpd_sense,
.hpd_set_polarity = &rs600_hpd_set_polarity,
.ioctl_wait_idle = NULL,
+ .gui_idle = &r100_gui_idle,
+ .pm_misc = &rs600_pm_misc,
+ .pm_prepare = &rs600_pm_prepare,
+ .pm_finish = &rs600_pm_finish,
+ .pm_init_profile = &r420_pm_init_profile,
+ .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
};
static struct radeon_asic rs690_asic = {
@@ -398,7 +447,8 @@ static struct radeon_asic rs690_asic = {
.suspend = &rs690_suspend,
.resume = &rs690_resume,
.vga_set_state = &r100_vga_set_state,
- .gpu_reset = &r300_gpu_reset,
+ .gpu_is_lockup = &r300_gpu_is_lockup,
+ .asic_reset = &rs600_asic_reset,
.gart_tlb_flush = &rs400_gart_tlb_flush,
.gart_set_page = &rs400_gart_set_page,
.cp_commit = &r100_cp_commit,
@@ -428,6 +478,12 @@ static struct radeon_asic rs690_asic = {
.hpd_sense = &rs600_hpd_sense,
.hpd_set_polarity = &rs600_hpd_set_polarity,
.ioctl_wait_idle = NULL,
+ .gui_idle = &r100_gui_idle,
+ .pm_misc = &rs600_pm_misc,
+ .pm_prepare = &rs600_pm_prepare,
+ .pm_finish = &rs600_pm_finish,
+ .pm_init_profile = &r420_pm_init_profile,
+ .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
};
static struct radeon_asic rv515_asic = {
@@ -436,7 +492,8 @@ static struct radeon_asic rv515_asic = {
.suspend = &rv515_suspend,
.resume = &rv515_resume,
.vga_set_state = &r100_vga_set_state,
- .gpu_reset = &rv515_gpu_reset,
+ .gpu_is_lockup = &r300_gpu_is_lockup,
+ .asic_reset = &rs600_asic_reset,
.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
.gart_set_page = &rv370_pcie_gart_set_page,
.cp_commit = &r100_cp_commit,
@@ -466,6 +523,12 @@ static struct radeon_asic rv515_asic = {
.hpd_sense = &rs600_hpd_sense,
.hpd_set_polarity = &rs600_hpd_set_polarity,
.ioctl_wait_idle = NULL,
+ .gui_idle = &r100_gui_idle,
+ .pm_misc = &rs600_pm_misc,
+ .pm_prepare = &rs600_pm_prepare,
+ .pm_finish = &rs600_pm_finish,
+ .pm_init_profile = &r420_pm_init_profile,
+ .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
};
static struct radeon_asic r520_asic = {
@@ -474,7 +537,8 @@ static struct radeon_asic r520_asic = {
.suspend = &rv515_suspend,
.resume = &r520_resume,
.vga_set_state = &r100_vga_set_state,
- .gpu_reset = &rv515_gpu_reset,
+ .gpu_is_lockup = &r300_gpu_is_lockup,
+ .asic_reset = &rs600_asic_reset,
.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
.gart_set_page = &rv370_pcie_gart_set_page,
.cp_commit = &r100_cp_commit,
@@ -504,6 +568,12 @@ static struct radeon_asic r520_asic = {
.hpd_sense = &rs600_hpd_sense,
.hpd_set_polarity = &rs600_hpd_set_polarity,
.ioctl_wait_idle = NULL,
+ .gui_idle = &r100_gui_idle,
+ .pm_misc = &rs600_pm_misc,
+ .pm_prepare = &rs600_pm_prepare,
+ .pm_finish = &rs600_pm_finish,
+ .pm_init_profile = &r420_pm_init_profile,
+ .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
};
static struct radeon_asic r600_asic = {
@@ -513,7 +583,8 @@ static struct radeon_asic r600_asic = {
.resume = &r600_resume,
.cp_commit = &r600_cp_commit,
.vga_set_state = &r600_vga_set_state,
- .gpu_reset = &r600_gpu_reset,
+ .gpu_is_lockup = &r600_gpu_is_lockup,
+ .asic_reset = &r600_asic_reset,
.gart_tlb_flush = &r600_pcie_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
.ring_test = &r600_ring_test,
@@ -541,6 +612,12 @@ static struct radeon_asic r600_asic = {
.hpd_sense = &r600_hpd_sense,
.hpd_set_polarity = &r600_hpd_set_polarity,
.ioctl_wait_idle = r600_ioctl_wait_idle,
+ .gui_idle = &r600_gui_idle,
+ .pm_misc = &r600_pm_misc,
+ .pm_prepare = &rs600_pm_prepare,
+ .pm_finish = &rs600_pm_finish,
+ .pm_init_profile = &r600_pm_init_profile,
+ .pm_get_dynpm_state = &r600_pm_get_dynpm_state,
};
static struct radeon_asic rs780_asic = {
@@ -549,8 +626,9 @@ static struct radeon_asic rs780_asic = {
.suspend = &r600_suspend,
.resume = &r600_resume,
.cp_commit = &r600_cp_commit,
+ .gpu_is_lockup = &r600_gpu_is_lockup,
.vga_set_state = &r600_vga_set_state,
- .gpu_reset = &r600_gpu_reset,
+ .asic_reset = &r600_asic_reset,
.gart_tlb_flush = &r600_pcie_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
.ring_test = &r600_ring_test,
@@ -578,6 +656,12 @@ static struct radeon_asic rs780_asic = {
.hpd_sense = &r600_hpd_sense,
.hpd_set_polarity = &r600_hpd_set_polarity,
.ioctl_wait_idle = r600_ioctl_wait_idle,
+ .gui_idle = &r600_gui_idle,
+ .pm_misc = &r600_pm_misc,
+ .pm_prepare = &rs600_pm_prepare,
+ .pm_finish = &rs600_pm_finish,
+ .pm_init_profile = &rs780_pm_init_profile,
+ .pm_get_dynpm_state = &r600_pm_get_dynpm_state,
};
static struct radeon_asic rv770_asic = {
@@ -586,7 +670,8 @@ static struct radeon_asic rv770_asic = {
.suspend = &rv770_suspend,
.resume = &rv770_resume,
.cp_commit = &r600_cp_commit,
- .gpu_reset = &rv770_gpu_reset,
+ .asic_reset = &r600_asic_reset,
+ .gpu_is_lockup = &r600_gpu_is_lockup,
.vga_set_state = &r600_vga_set_state,
.gart_tlb_flush = &r600_pcie_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
@@ -615,6 +700,12 @@ static struct radeon_asic rv770_asic = {
.hpd_sense = &r600_hpd_sense,
.hpd_set_polarity = &r600_hpd_set_polarity,
.ioctl_wait_idle = r600_ioctl_wait_idle,
+ .gui_idle = &r600_gui_idle,
+ .pm_misc = &rv770_pm_misc,
+ .pm_prepare = &rs600_pm_prepare,
+ .pm_finish = &rs600_pm_finish,
+ .pm_init_profile = &r600_pm_init_profile,
+ .pm_get_dynpm_state = &r600_pm_get_dynpm_state,
};
static struct radeon_asic evergreen_asic = {
@@ -622,16 +713,17 @@ static struct radeon_asic evergreen_asic = {
.fini = &evergreen_fini,
.suspend = &evergreen_suspend,
.resume = &evergreen_resume,
- .cp_commit = NULL,
- .gpu_reset = &evergreen_gpu_reset,
+ .cp_commit = &r600_cp_commit,
+ .gpu_is_lockup = &evergreen_gpu_is_lockup,
+ .asic_reset = &evergreen_asic_reset,
.vga_set_state = &r600_vga_set_state,
- .gart_tlb_flush = &r600_pcie_gart_tlb_flush,
+ .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
- .ring_test = NULL,
- .ring_ib_execute = NULL,
- .irq_set = NULL,
- .irq_process = NULL,
- .get_vblank_counter = NULL,
+ .ring_test = &r600_ring_test,
+ .ring_ib_execute = &r600_ring_ib_execute,
+ .irq_set = &evergreen_irq_set,
+ .irq_process = &evergreen_irq_process,
+ .get_vblank_counter = &evergreen_get_vblank_counter,
.fence_ring_emit = NULL,
.cs_parse = NULL,
.copy_blit = NULL,
@@ -650,6 +742,12 @@ static struct radeon_asic evergreen_asic = {
.hpd_fini = &evergreen_hpd_fini,
.hpd_sense = &evergreen_hpd_sense,
.hpd_set_polarity = &evergreen_hpd_set_polarity,
+ .gui_idle = &r600_gui_idle,
+ .pm_misc = &evergreen_pm_misc,
+ .pm_prepare = &evergreen_pm_prepare,
+ .pm_finish = &evergreen_pm_finish,
+ .pm_init_profile = &r600_pm_init_profile,
+ .pm_get_dynpm_state = &r600_pm_get_dynpm_state,
};
int radeon_asic_init(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index a0b8280663d1..5c40a3dfaca2 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -60,7 +60,8 @@ int r100_resume(struct radeon_device *rdev);
uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg);
void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void r100_vga_set_state(struct radeon_device *rdev, bool state);
-int r100_gpu_reset(struct radeon_device *rdev);
+bool r100_gpu_is_lockup(struct radeon_device *rdev);
+int r100_asic_reset(struct radeon_device *rdev);
u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc);
void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
@@ -110,8 +111,6 @@ void r100_vram_init_sizes(struct radeon_device *rdev);
void r100_wb_disable(struct radeon_device *rdev);
void r100_wb_fini(struct radeon_device *rdev);
int r100_wb_init(struct radeon_device *rdev);
-void r100_hdp_reset(struct radeon_device *rdev);
-int r100_rb2d_reset(struct radeon_device *rdev);
int r100_cp_reset(struct radeon_device *rdev);
void r100_vga_render_disable(struct radeon_device *rdev);
int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
@@ -126,6 +125,13 @@ int r100_cs_packet_parse(struct radeon_cs_parser *p,
unsigned idx);
void r100_enable_bm(struct radeon_device *rdev);
void r100_set_common_regs(struct radeon_device *rdev);
+void r100_bm_disable(struct radeon_device *rdev);
+extern bool r100_gui_idle(struct radeon_device *rdev);
+extern void r100_pm_misc(struct radeon_device *rdev);
+extern void r100_pm_prepare(struct radeon_device *rdev);
+extern void r100_pm_finish(struct radeon_device *rdev);
+extern void r100_pm_init_profile(struct radeon_device *rdev);
+extern void r100_pm_get_dynpm_state(struct radeon_device *rdev);
/*
* r200,rv250,rs300,rv280
@@ -134,7 +140,7 @@ extern int r200_copy_dma(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_pages,
- struct radeon_fence *fence);
+ struct radeon_fence *fence);
/*
* r300,r350,rv350,rv380
@@ -143,7 +149,8 @@ extern int r300_init(struct radeon_device *rdev);
extern void r300_fini(struct radeon_device *rdev);
extern int r300_suspend(struct radeon_device *rdev);
extern int r300_resume(struct radeon_device *rdev);
-extern int r300_gpu_reset(struct radeon_device *rdev);
+extern bool r300_gpu_is_lockup(struct radeon_device *rdev);
+extern int r300_asic_reset(struct radeon_device *rdev);
extern void r300_ring_start(struct radeon_device *rdev);
extern void r300_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
@@ -162,6 +169,7 @@ extern int r420_init(struct radeon_device *rdev);
extern void r420_fini(struct radeon_device *rdev);
extern int r420_suspend(struct radeon_device *rdev);
extern int r420_resume(struct radeon_device *rdev);
+extern void r420_pm_init_profile(struct radeon_device *rdev);
/*
* rs400,rs480
@@ -178,6 +186,7 @@ void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
/*
* rs600.
*/
+extern int rs600_asic_reset(struct radeon_device *rdev);
extern int rs600_init(struct radeon_device *rdev);
extern void rs600_fini(struct radeon_device *rdev);
extern int rs600_suspend(struct radeon_device *rdev);
@@ -195,6 +204,9 @@ void rs600_hpd_fini(struct radeon_device *rdev);
bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
void rs600_hpd_set_polarity(struct radeon_device *rdev,
enum radeon_hpd_id hpd);
+extern void rs600_pm_misc(struct radeon_device *rdev);
+extern void rs600_pm_prepare(struct radeon_device *rdev);
+extern void rs600_pm_finish(struct radeon_device *rdev);
/*
* rs690,rs740
@@ -212,7 +224,6 @@ void rs690_bandwidth_update(struct radeon_device *rdev);
*/
int rv515_init(struct radeon_device *rdev);
void rv515_fini(struct radeon_device *rdev);
-int rv515_gpu_reset(struct radeon_device *rdev);
uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void rv515_ring_start(struct radeon_device *rdev);
@@ -252,7 +263,8 @@ int r600_copy_dma(struct radeon_device *rdev,
struct radeon_fence *fence);
int r600_irq_process(struct radeon_device *rdev);
int r600_irq_set(struct radeon_device *rdev);
-int r600_gpu_reset(struct radeon_device *rdev);
+bool r600_gpu_is_lockup(struct radeon_device *rdev);
+int r600_asic_reset(struct radeon_device *rdev);
int r600_set_surface_reg(struct radeon_device *rdev, int reg,
uint32_t tiling_flags, uint32_t pitch,
uint32_t offset, uint32_t obj_size);
@@ -268,6 +280,11 @@ bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
void r600_hpd_set_polarity(struct radeon_device *rdev,
enum radeon_hpd_id hpd);
extern void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo);
+extern bool r600_gui_idle(struct radeon_device *rdev);
+extern void r600_pm_misc(struct radeon_device *rdev);
+extern void r600_pm_init_profile(struct radeon_device *rdev);
+extern void rs780_pm_init_profile(struct radeon_device *rdev);
+extern void r600_pm_get_dynpm_state(struct radeon_device *rdev);
/*
* rv770,rv730,rv710,rv740
@@ -276,20 +293,29 @@ int rv770_init(struct radeon_device *rdev);
void rv770_fini(struct radeon_device *rdev);
int rv770_suspend(struct radeon_device *rdev);
int rv770_resume(struct radeon_device *rdev);
-int rv770_gpu_reset(struct radeon_device *rdev);
+extern void rv770_pm_misc(struct radeon_device *rdev);
/*
* evergreen
*/
+void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev);
int evergreen_init(struct radeon_device *rdev);
void evergreen_fini(struct radeon_device *rdev);
int evergreen_suspend(struct radeon_device *rdev);
int evergreen_resume(struct radeon_device *rdev);
-int evergreen_gpu_reset(struct radeon_device *rdev);
+bool evergreen_gpu_is_lockup(struct radeon_device *rdev);
+int evergreen_asic_reset(struct radeon_device *rdev);
void evergreen_bandwidth_update(struct radeon_device *rdev);
void evergreen_hpd_init(struct radeon_device *rdev);
void evergreen_hpd_fini(struct radeon_device *rdev);
bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
void evergreen_hpd_set_polarity(struct radeon_device *rdev,
enum radeon_hpd_id hpd);
+u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc);
+int evergreen_irq_set(struct radeon_device *rdev);
+int evergreen_irq_process(struct radeon_device *rdev);
+extern void evergreen_pm_misc(struct radeon_device *rdev);
+extern void evergreen_pm_prepare(struct radeon_device *rdev);
+extern void evergreen_pm_finish(struct radeon_device *rdev);
+
#endif
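The prototype changes above mirror the asic-table updates earlier in the patch: the single *_gpu_reset() entry point becomes a *_gpu_is_lockup() check plus an *_asic_reset() recovery hook. A minimal sketch of the intended caller pattern, in plain C with illustrative names (struct asic_ops, recover_if_hung and the void * handle are stand-ins, not driver types):

#include <stdbool.h>

struct asic_ops {
	bool (*gpu_is_lockup)(void *dev);   /* cheap detection, no side effects */
	int  (*asic_reset)(void *dev);      /* heavy-handed recovery path       */
};

static int recover_if_hung(const struct asic_ops *ops, void *dev)
{
	if (!ops->gpu_is_lockup(dev))
		return 0;                   /* still making progress, do nothing */
	return ops->asic_reset(dev);        /* reset only a confirmed lockup     */
}

Keeping detection and recovery separate lets callers poll for lockups cheaply and only pay for a full ASIC reset once a hang is confirmed.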
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 9916d825401c..24ea683f7cf5 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -530,6 +530,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
}
/* look up gpio for ddc, hpd */
+ ddc_bus.valid = false;
+ hpd.hpd = RADEON_HPD_NONE;
if ((le16_to_cpu(path->usDeviceTag) &
(ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) == 0) {
for (j = 0; j < con_obj->ucNumberOfObjects; j++) {
@@ -547,7 +549,6 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
ATOM_I2C_RECORD *i2c_record;
ATOM_HPD_INT_RECORD *hpd_record;
ATOM_I2C_ID_CONFIG_ACCESS *i2c_config;
- hpd.hpd = RADEON_HPD_NONE;
while (record->ucRecordType > 0
&& record->
@@ -585,13 +586,10 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
break;
}
}
- } else {
- hpd.hpd = RADEON_HPD_NONE;
- ddc_bus.valid = false;
}
/* needed for aux chan transactions */
- ddc_bus.hpd_id = hpd.hpd ? (hpd.hpd - 1) : 0;
+ ddc_bus.hpd = hpd.hpd;
conn_id = le16_to_cpu(path->usConnObjectId);
@@ -682,11 +680,19 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
uint8_t dac;
union atom_supported_devices *supported_devices;
int i, j, max_device;
- struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
+ struct bios_connector *bios_connectors;
+ size_t bc_size = sizeof(*bios_connectors) * ATOM_MAX_SUPPORTED_DEVICE;
- if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
+ bios_connectors = kzalloc(bc_size, GFP_KERNEL);
+ if (!bios_connectors)
return false;
+ if (!atom_parse_data_header(ctx, index, &size, &frev, &crev,
+ &data_offset)) {
+ kfree(bios_connectors);
+ return false;
+ }
+
supported_devices =
(union atom_supported_devices *)(ctx->bios + data_offset);
@@ -853,6 +859,7 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
radeon_link_encoder_connector(dev);
+ kfree(bios_connectors);
return true;
}
@@ -1174,7 +1181,7 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
lvds->native_mode.vtotal = lvds->native_mode.vdisplay +
le16_to_cpu(lvds_info->info.sLCDTiming.usVBlanking_Time);
lvds->native_mode.vsync_start = lvds->native_mode.vdisplay +
- le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth);
+ le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncOffset);
lvds->native_mode.vsync_end = lvds->native_mode.vsync_start +
le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth);
lvds->panel_pwr_delay =
@@ -1442,26 +1449,30 @@ radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder)
static const char *thermal_controller_names[] = {
"NONE",
- "LM63",
- "ADM1032",
- "ADM1030",
- "MUA6649",
- "LM64",
- "F75375",
- "ASC7512",
+ "lm63",
+ "adm1032",
+ "adm1030",
+ "max6649",
+ "lm64",
+ "f75375",
+ "asc7xxx",
};
static const char *pp_lib_thermal_controller_names[] = {
"NONE",
- "LM63",
- "ADM1032",
- "ADM1030",
- "MUA6649",
- "LM64",
- "F75375",
+ "lm63",
+ "adm1032",
+ "adm1030",
+ "max6649",
+ "lm64",
+ "f75375",
"RV6xx",
"RV770",
- "ADT7473",
+ "adt7473",
+ "External GPIO",
+ "Evergreen",
+ "adt7473 with internal",
+
};
union power_info {
@@ -1485,7 +1496,7 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
int state_index = 0, mode_index = 0;
struct radeon_i2c_bus_rec i2c_bus;
- rdev->pm.default_power_state = NULL;
+ rdev->pm.default_power_state_index = -1;
if (atom_parse_data_header(mode_info->atom_context, index, NULL,
&frev, &crev, &data_offset)) {
@@ -1498,10 +1509,19 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
power_info->info.ucOverdriveControllerAddress >> 1);
i2c_bus = radeon_lookup_i2c_gpio(rdev, power_info->info.ucOverdriveI2cLine);
rdev->pm.i2c_bus = radeon_i2c_create(rdev->ddev, &i2c_bus, "Thermal");
+ if (rdev->pm.i2c_bus) {
+ struct i2c_board_info info = { };
+ const char *name = thermal_controller_names[power_info->info.
+ ucOverdriveThermalController];
+ info.addr = power_info->info.ucOverdriveControllerAddress >> 1;
+ strlcpy(info.type, name, sizeof(info.type));
+ i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
+ }
}
num_modes = power_info->info.ucNumOfPowerModeEntries;
if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK)
num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK;
+ /* last mode is usually default, array is low to high */
for (i = 0; i < num_modes; i++) {
rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
switch (frev) {
@@ -1515,13 +1535,7 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
(rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
continue;
- /* skip overclock modes for now */
- if ((rdev->pm.power_state[state_index].clock_info[0].mclk >
- rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) ||
- (rdev->pm.power_state[state_index].clock_info[0].sclk >
- rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN))
- continue;
- rdev->pm.power_state[state_index].non_clock_info.pcie_lanes =
+ rdev->pm.power_state[state_index].pcie_lanes =
power_info->info.asPowerPlayInfo[i].ucNumPciELanes;
misc = le32_to_cpu(power_info->info.asPowerPlayInfo[i].ulMiscInfo);
if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
@@ -1542,6 +1556,8 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex;
}
+ rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+ rdev->pm.power_state[state_index].misc = misc;
/* order matters! */
if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE)
rdev->pm.power_state[state_index].type =
@@ -1555,15 +1571,23 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN)
rdev->pm.power_state[state_index].type =
POWER_STATE_TYPE_BALANCED;
- if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN)
+ if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) {
rdev->pm.power_state[state_index].type =
POWER_STATE_TYPE_PERFORMANCE;
+ rdev->pm.power_state[state_index].flags &=
+ ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+ }
if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) {
rdev->pm.power_state[state_index].type =
POWER_STATE_TYPE_DEFAULT;
- rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
+ rdev->pm.default_power_state_index = state_index;
rdev->pm.power_state[state_index].default_clock_mode =
&rdev->pm.power_state[state_index].clock_info[0];
+ rdev->pm.power_state[state_index].flags &=
+ ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+ } else if (state_index == 0) {
+ rdev->pm.power_state[state_index].clock_info[0].flags |=
+ RADEON_PM_MODE_NO_DISPLAY;
}
state_index++;
break;
@@ -1577,13 +1601,7 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
(rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
continue;
- /* skip overclock modes for now */
- if ((rdev->pm.power_state[state_index].clock_info[0].mclk >
- rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) ||
- (rdev->pm.power_state[state_index].clock_info[0].sclk >
- rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN))
- continue;
- rdev->pm.power_state[state_index].non_clock_info.pcie_lanes =
+ rdev->pm.power_state[state_index].pcie_lanes =
power_info->info_2.asPowerPlayInfo[i].ucNumPciELanes;
misc = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo);
misc2 = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo2);
@@ -1605,6 +1623,9 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex;
}
+ rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+ rdev->pm.power_state[state_index].misc = misc;
+ rdev->pm.power_state[state_index].misc2 = misc2;
/* order matters! */
if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE)
rdev->pm.power_state[state_index].type =
@@ -1618,18 +1639,29 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN)
rdev->pm.power_state[state_index].type =
POWER_STATE_TYPE_BALANCED;
- if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN)
+ if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) {
rdev->pm.power_state[state_index].type =
POWER_STATE_TYPE_PERFORMANCE;
+ rdev->pm.power_state[state_index].flags &=
+ ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+ }
if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE)
rdev->pm.power_state[state_index].type =
POWER_STATE_TYPE_BALANCED;
+ if (misc2 & ATOM_PM_MISCINFO2_MULTI_DISPLAY_SUPPORT)
+ rdev->pm.power_state[state_index].flags &=
+ ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) {
rdev->pm.power_state[state_index].type =
POWER_STATE_TYPE_DEFAULT;
- rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
+ rdev->pm.default_power_state_index = state_index;
rdev->pm.power_state[state_index].default_clock_mode =
&rdev->pm.power_state[state_index].clock_info[0];
+ rdev->pm.power_state[state_index].flags &=
+ ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+ } else if (state_index == 0) {
+ rdev->pm.power_state[state_index].clock_info[0].flags |=
+ RADEON_PM_MODE_NO_DISPLAY;
}
state_index++;
break;
@@ -1643,13 +1675,7 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
(rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
continue;
- /* skip overclock modes for now */
- if ((rdev->pm.power_state[state_index].clock_info[0].mclk >
- rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) ||
- (rdev->pm.power_state[state_index].clock_info[0].sclk >
- rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN))
- continue;
- rdev->pm.power_state[state_index].non_clock_info.pcie_lanes =
+ rdev->pm.power_state[state_index].pcie_lanes =
power_info->info_3.asPowerPlayInfo[i].ucNumPciELanes;
misc = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo);
misc2 = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo2);
@@ -1677,6 +1703,9 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
power_info->info_3.asPowerPlayInfo[i].ucVDDCI_VoltageDropIndex;
}
}
+ rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+ rdev->pm.power_state[state_index].misc = misc;
+ rdev->pm.power_state[state_index].misc2 = misc2;
/* order matters! */
if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE)
rdev->pm.power_state[state_index].type =
@@ -1690,42 +1719,76 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN)
rdev->pm.power_state[state_index].type =
POWER_STATE_TYPE_BALANCED;
- if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN)
+ if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) {
rdev->pm.power_state[state_index].type =
POWER_STATE_TYPE_PERFORMANCE;
+ rdev->pm.power_state[state_index].flags &=
+ ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+ }
if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE)
rdev->pm.power_state[state_index].type =
POWER_STATE_TYPE_BALANCED;
if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) {
rdev->pm.power_state[state_index].type =
POWER_STATE_TYPE_DEFAULT;
- rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
+ rdev->pm.default_power_state_index = state_index;
rdev->pm.power_state[state_index].default_clock_mode =
&rdev->pm.power_state[state_index].clock_info[0];
+ } else if (state_index == 0) {
+ rdev->pm.power_state[state_index].clock_info[0].flags |=
+ RADEON_PM_MODE_NO_DISPLAY;
}
state_index++;
break;
}
}
- } else if (frev == 4) {
+ /* last mode is usually default */
+ if (rdev->pm.default_power_state_index == -1) {
+ rdev->pm.power_state[state_index - 1].type =
+ POWER_STATE_TYPE_DEFAULT;
+ rdev->pm.default_power_state_index = state_index - 1;
+ rdev->pm.power_state[state_index - 1].default_clock_mode =
+ &rdev->pm.power_state[state_index - 1].clock_info[0];
+ rdev->pm.power_state[state_index].flags &=
+ ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+ rdev->pm.power_state[state_index].misc = 0;
+ rdev->pm.power_state[state_index].misc2 = 0;
+ }
+ } else {
/* add the i2c bus for thermal/fan chip */
/* no support for internal controller yet */
- if (power_info->info_4.sThermalController.ucType > 0) {
- if ((power_info->info_4.sThermalController.ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) ||
- (power_info->info_4.sThermalController.ucType == ATOM_PP_THERMALCONTROLLER_RV770)) {
+ ATOM_PPLIB_THERMALCONTROLLER *controller = &power_info->info_4.sThermalController;
+ if (controller->ucType > 0) {
+ if ((controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) ||
+ (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) ||
+ (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN)) {
DRM_INFO("Internal thermal controller %s fan control\n",
- (power_info->info_4.sThermalController.ucFanParameters &
+ (controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ } else if ((controller->ucType ==
+ ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) ||
+ (controller->ucType ==
+ ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL)) {
+ DRM_INFO("Special thermal controller config\n");
} else {
DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
- pp_lib_thermal_controller_names[power_info->info_4.sThermalController.ucType],
- power_info->info_4.sThermalController.ucI2cAddress >> 1,
- (power_info->info_4.sThermalController.ucFanParameters &
+ pp_lib_thermal_controller_names[controller->ucType],
+ controller->ucI2cAddress >> 1,
+ (controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
- i2c_bus = radeon_lookup_i2c_gpio(rdev, power_info->info_4.sThermalController.ucI2cLine);
+ i2c_bus = radeon_lookup_i2c_gpio(rdev, controller->ucI2cLine);
rdev->pm.i2c_bus = radeon_i2c_create(rdev->ddev, &i2c_bus, "Thermal");
+ if (rdev->pm.i2c_bus) {
+ struct i2c_board_info info = { };
+ const char *name = pp_lib_thermal_controller_names[controller->ucType];
+ info.addr = controller->ucI2cAddress >> 1;
+ strlcpy(info.type, name, sizeof(info.type));
+ i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
+ }
+
}
}
+ /* first mode is usually default, followed by low to high */
for (i = 0; i < power_info->info_4.ucNumStates; i++) {
mode_index = 0;
power_state = (struct _ATOM_PPLIB_STATE *)
@@ -1754,14 +1817,34 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
/* skip invalid modes */
if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0)
continue;
- /* skip overclock modes for now */
- if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk >
- rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN)
+ rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
+ VOLTAGE_SW;
+ rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
+ clock_info->usVDDC;
+ mode_index++;
+ } else if (ASIC_IS_DCE4(rdev)) {
+ struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO *clock_info =
+ (struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO *)
+ (mode_info->atom_context->bios +
+ data_offset +
+ le16_to_cpu(power_info->info_4.usClockInfoArrayOffset) +
+ (power_state->ucClockStateIndices[j] *
+ power_info->info_4.ucClockInfoSize));
+ sclk = le16_to_cpu(clock_info->usEngineClockLow);
+ sclk |= clock_info->ucEngineClockHigh << 16;
+ mclk = le16_to_cpu(clock_info->usMemoryClockLow);
+ mclk |= clock_info->ucMemoryClockHigh << 16;
+ rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk;
+ rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
+ /* skip invalid modes */
+ if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk == 0) ||
+ (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0))
continue;
rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
VOLTAGE_SW;
rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
clock_info->usVDDC;
+ /* XXX usVDDCI */
mode_index++;
} else {
struct _ATOM_PPLIB_R600_CLOCK_INFO *clock_info =
@@ -1781,12 +1864,6 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk == 0) ||
(rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0))
continue;
- /* skip overclock modes for now */
- if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk >
- rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) ||
- (rdev->pm.power_state[state_index].clock_info[mode_index].sclk >
- rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN))
- continue;
rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
VOLTAGE_SW;
rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
@@ -1798,7 +1875,9 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
if (mode_index) {
misc = le32_to_cpu(non_clock_info->ulCapsAndSettings);
misc2 = le16_to_cpu(non_clock_info->usClassification);
- rdev->pm.power_state[state_index].non_clock_info.pcie_lanes =
+ rdev->pm.power_state[state_index].misc = misc;
+ rdev->pm.power_state[state_index].misc2 = misc2;
+ rdev->pm.power_state[state_index].pcie_lanes =
((misc & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >>
ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
switch (misc2 & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
@@ -1815,22 +1894,36 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
POWER_STATE_TYPE_PERFORMANCE;
break;
}
+ rdev->pm.power_state[state_index].flags = 0;
+ if (misc & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
+ rdev->pm.power_state[state_index].flags |=
+ RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
if (misc2 & ATOM_PPLIB_CLASSIFICATION_BOOT) {
rdev->pm.power_state[state_index].type =
POWER_STATE_TYPE_DEFAULT;
- rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
+ rdev->pm.default_power_state_index = state_index;
rdev->pm.power_state[state_index].default_clock_mode =
&rdev->pm.power_state[state_index].clock_info[mode_index - 1];
}
state_index++;
}
}
+ /* if multiple clock modes, mark the lowest as no display */
+ for (i = 0; i < state_index; i++) {
+ if (rdev->pm.power_state[i].num_clock_modes > 1)
+ rdev->pm.power_state[i].clock_info[0].flags |=
+ RADEON_PM_MODE_NO_DISPLAY;
+ }
+ /* first mode is usually default */
+ if (rdev->pm.default_power_state_index == -1) {
+ rdev->pm.power_state[0].type =
+ POWER_STATE_TYPE_DEFAULT;
+ rdev->pm.default_power_state_index = 0;
+ rdev->pm.power_state[0].default_clock_mode =
+ &rdev->pm.power_state[0].clock_info[0];
+ }
}
} else {
- /* XXX figure out some good default low power mode for cards w/out power tables */
- }
-
- if (rdev->pm.default_power_state == NULL) {
/* add the default mode */
rdev->pm.power_state[state_index].type =
POWER_STATE_TYPE_DEFAULT;
@@ -1840,18 +1933,16 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
rdev->pm.power_state[state_index].default_clock_mode =
&rdev->pm.power_state[state_index].clock_info[0];
rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
- if (rdev->asic->get_pcie_lanes)
- rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = radeon_get_pcie_lanes(rdev);
- else
- rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = 16;
- rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
+ rdev->pm.power_state[state_index].pcie_lanes = 16;
+ rdev->pm.default_power_state_index = state_index;
+ rdev->pm.power_state[state_index].flags = 0;
state_index++;
}
+
rdev->pm.num_power_states = state_index;
- rdev->pm.current_power_state = rdev->pm.default_power_state;
- rdev->pm.current_clock_mode =
- rdev->pm.default_power_state->default_clock_mode;
+ rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
+ rdev->pm.current_clock_mode_index = 0;
}
void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable)
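One detail worth calling out in the hunks above: the thermal-controller name tables are lower-cased, and the selected name is copied into i2c_board_info.type before i2c_new_device() registers the chip, presumably so the string can match the id tables of the corresponding hwmon i2c drivers, whose entries are conventionally lower-case. A rough, self-contained illustration of that name matching (the table contents here are examples, not the kernel's):

#include <string.h>

struct id_entry { const char *name; };

/* example id table an i2c driver might expose */
static const struct id_entry hwmon_ids[] = {
	{ "lm63" }, { "adt7473" }, { 0 }
};

static int id_matches(const char *type)
{
	const struct id_entry *id;

	for (id = hwmon_ids; id->name; id++)
		if (!strcmp(id->name, type))   /* exact, case-sensitive match */
			return 1;
	return 0;
}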
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index 8ad71f701316..fbba938f8048 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -85,12 +85,11 @@ static bool radeon_read_bios(struct radeon_device *rdev)
pci_unmap_rom(rdev->pdev, bios);
return false;
}
- rdev->bios = kmalloc(size, GFP_KERNEL);
+ rdev->bios = kmemdup(bios, size, GFP_KERNEL);
if (rdev->bios == NULL) {
pci_unmap_rom(rdev->pdev, bios);
return false;
}
- memcpy(rdev->bios, bios, size);
pci_unmap_rom(rdev->pdev, bios);
return true;
}
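The radeon_bios.c change above is a straight substitution: kmemdup(), a kernel helper, allocates a buffer and copies into it in one call, replacing the kmalloc()-plus-memcpy() pair. A userspace stand-in showing the same shape:

#include <stdlib.h>
#include <string.h>

/* Userspace analogue of kmemdup(): allocate 'len' bytes and copy them
 * from 'src', or return NULL on allocation failure. */
static void *memdup(const void *src, size_t len)
{
	void *dst = malloc(len);

	if (dst)
		memcpy(dst, src, len);
	return dst;
}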
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 37db8adb2748..7b5e10d3e9c9 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -450,17 +450,17 @@ bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev)
{
int edid_info;
struct edid *edid;
+ unsigned char *raw;
edid_info = combios_get_table_offset(rdev->ddev, COMBIOS_HARDCODED_EDID_TABLE);
if (!edid_info)
return false;
- edid = kmalloc(EDID_LENGTH * (DRM_MAX_EDID_EXT_NUM + 1),
- GFP_KERNEL);
+ raw = rdev->bios + edid_info;
+ edid = kmalloc(EDID_LENGTH * (raw[0x7e] + 1), GFP_KERNEL);
if (edid == NULL)
return false;
- memcpy((unsigned char *)edid,
- (unsigned char *)(rdev->bios + edid_info), EDID_LENGTH);
+ memcpy((unsigned char *)edid, raw, EDID_LENGTH * (raw[0x7e] + 1));
if (!drm_edid_is_valid(edid)) {
kfree(edid);
@@ -600,7 +600,7 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde
}
i2c.mm_i2c = false;
i2c.i2c_id = 0;
- i2c.hpd_id = 0;
+ i2c.hpd = RADEON_HPD_NONE;
if (ddc_line)
i2c.valid = true;
@@ -1113,18 +1113,20 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder
break;
if ((RBIOS16(tmp) == lvds->native_mode.hdisplay) &&
- (RBIOS16(tmp + 2) ==
- lvds->native_mode.vdisplay)) {
- lvds->native_mode.htotal = RBIOS16(tmp + 17) * 8;
- lvds->native_mode.hsync_start = RBIOS16(tmp + 21) * 8;
- lvds->native_mode.hsync_end = (RBIOS8(tmp + 23) +
- RBIOS16(tmp + 21)) * 8;
-
- lvds->native_mode.vtotal = RBIOS16(tmp + 24);
- lvds->native_mode.vsync_start = RBIOS16(tmp + 28) & 0x7ff;
- lvds->native_mode.vsync_end =
- ((RBIOS16(tmp + 28) & 0xf800) >> 11) +
- (RBIOS16(tmp + 28) & 0x7ff);
+ (RBIOS16(tmp + 2) == lvds->native_mode.vdisplay)) {
+ lvds->native_mode.htotal = lvds->native_mode.hdisplay +
+ (RBIOS16(tmp + 17) - RBIOS16(tmp + 19)) * 8;
+ lvds->native_mode.hsync_start = lvds->native_mode.hdisplay +
+ (RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8;
+ lvds->native_mode.hsync_end = lvds->native_mode.hsync_start +
+ (RBIOS8(tmp + 23) * 8);
+
+ lvds->native_mode.vtotal = lvds->native_mode.vdisplay +
+ (RBIOS16(tmp + 24) - RBIOS16(tmp + 26));
+ lvds->native_mode.vsync_start = lvds->native_mode.vdisplay +
+ ((RBIOS16(tmp + 28) & 0x7ff) - RBIOS16(tmp + 26));
+ lvds->native_mode.vsync_end = lvds->native_mode.vsync_start +
+ ((RBIOS16(tmp + 28) & 0xf800) >> 11);
lvds->native_mode.clock = RBIOS16(tmp + 9) * 10;
lvds->native_mode.flags = 0;
@@ -2196,7 +2198,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
ATOM_DEVICE_DFP1_SUPPORT);
ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
- hpd.hpd = RADEON_HPD_NONE;
+ hpd.hpd = RADEON_HPD_1;
radeon_add_legacy_connector(dev,
0,
ATOM_DEVICE_CRT1_SUPPORT |
@@ -2366,7 +2368,7 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev)
u8 rev, blocks, tmp;
int state_index = 0;
- rdev->pm.default_power_state = NULL;
+ rdev->pm.default_power_state_index = -1;
if (rdev->flags & RADEON_IS_MOBILITY) {
offset = combios_get_table_offset(dev, COMBIOS_POWERPLAY_INFO_TABLE);
@@ -2380,17 +2382,13 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev)
if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
(rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
goto default_mode;
- /* skip overclock modes for now */
- if ((rdev->pm.power_state[state_index].clock_info[0].mclk >
- rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) ||
- (rdev->pm.power_state[state_index].clock_info[0].sclk >
- rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN))
- goto default_mode;
rdev->pm.power_state[state_index].type =
POWER_STATE_TYPE_BATTERY;
misc = RBIOS16(offset + 0x5 + 0x0);
if (rev > 4)
misc2 = RBIOS16(offset + 0x5 + 0xe);
+ rdev->pm.power_state[state_index].misc = misc;
+ rdev->pm.power_state[state_index].misc2 = misc2;
if (misc & 0x4) {
rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_GPIO;
if (misc & 0x8)
@@ -2437,8 +2435,9 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev)
} else
rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
if (rev > 6)
- rdev->pm.power_state[state_index].non_clock_info.pcie_lanes =
+ rdev->pm.power_state[state_index].pcie_lanes =
RBIOS8(offset + 0x5 + 0x10);
+ rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
state_index++;
} else {
/* XXX figure out some good default low power mode for mobility cards w/out power tables */
@@ -2456,16 +2455,13 @@ default_mode:
rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk;
rdev->pm.power_state[state_index].default_clock_mode = &rdev->pm.power_state[state_index].clock_info[0];
rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
- if (rdev->asic->get_pcie_lanes)
- rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = radeon_get_pcie_lanes(rdev);
- else
- rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = 16;
- rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
+ rdev->pm.power_state[state_index].pcie_lanes = 16;
+ rdev->pm.power_state[state_index].flags = 0;
+ rdev->pm.default_power_state_index = state_index;
rdev->pm.num_power_states = state_index + 1;
- rdev->pm.current_power_state = rdev->pm.default_power_state;
- rdev->pm.current_clock_mode =
- rdev->pm.default_power_state->default_clock_mode;
+ rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
+ rdev->pm.current_clock_mode_index = 0;
}
void radeon_external_tmds_setup(struct drm_encoder *encoder)
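In the hardcoded-EDID hunk above, the copy size is now taken from the EDID itself: byte 0x7e of the base block holds the number of 128-byte extension blocks that follow, so the total size is 128 * (extensions + 1). A small illustration of that computation (the constants restate the EDID layout rather than coming from driver headers):

#include <stddef.h>

#define EDID_BLOCK_LEN  128    /* every EDID block, base or extension   */
#define EDID_EXT_COUNT  0x7e   /* offset of the extension-count byte    */

/* Total bytes occupied by an EDID: the base block plus one 128-byte
 * block per extension advertised at offset 0x7e. */
static size_t edid_total_size(const unsigned char *raw)
{
	return (size_t)EDID_BLOCK_LEN * (raw[EDID_EXT_COUNT] + 1);
}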
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 4559a53d5e57..0c7ccc6961a3 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -1041,7 +1041,6 @@ radeon_add_atom_connector(struct drm_device *dev,
struct radeon_connector_atom_dig *radeon_dig_connector;
uint32_t subpixel_order = SubPixelNone;
bool shared_ddc = false;
- int ret;
/* fixme - tv/cv/din */
if (connector_type == DRM_MODE_CONNECTOR_Unknown)
@@ -1076,9 +1075,7 @@ radeon_add_atom_connector(struct drm_device *dev,
switch (connector_type) {
case DRM_MODE_CONNECTOR_VGA:
drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
- if (ret)
- goto failed;
+ drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "VGA");
if (!radeon_connector->ddc_bus)
@@ -1088,12 +1085,11 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.load_detect_property,
1);
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
break;
case DRM_MODE_CONNECTOR_DVIA:
drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
- if (ret)
- goto failed;
+ drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
if (!radeon_connector->ddc_bus)
@@ -1113,9 +1109,7 @@ radeon_add_atom_connector(struct drm_device *dev,
radeon_dig_connector->igp_lane_info = igp_lane_info;
radeon_connector->con_priv = radeon_dig_connector;
drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
- if (ret)
- goto failed;
+ drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
if (!radeon_connector->ddc_bus)
@@ -1141,9 +1135,7 @@ radeon_add_atom_connector(struct drm_device *dev,
radeon_dig_connector->igp_lane_info = igp_lane_info;
radeon_connector->con_priv = radeon_dig_connector;
drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
- if (ret)
- goto failed;
+ drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "HDMI");
if (!radeon_connector->ddc_bus)
@@ -1163,9 +1155,7 @@ radeon_add_atom_connector(struct drm_device *dev,
radeon_dig_connector->igp_lane_info = igp_lane_info;
radeon_connector->con_priv = radeon_dig_connector;
drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type);
- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
- if (ret)
- goto failed;
+ drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
if (i2c_bus->valid) {
/* add DP i2c bus */
if (connector_type == DRM_MODE_CONNECTOR_eDP)
@@ -1191,9 +1181,7 @@ radeon_add_atom_connector(struct drm_device *dev,
case DRM_MODE_CONNECTOR_9PinDIN:
if (radeon_tv == 1) {
drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
- if (ret)
- goto failed;
+ drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
radeon_connector->dac_load_detect = true;
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.load_detect_property,
@@ -1211,9 +1199,7 @@ radeon_add_atom_connector(struct drm_device *dev,
radeon_dig_connector->igp_lane_info = igp_lane_info;
radeon_connector->con_priv = radeon_dig_connector;
drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
- if (ret)
- goto failed;
+ drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "LVDS");
if (!radeon_connector->ddc_bus)
@@ -1226,6 +1212,12 @@ radeon_add_atom_connector(struct drm_device *dev,
break;
}
+ if (hpd->hpd == RADEON_HPD_NONE) {
+ if (i2c_bus->valid)
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ } else
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+
connector->display_info.subpixel_order = subpixel_order;
drm_sysfs_connector_add(connector);
return;
@@ -1250,7 +1242,6 @@ radeon_add_legacy_connector(struct drm_device *dev,
struct drm_connector *connector;
struct radeon_connector *radeon_connector;
uint32_t subpixel_order = SubPixelNone;
- int ret;
/* fixme - tv/cv/din */
if (connector_type == DRM_MODE_CONNECTOR_Unknown)
@@ -1278,9 +1269,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
switch (connector_type) {
case DRM_MODE_CONNECTOR_VGA:
drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
- if (ret)
- goto failed;
+ drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "VGA");
if (!radeon_connector->ddc_bus)
@@ -1290,12 +1279,11 @@ radeon_add_legacy_connector(struct drm_device *dev,
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.load_detect_property,
1);
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
break;
case DRM_MODE_CONNECTOR_DVIA:
drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
- if (ret)
- goto failed;
+ drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
if (!radeon_connector->ddc_bus)
@@ -1309,9 +1297,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_DVID:
drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
- if (ret)
- goto failed;
+ drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
if (!radeon_connector->ddc_bus)
@@ -1330,9 +1316,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
case DRM_MODE_CONNECTOR_9PinDIN:
if (radeon_tv == 1) {
drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
- if (ret)
- goto failed;
+ drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
radeon_connector->dac_load_detect = true;
/* RS400,RC410,RS480 chipset seems to report a lot
* of false positive on load detect, we haven't yet
@@ -1351,9 +1335,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
break;
case DRM_MODE_CONNECTOR_LVDS:
drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
- if (ret)
- goto failed;
+ drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "LVDS");
if (!radeon_connector->ddc_bus)
@@ -1366,6 +1348,11 @@ radeon_add_legacy_connector(struct drm_device *dev,
break;
}
+ if (hpd->hpd == RADEON_HPD_NONE) {
+ if (i2c_bus->valid)
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ } else
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
connector->display_info.subpixel_order = subpixel_order;
drm_sysfs_connector_add(connector);
return;
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index f9b0fe002c0a..ae0fb7356e62 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -220,10 +220,6 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
int r;
mutex_lock(&rdev->cs_mutex);
- if (rdev->gpu_lockup) {
- mutex_unlock(&rdev->cs_mutex);
- return -EINVAL;
- }
/* initialize parser */
memset(&parser, 0, sizeof(struct radeon_cs_parser));
parser.filp = filp;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 7b629e305560..fdc3fdf78acb 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -299,24 +299,24 @@ void radeon_update_bandwidth_info(struct radeon_device *rdev)
sclk = radeon_get_engine_clock(rdev);
mclk = rdev->clock.default_mclk;
- a.full = rfixed_const(100);
- rdev->pm.sclk.full = rfixed_const(sclk);
- rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
- rdev->pm.mclk.full = rfixed_const(mclk);
- rdev->pm.mclk.full = rfixed_div(rdev->pm.mclk, a);
+ a.full = dfixed_const(100);
+ rdev->pm.sclk.full = dfixed_const(sclk);
+ rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
+ rdev->pm.mclk.full = dfixed_const(mclk);
+ rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
- a.full = rfixed_const(16);
+ a.full = dfixed_const(16);
/* core_bandwidth = sclk(Mhz) * 16 */
- rdev->pm.core_bandwidth.full = rfixed_div(rdev->pm.sclk, a);
+ rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
} else {
sclk = radeon_get_engine_clock(rdev);
mclk = radeon_get_memory_clock(rdev);
- a.full = rfixed_const(100);
- rdev->pm.sclk.full = rfixed_const(sclk);
- rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
- rdev->pm.mclk.full = rfixed_const(mclk);
- rdev->pm.mclk.full = rfixed_div(rdev->pm.mclk, a);
+ a.full = dfixed_const(100);
+ rdev->pm.sclk.full = dfixed_const(sclk);
+ rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
+ rdev->pm.mclk.full = dfixed_const(mclk);
+ rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
}
}
@@ -599,9 +599,11 @@ int radeon_device_init(struct radeon_device *rdev,
spin_lock_init(&rdev->ih.lock);
mutex_init(&rdev->gem.mutex);
mutex_init(&rdev->pm.mutex);
+ mutex_init(&rdev->vram_mutex);
rwlock_init(&rdev->fence_drv.lock);
INIT_LIST_HEAD(&rdev->gem.objects);
init_waitqueue_head(&rdev->irq.vblank_queue);
+ init_waitqueue_head(&rdev->irq.idle_queue);
/* setup workqueue */
rdev->wq = create_workqueue("radeon");
@@ -671,7 +673,7 @@ int radeon_device_init(struct radeon_device *rdev,
/* Acceleration not working on AGP card try again
* with fallback to PCI or PCIE GART
*/
- radeon_gpu_reset(rdev);
+ radeon_asic_reset(rdev);
radeon_fini(rdev);
radeon_agp_disable(rdev);
r = radeon_init(rdev);
@@ -691,6 +693,8 @@ void radeon_device_fini(struct radeon_device *rdev)
{
DRM_INFO("radeon: finishing device.\n");
rdev->shutdown = true;
+ /* evict vram memory */
+ radeon_bo_evict_vram(rdev);
radeon_fini(rdev);
destroy_workqueue(rdev->wq);
vga_switcheroo_unregister_client(rdev->pdev);
@@ -728,9 +732,10 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
continue;
}
robj = rfb->obj->driver_private;
- if (robj != rdev->fbdev_rbo) {
+ /* don't unpin kernel fb objects */
+ if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
r = radeon_bo_reserve(robj, false);
- if (unlikely(r == 0)) {
+ if (r == 0) {
radeon_bo_unpin(robj);
radeon_bo_unreserve(robj);
}
@@ -743,11 +748,14 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
radeon_save_bios_scratch_regs(rdev);
+ radeon_pm_suspend(rdev);
radeon_suspend(rdev);
radeon_hpd_fini(rdev);
/* evict remaining vram memory */
radeon_bo_evict_vram(rdev);
+ radeon_agp_suspend(rdev);
+
pci_save_state(dev->pdev);
if (state.event == PM_EVENT_SUSPEND) {
/* Shut down the device */
@@ -755,7 +763,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
pci_set_power_state(dev->pdev, PCI_D3hot);
}
acquire_console_sem();
- fb_set_suspend(rdev->fbdev_info, 1);
+ radeon_fbdev_set_suspend(rdev, 1);
release_console_sem();
return 0;
}
@@ -778,8 +786,9 @@ int radeon_resume_kms(struct drm_device *dev)
/* resume AGP if in use */
radeon_agp_resume(rdev);
radeon_resume(rdev);
+ radeon_pm_resume(rdev);
radeon_restore_bios_scratch_regs(rdev);
- fb_set_suspend(rdev->fbdev_info, 0);
+ radeon_fbdev_set_suspend(rdev, 0);
release_console_sem();
/* reset hpd state */
@@ -789,6 +798,26 @@ int radeon_resume_kms(struct drm_device *dev)
return 0;
}
+int radeon_gpu_reset(struct radeon_device *rdev)
+{
+ int r;
+
+ radeon_save_bios_scratch_regs(rdev);
+ radeon_suspend(rdev);
+
+ r = radeon_asic_reset(rdev);
+ if (!r) {
+ dev_info(rdev->dev, "GPU reset succeeded\n");
+ radeon_resume(rdev);
+ radeon_restore_bios_scratch_regs(rdev);
+ drm_helper_resume_force_mode(rdev->ddev);
+ return 0;
+ }
+ /* bad news: how do we report this to userspace? */
+ dev_info(rdev->dev, "GPU reset failed\n");
+ return r;
+}
+
/*
* Debugfs
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index bb1c122cad21..1006549d1570 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -633,37 +633,37 @@ calc_fb_div(struct radeon_pll *pll,
vco_freq = freq * post_div;
/* feedback_divider = vco_freq * ref_div / pll->reference_freq; */
- a.full = rfixed_const(pll->reference_freq);
- feedback_divider.full = rfixed_const(vco_freq);
- feedback_divider.full = rfixed_div(feedback_divider, a);
- a.full = rfixed_const(ref_div);
- feedback_divider.full = rfixed_mul(feedback_divider, a);
+ a.full = dfixed_const(pll->reference_freq);
+ feedback_divider.full = dfixed_const(vco_freq);
+ feedback_divider.full = dfixed_div(feedback_divider, a);
+ a.full = dfixed_const(ref_div);
+ feedback_divider.full = dfixed_mul(feedback_divider, a);
if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
/* feedback_divider = floor((feedback_divider * 10.0) + 0.5) * 0.1; */
- a.full = rfixed_const(10);
- feedback_divider.full = rfixed_mul(feedback_divider, a);
- feedback_divider.full += rfixed_const_half(0);
- feedback_divider.full = rfixed_floor(feedback_divider);
- feedback_divider.full = rfixed_div(feedback_divider, a);
+ a.full = dfixed_const(10);
+ feedback_divider.full = dfixed_mul(feedback_divider, a);
+ feedback_divider.full += dfixed_const_half(0);
+ feedback_divider.full = dfixed_floor(feedback_divider);
+ feedback_divider.full = dfixed_div(feedback_divider, a);
/* *fb_div = floor(feedback_divider); */
- a.full = rfixed_floor(feedback_divider);
- *fb_div = rfixed_trunc(a);
+ a.full = dfixed_floor(feedback_divider);
+ *fb_div = dfixed_trunc(a);
/* *fb_div_frac = fmod(feedback_divider, 1.0) * 10.0; */
- a.full = rfixed_const(10);
- b.full = rfixed_mul(feedback_divider, a);
+ a.full = dfixed_const(10);
+ b.full = dfixed_mul(feedback_divider, a);
- feedback_divider.full = rfixed_floor(feedback_divider);
- feedback_divider.full = rfixed_mul(feedback_divider, a);
+ feedback_divider.full = dfixed_floor(feedback_divider);
+ feedback_divider.full = dfixed_mul(feedback_divider, a);
feedback_divider.full = b.full - feedback_divider.full;
- *fb_div_frac = rfixed_trunc(feedback_divider);
+ *fb_div_frac = dfixed_trunc(feedback_divider);
} else {
/* *fb_div = floor(feedback_divider + 0.5); */
- feedback_divider.full += rfixed_const_half(0);
- feedback_divider.full = rfixed_floor(feedback_divider);
+ feedback_divider.full += dfixed_const_half(0);
+ feedback_divider.full = dfixed_floor(feedback_divider);
- *fb_div = rfixed_trunc(feedback_divider);
+ *fb_div = dfixed_trunc(feedback_divider);
*fb_div_frac = 0;
}
@@ -693,10 +693,10 @@ calc_fb_ref_div(struct radeon_pll *pll,
pll_out_max = pll->pll_out_max;
}
- ffreq.full = rfixed_const(freq);
+ ffreq.full = dfixed_const(freq);
/* max_error = ffreq * 0.0025; */
- a.full = rfixed_const(400);
- max_error.full = rfixed_div(ffreq, a);
+ a.full = dfixed_const(400);
+ max_error.full = dfixed_div(ffreq, a);
for ((*ref_div) = pll->min_ref_div; (*ref_div) < pll->max_ref_div; ++(*ref_div)) {
if (calc_fb_div(pll, freq, post_div, (*ref_div), fb_div, fb_div_frac)) {
@@ -707,9 +707,9 @@ calc_fb_ref_div(struct radeon_pll *pll,
continue;
/* pll_out = vco / post_div; */
- a.full = rfixed_const(post_div);
- pll_out.full = rfixed_const(vco);
- pll_out.full = rfixed_div(pll_out, a);
+ a.full = dfixed_const(post_div);
+ pll_out.full = dfixed_const(vco);
+ pll_out.full = dfixed_div(pll_out, a);
if (pll_out.full >= ffreq.full) {
error.full = pll_out.full - ffreq.full;
@@ -831,10 +831,6 @@ void radeon_compute_pll(struct radeon_pll *pll,
static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);
- struct drm_device *dev = fb->dev;
-
- if (fb->fbdev)
- radeonfb_remove(dev, fb);
if (radeon_fb->obj)
drm_gem_object_unreference_unlocked(radeon_fb->obj);
@@ -856,21 +852,15 @@ static const struct drm_framebuffer_funcs radeon_fb_funcs = {
.create_handle = radeon_user_framebuffer_create_handle,
};
-struct drm_framebuffer *
-radeon_framebuffer_create(struct drm_device *dev,
- struct drm_mode_fb_cmd *mode_cmd,
- struct drm_gem_object *obj)
+void
+radeon_framebuffer_init(struct drm_device *dev,
+ struct radeon_framebuffer *rfb,
+ struct drm_mode_fb_cmd *mode_cmd,
+ struct drm_gem_object *obj)
{
- struct radeon_framebuffer *radeon_fb;
-
- radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL);
- if (radeon_fb == NULL) {
- return NULL;
- }
- drm_framebuffer_init(dev, &radeon_fb->base, &radeon_fb_funcs);
- drm_helper_mode_fill_fb_struct(&radeon_fb->base, mode_cmd);
- radeon_fb->obj = obj;
- return &radeon_fb->base;
+ rfb->obj = obj;
+ drm_framebuffer_init(dev, &rfb->base, &radeon_fb_funcs);
+ drm_helper_mode_fill_fb_struct(&rfb->base, mode_cmd);
}
static struct drm_framebuffer *
@@ -879,6 +869,7 @@ radeon_user_framebuffer_create(struct drm_device *dev,
struct drm_mode_fb_cmd *mode_cmd)
{
struct drm_gem_object *obj;
+ struct radeon_framebuffer *radeon_fb;
obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
if (obj == NULL) {
@@ -886,12 +877,26 @@ radeon_user_framebuffer_create(struct drm_device *dev,
"can't create framebuffer\n", mode_cmd->handle);
return NULL;
}
- return radeon_framebuffer_create(dev, mode_cmd, obj);
+
+ radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL);
+ if (radeon_fb == NULL) {
+ return NULL;
+ }
+
+ radeon_framebuffer_init(dev, radeon_fb, mode_cmd, obj);
+
+ return &radeon_fb->base;
+}
+
+static void radeon_output_poll_changed(struct drm_device *dev)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ radeon_fb_output_poll_changed(rdev);
}
static const struct drm_mode_config_funcs radeon_mode_funcs = {
.fb_create = radeon_user_framebuffer_create,
- .fb_changed = radeonfb_probe,
+ .output_poll_changed = radeon_output_poll_changed
};
struct drm_prop_enum_list {
@@ -978,8 +983,11 @@ void radeon_update_display_priority(struct radeon_device *rdev)
/* set display priority to high for r3xx, rv515 chips
* this avoids flickering due to underflow to the
* display controllers during heavy acceleration.
+ * Don't force high on rs4xx igp chips as it seems to
+ * affect the sound card. See kernel bug 15982.
*/
- if (ASIC_IS_R300(rdev) || (rdev->family == CHIP_RV515))
+ if ((ASIC_IS_R300(rdev) || (rdev->family == CHIP_RV515)) &&
+ !(rdev->flags & RADEON_IS_IGP))
rdev->disp_priority = 2;
else
rdev->disp_priority = 0;
@@ -1031,15 +1039,24 @@ int radeon_modeset_init(struct radeon_device *rdev)
}
/* initialize hpd */
radeon_hpd_init(rdev);
- drm_helper_initial_config(rdev->ddev);
+
+ /* Initialize power management */
+ radeon_pm_init(rdev);
+
+ radeon_fbdev_init(rdev);
+ drm_kms_helper_poll_init(rdev->ddev);
+
return 0;
}
void radeon_modeset_fini(struct radeon_device *rdev)
{
+ radeon_fbdev_fini(rdev);
kfree(rdev->mode_info.bios_hardcoded_edid);
+ radeon_pm_fini(rdev);
if (rdev->mode_info.mode_config_initialized) {
+ drm_kms_helper_poll_fini(rdev->ddev);
radeon_hpd_fini(rdev);
drm_mode_config_cleanup(rdev->ddev);
rdev->mode_info.mode_config_initialized = false;
@@ -1089,15 +1106,15 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
}
if (radeon_crtc->rmx_type != RMX_OFF) {
fixed20_12 a, b;
- a.full = rfixed_const(crtc->mode.vdisplay);
- b.full = rfixed_const(radeon_crtc->native_mode.hdisplay);
- radeon_crtc->vsc.full = rfixed_div(a, b);
- a.full = rfixed_const(crtc->mode.hdisplay);
- b.full = rfixed_const(radeon_crtc->native_mode.vdisplay);
- radeon_crtc->hsc.full = rfixed_div(a, b);
+ a.full = dfixed_const(crtc->mode.vdisplay);
+ b.full = dfixed_const(radeon_crtc->native_mode.hdisplay);
+ radeon_crtc->vsc.full = dfixed_div(a, b);
+ a.full = dfixed_const(crtc->mode.hdisplay);
+ b.full = dfixed_const(radeon_crtc->native_mode.vdisplay);
+ radeon_crtc->hsc.full = dfixed_div(a, b);
} else {
- radeon_crtc->vsc.full = rfixed_const(1);
- radeon_crtc->hsc.full = rfixed_const(1);
+ radeon_crtc->vsc.full = dfixed_const(1);
+ radeon_crtc->hsc.full = dfixed_const(1);
}
return true;
}
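The rfixed_* to dfixed_* conversions in this file and in radeon_device.c above are a rename onto shared 20.12 fixed-point helpers: values carry 20 integer bits and 12 fractional bits in a u32. A self-contained sketch of that arithmetic with stand-in names (FIX_SHIFT, fix_div and friends are not the DRM macros):

#include <stdint.h>

#define FIX_SHIFT 12                              /* 20.12: 12 fractional bits */

static uint32_t fix_const(uint32_t v)             /* integer -> 20.12          */
{
	return v << FIX_SHIFT;
}

static uint32_t fix_trunc(uint32_t f)             /* 20.12 -> integer part     */
{
	return f >> FIX_SHIFT;
}

static uint32_t fix_div(uint32_t a, uint32_t b)   /* (a/b), both 20.12         */
{
	return (uint32_t)(((uint64_t)a << FIX_SHIFT) / b);
}

static uint32_t fix_mul(uint32_t a, uint32_t b)   /* (a*b), both 20.12         */
{
	return (uint32_t)(((uint64_t)a * b) >> FIX_SHIFT);
}

So a.full = dfixed_const(100); x.full = dfixed_div(x, a) in the hunks above computes x/100 while keeping 12 bits of fraction.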
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index b3749d47be7b..902d1731a652 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -44,9 +44,10 @@
* - 2.1.0 - add square tiling interface
* - 2.2.0 - add r6xx/r7xx const buffer support
* - 2.3.0 - add MSPOS + 3D texture + r500 VAP regs
+ * - 2.4.0 - add crtc id query
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 3
+#define KMS_DRIVER_MINOR 4
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
@@ -91,7 +92,6 @@ int radeon_testing = 0;
int radeon_connector_table = 0;
int radeon_tv = 1;
int radeon_new_pll = -1;
-int radeon_dynpm = -1;
int radeon_audio = 1;
int radeon_disp_priority = 0;
int radeon_hw_i2c = 0;
@@ -132,9 +132,6 @@ module_param_named(tv, radeon_tv, int, 0444);
MODULE_PARM_DESC(new_pll, "Select new PLL code");
module_param_named(new_pll, radeon_new_pll, int, 0444);
-MODULE_PARM_DESC(dynpm, "Disable/Enable dynamic power management (1 = enable)");
-module_param_named(dynpm, radeon_dynpm, int, 0444);
-
MODULE_PARM_DESC(audio, "Audio enable (0 = disable)");
module_param_named(audio, radeon_audio, int, 0444);
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index c5ddaf58563a..1ebb100015b7 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -309,9 +309,6 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
- /* adjust pm to upcoming mode change */
- radeon_pm_compute_clocks(rdev);
-
/* set the active encoder to connector routing */
radeon_encoder_set_active_device(encoder);
drm_mode_set_crtcinfo(adjusted_mode, 0);
@@ -1111,8 +1108,6 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
}
radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
- /* adjust pm to dpms change */
- radeon_pm_compute_clocks(rdev);
}
union crtc_source_param {
@@ -1546,10 +1541,49 @@ static void radeon_atom_encoder_commit(struct drm_encoder *encoder)
static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig;
radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+ case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+ atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_DISABLE);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ if (ASIC_IS_DCE4(rdev))
+ /* disable the transmitter */
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
+ else {
+ /* disable the encoder and transmitter */
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
+ atombios_dig_encoder_setup(encoder, ATOM_DISABLE);
+ }
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DDI:
+ atombios_ddia_setup(encoder, ATOM_DISABLE);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+ atombios_external_tmds_setup(encoder, ATOM_DISABLE);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DAC1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+ case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+ atombios_dac_setup(encoder, ATOM_DISABLE);
+ if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
+ atombios_tv_setup(encoder, ATOM_DISABLE);
+ break;
+ }
+
if (radeon_encoder_is_digital(encoder)) {
if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
r600_hdmi_disable(encoder);
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 9ac57a09784b..e192acfbf0cd 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -23,10 +23,6 @@
* Authors:
* David Airlie
*/
- /*
- * Modularization
- */
-
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/fb.h>
@@ -42,17 +38,21 @@
#include <linux/vga_switcheroo.h>
-struct radeon_fb_device {
+/* object hierarchy -
+ * this contains a helper and a radeon framebuffer;
+ * the helper contains a pointer to the radeon framebuffer base class.
+ */
+struct radeon_fbdev {
struct drm_fb_helper helper;
- struct radeon_framebuffer *rfb;
- struct radeon_device *rdev;
+ struct radeon_framebuffer rfb;
+ struct list_head fbdev_list;
+ struct radeon_device *rdev;
};
static struct fb_ops radeonfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
- .fb_setcolreg = drm_fb_helper_setcolreg,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
@@ -61,45 +61,6 @@ static struct fb_ops radeonfb_ops = {
.fb_setcmap = drm_fb_helper_setcmap,
};
-/**
- * Currently it is assumed that the old framebuffer is reused.
- *
- * LOCKING
- * caller should hold the mode config lock.
- *
- */
-int radeonfb_resize(struct drm_device *dev, struct drm_crtc *crtc)
-{
- struct fb_info *info;
- struct drm_framebuffer *fb;
- struct drm_display_mode *mode = crtc->desired_mode;
-
- fb = crtc->fb;
- if (fb == NULL) {
- return 1;
- }
- info = fb->fbdev;
- if (info == NULL) {
- return 1;
- }
- if (mode == NULL) {
- return 1;
- }
- info->var.xres = mode->hdisplay;
- info->var.right_margin = mode->hsync_start - mode->hdisplay;
- info->var.hsync_len = mode->hsync_end - mode->hsync_start;
- info->var.left_margin = mode->htotal - mode->hsync_end;
- info->var.yres = mode->vdisplay;
- info->var.lower_margin = mode->vsync_start - mode->vdisplay;
- info->var.vsync_len = mode->vsync_end - mode->vsync_start;
- info->var.upper_margin = mode->vtotal - mode->vsync_end;
- info->var.pixclock = 10000000 / mode->htotal * 1000 / mode->vtotal * 100;
- /* avoid overflow */
- info->var.pixclock = info->var.pixclock * 1000 / mode->vrefresh;
-
- return 0;
-}
-EXPORT_SYMBOL(radeonfb_resize);
static int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled)
{
@@ -125,57 +86,44 @@ static int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bo
return aligned;
}
-static struct drm_fb_helper_funcs radeon_fb_helper_funcs = {
- .gamma_set = radeon_crtc_fb_gamma_set,
- .gamma_get = radeon_crtc_fb_gamma_get,
-};
+static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj)
+{
+ struct radeon_bo *rbo = gobj->driver_private;
+ int ret;
+
+ ret = radeon_bo_reserve(rbo, false);
+ if (likely(ret == 0)) {
+ radeon_bo_kunmap(rbo);
+ radeon_bo_unreserve(rbo);
+ }
+ drm_gem_object_unreference_unlocked(gobj);
+}
-int radeonfb_create(struct drm_device *dev,
- uint32_t fb_width, uint32_t fb_height,
- uint32_t surface_width, uint32_t surface_height,
- uint32_t surface_depth, uint32_t surface_bpp,
- struct drm_framebuffer **fb_p)
+static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
+ struct drm_mode_fb_cmd *mode_cmd,
+ struct drm_gem_object **gobj_p)
{
- struct radeon_device *rdev = dev->dev_private;
- struct fb_info *info;
- struct radeon_fb_device *rfbdev;
- struct drm_framebuffer *fb = NULL;
- struct radeon_framebuffer *rfb;
- struct drm_mode_fb_cmd mode_cmd;
+ struct radeon_device *rdev = rfbdev->rdev;
struct drm_gem_object *gobj = NULL;
struct radeon_bo *rbo = NULL;
- struct device *device = &rdev->pdev->dev;
- int size, aligned_size, ret;
- u64 fb_gpuaddr;
- void *fbptr = NULL;
- unsigned long tmp;
bool fb_tiled = false; /* useful for testing */
u32 tiling_flags = 0;
+ int ret;
+ int aligned_size, size;
- mode_cmd.width = surface_width;
- mode_cmd.height = surface_height;
-
- /* avivo can't scanout real 24bpp */
- if ((surface_bpp == 24) && ASIC_IS_AVIVO(rdev))
- surface_bpp = 32;
-
- mode_cmd.bpp = surface_bpp;
/* need to align pitch with crtc limits */
- mode_cmd.pitch = radeon_align_pitch(rdev, mode_cmd.width, mode_cmd.bpp, fb_tiled) * ((mode_cmd.bpp + 1) / 8);
- mode_cmd.depth = surface_depth;
+ mode_cmd->pitch = radeon_align_pitch(rdev, mode_cmd->width, mode_cmd->bpp, fb_tiled) * ((mode_cmd->bpp + 1) / 8);
- size = mode_cmd.pitch * mode_cmd.height;
+ size = mode_cmd->pitch * mode_cmd->height;
aligned_size = ALIGN(size, PAGE_SIZE);
-
ret = radeon_gem_object_create(rdev, aligned_size, 0,
- RADEON_GEM_DOMAIN_VRAM,
- false, ttm_bo_type_kernel,
- &gobj);
+ RADEON_GEM_DOMAIN_VRAM,
+ false, ttm_bo_type_kernel,
+ &gobj);
if (ret) {
- printk(KERN_ERR "failed to allocate framebuffer (%d %d)\n",
- surface_width, surface_height);
- ret = -ENOMEM;
- goto out;
+ printk(KERN_ERR "failed to allocate framebuffer (%d)\n",
+ aligned_size);
+ return -ENOMEM;
}
rbo = gobj->driver_private;
@@ -183,7 +131,7 @@ int radeonfb_create(struct drm_device *dev,
tiling_flags = RADEON_TILING_MACRO;
#ifdef __BIG_ENDIAN
- switch (mode_cmd.bpp) {
+ switch (mode_cmd->bpp) {
case 32:
tiling_flags |= RADEON_TILING_SWAP_32BIT;
break;
@@ -196,57 +144,81 @@ int radeonfb_create(struct drm_device *dev,
if (tiling_flags) {
ret = radeon_bo_set_tiling_flags(rbo,
- tiling_flags | RADEON_TILING_SURFACE,
- mode_cmd.pitch);
+ tiling_flags | RADEON_TILING_SURFACE,
+ mode_cmd->pitch);
if (ret)
dev_err(rdev->dev, "FB failed to set tiling flags\n");
}
- mutex_lock(&rdev->ddev->struct_mutex);
- fb = radeon_framebuffer_create(rdev->ddev, &mode_cmd, gobj);
- if (fb == NULL) {
- DRM_ERROR("failed to allocate fb.\n");
- ret = -ENOMEM;
- goto out_unref;
- }
+
+
ret = radeon_bo_reserve(rbo, false);
if (unlikely(ret != 0))
goto out_unref;
- ret = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_gpuaddr);
+ ret = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, NULL);
if (ret) {
radeon_bo_unreserve(rbo);
goto out_unref;
}
if (fb_tiled)
radeon_bo_check_tiling(rbo, 0, 0);
- ret = radeon_bo_kmap(rbo, &fbptr);
+ ret = radeon_bo_kmap(rbo, NULL);
radeon_bo_unreserve(rbo);
if (ret) {
goto out_unref;
}
- list_add(&fb->filp_head, &rdev->ddev->mode_config.fb_kernel_list);
+ *gobj_p = gobj;
+ return 0;
+out_unref:
+ radeonfb_destroy_pinned_object(gobj);
+ *gobj_p = NULL;
+ return ret;
+}
+
+static int radeonfb_create(struct radeon_fbdev *rfbdev,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct radeon_device *rdev = rfbdev->rdev;
+ struct fb_info *info;
+ struct drm_framebuffer *fb = NULL;
+ struct drm_mode_fb_cmd mode_cmd;
+ struct drm_gem_object *gobj = NULL;
+ struct radeon_bo *rbo = NULL;
+ struct device *device = &rdev->pdev->dev;
+ int ret;
+ unsigned long tmp;
+
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
+
+ /* avivo can't scanout real 24bpp */
+ if ((sizes->surface_bpp == 24) && ASIC_IS_AVIVO(rdev))
+ sizes->surface_bpp = 32;
+
+ mode_cmd.bpp = sizes->surface_bpp;
+ mode_cmd.depth = sizes->surface_depth;
- *fb_p = fb;
- rfb = to_radeon_framebuffer(fb);
- rdev->fbdev_rfb = rfb;
- rdev->fbdev_rbo = rbo;
+ ret = radeonfb_create_pinned_object(rfbdev, &mode_cmd, &gobj);
+ rbo = gobj->driver_private;
- info = framebuffer_alloc(sizeof(struct radeon_fb_device), device);
+ /* okay, we have an object; now allocate the framebuffer */
+ info = framebuffer_alloc(0, device);
if (info == NULL) {
ret = -ENOMEM;
goto out_unref;
}
- rdev->fbdev_info = info;
- rfbdev = info->par;
- rfbdev->helper.funcs = &radeon_fb_helper_funcs;
- rfbdev->helper.dev = dev;
- ret = drm_fb_helper_init_crtc_count(&rfbdev->helper, rdev->num_crtc,
- RADEONFB_CONN_LIMIT);
- if (ret)
- goto out_unref;
+ info->par = rfbdev;
+
+ radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
- memset_io(fbptr, 0x0, aligned_size);
+ fb = &rfbdev->rfb.base;
+
+ /* setup helper */
+ rfbdev->helper.fb = fb;
+ rfbdev->helper.fbdev = info;
+
+ memset_io(rbo->kptr, 0x0, radeon_bo_size(rbo));
strcpy(info->fix.id, "radeondrmfb");
@@ -255,17 +227,22 @@ int radeonfb_create(struct drm_device *dev,
info->flags = FBINFO_DEFAULT;
info->fbops = &radeonfb_ops;
- tmp = fb_gpuaddr - rdev->mc.vram_start;
+ tmp = radeon_bo_gpu_offset(rbo) - rdev->mc.vram_start;
info->fix.smem_start = rdev->mc.aper_base + tmp;
- info->fix.smem_len = size;
- info->screen_base = fbptr;
- info->screen_size = size;
+ info->fix.smem_len = radeon_bo_size(rbo);
+ info->screen_base = rbo->kptr;
+ info->screen_size = radeon_bo_size(rbo);
- drm_fb_helper_fill_var(info, fb, fb_width, fb_height);
+ drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height);
/* setup aperture base/size for vesafb takeover */
- info->aperture_base = rdev->ddev->mode_config.fb_base;
- info->aperture_size = rdev->mc.real_vram_size;
+ info->apertures = alloc_apertures(1);
+ if (!info->apertures) {
+ ret = -ENOMEM;
+ goto out_unref;
+ }
+ info->apertures->ranges[0].base = rdev->ddev->mode_config.fb_base;
+ info->apertures->ranges[0].size = rdev->mc.real_vram_size;
info->fix.mmio_start = 0;
info->fix.mmio_len = 0;
@@ -274,44 +251,55 @@ int radeonfb_create(struct drm_device *dev,
info->pixmap.access_align = 32;
info->pixmap.flags = FB_PIXMAP_SYSTEM;
info->pixmap.scan_align = 1;
+
if (info->screen_base == NULL) {
ret = -ENOSPC;
goto out_unref;
}
+
+ ret = fb_alloc_cmap(&info->cmap, 256, 0);
+ if (ret) {
+ ret = -ENOMEM;
+ goto out_unref;
+ }
+
DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
DRM_INFO("vram apper at 0x%lX\n", (unsigned long)rdev->mc.aper_base);
- DRM_INFO("size %lu\n", (unsigned long)size);
+ DRM_INFO("size %lu\n", (unsigned long)radeon_bo_size(rbo));
DRM_INFO("fb depth is %d\n", fb->depth);
DRM_INFO(" pitch is %d\n", fb->pitch);
- fb->fbdev = info;
- rfbdev->rfb = rfb;
- rfbdev->rdev = rdev;
-
- mutex_unlock(&rdev->ddev->struct_mutex);
vga_switcheroo_client_fb_set(rdev->ddev->pdev, info);
return 0;
out_unref:
if (rbo) {
- ret = radeon_bo_reserve(rbo, false);
- if (likely(ret == 0)) {
- radeon_bo_kunmap(rbo);
- radeon_bo_unreserve(rbo);
- }
+
}
if (fb && ret) {
- list_del(&fb->filp_head);
drm_gem_object_unreference(gobj);
drm_framebuffer_cleanup(fb);
kfree(fb);
}
- drm_gem_object_unreference(gobj);
- mutex_unlock(&rdev->ddev->struct_mutex);
-out:
return ret;
}
+static int radeon_fb_find_or_create_single(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct radeon_fbdev *rfbdev = (struct radeon_fbdev *)helper;
+ int new_fb = 0;
+ int ret;
+
+ if (!helper->fb) {
+ ret = radeonfb_create(rfbdev, sizes);
+ if (ret)
+ return ret;
+ new_fb = 1;
+ }
+ return new_fb;
+}
+
static char *mode_option;
int radeon_parse_options(char *options)
{
@@ -328,46 +316,102 @@ int radeon_parse_options(char *options)
return 0;
}
-int radeonfb_probe(struct drm_device *dev)
+void radeon_fb_output_poll_changed(struct radeon_device *rdev)
{
- struct radeon_device *rdev = dev->dev_private;
- int bpp_sel = 32;
-
- /* select 8 bpp console on RN50 or 16MB cards */
- if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32*1024*1024))
- bpp_sel = 8;
-
- return drm_fb_helper_single_fb_probe(dev, bpp_sel, &radeonfb_create);
+ drm_fb_helper_hotplug_event(&rdev->mode_info.rfbdev->helper);
}
-int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb)
+static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfbdev)
{
struct fb_info *info;
- struct radeon_framebuffer *rfb = to_radeon_framebuffer(fb);
+ struct radeon_framebuffer *rfb = &rfbdev->rfb;
struct radeon_bo *rbo;
int r;
- if (!fb) {
- return -EINVAL;
+ if (rfbdev->helper.fbdev) {
+ info = rfbdev->helper.fbdev;
+
+ unregister_framebuffer(info);
+ if (info->cmap.len)
+ fb_dealloc_cmap(&info->cmap);
+ framebuffer_release(info);
}
- info = fb->fbdev;
- if (info) {
- struct radeon_fb_device *rfbdev = info->par;
+
+ if (rfb->obj) {
rbo = rfb->obj->driver_private;
- unregister_framebuffer(info);
r = radeon_bo_reserve(rbo, false);
if (likely(r == 0)) {
radeon_bo_kunmap(rbo);
radeon_bo_unpin(rbo);
radeon_bo_unreserve(rbo);
}
- drm_fb_helper_free(&rfbdev->helper);
- framebuffer_release(info);
+ drm_gem_object_unreference_unlocked(rfb->obj);
}
+ drm_fb_helper_fini(&rfbdev->helper);
+ drm_framebuffer_cleanup(&rfb->base);
- printk(KERN_INFO "unregistered panic notifier\n");
+ return 0;
+}
+
+static struct drm_fb_helper_funcs radeon_fb_helper_funcs = {
+ .gamma_set = radeon_crtc_fb_gamma_set,
+ .gamma_get = radeon_crtc_fb_gamma_get,
+ .fb_probe = radeon_fb_find_or_create_single,
+};
+
+int radeon_fbdev_init(struct radeon_device *rdev)
+{
+ struct radeon_fbdev *rfbdev;
+ int bpp_sel = 32;
+
+ /* select 8 bpp console on RN50 or 16MB cards */
+ if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32*1024*1024))
+ bpp_sel = 8;
+
+ rfbdev = kzalloc(sizeof(struct radeon_fbdev), GFP_KERNEL);
+ if (!rfbdev)
+ return -ENOMEM;
+
+ rfbdev->rdev = rdev;
+ rdev->mode_info.rfbdev = rfbdev;
+ rfbdev->helper.funcs = &radeon_fb_helper_funcs;
+ drm_fb_helper_init(rdev->ddev, &rfbdev->helper,
+ rdev->num_crtc,
+ RADEONFB_CONN_LIMIT);
+ drm_fb_helper_single_add_all_connectors(&rfbdev->helper);
+ drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel);
return 0;
}
-EXPORT_SYMBOL(radeonfb_remove);
-MODULE_LICENSE("GPL");
+
+void radeon_fbdev_fini(struct radeon_device *rdev)
+{
+ if (!rdev->mode_info.rfbdev)
+ return;
+
+ radeon_fbdev_destroy(rdev->ddev, rdev->mode_info.rfbdev);
+ kfree(rdev->mode_info.rfbdev);
+ rdev->mode_info.rfbdev = NULL;
+}
+
+void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state)
+{
+ fb_set_suspend(rdev->mode_info.rfbdev->helper.fbdev, state);
+}
+
+int radeon_fbdev_total_size(struct radeon_device *rdev)
+{
+ struct radeon_bo *robj;
+ int size = 0;
+
+ robj = rdev->mode_info.rfbdev->rfb.obj->driver_private;
+ size += radeon_bo_size(robj);
+ return size;
+}
+
+bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
+{
+ if (robj == rdev->mode_info.rfbdev->rfb.obj->driver_private)
+ return true;
+ return false;
+}
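
Editor's note: the radeon_fb.c changes above replace the per-fb_info radeon_fb_device with a single driver-owned radeon_fbdev that embeds both the drm_fb_helper and the radeon_framebuffer, so the fb_probe callback can recover the driver state directly from the helper pointer. Below is a minimal standalone sketch of that embedding pattern in plain C; the structure and function names are illustrative only and are not part of the patch.

#include <stddef.h>
#include <stdio.h>

/* illustrative stand-ins for the kernel structures */
struct fb_helper { int crtc_count; };
struct framebuffer { int width, height; };

struct fbdev_state {
	struct fb_helper helper;   /* embedded helper; container_of() recovers the outer struct */
	struct framebuffer fb;     /* framebuffer embedded, not pointed to */
	void *driver_private;
};

/* recover the containing state from the helper, as the fb_probe callback does */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct fbdev_state *state_from_helper(struct fb_helper *helper)
{
	return container_of(helper, struct fbdev_state, helper);
}

int main(void)
{
	struct fbdev_state state = { .fb = { 1024, 768 } };
	struct fb_helper *h = &state.helper;

	printf("fb is %dx%d\n",
	       state_from_helper(h)->fb.width,
	       state_from_helper(h)->fb.height);
	return 0;
}
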
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index d90f95b405c5..b1f9a81b5d1d 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -58,7 +58,6 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
radeon_fence_ring_emit(rdev, fence);
fence->emited = true;
- fence->timeout = jiffies + ((2000 * HZ) / 1000);
list_del(&fence->list);
list_add_tail(&fence->list, &rdev->fence_drv.emited);
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
@@ -71,15 +70,34 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev)
struct list_head *i, *n;
uint32_t seq;
bool wake = false;
+ unsigned long cjiffies;
- if (rdev == NULL) {
- return true;
- }
- if (rdev->shutdown) {
- return true;
- }
seq = RREG32(rdev->fence_drv.scratch_reg);
- rdev->fence_drv.last_seq = seq;
+ if (seq != rdev->fence_drv.last_seq) {
+ rdev->fence_drv.last_seq = seq;
+ rdev->fence_drv.last_jiffies = jiffies;
+ rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
+ } else {
+ cjiffies = jiffies;
+ if (time_after(cjiffies, rdev->fence_drv.last_jiffies)) {
+ cjiffies -= rdev->fence_drv.last_jiffies;
+ if (time_after(rdev->fence_drv.last_timeout, cjiffies)) {
+ /* update the timeout */
+ rdev->fence_drv.last_timeout -= cjiffies;
+ } else {
+ /* the 500ms timeout has elapsed, we should test
+ * for GPU lockup
+ */
+ rdev->fence_drv.last_timeout = 1;
+ }
+ } else {
+ /* jiffies wrapped around; update last_jiffies, we will just wait
+ * a little longer
+ */
+ rdev->fence_drv.last_jiffies = cjiffies;
+ }
+ return false;
+ }
n = NULL;
list_for_each(i, &rdev->fence_drv.emited) {
fence = list_entry(i, struct radeon_fence, list);
@@ -171,9 +189,8 @@ bool radeon_fence_signaled(struct radeon_fence *fence)
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
struct radeon_device *rdev;
- unsigned long cur_jiffies;
- unsigned long timeout;
- bool expired = false;
+ unsigned long irq_flags, timeout;
+ u32 seq;
int r;
if (fence == NULL) {
@@ -184,21 +201,18 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr)
if (radeon_fence_signaled(fence)) {
return 0;
}
-
+ timeout = rdev->fence_drv.last_timeout;
retry:
- cur_jiffies = jiffies;
- timeout = HZ / 100;
- if (time_after(fence->timeout, cur_jiffies)) {
- timeout = fence->timeout - cur_jiffies;
- }
-
+ /* save current sequence used to check for GPU lockup */
+ seq = rdev->fence_drv.last_seq;
if (intr) {
radeon_irq_kms_sw_irq_get(rdev);
r = wait_event_interruptible_timeout(rdev->fence_drv.queue,
radeon_fence_signaled(fence), timeout);
radeon_irq_kms_sw_irq_put(rdev);
- if (unlikely(r < 0))
+ if (unlikely(r < 0)) {
return r;
+ }
} else {
radeon_irq_kms_sw_irq_get(rdev);
r = wait_event_timeout(rdev->fence_drv.queue,
@@ -206,38 +220,36 @@ retry:
radeon_irq_kms_sw_irq_put(rdev);
}
if (unlikely(!radeon_fence_signaled(fence))) {
- if (unlikely(r == 0)) {
- expired = true;
+ /* we were interrupted for some reason and the fence
+ * isn't signaled yet, resume the wait
+ */
+ if (r) {
+ timeout = r;
+ goto retry;
}
- if (unlikely(expired)) {
- timeout = 1;
- if (time_after(cur_jiffies, fence->timeout)) {
- timeout = cur_jiffies - fence->timeout;
- }
- timeout = jiffies_to_msecs(timeout);
- if (timeout > 500) {
- DRM_ERROR("fence(%p:0x%08X) %lums timeout "
- "going to reset GPU\n",
- fence, fence->seq, timeout);
- radeon_gpu_reset(rdev);
- WREG32(rdev->fence_drv.scratch_reg, fence->seq);
- }
+ /* don't protect read access to rdev->fence_drv.last_seq
+ * if we are experiencing a lockup the value doesn't change
+ */
+ if (seq == rdev->fence_drv.last_seq && radeon_gpu_is_lockup(rdev)) {
+ /* good news we believe it's a lockup */
+ WARN(1, "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n", fence->seq, seq);
+ /* FIXME: what should we do ? marking everyone
+ * as signaled for now
+ */
+ rdev->gpu_lockup = true;
+ r = radeon_gpu_reset(rdev);
+ if (r)
+ return r;
+ WREG32(rdev->fence_drv.scratch_reg, fence->seq);
+ rdev->gpu_lockup = false;
}
+ timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
+ write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
+ rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
+ rdev->fence_drv.last_jiffies = jiffies;
+ write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
goto retry;
}
- if (unlikely(expired)) {
- rdev->fence_drv.count_timeout++;
- cur_jiffies = jiffies;
- timeout = 1;
- if (time_after(cur_jiffies, fence->timeout)) {
- timeout = cur_jiffies - fence->timeout;
- }
- timeout = jiffies_to_msecs(timeout);
- DRM_ERROR("fence(%p:0x%08X) %lums timeout\n",
- fence, fence->seq, timeout);
- DRM_ERROR("last signaled fence(0x%08X)\n",
- rdev->fence_drv.last_seq);
- }
return 0;
}
@@ -333,7 +345,6 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
INIT_LIST_HEAD(&rdev->fence_drv.created);
INIT_LIST_HEAD(&rdev->fence_drv.emited);
INIT_LIST_HEAD(&rdev->fence_drv.signaled);
- rdev->fence_drv.count_timeout = 0;
init_waitqueue_head(&rdev->fence_drv.queue);
rdev->fence_drv.initialized = true;
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
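
Editor's note: the radeon_fence.c changes above drop the per-fence jiffies deadline and instead re-arm a fixed wait, declaring a GPU lockup only when the scratch sequence number has not advanced across a full wait and radeon_gpu_is_lockup() agrees. The following is a rough standalone model of that retry loop in plain C; the helpers and constants are stand-ins under stated assumptions, not the kernel code.

#include <stdbool.h>
#include <stdio.h>

#define FENCE_TIMEOUT_TICKS 500	/* stand-in for RADEON_FENCE_JIFFIES_TIMEOUT */

static unsigned last_seq;			/* last sequence read from the scratch register */
static bool fence_signaled(unsigned seq)	{ return last_seq >= seq; }
static bool gpu_is_lockup(void)			{ return false; /* stand-in check */ }

/* stand-in for wait_event_timeout(): pretend the GPU advances one step per wait */
static int wait_for_progress(unsigned seq, int timeout)
{
	if (last_seq < seq)
		last_seq++;
	return fence_signaled(seq) ? timeout : 0;
}

static int fence_wait(unsigned seq)
{
	int timeout = FENCE_TIMEOUT_TICKS;

	for (;;) {
		unsigned seq_before = last_seq;
		int left = wait_for_progress(seq, timeout);

		if (fence_signaled(seq))
			return 0;
		if (left) {			/* woken before the timeout: resume with the time left */
			timeout = left;
			continue;
		}
		/* no progress at all across the wait: suspect a lockup */
		if (seq_before == last_seq && gpu_is_lockup())
			return -1;		/* the driver would reset the GPU here */
		timeout = FENCE_TIMEOUT_TICKS;	/* progress was made, re-arm and retry */
	}
}

int main(void)
{
	printf("wait result: %d\n", fence_wait(3));
	return 0;
}
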
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 1770d3c07fd0..e65b90317fab 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -173,7 +173,7 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
int i, j;
if (!rdev->gart.ready) {
- DRM_ERROR("trying to bind memory to unitialized GART !\n");
+ WARN(1, "trying to bind memory to uninitialized GART!\n");
return -EINVAL;
}
t = offset / RADEON_GPU_PAGE_SIZE;
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index ef92d147d8f0..a72a3ee5d69b 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -44,6 +44,9 @@ void radeon_gem_object_free(struct drm_gem_object *gobj)
if (robj) {
radeon_bo_unref(&robj);
}
+
+ drm_gem_object_release(gobj);
+ kfree(gobj);
}
int radeon_gem_object_create(struct radeon_device *rdev, int size,
@@ -158,8 +161,7 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
args->vram_visible = rdev->mc.real_vram_size;
if (rdev->stollen_vga_memory)
args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
- if (rdev->fbdev_rbo)
- args->vram_visible -= radeon_bo_size(rdev->fbdev_rbo);
+ args->vram_visible -= radeon_fbdev_total_size(rdev);
args->gart_size = rdev->mc.gtt_size - rdev->cp.ring_size - 4096 -
RADEON_IB_POOL_SIZE*64*1024;
return 0;
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index a212041e8b0b..059bfa4098d7 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -26,6 +26,7 @@
* Jerome Glisse
*/
#include "drmP.h"
+#include "drm_crtc_helper.h"
#include "radeon_drm.h"
#include "radeon_reg.h"
#include "radeon.h"
@@ -55,7 +56,7 @@ static void radeon_hotplug_work_func(struct work_struct *work)
radeon_connector_hotplug(connector);
}
/* Just fire off a uevent and let userspace tell us what to do */
- drm_sysfs_hotplug_event(dev);
+ drm_helper_hpd_irq_event(dev);
}
void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
@@ -67,6 +68,7 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
/* Disable *all* interrupts */
rdev->irq.sw_int = false;
+ rdev->irq.gui_idle = false;
for (i = 0; i < rdev->num_crtc; i++)
rdev->irq.crtc_vblank_int[i] = false;
for (i = 0; i < 6; i++)
@@ -96,6 +98,7 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
}
/* Disable *all* interrupts */
rdev->irq.sw_int = false;
+ rdev->irq.gui_idle = false;
for (i = 0; i < rdev->num_crtc; i++)
rdev->irq.crtc_vblank_int[i] = false;
for (i = 0; i < 6; i++)
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index c633319f98ed..04068352ccd2 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -98,11 +98,15 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
struct radeon_device *rdev = dev->dev_private;
struct drm_radeon_info *info;
+ struct radeon_mode_info *minfo = &rdev->mode_info;
uint32_t *value_ptr;
uint32_t value;
+ struct drm_crtc *crtc;
+ int i, found;
info = data;
value_ptr = (uint32_t *)((unsigned long)info->value);
+ value = *value_ptr;
switch (info->request) {
case RADEON_INFO_DEVICE_ID:
value = dev->pci_device;
@@ -116,6 +120,20 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
case RADEON_INFO_ACCEL_WORKING:
value = rdev->accel_working;
break;
+ case RADEON_INFO_CRTC_FROM_ID:
+ for (i = 0, found = 0; i < rdev->num_crtc; i++) {
+ crtc = (struct drm_crtc *)minfo->crtcs[i];
+ if (crtc && crtc->base.id == value) {
+ value = i;
+ found = 1;
+ break;
+ }
+ }
+ if (!found) {
+ DRM_DEBUG("unknown crtc id %d\n", value);
+ return -EINVAL;
+ }
+ break;
default:
DRM_DEBUG("Invalid request %d\n", info->request);
return -EINVAL;
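
Editor's note: because the info ioctl above now reads the incoming value first (value = *value_ptr), RADEON_INFO_CRTC_FROM_ID can translate a DRM object id into the driver's crtc index. A tiny standalone sketch of that lookup follows; the types and names are illustrative only.

#include <stdio.h>

struct crtc { unsigned object_id; };

/* return the array index of the crtc whose object id matches, or -1 */
static int crtc_index_from_id(const struct crtc *crtcs, int num_crtc, unsigned id)
{
	int i;

	for (i = 0; i < num_crtc; i++)
		if (crtcs[i].object_id == id)
			return i;
	return -1;	/* the kernel code returns -EINVAL here */
}

int main(void)
{
	struct crtc crtcs[] = { { 31 }, { 37 }, { 43 } };

	printf("crtc index for id 37: %d\n",
	       crtc_index_from_id(crtcs, 3, 37));
	return 0;
}
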
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 88865e38fe30..e1e5255396ac 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -26,7 +26,7 @@
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/radeon_drm.h>
-#include "radeon_fixed.h"
+#include <drm/drm_fixed.h>
#include "radeon.h"
#include "atom.h"
@@ -314,6 +314,9 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
switch (mode) {
case DRM_MODE_DPMS_ON:
+ radeon_crtc->enabled = true;
+ /* adjust pm to dpms changes BEFORE enabling crtcs */
+ radeon_pm_compute_clocks(rdev);
if (radeon_crtc->crtc_id)
WREG32_P(RADEON_CRTC2_GEN_CNTL, RADEON_CRTC2_EN, ~(RADEON_CRTC2_EN | mask));
else {
@@ -335,6 +338,9 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
RADEON_CRTC_DISP_REQ_EN_B));
WREG32_P(RADEON_CRTC_EXT_CNTL, mask, ~mask);
}
+ radeon_crtc->enabled = false;
+ /* adjust pm to dpms changes AFTER disabling crtcs */
+ radeon_pm_compute_clocks(rdev);
break;
}
}
@@ -966,6 +972,12 @@ static bool radeon_crtc_mode_fixup(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+
+ /* adjust pm to upcoming mode change */
+ radeon_pm_compute_clocks(rdev);
+
if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
return false;
return true;
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 0274abe17ad9..5a13b3eeef19 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -116,8 +116,6 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
else
radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
- /* adjust pm to dpms change */
- radeon_pm_compute_clocks(rdev);
}
static void radeon_legacy_lvds_prepare(struct drm_encoder *encoder)
@@ -217,11 +215,6 @@ static bool radeon_legacy_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
-
- /* adjust pm to upcoming mode change */
- radeon_pm_compute_clocks(rdev);
/* set the active encoder to connector routing */
radeon_encoder_set_active_device(encoder);
@@ -286,8 +279,6 @@ static void radeon_legacy_primary_dac_dpms(struct drm_encoder *encoder, int mode
else
radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
- /* adjust pm to dpms change */
- radeon_pm_compute_clocks(rdev);
}
static void radeon_legacy_primary_dac_prepare(struct drm_encoder *encoder)
@@ -474,8 +465,6 @@ static void radeon_legacy_tmds_int_dpms(struct drm_encoder *encoder, int mode)
else
radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
- /* adjust pm to dpms change */
- radeon_pm_compute_clocks(rdev);
}
static void radeon_legacy_tmds_int_prepare(struct drm_encoder *encoder)
@@ -642,8 +631,6 @@ static void radeon_legacy_tmds_ext_dpms(struct drm_encoder *encoder, int mode)
else
radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
- /* adjust pm to dpms change */
- radeon_pm_compute_clocks(rdev);
}
static void radeon_legacy_tmds_ext_prepare(struct drm_encoder *encoder)
@@ -852,8 +839,6 @@ static void radeon_legacy_tv_dac_dpms(struct drm_encoder *encoder, int mode)
else
radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
- /* adjust pm to dpms change */
- radeon_pm_compute_clocks(rdev);
}
static void radeon_legacy_tv_dac_prepare(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 5413fcd63086..67358baf28b2 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -34,11 +34,12 @@
#include <drm_mode.h>
#include <drm_edid.h>
#include <drm_dp_helper.h>
+#include <drm_fixed.h>
#include <linux/i2c.h>
#include <linux/i2c-id.h>
#include <linux/i2c-algo-bit.h>
-#include "radeon_fixed.h"
+struct radeon_bo;
struct radeon_device;
#define to_radeon_crtc(x) container_of(x, struct radeon_crtc, base)
@@ -65,6 +66,16 @@ enum radeon_tv_std {
TV_STD_PAL_N,
};
+enum radeon_hpd_id {
+ RADEON_HPD_1 = 0,
+ RADEON_HPD_2,
+ RADEON_HPD_3,
+ RADEON_HPD_4,
+ RADEON_HPD_5,
+ RADEON_HPD_6,
+ RADEON_HPD_NONE = 0xff,
+};
+
/* radeon gpio-based i2c
* 1. "mask" reg and bits
* grabs the gpio pins for software use
@@ -84,7 +95,7 @@ struct radeon_i2c_bus_rec {
/* id used by atom */
uint8_t i2c_id;
/* id used by atom */
- uint8_t hpd_id;
+ enum radeon_hpd_id hpd;
/* can be used with hw i2c engine */
bool hw_capable;
/* uses multi-media i2c engine */
@@ -202,6 +213,8 @@ enum radeon_dvo_chip {
DVO_SIL1178,
};
+struct radeon_fbdev;
+
struct radeon_mode_info {
struct atom_context *atom_context;
struct card_info *atom_card_info;
@@ -218,6 +231,9 @@ struct radeon_mode_info {
struct drm_property *tmds_pll_property;
/* hardcoded DFP edid from BIOS */
struct edid *bios_hardcoded_edid;
+
+ /* pointer to fbdev info structure */
+ struct radeon_fbdev *rfbdev;
};
#define MAX_H_CODE_TIMING_LEN 32
@@ -339,6 +355,7 @@ struct radeon_encoder {
enum radeon_rmx_type rmx_type;
struct drm_display_mode native_mode;
void *enc_priv;
+ int audio_polling_active;
int hdmi_offset;
int hdmi_config_offset;
int hdmi_audio_workaround;
@@ -363,16 +380,6 @@ struct radeon_gpio_rec {
u32 mask;
};
-enum radeon_hpd_id {
- RADEON_HPD_NONE = 0,
- RADEON_HPD_1,
- RADEON_HPD_2,
- RADEON_HPD_3,
- RADEON_HPD_4,
- RADEON_HPD_5,
- RADEON_HPD_6,
-};
-
struct radeon_hpd {
enum radeon_hpd_id hpd;
u8 plugged_state;
@@ -532,11 +539,10 @@ extern void radeon_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, int regno);
extern void radeon_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, int regno);
-struct drm_framebuffer *radeon_framebuffer_create(struct drm_device *dev,
- struct drm_mode_fb_cmd *mode_cmd,
- struct drm_gem_object *obj);
-
-int radeonfb_probe(struct drm_device *dev);
+void radeon_framebuffer_init(struct drm_device *dev,
+ struct radeon_framebuffer *rfb,
+ struct drm_mode_fb_cmd *mode_cmd,
+ struct drm_gem_object *obj);
int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev);
@@ -575,4 +581,13 @@ void radeon_legacy_tv_adjust_pll2(struct drm_encoder *encoder,
void radeon_legacy_tv_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
+
+/* fbdev layer */
+int radeon_fbdev_init(struct radeon_device *rdev);
+void radeon_fbdev_fini(struct radeon_device *rdev);
+void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state);
+int radeon_fbdev_total_size(struct radeon_device *rdev);
+bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj);
+
+void radeon_fb_output_poll_changed(struct radeon_device *rdev);
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 122774742bd5..d5b9373ce06c 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -112,9 +112,11 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
radeon_ttm_placement_from_domain(bo, domain);
/* Kernel allocation are uninterruptible */
+ mutex_lock(&rdev->vram_mutex);
r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
&bo->placement, 0, 0, !kernel, NULL, size,
&radeon_ttm_bo_destroy);
+ mutex_unlock(&rdev->vram_mutex);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS)
dev_err(rdev->dev,
@@ -166,11 +168,15 @@ void radeon_bo_kunmap(struct radeon_bo *bo)
void radeon_bo_unref(struct radeon_bo **bo)
{
struct ttm_buffer_object *tbo;
+ struct radeon_device *rdev;
if ((*bo) == NULL)
return;
+ rdev = (*bo)->rdev;
tbo = &((*bo)->tbo);
+ mutex_lock(&rdev->vram_mutex);
ttm_bo_unref(&tbo);
+ mutex_unlock(&rdev->vram_mutex);
if (tbo == NULL)
*bo = NULL;
}
@@ -192,7 +198,7 @@ int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
}
for (i = 0; i < bo->placement.num_placement; i++)
bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
- r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
if (likely(r == 0)) {
bo->pin_count = 1;
if (gpu_addr != NULL)
@@ -216,7 +222,7 @@ int radeon_bo_unpin(struct radeon_bo *bo)
return 0;
for (i = 0; i < bo->placement.num_placement; i++)
bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
- r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
if (unlikely(r != 0))
dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
return r;
@@ -295,6 +301,7 @@ int radeon_bo_list_reserve(struct list_head *head)
r = radeon_bo_reserve(lobj->bo, false);
if (unlikely(r != 0))
return r;
+ lobj->reserved = true;
}
return 0;
}
@@ -305,7 +312,7 @@ void radeon_bo_list_unreserve(struct list_head *head)
list_for_each_entry(lobj, head, list) {
/* only unreserve object we successfully reserved */
- if (radeon_bo_is_reserved(lobj->bo))
+ if (lobj->reserved && radeon_bo_is_reserved(lobj->bo))
radeon_bo_unreserve(lobj->bo);
}
}
@@ -316,6 +323,9 @@ int radeon_bo_list_validate(struct list_head *head)
struct radeon_bo *bo;
int r;
+ list_for_each_entry(lobj, head, list) {
+ lobj->reserved = false;
+ }
r = radeon_bo_list_reserve(head);
if (unlikely(r != 0)) {
return r;
@@ -331,7 +341,7 @@ int radeon_bo_list_validate(struct list_head *head)
lobj->rdomain);
}
r = ttm_bo_validate(&bo->tbo, &bo->placement,
- true, false);
+ true, false, false);
if (unlikely(r))
return r;
}
@@ -499,11 +509,33 @@ void radeon_bo_move_notify(struct ttm_buffer_object *bo,
radeon_bo_check_tiling(rbo, 0, 1);
}
-void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
+int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
+ struct radeon_device *rdev;
struct radeon_bo *rbo;
+ unsigned long offset, size;
+ int r;
+
if (!radeon_ttm_bo_is_radeon_bo(bo))
- return;
+ return 0;
rbo = container_of(bo, struct radeon_bo, tbo);
radeon_bo_check_tiling(rbo, 0, 0);
+ rdev = rbo->rdev;
+ if (bo->mem.mem_type == TTM_PL_VRAM) {
+ size = bo->mem.num_pages << PAGE_SHIFT;
+ offset = bo->mem.mm_node->start << PAGE_SHIFT;
+ if ((offset + size) > rdev->mc.visible_vram_size) {
+ /* the memory is not CPU-visible: move the BO into the visible aperture */
+ radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
+ rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
+ r = ttm_bo_validate(bo, &rbo->placement, false, true, false);
+ if (unlikely(r != 0))
+ return r;
+ offset = bo->mem.mm_node->start << PAGE_SHIFT;
+ /* this should not happen */
+ if ((offset + size) > rdev->mc.visible_vram_size)
+ return -EINVAL;
+ }
+ }
+ return 0;
}
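
Editor's note: the fault handler above now revalidates a VRAM buffer whose pages lie beyond the CPU-visible aperture, clamping its placement (lpfn) so the page fault can be satisfied. Below is a small standalone sketch of the visibility check only; the page size and field names are assumptions for illustration, not the kernel code.

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed 4 KiB pages */

struct bo_mem {
	unsigned long start_page;	/* first page of the allocation in VRAM */
	unsigned long num_pages;
};

/* true when every byte of the BO lies inside the CPU-visible aperture */
static bool bo_is_cpu_visible(const struct bo_mem *mem,
			      unsigned long visible_vram_size)
{
	unsigned long offset = mem->start_page << PAGE_SHIFT;
	unsigned long size = mem->num_pages << PAGE_SHIFT;

	return offset + size <= visible_vram_size;
}

int main(void)
{
	struct bo_mem mem = { .start_page = 0x3f000, .num_pages = 0x2000 };
	unsigned long visible = 256UL << 20;	/* 256 MiB visible aperture */

	/* the fault handler would clamp placement.lpfn and revalidate here */
	printf("visible: %s\n", bo_is_cpu_visible(&mem, visible) ? "yes" : "no");
	return 0;
}
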
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 7ab43de1e244..353998dc2c03 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -168,6 +168,6 @@ extern int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
bool force_drop);
extern void radeon_bo_move_notify(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem);
-extern void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
+extern int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
extern int radeon_bo_get_surface_reg(struct radeon_bo *bo);
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index a4b57493aa78..a8d162c6f829 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -23,164 +23,122 @@
#include "drmP.h"
#include "radeon.h"
#include "avivod.h"
+#ifdef CONFIG_ACPI
+#include <linux/acpi.h>
+#endif
+#include <linux/power_supply.h>
#define RADEON_IDLE_LOOP_MS 100
#define RADEON_RECLOCK_DELAY_MS 200
#define RADEON_WAIT_VBLANK_TIMEOUT 200
+#define RADEON_WAIT_IDLE_TIMEOUT 200
+static void radeon_dynpm_idle_work_handler(struct work_struct *work);
+static int radeon_debugfs_pm_init(struct radeon_device *rdev);
+static bool radeon_pm_in_vbl(struct radeon_device *rdev);
static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish);
-static void radeon_pm_set_clocks_locked(struct radeon_device *rdev);
+static void radeon_pm_update_profile(struct radeon_device *rdev);
static void radeon_pm_set_clocks(struct radeon_device *rdev);
-static void radeon_pm_idle_work_handler(struct work_struct *work);
-static int radeon_debugfs_pm_init(struct radeon_device *rdev);
-
-static const char *pm_state_names[4] = {
- "PM_STATE_DISABLED",
- "PM_STATE_MINIMUM",
- "PM_STATE_PAUSED",
- "PM_STATE_ACTIVE"
-};
-static const char *pm_state_types[5] = {
- "Default",
- "Powersave",
- "Battery",
- "Balanced",
- "Performance",
-};
+#define ACPI_AC_CLASS "ac_adapter"
-static void radeon_print_power_mode_info(struct radeon_device *rdev)
+#ifdef CONFIG_ACPI
+static int radeon_acpi_event(struct notifier_block *nb,
+ unsigned long val,
+ void *data)
{
- int i, j;
- bool is_default;
+ struct radeon_device *rdev = container_of(nb, struct radeon_device, acpi_nb);
+ struct acpi_bus_event *entry = (struct acpi_bus_event *)data;
- DRM_INFO("%d Power State(s)\n", rdev->pm.num_power_states);
- for (i = 0; i < rdev->pm.num_power_states; i++) {
- if (rdev->pm.default_power_state == &rdev->pm.power_state[i])
- is_default = true;
+ if (strcmp(entry->device_class, ACPI_AC_CLASS) == 0) {
+ if (power_supply_is_system_supplied() > 0)
+ DRM_DEBUG("pm: AC\n");
else
- is_default = false;
- DRM_INFO("State %d %s %s\n", i,
- pm_state_types[rdev->pm.power_state[i].type],
- is_default ? "(default)" : "");
- if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP))
- DRM_INFO("\t%d PCIE Lanes\n", rdev->pm.power_state[i].non_clock_info.pcie_lanes);
- DRM_INFO("\t%d Clock Mode(s)\n", rdev->pm.power_state[i].num_clock_modes);
- for (j = 0; j < rdev->pm.power_state[i].num_clock_modes; j++) {
- if (rdev->flags & RADEON_IS_IGP)
- DRM_INFO("\t\t%d engine: %d\n",
- j,
- rdev->pm.power_state[i].clock_info[j].sclk * 10);
- else
- DRM_INFO("\t\t%d engine/memory: %d/%d\n",
- j,
- rdev->pm.power_state[i].clock_info[j].sclk * 10,
- rdev->pm.power_state[i].clock_info[j].mclk * 10);
+ DRM_DEBUG("pm: DC\n");
+
+ if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
+ if (rdev->pm.profile == PM_PROFILE_AUTO) {
+ mutex_lock(&rdev->pm.mutex);
+ radeon_pm_update_profile(rdev);
+ radeon_pm_set_clocks(rdev);
+ mutex_unlock(&rdev->pm.mutex);
+ }
}
}
+
+ return NOTIFY_OK;
}
+#endif
-static struct radeon_power_state * radeon_pick_power_state(struct radeon_device *rdev,
- enum radeon_pm_state_type type)
+static void radeon_pm_update_profile(struct radeon_device *rdev)
{
- int i, j;
- enum radeon_pm_state_type wanted_types[2];
- int wanted_count;
-
- switch (type) {
- case POWER_STATE_TYPE_DEFAULT:
- default:
- return rdev->pm.default_power_state;
- case POWER_STATE_TYPE_POWERSAVE:
- if (rdev->flags & RADEON_IS_MOBILITY) {
- wanted_types[0] = POWER_STATE_TYPE_POWERSAVE;
- wanted_types[1] = POWER_STATE_TYPE_BATTERY;
- wanted_count = 2;
- } else {
- wanted_types[0] = POWER_STATE_TYPE_PERFORMANCE;
- wanted_count = 1;
- }
+ switch (rdev->pm.profile) {
+ case PM_PROFILE_DEFAULT:
+ rdev->pm.profile_index = PM_PROFILE_DEFAULT_IDX;
break;
- case POWER_STATE_TYPE_BATTERY:
- if (rdev->flags & RADEON_IS_MOBILITY) {
- wanted_types[0] = POWER_STATE_TYPE_BATTERY;
- wanted_types[1] = POWER_STATE_TYPE_POWERSAVE;
- wanted_count = 2;
+ case PM_PROFILE_AUTO:
+ if (power_supply_is_system_supplied() > 0) {
+ if (rdev->pm.active_crtc_count > 1)
+ rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
+ else
+ rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
} else {
- wanted_types[0] = POWER_STATE_TYPE_PERFORMANCE;
- wanted_count = 1;
+ if (rdev->pm.active_crtc_count > 1)
+ rdev->pm.profile_index = PM_PROFILE_LOW_MH_IDX;
+ else
+ rdev->pm.profile_index = PM_PROFILE_LOW_SH_IDX;
}
break;
- case POWER_STATE_TYPE_BALANCED:
- case POWER_STATE_TYPE_PERFORMANCE:
- wanted_types[0] = type;
- wanted_count = 1;
+ case PM_PROFILE_LOW:
+ if (rdev->pm.active_crtc_count > 1)
+ rdev->pm.profile_index = PM_PROFILE_LOW_MH_IDX;
+ else
+ rdev->pm.profile_index = PM_PROFILE_LOW_SH_IDX;
break;
- }
-
- for (i = 0; i < wanted_count; i++) {
- for (j = 0; j < rdev->pm.num_power_states; j++) {
- if (rdev->pm.power_state[j].type == wanted_types[i])
- return &rdev->pm.power_state[j];
- }
- }
-
- return rdev->pm.default_power_state;
-}
-
-static struct radeon_pm_clock_info * radeon_pick_clock_mode(struct radeon_device *rdev,
- struct radeon_power_state *power_state,
- enum radeon_pm_clock_mode_type type)
-{
- switch (type) {
- case POWER_MODE_TYPE_DEFAULT:
- default:
- return power_state->default_clock_mode;
- case POWER_MODE_TYPE_LOW:
- return &power_state->clock_info[0];
- case POWER_MODE_TYPE_MID:
- if (power_state->num_clock_modes > 2)
- return &power_state->clock_info[1];
+ case PM_PROFILE_HIGH:
+ if (rdev->pm.active_crtc_count > 1)
+ rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
else
- return &power_state->clock_info[0];
+ rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
break;
- case POWER_MODE_TYPE_HIGH:
- return &power_state->clock_info[power_state->num_clock_modes - 1];
}
+ if (rdev->pm.active_crtc_count == 0) {
+ rdev->pm.requested_power_state_index =
+ rdev->pm.profiles[rdev->pm.profile_index].dpms_off_ps_idx;
+ rdev->pm.requested_clock_mode_index =
+ rdev->pm.profiles[rdev->pm.profile_index].dpms_off_cm_idx;
+ } else {
+ rdev->pm.requested_power_state_index =
+ rdev->pm.profiles[rdev->pm.profile_index].dpms_on_ps_idx;
+ rdev->pm.requested_clock_mode_index =
+ rdev->pm.profiles[rdev->pm.profile_index].dpms_on_cm_idx;
+ }
}
-static void radeon_get_power_state(struct radeon_device *rdev,
- enum radeon_pm_action action)
+static void radeon_unmap_vram_bos(struct radeon_device *rdev)
{
- switch (action) {
- case PM_ACTION_MINIMUM:
- rdev->pm.requested_power_state = radeon_pick_power_state(rdev, POWER_STATE_TYPE_BATTERY);
- rdev->pm.requested_clock_mode =
- radeon_pick_clock_mode(rdev, rdev->pm.requested_power_state, POWER_MODE_TYPE_LOW);
- break;
- case PM_ACTION_DOWNCLOCK:
- rdev->pm.requested_power_state = radeon_pick_power_state(rdev, POWER_STATE_TYPE_POWERSAVE);
- rdev->pm.requested_clock_mode =
- radeon_pick_clock_mode(rdev, rdev->pm.requested_power_state, POWER_MODE_TYPE_MID);
- break;
- case PM_ACTION_UPCLOCK:
- rdev->pm.requested_power_state = radeon_pick_power_state(rdev, POWER_STATE_TYPE_DEFAULT);
- rdev->pm.requested_clock_mode =
- radeon_pick_clock_mode(rdev, rdev->pm.requested_power_state, POWER_MODE_TYPE_HIGH);
- break;
- case PM_ACTION_NONE:
- default:
- DRM_ERROR("Requested mode for not defined action\n");
+ struct radeon_bo *bo, *n;
+
+ if (list_empty(&rdev->gem.objects))
return;
+
+ list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
+ if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
+ ttm_bo_unmap_virtual(&bo->tbo);
}
- DRM_INFO("Requested: e: %d m: %d p: %d\n",
- rdev->pm.requested_clock_mode->sclk,
- rdev->pm.requested_clock_mode->mclk,
- rdev->pm.requested_power_state->non_clock_info.pcie_lanes);
+
+ if (rdev->gart.table.vram.robj)
+ ttm_bo_unmap_virtual(&rdev->gart.table.vram.robj->tbo);
+
+ if (rdev->stollen_vga_memory)
+ ttm_bo_unmap_virtual(&rdev->stollen_vga_memory->tbo);
+
+ if (rdev->r600_blit.shader_obj)
+ ttm_bo_unmap_virtual(&rdev->r600_blit.shader_obj->tbo);
}
-static inline void radeon_sync_with_vblank(struct radeon_device *rdev)
+static void radeon_sync_with_vblank(struct radeon_device *rdev)
{
if (rdev->pm.active_crtcs) {
rdev->pm.vblank_sync = false;
@@ -192,73 +150,332 @@ static inline void radeon_sync_with_vblank(struct radeon_device *rdev)
static void radeon_set_power_state(struct radeon_device *rdev)
{
- /* if *_clock_mode are the same, *_power_state are as well */
- if (rdev->pm.requested_clock_mode == rdev->pm.current_clock_mode)
+ u32 sclk, mclk;
+
+ if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
+ (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
return;
- DRM_INFO("Setting: e: %d m: %d p: %d\n",
- rdev->pm.requested_clock_mode->sclk,
- rdev->pm.requested_clock_mode->mclk,
- rdev->pm.requested_power_state->non_clock_info.pcie_lanes);
-
- /* set pcie lanes */
- /* TODO */
-
- /* set voltage */
- /* TODO */
-
- /* set engine clock */
- radeon_sync_with_vblank(rdev);
- radeon_pm_debug_check_in_vbl(rdev, false);
- radeon_set_engine_clock(rdev, rdev->pm.requested_clock_mode->sclk);
- radeon_pm_debug_check_in_vbl(rdev, true);
-
-#if 0
- /* set memory clock */
- if (rdev->asic->set_memory_clock) {
- radeon_sync_with_vblank(rdev);
- radeon_pm_debug_check_in_vbl(rdev, false);
- radeon_set_memory_clock(rdev, rdev->pm.requested_clock_mode->mclk);
- radeon_pm_debug_check_in_vbl(rdev, true);
+ if (radeon_gui_idle(rdev)) {
+ sclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
+ clock_info[rdev->pm.requested_clock_mode_index].sclk;
+ if (sclk > rdev->clock.default_sclk)
+ sclk = rdev->clock.default_sclk;
+
+ mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
+ clock_info[rdev->pm.requested_clock_mode_index].mclk;
+ if (mclk > rdev->clock.default_mclk)
+ mclk = rdev->clock.default_mclk;
+
+ /* voltage, pcie lanes, etc.*/
+ radeon_pm_misc(rdev);
+
+ if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
+ radeon_sync_with_vblank(rdev);
+
+ if (!radeon_pm_in_vbl(rdev))
+ return;
+
+ radeon_pm_prepare(rdev);
+ /* set engine clock */
+ if (sclk != rdev->pm.current_sclk) {
+ radeon_pm_debug_check_in_vbl(rdev, false);
+ radeon_set_engine_clock(rdev, sclk);
+ radeon_pm_debug_check_in_vbl(rdev, true);
+ rdev->pm.current_sclk = sclk;
+ DRM_DEBUG("Setting: e: %d\n", sclk);
+ }
+
+ /* set memory clock */
+ if (rdev->asic->set_memory_clock && (mclk != rdev->pm.current_mclk)) {
+ radeon_pm_debug_check_in_vbl(rdev, false);
+ radeon_set_memory_clock(rdev, mclk);
+ radeon_pm_debug_check_in_vbl(rdev, true);
+ rdev->pm.current_mclk = mclk;
+ DRM_DEBUG("Setting: m: %d\n", mclk);
+ }
+ radeon_pm_finish(rdev);
+ } else {
+ /* set engine clock */
+ if (sclk != rdev->pm.current_sclk) {
+ radeon_sync_with_vblank(rdev);
+ radeon_pm_prepare(rdev);
+ radeon_set_engine_clock(rdev, sclk);
+ radeon_pm_finish(rdev);
+ rdev->pm.current_sclk = sclk;
+ DRM_DEBUG("Setting: e: %d\n", sclk);
+ }
+ /* set memory clock */
+ if (rdev->asic->set_memory_clock && (mclk != rdev->pm.current_mclk)) {
+ radeon_sync_with_vblank(rdev);
+ radeon_pm_prepare(rdev);
+ radeon_set_memory_clock(rdev, mclk);
+ radeon_pm_finish(rdev);
+ rdev->pm.current_mclk = mclk;
+ DRM_DEBUG("Setting: m: %d\n", mclk);
+ }
+ }
+
+ rdev->pm.current_power_state_index = rdev->pm.requested_power_state_index;
+ rdev->pm.current_clock_mode_index = rdev->pm.requested_clock_mode_index;
+ } else
+ DRM_DEBUG("pm: GUI not idle!!!\n");
+}
+
+static void radeon_pm_set_clocks(struct radeon_device *rdev)
+{
+ int i;
+
+ mutex_lock(&rdev->ddev->struct_mutex);
+ mutex_lock(&rdev->vram_mutex);
+ mutex_lock(&rdev->cp.mutex);
+
+ /* the gui idle interrupt seems unreliable on pre-R600 chips, so fall back to a fence */
+ if (rdev->family >= CHIP_R600) {
+ if (rdev->irq.installed) {
+ /* wait for GPU idle */
+ rdev->pm.gui_idle = false;
+ rdev->irq.gui_idle = true;
+ radeon_irq_set(rdev);
+ wait_event_interruptible_timeout(
+ rdev->irq.idle_queue, rdev->pm.gui_idle,
+ msecs_to_jiffies(RADEON_WAIT_IDLE_TIMEOUT));
+ rdev->irq.gui_idle = false;
+ radeon_irq_set(rdev);
+ }
+ } else {
+ if (rdev->cp.ready) {
+ struct radeon_fence *fence;
+ radeon_ring_alloc(rdev, 64);
+ radeon_fence_create(rdev, &fence);
+ radeon_fence_emit(rdev, fence);
+ radeon_ring_commit(rdev);
+ radeon_fence_wait(fence, false);
+ radeon_fence_unref(&fence);
+ }
}
-#endif
+ radeon_unmap_vram_bos(rdev);
+
+ if (rdev->irq.installed) {
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (rdev->pm.active_crtcs & (1 << i)) {
+ rdev->pm.req_vblank |= (1 << i);
+ drm_vblank_get(rdev->ddev, i);
+ }
+ }
+ }
+
+ radeon_set_power_state(rdev);
+
+ if (rdev->irq.installed) {
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (rdev->pm.req_vblank & (1 << i)) {
+ rdev->pm.req_vblank &= ~(1 << i);
+ drm_vblank_put(rdev->ddev, i);
+ }
+ }
+ }
+
+ /* update display watermarks based on new power state */
+ radeon_update_bandwidth_info(rdev);
+ if (rdev->pm.active_crtc_count)
+ radeon_bandwidth_update(rdev);
+
+ rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
+
+ mutex_unlock(&rdev->cp.mutex);
+ mutex_unlock(&rdev->vram_mutex);
+ mutex_unlock(&rdev->ddev->struct_mutex);
+}
+
+static ssize_t radeon_get_pm_profile(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+ struct radeon_device *rdev = ddev->dev_private;
+ int cp = rdev->pm.profile;
+
+ return snprintf(buf, PAGE_SIZE, "%s\n",
+ (cp == PM_PROFILE_AUTO) ? "auto" :
+ (cp == PM_PROFILE_LOW) ? "low" :
+ (cp == PM_PROFILE_HIGH) ? "high" : "default");
+}
+
+static ssize_t radeon_set_pm_profile(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+ struct radeon_device *rdev = ddev->dev_private;
+
+ mutex_lock(&rdev->pm.mutex);
+ if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
+ if (strncmp("default", buf, strlen("default")) == 0)
+ rdev->pm.profile = PM_PROFILE_DEFAULT;
+ else if (strncmp("auto", buf, strlen("auto")) == 0)
+ rdev->pm.profile = PM_PROFILE_AUTO;
+ else if (strncmp("low", buf, strlen("low")) == 0)
+ rdev->pm.profile = PM_PROFILE_LOW;
+ else if (strncmp("high", buf, strlen("high")) == 0)
+ rdev->pm.profile = PM_PROFILE_HIGH;
+ else {
+ DRM_ERROR("invalid power profile!\n");
+ goto fail;
+ }
+ radeon_pm_update_profile(rdev);
+ radeon_pm_set_clocks(rdev);
+ }
+fail:
+ mutex_unlock(&rdev->pm.mutex);
+
+ return count;
+}
+
+static ssize_t radeon_get_pm_method(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+ struct radeon_device *rdev = ddev->dev_private;
+ int pm = rdev->pm.pm_method;
+
+ return snprintf(buf, PAGE_SIZE, "%s\n",
+ (pm == PM_METHOD_DYNPM) ? "dynpm" : "profile");
+}
+
+static ssize_t radeon_set_pm_method(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+ struct radeon_device *rdev = ddev->dev_private;
+
+
+ if (strncmp("dynpm", buf, strlen("dynpm")) == 0) {
+ mutex_lock(&rdev->pm.mutex);
+ rdev->pm.pm_method = PM_METHOD_DYNPM;
+ rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
+ rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
+ mutex_unlock(&rdev->pm.mutex);
+ } else if (strncmp("profile", buf, strlen("profile")) == 0) {
+ mutex_lock(&rdev->pm.mutex);
+ rdev->pm.pm_method = PM_METHOD_PROFILE;
+ /* disable dynpm */
+ rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
+ rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
+ cancel_delayed_work(&rdev->pm.dynpm_idle_work);
+ mutex_unlock(&rdev->pm.mutex);
+ } else {
+ DRM_ERROR("invalid power method!\n");
+ goto fail;
+ }
+ radeon_pm_compute_clocks(rdev);
+fail:
+ return count;
+}
+
+static DEVICE_ATTR(power_profile, S_IRUGO | S_IWUSR, radeon_get_pm_profile, radeon_set_pm_profile);
+static DEVICE_ATTR(power_method, S_IRUGO | S_IWUSR, radeon_get_pm_method, radeon_set_pm_method);
- rdev->pm.current_power_state = rdev->pm.requested_power_state;
- rdev->pm.current_clock_mode = rdev->pm.requested_clock_mode;
+void radeon_pm_suspend(struct radeon_device *rdev)
+{
+ mutex_lock(&rdev->pm.mutex);
+ cancel_delayed_work(&rdev->pm.dynpm_idle_work);
+ rdev->pm.current_power_state_index = -1;
+ rdev->pm.current_clock_mode_index = -1;
+ rdev->pm.current_sclk = 0;
+ rdev->pm.current_mclk = 0;
+ mutex_unlock(&rdev->pm.mutex);
+}
+
+void radeon_pm_resume(struct radeon_device *rdev)
+{
+ radeon_pm_compute_clocks(rdev);
}
int radeon_pm_init(struct radeon_device *rdev)
{
- rdev->pm.state = PM_STATE_DISABLED;
- rdev->pm.planned_action = PM_ACTION_NONE;
- rdev->pm.downclocked = false;
+ int ret;
+ /* default to profile method */
+ rdev->pm.pm_method = PM_METHOD_PROFILE;
+ rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
+ rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
+ rdev->pm.dynpm_can_upclock = true;
+ rdev->pm.dynpm_can_downclock = true;
+ rdev->pm.current_sclk = 0;
+ rdev->pm.current_mclk = 0;
if (rdev->bios) {
if (rdev->is_atom_bios)
radeon_atombios_get_power_modes(rdev);
else
radeon_combios_get_power_modes(rdev);
- radeon_print_power_mode_info(rdev);
+ radeon_pm_init_profile(rdev);
+ rdev->pm.current_power_state_index = -1;
+ rdev->pm.current_clock_mode_index = -1;
}
- if (radeon_debugfs_pm_init(rdev)) {
- DRM_ERROR("Failed to register debugfs file for PM!\n");
- }
+ if (rdev->pm.num_power_states > 1) {
+ if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
+ mutex_lock(&rdev->pm.mutex);
+ rdev->pm.profile = PM_PROFILE_DEFAULT;
+ radeon_pm_update_profile(rdev);
+ radeon_pm_set_clocks(rdev);
+ mutex_unlock(&rdev->pm.mutex);
+ }
+
+ /* where's the best place to put these? */
+ ret = device_create_file(rdev->dev, &dev_attr_power_profile);
+ if (ret)
+ DRM_ERROR("failed to create device file for power profile\n");
+ ret = device_create_file(rdev->dev, &dev_attr_power_method);
+ if (ret)
+ DRM_ERROR("failed to create device file for power method\n");
+
+#ifdef CONFIG_ACPI
+ rdev->acpi_nb.notifier_call = radeon_acpi_event;
+ register_acpi_notifier(&rdev->acpi_nb);
+#endif
+ INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler);
- INIT_DELAYED_WORK(&rdev->pm.idle_work, radeon_pm_idle_work_handler);
+ if (radeon_debugfs_pm_init(rdev)) {
+ DRM_ERROR("Failed to register debugfs file for PM!\n");
+ }
- if (radeon_dynpm != -1 && radeon_dynpm) {
- rdev->pm.state = PM_STATE_PAUSED;
- DRM_INFO("radeon: dynamic power management enabled\n");
+ DRM_INFO("radeon: power management initialized\n");
}
- DRM_INFO("radeon: power management initialized\n");
-
return 0;
}
void radeon_pm_fini(struct radeon_device *rdev)
{
+ if (rdev->pm.num_power_states > 1) {
+ mutex_lock(&rdev->pm.mutex);
+ if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
+ rdev->pm.profile = PM_PROFILE_DEFAULT;
+ radeon_pm_update_profile(rdev);
+ radeon_pm_set_clocks(rdev);
+ } else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
+ /* cancel work */
+ cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
+ /* reset default clocks */
+ rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
+ rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
+ radeon_pm_set_clocks(rdev);
+ }
+ mutex_unlock(&rdev->pm.mutex);
+
+ device_remove_file(rdev->dev, &dev_attr_power_profile);
+ device_remove_file(rdev->dev, &dev_attr_power_method);
+#ifdef CONFIG_ACPI
+ unregister_acpi_notifier(&rdev->acpi_nb);
+#endif
+ }
+
if (rdev->pm.i2c_bus)
radeon_i2c_destroy(rdev->pm.i2c_bus);
}
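
Editor's note: in the profile-based power management added above, radeon_pm_update_profile maps the user-selected profile (default/auto/low/high), the AC/battery state and the active crtc count onto an index into the profile table, which then supplies the requested power state and clock mode. A compact standalone sketch of that selection follows; the enum names are stand-ins and this is not the driver code.

#include <stdbool.h>
#include <stdio.h>

enum profile { PROF_DEFAULT, PROF_AUTO, PROF_LOW, PROF_HIGH };
enum profile_idx { IDX_DEFAULT, IDX_LOW_SH, IDX_LOW_MH, IDX_HIGH_SH, IDX_HIGH_MH };

/* pick the profile-table slot from the user profile, power source and head count */
static enum profile_idx pick_profile_index(enum profile p, bool on_ac,
					   int active_crtc_count)
{
	bool multi_head = active_crtc_count > 1;

	switch (p) {
	case PROF_DEFAULT:
		return IDX_DEFAULT;
	case PROF_AUTO:
		if (on_ac)
			return multi_head ? IDX_HIGH_MH : IDX_HIGH_SH;
		return multi_head ? IDX_LOW_MH : IDX_LOW_SH;
	case PROF_LOW:
		return multi_head ? IDX_LOW_MH : IDX_LOW_SH;
	case PROF_HIGH:
	default:
		return multi_head ? IDX_HIGH_MH : IDX_HIGH_SH;
	}
}

int main(void)
{
	printf("auto, on battery, one head -> slot %d\n",
	       pick_profile_index(PROF_AUTO, false, 1));
	return 0;
}
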
@@ -266,146 +483,167 @@ void radeon_pm_fini(struct radeon_device *rdev)
void radeon_pm_compute_clocks(struct radeon_device *rdev)
{
struct drm_device *ddev = rdev->ddev;
- struct drm_connector *connector;
+ struct drm_crtc *crtc;
struct radeon_crtc *radeon_crtc;
- int count = 0;
- if (rdev->pm.state == PM_STATE_DISABLED)
+ if (rdev->pm.num_power_states < 2)
return;
mutex_lock(&rdev->pm.mutex);
rdev->pm.active_crtcs = 0;
- list_for_each_entry(connector,
- &ddev->mode_config.connector_list, head) {
- if (connector->encoder &&
- connector->encoder->crtc &&
- connector->dpms != DRM_MODE_DPMS_OFF) {
- radeon_crtc = to_radeon_crtc(connector->encoder->crtc);
+ rdev->pm.active_crtc_count = 0;
+ list_for_each_entry(crtc,
+ &ddev->mode_config.crtc_list, head) {
+ radeon_crtc = to_radeon_crtc(crtc);
+ if (radeon_crtc->enabled) {
rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
- ++count;
+ rdev->pm.active_crtc_count++;
}
}
- if (count > 1) {
- if (rdev->pm.state == PM_STATE_ACTIVE) {
- cancel_delayed_work(&rdev->pm.idle_work);
-
- rdev->pm.state = PM_STATE_PAUSED;
- rdev->pm.planned_action = PM_ACTION_UPCLOCK;
- if (rdev->pm.downclocked)
- radeon_pm_set_clocks(rdev);
-
- DRM_DEBUG("radeon: dynamic power management deactivated\n");
- }
- } else if (count == 1) {
- /* TODO: Increase clocks if needed for current mode */
-
- if (rdev->pm.state == PM_STATE_MINIMUM) {
- rdev->pm.state = PM_STATE_ACTIVE;
- rdev->pm.planned_action = PM_ACTION_UPCLOCK;
- radeon_pm_set_clocks(rdev);
-
- queue_delayed_work(rdev->wq, &rdev->pm.idle_work,
- msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
- }
- else if (rdev->pm.state == PM_STATE_PAUSED) {
- rdev->pm.state = PM_STATE_ACTIVE;
- queue_delayed_work(rdev->wq, &rdev->pm.idle_work,
- msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
- DRM_DEBUG("radeon: dynamic power management activated\n");
- }
- }
- else { /* count == 0 */
- if (rdev->pm.state != PM_STATE_MINIMUM) {
- cancel_delayed_work(&rdev->pm.idle_work);
-
- rdev->pm.state = PM_STATE_MINIMUM;
- rdev->pm.planned_action = PM_ACTION_MINIMUM;
- radeon_pm_set_clocks(rdev);
+ if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
+ radeon_pm_update_profile(rdev);
+ radeon_pm_set_clocks(rdev);
+ } else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
+ if (rdev->pm.dynpm_state != DYNPM_STATE_DISABLED) {
+ if (rdev->pm.active_crtc_count > 1) {
+ if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
+ cancel_delayed_work(&rdev->pm.dynpm_idle_work);
+
+ rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
+ rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
+ radeon_pm_get_dynpm_state(rdev);
+ radeon_pm_set_clocks(rdev);
+
+ DRM_DEBUG("radeon: dynamic power management deactivated\n");
+ }
+ } else if (rdev->pm.active_crtc_count == 1) {
+ /* TODO: Increase clocks if needed for current mode */
+
+ if (rdev->pm.dynpm_state == DYNPM_STATE_MINIMUM) {
+ rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
+ rdev->pm.dynpm_planned_action = DYNPM_ACTION_UPCLOCK;
+ radeon_pm_get_dynpm_state(rdev);
+ radeon_pm_set_clocks(rdev);
+
+ queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work,
+ msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
+ } else if (rdev->pm.dynpm_state == DYNPM_STATE_PAUSED) {
+ rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
+ queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work,
+ msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
+ DRM_DEBUG("radeon: dynamic power management activated\n");
+ }
+ } else { /* count == 0 */
+ if (rdev->pm.dynpm_state != DYNPM_STATE_MINIMUM) {
+ cancel_delayed_work(&rdev->pm.dynpm_idle_work);
+
+ rdev->pm.dynpm_state = DYNPM_STATE_MINIMUM;
+ rdev->pm.dynpm_planned_action = DYNPM_ACTION_MINIMUM;
+ radeon_pm_get_dynpm_state(rdev);
+ radeon_pm_set_clocks(rdev);
+ }
+ }
}
}
mutex_unlock(&rdev->pm.mutex);
}
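
The dynpm branch above boils down to a decision on the number of enabled CRTCs. A minimal standalone C model of that decision, purely for illustration — the enum values and function are inventions of this sketch, not the driver's types:

	#include <stdio.h>

	/* Simplified model of the dynpm branch in radeon_pm_compute_clocks():
	 * >1 active CRTC  -> pause dynamic reclocking, stay at default clocks,
	 *  1 active CRTC  -> (re)activate dynamic reclocking,
	 *  0 active CRTCs -> drop to the minimum power state. */
	enum dynpm_state { DYNPM_DISABLED, DYNPM_MINIMUM, DYNPM_PAUSED, DYNPM_ACTIVE };

	static enum dynpm_state dynpm_next_state(enum dynpm_state cur, int active_crtcs)
	{
		if (cur == DYNPM_DISABLED)
			return cur;             /* user disabled dynpm, nothing to do */
		if (active_crtcs > 1)
			return DYNPM_PAUSED;    /* multi-head: keep default clocks */
		if (active_crtcs == 1)
			return DYNPM_ACTIVE;    /* single head: idle work may reclock */
		return DYNPM_MINIMUM;           /* no heads: lowest clocks */
	}

	int main(void)
	{
		for (int crtcs = 0; crtcs <= 2; crtcs++)
			printf("%d active crtc(s) -> state %d\n",
			       crtcs, dynpm_next_state(DYNPM_ACTIVE, crtcs));
		return 0;
	}
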
-static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish)
+static bool radeon_pm_in_vbl(struct radeon_device *rdev)
{
- u32 stat_crtc1 = 0, stat_crtc2 = 0;
+ u32 stat_crtc = 0, vbl = 0, position = 0;
bool in_vbl = true;
- if (ASIC_IS_AVIVO(rdev)) {
+ if (ASIC_IS_DCE4(rdev)) {
+ if (rdev->pm.active_crtcs & (1 << 0)) {
+ vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
+ EVERGREEN_CRTC0_REGISTER_OFFSET) & 0xfff;
+ position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
+ EVERGREEN_CRTC0_REGISTER_OFFSET) & 0xfff;
+ }
+ if (rdev->pm.active_crtcs & (1 << 1)) {
+ vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
+ EVERGREEN_CRTC1_REGISTER_OFFSET) & 0xfff;
+ position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
+ EVERGREEN_CRTC1_REGISTER_OFFSET) & 0xfff;
+ }
+ if (rdev->pm.active_crtcs & (1 << 2)) {
+ vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
+ EVERGREEN_CRTC2_REGISTER_OFFSET) & 0xfff;
+ position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
+ EVERGREEN_CRTC2_REGISTER_OFFSET) & 0xfff;
+ }
+ if (rdev->pm.active_crtcs & (1 << 3)) {
+ vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
+ EVERGREEN_CRTC3_REGISTER_OFFSET) & 0xfff;
+ position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
+ EVERGREEN_CRTC3_REGISTER_OFFSET) & 0xfff;
+ }
+ if (rdev->pm.active_crtcs & (1 << 4)) {
+ vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
+ EVERGREEN_CRTC4_REGISTER_OFFSET) & 0xfff;
+ position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
+ EVERGREEN_CRTC4_REGISTER_OFFSET) & 0xfff;
+ }
+ if (rdev->pm.active_crtcs & (1 << 5)) {
+ vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
+ EVERGREEN_CRTC5_REGISTER_OFFSET) & 0xfff;
+ position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
+ EVERGREEN_CRTC5_REGISTER_OFFSET) & 0xfff;
+ }
+ } else if (ASIC_IS_AVIVO(rdev)) {
+ if (rdev->pm.active_crtcs & (1 << 0)) {
+ vbl = RREG32(AVIVO_D1CRTC_V_BLANK_START_END) & 0xfff;
+ position = RREG32(AVIVO_D1CRTC_STATUS_POSITION) & 0xfff;
+ }
+ if (rdev->pm.active_crtcs & (1 << 1)) {
+ vbl = RREG32(AVIVO_D2CRTC_V_BLANK_START_END) & 0xfff;
+ position = RREG32(AVIVO_D2CRTC_STATUS_POSITION) & 0xfff;
+ }
+ if (position < vbl && position > 1)
+ in_vbl = false;
+ } else {
if (rdev->pm.active_crtcs & (1 << 0)) {
- stat_crtc1 = RREG32(D1CRTC_STATUS);
- if (!(stat_crtc1 & 1))
+ stat_crtc = RREG32(RADEON_CRTC_STATUS);
+ if (!(stat_crtc & 1))
in_vbl = false;
}
if (rdev->pm.active_crtcs & (1 << 1)) {
- stat_crtc2 = RREG32(D2CRTC_STATUS);
- if (!(stat_crtc2 & 1))
+ stat_crtc = RREG32(RADEON_CRTC2_STATUS);
+ if (!(stat_crtc & 1))
in_vbl = false;
}
}
- if (in_vbl == false)
- DRM_INFO("not in vbl for pm change %08x %08x at %s\n", stat_crtc1,
- stat_crtc2, finish ? "exit" : "entry");
- return in_vbl;
-}
-static void radeon_pm_set_clocks_locked(struct radeon_device *rdev)
-{
- /*radeon_fence_wait_last(rdev);*/
- switch (rdev->pm.planned_action) {
- case PM_ACTION_UPCLOCK:
- rdev->pm.downclocked = false;
- break;
- case PM_ACTION_DOWNCLOCK:
- rdev->pm.downclocked = true;
- break;
- case PM_ACTION_MINIMUM:
- break;
- case PM_ACTION_NONE:
- DRM_ERROR("%s: PM_ACTION_NONE\n", __func__);
- break;
- }
- radeon_set_power_state(rdev);
- rdev->pm.planned_action = PM_ACTION_NONE;
+ if (position < vbl && position > 1)
+ in_vbl = false;
+
+ return in_vbl;
}
-static void radeon_pm_set_clocks(struct radeon_device *rdev)
+static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish)
{
- radeon_get_power_state(rdev, rdev->pm.planned_action);
- mutex_lock(&rdev->cp.mutex);
+ u32 stat_crtc = 0;
+ bool in_vbl = radeon_pm_in_vbl(rdev);
- if (rdev->pm.active_crtcs & (1 << 0)) {
- rdev->pm.req_vblank |= (1 << 0);
- drm_vblank_get(rdev->ddev, 0);
- }
- if (rdev->pm.active_crtcs & (1 << 1)) {
- rdev->pm.req_vblank |= (1 << 1);
- drm_vblank_get(rdev->ddev, 1);
- }
- radeon_pm_set_clocks_locked(rdev);
- if (rdev->pm.req_vblank & (1 << 0)) {
- rdev->pm.req_vblank &= ~(1 << 0);
- drm_vblank_put(rdev->ddev, 0);
- }
- if (rdev->pm.req_vblank & (1 << 1)) {
- rdev->pm.req_vblank &= ~(1 << 1);
- drm_vblank_put(rdev->ddev, 1);
- }
-
- mutex_unlock(&rdev->cp.mutex);
+ if (in_vbl == false)
+ DRM_DEBUG("not in vbl for pm change %08x at %s\n", stat_crtc,
+ finish ? "exit" : "entry");
+ return in_vbl;
}
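
The vblank helpers above reduce to a scanline comparison: the CRTC is outside the vertical blank when the current scanout position lies in the active region, i.e. above line 1 and below the line where vblank starts. A standalone restatement of that test, with the register reads replaced by plain parameters (a sketch, not driver code):

	#include <stdbool.h>
	#include <stdio.h>

	/* Model of the AVIVO/DCE4 test in radeon_pm_in_vbl(): 'position' is the
	 * current scanline, 'vbl_start' the line where vertical blank begins.
	 * Reclocking is only considered safe while inside the blank region. */
	static bool in_vblank(unsigned position, unsigned vbl_start)
	{
		if (position < vbl_start && position > 1)
			return false;   /* still scanning out the visible area */
		return true;
	}

	int main(void)
	{
		printf("%d %d\n", in_vblank(0, 1080), in_vblank(500, 1080));
		return 0;
	}
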
-static void radeon_pm_idle_work_handler(struct work_struct *work)
+static void radeon_dynpm_idle_work_handler(struct work_struct *work)
{
struct radeon_device *rdev;
+ int resched;
rdev = container_of(work, struct radeon_device,
- pm.idle_work.work);
+ pm.dynpm_idle_work.work);
+ resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
mutex_lock(&rdev->pm.mutex);
- if (rdev->pm.state == PM_STATE_ACTIVE) {
+ if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
unsigned long irq_flags;
int not_processed = 0;
@@ -421,35 +659,40 @@ static void radeon_pm_idle_work_handler(struct work_struct *work)
read_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
if (not_processed >= 3) { /* should upclock */
- if (rdev->pm.planned_action == PM_ACTION_DOWNCLOCK) {
- rdev->pm.planned_action = PM_ACTION_NONE;
- } else if (rdev->pm.planned_action == PM_ACTION_NONE &&
- rdev->pm.downclocked) {
- rdev->pm.planned_action =
- PM_ACTION_UPCLOCK;
- rdev->pm.action_timeout = jiffies +
+ if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_DOWNCLOCK) {
+ rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
+ } else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE &&
+ rdev->pm.dynpm_can_upclock) {
+ rdev->pm.dynpm_planned_action =
+ DYNPM_ACTION_UPCLOCK;
+ rdev->pm.dynpm_action_timeout = jiffies +
msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
}
} else if (not_processed == 0) { /* should downclock */
- if (rdev->pm.planned_action == PM_ACTION_UPCLOCK) {
- rdev->pm.planned_action = PM_ACTION_NONE;
- } else if (rdev->pm.planned_action == PM_ACTION_NONE &&
- !rdev->pm.downclocked) {
- rdev->pm.planned_action =
- PM_ACTION_DOWNCLOCK;
- rdev->pm.action_timeout = jiffies +
+ if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_UPCLOCK) {
+ rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
+ } else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE &&
+ rdev->pm.dynpm_can_downclock) {
+ rdev->pm.dynpm_planned_action =
+ DYNPM_ACTION_DOWNCLOCK;
+ rdev->pm.dynpm_action_timeout = jiffies +
msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
}
}
- if (rdev->pm.planned_action != PM_ACTION_NONE &&
- jiffies > rdev->pm.action_timeout) {
+ /* Note, radeon_pm_set_clocks is called with static_switch set
+ * to false since we want to wait for vbl to avoid flicker.
+ */
+ if (rdev->pm.dynpm_planned_action != DYNPM_ACTION_NONE &&
+ jiffies > rdev->pm.dynpm_action_timeout) {
+ radeon_pm_get_dynpm_state(rdev);
radeon_pm_set_clocks(rdev);
}
}
mutex_unlock(&rdev->pm.mutex);
+ ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
- queue_delayed_work(rdev->wq, &rdev->pm.idle_work,
+ queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work,
msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
}
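
The renamed idle handler keeps the same heuristic over the count of unprocessed fences: three or more pending means the GPU is busy and an upclock is planned, zero pending means it is idle and a downclock is planned, and the change itself is only committed after the reclock delay expires. A compact model of that decision, with names invented for the sketch:

	#include <stdio.h>

	enum action { ACT_NONE, ACT_UPCLOCK, ACT_DOWNCLOCK };

	/* Decide the next reclock action from the number of fences that have not
	 * been processed yet, mirroring radeon_dynpm_idle_work_handler(). */
	static enum action plan_reclock(enum action planned, int not_processed,
					int can_up, int can_down)
	{
		if (not_processed >= 3) {             /* GPU busy */
			if (planned == ACT_DOWNCLOCK)
				return ACT_NONE;      /* cancel a pending downclock */
			if (planned == ACT_NONE && can_up)
				return ACT_UPCLOCK;
		} else if (not_processed == 0) {      /* GPU idle */
			if (planned == ACT_UPCLOCK)
				return ACT_NONE;      /* cancel a pending upclock */
			if (planned == ACT_NONE && can_down)
				return ACT_DOWNCLOCK;
		}
		return planned;                       /* 1-2 fences: keep current plan */
	}

	int main(void)
	{
		printf("%d\n", plan_reclock(ACT_NONE, 5, 1, 1));  /* -> ACT_UPCLOCK */
		printf("%d\n", plan_reclock(ACT_NONE, 0, 1, 1));  /* -> ACT_DOWNCLOCK */
		return 0;
	}
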
@@ -464,7 +707,6 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
struct radeon_device *rdev = dev->dev_private;
- seq_printf(m, "state: %s\n", pm_state_names[rdev->pm.state]);
seq_printf(m, "default engine clock: %u0 kHz\n", rdev->clock.default_sclk);
seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
seq_printf(m, "default memory clock: %u0 kHz\n", rdev->clock.default_mclk);
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h
index eabbc9cf30a7..c332f46340d5 100644
--- a/drivers/gpu/drm/radeon/radeon_reg.h
+++ b/drivers/gpu/drm/radeon/radeon_reg.h
@@ -553,7 +553,6 @@
# define RADEON_CRTC_CRNT_VLINE_MASK (0x7ff << 16)
#define RADEON_CRTC2_CRNT_FRAME 0x0314
#define RADEON_CRTC2_GUI_TRIG_VLINE 0x0318
-#define RADEON_CRTC2_STATUS 0x03fc
#define RADEON_CRTC2_VLINE_CRNT_VLINE 0x0310
#define RADEON_CRTC8_DATA 0x03d5 /* VGA, 0x3b5 */
#define RADEON_CRTC8_IDX 0x03d4 /* VGA, 0x3b4 */
@@ -995,6 +994,7 @@
# define RADEON_FP_DETECT_MASK (1 << 4)
# define RADEON_CRTC2_VBLANK_MASK (1 << 9)
# define RADEON_FP2_DETECT_MASK (1 << 10)
+# define RADEON_GUI_IDLE_MASK (1 << 19)
# define RADEON_SW_INT_ENABLE (1 << 25)
#define RADEON_GEN_INT_STATUS 0x0044
# define AVIVO_DISPLAY_INT_STATUS (1 << 0)
@@ -1006,6 +1006,8 @@
# define RADEON_CRTC2_VBLANK_STAT_ACK (1 << 9)
# define RADEON_FP2_DETECT_STAT (1 << 10)
# define RADEON_FP2_DETECT_STAT_ACK (1 << 10)
+# define RADEON_GUI_IDLE_STAT (1 << 19)
+# define RADEON_GUI_IDLE_STAT_ACK (1 << 19)
# define RADEON_SW_INT_FIRE (1 << 26)
# define RADEON_SW_INT_TEST (1 << 25)
# define RADEON_SW_INT_TEST_ACK (1 << 25)
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index f6e1e8d4d986..261e98a276db 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -219,24 +219,26 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
void radeon_ib_pool_fini(struct radeon_device *rdev)
{
int r;
+ struct radeon_bo *robj;
if (!rdev->ib_pool.ready) {
return;
}
mutex_lock(&rdev->ib_pool.mutex);
radeon_ib_bogus_cleanup(rdev);
+ robj = rdev->ib_pool.robj;
+ rdev->ib_pool.robj = NULL;
+ mutex_unlock(&rdev->ib_pool.mutex);
- if (rdev->ib_pool.robj) {
- r = radeon_bo_reserve(rdev->ib_pool.robj, false);
+ if (robj) {
+ r = radeon_bo_reserve(robj, false);
if (likely(r == 0)) {
- radeon_bo_kunmap(rdev->ib_pool.robj);
- radeon_bo_unpin(rdev->ib_pool.robj);
- radeon_bo_unreserve(rdev->ib_pool.robj);
+ radeon_bo_kunmap(robj);
+ radeon_bo_unpin(robj);
+ radeon_bo_unreserve(robj);
}
- radeon_bo_unref(&rdev->ib_pool.robj);
- rdev->ib_pool.robj = NULL;
+ radeon_bo_unref(&robj);
}
- mutex_unlock(&rdev->ib_pool.mutex);
}
@@ -258,31 +260,41 @@ void radeon_ring_free_size(struct radeon_device *rdev)
}
}
-int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw)
+int radeon_ring_alloc(struct radeon_device *rdev, unsigned ndw)
{
int r;
/* Align requested size with padding so unlock_commit can
* pad safely */
ndw = (ndw + rdev->cp.align_mask) & ~rdev->cp.align_mask;
- mutex_lock(&rdev->cp.mutex);
while (ndw > (rdev->cp.ring_free_dw - 1)) {
radeon_ring_free_size(rdev);
if (ndw < rdev->cp.ring_free_dw) {
break;
}
r = radeon_fence_wait_next(rdev);
- if (r) {
- mutex_unlock(&rdev->cp.mutex);
+ if (r)
return r;
- }
}
rdev->cp.count_dw = ndw;
rdev->cp.wptr_old = rdev->cp.wptr;
return 0;
}
-void radeon_ring_unlock_commit(struct radeon_device *rdev)
+int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw)
+{
+ int r;
+
+ mutex_lock(&rdev->cp.mutex);
+ r = radeon_ring_alloc(rdev, ndw);
+ if (r) {
+ mutex_unlock(&rdev->cp.mutex);
+ return r;
+ }
+ return 0;
+}
+
+void radeon_ring_commit(struct radeon_device *rdev)
{
unsigned count_dw_pad;
unsigned i;
@@ -295,6 +307,11 @@ void radeon_ring_unlock_commit(struct radeon_device *rdev)
}
DRM_MEMORYBARRIER();
radeon_cp_commit(rdev);
+}
+
+void radeon_ring_unlock_commit(struct radeon_device *rdev)
+{
+ radeon_ring_commit(rdev);
mutex_unlock(&rdev->cp.mutex);
}
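
The hunk above splits the old lock/unlock pair so that callers which already hold the CP mutex can allocate and commit ring space directly. A userspace pthread sketch of the resulting layering — the ring is reduced to a free-dword counter and all names are placeholders, so this only shows the shape of the split, not the driver's behaviour:

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t ring_mutex = PTHREAD_MUTEX_INITIALIZER;
	static unsigned ring_free_dw = 1024;

	/* Mutex-free primitives for callers that already hold ring_mutex,
	 * mirroring radeon_ring_alloc()/radeon_ring_commit(). */
	static int ring_alloc(unsigned ndw)
	{
		if (ndw > ring_free_dw)
			return -1;      /* the driver would wait for fences here */
		ring_free_dw -= ndw;
		return 0;
	}

	static void ring_commit(void)
	{
		/* pad to alignment and bump the write pointer in the real driver */
	}

	/* Convenience wrappers that take and drop the mutex, mirroring
	 * radeon_ring_lock()/radeon_ring_unlock_commit(). */
	static int ring_lock(unsigned ndw)
	{
		pthread_mutex_lock(&ring_mutex);
		if (ring_alloc(ndw)) {
			pthread_mutex_unlock(&ring_mutex);
			return -1;
		}
		return 0;
	}

	static void ring_unlock_commit(void)
	{
		ring_commit();
		pthread_mutex_unlock(&ring_mutex);
	}

	int main(void)
	{
		if (!ring_lock(64))
			ring_unlock_commit();
		printf("free dwords left: %u\n", ring_free_dw);
		return 0;
	}
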
@@ -344,20 +361,23 @@ int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
void radeon_ring_fini(struct radeon_device *rdev)
{
int r;
+ struct radeon_bo *ring_obj;
mutex_lock(&rdev->cp.mutex);
- if (rdev->cp.ring_obj) {
- r = radeon_bo_reserve(rdev->cp.ring_obj, false);
+ ring_obj = rdev->cp.ring_obj;
+ rdev->cp.ring = NULL;
+ rdev->cp.ring_obj = NULL;
+ mutex_unlock(&rdev->cp.mutex);
+
+ if (ring_obj) {
+ r = radeon_bo_reserve(ring_obj, false);
if (likely(r == 0)) {
- radeon_bo_kunmap(rdev->cp.ring_obj);
- radeon_bo_unpin(rdev->cp.ring_obj);
- radeon_bo_unreserve(rdev->cp.ring_obj);
+ radeon_bo_kunmap(ring_obj);
+ radeon_bo_unpin(ring_obj);
+ radeon_bo_unreserve(ring_obj);
}
- radeon_bo_unref(&rdev->cp.ring_obj);
- rdev->cp.ring = NULL;
- rdev->cp.ring_obj = NULL;
+ radeon_bo_unref(&ring_obj);
}
- mutex_unlock(&rdev->cp.mutex);
}
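
Both radeon_ib_pool_fini() and radeon_ring_fini() now follow the same shape: hold the mutex only long enough to detach the buffer object from the shared structure, then release it outside the lock, since unpinning and freeing may sleep. A generic illustration of the pattern, with a heap pointer standing in for the buffer object (names are placeholders for this sketch):

	#include <pthread.h>
	#include <stdlib.h>

	struct pool {
		pthread_mutex_t mutex;
		void *obj;              /* stands in for the pinned radeon_bo */
	};

	/* Detach under the lock, tear down outside of it: the possibly sleeping
	 * cleanup never runs with pool->mutex held. */
	static void pool_fini(struct pool *p)
	{
		void *obj;

		pthread_mutex_lock(&p->mutex);
		obj = p->obj;
		p->obj = NULL;
		pthread_mutex_unlock(&p->mutex);

		if (obj)
			free(obj);      /* reserve/kunmap/unpin/unref in the driver */
	}

	int main(void)
	{
		struct pool p = { PTHREAD_MUTEX_INITIALIZER, malloc(16) };
		pool_fini(&p);
		return 0;
	}
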
diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
index 40ab6d9c3736..b3ba44c0a818 100644
--- a/drivers/gpu/drm/radeon/radeon_state.c
+++ b/drivers/gpu/drm/radeon/radeon_state.c
@@ -424,7 +424,7 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
if ((*cmd & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) &&
(*cmd & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
u32 *cmd3 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 3);
- offset = *cmd << 10;
+ offset = *cmd3 << 10;
if (radeon_check_and_fixup_offset
(dev_priv, file_priv, &offset)) {
DRM_ERROR("Invalid second packet offset\n");
@@ -900,9 +900,10 @@ static void radeon_cp_dispatch_clear(struct drm_device * dev,
flags |= RADEON_FRONT;
}
if (flags & (RADEON_DEPTH|RADEON_STENCIL)) {
- if (!dev_priv->have_z_offset)
+ if (!dev_priv->have_z_offset) {
printk_once(KERN_ERR "radeon: illegal depth clear request. Buggy mesa detected - please update.\n");
- flags &= ~(RADEON_DEPTH | RADEON_STENCIL);
+ flags &= ~(RADEON_DEPTH | RADEON_STENCIL);
+ }
}
if (flags & (RADEON_FRONT | RADEON_BACK)) {
@@ -2895,9 +2896,12 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data,
return rv;
rv = drm_buffer_copy_from_user(cmdbuf->buffer, buffer,
cmdbuf->bufsz);
- if (rv)
+ if (rv) {
+ drm_buffer_free(cmdbuf->buffer);
return rv;
- }
+ }
+ } else
+ goto done;
orig_nbox = cmdbuf->nbox;
@@ -2905,8 +2909,7 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data,
int temp;
temp = r300_do_cp_cmdbuf(dev, file_priv, cmdbuf);
- if (cmdbuf->bufsz != 0)
- drm_buffer_free(cmdbuf->buffer);
+ drm_buffer_free(cmdbuf->buffer);
return temp;
}
@@ -3012,16 +3015,15 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data,
}
}
- if (cmdbuf->bufsz != 0)
- drm_buffer_free(cmdbuf->buffer);
+ drm_buffer_free(cmdbuf->buffer);
+ done:
DRM_DEBUG("DONE\n");
COMMIT_RING();
return 0;
err:
- if (cmdbuf->bufsz != 0)
- drm_buffer_free(cmdbuf->buffer);
+ drm_buffer_free(cmdbuf->buffer);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index d031b6863082..e9918d88f5b0 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -33,6 +33,7 @@
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <ttm/ttm_module.h>
+#include <ttm/ttm_page_alloc.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include <linux/seq_file.h>
@@ -162,34 +163,21 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
(unsigned)type);
return -EINVAL;
}
- man->io_offset = rdev->mc.agp_base;
- man->io_size = rdev->mc.gtt_size;
- man->io_addr = NULL;
if (!rdev->ddev->agp->cant_use_aperture)
- man->flags = TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
- TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
man->available_caching = TTM_PL_FLAG_UNCACHED |
TTM_PL_FLAG_WC;
man->default_caching = TTM_PL_FLAG_WC;
- } else
-#endif
- {
- man->io_offset = 0;
- man->io_size = 0;
- man->io_addr = NULL;
}
+#endif
break;
case TTM_PL_VRAM:
/* "On-card" video ram */
man->gpu_offset = rdev->mc.vram_start;
man->flags = TTM_MEMTYPE_FLAG_FIXED |
- TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
TTM_MEMTYPE_FLAG_MAPPABLE;
man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
man->default_caching = TTM_PL_FLAG_WC;
- man->io_addr = NULL;
- man->io_offset = rdev->mc.aper_base;
- man->io_size = rdev->mc.aper_size;
break;
default:
DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
@@ -244,9 +232,9 @@ static void radeon_move_null(struct ttm_buffer_object *bo,
}
static int radeon_move_blit(struct ttm_buffer_object *bo,
- bool evict, int no_wait,
- struct ttm_mem_reg *new_mem,
- struct ttm_mem_reg *old_mem)
+ bool evict, int no_wait_reserve, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem,
+ struct ttm_mem_reg *old_mem)
{
struct radeon_device *rdev;
uint64_t old_start, new_start;
@@ -290,13 +278,14 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
r = radeon_copy(rdev, old_start, new_start, new_mem->num_pages, fence);
/* FIXME: handle copy error */
r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL,
- evict, no_wait, new_mem);
+ evict, no_wait_reserve, no_wait_gpu, new_mem);
radeon_fence_unref(&fence);
return r;
}
static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
- bool evict, bool interruptible, bool no_wait,
+ bool evict, bool interruptible,
+ bool no_wait_reserve, bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
struct radeon_device *rdev;
@@ -317,7 +306,7 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
placement.busy_placement = &placements;
placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
- interruptible, no_wait);
+ interruptible, no_wait_reserve, no_wait_gpu);
if (unlikely(r)) {
return r;
}
@@ -331,11 +320,11 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
if (unlikely(r)) {
goto out_cleanup;
}
- r = radeon_move_blit(bo, true, no_wait, &tmp_mem, old_mem);
+ r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem, old_mem);
if (unlikely(r)) {
goto out_cleanup;
}
- r = ttm_bo_move_ttm(bo, true, no_wait, new_mem);
+ r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
out_cleanup:
if (tmp_mem.mm_node) {
struct ttm_bo_global *glob = rdev->mman.bdev.glob;
@@ -349,7 +338,8 @@ out_cleanup:
}
static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
- bool evict, bool interruptible, bool no_wait,
+ bool evict, bool interruptible,
+ bool no_wait_reserve, bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
struct radeon_device *rdev;
@@ -369,15 +359,15 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
placement.num_busy_placement = 1;
placement.busy_placement = &placements;
placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
- r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait);
+ r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait_reserve, no_wait_gpu);
if (unlikely(r)) {
return r;
}
- r = ttm_bo_move_ttm(bo, true, no_wait, &tmp_mem);
+ r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
if (unlikely(r)) {
goto out_cleanup;
}
- r = radeon_move_blit(bo, true, no_wait, new_mem, old_mem);
+ r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, new_mem, old_mem);
if (unlikely(r)) {
goto out_cleanup;
}
@@ -394,8 +384,9 @@ out_cleanup:
}
static int radeon_bo_move(struct ttm_buffer_object *bo,
- bool evict, bool interruptible, bool no_wait,
- struct ttm_mem_reg *new_mem)
+ bool evict, bool interruptible,
+ bool no_wait_reserve, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem)
{
struct radeon_device *rdev;
struct ttm_mem_reg *old_mem = &bo->mem;
@@ -422,23 +413,66 @@ static int radeon_bo_move(struct ttm_buffer_object *bo,
if (old_mem->mem_type == TTM_PL_VRAM &&
new_mem->mem_type == TTM_PL_SYSTEM) {
r = radeon_move_vram_ram(bo, evict, interruptible,
- no_wait, new_mem);
+ no_wait_reserve, no_wait_gpu, new_mem);
} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
new_mem->mem_type == TTM_PL_VRAM) {
r = radeon_move_ram_vram(bo, evict, interruptible,
- no_wait, new_mem);
+ no_wait_reserve, no_wait_gpu, new_mem);
} else {
- r = radeon_move_blit(bo, evict, no_wait, new_mem, old_mem);
+ r = radeon_move_blit(bo, evict, no_wait_reserve, no_wait_gpu, new_mem, old_mem);
}
if (r) {
memcpy:
- r = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+ r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
}
-
return r;
}
+static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+ struct radeon_device *rdev = radeon_get_rdev(bdev);
+
+ mem->bus.addr = NULL;
+ mem->bus.offset = 0;
+ mem->bus.size = mem->num_pages << PAGE_SHIFT;
+ mem->bus.base = 0;
+ mem->bus.is_iomem = false;
+ if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+ return -EINVAL;
+ switch (mem->mem_type) {
+ case TTM_PL_SYSTEM:
+ /* system memory */
+ return 0;
+ case TTM_PL_TT:
+#if __OS_HAS_AGP
+ if (rdev->flags & RADEON_IS_AGP) {
+ /* RADEON_IS_AGP is set only if AGP is active */
+ mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+ mem->bus.base = rdev->mc.agp_base;
+ mem->bus.is_iomem = !rdev->ddev->agp->cant_use_aperture;
+ }
+#endif
+ break;
+ case TTM_PL_VRAM:
+ mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+ /* check if it's visible */
+ if ((mem->bus.offset + mem->bus.size) > rdev->mc.visible_vram_size)
+ return -EINVAL;
+ mem->bus.base = rdev->mc.aper_base;
+ mem->bus.is_iomem = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
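
The new io_mem_reserve hook essentially turns a placement into a bus window: the offset of the first page within the aperture, the aperture's physical base, and, for VRAM, a check that the window stays inside the CPU-visible part. A standalone version of that arithmetic follows; the constants in main() are examples picked for the sketch, not the driver's values:

	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SHIFT 12

	struct bus_window {
		uint64_t base;    /* physical base of the aperture */
		uint64_t offset;  /* offset of the buffer inside the aperture */
		uint64_t size;    /* length of the mapping */
	};

	/* Model of the TTM_PL_VRAM branch in radeon_ttm_io_mem_reserve(). */
	static int vram_io_reserve(uint64_t first_page, uint64_t num_pages,
				   uint64_t aper_base, uint64_t visible_vram_size,
				   struct bus_window *win)
	{
		win->offset = first_page << PAGE_SHIFT;
		win->size   = num_pages << PAGE_SHIFT;
		win->base   = aper_base;
		/* CPU access only works through the visible part of VRAM */
		if (win->offset + win->size > visible_vram_size)
			return -1;
		return 0;
	}

	int main(void)
	{
		struct bus_window win;
		int r = vram_io_reserve(16, 4, 0xd0000000ULL, 256ULL << 20, &win);
		printf("r=%d base=%#llx offset=%#llx size=%#llx\n", r,
		       (unsigned long long)win.base,
		       (unsigned long long)win.offset,
		       (unsigned long long)win.size);
		return 0;
	}
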
+
+static void radeon_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+}
+
static int radeon_sync_obj_wait(void *sync_obj, void *sync_arg,
bool lazy, bool interruptible)
{
@@ -479,6 +513,8 @@ static struct ttm_bo_driver radeon_bo_driver = {
.sync_obj_ref = &radeon_sync_obj_ref,
.move_notify = &radeon_bo_move_notify,
.fault_reserve_notify = &radeon_bo_fault_reserve_notify,
+ .io_mem_reserve = &radeon_ttm_io_mem_reserve,
+ .io_mem_free = &radeon_ttm_io_mem_free,
};
int radeon_ttm_init(struct radeon_device *rdev)
@@ -571,13 +607,17 @@ static const struct vm_operations_struct *ttm_vm_ops = NULL;
static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct ttm_buffer_object *bo;
+ struct radeon_device *rdev;
int r;
- bo = (struct ttm_buffer_object *)vma->vm_private_data;
+ bo = (struct ttm_buffer_object *)vma->vm_private_data;
if (bo == NULL) {
return VM_FAULT_NOPAGE;
}
+ rdev = radeon_get_rdev(bo->bdev);
+ mutex_lock(&rdev->vram_mutex);
r = ttm_vm_ops->fault(vma, vmf);
+ mutex_unlock(&rdev->vram_mutex);
return r;
}
@@ -745,8 +785,8 @@ static int radeon_mm_dump_table(struct seq_file *m, void *data)
static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
- static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES];
- static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES][32];
+ static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+1];
+ static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES+1][32];
unsigned i;
for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) {
@@ -763,7 +803,13 @@ static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_TT].manager;
}
- return radeon_debugfs_add_files(rdev, radeon_mem_types_list, RADEON_DEBUGFS_MEM_TYPES);
+ /* Add ttm page pool to debugfs */
+ sprintf(radeon_mem_types_names[i], "ttm_page_pool");
+ radeon_mem_types_list[i].name = radeon_mem_types_names[i];
+ radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs;
+ radeon_mem_types_list[i].driver_features = 0;
+ radeon_mem_types_list[i].data = NULL;
+ return radeon_debugfs_add_files(rdev, radeon_mem_types_list, RADEON_DEBUGFS_MEM_TYPES+1);
#endif
return 0;
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index 1a41cb268b72..9e4240b3bf0b 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -243,8 +243,6 @@ int rs400_mc_wait_for_idle(struct radeon_device *rdev)
void rs400_gpu_init(struct radeon_device *rdev)
{
- /* FIXME: HDP same place on rs400 ? */
- r100_hdp_reset(rdev);
/* FIXME: is this correct ? */
r420_pipes_init(rdev);
if (rs400_mc_wait_for_idle(rdev)) {
@@ -433,7 +431,7 @@ int rs400_resume(struct radeon_device *rdev)
/* setup MC before calling post tables */
rs400_mc_program(rdev);
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
RREG32(R_0007C0_CP_STAT));
@@ -458,7 +456,6 @@ int rs400_suspend(struct radeon_device *rdev)
void rs400_fini(struct radeon_device *rdev)
{
- radeon_pm_fini(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
@@ -497,7 +494,7 @@ int rs400_init(struct radeon_device *rdev)
return r;
}
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev,
"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
@@ -509,8 +506,6 @@ int rs400_init(struct radeon_device *rdev)
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
- /* Initialize power management */
- radeon_pm_init(rdev);
/* initialize memory controller */
rs400_mc_init(rdev);
/* Fence driver */
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index a81bc7a21e14..79887cac5b54 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -46,6 +46,135 @@
void rs600_gpu_init(struct radeon_device *rdev);
int rs600_mc_wait_for_idle(struct radeon_device *rdev);
+void rs600_pm_misc(struct radeon_device *rdev)
+{
+ int requested_index = rdev->pm.requested_power_state_index;
+ struct radeon_power_state *ps = &rdev->pm.power_state[requested_index];
+ struct radeon_voltage *voltage = &ps->clock_info[0].voltage;
+ u32 tmp, dyn_pwrmgt_sclk_length, dyn_sclk_vol_cntl;
+ u32 hdp_dyn_cntl, /*mc_host_dyn_cntl,*/ dyn_backbias_cntl;
+
+ if ((voltage->type == VOLTAGE_GPIO) && (voltage->gpio.valid)) {
+ if (ps->misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
+ tmp = RREG32(voltage->gpio.reg);
+ if (voltage->active_high)
+ tmp |= voltage->gpio.mask;
+ else
+ tmp &= ~(voltage->gpio.mask);
+ WREG32(voltage->gpio.reg, tmp);
+ if (voltage->delay)
+ udelay(voltage->delay);
+ } else {
+ tmp = RREG32(voltage->gpio.reg);
+ if (voltage->active_high)
+ tmp &= ~voltage->gpio.mask;
+ else
+ tmp |= voltage->gpio.mask;
+ WREG32(voltage->gpio.reg, tmp);
+ if (voltage->delay)
+ udelay(voltage->delay);
+ }
+ }
+
+ dyn_pwrmgt_sclk_length = RREG32_PLL(DYN_PWRMGT_SCLK_LENGTH);
+ dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_HILEN(0xf);
+ dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_LOLEN(0xf);
+ if (ps->misc & ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN) {
+ if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2) {
+ dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(2);
+ dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(2);
+ } else if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4) {
+ dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(4);
+ dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(4);
+ }
+ } else {
+ dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(1);
+ dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(1);
+ }
+ WREG32_PLL(DYN_PWRMGT_SCLK_LENGTH, dyn_pwrmgt_sclk_length);
+
+ dyn_sclk_vol_cntl = RREG32_PLL(DYN_SCLK_VOL_CNTL);
+ if (ps->misc & ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN) {
+ dyn_sclk_vol_cntl |= IO_CG_VOLTAGE_DROP;
+ if (voltage->delay) {
+ dyn_sclk_vol_cntl |= VOLTAGE_DROP_SYNC;
+ dyn_sclk_vol_cntl |= VOLTAGE_DELAY_SEL(voltage->delay);
+ } else
+ dyn_sclk_vol_cntl &= ~VOLTAGE_DROP_SYNC;
+ } else
+ dyn_sclk_vol_cntl &= ~IO_CG_VOLTAGE_DROP;
+ WREG32_PLL(DYN_SCLK_VOL_CNTL, dyn_sclk_vol_cntl);
+
+ hdp_dyn_cntl = RREG32_PLL(HDP_DYN_CNTL);
+ if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN)
+ hdp_dyn_cntl &= ~HDP_FORCEON;
+ else
+ hdp_dyn_cntl |= HDP_FORCEON;
+ WREG32_PLL(HDP_DYN_CNTL, hdp_dyn_cntl);
+#if 0
+ /* mc_host_dyn seems to cause hangs from time to time */
+ mc_host_dyn_cntl = RREG32_PLL(MC_HOST_DYN_CNTL);
+ if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_MC_HOST_BLOCK_EN)
+ mc_host_dyn_cntl &= ~MC_HOST_FORCEON;
+ else
+ mc_host_dyn_cntl |= MC_HOST_FORCEON;
+ WREG32_PLL(MC_HOST_DYN_CNTL, mc_host_dyn_cntl);
+#endif
+ dyn_backbias_cntl = RREG32_PLL(DYN_BACKBIAS_CNTL);
+ if (ps->misc & ATOM_PM_MISCINFO2_DYNAMIC_BACK_BIAS_EN)
+ dyn_backbias_cntl |= IO_CG_BACKBIAS_EN;
+ else
+ dyn_backbias_cntl &= ~IO_CG_BACKBIAS_EN;
+ WREG32_PLL(DYN_BACKBIAS_CNTL, dyn_backbias_cntl);
+
+ /* set pcie lanes */
+ if ((rdev->flags & RADEON_IS_PCIE) &&
+ !(rdev->flags & RADEON_IS_IGP) &&
+ rdev->asic->set_pcie_lanes &&
+ (ps->pcie_lanes !=
+ rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) {
+ radeon_set_pcie_lanes(rdev,
+ ps->pcie_lanes);
+ DRM_DEBUG("Setting: p: %d\n", ps->pcie_lanes);
+ }
+}
+
+void rs600_pm_prepare(struct radeon_device *rdev)
+{
+ struct drm_device *ddev = rdev->ddev;
+ struct drm_crtc *crtc;
+ struct radeon_crtc *radeon_crtc;
+ u32 tmp;
+
+ /* disable any active CRTCs */
+ list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
+ radeon_crtc = to_radeon_crtc(crtc);
+ if (radeon_crtc->enabled) {
+ tmp = RREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset);
+ tmp |= AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
+ WREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
+ }
+ }
+}
+
+void rs600_pm_finish(struct radeon_device *rdev)
+{
+ struct drm_device *ddev = rdev->ddev;
+ struct drm_crtc *crtc;
+ struct radeon_crtc *radeon_crtc;
+ u32 tmp;
+
+ /* enable any active CRTCs */
+ list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
+ radeon_crtc = to_radeon_crtc(crtc);
+ if (radeon_crtc->enabled) {
+ tmp = RREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset);
+ tmp &= ~AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
+ WREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
+ }
+ }
+}
+
/* hpd for digital panel detect/disconnect */
bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
@@ -147,6 +276,78 @@ void rs600_hpd_fini(struct radeon_device *rdev)
}
}
+void rs600_bm_disable(struct radeon_device *rdev)
+{
+ u32 tmp;
+
+ /* disable bus mastering */
+ pci_read_config_word(rdev->pdev, 0x4, (u16*)&tmp);
+ pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB);
+ mdelay(1);
+}
+
+int rs600_asic_reset(struct radeon_device *rdev)
+{
+ u32 status, tmp;
+
+ struct rv515_mc_save save;
+
+ /* Stops all mc clients */
+ rv515_mc_stop(rdev, &save);
+ status = RREG32(R_000E40_RBBM_STATUS);
+ if (!G_000E40_GUI_ACTIVE(status)) {
+ return 0;
+ }
+ status = RREG32(R_000E40_RBBM_STATUS);
+ dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+ /* stop CP */
+ WREG32(RADEON_CP_CSQ_CNTL, 0);
+ tmp = RREG32(RADEON_CP_RB_CNTL);
+ WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
+ WREG32(RADEON_CP_RB_RPTR_WR, 0);
+ WREG32(RADEON_CP_RB_WPTR, 0);
+ WREG32(RADEON_CP_RB_CNTL, tmp);
+ pci_save_state(rdev->pdev);
+ /* disable bus mastering */
+ rs600_bm_disable(rdev);
+ /* reset GA+VAP */
+ WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) |
+ S_0000F0_SOFT_RESET_GA(1));
+ RREG32(R_0000F0_RBBM_SOFT_RESET);
+ mdelay(500);
+ WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+ mdelay(1);
+ status = RREG32(R_000E40_RBBM_STATUS);
+ dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+ /* reset CP */
+ WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
+ RREG32(R_0000F0_RBBM_SOFT_RESET);
+ mdelay(500);
+ WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+ mdelay(1);
+ status = RREG32(R_000E40_RBBM_STATUS);
+ dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+ /* reset MC */
+ WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_MC(1));
+ RREG32(R_0000F0_RBBM_SOFT_RESET);
+ mdelay(500);
+ WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+ mdelay(1);
+ status = RREG32(R_000E40_RBBM_STATUS);
+ dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+ /* restore PCI & busmastering */
+ pci_restore_state(rdev->pdev);
+ /* Check if GPU is idle */
+ if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
+ dev_err(rdev->dev, "failed to reset GPU\n");
+ rdev->gpu_lockup = true;
+ return -1;
+ }
+ rv515_mc_resume(rdev, &save);
+ dev_info(rdev->dev, "GPU reset succeed\n");
+ return 0;
+}
+
/*
* GART.
*/
@@ -310,6 +511,9 @@ int rs600_irq_set(struct radeon_device *rdev)
if (rdev->irq.sw_int) {
tmp |= S_000040_SW_INT_EN(1);
}
+ if (rdev->irq.gui_idle) {
+ tmp |= S_000040_GUI_IDLE(1);
+ }
if (rdev->irq.crtc_vblank_int[0]) {
mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1);
}
@@ -332,9 +536,15 @@ int rs600_irq_set(struct radeon_device *rdev)
static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_int)
{
uint32_t irqs = RREG32(R_000044_GEN_INT_STATUS);
- uint32_t irq_mask = ~C_000044_SW_INT;
+ uint32_t irq_mask = S_000044_SW_INT(1);
u32 tmp;
+ /* the interrupt works, but the status bit is permanently asserted */
+ if (rdev->irq.gui_idle && radeon_gui_idle(rdev)) {
+ if (!rdev->irq.gui_idle_acked)
+ irq_mask |= S_000044_GUI_IDLE_STAT(1);
+ }
+
if (G_000044_DISPLAY_INT_STAT(irqs)) {
*r500_disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS);
if (G_007EDC_LB_D1_VBLANK_INTERRUPT(*r500_disp_int)) {
@@ -382,6 +592,9 @@ int rs600_irq_process(struct radeon_device *rdev)
uint32_t r500_disp_int;
bool queue_hotplug = false;
+ /* reset gui idle ack. the status bit is broken */
+ rdev->irq.gui_idle_acked = false;
+
status = rs600_irq_ack(rdev, &r500_disp_int);
if (!status && !r500_disp_int) {
return IRQ_NONE;
@@ -390,6 +603,12 @@ int rs600_irq_process(struct radeon_device *rdev)
/* SW interrupt */
if (G_000044_SW_INT(status))
radeon_fence_process(rdev);
+ /* GUI idle */
+ if (G_000040_GUI_IDLE(status)) {
+ rdev->irq.gui_idle_acked = true;
+ rdev->pm.gui_idle = true;
+ wake_up(&rdev->irq.idle_queue);
+ }
/* Vertical blank interrupts */
if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int)) {
drm_handle_vblank(rdev->ddev, 0);
@@ -411,6 +630,8 @@ int rs600_irq_process(struct radeon_device *rdev)
}
status = rs600_irq_ack(rdev, &r500_disp_int);
}
+ /* reset gui idle ack. the status bit is broken */
+ rdev->irq.gui_idle_acked = false;
if (queue_hotplug)
queue_work(rdev->wq, &rdev->hotplug_work);
if (rdev->msi_enabled) {
@@ -454,7 +675,6 @@ int rs600_mc_wait_for_idle(struct radeon_device *rdev)
void rs600_gpu_init(struct radeon_device *rdev)
{
- r100_hdp_reset(rdev);
r420_pipes_init(rdev);
/* Wait for mc idle */
if (rs600_mc_wait_for_idle(rdev))
@@ -601,7 +821,7 @@ int rs600_resume(struct radeon_device *rdev)
/* Resume clock before doing reset */
rv515_clock_startup(rdev);
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
RREG32(R_0007C0_CP_STAT));
@@ -626,7 +846,6 @@ int rs600_suspend(struct radeon_device *rdev)
void rs600_fini(struct radeon_device *rdev)
{
- radeon_pm_fini(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
@@ -664,7 +883,7 @@ int rs600_init(struct radeon_device *rdev)
return -EINVAL;
}
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev,
"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
@@ -676,8 +895,6 @@ int rs600_init(struct radeon_device *rdev)
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
- /* Initialize power management */
- radeon_pm_init(rdev);
/* initialize memory controller */
rs600_mc_init(rdev);
rs600_debugfs(rdev);
diff --git a/drivers/gpu/drm/radeon/rs600d.h b/drivers/gpu/drm/radeon/rs600d.h
index e52d2695510b..a27c13ac47c3 100644
--- a/drivers/gpu/drm/radeon/rs600d.h
+++ b/drivers/gpu/drm/radeon/rs600d.h
@@ -178,6 +178,52 @@
#define S_000074_MC_IND_DATA(x) (((x) & 0xFFFFFFFF) << 0)
#define G_000074_MC_IND_DATA(x) (((x) >> 0) & 0xFFFFFFFF)
#define C_000074_MC_IND_DATA 0x00000000
+#define R_0000F0_RBBM_SOFT_RESET 0x0000F0
+#define S_0000F0_SOFT_RESET_CP(x) (((x) & 0x1) << 0)
+#define G_0000F0_SOFT_RESET_CP(x) (((x) >> 0) & 0x1)
+#define C_0000F0_SOFT_RESET_CP 0xFFFFFFFE
+#define S_0000F0_SOFT_RESET_HI(x) (((x) & 0x1) << 1)
+#define G_0000F0_SOFT_RESET_HI(x) (((x) >> 1) & 0x1)
+#define C_0000F0_SOFT_RESET_HI 0xFFFFFFFD
+#define S_0000F0_SOFT_RESET_VAP(x) (((x) & 0x1) << 2)
+#define G_0000F0_SOFT_RESET_VAP(x) (((x) >> 2) & 0x1)
+#define C_0000F0_SOFT_RESET_VAP 0xFFFFFFFB
+#define S_0000F0_SOFT_RESET_RE(x) (((x) & 0x1) << 3)
+#define G_0000F0_SOFT_RESET_RE(x) (((x) >> 3) & 0x1)
+#define C_0000F0_SOFT_RESET_RE 0xFFFFFFF7
+#define S_0000F0_SOFT_RESET_PP(x) (((x) & 0x1) << 4)
+#define G_0000F0_SOFT_RESET_PP(x) (((x) >> 4) & 0x1)
+#define C_0000F0_SOFT_RESET_PP 0xFFFFFFEF
+#define S_0000F0_SOFT_RESET_E2(x) (((x) & 0x1) << 5)
+#define G_0000F0_SOFT_RESET_E2(x) (((x) >> 5) & 0x1)
+#define C_0000F0_SOFT_RESET_E2 0xFFFFFFDF
+#define S_0000F0_SOFT_RESET_RB(x) (((x) & 0x1) << 6)
+#define G_0000F0_SOFT_RESET_RB(x) (((x) >> 6) & 0x1)
+#define C_0000F0_SOFT_RESET_RB 0xFFFFFFBF
+#define S_0000F0_SOFT_RESET_HDP(x) (((x) & 0x1) << 7)
+#define G_0000F0_SOFT_RESET_HDP(x) (((x) >> 7) & 0x1)
+#define C_0000F0_SOFT_RESET_HDP 0xFFFFFF7F
+#define S_0000F0_SOFT_RESET_MC(x) (((x) & 0x1) << 8)
+#define G_0000F0_SOFT_RESET_MC(x) (((x) >> 8) & 0x1)
+#define C_0000F0_SOFT_RESET_MC 0xFFFFFEFF
+#define S_0000F0_SOFT_RESET_AIC(x) (((x) & 0x1) << 9)
+#define G_0000F0_SOFT_RESET_AIC(x) (((x) >> 9) & 0x1)
+#define C_0000F0_SOFT_RESET_AIC 0xFFFFFDFF
+#define S_0000F0_SOFT_RESET_VIP(x) (((x) & 0x1) << 10)
+#define G_0000F0_SOFT_RESET_VIP(x) (((x) >> 10) & 0x1)
+#define C_0000F0_SOFT_RESET_VIP 0xFFFFFBFF
+#define S_0000F0_SOFT_RESET_DISP(x) (((x) & 0x1) << 11)
+#define G_0000F0_SOFT_RESET_DISP(x) (((x) >> 11) & 0x1)
+#define C_0000F0_SOFT_RESET_DISP 0xFFFFF7FF
+#define S_0000F0_SOFT_RESET_CG(x) (((x) & 0x1) << 12)
+#define G_0000F0_SOFT_RESET_CG(x) (((x) >> 12) & 0x1)
+#define C_0000F0_SOFT_RESET_CG 0xFFFFEFFF
+#define S_0000F0_SOFT_RESET_GA(x) (((x) & 0x1) << 13)
+#define G_0000F0_SOFT_RESET_GA(x) (((x) >> 13) & 0x1)
+#define C_0000F0_SOFT_RESET_GA 0xFFFFDFFF
+#define S_0000F0_SOFT_RESET_IDCT(x) (((x) & 0x1) << 14)
+#define G_0000F0_SOFT_RESET_IDCT(x) (((x) >> 14) & 0x1)
+#define C_0000F0_SOFT_RESET_IDCT 0xFFFFBFFF
#define R_000134_HDP_FB_LOCATION 0x000134
#define S_000134_HDP_FB_START(x) (((x) & 0xFFFF) << 0)
#define G_000134_HDP_FB_START(x) (((x) >> 0) & 0xFFFF)
@@ -588,4 +634,38 @@
#define G_006D4C_D2MODE_PRIORITY_B_FORCE_MASK(x) (((x) >> 24) & 0x1)
#define C_006D4C_D2MODE_PRIORITY_B_FORCE_MASK 0xFEFFFFFF
+/* PLL regs */
+#define GENERAL_PWRMGT 0x8
+#define GLOBAL_PWRMGT_EN (1 << 0)
+#define MOBILE_SU (1 << 2)
+#define DYN_PWRMGT_SCLK_LENGTH 0xc
+#define NORMAL_POWER_SCLK_HILEN(x) ((x) << 0)
+#define NORMAL_POWER_SCLK_LOLEN(x) ((x) << 4)
+#define REDUCED_POWER_SCLK_HILEN(x) ((x) << 8)
+#define REDUCED_POWER_SCLK_LOLEN(x) ((x) << 12)
+#define POWER_D1_SCLK_HILEN(x) ((x) << 16)
+#define POWER_D1_SCLK_LOLEN(x) ((x) << 20)
+#define STATIC_SCREEN_HILEN(x) ((x) << 24)
+#define STATIC_SCREEN_LOLEN(x) ((x) << 28)
+#define DYN_SCLK_VOL_CNTL 0xe
+#define IO_CG_VOLTAGE_DROP (1 << 0)
+#define VOLTAGE_DROP_SYNC (1 << 2)
+#define VOLTAGE_DELAY_SEL(x) ((x) << 3)
+#define HDP_DYN_CNTL 0x10
+#define HDP_FORCEON (1 << 0)
+#define MC_HOST_DYN_CNTL 0x1e
+#define MC_HOST_FORCEON (1 << 0)
+#define DYN_BACKBIAS_CNTL 0x29
+#define IO_CG_BACKBIAS_EN (1 << 0)
+
+/* mmreg */
+#define DOUT_POWER_MANAGEMENT_CNTL 0x7ee0
+#define PWRDN_WAIT_BUSY_OFF (1 << 0)
+#define PWRDN_WAIT_PWRSEQ_OFF (1 << 4)
+#define PWRDN_WAIT_PPLL_OFF (1 << 8)
+#define PWRUP_WAIT_PPLL_ON (1 << 12)
+#define PWRUP_WAIT_MEM_INIT_DONE (1 << 16)
+#define PM_ASSERT_RESET (1 << 20)
+#define PM_PWRDN_PPLL (1 << 24)
+
#endif
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index bbf3da790fd5..bcc33195ebc2 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -48,8 +48,6 @@ static int rs690_mc_wait_for_idle(struct radeon_device *rdev)
static void rs690_gpu_init(struct radeon_device *rdev)
{
- /* FIXME: HDP same place on rs690 ? */
- r100_hdp_reset(rdev);
/* FIXME: is this correct ? */
r420_pipes_init(rdev);
if (rs690_mc_wait_for_idle(rdev)) {
@@ -78,59 +76,59 @@ void rs690_pm_info(struct radeon_device *rdev)
/* Get various system informations from bios */
switch (crev) {
case 1:
- tmp.full = rfixed_const(100);
- rdev->pm.igp_sideport_mclk.full = rfixed_const(info->info.ulBootUpMemoryClock);
- rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp);
- rdev->pm.igp_system_mclk.full = rfixed_const(le16_to_cpu(info->info.usK8MemoryClock));
- rdev->pm.igp_ht_link_clk.full = rfixed_const(le16_to_cpu(info->info.usFSBClock));
- rdev->pm.igp_ht_link_width.full = rfixed_const(info->info.ucHTLinkWidth);
+ tmp.full = dfixed_const(100);
+ rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info.ulBootUpMemoryClock);
+ rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp);
+ rdev->pm.igp_system_mclk.full = dfixed_const(le16_to_cpu(info->info.usK8MemoryClock));
+ rdev->pm.igp_ht_link_clk.full = dfixed_const(le16_to_cpu(info->info.usFSBClock));
+ rdev->pm.igp_ht_link_width.full = dfixed_const(info->info.ucHTLinkWidth);
break;
case 2:
- tmp.full = rfixed_const(100);
- rdev->pm.igp_sideport_mclk.full = rfixed_const(info->info_v2.ulBootUpSidePortClock);
- rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp);
- rdev->pm.igp_system_mclk.full = rfixed_const(info->info_v2.ulBootUpUMAClock);
- rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp);
- rdev->pm.igp_ht_link_clk.full = rfixed_const(info->info_v2.ulHTLinkFreq);
- rdev->pm.igp_ht_link_clk.full = rfixed_div(rdev->pm.igp_ht_link_clk, tmp);
- rdev->pm.igp_ht_link_width.full = rfixed_const(le16_to_cpu(info->info_v2.usMinHTLinkWidth));
+ tmp.full = dfixed_const(100);
+ rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info_v2.ulBootUpSidePortClock);
+ rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp);
+ rdev->pm.igp_system_mclk.full = dfixed_const(info->info_v2.ulBootUpUMAClock);
+ rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp);
+ rdev->pm.igp_ht_link_clk.full = dfixed_const(info->info_v2.ulHTLinkFreq);
+ rdev->pm.igp_ht_link_clk.full = dfixed_div(rdev->pm.igp_ht_link_clk, tmp);
+ rdev->pm.igp_ht_link_width.full = dfixed_const(le16_to_cpu(info->info_v2.usMinHTLinkWidth));
break;
default:
- tmp.full = rfixed_const(100);
+ tmp.full = dfixed_const(100);
/* We assume the slower possible clock ie worst case */
/* DDR 333Mhz */
- rdev->pm.igp_sideport_mclk.full = rfixed_const(333);
+ rdev->pm.igp_sideport_mclk.full = dfixed_const(333);
/* FIXME: system clock ? */
- rdev->pm.igp_system_mclk.full = rfixed_const(100);
- rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp);
- rdev->pm.igp_ht_link_clk.full = rfixed_const(200);
- rdev->pm.igp_ht_link_width.full = rfixed_const(8);
+ rdev->pm.igp_system_mclk.full = dfixed_const(100);
+ rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp);
+ rdev->pm.igp_ht_link_clk.full = dfixed_const(200);
+ rdev->pm.igp_ht_link_width.full = dfixed_const(8);
DRM_ERROR("No integrated system info for your GPU, using safe default\n");
break;
}
} else {
- tmp.full = rfixed_const(100);
+ tmp.full = dfixed_const(100);
/* We assume the slower possible clock ie worst case */
/* DDR 333Mhz */
- rdev->pm.igp_sideport_mclk.full = rfixed_const(333);
+ rdev->pm.igp_sideport_mclk.full = dfixed_const(333);
/* FIXME: system clock ? */
- rdev->pm.igp_system_mclk.full = rfixed_const(100);
- rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp);
- rdev->pm.igp_ht_link_clk.full = rfixed_const(200);
- rdev->pm.igp_ht_link_width.full = rfixed_const(8);
+ rdev->pm.igp_system_mclk.full = dfixed_const(100);
+ rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp);
+ rdev->pm.igp_ht_link_clk.full = dfixed_const(200);
+ rdev->pm.igp_ht_link_width.full = dfixed_const(8);
DRM_ERROR("No integrated system info for your GPU, using safe default\n");
}
/* Compute various bandwidth */
/* k8_bandwidth = (memory_clk / 2) * 2 * 8 * 0.5 = memory_clk * 4 */
- tmp.full = rfixed_const(4);
- rdev->pm.k8_bandwidth.full = rfixed_mul(rdev->pm.igp_system_mclk, tmp);
+ tmp.full = dfixed_const(4);
+ rdev->pm.k8_bandwidth.full = dfixed_mul(rdev->pm.igp_system_mclk, tmp);
/* ht_bandwidth = ht_clk * 2 * ht_width / 8 * 0.8
* = ht_clk * ht_width / 5
*/
- tmp.full = rfixed_const(5);
- rdev->pm.ht_bandwidth.full = rfixed_mul(rdev->pm.igp_ht_link_clk,
+ tmp.full = dfixed_const(5);
+ rdev->pm.ht_bandwidth.full = dfixed_mul(rdev->pm.igp_ht_link_clk,
rdev->pm.igp_ht_link_width);
- rdev->pm.ht_bandwidth.full = rfixed_div(rdev->pm.ht_bandwidth, tmp);
+ rdev->pm.ht_bandwidth.full = dfixed_div(rdev->pm.ht_bandwidth, tmp);
if (tmp.full < rdev->pm.max_bandwidth.full) {
/* HT link is a limiting factor */
rdev->pm.max_bandwidth.full = tmp.full;
@@ -138,10 +136,10 @@ void rs690_pm_info(struct radeon_device *rdev)
/* sideport_bandwidth = (sideport_clk / 2) * 2 * 2 * 0.7
* = (sideport_clk * 14) / 10
*/
- tmp.full = rfixed_const(14);
- rdev->pm.sideport_bandwidth.full = rfixed_mul(rdev->pm.igp_sideport_mclk, tmp);
- tmp.full = rfixed_const(10);
- rdev->pm.sideport_bandwidth.full = rfixed_div(rdev->pm.sideport_bandwidth, tmp);
+ tmp.full = dfixed_const(14);
+ rdev->pm.sideport_bandwidth.full = dfixed_mul(rdev->pm.igp_sideport_mclk, tmp);
+ tmp.full = dfixed_const(10);
+ rdev->pm.sideport_bandwidth.full = dfixed_div(rdev->pm.sideport_bandwidth, tmp);
}
void rs690_mc_init(struct radeon_device *rdev)
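
The conversions above only swap the rfixed_* fixed-point helpers for their dfixed_* equivalents; the bandwidth formulas in the comments are unchanged. A plain-integer illustration of those formulas, without the fixed-point wrappers, so the arithmetic is easy to follow (units are the same internal clock units the function uses):

	#include <stdio.h>

	/* The three bandwidth estimates from rs690_pm_info(), written without
	 * the fixed-point wrappers.  Inputs are the clocks the function derives
	 * from the BIOS tables. */
	static void rs690_bandwidths(unsigned system_mclk, unsigned ht_clk,
				     unsigned ht_width, unsigned sideport_mclk)
	{
		/* k8_bandwidth = (memory_clk / 2) * 2 * 8 * 0.5 = memory_clk * 4 */
		unsigned k8 = system_mclk * 4;
		/* ht_bandwidth = ht_clk * 2 * ht_width / 8 * 0.8 = ht_clk * ht_width / 5 */
		unsigned ht = ht_clk * ht_width / 5;
		/* sideport_bandwidth = (sideport_clk / 2) * 2 * 2 * 0.7 = sideport_clk * 14 / 10 */
		unsigned sideport = sideport_mclk * 14 / 10;

		printf("k8 %u  ht %u  sideport %u\n", k8, ht, sideport);
	}

	int main(void)
	{
		/* The fallback defaults the code uses when no BIOS info is found. */
		rs690_bandwidths(100, 200, 8, 333);
		return 0;
	}
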
@@ -241,20 +239,20 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
return;
}
- if (crtc->vsc.full > rfixed_const(2))
- wm->num_line_pair.full = rfixed_const(2);
+ if (crtc->vsc.full > dfixed_const(2))
+ wm->num_line_pair.full = dfixed_const(2);
else
- wm->num_line_pair.full = rfixed_const(1);
-
- b.full = rfixed_const(mode->crtc_hdisplay);
- c.full = rfixed_const(256);
- a.full = rfixed_div(b, c);
- request_fifo_depth.full = rfixed_mul(a, wm->num_line_pair);
- request_fifo_depth.full = rfixed_ceil(request_fifo_depth);
- if (a.full < rfixed_const(4)) {
+ wm->num_line_pair.full = dfixed_const(1);
+
+ b.full = dfixed_const(mode->crtc_hdisplay);
+ c.full = dfixed_const(256);
+ a.full = dfixed_div(b, c);
+ request_fifo_depth.full = dfixed_mul(a, wm->num_line_pair);
+ request_fifo_depth.full = dfixed_ceil(request_fifo_depth);
+ if (a.full < dfixed_const(4)) {
wm->lb_request_fifo_depth = 4;
} else {
- wm->lb_request_fifo_depth = rfixed_trunc(request_fifo_depth);
+ wm->lb_request_fifo_depth = dfixed_trunc(request_fifo_depth);
}
/* Determine consumption rate
@@ -263,23 +261,23 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
* vsc = vertical scaling ratio, defined as source/destination
* hsc = horizontal scaling ration, defined as source/destination
*/
- a.full = rfixed_const(mode->clock);
- b.full = rfixed_const(1000);
- a.full = rfixed_div(a, b);
- pclk.full = rfixed_div(b, a);
+ a.full = dfixed_const(mode->clock);
+ b.full = dfixed_const(1000);
+ a.full = dfixed_div(a, b);
+ pclk.full = dfixed_div(b, a);
if (crtc->rmx_type != RMX_OFF) {
- b.full = rfixed_const(2);
+ b.full = dfixed_const(2);
if (crtc->vsc.full > b.full)
b.full = crtc->vsc.full;
- b.full = rfixed_mul(b, crtc->hsc);
- c.full = rfixed_const(2);
- b.full = rfixed_div(b, c);
- consumption_time.full = rfixed_div(pclk, b);
+ b.full = dfixed_mul(b, crtc->hsc);
+ c.full = dfixed_const(2);
+ b.full = dfixed_div(b, c);
+ consumption_time.full = dfixed_div(pclk, b);
} else {
consumption_time.full = pclk.full;
}
- a.full = rfixed_const(1);
- wm->consumption_rate.full = rfixed_div(a, consumption_time);
+ a.full = dfixed_const(1);
+ wm->consumption_rate.full = dfixed_div(a, consumption_time);
/* Determine line time
@@ -287,18 +285,18 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
* LineTime = total number of horizontal pixels
* pclk = pixel clock period(ns)
*/
- a.full = rfixed_const(crtc->base.mode.crtc_htotal);
- line_time.full = rfixed_mul(a, pclk);
+ a.full = dfixed_const(crtc->base.mode.crtc_htotal);
+ line_time.full = dfixed_mul(a, pclk);
/* Determine active time
* ActiveTime = time of active region of display within one line,
* hactive = total number of horizontal active pixels
* htotal = total number of horizontal pixels
*/
- a.full = rfixed_const(crtc->base.mode.crtc_htotal);
- b.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
- wm->active_time.full = rfixed_mul(line_time, b);
- wm->active_time.full = rfixed_div(wm->active_time, a);
+ a.full = dfixed_const(crtc->base.mode.crtc_htotal);
+ b.full = dfixed_const(crtc->base.mode.crtc_hdisplay);
+ wm->active_time.full = dfixed_mul(line_time, b);
+ wm->active_time.full = dfixed_div(wm->active_time, a);
/* Maximun bandwidth is the minimun bandwidth of all component */
rdev->pm.max_bandwidth = rdev->pm.core_bandwidth;
@@ -306,8 +304,8 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
rdev->pm.sideport_bandwidth.full)
rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
- read_delay_latency.full = rfixed_const(370 * 800 * 1000);
- read_delay_latency.full = rfixed_div(read_delay_latency,
+ read_delay_latency.full = dfixed_const(370 * 800 * 1000);
+ read_delay_latency.full = dfixed_div(read_delay_latency,
rdev->pm.igp_sideport_mclk);
} else {
if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
@@ -316,23 +314,23 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
if (rdev->pm.max_bandwidth.full > rdev->pm.ht_bandwidth.full &&
rdev->pm.ht_bandwidth.full)
rdev->pm.max_bandwidth = rdev->pm.ht_bandwidth;
- read_delay_latency.full = rfixed_const(5000);
+ read_delay_latency.full = dfixed_const(5000);
}
/* sclk = system clocks(ns) = 1000 / max_bandwidth / 16 */
- a.full = rfixed_const(16);
- rdev->pm.sclk.full = rfixed_mul(rdev->pm.max_bandwidth, a);
- a.full = rfixed_const(1000);
- rdev->pm.sclk.full = rfixed_div(a, rdev->pm.sclk);
+ a.full = dfixed_const(16);
+ rdev->pm.sclk.full = dfixed_mul(rdev->pm.max_bandwidth, a);
+ a.full = dfixed_const(1000);
+ rdev->pm.sclk.full = dfixed_div(a, rdev->pm.sclk);
/* Determine chunk time
* ChunkTime = the time it takes the DCP to send one chunk of data
* to the LB which consists of pipeline delay and inter chunk gap
* sclk = system clock(ns)
*/
- a.full = rfixed_const(256 * 13);
- chunk_time.full = rfixed_mul(rdev->pm.sclk, a);
- a.full = rfixed_const(10);
- chunk_time.full = rfixed_div(chunk_time, a);
+ a.full = dfixed_const(256 * 13);
+ chunk_time.full = dfixed_mul(rdev->pm.sclk, a);
+ a.full = dfixed_const(10);
+ chunk_time.full = dfixed_div(chunk_time, a);
/* Determine the worst case latency
* NumLinePair = Number of line pairs to request(1=2 lines, 2=4 lines)
@@ -342,13 +340,13 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
* ChunkTime = time it takes the DCP to send one chunk of data to the LB
* which consists of pipeline delay and inter chunk gap
*/
- if (rfixed_trunc(wm->num_line_pair) > 1) {
- a.full = rfixed_const(3);
- wm->worst_case_latency.full = rfixed_mul(a, chunk_time);
+ if (dfixed_trunc(wm->num_line_pair) > 1) {
+ a.full = dfixed_const(3);
+ wm->worst_case_latency.full = dfixed_mul(a, chunk_time);
wm->worst_case_latency.full += read_delay_latency.full;
} else {
- a.full = rfixed_const(2);
- wm->worst_case_latency.full = rfixed_mul(a, chunk_time);
+ a.full = dfixed_const(2);
+ wm->worst_case_latency.full = dfixed_mul(a, chunk_time);
wm->worst_case_latency.full += read_delay_latency.full;
}
@@ -362,34 +360,34 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
* of data to the LB which consists of
* pipeline delay and inter chunk gap
*/
- if ((2+wm->lb_request_fifo_depth) >= rfixed_trunc(request_fifo_depth)) {
+ if ((2+wm->lb_request_fifo_depth) >= dfixed_trunc(request_fifo_depth)) {
tolerable_latency.full = line_time.full;
} else {
- tolerable_latency.full = rfixed_const(wm->lb_request_fifo_depth - 2);
+ tolerable_latency.full = dfixed_const(wm->lb_request_fifo_depth - 2);
tolerable_latency.full = request_fifo_depth.full - tolerable_latency.full;
- tolerable_latency.full = rfixed_mul(tolerable_latency, chunk_time);
+ tolerable_latency.full = dfixed_mul(tolerable_latency, chunk_time);
tolerable_latency.full = line_time.full - tolerable_latency.full;
}
/* We assume worst case 32bits (4 bytes) */
- wm->dbpp.full = rfixed_const(4 * 8);
+ wm->dbpp.full = dfixed_const(4 * 8);
/* Determine the maximum priority mark
* width = viewport width in pixels
*/
- a.full = rfixed_const(16);
- wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
- wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a);
- wm->priority_mark_max.full = rfixed_ceil(wm->priority_mark_max);
+ a.full = dfixed_const(16);
+ wm->priority_mark_max.full = dfixed_const(crtc->base.mode.crtc_hdisplay);
+ wm->priority_mark_max.full = dfixed_div(wm->priority_mark_max, a);
+ wm->priority_mark_max.full = dfixed_ceil(wm->priority_mark_max);
/* Determine estimated width */
estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full;
- estimated_width.full = rfixed_div(estimated_width, consumption_time);
- if (rfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) {
- wm->priority_mark.full = rfixed_const(10);
+ estimated_width.full = dfixed_div(estimated_width, consumption_time);
+ if (dfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) {
+ wm->priority_mark.full = dfixed_const(10);
} else {
- a.full = rfixed_const(16);
- wm->priority_mark.full = rfixed_div(estimated_width, a);
- wm->priority_mark.full = rfixed_ceil(wm->priority_mark);
+ a.full = dfixed_const(16);
+ wm->priority_mark.full = dfixed_div(estimated_width, a);
+ wm->priority_mark.full = dfixed_ceil(wm->priority_mark);
wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full;
}
}
@@ -441,58 +439,58 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
WREG32(R_006D58_LB_MAX_REQ_OUTSTANDING, tmp);
if (mode0 && mode1) {
- if (rfixed_trunc(wm0.dbpp) > 64)
- a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair);
+ if (dfixed_trunc(wm0.dbpp) > 64)
+ a.full = dfixed_mul(wm0.dbpp, wm0.num_line_pair);
else
a.full = wm0.num_line_pair.full;
- if (rfixed_trunc(wm1.dbpp) > 64)
- b.full = rfixed_mul(wm1.dbpp, wm1.num_line_pair);
+ if (dfixed_trunc(wm1.dbpp) > 64)
+ b.full = dfixed_mul(wm1.dbpp, wm1.num_line_pair);
else
b.full = wm1.num_line_pair.full;
a.full += b.full;
- fill_rate.full = rfixed_div(wm0.sclk, a);
+ fill_rate.full = dfixed_div(wm0.sclk, a);
if (wm0.consumption_rate.full > fill_rate.full) {
b.full = wm0.consumption_rate.full - fill_rate.full;
- b.full = rfixed_mul(b, wm0.active_time);
- a.full = rfixed_mul(wm0.worst_case_latency,
+ b.full = dfixed_mul(b, wm0.active_time);
+ a.full = dfixed_mul(wm0.worst_case_latency,
wm0.consumption_rate);
a.full = a.full + b.full;
- b.full = rfixed_const(16 * 1000);
- priority_mark02.full = rfixed_div(a, b);
+ b.full = dfixed_const(16 * 1000);
+ priority_mark02.full = dfixed_div(a, b);
} else {
- a.full = rfixed_mul(wm0.worst_case_latency,
+ a.full = dfixed_mul(wm0.worst_case_latency,
wm0.consumption_rate);
- b.full = rfixed_const(16 * 1000);
- priority_mark02.full = rfixed_div(a, b);
+ b.full = dfixed_const(16 * 1000);
+ priority_mark02.full = dfixed_div(a, b);
}
if (wm1.consumption_rate.full > fill_rate.full) {
b.full = wm1.consumption_rate.full - fill_rate.full;
- b.full = rfixed_mul(b, wm1.active_time);
- a.full = rfixed_mul(wm1.worst_case_latency,
+ b.full = dfixed_mul(b, wm1.active_time);
+ a.full = dfixed_mul(wm1.worst_case_latency,
wm1.consumption_rate);
a.full = a.full + b.full;
- b.full = rfixed_const(16 * 1000);
- priority_mark12.full = rfixed_div(a, b);
+ b.full = dfixed_const(16 * 1000);
+ priority_mark12.full = dfixed_div(a, b);
} else {
- a.full = rfixed_mul(wm1.worst_case_latency,
+ a.full = dfixed_mul(wm1.worst_case_latency,
wm1.consumption_rate);
- b.full = rfixed_const(16 * 1000);
- priority_mark12.full = rfixed_div(a, b);
+ b.full = dfixed_const(16 * 1000);
+ priority_mark12.full = dfixed_div(a, b);
}
if (wm0.priority_mark.full > priority_mark02.full)
priority_mark02.full = wm0.priority_mark.full;
- if (rfixed_trunc(priority_mark02) < 0)
+ if (dfixed_trunc(priority_mark02) < 0)
priority_mark02.full = 0;
if (wm0.priority_mark_max.full > priority_mark02.full)
priority_mark02.full = wm0.priority_mark_max.full;
if (wm1.priority_mark.full > priority_mark12.full)
priority_mark12.full = wm1.priority_mark.full;
- if (rfixed_trunc(priority_mark12) < 0)
+ if (dfixed_trunc(priority_mark12) < 0)
priority_mark12.full = 0;
if (wm1.priority_mark_max.full > priority_mark12.full)
priority_mark12.full = wm1.priority_mark_max.full;
- d1mode_priority_a_cnt = rfixed_trunc(priority_mark02);
- d2mode_priority_a_cnt = rfixed_trunc(priority_mark12);
+ d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
+ d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
if (rdev->disp_priority == 2) {
d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
@@ -502,32 +500,32 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
} else if (mode0) {
- if (rfixed_trunc(wm0.dbpp) > 64)
- a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair);
+ if (dfixed_trunc(wm0.dbpp) > 64)
+ a.full = dfixed_mul(wm0.dbpp, wm0.num_line_pair);
else
a.full = wm0.num_line_pair.full;
- fill_rate.full = rfixed_div(wm0.sclk, a);
+ fill_rate.full = dfixed_div(wm0.sclk, a);
if (wm0.consumption_rate.full > fill_rate.full) {
b.full = wm0.consumption_rate.full - fill_rate.full;
- b.full = rfixed_mul(b, wm0.active_time);
- a.full = rfixed_mul(wm0.worst_case_latency,
+ b.full = dfixed_mul(b, wm0.active_time);
+ a.full = dfixed_mul(wm0.worst_case_latency,
wm0.consumption_rate);
a.full = a.full + b.full;
- b.full = rfixed_const(16 * 1000);
- priority_mark02.full = rfixed_div(a, b);
+ b.full = dfixed_const(16 * 1000);
+ priority_mark02.full = dfixed_div(a, b);
} else {
- a.full = rfixed_mul(wm0.worst_case_latency,
+ a.full = dfixed_mul(wm0.worst_case_latency,
wm0.consumption_rate);
- b.full = rfixed_const(16 * 1000);
- priority_mark02.full = rfixed_div(a, b);
+ b.full = dfixed_const(16 * 1000);
+ priority_mark02.full = dfixed_div(a, b);
}
if (wm0.priority_mark.full > priority_mark02.full)
priority_mark02.full = wm0.priority_mark.full;
- if (rfixed_trunc(priority_mark02) < 0)
+ if (dfixed_trunc(priority_mark02) < 0)
priority_mark02.full = 0;
if (wm0.priority_mark_max.full > priority_mark02.full)
priority_mark02.full = wm0.priority_mark_max.full;
- d1mode_priority_a_cnt = rfixed_trunc(priority_mark02);
+ d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
if (rdev->disp_priority == 2)
d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
@@ -537,32 +535,32 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT,
S_006D4C_D2MODE_PRIORITY_B_OFF(1));
} else {
- if (rfixed_trunc(wm1.dbpp) > 64)
- a.full = rfixed_mul(wm1.dbpp, wm1.num_line_pair);
+ if (dfixed_trunc(wm1.dbpp) > 64)
+ a.full = dfixed_mul(wm1.dbpp, wm1.num_line_pair);
else
a.full = wm1.num_line_pair.full;
- fill_rate.full = rfixed_div(wm1.sclk, a);
+ fill_rate.full = dfixed_div(wm1.sclk, a);
if (wm1.consumption_rate.full > fill_rate.full) {
b.full = wm1.consumption_rate.full - fill_rate.full;
- b.full = rfixed_mul(b, wm1.active_time);
- a.full = rfixed_mul(wm1.worst_case_latency,
+ b.full = dfixed_mul(b, wm1.active_time);
+ a.full = dfixed_mul(wm1.worst_case_latency,
wm1.consumption_rate);
a.full = a.full + b.full;
- b.full = rfixed_const(16 * 1000);
- priority_mark12.full = rfixed_div(a, b);
+ b.full = dfixed_const(16 * 1000);
+ priority_mark12.full = dfixed_div(a, b);
} else {
- a.full = rfixed_mul(wm1.worst_case_latency,
+ a.full = dfixed_mul(wm1.worst_case_latency,
wm1.consumption_rate);
- b.full = rfixed_const(16 * 1000);
- priority_mark12.full = rfixed_div(a, b);
+ b.full = dfixed_const(16 * 1000);
+ priority_mark12.full = dfixed_div(a, b);
}
if (wm1.priority_mark.full > priority_mark12.full)
priority_mark12.full = wm1.priority_mark.full;
- if (rfixed_trunc(priority_mark12) < 0)
+ if (dfixed_trunc(priority_mark12) < 0)
priority_mark12.full = 0;
if (wm1.priority_mark_max.full > priority_mark12.full)
priority_mark12.full = wm1.priority_mark_max.full;
- d2mode_priority_a_cnt = rfixed_trunc(priority_mark12);
+ d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
if (rdev->disp_priority == 2)
d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
WREG32(R_006548_D1MODE_PRIORITY_A_CNT,
@@ -653,7 +651,7 @@ int rs690_resume(struct radeon_device *rdev)
/* Resume clock before doing reset */
rv515_clock_startup(rdev);
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
RREG32(R_0007C0_CP_STAT));
@@ -678,7 +676,6 @@ int rs690_suspend(struct radeon_device *rdev)
void rs690_fini(struct radeon_device *rdev)
{
- radeon_pm_fini(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
@@ -717,7 +714,7 @@ int rs690_init(struct radeon_device *rdev)
return -EINVAL;
}
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev,
"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
@@ -729,8 +726,6 @@ int rs690_init(struct radeon_device *rdev)
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
- /* Initialize power management */
- radeon_pm_init(rdev);
/* initialize memory controller */
rs690_mc_init(rdev);
rv515_debugfs(rdev);
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 9035121f4b58..7d9a7b0a180a 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -147,16 +147,11 @@ void rv515_gpu_init(struct radeon_device *rdev)
{
unsigned pipe_select_current, gb_pipe_select, tmp;
- r100_hdp_reset(rdev);
- r100_rb2d_reset(rdev);
-
if (r100_gui_wait_for_idle(rdev)) {
printk(KERN_WARNING "Failed to wait GUI idle while "
"reseting GPU. Bad things might happen.\n");
}
-
rv515_vga_render_disable(rdev);
-
r420_pipes_init(rdev);
gb_pipe_select = RREG32(0x402C);
tmp = RREG32(0x170C);
@@ -174,91 +169,6 @@ void rv515_gpu_init(struct radeon_device *rdev)
}
}
-int rv515_ga_reset(struct radeon_device *rdev)
-{
- uint32_t tmp;
- bool reinit_cp;
- int i;
-
- reinit_cp = rdev->cp.ready;
- rdev->cp.ready = false;
- for (i = 0; i < rdev->usec_timeout; i++) {
- WREG32(CP_CSQ_MODE, 0);
- WREG32(CP_CSQ_CNTL, 0);
- WREG32(RBBM_SOFT_RESET, 0x32005);
- (void)RREG32(RBBM_SOFT_RESET);
- udelay(200);
- WREG32(RBBM_SOFT_RESET, 0);
- /* Wait to prevent race in RBBM_STATUS */
- mdelay(1);
- tmp = RREG32(RBBM_STATUS);
- if (tmp & ((1 << 20) | (1 << 26))) {
- DRM_ERROR("VAP & CP still busy (RBBM_STATUS=0x%08X)\n", tmp);
- /* GA still busy soft reset it */
- WREG32(0x429C, 0x200);
- WREG32(VAP_PVS_STATE_FLUSH_REG, 0);
- WREG32(0x43E0, 0);
- WREG32(0x43E4, 0);
- WREG32(0x24AC, 0);
- }
- /* Wait to prevent race in RBBM_STATUS */
- mdelay(1);
- tmp = RREG32(RBBM_STATUS);
- if (!(tmp & ((1 << 20) | (1 << 26)))) {
- break;
- }
- }
- for (i = 0; i < rdev->usec_timeout; i++) {
- tmp = RREG32(RBBM_STATUS);
- if (!(tmp & ((1 << 20) | (1 << 26)))) {
- DRM_INFO("GA reset succeed (RBBM_STATUS=0x%08X)\n",
- tmp);
- DRM_INFO("GA_IDLE=0x%08X\n", RREG32(0x425C));
- DRM_INFO("RB3D_RESET_STATUS=0x%08X\n", RREG32(0x46f0));
- DRM_INFO("ISYNC_CNTL=0x%08X\n", RREG32(0x1724));
- if (reinit_cp) {
- return r100_cp_init(rdev, rdev->cp.ring_size);
- }
- return 0;
- }
- DRM_UDELAY(1);
- }
- tmp = RREG32(RBBM_STATUS);
- DRM_ERROR("Failed to reset GA ! (RBBM_STATUS=0x%08X)\n", tmp);
- return -1;
-}
-
-int rv515_gpu_reset(struct radeon_device *rdev)
-{
- uint32_t status;
-
- /* reset order likely matter */
- status = RREG32(RBBM_STATUS);
- /* reset HDP */
- r100_hdp_reset(rdev);
- /* reset rb2d */
- if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
- r100_rb2d_reset(rdev);
- }
- /* reset GA */
- if (status & ((1 << 20) | (1 << 26))) {
- rv515_ga_reset(rdev);
- }
- /* reset CP */
- status = RREG32(RBBM_STATUS);
- if (status & (1 << 16)) {
- r100_cp_reset(rdev);
- }
- /* Check if GPU is idle */
- status = RREG32(RBBM_STATUS);
- if (status & (1 << 31)) {
- DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
- return -1;
- }
- DRM_INFO("GPU reset succeed (RBBM_STATUS=0x%08X)\n", status);
- return 0;
-}
-
static void rv515_vram_get_type(struct radeon_device *rdev)
{
uint32_t tmp;
@@ -335,7 +245,7 @@ static int rv515_debugfs_ga_info(struct seq_file *m, void *data)
tmp = RREG32(0x2140);
seq_printf(m, "VAP_CNTL_STATUS 0x%08x\n", tmp);
- radeon_gpu_reset(rdev);
+ radeon_asic_reset(rdev);
tmp = RREG32(0x425C);
seq_printf(m, "GA_IDLE 0x%08x\n", tmp);
return 0;
@@ -503,7 +413,7 @@ int rv515_resume(struct radeon_device *rdev)
/* Resume clock before doing reset */
rv515_clock_startup(rdev);
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
RREG32(R_0007C0_CP_STAT));
@@ -535,7 +445,6 @@ void rv515_set_safe_registers(struct radeon_device *rdev)
void rv515_fini(struct radeon_device *rdev)
{
- radeon_pm_fini(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
@@ -573,7 +482,7 @@ int rv515_init(struct radeon_device *rdev)
return -EINVAL;
}
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev,
"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
@@ -584,8 +493,6 @@ int rv515_init(struct radeon_device *rdev)
return -EINVAL;
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
- /* Initialize power management */
- radeon_pm_init(rdev);
/* initialize AGP */
if (rdev->flags & RADEON_IS_AGP) {
r = radeon_agp_init(rdev);
@@ -885,20 +792,20 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
return;
}
- if (crtc->vsc.full > rfixed_const(2))
- wm->num_line_pair.full = rfixed_const(2);
+ if (crtc->vsc.full > dfixed_const(2))
+ wm->num_line_pair.full = dfixed_const(2);
else
- wm->num_line_pair.full = rfixed_const(1);
-
- b.full = rfixed_const(mode->crtc_hdisplay);
- c.full = rfixed_const(256);
- a.full = rfixed_div(b, c);
- request_fifo_depth.full = rfixed_mul(a, wm->num_line_pair);
- request_fifo_depth.full = rfixed_ceil(request_fifo_depth);
- if (a.full < rfixed_const(4)) {
+ wm->num_line_pair.full = dfixed_const(1);
+
+ b.full = dfixed_const(mode->crtc_hdisplay);
+ c.full = dfixed_const(256);
+ a.full = dfixed_div(b, c);
+ request_fifo_depth.full = dfixed_mul(a, wm->num_line_pair);
+ request_fifo_depth.full = dfixed_ceil(request_fifo_depth);
+ if (a.full < dfixed_const(4)) {
wm->lb_request_fifo_depth = 4;
} else {
- wm->lb_request_fifo_depth = rfixed_trunc(request_fifo_depth);
+ wm->lb_request_fifo_depth = dfixed_trunc(request_fifo_depth);
}
/* Determine consumption rate
@@ -907,23 +814,23 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
* vsc = vertical scaling ratio, defined as source/destination
* hsc = horizontal scaling ration, defined as source/destination
*/
- a.full = rfixed_const(mode->clock);
- b.full = rfixed_const(1000);
- a.full = rfixed_div(a, b);
- pclk.full = rfixed_div(b, a);
+ a.full = dfixed_const(mode->clock);
+ b.full = dfixed_const(1000);
+ a.full = dfixed_div(a, b);
+ pclk.full = dfixed_div(b, a);
if (crtc->rmx_type != RMX_OFF) {
- b.full = rfixed_const(2);
+ b.full = dfixed_const(2);
if (crtc->vsc.full > b.full)
b.full = crtc->vsc.full;
- b.full = rfixed_mul(b, crtc->hsc);
- c.full = rfixed_const(2);
- b.full = rfixed_div(b, c);
- consumption_time.full = rfixed_div(pclk, b);
+ b.full = dfixed_mul(b, crtc->hsc);
+ c.full = dfixed_const(2);
+ b.full = dfixed_div(b, c);
+ consumption_time.full = dfixed_div(pclk, b);
} else {
consumption_time.full = pclk.full;
}
- a.full = rfixed_const(1);
- wm->consumption_rate.full = rfixed_div(a, consumption_time);
+ a.full = dfixed_const(1);
+ wm->consumption_rate.full = dfixed_div(a, consumption_time);
/* Determine line time
@@ -931,27 +838,27 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
* LineTime = total number of horizontal pixels
* pclk = pixel clock period(ns)
*/
- a.full = rfixed_const(crtc->base.mode.crtc_htotal);
- line_time.full = rfixed_mul(a, pclk);
+ a.full = dfixed_const(crtc->base.mode.crtc_htotal);
+ line_time.full = dfixed_mul(a, pclk);
/* Determine active time
* ActiveTime = time of active region of display within one line,
* hactive = total number of horizontal active pixels
* htotal = total number of horizontal pixels
*/
- a.full = rfixed_const(crtc->base.mode.crtc_htotal);
- b.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
- wm->active_time.full = rfixed_mul(line_time, b);
- wm->active_time.full = rfixed_div(wm->active_time, a);
+ a.full = dfixed_const(crtc->base.mode.crtc_htotal);
+ b.full = dfixed_const(crtc->base.mode.crtc_hdisplay);
+ wm->active_time.full = dfixed_mul(line_time, b);
+ wm->active_time.full = dfixed_div(wm->active_time, a);
/* Determine chunk time
* ChunkTime = the time it takes the DCP to send one chunk of data
* to the LB which consists of pipeline delay and inter chunk gap
* sclk = system clock(Mhz)
*/
- a.full = rfixed_const(600 * 1000);
- chunk_time.full = rfixed_div(a, rdev->pm.sclk);
- read_delay_latency.full = rfixed_const(1000);
+ a.full = dfixed_const(600 * 1000);
+ chunk_time.full = dfixed_div(a, rdev->pm.sclk);
+ read_delay_latency.full = dfixed_const(1000);
/* Determine the worst case latency
* NumLinePair = Number of line pairs to request(1=2 lines, 2=4 lines)
@@ -961,9 +868,9 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
* ChunkTime = time it takes the DCP to send one chunk of data to the LB
* which consists of pipeline delay and inter chunk gap
*/
- if (rfixed_trunc(wm->num_line_pair) > 1) {
- a.full = rfixed_const(3);
- wm->worst_case_latency.full = rfixed_mul(a, chunk_time);
+ if (dfixed_trunc(wm->num_line_pair) > 1) {
+ a.full = dfixed_const(3);
+ wm->worst_case_latency.full = dfixed_mul(a, chunk_time);
wm->worst_case_latency.full += read_delay_latency.full;
} else {
wm->worst_case_latency.full = chunk_time.full + read_delay_latency.full;
@@ -979,34 +886,34 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
* of data to the LB which consists of
* pipeline delay and inter chunk gap
*/
- if ((2+wm->lb_request_fifo_depth) >= rfixed_trunc(request_fifo_depth)) {
+ if ((2+wm->lb_request_fifo_depth) >= dfixed_trunc(request_fifo_depth)) {
tolerable_latency.full = line_time.full;
} else {
- tolerable_latency.full = rfixed_const(wm->lb_request_fifo_depth - 2);
+ tolerable_latency.full = dfixed_const(wm->lb_request_fifo_depth - 2);
tolerable_latency.full = request_fifo_depth.full - tolerable_latency.full;
- tolerable_latency.full = rfixed_mul(tolerable_latency, chunk_time);
+ tolerable_latency.full = dfixed_mul(tolerable_latency, chunk_time);
tolerable_latency.full = line_time.full - tolerable_latency.full;
}
/* We assume worst case 32bits (4 bytes) */
- wm->dbpp.full = rfixed_const(2 * 16);
+ wm->dbpp.full = dfixed_const(2 * 16);
/* Determine the maximum priority mark
* width = viewport width in pixels
*/
- a.full = rfixed_const(16);
- wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
- wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a);
- wm->priority_mark_max.full = rfixed_ceil(wm->priority_mark_max);
+ a.full = dfixed_const(16);
+ wm->priority_mark_max.full = dfixed_const(crtc->base.mode.crtc_hdisplay);
+ wm->priority_mark_max.full = dfixed_div(wm->priority_mark_max, a);
+ wm->priority_mark_max.full = dfixed_ceil(wm->priority_mark_max);
/* Determine estimated width */
estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full;
- estimated_width.full = rfixed_div(estimated_width, consumption_time);
- if (rfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) {
+ estimated_width.full = dfixed_div(estimated_width, consumption_time);
+ if (dfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) {
wm->priority_mark.full = wm->priority_mark_max.full;
} else {
- a.full = rfixed_const(16);
- wm->priority_mark.full = rfixed_div(estimated_width, a);
- wm->priority_mark.full = rfixed_ceil(wm->priority_mark);
+ a.full = dfixed_const(16);
+ wm->priority_mark.full = dfixed_div(estimated_width, a);
+ wm->priority_mark.full = dfixed_ceil(wm->priority_mark);
wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full;
}
}
@@ -1035,58 +942,58 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev)
WREG32(LB_MAX_REQ_OUTSTANDING, tmp);
if (mode0 && mode1) {
- if (rfixed_trunc(wm0.dbpp) > 64)
- a.full = rfixed_div(wm0.dbpp, wm0.num_line_pair);
+ if (dfixed_trunc(wm0.dbpp) > 64)
+ a.full = dfixed_div(wm0.dbpp, wm0.num_line_pair);
else
a.full = wm0.num_line_pair.full;
- if (rfixed_trunc(wm1.dbpp) > 64)
- b.full = rfixed_div(wm1.dbpp, wm1.num_line_pair);
+ if (dfixed_trunc(wm1.dbpp) > 64)
+ b.full = dfixed_div(wm1.dbpp, wm1.num_line_pair);
else
b.full = wm1.num_line_pair.full;
a.full += b.full;
- fill_rate.full = rfixed_div(wm0.sclk, a);
+ fill_rate.full = dfixed_div(wm0.sclk, a);
if (wm0.consumption_rate.full > fill_rate.full) {
b.full = wm0.consumption_rate.full - fill_rate.full;
- b.full = rfixed_mul(b, wm0.active_time);
- a.full = rfixed_const(16);
- b.full = rfixed_div(b, a);
- a.full = rfixed_mul(wm0.worst_case_latency,
+ b.full = dfixed_mul(b, wm0.active_time);
+ a.full = dfixed_const(16);
+ b.full = dfixed_div(b, a);
+ a.full = dfixed_mul(wm0.worst_case_latency,
wm0.consumption_rate);
priority_mark02.full = a.full + b.full;
} else {
- a.full = rfixed_mul(wm0.worst_case_latency,
+ a.full = dfixed_mul(wm0.worst_case_latency,
wm0.consumption_rate);
- b.full = rfixed_const(16 * 1000);
- priority_mark02.full = rfixed_div(a, b);
+ b.full = dfixed_const(16 * 1000);
+ priority_mark02.full = dfixed_div(a, b);
}
if (wm1.consumption_rate.full > fill_rate.full) {
b.full = wm1.consumption_rate.full - fill_rate.full;
- b.full = rfixed_mul(b, wm1.active_time);
- a.full = rfixed_const(16);
- b.full = rfixed_div(b, a);
- a.full = rfixed_mul(wm1.worst_case_latency,
+ b.full = dfixed_mul(b, wm1.active_time);
+ a.full = dfixed_const(16);
+ b.full = dfixed_div(b, a);
+ a.full = dfixed_mul(wm1.worst_case_latency,
wm1.consumption_rate);
priority_mark12.full = a.full + b.full;
} else {
- a.full = rfixed_mul(wm1.worst_case_latency,
+ a.full = dfixed_mul(wm1.worst_case_latency,
wm1.consumption_rate);
- b.full = rfixed_const(16 * 1000);
- priority_mark12.full = rfixed_div(a, b);
+ b.full = dfixed_const(16 * 1000);
+ priority_mark12.full = dfixed_div(a, b);
}
if (wm0.priority_mark.full > priority_mark02.full)
priority_mark02.full = wm0.priority_mark.full;
- if (rfixed_trunc(priority_mark02) < 0)
+ if (dfixed_trunc(priority_mark02) < 0)
priority_mark02.full = 0;
if (wm0.priority_mark_max.full > priority_mark02.full)
priority_mark02.full = wm0.priority_mark_max.full;
if (wm1.priority_mark.full > priority_mark12.full)
priority_mark12.full = wm1.priority_mark.full;
- if (rfixed_trunc(priority_mark12) < 0)
+ if (dfixed_trunc(priority_mark12) < 0)
priority_mark12.full = 0;
if (wm1.priority_mark_max.full > priority_mark12.full)
priority_mark12.full = wm1.priority_mark_max.full;
- d1mode_priority_a_cnt = rfixed_trunc(priority_mark02);
- d2mode_priority_a_cnt = rfixed_trunc(priority_mark12);
+ d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
+ d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
if (rdev->disp_priority == 2) {
d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
@@ -1096,32 +1003,32 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev)
WREG32(D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
WREG32(D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
} else if (mode0) {
- if (rfixed_trunc(wm0.dbpp) > 64)
- a.full = rfixed_div(wm0.dbpp, wm0.num_line_pair);
+ if (dfixed_trunc(wm0.dbpp) > 64)
+ a.full = dfixed_div(wm0.dbpp, wm0.num_line_pair);
else
a.full = wm0.num_line_pair.full;
- fill_rate.full = rfixed_div(wm0.sclk, a);
+ fill_rate.full = dfixed_div(wm0.sclk, a);
if (wm0.consumption_rate.full > fill_rate.full) {
b.full = wm0.consumption_rate.full - fill_rate.full;
- b.full = rfixed_mul(b, wm0.active_time);
- a.full = rfixed_const(16);
- b.full = rfixed_div(b, a);
- a.full = rfixed_mul(wm0.worst_case_latency,
+ b.full = dfixed_mul(b, wm0.active_time);
+ a.full = dfixed_const(16);
+ b.full = dfixed_div(b, a);
+ a.full = dfixed_mul(wm0.worst_case_latency,
wm0.consumption_rate);
priority_mark02.full = a.full + b.full;
} else {
- a.full = rfixed_mul(wm0.worst_case_latency,
+ a.full = dfixed_mul(wm0.worst_case_latency,
wm0.consumption_rate);
- b.full = rfixed_const(16);
- priority_mark02.full = rfixed_div(a, b);
+ b.full = dfixed_const(16);
+ priority_mark02.full = dfixed_div(a, b);
}
if (wm0.priority_mark.full > priority_mark02.full)
priority_mark02.full = wm0.priority_mark.full;
- if (rfixed_trunc(priority_mark02) < 0)
+ if (dfixed_trunc(priority_mark02) < 0)
priority_mark02.full = 0;
if (wm0.priority_mark_max.full > priority_mark02.full)
priority_mark02.full = wm0.priority_mark_max.full;
- d1mode_priority_a_cnt = rfixed_trunc(priority_mark02);
+ d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
if (rdev->disp_priority == 2)
d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
WREG32(D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
@@ -1129,32 +1036,32 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev)
WREG32(D2MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF);
WREG32(D2MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF);
} else {
- if (rfixed_trunc(wm1.dbpp) > 64)
- a.full = rfixed_div(wm1.dbpp, wm1.num_line_pair);
+ if (dfixed_trunc(wm1.dbpp) > 64)
+ a.full = dfixed_div(wm1.dbpp, wm1.num_line_pair);
else
a.full = wm1.num_line_pair.full;
- fill_rate.full = rfixed_div(wm1.sclk, a);
+ fill_rate.full = dfixed_div(wm1.sclk, a);
if (wm1.consumption_rate.full > fill_rate.full) {
b.full = wm1.consumption_rate.full - fill_rate.full;
- b.full = rfixed_mul(b, wm1.active_time);
- a.full = rfixed_const(16);
- b.full = rfixed_div(b, a);
- a.full = rfixed_mul(wm1.worst_case_latency,
+ b.full = dfixed_mul(b, wm1.active_time);
+ a.full = dfixed_const(16);
+ b.full = dfixed_div(b, a);
+ a.full = dfixed_mul(wm1.worst_case_latency,
wm1.consumption_rate);
priority_mark12.full = a.full + b.full;
} else {
- a.full = rfixed_mul(wm1.worst_case_latency,
+ a.full = dfixed_mul(wm1.worst_case_latency,
wm1.consumption_rate);
- b.full = rfixed_const(16 * 1000);
- priority_mark12.full = rfixed_div(a, b);
+ b.full = dfixed_const(16 * 1000);
+ priority_mark12.full = dfixed_div(a, b);
}
if (wm1.priority_mark.full > priority_mark12.full)
priority_mark12.full = wm1.priority_mark.full;
- if (rfixed_trunc(priority_mark12) < 0)
+ if (dfixed_trunc(priority_mark12) < 0)
priority_mark12.full = 0;
if (wm1.priority_mark_max.full > priority_mark12.full)
priority_mark12.full = wm1.priority_mark_max.full;
- d2mode_priority_a_cnt = rfixed_trunc(priority_mark12);
+ d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
if (rdev->disp_priority == 2)
d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
WREG32(D1MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF);
diff --git a/drivers/gpu/drm/radeon/rv515d.h b/drivers/gpu/drm/radeon/rv515d.h
index fc216e49384d..590309a710b1 100644
--- a/drivers/gpu/drm/radeon/rv515d.h
+++ b/drivers/gpu/drm/radeon/rv515d.h
@@ -217,6 +217,52 @@
#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
/* Registers */
+#define R_0000F0_RBBM_SOFT_RESET 0x0000F0
+#define S_0000F0_SOFT_RESET_CP(x) (((x) & 0x1) << 0)
+#define G_0000F0_SOFT_RESET_CP(x) (((x) >> 0) & 0x1)
+#define C_0000F0_SOFT_RESET_CP 0xFFFFFFFE
+#define S_0000F0_SOFT_RESET_HI(x) (((x) & 0x1) << 1)
+#define G_0000F0_SOFT_RESET_HI(x) (((x) >> 1) & 0x1)
+#define C_0000F0_SOFT_RESET_HI 0xFFFFFFFD
+#define S_0000F0_SOFT_RESET_VAP(x) (((x) & 0x1) << 2)
+#define G_0000F0_SOFT_RESET_VAP(x) (((x) >> 2) & 0x1)
+#define C_0000F0_SOFT_RESET_VAP 0xFFFFFFFB
+#define S_0000F0_SOFT_RESET_RE(x) (((x) & 0x1) << 3)
+#define G_0000F0_SOFT_RESET_RE(x) (((x) >> 3) & 0x1)
+#define C_0000F0_SOFT_RESET_RE 0xFFFFFFF7
+#define S_0000F0_SOFT_RESET_PP(x) (((x) & 0x1) << 4)
+#define G_0000F0_SOFT_RESET_PP(x) (((x) >> 4) & 0x1)
+#define C_0000F0_SOFT_RESET_PP 0xFFFFFFEF
+#define S_0000F0_SOFT_RESET_E2(x) (((x) & 0x1) << 5)
+#define G_0000F0_SOFT_RESET_E2(x) (((x) >> 5) & 0x1)
+#define C_0000F0_SOFT_RESET_E2 0xFFFFFFDF
+#define S_0000F0_SOFT_RESET_RB(x) (((x) & 0x1) << 6)
+#define G_0000F0_SOFT_RESET_RB(x) (((x) >> 6) & 0x1)
+#define C_0000F0_SOFT_RESET_RB 0xFFFFFFBF
+#define S_0000F0_SOFT_RESET_HDP(x) (((x) & 0x1) << 7)
+#define G_0000F0_SOFT_RESET_HDP(x) (((x) >> 7) & 0x1)
+#define C_0000F0_SOFT_RESET_HDP 0xFFFFFF7F
+#define S_0000F0_SOFT_RESET_MC(x) (((x) & 0x1) << 8)
+#define G_0000F0_SOFT_RESET_MC(x) (((x) >> 8) & 0x1)
+#define C_0000F0_SOFT_RESET_MC 0xFFFFFEFF
+#define S_0000F0_SOFT_RESET_AIC(x) (((x) & 0x1) << 9)
+#define G_0000F0_SOFT_RESET_AIC(x) (((x) >> 9) & 0x1)
+#define C_0000F0_SOFT_RESET_AIC 0xFFFFFDFF
+#define S_0000F0_SOFT_RESET_VIP(x) (((x) & 0x1) << 10)
+#define G_0000F0_SOFT_RESET_VIP(x) (((x) >> 10) & 0x1)
+#define C_0000F0_SOFT_RESET_VIP 0xFFFFFBFF
+#define S_0000F0_SOFT_RESET_DISP(x) (((x) & 0x1) << 11)
+#define G_0000F0_SOFT_RESET_DISP(x) (((x) >> 11) & 0x1)
+#define C_0000F0_SOFT_RESET_DISP 0xFFFFF7FF
+#define S_0000F0_SOFT_RESET_CG(x) (((x) & 0x1) << 12)
+#define G_0000F0_SOFT_RESET_CG(x) (((x) >> 12) & 0x1)
+#define C_0000F0_SOFT_RESET_CG 0xFFFFEFFF
+#define S_0000F0_SOFT_RESET_GA(x) (((x) & 0x1) << 13)
+#define G_0000F0_SOFT_RESET_GA(x) (((x) >> 13) & 0x1)
+#define C_0000F0_SOFT_RESET_GA 0xFFFFDFFF
+#define S_0000F0_SOFT_RESET_IDCT(x) (((x) & 0x1) << 14)
+#define G_0000F0_SOFT_RESET_IDCT(x) (((x) >> 14) & 0x1)
+#define C_0000F0_SOFT_RESET_IDCT 0xFFFFBFFF
#define R_0000F8_CONFIG_MEMSIZE 0x0000F8
#define S_0000F8_CONFIG_MEMSIZE(x) (((x) & 0xFFFFFFFF) << 0)
#define G_0000F8_CONFIG_MEMSIZE(x) (((x) >> 0) & 0xFFFFFFFF)
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 97958a64df1a..253f24aec031 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -42,6 +42,10 @@
static void rv770_gpu_init(struct radeon_device *rdev);
void rv770_fini(struct radeon_device *rdev);
+void rv770_pm_misc(struct radeon_device *rdev)
+{
+
+}
/*
* GART
@@ -237,7 +241,6 @@ void r700_cp_stop(struct radeon_device *rdev)
WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
}
-
static int rv770_cp_load_microcode(struct radeon_device *rdev)
{
const __be32 *fw_data;
@@ -272,6 +275,11 @@ static int rv770_cp_load_microcode(struct radeon_device *rdev)
return 0;
}
+void r700_cp_fini(struct radeon_device *rdev)
+{
+ r700_cp_stop(rdev);
+ radeon_ring_fini(rdev);
+}
/*
* Core functions
@@ -906,23 +914,12 @@ int rv770_mc_init(struct radeon_device *rdev)
rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
rdev->mc.visible_vram_size = rdev->mc.aper_size;
- /* FIXME remove this once we support unmappable VRAM */
- if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
- rdev->mc.mc_vram_size = rdev->mc.aper_size;
- rdev->mc.real_vram_size = rdev->mc.aper_size;
- }
r600_vram_gtt_location(rdev, &rdev->mc);
radeon_update_bandwidth_info(rdev);
return 0;
}
-int rv770_gpu_reset(struct radeon_device *rdev)
-{
- /* FIXME: implement any rv770 specific bits */
- return r600_gpu_reset(rdev);
-}
-
static int rv770_startup(struct radeon_device *rdev)
{
int r;
@@ -1094,8 +1091,6 @@ int rv770_init(struct radeon_device *rdev)
r = radeon_clocks_init(rdev);
if (r)
return r;
- /* Initialize power management */
- radeon_pm_init(rdev);
/* Fence driver */
r = radeon_fence_driver_init(rdev);
if (r)
@@ -1132,7 +1127,7 @@ int rv770_init(struct radeon_device *rdev)
r = rv770_startup(rdev);
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
- r600_cp_fini(rdev);
+ r700_cp_fini(rdev);
r600_wb_fini(rdev);
r600_irq_fini(rdev);
radeon_irq_kms_fini(rdev);
@@ -1164,9 +1159,8 @@ int rv770_init(struct radeon_device *rdev)
void rv770_fini(struct radeon_device *rdev)
{
- radeon_pm_fini(rdev);
r600_blit_fini(rdev);
- r600_cp_fini(rdev);
+ r700_cp_fini(rdev);
r600_wb_fini(rdev);
r600_irq_fini(rdev);
radeon_irq_kms_fini(rdev);
diff --git a/drivers/gpu/drm/savage/savage_bci.c b/drivers/gpu/drm/savage/savage_bci.c
index bff6fc2524c8..2d0c9ca484c5 100644
--- a/drivers/gpu/drm/savage/savage_bci.c
+++ b/drivers/gpu/drm/savage/savage_bci.c
@@ -539,11 +539,10 @@ int savage_driver_load(struct drm_device *dev, unsigned long chipset)
{
drm_savage_private_t *dev_priv;
- dev_priv = kmalloc(sizeof(drm_savage_private_t), GFP_KERNEL);
+ dev_priv = kzalloc(sizeof(drm_savage_private_t), GFP_KERNEL);
if (dev_priv == NULL)
return -ENOMEM;
- memset(dev_priv, 0, sizeof(drm_savage_private_t));
dev->dev_private = (void *)dev_priv;
dev_priv->chipset = (enum savage_family)chipset;
diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile
index 1e138f5bae09..4256e2006476 100644
--- a/drivers/gpu/drm/ttm/Makefile
+++ b/drivers/gpu/drm/ttm/Makefile
@@ -4,6 +4,6 @@
ccflags-y := -Iinclude/drm
ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \
ttm_bo_util.o ttm_bo_vm.o ttm_module.o ttm_global.o \
- ttm_object.o ttm_lock.o ttm_execbuf_util.o
+ ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o
obj-$(CONFIG_DRM_TTM) += ttm.o
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 0e3754a3a303..555ebb12ace8 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -79,8 +79,6 @@ static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
printk(KERN_ERR TTM_PFX " use_type: %d\n", man->use_type);
printk(KERN_ERR TTM_PFX " flags: 0x%08X\n", man->flags);
printk(KERN_ERR TTM_PFX " gpu_offset: 0x%08lX\n", man->gpu_offset);
- printk(KERN_ERR TTM_PFX " io_offset: 0x%08lX\n", man->io_offset);
- printk(KERN_ERR TTM_PFX " io_size: %ld\n", man->io_size);
printk(KERN_ERR TTM_PFX " size: %llu\n", man->size);
printk(KERN_ERR TTM_PFX " available_caching: 0x%08X\n",
man->available_caching);
@@ -357,7 +355,8 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem,
- bool evict, bool interruptible, bool no_wait)
+ bool evict, bool interruptible,
+ bool no_wait_reserve, bool no_wait_gpu)
{
struct ttm_bo_device *bdev = bo->bdev;
bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
@@ -402,12 +401,12 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
- ret = ttm_bo_move_ttm(bo, evict, no_wait, mem);
+ ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, mem);
else if (bdev->driver->move)
ret = bdev->driver->move(bo, evict, interruptible,
- no_wait, mem);
+ no_wait_reserve, no_wait_gpu, mem);
else
- ret = ttm_bo_move_memcpy(bo, evict, no_wait, mem);
+ ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, mem);
if (ret)
goto out_err;
@@ -605,8 +604,22 @@ void ttm_bo_unref(struct ttm_buffer_object **p_bo)
}
EXPORT_SYMBOL(ttm_bo_unref);
+int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
+{
+ return cancel_delayed_work_sync(&bdev->wq);
+}
+EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);
+
+void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
+{
+ if (resched)
+ schedule_delayed_work(&bdev->wq,
+ ((HZ / 100) < 1) ? 1 : HZ / 100);
+}
+EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);
+
static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
- bool no_wait)
+ bool no_wait_reserve, bool no_wait_gpu)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_bo_global *glob = bo->glob;
@@ -615,7 +628,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
int ret = 0;
spin_lock(&bo->lock);
- ret = ttm_bo_wait(bo, false, interruptible, no_wait);
+ ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
spin_unlock(&bo->lock);
if (unlikely(ret != 0)) {
@@ -631,6 +644,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
evict_mem = bo->mem;
evict_mem.mm_node = NULL;
+ evict_mem.bus.io_reserved = false;
placement.fpfn = 0;
placement.lpfn = 0;
@@ -638,7 +652,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
placement.num_busy_placement = 0;
bdev->driver->evict_flags(bo, &placement);
ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
- no_wait);
+ no_wait_reserve, no_wait_gpu);
if (ret) {
if (ret != -ERESTARTSYS) {
printk(KERN_ERR TTM_PFX
@@ -650,7 +664,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
}
ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
- no_wait);
+ no_wait_reserve, no_wait_gpu);
if (ret) {
if (ret != -ERESTARTSYS)
printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
@@ -670,7 +684,8 @@ out:
static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
uint32_t mem_type,
- bool interruptible, bool no_wait)
+ bool interruptible, bool no_wait_reserve,
+ bool no_wait_gpu)
{
struct ttm_bo_global *glob = bdev->glob;
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
@@ -687,11 +702,11 @@ retry:
bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
kref_get(&bo->list_kref);
- ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+ ret = ttm_bo_reserve_locked(bo, false, no_wait_reserve, false, 0);
if (unlikely(ret == -EBUSY)) {
spin_unlock(&glob->lru_lock);
- if (likely(!no_wait))
+ if (likely(!no_wait_gpu))
ret = ttm_bo_wait_unreserved(bo, interruptible);
kref_put(&bo->list_kref, ttm_bo_release_list);
@@ -713,7 +728,7 @@ retry:
while (put_count--)
kref_put(&bo->list_kref, ttm_bo_ref_bug);
- ret = ttm_bo_evict(bo, interruptible, no_wait);
+ ret = ttm_bo_evict(bo, interruptible, no_wait_reserve, no_wait_gpu);
ttm_bo_unreserve(bo);
kref_put(&bo->list_kref, ttm_bo_release_list);
@@ -764,7 +779,9 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
uint32_t mem_type,
struct ttm_placement *placement,
struct ttm_mem_reg *mem,
- bool interruptible, bool no_wait)
+ bool interruptible,
+ bool no_wait_reserve,
+ bool no_wait_gpu)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_bo_global *glob = bdev->glob;
@@ -785,7 +802,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
}
spin_unlock(&glob->lru_lock);
ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
- no_wait);
+ no_wait_reserve, no_wait_gpu);
if (unlikely(ret != 0))
return ret;
} while (1);
@@ -855,7 +872,8 @@ static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
struct ttm_mem_reg *mem,
- bool interruptible, bool no_wait)
+ bool interruptible, bool no_wait_reserve,
+ bool no_wait_gpu)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_type_manager *man;
@@ -952,7 +970,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
}
ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
- interruptible, no_wait);
+ interruptible, no_wait_reserve, no_wait_gpu);
if (ret == 0 && mem->mm_node) {
mem->placement = cur_flags;
mem->mm_node->private = bo;
@@ -978,7 +996,8 @@ EXPORT_SYMBOL(ttm_bo_wait_cpu);
int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
- bool interruptible, bool no_wait)
+ bool interruptible, bool no_wait_reserve,
+ bool no_wait_gpu)
{
struct ttm_bo_global *glob = bo->glob;
int ret = 0;
@@ -992,20 +1011,21 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
* instead of doing it here.
*/
spin_lock(&bo->lock);
- ret = ttm_bo_wait(bo, false, interruptible, no_wait);
+ ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
spin_unlock(&bo->lock);
if (ret)
return ret;
mem.num_pages = bo->num_pages;
mem.size = mem.num_pages << PAGE_SHIFT;
mem.page_alignment = bo->mem.page_alignment;
+ mem.bus.io_reserved = false;
/*
* Determine where to move the buffer.
*/
- ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait);
+ ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait_reserve, no_wait_gpu);
if (ret)
goto out_unlock;
- ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait);
+ ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait_reserve, no_wait_gpu);
out_unlock:
if (ret && mem.mm_node) {
spin_lock(&glob->lru_lock);
@@ -1039,7 +1059,8 @@ static int ttm_bo_mem_compat(struct ttm_placement *placement,
int ttm_bo_validate(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
- bool interruptible, bool no_wait)
+ bool interruptible, bool no_wait_reserve,
+ bool no_wait_gpu)
{
int ret;
@@ -1054,7 +1075,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
*/
ret = ttm_bo_mem_compat(placement, &bo->mem);
if (ret < 0) {
- ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait);
+ ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait_reserve, no_wait_gpu);
if (ret)
return ret;
} else {
@@ -1153,6 +1174,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
bo->mem.num_pages = bo->num_pages;
bo->mem.mm_node = NULL;
bo->mem.page_alignment = page_alignment;
+ bo->mem.bus.io_reserved = false;
bo->buffer_start = buffer_start & PAGE_MASK;
bo->priv_flags = 0;
bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
@@ -1175,7 +1197,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
goto out_err;
}
- ret = ttm_bo_validate(bo, placement, interruptible, false);
+ ret = ttm_bo_validate(bo, placement, interruptible, false, false);
if (ret)
goto out_err;
@@ -1249,7 +1271,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
spin_lock(&glob->lru_lock);
while (!list_empty(&man->lru)) {
spin_unlock(&glob->lru_lock);
- ret = ttm_mem_evict_first(bdev, mem_type, false, false);
+ ret = ttm_mem_evict_first(bdev, mem_type, false, false, false);
if (ret) {
if (allow_errors) {
return ret;
@@ -1553,26 +1575,6 @@ bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
return true;
}
-int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
- struct ttm_mem_reg *mem,
- unsigned long *bus_base,
- unsigned long *bus_offset, unsigned long *bus_size)
-{
- struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
-
- *bus_size = 0;
- if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
- return -EINVAL;
-
- if (ttm_mem_reg_is_pci(bdev, mem)) {
- *bus_offset = mem->mm_node->start << PAGE_SHIFT;
- *bus_size = mem->num_pages << PAGE_SHIFT;
- *bus_base = man->io_offset;
- }
-
- return 0;
-}
-
void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
struct ttm_bo_device *bdev = bo->bdev;
@@ -1581,8 +1583,8 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
if (!bdev->dev_mapping)
return;
-
unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
+ ttm_mem_io_free(bdev, &bo->mem);
}
EXPORT_SYMBOL(ttm_bo_unmap_virtual);
@@ -1811,7 +1813,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
evict_mem.mem_type = TTM_PL_SYSTEM;
ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
- false, false);
+ false, false, false);
if (unlikely(ret != 0))
goto out;
}
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index d764e82e799b..13012a1f1486 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -50,7 +50,8 @@ void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
}
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
- bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
+ bool evict, bool no_wait_reserve,
+ bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
struct ttm_tt *ttm = bo->ttm;
struct ttm_mem_reg *old_mem = &bo->mem;
@@ -81,30 +82,51 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
}
EXPORT_SYMBOL(ttm_bo_move_ttm);
+int ttm_mem_io_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+ int ret;
+
+ if (!mem->bus.io_reserved) {
+ mem->bus.io_reserved = true;
+ ret = bdev->driver->io_mem_reserve(bdev, mem);
+ if (unlikely(ret != 0))
+ return ret;
+ }
+ return 0;
+}
+
+void ttm_mem_io_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+ if (bdev->driver->io_mem_reserve) {
+ if (mem->bus.io_reserved) {
+ mem->bus.io_reserved = false;
+ bdev->driver->io_mem_free(bdev, mem);
+ }
+ }
+}
+
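These two helpers replace the old ttm_bo_pci_offset() scheme: ttm_mem_io_reserve() asks the driver, at most once per ttm_mem_reg, to describe how the memory is reachable over the bus, and ttm_mem_io_free() drops that description again. A rough sketch of what a driver's io_mem_reserve() callback might fill in; struct my_dev and its vram_bus_base field are invented names, not from the patch:

	/* Hypothetical driver callback, for illustration only. */
	static int my_io_mem_reserve(struct ttm_bo_device *bdev,
				     struct ttm_mem_reg *mem)
	{
		struct my_dev *dev = container_of(bdev, struct my_dev, bdev);

		mem->bus.addr = NULL;		/* no pre-mapped CPU address */
		mem->bus.offset = 0;
		mem->bus.size = mem->num_pages << PAGE_SHIFT;
		mem->bus.base = 0;
		mem->bus.is_iomem = false;

		switch (mem->mem_type) {
		case TTM_PL_SYSTEM:
			return 0;		/* system RAM, nothing to map */
		case TTM_PL_VRAM:
			mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
			mem->bus.base = dev->vram_bus_base; /* e.g. PCI aperture base */
			mem->bus.is_iomem = true;
			return 0;
		default:
			return -EINVAL;
		}
	}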
int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
void **virtual)
{
- struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
- unsigned long bus_offset;
- unsigned long bus_size;
- unsigned long bus_base;
int ret;
void *addr;
*virtual = NULL;
- ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset, &bus_size);
- if (ret || bus_size == 0)
+ ret = ttm_mem_io_reserve(bdev, mem);
+ if (ret || !mem->bus.is_iomem)
return ret;
- if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
- addr = (void *)(((u8 *) man->io_addr) + bus_offset);
- else {
+ if (mem->bus.addr) {
+ addr = mem->bus.addr;
+ } else {
if (mem->placement & TTM_PL_FLAG_WC)
- addr = ioremap_wc(bus_base + bus_offset, bus_size);
+ addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
else
- addr = ioremap_nocache(bus_base + bus_offset, bus_size);
- if (!addr)
+ addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
+ if (!addr) {
+ ttm_mem_io_free(bdev, mem);
return -ENOMEM;
+ }
}
*virtual = addr;
return 0;
@@ -117,8 +139,9 @@ void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
man = &bdev->man[mem->mem_type];
- if (virtual && (man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
+ if (virtual && mem->bus.addr == NULL)
iounmap(virtual);
+ ttm_mem_io_free(bdev, mem);
}
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
@@ -208,7 +231,8 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
}
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
- bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
+ bool evict, bool no_wait_reserve, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
@@ -369,26 +393,23 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
EXPORT_SYMBOL(ttm_io_prot);
static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
- unsigned long bus_base,
- unsigned long bus_offset,
- unsigned long bus_size,
+ unsigned long offset,
+ unsigned long size,
struct ttm_bo_kmap_obj *map)
{
- struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_reg *mem = &bo->mem;
- struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
- if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP)) {
+ if (bo->mem.bus.addr) {
map->bo_kmap_type = ttm_bo_map_premapped;
- map->virtual = (void *)(((u8 *) man->io_addr) + bus_offset);
+ map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
} else {
map->bo_kmap_type = ttm_bo_map_iomap;
if (mem->placement & TTM_PL_FLAG_WC)
- map->virtual = ioremap_wc(bus_base + bus_offset,
- bus_size);
+ map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
+ size);
else
- map->virtual = ioremap_nocache(bus_base + bus_offset,
- bus_size);
+ map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
+ size);
}
return (!map->virtual) ? -ENOMEM : 0;
}
@@ -441,13 +462,12 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
unsigned long start_page, unsigned long num_pages,
struct ttm_bo_kmap_obj *map)
{
+ unsigned long offset, size;
int ret;
- unsigned long bus_base;
- unsigned long bus_offset;
- unsigned long bus_size;
BUG_ON(!list_empty(&bo->swap));
map->virtual = NULL;
+ map->bo = bo;
if (num_pages > bo->num_pages)
return -EINVAL;
if (start_page > bo->num_pages)
@@ -456,16 +476,15 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
return -EPERM;
#endif
- ret = ttm_bo_pci_offset(bo->bdev, &bo->mem, &bus_base,
- &bus_offset, &bus_size);
+ ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
if (ret)
return ret;
- if (bus_size == 0) {
+ if (!bo->mem.bus.is_iomem) {
return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
} else {
- bus_offset += start_page << PAGE_SHIFT;
- bus_size = num_pages << PAGE_SHIFT;
- return ttm_bo_ioremap(bo, bus_base, bus_offset, bus_size, map);
+ offset = start_page << PAGE_SHIFT;
+ size = num_pages << PAGE_SHIFT;
+ return ttm_bo_ioremap(bo, offset, size, map);
}
}
EXPORT_SYMBOL(ttm_bo_kmap);
@@ -477,6 +496,7 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
switch (map->bo_kmap_type) {
case ttm_bo_map_iomap:
iounmap(map->virtual);
+ ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
break;
case ttm_bo_map_vmap:
vunmap(map->virtual);
@@ -494,39 +514,11 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
}
EXPORT_SYMBOL(ttm_bo_kunmap);
-int ttm_bo_pfn_prot(struct ttm_buffer_object *bo,
- unsigned long dst_offset,
- unsigned long *pfn, pgprot_t *prot)
-{
- struct ttm_mem_reg *mem = &bo->mem;
- struct ttm_bo_device *bdev = bo->bdev;
- unsigned long bus_offset;
- unsigned long bus_size;
- unsigned long bus_base;
- int ret;
- ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset,
- &bus_size);
- if (ret)
- return -EINVAL;
- if (bus_size != 0)
- *pfn = (bus_base + bus_offset + dst_offset) >> PAGE_SHIFT;
- else
- if (!bo->ttm)
- return -EINVAL;
- else
- *pfn = page_to_pfn(ttm_tt_get_page(bo->ttm,
- dst_offset >>
- PAGE_SHIFT));
- *prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
- PAGE_KERNEL : ttm_io_prot(mem->placement, PAGE_KERNEL);
-
- return 0;
-}
-
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
void *sync_obj,
void *sync_obj_arg,
- bool evict, bool no_wait,
+ bool evict, bool no_wait_reserve,
+ bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
struct ttm_bo_device *bdev = bo->bdev;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 668dbe8b8dd3..fe6cb77899f4 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -74,9 +74,6 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
vma->vm_private_data;
struct ttm_bo_device *bdev = bo->bdev;
- unsigned long bus_base;
- unsigned long bus_offset;
- unsigned long bus_size;
unsigned long page_offset;
unsigned long page_last;
unsigned long pfn;
@@ -84,7 +81,6 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
struct page *page;
int ret;
int i;
- bool is_iomem;
unsigned long address = (unsigned long)vmf->virtual_address;
int retval = VM_FAULT_NOPAGE;
@@ -101,8 +97,21 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
return VM_FAULT_NOPAGE;
}
- if (bdev->driver->fault_reserve_notify)
- bdev->driver->fault_reserve_notify(bo);
+ if (bdev->driver->fault_reserve_notify) {
+ ret = bdev->driver->fault_reserve_notify(bo);
+ switch (ret) {
+ case 0:
+ break;
+ case -EBUSY:
+ set_need_resched();
+ case -ERESTARTSYS:
+ retval = VM_FAULT_NOPAGE;
+ goto out_unlock;
+ default:
+ retval = VM_FAULT_SIGBUS;
+ goto out_unlock;
+ }
+ }
/*
* Wait for buffer data in transit, due to a pipelined
@@ -122,15 +131,12 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
spin_unlock(&bo->lock);
- ret = ttm_bo_pci_offset(bdev, &bo->mem, &bus_base, &bus_offset,
- &bus_size);
- if (unlikely(ret != 0)) {
+ ret = ttm_mem_io_reserve(bdev, &bo->mem);
+ if (ret) {
retval = VM_FAULT_SIGBUS;
goto out_unlock;
}
- is_iomem = (bus_size != 0);
-
page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
bo->vm_node->start - vma->vm_pgoff;
page_last = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) +
@@ -154,8 +160,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
* vma->vm_page_prot when the object changes caching policy, with
* the correct locks held.
*/
-
- if (is_iomem) {
+ if (bo->mem.bus.is_iomem) {
vma->vm_page_prot = ttm_io_prot(bo->mem.placement,
vma->vm_page_prot);
} else {
@@ -171,10 +176,8 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
*/
for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
-
- if (is_iomem)
- pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) +
- page_offset;
+ if (bo->mem.bus.is_iomem)
+ pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + page_offset;
else {
page = ttm_tt_get_page(ttm, page_offset);
if (unlikely(!page && i == 0)) {
@@ -198,7 +201,6 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
retval =
(ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
goto out_unlock;
-
}
address += PAGE_SIZE;
@@ -221,8 +223,7 @@ static void ttm_bo_vm_open(struct vm_area_struct *vma)
static void ttm_bo_vm_close(struct vm_area_struct *vma)
{
- struct ttm_buffer_object *bo =
- (struct ttm_buffer_object *)vma->vm_private_data;
+ struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data;
ttm_bo_unref(&bo);
vma->vm_private_data = NULL;
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index 801b702566e6..e70ddd82dc02 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -27,6 +27,7 @@
#include "ttm/ttm_memory.h"
#include "ttm/ttm_module.h"
+#include "ttm/ttm_page_alloc.h"
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/wait.h>
@@ -393,6 +394,7 @@ int ttm_mem_global_init(struct ttm_mem_global *glob)
"Zone %7s: Available graphics memory: %llu kiB.\n",
zone->name, (unsigned long long) zone->max_mem >> 10);
}
+ ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
return 0;
out_no_zone:
ttm_mem_global_release(glob);
@@ -405,6 +407,9 @@ void ttm_mem_global_release(struct ttm_mem_global *glob)
unsigned int i;
struct ttm_mem_zone *zone;
+ /* let the page allocator first stop the shrink work. */
+ ttm_page_alloc_fini();
+
flush_workqueue(glob->swap_queue);
destroy_workqueue(glob->swap_queue);
glob->swap_queue = NULL;
@@ -412,7 +417,7 @@ void ttm_mem_global_release(struct ttm_mem_global *glob)
zone = glob->zones[i];
kobject_del(&zone->kobj);
kobject_put(&zone->kobj);
- }
+ }
kobject_del(&glob->kobj);
kobject_put(&glob->kobj);
}
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
new file mode 100644
index 000000000000..0d9a42c2394f
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -0,0 +1,845 @@
+/*
+ * Copyright (c) Red Hat Inc.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie <airlied@redhat.com>
+ * Jerome Glisse <jglisse@redhat.com>
+ * Pauli Nieminen <suokkos@gmail.com>
+ */
+
+/* simple list based uncached page pool
+ * - Pool collects recently freed pages for reuse
+ * - Use page->lru to keep a free list
+ * - doesn't track currently in use pages
+ */
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/highmem.h>
+#include <linux/mm_types.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/seq_file.h> /* for seq_printf */
+#include <linux/slab.h>
+
+#include <asm/atomic.h>
+#include <asm/agp.h>
+
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_page_alloc.h"
+
+
+#define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
+#define SMALL_ALLOCATION 16
+#define FREE_ALL_PAGES (~0U)
+/* times are in msecs */
+#define PAGE_FREE_INTERVAL 1000
+
+/**
+ * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
+ *
+ * @lock: Protects the shared pool from concurrent access. Must be used with
+ * irqsave/irqrestore variants because the pool allocator may be called from
+ * delayed work.
+ * @fill_lock: Prevent concurrent calls to fill.
+ * @list: Pool of free uc/wc pages for fast reuse.
+ * @gfp_flags: Flags to pass for alloc_page.
+ * @npages: Number of pages in pool.
+ */
+struct ttm_page_pool {
+ spinlock_t lock;
+ bool fill_lock;
+ struct list_head list;
+ int gfp_flags;
+ unsigned npages;
+ char *name;
+ unsigned long nfrees;
+ unsigned long nrefills;
+};
+
+/**
+ * Limits for the pool. They are handled without locks because the only place
+ * where they may change is the sysfs store. They won't have an immediate effect
+ * anyway, so forcing serialization of accesses to them is pointless.
+ */
+
+struct ttm_pool_opts {
+ unsigned alloc_size;
+ unsigned max_size;
+ unsigned small;
+};
+
+#define NUM_POOLS 4
+
+/**
+ * struct ttm_pool_manager - Holds memory pools for fast allocation
+ *
+ * Manager is read only object for pool code so it doesn't need locking.
+ *
+ * @free_interval: minimum number of jiffies between freeing pages from pool.
+ * @page_alloc_inited: reference counting for pool allocation.
+ * @work: Work that is used to shrink the pool. Work is only run when there are
+ * some pages to free.
+ * @small_allocation: Limit, in number of pages, below which an allocation is
+ * considered small.
+ *
+ * @pools: All pool objects in use.
+ **/
+struct ttm_pool_manager {
+ struct kobject kobj;
+ struct shrinker mm_shrink;
+ atomic_t page_alloc_inited;
+ struct ttm_pool_opts options;
+
+ union {
+ struct ttm_page_pool pools[NUM_POOLS];
+ struct {
+ struct ttm_page_pool wc_pool;
+ struct ttm_page_pool uc_pool;
+ struct ttm_page_pool wc_pool_dma32;
+ struct ttm_page_pool uc_pool_dma32;
+ } ;
+ };
+};
+
+static struct attribute ttm_page_pool_max = {
+ .name = "pool_max_size",
+ .mode = S_IRUGO | S_IWUSR
+};
+static struct attribute ttm_page_pool_small = {
+ .name = "pool_small_allocation",
+ .mode = S_IRUGO | S_IWUSR
+};
+static struct attribute ttm_page_pool_alloc_size = {
+ .name = "pool_allocation_size",
+ .mode = S_IRUGO | S_IWUSR
+};
+
+static struct attribute *ttm_pool_attrs[] = {
+ &ttm_page_pool_max,
+ &ttm_page_pool_small,
+ &ttm_page_pool_alloc_size,
+ NULL
+};
+
+static void ttm_pool_kobj_release(struct kobject *kobj)
+{
+ struct ttm_pool_manager *m =
+ container_of(kobj, struct ttm_pool_manager, kobj);
+ (void)m;
+}
+
+static ssize_t ttm_pool_store(struct kobject *kobj,
+ struct attribute *attr, const char *buffer, size_t size)
+{
+ struct ttm_pool_manager *m =
+ container_of(kobj, struct ttm_pool_manager, kobj);
+ int chars;
+ unsigned val;
+ chars = sscanf(buffer, "%u", &val);
+ if (chars == 0)
+ return size;
+
+ /* Convert kb to number of pages */
+ val = val / (PAGE_SIZE >> 10);
+
+ if (attr == &ttm_page_pool_max)
+ m->options.max_size = val;
+ else if (attr == &ttm_page_pool_small)
+ m->options.small = val;
+ else if (attr == &ttm_page_pool_alloc_size) {
+ if (val > NUM_PAGES_TO_ALLOC*8) {
+ printk(KERN_ERR "[ttm] Setting allocation size to %lu "
+ "is not allowed. Recomended size is "
+ "%lu\n",
+ NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
+ NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
+ return size;
+ } else if (val > NUM_PAGES_TO_ALLOC) {
+ printk(KERN_WARNING "[ttm] Setting allocation size to "
+ "larger than %lu is not recomended.\n",
+ NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
+ }
+ m->options.alloc_size = val;
+ }
+
+ return size;
+}
+
+static ssize_t ttm_pool_show(struct kobject *kobj,
+ struct attribute *attr, char *buffer)
+{
+ struct ttm_pool_manager *m =
+ container_of(kobj, struct ttm_pool_manager, kobj);
+ unsigned val = 0;
+
+ if (attr == &ttm_page_pool_max)
+ val = m->options.max_size;
+ else if (attr == &ttm_page_pool_small)
+ val = m->options.small;
+ else if (attr == &ttm_page_pool_alloc_size)
+ val = m->options.alloc_size;
+
+ val = val * (PAGE_SIZE >> 10);
+
+ return snprintf(buffer, PAGE_SIZE, "%u\n", val);
+}
+
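The store/show pair above round-trips through page counts: a value written through sysfs is taken as KiB and converted to pages, and converted back to KiB when read. A worked example, not from the patch, assuming 4 KiB pages (PAGE_SIZE >> 10 == 4):

	/* Illustrative only:
	 *   echo 8192 > pool_max_size  ->  val = 8192 / 4 = 2048 pages stored
	 *   cat pool_max_size          ->  2048 * 4 = 8192 (KiB) reported back
	 */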
+static const struct sysfs_ops ttm_pool_sysfs_ops = {
+ .show = &ttm_pool_show,
+ .store = &ttm_pool_store,
+};
+
+static struct kobj_type ttm_pool_kobj_type = {
+ .release = &ttm_pool_kobj_release,
+ .sysfs_ops = &ttm_pool_sysfs_ops,
+ .default_attrs = ttm_pool_attrs,
+};
+
+static struct ttm_pool_manager _manager = {
+ .page_alloc_inited = ATOMIC_INIT(0)
+};
+
+#ifndef CONFIG_X86
+static int set_pages_array_wb(struct page **pages, int addrinarray)
+{
+#ifdef TTM_HAS_AGP
+ int i;
+
+ for (i = 0; i < addrinarray; i++)
+ unmap_page_from_agp(pages[i]);
+#endif
+ return 0;
+}
+
+static int set_pages_array_wc(struct page **pages, int addrinarray)
+{
+#ifdef TTM_HAS_AGP
+ int i;
+
+ for (i = 0; i < addrinarray; i++)
+ map_page_into_agp(pages[i]);
+#endif
+ return 0;
+}
+
+static int set_pages_array_uc(struct page **pages, int addrinarray)
+{
+#ifdef TTM_HAS_AGP
+ int i;
+
+ for (i = 0; i < addrinarray; i++)
+ map_page_into_agp(pages[i]);
+#endif
+ return 0;
+}
+#endif
+
+/**
+ * Select the right pool for the requested caching state and ttm flags. */
+static struct ttm_page_pool *ttm_get_pool(int flags,
+ enum ttm_caching_state cstate)
+{
+ int pool_index;
+
+ if (cstate == tt_cached)
+ return NULL;
+
+ if (cstate == tt_wc)
+ pool_index = 0x0;
+ else
+ pool_index = 0x1;
+
+ if (flags & TTM_PAGE_FLAG_DMA32)
+ pool_index |= 0x2;
+
+ return &_manager.pools[pool_index];
+}
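ttm_get_pool() above encodes the pool choice in two bits: bit 0 selects uncached versus write-combined and bit 1 selects the DMA32 variant, matching the order of the pools[] union. A standalone sketch of the same mapping follows; it is illustrative only and the names are hypothetical, not part of the patch.

/* Mirrors the index math in ttm_get_pool(). */
enum { POOL_WC, POOL_UC, POOL_WC_DMA32, POOL_UC_DMA32 };

static int example_pool_index(int uncached, int dma32)
{
	int idx = uncached ? 0x1 : 0x0;	/* bit 0: caching state */

	if (dma32)
		idx |= 0x2;		/* bit 1: DMA32 restriction */
	return idx;			/* index into the pools[] union */
}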
+
+/* set memory back to wb and free the pages. */
+static void ttm_pages_put(struct page *pages[], unsigned npages)
+{
+ unsigned i;
+ if (set_pages_array_wb(pages, npages))
+ printk(KERN_ERR "[ttm] Failed to set %d pages to wb!\n",
+ npages);
+ for (i = 0; i < npages; ++i)
+ __free_page(pages[i]);
+}
+
+static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
+ unsigned freed_pages)
+{
+ pool->npages -= freed_pages;
+ pool->nfrees += freed_pages;
+}
+
+/**
+ * Free pages from the pool.
+ *
+ * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
+ * pages in one go.
+ *
+ * @pool: pool to free the pages from
+ * @nr_free: number of pages to free; FREE_ALL_PAGES frees every page in the pool
+ **/
+static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
+{
+ unsigned long irq_flags;
+ struct page *p;
+ struct page **pages_to_free;
+ unsigned freed_pages = 0,
+ npages_to_free = nr_free;
+
+ if (NUM_PAGES_TO_ALLOC < nr_free)
+ npages_to_free = NUM_PAGES_TO_ALLOC;
+
+ pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
+ GFP_KERNEL);
+ if (!pages_to_free) {
+ printk(KERN_ERR "Failed to allocate memory for pool free operation.\n");
+ return 0;
+ }
+
+restart:
+ spin_lock_irqsave(&pool->lock, irq_flags);
+
+ list_for_each_entry_reverse(p, &pool->list, lru) {
+ if (freed_pages >= npages_to_free)
+ break;
+
+ pages_to_free[freed_pages++] = p;
+ /* We can only remove NUM_PAGES_TO_ALLOC at a time. */
+ if (freed_pages >= NUM_PAGES_TO_ALLOC) {
+ /* remove range of pages from the pool */
+ __list_del(p->lru.prev, &pool->list);
+
+ ttm_pool_update_free_locked(pool, freed_pages);
+ /**
+ * Because changing page caching is costly
+ * we unlock the pool to prevent stalling.
+ */
+ spin_unlock_irqrestore(&pool->lock, irq_flags);
+
+ ttm_pages_put(pages_to_free, freed_pages);
+ if (likely(nr_free != FREE_ALL_PAGES))
+ nr_free -= freed_pages;
+
+ if (NUM_PAGES_TO_ALLOC >= nr_free)
+ npages_to_free = nr_free;
+ else
+ npages_to_free = NUM_PAGES_TO_ALLOC;
+
+ freed_pages = 0;
+
+ /* there are still pages to free, so restart the processing */
+ if (nr_free)
+ goto restart;
+
+ /* Not allowed to fall through or break because the code
+ * following the loop runs with the spinlock held, while we
+ * have already dropped it here.
+ */
+ goto out;
+
+ }
+ }
+
+ /* remove range of pages from the pool */
+ if (freed_pages) {
+ __list_del(&p->lru, &pool->list);
+
+ ttm_pool_update_free_locked(pool, freed_pages);
+ nr_free -= freed_pages;
+ }
+
+ spin_unlock_irqrestore(&pool->lock, irq_flags);
+
+ if (freed_pages)
+ ttm_pages_put(pages_to_free, freed_pages);
+out:
+ kfree(pages_to_free);
+ return nr_free;
+}
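ttm_page_pool_free() holds the pool spinlock only while detaching up to NUM_PAGES_TO_ALLOC pages, then drops it for the expensive caching change and page release before restarting. The generic sketch below shows that batched-drain pattern in isolation; it is illustrative only and not part of the patch.

/* Hypothetical helper: drain 'total' items in batches, doing the expensive
 * per-item work outside the (imagined) lock, as the function above does. */
static void example_drain_in_batches(unsigned total, unsigned batch)
{
	while (total) {
		unsigned n = total < batch ? total : batch;

		/* lock(); detach n items from the list; unlock(); */
		/* expensive work (caching change, freeing) happens unlocked */
		total -= n;
	}
}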
+
+/* Get a good estimate of how many pages are free in the pools */
+static int ttm_pool_get_num_unused_pages(void)
+{
+ unsigned i;
+ int total = 0;
+ for (i = 0; i < NUM_POOLS; ++i)
+ total += _manager.pools[i].npages;
+
+ return total;
+}
+
+/**
+ * Callback for mm to request that the pool reduce the number of pages held.
+ */
+static int ttm_pool_mm_shrink(int shrink_pages, gfp_t gfp_mask)
+{
+ static atomic_t start_pool = ATOMIC_INIT(0);
+ unsigned i;
+ unsigned pool_offset = atomic_add_return(1, &start_pool);
+ struct ttm_page_pool *pool;
+
+ pool_offset = pool_offset % NUM_POOLS;
+ /* select start pool in round robin fashion */
+ for (i = 0; i < NUM_POOLS; ++i) {
+ unsigned nr_free = shrink_pages;
+ if (shrink_pages == 0)
+ break;
+ pool = &_manager.pools[(i + pool_offset)%NUM_POOLS];
+ shrink_pages = ttm_page_pool_free(pool, nr_free);
+ }
+ /* return estimated number of unused pages in pool */
+ return ttm_pool_get_num_unused_pages();
+}
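The shrinker callback above advances a static counter on every invocation so that each pass starts at a different pool; over repeated calls the memory pressure is spread round-robin instead of always draining pool 0 first. A tiny sketch of that start-pool selection (illustrative, single-threaded, not the atomic version used above):

#define EXAMPLE_NUM_POOLS 4	/* matches NUM_POOLS above */

/* Hypothetical stand-in for the static start_pool counter. */
static unsigned example_next_start(unsigned *counter)
{
	return ++(*counter) % EXAMPLE_NUM_POOLS;	/* 1, 2, 3, 0, 1, ... */
}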
+
+static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
+{
+ manager->mm_shrink.shrink = &ttm_pool_mm_shrink;
+ manager->mm_shrink.seeks = 1;
+ register_shrinker(&manager->mm_shrink);
+}
+
+static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
+{
+ unregister_shrinker(&manager->mm_shrink);
+}
+
+static int ttm_set_pages_caching(struct page **pages,
+ enum ttm_caching_state cstate, unsigned cpages)
+{
+ int r = 0;
+ /* Set page caching */
+ switch (cstate) {
+ case tt_uncached:
+ r = set_pages_array_uc(pages, cpages);
+ if (r)
+ printk(KERN_ERR "[ttm] Failed to set %d pages to uc!\n",
+ cpages);
+ break;
+ case tt_wc:
+ r = set_pages_array_wc(pages, cpages);
+ if (r)
+ printk(KERN_ERR "[ttm] Failed to set %d pages to wc!\n",
+ cpages);
+ break;
+ default:
+ break;
+ }
+ return r;
+}
+
+/**
+ * Free the pages that failed to change caching state. If there are any pages
+ * that have already changed their caching state, put them back in the pool.
+ */
+static void ttm_handle_caching_state_failure(struct list_head *pages,
+ int ttm_flags, enum ttm_caching_state cstate,
+ struct page **failed_pages, unsigned cpages)
+{
+ unsigned i;
+ /* Failed pages have to be freed */
+ for (i = 0; i < cpages; ++i) {
+ list_del(&failed_pages[i]->lru);
+ __free_page(failed_pages[i]);
+ }
+}
+
+/**
+ * Allocate new pages with correct caching.
+ *
+ * This function is reentrant if the caller updates count according to the
+ * number of pages returned in the pages list.
+ */
+static int ttm_alloc_new_pages(struct list_head *pages, int gfp_flags,
+ int ttm_flags, enum ttm_caching_state cstate, unsigned count)
+{
+ struct page **caching_array;
+ struct page *p;
+ int r = 0;
+ unsigned i, cpages;
+ unsigned max_cpages = min(count,
+ (unsigned)(PAGE_SIZE/sizeof(struct page *)));
+
+ /* allocate array for page caching change */
+ caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
+
+ if (!caching_array) {
+ printk(KERN_ERR "[ttm] unable to allocate table for new pages.");
+ return -ENOMEM;
+ }
+
+ for (i = 0, cpages = 0; i < count; ++i) {
+ p = alloc_page(gfp_flags);
+
+ if (!p) {
+ printk(KERN_ERR "[ttm] unable to get page %u\n", i);
+
+ /* store already allocated pages in the pool after
+ * setting the caching state */
+ if (cpages) {
+ r = ttm_set_pages_caching(caching_array, cstate, cpages);
+ if (r)
+ ttm_handle_caching_state_failure(pages,
+ ttm_flags, cstate,
+ caching_array, cpages);
+ }
+ r = -ENOMEM;
+ goto out;
+ }
+
+#ifdef CONFIG_HIGHMEM
+ /* gfp flags of highmem pages should never include dma32,
+ * so we should be fine in that case
+ */
+ if (!PageHighMem(p))
+#endif
+ {
+ caching_array[cpages++] = p;
+ if (cpages == max_cpages) {
+
+ r = ttm_set_pages_caching(caching_array,
+ cstate, cpages);
+ if (r) {
+ ttm_handle_caching_state_failure(pages,
+ ttm_flags, cstate,
+ caching_array, cpages);
+ goto out;
+ }
+ cpages = 0;
+ }
+ }
+
+ list_add(&p->lru, pages);
+ }
+
+ if (cpages) {
+ r = ttm_set_pages_caching(caching_array, cstate, cpages);
+ if (r)
+ ttm_handle_caching_state_failure(pages,
+ ttm_flags, cstate,
+ caching_array, cpages);
+ }
+out:
+ kfree(caching_array);
+
+ return r;
+}
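ttm_alloc_new_pages() changes caching attributes in batches of at most PAGE_SIZE / sizeof(struct page *) pages, i.e. one page worth of pointers per caching call. A worked example of that batch size follows; it is illustrative only and assumes 4 KiB pages and 8-byte pointers, which gives 512 pages per batch.

#include <stdio.h>

int main(void)
{
	unsigned long page_size = 4096;	/* assumed PAGE_SIZE */
	unsigned long ptr_size = 8;	/* assumed sizeof(struct page *) */

	printf("caching changed in batches of up to %lu pages\n",
	       page_size / ptr_size);	/* prints 512 */
	return 0;
}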
+
+/**
+ * Fill the given pool if there aren't enough pages and the requested number
+ * of pages is small.
+ */
+static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
+ int ttm_flags, enum ttm_caching_state cstate, unsigned count,
+ unsigned long *irq_flags)
+{
+ struct page *p;
+ int r;
+ unsigned cpages = 0;
+ /*
+ * Only allow one pool fill operation at a time.
+ * If the pool doesn't have enough pages for the allocation, new pages
+ * are allocated from outside the pool.
+ */
+ if (pool->fill_lock)
+ return;
+
+ pool->fill_lock = true;
+
+ /* If the allocation request is small and there are not enough
+ * pages in the pool, we fill the pool first */
+ if (count < _manager.options.small
+ && count > pool->npages) {
+ struct list_head new_pages;
+ unsigned alloc_size = _manager.options.alloc_size;
+
+ /**
+ * Can't change page caching if in irqsave context. We have to
+ * drop the pool->lock.
+ */
+ spin_unlock_irqrestore(&pool->lock, *irq_flags);
+
+ INIT_LIST_HEAD(&new_pages);
+ r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags,
+ cstate, alloc_size);
+ spin_lock_irqsave(&pool->lock, *irq_flags);
+
+ if (!r) {
+ list_splice(&new_pages, &pool->list);
+ ++pool->nrefills;
+ pool->npages += alloc_size;
+ } else {
+ printk(KERN_ERR "[ttm] Failed to fill pool (%p).", pool);
+ /* If we have any pages left put them to the pool. */
+ list_for_each_entry(p, &pool->list, lru) {
+ ++cpages;
+ }
+ list_splice(&new_pages, &pool->list);
+ pool->npages += cpages;
+ }
+
+ }
+ pool->fill_lock = false;
+}
+
+/**
+ * Cut count number of pages from the pool and put them on the return list.
+ *
+ * @return count of pages still to be allocated to fill the request.
+ */
+static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
+ struct list_head *pages, int ttm_flags,
+ enum ttm_caching_state cstate, unsigned count)
+{
+ unsigned long irq_flags;
+ struct list_head *p;
+ unsigned i;
+
+ spin_lock_irqsave(&pool->lock, irq_flags);
+ ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count, &irq_flags);
+
+ if (count >= pool->npages) {
+ /* take all pages from the pool */
+ list_splice_init(&pool->list, pages);
+ count -= pool->npages;
+ pool->npages = 0;
+ goto out;
+ }
+ /* Find the last page to include for the requested number of pages.
+ * Walk the list from whichever end is closer to halve the search space. */
+ if (count <= pool->npages/2) {
+ i = 0;
+ list_for_each(p, &pool->list) {
+ if (++i == count)
+ break;
+ }
+ } else {
+ i = pool->npages + 1;
+ list_for_each_prev(p, &pool->list) {
+ if (--i == count)
+ break;
+ }
+ }
+ /* Cut count number of pages from pool */
+ list_cut_position(pages, &pool->list, p);
+ pool->npages -= count;
+ count = 0;
+out:
+ spin_unlock_irqrestore(&pool->lock, irq_flags);
+ return count;
+}
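When the request can be satisfied from the pool, the code above walks the page list from the head if the cut point lies in the first half and from the tail otherwise, so the search never visits more than half the list. A one-line sketch of that decision (illustrative, hypothetical helper, not part of the patch):

/* Walk forward when the cut point is in the first half of the list. */
static int example_walk_forward(unsigned count, unsigned npages)
{
	return count <= npages / 2;
}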
+
+/*
+ * On success the pages list will hold count correctly cached pages.
+ */
+int ttm_get_pages(struct list_head *pages, int flags,
+ enum ttm_caching_state cstate, unsigned count)
+{
+ struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
+ struct page *p = NULL;
+ int gfp_flags = 0;
+ int r;
+
+ /* set zero flag for page allocation if required */
+ if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
+ gfp_flags |= __GFP_ZERO;
+
+ /* No pool for cached pages */
+ if (pool == NULL) {
+ if (flags & TTM_PAGE_FLAG_DMA32)
+ gfp_flags |= GFP_DMA32;
+ else
+ gfp_flags |= __GFP_HIGHMEM;
+
+ for (r = 0; r < count; ++r) {
+ p = alloc_page(gfp_flags);
+ if (!p) {
+
+ printk(KERN_ERR "[ttm] unable to allocate page.");
+ return -ENOMEM;
+ }
+
+ list_add(&p->lru, pages);
+ }
+ return 0;
+ }
+
+
+ /* combine zero flag to pool flags */
+ gfp_flags |= pool->gfp_flags;
+
+ /* First we take pages from the pool */
+ count = ttm_page_pool_get_pages(pool, pages, flags, cstate, count);
+
+ /* clear the pages coming from the pool if requested */
+ if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
+ list_for_each_entry(p, pages, lru) {
+ clear_page(page_address(p));
+ }
+ }
+
+ /* If the pool didn't have enough pages, allocate new ones. */
+ if (count > 0) {
+ /* ttm_alloc_new_pages doesn't reference the pool so we can run
+ * multiple requests in parallel.
+ */
+ r = ttm_alloc_new_pages(pages, gfp_flags, flags, cstate, count);
+ if (r) {
+ /* If there are any pages in the list, put them back in
+ * the pool. */
+ printk(KERN_ERR "[ttm] Failed to allocate extra pages "
+ "for large request.");
+ ttm_put_pages(pages, 0, flags, cstate);
+ return r;
+ }
+ }
+
+
+ return 0;
+}
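Callers hand ttm_get_pages() an empty list head and get back correctly cached pages linked through page->lru; ttm_put_pages() returns them to the matching pool. The sketch below mirrors how the ttm_tt.c hunk later in this patch uses the API for a single page; error handling is trimmed and the helper name is hypothetical.

static struct page *example_grab_one_page(int flags, enum ttm_caching_state cstate)
{
	struct list_head head;
	struct page *p = NULL;

	INIT_LIST_HEAD(&head);
	if (ttm_get_pages(&head, flags, cstate, 1) == 0)
		p = list_first_entry(&head, struct page, lru);

	/* when done, give it back: ttm_put_pages(&head, 1, flags, cstate); */
	return p;
}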
+
+/* Put all pages in the pages list into the correct pool to wait for reuse */
+void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags,
+ enum ttm_caching_state cstate)
+{
+ unsigned long irq_flags;
+ struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
+ struct page *p, *tmp;
+
+ if (pool == NULL) {
+ /* No pool for this memory type so free the pages */
+
+ list_for_each_entry_safe(p, tmp, pages, lru) {
+ __free_page(p);
+ }
+ /* Make the pages list empty */
+ INIT_LIST_HEAD(pages);
+ return;
+ }
+ if (page_count == 0) {
+ list_for_each_entry_safe(p, tmp, pages, lru) {
+ ++page_count;
+ }
+ }
+
+ spin_lock_irqsave(&pool->lock, irq_flags);
+ list_splice_init(pages, &pool->list);
+ pool->npages += page_count;
+ /* Check that we don't go over the pool limit */
+ page_count = 0;
+ if (pool->npages > _manager.options.max_size) {
+ page_count = pool->npages - _manager.options.max_size;
+ /* free at least NUM_PAGES_TO_ALLOC pages
+ * to reduce calls to set_memory_wb */
+ if (page_count < NUM_PAGES_TO_ALLOC)
+ page_count = NUM_PAGES_TO_ALLOC;
+ }
+ spin_unlock_irqrestore(&pool->lock, irq_flags);
+ if (page_count)
+ ttm_page_pool_free(pool, page_count);
+}
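When returning pages pushes a pool over options.max_size, the excess is trimmed, but never fewer than NUM_PAGES_TO_ALLOC pages at once so the costly set_memory_wb() work is amortized. A worked example follows; it is illustrative only and the batch value of 512 is an assumption, not taken from the patch.

#include <stdio.h>

int main(void)
{
	unsigned npages = 16400, max_size = 16384;
	unsigned batch = 512;			/* assumed NUM_PAGES_TO_ALLOC */
	unsigned to_free = npages > max_size ? npages - max_size : 0;

	if (to_free && to_free < batch)
		to_free = batch;		/* free at least one full batch */
	printf("pages to free: %u\n", to_free);	/* 512, not just 16 */
	return 0;
}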
+
+static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
+ char *name)
+{
+ spin_lock_init(&pool->lock);
+ pool->fill_lock = false;
+ INIT_LIST_HEAD(&pool->list);
+ pool->npages = pool->nfrees = 0;
+ pool->gfp_flags = flags;
+ pool->name = name;
+}
+
+int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
+{
+ int ret;
+ if (atomic_add_return(1, &_manager.page_alloc_inited) > 1)
+ return 0;
+
+ printk(KERN_INFO "[ttm] Initializing pool allocator.\n");
+
+ ttm_page_pool_init_locked(&_manager.wc_pool, GFP_HIGHUSER, "wc");
+
+ ttm_page_pool_init_locked(&_manager.uc_pool, GFP_HIGHUSER, "uc");
+
+ ttm_page_pool_init_locked(&_manager.wc_pool_dma32, GFP_USER | GFP_DMA32,
+ "wc dma");
+
+ ttm_page_pool_init_locked(&_manager.uc_pool_dma32, GFP_USER | GFP_DMA32,
+ "uc dma");
+
+ _manager.options.max_size = max_pages;
+ _manager.options.small = SMALL_ALLOCATION;
+ _manager.options.alloc_size = NUM_PAGES_TO_ALLOC;
+
+ kobject_init(&_manager.kobj, &ttm_pool_kobj_type);
+ ret = kobject_add(&_manager.kobj, &glob->kobj, "pool");
+ if (unlikely(ret != 0)) {
+ kobject_put(&_manager.kobj);
+ return ret;
+ }
+
+ ttm_pool_mm_shrink_init(&_manager);
+
+ return 0;
+}
+
+void ttm_page_alloc_fini(void)
+{
+ int i;
+
+ if (atomic_sub_return(1, &_manager.page_alloc_inited) > 0)
+ return;
+
+ printk(KERN_INFO "[ttm] Finilizing pool allocator.\n");
+ ttm_pool_mm_shrink_fini(&_manager);
+
+ for (i = 0; i < NUM_POOLS; ++i)
+ ttm_page_pool_free(&_manager.pools[i], FREE_ALL_PAGES);
+
+ kobject_put(&_manager.kobj);
+}
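ttm_page_alloc_init() and ttm_page_alloc_fini() share the global manager between drivers through a simple reference count: only the first init sets the pools up and only the last fini tears them down. A minimal sketch of that pattern (illustrative, single-threaded, not the atomic version used above):

static int example_users;	/* stands in for page_alloc_inited */

static int example_init(void) { return ++example_users == 1; }	/* true: do real init */
static int example_fini(void) { return --example_users == 0; }	/* true: do real teardown */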
+
+int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
+{
+ struct ttm_page_pool *p;
+ unsigned i;
+ char *h[] = {"pool", "refills", "pages freed", "size"};
+ if (atomic_read(&_manager.page_alloc_inited) == 0) {
+ seq_printf(m, "No pool allocator running.\n");
+ return 0;
+ }
+ seq_printf(m, "%6s %12s %13s %8s\n",
+ h[0], h[1], h[2], h[3]);
+ for (i = 0; i < NUM_POOLS; ++i) {
+ p = &_manager.pools[i];
+
+ seq_printf(m, "%6s %12ld %13ld %8d\n",
+ p->name, p->nrefills,
+ p->nfrees, p->npages);
+ }
+ return 0;
+}
+EXPORT_SYMBOL(ttm_page_alloc_debugfs);
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index d5fd5b8faeb3..a7bab87a548b 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -39,6 +39,7 @@
#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
+#include "ttm/ttm_page_alloc.h"
static int ttm_tt_swapin(struct ttm_tt *ttm);
@@ -56,21 +57,6 @@ static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
ttm->pages = NULL;
}
-static struct page *ttm_tt_alloc_page(unsigned page_flags)
-{
- gfp_t gfp_flags = GFP_USER;
-
- if (page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
- gfp_flags |= __GFP_ZERO;
-
- if (page_flags & TTM_PAGE_FLAG_DMA32)
- gfp_flags |= __GFP_DMA32;
- else
- gfp_flags |= __GFP_HIGHMEM;
-
- return alloc_page(gfp_flags);
-}
-
static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
{
int write;
@@ -111,15 +97,21 @@ static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
{
struct page *p;
+ struct list_head h;
struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
int ret;
while (NULL == (p = ttm->pages[index])) {
- p = ttm_tt_alloc_page(ttm->page_flags);
- if (!p)
+ INIT_LIST_HEAD(&h);
+
+ ret = ttm_get_pages(&h, ttm->page_flags, ttm->caching_state, 1);
+
+ if (ret != 0)
return NULL;
+ p = list_first_entry(&h, struct page, lru);
+
ret = ttm_mem_global_alloc_page(mem_glob, p, false, false);
if (unlikely(ret != 0))
goto out_err;
@@ -228,10 +220,10 @@ static int ttm_tt_set_caching(struct ttm_tt *ttm,
if (ttm->caching_state == c_state)
return 0;
- if (c_state != tt_cached) {
- ret = ttm_tt_populate(ttm);
- if (unlikely(ret != 0))
- return ret;
+ if (ttm->state == tt_unpopulated) {
+ /* Change caching but don't populate */
+ ttm->caching_state = c_state;
+ return 0;
}
if (ttm->caching_state == tt_cached)
@@ -282,13 +274,17 @@ EXPORT_SYMBOL(ttm_tt_set_placement_caching);
static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
{
int i;
+ unsigned count = 0;
+ struct list_head h;
struct page *cur_page;
struct ttm_backend *be = ttm->be;
+ INIT_LIST_HEAD(&h);
+
if (be)
be->func->clear(be);
- (void)ttm_tt_set_caching(ttm, tt_cached);
for (i = 0; i < ttm->num_pages; ++i) {
+
cur_page = ttm->pages[i];
ttm->pages[i] = NULL;
if (cur_page) {
@@ -298,9 +294,11 @@ static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
"Leaking pages.\n");
ttm_mem_global_free_page(ttm->glob->mem_glob,
cur_page);
- __free_page(cur_page);
+ list_add(&cur_page->lru, &h);
+ count++;
}
}
+ ttm_put_pages(&h, count, ttm->page_flags, ttm->caching_state);
ttm->state = tt_unpopulated;
ttm->first_himem_page = ttm->num_pages;
ttm->last_lomem_page = -1;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 825ebe3d89d5..c4f5114aee7c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -137,9 +137,6 @@ int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
struct ttm_mem_type_manager *man)
{
- struct vmw_private *dev_priv =
- container_of(bdev, struct vmw_private, bdev);
-
switch (type) {
case TTM_PL_SYSTEM:
/* System memory */
@@ -151,11 +148,7 @@ int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
case TTM_PL_VRAM:
/* "On-card" video ram */
man->gpu_offset = 0;
- man->io_offset = dev_priv->vram_start;
- man->io_size = dev_priv->vram_size;
- man->flags = TTM_MEMTYPE_FLAG_FIXED |
- TTM_MEMTYPE_FLAG_NEEDS_IOREMAP | TTM_MEMTYPE_FLAG_MAPPABLE;
- man->io_addr = NULL;
+ man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
man->available_caching = TTM_PL_MASK_CACHING;
man->default_caching = TTM_PL_FLAG_WC;
break;
@@ -193,6 +186,42 @@ static void vmw_swap_notify(struct ttm_buffer_object *bo)
vmw_dmabuf_gmr_unbind(bo);
}
+static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+ struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);
+
+ mem->bus.addr = NULL;
+ mem->bus.is_iomem = false;
+ mem->bus.offset = 0;
+ mem->bus.size = mem->num_pages << PAGE_SHIFT;
+ mem->bus.base = 0;
+ if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+ return -EINVAL;
+ switch (mem->mem_type) {
+ case TTM_PL_SYSTEM:
+ /* System memory */
+ return 0;
+ case TTM_PL_VRAM:
+ mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+ mem->bus.base = dev_priv->vram_start;
+ mem->bus.is_iomem = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void vmw_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+}
+
+static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
+{
+ return 0;
+}
+
/**
* FIXME: We're using the old vmware polling method to sync.
* Do this with fences instead.
@@ -248,5 +277,8 @@ struct ttm_bo_driver vmw_bo_driver = {
.sync_obj_unref = vmw_sync_obj_unref,
.sync_obj_ref = vmw_sync_obj_ref,
.move_notify = vmw_move_notify,
- .swap_notify = vmw_swap_notify
+ .swap_notify = vmw_swap_notify,
+ .fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
+ .io_mem_reserve = &vmw_ttm_io_mem_reserve,
+ .io_mem_free = &vmw_ttm_io_mem_free,
};
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 0897359b3e4e..dbd36b8910cf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -570,7 +570,7 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
* Put BO in VRAM, only if there is space.
*/
- ret = ttm_bo_validate(bo, &vmw_vram_sys_placement, true, false);
+ ret = ttm_bo_validate(bo, &vmw_vram_sys_placement, true, false, false);
if (unlikely(ret == -ERESTARTSYS))
return ret;
@@ -590,7 +590,7 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
* previous contents.
*/
- ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
+ ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false, false);
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index a93367041cdc..7421aaad8d09 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -559,8 +559,13 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
info->pixmap.scan_align = 1;
#endif
- info->aperture_base = vmw_priv->vram_start;
- info->aperture_size = vmw_priv->vram_size;
+ info->apertures = alloc_apertures(1);
+ if (!info->apertures) {
+ ret = -ENOMEM;
+ goto err_aper;
+ }
+ info->apertures->ranges[0].base = vmw_priv->vram_start;
+ info->apertures->ranges[0].size = vmw_priv->vram_size;
/*
* Dirty & Deferred IO
@@ -580,6 +585,7 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
err_defio:
fb_deferred_io_cleanup(info);
+err_aper:
ttm_bo_kunmap(&par->map);
err_unref:
ttm_bo_unref((struct ttm_buffer_object **)&par->vmw_bo);
@@ -628,7 +634,7 @@ int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv,
if (unlikely(ret != 0))
return ret;
- ret = ttm_bo_validate(bo, &vmw_sys_placement, false, false);
+ ret = ttm_bo_validate(bo, &vmw_sys_placement, false, false, false);
ttm_bo_unreserve(bo);
return ret;
@@ -652,7 +658,7 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
if (unlikely(ret != 0))
goto err_unlock;
- ret = ttm_bo_validate(bo, &ne_placement, false, false);
+ ret = ttm_bo_validate(bo, &ne_placement, false, false, false);
ttm_bo_unreserve(bo);
err_unlock:
ttm_write_unlock(&vmw_priv->active_master->lock);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 31f9afed0a63..bbc7c4c30bc7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -752,14 +752,8 @@ err_not_scanout:
return NULL;
}
-static int vmw_kms_fb_changed(struct drm_device *dev)
-{
- return 0;
-}
-
static struct drm_mode_config_funcs vmw_kms_funcs = {
.fb_create = vmw_kms_fb_create,
- .fb_changed = vmw_kms_fb_changed,
};
int vmw_kms_init(struct vmw_private *dev_priv)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index 5b6eabeb7f51..ad566c85b075 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -118,7 +118,7 @@ static int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
if (pin)
overlay_placement = &vmw_vram_ne_placement;
- ret = ttm_bo_validate(bo, overlay_placement, interruptible, false);
+ ret = ttm_bo_validate(bo, overlay_placement, interruptible, false, false);
ttm_bo_unreserve(bo);
diff --git a/drivers/gpu/vga/Kconfig b/drivers/gpu/vga/Kconfig
index 61ab4daf0bbb..8d0e31a22027 100644
--- a/drivers/gpu/vga/Kconfig
+++ b/drivers/gpu/vga/Kconfig
@@ -18,12 +18,12 @@ config VGA_ARB_MAX_GPUS
multiple GPUS. The overhead for each GPU is very small.
config VGA_SWITCHEROO
- bool "Laptop Hybrid Grapics - GPU switching support"
+ bool "Laptop Hybrid Graphics - GPU switching support"
depends on X86
depends on ACPI
help
- Many laptops released in 2008/9/10 have two gpus with a multiplxer
+ Many laptops released in 2008/9/10 have two GPUs with a multiplexer
to switch between them. This adds support for dynamic switching when
X isn't running and delayed switching until the next logoff. This
- features is called hybrid graphics, ATI PowerXpress, and Nvidia
+ feature is called hybrid graphics, ATI PowerXpress, and Nvidia
HybridPower.
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 71d4c0703629..132278fa6240 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -86,6 +86,12 @@ config HID_BELKIN
---help---
Support for Belkin Flip KVM and Wireless keyboard.
+config HID_CANDO
+ tristate "Cando dual touch panel"
+ depends on USB_HID
+ ---help---
+ Support for Cando dual touch panel.
+
config HID_CHERRY
tristate "Cherry" if EMBEDDED
depends on USB_HID
@@ -100,6 +106,21 @@ config HID_CHICONY
---help---
Support for Chicony Tactical pad.
+config HID_PRODIKEYS
+ tristate "Prodikeys PC-MIDI Keyboard support"
+ depends on USB_HID && SND
+ select SND_RAWMIDI
+ ---help---
+ Support for the Prodikeys PC-MIDI Keyboard device.
+ Say Y here to enable support for this device.
+ The Prodikeys PC-MIDI acts as a USB Audio device, with one MIDI
+ input and one MIDI output. These MIDI jacks appear as
+ a sound "card" in the ALSA sound system.
+ Note: if you say N here, this device will still function as a basic
+ multimedia keyboard, but will lack support for the musical keyboard
+ and some additional multimedia keys.
+
config HID_CYPRESS
tristate "Cypress" if EMBEDDED
depends on USB_HID
@@ -108,9 +129,8 @@ config HID_CYPRESS
Support for cypress mouse and barcode readers.
config HID_DRAGONRISE
- tristate "DragonRise Inc. support" if EMBEDDED
+ tristate "DragonRise Inc. support"
depends on USB_HID
- default !EMBEDDED
---help---
Say Y here if you have DragonRise Inc.game controllers.
@@ -122,6 +142,12 @@ config DRAGONRISE_FF
Say Y here if you want to enable force feedback support for DragonRise Inc.
game controllers.
+config HID_EGALAX
+ tristate "eGalax multi-touch panel"
+ depends on USB_HID
+ ---help---
+ Support for the eGalax dual-touch panel.
+
config HID_EZKEY
tristate "Ezkey" if EMBEDDED
depends on USB_HID
@@ -137,16 +163,14 @@ config HID_KYE
Support for Kye/Genius Ergo Mouse.
config HID_GYRATION
- tristate "Gyration" if EMBEDDED
+ tristate "Gyration"
depends on USB_HID
- default !EMBEDDED
---help---
Support for Gyration remote control.
config HID_TWINHAN
- tristate "Twinhan" if EMBEDDED
+ tristate "Twinhan"
depends on USB_HID
- default !EMBEDDED
---help---
Support for Twinhan IR remote control.
@@ -233,16 +257,14 @@ config HID_NTRIG
Support for N-Trig touch screen.
config HID_ORTEK
- tristate "Ortek" if EMBEDDED
+ tristate "Ortek"
depends on USB_HID
- default !EMBEDDED
---help---
Support for Ortek WKB-2000 wireless keyboard + mouse trackpad.
config HID_PANTHERLORD
- tristate "Pantherlord support" if EMBEDDED
+ tristate "Pantherlord support"
depends on USB_HID
- default !EMBEDDED
---help---
Say Y here if you have a PantherLord/GreenAsia based game controller
or adapter.
@@ -256,29 +278,98 @@ config PANTHERLORD_FF
or adapter and want to enable force feedback support for it.
config HID_PETALYNX
- tristate "Petalynx" if EMBEDDED
+ tristate "Petalynx"
depends on USB_HID
- default !EMBEDDED
---help---
Support for Petalynx Maxter remote control.
+config HID_PICOLCD
+ tristate "PicoLCD (graphic version)"
+ depends on USB_HID
+ ---help---
+ This provides support for Minibox PicoLCD devices; currently
+ only the graphical ones are supported.
+
+ This includes support for the following device features:
+ - Keypad
+ - Switching between Firmware and Flash mode
+ - EEProm / Flash access (via debugfs)
+ Features selectively enabled:
+ - Framebuffer for monochrome 256x64 display
+ - Backlight control
+ - Contrast control
+ - General purpose outputs
+ Features that are not (yet) supported:
+ - IR
+
+config HID_PICOLCD_FB
+ bool "Framebuffer support" if EMBEDDED
+ default !EMBEDDED
+ depends on HID_PICOLCD
+ depends on HID_PICOLCD=FB || FB=y
+ select FB_DEFERRED_IO
+ select FB_SYS_FILLRECT
+ select FB_SYS_COPYAREA
+ select FB_SYS_IMAGEBLIT
+ select FB_SYS_FOPS
+ ---help---
+ Provide access to PicoLCD's 256x64 monochrome display via a
+ framebuffer device.
+
+config HID_PICOLCD_BACKLIGHT
+ bool "Backlight control" if EMBEDDED
+ default !EMBEDDED
+ depends on HID_PICOLCD
+ depends on HID_PICOLCD=BACKLIGHT_CLASS_DEVICE || BACKLIGHT_CLASS_DEVICE=y
+ ---help---
+ Provide access to PicoLCD's backlight control via backlight
+ class.
+
+config HID_PICOLCD_LCD
+ bool "Contrast control" if EMBEDDED
+ default !EMBEDDED
+ depends on HID_PICOLCD
+ depends on HID_PICOLCD=LCD_CLASS_DEVICE || LCD_CLASS_DEVICE=y
+ ---help---
+ Provide access to PicoLCD's LCD contrast via lcd class.
+
+config HID_PICOLCD_LEDS
+ bool "GPO via leds class" if EMBEDDED
+ default !EMBEDDED
+ depends on HID_PICOLCD
+ depends on HID_PICOLCD=LEDS_CLASS || LEDS_CLASS=y
+ ---help---
+ Provide access to PicoLCD's GPO pins via leds class.
+
config HID_QUANTA
tristate "Quanta Optical Touch"
depends on USB_HID
---help---
Support for Quanta Optical Touch dual-touch panels.
+config HID_ROCCAT
+ tristate "Roccat special event support"
+ depends on USB_HID
+ ---help---
+ Support for Roccat special events.
+ Say Y here if you have a Roccat mouse or keyboard and want OSD or
+ macro execution support.
+
+config HID_ROCCAT_KONE
+ tristate "Roccat Kone Mouse support"
+ depends on USB_HID
+ ---help---
+ Support for Roccat Kone mouse.
+
config HID_SAMSUNG
- tristate "Samsung" if EMBEDDED
+ tristate "Samsung"
depends on USB_HID
- default !EMBEDDED
---help---
- Support for Samsung InfraRed remote control.
+ Support for Samsung InfraRed remote control or keyboards.
config HID_SONY
- tristate "Sony" if EMBEDDED
+ tristate "Sony"
depends on USB_HID
- default !EMBEDDED
---help---
Support for Sony PS3 controller.
@@ -289,16 +380,14 @@ config HID_STANTUM
Support for Stantum multitouch panel.
config HID_SUNPLUS
- tristate "Sunplus" if EMBEDDED
+ tristate "Sunplus"
depends on USB_HID
- default !EMBEDDED
---help---
Support for Sunplus wireless desktop.
config HID_GREENASIA
- tristate "GreenAsia (Product ID 0x12) support" if EMBEDDED
+ tristate "GreenAsia (Product ID 0x12) support"
depends on USB_HID
- default !EMBEDDED
---help---
Say Y here if you have a GreenAsia (Product ID 0x12) based game
controller or adapter.
@@ -313,9 +402,8 @@ config GREENASIA_FF
and want to enable force feedback support for it.
config HID_SMARTJOYPLUS
- tristate "SmartJoy PLUS PS2/USB adapter support" if EMBEDDED
+ tristate "SmartJoy PLUS PS2/USB adapter support"
depends on USB_HID
- default !EMBEDDED
---help---
Support for SmartJoy PLUS PS2/USB adapter.
@@ -328,16 +416,14 @@ config SMARTJOYPLUS_FF
enable force feedback support for it.
config HID_TOPSEED
- tristate "TopSeed Cyberlink remote control support" if EMBEDDED
+ tristate "TopSeed Cyberlink remote control support"
depends on USB_HID
- default !EMBEDDED
---help---
- Say Y if you have a TopSeed Cyberlink remote control.
+ Say Y if you have a TopSeed Cyberlink or BTC Emprex remote control.
config HID_THRUSTMASTER
- tristate "ThrustMaster devices support" if EMBEDDED
+ tristate "ThrustMaster devices support"
depends on USB_HID
- default !EMBEDDED
---help---
Say Y here if you have a THRUSTMASTER FireStore Dual Power 2 or
a THRUSTMASTER Ferrari GT Rumble Wheel.
@@ -357,10 +443,17 @@ config HID_WACOM
---help---
Support for Wacom Graphire Bluetooth tablet.
+config HID_WACOM_POWER_SUPPLY
+ bool "Wacom Bluetooth devices power supply status support"
+ depends on HID_WACOM
+ select POWER_SUPPLY
+ ---help---
+ Say Y here if you want to enable power supply status monitoring for
+ Wacom Bluetooth devices.
+
config HID_ZEROPLUS
- tristate "Zeroplus based game controller support" if EMBEDDED
+ tristate "Zeroplus based game controller support"
depends on USB_HID
- default !EMBEDDED
---help---
Say Y here if you have a Zeroplus based game controller.
@@ -372,6 +465,12 @@ config ZEROPLUS_FF
Say Y here if you have a Zeroplus based game controller and want
to have force feedback support for it.
+config HID_ZYDACRON
+ tristate "Zydacron remote control support"
+ depends on USB_HID
+ ---help---
+ Support for Zydacron remote control.
+
endmenu
endif # HID_SUPPORT
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 0b2618f092ca..987fa0627367 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -26,10 +26,12 @@ obj-$(CONFIG_HID_3M_PCT) += hid-3m-pct.o
obj-$(CONFIG_HID_A4TECH) += hid-a4tech.o
obj-$(CONFIG_HID_APPLE) += hid-apple.o
obj-$(CONFIG_HID_BELKIN) += hid-belkin.o
+obj-$(CONFIG_HID_CANDO) += hid-cando.o
obj-$(CONFIG_HID_CHERRY) += hid-cherry.o
obj-$(CONFIG_HID_CHICONY) += hid-chicony.o
obj-$(CONFIG_HID_CYPRESS) += hid-cypress.o
obj-$(CONFIG_HID_DRAGONRISE) += hid-drff.o
+obj-$(CONFIG_HID_EGALAX) += hid-egalax.o
obj-$(CONFIG_HID_EZKEY) += hid-ezkey.o
obj-$(CONFIG_HID_GYRATION) += hid-gyration.o
obj-$(CONFIG_HID_KENSINGTON) += hid-kensington.o
@@ -41,9 +43,13 @@ obj-$(CONFIG_HID_MONTEREY) += hid-monterey.o
obj-$(CONFIG_HID_MOSART) += hid-mosart.o
obj-$(CONFIG_HID_NTRIG) += hid-ntrig.o
obj-$(CONFIG_HID_ORTEK) += hid-ortek.o
+obj-$(CONFIG_HID_PRODIKEYS) += hid-prodikeys.o
obj-$(CONFIG_HID_QUANTA) += hid-quanta.o
obj-$(CONFIG_HID_PANTHERLORD) += hid-pl.o
obj-$(CONFIG_HID_PETALYNX) += hid-petalynx.o
+obj-$(CONFIG_HID_PICOLCD) += hid-picolcd.o
+obj-$(CONFIG_HID_ROCCAT) += hid-roccat.o
+obj-$(CONFIG_HID_ROCCAT_KONE) += hid-roccat-kone.o
obj-$(CONFIG_HID_SAMSUNG) += hid-samsung.o
obj-$(CONFIG_HID_SMARTJOYPLUS) += hid-sjoy.o
obj-$(CONFIG_HID_SONY) += hid-sony.o
@@ -54,6 +60,7 @@ obj-$(CONFIG_HID_THRUSTMASTER) += hid-tmff.o
obj-$(CONFIG_HID_TOPSEED) += hid-topseed.o
obj-$(CONFIG_HID_TWINHAN) += hid-twinhan.o
obj-$(CONFIG_HID_ZEROPLUS) += hid-zpff.o
+obj-$(CONFIG_HID_ZYDACRON) += hid-zydacron.o
obj-$(CONFIG_HID_WACOM) += hid-wacom.o
obj-$(CONFIG_USB_HID) += usbhid/
diff --git a/drivers/hid/hid-3m-pct.c b/drivers/hid/hid-3m-pct.c
index c31e0be8ccea..2a0d56b7a02b 100644
--- a/drivers/hid/hid-3m-pct.c
+++ b/drivers/hid/hid-3m-pct.c
@@ -1,7 +1,7 @@
/*
* HID driver for 3M PCT multitouch panels
*
- * Copyright (c) 2009 Stephane Chatty <chatty@enac.fr>
+ * Copyright (c) 2009-2010 Stephane Chatty <chatty@enac.fr>
*
*/
@@ -25,7 +25,7 @@ MODULE_LICENSE("GPL");
#include "hid-ids.h"
struct mmm_finger {
- __s32 x, y;
+ __s32 x, y, w, h;
__u8 rank;
bool touch, valid;
};
@@ -82,7 +82,18 @@ static int mmm_input_mapping(struct hid_device *hdev, struct hid_input *hi,
/* touchscreen emulation */
hid_map_usage(hi, usage, bit, max, EV_KEY, BTN_TOUCH);
return 1;
+ case HID_DG_WIDTH:
+ hid_map_usage(hi, usage, bit, max,
+ EV_ABS, ABS_MT_TOUCH_MAJOR);
+ return 1;
+ case HID_DG_HEIGHT:
+ hid_map_usage(hi, usage, bit, max,
+ EV_ABS, ABS_MT_TOUCH_MINOR);
+ input_set_abs_params(hi->input, ABS_MT_ORIENTATION,
+ 1, 1, 0, 0);
+ return 1;
case HID_DG_CONTACTID:
+ field->logical_maximum = 59;
hid_map_usage(hi, usage, bit, max,
EV_ABS, ABS_MT_TRACKING_ID);
return 1;
@@ -128,9 +139,15 @@ static void mmm_filter_event(struct mmm_data *md, struct input_dev *input)
/* this finger is just placeholder data, ignore */
} else if (f->touch) {
/* this finger is on the screen */
+ int wide = (f->w > f->h);
input_event(input, EV_ABS, ABS_MT_TRACKING_ID, i);
input_event(input, EV_ABS, ABS_MT_POSITION_X, f->x);
input_event(input, EV_ABS, ABS_MT_POSITION_Y, f->y);
+ input_event(input, EV_ABS, ABS_MT_ORIENTATION, wide);
+ input_event(input, EV_ABS, ABS_MT_TOUCH_MAJOR,
+ wide ? f->w : f->h);
+ input_event(input, EV_ABS, ABS_MT_TOUCH_MINOR,
+ wide ? f->h : f->w);
input_mt_sync(input);
/*
* touchscreen emulation: maintain the age rank
@@ -197,6 +214,14 @@ static int mmm_event(struct hid_device *hid, struct hid_field *field,
case HID_DG_CONFIDENCE:
md->valid = value;
break;
+ case HID_DG_WIDTH:
+ if (md->valid)
+ md->f[md->curid].w = value;
+ break;
+ case HID_DG_HEIGHT:
+ if (md->valid)
+ md->f[md->curid].h = value;
+ break;
case HID_DG_CONTACTID:
if (md->valid) {
md->curid = value;
@@ -255,6 +280,7 @@ static void mmm_remove(struct hid_device *hdev)
static const struct hid_device_id mmm_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_3M, USB_DEVICE_ID_3M1968) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_3M, USB_DEVICE_ID_3M2256) },
{ }
};
MODULE_DEVICE_TABLE(hid, mmm_devices);
@@ -287,5 +313,4 @@ static void __exit mmm_exit(void)
module_init(mmm_init);
module_exit(mmm_exit);
-MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-cando.c b/drivers/hid/hid-cando.c
new file mode 100644
index 000000000000..4267a6fdc277
--- /dev/null
+++ b/drivers/hid/hid-cando.c
@@ -0,0 +1,272 @@
+/*
+ * HID driver for Cando dual-touch panels
+ *
+ * Copyright (c) 2010 Stephane Chatty <chatty@enac.fr>
+ *
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+MODULE_AUTHOR("Stephane Chatty <chatty@enac.fr>");
+MODULE_DESCRIPTION("Cando dual-touch panel");
+MODULE_LICENSE("GPL");
+
+#include "hid-ids.h"
+
+struct cando_data {
+ __u16 x, y;
+ __u8 id;
+ __s8 oldest; /* id of the oldest finger in previous frame */
+ bool valid; /* valid finger data, or just placeholder? */
+ bool first; /* is this the first finger in this frame? */
+ __s8 firstid; /* id of the first finger in the frame */
+ __u16 firstx, firsty; /* (x, y) of the first finger in the frame */
+};
+
+static int cando_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+ struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ switch (usage->hid & HID_USAGE_PAGE) {
+
+ case HID_UP_GENDESK:
+ switch (usage->hid) {
+ case HID_GD_X:
+ hid_map_usage(hi, usage, bit, max,
+ EV_ABS, ABS_MT_POSITION_X);
+ /* touchscreen emulation */
+ input_set_abs_params(hi->input, ABS_X,
+ field->logical_minimum,
+ field->logical_maximum, 0, 0);
+ return 1;
+ case HID_GD_Y:
+ hid_map_usage(hi, usage, bit, max,
+ EV_ABS, ABS_MT_POSITION_Y);
+ /* touchscreen emulation */
+ input_set_abs_params(hi->input, ABS_Y,
+ field->logical_minimum,
+ field->logical_maximum, 0, 0);
+ return 1;
+ }
+ return 0;
+
+ case HID_UP_DIGITIZER:
+ switch (usage->hid) {
+ case HID_DG_TIPSWITCH:
+ case HID_DG_CONTACTMAX:
+ return -1;
+ case HID_DG_INRANGE:
+ /* touchscreen emulation */
+ hid_map_usage(hi, usage, bit, max, EV_KEY, BTN_TOUCH);
+ return 1;
+ case HID_DG_CONTACTID:
+ hid_map_usage(hi, usage, bit, max,
+ EV_ABS, ABS_MT_TRACKING_ID);
+ return 1;
+ }
+ return 0;
+ }
+
+ return 0;
+}
+
+static int cando_input_mapped(struct hid_device *hdev, struct hid_input *hi,
+ struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ if (usage->type == EV_KEY || usage->type == EV_ABS)
+ clear_bit(usage->code, *bit);
+
+ return 0;
+}
+
+/*
+ * this function is called when a whole finger has been parsed,
+ * so that it can decide what to send to the input layer.
+ */
+static void cando_filter_event(struct cando_data *td, struct input_dev *input)
+{
+ td->first = !td->first; /* touchscreen emulation */
+
+ if (!td->valid) {
+ /*
+ * touchscreen emulation: if this is the second finger and
+ * the first was valid, the first was the oldest; if the
+ * first was not valid and there was a valid finger in the
+ * previous frame, this is a release.
+ */
+ if (td->first) {
+ td->firstid = -1;
+ } else if (td->firstid >= 0) {
+ input_event(input, EV_ABS, ABS_X, td->firstx);
+ input_event(input, EV_ABS, ABS_Y, td->firsty);
+ td->oldest = td->firstid;
+ } else if (td->oldest >= 0) {
+ input_event(input, EV_KEY, BTN_TOUCH, 0);
+ td->oldest = -1;
+ }
+
+ return;
+ }
+
+ input_event(input, EV_ABS, ABS_MT_TRACKING_ID, td->id);
+ input_event(input, EV_ABS, ABS_MT_POSITION_X, td->x);
+ input_event(input, EV_ABS, ABS_MT_POSITION_Y, td->y);
+
+ input_mt_sync(input);
+
+ /*
+ * touchscreen emulation: if there was no touching finger previously,
+ * emit touch event
+ */
+ if (td->oldest < 0) {
+ input_event(input, EV_KEY, BTN_TOUCH, 1);
+ td->oldest = td->id;
+ }
+
+ /*
+ * touchscreen emulation: if this is the first finger, store it and
+ * wait for the second. For the second finger, the oldest is this one
+ * if it was already the oldest or if there was no valid first finger;
+ * otherwise the oldest is the first finger.
+ */
+ if (td->first) {
+ td->firstx = td->x;
+ td->firsty = td->y;
+ td->firstid = td->id;
+ } else {
+ int x, y, oldest;
+ if (td->id == td->oldest || td->firstid < 0) {
+ x = td->x;
+ y = td->y;
+ oldest = td->id;
+ } else {
+ x = td->firstx;
+ y = td->firsty;
+ oldest = td->firstid;
+ }
+ input_event(input, EV_ABS, ABS_X, x);
+ input_event(input, EV_ABS, ABS_Y, y);
+ td->oldest = oldest;
+ }
+}
+
+
+static int cando_event(struct hid_device *hid, struct hid_field *field,
+ struct hid_usage *usage, __s32 value)
+{
+ struct cando_data *td = hid_get_drvdata(hid);
+
+ if (hid->claimed & HID_CLAIMED_INPUT) {
+ struct input_dev *input = field->hidinput->input;
+
+ switch (usage->hid) {
+ case HID_DG_INRANGE:
+ td->valid = value;
+ break;
+ case HID_DG_CONTACTID:
+ td->id = value;
+ break;
+ case HID_GD_X:
+ td->x = value;
+ break;
+ case HID_GD_Y:
+ td->y = value;
+ cando_filter_event(td, input);
+ break;
+ case HID_DG_TIPSWITCH:
+ /* avoid interference from generic hidinput handling */
+ break;
+
+ default:
+ /* fallback to the generic hidinput handling */
+ return 0;
+ }
+ }
+
+ /* we have handled the hidinput part, now remains hiddev */
+ if (hid->claimed & HID_CLAIMED_HIDDEV && hid->hiddev_hid_event)
+ hid->hiddev_hid_event(hid, field, usage, value);
+
+ return 1;
+}
+
+static int cando_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+ int ret;
+ struct cando_data *td;
+
+ td = kmalloc(sizeof(struct cando_data), GFP_KERNEL);
+ if (!td) {
+ dev_err(&hdev->dev, "cannot allocate Cando Touch data\n");
+ return -ENOMEM;
+ }
+ hid_set_drvdata(hdev, td);
+ td->first = false;
+ td->oldest = -1;
+ td->valid = false;
+
+ ret = hid_parse(hdev);
+ if (!ret)
+ ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+
+ if (ret)
+ kfree(td);
+
+ return ret;
+}
+
+static void cando_remove(struct hid_device *hdev)
+{
+ hid_hw_stop(hdev);
+ kfree(hid_get_drvdata(hdev));
+ hid_set_drvdata(hdev, NULL);
+}
+
+static const struct hid_device_id cando_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_CANDO,
+ USB_DEVICE_ID_CANDO_MULTI_TOUCH) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CANDO,
+ USB_DEVICE_ID_CANDO_MULTI_TOUCH_11_6) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, cando_devices);
+
+static const struct hid_usage_id cando_grabbed_usages[] = {
+ { HID_ANY_ID, HID_ANY_ID, HID_ANY_ID },
+ { HID_ANY_ID - 1, HID_ANY_ID - 1, HID_ANY_ID - 1}
+};
+
+static struct hid_driver cando_driver = {
+ .name = "cando-touch",
+ .id_table = cando_devices,
+ .probe = cando_probe,
+ .remove = cando_remove,
+ .input_mapping = cando_input_mapping,
+ .input_mapped = cando_input_mapped,
+ .usage_table = cando_grabbed_usages,
+ .event = cando_event,
+};
+
+static int __init cando_init(void)
+{
+ return hid_register_driver(&cando_driver);
+}
+
+static void __exit cando_exit(void)
+{
+ hid_unregister_driver(&cando_driver);
+}
+
+module_init(cando_init);
+module_exit(cando_exit);
+
diff --git a/drivers/hid/hid-cherry.c b/drivers/hid/hid-cherry.c
index 7e597d7f770f..24663a8717b1 100644
--- a/drivers/hid/hid-cherry.c
+++ b/drivers/hid/hid-cherry.c
@@ -59,6 +59,7 @@ static int ch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
static const struct hid_device_id ch_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR) },
{ }
};
MODULE_DEVICE_TABLE(hid, ch_devices);
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 2e2aa759d230..aa0f7dcabcd7 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -653,10 +653,9 @@ int hid_parse_report(struct hid_device *device, __u8 *start,
if (device->driver->report_fixup)
device->driver->report_fixup(device, start, size);
- device->rdesc = kmalloc(size, GFP_KERNEL);
+ device->rdesc = kmemdup(start, size, GFP_KERNEL);
if (device->rdesc == NULL)
return -ENOMEM;
- memcpy(device->rdesc, start, size);
device->rsize = size;
parser = vmalloc(sizeof(struct hid_parser));
@@ -940,13 +939,8 @@ static void hid_output_field(struct hid_field *field, __u8 *data)
unsigned count = field->report_count;
unsigned offset = field->report_offset;
unsigned size = field->report_size;
- unsigned bitsused = offset + count * size;
unsigned n;
- /* make sure the unused bits in the last byte are zeros */
- if (count > 0 && size > 0 && (bitsused % 8) != 0)
- data[(bitsused-1)/8] &= (1 << (bitsused % 8)) - 1;
-
for (n = 0; n < count; n++) {
if (field->logical_minimum < 0) /* signed values */
implement(data, offset + n * size, size, s32ton(field->value[n], size));
@@ -966,6 +960,7 @@ void hid_output_report(struct hid_report *report, __u8 *data)
if (report->id > 0)
*data++ = report->id;
+ memset(data, 0, ((report->size - 1) >> 3) + 1);
for (n = 0; n < report->maxfield; n++)
hid_output_field(report->field[n], data);
}
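The memset added above clears the whole output buffer before the fields are written; ((report->size - 1) >> 3) + 1 is simply the report size in bits rounded up to bytes, which replaces the per-field masking removed from hid_output_field() just above. A small worked example of that rounding (illustrative, not part of the patch):

#include <stdio.h>

int main(void)
{
	unsigned bits[] = { 1, 8, 9, 63, 64 };
	unsigned i;

	for (i = 0; i < sizeof(bits) / sizeof(bits[0]); i++)
		printf("%u bits -> %u bytes\n", bits[i], ((bits[i] - 1) >> 3) + 1);
	return 0;	/* prints 1, 1, 2, 8, 8 */
}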
@@ -1043,13 +1038,8 @@ void hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size,
if ((hid->claimed & HID_CLAIMED_HIDDEV) && hid->hiddev_report_event)
hid->hiddev_report_event(hid, report);
- if (hid->claimed & HID_CLAIMED_HIDRAW) {
- /* numbered reports need to be passed with the report num */
- if (report_enum->numbered)
- hidraw_report_event(hid, data - 1, size + 1);
- else
- hidraw_report_event(hid, data, size);
- }
+ if (hid->claimed & HID_CLAIMED_HIDRAW)
+ hidraw_report_event(hid, data, size);
for (a = 0; a < report->maxfield; a++)
hid_input_field(hid, report->field[a], cdata, interrupt);
@@ -1091,35 +1081,28 @@ int hid_input_report(struct hid_device *hid, int type, u8 *data, int size, int i
buf = kmalloc(sizeof(char) * HID_DEBUG_BUFSIZE, GFP_ATOMIC);
- if (!buf) {
- report = hid_get_report(report_enum, data);
+ if (!buf)
goto nomem;
- }
-
- snprintf(buf, HID_DEBUG_BUFSIZE - 1,
- "\nreport (size %u) (%snumbered)\n", size, report_enum->numbered ? "" : "un");
- hid_debug_event(hid, buf);
-
- report = hid_get_report(report_enum, data);
- if (!report) {
- kfree(buf);
- return -1;
- }
/* dump the report */
snprintf(buf, HID_DEBUG_BUFSIZE - 1,
- "report %d (size %u) = ", report->id, size);
+ "\nreport (size %u) (%snumbered) = ", size, report_enum->numbered ? "" : "un");
hid_debug_event(hid, buf);
+
for (i = 0; i < size; i++) {
snprintf(buf, HID_DEBUG_BUFSIZE - 1,
" %02x", data[i]);
hid_debug_event(hid, buf);
}
hid_debug_event(hid, "\n");
-
kfree(buf);
nomem:
+ report = hid_get_report(report_enum, data);
+
+ if (!report)
+ return -1;
+
if (hdrv && hdrv->raw_event && hid_match_report(hid, report)) {
ret = hdrv->raw_event(hid, report, data, size);
if (ret != 0)
@@ -1172,6 +1155,8 @@ int hid_connect(struct hid_device *hdev, unsigned int connect_mask)
unsigned int i;
int len;
+ if (hdev->quirks & HID_QUIRK_HIDDEV_FORCE)
+ connect_mask |= (HID_CONNECT_HIDDEV_FORCE | HID_CONNECT_HIDDEV);
if (hdev->bus != BUS_USB)
connect_mask &= ~HID_CONNECT_HIDDEV;
if (hid_hiddev(hdev))
@@ -1251,6 +1236,7 @@ EXPORT_SYMBOL_GPL(hid_disconnect);
/* a list of devices for which there is a specialized driver on HID bus */
static const struct hid_device_id hid_blacklist[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_3M, USB_DEVICE_ID_3M1968) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_3M, USB_DEVICE_ID_3M2256) },
{ HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_WCP32PU) },
{ HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_X5_005D) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ATV_IRCONTROL) },
@@ -1295,13 +1281,19 @@ static const struct hid_device_id hid_blacklist[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH_11_6) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_3) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_EZKEY, USB_DEVICE_ID_BTC_8193) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR) },
@@ -1309,6 +1301,7 @@ static const struct hid_device_id hid_blacklist[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0012) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) },
@@ -1335,6 +1328,8 @@ static const struct hid_device_id hid_blacklist[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACETRAVELLER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD_BOOTLOADER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_SIDEWINDER_GV) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_LK6K) },
@@ -1346,7 +1341,9 @@ static const struct hid_device_id hid_blacklist[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) },
@@ -1363,8 +1360,10 @@ static const struct hid_device_id hid_blacklist[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_TWINHAN, USB_DEVICE_ID_TWINHAN_IR_REMOTE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SMARTJOY_PLUS) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_INTUOS4_BLUETOOTH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ZYDACRON, USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT) },
{ }
@@ -1761,7 +1760,7 @@ int hid_add_device(struct hid_device *hdev)
/* we need to kill them here, otherwise they will stay allocated to
* wait for coming driver */
- if (hid_ignore(hdev))
+ if (!(hdev->quirks & HID_QUIRK_NO_IGNORE) && hid_ignore(hdev))
return -ENODEV;
/* XXX hack, any other cleaner solution after the driver core
@@ -1769,11 +1768,12 @@ int hid_add_device(struct hid_device *hdev)
dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
hdev->vendor, hdev->product, atomic_inc_return(&id));
+ hid_debug_register(hdev, dev_name(&hdev->dev));
ret = device_add(&hdev->dev);
if (!ret)
hdev->status |= HID_STAT_ADDED;
-
- hid_debug_register(hdev, dev_name(&hdev->dev));
+ else
+ hid_debug_unregister(hdev);
return ret;
}
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index 56f314fbd4f9..c94026768570 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -811,7 +811,7 @@ static const char *relatives[REL_MAX + 1] = {
[REL_WHEEL] = "Wheel", [REL_MISC] = "Misc",
};
-static const char *absolutes[ABS_MAX + 1] = {
+static const char *absolutes[ABS_CNT] = {
[ABS_X] = "X", [ABS_Y] = "Y",
[ABS_Z] = "Z", [ABS_RX] = "Rx",
[ABS_RY] = "Ry", [ABS_RZ] = "Rz",
diff --git a/drivers/hid/hid-egalax.c b/drivers/hid/hid-egalax.c
new file mode 100644
index 000000000000..f44bdc084cb2
--- /dev/null
+++ b/drivers/hid/hid-egalax.c
@@ -0,0 +1,281 @@
+/*
+ * HID driver for eGalax dual-touch panels
+ *
+ * Copyright (c) 2010 Stephane Chatty <chatty@enac.fr>
+ *
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+#include <linux/slab.h>
+#include "usbhid/usbhid.h"
+
+MODULE_AUTHOR("Stephane Chatty <chatty@enac.fr>");
+MODULE_DESCRIPTION("eGalax dual-touch panel");
+MODULE_LICENSE("GPL");
+
+#include "hid-ids.h"
+
+struct egalax_data {
+ __u16 x, y, z;
+ __u8 id;
+ bool first; /* is this the first finger in the frame? */
+ bool valid; /* valid finger data, or just placeholder? */
+ bool activity; /* at least one active finger previously? */
+ __u16 lastx, lasty; /* latest valid (x, y) in the frame */
+};
+
+static int egalax_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+ struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ switch (usage->hid & HID_USAGE_PAGE) {
+
+ case HID_UP_GENDESK:
+ switch (usage->hid) {
+ case HID_GD_X:
+ hid_map_usage(hi, usage, bit, max,
+ EV_ABS, ABS_MT_POSITION_X);
+ /* touchscreen emulation */
+ input_set_abs_params(hi->input, ABS_X,
+ field->logical_minimum,
+ field->logical_maximum, 0, 0);
+ return 1;
+ case HID_GD_Y:
+ hid_map_usage(hi, usage, bit, max,
+ EV_ABS, ABS_MT_POSITION_Y);
+ /* touchscreen emulation */
+ input_set_abs_params(hi->input, ABS_Y,
+ field->logical_minimum,
+ field->logical_maximum, 0, 0);
+ return 1;
+ }
+ return 0;
+
+ case HID_UP_DIGITIZER:
+ switch (usage->hid) {
+ case HID_DG_TIPSWITCH:
+ /* touchscreen emulation */
+ hid_map_usage(hi, usage, bit, max, EV_KEY, BTN_TOUCH);
+ return 1;
+ case HID_DG_INRANGE:
+ case HID_DG_CONFIDENCE:
+ case HID_DG_CONTACTCOUNT:
+ case HID_DG_CONTACTMAX:
+ return -1;
+ case HID_DG_CONTACTID:
+ hid_map_usage(hi, usage, bit, max,
+ EV_ABS, ABS_MT_TRACKING_ID);
+ return 1;
+ case HID_DG_TIPPRESSURE:
+ hid_map_usage(hi, usage, bit, max,
+ EV_ABS, ABS_MT_PRESSURE);
+ return 1;
+ }
+ return 0;
+ }
+
+ /* ignore others (from other reports we won't get anyway) */
+ return -1;
+}
+
+static int egalax_input_mapped(struct hid_device *hdev, struct hid_input *hi,
+ struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ if (usage->type == EV_KEY || usage->type == EV_ABS)
+ clear_bit(usage->code, *bit);
+
+ return 0;
+}
+
+/*
+ * this function is called when a whole finger has been parsed,
+ * so that it can decide what to send to the input layer.
+ */
+static void egalax_filter_event(struct egalax_data *td, struct input_dev *input)
+{
+ td->first = !td->first; /* touchscreen emulation */
+
+ if (td->valid) {
+ /* emit multitouch events */
+ input_event(input, EV_ABS, ABS_MT_TRACKING_ID, td->id);
+ input_event(input, EV_ABS, ABS_MT_POSITION_X, td->x);
+ input_event(input, EV_ABS, ABS_MT_POSITION_Y, td->y);
+ input_event(input, EV_ABS, ABS_MT_PRESSURE, td->z);
+
+ input_mt_sync(input);
+
+ /*
+ * touchscreen emulation: store (x, y) as
+ * the last valid values in this frame
+ */
+ td->lastx = td->x;
+ td->lasty = td->y;
+ }
+
+ /*
+ * touchscreen emulation: if this is the second finger and at least
+ * one finger in this frame is valid, the latest valid (x, y) in the
+ * frame belongs to the oldest finger on the panel, the one we want
+ * for single touch
+ */
+ if (!td->first && td->activity) {
+ input_event(input, EV_ABS, ABS_X, td->lastx);
+ input_event(input, EV_ABS, ABS_Y, td->lasty);
+ }
+
+ if (!td->valid) {
+ /*
+ * touchscreen emulation: if the first finger is invalid
+ * and there previously was finger activity, this is a release
+ */
+ if (td->first && td->activity) {
+ input_event(input, EV_KEY, BTN_TOUCH, 0);
+ td->activity = false;
+ }
+ return;
+ }
+
+
+ /* touchscreen emulation: if no previous activity, emit touch event */
+ if (!td->activity) {
+ input_event(input, EV_KEY, BTN_TOUCH, 1);
+ td->activity = true;
+ }
+}
+
+
+static int egalax_event(struct hid_device *hid, struct hid_field *field,
+ struct hid_usage *usage, __s32 value)
+{
+ struct egalax_data *td = hid_get_drvdata(hid);
+
+ if (hid->claimed & HID_CLAIMED_INPUT) {
+ struct input_dev *input = field->hidinput->input;
+
+ switch (usage->hid) {
+ case HID_DG_INRANGE:
+ case HID_DG_CONFIDENCE:
+ /* avoid interference from generic hidinput handling */
+ break;
+ case HID_DG_TIPSWITCH:
+ td->valid = value;
+ break;
+ case HID_DG_TIPPRESSURE:
+ td->z = value;
+ break;
+ case HID_DG_CONTACTID:
+ td->id = value;
+ break;
+ case HID_GD_X:
+ td->x = value;
+ break;
+ case HID_GD_Y:
+ td->y = value;
+ /* this is the last field in a finger */
+ egalax_filter_event(td, input);
+ break;
+ case HID_DG_CONTACTCOUNT:
+ /* touch emulation: this is the last field in a frame */
+ td->first = false;
+ break;
+
+ default:
+ /* fallback to the generic hidinput handling */
+ return 0;
+ }
+ }
+
+ /* we have handled the hidinput part, now remains hiddev */
+ if (hid->claimed & HID_CLAIMED_HIDDEV && hid->hiddev_hid_event)
+ hid->hiddev_hid_event(hid, field, usage, value);
+
+ return 1;
+}
+
+static int egalax_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+ int ret;
+ struct egalax_data *td;
+ struct hid_report *report;
+
+ /* zeroed allocation so the emulation state flags start out false */
+ td = kzalloc(sizeof(struct egalax_data), GFP_KERNEL);
+ if (!td) {
+ dev_err(&hdev->dev, "cannot allocate eGalax data\n");
+ return -ENOMEM;
+ }
+ hid_set_drvdata(hdev, td);
+
+ ret = hid_parse(hdev);
+ if (ret)
+ goto end;
+
+ ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+ if (ret)
+ goto end;
+
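+ /*
+ * Feature report 5 with its first value set to 2 is sent below;
+ * this presumably switches the panel into dual-touch reporting
+ * mode (an assumption from this driver's behaviour, not from
+ * vendor documentation).
+ */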
+ report = hdev->report_enum[HID_FEATURE_REPORT].report_id_hash[5];
+ if (report) {
+ report->field[0]->value[0] = 2;
+ usbhid_submit_report(hdev, report, USB_DIR_OUT);
+ }
+
+end:
+ if (ret)
+ kfree(td);
+
+ return ret;
+}
+
+static void egalax_remove(struct hid_device *hdev)
+{
+ hid_hw_stop(hdev);
+ kfree(hid_get_drvdata(hdev));
+ hid_set_drvdata(hdev, NULL);
+}
+
+static const struct hid_device_id egalax_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
+ USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, egalax_devices);
+
+static const struct hid_usage_id egalax_grabbed_usages[] = {
+ { HID_ANY_ID, HID_ANY_ID, HID_ANY_ID },
+ { HID_ANY_ID - 1, HID_ANY_ID - 1, HID_ANY_ID - 1}
+};
+
+static struct hid_driver egalax_driver = {
+ .name = "egalax-touch",
+ .id_table = egalax_devices,
+ .probe = egalax_probe,
+ .remove = egalax_remove,
+ .input_mapping = egalax_input_mapping,
+ .input_mapped = egalax_input_mapped,
+ .usage_table = egalax_grabbed_usages,
+ .event = egalax_event,
+};
+
+static int __init egalax_init(void)
+{
+ return hid_register_driver(&egalax_driver);
+}
+
+static void __exit egalax_exit(void)
+{
+ hid_unregister_driver(&egalax_driver);
+}
+
+module_init(egalax_init);
+module_exit(egalax_exit);
+
diff --git a/drivers/hid/hid-gyration.c b/drivers/hid/hid-gyration.c
index 62416e6baeca..3975e039c3dd 100644
--- a/drivers/hid/hid-gyration.c
+++ b/drivers/hid/hid-gyration.c
@@ -73,6 +73,7 @@ static int gyration_event(struct hid_device *hdev, struct hid_field *field,
static const struct hid_device_id gyration_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) },
{ }
};
MODULE_DEVICE_TABLE(hid, gyration_devices);
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 797e06470356..6af77ed0b555 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -20,6 +20,7 @@
#define USB_VENDOR_ID_3M 0x0596
#define USB_DEVICE_ID_3M1968 0x0500
+#define USB_DEVICE_ID_3M2256 0x0502
#define USB_VENDOR_ID_A4TECH 0x09da
#define USB_DEVICE_ID_A4TECH_WCP32PU 0x0006
@@ -123,6 +124,13 @@
#define USB_VENDOR_ID_BERKSHIRE 0x0c98
#define USB_DEVICE_ID_BERKSHIRE_PCWD 0x1140
+#define USB_VENDOR_ID_BTC 0x046e
+#define USB_DEVICE_ID_BTC_EMPREX_REMOTE 0x5578
+
+#define USB_VENDOR_ID_CANDO 0x2087
+#define USB_DEVICE_ID_CANDO_MULTI_TOUCH 0x0a01
+#define USB_DEVICE_ID_CANDO_MULTI_TOUCH_11_6 0x0b03
+
#define USB_VENDOR_ID_CH 0x068e
#define USB_DEVICE_ID_CH_PRO_PEDALS 0x00f2
#define USB_DEVICE_ID_CH_COMBATSTICK 0x00f4
@@ -131,6 +139,7 @@
#define USB_VENDOR_ID_CHERRY 0x046a
#define USB_DEVICE_ID_CHERRY_CYMOTION 0x0023
+#define USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR 0x0027
#define USB_VENDOR_ID_CHIC 0x05fe
#define USB_DEVICE_ID_CHIC_GAMEPAD 0x0014
@@ -147,6 +156,9 @@
#define USB_DEVICE_ID_CODEMERCS_IOW_FIRST 0x1500
#define USB_DEVICE_ID_CODEMERCS_IOW_LAST 0x15ff
+#define USB_VENDOR_ID_CREATIVELABS 0x041e
+#define USB_DEVICE_ID_PRODIKEYS_PCMIDI 0x2801
+
#define USB_VENDOR_ID_CYGNAL 0x10c4
#define USB_DEVICE_ID_CYGNAL_RADIO_SI470X 0x818a
@@ -170,6 +182,10 @@
#define USB_VENDOR_ID_DRAGONRISE 0x0079
+#define USB_VENDOR_ID_DWAV 0x0eef
+#define USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER 0x0001
+#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH 0x480d
+
#define USB_VENDOR_ID_ELO 0x04E7
#define USB_DEVICE_ID_ELO_TS2700 0x0020
@@ -266,6 +282,7 @@
#define USB_VENDOR_ID_GYRATION 0x0c16
#define USB_DEVICE_ID_GYRATION_REMOTE 0x0002
#define USB_DEVICE_ID_GYRATION_REMOTE_2 0x0003
+#define USB_DEVICE_ID_GYRATION_REMOTE_3 0x0008
#define USB_VENDOR_ID_HAPP 0x078b
#define USB_DEVICE_ID_UGCI_DRIVING 0x0010
@@ -341,6 +358,8 @@
#define USB_VENDOR_ID_MICROCHIP 0x04d8
#define USB_DEVICE_ID_PICKIT1 0x0032
#define USB_DEVICE_ID_PICKIT2 0x0033
+#define USB_DEVICE_ID_PICOLCD 0xc002
+#define USB_DEVICE_ID_PICOLCD_BOOTLOADER 0xf002
#define USB_VENDOR_ID_MICROSOFT 0x045e
#define USB_DEVICE_ID_SIDEWINDER_GV 0x003b
@@ -399,6 +418,9 @@
#define USB_VENDOR_ID_PRODIGE 0x05af
#define USB_DEVICE_ID_PRODIGE_CORDLESS 0x3062
+#define USB_VENDOR_ID_ROCCAT 0x1e7d
+#define USB_DEVICE_ID_ROCCAT_KONE 0x2ced
+
#define USB_VENDOR_ID_SAITEK 0x06a3
#define USB_DEVICE_ID_SAITEK_RUMBLEPAD 0xff17
@@ -408,6 +430,7 @@
#define USB_VENDOR_ID_SAMSUNG 0x0419
#define USB_DEVICE_ID_SAMSUNG_IR_REMOTE 0x0001
+#define USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE 0x0600
#define USB_VENDOR_ID_SONY 0x054c
#define USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE 0x024b
@@ -456,6 +479,7 @@
#define USB_VENDOR_ID_WACOM 0x056a
#define USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH 0x81
+#define USB_DEVICE_ID_WACOM_INTUOS4_BLUETOOTH 0xbd
#define USB_VENDOR_ID_WISEGROUP 0x0925
#define USB_DEVICE_ID_SMARTJOY_PLUS 0x0005
@@ -474,6 +498,9 @@
#define USB_VENDOR_ID_ZEROPLUS 0x0c12
+#define USB_VENDOR_ID_ZYDACRON 0x13EC
+#define USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL 0x0006
+
#define USB_VENDOR_ID_KYE 0x0458
#define USB_DEVICE_ID_KYE_ERGO_525V 0x0087
#define USB_DEVICE_ID_KYE_GPEN_560 0x5003
diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c
index 3677c9037a11..f6433d8050a9 100644
--- a/drivers/hid/hid-lg.c
+++ b/drivers/hid/hid-lg.c
@@ -126,6 +126,9 @@ static int lg_wireless_mapping(struct hid_input *hi, struct hid_usage *usage,
case 0x1004: lg_map_key_clear(KEY_VIDEO); break;
case 0x1005: lg_map_key_clear(KEY_AUDIO); break;
case 0x100a: lg_map_key_clear(KEY_DOCUMENTS); break;
+ /* The following two entries are Playlist 1 and 2 on the MX3200 */
+ case 0x100f: lg_map_key_clear(KEY_FN_1); break;
+ case 0x1010: lg_map_key_clear(KEY_FN_2); break;
case 0x1011: lg_map_key_clear(KEY_PREVIOUSSONG); break;
case 0x1012: lg_map_key_clear(KEY_NEXTSONG); break;
case 0x1013: lg_map_key_clear(KEY_CAMERA); break;
@@ -137,6 +140,7 @@ static int lg_wireless_mapping(struct hid_input *hi, struct hid_usage *usage,
case 0x1019: lg_map_key_clear(KEY_PROG1); break;
case 0x101a: lg_map_key_clear(KEY_PROG2); break;
case 0x101b: lg_map_key_clear(KEY_PROG3); break;
+ case 0x101c: lg_map_key_clear(KEY_CYCLEWINDOWS); break;
case 0x101f: lg_map_key_clear(KEY_ZOOMIN); break;
case 0x1020: lg_map_key_clear(KEY_ZOOMOUT); break;
case 0x1021: lg_map_key_clear(KEY_ZOOMRESET); break;
@@ -147,6 +151,11 @@ static int lg_wireless_mapping(struct hid_input *hi, struct hid_usage *usage,
case 0x1029: lg_map_key_clear(KEY_SHUFFLE); break;
case 0x102a: lg_map_key_clear(KEY_BACK); break;
case 0x102b: lg_map_key_clear(KEY_CYCLEWINDOWS); break;
+ case 0x102d: lg_map_key_clear(KEY_WWW); break;
+ /* The following two are 'Start/answer call' and 'End/reject call'
+ * on the MX3200 */
+ case 0x1031: lg_map_key_clear(KEY_OK); break;
+ case 0x1032: lg_map_key_clear(KEY_CANCEL); break;
case 0x1041: lg_map_key_clear(KEY_BATTERY); break;
case 0x1042: lg_map_key_clear(KEY_WORDPROCESSOR); break;
case 0x1043: lg_map_key_clear(KEY_SPREADSHEET); break;
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index 0d471fc2ab82..f10d56a15f21 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -354,12 +354,15 @@ static int magicmouse_probe(struct hid_device *hdev,
goto err_free;
}
- ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_HIDINPUT);
+ ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
if (ret) {
dev_err(&hdev->dev, "magicmouse hw start failed\n");
goto err_free;
}
+ /* we are handling the input ourselves */
+ hidinput_disconnect(hdev);
+
report = hid_register_report(hdev, HID_INPUT_REPORT, TOUCH_REPORT_ID);
if (!report) {
dev_err(&hdev->dev, "unable to register touch report\n");
diff --git a/drivers/hid/hid-ntrig.c b/drivers/hid/hid-ntrig.c
index 9b24fc510712..b6b0caeeac58 100644
--- a/drivers/hid/hid-ntrig.c
+++ b/drivers/hid/hid-ntrig.c
@@ -1,8 +1,8 @@
/*
* HID driver for N-Trig touchscreens
*
- * Copyright (c) 2008 Rafi Rubin
- * Copyright (c) 2009 Stephane Chatty
+ * Copyright (c) 2008-2010 Rafi Rubin
+ * Copyright (c) 2009-2010 Stephane Chatty
*
*/
@@ -15,6 +15,8 @@
#include <linux/device.h>
#include <linux/hid.h>
+#include <linux/usb.h>
+#include "usbhid/usbhid.h"
#include <linux/module.h>
#include <linux/slab.h>
@@ -22,20 +24,350 @@
#define NTRIG_DUPLICATE_USAGES 0x001
-#define nt_map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, \
- EV_KEY, (c))
+static unsigned int min_width;
+module_param(min_width, uint, 0644);
+MODULE_PARM_DESC(min_width, "Minimum touch contact width to accept.");
+
+static unsigned int min_height;
+module_param(min_height, uint, 0644);
+MODULE_PARM_DESC(min_height, "Minimum touch contact height to accept.");
+
+static unsigned int activate_slack = 1;
+module_param(activate_slack, uint, 0644);
+MODULE_PARM_DESC(activate_slack, "Number of touch frames to ignore at "
+ "the start of touch input.");
+
+static unsigned int deactivate_slack = 4;
+module_param(deactivate_slack, uint, 0644);
+MODULE_PARM_DESC(deactivate_slack, "Number of empty frames to ignore before "
+ "deactivating touch.");
+
+static unsigned int activation_width = 64;
+module_param(activation_width, uint, 0644);
+MODULE_PARM_DESC(activation_width, "Width threshold to immediately start "
+ "processing touch events.");
+
+static unsigned int activation_height = 32;
+module_param(activation_height, uint, 0644);
+MODULE_PARM_DESC(activation_height, "Height threshold to immediately start "
+ "processing touch events.");
struct ntrig_data {
/* Incoming raw values for a single contact */
__u16 x, y, w, h;
__u16 id;
- __u8 confidence;
+
+ bool tipswitch;
+ bool confidence;
+ bool first_contact_touch;
bool reading_mt;
- __u8 first_contact_confidence;
__u8 mt_footer[4];
__u8 mt_foot_count;
+
+ /* The current activation state. */
+ __s8 act_state;
+
+ /* Empty frames to ignore before recognizing the end of activity */
+ __s8 deactivate_slack;
+
+ /* Frames to ignore before acknowledging the start of activity */
+ __s8 activate_slack;
+
+ /* Minimum size contact to accept */
+ __u16 min_width;
+ __u16 min_height;
+
+ /* Threshold to override activation slack */
+ __u16 activation_width;
+ __u16 activation_height;
+
+ __u16 sensor_logical_width;
+ __u16 sensor_logical_height;
+ __u16 sensor_physical_width;
+ __u16 sensor_physical_height;
+};
+
+
+static ssize_t show_phys_width(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+ return sprintf(buf, "%d\n", nd->sensor_physical_width);
+}
+
+static DEVICE_ATTR(sensor_physical_width, S_IRUGO, show_phys_width, NULL);
+
+static ssize_t show_phys_height(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+ return sprintf(buf, "%d\n", nd->sensor_physical_height);
+}
+
+static DEVICE_ATTR(sensor_physical_height, S_IRUGO, show_phys_height, NULL);
+
+static ssize_t show_log_width(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+ return sprintf(buf, "%d\n", nd->sensor_logical_width);
+}
+
+static DEVICE_ATTR(sensor_logical_width, S_IRUGO, show_log_width, NULL);
+
+static ssize_t show_log_height(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+ return sprintf(buf, "%d\n", nd->sensor_logical_height);
+}
+
+static DEVICE_ATTR(sensor_logical_height, S_IRUGO, show_log_height, NULL);
+
+static ssize_t show_min_width(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct ntrig_data *nd = hid_get_drvdata(hdev);
+
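+ /* convert the internally stored logical value back to physical units for sysfs */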
+ return sprintf(buf, "%d\n", nd->min_width *
+ nd->sensor_physical_width /
+ nd->sensor_logical_width);
+}
+
+static ssize_t set_min_width(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+ unsigned long val;
+
+ if (strict_strtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if (val > nd->sensor_physical_width)
+ return -EINVAL;
+
+ nd->min_width = val * nd->sensor_logical_width /
+ nd->sensor_physical_width;
+
+ return count;
+}
+
+static DEVICE_ATTR(min_width, S_IWUSR | S_IRUGO, show_min_width, set_min_width);
+
+static ssize_t show_min_height(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+ return sprintf(buf, "%d\n", nd->min_height *
+ nd->sensor_physical_height /
+ nd->sensor_logical_height);
+}
+
+static ssize_t set_min_height(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+ unsigned long val;
+
+ if (strict_strtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if (val > nd->sensor_physical_height)
+ return -EINVAL;
+
+ nd->min_height = val * nd->sensor_logical_height /
+ nd->sensor_physical_height;
+
+ return count;
+}
+
+static DEVICE_ATTR(min_height, S_IWUSR | S_IRUGO, show_min_height,
+ set_min_height);
+
+static ssize_t show_activate_slack(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+ return sprintf(buf, "%d\n", nd->activate_slack);
+}
+
+static ssize_t set_activate_slack(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+ unsigned long val;
+
+ if (strict_strtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if (val > 0x7f)
+ return -EINVAL;
+
+ nd->activate_slack = val;
+
+ return count;
+}
+
+static DEVICE_ATTR(activate_slack, S_IWUSR | S_IRUGO, show_activate_slack,
+ set_activate_slack);
+
+static ssize_t show_activation_width(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+ return sprintf(buf, "%d\n", nd->activation_width *
+ nd->sensor_physical_width /
+ nd->sensor_logical_width);
+}
+
+static ssize_t set_activation_width(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+ unsigned long val;
+
+ if (strict_strtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if (val > nd->sensor_physical_width)
+ return -EINVAL;
+
+ nd->activation_width = val * nd->sensor_logical_width /
+ nd->sensor_physical_width;
+
+ return count;
+}
+
+static DEVICE_ATTR(activation_width, S_IWUSR | S_IRUGO, show_activation_width,
+ set_activation_width);
+
+static ssize_t show_activation_height(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+ return sprintf(buf, "%d\n", nd->activation_height *
+ nd->sensor_physical_height /
+ nd->sensor_logical_height);
+}
+
+static ssize_t set_activation_height(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+ unsigned long val;
+
+ if (strict_strtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if (val > nd->sensor_physical_height)
+ return -EINVAL;
+
+ nd->activation_height = val * nd->sensor_logical_height /
+ nd->sensor_physical_height;
+
+ return count;
+}
+
+static DEVICE_ATTR(activation_height, S_IWUSR | S_IRUGO,
+ show_activation_height, set_activation_height);
+
+static ssize_t show_deactivate_slack(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+ return sprintf(buf, "%d\n", -nd->deactivate_slack);
+}
+
+static ssize_t set_deactivate_slack(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct ntrig_data *nd = hid_get_drvdata(hdev);
+
+ unsigned long val;
+
+ if (strict_strtoul(buf, 0, &val))
+ return -EINVAL;
+
+ /*
+ * No more than 8 terminal frames have been observed so far
+ * and higher slack is highly likely to leave the single
+ * touch emulation stuck down.
+ */
+ if (val > 7)
+ return -EINVAL;
+
+ nd->deactivate_slack = -val;
+
+ return count;
+}
+
+static DEVICE_ATTR(deactivate_slack, S_IWUSR | S_IRUGO, show_deactivate_slack,
+ set_deactivate_slack);
+
+static struct attribute *sysfs_attrs[] = {
+ &dev_attr_sensor_physical_width.attr,
+ &dev_attr_sensor_physical_height.attr,
+ &dev_attr_sensor_logical_width.attr,
+ &dev_attr_sensor_logical_height.attr,
+ &dev_attr_min_height.attr,
+ &dev_attr_min_width.attr,
+ &dev_attr_activate_slack.attr,
+ &dev_attr_activation_width.attr,
+ &dev_attr_activation_height.attr,
+ &dev_attr_deactivate_slack.attr,
+ NULL
+};
+
+static struct attribute_group ntrig_attribute_group = {
+ .attrs = sysfs_attrs
};
/*
@@ -48,6 +380,8 @@ static int ntrig_input_mapping(struct hid_device *hdev, struct hid_input *hi,
struct hid_field *field, struct hid_usage *usage,
unsigned long **bit, int *max)
{
+ struct ntrig_data *nd = hid_get_drvdata(hdev);
+
/* No special mappings needed for the pen and single touch */
if (field->physical)
return 0;
@@ -61,6 +395,21 @@ static int ntrig_input_mapping(struct hid_device *hdev, struct hid_input *hi,
input_set_abs_params(hi->input, ABS_X,
field->logical_minimum,
field->logical_maximum, 0, 0);
+
+ if (!nd->sensor_logical_width) {
+ nd->sensor_logical_width =
+ field->logical_maximum -
+ field->logical_minimum;
+ nd->sensor_physical_width =
+ field->physical_maximum -
+ field->physical_minimum;
+ nd->activation_width = activation_width *
+ nd->sensor_logical_width /
+ nd->sensor_physical_width;
+ nd->min_width = min_width *
+ nd->sensor_logical_width /
+ nd->sensor_physical_width;
+ }
return 1;
case HID_GD_Y:
hid_map_usage(hi, usage, bit, max,
@@ -68,6 +417,21 @@ static int ntrig_input_mapping(struct hid_device *hdev, struct hid_input *hi,
input_set_abs_params(hi->input, ABS_Y,
field->logical_minimum,
field->logical_maximum, 0, 0);
+
+ if (!nd->sensor_logical_height) {
+ nd->sensor_logical_height =
+ field->logical_maximum -
+ field->logical_minimum;
+ nd->sensor_physical_height =
+ field->physical_maximum -
+ field->physical_minimum;
+ nd->activation_height = activation_height *
+ nd->sensor_logical_height /
+ nd->sensor_physical_height;
+ nd->min_height = min_height *
+ nd->sensor_logical_height /
+ nd->sensor_physical_height;
+ }
return 1;
}
return 0;
@@ -139,9 +503,10 @@ static int ntrig_event (struct hid_device *hid, struct hid_field *field,
case 0xff000001:
/* Tag indicating the start of a multitouch group */
nd->reading_mt = 1;
- nd->first_contact_confidence = 0;
+ nd->first_contact_touch = 0;
break;
case HID_DG_TIPSWITCH:
+ nd->tipswitch = value;
/* Prevent emission of touch until validated */
return 1;
case HID_DG_CONFIDENCE:
@@ -169,8 +534,14 @@ static int ntrig_event (struct hid_device *hid, struct hid_field *field,
* to emit a normal (X, Y) position
*/
if (!nd->reading_mt) {
+ /*
+ * TipSwitch indicates the presence of a
+ * finger in single touch mode.
+ */
+ input_report_key(input, BTN_TOUCH,
+ nd->tipswitch);
input_report_key(input, BTN_TOOL_DOUBLETAP,
- (nd->confidence != 0));
+ nd->tipswitch);
input_event(input, EV_ABS, ABS_X, nd->x);
input_event(input, EV_ABS, ABS_Y, nd->y);
}
@@ -193,28 +564,89 @@ static int ntrig_event (struct hid_device *hid, struct hid_field *field,
if (nd->mt_foot_count != 4)
break;
- /* Pen activity signal, trigger end of touch. */
+ /* Pen activity signal. */
if (nd->mt_footer[2]) {
+ /*
+ * When the pen deactivates touch, we see a bogus frame with
+ * ContactCount > 0. We can save a bit of work by ensuring
+ * act_state < 0 even if deactivation slack is turned off.
+ */
+ nd->act_state = deactivate_slack - 1;
nd->confidence = 0;
break;
}
- /* If the contact was invalid */
- if (!(nd->confidence && nd->mt_footer[0])
- || nd->w <= 250
- || nd->h <= 190) {
- nd->confidence = 0;
+ /*
+ * The first footer value indicates the presence of a
+ * finger.
+ */
+ if (nd->mt_footer[0]) {
+ /*
+ * We do not want to process contacts under
+ * the size threshold, but do not want to
+ * ignore them for activation state
+ */
+ if (nd->w < nd->min_width ||
+ nd->h < nd->min_height)
+ nd->confidence = 0;
+ } else
break;
+
+ if (nd->act_state > 0) {
+ /*
+ * Contact meets the activation size threshold
+ */
+ if (nd->w >= nd->activation_width &&
+ nd->h >= nd->activation_height) {
+ if (nd->id)
+ /*
+ * first contact, activate now
+ */
+ nd->act_state = 0;
+ else {
+ /*
+ * avoid corrupting this frame
+ * but ensure next frame will
+ * be active
+ */
+ nd->act_state = 1;
+ break;
+ }
+ } else
+ /*
+ * Defer adjusting the activation state
+ * until the end of the frame.
+ */
+ break;
}
+ /* Discarding this contact */
+ if (!nd->confidence)
+ break;
+
/* emit a normal (X, Y) for the first point only */
if (nd->id == 0) {
- nd->first_contact_confidence = nd->confidence;
+ /*
+ * TipSwitch is superfluous in multitouch
+ * mode. The footer events tell us
+ * if there is a finger on the screen or
+ * not.
+ */
+ nd->first_contact_touch = nd->confidence;
input_event(input, EV_ABS, ABS_X, nd->x);
input_event(input, EV_ABS, ABS_Y, nd->y);
}
+
+ /* Emit MT events */
input_event(input, EV_ABS, ABS_MT_POSITION_X, nd->x);
input_event(input, EV_ABS, ABS_MT_POSITION_Y, nd->y);
+
+ /*
+ * Translate from height and width to size
+ * and orientation.
+ */
if (nd->w > nd->h) {
input_event(input, EV_ABS,
ABS_MT_ORIENTATION, 1);
@@ -234,41 +666,98 @@ static int ntrig_event (struct hid_device *hid, struct hid_field *field,
break;
case HID_DG_CONTACTCOUNT: /* End of a multitouch group */
- if (!nd->reading_mt)
+ if (!nd->reading_mt) /* Just to be sure */
break;
nd->reading_mt = 0;
- if (nd->first_contact_confidence) {
- switch (value) {
- case 0: /* for single touch devices */
- case 1:
- input_report_key(input,
- BTN_TOOL_DOUBLETAP, 1);
- break;
- case 2:
- input_report_key(input,
- BTN_TOOL_TRIPLETAP, 1);
+
+ /*
+ * Activation state machine logic:
+ *
+ * Fundamental states:
+ * state > 0: Inactive
+ * state <= 0: Active
+ * state < -deactivate_slack:
+ * Pen termination of touch
+ *
+ * Specific values of interest
+ * state == activate_slack
+ * no valid input since the last reset
+ *
+ * state == 0
+ * general operational state
+ *
+ * state == -deactivate_slack
+ * read sufficient empty frames to accept
+ * the end of input and reset
+ */
+
+ if (nd->act_state > 0) { /* Currently inactive */
+ if (value)
+ /*
+ * Consider each live contact as
+ * evidence of intentional activity.
+ */
+ nd->act_state = (nd->act_state > value)
+ ? nd->act_state - value
+ : 0;
+ else
+ /*
+ * Empty frame before we hit the
+ * activity threshold, reset.
+ */
+ nd->act_state = nd->activate_slack;
+
+ /*
+ * Entered this block inactive and no
+ * coordinates sent this frame, so hold off
+ * on button state.
+ */
+ break;
+ } else { /* Currently active */
+ if (value && nd->act_state >=
+ nd->deactivate_slack)
+ /*
+ * Live point: clear accumulated
+ * deactivation count.
+ */
+ nd->act_state = 0;
+ else if (nd->act_state <= nd->deactivate_slack)
+ /*
+ * We've consumed the deactivation
+ * slack, time to deactivate and reset.
+ */
+ nd->act_state =
+ nd->activate_slack;
+ else { /* Move towards deactivation */
+ nd->act_state--;
break;
- case 3:
- default:
- input_report_key(input,
- BTN_TOOL_QUADTAP, 1);
}
+ }
+
+ if (nd->first_contact_touch && nd->act_state <= 0) {
+ /*
+ * Check to see if we're ready to start
+ * emitting touch events.
+ *
+ * Note: activation slack will decrease over
+ * the course of the frame, and it will be
+ * inconsistent from the start to the end of
+ * the frame. However if the frame starts
+ * with slack, first_contact_touch will still
+ * be 0 and we will not get to this point.
+ */
+ input_report_key(input, BTN_TOOL_DOUBLETAP, 1);
input_report_key(input, BTN_TOUCH, 1);
} else {
- input_report_key(input,
- BTN_TOOL_DOUBLETAP, 0);
- input_report_key(input,
- BTN_TOOL_TRIPLETAP, 0);
- input_report_key(input,
- BTN_TOOL_QUADTAP, 0);
+ input_report_key(input, BTN_TOOL_DOUBLETAP, 0);
input_report_key(input, BTN_TOUCH, 0);
}
break;
default:
- /* fallback to the generic hidinput handling */
+ /* fall-back to the generic hidinput handling */
return 0;
}
}
@@ -286,6 +775,7 @@ static int ntrig_probe(struct hid_device *hdev, const struct hid_device_id *id)
struct ntrig_data *nd;
struct hid_input *hidinput;
struct input_dev *input;
+ struct hid_report *report;
if (id->driver_data)
hdev->quirks |= HID_QUIRK_MULTI_INPUT;
@@ -297,6 +787,16 @@ static int ntrig_probe(struct hid_device *hdev, const struct hid_device_id *id)
}
nd->reading_mt = 0;
+ nd->min_width = 0;
+ nd->min_height = 0;
+ nd->activate_slack = activate_slack;
+ nd->act_state = activate_slack;
+ nd->deactivate_slack = -deactivate_slack;
+ nd->sensor_logical_width = 0;
+ nd->sensor_logical_height = 0;
+ nd->sensor_physical_width = 0;
+ nd->sensor_physical_height = 0;
+
hid_set_drvdata(hdev, nd);
ret = hid_parse(hdev);
@@ -327,13 +827,7 @@ static int ntrig_probe(struct hid_device *hdev, const struct hid_device_id *id)
__clear_bit(BTN_TOOL_PEN, input->keybit);
__clear_bit(BTN_TOOL_FINGER, input->keybit);
__clear_bit(BTN_0, input->keybit);
- /*
- * A little something special to enable
- * two and three finger taps.
- */
__set_bit(BTN_TOOL_DOUBLETAP, input->keybit);
- __set_bit(BTN_TOOL_TRIPLETAP, input->keybit);
- __set_bit(BTN_TOOL_QUADTAP, input->keybit);
/*
* The physical touchscreen (single touch)
* input has a value for physical, whereas
@@ -349,6 +843,14 @@ static int ntrig_probe(struct hid_device *hdev, const struct hid_device_id *id)
}
}
+ /* This is needed for devices with more recent firmware versions */
+ report = hdev->report_enum[HID_FEATURE_REPORT].report_id_hash[0x0a];
+ if (report)
+ usbhid_submit_report(hdev, report, USB_DIR_OUT);
+
+ ret = sysfs_create_group(&hdev->dev.kobj,
+ &ntrig_attribute_group);
+
return 0;
err_free:
kfree(nd);
@@ -357,6 +859,8 @@ err_free:
static void ntrig_remove(struct hid_device *hdev)
{
+ sysfs_remove_group(&hdev->dev.kobj,
+ &ntrig_attribute_group);
hid_hw_stop(hdev);
kfree(hid_get_drvdata(hdev));
}
diff --git a/drivers/hid/hid-picolcd.c b/drivers/hid/hid-picolcd.c
new file mode 100644
index 000000000000..7aabf65c48ef
--- /dev/null
+++ b/drivers/hid/hid-picolcd.c
@@ -0,0 +1,2631 @@
+/***************************************************************************
+ * Copyright (C) 2010 by Bruno Prémont <bonbons@linux-vserver.org> *
+ * *
+ * Based on Logitech G13 driver (v0.4) *
+ * Copyright (C) 2009 by Rick L. Vinyard, Jr. <rvinyard@cs.nmsu.edu> *
+ * *
+ * This program is free software: you can redistribute it and/or modify *
+ * it under the terms of the GNU General Public License as published by *
+ * the Free Software Foundation, version 2 of the License. *
+ * *
+ * This driver is distributed in the hope that it will be useful, but *
+ * WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU *
+ * General Public License for more details. *
+ * *
+ * You should have received a copy of the GNU General Public License *
+ * along with this software. If not see <http://www.gnu.org/licenses/>. *
+ ***************************************************************************/
+
+#include <linux/hid.h>
+#include <linux/hid-debug.h>
+#include <linux/input.h>
+#include "hid-ids.h"
+#include "usbhid/usbhid.h"
+#include <linux/usb.h>
+
+#include <linux/fb.h>
+#include <linux/vmalloc.h>
+#include <linux/backlight.h>
+#include <linux/lcd.h>
+
+#include <linux/leds.h>
+
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+
+#include <linux/completion.h>
+#include <linux/uaccess.h>
+
+#define PICOLCD_NAME "PicoLCD (graphic)"
+
+/* Report numbers */
+#define REPORT_ERROR_CODE 0x10 /* LCD: IN[16] */
+#define ERR_SUCCESS 0x00
+#define ERR_PARAMETER_MISSING 0x01
+#define ERR_DATA_MISSING 0x02
+#define ERR_BLOCK_READ_ONLY 0x03
+#define ERR_BLOCK_NOT_ERASABLE 0x04
+#define ERR_BLOCK_TOO_BIG 0x05
+#define ERR_SECTION_OVERFLOW 0x06
+#define ERR_INVALID_CMD_LEN 0x07
+#define ERR_INVALID_DATA_LEN 0x08
+#define REPORT_KEY_STATE 0x11 /* LCD: IN[2] */
+#define REPORT_IR_DATA 0x21 /* LCD: IN[63] */
+#define REPORT_EE_DATA 0x32 /* LCD: IN[63] */
+#define REPORT_MEMORY 0x41 /* LCD: IN[63] */
+#define REPORT_LED_STATE 0x81 /* LCD: OUT[1] */
+#define REPORT_BRIGHTNESS 0x91 /* LCD: OUT[1] */
+#define REPORT_CONTRAST 0x92 /* LCD: OUT[1] */
+#define REPORT_RESET 0x93 /* LCD: OUT[2] */
+#define REPORT_LCD_CMD 0x94 /* LCD: OUT[63] */
+#define REPORT_LCD_DATA 0x95 /* LCD: OUT[63] */
+#define REPORT_LCD_CMD_DATA 0x96 /* LCD: OUT[63] */
+#define REPORT_EE_READ 0xa3 /* LCD: OUT[63] */
+#define REPORT_EE_WRITE 0xa4 /* LCD: OUT[63] */
+#define REPORT_ERASE_MEMORY 0xb2 /* LCD: OUT[2] */
+#define REPORT_READ_MEMORY 0xb3 /* LCD: OUT[3] */
+#define REPORT_WRITE_MEMORY 0xb4 /* LCD: OUT[63] */
+#define REPORT_SPLASH_RESTART 0xc1 /* LCD: OUT[1] */
+#define REPORT_EXIT_KEYBOARD 0xef /* LCD: OUT[2] */
+#define REPORT_VERSION 0xf1 /* LCD: IN[2],OUT[1] Bootloader: IN[2],OUT[1] */
+#define REPORT_BL_ERASE_MEMORY 0xf2 /* Bootloader: IN[36],OUT[4] */
+#define REPORT_BL_READ_MEMORY 0xf3 /* Bootloader: IN[36],OUT[4] */
+#define REPORT_BL_WRITE_MEMORY 0xf4 /* Bootloader: IN[36],OUT[36] */
+#define REPORT_DEVID 0xf5 /* LCD: IN[5], OUT[1] Bootloader: IN[5],OUT[1] */
+#define REPORT_SPLASH_SIZE 0xf6 /* LCD: IN[4], OUT[1] */
+#define REPORT_HOOK_VERSION 0xf7 /* LCD: IN[2], OUT[1] */
+#define REPORT_EXIT_FLASHER 0xff /* Bootloader: OUT[2] */
+
+#ifdef CONFIG_HID_PICOLCD_FB
+/* Framebuffer
+ *
+ * The PicoLCD uses a Topway LCD module of 256x64 pixels.
+ * This display area is tiled over 4 controllers with 8 tiles
+ * each. Each tile has 8x64 pixels, each data byte representing
+ * a 1-bit wide vertical line of the tile.
+ *
+ * The display can be updated at a tile granularity.
+ *
+ * Chip 1 Chip 2 Chip 3 Chip 4
+ * +----------------+----------------+----------------+----------------+
+ * | Tile 1 | Tile 1 | Tile 1 | Tile 1 |
+ * +----------------+----------------+----------------+----------------+
+ * | Tile 2 | Tile 2 | Tile 2 | Tile 2 |
+ * +----------------+----------------+----------------+----------------+
+ * ...
+ * +----------------+----------------+----------------+----------------+
+ * | Tile 8 | Tile 8 | Tile 8 | Tile 8 |
+ * +----------------+----------------+----------------+----------------+
+ */
+#define PICOLCDFB_NAME "picolcdfb"
+#define PICOLCDFB_WIDTH (256)
+#define PICOLCDFB_HEIGHT (64)
+#define PICOLCDFB_SIZE (PICOLCDFB_WIDTH * PICOLCDFB_HEIGHT / 8)
+
+#define PICOLCDFB_UPDATE_RATE_LIMIT 10
+#define PICOLCDFB_UPDATE_RATE_DEFAULT 2
+
+/* Framebuffer visual structures */
+static const struct fb_fix_screeninfo picolcdfb_fix = {
+ .id = PICOLCDFB_NAME,
+ .type = FB_TYPE_PACKED_PIXELS,
+ .visual = FB_VISUAL_MONO01,
+ .xpanstep = 0,
+ .ypanstep = 0,
+ .ywrapstep = 0,
+ .line_length = PICOLCDFB_WIDTH / 8,
+ .accel = FB_ACCEL_NONE,
+};
+
+static const struct fb_var_screeninfo picolcdfb_var = {
+ .xres = PICOLCDFB_WIDTH,
+ .yres = PICOLCDFB_HEIGHT,
+ .xres_virtual = PICOLCDFB_WIDTH,
+ .yres_virtual = PICOLCDFB_HEIGHT,
+ .width = 103,
+ .height = 26,
+ .bits_per_pixel = 1,
+ .grayscale = 1,
+};
+#endif /* CONFIG_HID_PICOLCD_FB */
+
+/* Input device
+ *
+ * The PicoLCD has an IR receiver header, a built-in keypad with 5 keys
+ * and a header for a 4x4 key matrix. The built-in keys are part of the matrix.
+ */
+static const unsigned short def_keymap[] = {
+ KEY_RESERVED, /* none */
+ KEY_BACK, /* col 4 + row 1 */
+ KEY_HOMEPAGE, /* col 3 + row 1 */
+ KEY_RESERVED, /* col 2 + row 1 */
+ KEY_RESERVED, /* col 1 + row 1 */
+ KEY_SCROLLUP, /* col 4 + row 2 */
+ KEY_OK, /* col 3 + row 2 */
+ KEY_SCROLLDOWN, /* col 2 + row 2 */
+ KEY_RESERVED, /* col 1 + row 2 */
+ KEY_RESERVED, /* col 4 + row 3 */
+ KEY_RESERVED, /* col 3 + row 3 */
+ KEY_RESERVED, /* col 2 + row 3 */
+ KEY_RESERVED, /* col 1 + row 3 */
+ KEY_RESERVED, /* col 4 + row 4 */
+ KEY_RESERVED, /* col 3 + row 4 */
+ KEY_RESERVED, /* col 2 + row 4 */
+ KEY_RESERVED, /* col 1 + row 4 */
+};
+#define PICOLCD_KEYS ARRAY_SIZE(def_keymap)
+
+/* Description of in-progress IO operation, used for operations
+ * that trigger a response from the device */
+struct picolcd_pending {
+ struct hid_report *out_report;
+ struct hid_report *in_report;
+ struct completion ready;
+ int raw_size;
+ u8 raw_data[64];
+};
+
+/* Per device data structure */
+struct picolcd_data {
+ struct hid_device *hdev;
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debug_reset;
+ struct dentry *debug_eeprom;
+ struct dentry *debug_flash;
+ struct mutex mutex_flash;
+ int addr_sz;
+#endif
+ u8 version[2];
+ unsigned short opmode_delay;
+ /* input stuff */
+ u8 pressed_keys[2];
+ struct input_dev *input_keys;
+ struct input_dev *input_cir;
+ unsigned short keycode[PICOLCD_KEYS];
+
+#ifdef CONFIG_HID_PICOLCD_FB
+ /* Framebuffer stuff */
+ u8 fb_update_rate;
+ u8 fb_bpp;
+ u8 *fb_vbitmap; /* local copy of what was sent to PicoLCD */
+ u8 *fb_bitmap; /* framebuffer */
+ struct fb_info *fb_info;
+ struct fb_deferred_io fb_defio;
+#endif /* CONFIG_HID_PICOLCD_FB */
+#ifdef CONFIG_HID_PICOLCD_LCD
+ struct lcd_device *lcd;
+ u8 lcd_contrast;
+#endif /* CONFIG_HID_PICOLCD_LCD */
+#ifdef CONFIG_HID_PICOLCD_BACKLIGHT
+ struct backlight_device *backlight;
+ u8 lcd_brightness;
+ u8 lcd_power;
+#endif /* CONFIG_HID_PICOLCD_BACKLIGHT */
+#ifdef CONFIG_HID_PICOLCD_LEDS
+ /* LED stuff */
+ u8 led_state;
+ struct led_classdev *led[8];
+#endif /* CONFIG_HID_PICOLCD_LEDS */
+
+ /* Housekeeping stuff */
+ spinlock_t lock;
+ struct mutex mutex;
+ struct picolcd_pending *pending;
+ int status;
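+ /* bitmask built from the PICOLCD_* flags below */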
+#define PICOLCD_BOOTLOADER 1
+#define PICOLCD_FAILED 2
+#define PICOLCD_READY_FB 4
+};
+
+
+/* Find a given report */
+#define picolcd_in_report(id, dev) picolcd_report(id, dev, HID_INPUT_REPORT)
+#define picolcd_out_report(id, dev) picolcd_report(id, dev, HID_OUTPUT_REPORT)
+
+static struct hid_report *picolcd_report(int id, struct hid_device *hdev, int dir)
+{
+ struct list_head *feature_report_list = &hdev->report_enum[dir].report_list;
+ struct hid_report *report = NULL;
+
+ list_for_each_entry(report, feature_report_list, list) {
+ if (report->id == id)
+ return report;
+ }
+ dev_warn(&hdev->dev, "No report with id 0x%x found\n", id);
+ return NULL;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static void picolcd_debug_out_report(struct picolcd_data *data,
+ struct hid_device *hdev, struct hid_report *report);
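+/*
+ * With debugfs enabled, wrap usbhid_submit_report() so that every
+ * outgoing report is also dumped through picolcd_debug_out_report().
+ */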
+#define usbhid_submit_report(a, b, c) \
+ do { \
+ picolcd_debug_out_report(hid_get_drvdata(a), a, b); \
+ usbhid_submit_report(a, b, c); \
+ } while (0)
+#endif
+
+/* Submit a report and wait for a reply from the device - if the device
+ * fades away or does not respond in time, return NULL */
+static struct picolcd_pending *picolcd_send_and_wait(struct hid_device *hdev,
+ int report_id, const u8 *raw_data, int size)
+{
+ struct picolcd_data *data = hid_get_drvdata(hdev);
+ struct picolcd_pending *work;
+ struct hid_report *report = picolcd_out_report(report_id, hdev);
+ unsigned long flags;
+ int i, j, k;
+
+ if (!report || !data)
+ return NULL;
+ if (data->status & PICOLCD_FAILED)
+ return NULL;
+ work = kzalloc(sizeof(*work), GFP_KERNEL);
+ if (!work)
+ return NULL;
+
+ init_completion(&work->ready);
+ work->out_report = report;
+ work->in_report = NULL;
+ work->raw_size = 0;
+
+ mutex_lock(&data->mutex);
+ spin_lock_irqsave(&data->lock, flags);
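+ /* copy raw_data into the output report's fields, zero-padding the tail */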
+ for (i = k = 0; i < report->maxfield; i++)
+ for (j = 0; j < report->field[i]->report_count; j++) {
+ hid_set_field(report->field[i], j, k < size ? raw_data[k] : 0);
+ k++;
+ }
+ data->pending = work;
+ usbhid_submit_report(data->hdev, report, USB_DIR_OUT);
+ spin_unlock_irqrestore(&data->lock, flags);
+ wait_for_completion_interruptible_timeout(&work->ready, HZ*2);
+ spin_lock_irqsave(&data->lock, flags);
+ data->pending = NULL;
+ spin_unlock_irqrestore(&data->lock, flags);
+ mutex_unlock(&data->mutex);
+ return work;
+}
+
+#ifdef CONFIG_HID_PICOLCD_FB
+/* Send a given tile to PicoLCD */
+static int picolcd_fb_send_tile(struct hid_device *hdev, int chip, int tile)
+{
+ struct picolcd_data *data = hid_get_drvdata(hdev);
+ struct hid_report *report1 = picolcd_out_report(REPORT_LCD_CMD_DATA, hdev);
+ struct hid_report *report2 = picolcd_out_report(REPORT_LCD_DATA, hdev);
+ unsigned long flags;
+ u8 *tdata;
+ int i;
+
+ if (!report1 || report1->maxfield != 1 || !report2 || report2->maxfield != 1)
+ return -ENODEV;
+
+ spin_lock_irqsave(&data->lock, flags);
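+ /*
+ * report1 carries the chip select, what looks like the controller's
+ * "set page" (0xb8 | tile) and "set address" (0x40) commands, and the
+ * first 32 data bytes; report2 carries the remaining 32 bytes of the
+ * 64-byte tile.
+ */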
+ hid_set_field(report1->field[0], 0, chip << 2);
+ hid_set_field(report1->field[0], 1, 0x02);
+ hid_set_field(report1->field[0], 2, 0x00);
+ hid_set_field(report1->field[0], 3, 0x00);
+ hid_set_field(report1->field[0], 4, 0xb8 | tile);
+ hid_set_field(report1->field[0], 5, 0x00);
+ hid_set_field(report1->field[0], 6, 0x00);
+ hid_set_field(report1->field[0], 7, 0x40);
+ hid_set_field(report1->field[0], 8, 0x00);
+ hid_set_field(report1->field[0], 9, 0x00);
+ hid_set_field(report1->field[0], 10, 32);
+
+ hid_set_field(report2->field[0], 0, (chip << 2) | 0x01);
+ hid_set_field(report2->field[0], 1, 0x00);
+ hid_set_field(report2->field[0], 2, 0x00);
+ hid_set_field(report2->field[0], 3, 32);
+
+ tdata = data->fb_vbitmap + (tile * 4 + chip) * 64;
+ for (i = 0; i < 64; i++)
+ if (i < 32)
+ hid_set_field(report1->field[0], 11 + i, tdata[i]);
+ else
+ hid_set_field(report2->field[0], 4 + i - 32, tdata[i]);
+
+ usbhid_submit_report(data->hdev, report1, USB_DIR_OUT);
+ usbhid_submit_report(data->hdev, report2, USB_DIR_OUT);
+ spin_unlock_irqrestore(&data->lock, flags);
+ return 0;
+}
+
+/* Translate a single tile */
+static int picolcd_fb_update_tile(u8 *vbitmap, const u8 *bitmap, int bpp,
+ int chip, int tile)
+{
+ int i, b, changed = 0;
+ u8 tdata[64];
+ u8 *vdata = vbitmap + (tile * 4 + chip) * 64;
+
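+ /*
+ * Build one byte per column: after the loop over b, bit 0 of
+ * tdata[i] ends up holding row b = 0 of the tile and bit 7
+ * holding row b = 7.
+ */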
+ if (bpp == 1) {
+ for (b = 7; b >= 0; b--) {
+ const u8 *bdata = bitmap + tile * 256 + chip * 8 + b * 32;
+ for (i = 0; i < 64; i++) {
+ tdata[i] <<= 1;
+ tdata[i] |= (bdata[i/8] >> (7 - i % 8)) & 0x01;
+ }
+ }
+ } else if (bpp == 8) {
+ for (b = 7; b >= 0; b--) {
+ const u8 *bdata = bitmap + (tile * 256 + chip * 8 + b * 32) * 8;
+ for (i = 0; i < 64; i++) {
+ tdata[i] <<= 1;
+ tdata[i] |= (bdata[i] & 0x80) ? 0x01 : 0x00;
+ }
+ }
+ } else {
+ /* Oops, we should never get here! */
+ WARN_ON(1);
+ return 0;
+ }
+
+ for (i = 0; i < 64; i++)
+ if (tdata[i] != vdata[i]) {
+ changed = 1;
+ vdata[i] = tdata[i];
+ }
+ return changed;
+}
+
+/* Reconfigure LCD display */
+static int picolcd_fb_reset(struct picolcd_data *data, int clear)
+{
+ struct hid_report *report = picolcd_out_report(REPORT_LCD_CMD, data->hdev);
+ int i, j;
+ unsigned long flags;
+ static const u8 mapcmd[8] = { 0x00, 0x02, 0x00, 0x64, 0x3f, 0x00, 0x64, 0xc0 };
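+ /*
+ * Command sequence written below to each of the four controllers;
+ * the first byte is replaced by the chip select (i << 2).
+ */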
+
+ if (!report || report->maxfield != 1)
+ return -ENODEV;
+
+ spin_lock_irqsave(&data->lock, flags);
+ for (i = 0; i < 4; i++) {
+ for (j = 0; j < report->field[0]->maxusage; j++)
+ if (j == 0)
+ hid_set_field(report->field[0], j, i << 2);
+ else if (j < sizeof(mapcmd))
+ hid_set_field(report->field[0], j, mapcmd[j]);
+ else
+ hid_set_field(report->field[0], j, 0);
+ usbhid_submit_report(data->hdev, report, USB_DIR_OUT);
+ }
+
+ data->status |= PICOLCD_READY_FB;
+ spin_unlock_irqrestore(&data->lock, flags);
+
+ if (data->fb_bitmap) {
+ if (clear) {
+ memset(data->fb_vbitmap, 0xff, PICOLCDFB_SIZE);
+ memset(data->fb_bitmap, 0, PICOLCDFB_SIZE*data->fb_bpp);
+ } else {
+ /* invert 1 byte in each tile to force resend */
+ for (i = 0; i < PICOLCDFB_SIZE; i += 64)
+ data->fb_vbitmap[i] = ~data->fb_vbitmap[i];
+ }
+ }
+
+ /* schedule first output of framebuffer */
+ if (data->fb_info)
+ schedule_delayed_work(&data->fb_info->deferred_work, 0);
+
+ return 0;
+}
+
+/* Update fb_vbitmap from the screen_base and send changed tiles to device */
+static void picolcd_fb_update(struct picolcd_data *data)
+{
+ int chip, tile, n;
+ unsigned long flags;
+
+ spin_lock_irqsave(&data->lock, flags);
+ if (!(data->status & PICOLCD_READY_FB)) {
+ spin_unlock_irqrestore(&data->lock, flags);
+ picolcd_fb_reset(data, 0);
+ } else {
+ spin_unlock_irqrestore(&data->lock, flags);
+ }
+
+ /*
+ * Translate the framebuffer into the format needed by the PicoLCD.
+ * See display layout above.
+ * Do this one tile after the other and push those tiles that changed.
+ *
+ * Wait for our IO to complete as otherwise we might flood the queue!
+ */
+ n = 0;
+ for (chip = 0; chip < 4; chip++)
+ for (tile = 0; tile < 8; tile++)
+ if (picolcd_fb_update_tile(data->fb_vbitmap,
+ data->fb_bitmap, data->fb_bpp, chip, tile)) {
+ n += 2;
+ if (n >= HID_OUTPUT_FIFO_SIZE / 2) {
+ usbhid_wait_io(data->hdev);
+ n = 0;
+ }
+ picolcd_fb_send_tile(data->hdev, chip, tile);
+ }
+ if (n)
+ usbhid_wait_io(data->hdev);
+}
+
+/* Stub to call the system default and update the image on the picoLCD */
+static void picolcd_fb_fillrect(struct fb_info *info,
+ const struct fb_fillrect *rect)
+{
+ if (!info->par)
+ return;
+ sys_fillrect(info, rect);
+
+ schedule_delayed_work(&info->deferred_work, 0);
+}
+
+/* Stub to call the system default and update the image on the picoLCD */
+static void picolcd_fb_copyarea(struct fb_info *info,
+ const struct fb_copyarea *area)
+{
+ if (!info->par)
+ return;
+ sys_copyarea(info, area);
+
+ schedule_delayed_work(&info->deferred_work, 0);
+}
+
+/* Stub to call the system default and update the image on the picoLCD */
+static void picolcd_fb_imageblit(struct fb_info *info, const struct fb_image *image)
+{
+ if (!info->par)
+ return;
+ sys_imageblit(info, image);
+
+ schedule_delayed_work(&info->deferred_work, 0);
+}
+
+/*
+ * This is the slow path from userspace: the user can seek and write to
+ * the fb. It's inefficient to do anything less than a full screen draw.
+ */
+static ssize_t picolcd_fb_write(struct fb_info *info, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ ssize_t ret;
+ if (!info->par)
+ return -ENODEV;
+ ret = fb_sys_write(info, buf, count, ppos);
+ if (ret >= 0)
+ schedule_delayed_work(&info->deferred_work, 0);
+ return ret;
+}
+
+static int picolcd_fb_blank(int blank, struct fb_info *info)
+{
+ if (!info->par)
+ return -ENODEV;
+ /* We let fb notification do this for us via lcd/backlight device */
+ return 0;
+}
+
+static void picolcd_fb_destroy(struct fb_info *info)
+{
+ struct picolcd_data *data = info->par;
+ info->par = NULL;
+ if (data)
+ data->fb_info = NULL;
+ fb_deferred_io_cleanup(info);
+ framebuffer_release(info);
+}
+
+static int picolcd_fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
+{
+ __u32 bpp = var->bits_per_pixel;
+ __u32 activate = var->activate;
+
+ /* only allow 1/8 bit depth (8-bit is grayscale) */
+ *var = picolcdfb_var;
+ var->activate = activate;
+ if (bpp >= 8)
+ var->bits_per_pixel = 8;
+ else
+ var->bits_per_pixel = 1;
+ return 0;
+}
+
+static int picolcd_set_par(struct fb_info *info)
+{
+ struct picolcd_data *data = info->par;
+ u8 *o_fb, *n_fb;
+ if (info->var.bits_per_pixel == data->fb_bpp)
+ return 0;
+ /* switch between 1/8 bit depths */
+ if (info->var.bits_per_pixel != 1 && info->var.bits_per_pixel != 8)
+ return -EINVAL;
+
+ o_fb = data->fb_bitmap;
+ n_fb = vmalloc(PICOLCDFB_SIZE*info->var.bits_per_pixel);
+ if (!n_fb)
+ return -ENOMEM;
+
+ fb_deferred_io_cleanup(info);
+ /* translate FB content to new bits-per-pixel */
+ if (info->var.bits_per_pixel == 1) {
+ int i, b;
+ for (i = 0; i < PICOLCDFB_SIZE; i++) {
+ u8 p = 0;
+ for (b = 0; b < 8; b++) {
+ p <<= 1;
+ p |= o_fb[i*8+b] ? 0x01 : 0x00;
+ }
+ n_fb[i] = p;
+ }
+ info->fix.visual = FB_VISUAL_MONO01;
+ info->fix.line_length = PICOLCDFB_WIDTH / 8;
+ } else {
+ int i;
+ for (i = 0; i < PICOLCDFB_SIZE * 8; i++)
+ n_fb[i] = o_fb[i/8] & (0x01 << (7 - i % 8)) ? 0xff : 0x00;
+ info->fix.visual = FB_VISUAL_TRUECOLOR;
+ info->fix.line_length = PICOLCDFB_WIDTH;
+ }
+
+ data->fb_bitmap = n_fb;
+ data->fb_bpp = info->var.bits_per_pixel;
+ info->screen_base = (char __force __iomem *)n_fb;
+ info->fix.smem_start = (unsigned long)n_fb;
+ info->fix.smem_len = PICOLCDFB_SIZE*data->fb_bpp;
+ fb_deferred_io_init(info);
+ vfree(o_fb);
+ return 0;
+}
+
+/* Note this can't be const because of struct fb_info definition */
+static struct fb_ops picolcdfb_ops = {
+ .owner = THIS_MODULE,
+ .fb_destroy = picolcd_fb_destroy,
+ .fb_read = fb_sys_read,
+ .fb_write = picolcd_fb_write,
+ .fb_blank = picolcd_fb_blank,
+ .fb_fillrect = picolcd_fb_fillrect,
+ .fb_copyarea = picolcd_fb_copyarea,
+ .fb_imageblit = picolcd_fb_imageblit,
+ .fb_check_var = picolcd_fb_check_var,
+ .fb_set_par = picolcd_set_par,
+};
+
+
+/* Callback from deferred IO workqueue */
+static void picolcd_fb_deferred_io(struct fb_info *info, struct list_head *pagelist)
+{
+ picolcd_fb_update(info->par);
+}
+
+static const struct fb_deferred_io picolcd_fb_defio = {
+ .delay = HZ / PICOLCDFB_UPDATE_RATE_DEFAULT,
+ .deferred_io = picolcd_fb_deferred_io,
+};
+
+
+/*
+ * The "fb_update_rate" sysfs attribute
+ */
+static ssize_t picolcd_fb_update_rate_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct picolcd_data *data = dev_get_drvdata(dev);
+ unsigned i, fb_update_rate = data->fb_update_rate;
+ size_t ret = 0;
+
+ for (i = 1; i <= PICOLCDFB_UPDATE_RATE_LIMIT; i++)
+ if (ret >= PAGE_SIZE)
+ break;
+ else if (i == fb_update_rate)
+ ret += snprintf(buf+ret, PAGE_SIZE-ret, "[%u] ", i);
+ else
+ ret += snprintf(buf+ret, PAGE_SIZE-ret, "%u ", i);
+ if (ret > 0)
+ buf[min(ret, (size_t)PAGE_SIZE)-1] = '\n';
+ return ret;
+}
+
+static ssize_t picolcd_fb_update_rate_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct picolcd_data *data = dev_get_drvdata(dev);
+ int i;
+ unsigned u;
+
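+ /* accept 1..PICOLCDFB_UPDATE_RATE_LIMIT updates per second; 0 selects the default rate */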
+ if (count < 1 || count > 10)
+ return -EINVAL;
+
+ i = sscanf(buf, "%u", &u);
+ if (i != 1)
+ return -EINVAL;
+
+ if (u > PICOLCDFB_UPDATE_RATE_LIMIT)
+ return -ERANGE;
+ else if (u == 0)
+ u = PICOLCDFB_UPDATE_RATE_DEFAULT;
+
+ data->fb_update_rate = u;
+ data->fb_defio.delay = HZ / data->fb_update_rate;
+ return count;
+}
+
+static DEVICE_ATTR(fb_update_rate, 0666, picolcd_fb_update_rate_show,
+ picolcd_fb_update_rate_store);
+
+/* initialize Framebuffer device */
+static int picolcd_init_framebuffer(struct picolcd_data *data)
+{
+ struct device *dev = &data->hdev->dev;
+ struct fb_info *info = NULL;
+ int error = -ENOMEM;
+ u8 *fb_vbitmap = NULL;
+ u8 *fb_bitmap = NULL;
+
+ fb_bitmap = vmalloc(PICOLCDFB_SIZE*picolcdfb_var.bits_per_pixel);
+ if (fb_bitmap == NULL) {
+ dev_err(dev, "can't get a free page for framebuffer\n");
+ goto err_nomem;
+ }
+
+ fb_vbitmap = kmalloc(PICOLCDFB_SIZE, GFP_KERNEL);
+ if (fb_vbitmap == NULL) {
+ dev_err(dev, "can't alloc vbitmap image buffer\n");
+ goto err_nomem;
+ }
+
+ data->fb_update_rate = PICOLCDFB_UPDATE_RATE_DEFAULT;
+ data->fb_defio = picolcd_fb_defio;
+ info = framebuffer_alloc(0, dev);
+ if (info == NULL) {
+ dev_err(dev, "failed to allocate a framebuffer\n");
+ goto err_nomem;
+ }
+
+ info->fbdefio = &data->fb_defio;
+ info->screen_base = (char __force __iomem *)fb_bitmap;
+ info->fbops = &picolcdfb_ops;
+ info->var = picolcdfb_var;
+ info->fix = picolcdfb_fix;
+ info->fix.smem_len = PICOLCDFB_SIZE;
+ info->fix.smem_start = (unsigned long)fb_bitmap;
+ info->par = data;
+ info->flags = FBINFO_FLAG_DEFAULT;
+
+ data->fb_vbitmap = fb_vbitmap;
+ data->fb_bitmap = fb_bitmap;
+ data->fb_bpp = picolcdfb_var.bits_per_pixel;
+ error = picolcd_fb_reset(data, 1);
+ if (error) {
+ dev_err(dev, "failed to configure display\n");
+ goto err_cleanup;
+ }
+ error = device_create_file(dev, &dev_attr_fb_update_rate);
+ if (error) {
+ dev_err(dev, "failed to create sysfs attributes\n");
+ goto err_cleanup;
+ }
+ data->fb_info = info;
+ error = register_framebuffer(info);
+ if (error) {
+ dev_err(dev, "failed to register framebuffer\n");
+ goto err_sysfs;
+ }
+ fb_deferred_io_init(info);
+ /* schedule first output of framebuffer */
+ schedule_delayed_work(&info->deferred_work, 0);
+ return 0;
+
+err_sysfs:
+ device_remove_file(dev, &dev_attr_fb_update_rate);
+err_cleanup:
+ data->fb_vbitmap = NULL;
+ data->fb_bitmap = NULL;
+ data->fb_bpp = 0;
+ data->fb_info = NULL;
+
+err_nomem:
+ framebuffer_release(info);
+ vfree(fb_bitmap);
+ kfree(fb_vbitmap);
+ return error;
+}
+
+static void picolcd_exit_framebuffer(struct picolcd_data *data)
+{
+ struct fb_info *info = data->fb_info;
+ u8 *fb_vbitmap = data->fb_vbitmap;
+ u8 *fb_bitmap = data->fb_bitmap;
+
+ if (!info)
+ return;
+
+ data->fb_vbitmap = NULL;
+ data->fb_bitmap = NULL;
+ data->fb_bpp = 0;
+ data->fb_info = NULL;
+ device_remove_file(&data->hdev->dev, &dev_attr_fb_update_rate);
+ fb_deferred_io_cleanup(info);
+ unregister_framebuffer(info);
+ vfree(fb_bitmap);
+ kfree(fb_vbitmap);
+}
+
+#define picolcd_fbinfo(d) ((d)->fb_info)
+#else
+static inline int picolcd_fb_reset(struct picolcd_data *data, int clear)
+{
+ return 0;
+}
+static inline int picolcd_init_framebuffer(struct picolcd_data *data)
+{
+ return 0;
+}
+static inline void picolcd_exit_framebuffer(struct picolcd_data *data)
+{
+}
+#define picolcd_fbinfo(d) NULL
+#endif /* CONFIG_HID_PICOLCD_FB */
+
+#ifdef CONFIG_HID_PICOLCD_BACKLIGHT
+/*
+ * backlight class device
+ */
+static int picolcd_get_brightness(struct backlight_device *bdev)
+{
+ struct picolcd_data *data = bl_get_data(bdev);
+ return data->lcd_brightness;
+}
+
+static int picolcd_set_brightness(struct backlight_device *bdev)
+{
+ struct picolcd_data *data = bl_get_data(bdev);
+ struct hid_report *report = picolcd_out_report(REPORT_BRIGHTNESS, data->hdev);
+ unsigned long flags;
+
+ if (!report || report->maxfield != 1 || report->field[0]->report_count != 1)
+ return -ENODEV;
+
+ data->lcd_brightness = bdev->props.brightness & 0x0ff;
+ data->lcd_power = bdev->props.power;
+ spin_lock_irqsave(&data->lock, flags);
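+ /* send 0 while blanked, the configured brightness otherwise */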
+ hid_set_field(report->field[0], 0, data->lcd_power == FB_BLANK_UNBLANK ? data->lcd_brightness : 0);
+ usbhid_submit_report(data->hdev, report, USB_DIR_OUT);
+ spin_unlock_irqrestore(&data->lock, flags);
+ return 0;
+}
+
+static int picolcd_check_bl_fb(struct backlight_device *bdev, struct fb_info *fb)
+{
+ return fb && fb == picolcd_fbinfo((struct picolcd_data *)bl_get_data(bdev));
+}
+
+static const struct backlight_ops picolcd_blops = {
+ .update_status = picolcd_set_brightness,
+ .get_brightness = picolcd_get_brightness,
+ .check_fb = picolcd_check_bl_fb,
+};
+
+static int picolcd_init_backlight(struct picolcd_data *data, struct hid_report *report)
+{
+ struct device *dev = &data->hdev->dev;
+ struct backlight_device *bdev;
+ struct backlight_properties props;
+ if (!report)
+ return -ENODEV;
+ if (report->maxfield != 1 || report->field[0]->report_count != 1 ||
+ report->field[0]->report_size != 8) {
+ dev_err(dev, "unsupported BRIGHTNESS report");
+ return -EINVAL;
+ }
+
+ memset(&props, 0, sizeof(props));
+ props.max_brightness = 0xff;
+ bdev = backlight_device_register(dev_name(dev), dev, data,
+ &picolcd_blops, &props);
+ if (IS_ERR(bdev)) {
+ dev_err(dev, "failed to register backlight\n");
+ return PTR_ERR(bdev);
+ }
+ bdev->props.brightness = 0xff;
+ data->lcd_brightness = 0xff;
+ data->backlight = bdev;
+ picolcd_set_brightness(bdev);
+ return 0;
+}
+
+static void picolcd_exit_backlight(struct picolcd_data *data)
+{
+ struct backlight_device *bdev = data->backlight;
+
+ data->backlight = NULL;
+ if (bdev)
+ backlight_device_unregister(bdev);
+}
+
+static inline int picolcd_resume_backlight(struct picolcd_data *data)
+{
+ if (!data->backlight)
+ return 0;
+ return picolcd_set_brightness(data->backlight);
+}
+
+#ifdef CONFIG_PM
+static void picolcd_suspend_backlight(struct picolcd_data *data)
+{
+ int bl_power = data->lcd_power;
+ if (!data->backlight)
+ return;
+
+ data->backlight->props.power = FB_BLANK_POWERDOWN;
+ picolcd_set_brightness(data->backlight);
+ data->lcd_power = data->backlight->props.power = bl_power;
+}
+#endif /* CONFIG_PM */
+#else
+static inline int picolcd_init_backlight(struct picolcd_data *data,
+ struct hid_report *report)
+{
+ return 0;
+}
+static inline void picolcd_exit_backlight(struct picolcd_data *data)
+{
+}
+static inline int picolcd_resume_backlight(struct picolcd_data *data)
+{
+ return 0;
+}
+static inline void picolcd_suspend_backlight(struct picolcd_data *data)
+{
+}
+#endif /* CONFIG_HID_PICOLCD_BACKLIGHT */
+
+#ifdef CONFIG_HID_PICOLCD_LCD
+/*
+ * lcd class device
+ */
+static int picolcd_get_contrast(struct lcd_device *ldev)
+{
+ struct picolcd_data *data = lcd_get_data(ldev);
+ return data->lcd_contrast;
+}
+
+static int picolcd_set_contrast(struct lcd_device *ldev, int contrast)
+{
+ struct picolcd_data *data = lcd_get_data(ldev);
+ struct hid_report *report = picolcd_out_report(REPORT_CONTRAST, data->hdev);
+ unsigned long flags;
+
+ if (!report || report->maxfield != 1 || report->field[0]->report_count != 1)
+ return -ENODEV;
+
+ data->lcd_contrast = contrast & 0x0ff;
+ spin_lock_irqsave(&data->lock, flags);
+ hid_set_field(report->field[0], 0, data->lcd_contrast);
+ usbhid_submit_report(data->hdev, report, USB_DIR_OUT);
+ spin_unlock_irqrestore(&data->lock, flags);
+ return 0;
+}
+
+static int picolcd_check_lcd_fb(struct lcd_device *ldev, struct fb_info *fb)
+{
+ return fb && fb == picolcd_fbinfo((struct picolcd_data *)lcd_get_data(ldev));
+}
+
+static struct lcd_ops picolcd_lcdops = {
+ .get_contrast = picolcd_get_contrast,
+ .set_contrast = picolcd_set_contrast,
+ .check_fb = picolcd_check_lcd_fb,
+};
+
+static int picolcd_init_lcd(struct picolcd_data *data, struct hid_report *report)
+{
+ struct device *dev = &data->hdev->dev;
+ struct lcd_device *ldev;
+
+ if (!report)
+ return -ENODEV;
+ if (report->maxfield != 1 || report->field[0]->report_count != 1 ||
+ report->field[0]->report_size != 8) {
+ dev_err(dev, "unsupported CONTRAST report\n");
+ return -EINVAL;
+ }
+
+ ldev = lcd_device_register(dev_name(dev), dev, data, &picolcd_lcdops);
+ if (IS_ERR(ldev)) {
+ dev_err(dev, "failed to register LCD\n");
+ return PTR_ERR(ldev);
+ }
+ ldev->props.max_contrast = 0x0ff;
+ data->lcd_contrast = 0xe5;
+ data->lcd = ldev;
+ picolcd_set_contrast(ldev, 0xe5);
+ return 0;
+}
+
+static void picolcd_exit_lcd(struct picolcd_data *data)
+{
+ struct lcd_device *ldev = data->lcd;
+
+ data->lcd = NULL;
+ if (ldev)
+ lcd_device_unregister(ldev);
+}
+
+static inline int picolcd_resume_lcd(struct picolcd_data *data)
+{
+ if (!data->lcd)
+ return 0;
+ return picolcd_set_contrast(data->lcd, data->lcd_contrast);
+}
+#else
+static inline int picolcd_init_lcd(struct picolcd_data *data,
+ struct hid_report *report)
+{
+ return 0;
+}
+static inline void picolcd_exit_lcd(struct picolcd_data *data)
+{
+}
+static inline int picolcd_resume_lcd(struct picolcd_data *data)
+{
+ return 0;
+}
+#endif /* CONFIG_HID_PICOLCD_LCD */
+
+#ifdef CONFIG_HID_PICOLCD_LEDS
+/*
+ * LED class device
+ */
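+/* push the cached GPO bitmap in data->led_state out via the LED_STATE report */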
+static void picolcd_leds_set(struct picolcd_data *data)
+{
+ struct hid_report *report;
+ unsigned long flags;
+
+ if (!data->led[0])
+ return;
+ report = picolcd_out_report(REPORT_LED_STATE, data->hdev);
+ if (!report || report->maxfield != 1 || report->field[0]->report_count != 1)
+ return;
+
+ spin_lock_irqsave(&data->lock, flags);
+ hid_set_field(report->field[0], 0, data->led_state);
+ usbhid_submit_report(data->hdev, report, USB_DIR_OUT);
+ spin_unlock_irqrestore(&data->lock, flags);
+}
+
+static void picolcd_led_set_brightness(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct device *dev;
+ struct hid_device *hdev;
+ struct picolcd_data *data;
+ int i, state = 0;
+
+ dev = led_cdev->dev->parent;
+ hdev = container_of(dev, struct hid_device, dev);
+ data = hid_get_drvdata(hdev);
+ for (i = 0; i < 8; i++) {
+ if (led_cdev != data->led[i])
+ continue;
+ state = (data->led_state >> i) & 1;
+ if (value == LED_OFF && state) {
+ data->led_state &= ~(1 << i);
+ picolcd_leds_set(data);
+ } else if (value != LED_OFF && !state) {
+ data->led_state |= 1 << i;
+ picolcd_leds_set(data);
+ }
+ break;
+ }
+}
+
+static enum led_brightness picolcd_led_get_brightness(struct led_classdev *led_cdev)
+{
+ struct device *dev;
+ struct hid_device *hdev;
+ struct picolcd_data *data;
+ int i, value = 0;
+
+ dev = led_cdev->dev->parent;
+ hdev = container_of(dev, struct hid_device, dev);
+ data = hid_get_drvdata(hdev);
+ for (i = 0; i < 8; i++)
+ if (led_cdev == data->led[i]) {
+ value = (data->led_state >> i) & 1;
+ break;
+ }
+ return value ? LED_FULL : LED_OFF;
+}
+
+static int picolcd_init_leds(struct picolcd_data *data, struct hid_report *report)
+{
+ struct device *dev = &data->hdev->dev;
+ struct led_classdev *led;
+ size_t name_sz = strlen(dev_name(dev)) + 8;
+ char *name;
+ int i, ret = 0;
+
+ if (!report)
+ return -ENODEV;
+ if (report->maxfield != 1 || report->field[0]->report_count != 1 ||
+ report->field[0]->report_size != 8) {
+ dev_err(dev, "unsupported LED_STATE report\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < 8; i++) {
+ led = kzalloc(sizeof(struct led_classdev)+name_sz, GFP_KERNEL);
+ if (!led) {
+ dev_err(dev, "can't allocate memory for LED %d\n", i);
+ ret = -ENOMEM;
+ goto err;
+ }
+ name = (void *)(&led[1]);
+ snprintf(name, name_sz, "%s::GPO%d", dev_name(dev), i);
+ led->name = name;
+ led->brightness = 0;
+ led->max_brightness = 1;
+ led->brightness_get = picolcd_led_get_brightness;
+ led->brightness_set = picolcd_led_set_brightness;
+
+ data->led[i] = led;
+ ret = led_classdev_register(dev, data->led[i]);
+ if (ret) {
+ data->led[i] = NULL;
+ kfree(led);
+ dev_err(dev, "can't register LED %d\n", i);
+ goto err;
+ }
+ }
+ return 0;
+err:
+ for (i = 0; i < 8; i++)
+ if (data->led[i]) {
+ led = data->led[i];
+ data->led[i] = NULL;
+ led_classdev_unregister(led);
+ kfree(led);
+ }
+ return ret;
+}
+
+static void picolcd_exit_leds(struct picolcd_data *data)
+{
+ struct led_classdev *led;
+ int i;
+
+ for (i = 0; i < 8; i++) {
+ led = data->led[i];
+ data->led[i] = NULL;
+ if (!led)
+ continue;
+ led_classdev_unregister(led);
+ kfree(led);
+ }
+}
+
+#else
+static inline int picolcd_init_leds(struct picolcd_data *data,
+ struct hid_report *report)
+{
+ return 0;
+}
+static inline void picolcd_exit_leds(struct picolcd_data *data)
+{
+}
+static inline void picolcd_leds_set(struct picolcd_data *data)
+{
+}
+#endif /* CONFIG_HID_PICOLCD_LEDS */
+
+/*
+ * input class device
+ */
+static int picolcd_raw_keypad(struct picolcd_data *data,
+ struct hid_report *report, u8 *raw_data, int size)
+{
+ /*
+ * Keypad event
+ * First and second data bytes list currently pressed keys,
+ * 0x00 means no key and at most 2 keys may be pressed at same time
+ */
+ int i, j;
+
+ /* determine newly pressed keys */
+ for (i = 0; i < size; i++) {
+ unsigned int key_code;
+ if (raw_data[i] == 0)
+ continue;
+ for (j = 0; j < sizeof(data->pressed_keys); j++)
+ if (data->pressed_keys[j] == raw_data[i])
+ goto key_already_down;
+ for (j = 0; j < sizeof(data->pressed_keys); j++)
+ if (data->pressed_keys[j] == 0) {
+ data->pressed_keys[j] = raw_data[i];
+ break;
+ }
+ input_event(data->input_keys, EV_MSC, MSC_SCAN, raw_data[i]);
+ if (raw_data[i] < PICOLCD_KEYS)
+ key_code = data->keycode[raw_data[i]];
+ else
+ key_code = KEY_UNKNOWN;
+ if (key_code != KEY_UNKNOWN) {
+ dbg_hid(PICOLCD_NAME " got key press for %u:%d\n",
+ raw_data[i], key_code);
+ input_report_key(data->input_keys, key_code, 1);
+ }
+ input_sync(data->input_keys);
+key_already_down:
+ continue;
+ }
+
+ /* determine newly released keys */
+ for (j = 0; j < sizeof(data->pressed_keys); j++) {
+ unsigned int key_code;
+ if (data->pressed_keys[j] == 0)
+ continue;
+ for (i = 0; i < size; i++)
+ if (data->pressed_keys[j] == raw_data[i])
+ goto key_still_down;
+ input_event(data->input_keys, EV_MSC, MSC_SCAN, data->pressed_keys[j]);
+ if (data->pressed_keys[j] < PICOLCD_KEYS)
+ key_code = data->keycode[data->pressed_keys[j]];
+ else
+ key_code = KEY_UNKNOWN;
+ if (key_code != KEY_UNKNOWN) {
+ dbg_hid(PICOLCD_NAME " got key release for %u:%d\n",
+ data->pressed_keys[j], key_code);
+ input_report_key(data->input_keys, key_code, 0);
+ }
+ input_sync(data->input_keys);
+ data->pressed_keys[j] = 0;
+key_still_down:
+ continue;
+ }
+ return 1;
+}
+
+static int picolcd_raw_cir(struct picolcd_data *data,
+ struct hid_report *report, u8 *raw_data, int size)
+{
+ /* Need understanding of CIR data format to implement ... */
+ return 1;
+}
+
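+/*
+ * Query the device for its version via REPORT_VERSION and cache/report
+ * the firmware or bootloader revision.
+ */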
+static int picolcd_check_version(struct hid_device *hdev)
+{
+ struct picolcd_data *data = hid_get_drvdata(hdev);
+ struct picolcd_pending *verinfo;
+ int ret = 0;
+
+ if (!data)
+ return -ENODEV;
+
+ verinfo = picolcd_send_and_wait(hdev, REPORT_VERSION, NULL, 0);
+ if (!verinfo) {
+ dev_err(&hdev->dev, "no version response from PicoLCD\n");
+ return -ENODEV;
+ }
+
+ if (verinfo->raw_size == 2) {
+ data->version[0] = verinfo->raw_data[1];
+ data->version[1] = verinfo->raw_data[0];
+ if (data->status & PICOLCD_BOOTLOADER) {
+ dev_info(&hdev->dev, "PicoLCD, bootloader version %d.%d\n",
+ verinfo->raw_data[1], verinfo->raw_data[0]);
+ } else {
+ dev_info(&hdev->dev, "PicoLCD, firmware version %d.%d\n",
+ verinfo->raw_data[1], verinfo->raw_data[0]);
+ }
+ } else {
+ dev_err(&hdev->dev, "confused, got unexpected version response from PicoLCD\n");
+ ret = -EINVAL;
+ }
+ kfree(verinfo);
+ return ret;
+}
+
+/*
+ * Reset our device and wait for answer to VERSION request
+ */
+static int picolcd_reset(struct hid_device *hdev)
+{
+ struct picolcd_data *data = hid_get_drvdata(hdev);
+ struct hid_report *report = picolcd_out_report(REPORT_RESET, hdev);
+ unsigned long flags;
+ int error;
+
+ if (!data || !report || report->maxfield != 1)
+ return -ENODEV;
+
+ spin_lock_irqsave(&data->lock, flags);
+ if (hdev->product == USB_DEVICE_ID_PICOLCD_BOOTLOADER)
+ data->status |= PICOLCD_BOOTLOADER;
+
+ /* perform the reset */
+ hid_set_field(report->field[0], 0, 1);
+ usbhid_submit_report(hdev, report, USB_DIR_OUT);
+ spin_unlock_irqrestore(&data->lock, flags);
+
+ error = picolcd_check_version(hdev);
+ if (error)
+ return error;
+
+ picolcd_resume_lcd(data);
+ picolcd_resume_backlight(data);
+#ifdef CONFIG_HID_PICOLCD_FB
+ if (data->fb_info)
+ schedule_delayed_work(&data->fb_info->deferred_work, 0);
+#endif /* CONFIG_HID_PICOLCD_FB */
+
+ picolcd_leds_set(data);
+ return 0;
+}
+
+/*
+ * The "operation_mode" sysfs attribute
+ */
+static ssize_t picolcd_operation_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct picolcd_data *data = dev_get_drvdata(dev);
+
+ if (data->status & PICOLCD_BOOTLOADER)
+ return snprintf(buf, PAGE_SIZE, "[bootloader] lcd\n");
+ else
+ return snprintf(buf, PAGE_SIZE, "bootloader [lcd]\n");
+}
+
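+/*
+ * Writing "lcd" or "bootloader" requests a switch into the respective mode;
+ * the restart delay placed in the switch report is taken from the
+ * operation_mode_delay attribute (milliseconds).
+ */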
+static ssize_t picolcd_operation_mode_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct picolcd_data *data = dev_get_drvdata(dev);
+ struct hid_report *report = NULL;
+ size_t cnt = count;
+ int timeout = data->opmode_delay;
+ unsigned long flags;
+
+ if (cnt >= 3 && strncmp("lcd", buf, 3) == 0) {
+ if (data->status & PICOLCD_BOOTLOADER)
+ report = picolcd_out_report(REPORT_EXIT_FLASHER, data->hdev);
+ buf += 3;
+ cnt -= 3;
+ } else if (cnt >= 10 && strncmp("bootloader", buf, 10) == 0) {
+ if (!(data->status & PICOLCD_BOOTLOADER))
+ report = picolcd_out_report(REPORT_EXIT_KEYBOARD, data->hdev);
+ buf += 10;
+ cnt -= 10;
+ }
+ if (!report)
+ return -EINVAL;
+
+ while (cnt > 0 && (buf[cnt-1] == '\n' || buf[cnt-1] == '\r'))
+ cnt--;
+ if (cnt != 0)
+ return -EINVAL;
+
+ spin_lock_irqsave(&data->lock, flags);
+ hid_set_field(report->field[0], 0, timeout & 0xff);
+ hid_set_field(report->field[0], 1, (timeout >> 8) & 0xff);
+ usbhid_submit_report(data->hdev, report, USB_DIR_OUT);
+ spin_unlock_irqrestore(&data->lock, flags);
+ return count;
+}
+
+static DEVICE_ATTR(operation_mode, 0644, picolcd_operation_mode_show,
+ picolcd_operation_mode_store);
+
+/*
+ * The "operation_mode_delay" sysfs attribute
+ */
+static ssize_t picolcd_operation_mode_delay_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct picolcd_data *data = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%hu\n", data->opmode_delay);
+}
+
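+/* accepts a restart delay in milliseconds, limited to 0..30000 */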
+static ssize_t picolcd_operation_mode_delay_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct picolcd_data *data = dev_get_drvdata(dev);
+ unsigned u;
+ if (sscanf(buf, "%u", &u) != 1)
+ return -EINVAL;
+ if (u > 30000)
+ return -EINVAL;
+ else
+ data->opmode_delay = u;
+ return count;
+}
+
+static DEVICE_ATTR(operation_mode_delay, 0644, picolcd_operation_mode_delay_show,
+ picolcd_operation_mode_delay_store);
+
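+/*
+ * Example (illustrative only; the exact path depends on the device's entry
+ * under /sys/bus/hid/devices/):
+ *   echo 3000       > .../operation_mode_delay   # restart delay in ms
+ *   echo bootloader > .../operation_mode          # switch into flasher mode
+ */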
+
+#ifdef CONFIG_DEBUG_FS
+/*
+ * The "reset" file
+ */
+static int picolcd_debug_reset_show(struct seq_file *f, void *p)
+{
+ if (picolcd_fbinfo((struct picolcd_data *)f->private))
+ seq_printf(f, "all fb\n");
+ else
+ seq_printf(f, "all\n");
+ return 0;
+}
+
+static int picolcd_debug_reset_open(struct inode *inode, struct file *f)
+{
+ return single_open(f, picolcd_debug_reset_show, inode->i_private);
+}
+
+static ssize_t picolcd_debug_reset_write(struct file *f, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct picolcd_data *data = ((struct seq_file *)f->private_data)->private;
+ char buf[32];
+ size_t cnt = min(count, sizeof(buf)-1);
+ if (copy_from_user(buf, user_buf, cnt))
+ return -EFAULT;
+
+ while (cnt > 0 && (buf[cnt-1] == ' ' || buf[cnt-1] == '\n'))
+ cnt--;
+ buf[cnt] = '\0';
+ if (strcmp(buf, "all") == 0) {
+ picolcd_reset(data->hdev);
+ picolcd_fb_reset(data, 1);
+ } else if (strcmp(buf, "fb") == 0) {
+ picolcd_fb_reset(data, 1);
+ } else {
+ return -EINVAL;
+ }
+ return count;
+}
+
+static const struct file_operations picolcd_debug_reset_fops = {
+ .owner = THIS_MODULE,
+ .open = picolcd_debug_reset_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .write = picolcd_debug_reset_write,
+ .release = single_release,
+};
+
+/*
+ * The "eeprom" file
+ */
+static int picolcd_debug_eeprom_open(struct inode *i, struct file *f)
+{
+ f->private_data = i->i_private;
+ return 0;
+}
+
+static ssize_t picolcd_debug_eeprom_read(struct file *f, char __user *u,
+ size_t s, loff_t *off)
+{
+ struct picolcd_data *data = f->private_data;
+ struct picolcd_pending *resp;
+ u8 raw_data[3];
+ ssize_t ret = -EIO;
+
+ if (s == 0)
+ return -EINVAL;
+ if (*off > 0x0ff)
+ return 0;
+
+ /* prepare buffer with info about what we want to read (addr & len) */
+ raw_data[0] = *off & 0xff;
+ raw_data[1] = (*off >> 8) & 0xff;
+ raw_data[2] = s < 20 ? s : 20;
+ if (*off + raw_data[2] > 0xff)
+ raw_data[2] = 0x100 - *off;
+ resp = picolcd_send_and_wait(data->hdev, REPORT_EE_READ, raw_data,
+ sizeof(raw_data));
+ if (!resp)
+ return -EIO;
+
+ if (resp->in_report && resp->in_report->id == REPORT_EE_DATA) {
+ /* successful read :) */
+ ret = resp->raw_data[2];
+ if (ret > s)
+ ret = s;
+ if (copy_to_user(u, resp->raw_data+3, ret))
+ ret = -EFAULT;
+ else
+ *off += ret;
+ } /* anything else is some kind of IO error */
+
+ kfree(resp);
+ return ret;
+}
+
+static ssize_t picolcd_debug_eeprom_write(struct file *f, const char __user *u,
+ size_t s, loff_t *off)
+{
+ struct picolcd_data *data = f->private_data;
+ struct picolcd_pending *resp;
+ ssize_t ret = -EIO;
+ u8 raw_data[23];
+
+ if (s == 0)
+ return -EINVAL;
+ if (*off > 0x0ff)
+ return -ENOSPC;
+
+ memset(raw_data, 0, sizeof(raw_data));
+ raw_data[0] = *off & 0xff;
+ raw_data[1] = (*off >> 8) & 0xff;
+ raw_data[2] = s < 20 ? s : 20;
+ if (*off + raw_data[2] > 0xff)
+ raw_data[2] = 0x100 - *off;
+
+ if (copy_from_user(raw_data+3, u, raw_data[2]))
+ return -EFAULT;
+ resp = picolcd_send_and_wait(data->hdev, REPORT_EE_WRITE, raw_data,
+ sizeof(raw_data));
+
+ if (!resp)
+ return -EIO;
+
+ if (resp->in_report && resp->in_report->id == REPORT_EE_DATA) {
+ /* check if written data matches */
+ if (memcmp(raw_data, resp->raw_data, 3+raw_data[2]) == 0) {
+ *off += raw_data[2];
+ ret = raw_data[2];
+ }
+ }
+ kfree(resp);
+ return ret;
+}
+
+/*
+ * Notes:
+ * - read/write happens in chunks of at most 20 bytes; it is up to userspace
+ * to loop in order to transfer more data.
+ * - on a write error for an otherwise correct write request, the bytes
+ * that should have been written are left in an undefined state.
+ */
+static const struct file_operations picolcd_debug_eeprom_fops = {
+ .owner = THIS_MODULE,
+ .open = picolcd_debug_eeprom_open,
+ .read = picolcd_debug_eeprom_read,
+ .write = picolcd_debug_eeprom_write,
+ .llseek = generic_file_llseek,
+};
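+
+/*
+ * Example (illustrative; the path depends on the device's HID debugfs
+ * directory): dump the whole 256 byte EEPROM in 20 byte transactions:
+ *   dd if=/sys/kernel/debug/hid/<device>/eeprom bs=20 of=eeprom.bin
+ */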
+
+/*
+ * The "flash" file
+ */
+static int picolcd_debug_flash_open(struct inode *i, struct file *f)
+{
+ f->private_data = i->i_private;
+ return 0;
+}
+
+/* record a flash address to buf (bounds check to be done by caller) */
+static int _picolcd_flash_setaddr(struct picolcd_data *data, u8 *buf, long off)
+{
+ buf[0] = off & 0xff;
+ buf[1] = (off >> 8) & 0xff;
+ if (data->addr_sz == 3)
+ buf[2] = (off >> 16) & 0xff;
+ return data->addr_sz == 2 ? 2 : 3;
+}
+
+/* read a given size of data (bounds check to be done by caller) */
+static ssize_t _picolcd_flash_read(struct picolcd_data *data, int report_id,
+ char __user *u, size_t s, loff_t *off)
+{
+ struct picolcd_pending *resp;
+ u8 raw_data[4];
+ ssize_t ret = 0;
+ int len_off, err = -EIO;
+
+ while (s > 0) {
+ err = -EIO;
+ len_off = _picolcd_flash_setaddr(data, raw_data, *off);
+ raw_data[len_off] = s > 32 ? 32 : s;
+ resp = picolcd_send_and_wait(data->hdev, report_id, raw_data, len_off+1);
+ if (!resp || !resp->in_report)
+ goto skip;
+ if (resp->in_report->id == REPORT_MEMORY ||
+ resp->in_report->id == REPORT_BL_READ_MEMORY) {
+ if (memcmp(raw_data, resp->raw_data, len_off+1) != 0)
+ goto skip;
+ if (copy_to_user(u+ret, resp->raw_data+len_off+1, raw_data[len_off])) {
+ err = -EFAULT;
+ goto skip;
+ }
+ *off += raw_data[len_off];
+ s -= raw_data[len_off];
+ ret += raw_data[len_off];
+ err = 0;
+ }
+skip:
+ kfree(resp);
+ if (err)
+ return ret > 0 ? ret : err;
+ }
+ return ret;
+}
+
+static ssize_t picolcd_debug_flash_read(struct file *f, char __user *u,
+ size_t s, loff_t *off)
+{
+ struct picolcd_data *data = f->private_data;
+
+ if (s == 0)
+ return -EINVAL;
+ if (*off > 0x05fff)
+ return 0;
+ if (*off + s > 0x05fff)
+ s = 0x06000 - *off;
+
+ if (data->status & PICOLCD_BOOTLOADER)
+ return _picolcd_flash_read(data, REPORT_BL_READ_MEMORY, u, s, off);
+ else
+ return _picolcd_flash_read(data, REPORT_READ_MEMORY, u, s, off);
+}
+
+/* erase a block aligned to a 64-byte boundary */
+static ssize_t _picolcd_flash_erase64(struct picolcd_data *data, int report_id,
+ loff_t *off)
+{
+ struct picolcd_pending *resp;
+ u8 raw_data[3];
+ int len_off;
+ ssize_t ret = -EIO;
+
+ if (*off & 0x3f)
+ return -EINVAL;
+
+ len_off = _picolcd_flash_setaddr(data, raw_data, *off);
+ resp = picolcd_send_and_wait(data->hdev, report_id, raw_data, len_off);
+ if (!resp || !resp->in_report)
+ goto skip;
+ if (resp->in_report->id == REPORT_MEMORY ||
+ resp->in_report->id == REPORT_BL_ERASE_MEMORY) {
+ if (memcmp(raw_data, resp->raw_data, len_off) != 0)
+ goto skip;
+ ret = 0;
+ }
+skip:
+ kfree(resp);
+ return ret;
+}
+
+/* write a given size of data (bounds check to be done by caller) */
+static ssize_t _picolcd_flash_write(struct picolcd_data *data, int report_id,
+ const char __user *u, size_t s, loff_t *off)
+{
+ struct picolcd_pending *resp;
+ u8 raw_data[36];
+ ssize_t ret = 0;
+ int len_off, err = -EIO;
+
+ while (s > 0) {
+ err = -EIO;
+ len_off = _picolcd_flash_setaddr(data, raw_data, *off);
+ raw_data[len_off] = s > 32 ? 32 : s;
+ if (copy_from_user(raw_data+len_off+1, u, raw_data[len_off])) {
+ err = -EFAULT;
+ break;
+ }
+ resp = picolcd_send_and_wait(data->hdev, report_id, raw_data,
+ len_off+1+raw_data[len_off]);
+ if (!resp || !resp->in_report)
+ goto skip;
+ if (resp->in_report->id == REPORT_MEMORY ||
+ resp->in_report->id == REPORT_BL_WRITE_MEMORY) {
+ if (memcmp(raw_data, resp->raw_data, len_off+1+raw_data[len_off]) != 0)
+ goto skip;
+ *off += raw_data[len_off];
+ s -= raw_data[len_off];
+ ret += raw_data[len_off];
+ err = 0;
+ }
+skip:
+ kfree(resp);
+ if (err)
+ break;
+ }
+ return ret > 0 ? ret : err;
+}
+
+static ssize_t picolcd_debug_flash_write(struct file *f, const char __user *u,
+ size_t s, loff_t *off)
+{
+ struct picolcd_data *data = f->private_data;
+ ssize_t err, ret = 0;
+ int report_erase, report_write;
+
+ if (s == 0)
+ return -EINVAL;
+ if (*off > 0x5fff)
+ return -ENOSPC;
+ if (s & 0x3f)
+ return -EINVAL;
+ if (*off & 0x3f)
+ return -EINVAL;
+
+ if (data->status & PICOLCD_BOOTLOADER) {
+ report_erase = REPORT_BL_ERASE_MEMORY;
+ report_write = REPORT_BL_WRITE_MEMORY;
+ } else {
+ report_erase = REPORT_ERASE_MEMORY;
+ report_write = REPORT_WRITE_MEMORY;
+ }
+ mutex_lock(&data->mutex_flash);
+ while (s > 0) {
+ err = _picolcd_flash_erase64(data, report_erase, off);
+ if (err)
+ break;
+ err = _picolcd_flash_write(data, report_write, u, 64, off);
+ if (err < 0)
+ break;
+ ret += err;
+ *off += err;
+ s -= err;
+ if (err != 64)
+ break;
+ }
+ mutex_unlock(&data->mutex_flash);
+ return ret > 0 ? ret : err;
+}
+
+/*
+ * Notes:
+ * - concurrent writing is prevented by the mutex; all writes must be
+ * n*64 bytes and 64-byte aligned, each write being preceded by an
+ * ERASE which erases a 64-byte block.
+ * If less than requested was written or an error is returned for an
+ * otherwise correct write request, the next 64-byte block which should
+ * have been written is in an undefined state (mostly: original, erased,
+ * or (half-)written with a write error).
+ * - reading can happen without special restriction.
+ */
+static const struct file_operations picolcd_debug_flash_fops = {
+ .owner = THIS_MODULE,
+ .open = picolcd_debug_flash_open,
+ .read = picolcd_debug_flash_read,
+ .write = picolcd_debug_flash_write,
+ .llseek = generic_file_llseek,
+};
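+
+/*
+ * Example (illustrative; the path depends on the device's HID debugfs
+ * directory): read the first 4 KiB of flash in 64 byte chunks:
+ *   dd if=/sys/kernel/debug/hid/<device>/flash bs=64 count=64 of=flash.bin
+ */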
+
+
+/*
+ * Helper code for HID report level dumping/debugging
+ */
+static const char *error_codes[] = {
+ "success", "parameter missing", "data_missing", "block readonly",
+ "block not erasable", "block too big", "section overflow",
+ "invalid command length", "invalid data length",
+};
+
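+/* render data as space separated hex bytes into dst (newline + NUL terminated) */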
+static void dump_buff_as_hex(char *dst, size_t dst_sz, const u8 *data,
+ const size_t data_len)
+{
+ int i, j;
+ for (i = j = 0; i < data_len && j + 3 < dst_sz; i++) {
+ dst[j++] = hex_asc[(data[i] >> 4) & 0x0f];
+ dst[j++] = hex_asc[data[i] & 0x0f];
+ dst[j++] = ' ';
+ }
+ if (j < dst_sz) {
+ dst[j--] = '\0';
+ dst[j] = '\n';
+ } else
+ dst[j] = '\0';
+}
+
+static void picolcd_debug_out_report(struct picolcd_data *data,
+ struct hid_device *hdev, struct hid_report *report)
+{
+ u8 raw_data[70];
+ int raw_size = (report->size >> 3) + 1;
+ char *buff;
+#define BUFF_SZ 256
+
+ /* Avoid unnecessary overhead if debugfs is disabled */
+ if (!hdev->debug_events)
+ return;
+
+ buff = kmalloc(BUFF_SZ, GFP_ATOMIC);
+ if (!buff)
+ return;
+
+ snprintf(buff, BUFF_SZ, "\nout report %d (size %d) = ",
+ report->id, raw_size);
+ hid_debug_event(hdev, buff);
+ if (raw_size + 5 > sizeof(raw_data)) {
+ hid_debug_event(hdev, " TOO BIG\n");
+ kfree(buff); /* don't leak the debug buffer on this early return */
+ return;
+ } else {
+ raw_data[0] = report->id;
+ hid_output_report(report, raw_data);
+ dump_buff_as_hex(buff, BUFF_SZ, raw_data, raw_size);
+ hid_debug_event(hdev, buff);
+ }
+
+ switch (report->id) {
+ case REPORT_LED_STATE:
+ /* 1 data byte with GPO state */
+ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+ "REPORT_LED_STATE", report->id, raw_size-1);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tGPO state: 0x%02x\n", raw_data[1]);
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_BRIGHTNESS:
+ /* 1 data byte with brightness */
+ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+ "REPORT_BRIGHTNESS", report->id, raw_size-1);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tBrightness: 0x%02x\n", raw_data[1]);
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_CONTRAST:
+ /* 1 data byte with contrast */
+ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+ "REPORT_CONTRAST", report->id, raw_size-1);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tContrast: 0x%02x\n", raw_data[1]);
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_RESET:
+ /* 2 data bytes with reset duration in ms */
+ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+ "REPORT_RESET", report->id, raw_size-1);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tDuration: 0x%02x%02x (%dms)\n",
+ raw_data[2], raw_data[1], raw_data[2] << 8 | raw_data[1]);
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_LCD_CMD:
+ /* 63 data bytes with LCD commands */
+ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+ "REPORT_LCD_CMD", report->id, raw_size-1);
+ hid_debug_event(hdev, buff);
+ /* TODO: format decoding */
+ break;
+ case REPORT_LCD_DATA:
+ /* 63 data bytes with LCD data */
+ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+ "REPORT_LCD_DATA", report->id, raw_size-1);
+ /* TODO: format decoding */
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_LCD_CMD_DATA:
+ /* 63 data bytes with LCD commands and data */
+ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+ "REPORT_LCD_CMD_DATA", report->id, raw_size-1);
+ /* TODO: format decoding */
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_EE_READ:
+ /* 3 data bytes with read area description */
+ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+ "REPORT_EE_READ", report->id, raw_size-1);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x\n",
+ raw_data[2], raw_data[1]);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[3]);
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_EE_WRITE:
+ /* 3+1..20 data bytes with write area description */
+ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+ "REPORT_EE_WRITE", report->id, raw_size-1);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x\n",
+ raw_data[2], raw_data[1]);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[3]);
+ hid_debug_event(hdev, buff);
+ if (raw_data[3] == 0) {
+ snprintf(buff, BUFF_SZ, "\tNo data\n");
+ } else if (raw_data[3] + 4 <= raw_size) {
+ snprintf(buff, BUFF_SZ, "\tData: ");
+ hid_debug_event(hdev, buff);
+ dump_buff_as_hex(buff, BUFF_SZ, raw_data+4, raw_data[3]);
+ } else {
+ snprintf(buff, BUFF_SZ, "\tData overflowed\n");
+ }
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_ERASE_MEMORY:
+ case REPORT_BL_ERASE_MEMORY:
+ /* 3 data bytes with pointer inside erase block */
+ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+ "REPORT_ERASE_MEMORY", report->id, raw_size-1);
+ hid_debug_event(hdev, buff);
+ switch (data->addr_sz) {
+ case 2:
+ snprintf(buff, BUFF_SZ, "\tAddress inside 64 byte block: 0x%02x%02x\n",
+ raw_data[2], raw_data[1]);
+ break;
+ case 3:
+ snprintf(buff, BUFF_SZ, "\tAddress inside 64 byte block: 0x%02x%02x%02x\n",
+ raw_data[3], raw_data[2], raw_data[1]);
+ break;
+ default:
+ snprintf(buff, BUFF_SZ, "\tNot supported\n");
+ }
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_READ_MEMORY:
+ case REPORT_BL_READ_MEMORY:
+ /* 4 data bytes with read area description */
+ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+ "REPORT_READ_MEMORY", report->id, raw_size-1);
+ hid_debug_event(hdev, buff);
+ switch (data->addr_sz) {
+ case 2:
+ snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x\n",
+ raw_data[2], raw_data[1]);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[3]);
+ break;
+ case 3:
+ snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x%02x\n",
+ raw_data[3], raw_data[2], raw_data[1]);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[4]);
+ break;
+ default:
+ snprintf(buff, BUFF_SZ, "\tNot supported\n");
+ }
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_WRITE_MEMORY:
+ case REPORT_BL_WRITE_MEMORY:
+ /* 4+1..32 data bytes with write area description */
+ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+ "REPORT_WRITE_MEMORY", report->id, raw_size-1);
+ hid_debug_event(hdev, buff);
+ switch (data->addr_sz) {
+ case 2:
+ snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x\n",
+ raw_data[2], raw_data[1]);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[3]);
+ hid_debug_event(hdev, buff);
+ if (raw_data[3] == 0) {
+ snprintf(buff, BUFF_SZ, "\tNo data\n");
+ } else if (raw_data[3] + 4 <= raw_size) {
+ snprintf(buff, BUFF_SZ, "\tData: ");
+ hid_debug_event(hdev, buff);
+ dump_buff_as_hex(buff, BUFF_SZ, raw_data+4, raw_data[3]);
+ } else {
+ snprintf(buff, BUFF_SZ, "\tData overflowed\n");
+ }
+ break;
+ case 3:
+ snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x%02x\n",
+ raw_data[3], raw_data[2], raw_data[1]);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[4]);
+ hid_debug_event(hdev, buff);
+ if (raw_data[4] == 0) {
+ snprintf(buff, BUFF_SZ, "\tNo data\n");
+ } else if (raw_data[4] + 5 <= raw_size) {
+ snprintf(buff, BUFF_SZ, "\tData: ");
+ hid_debug_event(hdev, buff);
+ dump_buff_as_hex(buff, BUFF_SZ, raw_data+5, raw_data[4]);
+ } else {
+ snprintf(buff, BUFF_SZ, "\tData overflowed\n");
+ }
+ break;
+ default:
+ snprintf(buff, BUFF_SZ, "\tNot supported\n");
+ }
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_SPLASH_RESTART:
+ /* TODO */
+ break;
+ case REPORT_EXIT_KEYBOARD:
+ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+ "REPORT_EXIT_KEYBOARD", report->id, raw_size-1);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tRestart delay: %dms (0x%02x%02x)\n",
+ raw_data[1] | (raw_data[2] << 8),
+ raw_data[2], raw_data[1]);
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_VERSION:
+ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+ "REPORT_VERSION", report->id, raw_size-1);
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_DEVID:
+ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+ "REPORT_DEVID", report->id, raw_size-1);
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_SPLASH_SIZE:
+ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+ "REPORT_SPLASH_SIZE", report->id, raw_size-1);
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_HOOK_VERSION:
+ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+ "REPORT_HOOK_VERSION", report->id, raw_size-1);
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_EXIT_FLASHER:
+ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+ "REPORT_EXIT_FLASHER", report->id, raw_size-1);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tRestart delay: %dms (0x%02x%02x)\n",
+ raw_data[1] | (raw_data[2] << 8),
+ raw_data[2], raw_data[1]);
+ hid_debug_event(hdev, buff);
+ break;
+ default:
+ snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n",
+ "<unknown>", report->id, raw_size-1);
+ hid_debug_event(hdev, buff);
+ break;
+ }
+ wake_up_interruptible(&hdev->debug_wait);
+ kfree(buff);
+}
+
+static void picolcd_debug_raw_event(struct picolcd_data *data,
+ struct hid_device *hdev, struct hid_report *report,
+ u8 *raw_data, int size)
+{
+ char *buff;
+
+#define BUFF_SZ 256
+ /* Avoid unnecessary overhead if debugfs is disabled */
+ if (!hdev->debug_events)
+ return;
+
+ buff = kmalloc(BUFF_SZ, GFP_ATOMIC);
+ if (!buff)
+ return;
+
+ switch (report->id) {
+ case REPORT_ERROR_CODE:
+ /* 2 data bytes with affected report and error code */
+ snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n",
+ "REPORT_ERROR_CODE", report->id, size-1);
+ hid_debug_event(hdev, buff);
+ if (raw_data[2] < ARRAY_SIZE(error_codes))
+ snprintf(buff, BUFF_SZ, "\tError code 0x%02x (%s) in reply to report 0x%02x\n",
+ raw_data[2], error_codes[raw_data[2]], raw_data[1]);
+ else
+ snprintf(buff, BUFF_SZ, "\tError code 0x%02x in reply to report 0x%02x\n",
+ raw_data[2], raw_data[1]);
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_KEY_STATE:
+ /* 2 data bytes with key state */
+ snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n",
+ "REPORT_KEY_STATE", report->id, size-1);
+ hid_debug_event(hdev, buff);
+ if (raw_data[1] == 0)
+ snprintf(buff, BUFF_SZ, "\tNo key pressed\n");
+ else if (raw_data[2] == 0)
+ snprintf(buff, BUFF_SZ, "\tOne key pressed: 0x%02x (%d)\n",
+ raw_data[1], raw_data[1]);
+ else
+ snprintf(buff, BUFF_SZ, "\tTwo keys pressed: 0x%02x (%d), 0x%02x (%d)\n",
+ raw_data[1], raw_data[1], raw_data[2], raw_data[2]);
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_IR_DATA:
+ /* Up to 20 bytes of IR scancode data */
+ snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n",
+ "REPORT_IR_DATA", report->id, size-1);
+ hid_debug_event(hdev, buff);
+ if (raw_data[1] == 0) {
+ snprintf(buff, BUFF_SZ, "\tUnexpectedly 0 data length\n");
+ hid_debug_event(hdev, buff);
+ } else if (raw_data[1] + 1 <= size) {
+ snprintf(buff, BUFF_SZ, "\tData length: %d\n\tIR Data: ",
+ raw_data[1]-1);
+ hid_debug_event(hdev, buff);
+ dump_buff_as_hex(buff, BUFF_SZ, raw_data+2, raw_data[1]-1);
+ hid_debug_event(hdev, buff);
+ } else {
+ snprintf(buff, BUFF_SZ, "\tOverflowing data length: %d\n",
+ raw_data[1]-1);
+ hid_debug_event(hdev, buff);
+ }
+ break;
+ case REPORT_EE_DATA:
+ /* Data buffer in response to REPORT_EE_READ or REPORT_EE_WRITE */
+ snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n",
+ "REPORT_EE_DATA", report->id, size-1);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x\n",
+ raw_data[2], raw_data[1]);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[3]);
+ hid_debug_event(hdev, buff);
+ if (raw_data[3] == 0) {
+ snprintf(buff, BUFF_SZ, "\tNo data\n");
+ hid_debug_event(hdev, buff);
+ } else if (raw_data[3] + 4 <= size) {
+ snprintf(buff, BUFF_SZ, "\tData: ");
+ hid_debug_event(hdev, buff);
+ dump_buff_as_hex(buff, BUFF_SZ, raw_data+4, raw_data[3]);
+ hid_debug_event(hdev, buff);
+ } else {
+ snprintf(buff, BUFF_SZ, "\tData overflowed\n");
+ hid_debug_event(hdev, buff);
+ }
+ break;
+ case REPORT_MEMORY:
+ /* Data buffer in response to REPORT_READ_MEMORY or REPORT_WRITE_MEMORY */
+ snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n",
+ "REPORT_MEMORY", report->id, size-1);
+ hid_debug_event(hdev, buff);
+ switch (data->addr_sz) {
+ case 2:
+ snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x\n",
+ raw_data[2], raw_data[1]);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[3]);
+ hid_debug_event(hdev, buff);
+ if (raw_data[3] == 0) {
+ snprintf(buff, BUFF_SZ, "\tNo data\n");
+ } else if (raw_data[3] + 4 <= size) {
+ snprintf(buff, BUFF_SZ, "\tData: ");
+ hid_debug_event(hdev, buff);
+ dump_buff_as_hex(buff, BUFF_SZ, raw_data+4, raw_data[3]);
+ } else {
+ snprintf(buff, BUFF_SZ, "\tData overflowed\n");
+ }
+ break;
+ case 3:
+ snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x%02x\n",
+ raw_data[3], raw_data[2], raw_data[1]);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[4]);
+ hid_debug_event(hdev, buff);
+ if (raw_data[4] == 0) {
+ snprintf(buff, BUFF_SZ, "\tNo data\n");
+ } else if (raw_data[4] + 5 <= size) {
+ snprintf(buff, BUFF_SZ, "\tData: ");
+ hid_debug_event(hdev, buff);
+ dump_buff_as_hex(buff, BUFF_SZ, raw_data+5, raw_data[4]);
+ } else {
+ snprintf(buff, BUFF_SZ, "\tData overflowed\n");
+ }
+ break;
+ default:
+ snprintf(buff, BUFF_SZ, "\tNot supported\n");
+ }
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_VERSION:
+ snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n",
+ "REPORT_VERSION", report->id, size-1);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tFirmware version: %d.%d\n",
+ raw_data[2], raw_data[1]);
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_BL_ERASE_MEMORY:
+ snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n",
+ "REPORT_BL_ERASE_MEMORY", report->id, size-1);
+ hid_debug_event(hdev, buff);
+ /* TODO */
+ break;
+ case REPORT_BL_READ_MEMORY:
+ snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n",
+ "REPORT_BL_READ_MEMORY", report->id, size-1);
+ hid_debug_event(hdev, buff);
+ /* TODO */
+ break;
+ case REPORT_BL_WRITE_MEMORY:
+ snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n",
+ "REPORT_BL_WRITE_MEMORY", report->id, size-1);
+ hid_debug_event(hdev, buff);
+ /* TODO */
+ break;
+ case REPORT_DEVID:
+ snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n",
+ "REPORT_DEVID", report->id, size-1);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tSerial: 0x%02x%02x%02x%02x\n",
+ raw_data[1], raw_data[2], raw_data[3], raw_data[4]);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tType: 0x%02x\n",
+ raw_data[5]);
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_SPLASH_SIZE:
+ snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n",
+ "REPORT_SPLASH_SIZE", report->id, size-1);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tTotal splash space: %d\n",
+ (raw_data[2] << 8) | raw_data[1]);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tUsed splash space: %d\n",
+ (raw_data[4] << 8) | raw_data[3]);
+ hid_debug_event(hdev, buff);
+ break;
+ case REPORT_HOOK_VERSION:
+ snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n",
+ "REPORT_HOOK_VERSION", report->id, size-1);
+ hid_debug_event(hdev, buff);
+ snprintf(buff, BUFF_SZ, "\tFirmware version: %d.%d\n",
+ raw_data[1], raw_data[2]);
+ hid_debug_event(hdev, buff);
+ break;
+ default:
+ snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n",
+ "<unknown>", report->id, size-1);
+ hid_debug_event(hdev, buff);
+ break;
+ }
+ wake_up_interruptible(&hdev->debug_wait);
+ kfree(buff);
+}
+
+static void picolcd_init_devfs(struct picolcd_data *data,
+ struct hid_report *eeprom_r, struct hid_report *eeprom_w,
+ struct hid_report *flash_r, struct hid_report *flash_w,
+ struct hid_report *reset)
+{
+ struct hid_device *hdev = data->hdev;
+
+ mutex_init(&data->mutex_flash);
+
+ /* reset */
+ if (reset)
+ data->debug_reset = debugfs_create_file("reset", 0600,
+ hdev->debug_dir, data, &picolcd_debug_reset_fops);
+
+ /* eeprom */
+ if (eeprom_r || eeprom_w)
+ data->debug_eeprom = debugfs_create_file("eeprom",
+ (eeprom_w ? S_IWUSR : 0) | (eeprom_r ? S_IRUSR : 0),
+ hdev->debug_dir, data, &picolcd_debug_eeprom_fops);
+
+ /* flash */
+ if (flash_r && flash_r->maxfield == 1 && flash_r->field[0]->report_size == 8)
+ data->addr_sz = flash_r->field[0]->report_count - 1;
+ else
+ data->addr_sz = -1;
+ if (data->addr_sz == 2 || data->addr_sz == 3) {
+ data->debug_flash = debugfs_create_file("flash",
+ (flash_w ? S_IWUSR : 0) | (flash_r ? S_IRUSR : 0),
+ hdev->debug_dir, data, &picolcd_debug_flash_fops);
+ } else if (flash_r || flash_w)
+ dev_warn(&hdev->dev, "Unexpected FLASH access reports, "
+ "please submit rdesc for review\n");
+}
+
+static void picolcd_exit_devfs(struct picolcd_data *data)
+{
+ struct dentry *dent;
+
+ dent = data->debug_reset;
+ data->debug_reset = NULL;
+ if (dent)
+ debugfs_remove(dent);
+ dent = data->debug_eeprom;
+ data->debug_eeprom = NULL;
+ if (dent)
+ debugfs_remove(dent);
+ dent = data->debug_flash;
+ data->debug_flash = NULL;
+ if (dent)
+ debugfs_remove(dent);
+ mutex_destroy(&data->mutex_flash);
+}
+#else
+static inline void picolcd_debug_raw_event(struct picolcd_data *data,
+ struct hid_device *hdev, struct hid_report *report,
+ u8 *raw_data, int size)
+{
+}
+static inline void picolcd_init_devfs(struct picolcd_data *data,
+ struct hid_report *eeprom_r, struct hid_report *eeprom_w,
+ struct hid_report *flash_r, struct hid_report *flash_w,
+ struct hid_report *reset)
+{
+}
+static inline void picolcd_exit_devfs(struct picolcd_data *data)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+
+/*
+ * Handle raw report as sent by device
+ */
+static int picolcd_raw_event(struct hid_device *hdev,
+ struct hid_report *report, u8 *raw_data, int size)
+{
+ struct picolcd_data *data = hid_get_drvdata(hdev);
+ unsigned long flags;
+ int ret = 0;
+
+ if (!data)
+ return 1;
+
+ if (report->id == REPORT_KEY_STATE) {
+ if (data->input_keys)
+ ret = picolcd_raw_keypad(data, report, raw_data+1, size-1);
+ } else if (report->id == REPORT_IR_DATA) {
+ if (data->input_cir)
+ ret = picolcd_raw_cir(data, report, raw_data+1, size-1);
+ } else {
+ spin_lock_irqsave(&data->lock, flags);
+ /*
+ * We let the caller of picolcd_send_and_wait() check if the
+ * report we got is one of the expected ones or not.
+ */
+ if (data->pending) {
+ memcpy(data->pending->raw_data, raw_data+1, size-1);
+ data->pending->raw_size = size-1;
+ data->pending->in_report = report;
+ complete(&data->pending->ready);
+ }
+ spin_unlock_irqrestore(&data->lock, flags);
+ }
+
+ picolcd_debug_raw_event(data, hdev, report, raw_data, size);
+ return 1;
+}
+
+#ifdef CONFIG_PM
+static int picolcd_suspend(struct hid_device *hdev, pm_message_t message)
+{
+ if (message.event & PM_EVENT_AUTO)
+ return 0;
+
+ picolcd_suspend_backlight(hid_get_drvdata(hdev));
+ dbg_hid(PICOLCD_NAME " device ready for suspend\n");
+ return 0;
+}
+
+static int picolcd_resume(struct hid_device *hdev)
+{
+ int ret;
+ ret = picolcd_resume_backlight(hid_get_drvdata(hdev));
+ if (ret)
+ dbg_hid(PICOLCD_NAME " restoring backlight failed: %d\n", ret);
+ return 0;
+}
+
+static int picolcd_reset_resume(struct hid_device *hdev)
+{
+ int ret;
+ ret = picolcd_reset(hdev);
+ if (ret)
+ dbg_hid(PICOLCD_NAME " resetting our device failed: %d\n", ret);
+ ret = picolcd_fb_reset(hid_get_drvdata(hdev), 0);
+ if (ret)
+ dbg_hid(PICOLCD_NAME " restoring framebuffer content failed: %d\n", ret);
+ ret = picolcd_resume_lcd(hid_get_drvdata(hdev));
+ if (ret)
+ dbg_hid(PICOLCD_NAME " restoring lcd failed: %d\n", ret);
+ ret = picolcd_resume_backlight(hid_get_drvdata(hdev));
+ if (ret)
+ dbg_hid(PICOLCD_NAME " restoring backlight failed: %d\n", ret);
+ picolcd_leds_set(hid_get_drvdata(hdev));
+ return 0;
+}
+#endif
+
+/* initialize keypad input device */
+static int picolcd_init_keys(struct picolcd_data *data,
+ struct hid_report *report)
+{
+ struct hid_device *hdev = data->hdev;
+ struct input_dev *idev;
+ int error, i;
+
+ if (!report)
+ return -ENODEV;
+ if (report->maxfield != 1 || report->field[0]->report_count != 2 ||
+ report->field[0]->report_size != 8) {
+ dev_err(&hdev->dev, "unsupported KEY_STATE report\n");
+ return -EINVAL;
+ }
+
+ idev = input_allocate_device();
+ if (idev == NULL) {
+ dev_err(&hdev->dev, "failed to allocate input device\n");
+ return -ENOMEM;
+ }
+ input_set_drvdata(idev, hdev);
+ memcpy(data->keycode, def_keymap, sizeof(def_keymap));
+ idev->name = hdev->name;
+ idev->phys = hdev->phys;
+ idev->uniq = hdev->uniq;
+ idev->id.bustype = hdev->bus;
+ idev->id.vendor = hdev->vendor;
+ idev->id.product = hdev->product;
+ idev->id.version = hdev->version;
+ idev->dev.parent = hdev->dev.parent;
+ idev->keycode = &data->keycode;
+ idev->keycodemax = PICOLCD_KEYS;
+ idev->keycodesize = sizeof(data->keycode[0]);
+ input_set_capability(idev, EV_MSC, MSC_SCAN);
+ set_bit(EV_REP, idev->evbit);
+ for (i = 0; i < PICOLCD_KEYS; i++)
+ input_set_capability(idev, EV_KEY, data->keycode[i]);
+ error = input_register_device(idev);
+ if (error) {
+ dev_err(&hdev->dev, "error registering the input device\n");
+ input_free_device(idev);
+ return error;
+ }
+ data->input_keys = idev;
+ return 0;
+}
+
+static void picolcd_exit_keys(struct picolcd_data *data)
+{
+ struct input_dev *idev = data->input_keys;
+
+ data->input_keys = NULL;
+ if (idev)
+ input_unregister_device(idev);
+}
+
+/* initialize CIR input device */
+static inline int picolcd_init_cir(struct picolcd_data *data, struct hid_report *report)
+{
+ /* support not implemented yet */
+ return 0;
+}
+
+static inline void picolcd_exit_cir(struct picolcd_data *data)
+{
+}
+
+static int picolcd_probe_lcd(struct hid_device *hdev, struct picolcd_data *data)
+{
+ int error;
+
+ error = picolcd_check_version(hdev);
+ if (error)
+ return error;
+
+ if (data->version[0] != 0 && data->version[1] != 3)
+ dev_info(&hdev->dev, "Device with untested firmware revision, "
+ "please submit /sys/kernel/debug/hid/%s/rdesc for this device.\n",
+ dev_name(&hdev->dev));
+
+ /* Setup keypad input device */
+ error = picolcd_init_keys(data, picolcd_in_report(REPORT_KEY_STATE, hdev));
+ if (error)
+ goto err;
+
+ /* Setup CIR input device */
+ error = picolcd_init_cir(data, picolcd_in_report(REPORT_IR_DATA, hdev));
+ if (error)
+ goto err;
+
+ /* Set up the framebuffer device */
+ error = picolcd_init_framebuffer(data);
+ if (error)
+ goto err;
+
+ /* Setup lcd class device */
+ error = picolcd_init_lcd(data, picolcd_out_report(REPORT_CONTRAST, hdev));
+ if (error)
+ goto err;
+
+ /* Setup backlight class device */
+ error = picolcd_init_backlight(data, picolcd_out_report(REPORT_BRIGHTNESS, hdev));
+ if (error)
+ goto err;
+
+ /* Setup the LED class devices */
+ error = picolcd_init_leds(data, picolcd_out_report(REPORT_LED_STATE, hdev));
+ if (error)
+ goto err;
+
+ picolcd_init_devfs(data, picolcd_out_report(REPORT_EE_READ, hdev),
+ picolcd_out_report(REPORT_EE_WRITE, hdev),
+ picolcd_out_report(REPORT_READ_MEMORY, hdev),
+ picolcd_out_report(REPORT_WRITE_MEMORY, hdev),
+ picolcd_out_report(REPORT_RESET, hdev));
+ return 0;
+err:
+ picolcd_exit_leds(data);
+ picolcd_exit_backlight(data);
+ picolcd_exit_lcd(data);
+ picolcd_exit_framebuffer(data);
+ picolcd_exit_cir(data);
+ picolcd_exit_keys(data);
+ return error;
+}
+
+static int picolcd_probe_bootloader(struct hid_device *hdev, struct picolcd_data *data)
+{
+ int error;
+
+ error = picolcd_check_version(hdev);
+ if (error)
+ return error;
+
+ if (data->version[0] != 1 && data->version[1] != 0)
+ dev_info(&hdev->dev, "Device with untested bootloader revision, "
+ "please submit /sys/kernel/debug/hid/%s/rdesc for this device.\n",
+ dev_name(&hdev->dev));
+
+ picolcd_init_devfs(data, NULL, NULL,
+ picolcd_out_report(REPORT_BL_READ_MEMORY, hdev),
+ picolcd_out_report(REPORT_BL_WRITE_MEMORY, hdev), NULL);
+ return 0;
+}
+
+static int picolcd_probe(struct hid_device *hdev,
+ const struct hid_device_id *id)
+{
+ struct picolcd_data *data;
+ int error = -ENOMEM;
+
+ dbg_hid(PICOLCD_NAME " hardware probe...\n");
+
+ /*
+ * Let's allocate the picolcd data structure, set some reasonable
+ * defaults, and associate it with the device
+ */
+ data = kzalloc(sizeof(struct picolcd_data), GFP_KERNEL);
+ if (data == NULL) {
+ dev_err(&hdev->dev, "can't allocate space for Minibox PicoLCD device data\n");
+ error = -ENOMEM;
+ goto err_no_cleanup;
+ }
+
+ spin_lock_init(&data->lock);
+ mutex_init(&data->mutex);
+ data->hdev = hdev;
+ data->opmode_delay = 5000;
+ if (hdev->product == USB_DEVICE_ID_PICOLCD_BOOTLOADER)
+ data->status |= PICOLCD_BOOTLOADER;
+ hid_set_drvdata(hdev, data);
+
+ /* Parse the device reports and start it up */
+ error = hid_parse(hdev);
+ if (error) {
+ dev_err(&hdev->dev, "device report parse failed\n");
+ goto err_cleanup_data;
+ }
+
+ /*
+ * We don't use hidinput but hid_hw_start() fails if nothing is
+ * claimed. So spoof claimed input.
+ */
+ hdev->claimed = HID_CLAIMED_INPUT;
+ error = hid_hw_start(hdev, 0);
+ hdev->claimed = 0;
+ if (error) {
+ dev_err(&hdev->dev, "hardware start failed\n");
+ goto err_cleanup_data;
+ }
+
+ error = hdev->ll_driver->open(hdev);
+ if (error) {
+ dev_err(&hdev->dev, "failed to open input interrupt pipe for key and IR events\n");
+ goto err_cleanup_hid_hw;
+ }
+
+ error = device_create_file(&hdev->dev, &dev_attr_operation_mode_delay);
+ if (error) {
+ dev_err(&hdev->dev, "failed to create sysfs attributes\n");
+ goto err_cleanup_hid_ll;
+ }
+
+ error = device_create_file(&hdev->dev, &dev_attr_operation_mode);
+ if (error) {
+ dev_err(&hdev->dev, "failed to create sysfs attributes\n");
+ goto err_cleanup_sysfs1;
+ }
+
+ if (data->status & PICOLCD_BOOTLOADER)
+ error = picolcd_probe_bootloader(hdev, data);
+ else
+ error = picolcd_probe_lcd(hdev, data);
+ if (error)
+ goto err_cleanup_sysfs2;
+
+ dbg_hid(PICOLCD_NAME " activated and initialized\n");
+ return 0;
+
+err_cleanup_sysfs2:
+ device_remove_file(&hdev->dev, &dev_attr_operation_mode);
+err_cleanup_sysfs1:
+ device_remove_file(&hdev->dev, &dev_attr_operation_mode_delay);
+err_cleanup_hid_ll:
+ hdev->ll_driver->close(hdev);
+err_cleanup_hid_hw:
+ hid_hw_stop(hdev);
+err_cleanup_data:
+ kfree(data);
+err_no_cleanup:
+ hid_set_drvdata(hdev, NULL);
+
+ return error;
+}
+
+static void picolcd_remove(struct hid_device *hdev)
+{
+ struct picolcd_data *data = hid_get_drvdata(hdev);
+ unsigned long flags;
+
+ dbg_hid(PICOLCD_NAME " hardware remove...\n");
+ spin_lock_irqsave(&data->lock, flags);
+ data->status |= PICOLCD_FAILED;
+ spin_unlock_irqrestore(&data->lock, flags);
+
+ picolcd_exit_devfs(data);
+ device_remove_file(&hdev->dev, &dev_attr_operation_mode);
+ device_remove_file(&hdev->dev, &dev_attr_operation_mode_delay);
+ hdev->ll_driver->close(hdev);
+ hid_hw_stop(hdev);
+ hid_set_drvdata(hdev, NULL);
+
+ /* Shortcut potential pending reply that will never arrive */
+ spin_lock_irqsave(&data->lock, flags);
+ if (data->pending)
+ complete(&data->pending->ready);
+ spin_unlock_irqrestore(&data->lock, flags);
+
+ /* Cleanup LED */
+ picolcd_exit_leds(data);
+ /* Clean up the framebuffer */
+ picolcd_exit_backlight(data);
+ picolcd_exit_lcd(data);
+ picolcd_exit_framebuffer(data);
+ /* Cleanup input */
+ picolcd_exit_cir(data);
+ picolcd_exit_keys(data);
+
+ mutex_destroy(&data->mutex);
+ /* Finally, clean up the picolcd data itself */
+ kfree(data);
+}
+
+static const struct hid_device_id picolcd_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD_BOOTLOADER) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, picolcd_devices);
+
+static struct hid_driver picolcd_driver = {
+ .name = "hid-picolcd",
+ .id_table = picolcd_devices,
+ .probe = picolcd_probe,
+ .remove = picolcd_remove,
+ .raw_event = picolcd_raw_event,
+#ifdef CONFIG_PM
+ .suspend = picolcd_suspend,
+ .resume = picolcd_resume,
+ .reset_resume = picolcd_reset_resume,
+#endif
+};
+
+static int __init picolcd_init(void)
+{
+ return hid_register_driver(&picolcd_driver);
+}
+
+static void __exit picolcd_exit(void)
+{
+ hid_unregister_driver(&picolcd_driver);
+}
+
+module_init(picolcd_init);
+module_exit(picolcd_exit);
+MODULE_DESCRIPTION("Minibox graphics PicoLCD Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hid/hid-prodikeys.c b/drivers/hid/hid-prodikeys.c
new file mode 100644
index 000000000000..845f428b8090
--- /dev/null
+++ b/drivers/hid/hid-prodikeys.c
@@ -0,0 +1,910 @@
+/*
+ * HID driver for the Prodikeys PC-MIDI Keyboard
+ * providing MIDI and extra multimedia key functionality
+ *
+ * Copyright (c) 2009 Don Prince <dhprince.devel@yahoo.co.uk>
+ *
+ * Controls for Octave Shift Up/Down, Channel, and
+ * Sustain Duration are available via sysfs.
+ *
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+#include <linux/mutex.h>
+#include <linux/hid.h>
+#include <sound/core.h>
+#include <sound/initval.h>
+#include <sound/rawmidi.h>
+#include "usbhid/usbhid.h"
+#include "hid-ids.h"
+
+
+#define pk_debug(format, arg...) \
+ pr_debug("hid-prodikeys: " format "\n" , ## arg)
+#define pk_error(format, arg...) \
+ pr_err("hid-prodikeys: " format "\n" , ## arg)
+
+struct pcmidi_snd;
+
+struct pk_device {
+ unsigned long quirks;
+
+ struct hid_device *hdev;
+ struct pcmidi_snd *pm; /* pcmidi device context */
+};
+
+struct pcmidi_sustain {
+ unsigned long in_use;
+ struct pcmidi_snd *pm;
+ struct timer_list timer;
+ unsigned char status;
+ unsigned char note;
+ unsigned char velocity;
+};
+
+#define PCMIDI_SUSTAINED_MAX 32
+struct pcmidi_snd {
+ struct pk_device *pk;
+ unsigned short ifnum;
+ struct hid_report *pcmidi_report6;
+ struct input_dev *input_ep82;
+ unsigned short midi_mode;
+ unsigned short midi_sustain_mode;
+ unsigned short midi_sustain;
+ unsigned short midi_channel;
+ short midi_octave;
+ struct pcmidi_sustain sustained_notes[PCMIDI_SUSTAINED_MAX];
+ unsigned short fn_state;
+ unsigned short last_key[24];
+ spinlock_t rawmidi_in_lock;
+ struct snd_card *card;
+ struct snd_rawmidi *rwmidi;
+ struct snd_rawmidi_substream *in_substream;
+ struct snd_rawmidi_substream *out_substream;
+ unsigned long in_triggered;
+ unsigned long out_active;
+};
+
+#define PK_QUIRK_NOGET 0x00010000
+#define PCMIDI_MIDDLE_C 60
+#define PCMIDI_CHANNEL_MIN 0
+#define PCMIDI_CHANNEL_MAX 15
+#define PCMIDI_OCTAVE_MIN (-2)
+#define PCMIDI_OCTAVE_MAX 2
+#define PCMIDI_SUSTAIN_MIN 0
+#define PCMIDI_SUSTAIN_MAX 5000
+
+static const char shortname[] = "PC-MIDI";
+static const char longname[] = "Prodikeys PC-MIDI Keyboard";
+
+static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;
+static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;
+static int enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP;
+
+module_param_array(index, int, NULL, 0444);
+module_param_array(id, charp, NULL, 0444);
+module_param_array(enable, bool, NULL, 0444);
+MODULE_PARM_DESC(index, "Index value for the PC-MIDI virtual audio driver");
+MODULE_PARM_DESC(id, "ID string for the PC-MIDI virtual audio driver");
+MODULE_PARM_DESC(enable, "Enable for the PC-MIDI virtual audio driver");
+
+
+/* Output routine for the sysfs channel file */
+static ssize_t show_channel(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct pk_device *pk = (struct pk_device *)hid_get_drvdata(hdev);
+
+ dbg_hid("pcmidi sysfs read channel=%u\n", pk->pm->midi_channel);
+
+ return sprintf(buf, "%u (min:%u, max:%u)\n", pk->pm->midi_channel,
+ PCMIDI_CHANNEL_MIN, PCMIDI_CHANNEL_MAX);
+}
+
+/* Input routine for the sysfs channel file */
+static ssize_t store_channel(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct pk_device *pk = (struct pk_device *)hid_get_drvdata(hdev);
+
+ unsigned channel = 0;
+
+ if (sscanf(buf, "%u", &channel) > 0 && channel <= PCMIDI_CHANNEL_MAX) {
+ dbg_hid("pcmidi sysfs write channel=%u\n", channel);
+ pk->pm->midi_channel = channel;
+ return strlen(buf);
+ }
+ return -EINVAL;
+}
+
+static DEVICE_ATTR(channel, S_IRUGO | S_IWUSR, show_channel,
+ store_channel);
+
+static struct device_attribute *sysfs_device_attr_channel = {
+ &dev_attr_channel,
+ };
+
+/* Output routine for the sysfs sustain file */
+static ssize_t show_sustain(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct pk_device *pk = (struct pk_device *)hid_get_drvdata(hdev);
+
+ dbg_hid("pcmidi sysfs read sustain=%u\n", pk->pm->midi_sustain);
+
+ return sprintf(buf, "%u (off:%u, max:%u (ms))\n", pk->pm->midi_sustain,
+ PCMIDI_SUSTAIN_MIN, PCMIDI_SUSTAIN_MAX);
+}
+
+/* Input routine for the sysfs sustain file */
+static ssize_t store_sustain(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct pk_device *pk = (struct pk_device *)hid_get_drvdata(hdev);
+
+ unsigned sustain = 0;
+
+ if (sscanf(buf, "%u", &sustain) > 0 && sustain <= PCMIDI_SUSTAIN_MAX) {
+ dbg_hid("pcmidi sysfs write sustain=%u\n", sustain);
+ pk->pm->midi_sustain = sustain;
+ pk->pm->midi_sustain_mode =
+ (0 == sustain || !pk->pm->midi_mode) ? 0 : 1;
+ return strlen(buf);
+ }
+ return -EINVAL;
+}
+
+static DEVICE_ATTR(sustain, S_IRUGO | S_IWUSR, show_sustain,
+ store_sustain);
+
+static struct device_attribute *sysfs_device_attr_sustain = {
+ &dev_attr_sustain,
+ };
+
+/* Output routine for the sysfs octave file */
+static ssize_t show_octave(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct pk_device *pk = (struct pk_device *)hid_get_drvdata(hdev);
+
+ dbg_hid("pcmidi sysfs read octave=%d\n", pk->pm->midi_octave);
+
+ return sprintf(buf, "%d (min:%d, max:%d)\n", pk->pm->midi_octave,
+ PCMIDI_OCTAVE_MIN, PCMIDI_OCTAVE_MAX);
+}
+
+/* Input routine for the sysfs octave file */
+static ssize_t store_octave(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct pk_device *pk = (struct pk_device *)hid_get_drvdata(hdev);
+
+ int octave = 0;
+
+ if (sscanf(buf, "%d", &octave) > 0 &&
+ octave >= PCMIDI_OCTAVE_MIN && octave <= PCMIDI_OCTAVE_MAX) {
+ dbg_hid("pcmidi sysfs write octave=%d\n", octave);
+ pk->pm->midi_octave = octave;
+ return strlen(buf);
+ }
+ return -EINVAL;
+}
+
+static DEVICE_ATTR(octave, S_IRUGO | S_IWUSR, show_octave,
+ store_octave);
+
+static struct device_attribute *sysfs_device_attr_octave = {
+ &dev_attr_octave,
+ };
+
+
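+/*
+ * Deliver a 3 byte MIDI message to the rawmidi input substream; the note is
+ * dropped if no substream is connected or input has not been triggered.
+ */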
+static void pcmidi_send_note(struct pcmidi_snd *pm,
+ unsigned char status, unsigned char note, unsigned char velocity)
+{
+ unsigned long flags;
+ unsigned char buffer[3];
+
+ buffer[0] = status;
+ buffer[1] = note;
+ buffer[2] = velocity;
+
+ spin_lock_irqsave(&pm->rawmidi_in_lock, flags);
+
+ if (!pm->in_substream)
+ goto drop_note;
+ if (!test_bit(pm->in_substream->number, &pm->in_triggered))
+ goto drop_note;
+
+ snd_rawmidi_receive(pm->in_substream, buffer, 3);
+
+drop_note:
+ spin_unlock_irqrestore(&pm->rawmidi_in_lock, flags);
+
+ return;
+}
+
+static void pcmidi_sustained_note_release(unsigned long data)
+{
+ struct pcmidi_sustain *pms = (struct pcmidi_sustain *)data;
+
+ pcmidi_send_note(pms->pm, pms->status, pms->note, pms->velocity);
+ pms->in_use = 0;
+}
+
+static void init_sustain_timers(struct pcmidi_snd *pm)
+{
+ struct pcmidi_sustain *pms;
+ unsigned i;
+
+ for (i = 0; i < PCMIDI_SUSTAINED_MAX; i++) {
+ pms = &pm->sustained_notes[i];
+ pms->in_use = 0;
+ pms->pm = pm;
+ setup_timer(&pms->timer, pcmidi_sustained_note_release,
+ (unsigned long)pms);
+ }
+}
+
+static void stop_sustain_timers(struct pcmidi_snd *pm)
+{
+ struct pcmidi_sustain *pms;
+ unsigned i;
+
+ for (i = 0; i < PCMIDI_SUSTAINED_MAX; i++) {
+ pms = &pm->sustained_notes[i];
+ pms->in_use = 1;
+ del_timer_sync(&pms->timer);
+ }
+}
+
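+/*
+ * Look up and cache the HID output report with id 6; it is used by
+ * pcmidi_submit_output_report() to send feedback/state bytes to the keyboard.
+ */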
+static int pcmidi_get_output_report(struct pcmidi_snd *pm)
+{
+ struct hid_device *hdev = pm->pk->hdev;
+ struct hid_report *report;
+
+ list_for_each_entry(report,
+ &hdev->report_enum[HID_OUTPUT_REPORT].report_list, list) {
+ if (!(6 == report->id))
+ continue;
+
+ if (report->maxfield < 1) {
+ dev_err(&hdev->dev, "output report is empty\n");
+ break;
+ }
+ if (report->field[0]->report_count != 2) {
+ dev_err(&hdev->dev, "field count too low\n");
+ break;
+ }
+ pm->pcmidi_report6 = report;
+ return 0;
+ }
+ /* should never get here */
+ return -ENODEV;
+}
+
+static void pcmidi_submit_output_report(struct pcmidi_snd *pm, int state)
+{
+ struct hid_device *hdev = pm->pk->hdev;
+ struct hid_report *report = pm->pcmidi_report6;
+ report->field[0]->value[0] = 0x01;
+ report->field[0]->value[1] = state;
+
+ usbhid_submit_report(hdev, report, USB_DIR_OUT);
+}
+
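+/*
+ * Report 1 carries a 24-bit key bitmask. In MIDI mode the KEY_MAIL bit
+ * (0x004000) is repurposed as octave down and the KEY_WWW bit (0x000004)
+ * toggles sustain; returns 1 when the event was consumed here, 0 to let the
+ * normal key processing continue.
+ */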
+static int pcmidi_handle_report1(struct pcmidi_snd *pm, u8 *data)
+{
+ u32 bit_mask;
+
+ bit_mask = data[1];
+ bit_mask = (bit_mask << 8) | data[2];
+ bit_mask = (bit_mask << 8) | data[3];
+
+ dbg_hid("pcmidi mode: %d\n", pm->midi_mode);
+
+ /*KEY_MAIL or octave down*/
+ if (pm->midi_mode && bit_mask == 0x004000) {
+ /* octave down */
+ pm->midi_octave--;
+ if (pm->midi_octave < -2)
+ pm->midi_octave = -2;
+ dbg_hid("pcmidi mode: %d octave: %d\n",
+ pm->midi_mode, pm->midi_octave);
+ return 1;
+ }
+ /*KEY_WWW or sustain*/
+ else if (pm->midi_mode && bit_mask == 0x000004) {
+ /* sustain on/off*/
+ pm->midi_sustain_mode ^= 0x1;
+ return 1;
+ }
+
+ return 0; /* continue key processing */
+}
+
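+/*
+ * Report 3 carries (note, velocity) pairs from the musical keyboard. Notes
+ * below 0x81 are key presses and become MIDI note-on messages, the rest are
+ * releases and become note-off messages; while sustain mode is active the
+ * note-off is parked in a free sustain slot and sent later by its timer.
+ */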
+static int pcmidi_handle_report3(struct pcmidi_snd *pm, u8 *data, int size)
+{
+ struct pcmidi_sustain *pms;
+ unsigned i, j;
+ unsigned char status, note, velocity;
+
+ unsigned num_notes = (size-1)/2;
+ for (j = 0; j < num_notes; j++) {
+ note = data[j*2+1];
+ velocity = data[j*2+2];
+
+ if (note < 0x81) { /* note on */
+ status = 128 + 16 + pm->midi_channel; /* 1001nnnn */
+ note = note - 0x54 + PCMIDI_MIDDLE_C +
+ (pm->midi_octave * 12);
+ if (0 == velocity)
+ velocity = 1; /* force note on */
+ } else { /* note off */
+ status = 128 + pm->midi_channel; /* 1000nnnn */
+ note = note - 0x94 + PCMIDI_MIDDLE_C +
+ (pm->midi_octave*12);
+
+ if (pm->midi_sustain_mode) {
+ for (i = 0; i < PCMIDI_SUSTAINED_MAX; i++) {
+ pms = &pm->sustained_notes[i];
+ if (!pms->in_use) {
+ pms->status = status;
+ pms->note = note;
+ pms->velocity = velocity;
+ pms->in_use = 1;
+
+ mod_timer(&pms->timer,
+ jiffies +
+ msecs_to_jiffies(pm->midi_sustain));
+ return 1;
+ }
+ }
+ }
+ }
+ pcmidi_send_note(pm, status, note, velocity);
+ }
+
+ return 1;
+}
+
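+/*
+ * Report 4 carries a 24-bit bitmask of the extra/multimedia keys. The first
+ * pass releases keys whose bits were cleared since the last report, the second
+ * pass presses newly set keys; Fn lock and the MIDI launcher only toggle
+ * driver state and acknowledge via output report 6.
+ */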
+static int pcmidi_handle_report4(struct pcmidi_snd *pm, u8 *data)
+{
+ unsigned key;
+ u32 bit_mask;
+ u32 bit_index;
+
+ bit_mask = data[1];
+ bit_mask = (bit_mask << 8) | data[2];
+ bit_mask = (bit_mask << 8) | data[3];
+
+ /* break keys */
+ for (bit_index = 0; bit_index < 24; bit_index++) {
+ key = pm->last_key[bit_index];
+ if (!((0x01 << bit_index) & bit_mask)) {
+ input_event(pm->input_ep82, EV_KEY,
+ pm->last_key[bit_index], 0);
+ pm->last_key[bit_index] = 0;
+ }
+ }
+
+ /* make keys */
+ for (bit_index = 0; bit_index < 24; bit_index++) {
+ key = 0;
+ switch ((0x01 << bit_index) & bit_mask) {
+ case 0x000010: /* Fn lock*/
+ pm->fn_state ^= 0x000010;
+ if (pm->fn_state)
+ pcmidi_submit_output_report(pm, 0xc5);
+ else
+ pcmidi_submit_output_report(pm, 0xc6);
+ continue;
+ case 0x020000: /* midi launcher..send a key (qwerty) or not? */
+ pcmidi_submit_output_report(pm, 0xc1);
+ pm->midi_mode ^= 0x01;
+
+ dbg_hid("pcmidi mode: %d\n", pm->midi_mode);
+ continue;
+ case 0x100000: /* KEY_MESSENGER or octave up */
+ dbg_hid("pcmidi mode: %d\n", pm->midi_mode);
+ if (pm->midi_mode) {
+ pm->midi_octave++;
+ if (pm->midi_octave > 2)
+ pm->midi_octave = 2;
+ dbg_hid("pcmidi mode: %d octave: %d\n",
+ pm->midi_mode, pm->midi_octave);
+ continue;
+ } else
+ key = KEY_MESSENGER;
+ break;
+ case 0x400000:
+ key = KEY_CALENDAR;
+ break;
+ case 0x080000:
+ key = KEY_ADDRESSBOOK;
+ break;
+ case 0x040000:
+ key = KEY_DOCUMENTS;
+ break;
+ case 0x800000:
+ key = KEY_WORDPROCESSOR;
+ break;
+ case 0x200000:
+ key = KEY_SPREADSHEET;
+ break;
+ case 0x010000:
+ key = KEY_COFFEE;
+ break;
+ case 0x000100:
+ key = KEY_HELP;
+ break;
+ case 0x000200:
+ key = KEY_SEND;
+ break;
+ case 0x000400:
+ key = KEY_REPLY;
+ break;
+ case 0x000800:
+ key = KEY_FORWARDMAIL;
+ break;
+ case 0x001000:
+ key = KEY_NEW;
+ break;
+ case 0x002000:
+ key = KEY_OPEN;
+ break;
+ case 0x004000:
+ key = KEY_CLOSE;
+ break;
+ case 0x008000:
+ key = KEY_SAVE;
+ break;
+ case 0x000001:
+ key = KEY_UNDO;
+ break;
+ case 0x000002:
+ key = KEY_REDO;
+ break;
+ case 0x000004:
+ key = KEY_SPELLCHECK;
+ break;
+ case 0x000008:
+ key = KEY_PRINT;
+ break;
+ }
+ if (key) {
+ input_event(pm->input_ep82, EV_KEY, key, 1);
+ pm->last_key[bit_index] = key;
+ }
+ }
+
+ return 1;
+}
+
+int pcmidi_handle_report(
+ struct pcmidi_snd *pm, unsigned report_id, u8 *data, int size)
+{
+ int ret = 0;
+
+ switch (report_id) {
+ case 0x01: /* midi keys (qwerty)*/
+ ret = pcmidi_handle_report1(pm, data);
+ break;
+ case 0x03: /* midi keyboard (musical)*/
+ ret = pcmidi_handle_report3(pm, data, size);
+ break;
+ case 0x04: /* multimedia/midi keys (qwerty)*/
+ ret = pcmidi_handle_report4(pm, data);
+ break;
+ }
+ return ret;
+}
+
+void pcmidi_setup_extra_keys(struct pcmidi_snd *pm, struct input_dev *input)
+{
+ /* reassigned functionality for N/A keys
+ MY PICTURES => KEY_WORDPROCESSOR
+ MY MUSIC => KEY_SPREADSHEET
+ */
+ unsigned int keys[] = {
+ KEY_FN,
+ KEY_MESSENGER, KEY_CALENDAR,
+ KEY_ADDRESSBOOK, KEY_DOCUMENTS,
+ KEY_WORDPROCESSOR,
+ KEY_SPREADSHEET,
+ KEY_COFFEE,
+ KEY_HELP, KEY_SEND,
+ KEY_REPLY, KEY_FORWARDMAIL,
+ KEY_NEW, KEY_OPEN,
+ KEY_CLOSE, KEY_SAVE,
+ KEY_UNDO, KEY_REDO,
+ KEY_SPELLCHECK, KEY_PRINT,
+ 0
+ };
+
+ unsigned int *pkeys = &keys[0];
+ unsigned short i;
+
+ if (pm->ifnum != 1) /* only set up ONCE for interface 1 */
+ return;
+
+ pm->input_ep82 = input;
+
+ for (i = 0; i < 24; i++)
+ pm->last_key[i] = 0;
+
+ while (*pkeys != 0) {
+ set_bit(*pkeys, pm->input_ep82->keybit);
+ ++pkeys;
+ }
+}
+
+static int pcmidi_set_operational(struct pcmidi_snd *pm)
+{
+ if (pm->ifnum != 1)
+ return 0; /* only set up ONCE for interface 1 */
+
+ pcmidi_get_output_report(pm);
+ pcmidi_submit_output_report(pm, 0xc1);
+ return 0;
+}
+
+static int pcmidi_snd_free(struct snd_device *dev)
+{
+ return 0;
+}
+
+static int pcmidi_in_open(struct snd_rawmidi_substream *substream)
+{
+ struct pcmidi_snd *pm = substream->rmidi->private_data;
+
+ dbg_hid("pcmidi in open\n");
+ pm->in_substream = substream;
+ return 0;
+}
+
+static int pcmidi_in_close(struct snd_rawmidi_substream *substream)
+{
+ dbg_hid("pcmidi in close\n");
+ return 0;
+}
+
+static void pcmidi_in_trigger(struct snd_rawmidi_substream *substream, int up)
+{
+ struct pcmidi_snd *pm = substream->rmidi->private_data;
+
+ dbg_hid("pcmidi in trigger %d\n", up);
+
+ pm->in_triggered = up;
+}
+
+static struct snd_rawmidi_ops pcmidi_in_ops = {
+ .open = pcmidi_in_open,
+ .close = pcmidi_in_close,
+ .trigger = pcmidi_in_trigger
+};
+
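+/*
+ * Create the ALSA card, a low-level sound device, a single input-only rawmidi
+ * stream and the channel/sustain/octave sysfs attributes, then register the
+ * card. Done only once, for USB interface 1.
+ */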
+int pcmidi_snd_initialise(struct pcmidi_snd *pm)
+{
+ static int dev;
+ struct snd_card *card;
+ struct snd_rawmidi *rwmidi;
+ int err;
+
+ static struct snd_device_ops ops = {
+ .dev_free = pcmidi_snd_free,
+ };
+
+ if (pm->ifnum != 1)
+ return 0; /* only set up midi device ONCE for interface 1 */
+
+ if (dev >= SNDRV_CARDS)
+ return -ENODEV;
+
+ if (!enable[dev]) {
+ dev++;
+ return -ENOENT;
+ }
+
+ /* Setup sound card */
+
+ err = snd_card_create(index[dev], id[dev], THIS_MODULE, 0, &card);
+ if (err < 0) {
+ pk_error("failed to create pc-midi sound card\n");
+ err = -ENOMEM;
+ goto fail;
+ }
+ pm->card = card;
+
+ /* Setup sound device */
+ err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, pm, &ops);
+ if (err < 0) {
+ pk_error("failed to create pc-midi sound device: error %d\n",
+ err);
+ goto fail;
+ }
+
+ strncpy(card->driver, shortname, sizeof(card->driver));
+ strncpy(card->shortname, shortname, sizeof(card->shortname));
+ strncpy(card->longname, longname, sizeof(card->longname));
+
+ /* Set up rawmidi */
+ err = snd_rawmidi_new(card, card->shortname, 0,
+ 0, 1, &rwmidi);
+ if (err < 0) {
+ pk_error("failed to create pc-midi rawmidi device: error %d\n",
+ err);
+ goto fail;
+ }
+ pm->rwmidi = rwmidi;
+ strncpy(rwmidi->name, card->shortname, sizeof(rwmidi->name));
+ rwmidi->info_flags = SNDRV_RAWMIDI_INFO_INPUT;
+ rwmidi->private_data = pm;
+
+ snd_rawmidi_set_ops(rwmidi, SNDRV_RAWMIDI_STREAM_INPUT,
+ &pcmidi_in_ops);
+
+ snd_card_set_dev(card, &pm->pk->hdev->dev);
+
+ /* create sysfs variables */
+ err = device_create_file(&pm->pk->hdev->dev,
+ sysfs_device_attr_channel);
+ if (err < 0) {
+ pk_error("failed to create sysfs attribute channel: error %d\n",
+ err);
+ goto fail;
+ }
+
+ err = device_create_file(&pm->pk->hdev->dev,
+ sysfs_device_attr_sustain);
+ if (err < 0) {
+ pk_error("failed to create sysfs attribute sustain: error %d\n",
+ err);
+ goto fail_attr_sustain;
+ }
+
+ err = device_create_file(&pm->pk->hdev->dev,
+ sysfs_device_attr_octave);
+ if (err < 0) {
+ pk_error("failed to create sysfs attribute octave: error %d\n",
+ err);
+ goto fail_attr_octave;
+ }
+
+ spin_lock_init(&pm->rawmidi_in_lock);
+
+ init_sustain_timers(pm);
+ pcmidi_set_operational(pm);
+
+ /* register it */
+ err = snd_card_register(card);
+ if (err < 0) {
+ pk_error("failed to register pc-midi sound card: error %d\n",
+ err);
+ goto fail_register;
+ }
+
+ dbg_hid("pcmidi_snd_initialise finished ok\n");
+ return 0;
+
+fail_register:
+ stop_sustain_timers(pm);
+ device_remove_file(&pm->pk->hdev->dev, sysfs_device_attr_octave);
+fail_attr_octave:
+ device_remove_file(&pm->pk->hdev->dev, sysfs_device_attr_sustain);
+fail_attr_sustain:
+ device_remove_file(&pm->pk->hdev->dev, sysfs_device_attr_channel);
+fail:
+ if (pm->card) {
+ snd_card_free(pm->card);
+ pm->card = NULL;
+ }
+ return err;
+}
+
+int pcmidi_snd_terminate(struct pcmidi_snd *pm)
+{
+ if (pm->card) {
+ stop_sustain_timers(pm);
+
+ device_remove_file(&pm->pk->hdev->dev,
+ sysfs_device_attr_channel);
+ device_remove_file(&pm->pk->hdev->dev,
+ sysfs_device_attr_sustain);
+ device_remove_file(&pm->pk->hdev->dev,
+ sysfs_device_attr_octave);
+
+ snd_card_disconnect(pm->card);
+ snd_card_free_when_closed(pm->card);
+ }
+
+ return 0;
+}
+
+/*
+ * The PC-MIDI report descriptor declares the wrong report count for
+ * report id 4; fix it up before the reports are parsed.
+ */
+static void pk_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ unsigned int rsize)
+{
+ if (rsize == 178 &&
+ rdesc[111] == 0x06 && rdesc[112] == 0x00 &&
+ rdesc[113] == 0xff) {
+ dev_info(&hdev->dev, "fixing up pc-midi keyboard report "
+ "descriptor\n");
+
+ rdesc[144] = 0x18; /* report 4: was 0x10 report count */
+ }
+}
+
+static int pk_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+ struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ struct pk_device *pk = (struct pk_device *)hid_get_drvdata(hdev);
+ struct pcmidi_snd *pm;
+
+ pm = pk->pm;
+
+ if (HID_UP_MSVENDOR == (usage->hid & HID_USAGE_PAGE) &&
+ 1 == pm->ifnum) {
+ pcmidi_setup_extra_keys(pm, hi->input);
+ return 0;
+ }
+
+ return 0;
+}
+
+
+static int pk_raw_event(struct hid_device *hdev, struct hid_report *report,
+ u8 *data, int size)
+{
+ struct pk_device *pk = (struct pk_device *)hid_get_drvdata(hdev);
+ int ret = 0;
+
+ if (1 == pk->pm->ifnum) {
+ if (report->id == data[0])
+ switch (report->id) {
+ case 0x01: /* midi keys (qwerty)*/
+ case 0x03: /* midi keyboard (musical)*/
+ case 0x04: /* extra/midi keys (qwerty)*/
+ ret = pcmidi_handle_report(pk->pm,
+ report->id, data, size);
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int pk_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+ int ret;
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+ unsigned short ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
+ unsigned long quirks = id->driver_data;
+ struct pk_device *pk;
+ struct pcmidi_snd *pm = NULL;
+
+ pk = kzalloc(sizeof(*pk), GFP_KERNEL);
+ if (pk == NULL) {
+ dev_err(&hdev->dev, "prodikeys: can't alloc descriptor\n");
+ return -ENOMEM;
+ }
+
+ pk->hdev = hdev;
+
+ pm = kzalloc(sizeof(*pm), GFP_KERNEL);
+ if (pm == NULL) {
+ dev_err(&hdev->dev,
+ "prodikeys: can't alloc descriptor\n");
+ ret = -ENOMEM;
+ goto err_free;
+ }
+
+ pm->pk = pk;
+ pk->pm = pm;
+ pm->ifnum = ifnum;
+
+ hid_set_drvdata(hdev, pk);
+
+ ret = hid_parse(hdev);
+ if (ret) {
+ dev_err(&hdev->dev, "prodikeys: hid parse failed\n");
+ goto err_free;
+ }
+
+ if (quirks & PK_QUIRK_NOGET) { /* hid_parse cleared all the quirks */
+ hdev->quirks |= HID_QUIRK_NOGET;
+ }
+
+ ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+ if (ret) {
+ dev_err(&hdev->dev, "prodikeys: hw start failed\n");
+ goto err_free;
+ }
+
+ ret = pcmidi_snd_initialise(pm);
+ if (ret < 0)
+ goto err_stop;
+
+ return 0;
+err_stop:
+ hid_hw_stop(hdev);
+err_free:
+ if (pm != NULL)
+ kfree(pm);
+
+ kfree(pk);
+ return ret;
+}
+
+static void pk_remove(struct hid_device *hdev)
+{
+ struct pk_device *pk = (struct pk_device *)hid_get_drvdata(hdev);
+ struct pcmidi_snd *pm;
+
+ pm = pk->pm;
+ if (pm) {
+ pcmidi_snd_terminate(pm);
+ kfree(pm);
+ }
+
+ hid_hw_stop(hdev);
+
+ kfree(pk);
+}
+
+static const struct hid_device_id pk_devices[] = {
+ {HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS,
+ USB_DEVICE_ID_PRODIKEYS_PCMIDI),
+ .driver_data = PK_QUIRK_NOGET},
+ { }
+};
+MODULE_DEVICE_TABLE(hid, pk_devices);
+
+static struct hid_driver pk_driver = {
+ .name = "prodikeys",
+ .id_table = pk_devices,
+ .report_fixup = pk_report_fixup,
+ .input_mapping = pk_input_mapping,
+ .raw_event = pk_raw_event,
+ .probe = pk_probe,
+ .remove = pk_remove,
+};
+
+static int pk_init(void)
+{
+ int ret;
+
+ ret = hid_register_driver(&pk_driver);
+ if (ret)
+ printk(KERN_ERR "can't register prodikeys driver\n");
+
+ return ret;
+}
+
+static void pk_exit(void)
+{
+ hid_unregister_driver(&pk_driver);
+}
+
+module_init(pk_init);
+module_exit(pk_exit);
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-roccat-kone.c b/drivers/hid/hid-roccat-kone.c
new file mode 100644
index 000000000000..17f2dc04f883
--- /dev/null
+++ b/drivers/hid/hid-roccat-kone.c
@@ -0,0 +1,1043 @@
+/*
+ * Roccat Kone driver for Linux
+ *
+ * Copyright (c) 2010 Stefan Achatz <erazor_de@users.sourceforge.net>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+/*
+ * Roccat Kone is a gamer mouse which consists of a mouse part and a keyboard
+ * part. The keyboard part enables the mouse to execute stored macros with mixed
+ * key- and button-events.
+ *
+ * TODO implement on-the-fly polling-rate change
+ * The windows driver has the ability to change the polling rate of the
+ * device on the press of a mousebutton.
+ * Is it possible to remove and reinstall the urb in raw-event- or any
+ * other handler, or to defer this action to be executed somewhere else?
+ *
+ * TODO implement notification mechanism for overlong macro execution
+ * If user wants to execute an overlong macro only the names of macroset
+ * and macro are given. Should userland tap hidraw or is there an
+ * additional streaming mechanism?
+ *
+ * TODO is it possible to overwrite group for sysfs attributes via udev?
+ */
+
+#include <linux/device.h>
+#include <linux/input.h>
+#include <linux/hid.h>
+#include <linux/usb.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include "hid-ids.h"
+#include "hid-roccat.h"
+#include "hid-roccat-kone.h"
+
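+/*
+ * The checksum is the byte-wise sum of the settings block excluding the
+ * trailing 16-bit checksum field itself, stored little-endian.
+ */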
+static void kone_set_settings_checksum(struct kone_settings *settings)
+{
+ uint16_t checksum = 0;
+ unsigned char *address = (unsigned char *)settings;
+ int i;
+
+ for (i = 0; i < sizeof(struct kone_settings) - 2; ++i, ++address)
+ checksum += *address;
+ settings->checksum = cpu_to_le16(checksum);
+}
+
+/*
+ * Checks success after writing data to mouse
+ * On success returns 0
+ * On failure returns errno
+ */
+static int kone_check_write(struct usb_device *usb_dev)
+{
+ int len;
+ unsigned char *data;
+
+ data = kmalloc(1, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ do {
+ /*
+ * Mouse needs 50 msecs until it says ok, but there are
+ * 30 more msecs needed for next write to work.
+ */
+ msleep(80);
+
+ len = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
+ USB_REQ_CLEAR_FEATURE,
+ USB_TYPE_CLASS | USB_RECIP_INTERFACE |
+ USB_DIR_IN,
+ kone_command_confirm_write, 0, data, 1,
+ USB_CTRL_SET_TIMEOUT);
+
+ if (len != 1) {
+ kfree(data);
+ return -EIO;
+ }
+
+ /*
+ * value of 3 seems to mean something like
+ * "not finished yet, but it looks good"
+ * So check again after a moment.
+ */
+ } while (*data == 3);
+
+ if (*data == 1) { /* everything alright */
+ kfree(data);
+ return 0;
+ } else { /* unknown answer */
+ dev_err(&usb_dev->dev, "got retval %d when checking write\n",
+ *data);
+ kfree(data);
+ return -EIO;
+ }
+}
+
+/*
+ * Reads settings from mouse and stores it in @buf
+ * @buf has to be allocated with GFP_KERNEL
+ * On success returns 0
+ * On failure returns errno
+ */
+static int kone_get_settings(struct usb_device *usb_dev,
+ struct kone_settings *buf)
+{
+ int len;
+
+ len = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
+ USB_REQ_CLEAR_FEATURE,
+ USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
+ kone_command_settings, 0, buf,
+ sizeof(struct kone_settings), USB_CTRL_SET_TIMEOUT);
+
+ if (len != sizeof(struct kone_settings))
+ return -EIO;
+
+ return 0;
+}
+
+/*
+ * Writes settings from @buf to mouse
+ * On success returns 0
+ * On failure returns errno
+ */
+static int kone_set_settings(struct usb_device *usb_dev,
+ struct kone_settings const *settings)
+{
+ int len;
+
+ len = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
+ USB_REQ_SET_CONFIGURATION,
+ USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
+ kone_command_settings, 0, (char *)settings,
+ sizeof(struct kone_settings),
+ USB_CTRL_SET_TIMEOUT);
+
+ if (len != sizeof(struct kone_settings))
+ return -EIO;
+
+ if (kone_check_write(usb_dev))
+ return -EIO;
+
+ return 0;
+}
+
+/*
+ * Reads profile data from mouse and stores it in @buf
+ * @number: profile number to read
+ * On success returns 0
+ * On failure returns errno
+ */
+static int kone_get_profile(struct usb_device *usb_dev,
+ struct kone_profile *buf, int number)
+{
+ int len;
+
+ if (number < 1 || number > 5)
+ return -EINVAL;
+
+ len = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
+ USB_REQ_CLEAR_FEATURE,
+ USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
+ kone_command_profile, number, buf,
+ sizeof(struct kone_profile), USB_CTRL_SET_TIMEOUT);
+
+ if (len != sizeof(struct kone_profile))
+ return -EIO;
+
+ return 0;
+}
+
+/*
+ * Writes profile data to mouse.
+ * @number: profile number to write
+ * On success returns 0
+ * On failure returns errno
+ */
+static int kone_set_profile(struct usb_device *usb_dev,
+ struct kone_profile const *profile, int number)
+{
+ int len;
+
+ if (number < 1 || number > 5)
+ return -EINVAL;
+
+ len = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
+ USB_REQ_SET_CONFIGURATION,
+ USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
+ kone_command_profile, number, (char *)profile,
+ sizeof(struct kone_profile),
+ USB_CTRL_SET_TIMEOUT);
+
+ if (len != sizeof(struct kone_profile))
+ return -EIO;
+
+ if (kone_check_write(usb_dev))
+ return -EIO;
+
+ return 0;
+}
+
+/*
+ * Reads value of "fast-clip-weight" and stores it in @result
+ * On success returns 0
+ * On failure returns errno
+ */
+static int kone_get_weight(struct usb_device *usb_dev, int *result)
+{
+ int len;
+ uint8_t *data;
+
+ data = kmalloc(1, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ len = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
+ USB_REQ_CLEAR_FEATURE,
+ USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
+ kone_command_weight, 0, data, 1, USB_CTRL_SET_TIMEOUT);
+
+ if (len != 1) {
+ kfree(data);
+ return -EIO;
+ }
+ *result = (int)*data;
+ kfree(data);
+ return 0;
+}
+
+/*
+ * Reads firmware_version of mouse and stores it in @result
+ * On success returns 0
+ * On failure returns errno
+ */
+static int kone_get_firmware_version(struct usb_device *usb_dev, int *result)
+{
+ int len;
+ unsigned char *data;
+
+ data = kmalloc(2, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ len = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
+ USB_REQ_CLEAR_FEATURE,
+ USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
+ kone_command_firmware_version, 0, data, 2,
+ USB_CTRL_SET_TIMEOUT);
+
+ if (len != 2) {
+ kfree(data);
+ return -EIO;
+ }
+ *result = le16_to_cpu(*(__le16 *)data);
+ kfree(data);
+ return 0;
+}
+
+static ssize_t kone_sysfs_read_settings(struct file *fp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count) {
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
+
+ if (off >= sizeof(struct kone_settings))
+ return 0;
+
+ if (off + count > sizeof(struct kone_settings))
+ count = sizeof(struct kone_settings) - off;
+
+ mutex_lock(&kone->kone_lock);
+ memcpy(buf, (char const *)&kone->settings + off, count);
+ mutex_unlock(&kone->kone_lock);
+
+ return count;
+}
+
+/*
+ * Writing settings automatically activates startup_profile.
+ * This function keeps values in kone_device up to date and assumes that in
+ * case of error the old data is still valid
+ */
+static ssize_t kone_sysfs_write_settings(struct file *fp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count) {
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
+ struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+ int retval = 0, difference;
+
+ /* I need to get my data in one piece */
+ if (off != 0 || count != sizeof(struct kone_settings))
+ return -EINVAL;
+
+ mutex_lock(&kone->kone_lock);
+ difference = memcmp(buf, &kone->settings, sizeof(struct kone_settings));
+ if (difference) {
+ retval = kone_set_settings(usb_dev,
+ (struct kone_settings const *)buf);
+ if (!retval)
+ memcpy(&kone->settings, buf,
+ sizeof(struct kone_settings));
+ }
+ mutex_unlock(&kone->kone_lock);
+
+ if (retval)
+ return retval;
+
+ /*
+ * If we get here, treat settings as okay and update actual values
+ * according to startup_profile
+ */
+ kone->actual_profile = kone->settings.startup_profile;
+ kone->actual_dpi = kone->profiles[kone->actual_profile - 1].startup_dpi;
+
+ return sizeof(struct kone_settings);
+}
+
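+/*
+ * Common backend for the profile1..profile5 binary sysfs attributes: copies
+ * the cached profile data under kone_lock instead of querying the mouse.
+ */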
+static ssize_t kone_sysfs_read_profilex(struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count, int number) {
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
+
+ if (off >= sizeof(struct kone_profile))
+ return 0;
+
+ if (off + count > sizeof(struct kone_profile))
+ count = sizeof(struct kone_profile) - off;
+
+ mutex_lock(&kone->kone_lock);
+ memcpy(buf, (char const *)&kone->profiles[number - 1] + off, count);
+ mutex_unlock(&kone->kone_lock);
+
+ return count;
+}
+
+static ssize_t kone_sysfs_read_profile1(struct file *fp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count) {
+ return kone_sysfs_read_profilex(kobj, attr, buf, off, count, 1);
+}
+
+static ssize_t kone_sysfs_read_profile2(struct file *fp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count) {
+ return kone_sysfs_read_profilex(kobj, attr, buf, off, count, 2);
+}
+
+static ssize_t kone_sysfs_read_profile3(struct file *fp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count) {
+ return kone_sysfs_read_profilex(kobj, attr, buf, off, count, 3);
+}
+
+static ssize_t kone_sysfs_read_profile4(struct file *fp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count) {
+ return kone_sysfs_read_profilex(kobj, attr, buf, off, count, 4);
+}
+
+static ssize_t kone_sysfs_read_profile5(struct file *fp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count) {
+ return kone_sysfs_read_profilex(kobj, attr, buf, off, count, 5);
+}
+
+/* Writes data only if different to stored data */
+static ssize_t kone_sysfs_write_profilex(struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count, int number) {
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
+ struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+ struct kone_profile *profile;
+ int retval = 0, difference;
+
+ /* I need to get my data in one piece */
+ if (off != 0 || count != sizeof(struct kone_profile))
+ return -EINVAL;
+
+ profile = &kone->profiles[number - 1];
+
+ mutex_lock(&kone->kone_lock);
+ difference = memcmp(buf, profile, sizeof(struct kone_profile));
+ if (difference) {
+ retval = kone_set_profile(usb_dev,
+ (struct kone_profile const *)buf, number);
+ if (!retval)
+ memcpy(profile, buf, sizeof(struct kone_profile));
+ }
+ mutex_unlock(&kone->kone_lock);
+
+ if (retval)
+ return retval;
+
+ return sizeof(struct kone_profile);
+}
+
+static ssize_t kone_sysfs_write_profile1(struct file *fp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count) {
+ return kone_sysfs_write_profilex(kobj, attr, buf, off, count, 1);
+}
+
+static ssize_t kone_sysfs_write_profile2(struct file *fp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count) {
+ return kone_sysfs_write_profilex(kobj, attr, buf, off, count, 2);
+}
+
+static ssize_t kone_sysfs_write_profile3(struct file *fp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count) {
+ return kone_sysfs_write_profilex(kobj, attr, buf, off, count, 3);
+}
+
+static ssize_t kone_sysfs_write_profile4(struct file *fp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count) {
+ return kone_sysfs_write_profilex(kobj, attr, buf, off, count, 4);
+}
+
+static ssize_t kone_sysfs_write_profile5(struct file *fp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count) {
+ return kone_sysfs_write_profilex(kobj, attr, buf, off, count, 5);
+}
+
+static ssize_t kone_sysfs_show_actual_profile(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
+ return snprintf(buf, PAGE_SIZE, "%d\n", kone->actual_profile);
+}
+
+static ssize_t kone_sysfs_show_actual_dpi(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
+ return snprintf(buf, PAGE_SIZE, "%d\n", kone->actual_dpi);
+}
+
+/* weight is read each time, since we don't get informed when it's changed */
+static ssize_t kone_sysfs_show_weight(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
+ struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+ int weight = 0;
+ int retval;
+
+ mutex_lock(&kone->kone_lock);
+ retval = kone_get_weight(usb_dev, &weight);
+ mutex_unlock(&kone->kone_lock);
+
+ if (retval)
+ return retval;
+ return snprintf(buf, PAGE_SIZE, "%d\n", weight);
+}
+
+static ssize_t kone_sysfs_show_firmware_version(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
+ return snprintf(buf, PAGE_SIZE, "%d\n", kone->firmware_version);
+}
+
+static ssize_t kone_sysfs_show_tcu(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
+ return snprintf(buf, PAGE_SIZE, "%d\n", kone->settings.tcu);
+}
+
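+/*
+ * Send a single-byte tcu/calibration sub-command to the mouse. The byte is
+ * kmalloc'ed because buffers passed to usb_control_msg() must not live on the
+ * stack.
+ */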
+static int kone_tcu_command(struct usb_device *usb_dev, int number)
+{
+ int len;
+ char *value;
+
+ value = kmalloc(1, GFP_KERNEL);
+ if (!value)
+ return -ENOMEM;
+
+ *value = number;
+
+ len = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
+ USB_REQ_SET_CONFIGURATION,
+ USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
+ kone_command_calibrate, 0, value, 1,
+ USB_CTRL_SET_TIMEOUT);
+
+ kfree(value);
+ return ((len != 1) ? -EIO : 0);
+}
+
+/*
+ * Calibrating the tcu is the only action that changes settings data inside the
+ * mouse, so this data needs to be reread
+ */
+static ssize_t kone_sysfs_set_tcu(struct device *dev,
+ struct device_attribute *attr, char const *buf, size_t size)
+{
+ struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
+ struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+ int retval;
+ unsigned long state;
+
+ retval = strict_strtoul(buf, 10, &state);
+ if (retval)
+ return retval;
+
+ if (state != 0 && state != 1)
+ return -EINVAL;
+
+ mutex_lock(&kone->kone_lock);
+
+ if (state == 1) { /* state activate */
+ retval = kone_tcu_command(usb_dev, 1);
+ if (retval)
+ goto exit_unlock;
+ retval = kone_tcu_command(usb_dev, 2);
+ if (retval)
+ goto exit_unlock;
+ ssleep(5); /* tcu needs this time for calibration */
+ retval = kone_tcu_command(usb_dev, 3);
+ if (retval)
+ goto exit_unlock;
+ retval = kone_tcu_command(usb_dev, 0);
+ if (retval)
+ goto exit_unlock;
+ retval = kone_tcu_command(usb_dev, 4);
+ if (retval)
+ goto exit_unlock;
+ /*
+ * Kone needs this time to settle things.
+ * Reading settings too early will result in invalid data.
+ * Roccat's driver waits 1 sec, maybe this time could be
+ * shortened.
+ */
+ ssleep(1);
+ }
+
+ /* calibration changes values in settings, so reread */
+ retval = kone_get_settings(usb_dev, &kone->settings);
+ if (retval)
+ goto exit_no_settings;
+
+ /* only write settings back if activation state is different */
+ if (kone->settings.tcu != state) {
+ kone->settings.tcu = state;
+ kone_set_settings_checksum(&kone->settings);
+
+ retval = kone_set_settings(usb_dev, &kone->settings);
+ if (retval) {
+ dev_err(&usb_dev->dev, "couldn't set tcu state\n");
+ /*
+ * try to reread valid settings into buffer overwriting
+ * first error code
+ */
+ retval = kone_get_settings(usb_dev, &kone->settings);
+ if (retval)
+ goto exit_no_settings;
+ goto exit_unlock;
+ }
+ }
+
+ retval = size;
+ goto exit_unlock;
+exit_no_settings:
+ dev_err(&usb_dev->dev, "couldn't read settings\n");
+exit_unlock:
+ mutex_unlock(&kone->kone_lock);
+ return retval;
+}
+
+static ssize_t kone_sysfs_show_startup_profile(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
+ return snprintf(buf, PAGE_SIZE, "%d\n", kone->settings.startup_profile);
+}
+
+static ssize_t kone_sysfs_set_startup_profile(struct device *dev,
+ struct device_attribute *attr, char const *buf, size_t size)
+{
+ struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
+ struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+ int retval;
+ unsigned long new_startup_profile;
+
+ retval = strict_strtoul(buf, 10, &new_startup_profile);
+ if (retval)
+ return retval;
+
+ if (new_startup_profile < 1 || new_startup_profile > 5)
+ return -EINVAL;
+
+ mutex_lock(&kone->kone_lock);
+
+ kone->settings.startup_profile = new_startup_profile;
+ kone_set_settings_checksum(&kone->settings);
+
+ retval = kone_set_settings(usb_dev, &kone->settings);
+
+ mutex_unlock(&kone->kone_lock);
+
+ if (retval)
+ return retval;
+
+ /* changing the startup profile immediately activates this profile */
+ kone->actual_profile = new_startup_profile;
+ kone->actual_dpi = kone->profiles[kone->actual_profile - 1].startup_dpi;
+
+ return size;
+}
+
+/*
+ * This file is used by userland software to find devices that are handled by
+ * this driver. This provides a consistent way across current and older
+ * kernels, where this driver replaced usbhid instead of generic-usb.
+ * Driver capabilities are determined by version number.
+ */
+static ssize_t kone_sysfs_show_driver_version(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, ROCCAT_KONE_DRIVER_VERSION "\n");
+}
+
+/*
+ * Read actual dpi settings.
+ * Returns raw value for further processing. Refer to enum kone_polling_rates to
+ * get real value.
+ */
+static DEVICE_ATTR(actual_dpi, 0440, kone_sysfs_show_actual_dpi, NULL);
+
+static DEVICE_ATTR(actual_profile, 0440, kone_sysfs_show_actual_profile, NULL);
+
+/*
+ * The mouse can be equipped with one of four supplied weights from 5 to 20
+ * grams which are recognized and its value can be read out.
+ * This returns the raw value reported by the mouse for easy evaluation by
+ * software. Refer to enum kone_weights to get corresponding real weight.
+ */
+static DEVICE_ATTR(weight, 0440, kone_sysfs_show_weight, NULL);
+
+/*
+ * Prints firmware version stored in mouse as integer.
+ * The raw value reported by the mouse is returned for easy evaluation, to get
+ * the real version number the decimal point has to be shifted 2 positions to
+ * the left. E.g. a value of 138 means 1.38.
+ */
+static DEVICE_ATTR(firmware_version, 0440,
+ kone_sysfs_show_firmware_version, NULL);
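+
+/*
+ * Example read from userland (the exact sysfs path is illustrative and depends
+ * on where the USB interface is enumerated):
+ *   $ cat /sys/bus/usb/devices/<bus-port:config.interface>/firmware_version
+ *   138    (meaning firmware 1.38)
+ */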
+
+/*
+ * Prints state of Tracking Control Unit as number where 0 = off and 1 = on
+ * Writing 0 deactivates tcu and writing 1 calibrates and activates the tcu
+ */
+static DEVICE_ATTR(tcu, 0660, kone_sysfs_show_tcu, kone_sysfs_set_tcu);
+
+/* Prints and takes the number of the profile the mouse starts with */
+static DEVICE_ATTR(startup_profile, 0660,
+ kone_sysfs_show_startup_profile,
+ kone_sysfs_set_startup_profile);
+
+static DEVICE_ATTR(kone_driver_version, 0440,
+ kone_sysfs_show_driver_version, NULL);
+
+static struct attribute *kone_attributes[] = {
+ &dev_attr_actual_dpi.attr,
+ &dev_attr_actual_profile.attr,
+ &dev_attr_weight.attr,
+ &dev_attr_firmware_version.attr,
+ &dev_attr_tcu.attr,
+ &dev_attr_startup_profile.attr,
+ &dev_attr_kone_driver_version.attr,
+ NULL
+};
+
+static struct attribute_group kone_attribute_group = {
+ .attrs = kone_attributes
+};
+
+static struct bin_attribute kone_settings_attr = {
+ .attr = { .name = "settings", .mode = 0660 },
+ .size = sizeof(struct kone_settings),
+ .read = kone_sysfs_read_settings,
+ .write = kone_sysfs_write_settings
+};
+
+static struct bin_attribute kone_profile1_attr = {
+ .attr = { .name = "profile1", .mode = 0660 },
+ .size = sizeof(struct kone_profile),
+ .read = kone_sysfs_read_profile1,
+ .write = kone_sysfs_write_profile1
+};
+
+static struct bin_attribute kone_profile2_attr = {
+ .attr = { .name = "profile2", .mode = 0660 },
+ .size = sizeof(struct kone_profile),
+ .read = kone_sysfs_read_profile2,
+ .write = kone_sysfs_write_profile2
+};
+
+static struct bin_attribute kone_profile3_attr = {
+ .attr = { .name = "profile3", .mode = 0660 },
+ .size = sizeof(struct kone_profile),
+ .read = kone_sysfs_read_profile3,
+ .write = kone_sysfs_write_profile3
+};
+
+static struct bin_attribute kone_profile4_attr = {
+ .attr = { .name = "profile4", .mode = 0660 },
+ .size = sizeof(struct kone_profile),
+ .read = kone_sysfs_read_profile4,
+ .write = kone_sysfs_write_profile4
+};
+
+static struct bin_attribute kone_profile5_attr = {
+ .attr = { .name = "profile5", .mode = 0660 },
+ .size = sizeof(struct kone_profile),
+ .read = kone_sysfs_read_profile5,
+ .write = kone_sysfs_write_profile5
+};
+
+static int kone_create_sysfs_attributes(struct usb_interface *intf)
+{
+ int retval;
+
+ retval = sysfs_create_group(&intf->dev.kobj, &kone_attribute_group);
+ if (retval)
+ goto exit_1;
+
+ retval = sysfs_create_bin_file(&intf->dev.kobj, &kone_settings_attr);
+ if (retval)
+ goto exit_2;
+
+ retval = sysfs_create_bin_file(&intf->dev.kobj, &kone_profile1_attr);
+ if (retval)
+ goto exit_3;
+
+ retval = sysfs_create_bin_file(&intf->dev.kobj, &kone_profile2_attr);
+ if (retval)
+ goto exit_4;
+
+ retval = sysfs_create_bin_file(&intf->dev.kobj, &kone_profile3_attr);
+ if (retval)
+ goto exit_5;
+
+ retval = sysfs_create_bin_file(&intf->dev.kobj, &kone_profile4_attr);
+ if (retval)
+ goto exit_6;
+
+ retval = sysfs_create_bin_file(&intf->dev.kobj, &kone_profile5_attr);
+ if (retval)
+ goto exit_7;
+
+ return 0;
+
+exit_7:
+ sysfs_remove_bin_file(&intf->dev.kobj, &kone_profile4_attr);
+exit_6:
+ sysfs_remove_bin_file(&intf->dev.kobj, &kone_profile3_attr);
+exit_5:
+ sysfs_remove_bin_file(&intf->dev.kobj, &kone_profile2_attr);
+exit_4:
+ sysfs_remove_bin_file(&intf->dev.kobj, &kone_profile1_attr);
+exit_3:
+ sysfs_remove_bin_file(&intf->dev.kobj, &kone_settings_attr);
+exit_2:
+ sysfs_remove_group(&intf->dev.kobj, &kone_attribute_group);
+exit_1:
+ return retval;
+}
+
+static void kone_remove_sysfs_attributes(struct usb_interface *intf)
+{
+ sysfs_remove_bin_file(&intf->dev.kobj, &kone_profile5_attr);
+ sysfs_remove_bin_file(&intf->dev.kobj, &kone_profile4_attr);
+ sysfs_remove_bin_file(&intf->dev.kobj, &kone_profile3_attr);
+ sysfs_remove_bin_file(&intf->dev.kobj, &kone_profile2_attr);
+ sysfs_remove_bin_file(&intf->dev.kobj, &kone_profile1_attr);
+ sysfs_remove_bin_file(&intf->dev.kobj, &kone_settings_attr);
+ sysfs_remove_group(&intf->dev.kobj, &kone_attribute_group);
+}
+
+static int kone_init_kone_device_struct(struct usb_device *usb_dev,
+ struct kone_device *kone)
+{
+ uint i;
+ int retval;
+
+ mutex_init(&kone->kone_lock);
+
+ for (i = 0; i < 5; ++i) {
+ retval = kone_get_profile(usb_dev, &kone->profiles[i], i + 1);
+ if (retval)
+ return retval;
+ }
+
+ retval = kone_get_settings(usb_dev, &kone->settings);
+ if (retval)
+ return retval;
+
+ retval = kone_get_firmware_version(usb_dev, &kone->firmware_version);
+ if (retval)
+ return retval;
+
+ kone->actual_profile = kone->settings.startup_profile;
+ kone->actual_dpi = kone->profiles[kone->actual_profile - 1].startup_dpi;
+
+ return 0;
+}
+
+/*
+ * Since IGNORE_MOUSE quirk moved to hid-apple, there is no way to bind only to
+ * mousepart if usb_hid is compiled into the kernel and kone is compiled as
+ * module.
+ * Special behaviour is bound only to the mouse part, since only mouse events
+ * contain the additional notifications.
+ */
+static int kone_init_specials(struct hid_device *hdev)
+{
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+ struct usb_device *usb_dev = interface_to_usbdev(intf);
+ struct kone_device *kone;
+ int retval;
+
+ if (intf->cur_altsetting->desc.bInterfaceProtocol
+ == USB_INTERFACE_PROTOCOL_MOUSE) {
+
+ kone = kzalloc(sizeof(*kone), GFP_KERNEL);
+ if (!kone) {
+ dev_err(&hdev->dev, "can't alloc device descriptor\n");
+ return -ENOMEM;
+ }
+ hid_set_drvdata(hdev, kone);
+
+ retval = kone_init_kone_device_struct(usb_dev, kone);
+ if (retval) {
+ dev_err(&hdev->dev,
+ "couldn't init struct kone_device\n");
+ goto exit_free;
+ }
+
+ retval = roccat_connect(hdev);
+ if (retval < 0) {
+ dev_err(&hdev->dev, "couldn't init char dev\n");
+ /* be tolerant about not getting chrdev */
+ } else {
+ kone->roccat_claimed = 1;
+ kone->chrdev_minor = retval;
+ }
+
+ retval = kone_create_sysfs_attributes(intf);
+ if (retval) {
+ dev_err(&hdev->dev, "cannot create sysfs files\n");
+ goto exit_free;
+ }
+ } else {
+ hid_set_drvdata(hdev, NULL);
+ }
+
+ return 0;
+exit_free:
+ kfree(kone);
+ return retval;
+}
+
+
+static void kone_remove_specials(struct hid_device *hdev)
+{
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+ struct kone_device *kone;
+
+ if (intf->cur_altsetting->desc.bInterfaceProtocol
+ == USB_INTERFACE_PROTOCOL_MOUSE) {
+ kone_remove_sysfs_attributes(intf);
+ kone = hid_get_drvdata(hdev);
+ if (kone->roccat_claimed)
+ roccat_disconnect(kone->chrdev_minor);
+ kfree(hid_get_drvdata(hdev));
+ }
+}
+
+static int kone_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+ int retval;
+
+ retval = hid_parse(hdev);
+ if (retval) {
+ dev_err(&hdev->dev, "parse failed\n");
+ goto exit;
+ }
+
+ retval = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+ if (retval) {
+ dev_err(&hdev->dev, "hw start failed\n");
+ goto exit;
+ }
+
+ retval = kone_init_specials(hdev);
+ if (retval) {
+ dev_err(&hdev->dev, "couldn't install mouse\n");
+ goto exit_stop;
+ }
+
+ return 0;
+
+exit_stop:
+ hid_hw_stop(hdev);
+exit:
+ return retval;
+}
+
+static void kone_remove(struct hid_device *hdev)
+{
+ kone_remove_specials(hdev);
+ hid_hw_stop(hdev);
+}
+
+/* handle special events and keep actual profile and dpi values up to date */
+static void kone_keep_values_up_to_date(struct kone_device *kone,
+ struct kone_mouse_event const *event)
+{
+ switch (event->event) {
+ case kone_mouse_event_switch_profile:
+ case kone_mouse_event_osd_profile:
+ kone->actual_profile = event->value;
+ kone->actual_dpi = kone->profiles[kone->actual_profile - 1].
+ startup_dpi;
+ break;
+ case kone_mouse_event_switch_dpi:
+ case kone_mouse_event_osd_dpi:
+ kone->actual_dpi = event->value;
+ break;
+ }
+}
+
+static void kone_report_to_chrdev(struct kone_device const *kone,
+ struct kone_mouse_event const *event)
+{
+ struct kone_roccat_report roccat_report;
+
+ switch (event->event) {
+ case kone_mouse_event_switch_profile:
+ case kone_mouse_event_switch_dpi:
+ case kone_mouse_event_osd_profile:
+ case kone_mouse_event_osd_dpi:
+ roccat_report.event = event->event;
+ roccat_report.value = event->value;
+ roccat_report.key = 0;
+ roccat_report_event(kone->chrdev_minor,
+ (uint8_t *)&roccat_report,
+ sizeof(struct kone_roccat_report));
+ break;
+ case kone_mouse_event_call_overlong_macro:
+ if (event->value == kone_keystroke_action_press) {
+ roccat_report.event = kone_mouse_event_call_overlong_macro;
+ roccat_report.value = kone->actual_profile;
+ roccat_report.key = event->macro_key;
+ roccat_report_event(kone->chrdev_minor,
+ (uint8_t *)&roccat_report,
+ sizeof(struct kone_roccat_report));
+ }
+ break;
+ }
+
+}
+
+/*
+ * Called for both the keyboard and the mouse part.
+ * Only the mouse part gets information about special events in its extended
+ * event structure.
+ */
+static int kone_raw_event(struct hid_device *hdev, struct hid_report *report,
+ u8 *data, int size)
+{
+ struct kone_device *kone = hid_get_drvdata(hdev);
+ struct kone_mouse_event *event = (struct kone_mouse_event *)data;
+
+ /* keyboard events are always processed by default handler */
+ if (size != sizeof(struct kone_mouse_event))
+ return 0;
+
+ /*
+ * Firmware 1.38 introduced new behaviour for tilt and special buttons.
+ * Pressed button is reported in each movement event.
+ * Workaround sends only one event per press.
+ */
+ if (memcmp(&kone->last_mouse_event.tilt, &event->tilt, 5))
+ memcpy(&kone->last_mouse_event, event,
+ sizeof(struct kone_mouse_event));
+ else
+ memset(&event->tilt, 0, 5);
+
+ kone_keep_values_up_to_date(kone, event);
+
+ if (kone->roccat_claimed)
+ kone_report_to_chrdev(kone, event);
+
+ return 0; /* always do further processing */
+}
+
+static const struct hid_device_id kone_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONE) },
+ { }
+};
+
+MODULE_DEVICE_TABLE(hid, kone_devices);
+
+static struct hid_driver kone_driver = {
+ .name = "kone",
+ .id_table = kone_devices,
+ .probe = kone_probe,
+ .remove = kone_remove,
+ .raw_event = kone_raw_event
+};
+
+static int __init kone_init(void)
+{
+ return hid_register_driver(&kone_driver);
+}
+
+static void __exit kone_exit(void)
+{
+ hid_unregister_driver(&kone_driver);
+}
+
+module_init(kone_init);
+module_exit(kone_exit);
+
+MODULE_AUTHOR("Stefan Achatz");
+MODULE_DESCRIPTION("USB Roccat Kone driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hid/hid-roccat-kone.h b/drivers/hid/hid-roccat-kone.h
new file mode 100644
index 000000000000..003e6f81c195
--- /dev/null
+++ b/drivers/hid/hid-roccat-kone.h
@@ -0,0 +1,233 @@
+#ifndef __HID_ROCCAT_KONE_H
+#define __HID_ROCCAT_KONE_H
+
+/*
+ * Copyright (c) 2010 Stefan Achatz <erazor_de@users.sourceforge.net>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/types.h>
+
+#define ROCCAT_KONE_DRIVER_VERSION "v0.3.1"
+
+#pragma pack(push)
+#pragma pack(1)
+
+struct kone_keystroke {
+ uint8_t key;
+ uint8_t action;
+ uint16_t period; /* in milliseconds */
+};
+
+enum kone_keystroke_buttons {
+ kone_keystroke_button_1 = 0xf0, /* left mouse button */
+ kone_keystroke_button_2 = 0xf1, /* right mouse button */
+ kone_keystroke_button_3 = 0xf2, /* wheel */
+ kone_keystroke_button_9 = 0xf3, /* side button up */
+ kone_keystroke_button_8 = 0xf4 /* side button down */
+};
+
+enum kone_keystroke_actions {
+ kone_keystroke_action_press = 0,
+ kone_keystroke_action_release = 1
+};
+
+struct kone_button_info {
+ uint8_t number; /* range 1-8 */
+ uint8_t type;
+ uint8_t macro_type; /* 0 = short, 1 = overlong */
+ uint8_t macro_set_name[16]; /* can be max 15 chars long */
+ uint8_t macro_name[16]; /* can be max 15 chars long */
+ uint8_t count;
+ struct kone_keystroke keystrokes[20];
+};
+
+enum kone_button_info_types {
+ /* valid button types until firmware 1.32 */
+ kone_button_info_type_button_1 = 0x1, /* click (left mouse button) */
+ kone_button_info_type_button_2 = 0x2, /* menu (right mouse button)*/
+ kone_button_info_type_button_3 = 0x3, /* scroll (wheel) */
+ kone_button_info_type_double_click = 0x4,
+ kone_button_info_type_key = 0x5,
+ kone_button_info_type_macro = 0x6,
+ kone_button_info_type_off = 0x7,
+ /* TODO clarify function and rename */
+ kone_button_info_type_osd_xy_prescaling = 0x8,
+ kone_button_info_type_osd_dpi = 0x9,
+ kone_button_info_type_osd_profile = 0xa,
+ kone_button_info_type_button_9 = 0xb, /* ie forward */
+ kone_button_info_type_button_8 = 0xc, /* ie backward */
+ kone_button_info_type_dpi_up = 0xd, /* internal */
+ kone_button_info_type_dpi_down = 0xe, /* internal */
+ kone_button_info_type_button_7 = 0xf, /* tilt left */
+ kone_button_info_type_button_6 = 0x10, /* tilt right */
+ kone_button_info_type_profile_up = 0x11, /* internal */
+ kone_button_info_type_profile_down = 0x12, /* internal */
+ /* additional valid button types since firmware 1.38 */
+ kone_button_info_type_multimedia_open_player = 0x20,
+ kone_button_info_type_multimedia_next_track = 0x21,
+ kone_button_info_type_multimedia_prev_track = 0x22,
+ kone_button_info_type_multimedia_play_pause = 0x23,
+ kone_button_info_type_multimedia_stop = 0x24,
+ kone_button_info_type_multimedia_mute = 0x25,
+ kone_button_info_type_multimedia_volume_up = 0x26,
+ kone_button_info_type_multimedia_volume_down = 0x27
+};
+
+enum kone_button_info_numbers {
+ kone_button_top = 1,
+ kone_button_wheel_tilt_left = 2,
+ kone_button_wheel_tilt_right = 3,
+ kone_button_forward = 4,
+ kone_button_backward = 5,
+ kone_button_middle = 6,
+ kone_button_plus = 7,
+ kone_button_minus = 8,
+};
+
+struct kone_light_info {
+ uint8_t number; /* number of light 1-5 */
+ uint8_t mod; /* 1 = on, 2 = off */
+ uint8_t red; /* range 0x00-0xff */
+ uint8_t green; /* range 0x00-0xff */
+ uint8_t blue; /* range 0x00-0xff */
+};
+
+struct kone_profile {
+ uint16_t size; /* always 975 */
+ uint16_t unused; /* always 0 */
+
+ /*
+ * range 1-5
+ * This number does not need to correspond to the location where the
+ * profile is saved
+ */
+ uint8_t profile; /* range 1-5 */
+
+ uint16_t main_sensitivity; /* range 100-1000 */
+ uint8_t xy_sensitivity_enabled; /* 1 = on, 2 = off */
+ uint16_t x_sensitivity; /* range 100-1000 */
+ uint16_t y_sensitivity; /* range 100-1000 */
+ uint8_t dpi_rate; /* bit 1 = 800, ... */
+ uint8_t startup_dpi; /* range 1-6 */
+ uint8_t polling_rate; /* 1 = 125Hz, 2 = 500Hz, 3 = 1000Hz */
+ /* kone has no dcu
+ * value is always 2 in firmwares <= 1.32 and
+ * 1 in firmwares > 1.32
+ */
+ uint8_t dcu_flag;
+ uint8_t light_effect_1; /* range 1-3 */
+ uint8_t light_effect_2; /* range 1-5 */
+ uint8_t light_effect_3; /* range 1-4 */
+ uint8_t light_effect_speed; /* range 0-255 */
+
+ struct kone_light_info light_infos[5];
+ /* offset is kone_button_info_numbers - 1 */
+ struct kone_button_info button_infos[8];
+
+ uint16_t checksum; /* \brief holds checksum of struct */
+};
+
+enum kone_polling_rates {
+ kone_polling_rate_125 = 1,
+ kone_polling_rate_500 = 2,
+ kone_polling_rate_1000 = 3
+};
+
+struct kone_settings {
+ uint16_t size; /* always 36 */
+ uint8_t startup_profile; /* 1-5 */
+ uint8_t unknown1;
+ uint8_t tcu; /* 0 = off, 1 = on */
+ uint8_t unknown2[23];
+ uint8_t calibration_data[4];
+ uint8_t unknown3[2];
+ uint16_t checksum;
+};
+
+/*
+ * 12 byte mouse event read by interrupt_read
+ */
+struct kone_mouse_event {
+ uint8_t report_number; /* always 1 */
+ uint8_t button;
+ uint16_t x;
+ uint16_t y;
+ uint8_t wheel; /* up = 1, down = -1 */
+ uint8_t tilt; /* right = 1, left = -1 */
+ uint8_t unknown;
+ uint8_t event;
+ uint8_t value; /* press = 0, release = 1 */
+ uint8_t macro_key; /* 0 to 8 */
+};
+
+enum kone_mouse_events {
+ /* osd events are meant to be displayed on screen */
+ kone_mouse_event_osd_dpi = 0xa0,
+ kone_mouse_event_osd_profile = 0xb0,
+ /* TODO clarify meaning and occurrence of kone_mouse_event_calibration */
+ kone_mouse_event_calibration = 0xc0,
+ kone_mouse_event_call_overlong_macro = 0xe0,
+ /* switch events notify if user changed values with mousebutton click */
+ kone_mouse_event_switch_dpi = 0xf0,
+ kone_mouse_event_switch_profile = 0xf1
+};
+
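+/*
+ * Command codes passed as the value parameter of the class-specific control
+ * transfers issued by kone_get_*()/kone_set_*() in hid-roccat-kone.c.
+ */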
+enum kone_commands {
+ kone_command_profile = 0x5a,
+ kone_command_settings = 0x15a,
+ kone_command_firmware_version = 0x25a,
+ kone_command_weight = 0x45a,
+ kone_command_calibrate = 0x55a,
+ kone_command_confirm_write = 0x65a,
+ kone_command_firmware = 0xe5a
+};
+
+struct kone_roccat_report {
+ uint8_t event;
+ uint8_t value; /* holds dpi or profile value */
+ uint8_t key; /* macro key on overlong macro execution */
+};
+
+#pragma pack(pop)
+
+struct kone_device {
+ /*
+ * Storing actual values when we get informed about changes since there
+ * is no way of getting this information from the device on demand
+ */
+ int actual_profile, actual_dpi;
+ /* Used for neutralizing abnormal button behaviour */
+ struct kone_mouse_event last_mouse_event;
+
+ /*
+ * It's unlikely that multiple sysfs attributes are accessed at a time,
+ * so only one mutex is used to secure hardware access and profiles and
+ * settings of this struct.
+ */
+ struct mutex kone_lock;
+
+ /*
+ * Storing the data here reduces IO and ensures that data is available
+ * when it's needed (e.g. in the interrupt handler).
+ */
+ struct kone_profile profiles[5];
+ struct kone_settings settings;
+
+ /*
+ * firmware doesn't change unless firmware update is implemented,
+ * so it's read only once
+ */
+ int firmware_version;
+
+ int roccat_claimed;
+ int chrdev_minor;
+};
+
+#endif
diff --git a/drivers/hid/hid-roccat.c b/drivers/hid/hid-roccat.c
new file mode 100644
index 000000000000..e05d48edb66f
--- /dev/null
+++ b/drivers/hid/hid-roccat.c
@@ -0,0 +1,428 @@
+/*
+ * Roccat driver for Linux
+ *
+ * Copyright (c) 2010 Stefan Achatz <erazor_de@users.sourceforge.net>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+/*
+ * Module roccat is a char device used to report special events of roccat
+ * hardware to userland. These events include requests for on-screen-display of
+ * profile or dpi settings, or requests for execution of macro sequences that
+ * are not stored in the device. The information in these events depends on the
+ * hid device implementation and contains data that is not available in a
+ * single hid event, or else hidraw could have been used.
+ * It is inspired by hidraw, but uses only one circular buffer for all readers.
+ */
+
+#include <linux/cdev.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+
+#include "hid-roccat.h"
+
+#define ROCCAT_FIRST_MINOR 0
+#define ROCCAT_MAX_DEVICES 8
+
+/* should be a power of 2 for performance reasons */
+#define ROCCAT_CBUF_SIZE 16
+
+struct roccat_report {
+ uint8_t *value;
+ int len;
+};
+
+struct roccat_device {
+ unsigned int minor;
+ int open;
+ int exist;
+ wait_queue_head_t wait;
+ struct device *dev;
+ struct hid_device *hid;
+ struct list_head readers;
+ /* protects modifications of readers list */
+ struct mutex readers_lock;
+
+ /*
+ * circular_buffer has one writer and multiple readers with their own
+ * read pointers
+ */
+ struct roccat_report cbuf[ROCCAT_CBUF_SIZE];
+ int cbuf_end;
+ struct mutex cbuf_lock;
+};
+
+struct roccat_reader {
+ struct list_head node;
+ struct roccat_device *device;
+ int cbuf_start;
+};
+
+static int roccat_major;
+static struct class *roccat_class;
+static struct cdev roccat_cdev;
+
+static struct roccat_device *devices[ROCCAT_MAX_DEVICES];
+/* protects modifications of devices array */
+static DEFINE_MUTEX(devices_lock);
+
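+/*
+ * Blocking read of one report from the shared circular buffer. Each reader
+ * keeps its own start index and sleeps on device->wait until the writer
+ * advances cbuf_end; a report longer than the requested count is truncated.
+ */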
+static ssize_t roccat_read(struct file *file, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct roccat_reader *reader = file->private_data;
+ struct roccat_device *device = reader->device;
+ struct roccat_report *report;
+ ssize_t retval = 0, len;
+ DECLARE_WAITQUEUE(wait, current);
+
+ mutex_lock(&device->cbuf_lock);
+
+ /* no data? */
+ if (reader->cbuf_start == device->cbuf_end) {
+ add_wait_queue(&device->wait, &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ /* wait for data */
+ while (reader->cbuf_start == device->cbuf_end) {
+ if (file->f_flags & O_NONBLOCK) {
+ retval = -EAGAIN;
+ break;
+ }
+ if (signal_pending(current)) {
+ retval = -ERESTARTSYS;
+ break;
+ }
+ if (!device->exist) {
+ retval = -EIO;
+ break;
+ }
+
+ mutex_unlock(&device->cbuf_lock);
+ schedule();
+ mutex_lock(&device->cbuf_lock);
+ set_current_state(TASK_INTERRUPTIBLE);
+ }
+
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&device->wait, &wait);
+ }
+
+ /* here we either have data or a reason to return if retval is set */
+ if (retval)
+ goto exit_unlock;
+
+ report = &device->cbuf[reader->cbuf_start];
+ /*
+ * If report is larger than requested amount of data, rest of report
+ * is lost!
+ */
+ len = report->len > count ? count : report->len;
+
+ if (copy_to_user(buffer, report->value, len)) {
+ retval = -EFAULT;
+ goto exit_unlock;
+ }
+ retval += len;
+ reader->cbuf_start = (reader->cbuf_start + 1) % ROCCAT_CBUF_SIZE;
+
+exit_unlock:
+ mutex_unlock(&device->cbuf_lock);
+ return retval;
+}
+
+static unsigned int roccat_poll(struct file *file, poll_table *wait)
+{
+ struct roccat_reader *reader = file->private_data;
+ poll_wait(file, &reader->device->wait, wait);
+ if (reader->cbuf_start != reader->device->cbuf_end)
+ return POLLIN | POLLRDNORM;
+ if (!reader->device->exist)
+ return POLLERR | POLLHUP;
+ return 0;
+}
+
+static int roccat_open(struct inode *inode, struct file *file)
+{
+ unsigned int minor = iminor(inode);
+ struct roccat_reader *reader;
+ struct roccat_device *device;
+ int error = 0;
+
+ reader = kzalloc(sizeof(struct roccat_reader), GFP_KERNEL);
+ if (!reader)
+ return -ENOMEM;
+
+ mutex_lock(&devices_lock);
+
+ device = devices[minor];
+
+ if (!device) {
+ printk(KERN_EMERG "roccat device with minor %d doesn't exist\n",
+ minor);
+ mutex_unlock(&devices_lock);
+ kfree(reader);
+ return -ENODEV;
+ }
+
+ mutex_lock(&device->readers_lock);
+
+ if (!device->open++) {
+ /* power on device on adding first reader */
+ if (device->hid->ll_driver->power) {
+ error = device->hid->ll_driver->power(device->hid,
+ PM_HINT_FULLON);
+ if (error < 0) {
+ --device->open;
+ goto exit_unlock;
+ }
+ }
+ error = device->hid->ll_driver->open(device->hid);
+ if (error < 0) {
+ if (device->hid->ll_driver->power)
+ device->hid->ll_driver->power(device->hid,
+ PM_HINT_NORMAL);
+ --device->open;
+ goto exit_unlock;
+ }
+ }
+
+ reader->device = device;
+ /* new reader doesn't get old events */
+ reader->cbuf_start = device->cbuf_end;
+
+ list_add_tail(&reader->node, &device->readers);
+ file->private_data = reader;
+
+exit_unlock:
+ mutex_unlock(&device->readers_lock);
+ mutex_unlock(&devices_lock);
+ if (error)
+ kfree(reader);
+ return error;
+}
+
+static int roccat_release(struct inode *inode, struct file *file)
+{
+ unsigned int minor = iminor(inode);
+ struct roccat_reader *reader = file->private_data;
+ struct roccat_device *device;
+
+ mutex_lock(&devices_lock);
+
+ device = devices[minor];
+ if (!device) {
+ mutex_unlock(&devices_lock);
+ printk(KERN_EMERG "roccat device with minor %d doesn't exist\n",
+ minor);
+ return -ENODEV;
+ }
+
+ mutex_lock(&device->readers_lock);
+ list_del(&reader->node);
+ mutex_unlock(&device->readers_lock);
+ kfree(reader);
+
+ if (!--device->open) {
+ /* removing last reader */
+ if (device->exist) {
+ if (device->hid->ll_driver->power)
+ device->hid->ll_driver->power(device->hid,
+ PM_HINT_NORMAL);
+ device->hid->ll_driver->close(device->hid);
+ } else {
+ kfree(device);
+ }
+ }
+
+ mutex_unlock(&devices_lock);
+
+ return 0;
+}
+
+/*
+ * roccat_report_event() - output data to readers
+ * @minor: minor device number returned by roccat_connect()
+ * @data: pointer to data
+ * @len: size of data
+ *
+ * Return value is zero on success, a negative error code on failure.
+ *
+ * This is called from the interrupt handler.
+ */
+int roccat_report_event(int minor, u8 const *data, int len)
+{
+ struct roccat_device *device;
+ struct roccat_reader *reader;
+ struct roccat_report *report;
+ uint8_t *new_value;
+
+ new_value = kmemdup(data, len, GFP_ATOMIC);
+ if (!new_value)
+ return -ENOMEM;
+
+ device = devices[minor];
+
+ report = &device->cbuf[device->cbuf_end];
+
+ /* passing NULL is safe */
+ kfree(report->value);
+
+ report->value = new_value;
+ report->len = len;
+ device->cbuf_end = (device->cbuf_end + 1) % ROCCAT_CBUF_SIZE;
+
+ list_for_each_entry(reader, &device->readers, node) {
+ /*
+ * As we already inserted one element, the buffer can't be
+ * empty. If start and end are equal, buffer is full and we
+ * increase start, so that slow reader misses one event, but
+ * gets the newer ones in the right order.
+ */
+ if (reader->cbuf_start == device->cbuf_end)
+ reader->cbuf_start = (reader->cbuf_start + 1) % ROCCAT_CBUF_SIZE;
+ }
+
+ wake_up_interruptible(&device->wait);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(roccat_report_event);
+
+/*
+ * roccat_connect() - create a char device for special event output
+ * @hid: the hid device the char device should be connected to.
+ *
+ * Return value is the minor device number in the range
+ * [0, ROCCAT_MAX_DEVICES - 1] on success, a negative error code on failure.
+ */
+int roccat_connect(struct hid_device *hid)
+{
+ unsigned int minor;
+ struct roccat_device *device;
+ int temp;
+
+ device = kzalloc(sizeof(struct roccat_device), GFP_KERNEL);
+ if (!device)
+ return -ENOMEM;
+
+ mutex_lock(&devices_lock);
+
+ for (minor = 0; minor < ROCCAT_MAX_DEVICES; ++minor) {
+ if (devices[minor])
+ continue;
+ break;
+ }
+
+ if (minor < ROCCAT_MAX_DEVICES) {
+ devices[minor] = device;
+ } else {
+ mutex_unlock(&devices_lock);
+ kfree(device);
+ return -EINVAL;
+ }
+
+ device->dev = device_create(roccat_class, &hid->dev,
+ MKDEV(roccat_major, minor), NULL,
+ "%s%s%d", "roccat", hid->driver->name, minor);
+
+ if (IS_ERR(device->dev)) {
+ devices[minor] = NULL;
+ mutex_unlock(&devices_lock);
+ temp = PTR_ERR(device->dev);
+ kfree(device);
+ return temp;
+ }
+
+ mutex_unlock(&devices_lock);
+
+ init_waitqueue_head(&device->wait);
+ INIT_LIST_HEAD(&device->readers);
+ mutex_init(&device->readers_lock);
+ mutex_init(&device->cbuf_lock);
+ device->minor = minor;
+ device->hid = hid;
+ device->exist = 1;
+ device->cbuf_end = 0;
+
+ return minor;
+}
+EXPORT_SYMBOL_GPL(roccat_connect);
+
+/*
+ * roccat_disconnect() - remove char device from hid device
+ * @minor: the minor device number returned by roccat_connect()
+ */
+void roccat_disconnect(int minor)
+{
+ struct roccat_device *device;
+
+ mutex_lock(&devices_lock);
+ device = devices[minor];
+ devices[minor] = NULL;
+ mutex_unlock(&devices_lock);
+
+ device->exist = 0; /* TODO exist maybe not needed */
+
+ device_destroy(roccat_class, MKDEV(roccat_major, minor));
+
+ if (device->open) {
+ device->hid->ll_driver->close(device->hid);
+ wake_up_interruptible(&device->wait);
+ } else {
+ kfree(device);
+ }
+}
+EXPORT_SYMBOL_GPL(roccat_disconnect);
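+
+/*
+ * Illustrative sketch of how a device specific driver is expected to use
+ * these exports (variable names are hypothetical):
+ *
+ *	probe():     minor = roccat_connect(hdev);
+ *	raw_event(): roccat_report_event(minor, data, size);
+ *	remove():    roccat_disconnect(minor);
+ */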
+
+static const struct file_operations roccat_ops = {
+ .owner = THIS_MODULE,
+ .read = roccat_read,
+ .poll = roccat_poll,
+ .open = roccat_open,
+ .release = roccat_release,
+};
+
+static int __init roccat_init(void)
+{
+ int retval;
+ dev_t dev_id;
+
+ retval = alloc_chrdev_region(&dev_id, ROCCAT_FIRST_MINOR,
+ ROCCAT_MAX_DEVICES, "roccat");
+
+ roccat_major = MAJOR(dev_id);
+
+ if (retval < 0) {
+ printk(KERN_WARNING "roccat: can't get major number\n");
+ return retval;
+ }
+
+ roccat_class = class_create(THIS_MODULE, "roccat");
+ if (IS_ERR(roccat_class)) {
+ retval = PTR_ERR(roccat_class);
+ unregister_chrdev_region(dev_id, ROCCAT_MAX_DEVICES);
+ return retval;
+ }
+
+ cdev_init(&roccat_cdev, &roccat_ops);
+ cdev_add(&roccat_cdev, dev_id, ROCCAT_MAX_DEVICES);
+
+ return 0;
+}
+
+static void __exit roccat_exit(void)
+{
+ dev_t dev_id = MKDEV(roccat_major, 0);
+
+ cdev_del(&roccat_cdev);
+ class_destroy(roccat_class);
+ unregister_chrdev_region(dev_id, ROCCAT_MAX_DEVICES);
+}
+
+module_init(roccat_init);
+module_exit(roccat_exit);
+
+MODULE_AUTHOR("Stefan Achatz");
+MODULE_DESCRIPTION("USB Roccat char device");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hid/hid-roccat.h b/drivers/hid/hid-roccat.h
new file mode 100644
index 000000000000..d8aae0c1fa7e
--- /dev/null
+++ b/drivers/hid/hid-roccat.h
@@ -0,0 +1,31 @@
+#ifndef __HID_ROCCAT_H
+#define __HID_ROCCAT_H
+
+/*
+ * Copyright (c) 2010 Stefan Achatz <erazor_de@users.sourceforge.net>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/hid.h>
+#include <linux/types.h>
+
+#if defined(CONFIG_HID_ROCCAT) || defined (CONFIG_HID_ROCCAT_MODULE)
+int roccat_connect(struct hid_device *hid);
+void roccat_disconnect(int minor);
+int roccat_report_event(int minor, u8 const *data, int len);
+#else
+static inline int roccat_connect(struct hid_device *hid) { return -1; }
+static inline void roccat_disconnect(int minor) {}
+static inline int roccat_report_event(int minor, u8 const *data, int len)
+{
+ return 0;
+}
+#endif
+
+#endif
diff --git a/drivers/hid/hid-samsung.c b/drivers/hid/hid-samsung.c
index 510dd1340597..bda0fd60c98d 100644
--- a/drivers/hid/hid-samsung.c
+++ b/drivers/hid/hid-samsung.c
@@ -7,6 +7,18 @@
* Copyright (c) 2006-2007 Jiri Kosina
* Copyright (c) 2007 Paul Walmsley
* Copyright (c) 2008 Jiri Slaby
+ * Copyright (c) 2010 Don Prince <dhprince.devel@yahoo.co.uk>
+ *
+ *
+ * This driver supports several HID devices:
+ *
+ * [0419:0001] Samsung IrDA remote controller (reports as Cypress USB Mouse);
+ * various HID report fixups for different variants.
+ *
+ * [0419:0600] Creative Desktop Wireless 6000 keyboard/mouse combo;
+ * several key mappings from the consumer usage page deviate from the
+ * USB HUT 1.12 standard.
+ *
*/
/*
@@ -17,14 +29,13 @@
*/
#include <linux/device.h>
+#include <linux/usb.h>
#include <linux/hid.h>
#include <linux/module.h>
#include "hid-ids.h"
/*
- * Samsung IrDA remote controller (reports as Cypress USB Mouse).
- *
* There are several variants for 0419:0001:
*
* 1. 184 byte report descriptor
@@ -43,21 +54,21 @@
* 4. 171 byte report descriptor
* Report #3 has an array field with logical range 0..1 instead of 1..3.
*/
-static inline void samsung_dev_trace(struct hid_device *hdev,
+static inline void samsung_irda_dev_trace(struct hid_device *hdev,
unsigned int rsize)
{
dev_info(&hdev->dev, "fixing up Samsung IrDA %d byte report "
"descriptor\n", rsize);
}
-static void samsung_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+static void samsung_irda_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int rsize)
{
if (rsize == 184 && rdesc[175] == 0x25 && rdesc[176] == 0x40 &&
rdesc[177] == 0x75 && rdesc[178] == 0x30 &&
rdesc[179] == 0x95 && rdesc[180] == 0x01 &&
rdesc[182] == 0x40) {
- samsung_dev_trace(hdev, 184);
+ samsung_irda_dev_trace(hdev, 184);
rdesc[176] = 0xff;
rdesc[178] = 0x08;
rdesc[180] = 0x06;
@@ -65,24 +76,80 @@ static void samsung_report_fixup(struct hid_device *hdev, __u8 *rdesc,
} else
if (rsize == 203 && rdesc[192] == 0x15 && rdesc[193] == 0x0 &&
rdesc[194] == 0x25 && rdesc[195] == 0x12) {
- samsung_dev_trace(hdev, 203);
+ samsung_irda_dev_trace(hdev, 203);
rdesc[193] = 0x1;
rdesc[195] = 0xf;
} else
if (rsize == 135 && rdesc[124] == 0x15 && rdesc[125] == 0x0 &&
rdesc[126] == 0x25 && rdesc[127] == 0x11) {
- samsung_dev_trace(hdev, 135);
+ samsung_irda_dev_trace(hdev, 135);
rdesc[125] = 0x1;
rdesc[127] = 0xe;
} else
if (rsize == 171 && rdesc[160] == 0x15 && rdesc[161] == 0x0 &&
rdesc[162] == 0x25 && rdesc[163] == 0x01) {
- samsung_dev_trace(hdev, 171);
+ samsung_irda_dev_trace(hdev, 171);
rdesc[161] = 0x1;
rdesc[163] = 0x3;
}
}
+#define samsung_kbd_mouse_map_key_clear(c) \
+ hid_map_usage_clear(hi, usage, bit, max, EV_KEY, (c))
+
+static int samsung_kbd_mouse_input_mapping(struct hid_device *hdev,
+ struct hid_input *hi, struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+ unsigned short ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
+
+ if (1 != ifnum || HID_UP_CONSUMER != (usage->hid & HID_USAGE_PAGE))
+ return 0;
+
+ dbg_hid("samsung wireless keyboard/mouse input mapping event [0x%x]\n",
+ usage->hid & HID_USAGE);
+
+ switch (usage->hid & HID_USAGE) {
+ /* report 2 */
+ case 0x183: samsung_kbd_mouse_map_key_clear(KEY_MEDIA); break;
+ case 0x195: samsung_kbd_mouse_map_key_clear(KEY_EMAIL); break;
+ case 0x196: samsung_kbd_mouse_map_key_clear(KEY_CALC); break;
+ case 0x197: samsung_kbd_mouse_map_key_clear(KEY_COMPUTER); break;
+ case 0x22b: samsung_kbd_mouse_map_key_clear(KEY_SEARCH); break;
+ case 0x22c: samsung_kbd_mouse_map_key_clear(KEY_WWW); break;
+ case 0x22d: samsung_kbd_mouse_map_key_clear(KEY_BACK); break;
+ case 0x22e: samsung_kbd_mouse_map_key_clear(KEY_FORWARD); break;
+ case 0x22f: samsung_kbd_mouse_map_key_clear(KEY_FAVORITES); break;
+ case 0x230: samsung_kbd_mouse_map_key_clear(KEY_REFRESH); break;
+ case 0x231: samsung_kbd_mouse_map_key_clear(KEY_STOP); break;
+ default:
+ return 0;
+ }
+
+ return 1;
+}
+
+static void samsung_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ unsigned int rsize)
+{
+ if (USB_DEVICE_ID_SAMSUNG_IR_REMOTE == hdev->product)
+ samsung_irda_report_fixup(hdev, rdesc, rsize);
+}
+
+static int samsung_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+ struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ int ret = 0;
+
+ if (USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE == hdev->product)
+ ret = samsung_kbd_mouse_input_mapping(hdev,
+ hi, field, usage, bit, max);
+
+ return ret;
+}
+
static int samsung_probe(struct hid_device *hdev,
const struct hid_device_id *id)
{
@@ -95,10 +162,12 @@ static int samsung_probe(struct hid_device *hdev,
goto err_free;
}
- if (hdev->rsize == 184) {
- /* disable hidinput, force hiddev */
- cmask = (cmask & ~HID_CONNECT_HIDINPUT) |
- HID_CONNECT_HIDDEV_FORCE;
+ if (USB_DEVICE_ID_SAMSUNG_IR_REMOTE == hdev->product) {
+ if (hdev->rsize == 184) {
+ /* disable hidinput, force hiddev */
+ cmask = (cmask & ~HID_CONNECT_HIDINPUT) |
+ HID_CONNECT_HIDDEV_FORCE;
+ }
}
ret = hid_hw_start(hdev, cmask);
@@ -114,6 +183,7 @@ err_free:
static const struct hid_device_id samsung_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE) },
{ }
};
MODULE_DEVICE_TABLE(hid, samsung_devices);
@@ -122,6 +192,7 @@ static struct hid_driver samsung_driver = {
.name = "samsung",
.id_table = samsung_devices,
.report_fixup = samsung_report_fixup,
+ .input_mapping = samsung_input_mapping,
.probe = samsung_probe,
};
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index 7502a4b2fa86..402d5574b574 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -76,7 +76,7 @@ static int sony_set_operational_usb(struct hid_device *hdev)
static int sony_set_operational_bt(struct hid_device *hdev)
{
- unsigned char buf[] = { 0x53, 0xf4, 0x42, 0x03, 0x00, 0x00 };
+ unsigned char buf[] = { 0xf4, 0x42, 0x03, 0x00, 0x00 };
return hdev->hid_output_raw_report(hdev, buf, sizeof(buf), HID_FEATURE_REPORT);
}
diff --git a/drivers/hid/hid-topseed.c b/drivers/hid/hid-topseed.c
index 6925eda1081a..2eebdcc57bcf 100644
--- a/drivers/hid/hid-topseed.c
+++ b/drivers/hid/hid-topseed.c
@@ -3,6 +3,9 @@
*
* Copyright (c) 2008 Lev Babiev
* based on hid-cherry driver
+ *
+ * Modified to also support BTC "Emprex 3009URF III Vista MCE Remote" by
+ * Wayne Thomas 2010.
*/
/*
@@ -24,23 +27,29 @@ static int ts_input_mapping(struct hid_device *hdev, struct hid_input *hi,
struct hid_field *field, struct hid_usage *usage,
unsigned long **bit, int *max)
{
- if ((usage->hid & HID_USAGE_PAGE) != 0x0ffbc0000)
+ if ((usage->hid & HID_USAGE_PAGE) != HID_UP_LOGIVENDOR)
return 0;
switch (usage->hid & HID_USAGE) {
- case 0x00d: ts_map_key_clear(KEY_HOME); break;
- case 0x024: ts_map_key_clear(KEY_MENU); break;
- case 0x025: ts_map_key_clear(KEY_TV); break;
- case 0x048: ts_map_key_clear(KEY_RED); break;
- case 0x047: ts_map_key_clear(KEY_GREEN); break;
- case 0x049: ts_map_key_clear(KEY_YELLOW); break;
- case 0x04a: ts_map_key_clear(KEY_BLUE); break;
- case 0x04b: ts_map_key_clear(KEY_ANGLE); break;
- case 0x04c: ts_map_key_clear(KEY_LANGUAGE); break;
- case 0x04d: ts_map_key_clear(KEY_SUBTITLE); break;
- case 0x031: ts_map_key_clear(KEY_AUDIO); break;
- case 0x032: ts_map_key_clear(KEY_TEXT); break;
- case 0x033: ts_map_key_clear(KEY_CHANNEL); break;
+ case 0x00d: ts_map_key_clear(KEY_MEDIA); break;
+ case 0x024: ts_map_key_clear(KEY_MENU); break;
+ case 0x025: ts_map_key_clear(KEY_TV); break;
+ case 0x031: ts_map_key_clear(KEY_AUDIO); break;
+ case 0x032: ts_map_key_clear(KEY_TEXT); break;
+ case 0x033: ts_map_key_clear(KEY_CHANNEL); break;
+ case 0x047: ts_map_key_clear(KEY_MP3); break;
+ case 0x048: ts_map_key_clear(KEY_TV2); break;
+ case 0x049: ts_map_key_clear(KEY_CAMERA); break;
+ case 0x04a: ts_map_key_clear(KEY_VIDEO); break;
+ case 0x04b: ts_map_key_clear(KEY_ANGLE); break;
+ case 0x04c: ts_map_key_clear(KEY_LANGUAGE); break;
+ case 0x04d: ts_map_key_clear(KEY_SUBTITLE); break;
+ case 0x050: ts_map_key_clear(KEY_RADIO); break;
+ case 0x05a: ts_map_key_clear(KEY_TEXT); break;
+ case 0x05b: ts_map_key_clear(KEY_RED); break;
+ case 0x05c: ts_map_key_clear(KEY_GREEN); break;
+ case 0x05d: ts_map_key_clear(KEY_YELLOW); break;
+ case 0x05e: ts_map_key_clear(KEY_BLUE); break;
default:
return 0;
}
@@ -50,6 +59,7 @@ static int ts_input_mapping(struct hid_device *hdev, struct hid_input *hi,
static const struct hid_device_id ts_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) },
{ }
};
MODULE_DEVICE_TABLE(hid, ts_devices);
diff --git a/drivers/hid/hid-wacom.c b/drivers/hid/hid-wacom.c
index f7700cf49721..1e051f1171e4 100644
--- a/drivers/hid/hid-wacom.c
+++ b/drivers/hid/hid-wacom.c
@@ -22,14 +22,159 @@
#include <linux/hid.h>
#include <linux/module.h>
#include <linux/slab.h>
+#ifdef CONFIG_HID_WACOM_POWER_SUPPLY
+#include <linux/power_supply.h>
+#endif
#include "hid-ids.h"
struct wacom_data {
__u16 tool;
unsigned char butstate;
+ unsigned char high_speed;
+#ifdef CONFIG_HID_WACOM_POWER_SUPPLY
+ int battery_capacity;
+ struct power_supply battery;
+ struct power_supply ac;
+#endif
};
+#ifdef CONFIG_HID_WACOM_POWER_SUPPLY
+/* percent of battery capacity, 0 means AC online */
+static unsigned short batcap[8] = { 1, 15, 25, 35, 50, 70, 100, 0 };
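+/* wacom_raw_event() stores the 3-bit field from data[7] that indexes batcap[] */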
+
+static enum power_supply_property wacom_battery_props[] = {
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_CAPACITY
+};
+
+static enum power_supply_property wacom_ac_props[] = {
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_ONLINE
+};
+
+static int wacom_battery_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct wacom_data *wdata = container_of(psy,
+ struct wacom_data, battery);
+ int power_state = batcap[wdata->battery_capacity];
+ int ret = 0;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = 1;
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ /* show 100% battery capacity when charging */
+ if (power_state == 0)
+ val->intval = 100;
+ else
+ val->intval = power_state;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+static int wacom_ac_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct wacom_data *wdata = container_of(psy, struct wacom_data, ac);
+ int power_state = batcap[wdata->battery_capacity];
+ int ret = 0;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_PRESENT:
+ /* fall through */
+ case POWER_SUPPLY_PROP_ONLINE:
+ if (power_state == 0)
+ val->intval = 1;
+ else
+ val->intval = 0;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+#endif
+
+static void wacom_poke(struct hid_device *hdev, u8 speed)
+{
+ struct wacom_data *wdata = hid_get_drvdata(hdev);
+ int limit, ret;
+ char rep_data[2];
+
+	rep_data[0] = 0x03; rep_data[1] = 0x00;
+ limit = 3;
+ do {
+ ret = hdev->hid_output_raw_report(hdev, rep_data, 2,
+ HID_FEATURE_REPORT);
+ } while (ret < 0 && limit-- > 0);
+
+ if (ret >= 0) {
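+		/* the second feature report selects reporting speed: 0x05 = low, 0x06 = high */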
+ if (speed == 0)
+ rep_data[0] = 0x05;
+ else
+ rep_data[0] = 0x06;
+
+ rep_data[1] = 0x00;
+ limit = 3;
+ do {
+ ret = hdev->hid_output_raw_report(hdev, rep_data, 2,
+ HID_FEATURE_REPORT);
+ } while (ret < 0 && limit-- > 0);
+
+ if (ret >= 0) {
+ wdata->high_speed = speed;
+ return;
+ }
+ }
+
+ /*
+ * Note that if the raw queries fail, it's not a hard failure and it
+ * is safe to continue
+ */
+ dev_warn(&hdev->dev, "failed to poke device, command %d, err %d\n",
+ rep_data[0], ret);
+ return;
+}
+
+static ssize_t wacom_show_speed(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+ struct wacom_data *wdata = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%i\n", wdata->high_speed);
+}
+
+static ssize_t wacom_store_speed(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ int new_speed;
+
+	if (sscanf(buf, "%1d", &new_speed) != 1)
+ return -EINVAL;
+
+ if (new_speed == 0 || new_speed == 1) {
+ wacom_poke(hdev, new_speed);
+ return strnlen(buf, PAGE_SIZE);
+ } else
+ return -EINVAL;
+}
+
+static DEVICE_ATTR(speed, S_IRUGO | S_IWUGO,
+ wacom_show_speed, wacom_store_speed);
+
static int wacom_raw_event(struct hid_device *hdev, struct hid_report *report,
u8 *raw_data, int size)
{
@@ -148,6 +293,12 @@ static int wacom_raw_event(struct hid_device *hdev, struct hid_report *report,
input_sync(input);
}
+#ifdef CONFIG_HID_WACOM_POWER_SUPPLY
+ /* Store current battery capacity */
+ rw = (data[7] >> 2 & 0x07);
+ if (rw != wdata->battery_capacity)
+ wdata->battery_capacity = rw;
+#endif
return 1;
}
@@ -157,9 +308,7 @@ static int wacom_probe(struct hid_device *hdev,
struct hid_input *hidinput;
struct input_dev *input;
struct wacom_data *wdata;
- char rep_data[2];
int ret;
- int limit;
wdata = kzalloc(sizeof(*wdata), GFP_KERNEL);
if (wdata == NULL) {
@@ -182,31 +331,53 @@ static int wacom_probe(struct hid_device *hdev,
goto err_free;
}
- /*
- * Note that if the raw queries fail, it's not a hard failure and it
- * is safe to continue
- */
+ ret = device_create_file(&hdev->dev, &dev_attr_speed);
+ if (ret)
+ dev_warn(&hdev->dev,
+ "can't create sysfs speed attribute err: %d\n", ret);
- /* Set Wacom mode2 */
- rep_data[0] = 0x03; rep_data[1] = 0x00;
- limit = 3;
- do {
- ret = hdev->hid_output_raw_report(hdev, rep_data, 2,
- HID_FEATURE_REPORT);
- } while (ret < 0 && limit-- > 0);
- if (ret < 0)
- dev_warn(&hdev->dev, "failed to poke device #1, %d\n", ret);
+ /* Set Wacom mode 2 with high reporting speed */
+ wacom_poke(hdev, 1);
- /* 0x06 - high reporting speed, 0x05 - low speed */
- rep_data[0] = 0x06; rep_data[1] = 0x00;
- limit = 3;
- do {
- ret = hdev->hid_output_raw_report(hdev, rep_data, 2,
- HID_FEATURE_REPORT);
- } while (ret < 0 && limit-- > 0);
- if (ret < 0)
- dev_warn(&hdev->dev, "failed to poke device #2, %d\n", ret);
+#ifdef CONFIG_HID_WACOM_POWER_SUPPLY
+ wdata->battery.properties = wacom_battery_props;
+ wdata->battery.num_properties = ARRAY_SIZE(wacom_battery_props);
+ wdata->battery.get_property = wacom_battery_get_property;
+ wdata->battery.name = "wacom_battery";
+ wdata->battery.type = POWER_SUPPLY_TYPE_BATTERY;
+ wdata->battery.use_for_apm = 0;
+ ret = power_supply_register(&hdev->dev, &wdata->battery);
+ if (ret) {
+ dev_warn(&hdev->dev,
+ "can't create sysfs battery attribute, err: %d\n", ret);
+ /*
+ * battery attribute is not critical for the tablet, but if it
+		 * failed then there is no need to create the ac attribute
+ */
+ goto move_on;
+ }
+
+ wdata->ac.properties = wacom_ac_props;
+ wdata->ac.num_properties = ARRAY_SIZE(wacom_ac_props);
+ wdata->ac.get_property = wacom_ac_get_property;
+ wdata->ac.name = "wacom_ac";
+ wdata->ac.type = POWER_SUPPLY_TYPE_MAINS;
+ wdata->ac.use_for_apm = 0;
+
+ ret = power_supply_register(&hdev->dev, &wdata->ac);
+ if (ret) {
+ dev_warn(&hdev->dev,
+ "can't create ac battery attribute, err: %d\n", ret);
+ /*
+ * ac attribute is not critical for the tablet, but if it
+		 * failed then we don't want the battery attribute to exist
+ */
+ power_supply_unregister(&wdata->battery);
+ }
+
+move_on:
+#endif
hidinput = list_entry(hdev->inputs.next, struct hid_input, list);
input = hidinput->input;
@@ -251,13 +422,21 @@ err_free:
static void wacom_remove(struct hid_device *hdev)
{
+#ifdef CONFIG_HID_WACOM_POWER_SUPPLY
+ struct wacom_data *wdata = hid_get_drvdata(hdev);
+#endif
hid_hw_stop(hdev);
+
+#ifdef CONFIG_HID_WACOM_POWER_SUPPLY
+ power_supply_unregister(&wdata->battery);
+ power_supply_unregister(&wdata->ac);
+#endif
kfree(hid_get_drvdata(hdev));
}
static const struct hid_device_id wacom_devices[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH) },
-
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_INTUOS4_BLUETOOTH) },
{ }
};
MODULE_DEVICE_TABLE(hid, wacom_devices);
@@ -277,7 +456,6 @@ static int __init wacom_init(void)
ret = hid_register_driver(&wacom_driver);
if (ret)
printk(KERN_ERR "can't register wacom driver\n");
- printk(KERN_ERR "wacom driver registered\n");
return ret;
}
diff --git a/drivers/hid/hid-zydacron.c b/drivers/hid/hid-zydacron.c
new file mode 100644
index 000000000000..9e8d35a203e4
--- /dev/null
+++ b/drivers/hid/hid-zydacron.c
@@ -0,0 +1,237 @@
+/*
+ * HID driver for the Zydacron remote control
+ *
+ * Copyright (c) 2010 Don Prince <dhprince.devel@yahoo.co.uk>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+
+#include "hid-ids.h"
+
+struct zc_device {
+ struct input_dev *input_ep81;
+ unsigned short last_key[4];
+};
+
+
+/*
+ * The Zydacron remote control has an invalid HID report descriptor
+ * that needs fixing before we can parse it.
+ */
+static void zc_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ unsigned int rsize)
+{
+ if (rsize >= 253 &&
+ rdesc[0x96] == 0xbc && rdesc[0x97] == 0xff &&
+ rdesc[0xca] == 0xbc && rdesc[0xcb] == 0xff &&
+ rdesc[0xe1] == 0xbc && rdesc[0xe2] == 0xff) {
+ dev_info(&hdev->dev,
+ "fixing up zydacron remote control report "
+ "descriptor\n");
+ rdesc[0x96] = rdesc[0xca] = rdesc[0xe1] = 0x0c;
+ rdesc[0x97] = rdesc[0xcb] = rdesc[0xe2] = 0x00;
+ }
+}
+
+#define zc_map_key_clear(c) \
+ hid_map_usage_clear(hi, usage, bit, max, EV_KEY, (c))
+
+static int zc_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+ struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ int i;
+ struct zc_device *zc = hid_get_drvdata(hdev);
+ zc->input_ep81 = hi->input;
+
+ if ((usage->hid & HID_USAGE_PAGE) != HID_UP_CONSUMER)
+ return 0;
+
+	dbg_hid("zydacron input mapping event [0x%x]\n",
+ usage->hid & HID_USAGE);
+
+ switch (usage->hid & HID_USAGE) {
+ /* report 2 */
+ case 0x10:
+ zc_map_key_clear(KEY_MODE);
+ break;
+ case 0x30:
+ zc_map_key_clear(KEY_SCREEN);
+ break;
+ case 0x70:
+ zc_map_key_clear(KEY_INFO);
+ break;
+ /* report 3 */
+ case 0x04:
+ zc_map_key_clear(KEY_RADIO);
+ break;
+ /* report 4 */
+ case 0x0d:
+ zc_map_key_clear(KEY_PVR);
+ break;
+ case 0x25:
+ zc_map_key_clear(KEY_TV);
+ break;
+ case 0x47:
+ zc_map_key_clear(KEY_AUDIO);
+ break;
+ case 0x49:
+ zc_map_key_clear(KEY_AUX);
+ break;
+ case 0x4a:
+ zc_map_key_clear(KEY_VIDEO);
+ break;
+ case 0x48:
+ zc_map_key_clear(KEY_DVD);
+ break;
+ case 0x24:
+ zc_map_key_clear(KEY_MENU);
+ break;
+ case 0x32:
+ zc_map_key_clear(KEY_TEXT);
+ break;
+ default:
+ return 0;
+ }
+
+ for (i = 0; i < 4; i++)
+ zc->last_key[i] = 0;
+
+ return 1;
+}
+
+static int zc_raw_event(struct hid_device *hdev, struct hid_report *report,
+ u8 *data, int size)
+{
+ struct zc_device *zc = hid_get_drvdata(hdev);
+ int ret = 0;
+ unsigned key;
+ unsigned short index;
+
+ if (report->id == data[0]) {
+
+ /* break keys */
+ for (index = 0; index < 4; index++) {
+ key = zc->last_key[index];
+ if (key) {
+ input_event(zc->input_ep81, EV_KEY, key, 0);
+ zc->last_key[index] = 0;
+ }
+ }
+
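+		/*
+		 * make key: data[1] identifies the pressed key; it is
+		 * released ("break") when the next report arrives.
+		 */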
+ key = 0;
+ switch (report->id) {
+ case 0x02:
+ case 0x03:
+ switch (data[1]) {
+ case 0x10:
+ key = KEY_MODE;
+ index = 0;
+ break;
+ case 0x30:
+ key = KEY_SCREEN;
+ index = 1;
+ break;
+ case 0x70:
+ key = KEY_INFO;
+ index = 2;
+ break;
+ case 0x04:
+ key = KEY_RADIO;
+ index = 3;
+ break;
+ }
+
+ if (key) {
+ input_event(zc->input_ep81, EV_KEY, key, 1);
+ zc->last_key[index] = key;
+ }
+
+ ret = 1;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int zc_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+ int ret;
+ struct zc_device *zc;
+
+ zc = kzalloc(sizeof(*zc), GFP_KERNEL);
+ if (zc == NULL) {
+ dev_err(&hdev->dev, "zydacron: can't alloc descriptor\n");
+ return -ENOMEM;
+ }
+
+ hid_set_drvdata(hdev, zc);
+
+ ret = hid_parse(hdev);
+ if (ret) {
+ dev_err(&hdev->dev, "zydacron: parse failed\n");
+ goto err_free;
+ }
+
+ ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+ if (ret) {
+ dev_err(&hdev->dev, "zydacron: hw start failed\n");
+ goto err_free;
+ }
+
+ return 0;
+err_free:
+ kfree(zc);
+
+ return ret;
+}
+
+static void zc_remove(struct hid_device *hdev)
+{
+ struct zc_device *zc = hid_get_drvdata(hdev);
+
+ hid_hw_stop(hdev);
+
+	kfree(zc);
+}
+
+static const struct hid_device_id zc_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_ZYDACRON, USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, zc_devices);
+
+static struct hid_driver zc_driver = {
+ .name = "zydacron",
+ .id_table = zc_devices,
+ .report_fixup = zc_report_fixup,
+ .input_mapping = zc_input_mapping,
+ .raw_event = zc_raw_event,
+ .probe = zc_probe,
+ .remove = zc_remove,
+};
+
+static int __init zc_init(void)
+{
+ return hid_register_driver(&zc_driver);
+}
+
+static void __exit zc_exit(void)
+{
+ hid_unregister_driver(&zc_driver);
+}
+
+module_init(zc_init);
+module_exit(zc_exit);
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index 6eadf1a9b3cc..3ccd47850677 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -106,38 +106,48 @@ out:
static ssize_t hidraw_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
{
unsigned int minor = iminor(file->f_path.dentry->d_inode);
- /* FIXME: What stops hidraw_table going NULL */
- struct hid_device *dev = hidraw_table[minor]->hid;
+ struct hid_device *dev;
__u8 *buf;
int ret = 0;
- if (!dev->hid_output_raw_report)
- return -ENODEV;
+ mutex_lock(&minors_lock);
+ dev = hidraw_table[minor]->hid;
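+	/* minors_lock protects hidraw_table[] from changing while we use it */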
+
+ if (!dev->hid_output_raw_report) {
+ ret = -ENODEV;
+ goto out;
+ }
if (count > HID_MAX_BUFFER_SIZE) {
printk(KERN_WARNING "hidraw: pid %d passed too large report\n",
task_pid_nr(current));
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
if (count < 2) {
printk(KERN_WARNING "hidraw: pid %d passed too short report\n",
task_pid_nr(current));
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
buf = kmalloc(count * sizeof(__u8), GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
+ if (!buf) {
+ ret = -ENOMEM;
+ goto out;
+ }
if (copy_from_user(buf, buffer, count)) {
ret = -EFAULT;
- goto out;
+ goto out_free;
}
ret = dev->hid_output_raw_report(dev, buf, count, HID_OUTPUT_REPORT);
-out:
+out_free:
kfree(buf);
+out:
+ mutex_unlock(&minors_lock);
return ret;
}
@@ -165,11 +175,8 @@ static int hidraw_open(struct inode *inode, struct file *file)
goto out;
}
- lock_kernel();
mutex_lock(&minors_lock);
if (!hidraw_table[minor]) {
- printk(KERN_EMERG "hidraw device with minor %d doesn't exist\n",
- minor);
kfree(list);
err = -ENODEV;
goto out_unlock;
@@ -197,7 +204,6 @@ static int hidraw_open(struct inode *inode, struct file *file)
out_unlock:
mutex_unlock(&minors_lock);
- unlock_kernel();
out:
return err;
@@ -209,11 +215,8 @@ static int hidraw_release(struct inode * inode, struct file * file)
struct hidraw *dev;
struct hidraw_list *list = file->private_data;
- if (!hidraw_table[minor]) {
- printk(KERN_EMERG "hidraw device with minor %d doesn't exist\n",
- minor);
+ if (!hidraw_table[minor])
return -ENODEV;
- }
list_del(&list->node);
dev = hidraw_table[minor];
@@ -238,11 +241,12 @@ static long hidraw_ioctl(struct file *file, unsigned int cmd,
struct inode *inode = file->f_path.dentry->d_inode;
unsigned int minor = iminor(inode);
long ret = 0;
- /* FIXME: What stops hidraw_table going NULL */
- struct hidraw *dev = hidraw_table[minor];
+ struct hidraw *dev;
void __user *user_arg = (void __user*) arg;
- lock_kernel();
+ mutex_lock(&minors_lock);
+ dev = hidraw_table[minor];
+
switch (cmd) {
case HIDIOCGRDESCSIZE:
if (put_user(dev->hid->rsize, (int __user *)arg))
@@ -311,11 +315,11 @@ static long hidraw_ioctl(struct file *file, unsigned int cmd,
-EFAULT : len;
break;
}
- }
+ }
ret = -ENOTTY;
}
- unlock_kernel();
+ mutex_unlock(&minors_lock);
return ret;
}
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index 56d06cd8075b..1ebd3244eb85 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -623,6 +623,7 @@ int usbhid_wait_io(struct hid_device *hid)
return 0;
}
+EXPORT_SYMBOL_GPL(usbhid_wait_io);
static int hid_set_idle(struct usb_device *dev, int ifnum, int report, int idle)
{
@@ -783,13 +784,12 @@ static int hid_alloc_buffers(struct usb_device *dev, struct hid_device *hid)
{
struct usbhid_device *usbhid = hid->driver_data;
- usbhid->inbuf = usb_buffer_alloc(dev, usbhid->bufsize, GFP_KERNEL,
+ usbhid->inbuf = usb_alloc_coherent(dev, usbhid->bufsize, GFP_KERNEL,
&usbhid->inbuf_dma);
- usbhid->outbuf = usb_buffer_alloc(dev, usbhid->bufsize, GFP_KERNEL,
+ usbhid->outbuf = usb_alloc_coherent(dev, usbhid->bufsize, GFP_KERNEL,
&usbhid->outbuf_dma);
- usbhid->cr = usb_buffer_alloc(dev, sizeof(*usbhid->cr), GFP_KERNEL,
- &usbhid->cr_dma);
- usbhid->ctrlbuf = usb_buffer_alloc(dev, usbhid->bufsize, GFP_KERNEL,
+ usbhid->cr = kmalloc(sizeof(*usbhid->cr), GFP_KERNEL);
+ usbhid->ctrlbuf = usb_alloc_coherent(dev, usbhid->bufsize, GFP_KERNEL,
&usbhid->ctrlbuf_dma);
if (!usbhid->inbuf || !usbhid->outbuf || !usbhid->cr ||
!usbhid->ctrlbuf)
@@ -807,16 +807,36 @@ static int usbhid_output_raw_report(struct hid_device *hid, __u8 *buf, size_t co
struct usb_host_interface *interface = intf->cur_altsetting;
int ret;
- ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
- HID_REQ_SET_REPORT,
- USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
- ((report_type + 1) << 8) | *buf,
- interface->desc.bInterfaceNumber, buf + 1, count - 1,
- USB_CTRL_SET_TIMEOUT);
-
- /* count also the report id */
- if (ret > 0)
- ret++;
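+	/*
+	 * Prefer the interrupt OUT endpoint when one exists; otherwise
+	 * fall back to a SET_REPORT control transfer on endpoint 0.
+	 */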
+ if (usbhid->urbout) {
+ int actual_length;
+ int skipped_report_id = 0;
+ if (buf[0] == 0x0) {
+ /* Don't send the Report ID */
+ buf++;
+ count--;
+ skipped_report_id = 1;
+ }
+ ret = usb_interrupt_msg(dev, usbhid->urbout->pipe,
+ buf, count, &actual_length,
+ USB_CTRL_SET_TIMEOUT);
+ /* return the number of bytes transferred */
+ if (ret == 0) {
+ ret = actual_length;
+ /* count also the report id */
+ if (skipped_report_id)
+ ret++;
+ }
+ } else {
+ ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
+ HID_REQ_SET_REPORT,
+ USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+ ((report_type + 1) << 8) | *buf,
+ interface->desc.bInterfaceNumber, buf + 1, count - 1,
+ USB_CTRL_SET_TIMEOUT);
+ /* count also the report id */
+ if (ret > 0)
+ ret++;
+ }
return ret;
}
@@ -844,10 +864,10 @@ static void hid_free_buffers(struct usb_device *dev, struct hid_device *hid)
{
struct usbhid_device *usbhid = hid->driver_data;
- usb_buffer_free(dev, usbhid->bufsize, usbhid->inbuf, usbhid->inbuf_dma);
- usb_buffer_free(dev, usbhid->bufsize, usbhid->outbuf, usbhid->outbuf_dma);
- usb_buffer_free(dev, sizeof(*(usbhid->cr)), usbhid->cr, usbhid->cr_dma);
- usb_buffer_free(dev, usbhid->bufsize, usbhid->ctrlbuf, usbhid->ctrlbuf_dma);
+ usb_free_coherent(dev, usbhid->bufsize, usbhid->inbuf, usbhid->inbuf_dma);
+ usb_free_coherent(dev, usbhid->bufsize, usbhid->outbuf, usbhid->outbuf_dma);
+ kfree(usbhid->cr);
+ usb_free_coherent(dev, usbhid->bufsize, usbhid->ctrlbuf, usbhid->ctrlbuf_dma);
}
static int usbhid_parse(struct hid_device *hid)
@@ -999,13 +1019,6 @@ static int usbhid_start(struct hid_device *hid)
}
}
- init_waitqueue_head(&usbhid->wait);
- INIT_WORK(&usbhid->reset_work, hid_reset);
- INIT_WORK(&usbhid->restart_work, __usbhid_restart_queues);
- setup_timer(&usbhid->io_retry, hid_retry_timeout, (unsigned long) hid);
-
- spin_lock_init(&usbhid->lock);
-
usbhid->urbctrl = usb_alloc_urb(0, GFP_KERNEL);
if (!usbhid->urbctrl) {
ret = -ENOMEM;
@@ -1014,9 +1027,8 @@ static int usbhid_start(struct hid_device *hid)
usb_fill_control_urb(usbhid->urbctrl, dev, 0, (void *) usbhid->cr,
usbhid->ctrlbuf, 1, hid_ctrl, hid);
- usbhid->urbctrl->setup_dma = usbhid->cr_dma;
usbhid->urbctrl->transfer_dma = usbhid->ctrlbuf_dma;
- usbhid->urbctrl->transfer_flags |= (URB_NO_TRANSFER_DMA_MAP | URB_NO_SETUP_DMA_MAP);
+ usbhid->urbctrl->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
if (!(hid->quirks & HID_QUIRK_NO_INIT_REPORTS))
usbhid_init_reports(hid);
@@ -1026,12 +1038,15 @@ static int usbhid_start(struct hid_device *hid)
/* Some keyboards don't work until their LEDs have been set.
* Since BIOSes do set the LEDs, it must be safe for any device
* that supports the keyboard boot protocol.
+ * In addition, enable remote wakeup by default for all keyboard
+ * devices supporting the boot protocol.
*/
if (interface->desc.bInterfaceSubClass == USB_INTERFACE_SUBCLASS_BOOT &&
interface->desc.bInterfaceProtocol ==
- USB_INTERFACE_PROTOCOL_KEYBOARD)
+ USB_INTERFACE_PROTOCOL_KEYBOARD) {
usbhid_set_leds(hid);
-
+ device_set_wakeup_enable(&dev->dev, 1);
+ }
return 0;
fail:
@@ -1140,6 +1155,7 @@ static int usbhid_probe(struct usb_interface *intf, const struct usb_device_id *
hid->vendor = le16_to_cpu(dev->descriptor.idVendor);
hid->product = le16_to_cpu(dev->descriptor.idProduct);
hid->name[0] = 0;
+ hid->quirks = usbhid_lookup_quirk(hid->vendor, hid->product);
if (intf->cur_altsetting->desc.bInterfaceProtocol ==
USB_INTERFACE_PROTOCOL_MOUSE)
hid->type = HID_TYPE_USBMOUSE;
@@ -1179,6 +1195,12 @@ static int usbhid_probe(struct usb_interface *intf, const struct usb_device_id *
usbhid->intf = intf;
usbhid->ifnum = interface->desc.bInterfaceNumber;
+ init_waitqueue_head(&usbhid->wait);
+ INIT_WORK(&usbhid->reset_work, hid_reset);
+ INIT_WORK(&usbhid->restart_work, __usbhid_restart_queues);
+ setup_timer(&usbhid->io_retry, hid_retry_timeout, (unsigned long) hid);
+ spin_lock_init(&usbhid->lock);
+
ret = hid_add_device(hid);
if (ret) {
if (ret != -ENODEV)
@@ -1290,6 +1312,11 @@ static int hid_suspend(struct usb_interface *intf, pm_message_t message)
{
set_bit(HID_REPORTED_IDLE, &usbhid->iofl);
spin_unlock_irq(&usbhid->lock);
+ if (hid->driver && hid->driver->suspend) {
+ status = hid->driver->suspend(hid, message);
+ if (status < 0)
+ return status;
+ }
} else {
usbhid_mark_busy(usbhid);
spin_unlock_irq(&usbhid->lock);
@@ -1297,6 +1324,11 @@ static int hid_suspend(struct usb_interface *intf, pm_message_t message)
}
} else {
+ if (hid->driver && hid->driver->suspend) {
+ status = hid->driver->suspend(hid, message);
+ if (status < 0)
+ return status;
+ }
spin_lock_irq(&usbhid->lock);
set_bit(HID_REPORTED_IDLE, &usbhid->iofl);
spin_unlock_irq(&usbhid->lock);
@@ -1351,6 +1383,11 @@ static int hid_resume(struct usb_interface *intf)
hid_io_error(hid);
usbhid_restart_queues(usbhid);
+ if (status >= 0 && hid->driver && hid->driver->resume) {
+ int ret = hid->driver->resume(hid);
+ if (ret < 0)
+ status = ret;
+ }
dev_dbg(&intf->dev, "resume status %d\n", status);
return 0;
}
@@ -1359,9 +1396,16 @@ static int hid_reset_resume(struct usb_interface *intf)
{
struct hid_device *hid = usb_get_intfdata(intf);
struct usbhid_device *usbhid = hid->driver_data;
+ int status;
clear_bit(HID_REPORTED_IDLE, &usbhid->iofl);
- return hid_post_reset(intf);
+ status = hid_post_reset(intf);
+ if (status >= 0 && hid->driver && hid->driver->reset_resume) {
+ int ret = hid->driver->reset_resume(hid);
+ if (ret < 0)
+ status = ret;
+ }
+ return status;
}
#endif /* CONFIG_PM */
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 1152f9b5fd44..5ff8d327f33a 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -33,6 +33,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_AASHIMA, USB_DEVICE_ID_AASHIMA_PREDATOR, HID_QUIRK_BADPAD },
{ USB_VENDOR_ID_ALPS, USB_DEVICE_ID_IBM_GAMEPAD, HID_QUIRK_BADPAD },
{ USB_VENDOR_ID_CHIC, USB_DEVICE_ID_CHIC_GAMEPAD, HID_QUIRK_BADPAD },
+ { USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_DRIVING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FLYING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FIGHTING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index 433602aed468..c24d2fa3e3b6 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -267,6 +267,7 @@ static int hiddev_open(struct inode *inode, struct file *file)
struct hiddev_list *list;
int res, i;
+ /* See comment in hiddev_connect() for BKL explanation */
lock_kernel();
i = iminor(inode) - HIDDEV_MINOR_BASE;
@@ -894,8 +895,22 @@ int hiddev_connect(struct hid_device *hid, unsigned int force)
hiddev->hid = hid;
hiddev->exist = 1;
- /* when lock_kernel() usage is fixed in usb_open(),
- * we could also fix it here */
+ /*
+ * BKL here is used to avoid race after usb_register_dev().
+ * Once the device node has been created, open() could happen on it.
+ * The code below will then fail, as hiddev_table hasn't been
+ * updated.
+ *
+ * The obvious fix -- introducing mutex to guard hiddev_table[]
+ * doesn't work, as usb_open() and usb_register_dev() both take
+ * minor_rwsem, thus we'll have ABBA deadlock.
+ *
+ * Before BKL pushdown, usb_open() had been acquiring it in right
+ * order, so _open() was safe to use it to protect from this race.
+ * Now the order is different, but AB-BA deadlock still doesn't occur
+ * as BKL is dropped on schedule() (i.e. while sleeping on
+ * minor_rwsem). Fugly.
+ */
lock_kernel();
retval = usb_register_dev(usbhid->intf, &hiddev_class);
if (retval) {
diff --git a/drivers/hid/usbhid/usbhid.h b/drivers/hid/usbhid/usbhid.h
index ec20400c7f29..693fd3e720df 100644
--- a/drivers/hid/usbhid/usbhid.h
+++ b/drivers/hid/usbhid/usbhid.h
@@ -75,7 +75,6 @@ struct usbhid_device {
struct urb *urbctrl; /* Control URB */
struct usb_ctrlrequest *cr; /* Control request struct */
- dma_addr_t cr_dma; /* Control request struct dma */
struct hid_control_fifo ctrl[HID_CONTROL_FIFO_SIZE]; /* Control fifo */
unsigned char ctrlhead, ctrltail; /* Control fifo head & tail */
char *ctrlbuf; /* Control buffer */
diff --git a/drivers/hid/usbhid/usbkbd.c b/drivers/hid/usbhid/usbkbd.c
index f843443ba5c3..a948605564fb 100644
--- a/drivers/hid/usbhid/usbkbd.c
+++ b/drivers/hid/usbhid/usbkbd.c
@@ -74,7 +74,6 @@ struct usb_kbd {
unsigned char *new;
struct usb_ctrlrequest *cr;
unsigned char *leds;
- dma_addr_t cr_dma;
dma_addr_t new_dma;
dma_addr_t leds_dma;
};
@@ -197,11 +196,11 @@ static int usb_kbd_alloc_mem(struct usb_device *dev, struct usb_kbd *kbd)
return -1;
if (!(kbd->led = usb_alloc_urb(0, GFP_KERNEL)))
return -1;
- if (!(kbd->new = usb_buffer_alloc(dev, 8, GFP_ATOMIC, &kbd->new_dma)))
+ if (!(kbd->new = usb_alloc_coherent(dev, 8, GFP_ATOMIC, &kbd->new_dma)))
return -1;
- if (!(kbd->cr = usb_buffer_alloc(dev, sizeof(struct usb_ctrlrequest), GFP_ATOMIC, &kbd->cr_dma)))
+ if (!(kbd->cr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL)))
return -1;
- if (!(kbd->leds = usb_buffer_alloc(dev, 1, GFP_ATOMIC, &kbd->leds_dma)))
+ if (!(kbd->leds = usb_alloc_coherent(dev, 1, GFP_ATOMIC, &kbd->leds_dma)))
return -1;
return 0;
@@ -211,9 +210,9 @@ static void usb_kbd_free_mem(struct usb_device *dev, struct usb_kbd *kbd)
{
usb_free_urb(kbd->irq);
usb_free_urb(kbd->led);
- usb_buffer_free(dev, 8, kbd->new, kbd->new_dma);
- usb_buffer_free(dev, sizeof(struct usb_ctrlrequest), kbd->cr, kbd->cr_dma);
- usb_buffer_free(dev, 1, kbd->leds, kbd->leds_dma);
+ usb_free_coherent(dev, 8, kbd->new, kbd->new_dma);
+ kfree(kbd->cr);
+ usb_free_coherent(dev, 1, kbd->leds, kbd->leds_dma);
}
static int usb_kbd_probe(struct usb_interface *iface,
@@ -304,15 +303,15 @@ static int usb_kbd_probe(struct usb_interface *iface,
usb_fill_control_urb(kbd->led, dev, usb_sndctrlpipe(dev, 0),
(void *) kbd->cr, kbd->leds, 1,
usb_kbd_led, kbd);
- kbd->led->setup_dma = kbd->cr_dma;
kbd->led->transfer_dma = kbd->leds_dma;
- kbd->led->transfer_flags |= (URB_NO_TRANSFER_DMA_MAP | URB_NO_SETUP_DMA_MAP);
+ kbd->led->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
error = input_register_device(kbd->dev);
if (error)
goto fail2;
usb_set_intfdata(iface, kbd);
+ device_set_wakeup_enable(&dev->dev, 1);
return 0;
fail2:
diff --git a/drivers/hid/usbhid/usbmouse.c b/drivers/hid/usbhid/usbmouse.c
index 72ab4b268096..79b2bf81a059 100644
--- a/drivers/hid/usbhid/usbmouse.c
+++ b/drivers/hid/usbhid/usbmouse.c
@@ -142,7 +142,7 @@ static int usb_mouse_probe(struct usb_interface *intf, const struct usb_device_i
if (!mouse || !input_dev)
goto fail1;
- mouse->data = usb_buffer_alloc(dev, 8, GFP_ATOMIC, &mouse->data_dma);
+ mouse->data = usb_alloc_coherent(dev, 8, GFP_ATOMIC, &mouse->data_dma);
if (!mouse->data)
goto fail1;
@@ -205,7 +205,7 @@ static int usb_mouse_probe(struct usb_interface *intf, const struct usb_device_i
fail3:
usb_free_urb(mouse->irq);
fail2:
- usb_buffer_free(dev, 8, mouse->data, mouse->data_dma);
+ usb_free_coherent(dev, 8, mouse->data, mouse->data_dma);
fail1:
input_free_device(input_dev);
kfree(mouse);
@@ -221,7 +221,7 @@ static void usb_mouse_disconnect(struct usb_interface *intf)
usb_kill_urb(mouse->irq);
input_unregister_device(mouse->dev);
usb_free_urb(mouse->irq);
- usb_buffer_free(interface_to_usbdev(intf), 8, mouse->data, mouse->data_dma);
+ usb_free_coherent(interface_to_usbdev(intf), 8, mouse->data, mouse->data_dma);
kfree(mouse);
}
}
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 9be8e1754a0b..e19cf8eb6ccf 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -447,13 +447,14 @@ config SENSORS_IT87
will be called it87.
config SENSORS_LM63
- tristate "National Semiconductor LM63"
+ tristate "National Semiconductor LM63 and LM64"
depends on I2C
help
- If you say yes here you get support for the National Semiconductor
- LM63 remote diode digital temperature sensor with integrated fan
- control. Such chips are found on the Tyan S4882 (Thunder K8QS Pro)
- motherboard, among others.
+ If you say yes here you get support for the National
+ Semiconductor LM63 and LM64 remote diode digital temperature
+ sensors with integrated fan control. Such chips are found
+ on the Tyan S4882 (Thunder K8QS Pro) motherboard, among
+ others.
This driver can also be built as a module. If so, the module
will be called lm63.
@@ -492,7 +493,8 @@ config SENSORS_LM75
- NXP's LM75A
- ST Microelectronics STDS75
- TelCom (now Microchip) TCN75
- - Texas Instruments TMP100, TMP101, TMP75, TMP175, TMP275
+ - Texas Instruments TMP100, TMP101, TMP105, TMP75, TMP175,
+ TMP275
This driver supports driver model based binding through board
specific I2C device tables.
@@ -749,6 +751,16 @@ config SENSORS_DME1737
This driver can also be built as a module. If so, the module
will be called dme1737.
+config SENSORS_EMC1403
+ tristate "SMSC EMC1403 thermal sensor"
+ depends on I2C
+ help
+ If you say yes here you get support for the SMSC EMC1403
+ temperature monitoring chip.
+
+ Threshold values can be configured using sysfs.
+ Data from the different diodes are accessible via sysfs.
+
config SENSORS_SMSC47M1
tristate "SMSC LPC47M10x and compatibles"
help
@@ -802,6 +814,15 @@ config SENSORS_ADS7828
This driver can also be built as a module. If so, the module
will be called ads7828.
+config SENSORS_ADS7871
+ tristate "Texas Instruments ADS7871 A/D converter"
+ depends on SPI
+ help
+	  If you say yes here you get support for the TI ADS7871 and ADS7870.
+
+ This driver can also be built as a module. If so, the module
+ will be called ads7871.
+
config SENSORS_AMC6821
tristate "Texas Instruments AMC6821"
depends on I2C && EXPERIMENTAL
@@ -822,6 +843,16 @@ config SENSORS_THMC50
This driver can also be built as a module. If so, the module
will be called thmc50.
+config SENSORS_TMP102
+ tristate "Texas Instruments TMP102"
+ depends on I2C && EXPERIMENTAL
+ help
+ If you say yes here you get support for Texas Instruments TMP102
+ sensor chips.
+
+ This driver can also be built as a module. If so, the module
+ will be called tmp102.
+
config SENSORS_TMP401
tristate "Texas Instruments TMP401 and compatibles"
depends on I2C && EXPERIMENTAL
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 4aa1a3d112ad..2138ceb1a713 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -29,6 +29,7 @@ obj-$(CONFIG_SENSORS_ADM1029) += adm1029.o
obj-$(CONFIG_SENSORS_ADM1031) += adm1031.o
obj-$(CONFIG_SENSORS_ADM9240) += adm9240.o
obj-$(CONFIG_SENSORS_ADS7828) += ads7828.o
+obj-$(CONFIG_SENSORS_ADS7871) += ads7871.o
obj-$(CONFIG_SENSORS_ADT7411) += adt7411.o
obj-$(CONFIG_SENSORS_ADT7462) += adt7462.o
obj-$(CONFIG_SENSORS_ADT7470) += adt7470.o
@@ -40,6 +41,7 @@ obj-$(CONFIG_SENSORS_ATXP1) += atxp1.o
obj-$(CONFIG_SENSORS_CORETEMP) += coretemp.o
obj-$(CONFIG_SENSORS_DME1737) += dme1737.o
obj-$(CONFIG_SENSORS_DS1621) += ds1621.o
+obj-$(CONFIG_SENSORS_EMC1403) += emc1403.o
obj-$(CONFIG_SENSORS_F71805F) += f71805f.o
obj-$(CONFIG_SENSORS_F71882FG) += f71882fg.o
obj-$(CONFIG_SENSORS_F75375S) += f75375s.o
@@ -89,6 +91,7 @@ obj-$(CONFIG_SENSORS_SMSC47M1) += smsc47m1.o
obj-$(CONFIG_SENSORS_SMSC47M192)+= smsc47m192.o
obj-$(CONFIG_SENSORS_AMC6821) += amc6821.o
obj-$(CONFIG_SENSORS_THMC50) += thmc50.o
+obj-$(CONFIG_SENSORS_TMP102) += tmp102.o
obj-$(CONFIG_SENSORS_TMP401) += tmp401.o
obj-$(CONFIG_SENSORS_TMP421) += tmp421.o
obj-$(CONFIG_SENSORS_VIA_CPUTEMP)+= via-cputemp.o
diff --git a/drivers/hwmon/adm1031.c b/drivers/hwmon/adm1031.c
index 1644b92e7cc4..15c1a9616af3 100644
--- a/drivers/hwmon/adm1031.c
+++ b/drivers/hwmon/adm1031.c
@@ -36,6 +36,7 @@
#define ADM1031_REG_FAN_DIV(nr) (0x20 + (nr))
#define ADM1031_REG_PWM (0x22)
#define ADM1031_REG_FAN_MIN(nr) (0x10 + (nr))
+#define ADM1031_REG_FAN_FILTER (0x23)
#define ADM1031_REG_TEMP_OFFSET(nr) (0x0d + (nr))
#define ADM1031_REG_TEMP_MAX(nr) (0x14 + 4 * (nr))
@@ -61,6 +62,9 @@
#define ADM1031_CONF2_TACH2_ENABLE 0x08
#define ADM1031_CONF2_TEMP_ENABLE(chan) (0x10 << (chan))
+#define ADM1031_UPDATE_RATE_MASK 0x1c
+#define ADM1031_UPDATE_RATE_SHIFT 2
+
/* Addresses to scan */
static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END };
@@ -75,6 +79,7 @@ struct adm1031_data {
int chip_type;
char valid; /* !=0 if following fields are valid */
unsigned long last_updated; /* In jiffies */
+ unsigned int update_rate; /* In milliseconds */
/* The chan_select_table contains the possible configurations for
* auto fan control.
*/
@@ -738,6 +743,57 @@ static SENSOR_DEVICE_ATTR(temp3_crit_alarm, S_IRUGO, show_alarm, NULL, 12);
static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_alarm, NULL, 13);
static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 14);
+/* Update Rate */
+static const unsigned int update_rates[] = {
+ 16000, 8000, 4000, 2000, 1000, 500, 250, 125,
+};
+
+static ssize_t show_update_rate(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct adm1031_data *data = i2c_get_clientdata(client);
+
+ return sprintf(buf, "%u\n", data->update_rate);
+}
+
+static ssize_t set_update_rate(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct adm1031_data *data = i2c_get_clientdata(client);
+ unsigned long val;
+ int i, err;
+ u8 reg;
+
+ err = strict_strtoul(buf, 10, &val);
+ if (err)
+ return err;
+
+ /* find the nearest update rate from the table */
+ for (i = 0; i < ARRAY_SIZE(update_rates) - 1; i++) {
+ if (val >= update_rates[i])
+ break;
+ }
+ /* if not found, we point to the last entry (lowest update rate) */
+
+ /* set the new update rate while preserving other settings */
+ reg = adm1031_read_value(client, ADM1031_REG_FAN_FILTER);
+ reg &= ~ADM1031_UPDATE_RATE_MASK;
+ reg |= i << ADM1031_UPDATE_RATE_SHIFT;
+ adm1031_write_value(client, ADM1031_REG_FAN_FILTER, reg);
+
+ mutex_lock(&data->update_lock);
+ data->update_rate = update_rates[i];
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
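+
+/*
+ * Illustrative example (derived from update_rates[] above): writing 3000
+ * selects 2000 ms, i.e. requested rates are rounded down to the nearest
+ * supported value; anything below 125 selects 125 ms.
+ */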
+
+static DEVICE_ATTR(update_rate, S_IRUGO | S_IWUSR, show_update_rate,
+ set_update_rate);
+
static struct attribute *adm1031_attributes[] = {
&sensor_dev_attr_fan1_input.dev_attr.attr,
&sensor_dev_attr_fan1_div.dev_attr.attr,
@@ -774,6 +830,7 @@ static struct attribute *adm1031_attributes[] = {
&sensor_dev_attr_auto_fan1_min_pwm.dev_attr.attr,
+ &dev_attr_update_rate.attr,
&dev_attr_alarms.attr,
NULL
@@ -900,6 +957,7 @@ static void adm1031_init_client(struct i2c_client *client)
{
unsigned int read_val;
unsigned int mask;
+ int i;
struct adm1031_data *data = i2c_get_clientdata(client);
mask = (ADM1031_CONF2_PWM1_ENABLE | ADM1031_CONF2_TACH1_ENABLE);
@@ -919,18 +977,24 @@ static void adm1031_init_client(struct i2c_client *client)
ADM1031_CONF1_MONITOR_ENABLE);
}
+ /* Read the chip's update rate */
+ mask = ADM1031_UPDATE_RATE_MASK;
+ read_val = adm1031_read_value(client, ADM1031_REG_FAN_FILTER);
+ i = (read_val & mask) >> ADM1031_UPDATE_RATE_SHIFT;
+ data->update_rate = update_rates[i];
}
static struct adm1031_data *adm1031_update_device(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct adm1031_data *data = i2c_get_clientdata(client);
+ unsigned long next_update;
int chan;
mutex_lock(&data->update_lock);
- if (time_after(jiffies, data->last_updated + HZ + HZ / 2)
- || !data->valid) {
+ next_update = data->last_updated + msecs_to_jiffies(data->update_rate);
+ if (time_after(jiffies, next_update) || !data->valid) {
dev_dbg(&client->dev, "Starting adm1031 update\n");
for (chan = 0;
diff --git a/drivers/hwmon/ads7871.c b/drivers/hwmon/ads7871.c
new file mode 100644
index 000000000000..b300a2048af1
--- /dev/null
+++ b/drivers/hwmon/ads7871.c
@@ -0,0 +1,253 @@
+/*
+ * ads7871 - driver for TI ADS7871 A/D converter
+ *
+ * Copyright (c) 2010 Paul Thomas <pthomas8589@gmail.com>
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 or
+ * later as published by the Free Software Foundation.
+ *
+ * You need to have something like this in struct spi_board_info
+ * {
+ * .modalias = "ads7871",
+ * .max_speed_hz = 2*1000*1000,
+ * .chip_select = 0,
+ * .bus_num = 1,
+ * },
+ */
+
+/*From figure 18 in the datasheet*/
+/*Register addresses*/
+#define REG_LS_BYTE 0 /*A/D Output Data, LS Byte*/
+#define REG_MS_BYTE 1 /*A/D Output Data, MS Byte*/
+#define REG_PGA_VALID 2 /*PGA Valid Register*/
+#define REG_AD_CONTROL 3 /*A/D Control Register*/
+#define REG_GAIN_MUX 4 /*Gain/Mux Register*/
+#define REG_IO_STATE 5 /*Digital I/O State Register*/
+#define REG_IO_CONTROL 6 /*Digital I/O Control Register*/
+#define REG_OSC_CONTROL 7 /*Rev/Oscillator Control Register*/
+#define REG_SER_CONTROL 24 /*Serial Interface Control Register*/
+#define REG_ID 31 /*ID Register*/
+
+/*From figure 17 in the datasheet
+* These bits get ORed with the address to form
+* the instruction byte */
+/*Instruction Bit masks*/
+#define INST_MODE_bm (1<<7)
+#define INST_READ_bm (1<<6)
+#define INST_16BIT_bm (1<<5)
+
+/*From figure 18 in the datasheet*/
+/*bit masks for Rev/Oscillator Control Register*/
+#define MUX_CNV_bv 7
+#define MUX_CNV_bm (1<<MUX_CNV_bv)
+#define MUX_M3_bm (1<<3) /*M3 selects single ended*/
+#define MUX_G_bv 4 /*allows for reg = (gain << MUX_G_bv) | ...*/
+
+/*From figure 18 in the datasheet*/
+/*bit masks for Rev/Oscillator Control Register*/
+#define OSC_OSCR_bm (1<<5)
+#define OSC_OSCE_bm (1<<4)
+#define OSC_REFE_bm (1<<3)
+#define OSC_BUFE_bm (1<<2)
+#define OSC_R2V_bm (1<<1)
+#define OSC_RBG_bm (1<<0)
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/spi/spi.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+
+#define DEVICE_NAME "ads7871"
+
+struct ads7871_data {
+ struct device *hwmon_dev;
+ struct mutex update_lock;
+};
+
+static int ads7871_read_reg8(struct spi_device *spi, int reg)
+{
+ int ret;
+ reg = reg | INST_READ_bm;
+ ret = spi_w8r8(spi, reg);
+ return ret;
+}
+
+static int ads7871_read_reg16(struct spi_device *spi, int reg)
+{
+ int ret;
+ reg = reg | INST_READ_bm | INST_16BIT_bm;
+ ret = spi_w8r16(spi, reg);
+ return ret;
+}
+
+static int ads7871_write_reg8(struct spi_device *spi, int reg, u8 val)
+{
+ u8 tmp[2] = {reg, val};
+ return spi_write(spi, tmp, sizeof(tmp));
+}
+
+static ssize_t show_voltage(struct device *dev,
+ struct device_attribute *da, char *buf)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ int ret, val, i = 0;
+ uint8_t channel, mux_cnv;
+
+ channel = attr->index;
+ /*TODO: add support for conversions
+ *other than single ended with a gain of 1*/
+ /*MUX_M3_bm forces single ended*/
+ /*This is also where the gain of the PGA would be set*/
+ ads7871_write_reg8(spi, REG_GAIN_MUX,
+ (MUX_CNV_bm | MUX_M3_bm | channel));
+
+ ret = ads7871_read_reg8(spi, REG_GAIN_MUX);
+ mux_cnv = ((ret & MUX_CNV_bm)>>MUX_CNV_bv);
+ /*on 400MHz arm9 platform the conversion
+ *is already done when we do this test*/
+ while ((i < 2) && mux_cnv) {
+ i++;
+ ret = ads7871_read_reg8(spi, REG_GAIN_MUX);
+ mux_cnv = ((ret & MUX_CNV_bm)>>MUX_CNV_bv);
+ msleep_interruptible(1);
+ }
+
+ if (mux_cnv == 0) {
+ val = ads7871_read_reg16(spi, REG_LS_BYTE);
+ /*result in volts*10000 = (val/8192)*2.5*10000*/
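+		/* e.g. (val>>2) == 4096 gives 12500, i.e. 1.25 V */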
+ val = ((val>>2) * 25000) / 8192;
+ return sprintf(buf, "%d\n", val);
+ } else {
+		return -EAGAIN;
+ }
+}
+
+static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, show_voltage, NULL, 0);
+static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, show_voltage, NULL, 1);
+static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, show_voltage, NULL, 2);
+static SENSOR_DEVICE_ATTR(in3_input, S_IRUGO, show_voltage, NULL, 3);
+static SENSOR_DEVICE_ATTR(in4_input, S_IRUGO, show_voltage, NULL, 4);
+static SENSOR_DEVICE_ATTR(in5_input, S_IRUGO, show_voltage, NULL, 5);
+static SENSOR_DEVICE_ATTR(in6_input, S_IRUGO, show_voltage, NULL, 6);
+static SENSOR_DEVICE_ATTR(in7_input, S_IRUGO, show_voltage, NULL, 7);
+
+static struct attribute *ads7871_attributes[] = {
+ &sensor_dev_attr_in0_input.dev_attr.attr,
+ &sensor_dev_attr_in1_input.dev_attr.attr,
+ &sensor_dev_attr_in2_input.dev_attr.attr,
+ &sensor_dev_attr_in3_input.dev_attr.attr,
+ &sensor_dev_attr_in4_input.dev_attr.attr,
+ &sensor_dev_attr_in5_input.dev_attr.attr,
+ &sensor_dev_attr_in6_input.dev_attr.attr,
+ &sensor_dev_attr_in7_input.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group ads7871_group = {
+ .attrs = ads7871_attributes,
+};
+
+static int __devinit ads7871_probe(struct spi_device *spi)
+{
+ int status, ret, err = 0;
+ uint8_t val;
+ struct ads7871_data *pdata;
+
+ dev_dbg(&spi->dev, "probe\n");
+
+ pdata = kzalloc(sizeof(struct ads7871_data), GFP_KERNEL);
+ if (!pdata) {
+ err = -ENOMEM;
+ goto exit;
+ }
+
+	status = sysfs_create_group(&spi->dev.kobj, &ads7871_group);
+	if (status < 0) {
+		err = status;
+		goto error_free;
+	}
+
+ pdata->hwmon_dev = hwmon_device_register(&spi->dev);
+ if (IS_ERR(pdata->hwmon_dev)) {
+ err = PTR_ERR(pdata->hwmon_dev);
+ goto error_remove;
+ }
+
+ spi_set_drvdata(spi, pdata);
+
+ /* Configure the SPI bus */
+ spi->mode = (SPI_MODE_0);
+ spi->bits_per_word = 8;
+ spi_setup(spi);
+
+ ads7871_write_reg8(spi, REG_SER_CONTROL, 0);
+ ads7871_write_reg8(spi, REG_AD_CONTROL, 0);
+
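+ /* Going by the bit names: enable the oscillator, reference and reference buffer */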
+ val = (OSC_OSCR_bm | OSC_OSCE_bm | OSC_REFE_bm | OSC_BUFE_bm);
+ ads7871_write_reg8(spi, REG_OSC_CONTROL, val);
+ ret = ads7871_read_reg8(spi, REG_OSC_CONTROL);
+
+ dev_dbg(&spi->dev, "REG_OSC_CONTROL write:%x, read:%x\n", val, ret);
+ /*
+  * Because there is no other error checking on an SPI bus
+  * we need to make sure we really have a chip.
+  */
+ if (val != ret) {
+ err = -ENODEV;
+ goto error_unregister;
+ }
+
+ return 0;
+
+error_unregister:
+ hwmon_device_unregister(pdata->hwmon_dev);
+error_remove:
+ sysfs_remove_group(&spi->dev.kobj, &ads7871_group);
+error_free:
+ kfree(pdata);
+exit:
+ return err;
+}
+
+static int __devexit ads7871_remove(struct spi_device *spi)
+{
+ struct ads7871_data *pdata = spi_get_drvdata(spi);
+
+ hwmon_device_unregister(pdata->hwmon_dev);
+ sysfs_remove_group(&spi->dev.kobj, &ads7871_group);
+ kfree(pdata);
+ return 0;
+}
+
+static struct spi_driver ads7871_driver = {
+ .driver = {
+ .name = DEVICE_NAME,
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ },
+
+ .probe = ads7871_probe,
+ .remove = __devexit_p(ads7871_remove),
+};
+
+static int __init ads7871_init(void)
+{
+ return spi_register_driver(&ads7871_driver);
+}
+
+static void __exit ads7871_exit(void)
+{
+ spi_unregister_driver(&ads7871_driver);
+}
+
+module_init(ads7871_init);
+module_exit(ads7871_exit);
+
+MODULE_AUTHOR("Paul Thomas <pthomas8589@gmail.com>");
+MODULE_DESCRIPTION("TI ADS7871 A/D driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index 0f28d91f29d8..b6598aa557a0 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -148,6 +148,20 @@ static const char *temperature_sensors_sets[][41] = {
/* Set 18: MacBook Pro 2,2 */
{ "TB0T", "TC0D", "TC0P", "TG0H", "TG0P", "TG0T", "TM0P", "TTF0",
"Th0H", "Th1H", "Tm0P", "Ts0P", NULL },
+/* Set 19: MacBook Pro 5,3 */
+ { "TB0T", "TB1T", "TB2T", "TB3T", "TC0D", "TC0F", "TC0P", "TG0D",
+ "TG0F", "TG0H", "TG0P", "TG0T", "TN0D", "TN0P", "TTF0", "Th2H",
+ "Tm0P", "Ts0P", "Ts0S", NULL },
+/* Set 20: MacBook Pro 5,4 */
+ { "TB0T", "TB1T", "TB2T", "TB3T", "TC0D", "TC0F", "TC0P", "TN0D",
+ "TN0P", "TTF0", "Th2H", "Ts0P", "Ts0S", NULL },
+/* Set 21: MacBook Pro 6,2 */
+ { "TB0T", "TB1T", "TB2T", "TC0C", "TC0D", "TC0P", "TC1C", "TG0D",
+ "TG0P", "TG0T", "TMCD", "TP0P", "TPCD", "Th1H", "Th2H", "Tm0P",
+ "Ts0P", "Ts0S", NULL },
+/* Set 22: MacBook Pro 7,1 */
+ { "TB0T", "TB1T", "TB2T", "TC0D", "TC0P", "TN0D", "TN0P", "TN0S",
+ "TN1D", "TN1F", "TN1G", "TN1S", "Th1H", "Ts0P", "Ts0S", NULL },
};
/* List of keys used to read/write fan speeds */
@@ -195,6 +209,9 @@ static unsigned int applesmc_accelerometer;
/* Indicates whether this computer has light sensors and keyboard backlight. */
static unsigned int applesmc_light;
+/* The number of fans handled by the driver */
+static unsigned int fans_handled;
+
/* Indicates which temperature sensors set to use. */
static unsigned int applesmc_temperature_set;
@@ -643,6 +660,17 @@ out:
return snprintf(sysfsbuf, PAGE_SIZE, "(%d,%d)\n", left, right);
}
+/* Displays sensor key as label */
+static ssize_t applesmc_show_sensor_label(struct device *dev,
+ struct device_attribute *devattr, char *sysfsbuf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ const char *key =
+ temperature_sensors_sets[applesmc_temperature_set][attr->index];
+
+ return snprintf(sysfsbuf, PAGE_SIZE, "%s\n", key);
+}
+
/* Displays degree Celsius * 1000 */
static ssize_t applesmc_show_temperature(struct device *dev,
struct device_attribute *devattr, char *sysfsbuf)
@@ -1110,6 +1138,86 @@ static const struct attribute_group fan_attribute_groups[] = {
/*
* Temperature sensors sysfs entries.
*/
+static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp3_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp4_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 3);
+static SENSOR_DEVICE_ATTR(temp5_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 4);
+static SENSOR_DEVICE_ATTR(temp6_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 5);
+static SENSOR_DEVICE_ATTR(temp7_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 6);
+static SENSOR_DEVICE_ATTR(temp8_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 7);
+static SENSOR_DEVICE_ATTR(temp9_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 8);
+static SENSOR_DEVICE_ATTR(temp10_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 9);
+static SENSOR_DEVICE_ATTR(temp11_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 10);
+static SENSOR_DEVICE_ATTR(temp12_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 11);
+static SENSOR_DEVICE_ATTR(temp13_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 12);
+static SENSOR_DEVICE_ATTR(temp14_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 13);
+static SENSOR_DEVICE_ATTR(temp15_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 14);
+static SENSOR_DEVICE_ATTR(temp16_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 15);
+static SENSOR_DEVICE_ATTR(temp17_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 16);
+static SENSOR_DEVICE_ATTR(temp18_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 17);
+static SENSOR_DEVICE_ATTR(temp19_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 18);
+static SENSOR_DEVICE_ATTR(temp20_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 19);
+static SENSOR_DEVICE_ATTR(temp21_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 20);
+static SENSOR_DEVICE_ATTR(temp22_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 21);
+static SENSOR_DEVICE_ATTR(temp23_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 22);
+static SENSOR_DEVICE_ATTR(temp24_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 23);
+static SENSOR_DEVICE_ATTR(temp25_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 24);
+static SENSOR_DEVICE_ATTR(temp26_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 25);
+static SENSOR_DEVICE_ATTR(temp27_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 26);
+static SENSOR_DEVICE_ATTR(temp28_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 27);
+static SENSOR_DEVICE_ATTR(temp29_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 28);
+static SENSOR_DEVICE_ATTR(temp30_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 29);
+static SENSOR_DEVICE_ATTR(temp31_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 30);
+static SENSOR_DEVICE_ATTR(temp32_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 31);
+static SENSOR_DEVICE_ATTR(temp33_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 32);
+static SENSOR_DEVICE_ATTR(temp34_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 33);
+static SENSOR_DEVICE_ATTR(temp35_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 34);
+static SENSOR_DEVICE_ATTR(temp36_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 35);
+static SENSOR_DEVICE_ATTR(temp37_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 36);
+static SENSOR_DEVICE_ATTR(temp38_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 37);
+static SENSOR_DEVICE_ATTR(temp39_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 38);
+static SENSOR_DEVICE_ATTR(temp40_label, S_IRUGO,
+ applesmc_show_sensor_label, NULL, 39);
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
applesmc_show_temperature, NULL, 0);
static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO,
@@ -1191,6 +1299,50 @@ static SENSOR_DEVICE_ATTR(temp39_input, S_IRUGO,
static SENSOR_DEVICE_ATTR(temp40_input, S_IRUGO,
applesmc_show_temperature, NULL, 39);
+static struct attribute *label_attributes[] = {
+ &sensor_dev_attr_temp1_label.dev_attr.attr,
+ &sensor_dev_attr_temp2_label.dev_attr.attr,
+ &sensor_dev_attr_temp3_label.dev_attr.attr,
+ &sensor_dev_attr_temp4_label.dev_attr.attr,
+ &sensor_dev_attr_temp5_label.dev_attr.attr,
+ &sensor_dev_attr_temp6_label.dev_attr.attr,
+ &sensor_dev_attr_temp7_label.dev_attr.attr,
+ &sensor_dev_attr_temp8_label.dev_attr.attr,
+ &sensor_dev_attr_temp9_label.dev_attr.attr,
+ &sensor_dev_attr_temp10_label.dev_attr.attr,
+ &sensor_dev_attr_temp11_label.dev_attr.attr,
+ &sensor_dev_attr_temp12_label.dev_attr.attr,
+ &sensor_dev_attr_temp13_label.dev_attr.attr,
+ &sensor_dev_attr_temp14_label.dev_attr.attr,
+ &sensor_dev_attr_temp15_label.dev_attr.attr,
+ &sensor_dev_attr_temp16_label.dev_attr.attr,
+ &sensor_dev_attr_temp17_label.dev_attr.attr,
+ &sensor_dev_attr_temp18_label.dev_attr.attr,
+ &sensor_dev_attr_temp19_label.dev_attr.attr,
+ &sensor_dev_attr_temp20_label.dev_attr.attr,
+ &sensor_dev_attr_temp21_label.dev_attr.attr,
+ &sensor_dev_attr_temp22_label.dev_attr.attr,
+ &sensor_dev_attr_temp23_label.dev_attr.attr,
+ &sensor_dev_attr_temp24_label.dev_attr.attr,
+ &sensor_dev_attr_temp25_label.dev_attr.attr,
+ &sensor_dev_attr_temp26_label.dev_attr.attr,
+ &sensor_dev_attr_temp27_label.dev_attr.attr,
+ &sensor_dev_attr_temp28_label.dev_attr.attr,
+ &sensor_dev_attr_temp29_label.dev_attr.attr,
+ &sensor_dev_attr_temp30_label.dev_attr.attr,
+ &sensor_dev_attr_temp31_label.dev_attr.attr,
+ &sensor_dev_attr_temp32_label.dev_attr.attr,
+ &sensor_dev_attr_temp33_label.dev_attr.attr,
+ &sensor_dev_attr_temp34_label.dev_attr.attr,
+ &sensor_dev_attr_temp35_label.dev_attr.attr,
+ &sensor_dev_attr_temp36_label.dev_attr.attr,
+ &sensor_dev_attr_temp37_label.dev_attr.attr,
+ &sensor_dev_attr_temp38_label.dev_attr.attr,
+ &sensor_dev_attr_temp39_label.dev_attr.attr,
+ &sensor_dev_attr_temp40_label.dev_attr.attr,
+ NULL
+};
+
static struct attribute *temperature_attributes[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_temp2_input.dev_attr.attr,
@@ -1238,6 +1390,10 @@ static struct attribute *temperature_attributes[] = {
static const struct attribute_group temperature_attributes_group =
{ .attrs = temperature_attributes };
+static const struct attribute_group label_attributes_group = {
+ .attrs = label_attributes
+};
+
/* Module stuff */
/*
@@ -1360,6 +1516,14 @@ static __initdata struct dmi_match_data applesmc_dmi_data[] = {
{ .accelerometer = 0, .light = 0, .temperature_set = 17 },
/* MacBook Pro 2,2: accelerometer, backlight and temperature set 18 */
{ .accelerometer = 1, .light = 1, .temperature_set = 18 },
+/* MacBook Pro 5,3: accelerometer, backlight and temperature set 19 */
+ { .accelerometer = 1, .light = 1, .temperature_set = 19 },
+/* MacBook Pro 5,4: accelerometer, backlight and temperature set 20 */
+ { .accelerometer = 1, .light = 1, .temperature_set = 20 },
+/* MacBook Pro 6,2: accelerometer, backlight and temperature set 21 */
+ { .accelerometer = 1, .light = 1, .temperature_set = 21 },
+/* MacBook Pro 7,1: accelerometer, backlight and temperature set 22 */
+ { .accelerometer = 1, .light = 1, .temperature_set = 22 },
};
/* Note that DMI_MATCH(...,"MacBook") will match "MacBookPro1,1".
@@ -1373,6 +1537,22 @@ static __initdata struct dmi_system_id applesmc_whitelist[] = {
DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir") },
&applesmc_dmi_data[7]},
+ { applesmc_dmi_match, "Apple MacBook Pro 7", {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro7") },
+ &applesmc_dmi_data[22]},
+ { applesmc_dmi_match, "Apple MacBook Pro 5,4", {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5,4") },
+ &applesmc_dmi_data[20]},
+ { applesmc_dmi_match, "Apple MacBook Pro 5,3", {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5,3") },
+ &applesmc_dmi_data[19]},
+ { applesmc_dmi_match, "Apple MacBook Pro 6", {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro6") },
+ &applesmc_dmi_data[21]},
{ applesmc_dmi_match, "Apple MacBook Pro 5", {
DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5") },
@@ -1492,45 +1672,31 @@ static int __init applesmc_init(void)
/* create fan files */
count = applesmc_get_fan_count();
- if (count < 0) {
+ if (count < 0)
printk(KERN_ERR "applesmc: Cannot get the number of fans.\n");
- } else {
+ else
printk(KERN_INFO "applesmc: %d fans found.\n", count);
- switch (count) {
- default:
- printk(KERN_WARNING "applesmc: More than 4 fans found,"
- " but at most 4 fans are supported"
- " by the driver.\n");
- case 4:
- ret = sysfs_create_group(&pdev->dev.kobj,
- &fan_attribute_groups[3]);
- if (ret)
- goto out_key_enumeration;
- case 3:
- ret = sysfs_create_group(&pdev->dev.kobj,
- &fan_attribute_groups[2]);
- if (ret)
- goto out_key_enumeration;
- case 2:
- ret = sysfs_create_group(&pdev->dev.kobj,
- &fan_attribute_groups[1]);
- if (ret)
- goto out_key_enumeration;
- case 1:
- ret = sysfs_create_group(&pdev->dev.kobj,
- &fan_attribute_groups[0]);
- if (ret)
- goto out_fan_1;
- case 0:
- ;
- }
+ if (count > 4) {
+ count = 4;
+ printk(KERN_WARNING "applesmc: More than 4 fans found,"
+ " but at most 4 fans are supported"
+ " by the driver.\n");
+ }
+
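+ /* Create one sysfs group per fan; fans_handled tracks how many exist so cleanup can unwind them */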
+ while (fans_handled < count) {
+ ret = sysfs_create_group(&pdev->dev.kobj,
+ &fan_attribute_groups[fans_handled]);
+ if (ret)
+ goto out_fans;
+ fans_handled++;
}
for (i = 0;
temperature_sensors_sets[applesmc_temperature_set][i] != NULL;
i++) {
- if (temperature_attributes[i] == NULL) {
+ if (temperature_attributes[i] == NULL ||
+ label_attributes[i] == NULL) {
printk(KERN_ERR "applesmc: More temperature sensors "
"in temperature_sensors_sets (at least %i)"
"than available sysfs files in "
@@ -1542,6 +1708,10 @@ static int __init applesmc_init(void)
temperature_attributes[i]);
if (ret)
goto out_temperature;
+ ret = sysfs_create_file(&pdev->dev.kobj,
+ label_attributes[i]);
+ if (ret)
+ goto out_temperature;
}
if (applesmc_accelerometer) {
@@ -1592,11 +1762,12 @@ out_accelerometer:
if (applesmc_accelerometer)
applesmc_release_accelerometer();
out_temperature:
+ sysfs_remove_group(&pdev->dev.kobj, &label_attributes_group);
sysfs_remove_group(&pdev->dev.kobj, &temperature_attributes_group);
- sysfs_remove_group(&pdev->dev.kobj, &fan_attribute_groups[0]);
-out_fan_1:
- sysfs_remove_group(&pdev->dev.kobj, &fan_attribute_groups[1]);
-out_key_enumeration:
+out_fans:
+ while (fans_handled)
+ sysfs_remove_group(&pdev->dev.kobj,
+ &fan_attribute_groups[--fans_handled]);
sysfs_remove_group(&pdev->dev.kobj, &key_enumeration_group);
out_name:
sysfs_remove_file(&pdev->dev.kobj, &dev_attr_name.attr);
@@ -1621,9 +1792,11 @@ static void __exit applesmc_exit(void)
}
if (applesmc_accelerometer)
applesmc_release_accelerometer();
+ sysfs_remove_group(&pdev->dev.kobj, &label_attributes_group);
sysfs_remove_group(&pdev->dev.kobj, &temperature_attributes_group);
- sysfs_remove_group(&pdev->dev.kobj, &fan_attribute_groups[0]);
- sysfs_remove_group(&pdev->dev.kobj, &fan_attribute_groups[1]);
+ while (fans_handled)
+ sysfs_remove_group(&pdev->dev.kobj,
+ &fan_attribute_groups[--fans_handled]);
sysfs_remove_group(&pdev->dev.kobj, &key_enumeration_group);
sysfs_remove_file(&pdev->dev.kobj, &dev_attr_name.attr);
platform_device_unregister(pdev);
diff --git a/drivers/hwmon/asc7621.c b/drivers/hwmon/asc7621.c
index 7f948105d8ad..0f388adc6187 100644
--- a/drivers/hwmon/asc7621.c
+++ b/drivers/hwmon/asc7621.c
@@ -268,8 +268,11 @@ static ssize_t store_fan16(struct device *dev,
if (strict_strtol(buf, 10, &reqval))
return -EINVAL;
+ /* If a minimum RPM of zero is requested, then we set the register to
+ 0xffff. This value allows the fan to be stopped completely without
+ generating an alarm. */
reqval =
- (SENSORS_LIMIT((reqval) <= 0 ? 0 : 5400000 / (reqval), 0, 65534));
+ (reqval <= 0 ? 0xffff : SENSORS_LIMIT(5400000 / reqval, 0, 0xfffe));
mutex_lock(&data->update_lock);
data->reg[param->msb[0]] = (reqval >> 8) & 0xff;
@@ -285,8 +288,9 @@ static ssize_t store_fan16(struct device *dev,
* Voltages are scaled in the device so that the nominal voltage
* is 3/4ths of the 0-255 range (i.e. 192).
* If all voltages are 'normal' then all voltage registers will
- * read 0xC0. This doesn't help us if we don't have a point of refernce.
- * The data sheet however provides us with the full scale value for each
+ * read 0xC0.
+ *
+ * The data sheet provides us with the 3/4 scale value for each voltage
* which is stored in in_scaling. The sda->index parameter value provides
* the index into in_scaling.
*
@@ -295,7 +299,7 @@ static ssize_t store_fan16(struct device *dev,
*/
static int asc7621_in_scaling[] = {
- 3320, 3000, 4380, 6640, 16000
+ 2500, 2250, 3300, 5000, 12000
};
static ssize_t show_in10(struct device *dev, struct device_attribute *attr,
@@ -306,19 +310,12 @@ static ssize_t show_in10(struct device *dev, struct device_attribute *attr,
u8 nr = sda->index;
mutex_lock(&data->update_lock);
- regval = (data->reg[param->msb[0]] * asc7621_in_scaling[nr]) / 256;
-
- /* The LSB value is a 2-bit scaling of the MSB's LSbit value.
- * I.E. If the maximim voltage for this input is 6640 millivolts then
- * a MSB register value of 0 = 0mv and 255 = 6640mv.
- * A 1 step change therefore represents 25.9mv (6640 / 256).
- * The extra 2-bits therefore represent increments of 6.48mv.
- */
- regval += ((asc7621_in_scaling[nr] / 256) / 4) *
- (data->reg[param->lsb[0]] >> 6);
-
+ regval = (data->reg[param->msb[0]] << 8) | (data->reg[param->lsb[0]]);
mutex_unlock(&data->update_lock);
+ /* The LSB value is a 2-bit scaling of the MSB's LSbit value. */
+ regval = (regval >> 6) * asc7621_in_scaling[nr] / (0xc0 << 2);
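+ /* e.g. a nominal in0 reading (msb 0xc0, lsb 0x00) gives 768 * 2500 / 768 = 2500 mV */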
+
return sprintf(buf, "%u\n", regval);
}
@@ -331,7 +328,7 @@ static ssize_t show_in8(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%u\n",
((data->reg[param->msb[0]] *
- asc7621_in_scaling[nr]) / 256));
+ asc7621_in_scaling[nr]) / 0xc0));
}
static ssize_t store_in8(struct device *dev, struct device_attribute *attr,
@@ -344,9 +341,11 @@ static ssize_t store_in8(struct device *dev, struct device_attribute *attr,
if (strict_strtol(buf, 10, &reqval))
return -EINVAL;
- reqval = SENSORS_LIMIT(reqval, 0, asc7621_in_scaling[nr]);
+ reqval = SENSORS_LIMIT(reqval, 0, 0xffff);
+
+ reqval = reqval * 0xc0 / asc7621_in_scaling[nr];
- reqval = (reqval * 255 + 128) / asc7621_in_scaling[nr];
+ reqval = SENSORS_LIMIT(reqval, 0, 0xff);
mutex_lock(&data->update_lock);
data->reg[param->msb[0]] = reqval;
@@ -846,11 +845,11 @@ static struct asc7621_param asc7621_params[] = {
PWRITE(in3_max, 3, PRI_LOW, 0x4b, 0, 0, 0, in8),
PWRITE(in4_max, 4, PRI_LOW, 0x4d, 0, 0, 0, in8),
- PREAD(in0_alarm, 0, PRI_LOW, 0x41, 0, 0x01, 0, bitmask),
- PREAD(in1_alarm, 1, PRI_LOW, 0x41, 0, 0x01, 1, bitmask),
- PREAD(in2_alarm, 2, PRI_LOW, 0x41, 0, 0x01, 2, bitmask),
- PREAD(in3_alarm, 3, PRI_LOW, 0x41, 0, 0x01, 3, bitmask),
- PREAD(in4_alarm, 4, PRI_LOW, 0x42, 0, 0x01, 0, bitmask),
+ PREAD(in0_alarm, 0, PRI_HIGH, 0x41, 0, 0x01, 0, bitmask),
+ PREAD(in1_alarm, 1, PRI_HIGH, 0x41, 0, 0x01, 1, bitmask),
+ PREAD(in2_alarm, 2, PRI_HIGH, 0x41, 0, 0x01, 2, bitmask),
+ PREAD(in3_alarm, 3, PRI_HIGH, 0x41, 0, 0x01, 3, bitmask),
+ PREAD(in4_alarm, 4, PRI_HIGH, 0x42, 0, 0x01, 0, bitmask),
PREAD(fan1_input, 0, PRI_HIGH, 0x29, 0x28, 0, 0, fan16),
PREAD(fan2_input, 1, PRI_HIGH, 0x2b, 0x2a, 0, 0, fan16),
@@ -862,10 +861,10 @@ static struct asc7621_param asc7621_params[] = {
PWRITE(fan3_min, 2, PRI_LOW, 0x59, 0x58, 0, 0, fan16),
PWRITE(fan4_min, 3, PRI_LOW, 0x5b, 0x5a, 0, 0, fan16),
- PREAD(fan1_alarm, 0, PRI_LOW, 0x42, 0, 0x01, 0, bitmask),
- PREAD(fan2_alarm, 1, PRI_LOW, 0x42, 0, 0x01, 1, bitmask),
- PREAD(fan3_alarm, 2, PRI_LOW, 0x42, 0, 0x01, 2, bitmask),
- PREAD(fan4_alarm, 3, PRI_LOW, 0x42, 0, 0x01, 3, bitmask),
+ PREAD(fan1_alarm, 0, PRI_HIGH, 0x42, 0, 0x01, 2, bitmask),
+ PREAD(fan2_alarm, 1, PRI_HIGH, 0x42, 0, 0x01, 3, bitmask),
+ PREAD(fan3_alarm, 2, PRI_HIGH, 0x42, 0, 0x01, 4, bitmask),
+ PREAD(fan4_alarm, 3, PRI_HIGH, 0x42, 0, 0x01, 5, bitmask),
PREAD(temp1_input, 0, PRI_HIGH, 0x25, 0x10, 0, 0, temp10),
PREAD(temp2_input, 1, PRI_HIGH, 0x26, 0x15, 0, 0, temp10),
@@ -886,10 +885,10 @@ static struct asc7621_param asc7621_params[] = {
PWRITE(temp3_max, 2, PRI_LOW, 0x53, 0, 0, 0, temp8),
PWRITE(temp4_max, 3, PRI_LOW, 0x35, 0, 0, 0, temp8),
- PREAD(temp1_alarm, 0, PRI_LOW, 0x41, 0, 0x01, 4, bitmask),
- PREAD(temp2_alarm, 1, PRI_LOW, 0x41, 0, 0x01, 5, bitmask),
- PREAD(temp3_alarm, 2, PRI_LOW, 0x41, 0, 0x01, 6, bitmask),
- PREAD(temp4_alarm, 3, PRI_LOW, 0x43, 0, 0x01, 0, bitmask),
+ PREAD(temp1_alarm, 0, PRI_HIGH, 0x41, 0, 0x01, 4, bitmask),
+ PREAD(temp2_alarm, 1, PRI_HIGH, 0x41, 0, 0x01, 5, bitmask),
+ PREAD(temp3_alarm, 2, PRI_HIGH, 0x41, 0, 0x01, 6, bitmask),
+ PREAD(temp4_alarm, 3, PRI_HIGH, 0x43, 0, 0x01, 0, bitmask),
PWRITE(temp1_source, 0, PRI_LOW, 0x02, 0, 0x07, 4, bitmask),
PWRITE(temp2_source, 1, PRI_LOW, 0x02, 0, 0x07, 0, bitmask),
@@ -898,7 +897,7 @@ static struct asc7621_param asc7621_params[] = {
PWRITE(temp1_smoothing_enable, 0, PRI_LOW, 0x62, 0, 0x01, 3, bitmask),
PWRITE(temp2_smoothing_enable, 1, PRI_LOW, 0x63, 0, 0x01, 7, bitmask),
- PWRITE(temp3_smoothing_enable, 2, PRI_LOW, 0x64, 0, 0x01, 3, bitmask),
+ PWRITE(temp3_smoothing_enable, 2, PRI_LOW, 0x63, 0, 0x01, 3, bitmask),
PWRITE(temp4_smoothing_enable, 3, PRI_LOW, 0x3c, 0, 0x01, 3, bitmask),
PWRITE(temp1_smoothing_time, 0, PRI_LOW, 0x62, 0, 0x07, 0, temp_st),
diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
index 16c420240724..653db1bda934 100644
--- a/drivers/hwmon/asus_atk0110.c
+++ b/drivers/hwmon/asus_atk0110.c
@@ -1411,6 +1411,13 @@ static int __init atk0110_init(void)
{
int ret;
+ /* Make sure it's safe to access the device through ACPI */
+ if (!acpi_resources_are_enforced()) {
+ pr_err("atk: Resources not safely usable due to "
+ "acpi_enforce_resources kernel parameter\n");
+ return -EBUSY;
+ }
+
ret = acpi_bus_register_driver(&atk_driver);
if (ret)
pr_info("atk: acpi_bus_register_driver failed: %d\n", ret);
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index e9b7fbc5a447..2988da150ed6 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -241,6 +241,55 @@ static int __devinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *
return tjmax;
}
+static int __devinit get_tjmax(struct cpuinfo_x86 *c, u32 id,
+ struct device *dev)
+{
+ /* 100 C is the default TjMax for both mobile and non-mobile CPUs */
+ int err;
+ u32 eax, edx;
+ u32 val;
+
+ /*
+  * On current Intel(R) processors the IA32_TEMPERATURE_TARGET
+  * MSR contains the TjMax value.
+  */
+ err = rdmsr_safe_on_cpu(id, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
+ if (err) {
+ dev_warn(dev, "Unable to read TjMax from CPU.\n");
+ } else {
+ val = (eax >> 16) & 0xff;
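+ /* The temperature target (TjMax) is in bits 23:16, in degrees C */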
+ /*
+  * Only trust a plausible TjMax value; otherwise fall
+  * through to the per-model assumption below.
+  */
+ if ((val > 80) && (val < 120)) {
+ dev_info(dev, "TjMax is %d C.\n", val);
+ return val * 1000;
+ }
+ }
+
+ /*
+ * An assumption is made for early CPUs and unreadable MSR.
+ * NOTE: the given value may not be correct.
+ */
+
+ switch (c->x86_model) {
+ case 0xe:
+ case 0xf:
+ case 0x16:
+ case 0x1a:
+ dev_warn(dev, "TjMax is assumed as 100 C!\n");
+ return 100000;
+ break;
+ case 0x17:
+ case 0x1c: /* Atom CPUs */
+ return adjust_tjmax(c, id, dev);
+ break;
+ default:
+ dev_warn(dev, "CPU (model=0x%x) is not supported yet,"
+ " using default TjMax of 100C.\n", c->x86_model);
+ return 100000;
+ }
+}
+
static int __devinit coretemp_probe(struct platform_device *pdev)
{
struct coretemp_data *data;
@@ -283,14 +332,18 @@ static int __devinit coretemp_probe(struct platform_device *pdev)
}
}
- data->tjmax = adjust_tjmax(c, data->id, &pdev->dev);
+ data->tjmax = get_tjmax(c, data->id, &pdev->dev);
platform_set_drvdata(pdev, data);
- /* read the still undocumented IA32_TEMPERATURE_TARGET it exists
- on older CPUs but not in this register, Atoms don't have it either */
+ /*
+ * read the still undocumented IA32_TEMPERATURE_TARGET. It exists
+ * on older CPUs but not in this register,
+ * Atoms don't have it either.
+ */
if ((c->x86_model > 0xe) && (c->x86_model != 0x1c)) {
- err = rdmsr_safe_on_cpu(data->id, 0x1a2, &eax, &edx);
+ err = rdmsr_safe_on_cpu(data->id, MSR_IA32_TEMPERATURE_TARGET,
+ &eax, &edx);
if (err) {
dev_warn(&pdev->dev, "Unable to read"
" IA32_TEMPERATURE_TARGET MSR\n");
@@ -451,28 +504,20 @@ static int __init coretemp_init(void)
for_each_online_cpu(i) {
struct cpuinfo_x86 *c = &cpu_data(i);
+ /*
+ * CPUID.06H.EAX[0] indicates whether the CPU has thermal
+ * sensors. We check this bit only, all the early CPUs
+ * without thermal sensors will be filtered out.
+ */
+ if (c->cpuid_level >= 6 && (cpuid_eax(0x06) & 0x01)) {
+ err = coretemp_device_add(i);
+ if (err)
+ goto exit_devices_unreg;
- /* check if family 6, models 0xe (Pentium M DC),
- 0xf (Core 2 DC 65nm), 0x16 (Core 2 SC 65nm),
- 0x17 (Penryn 45nm), 0x1a (Nehalem), 0x1c (Atom),
- 0x1e (Lynnfield) */
- if ((c->cpuid_level < 0) || (c->x86 != 0x6) ||
- !((c->x86_model == 0xe) || (c->x86_model == 0xf) ||
- (c->x86_model == 0x16) || (c->x86_model == 0x17) ||
- (c->x86_model == 0x1a) || (c->x86_model == 0x1c) ||
- (c->x86_model == 0x1e))) {
-
- /* supported CPU not found, but report the unknown
- family 6 CPU */
- if ((c->x86 == 0x6) && (c->x86_model > 0xf))
- printk(KERN_WARNING DRVNAME ": Unknown CPU "
- "model 0x%x\n", c->x86_model);
- continue;
+ } else {
+ printk(KERN_INFO DRVNAME ": CPU (model=0x%x)"
+ " has no thermal sensor.\n", c->x86_model);
}
-
- err = coretemp_device_add(i);
- if (err)
- goto exit_devices_unreg;
}
if (list_empty(&pdev_list)) {
err = -ENODEV;
diff --git a/drivers/hwmon/dme1737.c b/drivers/hwmon/dme1737.c
index 823dd28a902c..980c17d5eeae 100644
--- a/drivers/hwmon/dme1737.c
+++ b/drivers/hwmon/dme1737.c
@@ -1,12 +1,14 @@
/*
- * dme1737.c - Driver for the SMSC DME1737, Asus A8000, SMSC SCH311x and
- * SCH5027 Super-I/O chips integrated hardware monitoring features.
- * Copyright (c) 2007, 2008 Juerg Haefliger <juergh@gmail.com>
+ * dme1737.c - Driver for the SMSC DME1737, Asus A8000, SMSC SCH311x, SCH5027,
+ * and SCH5127 Super-I/O chips integrated hardware monitoring
+ * features.
+ * Copyright (c) 2007, 2008, 2009, 2010 Juerg Haefliger <juergh@gmail.com>
*
* This driver is an I2C/ISA hybrid, meaning that it uses the I2C bus to access
* the chip registers if a DME1737, A8000, or SCH5027 is found and the ISA bus
- * if a SCH311x chip is found. Both types of chips have very similar hardware
- * monitoring capabilities but differ in the way they can be accessed.
+ * if a SCH311x or SCH5127 chip is found. Both types of chips have very
+ * similar hardware monitoring capabilities but differ in the way they can be
+ * accessed.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -57,7 +59,7 @@ MODULE_PARM_DESC(probe_all_addr, "Include probing of non-standard LPC "
/* Addresses to scan */
static const unsigned short normal_i2c[] = {0x2c, 0x2d, 0x2e, I2C_CLIENT_END};
-enum chips { dme1737, sch5027, sch311x };
+enum chips { dme1737, sch5027, sch311x, sch5127 };
/* ---------------------------------------------------------------------
* Registers
@@ -164,10 +166,29 @@ static const u8 DME1737_BIT_ALARM_FAN[] = {10, 11, 12, 13, 22, 23};
#define DME1737_VERSTEP_MASK 0xf8
#define SCH311X_DEVICE 0x8c
#define SCH5027_VERSTEP 0x69
+#define SCH5127_DEVICE 0x8e
+
+/* Device ID values (global configuration register index 0x20) */
+#define DME1737_ID_1 0x77
+#define DME1737_ID_2 0x78
+#define SCH3112_ID 0x7c
+#define SCH3114_ID 0x7d
+#define SCH3116_ID 0x7f
+#define SCH5027_ID 0x89
+#define SCH5127_ID 0x86
/* Length of ISA address segment */
#define DME1737_EXTENT 2
+/* chip-dependent features */
+#define HAS_TEMP_OFFSET (1 << 0) /* bit 0 */
+#define HAS_VID (1 << 1) /* bit 1 */
+#define HAS_ZONE3 (1 << 2) /* bit 2 */
+#define HAS_ZONE_HYST (1 << 3) /* bit 3 */
+#define HAS_PWM_MIN (1 << 4) /* bit 4 */
+#define HAS_FAN(ix) (1 << ((ix) + 5)) /* bits 5-10 */
+#define HAS_PWM(ix) (1 << ((ix) + 11)) /* bits 11-16 */
+
/* ---------------------------------------------------------------------
* Data structures and manipulation thereof
* --------------------------------------------------------------------- */
@@ -187,8 +208,7 @@ struct dme1737_data {
u8 vid;
u8 pwm_rr_en;
- u8 has_pwm;
- u8 has_fan;
+ u32 has_features;
/* Register values */
u16 in[7];
@@ -224,8 +244,11 @@ static const int IN_NOMINAL_SCH311x[] = {2500, 1500, 3300, 5000, 12000, 3300,
3300};
static const int IN_NOMINAL_SCH5027[] = {5000, 2250, 3300, 1125, 1125, 3300,
3300};
+static const int IN_NOMINAL_SCH5127[] = {2500, 2250, 3300, 1125, 1125, 3300,
+ 3300};
#define IN_NOMINAL(type) ((type) == sch311x ? IN_NOMINAL_SCH311x : \
(type) == sch5027 ? IN_NOMINAL_SCH5027 : \
+ (type) == sch5127 ? IN_NOMINAL_SCH5127 : \
IN_NOMINAL_DME1737)
/* Voltage input
@@ -568,7 +591,7 @@ static struct dme1737_data *dme1737_update_device(struct device *dev)
/* Sample register contents every 1 sec */
if (time_after(jiffies, data->last_update + HZ) || !data->valid) {
- if (data->type == dme1737) {
+ if (data->has_features & HAS_VID) {
data->vid = dme1737_read(data, DME1737_REG_VID) &
0x3f;
}
@@ -599,7 +622,7 @@ static struct dme1737_data *dme1737_update_device(struct device *dev)
DME1737_REG_TEMP_MIN(ix));
data->temp_max[ix] = dme1737_read(data,
DME1737_REG_TEMP_MAX(ix));
- if (data->type != sch5027) {
+ if (data->has_features & HAS_TEMP_OFFSET) {
data->temp_offset[ix] = dme1737_read(data,
DME1737_REG_TEMP_OFFSET(ix));
}
@@ -626,7 +649,7 @@ static struct dme1737_data *dme1737_update_device(struct device *dev)
for (ix = 0; ix < ARRAY_SIZE(data->fan); ix++) {
/* Skip reading registers if optional fans are not
* present */
- if (!(data->has_fan & (1 << ix))) {
+ if (!(data->has_features & HAS_FAN(ix))) {
continue;
}
data->fan[ix] = dme1737_read(data,
@@ -650,7 +673,7 @@ static struct dme1737_data *dme1737_update_device(struct device *dev)
for (ix = 0; ix < ARRAY_SIZE(data->pwm); ix++) {
/* Skip reading registers if optional PWMs are not
* present */
- if (!(data->has_pwm & (1 << ix))) {
+ if (!(data->has_features & HAS_PWM(ix))) {
continue;
}
data->pwm[ix] = dme1737_read(data,
@@ -672,12 +695,24 @@ static struct dme1737_data *dme1737_update_device(struct device *dev)
/* Thermal zone registers */
for (ix = 0; ix < ARRAY_SIZE(data->zone_low); ix++) {
- data->zone_low[ix] = dme1737_read(data,
- DME1737_REG_ZONE_LOW(ix));
- data->zone_abs[ix] = dme1737_read(data,
- DME1737_REG_ZONE_ABS(ix));
+ /* Skip reading registers if zone3 is not present */
+ if ((ix == 2) && !(data->has_features & HAS_ZONE3)) {
+ continue;
+ }
+ /* sch5127 zone2 registers are special */
+ if ((ix == 1) && (data->type == sch5127)) {
+ data->zone_low[1] = dme1737_read(data,
+ DME1737_REG_ZONE_LOW(2));
+ data->zone_abs[1] = dme1737_read(data,
+ DME1737_REG_ZONE_ABS(2));
+ } else {
+ data->zone_low[ix] = dme1737_read(data,
+ DME1737_REG_ZONE_LOW(ix));
+ data->zone_abs[ix] = dme1737_read(data,
+ DME1737_REG_ZONE_ABS(ix));
+ }
}
- if (data->type != sch5027) {
+ if (data->has_features & HAS_ZONE_HYST) {
for (ix = 0; ix < ARRAY_SIZE(data->zone_hyst); ix++) {
data->zone_hyst[ix] = dme1737_read(data,
DME1737_REG_ZONE_HYST(ix));
@@ -1594,10 +1629,6 @@ static struct attribute *dme1737_attr[] ={
&sensor_dev_attr_zone2_auto_point2_temp.dev_attr.attr,
&sensor_dev_attr_zone2_auto_point3_temp.dev_attr.attr,
&sensor_dev_attr_zone2_auto_channels_temp.dev_attr.attr,
- &sensor_dev_attr_zone3_auto_point1_temp.dev_attr.attr,
- &sensor_dev_attr_zone3_auto_point2_temp.dev_attr.attr,
- &sensor_dev_attr_zone3_auto_point3_temp.dev_attr.attr,
- &sensor_dev_attr_zone3_auto_channels_temp.dev_attr.attr,
NULL
};
@@ -1605,27 +1636,23 @@ static const struct attribute_group dme1737_group = {
.attrs = dme1737_attr,
};
-/* The following struct holds misc attributes, which are not available in all
- * chips. Their creation depends on the chip type which is determined during
- * module load. */
-static struct attribute *dme1737_misc_attr[] = {
- /* Temperatures */
+/* The following struct holds temp offset attributes, which are not available
+ * in all chips. The following chips support them:
+ * DME1737, SCH311x */
+static struct attribute *dme1737_temp_offset_attr[] = {
&sensor_dev_attr_temp1_offset.dev_attr.attr,
&sensor_dev_attr_temp2_offset.dev_attr.attr,
&sensor_dev_attr_temp3_offset.dev_attr.attr,
- /* Zones */
- &sensor_dev_attr_zone1_auto_point1_temp_hyst.dev_attr.attr,
- &sensor_dev_attr_zone2_auto_point1_temp_hyst.dev_attr.attr,
- &sensor_dev_attr_zone3_auto_point1_temp_hyst.dev_attr.attr,
NULL
};
-static const struct attribute_group dme1737_misc_group = {
- .attrs = dme1737_misc_attr,
+static const struct attribute_group dme1737_temp_offset_group = {
+ .attrs = dme1737_temp_offset_attr,
};
-/* The following struct holds VID-related attributes. Their creation
- depends on the chip type which is determined during module load. */
+/* The following struct holds VID related attributes, which are not available
+ * in all chips. The following chips support them:
+ * DME1737 */
static struct attribute *dme1737_vid_attr[] = {
&dev_attr_vrm.attr,
&dev_attr_cpu0_vid.attr,
@@ -1636,6 +1663,36 @@ static const struct attribute_group dme1737_vid_group = {
.attrs = dme1737_vid_attr,
};
+/* The following struct holds temp zone 3 related attributes, which are not
+ * available in all chips. The following chips support them:
+ * DME1737, SCH311x, SCH5027 */
+static struct attribute *dme1737_zone3_attr[] = {
+ &sensor_dev_attr_zone3_auto_point1_temp.dev_attr.attr,
+ &sensor_dev_attr_zone3_auto_point2_temp.dev_attr.attr,
+ &sensor_dev_attr_zone3_auto_point3_temp.dev_attr.attr,
+ &sensor_dev_attr_zone3_auto_channels_temp.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group dme1737_zone3_group = {
+ .attrs = dme1737_zone3_attr,
+};
+
+
+/* The following struct holds temp zone hysteresis related attributes, which
+ * are not available in all chips. The following chips support them:
+ * DME1737, SCH311x */
+static struct attribute *dme1737_zone_hyst_attr[] = {
+ &sensor_dev_attr_zone1_auto_point1_temp_hyst.dev_attr.attr,
+ &sensor_dev_attr_zone2_auto_point1_temp_hyst.dev_attr.attr,
+ &sensor_dev_attr_zone3_auto_point1_temp_hyst.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group dme1737_zone_hyst_group = {
+ .attrs = dme1737_zone_hyst_attr,
+};
+
/* The following structs hold the PWM attributes, some of which are optional.
* Their creation depends on the chip configuration which is determined during
* module load. */
@@ -1691,10 +1748,10 @@ static const struct attribute_group dme1737_pwm_group[] = {
{ .attrs = dme1737_pwm6_attr },
};
-/* The following struct holds misc PWM attributes, which are not available in
- * all chips. Their creation depends on the chip type which is determined
+/* The following struct holds auto PWM min attributes, which are not available
+ * in all chips. Their creation depends on the chip type which is determined
* during module load. */
-static struct attribute *dme1737_pwm_misc_attr[] = {
+static struct attribute *dme1737_auto_pwm_min_attr[] = {
&sensor_dev_attr_pwm1_auto_pwm_min.dev_attr.attr,
&sensor_dev_attr_pwm2_auto_pwm_min.dev_attr.attr,
&sensor_dev_attr_pwm3_auto_pwm_min.dev_attr.attr,
@@ -1764,14 +1821,25 @@ static struct attribute *dme1737_zone_chmod_attr[] = {
&sensor_dev_attr_zone2_auto_point1_temp.dev_attr.attr,
&sensor_dev_attr_zone2_auto_point2_temp.dev_attr.attr,
&sensor_dev_attr_zone2_auto_point3_temp.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group dme1737_zone_chmod_group = {
+ .attrs = dme1737_zone_chmod_attr,
+};
+
+
+/* The permissions of the following zone 3 attributes are changed to read-
+ * writeable if the chip is *not* locked. Otherwise they stay read-only. */
+static struct attribute *dme1737_zone3_chmod_attr[] = {
&sensor_dev_attr_zone3_auto_point1_temp.dev_attr.attr,
&sensor_dev_attr_zone3_auto_point2_temp.dev_attr.attr,
&sensor_dev_attr_zone3_auto_point3_temp.dev_attr.attr,
NULL
};
-static const struct attribute_group dme1737_zone_chmod_group = {
- .attrs = dme1737_zone_chmod_attr,
+static const struct attribute_group dme1737_zone3_chmod_group = {
+ .attrs = dme1737_zone3_chmod_attr,
};
/* The permissions of the following PWM attributes are changed to read-
@@ -1887,30 +1955,35 @@ static void dme1737_remove_files(struct device *dev)
int ix;
for (ix = 0; ix < ARRAY_SIZE(dme1737_fan_group); ix++) {
- if (data->has_fan & (1 << ix)) {
+ if (data->has_features & HAS_FAN(ix)) {
sysfs_remove_group(&dev->kobj,
&dme1737_fan_group[ix]);
}
}
for (ix = 0; ix < ARRAY_SIZE(dme1737_pwm_group); ix++) {
- if (data->has_pwm & (1 << ix)) {
+ if (data->has_features & HAS_PWM(ix)) {
sysfs_remove_group(&dev->kobj,
&dme1737_pwm_group[ix]);
- if (data->type != sch5027 && ix < 3) {
+ if ((data->has_features & HAS_PWM_MIN) && ix < 3) {
sysfs_remove_file(&dev->kobj,
- dme1737_pwm_misc_attr[ix]);
+ dme1737_auto_pwm_min_attr[ix]);
}
}
}
- if (data->type != sch5027) {
- sysfs_remove_group(&dev->kobj, &dme1737_misc_group);
+ if (data->has_features & HAS_TEMP_OFFSET) {
+ sysfs_remove_group(&dev->kobj, &dme1737_temp_offset_group);
}
- if (data->type == dme1737) {
+ if (data->has_features & HAS_VID) {
sysfs_remove_group(&dev->kobj, &dme1737_vid_group);
}
-
+ if (data->has_features & HAS_ZONE3) {
+ sysfs_remove_group(&dev->kobj, &dme1737_zone3_group);
+ }
+ if (data->has_features & HAS_ZONE_HYST) {
+ sysfs_remove_group(&dev->kobj, &dme1737_zone_hyst_group);
+ }
sysfs_remove_group(&dev->kobj, &dme1737_group);
if (!data->client) {
@@ -1934,23 +2007,31 @@ static int dme1737_create_files(struct device *dev)
goto exit_remove;
}
- /* Create misc sysfs attributes */
- if ((data->type != sch5027) &&
+ /* Create chip-dependent sysfs attributes */
+ if ((data->has_features & HAS_TEMP_OFFSET) &&
(err = sysfs_create_group(&dev->kobj,
- &dme1737_misc_group))) {
+ &dme1737_temp_offset_group))) {
goto exit_remove;
}
-
- /* Create VID-related sysfs attributes */
- if ((data->type == dme1737) &&
+ if ((data->has_features & HAS_VID) &&
(err = sysfs_create_group(&dev->kobj,
&dme1737_vid_group))) {
goto exit_remove;
}
+ if ((data->has_features & HAS_ZONE3) &&
+ (err = sysfs_create_group(&dev->kobj,
+ &dme1737_zone3_group))) {
+ goto exit_remove;
+ }
+ if ((data->has_features & HAS_ZONE_HYST) &&
+ (err = sysfs_create_group(&dev->kobj,
+ &dme1737_zone_hyst_group))) {
+ goto exit_remove;
+ }
/* Create fan sysfs attributes */
for (ix = 0; ix < ARRAY_SIZE(dme1737_fan_group); ix++) {
- if (data->has_fan & (1 << ix)) {
+ if (data->has_features & HAS_FAN(ix)) {
if ((err = sysfs_create_group(&dev->kobj,
&dme1737_fan_group[ix]))) {
goto exit_remove;
@@ -1960,14 +2041,14 @@ static int dme1737_create_files(struct device *dev)
/* Create PWM sysfs attributes */
for (ix = 0; ix < ARRAY_SIZE(dme1737_pwm_group); ix++) {
- if (data->has_pwm & (1 << ix)) {
+ if (data->has_features & HAS_PWM(ix)) {
if ((err = sysfs_create_group(&dev->kobj,
&dme1737_pwm_group[ix]))) {
goto exit_remove;
}
- if (data->type != sch5027 && ix < 3 &&
+ if ((data->has_features & HAS_PWM_MIN) && ix < 3 &&
(err = sysfs_create_file(&dev->kobj,
- dme1737_pwm_misc_attr[ix]))) {
+ dme1737_auto_pwm_min_attr[ix]))) {
goto exit_remove;
}
}
@@ -1983,21 +2064,30 @@ static int dme1737_create_files(struct device *dev)
dme1737_chmod_group(dev, &dme1737_zone_chmod_group,
S_IRUGO | S_IWUSR);
- /* Change permissions of misc sysfs attributes */
- if (data->type != sch5027) {
- dme1737_chmod_group(dev, &dme1737_misc_group,
+ /* Change permissions of chip-dependent sysfs attributes */
+ if (data->has_features & HAS_TEMP_OFFSET) {
+ dme1737_chmod_group(dev, &dme1737_temp_offset_group,
+ S_IRUGO | S_IWUSR);
+ }
+ if (data->has_features & HAS_ZONE3) {
+ dme1737_chmod_group(dev, &dme1737_zone3_chmod_group,
+ S_IRUGO | S_IWUSR);
+ }
+ if (data->has_features & HAS_ZONE_HYST) {
+ dme1737_chmod_group(dev, &dme1737_zone_hyst_group,
S_IRUGO | S_IWUSR);
}
/* Change permissions of PWM sysfs attributes */
for (ix = 0; ix < ARRAY_SIZE(dme1737_pwm_chmod_group); ix++) {
- if (data->has_pwm & (1 << ix)) {
+ if (data->has_features & HAS_PWM(ix)) {
dme1737_chmod_group(dev,
&dme1737_pwm_chmod_group[ix],
S_IRUGO | S_IWUSR);
- if (data->type != sch5027 && ix < 3) {
+ if ((data->has_features & HAS_PWM_MIN) &&
+ ix < 3) {
dme1737_chmod_file(dev,
- dme1737_pwm_misc_attr[ix],
+ dme1737_auto_pwm_min_attr[ix],
S_IRUGO | S_IWUSR);
}
}
@@ -2005,7 +2095,7 @@ static int dme1737_create_files(struct device *dev)
/* Change permissions of pwm[1-3] if in manual mode */
for (ix = 0; ix < 3; ix++) {
- if ((data->has_pwm & (1 << ix)) &&
+ if ((data->has_features & HAS_PWM(ix)) &&
(PWM_EN_FROM_REG(data->pwm_config[ix]) == 1)) {
dme1737_chmod_file(dev,
dme1737_pwm_chmod_attr[ix],
@@ -2052,20 +2142,20 @@ static int dme1737_init_device(struct device *dev)
return -EFAULT;
}
- /* Determine which optional fan and pwm features are enabled/present */
+ /* Determine which optional fan and pwm features are enabled (only
+ * valid for I2C devices) */
if (client) { /* I2C chip */
data->config2 = dme1737_read(data, DME1737_REG_CONFIG2);
/* Check if optional fan3 input is enabled */
if (data->config2 & 0x04) {
- data->has_fan |= (1 << 2);
+ data->has_features |= HAS_FAN(2);
}
/* Fan4 and pwm3 are only available if the client's I2C address
* is the default 0x2e. Otherwise the I/Os associated with
* these functions are used for addr enable/select. */
if (client->addr == 0x2e) {
- data->has_fan |= (1 << 3);
- data->has_pwm |= (1 << 2);
+ data->has_features |= HAS_FAN(3) | HAS_PWM(2);
}
/* Determine which of the optional fan[5-6] and pwm[5-6]
@@ -2077,26 +2167,40 @@ static int dme1737_init_device(struct device *dev)
dev_warn(dev, "Failed to query Super-IO for optional "
"features.\n");
}
- } else { /* ISA chip */
- /* Fan3 and pwm3 are always available. Fan[4-5] and pwm[5-6]
- * don't exist in the ISA chip. */
- data->has_fan |= (1 << 2);
- data->has_pwm |= (1 << 2);
}
- /* Fan1, fan2, pwm1, and pwm2 are always present */
- data->has_fan |= 0x03;
- data->has_pwm |= 0x03;
+ /* Fan[1-2] and pwm[1-2] are present in all chips */
+ data->has_features |= HAS_FAN(0) | HAS_FAN(1) | HAS_PWM(0) | HAS_PWM(1);
+
+ /* Chip-dependent features */
+ switch (data->type) {
+ case dme1737:
+ data->has_features |= HAS_TEMP_OFFSET | HAS_VID | HAS_ZONE3 |
+ HAS_ZONE_HYST | HAS_PWM_MIN;
+ break;
+ case sch311x:
+ data->has_features |= HAS_TEMP_OFFSET | HAS_ZONE3 |
+ HAS_ZONE_HYST | HAS_PWM_MIN | HAS_FAN(2) | HAS_PWM(2);
+ break;
+ case sch5027:
+ data->has_features |= HAS_ZONE3;
+ break;
+ case sch5127:
+ data->has_features |= HAS_FAN(2) | HAS_PWM(2);
+ break;
+ default:
+ break;
+ }
dev_info(dev, "Optional features: pwm3=%s, pwm5=%s, pwm6=%s, "
"fan3=%s, fan4=%s, fan5=%s, fan6=%s.\n",
- (data->has_pwm & (1 << 2)) ? "yes" : "no",
- (data->has_pwm & (1 << 4)) ? "yes" : "no",
- (data->has_pwm & (1 << 5)) ? "yes" : "no",
- (data->has_fan & (1 << 2)) ? "yes" : "no",
- (data->has_fan & (1 << 3)) ? "yes" : "no",
- (data->has_fan & (1 << 4)) ? "yes" : "no",
- (data->has_fan & (1 << 5)) ? "yes" : "no");
+ (data->has_features & HAS_PWM(2)) ? "yes" : "no",
+ (data->has_features & HAS_PWM(4)) ? "yes" : "no",
+ (data->has_features & HAS_PWM(5)) ? "yes" : "no",
+ (data->has_features & HAS_FAN(2)) ? "yes" : "no",
+ (data->has_features & HAS_FAN(3)) ? "yes" : "no",
+ (data->has_features & HAS_FAN(4)) ? "yes" : "no",
+ (data->has_features & HAS_FAN(5)) ? "yes" : "no");
reg = dme1737_read(data, DME1737_REG_TACH_PWM);
/* Inform if fan-to-pwm mapping differs from the default */
@@ -2122,7 +2226,7 @@ static int dme1737_init_device(struct device *dev)
for (ix = 0; ix < 3; ix++) {
data->pwm_config[ix] = dme1737_read(data,
DME1737_REG_PWM_CONFIG(ix));
- if ((data->has_pwm & (1 << ix)) &&
+ if ((data->has_features & HAS_PWM(ix)) &&
(PWM_EN_FROM_REG(data->pwm_config[ix]) == -1)) {
dev_info(dev, "Switching pwm%d to "
"manual mode.\n", ix + 1);
@@ -2142,7 +2246,7 @@ static int dme1737_init_device(struct device *dev)
data->pwm_acz[2] = 4; /* pwm3 -> zone3 */
/* Set VRM */
- if (data->type == dme1737) {
+ if (data->has_features & HAS_VID) {
data->vrm = vid_which_vrm();
}
@@ -2163,10 +2267,10 @@ static int dme1737_i2c_get_features(int sio_cip, struct dme1737_data *data)
dme1737_sio_enter(sio_cip);
/* Check device ID
- * The DME1737 can return either 0x78 or 0x77 as its device ID.
- * The SCH5027 returns 0x89 as its device ID. */
+ * We currently know about two kinds of DME1737 and SCH5027. */
reg = force_id ? force_id : dme1737_sio_inb(sio_cip, 0x20);
- if (!(reg == 0x77 || reg == 0x78 || reg == 0x89)) {
+ if (!(reg == DME1737_ID_1 || reg == DME1737_ID_2 ||
+ reg == SCH5027_ID)) {
err = -ENODEV;
goto exit;
}
@@ -2185,16 +2289,16 @@ static int dme1737_i2c_get_features(int sio_cip, struct dme1737_data *data)
* are enabled and available. Bits [3:2] of registers 0x43-0x46 are set
* to '10' if the respective feature is enabled. */
if ((inb(addr + 0x43) & 0x0c) == 0x08) { /* fan6 */
- data->has_fan |= (1 << 5);
+ data->has_features |= HAS_FAN(5);
}
if ((inb(addr + 0x44) & 0x0c) == 0x08) { /* pwm6 */
- data->has_pwm |= (1 << 5);
+ data->has_features |= HAS_PWM(5);
}
if ((inb(addr + 0x45) & 0x0c) == 0x08) { /* fan5 */
- data->has_fan |= (1 << 4);
+ data->has_features |= HAS_FAN(4);
}
if ((inb(addr + 0x46) & 0x0c) == 0x08) { /* pwm5 */
- data->has_pwm |= (1 << 4);
+ data->has_features |= HAS_PWM(4);
}
exit:
@@ -2222,7 +2326,6 @@ static int dme1737_i2c_detect(struct i2c_client *client,
if (company == DME1737_COMPANY_SMSC &&
verstep == SCH5027_VERSTEP) {
name = "sch5027";
-
} else if (company == DME1737_COMPANY_SMSC &&
(verstep & DME1737_VERSTEP_MASK) == DME1737_VERSTEP) {
name = "dme1737";
@@ -2329,10 +2432,10 @@ static int __init dme1737_isa_detect(int sio_cip, unsigned short *addr)
dme1737_sio_enter(sio_cip);
/* Check device ID
- * We currently know about SCH3112 (0x7c), SCH3114 (0x7d), and
- * SCH3116 (0x7f). */
+ * We currently know about SCH3112, SCH3114, SCH3116, and SCH5127 */
reg = force_id ? force_id : dme1737_sio_inb(sio_cip, 0x20);
- if (!(reg == 0x7c || reg == 0x7d || reg == 0x7f)) {
+ if (!(reg == SCH3112_ID || reg == SCH3114_ID || reg == SCH3116_ID ||
+ reg == SCH5127_ID)) {
err = -ENODEV;
goto exit;
}
@@ -2424,23 +2527,42 @@ static int __devinit dme1737_isa_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, data);
/* Skip chip detection if module is loaded with force_id parameter */
- if (!force_id) {
+ switch (force_id) {
+ case SCH3112_ID:
+ case SCH3114_ID:
+ case SCH3116_ID:
+ data->type = sch311x;
+ break;
+ case SCH5127_ID:
+ data->type = sch5127;
+ break;
+ default:
company = dme1737_read(data, DME1737_REG_COMPANY);
device = dme1737_read(data, DME1737_REG_DEVICE);
- if (!((company == DME1737_COMPANY_SMSC) &&
- (device == SCH311X_DEVICE))) {
+ if ((company == DME1737_COMPANY_SMSC) &&
+ (device == SCH311X_DEVICE)) {
+ data->type = sch311x;
+ } else if ((company == DME1737_COMPANY_SMSC) &&
+ (device == SCH5127_DEVICE)) {
+ data->type = sch5127;
+ } else {
err = -ENODEV;
goto exit_kfree;
}
}
- data->type = sch311x;
- /* Fill in the remaining client fields and initialize the mutex */
- data->name = "sch311x";
+ if (data->type == sch5127) {
+ data->name = "sch5127";
+ } else {
+ data->name = "sch311x";
+ }
+
+ /* Initialize the mutex */
mutex_init(&data->update_lock);
- dev_info(dev, "Found a SCH311x chip at 0x%04x\n", data->addr);
+ dev_info(dev, "Found a %s chip at 0x%04x\n",
+ data->type == sch5127 ? "SCH5127" : "SCH311x", data->addr);
/* Initialize the chip */
if ((err = dme1737_init_device(dev))) {
diff --git a/drivers/hwmon/emc1403.c b/drivers/hwmon/emc1403.c
new file mode 100644
index 000000000000..0e4b5642638d
--- /dev/null
+++ b/drivers/hwmon/emc1403.c
@@ -0,0 +1,344 @@
+/*
+ * emc1403.c - SMSC Thermal Driver
+ *
+ * Copyright (C) 2008 Intel Corp
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * TODO
+ * - cache alarm and critical limit registers
+ * - add emc1404 support
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/err.h>
+#include <linux/sysfs.h>
+#include <linux/mutex.h>
+
+#define THERMAL_PID_REG 0xfd
+#define THERMAL_SMSC_ID_REG 0xfe
+#define THERMAL_REVISION_REG 0xff
+
+struct thermal_data {
+ struct device *hwmon_dev;
+ struct mutex mutex;
+ /* Cache the hyst value so we don't keep re-reading it. In theory
+ we could cache it forever as nobody else should be writing it. */
+ u8 cached_hyst;
+ unsigned long hyst_valid;
+};
+
+static ssize_t show_temp(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct sensor_device_attribute *sda = to_sensor_dev_attr(attr);
+ int retval = i2c_smbus_read_byte_data(client, sda->index);
+
+ if (retval < 0)
+ return retval;
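+ /* The register holds whole degrees C; hwmon sysfs expects millidegrees */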
+ return sprintf(buf, "%d000\n", retval);
+}
+
+static ssize_t show_bit(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct sensor_device_attribute_2 *sda = to_sensor_dev_attr_2(attr);
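+ /* sda->nr selects the status register, sda->index the bit mask within it */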
+ int retval = i2c_smbus_read_byte_data(client, sda->nr);
+
+ if (retval < 0)
+ return retval;
+ retval &= sda->index;
+ return sprintf(buf, "%d\n", retval ? 1 : 0);
+}
+
+static ssize_t store_temp(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct sensor_device_attribute *sda = to_sensor_dev_attr(attr);
+ struct i2c_client *client = to_i2c_client(dev);
+ unsigned long val;
+ int retval;
+
+ if (strict_strtoul(buf, 10, &val))
+ return -EINVAL;
+ retval = i2c_smbus_write_byte_data(client, sda->index,
+ DIV_ROUND_CLOSEST(val, 1000));
+ if (retval < 0)
+ return retval;
+ return count;
+}
+
+static ssize_t show_hyst(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct thermal_data *data = i2c_get_clientdata(client);
+ struct sensor_device_attribute *sda = to_sensor_dev_attr(attr);
+ int retval;
+ int hyst;
+
+ retval = i2c_smbus_read_byte_data(client, sda->index);
+ if (retval < 0)
+ return retval;
+
+ if (time_after(jiffies, data->hyst_valid)) {
+ hyst = i2c_smbus_read_byte_data(client, 0x21);
+ if (hyst < 0)
+ return hyst;
+ data->cached_hyst = hyst;
+ data->hyst_valid = jiffies + HZ;
+ }
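+ /* The chip reports hysteresis as an offset below the limit, in degrees C */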
+ return sprintf(buf, "%d000\n", retval - data->cached_hyst);
+}
+
+static ssize_t store_hyst(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct thermal_data *data = i2c_get_clientdata(client);
+ struct sensor_device_attribute *sda = to_sensor_dev_attr(attr);
+ int retval;
+ int hyst;
+ unsigned long val;
+
+ if (strict_strtoul(buf, 10, &val))
+ return -EINVAL;
+
+ mutex_lock(&data->mutex);
+ retval = i2c_smbus_read_byte_data(client, sda->index);
+ if (retval < 0)
+ goto fail;
+
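+ /* Convert the requested hysteresis point (millidegrees) into an offset below the limit */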
+ hyst = retval * 1000 - val;
+ hyst = DIV_ROUND_CLOSEST(hyst, 1000);
+ if (hyst < 0 || hyst > 255) {
+ retval = -ERANGE;
+ goto fail;
+ }
+
+ retval = i2c_smbus_write_byte_data(client, 0x21, hyst);
+ if (retval == 0) {
+ retval = count;
+ data->cached_hyst = hyst;
+ data->hyst_valid = jiffies + HZ;
+ }
+fail:
+ mutex_unlock(&data->mutex);
+ return retval;
+}
+
+/*
+ * Sensors. We pass the actual i2c register to the methods.
+ */
+
+static SENSOR_DEVICE_ATTR(temp1_min, S_IRUGO | S_IWUSR,
+ show_temp, store_temp, 0x06);
+static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO | S_IWUSR,
+ show_temp, store_temp, 0x05);
+static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO | S_IWUSR,
+ show_temp, store_temp, 0x20);
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0x00);
+static SENSOR_DEVICE_ATTR_2(temp1_min_alarm, S_IRUGO,
+ show_bit, NULL, 0x36, 0x01);
+static SENSOR_DEVICE_ATTR_2(temp1_max_alarm, S_IRUGO,
+ show_bit, NULL, 0x35, 0x01);
+static SENSOR_DEVICE_ATTR_2(temp1_crit_alarm, S_IRUGO,
+ show_bit, NULL, 0x37, 0x01);
+static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO | S_IWUSR,
+ show_hyst, store_hyst, 0x20);
+
+static SENSOR_DEVICE_ATTR(temp2_min, S_IRUGO | S_IWUSR,
+ show_temp, store_temp, 0x08);
+static SENSOR_DEVICE_ATTR(temp2_max, S_IRUGO | S_IWUSR,
+ show_temp, store_temp, 0x07);
+static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO | S_IWUSR,
+ show_temp, store_temp, 0x19);
+static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 0x01);
+static SENSOR_DEVICE_ATTR_2(temp2_min_alarm, S_IRUGO,
+ show_bit, NULL, 0x36, 0x02);
+static SENSOR_DEVICE_ATTR_2(temp2_max_alarm, S_IRUGO,
+ show_bit, NULL, 0x35, 0x02);
+static SENSOR_DEVICE_ATTR_2(temp2_crit_alarm, S_IRUGO,
+ show_bit, NULL, 0x37, 0x02);
+static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IRUGO | S_IWUSR,
+ show_hyst, store_hyst, 0x19);
+
+static SENSOR_DEVICE_ATTR(temp3_min, S_IRUGO | S_IWUSR,
+ show_temp, store_temp, 0x16);
+static SENSOR_DEVICE_ATTR(temp3_max, S_IRUGO | S_IWUSR,
+ show_temp, store_temp, 0x15);
+static SENSOR_DEVICE_ATTR(temp3_crit, S_IRUGO | S_IWUSR,
+ show_temp, store_temp, 0x1A);
+static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, show_temp, NULL, 0x23);
+static SENSOR_DEVICE_ATTR_2(temp3_min_alarm, S_IRUGO,
+ show_bit, NULL, 0x36, 0x04);
+static SENSOR_DEVICE_ATTR_2(temp3_max_alarm, S_IRUGO,
+ show_bit, NULL, 0x35, 0x04);
+static SENSOR_DEVICE_ATTR_2(temp3_crit_alarm, S_IRUGO,
+ show_bit, NULL, 0x37, 0x04);
+static SENSOR_DEVICE_ATTR(temp3_crit_hyst, S_IRUGO | S_IWUSR,
+ show_hyst, store_hyst, 0x1A);
+
+static struct attribute *mid_att_thermal[] = {
+ &sensor_dev_attr_temp1_min.dev_attr.attr,
+ &sensor_dev_attr_temp1_max.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit.dev_attr.attr,
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_temp1_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
+ &sensor_dev_attr_temp2_min.dev_attr.attr,
+ &sensor_dev_attr_temp2_max.dev_attr.attr,
+ &sensor_dev_attr_temp2_crit.dev_attr.attr,
+ &sensor_dev_attr_temp2_input.dev_attr.attr,
+ &sensor_dev_attr_temp2_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp2_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp2_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr,
+ &sensor_dev_attr_temp3_min.dev_attr.attr,
+ &sensor_dev_attr_temp3_max.dev_attr.attr,
+ &sensor_dev_attr_temp3_crit.dev_attr.attr,
+ &sensor_dev_attr_temp3_input.dev_attr.attr,
+ &sensor_dev_attr_temp3_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp3_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp3_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group m_thermal_gr = {
+ .attrs = mid_att_thermal
+};
+
+static int emc1403_detect(struct i2c_client *client,
+ struct i2c_board_info *info)
+{
+ int id;
+
+ /* Check that the thermal chip is an SMSC EMC1403 */
+ id = i2c_smbus_read_byte_data(client, THERMAL_SMSC_ID_REG);
+ if (id != 0x5d)
+ return -ENODEV;
+
+ /* Note: 0x25 is the 1404 which is very similar and this
+ driver could be extended */
+ id = i2c_smbus_read_byte_data(client, THERMAL_PID_REG);
+ if (id != 0x21)
+ return -ENODEV;
+
+ id = i2c_smbus_read_byte_data(client, THERMAL_REVISION_REG);
+ if (id != 0x01)
+ return -ENODEV;
+
+ strlcpy(info->type, "emc1403", I2C_NAME_SIZE);
+ return 0;
+}
+
+static int emc1403_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int res;
+ struct thermal_data *data;
+
+ data = kzalloc(sizeof(struct thermal_data), GFP_KERNEL);
+ if (data == NULL) {
+ dev_warn(&client->dev, "out of memory");
+ return -ENOMEM;
+ }
+
+ i2c_set_clientdata(client, data);
+ mutex_init(&data->mutex);
+ data->hyst_valid = jiffies - 1; /* Expired */
+
+ res = sysfs_create_group(&client->dev.kobj, &m_thermal_gr);
+ if (res) {
+ dev_warn(&client->dev, "create group failed\n");
+ goto thermal_error1;
+ }
+ data->hwmon_dev = hwmon_device_register(&client->dev);
+ if (IS_ERR(data->hwmon_dev)) {
+ res = PTR_ERR(data->hwmon_dev);
+ dev_warn(&client->dev, "register hwmon dev failed\n");
+ goto thermal_error2;
+ }
+ dev_info(&client->dev, "EMC1403 Thermal chip found\n");
+ return res;
+
+thermal_error2:
+ sysfs_remove_group(&client->dev.kobj, &m_thermal_gr);
+thermal_error1:
+ kfree(data);
+ return res;
+}
+
+static int emc1403_remove(struct i2c_client *client)
+{
+ struct thermal_data *data = i2c_get_clientdata(client);
+
+ hwmon_device_unregister(data->hwmon_dev);
+ sysfs_remove_group(&client->dev.kobj, &m_thermal_gr);
+ kfree(data);
+ return 0;
+}
+
+static const unsigned short emc1403_address_list[] = {
+ 0x18, 0x2a, 0x4c, 0x4d, I2C_CLIENT_END
+};
+
+static const struct i2c_device_id emc1403_idtable[] = {
+ { "emc1403", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, emc1403_idtable);
+
+static struct i2c_driver sensor_emc1403 = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = "emc1403",
+ },
+ .detect = emc1403_detect,
+ .probe = emc1403_probe,
+ .remove = emc1403_remove,
+ .id_table = emc1403_idtable,
+ .address_list = emc1403_address_list,
+};
+
+static int __init sensor_emc1403_init(void)
+{
+ return i2c_add_driver(&sensor_emc1403);
+}
+
+static void __exit sensor_emc1403_exit(void)
+{
+ i2c_del_driver(&sensor_emc1403);
+}
+
+module_init(sensor_emc1403_init);
+module_exit(sensor_emc1403_exit);
+
+MODULE_AUTHOR("Kalhan Trisal <kalhan.trisal@intel.com");
+MODULE_DESCRIPTION("emc1403 Thermal Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c
index a95fa4256caa..537841ef44b9 100644
--- a/drivers/hwmon/f71882fg.c
+++ b/drivers/hwmon/f71882fg.c
@@ -856,21 +856,19 @@ static inline int superio_inb(int base, int reg)
static int superio_inw(int base, int reg)
{
int val;
- outb(reg++, base);
- val = inb(base + 1) << 8;
- outb(reg, base);
- val |= inb(base + 1);
+ val = superio_inb(base, reg) << 8;
+ val |= superio_inb(base, reg + 1);
return val;
}
static inline void superio_enter(int base)
{
/* according to the datasheet the key must be sent twice! */
- outb( SIO_UNLOCK_KEY, base);
- outb( SIO_UNLOCK_KEY, base);
+ outb(SIO_UNLOCK_KEY, base);
+ outb(SIO_UNLOCK_KEY, base);
}
-static inline void superio_select( int base, int ld)
+static inline void superio_select(int base, int ld)
{
outb(SIO_REG_LDSEL, base);
outb(ld, base + 1);
@@ -905,10 +903,8 @@ static u16 f71882fg_read16(struct f71882fg_data *data, u8 reg)
{
u16 val;
- outb(reg++, data->addr + ADDR_REG_OFFSET);
- val = inb(data->addr + DATA_REG_OFFSET) << 8;
- outb(reg, data->addr + ADDR_REG_OFFSET);
- val |= inb(data->addr + DATA_REG_OFFSET);
+ val = f71882fg_read8(data, reg) << 8;
+ val |= f71882fg_read8(data, reg + 1);
return val;
}
@@ -921,10 +917,8 @@ static void f71882fg_write8(struct f71882fg_data *data, u8 reg, u8 val)
static void f71882fg_write16(struct f71882fg_data *data, u8 reg, u16 val)
{
- outb(reg++, data->addr + ADDR_REG_OFFSET);
- outb(val >> 8, data->addr + DATA_REG_OFFSET);
- outb(reg, data->addr + ADDR_REG_OFFSET);
- outb(val & 255, data->addr + DATA_REG_OFFSET);
+ f71882fg_write8(data, reg, val >> 8);
+ f71882fg_write8(data, reg + 1, val & 0xff);
}
static u16 f71882fg_read_temp(struct f71882fg_data *data, int nr)
@@ -945,7 +939,7 @@ static struct f71882fg_data *f71882fg_update_device(struct device *dev)
mutex_lock(&data->update_lock);
/* Update once every 60 seconds */
- if ( time_after(jiffies, data->last_limits + 60 * HZ ) ||
+ if (time_after(jiffies, data->last_limits + 60 * HZ) ||
!data->valid) {
if (data->type == f71882fg || data->type == f71889fg) {
data->in1_max =
@@ -1127,8 +1121,12 @@ static ssize_t store_fan_full_speed(struct device *dev,
const char *buf, size_t count)
{
struct f71882fg_data *data = dev_get_drvdata(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
- long val = simple_strtol(buf, NULL, 10);
+ int err, nr = to_sensor_dev_attr_2(devattr)->index;
+ long val;
+
+ err = strict_strtol(buf, 10, &val);
+ if (err)
+ return err;
val = SENSORS_LIMIT(val, 23, 1500000);
val = fan_to_reg(val);
@@ -1157,8 +1155,12 @@ static ssize_t store_fan_beep(struct device *dev, struct device_attribute
*devattr, const char *buf, size_t count)
{
struct f71882fg_data *data = dev_get_drvdata(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
- unsigned long val = simple_strtoul(buf, NULL, 10);
+ int err, nr = to_sensor_dev_attr_2(devattr)->index;
+ unsigned long val;
+
+ err = strict_strtoul(buf, 10, &val);
+ if (err)
+ return err;
mutex_lock(&data->update_lock);
data->fan_beep = f71882fg_read8(data, F71882FG_REG_FAN_BEEP);
@@ -1206,7 +1208,14 @@ static ssize_t store_in_max(struct device *dev, struct device_attribute
*devattr, const char *buf, size_t count)
{
struct f71882fg_data *data = dev_get_drvdata(dev);
- long val = simple_strtol(buf, NULL, 10) / 8;
+ int err;
+ long val;
+
+ err = strict_strtol(buf, 10, &val);
+ if (err)
+ return err;
+
+ val /= 8;
val = SENSORS_LIMIT(val, 0, 255);
mutex_lock(&data->update_lock);
@@ -1233,8 +1242,12 @@ static ssize_t store_in_beep(struct device *dev, struct device_attribute
*devattr, const char *buf, size_t count)
{
struct f71882fg_data *data = dev_get_drvdata(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
- unsigned long val = simple_strtoul(buf, NULL, 10);
+ int err, nr = to_sensor_dev_attr_2(devattr)->index;
+ unsigned long val;
+
+ err = strict_strtoul(buf, 10, &val);
+ if (err)
+ return err;
mutex_lock(&data->update_lock);
data->in_beep = f71882fg_read8(data, F71882FG_REG_IN_BEEP);
@@ -1299,8 +1312,14 @@ static ssize_t store_temp_max(struct device *dev, struct device_attribute
*devattr, const char *buf, size_t count)
{
struct f71882fg_data *data = dev_get_drvdata(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
- long val = simple_strtol(buf, NULL, 10) / 1000;
+ int err, nr = to_sensor_dev_attr_2(devattr)->index;
+ long val;
+
+ err = strict_strtol(buf, 10, &val);
+ if (err)
+ return err;
+
+ val /= 1000;
val = SENSORS_LIMIT(val, 0, 255);
mutex_lock(&data->update_lock);
@@ -1333,10 +1352,16 @@ static ssize_t store_temp_max_hyst(struct device *dev, struct device_attribute
*devattr, const char *buf, size_t count)
{
struct f71882fg_data *data = dev_get_drvdata(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
- long val = simple_strtol(buf, NULL, 10) / 1000;
+ int err, nr = to_sensor_dev_attr_2(devattr)->index;
ssize_t ret = count;
u8 reg;
+ long val;
+
+ err = strict_strtol(buf, 10, &val);
+ if (err)
+ return err;
+
+ val /= 1000;
mutex_lock(&data->update_lock);
@@ -1372,8 +1397,14 @@ static ssize_t store_temp_crit(struct device *dev, struct device_attribute
*devattr, const char *buf, size_t count)
{
struct f71882fg_data *data = dev_get_drvdata(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
- long val = simple_strtol(buf, NULL, 10) / 1000;
+ int err, nr = to_sensor_dev_attr_2(devattr)->index;
+ long val;
+
+ err = strict_strtol(buf, 10, &val);
+ if (err)
+ return err;
+
+ val /= 1000;
val = SENSORS_LIMIT(val, 0, 255);
mutex_lock(&data->update_lock);
@@ -1427,8 +1458,12 @@ static ssize_t store_temp_beep(struct device *dev, struct device_attribute
*devattr, const char *buf, size_t count)
{
struct f71882fg_data *data = dev_get_drvdata(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
- unsigned long val = simple_strtoul(buf, NULL, 10);
+ int err, nr = to_sensor_dev_attr_2(devattr)->index;
+ unsigned long val;
+
+ err = strict_strtoul(buf, 10, &val);
+ if (err)
+ return err;
mutex_lock(&data->update_lock);
data->temp_beep = f71882fg_read8(data, F71882FG_REG_TEMP_BEEP);
@@ -1490,8 +1525,13 @@ static ssize_t store_pwm(struct device *dev,
size_t count)
{
struct f71882fg_data *data = dev_get_drvdata(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
- long val = simple_strtol(buf, NULL, 10);
+ int err, nr = to_sensor_dev_attr_2(devattr)->index;
+ long val;
+
+ err = strict_strtol(buf, 10, &val);
+ if (err)
+ return err;
+
val = SENSORS_LIMIT(val, 0, 255);
mutex_lock(&data->update_lock);
@@ -1551,8 +1591,12 @@ static ssize_t store_pwm_enable(struct device *dev, struct device_attribute
*devattr, const char *buf, size_t count)
{
struct f71882fg_data *data = dev_get_drvdata(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
- long val = simple_strtol(buf, NULL, 10);
+ int err, nr = to_sensor_dev_attr_2(devattr)->index;
+ long val;
+
+ err = strict_strtol(buf, 10, &val);
+ if (err)
+ return err;
/* Special case for F8000 pwm channel 3 which only does auto mode */
if (data->type == f8000 && nr == 2 && val != 2)
@@ -1626,9 +1670,14 @@ static ssize_t store_pwm_auto_point_pwm(struct device *dev,
const char *buf, size_t count)
{
struct f71882fg_data *data = dev_get_drvdata(dev);
- int pwm = to_sensor_dev_attr_2(devattr)->index;
+ int err, pwm = to_sensor_dev_attr_2(devattr)->index;
int point = to_sensor_dev_attr_2(devattr)->nr;
- long val = simple_strtol(buf, NULL, 10);
+ long val;
+
+ err = strict_strtol(buf, 10, &val);
+ if (err)
+ return err;
+
val = SENSORS_LIMIT(val, 0, 255);
mutex_lock(&data->update_lock);
@@ -1674,10 +1723,16 @@ static ssize_t store_pwm_auto_point_temp_hyst(struct device *dev,
const char *buf, size_t count)
{
struct f71882fg_data *data = dev_get_drvdata(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
+ int err, nr = to_sensor_dev_attr_2(devattr)->index;
int point = to_sensor_dev_attr_2(devattr)->nr;
- long val = simple_strtol(buf, NULL, 10) / 1000;
u8 reg;
+ long val;
+
+ err = strict_strtol(buf, 10, &val);
+ if (err)
+ return err;
+
+ val /= 1000;
mutex_lock(&data->update_lock);
data->pwm_auto_point_temp[nr][point] =
@@ -1716,8 +1771,12 @@ static ssize_t store_pwm_interpolate(struct device *dev,
const char *buf, size_t count)
{
struct f71882fg_data *data = dev_get_drvdata(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
- unsigned long val = simple_strtoul(buf, NULL, 10);
+ int err, nr = to_sensor_dev_attr_2(devattr)->index;
+ unsigned long val;
+
+ err = strict_strtoul(buf, 10, &val);
+ if (err)
+ return err;
mutex_lock(&data->update_lock);
data->pwm_auto_point_mapping[nr] =
@@ -1752,8 +1811,12 @@ static ssize_t store_pwm_auto_point_channel(struct device *dev,
const char *buf, size_t count)
{
struct f71882fg_data *data = dev_get_drvdata(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
- long val = simple_strtol(buf, NULL, 10);
+ int err, nr = to_sensor_dev_attr_2(devattr)->index;
+ long val;
+
+ err = strict_strtol(buf, 10, &val);
+ if (err)
+ return err;
switch (val) {
case 1:
@@ -1798,9 +1861,15 @@ static ssize_t store_pwm_auto_point_temp(struct device *dev,
const char *buf, size_t count)
{
struct f71882fg_data *data = dev_get_drvdata(dev);
- int pwm = to_sensor_dev_attr_2(devattr)->index;
+ int err, pwm = to_sensor_dev_attr_2(devattr)->index;
int point = to_sensor_dev_attr_2(devattr)->nr;
- long val = simple_strtol(buf, NULL, 10) / 1000;
+ long val;
+
+ err = strict_strtol(buf, 10, &val);
+ if (err)
+ return err;
+
+ val /= 1000;
if (data->type == f71889fg)
val = SENSORS_LIMIT(val, -128, 127);
@@ -2109,6 +2178,13 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address,
int err = -ENODEV;
u16 devid;
+ /* Don't step on other drivers' I/O space by accident */
+ if (!request_region(sioaddr, 2, DRVNAME)) {
+ printk(KERN_ERR DRVNAME ": I/O address 0x%04x already in use\n",
+ (int)sioaddr);
+ return -EBUSY;
+ }
+
superio_enter(sioaddr);
devid = superio_inw(sioaddr, SIO_REG_MANID);
@@ -2151,8 +2227,7 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address,
}
*address = superio_inw(sioaddr, SIO_REG_ADDR);
- if (*address == 0)
- {
+ if (*address == 0) {
printk(KERN_WARNING DRVNAME ": Base address not set\n");
goto exit;
}
@@ -2164,6 +2239,7 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address,
(int)superio_inb(sioaddr, SIO_REG_DEVREV));
exit:
superio_exit(sioaddr);
+ release_region(sioaddr, 2);
return err;
}
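Every store_* hunk above makes the same conversion change: the unchecked simple_strtol()/simple_strtoul() parse is replaced by strict_strtol()/strict_strtoul(), which return a negative errno on malformed input so the error is propagated to the writer instead of silently storing 0. A condensed sketch of the pattern, with a hypothetical attribute name:

static ssize_t store_example(struct device *dev,
			     struct device_attribute *devattr,
			     const char *buf, size_t count)
{
	struct f71882fg_data *data = dev_get_drvdata(dev);
	long val;
	int err;

	err = strict_strtol(buf, 10, &val);	/* 0 on success, -errno otherwise */
	if (err)
		return err;

	val = SENSORS_LIMIT(val, 0, 255);	/* clamp as the real callbacks do */
	mutex_lock(&data->update_lock);
	/* ... write val to the relevant register here ... */
	mutex_unlock(&data->update_lock);
	return count;
}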
diff --git a/drivers/hwmon/fschmd.c b/drivers/hwmon/fschmd.c
index 0627f7a5b9b8..b7ca2a9676cf 100644
--- a/drivers/hwmon/fschmd.c
+++ b/drivers/hwmon/fschmd.c
@@ -38,6 +38,7 @@
#include <linux/i2c.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
+#include <linux/smp_lock.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/sysfs.h>
@@ -847,8 +848,7 @@ static ssize_t watchdog_write(struct file *filp, const char __user *buf,
return count;
}
-static int watchdog_ioctl(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+static long watchdog_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
static struct watchdog_info ident = {
.options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT |
@@ -858,6 +858,7 @@ static int watchdog_ioctl(struct inode *inode, struct file *filp,
int i, ret = 0;
struct fschmd_data *data = filp->private_data;
+ lock_kernel();
switch (cmd) {
case WDIOC_GETSUPPORT:
ident.firmware_version = data->revision;
@@ -914,7 +915,7 @@ static int watchdog_ioctl(struct inode *inode, struct file *filp,
default:
ret = -ENOTTY;
}
-
+ unlock_kernel();
return ret;
}
@@ -924,7 +925,7 @@ static const struct file_operations watchdog_fops = {
.open = watchdog_open,
.release = watchdog_release,
.write = watchdog_write,
- .ioctl = watchdog_ioctl,
+ .unlocked_ioctl = watchdog_ioctl,
};
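The switch from .ioctl to .unlocked_ioctl above (repeated for w83793.c later in this patch) follows the BKL push-down pattern: the inode argument is dropped, the return type becomes long, and the handler now takes and releases the big kernel lock itself to keep the old serialization. The real handlers keep their switch statement inline between lock_kernel() and unlock_kernel(); this simplified sketch uses a hypothetical helper instead:

static long example_unlocked_ioctl(struct file *filp, unsigned int cmd,
				   unsigned long arg)
{
	long ret;

	lock_kernel();		/* preserve the old BKL serialization */
	ret = example_handle_cmd(filp, cmd, arg);	/* hypothetical helper */
	unlock_kernel();
	return ret;
}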
diff --git a/drivers/hwmon/hp_accel.c b/drivers/hwmon/hp_accel.c
index c8ab50516672..7580f55e67e3 100644
--- a/drivers/hwmon/hp_accel.c
+++ b/drivers/hwmon/hp_accel.c
@@ -328,8 +328,8 @@ static int lis3lv02d_remove(struct acpi_device *device, int type)
lis3lv02d_joystick_disable();
lis3lv02d_poweroff(&lis3_dev);
- flush_work(&hpled_led.work);
led_classdev_unregister(&hpled_led.led_classdev);
+ flush_work(&hpled_led.work);
return lis3lv02d_remove_fs(&lis3_dev);
}
diff --git a/drivers/hwmon/lis3lv02d.c b/drivers/hwmon/lis3lv02d.c
index b2f2277cad3c..6138f036b159 100644
--- a/drivers/hwmon/lis3lv02d.c
+++ b/drivers/hwmon/lis3lv02d.c
@@ -41,6 +41,8 @@
/* joystick device poll interval in milliseconds */
#define MDPS_POLL_INTERVAL 50
+#define MDPS_POLL_MIN 0
+#define MDPS_POLL_MAX 2000
/*
* The sensor can also generate interrupts (DRDY) but it's pretty pointless
* because they are generated even if the data do not change. So it's better
@@ -121,11 +123,9 @@ static void lis3lv02d_get_xyz(struct lis3lv02d *lis3, int *x, int *y, int *z)
int position[3];
int i;
- mutex_lock(&lis3->mutex);
position[0] = lis3->read_data(lis3, OUTX);
position[1] = lis3->read_data(lis3, OUTY);
position[2] = lis3->read_data(lis3, OUTZ);
- mutex_unlock(&lis3->mutex);
for (i = 0; i < 3; i++)
position[i] = (position[i] * lis3->scale) / LIS3_ACCURACY;
@@ -249,8 +249,24 @@ void lis3lv02d_poweron(struct lis3lv02d *lis3)
EXPORT_SYMBOL_GPL(lis3lv02d_poweron);
+static void lis3lv02d_joystick_poll(struct input_polled_dev *pidev)
+{
+ int x, y, z;
+
+ mutex_lock(&lis3_dev.mutex);
+ lis3lv02d_get_xyz(&lis3_dev, &x, &y, &z);
+ input_report_abs(pidev->input, ABS_X, x);
+ input_report_abs(pidev->input, ABS_Y, y);
+ input_report_abs(pidev->input, ABS_Z, z);
+ input_sync(pidev->input);
+ mutex_unlock(&lis3_dev.mutex);
+}
+
static irqreturn_t lis302dl_interrupt(int irq, void *dummy)
{
+ if (!test_bit(0, &lis3_dev.misc_opened))
+ goto out;
+
/*
* Be careful: on some HP laptops the bios force DD when on battery and
* the lid is closed. This leads to interrupts as soon as a little move
@@ -260,44 +276,93 @@ static irqreturn_t lis302dl_interrupt(int irq, void *dummy)
wake_up_interruptible(&lis3_dev.misc_wait);
kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
+out:
+ if (lis3_dev.whoami == WAI_8B && lis3_dev.idev &&
+ lis3_dev.idev->input->users)
+ return IRQ_WAKE_THREAD;
return IRQ_HANDLED;
}
-static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
+static void lis302dl_interrupt_handle_click(struct lis3lv02d *lis3)
{
- int ret;
+ struct input_dev *dev = lis3->idev->input;
+ u8 click_src;
- if (test_and_set_bit(0, &lis3_dev.misc_opened))
- return -EBUSY; /* already open */
+ mutex_lock(&lis3->mutex);
+ lis3->read(lis3, CLICK_SRC, &click_src);
- atomic_set(&lis3_dev.count, 0);
+ if (click_src & CLICK_SINGLE_X) {
+ input_report_key(dev, lis3->mapped_btns[0], 1);
+ input_report_key(dev, lis3->mapped_btns[0], 0);
+ }
- /*
- * The sensor can generate interrupts for free-fall and direction
- * detection (distinguishable with FF_WU_SRC and DD_SRC) but to keep
- * the things simple and _fast_ we activate it only for free-fall, so
- * no need to read register (very slow with ACPI). For the same reason,
- * we forbid shared interrupts.
- *
- * IRQF_TRIGGER_RISING seems pointless on HP laptops because the
- * io-apic is not configurable (and generates a warning) but I keep it
- * in case of support for other hardware.
- */
- ret = request_irq(lis3_dev.irq, lis302dl_interrupt, IRQF_TRIGGER_RISING,
- DRIVER_NAME, &lis3_dev);
+ if (click_src & CLICK_SINGLE_Y) {
+ input_report_key(dev, lis3->mapped_btns[1], 1);
+ input_report_key(dev, lis3->mapped_btns[1], 0);
+ }
- if (ret) {
- clear_bit(0, &lis3_dev.misc_opened);
- printk(KERN_ERR DRIVER_NAME ": IRQ%d allocation failed\n", lis3_dev.irq);
- return -EBUSY;
+ if (click_src & CLICK_SINGLE_Z) {
+ input_report_key(dev, lis3->mapped_btns[2], 1);
+ input_report_key(dev, lis3->mapped_btns[2], 0);
}
+ input_sync(dev);
+ mutex_unlock(&lis3->mutex);
+}
+
+static void lis302dl_interrupt_handle_ff_wu(struct lis3lv02d *lis3)
+{
+ u8 wu1_src;
+ u8 wu2_src;
+
+ lis3->read(lis3, FF_WU_SRC_1, &wu1_src);
+ lis3->read(lis3, FF_WU_SRC_2, &wu2_src);
+
+ wu1_src = wu1_src & FF_WU_SRC_IA ? wu1_src : 0;
+ wu2_src = wu2_src & FF_WU_SRC_IA ? wu2_src : 0;
+
+ /* joystick poll is internally protected by the lis3->mutex. */
+ if (wu1_src || wu2_src)
+ lis3lv02d_joystick_poll(lis3_dev.idev);
+}
+
+static irqreturn_t lis302dl_interrupt_thread1_8b(int irq, void *data)
+{
+ struct lis3lv02d *lis3 = data;
+
+ if ((lis3->pdata->irq_cfg & LIS3_IRQ1_MASK) == LIS3_IRQ1_CLICK)
+ lis302dl_interrupt_handle_click(lis3);
+ else
+ lis302dl_interrupt_handle_ff_wu(lis3);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t lis302dl_interrupt_thread2_8b(int irq, void *data)
+{
+ struct lis3lv02d *lis3 = data;
+
+ if ((lis3->pdata->irq_cfg & LIS3_IRQ2_MASK) == LIS3_IRQ2_CLICK)
+ lis302dl_interrupt_handle_click(lis3);
+ else
+ lis302dl_interrupt_handle_ff_wu(lis3);
+
+ return IRQ_HANDLED;
+}
+
+static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
+{
+ if (test_and_set_bit(0, &lis3_dev.misc_opened))
+ return -EBUSY; /* already open */
+
+ atomic_set(&lis3_dev.count, 0);
return 0;
}
static int lis3lv02d_misc_release(struct inode *inode, struct file *file)
{
fasync_helper(-1, file, 0, &lis3_dev.async_queue);
- free_irq(lis3_dev.irq, &lis3_dev);
clear_bit(0, &lis3_dev.misc_opened); /* release the device */
return 0;
}
@@ -380,22 +445,12 @@ static struct miscdevice lis3lv02d_misc_device = {
.fops = &lis3lv02d_misc_fops,
};
-static void lis3lv02d_joystick_poll(struct input_polled_dev *pidev)
-{
- int x, y, z;
-
- lis3lv02d_get_xyz(&lis3_dev, &x, &y, &z);
- input_report_abs(pidev->input, ABS_X, x);
- input_report_abs(pidev->input, ABS_Y, y);
- input_report_abs(pidev->input, ABS_Z, z);
- input_sync(pidev->input);
-}
-
int lis3lv02d_joystick_enable(void)
{
struct input_dev *input_dev;
int err;
int max_val, fuzz, flat;
+ int btns[] = {BTN_X, BTN_Y, BTN_Z};
if (lis3_dev.idev)
return -EINVAL;
@@ -406,6 +461,8 @@ int lis3lv02d_joystick_enable(void)
lis3_dev.idev->poll = lis3lv02d_joystick_poll;
lis3_dev.idev->poll_interval = MDPS_POLL_INTERVAL;
+ lis3_dev.idev->poll_interval_min = MDPS_POLL_MIN;
+ lis3_dev.idev->poll_interval_max = MDPS_POLL_MAX;
input_dev = lis3_dev.idev->input;
input_dev->name = "ST LIS3LV02DL Accelerometer";
@@ -422,6 +479,10 @@ int lis3lv02d_joystick_enable(void)
input_set_abs_params(input_dev, ABS_Y, -max_val, max_val, fuzz, flat);
input_set_abs_params(input_dev, ABS_Z, -max_val, max_val, fuzz, flat);
+ lis3_dev.mapped_btns[0] = lis3lv02d_get_axis(abs(lis3_dev.ac.x), btns);
+ lis3_dev.mapped_btns[1] = lis3lv02d_get_axis(abs(lis3_dev.ac.y), btns);
+ lis3_dev.mapped_btns[2] = lis3lv02d_get_axis(abs(lis3_dev.ac.z), btns);
+
err = input_register_polled_device(lis3_dev.idev);
if (err) {
input_free_polled_device(lis3_dev.idev);
@@ -434,6 +495,11 @@ EXPORT_SYMBOL_GPL(lis3lv02d_joystick_enable);
void lis3lv02d_joystick_disable(void)
{
+ if (lis3_dev.irq)
+ free_irq(lis3_dev.irq, &lis3_dev);
+ if (lis3_dev.pdata && lis3_dev.pdata->irq2)
+ free_irq(lis3_dev.pdata->irq2, &lis3_dev);
+
if (!lis3_dev.idev)
return;
@@ -462,7 +528,9 @@ static ssize_t lis3lv02d_position_show(struct device *dev,
{
int x, y, z;
+ mutex_lock(&lis3_dev.mutex);
lis3lv02d_get_xyz(&lis3_dev, &x, &y, &z);
+ mutex_unlock(&lis3_dev.mutex);
return sprintf(buf, "(%d,%d,%d)\n", x, y, z);
}
@@ -521,12 +589,70 @@ int lis3lv02d_remove_fs(struct lis3lv02d *lis3)
}
EXPORT_SYMBOL_GPL(lis3lv02d_remove_fs);
+static void lis3lv02d_8b_configure(struct lis3lv02d *dev,
+ struct lis3lv02d_platform_data *p)
+{
+ int err;
+ int ctrl2 = p->hipass_ctrl;
+
+ if (p->click_flags) {
+ dev->write(dev, CLICK_CFG, p->click_flags);
+ dev->write(dev, CLICK_TIMELIMIT, p->click_time_limit);
+ dev->write(dev, CLICK_LATENCY, p->click_latency);
+ dev->write(dev, CLICK_WINDOW, p->click_window);
+ dev->write(dev, CLICK_THSZ, p->click_thresh_z & 0xf);
+ dev->write(dev, CLICK_THSY_X,
+ (p->click_thresh_x & 0xf) |
+ (p->click_thresh_y << 4));
+
+ if (dev->idev) {
+ struct input_dev *input_dev = lis3_dev.idev->input;
+ input_set_capability(input_dev, EV_KEY, BTN_X);
+ input_set_capability(input_dev, EV_KEY, BTN_Y);
+ input_set_capability(input_dev, EV_KEY, BTN_Z);
+ }
+ }
+
+ if (p->wakeup_flags) {
+ dev->write(dev, FF_WU_CFG_1, p->wakeup_flags);
+ dev->write(dev, FF_WU_THS_1, p->wakeup_thresh & 0x7f);
+ /* default to 2.5ms for now */
+ dev->write(dev, FF_WU_DURATION_1, 1);
+ ctrl2 ^= HP_FF_WU1; /* Xor to keep compatible with old pdata */
+ }
+
+ if (p->wakeup_flags2) {
+ dev->write(dev, FF_WU_CFG_2, p->wakeup_flags2);
+ dev->write(dev, FF_WU_THS_2, p->wakeup_thresh2 & 0x7f);
+ /* default to 2.5ms for now */
+ dev->write(dev, FF_WU_DURATION_2, 1);
+ ctrl2 ^= HP_FF_WU2; /* Xor to keep compatible with old pdata */
+ }
+ /* Configure hipass filters */
+ dev->write(dev, CTRL_REG2, ctrl2);
+
+ if (p->irq2) {
+ err = request_threaded_irq(p->irq2,
+ NULL,
+ lis302dl_interrupt_thread2_8b,
+ IRQF_TRIGGER_RISING |
+ IRQF_ONESHOT,
+ DRIVER_NAME, &lis3_dev);
+ if (err < 0)
+ printk(KERN_ERR DRIVER_NAME
+ ": No second IRQ. Limited functionality\n");
+ }
+}
+
/*
* Initialise the accelerometer and the various subsystems.
* Should be rather independent of the bus system.
*/
int lis3lv02d_init_device(struct lis3lv02d *dev)
{
+ int err;
+ irq_handler_t thread_fn;
+
dev->whoami = lis3lv02d_read_8(dev, WHO_AM_I);
switch (dev->whoami) {
@@ -567,25 +693,8 @@ int lis3lv02d_init_device(struct lis3lv02d *dev)
if (dev->pdata) {
struct lis3lv02d_platform_data *p = dev->pdata;
- if (p->click_flags && (dev->whoami == WAI_8B)) {
- dev->write(dev, CLICK_CFG, p->click_flags);
- dev->write(dev, CLICK_TIMELIMIT, p->click_time_limit);
- dev->write(dev, CLICK_LATENCY, p->click_latency);
- dev->write(dev, CLICK_WINDOW, p->click_window);
- dev->write(dev, CLICK_THSZ, p->click_thresh_z & 0xf);
- dev->write(dev, CLICK_THSY_X,
- (p->click_thresh_x & 0xf) |
- (p->click_thresh_y << 4));
- }
-
- if (p->wakeup_flags && (dev->whoami == WAI_8B)) {
- dev->write(dev, FF_WU_CFG_1, p->wakeup_flags);
- dev->write(dev, FF_WU_THS_1, p->wakeup_thresh & 0x7f);
- /* default to 2.5ms for now */
- dev->write(dev, FF_WU_DURATION_1, 1);
- /* enable high pass filter for both free-fall units */
- dev->write(dev, CTRL_REG2, HP_FF_WU1 | HP_FF_WU2);
- }
+ if (dev->whoami == WAI_8B)
+ lis3lv02d_8b_configure(dev, p);
if (p->irq_cfg)
dev->write(dev, CTRL_REG3, p->irq_cfg);
@@ -598,6 +707,32 @@ int lis3lv02d_init_device(struct lis3lv02d *dev)
goto out;
}
+ /*
+ * The sensor can generate interrupts for free-fall and direction
+ * detection (distinguishable with FF_WU_SRC and DD_SRC) but to keep
+ * the things simple and _fast_ we activate it only for free-fall, so
+ * no need to read register (very slow with ACPI). For the same reason,
+ * we forbid shared interrupts.
+ *
+ * IRQF_TRIGGER_RISING seems pointless on HP laptops because the
+ * io-apic is not configurable (and generates a warning) but I keep it
+ * in case of support for other hardware.
+ */
+ if (dev->whoami == WAI_8B)
+ thread_fn = lis302dl_interrupt_thread1_8b;
+ else
+ thread_fn = NULL;
+
+ err = request_threaded_irq(dev->irq, lis302dl_interrupt,
+ thread_fn,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ DRIVER_NAME, &lis3_dev);
+
+ if (err < 0) {
+ printk(KERN_ERR DRIVER_NAME "Cannot get IRQ\n");
+ goto out;
+ }
+
if (misc_register(&lis3lv02d_misc_device))
printk(KERN_ERR DRIVER_NAME ": misc_register failed\n");
out:
diff --git a/drivers/hwmon/lis3lv02d.h b/drivers/hwmon/lis3lv02d.h
index e6a01f44709b..854091380e33 100644
--- a/drivers/hwmon/lis3lv02d.h
+++ b/drivers/hwmon/lis3lv02d.h
@@ -196,6 +196,16 @@ enum lis3lv02d_dd_src {
DD_SRC_IA = 0x40,
};
+enum lis3lv02d_click_src_8b {
+ CLICK_SINGLE_X = 0x01,
+ CLICK_DOUBLE_X = 0x02,
+ CLICK_SINGLE_Y = 0x04,
+ CLICK_DOUBLE_Y = 0x08,
+ CLICK_SINGLE_Z = 0x10,
+ CLICK_DOUBLE_Z = 0x20,
+ CLICK_IA = 0x40,
+};
+
struct axis_conversion {
s8 x;
s8 y;
@@ -223,6 +233,7 @@ struct lis3lv02d {
struct platform_device *pdev; /* platform device */
atomic_t count; /* interrupt count after last read */
struct axis_conversion ac; /* hw -> logical axis */
+ int mapped_btns[3];
u32 irq; /* IRQ number */
struct fasync_struct *async_queue; /* queue for the misc device */
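The lis3lv02d rework above splits interrupt handling with request_threaded_irq(): the hard handler only wakes the misc-device readers and returns IRQ_WAKE_THREAD when the slow register reads (click or free-fall/wake-up sources over I2C/SPI) are actually needed, and IRQF_ONESHOT keeps the line masked until the threaded handler completes. A reduced sketch of that split, assuming simplified conditions rather than the driver's exact checks:

static irqreturn_t sketch_hard_handler(int irq, void *data)
{
	struct lis3lv02d *lis3 = data;

	wake_up_interruptible(&lis3->misc_wait);
	kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);

	if (lis3->whoami == WAI_8B && lis3->idev)
		return IRQ_WAKE_THREAD;	/* defer the bus access */
	return IRQ_HANDLED;
}

static irqreturn_t sketch_thread_fn(int irq, void *data)
{
	struct lis3lv02d *lis3 = data;

	/* slow I2C/SPI reads are safe in thread context */
	lis302dl_interrupt_handle_ff_wu(lis3);
	return IRQ_HANDLED;
}

/*
 * Registration (as in lis3lv02d_init_device above):
 * request_threaded_irq(lis3->irq, sketch_hard_handler, sketch_thread_fn,
 *			IRQF_TRIGGER_RISING | IRQF_ONESHOT,
 *			DRIVER_NAME, lis3);
 */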
diff --git a/drivers/hwmon/lm63.c b/drivers/hwmon/lm63.c
index bf81aff7051d..776aeb3019d2 100644
--- a/drivers/hwmon/lm63.c
+++ b/drivers/hwmon/lm63.c
@@ -53,7 +53,7 @@
* Address is fully defined internally and cannot be changed.
*/
-static const unsigned short normal_i2c[] = { 0x4c, I2C_CLIENT_END };
+static const unsigned short normal_i2c[] = { 0x18, 0x4c, 0x4e, I2C_CLIENT_END };
/*
* The LM63 registers
@@ -131,12 +131,15 @@ static struct lm63_data *lm63_update_device(struct device *dev);
static int lm63_detect(struct i2c_client *client, struct i2c_board_info *info);
static void lm63_init_client(struct i2c_client *client);
+enum chips { lm63, lm64 };
+
/*
* Driver data (common to all clients)
*/
static const struct i2c_device_id lm63_id[] = {
- { "lm63", 0 },
+ { "lm63", lm63 },
+ { "lm64", lm64 },
{ }
};
MODULE_DEVICE_TABLE(i2c, lm63_id);
@@ -422,6 +425,7 @@ static int lm63_detect(struct i2c_client *new_client,
struct i2c_adapter *adapter = new_client->adapter;
u8 man_id, chip_id, reg_config1, reg_config2;
u8 reg_alert_status, reg_alert_mask;
+ int address = new_client->addr;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -ENODEV;
@@ -439,7 +443,6 @@ static int lm63_detect(struct i2c_client *new_client,
LM63_REG_ALERT_MASK);
if (man_id != 0x01 /* National Semiconductor */
- || chip_id != 0x41 /* LM63 */
|| (reg_config1 & 0x18) != 0x00
|| (reg_config2 & 0xF8) != 0x00
|| (reg_alert_status & 0x20) != 0x00
@@ -450,7 +453,12 @@ static int lm63_detect(struct i2c_client *new_client,
return -ENODEV;
}
- strlcpy(info->type, "lm63", I2C_NAME_SIZE);
+ if (chip_id == 0x41 && address == 0x4c)
+ strlcpy(info->type, "lm63", I2C_NAME_SIZE);
+ else if (chip_id == 0x51 && (address == 0x18 || address == 0x4e))
+ strlcpy(info->type, "lm64", I2C_NAME_SIZE);
+ else
+ return -ENODEV;
return 0;
}
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index 8ae2cfe2d827..56463428a419 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -46,6 +46,7 @@ enum lm75_type { /* keep sorted in alphabetical order */
tcn75,
tmp100,
tmp101,
+ tmp105,
tmp175,
tmp275,
tmp75,
@@ -220,6 +221,7 @@ static const struct i2c_device_id lm75_ids[] = {
{ "tcn75", tcn75, },
{ "tmp100", tmp100, },
{ "tmp101", tmp101, },
+ { "tmp105", tmp105, },
{ "tmp175", tmp175, },
{ "tmp275", tmp275, },
{ "tmp75", tmp75, },
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index 7cc2708871ab..760ef72eea56 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -982,7 +982,8 @@ static struct lm90_data *lm90_update_device(struct device *dev)
mutex_lock(&data->update_lock);
- if (time_after(jiffies, data->last_updated + HZ * 2) || !data->valid) {
+ if (time_after(jiffies, data->last_updated + HZ / 2 + HZ / 10)
+ || !data->valid) {
u8 h, l;
dev_dbg(&client->dev, "Updating lm90 data.\n");
diff --git a/drivers/hwmon/ltc4245.c b/drivers/hwmon/ltc4245.c
index 65c232a9d0c5..21d201befc2c 100644
--- a/drivers/hwmon/ltc4245.c
+++ b/drivers/hwmon/ltc4245.c
@@ -45,9 +45,7 @@ enum ltc4245_cmd {
LTC4245_VEEIN = 0x19,
LTC4245_VEESENSE = 0x1a,
LTC4245_VEEOUT = 0x1b,
- LTC4245_GPIOADC1 = 0x1c,
- LTC4245_GPIOADC2 = 0x1d,
- LTC4245_GPIOADC3 = 0x1e,
+ LTC4245_GPIOADC = 0x1c,
};
struct ltc4245_data {
@@ -61,7 +59,7 @@ struct ltc4245_data {
u8 cregs[0x08];
/* Voltage registers */
- u8 vregs[0x0f];
+ u8 vregs[0x0d];
};
static struct ltc4245_data *ltc4245_update_device(struct device *dev)
@@ -86,7 +84,7 @@ static struct ltc4245_data *ltc4245_update_device(struct device *dev)
data->cregs[i] = val;
}
- /* Read voltage registers -- 0x10 to 0x1f */
+ /* Read voltage registers -- 0x10 to 0x1c */
for (i = 0; i < ARRAY_SIZE(data->vregs); i++) {
val = i2c_smbus_read_byte_data(client, i+0x10);
if (unlikely(val < 0))
@@ -128,9 +126,7 @@ static int ltc4245_get_voltage(struct device *dev, u8 reg)
case LTC4245_VEEOUT:
voltage = regval * -55;
break;
- case LTC4245_GPIOADC1:
- case LTC4245_GPIOADC2:
- case LTC4245_GPIOADC3:
+ case LTC4245_GPIOADC:
voltage = regval * 10;
break;
default:
@@ -297,9 +293,7 @@ LTC4245_ALARM(in7_min_alarm, (1 << 2), LTC4245_FAULT2);
LTC4245_ALARM(in8_min_alarm, (1 << 3), LTC4245_FAULT2);
/* GPIO voltages */
-LTC4245_VOLTAGE(in9_input, LTC4245_GPIOADC1);
-LTC4245_VOLTAGE(in10_input, LTC4245_GPIOADC2);
-LTC4245_VOLTAGE(in11_input, LTC4245_GPIOADC3);
+LTC4245_VOLTAGE(in9_input, LTC4245_GPIOADC);
/* Power Consumption (virtual) */
LTC4245_POWER(power1_input, LTC4245_12VSENSE);
@@ -342,8 +336,6 @@ static struct attribute *ltc4245_attributes[] = {
&sensor_dev_attr_in8_min_alarm.dev_attr.attr,
&sensor_dev_attr_in9_input.dev_attr.attr,
- &sensor_dev_attr_in10_input.dev_attr.attr,
- &sensor_dev_attr_in11_input.dev_attr.attr,
&sensor_dev_attr_power1_input.dev_attr.attr,
&sensor_dev_attr_power2_input.dev_attr.attr,
diff --git a/drivers/hwmon/tmp102.c b/drivers/hwmon/tmp102.c
new file mode 100644
index 000000000000..8013895a1faf
--- /dev/null
+++ b/drivers/hwmon/tmp102.c
@@ -0,0 +1,321 @@
+/* Texas Instruments TMP102 SMBus temperature sensor driver
+ *
+ * Copyright (C) 2010 Steven King <sfking@fdwdc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+
+#define DRIVER_NAME "tmp102"
+
+#define TMP102_TEMP_REG 0x00
+#define TMP102_CONF_REG 0x01
+/* note: these bit definitions are byte swapped */
+#define TMP102_CONF_SD 0x0100
+#define TMP102_CONF_TM 0x0200
+#define TMP102_CONF_POL 0x0400
+#define TMP102_CONF_F0 0x0800
+#define TMP102_CONF_F1 0x1000
+#define TMP102_CONF_R0 0x2000
+#define TMP102_CONF_R1 0x4000
+#define TMP102_CONF_OS 0x8000
+#define TMP102_CONF_EM 0x0010
+#define TMP102_CONF_AL 0x0020
+#define TMP102_CONF_CR0 0x0040
+#define TMP102_CONF_CR1 0x0080
+#define TMP102_TLOW_REG 0x02
+#define TMP102_THIGH_REG 0x03
+
+struct tmp102 {
+ struct device *hwmon_dev;
+ struct mutex lock;
+ u16 config_orig;
+ unsigned long last_update;
+ int temp[3];
+};
+
+/* SMBus specifies low byte first, but the TMP102 returns high byte first,
+ * so we have to swab16 the values */
+static inline int tmp102_read_reg(struct i2c_client *client, u8 reg)
+{
+ int result = i2c_smbus_read_word_data(client, reg);
+ return result < 0 ? result : swab16(result);
+}
+
+static inline int tmp102_write_reg(struct i2c_client *client, u8 reg, u16 val)
+{
+ return i2c_smbus_write_word_data(client, reg, swab16(val));
+}
+
+/* convert left adjusted 13-bit TMP102 register value to milliCelsius */
+static inline int tmp102_reg_to_mC(s16 val)
+{
+ return ((val & ~0x01) * 1000) / 128;
+}
+
+/* convert milliCelsius to left adjusted 13-bit TMP102 register value */
+static inline u16 tmp102_mC_to_reg(int val)
+{
+ return (val * 128) / 1000;
+}
+
+static const u8 tmp102_reg[] = {
+ TMP102_TEMP_REG,
+ TMP102_TLOW_REG,
+ TMP102_THIGH_REG,
+};
+
+static struct tmp102 *tmp102_update_device(struct i2c_client *client)
+{
+ struct tmp102 *tmp102 = i2c_get_clientdata(client);
+
+ mutex_lock(&tmp102->lock);
+ if (time_after(jiffies, tmp102->last_update + HZ / 3)) {
+ int i;
+ for (i = 0; i < ARRAY_SIZE(tmp102->temp); ++i) {
+ int status = tmp102_read_reg(client, tmp102_reg[i]);
+ if (status > -1)
+ tmp102->temp[i] = tmp102_reg_to_mC(status);
+ }
+ tmp102->last_update = jiffies;
+ }
+ mutex_unlock(&tmp102->lock);
+ return tmp102;
+}
+
+static ssize_t tmp102_show_temp(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct sensor_device_attribute *sda = to_sensor_dev_attr(attr);
+ struct tmp102 *tmp102 = tmp102_update_device(to_i2c_client(dev));
+
+ return sprintf(buf, "%d\n", tmp102->temp[sda->index]);
+}
+
+static ssize_t tmp102_set_temp(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct sensor_device_attribute *sda = to_sensor_dev_attr(attr);
+ struct i2c_client *client = to_i2c_client(dev);
+ struct tmp102 *tmp102 = i2c_get_clientdata(client);
+ long val;
+ int status;
+
+ if (strict_strtol(buf, 10, &val) < 0)
+ return -EINVAL;
+ val = SENSORS_LIMIT(val, -256000, 255000);
+
+ mutex_lock(&tmp102->lock);
+ tmp102->temp[sda->index] = val;
+ status = tmp102_write_reg(client, tmp102_reg[sda->index],
+ tmp102_mC_to_reg(val));
+ mutex_unlock(&tmp102->lock);
+ return status ? : count;
+}
+
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tmp102_show_temp, NULL, 0);
+
+static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IWUSR | S_IRUGO, tmp102_show_temp,
+ tmp102_set_temp, 1);
+
+static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, tmp102_show_temp,
+ tmp102_set_temp, 2);
+
+static struct attribute *tmp102_attributes[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_temp1_max_hyst.dev_attr.attr,
+ &sensor_dev_attr_temp1_max.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group tmp102_attr_group = {
+ .attrs = tmp102_attributes,
+};
+
+#define TMP102_CONFIG (TMP102_CONF_TM | TMP102_CONF_EM | TMP102_CONF_CR1)
+#define TMP102_CONFIG_RD_ONLY (TMP102_CONF_R0 | TMP102_CONF_R1 | TMP102_CONF_AL)
+
+static int __devinit tmp102_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct tmp102 *tmp102;
+ int status;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_WORD_DATA)) {
+ dev_err(&client->dev, "adapter doesnt support SMBus word "
+ "transactions\n");
+ return -ENODEV;
+ }
+
+ tmp102 = kzalloc(sizeof(*tmp102), GFP_KERNEL);
+ if (!tmp102) {
+ dev_dbg(&client->dev, "kzalloc failed\n");
+ return -ENOMEM;
+ }
+ i2c_set_clientdata(client, tmp102);
+
+ status = tmp102_read_reg(client, TMP102_CONF_REG);
+ if (status < 0) {
+ dev_err(&client->dev, "error reading config register\n");
+ goto fail_free;
+ }
+ tmp102->config_orig = status;
+ status = tmp102_write_reg(client, TMP102_CONF_REG, TMP102_CONFIG);
+ if (status < 0) {
+ dev_err(&client->dev, "error writing config register\n");
+ goto fail_restore_config;
+ }
+ status = tmp102_read_reg(client, TMP102_CONF_REG);
+ if (status < 0) {
+ dev_err(&client->dev, "error reading config register\n");
+ goto fail_restore_config;
+ }
+ status &= ~TMP102_CONFIG_RD_ONLY;
+ if (status != TMP102_CONFIG) {
+ dev_err(&client->dev, "config settings did not stick\n");
+ status = -ENODEV;
+ goto fail_restore_config;
+ }
+ tmp102->last_update = jiffies - HZ;
+ mutex_init(&tmp102->lock);
+
+ status = sysfs_create_group(&client->dev.kobj, &tmp102_attr_group);
+ if (status) {
+ dev_dbg(&client->dev, "could not create sysfs files\n");
+ goto fail_restore_config;
+ }
+ tmp102->hwmon_dev = hwmon_device_register(&client->dev);
+ if (IS_ERR(tmp102->hwmon_dev)) {
+ dev_dbg(&client->dev, "unable to register hwmon device\n");
+ status = PTR_ERR(tmp102->hwmon_dev);
+ goto fail_remove_sysfs;
+ }
+
+ dev_info(&client->dev, "initialized\n");
+
+ return 0;
+
+fail_remove_sysfs:
+ sysfs_remove_group(&client->dev.kobj, &tmp102_attr_group);
+fail_restore_config:
+ tmp102_write_reg(client, TMP102_CONF_REG, tmp102->config_orig);
+fail_free:
+ i2c_set_clientdata(client, NULL);
+ kfree(tmp102);
+
+ return status;
+}
+
+static int __devexit tmp102_remove(struct i2c_client *client)
+{
+ struct tmp102 *tmp102 = i2c_get_clientdata(client);
+
+ hwmon_device_unregister(tmp102->hwmon_dev);
+ sysfs_remove_group(&client->dev.kobj, &tmp102_attr_group);
+
+ /* Stop monitoring if device was stopped originally */
+ if (tmp102->config_orig & TMP102_CONF_SD) {
+ int config;
+
+ config = tmp102_read_reg(client, TMP102_CONF_REG);
+ if (config >= 0)
+ tmp102_write_reg(client, TMP102_CONF_REG,
+ config | TMP102_CONF_SD);
+ }
+
+ i2c_set_clientdata(client, NULL);
+ kfree(tmp102);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int tmp102_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ int config;
+
+ config = tmp102_read_reg(client, TMP102_CONF_REG);
+ if (config < 0)
+ return config;
+
+ config |= TMP102_CONF_SD;
+ return tmp102_write_reg(client, TMP102_CONF_REG, config);
+}
+
+static int tmp102_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ int config;
+
+ config = tmp102_read_reg(client, TMP102_CONF_REG);
+ if (config < 0)
+ return config;
+
+ config &= ~TMP102_CONF_SD;
+ return tmp102_write_reg(client, TMP102_CONF_REG, config);
+}
+
+static const struct dev_pm_ops tmp102_dev_pm_ops = {
+ .suspend = tmp102_suspend,
+ .resume = tmp102_resume,
+};
+
+#define TMP102_DEV_PM_OPS (&tmp102_dev_pm_ops)
+#else
+#define TMP102_DEV_PM_OPS NULL
+#endif /* CONFIG_PM */
+
+static const struct i2c_device_id tmp102_id[] = {
+ { "tmp102", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tmp102_id);
+
+static struct i2c_driver tmp102_driver = {
+ .driver.name = DRIVER_NAME,
+ .driver.pm = TMP102_DEV_PM_OPS,
+ .probe = tmp102_probe,
+ .remove = __devexit_p(tmp102_remove),
+ .id_table = tmp102_id,
+};
+
+static int __init tmp102_init(void)
+{
+ return i2c_add_driver(&tmp102_driver);
+}
+module_init(tmp102_init);
+
+static void __exit tmp102_exit(void)
+{
+ i2c_del_driver(&tmp102_driver);
+}
+module_exit(tmp102_exit);
+
+MODULE_AUTHOR("Steven King <sfking@fdwdc.com>");
+MODULE_DESCRIPTION("Texas Instruments TMP102 temperature sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/tmp401.c b/drivers/hwmon/tmp401.c
index d14a1af9f550..ad8d535235c5 100644
--- a/drivers/hwmon/tmp401.c
+++ b/drivers/hwmon/tmp401.c
@@ -92,17 +92,6 @@ static const u8 TMP411_TEMP_HIGHEST_LSB[2] = { 0x33, 0x37 };
#define TMP411_DEVICE_ID 0x12
/*
- * Functions declarations
- */
-
-static int tmp401_probe(struct i2c_client *client,
- const struct i2c_device_id *id);
-static int tmp401_detect(struct i2c_client *client,
- struct i2c_board_info *info);
-static int tmp401_remove(struct i2c_client *client);
-static struct tmp401_data *tmp401_update_device(struct device *dev);
-
-/*
* Driver data (common to all clients)
*/
@@ -113,18 +102,6 @@ static const struct i2c_device_id tmp401_id[] = {
};
MODULE_DEVICE_TABLE(i2c, tmp401_id);
-static struct i2c_driver tmp401_driver = {
- .class = I2C_CLASS_HWMON,
- .driver = {
- .name = "tmp401",
- },
- .probe = tmp401_probe,
- .remove = tmp401_remove,
- .id_table = tmp401_id,
- .detect = tmp401_detect,
- .address_list = normal_i2c,
-};
-
/*
* Client data (each client gets its own)
*/
@@ -194,6 +171,71 @@ static u8 tmp401_crit_temp_to_register(long temp, u8 config)
return (temp + 500) / 1000;
}
+static struct tmp401_data *tmp401_update_device_reg16(
+ struct i2c_client *client, struct tmp401_data *data)
+{
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ /*
+ * High byte must be read first immediately followed
+ * by the low byte
+ */
+ data->temp[i] = i2c_smbus_read_byte_data(client,
+ TMP401_TEMP_MSB[i]) << 8;
+ data->temp[i] |= i2c_smbus_read_byte_data(client,
+ TMP401_TEMP_LSB[i]);
+ data->temp_low[i] = i2c_smbus_read_byte_data(client,
+ TMP401_TEMP_LOW_LIMIT_MSB_READ[i]) << 8;
+ data->temp_low[i] |= i2c_smbus_read_byte_data(client,
+ TMP401_TEMP_LOW_LIMIT_LSB[i]);
+ data->temp_high[i] = i2c_smbus_read_byte_data(client,
+ TMP401_TEMP_HIGH_LIMIT_MSB_READ[i]) << 8;
+ data->temp_high[i] |= i2c_smbus_read_byte_data(client,
+ TMP401_TEMP_HIGH_LIMIT_LSB[i]);
+ data->temp_crit[i] = i2c_smbus_read_byte_data(client,
+ TMP401_TEMP_CRIT_LIMIT[i]);
+
+ if (data->kind == tmp411) {
+ data->temp_lowest[i] = i2c_smbus_read_byte_data(client,
+ TMP411_TEMP_LOWEST_MSB[i]) << 8;
+ data->temp_lowest[i] |= i2c_smbus_read_byte_data(
+ client, TMP411_TEMP_LOWEST_LSB[i]);
+
+ data->temp_highest[i] = i2c_smbus_read_byte_data(
+ client, TMP411_TEMP_HIGHEST_MSB[i]) << 8;
+ data->temp_highest[i] |= i2c_smbus_read_byte_data(
+ client, TMP411_TEMP_HIGHEST_LSB[i]);
+ }
+ }
+ return data;
+}
+
+static struct tmp401_data *tmp401_update_device(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct tmp401_data *data = i2c_get_clientdata(client);
+
+ mutex_lock(&data->update_lock);
+
+ if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
+ data->status = i2c_smbus_read_byte_data(client, TMP401_STATUS);
+ data->config = i2c_smbus_read_byte_data(client,
+ TMP401_CONFIG_READ);
+ tmp401_update_device_reg16(client, data);
+
+ data->temp_crit_hyst = i2c_smbus_read_byte_data(client,
+ TMP401_TEMP_CRIT_HYST);
+
+ data->last_updated = jiffies;
+ data->valid = 1;
+ }
+
+ mutex_unlock(&data->update_lock);
+
+ return data;
+}
+
static ssize_t show_temp_value(struct device *dev,
struct device_attribute *devattr, char *buf)
{
@@ -420,30 +462,36 @@ static ssize_t reset_temp_history(struct device *dev,
}
static struct sensor_device_attribute tmp401_attr[] = {
- SENSOR_ATTR(temp1_input, 0444, show_temp_value, NULL, 0),
- SENSOR_ATTR(temp1_min, 0644, show_temp_min, store_temp_min, 0),
- SENSOR_ATTR(temp1_max, 0644, show_temp_max, store_temp_max, 0),
- SENSOR_ATTR(temp1_crit, 0644, show_temp_crit, store_temp_crit, 0),
- SENSOR_ATTR(temp1_crit_hyst, 0644, show_temp_crit_hyst,
+ SENSOR_ATTR(temp1_input, S_IRUGO, show_temp_value, NULL, 0),
+ SENSOR_ATTR(temp1_min, S_IWUSR | S_IRUGO, show_temp_min,
+ store_temp_min, 0),
+ SENSOR_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_temp_max,
+ store_temp_max, 0),
+ SENSOR_ATTR(temp1_crit, S_IWUSR | S_IRUGO, show_temp_crit,
+ store_temp_crit, 0),
+ SENSOR_ATTR(temp1_crit_hyst, S_IWUSR | S_IRUGO, show_temp_crit_hyst,
store_temp_crit_hyst, 0),
- SENSOR_ATTR(temp1_min_alarm, 0444, show_status, NULL,
+ SENSOR_ATTR(temp1_min_alarm, S_IRUGO, show_status, NULL,
TMP401_STATUS_LOCAL_LOW),
- SENSOR_ATTR(temp1_max_alarm, 0444, show_status, NULL,
+ SENSOR_ATTR(temp1_max_alarm, S_IRUGO, show_status, NULL,
TMP401_STATUS_LOCAL_HIGH),
- SENSOR_ATTR(temp1_crit_alarm, 0444, show_status, NULL,
+ SENSOR_ATTR(temp1_crit_alarm, S_IRUGO, show_status, NULL,
TMP401_STATUS_LOCAL_CRIT),
- SENSOR_ATTR(temp2_input, 0444, show_temp_value, NULL, 1),
- SENSOR_ATTR(temp2_min, 0644, show_temp_min, store_temp_min, 1),
- SENSOR_ATTR(temp2_max, 0644, show_temp_max, store_temp_max, 1),
- SENSOR_ATTR(temp2_crit, 0644, show_temp_crit, store_temp_crit, 1),
- SENSOR_ATTR(temp2_crit_hyst, 0444, show_temp_crit_hyst, NULL, 1),
- SENSOR_ATTR(temp2_fault, 0444, show_status, NULL,
+ SENSOR_ATTR(temp2_input, S_IRUGO, show_temp_value, NULL, 1),
+ SENSOR_ATTR(temp2_min, S_IWUSR | S_IRUGO, show_temp_min,
+ store_temp_min, 1),
+ SENSOR_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_temp_max,
+ store_temp_max, 1),
+ SENSOR_ATTR(temp2_crit, S_IWUSR | S_IRUGO, show_temp_crit,
+ store_temp_crit, 1),
+ SENSOR_ATTR(temp2_crit_hyst, S_IRUGO, show_temp_crit_hyst, NULL, 1),
+ SENSOR_ATTR(temp2_fault, S_IRUGO, show_status, NULL,
TMP401_STATUS_REMOTE_OPEN),
- SENSOR_ATTR(temp2_min_alarm, 0444, show_status, NULL,
+ SENSOR_ATTR(temp2_min_alarm, S_IRUGO, show_status, NULL,
TMP401_STATUS_REMOTE_LOW),
- SENSOR_ATTR(temp2_max_alarm, 0444, show_status, NULL,
+ SENSOR_ATTR(temp2_max_alarm, S_IRUGO, show_status, NULL,
TMP401_STATUS_REMOTE_HIGH),
- SENSOR_ATTR(temp2_crit_alarm, 0444, show_status, NULL,
+ SENSOR_ATTR(temp2_crit_alarm, S_IRUGO, show_status, NULL,
TMP401_STATUS_REMOTE_CRIT),
};
@@ -455,11 +503,11 @@ static struct sensor_device_attribute tmp401_attr[] = {
* and remote channels.
*/
static struct sensor_device_attribute tmp411_attr[] = {
- SENSOR_ATTR(temp1_highest, 0444, show_temp_highest, NULL, 0),
- SENSOR_ATTR(temp1_lowest, 0444, show_temp_lowest, NULL, 0),
- SENSOR_ATTR(temp2_highest, 0444, show_temp_highest, NULL, 1),
- SENSOR_ATTR(temp2_lowest, 0444, show_temp_lowest, NULL, 1),
- SENSOR_ATTR(temp_reset_history, 0200, NULL, reset_temp_history, 0),
+ SENSOR_ATTR(temp1_highest, S_IRUGO, show_temp_highest, NULL, 0),
+ SENSOR_ATTR(temp1_lowest, S_IRUGO, show_temp_lowest, NULL, 0),
+ SENSOR_ATTR(temp2_highest, S_IRUGO, show_temp_highest, NULL, 1),
+ SENSOR_ATTR(temp2_lowest, S_IRUGO, show_temp_lowest, NULL, 1),
+ SENSOR_ATTR(temp_reset_history, S_IWUSR, NULL, reset_temp_history, 0),
};
/*
@@ -529,6 +577,27 @@ static int tmp401_detect(struct i2c_client *client,
return 0;
}
+static int tmp401_remove(struct i2c_client *client)
+{
+ struct tmp401_data *data = i2c_get_clientdata(client);
+ int i;
+
+ if (data->hwmon_dev)
+ hwmon_device_unregister(data->hwmon_dev);
+
+ for (i = 0; i < ARRAY_SIZE(tmp401_attr); i++)
+ device_remove_file(&client->dev, &tmp401_attr[i].dev_attr);
+
+ if (data->kind == tmp411) {
+ for (i = 0; i < ARRAY_SIZE(tmp411_attr); i++)
+ device_remove_file(&client->dev,
+ &tmp411_attr[i].dev_attr);
+ }
+
+ kfree(data);
+ return 0;
+}
+
static int tmp401_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -581,91 +650,17 @@ exit_remove:
return err;
}
-static int tmp401_remove(struct i2c_client *client)
-{
- struct tmp401_data *data = i2c_get_clientdata(client);
- int i;
-
- if (data->hwmon_dev)
- hwmon_device_unregister(data->hwmon_dev);
-
- for (i = 0; i < ARRAY_SIZE(tmp401_attr); i++)
- device_remove_file(&client->dev, &tmp401_attr[i].dev_attr);
-
- if (data->kind == tmp411) {
- for (i = 0; i < ARRAY_SIZE(tmp411_attr); i++)
- device_remove_file(&client->dev,
- &tmp411_attr[i].dev_attr);
- }
-
- kfree(data);
- return 0;
-}
-
-static struct tmp401_data *tmp401_update_device_reg16(
- struct i2c_client *client, struct tmp401_data *data)
-{
- int i;
-
- for (i = 0; i < 2; i++) {
- /*
- * High byte must be read first immediately followed
- * by the low byte
- */
- data->temp[i] = i2c_smbus_read_byte_data(client,
- TMP401_TEMP_MSB[i]) << 8;
- data->temp[i] |= i2c_smbus_read_byte_data(client,
- TMP401_TEMP_LSB[i]);
- data->temp_low[i] = i2c_smbus_read_byte_data(client,
- TMP401_TEMP_LOW_LIMIT_MSB_READ[i]) << 8;
- data->temp_low[i] |= i2c_smbus_read_byte_data(client,
- TMP401_TEMP_LOW_LIMIT_LSB[i]);
- data->temp_high[i] = i2c_smbus_read_byte_data(client,
- TMP401_TEMP_HIGH_LIMIT_MSB_READ[i]) << 8;
- data->temp_high[i] |= i2c_smbus_read_byte_data(client,
- TMP401_TEMP_HIGH_LIMIT_LSB[i]);
- data->temp_crit[i] = i2c_smbus_read_byte_data(client,
- TMP401_TEMP_CRIT_LIMIT[i]);
-
- if (data->kind == tmp411) {
- data->temp_lowest[i] = i2c_smbus_read_byte_data(client,
- TMP411_TEMP_LOWEST_MSB[i]) << 8;
- data->temp_lowest[i] |= i2c_smbus_read_byte_data(
- client, TMP411_TEMP_LOWEST_LSB[i]);
-
- data->temp_highest[i] = i2c_smbus_read_byte_data(
- client, TMP411_TEMP_HIGHEST_MSB[i]) << 8;
- data->temp_highest[i] |= i2c_smbus_read_byte_data(
- client, TMP411_TEMP_HIGHEST_LSB[i]);
- }
- }
- return data;
-}
-
-static struct tmp401_data *tmp401_update_device(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct tmp401_data *data = i2c_get_clientdata(client);
-
- mutex_lock(&data->update_lock);
-
- if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
- data->status = i2c_smbus_read_byte_data(client, TMP401_STATUS);
- data->config = i2c_smbus_read_byte_data(client,
- TMP401_CONFIG_READ);
- tmp401_update_device_reg16(client, data);
-
- data->temp_crit_hyst = i2c_smbus_read_byte_data(client,
- TMP401_TEMP_CRIT_HYST);
-
- data->last_updated = jiffies;
- data->valid = 1;
- }
-
- mutex_unlock(&data->update_lock);
-
- return data;
-}
+static struct i2c_driver tmp401_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = "tmp401",
+ },
+ .probe = tmp401_probe,
+ .remove = tmp401_remove,
+ .id_table = tmp401_id,
+ .detect = tmp401_detect,
+ .address_list = normal_i2c,
+};
static int __init tmp401_init(void)
{
diff --git a/drivers/hwmon/ultra45_env.c b/drivers/hwmon/ultra45_env.c
index 68e90abeba96..5da5942cf970 100644
--- a/drivers/hwmon/ultra45_env.c
+++ b/drivers/hwmon/ultra45_env.c
@@ -300,8 +300,11 @@ static const struct of_device_id env_match[] = {
MODULE_DEVICE_TABLE(of, env_match);
static struct of_platform_driver env_driver = {
- .name = "ultra45_env",
- .match_table = env_match,
+ .driver = {
+ .name = "ultra45_env",
+ .owner = THIS_MODULE,
+ .of_match_table = env_match,
+ },
.probe = env_probe,
.remove = __devexit_p(env_remove),
};
diff --git a/drivers/hwmon/w83793.c b/drivers/hwmon/w83793.c
index 612807d97155..697202e27891 100644
--- a/drivers/hwmon/w83793.c
+++ b/drivers/hwmon/w83793.c
@@ -35,6 +35,7 @@
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/hwmon.h>
+#include <linux/smp_lock.h>
#include <linux/hwmon-vid.h>
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
@@ -1319,8 +1320,8 @@ static ssize_t watchdog_write(struct file *filp, const char __user *buf,
return count;
}
-static int watchdog_ioctl(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+static long watchdog_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
{
static struct watchdog_info ident = {
.options = WDIOF_KEEPALIVEPING |
@@ -1332,6 +1333,7 @@ static int watchdog_ioctl(struct inode *inode, struct file *filp,
int val, ret = 0;
struct w83793_data *data = filp->private_data;
+ lock_kernel();
switch (cmd) {
case WDIOC_GETSUPPORT:
if (!nowayout)
@@ -1385,7 +1387,7 @@ static int watchdog_ioctl(struct inode *inode, struct file *filp,
default:
ret = -ENOTTY;
}
-
+ unlock_kernel();
return ret;
}
@@ -1395,7 +1397,7 @@ static const struct file_operations watchdog_fops = {
.open = watchdog_open,
.release = watchdog_close,
.write = watchdog_write,
- .ioctl = watchdog_ioctl,
+ .unlocked_ioctl = watchdog_ioctl,
};
/*
diff --git a/drivers/i2c/algos/i2c-algo-pca.c b/drivers/i2c/algos/i2c-algo-pca.c
index dcdaf8e675bf..2b9a8f54bb2c 100644
--- a/drivers/i2c/algos/i2c-algo-pca.c
+++ b/drivers/i2c/algos/i2c-algo-pca.c
@@ -109,13 +109,13 @@ static void pca_stop(struct i2c_algo_pca_data *adap)
* returns after the address has been sent
*/
static int pca_address(struct i2c_algo_pca_data *adap,
- struct i2c_msg *msg)
+ struct i2c_msg *msg)
{
int sta = pca_get_con(adap);
int addr;
- addr = ( (0x7f & msg->addr) << 1 );
- if (msg->flags & I2C_M_RD )
+ addr = ((0x7f & msg->addr) << 1);
+ if (msg->flags & I2C_M_RD)
addr |= 1;
DEB2("=== SLAVE ADDRESS %#04x+%c=%#04x\n",
msg->addr, msg->flags & I2C_M_RD ? 'R' : 'W', addr);
@@ -134,7 +134,7 @@ static int pca_address(struct i2c_algo_pca_data *adap,
* Returns after the byte has been transmitted
*/
static int pca_tx_byte(struct i2c_algo_pca_data *adap,
- __u8 b)
+ __u8 b)
{
int sta = pca_get_con(adap);
DEB2("=== WRITE %#04x\n", b);
@@ -164,13 +164,13 @@ static void pca_rx_byte(struct i2c_algo_pca_data *adap,
* Returns after next byte has arrived.
*/
static int pca_rx_ack(struct i2c_algo_pca_data *adap,
- int ack)
+ int ack)
{
int sta = pca_get_con(adap);
sta &= ~(I2C_PCA_CON_STO|I2C_PCA_CON_STA|I2C_PCA_CON_SI|I2C_PCA_CON_AA);
- if ( ack )
+ if (ack)
sta |= I2C_PCA_CON_AA;
pca_set_con(adap, sta);
@@ -178,12 +178,12 @@ static int pca_rx_ack(struct i2c_algo_pca_data *adap,
}
static int pca_xfer(struct i2c_adapter *i2c_adap,
- struct i2c_msg *msgs,
- int num)
+ struct i2c_msg *msgs,
+ int num)
{
- struct i2c_algo_pca_data *adap = i2c_adap->algo_data;
- struct i2c_msg *msg = NULL;
- int curmsg;
+ struct i2c_algo_pca_data *adap = i2c_adap->algo_data;
+ struct i2c_msg *msg = NULL;
+ int curmsg;
int numbytes = 0;
int state;
int ret;
@@ -202,21 +202,21 @@ static int pca_xfer(struct i2c_adapter *i2c_adap,
DEB1("{{{ XFER %d messages\n", num);
- if (i2c_debug>=2) {
+ if (i2c_debug >= 2) {
for (curmsg = 0; curmsg < num; curmsg++) {
int addr, i;
msg = &msgs[curmsg];
addr = (0x7f & msg->addr) ;
- if (msg->flags & I2C_M_RD )
+ if (msg->flags & I2C_M_RD)
printk(KERN_INFO " [%02d] RD %d bytes from %#02x [%#02x, ...]\n",
- curmsg, msg->len, addr, (addr<<1) | 1);
+ curmsg, msg->len, addr, (addr << 1) | 1);
else {
printk(KERN_INFO " [%02d] WR %d bytes to %#02x [%#02x%s",
- curmsg, msg->len, addr, addr<<1,
+ curmsg, msg->len, addr, addr << 1,
msg->len == 0 ? "" : ", ");
- for(i=0; i < msg->len; i++)
+ for (i = 0; i < msg->len; i++)
printk("%#04x%s", msg->buf[i], i == msg->len - 1 ? "" : ", ");
printk("]\n");
}
@@ -305,7 +305,7 @@ static int pca_xfer(struct i2c_adapter *i2c_adap,
goto out;
case 0x58: /* Data byte has been received; NOT ACK has been returned */
- if ( numbytes == msg->len - 1 ) {
+ if (numbytes == msg->len - 1) {
pca_rx_byte(adap, &msg->buf[numbytes], 0);
curmsg++; numbytes = 0;
if (curmsg == num)
@@ -352,7 +352,7 @@ static int pca_xfer(struct i2c_adapter *i2c_adap,
static u32 pca_func(struct i2c_adapter *adap)
{
- return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
static const struct i2c_algorithm pca_algo = {
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 9c6170cd9aac..87ab0568bb0e 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -564,7 +564,7 @@ config I2C_STU300
config I2C_VERSATILE
tristate "ARM Versatile/Realview I2C bus support"
- depends on ARCH_VERSATILE || ARCH_REALVIEW
+ depends on ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS
select I2C_ALGOBIT
help
Say yes if you want to support the I2C serial bus on ARMs Versatile
diff --git a/drivers/i2c/busses/i2c-ali1535.c b/drivers/i2c/busses/i2c-ali1535.c
index bd8f1e4d9e6c..906a3ca50db6 100644
--- a/drivers/i2c/busses/i2c-ali1535.c
+++ b/drivers/i2c/busses/i2c-ali1535.c
@@ -60,7 +60,7 @@
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/acpi.h>
-#include <asm/io.h>
+#include <linux/io.h>
/* ALI1535 SMBus address offsets */
diff --git a/drivers/i2c/busses/i2c-ali15x3.c b/drivers/i2c/busses/i2c-ali15x3.c
index 659f63f5e4af..b14f6d68221d 100644
--- a/drivers/i2c/busses/i2c-ali15x3.c
+++ b/drivers/i2c/busses/i2c-ali15x3.c
@@ -67,7 +67,7 @@
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/acpi.h>
-#include <asm/io.h>
+#include <linux/io.h>
/* ALI15X3 SMBus address offsets */
#define SMBHSTSTS (0 + ali15x3_smba)
diff --git a/drivers/i2c/busses/i2c-amd756.c b/drivers/i2c/busses/i2c-amd756.c
index c5a9fa488e7f..03bcd07c4697 100644
--- a/drivers/i2c/busses/i2c-amd756.c
+++ b/drivers/i2c/busses/i2c-amd756.c
@@ -43,7 +43,7 @@
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/acpi.h>
-#include <asm/io.h>
+#include <linux/io.h>
/* AMD756 SMBus address offsets */
#define SMB_ADDR_OFFSET 0xE0
diff --git a/drivers/i2c/busses/i2c-amd8111.c b/drivers/i2c/busses/i2c-amd8111.c
index 2fbef27b6cd6..af1e5e254b7b 100644
--- a/drivers/i2c/busses/i2c-amd8111.c
+++ b/drivers/i2c/busses/i2c-amd8111.c
@@ -18,7 +18,7 @@
#include <linux/delay.h>
#include <linux/acpi.h>
#include <linux/slab.h>
-#include <asm/io.h>
+#include <linux/io.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR ("Vojtech Pavlik <vojtech@suse.cz>");
diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c
index 06e1ecb4919f..305c07504f7e 100644
--- a/drivers/i2c/busses/i2c-at91.c
+++ b/drivers/i2c/busses/i2c-at91.c
@@ -23,8 +23,7 @@
#include <linux/init.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
-
-#include <asm/io.h>
+#include <linux/io.h>
#include <mach/at91_twi.h>
#include <mach/board.h>
diff --git a/drivers/i2c/busses/i2c-bfin-twi.c b/drivers/i2c/busses/i2c-bfin-twi.c
index f1e14dd590c9..fb26e5c67515 100644
--- a/drivers/i2c/busses/i2c-bfin-twi.c
+++ b/drivers/i2c/busses/i2c-bfin-twi.c
@@ -25,8 +25,6 @@
#include <asm/portmux.h>
#include <asm/irq.h>
-#define POLL_TIMEOUT (2 * HZ)
-
/* SMBus mode*/
#define TWI_I2C_MODE_STANDARD 1
#define TWI_I2C_MODE_STANDARDSUB 2
@@ -44,8 +42,6 @@ struct bfin_twi_iface {
int cur_mode;
int manual_stop;
int result;
- int timeout_count;
- struct timer_list timeout_timer;
struct i2c_adapter adap;
struct completion complete;
struct i2c_msg *pmsg;
@@ -85,14 +81,15 @@ static const u16 pin_req[2][3] = {
{P_TWI1_SCL, P_TWI1_SDA, 0},
};
-static void bfin_twi_handle_interrupt(struct bfin_twi_iface *iface)
+static void bfin_twi_handle_interrupt(struct bfin_twi_iface *iface,
+ unsigned short twi_int_status)
{
- unsigned short twi_int_status = read_INT_STAT(iface);
unsigned short mast_stat = read_MASTER_STAT(iface);
if (twi_int_status & XMTSERV) {
/* Transmit next data */
if (iface->writeNum > 0) {
+ SSYNC();
write_XMT_DATA8(iface, *(iface->transPtr++));
iface->writeNum--;
}
@@ -114,10 +111,6 @@ static void bfin_twi_handle_interrupt(struct bfin_twi_iface *iface)
write_MASTER_CTL(iface,
(read_MASTER_CTL(iface) | RSTART) & ~MDIR);
}
- SSYNC();
- /* Clear status */
- write_INT_STAT(iface, XMTSERV);
- SSYNC();
}
if (twi_int_status & RCVSERV) {
if (iface->readNum > 0) {
@@ -139,7 +132,6 @@ static void bfin_twi_handle_interrupt(struct bfin_twi_iface *iface)
} else if (iface->manual_stop) {
write_MASTER_CTL(iface,
read_MASTER_CTL(iface) | STOP);
- SSYNC();
} else if (iface->cur_mode == TWI_I2C_MODE_REPEAT &&
iface->cur_msg + 1 < iface->msg_num) {
if (iface->pmsg[iface->cur_msg + 1].flags & I2C_M_RD)
@@ -148,44 +140,37 @@ static void bfin_twi_handle_interrupt(struct bfin_twi_iface *iface)
else
write_MASTER_CTL(iface,
(read_MASTER_CTL(iface) | RSTART) & ~MDIR);
- SSYNC();
}
- /* Clear interrupt source */
- write_INT_STAT(iface, RCVSERV);
- SSYNC();
}
if (twi_int_status & MERR) {
- write_INT_STAT(iface, MERR);
write_INT_MASK(iface, 0);
write_MASTER_STAT(iface, 0x3e);
write_MASTER_CTL(iface, 0);
- SSYNC();
iface->result = -EIO;
- /* if both err and complete int stats are set, return proper
- * results.
+
+ if (mast_stat & LOSTARB)
+ dev_dbg(&iface->adap.dev, "Lost Arbitration\n");
+ if (mast_stat & ANAK)
+ dev_dbg(&iface->adap.dev, "Address Not Acknowledged\n");
+ if (mast_stat & DNAK)
+ dev_dbg(&iface->adap.dev, "Data Not Acknowledged\n");
+ if (mast_stat & BUFRDERR)
+ dev_dbg(&iface->adap.dev, "Buffer Read Error\n");
+ if (mast_stat & BUFWRERR)
+ dev_dbg(&iface->adap.dev, "Buffer Write Error\n");
+
+ /* If it is a quick transfer, only address without data,
+ * not an err, return 1.
*/
- if (twi_int_status & MCOMP) {
- write_INT_STAT(iface, MCOMP);
- write_INT_MASK(iface, 0);
- write_MASTER_CTL(iface, 0);
- SSYNC();
- /* If it is a quick transfer, only address bug no data,
- * not an err, return 1.
- */
- if (iface->writeNum == 0 && (mast_stat & BUFRDERR))
- iface->result = 1;
- /* If address not acknowledged return -1,
- * else return 0.
- */
- else if (!(mast_stat & ANAK))
- iface->result = 0;
- }
+ if (iface->cur_mode == TWI_I2C_MODE_STANDARD &&
+ iface->transPtr == NULL &&
+ (twi_int_status & MCOMP) && (mast_stat & DNAK))
+ iface->result = 1;
+
complete(&iface->complete);
return;
}
if (twi_int_status & MCOMP) {
- write_INT_STAT(iface, MCOMP);
- SSYNC();
if (iface->cur_mode == TWI_I2C_MODE_COMBINED) {
if (iface->readNum == 0) {
/* set the read number to 1 and ask for manual
@@ -207,7 +192,6 @@ static void bfin_twi_handle_interrupt(struct bfin_twi_iface *iface)
/* remove restart bit and enable master receive */
write_MASTER_CTL(iface,
read_MASTER_CTL(iface) & ~RSTART);
- SSYNC();
} else if (iface->cur_mode == TWI_I2C_MODE_REPEAT &&
iface->cur_msg+1 < iface->msg_num) {
iface->cur_msg++;
@@ -226,7 +210,6 @@ static void bfin_twi_handle_interrupt(struct bfin_twi_iface *iface)
write_XMT_DATA8(iface,
*(iface->transPtr++));
iface->writeNum--;
- SSYNC();
}
}
@@ -244,15 +227,13 @@ static void bfin_twi_handle_interrupt(struct bfin_twi_iface *iface)
/* remove restart bit and enable master receive */
write_MASTER_CTL(iface,
read_MASTER_CTL(iface) & ~RSTART);
- SSYNC();
} else {
iface->result = 1;
write_INT_MASK(iface, 0);
write_MASTER_CTL(iface, 0);
- SSYNC();
- complete(&iface->complete);
}
}
+ complete(&iface->complete);
}
/* Interrupt handler */
@@ -260,38 +241,26 @@ static irqreturn_t bfin_twi_interrupt_entry(int irq, void *dev_id)
{
struct bfin_twi_iface *iface = dev_id;
unsigned long flags;
+ unsigned short twi_int_status;
spin_lock_irqsave(&iface->lock, flags);
- del_timer(&iface->timeout_timer);
- bfin_twi_handle_interrupt(iface);
- spin_unlock_irqrestore(&iface->lock, flags);
- return IRQ_HANDLED;
-}
-
-static void bfin_twi_timeout(unsigned long data)
-{
- struct bfin_twi_iface *iface = (struct bfin_twi_iface *)data;
- unsigned long flags;
-
- spin_lock_irqsave(&iface->lock, flags);
- bfin_twi_handle_interrupt(iface);
- if (iface->result == 0) {
- iface->timeout_count--;
- if (iface->timeout_count > 0) {
- iface->timeout_timer.expires = jiffies + POLL_TIMEOUT;
- add_timer(&iface->timeout_timer);
- } else {
- iface->result = -1;
- complete(&iface->complete);
- }
+ while (1) {
+ twi_int_status = read_INT_STAT(iface);
+ if (!twi_int_status)
+ break;
+ /* Clear interrupt status */
+ write_INT_STAT(iface, twi_int_status);
+ bfin_twi_handle_interrupt(iface, twi_int_status);
+ SSYNC();
}
spin_unlock_irqrestore(&iface->lock, flags);
+ return IRQ_HANDLED;
}
/*
- * Generic i2c master transfer entrypoint
+ * One i2c master transfer
*/
-static int bfin_twi_master_xfer(struct i2c_adapter *adap,
+static int bfin_twi_do_master_xfer(struct i2c_adapter *adap,
struct i2c_msg *msgs, int num)
{
struct bfin_twi_iface *iface = adap->algo_data;
@@ -319,7 +288,6 @@ static int bfin_twi_master_xfer(struct i2c_adapter *adap,
iface->transPtr = pmsg->buf;
iface->writeNum = iface->readNum = pmsg->len;
iface->result = 0;
- iface->timeout_count = 10;
init_completion(&(iface->complete));
/* Set Transmit device address */
write_MASTER_ADDR(iface, pmsg->addr);
@@ -358,30 +326,41 @@ static int bfin_twi_master_xfer(struct i2c_adapter *adap,
iface->manual_stop = 1;
}
- iface->timeout_timer.expires = jiffies + POLL_TIMEOUT;
- add_timer(&iface->timeout_timer);
-
/* Master enable */
write_MASTER_CTL(iface, read_MASTER_CTL(iface) | MEN |
((iface->read_write == I2C_SMBUS_READ) ? MDIR : 0) |
((CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ > 100) ? FAST : 0));
SSYNC();
- wait_for_completion(&iface->complete);
-
- rc = iface->result;
+ while (!iface->result) {
+ if (!wait_for_completion_timeout(&iface->complete,
+ adap->timeout)) {
+ iface->result = -1;
+ dev_err(&adap->dev, "master transfer timeout\n");
+ }
+ }
- if (rc == 1)
- return num;
+ if (iface->result == 1)
+ rc = iface->cur_msg + 1;
else
- return rc;
+ rc = iface->result;
+
+ return rc;
}
/*
- * SMBus type transfer entrypoint
+ * Generic i2c master transfer entrypoint
*/
+static int bfin_twi_master_xfer(struct i2c_adapter *adap,
+ struct i2c_msg *msgs, int num)
+{
+ return bfin_twi_do_master_xfer(adap, msgs, num);
+}
-int bfin_twi_smbus_xfer(struct i2c_adapter *adap, u16 addr,
+/*
+ * One I2C SMBus transfer
+ */
+int bfin_twi_do_smbus_xfer(struct i2c_adapter *adap, u16 addr,
unsigned short flags, char read_write,
u8 command, int size, union i2c_smbus_data *data)
{
@@ -469,7 +448,6 @@ int bfin_twi_smbus_xfer(struct i2c_adapter *adap, u16 addr,
iface->manual_stop = 0;
iface->read_write = read_write;
iface->command = command;
- iface->timeout_count = 10;
init_completion(&(iface->complete));
/* FIFO Initiation. Data in FIFO should be discarded before
@@ -486,9 +464,6 @@ int bfin_twi_smbus_xfer(struct i2c_adapter *adap, u16 addr,
write_MASTER_ADDR(iface, addr);
SSYNC();
- iface->timeout_timer.expires = jiffies + POLL_TIMEOUT;
- add_timer(&iface->timeout_timer);
-
switch (iface->cur_mode) {
case TWI_I2C_MODE_STANDARDSUB:
write_XMT_DATA8(iface, iface->command);
@@ -550,10 +525,8 @@ int bfin_twi_smbus_xfer(struct i2c_adapter *adap, u16 addr,
else if (iface->readNum > 255) {
write_MASTER_CTL(iface, 0xff << 6);
iface->manual_stop = 1;
- } else {
- del_timer(&iface->timeout_timer);
+ } else
break;
- }
}
}
write_INT_MASK(iface, MCOMP | MERR |
@@ -569,7 +542,13 @@ int bfin_twi_smbus_xfer(struct i2c_adapter *adap, u16 addr,
}
SSYNC();
- wait_for_completion(&iface->complete);
+ while (!iface->result) {
+ if (!wait_for_completion_timeout(&iface->complete,
+ adap->timeout)) {
+ iface->result = -1;
+ dev_err(&adap->dev, "smbus transfer timeout\n");
+ }
+ }
rc = (iface->result >= 0) ? 0 : -1;
@@ -577,6 +556,17 @@ int bfin_twi_smbus_xfer(struct i2c_adapter *adap, u16 addr,
}
/*
+ * Generic I2C SMBus transfer entrypoint
+ */
+int bfin_twi_smbus_xfer(struct i2c_adapter *adap, u16 addr,
+ unsigned short flags, char read_write,
+ u8 command, int size, union i2c_smbus_data *data)
+{
+ return bfin_twi_do_smbus_xfer(adap, addr, flags,
+ read_write, command, size, data);
+}
+
+/*
* Return what the adapter supports
*/
static u32 bfin_twi_functionality(struct i2c_adapter *adap)
@@ -667,10 +657,6 @@ static int i2c_bfin_twi_probe(struct platform_device *pdev)
goto out_error_no_irq;
}
- init_timer(&(iface->timeout_timer));
- iface->timeout_timer.function = bfin_twi_timeout;
- iface->timeout_timer.data = (unsigned long)iface;
-
p_adap = &iface->adap;
p_adap->nr = pdev->id;
strlcpy(p_adap->name, pdev->name, sizeof(p_adap->name));
@@ -678,6 +664,8 @@ static int i2c_bfin_twi_probe(struct platform_device *pdev)
p_adap->algo_data = iface;
p_adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
p_adap->dev.parent = &pdev->dev;
+ p_adap->timeout = 5 * HZ;
+ p_adap->retries = 3;
rc = peripheral_request_list(pin_req[pdev->id], "i2c-bfin-twi");
if (rc) {
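The Blackfin TWI rework above drops the auxiliary timeout timer entirely: the ISR now reads and clears INT_STAT in a loop and hands the status down, while the transfer paths block on wait_for_completion_timeout() against the adapter's timeout instead of a private POLL_TIMEOUT. A reduced sketch of the waiting side, keeping the driver's convention that result stays 0 while a transfer is in flight (struct and field names are illustrative):

#include <linux/completion.h>

struct demo_xfer {
	struct completion done;
	int result;			/* 0 = in progress, >0 = ok, <0 = error */
};

static int demo_wait(struct demo_xfer *x, unsigned long timeout)
{
	while (!x->result) {
		/* wait_for_completion_timeout() returns 0 on timeout */
		if (!wait_for_completion_timeout(&x->done, timeout))
			x->result = -1;	/* same sentinel the driver uses */
	}
	return x->result;
}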
diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c
index 9c2e10082b79..b02b4533651d 100644
--- a/drivers/i2c/busses/i2c-cpm.c
+++ b/drivers/i2c/busses/i2c-cpm.c
@@ -440,8 +440,8 @@ static int __devinit cpm_i2c_setup(struct cpm_i2c *cpm)
init_waitqueue_head(&cpm->i2c_wait);
- cpm->irq = of_irq_to_resource(ofdev->node, 0, NULL);
- if (cpm->irq == NO_IRQ)
+ cpm->irq = of_irq_to_resource(ofdev->dev.of_node, 0, NULL);
+ if (!cpm->irq)
return -EINVAL;
/* Install interrupt handler. */
@@ -451,13 +451,13 @@ static int __devinit cpm_i2c_setup(struct cpm_i2c *cpm)
return ret;
/* I2C parameter RAM */
- i2c_base = of_iomap(ofdev->node, 1);
+ i2c_base = of_iomap(ofdev->dev.of_node, 1);
if (i2c_base == NULL) {
ret = -EINVAL;
goto out_irq;
}
- if (of_device_is_compatible(ofdev->node, "fsl,cpm1-i2c")) {
+ if (of_device_is_compatible(ofdev->dev.of_node, "fsl,cpm1-i2c")) {
/* Check for and use a microcode relocation patch. */
cpm->i2c_ram = i2c_base;
@@ -474,7 +474,7 @@ static int __devinit cpm_i2c_setup(struct cpm_i2c *cpm)
cpm->version = 1;
- } else if (of_device_is_compatible(ofdev->node, "fsl,cpm2-i2c")) {
+ } else if (of_device_is_compatible(ofdev->dev.of_node, "fsl,cpm2-i2c")) {
cpm->i2c_addr = cpm_muram_alloc(sizeof(struct i2c_ram), 64);
cpm->i2c_ram = cpm_muram_addr(cpm->i2c_addr);
out_be16(i2c_base, cpm->i2c_addr);
@@ -489,24 +489,24 @@ static int __devinit cpm_i2c_setup(struct cpm_i2c *cpm)
}
/* I2C control/status registers */
- cpm->i2c_reg = of_iomap(ofdev->node, 0);
+ cpm->i2c_reg = of_iomap(ofdev->dev.of_node, 0);
if (cpm->i2c_reg == NULL) {
ret = -EINVAL;
goto out_ram;
}
- data = of_get_property(ofdev->node, "fsl,cpm-command", &len);
+ data = of_get_property(ofdev->dev.of_node, "fsl,cpm-command", &len);
if (!data || len != 4) {
ret = -EINVAL;
goto out_reg;
}
cpm->cp_command = *data;
- data = of_get_property(ofdev->node, "linux,i2c-class", &len);
+ data = of_get_property(ofdev->dev.of_node, "linux,i2c-class", &len);
if (data && len == 4)
cpm->adap.class = *data;
- data = of_get_property(ofdev->node, "clock-frequency", &len);
+ data = of_get_property(ofdev->dev.of_node, "clock-frequency", &len);
if (data && len == 4)
cpm->freq = *data;
else
@@ -661,7 +661,7 @@ static int __devinit cpm_i2c_probe(struct of_device *ofdev,
/* register new adapter to i2c module... */
- data = of_get_property(ofdev->node, "linux,i2c-index", &len);
+ data = of_get_property(ofdev->dev.of_node, "linux,i2c-index", &len);
if (data && len == 4) {
cpm->adap.nr = *data;
result = i2c_add_numbered_adapter(&cpm->adap);
@@ -679,7 +679,7 @@ static int __devinit cpm_i2c_probe(struct of_device *ofdev,
/*
* register OF I2C devices
*/
- of_register_i2c_devices(&cpm->adap, ofdev->node);
+ of_register_i2c_devices(&cpm->adap, ofdev->dev.of_node);
return 0;
out_shut:
@@ -718,13 +718,13 @@ static const struct of_device_id cpm_i2c_match[] = {
MODULE_DEVICE_TABLE(of, cpm_i2c_match);
static struct of_platform_driver cpm_i2c_driver = {
- .match_table = cpm_i2c_match,
.probe = cpm_i2c_probe,
.remove = __devexit_p(cpm_i2c_remove),
- .driver = {
- .name = "fsl-i2c-cpm",
- .owner = THIS_MODULE,
- }
+ .driver = {
+ .name = "fsl-i2c-cpm",
+ .owner = THIS_MODULE,
+ .of_match_table = cpm_i2c_match,
+ },
};
static int __init cpm_i2c_init(void)
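This driver and several below share the same device-tree API migration: the node pointer moves from ofdev->node to ofdev->dev.of_node, and the match table moves from a top-level .match_table into .driver.of_match_table. The registration end of that pattern looks roughly like the sketch below; names are hypothetical and the probe/remove callbacks are assumed to be defined elsewhere.

#include <linux/module.h>
#include <linux/of_platform.h>

static const struct of_device_id demo_i2c_match[] = {
	{ .compatible = "acme,demo-i2c" },	/* compatible string is made up */
	{},
};

static struct of_platform_driver demo_i2c_driver = {
	.probe	= demo_i2c_probe,		/* hypothetical callbacks */
	.remove	= demo_i2c_remove,
	.driver	= {
		.name		= "demo-i2c",
		.owner		= THIS_MODULE,
		.of_match_table	= demo_i2c_match,
	},
};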
diff --git a/drivers/i2c/busses/i2c-elektor.c b/drivers/i2c/busses/i2c-elektor.c
index 612255614a66..e5b1a3bf5b80 100644
--- a/drivers/i2c/busses/i2c-elektor.c
+++ b/drivers/i2c/busses/i2c-elektor.c
@@ -37,8 +37,8 @@
#include <linux/isa.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-pcf.h>
+#include <linux/io.h>
-#include <asm/io.h>
#include <asm/irq.h>
#include "../algos/i2c-algo-pcf.h"
diff --git a/drivers/i2c/busses/i2c-gpio.c b/drivers/i2c/busses/i2c-gpio.c
index c21077d248af..d9aa9a649e35 100644
--- a/drivers/i2c/busses/i2c-gpio.c
+++ b/drivers/i2c/busses/i2c-gpio.c
@@ -211,7 +211,7 @@ static int __init i2c_gpio_init(void)
return ret;
}
-module_init(i2c_gpio_init);
+subsys_initcall(i2c_gpio_init);
static void __exit i2c_gpio_exit(void)
{
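Registering the GPIO bit-banged bus at subsys_initcall time makes the adapter available before drivers that depend on it run their own module_init()-level initcalls in a built-in kernel; for a loadable module both macros resolve to the same init entry point. A trimmed sketch of the pattern:

#include <linux/init.h>

static int __init demo_bus_init(void)
{
	/* adapter registration would go here */
	return 0;
}
subsys_initcall(demo_bus_init);	/* runs earlier than module_init() when built in */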
diff --git a/drivers/i2c/busses/i2c-highlander.c b/drivers/i2c/busses/i2c-highlander.c
index ce87a902c94d..3df1bc80f37a 100644
--- a/drivers/i2c/busses/i2c-highlander.c
+++ b/drivers/i2c/busses/i2c-highlander.c
@@ -282,7 +282,6 @@ static int highlander_i2c_smbus_xfer(struct i2c_adapter *adap, u16 addr,
union i2c_smbus_data *data)
{
struct highlander_i2c_dev *dev = i2c_get_adapdata(adap);
- int read = read_write & I2C_SMBUS_READ;
u16 tmp;
init_completion(&dev->cmd_complete);
@@ -337,11 +336,11 @@ static int highlander_i2c_smbus_xfer(struct i2c_adapter *adap, u16 addr,
highlander_i2c_done(dev);
/* Set slave address */
- iowrite16((addr << 1) | read, dev->base + SMSMADR);
+ iowrite16((addr << 1) | read_write, dev->base + SMSMADR);
highlander_i2c_command(dev, command, dev->buf_len);
- if (read)
+ if (read_write == I2C_SMBUS_READ)
return highlander_i2c_read(dev);
else
return highlander_i2c_write(dev);
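The change above leans on the SMBus constants from <linux/i2c.h>: I2C_SMBUS_READ is 1 and I2C_SMBUS_WRITE is 0, so read_write can be ORed straight into the R/W bit of the slave-address word rather than being masked into a separate flag first. As a tiny illustrative helper:

#include <linux/i2c.h>

static u16 demo_addr_word(u16 addr, char read_write)
{
	/* bit 0 carries R/W; I2C_SMBUS_READ == 1, I2C_SMBUS_WRITE == 0 */
	return (addr << 1) | read_write;
}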
diff --git a/drivers/i2c/busses/i2c-hydra.c b/drivers/i2c/busses/i2c-hydra.c
index c767295ad1fb..9ff1695d8458 100644
--- a/drivers/i2c/busses/i2c-hydra.c
+++ b/drivers/i2c/busses/i2c-hydra.c
@@ -28,7 +28,7 @@
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/init.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <asm/hydra.h>
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 299b918455a3..f4b21f2bb8ed 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -138,6 +138,17 @@ static struct pci_dev *I801_dev;
#define FEATURE_I2C_BLOCK_READ (1 << 3)
static unsigned int i801_features;
+static const char *i801_feature_names[] = {
+ "SMBus PEC",
+ "Block buffer",
+ "Block process call",
+ "I2C block read",
+};
+
+static unsigned int disable_features;
+module_param(disable_features, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(disable_features, "Disable selected driver features");
+
/* Make sure the SMBus host is ready to start transmitting.
Return 0 if it is, -EBUSY if it is not. */
static int i801_check_pre(void)
@@ -341,9 +352,8 @@ static int i801_block_transaction_byte_by_byte(union i2c_smbus_data *data,
do {
msleep(1);
status = inb_p(SMBHSTSTS);
- }
- while ((!(status & SMBHSTSTS_BYTE_DONE))
- && (timeout++ < MAX_TIMEOUT));
+ } while ((!(status & SMBHSTSTS_BYTE_DONE))
+ && (timeout++ < MAX_TIMEOUT));
result = i801_check_post(status, timeout > MAX_TIMEOUT);
if (result < 0)
@@ -440,9 +450,9 @@ static int i801_block_transaction(union i2c_smbus_data *data, char read_write,
}
/* Return negative errno on error. */
-static s32 i801_access(struct i2c_adapter * adap, u16 addr,
+static s32 i801_access(struct i2c_adapter *adap, u16 addr,
unsigned short flags, char read_write, u8 command,
- int size, union i2c_smbus_data * data)
+ int size, union i2c_smbus_data *data)
{
int hwpec;
int block = 0;
@@ -511,7 +521,7 @@ static s32 i801_access(struct i2c_adapter * adap, u16 addr,
else
outb_p(inb_p(SMBAUXCTL) & (~SMBAUXCTL_CRC), SMBAUXCTL);
- if(block)
+ if (block)
ret = i801_block_transaction(data, read_write, size, hwpec);
else
ret = i801_transaction(xact | ENABLE_INT9);
@@ -523,9 +533,9 @@ static s32 i801_access(struct i2c_adapter * adap, u16 addr,
outb_p(inb_p(SMBAUXCTL) & ~(SMBAUXCTL_CRC | SMBAUXCTL_E32B),
SMBAUXCTL);
- if(block)
+ if (block)
return ret;
- if(ret)
+ if (ret)
return ret;
if ((read_write == I2C_SMBUS_WRITE) || (xact == I801_QUICK))
return 0;
@@ -585,7 +595,7 @@ static const struct pci_device_id i801_ids[] = {
{ 0, }
};
-MODULE_DEVICE_TABLE (pci, i801_ids);
+MODULE_DEVICE_TABLE(pci, i801_ids);
#if defined CONFIG_INPUT_APANEL || defined CONFIG_INPUT_APANEL_MODULE
static unsigned char apanel_addr;
@@ -689,10 +699,11 @@ static void __devinit dmi_check_onboard_devices(const struct dmi_header *dm,
}
#endif
-static int __devinit i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
+static int __devinit i801_probe(struct pci_dev *dev,
+ const struct pci_device_id *id)
{
unsigned char temp;
- int err;
+ int err, i;
#if defined CONFIG_SENSORS_FSCHMD || defined CONFIG_SENSORS_FSCHMD_MODULE
const char *vendor;
#endif
@@ -700,26 +711,28 @@ static int __devinit i801_probe(struct pci_dev *dev, const struct pci_device_id
I801_dev = dev;
i801_features = 0;
switch (dev->device) {
- case PCI_DEVICE_ID_INTEL_82801EB_3:
- case PCI_DEVICE_ID_INTEL_ESB_4:
- case PCI_DEVICE_ID_INTEL_ICH6_16:
- case PCI_DEVICE_ID_INTEL_ICH7_17:
- case PCI_DEVICE_ID_INTEL_ESB2_17:
- case PCI_DEVICE_ID_INTEL_ICH8_5:
- case PCI_DEVICE_ID_INTEL_ICH9_6:
- case PCI_DEVICE_ID_INTEL_TOLAPAI_1:
- case PCI_DEVICE_ID_INTEL_ICH10_4:
- case PCI_DEVICE_ID_INTEL_ICH10_5:
- case PCI_DEVICE_ID_INTEL_PCH_SMBUS:
- case PCI_DEVICE_ID_INTEL_CPT_SMBUS:
+ default:
i801_features |= FEATURE_I2C_BLOCK_READ;
/* fall through */
case PCI_DEVICE_ID_INTEL_82801DB_3:
i801_features |= FEATURE_SMBUS_PEC;
i801_features |= FEATURE_BLOCK_BUFFER;
+ /* fall through */
+ case PCI_DEVICE_ID_INTEL_82801CA_3:
+ case PCI_DEVICE_ID_INTEL_82801BA_2:
+ case PCI_DEVICE_ID_INTEL_82801AB_3:
+ case PCI_DEVICE_ID_INTEL_82801AA_3:
break;
}
+ /* Disable features on user request */
+ for (i = 0; i < ARRAY_SIZE(i801_feature_names); i++) {
+ if (i801_features & disable_features & (1 << i))
+ dev_notice(&dev->dev, "%s disabled by user\n",
+ i801_feature_names[i]);
+ }
+ i801_features &= ~disable_features;
+
err = pci_enable_device(dev);
if (err) {
dev_err(&dev->dev, "Failed to enable SMBus PCI device (%d)\n",
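The new disable_features parameter above pairs a writable bitmask with a table of human-readable feature names, so each bit the user clears is reported once before being masked out of the detected feature set. A condensed sketch of that pattern with invented feature bits:

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/device.h>

#define DEMO_FEAT_A	(1 << 0)
#define DEMO_FEAT_B	(1 << 1)

static unsigned int demo_features = DEMO_FEAT_A | DEMO_FEAT_B;
static unsigned int disable_features;
module_param(disable_features, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(disable_features, "Disable selected driver features");

static const char *demo_feature_names[] = { "feature A", "feature B" };

static void demo_apply_feature_mask(struct device *dev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(demo_feature_names); i++)
		if (demo_features & disable_features & (1 << i))
			dev_notice(dev, "%s disabled by user\n",
				   demo_feature_names[i]);
	demo_features &= ~disable_features;
}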
diff --git a/drivers/i2c/busses/i2c-ibm_iic.c b/drivers/i2c/busses/i2c-ibm_iic.c
index b1bc6e277d2a..bf344135647a 100644
--- a/drivers/i2c/busses/i2c-ibm_iic.c
+++ b/drivers/i2c/busses/i2c-ibm_iic.c
@@ -39,7 +39,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <asm/irq.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <linux/i2c.h>
#include <linux/i2c-id.h>
#include <linux/of_platform.h>
@@ -664,16 +664,16 @@ static inline u8 iic_clckdiv(unsigned int opb)
static int __devinit iic_request_irq(struct of_device *ofdev,
struct ibm_iic_private *dev)
{
- struct device_node *np = ofdev->node;
+ struct device_node *np = ofdev->dev.of_node;
int irq;
if (iic_force_poll)
- return NO_IRQ;
+ return 0;
irq = irq_of_parse_and_map(np, 0);
- if (irq == NO_IRQ) {
+ if (!irq) {
dev_err(&ofdev->dev, "irq_of_parse_and_map failed\n");
- return NO_IRQ;
+ return 0;
}
/* Disable interrupts until we finish initialization, assumes
@@ -683,7 +683,7 @@ static int __devinit iic_request_irq(struct of_device *ofdev,
if (request_irq(irq, iic_handler, 0, "IBM IIC", dev)) {
dev_err(&ofdev->dev, "request_irq %d failed\n", irq);
/* Fallback to the polling mode */
- return NO_IRQ;
+ return 0;
}
return irq;
@@ -695,7 +695,7 @@ static int __devinit iic_request_irq(struct of_device *ofdev,
static int __devinit iic_probe(struct of_device *ofdev,
const struct of_device_id *match)
{
- struct device_node *np = ofdev->node;
+ struct device_node *np = ofdev->dev.of_node;
struct ibm_iic_private *dev;
struct i2c_adapter *adap;
const u32 *freq;
@@ -719,7 +719,7 @@ static int __devinit iic_probe(struct of_device *ofdev,
init_waitqueue_head(&dev->wq);
dev->irq = iic_request_irq(ofdev, dev);
- if (dev->irq == NO_IRQ)
+ if (!dev->irq)
dev_warn(&ofdev->dev, "using polling mode\n");
/* Board specific settings */
@@ -766,7 +766,7 @@ static int __devinit iic_probe(struct of_device *ofdev,
return 0;
error_cleanup:
- if (dev->irq != NO_IRQ) {
+ if (dev->irq) {
iic_interrupt_mode(dev, 0);
free_irq(dev->irq, dev);
}
@@ -790,7 +790,7 @@ static int __devexit iic_remove(struct of_device *ofdev)
i2c_del_adapter(&dev->adap);
- if (dev->irq != NO_IRQ) {
+ if (dev->irq) {
iic_interrupt_mode(dev, 0);
free_irq(dev->irq, dev);
}
@@ -807,8 +807,11 @@ static const struct of_device_id ibm_iic_match[] = {
};
static struct of_platform_driver ibm_iic_driver = {
- .name = "ibm-iic",
- .match_table = ibm_iic_match,
+ .driver = {
+ .name = "ibm-iic",
+ .owner = THIS_MODULE,
+ .of_match_table = ibm_iic_match,
+ },
.probe = iic_probe,
.remove = __devexit_p(iic_remove),
};
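The NO_IRQ removals in this driver (and in i2c-mpc below) rest on irq_of_parse_and_map() returning 0 when the node has no usable interrupt mapping, so a plain !irq test works across architectures that define NO_IRQ differently or not at all. In isolation, with a hypothetical helper name:

#include <linux/errno.h>
#include <linux/of_irq.h>

static int demo_get_irq(struct device_node *np)
{
	int irq = irq_of_parse_and_map(np, 0);

	if (!irq)		/* 0 means "no interrupt"; caller may fall back to polling */
		return -ENODEV;
	return irq;
}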
diff --git a/drivers/i2c/busses/i2c-iop3xx.c b/drivers/i2c/busses/i2c-iop3xx.c
index 5901707fc66a..112c61f7b8cd 100644
--- a/drivers/i2c/busses/i2c-iop3xx.c
+++ b/drivers/i2c/busses/i2c-iop3xx.c
@@ -38,8 +38,7 @@
#include <linux/errno.h>
#include <linux/platform_device.h>
#include <linux/i2c.h>
-
-#include <asm/io.h>
+#include <linux/io.h>
#include "i2c-iop3xx.h"
diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c
index f1321f763789..df00eb1f11f9 100644
--- a/drivers/i2c/busses/i2c-mpc.c
+++ b/drivers/i2c/busses/i2c-mpc.c
@@ -118,7 +118,7 @@ static int i2c_wait(struct mpc_i2c *i2c, unsigned timeout, int writing)
u32 x;
int result = 0;
- if (i2c->irq == NO_IRQ) {
+ if (!i2c->irq) {
while (!(readb(i2c->base + MPC_I2C_SR) & CSR_MIF)) {
schedule();
if (time_after(jiffies, orig_jiffies + timeout)) {
@@ -560,15 +560,15 @@ static int __devinit fsl_i2c_probe(struct of_device *op,
init_waitqueue_head(&i2c->queue);
- i2c->base = of_iomap(op->node, 0);
+ i2c->base = of_iomap(op->dev.of_node, 0);
if (!i2c->base) {
dev_err(i2c->dev, "failed to map controller\n");
result = -ENOMEM;
goto fail_map;
}
- i2c->irq = irq_of_parse_and_map(op->node, 0);
- if (i2c->irq != NO_IRQ) { /* i2c->irq = NO_IRQ implies polling */
+ i2c->irq = irq_of_parse_and_map(op->dev.of_node, 0);
+ if (i2c->irq) { /* no i2c->irq implies polling */
result = request_irq(i2c->irq, mpc_i2c_isr,
IRQF_SHARED, "i2c-mpc", i2c);
if (result < 0) {
@@ -577,21 +577,22 @@ static int __devinit fsl_i2c_probe(struct of_device *op,
}
}
- if (of_get_property(op->node, "fsl,preserve-clocking", NULL)) {
+ if (of_get_property(op->dev.of_node, "fsl,preserve-clocking", NULL)) {
clock = MPC_I2C_CLOCK_PRESERVE;
} else {
- prop = of_get_property(op->node, "clock-frequency", &plen);
+ prop = of_get_property(op->dev.of_node, "clock-frequency",
+ &plen);
if (prop && plen == sizeof(u32))
clock = *prop;
}
if (match->data) {
struct mpc_i2c_data *data = match->data;
- data->setup(op->node, i2c, clock, data->prescaler);
+ data->setup(op->dev.of_node, i2c, clock, data->prescaler);
} else {
/* Backwards compatibility */
- if (of_get_property(op->node, "dfsrr", NULL))
- mpc_i2c_setup_8xxx(op->node, i2c, clock, 0);
+ if (of_get_property(op->dev.of_node, "dfsrr", NULL))
+ mpc_i2c_setup_8xxx(op->dev.of_node, i2c, clock, 0);
}
dev_set_drvdata(&op->dev, i2c);
@@ -605,7 +606,7 @@ static int __devinit fsl_i2c_probe(struct of_device *op,
dev_err(i2c->dev, "failed to add adapter\n");
goto fail_add;
}
- of_register_i2c_devices(&i2c->adap, op->node);
+ of_register_i2c_devices(&i2c->adap, op->dev.of_node);
return result;
@@ -627,7 +628,7 @@ static int __devexit fsl_i2c_remove(struct of_device *op)
i2c_del_adapter(&i2c->adap);
dev_set_drvdata(&op->dev, NULL);
- if (i2c->irq != NO_IRQ)
+ if (i2c->irq)
free_irq(i2c->irq, i2c);
irq_dispose_mapping(i2c->irq);
@@ -674,12 +675,12 @@ MODULE_DEVICE_TABLE(of, mpc_i2c_of_match);
/* Structure for a device driver */
static struct of_platform_driver mpc_i2c_driver = {
- .match_table = mpc_i2c_of_match,
.probe = fsl_i2c_probe,
.remove = __devexit_p(fsl_i2c_remove),
- .driver = {
- .owner = THIS_MODULE,
- .name = DRV_NAME,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DRV_NAME,
+ .of_match_table = mpc_i2c_of_match,
},
};
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index 3623a4499084..16242063144f 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -17,8 +17,7 @@
#include <linux/interrupt.h>
#include <linux/mv643xx_i2c.h>
#include <linux/platform_device.h>
-
-#include <asm/io.h>
+#include <linux/io.h>
/* Register defines */
#define MV64XXX_I2C_REG_SLAVE_ADDR 0x00
diff --git a/drivers/i2c/busses/i2c-nforce2.c b/drivers/i2c/busses/i2c-nforce2.c
index 4a48dd4ef787..a605a5029cfe 100644
--- a/drivers/i2c/busses/i2c-nforce2.c
+++ b/drivers/i2c/busses/i2c-nforce2.c
@@ -57,7 +57,7 @@
#include <linux/dmi.h>
#include <linux/acpi.h>
#include <linux/slab.h>
-#include <asm/io.h>
+#include <linux/io.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR ("Hans-Frieder Vogt <hfvogt@gmx.net>");
@@ -404,10 +404,9 @@ static int __devinit nforce2_probe(struct pci_dev *dev, const struct pci_device_
/* SMBus adapter 1 */
res1 = nforce2_probe_smb(dev, 4, NFORCE_PCI_SMB1, &smbuses[0], "SMB1");
- if (res1 < 0) {
- dev_err(&dev->dev, "Error probing SMB1.\n");
+ if (res1 < 0)
smbuses[0].base = 0; /* to have a check value */
- }
+
/* SMBus adapter 2 */
if (dmi_check_system(nforce2_dmi_blacklist2)) {
dev_err(&dev->dev, "Disabling SMB2 for safety reasons.\n");
@@ -416,11 +415,10 @@ static int __devinit nforce2_probe(struct pci_dev *dev, const struct pci_device_
} else {
res2 = nforce2_probe_smb(dev, 5, NFORCE_PCI_SMB2, &smbuses[1],
"SMB2");
- if (res2 < 0) {
- dev_err(&dev->dev, "Error probing SMB2.\n");
+ if (res2 < 0)
smbuses[1].base = 0; /* to have a check value */
- }
}
+
if ((res1 < 0) && (res2 < 0)) {
/* we did not find even one of the SMBuses, so we give up */
kfree(smbuses);
diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c
index a4f8d33fa389..73de8ade10b1 100644
--- a/drivers/i2c/busses/i2c-nomadik.c
+++ b/drivers/i2c/busses/i2c-nomadik.c
@@ -704,7 +704,8 @@ static irqreturn_t i2c_irq_handler(int irq, void *arg)
case I2C_IT_MTD:
case I2C_IT_MTDWS:
if (dev->cli.operation == I2C_READ) {
- while (!readl(dev->virtbase + I2C_RISR) & I2C_IT_RXFE) {
+ while (!(readl(dev->virtbase + I2C_RISR)
+ & I2C_IT_RXFE)) {
if (dev->cli.count == 0)
break;
*dev->cli.buffer =
@@ -914,6 +915,7 @@ static int __devinit nmk_i2c_probe(struct platform_device *pdev)
static int __devexit nmk_i2c_remove(struct platform_device *pdev)
{
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
struct nmk_i2c_dev *dev = platform_get_drvdata(pdev);
i2c_del_adapter(&dev->adap);
@@ -924,6 +926,8 @@ static int __devexit nmk_i2c_remove(struct platform_device *pdev)
i2c_clr_bit(dev->virtbase + I2C_CR, I2C_CR_PE);
free_irq(dev->irq, dev);
iounmap(dev->virtbase);
+ if (res)
+ release_mem_region(res->start, resource_size(res));
clk_disable(dev->clk);
clk_put(dev->clk);
platform_set_drvdata(pdev, NULL);
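The remove-path addition above closes a leak: probe claims the register window with request_mem_region(), so remove has to release it again after unmapping. The balanced shape of that teardown, reduced to just the resource handling and with hypothetical names:

#include <linux/platform_device.h>
#include <linux/ioport.h>

static int demo_remove(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	/* ... unregister the adapter, free the IRQ, iounmap the base ... */
	if (res)
		release_mem_region(res->start, resource_size(res));
	return 0;
}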
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
index b4ed4ca802ed..0070371b29f3 100644
--- a/drivers/i2c/busses/i2c-ocores.c
+++ b/drivers/i2c/busses/i2c-ocores.c
@@ -19,7 +19,7 @@
#include <linux/wait.h>
#include <linux/i2c-ocores.h>
#include <linux/slab.h>
-#include <asm/io.h>
+#include <linux/io.h>
struct ocores_i2c {
void __iomem *base;
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 389ac6032a7b..7674efb55378 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -38,6 +38,7 @@
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/slab.h>
+#include <linux/i2c-omap.h>
/* I2C controller revisions */
#define OMAP_I2C_REV_2 0x20
@@ -45,29 +46,37 @@
/* I2C controller revisions present on specific hardware */
#define OMAP_I2C_REV_ON_2430 0x36
#define OMAP_I2C_REV_ON_3430 0x3C
+#define OMAP_I2C_REV_ON_4430 0x40
/* timeout waiting for the controller to respond */
#define OMAP_I2C_TIMEOUT (msecs_to_jiffies(1000))
-#define OMAP_I2C_REV_REG 0x00
-#define OMAP_I2C_IE_REG 0x01
-#define OMAP_I2C_STAT_REG 0x02
-#define OMAP_I2C_IV_REG 0x03
/* For OMAP3 I2C_IV has changed to I2C_WE (wakeup enable) */
-#define OMAP_I2C_WE_REG 0x03
-#define OMAP_I2C_SYSS_REG 0x04
-#define OMAP_I2C_BUF_REG 0x05
-#define OMAP_I2C_CNT_REG 0x06
-#define OMAP_I2C_DATA_REG 0x07
-#define OMAP_I2C_SYSC_REG 0x08
-#define OMAP_I2C_CON_REG 0x09
-#define OMAP_I2C_OA_REG 0x0a
-#define OMAP_I2C_SA_REG 0x0b
-#define OMAP_I2C_PSC_REG 0x0c
-#define OMAP_I2C_SCLL_REG 0x0d
-#define OMAP_I2C_SCLH_REG 0x0e
-#define OMAP_I2C_SYSTEST_REG 0x0f
-#define OMAP_I2C_BUFSTAT_REG 0x10
+enum {
+ OMAP_I2C_REV_REG = 0,
+ OMAP_I2C_IE_REG,
+ OMAP_I2C_STAT_REG,
+ OMAP_I2C_IV_REG,
+ OMAP_I2C_WE_REG,
+ OMAP_I2C_SYSS_REG,
+ OMAP_I2C_BUF_REG,
+ OMAP_I2C_CNT_REG,
+ OMAP_I2C_DATA_REG,
+ OMAP_I2C_SYSC_REG,
+ OMAP_I2C_CON_REG,
+ OMAP_I2C_OA_REG,
+ OMAP_I2C_SA_REG,
+ OMAP_I2C_PSC_REG,
+ OMAP_I2C_SCLL_REG,
+ OMAP_I2C_SCLH_REG,
+ OMAP_I2C_SYSTEST_REG,
+ OMAP_I2C_BUFSTAT_REG,
+ OMAP_I2C_REVNB_LO,
+ OMAP_I2C_REVNB_HI,
+ OMAP_I2C_IRQSTATUS_RAW,
+ OMAP_I2C_IRQENABLE_SET,
+ OMAP_I2C_IRQENABLE_CLR,
+};
/* I2C Interrupt Enable Register (OMAP_I2C_IE): */
#define OMAP_I2C_IE_XDR (1 << 14) /* TX Buffer drain int enable */
@@ -157,6 +166,9 @@
#define SYSC_IDLEMODE_SMART 0x2
#define SYSC_CLOCKACTIVITY_FCLK 0x2
+/* Errata definitions */
+#define I2C_OMAP_ERRATA_I207 (1 << 0)
+#define I2C_OMAP3_1P153 (1 << 1)
struct omap_i2c_dev {
struct device *dev;
@@ -167,9 +179,13 @@ struct omap_i2c_dev {
struct clk *fclk; /* Functional clock */
struct completion cmd_complete;
struct resource *ioarea;
+ u32 latency; /* maximum mpu wkup latency */
+ void (*set_mpu_wkup_lat)(struct device *dev,
+ long latency);
u32 speed; /* Speed of bus in Khz */
u16 cmd_err;
u8 *buf;
+ u8 *regs;
size_t buf_len;
struct i2c_adapter adapter;
u8 fifo_size; /* use as flag and value
@@ -186,17 +202,67 @@ struct omap_i2c_dev {
u16 bufstate;
u16 syscstate;
u16 westate;
+ u16 errata;
+};
+
+const static u8 reg_map[] = {
+ [OMAP_I2C_REV_REG] = 0x00,
+ [OMAP_I2C_IE_REG] = 0x01,
+ [OMAP_I2C_STAT_REG] = 0x02,
+ [OMAP_I2C_IV_REG] = 0x03,
+ [OMAP_I2C_WE_REG] = 0x03,
+ [OMAP_I2C_SYSS_REG] = 0x04,
+ [OMAP_I2C_BUF_REG] = 0x05,
+ [OMAP_I2C_CNT_REG] = 0x06,
+ [OMAP_I2C_DATA_REG] = 0x07,
+ [OMAP_I2C_SYSC_REG] = 0x08,
+ [OMAP_I2C_CON_REG] = 0x09,
+ [OMAP_I2C_OA_REG] = 0x0a,
+ [OMAP_I2C_SA_REG] = 0x0b,
+ [OMAP_I2C_PSC_REG] = 0x0c,
+ [OMAP_I2C_SCLL_REG] = 0x0d,
+ [OMAP_I2C_SCLH_REG] = 0x0e,
+ [OMAP_I2C_SYSTEST_REG] = 0x0f,
+ [OMAP_I2C_BUFSTAT_REG] = 0x10,
+};
+
+const static u8 omap4_reg_map[] = {
+ [OMAP_I2C_REV_REG] = 0x04,
+ [OMAP_I2C_IE_REG] = 0x2c,
+ [OMAP_I2C_STAT_REG] = 0x28,
+ [OMAP_I2C_IV_REG] = 0x34,
+ [OMAP_I2C_WE_REG] = 0x34,
+ [OMAP_I2C_SYSS_REG] = 0x90,
+ [OMAP_I2C_BUF_REG] = 0x94,
+ [OMAP_I2C_CNT_REG] = 0x98,
+ [OMAP_I2C_DATA_REG] = 0x9c,
+ [OMAP_I2C_SYSC_REG] = 0x20,
+ [OMAP_I2C_CON_REG] = 0xa4,
+ [OMAP_I2C_OA_REG] = 0xa8,
+ [OMAP_I2C_SA_REG] = 0xac,
+ [OMAP_I2C_PSC_REG] = 0xb0,
+ [OMAP_I2C_SCLL_REG] = 0xb4,
+ [OMAP_I2C_SCLH_REG] = 0xb8,
+ [OMAP_I2C_SYSTEST_REG] = 0xbC,
+ [OMAP_I2C_BUFSTAT_REG] = 0xc0,
+ [OMAP_I2C_REVNB_LO] = 0x00,
+ [OMAP_I2C_REVNB_HI] = 0x04,
+ [OMAP_I2C_IRQSTATUS_RAW] = 0x24,
+ [OMAP_I2C_IRQENABLE_SET] = 0x2c,
+ [OMAP_I2C_IRQENABLE_CLR] = 0x30,
};
static inline void omap_i2c_write_reg(struct omap_i2c_dev *i2c_dev,
int reg, u16 val)
{
- __raw_writew(val, i2c_dev->base + (reg << i2c_dev->reg_shift));
+ __raw_writew(val, i2c_dev->base +
+ (i2c_dev->regs[reg] << i2c_dev->reg_shift));
}
static inline u16 omap_i2c_read_reg(struct omap_i2c_dev *i2c_dev, int reg)
{
- return __raw_readw(i2c_dev->base + (reg << i2c_dev->reg_shift));
+ return __raw_readw(i2c_dev->base +
+ (i2c_dev->regs[reg] << i2c_dev->reg_shift));
}
static int __init omap_i2c_get_clocks(struct omap_i2c_dev *dev)
@@ -265,7 +331,11 @@ static void omap_i2c_idle(struct omap_i2c_dev *dev)
WARN_ON(dev->idle);
dev->iestate = omap_i2c_read_reg(dev, OMAP_I2C_IE_REG);
- omap_i2c_write_reg(dev, OMAP_I2C_IE_REG, 0);
+ if (dev->rev >= OMAP_I2C_REV_ON_4430)
+ omap_i2c_write_reg(dev, OMAP_I2C_IRQENABLE_CLR, 1);
+ else
+ omap_i2c_write_reg(dev, OMAP_I2C_IE_REG, 0);
+
if (dev->rev < OMAP_I2C_REV_2) {
iv = omap_i2c_read_reg(dev, OMAP_I2C_IV_REG); /* Read clears */
} else {
@@ -330,7 +400,9 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
* REVISIT: Some wkup sources might not be needed.
*/
dev->westate = OMAP_I2C_WE_ALL;
- omap_i2c_write_reg(dev, OMAP_I2C_WE_REG, dev->westate);
+ if (dev->rev < OMAP_I2C_REV_ON_4430)
+ omap_i2c_write_reg(dev, OMAP_I2C_WE_REG,
+ dev->westate);
}
}
omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0);
@@ -357,7 +429,7 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
psc = fclk_rate / 12000000;
}
- if (cpu_is_omap2430() || cpu_is_omap34xx()) {
+ if (!(cpu_class_is_omap1() || cpu_is_omap2420())) {
/*
* HSI2C controller internal clk rate should be 19.2 Mhz for
@@ -430,6 +502,11 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
/* Take the I2C module out of reset: */
omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, OMAP_I2C_CON_EN);
+ dev->errata = 0;
+
+ if (cpu_is_omap2430() || cpu_is_omap34xx())
+ dev->errata |= I2C_OMAP_ERRATA_I207;
+
/* Enable interrupts */
dev->iestate = (OMAP_I2C_IE_XRDY | OMAP_I2C_IE_RRDY |
OMAP_I2C_IE_ARDY | OMAP_I2C_IE_NACK |
@@ -539,8 +616,12 @@ static int omap_i2c_xfer_msg(struct i2c_adapter *adap,
* REVISIT: We should abort the transfer on signals, but the bus goes
* into arbitration and we're currently unable to recover from it.
*/
+ if (dev->set_mpu_wkup_lat != NULL)
+ dev->set_mpu_wkup_lat(dev->dev, dev->latency);
r = wait_for_completion_timeout(&dev->cmd_complete,
OMAP_I2C_TIMEOUT);
+ if (dev->set_mpu_wkup_lat != NULL)
+ dev->set_mpu_wkup_lat(dev->dev, -1);
dev->buf_len = 0;
if (r < 0)
return r;
@@ -623,6 +704,34 @@ omap_i2c_ack_stat(struct omap_i2c_dev *dev, u16 stat)
omap_i2c_write_reg(dev, OMAP_I2C_STAT_REG, stat);
}
+static inline void i2c_omap_errata_i207(struct omap_i2c_dev *dev, u16 stat)
+{
+ /*
+ * I2C Errata(Errata Nos. OMAP2: 1.67, OMAP3: 1.8)
+ * Not applicable for OMAP4.
+ * Under certain rare conditions, RDR could be set again
+ * when the bus is busy, then ignore the interrupt and
+ * clear the interrupt.
+ */
+ if (stat & OMAP_I2C_STAT_RDR) {
+ /* Step 1: If RDR is set, clear it */
+ omap_i2c_ack_stat(dev, OMAP_I2C_STAT_RDR);
+
+ /* Step 2: */
+ if (!(omap_i2c_read_reg(dev, OMAP_I2C_STAT_REG)
+ & OMAP_I2C_STAT_BB)) {
+
+ /* Step 3: */
+ if (omap_i2c_read_reg(dev, OMAP_I2C_STAT_REG)
+ & OMAP_I2C_STAT_RDR) {
+ omap_i2c_ack_stat(dev, OMAP_I2C_STAT_RDR);
+ dev_dbg(dev->dev, "RDR when bus is busy.\n");
+ }
+
+ }
+ }
+}
+
/* rev1 devices are apparently only on some 15xx */
#ifdef CONFIG_ARCH_OMAP15XX
@@ -684,6 +793,35 @@ omap_i2c_rev1_isr(int this_irq, void *dev_id)
#define omap_i2c_rev1_isr NULL
#endif
+/*
+ * OMAP3430 Errata 1.153: When an XRDY/XDR is hit, wait for XUDF before writing
+ * data to DATA_REG. Otherwise some data bytes can be lost while transferring
+ * them from the memory to the I2C interface.
+ */
+static int errata_omap3_1p153(struct omap_i2c_dev *dev, u16 *stat, int *err)
+{
+ unsigned long timeout = 10000;
+
+ while (--timeout && !(*stat & OMAP_I2C_STAT_XUDF)) {
+ if (*stat & (OMAP_I2C_STAT_NACK | OMAP_I2C_STAT_AL)) {
+ omap_i2c_ack_stat(dev, *stat & (OMAP_I2C_STAT_XRDY |
+ OMAP_I2C_STAT_XDR));
+ *err |= OMAP_I2C_STAT_XUDF;
+ return -ETIMEDOUT;
+ }
+
+ cpu_relax();
+ *stat = omap_i2c_read_reg(dev, OMAP_I2C_STAT_REG);
+ }
+
+ if (!timeout) {
+ dev_err(dev->dev, "timeout waiting on XUDF bit\n");
+ return 0;
+ }
+
+ return 0;
+}
+
static irqreturn_t
omap_i2c_isr(int this_irq, void *dev_id)
{
@@ -733,6 +871,10 @@ complete:
}
if (stat & (OMAP_I2C_STAT_RRDY | OMAP_I2C_STAT_RDR)) {
u8 num_bytes = 1;
+
+ if (dev->errata & I2C_OMAP_ERRATA_I207)
+ i2c_omap_errata_i207(dev, stat);
+
if (dev->fifo_size) {
if (stat & OMAP_I2C_STAT_RRDY)
num_bytes = dev->fifo_size;
@@ -747,9 +889,12 @@ complete:
if (dev->buf_len) {
*dev->buf++ = w;
dev->buf_len--;
- /* Data reg from 2430 is 8 bit wide */
- if (!cpu_is_omap2430() &&
- !cpu_is_omap34xx()) {
+ /*
+ * Data reg in 2430, omap3 and
+ * omap4 is 8 bit wide
+ */
+ if (cpu_class_is_omap1() ||
+ cpu_is_omap2420()) {
if (dev->buf_len) {
*dev->buf++ = w >> 8;
dev->buf_len--;
@@ -787,9 +932,12 @@ complete:
if (dev->buf_len) {
w = *dev->buf++;
dev->buf_len--;
- /* Data reg from 2430 is 8 bit wide */
- if (!cpu_is_omap2430() &&
- !cpu_is_omap34xx()) {
+ /*
+ * Data reg in 2430, omap3 and
+ * omap4 is 8 bit wide
+ */
+ if (cpu_class_is_omap1() ||
+ cpu_is_omap2420()) {
if (dev->buf_len) {
w |= *dev->buf++ << 8;
dev->buf_len--;
@@ -807,25 +955,9 @@ complete:
break;
}
- /*
- * OMAP3430 Errata 1.153: When an XRDY/XDR
- * is hit, wait for XUDF before writing data
- * to DATA_REG. Otherwise some data bytes can
- * be lost while transferring them from the
- * memory to the I2C interface.
- */
-
- if (dev->rev <= OMAP_I2C_REV_ON_3430) {
- while (!(stat & OMAP_I2C_STAT_XUDF)) {
- if (stat & (OMAP_I2C_STAT_NACK | OMAP_I2C_STAT_AL)) {
- omap_i2c_ack_stat(dev, stat & (OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR));
- err |= OMAP_I2C_STAT_XUDF;
- goto complete;
- }
- cpu_relax();
- stat = omap_i2c_read_reg(dev, OMAP_I2C_STAT_REG);
- }
- }
+ if ((dev->errata & I2C_OMAP3_1P153) &&
+ errata_omap3_1p153(dev, &stat, &err))
+ goto complete;
omap_i2c_write_reg(dev, OMAP_I2C_DATA_REG, w);
}
@@ -857,6 +989,7 @@ omap_i2c_probe(struct platform_device *pdev)
struct omap_i2c_dev *dev;
struct i2c_adapter *adap;
struct resource *mem, *irq, *ioarea;
+ struct omap_i2c_bus_platform_data *pdata = pdev->dev.platform_data;
irq_handler_t isr;
int r;
u32 speed = 0;
@@ -886,10 +1019,13 @@ omap_i2c_probe(struct platform_device *pdev)
goto err_release_region;
}
- if (pdev->dev.platform_data != NULL)
- speed = *(u32 *)pdev->dev.platform_data;
- else
- speed = 100; /* Defualt speed */
+ if (pdata != NULL) {
+ speed = pdata->clkrate;
+ dev->set_mpu_wkup_lat = pdata->set_mpu_wkup_lat;
+ } else {
+ speed = 100; /* Default speed */
+ dev->set_mpu_wkup_lat = NULL;
+ }
dev->speed = speed;
dev->idle = 1;
@@ -905,17 +1041,27 @@ omap_i2c_probe(struct platform_device *pdev)
if (cpu_is_omap7xx())
dev->reg_shift = 1;
+ else if (cpu_is_omap44xx())
+ dev->reg_shift = 0;
else
dev->reg_shift = 2;
if ((r = omap_i2c_get_clocks(dev)) != 0)
goto err_iounmap;
+ if (cpu_is_omap44xx())
+ dev->regs = (u8 *) omap4_reg_map;
+ else
+ dev->regs = (u8 *) reg_map;
+
omap_i2c_unidle(dev);
dev->rev = omap_i2c_read_reg(dev, OMAP_I2C_REV_REG) & 0xff;
- if (cpu_is_omap2430() || cpu_is_omap34xx()) {
+ if (dev->rev <= OMAP_I2C_REV_ON_3430)
+ dev->errata |= I2C_OMAP3_1P153;
+
+ if (!(cpu_class_is_omap1() || cpu_is_omap2420())) {
u16 s;
/* Set up the fifo size - Get total size */
@@ -927,8 +1073,17 @@ omap_i2c_probe(struct platform_device *pdev)
* size. This is to ensure that we can handle the status on int
* call back latencies.
*/
- dev->fifo_size = (dev->fifo_size / 2);
- dev->b_hw = 1; /* Enable hardware fixes */
+ if (dev->rev >= OMAP_I2C_REV_ON_4430) {
+ dev->fifo_size = 0;
+ dev->b_hw = 0; /* Disable hardware fixes */
+ } else {
+ dev->fifo_size = (dev->fifo_size / 2);
+ dev->b_hw = 1; /* Enable hardware fixes */
+ }
+ /* calculate wakeup latency constraint for MPU */
+ if (dev->set_mpu_wkup_lat != NULL)
+ dev->latency = (1000000 * dev->fifo_size) /
+ (1000 * speed / 8);
}
/* reset ASAP, clearing any IRQs */
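The OMAP4 support above replaces hard-coded register offsets with logical register indices resolved through a per-SoC offset table, so a single pair of read/write helpers serves both layouts. The core of that indirection, with shortened, hypothetical tables:

#include <linux/io.h>

enum { DEMO_REV_REG, DEMO_CON_REG, DEMO_NR_REGS };

static const u8 demo_v1_regs[DEMO_NR_REGS] = {
	[DEMO_REV_REG] = 0x00,
	[DEMO_CON_REG] = 0x09,
};

static const u8 demo_v2_regs[DEMO_NR_REGS] = {	/* same registers, new layout */
	[DEMO_REV_REG] = 0x04,
	[DEMO_CON_REG] = 0xa4,
};

struct demo_dev {
	void __iomem *base;
	const u8 *regs;		/* demo_v1_regs or demo_v2_regs, chosen at probe */
	int reg_shift;
};

static u16 demo_read_reg(struct demo_dev *d, int reg)
{
	return __raw_readw(d->base + (d->regs[reg] << d->reg_shift));
}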
diff --git a/drivers/i2c/busses/i2c-parport-light.c b/drivers/i2c/busses/i2c-parport-light.c
index 5f41ec0f72d2..fc5fbd1012c9 100644
--- a/drivers/i2c/busses/i2c-parport-light.c
+++ b/drivers/i2c/busses/i2c-parport-light.c
@@ -33,7 +33,7 @@
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/i2c-smbus.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include "i2c-parport.h"
#define DEFAULT_BASE 0x378
diff --git a/drivers/i2c/busses/i2c-parport.c b/drivers/i2c/busses/i2c-parport.c
index 846583ed4763..0eb1515541e7 100644
--- a/drivers/i2c/busses/i2c-parport.c
+++ b/drivers/i2c/busses/i2c-parport.c
@@ -137,7 +137,7 @@ static int parport_getsda(void *data)
copied. The attaching code will set getscl to NULL for adapters that
cannot read SCL back, and will also make the data field point to
the parallel port structure. */
-static struct i2c_algo_bit_data parport_algo_data = {
+static const struct i2c_algo_bit_data parport_algo_data = {
.setsda = parport_setsda,
.setscl = parport_setscl,
.getsda = parport_getsda,
diff --git a/drivers/i2c/busses/i2c-pasemi.c b/drivers/i2c/busses/i2c-pasemi.c
index d3d4a4b43a1d..4174101660c9 100644
--- a/drivers/i2c/busses/i2c-pasemi.c
+++ b/drivers/i2c/busses/i2c-pasemi.c
@@ -25,7 +25,7 @@
#include <linux/i2c.h>
#include <linux/delay.h>
#include <linux/slab.h>
-#include <asm/io.h>
+#include <linux/io.h>
static struct pci_driver pasemi_smb_driver;
diff --git a/drivers/i2c/busses/i2c-pca-isa.c b/drivers/i2c/busses/i2c-pca-isa.c
index f7346a9bd95f..bbd77603a417 100644
--- a/drivers/i2c/busses/i2c-pca-isa.c
+++ b/drivers/i2c/busses/i2c-pca-isa.c
@@ -30,8 +30,8 @@
#include <linux/isa.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-pca.h>
+#include <linux/io.h>
-#include <asm/io.h>
#include <asm/irq.h>
#define DRIVER "i2c-pca-isa"
diff --git a/drivers/i2c/busses/i2c-pca-platform.c b/drivers/i2c/busses/i2c-pca-platform.c
index 5b2213df5ed0..ef5c78487eb7 100644
--- a/drivers/i2c/busses/i2c-pca-platform.c
+++ b/drivers/i2c/busses/i2c-pca-platform.c
@@ -23,9 +23,9 @@
#include <linux/i2c-algo-pca.h>
#include <linux/i2c-pca-platform.h>
#include <linux/gpio.h>
+#include <linux/io.h>
#include <asm/irq.h>
-#include <asm/io.h>
struct i2c_pca_pf_data {
void __iomem *reg_base;
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index ee9da6fcf69a..6d14ac2e3c41 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -39,7 +39,7 @@
#include <linux/init.h>
#include <linux/dmi.h>
#include <linux/acpi.h>
-#include <asm/io.h>
+#include <linux/io.h>
/* PIIX4 SMBus address offsets */
diff --git a/drivers/i2c/busses/i2c-pmcmsp.c b/drivers/i2c/busses/i2c-pmcmsp.c
index 7b57d5f267e1..dfa7ae9c1b8e 100644
--- a/drivers/i2c/busses/i2c-pmcmsp.c
+++ b/drivers/i2c/busses/i2c-pmcmsp.c
@@ -33,7 +33,7 @@
#include <linux/completion.h>
#include <linux/mutex.h>
#include <linux/delay.h>
-#include <asm/io.h>
+#include <linux/io.h>
#define DRV_NAME "pmcmsptwi"
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index 14d249f5ed3f..020ff23d762f 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -34,9 +34,9 @@
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/slab.h>
+#include <linux/io.h>
#include <asm/irq.h>
-#include <asm/io.h>
#include <plat/i2c.h>
/*
@@ -209,18 +209,6 @@ static void i2c_pxa_show_state(struct pxa_i2c *i2c, int lno, const char *fname)
}
#define show_state(i2c) i2c_pxa_show_state(i2c, __LINE__, __func__)
-#else
-#define i2c_debug 0
-
-#define show_state(i2c) do { } while (0)
-#define decode_ISR(val) do { } while (0)
-#define decode_ICR(val) do { } while (0)
-#endif
-
-#define eedbg(lvl, x...) do { if ((lvl) < 1) { printk(KERN_DEBUG "" x); } } while(0)
-
-static void i2c_pxa_master_complete(struct pxa_i2c *i2c, int ret);
-static irqreturn_t i2c_pxa_handler(int this_irq, void *dev_id);
static void i2c_pxa_scream_blue_murder(struct pxa_i2c *i2c, const char *why)
{
@@ -236,6 +224,20 @@ static void i2c_pxa_scream_blue_murder(struct pxa_i2c *i2c, const char *why)
printk("\n");
}
+#else /* ifdef DEBUG */
+
+#define i2c_debug 0
+
+#define show_state(i2c) do { } while (0)
+#define decode_ISR(val) do { } while (0)
+#define decode_ICR(val) do { } while (0)
+#define i2c_pxa_scream_blue_murder(i2c, why) do { } while (0)
+
+#endif /* ifdef DEBUG / else */
+
+static void i2c_pxa_master_complete(struct pxa_i2c *i2c, int ret);
+static irqreturn_t i2c_pxa_handler(int this_irq, void *dev_id);
+
static inline int i2c_pxa_is_slavemode(struct pxa_i2c *i2c)
{
return !(readl(_ICR(i2c)) & ICR_SCLE);
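The i2c-pxa reshuffle keeps all the debug-only helpers, now including i2c_pxa_scream_blue_murder(), behind a single #ifdef DEBUG and supplies empty do-while(0) stand-ins otherwise, so non-debug builds drop the code without touching the call sites. The usual shape of such a stub, with a hypothetical macro name:

#include <linux/kernel.h>

#ifdef DEBUG
#define demo_show_state(x)	pr_debug("state: %d\n", (x))
#else
/* expands to nothing but still consumes the trailing semicolon safely */
#define demo_show_state(x)	do { } while (0)
#endif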
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index d27072b2249f..72902e0bbfa7 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -35,9 +35,9 @@
#include <linux/clk.h>
#include <linux/cpufreq.h>
#include <linux/slab.h>
+#include <linux/io.h>
#include <asm/irq.h>
-#include <asm/io.h>
#include <plat/regs-iic.h>
#include <plat/iic.h>
@@ -482,7 +482,8 @@ static int s3c24xx_i2c_set_master(struct s3c24xx_i2c *i2c)
static int s3c24xx_i2c_doxfer(struct s3c24xx_i2c *i2c,
struct i2c_msg *msgs, int num)
{
- unsigned long timeout;
+ unsigned long iicstat, timeout;
+ int spins = 20;
int ret;
if (i2c->suspended)
@@ -521,7 +522,21 @@ static int s3c24xx_i2c_doxfer(struct s3c24xx_i2c *i2c,
/* ensure the stop has been through the bus */
- msleep(1);
+ dev_dbg(i2c->dev, "waiting for bus idle\n");
+
+ /* first, try busy waiting briefly */
+ do {
+ iicstat = readl(i2c->regs + S3C2410_IICSTAT);
+ } while ((iicstat & S3C2410_IICSTAT_START) && --spins);
+
+ /* if that timed out sleep */
+ if (!spins) {
+ msleep(1);
+ iicstat = readl(i2c->regs + S3C2410_IICSTAT);
+ }
+
+ if (iicstat & S3C2410_IICSTAT_START)
+ dev_warn(i2c->dev, "timeout waiting for bus idle\n");
out:
return ret;
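Rather than unconditionally sleeping a millisecond for the STOP condition to clear, the hunk above spins a bounded number of times first and only then falls back to msleep(), warning if the bus still is not idle. The same two-stage wait in isolation, with made-up register and bit names:

#include <linux/delay.h>
#include <linux/io.h>
#include <linux/kernel.h>

static void demo_wait_bus_idle(void __iomem *stat_reg, u32 busy_bit)
{
	int spins = 20;
	u32 stat;

	do {				/* stage 1: brief busy-wait */
		stat = readl(stat_reg);
	} while ((stat & busy_bit) && --spins);

	if (!spins) {			/* stage 2: sleep once, then re-check */
		msleep(1);
		stat = readl(stat_reg);
	}

	if (stat & busy_bit)
		pr_warn("timeout waiting for bus idle\n");
}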
diff --git a/drivers/i2c/busses/i2c-s6000.c b/drivers/i2c/busses/i2c-s6000.c
index c91359f4965c..cadc0216e02f 100644
--- a/drivers/i2c/busses/i2c-s6000.c
+++ b/drivers/i2c/busses/i2c-s6000.c
@@ -36,8 +36,8 @@
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
+#include <linux/io.h>
-#include <asm/io.h>
#include "i2c-s6000.h"
#define DRV_NAME "i2c-s6000"
diff --git a/drivers/i2c/busses/i2c-sh7760.c b/drivers/i2c/busses/i2c-sh7760.c
index b9680f50f541..4f93da31d3ad 100644
--- a/drivers/i2c/busses/i2c-sh7760.c
+++ b/drivers/i2c/busses/i2c-sh7760.c
@@ -16,10 +16,10 @@
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/io.h>
#include <asm/clock.h>
#include <asm/i2c-sh7760.h>
-#include <asm/io.h>
/* register offsets */
#define I2CSCR 0x0 /* slave ctrl */
diff --git a/drivers/i2c/busses/i2c-sibyte.c b/drivers/i2c/busses/i2c-sibyte.c
index 98b1ec489159..3d76a188e42f 100644
--- a/drivers/i2c/busses/i2c-sibyte.c
+++ b/drivers/i2c/busses/i2c-sibyte.c
@@ -22,7 +22,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/i2c.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <asm/sibyte/sb1250_regs.h>
#include <asm/sibyte/sb1250_smbus.h>
diff --git a/drivers/i2c/busses/i2c-simtec.c b/drivers/i2c/busses/i2c-simtec.c
index 78b06107342c..2fc08fbf67a2 100644
--- a/drivers/i2c/busses/i2c-simtec.c
+++ b/drivers/i2c/busses/i2c-simtec.c
@@ -24,12 +24,11 @@
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/io.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
-#include <asm/io.h>
-
struct simtec_i2c_data {
struct resource *ioarea;
void __iomem *reg;
diff --git a/drivers/i2c/busses/i2c-sis5595.c b/drivers/i2c/busses/i2c-sis5595.c
index 55a71370c79b..437586611d4a 100644
--- a/drivers/i2c/busses/i2c-sis5595.c
+++ b/drivers/i2c/busses/i2c-sis5595.c
@@ -61,7 +61,7 @@
#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/acpi.h>
-#include <asm/io.h>
+#include <linux/io.h>
static int blacklist[] = {
PCI_DEVICE_ID_SI_540,
diff --git a/drivers/i2c/busses/i2c-sis630.c b/drivers/i2c/busses/i2c-sis630.c
index 2309c7f1bde2..e6f539e26f65 100644
--- a/drivers/i2c/busses/i2c-sis630.c
+++ b/drivers/i2c/busses/i2c-sis630.c
@@ -53,7 +53,7 @@
#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/acpi.h>
-#include <asm/io.h>
+#include <linux/io.h>
/* SIS630 SMBus registers */
#define SMB_STS 0x80 /* status */
diff --git a/drivers/i2c/busses/i2c-sis96x.c b/drivers/i2c/busses/i2c-sis96x.c
index d43d8f8943dd..86837f0c4cb9 100644
--- a/drivers/i2c/busses/i2c-sis96x.c
+++ b/drivers/i2c/busses/i2c-sis96x.c
@@ -38,7 +38,7 @@
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/acpi.h>
-#include <asm/io.h>
+#include <linux/io.h>
/* base address register in PCI config space */
#define SIS96x_BAR 0x04
diff --git a/drivers/i2c/busses/i2c-stub.c b/drivers/i2c/busses/i2c-stub.c
index 0c770eabe85e..b1b3447942c9 100644
--- a/drivers/i2c/busses/i2c-stub.c
+++ b/drivers/i2c/busses/i2c-stub.c
@@ -29,13 +29,16 @@
#include <linux/i2c.h>
#define MAX_CHIPS 10
+#define STUB_FUNC (I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE | \
+ I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA | \
+ I2C_FUNC_SMBUS_I2C_BLOCK)
static unsigned short chip_addr[MAX_CHIPS];
module_param_array(chip_addr, ushort, NULL, S_IRUGO);
MODULE_PARM_DESC(chip_addr,
"Chip addresses (up to 10, between 0x03 and 0x77)");
-static unsigned long functionality = ~0UL;
+static unsigned long functionality = STUB_FUNC;
module_param(functionality, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(functionality, "Override functionality bitfield");
@@ -156,9 +159,7 @@ static s32 stub_xfer(struct i2c_adapter * adap, u16 addr, unsigned short flags,
static u32 stub_func(struct i2c_adapter *adapter)
{
- return (I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE |
- I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA |
- I2C_FUNC_SMBUS_I2C_BLOCK) & functionality;
+ return STUB_FUNC & functionality;
}
static const struct i2c_algorithm smbus_algorithm = {
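Folding the supported-functionality mask into a STUB_FUNC macro lets the functionality module parameter default to exactly what the driver implements instead of ~0UL, while the func callback still honours any narrower override. The pattern in miniature, with hypothetical names:

#include <linux/i2c.h>
#include <linux/module.h>

#define DEMO_FUNC (I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE)

static unsigned long functionality = DEMO_FUNC;
module_param(functionality, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(functionality, "Override functionality bitfield");

static u32 demo_func(struct i2c_adapter *adapter)
{
	return DEMO_FUNC & functionality;	/* never advertise more than is supported */
}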
diff --git a/drivers/i2c/busses/i2c-versatile.c b/drivers/i2c/busses/i2c-versatile.c
index 5c473833d948..60556012312f 100644
--- a/drivers/i2c/busses/i2c-versatile.c
+++ b/drivers/i2c/busses/i2c-versatile.c
@@ -15,8 +15,7 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
-
-#include <asm/io.h>
+#include <linux/io.h>
#define I2C_CONTROL 0x00
#define I2C_CONTROLS 0x00
diff --git a/drivers/i2c/busses/i2c-via.c b/drivers/i2c/busses/i2c-via.c
index de78283bddbe..7799fe5bda88 100644
--- a/drivers/i2c/busses/i2c-via.c
+++ b/drivers/i2c/busses/i2c-via.c
@@ -25,7 +25,7 @@
#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
-#include <asm/io.h>
+#include <linux/io.h>
/* Power management registers */
#define PM_CFG_REVID 0x08 /* silicon revision code */
diff --git a/drivers/i2c/busses/i2c-viapro.c b/drivers/i2c/busses/i2c-viapro.c
index d57292e5dae0..4c6fff5f330d 100644
--- a/drivers/i2c/busses/i2c-viapro.c
+++ b/drivers/i2c/busses/i2c-viapro.c
@@ -51,7 +51,7 @@
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/acpi.h>
-#include <asm/io.h>
+#include <linux/io.h>
static struct pci_dev *vt596_pdev;
diff --git a/drivers/i2c/busses/scx200_acb.c b/drivers/i2c/busses/scx200_acb.c
index 684395b6f3e2..4cb4bb009950 100644
--- a/drivers/i2c/busses/scx200_acb.c
+++ b/drivers/i2c/busses/scx200_acb.c
@@ -32,7 +32,7 @@
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/slab.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <linux/scx200.h>
@@ -552,7 +552,7 @@ static int __init scx200_create_isa(const char *text, unsigned long base,
* the name and the BAR where the I/O address resource is located. ISA
* devices are flagged with a bar value of -1 */
-static struct pci_device_id scx200_pci[] = {
+static const struct pci_device_id scx200_pci[] __initconst = {
{ PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SCx200_BRIDGE),
.driver_data = 0 },
{ PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SC1100_BRIDGE),
diff --git a/drivers/i2c/busses/scx200_i2c.c b/drivers/i2c/busses/scx200_i2c.c
index 42df0eca43d5..7ee0d502ceab 100644
--- a/drivers/i2c/busses/scx200_i2c.c
+++ b/drivers/i2c/busses/scx200_i2c.c
@@ -27,7 +27,7 @@
#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <linux/scx200_gpio.h>
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index c2258a51fe0c..e0f833cca3f1 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -159,107 +159,131 @@ static void i2c_device_shutdown(struct device *dev)
driver->shutdown(client);
}
-#ifdef CONFIG_SUSPEND
-static int i2c_device_pm_suspend(struct device *dev)
+#ifdef CONFIG_PM_SLEEP
+static int i2c_legacy_suspend(struct device *dev, pm_message_t mesg)
{
- const struct dev_pm_ops *pm;
+ struct i2c_client *client = i2c_verify_client(dev);
+ struct i2c_driver *driver;
- if (!dev->driver)
+ if (!client || !dev->driver)
return 0;
- pm = dev->driver->pm;
- if (!pm || !pm->suspend)
+ driver = to_i2c_driver(dev->driver);
+ if (!driver->suspend)
return 0;
- return pm->suspend(dev);
+ return driver->suspend(client, mesg);
}
-static int i2c_device_pm_resume(struct device *dev)
+static int i2c_legacy_resume(struct device *dev)
{
- const struct dev_pm_ops *pm;
+ struct i2c_client *client = i2c_verify_client(dev);
+ struct i2c_driver *driver;
- if (!dev->driver)
+ if (!client || !dev->driver)
return 0;
- pm = dev->driver->pm;
- if (!pm || !pm->resume)
+ driver = to_i2c_driver(dev->driver);
+ if (!driver->resume)
return 0;
- return pm->resume(dev);
+ return driver->resume(client);
}
-#else
-#define i2c_device_pm_suspend NULL
-#define i2c_device_pm_resume NULL
-#endif
-#ifdef CONFIG_PM_RUNTIME
-static int i2c_device_runtime_suspend(struct device *dev)
+static int i2c_device_pm_suspend(struct device *dev)
{
- const struct dev_pm_ops *pm;
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- if (!dev->driver)
- return 0;
- pm = dev->driver->pm;
- if (!pm || !pm->runtime_suspend)
+ if (pm_runtime_suspended(dev))
return 0;
- return pm->runtime_suspend(dev);
-}
-static int i2c_device_runtime_resume(struct device *dev)
-{
- const struct dev_pm_ops *pm;
+ if (pm)
+ return pm->suspend ? pm->suspend(dev) : 0;
- if (!dev->driver)
- return 0;
- pm = dev->driver->pm;
- if (!pm || !pm->runtime_resume)
- return 0;
- return pm->runtime_resume(dev);
+ return i2c_legacy_suspend(dev, PMSG_SUSPEND);
}
-static int i2c_device_runtime_idle(struct device *dev)
+static int i2c_device_pm_resume(struct device *dev)
{
- const struct dev_pm_ops *pm = NULL;
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
int ret;
- if (dev->driver)
- pm = dev->driver->pm;
- if (pm && pm->runtime_idle) {
- ret = pm->runtime_idle(dev);
- if (ret)
- return ret;
+ if (pm)
+ ret = pm->resume ? pm->resume(dev) : 0;
+ else
+ ret = i2c_legacy_resume(dev);
+
+ if (!ret) {
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
}
- return pm_runtime_suspend(dev);
+ return ret;
}
-#else
-#define i2c_device_runtime_suspend NULL
-#define i2c_device_runtime_resume NULL
-#define i2c_device_runtime_idle NULL
-#endif
-static int i2c_device_suspend(struct device *dev, pm_message_t mesg)
+static int i2c_device_pm_freeze(struct device *dev)
{
- struct i2c_client *client = i2c_verify_client(dev);
- struct i2c_driver *driver;
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- if (!client || !dev->driver)
+ if (pm_runtime_suspended(dev))
return 0;
- driver = to_i2c_driver(dev->driver);
- if (!driver->suspend)
- return 0;
- return driver->suspend(client, mesg);
+
+ if (pm)
+ return pm->freeze ? pm->freeze(dev) : 0;
+
+ return i2c_legacy_suspend(dev, PMSG_FREEZE);
}
-static int i2c_device_resume(struct device *dev)
+static int i2c_device_pm_thaw(struct device *dev)
{
- struct i2c_client *client = i2c_verify_client(dev);
- struct i2c_driver *driver;
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- if (!client || !dev->driver)
+ if (pm_runtime_suspended(dev))
return 0;
- driver = to_i2c_driver(dev->driver);
- if (!driver->resume)
+
+ if (pm)
+ return pm->thaw ? pm->thaw(dev) : 0;
+
+ return i2c_legacy_resume(dev);
+}
+
+static int i2c_device_pm_poweroff(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ if (pm_runtime_suspended(dev))
return 0;
- return driver->resume(client);
+
+ if (pm)
+ return pm->poweroff ? pm->poweroff(dev) : 0;
+
+ return i2c_legacy_suspend(dev, PMSG_HIBERNATE);
}
+static int i2c_device_pm_restore(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ int ret;
+
+ if (pm)
+ ret = pm->restore ? pm->restore(dev) : 0;
+ else
+ ret = i2c_legacy_resume(dev);
+
+ if (!ret) {
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ }
+
+ return ret;
+}
+#else /* !CONFIG_PM_SLEEP */
+#define i2c_device_pm_suspend NULL
+#define i2c_device_pm_resume NULL
+#define i2c_device_pm_freeze NULL
+#define i2c_device_pm_thaw NULL
+#define i2c_device_pm_poweroff NULL
+#define i2c_device_pm_restore NULL
+#endif /* !CONFIG_PM_SLEEP */
+
static void i2c_client_dev_release(struct device *dev)
{
kfree(to_i2c_client(dev));
@@ -301,9 +325,15 @@ static const struct attribute_group *i2c_dev_attr_groups[] = {
static const struct dev_pm_ops i2c_device_pm_ops = {
.suspend = i2c_device_pm_suspend,
.resume = i2c_device_pm_resume,
- .runtime_suspend = i2c_device_runtime_suspend,
- .runtime_resume = i2c_device_runtime_resume,
- .runtime_idle = i2c_device_runtime_idle,
+ .freeze = i2c_device_pm_freeze,
+ .thaw = i2c_device_pm_thaw,
+ .poweroff = i2c_device_pm_poweroff,
+ .restore = i2c_device_pm_restore,
+ SET_RUNTIME_PM_OPS(
+ pm_generic_runtime_suspend,
+ pm_generic_runtime_resume,
+ pm_generic_runtime_idle
+ )
};
struct bus_type i2c_bus_type = {
@@ -312,8 +342,6 @@ struct bus_type i2c_bus_type = {
.probe = i2c_device_probe,
.remove = i2c_device_remove,
.shutdown = i2c_device_shutdown,
- .suspend = i2c_device_suspend,
- .resume = i2c_device_resume,
.pm = &i2c_device_pm_ops,
};
EXPORT_SYMBOL_GPL(i2c_bus_type);
@@ -390,6 +418,9 @@ i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info)
client->dev.parent = &client->adapter->dev;
client->dev.bus = &i2c_bus_type;
client->dev.type = &i2c_client_type;
+#ifdef CONFIG_OF
+ client->dev.of_node = info->of_node;
+#endif
dev_set_name(&client->dev, "%d-%04x", i2c_adapter_id(adap),
client->addr);
@@ -1193,10 +1224,10 @@ EXPORT_SYMBOL(i2c_transfer);
*
* Returns negative errno, or else the number of bytes written.
*/
-int i2c_master_send(struct i2c_client *client,const char *buf ,int count)
+int i2c_master_send(struct i2c_client *client, const char *buf, int count)
{
int ret;
- struct i2c_adapter *adap=client->adapter;
+ struct i2c_adapter *adap = client->adapter;
struct i2c_msg msg;
msg.addr = client->addr;
@@ -1220,9 +1251,9 @@ EXPORT_SYMBOL(i2c_master_send);
*
* Returns negative errno, or else the number of bytes read.
*/
-int i2c_master_recv(struct i2c_client *client, char *buf ,int count)
+int i2c_master_recv(struct i2c_client *client, char *buf, int count)
{
- struct i2c_adapter *adap=client->adapter;
+ struct i2c_adapter *adap = client->adapter;
struct i2c_msg msg;
int ret;
@@ -1424,7 +1455,7 @@ i2c_new_probed_device(struct i2c_adapter *adap,
}
EXPORT_SYMBOL_GPL(i2c_new_probed_device);
-struct i2c_adapter* i2c_get_adapter(int id)
+struct i2c_adapter *i2c_get_adapter(int id)
{
struct i2c_adapter *adapter;
@@ -1451,7 +1482,7 @@ static u8 crc8(u16 data)
{
int i;
- for(i = 0; i < 8; i++) {
+ for (i = 0; i < 8; i++) {
if (data & 0x8000)
data = data ^ POLY;
data = data << 1;
@@ -1464,7 +1495,7 @@ static u8 i2c_smbus_pec(u8 crc, u8 *p, size_t count)
{
int i;
- for(i = 0; i < count; i++)
+ for (i = 0; i < count; i++)
crc = crc8((crc ^ p[i]) << 8);
return crc;
}
@@ -1534,7 +1565,7 @@ EXPORT_SYMBOL(i2c_smbus_read_byte);
*/
s32 i2c_smbus_write_byte(struct i2c_client *client, u8 value)
{
- return i2c_smbus_xfer(client->adapter,client->addr,client->flags,
+ return i2c_smbus_xfer(client->adapter, client->addr, client->flags,
I2C_SMBUS_WRITE, value, I2C_SMBUS_BYTE, NULL);
}
EXPORT_SYMBOL(i2c_smbus_write_byte);
@@ -1572,9 +1603,9 @@ s32 i2c_smbus_write_byte_data(struct i2c_client *client, u8 command, u8 value)
{
union i2c_smbus_data data;
data.byte = value;
- return i2c_smbus_xfer(client->adapter,client->addr,client->flags,
- I2C_SMBUS_WRITE,command,
- I2C_SMBUS_BYTE_DATA,&data);
+ return i2c_smbus_xfer(client->adapter, client->addr, client->flags,
+ I2C_SMBUS_WRITE, command,
+ I2C_SMBUS_BYTE_DATA, &data);
}
EXPORT_SYMBOL(i2c_smbus_write_byte_data);
@@ -1611,9 +1642,9 @@ s32 i2c_smbus_write_word_data(struct i2c_client *client, u8 command, u16 value)
{
union i2c_smbus_data data;
data.word = value;
- return i2c_smbus_xfer(client->adapter,client->addr,client->flags,
- I2C_SMBUS_WRITE,command,
- I2C_SMBUS_WORD_DATA,&data);
+ return i2c_smbus_xfer(client->adapter, client->addr, client->flags,
+ I2C_SMBUS_WRITE, command,
+ I2C_SMBUS_WORD_DATA, &data);
}
EXPORT_SYMBOL(i2c_smbus_write_word_data);
@@ -1690,9 +1721,9 @@ s32 i2c_smbus_write_block_data(struct i2c_client *client, u8 command,
length = I2C_SMBUS_BLOCK_MAX;
data.block[0] = length;
memcpy(&data.block[1], values, length);
- return i2c_smbus_xfer(client->adapter,client->addr,client->flags,
- I2C_SMBUS_WRITE,command,
- I2C_SMBUS_BLOCK_DATA,&data);
+ return i2c_smbus_xfer(client->adapter, client->addr, client->flags,
+ I2C_SMBUS_WRITE, command,
+ I2C_SMBUS_BLOCK_DATA, &data);
}
EXPORT_SYMBOL(i2c_smbus_write_block_data);
@@ -1734,10 +1765,10 @@ EXPORT_SYMBOL(i2c_smbus_write_i2c_block_data);
/* Simulate a SMBus command using the i2c protocol
No checking of parameters is done! */
-static s32 i2c_smbus_xfer_emulated(struct i2c_adapter * adapter, u16 addr,
- unsigned short flags,
- char read_write, u8 command, int size,
- union i2c_smbus_data * data)
+static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr,
+ unsigned short flags,
+ char read_write, u8 command, int size,
+ union i2c_smbus_data *data)
{
/* So we need to generate a series of msgs. In the case of writing, we
need to use only one message; when reading, we need two. We initialize
@@ -1745,7 +1776,7 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter * adapter, u16 addr,
simpler. */
unsigned char msgbuf0[I2C_SMBUS_BLOCK_MAX+3];
unsigned char msgbuf1[I2C_SMBUS_BLOCK_MAX+2];
- int num = read_write == I2C_SMBUS_READ?2:1;
+ int num = read_write == I2C_SMBUS_READ ? 2 : 1;
struct i2c_msg msg[2] = { { addr, flags, 1, msgbuf0 },
{ addr, flags | I2C_M_RD, 0, msgbuf1 }
};
@@ -1754,7 +1785,7 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter * adapter, u16 addr,
int status;
msgbuf0[0] = command;
- switch(size) {
+ switch (size) {
case I2C_SMBUS_QUICK:
msg[0].len = 0;
/* Special case: The read/write field is used as data */
@@ -1781,7 +1812,7 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter * adapter, u16 addr,
if (read_write == I2C_SMBUS_READ)
msg[1].len = 2;
else {
- msg[0].len=3;
+ msg[0].len = 3;
msgbuf0[1] = data->word & 0xff;
msgbuf0[2] = data->word >> 8;
}
@@ -1874,26 +1905,26 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter * adapter, u16 addr,
}
if (read_write == I2C_SMBUS_READ)
- switch(size) {
- case I2C_SMBUS_BYTE:
- data->byte = msgbuf0[0];
- break;
- case I2C_SMBUS_BYTE_DATA:
- data->byte = msgbuf1[0];
- break;
- case I2C_SMBUS_WORD_DATA:
- case I2C_SMBUS_PROC_CALL:
- data->word = msgbuf1[0] | (msgbuf1[1] << 8);
- break;
- case I2C_SMBUS_I2C_BLOCK_DATA:
- for (i = 0; i < data->block[0]; i++)
- data->block[i+1] = msgbuf1[i];
- break;
- case I2C_SMBUS_BLOCK_DATA:
- case I2C_SMBUS_BLOCK_PROC_CALL:
- for (i = 0; i < msgbuf1[0] + 1; i++)
- data->block[i] = msgbuf1[i];
- break;
+ switch (size) {
+ case I2C_SMBUS_BYTE:
+ data->byte = msgbuf0[0];
+ break;
+ case I2C_SMBUS_BYTE_DATA:
+ data->byte = msgbuf1[0];
+ break;
+ case I2C_SMBUS_WORD_DATA:
+ case I2C_SMBUS_PROC_CALL:
+ data->word = msgbuf1[0] | (msgbuf1[1] << 8);
+ break;
+ case I2C_SMBUS_I2C_BLOCK_DATA:
+ for (i = 0; i < data->block[0]; i++)
+ data->block[i+1] = msgbuf1[i];
+ break;
+ case I2C_SMBUS_BLOCK_DATA:
+ case I2C_SMBUS_BLOCK_PROC_CALL:
+ for (i = 0; i < msgbuf1[0] + 1; i++)
+ data->block[i] = msgbuf1[i];
+ break;
}
return 0;
}
@@ -1938,7 +1969,7 @@ s32 i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr, unsigned short flags,
}
rt_mutex_unlock(&adapter->bus_lock);
} else
- res = i2c_smbus_xfer_emulated(adapter,addr,flags,read_write,
+ res = i2c_smbus_xfer_emulated(adapter, addr, flags, read_write,
command, protocol, data);
return res;
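
With the i2c core now preferring dev_pm_ops and keeping the old i2c_driver suspend/resume callbacks only as a legacy fallback, a client driver can move its power management into a dev_pm_ops table. A minimal sketch of such a client driver follows; the "foo" name, callbacks, and id table are invented for illustration and are not part of this patch.

#include <linux/module.h>
#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/pm.h>

/* Hypothetical "foo" client driver: supplies a dev_pm_ops table, which the
 * reworked i2c core uses in preference to the legacy i2c_driver callbacks. */
static int foo_suspend(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);

	dev_dbg(&client->dev, "suspending\n");
	return 0;	/* device-specific power-down would go here */
}

static int foo_resume(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);

	dev_dbg(&client->dev, "resuming\n");
	return 0;	/* device-specific restore would go here */
}

static const struct dev_pm_ops foo_pm_ops = {
	.suspend = foo_suspend,
	.resume  = foo_resume,
};

static int foo_probe(struct i2c_client *client,
		     const struct i2c_device_id *id)
{
	return 0;
}

static int foo_remove(struct i2c_client *client)
{
	return 0;
}

static const struct i2c_device_id foo_ids[] = {
	{ "foo", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, foo_ids);

static struct i2c_driver foo_driver = {
	.driver = {
		.name	= "foo",
		.owner	= THIS_MODULE,
		.pm	= &foo_pm_ops,	/* used instead of legacy callbacks */
	},
	.probe		= foo_probe,
	.remove		= foo_remove,
	.id_table	= foo_ids,
};

static int __init foo_init(void)
{
	return i2c_add_driver(&foo_driver);
}
module_init(foo_init);

static void __exit foo_exit(void)
{
	i2c_del_driver(&foo_driver);
}
module_exit(foo_exit);

MODULE_LICENSE("GPL");
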
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index f4110aa49600..e0694e4d86c7 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -35,7 +35,7 @@
#include <linux/i2c.h>
#include <linux/i2c-dev.h>
#include <linux/jiffies.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
static struct i2c_driver i2cdev_driver;
@@ -132,45 +132,45 @@ static DEVICE_ATTR(name, S_IRUGO, show_adapter_name, NULL);
* needed by those system calls and by this SMBus interface.
*/
-static ssize_t i2cdev_read (struct file *file, char __user *buf, size_t count,
- loff_t *offset)
+static ssize_t i2cdev_read(struct file *file, char __user *buf, size_t count,
+ loff_t *offset)
{
char *tmp;
int ret;
- struct i2c_client *client = (struct i2c_client *)file->private_data;
+ struct i2c_client *client = file->private_data;
if (count > 8192)
count = 8192;
- tmp = kmalloc(count,GFP_KERNEL);
- if (tmp==NULL)
+ tmp = kmalloc(count, GFP_KERNEL);
+ if (tmp == NULL)
return -ENOMEM;
pr_debug("i2c-dev: i2c-%d reading %zu bytes.\n",
iminor(file->f_path.dentry->d_inode), count);
- ret = i2c_master_recv(client,tmp,count);
+ ret = i2c_master_recv(client, tmp, count);
if (ret >= 0)
- ret = copy_to_user(buf,tmp,count)?-EFAULT:ret;
+ ret = copy_to_user(buf, tmp, count) ? -EFAULT : ret;
kfree(tmp);
return ret;
}
-static ssize_t i2cdev_write (struct file *file, const char __user *buf, size_t count,
- loff_t *offset)
+static ssize_t i2cdev_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *offset)
{
int ret;
char *tmp;
- struct i2c_client *client = (struct i2c_client *)file->private_data;
+ struct i2c_client *client = file->private_data;
if (count > 8192)
count = 8192;
- tmp = kmalloc(count,GFP_KERNEL);
- if (tmp==NULL)
+ tmp = kmalloc(count, GFP_KERNEL);
+ if (tmp == NULL)
return -ENOMEM;
- if (copy_from_user(tmp,buf,count)) {
+ if (copy_from_user(tmp, buf, count)) {
kfree(tmp);
return -EFAULT;
}
@@ -178,7 +178,7 @@ static ssize_t i2cdev_write (struct file *file, const char __user *buf, size_t c
pr_debug("i2c-dev: i2c-%d writing %zu bytes.\n",
iminor(file->f_path.dentry->d_inode), count);
- ret = i2c_master_send(client,tmp,count);
+ ret = i2c_master_send(client, tmp, count);
kfree(tmp);
return ret;
}
@@ -369,13 +369,13 @@ static noinline int i2cdev_ioctl_smbus(struct i2c_client *client,
static long i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
- struct i2c_client *client = (struct i2c_client *)file->private_data;
+ struct i2c_client *client = file->private_data;
unsigned long funcs;
dev_dbg(&client->adapter->dev, "ioctl, cmd=0x%02x, arg=0x%02lx\n",
cmd, arg);
- switch ( cmd ) {
+ switch (cmd) {
case I2C_SLAVE:
case I2C_SLAVE_FORCE:
/* NOTE: devices set up to work with "new style" drivers
@@ -601,7 +601,7 @@ static void __exit i2c_dev_exit(void)
{
i2c_del_driver(&i2cdev_driver);
class_destroy(i2c_dev_class);
- unregister_chrdev(I2C_MAJOR,"i2c");
+ unregister_chrdev(I2C_MAJOR, "i2c");
}
MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl> and "
diff --git a/drivers/ide/cmd640.c b/drivers/ide/cmd640.c
index d2b8b272bc27..cb10201a15ed 100644
--- a/drivers/ide/cmd640.c
+++ b/drivers/ide/cmd640.c
@@ -633,12 +633,10 @@ static void __init cmd640_init_dev(ide_drive_t *drive)
static int cmd640_test_irq(ide_hwif_t *hwif)
{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
int irq_reg = hwif->channel ? ARTTIM23 : CFR;
- u8 irq_stat, irq_mask = hwif->channel ? ARTTIM23_IDE23INTR :
+ u8 irq_mask = hwif->channel ? ARTTIM23_IDE23INTR :
CFR_IDE01INTR;
-
- pci_read_config_byte(dev, irq_reg, &irq_stat);
+ u8 irq_stat = get_cmd640_reg(irq_reg);
return (irq_stat & irq_mask) ? 1 : 0;
}
diff --git a/drivers/ide/gayle.c b/drivers/ide/gayle.c
index b9e517de6a82..3feaa26410be 100644
--- a/drivers/ide/gayle.c
+++ b/drivers/ide/gayle.c
@@ -16,6 +16,7 @@
#include <linux/init.h>
#include <linux/zorro.h>
#include <linux/module.h>
+#include <linux/platform_device.h>
#include <asm/setup.h>
#include <asm/amigahw.h>
@@ -24,15 +25,6 @@
/*
- * Bases of the IDE interfaces
- */
-
-#define GAYLE_BASE_4000 0xdd2020 /* A4000/A4000T */
-#define GAYLE_BASE_1200 0xda0000 /* A1200/A600 and E-Matrix 530 */
-
-#define GAYLE_IDEREG_SIZE 0x2000
-
- /*
* Offsets from one of the above bases
*/
@@ -68,20 +60,20 @@ MODULE_PARM_DESC(doubler, "enable support for IDE doublers");
static int gayle_test_irq(ide_hwif_t *hwif)
{
- unsigned char ch;
+ unsigned char ch;
- ch = z_readb(hwif->io_ports.irq_addr);
- if (!(ch & GAYLE_IRQ_IDE))
- return 0;
- return 1;
+ ch = z_readb(hwif->io_ports.irq_addr);
+ if (!(ch & GAYLE_IRQ_IDE))
+ return 0;
+ return 1;
}
static void gayle_a1200_clear_irq(ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif;
+ ide_hwif_t *hwif = drive->hwif;
- (void)z_readb(hwif->io_ports.status_addr);
- z_writeb(0x7c, hwif->io_ports.irq_addr);
+ (void)z_readb(hwif->io_ports.status_addr);
+ z_writeb(0x7c, hwif->io_ports.irq_addr);
}
static void __init gayle_setup_ports(struct ide_hw *hw, unsigned long base,
@@ -122,64 +114,89 @@ static const struct ide_port_info gayle_port_info = {
* Probe for a Gayle IDE interface (and optionally for an IDE doubler)
*/
-static int __init gayle_init(void)
+static int __init amiga_gayle_ide_probe(struct platform_device *pdev)
{
- unsigned long phys_base, res_start, res_n;
- unsigned long base, ctrlport, irqport;
- int a4000, i, rc;
- struct ide_hw hw[GAYLE_NUM_HWIFS], *hws[GAYLE_NUM_HWIFS];
- struct ide_port_info d = gayle_port_info;
-
- if (!MACH_IS_AMIGA)
- return -ENODEV;
-
- if ((a4000 = AMIGAHW_PRESENT(A4000_IDE)) || AMIGAHW_PRESENT(A1200_IDE))
- goto found;
-
-#ifdef CONFIG_ZORRO
- if (zorro_find_device(ZORRO_PROD_MTEC_VIPER_MK_V_E_MATRIX_530_SCSI_IDE,
- NULL))
- goto found;
-#endif
- return -ENODEV;
-
-found:
- printk(KERN_INFO "ide: Gayle IDE controller (A%d style%s)\n",
- a4000 ? 4000 : 1200,
- ide_doubler ? ", IDE doubler" : "");
-
- if (a4000) {
- phys_base = GAYLE_BASE_4000;
- irqport = (unsigned long)ZTWO_VADDR(GAYLE_IRQ_4000);
- d.port_ops = &gayle_a4000_port_ops;
- } else {
- phys_base = GAYLE_BASE_1200;
- irqport = (unsigned long)ZTWO_VADDR(GAYLE_IRQ_1200);
- d.port_ops = &gayle_a1200_port_ops;
+ struct resource *res;
+ struct gayle_ide_platform_data *pdata;
+ unsigned long base, ctrlport, irqport;
+ unsigned int i;
+ int error;
+ struct ide_hw hw[GAYLE_NUM_HWIFS], *hws[GAYLE_NUM_HWIFS];
+ struct ide_port_info d = gayle_port_info;
+ struct ide_host *host;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ if (!request_mem_region(res->start, resource_size(res), "IDE"))
+ return -EBUSY;
+
+ pdata = pdev->dev.platform_data;
+ pr_info("ide: Gayle IDE controller (A%u style%s)\n",
+ pdata->explicit_ack ? 1200 : 4000,
+ ide_doubler ? ", IDE doubler" : "");
+
+ base = (unsigned long)ZTWO_VADDR(pdata->base);
+ ctrlport = 0;
+ irqport = (unsigned long)ZTWO_VADDR(pdata->irqport);
+ if (pdata->explicit_ack)
+ d.port_ops = &gayle_a1200_port_ops;
+ else
+ d.port_ops = &gayle_a4000_port_ops;
+
+ for (i = 0; i < GAYLE_NUM_PROBE_HWIFS; i++, base += GAYLE_NEXT_PORT) {
+ if (GAYLE_HAS_CONTROL_REG)
+ ctrlport = base + GAYLE_CONTROL;
+
+ gayle_setup_ports(&hw[i], base, ctrlport, irqport);
+ hws[i] = &hw[i];
}
- res_start = ((unsigned long)phys_base) & ~(GAYLE_NEXT_PORT-1);
- res_n = GAYLE_IDEREG_SIZE;
+ error = ide_host_add(&d, hws, i, &host);
+ if (error)
+ goto out;
- if (!request_mem_region(res_start, res_n, "IDE"))
- return -EBUSY;
+ platform_set_drvdata(pdev, host);
+ return 0;
- for (i = 0; i < GAYLE_NUM_PROBE_HWIFS; i++) {
- base = (unsigned long)ZTWO_VADDR(phys_base + i * GAYLE_NEXT_PORT);
- ctrlport = GAYLE_HAS_CONTROL_REG ? (base + GAYLE_CONTROL) : 0;
+out:
+ release_mem_region(res->start, resource_size(res));
+ return error;
+}
+
+static int __exit amiga_gayle_ide_remove(struct platform_device *pdev)
+{
+ struct ide_host *host = platform_get_drvdata(pdev);
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ ide_host_remove(host);
+ release_mem_region(res->start, resource_size(res));
+ return 0;
+}
- gayle_setup_ports(&hw[i], base, ctrlport, irqport);
+static struct platform_driver amiga_gayle_ide_driver = {
+ .remove = __exit_p(amiga_gayle_ide_remove),
+ .driver = {
+ .name = "amiga-gayle-ide",
+ .owner = THIS_MODULE,
+ },
+};
- hws[i] = &hw[i];
- }
+static int __init amiga_gayle_ide_init(void)
+{
+ return platform_driver_probe(&amiga_gayle_ide_driver,
+ amiga_gayle_ide_probe);
+}
- rc = ide_host_add(&d, hws, i, NULL);
- if (rc)
- release_mem_region(res_start, res_n);
+module_init(amiga_gayle_ide_init);
- return rc;
+static void __exit amiga_gayle_ide_exit(void)
+{
+ platform_driver_unregister(&amiga_gayle_ide_driver);
}
-module_init(gayle_init);
+module_exit(amiga_gayle_ide_exit);
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:amiga-gayle-ide");
diff --git a/drivers/ide/ide-cs.c b/drivers/ide/ide-cs.c
index b85450865ff0..0b7815d2581c 100644
--- a/drivers/ide/ide-cs.c
+++ b/drivers/ide/ide-cs.c
@@ -65,8 +65,7 @@ MODULE_LICENSE("Dual MPL/GPL");
typedef struct ide_info_t {
struct pcmcia_device *p_dev;
struct ide_host *host;
- int ndev;
- dev_node_t node;
+ int ndev;
} ide_info_t;
static void ide_release(struct pcmcia_device *);
@@ -102,7 +101,6 @@ static int ide_probe(struct pcmcia_device *link)
link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
link->io.IOAddrLines = 3;
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
@@ -285,8 +283,7 @@ static int ide_config(struct pcmcia_device *link)
io_base = link->io.BasePort1;
ctl_base = stk->ctl_base;
- ret = pcmcia_request_irq(link, &link->irq);
- if (ret)
+ if (!link->irq)
goto failed;
ret = pcmcia_request_configuration(link, &link->conf);
if (ret)
@@ -299,24 +296,21 @@ static int ide_config(struct pcmcia_device *link)
if (is_kme)
outb(0x81, ctl_base+1);
- host = idecs_register(io_base, ctl_base, link->irq.AssignedIRQ, link);
+ host = idecs_register(io_base, ctl_base, link->irq, link);
if (host == NULL && link->io.NumPorts1 == 0x20) {
outb(0x02, ctl_base + 0x10);
host = idecs_register(io_base + 0x10, ctl_base + 0x10,
- link->irq.AssignedIRQ, link);
+ link->irq, link);
}
if (host == NULL)
goto failed;
info->ndev = 1;
- sprintf(info->node.dev_name, "hd%c", 'a' + host->ports[0]->index * 2);
- info->node.major = host->ports[0]->major;
- info->node.minor = 0;
info->host = host;
- link->dev_node = &info->node;
- printk(KERN_INFO "ide-cs: %s: Vpp = %d.%d\n",
- info->node.dev_name, link->conf.Vpp / 10, link->conf.Vpp % 10);
+ dev_info(&link->dev, "ide-cs: hd%c: Vpp = %d.%d\n",
+ 'a' + host->ports[0]->index * 2,
+ link->conf.Vpp / 10, link->conf.Vpp % 10);
kfree(stk);
return 0;
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 3b128dce9c3a..33d65039cce9 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -407,32 +407,24 @@ static int ide_disk_get_capacity(ide_drive_t *drive)
return 0;
}
-static u64 ide_disk_set_capacity(ide_drive_t *drive, u64 capacity)
+static void ide_disk_unlock_native_capacity(ide_drive_t *drive)
{
- u64 set = min(capacity, drive->probed_capacity);
u16 *id = drive->id;
int lba48 = ata_id_lba48_enabled(id);
if ((drive->dev_flags & IDE_DFLAG_LBA) == 0 ||
ata_id_hpa_enabled(id) == 0)
- goto out;
+ return;
/*
* according to the spec the SET MAX ADDRESS command shall be
* immediately preceded by a READ NATIVE MAX ADDRESS command
*/
- capacity = ide_disk_hpa_get_native_capacity(drive, lba48);
- if (capacity == 0)
- goto out;
-
- set = ide_disk_hpa_set_capacity(drive, set, lba48);
- if (set) {
- /* needed for ->resume to disable HPA */
- drive->dev_flags |= IDE_DFLAG_NOHPA;
- return set;
- }
-out:
- return drive->capacity64;
+ if (!ide_disk_hpa_get_native_capacity(drive, lba48))
+ return;
+
+ if (ide_disk_hpa_set_capacity(drive, drive->probed_capacity, lba48))
+ drive->dev_flags |= IDE_DFLAG_NOHPA; /* disable HPA on resume */
}
static void idedisk_prepare_flush(struct request_queue *q, struct request *rq)
@@ -783,13 +775,13 @@ static int ide_disk_set_doorlock(ide_drive_t *drive, struct gendisk *disk,
}
const struct ide_disk_ops ide_ata_disk_ops = {
- .check = ide_disk_check,
- .set_capacity = ide_disk_set_capacity,
- .get_capacity = ide_disk_get_capacity,
- .setup = ide_disk_setup,
- .flush = ide_disk_flush,
- .init_media = ide_disk_init_media,
- .set_doorlock = ide_disk_set_doorlock,
- .do_request = ide_do_rw_disk,
- .ioctl = ide_disk_ioctl,
+ .check = ide_disk_check,
+ .unlock_native_capacity = ide_disk_unlock_native_capacity,
+ .get_capacity = ide_disk_get_capacity,
+ .setup = ide_disk_setup,
+ .flush = ide_disk_flush,
+ .init_media = ide_disk_init_media,
+ .set_doorlock = ide_disk_set_doorlock,
+ .do_request = ide_do_rw_disk,
+ .ioctl = ide_disk_ioctl,
};
diff --git a/drivers/ide/ide-gd.c b/drivers/ide/ide-gd.c
index c32d83996ae1..c102d23d9b38 100644
--- a/drivers/ide/ide-gd.c
+++ b/drivers/ide/ide-gd.c
@@ -288,17 +288,14 @@ static int ide_gd_media_changed(struct gendisk *disk)
return ret;
}
-static unsigned long long ide_gd_set_capacity(struct gendisk *disk,
- unsigned long long capacity)
+static void ide_gd_unlock_native_capacity(struct gendisk *disk)
{
struct ide_disk_obj *idkp = ide_drv_g(disk, ide_disk_obj);
ide_drive_t *drive = idkp->drive;
const struct ide_disk_ops *disk_ops = drive->disk_ops;
- if (disk_ops->set_capacity)
- return disk_ops->set_capacity(drive, capacity);
-
- return drive->capacity64;
+ if (disk_ops->unlock_native_capacity)
+ disk_ops->unlock_native_capacity(drive);
}
static int ide_gd_revalidate_disk(struct gendisk *disk)
@@ -329,7 +326,7 @@ static const struct block_device_operations ide_gd_ops = {
.locked_ioctl = ide_gd_ioctl,
.getgeo = ide_gd_getgeo,
.media_changed = ide_gd_media_changed,
- .set_capacity = ide_gd_set_capacity,
+ .unlock_native_capacity = ide_gd_unlock_native_capacity,
.revalidate_disk = ide_gd_revalidate_disk
};
diff --git a/drivers/ide/ide_platform.c b/drivers/ide/ide_platform.c
index 42965b3e30b9..542603b394e4 100644
--- a/drivers/ide/ide_platform.c
+++ b/drivers/ide/ide_platform.c
@@ -95,6 +95,7 @@ static int __devinit plat_ide_probe(struct platform_device *pdev)
plat_ide_setup_ports(&hw, base, alt_base, pdata, res_irq->start);
hw.dev = &pdev->dev;
+ d.irq_flags = res_irq->flags;
if (mmio)
d.host_flags |= IDE_HFLAG_MMIO;
diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
index c5f3841af360..3a35ec6193d2 100644
--- a/drivers/ide/pdc202xx_old.c
+++ b/drivers/ide/pdc202xx_old.c
@@ -93,13 +93,13 @@ static int pdc202xx_test_irq(ide_hwif_t *hwif)
* bit 7: error, bit 6: interrupting,
* bit 5: FIFO full, bit 4: FIFO empty
*/
- return ((sc1d & 0x50) == 0x50) ? 1 : 0;
+ return (sc1d & 0x40) ? 1 : 0;
} else {
/*
* bit 3: error, bit 2: interrupting,
* bit 1: FIFO full, bit 0: FIFO empty
*/
- return ((sc1d & 0x05) == 0x05) ? 1 : 0;
+ return (sc1d & 0x04) ? 1 : 0;
}
}
@@ -241,6 +241,7 @@ static const struct ide_port_ops pdc20246_port_ops = {
static const struct ide_port_ops pdc2026x_port_ops = {
.set_pio_mode = pdc202xx_set_pio_mode,
.set_dma_mode = pdc202xx_set_mode,
+ .test_irq = pdc202xx_test_irq,
.cable_detect = pdc2026x_cable_detect,
};
diff --git a/drivers/ide/pmac.c b/drivers/ide/pmac.c
index 159955d16c47..183fa38760d8 100644
--- a/drivers/ide/pmac.c
+++ b/drivers/ide/pmac.c
@@ -1153,7 +1153,7 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
if (macio_resource_count(mdev) == 0) {
printk(KERN_WARNING "ide-pmac: no address for %s\n",
- mdev->ofdev.node->full_name);
+ mdev->ofdev.dev.of_node->full_name);
rc = -ENXIO;
goto out_free_pmif;
}
@@ -1161,7 +1161,7 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
/* Request memory resource for IO ports */
if (macio_request_resource(mdev, 0, "ide-pmac (ports)")) {
printk(KERN_ERR "ide-pmac: can't request MMIO resource for "
- "%s!\n", mdev->ofdev.node->full_name);
+ "%s!\n", mdev->ofdev.dev.of_node->full_name);
rc = -EBUSY;
goto out_free_pmif;
}
@@ -1173,7 +1173,7 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
*/
if (macio_irq_count(mdev) == 0) {
printk(KERN_WARNING "ide-pmac: no intrs for device %s, using "
- "13\n", mdev->ofdev.node->full_name);
+ "13\n", mdev->ofdev.dev.of_node->full_name);
irq = irq_create_mapping(NULL, 13);
} else
irq = macio_irq(mdev, 0);
@@ -1182,7 +1182,7 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
regbase = (unsigned long) base;
pmif->mdev = mdev;
- pmif->node = mdev->ofdev.node;
+ pmif->node = mdev->ofdev.dev.of_node;
pmif->regbase = regbase;
pmif->irq = irq;
pmif->kauai_fcr = NULL;
@@ -1191,7 +1191,7 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
if (macio_request_resource(mdev, 1, "ide-pmac (dma)"))
printk(KERN_WARNING "ide-pmac: can't request DMA "
"resource for %s!\n",
- mdev->ofdev.node->full_name);
+ mdev->ofdev.dev.of_node->full_name);
else
pmif->dma_regs = ioremap(macio_resource_start(mdev, 1), 0x1000);
} else
diff --git a/drivers/idle/Kconfig b/drivers/idle/Kconfig
index f15e90a453d1..fb5c5186d4aa 100644
--- a/drivers/idle/Kconfig
+++ b/drivers/idle/Kconfig
@@ -1,3 +1,14 @@
+config INTEL_IDLE
+ tristate "Cpuidle Driver for Intel Processors"
+ depends on CPU_IDLE
+ depends on X86
+ depends on CPU_SUP_INTEL
+ depends on EXPERIMENTAL
+ help
+ Enable intel_idle, a cpuidle driver that includes knowledge of
+ native Intel hardware idle features. The acpi_idle driver
+ can be configured at the same time, in order to handle
+ processors intel_idle does not support.
menu "Memory power savings"
depends on X86_64
diff --git a/drivers/idle/Makefile b/drivers/idle/Makefile
index 5f68fc377e21..23d295cf10f2 100644
--- a/drivers/idle/Makefile
+++ b/drivers/idle/Makefile
@@ -1,2 +1,3 @@
obj-$(CONFIG_I7300_IDLE) += i7300_idle.o
+obj-$(CONFIG_INTEL_IDLE) += intel_idle.o
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
new file mode 100755
index 000000000000..54f0fb4cd5d2
--- /dev/null
+++ b/drivers/idle/intel_idle.c
@@ -0,0 +1,461 @@
+/*
+ * intel_idle.c - native hardware idle loop for modern Intel processors
+ *
+ * Copyright (c) 2010, Intel Corporation.
+ * Len Brown <len.brown@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/*
+ * intel_idle is a cpuidle driver that loads on specific Intel processors
+ * in lieu of the legacy ACPI processor_idle driver. The intent is to
+ * make Linux more efficient on these processors, as intel_idle knows
+ * more than ACPI, as well as make Linux more immune to ACPI BIOS bugs.
+ */
+
+/*
+ * Design Assumptions
+ *
+ * All CPUs have same idle states as boot CPU
+ *
+ * Chipset BM_STS (bus master status) bit is a NOP
+ * for preventing entry into deep C-states
+ */
+
+/*
+ * Known limitations
+ *
+ * The driver currently initializes for_each_online_cpu() upon modprobe.
+ * It is unaware of subsequent processors hot-added to the system.
+ * This means that if you boot with maxcpus=n and later online
+ * processors above n, those processors will use C1 only.
+ *
+ * ACPI has a .suspend hack to turn off deep c-states during suspend
+ * to avoid complications with the lapic timer workaround.
+ * Have not seen issues with suspend, but may need same workaround here.
+ *
+ * There is currently no kernel-based automatic probing/loading mechanism
+ * if the driver is built as a module.
+ */
+
+/* un-comment DEBUG to enable pr_debug() statements */
+#define DEBUG
+
+#include <linux/kernel.h>
+#include <linux/cpuidle.h>
+#include <linux/clockchips.h>
+#include <linux/hrtimer.h> /* ktime_get_real() */
+#include <trace/events/power.h>
+#include <linux/sched.h>
+
+#define INTEL_IDLE_VERSION "0.4"
+#define PREFIX "intel_idle: "
+
+#define MWAIT_SUBSTATE_MASK (0xf)
+#define MWAIT_CSTATE_MASK (0xf)
+#define MWAIT_SUBSTATE_SIZE (4)
+#define MWAIT_MAX_NUM_CSTATES 8
+#define CPUID_MWAIT_LEAF (5)
+#define CPUID5_ECX_EXTENSIONS_SUPPORTED (0x1)
+#define CPUID5_ECX_INTERRUPT_BREAK (0x2)
+
+static struct cpuidle_driver intel_idle_driver = {
+ .name = "intel_idle",
+ .owner = THIS_MODULE,
+};
+/* intel_idle.max_cstate=0 disables driver */
+static int max_cstate = MWAIT_MAX_NUM_CSTATES - 1;
+static int power_policy = 7; /* 0 = max perf; 15 = max powersave */
+
+static unsigned int substates;
+static int (*choose_substate)(int);
+
+/* Reliable LAPIC Timer States, bit 1 for C1 etc. */
+static unsigned int lapic_timer_reliable_states;
+
+static struct cpuidle_device *intel_idle_cpuidle_devices;
+static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state);
+
+static struct cpuidle_state *cpuidle_state_table;
+
+/*
+ * States are indexed by the cstate number,
+ * which is also the index into the MWAIT hint array.
+ * Thus C0 is a dummy.
+ */
+static struct cpuidle_state nehalem_cstates[MWAIT_MAX_NUM_CSTATES] = {
+ { /* MWAIT C0 */ },
+ { /* MWAIT C1 */
+ .name = "NHM-C1",
+ .desc = "MWAIT 0x00",
+ .driver_data = (void *) 0x00,
+ .flags = CPUIDLE_FLAG_TIME_VALID,
+ .exit_latency = 3,
+ .power_usage = 1000,
+ .target_residency = 6,
+ .enter = &intel_idle },
+ { /* MWAIT C2 */
+ .name = "NHM-C3",
+ .desc = "MWAIT 0x10",
+ .driver_data = (void *) 0x10,
+ .flags = CPUIDLE_FLAG_TIME_VALID,
+ .exit_latency = 20,
+ .power_usage = 500,
+ .target_residency = 80,
+ .enter = &intel_idle },
+ { /* MWAIT C3 */
+ .name = "NHM-C6",
+ .desc = "MWAIT 0x20",
+ .driver_data = (void *) 0x20,
+ .flags = CPUIDLE_FLAG_TIME_VALID,
+ .exit_latency = 200,
+ .power_usage = 350,
+ .target_residency = 800,
+ .enter = &intel_idle },
+};
+
+static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
+ { /* MWAIT C0 */ },
+ { /* MWAIT C1 */
+ .name = "ATM-C1",
+ .desc = "MWAIT 0x00",
+ .driver_data = (void *) 0x00,
+ .flags = CPUIDLE_FLAG_TIME_VALID,
+ .exit_latency = 1,
+ .power_usage = 1000,
+ .target_residency = 4,
+ .enter = &intel_idle },
+ { /* MWAIT C2 */
+ .name = "ATM-C2",
+ .desc = "MWAIT 0x10",
+ .driver_data = (void *) 0x10,
+ .flags = CPUIDLE_FLAG_TIME_VALID,
+ .exit_latency = 20,
+ .power_usage = 500,
+ .target_residency = 80,
+ .enter = &intel_idle },
+ { /* MWAIT C3 */ },
+ { /* MWAIT C4 */
+ .name = "ATM-C4",
+ .desc = "MWAIT 0x30",
+ .driver_data = (void *) 0x30,
+ .flags = CPUIDLE_FLAG_TIME_VALID,
+ .exit_latency = 100,
+ .power_usage = 250,
+ .target_residency = 400,
+ .enter = &intel_idle },
+ { /* MWAIT C5 */ },
+ { /* MWAIT C6 */
+ .name = "ATM-C6",
+ .desc = "MWAIT 0x40",
+ .driver_data = (void *) 0x40,
+ .flags = CPUIDLE_FLAG_TIME_VALID,
+ .exit_latency = 200,
+ .power_usage = 150,
+ .target_residency = 800,
+ .enter = NULL }, /* disabled */
+};
+
+/*
+ * choose_tunable_substate()
+ *
+ * Run-time decision on which C-state substate to invoke
+ * If power_policy = 0, choose shallowest substate (0)
+ * If power_policy = 15, choose deepest substate
+ * If power_policy = middle, choose middle substate etc.
+ */
+static int choose_tunable_substate(int cstate)
+{
+ unsigned int num_substates;
+ unsigned int substate_choice;
+
+ power_policy &= 0xF; /* valid range: 0-15 */
+ cstate &= 7; /* valid range: 0-7 */
+
+ num_substates = (substates >> ((cstate) * 4)) & MWAIT_SUBSTATE_MASK;
+
+ if (num_substates <= 1)
+ return 0;
+
+ substate_choice = ((power_policy + (power_policy + 1) *
+ (num_substates - 1)) / 16);
+
+ return substate_choice;
+}
+
+/*
+ * choose_zero_substate()
+ */
+static int choose_zero_substate(int cstate)
+{
+ return 0;
+}
+
+/**
+ * intel_idle
+ * @dev: cpuidle_device
+ * @state: cpuidle state
+ *
+ */
+static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state)
+{
+ unsigned long ecx = 1; /* break on interrupt flag */
+ unsigned long eax = (unsigned long)cpuidle_get_statedata(state);
+ unsigned int cstate;
+ ktime_t kt_before, kt_after;
+ s64 usec_delta;
+ int cpu = smp_processor_id();
+
+ cstate = (((eax) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1;
+
+ eax = eax + (choose_substate)(cstate);
+
+ local_irq_disable();
+
+ if (!(lapic_timer_reliable_states & (1 << (cstate))))
+ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
+
+ kt_before = ktime_get_real();
+
+ stop_critical_timings();
+#ifndef MODULE
+ trace_power_start(POWER_CSTATE, (eax >> 4) + 1);
+#endif
+ if (!need_resched()) {
+
+ __monitor((void *)&current_thread_info()->flags, 0, 0);
+ smp_mb();
+ if (!need_resched())
+ __mwait(eax, ecx);
+ }
+
+ start_critical_timings();
+
+ kt_after = ktime_get_real();
+ usec_delta = ktime_to_us(ktime_sub(kt_after, kt_before));
+
+ local_irq_enable();
+
+ if (!(lapic_timer_reliable_states & (1 << (cstate))))
+ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
+
+ return usec_delta;
+}
+
+/*
+ * intel_idle_probe()
+ */
+static int intel_idle_probe(void)
+{
+ unsigned int eax, ebx, ecx, edx;
+
+ if (max_cstate == 0) {
+ pr_debug(PREFIX "disabled\n");
+ return -EPERM;
+ }
+
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+ return -ENODEV;
+
+ if (!boot_cpu_has(X86_FEATURE_MWAIT))
+ return -ENODEV;
+
+ if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
+ return -ENODEV;
+
+ cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);
+
+ if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
+ !(ecx & CPUID5_ECX_INTERRUPT_BREAK))
+ return -ENODEV;
+#ifdef DEBUG
+ if (substates == 0) /* can over-ride via modparam */
+#endif
+ substates = edx;
+
+ pr_debug(PREFIX "MWAIT substates: 0x%x\n", substates);
+
+ if (boot_cpu_has(X86_FEATURE_ARAT)) /* Always Reliable APIC Timer */
+ lapic_timer_reliable_states = 0xFFFFFFFF;
+
+ if (boot_cpu_data.x86 != 6) /* family 6 */
+ return -ENODEV;
+
+ switch (boot_cpu_data.x86_model) {
+
+ case 0x1A: /* Core i7, Xeon 5500 series */
+ case 0x1E: /* Core i7 and i5 Processor - Lynnfield Jasper Forest */
+ case 0x1F: /* Core i7 and i5 Processor - Nehalem */
+ case 0x2E: /* Nehalem-EX Xeon */
+ lapic_timer_reliable_states = (1 << 1); /* C1 */
+
+ case 0x25: /* Westmere */
+ case 0x2C: /* Westmere */
+ cpuidle_state_table = nehalem_cstates;
+ choose_substate = choose_tunable_substate;
+ break;
+
+ case 0x1C: /* 28 - Atom Processor */
+ lapic_timer_reliable_states = (1 << 2) | (1 << 1); /* C2, C1 */
+ cpuidle_state_table = atom_cstates;
+ choose_substate = choose_zero_substate;
+ break;
+#ifdef FUTURE_USE
+ case 0x17: /* 23 - Core 2 Duo */
+ lapic_timer_reliable_states = (1 << 2) | (1 << 1); /* C2, C1 */
+#endif
+
+ default:
+ pr_debug(PREFIX "does not run on family %d model %d\n",
+ boot_cpu_data.x86, boot_cpu_data.x86_model);
+ return -ENODEV;
+ }
+
+ pr_debug(PREFIX "v" INTEL_IDLE_VERSION
+ " model 0x%X\n", boot_cpu_data.x86_model);
+
+ pr_debug(PREFIX "lapic_timer_reliable_states 0x%x\n",
+ lapic_timer_reliable_states);
+ return 0;
+}
+
+/*
+ * intel_idle_cpuidle_devices_uninit()
+ * unregister, free cpuidle_devices
+ */
+static void intel_idle_cpuidle_devices_uninit(void)
+{
+ int i;
+ struct cpuidle_device *dev;
+
+ for_each_online_cpu(i) {
+ dev = per_cpu_ptr(intel_idle_cpuidle_devices, i);
+ cpuidle_unregister_device(dev);
+ }
+
+ free_percpu(intel_idle_cpuidle_devices);
+ return;
+}
+/*
+ * intel_idle_cpuidle_devices_init()
+ * allocate, initialize, register cpuidle_devices
+ */
+static int intel_idle_cpuidle_devices_init(void)
+{
+ int i, cstate;
+ struct cpuidle_device *dev;
+
+ intel_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device);
+ if (intel_idle_cpuidle_devices == NULL)
+ return -ENOMEM;
+
+ for_each_online_cpu(i) {
+ dev = per_cpu_ptr(intel_idle_cpuidle_devices, i);
+
+ dev->state_count = 1;
+
+ for (cstate = 1; cstate < MWAIT_MAX_NUM_CSTATES; ++cstate) {
+ int num_substates;
+
+ if (cstate > max_cstate) {
+ printk(PREFIX "max_cstate %d reached\n",
+ max_cstate);
+ break;
+ }
+
+ /* does the state exist in CPUID.MWAIT? */
+ num_substates = (substates >> ((cstate) * 4))
+ & MWAIT_SUBSTATE_MASK;
+ if (num_substates == 0)
+ continue;
+ /* is the state not enabled? */
+ if (cpuidle_state_table[cstate].enter == NULL) {
+ /* does the driver not know about the state? */
+ if (*cpuidle_state_table[cstate].name == '\0')
+ pr_debug(PREFIX "unaware of model 0x%x"
+ " MWAIT %d please"
+ " contact lenb@kernel.org",
+ boot_cpu_data.x86_model, cstate);
+ continue;
+ }
+
+ if ((cstate > 2) &&
+ !boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
+ mark_tsc_unstable("TSC halts in idle"
+ " states deeper than C2");
+
+ dev->states[dev->state_count] = /* structure copy */
+ cpuidle_state_table[cstate];
+
+ dev->state_count += 1;
+ }
+
+ dev->cpu = i;
+ if (cpuidle_register_device(dev)) {
+ pr_debug(PREFIX "cpuidle_register_device %d failed!\n",
+ i);
+ intel_idle_cpuidle_devices_uninit();
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+
+static int __init intel_idle_init(void)
+{
+ int retval;
+
+ retval = intel_idle_probe();
+ if (retval)
+ return retval;
+
+ retval = cpuidle_register_driver(&intel_idle_driver);
+ if (retval) {
+		printk(KERN_DEBUG PREFIX "intel_idle yielding to %s\n",
+ cpuidle_get_driver()->name);
+ return retval;
+ }
+
+ retval = intel_idle_cpuidle_devices_init();
+ if (retval) {
+ cpuidle_unregister_driver(&intel_idle_driver);
+ return retval;
+ }
+
+ return 0;
+}
+
+static void __exit intel_idle_exit(void)
+{
+ intel_idle_cpuidle_devices_uninit();
+ cpuidle_unregister_driver(&intel_idle_driver);
+
+ return;
+}
+
+module_init(intel_idle_init);
+module_exit(intel_idle_exit);
+
+module_param(power_policy, int, 0644);
+module_param(max_cstate, int, 0444);
+#ifdef DEBUG
+module_param(substates, int, 0444);
+#endif
+
+MODULE_AUTHOR("Len Brown <len.brown@intel.com>");
+MODULE_DESCRIPTION("Cpuidle driver for Intel Hardware v" INTEL_IDLE_VERSION);
+MODULE_LICENSE("GPL");
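
Both the probe path and the per-cpu device setup above decode the CPUID leaf 5 EDX word, in which each 4-bit nibble reports how many MWAIT sub-states exist for the corresponding C-state. A small standalone sketch of that decoding follows; the EDX value used here is invented for illustration, not read from hardware.

#include <stdio.h>

#define MWAIT_SUBSTATE_MASK	0xf
#define MWAIT_SUBSTATE_SIZE	4
#define MWAIT_MAX_NUM_CSTATES	8

/* Number of MWAIT sub-states advertised for a given C-state: nibble 0 of
 * EDX describes C0, nibble 1 describes C1, and so on. */
static unsigned int mwait_substates(unsigned int edx, unsigned int cstate)
{
	return (edx >> (cstate * MWAIT_SUBSTATE_SIZE)) & MWAIT_SUBSTATE_MASK;
}

int main(void)
{
	/* Invented value; the driver reads the real one with cpuid(5). */
	unsigned int edx = 0x00002220;
	unsigned int cstate;

	for (cstate = 1; cstate < MWAIT_MAX_NUM_CSTATES; cstate++)
		printf("C%u: %u sub-state(s)\n",
		       cstate, mwait_substates(edx, cstate));
	return 0;
}
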
diff --git a/drivers/ieee1394/dv1394.c b/drivers/ieee1394/dv1394.c
index 9fd4a0d3206e..adaefabc40e9 100644
--- a/drivers/ieee1394/dv1394.c
+++ b/drivers/ieee1394/dv1394.c
@@ -1824,7 +1824,7 @@ static int dv1394_open(struct inode *inode, struct file *file)
"and will not be available in the new firewire driver stack. "
"Try libraw1394 based programs instead.\n", current->comm);
- return 0;
+ return nonseekable_open(inode, file);
}
@@ -2153,17 +2153,18 @@ static struct cdev dv1394_cdev;
static const struct file_operations dv1394_fops=
{
.owner = THIS_MODULE,
- .poll = dv1394_poll,
+ .poll = dv1394_poll,
.unlocked_ioctl = dv1394_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = dv1394_compat_ioctl,
#endif
.mmap = dv1394_mmap,
.open = dv1394_open,
- .write = dv1394_write,
- .read = dv1394_read,
+ .write = dv1394_write,
+ .read = dv1394_read,
.release = dv1394_release,
- .fasync = dv1394_fasync,
+ .fasync = dv1394_fasync,
+ .llseek = no_llseek,
};
diff --git a/drivers/ieee1394/raw1394.c b/drivers/ieee1394/raw1394.c
index 8aa56ac07e29..b563d5e9fa2e 100644
--- a/drivers/ieee1394/raw1394.c
+++ b/drivers/ieee1394/raw1394.c
@@ -2834,7 +2834,7 @@ static int raw1394_open(struct inode *inode, struct file *file)
file->private_data = fi;
- return 0;
+ return nonseekable_open(inode, file);
}
static int raw1394_release(struct inode *inode, struct file *file)
@@ -3035,6 +3035,7 @@ static const struct file_operations raw1394_fops = {
.poll = raw1394_poll,
.open = raw1394_open,
.release = raw1394_release,
+ .llseek = no_llseek,
};
static int __init init_raw1394(void)
diff --git a/drivers/ieee1394/video1394.c b/drivers/ieee1394/video1394.c
index 949064a05675..a42bd6893bcf 100644
--- a/drivers/ieee1394/video1394.c
+++ b/drivers/ieee1394/video1394.c
@@ -1239,7 +1239,7 @@ static int video1394_open(struct inode *inode, struct file *file)
ctx->current_ctx = NULL;
file->private_data = ctx;
- return 0;
+ return nonseekable_open(inode, file);
}
static int video1394_release(struct inode *inode, struct file *file)
@@ -1287,7 +1287,8 @@ static const struct file_operations video1394_fops=
.poll = video1394_poll,
.mmap = video1394_mmap,
.open = video1394_open,
- .release = video1394_release
+ .release = video1394_release,
+ .llseek = no_llseek,
};
/*** HOTPLUG STUFF **********************************************************/
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index 975adce5f40c..89d70de5e235 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -43,9 +43,11 @@ config INFINIBAND_ADDR_TRANS
source "drivers/infiniband/hw/mthca/Kconfig"
source "drivers/infiniband/hw/ipath/Kconfig"
+source "drivers/infiniband/hw/qib/Kconfig"
source "drivers/infiniband/hw/ehca/Kconfig"
source "drivers/infiniband/hw/amso1100/Kconfig"
source "drivers/infiniband/hw/cxgb3/Kconfig"
+source "drivers/infiniband/hw/cxgb4/Kconfig"
source "drivers/infiniband/hw/mlx4/Kconfig"
source "drivers/infiniband/hw/nes/Kconfig"
diff --git a/drivers/infiniband/Makefile b/drivers/infiniband/Makefile
index ed35e4496241..9cc7a47d3e67 100644
--- a/drivers/infiniband/Makefile
+++ b/drivers/infiniband/Makefile
@@ -1,9 +1,11 @@
obj-$(CONFIG_INFINIBAND) += core/
obj-$(CONFIG_INFINIBAND_MTHCA) += hw/mthca/
obj-$(CONFIG_INFINIBAND_IPATH) += hw/ipath/
+obj-$(CONFIG_INFINIBAND_QIB) += hw/qib/
obj-$(CONFIG_INFINIBAND_EHCA) += hw/ehca/
obj-$(CONFIG_INFINIBAND_AMSO1100) += hw/amso1100/
obj-$(CONFIG_INFINIBAND_CXGB3) += hw/cxgb3/
+obj-$(CONFIG_INFINIBAND_CXGB4) += hw/cxgb4/
obj-$(CONFIG_MLX4_INFINIBAND) += hw/mlx4/
obj-$(CONFIG_INFINIBAND_NES) += hw/nes/
obj-$(CONFIG_INFINIBAND_IPOIB) += ulp/ipoib/
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 6d777069d86d..b930b8110a63 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -79,7 +79,6 @@ static DEFINE_IDR(sdp_ps);
static DEFINE_IDR(tcp_ps);
static DEFINE_IDR(udp_ps);
static DEFINE_IDR(ipoib_ps);
-static int next_port;
struct cma_device {
struct list_head list;
@@ -1677,13 +1676,13 @@ int rdma_set_ib_paths(struct rdma_cm_id *id,
if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_RESOLVED))
return -EINVAL;
- id->route.path_rec = kmalloc(sizeof *path_rec * num_paths, GFP_KERNEL);
+ id->route.path_rec = kmemdup(path_rec, sizeof *path_rec * num_paths,
+ GFP_KERNEL);
if (!id->route.path_rec) {
ret = -ENOMEM;
goto err;
}
- memcpy(id->route.path_rec, path_rec, sizeof *path_rec * num_paths);
id->route.num_paths = num_paths;
return 0;
err:
@@ -1970,47 +1969,33 @@ err1:
static int cma_alloc_any_port(struct idr *ps, struct rdma_id_private *id_priv)
{
- struct rdma_bind_list *bind_list;
- int port, ret, low, high;
-
- bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
- if (!bind_list)
- return -ENOMEM;
-
-retry:
- /* FIXME: add proper port randomization per like inet_csk_get_port */
- do {
- ret = idr_get_new_above(ps, bind_list, next_port, &port);
- } while ((ret == -EAGAIN) && idr_pre_get(ps, GFP_KERNEL));
-
- if (ret)
- goto err1;
+ static unsigned int last_used_port;
+ int low, high, remaining;
+ unsigned int rover;
inet_get_local_port_range(&low, &high);
- if (port > high) {
- if (next_port != low) {
- idr_remove(ps, port);
- next_port = low;
- goto retry;
- }
- ret = -EADDRNOTAVAIL;
- goto err2;
+ remaining = (high - low) + 1;
+ rover = net_random() % remaining + low;
+retry:
+ if (last_used_port != rover &&
+ !idr_find(ps, (unsigned short) rover)) {
+ int ret = cma_alloc_port(ps, id_priv, rover);
+ /*
+ * Remember previously used port number in order to avoid
+ * re-using same port immediately after it is closed.
+ */
+ if (!ret)
+ last_used_port = rover;
+ if (ret != -EADDRNOTAVAIL)
+ return ret;
}
-
- if (port == high)
- next_port = low;
- else
- next_port = port + 1;
-
- bind_list->ps = ps;
- bind_list->port = (unsigned short) port;
- cma_bind_port(bind_list, id_priv);
- return 0;
-err2:
- idr_remove(ps, port);
-err1:
- kfree(bind_list);
- return ret;
+ if (--remaining) {
+ rover++;
+ if ((rover < low) || (rover > high))
+ rover = low;
+ goto retry;
+ }
+ return -EADDRNOTAVAIL;
}
static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv)
@@ -2995,12 +2980,7 @@ static void cma_remove_one(struct ib_device *device)
static int __init cma_init(void)
{
- int ret, low, high, remaining;
-
- get_random_bytes(&next_port, sizeof next_port);
- inet_get_local_port_range(&low, &high);
- remaining = (high - low) + 1;
- next_port = ((unsigned int) next_port % remaining) + low;
+ int ret;
cma_wq = create_singlethread_workqueue("rdma_cm");
if (!cma_wq)
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index 05ac36e6acdb..a565af5c2d2e 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -38,7 +38,9 @@
#include <rdma/ib_verbs.h>
-int ib_device_register_sysfs(struct ib_device *device);
+int ib_device_register_sysfs(struct ib_device *device,
+ int (*port_callback)(struct ib_device *,
+ u8, struct kobject *));
void ib_device_unregister_sysfs(struct ib_device *device);
int ib_sysfs_setup(void);
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index d1fba4153332..a19effad0811 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -267,7 +267,9 @@ out:
* callback for each device that is added. @device must be allocated
* with ib_alloc_device().
*/
-int ib_register_device(struct ib_device *device)
+int ib_register_device(struct ib_device *device,
+ int (*port_callback)(struct ib_device *,
+ u8, struct kobject *))
{
int ret;
@@ -296,7 +298,7 @@ int ib_register_device(struct ib_device *device)
goto out;
}
- ret = ib_device_register_sysfs(device);
+ ret = ib_device_register_sysfs(device, port_callback);
if (ret) {
printk(KERN_WARNING "Couldn't register device %s with driver model\n",
device->name);
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 1df1194aeba4..ef1304f151dc 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -47,8 +47,8 @@ MODULE_DESCRIPTION("kernel IB MAD API");
MODULE_AUTHOR("Hal Rosenstock");
MODULE_AUTHOR("Sean Hefty");
-int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
-int mad_recvq_size = IB_MAD_QP_RECV_SIZE;
+static int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
+static int mad_recvq_size = IB_MAD_QP_RECV_SIZE;
module_param_named(send_queue_size, mad_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
@@ -291,13 +291,11 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
}
if (mad_reg_req) {
- reg_req = kmalloc(sizeof *reg_req, GFP_KERNEL);
+ reg_req = kmemdup(mad_reg_req, sizeof *reg_req, GFP_KERNEL);
if (!reg_req) {
ret = ERR_PTR(-ENOMEM);
goto error3;
}
- /* Make a copy of the MAD registration request */
- memcpy(reg_req, mad_reg_req, sizeof *reg_req);
}
/* Now, fill in the various structures */
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index f901957abc8b..3627300e2a10 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -475,7 +475,9 @@ err:
return NULL;
}
-static int add_port(struct ib_device *device, int port_num)
+static int add_port(struct ib_device *device, int port_num,
+ int (*port_callback)(struct ib_device *,
+ u8, struct kobject *))
{
struct ib_port *p;
struct ib_port_attr attr;
@@ -522,11 +524,20 @@ static int add_port(struct ib_device *device, int port_num)
if (ret)
goto err_free_pkey;
+ if (port_callback) {
+ ret = port_callback(device, port_num, &p->kobj);
+ if (ret)
+ goto err_remove_pkey;
+ }
+
list_add_tail(&p->kobj.entry, &device->port_list);
kobject_uevent(&p->kobj, KOBJ_ADD);
return 0;
+err_remove_pkey:
+ sysfs_remove_group(&p->kobj, &p->pkey_group);
+
err_free_pkey:
for (i = 0; i < attr.pkey_tbl_len; ++i)
kfree(p->pkey_group.attrs[i]);
@@ -754,7 +765,9 @@ static struct attribute_group iw_stats_group = {
.attrs = iw_proto_stats_attrs,
};
-int ib_device_register_sysfs(struct ib_device *device)
+int ib_device_register_sysfs(struct ib_device *device,
+ int (*port_callback)(struct ib_device *,
+ u8, struct kobject *))
{
struct device *class_dev = &device->dev;
int ret;
@@ -785,12 +798,12 @@ int ib_device_register_sysfs(struct ib_device *device)
}
if (device->node_type == RDMA_NODE_IB_SWITCH) {
- ret = add_port(device, 0);
+ ret = add_port(device, 0, port_callback);
if (ret)
goto err_put;
} else {
for (i = 1; i <= device->phys_port_cnt; ++i) {
- ret = add_port(device, i);
+ ret = add_port(device, i, port_callback);
if (ret)
goto err_put;
}
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index 512b1c43460c..08f948df8fa9 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -706,14 +706,9 @@ static int ib_ucm_alloc_data(const void **dest, u64 src, u32 len)
if (!len)
return 0;
- data = kmalloc(len, GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- if (copy_from_user(data, (void __user *)(unsigned long)src, len)) {
- kfree(data);
- return -EFAULT;
- }
+ data = memdup_user((void __user *)(unsigned long)src, len);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
*dest = data;
return 0;
@@ -1181,7 +1176,7 @@ static int ib_ucm_open(struct inode *inode, struct file *filp)
file->filp = filp;
file->device = container_of(inode->i_cdev, struct ib_ucm_device, cdev);
- return 0;
+ return nonseekable_open(inode, filp);
}
static int ib_ucm_close(struct inode *inode, struct file *filp)
@@ -1229,6 +1224,7 @@ static const struct file_operations ucm_fops = {
.release = ib_ucm_close,
.write = ib_ucm_write,
.poll = ib_ucm_poll,
+ .llseek = no_llseek,
};
static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 46185084121e..ac7edc24165c 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -1220,7 +1220,8 @@ static int ucma_open(struct inode *inode, struct file *filp)
filp->private_data = file;
file->filp = filp;
- return 0;
+
+ return nonseekable_open(inode, filp);
}
static int ucma_close(struct inode *inode, struct file *filp)
@@ -1250,6 +1251,7 @@ static const struct file_operations ucma_fops = {
.release = ucma_close,
.write = ucma_write,
.poll = ucma_poll,
+ .llseek = no_llseek,
};
static struct miscdevice ucma_misc = {
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index e7db054fb1c8..6babb72b39fc 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -781,7 +781,7 @@ static int ib_umad_open(struct inode *inode, struct file *filp)
{
struct ib_umad_port *port;
struct ib_umad_file *file;
- int ret = 0;
+ int ret;
port = container_of(inode->i_cdev, struct ib_umad_port, cdev);
if (port)
@@ -814,6 +814,8 @@ static int ib_umad_open(struct inode *inode, struct file *filp)
list_add_tail(&file->port_list, &port->file_list);
+ ret = nonseekable_open(inode, filp);
+
out:
mutex_unlock(&port->file_mutex);
return ret;
@@ -866,7 +868,8 @@ static const struct file_operations umad_fops = {
.compat_ioctl = ib_umad_compat_ioctl,
#endif
.open = ib_umad_open,
- .release = ib_umad_close
+ .release = ib_umad_close,
+ .llseek = no_llseek,
};
static int ib_umad_sm_open(struct inode *inode, struct file *filp)
@@ -903,7 +906,7 @@ static int ib_umad_sm_open(struct inode *inode, struct file *filp)
filp->private_data = port;
- return 0;
+ return nonseekable_open(inode, filp);
fail:
kref_put(&port->umad_dev->ref, ib_umad_release_dev);
@@ -933,7 +936,8 @@ static int ib_umad_sm_close(struct inode *inode, struct file *filp)
static const struct file_operations umad_sm_fops = {
.owner = THIS_MODULE,
.open = ib_umad_sm_open,
- .release = ib_umad_sm_close
+ .release = ib_umad_sm_close,
+ .llseek = no_llseek,
};
static struct ib_client umad_client = {
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index fb3526254426..ec83e9fe387b 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -369,7 +369,8 @@ static const struct file_operations uverbs_event_fops = {
.read = ib_uverbs_event_read,
.poll = ib_uverbs_event_poll,
.release = ib_uverbs_event_close,
- .fasync = ib_uverbs_event_fasync
+ .fasync = ib_uverbs_event_fasync,
+ .llseek = no_llseek,
};
void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context)
@@ -623,7 +624,7 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp)
filp->private_data = file;
- return 0;
+ return nonseekable_open(inode, filp);
err_module:
module_put(dev->ib_dev->owner);
@@ -651,7 +652,8 @@ static const struct file_operations uverbs_fops = {
.owner = THIS_MODULE,
.write = ib_uverbs_write,
.open = ib_uverbs_open,
- .release = ib_uverbs_close
+ .release = ib_uverbs_close,
+ .llseek = no_llseek,
};
static const struct file_operations uverbs_mmap_fops = {
@@ -659,7 +661,8 @@ static const struct file_operations uverbs_mmap_fops = {
.write = ib_uverbs_write,
.mmap = ib_uverbs_mmap,
.open = ib_uverbs_open,
- .release = ib_uverbs_close
+ .release = ib_uverbs_close,
+ .llseek = no_llseek,
};
static struct ib_client uverbs_client = {
diff --git a/drivers/infiniband/hw/amso1100/c2.h b/drivers/infiniband/hw/amso1100/c2.h
index f7ff66f98361..6ae698e68775 100644
--- a/drivers/infiniband/hw/amso1100/c2.h
+++ b/drivers/infiniband/hw/amso1100/c2.h
@@ -250,7 +250,7 @@ struct c2_array {
struct sp_chunk {
struct sp_chunk *next;
dma_addr_t dma_addr;
- DECLARE_PCI_UNMAP_ADDR(mapping);
+ DEFINE_DMA_UNMAP_ADDR(mapping);
u16 head;
u16 shared_ptr[0];
};
diff --git a/drivers/infiniband/hw/amso1100/c2_alloc.c b/drivers/infiniband/hw/amso1100/c2_alloc.c
index d4f5f5d42e90..78d247ec6961 100644
--- a/drivers/infiniband/hw/amso1100/c2_alloc.c
+++ b/drivers/infiniband/hw/amso1100/c2_alloc.c
@@ -49,7 +49,7 @@ static int c2_alloc_mqsp_chunk(struct c2_dev *c2dev, gfp_t gfp_mask,
return -ENOMEM;
new_head->dma_addr = dma_addr;
- pci_unmap_addr_set(new_head, mapping, new_head->dma_addr);
+ dma_unmap_addr_set(new_head, mapping, new_head->dma_addr);
new_head->next = NULL;
new_head->head = 0;
@@ -81,7 +81,7 @@ void c2_free_mqsp_pool(struct c2_dev *c2dev, struct sp_chunk *root)
while (root) {
next = root->next;
dma_free_coherent(&c2dev->pcidev->dev, PAGE_SIZE, root,
- pci_unmap_addr(root, mapping));
+ dma_unmap_addr(root, mapping));
root = next;
}
}
diff --git a/drivers/infiniband/hw/amso1100/c2_cq.c b/drivers/infiniband/hw/amso1100/c2_cq.c
index f7b0fc23f413..49e0e8533f74 100644
--- a/drivers/infiniband/hw/amso1100/c2_cq.c
+++ b/drivers/infiniband/hw/amso1100/c2_cq.c
@@ -257,7 +257,7 @@ int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
static void c2_free_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq)
{
dma_free_coherent(&c2dev->pcidev->dev, mq->q_size * mq->msg_size,
- mq->msg_pool.host, pci_unmap_addr(mq, mapping));
+ mq->msg_pool.host, dma_unmap_addr(mq, mapping));
}
static int c2_alloc_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq, int q_size,
@@ -278,7 +278,7 @@ static int c2_alloc_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq, int q_size,
NULL, /* peer (currently unknown) */
C2_MQ_HOST_TARGET);
- pci_unmap_addr_set(mq, mapping, mq->host_dma);
+ dma_unmap_addr_set(mq, mapping, mq->host_dma);
return 0;
}
diff --git a/drivers/infiniband/hw/amso1100/c2_mq.h b/drivers/infiniband/hw/amso1100/c2_mq.h
index acede007b94a..fc1b9a7cec4b 100644
--- a/drivers/infiniband/hw/amso1100/c2_mq.h
+++ b/drivers/infiniband/hw/amso1100/c2_mq.h
@@ -71,7 +71,7 @@ struct c2_mq {
u8 __iomem *adapter;
} msg_pool;
dma_addr_t host_dma;
- DECLARE_PCI_UNMAP_ADDR(mapping);
+ DEFINE_DMA_UNMAP_ADDR(mapping);
u16 hint_count;
u16 priv;
struct c2_mq_shared __iomem *peer;
diff --git a/drivers/infiniband/hw/amso1100/c2_provider.c b/drivers/infiniband/hw/amso1100/c2_provider.c
index c47f618d12e8..aeebc4d37e33 100644
--- a/drivers/infiniband/hw/amso1100/c2_provider.c
+++ b/drivers/infiniband/hw/amso1100/c2_provider.c
@@ -865,7 +865,7 @@ int c2_register_device(struct c2_dev *dev)
dev->ibdev.iwcm->create_listen = c2_service_create;
dev->ibdev.iwcm->destroy_listen = c2_service_destroy;
- ret = ib_register_device(&dev->ibdev);
+ ret = ib_register_device(&dev->ibdev, NULL);
if (ret)
goto out_free_iwcm;
diff --git a/drivers/infiniband/hw/amso1100/c2_provider.h b/drivers/infiniband/hw/amso1100/c2_provider.h
index 1076df2ee96a..bf189987711f 100644
--- a/drivers/infiniband/hw/amso1100/c2_provider.h
+++ b/drivers/infiniband/hw/amso1100/c2_provider.h
@@ -50,7 +50,7 @@
struct c2_buf_list {
void *buf;
- DECLARE_PCI_UNMAP_ADDR(mapping)
+ DEFINE_DMA_UNMAP_ADDR(mapping);
};
diff --git a/drivers/infiniband/hw/amso1100/c2_rnic.c b/drivers/infiniband/hw/amso1100/c2_rnic.c
index 78c4bcc6ef60..85cfae4cad71 100644
--- a/drivers/infiniband/hw/amso1100/c2_rnic.c
+++ b/drivers/infiniband/hw/amso1100/c2_rnic.c
@@ -524,7 +524,7 @@ int __devinit c2_rnic_init(struct c2_dev *c2dev)
err = -ENOMEM;
goto bail1;
}
- pci_unmap_addr_set(&c2dev->rep_vq, mapping, c2dev->rep_vq.host_dma);
+ dma_unmap_addr_set(&c2dev->rep_vq, mapping, c2dev->rep_vq.host_dma);
pr_debug("%s rep_vq va %p dma %llx\n", __func__, q1_pages,
(unsigned long long) c2dev->rep_vq.host_dma);
c2_mq_rep_init(&c2dev->rep_vq,
@@ -545,7 +545,7 @@ int __devinit c2_rnic_init(struct c2_dev *c2dev)
err = -ENOMEM;
goto bail2;
}
- pci_unmap_addr_set(&c2dev->aeq, mapping, c2dev->aeq.host_dma);
+ dma_unmap_addr_set(&c2dev->aeq, mapping, c2dev->aeq.host_dma);
pr_debug("%s aeq va %p dma %llx\n", __func__, q2_pages,
(unsigned long long) c2dev->aeq.host_dma);
c2_mq_rep_init(&c2dev->aeq,
@@ -596,11 +596,11 @@ int __devinit c2_rnic_init(struct c2_dev *c2dev)
bail3:
dma_free_coherent(&c2dev->pcidev->dev,
c2dev->aeq.q_size * c2dev->aeq.msg_size,
- q2_pages, pci_unmap_addr(&c2dev->aeq, mapping));
+ q2_pages, dma_unmap_addr(&c2dev->aeq, mapping));
bail2:
dma_free_coherent(&c2dev->pcidev->dev,
c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size,
- q1_pages, pci_unmap_addr(&c2dev->rep_vq, mapping));
+ q1_pages, dma_unmap_addr(&c2dev->rep_vq, mapping));
bail1:
c2_free_mqsp_pool(c2dev, c2dev->kern_mqsp_pool);
bail0:
@@ -637,13 +637,13 @@ void __devexit c2_rnic_term(struct c2_dev *c2dev)
dma_free_coherent(&c2dev->pcidev->dev,
c2dev->aeq.q_size * c2dev->aeq.msg_size,
c2dev->aeq.msg_pool.host,
- pci_unmap_addr(&c2dev->aeq, mapping));
+ dma_unmap_addr(&c2dev->aeq, mapping));
/* Free the verbs reply queue */
dma_free_coherent(&c2dev->pcidev->dev,
c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size,
c2dev->rep_vq.msg_pool.host,
- pci_unmap_addr(&c2dev->rep_vq, mapping));
+ dma_unmap_addr(&c2dev->rep_vq, mapping));
/* Free the MQ shared pointer pool */
c2_free_mqsp_pool(c2dev, c2dev->kern_mqsp_pool);
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
index 35f286f1ad1e..005b7b52bc1e 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
@@ -174,7 +174,7 @@ int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq, int kernel)
kfree(cq->sw_queue);
return -ENOMEM;
}
- pci_unmap_addr_set(cq, mapping, cq->dma_addr);
+ dma_unmap_addr_set(cq, mapping, cq->dma_addr);
memset(cq->queue, 0, size);
setup.id = cq->cqid;
setup.base_addr = (u64) (cq->dma_addr);
@@ -297,7 +297,7 @@ int cxio_create_qp(struct cxio_rdev *rdev_p, u32 kernel_domain,
goto err4;
memset(wq->queue, 0, depth * sizeof(union t3_wr));
- pci_unmap_addr_set(wq, mapping, wq->dma_addr);
+ dma_unmap_addr_set(wq, mapping, wq->dma_addr);
wq->doorbell = (void __iomem *)rdev_p->rnic_info.kdb_addr;
if (!kernel_domain)
wq->udb = (u64)rdev_p->rnic_info.udbell_physbase +
@@ -325,7 +325,7 @@ int cxio_destroy_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
(1UL << (cq->size_log2))
* sizeof(struct t3_cqe), cq->queue,
- pci_unmap_addr(cq, mapping));
+ dma_unmap_addr(cq, mapping));
cxio_hal_put_cqid(rdev_p->rscp, cq->cqid);
return err;
}
@@ -336,7 +336,7 @@ int cxio_destroy_qp(struct cxio_rdev *rdev_p, struct t3_wq *wq,
dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
(1UL << (wq->size_log2))
* sizeof(union t3_wr), wq->queue,
- pci_unmap_addr(wq, mapping));
+ dma_unmap_addr(wq, mapping));
kfree(wq->sq);
cxio_hal_rqtpool_free(rdev_p, wq->rq_addr, (1UL << wq->rq_size_log2));
kfree(wq->rq);
@@ -537,7 +537,7 @@ static int cxio_hal_init_ctrl_qp(struct cxio_rdev *rdev_p)
err = -ENOMEM;
goto err;
}
- pci_unmap_addr_set(&rdev_p->ctrl_qp, mapping,
+ dma_unmap_addr_set(&rdev_p->ctrl_qp, mapping,
rdev_p->ctrl_qp.dma_addr);
rdev_p->ctrl_qp.doorbell = (void __iomem *)rdev_p->rnic_info.kdb_addr;
memset(rdev_p->ctrl_qp.workq, 0,
@@ -583,7 +583,7 @@ static int cxio_hal_destroy_ctrl_qp(struct cxio_rdev *rdev_p)
dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
(1UL << T3_CTRL_QP_SIZE_LOG2)
* sizeof(union t3_wr), rdev_p->ctrl_qp.workq,
- pci_unmap_addr(&rdev_p->ctrl_qp, mapping));
+ dma_unmap_addr(&rdev_p->ctrl_qp, mapping));
return cxio_hal_clear_qp_ctx(rdev_p, T3_CTRL_QP_ID);
}
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.h b/drivers/infiniband/hw/cxgb3/cxio_hal.h
index 073373c2c560..8f0caf7d4482 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.h
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.h
@@ -71,7 +71,7 @@ struct cxio_hal_ctrl_qp {
wait_queue_head_t waitq;/* wait for RspQ/CQE msg */
union t3_wr *workq; /* the work request queue */
dma_addr_t dma_addr; /* pci bus address of the workq */
- DECLARE_PCI_UNMAP_ADDR(mapping)
+ DEFINE_DMA_UNMAP_ADDR(mapping);
void __iomem *doorbell;
};
diff --git a/drivers/infiniband/hw/cxgb3/cxio_wr.h b/drivers/infiniband/hw/cxgb3/cxio_wr.h
index 15073b2da1c5..e5ddb63e7d23 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_wr.h
+++ b/drivers/infiniband/hw/cxgb3/cxio_wr.h
@@ -691,7 +691,7 @@ struct t3_swrq {
struct t3_wq {
union t3_wr *queue; /* DMA accessible memory */
dma_addr_t dma_addr; /* DMA address for HW */
- DECLARE_PCI_UNMAP_ADDR(mapping) /* unmap kruft */
+ DEFINE_DMA_UNMAP_ADDR(mapping); /* unmap kruft */
u32 error; /* 1 once we go to ERROR */
u32 qpid;
u32 wptr; /* idx to next available WR slot */
@@ -718,7 +718,7 @@ struct t3_cq {
u32 wptr;
u32 size_log2;
dma_addr_t dma_addr;
- DECLARE_PCI_UNMAP_ADDR(mapping)
+ DEFINE_DMA_UNMAP_ADDR(mapping);
struct t3_cqe *queue;
struct t3_cqe *sw_queue;
u32 sw_rptr;
diff --git a/drivers/infiniband/hw/cxgb3/iwch.c b/drivers/infiniband/hw/cxgb3/iwch.c
index 63f975f3e30f..8e77dc543dd1 100644
--- a/drivers/infiniband/hw/cxgb3/iwch.c
+++ b/drivers/infiniband/hw/cxgb3/iwch.c
@@ -47,8 +47,6 @@ MODULE_DESCRIPTION("Chelsio T3 RDMA Driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
-cxgb3_cpl_handler_func t3c_handlers[NUM_CPL_CMDS];
-
static void open_rnic_dev(struct t3cdev *);
static void close_rnic_dev(struct t3cdev *);
static void iwch_event_handler(struct t3cdev *, u32, u32);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index 4fef03296276..ebfb117ba68b 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -102,12 +102,9 @@ static unsigned int cong_flavor = 1;
module_param(cong_flavor, uint, 0644);
MODULE_PARM_DESC(cong_flavor, "TCP Congestion control flavor (default=1)");
-static void process_work(struct work_struct *work);
static struct workqueue_struct *workq;
-static DECLARE_WORK(skb_work, process_work);
static struct sk_buff_head rxq;
-static cxgb3_cpl_handler_func work_handlers[NUM_CPL_CMDS];
static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
static void ep_timeout(unsigned long arg);
@@ -151,7 +148,7 @@ int iwch_l2t_send(struct t3cdev *tdev, struct sk_buff *skb, struct l2t_entry *l2
return -EIO;
}
error = l2t_send(tdev, skb, l2e);
- if (error)
+ if (error < 0)
kfree_skb(skb);
return error;
}
@@ -167,7 +164,7 @@ int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb)
return -EIO;
}
error = cxgb3_ofld_send(tdev, skb);
- if (error)
+ if (error < 0)
kfree_skb(skb);
return error;
}
@@ -302,27 +299,6 @@ static void release_ep_resources(struct iwch_ep *ep)
put_ep(&ep->com);
}
-static void process_work(struct work_struct *work)
-{
- struct sk_buff *skb = NULL;
- void *ep;
- struct t3cdev *tdev;
- int ret;
-
- while ((skb = skb_dequeue(&rxq))) {
- ep = *((void **) (skb->cb));
- tdev = *((struct t3cdev **) (skb->cb + sizeof(void *)));
- ret = work_handlers[G_OPCODE(ntohl((__force __be32)skb->csum))](tdev, skb, ep);
- if (ret & CPL_RET_BUF_DONE)
- kfree_skb(skb);
-
- /*
- * ep was referenced in sched(), and is freed here.
- */
- put_ep((struct iwch_ep_common *)ep);
- }
-}
-
static int status2errno(int status)
{
switch (status) {
@@ -2157,7 +2133,49 @@ int iwch_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
/*
* All the CM events are handled on a work queue to have a safe context.
+ * These are the real handlers that are called from the work queue.
*/
+static const cxgb3_cpl_handler_func work_handlers[NUM_CPL_CMDS] = {
+ [CPL_ACT_ESTABLISH] = act_establish,
+ [CPL_ACT_OPEN_RPL] = act_open_rpl,
+ [CPL_RX_DATA] = rx_data,
+ [CPL_TX_DMA_ACK] = tx_ack,
+ [CPL_ABORT_RPL_RSS] = abort_rpl,
+ [CPL_ABORT_RPL] = abort_rpl,
+ [CPL_PASS_OPEN_RPL] = pass_open_rpl,
+ [CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl,
+ [CPL_PASS_ACCEPT_REQ] = pass_accept_req,
+ [CPL_PASS_ESTABLISH] = pass_establish,
+ [CPL_PEER_CLOSE] = peer_close,
+ [CPL_ABORT_REQ_RSS] = peer_abort,
+ [CPL_CLOSE_CON_RPL] = close_con_rpl,
+ [CPL_RDMA_TERMINATE] = terminate,
+ [CPL_RDMA_EC_STATUS] = ec_status,
+};
+
+static void process_work(struct work_struct *work)
+{
+ struct sk_buff *skb = NULL;
+ void *ep;
+ struct t3cdev *tdev;
+ int ret;
+
+ while ((skb = skb_dequeue(&rxq))) {
+ ep = *((void **) (skb->cb));
+ tdev = *((struct t3cdev **) (skb->cb + sizeof(void *)));
+ ret = work_handlers[G_OPCODE(ntohl((__force __be32)skb->csum))](tdev, skb, ep);
+ if (ret & CPL_RET_BUF_DONE)
+ kfree_skb(skb);
+
+ /*
+ * ep was referenced in sched(), and is freed here.
+ */
+ put_ep((struct iwch_ep_common *)ep);
+ }
+}
+
+static DECLARE_WORK(skb_work, process_work);
+
static int sched(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
struct iwch_ep_common *epc = ctx;
@@ -2189,6 +2207,29 @@ static int set_tcb_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
return CPL_RET_BUF_DONE;
}
+/*
+ * All upcalls from the T3 Core go to sched() to schedule the
+ * processing on a work queue.
+ */
+cxgb3_cpl_handler_func t3c_handlers[NUM_CPL_CMDS] = {
+ [CPL_ACT_ESTABLISH] = sched,
+ [CPL_ACT_OPEN_RPL] = sched,
+ [CPL_RX_DATA] = sched,
+ [CPL_TX_DMA_ACK] = sched,
+ [CPL_ABORT_RPL_RSS] = sched,
+ [CPL_ABORT_RPL] = sched,
+ [CPL_PASS_OPEN_RPL] = sched,
+ [CPL_CLOSE_LISTSRV_RPL] = sched,
+ [CPL_PASS_ACCEPT_REQ] = sched,
+ [CPL_PASS_ESTABLISH] = sched,
+ [CPL_PEER_CLOSE] = sched,
+ [CPL_CLOSE_CON_RPL] = sched,
+ [CPL_ABORT_REQ_RSS] = sched,
+ [CPL_RDMA_TERMINATE] = sched,
+ [CPL_RDMA_EC_STATUS] = sched,
+ [CPL_SET_TCB_RPL] = set_tcb_rpl,
+};
+
int __init iwch_cm_init(void)
{
skb_queue_head_init(&rxq);
@@ -2197,46 +2238,6 @@ int __init iwch_cm_init(void)
if (!workq)
return -ENOMEM;
- /*
- * All upcalls from the T3 Core go to sched() to
- * schedule the processing on a work queue.
- */
- t3c_handlers[CPL_ACT_ESTABLISH] = sched;
- t3c_handlers[CPL_ACT_OPEN_RPL] = sched;
- t3c_handlers[CPL_RX_DATA] = sched;
- t3c_handlers[CPL_TX_DMA_ACK] = sched;
- t3c_handlers[CPL_ABORT_RPL_RSS] = sched;
- t3c_handlers[CPL_ABORT_RPL] = sched;
- t3c_handlers[CPL_PASS_OPEN_RPL] = sched;
- t3c_handlers[CPL_CLOSE_LISTSRV_RPL] = sched;
- t3c_handlers[CPL_PASS_ACCEPT_REQ] = sched;
- t3c_handlers[CPL_PASS_ESTABLISH] = sched;
- t3c_handlers[CPL_PEER_CLOSE] = sched;
- t3c_handlers[CPL_CLOSE_CON_RPL] = sched;
- t3c_handlers[CPL_ABORT_REQ_RSS] = sched;
- t3c_handlers[CPL_RDMA_TERMINATE] = sched;
- t3c_handlers[CPL_RDMA_EC_STATUS] = sched;
- t3c_handlers[CPL_SET_TCB_RPL] = set_tcb_rpl;
-
- /*
- * These are the real handlers that are called from a
- * work queue.
- */
- work_handlers[CPL_ACT_ESTABLISH] = act_establish;
- work_handlers[CPL_ACT_OPEN_RPL] = act_open_rpl;
- work_handlers[CPL_RX_DATA] = rx_data;
- work_handlers[CPL_TX_DMA_ACK] = tx_ack;
- work_handlers[CPL_ABORT_RPL_RSS] = abort_rpl;
- work_handlers[CPL_ABORT_RPL] = abort_rpl;
- work_handlers[CPL_PASS_OPEN_RPL] = pass_open_rpl;
- work_handlers[CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl;
- work_handlers[CPL_PASS_ACCEPT_REQ] = pass_accept_req;
- work_handlers[CPL_PASS_ESTABLISH] = pass_establish;
- work_handlers[CPL_PEER_CLOSE] = peer_close;
- work_handlers[CPL_ABORT_REQ_RSS] = peer_abort;
- work_handlers[CPL_CLOSE_CON_RPL] = close_con_rpl;
- work_handlers[CPL_RDMA_TERMINATE] = terminate;
- work_handlers[CPL_RDMA_EC_STATUS] = ec_status;
return 0;
}
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 19b1c4a62a23..fca0b4b747e4 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -1428,7 +1428,7 @@ int iwch_register_device(struct iwch_dev *dev)
dev->ibdev.iwcm->rem_ref = iwch_qp_rem_ref;
dev->ibdev.iwcm->get_qp = iwch_get_qp;
- ret = ib_register_device(&dev->ibdev);
+ ret = ib_register_device(&dev->ibdev, NULL);
if (ret)
goto bail1;
diff --git a/drivers/infiniband/hw/cxgb4/Kconfig b/drivers/infiniband/hw/cxgb4/Kconfig
new file mode 100644
index 000000000000..ccb85eaaad75
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/Kconfig
@@ -0,0 +1,18 @@
+config INFINIBAND_CXGB4
+ tristate "Chelsio T4 RDMA Driver"
+ depends on CHELSIO_T4 && INET
+ select GENERIC_ALLOCATOR
+ ---help---
+ This is an iWARP/RDMA driver for the Chelsio T4 1GbE and
+ 10GbE adapters.
+
+ For general information about Chelsio and our products, visit
+ our website at <http://www.chelsio.com>.
+
+ For customer support, please visit our customer support page at
+ <http://www.chelsio.com/support.htm>.
+
+ Please send feedback to <linux-bugs@chelsio.com>.
+
+ To compile this driver as a module, choose M here: the module
+ will be called iw_cxgb4.
diff --git a/drivers/infiniband/hw/cxgb4/Makefile b/drivers/infiniband/hw/cxgb4/Makefile
new file mode 100644
index 000000000000..e31a499f0172
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/Makefile
@@ -0,0 +1,5 @@
+EXTRA_CFLAGS += -Idrivers/net/cxgb4
+
+obj-$(CONFIG_INFINIBAND_CXGB4) += iw_cxgb4.o
+
+iw_cxgb4-y := device.o cm.o provider.o mem.o cq.o qp.o resource.o ev.o
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
new file mode 100644
index 000000000000..30ce0a8eca09
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -0,0 +1,2374 @@
+/*
+ * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/workqueue.h>
+#include <linux/skbuff.h>
+#include <linux/timer.h>
+#include <linux/notifier.h>
+#include <linux/inetdevice.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+
+#include <net/neighbour.h>
+#include <net/netevent.h>
+#include <net/route.h>
+
+#include "iw_cxgb4.h"
+
+static char *states[] = {
+ "idle",
+ "listen",
+ "connecting",
+ "mpa_wait_req",
+ "mpa_req_sent",
+ "mpa_req_rcvd",
+ "mpa_rep_sent",
+ "fpdu_mode",
+ "aborting",
+ "closing",
+ "moribund",
+ "dead",
+ NULL,
+};
+
+int c4iw_max_read_depth = 8;
+module_param(c4iw_max_read_depth, int, 0644);
+MODULE_PARM_DESC(c4iw_max_read_depth, "Per-connection max ORD/IRD (default=8)");
+
+static int enable_tcp_timestamps;
+module_param(enable_tcp_timestamps, int, 0644);
+MODULE_PARM_DESC(enable_tcp_timestamps, "Enable tcp timestamps (default=0)");
+
+static int enable_tcp_sack;
+module_param(enable_tcp_sack, int, 0644);
+MODULE_PARM_DESC(enable_tcp_sack, "Enable tcp SACK (default=0)");
+
+static int enable_tcp_window_scaling = 1;
+module_param(enable_tcp_window_scaling, int, 0644);
+MODULE_PARM_DESC(enable_tcp_window_scaling,
+ "Enable tcp window scaling (default=1)");
+
+int c4iw_debug;
+module_param(c4iw_debug, int, 0644);
+MODULE_PARM_DESC(c4iw_debug, "Enable debug logging (default=0)");
+
+static int peer2peer;
+module_param(peer2peer, int, 0644);
+MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=0)");
+
+static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ;
+module_param(p2p_type, int, 0644);
+MODULE_PARM_DESC(p2p_type, "RDMAP opcode to use for the RTR message: "
+ "1=RDMA_READ 0=RDMA_WRITE (default 1)");
+
+static int ep_timeout_secs = 60;
+module_param(ep_timeout_secs, int, 0644);
+MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
+ "in seconds (default=60)");
+
+static int mpa_rev = 1;
+module_param(mpa_rev, int, 0644);
+MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
+ "1 is spec compliant. (default=1)");
+
+static int markers_enabled;
+module_param(markers_enabled, int, 0644);
+MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)");
+
+static int crc_enabled = 1;
+module_param(crc_enabled, int, 0644);
+MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)");
+
+static int rcv_win = 256 * 1024;
+module_param(rcv_win, int, 0644);
+MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)");
+
+static int snd_win = 32 * 1024;
+module_param(snd_win, int, 0644);
+MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=32KB)");
+
+static struct workqueue_struct *workq;
+
+static struct sk_buff_head rxq;
+
+static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
+static void ep_timeout(unsigned long arg);
+static void connect_reply_upcall(struct c4iw_ep *ep, int status);
+
+static LIST_HEAD(timeout_list);
+static spinlock_t timeout_lock;
+
+static void start_ep_timer(struct c4iw_ep *ep)
+{
+ PDBG("%s ep %p\n", __func__, ep);
+ if (timer_pending(&ep->timer)) {
+ PDBG("%s stopped / restarted timer ep %p\n", __func__, ep);
+ del_timer_sync(&ep->timer);
+ } else
+ c4iw_get_ep(&ep->com);
+ ep->timer.expires = jiffies + ep_timeout_secs * HZ;
+ ep->timer.data = (unsigned long)ep;
+ ep->timer.function = ep_timeout;
+ add_timer(&ep->timer);
+}
+
+static void stop_ep_timer(struct c4iw_ep *ep)
+{
+ PDBG("%s ep %p\n", __func__, ep);
+ if (!timer_pending(&ep->timer)) {
+ printk(KERN_ERR "%s timer stopped when its not running! "
+ "ep %p state %u\n", __func__, ep, ep->com.state);
+ WARN_ON(1);
+ return;
+ }
+ del_timer_sync(&ep->timer);
+ c4iw_put_ep(&ep->com);
+}
+
+static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb,
+ struct l2t_entry *l2e)
+{
+ int error = 0;
+
+ if (c4iw_fatal_error(rdev)) {
+ kfree_skb(skb);
+ PDBG("%s - device in error state - dropping\n", __func__);
+ return -EIO;
+ }
+ error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e);
+ if (error < 0)
+ kfree_skb(skb);
+ return error;
+}
+
+int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb)
+{
+ int error = 0;
+
+ if (c4iw_fatal_error(rdev)) {
+ kfree_skb(skb);
+ PDBG("%s - device in error state - dropping\n", __func__);
+ return -EIO;
+ }
+ error = cxgb4_ofld_send(rdev->lldi.ports[0], skb);
+ if (error < 0)
+ kfree_skb(skb);
+ return error;
+}
+
+static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb)
+{
+ struct cpl_tid_release *req;
+
+ skb = get_skb(skb, sizeof *req, GFP_KERNEL);
+ if (!skb)
+ return;
+ req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req));
+ INIT_TP_WR(req, hwtid);
+ OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
+ set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
+ c4iw_ofld_send(rdev, skb);
+ return;
+}
+
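+/*
+ * Derive the effective MSS for this connection from the negotiated TCP
+ * options.  The 40 bytes subtracted below presumably cover the IPv4 and
+ * TCP headers, and the further 12 bytes the TCP timestamp option.
+ */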
+static void set_emss(struct c4iw_ep *ep, u16 opt)
+{
+ ep->emss = ep->com.dev->rdev.lldi.mtus[GET_TCPOPT_MSS(opt)] - 40;
+ ep->mss = ep->emss;
+ if (GET_TCPOPT_TSTAMP(opt))
+ ep->emss -= 12;
+ if (ep->emss < 128)
+ ep->emss = 128;
+ PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, GET_TCPOPT_MSS(opt),
+ ep->mss, ep->emss);
+}
+
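+/*
+ * Endpoint state accessors: state_read()/state_set() take epc->lock,
+ * while __state_set() assumes the caller already holds it.
+ */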
+static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc)
+{
+ unsigned long flags;
+ enum c4iw_ep_state state;
+
+ spin_lock_irqsave(&epc->lock, flags);
+ state = epc->state;
+ spin_unlock_irqrestore(&epc->lock, flags);
+ return state;
+}
+
+static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
+{
+ epc->state = new;
+}
+
+static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&epc->lock, flags);
+ PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]);
+ __state_set(epc, new);
+ spin_unlock_irqrestore(&epc->lock, flags);
+ return;
+}
+
+static void *alloc_ep(int size, gfp_t gfp)
+{
+ struct c4iw_ep_common *epc;
+
+ epc = kzalloc(size, gfp);
+ if (epc) {
+ kref_init(&epc->kref);
+ spin_lock_init(&epc->lock);
+ init_waitqueue_head(&epc->waitq);
+ }
+ PDBG("%s alloc ep %p\n", __func__, epc);
+ return epc;
+}
+
+void _c4iw_free_ep(struct kref *kref)
+{
+ struct c4iw_ep *ep;
+
+ ep = container_of(kref, struct c4iw_ep, com.kref);
+ PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]);
+ if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
+ cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
+ dst_release(ep->dst);
+ cxgb4_l2t_release(ep->l2t);
+ }
+ kfree(ep);
+}
+
+static void release_ep_resources(struct c4iw_ep *ep)
+{
+ set_bit(RELEASE_RESOURCES, &ep->com.flags);
+ c4iw_put_ep(&ep->com);
+}
+
+static int status2errno(int status)
+{
+ switch (status) {
+ case CPL_ERR_NONE:
+ return 0;
+ case CPL_ERR_CONN_RESET:
+ return -ECONNRESET;
+ case CPL_ERR_ARP_MISS:
+ return -EHOSTUNREACH;
+ case CPL_ERR_CONN_TIMEDOUT:
+ return -ETIMEDOUT;
+ case CPL_ERR_TCAM_FULL:
+ return -ENOMEM;
+ case CPL_ERR_CONN_EXIST:
+ return -EADDRINUSE;
+ default:
+ return -EIO;
+ }
+}
+
+/*
+ * Try and reuse skbs already allocated...
+ */
+static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
+{
+ if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
+ skb_trim(skb, 0);
+ skb_get(skb);
+ skb_reset_transport_header(skb);
+ } else {
+ skb = alloc_skb(len, gfp);
+ }
+ return skb;
+}
+
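+/*
+ * Resolve an output route for the connection 4-tuple.  Returns NULL if
+ * no route exists; the caller is responsible for releasing the rtable.
+ */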
+static struct rtable *find_route(struct c4iw_dev *dev, __be32 local_ip,
+ __be32 peer_ip, __be16 local_port,
+ __be16 peer_port, u8 tos)
+{
+ struct rtable *rt;
+ struct flowi fl = {
+ .oif = 0,
+ .nl_u = {
+ .ip4_u = {
+ .daddr = peer_ip,
+ .saddr = local_ip,
+ .tos = tos}
+ },
+ .proto = IPPROTO_TCP,
+ .uli_u = {
+ .ports = {
+ .sport = local_port,
+ .dport = peer_port}
+ }
+ };
+
+ if (ip_route_output_flow(&init_net, &rt, &fl, NULL, 0))
+ return NULL;
+ return rt;
+}
+
+static void arp_failure_discard(void *handle, struct sk_buff *skb)
+{
+ PDBG("%s c4iw_dev %p\n", __func__, handle);
+ kfree_skb(skb);
+}
+
+/*
+ * Handle an ARP failure for an active open.
+ */
+static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
+{
+ printk(KERN_ERR MOD "ARP failure duing connect\n");
+ kfree_skb(skb);
+}
+
+/*
+ * Handle an ARP failure for a CPL_ABORT_REQ. Change it into a no RST variant
+ * and send it along.
+ */
+static void abort_arp_failure(void *handle, struct sk_buff *skb)
+{
+ struct c4iw_rdev *rdev = handle;
+ struct cpl_abort_req *req = cplhdr(skb);
+
+ PDBG("%s rdev %p\n", __func__, rdev);
+ req->cmd = CPL_ABORT_NO_RST;
+ c4iw_ofld_send(rdev, skb);
+}
+
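+/*
+ * Send a FLOWC work request to prime the firmware with the per-flow
+ * parameters (tx channel, ingress queue, initial sequence numbers, send
+ * window and MSS) before any offloaded transmits on this tid.
+ */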
+static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
+{
+ unsigned int flowclen = 80;
+ struct fw_flowc_wr *flowc;
+ int i;
+
+ skb = get_skb(skb, flowclen, GFP_KERNEL);
+ flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen);
+
+ flowc->op_to_nparams = cpu_to_be32(FW_WR_OP(FW_FLOWC_WR) |
+ FW_FLOWC_WR_NPARAMS(8));
+ flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16(DIV_ROUND_UP(flowclen,
+ 16)) | FW_WR_FLOWID(ep->hwtid));
+
+ flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
+ flowc->mnemval[0].val = cpu_to_be32(0);
+ flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
+ flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan);
+ flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
+ flowc->mnemval[2].val = cpu_to_be32(ep->tx_chan);
+ flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
+ flowc->mnemval[3].val = cpu_to_be32(ep->rss_qid);
+ flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
+ flowc->mnemval[4].val = cpu_to_be32(ep->snd_seq);
+ flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
+ flowc->mnemval[5].val = cpu_to_be32(ep->rcv_seq);
+ flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
+ flowc->mnemval[6].val = cpu_to_be32(snd_win);
+ flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
+ flowc->mnemval[7].val = cpu_to_be32(ep->emss);
+ /* Pad WR to 16 byte boundary */
+ flowc->mnemval[8].mnemonic = 0;
+ flowc->mnemval[8].val = 0;
+ for (i = 0; i < 9; i++) {
+ flowc->mnemval[i].r4[0] = 0;
+ flowc->mnemval[i].r4[1] = 0;
+ flowc->mnemval[i].r4[2] = 0;
+ }
+
+ set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
+ c4iw_ofld_send(&ep->com.dev->rdev, skb);
+}
+
+static int send_halfclose(struct c4iw_ep *ep, gfp_t gfp)
+{
+ struct cpl_close_con_req *req;
+ struct sk_buff *skb;
+ int wrlen = roundup(sizeof *req, 16);
+
+ PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ skb = get_skb(NULL, wrlen, gfp);
+ if (!skb) {
+ printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
+ return -ENOMEM;
+ }
+ set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
+ t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
+ req = (struct cpl_close_con_req *) skb_put(skb, wrlen);
+ memset(req, 0, wrlen);
+ INIT_TP_WR(req, ep->hwtid);
+ OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ,
+ ep->hwtid));
+ return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
+}
+
+static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
+{
+ struct cpl_abort_req *req;
+ int wrlen = roundup(sizeof *req, 16);
+
+ PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ skb = get_skb(skb, wrlen, gfp);
+ if (!skb) {
+ printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
+ __func__);
+ return -ENOMEM;
+ }
+ set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
+ t4_set_arp_err_handler(skb, &ep->com.dev->rdev, abort_arp_failure);
+ req = (struct cpl_abort_req *) skb_put(skb, wrlen);
+ memset(req, 0, wrlen);
+ INIT_TP_WR(req, ep->hwtid);
+ OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
+ req->cmd = CPL_ABORT_SEND_RST;
+ return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
+}
+
+static int send_connect(struct c4iw_ep *ep)
+{
+ struct cpl_act_open_req *req;
+ struct sk_buff *skb;
+ u64 opt0;
+ u32 opt2;
+ unsigned int mtu_idx;
+ int wscale;
+ int wrlen = roundup(sizeof *req, 16);
+
+ PDBG("%s ep %p atid %u\n", __func__, ep, ep->atid);
+
+ skb = get_skb(NULL, wrlen, GFP_KERNEL);
+ if (!skb) {
+ printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
+ __func__);
+ return -ENOMEM;
+ }
+ set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->txq_idx);
+
+ cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
+ wscale = compute_wscale(rcv_win);
+ opt0 = KEEP_ALIVE(1) |
+ WND_SCALE(wscale) |
+ MSS_IDX(mtu_idx) |
+ L2T_IDX(ep->l2t->idx) |
+ TX_CHAN(ep->tx_chan) |
+ SMAC_SEL(ep->smac_idx) |
+ DSCP(ep->tos) |
+ RCV_BUFSIZ(rcv_win>>10);
+ opt2 = RX_CHANNEL(0) |
+ RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
+ if (enable_tcp_timestamps)
+ opt2 |= TSTAMPS_EN(1);
+ if (enable_tcp_sack)
+ opt2 |= SACK_EN(1);
+ if (wscale && enable_tcp_window_scaling)
+ opt2 |= WND_SCALE_EN(1);
+ t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure);
+
+ req = (struct cpl_act_open_req *) skb_put(skb, wrlen);
+ INIT_TP_WR(req, 0);
+ OPCODE_TID(req) = cpu_to_be32(
+ MK_OPCODE_TID(CPL_ACT_OPEN_REQ, ((ep->rss_qid<<14)|ep->atid)));
+ req->local_port = ep->com.local_addr.sin_port;
+ req->peer_port = ep->com.remote_addr.sin_port;
+ req->local_ip = ep->com.local_addr.sin_addr.s_addr;
+ req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
+ req->opt0 = cpu_to_be64(opt0);
+ req->params = 0;
+ req->opt2 = cpu_to_be32(opt2);
+ return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
+}
+
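+/*
+ * Kick off MPA negotiation as the initiator: the MPA request (plus any
+ * private data) is sent as immediate data in a FW_OFLD_TX_DATA_WR, and
+ * the endpoint moves to MPA_REQ_SENT under the MPA timer.
+ */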
+static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb)
+{
+ int mpalen, wrlen;
+ struct fw_ofld_tx_data_wr *req;
+ struct mpa_message *mpa;
+
+ PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);
+
+ BUG_ON(skb_cloned(skb));
+
+ mpalen = sizeof(*mpa) + ep->plen;
+ wrlen = roundup(mpalen + sizeof *req, 16);
+ skb = get_skb(skb, wrlen, GFP_KERNEL);
+ if (!skb) {
+ connect_reply_upcall(ep, -ENOMEM);
+ return;
+ }
+ set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
+
+ req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
+ memset(req, 0, wrlen);
+ req->op_to_immdlen = cpu_to_be32(
+ FW_WR_OP(FW_OFLD_TX_DATA_WR) |
+ FW_WR_COMPL(1) |
+ FW_WR_IMMDLEN(mpalen));
+ req->flowid_len16 = cpu_to_be32(
+ FW_WR_FLOWID(ep->hwtid) |
+ FW_WR_LEN16(wrlen >> 4));
+ req->plen = cpu_to_be32(mpalen);
+ req->tunnel_to_proxy = cpu_to_be32(
+ FW_OFLD_TX_DATA_WR_FLUSH(1) |
+ FW_OFLD_TX_DATA_WR_SHOVE(1));
+
+ mpa = (struct mpa_message *)(req + 1);
+ memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
+ mpa->flags = (crc_enabled ? MPA_CRC : 0) |
+ (markers_enabled ? MPA_MARKERS : 0);
+ mpa->private_data_size = htons(ep->plen);
+ mpa->revision = mpa_rev;
+
+ if (ep->plen)
+ memcpy(mpa->private_data, ep->mpa_pkt + sizeof(*mpa), ep->plen);
+
+ /*
+ * Reference the mpa skb. This ensures the data area
+ * will remain in memory until the hw acks the tx.
+ * Function fw4_ack() will deref it.
+ */
+ skb_get(skb);
+ t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
+ BUG_ON(ep->mpa_skb);
+ ep->mpa_skb = skb;
+ c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
+ start_ep_timer(ep);
+ state_set(&ep->com, MPA_REQ_SENT);
+ ep->mpa_attr.initiator = 1;
+ return;
+}
+
+static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
+{
+ int mpalen, wrlen;
+ struct fw_ofld_tx_data_wr *req;
+ struct mpa_message *mpa;
+ struct sk_buff *skb;
+
+ PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);
+
+ mpalen = sizeof(*mpa) + plen;
+ wrlen = roundup(mpalen + sizeof *req, 16);
+
+ skb = get_skb(NULL, wrlen, GFP_KERNEL);
+ if (!skb) {
+ printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
+ return -ENOMEM;
+ }
+ set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
+
+ req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
+ memset(req, 0, wrlen);
+ req->op_to_immdlen = cpu_to_be32(
+ FW_WR_OP(FW_OFLD_TX_DATA_WR) |
+ FW_WR_COMPL(1) |
+ FW_WR_IMMDLEN(mpalen));
+ req->flowid_len16 = cpu_to_be32(
+ FW_WR_FLOWID(ep->hwtid) |
+ FW_WR_LEN16(wrlen >> 4));
+ req->plen = cpu_to_be32(mpalen);
+ req->tunnel_to_proxy = cpu_to_be32(
+ FW_OFLD_TX_DATA_WR_FLUSH(1) |
+ FW_OFLD_TX_DATA_WR_SHOVE(1));
+
+ mpa = (struct mpa_message *)(req + 1);
+ memset(mpa, 0, sizeof(*mpa));
+ memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
+ mpa->flags = MPA_REJECT;
+ mpa->revision = mpa_rev;
+ mpa->private_data_size = htons(plen);
+ if (plen)
+ memcpy(mpa->private_data, pdata, plen);
+
+ /*
+ * Reference the mpa skb again. This ensures the data area
+ * will remain in memory until the hw acks the tx.
+ * Function fw4_ack() will deref it.
+ */
+ skb_get(skb);
+ set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
+ t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
+ BUG_ON(ep->mpa_skb);
+ ep->mpa_skb = skb;
+ return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
+}
+
+static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
+{
+ int mpalen, wrlen;
+ struct fw_ofld_tx_data_wr *req;
+ struct mpa_message *mpa;
+ struct sk_buff *skb;
+
+ PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);
+
+ mpalen = sizeof(*mpa) + plen;
+ wrlen = roundup(mpalen + sizeof *req, 16);
+
+ skb = get_skb(NULL, wrlen, GFP_KERNEL);
+ if (!skb) {
+ printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
+ return -ENOMEM;
+ }
+ set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
+
+ req = (struct fw_ofld_tx_data_wr *) skb_put(skb, wrlen);
+ memset(req, 0, wrlen);
+ req->op_to_immdlen = cpu_to_be32(
+ FW_WR_OP(FW_OFLD_TX_DATA_WR) |
+ FW_WR_COMPL(1) |
+ FW_WR_IMMDLEN(mpalen));
+ req->flowid_len16 = cpu_to_be32(
+ FW_WR_FLOWID(ep->hwtid) |
+ FW_WR_LEN16(wrlen >> 4));
+ req->plen = cpu_to_be32(mpalen);
+ req->tunnel_to_proxy = cpu_to_be32(
+ FW_OFLD_TX_DATA_WR_FLUSH(1) |
+ FW_OFLD_TX_DATA_WR_SHOVE(1));
+
+ mpa = (struct mpa_message *)(req + 1);
+ memset(mpa, 0, sizeof(*mpa));
+ memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
+ mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
+ (markers_enabled ? MPA_MARKERS : 0);
+ mpa->revision = mpa_rev;
+ mpa->private_data_size = htons(plen);
+ if (plen)
+ memcpy(mpa->private_data, pdata, plen);
+
+ /*
+ * Reference the mpa skb. This ensures the data area
+ * will remain in memory until the hw acks the tx.
+ * Function fw4_ack() will deref it.
+ */
+ skb_get(skb);
+ t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
+ ep->mpa_skb = skb;
+ state_set(&ep->com, MPA_REP_SENT);
+ return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
+}
+
+static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+ struct c4iw_ep *ep;
+ struct cpl_act_establish *req = cplhdr(skb);
+ unsigned int tid = GET_TID(req);
+ unsigned int atid = GET_TID_TID(ntohl(req->tos_atid));
+ struct tid_info *t = dev->rdev.lldi.tids;
+
+ ep = lookup_atid(t, atid);
+
+ PDBG("%s ep %p tid %u snd_isn %u rcv_isn %u\n", __func__, ep, tid,
+ be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn));
+
+ dst_confirm(ep->dst);
+
+ /* setup the hwtid for this connection */
+ ep->hwtid = tid;
+ cxgb4_insert_tid(t, ep, tid);
+
+ ep->snd_seq = be32_to_cpu(req->snd_isn);
+ ep->rcv_seq = be32_to_cpu(req->rcv_isn);
+
+ set_emss(ep, ntohs(req->tcp_opt));
+
+ /* dealloc the atid */
+ cxgb4_free_atid(t, atid);
+
+ /* start MPA negotiation */
+ send_flowc(ep, NULL);
+ send_mpa_req(ep, skb);
+
+ return 0;
+}
+
+static void close_complete_upcall(struct c4iw_ep *ep)
+{
+ struct iw_cm_event event;
+
+ PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ memset(&event, 0, sizeof(event));
+ event.event = IW_CM_EVENT_CLOSE;
+ if (ep->com.cm_id) {
+ PDBG("close complete delivered ep %p cm_id %p tid %u\n",
+ ep, ep->com.cm_id, ep->hwtid);
+ ep->com.cm_id->event_handler(ep->com.cm_id, &event);
+ ep->com.cm_id->rem_ref(ep->com.cm_id);
+ ep->com.cm_id = NULL;
+ ep->com.qp = NULL;
+ }
+}
+
+static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
+{
+ PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ close_complete_upcall(ep);
+ state_set(&ep->com, ABORTING);
+ return send_abort(ep, skb, gfp);
+}
+
+static void peer_close_upcall(struct c4iw_ep *ep)
+{
+ struct iw_cm_event event;
+
+ PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ memset(&event, 0, sizeof(event));
+ event.event = IW_CM_EVENT_DISCONNECT;
+ if (ep->com.cm_id) {
+ PDBG("peer close delivered ep %p cm_id %p tid %u\n",
+ ep, ep->com.cm_id, ep->hwtid);
+ ep->com.cm_id->event_handler(ep->com.cm_id, &event);
+ }
+}
+
+static void peer_abort_upcall(struct c4iw_ep *ep)
+{
+ struct iw_cm_event event;
+
+ PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ memset(&event, 0, sizeof(event));
+ event.event = IW_CM_EVENT_CLOSE;
+ event.status = -ECONNRESET;
+ if (ep->com.cm_id) {
+ PDBG("abort delivered ep %p cm_id %p tid %u\n", ep,
+ ep->com.cm_id, ep->hwtid);
+ ep->com.cm_id->event_handler(ep->com.cm_id, &event);
+ ep->com.cm_id->rem_ref(ep->com.cm_id);
+ ep->com.cm_id = NULL;
+ ep->com.qp = NULL;
+ }
+}
+
+static void connect_reply_upcall(struct c4iw_ep *ep, int status)
+{
+ struct iw_cm_event event;
+
+ PDBG("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid, status);
+ memset(&event, 0, sizeof(event));
+ event.event = IW_CM_EVENT_CONNECT_REPLY;
+ event.status = status;
+ event.local_addr = ep->com.local_addr;
+ event.remote_addr = ep->com.remote_addr;
+
+ if ((status == 0) || (status == -ECONNREFUSED)) {
+ event.private_data_len = ep->plen;
+ event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
+ }
+ if (ep->com.cm_id) {
+ PDBG("%s ep %p tid %u status %d\n", __func__, ep,
+ ep->hwtid, status);
+ ep->com.cm_id->event_handler(ep->com.cm_id, &event);
+ }
+ if (status < 0) {
+ ep->com.cm_id->rem_ref(ep->com.cm_id);
+ ep->com.cm_id = NULL;
+ ep->com.qp = NULL;
+ }
+}
+
+static void connect_request_upcall(struct c4iw_ep *ep)
+{
+ struct iw_cm_event event;
+
+ PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ memset(&event, 0, sizeof(event));
+ event.event = IW_CM_EVENT_CONNECT_REQUEST;
+ event.local_addr = ep->com.local_addr;
+ event.remote_addr = ep->com.remote_addr;
+ event.private_data_len = ep->plen;
+ event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
+ event.provider_data = ep;
+ if (state_read(&ep->parent_ep->com) != DEAD) {
+ c4iw_get_ep(&ep->com);
+ ep->parent_ep->com.cm_id->event_handler(
+ ep->parent_ep->com.cm_id,
+ &event);
+ }
+ c4iw_put_ep(&ep->parent_ep->com);
+ ep->parent_ep = NULL;
+}
+
+static void established_upcall(struct c4iw_ep *ep)
+{
+ struct iw_cm_event event;
+
+ PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ memset(&event, 0, sizeof(event));
+ event.event = IW_CM_EVENT_ESTABLISHED;
+ if (ep->com.cm_id) {
+ PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ ep->com.cm_id->event_handler(ep->com.cm_id, &event);
+ }
+}
+
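+/*
+ * Return freed receive credits to the hardware via CPL_RX_DATA_ACK so
+ * the TCP receive window can reopen.  Returns the credits granted, or 0
+ * if the ack could not be allocated.
+ */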
+static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
+{
+ struct cpl_rx_data_ack *req;
+ struct sk_buff *skb;
+ int wrlen = roundup(sizeof *req, 16);
+
+ PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
+ skb = get_skb(NULL, wrlen, GFP_KERNEL);
+ if (!skb) {
+ printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n");
+ return 0;
+ }
+
+ req = (struct cpl_rx_data_ack *) skb_put(skb, wrlen);
+ memset(req, 0, wrlen);
+ INIT_TP_WR(req, ep->hwtid);
+ OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
+ ep->hwtid));
+ req->credit_dack = cpu_to_be32(credits);
+ set_wr_txq(skb, CPL_PRIORITY_ACK, ep->txq_idx);
+ c4iw_ofld_send(&ep->com.dev->rdev, skb);
+ return credits;
+}
+
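+/*
+ * Accumulate streaming-mode data into ep->mpa_pkt and, once a complete
+ * and valid MPA reply has arrived, bind the QP to the tid and move it
+ * to RTS; any validation failure aborts the connection.
+ */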
+static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
+{
+ struct mpa_message *mpa;
+ u16 plen;
+ struct c4iw_qp_attributes attrs;
+ enum c4iw_qp_attr_mask mask;
+ int err;
+
+ PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+
+ /*
+ * Stop mpa timer. If it expired, then the state has
+ * changed and we bail since ep_timeout already aborted
+ * the connection.
+ */
+ stop_ep_timer(ep);
+ if (state_read(&ep->com) != MPA_REQ_SENT)
+ return;
+
+ /*
+ * If we get more than the supported amount of private data
+ * then we must fail this connection.
+ */
+ if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
+ err = -EINVAL;
+ goto err;
+ }
+
+ /*
+ * copy the new data into our accumulation buffer.
+ */
+ skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
+ skb->len);
+ ep->mpa_pkt_len += skb->len;
+
+ /*
+ * if we don't even have the mpa message, then bail.
+ */
+ if (ep->mpa_pkt_len < sizeof(*mpa))
+ return;
+ mpa = (struct mpa_message *) ep->mpa_pkt;
+
+ /* Validate MPA header. */
+ if (mpa->revision != mpa_rev) {
+ err = -EPROTO;
+ goto err;
+ }
+ if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
+ err = -EPROTO;
+ goto err;
+ }
+
+ plen = ntohs(mpa->private_data_size);
+
+ /*
+ * Fail if there's too much private data.
+ */
+ if (plen > MPA_MAX_PRIVATE_DATA) {
+ err = -EPROTO;
+ goto err;
+ }
+
+ /*
+ * Fail if plen does not account for the full packet size.
+ */
+ if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
+ err = -EPROTO;
+ goto err;
+ }
+
+ ep->plen = (u8) plen;
+
+ /*
+ * If we don't have all the pdata yet, then bail.
+ * We'll continue processing when more data arrives.
+ */
+ if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
+ return;
+
+ if (mpa->flags & MPA_REJECT) {
+ err = -ECONNREFUSED;
+ goto err;
+ }
+
+ /*
+ * If we get here we have accumulated the entire mpa
+ * start reply message including private data. And
+ * the MPA header is valid.
+ */
+ state_set(&ep->com, FPDU_MODE);
+ ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
+ ep->mpa_attr.recv_marker_enabled = markers_enabled;
+ ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
+ ep->mpa_attr.version = mpa_rev;
+ ep->mpa_attr.p2p_type = peer2peer ? p2p_type :
+ FW_RI_INIT_P2PTYPE_DISABLED;
+ PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
+ "xmit_marker_enabled=%d, version=%d\n", __func__,
+ ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
+ ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);
+
+ attrs.mpa_attr = ep->mpa_attr;
+ attrs.max_ird = ep->ird;
+ attrs.max_ord = ep->ord;
+ attrs.llp_stream_handle = ep;
+ attrs.next_state = C4IW_QP_STATE_RTS;
+
+ mask = C4IW_QP_ATTR_NEXT_STATE |
+ C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR |
+ C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD;
+
+ /* bind QP and TID with INIT_WR */
+ err = c4iw_modify_qp(ep->com.qp->rhp,
+ ep->com.qp, mask, &attrs, 1);
+ if (err)
+ goto err;
+ goto out;
+err:
+ abort_connection(ep, skb, GFP_KERNEL);
+out:
+ connect_reply_upcall(ep, err);
+ return;
+}
+
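+/*
+ * Passive side of MPA negotiation: accumulate the peer's MPA request,
+ * validate it, record the negotiated attributes and deliver a connect
+ * request upcall to the listening cm_id.
+ */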
+static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
+{
+ struct mpa_message *mpa;
+ u16 plen;
+
+ PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+
+ if (state_read(&ep->com) != MPA_REQ_WAIT)
+ return;
+
+ /*
+ * If we get more than the supported amount of private data
+ * then we must fail this connection.
+ */
+ if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
+ stop_ep_timer(ep);
+ abort_connection(ep, skb, GFP_KERNEL);
+ return;
+ }
+
+ PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
+
+ /*
+ * Copy the new data into our accumulation buffer.
+ */
+ skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
+ skb->len);
+ ep->mpa_pkt_len += skb->len;
+
+ /*
+ * If we don't even have the mpa message, then bail.
+ * We'll continue processing when more data arrives.
+ */
+ if (ep->mpa_pkt_len < sizeof(*mpa))
+ return;
+
+ PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
+ stop_ep_timer(ep);
+ mpa = (struct mpa_message *) ep->mpa_pkt;
+
+ /*
+ * Validate MPA Header.
+ */
+ if (mpa->revision != mpa_rev) {
+ abort_connection(ep, skb, GFP_KERNEL);
+ return;
+ }
+
+ if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) {
+ abort_connection(ep, skb, GFP_KERNEL);
+ return;
+ }
+
+ plen = ntohs(mpa->private_data_size);
+
+ /*
+ * Fail if there's too much private data.
+ */
+ if (plen > MPA_MAX_PRIVATE_DATA) {
+ abort_connection(ep, skb, GFP_KERNEL);
+ return;
+ }
+
+ /*
+ * Fail if plen does not account for the full packet size.
+ */
+ if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
+ abort_connection(ep, skb, GFP_KERNEL);
+ return;
+ }
+ ep->plen = (u8) plen;
+
+ /*
+ * If we don't have all the pdata yet, then bail.
+ */
+ if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
+ return;
+
+ /*
+ * If we get here we have accumulated the entire mpa
+ * start request message including private data.
+ */
+ ep->mpa_attr.initiator = 0;
+ ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
+ ep->mpa_attr.recv_marker_enabled = markers_enabled;
+ ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
+ ep->mpa_attr.version = mpa_rev;
+ ep->mpa_attr.p2p_type = peer2peer ? p2p_type :
+ FW_RI_INIT_P2PTYPE_DISABLED;
+ PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
+ "xmit_marker_enabled=%d, version=%d p2p_type=%d\n", __func__,
+ ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
+ ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
+ ep->mpa_attr.p2p_type);
+
+ state_set(&ep->com, MPA_REQ_RCVD);
+
+ /* drive upcall */
+ connect_request_upcall(ep);
+ return;
+}
+
+static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+ struct c4iw_ep *ep;
+ struct cpl_rx_data *hdr = cplhdr(skb);
+ unsigned int dlen = ntohs(hdr->len);
+ unsigned int tid = GET_TID(hdr);
+ struct tid_info *t = dev->rdev.lldi.tids;
+
+ ep = lookup_tid(t, tid);
+ PDBG("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen);
+ skb_pull(skb, sizeof(*hdr));
+ skb_trim(skb, dlen);
+
+ ep->rcv_seq += dlen;
+ BUG_ON(ep->rcv_seq != (ntohl(hdr->seq) + dlen));
+
+ /* update RX credits */
+ update_rx_credits(ep, dlen);
+
+ switch (state_read(&ep->com)) {
+ case MPA_REQ_SENT:
+ process_mpa_reply(ep, skb);
+ break;
+ case MPA_REQ_WAIT:
+ process_mpa_request(ep, skb);
+ break;
+ case MPA_REP_SENT:
+ break;
+ default:
+ printk(KERN_ERR MOD "%s Unexpected streaming data."
+ " ep %p state %d tid %u\n",
+ __func__, ep, state_read(&ep->com), ep->hwtid);
+
+ /*
+ * The ep will timeout and inform the ULP of the failure.
+ * See ep_timeout().
+ */
+ break;
+ }
+ return 0;
+}
+
+static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+ struct c4iw_ep *ep;
+ struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
+ unsigned long flags;
+ int release = 0;
+ unsigned int tid = GET_TID(rpl);
+ struct tid_info *t = dev->rdev.lldi.tids;
+
+ ep = lookup_tid(t, tid);
+ PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ BUG_ON(!ep);
+ spin_lock_irqsave(&ep->com.lock, flags);
+ switch (ep->com.state) {
+ case ABORTING:
+ __state_set(&ep->com, DEAD);
+ release = 1;
+ break;
+ default:
+ printk(KERN_ERR "%s ep %p state %d\n",
+ __func__, ep, ep->com.state);
+ break;
+ }
+ spin_unlock_irqrestore(&ep->com.lock, flags);
+
+ if (release)
+ release_ep_resources(ep);
+ return 0;
+}
+
+/*
+ * Return whether a failed active open has allocated a TID
+ */
+static inline int act_open_has_tid(int status)
+{
+ return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST &&
+ status != CPL_ERR_ARP_MISS;
+}
+
+static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+ struct c4iw_ep *ep;
+ struct cpl_act_open_rpl *rpl = cplhdr(skb);
+ unsigned int atid = GET_TID_TID(GET_AOPEN_ATID(
+ ntohl(rpl->atid_status)));
+ struct tid_info *t = dev->rdev.lldi.tids;
+ int status = GET_AOPEN_STATUS(ntohl(rpl->atid_status));
+
+ ep = lookup_atid(t, atid);
+
+ PDBG("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid,
+ status, status2errno(status));
+
+ if (status == CPL_ERR_RTX_NEG_ADVICE) {
+ printk(KERN_WARNING MOD "Connection problems for atid %u\n",
+ atid);
+ return 0;
+ }
+
+ connect_reply_upcall(ep, status2errno(status));
+ state_set(&ep->com, DEAD);
+
+ if (status && act_open_has_tid(status))
+ cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl));
+
+ cxgb4_free_atid(t, atid);
+ dst_release(ep->dst);
+ cxgb4_l2t_release(ep->l2t);
+ c4iw_put_ep(&ep->com);
+
+ return 0;
+}
+
+static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+ struct cpl_pass_open_rpl *rpl = cplhdr(skb);
+ struct tid_info *t = dev->rdev.lldi.tids;
+ unsigned int stid = GET_TID(rpl);
+ struct c4iw_listen_ep *ep = lookup_stid(t, stid);
+
+ if (!ep) {
+ printk(KERN_ERR MOD "stid %d lookup failure!\n", stid);
+ return 0;
+ }
+ PDBG("%s ep %p status %d error %d\n", __func__, ep,
+ rpl->status, status2errno(rpl->status));
+ ep->com.rpl_err = status2errno(rpl->status);
+ ep->com.rpl_done = 1;
+ wake_up(&ep->com.waitq);
+
+ return 0;
+}
+
+static int listen_stop(struct c4iw_listen_ep *ep)
+{
+ struct sk_buff *skb;
+ struct cpl_close_listsvr_req *req;
+
+ PDBG("%s ep %p\n", __func__, ep);
+ skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
+ if (!skb) {
+ printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
+ return -ENOMEM;
+ }
+ req = (struct cpl_close_listsvr_req *) skb_put(skb, sizeof(*req));
+ INIT_TP_WR(req, 0);
+ OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ,
+ ep->stid));
+ req->reply_ctrl = cpu_to_be16(
+ QUEUENO(ep->com.dev->rdev.lldi.rxq_ids[0]));
+ set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
+ return c4iw_ofld_send(&ep->com.dev->rdev, skb);
+}
+
+static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+ struct cpl_close_listsvr_rpl *rpl = cplhdr(skb);
+ struct tid_info *t = dev->rdev.lldi.tids;
+ unsigned int stid = GET_TID(rpl);
+ struct c4iw_listen_ep *ep = lookup_stid(t, stid);
+
+ PDBG("%s ep %p\n", __func__, ep);
+ ep->com.rpl_err = status2errno(rpl->status);
+ ep->com.rpl_done = 1;
+ wake_up(&ep->com.waitq);
+ return 0;
+}
+
+static void accept_cr(struct c4iw_ep *ep, __be32 peer_ip, struct sk_buff *skb,
+ struct cpl_pass_accept_req *req)
+{
+ struct cpl_pass_accept_rpl *rpl;
+ unsigned int mtu_idx;
+ u64 opt0;
+ u32 opt2;
+ int wscale;
+
+ PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ BUG_ON(skb_cloned(skb));
+ skb_trim(skb, sizeof(*rpl));
+ skb_get(skb);
+ cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
+ wscale = compute_wscale(rcv_win);
+ opt0 = KEEP_ALIVE(1) |
+ WND_SCALE(wscale) |
+ MSS_IDX(mtu_idx) |
+ L2T_IDX(ep->l2t->idx) |
+ TX_CHAN(ep->tx_chan) |
+ SMAC_SEL(ep->smac_idx) |
+ DSCP(ep->tos) |
+ RCV_BUFSIZ(rcv_win>>10);
+ opt2 = RX_CHANNEL(0) |
+ RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
+
+ if (enable_tcp_timestamps && req->tcpopt.tstamp)
+ opt2 |= TSTAMPS_EN(1);
+ if (enable_tcp_sack && req->tcpopt.sack)
+ opt2 |= SACK_EN(1);
+ if (wscale && enable_tcp_window_scaling)
+ opt2 |= WND_SCALE_EN(1);
+
+ rpl = cplhdr(skb);
+ INIT_TP_WR(rpl, ep->hwtid);
+ OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
+ ep->hwtid));
+ rpl->opt0 = cpu_to_be64(opt0);
+ rpl->opt2 = cpu_to_be32(opt2);
+ set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->txq_idx);
+ c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
+
+ return;
+}
+
+static void reject_cr(struct c4iw_dev *dev, u32 hwtid, __be32 peer_ip,
+ struct sk_buff *skb)
+{
+ PDBG("%s c4iw_dev %p tid %u peer_ip %x\n", __func__, dev, hwtid,
+ peer_ip);
+ BUG_ON(skb_cloned(skb));
+ skb_trim(skb, sizeof(struct cpl_tid_release));
+ skb_get(skb);
+ release_tid(&dev->rdev, hwtid, skb);
+ return;
+}
+
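+/*
+ * Extract the IP addresses and TCP ports from the Ethernet/IP/TCP
+ * headers that follow the CPL_PASS_ACCEPT_REQ message.
+ */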
+static void get_4tuple(struct cpl_pass_accept_req *req,
+ __be32 *local_ip, __be32 *peer_ip,
+ __be16 *local_port, __be16 *peer_port)
+{
+ int eth_len = G_ETH_HDR_LEN(be32_to_cpu(req->hdr_len));
+ int ip_len = G_IP_HDR_LEN(be32_to_cpu(req->hdr_len));
+ struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len);
+ struct tcphdr *tcp = (struct tcphdr *)
+ ((u8 *)(req + 1) + eth_len + ip_len);
+
+ PDBG("%s saddr 0x%x daddr 0x%x sport %u dport %u\n", __func__,
+ ntohl(ip->saddr), ntohl(ip->daddr), ntohs(tcp->source),
+ ntohs(tcp->dest));
+
+ *peer_ip = ip->saddr;
+ *local_ip = ip->daddr;
+ *peer_port = tcp->source;
+ *local_port = tcp->dest;
+
+ return;
+}
+
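+/*
+ * Handle an incoming passive open: look up the listening endpoint, find
+ * a route and L2T entry for the peer, allocate and initialise a child
+ * endpoint, and send the CPL_PASS_ACCEPT_RPL.  Failures release the tid.
+ */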
+static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+ struct c4iw_ep *child_ep, *parent_ep;
+ struct cpl_pass_accept_req *req = cplhdr(skb);
+ unsigned int stid = GET_POPEN_TID(ntohl(req->tos_stid));
+ struct tid_info *t = dev->rdev.lldi.tids;
+ unsigned int hwtid = GET_TID(req);
+ struct dst_entry *dst;
+ struct l2t_entry *l2t;
+ struct rtable *rt;
+ __be32 local_ip, peer_ip;
+ __be16 local_port, peer_port;
+ struct net_device *pdev;
+ u32 tx_chan, smac_idx;
+ u16 rss_qid;
+ u32 mtu;
+ int step;
+ int txq_idx;
+
+ parent_ep = lookup_stid(t, stid);
+ PDBG("%s parent ep %p tid %u\n", __func__, parent_ep, hwtid);
+
+ get_4tuple(req, &local_ip, &peer_ip, &local_port, &peer_port);
+
+ if (state_read(&parent_ep->com) != LISTEN) {
+ printk(KERN_ERR "%s - listening ep not in LISTEN\n",
+ __func__);
+ goto reject;
+ }
+
+ /* Find output route */
+ rt = find_route(dev, local_ip, peer_ip, local_port, peer_port,
+ GET_POPEN_TOS(ntohl(req->tos_stid)));
+ if (!rt) {
+ printk(KERN_ERR MOD "%s - failed to find dst entry!\n",
+ __func__);
+ goto reject;
+ }
+ dst = &rt->u.dst;
+ if (dst->neighbour->dev->flags & IFF_LOOPBACK) {
+ pdev = ip_dev_find(&init_net, peer_ip);
+ BUG_ON(!pdev);
+ l2t = cxgb4_l2t_get(dev->rdev.lldi.l2t, dst->neighbour,
+ pdev, 0);
+ mtu = pdev->mtu;
+ tx_chan = cxgb4_port_chan(pdev);
+ smac_idx = tx_chan << 1;
+ step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan;
+ txq_idx = cxgb4_port_idx(pdev) * step;
+ step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
+ rss_qid = dev->rdev.lldi.rxq_ids[cxgb4_port_idx(pdev) * step];
+ dev_put(pdev);
+ } else {
+ l2t = cxgb4_l2t_get(dev->rdev.lldi.l2t, dst->neighbour,
+ dst->neighbour->dev, 0);
+ mtu = dst_mtu(dst);
+ tx_chan = cxgb4_port_chan(dst->neighbour->dev);
+ smac_idx = tx_chan << 1;
+ step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan;
+ txq_idx = cxgb4_port_idx(dst->neighbour->dev) * step;
+ step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
+ rss_qid = dev->rdev.lldi.rxq_ids[
+ cxgb4_port_idx(dst->neighbour->dev) * step];
+ }
+ if (!l2t) {
+ printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
+ __func__);
+ dst_release(dst);
+ goto reject;
+ }
+
+ child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
+ if (!child_ep) {
+ printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
+ __func__);
+ cxgb4_l2t_release(l2t);
+ dst_release(dst);
+ goto reject;
+ }
+ state_set(&child_ep->com, CONNECTING);
+ child_ep->com.dev = dev;
+ child_ep->com.cm_id = NULL;
+ child_ep->com.local_addr.sin_family = PF_INET;
+ child_ep->com.local_addr.sin_port = local_port;
+ child_ep->com.local_addr.sin_addr.s_addr = local_ip;
+ child_ep->com.remote_addr.sin_family = PF_INET;
+ child_ep->com.remote_addr.sin_port = peer_port;
+ child_ep->com.remote_addr.sin_addr.s_addr = peer_ip;
+ c4iw_get_ep(&parent_ep->com);
+ child_ep->parent_ep = parent_ep;
+ child_ep->tos = GET_POPEN_TOS(ntohl(req->tos_stid));
+ child_ep->l2t = l2t;
+ child_ep->dst = dst;
+ child_ep->hwtid = hwtid;
+ child_ep->tx_chan = tx_chan;
+ child_ep->smac_idx = smac_idx;
+ child_ep->rss_qid = rss_qid;
+ child_ep->mtu = mtu;
+ child_ep->txq_idx = txq_idx;
+
+ PDBG("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__,
+ tx_chan, smac_idx, rss_qid);
+
+ init_timer(&child_ep->timer);
+ cxgb4_insert_tid(t, child_ep, hwtid);
+ accept_cr(child_ep, peer_ip, skb, req);
+ goto out;
+reject:
+ reject_cr(dev, hwtid, peer_ip, skb);
+out:
+ return 0;
+}
+
+static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+ struct c4iw_ep *ep;
+ struct cpl_pass_establish *req = cplhdr(skb);
+ struct tid_info *t = dev->rdev.lldi.tids;
+ unsigned int tid = GET_TID(req);
+
+ ep = lookup_tid(t, tid);
+ PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ ep->snd_seq = be32_to_cpu(req->snd_isn);
+ ep->rcv_seq = be32_to_cpu(req->rcv_isn);
+
+ set_emss(ep, ntohs(req->tcp_opt));
+
+ dst_confirm(ep->dst);
+ state_set(&ep->com, MPA_REQ_WAIT);
+ start_ep_timer(ep);
+ send_flowc(ep, skb);
+
+ return 0;
+}
+
+static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+ struct cpl_peer_close *hdr = cplhdr(skb);
+ struct c4iw_ep *ep;
+ struct c4iw_qp_attributes attrs;
+ unsigned long flags;
+ int disconnect = 1;
+ int release = 0;
+ int closing = 0;
+ struct tid_info *t = dev->rdev.lldi.tids;
+ unsigned int tid = GET_TID(hdr);
+ int start_timer = 0;
+ int stop_timer = 0;
+
+ ep = lookup_tid(t, tid);
+ PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ dst_confirm(ep->dst);
+
+ spin_lock_irqsave(&ep->com.lock, flags);
+ switch (ep->com.state) {
+ case MPA_REQ_WAIT:
+ __state_set(&ep->com, CLOSING);
+ break;
+ case MPA_REQ_SENT:
+ __state_set(&ep->com, CLOSING);
+ connect_reply_upcall(ep, -ECONNRESET);
+ break;
+ case MPA_REQ_RCVD:
+
+ /*
+ * We're gonna mark this puppy DEAD, but keep
+ * the reference on it until the ULP accepts or
+ * rejects the CR. Also wake up anyone waiting
+ * in rdma connection migration (see c4iw_accept_cr()).
+ */
+ __state_set(&ep->com, CLOSING);
+ ep->com.rpl_done = 1;
+ ep->com.rpl_err = -ECONNRESET;
+ PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
+ wake_up(&ep->com.waitq);
+ break;
+ case MPA_REP_SENT:
+ __state_set(&ep->com, CLOSING);
+ ep->com.rpl_done = 1;
+ ep->com.rpl_err = -ECONNRESET;
+ PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
+ wake_up(&ep->com.waitq);
+ break;
+ case FPDU_MODE:
+ start_timer = 1;
+ __state_set(&ep->com, CLOSING);
+ closing = 1;
+ peer_close_upcall(ep);
+ break;
+ case ABORTING:
+ disconnect = 0;
+ break;
+ case CLOSING:
+ __state_set(&ep->com, MORIBUND);
+ disconnect = 0;
+ break;
+ case MORIBUND:
+ stop_timer = 1;
+ if (ep->com.cm_id && ep->com.qp) {
+ attrs.next_state = C4IW_QP_STATE_IDLE;
+ c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
+ C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
+ }
+ close_complete_upcall(ep);
+ __state_set(&ep->com, DEAD);
+ release = 1;
+ disconnect = 0;
+ break;
+ case DEAD:
+ disconnect = 0;
+ break;
+ default:
+ BUG_ON(1);
+ }
+ spin_unlock_irqrestore(&ep->com.lock, flags);
+ if (closing) {
+ attrs.next_state = C4IW_QP_STATE_CLOSING;
+ c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
+ C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
+ }
+ if (start_timer)
+ start_ep_timer(ep);
+ if (stop_timer)
+ stop_ep_timer(ep);
+ if (disconnect)
+ c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
+ if (release)
+ release_ep_resources(ep);
+ return 0;
+}
+
+/*
+ * Returns whether an ABORT_REQ_RSS message is negative advice.
+ */
+static int is_neg_adv_abort(unsigned int status)
+{
+ return status == CPL_ERR_RTX_NEG_ADVICE ||
+ status == CPL_ERR_PERSIST_NEG_ADVICE;
+}
+
+static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+ struct cpl_abort_req_rss *req = cplhdr(skb);
+ struct c4iw_ep *ep;
+ struct cpl_abort_rpl *rpl;
+ struct sk_buff *rpl_skb;
+ struct c4iw_qp_attributes attrs;
+ int ret;
+ int release = 0;
+ unsigned long flags;
+ struct tid_info *t = dev->rdev.lldi.tids;
+ unsigned int tid = GET_TID(req);
+ int stop_timer = 0;
+
+ ep = lookup_tid(t, tid);
+ if (is_neg_adv_abort(req->status)) {
+ PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep,
+ ep->hwtid);
+ return 0;
+ }
+ spin_lock_irqsave(&ep->com.lock, flags);
+ PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
+ ep->com.state);
+ switch (ep->com.state) {
+ case CONNECTING:
+ break;
+ case MPA_REQ_WAIT:
+ stop_timer = 1;
+ break;
+ case MPA_REQ_SENT:
+ stop_timer = 1;
+ connect_reply_upcall(ep, -ECONNRESET);
+ break;
+ case MPA_REP_SENT:
+ ep->com.rpl_done = 1;
+ ep->com.rpl_err = -ECONNRESET;
+ PDBG("waking up ep %p\n", ep);
+ wake_up(&ep->com.waitq);
+ break;
+ case MPA_REQ_RCVD:
+
+ /*
+ * We're gonna mark this puppy DEAD, but keep
+ * the reference on it until the ULP accepts or
+ * rejects the CR. Also wake up anyone waiting
+ * in rdma connection migration (see c4iw_accept_cr()).
+ */
+ ep->com.rpl_done = 1;
+ ep->com.rpl_err = -ECONNRESET;
+ PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
+ wake_up(&ep->com.waitq);
+ break;
+ case MORIBUND:
+ case CLOSING:
+ stop_timer = 1;
+ /*FALLTHROUGH*/
+ case FPDU_MODE:
+ if (ep->com.cm_id && ep->com.qp) {
+ attrs.next_state = C4IW_QP_STATE_ERROR;
+ ret = c4iw_modify_qp(ep->com.qp->rhp,
+ ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
+ &attrs, 1);
+ if (ret)
+ printk(KERN_ERR MOD
+ "%s - qp <- error failed!\n",
+ __func__);
+ }
+ peer_abort_upcall(ep);
+ break;
+ case ABORTING:
+ break;
+ case DEAD:
+ PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
+ spin_unlock_irqrestore(&ep->com.lock, flags);
+ return 0;
+ default:
+ BUG_ON(1);
+ break;
+ }
+ dst_confirm(ep->dst);
+ if (ep->com.state != ABORTING) {
+ __state_set(&ep->com, DEAD);
+ release = 1;
+ }
+ spin_unlock_irqrestore(&ep->com.lock, flags);
+
+ rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
+ if (!rpl_skb) {
+ printk(KERN_ERR MOD "%s - cannot allocate skb!\n",
+ __func__);
+ release = 1;
+ goto out;
+ }
+ set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
+ rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl));
+ INIT_TP_WR(rpl, ep->hwtid);
+ OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid));
+ rpl->cmd = CPL_ABORT_NO_RST;
+ c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb);
+out:
+ if (stop_timer)
+ stop_ep_timer(ep);
+ if (release)
+ release_ep_resources(ep);
+ return 0;
+}
+
+static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+ struct c4iw_ep *ep;
+ struct c4iw_qp_attributes attrs;
+ struct cpl_close_con_rpl *rpl = cplhdr(skb);
+ unsigned long flags;
+ int release = 0;
+ struct tid_info *t = dev->rdev.lldi.tids;
+ unsigned int tid = GET_TID(rpl);
+ int stop_timer = 0;
+
+ ep = lookup_tid(t, tid);
+
+ PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ BUG_ON(!ep);
+
+ /* The cm_id may be null if we failed to connect */
+ spin_lock_irqsave(&ep->com.lock, flags);
+ switch (ep->com.state) {
+ case CLOSING:
+ __state_set(&ep->com, MORIBUND);
+ break;
+ case MORIBUND:
+ stop_timer = 1;
+ if ((ep->com.cm_id) && (ep->com.qp)) {
+ attrs.next_state = C4IW_QP_STATE_IDLE;
+ c4iw_modify_qp(ep->com.qp->rhp,
+ ep->com.qp,
+ C4IW_QP_ATTR_NEXT_STATE,
+ &attrs, 1);
+ }
+ close_complete_upcall(ep);
+ __state_set(&ep->com, DEAD);
+ release = 1;
+ break;
+ case ABORTING:
+ case DEAD:
+ break;
+ default:
+ BUG_ON(1);
+ break;
+ }
+ spin_unlock_irqrestore(&ep->com.lock, flags);
+ if (stop_timer)
+ stop_ep_timer(ep);
+ if (release)
+ release_ep_resources(ep);
+ return 0;
+}
+
+static int terminate(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+ struct c4iw_ep *ep;
+ struct cpl_rdma_terminate *term = cplhdr(skb);
+ struct tid_info *t = dev->rdev.lldi.tids;
+ unsigned int tid = GET_TID(term);
+
+ ep = lookup_tid(t, tid);
+
+ if (state_read(&ep->com) != FPDU_MODE)
+ return 0;
+
+ PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ skb_pull(skb, sizeof *term);
+ PDBG("%s saving %d bytes of term msg\n", __func__, skb->len);
+ skb_copy_from_linear_data(skb, ep->com.qp->attr.terminate_buffer,
+ skb->len);
+ ep->com.qp->attr.terminate_msg_len = skb->len;
+ ep->com.qp->attr.is_terminate_local = 0;
+ return 0;
+}
+
+/*
+ * Upcall from the adapter indicating data has been transmitted.
+ * For us it's just the single MPA request or reply. We can now free
+ * the skb holding the MPA message.
+ */
+static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+ struct c4iw_ep *ep;
+ struct cpl_fw4_ack *hdr = cplhdr(skb);
+ u8 credits = hdr->credits;
+ unsigned int tid = GET_TID(hdr);
+ struct tid_info *t = dev->rdev.lldi.tids;
+
+ ep = lookup_tid(t, tid);
+ PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
+ if (credits == 0) {
+ PDBG(KERN_ERR "%s 0 credit ack ep %p tid %u state %u\n",
+ __func__, ep, ep->hwtid, state_read(&ep->com));
+ return 0;
+ }
+
+ dst_confirm(ep->dst);
+ if (ep->mpa_skb) {
+ PDBG("%s last streaming msg ack ep %p tid %u state %u "
+ "initiator %u freeing skb\n", __func__, ep, ep->hwtid,
+ state_read(&ep->com), ep->mpa_attr.initiator ? 1 : 0);
+ kfree_skb(ep->mpa_skb);
+ ep->mpa_skb = NULL;
+ }
+ return 0;
+}
+
+int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
+{
+ int err;
+ struct c4iw_ep *ep = to_ep(cm_id);
+ PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+
+ if (state_read(&ep->com) == DEAD) {
+ c4iw_put_ep(&ep->com);
+ return -ECONNRESET;
+ }
+ BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
+ if (mpa_rev == 0)
+ abort_connection(ep, NULL, GFP_KERNEL);
+ else {
+ err = send_mpa_reject(ep, pdata, pdata_len);
+ err = c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
+ }
+ c4iw_put_ep(&ep->com);
+ return 0;
+}
+
+int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+{
+ int err;
+ struct c4iw_qp_attributes attrs;
+ enum c4iw_qp_attr_mask mask;
+ struct c4iw_ep *ep = to_ep(cm_id);
+ struct c4iw_dev *h = to_c4iw_dev(cm_id->device);
+ struct c4iw_qp *qp = get_qhp(h, conn_param->qpn);
+
+ PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ if (state_read(&ep->com) == DEAD) {
+ err = -ECONNRESET;
+ goto err;
+ }
+
+ BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
+ BUG_ON(!qp);
+
+ if ((conn_param->ord > c4iw_max_read_depth) ||
+ (conn_param->ird > c4iw_max_read_depth)) {
+ abort_connection(ep, NULL, GFP_KERNEL);
+ err = -EINVAL;
+ goto err;
+ }
+
+ cm_id->add_ref(cm_id);
+ ep->com.cm_id = cm_id;
+ ep->com.qp = qp;
+
+ ep->ird = conn_param->ird;
+ ep->ord = conn_param->ord;
+
+ if (peer2peer && ep->ird == 0)
+ ep->ird = 1;
+
+ PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);
+
+ /* bind QP to EP and move to RTS */
+ attrs.mpa_attr = ep->mpa_attr;
+ attrs.max_ird = ep->ird;
+ attrs.max_ord = ep->ord;
+ attrs.llp_stream_handle = ep;
+ attrs.next_state = C4IW_QP_STATE_RTS;
+
+ /* bind QP and TID with INIT_WR */
+ mask = C4IW_QP_ATTR_NEXT_STATE |
+ C4IW_QP_ATTR_LLP_STREAM_HANDLE |
+ C4IW_QP_ATTR_MPA_ATTR |
+ C4IW_QP_ATTR_MAX_IRD |
+ C4IW_QP_ATTR_MAX_ORD;
+
+ err = c4iw_modify_qp(ep->com.qp->rhp,
+ ep->com.qp, mask, &attrs, 1);
+ if (err)
+ goto err1;
+ err = send_mpa_reply(ep, conn_param->private_data,
+ conn_param->private_data_len);
+ if (err)
+ goto err1;
+
+ state_set(&ep->com, FPDU_MODE);
+ established_upcall(ep);
+ c4iw_put_ep(&ep->com);
+ return 0;
+err1:
+ ep->com.cm_id = NULL;
+ ep->com.qp = NULL;
+ cm_id->rem_ref(cm_id);
+err:
+ c4iw_put_ep(&ep->com);
+ return err;
+}
+
+int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+{
+ int err = 0;
+ struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
+ struct c4iw_ep *ep;
+ struct rtable *rt;
+ struct net_device *pdev;
+ int step;
+
+ if ((conn_param->ord > c4iw_max_read_depth) ||
+ (conn_param->ird > c4iw_max_read_depth)) {
+ err = -EINVAL;
+ goto out;
+ }
+ ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
+ if (!ep) {
+ printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
+ err = -ENOMEM;
+ goto out;
+ }
+ init_timer(&ep->timer);
+ ep->plen = conn_param->private_data_len;
+ if (ep->plen)
+ memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
+ conn_param->private_data, ep->plen);
+ ep->ird = conn_param->ird;
+ ep->ord = conn_param->ord;
+
+ if (peer2peer && ep->ord == 0)
+ ep->ord = 1;
+
+ cm_id->add_ref(cm_id);
+ ep->com.dev = dev;
+ ep->com.cm_id = cm_id;
+ ep->com.qp = get_qhp(dev, conn_param->qpn);
+ BUG_ON(!ep->com.qp);
+ PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
+ ep->com.qp, cm_id);
+
+ /*
+ * Allocate an active TID to initiate a TCP connection.
+ */
+ ep->atid = cxgb4_alloc_atid(dev->rdev.lldi.tids, ep);
+ if (ep->atid == -1) {
+ printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
+ err = -ENOMEM;
+ goto fail2;
+ }
+
+ PDBG("%s saddr 0x%x sport 0x%x raddr 0x%x rport 0x%x\n", __func__,
+ ntohl(cm_id->local_addr.sin_addr.s_addr),
+ ntohs(cm_id->local_addr.sin_port),
+ ntohl(cm_id->remote_addr.sin_addr.s_addr),
+ ntohs(cm_id->remote_addr.sin_port));
+
+ /* find a route */
+ rt = find_route(dev,
+ cm_id->local_addr.sin_addr.s_addr,
+ cm_id->remote_addr.sin_addr.s_addr,
+ cm_id->local_addr.sin_port,
+ cm_id->remote_addr.sin_port, 0);
+ if (!rt) {
+ printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
+ err = -EHOSTUNREACH;
+ goto fail3;
+ }
+ ep->dst = &rt->u.dst;
+
+ /* get a l2t entry */
+ if (ep->dst->neighbour->dev->flags & IFF_LOOPBACK) {
+ PDBG("%s LOOPBACK\n", __func__);
+ pdev = ip_dev_find(&init_net,
+ cm_id->remote_addr.sin_addr.s_addr);
+ ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
+ ep->dst->neighbour,
+ pdev, 0);
+ ep->mtu = pdev->mtu;
+ ep->tx_chan = cxgb4_port_chan(pdev);
+ ep->smac_idx = ep->tx_chan << 1;
+ step = ep->com.dev->rdev.lldi.ntxq /
+ ep->com.dev->rdev.lldi.nchan;
+ ep->txq_idx = cxgb4_port_idx(pdev) * step;
+ step = ep->com.dev->rdev.lldi.nrxq /
+ ep->com.dev->rdev.lldi.nchan;
+ ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
+ cxgb4_port_idx(pdev) * step];
+ dev_put(pdev);
+ } else {
+ ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
+ ep->dst->neighbour,
+ ep->dst->neighbour->dev, 0);
+ ep->mtu = dst_mtu(ep->dst);
+ ep->tx_chan = cxgb4_port_chan(ep->dst->neighbour->dev);
+ ep->smac_idx = ep->tx_chan << 1;
+ step = ep->com.dev->rdev.lldi.ntxq /
+ ep->com.dev->rdev.lldi.nchan;
+ ep->txq_idx = cxgb4_port_idx(ep->dst->neighbour->dev) * step;
+ step = ep->com.dev->rdev.lldi.nrxq /
+ ep->com.dev->rdev.lldi.nchan;
+ ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
+ cxgb4_port_idx(ep->dst->neighbour->dev) * step];
+ }
+ if (!ep->l2t) {
+ printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
+ err = -ENOMEM;
+ goto fail4;
+ }
+
+ PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
+ __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
+ ep->l2t->idx);
+
+ state_set(&ep->com, CONNECTING);
+ ep->tos = 0;
+ ep->com.local_addr = cm_id->local_addr;
+ ep->com.remote_addr = cm_id->remote_addr;
+
+ /* send connect request to rnic */
+ err = send_connect(ep);
+ if (!err)
+ goto out;
+
+ cxgb4_l2t_release(ep->l2t);
+fail4:
+ dst_release(ep->dst);
+fail3:
+ cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
+fail2:
+ cm_id->rem_ref(cm_id);
+ c4iw_put_ep(&ep->com);
+out:
+ return err;
+}
+
+int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
+{
+ int err = 0;
+ struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
+ struct c4iw_listen_ep *ep;
+
+ might_sleep();
+
+ ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
+ if (!ep) {
+ printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
+ err = -ENOMEM;
+ goto fail1;
+ }
+ PDBG("%s ep %p\n", __func__, ep);
+ cm_id->add_ref(cm_id);
+ ep->com.cm_id = cm_id;
+ ep->com.dev = dev;
+ ep->backlog = backlog;
+ ep->com.local_addr = cm_id->local_addr;
+
+ /*
+ * Allocate a server TID.
+ */
+ ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids, PF_INET, ep);
+ if (ep->stid == -1) {
+ printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__);
+ err = -ENOMEM;
+ goto fail2;
+ }
+
+ state_set(&ep->com, LISTEN);
+ err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0], ep->stid,
+ ep->com.local_addr.sin_addr.s_addr,
+ ep->com.local_addr.sin_port,
+ ep->com.dev->rdev.lldi.rxq_ids[0]);
+ if (err)
+ goto fail3;
+
+ /* wait for pass_open_rpl */
+ wait_event(ep->com.waitq, ep->com.rpl_done);
+ err = ep->com.rpl_err;
+ if (!err) {
+ cm_id->provider_data = ep;
+ goto out;
+ }
+fail3:
+ cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET);
+fail2:
+ cm_id->rem_ref(cm_id);
+ c4iw_put_ep(&ep->com);
+fail1:
+out:
+ return err;
+}
+
+int c4iw_destroy_listen(struct iw_cm_id *cm_id)
+{
+ int err;
+ struct c4iw_listen_ep *ep = to_listen_ep(cm_id);
+
+ PDBG("%s ep %p\n", __func__, ep);
+
+ might_sleep();
+ state_set(&ep->com, DEAD);
+ ep->com.rpl_done = 0;
+ ep->com.rpl_err = 0;
+ err = listen_stop(ep);
+ if (err)
+ goto done;
+ wait_event(ep->com.waitq, ep->com.rpl_done);
+ cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET);
+done:
+ err = ep->com.rpl_err;
+ cm_id->rem_ref(cm_id);
+ c4iw_put_ep(&ep->com);
+ return err;
+}
+
+int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
+{
+ int ret = 0;
+ unsigned long flags;
+ int close = 0;
+ int fatal = 0;
+ struct c4iw_rdev *rdev;
+ int start_timer = 0;
+ int stop_timer = 0;
+
+ spin_lock_irqsave(&ep->com.lock, flags);
+
+ PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep,
+ states[ep->com.state], abrupt);
+
+ rdev = &ep->com.dev->rdev;
+ if (c4iw_fatal_error(rdev)) {
+ fatal = 1;
+ close_complete_upcall(ep);
+ ep->com.state = DEAD;
+ }
+ switch (ep->com.state) {
+ case MPA_REQ_WAIT:
+ case MPA_REQ_SENT:
+ case MPA_REQ_RCVD:
+ case MPA_REP_SENT:
+ case FPDU_MODE:
+ close = 1;
+ if (abrupt)
+ ep->com.state = ABORTING;
+ else {
+ ep->com.state = CLOSING;
+ start_timer = 1;
+ }
+ set_bit(CLOSE_SENT, &ep->com.flags);
+ break;
+ case CLOSING:
+ if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
+ close = 1;
+ if (abrupt) {
+ stop_timer = 1;
+ ep->com.state = ABORTING;
+ } else
+ ep->com.state = MORIBUND;
+ }
+ break;
+ case MORIBUND:
+ case ABORTING:
+ case DEAD:
+ PDBG("%s ignoring disconnect ep %p state %u\n",
+ __func__, ep, ep->com.state);
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ spin_unlock_irqrestore(&ep->com.lock, flags);
+ if (start_timer)
+ start_ep_timer(ep);
+ if (stop_timer)
+ stop_ep_timer(ep);
+ if (close) {
+ if (abrupt)
+ ret = abort_connection(ep, NULL, gfp);
+ else
+ ret = send_halfclose(ep, gfp);
+ if (ret)
+ fatal = 1;
+ }
+ if (fatal)
+ release_ep_resources(ep);
+ return ret;
+}
+
+/*
+ * These are the real handlers that are called from a
+ * work queue.
+ */
+static c4iw_handler_func work_handlers[NUM_CPL_CMDS] = {
+ [CPL_ACT_ESTABLISH] = act_establish,
+ [CPL_ACT_OPEN_RPL] = act_open_rpl,
+ [CPL_RX_DATA] = rx_data,
+ [CPL_ABORT_RPL_RSS] = abort_rpl,
+ [CPL_ABORT_RPL] = abort_rpl,
+ [CPL_PASS_OPEN_RPL] = pass_open_rpl,
+ [CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl,
+ [CPL_PASS_ACCEPT_REQ] = pass_accept_req,
+ [CPL_PASS_ESTABLISH] = pass_establish,
+ [CPL_PEER_CLOSE] = peer_close,
+ [CPL_ABORT_REQ_RSS] = peer_abort,
+ [CPL_CLOSE_CON_RPL] = close_con_rpl,
+ [CPL_RDMA_TERMINATE] = terminate,
+ [CPL_FW4_ACK] = fw4_ack
+};
+
+static void process_timeout(struct c4iw_ep *ep)
+{
+ struct c4iw_qp_attributes attrs;
+ int abort = 1;
+
+ spin_lock_irq(&ep->com.lock);
+ PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
+ ep->com.state);
+ switch (ep->com.state) {
+ case MPA_REQ_SENT:
+ __state_set(&ep->com, ABORTING);
+ connect_reply_upcall(ep, -ETIMEDOUT);
+ break;
+ case MPA_REQ_WAIT:
+ __state_set(&ep->com, ABORTING);
+ break;
+ case CLOSING:
+ case MORIBUND:
+ if (ep->com.cm_id && ep->com.qp) {
+ attrs.next_state = C4IW_QP_STATE_ERROR;
+ c4iw_modify_qp(ep->com.qp->rhp,
+ ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
+ &attrs, 1);
+ }
+ __state_set(&ep->com, ABORTING);
+ break;
+ default:
+ printk(KERN_ERR "%s unexpected state ep %p tid %u state %u\n",
+ __func__, ep, ep->hwtid, ep->com.state);
+ WARN_ON(1);
+ abort = 0;
+ }
+ spin_unlock_irq(&ep->com.lock);
+ if (abort)
+ abort_connection(ep, NULL, GFP_KERNEL);
+ c4iw_put_ep(&ep->com);
+}
+
+static void process_timedout_eps(void)
+{
+ struct c4iw_ep *ep;
+
+ spin_lock_irq(&timeout_lock);
+ while (!list_empty(&timeout_list)) {
+ struct list_head *tmp;
+
+ tmp = timeout_list.next;
+ list_del(tmp);
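+ /*
+ * Drop timeout_lock before calling process_timeout(): it takes
+ * ep->com.lock and may sleep (abort_connection() allocates with
+ * GFP_KERNEL), so it cannot run under this spinlock.
+ */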
+ spin_unlock_irq(&timeout_lock);
+ ep = list_entry(tmp, struct c4iw_ep, entry);
+ process_timeout(ep);
+ spin_lock_irq(&timeout_lock);
+ }
+ spin_unlock_irq(&timeout_lock);
+}
+
+static void process_work(struct work_struct *work)
+{
+ struct sk_buff *skb = NULL;
+ struct c4iw_dev *dev;
+ struct cpl_act_establish *rpl;
+ unsigned int opcode;
+ int ret;
+
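+ /*
+ * Drain the deferred-work queue: each skb is dispatched to its CPL
+ * handler and freed here when the handler returns 0; a non-zero
+ * return means the handler kept ownership of the skb.
+ */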
+ while ((skb = skb_dequeue(&rxq))) {
+ rpl = cplhdr(skb);
+ dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
+ opcode = rpl->ot.opcode;
+
+ BUG_ON(!work_handlers[opcode]);
+ ret = work_handlers[opcode](dev, skb);
+ if (!ret)
+ kfree_skb(skb);
+ }
+ process_timedout_eps();
+}
+
+static DECLARE_WORK(skb_work, process_work);
+
+static void ep_timeout(unsigned long arg)
+{
+ struct c4iw_ep *ep = (struct c4iw_ep *)arg;
+
+ spin_lock(&timeout_lock);
+ list_add_tail(&ep->entry, &timeout_list);
+ spin_unlock(&timeout_lock);
+ queue_work(workq, &skb_work);
+}
+
+/*
+ * All the CM events are handled on a work queue so they run in a
+ * safe (process) context.
+ */
+static int sched(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+
+ /*
+ * Save dev in the skb->cb area.
+ */
+ *((struct c4iw_dev **) (skb->cb + sizeof(void *))) = dev;
+
+ /*
+ * Queue the skb and schedule the worker thread.
+ */
+ skb_queue_tail(&rxq, skb);
+ queue_work(workq, &skb_work);
+ return 0;
+}
+
+static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+ struct cpl_set_tcb_rpl *rpl = cplhdr(skb);
+
+ if (rpl->status != CPL_ERR_NONE) {
+ printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u "
+ "for tid %u\n", rpl->status, GET_TID(rpl));
+ }
+ return 0;
+}
+
+static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+ struct cpl_fw6_msg *rpl = cplhdr(skb);
+ struct c4iw_wr_wait *wr_waitp;
+ int ret;
+
+ PDBG("%s type %u\n", __func__, rpl->type);
+
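+ /*
+ * Type 1 replies complete a posted firmware work request: data[0]
+ * carries the return code and data[1] the c4iw_wr_wait cookie that
+ * was stashed in the WR, letting the sleeping submitter be woken.
+ * Type 2 replies carry a CQE for async error dispatch.
+ */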
+ switch (rpl->type) {
+ case 1:
+ ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
+ wr_waitp = (__force struct c4iw_wr_wait *)rpl->data[1];
+ PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret);
+ if (wr_waitp) {
+ wr_waitp->ret = ret;
+ wr_waitp->done = 1;
+ wake_up(&wr_waitp->wait);
+ }
+ break;
+ case 2:
+ c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]);
+ break;
+ default:
+ printk(KERN_ERR MOD "%s unexpected fw6 msg type %u\n", __func__,
+ rpl->type);
+ break;
+ }
+ return 0;
+}
+
+/*
+ * Most upcalls from the T4 Core go to sched() to
+ * schedule the processing on a work queue.
+ */
+c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = {
+ [CPL_ACT_ESTABLISH] = sched,
+ [CPL_ACT_OPEN_RPL] = sched,
+ [CPL_RX_DATA] = sched,
+ [CPL_ABORT_RPL_RSS] = sched,
+ [CPL_ABORT_RPL] = sched,
+ [CPL_PASS_OPEN_RPL] = sched,
+ [CPL_CLOSE_LISTSRV_RPL] = sched,
+ [CPL_PASS_ACCEPT_REQ] = sched,
+ [CPL_PASS_ESTABLISH] = sched,
+ [CPL_PEER_CLOSE] = sched,
+ [CPL_CLOSE_CON_RPL] = sched,
+ [CPL_ABORT_REQ_RSS] = sched,
+ [CPL_RDMA_TERMINATE] = sched,
+ [CPL_FW4_ACK] = sched,
+ [CPL_SET_TCB_RPL] = set_tcb_rpl,
+ [CPL_FW6_MSG] = fw6_msg
+};
+
+int __init c4iw_cm_init(void)
+{
+ spin_lock_init(&timeout_lock);
+ skb_queue_head_init(&rxq);
+
+ workq = create_singlethread_workqueue("iw_cxgb4");
+ if (!workq)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void __exit c4iw_cm_term(void)
+{
+ WARN_ON(!list_empty(&timeout_list));
+ flush_workqueue(workq);
+ destroy_workqueue(workq);
+}
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
new file mode 100644
index 000000000000..2447f5295482
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -0,0 +1,886 @@
+/*
+ * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "iw_cxgb4.h"
+
+static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
+ struct c4iw_dev_ucontext *uctx)
+{
+ struct fw_ri_res_wr *res_wr;
+ struct fw_ri_res *res;
+ int wr_len;
+ struct c4iw_wr_wait wr_wait;
+ struct sk_buff *skb;
+ int ret;
+
+ wr_len = sizeof *res_wr + sizeof *res;
+ skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
+ if (!skb)
+ return -ENOMEM;
+ set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
+
+ res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
+ memset(res_wr, 0, wr_len);
+ res_wr->op_nres = cpu_to_be32(
+ FW_WR_OP(FW_RI_RES_WR) |
+ V_FW_RI_RES_WR_NRES(1) |
+ FW_WR_COMPL(1));
+ res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
+ res_wr->cookie = (u64)&wr_wait;
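+ /* The cookie comes back in the FW6_MSG reply and is used to wake us. */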
+ res = res_wr->res;
+ res->u.cq.restype = FW_RI_RES_TYPE_CQ;
+ res->u.cq.op = FW_RI_RES_OP_RESET;
+ res->u.cq.iqid = cpu_to_be32(cq->cqid);
+
+ c4iw_init_wr_wait(&wr_wait);
+ ret = c4iw_ofld_send(rdev, skb);
+ if (!ret) {
+ wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
+ if (!wr_wait.done) {
+ printk(KERN_ERR MOD "Device %s not responding!\n",
+ pci_name(rdev->lldi.pdev));
+ rdev->flags = T4_FATAL_ERROR;
+ ret = -EIO;
+ } else
+ ret = wr_wait.ret;
+ }
+
+ kfree(cq->sw_queue);
+ dma_free_coherent(&(rdev->lldi.pdev->dev),
+ cq->memsize, cq->queue,
+ pci_unmap_addr(cq, mapping));
+ c4iw_put_cqid(rdev, cq->cqid, uctx);
+ return ret;
+}
+
+static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
+ struct c4iw_dev_ucontext *uctx)
+{
+ struct fw_ri_res_wr *res_wr;
+ struct fw_ri_res *res;
+ int wr_len;
+ int user = (uctx != &rdev->uctx);
+ struct c4iw_wr_wait wr_wait;
+ int ret;
+ struct sk_buff *skb;
+
+ cq->cqid = c4iw_get_cqid(rdev, uctx);
+ if (!cq->cqid) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+
+ if (!user) {
+ cq->sw_queue = kzalloc(cq->memsize, GFP_KERNEL);
+ if (!cq->sw_queue) {
+ ret = -ENOMEM;
+ goto err2;
+ }
+ }
+ cq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, cq->memsize,
+ &cq->dma_addr, GFP_KERNEL);
+ if (!cq->queue) {
+ ret = -ENOMEM;
+ goto err3;
+ }
+ pci_unmap_addr_set(cq, mapping, cq->dma_addr);
+ memset(cq->queue, 0, cq->memsize);
+
+ /* build fw_ri_res_wr */
+ wr_len = sizeof *res_wr + sizeof *res;
+
+ skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto err4;
+ }
+ set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
+
+ res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
+ memset(res_wr, 0, wr_len);
+ res_wr->op_nres = cpu_to_be32(
+ FW_WR_OP(FW_RI_RES_WR) |
+ V_FW_RI_RES_WR_NRES(1) |
+ FW_WR_COMPL(1));
+ res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
+ res_wr->cookie = (u64)&wr_wait;
+ res = res_wr->res;
+ res->u.cq.restype = FW_RI_RES_TYPE_CQ;
+ res->u.cq.op = FW_RI_RES_OP_WRITE;
+ res->u.cq.iqid = cpu_to_be32(cq->cqid);
+ res->u.cq.iqandst_to_iqandstindex = cpu_to_be32(
+ V_FW_RI_RES_WR_IQANUS(0) |
+ V_FW_RI_RES_WR_IQANUD(1) |
+ F_FW_RI_RES_WR_IQANDST |
+ V_FW_RI_RES_WR_IQANDSTINDEX(*rdev->lldi.rxq_ids));
+ res->u.cq.iqdroprss_to_iqesize = cpu_to_be16(
+ F_FW_RI_RES_WR_IQDROPRSS |
+ V_FW_RI_RES_WR_IQPCIECH(2) |
+ V_FW_RI_RES_WR_IQINTCNTTHRESH(0) |
+ F_FW_RI_RES_WR_IQO |
+ V_FW_RI_RES_WR_IQESIZE(1));
+ res->u.cq.iqsize = cpu_to_be16(cq->size);
+ res->u.cq.iqaddr = cpu_to_be64(cq->dma_addr);
+
+ c4iw_init_wr_wait(&wr_wait);
+
+ ret = c4iw_ofld_send(rdev, skb);
+ if (ret)
+ goto err4;
+ PDBG("%s wait_event wr_wait %p\n", __func__, &wr_wait);
+ wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
+ if (!wr_wait.done) {
+ printk(KERN_ERR MOD "Device %s not responding!\n",
+ pci_name(rdev->lldi.pdev));
+ rdev->flags = T4_FATAL_ERROR;
+ ret = -EIO;
+ } else
+ ret = wr_wait.ret;
+ if (ret)
+ goto err4;
+
+ cq->gen = 1;
+ cq->gts = rdev->lldi.gts_reg;
+ cq->rdev = rdev;
+ if (user) {
+ cq->ugts = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
+ (cq->cqid << rdev->cqshift);
+ cq->ugts &= PAGE_MASK;
+ }
+ return 0;
+err4:
+ dma_free_coherent(&rdev->lldi.pdev->dev, cq->memsize, cq->queue,
+ pci_unmap_addr(cq, mapping));
+err3:
+ kfree(cq->sw_queue);
+err2:
+ c4iw_put_cqid(rdev, cq->cqid, uctx);
+err1:
+ return ret;
+}
+
+static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq)
+{
+ struct t4_cqe cqe;
+
+ PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
+ wq, cq, cq->sw_cidx, cq->sw_pidx);
+ memset(&cqe, 0, sizeof(cqe));
+ cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
+ V_CQE_OPCODE(FW_RI_SEND) |
+ V_CQE_TYPE(0) |
+ V_CQE_SWCQE(1) |
+ V_CQE_QPID(wq->rq.qid));
+ cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
+ cq->sw_queue[cq->sw_pidx] = cqe;
+ t4_swcq_produce(cq);
+}
+
+int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count)
+{
+ int flushed = 0;
+ int in_use = wq->rq.in_use - count;
+
+ BUG_ON(in_use < 0);
+ PDBG("%s wq %p cq %p rq.in_use %u skip count %u\n", __func__,
+ wq, cq, wq->rq.in_use, count);
+ while (in_use--) {
+ insert_recv_cqe(wq, cq);
+ flushed++;
+ }
+ return flushed;
+}
+
+static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq,
+ struct t4_swsqe *swcqe)
+{
+ struct t4_cqe cqe;
+
+ PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
+ wq, cq, cq->sw_cidx, cq->sw_pidx);
+ memset(&cqe, 0, sizeof(cqe));
+ cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
+ V_CQE_OPCODE(swcqe->opcode) |
+ V_CQE_TYPE(1) |
+ V_CQE_SWCQE(1) |
+ V_CQE_QPID(wq->sq.qid));
+ CQE_WRID_SQ_IDX(&cqe) = swcqe->idx;
+ cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
+ cq->sw_queue[cq->sw_pidx] = cqe;
+ t4_swcq_produce(cq);
+}
+
+int c4iw_flush_sq(struct t4_wq *wq, struct t4_cq *cq, int count)
+{
+ int flushed = 0;
+ struct t4_swsqe *swsqe = &wq->sq.sw_sq[wq->sq.cidx + count];
+ int in_use = wq->sq.in_use - count;
+
+ BUG_ON(in_use < 0);
+ while (in_use--) {
+ swsqe->signaled = 0;
+ insert_sq_cqe(wq, cq, swsqe);
+ swsqe++;
+ if (swsqe == (wq->sq.sw_sq + wq->sq.size))
+ swsqe = wq->sq.sw_sq;
+ flushed++;
+ }
+ return flushed;
+}
+
+/*
+ * Move all CQEs from the HWCQ into the SWCQ.
+ */
+void c4iw_flush_hw_cq(struct t4_cq *cq)
+{
+ struct t4_cqe *cqe = NULL, *swcqe;
+ int ret;
+
+ PDBG("%s cq %p cqid 0x%x\n", __func__, cq, cq->cqid);
+ ret = t4_next_hw_cqe(cq, &cqe);
+ while (!ret) {
+ PDBG("%s flushing hwcq cidx 0x%x swcq pidx 0x%x\n",
+ __func__, cq->cidx, cq->sw_pidx);
+ swcqe = &cq->sw_queue[cq->sw_pidx];
+ *swcqe = *cqe;
+ swcqe->header |= cpu_to_be32(V_CQE_SWCQE(1));
+ t4_swcq_produce(cq);
+ t4_hwcq_consume(cq);
+ ret = t4_next_hw_cqe(cq, &cqe);
+ }
+}
+
+static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq)
+{
+ if (CQE_OPCODE(cqe) == FW_RI_TERMINATE)
+ return 0;
+
+ if ((CQE_OPCODE(cqe) == FW_RI_RDMA_WRITE) && RQ_TYPE(cqe))
+ return 0;
+
+ if ((CQE_OPCODE(cqe) == FW_RI_READ_RESP) && SQ_TYPE(cqe))
+ return 0;
+
+ if (CQE_SEND_OPCODE(cqe) && RQ_TYPE(cqe) && t4_rq_empty(wq))
+ return 0;
+ return 1;
+}
+
+void c4iw_count_scqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
+{
+ struct t4_cqe *cqe;
+ u32 ptr;
+
+ *count = 0;
+ ptr = cq->sw_cidx;
+ while (ptr != cq->sw_pidx) {
+ cqe = &cq->sw_queue[ptr];
+ if ((SQ_TYPE(cqe) || ((CQE_OPCODE(cqe) == FW_RI_READ_RESP) &&
+ wq->sq.oldest_read)) &&
+ (CQE_QPID(cqe) == wq->sq.qid))
+ (*count)++;
+ if (++ptr == cq->size)
+ ptr = 0;
+ }
+ PDBG("%s cq %p count %d\n", __func__, cq, *count);
+}
+
+void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
+{
+ struct t4_cqe *cqe;
+ u32 ptr;
+
+ *count = 0;
+ PDBG("%s count zero %d\n", __func__, *count);
+ ptr = cq->sw_cidx;
+ while (ptr != cq->sw_pidx) {
+ cqe = &cq->sw_queue[ptr];
+ if (RQ_TYPE(cqe) && (CQE_OPCODE(cqe) != FW_RI_READ_RESP) &&
+ (CQE_QPID(cqe) == wq->rq.qid) && cqe_completes_wr(cqe, wq))
+ (*count)++;
+ if (++ptr == cq->size)
+ ptr = 0;
+ }
+ PDBG("%s cq %p count %d\n", __func__, cq, *count);
+}
+
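+/*
+ * Walk the SW SQ from cidx: skip over unsignaled WRs and, when a
+ * signaled WR with a saved completion is found, move its CQE into the
+ * SW CQ and retire the unsignaled WRs that preceded it.
+ */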
+static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq)
+{
+ struct t4_swsqe *swsqe;
+ u16 ptr = wq->sq.cidx;
+ int count = wq->sq.in_use;
+ int unsignaled = 0;
+
+ swsqe = &wq->sq.sw_sq[ptr];
+ while (count--)
+ if (!swsqe->signaled) {
+ if (++ptr == wq->sq.size)
+ ptr = 0;
+ swsqe = &wq->sq.sw_sq[ptr];
+ unsignaled++;
+ } else if (swsqe->complete) {
+
+ /*
+ * Insert this completed cqe into the swcq.
+ */
+ PDBG("%s moving cqe into swcq sq idx %u cq idx %u\n",
+ __func__, ptr, cq->sw_pidx);
+ swsqe->cqe.header |= htonl(V_CQE_SWCQE(1));
+ cq->sw_queue[cq->sw_pidx] = swsqe->cqe;
+ t4_swcq_produce(cq);
+ swsqe->signaled = 0;
+ wq->sq.in_use -= unsignaled;
+ break;
+ } else
+ break;
+}
+
+static void create_read_req_cqe(struct t4_wq *wq, struct t4_cqe *hw_cqe,
+ struct t4_cqe *read_cqe)
+{
+ read_cqe->u.scqe.cidx = wq->sq.oldest_read->idx;
+ read_cqe->len = cpu_to_be32(wq->sq.oldest_read->read_len);
+ read_cqe->header = htonl(V_CQE_QPID(CQE_QPID(hw_cqe)) |
+ V_CQE_SWCQE(SW_CQE(hw_cqe)) |
+ V_CQE_OPCODE(FW_RI_READ_REQ) |
+ V_CQE_TYPE(1));
+ read_cqe->bits_type_ts = hw_cqe->bits_type_ts;
+}
+
+/*
+ * Advance wq->sq.oldest_read to the next read WR in the SWSQ, or NULL.
+ */
+static void advance_oldest_read(struct t4_wq *wq)
+{
+
+ u32 rptr = wq->sq.oldest_read - wq->sq.sw_sq + 1;
+
+ if (rptr == wq->sq.size)
+ rptr = 0;
+ while (rptr != wq->sq.pidx) {
+ wq->sq.oldest_read = &wq->sq.sw_sq[rptr];
+
+ if (wq->sq.oldest_read->opcode == FW_RI_READ_REQ)
+ return;
+ if (++rptr == wq->sq.size)
+ rptr = 0;
+ }
+ wq->sq.oldest_read = NULL;
+}
+
+/*
+ * poll_cq
+ *
+ * Caller must:
+ * check the validity of the first CQE,
+ * supply the wq associated with the qpid.
+ *
+ * credit: cq credit to return to sge.
+ * cqe_flushed: 1 iff the CQE is flushed.
+ * cqe: copy of the polled CQE.
+ *
+ * return value:
+ * 0 CQE returned ok.
+ * -EAGAIN CQE skipped, try again.
+ * -EOVERFLOW CQ overflow detected.
+ */
+static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
+ u8 *cqe_flushed, u64 *cookie, u32 *credit)
+{
+ int ret = 0;
+ struct t4_cqe *hw_cqe, read_cqe;
+
+ *cqe_flushed = 0;
+ *credit = 0;
+ ret = t4_next_cqe(cq, &hw_cqe);
+ if (ret)
+ return ret;
+
+ PDBG("%s CQE OVF %u qpid 0x%0x genbit %u type %u status 0x%0x"
+ " opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
+ __func__, CQE_OVFBIT(hw_cqe), CQE_QPID(hw_cqe),
+ CQE_GENBIT(hw_cqe), CQE_TYPE(hw_cqe), CQE_STATUS(hw_cqe),
+ CQE_OPCODE(hw_cqe), CQE_LEN(hw_cqe), CQE_WRID_HI(hw_cqe),
+ CQE_WRID_LOW(hw_cqe));
+
+ /*
+ * Skip CQEs not affiliated with a QP.
+ */
+ if (wq == NULL) {
+ ret = -EAGAIN;
+ goto skip_cqe;
+ }
+
+ /*
+ * Gotta tweak READ completions:
+ * 1) the cqe doesn't contain the sq_wptr from the wr.
+ * 2) opcode not reflected from the wr.
+ * 3) read_len not reflected from the wr.
+ * 4) cq_type is RQ_TYPE not SQ_TYPE.
+ */
+ if (RQ_TYPE(hw_cqe) && (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP)) {
+
+ /*
+ * If this is an unsolicited read response, then the read
+ * was generated by the kernel driver as part of peer-2-peer
+ * connection setup. So ignore the completion.
+ */
+ if (!wq->sq.oldest_read) {
+ if (CQE_STATUS(hw_cqe))
+ t4_set_wq_in_error(wq);
+ ret = -EAGAIN;
+ goto skip_cqe;
+ }
+
+ /*
+ * Don't write to the HWCQ, so create a new read req CQE
+ * in local memory.
+ */
+ create_read_req_cqe(wq, hw_cqe, &read_cqe);
+ hw_cqe = &read_cqe;
+ advance_oldest_read(wq);
+ }
+
+ if (CQE_STATUS(hw_cqe) || t4_wq_in_error(wq)) {
+ *cqe_flushed = t4_wq_in_error(wq);
+ t4_set_wq_in_error(wq);
+ goto proc_cqe;
+ }
+
+ /*
+ * RECV completion.
+ */
+ if (RQ_TYPE(hw_cqe)) {
+
+ /*
+ * HW only validates 4 bits of MSN. So we must validate that
+ * the MSN in the SEND is the next expected MSN. If it's not,
+ * then we complete this with T4_ERR_MSN and mark the wq in
+ * error.
+ */
+
+ if (t4_rq_empty(wq)) {
+ t4_set_wq_in_error(wq);
+ ret = -EAGAIN;
+ goto skip_cqe;
+ }
+ if (unlikely((CQE_WRID_MSN(hw_cqe) != (wq->rq.msn)))) {
+ t4_set_wq_in_error(wq);
+ hw_cqe->header |= htonl(V_CQE_STATUS(T4_ERR_MSN));
+ goto proc_cqe;
+ }
+ goto proc_cqe;
+ }
+
+ /*
+ * If we get here it's a send completion.
+ *
+ * Handle out of order completion. These get stuffed
+ * in the SW SQ. Then the SW SQ is walked to move any
+ * now in-order completions into the SW CQ. This handles
+ * 2 cases:
+ * 1) reaping unsignaled WRs when the first subsequent
+ * signaled WR is completed.
+ * 2) out of order read completions.
+ */
+ if (!SW_CQE(hw_cqe) && (CQE_WRID_SQ_IDX(hw_cqe) != wq->sq.cidx)) {
+ struct t4_swsqe *swsqe;
+
+ PDBG("%s out of order completion going in sw_sq at idx %u\n",
+ __func__, CQE_WRID_SQ_IDX(hw_cqe));
+ swsqe = &wq->sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
+ swsqe->cqe = *hw_cqe;
+ swsqe->complete = 1;
+ ret = -EAGAIN;
+ goto flush_wq;
+ }
+
+proc_cqe:
+ *cqe = *hw_cqe;
+
+ /*
+ * Reap the associated WR(s) that are freed up with this
+ * completion.
+ */
+ if (SQ_TYPE(hw_cqe)) {
+ wq->sq.cidx = CQE_WRID_SQ_IDX(hw_cqe);
+ PDBG("%s completing sq idx %u\n", __func__, wq->sq.cidx);
+ *cookie = wq->sq.sw_sq[wq->sq.cidx].wr_id;
+ t4_sq_consume(wq);
+ } else {
+ PDBG("%s completing rq idx %u\n", __func__, wq->rq.cidx);
+ *cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id;
+ BUG_ON(t4_rq_empty(wq));
+ t4_rq_consume(wq);
+ }
+
+flush_wq:
+ /*
+ * Flush any completed cqes that are now in-order.
+ */
+ flush_completed_wrs(wq, cq);
+
+skip_cqe:
+ if (SW_CQE(hw_cqe)) {
+ PDBG("%s cq %p cqid 0x%x skip sw cqe cidx %u\n",
+ __func__, cq, cq->cqid, cq->sw_cidx);
+ t4_swcq_consume(cq);
+ } else {
+ PDBG("%s cq %p cqid 0x%x skip hw cqe cidx %u\n",
+ __func__, cq, cq->cqid, cq->cidx);
+ t4_hwcq_consume(cq);
+ }
+ return ret;
+}
+
+/*
+ * Get one cq entry from c4iw and map it to openib.
+ *
+ * Returns:
+ * 0 cqe returned
+ * -ENODATA CQ is empty
+ * -EAGAIN caller must try again
+ * any other -errno fatal error
+ */
+static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
+{
+ struct c4iw_qp *qhp = NULL;
+ struct t4_cqe cqe = {0, 0}, *rd_cqe;
+ struct t4_wq *wq;
+ u32 credit = 0;
+ u8 cqe_flushed;
+ u64 cookie = 0;
+ int ret;
+
+ ret = t4_next_cqe(&chp->cq, &rd_cqe);
+
+ if (ret)
+ return ret;
+
+ qhp = get_qhp(chp->rhp, CQE_QPID(rd_cqe));
+ if (!qhp)
+ wq = NULL;
+ else {
+ spin_lock(&qhp->lock);
+ wq = &(qhp->wq);
+ }
+ ret = poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie, &credit);
+ if (ret)
+ goto out;
+
+ wc->wr_id = cookie;
+ wc->qp = &qhp->ibqp;
+ wc->vendor_err = CQE_STATUS(&cqe);
+ wc->wc_flags = 0;
+
+ PDBG("%s qpid 0x%x type %d opcode %d status 0x%x len %u wrid hi 0x%x "
+ "lo 0x%x cookie 0x%llx\n", __func__, CQE_QPID(&cqe),
+ CQE_TYPE(&cqe), CQE_OPCODE(&cqe), CQE_STATUS(&cqe), CQE_LEN(&cqe),
+ CQE_WRID_HI(&cqe), CQE_WRID_LOW(&cqe), (unsigned long long)cookie);
+
+ if (CQE_TYPE(&cqe) == 0) {
+ if (!CQE_STATUS(&cqe))
+ wc->byte_len = CQE_LEN(&cqe);
+ else
+ wc->byte_len = 0;
+ wc->opcode = IB_WC_RECV;
+ if (CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_INV ||
+ CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_SE_INV) {
+ wc->ex.invalidate_rkey = CQE_WRID_STAG(&cqe);
+ wc->wc_flags |= IB_WC_WITH_INVALIDATE;
+ }
+ } else {
+ switch (CQE_OPCODE(&cqe)) {
+ case FW_RI_RDMA_WRITE:
+ wc->opcode = IB_WC_RDMA_WRITE;
+ break;
+ case FW_RI_READ_REQ:
+ wc->opcode = IB_WC_RDMA_READ;
+ wc->byte_len = CQE_LEN(&cqe);
+ break;
+ case FW_RI_SEND_WITH_INV:
+ case FW_RI_SEND_WITH_SE_INV:
+ wc->opcode = IB_WC_SEND;
+ wc->wc_flags |= IB_WC_WITH_INVALIDATE;
+ break;
+ case FW_RI_SEND:
+ case FW_RI_SEND_WITH_SE:
+ wc->opcode = IB_WC_SEND;
+ break;
+ case FW_RI_BIND_MW:
+ wc->opcode = IB_WC_BIND_MW;
+ break;
+
+ case FW_RI_LOCAL_INV:
+ wc->opcode = IB_WC_LOCAL_INV;
+ break;
+ case FW_RI_FAST_REGISTER:
+ wc->opcode = IB_WC_FAST_REG_MR;
+ break;
+ default:
+ printk(KERN_ERR MOD "Unexpected opcode %d "
+ "in the CQE received for QPID=0x%0x\n",
+ CQE_OPCODE(&cqe), CQE_QPID(&cqe));
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ if (cqe_flushed)
+ wc->status = IB_WC_WR_FLUSH_ERR;
+ else {
+
+ switch (CQE_STATUS(&cqe)) {
+ case T4_ERR_SUCCESS:
+ wc->status = IB_WC_SUCCESS;
+ break;
+ case T4_ERR_STAG:
+ wc->status = IB_WC_LOC_ACCESS_ERR;
+ break;
+ case T4_ERR_PDID:
+ wc->status = IB_WC_LOC_PROT_ERR;
+ break;
+ case T4_ERR_QPID:
+ case T4_ERR_ACCESS:
+ wc->status = IB_WC_LOC_ACCESS_ERR;
+ break;
+ case T4_ERR_WRAP:
+ wc->status = IB_WC_GENERAL_ERR;
+ break;
+ case T4_ERR_BOUND:
+ wc->status = IB_WC_LOC_LEN_ERR;
+ break;
+ case T4_ERR_INVALIDATE_SHARED_MR:
+ case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
+ wc->status = IB_WC_MW_BIND_ERR;
+ break;
+ case T4_ERR_CRC:
+ case T4_ERR_MARKER:
+ case T4_ERR_PDU_LEN_ERR:
+ case T4_ERR_OUT_OF_RQE:
+ case T4_ERR_DDP_VERSION:
+ case T4_ERR_RDMA_VERSION:
+ case T4_ERR_DDP_QUEUE_NUM:
+ case T4_ERR_MSN:
+ case T4_ERR_TBIT:
+ case T4_ERR_MO:
+ case T4_ERR_MSN_RANGE:
+ case T4_ERR_IRD_OVERFLOW:
+ case T4_ERR_OPCODE:
+ wc->status = IB_WC_FATAL_ERR;
+ break;
+ case T4_ERR_SWFLUSH:
+ wc->status = IB_WC_WR_FLUSH_ERR;
+ break;
+ default:
+ printk(KERN_ERR MOD
+ "Unexpected cqe_status 0x%x for QPID=0x%0x\n",
+ CQE_STATUS(&cqe), CQE_QPID(&cqe));
+ ret = -EINVAL;
+ }
+ }
+out:
+ if (wq)
+ spin_unlock(&qhp->lock);
+ return ret;
+}
+
+int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
+{
+ struct c4iw_cq *chp;
+ unsigned long flags;
+ int npolled;
+ int err = 0;
+
+ chp = to_c4iw_cq(ibcq);
+
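+ /*
+ * Keep polling past -EAGAIN (a skipped CQE); stop once num_entries
+ * have been returned, the CQ is empty (-ENODATA) or a fatal error
+ * occurs.
+ */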
+ spin_lock_irqsave(&chp->lock, flags);
+ for (npolled = 0; npolled < num_entries; ++npolled) {
+ do {
+ err = c4iw_poll_cq_one(chp, wc + npolled);
+ } while (err == -EAGAIN);
+ if (err)
+ break;
+ }
+ spin_unlock_irqrestore(&chp->lock, flags);
+ return !err || err == -ENODATA ? npolled : err;
+}
+
+int c4iw_destroy_cq(struct ib_cq *ib_cq)
+{
+ struct c4iw_cq *chp;
+ struct c4iw_ucontext *ucontext;
+
+ PDBG("%s ib_cq %p\n", __func__, ib_cq);
+ chp = to_c4iw_cq(ib_cq);
+
+ remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
+ atomic_dec(&chp->refcnt);
+ wait_event(chp->wait, !atomic_read(&chp->refcnt));
+
+ ucontext = ib_cq->uobject ? to_c4iw_ucontext(ib_cq->uobject->context)
+ : NULL;
+ destroy_cq(&chp->rhp->rdev, &chp->cq,
+ ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx);
+ kfree(chp);
+ return 0;
+}
+
+struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
+ int vector, struct ib_ucontext *ib_context,
+ struct ib_udata *udata)
+{
+ struct c4iw_dev *rhp;
+ struct c4iw_cq *chp;
+ struct c4iw_create_cq_resp uresp;
+ struct c4iw_ucontext *ucontext = NULL;
+ int ret;
+ size_t memsize;
+ struct c4iw_mm_entry *mm, *mm2;
+
+ PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
+
+ rhp = to_c4iw_dev(ibdev);
+
+ chp = kzalloc(sizeof(*chp), GFP_KERNEL);
+ if (!chp)
+ return ERR_PTR(-ENOMEM);
+
+ if (ib_context)
+ ucontext = to_c4iw_ucontext(ib_context);
+
+ /* account for the status page. */
+ entries++;
+
+ /* IQ needs one extra entry to differentiate full vs empty. */
+ entries++;
+
+ /*
+ * entries must be multiple of 16 for HW.
+ */
+ entries = roundup(entries, 16);
+ memsize = entries * sizeof *chp->cq.queue;
+
+ /*
+ * memsize must be a multiple of the page size if it's a user CQ.
+ */
+ if (ucontext)
+ memsize = roundup(memsize, PAGE_SIZE);
+ chp->cq.size = entries;
+ chp->cq.memsize = memsize;
+
+ ret = create_cq(&rhp->rdev, &chp->cq,
+ ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
+ if (ret)
+ goto err1;
+
+ chp->rhp = rhp;
+ chp->cq.size--; /* status page */
+ chp->ibcq.cqe = chp->cq.size - 1;
+ spin_lock_init(&chp->lock);
+ atomic_set(&chp->refcnt, 1);
+ init_waitqueue_head(&chp->wait);
+ ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);
+ if (ret)
+ goto err2;
+
+ if (ucontext) {
+ mm = kmalloc(sizeof *mm, GFP_KERNEL);
+ if (!mm)
+ goto err3;
+ mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
+ if (!mm2)
+ goto err4;
+
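+ /*
+ * Hand the CQ geometry and two mmap keys back to userspace: one key
+ * maps the CQ memory and the other the GTS doorbell page, via the
+ * insert_mmap() entries registered below.
+ */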
+ uresp.qid_mask = rhp->rdev.cqmask;
+ uresp.cqid = chp->cq.cqid;
+ uresp.size = chp->cq.size;
+ uresp.memsize = chp->cq.memsize;
+ spin_lock(&ucontext->mmap_lock);
+ uresp.key = ucontext->key;
+ ucontext->key += PAGE_SIZE;
+ uresp.gts_key = ucontext->key;
+ ucontext->key += PAGE_SIZE;
+ spin_unlock(&ucontext->mmap_lock);
+ ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
+ if (ret)
+ goto err5;
+
+ mm->key = uresp.key;
+ mm->addr = virt_to_phys(chp->cq.queue);
+ mm->len = chp->cq.memsize;
+ insert_mmap(ucontext, mm);
+
+ mm2->key = uresp.gts_key;
+ mm2->addr = chp->cq.ugts;
+ mm2->len = PAGE_SIZE;
+ insert_mmap(ucontext, mm2);
+ }
+ PDBG("%s cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx\n",
+ __func__, chp->cq.cqid, chp, chp->cq.size,
+ chp->cq.memsize,
+ (unsigned long long) chp->cq.dma_addr);
+ return &chp->ibcq;
+err5:
+ kfree(mm2);
+err4:
+ kfree(mm);
+err3:
+ remove_handle(rhp, &rhp->cqidr, chp->cq.cqid);
+err2:
+ destroy_cq(&chp->rhp->rdev, &chp->cq,
+ ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
+err1:
+ kfree(chp);
+ return ERR_PTR(ret);
+}
+
+int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
+{
+ return -ENOSYS;
+}
+
+int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
+{
+ struct c4iw_cq *chp;
+ int ret;
+ unsigned long flag;
+
+ chp = to_c4iw_cq(ibcq);
+ spin_lock_irqsave(&chp->lock, flag);
+ ret = t4_arm_cq(&chp->cq,
+ (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED);
+ spin_unlock_irqrestore(&chp->lock, flag);
+ if (ret && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
+ ret = 0;
+ return ret;
+}
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
new file mode 100644
index 000000000000..d870f9c17c1e
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -0,0 +1,544 @@
+/*
+ * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/debugfs.h>
+
+#include <rdma/ib_verbs.h>
+
+#include "iw_cxgb4.h"
+
+#define DRV_VERSION "0.1"
+
+MODULE_AUTHOR("Steve Wise");
+MODULE_DESCRIPTION("Chelsio T4 RDMA Driver");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(DRV_VERSION);
+
+static LIST_HEAD(dev_list);
+static DEFINE_MUTEX(dev_mutex);
+
+static struct dentry *c4iw_debugfs_root;
+
+struct debugfs_qp_data {
+ struct c4iw_dev *devp;
+ char *buf;
+ int bufsize;
+ int pos;
+};
+
+static int count_qps(int id, void *p, void *data)
+{
+ struct c4iw_qp *qp = p;
+ int *countp = data;
+
+ if (id != qp->wq.sq.qid)
+ return 0;
+
+ *countp = *countp + 1;
+ return 0;
+}
+
+static int dump_qps(int id, void *p, void *data)
+{
+ struct c4iw_qp *qp = p;
+ struct debugfs_qp_data *qpd = data;
+ int space;
+ int cc;
+
+ if (id != qp->wq.sq.qid)
+ return 0;
+
+ space = qpd->bufsize - qpd->pos - 1;
+ if (space == 0)
+ return 1;
+
+ if (qp->ep)
+ cc = snprintf(qpd->buf + qpd->pos, space, "qp id %u state %u "
+ "ep tid %u state %u %pI4:%u->%pI4:%u\n",
+ qp->wq.sq.qid, (int)qp->attr.state,
+ qp->ep->hwtid, (int)qp->ep->com.state,
+ &qp->ep->com.local_addr.sin_addr.s_addr,
+ ntohs(qp->ep->com.local_addr.sin_port),
+ &qp->ep->com.remote_addr.sin_addr.s_addr,
+ ntohs(qp->ep->com.remote_addr.sin_port));
+ else
+ cc = snprintf(qpd->buf + qpd->pos, space, "qp id %u state %u\n",
+ qp->wq.sq.qid, (int)qp->attr.state);
+ if (cc < space)
+ qpd->pos += cc;
+ return 0;
+}
+
+static int qp_release(struct inode *inode, struct file *file)
+{
+ struct debugfs_qp_data *qpd = file->private_data;
+ if (!qpd) {
+ printk(KERN_INFO "%s null qpd?\n", __func__);
+ return 0;
+ }
+ kfree(qpd->buf);
+ kfree(qpd);
+ return 0;
+}
+
+static int qp_open(struct inode *inode, struct file *file)
+{
+ struct debugfs_qp_data *qpd;
+ int ret = 0;
+ int count = 1;
+
+ qpd = kmalloc(sizeof *qpd, GFP_KERNEL);
+ if (!qpd) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ qpd->devp = inode->i_private;
+ qpd->pos = 0;
+
+ spin_lock_irq(&qpd->devp->lock);
+ idr_for_each(&qpd->devp->qpidr, count_qps, &count);
+ spin_unlock_irq(&qpd->devp->lock);
+
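+ /* Rough sizing: budget 128 bytes of formatted output per QP. */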
+ qpd->bufsize = count * 128;
+ qpd->buf = kmalloc(qpd->bufsize, GFP_KERNEL);
+ if (!qpd->buf) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+
+ spin_lock_irq(&qpd->devp->lock);
+ idr_for_each(&qpd->devp->qpidr, dump_qps, qpd);
+ spin_unlock_irq(&qpd->devp->lock);
+
+ qpd->buf[qpd->pos++] = 0;
+ file->private_data = qpd;
+ goto out;
+err1:
+ kfree(qpd);
+out:
+ return ret;
+}
+
+static ssize_t qp_read(struct file *file, char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ struct debugfs_qp_data *qpd = file->private_data;
+ loff_t pos = *ppos;
+ loff_t avail = qpd->pos;
+
+ if (pos < 0)
+ return -EINVAL;
+ if (pos >= avail)
+ return 0;
+ if (count > avail - pos)
+ count = avail - pos;
+
+ while (count) {
+ size_t len = 0;
+
+ len = min((int)count, (int)qpd->pos - (int)pos);
+ if (copy_to_user(buf, qpd->buf + pos, len))
+ return -EFAULT;
+ if (len == 0)
+ return -EINVAL;
+
+ buf += len;
+ pos += len;
+ count -= len;
+ }
+ count = pos - *ppos;
+ *ppos = pos;
+ return count;
+}
+
+static const struct file_operations qp_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = qp_open,
+ .release = qp_release,
+ .read = qp_read,
+};
+
+static int setup_debugfs(struct c4iw_dev *devp)
+{
+ struct dentry *de;
+
+ if (!devp->debugfs_root)
+ return -1;
+
+ de = debugfs_create_file("qps", S_IRUSR, devp->debugfs_root,
+ (void *)devp, &qp_debugfs_fops);
+ if (de && de->d_inode)
+ de->d_inode->i_size = 4096;
+ return 0;
+}
+
+void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
+ struct c4iw_dev_ucontext *uctx)
+{
+ struct list_head *pos, *nxt;
+ struct c4iw_qid_list *entry;
+
+ mutex_lock(&uctx->lock);
+ list_for_each_safe(pos, nxt, &uctx->qpids) {
+ entry = list_entry(pos, struct c4iw_qid_list, entry);
+ list_del_init(&entry->entry);
+ if (!(entry->qid & rdev->qpmask))
+ c4iw_put_resource(&rdev->resource.qid_fifo, entry->qid,
+ &rdev->resource.qid_fifo_lock);
+ kfree(entry);
+ }
+
+ list_for_each_safe(pos, nxt, &uctx->qpids) {
+ entry = list_entry(pos, struct c4iw_qid_list, entry);
+ list_del_init(&entry->entry);
+ kfree(entry);
+ }
+ mutex_unlock(&uctx->lock);
+}
+
+void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
+ struct c4iw_dev_ucontext *uctx)
+{
+ INIT_LIST_HEAD(&uctx->qpids);
+ INIT_LIST_HEAD(&uctx->cqids);
+ mutex_init(&uctx->lock);
+}
+
+/* Caller takes care of locking if needed */
+static int c4iw_rdev_open(struct c4iw_rdev *rdev)
+{
+ int err;
+
+ c4iw_init_dev_ucontext(rdev, &rdev->uctx);
+
+ /*
+ * qpshift is the number of bits to shift the qpid left in order
+ * to get the correct address of the doorbell for that qp.
+ */
+ rdev->qpshift = PAGE_SHIFT - ilog2(rdev->lldi.udb_density);
+ rdev->qpmask = rdev->lldi.udb_density - 1;
+ rdev->cqshift = PAGE_SHIFT - ilog2(rdev->lldi.ucq_density);
+ rdev->cqmask = rdev->lldi.ucq_density - 1;
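+ /*
+ * Illustrative example: with 4K pages and a udb_density of 16 user
+ * doorbells per page, qpshift is 12 - 4 = 8, so consecutive qpids
+ * map to doorbell addresses 256 bytes apart.
+ */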
+ PDBG("%s dev %s stag start 0x%0x size 0x%0x num stags %d "
+ "pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x\n",
+ __func__, pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start,
+ rdev->lldi.vr->stag.size, c4iw_num_stags(rdev),
+ rdev->lldi.vr->pbl.start,
+ rdev->lldi.vr->pbl.size, rdev->lldi.vr->rq.start,
+ rdev->lldi.vr->rq.size);
+ PDBG("udb len 0x%x udb base %p db_reg %p gts_reg %p qpshift %lu "
+ "qpmask 0x%x cqshift %lu cqmask 0x%x\n",
+ (unsigned)pci_resource_len(rdev->lldi.pdev, 2),
+ (void *)pci_resource_start(rdev->lldi.pdev, 2),
+ rdev->lldi.db_reg,
+ rdev->lldi.gts_reg,
+ rdev->qpshift, rdev->qpmask,
+ rdev->cqshift, rdev->cqmask);
+
+ if (c4iw_num_stags(rdev) == 0) {
+ err = -EINVAL;
+ goto err1;
+ }
+
+ err = c4iw_init_resource(rdev, c4iw_num_stags(rdev), T4_MAX_NUM_PD);
+ if (err) {
+ printk(KERN_ERR MOD "error %d initializing resources\n", err);
+ goto err1;
+ }
+ err = c4iw_pblpool_create(rdev);
+ if (err) {
+ printk(KERN_ERR MOD "error %d initializing pbl pool\n", err);
+ goto err2;
+ }
+ err = c4iw_rqtpool_create(rdev);
+ if (err) {
+ printk(KERN_ERR MOD "error %d initializing rqt pool\n", err);
+ goto err3;
+ }
+ return 0;
+err3:
+ c4iw_pblpool_destroy(rdev);
+err2:
+ c4iw_destroy_resource(&rdev->resource);
+err1:
+ return err;
+}
+
+static void c4iw_rdev_close(struct c4iw_rdev *rdev)
+{
+ c4iw_pblpool_destroy(rdev);
+ c4iw_rqtpool_destroy(rdev);
+ c4iw_destroy_resource(&rdev->resource);
+}
+
+static void c4iw_remove(struct c4iw_dev *dev)
+{
+ PDBG("%s c4iw_dev %p\n", __func__, dev);
+ cancel_delayed_work_sync(&dev->db_drop_task);
+ list_del(&dev->entry);
+ if (dev->registered)
+ c4iw_unregister_device(dev);
+ c4iw_rdev_close(&dev->rdev);
+ idr_destroy(&dev->cqidr);
+ idr_destroy(&dev->qpidr);
+ idr_destroy(&dev->mmidr);
+ ib_dealloc_device(&dev->ibdev);
+}
+
+static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
+{
+ struct c4iw_dev *devp;
+ int ret;
+
+ devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp));
+ if (!devp) {
+ printk(KERN_ERR MOD "Cannot allocate ib device\n");
+ return NULL;
+ }
+ devp->rdev.lldi = *infop;
+
+ mutex_lock(&dev_mutex);
+
+ ret = c4iw_rdev_open(&devp->rdev);
+ if (ret) {
+ mutex_unlock(&dev_mutex);
+ printk(KERN_ERR MOD "Unable to open CXIO rdev err %d\n", ret);
+ ib_dealloc_device(&devp->ibdev);
+ return NULL;
+ }
+
+ idr_init(&devp->cqidr);
+ idr_init(&devp->qpidr);
+ idr_init(&devp->mmidr);
+ spin_lock_init(&devp->lock);
+ list_add_tail(&devp->entry, &dev_list);
+ mutex_unlock(&dev_mutex);
+
+ if (c4iw_debugfs_root) {
+ devp->debugfs_root = debugfs_create_dir(
+ pci_name(devp->rdev.lldi.pdev),
+ c4iw_debugfs_root);
+ setup_debugfs(devp);
+ }
+ return devp;
+}
+
+static void *c4iw_uld_add(const struct cxgb4_lld_info *infop)
+{
+ struct c4iw_dev *dev;
+ static int vers_printed;
+ int i;
+
+ if (!vers_printed++)
+ printk(KERN_INFO MOD "Chelsio T4 RDMA Driver - version %s\n",
+ DRV_VERSION);
+
+ dev = c4iw_alloc(infop);
+ if (!dev)
+ goto out;
+
+ PDBG("%s found device %s nchan %u nrxq %u ntxq %u nports %u\n",
+ __func__, pci_name(dev->rdev.lldi.pdev),
+ dev->rdev.lldi.nchan, dev->rdev.lldi.nrxq,
+ dev->rdev.lldi.ntxq, dev->rdev.lldi.nports);
+
+ for (i = 0; i < dev->rdev.lldi.nrxq; i++)
+ PDBG("rxqid[%u] %u\n", i, dev->rdev.lldi.rxq_ids[i]);
+out:
+ return dev;
+}
+
+static struct sk_buff *t4_pktgl_to_skb(const struct pkt_gl *gl,
+ unsigned int skb_len,
+ unsigned int pull_len)
+{
+ struct sk_buff *skb;
+ struct skb_shared_info *ssi;
+
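+ /*
+ * Packets of 512 bytes or less are copied whole into a linear skb;
+ * larger ones get pull_len bytes copied into the head and the
+ * remaining gather-list fragments attached as pages.
+ */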
+ if (gl->tot_len <= 512) {
+ skb = alloc_skb(gl->tot_len, GFP_ATOMIC);
+ if (unlikely(!skb))
+ goto out;
+ __skb_put(skb, gl->tot_len);
+ skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
+ } else {
+ skb = alloc_skb(skb_len, GFP_ATOMIC);
+ if (unlikely(!skb))
+ goto out;
+ __skb_put(skb, pull_len);
+ skb_copy_to_linear_data(skb, gl->va, pull_len);
+
+ ssi = skb_shinfo(skb);
+ ssi->frags[0].page = gl->frags[0].page;
+ ssi->frags[0].page_offset = gl->frags[0].page_offset + pull_len;
+ ssi->frags[0].size = gl->frags[0].size - pull_len;
+ if (gl->nfrags > 1)
+ memcpy(&ssi->frags[1], &gl->frags[1],
+ (gl->nfrags - 1) * sizeof(skb_frag_t));
+ ssi->nr_frags = gl->nfrags;
+
+ skb->len = gl->tot_len;
+ skb->data_len = skb->len - pull_len;
+ skb->truesize += skb->data_len;
+
+ /* Get a reference for the last page, we don't own it */
+ get_page(gl->frags[gl->nfrags - 1].page);
+ }
+out:
+ return skb;
+}
+
+static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
+ const struct pkt_gl *gl)
+{
+ struct c4iw_dev *dev = handle;
+ struct sk_buff *skb;
+ const struct cpl_act_establish *rpl;
+ unsigned int opcode;
+
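+ /*
+ * Three delivery paths: a NULL gl means the CPL is embedded in the
+ * response descriptor itself, CXGB4_MSG_AN is an async notification
+ * carrying a queue id for c4iw_ev_handler(), and otherwise the
+ * gather list is converted to an skb and dispatched to the matching
+ * CPL handler.
+ */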
+ if (gl == NULL) {
+ /* omit RSS and rsp_ctrl at end of descriptor */
+ unsigned int len = 64 - sizeof(struct rsp_ctrl) - 8;
+
+ skb = alloc_skb(256, GFP_ATOMIC);
+ if (!skb)
+ goto nomem;
+ __skb_put(skb, len);
+ skb_copy_to_linear_data(skb, &rsp[1], len);
+ } else if (gl == CXGB4_MSG_AN) {
+ const struct rsp_ctrl *rc = (void *)rsp;
+
+ u32 qid = be32_to_cpu(rc->pldbuflen_qid);
+ c4iw_ev_handler(dev, qid);
+ return 0;
+ } else {
+ skb = t4_pktgl_to_skb(gl, 128, 128);
+ if (unlikely(!skb))
+ goto nomem;
+ }
+
+ rpl = cplhdr(skb);
+ opcode = rpl->ot.opcode;
+
+ if (c4iw_handlers[opcode])
+ c4iw_handlers[opcode](dev, skb);
+ else
+ printk(KERN_INFO "%s no handler opcode 0x%x...\n", __func__,
+ opcode);
+
+ return 0;
+nomem:
+ return -1;
+}
+
+static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
+{
+ struct c4iw_dev *dev = handle;
+
+ PDBG("%s new_state %u\n", __func__, new_state);
+ switch (new_state) {
+ case CXGB4_STATE_UP:
+ printk(KERN_INFO MOD "%s: Up\n", pci_name(dev->rdev.lldi.pdev));
+ if (!dev->registered) {
+ int ret;
+ ret = c4iw_register_device(dev);
+ if (ret)
+ printk(KERN_ERR MOD
+ "%s: RDMA registration failed: %d\n",
+ pci_name(dev->rdev.lldi.pdev), ret);
+ }
+ break;
+ case CXGB4_STATE_DOWN:
+ printk(KERN_INFO MOD "%s: Down\n",
+ pci_name(dev->rdev.lldi.pdev));
+ if (dev->registered)
+ c4iw_unregister_device(dev);
+ break;
+ case CXGB4_STATE_START_RECOVERY:
+ printk(KERN_INFO MOD "%s: Fatal Error\n",
+ pci_name(dev->rdev.lldi.pdev));
+ if (dev->registered)
+ c4iw_unregister_device(dev);
+ break;
+ case CXGB4_STATE_DETACH:
+ printk(KERN_INFO MOD "%s: Detach\n",
+ pci_name(dev->rdev.lldi.pdev));
+ mutex_lock(&dev_mutex);
+ c4iw_remove(dev);
+ mutex_unlock(&dev_mutex);
+ break;
+ }
+ return 0;
+}
+
+static struct cxgb4_uld_info c4iw_uld_info = {
+ .name = DRV_NAME,
+ .add = c4iw_uld_add,
+ .rx_handler = c4iw_uld_rx_handler,
+ .state_change = c4iw_uld_state_change,
+};
+
+static int __init c4iw_init_module(void)
+{
+ int err;
+
+ err = c4iw_cm_init();
+ if (err)
+ return err;
+
+ c4iw_debugfs_root = debugfs_create_dir(DRV_NAME, NULL);
+ if (!c4iw_debugfs_root)
+ printk(KERN_WARNING MOD
+ "could not create debugfs entry, continuing\n");
+
+ cxgb4_register_uld(CXGB4_ULD_RDMA, &c4iw_uld_info);
+
+ return 0;
+}
+
+static void __exit c4iw_exit_module(void)
+{
+ struct c4iw_dev *dev, *tmp;
+
+ mutex_lock(&dev_mutex);
+ list_for_each_entry_safe(dev, tmp, &dev_list, entry) {
+ c4iw_remove(dev);
+ }
+ mutex_unlock(&dev_mutex);
+ cxgb4_unregister_uld(CXGB4_ULD_RDMA);
+ c4iw_cm_term();
+ debugfs_remove_recursive(c4iw_debugfs_root);
+}
+
+module_init(c4iw_init_module);
+module_exit(c4iw_exit_module);
diff --git a/drivers/infiniband/hw/cxgb4/ev.c b/drivers/infiniband/hw/cxgb4/ev.c
new file mode 100644
index 000000000000..491e76a0327f
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/ev.c
@@ -0,0 +1,193 @@
+/*
+ * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/slab.h>
+#include <linux/mman.h>
+#include <net/sock.h>
+
+#include "iw_cxgb4.h"
+
+static void post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp,
+ struct c4iw_qp *qhp,
+ struct t4_cqe *err_cqe,
+ enum ib_event_type ib_event)
+{
+ struct ib_event event;
+ struct c4iw_qp_attributes attrs;
+
+ if ((qhp->attr.state == C4IW_QP_STATE_ERROR) ||
+ (qhp->attr.state == C4IW_QP_STATE_TERMINATE)) {
+ PDBG("%s AE received after RTS - "
+ "qp state %d qpid 0x%x status 0x%x\n", __func__,
+ qhp->attr.state, qhp->wq.sq.qid, CQE_STATUS(err_cqe));
+ return;
+ }
+
+ printk(KERN_ERR MOD "AE qpid 0x%x opcode %d status 0x%x "
+ "type %d wrid.hi 0x%x wrid.lo 0x%x\n",
+ CQE_QPID(err_cqe), CQE_OPCODE(err_cqe),
+ CQE_STATUS(err_cqe), CQE_TYPE(err_cqe),
+ CQE_WRID_HI(err_cqe), CQE_WRID_LOW(err_cqe));
+
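+	/* If the QP is still in RTS, move it to TERMINATE before notifying the consumer */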
+ if (qhp->attr.state == C4IW_QP_STATE_RTS) {
+ attrs.next_state = C4IW_QP_STATE_TERMINATE;
+ c4iw_modify_qp(qhp->rhp, qhp, C4IW_QP_ATTR_NEXT_STATE,
+ &attrs, 1);
+ }
+
+ event.event = ib_event;
+ event.device = chp->ibcq.device;
+ if (ib_event == IB_EVENT_CQ_ERR)
+ event.element.cq = &chp->ibcq;
+ else
+ event.element.qp = &qhp->ibqp;
+ if (qhp->ibqp.event_handler)
+ (*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context);
+
+ (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
+}
+
+void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
+{
+ struct c4iw_cq *chp;
+ struct c4iw_qp *qhp;
+ u32 cqid;
+
+ spin_lock(&dev->lock);
+ qhp = get_qhp(dev, CQE_QPID(err_cqe));
+ if (!qhp) {
+ printk(KERN_ERR MOD "BAD AE qpid 0x%x opcode %d "
+ "status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x\n",
+ CQE_QPID(err_cqe),
+ CQE_OPCODE(err_cqe), CQE_STATUS(err_cqe),
+ CQE_TYPE(err_cqe), CQE_WRID_HI(err_cqe),
+ CQE_WRID_LOW(err_cqe));
+ spin_unlock(&dev->lock);
+ goto out;
+ }
+
+ if (SQ_TYPE(err_cqe))
+ cqid = qhp->attr.scq;
+ else
+ cqid = qhp->attr.rcq;
+ chp = get_chp(dev, cqid);
+ if (!chp) {
+ printk(KERN_ERR MOD "BAD AE cqid 0x%x qpid 0x%x opcode %d "
+ "status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x\n",
+ cqid, CQE_QPID(err_cqe),
+ CQE_OPCODE(err_cqe), CQE_STATUS(err_cqe),
+ CQE_TYPE(err_cqe), CQE_WRID_HI(err_cqe),
+ CQE_WRID_LOW(err_cqe));
+ spin_unlock(&dev->lock);
+ goto out;
+ }
+
+ c4iw_qp_add_ref(&qhp->ibqp);
+ atomic_inc(&chp->refcnt);
+ spin_unlock(&dev->lock);
+
+ /* Bad incoming write */
+ if (RQ_TYPE(err_cqe) &&
+ (CQE_OPCODE(err_cqe) == FW_RI_RDMA_WRITE)) {
+ post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_REQ_ERR);
+ goto done;
+ }
+
+ switch (CQE_STATUS(err_cqe)) {
+
+ /* Completion Events */
+ case T4_ERR_SUCCESS:
+ printk(KERN_ERR MOD "AE with status 0!\n");
+ break;
+
+ case T4_ERR_STAG:
+ case T4_ERR_PDID:
+ case T4_ERR_QPID:
+ case T4_ERR_ACCESS:
+ case T4_ERR_WRAP:
+ case T4_ERR_BOUND:
+ case T4_ERR_INVALIDATE_SHARED_MR:
+ case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
+ post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_ACCESS_ERR);
+ break;
+
+ /* Device Fatal Errors */
+ case T4_ERR_ECC:
+ case T4_ERR_ECC_PSTAG:
+ case T4_ERR_INTERNAL_ERR:
+ post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_DEVICE_FATAL);
+ break;
+
+ /* QP Fatal Errors */
+ case T4_ERR_OUT_OF_RQE:
+ case T4_ERR_PBL_ADDR_BOUND:
+ case T4_ERR_CRC:
+ case T4_ERR_MARKER:
+ case T4_ERR_PDU_LEN_ERR:
+ case T4_ERR_DDP_VERSION:
+ case T4_ERR_RDMA_VERSION:
+ case T4_ERR_OPCODE:
+ case T4_ERR_DDP_QUEUE_NUM:
+ case T4_ERR_MSN:
+ case T4_ERR_TBIT:
+ case T4_ERR_MO:
+ case T4_ERR_MSN_GAP:
+ case T4_ERR_MSN_RANGE:
+ case T4_ERR_RQE_ADDR_BOUND:
+ case T4_ERR_IRD_OVERFLOW:
+ post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_FATAL);
+ break;
+
+ default:
+ printk(KERN_ERR MOD "Unknown T4 status 0x%x QPID 0x%x\n",
+ CQE_STATUS(err_cqe), qhp->wq.sq.qid);
+ post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_FATAL);
+ break;
+ }
+done:
+ if (atomic_dec_and_test(&chp->refcnt))
+ wake_up(&chp->wait);
+ c4iw_qp_rem_ref(&qhp->ibqp);
+out:
+ return;
+}
+
+int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid)
+{
+ struct c4iw_cq *chp;
+
+ chp = get_chp(dev, qid);
+ if (chp)
+ (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
+ else
+ PDBG("%s unknown cqid 0x%x\n", __func__, qid);
+ return 0;
+}
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
new file mode 100644
index 000000000000..277ab589b44d
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -0,0 +1,746 @@
+/*
+ * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __IW_CXGB4_H__
+#define __IW_CXGB4_H__
+
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/idr.h>
+#include <linux/workqueue.h>
+#include <linux/netdevice.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/inet.h>
+#include <linux/wait.h>
+#include <linux/kref.h>
+#include <linux/timer.h>
+#include <linux/io.h>
+#include <linux/kfifo.h>
+
+#include <asm/byteorder.h>
+
+#include <net/net_namespace.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/iw_cm.h>
+
+#include "cxgb4.h"
+#include "cxgb4_uld.h"
+#include "l2t.h"
+#include "user.h"
+
+#define DRV_NAME "iw_cxgb4"
+#define MOD DRV_NAME ":"
+
+extern int c4iw_debug;
+#define PDBG(fmt, args...) \
+do { \
+ if (c4iw_debug) \
+ printk(MOD fmt, ## args); \
+} while (0)
+
+#include "t4.h"
+
+#define PBL_OFF(rdev_p, a) ((a) - (rdev_p)->lldi.vr->pbl.start)
+#define RQT_OFF(rdev_p, a) ((a) - (rdev_p)->lldi.vr->rq.start)
+
+static inline void *cplhdr(struct sk_buff *skb)
+{
+ return skb->data;
+}
+
+#define C4IW_WR_TO (10*HZ)
+
+struct c4iw_wr_wait {
+ wait_queue_head_t wait;
+ int done;
+ int ret;
+};
+
+static inline void c4iw_init_wr_wait(struct c4iw_wr_wait *wr_waitp)
+{
+ wr_waitp->ret = 0;
+ wr_waitp->done = 0;
+ init_waitqueue_head(&wr_waitp->wait);
+}
+
+struct c4iw_resource {
+ struct kfifo tpt_fifo;
+ spinlock_t tpt_fifo_lock;
+ struct kfifo qid_fifo;
+ spinlock_t qid_fifo_lock;
+ struct kfifo pdid_fifo;
+ spinlock_t pdid_fifo_lock;
+};
+
+struct c4iw_qid_list {
+ struct list_head entry;
+ u32 qid;
+};
+
+struct c4iw_dev_ucontext {
+ struct list_head qpids;
+ struct list_head cqids;
+ struct mutex lock;
+};
+
+enum c4iw_rdev_flags {
+ T4_FATAL_ERROR = (1<<0),
+};
+
+struct c4iw_rdev {
+ struct c4iw_resource resource;
+ unsigned long qpshift;
+ u32 qpmask;
+ unsigned long cqshift;
+ u32 cqmask;
+ struct c4iw_dev_ucontext uctx;
+ struct gen_pool *pbl_pool;
+ struct gen_pool *rqt_pool;
+ u32 flags;
+ struct cxgb4_lld_info lldi;
+};
+
+static inline int c4iw_fatal_error(struct c4iw_rdev *rdev)
+{
+ return rdev->flags & T4_FATAL_ERROR;
+}
+
+static inline int c4iw_num_stags(struct c4iw_rdev *rdev)
+{
+ return min((int)T4_MAX_NUM_STAG, (int)(rdev->lldi.vr->stag.size >> 5));
+}
+
+struct c4iw_dev {
+ struct ib_device ibdev;
+ struct c4iw_rdev rdev;
+ u32 device_cap_flags;
+ struct idr cqidr;
+ struct idr qpidr;
+ struct idr mmidr;
+ spinlock_t lock;
+ struct list_head entry;
+ struct delayed_work db_drop_task;
+ struct dentry *debugfs_root;
+ u8 registered;
+};
+
+static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev)
+{
+ return container_of(ibdev, struct c4iw_dev, ibdev);
+}
+
+static inline struct c4iw_dev *rdev_to_c4iw_dev(struct c4iw_rdev *rdev)
+{
+ return container_of(rdev, struct c4iw_dev, rdev);
+}
+
+static inline struct c4iw_cq *get_chp(struct c4iw_dev *rhp, u32 cqid)
+{
+ return idr_find(&rhp->cqidr, cqid);
+}
+
+static inline struct c4iw_qp *get_qhp(struct c4iw_dev *rhp, u32 qpid)
+{
+ return idr_find(&rhp->qpidr, qpid);
+}
+
+static inline struct c4iw_mr *get_mhp(struct c4iw_dev *rhp, u32 mmid)
+{
+ return idr_find(&rhp->mmidr, mmid);
+}
+
+static inline int insert_handle(struct c4iw_dev *rhp, struct idr *idr,
+ void *handle, u32 id)
+{
+ int ret;
+ int newid;
+
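+	/* idr_pre_get() preallocates outside the lock; retry if idr_get_new_above() returns -EAGAIN */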
+ do {
+ if (!idr_pre_get(idr, GFP_KERNEL))
+ return -ENOMEM;
+ spin_lock_irq(&rhp->lock);
+ ret = idr_get_new_above(idr, handle, id, &newid);
+ BUG_ON(newid != id);
+ spin_unlock_irq(&rhp->lock);
+ } while (ret == -EAGAIN);
+
+ return ret;
+}
+
+static inline void remove_handle(struct c4iw_dev *rhp, struct idr *idr, u32 id)
+{
+ spin_lock_irq(&rhp->lock);
+ idr_remove(idr, id);
+ spin_unlock_irq(&rhp->lock);
+}
+
+struct c4iw_pd {
+ struct ib_pd ibpd;
+ u32 pdid;
+ struct c4iw_dev *rhp;
+};
+
+static inline struct c4iw_pd *to_c4iw_pd(struct ib_pd *ibpd)
+{
+ return container_of(ibpd, struct c4iw_pd, ibpd);
+}
+
+struct tpt_attributes {
+ u64 len;
+ u64 va_fbo;
+ enum fw_ri_mem_perms perms;
+ u32 stag;
+ u32 pdid;
+ u32 qpid;
+ u32 pbl_addr;
+ u32 pbl_size;
+ u32 state:1;
+ u32 type:2;
+ u32 rsvd:1;
+ u32 remote_invaliate_disable:1;
+ u32 zbva:1;
+ u32 mw_bind_enable:1;
+ u32 page_size:5;
+};
+
+struct c4iw_mr {
+ struct ib_mr ibmr;
+ struct ib_umem *umem;
+ struct c4iw_dev *rhp;
+ u64 kva;
+ struct tpt_attributes attr;
+};
+
+static inline struct c4iw_mr *to_c4iw_mr(struct ib_mr *ibmr)
+{
+ return container_of(ibmr, struct c4iw_mr, ibmr);
+}
+
+struct c4iw_mw {
+ struct ib_mw ibmw;
+ struct c4iw_dev *rhp;
+ u64 kva;
+ struct tpt_attributes attr;
+};
+
+static inline struct c4iw_mw *to_c4iw_mw(struct ib_mw *ibmw)
+{
+ return container_of(ibmw, struct c4iw_mw, ibmw);
+}
+
+struct c4iw_fr_page_list {
+ struct ib_fast_reg_page_list ibpl;
+ DECLARE_PCI_UNMAP_ADDR(mapping);
+ dma_addr_t dma_addr;
+ struct c4iw_dev *dev;
+ int size;
+};
+
+static inline struct c4iw_fr_page_list *to_c4iw_fr_page_list(
+ struct ib_fast_reg_page_list *ibpl)
+{
+ return container_of(ibpl, struct c4iw_fr_page_list, ibpl);
+}
+
+struct c4iw_cq {
+ struct ib_cq ibcq;
+ struct c4iw_dev *rhp;
+ struct t4_cq cq;
+ spinlock_t lock;
+ atomic_t refcnt;
+ wait_queue_head_t wait;
+};
+
+static inline struct c4iw_cq *to_c4iw_cq(struct ib_cq *ibcq)
+{
+ return container_of(ibcq, struct c4iw_cq, ibcq);
+}
+
+struct c4iw_mpa_attributes {
+ u8 initiator;
+ u8 recv_marker_enabled;
+ u8 xmit_marker_enabled;
+ u8 crc_enabled;
+ u8 version;
+ u8 p2p_type;
+};
+
+struct c4iw_qp_attributes {
+ u32 scq;
+ u32 rcq;
+ u32 sq_num_entries;
+ u32 rq_num_entries;
+ u32 sq_max_sges;
+ u32 sq_max_sges_rdma_write;
+ u32 rq_max_sges;
+ u32 state;
+ u8 enable_rdma_read;
+ u8 enable_rdma_write;
+ u8 enable_bind;
+ u8 enable_mmid0_fastreg;
+ u32 max_ord;
+ u32 max_ird;
+ u32 pd;
+ u32 next_state;
+ char terminate_buffer[52];
+ u32 terminate_msg_len;
+ u8 is_terminate_local;
+ struct c4iw_mpa_attributes mpa_attr;
+ struct c4iw_ep *llp_stream_handle;
+};
+
+struct c4iw_qp {
+ struct ib_qp ibqp;
+ struct c4iw_dev *rhp;
+ struct c4iw_ep *ep;
+ struct c4iw_qp_attributes attr;
+ struct t4_wq wq;
+ spinlock_t lock;
+ atomic_t refcnt;
+ wait_queue_head_t wait;
+ struct timer_list timer;
+};
+
+static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp)
+{
+ return container_of(ibqp, struct c4iw_qp, ibqp);
+}
+
+struct c4iw_ucontext {
+ struct ib_ucontext ibucontext;
+ struct c4iw_dev_ucontext uctx;
+ u32 key;
+ spinlock_t mmap_lock;
+ struct list_head mmaps;
+};
+
+static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c)
+{
+ return container_of(c, struct c4iw_ucontext, ibucontext);
+}
+
+struct c4iw_mm_entry {
+ struct list_head entry;
+ u64 addr;
+ u32 key;
+ unsigned len;
+};
+
+static inline struct c4iw_mm_entry *remove_mmap(struct c4iw_ucontext *ucontext,
+ u32 key, unsigned len)
+{
+ struct list_head *pos, *nxt;
+ struct c4iw_mm_entry *mm;
+
+ spin_lock(&ucontext->mmap_lock);
+ list_for_each_safe(pos, nxt, &ucontext->mmaps) {
+
+ mm = list_entry(pos, struct c4iw_mm_entry, entry);
+ if (mm->key == key && mm->len == len) {
+ list_del_init(&mm->entry);
+ spin_unlock(&ucontext->mmap_lock);
+ PDBG("%s key 0x%x addr 0x%llx len %d\n", __func__,
+ key, (unsigned long long) mm->addr, mm->len);
+ return mm;
+ }
+ }
+ spin_unlock(&ucontext->mmap_lock);
+ return NULL;
+}
+
+static inline void insert_mmap(struct c4iw_ucontext *ucontext,
+ struct c4iw_mm_entry *mm)
+{
+ spin_lock(&ucontext->mmap_lock);
+ PDBG("%s key 0x%x addr 0x%llx len %d\n", __func__,
+ mm->key, (unsigned long long) mm->addr, mm->len);
+ list_add_tail(&mm->entry, &ucontext->mmaps);
+ spin_unlock(&ucontext->mmap_lock);
+}
+
+enum c4iw_qp_attr_mask {
+ C4IW_QP_ATTR_NEXT_STATE = 1 << 0,
+ C4IW_QP_ATTR_ENABLE_RDMA_READ = 1 << 7,
+ C4IW_QP_ATTR_ENABLE_RDMA_WRITE = 1 << 8,
+ C4IW_QP_ATTR_ENABLE_RDMA_BIND = 1 << 9,
+ C4IW_QP_ATTR_MAX_ORD = 1 << 11,
+ C4IW_QP_ATTR_MAX_IRD = 1 << 12,
+ C4IW_QP_ATTR_LLP_STREAM_HANDLE = 1 << 22,
+ C4IW_QP_ATTR_STREAM_MSG_BUFFER = 1 << 23,
+ C4IW_QP_ATTR_MPA_ATTR = 1 << 24,
+ C4IW_QP_ATTR_QP_CONTEXT_ACTIVATE = 1 << 25,
+ C4IW_QP_ATTR_VALID_MODIFY = (C4IW_QP_ATTR_ENABLE_RDMA_READ |
+ C4IW_QP_ATTR_ENABLE_RDMA_WRITE |
+ C4IW_QP_ATTR_MAX_ORD |
+ C4IW_QP_ATTR_MAX_IRD |
+ C4IW_QP_ATTR_LLP_STREAM_HANDLE |
+ C4IW_QP_ATTR_STREAM_MSG_BUFFER |
+ C4IW_QP_ATTR_MPA_ATTR |
+ C4IW_QP_ATTR_QP_CONTEXT_ACTIVATE)
+};
+
+int c4iw_modify_qp(struct c4iw_dev *rhp,
+ struct c4iw_qp *qhp,
+ enum c4iw_qp_attr_mask mask,
+ struct c4iw_qp_attributes *attrs,
+ int internal);
+
+enum c4iw_qp_state {
+ C4IW_QP_STATE_IDLE,
+ C4IW_QP_STATE_RTS,
+ C4IW_QP_STATE_ERROR,
+ C4IW_QP_STATE_TERMINATE,
+ C4IW_QP_STATE_CLOSING,
+ C4IW_QP_STATE_TOT
+};
+
+static inline int c4iw_convert_state(enum ib_qp_state ib_state)
+{
+ switch (ib_state) {
+ case IB_QPS_RESET:
+ case IB_QPS_INIT:
+ return C4IW_QP_STATE_IDLE;
+ case IB_QPS_RTS:
+ return C4IW_QP_STATE_RTS;
+ case IB_QPS_SQD:
+ return C4IW_QP_STATE_CLOSING;
+ case IB_QPS_SQE:
+ return C4IW_QP_STATE_TERMINATE;
+ case IB_QPS_ERR:
+ return C4IW_QP_STATE_ERROR;
+ default:
+ return -1;
+ }
+}
+
+static inline u32 c4iw_ib_to_tpt_access(int a)
+{
+ return (a & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
+ (a & IB_ACCESS_REMOTE_READ ? FW_RI_MEM_ACCESS_REM_READ : 0) |
+ (a & IB_ACCESS_LOCAL_WRITE ? FW_RI_MEM_ACCESS_LOCAL_WRITE : 0) |
+ FW_RI_MEM_ACCESS_LOCAL_READ;
+}
+
+static inline u32 c4iw_ib_to_tpt_bind_access(int acc)
+{
+ return (acc & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
+ (acc & IB_ACCESS_REMOTE_READ ? FW_RI_MEM_ACCESS_REM_READ : 0);
+}
+
+enum c4iw_mmid_state {
+ C4IW_STAG_STATE_VALID,
+ C4IW_STAG_STATE_INVALID
+};
+
+#define C4IW_NODE_DESC "cxgb4 Chelsio Communications"
+
+#define MPA_KEY_REQ "MPA ID Req Frame"
+#define MPA_KEY_REP "MPA ID Rep Frame"
+
+#define MPA_MAX_PRIVATE_DATA 256
+#define MPA_REJECT 0x20
+#define MPA_CRC 0x40
+#define MPA_MARKERS 0x80
+#define MPA_FLAGS_MASK 0xE0
+
+#define c4iw_put_ep(ep) { \
+ PDBG("put_ep (via %s:%u) ep %p refcnt %d\n", __func__, __LINE__, \
+ ep, atomic_read(&((ep)->kref.refcount))); \
+ WARN_ON(atomic_read(&((ep)->kref.refcount)) < 1); \
+ kref_put(&((ep)->kref), _c4iw_free_ep); \
+}
+
+#define c4iw_get_ep(ep) { \
+ PDBG("get_ep (via %s:%u) ep %p, refcnt %d\n", __func__, __LINE__, \
+ ep, atomic_read(&((ep)->kref.refcount))); \
+ kref_get(&((ep)->kref)); \
+}
+void _c4iw_free_ep(struct kref *kref);
+
+struct mpa_message {
+ u8 key[16];
+ u8 flags;
+ u8 revision;
+ __be16 private_data_size;
+ u8 private_data[0];
+};
+
+struct terminate_message {
+ u8 layer_etype;
+ u8 ecode;
+ __be16 hdrct_rsvd;
+ u8 len_hdrs[0];
+};
+
+#define TERM_MAX_LENGTH (sizeof(struct terminate_message) + 2 + 18 + 28)
+
+enum c4iw_layers_types {
+ LAYER_RDMAP = 0x00,
+ LAYER_DDP = 0x10,
+ LAYER_MPA = 0x20,
+ RDMAP_LOCAL_CATA = 0x00,
+ RDMAP_REMOTE_PROT = 0x01,
+ RDMAP_REMOTE_OP = 0x02,
+ DDP_LOCAL_CATA = 0x00,
+ DDP_TAGGED_ERR = 0x01,
+ DDP_UNTAGGED_ERR = 0x02,
+ DDP_LLP = 0x03
+};
+
+enum c4iw_rdma_ecodes {
+ RDMAP_INV_STAG = 0x00,
+ RDMAP_BASE_BOUNDS = 0x01,
+ RDMAP_ACC_VIOL = 0x02,
+ RDMAP_STAG_NOT_ASSOC = 0x03,
+ RDMAP_TO_WRAP = 0x04,
+ RDMAP_INV_VERS = 0x05,
+ RDMAP_INV_OPCODE = 0x06,
+ RDMAP_STREAM_CATA = 0x07,
+ RDMAP_GLOBAL_CATA = 0x08,
+ RDMAP_CANT_INV_STAG = 0x09,
+ RDMAP_UNSPECIFIED = 0xff
+};
+
+enum c4iw_ddp_ecodes {
+ DDPT_INV_STAG = 0x00,
+ DDPT_BASE_BOUNDS = 0x01,
+ DDPT_STAG_NOT_ASSOC = 0x02,
+ DDPT_TO_WRAP = 0x03,
+ DDPT_INV_VERS = 0x04,
+ DDPU_INV_QN = 0x01,
+ DDPU_INV_MSN_NOBUF = 0x02,
+ DDPU_INV_MSN_RANGE = 0x03,
+ DDPU_INV_MO = 0x04,
+ DDPU_MSG_TOOBIG = 0x05,
+ DDPU_INV_VERS = 0x06
+};
+
+enum c4iw_mpa_ecodes {
+ MPA_CRC_ERR = 0x02,
+ MPA_MARKER_ERR = 0x03
+};
+
+enum c4iw_ep_state {
+ IDLE = 0,
+ LISTEN,
+ CONNECTING,
+ MPA_REQ_WAIT,
+ MPA_REQ_SENT,
+ MPA_REQ_RCVD,
+ MPA_REP_SENT,
+ FPDU_MODE,
+ ABORTING,
+ CLOSING,
+ MORIBUND,
+ DEAD,
+};
+
+enum c4iw_ep_flags {
+ PEER_ABORT_IN_PROGRESS = 0,
+ ABORT_REQ_IN_PROGRESS = 1,
+ RELEASE_RESOURCES = 2,
+ CLOSE_SENT = 3,
+};
+
+struct c4iw_ep_common {
+ struct iw_cm_id *cm_id;
+ struct c4iw_qp *qp;
+ struct c4iw_dev *dev;
+ enum c4iw_ep_state state;
+ struct kref kref;
+ spinlock_t lock;
+ struct sockaddr_in local_addr;
+ struct sockaddr_in remote_addr;
+ wait_queue_head_t waitq;
+ int rpl_done;
+ int rpl_err;
+ unsigned long flags;
+};
+
+struct c4iw_listen_ep {
+ struct c4iw_ep_common com;
+ unsigned int stid;
+ int backlog;
+};
+
+struct c4iw_ep {
+ struct c4iw_ep_common com;
+ struct c4iw_ep *parent_ep;
+ struct timer_list timer;
+ struct list_head entry;
+ unsigned int atid;
+ u32 hwtid;
+ u32 snd_seq;
+ u32 rcv_seq;
+ struct l2t_entry *l2t;
+ struct dst_entry *dst;
+ struct sk_buff *mpa_skb;
+ struct c4iw_mpa_attributes mpa_attr;
+ u8 mpa_pkt[sizeof(struct mpa_message) + MPA_MAX_PRIVATE_DATA];
+ unsigned int mpa_pkt_len;
+ u32 ird;
+ u32 ord;
+ u32 smac_idx;
+ u32 tx_chan;
+ u32 mtu;
+ u16 mss;
+ u16 emss;
+ u16 plen;
+ u16 rss_qid;
+ u16 txq_idx;
+ u8 tos;
+};
+
+static inline struct c4iw_ep *to_ep(struct iw_cm_id *cm_id)
+{
+ return cm_id->provider_data;
+}
+
+static inline struct c4iw_listen_ep *to_listen_ep(struct iw_cm_id *cm_id)
+{
+ return cm_id->provider_data;
+}
+
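+/* Smallest window scale (capped at 14) such that 65535 << wscale covers the TCP window */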
+static inline int compute_wscale(int win)
+{
+ int wscale = 0;
+
+ while (wscale < 14 && (65535<<wscale) < win)
+ wscale++;
+ return wscale;
+}
+
+typedef int (*c4iw_handler_func)(struct c4iw_dev *dev, struct sk_buff *skb);
+
+int c4iw_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
+ struct l2t_entry *l2t);
+void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qpid,
+ struct c4iw_dev_ucontext *uctx);
+u32 c4iw_get_resource(struct kfifo *fifo, spinlock_t *lock);
+void c4iw_put_resource(struct kfifo *fifo, u32 entry, spinlock_t *lock);
+int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid);
+int c4iw_init_ctrl_qp(struct c4iw_rdev *rdev);
+int c4iw_pblpool_create(struct c4iw_rdev *rdev);
+int c4iw_rqtpool_create(struct c4iw_rdev *rdev);
+void c4iw_pblpool_destroy(struct c4iw_rdev *rdev);
+void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev);
+void c4iw_destroy_resource(struct c4iw_resource *rscp);
+int c4iw_destroy_ctrl_qp(struct c4iw_rdev *rdev);
+int c4iw_register_device(struct c4iw_dev *dev);
+void c4iw_unregister_device(struct c4iw_dev *dev);
+int __init c4iw_cm_init(void);
+void __exit c4iw_cm_term(void);
+void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
+ struct c4iw_dev_ucontext *uctx);
+void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
+ struct c4iw_dev_ucontext *uctx);
+int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
+int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+ struct ib_send_wr **bad_wr);
+int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr);
+int c4iw_bind_mw(struct ib_qp *qp, struct ib_mw *mw,
+ struct ib_mw_bind *mw_bind);
+int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
+int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog);
+int c4iw_destroy_listen(struct iw_cm_id *cm_id);
+int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
+int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
+void c4iw_qp_add_ref(struct ib_qp *qp);
+void c4iw_qp_rem_ref(struct ib_qp *qp);
+void c4iw_free_fastreg_pbl(struct ib_fast_reg_page_list *page_list);
+struct ib_fast_reg_page_list *c4iw_alloc_fastreg_pbl(
+ struct ib_device *device,
+ int page_list_len);
+struct ib_mr *c4iw_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth);
+int c4iw_dealloc_mw(struct ib_mw *mw);
+struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd);
+struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start,
+ u64 length, u64 virt, int acc,
+ struct ib_udata *udata);
+struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc);
+struct ib_mr *c4iw_register_phys_mem(struct ib_pd *pd,
+ struct ib_phys_buf *buffer_list,
+ int num_phys_buf,
+ int acc,
+ u64 *iova_start);
+int c4iw_reregister_phys_mem(struct ib_mr *mr,
+ int mr_rereg_mask,
+ struct ib_pd *pd,
+ struct ib_phys_buf *buffer_list,
+ int num_phys_buf,
+ int acc, u64 *iova_start);
+int c4iw_dereg_mr(struct ib_mr *ib_mr);
+int c4iw_destroy_cq(struct ib_cq *ib_cq);
+struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
+ int vector,
+ struct ib_ucontext *ib_context,
+ struct ib_udata *udata);
+int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata);
+int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
+int c4iw_destroy_qp(struct ib_qp *ib_qp);
+struct ib_qp *c4iw_create_qp(struct ib_pd *pd,
+ struct ib_qp_init_attr *attrs,
+ struct ib_udata *udata);
+int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata);
+struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn);
+u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size);
+void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
+u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size);
+void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
+int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb);
+void c4iw_flush_hw_cq(struct t4_cq *cq);
+void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count);
+void c4iw_count_scqes(struct t4_cq *cq, struct t4_wq *wq, int *count);
+int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp);
+int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count);
+int c4iw_flush_sq(struct t4_wq *wq, struct t4_cq *cq, int count);
+int c4iw_ev_handler(struct c4iw_dev *rnicp, u32 qid);
+u16 c4iw_rqes_posted(struct c4iw_qp *qhp);
+int c4iw_post_zb_read(struct c4iw_qp *qhp);
+int c4iw_post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe);
+u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
+void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
+ struct c4iw_dev_ucontext *uctx);
+u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
+void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
+ struct c4iw_dev_ucontext *uctx);
+void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe);
+
+extern struct cxgb4_client t4c_client;
+extern c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS];
+extern int c4iw_max_read_depth;
+
+#endif
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
new file mode 100644
index 000000000000..7f94da1a2437
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -0,0 +1,812 @@
+/*
+ * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <rdma/ib_umem.h>
+#include <asm/atomic.h>
+
+#include "iw_cxgb4.h"
+
+#define T4_ULPTX_MIN_IO 32
+#define C4IW_MAX_INLINE_SIZE 96
+
+static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
+ void *data)
+{
+ struct sk_buff *skb;
+ struct ulp_mem_io *req;
+ struct ulptx_idata *sc;
+ u8 wr_len, *to_dp, *from_dp;
+ int copy_len, num_wqe, i, ret = 0;
+ struct c4iw_wr_wait wr_wait;
+
+ addr &= 0x7FFFFFF;
+ PDBG("%s addr 0x%x len %u\n", __func__, addr, len);
+ num_wqe = DIV_ROUND_UP(len, C4IW_MAX_INLINE_SIZE);
+ c4iw_init_wr_wait(&wr_wait);
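+	/* Split the write into work requests of at most C4IW_MAX_INLINE_SIZE (96) bytes each */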
+ for (i = 0; i < num_wqe; i++) {
+
+ copy_len = len > C4IW_MAX_INLINE_SIZE ? C4IW_MAX_INLINE_SIZE :
+ len;
+ wr_len = roundup(sizeof *req + sizeof *sc +
+ roundup(copy_len, T4_ULPTX_MIN_IO), 16);
+
+ skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
+ if (!skb)
+ return -ENOMEM;
+ set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
+
+ req = (struct ulp_mem_io *)__skb_put(skb, wr_len);
+ memset(req, 0, wr_len);
+ INIT_ULPTX_WR(req, wr_len, 0, 0);
+
+ if (i == (num_wqe-1)) {
+ req->wr.wr_hi = cpu_to_be32(FW_WR_OP(FW_ULPTX_WR) |
+ FW_WR_COMPL(1));
+ req->wr.wr_lo = (__force __be64)&wr_wait;
+ } else
+ req->wr.wr_hi = cpu_to_be32(FW_WR_OP(FW_ULPTX_WR));
+ req->wr.wr_mid = cpu_to_be32(
+ FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16)));
+
+ req->cmd = cpu_to_be32(ULPTX_CMD(ULP_TX_MEM_WRITE) | (1<<23));
+ req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN(
+ DIV_ROUND_UP(copy_len, T4_ULPTX_MIN_IO)));
+ req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(req->wr),
+ 16));
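+		/* addr is in 32-byte units; each 96-byte inline chunk advances it by 3 */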
+ req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR(addr + i * 3));
+
+ sc = (struct ulptx_idata *)(req + 1);
+ sc->cmd_more = cpu_to_be32(ULPTX_CMD(ULP_TX_SC_IMM));
+ sc->len = cpu_to_be32(roundup(copy_len, T4_ULPTX_MIN_IO));
+
+ to_dp = (u8 *)(sc + 1);
+ from_dp = (u8 *)data + i * C4IW_MAX_INLINE_SIZE;
+ if (data)
+ memcpy(to_dp, from_dp, copy_len);
+ else
+ memset(to_dp, 0, copy_len);
+ if (copy_len % T4_ULPTX_MIN_IO)
+ memset(to_dp + copy_len, 0, T4_ULPTX_MIN_IO -
+ (copy_len % T4_ULPTX_MIN_IO));
+ ret = c4iw_ofld_send(rdev, skb);
+ if (ret)
+ return ret;
+ len -= C4IW_MAX_INLINE_SIZE;
+ }
+
+ wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
+ if (!wr_wait.done) {
+ printk(KERN_ERR MOD "Device %s not responding!\n",
+ pci_name(rdev->lldi.pdev));
+ rdev->flags = T4_FATAL_ERROR;
+ ret = -EIO;
+ } else
+ ret = wr_wait.ret;
+ return ret;
+}
+
+/*
+ * Build and write a TPT entry.
+ * IN: stag key, pdid, perm, bind_enabled, zbva, to, len, page_size,
+ * pbl_size and pbl_addr
+ * OUT: stag index
+ */
+static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
+ u32 *stag, u8 stag_state, u32 pdid,
+ enum fw_ri_stag_type type, enum fw_ri_mem_perms perm,
+ int bind_enabled, u32 zbva, u64 to,
+ u64 len, u8 page_size, u32 pbl_size, u32 pbl_addr)
+{
+ int err;
+ struct fw_ri_tpte tpt;
+ u32 stag_idx;
+ static atomic_t key;
+
+ if (c4iw_fatal_error(rdev))
+ return -EIO;
+
+ stag_state = stag_state > 0;
+ stag_idx = (*stag) >> 8;
+
+ if ((!reset_tpt_entry) && (*stag == T4_STAG_UNSET)) {
+ stag_idx = c4iw_get_resource(&rdev->resource.tpt_fifo,
+ &rdev->resource.tpt_fifo_lock);
+ if (!stag_idx)
+ return -ENOMEM;
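+		/* stag layout: TPT index in the upper 24 bits, rotating 8-bit key in the low byte */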
+ *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
+ }
+ PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
+ __func__, stag_state, type, pdid, stag_idx);
+
+ /* write TPT entry */
+ if (reset_tpt_entry)
+ memset(&tpt, 0, sizeof(tpt));
+ else {
+ tpt.valid_to_pdid = cpu_to_be32(F_FW_RI_TPTE_VALID |
+ V_FW_RI_TPTE_STAGKEY((*stag & M_FW_RI_TPTE_STAGKEY)) |
+ V_FW_RI_TPTE_STAGSTATE(stag_state) |
+ V_FW_RI_TPTE_STAGTYPE(type) | V_FW_RI_TPTE_PDID(pdid));
+ tpt.locread_to_qpid = cpu_to_be32(V_FW_RI_TPTE_PERM(perm) |
+ (bind_enabled ? F_FW_RI_TPTE_MWBINDEN : 0) |
+ V_FW_RI_TPTE_ADDRTYPE((zbva ? FW_RI_ZERO_BASED_TO :
+ FW_RI_VA_BASED_TO))|
+ V_FW_RI_TPTE_PS(page_size));
+ tpt.nosnoop_pbladdr = !pbl_size ? 0 : cpu_to_be32(
+ V_FW_RI_TPTE_PBLADDR(PBL_OFF(rdev, pbl_addr)>>3));
+ tpt.len_lo = cpu_to_be32((u32)(len & 0xffffffffUL));
+ tpt.va_hi = cpu_to_be32((u32)(to >> 32));
+ tpt.va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL));
+ tpt.dca_mwbcnt_pstag = cpu_to_be32(0);
+ tpt.len_hi = cpu_to_be32((u32)(len >> 32));
+ }
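+	/* TPT entries are 32 bytes; stag.start >> 5 converts the region base to 32-byte units */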
+ err = write_adapter_mem(rdev, stag_idx +
+ (rdev->lldi.vr->stag.start >> 5),
+ sizeof(tpt), &tpt);
+
+ if (reset_tpt_entry)
+ c4iw_put_resource(&rdev->resource.tpt_fifo, stag_idx,
+ &rdev->resource.tpt_fifo_lock);
+ return err;
+}
+
+static int write_pbl(struct c4iw_rdev *rdev, __be64 *pbl,
+ u32 pbl_addr, u32 pbl_size)
+{
+ int err;
+
+ PDBG("%s *pdb_addr 0x%x, pbl_base 0x%x, pbl_size %d\n",
+ __func__, pbl_addr, rdev->lldi.vr->pbl.start,
+ pbl_size);
+
+ err = write_adapter_mem(rdev, pbl_addr >> 5, pbl_size << 3, pbl);
+ return err;
+}
+
+static int dereg_mem(struct c4iw_rdev *rdev, u32 stag, u32 pbl_size,
+ u32 pbl_addr)
+{
+ return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0,
+ pbl_size, pbl_addr);
+}
+
+static int allocate_window(struct c4iw_rdev *rdev, u32 *stag, u32 pdid)
+{
+ *stag = T4_STAG_UNSET;
+ return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_MW, 0, 0, 0,
+ 0UL, 0, 0, 0, 0);
+}
+
+static int deallocate_window(struct c4iw_rdev *rdev, u32 stag)
+{
+ return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0, 0,
+ 0);
+}
+
+static int allocate_stag(struct c4iw_rdev *rdev, u32 *stag, u32 pdid,
+ u32 pbl_size, u32 pbl_addr)
+{
+ *stag = T4_STAG_UNSET;
+ return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_NSMR, 0, 0, 0,
+ 0UL, 0, 0, pbl_size, pbl_addr);
+}
+
+static int finish_mem_reg(struct c4iw_mr *mhp, u32 stag)
+{
+ u32 mmid;
+
+ mhp->attr.state = 1;
+ mhp->attr.stag = stag;
+ mmid = stag >> 8;
+ mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
+ PDBG("%s mmid 0x%x mhp %p\n", __func__, mmid, mhp);
+ return insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid);
+}
+
+static int register_mem(struct c4iw_dev *rhp, struct c4iw_pd *php,
+ struct c4iw_mr *mhp, int shift)
+{
+ u32 stag = T4_STAG_UNSET;
+ int ret;
+
+ ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid,
+ FW_RI_STAG_NSMR, mhp->attr.perms,
+ mhp->attr.mw_bind_enable, mhp->attr.zbva,
+ mhp->attr.va_fbo, mhp->attr.len, shift - 12,
+ mhp->attr.pbl_size, mhp->attr.pbl_addr);
+ if (ret)
+ return ret;
+
+ ret = finish_mem_reg(mhp, stag);
+ if (ret)
+ dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
+ mhp->attr.pbl_addr);
+ return ret;
+}
+
+static int reregister_mem(struct c4iw_dev *rhp, struct c4iw_pd *php,
+ struct c4iw_mr *mhp, int shift, int npages)
+{
+ u32 stag;
+ int ret;
+
+ if (npages > mhp->attr.pbl_size)
+ return -ENOMEM;
+
+ stag = mhp->attr.stag;
+ ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid,
+ FW_RI_STAG_NSMR, mhp->attr.perms,
+ mhp->attr.mw_bind_enable, mhp->attr.zbva,
+ mhp->attr.va_fbo, mhp->attr.len, shift - 12,
+ mhp->attr.pbl_size, mhp->attr.pbl_addr);
+ if (ret)
+ return ret;
+
+ ret = finish_mem_reg(mhp, stag);
+ if (ret)
+ dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
+ mhp->attr.pbl_addr);
+
+ return ret;
+}
+
+static int alloc_pbl(struct c4iw_mr *mhp, int npages)
+{
+ mhp->attr.pbl_addr = c4iw_pblpool_alloc(&mhp->rhp->rdev,
+ npages << 3);
+
+ if (!mhp->attr.pbl_addr)
+ return -ENOMEM;
+
+ mhp->attr.pbl_size = npages;
+
+ return 0;
+}
+
+static int build_phys_page_list(struct ib_phys_buf *buffer_list,
+ int num_phys_buf, u64 *iova_start,
+ u64 *total_size, int *npages,
+ int *shift, __be64 **page_list)
+{
+ u64 mask;
+ int i, j, n;
+
+ mask = 0;
+ *total_size = 0;
+ for (i = 0; i < num_phys_buf; ++i) {
+ if (i != 0 && buffer_list[i].addr & ~PAGE_MASK)
+ return -EINVAL;
+ if (i != 0 && i != num_phys_buf - 1 &&
+ (buffer_list[i].size & ~PAGE_MASK))
+ return -EINVAL;
+ *total_size += buffer_list[i].size;
+ if (i > 0)
+ mask |= buffer_list[i].addr;
+ else
+ mask |= buffer_list[i].addr & PAGE_MASK;
+ if (i != num_phys_buf - 1)
+ mask |= buffer_list[i].addr + buffer_list[i].size;
+ else
+ mask |= (buffer_list[i].addr + buffer_list[i].size +
+ PAGE_SIZE - 1) & PAGE_MASK;
+ }
+
+ if (*total_size > 0xFFFFFFFFULL)
+ return -ENOMEM;
+
+ /* Find largest page shift we can use to cover buffers */
+ for (*shift = PAGE_SHIFT; *shift < 27; ++(*shift))
+ if ((1ULL << *shift) & mask)
+ break;
+
+ buffer_list[0].size += buffer_list[0].addr & ((1ULL << *shift) - 1);
+ buffer_list[0].addr &= ~0ull << *shift;
+
+ *npages = 0;
+ for (i = 0; i < num_phys_buf; ++i)
+ *npages += (buffer_list[i].size +
+ (1ULL << *shift) - 1) >> *shift;
+
+ if (!*npages)
+ return -EINVAL;
+
+ *page_list = kmalloc(sizeof(u64) * *npages, GFP_KERNEL);
+ if (!*page_list)
+ return -ENOMEM;
+
+ n = 0;
+ for (i = 0; i < num_phys_buf; ++i)
+ for (j = 0;
+ j < (buffer_list[i].size + (1ULL << *shift) - 1) >> *shift;
+ ++j)
+ (*page_list)[n++] = cpu_to_be64(buffer_list[i].addr +
+ ((u64) j << *shift));
+
+ PDBG("%s va 0x%llx mask 0x%llx shift %d len %lld pbl_size %d\n",
+ __func__, (unsigned long long)*iova_start,
+ (unsigned long long)mask, *shift, (unsigned long long)*total_size,
+ *npages);
+
+ return 0;
+
+}
+
+int c4iw_reregister_phys_mem(struct ib_mr *mr, int mr_rereg_mask,
+ struct ib_pd *pd, struct ib_phys_buf *buffer_list,
+ int num_phys_buf, int acc, u64 *iova_start)
+{
+
+ struct c4iw_mr mh, *mhp;
+ struct c4iw_pd *php;
+ struct c4iw_dev *rhp;
+ __be64 *page_list = NULL;
+ int shift = 0;
+ u64 total_size;
+ int npages;
+ int ret;
+
+ PDBG("%s ib_mr %p ib_pd %p\n", __func__, mr, pd);
+
+ /* There can be no memory windows */
+ if (atomic_read(&mr->usecnt))
+ return -EINVAL;
+
+ mhp = to_c4iw_mr(mr);
+ rhp = mhp->rhp;
+ php = to_c4iw_pd(mr->pd);
+
+ /* make sure we are on the same adapter */
+ if (rhp != php->rhp)
+ return -EINVAL;
+
+ memcpy(&mh, mhp, sizeof *mhp);
+
+ if (mr_rereg_mask & IB_MR_REREG_PD)
+ php = to_c4iw_pd(pd);
+ if (mr_rereg_mask & IB_MR_REREG_ACCESS) {
+ mh.attr.perms = c4iw_ib_to_tpt_access(acc);
+ mh.attr.mw_bind_enable = (acc & IB_ACCESS_MW_BIND) ==
+ IB_ACCESS_MW_BIND;
+ }
+ if (mr_rereg_mask & IB_MR_REREG_TRANS) {
+ ret = build_phys_page_list(buffer_list, num_phys_buf,
+ iova_start,
+ &total_size, &npages,
+ &shift, &page_list);
+ if (ret)
+ return ret;
+ }
+
+ ret = reregister_mem(rhp, php, &mh, shift, npages);
+ kfree(page_list);
+ if (ret)
+ return ret;
+ if (mr_rereg_mask & IB_MR_REREG_PD)
+ mhp->attr.pdid = php->pdid;
+ if (mr_rereg_mask & IB_MR_REREG_ACCESS)
+ mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
+ if (mr_rereg_mask & IB_MR_REREG_TRANS) {
+ mhp->attr.zbva = 0;
+ mhp->attr.va_fbo = *iova_start;
+ mhp->attr.page_size = shift - 12;
+ mhp->attr.len = (u32) total_size;
+ mhp->attr.pbl_size = npages;
+ }
+
+ return 0;
+}
+
+struct ib_mr *c4iw_register_phys_mem(struct ib_pd *pd,
+ struct ib_phys_buf *buffer_list,
+ int num_phys_buf, int acc, u64 *iova_start)
+{
+ __be64 *page_list;
+ int shift;
+ u64 total_size;
+ int npages;
+ struct c4iw_dev *rhp;
+ struct c4iw_pd *php;
+ struct c4iw_mr *mhp;
+ int ret;
+
+ PDBG("%s ib_pd %p\n", __func__, pd);
+ php = to_c4iw_pd(pd);
+ rhp = php->rhp;
+
+ mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
+ if (!mhp)
+ return ERR_PTR(-ENOMEM);
+
+ mhp->rhp = rhp;
+
+ /* First check that we have enough alignment */
+ if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK)) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (num_phys_buf > 1 &&
+ ((buffer_list[0].addr + buffer_list[0].size) & ~PAGE_MASK)) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ret = build_phys_page_list(buffer_list, num_phys_buf, iova_start,
+ &total_size, &npages, &shift,
+ &page_list);
+ if (ret)
+ goto err;
+
+ ret = alloc_pbl(mhp, npages);
+ if (ret) {
+ kfree(page_list);
+ goto err_pbl;
+ }
+
+ ret = write_pbl(&mhp->rhp->rdev, page_list, mhp->attr.pbl_addr,
+ npages);
+ kfree(page_list);
+ if (ret)
+ goto err_pbl;
+
+ mhp->attr.pdid = php->pdid;
+ mhp->attr.zbva = 0;
+
+ mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
+ mhp->attr.va_fbo = *iova_start;
+ mhp->attr.page_size = shift - 12;
+
+ mhp->attr.len = (u32) total_size;
+ mhp->attr.pbl_size = npages;
+ ret = register_mem(rhp, php, mhp, shift);
+ if (ret)
+ goto err_pbl;
+
+ return &mhp->ibmr;
+
+err_pbl:
+ c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
+ mhp->attr.pbl_size << 3);
+
+err:
+ kfree(mhp);
+ return ERR_PTR(ret);
+
+}
+
+struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc)
+{
+ struct c4iw_dev *rhp;
+ struct c4iw_pd *php;
+ struct c4iw_mr *mhp;
+ int ret;
+ u32 stag = T4_STAG_UNSET;
+
+ PDBG("%s ib_pd %p\n", __func__, pd);
+ php = to_c4iw_pd(pd);
+ rhp = php->rhp;
+
+ mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
+ if (!mhp)
+ return ERR_PTR(-ENOMEM);
+
+ mhp->rhp = rhp;
+ mhp->attr.pdid = php->pdid;
+ mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
+	mhp->attr.mw_bind_enable = (acc & IB_ACCESS_MW_BIND) == IB_ACCESS_MW_BIND;
+ mhp->attr.zbva = 0;
+ mhp->attr.va_fbo = 0;
+ mhp->attr.page_size = 0;
+ mhp->attr.len = ~0UL;
+ mhp->attr.pbl_size = 0;
+
+ ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, php->pdid,
+ FW_RI_STAG_NSMR, mhp->attr.perms,
+ mhp->attr.mw_bind_enable, 0, 0, ~0UL, 0, 0, 0);
+ if (ret)
+ goto err1;
+
+ ret = finish_mem_reg(mhp, stag);
+ if (ret)
+ goto err2;
+ return &mhp->ibmr;
+err2:
+ dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
+ mhp->attr.pbl_addr);
+err1:
+ kfree(mhp);
+ return ERR_PTR(ret);
+}
+
+struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+ u64 virt, int acc, struct ib_udata *udata)
+{
+ __be64 *pages;
+ int shift, n, len;
+ int i, j, k;
+ int err = 0;
+ struct ib_umem_chunk *chunk;
+ struct c4iw_dev *rhp;
+ struct c4iw_pd *php;
+ struct c4iw_mr *mhp;
+
+ PDBG("%s ib_pd %p\n", __func__, pd);
+
+ if (length == ~0ULL)
+ return ERR_PTR(-EINVAL);
+
+ if ((length + start) < start)
+ return ERR_PTR(-EINVAL);
+
+ php = to_c4iw_pd(pd);
+ rhp = php->rhp;
+ mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
+ if (!mhp)
+ return ERR_PTR(-ENOMEM);
+
+ mhp->rhp = rhp;
+
+ mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
+ if (IS_ERR(mhp->umem)) {
+ err = PTR_ERR(mhp->umem);
+ kfree(mhp);
+ return ERR_PTR(err);
+ }
+
+ shift = ffs(mhp->umem->page_size) - 1;
+
+ n = 0;
+ list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
+ n += chunk->nents;
+
+ err = alloc_pbl(mhp, n);
+ if (err)
+ goto err;
+
+ pages = (__be64 *) __get_free_page(GFP_KERNEL);
+ if (!pages) {
+ err = -ENOMEM;
+ goto err_pbl;
+ }
+
+ i = n = 0;
+
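+	/* Walk the umem chunks and flush the PBL to adapter memory one page of entries at a time */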
+ list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
+ for (j = 0; j < chunk->nmap; ++j) {
+ len = sg_dma_len(&chunk->page_list[j]) >> shift;
+ for (k = 0; k < len; ++k) {
+ pages[i++] = cpu_to_be64(sg_dma_address(
+ &chunk->page_list[j]) +
+ mhp->umem->page_size * k);
+ if (i == PAGE_SIZE / sizeof *pages) {
+ err = write_pbl(&mhp->rhp->rdev,
+ pages,
+ mhp->attr.pbl_addr + (n << 3), i);
+ if (err)
+ goto pbl_done;
+ n += i;
+ i = 0;
+ }
+ }
+ }
+
+ if (i)
+ err = write_pbl(&mhp->rhp->rdev, pages,
+ mhp->attr.pbl_addr + (n << 3), i);
+
+pbl_done:
+ free_page((unsigned long) pages);
+ if (err)
+ goto err_pbl;
+
+ mhp->attr.pdid = php->pdid;
+ mhp->attr.zbva = 0;
+ mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
+ mhp->attr.va_fbo = virt;
+ mhp->attr.page_size = shift - 12;
+ mhp->attr.len = (u32) length;
+
+ err = register_mem(rhp, php, mhp, shift);
+ if (err)
+ goto err_pbl;
+
+ return &mhp->ibmr;
+
+err_pbl:
+ c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
+ mhp->attr.pbl_size << 3);
+
+err:
+ ib_umem_release(mhp->umem);
+ kfree(mhp);
+ return ERR_PTR(err);
+}
+
+struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd)
+{
+ struct c4iw_dev *rhp;
+ struct c4iw_pd *php;
+ struct c4iw_mw *mhp;
+ u32 mmid;
+ u32 stag = 0;
+ int ret;
+
+ php = to_c4iw_pd(pd);
+ rhp = php->rhp;
+ mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
+ if (!mhp)
+ return ERR_PTR(-ENOMEM);
+ ret = allocate_window(&rhp->rdev, &stag, php->pdid);
+ if (ret) {
+ kfree(mhp);
+ return ERR_PTR(ret);
+ }
+ mhp->rhp = rhp;
+ mhp->attr.pdid = php->pdid;
+ mhp->attr.type = FW_RI_STAG_MW;
+ mhp->attr.stag = stag;
+ mmid = (stag) >> 8;
+ mhp->ibmw.rkey = stag;
+ if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
+ deallocate_window(&rhp->rdev, mhp->attr.stag);
+ kfree(mhp);
+ return ERR_PTR(-ENOMEM);
+ }
+ PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
+ return &(mhp->ibmw);
+}
+
+int c4iw_dealloc_mw(struct ib_mw *mw)
+{
+ struct c4iw_dev *rhp;
+ struct c4iw_mw *mhp;
+ u32 mmid;
+
+ mhp = to_c4iw_mw(mw);
+ rhp = mhp->rhp;
+ mmid = (mw->rkey) >> 8;
+ deallocate_window(&rhp->rdev, mhp->attr.stag);
+ remove_handle(rhp, &rhp->mmidr, mmid);
+ kfree(mhp);
+ PDBG("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp);
+ return 0;
+}
+
+struct ib_mr *c4iw_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth)
+{
+ struct c4iw_dev *rhp;
+ struct c4iw_pd *php;
+ struct c4iw_mr *mhp;
+ u32 mmid;
+ u32 stag = 0;
+ int ret = 0;
+
+ php = to_c4iw_pd(pd);
+ rhp = php->rhp;
+ mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
+ if (!mhp) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ mhp->rhp = rhp;
+ ret = alloc_pbl(mhp, pbl_depth);
+ if (ret)
+ goto err1;
+ mhp->attr.pbl_size = pbl_depth;
+ ret = allocate_stag(&rhp->rdev, &stag, php->pdid,
+ mhp->attr.pbl_size, mhp->attr.pbl_addr);
+ if (ret)
+ goto err2;
+ mhp->attr.pdid = php->pdid;
+ mhp->attr.type = FW_RI_STAG_NSMR;
+ mhp->attr.stag = stag;
+ mhp->attr.state = 1;
+ mmid = (stag) >> 8;
+ mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
+ if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
+ ret = -ENOMEM;
+ goto err3;
+ }
+
+ PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
+ return &(mhp->ibmr);
+err3:
+ dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
+ mhp->attr.pbl_addr);
+err2:
+ c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
+ mhp->attr.pbl_size << 3);
+err1:
+ kfree(mhp);
+err:
+ return ERR_PTR(ret);
+}
+
+struct ib_fast_reg_page_list *c4iw_alloc_fastreg_pbl(struct ib_device *device,
+ int page_list_len)
+{
+ struct c4iw_fr_page_list *c4pl;
+ struct c4iw_dev *dev = to_c4iw_dev(device);
+ dma_addr_t dma_addr;
+ int size = sizeof *c4pl + page_list_len * sizeof(u64);
+
+ c4pl = dma_alloc_coherent(&dev->rdev.lldi.pdev->dev, size,
+ &dma_addr, GFP_KERNEL);
+ if (!c4pl)
+ return ERR_PTR(-ENOMEM);
+
+ pci_unmap_addr_set(c4pl, mapping, dma_addr);
+ c4pl->dma_addr = dma_addr;
+ c4pl->dev = dev;
+ c4pl->size = size;
+ c4pl->ibpl.page_list = (u64 *)(c4pl + 1);
+ c4pl->ibpl.max_page_list_len = page_list_len;
+
+ return &c4pl->ibpl;
+}
+
+void c4iw_free_fastreg_pbl(struct ib_fast_reg_page_list *ibpl)
+{
+ struct c4iw_fr_page_list *c4pl = to_c4iw_fr_page_list(ibpl);
+
+ dma_free_coherent(&c4pl->dev->rdev.lldi.pdev->dev, c4pl->size,
+ c4pl, pci_unmap_addr(c4pl, mapping));
+}
+
+int c4iw_dereg_mr(struct ib_mr *ib_mr)
+{
+ struct c4iw_dev *rhp;
+ struct c4iw_mr *mhp;
+ u32 mmid;
+
+ PDBG("%s ib_mr %p\n", __func__, ib_mr);
+ /* There can be no memory windows */
+ if (atomic_read(&ib_mr->usecnt))
+ return -EINVAL;
+
+ mhp = to_c4iw_mr(ib_mr);
+ rhp = mhp->rhp;
+ mmid = mhp->attr.stag >> 8;
+ dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
+ mhp->attr.pbl_addr);
+ if (mhp->attr.pbl_size)
+ c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
+ mhp->attr.pbl_size << 3);
+ remove_handle(rhp, &rhp->mmidr, mmid);
+ if (mhp->kva)
+ kfree((void *) (unsigned long) mhp->kva);
+ if (mhp->umem)
+ ib_umem_release(mhp->umem);
+ PDBG("%s mmid 0x%x ptr %p\n", __func__, mmid, mhp);
+ kfree(mhp);
+ return 0;
+}
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
new file mode 100644
index 000000000000..8f645c83a125
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -0,0 +1,520 @@
+/*
+ * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/device.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/ethtool.h>
+#include <linux/rtnetlink.h>
+#include <linux/inetdevice.h>
+#include <linux/io.h>
+
+#include <asm/irq.h>
+#include <asm/byteorder.h>
+
+#include <rdma/iw_cm.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_smi.h>
+#include <rdma/ib_umem.h>
+#include <rdma/ib_user_verbs.h>
+
+#include "iw_cxgb4.h"
+
+static int fastreg_support;
+module_param(fastreg_support, int, 0644);
+MODULE_PARM_DESC(fastreg_support, "Advertise fastreg support (default=0)");
+
+static int c4iw_modify_port(struct ib_device *ibdev,
+ u8 port, int port_modify_mask,
+ struct ib_port_modify *props)
+{
+ return -ENOSYS;
+}
+
+static struct ib_ah *c4iw_ah_create(struct ib_pd *pd,
+ struct ib_ah_attr *ah_attr)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static int c4iw_ah_destroy(struct ib_ah *ah)
+{
+ return -ENOSYS;
+}
+
+static int c4iw_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+{
+ return -ENOSYS;
+}
+
+static int c4iw_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+{
+ return -ENOSYS;
+}
+
+static int c4iw_process_mad(struct ib_device *ibdev, int mad_flags,
+ u8 port_num, struct ib_wc *in_wc,
+ struct ib_grh *in_grh, struct ib_mad *in_mad,
+ struct ib_mad *out_mad)
+{
+ return -ENOSYS;
+}
+
+static int c4iw_dealloc_ucontext(struct ib_ucontext *context)
+{
+ struct c4iw_dev *rhp = to_c4iw_dev(context->device);
+ struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context);
+ struct c4iw_mm_entry *mm, *tmp;
+
+ PDBG("%s context %p\n", __func__, context);
+ list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
+ kfree(mm);
+ c4iw_release_dev_ucontext(&rhp->rdev, &ucontext->uctx);
+ kfree(ucontext);
+ return 0;
+}
+
+static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
+ struct ib_udata *udata)
+{
+ struct c4iw_ucontext *context;
+ struct c4iw_dev *rhp = to_c4iw_dev(ibdev);
+
+ PDBG("%s ibdev %p\n", __func__, ibdev);
+ context = kzalloc(sizeof(*context), GFP_KERNEL);
+ if (!context)
+ return ERR_PTR(-ENOMEM);
+ c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx);
+ INIT_LIST_HEAD(&context->mmaps);
+ spin_lock_init(&context->mmap_lock);
+ return &context->ibucontext;
+}
+
+static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
+{
+ int len = vma->vm_end - vma->vm_start;
+ u32 key = vma->vm_pgoff << PAGE_SHIFT;
+ struct c4iw_rdev *rdev;
+ int ret = 0;
+ struct c4iw_mm_entry *mm;
+ struct c4iw_ucontext *ucontext;
+ u64 addr;
+
+ PDBG("%s pgoff 0x%lx key 0x%x len %d\n", __func__, vma->vm_pgoff,
+ key, len);
+
+ if (vma->vm_start & (PAGE_SIZE-1))
+ return -EINVAL;
+
+ rdev = &(to_c4iw_dev(context->device)->rdev);
+ ucontext = to_c4iw_ucontext(context);
+
+ mm = remove_mmap(ucontext, key, len);
+ if (!mm)
+ return -EINVAL;
+ addr = mm->addr;
+ kfree(mm);
+
+ if ((addr >= pci_resource_start(rdev->lldi.pdev, 2)) &&
+ (addr < (pci_resource_start(rdev->lldi.pdev, 2) +
+ pci_resource_len(rdev->lldi.pdev, 2)))) {
+
+ /*
+ * Map T4 DB register.
+ */
+ if (vma->vm_flags & VM_READ)
+ return -EPERM;
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
+ vma->vm_flags &= ~VM_MAYREAD;
+ ret = io_remap_pfn_range(vma, vma->vm_start,
+ addr >> PAGE_SHIFT,
+ len, vma->vm_page_prot);
+ } else {
+
+ /*
+		 * Map WQ or CQ contiguous DMA memory.
+ */
+ ret = remap_pfn_range(vma, vma->vm_start,
+ addr >> PAGE_SHIFT,
+ len, vma->vm_page_prot);
+ }
+
+ return ret;
+}
+
+static int c4iw_deallocate_pd(struct ib_pd *pd)
+{
+ struct c4iw_dev *rhp;
+ struct c4iw_pd *php;
+
+ php = to_c4iw_pd(pd);
+ rhp = php->rhp;
+ PDBG("%s ibpd %p pdid 0x%x\n", __func__, pd, php->pdid);
+ c4iw_put_resource(&rhp->rdev.resource.pdid_fifo, php->pdid,
+ &rhp->rdev.resource.pdid_fifo_lock);
+ kfree(php);
+ return 0;
+}
+
+static struct ib_pd *c4iw_allocate_pd(struct ib_device *ibdev,
+ struct ib_ucontext *context,
+ struct ib_udata *udata)
+{
+ struct c4iw_pd *php;
+ u32 pdid;
+ struct c4iw_dev *rhp;
+
+ PDBG("%s ibdev %p\n", __func__, ibdev);
+ rhp = (struct c4iw_dev *) ibdev;
+ pdid = c4iw_get_resource(&rhp->rdev.resource.pdid_fifo,
+ &rhp->rdev.resource.pdid_fifo_lock);
+ if (!pdid)
+ return ERR_PTR(-EINVAL);
+ php = kzalloc(sizeof(*php), GFP_KERNEL);
+ if (!php) {
+ c4iw_put_resource(&rhp->rdev.resource.pdid_fifo, pdid,
+ &rhp->rdev.resource.pdid_fifo_lock);
+ return ERR_PTR(-ENOMEM);
+ }
+ php->pdid = pdid;
+ php->rhp = rhp;
+ if (context) {
+ if (ib_copy_to_udata(udata, &php->pdid, sizeof(u32))) {
+ c4iw_deallocate_pd(&php->ibpd);
+ return ERR_PTR(-EFAULT);
+ }
+ }
+ PDBG("%s pdid 0x%0x ptr 0x%p\n", __func__, pdid, php);
+ return &php->ibpd;
+}
+
+static int c4iw_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
+ u16 *pkey)
+{
+ PDBG("%s ibdev %p\n", __func__, ibdev);
+ *pkey = 0;
+ return 0;
+}
+
+static int c4iw_query_gid(struct ib_device *ibdev, u8 port, int index,
+ union ib_gid *gid)
+{
+ struct c4iw_dev *dev;
+
+ PDBG("%s ibdev %p, port %d, index %d, gid %p\n",
+ __func__, ibdev, port, index, gid);
+ dev = to_c4iw_dev(ibdev);
+ BUG_ON(port == 0);
+ memset(&(gid->raw[0]), 0, sizeof(gid->raw));
+ memcpy(&(gid->raw[0]), dev->rdev.lldi.ports[port-1]->dev_addr, 6);
+ return 0;
+}
+
+static int c4iw_query_device(struct ib_device *ibdev,
+ struct ib_device_attr *props)
+{
+
+ struct c4iw_dev *dev;
+ PDBG("%s ibdev %p\n", __func__, ibdev);
+
+ dev = to_c4iw_dev(ibdev);
+ memset(props, 0, sizeof *props);
+ memcpy(&props->sys_image_guid, dev->rdev.lldi.ports[0]->dev_addr, 6);
+ props->hw_ver = dev->rdev.lldi.adapter_type;
+ props->fw_ver = dev->rdev.lldi.fw_vers;
+ props->device_cap_flags = dev->device_cap_flags;
+ props->page_size_cap = T4_PAGESIZE_MASK;
+ props->vendor_id = (u32)dev->rdev.lldi.pdev->vendor;
+ props->vendor_part_id = (u32)dev->rdev.lldi.pdev->device;
+ props->max_mr_size = T4_MAX_MR_SIZE;
+ props->max_qp = T4_MAX_NUM_QP;
+ props->max_qp_wr = T4_MAX_QP_DEPTH;
+ props->max_sge = T4_MAX_RECV_SGE;
+ props->max_sge_rd = 1;
+ props->max_qp_rd_atom = c4iw_max_read_depth;
+ props->max_qp_init_rd_atom = c4iw_max_read_depth;
+ props->max_cq = T4_MAX_NUM_CQ;
+ props->max_cqe = T4_MAX_CQ_DEPTH;
+ props->max_mr = c4iw_num_stags(&dev->rdev);
+ props->max_pd = T4_MAX_NUM_PD;
+ props->local_ca_ack_delay = 0;
+ props->max_fast_reg_page_list_len = T4_MAX_FR_DEPTH;
+
+ return 0;
+}
+
+static int c4iw_query_port(struct ib_device *ibdev, u8 port,
+ struct ib_port_attr *props)
+{
+ struct c4iw_dev *dev;
+ struct net_device *netdev;
+ struct in_device *inetdev;
+
+ PDBG("%s ibdev %p\n", __func__, ibdev);
+
+ dev = to_c4iw_dev(ibdev);
+ netdev = dev->rdev.lldi.ports[port-1];
+
+ memset(props, 0, sizeof(struct ib_port_attr));
+ props->max_mtu = IB_MTU_4096;
+ if (netdev->mtu >= 4096)
+ props->active_mtu = IB_MTU_4096;
+ else if (netdev->mtu >= 2048)
+ props->active_mtu = IB_MTU_2048;
+ else if (netdev->mtu >= 1024)
+ props->active_mtu = IB_MTU_1024;
+ else if (netdev->mtu >= 512)
+ props->active_mtu = IB_MTU_512;
+ else
+ props->active_mtu = IB_MTU_256;
+
+ if (!netif_carrier_ok(netdev))
+ props->state = IB_PORT_DOWN;
+ else {
+ inetdev = in_dev_get(netdev);
+ if (inetdev) {
+ if (inetdev->ifa_list)
+ props->state = IB_PORT_ACTIVE;
+ else
+ props->state = IB_PORT_INIT;
+ in_dev_put(inetdev);
+ } else
+ props->state = IB_PORT_INIT;
+ }
+
+ props->port_cap_flags =
+ IB_PORT_CM_SUP |
+ IB_PORT_SNMP_TUNNEL_SUP |
+ IB_PORT_REINIT_SUP |
+ IB_PORT_DEVICE_MGMT_SUP |
+ IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
+ props->gid_tbl_len = 1;
+ props->pkey_tbl_len = 1;
+ props->active_width = 2;
+ props->active_speed = 2;
+ props->max_msg_sz = -1;
+
+ return 0;
+}
+
+static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
+ ibdev.dev);
+ PDBG("%s dev 0x%p\n", __func__, dev);
+ return sprintf(buf, "%d\n", c4iw_dev->rdev.lldi.adapter_type);
+}
+
+static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
+ ibdev.dev);
+ PDBG("%s dev 0x%p\n", __func__, dev);
+
+ return sprintf(buf, "%u.%u.%u.%u\n",
+ FW_HDR_FW_VER_MAJOR_GET(c4iw_dev->rdev.lldi.fw_vers),
+ FW_HDR_FW_VER_MINOR_GET(c4iw_dev->rdev.lldi.fw_vers),
+ FW_HDR_FW_VER_MICRO_GET(c4iw_dev->rdev.lldi.fw_vers),
+ FW_HDR_FW_VER_BUILD_GET(c4iw_dev->rdev.lldi.fw_vers));
+}
+
+static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
+ ibdev.dev);
+ struct ethtool_drvinfo info;
+ struct net_device *lldev = c4iw_dev->rdev.lldi.ports[0];
+
+ PDBG("%s dev 0x%p\n", __func__, dev);
+ lldev->ethtool_ops->get_drvinfo(lldev, &info);
+ return sprintf(buf, "%s\n", info.driver);
+}
+
+static ssize_t show_board(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
+ ibdev.dev);
+ PDBG("%s dev 0x%p\n", __func__, dev);
+ return sprintf(buf, "%x.%x\n", c4iw_dev->rdev.lldi.pdev->vendor,
+ c4iw_dev->rdev.lldi.pdev->device);
+}
+
+static int c4iw_get_mib(struct ib_device *ibdev,
+ union rdma_protocol_stats *stats)
+{
+ return -ENOSYS;
+}
+
+static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
+static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
+static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
+static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
+
+static struct device_attribute *c4iw_class_attributes[] = {
+ &dev_attr_hw_rev,
+ &dev_attr_fw_ver,
+ &dev_attr_hca_type,
+ &dev_attr_board_id,
+};
+
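+/*
+ * Register the device with the IB core, wire up the iWARP CM ops, and
+ * create the sysfs attribute files.
+ */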
+int c4iw_register_device(struct c4iw_dev *dev)
+{
+ int ret;
+ int i;
+
+ PDBG("%s c4iw_dev %p\n", __func__, dev);
+ BUG_ON(!dev->rdev.lldi.ports[0]);
+ strlcpy(dev->ibdev.name, "cxgb4_%d", IB_DEVICE_NAME_MAX);
+ memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
+ memcpy(&dev->ibdev.node_guid, dev->rdev.lldi.ports[0]->dev_addr, 6);
+ dev->ibdev.owner = THIS_MODULE;
+ dev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_WINDOW;
+ if (fastreg_support)
+ dev->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
+ dev->ibdev.local_dma_lkey = 0;
+ dev->ibdev.uverbs_cmd_mask =
+ (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
+ (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
+ (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
+ (1ull << IB_USER_VERBS_CMD_REG_MR) |
+ (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
+ (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
+ (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
+ (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
+ (1ull << IB_USER_VERBS_CMD_POST_SEND) |
+ (1ull << IB_USER_VERBS_CMD_POST_RECV);
+ dev->ibdev.node_type = RDMA_NODE_RNIC;
+	/* locking hierarchy: cq lock first, then qp lock. */
+ dev->ibdev.phys_port_cnt = dev->rdev.lldi.nports;
+ dev->ibdev.num_comp_vectors = 1;
+ dev->ibdev.dma_device = &(dev->rdev.lldi.pdev->dev);
+ dev->ibdev.query_device = c4iw_query_device;
+ dev->ibdev.query_port = c4iw_query_port;
+ dev->ibdev.modify_port = c4iw_modify_port;
+ dev->ibdev.query_pkey = c4iw_query_pkey;
+ dev->ibdev.query_gid = c4iw_query_gid;
+ dev->ibdev.alloc_ucontext = c4iw_alloc_ucontext;
+ dev->ibdev.dealloc_ucontext = c4iw_dealloc_ucontext;
+	/* locking hierarchy: cq lock first, then qp lock. */
+ dev->ibdev.alloc_pd = c4iw_allocate_pd;
+ dev->ibdev.dealloc_pd = c4iw_deallocate_pd;
+ dev->ibdev.create_ah = c4iw_ah_create;
+ dev->ibdev.destroy_ah = c4iw_ah_destroy;
+ dev->ibdev.create_qp = c4iw_create_qp;
+ dev->ibdev.modify_qp = c4iw_ib_modify_qp;
+ dev->ibdev.destroy_qp = c4iw_destroy_qp;
+ dev->ibdev.create_cq = c4iw_create_cq;
+ dev->ibdev.destroy_cq = c4iw_destroy_cq;
+ dev->ibdev.resize_cq = c4iw_resize_cq;
+ dev->ibdev.poll_cq = c4iw_poll_cq;
+ dev->ibdev.get_dma_mr = c4iw_get_dma_mr;
+ dev->ibdev.reg_phys_mr = c4iw_register_phys_mem;
+ dev->ibdev.rereg_phys_mr = c4iw_reregister_phys_mem;
+ dev->ibdev.reg_user_mr = c4iw_reg_user_mr;
+ dev->ibdev.dereg_mr = c4iw_dereg_mr;
+ dev->ibdev.alloc_mw = c4iw_alloc_mw;
+ dev->ibdev.bind_mw = c4iw_bind_mw;
+ dev->ibdev.dealloc_mw = c4iw_dealloc_mw;
+ dev->ibdev.alloc_fast_reg_mr = c4iw_alloc_fast_reg_mr;
+ dev->ibdev.alloc_fast_reg_page_list = c4iw_alloc_fastreg_pbl;
+ dev->ibdev.free_fast_reg_page_list = c4iw_free_fastreg_pbl;
+ dev->ibdev.attach_mcast = c4iw_multicast_attach;
+ dev->ibdev.detach_mcast = c4iw_multicast_detach;
+ dev->ibdev.process_mad = c4iw_process_mad;
+ dev->ibdev.req_notify_cq = c4iw_arm_cq;
+ dev->ibdev.post_send = c4iw_post_send;
+ dev->ibdev.post_recv = c4iw_post_receive;
+ dev->ibdev.get_protocol_stats = c4iw_get_mib;
+
+ dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
+ if (!dev->ibdev.iwcm)
+ return -ENOMEM;
+
+ dev->ibdev.iwcm->connect = c4iw_connect;
+ dev->ibdev.iwcm->accept = c4iw_accept_cr;
+ dev->ibdev.iwcm->reject = c4iw_reject_cr;
+ dev->ibdev.iwcm->create_listen = c4iw_create_listen;
+ dev->ibdev.iwcm->destroy_listen = c4iw_destroy_listen;
+ dev->ibdev.iwcm->add_ref = c4iw_qp_add_ref;
+ dev->ibdev.iwcm->rem_ref = c4iw_qp_rem_ref;
+ dev->ibdev.iwcm->get_qp = c4iw_get_qp;
+
+ ret = ib_register_device(&dev->ibdev, NULL);
+ if (ret)
+ goto bail1;
+
+ for (i = 0; i < ARRAY_SIZE(c4iw_class_attributes); ++i) {
+ ret = device_create_file(&dev->ibdev.dev,
+ c4iw_class_attributes[i]);
+ if (ret)
+ goto bail2;
+ }
+ dev->registered = 1;
+ return 0;
+bail2:
+ ib_unregister_device(&dev->ibdev);
+bail1:
+ kfree(dev->ibdev.iwcm);
+ return ret;
+}
+
+void c4iw_unregister_device(struct c4iw_dev *dev)
+{
+ int i;
+
+ PDBG("%s c4iw_dev %p\n", __func__, dev);
+ for (i = 0; i < ARRAY_SIZE(c4iw_class_attributes); ++i)
+ device_remove_file(&dev->ibdev.dev,
+ c4iw_class_attributes[i]);
+ ib_unregister_device(&dev->ibdev);
+ kfree(dev->ibdev.iwcm);
+ dev->registered = 0;
+ return;
+}
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
new file mode 100644
index 000000000000..0c28ed1eafa6
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -0,0 +1,1576 @@
+/*
+ * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "iw_cxgb4.h"
+
+static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
+ struct c4iw_dev_ucontext *uctx)
+{
+ /*
+ * uP clears EQ contexts when the connection exits rdma mode,
+ * so no need to post a RESET WR for these EQs.
+ */
+ dma_free_coherent(&(rdev->lldi.pdev->dev),
+ wq->rq.memsize, wq->rq.queue,
+ pci_unmap_addr(&wq->rq, mapping));
+ dma_free_coherent(&(rdev->lldi.pdev->dev),
+ wq->sq.memsize, wq->sq.queue,
+ pci_unmap_addr(&wq->sq, mapping));
+ c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
+ kfree(wq->rq.sw_rq);
+ kfree(wq->sq.sw_sq);
+ c4iw_put_qpid(rdev, wq->rq.qid, uctx);
+ c4iw_put_qpid(rdev, wq->sq.qid, uctx);
+ return 0;
+}
+
+static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
+ struct t4_cq *rcq, struct t4_cq *scq,
+ struct c4iw_dev_ucontext *uctx)
+{
+ int user = (uctx != &rdev->uctx);
+ struct fw_ri_res_wr *res_wr;
+ struct fw_ri_res *res;
+ int wr_len;
+ struct c4iw_wr_wait wr_wait;
+ struct sk_buff *skb;
+ int ret;
+ int eqsize;
+
+ wq->sq.qid = c4iw_get_qpid(rdev, uctx);
+ if (!wq->sq.qid)
+ return -ENOMEM;
+
+ wq->rq.qid = c4iw_get_qpid(rdev, uctx);
+ if (!wq->rq.qid)
+ goto err1;
+
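+	/* Kernel QPs need software shadow queues to track posted work requests. */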
+ if (!user) {
+ wq->sq.sw_sq = kzalloc(wq->sq.size * sizeof *wq->sq.sw_sq,
+ GFP_KERNEL);
+ if (!wq->sq.sw_sq)
+ goto err2;
+
+ wq->rq.sw_rq = kzalloc(wq->rq.size * sizeof *wq->rq.sw_rq,
+ GFP_KERNEL);
+ if (!wq->rq.sw_rq)
+ goto err3;
+ }
+
+ /*
+ * RQT must be a power of 2.
+ */
+ wq->rq.rqt_size = roundup_pow_of_two(wq->rq.size);
+ wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size);
+ if (!wq->rq.rqt_hwaddr)
+ goto err4;
+
+ wq->sq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
+ wq->sq.memsize, &(wq->sq.dma_addr),
+ GFP_KERNEL);
+ if (!wq->sq.queue)
+ goto err5;
+ memset(wq->sq.queue, 0, wq->sq.memsize);
+ pci_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);
+
+ wq->rq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
+ wq->rq.memsize, &(wq->rq.dma_addr),
+ GFP_KERNEL);
+ if (!wq->rq.queue)
+ goto err6;
+ PDBG("%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
+ __func__, wq->sq.queue,
+ (unsigned long long)virt_to_phys(wq->sq.queue),
+ wq->rq.queue,
+ (unsigned long long)virt_to_phys(wq->rq.queue));
+ memset(wq->rq.queue, 0, wq->rq.memsize);
+ pci_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);
+
+ wq->db = rdev->lldi.db_reg;
+ wq->gts = rdev->lldi.gts_reg;
+ if (user) {
+ wq->sq.udb = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
+ (wq->sq.qid << rdev->qpshift);
+ wq->sq.udb &= PAGE_MASK;
+ wq->rq.udb = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
+ (wq->rq.qid << rdev->qpshift);
+ wq->rq.udb &= PAGE_MASK;
+ }
+ wq->rdev = rdev;
+ wq->rq.msn = 1;
+
+ /* build fw_ri_res_wr */
+ wr_len = sizeof *res_wr + 2 * sizeof *res;
+
+ skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto err7;
+ }
+ set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
+
+ res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
+ memset(res_wr, 0, wr_len);
+ res_wr->op_nres = cpu_to_be32(
+ FW_WR_OP(FW_RI_RES_WR) |
+ V_FW_RI_RES_WR_NRES(2) |
+ FW_WR_COMPL(1));
+ res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
+ res_wr->cookie = (u64)&wr_wait;
+ res = res_wr->res;
+ res->u.sqrq.restype = FW_RI_RES_TYPE_SQ;
+ res->u.sqrq.op = FW_RI_RES_OP_WRITE;
+
+ /*
+ * eqsize is the number of 64B entries plus the status page size.
+ */
+ eqsize = wq->sq.size * T4_SQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES;
+
+ res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
+ V_FW_RI_RES_WR_HOSTFCMODE(0) | /* no host cidx updates */
+ V_FW_RI_RES_WR_CPRIO(0) | /* don't keep in chip cache */
+ V_FW_RI_RES_WR_PCIECHN(0) | /* set by uP at ri_init time */
+ V_FW_RI_RES_WR_IQID(scq->cqid));
+ res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
+ V_FW_RI_RES_WR_DCAEN(0) |
+ V_FW_RI_RES_WR_DCACPU(0) |
+ V_FW_RI_RES_WR_FBMIN(3) |
+ V_FW_RI_RES_WR_FBMAX(3) |
+ V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
+ V_FW_RI_RES_WR_CIDXFTHRESH(0) |
+ V_FW_RI_RES_WR_EQSIZE(eqsize));
+ res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid);
+ res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr);
+ res++;
+ res->u.sqrq.restype = FW_RI_RES_TYPE_RQ;
+ res->u.sqrq.op = FW_RI_RES_OP_WRITE;
+
+ /*
+ * eqsize is the number of 64B entries plus the status page size.
+ */
+ eqsize = wq->rq.size * T4_RQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES;
+ res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
+ V_FW_RI_RES_WR_HOSTFCMODE(0) | /* no host cidx updates */
+ V_FW_RI_RES_WR_CPRIO(0) | /* don't keep in chip cache */
+ V_FW_RI_RES_WR_PCIECHN(0) | /* set by uP at ri_init time */
+ V_FW_RI_RES_WR_IQID(rcq->cqid));
+ res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
+ V_FW_RI_RES_WR_DCAEN(0) |
+ V_FW_RI_RES_WR_DCACPU(0) |
+ V_FW_RI_RES_WR_FBMIN(3) |
+ V_FW_RI_RES_WR_FBMAX(3) |
+ V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
+ V_FW_RI_RES_WR_CIDXFTHRESH(0) |
+ V_FW_RI_RES_WR_EQSIZE(eqsize));
+ res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid);
+ res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr);
+
+ c4iw_init_wr_wait(&wr_wait);
+
+ ret = c4iw_ofld_send(rdev, skb);
+ if (ret)
+ goto err7;
+ wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
+ if (!wr_wait.done) {
+ printk(KERN_ERR MOD "Device %s not responding!\n",
+ pci_name(rdev->lldi.pdev));
+ rdev->flags = T4_FATAL_ERROR;
+ ret = -EIO;
+ } else
+ ret = wr_wait.ret;
+ if (ret)
+ goto err7;
+
+ PDBG("%s sqid 0x%x rqid 0x%x kdb 0x%p squdb 0x%llx rqudb 0x%llx\n",
+ __func__, wq->sq.qid, wq->rq.qid, wq->db,
+ (unsigned long long)wq->sq.udb, (unsigned long long)wq->rq.udb);
+
+ return 0;
+err7:
+ dma_free_coherent(&(rdev->lldi.pdev->dev),
+ wq->rq.memsize, wq->rq.queue,
+ pci_unmap_addr(&wq->rq, mapping));
+err6:
+ dma_free_coherent(&(rdev->lldi.pdev->dev),
+ wq->sq.memsize, wq->sq.queue,
+ pci_unmap_addr(&wq->sq, mapping));
+err5:
+ c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
+err4:
+ kfree(wq->rq.sw_rq);
+err3:
+ kfree(wq->sq.sw_sq);
+err2:
+ c4iw_put_qpid(rdev, wq->rq.qid, uctx);
+err1:
+ c4iw_put_qpid(rdev, wq->sq.qid, uctx);
+ return -ENOMEM;
+}
+
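+/*
+ * Build a FW send WR from an ib_send_wr: the payload is copied as immediate
+ * data for IB_SEND_INLINE, otherwise described by an SGL.
+ */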
+static int build_rdma_send(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
+{
+ int i;
+ u32 plen;
+ int size;
+ u8 *datap;
+
+ if (wr->num_sge > T4_MAX_SEND_SGE)
+ return -EINVAL;
+ switch (wr->opcode) {
+ case IB_WR_SEND:
+ if (wr->send_flags & IB_SEND_SOLICITED)
+ wqe->send.sendop_pkd = cpu_to_be32(
+ V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE));
+ else
+ wqe->send.sendop_pkd = cpu_to_be32(
+ V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND));
+ wqe->send.stag_inv = 0;
+ break;
+ case IB_WR_SEND_WITH_INV:
+ if (wr->send_flags & IB_SEND_SOLICITED)
+ wqe->send.sendop_pkd = cpu_to_be32(
+ V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE_INV));
+ else
+ wqe->send.sendop_pkd = cpu_to_be32(
+ V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_INV));
+ wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ plen = 0;
+ if (wr->num_sge) {
+ if (wr->send_flags & IB_SEND_INLINE) {
+ datap = (u8 *)wqe->send.u.immd_src[0].data;
+ for (i = 0; i < wr->num_sge; i++) {
+ if ((plen + wr->sg_list[i].length) >
+ T4_MAX_SEND_INLINE) {
+ return -EMSGSIZE;
+ }
+ plen += wr->sg_list[i].length;
+ memcpy(datap,
+ (void *)(unsigned long)wr->sg_list[i].addr,
+ wr->sg_list[i].length);
+ datap += wr->sg_list[i].length;
+ }
+ wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD;
+ wqe->send.u.immd_src[0].r1 = 0;
+ wqe->send.u.immd_src[0].r2 = 0;
+ wqe->send.u.immd_src[0].immdlen = cpu_to_be32(plen);
+ size = sizeof wqe->send + sizeof(struct fw_ri_immd) +
+ plen;
+ } else {
+ for (i = 0; i < wr->num_sge; i++) {
+ if ((plen + wr->sg_list[i].length) < plen)
+ return -EMSGSIZE;
+ plen += wr->sg_list[i].length;
+ wqe->send.u.isgl_src[0].sge[i].stag =
+ cpu_to_be32(wr->sg_list[i].lkey);
+ wqe->send.u.isgl_src[0].sge[i].len =
+ cpu_to_be32(wr->sg_list[i].length);
+ wqe->send.u.isgl_src[0].sge[i].to =
+ cpu_to_be64(wr->sg_list[i].addr);
+ }
+ wqe->send.u.isgl_src[0].op = FW_RI_DATA_ISGL;
+ wqe->send.u.isgl_src[0].r1 = 0;
+ wqe->send.u.isgl_src[0].nsge = cpu_to_be16(wr->num_sge);
+ wqe->send.u.isgl_src[0].r2 = 0;
+ size = sizeof wqe->send + sizeof(struct fw_ri_isgl) +
+ wr->num_sge * sizeof(struct fw_ri_sge);
+ }
+ } else {
+ wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD;
+ wqe->send.u.immd_src[0].r1 = 0;
+ wqe->send.u.immd_src[0].r2 = 0;
+ wqe->send.u.immd_src[0].immdlen = 0;
+ size = sizeof wqe->send + sizeof(struct fw_ri_immd);
+ }
+ *len16 = DIV_ROUND_UP(size, 16);
+ wqe->send.plen = cpu_to_be32(plen);
+ return 0;
+}
+
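+/*
+ * Build a FW RDMA write WR; as with sends, the source is either inline
+ * immediate data or an SGL.
+ */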
+static int build_rdma_write(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
+{
+ int i;
+ u32 plen;
+ int size;
+ u8 *datap;
+
+ if (wr->num_sge > T4_MAX_WRITE_SGE)
+ return -EINVAL;
+ wqe->write.r2 = 0;
+ wqe->write.stag_sink = cpu_to_be32(wr->wr.rdma.rkey);
+ wqe->write.to_sink = cpu_to_be64(wr->wr.rdma.remote_addr);
+ plen = 0;
+ if (wr->num_sge) {
+ if (wr->send_flags & IB_SEND_INLINE) {
+ datap = (u8 *)wqe->write.u.immd_src[0].data;
+ for (i = 0; i < wr->num_sge; i++) {
+ if ((plen + wr->sg_list[i].length) >
+ T4_MAX_WRITE_INLINE) {
+ return -EMSGSIZE;
+ }
+ plen += wr->sg_list[i].length;
+ memcpy(datap,
+ (void *)(unsigned long)wr->sg_list[i].addr,
+ wr->sg_list[i].length);
+ datap += wr->sg_list[i].length;
+ }
+ wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD;
+ wqe->write.u.immd_src[0].r1 = 0;
+ wqe->write.u.immd_src[0].r2 = 0;
+ wqe->write.u.immd_src[0].immdlen = cpu_to_be32(plen);
+ size = sizeof wqe->write + sizeof(struct fw_ri_immd) +
+ plen;
+ } else {
+ for (i = 0; i < wr->num_sge; i++) {
+ if ((plen + wr->sg_list[i].length) < plen)
+ return -EMSGSIZE;
+ plen += wr->sg_list[i].length;
+ wqe->write.u.isgl_src[0].sge[i].stag =
+ cpu_to_be32(wr->sg_list[i].lkey);
+ wqe->write.u.isgl_src[0].sge[i].len =
+ cpu_to_be32(wr->sg_list[i].length);
+ wqe->write.u.isgl_src[0].sge[i].to =
+ cpu_to_be64(wr->sg_list[i].addr);
+ }
+ wqe->write.u.isgl_src[0].op = FW_RI_DATA_ISGL;
+ wqe->write.u.isgl_src[0].r1 = 0;
+ wqe->write.u.isgl_src[0].nsge =
+ cpu_to_be16(wr->num_sge);
+ wqe->write.u.isgl_src[0].r2 = 0;
+ size = sizeof wqe->write + sizeof(struct fw_ri_isgl) +
+ wr->num_sge * sizeof(struct fw_ri_sge);
+ }
+ } else {
+ wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD;
+ wqe->write.u.immd_src[0].r1 = 0;
+ wqe->write.u.immd_src[0].r2 = 0;
+ wqe->write.u.immd_src[0].immdlen = 0;
+ size = sizeof wqe->write + sizeof(struct fw_ri_immd);
+ }
+ *len16 = DIV_ROUND_UP(size, 16);
+ wqe->write.plen = cpu_to_be32(plen);
+ return 0;
+}
+
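+/* Build a FW RDMA read WR; with no SGE a zero-length read is posted. */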
+static int build_rdma_read(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
+{
+ if (wr->num_sge > 1)
+ return -EINVAL;
+ if (wr->num_sge) {
+ wqe->read.stag_src = cpu_to_be32(wr->wr.rdma.rkey);
+ wqe->read.to_src_hi = cpu_to_be32((u32)(wr->wr.rdma.remote_addr
+ >> 32));
+ wqe->read.to_src_lo = cpu_to_be32((u32)wr->wr.rdma.remote_addr);
+ wqe->read.stag_sink = cpu_to_be32(wr->sg_list[0].lkey);
+ wqe->read.plen = cpu_to_be32(wr->sg_list[0].length);
+ wqe->read.to_sink_hi = cpu_to_be32((u32)(wr->sg_list[0].addr
+ >> 32));
+ wqe->read.to_sink_lo = cpu_to_be32((u32)(wr->sg_list[0].addr));
+ } else {
+ wqe->read.stag_src = cpu_to_be32(2);
+ wqe->read.to_src_hi = 0;
+ wqe->read.to_src_lo = 0;
+ wqe->read.stag_sink = cpu_to_be32(2);
+ wqe->read.plen = 0;
+ wqe->read.to_sink_hi = 0;
+ wqe->read.to_sink_lo = 0;
+ }
+ wqe->read.r2 = 0;
+ wqe->read.r5 = 0;
+ *len16 = DIV_ROUND_UP(sizeof wqe->read, 16);
+ return 0;
+}
+
+static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
+ struct ib_recv_wr *wr, u8 *len16)
+{
+ int i;
+ int plen = 0;
+
+ for (i = 0; i < wr->num_sge; i++) {
+ if ((plen + wr->sg_list[i].length) < plen)
+ return -EMSGSIZE;
+ plen += wr->sg_list[i].length;
+ wqe->recv.isgl.sge[i].stag =
+ cpu_to_be32(wr->sg_list[i].lkey);
+ wqe->recv.isgl.sge[i].len =
+ cpu_to_be32(wr->sg_list[i].length);
+ wqe->recv.isgl.sge[i].to =
+ cpu_to_be64(wr->sg_list[i].addr);
+ }
+ for (; i < T4_MAX_RECV_SGE; i++) {
+ wqe->recv.isgl.sge[i].stag = 0;
+ wqe->recv.isgl.sge[i].len = 0;
+ wqe->recv.isgl.sge[i].to = 0;
+ }
+ wqe->recv.isgl.op = FW_RI_DATA_ISGL;
+ wqe->recv.isgl.r1 = 0;
+ wqe->recv.isgl.nsge = cpu_to_be16(wr->num_sge);
+ wqe->recv.isgl.r2 = 0;
+ *len16 = DIV_ROUND_UP(sizeof wqe->recv +
+ wr->num_sge * sizeof(struct fw_ri_sge), 16);
+ return 0;
+}
+
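+/*
+ * Build a fast-register WR.  The page list is passed as immediate data when
+ * it fits within T4_MAX_FR_IMMD bytes, otherwise as a DSGL referencing the
+ * DMA-mapped page list.
+ */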
+static int build_fastreg(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
+{
+
+ struct fw_ri_immd *imdp;
+ __be64 *p;
+ int i;
+ int pbllen = roundup(wr->wr.fast_reg.page_list_len * sizeof(u64), 32);
+
+ if (wr->wr.fast_reg.page_list_len > T4_MAX_FR_DEPTH)
+ return -EINVAL;
+
+ wqe->fr.qpbinde_to_dcacpu = 0;
+ wqe->fr.pgsz_shift = wr->wr.fast_reg.page_shift - 12;
+ wqe->fr.addr_type = FW_RI_VA_BASED_TO;
+ wqe->fr.mem_perms = c4iw_ib_to_tpt_access(wr->wr.fast_reg.access_flags);
+ wqe->fr.len_hi = 0;
+ wqe->fr.len_lo = cpu_to_be32(wr->wr.fast_reg.length);
+ wqe->fr.stag = cpu_to_be32(wr->wr.fast_reg.rkey);
+ wqe->fr.va_hi = cpu_to_be32(wr->wr.fast_reg.iova_start >> 32);
+ wqe->fr.va_lo_fbo = cpu_to_be32(wr->wr.fast_reg.iova_start &
+ 0xffffffff);
+ if (pbllen > T4_MAX_FR_IMMD) {
+ struct c4iw_fr_page_list *c4pl =
+ to_c4iw_fr_page_list(wr->wr.fast_reg.page_list);
+ struct fw_ri_dsgl *sglp;
+
+ sglp = (struct fw_ri_dsgl *)(&wqe->fr + 1);
+ sglp->op = FW_RI_DATA_DSGL;
+ sglp->r1 = 0;
+ sglp->nsge = cpu_to_be16(1);
+ sglp->addr0 = cpu_to_be64(c4pl->dma_addr);
+ sglp->len0 = cpu_to_be32(pbllen);
+
+ *len16 = DIV_ROUND_UP(sizeof wqe->fr + sizeof *sglp, 16);
+ } else {
+ imdp = (struct fw_ri_immd *)(&wqe->fr + 1);
+ imdp->op = FW_RI_DATA_IMMD;
+ imdp->r1 = 0;
+ imdp->r2 = 0;
+ imdp->immdlen = cpu_to_be32(pbllen);
+ p = (__be64 *)(imdp + 1);
+ for (i = 0; i < wr->wr.fast_reg.page_list_len; i++, p++)
+ *p = cpu_to_be64(
+ (u64)wr->wr.fast_reg.page_list->page_list[i]);
+ *len16 = DIV_ROUND_UP(sizeof wqe->fr + sizeof *imdp + pbllen,
+ 16);
+ }
+ return 0;
+}
+
+static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr,
+ u8 *len16)
+{
+ wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
+ wqe->inv.r2 = 0;
+ *len16 = DIV_ROUND_UP(sizeof wqe->inv, 16);
+ return 0;
+}
+
+void c4iw_qp_add_ref(struct ib_qp *qp)
+{
+ PDBG("%s ib_qp %p\n", __func__, qp);
+ atomic_inc(&(to_c4iw_qp(qp)->refcnt));
+}
+
+void c4iw_qp_rem_ref(struct ib_qp *qp)
+{
+ PDBG("%s ib_qp %p\n", __func__, qp);
+ if (atomic_dec_and_test(&(to_c4iw_qp(qp)->refcnt)))
+ wake_up(&(to_c4iw_qp(qp)->wait));
+}
+
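+/* Post send work requests onto the SQ, building one FW WR per ib_send_wr. */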
+int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+ struct ib_send_wr **bad_wr)
+{
+ int err = 0;
+ u8 len16 = 0;
+ enum fw_wr_opcodes fw_opcode = 0;
+ enum fw_ri_wr_flags fw_flags;
+ struct c4iw_qp *qhp;
+ union t4_wr *wqe;
+ u32 num_wrs;
+ struct t4_swsqe *swsqe;
+ unsigned long flag;
+ u16 idx = 0;
+
+ qhp = to_c4iw_qp(ibqp);
+ spin_lock_irqsave(&qhp->lock, flag);
+ if (t4_wq_in_error(&qhp->wq)) {
+ spin_unlock_irqrestore(&qhp->lock, flag);
+ return -EINVAL;
+ }
+ num_wrs = t4_sq_avail(&qhp->wq);
+ if (num_wrs == 0) {
+ spin_unlock_irqrestore(&qhp->lock, flag);
+ return -ENOMEM;
+ }
+ while (wr) {
+ if (num_wrs == 0) {
+ err = -ENOMEM;
+ *bad_wr = wr;
+ break;
+ }
+ wqe = &qhp->wq.sq.queue[qhp->wq.sq.pidx];
+ fw_flags = 0;
+ if (wr->send_flags & IB_SEND_SOLICITED)
+ fw_flags |= FW_RI_SOLICITED_EVENT_FLAG;
+ if (wr->send_flags & IB_SEND_SIGNALED)
+ fw_flags |= FW_RI_COMPLETION_FLAG;
+ swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
+ switch (wr->opcode) {
+ case IB_WR_SEND_WITH_INV:
+ case IB_WR_SEND:
+ if (wr->send_flags & IB_SEND_FENCE)
+ fw_flags |= FW_RI_READ_FENCE_FLAG;
+ fw_opcode = FW_RI_SEND_WR;
+ if (wr->opcode == IB_WR_SEND)
+ swsqe->opcode = FW_RI_SEND;
+ else
+ swsqe->opcode = FW_RI_SEND_WITH_INV;
+ err = build_rdma_send(wqe, wr, &len16);
+ break;
+ case IB_WR_RDMA_WRITE:
+ fw_opcode = FW_RI_RDMA_WRITE_WR;
+ swsqe->opcode = FW_RI_RDMA_WRITE;
+ err = build_rdma_write(wqe, wr, &len16);
+ break;
+ case IB_WR_RDMA_READ:
+ case IB_WR_RDMA_READ_WITH_INV:
+ fw_opcode = FW_RI_RDMA_READ_WR;
+ swsqe->opcode = FW_RI_READ_REQ;
+ if (wr->opcode == IB_WR_RDMA_READ_WITH_INV)
+ fw_flags |= FW_RI_RDMA_READ_INVALIDATE;
+ else
+ fw_flags = 0;
+ err = build_rdma_read(wqe, wr, &len16);
+ if (err)
+ break;
+ swsqe->read_len = wr->sg_list[0].length;
+ if (!qhp->wq.sq.oldest_read)
+ qhp->wq.sq.oldest_read = swsqe;
+ break;
+ case IB_WR_FAST_REG_MR:
+ fw_opcode = FW_RI_FR_NSMR_WR;
+ swsqe->opcode = FW_RI_FAST_REGISTER;
+ err = build_fastreg(wqe, wr, &len16);
+ break;
+ case IB_WR_LOCAL_INV:
+ if (wr->send_flags & IB_SEND_FENCE)
+ fw_flags |= FW_RI_LOCAL_FENCE_FLAG;
+ fw_opcode = FW_RI_INV_LSTAG_WR;
+ swsqe->opcode = FW_RI_LOCAL_INV;
+ err = build_inv_stag(wqe, wr, &len16);
+ break;
+ default:
+ PDBG("%s post of type=%d TBD!\n", __func__,
+ wr->opcode);
+ err = -EINVAL;
+ }
+ if (err) {
+ *bad_wr = wr;
+ break;
+ }
+ swsqe->idx = qhp->wq.sq.pidx;
+ swsqe->complete = 0;
+ swsqe->signaled = (wr->send_flags & IB_SEND_SIGNALED);
+ swsqe->wr_id = wr->wr_id;
+
+ init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);
+
+ PDBG("%s cookie 0x%llx pidx 0x%x opcode 0x%x read_len %u\n",
+ __func__, (unsigned long long)wr->wr_id, qhp->wq.sq.pidx,
+ swsqe->opcode, swsqe->read_len);
+ wr = wr->next;
+ num_wrs--;
+ t4_sq_produce(&qhp->wq);
+ idx++;
+ }
+ if (t4_wq_db_enabled(&qhp->wq))
+ t4_ring_sq_db(&qhp->wq, idx);
+ spin_unlock_irqrestore(&qhp->lock, flag);
+ return err;
+}
+
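+/* Post receive work requests onto the RQ and ring its doorbell. */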
+int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr)
+{
+ int err = 0;
+ struct c4iw_qp *qhp;
+ union t4_recv_wr *wqe;
+ u32 num_wrs;
+ u8 len16 = 0;
+ unsigned long flag;
+ u16 idx = 0;
+
+ qhp = to_c4iw_qp(ibqp);
+ spin_lock_irqsave(&qhp->lock, flag);
+ if (t4_wq_in_error(&qhp->wq)) {
+ spin_unlock_irqrestore(&qhp->lock, flag);
+ return -EINVAL;
+ }
+ num_wrs = t4_rq_avail(&qhp->wq);
+ if (num_wrs == 0) {
+ spin_unlock_irqrestore(&qhp->lock, flag);
+ return -ENOMEM;
+ }
+ while (wr) {
+ if (wr->num_sge > T4_MAX_RECV_SGE) {
+ err = -EINVAL;
+ *bad_wr = wr;
+ break;
+ }
+ wqe = &qhp->wq.rq.queue[qhp->wq.rq.pidx];
+ if (num_wrs)
+ err = build_rdma_recv(qhp, wqe, wr, &len16);
+ else
+ err = -ENOMEM;
+ if (err) {
+ *bad_wr = wr;
+ break;
+ }
+
+ qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].wr_id = wr->wr_id;
+
+ wqe->recv.opcode = FW_RI_RECV_WR;
+ wqe->recv.r1 = 0;
+ wqe->recv.wrid = qhp->wq.rq.pidx;
+ wqe->recv.r2[0] = 0;
+ wqe->recv.r2[1] = 0;
+ wqe->recv.r2[2] = 0;
+ wqe->recv.len16 = len16;
+ if (len16 < 5)
+ wqe->flits[8] = 0;
+
+ PDBG("%s cookie 0x%llx pidx %u\n", __func__,
+ (unsigned long long) wr->wr_id, qhp->wq.rq.pidx);
+ t4_rq_produce(&qhp->wq);
+ wr = wr->next;
+ num_wrs--;
+ idx++;
+ }
+ if (t4_wq_db_enabled(&qhp->wq))
+ t4_ring_rq_db(&qhp->wq, idx);
+ spin_unlock_irqrestore(&qhp->lock, flag);
+ return err;
+}
+
+int c4iw_bind_mw(struct ib_qp *qp, struct ib_mw *mw, struct ib_mw_bind *mw_bind)
+{
+ return -ENOSYS;
+}
+
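+/* Map a CQE error status to the TERMINATE message layer/etype and error code. */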
+static inline void build_term_codes(struct t4_cqe *err_cqe, u8 *layer_type,
+ u8 *ecode)
+{
+ int status;
+ int tagged;
+ int opcode;
+ int rqtype;
+ int send_inv;
+
+ if (!err_cqe) {
+ *layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
+ *ecode = 0;
+ return;
+ }
+
+ status = CQE_STATUS(err_cqe);
+ opcode = CQE_OPCODE(err_cqe);
+ rqtype = RQ_TYPE(err_cqe);
+ send_inv = (opcode == FW_RI_SEND_WITH_INV) ||
+ (opcode == FW_RI_SEND_WITH_SE_INV);
+ tagged = (opcode == FW_RI_RDMA_WRITE) ||
+ (rqtype && (opcode == FW_RI_READ_RESP));
+
+ switch (status) {
+ case T4_ERR_STAG:
+ if (send_inv) {
+ *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
+ *ecode = RDMAP_CANT_INV_STAG;
+ } else {
+ *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
+ *ecode = RDMAP_INV_STAG;
+ }
+ break;
+ case T4_ERR_PDID:
+ *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
+ if ((opcode == FW_RI_SEND_WITH_INV) ||
+ (opcode == FW_RI_SEND_WITH_SE_INV))
+ *ecode = RDMAP_CANT_INV_STAG;
+ else
+ *ecode = RDMAP_STAG_NOT_ASSOC;
+ break;
+ case T4_ERR_QPID:
+ *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
+ *ecode = RDMAP_STAG_NOT_ASSOC;
+ break;
+ case T4_ERR_ACCESS:
+ *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
+ *ecode = RDMAP_ACC_VIOL;
+ break;
+ case T4_ERR_WRAP:
+ *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
+ *ecode = RDMAP_TO_WRAP;
+ break;
+ case T4_ERR_BOUND:
+ if (tagged) {
+ *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
+ *ecode = DDPT_BASE_BOUNDS;
+ } else {
+ *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
+ *ecode = RDMAP_BASE_BOUNDS;
+ }
+ break;
+ case T4_ERR_INVALIDATE_SHARED_MR:
+ case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
+ *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
+ *ecode = RDMAP_CANT_INV_STAG;
+ break;
+ case T4_ERR_ECC:
+ case T4_ERR_ECC_PSTAG:
+ case T4_ERR_INTERNAL_ERR:
+ *layer_type = LAYER_RDMAP|RDMAP_LOCAL_CATA;
+ *ecode = 0;
+ break;
+ case T4_ERR_OUT_OF_RQE:
+ *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
+ *ecode = DDPU_INV_MSN_NOBUF;
+ break;
+ case T4_ERR_PBL_ADDR_BOUND:
+ *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
+ *ecode = DDPT_BASE_BOUNDS;
+ break;
+ case T4_ERR_CRC:
+ *layer_type = LAYER_MPA|DDP_LLP;
+ *ecode = MPA_CRC_ERR;
+ break;
+ case T4_ERR_MARKER:
+ *layer_type = LAYER_MPA|DDP_LLP;
+ *ecode = MPA_MARKER_ERR;
+ break;
+ case T4_ERR_PDU_LEN_ERR:
+ *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
+ *ecode = DDPU_MSG_TOOBIG;
+ break;
+ case T4_ERR_DDP_VERSION:
+ if (tagged) {
+ *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
+ *ecode = DDPT_INV_VERS;
+ } else {
+ *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
+ *ecode = DDPU_INV_VERS;
+ }
+ break;
+ case T4_ERR_RDMA_VERSION:
+ *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
+ *ecode = RDMAP_INV_VERS;
+ break;
+ case T4_ERR_OPCODE:
+ *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
+ *ecode = RDMAP_INV_OPCODE;
+ break;
+ case T4_ERR_DDP_QUEUE_NUM:
+ *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
+ *ecode = DDPU_INV_QN;
+ break;
+ case T4_ERR_MSN:
+ case T4_ERR_MSN_GAP:
+ case T4_ERR_MSN_RANGE:
+ case T4_ERR_IRD_OVERFLOW:
+ *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
+ *ecode = DDPU_INV_MSN_RANGE;
+ break;
+ case T4_ERR_TBIT:
+ *layer_type = LAYER_DDP|DDP_LOCAL_CATA;
+ *ecode = 0;
+ break;
+ case T4_ERR_MO:
+ *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
+ *ecode = DDPU_INV_MO;
+ break;
+ default:
+ *layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
+ *ecode = 0;
+ break;
+ }
+}
+
+int c4iw_post_zb_read(struct c4iw_qp *qhp)
+{
+ union t4_wr *wqe;
+ struct sk_buff *skb;
+ u8 len16;
+
+ PDBG("%s enter\n", __func__);
+ skb = alloc_skb(40, GFP_KERNEL);
+ if (!skb) {
+ printk(KERN_ERR "%s cannot send zb_read!!\n", __func__);
+ return -ENOMEM;
+ }
+ set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
+
+ wqe = (union t4_wr *)skb_put(skb, sizeof wqe->read);
+ memset(wqe, 0, sizeof wqe->read);
+ wqe->read.r2 = cpu_to_be64(0);
+ wqe->read.stag_sink = cpu_to_be32(1);
+ wqe->read.to_sink_hi = cpu_to_be32(0);
+ wqe->read.to_sink_lo = cpu_to_be32(1);
+ wqe->read.stag_src = cpu_to_be32(1);
+ wqe->read.plen = cpu_to_be32(0);
+ wqe->read.to_src_hi = cpu_to_be32(0);
+ wqe->read.to_src_lo = cpu_to_be32(1);
+ len16 = DIV_ROUND_UP(sizeof wqe->read, 16);
+ init_wr_hdr(wqe, 0, FW_RI_RDMA_READ_WR, FW_RI_COMPLETION_FLAG, len16);
+
+ return c4iw_ofld_send(&qhp->rhp->rdev, skb);
+}
+
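+/* Post a TERMINATE work request carrying the codes derived from err_cqe. */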
+static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
+ gfp_t gfp)
+{
+ struct fw_ri_wr *wqe;
+ struct sk_buff *skb;
+ struct terminate_message *term;
+
+ PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
+ qhp->ep->hwtid);
+
+ skb = alloc_skb(sizeof *wqe, gfp);
+ if (!skb)
+ return;
+ set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
+
+ wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
+ memset(wqe, 0, sizeof *wqe);
+ wqe->op_compl = cpu_to_be32(FW_WR_OP(FW_RI_INIT_WR));
+ wqe->flowid_len16 = cpu_to_be32(
+ FW_WR_FLOWID(qhp->ep->hwtid) |
+ FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
+
+ wqe->u.terminate.type = FW_RI_TYPE_TERMINATE;
+ wqe->u.terminate.immdlen = cpu_to_be32(sizeof *term);
+ term = (struct terminate_message *)wqe->u.terminate.termmsg;
+ build_term_codes(err_cqe, &term->layer_etype, &term->ecode);
+ c4iw_ofld_send(&qhp->rhp->rdev, skb);
+}
+
+/*
+ * Assumes qhp lock is held.
+ */
+static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
+ struct c4iw_cq *schp, unsigned long *flag)
+{
+ int count;
+ int flushed;
+
+ PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
+ /* take a ref on the qhp since we must release the lock */
+ atomic_inc(&qhp->refcnt);
+ spin_unlock_irqrestore(&qhp->lock, *flag);
+
+ /* locking heirarchy: cq lock first, then qp lock. */
+ spin_lock_irqsave(&rchp->lock, *flag);
+ spin_lock(&qhp->lock);
+ c4iw_flush_hw_cq(&rchp->cq);
+ c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
+ flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
+ spin_unlock(&qhp->lock);
+ spin_unlock_irqrestore(&rchp->lock, *flag);
+ if (flushed)
+ (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
+
+ /* locking heirarchy: cq lock first, then qp lock. */
+ spin_lock_irqsave(&schp->lock, *flag);
+ spin_lock(&qhp->lock);
+ c4iw_flush_hw_cq(&schp->cq);
+ c4iw_count_scqes(&schp->cq, &qhp->wq, &count);
+ flushed = c4iw_flush_sq(&qhp->wq, &schp->cq, count);
+ spin_unlock(&qhp->lock);
+ spin_unlock_irqrestore(&schp->lock, *flag);
+ if (flushed)
+ (*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);
+
+ /* deref */
+ if (atomic_dec_and_test(&qhp->refcnt))
+ wake_up(&qhp->wait);
+
+ spin_lock_irqsave(&qhp->lock, *flag);
+}
+
+static void flush_qp(struct c4iw_qp *qhp, unsigned long *flag)
+{
+ struct c4iw_cq *rchp, *schp;
+
+ rchp = get_chp(qhp->rhp, qhp->attr.rcq);
+ schp = get_chp(qhp->rhp, qhp->attr.scq);
+
+ if (qhp->ibqp.uobject) {
+ t4_set_wq_in_error(&qhp->wq);
+ t4_set_cq_in_error(&rchp->cq);
+ if (schp != rchp)
+ t4_set_cq_in_error(&schp->cq);
+ return;
+ }
+ __flush_qp(qhp, rchp, schp, flag);
+}
+
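+/* Post an FW_RI_TYPE_FINI WR and wait for the firmware to complete it. */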
+static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
+{
+ struct fw_ri_wr *wqe;
+ int ret;
+ struct c4iw_wr_wait wr_wait;
+ struct sk_buff *skb;
+
+ PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
+ qhp->ep->hwtid);
+
+ skb = alloc_skb(sizeof *wqe, GFP_KERNEL | __GFP_NOFAIL);
+ if (!skb)
+ return -ENOMEM;
+ set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
+
+ wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
+ memset(wqe, 0, sizeof *wqe);
+ wqe->op_compl = cpu_to_be32(
+ FW_WR_OP(FW_RI_INIT_WR) |
+ FW_WR_COMPL(1));
+ wqe->flowid_len16 = cpu_to_be32(
+ FW_WR_FLOWID(qhp->ep->hwtid) |
+ FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
+ wqe->cookie = (u64)&wr_wait;
+
+ wqe->u.fini.type = FW_RI_TYPE_FINI;
+ c4iw_init_wr_wait(&wr_wait);
+ ret = c4iw_ofld_send(&rhp->rdev, skb);
+ if (ret)
+ goto out;
+
+ wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
+ if (!wr_wait.done) {
+ printk(KERN_ERR MOD "Device %s not responding!\n",
+ pci_name(rhp->rdev.lldi.pdev));
+ rhp->rdev.flags = T4_FATAL_ERROR;
+ ret = -EIO;
+ } else {
+ ret = wr_wait.ret;
+ if (ret)
+ printk(KERN_WARNING MOD
+ "%s: Abnormal close qpid %d ret %u\n",
+ pci_name(rhp->rdev.lldi.pdev), qhp->wq.sq.qid,
+ ret);
+ }
+out:
+ PDBG("%s ret %d\n", __func__, ret);
+ return ret;
+}
+
+static void build_rtr_msg(u8 p2p_type, struct fw_ri_init *init)
+{
+ memset(&init->u, 0, sizeof init->u);
+ switch (p2p_type) {
+ case FW_RI_INIT_P2PTYPE_RDMA_WRITE:
+ init->u.write.opcode = FW_RI_RDMA_WRITE_WR;
+ init->u.write.stag_sink = cpu_to_be32(1);
+ init->u.write.to_sink = cpu_to_be64(1);
+ init->u.write.u.immd_src[0].op = FW_RI_DATA_IMMD;
+ init->u.write.len16 = DIV_ROUND_UP(sizeof init->u.write +
+ sizeof(struct fw_ri_immd),
+ 16);
+ break;
+ case FW_RI_INIT_P2PTYPE_READ_REQ:
+ init->u.write.opcode = FW_RI_RDMA_READ_WR;
+ init->u.read.stag_src = cpu_to_be32(1);
+ init->u.read.to_src_lo = cpu_to_be32(1);
+ init->u.read.stag_sink = cpu_to_be32(1);
+ init->u.read.to_sink_lo = cpu_to_be32(1);
+ init->u.read.len16 = DIV_ROUND_UP(sizeof init->u.read, 16);
+ break;
+ }
+}
+
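+/* Post an FW_RI_TYPE_INIT WR to move the QP into RDMA mode and wait for completion. */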
+static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
+{
+ struct fw_ri_wr *wqe;
+ int ret;
+ struct c4iw_wr_wait wr_wait;
+ struct sk_buff *skb;
+
+ PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
+ qhp->ep->hwtid);
+
+ skb = alloc_skb(sizeof *wqe, GFP_KERNEL | __GFP_NOFAIL);
+ if (!skb)
+ return -ENOMEM;
+ set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
+
+ wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
+ memset(wqe, 0, sizeof *wqe);
+ wqe->op_compl = cpu_to_be32(
+ FW_WR_OP(FW_RI_INIT_WR) |
+ FW_WR_COMPL(1));
+ wqe->flowid_len16 = cpu_to_be32(
+ FW_WR_FLOWID(qhp->ep->hwtid) |
+ FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
+
+ wqe->cookie = (u64)&wr_wait;
+
+ wqe->u.init.type = FW_RI_TYPE_INIT;
+ wqe->u.init.mpareqbit_p2ptype =
+ V_FW_RI_WR_MPAREQBIT(qhp->attr.mpa_attr.initiator) |
+ V_FW_RI_WR_P2PTYPE(qhp->attr.mpa_attr.p2p_type);
+ wqe->u.init.mpa_attrs = FW_RI_MPA_IETF_ENABLE;
+ if (qhp->attr.mpa_attr.recv_marker_enabled)
+ wqe->u.init.mpa_attrs |= FW_RI_MPA_RX_MARKER_ENABLE;
+ if (qhp->attr.mpa_attr.xmit_marker_enabled)
+ wqe->u.init.mpa_attrs |= FW_RI_MPA_TX_MARKER_ENABLE;
+ if (qhp->attr.mpa_attr.crc_enabled)
+ wqe->u.init.mpa_attrs |= FW_RI_MPA_CRC_ENABLE;
+
+ wqe->u.init.qp_caps = FW_RI_QP_RDMA_READ_ENABLE |
+ FW_RI_QP_RDMA_WRITE_ENABLE |
+ FW_RI_QP_BIND_ENABLE;
+ if (!qhp->ibqp.uobject)
+ wqe->u.init.qp_caps |= FW_RI_QP_FAST_REGISTER_ENABLE |
+ FW_RI_QP_STAG0_ENABLE;
+ wqe->u.init.nrqe = cpu_to_be16(t4_rqes_posted(&qhp->wq));
+ wqe->u.init.pdid = cpu_to_be32(qhp->attr.pd);
+ wqe->u.init.qpid = cpu_to_be32(qhp->wq.sq.qid);
+ wqe->u.init.sq_eqid = cpu_to_be32(qhp->wq.sq.qid);
+ wqe->u.init.rq_eqid = cpu_to_be32(qhp->wq.rq.qid);
+ wqe->u.init.scqid = cpu_to_be32(qhp->attr.scq);
+ wqe->u.init.rcqid = cpu_to_be32(qhp->attr.rcq);
+ wqe->u.init.ord_max = cpu_to_be32(qhp->attr.max_ord);
+ wqe->u.init.ird_max = cpu_to_be32(qhp->attr.max_ird);
+ wqe->u.init.iss = cpu_to_be32(qhp->ep->snd_seq);
+ wqe->u.init.irs = cpu_to_be32(qhp->ep->rcv_seq);
+ wqe->u.init.hwrqsize = cpu_to_be32(qhp->wq.rq.rqt_size);
+ wqe->u.init.hwrqaddr = cpu_to_be32(qhp->wq.rq.rqt_hwaddr -
+ rhp->rdev.lldi.vr->rq.start);
+ if (qhp->attr.mpa_attr.initiator)
+ build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init);
+
+ c4iw_init_wr_wait(&wr_wait);
+ ret = c4iw_ofld_send(&rhp->rdev, skb);
+ if (ret)
+ goto out;
+
+ wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
+ if (!wr_wait.done) {
+ printk(KERN_ERR MOD "Device %s not responding!\n",
+ pci_name(rhp->rdev.lldi.pdev));
+ rhp->rdev.flags = T4_FATAL_ERROR;
+ ret = -EIO;
+ } else
+ ret = wr_wait.ret;
+out:
+ PDBG("%s ret %d\n", __func__, ret);
+ return ret;
+}
+
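+/*
+ * Core QP state machine: applies attribute changes and drives the
+ * IDLE/RTS/CLOSING/TERMINATE/ERROR transitions, issuing RDMA init/fini,
+ * flushes, terminates and disconnects as needed.
+ */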
+int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
+ enum c4iw_qp_attr_mask mask,
+ struct c4iw_qp_attributes *attrs,
+ int internal)
+{
+ int ret = 0;
+ struct c4iw_qp_attributes newattr = qhp->attr;
+ unsigned long flag;
+ int disconnect = 0;
+ int terminate = 0;
+ int abort = 0;
+ int free = 0;
+ struct c4iw_ep *ep = NULL;
+
+ PDBG("%s qhp %p sqid 0x%x rqid 0x%x ep %p state %d -> %d\n", __func__,
+ qhp, qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep, qhp->attr.state,
+ (mask & C4IW_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);
+
+ spin_lock_irqsave(&qhp->lock, flag);
+
+ /* Process attr changes if in IDLE */
+ if (mask & C4IW_QP_ATTR_VALID_MODIFY) {
+ if (qhp->attr.state != C4IW_QP_STATE_IDLE) {
+ ret = -EIO;
+ goto out;
+ }
+ if (mask & C4IW_QP_ATTR_ENABLE_RDMA_READ)
+ newattr.enable_rdma_read = attrs->enable_rdma_read;
+ if (mask & C4IW_QP_ATTR_ENABLE_RDMA_WRITE)
+ newattr.enable_rdma_write = attrs->enable_rdma_write;
+ if (mask & C4IW_QP_ATTR_ENABLE_RDMA_BIND)
+ newattr.enable_bind = attrs->enable_bind;
+ if (mask & C4IW_QP_ATTR_MAX_ORD) {
+ if (attrs->max_ord > c4iw_max_read_depth) {
+ ret = -EINVAL;
+ goto out;
+ }
+ newattr.max_ord = attrs->max_ord;
+ }
+ if (mask & C4IW_QP_ATTR_MAX_IRD) {
+ if (attrs->max_ird > c4iw_max_read_depth) {
+ ret = -EINVAL;
+ goto out;
+ }
+ newattr.max_ird = attrs->max_ird;
+ }
+ qhp->attr = newattr;
+ }
+
+ if (!(mask & C4IW_QP_ATTR_NEXT_STATE))
+ goto out;
+ if (qhp->attr.state == attrs->next_state)
+ goto out;
+
+ switch (qhp->attr.state) {
+ case C4IW_QP_STATE_IDLE:
+ switch (attrs->next_state) {
+ case C4IW_QP_STATE_RTS:
+ if (!(mask & C4IW_QP_ATTR_LLP_STREAM_HANDLE)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (!(mask & C4IW_QP_ATTR_MPA_ATTR)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ qhp->attr.mpa_attr = attrs->mpa_attr;
+ qhp->attr.llp_stream_handle = attrs->llp_stream_handle;
+ qhp->ep = qhp->attr.llp_stream_handle;
+ qhp->attr.state = C4IW_QP_STATE_RTS;
+
+ /*
+ * Ref the endpoint here and deref when we
+ * disassociate the endpoint from the QP. This
+ * happens in CLOSING->IDLE transition or *->ERROR
+ * transition.
+ */
+ c4iw_get_ep(&qhp->ep->com);
+ spin_unlock_irqrestore(&qhp->lock, flag);
+ ret = rdma_init(rhp, qhp);
+ spin_lock_irqsave(&qhp->lock, flag);
+ if (ret)
+ goto err;
+ break;
+ case C4IW_QP_STATE_ERROR:
+ qhp->attr.state = C4IW_QP_STATE_ERROR;
+ flush_qp(qhp, &flag);
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+ break;
+ case C4IW_QP_STATE_RTS:
+ switch (attrs->next_state) {
+ case C4IW_QP_STATE_CLOSING:
+ BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2);
+ qhp->attr.state = C4IW_QP_STATE_CLOSING;
+ if (!internal) {
+ abort = 0;
+ disconnect = 1;
+ ep = qhp->ep;
+ c4iw_get_ep(&ep->com);
+ }
+ spin_unlock_irqrestore(&qhp->lock, flag);
+ ret = rdma_fini(rhp, qhp);
+ spin_lock_irqsave(&qhp->lock, flag);
+ if (ret) {
+ ep = qhp->ep;
+ c4iw_get_ep(&ep->com);
+ disconnect = abort = 1;
+ goto err;
+ }
+ break;
+ case C4IW_QP_STATE_TERMINATE:
+ qhp->attr.state = C4IW_QP_STATE_TERMINATE;
+ if (qhp->ibqp.uobject)
+ t4_set_wq_in_error(&qhp->wq);
+ ep = qhp->ep;
+ c4iw_get_ep(&ep->com);
+ terminate = 1;
+ disconnect = 1;
+ break;
+ case C4IW_QP_STATE_ERROR:
+ qhp->attr.state = C4IW_QP_STATE_ERROR;
+ if (!internal) {
+ abort = 1;
+ disconnect = 1;
+ ep = qhp->ep;
+ c4iw_get_ep(&ep->com);
+ }
+ goto err;
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+ break;
+ case C4IW_QP_STATE_CLOSING:
+ if (!internal) {
+ ret = -EINVAL;
+ goto out;
+ }
+ switch (attrs->next_state) {
+ case C4IW_QP_STATE_IDLE:
+ flush_qp(qhp, &flag);
+ qhp->attr.state = C4IW_QP_STATE_IDLE;
+ qhp->attr.llp_stream_handle = NULL;
+ c4iw_put_ep(&qhp->ep->com);
+ qhp->ep = NULL;
+ wake_up(&qhp->wait);
+ break;
+ case C4IW_QP_STATE_ERROR:
+ goto err;
+ default:
+ ret = -EINVAL;
+ goto err;
+ }
+ break;
+ case C4IW_QP_STATE_ERROR:
+ if (attrs->next_state != C4IW_QP_STATE_IDLE) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (!t4_sq_empty(&qhp->wq) || !t4_rq_empty(&qhp->wq)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ qhp->attr.state = C4IW_QP_STATE_IDLE;
+ break;
+ case C4IW_QP_STATE_TERMINATE:
+ if (!internal) {
+ ret = -EINVAL;
+ goto out;
+ }
+ goto err;
+ break;
+ default:
+ printk(KERN_ERR "%s in a bad state %d\n",
+ __func__, qhp->attr.state);
+ ret = -EINVAL;
+ goto err;
+ break;
+ }
+ goto out;
+err:
+ PDBG("%s disassociating ep %p qpid 0x%x\n", __func__, qhp->ep,
+ qhp->wq.sq.qid);
+
+ /* disassociate the LLP connection */
+ qhp->attr.llp_stream_handle = NULL;
+ ep = qhp->ep;
+ qhp->ep = NULL;
+ qhp->attr.state = C4IW_QP_STATE_ERROR;
+ free = 1;
+ wake_up(&qhp->wait);
+ BUG_ON(!ep);
+ flush_qp(qhp, &flag);
+out:
+ spin_unlock_irqrestore(&qhp->lock, flag);
+
+ if (terminate)
+ post_terminate(qhp, NULL, internal ? GFP_ATOMIC : GFP_KERNEL);
+
+ /*
+ * If disconnect is 1, then we need to initiate a disconnect
+ * on the EP. This can be a normal close (RTS->CLOSING) or
+ * an abnormal close (RTS/CLOSING->ERROR).
+ */
+ if (disconnect) {
+ c4iw_ep_disconnect(ep, abort, internal ? GFP_ATOMIC :
+ GFP_KERNEL);
+ c4iw_put_ep(&ep->com);
+ }
+
+ /*
+ * If free is 1, then we've disassociated the EP from the QP
+ * and we need to dereference the EP.
+ */
+ if (free)
+ c4iw_put_ep(&ep->com);
+
+ PDBG("%s exit state %d\n", __func__, qhp->attr.state);
+ return ret;
+}
+
+int c4iw_destroy_qp(struct ib_qp *ib_qp)
+{
+ struct c4iw_dev *rhp;
+ struct c4iw_qp *qhp;
+ struct c4iw_qp_attributes attrs;
+ struct c4iw_ucontext *ucontext;
+
+ qhp = to_c4iw_qp(ib_qp);
+ rhp = qhp->rhp;
+
+ attrs.next_state = C4IW_QP_STATE_ERROR;
+ c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
+ wait_event(qhp->wait, !qhp->ep);
+
+ remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
+ atomic_dec(&qhp->refcnt);
+ wait_event(qhp->wait, !atomic_read(&qhp->refcnt));
+
+ ucontext = ib_qp->uobject ?
+ to_c4iw_ucontext(ib_qp->uobject->context) : NULL;
+ destroy_qp(&rhp->rdev, &qhp->wq,
+ ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
+
+ PDBG("%s ib_qp %p qpid 0x%0x\n", __func__, ib_qp, qhp->wq.sq.qid);
+ kfree(qhp);
+ return 0;
+}
+
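+/*
+ * Create a QP: size and allocate the HW SQ/RQ, initialize the software QP
+ * state, and for user QPs hand the queue and doorbell mappings back via udata.
+ */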
+struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
+ struct ib_udata *udata)
+{
+ struct c4iw_dev *rhp;
+ struct c4iw_qp *qhp;
+ struct c4iw_pd *php;
+ struct c4iw_cq *schp;
+ struct c4iw_cq *rchp;
+ struct c4iw_create_qp_resp uresp;
+ int sqsize, rqsize;
+ struct c4iw_ucontext *ucontext;
+ int ret;
+ struct c4iw_mm_entry *mm1, *mm2, *mm3, *mm4;
+
+ PDBG("%s ib_pd %p\n", __func__, pd);
+
+ if (attrs->qp_type != IB_QPT_RC)
+ return ERR_PTR(-EINVAL);
+
+ php = to_c4iw_pd(pd);
+ rhp = php->rhp;
+ schp = get_chp(rhp, ((struct c4iw_cq *)attrs->send_cq)->cq.cqid);
+ rchp = get_chp(rhp, ((struct c4iw_cq *)attrs->recv_cq)->cq.cqid);
+ if (!schp || !rchp)
+ return ERR_PTR(-EINVAL);
+
+ if (attrs->cap.max_inline_data > T4_MAX_SEND_INLINE)
+ return ERR_PTR(-EINVAL);
+
+ rqsize = roundup(attrs->cap.max_recv_wr + 1, 16);
+ if (rqsize > T4_MAX_RQ_SIZE)
+ return ERR_PTR(-E2BIG);
+
+ sqsize = roundup(attrs->cap.max_send_wr + 1, 16);
+ if (sqsize > T4_MAX_SQ_SIZE)
+ return ERR_PTR(-E2BIG);
+
+ ucontext = pd->uobject ? to_c4iw_ucontext(pd->uobject->context) : NULL;
+
+
+ qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
+ if (!qhp)
+ return ERR_PTR(-ENOMEM);
+ qhp->wq.sq.size = sqsize;
+ qhp->wq.sq.memsize = (sqsize + 1) * sizeof *qhp->wq.sq.queue;
+ qhp->wq.rq.size = rqsize;
+ qhp->wq.rq.memsize = (rqsize + 1) * sizeof *qhp->wq.rq.queue;
+
+ if (ucontext) {
+ qhp->wq.sq.memsize = roundup(qhp->wq.sq.memsize, PAGE_SIZE);
+ qhp->wq.rq.memsize = roundup(qhp->wq.rq.memsize, PAGE_SIZE);
+ }
+
+ PDBG("%s sqsize %u sqmemsize %zu rqsize %u rqmemsize %zu\n",
+ __func__, sqsize, qhp->wq.sq.memsize, rqsize, qhp->wq.rq.memsize);
+
+ ret = create_qp(&rhp->rdev, &qhp->wq, &schp->cq, &rchp->cq,
+ ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
+ if (ret)
+ goto err1;
+
+ attrs->cap.max_recv_wr = rqsize - 1;
+ attrs->cap.max_send_wr = sqsize - 1;
+ attrs->cap.max_inline_data = T4_MAX_SEND_INLINE;
+
+ qhp->rhp = rhp;
+ qhp->attr.pd = php->pdid;
+ qhp->attr.scq = ((struct c4iw_cq *) attrs->send_cq)->cq.cqid;
+ qhp->attr.rcq = ((struct c4iw_cq *) attrs->recv_cq)->cq.cqid;
+ qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
+ qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
+ qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
+ qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
+ qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
+ qhp->attr.state = C4IW_QP_STATE_IDLE;
+ qhp->attr.next_state = C4IW_QP_STATE_IDLE;
+ qhp->attr.enable_rdma_read = 1;
+ qhp->attr.enable_rdma_write = 1;
+ qhp->attr.enable_bind = 1;
+ qhp->attr.max_ord = 1;
+ qhp->attr.max_ird = 1;
+ spin_lock_init(&qhp->lock);
+ init_waitqueue_head(&qhp->wait);
+ atomic_set(&qhp->refcnt, 1);
+
+ ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
+ if (ret)
+ goto err2;
+
+ if (udata) {
+ mm1 = kmalloc(sizeof *mm1, GFP_KERNEL);
+ if (!mm1) {
+ ret = -ENOMEM;
+ goto err3;
+ }
+ mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
+ if (!mm2) {
+ ret = -ENOMEM;
+ goto err4;
+ }
+ mm3 = kmalloc(sizeof *mm3, GFP_KERNEL);
+ if (!mm3) {
+ ret = -ENOMEM;
+ goto err5;
+ }
+ mm4 = kmalloc(sizeof *mm4, GFP_KERNEL);
+ if (!mm4) {
+ ret = -ENOMEM;
+ goto err6;
+ }
+
+ uresp.qid_mask = rhp->rdev.qpmask;
+ uresp.sqid = qhp->wq.sq.qid;
+ uresp.sq_size = qhp->wq.sq.size;
+ uresp.sq_memsize = qhp->wq.sq.memsize;
+ uresp.rqid = qhp->wq.rq.qid;
+ uresp.rq_size = qhp->wq.rq.size;
+ uresp.rq_memsize = qhp->wq.rq.memsize;
+ spin_lock(&ucontext->mmap_lock);
+ uresp.sq_key = ucontext->key;
+ ucontext->key += PAGE_SIZE;
+ uresp.rq_key = ucontext->key;
+ ucontext->key += PAGE_SIZE;
+ uresp.sq_db_gts_key = ucontext->key;
+ ucontext->key += PAGE_SIZE;
+ uresp.rq_db_gts_key = ucontext->key;
+ ucontext->key += PAGE_SIZE;
+ spin_unlock(&ucontext->mmap_lock);
+ ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
+ if (ret)
+ goto err7;
+ mm1->key = uresp.sq_key;
+ mm1->addr = virt_to_phys(qhp->wq.sq.queue);
+ mm1->len = PAGE_ALIGN(qhp->wq.sq.memsize);
+ insert_mmap(ucontext, mm1);
+ mm2->key = uresp.rq_key;
+ mm2->addr = virt_to_phys(qhp->wq.rq.queue);
+ mm2->len = PAGE_ALIGN(qhp->wq.rq.memsize);
+ insert_mmap(ucontext, mm2);
+ mm3->key = uresp.sq_db_gts_key;
+ mm3->addr = qhp->wq.sq.udb;
+ mm3->len = PAGE_SIZE;
+ insert_mmap(ucontext, mm3);
+ mm4->key = uresp.rq_db_gts_key;
+ mm4->addr = qhp->wq.rq.udb;
+ mm4->len = PAGE_SIZE;
+ insert_mmap(ucontext, mm4);
+ }
+ qhp->ibqp.qp_num = qhp->wq.sq.qid;
+ init_timer(&(qhp->timer));
+ PDBG("%s qhp %p sq_num_entries %d, rq_num_entries %d qpid 0x%0x\n",
+ __func__, qhp, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
+ qhp->wq.sq.qid);
+ return &qhp->ibqp;
+err7:
+ kfree(mm4);
+err6:
+ kfree(mm3);
+err5:
+ kfree(mm2);
+err4:
+ kfree(mm1);
+err3:
+ remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
+err2:
+ destroy_qp(&rhp->rdev, &qhp->wq,
+ ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
+err1:
+ kfree(qhp);
+ return ERR_PTR(ret);
+}
+
+int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata)
+{
+ struct c4iw_dev *rhp;
+ struct c4iw_qp *qhp;
+ enum c4iw_qp_attr_mask mask = 0;
+ struct c4iw_qp_attributes attrs;
+
+ PDBG("%s ib_qp %p\n", __func__, ibqp);
+
+ /* iwarp does not support the RTR state */
+ if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
+ attr_mask &= ~IB_QP_STATE;
+
+ /* Make sure we still have something left to do */
+ if (!attr_mask)
+ return 0;
+
+ memset(&attrs, 0, sizeof attrs);
+ qhp = to_c4iw_qp(ibqp);
+ rhp = qhp->rhp;
+
+ attrs.next_state = c4iw_convert_state(attr->qp_state);
+ attrs.enable_rdma_read = (attr->qp_access_flags &
+ IB_ACCESS_REMOTE_READ) ? 1 : 0;
+ attrs.enable_rdma_write = (attr->qp_access_flags &
+ IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
+ attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;
+
+
+ mask |= (attr_mask & IB_QP_STATE) ? C4IW_QP_ATTR_NEXT_STATE : 0;
+ mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
+ (C4IW_QP_ATTR_ENABLE_RDMA_READ |
+ C4IW_QP_ATTR_ENABLE_RDMA_WRITE |
+ C4IW_QP_ATTR_ENABLE_RDMA_BIND) : 0;
+
+ return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0);
+}
+
+struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn)
+{
+ PDBG("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn);
+ return (struct ib_qp *)get_qhp(to_c4iw_dev(dev), qpn);
+}
diff --git a/drivers/infiniband/hw/cxgb4/resource.c b/drivers/infiniband/hw/cxgb4/resource.c
new file mode 100644
index 000000000000..fb195d1d9015
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/resource.c
@@ -0,0 +1,417 @@
+/*
+ * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+/* Crude resource management */
+#include <linux/kernel.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <linux/kfifo.h>
+#include <linux/spinlock.h>
+#include <linux/errno.h>
+#include <linux/genalloc.h>
+#include "iw_cxgb4.h"
+
+#define RANDOM_SIZE 16
+
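+/*
+ * Seed a kfifo with the ids [skip_low, nr - skip_high), optionally in a
+ * shuffled order; the skipped ids at both ends are never handed out.
+ */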
+static int __c4iw_init_resource_fifo(struct kfifo *fifo,
+ spinlock_t *fifo_lock,
+ u32 nr, u32 skip_low,
+ u32 skip_high,
+ int random)
+{
+ u32 i, j, entry = 0, idx;
+ u32 random_bytes;
+ u32 rarray[16];
+ spin_lock_init(fifo_lock);
+
+ if (kfifo_alloc(fifo, nr * sizeof(u32), GFP_KERNEL))
+ return -ENOMEM;
+
+ for (i = 0; i < skip_low + skip_high; i++)
+ kfifo_in(fifo, (unsigned char *) &entry, sizeof(u32));
+ if (random) {
+ j = 0;
+ random_bytes = random32();
+ for (i = 0; i < RANDOM_SIZE; i++)
+ rarray[i] = i + skip_low;
+ for (i = skip_low + RANDOM_SIZE; i < nr - skip_high; i++) {
+ if (j >= RANDOM_SIZE) {
+ j = 0;
+ random_bytes = random32();
+ }
+ idx = (random_bytes >> (j * 2)) & 0xF;
+ kfifo_in(fifo,
+ (unsigned char *) &rarray[idx],
+ sizeof(u32));
+ rarray[idx] = i;
+ j++;
+ }
+ for (i = 0; i < RANDOM_SIZE; i++)
+ kfifo_in(fifo,
+ (unsigned char *) &rarray[i],
+ sizeof(u32));
+ } else
+ for (i = skip_low; i < nr - skip_high; i++)
+ kfifo_in(fifo, (unsigned char *) &i, sizeof(u32));
+
+ for (i = 0; i < skip_low + skip_high; i++)
+ if (kfifo_out_locked(fifo, (unsigned char *) &entry,
+ sizeof(u32), fifo_lock))
+ break;
+ return 0;
+}
+
+static int c4iw_init_resource_fifo(struct kfifo *fifo, spinlock_t * fifo_lock,
+ u32 nr, u32 skip_low, u32 skip_high)
+{
+ return __c4iw_init_resource_fifo(fifo, fifo_lock, nr, skip_low,
+ skip_high, 0);
+}
+
+static int c4iw_init_resource_fifo_random(struct kfifo *fifo,
+ spinlock_t *fifo_lock,
+ u32 nr, u32 skip_low, u32 skip_high)
+{
+ return __c4iw_init_resource_fifo(fifo, fifo_lock, nr, skip_low,
+ skip_high, 1);
+}
+
+static int c4iw_init_qid_fifo(struct c4iw_rdev *rdev)
+{
+ u32 i;
+
+ spin_lock_init(&rdev->resource.qid_fifo_lock);
+
+ if (kfifo_alloc(&rdev->resource.qid_fifo, T4_MAX_QIDS * sizeof(u32),
+ GFP_KERNEL))
+ return -ENOMEM;
+
+ for (i = T4_QID_BASE; i < T4_QID_BASE + T4_MAX_QIDS; i++)
+ if (!(i & rdev->qpmask))
+ kfifo_in(&rdev->resource.qid_fifo,
+ (unsigned char *) &i, sizeof(u32));
+ return 0;
+}
+
+/* nr_* must be power of 2 */
+int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid)
+{
+ int err = 0;
+ err = c4iw_init_resource_fifo_random(&rdev->resource.tpt_fifo,
+ &rdev->resource.tpt_fifo_lock,
+ nr_tpt, 1, 0);
+ if (err)
+ goto tpt_err;
+ err = c4iw_init_qid_fifo(rdev);
+ if (err)
+ goto qid_err;
+ err = c4iw_init_resource_fifo(&rdev->resource.pdid_fifo,
+ &rdev->resource.pdid_fifo_lock,
+ nr_pdid, 1, 0);
+ if (err)
+ goto pdid_err;
+ return 0;
+pdid_err:
+ kfifo_free(&rdev->resource.qid_fifo);
+qid_err:
+ kfifo_free(&rdev->resource.tpt_fifo);
+tpt_err:
+ return -ENOMEM;
+}
+
+/*
+ * returns 0 if no resource available
+ */
+u32 c4iw_get_resource(struct kfifo *fifo, spinlock_t *lock)
+{
+ u32 entry;
+ if (kfifo_out_locked(fifo, (unsigned char *) &entry, sizeof(u32), lock))
+ return entry;
+ else
+ return 0;
+}
+
+void c4iw_put_resource(struct kfifo *fifo, u32 entry, spinlock_t *lock)
+{
+ PDBG("%s entry 0x%x\n", __func__, entry);
+ kfifo_in_locked(fifo, (unsigned char *) &entry, sizeof(u32), lock);
+}
+
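+/*
+ * Allocate a CQ qid.  Prefer ids cached on the ucontext; otherwise pull a
+ * base qid from the global fifo and cache the remaining ids of its db/gts
+ * page for later cq and qp allocations.
+ */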
+u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
+{
+ struct c4iw_qid_list *entry;
+ u32 qid;
+ int i;
+
+ mutex_lock(&uctx->lock);
+ if (!list_empty(&uctx->cqids)) {
+ entry = list_entry(uctx->cqids.next, struct c4iw_qid_list,
+ entry);
+ list_del(&entry->entry);
+ qid = entry->qid;
+ kfree(entry);
+ } else {
+ qid = c4iw_get_resource(&rdev->resource.qid_fifo,
+ &rdev->resource.qid_fifo_lock);
+ if (!qid)
+ goto out;
+ for (i = qid+1; i & rdev->qpmask; i++) {
+ entry = kmalloc(sizeof *entry, GFP_KERNEL);
+ if (!entry)
+ goto out;
+ entry->qid = i;
+ list_add_tail(&entry->entry, &uctx->cqids);
+ }
+
+ /*
+ * now put the same ids on the qp list since they all
+ * map to the same db/gts page.
+ */
+ entry = kmalloc(sizeof *entry, GFP_KERNEL);
+ if (!entry)
+ goto out;
+ entry->qid = qid;
+ list_add_tail(&entry->entry, &uctx->qpids);
+ for (i = qid+1; i & rdev->qpmask; i++) {
+ entry = kmalloc(sizeof *entry, GFP_KERNEL);
+ if (!entry)
+ goto out;
+ entry->qid = i;
+ list_add_tail(&entry->entry, &uctx->qpids);
+ }
+ }
+out:
+ mutex_unlock(&uctx->lock);
+ PDBG("%s qid 0x%x\n", __func__, qid);
+ return qid;
+}
+
+void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
+ struct c4iw_dev_ucontext *uctx)
+{
+ struct c4iw_qid_list *entry;
+
+ entry = kmalloc(sizeof *entry, GFP_KERNEL);
+ if (!entry)
+ return;
+ PDBG("%s qid 0x%x\n", __func__, qid);
+ entry->qid = qid;
+ mutex_lock(&uctx->lock);
+ list_add_tail(&entry->entry, &uctx->cqids);
+ mutex_unlock(&uctx->lock);
+}
+
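+/* Allocate a QP qid; the counterpart of c4iw_get_cqid, with the qp and cq caches swapped. */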
+u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
+{
+ struct c4iw_qid_list *entry;
+ u32 qid;
+ int i;
+
+ mutex_lock(&uctx->lock);
+ if (!list_empty(&uctx->qpids)) {
+ entry = list_entry(uctx->qpids.next, struct c4iw_qid_list,
+ entry);
+ list_del(&entry->entry);
+ qid = entry->qid;
+ kfree(entry);
+ } else {
+ qid = c4iw_get_resource(&rdev->resource.qid_fifo,
+ &rdev->resource.qid_fifo_lock);
+ if (!qid)
+ goto out;
+ for (i = qid+1; i & rdev->qpmask; i++) {
+ entry = kmalloc(sizeof *entry, GFP_KERNEL);
+ if (!entry)
+ goto out;
+ entry->qid = i;
+ list_add_tail(&entry->entry, &uctx->qpids);
+ }
+
+ /*
+ * now put the same ids on the cq list since they all
+ * map to the same db/gts page.
+ */
+ entry = kmalloc(sizeof *entry, GFP_KERNEL);
+ if (!entry)
+ goto out;
+ entry->qid = qid;
+ list_add_tail(&entry->entry, &uctx->cqids);
+ for (i = qid; i & rdev->qpmask; i++) {
+ entry = kmalloc(sizeof *entry, GFP_KERNEL);
+ if (!entry)
+ goto out;
+ entry->qid = i;
+ list_add_tail(&entry->entry, &uctx->cqids);
+ }
+ }
+out:
+ mutex_unlock(&uctx->lock);
+ PDBG("%s qid 0x%x\n", __func__, qid);
+ return qid;
+}
+
+void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
+ struct c4iw_dev_ucontext *uctx)
+{
+ struct c4iw_qid_list *entry;
+
+ entry = kmalloc(sizeof *entry, GFP_KERNEL);
+ if (!entry)
+ return;
+ PDBG("%s qid 0x%x\n", __func__, qid);
+ entry->qid = qid;
+ mutex_lock(&uctx->lock);
+ list_add_tail(&entry->entry, &uctx->qpids);
+ mutex_unlock(&uctx->lock);
+}
+
+void c4iw_destroy_resource(struct c4iw_resource *rscp)
+{
+ kfifo_free(&rscp->tpt_fifo);
+ kfifo_free(&rscp->qid_fifo);
+ kfifo_free(&rscp->pdid_fifo);
+}
+
+/*
+ * PBL Memory Manager. Uses Linux generic allocator.
+ */
+
+#define MIN_PBL_SHIFT 8 /* 256B == min PBL size (32 entries) */
+
+u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size)
+{
+ unsigned long addr = gen_pool_alloc(rdev->pbl_pool, size);
+ PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size);
+ return (u32)addr;
+}
+
+void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
+{
+ PDBG("%s addr 0x%x size %d\n", __func__, addr, size);
+ gen_pool_free(rdev->pbl_pool, (unsigned long)addr, size);
+}
+
+int c4iw_pblpool_create(struct c4iw_rdev *rdev)
+{
+ unsigned pbl_start, pbl_chunk, pbl_top;
+
+ rdev->pbl_pool = gen_pool_create(MIN_PBL_SHIFT, -1);
+ if (!rdev->pbl_pool)
+ return -ENOMEM;
+
+ pbl_start = rdev->lldi.vr->pbl.start;
+ pbl_chunk = rdev->lldi.vr->pbl.size;
+ pbl_top = pbl_start + pbl_chunk;
+
+ while (pbl_start < pbl_top) {
+ pbl_chunk = min(pbl_top - pbl_start + 1, pbl_chunk);
+ if (gen_pool_add(rdev->pbl_pool, pbl_start, pbl_chunk, -1)) {
+ PDBG("%s failed to add PBL chunk (%x/%x)\n",
+ __func__, pbl_start, pbl_chunk);
+ if (pbl_chunk <= 1024 << MIN_PBL_SHIFT) {
+ printk(KERN_WARNING MOD
+ "Failed to add all PBL chunks (%x/%x)\n",
+ pbl_start,
+ pbl_top - pbl_start);
+ return 0;
+ }
+ pbl_chunk >>= 1;
+ } else {
+ PDBG("%s added PBL chunk (%x/%x)\n",
+ __func__, pbl_start, pbl_chunk);
+ pbl_start += pbl_chunk;
+ }
+ }
+
+ return 0;
+}
+
+void c4iw_pblpool_destroy(struct c4iw_rdev *rdev)
+{
+ gen_pool_destroy(rdev->pbl_pool);
+}
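/*
 * Sketch, not part of this patch: both on-chip memory managers follow the
 * same gen_pool pattern -- create a pool with a minimum allocation order,
 * seed it with the adapter's address range, then carve sub-ranges out of
 * it.  A hypothetical caller of the PBL pool:
 */
static int example_pblpool_usage(struct c4iw_rdev *rdev)
{
	u32 pbl_addr;

	/* Ask for 512 bytes of PBL memory; 0 means the pool is exhausted. */
	pbl_addr = c4iw_pblpool_alloc(rdev, 512);
	if (!pbl_addr)
		return -ENOMEM;

	/* ... program the TPT entry with pbl_addr, register the MR, etc ... */

	c4iw_pblpool_free(rdev, pbl_addr, 512);
	return 0;
}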
+
+/*
+ * RQT Memory Manager. Uses Linux generic allocator.
+ */
+
+#define MIN_RQT_SHIFT 10 /* 1KB == min RQT size (16 entries) */
+
+u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size)
+{
+ unsigned long addr = gen_pool_alloc(rdev->rqt_pool, size << 6);
+ PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size << 6);
+ return (u32)addr;
+}
+
+void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
+{
+ PDBG("%s addr 0x%x size %d\n", __func__, addr, size << 6);
+ gen_pool_free(rdev->rqt_pool, (unsigned long)addr, size << 6);
+}
+
+int c4iw_rqtpool_create(struct c4iw_rdev *rdev)
+{
+ unsigned rqt_start, rqt_chunk, rqt_top;
+
+ rdev->rqt_pool = gen_pool_create(MIN_RQT_SHIFT, -1);
+ if (!rdev->rqt_pool)
+ return -ENOMEM;
+
+ rqt_start = rdev->lldi.vr->rq.start;
+ rqt_chunk = rdev->lldi.vr->rq.size;
+ rqt_top = rqt_start + rqt_chunk;
+
+ while (rqt_start < rqt_top) {
+ rqt_chunk = min(rqt_top - rqt_start + 1, rqt_chunk);
+ if (gen_pool_add(rdev->rqt_pool, rqt_start, rqt_chunk, -1)) {
+ PDBG("%s failed to add RQT chunk (%x/%x)\n",
+ __func__, rqt_start, rqt_chunk);
+ if (rqt_chunk <= 1024 << MIN_RQT_SHIFT) {
+ printk(KERN_WARNING MOD
+ "Failed to add all RQT chunks (%x/%x)\n",
+ rqt_start, rqt_top - rqt_start);
+ return 0;
+ }
+ rqt_chunk >>= 1;
+ } else {
+ PDBG("%s added RQT chunk (%x/%x)\n",
+ __func__, rqt_start, rqt_chunk);
+ rqt_start += rqt_chunk;
+ }
+ }
+ return 0;
+}
+
+void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev)
+{
+ gen_pool_destroy(rdev->rqt_pool);
+}
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
new file mode 100644
index 000000000000..1057cb96302e
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -0,0 +1,548 @@
+/*
+ * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __T4_H__
+#define __T4_H__
+
+#include "t4_hw.h"
+#include "t4_regs.h"
+#include "t4_msg.h"
+#include "t4fw_ri_api.h"
+
+#define T4_QID_BASE 1024
+#define T4_MAX_QIDS 256
+#define T4_MAX_NUM_QP (1<<16)
+#define T4_MAX_NUM_CQ (1<<15)
+#define T4_MAX_NUM_PD (1<<15)
+#define T4_EQ_STATUS_ENTRIES (L1_CACHE_BYTES > 64 ? 2 : 1)
+#define T4_MAX_EQ_SIZE (65520 - T4_EQ_STATUS_ENTRIES)
+#define T4_MAX_IQ_SIZE (65520 - 1)
+#define T4_MAX_RQ_SIZE (8192 - T4_EQ_STATUS_ENTRIES)
+#define T4_MAX_SQ_SIZE (T4_MAX_EQ_SIZE - 1)
+#define T4_MAX_QP_DEPTH (T4_MAX_RQ_SIZE - 1)
+#define T4_MAX_CQ_DEPTH (T4_MAX_IQ_SIZE - 1)
+#define T4_MAX_NUM_STAG (1<<15)
+#define T4_MAX_MR_SIZE (~0ULL - 1)
+#define T4_PAGESIZE_MASK 0xffff000 /* 4KB-128MB */
+#define T4_STAG_UNSET 0xffffffff
+#define T4_FW_MAJ 0
+#define T4_EQ_STATUS_ENTRIES (L1_CACHE_BYTES > 64 ? 2 : 1)
+
+struct t4_status_page {
+ __be32 rsvd1; /* flit 0 - hw owns */
+ __be16 rsvd2;
+ __be16 qid;
+ __be16 cidx;
+ __be16 pidx;
+ u8 qp_err; /* flit 1 - sw owns */
+ u8 db_off;
+};
+
+#define T4_EQ_SIZE 64
+
+#define T4_SQ_NUM_SLOTS 4
+#define T4_SQ_NUM_BYTES (T4_EQ_SIZE * T4_SQ_NUM_SLOTS)
+#define T4_MAX_SEND_SGE ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_send_wr) - \
+ sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge))
+#define T4_MAX_SEND_INLINE ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_send_wr) - \
+ sizeof(struct fw_ri_immd)))
+#define T4_MAX_WRITE_INLINE ((T4_SQ_NUM_BYTES - \
+ sizeof(struct fw_ri_rdma_write_wr) - \
+ sizeof(struct fw_ri_immd)))
+#define T4_MAX_WRITE_SGE ((T4_SQ_NUM_BYTES - \
+ sizeof(struct fw_ri_rdma_write_wr) - \
+ sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge))
+#define T4_MAX_FR_IMMD ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_fr_nsmr_wr) - \
+ sizeof(struct fw_ri_immd)))
+#define T4_MAX_FR_DEPTH (T4_MAX_FR_IMMD / sizeof(u64))
+
+#define T4_RQ_NUM_SLOTS 2
+#define T4_RQ_NUM_BYTES (T4_EQ_SIZE * T4_RQ_NUM_SLOTS)
+#define T4_MAX_RECV_SGE 4
+
+union t4_wr {
+ struct fw_ri_res_wr res;
+ struct fw_ri_wr ri;
+ struct fw_ri_rdma_write_wr write;
+ struct fw_ri_send_wr send;
+ struct fw_ri_rdma_read_wr read;
+ struct fw_ri_bind_mw_wr bind;
+ struct fw_ri_fr_nsmr_wr fr;
+ struct fw_ri_inv_lstag_wr inv;
+ struct t4_status_page status;
+ __be64 flits[T4_EQ_SIZE / sizeof(__be64) * T4_SQ_NUM_SLOTS];
+};
+
+union t4_recv_wr {
+ struct fw_ri_recv_wr recv;
+ struct t4_status_page status;
+ __be64 flits[T4_EQ_SIZE / sizeof(__be64) * T4_RQ_NUM_SLOTS];
+};
+
+static inline void init_wr_hdr(union t4_wr *wqe, u16 wrid,
+ enum fw_wr_opcodes opcode, u8 flags, u8 len16)
+{
+ int slots_used;
+
+ wqe->send.opcode = (u8)opcode;
+ wqe->send.flags = flags;
+ wqe->send.wrid = wrid;
+ wqe->send.r1[0] = 0;
+ wqe->send.r1[1] = 0;
+ wqe->send.r1[2] = 0;
+ wqe->send.len16 = len16;
+
+ slots_used = DIV_ROUND_UP(len16*16, T4_EQ_SIZE);
+ while (slots_used < T4_SQ_NUM_SLOTS) {
+ wqe->flits[slots_used * T4_EQ_SIZE / sizeof(__be64)] = 0;
+ slots_used++;
+ }
+}
+
+/* CQE/AE status codes */
+#define T4_ERR_SUCCESS 0x0
+#define T4_ERR_STAG 0x1 /* STAG invalid: either the */
+				 /* STAG is off limit, being 0, */
+ /* or STAG_key mismatch */
+#define T4_ERR_PDID 0x2 /* PDID mismatch */
+#define T4_ERR_QPID 0x3 /* QPID mismatch */
+#define T4_ERR_ACCESS 0x4 /* Invalid access right */
+#define T4_ERR_WRAP 0x5 /* Wrap error */
+#define T4_ERR_BOUND 0x6 /* base and bounds violation */
+#define T4_ERR_INVALIDATE_SHARED_MR 0x7 /* attempt to invalidate a */
+ /* shared memory region */
+#define T4_ERR_INVALIDATE_MR_WITH_MW_BOUND 0x8 /* attempt to invalidate a */
+						/* MR with an MW bound to it */
+#define T4_ERR_ECC 0x9 /* ECC error detected */
+#define T4_ERR_ECC_PSTAG 0xA /* ECC error detected when */
+ /* reading PSTAG for a MW */
+ /* Invalidate */
+#define T4_ERR_PBL_ADDR_BOUND 0xB /* pbl addr out of bounds: */
+ /* software error */
+#define T4_ERR_SWFLUSH 0xC /* SW FLUSHED */
+#define T4_ERR_CRC 0x10 /* CRC error */
+#define T4_ERR_MARKER 0x11 /* Marker error */
+#define T4_ERR_PDU_LEN_ERR 0x12 /* invalid PDU length */
+#define T4_ERR_OUT_OF_RQE 0x13 /* out of RQE */
+#define T4_ERR_DDP_VERSION 0x14 /* wrong DDP version */
+#define T4_ERR_RDMA_VERSION 0x15 /* wrong RDMA version */
+#define T4_ERR_OPCODE 0x16 /* invalid rdma opcode */
+#define T4_ERR_DDP_QUEUE_NUM 0x17 /* invalid ddp queue number */
+#define T4_ERR_MSN 0x18 /* MSN error */
+#define T4_ERR_TBIT 0x19 /* tag bit not set correctly */
+#define T4_ERR_MO 0x1A /* MO not 0 for TERMINATE */
+ /* or READ_REQ */
+#define T4_ERR_MSN_GAP 0x1B
+#define T4_ERR_MSN_RANGE 0x1C
+#define T4_ERR_IRD_OVERFLOW 0x1D
+#define T4_ERR_RQE_ADDR_BOUND 0x1E /* RQE addr out of bounds: */
+ /* software error */
+#define T4_ERR_INTERNAL_ERR 0x1F /* internal error (opcode */
+ /* mismatch) */
+/*
+ * CQE defs
+ */
+struct t4_cqe {
+ __be32 header;
+ __be32 len;
+ union {
+ struct {
+ __be32 stag;
+ __be32 msn;
+ } rcqe;
+ struct {
+ u32 nada1;
+ u16 nada2;
+ u16 cidx;
+ } scqe;
+ struct {
+ __be32 wrid_hi;
+ __be32 wrid_low;
+ } gen;
+ } u;
+ __be64 reserved;
+ __be64 bits_type_ts;
+};
+
+/* macros for flit 0 of the cqe */
+
+#define S_CQE_QPID 12
+#define M_CQE_QPID 0xFFFFF
+#define G_CQE_QPID(x) ((((x) >> S_CQE_QPID)) & M_CQE_QPID)
+#define V_CQE_QPID(x) ((x)<<S_CQE_QPID)
+
+#define S_CQE_SWCQE 11
+#define M_CQE_SWCQE 0x1
+#define G_CQE_SWCQE(x) ((((x) >> S_CQE_SWCQE)) & M_CQE_SWCQE)
+#define V_CQE_SWCQE(x) ((x)<<S_CQE_SWCQE)
+
+#define S_CQE_STATUS 5
+#define M_CQE_STATUS 0x1F
+#define G_CQE_STATUS(x) ((((x) >> S_CQE_STATUS)) & M_CQE_STATUS)
+#define V_CQE_STATUS(x) ((x)<<S_CQE_STATUS)
+
+#define S_CQE_TYPE 4
+#define M_CQE_TYPE 0x1
+#define G_CQE_TYPE(x) ((((x) >> S_CQE_TYPE)) & M_CQE_TYPE)
+#define V_CQE_TYPE(x) ((x)<<S_CQE_TYPE)
+
+#define S_CQE_OPCODE 0
+#define M_CQE_OPCODE 0xF
+#define G_CQE_OPCODE(x) ((((x) >> S_CQE_OPCODE)) & M_CQE_OPCODE)
+#define V_CQE_OPCODE(x) ((x)<<S_CQE_OPCODE)
+
+#define SW_CQE(x) (G_CQE_SWCQE(be32_to_cpu((x)->header)))
+#define CQE_QPID(x) (G_CQE_QPID(be32_to_cpu((x)->header)))
+#define CQE_TYPE(x) (G_CQE_TYPE(be32_to_cpu((x)->header)))
+#define SQ_TYPE(x) (CQE_TYPE((x)))
+#define RQ_TYPE(x) (!CQE_TYPE((x)))
+#define CQE_STATUS(x) (G_CQE_STATUS(be32_to_cpu((x)->header)))
+#define CQE_OPCODE(x) (G_CQE_OPCODE(be32_to_cpu((x)->header)))
+
+#define CQE_SEND_OPCODE(x)( \
+ (G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND) || \
+ (G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE) || \
+ (G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_INV) || \
+ (G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE_INV))
+
+#define CQE_LEN(x) (be32_to_cpu((x)->len))
+
+/* used for RQ completion processing */
+#define CQE_WRID_STAG(x) (be32_to_cpu((x)->u.rcqe.stag))
+#define CQE_WRID_MSN(x) (be32_to_cpu((x)->u.rcqe.msn))
+
+/* used for SQ completion processing */
+#define CQE_WRID_SQ_IDX(x) ((x)->u.scqe.cidx)
+
+/* generic accessor macros */
+#define CQE_WRID_HI(x) ((x)->u.gen.wrid_hi)
+#define CQE_WRID_LOW(x) ((x)->u.gen.wrid_low)
+
+/* macros for flit 3 of the cqe */
+#define S_CQE_GENBIT 63
+#define M_CQE_GENBIT 0x1
+#define G_CQE_GENBIT(x) (((x) >> S_CQE_GENBIT) & M_CQE_GENBIT)
+#define V_CQE_GENBIT(x) ((x)<<S_CQE_GENBIT)
+
+#define S_CQE_OVFBIT 62
+#define M_CQE_OVFBIT 0x1
+#define G_CQE_OVFBIT(x) ((((x) >> S_CQE_OVFBIT)) & M_CQE_OVFBIT)
+
+#define S_CQE_IQTYPE 60
+#define M_CQE_IQTYPE 0x3
+#define G_CQE_IQTYPE(x) ((((x) >> S_CQE_IQTYPE)) & M_CQE_IQTYPE)
+
+#define M_CQE_TS 0x0fffffffffffffffULL
+#define G_CQE_TS(x) ((x) & M_CQE_TS)
+
+#define CQE_OVFBIT(x) ((unsigned)G_CQE_OVFBIT(be64_to_cpu((x)->bits_type_ts)))
+#define CQE_GENBIT(x) ((unsigned)G_CQE_GENBIT(be64_to_cpu((x)->bits_type_ts)))
+#define CQE_TS(x) (G_CQE_TS(be64_to_cpu((x)->bits_type_ts)))
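/*
 * Decode sketch, not part of this patch: the flit-0/flit-3 accessors above
 * recover everything needed to route a polled CQE.  Example (the function
 * name is illustrative):
 */
static inline void example_decode_cqe(struct t4_cqe *cqe)
{
	u32 qpid = CQE_QPID(cqe);	/* owning QP */
	int sq = SQ_TYPE(cqe);		/* 1 = SQ completion, 0 = RQ */
	int status = CQE_STATUS(cqe);	/* one of the T4_ERR_* codes */
	int op = CQE_OPCODE(cqe);	/* FW_RI_* opcode */

	(void)qpid; (void)sq; (void)status; (void)op;
}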
+
+struct t4_swsqe {
+ u64 wr_id;
+ struct t4_cqe cqe;
+ int read_len;
+ int opcode;
+ int complete;
+ int signaled;
+ u16 idx;
+};
+
+struct t4_sq {
+ union t4_wr *queue;
+ dma_addr_t dma_addr;
+ DECLARE_PCI_UNMAP_ADDR(mapping);
+ struct t4_swsqe *sw_sq;
+ struct t4_swsqe *oldest_read;
+ u64 udb;
+ size_t memsize;
+ u32 qid;
+ u16 in_use;
+ u16 size;
+ u16 cidx;
+ u16 pidx;
+};
+
+struct t4_swrqe {
+ u64 wr_id;
+};
+
+struct t4_rq {
+ union t4_recv_wr *queue;
+ dma_addr_t dma_addr;
+ DECLARE_PCI_UNMAP_ADDR(mapping);
+ struct t4_swrqe *sw_rq;
+ u64 udb;
+ size_t memsize;
+ u32 qid;
+ u32 msn;
+ u32 rqt_hwaddr;
+ u16 rqt_size;
+ u16 in_use;
+ u16 size;
+ u16 cidx;
+ u16 pidx;
+};
+
+struct t4_wq {
+ struct t4_sq sq;
+ struct t4_rq rq;
+ void __iomem *db;
+ void __iomem *gts;
+ struct c4iw_rdev *rdev;
+};
+
+static inline int t4_rqes_posted(struct t4_wq *wq)
+{
+ return wq->rq.in_use;
+}
+
+static inline int t4_rq_empty(struct t4_wq *wq)
+{
+ return wq->rq.in_use == 0;
+}
+
+static inline int t4_rq_full(struct t4_wq *wq)
+{
+ return wq->rq.in_use == (wq->rq.size - 1);
+}
+
+static inline u32 t4_rq_avail(struct t4_wq *wq)
+{
+ return wq->rq.size - 1 - wq->rq.in_use;
+}
+
+static inline void t4_rq_produce(struct t4_wq *wq)
+{
+ wq->rq.in_use++;
+ if (++wq->rq.pidx == wq->rq.size)
+ wq->rq.pidx = 0;
+}
+
+static inline void t4_rq_consume(struct t4_wq *wq)
+{
+ wq->rq.in_use--;
+ wq->rq.msn++;
+ if (++wq->rq.cidx == wq->rq.size)
+ wq->rq.cidx = 0;
+}
+
+static inline int t4_sq_empty(struct t4_wq *wq)
+{
+ return wq->sq.in_use == 0;
+}
+
+static inline int t4_sq_full(struct t4_wq *wq)
+{
+ return wq->sq.in_use == (wq->sq.size - 1);
+}
+
+static inline u32 t4_sq_avail(struct t4_wq *wq)
+{
+ return wq->sq.size - 1 - wq->sq.in_use;
+}
+
+static inline void t4_sq_produce(struct t4_wq *wq)
+{
+ wq->sq.in_use++;
+ if (++wq->sq.pidx == wq->sq.size)
+ wq->sq.pidx = 0;
+}
+
+static inline void t4_sq_consume(struct t4_wq *wq)
+{
+ wq->sq.in_use--;
+ if (++wq->sq.cidx == wq->sq.size)
+ wq->sq.cidx = 0;
+}
+
+static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc)
+{
+ inc *= T4_SQ_NUM_SLOTS;
+ wmb();
+ writel(QID(wq->sq.qid) | PIDX(inc), wq->db);
+}
+
+static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc)
+{
+ inc *= T4_RQ_NUM_SLOTS;
+ wmb();
+ writel(QID(wq->rq.qid) | PIDX(inc), wq->db);
+}
+
+static inline int t4_wq_in_error(struct t4_wq *wq)
+{
+ return wq->sq.queue[wq->sq.size].status.qp_err;
+}
+
+static inline void t4_set_wq_in_error(struct t4_wq *wq)
+{
+ wq->sq.queue[wq->sq.size].status.qp_err = 1;
+ wq->rq.queue[wq->rq.size].status.qp_err = 1;
+}
+
+static inline void t4_disable_wq_db(struct t4_wq *wq)
+{
+ wq->sq.queue[wq->sq.size].status.db_off = 1;
+ wq->rq.queue[wq->rq.size].status.db_off = 1;
+}
+
+static inline void t4_enable_wq_db(struct t4_wq *wq)
+{
+ wq->sq.queue[wq->sq.size].status.db_off = 0;
+ wq->rq.queue[wq->rq.size].status.db_off = 0;
+}
+
+static inline int t4_wq_db_enabled(struct t4_wq *wq)
+{
+ return !wq->sq.queue[wq->sq.size].status.db_off;
+}
+
+struct t4_cq {
+ struct t4_cqe *queue;
+ dma_addr_t dma_addr;
+ DECLARE_PCI_UNMAP_ADDR(mapping);
+ struct t4_cqe *sw_queue;
+ void __iomem *gts;
+ struct c4iw_rdev *rdev;
+ u64 ugts;
+ size_t memsize;
+ __be64 bits_type_ts;
+ u32 cqid;
+ u16 size; /* including status page */
+ u16 cidx;
+ u16 sw_pidx;
+ u16 sw_cidx;
+ u16 sw_in_use;
+ u16 cidx_inc;
+ u8 gen;
+ u8 error;
+};
+
+static inline int t4_arm_cq(struct t4_cq *cq, int se)
+{
+ u32 val;
+
+ while (cq->cidx_inc > CIDXINC_MASK) {
+ val = SEINTARM(0) | CIDXINC(CIDXINC_MASK) | TIMERREG(7) |
+ INGRESSQID(cq->cqid);
+ writel(val, cq->gts);
+ cq->cidx_inc -= CIDXINC_MASK;
+ }
+ val = SEINTARM(se) | CIDXINC(cq->cidx_inc) | TIMERREG(6) |
+ INGRESSQID(cq->cqid);
+ writel(val, cq->gts);
+ cq->cidx_inc = 0;
+ return 0;
+}
+
+static inline void t4_swcq_produce(struct t4_cq *cq)
+{
+ cq->sw_in_use++;
+ if (++cq->sw_pidx == cq->size)
+ cq->sw_pidx = 0;
+}
+
+static inline void t4_swcq_consume(struct t4_cq *cq)
+{
+ cq->sw_in_use--;
+ if (++cq->sw_cidx == cq->size)
+ cq->sw_cidx = 0;
+}
+
+static inline void t4_hwcq_consume(struct t4_cq *cq)
+{
+ cq->bits_type_ts = cq->queue[cq->cidx].bits_type_ts;
+ if (++cq->cidx_inc == cq->size)
+ cq->cidx_inc = 0;
+ if (++cq->cidx == cq->size) {
+ cq->cidx = 0;
+ cq->gen ^= 1;
+ }
+}
+
+static inline int t4_valid_cqe(struct t4_cq *cq, struct t4_cqe *cqe)
+{
+ return (CQE_GENBIT(cqe) == cq->gen);
+}
+
+static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
+{
+ int ret;
+ u16 prev_cidx;
+
+ if (cq->cidx == 0)
+ prev_cidx = cq->size - 1;
+ else
+ prev_cidx = cq->cidx - 1;
+
+ if (cq->queue[prev_cidx].bits_type_ts != cq->bits_type_ts) {
+ ret = -EOVERFLOW;
+ cq->error = 1;
+ printk(KERN_ERR MOD "cq overflow cqid %u\n", cq->cqid);
+ } else if (t4_valid_cqe(cq, &cq->queue[cq->cidx])) {
+ *cqe = &cq->queue[cq->cidx];
+ ret = 0;
+ } else
+ ret = -ENODATA;
+ return ret;
+}
+
+static inline struct t4_cqe *t4_next_sw_cqe(struct t4_cq *cq)
+{
+ if (cq->sw_in_use)
+ return &cq->sw_queue[cq->sw_cidx];
+ return NULL;
+}
+
+static inline int t4_next_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
+{
+ int ret = 0;
+
+ if (cq->error)
+ ret = -ENODATA;
+ else if (cq->sw_in_use)
+ *cqe = &cq->sw_queue[cq->sw_cidx];
+ else
+ ret = t4_next_hw_cqe(cq, cqe);
+ return ret;
+}
+
+static inline int t4_cq_in_error(struct t4_cq *cq)
+{
+ return ((struct t4_status_page *)&cq->queue[cq->size])->qp_err;
+}
+
+static inline void t4_set_cq_in_error(struct t4_cq *cq)
+{
+ ((struct t4_status_page *)&cq->queue[cq->size])->qp_err = 1;
+}
+#endif
diff --git a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
new file mode 100644
index 000000000000..fc706bd07fae
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
@@ -0,0 +1,829 @@
+/*
+ * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _T4FW_RI_API_H_
+#define _T4FW_RI_API_H_
+
+#include "t4fw_api.h"
+
+enum fw_ri_wr_opcode {
+ FW_RI_RDMA_WRITE = 0x0, /* IETF RDMAP v1.0 ... */
+ FW_RI_READ_REQ = 0x1,
+ FW_RI_READ_RESP = 0x2,
+ FW_RI_SEND = 0x3,
+ FW_RI_SEND_WITH_INV = 0x4,
+ FW_RI_SEND_WITH_SE = 0x5,
+ FW_RI_SEND_WITH_SE_INV = 0x6,
+ FW_RI_TERMINATE = 0x7,
+ FW_RI_RDMA_INIT = 0x8, /* CHELSIO RI specific ... */
+ FW_RI_BIND_MW = 0x9,
+ FW_RI_FAST_REGISTER = 0xa,
+ FW_RI_LOCAL_INV = 0xb,
+ FW_RI_QP_MODIFY = 0xc,
+ FW_RI_BYPASS = 0xd,
+ FW_RI_RECEIVE = 0xe,
+
+ FW_RI_SGE_EC_CR_RETURN = 0xf
+};
+
+enum fw_ri_wr_flags {
+ FW_RI_COMPLETION_FLAG = 0x01,
+ FW_RI_NOTIFICATION_FLAG = 0x02,
+ FW_RI_SOLICITED_EVENT_FLAG = 0x04,
+ FW_RI_READ_FENCE_FLAG = 0x08,
+ FW_RI_LOCAL_FENCE_FLAG = 0x10,
+ FW_RI_RDMA_READ_INVALIDATE = 0x20
+};
+
+enum fw_ri_mpa_attrs {
+ FW_RI_MPA_RX_MARKER_ENABLE = 0x01,
+ FW_RI_MPA_TX_MARKER_ENABLE = 0x02,
+ FW_RI_MPA_CRC_ENABLE = 0x04,
+ FW_RI_MPA_IETF_ENABLE = 0x08
+};
+
+enum fw_ri_qp_caps {
+ FW_RI_QP_RDMA_READ_ENABLE = 0x01,
+ FW_RI_QP_RDMA_WRITE_ENABLE = 0x02,
+ FW_RI_QP_BIND_ENABLE = 0x04,
+ FW_RI_QP_FAST_REGISTER_ENABLE = 0x08,
+ FW_RI_QP_STAG0_ENABLE = 0x10
+};
+
+enum fw_ri_addr_type {
+ FW_RI_ZERO_BASED_TO = 0x00,
+ FW_RI_VA_BASED_TO = 0x01
+};
+
+enum fw_ri_mem_perms {
+ FW_RI_MEM_ACCESS_REM_WRITE = 0x01,
+ FW_RI_MEM_ACCESS_REM_READ = 0x02,
+ FW_RI_MEM_ACCESS_REM = 0x03,
+ FW_RI_MEM_ACCESS_LOCAL_WRITE = 0x04,
+ FW_RI_MEM_ACCESS_LOCAL_READ = 0x08,
+ FW_RI_MEM_ACCESS_LOCAL = 0x0C
+};
+
+enum fw_ri_stag_type {
+ FW_RI_STAG_NSMR = 0x00,
+ FW_RI_STAG_SMR = 0x01,
+ FW_RI_STAG_MW = 0x02,
+ FW_RI_STAG_MW_RELAXED = 0x03
+};
+
+enum fw_ri_data_op {
+ FW_RI_DATA_IMMD = 0x81,
+ FW_RI_DATA_DSGL = 0x82,
+ FW_RI_DATA_ISGL = 0x83
+};
+
+enum fw_ri_sgl_depth {
+ FW_RI_SGL_DEPTH_MAX_SQ = 16,
+ FW_RI_SGL_DEPTH_MAX_RQ = 4
+};
+
+struct fw_ri_dsge_pair {
+ __be32 len[2];
+ __be64 addr[2];
+};
+
+struct fw_ri_dsgl {
+ __u8 op;
+ __u8 r1;
+ __be16 nsge;
+ __be32 len0;
+ __be64 addr0;
+#ifndef C99_NOT_SUPPORTED
+ struct fw_ri_dsge_pair sge[0];
+#endif
+};
+
+struct fw_ri_sge {
+ __be32 stag;
+ __be32 len;
+ __be64 to;
+};
+
+struct fw_ri_isgl {
+ __u8 op;
+ __u8 r1;
+ __be16 nsge;
+ __be32 r2;
+#ifndef C99_NOT_SUPPORTED
+ struct fw_ri_sge sge[0];
+#endif
+};
+
+struct fw_ri_immd {
+ __u8 op;
+ __u8 r1;
+ __be16 r2;
+ __be32 immdlen;
+#ifndef C99_NOT_SUPPORTED
+ __u8 data[0];
+#endif
+};
+
+struct fw_ri_tpte {
+ __be32 valid_to_pdid;
+ __be32 locread_to_qpid;
+ __be32 nosnoop_pbladdr;
+ __be32 len_lo;
+ __be32 va_hi;
+ __be32 va_lo_fbo;
+ __be32 dca_mwbcnt_pstag;
+ __be32 len_hi;
+};
+
+#define S_FW_RI_TPTE_VALID 31
+#define M_FW_RI_TPTE_VALID 0x1
+#define V_FW_RI_TPTE_VALID(x) ((x) << S_FW_RI_TPTE_VALID)
+#define G_FW_RI_TPTE_VALID(x) \
+ (((x) >> S_FW_RI_TPTE_VALID) & M_FW_RI_TPTE_VALID)
+#define F_FW_RI_TPTE_VALID V_FW_RI_TPTE_VALID(1U)
+
+#define S_FW_RI_TPTE_STAGKEY 23
+#define M_FW_RI_TPTE_STAGKEY 0xff
+#define V_FW_RI_TPTE_STAGKEY(x) ((x) << S_FW_RI_TPTE_STAGKEY)
+#define G_FW_RI_TPTE_STAGKEY(x) \
+ (((x) >> S_FW_RI_TPTE_STAGKEY) & M_FW_RI_TPTE_STAGKEY)
+
+#define S_FW_RI_TPTE_STAGSTATE 22
+#define M_FW_RI_TPTE_STAGSTATE 0x1
+#define V_FW_RI_TPTE_STAGSTATE(x) ((x) << S_FW_RI_TPTE_STAGSTATE)
+#define G_FW_RI_TPTE_STAGSTATE(x) \
+ (((x) >> S_FW_RI_TPTE_STAGSTATE) & M_FW_RI_TPTE_STAGSTATE)
+#define F_FW_RI_TPTE_STAGSTATE V_FW_RI_TPTE_STAGSTATE(1U)
+
+#define S_FW_RI_TPTE_STAGTYPE 20
+#define M_FW_RI_TPTE_STAGTYPE 0x3
+#define V_FW_RI_TPTE_STAGTYPE(x) ((x) << S_FW_RI_TPTE_STAGTYPE)
+#define G_FW_RI_TPTE_STAGTYPE(x) \
+ (((x) >> S_FW_RI_TPTE_STAGTYPE) & M_FW_RI_TPTE_STAGTYPE)
+
+#define S_FW_RI_TPTE_PDID 0
+#define M_FW_RI_TPTE_PDID 0xfffff
+#define V_FW_RI_TPTE_PDID(x) ((x) << S_FW_RI_TPTE_PDID)
+#define G_FW_RI_TPTE_PDID(x) \
+ (((x) >> S_FW_RI_TPTE_PDID) & M_FW_RI_TPTE_PDID)
+
+#define S_FW_RI_TPTE_PERM 28
+#define M_FW_RI_TPTE_PERM 0xf
+#define V_FW_RI_TPTE_PERM(x) ((x) << S_FW_RI_TPTE_PERM)
+#define G_FW_RI_TPTE_PERM(x) \
+ (((x) >> S_FW_RI_TPTE_PERM) & M_FW_RI_TPTE_PERM)
+
+#define S_FW_RI_TPTE_REMINVDIS 27
+#define M_FW_RI_TPTE_REMINVDIS 0x1
+#define V_FW_RI_TPTE_REMINVDIS(x) ((x) << S_FW_RI_TPTE_REMINVDIS)
+#define G_FW_RI_TPTE_REMINVDIS(x) \
+ (((x) >> S_FW_RI_TPTE_REMINVDIS) & M_FW_RI_TPTE_REMINVDIS)
+#define F_FW_RI_TPTE_REMINVDIS V_FW_RI_TPTE_REMINVDIS(1U)
+
+#define S_FW_RI_TPTE_ADDRTYPE 26
+#define M_FW_RI_TPTE_ADDRTYPE 1
+#define V_FW_RI_TPTE_ADDRTYPE(x) ((x) << S_FW_RI_TPTE_ADDRTYPE)
+#define G_FW_RI_TPTE_ADDRTYPE(x) \
+ (((x) >> S_FW_RI_TPTE_ADDRTYPE) & M_FW_RI_TPTE_ADDRTYPE)
+#define F_FW_RI_TPTE_ADDRTYPE V_FW_RI_TPTE_ADDRTYPE(1U)
+
+#define S_FW_RI_TPTE_MWBINDEN 25
+#define M_FW_RI_TPTE_MWBINDEN 0x1
+#define V_FW_RI_TPTE_MWBINDEN(x) ((x) << S_FW_RI_TPTE_MWBINDEN)
+#define G_FW_RI_TPTE_MWBINDEN(x) \
+ (((x) >> S_FW_RI_TPTE_MWBINDEN) & M_FW_RI_TPTE_MWBINDEN)
+#define F_FW_RI_TPTE_MWBINDEN V_FW_RI_TPTE_MWBINDEN(1U)
+
+#define S_FW_RI_TPTE_PS 20
+#define M_FW_RI_TPTE_PS 0x1f
+#define V_FW_RI_TPTE_PS(x) ((x) << S_FW_RI_TPTE_PS)
+#define G_FW_RI_TPTE_PS(x) \
+ (((x) >> S_FW_RI_TPTE_PS) & M_FW_RI_TPTE_PS)
+
+#define S_FW_RI_TPTE_QPID 0
+#define M_FW_RI_TPTE_QPID 0xfffff
+#define V_FW_RI_TPTE_QPID(x) ((x) << S_FW_RI_TPTE_QPID)
+#define G_FW_RI_TPTE_QPID(x) \
+ (((x) >> S_FW_RI_TPTE_QPID) & M_FW_RI_TPTE_QPID)
+
+#define S_FW_RI_TPTE_NOSNOOP 30
+#define M_FW_RI_TPTE_NOSNOOP 0x1
+#define V_FW_RI_TPTE_NOSNOOP(x) ((x) << S_FW_RI_TPTE_NOSNOOP)
+#define G_FW_RI_TPTE_NOSNOOP(x) \
+ (((x) >> S_FW_RI_TPTE_NOSNOOP) & M_FW_RI_TPTE_NOSNOOP)
+#define F_FW_RI_TPTE_NOSNOOP V_FW_RI_TPTE_NOSNOOP(1U)
+
+#define S_FW_RI_TPTE_PBLADDR 0
+#define M_FW_RI_TPTE_PBLADDR 0x1fffffff
+#define V_FW_RI_TPTE_PBLADDR(x) ((x) << S_FW_RI_TPTE_PBLADDR)
+#define G_FW_RI_TPTE_PBLADDR(x) \
+ (((x) >> S_FW_RI_TPTE_PBLADDR) & M_FW_RI_TPTE_PBLADDR)
+
+#define S_FW_RI_TPTE_DCA 24
+#define M_FW_RI_TPTE_DCA 0x1f
+#define V_FW_RI_TPTE_DCA(x) ((x) << S_FW_RI_TPTE_DCA)
+#define G_FW_RI_TPTE_DCA(x) \
+ (((x) >> S_FW_RI_TPTE_DCA) & M_FW_RI_TPTE_DCA)
+
+#define S_FW_RI_TPTE_MWBCNT_PSTAG 0
+#define M_FW_RI_TPTE_MWBCNT_PSTAG 0xffffff
+#define V_FW_RI_TPTE_MWBCNT_PSTAG(x) \
+ ((x) << S_FW_RI_TPTE_MWBCNT_PSTAG)
+#define G_FW_RI_TPTE_MWBCNT_PSTAG(x) \
+ (((x) >> S_FW_RI_TPTE_MWBCNT_PSTAG) & M_FW_RI_TPTE_MWBCNT_PSTAG)
+
+enum fw_ri_res_type {
+ FW_RI_RES_TYPE_SQ,
+ FW_RI_RES_TYPE_RQ,
+ FW_RI_RES_TYPE_CQ,
+};
+
+enum fw_ri_res_op {
+ FW_RI_RES_OP_WRITE,
+ FW_RI_RES_OP_RESET,
+};
+
+struct fw_ri_res {
+ union fw_ri_restype {
+ struct fw_ri_res_sqrq {
+ __u8 restype;
+ __u8 op;
+ __be16 r3;
+ __be32 eqid;
+ __be32 r4[2];
+ __be32 fetchszm_to_iqid;
+ __be32 dcaen_to_eqsize;
+ __be64 eqaddr;
+ } sqrq;
+ struct fw_ri_res_cq {
+ __u8 restype;
+ __u8 op;
+ __be16 r3;
+ __be32 iqid;
+ __be32 r4[2];
+ __be32 iqandst_to_iqandstindex;
+ __be16 iqdroprss_to_iqesize;
+ __be16 iqsize;
+ __be64 iqaddr;
+ __be32 iqns_iqro;
+ __be32 r6_lo;
+ __be64 r7;
+ } cq;
+ } u;
+};
+
+struct fw_ri_res_wr {
+ __be32 op_nres;
+ __be32 len16_pkd;
+ __u64 cookie;
+#ifndef C99_NOT_SUPPORTED
+ struct fw_ri_res res[0];
+#endif
+};
+
+#define S_FW_RI_RES_WR_NRES 0
+#define M_FW_RI_RES_WR_NRES 0xff
+#define V_FW_RI_RES_WR_NRES(x) ((x) << S_FW_RI_RES_WR_NRES)
+#define G_FW_RI_RES_WR_NRES(x) \
+ (((x) >> S_FW_RI_RES_WR_NRES) & M_FW_RI_RES_WR_NRES)
+
+#define S_FW_RI_RES_WR_FETCHSZM 26
+#define M_FW_RI_RES_WR_FETCHSZM 0x1
+#define V_FW_RI_RES_WR_FETCHSZM(x) ((x) << S_FW_RI_RES_WR_FETCHSZM)
+#define G_FW_RI_RES_WR_FETCHSZM(x) \
+ (((x) >> S_FW_RI_RES_WR_FETCHSZM) & M_FW_RI_RES_WR_FETCHSZM)
+#define F_FW_RI_RES_WR_FETCHSZM V_FW_RI_RES_WR_FETCHSZM(1U)
+
+#define S_FW_RI_RES_WR_STATUSPGNS 25
+#define M_FW_RI_RES_WR_STATUSPGNS 0x1
+#define V_FW_RI_RES_WR_STATUSPGNS(x) ((x) << S_FW_RI_RES_WR_STATUSPGNS)
+#define G_FW_RI_RES_WR_STATUSPGNS(x) \
+ (((x) >> S_FW_RI_RES_WR_STATUSPGNS) & M_FW_RI_RES_WR_STATUSPGNS)
+#define F_FW_RI_RES_WR_STATUSPGNS V_FW_RI_RES_WR_STATUSPGNS(1U)
+
+#define S_FW_RI_RES_WR_STATUSPGRO 24
+#define M_FW_RI_RES_WR_STATUSPGRO 0x1
+#define V_FW_RI_RES_WR_STATUSPGRO(x) ((x) << S_FW_RI_RES_WR_STATUSPGRO)
+#define G_FW_RI_RES_WR_STATUSPGRO(x) \
+ (((x) >> S_FW_RI_RES_WR_STATUSPGRO) & M_FW_RI_RES_WR_STATUSPGRO)
+#define F_FW_RI_RES_WR_STATUSPGRO V_FW_RI_RES_WR_STATUSPGRO(1U)
+
+#define S_FW_RI_RES_WR_FETCHNS 23
+#define M_FW_RI_RES_WR_FETCHNS 0x1
+#define V_FW_RI_RES_WR_FETCHNS(x) ((x) << S_FW_RI_RES_WR_FETCHNS)
+#define G_FW_RI_RES_WR_FETCHNS(x) \
+ (((x) >> S_FW_RI_RES_WR_FETCHNS) & M_FW_RI_RES_WR_FETCHNS)
+#define F_FW_RI_RES_WR_FETCHNS V_FW_RI_RES_WR_FETCHNS(1U)
+
+#define S_FW_RI_RES_WR_FETCHRO 22
+#define M_FW_RI_RES_WR_FETCHRO 0x1
+#define V_FW_RI_RES_WR_FETCHRO(x) ((x) << S_FW_RI_RES_WR_FETCHRO)
+#define G_FW_RI_RES_WR_FETCHRO(x) \
+ (((x) >> S_FW_RI_RES_WR_FETCHRO) & M_FW_RI_RES_WR_FETCHRO)
+#define F_FW_RI_RES_WR_FETCHRO V_FW_RI_RES_WR_FETCHRO(1U)
+
+#define S_FW_RI_RES_WR_HOSTFCMODE 20
+#define M_FW_RI_RES_WR_HOSTFCMODE 0x3
+#define V_FW_RI_RES_WR_HOSTFCMODE(x) ((x) << S_FW_RI_RES_WR_HOSTFCMODE)
+#define G_FW_RI_RES_WR_HOSTFCMODE(x) \
+ (((x) >> S_FW_RI_RES_WR_HOSTFCMODE) & M_FW_RI_RES_WR_HOSTFCMODE)
+
+#define S_FW_RI_RES_WR_CPRIO 19
+#define M_FW_RI_RES_WR_CPRIO 0x1
+#define V_FW_RI_RES_WR_CPRIO(x) ((x) << S_FW_RI_RES_WR_CPRIO)
+#define G_FW_RI_RES_WR_CPRIO(x) \
+ (((x) >> S_FW_RI_RES_WR_CPRIO) & M_FW_RI_RES_WR_CPRIO)
+#define F_FW_RI_RES_WR_CPRIO V_FW_RI_RES_WR_CPRIO(1U)
+
+#define S_FW_RI_RES_WR_ONCHIP 18
+#define M_FW_RI_RES_WR_ONCHIP 0x1
+#define V_FW_RI_RES_WR_ONCHIP(x) ((x) << S_FW_RI_RES_WR_ONCHIP)
+#define G_FW_RI_RES_WR_ONCHIP(x) \
+ (((x) >> S_FW_RI_RES_WR_ONCHIP) & M_FW_RI_RES_WR_ONCHIP)
+#define F_FW_RI_RES_WR_ONCHIP V_FW_RI_RES_WR_ONCHIP(1U)
+
+#define S_FW_RI_RES_WR_PCIECHN 16
+#define M_FW_RI_RES_WR_PCIECHN 0x3
+#define V_FW_RI_RES_WR_PCIECHN(x) ((x) << S_FW_RI_RES_WR_PCIECHN)
+#define G_FW_RI_RES_WR_PCIECHN(x) \
+ (((x) >> S_FW_RI_RES_WR_PCIECHN) & M_FW_RI_RES_WR_PCIECHN)
+
+#define S_FW_RI_RES_WR_IQID 0
+#define M_FW_RI_RES_WR_IQID 0xffff
+#define V_FW_RI_RES_WR_IQID(x) ((x) << S_FW_RI_RES_WR_IQID)
+#define G_FW_RI_RES_WR_IQID(x) \
+ (((x) >> S_FW_RI_RES_WR_IQID) & M_FW_RI_RES_WR_IQID)
+
+#define S_FW_RI_RES_WR_DCAEN 31
+#define M_FW_RI_RES_WR_DCAEN 0x1
+#define V_FW_RI_RES_WR_DCAEN(x) ((x) << S_FW_RI_RES_WR_DCAEN)
+#define G_FW_RI_RES_WR_DCAEN(x) \
+ (((x) >> S_FW_RI_RES_WR_DCAEN) & M_FW_RI_RES_WR_DCAEN)
+#define F_FW_RI_RES_WR_DCAEN V_FW_RI_RES_WR_DCAEN(1U)
+
+#define S_FW_RI_RES_WR_DCACPU 26
+#define M_FW_RI_RES_WR_DCACPU 0x1f
+#define V_FW_RI_RES_WR_DCACPU(x) ((x) << S_FW_RI_RES_WR_DCACPU)
+#define G_FW_RI_RES_WR_DCACPU(x) \
+ (((x) >> S_FW_RI_RES_WR_DCACPU) & M_FW_RI_RES_WR_DCACPU)
+
+#define S_FW_RI_RES_WR_FBMIN 23
+#define M_FW_RI_RES_WR_FBMIN 0x7
+#define V_FW_RI_RES_WR_FBMIN(x) ((x) << S_FW_RI_RES_WR_FBMIN)
+#define G_FW_RI_RES_WR_FBMIN(x) \
+ (((x) >> S_FW_RI_RES_WR_FBMIN) & M_FW_RI_RES_WR_FBMIN)
+
+#define S_FW_RI_RES_WR_FBMAX 20
+#define M_FW_RI_RES_WR_FBMAX 0x7
+#define V_FW_RI_RES_WR_FBMAX(x) ((x) << S_FW_RI_RES_WR_FBMAX)
+#define G_FW_RI_RES_WR_FBMAX(x) \
+ (((x) >> S_FW_RI_RES_WR_FBMAX) & M_FW_RI_RES_WR_FBMAX)
+
+#define S_FW_RI_RES_WR_CIDXFTHRESHO 19
+#define M_FW_RI_RES_WR_CIDXFTHRESHO 0x1
+#define V_FW_RI_RES_WR_CIDXFTHRESHO(x) ((x) << S_FW_RI_RES_WR_CIDXFTHRESHO)
+#define G_FW_RI_RES_WR_CIDXFTHRESHO(x) \
+ (((x) >> S_FW_RI_RES_WR_CIDXFTHRESHO) & M_FW_RI_RES_WR_CIDXFTHRESHO)
+#define F_FW_RI_RES_WR_CIDXFTHRESHO V_FW_RI_RES_WR_CIDXFTHRESHO(1U)
+
+#define S_FW_RI_RES_WR_CIDXFTHRESH 16
+#define M_FW_RI_RES_WR_CIDXFTHRESH 0x7
+#define V_FW_RI_RES_WR_CIDXFTHRESH(x) ((x) << S_FW_RI_RES_WR_CIDXFTHRESH)
+#define G_FW_RI_RES_WR_CIDXFTHRESH(x) \
+ (((x) >> S_FW_RI_RES_WR_CIDXFTHRESH) & M_FW_RI_RES_WR_CIDXFTHRESH)
+
+#define S_FW_RI_RES_WR_EQSIZE 0
+#define M_FW_RI_RES_WR_EQSIZE 0xffff
+#define V_FW_RI_RES_WR_EQSIZE(x) ((x) << S_FW_RI_RES_WR_EQSIZE)
+#define G_FW_RI_RES_WR_EQSIZE(x) \
+ (((x) >> S_FW_RI_RES_WR_EQSIZE) & M_FW_RI_RES_WR_EQSIZE)
+
+#define S_FW_RI_RES_WR_IQANDST 15
+#define M_FW_RI_RES_WR_IQANDST 0x1
+#define V_FW_RI_RES_WR_IQANDST(x) ((x) << S_FW_RI_RES_WR_IQANDST)
+#define G_FW_RI_RES_WR_IQANDST(x) \
+ (((x) >> S_FW_RI_RES_WR_IQANDST) & M_FW_RI_RES_WR_IQANDST)
+#define F_FW_RI_RES_WR_IQANDST V_FW_RI_RES_WR_IQANDST(1U)
+
+#define S_FW_RI_RES_WR_IQANUS 14
+#define M_FW_RI_RES_WR_IQANUS 0x1
+#define V_FW_RI_RES_WR_IQANUS(x) ((x) << S_FW_RI_RES_WR_IQANUS)
+#define G_FW_RI_RES_WR_IQANUS(x) \
+ (((x) >> S_FW_RI_RES_WR_IQANUS) & M_FW_RI_RES_WR_IQANUS)
+#define F_FW_RI_RES_WR_IQANUS V_FW_RI_RES_WR_IQANUS(1U)
+
+#define S_FW_RI_RES_WR_IQANUD 12
+#define M_FW_RI_RES_WR_IQANUD 0x3
+#define V_FW_RI_RES_WR_IQANUD(x) ((x) << S_FW_RI_RES_WR_IQANUD)
+#define G_FW_RI_RES_WR_IQANUD(x) \
+ (((x) >> S_FW_RI_RES_WR_IQANUD) & M_FW_RI_RES_WR_IQANUD)
+
+#define S_FW_RI_RES_WR_IQANDSTINDEX 0
+#define M_FW_RI_RES_WR_IQANDSTINDEX 0xfff
+#define V_FW_RI_RES_WR_IQANDSTINDEX(x) ((x) << S_FW_RI_RES_WR_IQANDSTINDEX)
+#define G_FW_RI_RES_WR_IQANDSTINDEX(x) \
+ (((x) >> S_FW_RI_RES_WR_IQANDSTINDEX) & M_FW_RI_RES_WR_IQANDSTINDEX)
+
+#define S_FW_RI_RES_WR_IQDROPRSS 15
+#define M_FW_RI_RES_WR_IQDROPRSS 0x1
+#define V_FW_RI_RES_WR_IQDROPRSS(x) ((x) << S_FW_RI_RES_WR_IQDROPRSS)
+#define G_FW_RI_RES_WR_IQDROPRSS(x) \
+ (((x) >> S_FW_RI_RES_WR_IQDROPRSS) & M_FW_RI_RES_WR_IQDROPRSS)
+#define F_FW_RI_RES_WR_IQDROPRSS V_FW_RI_RES_WR_IQDROPRSS(1U)
+
+#define S_FW_RI_RES_WR_IQGTSMODE 14
+#define M_FW_RI_RES_WR_IQGTSMODE 0x1
+#define V_FW_RI_RES_WR_IQGTSMODE(x) ((x) << S_FW_RI_RES_WR_IQGTSMODE)
+#define G_FW_RI_RES_WR_IQGTSMODE(x) \
+ (((x) >> S_FW_RI_RES_WR_IQGTSMODE) & M_FW_RI_RES_WR_IQGTSMODE)
+#define F_FW_RI_RES_WR_IQGTSMODE V_FW_RI_RES_WR_IQGTSMODE(1U)
+
+#define S_FW_RI_RES_WR_IQPCIECH 12
+#define M_FW_RI_RES_WR_IQPCIECH 0x3
+#define V_FW_RI_RES_WR_IQPCIECH(x) ((x) << S_FW_RI_RES_WR_IQPCIECH)
+#define G_FW_RI_RES_WR_IQPCIECH(x) \
+ (((x) >> S_FW_RI_RES_WR_IQPCIECH) & M_FW_RI_RES_WR_IQPCIECH)
+
+#define S_FW_RI_RES_WR_IQDCAEN 11
+#define M_FW_RI_RES_WR_IQDCAEN 0x1
+#define V_FW_RI_RES_WR_IQDCAEN(x) ((x) << S_FW_RI_RES_WR_IQDCAEN)
+#define G_FW_RI_RES_WR_IQDCAEN(x) \
+ (((x) >> S_FW_RI_RES_WR_IQDCAEN) & M_FW_RI_RES_WR_IQDCAEN)
+#define F_FW_RI_RES_WR_IQDCAEN V_FW_RI_RES_WR_IQDCAEN(1U)
+
+#define S_FW_RI_RES_WR_IQDCACPU 6
+#define M_FW_RI_RES_WR_IQDCACPU 0x1f
+#define V_FW_RI_RES_WR_IQDCACPU(x) ((x) << S_FW_RI_RES_WR_IQDCACPU)
+#define G_FW_RI_RES_WR_IQDCACPU(x) \
+ (((x) >> S_FW_RI_RES_WR_IQDCACPU) & M_FW_RI_RES_WR_IQDCACPU)
+
+#define S_FW_RI_RES_WR_IQINTCNTTHRESH 4
+#define M_FW_RI_RES_WR_IQINTCNTTHRESH 0x3
+#define V_FW_RI_RES_WR_IQINTCNTTHRESH(x) \
+ ((x) << S_FW_RI_RES_WR_IQINTCNTTHRESH)
+#define G_FW_RI_RES_WR_IQINTCNTTHRESH(x) \
+ (((x) >> S_FW_RI_RES_WR_IQINTCNTTHRESH) & M_FW_RI_RES_WR_IQINTCNTTHRESH)
+
+#define S_FW_RI_RES_WR_IQO 3
+#define M_FW_RI_RES_WR_IQO 0x1
+#define V_FW_RI_RES_WR_IQO(x) ((x) << S_FW_RI_RES_WR_IQO)
+#define G_FW_RI_RES_WR_IQO(x) \
+ (((x) >> S_FW_RI_RES_WR_IQO) & M_FW_RI_RES_WR_IQO)
+#define F_FW_RI_RES_WR_IQO V_FW_RI_RES_WR_IQO(1U)
+
+#define S_FW_RI_RES_WR_IQCPRIO 2
+#define M_FW_RI_RES_WR_IQCPRIO 0x1
+#define V_FW_RI_RES_WR_IQCPRIO(x) ((x) << S_FW_RI_RES_WR_IQCPRIO)
+#define G_FW_RI_RES_WR_IQCPRIO(x) \
+ (((x) >> S_FW_RI_RES_WR_IQCPRIO) & M_FW_RI_RES_WR_IQCPRIO)
+#define F_FW_RI_RES_WR_IQCPRIO V_FW_RI_RES_WR_IQCPRIO(1U)
+
+#define S_FW_RI_RES_WR_IQESIZE 0
+#define M_FW_RI_RES_WR_IQESIZE 0x3
+#define V_FW_RI_RES_WR_IQESIZE(x) ((x) << S_FW_RI_RES_WR_IQESIZE)
+#define G_FW_RI_RES_WR_IQESIZE(x) \
+ (((x) >> S_FW_RI_RES_WR_IQESIZE) & M_FW_RI_RES_WR_IQESIZE)
+
+#define S_FW_RI_RES_WR_IQNS 31
+#define M_FW_RI_RES_WR_IQNS 0x1
+#define V_FW_RI_RES_WR_IQNS(x) ((x) << S_FW_RI_RES_WR_IQNS)
+#define G_FW_RI_RES_WR_IQNS(x) \
+ (((x) >> S_FW_RI_RES_WR_IQNS) & M_FW_RI_RES_WR_IQNS)
+#define F_FW_RI_RES_WR_IQNS V_FW_RI_RES_WR_IQNS(1U)
+
+#define S_FW_RI_RES_WR_IQRO 30
+#define M_FW_RI_RES_WR_IQRO 0x1
+#define V_FW_RI_RES_WR_IQRO(x) ((x) << S_FW_RI_RES_WR_IQRO)
+#define G_FW_RI_RES_WR_IQRO(x) \
+ (((x) >> S_FW_RI_RES_WR_IQRO) & M_FW_RI_RES_WR_IQRO)
+#define F_FW_RI_RES_WR_IQRO V_FW_RI_RES_WR_IQRO(1U)
+
+struct fw_ri_rdma_write_wr {
+ __u8 opcode;
+ __u8 flags;
+ __u16 wrid;
+ __u8 r1[3];
+ __u8 len16;
+ __be64 r2;
+ __be32 plen;
+ __be32 stag_sink;
+ __be64 to_sink;
+#ifndef C99_NOT_SUPPORTED
+ union {
+ struct fw_ri_immd immd_src[0];
+ struct fw_ri_isgl isgl_src[0];
+ } u;
+#endif
+};
+
+struct fw_ri_send_wr {
+ __u8 opcode;
+ __u8 flags;
+ __u16 wrid;
+ __u8 r1[3];
+ __u8 len16;
+ __be32 sendop_pkd;
+ __be32 stag_inv;
+ __be32 plen;
+ __be32 r3;
+ __be64 r4;
+#ifndef C99_NOT_SUPPORTED
+ union {
+ struct fw_ri_immd immd_src[0];
+ struct fw_ri_isgl isgl_src[0];
+ } u;
+#endif
+};
+
+#define S_FW_RI_SEND_WR_SENDOP 0
+#define M_FW_RI_SEND_WR_SENDOP 0xf
+#define V_FW_RI_SEND_WR_SENDOP(x) ((x) << S_FW_RI_SEND_WR_SENDOP)
+#define G_FW_RI_SEND_WR_SENDOP(x) \
+ (((x) >> S_FW_RI_SEND_WR_SENDOP) & M_FW_RI_SEND_WR_SENDOP)
+
+struct fw_ri_rdma_read_wr {
+ __u8 opcode;
+ __u8 flags;
+ __u16 wrid;
+ __u8 r1[3];
+ __u8 len16;
+ __be64 r2;
+ __be32 stag_sink;
+ __be32 to_sink_hi;
+ __be32 to_sink_lo;
+ __be32 plen;
+ __be32 stag_src;
+ __be32 to_src_hi;
+ __be32 to_src_lo;
+ __be32 r5;
+};
+
+struct fw_ri_recv_wr {
+ __u8 opcode;
+ __u8 r1;
+ __u16 wrid;
+ __u8 r2[3];
+ __u8 len16;
+ struct fw_ri_isgl isgl;
+};
+
+struct fw_ri_bind_mw_wr {
+ __u8 opcode;
+ __u8 flags;
+ __u16 wrid;
+ __u8 r1[3];
+ __u8 len16;
+ __u8 qpbinde_to_dcacpu;
+ __u8 pgsz_shift;
+ __u8 addr_type;
+ __u8 mem_perms;
+ __be32 stag_mr;
+ __be32 stag_mw;
+ __be32 r3;
+ __be64 len_mw;
+ __be64 va_fbo;
+ __be64 r4;
+};
+
+#define S_FW_RI_BIND_MW_WR_QPBINDE 6
+#define M_FW_RI_BIND_MW_WR_QPBINDE 0x1
+#define V_FW_RI_BIND_MW_WR_QPBINDE(x) ((x) << S_FW_RI_BIND_MW_WR_QPBINDE)
+#define G_FW_RI_BIND_MW_WR_QPBINDE(x) \
+ (((x) >> S_FW_RI_BIND_MW_WR_QPBINDE) & M_FW_RI_BIND_MW_WR_QPBINDE)
+#define F_FW_RI_BIND_MW_WR_QPBINDE V_FW_RI_BIND_MW_WR_QPBINDE(1U)
+
+#define S_FW_RI_BIND_MW_WR_NS 5
+#define M_FW_RI_BIND_MW_WR_NS 0x1
+#define V_FW_RI_BIND_MW_WR_NS(x) ((x) << S_FW_RI_BIND_MW_WR_NS)
+#define G_FW_RI_BIND_MW_WR_NS(x) \
+ (((x) >> S_FW_RI_BIND_MW_WR_NS) & M_FW_RI_BIND_MW_WR_NS)
+#define F_FW_RI_BIND_MW_WR_NS V_FW_RI_BIND_MW_WR_NS(1U)
+
+#define S_FW_RI_BIND_MW_WR_DCACPU 0
+#define M_FW_RI_BIND_MW_WR_DCACPU 0x1f
+#define V_FW_RI_BIND_MW_WR_DCACPU(x) ((x) << S_FW_RI_BIND_MW_WR_DCACPU)
+#define G_FW_RI_BIND_MW_WR_DCACPU(x) \
+ (((x) >> S_FW_RI_BIND_MW_WR_DCACPU) & M_FW_RI_BIND_MW_WR_DCACPU)
+
+struct fw_ri_fr_nsmr_wr {
+ __u8 opcode;
+ __u8 flags;
+ __u16 wrid;
+ __u8 r1[3];
+ __u8 len16;
+ __u8 qpbinde_to_dcacpu;
+ __u8 pgsz_shift;
+ __u8 addr_type;
+ __u8 mem_perms;
+ __be32 stag;
+ __be32 len_hi;
+ __be32 len_lo;
+ __be32 va_hi;
+ __be32 va_lo_fbo;
+};
+
+#define S_FW_RI_FR_NSMR_WR_QPBINDE 6
+#define M_FW_RI_FR_NSMR_WR_QPBINDE 0x1
+#define V_FW_RI_FR_NSMR_WR_QPBINDE(x) ((x) << S_FW_RI_FR_NSMR_WR_QPBINDE)
+#define G_FW_RI_FR_NSMR_WR_QPBINDE(x) \
+ (((x) >> S_FW_RI_FR_NSMR_WR_QPBINDE) & M_FW_RI_FR_NSMR_WR_QPBINDE)
+#define F_FW_RI_FR_NSMR_WR_QPBINDE V_FW_RI_FR_NSMR_WR_QPBINDE(1U)
+
+#define S_FW_RI_FR_NSMR_WR_NS 5
+#define M_FW_RI_FR_NSMR_WR_NS 0x1
+#define V_FW_RI_FR_NSMR_WR_NS(x) ((x) << S_FW_RI_FR_NSMR_WR_NS)
+#define G_FW_RI_FR_NSMR_WR_NS(x) \
+ (((x) >> S_FW_RI_FR_NSMR_WR_NS) & M_FW_RI_FR_NSMR_WR_NS)
+#define F_FW_RI_FR_NSMR_WR_NS V_FW_RI_FR_NSMR_WR_NS(1U)
+
+#define S_FW_RI_FR_NSMR_WR_DCACPU 0
+#define M_FW_RI_FR_NSMR_WR_DCACPU 0x1f
+#define V_FW_RI_FR_NSMR_WR_DCACPU(x) ((x) << S_FW_RI_FR_NSMR_WR_DCACPU)
+#define G_FW_RI_FR_NSMR_WR_DCACPU(x) \
+ (((x) >> S_FW_RI_FR_NSMR_WR_DCACPU) & M_FW_RI_FR_NSMR_WR_DCACPU)
+
+struct fw_ri_inv_lstag_wr {
+ __u8 opcode;
+ __u8 flags;
+ __u16 wrid;
+ __u8 r1[3];
+ __u8 len16;
+ __be32 r2;
+ __be32 stag_inv;
+};
+
+enum fw_ri_type {
+ FW_RI_TYPE_INIT,
+ FW_RI_TYPE_FINI,
+ FW_RI_TYPE_TERMINATE
+};
+
+enum fw_ri_init_p2ptype {
+ FW_RI_INIT_P2PTYPE_RDMA_WRITE = FW_RI_RDMA_WRITE,
+ FW_RI_INIT_P2PTYPE_READ_REQ = FW_RI_READ_REQ,
+ FW_RI_INIT_P2PTYPE_SEND = FW_RI_SEND,
+ FW_RI_INIT_P2PTYPE_SEND_WITH_INV = FW_RI_SEND_WITH_INV,
+ FW_RI_INIT_P2PTYPE_SEND_WITH_SE = FW_RI_SEND_WITH_SE,
+ FW_RI_INIT_P2PTYPE_SEND_WITH_SE_INV = FW_RI_SEND_WITH_SE_INV,
+ FW_RI_INIT_P2PTYPE_DISABLED = 0xf,
+};
+
+struct fw_ri_wr {
+ __be32 op_compl;
+ __be32 flowid_len16;
+ __u64 cookie;
+ union fw_ri {
+ struct fw_ri_init {
+ __u8 type;
+ __u8 mpareqbit_p2ptype;
+ __u8 r4[2];
+ __u8 mpa_attrs;
+ __u8 qp_caps;
+ __be16 nrqe;
+ __be32 pdid;
+ __be32 qpid;
+ __be32 sq_eqid;
+ __be32 rq_eqid;
+ __be32 scqid;
+ __be32 rcqid;
+ __be32 ord_max;
+ __be32 ird_max;
+ __be32 iss;
+ __be32 irs;
+ __be32 hwrqsize;
+ __be32 hwrqaddr;
+ __be64 r5;
+ union fw_ri_init_p2p {
+ struct fw_ri_rdma_write_wr write;
+ struct fw_ri_rdma_read_wr read;
+ struct fw_ri_send_wr send;
+ } u;
+ } init;
+ struct fw_ri_fini {
+ __u8 type;
+ __u8 r3[7];
+ __be64 r4;
+ } fini;
+ struct fw_ri_terminate {
+ __u8 type;
+ __u8 r3[3];
+ __be32 immdlen;
+ __u8 termmsg[40];
+ } terminate;
+ } u;
+};
+
+#define S_FW_RI_WR_MPAREQBIT 7
+#define M_FW_RI_WR_MPAREQBIT 0x1
+#define V_FW_RI_WR_MPAREQBIT(x) ((x) << S_FW_RI_WR_MPAREQBIT)
+#define G_FW_RI_WR_MPAREQBIT(x) \
+ (((x) >> S_FW_RI_WR_MPAREQBIT) & M_FW_RI_WR_MPAREQBIT)
+#define F_FW_RI_WR_MPAREQBIT V_FW_RI_WR_MPAREQBIT(1U)
+
+#define S_FW_RI_WR_P2PTYPE 0
+#define M_FW_RI_WR_P2PTYPE 0xf
+#define V_FW_RI_WR_P2PTYPE(x) ((x) << S_FW_RI_WR_P2PTYPE)
+#define G_FW_RI_WR_P2PTYPE(x) \
+ (((x) >> S_FW_RI_WR_P2PTYPE) & M_FW_RI_WR_P2PTYPE)
+
+struct tcp_options {
+ __be16 mss;
+ __u8 wsf;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8:4;
+ __u8 unknown:1;
+ __u8:1;
+ __u8 sack:1;
+ __u8 tstamp:1;
+#else
+ __u8 tstamp:1;
+ __u8 sack:1;
+ __u8:1;
+ __u8 unknown:1;
+ __u8:4;
+#endif
+};
+
+struct cpl_pass_accept_req {
+ union opcode_tid ot;
+ __be16 rsvd;
+ __be16 len;
+ __be32 hdr_len;
+ __be16 vlan;
+ __be16 l2info;
+ __be32 tos_stid;
+ struct tcp_options tcpopt;
+};
+
+/* cpl_pass_accept_req.hdr_len fields */
+#define S_SYN_RX_CHAN 0
+#define M_SYN_RX_CHAN 0xF
+#define V_SYN_RX_CHAN(x) ((x) << S_SYN_RX_CHAN)
+#define G_SYN_RX_CHAN(x) (((x) >> S_SYN_RX_CHAN) & M_SYN_RX_CHAN)
+
+#define S_TCP_HDR_LEN 10
+#define M_TCP_HDR_LEN 0x3F
+#define V_TCP_HDR_LEN(x) ((x) << S_TCP_HDR_LEN)
+#define G_TCP_HDR_LEN(x) (((x) >> S_TCP_HDR_LEN) & M_TCP_HDR_LEN)
+
+#define S_IP_HDR_LEN 16
+#define M_IP_HDR_LEN 0x3FF
+#define V_IP_HDR_LEN(x) ((x) << S_IP_HDR_LEN)
+#define G_IP_HDR_LEN(x) (((x) >> S_IP_HDR_LEN) & M_IP_HDR_LEN)
+
+#define S_ETH_HDR_LEN 26
+#define M_ETH_HDR_LEN 0x1F
+#define V_ETH_HDR_LEN(x) ((x) << S_ETH_HDR_LEN)
+#define G_ETH_HDR_LEN(x) (((x) >> S_ETH_HDR_LEN) & M_ETH_HDR_LEN)
+
+/* cpl_pass_accept_req.l2info fields */
+#define S_SYN_MAC_IDX 0
+#define M_SYN_MAC_IDX 0x1FF
+#define V_SYN_MAC_IDX(x) ((x) << S_SYN_MAC_IDX)
+#define G_SYN_MAC_IDX(x) (((x) >> S_SYN_MAC_IDX) & M_SYN_MAC_IDX)
+
+#define S_SYN_XACT_MATCH 9
+#define V_SYN_XACT_MATCH(x) ((x) << S_SYN_XACT_MATCH)
+#define F_SYN_XACT_MATCH V_SYN_XACT_MATCH(1U)
+
+#define S_SYN_INTF 12
+#define M_SYN_INTF 0xF
+#define V_SYN_INTF(x) ((x) << S_SYN_INTF)
+#define G_SYN_INTF(x) (((x) >> S_SYN_INTF) & M_SYN_INTF)
+
+struct ulptx_idata {
+ __be32 cmd_more;
+ __be32 len;
+};
+
+#define S_ULPTX_NSGE 0
+#define M_ULPTX_NSGE 0xFFFF
+#define V_ULPTX_NSGE(x) ((x) << S_ULPTX_NSGE)
+#endif /* _T4FW_RI_API_H_ */
diff --git a/drivers/infiniband/hw/cxgb4/user.h b/drivers/infiniband/hw/cxgb4/user.h
new file mode 100644
index 000000000000..ed6414abde02
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/user.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __C4IW_USER_H__
+#define __C4IW_USER_H__
+
+#define C4IW_UVERBS_ABI_VERSION 1
+
+/*
+ * Make sure that all structs defined in this file remain laid out so
+ * that they pack the same way on 32-bit and 64-bit architectures (to
+ * avoid incompatibility between 32-bit userspace and 64-bit kernels).
+ * In particular do not use pointer types -- pass pointers in __u64
+ * instead.
+ */
+struct c4iw_create_cq_resp {
+ __u64 key;
+ __u64 gts_key;
+ __u64 memsize;
+ __u32 cqid;
+ __u32 size;
+ __u32 qid_mask;
+};
+
+struct c4iw_create_qp_resp {
+ __u64 sq_key;
+ __u64 rq_key;
+ __u64 sq_db_gts_key;
+ __u64 rq_db_gts_key;
+ __u64 sq_memsize;
+ __u64 rq_memsize;
+ __u32 sqid;
+ __u32 rqid;
+ __u32 sq_size;
+ __u32 rq_size;
+ __u32 qid_mask;
+};
+#endif
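/*
 * Illustration, not from this patch: the layout rule above is what keeps
 * 32-bit userspace compatible with a 64-bit kernel.  Pointer or "long"
 * members change size and alignment between the two ABIs; fixed-width
 * members, widest first, pack identically everywhere.  Hypothetical
 * structs for comparison:
 */
struct example_bad_resp {
	void *buf;		/* 4 bytes on 32-bit, 8 bytes on 64-bit */
	unsigned long len;	/* likewise */
};

struct example_good_resp {
	__u64 buf;		/* userspace address carried as a __u64 */
	__u64 len;
};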
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index 07cae552cafb..e571e60ecb88 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -847,7 +847,7 @@ static int __cpuinit comp_pool_callback(struct notifier_block *nfb,
ehca_gen_dbg("CPU: %x (CPU_PREPARE)", cpu);
if (!create_comp_task(pool, cpu)) {
ehca_gen_err("Can't create comp_task for cpu: %x", cpu);
- return NOTIFY_BAD;
+ return notifier_from_errno(-ENOMEM);
}
break;
case CPU_UP_CANCELED:
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index 129a6bebd6e3..ecb51b396c42 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -291,8 +291,9 @@ static int ehca_sense_attributes(struct ehca_shca *shca)
};
ehca_gen_dbg("Probing adapter %s...",
- shca->ofdev->node->full_name);
- loc_code = of_get_property(shca->ofdev->node, "ibm,loc-code", NULL);
+ shca->ofdev->dev.of_node->full_name);
+ loc_code = of_get_property(shca->ofdev->dev.of_node, "ibm,loc-code",
+ NULL);
if (loc_code)
ehca_gen_dbg(" ... location lode=%s", loc_code);
@@ -720,16 +721,16 @@ static int __devinit ehca_probe(struct of_device *dev,
int ret, i, eq_size;
unsigned long flags;
- handle = of_get_property(dev->node, "ibm,hca-handle", NULL);
+ handle = of_get_property(dev->dev.of_node, "ibm,hca-handle", NULL);
if (!handle) {
ehca_gen_err("Cannot get eHCA handle for adapter: %s.",
- dev->node->full_name);
+ dev->dev.of_node->full_name);
return -ENODEV;
}
if (!(*handle)) {
ehca_gen_err("Wrong eHCA handle for adapter: %s.",
- dev->node->full_name);
+ dev->dev.of_node->full_name);
return -ENODEV;
}
@@ -798,7 +799,7 @@ static int __devinit ehca_probe(struct of_device *dev,
goto probe5;
}
- ret = ib_register_device(&shca->ib_device);
+ ret = ib_register_device(&shca->ib_device, NULL);
if (ret) {
ehca_err(&shca->ib_device,
"ib_register_device() failed ret=%i", ret);
@@ -936,12 +937,13 @@ static struct of_device_id ehca_device_table[] =
MODULE_DEVICE_TABLE(of, ehca_device_table);
static struct of_platform_driver ehca_driver = {
- .name = "ehca",
- .match_table = ehca_device_table,
.probe = ehca_probe,
.remove = ehca_remove,
- .driver = {
+ .driver = {
+ .name = "ehca",
+ .owner = THIS_MODULE,
.groups = ehca_drv_attr_groups,
+ .of_match_table = ehca_device_table,
},
};
diff --git a/drivers/infiniband/hw/ipath/Kconfig b/drivers/infiniband/hw/ipath/Kconfig
index 3c7968f25ec2..1d9bb115cbf6 100644
--- a/drivers/infiniband/hw/ipath/Kconfig
+++ b/drivers/infiniband/hw/ipath/Kconfig
@@ -1,9 +1,11 @@
config INFINIBAND_IPATH
- tristate "QLogic InfiniPath Driver"
- depends on 64BIT && NET
+ tristate "QLogic HTX HCA support"
+ depends on 64BIT && NET && HT_IRQ
---help---
- This is a driver for QLogic InfiniPath host channel adapters,
+ This is a driver for the obsolete QLogic Hyper-Transport
+ IB host channel adapter (model QHT7140),
including InfiniBand verbs support. This driver allows these
devices to be used with both kernel upper level protocols such
as IP-over-InfiniBand as well as with userspace applications
(in conjunction with InfiniBand userspace access).
+ For QLogic PCIe QLE based cards, use the QIB driver instead.
diff --git a/drivers/infiniband/hw/ipath/Makefile b/drivers/infiniband/hw/ipath/Makefile
index bf9450061986..fa3df82681df 100644
--- a/drivers/infiniband/hw/ipath/Makefile
+++ b/drivers/infiniband/hw/ipath/Makefile
@@ -29,13 +29,9 @@ ib_ipath-y := \
ipath_user_pages.o \
ipath_user_sdma.o \
ipath_verbs_mcast.o \
- ipath_verbs.o \
- ipath_iba7220.o \
- ipath_sd7220.o \
- ipath_sd7220_img.o
+ ipath_verbs.o
ib_ipath-$(CONFIG_HT_IRQ) += ipath_iba6110.o
-ib_ipath-$(CONFIG_PCI_MSI) += ipath_iba6120.o
ib_ipath-$(CONFIG_X86_64) += ipath_wc_x86_64.o
ib_ipath-$(CONFIG_PPC64) += ipath_wc_ppc64.o
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index 6302626d17f0..21337468c652 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -132,18 +132,13 @@ static int __devinit ipath_init_one(struct pci_dev *,
/* Only needed for registration, nothing else needs this info */
#define PCI_VENDOR_ID_PATHSCALE 0x1fc1
-#define PCI_VENDOR_ID_QLOGIC 0x1077
#define PCI_DEVICE_ID_INFINIPATH_HT 0xd
-#define PCI_DEVICE_ID_INFINIPATH_PE800 0x10
-#define PCI_DEVICE_ID_INFINIPATH_7220 0x7220
/* Number of seconds before our card status check... */
#define STATUS_TIMEOUT 60
static const struct pci_device_id ipath_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_INFINIPATH_HT) },
- { PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_INFINIPATH_PE800) },
- { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_INFINIPATH_7220) },
{ 0, }
};
@@ -521,30 +516,9 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
/* setup the chip-specific functions, as early as possible. */
switch (ent->device) {
case PCI_DEVICE_ID_INFINIPATH_HT:
-#ifdef CONFIG_HT_IRQ
ipath_init_iba6110_funcs(dd);
break;
-#else
- ipath_dev_err(dd, "QLogic HT device 0x%x cannot work if "
- "CONFIG_HT_IRQ is not enabled\n", ent->device);
- return -ENODEV;
-#endif
- case PCI_DEVICE_ID_INFINIPATH_PE800:
-#ifdef CONFIG_PCI_MSI
- ipath_init_iba6120_funcs(dd);
- break;
-#else
- ipath_dev_err(dd, "QLogic PCIE device 0x%x cannot work if "
- "CONFIG_PCI_MSI is not enabled\n", ent->device);
- return -ENODEV;
-#endif
- case PCI_DEVICE_ID_INFINIPATH_7220:
-#ifndef CONFIG_PCI_MSI
- ipath_dbg("CONFIG_PCI_MSI is not enabled, "
- "using INTx for unit %u\n", dd->ipath_unit);
-#endif
- ipath_init_iba7220_funcs(dd);
- break;
+
default:
ipath_dev_err(dd, "Found unknown QLogic deviceid 0x%x, "
"failing\n", ent->device);
diff --git a/drivers/infiniband/hw/ipath/ipath_iba6110.c b/drivers/infiniband/hw/ipath/ipath_iba6110.c
index 37d12e5efa49..1d7aea132a09 100644
--- a/drivers/infiniband/hw/ipath/ipath_iba6110.c
+++ b/drivers/infiniband/hw/ipath/ipath_iba6110.c
@@ -1474,7 +1474,7 @@ static void ipath_ht_quiet_serdes(struct ipath_devdata *dd)
/**
* ipath_pe_put_tid - write a TID in chip
* @dd: the infinipath device
- * @tidptr: pointer to the expected TID (in chip) to udpate
+ * @tidptr: pointer to the expected TID (in chip) to update
* @tidtype: RCVHQ_RCV_TYPE_EAGER (1) for eager, RCVHQ_RCV_TYPE_EXPECTED (0) for expected
* @pa: physical address of in memory buffer; ipath_tidinvalid if freeing
*
diff --git a/drivers/infiniband/hw/ipath/ipath_iba6120.c b/drivers/infiniband/hw/ipath/ipath_iba6120.c
deleted file mode 100644
index fbf8c5379ea8..000000000000
--- a/drivers/infiniband/hw/ipath/ipath_iba6120.c
+++ /dev/null
@@ -1,1862 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-/*
- * This file contains all of the code that is specific to the
- * InfiniPath PCIe chip.
- */
-
-#include <linux/interrupt.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <rdma/ib_verbs.h>
-
-#include "ipath_kernel.h"
-#include "ipath_registers.h"
-
-static void ipath_setup_pe_setextled(struct ipath_devdata *, u64, u64);
-
-/*
- * This file contains all the chip-specific register information and
- * access functions for the QLogic InfiniPath PCI-Express chip.
- *
- * This lists the InfiniPath registers, in the actual chip layout.
- * This structure should never be directly accessed.
- */
-struct _infinipath_do_not_use_kernel_regs {
- unsigned long long Revision;
- unsigned long long Control;
- unsigned long long PageAlign;
- unsigned long long PortCnt;
- unsigned long long DebugPortSelect;
- unsigned long long Reserved0;
- unsigned long long SendRegBase;
- unsigned long long UserRegBase;
- unsigned long long CounterRegBase;
- unsigned long long Scratch;
- unsigned long long Reserved1;
- unsigned long long Reserved2;
- unsigned long long IntBlocked;
- unsigned long long IntMask;
- unsigned long long IntStatus;
- unsigned long long IntClear;
- unsigned long long ErrorMask;
- unsigned long long ErrorStatus;
- unsigned long long ErrorClear;
- unsigned long long HwErrMask;
- unsigned long long HwErrStatus;
- unsigned long long HwErrClear;
- unsigned long long HwDiagCtrl;
- unsigned long long MDIO;
- unsigned long long IBCStatus;
- unsigned long long IBCCtrl;
- unsigned long long ExtStatus;
- unsigned long long ExtCtrl;
- unsigned long long GPIOOut;
- unsigned long long GPIOMask;
- unsigned long long GPIOStatus;
- unsigned long long GPIOClear;
- unsigned long long RcvCtrl;
- unsigned long long RcvBTHQP;
- unsigned long long RcvHdrSize;
- unsigned long long RcvHdrCnt;
- unsigned long long RcvHdrEntSize;
- unsigned long long RcvTIDBase;
- unsigned long long RcvTIDCnt;
- unsigned long long RcvEgrBase;
- unsigned long long RcvEgrCnt;
- unsigned long long RcvBufBase;
- unsigned long long RcvBufSize;
- unsigned long long RxIntMemBase;
- unsigned long long RxIntMemSize;
- unsigned long long RcvPartitionKey;
- unsigned long long Reserved3;
- unsigned long long RcvPktLEDCnt;
- unsigned long long Reserved4[8];
- unsigned long long SendCtrl;
- unsigned long long SendPIOBufBase;
- unsigned long long SendPIOSize;
- unsigned long long SendPIOBufCnt;
- unsigned long long SendPIOAvailAddr;
- unsigned long long TxIntMemBase;
- unsigned long long TxIntMemSize;
- unsigned long long Reserved5;
- unsigned long long PCIeRBufTestReg0;
- unsigned long long PCIeRBufTestReg1;
- unsigned long long Reserved51[6];
- unsigned long long SendBufferError;
- unsigned long long SendBufferErrorCONT1;
- unsigned long long Reserved6SBE[6];
- unsigned long long RcvHdrAddr0;
- unsigned long long RcvHdrAddr1;
- unsigned long long RcvHdrAddr2;
- unsigned long long RcvHdrAddr3;
- unsigned long long RcvHdrAddr4;
- unsigned long long Reserved7RHA[11];
- unsigned long long RcvHdrTailAddr0;
- unsigned long long RcvHdrTailAddr1;
- unsigned long long RcvHdrTailAddr2;
- unsigned long long RcvHdrTailAddr3;
- unsigned long long RcvHdrTailAddr4;
- unsigned long long Reserved8RHTA[11];
- unsigned long long Reserved9SW[8];
- unsigned long long SerdesConfig0;
- unsigned long long SerdesConfig1;
- unsigned long long SerdesStatus;
- unsigned long long XGXSConfig;
- unsigned long long IBPLLCfg;
- unsigned long long Reserved10SW2[3];
- unsigned long long PCIEQ0SerdesConfig0;
- unsigned long long PCIEQ0SerdesConfig1;
- unsigned long long PCIEQ0SerdesStatus;
- unsigned long long Reserved11;
- unsigned long long PCIEQ1SerdesConfig0;
- unsigned long long PCIEQ1SerdesConfig1;
- unsigned long long PCIEQ1SerdesStatus;
- unsigned long long Reserved12;
-};
-
-struct _infinipath_do_not_use_counters {
- __u64 LBIntCnt;
- __u64 LBFlowStallCnt;
- __u64 Reserved1;
- __u64 TxUnsupVLErrCnt;
- __u64 TxDataPktCnt;
- __u64 TxFlowPktCnt;
- __u64 TxDwordCnt;
- __u64 TxLenErrCnt;
- __u64 TxMaxMinLenErrCnt;
- __u64 TxUnderrunCnt;
- __u64 TxFlowStallCnt;
- __u64 TxDroppedPktCnt;
- __u64 RxDroppedPktCnt;
- __u64 RxDataPktCnt;
- __u64 RxFlowPktCnt;
- __u64 RxDwordCnt;
- __u64 RxLenErrCnt;
- __u64 RxMaxMinLenErrCnt;
- __u64 RxICRCErrCnt;
- __u64 RxVCRCErrCnt;
- __u64 RxFlowCtrlErrCnt;
- __u64 RxBadFormatCnt;
- __u64 RxLinkProblemCnt;
- __u64 RxEBPCnt;
- __u64 RxLPCRCErrCnt;
- __u64 RxBufOvflCnt;
- __u64 RxTIDFullErrCnt;
- __u64 RxTIDValidErrCnt;
- __u64 RxPKeyMismatchCnt;
- __u64 RxP0HdrEgrOvflCnt;
- __u64 RxP1HdrEgrOvflCnt;
- __u64 RxP2HdrEgrOvflCnt;
- __u64 RxP3HdrEgrOvflCnt;
- __u64 RxP4HdrEgrOvflCnt;
- __u64 RxP5HdrEgrOvflCnt;
- __u64 RxP6HdrEgrOvflCnt;
- __u64 RxP7HdrEgrOvflCnt;
- __u64 RxP8HdrEgrOvflCnt;
- __u64 Reserved6;
- __u64 Reserved7;
- __u64 IBStatusChangeCnt;
- __u64 IBLinkErrRecoveryCnt;
- __u64 IBLinkDownedCnt;
- __u64 IBSymbolErrCnt;
-};
-
-#define IPATH_KREG_OFFSET(field) (offsetof( \
- struct _infinipath_do_not_use_kernel_regs, field) / sizeof(u64))
-#define IPATH_CREG_OFFSET(field) (offsetof( \
- struct _infinipath_do_not_use_counters, field) / sizeof(u64))
-
-static const struct ipath_kregs ipath_pe_kregs = {
- .kr_control = IPATH_KREG_OFFSET(Control),
- .kr_counterregbase = IPATH_KREG_OFFSET(CounterRegBase),
- .kr_debugportselect = IPATH_KREG_OFFSET(DebugPortSelect),
- .kr_errorclear = IPATH_KREG_OFFSET(ErrorClear),
- .kr_errormask = IPATH_KREG_OFFSET(ErrorMask),
- .kr_errorstatus = IPATH_KREG_OFFSET(ErrorStatus),
- .kr_extctrl = IPATH_KREG_OFFSET(ExtCtrl),
- .kr_extstatus = IPATH_KREG_OFFSET(ExtStatus),
- .kr_gpio_clear = IPATH_KREG_OFFSET(GPIOClear),
- .kr_gpio_mask = IPATH_KREG_OFFSET(GPIOMask),
- .kr_gpio_out = IPATH_KREG_OFFSET(GPIOOut),
- .kr_gpio_status = IPATH_KREG_OFFSET(GPIOStatus),
- .kr_hwdiagctrl = IPATH_KREG_OFFSET(HwDiagCtrl),
- .kr_hwerrclear = IPATH_KREG_OFFSET(HwErrClear),
- .kr_hwerrmask = IPATH_KREG_OFFSET(HwErrMask),
- .kr_hwerrstatus = IPATH_KREG_OFFSET(HwErrStatus),
- .kr_ibcctrl = IPATH_KREG_OFFSET(IBCCtrl),
- .kr_ibcstatus = IPATH_KREG_OFFSET(IBCStatus),
- .kr_intblocked = IPATH_KREG_OFFSET(IntBlocked),
- .kr_intclear = IPATH_KREG_OFFSET(IntClear),
- .kr_intmask = IPATH_KREG_OFFSET(IntMask),
- .kr_intstatus = IPATH_KREG_OFFSET(IntStatus),
- .kr_mdio = IPATH_KREG_OFFSET(MDIO),
- .kr_pagealign = IPATH_KREG_OFFSET(PageAlign),
- .kr_partitionkey = IPATH_KREG_OFFSET(RcvPartitionKey),
- .kr_portcnt = IPATH_KREG_OFFSET(PortCnt),
- .kr_rcvbthqp = IPATH_KREG_OFFSET(RcvBTHQP),
- .kr_rcvbufbase = IPATH_KREG_OFFSET(RcvBufBase),
- .kr_rcvbufsize = IPATH_KREG_OFFSET(RcvBufSize),
- .kr_rcvctrl = IPATH_KREG_OFFSET(RcvCtrl),
- .kr_rcvegrbase = IPATH_KREG_OFFSET(RcvEgrBase),
- .kr_rcvegrcnt = IPATH_KREG_OFFSET(RcvEgrCnt),
- .kr_rcvhdrcnt = IPATH_KREG_OFFSET(RcvHdrCnt),
- .kr_rcvhdrentsize = IPATH_KREG_OFFSET(RcvHdrEntSize),
- .kr_rcvhdrsize = IPATH_KREG_OFFSET(RcvHdrSize),
- .kr_rcvintmembase = IPATH_KREG_OFFSET(RxIntMemBase),
- .kr_rcvintmemsize = IPATH_KREG_OFFSET(RxIntMemSize),
- .kr_rcvtidbase = IPATH_KREG_OFFSET(RcvTIDBase),
- .kr_rcvtidcnt = IPATH_KREG_OFFSET(RcvTIDCnt),
- .kr_revision = IPATH_KREG_OFFSET(Revision),
- .kr_scratch = IPATH_KREG_OFFSET(Scratch),
- .kr_sendbuffererror = IPATH_KREG_OFFSET(SendBufferError),
- .kr_sendctrl = IPATH_KREG_OFFSET(SendCtrl),
- .kr_sendpioavailaddr = IPATH_KREG_OFFSET(SendPIOAvailAddr),
- .kr_sendpiobufbase = IPATH_KREG_OFFSET(SendPIOBufBase),
- .kr_sendpiobufcnt = IPATH_KREG_OFFSET(SendPIOBufCnt),
- .kr_sendpiosize = IPATH_KREG_OFFSET(SendPIOSize),
- .kr_sendregbase = IPATH_KREG_OFFSET(SendRegBase),
- .kr_txintmembase = IPATH_KREG_OFFSET(TxIntMemBase),
- .kr_txintmemsize = IPATH_KREG_OFFSET(TxIntMemSize),
- .kr_userregbase = IPATH_KREG_OFFSET(UserRegBase),
- .kr_serdesconfig0 = IPATH_KREG_OFFSET(SerdesConfig0),
- .kr_serdesconfig1 = IPATH_KREG_OFFSET(SerdesConfig1),
- .kr_serdesstatus = IPATH_KREG_OFFSET(SerdesStatus),
- .kr_xgxsconfig = IPATH_KREG_OFFSET(XGXSConfig),
- .kr_ibpllcfg = IPATH_KREG_OFFSET(IBPLLCfg),
-
- /*
- * These should not be used directly via ipath_write_kreg64();
- * use them with ipath_write_kreg64_port().
- */
- .kr_rcvhdraddr = IPATH_KREG_OFFSET(RcvHdrAddr0),
- .kr_rcvhdrtailaddr = IPATH_KREG_OFFSET(RcvHdrTailAddr0),
-
- /* The rcvpktled register controls one of the debug port signals, so
- * a packet activity LED can be connected to it. */
- .kr_rcvpktledcnt = IPATH_KREG_OFFSET(RcvPktLEDCnt),
- .kr_pcierbuftestreg0 = IPATH_KREG_OFFSET(PCIeRBufTestReg0),
- .kr_pcierbuftestreg1 = IPATH_KREG_OFFSET(PCIeRBufTestReg1),
- .kr_pcieq0serdesconfig0 = IPATH_KREG_OFFSET(PCIEQ0SerdesConfig0),
- .kr_pcieq0serdesconfig1 = IPATH_KREG_OFFSET(PCIEQ0SerdesConfig1),
- .kr_pcieq0serdesstatus = IPATH_KREG_OFFSET(PCIEQ0SerdesStatus),
- .kr_pcieq1serdesconfig0 = IPATH_KREG_OFFSET(PCIEQ1SerdesConfig0),
- .kr_pcieq1serdesconfig1 = IPATH_KREG_OFFSET(PCIEQ1SerdesConfig1),
- .kr_pcieq1serdesstatus = IPATH_KREG_OFFSET(PCIEQ1SerdesStatus)
-};
-
-static const struct ipath_cregs ipath_pe_cregs = {
- .cr_badformatcnt = IPATH_CREG_OFFSET(RxBadFormatCnt),
- .cr_erricrccnt = IPATH_CREG_OFFSET(RxICRCErrCnt),
- .cr_errlinkcnt = IPATH_CREG_OFFSET(RxLinkProblemCnt),
- .cr_errlpcrccnt = IPATH_CREG_OFFSET(RxLPCRCErrCnt),
- .cr_errpkey = IPATH_CREG_OFFSET(RxPKeyMismatchCnt),
- .cr_errrcvflowctrlcnt = IPATH_CREG_OFFSET(RxFlowCtrlErrCnt),
- .cr_err_rlencnt = IPATH_CREG_OFFSET(RxLenErrCnt),
- .cr_errslencnt = IPATH_CREG_OFFSET(TxLenErrCnt),
- .cr_errtidfull = IPATH_CREG_OFFSET(RxTIDFullErrCnt),
- .cr_errtidvalid = IPATH_CREG_OFFSET(RxTIDValidErrCnt),
- .cr_errvcrccnt = IPATH_CREG_OFFSET(RxVCRCErrCnt),
- .cr_ibstatuschange = IPATH_CREG_OFFSET(IBStatusChangeCnt),
- .cr_intcnt = IPATH_CREG_OFFSET(LBIntCnt),
- .cr_invalidrlencnt = IPATH_CREG_OFFSET(RxMaxMinLenErrCnt),
- .cr_invalidslencnt = IPATH_CREG_OFFSET(TxMaxMinLenErrCnt),
- .cr_lbflowstallcnt = IPATH_CREG_OFFSET(LBFlowStallCnt),
- .cr_pktrcvcnt = IPATH_CREG_OFFSET(RxDataPktCnt),
- .cr_pktrcvflowctrlcnt = IPATH_CREG_OFFSET(RxFlowPktCnt),
- .cr_pktsendcnt = IPATH_CREG_OFFSET(TxDataPktCnt),
- .cr_pktsendflowcnt = IPATH_CREG_OFFSET(TxFlowPktCnt),
- .cr_portovflcnt = IPATH_CREG_OFFSET(RxP0HdrEgrOvflCnt),
- .cr_rcvebpcnt = IPATH_CREG_OFFSET(RxEBPCnt),
- .cr_rcvovflcnt = IPATH_CREG_OFFSET(RxBufOvflCnt),
- .cr_senddropped = IPATH_CREG_OFFSET(TxDroppedPktCnt),
- .cr_sendstallcnt = IPATH_CREG_OFFSET(TxFlowStallCnt),
- .cr_sendunderruncnt = IPATH_CREG_OFFSET(TxUnderrunCnt),
- .cr_wordrcvcnt = IPATH_CREG_OFFSET(RxDwordCnt),
- .cr_wordsendcnt = IPATH_CREG_OFFSET(TxDwordCnt),
- .cr_unsupvlcnt = IPATH_CREG_OFFSET(TxUnsupVLErrCnt),
- .cr_rxdroppktcnt = IPATH_CREG_OFFSET(RxDroppedPktCnt),
- .cr_iblinkerrrecovcnt = IPATH_CREG_OFFSET(IBLinkErrRecoveryCnt),
- .cr_iblinkdowncnt = IPATH_CREG_OFFSET(IBLinkDownedCnt),
- .cr_ibsymbolerrcnt = IPATH_CREG_OFFSET(IBSymbolErrCnt)
-};
-
-/* kr_control bits */
-#define INFINIPATH_C_RESET 1U
-
-/* kr_intstatus, kr_intclear, kr_intmask bits */
-#define INFINIPATH_I_RCVURG_MASK ((1U<<5)-1)
-#define INFINIPATH_I_RCVURG_SHIFT 0
-#define INFINIPATH_I_RCVAVAIL_MASK ((1U<<5)-1)
-#define INFINIPATH_I_RCVAVAIL_SHIFT 12
-
-/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */
-#define INFINIPATH_HWE_PCIEMEMPARITYERR_MASK 0x000000000000003fULL
-#define INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT 0
-#define INFINIPATH_HWE_PCIEPOISONEDTLP 0x0000000010000000ULL
-#define INFINIPATH_HWE_PCIECPLTIMEOUT 0x0000000020000000ULL
-#define INFINIPATH_HWE_PCIEBUSPARITYXTLH 0x0000000040000000ULL
-#define INFINIPATH_HWE_PCIEBUSPARITYXADM 0x0000000080000000ULL
-#define INFINIPATH_HWE_PCIEBUSPARITYRADM 0x0000000100000000ULL
-#define INFINIPATH_HWE_COREPLL_FBSLIP 0x0080000000000000ULL
-#define INFINIPATH_HWE_COREPLL_RFSLIP 0x0100000000000000ULL
-#define INFINIPATH_HWE_PCIE1PLLFAILED 0x0400000000000000ULL
-#define INFINIPATH_HWE_PCIE0PLLFAILED 0x0800000000000000ULL
-#define INFINIPATH_HWE_SERDESPLLFAILED 0x1000000000000000ULL
-
-#define IBA6120_IBCS_LINKTRAININGSTATE_MASK 0xf
-#define IBA6120_IBCS_LINKSTATE_SHIFT 4
-
-/* kr_extstatus bits */
-#define INFINIPATH_EXTS_FREQSEL 0x2
-#define INFINIPATH_EXTS_SERDESSEL 0x4
-#define INFINIPATH_EXTS_MEMBIST_ENDTEST 0x0000000000004000
-#define INFINIPATH_EXTS_MEMBIST_FOUND 0x0000000000008000
-
-/* kr_xgxsconfig bits */
-#define INFINIPATH_XGXS_RESET 0x5ULL
-
-#define _IPATH_GPIO_SDA_NUM 1
-#define _IPATH_GPIO_SCL_NUM 0
-
-#define IPATH_GPIO_SDA (1ULL << \
- (_IPATH_GPIO_SDA_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT))
-#define IPATH_GPIO_SCL (1ULL << \
- (_IPATH_GPIO_SCL_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT))
-
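-/*
- * Layout of a receive TID entry (see ipath_pe_put_tid()): bits 31:29
- * encode the buffer size (value n selects a 1 << (n + 10) byte buffer,
- * so 1 = 2KB and 2 = 4KB), and bits 28:0 hold the buffer's physical
- * address shifted right by 11.
- */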
-#define INFINIPATH_RT_BUFSIZE_MASK 0xe0000000ULL
-#define INFINIPATH_RT_BUFSIZE_SHIFTVAL(tid) \
- ((((tid) & INFINIPATH_RT_BUFSIZE_MASK) >> 29) + 11 - 1)
-#define INFINIPATH_RT_BUFSIZE(tid) (1 << INFINIPATH_RT_BUFSIZE_SHIFTVAL(tid))
-#define INFINIPATH_RT_IS_VALID(tid) \
- (((tid) & INFINIPATH_RT_BUFSIZE_MASK) && \
- ((((tid) & INFINIPATH_RT_BUFSIZE_MASK) != INFINIPATH_RT_BUFSIZE_MASK)))
-#define INFINIPATH_RT_ADDR_MASK 0x1FFFFFFFULL /* 29 bits valid */
-#define INFINIPATH_RT_ADDR_SHIFT 10
-
-#define INFINIPATH_R_INTRAVAIL_SHIFT 16
-#define INFINIPATH_R_TAILUPD_SHIFT 31
-
-/* 6120 specific hardware errors... */
-static const struct ipath_hwerror_msgs ipath_6120_hwerror_msgs[] = {
- INFINIPATH_HWE_MSG(PCIEPOISONEDTLP, "PCIe Poisoned TLP"),
- INFINIPATH_HWE_MSG(PCIECPLTIMEOUT, "PCIe completion timeout"),
- /*
- * In practice, it's unlikely that we'll see PCIe PLL, bus parity,
- * or memory parity error failures, because most likely we won't be
- * able to talk to the core of the chip. Nonetheless, we might see
- * them if they are in parts of the PCIe core that aren't
- * essential.
- */
- INFINIPATH_HWE_MSG(PCIE1PLLFAILED, "PCIePLL1"),
- INFINIPATH_HWE_MSG(PCIE0PLLFAILED, "PCIePLL0"),
- INFINIPATH_HWE_MSG(PCIEBUSPARITYXTLH, "PCIe XTLH core parity"),
- INFINIPATH_HWE_MSG(PCIEBUSPARITYXADM, "PCIe ADM TX core parity"),
- INFINIPATH_HWE_MSG(PCIEBUSPARITYRADM, "PCIe ADM RX core parity"),
- INFINIPATH_HWE_MSG(RXDSYNCMEMPARITYERR, "Rx Dsync"),
- INFINIPATH_HWE_MSG(SERDESPLLFAILED, "SerDes PLL"),
-};
-
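-/*
- * Combined masks for the TXE PIO-buffer/PBC and RXE eager-TID memory
- * parity bits, shifted into position in the hwerror registers; used
- * below when deciding which hardware errors to report and which can
- * be recovered from.
- */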
-#define TXE_PIO_PARITY ((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF | \
- INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC) \
- << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT)
-#define RXE_EAGER_PARITY (INFINIPATH_HWE_RXEMEMPARITYERR_EAGERTID \
- << INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT)
-
-static void ipath_pe_put_tid_2(struct ipath_devdata *, u64 __iomem *,
- u32, unsigned long);
-
-/*
- * On platforms using this chip that do not have ordered WC stores, we
- * can get TXE parity errors due to speculative reads to the PIO buffers,
- * and this, due to a chip bug, can result in (many) false parity error
- * reports. So it's only a debug print on those systems, and an info
- * print on systems where the speculative reads don't occur.
- */
-static void ipath_pe_txe_recover(struct ipath_devdata *dd)
-{
- if (ipath_unordered_wc())
- ipath_dbg("Recovering from TXE PIO parity error\n");
- else {
- ++ipath_stats.sps_txeparity;
- dev_info(&dd->pcidev->dev,
- "Recovering from TXE PIO parity error\n");
- }
-}
-
-
-/**
- * ipath_pe_handle_hwerrors - display hardware errors.
- * @dd: the infinipath device
- * @msg: the output buffer
- * @msgl: the size of the output buffer
- *
- * Most hardware errors are catastrophic, but for right now we'll
- * print them and continue. We reuse the same message buffer as
- * ipath_handle_errors() to avoid excessive stack usage.
- */
-static void ipath_pe_handle_hwerrors(struct ipath_devdata *dd, char *msg,
- size_t msgl)
-{
- ipath_err_t hwerrs;
- u32 bits, ctrl;
- int isfatal = 0;
- char bitsmsg[64];
- int log_idx;
-
- hwerrs = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus);
- if (!hwerrs) {
- /*
- * Better than printing confusing messages: this seems to be
- * related to clearing the CRC error or the PLL error during
- * init.
- */
- ipath_cdbg(VERBOSE, "Called but no hardware errors set\n");
- return;
- } else if (hwerrs == ~0ULL) {
- ipath_dev_err(dd, "Read of hardware error status failed "
- "(all bits set); ignoring\n");
- return;
- }
- ipath_stats.sps_hwerrs++;
-
- /* Always clear the error status register, except MEMBISTFAIL,
- * regardless of whether we continue or stop using the chip.
- * We want that set so we know it failed, even across driver reload.
- * We'll still ignore it in the hwerrmask. We do this partly for
- * diagnostics, but also for support */
- ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
- hwerrs&~INFINIPATH_HWE_MEMBISTFAILED);
-
- hwerrs &= dd->ipath_hwerrmask;
-
- /* We log some errors to EEPROM, check if we have any of those. */
- for (log_idx = 0; log_idx < IPATH_EEP_LOG_CNT; ++log_idx)
- if (hwerrs & dd->ipath_eep_st_masks[log_idx].hwerrs_to_log)
- ipath_inc_eeprom_err(dd, log_idx, 1);
-
- /*
- * make sure we get this much out, unless told to be quiet,
- * or it's occurred within the last 5 seconds
- */
- if ((hwerrs & ~(dd->ipath_lasthwerror | TXE_PIO_PARITY |
- RXE_EAGER_PARITY)) ||
- (ipath_debug & __IPATH_VERBDBG))
- dev_info(&dd->pcidev->dev, "Hardware error: hwerr=0x%llx "
- "(cleared)\n", (unsigned long long) hwerrs);
- dd->ipath_lasthwerror |= hwerrs;
-
- if (hwerrs & ~dd->ipath_hwe_bitsextant)
- ipath_dev_err(dd, "hwerror interrupt with unknown errors "
- "%llx set\n", (unsigned long long)
- (hwerrs & ~dd->ipath_hwe_bitsextant));
-
- ctrl = ipath_read_kreg32(dd, dd->ipath_kregs->kr_control);
- if ((ctrl & INFINIPATH_C_FREEZEMODE) && !ipath_diag_inuse) {
- /*
- * Parity errors in send memory are recoverable:
- * just cancel the send (if indicated in sendbuffererror),
- * count the occurrence, unfreeze (if no other handled
- * hardware error bits are set), and continue. They can
- * occur if a processor speculative read is done to the PIO
- * buffer while we are sending a packet, for example.
- */
- if (hwerrs & TXE_PIO_PARITY) {
- ipath_pe_txe_recover(dd);
- hwerrs &= ~TXE_PIO_PARITY;
- }
- if (!hwerrs) {
- static u32 freeze_cnt;
-
- freeze_cnt++;
- ipath_dbg("Clearing freezemode on ignored or recovered "
- "hardware error (%u)\n", freeze_cnt);
- ipath_clear_freeze(dd);
- }
- }
-
- *msg = '\0';
-
- if (hwerrs & INFINIPATH_HWE_MEMBISTFAILED) {
- strlcat(msg, "[Memory BIST test failed, InfiniPath hardware unusable]",
- msgl);
- /* ignore from now on, so disable until driver reloaded */
- *dd->ipath_statusp |= IPATH_STATUS_HWERROR;
- dd->ipath_hwerrmask &= ~INFINIPATH_HWE_MEMBISTFAILED;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
- dd->ipath_hwerrmask);
- }
-
- ipath_format_hwerrors(hwerrs,
- ipath_6120_hwerror_msgs,
- sizeof(ipath_6120_hwerror_msgs)/
- sizeof(ipath_6120_hwerror_msgs[0]),
- msg, msgl);
-
- if (hwerrs & (INFINIPATH_HWE_PCIEMEMPARITYERR_MASK
- << INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT)) {
- bits = (u32) ((hwerrs >>
- INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT) &
- INFINIPATH_HWE_PCIEMEMPARITYERR_MASK);
- snprintf(bitsmsg, sizeof bitsmsg,
- "[PCIe Mem Parity Errs %x] ", bits);
- strlcat(msg, bitsmsg, msgl);
- }
-
-#define _IPATH_PLL_FAIL (INFINIPATH_HWE_COREPLL_FBSLIP | \
- INFINIPATH_HWE_COREPLL_RFSLIP )
-
- if (hwerrs & _IPATH_PLL_FAIL) {
- snprintf(bitsmsg, sizeof bitsmsg,
- "[PLL failed (%llx), InfiniPath hardware unusable]",
- (unsigned long long) hwerrs & _IPATH_PLL_FAIL);
- strlcat(msg, bitsmsg, msgl);
- /* ignore from now on, so disable until driver reloaded */
- dd->ipath_hwerrmask &= ~(hwerrs & _IPATH_PLL_FAIL);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
- dd->ipath_hwerrmask);
- }
-
- if (hwerrs & INFINIPATH_HWE_SERDESPLLFAILED) {
- /*
- * If it occurs, it is left masked since the external
- * interface is unused
- */
- dd->ipath_hwerrmask &= ~INFINIPATH_HWE_SERDESPLLFAILED;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
- dd->ipath_hwerrmask);
- }
-
- if (hwerrs) {
- /*
- * if any set that we aren't ignoring; only
- * make the complaint once, in case it's stuck
- * or recurring, and we get here multiple
- * times.
- */
- ipath_dev_err(dd, "%s hardware error\n", msg);
- if (dd->ipath_flags & IPATH_INITTED) {
- ipath_set_linkstate(dd, IPATH_IB_LINKDOWN);
- ipath_setup_pe_setextled(dd,
- INFINIPATH_IBCS_L_STATE_DOWN,
- INFINIPATH_IBCS_LT_STATE_DISABLED);
- ipath_dev_err(dd, "Fatal Hardware Error (freeze "
- "mode), no longer usable, SN %.16s\n",
- dd->ipath_serial);
- isfatal = 1;
- }
- *dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
- /* mark as having had error */
- *dd->ipath_statusp |= IPATH_STATUS_HWERROR;
- /*
- * mark as not usable, at a minimum until driver
- * is reloaded, probably until reboot, since no
- * other reset is possible.
- */
- dd->ipath_flags &= ~IPATH_INITTED;
- } else
- *msg = 0; /* recovered from all of them */
-
- if (isfatal && !ipath_diag_inuse && dd->ipath_freezemsg && msg) {
- /*
- * for the /sys status file; if no trailing brace is copied,
- * we'll know it was truncated.
- */
- snprintf(dd->ipath_freezemsg, dd->ipath_freezelen,
- "{%s}", msg);
- }
-}
-
-/**
- * ipath_pe_boardname - fill in the board name
- * @dd: the infinipath device
- * @name: the output buffer
- * @namelen: the size of the output buffer
- *
- * info is based on the board revision register
- */
-static int ipath_pe_boardname(struct ipath_devdata *dd, char *name,
- size_t namelen)
-{
- char *n = NULL;
- u8 boardrev = dd->ipath_boardrev;
- int ret;
-
- switch (boardrev) {
- case 0:
- n = "InfiniPath_Emulation";
- break;
- case 1:
- n = "InfiniPath_QLE7140-Bringup";
- break;
- case 2:
- n = "InfiniPath_QLE7140";
- break;
- case 3:
- n = "InfiniPath_QMI7140";
- break;
- case 4:
- n = "InfiniPath_QEM7140";
- break;
- case 5:
- n = "InfiniPath_QMH7140";
- break;
- case 6:
- n = "InfiniPath_QLE7142";
- break;
- default:
- ipath_dev_err(dd,
- "Don't yet know about board with ID %u\n",
- boardrev);
- snprintf(name, namelen, "Unknown_InfiniPath_PCIe_%u",
- boardrev);
- break;
- }
- if (n)
- snprintf(name, namelen, "%s", n);
-
- if (dd->ipath_majrev != 4 || !dd->ipath_minrev || dd->ipath_minrev > 2) {
- ipath_dev_err(dd, "Unsupported InfiniPath hardware revision %u.%u!\n",
- dd->ipath_majrev, dd->ipath_minrev);
- ret = 1;
- } else {
- ret = 0;
- if (dd->ipath_minrev >= 2)
- dd->ipath_f_put_tid = ipath_pe_put_tid_2;
- }
-
- /*
- * set here, not in ipath_init_*_funcs because we have to do
- * it after we can read chip registers.
- */
- dd->ipath_ureg_align =
- ipath_read_kreg32(dd, dd->ipath_kregs->kr_pagealign);
-
- return ret;
-}
-
-/**
- * ipath_pe_init_hwerrors - enable hardware errors
- * @dd: the infinipath device
- *
- * now that we have finished initializing everything that might reasonably
- * cause a hardware error, and cleared those error bits as they occur,
- * we can enable hardware errors in the mask (potentially enabling
- * freeze mode), and enable hardware errors as errors (along with
- * everything else) in errormask
- */
-static void ipath_pe_init_hwerrors(struct ipath_devdata *dd)
-{
- ipath_err_t val;
- u64 extsval;
-
- extsval = ipath_read_kreg64(dd, dd->ipath_kregs->kr_extstatus);
-
- if (!(extsval & INFINIPATH_EXTS_MEMBIST_ENDTEST))
- ipath_dev_err(dd, "MemBIST did not complete!\n");
- if (extsval & INFINIPATH_EXTS_MEMBIST_FOUND)
- ipath_dbg("MemBIST corrected\n");
-
- val = ~0ULL; /* barring bugs, all hwerrors become interrupts, */
-
- if (!dd->ipath_boardrev) /* no PLL for Emulator */
- val &= ~INFINIPATH_HWE_SERDESPLLFAILED;
-
- if (dd->ipath_minrev < 2) {
- /* workaround bug 9460 in internal interface bus parity
- * checking. Fixed (HW bug 9490) in Rev2.
- */
- val &= ~INFINIPATH_HWE_PCIEBUSPARITYRADM;
- }
- dd->ipath_hwerrmask = val;
-}
-
-/**
- * ipath_pe_bringup_serdes - bring up the serdes
- * @dd: the infinipath device
- */
-static int ipath_pe_bringup_serdes(struct ipath_devdata *dd)
-{
- u64 val, config1, prev_val;
- int ret = 0;
-
- ipath_dbg("Trying to bringup serdes\n");
-
- if (ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus) &
- INFINIPATH_HWE_SERDESPLLFAILED) {
- ipath_dbg("At start, serdes PLL failed bit set "
- "in hwerrstatus, clearing and continuing\n");
- ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
- INFINIPATH_HWE_SERDESPLLFAILED);
- }
-
- dd->ibdeltainprog = 1;
- dd->ibsymsnap =
- ipath_read_creg32(dd, dd->ipath_cregs->cr_ibsymbolerrcnt);
- dd->iblnkerrsnap =
- ipath_read_creg32(dd, dd->ipath_cregs->cr_iblinkerrrecovcnt);
-
- val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0);
- config1 = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig1);
-
- ipath_cdbg(VERBOSE, "SerDes status config0=%llx config1=%llx, "
- "xgxsconfig %llx\n", (unsigned long long) val,
- (unsigned long long) config1, (unsigned long long)
- ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig));
-
- /*
- * Force reset on, also set rxdetect enable. Must do before reading
- * serdesstatus at least for simulation, or some of the bits in
- * serdes status will come back as undefined and cause simulation
- * failures
- */
- val |= INFINIPATH_SERDC0_RESET_PLL | INFINIPATH_SERDC0_RXDETECT_EN
- | INFINIPATH_SERDC0_L1PWR_DN;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val);
- /* be sure chip saw it */
- ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
- udelay(5); /* need pll reset set at least for a bit */
- /*
- * after PLL is reset, set the per-lane Resets and TxIdle and
- * clear the PLL reset and rxdetect (to get falling edge).
- * Leave L1PWR bits set (permanently)
- */
- val &= ~(INFINIPATH_SERDC0_RXDETECT_EN | INFINIPATH_SERDC0_RESET_PLL
- | INFINIPATH_SERDC0_L1PWR_DN);
- val |= INFINIPATH_SERDC0_RESET_MASK | INFINIPATH_SERDC0_TXIDLE;
- ipath_cdbg(VERBOSE, "Clearing pll reset and setting lane resets "
- "and txidle (%llx)\n", (unsigned long long) val);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val);
- /* be sure chip saw it */
- ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
- /* need PLL reset clear for at least 11 usec before lane
- * resets cleared; give it a few more to be sure */
- udelay(15);
- val &= ~(INFINIPATH_SERDC0_RESET_MASK | INFINIPATH_SERDC0_TXIDLE);
-
- ipath_cdbg(VERBOSE, "Clearing lane resets and txidle "
- "(writing %llx)\n", (unsigned long long) val);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val);
- /* be sure chip saw it */
- val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-
- val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
- prev_val = val;
- if (val & INFINIPATH_XGXS_RESET)
- val &= ~INFINIPATH_XGXS_RESET;
- if (((val >> INFINIPATH_XGXS_RX_POL_SHIFT) &
- INFINIPATH_XGXS_RX_POL_MASK) != dd->ipath_rx_pol_inv ) {
- /* need to compensate for Tx inversion in partner */
- val &= ~(INFINIPATH_XGXS_RX_POL_MASK <<
- INFINIPATH_XGXS_RX_POL_SHIFT);
- val |= dd->ipath_rx_pol_inv <<
- INFINIPATH_XGXS_RX_POL_SHIFT;
- }
- if (val != prev_val)
- ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
-
- val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0);
-
- /* clear current and de-emphasis bits */
- config1 &= ~0x0ffffffff00ULL;
- /* set current to 20ma */
- config1 |= 0x00000000000ULL;
- /* set de-emphasis to -5.68dB */
- config1 |= 0x0cccc000000ULL;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig1, config1);
-
- ipath_cdbg(VERBOSE, "done: SerDes status config0=%llx "
- "config1=%llx, sstatus=%llx xgxs=%llx\n",
- (unsigned long long) val, (unsigned long long) config1,
- (unsigned long long)
- ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesstatus),
- (unsigned long long)
- ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig));
-
- return ret;
-}
-
-/**
- * ipath_pe_quiet_serdes - set serdes to txidle
- * @dd: the infinipath device
- * Called when driver is being unloaded
- */
-static void ipath_pe_quiet_serdes(struct ipath_devdata *dd)
-{
- u64 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0);
-
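- /*
- * Back out the symbol-error and link-error-recovery counts that
- * were snapshotted while the link was coming up (the *snap and
- * *delta fields), writing the adjusted values back to the chip.
- * Counter writes must first be enabled via HwDiagCtrl.
- */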
- if (dd->ibsymdelta || dd->iblnkerrdelta ||
- dd->ibdeltainprog) {
- u64 diagc;
- /* enable counter writes */
- diagc = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwdiagctrl);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_hwdiagctrl,
- diagc | INFINIPATH_DC_COUNTERWREN);
-
- if (dd->ibsymdelta || dd->ibdeltainprog) {
- val = ipath_read_creg32(dd,
- dd->ipath_cregs->cr_ibsymbolerrcnt);
- if (dd->ibdeltainprog)
- val -= val - dd->ibsymsnap;
- val -= dd->ibsymdelta;
- ipath_write_creg(dd,
- dd->ipath_cregs->cr_ibsymbolerrcnt, val);
- }
- if (dd->iblnkerrdelta || dd->ibdeltainprog) {
- val = ipath_read_creg32(dd,
- dd->ipath_cregs->cr_iblinkerrrecovcnt);
- if (dd->ibdeltainprog)
- val -= val - dd->iblnkerrsnap;
- val -= dd->iblnkerrdelta;
- ipath_write_creg(dd,
- dd->ipath_cregs->cr_iblinkerrrecovcnt, val);
- }
-
- /* and disable counter writes */
- ipath_write_kreg(dd, dd->ipath_kregs->kr_hwdiagctrl, diagc);
- }
- val |= INFINIPATH_SERDC0_TXIDLE;
- ipath_dbg("Setting TxIdleEn on serdes (config0 = %llx)\n",
- (unsigned long long) val);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val);
-}
-
-static int ipath_pe_intconfig(struct ipath_devdata *dd)
-{
- u32 chiprev;
-
- /*
- * If the chip supports added error indication via GPIO pins,
- * enable interrupts on those bits so the interrupt routine
- * can count the events. Also set flag so interrupt routine
- * can know they are expected.
- */
- chiprev = dd->ipath_revision >> INFINIPATH_R_CHIPREVMINOR_SHIFT;
- if ((chiprev & INFINIPATH_R_CHIPREVMINOR_MASK) > 1) {
- /* Rev2+ reports extra errors via internal GPIO pins */
- dd->ipath_flags |= IPATH_GPIO_ERRINTRS;
- dd->ipath_gpio_mask |= IPATH_GPIO_ERRINTR_MASK;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
- dd->ipath_gpio_mask);
- }
- return 0;
-}
-
-/**
- * ipath_setup_pe_setextled - set the state of the two external LEDs
- * @dd: the infinipath device
- * @lst: the L state
- * @ltst: the LT state
- *
- * These LEDs indicate the physical and logical state of IB link.
- * For this chip (at least with recommended board pinouts), LED1
- * is Yellow (logical state) and LED2 is Green (physical state).
- *
- * Note: We try to match the Mellanox HCA LED behavior as best
- * we can. Green indicates physical link state is OK (something is
- * plugged in, and we can train).
- * Amber indicates the link is logically up (ACTIVE).
- * Mellanox further blinks the amber LED to indicate data packet
- * activity, but we have no hardware support for that, so it would
- * require waking up every 10-20 msecs and checking the counters
- * on the chip, and then turning the LED off if appropriate. That's
- * visible overhead, so not something we will do.
- *
- */
-static void ipath_setup_pe_setextled(struct ipath_devdata *dd, u64 lst,
- u64 ltst)
-{
- u64 extctl;
- unsigned long flags = 0;
-
- /* the diags use the LED to indicate diag info, so we leave
- * the external LED alone when the diags are running */
- if (ipath_diag_inuse)
- return;
-
- /* Allow override of the LED display, e.g. for locating the system in a rack */
- if (dd->ipath_led_override) {
- ltst = (dd->ipath_led_override & IPATH_LED_PHYS)
- ? INFINIPATH_IBCS_LT_STATE_LINKUP
- : INFINIPATH_IBCS_LT_STATE_DISABLED;
- lst = (dd->ipath_led_override & IPATH_LED_LOG)
- ? INFINIPATH_IBCS_L_STATE_ACTIVE
- : INFINIPATH_IBCS_L_STATE_DOWN;
- }
-
- spin_lock_irqsave(&dd->ipath_gpio_lock, flags);
- extctl = dd->ipath_extctrl & ~(INFINIPATH_EXTC_LED1PRIPORT_ON |
- INFINIPATH_EXTC_LED2PRIPORT_ON);
-
- if (ltst == INFINIPATH_IBCS_LT_STATE_LINKUP)
- extctl |= INFINIPATH_EXTC_LED2PRIPORT_ON;
- if (lst == INFINIPATH_IBCS_L_STATE_ACTIVE)
- extctl |= INFINIPATH_EXTC_LED1PRIPORT_ON;
- dd->ipath_extctrl = extctl;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_extctrl, extctl);
- spin_unlock_irqrestore(&dd->ipath_gpio_lock, flags);
-}
-
-/**
- * ipath_setup_pe_cleanup - clean up any per-chip chip-specific stuff
- * @dd: the infinipath device
- *
- * This is called during driver unload.
- * We do the pci_disable_msi here, not in generic code, because it
- * isn't used for the HT chips. If we do end up needing pci_enable_msi
- * at some point in the future for HT, we'll move the call back
- * into the main init_one code.
- */
-static void ipath_setup_pe_cleanup(struct ipath_devdata *dd)
-{
- dd->ipath_msi_lo = 0; /* just in case unload fails */
- pci_disable_msi(dd->pcidev);
-}
-
-static void ipath_6120_pcie_params(struct ipath_devdata *dd)
-{
- u16 linkstat, speed;
- int pos;
-
- pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_EXP);
- if (!pos) {
- ipath_dev_err(dd, "Can't find PCI Express capability!\n");
- goto bail;
- }
-
- pci_read_config_word(dd->pcidev, pos + PCI_EXP_LNKSTA,
- &linkstat);
- /*
- * speed is bits 0-3, link width is bits 4-8;
- * no defines for them in the headers
- */
- speed = linkstat & 0xf;
- linkstat >>= 4;
- linkstat &= 0x1f;
- dd->ipath_lbus_width = linkstat;
-
- switch (speed) {
- case 1:
- dd->ipath_lbus_speed = 2500; /* Gen1, 2.5GHz */
- break;
- case 2:
- dd->ipath_lbus_speed = 5000; /* Gen2, 5GHz */
- break;
- default: /* not defined, assume gen1 */
- dd->ipath_lbus_speed = 2500;
- break;
- }
-
- if (linkstat < 8)
- ipath_dev_err(dd,
- "PCIe width %u (x8 HCA), performance reduced\n",
- linkstat);
- else
- ipath_cdbg(VERBOSE, "PCIe speed %u width %u (x8 HCA)\n",
- dd->ipath_lbus_speed, linkstat);
-
- if (speed != 1)
- ipath_dev_err(dd,
- "PCIe linkspeed %u is incorrect; "
- "should be 1 (2500)!\n", speed);
-bail:
- /* fill in string, even on errors */
- snprintf(dd->ipath_lbus_info, sizeof(dd->ipath_lbus_info),
- "PCIe,%uMHz,x%u\n",
- dd->ipath_lbus_speed,
- dd->ipath_lbus_width);
-
- return;
-}
-
-/**
- * ipath_setup_pe_config - setup PCIe config related stuff
- * @dd: the infinipath device
- * @pdev: the PCI device
- *
- * The pci_enable_msi() call will fail on systems with MSI quirks
- * such as those with AMD8131, even if the device of interest is not
- * attached to that device (in the 2.6.13 - 2.6.15 kernels, at least; fixed
- * late in 2.6.16).
- * All that can be done is to edit the kernel source to remove the quirk
- * check until that is fixed.
- * We do not need to call enable_msi() for our HyperTransport chip,
- * even though it uses MSI, and we want to avoid the quirk warning,
- * so we call enable_msi() only for PCIe. If we do end up needing
- * pci_enable_msi at some point in the future for HT, we'll move the
- * call back into the main init_one code.
- * We save the msi lo and hi values, so we can restore them after
- * chip reset (the kernel PCI infrastructure doesn't yet handle that
- * correctly).
- */
-static int ipath_setup_pe_config(struct ipath_devdata *dd,
- struct pci_dev *pdev)
-{
- int pos, ret;
-
- dd->ipath_msi_lo = 0; /* used as a flag during reset processing */
- ret = pci_enable_msi(dd->pcidev);
- if (ret)
- ipath_dev_err(dd, "pci_enable_msi failed: %d, "
- "interrupts may not work\n", ret);
- /* continue even if it fails, we may still be OK... */
- dd->ipath_irq = pdev->irq;
-
- if ((pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI))) {
- u16 control;
- pci_read_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_LO,
- &dd->ipath_msi_lo);
- pci_read_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_HI,
- &dd->ipath_msi_hi);
- pci_read_config_word(dd->pcidev, pos + PCI_MSI_FLAGS,
- &control);
- /* now save the data (vector) info */
- pci_read_config_word(dd->pcidev,
- pos + ((control & PCI_MSI_FLAGS_64BIT)
- ? 12 : 8),
- &dd->ipath_msi_data);
- ipath_cdbg(VERBOSE, "Read msi data 0x%x from config offset "
- "0x%x, control=0x%x\n", dd->ipath_msi_data,
- pos + ((control & PCI_MSI_FLAGS_64BIT) ? 12 : 8),
- control);
- /* we save the cachelinesize also, although it doesn't
- * really matter */
- pci_read_config_byte(dd->pcidev, PCI_CACHE_LINE_SIZE,
- &dd->ipath_pci_cacheline);
- } else
- ipath_dev_err(dd, "Can't find MSI capability, "
- "can't save MSI settings for reset\n");
-
- ipath_6120_pcie_params(dd);
-
- dd->ipath_link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
- dd->ipath_link_speed_supported = IPATH_IB_SDR;
- dd->ipath_link_width_enabled = IB_WIDTH_4X;
- dd->ipath_link_speed_enabled = dd->ipath_link_speed_supported;
- /* these can't change for this chip, so set once */
- dd->ipath_link_width_active = dd->ipath_link_width_enabled;
- dd->ipath_link_speed_active = dd->ipath_link_speed_enabled;
- return 0;
-}
-
-static void ipath_init_pe_variables(struct ipath_devdata *dd)
-{
- /*
- * setup the register offsets, since they are different for each
- * chip
- */
- dd->ipath_kregs = &ipath_pe_kregs;
- dd->ipath_cregs = &ipath_pe_cregs;
-
- /*
- * bits for selecting i2c direction and values,
- * used for I2C serial flash
- */
- dd->ipath_gpio_sda_num = _IPATH_GPIO_SDA_NUM;
- dd->ipath_gpio_scl_num = _IPATH_GPIO_SCL_NUM;
- dd->ipath_gpio_sda = IPATH_GPIO_SDA;
- dd->ipath_gpio_scl = IPATH_GPIO_SCL;
-
- /*
- * Fill in data for field-values that change in newer chips.
- * We dynamically specify only the mask for LINKTRAININGSTATE
- * and only the shift for LINKSTATE, as they are the only ones
- * that change. Also precalculate the 3 link states of interest
- * and the combined mask.
- */
- dd->ibcs_ls_shift = IBA6120_IBCS_LINKSTATE_SHIFT;
- dd->ibcs_lts_mask = IBA6120_IBCS_LINKTRAININGSTATE_MASK;
- dd->ibcs_mask = (INFINIPATH_IBCS_LINKSTATE_MASK <<
- dd->ibcs_ls_shift) | dd->ibcs_lts_mask;
- dd->ib_init = (INFINIPATH_IBCS_LT_STATE_LINKUP <<
- INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) |
- (INFINIPATH_IBCS_L_STATE_INIT << dd->ibcs_ls_shift);
- dd->ib_arm = (INFINIPATH_IBCS_LT_STATE_LINKUP <<
- INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) |
- (INFINIPATH_IBCS_L_STATE_ARM << dd->ibcs_ls_shift);
- dd->ib_active = (INFINIPATH_IBCS_LT_STATE_LINKUP <<
- INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) |
- (INFINIPATH_IBCS_L_STATE_ACTIVE << dd->ibcs_ls_shift);
-
- /*
- * Fill in data for ibcc field-values that change in newer chips.
- * We dynamically specify only the mask for LINKINITCMD
- * and only the shift for LINKCMD and MAXPKTLEN, as they are
- * the only ones that change.
- */
- dd->ibcc_lic_mask = INFINIPATH_IBCC_LINKINITCMD_MASK;
- dd->ibcc_lc_shift = INFINIPATH_IBCC_LINKCMD_SHIFT;
- dd->ibcc_mpl_shift = INFINIPATH_IBCC_MAXPKTLEN_SHIFT;
-
- /* Fill in shifts for RcvCtrl. */
- dd->ipath_r_portenable_shift = INFINIPATH_R_PORTENABLE_SHIFT;
- dd->ipath_r_intravail_shift = INFINIPATH_R_INTRAVAIL_SHIFT;
- dd->ipath_r_tailupd_shift = INFINIPATH_R_TAILUPD_SHIFT;
- dd->ipath_r_portcfg_shift = 0; /* Not on IBA6120 */
-
- /* variables for sanity checking interrupt and errors */
- dd->ipath_hwe_bitsextant =
- (INFINIPATH_HWE_RXEMEMPARITYERR_MASK <<
- INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT) |
- (INFINIPATH_HWE_TXEMEMPARITYERR_MASK <<
- INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT) |
- (INFINIPATH_HWE_PCIEMEMPARITYERR_MASK <<
- INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT) |
- INFINIPATH_HWE_PCIE1PLLFAILED |
- INFINIPATH_HWE_PCIE0PLLFAILED |
- INFINIPATH_HWE_PCIEPOISONEDTLP |
- INFINIPATH_HWE_PCIECPLTIMEOUT |
- INFINIPATH_HWE_PCIEBUSPARITYXTLH |
- INFINIPATH_HWE_PCIEBUSPARITYXADM |
- INFINIPATH_HWE_PCIEBUSPARITYRADM |
- INFINIPATH_HWE_MEMBISTFAILED |
- INFINIPATH_HWE_COREPLL_FBSLIP |
- INFINIPATH_HWE_COREPLL_RFSLIP |
- INFINIPATH_HWE_SERDESPLLFAILED |
- INFINIPATH_HWE_IBCBUSTOSPCPARITYERR |
- INFINIPATH_HWE_IBCBUSFRSPCPARITYERR;
- dd->ipath_i_bitsextant =
- (INFINIPATH_I_RCVURG_MASK << INFINIPATH_I_RCVURG_SHIFT) |
- (INFINIPATH_I_RCVAVAIL_MASK <<
- INFINIPATH_I_RCVAVAIL_SHIFT) |
- INFINIPATH_I_ERROR | INFINIPATH_I_SPIOSENT |
- INFINIPATH_I_SPIOBUFAVAIL | INFINIPATH_I_GPIO;
- dd->ipath_e_bitsextant =
- INFINIPATH_E_RFORMATERR | INFINIPATH_E_RVCRC |
- INFINIPATH_E_RICRC | INFINIPATH_E_RMINPKTLEN |
- INFINIPATH_E_RMAXPKTLEN | INFINIPATH_E_RLONGPKTLEN |
- INFINIPATH_E_RSHORTPKTLEN | INFINIPATH_E_RUNEXPCHAR |
- INFINIPATH_E_RUNSUPVL | INFINIPATH_E_REBP |
- INFINIPATH_E_RIBFLOW | INFINIPATH_E_RBADVERSION |
- INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL |
- INFINIPATH_E_RBADTID | INFINIPATH_E_RHDRLEN |
- INFINIPATH_E_RHDR | INFINIPATH_E_RIBLOSTLINK |
- INFINIPATH_E_SMINPKTLEN | INFINIPATH_E_SMAXPKTLEN |
- INFINIPATH_E_SUNDERRUN | INFINIPATH_E_SPKTLEN |
- INFINIPATH_E_SDROPPEDSMPPKT | INFINIPATH_E_SDROPPEDDATAPKT |
- INFINIPATH_E_SPIOARMLAUNCH | INFINIPATH_E_SUNEXPERRPKTNUM |
- INFINIPATH_E_SUNSUPVL | INFINIPATH_E_IBSTATUSCHANGED |
- INFINIPATH_E_INVALIDADDR | INFINIPATH_E_RESET |
- INFINIPATH_E_HARDWARE;
-
- dd->ipath_i_rcvavail_mask = INFINIPATH_I_RCVAVAIL_MASK;
- dd->ipath_i_rcvurg_mask = INFINIPATH_I_RCVURG_MASK;
- dd->ipath_i_rcvavail_shift = INFINIPATH_I_RCVAVAIL_SHIFT;
- dd->ipath_i_rcvurg_shift = INFINIPATH_I_RCVURG_SHIFT;
-
- /*
- * EEPROM error log 0 is TXE Parity errors. 1 is RXE Parity.
- * 2 is Some Misc, 3 is reserved for future.
- */
- dd->ipath_eep_st_masks[0].hwerrs_to_log =
- INFINIPATH_HWE_TXEMEMPARITYERR_MASK <<
- INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT;
-
- /* Ignore errors in PIO/PBC on systems with unordered write-combining */
- if (ipath_unordered_wc())
- dd->ipath_eep_st_masks[0].hwerrs_to_log &= ~TXE_PIO_PARITY;
-
- dd->ipath_eep_st_masks[1].hwerrs_to_log =
- INFINIPATH_HWE_RXEMEMPARITYERR_MASK <<
- INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT;
-
- dd->ipath_eep_st_masks[2].errs_to_log = INFINIPATH_E_RESET;
- dd->delay_mult = 2; /* SDR, 4X, can't change */
-}
-
-/* setup the MSI stuff again after a reset. I'd like to just call
- * pci_enable_msi() and request_irq() again, but when I do that,
- * the MSI enable bit doesn't get set in the command word, and
- * we switch to a different interrupt vector, which is confusing,
- * so I instead just do it all inline. Perhaps we can somehow tie this
- * into the PCIe hotplug support at some point.
- * Note, because I'm doing it all here, I don't call pci_disable_msi()
- * or free_irq() at the start of ipath_setup_pe_reset().
- */
-static int ipath_reinit_msi(struct ipath_devdata *dd)
-{
- int pos;
- u16 control;
- int ret;
-
- if (!dd->ipath_msi_lo) {
- dev_info(&dd->pcidev->dev, "Can't restore MSI config, "
- "initial setup failed?\n");
- ret = 0;
- goto bail;
- }
-
- if (!(pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI))) {
- ipath_dev_err(dd, "Can't find MSI capability, "
- "can't restore MSI settings\n");
- ret = 0;
- goto bail;
- }
- ipath_cdbg(VERBOSE, "Writing msi_lo 0x%x to config offset 0x%x\n",
- dd->ipath_msi_lo, pos + PCI_MSI_ADDRESS_LO);
- pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_LO,
- dd->ipath_msi_lo);
- ipath_cdbg(VERBOSE, "Writing msi_lo 0x%x to config offset 0x%x\n",
- dd->ipath_msi_hi, pos + PCI_MSI_ADDRESS_HI);
- pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_HI,
- dd->ipath_msi_hi);
- pci_read_config_word(dd->pcidev, pos + PCI_MSI_FLAGS, &control);
- if (!(control & PCI_MSI_FLAGS_ENABLE)) {
- ipath_cdbg(VERBOSE, "MSI control at off %x was %x, "
- "setting MSI enable (%x)\n", pos + PCI_MSI_FLAGS,
- control, control | PCI_MSI_FLAGS_ENABLE);
- control |= PCI_MSI_FLAGS_ENABLE;
- pci_write_config_word(dd->pcidev, pos + PCI_MSI_FLAGS,
- control);
- }
- /* now rewrite the data (vector) info */
- pci_write_config_word(dd->pcidev, pos +
- ((control & PCI_MSI_FLAGS_64BIT) ? 12 : 8),
- dd->ipath_msi_data);
- /* we restore the cachelinesize also, although it doesn't really
- * matter */
- pci_write_config_byte(dd->pcidev, PCI_CACHE_LINE_SIZE,
- dd->ipath_pci_cacheline);
- /* and now set the pci master bit again */
- pci_set_master(dd->pcidev);
- ret = 1;
-
-bail:
- return ret;
-}
-
-/* This routine sleeps, so it can only be called from user context, not
- * from interrupt context. If we need interrupt context, we can split
- * it into two routines.
-*/
-static int ipath_setup_pe_reset(struct ipath_devdata *dd)
-{
- u64 val;
- int i;
- int ret;
- u16 cmdval;
-
- pci_read_config_word(dd->pcidev, PCI_COMMAND, &cmdval);
-
- /* Use ERROR so it shows up in logs, etc. */
- ipath_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->ipath_unit);
- /* keep chip from being accessed in a few places */
- dd->ipath_flags &= ~(IPATH_INITTED|IPATH_PRESENT);
- val = dd->ipath_control | INFINIPATH_C_RESET;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_control, val);
- mb();
-
- for (i = 1; i <= 5; i++) {
- int r;
- /* allow MBIST, etc. to complete; longer on each retry.
- * We sometimes get machine checks from bus timeout if no
- * response, so for now, make it *really* long.
- */
- msleep(1000 + (1 + i) * 2000);
- if ((r =
- pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0,
- dd->ipath_pcibar0)))
- ipath_dev_err(dd, "rewrite of BAR0 failed: %d\n",
- r);
- if ((r =
- pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_1,
- dd->ipath_pcibar1)))
- ipath_dev_err(dd, "rewrite of BAR1 failed: %d\n",
- r);
- /* now re-enable memory access */
- pci_write_config_word(dd->pcidev, PCI_COMMAND, cmdval);
- if ((r = pci_enable_device(dd->pcidev)))
- ipath_dev_err(dd, "pci_enable_device failed after "
- "reset: %d\n", r);
- /*
- * whether it fully enabled or not, mark as present,
- * again (but not INITTED)
- */
- dd->ipath_flags |= IPATH_PRESENT;
- val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_revision);
- if (val == dd->ipath_revision) {
- ipath_cdbg(VERBOSE, "Got matching revision "
- "register %llx on try %d\n",
- (unsigned long long) val, i);
- ret = ipath_reinit_msi(dd);
- goto bail;
- }
- /* Probably getting -1 back */
- ipath_dbg("Didn't get expected revision register, "
- "got %llx, try %d\n", (unsigned long long) val,
- i + 1);
- }
- ret = 0; /* failed */
-
-bail:
- if (ret)
- ipath_6120_pcie_params(dd);
- return ret;
-}
-
-/**
- * ipath_pe_put_tid - write a TID in chip
- * @dd: the infinipath device
- * @tidptr: pointer to the expected TID (in chip) to update
- * @type: RCVHQ_RCV_TYPE_EAGER (1) for eager, RCVHQ_RCV_TYPE_EXPECTED (0) for expected
- * @pa: physical address of in memory buffer; ipath_tidinvalid if freeing
- *
- * This exists as a separate routine to allow for special locking etc.
- * It's used for both the full cleanup on exit, as well as the normal
- * setup and teardown.
- */
-static void ipath_pe_put_tid(struct ipath_devdata *dd, u64 __iomem *tidptr,
- u32 type, unsigned long pa)
-{
- u32 __iomem *tidp32 = (u32 __iomem *)tidptr;
- unsigned long flags = 0; /* keep gcc quiet */
- int tidx;
- spinlock_t *tidlockp;
-
- if (!dd->ipath_kregbase)
- return;
-
- if (pa != dd->ipath_tidinvalid) {
- if (pa & ((1U << 11) - 1)) {
- dev_info(&dd->pcidev->dev, "BUG: physaddr %lx "
- "not 2KB aligned!\n", pa);
- return;
- }
- pa >>= 11;
- /* paranoia check */
- if (pa & ~INFINIPATH_RT_ADDR_MASK)
- ipath_dev_err(dd,
- "BUG: Physical page address 0x%lx "
- "has bits set in 31-29\n", pa);
-
- if (type == RCVHQ_RCV_TYPE_EAGER)
- pa |= dd->ipath_tidtemplate;
- else /* for now, always full 4KB page */
- pa |= 2 << 29;
- }
-
- /*
- * Workaround chip bug 9437 by writing the scratch register
- * before and after the TID, and with an io write barrier.
- * We use a spinlock around the writes, so they can't intermix
- * with other TID (eager or expected) writes (the chip bug
- * is triggered by back to back TID writes). Unfortunately, this
- * call can be done from interrupt level for the port 0 eager TIDs,
- * so we have to use irqsave locks.
- */
- /*
- * Assumes tidptr always > ipath_egrtidbase
- * if type == RCVHQ_RCV_TYPE_EAGER.
- */
- tidx = tidptr - dd->ipath_egrtidbase;
-
- tidlockp = (type == RCVHQ_RCV_TYPE_EAGER && tidx < dd->ipath_rcvegrcnt)
- ? &dd->ipath_kernel_tid_lock : &dd->ipath_user_tid_lock;
- spin_lock_irqsave(tidlockp, flags);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_scratch, 0xfeeddeaf);
- writel(pa, tidp32);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_scratch, 0xdeadbeef);
- mmiowb();
- spin_unlock_irqrestore(tidlockp, flags);
-}
-
-/**
- * ipath_pe_put_tid_2 - write a TID in chip, Revision 2 or higher
- * @dd: the infinipath device
- * @tidptr: pointer to the expected TID (in chip) to update
- * @type: RCVHQ_RCV_TYPE_EAGER (1) for eager, RCVHQ_RCV_TYPE_EXPECTED (0) for expected
- * @pa: physical address of in memory buffer; ipath_tidinvalid if freeing
- *
- * This exists as a separate routine to allow for selection of the
- * appropriate "flavor". The static calls in cleanup just use the
- * revision-agnostic form, as they are not performance critical.
- */
-static void ipath_pe_put_tid_2(struct ipath_devdata *dd, u64 __iomem *tidptr,
- u32 type, unsigned long pa)
-{
- u32 __iomem *tidp32 = (u32 __iomem *)tidptr;
- u32 tidx;
-
- if (!dd->ipath_kregbase)
- return;
-
- if (pa != dd->ipath_tidinvalid) {
- if (pa & ((1U << 11) - 1)) {
- dev_info(&dd->pcidev->dev, "BUG: physaddr %lx "
- "not 2KB aligned!\n", pa);
- return;
- }
- pa >>= 11;
- /* paranoia check */
- if (pa & ~INFINIPATH_RT_ADDR_MASK)
- ipath_dev_err(dd,
- "BUG: Physical page address 0x%lx "
- "has bits set in 31-29\n", pa);
-
- if (type == RCVHQ_RCV_TYPE_EAGER)
- pa |= dd->ipath_tidtemplate;
- else /* for now, always full 4KB page */
- pa |= 2 << 29;
- }
- tidx = tidptr - dd->ipath_egrtidbase;
- writel(pa, tidp32);
- mmiowb();
-}
-
-
-/**
- * ipath_pe_clear_tids - clear all TID entries for a port, expected and eager
- * @dd: the infinipath device
- * @port: the port
- *
- * clear all TID entries for a port, expected and eager.
- * Used from ipath_close(). On this chip, TIDs are only 32 bits,
- * not 64, but they are still on 64 bit boundaries, so tidbase
- * is declared as u64 * for the pointer math, even though we write 32 bits
- */
-static void ipath_pe_clear_tids(struct ipath_devdata *dd, unsigned port)
-{
- u64 __iomem *tidbase;
- unsigned long tidinv;
- int i;
-
- if (!dd->ipath_kregbase)
- return;
-
- ipath_cdbg(VERBOSE, "Invalidate TIDs for port %u\n", port);
-
- tidinv = dd->ipath_tidinvalid;
- tidbase = (u64 __iomem *)
- ((char __iomem *)(dd->ipath_kregbase) +
- dd->ipath_rcvtidbase +
- port * dd->ipath_rcvtidcnt * sizeof(*tidbase));
-
- for (i = 0; i < dd->ipath_rcvtidcnt; i++)
- dd->ipath_f_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
- tidinv);
-
- tidbase = (u64 __iomem *)
- ((char __iomem *)(dd->ipath_kregbase) +
- dd->ipath_rcvegrbase +
- port * dd->ipath_rcvegrcnt * sizeof(*tidbase));
-
- for (i = 0; i < dd->ipath_rcvegrcnt; i++)
- dd->ipath_f_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
- tidinv);
-}
-
-/**
- * ipath_pe_tidtemplate - setup constants for TID updates
- * @dd: the infinipath device
- *
- * We set up values that we use a lot, to avoid recalculating them each time.
- */
-static void ipath_pe_tidtemplate(struct ipath_devdata *dd)
-{
- u32 egrsize = dd->ipath_rcvegrbufsize;
-
- /* For now, we always allocate 4KB buffers (at init) so we can
- * receive max size packets. We may want a module parameter to
- * specify 2KB or 4KB, and/or make it per-port instead of per-device,
- * for those who want to reduce memory footprint. Note that the
- * ipath_rcvhdrentsize size must be large enough to hold the largest
- * IB header (currently 96 bytes) that we expect to handle (plus of
- * course the 2 dwords of RHF).
- */
- if (egrsize == 2048)
- dd->ipath_tidtemplate = 1U << 29;
- else if (egrsize == 4096)
- dd->ipath_tidtemplate = 2U << 29;
- else {
- egrsize = 4096;
- dev_info(&dd->pcidev->dev, "BUG: unsupported egrbufsize "
- "%u, using %u\n", dd->ipath_rcvegrbufsize,
- egrsize);
- dd->ipath_tidtemplate = 2U << 29;
- }
- dd->ipath_tidinvalid = 0;
-}
-
-static int ipath_pe_early_init(struct ipath_devdata *dd)
-{
- dd->ipath_flags |= IPATH_4BYTE_TID;
- if (ipath_unordered_wc())
- dd->ipath_flags |= IPATH_PIO_FLUSH_WC;
-
- /*
- * For openfabrics, we need to be able to handle an IB header of
- * 24 dwords. The HT chip has arbitrarily sized receive buffers, so we
- * made them the same size as the PIO buffers. This chip does not
- * handle arbitrary size buffers, so we need the header large enough
- * to handle the largest IB header, but still have room for a 2KB MTU
- * standard IB packet.
- */
- dd->ipath_rcvhdrentsize = 24;
- dd->ipath_rcvhdrsize = IPATH_DFLT_RCVHDRSIZE;
- dd->ipath_rhf_offset = 0;
- dd->ipath_egrtidbase = (u64 __iomem *)
- ((char __iomem *) dd->ipath_kregbase + dd->ipath_rcvegrbase);
-
- dd->ipath_rcvegrbufsize = ipath_mtu4096 ? 4096 : 2048;
- /*
- * the min() check here is currently a nop, but it may not always
- * be, depending on just how we do ipath_rcvegrbufsize
- */
- dd->ipath_ibmaxlen = min(ipath_mtu4096 ? dd->ipath_piosize4k :
- dd->ipath_piosize2k,
- dd->ipath_rcvegrbufsize +
- (dd->ipath_rcvhdrentsize << 2));
- dd->ipath_init_ibmaxlen = dd->ipath_ibmaxlen;
-
- /*
- * We can request a receive interrupt for 1 or
- * more packets from current offset. For now, we set this
- * up for a single packet.
- */
- dd->ipath_rhdrhead_intr_off = 1ULL<<32;
-
- ipath_get_eeprom_info(dd);
-
- return 0;
-}
-
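-/*
- * Default (weak) implementation: assume ordered write-combining stores.
- * An architecture-specific definition can override this; a non-zero
- * return makes the driver set IPATH_PIO_FLUSH_WC and force WC ordering
- * for user code.
- */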
-int __attribute__((weak)) ipath_unordered_wc(void)
-{
- return 0;
-}
-
-/**
- * ipath_init_pe_get_base_info - set chip-specific flags for user code
- * @pd: the infinipath port
- * @kbase: ipath_base_info pointer
- *
- * We set the PCIE flag because the lower bandwidth on PCIe vs
- * HyperTransport can affect some user packet algorithms.
- */
-static int ipath_pe_get_base_info(struct ipath_portdata *pd, void *kbase)
-{
- struct ipath_base_info *kinfo = kbase;
- struct ipath_devdata *dd;
-
- if (ipath_unordered_wc()) {
- kinfo->spi_runtime_flags |= IPATH_RUNTIME_FORCE_WC_ORDER;
- ipath_cdbg(PROC, "Intel processor, forcing WC order\n");
- }
- else
- ipath_cdbg(PROC, "Not Intel processor, WC ordered\n");
-
- if (pd == NULL)
- goto done;
-
- dd = pd->port_dd;
-
-done:
- kinfo->spi_runtime_flags |= IPATH_RUNTIME_PCIE |
- IPATH_RUNTIME_FORCE_PIOAVAIL | IPATH_RUNTIME_PIO_REGSWAPPED;
- return 0;
-}
-
-static void ipath_pe_free_irq(struct ipath_devdata *dd)
-{
- free_irq(dd->ipath_irq, dd);
- dd->ipath_irq = 0;
-}
-
-
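-/*
- * The message header in a receive queue entry follows the 64-bit RHF
- * word, so skip two 32-bit words from the RHF address.
- */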
-static struct ipath_message_header *
-ipath_pe_get_msgheader(struct ipath_devdata *dd, __le32 *rhf_addr)
-{
- return (struct ipath_message_header *)
- &rhf_addr[sizeof(u64) / sizeof(u32)];
-}
-
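-/*
- * cfgports is not used on this chip; the port count and the port-0
- * eager TID count are read straight from chip registers.
- */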
-static void ipath_pe_config_ports(struct ipath_devdata *dd, ushort cfgports)
-{
- dd->ipath_portcnt =
- ipath_read_kreg32(dd, dd->ipath_kregs->kr_portcnt);
- dd->ipath_p0_rcvegrcnt =
- ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvegrcnt);
-}
-
-static void ipath_pe_read_counters(struct ipath_devdata *dd,
- struct infinipath_counters *cntrs)
-{
- cntrs->LBIntCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(LBIntCnt));
- cntrs->LBFlowStallCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(LBFlowStallCnt));
- cntrs->TxSDmaDescCnt = 0;
- cntrs->TxUnsupVLErrCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxUnsupVLErrCnt));
- cntrs->TxDataPktCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxDataPktCnt));
- cntrs->TxFlowPktCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxFlowPktCnt));
- cntrs->TxDwordCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxDwordCnt));
- cntrs->TxLenErrCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxLenErrCnt));
- cntrs->TxMaxMinLenErrCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxMaxMinLenErrCnt));
- cntrs->TxUnderrunCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxUnderrunCnt));
- cntrs->TxFlowStallCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxFlowStallCnt));
- cntrs->TxDroppedPktCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxDroppedPktCnt));
- cntrs->RxDroppedPktCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxDroppedPktCnt));
- cntrs->RxDataPktCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxDataPktCnt));
- cntrs->RxFlowPktCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxFlowPktCnt));
- cntrs->RxDwordCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxDwordCnt));
- cntrs->RxLenErrCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxLenErrCnt));
- cntrs->RxMaxMinLenErrCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxMaxMinLenErrCnt));
- cntrs->RxICRCErrCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxICRCErrCnt));
- cntrs->RxVCRCErrCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxVCRCErrCnt));
- cntrs->RxFlowCtrlErrCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxFlowCtrlErrCnt));
- cntrs->RxBadFormatCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxBadFormatCnt));
- cntrs->RxLinkProblemCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxLinkProblemCnt));
- cntrs->RxEBPCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxEBPCnt));
- cntrs->RxLPCRCErrCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxLPCRCErrCnt));
- cntrs->RxBufOvflCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxBufOvflCnt));
- cntrs->RxTIDFullErrCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxTIDFullErrCnt));
- cntrs->RxTIDValidErrCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxTIDValidErrCnt));
- cntrs->RxPKeyMismatchCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxPKeyMismatchCnt));
- cntrs->RxP0HdrEgrOvflCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP0HdrEgrOvflCnt));
- cntrs->RxP1HdrEgrOvflCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP1HdrEgrOvflCnt));
- cntrs->RxP2HdrEgrOvflCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP2HdrEgrOvflCnt));
- cntrs->RxP3HdrEgrOvflCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP3HdrEgrOvflCnt));
- cntrs->RxP4HdrEgrOvflCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP4HdrEgrOvflCnt));
- cntrs->RxP5HdrEgrOvflCnt = 0;
- cntrs->RxP6HdrEgrOvflCnt = 0;
- cntrs->RxP7HdrEgrOvflCnt = 0;
- cntrs->RxP8HdrEgrOvflCnt = 0;
- cntrs->RxP9HdrEgrOvflCnt = 0;
- cntrs->RxP10HdrEgrOvflCnt = 0;
- cntrs->RxP11HdrEgrOvflCnt = 0;
- cntrs->RxP12HdrEgrOvflCnt = 0;
- cntrs->RxP13HdrEgrOvflCnt = 0;
- cntrs->RxP14HdrEgrOvflCnt = 0;
- cntrs->RxP15HdrEgrOvflCnt = 0;
- cntrs->RxP16HdrEgrOvflCnt = 0;
- cntrs->IBStatusChangeCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBStatusChangeCnt));
- cntrs->IBLinkErrRecoveryCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBLinkErrRecoveryCnt));
- cntrs->IBLinkDownedCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBLinkDownedCnt));
- cntrs->IBSymbolErrCnt =
- ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBSymbolErrCnt));
- cntrs->RxVL15DroppedPktCnt = 0;
- cntrs->RxOtherLocalPhyErrCnt = 0;
- cntrs->PcieRetryBufDiagQwordCnt = 0;
- cntrs->ExcessBufferOvflCnt = dd->ipath_overrun_thresh_errs;
- cntrs->LocalLinkIntegrityErrCnt = dd->ipath_lli_errs;
- cntrs->RxVlErrCnt = 0;
- cntrs->RxDlidFltrCnt = 0;
-}
-
-
-/* no interrupt fallback for these chips */
-static int ipath_pe_nointr_fallback(struct ipath_devdata *dd)
-{
- return 0;
-}
-
-
-/*
- * reset the XGXS (between serdes and IBC). Slightly less intrusive
- * than resetting the IBC or external link state, and useful in some
- * cases to cause some retraining. To do this right, we reset IBC
- * as well.
- */
-static void ipath_pe_xgxs_reset(struct ipath_devdata *dd)
-{
- u64 val, prev_val;
-
- prev_val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
- val = prev_val | INFINIPATH_XGXS_RESET;
- prev_val &= ~INFINIPATH_XGXS_RESET; /* be sure */
- ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
- dd->ipath_control & ~INFINIPATH_C_LINKENABLE);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
- ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, prev_val);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
- dd->ipath_control);
-}
-
-
-static int ipath_pe_get_ib_cfg(struct ipath_devdata *dd, int which)
-{
- int ret;
-
- switch (which) {
- case IPATH_IB_CFG_LWID:
- ret = dd->ipath_link_width_active;
- break;
- case IPATH_IB_CFG_SPD:
- ret = dd->ipath_link_speed_active;
- break;
- case IPATH_IB_CFG_LWID_ENB:
- ret = dd->ipath_link_width_enabled;
- break;
- case IPATH_IB_CFG_SPD_ENB:
- ret = dd->ipath_link_speed_enabled;
- break;
- default:
- ret = -ENOTSUPP;
- break;
- }
- return ret;
-}
-
-
-/* we assume range checking is already done, if needed */
-static int ipath_pe_set_ib_cfg(struct ipath_devdata *dd, int which, u32 val)
-{
- int ret = 0;
-
- if (which == IPATH_IB_CFG_LWID_ENB)
- dd->ipath_link_width_enabled = val;
- else if (which == IPATH_IB_CFG_SPD_ENB)
- dd->ipath_link_speed_enabled = val;
- else
- ret = -ENOTSUPP;
- return ret;
-}
-
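-/* No per-chip jint configuration on the IBA6120; nothing to do here. */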
-static void ipath_pe_config_jint(struct ipath_devdata *dd, u16 a, u16 b)
-{
-}
-
-
-static int ipath_pe_ib_updown(struct ipath_devdata *dd, int ibup, u64 ibcs)
-{
- if (ibup) {
- if (dd->ibdeltainprog) {
- dd->ibdeltainprog = 0;
- dd->ibsymdelta +=
- ipath_read_creg32(dd,
- dd->ipath_cregs->cr_ibsymbolerrcnt) -
- dd->ibsymsnap;
- dd->iblnkerrdelta +=
- ipath_read_creg32(dd,
- dd->ipath_cregs->cr_iblinkerrrecovcnt) -
- dd->iblnkerrsnap;
- }
- } else {
- dd->ipath_lli_counter = 0;
- if (!dd->ibdeltainprog) {
- dd->ibdeltainprog = 1;
- dd->ibsymsnap =
- ipath_read_creg32(dd,
- dd->ipath_cregs->cr_ibsymbolerrcnt);
- dd->iblnkerrsnap =
- ipath_read_creg32(dd,
- dd->ipath_cregs->cr_iblinkerrrecovcnt);
- }
- }
-
- ipath_setup_pe_setextled(dd, ipath_ib_linkstate(dd, ibcs),
- ipath_ib_linktrstate(dd, ibcs));
- return 0;
-}
-
-
-/**
- * ipath_init_iba6120_funcs - set up the chip-specific function pointers
- * @dd: the infinipath device
- *
- * This is global, and is called directly at init to set up the
- * chip-specific function pointers for later use.
- */
-void ipath_init_iba6120_funcs(struct ipath_devdata *dd)
-{
- dd->ipath_f_intrsetup = ipath_pe_intconfig;
- dd->ipath_f_bus = ipath_setup_pe_config;
- dd->ipath_f_reset = ipath_setup_pe_reset;
- dd->ipath_f_get_boardname = ipath_pe_boardname;
- dd->ipath_f_init_hwerrors = ipath_pe_init_hwerrors;
- dd->ipath_f_early_init = ipath_pe_early_init;
- dd->ipath_f_handle_hwerrors = ipath_pe_handle_hwerrors;
- dd->ipath_f_quiet_serdes = ipath_pe_quiet_serdes;
- dd->ipath_f_bringup_serdes = ipath_pe_bringup_serdes;
- dd->ipath_f_clear_tids = ipath_pe_clear_tids;
- /*
- * _f_put_tid may get changed after we read the chip revision,
- * but we start with the safe version for all revs
- */
- dd->ipath_f_put_tid = ipath_pe_put_tid;
- dd->ipath_f_cleanup = ipath_setup_pe_cleanup;
- dd->ipath_f_setextled = ipath_setup_pe_setextled;
- dd->ipath_f_get_base_info = ipath_pe_get_base_info;
- dd->ipath_f_free_irq = ipath_pe_free_irq;
- dd->ipath_f_tidtemplate = ipath_pe_tidtemplate;
- dd->ipath_f_intr_fallback = ipath_pe_nointr_fallback;
- dd->ipath_f_xgxs_reset = ipath_pe_xgxs_reset;
- dd->ipath_f_get_msgheader = ipath_pe_get_msgheader;
- dd->ipath_f_config_ports = ipath_pe_config_ports;
- dd->ipath_f_read_counters = ipath_pe_read_counters;
- dd->ipath_f_get_ib_cfg = ipath_pe_get_ib_cfg;
- dd->ipath_f_set_ib_cfg = ipath_pe_set_ib_cfg;
- dd->ipath_f_config_jint = ipath_pe_config_jint;
- dd->ipath_f_ib_updown = ipath_pe_ib_updown;
-
-
- /* initialize chip-specific variables */
- ipath_init_pe_variables(dd);
-}
-
diff --git a/drivers/infiniband/hw/ipath/ipath_iba7220.c b/drivers/infiniband/hw/ipath/ipath_iba7220.c
deleted file mode 100644
index a805402dd4ae..000000000000
--- a/drivers/infiniband/hw/ipath/ipath_iba7220.c
+++ /dev/null
@@ -1,2631 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-/*
- * This file contains all of the code that is specific to the
- * InfiniPath 7220 chip (except that specific to the SerDes)
- */
-
-#include <linux/interrupt.h>
-#include <linux/pci.h>
-#include <linux/sched.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <rdma/ib_verbs.h>
-
-#include "ipath_kernel.h"
-#include "ipath_registers.h"
-#include "ipath_7220.h"
-
-static void ipath_setup_7220_setextled(struct ipath_devdata *, u64, u64);
-
-static unsigned ipath_compat_ddr_negotiate = 1;
-
-module_param_named(compat_ddr_negotiate, ipath_compat_ddr_negotiate, uint,
- S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(compat_ddr_negotiate,
- "Attempt pre-IBTA 1.2 DDR speed negotiation");
-
-static unsigned ipath_sdma_fetch_arb = 1;
-module_param_named(fetch_arb, ipath_sdma_fetch_arb, uint, S_IRUGO);
-MODULE_PARM_DESC(fetch_arb, "IBA7220: change SDMA descriptor arbitration");
-
-/*
- * This file contains almost all the chip-specific register information and
- * access functions for the QLogic InfiniPath 7220 PCI-Express chip, with the
- * exception of SerDes support, which is in ipath_sd7220.c.
- *
- * This lists the InfiniPath registers, in the actual chip layout.
- * This structure should never be directly accessed.
- */
-struct _infinipath_do_not_use_kernel_regs {
- unsigned long long Revision;
- unsigned long long Control;
- unsigned long long PageAlign;
- unsigned long long PortCnt;
- unsigned long long DebugPortSelect;
- unsigned long long DebugSigsIntSel; /* was Reserved0;*/
- unsigned long long SendRegBase;
- unsigned long long UserRegBase;
- unsigned long long CounterRegBase;
- unsigned long long Scratch;
- unsigned long long EEPROMAddrCmd; /* was Reserved1; */
- unsigned long long EEPROMData; /* was Reserved2; */
- unsigned long long IntBlocked;
- unsigned long long IntMask;
- unsigned long long IntStatus;
- unsigned long long IntClear;
- unsigned long long ErrorMask;
- unsigned long long ErrorStatus;
- unsigned long long ErrorClear;
- unsigned long long HwErrMask;
- unsigned long long HwErrStatus;
- unsigned long long HwErrClear;
- unsigned long long HwDiagCtrl;
- unsigned long long MDIO;
- unsigned long long IBCStatus;
- unsigned long long IBCCtrl;
- unsigned long long ExtStatus;
- unsigned long long ExtCtrl;
- unsigned long long GPIOOut;
- unsigned long long GPIOMask;
- unsigned long long GPIOStatus;
- unsigned long long GPIOClear;
- unsigned long long RcvCtrl;
- unsigned long long RcvBTHQP;
- unsigned long long RcvHdrSize;
- unsigned long long RcvHdrCnt;
- unsigned long long RcvHdrEntSize;
- unsigned long long RcvTIDBase;
- unsigned long long RcvTIDCnt;
- unsigned long long RcvEgrBase;
- unsigned long long RcvEgrCnt;
- unsigned long long RcvBufBase;
- unsigned long long RcvBufSize;
- unsigned long long RxIntMemBase;
- unsigned long long RxIntMemSize;
- unsigned long long RcvPartitionKey;
- unsigned long long RcvQPMulticastPort;
- unsigned long long RcvPktLEDCnt;
- unsigned long long IBCDDRCtrl;
- unsigned long long HRTBT_GUID;
- unsigned long long IB_SDTEST_IF_TX;
- unsigned long long IB_SDTEST_IF_RX;
- unsigned long long IBCDDRCtrl2;
- unsigned long long IBCDDRStatus;
- unsigned long long JIntReload;
- unsigned long long IBNCModeCtrl;
- unsigned long long SendCtrl;
- unsigned long long SendBufBase;
- unsigned long long SendBufSize;
- unsigned long long SendBufCnt;
- unsigned long long SendAvailAddr;
- unsigned long long TxIntMemBase;
- unsigned long long TxIntMemSize;
- unsigned long long SendDmaBase;
- unsigned long long SendDmaLenGen;
- unsigned long long SendDmaTail;
- unsigned long long SendDmaHead;
- unsigned long long SendDmaHeadAddr;
- unsigned long long SendDmaBufMask0;
- unsigned long long SendDmaBufMask1;
- unsigned long long SendDmaBufMask2;
- unsigned long long SendDmaStatus;
- unsigned long long SendBufferError;
- unsigned long long SendBufferErrorCONT1;
- unsigned long long SendBufErr2; /* was Reserved6SBE[0/6] */
- unsigned long long Reserved6L[2];
- unsigned long long AvailUpdCount;
- unsigned long long RcvHdrAddr0;
- unsigned long long RcvHdrAddrs[16]; /* Why enumerate? */
- unsigned long long Reserved7hdtl; /* Align next to 300 */
- unsigned long long RcvHdrTailAddr0; /* 300, like others */
- unsigned long long RcvHdrTailAddrs[16];
- unsigned long long Reserved9SW[7]; /* was [8]; we have 17 ports */
- unsigned long long IbsdEpbAccCtl; /* IB Serdes EPB access control */
- unsigned long long IbsdEpbTransReg; /* IB Serdes EPB Transaction */
- unsigned long long Reserved10sds; /* was SerdesStatus on */
- unsigned long long XGXSConfig;
- unsigned long long IBSerDesCtrl; /* Was IBPLLCfg on Monty */
- unsigned long long EEPCtlStat; /* for "boot" EEPROM/FLASH */
- unsigned long long EEPAddrCmd;
- unsigned long long EEPData;
- unsigned long long PcieEpbAccCtl;
- unsigned long long PcieEpbTransCtl;
- unsigned long long EfuseCtl; /* E-Fuse control */
- unsigned long long EfuseData[4];
- unsigned long long ProcMon;
- /* this chip moves the following two from previous 200, 208 */
- unsigned long long PCIeRBufTestReg0;
- unsigned long long PCIeRBufTestReg1;
- /* added for this chip */
- unsigned long long PCIeRBufTestReg2;
- unsigned long long PCIeRBufTestReg3;
- /* added for this chip, debug only */
- unsigned long long SPC_JTAG_ACCESS_REG;
- unsigned long long LAControlReg;
- unsigned long long GPIODebugSelReg;
- unsigned long long DebugPortValueReg;
- /* added for this chip, DMA */
- unsigned long long SendDmaBufUsed[3];
- unsigned long long SendDmaReqTagUsed;
- /*
- * added for this chip, EFUSE: note that these program 64-bit
- * words 2 and 3 */
- unsigned long long efuse_pgm_data[2];
- unsigned long long Reserved11LAalign[10]; /* Skip 4B0..4F8 */
- /* we have 30 regs for DDS and RXEQ in IB SERDES */
- unsigned long long SerDesDDSRXEQ[30];
- unsigned long long Reserved12LAalign[2]; /* Skip 5F0, 5F8 */
- /* added for LA debug support */
- unsigned long long LAMemory[32];
-};
-
-struct _infinipath_do_not_use_counters {
- __u64 LBIntCnt;
- __u64 LBFlowStallCnt;
- __u64 TxSDmaDescCnt; /* was Reserved1 */
- __u64 TxUnsupVLErrCnt;
- __u64 TxDataPktCnt;
- __u64 TxFlowPktCnt;
- __u64 TxDwordCnt;
- __u64 TxLenErrCnt;
- __u64 TxMaxMinLenErrCnt;
- __u64 TxUnderrunCnt;
- __u64 TxFlowStallCnt;
- __u64 TxDroppedPktCnt;
- __u64 RxDroppedPktCnt;
- __u64 RxDataPktCnt;
- __u64 RxFlowPktCnt;
- __u64 RxDwordCnt;
- __u64 RxLenErrCnt;
- __u64 RxMaxMinLenErrCnt;
- __u64 RxICRCErrCnt;
- __u64 RxVCRCErrCnt;
- __u64 RxFlowCtrlErrCnt;
- __u64 RxBadFormatCnt;
- __u64 RxLinkProblemCnt;
- __u64 RxEBPCnt;
- __u64 RxLPCRCErrCnt;
- __u64 RxBufOvflCnt;
- __u64 RxTIDFullErrCnt;
- __u64 RxTIDValidErrCnt;
- __u64 RxPKeyMismatchCnt;
- __u64 RxP0HdrEgrOvflCnt;
- __u64 RxP1HdrEgrOvflCnt;
- __u64 RxP2HdrEgrOvflCnt;
- __u64 RxP3HdrEgrOvflCnt;
- __u64 RxP4HdrEgrOvflCnt;
- __u64 RxP5HdrEgrOvflCnt;
- __u64 RxP6HdrEgrOvflCnt;
- __u64 RxP7HdrEgrOvflCnt;
- __u64 RxP8HdrEgrOvflCnt;
- __u64 RxP9HdrEgrOvflCnt; /* was Reserved6 */
- __u64 RxP10HdrEgrOvflCnt; /* was Reserved7 */
- __u64 RxP11HdrEgrOvflCnt; /* new for IBA7220 */
- __u64 RxP12HdrEgrOvflCnt; /* new for IBA7220 */
- __u64 RxP13HdrEgrOvflCnt; /* new for IBA7220 */
- __u64 RxP14HdrEgrOvflCnt; /* new for IBA7220 */
- __u64 RxP15HdrEgrOvflCnt; /* new for IBA7220 */
- __u64 RxP16HdrEgrOvflCnt; /* new for IBA7220 */
- __u64 IBStatusChangeCnt;
- __u64 IBLinkErrRecoveryCnt;
- __u64 IBLinkDownedCnt;
- __u64 IBSymbolErrCnt;
- /* The following are new for IBA7220 */
- __u64 RxVL15DroppedPktCnt;
- __u64 RxOtherLocalPhyErrCnt;
- __u64 PcieRetryBufDiagQwordCnt;
- __u64 ExcessBufferOvflCnt;
- __u64 LocalLinkIntegrityErrCnt;
- __u64 RxVlErrCnt;
- __u64 RxDlidFltrCnt;
- __u64 Reserved8[7];
- __u64 PSStat;
- __u64 PSStart;
- __u64 PSInterval;
- __u64 PSRcvDataCount;
- __u64 PSRcvPktsCount;
- __u64 PSXmitDataCount;
- __u64 PSXmitPktsCount;
- __u64 PSXmitWaitCount;
-};
-
-#define IPATH_KREG_OFFSET(field) (offsetof( \
- struct _infinipath_do_not_use_kernel_regs, field) / sizeof(u64))
-#define IPATH_CREG_OFFSET(field) (offsetof( \
- struct _infinipath_do_not_use_counters, field) / sizeof(u64))
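
/*
 * [Editor's note -- illustrative sketch, not part of the original file.]
 * The two offset macros above turn a field's position in the "do not use"
 * layout structs into a 64-bit-word register index; the access helpers then
 * index kregbase as a u64 array (see the readq(&dd->ipath_kregbase[...])
 * use later in this file), so the byte offset is the index times sizeof(u64).
 * Derived from the struct layout above, Scratch is the tenth u64:
 *
 *     IPATH_KREG_OFFSET(Scratch)  ==  9
 *     byte offset from kregbase   ==  9 * sizeof(u64)  ==  0x48
 */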
-
-static const struct ipath_kregs ipath_7220_kregs = {
- .kr_control = IPATH_KREG_OFFSET(Control),
- .kr_counterregbase = IPATH_KREG_OFFSET(CounterRegBase),
- .kr_debugportselect = IPATH_KREG_OFFSET(DebugPortSelect),
- .kr_errorclear = IPATH_KREG_OFFSET(ErrorClear),
- .kr_errormask = IPATH_KREG_OFFSET(ErrorMask),
- .kr_errorstatus = IPATH_KREG_OFFSET(ErrorStatus),
- .kr_extctrl = IPATH_KREG_OFFSET(ExtCtrl),
- .kr_extstatus = IPATH_KREG_OFFSET(ExtStatus),
- .kr_gpio_clear = IPATH_KREG_OFFSET(GPIOClear),
- .kr_gpio_mask = IPATH_KREG_OFFSET(GPIOMask),
- .kr_gpio_out = IPATH_KREG_OFFSET(GPIOOut),
- .kr_gpio_status = IPATH_KREG_OFFSET(GPIOStatus),
- .kr_hwdiagctrl = IPATH_KREG_OFFSET(HwDiagCtrl),
- .kr_hwerrclear = IPATH_KREG_OFFSET(HwErrClear),
- .kr_hwerrmask = IPATH_KREG_OFFSET(HwErrMask),
- .kr_hwerrstatus = IPATH_KREG_OFFSET(HwErrStatus),
- .kr_ibcctrl = IPATH_KREG_OFFSET(IBCCtrl),
- .kr_ibcstatus = IPATH_KREG_OFFSET(IBCStatus),
- .kr_intblocked = IPATH_KREG_OFFSET(IntBlocked),
- .kr_intclear = IPATH_KREG_OFFSET(IntClear),
- .kr_intmask = IPATH_KREG_OFFSET(IntMask),
- .kr_intstatus = IPATH_KREG_OFFSET(IntStatus),
- .kr_mdio = IPATH_KREG_OFFSET(MDIO),
- .kr_pagealign = IPATH_KREG_OFFSET(PageAlign),
- .kr_partitionkey = IPATH_KREG_OFFSET(RcvPartitionKey),
- .kr_portcnt = IPATH_KREG_OFFSET(PortCnt),
- .kr_rcvbthqp = IPATH_KREG_OFFSET(RcvBTHQP),
- .kr_rcvbufbase = IPATH_KREG_OFFSET(RcvBufBase),
- .kr_rcvbufsize = IPATH_KREG_OFFSET(RcvBufSize),
- .kr_rcvctrl = IPATH_KREG_OFFSET(RcvCtrl),
- .kr_rcvegrbase = IPATH_KREG_OFFSET(RcvEgrBase),
- .kr_rcvegrcnt = IPATH_KREG_OFFSET(RcvEgrCnt),
- .kr_rcvhdrcnt = IPATH_KREG_OFFSET(RcvHdrCnt),
- .kr_rcvhdrentsize = IPATH_KREG_OFFSET(RcvHdrEntSize),
- .kr_rcvhdrsize = IPATH_KREG_OFFSET(RcvHdrSize),
- .kr_rcvintmembase = IPATH_KREG_OFFSET(RxIntMemBase),
- .kr_rcvintmemsize = IPATH_KREG_OFFSET(RxIntMemSize),
- .kr_rcvtidbase = IPATH_KREG_OFFSET(RcvTIDBase),
- .kr_rcvtidcnt = IPATH_KREG_OFFSET(RcvTIDCnt),
- .kr_revision = IPATH_KREG_OFFSET(Revision),
- .kr_scratch = IPATH_KREG_OFFSET(Scratch),
- .kr_sendbuffererror = IPATH_KREG_OFFSET(SendBufferError),
- .kr_sendctrl = IPATH_KREG_OFFSET(SendCtrl),
- .kr_sendpioavailaddr = IPATH_KREG_OFFSET(SendAvailAddr),
- .kr_sendpiobufbase = IPATH_KREG_OFFSET(SendBufBase),
- .kr_sendpiobufcnt = IPATH_KREG_OFFSET(SendBufCnt),
- .kr_sendpiosize = IPATH_KREG_OFFSET(SendBufSize),
- .kr_sendregbase = IPATH_KREG_OFFSET(SendRegBase),
- .kr_txintmembase = IPATH_KREG_OFFSET(TxIntMemBase),
- .kr_txintmemsize = IPATH_KREG_OFFSET(TxIntMemSize),
- .kr_userregbase = IPATH_KREG_OFFSET(UserRegBase),
-
- .kr_xgxsconfig = IPATH_KREG_OFFSET(XGXSConfig),
-
- /* send dma related regs */
- .kr_senddmabase = IPATH_KREG_OFFSET(SendDmaBase),
- .kr_senddmalengen = IPATH_KREG_OFFSET(SendDmaLenGen),
- .kr_senddmatail = IPATH_KREG_OFFSET(SendDmaTail),
- .kr_senddmahead = IPATH_KREG_OFFSET(SendDmaHead),
- .kr_senddmaheadaddr = IPATH_KREG_OFFSET(SendDmaHeadAddr),
- .kr_senddmabufmask0 = IPATH_KREG_OFFSET(SendDmaBufMask0),
- .kr_senddmabufmask1 = IPATH_KREG_OFFSET(SendDmaBufMask1),
- .kr_senddmabufmask2 = IPATH_KREG_OFFSET(SendDmaBufMask2),
- .kr_senddmastatus = IPATH_KREG_OFFSET(SendDmaStatus),
-
- /* SerDes related regs */
- .kr_ibserdesctrl = IPATH_KREG_OFFSET(IBSerDesCtrl),
- .kr_ib_epbacc = IPATH_KREG_OFFSET(IbsdEpbAccCtl),
- .kr_ib_epbtrans = IPATH_KREG_OFFSET(IbsdEpbTransReg),
- .kr_pcie_epbacc = IPATH_KREG_OFFSET(PcieEpbAccCtl),
- .kr_pcie_epbtrans = IPATH_KREG_OFFSET(PcieEpbTransCtl),
- .kr_ib_ddsrxeq = IPATH_KREG_OFFSET(SerDesDDSRXEQ),
-
- /*
- * These should not be used directly via ipath_read_kreg64(),
- * use them with ipath_read_kreg64_port()
- */
- .kr_rcvhdraddr = IPATH_KREG_OFFSET(RcvHdrAddr0),
- .kr_rcvhdrtailaddr = IPATH_KREG_OFFSET(RcvHdrTailAddr0),
-
- /*
- * The rcvpktled register controls one of the debug port signals, so
- * a packet activity LED can be connected to it.
- */
- .kr_rcvpktledcnt = IPATH_KREG_OFFSET(RcvPktLEDCnt),
- .kr_pcierbuftestreg0 = IPATH_KREG_OFFSET(PCIeRBufTestReg0),
- .kr_pcierbuftestreg1 = IPATH_KREG_OFFSET(PCIeRBufTestReg1),
-
- .kr_hrtbt_guid = IPATH_KREG_OFFSET(HRTBT_GUID),
- .kr_ibcddrctrl = IPATH_KREG_OFFSET(IBCDDRCtrl),
- .kr_ibcddrstatus = IPATH_KREG_OFFSET(IBCDDRStatus),
- .kr_jintreload = IPATH_KREG_OFFSET(JIntReload)
-};
-
-static const struct ipath_cregs ipath_7220_cregs = {
- .cr_badformatcnt = IPATH_CREG_OFFSET(RxBadFormatCnt),
- .cr_erricrccnt = IPATH_CREG_OFFSET(RxICRCErrCnt),
- .cr_errlinkcnt = IPATH_CREG_OFFSET(RxLinkProblemCnt),
- .cr_errlpcrccnt = IPATH_CREG_OFFSET(RxLPCRCErrCnt),
- .cr_errpkey = IPATH_CREG_OFFSET(RxPKeyMismatchCnt),
- .cr_errrcvflowctrlcnt = IPATH_CREG_OFFSET(RxFlowCtrlErrCnt),
- .cr_err_rlencnt = IPATH_CREG_OFFSET(RxLenErrCnt),
- .cr_errslencnt = IPATH_CREG_OFFSET(TxLenErrCnt),
- .cr_errtidfull = IPATH_CREG_OFFSET(RxTIDFullErrCnt),
- .cr_errtidvalid = IPATH_CREG_OFFSET(RxTIDValidErrCnt),
- .cr_errvcrccnt = IPATH_CREG_OFFSET(RxVCRCErrCnt),
- .cr_ibstatuschange = IPATH_CREG_OFFSET(IBStatusChangeCnt),
- .cr_intcnt = IPATH_CREG_OFFSET(LBIntCnt),
- .cr_invalidrlencnt = IPATH_CREG_OFFSET(RxMaxMinLenErrCnt),
- .cr_invalidslencnt = IPATH_CREG_OFFSET(TxMaxMinLenErrCnt),
- .cr_lbflowstallcnt = IPATH_CREG_OFFSET(LBFlowStallCnt),
- .cr_pktrcvcnt = IPATH_CREG_OFFSET(RxDataPktCnt),
- .cr_pktrcvflowctrlcnt = IPATH_CREG_OFFSET(RxFlowPktCnt),
- .cr_pktsendcnt = IPATH_CREG_OFFSET(TxDataPktCnt),
- .cr_pktsendflowcnt = IPATH_CREG_OFFSET(TxFlowPktCnt),
- .cr_portovflcnt = IPATH_CREG_OFFSET(RxP0HdrEgrOvflCnt),
- .cr_rcvebpcnt = IPATH_CREG_OFFSET(RxEBPCnt),
- .cr_rcvovflcnt = IPATH_CREG_OFFSET(RxBufOvflCnt),
- .cr_senddropped = IPATH_CREG_OFFSET(TxDroppedPktCnt),
- .cr_sendstallcnt = IPATH_CREG_OFFSET(TxFlowStallCnt),
- .cr_sendunderruncnt = IPATH_CREG_OFFSET(TxUnderrunCnt),
- .cr_wordrcvcnt = IPATH_CREG_OFFSET(RxDwordCnt),
- .cr_wordsendcnt = IPATH_CREG_OFFSET(TxDwordCnt),
- .cr_unsupvlcnt = IPATH_CREG_OFFSET(TxUnsupVLErrCnt),
- .cr_rxdroppktcnt = IPATH_CREG_OFFSET(RxDroppedPktCnt),
- .cr_iblinkerrrecovcnt = IPATH_CREG_OFFSET(IBLinkErrRecoveryCnt),
- .cr_iblinkdowncnt = IPATH_CREG_OFFSET(IBLinkDownedCnt),
- .cr_ibsymbolerrcnt = IPATH_CREG_OFFSET(IBSymbolErrCnt),
- .cr_vl15droppedpktcnt = IPATH_CREG_OFFSET(RxVL15DroppedPktCnt),
- .cr_rxotherlocalphyerrcnt =
- IPATH_CREG_OFFSET(RxOtherLocalPhyErrCnt),
- .cr_excessbufferovflcnt = IPATH_CREG_OFFSET(ExcessBufferOvflCnt),
- .cr_locallinkintegrityerrcnt =
- IPATH_CREG_OFFSET(LocalLinkIntegrityErrCnt),
- .cr_rxvlerrcnt = IPATH_CREG_OFFSET(RxVlErrCnt),
- .cr_rxdlidfltrcnt = IPATH_CREG_OFFSET(RxDlidFltrCnt),
- .cr_psstat = IPATH_CREG_OFFSET(PSStat),
- .cr_psstart = IPATH_CREG_OFFSET(PSStart),
- .cr_psinterval = IPATH_CREG_OFFSET(PSInterval),
- .cr_psrcvdatacount = IPATH_CREG_OFFSET(PSRcvDataCount),
- .cr_psrcvpktscount = IPATH_CREG_OFFSET(PSRcvPktsCount),
- .cr_psxmitdatacount = IPATH_CREG_OFFSET(PSXmitDataCount),
- .cr_psxmitpktscount = IPATH_CREG_OFFSET(PSXmitPktsCount),
- .cr_psxmitwaitcount = IPATH_CREG_OFFSET(PSXmitWaitCount),
-};
-
-/* kr_control bits */
-#define INFINIPATH_C_RESET (1U<<7)
-
-/* kr_intstatus, kr_intclear, kr_intmask bits */
-#define INFINIPATH_I_RCVURG_MASK ((1ULL<<17)-1)
-#define INFINIPATH_I_RCVURG_SHIFT 32
-#define INFINIPATH_I_RCVAVAIL_MASK ((1ULL<<17)-1)
-#define INFINIPATH_I_RCVAVAIL_SHIFT 0
-#define INFINIPATH_I_SERDESTRIMDONE (1ULL<<27)
-
-/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */
-#define INFINIPATH_HWE_PCIEMEMPARITYERR_MASK 0x00000000000000ffULL
-#define INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT 0
-#define INFINIPATH_HWE_PCIEPOISONEDTLP 0x0000000010000000ULL
-#define INFINIPATH_HWE_PCIECPLTIMEOUT 0x0000000020000000ULL
-#define INFINIPATH_HWE_PCIEBUSPARITYXTLH 0x0000000040000000ULL
-#define INFINIPATH_HWE_PCIEBUSPARITYXADM 0x0000000080000000ULL
-#define INFINIPATH_HWE_PCIEBUSPARITYRADM 0x0000000100000000ULL
-#define INFINIPATH_HWE_COREPLL_FBSLIP 0x0080000000000000ULL
-#define INFINIPATH_HWE_COREPLL_RFSLIP 0x0100000000000000ULL
-#define INFINIPATH_HWE_PCIE1PLLFAILED 0x0400000000000000ULL
-#define INFINIPATH_HWE_PCIE0PLLFAILED 0x0800000000000000ULL
-#define INFINIPATH_HWE_SERDESPLLFAILED 0x1000000000000000ULL
-/* specific to this chip */
-#define INFINIPATH_HWE_PCIECPLDATAQUEUEERR 0x0000000000000040ULL
-#define INFINIPATH_HWE_PCIECPLHDRQUEUEERR 0x0000000000000080ULL
-#define INFINIPATH_HWE_SDMAMEMREADERR 0x0000000010000000ULL
-#define INFINIPATH_HWE_CLK_UC_PLLNOTLOCKED 0x2000000000000000ULL
-#define INFINIPATH_HWE_PCIESERDESQ0PCLKNOTDETECT 0x0100000000000000ULL
-#define INFINIPATH_HWE_PCIESERDESQ1PCLKNOTDETECT 0x0200000000000000ULL
-#define INFINIPATH_HWE_PCIESERDESQ2PCLKNOTDETECT 0x0400000000000000ULL
-#define INFINIPATH_HWE_PCIESERDESQ3PCLKNOTDETECT 0x0800000000000000ULL
-#define INFINIPATH_HWE_DDSRXEQMEMORYPARITYERR 0x0000008000000000ULL
-#define INFINIPATH_HWE_IB_UC_MEMORYPARITYERR 0x0000004000000000ULL
-#define INFINIPATH_HWE_PCIE_UC_OCT0MEMORYPARITYERR 0x0000001000000000ULL
-#define INFINIPATH_HWE_PCIE_UC_OCT1MEMORYPARITYERR 0x0000002000000000ULL
-
-#define IBA7220_IBCS_LINKTRAININGSTATE_MASK 0x1F
-#define IBA7220_IBCS_LINKSTATE_SHIFT 5
-#define IBA7220_IBCS_LINKSPEED_SHIFT 8
-#define IBA7220_IBCS_LINKWIDTH_SHIFT 9
-
-#define IBA7220_IBCC_LINKINITCMD_MASK 0x7ULL
-#define IBA7220_IBCC_LINKCMD_SHIFT 19
-#define IBA7220_IBCC_MAXPKTLEN_SHIFT 21
-
-/* kr_ibcddrctrl bits */
-#define IBA7220_IBC_DLIDLMC_MASK 0xFFFFFFFFUL
-#define IBA7220_IBC_DLIDLMC_SHIFT 32
-#define IBA7220_IBC_HRTBT_MASK 3
-#define IBA7220_IBC_HRTBT_SHIFT 16
-#define IBA7220_IBC_HRTBT_ENB 0x10000UL
-#define IBA7220_IBC_LANE_REV_SUPPORTED (1<<8)
-#define IBA7220_IBC_LREV_MASK 1
-#define IBA7220_IBC_LREV_SHIFT 8
-#define IBA7220_IBC_RXPOL_MASK 1
-#define IBA7220_IBC_RXPOL_SHIFT 7
-#define IBA7220_IBC_WIDTH_SHIFT 5
-#define IBA7220_IBC_WIDTH_MASK 0x3
-#define IBA7220_IBC_WIDTH_1X_ONLY (0<<IBA7220_IBC_WIDTH_SHIFT)
-#define IBA7220_IBC_WIDTH_4X_ONLY (1<<IBA7220_IBC_WIDTH_SHIFT)
-#define IBA7220_IBC_WIDTH_AUTONEG (2<<IBA7220_IBC_WIDTH_SHIFT)
-#define IBA7220_IBC_SPEED_AUTONEG (1<<1)
-#define IBA7220_IBC_SPEED_SDR (1<<2)
-#define IBA7220_IBC_SPEED_DDR (1<<3)
-#define IBA7220_IBC_SPEED_AUTONEG_MASK (0x7<<1)
-#define IBA7220_IBC_IBTA_1_2_MASK (1)
-
-/* kr_ibcddrstatus */
-/* link latency shift is 0, don't bother defining */
-#define IBA7220_DDRSTAT_LINKLAT_MASK 0x3ffffff
-
-/* kr_extstatus bits */
-#define INFINIPATH_EXTS_FREQSEL 0x2
-#define INFINIPATH_EXTS_SERDESSEL 0x4
-#define INFINIPATH_EXTS_MEMBIST_ENDTEST 0x0000000000004000
-#define INFINIPATH_EXTS_MEMBIST_DISABLED 0x0000000000008000
-
-/* kr_xgxsconfig bits */
-#define INFINIPATH_XGXS_RESET 0x5ULL
-#define INFINIPATH_XGXS_FC_SAFE (1ULL<<63)
-
-/* kr_rcvpktledcnt */
-#define IBA7220_LEDBLINK_ON_SHIFT 32 /* 4ns period on after packet */
-#define IBA7220_LEDBLINK_OFF_SHIFT 0 /* 4ns period off before next on */
-
-#define _IPATH_GPIO_SDA_NUM 1
-#define _IPATH_GPIO_SCL_NUM 0
-
-#define IPATH_GPIO_SDA (1ULL << \
- (_IPATH_GPIO_SDA_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT))
-#define IPATH_GPIO_SCL (1ULL << \
- (_IPATH_GPIO_SCL_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT))
-
-#define IBA7220_R_INTRAVAIL_SHIFT 17
-#define IBA7220_R_TAILUPD_SHIFT 35
-#define IBA7220_R_PORTCFG_SHIFT 36
-
-#define INFINIPATH_JINT_PACKETSHIFT 16
-#define INFINIPATH_JINT_DEFAULT_IDLE_TICKS 0
-#define INFINIPATH_JINT_DEFAULT_MAX_PACKETS 0
-
-#define IBA7220_HDRHEAD_PKTINT_SHIFT 32 /* interrupt cnt in upper 32 bits */
-
-/*
- * the size bits give us 2^N, in KB units. 0 marks the entry as invalid,
- * and 7 is reserved. We currently use only 2KB and 4KB.
- */
-#define IBA7220_TID_SZ_SHIFT 37 /* shift to 3bit size selector */
-#define IBA7220_TID_SZ_2K (1UL<<IBA7220_TID_SZ_SHIFT) /* 2KB */
-#define IBA7220_TID_SZ_4K (2UL<<IBA7220_TID_SZ_SHIFT) /* 4KB */
-#define IBA7220_TID_PA_SHIFT 11U /* TID addr in chip stored w/o low bits */
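
/*
 * [Editor's note -- hypothetical sketch, not from the original file.]
 * Putting the three TID defines above together: the chip stores a TID as
 * the buffer's physical address with the low 11 bits dropped, OR'd with the
 * 3-bit size selector at bit 37, so something along the lines of
 *
 *     u64 chippa = (pa >> IBA7220_TID_PA_SHIFT) | IBA7220_TID_SZ_4K;
 *
 * would describe a 4KB receive buffer at physical address pa.
 */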
-
-#define IPATH_AUTONEG_TRIES 5 /* sequential retries to negotiate DDR */
-
-static char int_type[16] = "auto";
-module_param_string(interrupt_type, int_type, sizeof(int_type), 0444);
-MODULE_PARM_DESC(int_type, " interrupt_type=auto|force_msi|force_intx");
-
-/* packet rate matching delay; chip has support */
-static u8 rate_to_delay[2][2] = {
- /* 1x, 4x */
- { 8, 2 }, /* SDR */
- { 4, 1 } /* DDR */
-};
-
-/* 7220 specific hardware errors... */
-static const struct ipath_hwerror_msgs ipath_7220_hwerror_msgs[] = {
- INFINIPATH_HWE_MSG(PCIEPOISONEDTLP, "PCIe Poisoned TLP"),
- INFINIPATH_HWE_MSG(PCIECPLTIMEOUT, "PCIe completion timeout"),
- /*
- * In practice, it's unlikely that we'll see PCIe PLL, or bus
- * parity or memory parity error failures, because most likely we
- * won't be able to talk to the core of the chip. Nonetheless, we
- * might see them, if they are in parts of the PCIe core that aren't
- * essential.
- */
- INFINIPATH_HWE_MSG(PCIE1PLLFAILED, "PCIePLL1"),
- INFINIPATH_HWE_MSG(PCIE0PLLFAILED, "PCIePLL0"),
- INFINIPATH_HWE_MSG(PCIEBUSPARITYXTLH, "PCIe XTLH core parity"),
- INFINIPATH_HWE_MSG(PCIEBUSPARITYXADM, "PCIe ADM TX core parity"),
- INFINIPATH_HWE_MSG(PCIEBUSPARITYRADM, "PCIe ADM RX core parity"),
- INFINIPATH_HWE_MSG(RXDSYNCMEMPARITYERR, "Rx Dsync"),
- INFINIPATH_HWE_MSG(SERDESPLLFAILED, "SerDes PLL"),
- INFINIPATH_HWE_MSG(PCIECPLDATAQUEUEERR, "PCIe cpl header queue"),
- INFINIPATH_HWE_MSG(PCIECPLHDRQUEUEERR, "PCIe cpl data queue"),
- INFINIPATH_HWE_MSG(SDMAMEMREADERR, "Send DMA memory read"),
- INFINIPATH_HWE_MSG(CLK_UC_PLLNOTLOCKED, "uC PLL clock not locked"),
- INFINIPATH_HWE_MSG(PCIESERDESQ0PCLKNOTDETECT,
- "PCIe serdes Q0 no clock"),
- INFINIPATH_HWE_MSG(PCIESERDESQ1PCLKNOTDETECT,
- "PCIe serdes Q1 no clock"),
- INFINIPATH_HWE_MSG(PCIESERDESQ2PCLKNOTDETECT,
- "PCIe serdes Q2 no clock"),
- INFINIPATH_HWE_MSG(PCIESERDESQ3PCLKNOTDETECT,
- "PCIe serdes Q3 no clock"),
- INFINIPATH_HWE_MSG(DDSRXEQMEMORYPARITYERR,
- "DDS RXEQ memory parity"),
- INFINIPATH_HWE_MSG(IB_UC_MEMORYPARITYERR, "IB uC memory parity"),
- INFINIPATH_HWE_MSG(PCIE_UC_OCT0MEMORYPARITYERR,
- "PCIe uC oct0 memory parity"),
- INFINIPATH_HWE_MSG(PCIE_UC_OCT1MEMORYPARITYERR,
- "PCIe uC oct1 memory parity"),
-};
-
-static void autoneg_work(struct work_struct *);
-
-/*
- * the offset is different for different configured port numbers, since
- * port0 is fixed in size, but others can vary. Make it a function to
- * make the issue more obvious.
- */
-static inline u32 port_egrtid_idx(struct ipath_devdata *dd, unsigned port)
-{
- return port ? dd->ipath_p0_rcvegrcnt +
- (port-1) * dd->ipath_rcvegrcnt : 0;
-}
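
/*
 * [Editor's note -- worked example with made-up counts, not from the
 * original file.]  With a hypothetical port-0 eager-TID count of 2048 and
 * 4096 eager TIDs per additional port, the helper above gives:
 *
 *     port_egrtid_idx(dd, 0)  ==  0
 *     port_egrtid_idx(dd, 1)  ==  2048
 *     port_egrtid_idx(dd, 2)  ==  2048 + 1 * 4096  ==  6144
 */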
-
-static void ipath_7220_txe_recover(struct ipath_devdata *dd)
-{
- ++ipath_stats.sps_txeparity;
-
- dev_info(&dd->pcidev->dev,
- "Recovering from TXE PIO parity error\n");
- ipath_disarm_senderrbufs(dd);
-}
-
-
-/**
- * ipath_7220_handle_hwerrors - display hardware errors.
- * @dd: the infinipath device
- * @msg: the output buffer
- * @msgl: the size of the output buffer
- *
- * Most hardware errors are catastrophic, but for right now we'll print
- * them and continue. We reuse the same message buffer as
- * ipath_handle_errors() to avoid excessive stack usage.
- */
-static void ipath_7220_handle_hwerrors(struct ipath_devdata *dd, char *msg,
- size_t msgl)
-{
- ipath_err_t hwerrs;
- u32 bits, ctrl;
- int isfatal = 0;
- char bitsmsg[64];
- int log_idx;
-
- hwerrs = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus);
- if (!hwerrs) {
- /*
- * better than printing confusing messages.
- * This seems to be related to clearing the crc error, or
- * the pll error during init.
- */
- ipath_cdbg(VERBOSE, "Called but no hardware errors set\n");
- goto bail;
- } else if (hwerrs == ~0ULL) {
- ipath_dev_err(dd, "Read of hardware error status failed "
- "(all bits set); ignoring\n");
- goto bail;
- }
- ipath_stats.sps_hwerrs++;
-
- /*
- * Always clear the error status register, except MEMBISTFAIL,
- * regardless of whether we continue or stop using the chip.
- * We want that set so we know it failed, even across driver reload.
- * We'll still ignore it in the hwerrmask. We do this partly for
- * diagnostics, but also for support.
- */
- ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
- hwerrs&~INFINIPATH_HWE_MEMBISTFAILED);
-
- hwerrs &= dd->ipath_hwerrmask;
-
- /* We log some errors to EEPROM, check if we have any of those. */
- for (log_idx = 0; log_idx < IPATH_EEP_LOG_CNT; ++log_idx)
- if (hwerrs & dd->ipath_eep_st_masks[log_idx].hwerrs_to_log)
- ipath_inc_eeprom_err(dd, log_idx, 1);
- /*
- * Make sure we get this much out, unless told to be quiet,
- * or it's occurred within the last 5 seconds.
- */
- if ((hwerrs & ~(dd->ipath_lasthwerror |
- ((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF |
- INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC)
- << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT))) ||
- (ipath_debug & __IPATH_VERBDBG))
- dev_info(&dd->pcidev->dev, "Hardware error: hwerr=0x%llx "
- "(cleared)\n", (unsigned long long) hwerrs);
- dd->ipath_lasthwerror |= hwerrs;
-
- if (hwerrs & ~dd->ipath_hwe_bitsextant)
- ipath_dev_err(dd, "hwerror interrupt with unknown errors "
- "%llx set\n", (unsigned long long)
- (hwerrs & ~dd->ipath_hwe_bitsextant));
-
- if (hwerrs & INFINIPATH_HWE_IB_UC_MEMORYPARITYERR)
- ipath_sd7220_clr_ibpar(dd);
-
- ctrl = ipath_read_kreg32(dd, dd->ipath_kregs->kr_control);
- if ((ctrl & INFINIPATH_C_FREEZEMODE) && !ipath_diag_inuse) {
- /*
- * Parity errors in send memory are recoverable by h/w
- * just do housekeeping, exit freeze mode and continue.
- */
- if (hwerrs & ((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF |
- INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC)
- << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT)) {
- ipath_7220_txe_recover(dd);
- hwerrs &= ~((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF |
- INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC)
- << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT);
- }
- if (hwerrs) {
- /*
- * If any bits that we aren't ignoring are set, only make the
- * complaint once, in case it's stuck or recurring and we get
- * here multiple times.
- * Force link down, so switch knows, and
- * LEDs are turned off.
- */
- if (dd->ipath_flags & IPATH_INITTED) {
- ipath_set_linkstate(dd, IPATH_IB_LINKDOWN);
- ipath_setup_7220_setextled(dd,
- INFINIPATH_IBCS_L_STATE_DOWN,
- INFINIPATH_IBCS_LT_STATE_DISABLED);
- ipath_dev_err(dd, "Fatal Hardware Error "
- "(freeze mode), no longer"
- " usable, SN %.16s\n",
- dd->ipath_serial);
- isfatal = 1;
- }
- /*
- * Mark as having had an error for driver, and also
- * for /sys and status word mapped to user programs.
- * This marks unit as not usable, until reset.
- */
- *dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
- *dd->ipath_statusp |= IPATH_STATUS_HWERROR;
- dd->ipath_flags &= ~IPATH_INITTED;
- } else {
- ipath_dbg("Clearing freezemode on ignored or "
- "recovered hardware error\n");
- ipath_clear_freeze(dd);
- }
- }
-
- *msg = '\0';
-
- if (hwerrs & INFINIPATH_HWE_MEMBISTFAILED) {
- strlcat(msg, "[Memory BIST test failed, "
- "InfiniPath hardware unusable]", msgl);
- /* ignore from now on, so disable until driver reloaded */
- *dd->ipath_statusp |= IPATH_STATUS_HWERROR;
- dd->ipath_hwerrmask &= ~INFINIPATH_HWE_MEMBISTFAILED;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
- dd->ipath_hwerrmask);
- }
-
- ipath_format_hwerrors(hwerrs,
- ipath_7220_hwerror_msgs,
- ARRAY_SIZE(ipath_7220_hwerror_msgs),
- msg, msgl);
-
- if (hwerrs & (INFINIPATH_HWE_PCIEMEMPARITYERR_MASK
- << INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT)) {
- bits = (u32) ((hwerrs >>
- INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT) &
- INFINIPATH_HWE_PCIEMEMPARITYERR_MASK);
- snprintf(bitsmsg, sizeof bitsmsg,
- "[PCIe Mem Parity Errs %x] ", bits);
- strlcat(msg, bitsmsg, msgl);
- }
-
-#define _IPATH_PLL_FAIL (INFINIPATH_HWE_COREPLL_FBSLIP | \
- INFINIPATH_HWE_COREPLL_RFSLIP)
-
- if (hwerrs & _IPATH_PLL_FAIL) {
- snprintf(bitsmsg, sizeof bitsmsg,
- "[PLL failed (%llx), InfiniPath hardware unusable]",
- (unsigned long long) hwerrs & _IPATH_PLL_FAIL);
- strlcat(msg, bitsmsg, msgl);
- /* ignore from now on, so disable until driver reloaded */
- dd->ipath_hwerrmask &= ~(hwerrs & _IPATH_PLL_FAIL);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
- dd->ipath_hwerrmask);
- }
-
- if (hwerrs & INFINIPATH_HWE_SERDESPLLFAILED) {
- /*
- * If it occurs, it is left masked since the external
- * interface is unused.
- */
- dd->ipath_hwerrmask &= ~INFINIPATH_HWE_SERDESPLLFAILED;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
- dd->ipath_hwerrmask);
- }
-
- ipath_dev_err(dd, "%s hardware error\n", msg);
- /*
- * For the /sys status file: if no trailing } is copied, we'll
- * know it was truncated.
- */
- if (isfatal && !ipath_diag_inuse && dd->ipath_freezemsg)
- snprintf(dd->ipath_freezemsg, dd->ipath_freezelen,
- "{%s}", msg);
-bail:;
-}
-
-/**
- * ipath_7220_boardname - fill in the board name
- * @dd: the infinipath device
- * @name: the output buffer
- * @namelen: the size of the output buffer
- *
- * info is based on the board revision register
- */
-static int ipath_7220_boardname(struct ipath_devdata *dd, char *name,
- size_t namelen)
-{
- char *n = NULL;
- u8 boardrev = dd->ipath_boardrev;
- int ret;
-
- if (boardrev == 15) {
- /*
- * Emulator sometimes comes up all-ones, rather than zero.
- */
- boardrev = 0;
- dd->ipath_boardrev = boardrev;
- }
- switch (boardrev) {
- case 0:
- n = "InfiniPath_7220_Emulation";
- break;
- case 1:
- n = "InfiniPath_QLE7240";
- break;
- case 2:
- n = "InfiniPath_QLE7280";
- break;
- case 3:
- n = "InfiniPath_QLE7242";
- break;
- case 4:
- n = "InfiniPath_QEM7240";
- break;
- case 5:
- n = "InfiniPath_QMI7240";
- break;
- case 6:
- n = "InfiniPath_QMI7264";
- break;
- case 7:
- n = "InfiniPath_QMH7240";
- break;
- case 8:
- n = "InfiniPath_QME7240";
- break;
- case 9:
- n = "InfiniPath_QLE7250";
- break;
- case 10:
- n = "InfiniPath_QLE7290";
- break;
- case 11:
- n = "InfiniPath_QEM7250";
- break;
- case 12:
- n = "InfiniPath_QLE-Bringup";
- break;
- default:
- ipath_dev_err(dd,
- "Don't yet know about board with ID %u\n",
- boardrev);
- snprintf(name, namelen, "Unknown_InfiniPath_PCIe_%u",
- boardrev);
- break;
- }
- if (n)
- snprintf(name, namelen, "%s", n);
-
- if (dd->ipath_majrev != 5 || !dd->ipath_minrev ||
- dd->ipath_minrev > 2) {
- ipath_dev_err(dd, "Unsupported InfiniPath hardware "
- "revision %u.%u!\n",
- dd->ipath_majrev, dd->ipath_minrev);
- ret = 1;
- } else if (dd->ipath_minrev == 1 &&
- !(dd->ipath_flags & IPATH_INITTED)) {
- /* Rev1 chips are prototype. Complain at init, but allow use */
- ipath_dev_err(dd, "Unsupported hardware "
- "revision %u.%u, Contact support@qlogic.com\n",
- dd->ipath_majrev, dd->ipath_minrev);
- ret = 0;
- } else
- ret = 0;
-
- /*
- * Set here not in ipath_init_*_funcs because we have to do
- * it after we can read chip registers.
- */
- dd->ipath_ureg_align = 0x10000; /* 64KB alignment */
-
- return ret;
-}
-
-/**
- * ipath_7220_init_hwerrors - enable hardware errors
- * @dd: the infinipath device
- *
- * now that we have finished initializing everything that might reasonably
- * cause a hardware error, and cleared those error bits as they occur,
- * we can enable hardware errors in the mask (potentially enabling
- * freeze mode), and enable hardware errors as errors (along with
- * everything else) in errormask
- */
-static void ipath_7220_init_hwerrors(struct ipath_devdata *dd)
-{
- ipath_err_t val;
- u64 extsval;
-
- extsval = ipath_read_kreg64(dd, dd->ipath_kregs->kr_extstatus);
-
- if (!(extsval & (INFINIPATH_EXTS_MEMBIST_ENDTEST |
- INFINIPATH_EXTS_MEMBIST_DISABLED)))
- ipath_dev_err(dd, "MemBIST did not complete!\n");
- if (extsval & INFINIPATH_EXTS_MEMBIST_DISABLED)
- dev_info(&dd->pcidev->dev, "MemBIST is disabled.\n");
-
- val = ~0ULL; /* barring bugs, all hwerrors become interrupts, */
-
- if (!dd->ipath_boardrev) /* no PLL for Emulator */
- val &= ~INFINIPATH_HWE_SERDESPLLFAILED;
-
- if (dd->ipath_minrev == 1)
- val &= ~(1ULL << 42); /* TXE LaunchFIFO Parity rev1 issue */
-
- val &= ~INFINIPATH_HWE_IB_UC_MEMORYPARITYERR;
- dd->ipath_hwerrmask = val;
-
- /*
- * special trigger "error" is for debugging purposes. It
- * works around a processor/chipset problem. The error
- * interrupt allows us to count occurrences, but we don't
- * want to pay the overhead for normal use. Emulation only
- */
- if (!dd->ipath_boardrev)
- dd->ipath_maskederrs = INFINIPATH_E_SENDSPECIALTRIGGER;
-}
-
-/*
- * All detailed interaction with the SerDes has been moved to ipath_sd7220.c
- *
- * The portion of IBA7220-specific bringup_serdes() that actually deals with
- * registers and memory within the SerDes itself is ipath_sd7220_init().
- */
-
-/**
- * ipath_7220_bringup_serdes - bring up the serdes
- * @dd: the infinipath device
- */
-static int ipath_7220_bringup_serdes(struct ipath_devdata *dd)
-{
- int ret = 0;
- u64 val, prev_val, guid;
- int was_reset; /* Note whether uC was reset */
-
- ipath_dbg("Trying to bringup serdes\n");
-
- if (ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus) &
- INFINIPATH_HWE_SERDESPLLFAILED) {
- ipath_dbg("At start, serdes PLL failed bit set "
- "in hwerrstatus, clearing and continuing\n");
- ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
- INFINIPATH_HWE_SERDESPLLFAILED);
- }
-
- dd->ibdeltainprog = 1;
- dd->ibsymsnap =
- ipath_read_creg32(dd, dd->ipath_cregs->cr_ibsymbolerrcnt);
- dd->iblnkerrsnap =
- ipath_read_creg32(dd, dd->ipath_cregs->cr_iblinkerrrecovcnt);
-
- if (!dd->ipath_ibcddrctrl) {
- /* not on re-init after reset */
- dd->ipath_ibcddrctrl =
- ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcddrctrl);
-
- if (dd->ipath_link_speed_enabled ==
- (IPATH_IB_SDR | IPATH_IB_DDR))
- dd->ipath_ibcddrctrl |=
- IBA7220_IBC_SPEED_AUTONEG_MASK |
- IBA7220_IBC_IBTA_1_2_MASK;
- else
- dd->ipath_ibcddrctrl |=
- dd->ipath_link_speed_enabled == IPATH_IB_DDR
- ? IBA7220_IBC_SPEED_DDR :
- IBA7220_IBC_SPEED_SDR;
- if ((dd->ipath_link_width_enabled & (IB_WIDTH_1X |
- IB_WIDTH_4X)) == (IB_WIDTH_1X | IB_WIDTH_4X))
- dd->ipath_ibcddrctrl |= IBA7220_IBC_WIDTH_AUTONEG;
- else
- dd->ipath_ibcddrctrl |=
- dd->ipath_link_width_enabled == IB_WIDTH_4X
- ? IBA7220_IBC_WIDTH_4X_ONLY :
- IBA7220_IBC_WIDTH_1X_ONLY;
-
- /* always enable these on driver reload, not sticky */
- dd->ipath_ibcddrctrl |=
- IBA7220_IBC_RXPOL_MASK << IBA7220_IBC_RXPOL_SHIFT;
- dd->ipath_ibcddrctrl |=
- IBA7220_IBC_HRTBT_MASK << IBA7220_IBC_HRTBT_SHIFT;
- /*
- * automatic lane reversal detection for receive
- * doesn't work correctly in rev 1, so disable it
- * on that rev, otherwise enable (disabling not
- * sticky across reload for >rev1)
- */
- if (dd->ipath_minrev == 1)
- dd->ipath_ibcddrctrl &=
- ~IBA7220_IBC_LANE_REV_SUPPORTED;
- else
- dd->ipath_ibcddrctrl |=
- IBA7220_IBC_LANE_REV_SUPPORTED;
- }
-
- ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcddrctrl,
- dd->ipath_ibcddrctrl);
-
- ipath_write_kreg(dd, IPATH_KREG_OFFSET(IBNCModeCtrl), 0Ull);
-
- /* IBA7220 has SERDES MPU reset in D0 of what _was_ IBPLLCfg */
- val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibserdesctrl);
- /* remember if uC was in Reset or not, for dactrim */
- was_reset = (val & 1);
- ipath_cdbg(VERBOSE, "IBReset %s xgxsconfig %llx\n",
- was_reset ? "Asserted" : "Negated", (unsigned long long)
- ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig));
-
- if (dd->ipath_boardrev) {
- /*
- * Hardware is not emulator, and may have been reset. Init it.
- * Below will release reset, but needs to know if chip was
- * originally in reset, to only trim DACs on first time
- * after chip reset or powercycle (not driver reload)
- */
- ret = ipath_sd7220_init(dd, was_reset);
- }
-
- val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
- prev_val = val;
- val |= INFINIPATH_XGXS_FC_SAFE;
- if (val != prev_val) {
- ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
- ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
- }
- if (val & INFINIPATH_XGXS_RESET)
- val &= ~INFINIPATH_XGXS_RESET;
- if (val != prev_val)
- ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
-
- ipath_cdbg(VERBOSE, "done: xgxs=%llx from %llx\n",
- (unsigned long long)
- ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig),
- (unsigned long long) prev_val);
-
- guid = be64_to_cpu(dd->ipath_guid);
-
- if (!guid) {
- /* have to have something, so use likely unique tsc */
- guid = get_cycles();
- ipath_dbg("No GUID for heartbeat, faking %llx\n",
- (unsigned long long)guid);
- } else
- ipath_cdbg(VERBOSE, "Wrote %llX to HRTBT_GUID\n",
- (unsigned long long) guid);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_hrtbt_guid, guid);
- return ret;
-}
-
-static void ipath_7220_config_jint(struct ipath_devdata *dd,
- u16 idle_ticks, u16 max_packets)
-{
-
- /*
- * We can request a receive interrupt for 1 or more packets
- * from current offset.
- */
- if (idle_ticks == 0 || max_packets == 0)
- /* interrupt after one packet if no mitigation */
- dd->ipath_rhdrhead_intr_off =
- 1ULL << IBA7220_HDRHEAD_PKTINT_SHIFT;
- else
- /* Turn off RcvHdrHead interrupts if using mitigation */
- dd->ipath_rhdrhead_intr_off = 0ULL;
-
- /* refresh kernel RcvHdrHead registers... */
- ipath_write_ureg(dd, ur_rcvhdrhead,
- dd->ipath_rhdrhead_intr_off |
- dd->ipath_pd[0]->port_head, 0);
-
- dd->ipath_jint_max_packets = max_packets;
- dd->ipath_jint_idle_ticks = idle_ticks;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_jintreload,
- ((u64) max_packets << INFINIPATH_JINT_PACKETSHIFT) |
- idle_ticks);
-}
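
/*
 * [Editor's note -- worked example with made-up values, not part of the
 * original file.]  The JIntReload write above packs both mitigation
 * parameters into one register: packet count in the upper half (shift 16),
 * idle ticks in the lower.  For example, max_packets = 10, idle_ticks = 4:
 *
 *     ((u64) 10 << INFINIPATH_JINT_PACKETSHIFT) | 4  ==  0xA0004
 */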
-
-/**
- * ipath_7220_quiet_serdes - set serdes to txidle
- * @dd: the infinipath device
- * Called when driver is being unloaded
- */
-static void ipath_7220_quiet_serdes(struct ipath_devdata *dd)
-{
- u64 val;
- if (dd->ibsymdelta || dd->iblnkerrdelta ||
- dd->ibdeltainprog) {
- u64 diagc;
- /* enable counter writes */
- diagc = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwdiagctrl);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_hwdiagctrl,
- diagc | INFINIPATH_DC_COUNTERWREN);
-
- if (dd->ibsymdelta || dd->ibdeltainprog) {
- val = ipath_read_creg32(dd,
- dd->ipath_cregs->cr_ibsymbolerrcnt);
- if (dd->ibdeltainprog)
- val -= val - dd->ibsymsnap;
- val -= dd->ibsymdelta;
- ipath_write_creg(dd,
- dd->ipath_cregs->cr_ibsymbolerrcnt, val);
- }
- if (dd->iblnkerrdelta || dd->ibdeltainprog) {
- val = ipath_read_creg32(dd,
- dd->ipath_cregs->cr_iblinkerrrecovcnt);
- if (dd->ibdeltainprog)
- val -= val - dd->iblnkerrsnap;
- val -= dd->iblnkerrdelta;
- ipath_write_creg(dd,
- dd->ipath_cregs->cr_iblinkerrrecovcnt, val);
- }
-
- /* and disable counter writes */
- ipath_write_kreg(dd, dd->ipath_kregs->kr_hwdiagctrl, diagc);
- }
-
- dd->ipath_flags &= ~IPATH_IB_AUTONEG_INPROG;
- wake_up(&dd->ipath_autoneg_wait);
- cancel_delayed_work(&dd->ipath_autoneg_work);
- flush_scheduled_work();
- ipath_shutdown_relock_poll(dd);
- val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
- val |= INFINIPATH_XGXS_RESET;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
-}
-
-static int ipath_7220_intconfig(struct ipath_devdata *dd)
-{
- ipath_7220_config_jint(dd, dd->ipath_jint_idle_ticks,
- dd->ipath_jint_max_packets);
- return 0;
-}
-
-/**
- * ipath_setup_7220_setextled - set the state of the two external LEDs
- * @dd: the infinipath device
- * @lst: the L state
- * @ltst: the LT state
- *
- * These LEDs indicate the physical and logical state of IB link.
- * For this chip (at least with recommended board pinouts), LED1
- * is Yellow (logical state) and LED2 is Green (physical state).
- *
- * Note: We try to match the Mellanox HCA LED behavior as best
- * we can. Green indicates physical link state is OK (something is
- * plugged in, and we can train).
- * Amber indicates the link is logically up (ACTIVE).
- * Mellanox further blinks the amber LED to indicate data packet
- * activity, but we have no hardware support for that, so it would
- * require waking up every 10-20 msecs and checking the counters
- * on the chip, and then turning the LED off if appropriate. That's
- * visible overhead, so not something we will do.
- *
- */
-static void ipath_setup_7220_setextled(struct ipath_devdata *dd, u64 lst,
- u64 ltst)
-{
- u64 extctl, ledblink = 0;
- unsigned long flags = 0;
-
- /* the diags use the LED to indicate diag info, so we leave
- * the external LED alone when the diags are running */
- if (ipath_diag_inuse)
- return;
-
- /* Allow override of LED display for, e.g. Locating system in rack */
- if (dd->ipath_led_override) {
- ltst = (dd->ipath_led_override & IPATH_LED_PHYS)
- ? INFINIPATH_IBCS_LT_STATE_LINKUP
- : INFINIPATH_IBCS_LT_STATE_DISABLED;
- lst = (dd->ipath_led_override & IPATH_LED_LOG)
- ? INFINIPATH_IBCS_L_STATE_ACTIVE
- : INFINIPATH_IBCS_L_STATE_DOWN;
- }
-
- spin_lock_irqsave(&dd->ipath_gpio_lock, flags);
- extctl = dd->ipath_extctrl & ~(INFINIPATH_EXTC_LED1PRIPORT_ON |
- INFINIPATH_EXTC_LED2PRIPORT_ON);
- if (ltst == INFINIPATH_IBCS_LT_STATE_LINKUP) {
- extctl |= INFINIPATH_EXTC_LED1PRIPORT_ON;
- /*
- * counts are in chip clock (4ns) periods.
- * This is 1/16 sec (66.6ms) on,
- * 3/16 sec (187.5 ms) off, with packets rcvd
- */
- ledblink = ((66600*1000UL/4) << IBA7220_LEDBLINK_ON_SHIFT)
- | ((187500*1000UL/4) << IBA7220_LEDBLINK_OFF_SHIFT);
- }
- if (lst == INFINIPATH_IBCS_L_STATE_ACTIVE)
- extctl |= INFINIPATH_EXTC_LED2PRIPORT_ON;
- dd->ipath_extctrl = extctl;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_extctrl, extctl);
- spin_unlock_irqrestore(&dd->ipath_gpio_lock, flags);
-
- if (ledblink) /* blink the LED on packet receive */
- ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvpktledcnt,
- ledblink);
-}
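
/*
 * [Editor's note -- worked arithmetic for the blink values above, not part
 * of the original file.]  The counts are in 4ns chip-clock periods, so:
 *
 *     on:   66600*1000 / 4  ==  16,650,000 periods  ==  66.6 ms  (~1/16 s)
 *     off: 187500*1000 / 4  ==  46,875,000 periods  ==  187.5 ms (~3/16 s)
 */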
-
-/*
- * Similar to pci_intx(pdev, 1), except that we make sure
- * msi is off...
- */
-static void ipath_enable_intx(struct pci_dev *pdev)
-{
- u16 cw, new;
- int pos;
-
- /* first, turn on INTx */
- pci_read_config_word(pdev, PCI_COMMAND, &cw);
- new = cw & ~PCI_COMMAND_INTX_DISABLE;
- if (new != cw)
- pci_write_config_word(pdev, PCI_COMMAND, new);
-
- /* then turn off MSI */
- pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
- if (pos) {
- pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &cw);
- new = cw & ~PCI_MSI_FLAGS_ENABLE;
- if (new != cw)
- pci_write_config_word(pdev, pos + PCI_MSI_FLAGS, new);
- }
-}
-
-static int ipath_msi_enabled(struct pci_dev *pdev)
-{
- int pos, ret = 0;
-
- pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
- if (pos) {
- u16 cw;
-
- pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &cw);
- ret = !!(cw & PCI_MSI_FLAGS_ENABLE);
- }
- return ret;
-}
-
-/*
- * disable msi interrupt if enabled, and clear the flag.
- * flag is used primarily for the fallback to INTx, but
- * is also used in reinit after reset as a flag.
- */
-static void ipath_7220_nomsi(struct ipath_devdata *dd)
-{
- dd->ipath_msi_lo = 0;
-
- if (ipath_msi_enabled(dd->pcidev)) {
- /*
- * free, but don't zero; later kernels require
- * it be freed before disable_msi, so the intx
- * setup has to request it again.
- */
- if (dd->ipath_irq)
- free_irq(dd->ipath_irq, dd);
- pci_disable_msi(dd->pcidev);
- }
-}
-
-/*
- * ipath_setup_7220_cleanup - clean up any chip-specific stuff
- * @dd: the infinipath device
- *
- * Nothing but msi interrupt cleanup for now.
- *
- * This is called during driver unload.
- */
-static void ipath_setup_7220_cleanup(struct ipath_devdata *dd)
-{
- ipath_7220_nomsi(dd);
-}
-
-
-static void ipath_7220_pcie_params(struct ipath_devdata *dd, u32 boardrev)
-{
- u16 linkstat, minwidth, speed;
- int pos;
-
- pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_EXP);
- if (!pos) {
- ipath_dev_err(dd, "Can't find PCI Express capability!\n");
- goto bail;
- }
-
- pci_read_config_word(dd->pcidev, pos + PCI_EXP_LNKSTA,
- &linkstat);
- /*
- * speed is bits 0-3, link width is bits 4-8
- * no defines for them in headers
- */
- speed = linkstat & 0xf;
- linkstat >>= 4;
- linkstat &= 0x1f;
- dd->ipath_lbus_width = linkstat;
- switch (boardrev) {
- case 0:
- case 2:
- case 10:
- case 12:
- minwidth = 16; /* x16 capable boards */
- break;
- default:
- minwidth = 8; /* x8 capable boards */
- break;
- }
-
- switch (speed) {
- case 1:
- dd->ipath_lbus_speed = 2500; /* Gen1, 2.5GHz */
- break;
- case 2:
- dd->ipath_lbus_speed = 5000; /* Gen2, 5GHz */
- break;
- default: /* not defined, assume gen1 */
- dd->ipath_lbus_speed = 2500;
- break;
- }
-
- if (linkstat < minwidth)
- ipath_dev_err(dd,
- "PCIe width %u (x%u HCA), performance "
- "reduced\n", linkstat, minwidth);
- else
- ipath_cdbg(VERBOSE, "PCIe speed %u width %u (x%u HCA)\n",
- dd->ipath_lbus_speed, linkstat, minwidth);
-
- if (speed != 1)
- ipath_dev_err(dd,
- "PCIe linkspeed %u is incorrect; "
- "should be 1 (2500)!\n", speed);
-
-bail:
- /* fill in string, even on errors */
- snprintf(dd->ipath_lbus_info, sizeof(dd->ipath_lbus_info),
- "PCIe,%uMHz,x%u\n",
- dd->ipath_lbus_speed,
- dd->ipath_lbus_width);
- return;
-}
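
/*
 * [Editor's note -- worked example, not part of the original file.]  For a
 * hypothetical LNKSTA value of 0x0081 read above, the decode would be:
 *
 *     speed = 0x0081 & 0xf          ==  1  ->  ipath_lbus_speed = 2500
 *     width = (0x0081 >> 4) & 0x1f  ==  8  ->  ipath_lbus_width = 8
 *
 * and the info string becomes "PCIe,2500MHz,x8".
 */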
-
-
-/**
- * ipath_setup_7220_config - setup PCIe config related stuff
- * @dd: the infinipath device
- * @pdev: the PCI device
- *
- * The pci_enable_msi() call will fail on systems with MSI quirks
- * such as those with AMD8131, even if the device of interest is not
- * attached to that device (in the 2.6.13 - 2.6.15 kernels, at least; fixed
- * late in 2.6.16).
- * All that can be done is to edit the kernel source to remove the quirk
- * check until that is fixed.
- * We do not need to call enable_msi() for our HyperTransport chip,
- * even though it uses MSI, and we want to avoid the quirk warning,
- * so we call enable_msi() only for PCIe. If we do end up needing
- * pci_enable_msi at some point in the future for HT, we'll move the
- * call back into the main init_one code.
- * We save the msi lo and hi values, so we can restore them after
- * chip reset (the kernel PCI infrastructure doesn't yet handle that
- * correctly).
- */
-static int ipath_setup_7220_config(struct ipath_devdata *dd,
- struct pci_dev *pdev)
-{
- int pos, ret = -1;
- u32 boardrev;
-
- dd->ipath_msi_lo = 0; /* used as a flag during reset processing */
-
- pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
- if (!strcmp(int_type, "force_msi") || !strcmp(int_type, "auto"))
- ret = pci_enable_msi(pdev);
- if (ret) {
- if (!strcmp(int_type, "force_msi")) {
- ipath_dev_err(dd, "pci_enable_msi failed: %d, "
- "force_msi is on, so not continuing.\n",
- ret);
- return ret;
- }
-
- ipath_enable_intx(pdev);
- if (!strcmp(int_type, "auto"))
- ipath_dev_err(dd, "pci_enable_msi failed: %d, "
- "falling back to INTx\n", ret);
- } else if (pos) {
- u16 control;
- pci_read_config_dword(pdev, pos + PCI_MSI_ADDRESS_LO,
- &dd->ipath_msi_lo);
- pci_read_config_dword(pdev, pos + PCI_MSI_ADDRESS_HI,
- &dd->ipath_msi_hi);
- pci_read_config_word(pdev, pos + PCI_MSI_FLAGS,
- &control);
- /* now save the data (vector) info */
- pci_read_config_word(pdev,
- pos + ((control & PCI_MSI_FLAGS_64BIT)
- ? PCI_MSI_DATA_64 :
- PCI_MSI_DATA_32),
- &dd->ipath_msi_data);
- } else
- ipath_dev_err(dd, "Can't find MSI capability, "
- "can't save MSI settings for reset\n");
-
- dd->ipath_irq = pdev->irq;
-
- /*
- * We save the cachelinesize also, although it doesn't
- * really matter.
- */
- pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE,
- &dd->ipath_pci_cacheline);
-
- /*
- * This function is called early, before ipath_boardrev is set. It is
- * too early in init to use ipath_read_kreg64(), so use readq().
- */
- boardrev = (readq(&dd->ipath_kregbase[dd->ipath_kregs->kr_revision])
- >> INFINIPATH_R_BOARDID_SHIFT) & INFINIPATH_R_BOARDID_MASK;
-
- ipath_7220_pcie_params(dd, boardrev);
-
- dd->ipath_flags |= IPATH_NODMA_RTAIL | IPATH_HAS_SEND_DMA |
- IPATH_HAS_PBC_CNT | IPATH_HAS_THRESH_UPDATE;
- dd->ipath_pioupd_thresh = 4U; /* set default update threshold */
- return 0;
-}
-
-static void ipath_init_7220_variables(struct ipath_devdata *dd)
-{
- /*
- * setup the register offsets, since they are different for each
- * chip
- */
- dd->ipath_kregs = &ipath_7220_kregs;
- dd->ipath_cregs = &ipath_7220_cregs;
-
- /*
- * bits for selecting i2c direction and values,
- * used for I2C serial flash
- */
- dd->ipath_gpio_sda_num = _IPATH_GPIO_SDA_NUM;
- dd->ipath_gpio_scl_num = _IPATH_GPIO_SCL_NUM;
- dd->ipath_gpio_sda = IPATH_GPIO_SDA;
- dd->ipath_gpio_scl = IPATH_GPIO_SCL;
-
- /*
- * Fill in data for field-values that change in IBA7220.
- * We dynamically specify only the mask for LINKTRAININGSTATE
- * and only the shift for LINKSTATE, as they are the only ones
- * that change. Also precalculate the 3 link states of interest
- * and the combined mask.
- */
- dd->ibcs_ls_shift = IBA7220_IBCS_LINKSTATE_SHIFT;
- dd->ibcs_lts_mask = IBA7220_IBCS_LINKTRAININGSTATE_MASK;
- dd->ibcs_mask = (INFINIPATH_IBCS_LINKSTATE_MASK <<
- dd->ibcs_ls_shift) | dd->ibcs_lts_mask;
- dd->ib_init = (INFINIPATH_IBCS_LT_STATE_LINKUP <<
- INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) |
- (INFINIPATH_IBCS_L_STATE_INIT << dd->ibcs_ls_shift);
- dd->ib_arm = (INFINIPATH_IBCS_LT_STATE_LINKUP <<
- INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) |
- (INFINIPATH_IBCS_L_STATE_ARM << dd->ibcs_ls_shift);
- dd->ib_active = (INFINIPATH_IBCS_LT_STATE_LINKUP <<
- INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) |
- (INFINIPATH_IBCS_L_STATE_ACTIVE << dd->ibcs_ls_shift);
-
- /*
- * Fill in data for ibcc field-values that change in IBA7220.
- * We dynamically specify only the mask for LINKINITCMD
- * and only the shift for LINKCMD and MAXPKTLEN, as they are
- * the only ones that change.
- */
- dd->ibcc_lic_mask = IBA7220_IBCC_LINKINITCMD_MASK;
- dd->ibcc_lc_shift = IBA7220_IBCC_LINKCMD_SHIFT;
- dd->ibcc_mpl_shift = IBA7220_IBCC_MAXPKTLEN_SHIFT;
-
- /* Fill in shifts for RcvCtrl. */
- dd->ipath_r_portenable_shift = INFINIPATH_R_PORTENABLE_SHIFT;
- dd->ipath_r_intravail_shift = IBA7220_R_INTRAVAIL_SHIFT;
- dd->ipath_r_tailupd_shift = IBA7220_R_TAILUPD_SHIFT;
- dd->ipath_r_portcfg_shift = IBA7220_R_PORTCFG_SHIFT;
-
- /* variables for sanity checking interrupt and errors */
- dd->ipath_hwe_bitsextant =
- (INFINIPATH_HWE_RXEMEMPARITYERR_MASK <<
- INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT) |
- (INFINIPATH_HWE_TXEMEMPARITYERR_MASK <<
- INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT) |
- (INFINIPATH_HWE_PCIEMEMPARITYERR_MASK <<
- INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT) |
- INFINIPATH_HWE_PCIE1PLLFAILED |
- INFINIPATH_HWE_PCIE0PLLFAILED |
- INFINIPATH_HWE_PCIEPOISONEDTLP |
- INFINIPATH_HWE_PCIECPLTIMEOUT |
- INFINIPATH_HWE_PCIEBUSPARITYXTLH |
- INFINIPATH_HWE_PCIEBUSPARITYXADM |
- INFINIPATH_HWE_PCIEBUSPARITYRADM |
- INFINIPATH_HWE_MEMBISTFAILED |
- INFINIPATH_HWE_COREPLL_FBSLIP |
- INFINIPATH_HWE_COREPLL_RFSLIP |
- INFINIPATH_HWE_SERDESPLLFAILED |
- INFINIPATH_HWE_IBCBUSTOSPCPARITYERR |
- INFINIPATH_HWE_IBCBUSFRSPCPARITYERR |
- INFINIPATH_HWE_PCIECPLDATAQUEUEERR |
- INFINIPATH_HWE_PCIECPLHDRQUEUEERR |
- INFINIPATH_HWE_SDMAMEMREADERR |
- INFINIPATH_HWE_CLK_UC_PLLNOTLOCKED |
- INFINIPATH_HWE_PCIESERDESQ0PCLKNOTDETECT |
- INFINIPATH_HWE_PCIESERDESQ1PCLKNOTDETECT |
- INFINIPATH_HWE_PCIESERDESQ2PCLKNOTDETECT |
- INFINIPATH_HWE_PCIESERDESQ3PCLKNOTDETECT |
- INFINIPATH_HWE_DDSRXEQMEMORYPARITYERR |
- INFINIPATH_HWE_IB_UC_MEMORYPARITYERR |
- INFINIPATH_HWE_PCIE_UC_OCT0MEMORYPARITYERR |
- INFINIPATH_HWE_PCIE_UC_OCT1MEMORYPARITYERR;
- dd->ipath_i_bitsextant =
- INFINIPATH_I_SDMAINT | INFINIPATH_I_SDMADISABLED |
- (INFINIPATH_I_RCVURG_MASK << INFINIPATH_I_RCVURG_SHIFT) |
- (INFINIPATH_I_RCVAVAIL_MASK <<
- INFINIPATH_I_RCVAVAIL_SHIFT) |
- INFINIPATH_I_ERROR | INFINIPATH_I_SPIOSENT |
- INFINIPATH_I_SPIOBUFAVAIL | INFINIPATH_I_GPIO |
- INFINIPATH_I_JINT | INFINIPATH_I_SERDESTRIMDONE;
- dd->ipath_e_bitsextant =
- INFINIPATH_E_RFORMATERR | INFINIPATH_E_RVCRC |
- INFINIPATH_E_RICRC | INFINIPATH_E_RMINPKTLEN |
- INFINIPATH_E_RMAXPKTLEN | INFINIPATH_E_RLONGPKTLEN |
- INFINIPATH_E_RSHORTPKTLEN | INFINIPATH_E_RUNEXPCHAR |
- INFINIPATH_E_RUNSUPVL | INFINIPATH_E_REBP |
- INFINIPATH_E_RIBFLOW | INFINIPATH_E_RBADVERSION |
- INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL |
- INFINIPATH_E_RBADTID | INFINIPATH_E_RHDRLEN |
- INFINIPATH_E_RHDR | INFINIPATH_E_RIBLOSTLINK |
- INFINIPATH_E_SENDSPECIALTRIGGER |
- INFINIPATH_E_SDMADISABLED | INFINIPATH_E_SMINPKTLEN |
- INFINIPATH_E_SMAXPKTLEN | INFINIPATH_E_SUNDERRUN |
- INFINIPATH_E_SPKTLEN | INFINIPATH_E_SDROPPEDSMPPKT |
- INFINIPATH_E_SDROPPEDDATAPKT |
- INFINIPATH_E_SPIOARMLAUNCH | INFINIPATH_E_SUNEXPERRPKTNUM |
- INFINIPATH_E_SUNSUPVL | INFINIPATH_E_SENDBUFMISUSE |
- INFINIPATH_E_SDMAGENMISMATCH | INFINIPATH_E_SDMAOUTOFBOUND |
- INFINIPATH_E_SDMATAILOUTOFBOUND | INFINIPATH_E_SDMABASE |
- INFINIPATH_E_SDMA1STDESC | INFINIPATH_E_SDMARPYTAG |
- INFINIPATH_E_SDMADWEN | INFINIPATH_E_SDMAMISSINGDW |
- INFINIPATH_E_SDMAUNEXPDATA |
- INFINIPATH_E_IBSTATUSCHANGED | INFINIPATH_E_INVALIDADDR |
- INFINIPATH_E_RESET | INFINIPATH_E_HARDWARE |
- INFINIPATH_E_SDMADESCADDRMISALIGN |
- INFINIPATH_E_INVALIDEEPCMD;
-
- dd->ipath_i_rcvavail_mask = INFINIPATH_I_RCVAVAIL_MASK;
- dd->ipath_i_rcvurg_mask = INFINIPATH_I_RCVURG_MASK;
- dd->ipath_i_rcvavail_shift = INFINIPATH_I_RCVAVAIL_SHIFT;
- dd->ipath_i_rcvurg_shift = INFINIPATH_I_RCVURG_SHIFT;
- dd->ipath_flags |= IPATH_INTREG_64 | IPATH_HAS_MULT_IB_SPEED
- | IPATH_HAS_LINK_LATENCY;
-
- /*
- * EEPROM error log 0 is TXE Parity errors. 1 is RXE Parity.
- * 2 is Some Misc, 3 is reserved for future.
- */
- dd->ipath_eep_st_masks[0].hwerrs_to_log =
- INFINIPATH_HWE_TXEMEMPARITYERR_MASK <<
- INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT;
-
- dd->ipath_eep_st_masks[1].hwerrs_to_log =
- INFINIPATH_HWE_RXEMEMPARITYERR_MASK <<
- INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT;
-
- dd->ipath_eep_st_masks[2].errs_to_log = INFINIPATH_E_RESET;
-
- ipath_linkrecovery = 0;
-
- init_waitqueue_head(&dd->ipath_autoneg_wait);
- INIT_DELAYED_WORK(&dd->ipath_autoneg_work, autoneg_work);
-
- dd->ipath_link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
- dd->ipath_link_speed_supported = IPATH_IB_SDR | IPATH_IB_DDR;
-
- dd->ipath_link_width_enabled = dd->ipath_link_width_supported;
- dd->ipath_link_speed_enabled = dd->ipath_link_speed_supported;
- /*
- * set the initial values to a reasonable default; they will be set
- * for real when the link is up.
- */
- dd->ipath_link_width_active = IB_WIDTH_4X;
- dd->ipath_link_speed_active = IPATH_IB_SDR;
- dd->delay_mult = rate_to_delay[0][1];
-}
-
-
-/*
- * Setup the MSI stuff again after a reset. I'd like to just call
- * pci_enable_msi() and request_irq() again, but when I do that,
- * the MSI enable bit doesn't get set in the command word, and
- * we switch to a different interrupt vector, which is confusing,
- * so I instead just do it all inline. Perhaps we can somehow tie this
- * into the PCIe hotplug support at some point.
- * Note, because I'm doing it all here, I don't call pci_disable_msi()
- * or free_irq() at the start of ipath_setup_7220_reset().
- */
-static int ipath_reinit_msi(struct ipath_devdata *dd)
-{
- int ret = 0;
-
- int pos;
- u16 control;
- if (!dd->ipath_msi_lo) /* Using intX, or init problem */
- goto bail;
-
- pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI);
- if (!pos) {
- ipath_dev_err(dd, "Can't find MSI capability, "
- "can't restore MSI settings\n");
- goto bail;
- }
- ipath_cdbg(VERBOSE, "Writing msi_lo 0x%x to config offset 0x%x\n",
- dd->ipath_msi_lo, pos + PCI_MSI_ADDRESS_LO);
- pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_LO,
- dd->ipath_msi_lo);
- ipath_cdbg(VERBOSE, "Writing msi_lo 0x%x to config offset 0x%x\n",
- dd->ipath_msi_hi, pos + PCI_MSI_ADDRESS_HI);
- pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_HI,
- dd->ipath_msi_hi);
- pci_read_config_word(dd->pcidev, pos + PCI_MSI_FLAGS, &control);
- if (!(control & PCI_MSI_FLAGS_ENABLE)) {
- ipath_cdbg(VERBOSE, "MSI control at off %x was %x, "
- "setting MSI enable (%x)\n", pos + PCI_MSI_FLAGS,
- control, control | PCI_MSI_FLAGS_ENABLE);
- control |= PCI_MSI_FLAGS_ENABLE;
- pci_write_config_word(dd->pcidev, pos + PCI_MSI_FLAGS,
- control);
- }
- /* now rewrite the data (vector) info */
- pci_write_config_word(dd->pcidev, pos +
- ((control & PCI_MSI_FLAGS_64BIT) ? 12 : 8),
- dd->ipath_msi_data);
- ret = 1;
-
-bail:
- if (!ret) {
- ipath_dbg("Using INTx, MSI disabled or not configured\n");
- ipath_enable_intx(dd->pcidev);
- ret = 1;
- }
- /*
- * We restore the cachelinesize also, although it doesn't really
- * matter.
- */
- pci_write_config_byte(dd->pcidev, PCI_CACHE_LINE_SIZE,
- dd->ipath_pci_cacheline);
- /* and now set the pci master bit again */
- pci_set_master(dd->pcidev);
-
- return ret;
-}
-
-/*
- * This routine sleeps, so it can only be called from user context, not
- * from interrupt context. If we need interrupt context, we can split
- * it into two routines.
- */
-static int ipath_setup_7220_reset(struct ipath_devdata *dd)
-{
- u64 val;
- int i;
- int ret;
- u16 cmdval;
-
- pci_read_config_word(dd->pcidev, PCI_COMMAND, &cmdval);
-
- /* Use dev_err so it shows up in logs, etc. */
- ipath_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->ipath_unit);
-
- /* keep chip from being accessed in a few places */
- dd->ipath_flags &= ~(IPATH_INITTED | IPATH_PRESENT);
- val = dd->ipath_control | INFINIPATH_C_RESET;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_control, val);
- mb();
-
- for (i = 1; i <= 5; i++) {
- int r;
-
- /*
- * Allow MBIST, etc. to complete; longer on each retry.
- * We sometimes get machine checks from bus timeout if no
- * response, so for now, make it *really* long.
- */
- msleep(1000 + (1 + i) * 2000);
- r = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0,
- dd->ipath_pcibar0);
- if (r)
- ipath_dev_err(dd, "rewrite of BAR0 failed: %d\n", r);
- r = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_1,
- dd->ipath_pcibar1);
- if (r)
- ipath_dev_err(dd, "rewrite of BAR1 failed: %d\n", r);
- /* now re-enable memory access */
- pci_write_config_word(dd->pcidev, PCI_COMMAND, cmdval);
- r = pci_enable_device(dd->pcidev);
- if (r)
- ipath_dev_err(dd, "pci_enable_device failed after "
- "reset: %d\n", r);
- /*
- * Whether or not it fully enabled, mark it as present
- * again (but not INITTED).
- */
- dd->ipath_flags |= IPATH_PRESENT;
- val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_revision);
- if (val == dd->ipath_revision) {
- ipath_cdbg(VERBOSE, "Got matching revision "
- "register %llx on try %d\n",
- (unsigned long long) val, i);
- ret = ipath_reinit_msi(dd);
- goto bail;
- }
- /* Probably getting -1 back */
- ipath_dbg("Didn't get expected revision register, "
- "got %llx, try %d\n", (unsigned long long) val,
- i + 1);
- }
- ret = 0; /* failed */
-
-bail:
- if (ret)
- ipath_7220_pcie_params(dd, dd->ipath_boardrev);
-
- return ret;
-}
-
-/**
- * ipath_7220_put_tid - write a TID to the chip
- * @dd: the infinipath device
- * @tidptr: pointer to the expected TID (in chip) to update
- * @tidtype: 0 for eager, 1 for expected
- * @pa: physical address of in memory buffer; ipath_tidinvalid if freeing
- *
- * This exists as a separate routine to allow for selection of the
- * appropriate "flavor". The static calls in cleanup just use the
- * revision-agnostic form, as they are not performance critical.
- */
-static void ipath_7220_put_tid(struct ipath_devdata *dd, u64 __iomem *tidptr,
- u32 type, unsigned long pa)
-{
- if (pa != dd->ipath_tidinvalid) {
- u64 chippa = pa >> IBA7220_TID_PA_SHIFT;
-
- /* paranoia checks */
- if (pa != (chippa << IBA7220_TID_PA_SHIFT)) {
- dev_info(&dd->pcidev->dev, "BUG: physaddr %lx "
- "not 2KB aligned!\n", pa);
- return;
- }
- if (chippa >= (1UL << IBA7220_TID_SZ_SHIFT)) {
- ipath_dev_err(dd,
- "BUG: Physical page address 0x%lx "
- "larger than supported\n", pa);
- return;
- }
-
- if (type == RCVHQ_RCV_TYPE_EAGER)
- chippa |= dd->ipath_tidtemplate;
- else /* for now, always full 4KB page */
- chippa |= IBA7220_TID_SZ_4K;
- writeq(chippa, tidptr);
- } else
- writeq(pa, tidptr);
- mmiowb();
-}
-
-/**
- * ipath_7220_clear_tids - clear all TID entries for a port, expected and eager
- * @dd: the infinipath device
- * @port: the port
- *
- * clear all TID entries for a port, expected and eager.
- * Used from ipath_close(). On this chip, TIDs are only 32 bits,
- * not 64, but they are still on 64 bit boundaries, so tidbase
- * is declared as u64 * for the pointer math, even though we write 32 bits
- */
-static void ipath_7220_clear_tids(struct ipath_devdata *dd, unsigned port)
-{
- u64 __iomem *tidbase;
- unsigned long tidinv;
- int i;
-
- if (!dd->ipath_kregbase)
- return;
-
- ipath_cdbg(VERBOSE, "Invalidate TIDs for port %u\n", port);
-
- tidinv = dd->ipath_tidinvalid;
- tidbase = (u64 __iomem *)
- ((char __iomem *)(dd->ipath_kregbase) +
- dd->ipath_rcvtidbase +
- port * dd->ipath_rcvtidcnt * sizeof(*tidbase));
-
- for (i = 0; i < dd->ipath_rcvtidcnt; i++)
- ipath_7220_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
- tidinv);
-
- tidbase = (u64 __iomem *)
- ((char __iomem *)(dd->ipath_kregbase) +
- dd->ipath_rcvegrbase + port_egrtid_idx(dd, port)
- * sizeof(*tidbase));
-
- for (i = port ? dd->ipath_rcvegrcnt : dd->ipath_p0_rcvegrcnt; i; i--)
- ipath_7220_put_tid(dd, &tidbase[i-1], RCVHQ_RCV_TYPE_EAGER,
- tidinv);
-}
-
-/**
- * ipath_7220_tidtemplate - setup constants for TID updates
- * @dd: the infinipath device
- *
- * We set up stuff that we use a lot, to avoid calculating it each time.
- */
-static void ipath_7220_tidtemplate(struct ipath_devdata *dd)
-{
- /* For now, we always allocate 4KB buffers (at init) so we can
- * receive max size packets. We may want a module parameter to
- * specify 2KB or 4KB and/or make it per port instead of per device
- * for those who want to reduce memory footprint. Note that the
- * ipath_rcvhdrentsize size must be large enough to hold the largest
- * IB header (currently 96 bytes) that we expect to handle (plus of
- * course the 2 dwords of RHF).
- */
- if (dd->ipath_rcvegrbufsize == 2048)
- dd->ipath_tidtemplate = IBA7220_TID_SZ_2K;
- else if (dd->ipath_rcvegrbufsize == 4096)
- dd->ipath_tidtemplate = IBA7220_TID_SZ_4K;
- else {
- dev_info(&dd->pcidev->dev, "BUG: unsupported egrbufsize "
- "%u, using %u\n", dd->ipath_rcvegrbufsize,
- 4096);
- dd->ipath_tidtemplate = IBA7220_TID_SZ_4K;
- }
- dd->ipath_tidinvalid = 0;
-}
-
-static int ipath_7220_early_init(struct ipath_devdata *dd)
-{
- u32 i, s;
-
- if (strcmp(int_type, "auto") &&
- strcmp(int_type, "force_msi") &&
- strcmp(int_type, "force_intx")) {
- ipath_dev_err(dd, "Invalid interrupt_type: '%s', expecting "
- "auto, force_msi or force_intx\n", int_type);
- return -EINVAL;
- }
-
- /*
- * Control[4] has been added to change the arbitration within
- * the SDMA engine between favoring data fetches over descriptor
- * fetches. ipath_sdma_fetch_arb==0 gives data fetches priority.
- */
- if (ipath_sdma_fetch_arb && (dd->ipath_minrev > 1))
- dd->ipath_control |= 1<<4;
-
- dd->ipath_flags |= IPATH_4BYTE_TID;
-
- /*
- * For openfabrics, we need to be able to handle an IB header of
- * 24 dwords. The HT chip has arbitrarily sized receive buffers, so we
- * made them the same size as the PIO buffers. This chip does not
- * handle arbitrary size buffers, so we need the header large enough
- * to handle the largest IB header, but still have room for a 2KB MTU
- * standard IB packet.
- */
- dd->ipath_rcvhdrentsize = 24;
- dd->ipath_rcvhdrsize = IPATH_DFLT_RCVHDRSIZE;
- dd->ipath_rhf_offset =
- dd->ipath_rcvhdrentsize - sizeof(u64) / sizeof(u32);
-
- dd->ipath_rcvegrbufsize = ipath_mtu4096 ? 4096 : 2048;
- /*
- * the min() check here is currently a nop, but it may not always
- * be, depending on just how we do ipath_rcvegrbufsize
- */
- dd->ipath_ibmaxlen = min(ipath_mtu4096 ? dd->ipath_piosize4k :
- dd->ipath_piosize2k,
- dd->ipath_rcvegrbufsize +
- (dd->ipath_rcvhdrentsize << 2));
- dd->ipath_init_ibmaxlen = dd->ipath_ibmaxlen;
-
- ipath_7220_config_jint(dd, INFINIPATH_JINT_DEFAULT_IDLE_TICKS,
- INFINIPATH_JINT_DEFAULT_MAX_PACKETS);
-
- if (dd->ipath_boardrev) /* no eeprom on emulator */
- ipath_get_eeprom_info(dd);
-
- /* start of code to check and print procmon */
- s = ipath_read_kreg32(dd, IPATH_KREG_OFFSET(ProcMon));
- s &= ~(1U<<31); /* clear done bit */
- s |= 1U<<14; /* clear counter (write 1 to clear) */
- ipath_write_kreg(dd, IPATH_KREG_OFFSET(ProcMon), s);
- /* make sure clear_counter is low long enough before starting */
- ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
- ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
-
- s &= ~(1U<<14); /* allow counter to count (before starting) */
- ipath_write_kreg(dd, IPATH_KREG_OFFSET(ProcMon), s);
- ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
- ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
- s = ipath_read_kreg32(dd, IPATH_KREG_OFFSET(ProcMon));
-
- s |= 1U<<15; /* start the counter */
- s &= ~(1U<<31); /* clear done bit */
- s &= ~0x7ffU; /* clear frequency bits */
- s |= 0xe29; /* set frequency bits, in case cleared */
- ipath_write_kreg(dd, IPATH_KREG_OFFSET(ProcMon), s);
-
- s = 0;
- for (i = 500; i > 0 && !(s&(1ULL<<31)); i--) {
- ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
- s = ipath_read_kreg32(dd, IPATH_KREG_OFFSET(ProcMon));
- }
- if (!(s&(1U<<31)))
- ipath_dev_err(dd, "ProcMon register not valid: 0x%x\n", s);
- else
- ipath_dbg("ProcMon=0x%x, count=0x%x\n", s, (s>>16)&0x1ff);
-
- return 0;
-}
-
-/**
- * ipath_7220_get_base_info - set chip-specific flags for user code
- * @pd: the infinipath port
- * @kbase: ipath_base_info pointer
- *
- * We set the PCIE flag because the lower bandwidth on PCIe vs
- * HyperTransport can affect some user packet algorithms.
- */
-static int ipath_7220_get_base_info(struct ipath_portdata *pd, void *kbase)
-{
- struct ipath_base_info *kinfo = kbase;
-
- kinfo->spi_runtime_flags |=
- IPATH_RUNTIME_PCIE | IPATH_RUNTIME_NODMA_RTAIL |
- IPATH_RUNTIME_SDMA;
-
- return 0;
-}
-
-static void ipath_7220_free_irq(struct ipath_devdata *dd)
-{
- free_irq(dd->ipath_irq, dd);
- dd->ipath_irq = 0;
-}
-
-static struct ipath_message_header *
-ipath_7220_get_msgheader(struct ipath_devdata *dd, __le32 *rhf_addr)
-{
- u32 offset = ipath_hdrget_offset(rhf_addr);
-
- return (struct ipath_message_header *)
- (rhf_addr - dd->ipath_rhf_offset + offset);
-}
-
-static void ipath_7220_config_ports(struct ipath_devdata *dd, ushort cfgports)
-{
- u32 nchipports;
-
- nchipports = ipath_read_kreg32(dd, dd->ipath_kregs->kr_portcnt);
- if (!cfgports) {
- int ncpus = num_online_cpus();
-
- if (ncpus <= 4)
- dd->ipath_portcnt = 5;
- else if (ncpus <= 8)
- dd->ipath_portcnt = 9;
- if (dd->ipath_portcnt)
- ipath_dbg("Auto-configured for %u ports, %d cpus "
- "online\n", dd->ipath_portcnt, ncpus);
- } else if (cfgports <= nchipports)
- dd->ipath_portcnt = cfgports;
- if (!dd->ipath_portcnt) /* none of the above, set to max */
- dd->ipath_portcnt = nchipports;
- /*
- * chip can be configured for 5, 9, or 17 ports, and choice
- * affects number of eager TIDs per port (1K, 2K, 4K).
- */
- if (dd->ipath_portcnt > 9)
- dd->ipath_rcvctrl |= 2ULL << IBA7220_R_PORTCFG_SHIFT;
- else if (dd->ipath_portcnt > 5)
- dd->ipath_rcvctrl |= 1ULL << IBA7220_R_PORTCFG_SHIFT;
- /* else configure for default 5 receive ports */
- ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
- dd->ipath_rcvctrl);
- dd->ipath_p0_rcvegrcnt = 2048; /* always */
- if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
- dd->ipath_pioreserved = 3; /* kpiobufs used for PIO */
-}
-
-
-static int ipath_7220_get_ib_cfg(struct ipath_devdata *dd, int which)
-{
- int lsb, ret = 0;
- u64 maskr; /* right-justified mask */
-
- switch (which) {
- case IPATH_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */
- lsb = IBA7220_IBC_HRTBT_SHIFT;
- maskr = IBA7220_IBC_HRTBT_MASK;
- break;
-
- case IPATH_IB_CFG_LWID_ENB: /* Get allowed Link-width */
- ret = dd->ipath_link_width_enabled;
- goto done;
-
- case IPATH_IB_CFG_LWID: /* Get currently active Link-width */
- ret = dd->ipath_link_width_active;
- goto done;
-
- case IPATH_IB_CFG_SPD_ENB: /* Get allowed Link speeds */
- ret = dd->ipath_link_speed_enabled;
- goto done;
-
- case IPATH_IB_CFG_SPD: /* Get current Link spd */
- ret = dd->ipath_link_speed_active;
- goto done;
-
- case IPATH_IB_CFG_RXPOL_ENB: /* Get Auto-RX-polarity enable */
- lsb = IBA7220_IBC_RXPOL_SHIFT;
- maskr = IBA7220_IBC_RXPOL_MASK;
- break;
-
- case IPATH_IB_CFG_LREV_ENB: /* Get Auto-Lane-reversal enable */
- lsb = IBA7220_IBC_LREV_SHIFT;
- maskr = IBA7220_IBC_LREV_MASK;
- break;
-
- case IPATH_IB_CFG_LINKLATENCY:
- ret = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcddrstatus)
- & IBA7220_DDRSTAT_LINKLAT_MASK;
- goto done;
-
- default:
- ret = -ENOTSUPP;
- goto done;
- }
- ret = (int)((dd->ipath_ibcddrctrl >> lsb) & maskr);
-done:
- return ret;
-}
-
-static int ipath_7220_set_ib_cfg(struct ipath_devdata *dd, int which, u32 val)
-{
- int lsb, ret = 0, setforce = 0;
- u64 maskr; /* right-justified mask */
-
- switch (which) {
- case IPATH_IB_CFG_LIDLMC:
- /*
- * Set LID and LMC. Combined to avoid possible hazard;
- * caller puts LMC in 16 MSbits, DLID in 16 LSbits of val.
- */
- lsb = IBA7220_IBC_DLIDLMC_SHIFT;
- maskr = IBA7220_IBC_DLIDLMC_MASK;
- break;
-
- case IPATH_IB_CFG_HRTBT: /* set Heartbeat off/enable/auto */
- if (val & IPATH_IB_HRTBT_ON &&
- (dd->ipath_flags & IPATH_NO_HRTBT))
- goto bail;
- lsb = IBA7220_IBC_HRTBT_SHIFT;
- maskr = IBA7220_IBC_HRTBT_MASK;
- break;
-
- case IPATH_IB_CFG_LWID_ENB: /* set allowed Link-width */
- /*
- * As with speed, only write the actual register if
- * the link is currently down; otherwise it takes effect
- * on the next link change.
- */
- dd->ipath_link_width_enabled = val;
- if ((dd->ipath_flags & (IPATH_LINKDOWN|IPATH_LINKINIT)) !=
- IPATH_LINKDOWN)
- goto bail;
- /*
- * We set the IPATH_IB_FORCE_NOTIFY bit so updown
- * will get called because we want to update
- * link_width_active, and the change may not take
- * effect for some time (if we are in POLL), so this
- * flag will force the updown routine to be called
- * on the next ibstatuschange down interrupt, even
- * if it's not a down->up transition.
- */
- val--; /* convert from IB to chip */
- maskr = IBA7220_IBC_WIDTH_MASK;
- lsb = IBA7220_IBC_WIDTH_SHIFT;
- setforce = 1;
- dd->ipath_flags |= IPATH_IB_FORCE_NOTIFY;
- break;
-
- case IPATH_IB_CFG_SPD_ENB: /* set allowed Link speeds */
- /*
- * If we turn off IB1.2, need to preset SerDes defaults,
- * but not right now. Set a flag for the next time
- * we command the link down. As with width, only write the
- * actual register if the link is currently down; otherwise it
- * takes effect on the next link change. Since the setting is being
- * explicitly requested (via MAD or sysfs), clear autoneg
- * failure status if speed autoneg is enabled.
- */
- dd->ipath_link_speed_enabled = val;
- if (dd->ipath_ibcddrctrl & IBA7220_IBC_IBTA_1_2_MASK &&
- !(val & (val - 1)))
- dd->ipath_presets_needed = 1;
- if ((dd->ipath_flags & (IPATH_LINKDOWN|IPATH_LINKINIT)) !=
- IPATH_LINKDOWN)
- goto bail;
- /*
- * We set the IPATH_IB_FORCE_NOTIFY bit so updown
- * will get called because we want to update
- * link_speed_active, and the change may not take
- * effect for some time (if we are in POLL), so this
- * flag will force the updown routine to be called
- * on the next ibstatuschange down interrupt, even
- * if it's not a down->up transition. When setting
- * speed autoneg, clear AUTONEG_FAILED.
- */
- if (val == (IPATH_IB_SDR | IPATH_IB_DDR)) {
- val = IBA7220_IBC_SPEED_AUTONEG_MASK |
- IBA7220_IBC_IBTA_1_2_MASK;
- dd->ipath_flags &= ~IPATH_IB_AUTONEG_FAILED;
- } else
- val = val == IPATH_IB_DDR ? IBA7220_IBC_SPEED_DDR
- : IBA7220_IBC_SPEED_SDR;
- maskr = IBA7220_IBC_SPEED_AUTONEG_MASK |
- IBA7220_IBC_IBTA_1_2_MASK;
- lsb = 0; /* speed bits are low bits */
- setforce = 1;
- break;
-
- case IPATH_IB_CFG_RXPOL_ENB: /* set Auto-RX-polarity enable */
- lsb = IBA7220_IBC_RXPOL_SHIFT;
- maskr = IBA7220_IBC_RXPOL_MASK;
- break;
-
- case IPATH_IB_CFG_LREV_ENB: /* set Auto-Lane-reversal enable */
- lsb = IBA7220_IBC_LREV_SHIFT;
- maskr = IBA7220_IBC_LREV_MASK;
- break;
-
- default:
- ret = -ENOTSUPP;
- goto bail;
- }
- dd->ipath_ibcddrctrl &= ~(maskr << lsb);
- dd->ipath_ibcddrctrl |= (((u64) val & maskr) << lsb);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcddrctrl,
- dd->ipath_ibcddrctrl);
- if (setforce)
- dd->ipath_flags |= IPATH_IB_FORCE_NOTIFY;
-bail:
- return ret;
-}
-
-static void ipath_7220_read_counters(struct ipath_devdata *dd,
- struct infinipath_counters *cntrs)
-{
- u64 *counters = (u64 *) cntrs;
- int i;
-
- for (i = 0; i < sizeof(*cntrs) / sizeof(u64); i++)
- counters[i] = ipath_snap_cntr(dd, i);
-}
-
-/* if we are using MSI, try to fall back to INTx */
-static int ipath_7220_intr_fallback(struct ipath_devdata *dd)
-{
- if (dd->ipath_msi_lo) {
- dev_info(&dd->pcidev->dev, "MSI interrupt not detected,"
- " trying INTx interrupts\n");
- ipath_7220_nomsi(dd);
- ipath_enable_intx(dd->pcidev);
- /*
- * Some newer kernels require free_irq before disable_msi,
- * and the irq can be changed during disable and INTx enable,
- * so we therefore need to use the pcidev->irq value,
- * not our saved MSI value.
- */
- dd->ipath_irq = dd->pcidev->irq;
- if (request_irq(dd->ipath_irq, ipath_intr, IRQF_SHARED,
- IPATH_DRV_NAME, dd))
- ipath_dev_err(dd,
- "Could not re-request_irq for INTx\n");
- return 1;
- }
- return 0;
-}
-
-/*
- * reset the XGXS (between serdes and IBC). Slightly less intrusive
- * than resetting the IBC or external link state, and useful in some
- * cases to cause some retraining. To do this right, we reset IBC
- * as well.
- */
-static void ipath_7220_xgxs_reset(struct ipath_devdata *dd)
-{
- u64 val, prev_val;
-
- prev_val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
- val = prev_val | INFINIPATH_XGXS_RESET;
- prev_val &= ~INFINIPATH_XGXS_RESET; /* be sure */
- ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
- dd->ipath_control & ~INFINIPATH_C_LINKENABLE);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
- ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, prev_val);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
- dd->ipath_control);
-}
-
-
-/* Still needs cleanup, too much hardwired stuff */
-static void autoneg_send(struct ipath_devdata *dd,
- u32 *hdr, u32 dcnt, u32 *data)
-{
- int i;
- u64 cnt;
- u32 __iomem *piobuf;
- u32 pnum;
-
- i = 0;
- cnt = 7 + dcnt + 1; /* 7 dword header, dword data, icrc */
- while (!(piobuf = ipath_getpiobuf(dd, cnt, &pnum))) {
- if (i++ > 15) {
- ipath_dbg("Couldn't get pio buffer for send\n");
- return;
- }
- udelay(2);
- }
- if (dd->ipath_flags&IPATH_HAS_PBC_CNT)
- cnt |= 0x80000000UL<<32; /* mark as VL15 */
- writeq(cnt, piobuf);
- ipath_flush_wc();
- __iowrite32_copy(piobuf + 2, hdr, 7);
- __iowrite32_copy(piobuf + 9, data, dcnt);
- ipath_flush_wc();
-}
-
-/*
- * _start packet gets sent twice at start, _done gets sent twice at end
- */
-static void ipath_autoneg_send(struct ipath_devdata *dd, int which)
-{
- static u32 swapped;
- u32 dw, i, hcnt, dcnt, *data;
- static u32 hdr[7] = { 0xf002ffff, 0x48ffff, 0x6400abba };
- static u32 madpayload_start[0x40] = {
- 0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
- 0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
- 0x1, 0x1388, 0x15e, 0x1, /* rest 0's */
- };
- static u32 madpayload_done[0x40] = {
- 0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
- 0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
- 0x40000001, 0x1388, 0x15e, /* rest 0's */
- };
- dcnt = ARRAY_SIZE(madpayload_start);
- hcnt = ARRAY_SIZE(hdr);
- if (!swapped) {
- /* for maintainability, do it at runtime */
- for (i = 0; i < hcnt; i++) {
- dw = (__force u32) cpu_to_be32(hdr[i]);
- hdr[i] = dw;
- }
- for (i = 0; i < dcnt; i++) {
- dw = (__force u32) cpu_to_be32(madpayload_start[i]);
- madpayload_start[i] = dw;
- dw = (__force u32) cpu_to_be32(madpayload_done[i]);
- madpayload_done[i] = dw;
- }
- swapped = 1;
- }
-
- data = which ? madpayload_done : madpayload_start;
- ipath_cdbg(PKT, "Sending %s special MADs\n", which?"done":"start");
-
- autoneg_send(dd, hdr, dcnt, data);
- ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
- udelay(2);
- autoneg_send(dd, hdr, dcnt, data);
- ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
- udelay(2);
-}
-
-
-
-/*
- * Do the absolute minimum to cause an IB speed change, and make it
- * ready, but don't actually trigger the change. The caller will
- * do that when ready (if link is in Polling training state, it will
- * happen immediately, otherwise when link next goes down)
- *
- * This routine should only be used as part of the DDR autonegotiation
- * code for devices that are not compliant with IB 1.2 (or code that
- * fixes things up for same).
- *
- * When the link has gone down and autoneg is enabled, or autoneg has
- * failed and we give up until the next time we set both speeds, then
- * we want IBTA enabled as well as "use max enabled speed".
- */
-static void set_speed_fast(struct ipath_devdata *dd, u32 speed)
-{
- dd->ipath_ibcddrctrl &= ~(IBA7220_IBC_SPEED_AUTONEG_MASK |
- IBA7220_IBC_IBTA_1_2_MASK |
- (IBA7220_IBC_WIDTH_MASK << IBA7220_IBC_WIDTH_SHIFT));
-
- if (speed == (IPATH_IB_SDR | IPATH_IB_DDR))
- dd->ipath_ibcddrctrl |= IBA7220_IBC_SPEED_AUTONEG_MASK |
- IBA7220_IBC_IBTA_1_2_MASK;
- else
- dd->ipath_ibcddrctrl |= speed == IPATH_IB_DDR ?
- IBA7220_IBC_SPEED_DDR : IBA7220_IBC_SPEED_SDR;
-
- /*
- * Convert from IB-style 1 = 1x, 2 = 4x, 3 = auto
- * to chip-centric 0 = 1x, 1 = 4x, 2 = auto
- */
- dd->ipath_ibcddrctrl |= (u64)(dd->ipath_link_width_enabled - 1) <<
- IBA7220_IBC_WIDTH_SHIFT;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcddrctrl,
- dd->ipath_ibcddrctrl);
- ipath_cdbg(VERBOSE, "setup for IB speed (%x) done\n", speed);
-}
-
-
-/*
- * this routine is only used when we are not talking to another
- * IB 1.2-compliant device that we think can do DDR.
- * (This includes all existing switch chips as of Oct 2007.)
- * 1.2-compliant devices go directly to DDR prior to reaching INIT
- */
-static void try_auto_neg(struct ipath_devdata *dd)
-{
- /*
- * required for older non-IB1.2 DDR switches. Newer
- * non-IB-compliant switches don't need it, but so far,
- * aren't bothered by it either. "Magic constant"
- */
- ipath_write_kreg(dd, IPATH_KREG_OFFSET(IBNCModeCtrl),
- 0x3b9dc07);
- dd->ipath_flags |= IPATH_IB_AUTONEG_INPROG;
- ipath_autoneg_send(dd, 0);
- set_speed_fast(dd, IPATH_IB_DDR);
- ipath_toggle_rclkrls(dd);
- /* 2 msec is minimum length of a poll cycle */
- schedule_delayed_work(&dd->ipath_autoneg_work,
- msecs_to_jiffies(2));
-}
-
-
-static int ipath_7220_ib_updown(struct ipath_devdata *dd, int ibup, u64 ibcs)
-{
- int ret = 0, symadj = 0;
- u32 ltstate = ipath_ib_linkstate(dd, ibcs);
-
- dd->ipath_link_width_active =
- ((ibcs >> IBA7220_IBCS_LINKWIDTH_SHIFT) & 1) ?
- IB_WIDTH_4X : IB_WIDTH_1X;
- dd->ipath_link_speed_active =
- ((ibcs >> IBA7220_IBCS_LINKSPEED_SHIFT) & 1) ?
- IPATH_IB_DDR : IPATH_IB_SDR;
-
- if (!ibup) {
- /*
- * when link goes down we don't want aeq running, so it
- * won't interfere with IBC training, etc., and we need
- * to go back to the static SerDes preset values
- */
- if (dd->ipath_x1_fix_tries &&
- ltstate <= INFINIPATH_IBCS_LT_STATE_SLEEPQUIET &&
- ltstate != INFINIPATH_IBCS_LT_STATE_LINKUP)
- dd->ipath_x1_fix_tries = 0;
- if (!(dd->ipath_flags & (IPATH_IB_AUTONEG_FAILED |
- IPATH_IB_AUTONEG_INPROG)))
- set_speed_fast(dd, dd->ipath_link_speed_enabled);
- if (!(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG)) {
- ipath_cdbg(VERBOSE, "Setting RXEQ defaults\n");
- ipath_sd7220_presets(dd);
- }
- /* this might be better in ipath_sd7220_presets() */
- ipath_set_relock_poll(dd, ibup);
- } else {
- if (ipath_compat_ddr_negotiate &&
- !(dd->ipath_flags & (IPATH_IB_AUTONEG_FAILED |
- IPATH_IB_AUTONEG_INPROG)) &&
- dd->ipath_link_speed_active == IPATH_IB_SDR &&
- (dd->ipath_link_speed_enabled &
- (IPATH_IB_DDR | IPATH_IB_SDR)) ==
- (IPATH_IB_DDR | IPATH_IB_SDR) &&
- dd->ipath_autoneg_tries < IPATH_AUTONEG_TRIES) {
- /* we are SDR, and DDR auto-negotiation enabled */
- ++dd->ipath_autoneg_tries;
- ipath_dbg("DDR negotiation try, %u/%u\n",
- dd->ipath_autoneg_tries,
- IPATH_AUTONEG_TRIES);
- if (!dd->ibdeltainprog) {
- dd->ibdeltainprog = 1;
- dd->ibsymsnap = ipath_read_creg32(dd,
- dd->ipath_cregs->cr_ibsymbolerrcnt);
- dd->iblnkerrsnap = ipath_read_creg32(dd,
- dd->ipath_cregs->cr_iblinkerrrecovcnt);
- }
- try_auto_neg(dd);
- ret = 1; /* no other IB status change processing */
- } else if ((dd->ipath_flags & IPATH_IB_AUTONEG_INPROG)
- && dd->ipath_link_speed_active == IPATH_IB_SDR) {
- ipath_autoneg_send(dd, 1);
- set_speed_fast(dd, IPATH_IB_DDR);
- udelay(2);
- ipath_toggle_rclkrls(dd);
- ret = 1; /* no other IB status change processing */
- } else {
- if ((dd->ipath_flags & IPATH_IB_AUTONEG_INPROG) &&
- (dd->ipath_link_speed_active & IPATH_IB_DDR)) {
- ipath_dbg("Got to INIT with DDR autoneg\n");
- dd->ipath_flags &= ~(IPATH_IB_AUTONEG_INPROG
- | IPATH_IB_AUTONEG_FAILED);
- dd->ipath_autoneg_tries = 0;
- /* re-enable SDR, for next link down */
- set_speed_fast(dd,
- dd->ipath_link_speed_enabled);
- wake_up(&dd->ipath_autoneg_wait);
- symadj = 1;
- } else if (dd->ipath_flags & IPATH_IB_AUTONEG_FAILED) {
- /*
- * clear autoneg failure flag, and do setup
- * so we'll try next time link goes down and
- * back to INIT (possibly connected to different
- * device).
- */
- ipath_dbg("INIT %sDR after autoneg failure\n",
- (dd->ipath_link_speed_active &
- IPATH_IB_DDR) ? "D" : "S");
- dd->ipath_flags &= ~IPATH_IB_AUTONEG_FAILED;
- dd->ipath_ibcddrctrl |=
- IBA7220_IBC_IBTA_1_2_MASK;
- ipath_write_kreg(dd,
- IPATH_KREG_OFFSET(IBNCModeCtrl), 0);
- symadj = 1;
- }
- }
- /*
- * if we are in 1X on rev1 only, and are in autoneg width,
- * it could be due to an xgxs problem, so if we haven't
- * already tried, try twice to get to 4X; if we
- * tried, and couldn't, report it, since it will
- * probably not be what is desired.
- */
- if (dd->ipath_minrev == 1 &&
- (dd->ipath_link_width_enabled & (IB_WIDTH_1X |
- IB_WIDTH_4X)) == (IB_WIDTH_1X | IB_WIDTH_4X)
- && dd->ipath_link_width_active == IB_WIDTH_1X
- && dd->ipath_x1_fix_tries < 3) {
- if (++dd->ipath_x1_fix_tries == 3) {
- dev_info(&dd->pcidev->dev,
- "IB link is in 1X mode\n");
- if (!(dd->ipath_flags &
- IPATH_IB_AUTONEG_INPROG))
- symadj = 1;
- }
- else {
- ipath_cdbg(VERBOSE, "IB 1X in "
- "auto-width, try %u to be "
- "sure it's really 1X; "
- "ltstate %u\n",
- dd->ipath_x1_fix_tries,
- ltstate);
- dd->ipath_f_xgxs_reset(dd);
- ret = 1; /* skip other processing */
- }
- } else if (!(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG))
- symadj = 1;
-
- if (!ret) {
- dd->delay_mult = rate_to_delay
- [(ibcs >> IBA7220_IBCS_LINKSPEED_SHIFT) & 1]
- [(ibcs >> IBA7220_IBCS_LINKWIDTH_SHIFT) & 1];
-
- ipath_set_relock_poll(dd, ibup);
- }
- }
-
- if (symadj) {
- if (dd->ibdeltainprog) {
- dd->ibdeltainprog = 0;
- dd->ibsymdelta += ipath_read_creg32(dd,
- dd->ipath_cregs->cr_ibsymbolerrcnt) -
- dd->ibsymsnap;
- dd->iblnkerrdelta += ipath_read_creg32(dd,
- dd->ipath_cregs->cr_iblinkerrrecovcnt) -
- dd->iblnkerrsnap;
- }
- } else if (!ibup && !dd->ibdeltainprog
- && !(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG)) {
- dd->ibdeltainprog = 1;
- dd->ibsymsnap = ipath_read_creg32(dd,
- dd->ipath_cregs->cr_ibsymbolerrcnt);
- dd->iblnkerrsnap = ipath_read_creg32(dd,
- dd->ipath_cregs->cr_iblinkerrrecovcnt);
- }
-
- if (!ret)
- ipath_setup_7220_setextled(dd, ipath_ib_linkstate(dd, ibcs),
- ltstate);
- return ret;
-}
-
-
-/*
- * Handle the empirically determined mechanism for auto-negotiation
- * of DDR speed with switches.
- */
-static void autoneg_work(struct work_struct *work)
-{
- struct ipath_devdata *dd;
- u64 startms;
- u32 lastlts, i;
-
- dd = container_of(work, struct ipath_devdata,
- ipath_autoneg_work.work);
-
- startms = jiffies_to_msecs(jiffies);
-
- /*
- * busy wait for this first part, it should be at most a
- * few hundred usec, since we scheduled ourselves for 2msec.
- */
- for (i = 0; i < 25; i++) {
- lastlts = ipath_ib_linktrstate(dd, dd->ipath_lastibcstat);
- if (lastlts == INFINIPATH_IBCS_LT_STATE_POLLQUIET) {
- ipath_set_linkstate(dd, IPATH_IB_LINKDOWN_DISABLE);
- break;
- }
- udelay(100);
- }
-
- if (!(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG))
- goto done; /* we got there early or told to stop */
-
- /* we expect this to timeout */
- if (wait_event_timeout(dd->ipath_autoneg_wait,
- !(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG),
- msecs_to_jiffies(90)))
- goto done;
-
- ipath_toggle_rclkrls(dd);
-
- /* we expect this to timeout */
- if (wait_event_timeout(dd->ipath_autoneg_wait,
- !(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG),
- msecs_to_jiffies(1700)))
- goto done;
-
- set_speed_fast(dd, IPATH_IB_SDR);
- ipath_toggle_rclkrls(dd);
-
- /*
- * wait up to 250 msec for link to train and get to INIT at DDR;
- * this should terminate early
- */
- wait_event_timeout(dd->ipath_autoneg_wait,
- !(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG),
- msecs_to_jiffies(250));
-done:
- if (dd->ipath_flags & IPATH_IB_AUTONEG_INPROG) {
- ipath_dbg("Did not get to DDR INIT (%x) after %Lu msecs\n",
- ipath_ib_state(dd, dd->ipath_lastibcstat),
- (unsigned long long) jiffies_to_msecs(jiffies)-startms);
- dd->ipath_flags &= ~IPATH_IB_AUTONEG_INPROG;
- if (dd->ipath_autoneg_tries == IPATH_AUTONEG_TRIES) {
- dd->ipath_flags |= IPATH_IB_AUTONEG_FAILED;
- ipath_dbg("Giving up on DDR until next IB "
- "link Down\n");
- dd->ipath_autoneg_tries = 0;
- }
- set_speed_fast(dd, dd->ipath_link_speed_enabled);
- }
-}
-
-
-/**
- * ipath_init_iba7220_funcs - set up the chip-specific function pointers
- * @dd: the infinipath device
- *
- * This is global, and is called directly at init to set up the
- * chip-specific function pointers for later use.
- */
-void ipath_init_iba7220_funcs(struct ipath_devdata *dd)
-{
- dd->ipath_f_intrsetup = ipath_7220_intconfig;
- dd->ipath_f_bus = ipath_setup_7220_config;
- dd->ipath_f_reset = ipath_setup_7220_reset;
- dd->ipath_f_get_boardname = ipath_7220_boardname;
- dd->ipath_f_init_hwerrors = ipath_7220_init_hwerrors;
- dd->ipath_f_early_init = ipath_7220_early_init;
- dd->ipath_f_handle_hwerrors = ipath_7220_handle_hwerrors;
- dd->ipath_f_quiet_serdes = ipath_7220_quiet_serdes;
- dd->ipath_f_bringup_serdes = ipath_7220_bringup_serdes;
- dd->ipath_f_clear_tids = ipath_7220_clear_tids;
- dd->ipath_f_put_tid = ipath_7220_put_tid;
- dd->ipath_f_cleanup = ipath_setup_7220_cleanup;
- dd->ipath_f_setextled = ipath_setup_7220_setextled;
- dd->ipath_f_get_base_info = ipath_7220_get_base_info;
- dd->ipath_f_free_irq = ipath_7220_free_irq;
- dd->ipath_f_tidtemplate = ipath_7220_tidtemplate;
- dd->ipath_f_intr_fallback = ipath_7220_intr_fallback;
- dd->ipath_f_xgxs_reset = ipath_7220_xgxs_reset;
- dd->ipath_f_get_ib_cfg = ipath_7220_get_ib_cfg;
- dd->ipath_f_set_ib_cfg = ipath_7220_set_ib_cfg;
- dd->ipath_f_config_jint = ipath_7220_config_jint;
- dd->ipath_f_config_ports = ipath_7220_config_ports;
- dd->ipath_f_read_counters = ipath_7220_read_counters;
- dd->ipath_f_get_msgheader = ipath_7220_get_msgheader;
- dd->ipath_f_ib_updown = ipath_7220_ib_updown;
-
- /* initialize chip-specific variables */
- ipath_init_7220_variables(dd);
-}
diff --git a/drivers/infiniband/hw/ipath/ipath_kernel.h b/drivers/infiniband/hw/ipath/ipath_kernel.h
index b3d7efcdf021..6559af60bffd 100644
--- a/drivers/infiniband/hw/ipath/ipath_kernel.h
+++ b/drivers/infiniband/hw/ipath/ipath_kernel.h
@@ -1030,8 +1030,6 @@ void ipath_free_data(struct ipath_portdata *dd);
u32 __iomem *ipath_getpiobuf(struct ipath_devdata *, u32, u32 *);
void ipath_chg_pioavailkernel(struct ipath_devdata *dd, unsigned start,
unsigned len, int avail);
-void ipath_init_iba7220_funcs(struct ipath_devdata *);
-void ipath_init_iba6120_funcs(struct ipath_devdata *);
void ipath_init_iba6110_funcs(struct ipath_devdata *);
void ipath_get_eeprom_info(struct ipath_devdata *);
int ipath_update_eeprom_log(struct ipath_devdata *dd);
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index 559f39be0dcc..dd7f26d04d46 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -2182,7 +2182,7 @@ int ipath_register_ib_device(struct ipath_devdata *dd)
snprintf(dev->node_desc, sizeof(dev->node_desc),
IPATH_IDSTR " %s", init_utsname()->nodename);
- ret = ib_register_device(dev);
+ ret = ib_register_device(dev, NULL);
if (ret)
goto err_reg;
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index cc2ddd29ac57..5a219a2fdf16 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -661,6 +661,14 @@ repoll:
wc->opcode = IB_WC_FETCH_ADD;
wc->byte_len = 8;
break;
+ case MLX4_OPCODE_MASKED_ATOMIC_CS:
+ wc->opcode = IB_WC_MASKED_COMP_SWAP;
+ wc->byte_len = 8;
+ break;
+ case MLX4_OPCODE_MASKED_ATOMIC_FA:
+ wc->opcode = IB_WC_MASKED_FETCH_ADD;
+ wc->byte_len = 8;
+ break;
case MLX4_OPCODE_BIND_MW:
wc->opcode = IB_WC_BIND_MW;
break;
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 01f2a3f93355..4e94e360e43b 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -139,6 +139,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
props->local_ca_ack_delay = dev->dev->caps.local_ca_ack_delay;
props->atomic_cap = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ?
IB_ATOMIC_HCA : IB_ATOMIC_NONE;
+ props->masked_atomic_cap = IB_ATOMIC_HCA;
props->max_pkeys = dev->dev->caps.pkey_table_len[1];
props->max_mcast_grp = dev->dev->caps.num_mgms + dev->dev->caps.num_amgms;
props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm;
@@ -661,7 +662,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
spin_lock_init(&ibdev->sm_lock);
mutex_init(&ibdev->cap_mask_mutex);
- if (ib_register_device(&ibdev->ib_dev))
+ if (ib_register_device(&ibdev->ib_dev, NULL))
goto err_map;
if (mlx4_ib_mad_init(ibdev))
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 5643f4a8ffef..6a60827b2301 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -74,17 +74,19 @@ enum {
};
static const __be32 mlx4_ib_opcode[] = {
- [IB_WR_SEND] = cpu_to_be32(MLX4_OPCODE_SEND),
- [IB_WR_LSO] = cpu_to_be32(MLX4_OPCODE_LSO),
- [IB_WR_SEND_WITH_IMM] = cpu_to_be32(MLX4_OPCODE_SEND_IMM),
- [IB_WR_RDMA_WRITE] = cpu_to_be32(MLX4_OPCODE_RDMA_WRITE),
- [IB_WR_RDMA_WRITE_WITH_IMM] = cpu_to_be32(MLX4_OPCODE_RDMA_WRITE_IMM),
- [IB_WR_RDMA_READ] = cpu_to_be32(MLX4_OPCODE_RDMA_READ),
- [IB_WR_ATOMIC_CMP_AND_SWP] = cpu_to_be32(MLX4_OPCODE_ATOMIC_CS),
- [IB_WR_ATOMIC_FETCH_AND_ADD] = cpu_to_be32(MLX4_OPCODE_ATOMIC_FA),
- [IB_WR_SEND_WITH_INV] = cpu_to_be32(MLX4_OPCODE_SEND_INVAL),
- [IB_WR_LOCAL_INV] = cpu_to_be32(MLX4_OPCODE_LOCAL_INVAL),
- [IB_WR_FAST_REG_MR] = cpu_to_be32(MLX4_OPCODE_FMR),
+ [IB_WR_SEND] = cpu_to_be32(MLX4_OPCODE_SEND),
+ [IB_WR_LSO] = cpu_to_be32(MLX4_OPCODE_LSO),
+ [IB_WR_SEND_WITH_IMM] = cpu_to_be32(MLX4_OPCODE_SEND_IMM),
+ [IB_WR_RDMA_WRITE] = cpu_to_be32(MLX4_OPCODE_RDMA_WRITE),
+ [IB_WR_RDMA_WRITE_WITH_IMM] = cpu_to_be32(MLX4_OPCODE_RDMA_WRITE_IMM),
+ [IB_WR_RDMA_READ] = cpu_to_be32(MLX4_OPCODE_RDMA_READ),
+ [IB_WR_ATOMIC_CMP_AND_SWP] = cpu_to_be32(MLX4_OPCODE_ATOMIC_CS),
+ [IB_WR_ATOMIC_FETCH_AND_ADD] = cpu_to_be32(MLX4_OPCODE_ATOMIC_FA),
+ [IB_WR_SEND_WITH_INV] = cpu_to_be32(MLX4_OPCODE_SEND_INVAL),
+ [IB_WR_LOCAL_INV] = cpu_to_be32(MLX4_OPCODE_LOCAL_INVAL),
+ [IB_WR_FAST_REG_MR] = cpu_to_be32(MLX4_OPCODE_FMR),
+ [IB_WR_MASKED_ATOMIC_CMP_AND_SWP] = cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_CS),
+ [IB_WR_MASKED_ATOMIC_FETCH_AND_ADD] = cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_FA),
};
static struct mlx4_ib_sqp *to_msqp(struct mlx4_ib_qp *mqp)
@@ -1407,6 +1409,9 @@ static void set_atomic_seg(struct mlx4_wqe_atomic_seg *aseg, struct ib_send_wr *
if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add);
+ } else if (wr->opcode == IB_WR_MASKED_ATOMIC_FETCH_AND_ADD) {
+ aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
+ aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add_mask);
} else {
aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
aseg->compare = 0;
@@ -1414,6 +1419,15 @@ static void set_atomic_seg(struct mlx4_wqe_atomic_seg *aseg, struct ib_send_wr *
}
+static void set_masked_atomic_seg(struct mlx4_wqe_masked_atomic_seg *aseg,
+ struct ib_send_wr *wr)
+{
+ aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
+ aseg->swap_add_mask = cpu_to_be64(wr->wr.atomic.swap_mask);
+ aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add);
+ aseg->compare_mask = cpu_to_be64(wr->wr.atomic.compare_add_mask);
+}
+
static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg,
struct ib_send_wr *wr)
{
@@ -1567,6 +1581,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
switch (wr->opcode) {
case IB_WR_ATOMIC_CMP_AND_SWP:
case IB_WR_ATOMIC_FETCH_AND_ADD:
+ case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
wr->wr.atomic.rkey);
wqe += sizeof (struct mlx4_wqe_raddr_seg);
@@ -1579,6 +1594,19 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
break;
+ case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
+ set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
+ wr->wr.atomic.rkey);
+ wqe += sizeof (struct mlx4_wqe_raddr_seg);
+
+ set_masked_atomic_seg(wqe, wr);
+ wqe += sizeof (struct mlx4_wqe_masked_atomic_seg);
+
+ size += (sizeof (struct mlx4_wqe_raddr_seg) +
+ sizeof (struct mlx4_wqe_masked_atomic_seg)) / 16;
+
+ break;
+
case IB_WR_RDMA_READ:
case IB_WR_RDMA_WRITE:
case IB_WR_RDMA_WRITE_WITH_IMM:
diff --git a/drivers/infiniband/hw/mthca/mthca_allocator.c b/drivers/infiniband/hw/mthca/mthca_allocator.c
index c5ccc2daab60..b4e0cf4e95cd 100644
--- a/drivers/infiniband/hw/mthca/mthca_allocator.c
+++ b/drivers/infiniband/hw/mthca/mthca_allocator.c
@@ -211,7 +211,7 @@ int mthca_buf_alloc(struct mthca_dev *dev, int size, int max_direct,
if (!buf->direct.buf)
return -ENOMEM;
- pci_unmap_addr_set(&buf->direct, mapping, t);
+ dma_unmap_addr_set(&buf->direct, mapping, t);
memset(buf->direct.buf, 0, size);
@@ -251,7 +251,7 @@ int mthca_buf_alloc(struct mthca_dev *dev, int size, int max_direct,
goto err_free;
dma_list[i] = t;
- pci_unmap_addr_set(&buf->page_list[i], mapping, t);
+ dma_unmap_addr_set(&buf->page_list[i], mapping, t);
clear_page(buf->page_list[i].buf);
}
@@ -289,12 +289,12 @@ void mthca_buf_free(struct mthca_dev *dev, int size, union mthca_buf *buf,
if (is_direct)
dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf,
- pci_unmap_addr(&buf->direct, mapping));
+ dma_unmap_addr(&buf->direct, mapping));
else {
for (i = 0; i < (size + PAGE_SIZE - 1) / PAGE_SIZE; ++i)
dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
buf->page_list[i].buf,
- pci_unmap_addr(&buf->page_list[i],
+ dma_unmap_addr(&buf->page_list[i],
mapping));
kfree(buf->page_list);
}
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index 9388164b6053..8e8c728aff88 100644
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -504,7 +504,7 @@ static int mthca_create_eq(struct mthca_dev *dev,
goto err_out_free_pages;
dma_list[i] = t;
- pci_unmap_addr_set(&eq->page_list[i], mapping, t);
+ dma_unmap_addr_set(&eq->page_list[i], mapping, t);
clear_page(eq->page_list[i].buf);
}
@@ -579,7 +579,7 @@ static int mthca_create_eq(struct mthca_dev *dev,
if (eq->page_list[i].buf)
dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
eq->page_list[i].buf,
- pci_unmap_addr(&eq->page_list[i],
+ dma_unmap_addr(&eq->page_list[i],
mapping));
mthca_free_mailbox(dev, mailbox);
@@ -629,7 +629,7 @@ static void mthca_free_eq(struct mthca_dev *dev,
for (i = 0; i < npages; ++i)
pci_free_consistent(dev->pdev, PAGE_SIZE,
eq->page_list[i].buf,
- pci_unmap_addr(&eq->page_list[i], mapping));
+ dma_unmap_addr(&eq->page_list[i], mapping));
kfree(eq->page_list);
mthca_free_mailbox(dev, mailbox);
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index f080a784bc79..1e0b4b6074ad 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -1403,7 +1403,7 @@ int mthca_register_device(struct mthca_dev *dev)
mutex_init(&dev->cap_mask_mutex);
- ret = ib_register_device(&dev->ib_dev);
+ ret = ib_register_device(&dev->ib_dev, NULL);
if (ret)
return ret;
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.h b/drivers/infiniband/hw/mthca/mthca_provider.h
index 90f4c4d2e983..596acc45569b 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.h
+++ b/drivers/infiniband/hw/mthca/mthca_provider.h
@@ -46,7 +46,7 @@
struct mthca_buf_list {
void *buf;
- DECLARE_PCI_UNMAP_ADDR(mapping)
+ DEFINE_DMA_UNMAP_ADDR(mapping);
};
union mthca_buf {
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index c36a3f514929..57874a165083 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -1297,7 +1297,7 @@ int nes_destroy_cqp(struct nes_device *nesdev)
/**
* nes_init_1g_phy
*/
-int nes_init_1g_phy(struct nes_device *nesdev, u8 phy_type, u8 phy_index)
+static int nes_init_1g_phy(struct nes_device *nesdev, u8 phy_type, u8 phy_index)
{
u32 counter = 0;
u16 phy_data;
@@ -1351,7 +1351,7 @@ int nes_init_1g_phy(struct nes_device *nesdev, u8 phy_type, u8 phy_index)
/**
* nes_init_2025_phy
*/
-int nes_init_2025_phy(struct nes_device *nesdev, u8 phy_type, u8 phy_index)
+static int nes_init_2025_phy(struct nes_device *nesdev, u8 phy_type, u8 phy_index)
{
u32 temp_phy_data = 0;
u32 temp_phy_data2 = 0;
@@ -2458,7 +2458,6 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
return;
}
nesadapter->mac_sw_state[mac_number] = NES_MAC_SW_INTERRUPT;
- spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
/* ack the MAC interrupt */
mac_status = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS + (mac_index * 0x200));
@@ -2469,11 +2468,9 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
if (mac_status & (NES_MAC_INT_LINK_STAT_CHG | NES_MAC_INT_XGMII_EXT)) {
nesdev->link_status_interrupts++;
- if (0 == (++nesadapter->link_interrupt_count[mac_index] % ((u16)NES_MAX_LINK_INTERRUPTS))) {
- spin_lock_irqsave(&nesadapter->phy_lock, flags);
+ if (0 == (++nesadapter->link_interrupt_count[mac_index] % ((u16)NES_MAX_LINK_INTERRUPTS)))
nes_reset_link(nesdev, mac_index);
- spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
- }
+
/* read the PHY interrupt status register */
if ((nesadapter->OneG_Mode) &&
(nesadapter->phy_type[mac_index] != NES_PHY_TYPE_PUMA_1G)) {
@@ -2641,6 +2638,8 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
}
}
+ spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
+
nesadapter->mac_sw_state[mac_number] = NES_MAC_SW_IDLE;
}
@@ -3424,6 +3423,7 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
struct nes_adapter *nesadapter = nesdev->nesadapter;
u32 aeq_info;
u32 next_iwarp_state = 0;
+ u32 aeqe_cq_id;
u16 async_event_id;
u8 tcp_state;
u8 iwarp_state;
@@ -3451,6 +3451,14 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]), aeqe,
nes_tcp_state_str[tcp_state], nes_iwarp_state_str[iwarp_state]);
+ aeqe_cq_id = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]);
+ if (aeq_info & NES_AEQE_QP) {
+ if ((!nes_is_resource_allocated(nesadapter, nesadapter->allocated_qps,
+ aeqe_cq_id)) ||
+ (atomic_read(&nesqp->close_timer_started)))
+ return;
+ }
+
switch (async_event_id) {
case NES_AEQE_AEID_LLP_FIN_RECEIVED:
if (nesqp->term_flags)
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index b7c813f4be43..5cc0a9ae5bb1 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -877,7 +877,7 @@ static void nes_netdev_set_multicast_list(struct net_device *netdev)
if (!mc_all_on) {
char *addrs;
int i;
- struct dev_mc_list *mcaddr;
+ struct netdev_hw_addr *ha;
addrs = kmalloc(ETH_ALEN * mc_count, GFP_ATOMIC);
if (!addrs) {
@@ -885,9 +885,8 @@ static void nes_netdev_set_multicast_list(struct net_device *netdev)
goto unlock;
}
i = 0;
- netdev_for_each_mc_addr(mcaddr, netdev)
- memcpy(get_addr(addrs, i++),
- mcaddr->dmi_addr, ETH_ALEN);
+ netdev_for_each_mc_addr(ha, netdev)
+ memcpy(get_addr(addrs, i++), ha->addr, ETH_ALEN);
perfect_filter_register_address = NES_IDX_PERFECT_FILTER_LOW +
pft_entries_preallocated * 0x8;
@@ -1002,6 +1001,7 @@ static int nes_netdev_change_mtu(struct net_device *netdev, int new_mtu)
return ret;
}
+
static const char nes_ethtool_stringset[][ETH_GSTRING_LEN] = {
"Link Change Interrupts",
"Linearized SKBs",
@@ -1016,11 +1016,15 @@ static const char nes_ethtool_stringset[][ETH_GSTRING_LEN] = {
"Rx Jabber Errors",
"Rx Oversized Frames",
"Rx Short Frames",
+ "Rx Length Errors",
+ "Rx CRC Errors",
+ "Rx Port Discard",
"Endnode Rx Discards",
"Endnode Rx Octets",
"Endnode Rx Frames",
"Endnode Tx Octets",
"Endnode Tx Frames",
+ "Tx Errors",
"mh detected",
"mh pauses",
"Retransmission Count",
@@ -1049,19 +1053,13 @@ static const char nes_ethtool_stringset[][ETH_GSTRING_LEN] = {
"CM Nodes Destroyed",
"CM Accel Drops",
"CM Resets Received",
+ "Free 4Kpbls",
+ "Free 256pbls",
"Timer Inits",
- "CQ Depth 1",
- "CQ Depth 4",
- "CQ Depth 16",
- "CQ Depth 24",
- "CQ Depth 32",
- "CQ Depth 128",
- "CQ Depth 256",
"LRO aggregated",
"LRO flushed",
"LRO no_desc",
};
-
#define NES_ETHTOOL_STAT_COUNT ARRAY_SIZE(nes_ethtool_stringset)
/**
@@ -1121,12 +1119,14 @@ static void nes_netdev_get_strings(struct net_device *netdev, u32 stringset,
/**
* nes_netdev_get_ethtool_stats
*/
+
static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *target_ethtool_stats, u64 *target_stat_values)
{
u64 u64temp;
struct nes_vnic *nesvnic = netdev_priv(netdev);
struct nes_device *nesdev = nesvnic->nesdev;
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
u32 nic_count;
u32 u32temp;
u32 index = 0;
@@ -1155,6 +1155,46 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
nesvnic->nesdev->port_tx_discards += u32temp;
nesvnic->netstats.tx_dropped += u32temp;
+ u32temp = nes_read_indexed(nesdev,
+ NES_IDX_MAC_RX_SHORT_FRAMES + (nesvnic->nesdev->mac_index*0x200));
+ nesvnic->netstats.rx_dropped += u32temp;
+ nesvnic->nesdev->mac_rx_errors += u32temp;
+ nesvnic->nesdev->mac_rx_short_frames += u32temp;
+
+ u32temp = nes_read_indexed(nesdev,
+ NES_IDX_MAC_RX_OVERSIZED_FRAMES + (nesvnic->nesdev->mac_index*0x200));
+ nesvnic->netstats.rx_dropped += u32temp;
+ nesvnic->nesdev->mac_rx_errors += u32temp;
+ nesvnic->nesdev->mac_rx_oversized_frames += u32temp;
+
+ u32temp = nes_read_indexed(nesdev,
+ NES_IDX_MAC_RX_JABBER_FRAMES + (nesvnic->nesdev->mac_index*0x200));
+ nesvnic->netstats.rx_dropped += u32temp;
+ nesvnic->nesdev->mac_rx_errors += u32temp;
+ nesvnic->nesdev->mac_rx_jabber_frames += u32temp;
+
+ u32temp = nes_read_indexed(nesdev,
+ NES_IDX_MAC_RX_SYMBOL_ERR_FRAMES + (nesvnic->nesdev->mac_index*0x200));
+ nesvnic->netstats.rx_dropped += u32temp;
+ nesvnic->nesdev->mac_rx_errors += u32temp;
+ nesvnic->nesdev->mac_rx_symbol_err_frames += u32temp;
+
+ u32temp = nes_read_indexed(nesdev,
+ NES_IDX_MAC_RX_LENGTH_ERR_FRAMES + (nesvnic->nesdev->mac_index*0x200));
+ nesvnic->netstats.rx_length_errors += u32temp;
+ nesvnic->nesdev->mac_rx_errors += u32temp;
+
+ u32temp = nes_read_indexed(nesdev,
+ NES_IDX_MAC_RX_CRC_ERR_FRAMES + (nesvnic->nesdev->mac_index*0x200));
+ nesvnic->nesdev->mac_rx_errors += u32temp;
+ nesvnic->nesdev->mac_rx_crc_errors += u32temp;
+ nesvnic->netstats.rx_crc_errors += u32temp;
+
+ u32temp = nes_read_indexed(nesdev,
+ NES_IDX_MAC_TX_ERRORS + (nesvnic->nesdev->mac_index*0x200));
+ nesvnic->nesdev->mac_tx_errors += u32temp;
+ nesvnic->netstats.tx_errors += u32temp;
+
for (nic_count = 0; nic_count < NES_MAX_PORT_COUNT; nic_count++) {
if (nesvnic->qp_nic_index[nic_count] == 0xf)
break;
@@ -1219,11 +1259,15 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
target_stat_values[++index] = nesvnic->nesdev->mac_rx_jabber_frames;
target_stat_values[++index] = nesvnic->nesdev->mac_rx_oversized_frames;
target_stat_values[++index] = nesvnic->nesdev->mac_rx_short_frames;
+ target_stat_values[++index] = nesvnic->netstats.rx_length_errors;
+ target_stat_values[++index] = nesvnic->nesdev->mac_rx_crc_errors;
+ target_stat_values[++index] = nesvnic->nesdev->port_rx_discards;
target_stat_values[++index] = nesvnic->endnode_nstat_rx_discard;
target_stat_values[++index] = nesvnic->endnode_nstat_rx_octets;
target_stat_values[++index] = nesvnic->endnode_nstat_rx_frames;
target_stat_values[++index] = nesvnic->endnode_nstat_tx_octets;
target_stat_values[++index] = nesvnic->endnode_nstat_tx_frames;
+ target_stat_values[++index] = nesvnic->nesdev->mac_tx_errors;
target_stat_values[++index] = mh_detected;
target_stat_values[++index] = mh_pauses_sent;
target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
@@ -1252,21 +1296,14 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
target_stat_values[++index] = atomic_read(&cm_resets_recvd);
+ target_stat_values[++index] = nesadapter->free_4kpbl;
+ target_stat_values[++index] = nesadapter->free_256pbl;
target_stat_values[++index] = int_mod_timer_init;
- target_stat_values[++index] = int_mod_cq_depth_1;
- target_stat_values[++index] = int_mod_cq_depth_4;
- target_stat_values[++index] = int_mod_cq_depth_16;
- target_stat_values[++index] = int_mod_cq_depth_24;
- target_stat_values[++index] = int_mod_cq_depth_32;
- target_stat_values[++index] = int_mod_cq_depth_128;
- target_stat_values[++index] = int_mod_cq_depth_256;
target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
-
}
-
/**
* nes_netdev_get_drvinfo
*/
@@ -1461,11 +1498,14 @@ static int nes_netdev_get_settings(struct net_device *netdev, struct ethtool_cmd
et_cmd->transceiver = XCVR_INTERNAL;
et_cmd->phy_address = mac_index;
} else {
+ unsigned long flags;
et_cmd->supported = SUPPORTED_1000baseT_Full
| SUPPORTED_Autoneg;
et_cmd->advertising = ADVERTISED_1000baseT_Full
| ADVERTISED_Autoneg;
+ spin_lock_irqsave(&nesadapter->phy_lock, flags);
nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data);
+ spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
if (phy_data & 0x1000)
et_cmd->autoneg = AUTONEG_ENABLE;
else
@@ -1503,12 +1543,15 @@ static int nes_netdev_set_settings(struct net_device *netdev, struct ethtool_cmd
struct nes_vnic *nesvnic = netdev_priv(netdev);
struct nes_device *nesdev = nesvnic->nesdev;
struct nes_adapter *nesadapter = nesdev->nesadapter;
- u16 phy_data;
if ((nesadapter->OneG_Mode) &&
(nesadapter->phy_type[nesdev->mac_index] != NES_PHY_TYPE_PUMA_1G)) {
- nes_read_1G_phy_reg(nesdev, 0, nesadapter->phy_index[nesdev->mac_index],
- &phy_data);
+ unsigned long flags;
+ u16 phy_data;
+ u8 phy_index = nesadapter->phy_index[nesdev->mac_index];
+
+ spin_lock_irqsave(&nesadapter->phy_lock, flags);
+ nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data);
if (et_cmd->autoneg) {
/* Turn on Full duplex, Autoneg, and restart autonegotiation */
phy_data |= 0x1300;
@@ -1516,8 +1559,8 @@ static int nes_netdev_set_settings(struct net_device *netdev, struct ethtool_cmd
/* Turn off autoneg */
phy_data &= ~0x1000;
}
- nes_write_1G_phy_reg(nesdev, 0, nesadapter->phy_index[nesdev->mac_index],
- phy_data);
+ nes_write_1G_phy_reg(nesdev, 0, phy_index, phy_data);
+ spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
}
return 0;
diff --git a/drivers/infiniband/hw/nes/nes_utils.c b/drivers/infiniband/hw/nes/nes_utils.c
index 186623d86959..a9f5dd272f1a 100644
--- a/drivers/infiniband/hw/nes/nes_utils.c
+++ b/drivers/infiniband/hw/nes/nes_utils.c
@@ -381,12 +381,8 @@ static u16 nes_read16_eeprom(void __iomem *addr, u16 offset)
*/
void nes_write_1G_phy_reg(struct nes_device *nesdev, u8 phy_reg, u8 phy_addr, u16 data)
{
- struct nes_adapter *nesadapter = nesdev->nesadapter;
u32 u32temp;
u32 counter;
- unsigned long flags;
-
- spin_lock_irqsave(&nesadapter->phy_lock, flags);
nes_write_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL,
0x50020000 | data | ((u32)phy_reg << 18) | ((u32)phy_addr << 23));
@@ -402,8 +398,6 @@ void nes_write_1G_phy_reg(struct nes_device *nesdev, u8 phy_reg, u8 phy_addr, u1
if (!(u32temp & 1))
nes_debug(NES_DBG_PHY, "Phy is not responding. interrupt status = 0x%X.\n",
u32temp);
-
- spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
}
@@ -414,14 +408,11 @@ void nes_write_1G_phy_reg(struct nes_device *nesdev, u8 phy_reg, u8 phy_addr, u1
*/
void nes_read_1G_phy_reg(struct nes_device *nesdev, u8 phy_reg, u8 phy_addr, u16 *data)
{
- struct nes_adapter *nesadapter = nesdev->nesadapter;
u32 u32temp;
u32 counter;
- unsigned long flags;
/* nes_debug(NES_DBG_PHY, "phy addr = %d, mac_index = %d\n",
phy_addr, nesdev->mac_index); */
- spin_lock_irqsave(&nesadapter->phy_lock, flags);
nes_write_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL,
0x60020000 | ((u32)phy_reg << 18) | ((u32)phy_addr << 23));
@@ -441,7 +432,6 @@ void nes_read_1G_phy_reg(struct nes_device *nesdev, u8 phy_reg, u8 phy_addr, u16
} else {
*data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
}
- spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
}
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index e54f312e4bdc..9bc2d744b2ea 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -374,7 +374,7 @@ static int alloc_fast_reg_mr(struct nes_device *nesdev, struct nes_pd *nespd,
/*
* nes_alloc_fast_reg_mr
*/
-struct ib_mr *nes_alloc_fast_reg_mr(struct ib_pd *ibpd, int max_page_list_len)
+static struct ib_mr *nes_alloc_fast_reg_mr(struct ib_pd *ibpd, int max_page_list_len)
{
struct nes_pd *nespd = to_nespd(ibpd);
struct nes_vnic *nesvnic = to_nesvnic(ibpd->device);
@@ -3962,7 +3962,7 @@ int nes_register_ofa_device(struct nes_ib_device *nesibdev)
struct nes_adapter *nesadapter = nesdev->nesadapter;
int i, ret;
- ret = ib_register_device(&nesvnic->nesibdev->ibdev);
+ ret = ib_register_device(&nesvnic->nesibdev->ibdev, NULL);
if (ret) {
return ret;
}
diff --git a/drivers/infiniband/hw/qib/Kconfig b/drivers/infiniband/hw/qib/Kconfig
new file mode 100644
index 000000000000..7c03a70c55a2
--- /dev/null
+++ b/drivers/infiniband/hw/qib/Kconfig
@@ -0,0 +1,7 @@
+config INFINIBAND_QIB
+ tristate "QLogic PCIe HCA support"
+ depends on 64BIT && NET
+ ---help---
+ This is a low-level driver for QLogic PCIe QLE InfiniBand host
+ channel adapters. This driver does not support the QLogic
+ HyperTransport card (model QHT7140).
diff --git a/drivers/infiniband/hw/qib/Makefile b/drivers/infiniband/hw/qib/Makefile
new file mode 100644
index 000000000000..c6515a1b9a6a
--- /dev/null
+++ b/drivers/infiniband/hw/qib/Makefile
@@ -0,0 +1,15 @@
+obj-$(CONFIG_INFINIBAND_QIB) += ib_qib.o
+
+ib_qib-y := qib_cq.o qib_diag.o qib_dma.o qib_driver.o qib_eeprom.o \
+ qib_file_ops.o qib_fs.o qib_init.o qib_intr.o qib_keys.o \
+ qib_mad.o qib_mmap.o qib_mr.o qib_pcie.o qib_pio_copy.o \
+ qib_qp.o qib_qsfp.o qib_rc.o qib_ruc.o qib_sdma.o qib_srq.o \
+ qib_sysfs.o qib_twsi.o qib_tx.o qib_uc.o qib_ud.o \
+ qib_user_pages.o qib_user_sdma.o qib_verbs_mcast.o qib_iba7220.o \
+ qib_sd7220.o qib_sd7220_img.o qib_iba7322.o qib_verbs.o
+
+# 6120 has no fallback if no MSI interrupts, others can do INTx
+ib_qib-$(CONFIG_PCI_MSI) += qib_iba6120.o
+
+ib_qib-$(CONFIG_X86_64) += qib_wc_x86_64.o
+ib_qib-$(CONFIG_PPC64) += qib_wc_ppc64.o
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
new file mode 100644
index 000000000000..32d9208efcff
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -0,0 +1,1439 @@
+#ifndef _QIB_KERNEL_H
+#define _QIB_KERNEL_H
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
+ * All rights reserved.
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * This header file is the base header file for qlogic_ib kernel code
+ * qib_user.h serves a similar purpose for user code.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/scatterlist.h>
+#include <linux/io.h>
+#include <linux/fs.h>
+#include <linux/completion.h>
+#include <linux/kref.h>
+#include <linux/sched.h>
+
+#include "qib_common.h"
+#include "qib_verbs.h"
+
+/* only s/w major version of QLogic_IB we can handle */
+#define QIB_CHIP_VERS_MAJ 2U
+
+/* don't care about this except printing */
+#define QIB_CHIP_VERS_MIN 0U
+
+/* The Organization Unique Identifier (Mfg code), and its position in GUID */
+#define QIB_OUI 0x001175
+#define QIB_OUI_LSB 40
+
+/*
+ * per-driver stats, either not specific to any device or port, or
+ * summed over all of the devices and ports.
+ * They are described by name via ipathfs filesystem, so layout
+ * and number of elements can change without breaking compatibility.
+ * If members are added or deleted, qib_statnames[] in qib_fs.c must
+ * change to match.
+ */
+struct qlogic_ib_stats {
+ __u64 sps_ints; /* number of interrupts handled */
+ __u64 sps_errints; /* number of error interrupts */
+ __u64 sps_txerrs; /* tx-related packet errors */
+ __u64 sps_rcverrs; /* non-crc rcv packet errors */
+ __u64 sps_hwerrs; /* hardware errors reported (parity, etc.) */
+ __u64 sps_nopiobufs; /* no pio bufs avail from kernel */
+ __u64 sps_ctxts; /* number of contexts currently open */
+ __u64 sps_lenerrs; /* number of kernel packets where RHF != LRH len */
+ __u64 sps_buffull;
+ __u64 sps_hdrfull;
+};
+
+extern struct qlogic_ib_stats qib_stats;
+extern struct pci_error_handlers qib_pci_err_handler;
+extern struct pci_driver qib_driver;
+
+#define QIB_CHIP_SWVERSION QIB_CHIP_VERS_MAJ
+/*
+ * First-cut criterion for "device is active" is
+ * two thousand dwords combined Tx, Rx traffic per
+ * 5-second interval. SMA packets are 64 dwords,
+ * and occur "a few per second", presumably each way.
+ */
+#define QIB_TRAFFIC_ACTIVE_THRESHOLD (2000)
+
+/*
+ * Struct used to indicate which errors are logged in each of the
+ * error-counters that are logged to EEPROM. A counter is incremented
+ * _once_ (saturating at 255) for each event with any bits set in
+ * the error or hwerror register masks below.
+ */
+#define QIB_EEP_LOG_CNT (4)
+struct qib_eep_log_mask {
+ u64 errs_to_log;
+ u64 hwerrs_to_log;
+};
+
+/*
+ * Below contains all data related to a single context (formerly called port).
+ */
+struct qib_ctxtdata {
+ void **rcvegrbuf;
+ dma_addr_t *rcvegrbuf_phys;
+ /* rcvhdrq base, needs mmap before useful */
+ void *rcvhdrq;
+ /* kernel virtual address where hdrqtail is updated */
+ void *rcvhdrtail_kvaddr;
+ /*
+ * temp buffer for expected send setup, allocated at open, instead
+ * of each setup call
+ */
+ void *tid_pg_list;
+ /*
+ * Shared page for kernel to signal user processes that send buffers
+ * need disarming. The process should call QIB_CMD_DISARM_BUFS
+ * or QIB_CMD_ACK_EVENT with IPATH_EVENT_DISARM_BUFS set.
+ */
+ unsigned long *user_event_mask;
+ /* when waiting for rcv or pioavail */
+ wait_queue_head_t wait;
+ /*
+	 * rcvegr bufs base, physical; must fit in 44 bits so that
+	 * 32 bit programs' mmap64 of the 44 bit address still works
+ */
+ dma_addr_t rcvegr_phys;
+ /* mmap of hdrq, must fit in 44 bits */
+ dma_addr_t rcvhdrq_phys;
+ dma_addr_t rcvhdrqtailaddr_phys;
+
+ /*
+ * number of opens (including slave sub-contexts) on this instance
+ * (ignoring forks, dup, etc. for now)
+ */
+ int cnt;
+ /*
+ * how much space to leave at start of eager TID entries for
+ * protocol use, on each TID
+ */
+ /* instead of calculating it */
+ unsigned ctxt;
+ /* non-zero if ctxt is being shared. */
+ u16 subctxt_cnt;
+ /* non-zero if ctxt is being shared. */
+ u16 subctxt_id;
+ /* number of eager TID entries. */
+ u16 rcvegrcnt;
+ /* index of first eager TID entry. */
+ u16 rcvegr_tid_base;
+ /* number of pio bufs for this ctxt (all procs, if shared) */
+ u32 piocnt;
+ /* first pio buffer for this ctxt */
+ u32 pio_base;
+ /* chip offset of PIO buffers for this ctxt */
+ u32 piobufs;
+ /* how many alloc_pages() chunks in rcvegrbuf_pages */
+ u32 rcvegrbuf_chunks;
+ /* how many egrbufs per chunk */
+ u32 rcvegrbufs_perchunk;
+ /* order for rcvegrbuf_pages */
+ size_t rcvegrbuf_size;
+ /* rcvhdrq size (for freeing) */
+ size_t rcvhdrq_size;
+ /* per-context flags for fileops/intr communication */
+ unsigned long flag;
+ /* next expected TID to check when looking for free */
+ u32 tidcursor;
+ /* WAIT_RCV that timed out, no interrupt */
+ u32 rcvwait_to;
+ /* WAIT_PIO that timed out, no interrupt */
+ u32 piowait_to;
+ /* WAIT_RCV already happened, no wait */
+ u32 rcvnowait;
+ /* WAIT_PIO already happened, no wait */
+ u32 pionowait;
+ /* total number of polled urgent packets */
+ u32 urgent;
+ /* saved total number of polled urgent packets for poll edge trigger */
+ u32 urgent_poll;
+ /* pid of process using this ctxt */
+ pid_t pid;
+ pid_t subpid[QLOGIC_IB_MAX_SUBCTXT];
+ /* same size as task_struct .comm[], command that opened context */
+ char comm[16];
+ /* pkeys set by this use of this ctxt */
+ u16 pkeys[4];
+ /* so file ops can get at unit */
+ struct qib_devdata *dd;
+ /* so funcs that need physical port can get it easily */
+ struct qib_pportdata *ppd;
+ /* A page of memory for rcvhdrhead, rcvegrhead, rcvegrtail * N */
+ void *subctxt_uregbase;
+ /* An array of pages for the eager receive buffers * N */
+ void *subctxt_rcvegrbuf;
+ /* An array of pages for the eager header queue entries * N */
+ void *subctxt_rcvhdr_base;
+ /* The version of the library which opened this ctxt */
+ u32 userversion;
+ /* Bitmask of active slaves */
+ u32 active_slaves;
+ /* Type of packets or conditions we want to poll for */
+ u16 poll_type;
+ /* receive packet sequence counter */
+ u8 seq_cnt;
+ u8 redirect_seq_cnt;
+ /* ctxt rcvhdrq head offset */
+ u32 head;
+ u32 pkt_count;
+ /* QPs waiting for context processing */
+ struct list_head qp_wait_list;
+};
+
+struct qib_sge_state;
+
+struct qib_sdma_txreq {
+ int flags;
+ int sg_count;
+ dma_addr_t addr;
+ void (*callback)(struct qib_sdma_txreq *, int);
+ u16 start_idx; /* sdma private */
+ u16 next_descq_idx; /* sdma private */
+ struct list_head list; /* sdma private */
+};
+
+struct qib_sdma_desc {
+ __le64 qw[2];
+};
+
+struct qib_verbs_txreq {
+ struct qib_sdma_txreq txreq;
+ struct qib_qp *qp;
+ struct qib_swqe *wqe;
+ u32 dwords;
+ u16 hdr_dwords;
+ u16 hdr_inx;
+ struct qib_pio_header *align_buf;
+ struct qib_mregion *mr;
+ struct qib_sge_state *ss;
+};
+
+#define QIB_SDMA_TXREQ_F_USELARGEBUF 0x1
+#define QIB_SDMA_TXREQ_F_HEADTOHOST 0x2
+#define QIB_SDMA_TXREQ_F_INTREQ 0x4
+#define QIB_SDMA_TXREQ_F_FREEBUF 0x8
+#define QIB_SDMA_TXREQ_F_FREEDESC 0x10
+
+#define QIB_SDMA_TXREQ_S_OK 0
+#define QIB_SDMA_TXREQ_S_SENDERROR 1
+#define QIB_SDMA_TXREQ_S_ABORTED 2
+#define QIB_SDMA_TXREQ_S_SHUTDOWN 3
+
+/*
+ * Get/Set IB link-level config parameters for f_get/set_ib_cfg()
+ * Mostly for MADs that set or query link parameters, also ipath
+ * config interfaces
+ */
+#define QIB_IB_CFG_LIDLMC 0 /* LID (LS16b) and Mask (MS16b) */
+#define QIB_IB_CFG_LWID_ENB 2 /* allowed Link-width */
+#define QIB_IB_CFG_LWID 3 /* currently active Link-width */
+#define QIB_IB_CFG_SPD_ENB 4 /* allowed Link speeds */
+#define QIB_IB_CFG_SPD 5 /* current Link spd */
+#define QIB_IB_CFG_RXPOL_ENB 6 /* Auto-RX-polarity enable */
+#define QIB_IB_CFG_LREV_ENB 7 /* Auto-Lane-reversal enable */
+#define QIB_IB_CFG_LINKLATENCY 8 /* Link Latency (IB1.2 only) */
+#define QIB_IB_CFG_HRTBT 9 /* IB heartbeat off/enable/auto; DDR/QDR only */
+#define QIB_IB_CFG_OP_VLS 10 /* operational VLs */
+#define QIB_IB_CFG_VL_HIGH_CAP 11 /* num of VL high priority weights */
+#define QIB_IB_CFG_VL_LOW_CAP 12 /* num of VL low priority weights */
+#define QIB_IB_CFG_OVERRUN_THRESH 13 /* IB overrun threshold */
+#define QIB_IB_CFG_PHYERR_THRESH 14 /* IB PHY error threshold */
+#define QIB_IB_CFG_LINKDEFAULT 15 /* IB link default (sleep/poll) */
+#define QIB_IB_CFG_PKEYS 16 /* update partition keys */
+#define QIB_IB_CFG_MTU 17 /* update MTU in IBC */
+#define QIB_IB_CFG_LSTATE 18 /* update linkcmd and linkinitcmd in IBC */
+#define QIB_IB_CFG_VL_HIGH_LIMIT 19
+#define QIB_IB_CFG_PMA_TICKS 20 /* PMA sample tick resolution */
+#define QIB_IB_CFG_PORT 21 /* switch port we are connected to */
+
+/*
+ * for CFG_LSTATE: LINKCMD in upper 16 bits, LINKINITCMD in lower 16
+ * IB_LINKINITCMD_POLL and SLEEP are also used as set/get values for
+ * QIB_IB_CFG_LINKDEFAULT cmd
+ */
+#define IB_LINKCMD_DOWN (0 << 16)
+#define IB_LINKCMD_ARMED (1 << 16)
+#define IB_LINKCMD_ACTIVE (2 << 16)
+#define IB_LINKINITCMD_NOP 0
+#define IB_LINKINITCMD_POLL 1
+#define IB_LINKINITCMD_SLEEP 2
+#define IB_LINKINITCMD_DISABLE 3
+
+/*
+ * valid states passed to qib_set_linkstate() user call
+ */
+#define QIB_IB_LINKDOWN 0
+#define QIB_IB_LINKARM 1
+#define QIB_IB_LINKACTIVE 2
+#define QIB_IB_LINKDOWN_ONLY 3
+#define QIB_IB_LINKDOWN_SLEEP 4
+#define QIB_IB_LINKDOWN_DISABLE 5
+
+/*
+ * These 7 values (SDR, DDR, and QDR may be ORed for auto-speed
+ * negotiation) are used for the 3rd argument to path_f_set_ib_cfg
+ * with cmd QIB_IB_CFG_SPD_ENB, by direct calls or via sysfs. They
+ * are also the possible values for qib_link_speed_enabled and active.
+ * The values were chosen to match values used within the IB spec.
+ */
+#define QIB_IB_SDR 1
+#define QIB_IB_DDR 2
+#define QIB_IB_QDR 4
+
+#define QIB_DEFAULT_MTU 4096
+
+/*
+ * Possible IB config parameters for f_get/set_ib_table()
+ */
+#define QIB_IB_TBL_VL_HIGH_ARB 1 /* Get/set VL high priority weights */
+#define QIB_IB_TBL_VL_LOW_ARB 2 /* Get/set VL low priority weights */
+
+/*
+ * Possible "operations" for f_rcvctrl(ppd, op, ctxt)
+ * these are bits so they can be combined, e.g.
+ * QIB_RCVCTRL_INTRAVAIL_ENB | QIB_RCVCTRL_CTXT_ENB
+ */
+#define QIB_RCVCTRL_TAILUPD_ENB 0x01
+#define QIB_RCVCTRL_TAILUPD_DIS 0x02
+#define QIB_RCVCTRL_CTXT_ENB 0x04
+#define QIB_RCVCTRL_CTXT_DIS 0x08
+#define QIB_RCVCTRL_INTRAVAIL_ENB 0x10
+#define QIB_RCVCTRL_INTRAVAIL_DIS 0x20
+#define QIB_RCVCTRL_PKEY_ENB 0x40 /* Note, default is enabled */
+#define QIB_RCVCTRL_PKEY_DIS 0x80
+#define QIB_RCVCTRL_BP_ENB 0x0100
+#define QIB_RCVCTRL_BP_DIS 0x0200
+#define QIB_RCVCTRL_TIDFLOW_ENB 0x0400
+#define QIB_RCVCTRL_TIDFLOW_DIS 0x0800
+
+/*
+ * Possible "operations" for f_sendctrl(ppd, op, var)
+ * these are bits so they can be combined, e.g.
+ * QIB_SENDCTRL_BUFAVAIL_ENB | QIB_SENDCTRL_ENB
+ * Some operations (e.g. DISARM, ABORT) are known to
+ * be "one-shot", so do not modify shadow.
+ */
+#define QIB_SENDCTRL_DISARM (0x1000)
+#define QIB_SENDCTRL_DISARM_BUF(bufn) ((bufn) | QIB_SENDCTRL_DISARM)
+ /* available (0x2000) */
+#define QIB_SENDCTRL_AVAIL_DIS (0x4000)
+#define QIB_SENDCTRL_AVAIL_ENB (0x8000)
+#define QIB_SENDCTRL_AVAIL_BLIP (0x10000)
+#define QIB_SENDCTRL_SEND_DIS (0x20000)
+#define QIB_SENDCTRL_SEND_ENB (0x40000)
+#define QIB_SENDCTRL_FLUSH (0x80000)
+#define QIB_SENDCTRL_CLEAR (0x100000)
+#define QIB_SENDCTRL_DISARM_ALL (0x200000)
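For illustration only (not part of the patch), a hedged sketch of how these bits might be combined for the f_sendctrl hook declared later in struct qib_devdata; dd, ppd and bufn are assumed locals:

	/* Illustrative sketch: disarm one buffer, then blip the avail-update machinery. */
	dd->f_sendctrl(ppd, QIB_SENDCTRL_DISARM_BUF(bufn));
	dd->f_sendctrl(ppd, QIB_SENDCTRL_AVAIL_BLIP);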
+
+/*
+ * These are the generic indices for requesting per-port
+ * counter values via the f_portcntr function. They
+ * are always returned as 64 bit values, although most
+ * are 32 bit counters.
+ */
+/* send-related counters */
+#define QIBPORTCNTR_PKTSEND 0U
+#define QIBPORTCNTR_WORDSEND 1U
+#define QIBPORTCNTR_PSXMITDATA 2U
+#define QIBPORTCNTR_PSXMITPKTS 3U
+#define QIBPORTCNTR_PSXMITWAIT 4U
+#define QIBPORTCNTR_SENDSTALL 5U
+/* receive-related counters */
+#define QIBPORTCNTR_PKTRCV 6U
+#define QIBPORTCNTR_PSRCVDATA 7U
+#define QIBPORTCNTR_PSRCVPKTS 8U
+#define QIBPORTCNTR_RCVEBP 9U
+#define QIBPORTCNTR_RCVOVFL 10U
+#define QIBPORTCNTR_WORDRCV 11U
+/* IB link related error counters */
+#define QIBPORTCNTR_RXLOCALPHYERR 12U
+#define QIBPORTCNTR_RXVLERR 13U
+#define QIBPORTCNTR_ERRICRC 14U
+#define QIBPORTCNTR_ERRVCRC 15U
+#define QIBPORTCNTR_ERRLPCRC 16U
+#define QIBPORTCNTR_BADFORMAT 17U
+#define QIBPORTCNTR_ERR_RLEN 18U
+#define QIBPORTCNTR_IBSYMBOLERR 19U
+#define QIBPORTCNTR_INVALIDRLEN 20U
+#define QIBPORTCNTR_UNSUPVL 21U
+#define QIBPORTCNTR_EXCESSBUFOVFL 22U
+#define QIBPORTCNTR_ERRLINK 23U
+#define QIBPORTCNTR_IBLINKDOWN 24U
+#define QIBPORTCNTR_IBLINKERRRECOV 25U
+#define QIBPORTCNTR_LLI 26U
+/* other error counters */
+#define QIBPORTCNTR_RXDROPPKT 27U
+#define QIBPORTCNTR_VL15PKTDROP 28U
+#define QIBPORTCNTR_ERRPKEY 29U
+#define QIBPORTCNTR_KHDROVFL 30U
+/* sampling counters (these are actually control registers) */
+#define QIBPORTCNTR_PSINTERVAL 31U
+#define QIBPORTCNTR_PSSTART 32U
+#define QIBPORTCNTR_PSSTAT 33U
+
+/* how often we check for packet activity for "power on hours" (in seconds) */
+#define ACTIVITY_TIMER 5
+
+/* Below is an opaque struct. Each chip (device) can maintain
+ * private data needed for its operation, but not germane to the
+ * rest of the driver. For convenience, we define another that
+ * is chip-specific, per-port
+ */
+struct qib_chip_specific;
+struct qib_chipport_specific;
+
+enum qib_sdma_states {
+ qib_sdma_state_s00_hw_down,
+ qib_sdma_state_s10_hw_start_up_wait,
+ qib_sdma_state_s20_idle,
+ qib_sdma_state_s30_sw_clean_up_wait,
+ qib_sdma_state_s40_hw_clean_up_wait,
+ qib_sdma_state_s50_hw_halt_wait,
+ qib_sdma_state_s99_running,
+};
+
+enum qib_sdma_events {
+ qib_sdma_event_e00_go_hw_down,
+ qib_sdma_event_e10_go_hw_start,
+ qib_sdma_event_e20_hw_started,
+ qib_sdma_event_e30_go_running,
+ qib_sdma_event_e40_sw_cleaned,
+ qib_sdma_event_e50_hw_cleaned,
+ qib_sdma_event_e60_hw_halted,
+ qib_sdma_event_e70_go_idle,
+ qib_sdma_event_e7220_err_halted,
+ qib_sdma_event_e7322_err_halted,
+ qib_sdma_event_e90_timer_tick,
+};
+
+extern char *qib_sdma_state_names[];
+extern char *qib_sdma_event_names[];
+
+struct sdma_set_state_action {
+ unsigned op_enable:1;
+ unsigned op_intenable:1;
+ unsigned op_halt:1;
+ unsigned op_drain:1;
+ unsigned go_s99_running_tofalse:1;
+ unsigned go_s99_running_totrue:1;
+};
+
+struct qib_sdma_state {
+ struct kref kref;
+ struct completion comp;
+ enum qib_sdma_states current_state;
+ struct sdma_set_state_action *set_state_action;
+ unsigned current_op;
+ unsigned go_s99_running;
+ unsigned first_sendbuf;
+ unsigned last_sendbuf; /* really last +1 */
+ /* debugging/devel */
+ enum qib_sdma_states previous_state;
+ unsigned previous_op;
+ enum qib_sdma_events last_event;
+};
+
+struct xmit_wait {
+ struct timer_list timer;
+ u64 counter;
+ u8 flags;
+ struct cache {
+ u64 psxmitdata;
+ u64 psrcvdata;
+ u64 psxmitpkts;
+ u64 psrcvpkts;
+ u64 psxmitwait;
+ } counter_cache;
+};
+
+/*
+ * The structure below encapsulates data relevant to a physical IB Port.
+ * Current chips support only one such port, but the separation
+ * clarifies things a bit. Note that to conform to IB conventions,
+ * port-numbers are one-based. The first or only port is port1.
+ */
+struct qib_pportdata {
+ struct qib_ibport ibport_data;
+
+ struct qib_devdata *dd;
+ struct qib_chippport_specific *cpspec; /* chip-specific per-port */
+ struct kobject pport_kobj;
+ struct kobject sl2vl_kobj;
+ struct kobject diagc_kobj;
+
+ /* GUID for this interface, in network order */
+ __be64 guid;
+
+ /* QIB_POLL, etc. link-state specific flags, per port */
+ u32 lflags;
+ /* qib_lflags driver is waiting for */
+ u32 state_wanted;
+ spinlock_t lflags_lock;
+ /* number of (port-specific) interrupts for this port -- saturates... */
+ u32 int_counter;
+
+ /* ref count for each pkey */
+ atomic_t pkeyrefs[4];
+
+ /*
+ * this address is mapped readonly into user processes so they can
+ * get status cheaply, whenever they want. One qword of status per port
+ */
+ u64 *statusp;
+
+ /* SendDMA related entries */
+ spinlock_t sdma_lock;
+ struct qib_sdma_state sdma_state;
+ unsigned long sdma_buf_jiffies;
+ struct qib_sdma_desc *sdma_descq;
+ u64 sdma_descq_added;
+ u64 sdma_descq_removed;
+ u16 sdma_descq_cnt;
+ u16 sdma_descq_tail;
+ u16 sdma_descq_head;
+ u16 sdma_next_intr;
+ u16 sdma_reset_wait;
+ u8 sdma_generation;
+ struct tasklet_struct sdma_sw_clean_up_task;
+ struct list_head sdma_activelist;
+
+ dma_addr_t sdma_descq_phys;
+ volatile __le64 *sdma_head_dma; /* DMA'ed by chip */
+ dma_addr_t sdma_head_phys;
+
+ wait_queue_head_t state_wait; /* for state_wanted */
+
+ /* HoL blocking for SMP replies */
+ unsigned hol_state;
+ struct timer_list hol_timer;
+
+ /*
+ * Shadow copies of registers; size indicates read access size.
+	 * Most of them are readonly, but some are write-only registers,
+ * where we manipulate the bits in the shadow copy, and then write
+ * the shadow copy to qlogic_ib.
+ *
+ * We deliberately make most of these 32 bits, since they have
+	 * restricted range.  For any that we read, we want to generate 32
+ * bit accesses, since Opteron will generate 2 separate 32 bit HT
+ * transactions for a 64 bit read, and we want to avoid unnecessary
+ * bus transactions.
+ */
+
+ /* This is the 64 bit group */
+ /* last ibcstatus. opaque outside chip-specific code */
+ u64 lastibcstat;
+
+ /* these are the "32 bit" regs */
+
+ /*
+ * the following two are 32-bit bitmasks, but {test,clear,set}_bit
+ * all expect bit fields to be "unsigned long"
+ */
+ unsigned long p_rcvctrl; /* shadow per-port rcvctrl */
+ unsigned long p_sendctrl; /* shadow per-port sendctrl */
+
+ u32 ibmtu; /* The MTU programmed for this unit */
+ /*
+ * Current max size IB packet (in bytes) including IB headers, that
+ * we can send. Changes when ibmtu changes.
+ */
+ u32 ibmaxlen;
+ /*
+ * ibmaxlen at init time, limited by chip and by receive buffer
+ * size. Not changed after init.
+ */
+ u32 init_ibmaxlen;
+ /* LID programmed for this instance */
+ u16 lid;
+ /* list of pkeys programmed; 0 if not set */
+ u16 pkeys[4];
+ /* LID mask control */
+ u8 lmc;
+ u8 link_width_supported;
+ u8 link_speed_supported;
+ u8 link_width_enabled;
+ u8 link_speed_enabled;
+ u8 link_width_active;
+ u8 link_speed_active;
+ u8 vls_supported;
+ u8 vls_operational;
+ /* Rx Polarity inversion (compensate for ~tx on partner) */
+ u8 rx_pol_inv;
+
+ u8 hw_pidx; /* physical port index */
+ u8 port; /* IB port number and index into dd->pports - 1 */
+
+ u8 delay_mult;
+
+ /* used to override LED behavior */
+ u8 led_override; /* Substituted for normal value, if non-zero */
+ u16 led_override_timeoff; /* delta to next timer event */
+ u8 led_override_vals[2]; /* Alternates per blink-frame */
+ u8 led_override_phase; /* Just counts, LSB picks from vals[] */
+ atomic_t led_override_timer_active;
+ /* Used to flash LEDs in override mode */
+ struct timer_list led_override_timer;
+ struct xmit_wait cong_stats;
+ struct timer_list symerr_clear_timer;
+};
+
+/* Observers. Not to be taken lightly, possibly not to ship. */
+/*
+ * If a diag read or write is to (bottom <= offset <= top),
+ * the "hoook" is called, allowing, e.g. shadows to be
+ * updated in sync with the driver. struct diag_observer
+ * is the "visible" part.
+ */
+struct diag_observer;
+
+typedef int (*diag_hook) (struct qib_devdata *dd,
+ const struct diag_observer *op,
+ u32 offs, u64 *data, u64 mask, int only_32);
+
+struct diag_observer {
+ diag_hook hook;
+ u32 bottom;
+ u32 top;
+};
+
+extern int qib_register_observer(struct qib_devdata *dd,
+ const struct diag_observer *op);
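A hedged sketch (not part of the patch) of how a chip-specific file might hook a register range with an observer; the hook body and the 0x100-0x108 range are purely hypothetical:

static int shadow_hook(struct qib_devdata *dd, const struct diag_observer *op,
		       u32 offs, u64 *data, u64 mask, int only_32)
{
	/* Keep a driver-side shadow in sync with the diag read/write. */
	return 0;
}

static const struct diag_observer shadow_observer = {
	.hook = shadow_hook,
	.bottom = 0x100,
	.top = 0x108,
};

/* e.g. during chip init: qib_register_observer(dd, &shadow_observer); */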
+
+/* Only declared here, not defined. Private to diags */
+struct diag_observer_list_elt;
+
+/* device data struct now contains only "general per-device" info.
+ * Fields related to a physical IB port are in a qib_pportdata struct
+ * (described above), while fields only used by a particular chip-type are in
+ * a qib_chipdata struct, whose contents are opaque to this file.
+ */
+struct qib_devdata {
+ struct qib_ibdev verbs_dev; /* must be first */
+ struct list_head list;
+ /* pointers to related structs for this device */
+ /* pci access data structure */
+ struct pci_dev *pcidev;
+ struct cdev *user_cdev;
+ struct cdev *diag_cdev;
+ struct device *user_device;
+ struct device *diag_device;
+
+ /* mem-mapped pointer to base of chip regs */
+ u64 __iomem *kregbase;
+ /* end of mem-mapped chip space excluding sendbuf and user regs */
+ u64 __iomem *kregend;
+ /* physical address of chip for io_remap, etc. */
+ resource_size_t physaddr;
+ /* qib_cfgctxts pointers */
+ struct qib_ctxtdata **rcd; /* Receive Context Data */
+
+ /* qib_pportdata, points to array of (physical) port-specific
+ * data structs, indexed by pidx (0..n-1)
+ */
+ struct qib_pportdata *pport;
+ struct qib_chip_specific *cspec; /* chip-specific */
+
+ /* kvirt address of 1st 2k pio buffer */
+ void __iomem *pio2kbase;
+ /* kvirt address of 1st 4k pio buffer */
+ void __iomem *pio4kbase;
+ /* mem-mapped pointer to base of PIO buffers (if using WC PAT) */
+ void __iomem *piobase;
+ /* mem-mapped pointer to base of user chip regs (if using WC PAT) */
+ u64 __iomem *userbase;
+ /*
+ * points to area where PIOavail registers will be DMA'ed.
+	 * Has to be on a page of its own, because the page will be
+ * mapped into user program space. This copy is *ONLY* ever
+ * written by DMA, not by the driver! Need a copy per device
+ * when we get to multiple devices
+ */
+ volatile __le64 *pioavailregs_dma; /* DMA'ed by chip */
+ /* physical address where updates occur */
+ dma_addr_t pioavailregs_phys;
+
+ /* device-specific implementations of functions needed by
+ * common code. Contrary to previous consensus, we can't
+ * really just point to a device-specific table, because we
+ * may need to "bend", e.g. *_f_put_tid
+ */
+ /* fallback to alternate interrupt type if possible */
+ int (*f_intr_fallback)(struct qib_devdata *);
+ /* hard reset chip */
+ int (*f_reset)(struct qib_devdata *);
+ void (*f_quiet_serdes)(struct qib_pportdata *);
+ int (*f_bringup_serdes)(struct qib_pportdata *);
+ int (*f_early_init)(struct qib_devdata *);
+ void (*f_clear_tids)(struct qib_devdata *, struct qib_ctxtdata *);
+ void (*f_put_tid)(struct qib_devdata *, u64 __iomem*,
+ u32, unsigned long);
+ void (*f_cleanup)(struct qib_devdata *);
+ void (*f_setextled)(struct qib_pportdata *, u32);
+ /* fill out chip-specific fields */
+ int (*f_get_base_info)(struct qib_ctxtdata *, struct qib_base_info *);
+ /* free irq */
+ void (*f_free_irq)(struct qib_devdata *);
+ struct qib_message_header *(*f_get_msgheader)
+ (struct qib_devdata *, __le32 *);
+ void (*f_config_ctxts)(struct qib_devdata *);
+ int (*f_get_ib_cfg)(struct qib_pportdata *, int);
+ int (*f_set_ib_cfg)(struct qib_pportdata *, int, u32);
+ int (*f_set_ib_loopback)(struct qib_pportdata *, const char *);
+ int (*f_get_ib_table)(struct qib_pportdata *, int, void *);
+ int (*f_set_ib_table)(struct qib_pportdata *, int, void *);
+ u32 (*f_iblink_state)(u64);
+ u8 (*f_ibphys_portstate)(u64);
+ void (*f_xgxs_reset)(struct qib_pportdata *);
+ /* per chip actions needed for IB Link up/down changes */
+ int (*f_ib_updown)(struct qib_pportdata *, int, u64);
+ u32 __iomem *(*f_getsendbuf)(struct qib_pportdata *, u64, u32 *);
+	/* Read/modify/write of GPIO pins (potentially chip-specific) */
+ int (*f_gpio_mod)(struct qib_devdata *dd, u32 out, u32 dir,
+ u32 mask);
+ /* Enable writes to config EEPROM (if supported) */
+ int (*f_eeprom_wen)(struct qib_devdata *dd, int wen);
+ /*
+ * modify rcvctrl shadow[s] and write to appropriate chip-regs.
+ * see above QIB_RCVCTRL_xxx_ENB/DIS for operations.
+ * (ctxt == -1) means "all contexts", only meaningful for
+ * clearing. Could remove if chip_spec shutdown properly done.
+ */
+ void (*f_rcvctrl)(struct qib_pportdata *, unsigned int op,
+ int ctxt);
+ /* Read/modify/write sendctrl appropriately for op and port. */
+ void (*f_sendctrl)(struct qib_pportdata *, u32 op);
+ void (*f_set_intr_state)(struct qib_devdata *, u32);
+ void (*f_set_armlaunch)(struct qib_devdata *, u32);
+ void (*f_wantpiobuf_intr)(struct qib_devdata *, u32);
+ int (*f_late_initreg)(struct qib_devdata *);
+ int (*f_init_sdma_regs)(struct qib_pportdata *);
+ u16 (*f_sdma_gethead)(struct qib_pportdata *);
+ int (*f_sdma_busy)(struct qib_pportdata *);
+ void (*f_sdma_update_tail)(struct qib_pportdata *, u16);
+ void (*f_sdma_set_desc_cnt)(struct qib_pportdata *, unsigned);
+ void (*f_sdma_sendctrl)(struct qib_pportdata *, unsigned);
+ void (*f_sdma_hw_clean_up)(struct qib_pportdata *);
+ void (*f_sdma_hw_start_up)(struct qib_pportdata *);
+ void (*f_sdma_init_early)(struct qib_pportdata *);
+ void (*f_set_cntr_sample)(struct qib_pportdata *, u32, u32);
+ void (*f_update_usrhead)(struct qib_ctxtdata *, u64, u32, u32);
+ u32 (*f_hdrqempty)(struct qib_ctxtdata *);
+ u64 (*f_portcntr)(struct qib_pportdata *, u32);
+ u32 (*f_read_cntrs)(struct qib_devdata *, loff_t, char **,
+ u64 **);
+ u32 (*f_read_portcntrs)(struct qib_devdata *, loff_t, u32,
+ char **, u64 **);
+ u32 (*f_setpbc_control)(struct qib_pportdata *, u32, u8, u8);
+ void (*f_initvl15_bufs)(struct qib_devdata *);
+ void (*f_init_ctxt)(struct qib_ctxtdata *);
+ void (*f_txchk_change)(struct qib_devdata *, u32, u32, u32,
+ struct qib_ctxtdata *);
+ void (*f_writescratch)(struct qib_devdata *, u32);
+ int (*f_tempsense_rd)(struct qib_devdata *, int regnum);
+
+ char *boardname; /* human readable board info */
+
+ /* template for writing TIDs */
+ u64 tidtemplate;
+ /* value to write to free TIDs */
+ u64 tidinvalid;
+
+ /* number of registers used for pioavail */
+ u32 pioavregs;
+ /* device (not port) flags, basically device capabilities */
+ u32 flags;
+ /* last buffer for user use */
+ u32 lastctxt_piobuf;
+
+ /* saturating counter of (non-port-specific) device interrupts */
+ u32 int_counter;
+
+ /* pio bufs allocated per ctxt */
+ u32 pbufsctxt;
+ /* if remainder on bufs/ctxt, ctxts < extrabuf get 1 extra */
+ u32 ctxts_extrabuf;
+ /*
+	 * number of ctxts configured as max; zero means use the number the
+	 * chip supports, fewer gives more pio bufs/ctxt, etc.
+ */
+ u32 cfgctxts;
+
+ /*
+ * hint that we should update pioavailshadow before
+ * looking for a PIO buffer
+ */
+ u32 upd_pio_shadow;
+
+ /* internal debugging stats */
+ u32 maxpkts_call;
+ u32 avgpkts_call;
+ u64 nopiobufs;
+
+ /* PCI Vendor ID (here for NodeInfo) */
+ u16 vendorid;
+ /* PCI Device ID (here for NodeInfo) */
+ u16 deviceid;
+ /* for write combining settings */
+ unsigned long wc_cookie;
+ unsigned long wc_base;
+ unsigned long wc_len;
+
+ /* shadow copy of struct page *'s for exp tid pages */
+ struct page **pageshadow;
+ /* shadow copy of dma handles for exp tid pages */
+ dma_addr_t *physshadow;
+ u64 __iomem *egrtidbase;
+ spinlock_t sendctrl_lock; /* protect changes to sendctrl shadow */
+ /* around rcd and (user ctxts) ctxt_cnt use (intr vs free) */
+ spinlock_t uctxt_lock; /* rcd and user context changes */
+ /*
+ * per unit status, see also portdata statusp
+ * mapped readonly into user processes so they can get unit and
+ * IB link status cheaply
+ */
+ u64 *devstatusp;
+ char *freezemsg; /* freeze msg if hw error put chip in freeze */
+ u32 freezelen; /* max length of freezemsg */
+ /* timer used to prevent stats overflow, error throttling, etc. */
+ struct timer_list stats_timer;
+
+ /* timer to verify interrupts work, and fallback if possible */
+ struct timer_list intrchk_timer;
+ unsigned long ureg_align; /* user register alignment */
+
+ /*
+ * Protects pioavailshadow, pioavailkernel, pio_need_disarm, and
+ * pio_writing.
+ */
+ spinlock_t pioavail_lock;
+
+ /*
+ * Shadow copies of registers; size indicates read access size.
+	 * Most of them are readonly, but some are write-only registers,
+ * where we manipulate the bits in the shadow copy, and then write
+ * the shadow copy to qlogic_ib.
+ *
+ * We deliberately make most of these 32 bits, since they have
+	 * restricted range.  For any that we read, we want to generate 32
+ * bit accesses, since Opteron will generate 2 separate 32 bit HT
+ * transactions for a 64 bit read, and we want to avoid unnecessary
+ * bus transactions.
+ */
+
+ /* This is the 64 bit group */
+
+ unsigned long pioavailshadow[6];
+ /* bitmap of send buffers available for the kernel to use with PIO. */
+ unsigned long pioavailkernel[6];
+ /* bitmap of send buffers which need to be disarmed. */
+ unsigned long pio_need_disarm[3];
+ /* bitmap of send buffers which are being written to. */
+ unsigned long pio_writing[3];
+ /* kr_revision shadow */
+ u64 revision;
+ /* Base GUID for device (from eeprom, network order) */
+ __be64 base_guid;
+
+ /*
+ * kr_sendpiobufbase value (chip offset of pio buffers), and the
+	 * base of the 2KB buffers (user processes only use 2K)
+ */
+ u64 piobufbase;
+ u32 pio2k_bufbase;
+
+ /* these are the "32 bit" regs */
+
+ /* number of GUIDs in the flash for this interface */
+ u32 nguid;
+ /*
+ * the following two are 32-bit bitmasks, but {test,clear,set}_bit
+ * all expect bit fields to be "unsigned long"
+ */
+ unsigned long rcvctrl; /* shadow per device rcvctrl */
+ unsigned long sendctrl; /* shadow per device sendctrl */
+
+ /* value we put in kr_rcvhdrcnt */
+ u32 rcvhdrcnt;
+ /* value we put in kr_rcvhdrsize */
+ u32 rcvhdrsize;
+ /* value we put in kr_rcvhdrentsize */
+ u32 rcvhdrentsize;
+ /* kr_ctxtcnt value */
+ u32 ctxtcnt;
+ /* kr_pagealign value */
+ u32 palign;
+ /* number of "2KB" PIO buffers */
+ u32 piobcnt2k;
+ /* size in bytes of "2KB" PIO buffers */
+ u32 piosize2k;
+ /* max usable size in dwords of a "2KB" PIO buffer before going "4KB" */
+ u32 piosize2kmax_dwords;
+ /* number of "4KB" PIO buffers */
+ u32 piobcnt4k;
+ /* size in bytes of "4KB" PIO buffers */
+ u32 piosize4k;
+ /* kr_rcvegrbase value */
+ u32 rcvegrbase;
+ /* kr_rcvtidbase value */
+ u32 rcvtidbase;
+ /* kr_rcvtidcnt value */
+ u32 rcvtidcnt;
+ /* kr_userregbase */
+ u32 uregbase;
+ /* shadow the control register contents */
+ u32 control;
+
+ /* chip address space used by 4k pio buffers */
+ u32 align4k;
+ /* size of each rcvegrbuffer */
+ u32 rcvegrbufsize;
+	/* localbus width (1, 2, 4, 8, 16, 32) from config space */
+ u32 lbus_width;
+ /* localbus speed in MHz */
+ u32 lbus_speed;
+ int unit; /* unit # of this chip */
+
+ /* start of CHIP_SPEC move to chipspec, but need code changes */
+ /* low and high portions of MSI capability/vector */
+ u32 msi_lo;
+ /* saved after PCIe init for restore after reset */
+ u32 msi_hi;
+ /* MSI data (vector) saved for restore */
+ u16 msi_data;
+ /* so we can rewrite it after a chip reset */
+ u32 pcibar0;
+ /* so we can rewrite it after a chip reset */
+ u32 pcibar1;
+ u64 rhdrhead_intr_off;
+
+ /*
+ * ASCII serial number, from flash, large enough for original
+ * all digit strings, and longer QLogic serial number format
+ */
+ u8 serial[16];
+ /* human readable board version */
+ u8 boardversion[96];
+ u8 lbus_info[32]; /* human readable localbus info */
+ /* chip major rev, from qib_revision */
+ u8 majrev;
+ /* chip minor rev, from qib_revision */
+ u8 minrev;
+
+ /* Misc small ints */
+ /* Number of physical ports available */
+ u8 num_pports;
+ /* Lowest context number which can be used by user processes */
+ u8 first_user_ctxt;
+ u8 n_krcv_queues;
+ u8 qpn_mask;
+ u8 skip_kctxt_mask;
+
+ u16 rhf_offset; /* offset of RHF within receive header entry */
+
+ /*
+ * GPIO pins for twsi-connected devices, and device code for eeprom
+ */
+ u8 gpio_sda_num;
+ u8 gpio_scl_num;
+ u8 twsi_eeprom_dev;
+ u8 board_atten;
+
+ /* Support (including locks) for EEPROM logging of errors and time */
+ /* control access to actual counters, timer */
+ spinlock_t eep_st_lock;
+ /* control high-level access to EEPROM */
+ struct mutex eep_lock;
+ uint64_t traffic_wds;
+ /* active time is kept in seconds, but logged in hours */
+ atomic_t active_time;
+	/* Below are nominal shadows of EEPROM, new since last EEPROM update */
+ uint8_t eep_st_errs[QIB_EEP_LOG_CNT];
+ uint8_t eep_st_new_errs[QIB_EEP_LOG_CNT];
+ uint16_t eep_hrs;
+ /*
+ * masks for which bits of errs, hwerrs that cause
+ * each of the counters to increment.
+ */
+ struct qib_eep_log_mask eep_st_masks[QIB_EEP_LOG_CNT];
+ struct qib_diag_client *diag_client;
+ spinlock_t qib_diag_trans_lock; /* protect diag observer ops */
+ struct diag_observer_list_elt *diag_observer_list;
+
+ u8 psxmitwait_supported;
+ /* cycle length of PS* counters in HW (in picoseconds) */
+ u16 psxmitwait_check_rate;
+};
+
+/* hol_state values */
+#define QIB_HOL_UP 0
+#define QIB_HOL_INIT 1
+
+#define QIB_SDMA_SENDCTRL_OP_ENABLE (1U << 0)
+#define QIB_SDMA_SENDCTRL_OP_INTENABLE (1U << 1)
+#define QIB_SDMA_SENDCTRL_OP_HALT (1U << 2)
+#define QIB_SDMA_SENDCTRL_OP_CLEANUP (1U << 3)
+#define QIB_SDMA_SENDCTRL_OP_DRAIN (1U << 4)
+
+/* operation types for f_txchk_change() */
+#define TXCHK_CHG_TYPE_DIS1 3
+#define TXCHK_CHG_TYPE_ENAB1 2
+#define TXCHK_CHG_TYPE_KERN 1
+#define TXCHK_CHG_TYPE_USER 0
+
+#define QIB_CHASE_TIME msecs_to_jiffies(145)
+#define QIB_CHASE_DIS_TIME msecs_to_jiffies(160)
+
+/* Private data for file operations */
+struct qib_filedata {
+ struct qib_ctxtdata *rcd;
+ unsigned subctxt;
+ unsigned tidcursor;
+ struct qib_user_sdma_queue *pq;
+ int rec_cpu_num; /* for cpu affinity; -1 if none */
+};
+
+extern struct list_head qib_dev_list;
+extern spinlock_t qib_devs_lock;
+extern struct qib_devdata *qib_lookup(int unit);
+extern u32 qib_cpulist_count;
+extern unsigned long *qib_cpulist;
+
+extern unsigned qib_wc_pat;
+int qib_init(struct qib_devdata *, int);
+int init_chip_wc_pat(struct qib_devdata *dd, u32);
+int qib_enable_wc(struct qib_devdata *dd);
+void qib_disable_wc(struct qib_devdata *dd);
+int qib_count_units(int *npresentp, int *nupp);
+int qib_count_active_units(void);
+
+int qib_cdev_init(int minor, const char *name,
+ const struct file_operations *fops,
+ struct cdev **cdevp, struct device **devp);
+void qib_cdev_cleanup(struct cdev **cdevp, struct device **devp);
+int qib_dev_init(void);
+void qib_dev_cleanup(void);
+
+int qib_diag_add(struct qib_devdata *);
+void qib_diag_remove(struct qib_devdata *);
+void qib_handle_e_ibstatuschanged(struct qib_pportdata *, u64);
+void qib_sdma_update_tail(struct qib_pportdata *, u16); /* hold sdma_lock */
+
+int qib_decode_err(struct qib_devdata *dd, char *buf, size_t blen, u64 err);
+void qib_bad_intrstatus(struct qib_devdata *);
+void qib_handle_urcv(struct qib_devdata *, u64);
+
+/* clean up any per-chip chip-specific stuff */
+void qib_chip_cleanup(struct qib_devdata *);
+/* clean up any chip type-specific stuff */
+void qib_chip_done(void);
+
+/* check to see if we have to force ordering for write combining */
+int qib_unordered_wc(void);
+void qib_pio_copy(void __iomem *to, const void *from, size_t count);
+
+void qib_disarm_piobufs(struct qib_devdata *, unsigned, unsigned);
+int qib_disarm_piobufs_ifneeded(struct qib_ctxtdata *);
+void qib_disarm_piobufs_set(struct qib_devdata *, unsigned long *, unsigned);
+void qib_cancel_sends(struct qib_pportdata *);
+
+int qib_create_rcvhdrq(struct qib_devdata *, struct qib_ctxtdata *);
+int qib_setup_eagerbufs(struct qib_ctxtdata *);
+void qib_set_ctxtcnt(struct qib_devdata *);
+int qib_create_ctxts(struct qib_devdata *dd);
+struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *, u32);
+void qib_init_pportdata(struct qib_pportdata *, struct qib_devdata *, u8, u8);
+void qib_free_ctxtdata(struct qib_devdata *, struct qib_ctxtdata *);
+
+u32 qib_kreceive(struct qib_ctxtdata *, u32 *, u32 *);
+int qib_reset_device(int);
+int qib_wait_linkstate(struct qib_pportdata *, u32, int);
+int qib_set_linkstate(struct qib_pportdata *, u8);
+int qib_set_mtu(struct qib_pportdata *, u16);
+int qib_set_lid(struct qib_pportdata *, u32, u8);
+void qib_hol_down(struct qib_pportdata *);
+void qib_hol_init(struct qib_pportdata *);
+void qib_hol_up(struct qib_pportdata *);
+void qib_hol_event(unsigned long);
+void qib_disable_after_error(struct qib_devdata *);
+int qib_set_uevent_bits(struct qib_pportdata *, const int);
+
+/* for use in system calls, where we want to know device type, etc. */
+#define ctxt_fp(fp) \
+ (((struct qib_filedata *)(fp)->private_data)->rcd)
+#define subctxt_fp(fp) \
+ (((struct qib_filedata *)(fp)->private_data)->subctxt)
+#define tidcursor_fp(fp) \
+ (((struct qib_filedata *)(fp)->private_data)->tidcursor)
+#define user_sdma_queue_fp(fp) \
+ (((struct qib_filedata *)(fp)->private_data)->pq)
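A minimal sketch (not part of the patch) of how these accessors might be used inside a file-operations handler, assuming fp is the struct file passed in:

	/* Illustrative sketch: pull per-open state out of fp->private_data. */
	struct qib_ctxtdata *rcd = ctxt_fp(fp);
	unsigned subctxt = subctxt_fp(fp);
	struct qib_user_sdma_queue *pq = user_sdma_queue_fp(fp);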
+
+static inline struct qib_devdata *dd_from_ppd(struct qib_pportdata *ppd)
+{
+ return ppd->dd;
+}
+
+static inline struct qib_devdata *dd_from_dev(struct qib_ibdev *dev)
+{
+ return container_of(dev, struct qib_devdata, verbs_dev);
+}
+
+static inline struct qib_devdata *dd_from_ibdev(struct ib_device *ibdev)
+{
+ return dd_from_dev(to_idev(ibdev));
+}
+
+static inline struct qib_pportdata *ppd_from_ibp(struct qib_ibport *ibp)
+{
+ return container_of(ibp, struct qib_pportdata, ibport_data);
+}
+
+static inline struct qib_ibport *to_iport(struct ib_device *ibdev, u8 port)
+{
+ struct qib_devdata *dd = dd_from_ibdev(ibdev);
+ unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */
+
+ WARN_ON(pidx >= dd->num_pports);
+ return &dd->pport[pidx].ibport_data;
+}
+
+/*
+ * values for dd->flags (_device_ related flags)
+ */
+#define QIB_HAS_LINK_LATENCY 0x1 /* supports link latency (IB 1.2) */
+#define QIB_INITTED 0x2 /* chip and driver up and initted */
+#define QIB_DOING_RESET 0x4 /* in the middle of doing chip reset */
+#define QIB_PRESENT 0x8 /* chip accesses can be done */
+#define QIB_PIO_FLUSH_WC 0x10 /* Needs Write combining flush for PIO */
+#define QIB_HAS_THRESH_UPDATE 0x40
+#define QIB_HAS_SDMA_TIMEOUT 0x80
+#define QIB_USE_SPCL_TRIG 0x100 /* SpecialTrigger launch enabled */
+#define QIB_NODMA_RTAIL 0x200 /* rcvhdrtail register DMA enabled */
+#define QIB_HAS_INTX 0x800 /* Supports INTx interrupts */
+#define QIB_HAS_SEND_DMA 0x1000 /* Supports Send DMA */
+#define QIB_HAS_VLSUPP 0x2000 /* Supports multiple VLs; PBC different */
+#define QIB_HAS_HDRSUPP 0x4000 /* Supports header suppression */
+#define QIB_BADINTR 0x8000 /* severe interrupt problems */
+#define QIB_DCA_ENABLED 0x10000 /* Direct Cache Access enabled */
+#define QIB_HAS_QSFP 0x20000 /* device (card instance) has QSFP */
+
+/*
+ * values for ppd->lflags (_ib_port_ related flags)
+ */
+#define QIBL_LINKV 0x1 /* IB link state valid */
+#define QIBL_LINKDOWN 0x8 /* IB link is down */
+#define QIBL_LINKINIT 0x10 /* IB link level is up */
+#define QIBL_LINKARMED 0x20 /* IB link is ARMED */
+#define QIBL_LINKACTIVE 0x40 /* IB link is ACTIVE */
+/* leave a gap for more IB-link state */
+#define QIBL_IB_AUTONEG_INPROG 0x1000 /* non-IBTA DDR/QDR neg active */
+#define QIBL_IB_AUTONEG_FAILED 0x2000 /* non-IBTA DDR/QDR neg failed */
+#define QIBL_IB_LINK_DISABLED 0x4000 /* Linkdown-disable forced,
+ * Do not try to bring up */
+#define QIBL_IB_FORCE_NOTIFY 0x8000 /* force notify on next ib change */
+
+/* IB dword length mask in PBC (lower 11 bits); same for all chips */
+#define QIB_PBC_LENGTH_MASK ((1 << 11) - 1)
+
+
+/* ctxt_flag bit offsets */
+ /* waiting for a packet to arrive */
+#define QIB_CTXT_WAITING_RCV 2
+ /* master has not finished initializing */
+#define QIB_CTXT_MASTER_UNINIT 4
+ /* waiting for an urgent packet to arrive */
+#define QIB_CTXT_WAITING_URG 5
+
+/* free up any allocated data at closes */
+void qib_free_data(struct qib_ctxtdata *dd);
+void qib_chg_pioavailkernel(struct qib_devdata *, unsigned, unsigned,
+ u32, struct qib_ctxtdata *);
+struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *,
+ const struct pci_device_id *);
+struct qib_devdata *qib_init_iba7220_funcs(struct pci_dev *,
+ const struct pci_device_id *);
+struct qib_devdata *qib_init_iba6120_funcs(struct pci_dev *,
+ const struct pci_device_id *);
+void qib_free_devdata(struct qib_devdata *);
+struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra);
+
+#define QIB_TWSI_NO_DEV 0xFF
+/* Below qib_twsi_ functions must be called with eep_lock held */
+int qib_twsi_reset(struct qib_devdata *dd);
+int qib_twsi_blk_rd(struct qib_devdata *dd, int dev, int addr, void *buffer,
+ int len);
+int qib_twsi_blk_wr(struct qib_devdata *dd, int dev, int addr,
+ const void *buffer, int len);
+void qib_get_eeprom_info(struct qib_devdata *);
+int qib_update_eeprom_log(struct qib_devdata *dd);
+void qib_inc_eeprom_err(struct qib_devdata *dd, u32 eidx, u32 incr);
+void qib_dump_lookup_output_queue(struct qib_devdata *);
+void qib_force_pio_avail_update(struct qib_devdata *);
+void qib_clear_symerror_on_linkup(unsigned long opaque);
+
+/*
+ * Set LED override; only the two LSBs have "public" meaning, but
+ * any non-zero value substitutes them for the Link and LinkTrain
+ * LED states.
+ */
+#define QIB_LED_PHYS 1 /* Physical (linktraining) GREEN LED */
+#define QIB_LED_LOG 2 /* Logical (link) YELLOW LED */
+void qib_set_led_override(struct qib_pportdata *ppd, unsigned int val);
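For illustration only (not part of the patch), forcing both LEDs on through the override might look like the line below, with ppd an assumed valid port pointer:

	/* Illustrative sketch: substitute both LED states with "on". */
	qib_set_led_override(ppd, QIB_LED_PHYS | QIB_LED_LOG);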
+
+/* send dma routines */
+int qib_setup_sdma(struct qib_pportdata *);
+void qib_teardown_sdma(struct qib_pportdata *);
+void __qib_sdma_intr(struct qib_pportdata *);
+void qib_sdma_intr(struct qib_pportdata *);
+int qib_sdma_verbs_send(struct qib_pportdata *, struct qib_sge_state *,
+ u32, struct qib_verbs_txreq *);
+/* ppd->sdma_lock should be locked before calling this. */
+int qib_sdma_make_progress(struct qib_pportdata *dd);
+
+/* must be called under qib_sdma_lock */
+static inline u16 qib_sdma_descq_freecnt(const struct qib_pportdata *ppd)
+{
+ return ppd->sdma_descq_cnt -
+ (ppd->sdma_descq_added - ppd->sdma_descq_removed) - 1;
+}
+
+static inline int __qib_sdma_running(struct qib_pportdata *ppd)
+{
+ return ppd->sdma_state.current_state == qib_sdma_state_s99_running;
+}
+int qib_sdma_running(struct qib_pportdata *);
+
+void __qib_sdma_process_event(struct qib_pportdata *, enum qib_sdma_events);
+void qib_sdma_process_event(struct qib_pportdata *, enum qib_sdma_events);
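A minimal sketch (not part of the patch) of driving the SDMA state machine declared above; ppd is an assumed port pointer:

	/* Illustrative sketch: ask the engine to transition toward running. */
	qib_sdma_process_event(ppd, qib_sdma_event_e30_go_running);
	if (!qib_sdma_running(ppd))
		qib_dev_err(ppd->dd, "SDMA engine did not start\n");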
+
+/*
+ * number of words used for protocol header if not set by qib_userinit();
+ */
+#define QIB_DFLT_RCVHDRSIZE 9
+
+/*
+ * We need to be able to handle an IB header of at least 24 dwords.
+ * We need the rcvhdrq large enough to handle largest IB header, but
+ * still have room for a 2KB MTU standard IB packet.
+ * Additionally, some processor/memory controller combinations
+ * benefit quite strongly from having the DMA'ed data be cacheline
+ * aligned and a cacheline multiple, so we set the size to 32 dwords
+ * (2 64-byte primary cachelines for pretty much all processors of
+ * interest). The alignment hurts nothing, other than using somewhat
+ * more memory.
+ */
+#define QIB_RCVHDR_ENTSIZE 32
+
+int qib_get_user_pages(unsigned long, size_t, struct page **);
+void qib_release_user_pages(struct page **, size_t);
+int qib_eeprom_read(struct qib_devdata *, u8, void *, int);
+int qib_eeprom_write(struct qib_devdata *, u8, const void *, int);
+u32 __iomem *qib_getsendbuf_range(struct qib_devdata *, u32 *, u32, u32);
+void qib_sendbuf_done(struct qib_devdata *, unsigned);
+
+static inline void qib_clear_rcvhdrtail(const struct qib_ctxtdata *rcd)
+{
+ *((u64 *) rcd->rcvhdrtail_kvaddr) = 0ULL;
+}
+
+static inline u32 qib_get_rcvhdrtail(const struct qib_ctxtdata *rcd)
+{
+ /*
+ * volatile because it's a DMA target from the chip, routine is
+ * inlined, and don't want register caching or reordering.
+ */
+ return (u32) le64_to_cpu(
+ *((volatile __le64 *)rcd->rcvhdrtail_kvaddr)); /* DMA'ed */
+}
+
+static inline u32 qib_get_hdrqtail(const struct qib_ctxtdata *rcd)
+{
+ const struct qib_devdata *dd = rcd->dd;
+ u32 hdrqtail;
+
+ if (dd->flags & QIB_NODMA_RTAIL) {
+ __le32 *rhf_addr;
+ u32 seq;
+
+ rhf_addr = (__le32 *) rcd->rcvhdrq +
+ rcd->head + dd->rhf_offset;
+ seq = qib_hdrget_seq(rhf_addr);
+ hdrqtail = rcd->head;
+ if (seq == rcd->seq_cnt)
+ hdrqtail++;
+ } else
+ hdrqtail = qib_get_rcvhdrtail(rcd);
+
+ return hdrqtail;
+}
+
+/*
+ * sysfs interface.
+ */
+
+extern const char ib_qib_version[];
+
+int qib_device_create(struct qib_devdata *);
+void qib_device_remove(struct qib_devdata *);
+
+int qib_create_port_files(struct ib_device *ibdev, u8 port_num,
+ struct kobject *kobj);
+int qib_verbs_register_sysfs(struct qib_devdata *);
+void qib_verbs_unregister_sysfs(struct qib_devdata *);
+/* Hook for sysfs read of QSFP */
+extern int qib_qsfp_dump(struct qib_pportdata *ppd, char *buf, int len);
+
+int __init qib_init_qibfs(void);
+int __exit qib_exit_qibfs(void);
+
+int qibfs_add(struct qib_devdata *);
+int qibfs_remove(struct qib_devdata *);
+
+int qib_pcie_init(struct pci_dev *, const struct pci_device_id *);
+int qib_pcie_ddinit(struct qib_devdata *, struct pci_dev *,
+ const struct pci_device_id *);
+void qib_pcie_ddcleanup(struct qib_devdata *);
+int qib_pcie_params(struct qib_devdata *, u32, u32 *, struct msix_entry *);
+int qib_reinit_intr(struct qib_devdata *);
+void qib_enable_intx(struct pci_dev *);
+void qib_nomsi(struct qib_devdata *);
+void qib_nomsix(struct qib_devdata *);
+void qib_pcie_getcmd(struct qib_devdata *, u16 *, u8 *, u8 *);
+void qib_pcie_reenable(struct qib_devdata *, u16, u8, u8);
+
+/*
+ * dma_addr wrappers - all 0's invalid for hw
+ */
+dma_addr_t qib_map_page(struct pci_dev *, struct page *, unsigned long,
+ size_t, int);
+const char *qib_get_unit_name(int unit);
+
+/*
+ * Flush write combining store buffers (if present) and perform a write
+ * barrier.
+ */
+#if defined(CONFIG_X86_64)
+#define qib_flush_wc() asm volatile("sfence" : : : "memory")
+#else
+#define qib_flush_wc() wmb() /* no reorder around wc flush */
+#endif
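A minimal usage sketch (not part of the patch), assuming piobuf points at a mapped PIO send buffer and hdr/hwords describe a prepared header:

	/* Illustrative sketch: copy into the WC-mapped buffer, then flush. */
	qib_pio_copy(piobuf, hdr, hwords);
	qib_flush_wc();		/* ensure the WC stores reach the chip in order */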
+
+/* global module parameter variables */
+extern unsigned qib_ibmtu;
+extern ushort qib_cfgctxts;
+extern ushort qib_num_cfg_vls;
+extern ushort qib_mini_init; /* If set, do few (ideally 0) writes to chip */
+extern unsigned qib_n_krcv_queues;
+extern unsigned qib_sdma_fetch_arb;
+extern unsigned qib_compat_ddr_negotiate;
+extern int qib_special_trigger;
+
+extern struct mutex qib_mutex;
+
+/* Number of seconds before our card status check... */
+#define STATUS_TIMEOUT 60
+
+#define QIB_DRV_NAME "ib_qib"
+#define QIB_USER_MINOR_BASE 0
+#define QIB_TRACE_MINOR 127
+#define QIB_DIAGPKT_MINOR 128
+#define QIB_DIAG_MINOR_BASE 129
+#define QIB_NMINORS 255
+
+#define PCI_VENDOR_ID_PATHSCALE 0x1fc1
+#define PCI_VENDOR_ID_QLOGIC 0x1077
+#define PCI_DEVICE_ID_QLOGIC_IB_6120 0x10
+#define PCI_DEVICE_ID_QLOGIC_IB_7220 0x7220
+#define PCI_DEVICE_ID_QLOGIC_IB_7322 0x7322
+
+/*
+ * qib_early_err is used (only!) to print early errors before devdata is
+ * allocated, or when dd->pcidev may not be valid, and at the tail end of
+ * cleanup when devdata may have been freed, etc. qib_dev_porterr is
+ * the same as qib_dev_err, but is used when the message really needs
+ * the IB port# to be definitive as to what's happening.
+ * All of these go to the trace log, and the trace log entry is done
+ * first to avoid possible serial port delays from printk.
+ */
+#define qib_early_err(dev, fmt, ...) \
+ do { \
+ dev_info(dev, KERN_ERR QIB_DRV_NAME ": " fmt, ##__VA_ARGS__); \
+ } while (0)
+
+#define qib_dev_err(dd, fmt, ...) \
+ do { \
+ dev_err(&(dd)->pcidev->dev, "%s: " fmt, \
+ qib_get_unit_name((dd)->unit), ##__VA_ARGS__); \
+ } while (0)
+
+#define qib_dev_porterr(dd, port, fmt, ...) \
+ do { \
+ dev_err(&(dd)->pcidev->dev, "%s: IB%u:%u " fmt, \
+ qib_get_unit_name((dd)->unit), (dd)->unit, (port), \
+ ##__VA_ARGS__); \
+ } while (0)
+
+#define qib_devinfo(pcidev, fmt, ...) \
+ do { \
+ dev_info(&(pcidev)->dev, fmt, ##__VA_ARGS__); \
+ } while (0)
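An illustrative sketch (not part of the patch) of how the three wrappers differ; dd, port and ret are assumed locals:

	/* Illustrative sketch: unit-prefixed error, port-qualified error, plain info. */
	qib_dev_err(dd, "rcvhdrq allocation failed: %d\n", ret);
	qib_dev_porterr(dd, port, "link went down unexpectedly\n");
	qib_devinfo(dd->pcidev, "%s initialized\n", qib_get_unit_name(dd->unit));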
+
+/*
+ * this is used for formatting hw error messages...
+ */
+struct qib_hwerror_msgs {
+ u64 mask;
+ const char *msg;
+};
+
+#define QLOGIC_IB_HWE_MSG(a, b) { .mask = a, .msg = b }
+
+/* in qib_intr.c... */
+void qib_format_hwerrors(u64 hwerrs,
+ const struct qib_hwerror_msgs *hwerrmsgs,
+ size_t nhwerrmsgs, char *msg, size_t lmsg);
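A hedged sketch (not part of the patch) of how a chip-specific file might build such a table and decode a hardware-error register; the mask bit and message text are hypothetical:

static const struct qib_hwerror_msgs sample_hwerr_msgs[] = {
	QLOGIC_IB_HWE_MSG(1ULL << 0, "Hypothetical PCIe parity error"),
};

static void report_hwerrors(struct qib_devdata *dd, u64 hwerrs)
{
	char msg[128];

	qib_format_hwerrors(hwerrs, sample_hwerr_msgs,
			    ARRAY_SIZE(sample_hwerr_msgs), msg, sizeof(msg));
	qib_dev_err(dd, "Hardware error: %s\n", msg);
}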
+#endif /* _QIB_KERNEL_H */
diff --git a/drivers/infiniband/hw/qib/qib_6120_regs.h b/drivers/infiniband/hw/qib/qib_6120_regs.h
new file mode 100644
index 000000000000..e16cb6f7de2c
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_6120_regs.h
@@ -0,0 +1,977 @@
+/*
+ * Copyright (c) 2008, 2009, 2010 QLogic Corporation. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* This file is mechanically generated from RTL. Any hand-edits will be lost! */
+
+#define QIB_6120_Revision_OFFS 0x0
+#define QIB_6120_Revision_R_Simulator_LSB 0x3F
+#define QIB_6120_Revision_R_Simulator_RMASK 0x1
+#define QIB_6120_Revision_Reserved_LSB 0x28
+#define QIB_6120_Revision_Reserved_RMASK 0x7FFFFF
+#define QIB_6120_Revision_BoardID_LSB 0x20
+#define QIB_6120_Revision_BoardID_RMASK 0xFF
+#define QIB_6120_Revision_R_SW_LSB 0x18
+#define QIB_6120_Revision_R_SW_RMASK 0xFF
+#define QIB_6120_Revision_R_Arch_LSB 0x10
+#define QIB_6120_Revision_R_Arch_RMASK 0xFF
+#define QIB_6120_Revision_R_ChipRevMajor_LSB 0x8
+#define QIB_6120_Revision_R_ChipRevMajor_RMASK 0xFF
+#define QIB_6120_Revision_R_ChipRevMinor_LSB 0x0
+#define QIB_6120_Revision_R_ChipRevMinor_RMASK 0xFF
+
+#define QIB_6120_Control_OFFS 0x8
+#define QIB_6120_Control_TxLatency_LSB 0x4
+#define QIB_6120_Control_TxLatency_RMASK 0x1
+#define QIB_6120_Control_PCIERetryBufDiagEn_LSB 0x3
+#define QIB_6120_Control_PCIERetryBufDiagEn_RMASK 0x1
+#define QIB_6120_Control_LinkEn_LSB 0x2
+#define QIB_6120_Control_LinkEn_RMASK 0x1
+#define QIB_6120_Control_FreezeMode_LSB 0x1
+#define QIB_6120_Control_FreezeMode_RMASK 0x1
+#define QIB_6120_Control_SyncReset_LSB 0x0
+#define QIB_6120_Control_SyncReset_RMASK 0x1
+
+#define QIB_6120_PageAlign_OFFS 0x10
+
+#define QIB_6120_PortCnt_OFFS 0x18
+
+#define QIB_6120_SendRegBase_OFFS 0x30
+
+#define QIB_6120_UserRegBase_OFFS 0x38
+
+#define QIB_6120_CntrRegBase_OFFS 0x40
+
+#define QIB_6120_Scratch_OFFS 0x48
+#define QIB_6120_Scratch_TopHalf_LSB 0x20
+#define QIB_6120_Scratch_TopHalf_RMASK 0xFFFFFFFF
+#define QIB_6120_Scratch_BottomHalf_LSB 0x0
+#define QIB_6120_Scratch_BottomHalf_RMASK 0xFFFFFFFF
+
+#define QIB_6120_IntBlocked_OFFS 0x60
+#define QIB_6120_IntBlocked_ErrorIntBlocked_LSB 0x1F
+#define QIB_6120_IntBlocked_ErrorIntBlocked_RMASK 0x1
+#define QIB_6120_IntBlocked_PioSetIntBlocked_LSB 0x1E
+#define QIB_6120_IntBlocked_PioSetIntBlocked_RMASK 0x1
+#define QIB_6120_IntBlocked_PioBufAvailIntBlocked_LSB 0x1D
+#define QIB_6120_IntBlocked_PioBufAvailIntBlocked_RMASK 0x1
+#define QIB_6120_IntBlocked_assertGPIOIntBlocked_LSB 0x1C
+#define QIB_6120_IntBlocked_assertGPIOIntBlocked_RMASK 0x1
+#define QIB_6120_IntBlocked_Reserved_LSB 0xF
+#define QIB_6120_IntBlocked_Reserved_RMASK 0x1FFF
+#define QIB_6120_IntBlocked_RcvAvail4IntBlocked_LSB 0x10
+#define QIB_6120_IntBlocked_RcvAvail4IntBlocked_RMASK 0x1
+#define QIB_6120_IntBlocked_RcvAvail3IntBlocked_LSB 0xF
+#define QIB_6120_IntBlocked_RcvAvail3IntBlocked_RMASK 0x1
+#define QIB_6120_IntBlocked_RcvAvail2IntBlocked_LSB 0xE
+#define QIB_6120_IntBlocked_RcvAvail2IntBlocked_RMASK 0x1
+#define QIB_6120_IntBlocked_RcvAvail1IntBlocked_LSB 0xD
+#define QIB_6120_IntBlocked_RcvAvail1IntBlocked_RMASK 0x1
+#define QIB_6120_IntBlocked_RcvAvail0IntBlocked_LSB 0xC
+#define QIB_6120_IntBlocked_RcvAvail0IntBlocked_RMASK 0x1
+#define QIB_6120_IntBlocked_Reserved1_LSB 0x5
+#define QIB_6120_IntBlocked_Reserved1_RMASK 0x7F
+#define QIB_6120_IntBlocked_RcvUrg4IntBlocked_LSB 0x4
+#define QIB_6120_IntBlocked_RcvUrg4IntBlocked_RMASK 0x1
+#define QIB_6120_IntBlocked_RcvUrg3IntBlocked_LSB 0x3
+#define QIB_6120_IntBlocked_RcvUrg3IntBlocked_RMASK 0x1
+#define QIB_6120_IntBlocked_RcvUrg2IntBlocked_LSB 0x2
+#define QIB_6120_IntBlocked_RcvUrg2IntBlocked_RMASK 0x1
+#define QIB_6120_IntBlocked_RcvUrg1IntBlocked_LSB 0x1
+#define QIB_6120_IntBlocked_RcvUrg1IntBlocked_RMASK 0x1
+#define QIB_6120_IntBlocked_RcvUrg0IntBlocked_LSB 0x0
+#define QIB_6120_IntBlocked_RcvUrg0IntBlocked_RMASK 0x1
+
+#define QIB_6120_IntMask_OFFS 0x68
+#define QIB_6120_IntMask_ErrorIntMask_LSB 0x1F
+#define QIB_6120_IntMask_ErrorIntMask_RMASK 0x1
+#define QIB_6120_IntMask_PioSetIntMask_LSB 0x1E
+#define QIB_6120_IntMask_PioSetIntMask_RMASK 0x1
+#define QIB_6120_IntMask_PioBufAvailIntMask_LSB 0x1D
+#define QIB_6120_IntMask_PioBufAvailIntMask_RMASK 0x1
+#define QIB_6120_IntMask_assertGPIOIntMask_LSB 0x1C
+#define QIB_6120_IntMask_assertGPIOIntMask_RMASK 0x1
+#define QIB_6120_IntMask_Reserved_LSB 0x11
+#define QIB_6120_IntMask_Reserved_RMASK 0x7FF
+#define QIB_6120_IntMask_RcvAvail4IntMask_LSB 0x10
+#define QIB_6120_IntMask_RcvAvail4IntMask_RMASK 0x1
+#define QIB_6120_IntMask_RcvAvail3IntMask_LSB 0xF
+#define QIB_6120_IntMask_RcvAvail3IntMask_RMASK 0x1
+#define QIB_6120_IntMask_RcvAvail2IntMask_LSB 0xE
+#define QIB_6120_IntMask_RcvAvail2IntMask_RMASK 0x1
+#define QIB_6120_IntMask_RcvAvail1IntMask_LSB 0xD
+#define QIB_6120_IntMask_RcvAvail1IntMask_RMASK 0x1
+#define QIB_6120_IntMask_RcvAvail0IntMask_LSB 0xC
+#define QIB_6120_IntMask_RcvAvail0IntMask_RMASK 0x1
+#define QIB_6120_IntMask_Reserved1_LSB 0x5
+#define QIB_6120_IntMask_Reserved1_RMASK 0x7F
+#define QIB_6120_IntMask_RcvUrg4IntMask_LSB 0x4
+#define QIB_6120_IntMask_RcvUrg4IntMask_RMASK 0x1
+#define QIB_6120_IntMask_RcvUrg3IntMask_LSB 0x3
+#define QIB_6120_IntMask_RcvUrg3IntMask_RMASK 0x1
+#define QIB_6120_IntMask_RcvUrg2IntMask_LSB 0x2
+#define QIB_6120_IntMask_RcvUrg2IntMask_RMASK 0x1
+#define QIB_6120_IntMask_RcvUrg1IntMask_LSB 0x1
+#define QIB_6120_IntMask_RcvUrg1IntMask_RMASK 0x1
+#define QIB_6120_IntMask_RcvUrg0IntMask_LSB 0x0
+#define QIB_6120_IntMask_RcvUrg0IntMask_RMASK 0x1
+
+#define QIB_6120_IntStatus_OFFS 0x70
+#define QIB_6120_IntStatus_Error_LSB 0x1F
+#define QIB_6120_IntStatus_Error_RMASK 0x1
+#define QIB_6120_IntStatus_PioSent_LSB 0x1E
+#define QIB_6120_IntStatus_PioSent_RMASK 0x1
+#define QIB_6120_IntStatus_PioBufAvail_LSB 0x1D
+#define QIB_6120_IntStatus_PioBufAvail_RMASK 0x1
+#define QIB_6120_IntStatus_assertGPIO_LSB 0x1C
+#define QIB_6120_IntStatus_assertGPIO_RMASK 0x1
+#define QIB_6120_IntStatus_Reserved_LSB 0xF
+#define QIB_6120_IntStatus_Reserved_RMASK 0x1FFF
+#define QIB_6120_IntStatus_RcvAvail4_LSB 0x10
+#define QIB_6120_IntStatus_RcvAvail4_RMASK 0x1
+#define QIB_6120_IntStatus_RcvAvail3_LSB 0xF
+#define QIB_6120_IntStatus_RcvAvail3_RMASK 0x1
+#define QIB_6120_IntStatus_RcvAvail2_LSB 0xE
+#define QIB_6120_IntStatus_RcvAvail2_RMASK 0x1
+#define QIB_6120_IntStatus_RcvAvail1_LSB 0xD
+#define QIB_6120_IntStatus_RcvAvail1_RMASK 0x1
+#define QIB_6120_IntStatus_RcvAvail0_LSB 0xC
+#define QIB_6120_IntStatus_RcvAvail0_RMASK 0x1
+#define QIB_6120_IntStatus_Reserved1_LSB 0x5
+#define QIB_6120_IntStatus_Reserved1_RMASK 0x7F
+#define QIB_6120_IntStatus_RcvUrg4_LSB 0x4
+#define QIB_6120_IntStatus_RcvUrg4_RMASK 0x1
+#define QIB_6120_IntStatus_RcvUrg3_LSB 0x3
+#define QIB_6120_IntStatus_RcvUrg3_RMASK 0x1
+#define QIB_6120_IntStatus_RcvUrg2_LSB 0x2
+#define QIB_6120_IntStatus_RcvUrg2_RMASK 0x1
+#define QIB_6120_IntStatus_RcvUrg1_LSB 0x1
+#define QIB_6120_IntStatus_RcvUrg1_RMASK 0x1
+#define QIB_6120_IntStatus_RcvUrg0_LSB 0x0
+#define QIB_6120_IntStatus_RcvUrg0_RMASK 0x1
+
+#define QIB_6120_IntClear_OFFS 0x78
+#define QIB_6120_IntClear_ErrorIntClear_LSB 0x1F
+#define QIB_6120_IntClear_ErrorIntClear_RMASK 0x1
+#define QIB_6120_IntClear_PioSetIntClear_LSB 0x1E
+#define QIB_6120_IntClear_PioSetIntClear_RMASK 0x1
+#define QIB_6120_IntClear_PioBufAvailIntClear_LSB 0x1D
+#define QIB_6120_IntClear_PioBufAvailIntClear_RMASK 0x1
+#define QIB_6120_IntClear_assertGPIOIntClear_LSB 0x1C
+#define QIB_6120_IntClear_assertGPIOIntClear_RMASK 0x1
+#define QIB_6120_IntClear_Reserved_LSB 0xF
+#define QIB_6120_IntClear_Reserved_RMASK 0x1FFF
+#define QIB_6120_IntClear_RcvAvail4IntClear_LSB 0x10
+#define QIB_6120_IntClear_RcvAvail4IntClear_RMASK 0x1
+#define QIB_6120_IntClear_RcvAvail3IntClear_LSB 0xF
+#define QIB_6120_IntClear_RcvAvail3IntClear_RMASK 0x1
+#define QIB_6120_IntClear_RcvAvail2IntClear_LSB 0xE
+#define QIB_6120_IntClear_RcvAvail2IntClear_RMASK 0x1
+#define QIB_6120_IntClear_RcvAvail1IntClear_LSB 0xD
+#define QIB_6120_IntClear_RcvAvail1IntClear_RMASK 0x1
+#define QIB_6120_IntClear_RcvAvail0IntClear_LSB 0xC
+#define QIB_6120_IntClear_RcvAvail0IntClear_RMASK 0x1
+#define QIB_6120_IntClear_Reserved1_LSB 0x5
+#define QIB_6120_IntClear_Reserved1_RMASK 0x7F
+#define QIB_6120_IntClear_RcvUrg4IntClear_LSB 0x4
+#define QIB_6120_IntClear_RcvUrg4IntClear_RMASK 0x1
+#define QIB_6120_IntClear_RcvUrg3IntClear_LSB 0x3
+#define QIB_6120_IntClear_RcvUrg3IntClear_RMASK 0x1
+#define QIB_6120_IntClear_RcvUrg2IntClear_LSB 0x2
+#define QIB_6120_IntClear_RcvUrg2IntClear_RMASK 0x1
+#define QIB_6120_IntClear_RcvUrg1IntClear_LSB 0x1
+#define QIB_6120_IntClear_RcvUrg1IntClear_RMASK 0x1
+#define QIB_6120_IntClear_RcvUrg0IntClear_LSB 0x0
+#define QIB_6120_IntClear_RcvUrg0IntClear_RMASK 0x1
+
+#define QIB_6120_ErrMask_OFFS 0x80
+#define QIB_6120_ErrMask_Reserved_LSB 0x34
+#define QIB_6120_ErrMask_Reserved_RMASK 0xFFF
+#define QIB_6120_ErrMask_HardwareErrMask_LSB 0x33
+#define QIB_6120_ErrMask_HardwareErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_ResetNegatedMask_LSB 0x32
+#define QIB_6120_ErrMask_ResetNegatedMask_RMASK 0x1
+#define QIB_6120_ErrMask_InvalidAddrErrMask_LSB 0x31
+#define QIB_6120_ErrMask_InvalidAddrErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_IBStatusChangedMask_LSB 0x30
+#define QIB_6120_ErrMask_IBStatusChangedMask_RMASK 0x1
+#define QIB_6120_ErrMask_Reserved1_LSB 0x26
+#define QIB_6120_ErrMask_Reserved1_RMASK 0x3FF
+#define QIB_6120_ErrMask_SendUnsupportedVLErrMask_LSB 0x25
+#define QIB_6120_ErrMask_SendUnsupportedVLErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_SendUnexpectedPktNumErrMask_LSB 0x24
+#define QIB_6120_ErrMask_SendUnexpectedPktNumErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_SendPioArmLaunchErrMask_LSB 0x23
+#define QIB_6120_ErrMask_SendPioArmLaunchErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_SendDroppedDataPktErrMask_LSB 0x22
+#define QIB_6120_ErrMask_SendDroppedDataPktErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_SendDroppedSmpPktErrMask_LSB 0x21
+#define QIB_6120_ErrMask_SendDroppedSmpPktErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_SendPktLenErrMask_LSB 0x20
+#define QIB_6120_ErrMask_SendPktLenErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_SendUnderRunErrMask_LSB 0x1F
+#define QIB_6120_ErrMask_SendUnderRunErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_SendMaxPktLenErrMask_LSB 0x1E
+#define QIB_6120_ErrMask_SendMaxPktLenErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_SendMinPktLenErrMask_LSB 0x1D
+#define QIB_6120_ErrMask_SendMinPktLenErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_Reserved2_LSB 0x12
+#define QIB_6120_ErrMask_Reserved2_RMASK 0x7FF
+#define QIB_6120_ErrMask_RcvIBLostLinkErrMask_LSB 0x11
+#define QIB_6120_ErrMask_RcvIBLostLinkErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_RcvHdrErrMask_LSB 0x10
+#define QIB_6120_ErrMask_RcvHdrErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_RcvHdrLenErrMask_LSB 0xF
+#define QIB_6120_ErrMask_RcvHdrLenErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_RcvBadTidErrMask_LSB 0xE
+#define QIB_6120_ErrMask_RcvBadTidErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_RcvHdrFullErrMask_LSB 0xD
+#define QIB_6120_ErrMask_RcvHdrFullErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_RcvEgrFullErrMask_LSB 0xC
+#define QIB_6120_ErrMask_RcvEgrFullErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_RcvBadVersionErrMask_LSB 0xB
+#define QIB_6120_ErrMask_RcvBadVersionErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_RcvIBFlowErrMask_LSB 0xA
+#define QIB_6120_ErrMask_RcvIBFlowErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_RcvEBPErrMask_LSB 0x9
+#define QIB_6120_ErrMask_RcvEBPErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_RcvUnsupportedVLErrMask_LSB 0x8
+#define QIB_6120_ErrMask_RcvUnsupportedVLErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_RcvUnexpectedCharErrMask_LSB 0x7
+#define QIB_6120_ErrMask_RcvUnexpectedCharErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_RcvShortPktLenErrMask_LSB 0x6
+#define QIB_6120_ErrMask_RcvShortPktLenErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_RcvLongPktLenErrMask_LSB 0x5
+#define QIB_6120_ErrMask_RcvLongPktLenErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_RcvMaxPktLenErrMask_LSB 0x4
+#define QIB_6120_ErrMask_RcvMaxPktLenErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_RcvMinPktLenErrMask_LSB 0x3
+#define QIB_6120_ErrMask_RcvMinPktLenErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_RcvICRCErrMask_LSB 0x2
+#define QIB_6120_ErrMask_RcvICRCErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_RcvVCRCErrMask_LSB 0x1
+#define QIB_6120_ErrMask_RcvVCRCErrMask_RMASK 0x1
+#define QIB_6120_ErrMask_RcvFormatErrMask_LSB 0x0
+#define QIB_6120_ErrMask_RcvFormatErrMask_RMASK 0x1
+
+#define QIB_6120_ErrStatus_OFFS 0x88
+#define QIB_6120_ErrStatus_Reserved_LSB 0x34
+#define QIB_6120_ErrStatus_Reserved_RMASK 0xFFF
+#define QIB_6120_ErrStatus_HardwareErr_LSB 0x33
+#define QIB_6120_ErrStatus_HardwareErr_RMASK 0x1
+#define QIB_6120_ErrStatus_ResetNegated_LSB 0x32
+#define QIB_6120_ErrStatus_ResetNegated_RMASK 0x1
+#define QIB_6120_ErrStatus_InvalidAddrErr_LSB 0x31
+#define QIB_6120_ErrStatus_InvalidAddrErr_RMASK 0x1
+#define QIB_6120_ErrStatus_IBStatusChanged_LSB 0x30
+#define QIB_6120_ErrStatus_IBStatusChanged_RMASK 0x1
+#define QIB_6120_ErrStatus_Reserved1_LSB 0x26
+#define QIB_6120_ErrStatus_Reserved1_RMASK 0x3FF
+#define QIB_6120_ErrStatus_SendUnsupportedVLErr_LSB 0x25
+#define QIB_6120_ErrStatus_SendUnsupportedVLErr_RMASK 0x1
+#define QIB_6120_ErrStatus_SendUnexpectedPktNumErr_LSB 0x24
+#define QIB_6120_ErrStatus_SendUnexpectedPktNumErr_RMASK 0x1
+#define QIB_6120_ErrStatus_SendPioArmLaunchErr_LSB 0x23
+#define QIB_6120_ErrStatus_SendPioArmLaunchErr_RMASK 0x1
+#define QIB_6120_ErrStatus_SendDroppedDataPktErr_LSB 0x22
+#define QIB_6120_ErrStatus_SendDroppedDataPktErr_RMASK 0x1
+#define QIB_6120_ErrStatus_SendDroppedSmpPktErr_LSB 0x21
+#define QIB_6120_ErrStatus_SendDroppedSmpPktErr_RMASK 0x1
+#define QIB_6120_ErrStatus_SendPktLenErr_LSB 0x20
+#define QIB_6120_ErrStatus_SendPktLenErr_RMASK 0x1
+#define QIB_6120_ErrStatus_SendUnderRunErr_LSB 0x1F
+#define QIB_6120_ErrStatus_SendUnderRunErr_RMASK 0x1
+#define QIB_6120_ErrStatus_SendMaxPktLenErr_LSB 0x1E
+#define QIB_6120_ErrStatus_SendMaxPktLenErr_RMASK 0x1
+#define QIB_6120_ErrStatus_SendMinPktLenErr_LSB 0x1D
+#define QIB_6120_ErrStatus_SendMinPktLenErr_RMASK 0x1
+#define QIB_6120_ErrStatus_Reserved2_LSB 0x12
+#define QIB_6120_ErrStatus_Reserved2_RMASK 0x7FF
+#define QIB_6120_ErrStatus_RcvIBLostLinkErr_LSB 0x11
+#define QIB_6120_ErrStatus_RcvIBLostLinkErr_RMASK 0x1
+#define QIB_6120_ErrStatus_RcvHdrErr_LSB 0x10
+#define QIB_6120_ErrStatus_RcvHdrErr_RMASK 0x1
+#define QIB_6120_ErrStatus_RcvHdrLenErr_LSB 0xF
+#define QIB_6120_ErrStatus_RcvHdrLenErr_RMASK 0x1
+#define QIB_6120_ErrStatus_RcvBadTidErr_LSB 0xE
+#define QIB_6120_ErrStatus_RcvBadTidErr_RMASK 0x1
+#define QIB_6120_ErrStatus_RcvHdrFullErr_LSB 0xD
+#define QIB_6120_ErrStatus_RcvHdrFullErr_RMASK 0x1
+#define QIB_6120_ErrStatus_RcvEgrFullErr_LSB 0xC
+#define QIB_6120_ErrStatus_RcvEgrFullErr_RMASK 0x1
+#define QIB_6120_ErrStatus_RcvBadVersionErr_LSB 0xB
+#define QIB_6120_ErrStatus_RcvBadVersionErr_RMASK 0x1
+#define QIB_6120_ErrStatus_RcvIBFlowErr_LSB 0xA
+#define QIB_6120_ErrStatus_RcvIBFlowErr_RMASK 0x1
+#define QIB_6120_ErrStatus_RcvEBPErr_LSB 0x9
+#define QIB_6120_ErrStatus_RcvEBPErr_RMASK 0x1
+#define QIB_6120_ErrStatus_RcvUnsupportedVLErr_LSB 0x8
+#define QIB_6120_ErrStatus_RcvUnsupportedVLErr_RMASK 0x1
+#define QIB_6120_ErrStatus_RcvUnexpectedCharErr_LSB 0x7
+#define QIB_6120_ErrStatus_RcvUnexpectedCharErr_RMASK 0x1
+#define QIB_6120_ErrStatus_RcvShortPktLenErr_LSB 0x6
+#define QIB_6120_ErrStatus_RcvShortPktLenErr_RMASK 0x1
+#define QIB_6120_ErrStatus_RcvLongPktLenErr_LSB 0x5
+#define QIB_6120_ErrStatus_RcvLongPktLenErr_RMASK 0x1
+#define QIB_6120_ErrStatus_RcvMaxPktLenErr_LSB 0x4
+#define QIB_6120_ErrStatus_RcvMaxPktLenErr_RMASK 0x1
+#define QIB_6120_ErrStatus_RcvMinPktLenErr_LSB 0x3
+#define QIB_6120_ErrStatus_RcvMinPktLenErr_RMASK 0x1
+#define QIB_6120_ErrStatus_RcvICRCErr_LSB 0x2
+#define QIB_6120_ErrStatus_RcvICRCErr_RMASK 0x1
+#define QIB_6120_ErrStatus_RcvVCRCErr_LSB 0x1
+#define QIB_6120_ErrStatus_RcvVCRCErr_RMASK 0x1
+#define QIB_6120_ErrStatus_RcvFormatErr_LSB 0x0
+#define QIB_6120_ErrStatus_RcvFormatErr_RMASK 0x1
+
+#define QIB_6120_ErrClear_OFFS 0x90
+#define QIB_6120_ErrClear_Reserved_LSB 0x34
+#define QIB_6120_ErrClear_Reserved_RMASK 0xFFF
+#define QIB_6120_ErrClear_HardwareErrClear_LSB 0x33
+#define QIB_6120_ErrClear_HardwareErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_ResetNegatedClear_LSB 0x32
+#define QIB_6120_ErrClear_ResetNegatedClear_RMASK 0x1
+#define QIB_6120_ErrClear_InvalidAddrErrClear_LSB 0x31
+#define QIB_6120_ErrClear_InvalidAddrErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_IBStatusChangedClear_LSB 0x30
+#define QIB_6120_ErrClear_IBStatusChangedClear_RMASK 0x1
+#define QIB_6120_ErrClear_Reserved1_LSB 0x26
+#define QIB_6120_ErrClear_Reserved1_RMASK 0x3FF
+#define QIB_6120_ErrClear_SendUnsupportedVLErrClear_LSB 0x25
+#define QIB_6120_ErrClear_SendUnsupportedVLErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_SendUnexpectedPktNumErrClear_LSB 0x24
+#define QIB_6120_ErrClear_SendUnexpectedPktNumErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_SendPioArmLaunchErrClear_LSB 0x23
+#define QIB_6120_ErrClear_SendPioArmLaunchErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_SendDroppedDataPktErrClear_LSB 0x22
+#define QIB_6120_ErrClear_SendDroppedDataPktErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_SendDroppedSmpPktErrClear_LSB 0x21
+#define QIB_6120_ErrClear_SendDroppedSmpPktErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_SendPktLenErrClear_LSB 0x20
+#define QIB_6120_ErrClear_SendPktLenErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_SendUnderRunErrClear_LSB 0x1F
+#define QIB_6120_ErrClear_SendUnderRunErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_SendMaxPktLenErrClear_LSB 0x1E
+#define QIB_6120_ErrClear_SendMaxPktLenErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_SendMinPktLenErrClear_LSB 0x1D
+#define QIB_6120_ErrClear_SendMinPktLenErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_Reserved2_LSB 0x12
+#define QIB_6120_ErrClear_Reserved2_RMASK 0x7FF
+#define QIB_6120_ErrClear_RcvIBLostLinkErrClear_LSB 0x11
+#define QIB_6120_ErrClear_RcvIBLostLinkErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_RcvHdrErrClear_LSB 0x10
+#define QIB_6120_ErrClear_RcvHdrErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_RcvHdrLenErrClear_LSB 0xF
+#define QIB_6120_ErrClear_RcvHdrLenErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_RcvBadTidErrClear_LSB 0xE
+#define QIB_6120_ErrClear_RcvBadTidErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_RcvHdrFullErrClear_LSB 0xD
+#define QIB_6120_ErrClear_RcvHdrFullErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_RcvEgrFullErrClear_LSB 0xC
+#define QIB_6120_ErrClear_RcvEgrFullErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_RcvBadVersionErrClear_LSB 0xB
+#define QIB_6120_ErrClear_RcvBadVersionErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_RcvIBFlowErrClear_LSB 0xA
+#define QIB_6120_ErrClear_RcvIBFlowErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_RcvEBPErrClear_LSB 0x9
+#define QIB_6120_ErrClear_RcvEBPErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_RcvUnsupportedVLErrClear_LSB 0x8
+#define QIB_6120_ErrClear_RcvUnsupportedVLErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_RcvUnexpectedCharErrClear_LSB 0x7
+#define QIB_6120_ErrClear_RcvUnexpectedCharErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_RcvShortPktLenErrClear_LSB 0x6
+#define QIB_6120_ErrClear_RcvShortPktLenErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_RcvLongPktLenErrClear_LSB 0x5
+#define QIB_6120_ErrClear_RcvLongPktLenErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_RcvMaxPktLenErrClear_LSB 0x4
+#define QIB_6120_ErrClear_RcvMaxPktLenErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_RcvMinPktLenErrClear_LSB 0x3
+#define QIB_6120_ErrClear_RcvMinPktLenErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_RcvICRCErrClear_LSB 0x2
+#define QIB_6120_ErrClear_RcvICRCErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_RcvVCRCErrClear_LSB 0x1
+#define QIB_6120_ErrClear_RcvVCRCErrClear_RMASK 0x1
+#define QIB_6120_ErrClear_RcvFormatErrClear_LSB 0x0
+#define QIB_6120_ErrClear_RcvFormatErrClear_RMASK 0x1
+
+#define QIB_6120_HwErrMask_OFFS 0x98
+#define QIB_6120_HwErrMask_IBCBusFromSPCParityErrMask_LSB 0x3F
+#define QIB_6120_HwErrMask_IBCBusFromSPCParityErrMask_RMASK 0x1
+#define QIB_6120_HwErrMask_IBCBusToSPCParityErrMask_LSB 0x3E
+#define QIB_6120_HwErrMask_IBCBusToSPCParityErrMask_RMASK 0x1
+#define QIB_6120_HwErrMask_Reserved_LSB 0x3D
+#define QIB_6120_HwErrMask_Reserved_RMASK 0x1
+#define QIB_6120_HwErrMask_IBSerdesPClkNotDetectMask_LSB 0x3C
+#define QIB_6120_HwErrMask_IBSerdesPClkNotDetectMask_RMASK 0x1
+#define QIB_6120_HwErrMask_PCIESerdesQ0PClkNotDetectMask_LSB 0x3B
+#define QIB_6120_HwErrMask_PCIESerdesQ0PClkNotDetectMask_RMASK 0x1
+#define QIB_6120_HwErrMask_PCIESerdesQ1PClkNotDetectMask_LSB 0x3A
+#define QIB_6120_HwErrMask_PCIESerdesQ1PClkNotDetectMask_RMASK 0x1
+#define QIB_6120_HwErrMask_Reserved1_LSB 0x39
+#define QIB_6120_HwErrMask_Reserved1_RMASK 0x1
+#define QIB_6120_HwErrMask_IBPLLrfSlipMask_LSB 0x38
+#define QIB_6120_HwErrMask_IBPLLrfSlipMask_RMASK 0x1
+#define QIB_6120_HwErrMask_IBPLLfbSlipMask_LSB 0x37
+#define QIB_6120_HwErrMask_IBPLLfbSlipMask_RMASK 0x1
+#define QIB_6120_HwErrMask_PowerOnBISTFailedMask_LSB 0x36
+#define QIB_6120_HwErrMask_PowerOnBISTFailedMask_RMASK 0x1
+#define QIB_6120_HwErrMask_Reserved2_LSB 0x33
+#define QIB_6120_HwErrMask_Reserved2_RMASK 0x7
+#define QIB_6120_HwErrMask_RXEMemParityErrMask_LSB 0x2C
+#define QIB_6120_HwErrMask_RXEMemParityErrMask_RMASK 0x7F
+#define QIB_6120_HwErrMask_TXEMemParityErrMask_LSB 0x28
+#define QIB_6120_HwErrMask_TXEMemParityErrMask_RMASK 0xF
+#define QIB_6120_HwErrMask_Reserved3_LSB 0x22
+#define QIB_6120_HwErrMask_Reserved3_RMASK 0x3F
+#define QIB_6120_HwErrMask_PCIeBusParityErrMask_LSB 0x1F
+#define QIB_6120_HwErrMask_PCIeBusParityErrMask_RMASK 0x7
+#define QIB_6120_HwErrMask_PcieCplTimeoutMask_LSB 0x1E
+#define QIB_6120_HwErrMask_PcieCplTimeoutMask_RMASK 0x1
+#define QIB_6120_HwErrMask_PoisonedTLPMask_LSB 0x1D
+#define QIB_6120_HwErrMask_PoisonedTLPMask_RMASK 0x1
+#define QIB_6120_HwErrMask_Reserved4_LSB 0x6
+#define QIB_6120_HwErrMask_Reserved4_RMASK 0x7FFFFF
+#define QIB_6120_HwErrMask_PCIeMemParityErrMask_LSB 0x0
+#define QIB_6120_HwErrMask_PCIeMemParityErrMask_RMASK 0x3F
+
+#define QIB_6120_HwErrStatus_OFFS 0xA0
+#define QIB_6120_HwErrStatus_IBCBusFromSPCParityErr_LSB 0x3F
+#define QIB_6120_HwErrStatus_IBCBusFromSPCParityErr_RMASK 0x1
+#define QIB_6120_HwErrStatus_IBCBusToSPCParityErr_LSB 0x3E
+#define QIB_6120_HwErrStatus_IBCBusToSPCParityErr_RMASK 0x1
+#define QIB_6120_HwErrStatus_Reserved_LSB 0x3D
+#define QIB_6120_HwErrStatus_Reserved_RMASK 0x1
+#define QIB_6120_HwErrStatus_IBSerdesPClkNotDetect_LSB 0x3C
+#define QIB_6120_HwErrStatus_IBSerdesPClkNotDetect_RMASK 0x1
+#define QIB_6120_HwErrStatus_PCIESerdesQ0PClkNotDetect_LSB 0x3B
+#define QIB_6120_HwErrStatus_PCIESerdesQ0PClkNotDetect_RMASK 0x1
+#define QIB_6120_HwErrStatus_PCIESerdesQ1PClkNotDetect_LSB 0x3A
+#define QIB_6120_HwErrStatus_PCIESerdesQ1PClkNotDetect_RMASK 0x1
+#define QIB_6120_HwErrStatus_Reserved1_LSB 0x39
+#define QIB_6120_HwErrStatus_Reserved1_RMASK 0x1
+#define QIB_6120_HwErrStatus_IBPLLrfSlip_LSB 0x38
+#define QIB_6120_HwErrStatus_IBPLLrfSlip_RMASK 0x1
+#define QIB_6120_HwErrStatus_IBPLLfbSlip_LSB 0x37
+#define QIB_6120_HwErrStatus_IBPLLfbSlip_RMASK 0x1
+#define QIB_6120_HwErrStatus_PowerOnBISTFailed_LSB 0x36
+#define QIB_6120_HwErrStatus_PowerOnBISTFailed_RMASK 0x1
+#define QIB_6120_HwErrStatus_Reserved2_LSB 0x33
+#define QIB_6120_HwErrStatus_Reserved2_RMASK 0x7
+#define QIB_6120_HwErrStatus_RXEMemParity_LSB 0x2C
+#define QIB_6120_HwErrStatus_RXEMemParity_RMASK 0x7F
+#define QIB_6120_HwErrStatus_TXEMemParity_LSB 0x28
+#define QIB_6120_HwErrStatus_TXEMemParity_RMASK 0xF
+#define QIB_6120_HwErrStatus_Reserved3_LSB 0x22
+#define QIB_6120_HwErrStatus_Reserved3_RMASK 0x3F
+#define QIB_6120_HwErrStatus_PCIeBusParity_LSB 0x1F
+#define QIB_6120_HwErrStatus_PCIeBusParity_RMASK 0x7
+#define QIB_6120_HwErrStatus_PcieCplTimeout_LSB 0x1E
+#define QIB_6120_HwErrStatus_PcieCplTimeout_RMASK 0x1
+#define QIB_6120_HwErrStatus_PoisenedTLP_LSB 0x1D
+#define QIB_6120_HwErrStatus_PoisenedTLP_RMASK 0x1
+#define QIB_6120_HwErrStatus_Reserved4_LSB 0x6
+#define QIB_6120_HwErrStatus_Reserved4_RMASK 0x7FFFFF
+#define QIB_6120_HwErrStatus_PCIeMemParity_LSB 0x0
+#define QIB_6120_HwErrStatus_PCIeMemParity_RMASK 0x3F
+
+#define QIB_6120_HwErrClear_OFFS 0xA8
+#define QIB_6120_HwErrClear_IBCBusFromSPCParityErrClear_LSB 0x3F
+#define QIB_6120_HwErrClear_IBCBusFromSPCParityErrClear_RMASK 0x1
+#define QIB_6120_HwErrClear_IBCBusToSPCparityErrClear_LSB 0x3E
+#define QIB_6120_HwErrClear_IBCBusToSPCparityErrClear_RMASK 0x1
+#define QIB_6120_HwErrClear_Reserved_LSB 0x3D
+#define QIB_6120_HwErrClear_Reserved_RMASK 0x1
+#define QIB_6120_HwErrClear_IBSerdesPClkNotDetectClear_LSB 0x3C
+#define QIB_6120_HwErrClear_IBSerdesPClkNotDetectClear_RMASK 0x1
+#define QIB_6120_HwErrClear_PCIESerdesQ0PClkNotDetectClear_LSB 0x3B
+#define QIB_6120_HwErrClear_PCIESerdesQ0PClkNotDetectClear_RMASK 0x1
+#define QIB_6120_HwErrClear_PCIESerdesQ1PClkNotDetectClear_LSB 0x3A
+#define QIB_6120_HwErrClear_PCIESerdesQ1PClkNotDetectClear_RMASK 0x1
+#define QIB_6120_HwErrClear_Reserved1_LSB 0x39
+#define QIB_6120_HwErrClear_Reserved1_RMASK 0x1
+#define QIB_6120_HwErrClear_IBPLLrfSlipClear_LSB 0x38
+#define QIB_6120_HwErrClear_IBPLLrfSlipClear_RMASK 0x1
+#define QIB_6120_HwErrClear_IBPLLfbSlipClear_LSB 0x37
+#define QIB_6120_HwErrClear_IBPLLfbSlipClear_RMASK 0x1
+#define QIB_6120_HwErrClear_PowerOnBISTFailedClear_LSB 0x36
+#define QIB_6120_HwErrClear_PowerOnBISTFailedClear_RMASK 0x1
+#define QIB_6120_HwErrClear_Reserved2_LSB 0x33
+#define QIB_6120_HwErrClear_Reserved2_RMASK 0x7
+#define QIB_6120_HwErrClear_RXEMemParityClear_LSB 0x2C
+#define QIB_6120_HwErrClear_RXEMemParityClear_RMASK 0x7F
+#define QIB_6120_HwErrClear_TXEMemParityClear_LSB 0x28
+#define QIB_6120_HwErrClear_TXEMemParityClear_RMASK 0xF
+#define QIB_6120_HwErrClear_Reserved3_LSB 0x22
+#define QIB_6120_HwErrClear_Reserved3_RMASK 0x3F
+#define QIB_6120_HwErrClear_PCIeBusParityClr_LSB 0x1F
+#define QIB_6120_HwErrClear_PCIeBusParityClr_RMASK 0x7
+#define QIB_6120_HwErrClear_PcieCplTimeoutClear_LSB 0x1E
+#define QIB_6120_HwErrClear_PcieCplTimeoutClear_RMASK 0x1
+#define QIB_6120_HwErrClear_PoisonedTLPClear_LSB 0x1D
+#define QIB_6120_HwErrClear_PoisonedTLPClear_RMASK 0x1
+#define QIB_6120_HwErrClear_Reserved4_LSB 0x6
+#define QIB_6120_HwErrClear_Reserved4_RMASK 0x7FFFFF
+#define QIB_6120_HwErrClear_PCIeMemParityClr_LSB 0x0
+#define QIB_6120_HwErrClear_PCIeMemParityClr_RMASK 0x3F
+
+#define QIB_6120_HwDiagCtrl_OFFS 0xB0
+#define QIB_6120_HwDiagCtrl_ForceIBCBusFromSPCParityErr_LSB 0x3F
+#define QIB_6120_HwDiagCtrl_ForceIBCBusFromSPCParityErr_RMASK 0x1
+#define QIB_6120_HwDiagCtrl_ForceIBCBusToSPCParityErr_LSB 0x3E
+#define QIB_6120_HwDiagCtrl_ForceIBCBusToSPCParityErr_RMASK 0x1
+#define QIB_6120_HwDiagCtrl_CounterWrEnable_LSB 0x3D
+#define QIB_6120_HwDiagCtrl_CounterWrEnable_RMASK 0x1
+#define QIB_6120_HwDiagCtrl_CounterDisable_LSB 0x3C
+#define QIB_6120_HwDiagCtrl_CounterDisable_RMASK 0x1
+#define QIB_6120_HwDiagCtrl_Reserved_LSB 0x33
+#define QIB_6120_HwDiagCtrl_Reserved_RMASK 0x1FF
+#define QIB_6120_HwDiagCtrl_ForceRxMemParityErr_LSB 0x2C
+#define QIB_6120_HwDiagCtrl_ForceRxMemParityErr_RMASK 0x7F
+#define QIB_6120_HwDiagCtrl_ForceTxMemparityErr_LSB 0x28
+#define QIB_6120_HwDiagCtrl_ForceTxMemparityErr_RMASK 0xF
+#define QIB_6120_HwDiagCtrl_Reserved1_LSB 0x23
+#define QIB_6120_HwDiagCtrl_Reserved1_RMASK 0x1F
+#define QIB_6120_HwDiagCtrl_forcePCIeBusParity_LSB 0x1F
+#define QIB_6120_HwDiagCtrl_forcePCIeBusParity_RMASK 0xF
+#define QIB_6120_HwDiagCtrl_Reserved2_LSB 0x6
+#define QIB_6120_HwDiagCtrl_Reserved2_RMASK 0x1FFFFFF
+#define QIB_6120_HwDiagCtrl_forcePCIeMemParity_LSB 0x0
+#define QIB_6120_HwDiagCtrl_forcePCIeMemParity_RMASK 0x3F
+
+#define QIB_6120_IBCStatus_OFFS 0xC0
+#define QIB_6120_IBCStatus_TxCreditOk_LSB 0x1F
+#define QIB_6120_IBCStatus_TxCreditOk_RMASK 0x1
+#define QIB_6120_IBCStatus_TxReady_LSB 0x1E
+#define QIB_6120_IBCStatus_TxReady_RMASK 0x1
+#define QIB_6120_IBCStatus_Reserved_LSB 0x7
+#define QIB_6120_IBCStatus_Reserved_RMASK 0x7FFFFF
+#define QIB_6120_IBCStatus_LinkState_LSB 0x4
+#define QIB_6120_IBCStatus_LinkState_RMASK 0x7
+#define QIB_6120_IBCStatus_LinkTrainingState_LSB 0x0
+#define QIB_6120_IBCStatus_LinkTrainingState_RMASK 0xF
+
+#define QIB_6120_IBCCtrl_OFFS 0xC8
+#define QIB_6120_IBCCtrl_Loopback_LSB 0x3F
+#define QIB_6120_IBCCtrl_Loopback_RMASK 0x1
+#define QIB_6120_IBCCtrl_LinkDownDefaultState_LSB 0x3E
+#define QIB_6120_IBCCtrl_LinkDownDefaultState_RMASK 0x1
+#define QIB_6120_IBCCtrl_Reserved_LSB 0x2B
+#define QIB_6120_IBCCtrl_Reserved_RMASK 0x7FFFF
+#define QIB_6120_IBCCtrl_CreditScale_LSB 0x28
+#define QIB_6120_IBCCtrl_CreditScale_RMASK 0x7
+#define QIB_6120_IBCCtrl_OverrunThreshold_LSB 0x24
+#define QIB_6120_IBCCtrl_OverrunThreshold_RMASK 0xF
+#define QIB_6120_IBCCtrl_PhyerrThreshold_LSB 0x20
+#define QIB_6120_IBCCtrl_PhyerrThreshold_RMASK 0xF
+#define QIB_6120_IBCCtrl_Reserved1_LSB 0x1F
+#define QIB_6120_IBCCtrl_Reserved1_RMASK 0x1
+#define QIB_6120_IBCCtrl_MaxPktLen_LSB 0x14
+#define QIB_6120_IBCCtrl_MaxPktLen_RMASK 0x7FF
+#define QIB_6120_IBCCtrl_LinkCmd_LSB 0x12
+#define QIB_6120_IBCCtrl_LinkCmd_RMASK 0x3
+#define QIB_6120_IBCCtrl_LinkInitCmd_LSB 0x10
+#define QIB_6120_IBCCtrl_LinkInitCmd_RMASK 0x3
+#define QIB_6120_IBCCtrl_FlowCtrlWaterMark_LSB 0x8
+#define QIB_6120_IBCCtrl_FlowCtrlWaterMark_RMASK 0xFF
+#define QIB_6120_IBCCtrl_FlowCtrlPeriod_LSB 0x0
+#define QIB_6120_IBCCtrl_FlowCtrlPeriod_RMASK 0xFF
+
+#define QIB_6120_EXTStatus_OFFS 0xD0
+#define QIB_6120_EXTStatus_GPIOIn_LSB 0x30
+#define QIB_6120_EXTStatus_GPIOIn_RMASK 0xFFFF
+#define QIB_6120_EXTStatus_Reserved_LSB 0x20
+#define QIB_6120_EXTStatus_Reserved_RMASK 0xFFFF
+#define QIB_6120_EXTStatus_Reserved1_LSB 0x10
+#define QIB_6120_EXTStatus_Reserved1_RMASK 0xFFFF
+#define QIB_6120_EXTStatus_MemBISTFoundErr_LSB 0xF
+#define QIB_6120_EXTStatus_MemBISTFoundErr_RMASK 0x1
+#define QIB_6120_EXTStatus_MemBISTEndTest_LSB 0xE
+#define QIB_6120_EXTStatus_MemBISTEndTest_RMASK 0x1
+#define QIB_6120_EXTStatus_Reserved2_LSB 0x0
+#define QIB_6120_EXTStatus_Reserved2_RMASK 0x3FFF
+
+#define QIB_6120_EXTCtrl_OFFS 0xD8
+#define QIB_6120_EXTCtrl_GPIOOe_LSB 0x30
+#define QIB_6120_EXTCtrl_GPIOOe_RMASK 0xFFFF
+#define QIB_6120_EXTCtrl_GPIOInvert_LSB 0x20
+#define QIB_6120_EXTCtrl_GPIOInvert_RMASK 0xFFFF
+#define QIB_6120_EXTCtrl_Reserved_LSB 0x4
+#define QIB_6120_EXTCtrl_Reserved_RMASK 0xFFFFFFF
+#define QIB_6120_EXTCtrl_LEDPriPortGreenOn_LSB 0x3
+#define QIB_6120_EXTCtrl_LEDPriPortGreenOn_RMASK 0x1
+#define QIB_6120_EXTCtrl_LEDPriPortYellowOn_LSB 0x2
+#define QIB_6120_EXTCtrl_LEDPriPortYellowOn_RMASK 0x1
+#define QIB_6120_EXTCtrl_LEDGblOkGreenOn_LSB 0x1
+#define QIB_6120_EXTCtrl_LEDGblOkGreenOn_RMASK 0x1
+#define QIB_6120_EXTCtrl_LEDGblErrRedOff_LSB 0x0
+#define QIB_6120_EXTCtrl_LEDGblErrRedOff_RMASK 0x1
+
+#define QIB_6120_GPIOOut_OFFS 0xE0
+
+#define QIB_6120_GPIOMask_OFFS 0xE8
+
+#define QIB_6120_GPIOStatus_OFFS 0xF0
+
+#define QIB_6120_GPIOClear_OFFS 0xF8
+
+#define QIB_6120_RcvCtrl_OFFS 0x100
+#define QIB_6120_RcvCtrl_TailUpd_LSB 0x1F
+#define QIB_6120_RcvCtrl_TailUpd_RMASK 0x1
+#define QIB_6120_RcvCtrl_RcvPartitionKeyDisable_LSB 0x1E
+#define QIB_6120_RcvCtrl_RcvPartitionKeyDisable_RMASK 0x1
+#define QIB_6120_RcvCtrl_Reserved_LSB 0x15
+#define QIB_6120_RcvCtrl_Reserved_RMASK 0x1FF
+#define QIB_6120_RcvCtrl_IntrAvail_LSB 0x10
+#define QIB_6120_RcvCtrl_IntrAvail_RMASK 0x1F
+#define QIB_6120_RcvCtrl_Reserved1_LSB 0x9
+#define QIB_6120_RcvCtrl_Reserved1_RMASK 0x7F
+#define QIB_6120_RcvCtrl_Reserved2_LSB 0x5
+#define QIB_6120_RcvCtrl_Reserved2_RMASK 0xF
+#define QIB_6120_RcvCtrl_PortEnable_LSB 0x0
+#define QIB_6120_RcvCtrl_PortEnable_RMASK 0x1F
+
+#define QIB_6120_RcvBTHQP_OFFS 0x108
+#define QIB_6120_RcvBTHQP_BTHQP_Mask_LSB 0x1E
+#define QIB_6120_RcvBTHQP_BTHQP_Mask_RMASK 0x3
+#define QIB_6120_RcvBTHQP_Reserved_LSB 0x18
+#define QIB_6120_RcvBTHQP_Reserved_RMASK 0x3F
+#define QIB_6120_RcvBTHQP_RcvBTHQP_LSB 0x0
+#define QIB_6120_RcvBTHQP_RcvBTHQP_RMASK 0xFFFFFF
+
+#define QIB_6120_RcvHdrSize_OFFS 0x110
+
+#define QIB_6120_RcvHdrCnt_OFFS 0x118
+
+#define QIB_6120_RcvHdrEntSize_OFFS 0x120
+
+#define QIB_6120_RcvTIDBase_OFFS 0x128
+
+#define QIB_6120_RcvTIDCnt_OFFS 0x130
+
+#define QIB_6120_RcvEgrBase_OFFS 0x138
+
+#define QIB_6120_RcvEgrCnt_OFFS 0x140
+
+#define QIB_6120_RcvBufBase_OFFS 0x148
+
+#define QIB_6120_RcvBufSize_OFFS 0x150
+
+#define QIB_6120_RxIntMemBase_OFFS 0x158
+
+#define QIB_6120_RxIntMemSize_OFFS 0x160
+
+#define QIB_6120_RcvPartitionKey_OFFS 0x168
+
+#define QIB_6120_RcvPktLEDCnt_OFFS 0x178
+#define QIB_6120_RcvPktLEDCnt_ONperiod_LSB 0x20
+#define QIB_6120_RcvPktLEDCnt_ONperiod_RMASK 0xFFFFFFFF
+#define QIB_6120_RcvPktLEDCnt_OFFperiod_LSB 0x0
+#define QIB_6120_RcvPktLEDCnt_OFFperiod_RMASK 0xFFFFFFFF
+
+#define QIB_6120_SendCtrl_OFFS 0x1C0
+#define QIB_6120_SendCtrl_Disarm_LSB 0x1F
+#define QIB_6120_SendCtrl_Disarm_RMASK 0x1
+#define QIB_6120_SendCtrl_Reserved_LSB 0x17
+#define QIB_6120_SendCtrl_Reserved_RMASK 0xFF
+#define QIB_6120_SendCtrl_DisarmPIOBuf_LSB 0x10
+#define QIB_6120_SendCtrl_DisarmPIOBuf_RMASK 0x7F
+#define QIB_6120_SendCtrl_Reserved1_LSB 0x4
+#define QIB_6120_SendCtrl_Reserved1_RMASK 0xFFF
+#define QIB_6120_SendCtrl_PIOEnable_LSB 0x3
+#define QIB_6120_SendCtrl_PIOEnable_RMASK 0x1
+#define QIB_6120_SendCtrl_PIOBufAvailUpd_LSB 0x2
+#define QIB_6120_SendCtrl_PIOBufAvailUpd_RMASK 0x1
+#define QIB_6120_SendCtrl_PIOIntBufAvail_LSB 0x1
+#define QIB_6120_SendCtrl_PIOIntBufAvail_RMASK 0x1
+#define QIB_6120_SendCtrl_Abort_LSB 0x0
+#define QIB_6120_SendCtrl_Abort_RMASK 0x1
+
+#define QIB_6120_SendPIOBufBase_OFFS 0x1C8
+#define QIB_6120_SendPIOBufBase_Reserved_LSB 0x35
+#define QIB_6120_SendPIOBufBase_Reserved_RMASK 0x7FF
+#define QIB_6120_SendPIOBufBase_BaseAddr_LargePIO_LSB 0x20
+#define QIB_6120_SendPIOBufBase_BaseAddr_LargePIO_RMASK 0x1FFFFF
+#define QIB_6120_SendPIOBufBase_Reserved1_LSB 0x15
+#define QIB_6120_SendPIOBufBase_Reserved1_RMASK 0x7FF
+#define QIB_6120_SendPIOBufBase_BaseAddr_SmallPIO_LSB 0x0
+#define QIB_6120_SendPIOBufBase_BaseAddr_SmallPIO_RMASK 0x1FFFFF
+
+#define QIB_6120_SendPIOSize_OFFS 0x1D0
+#define QIB_6120_SendPIOSize_Reserved_LSB 0x2D
+#define QIB_6120_SendPIOSize_Reserved_RMASK 0xFFFFF
+#define QIB_6120_SendPIOSize_Size_LargePIO_LSB 0x20
+#define QIB_6120_SendPIOSize_Size_LargePIO_RMASK 0x1FFF
+#define QIB_6120_SendPIOSize_Reserved1_LSB 0xC
+#define QIB_6120_SendPIOSize_Reserved1_RMASK 0xFFFFF
+#define QIB_6120_SendPIOSize_Size_SmallPIO_LSB 0x0
+#define QIB_6120_SendPIOSize_Size_SmallPIO_RMASK 0xFFF
+
+#define QIB_6120_SendPIOBufCnt_OFFS 0x1D8
+#define QIB_6120_SendPIOBufCnt_Reserved_LSB 0x24
+#define QIB_6120_SendPIOBufCnt_Reserved_RMASK 0xFFFFFFF
+#define QIB_6120_SendPIOBufCnt_Num_LargePIO_LSB 0x20
+#define QIB_6120_SendPIOBufCnt_Num_LargePIO_RMASK 0xF
+#define QIB_6120_SendPIOBufCnt_Reserved1_LSB 0x9
+#define QIB_6120_SendPIOBufCnt_Reserved1_RMASK 0x7FFFFF
+#define QIB_6120_SendPIOBufCnt_Num_SmallPIO_LSB 0x0
+#define QIB_6120_SendPIOBufCnt_Num_SmallPIO_RMASK 0x1FF
+
+#define QIB_6120_SendPIOAvailAddr_OFFS 0x1E0
+#define QIB_6120_SendPIOAvailAddr_SendPIOAvailAddr_LSB 0x6
+#define QIB_6120_SendPIOAvailAddr_SendPIOAvailAddr_RMASK 0x3FFFFFFFF
+#define QIB_6120_SendPIOAvailAddr_Reserved_LSB 0x0
+#define QIB_6120_SendPIOAvailAddr_Reserved_RMASK 0x3F
+
+#define QIB_6120_SendBufErr0_OFFS 0x240
+#define QIB_6120_SendBufErr0_SendBufErrPIO_63_0_LSB 0x0
+#define QIB_6120_SendBufErr0_SendBufErrPIO_63_0_RMASK 0x0
+
+#define QIB_6120_RcvHdrAddr0_OFFS 0x280
+#define QIB_6120_RcvHdrAddr0_RcvHdrAddr0_LSB 0x2
+#define QIB_6120_RcvHdrAddr0_RcvHdrAddr0_RMASK 0x3FFFFFFFFF
+#define QIB_6120_RcvHdrAddr0_Reserved_LSB 0x0
+#define QIB_6120_RcvHdrAddr0_Reserved_RMASK 0x3
+
+#define QIB_6120_RcvHdrTailAddr0_OFFS 0x300
+#define QIB_6120_RcvHdrTailAddr0_RcvHdrTailAddr0_LSB 0x2
+#define QIB_6120_RcvHdrTailAddr0_RcvHdrTailAddr0_RMASK 0x3FFFFFFFFF
+#define QIB_6120_RcvHdrTailAddr0_Reserved_LSB 0x0
+#define QIB_6120_RcvHdrTailAddr0_Reserved_RMASK 0x3
+
+#define QIB_6120_SerdesCfg0_OFFS 0x3C0
+#define QIB_6120_SerdesCfg0_DisableIBTxIdleDetect_LSB 0x3F
+#define QIB_6120_SerdesCfg0_DisableIBTxIdleDetect_RMASK 0x1
+#define QIB_6120_SerdesCfg0_Reserved_LSB 0x38
+#define QIB_6120_SerdesCfg0_Reserved_RMASK 0x7F
+#define QIB_6120_SerdesCfg0_RxEqCtl_LSB 0x36
+#define QIB_6120_SerdesCfg0_RxEqCtl_RMASK 0x3
+#define QIB_6120_SerdesCfg0_TxTermAdj_LSB 0x34
+#define QIB_6120_SerdesCfg0_TxTermAdj_RMASK 0x3
+#define QIB_6120_SerdesCfg0_RxTermAdj_LSB 0x32
+#define QIB_6120_SerdesCfg0_RxTermAdj_RMASK 0x3
+#define QIB_6120_SerdesCfg0_TermAdj1_LSB 0x31
+#define QIB_6120_SerdesCfg0_TermAdj1_RMASK 0x1
+#define QIB_6120_SerdesCfg0_TermAdj0_LSB 0x30
+#define QIB_6120_SerdesCfg0_TermAdj0_RMASK 0x1
+#define QIB_6120_SerdesCfg0_LPBKA_LSB 0x2F
+#define QIB_6120_SerdesCfg0_LPBKA_RMASK 0x1
+#define QIB_6120_SerdesCfg0_LPBKB_LSB 0x2E
+#define QIB_6120_SerdesCfg0_LPBKB_RMASK 0x1
+#define QIB_6120_SerdesCfg0_LPBKC_LSB 0x2D
+#define QIB_6120_SerdesCfg0_LPBKC_RMASK 0x1
+#define QIB_6120_SerdesCfg0_LPBKD_LSB 0x2C
+#define QIB_6120_SerdesCfg0_LPBKD_RMASK 0x1
+#define QIB_6120_SerdesCfg0_PW_LSB 0x2B
+#define QIB_6120_SerdesCfg0_PW_RMASK 0x1
+#define QIB_6120_SerdesCfg0_RefSel_LSB 0x29
+#define QIB_6120_SerdesCfg0_RefSel_RMASK 0x3
+#define QIB_6120_SerdesCfg0_ParReset_LSB 0x28
+#define QIB_6120_SerdesCfg0_ParReset_RMASK 0x1
+#define QIB_6120_SerdesCfg0_ParLPBK_LSB 0x27
+#define QIB_6120_SerdesCfg0_ParLPBK_RMASK 0x1
+#define QIB_6120_SerdesCfg0_OffsetEn_LSB 0x26
+#define QIB_6120_SerdesCfg0_OffsetEn_RMASK 0x1
+#define QIB_6120_SerdesCfg0_Offset_LSB 0x1E
+#define QIB_6120_SerdesCfg0_Offset_RMASK 0xFF
+#define QIB_6120_SerdesCfg0_L2PwrDn_LSB 0x1D
+#define QIB_6120_SerdesCfg0_L2PwrDn_RMASK 0x1
+#define QIB_6120_SerdesCfg0_ResetPLL_LSB 0x1C
+#define QIB_6120_SerdesCfg0_ResetPLL_RMASK 0x1
+#define QIB_6120_SerdesCfg0_RxTermEnX_LSB 0x18
+#define QIB_6120_SerdesCfg0_RxTermEnX_RMASK 0xF
+#define QIB_6120_SerdesCfg0_BeaconTxEnX_LSB 0x14
+#define QIB_6120_SerdesCfg0_BeaconTxEnX_RMASK 0xF
+#define QIB_6120_SerdesCfg0_RxDetEnX_LSB 0x10
+#define QIB_6120_SerdesCfg0_RxDetEnX_RMASK 0xF
+#define QIB_6120_SerdesCfg0_TxIdeEnX_LSB 0xC
+#define QIB_6120_SerdesCfg0_TxIdeEnX_RMASK 0xF
+#define QIB_6120_SerdesCfg0_RxIdleEnX_LSB 0x8
+#define QIB_6120_SerdesCfg0_RxIdleEnX_RMASK 0xF
+#define QIB_6120_SerdesCfg0_L1PwrDnA_LSB 0x7
+#define QIB_6120_SerdesCfg0_L1PwrDnA_RMASK 0x1
+#define QIB_6120_SerdesCfg0_L1PwrDnB_LSB 0x6
+#define QIB_6120_SerdesCfg0_L1PwrDnB_RMASK 0x1
+#define QIB_6120_SerdesCfg0_L1PwrDnC_LSB 0x5
+#define QIB_6120_SerdesCfg0_L1PwrDnC_RMASK 0x1
+#define QIB_6120_SerdesCfg0_L1PwrDnD_LSB 0x4
+#define QIB_6120_SerdesCfg0_L1PwrDnD_RMASK 0x1
+#define QIB_6120_SerdesCfg0_ResetA_LSB 0x3
+#define QIB_6120_SerdesCfg0_ResetA_RMASK 0x1
+#define QIB_6120_SerdesCfg0_ResetB_LSB 0x2
+#define QIB_6120_SerdesCfg0_ResetB_RMASK 0x1
+#define QIB_6120_SerdesCfg0_ResetC_LSB 0x1
+#define QIB_6120_SerdesCfg0_ResetC_RMASK 0x1
+#define QIB_6120_SerdesCfg0_ResetD_LSB 0x0
+#define QIB_6120_SerdesCfg0_ResetD_RMASK 0x1
+
+#define QIB_6120_SerdesStat_OFFS 0x3D0
+#define QIB_6120_SerdesStat_Reserved_LSB 0xC
+#define QIB_6120_SerdesStat_Reserved_RMASK 0xFFFFFFFFFFFFF
+#define QIB_6120_SerdesStat_BeaconDetA_LSB 0xB
+#define QIB_6120_SerdesStat_BeaconDetA_RMASK 0x1
+#define QIB_6120_SerdesStat_BeaconDetB_LSB 0xA
+#define QIB_6120_SerdesStat_BeaconDetB_RMASK 0x1
+#define QIB_6120_SerdesStat_BeaconDetC_LSB 0x9
+#define QIB_6120_SerdesStat_BeaconDetC_RMASK 0x1
+#define QIB_6120_SerdesStat_BeaconDetD_LSB 0x8
+#define QIB_6120_SerdesStat_BeaconDetD_RMASK 0x1
+#define QIB_6120_SerdesStat_RxDetA_LSB 0x7
+#define QIB_6120_SerdesStat_RxDetA_RMASK 0x1
+#define QIB_6120_SerdesStat_RxDetB_LSB 0x6
+#define QIB_6120_SerdesStat_RxDetB_RMASK 0x1
+#define QIB_6120_SerdesStat_RxDetC_LSB 0x5
+#define QIB_6120_SerdesStat_RxDetC_RMASK 0x1
+#define QIB_6120_SerdesStat_RxDetD_LSB 0x4
+#define QIB_6120_SerdesStat_RxDetD_RMASK 0x1
+#define QIB_6120_SerdesStat_TxIdleDetA_LSB 0x3
+#define QIB_6120_SerdesStat_TxIdleDetA_RMASK 0x1
+#define QIB_6120_SerdesStat_TxIdleDetB_LSB 0x2
+#define QIB_6120_SerdesStat_TxIdleDetB_RMASK 0x1
+#define QIB_6120_SerdesStat_TxIdleDetC_LSB 0x1
+#define QIB_6120_SerdesStat_TxIdleDetC_RMASK 0x1
+#define QIB_6120_SerdesStat_TxIdleDetD_LSB 0x0
+#define QIB_6120_SerdesStat_TxIdleDetD_RMASK 0x1
+
+#define QIB_6120_XGXSCfg_OFFS 0x3D8
+#define QIB_6120_XGXSCfg_ArmLaunchErrorDisable_LSB 0x3F
+#define QIB_6120_XGXSCfg_ArmLaunchErrorDisable_RMASK 0x1
+#define QIB_6120_XGXSCfg_Reserved_LSB 0x17
+#define QIB_6120_XGXSCfg_Reserved_RMASK 0xFFFFFFFFFF
+#define QIB_6120_XGXSCfg_polarity_inv_LSB 0x13
+#define QIB_6120_XGXSCfg_polarity_inv_RMASK 0xF
+#define QIB_6120_XGXSCfg_link_sync_mask_LSB 0x9
+#define QIB_6120_XGXSCfg_link_sync_mask_RMASK 0x3FF
+#define QIB_6120_XGXSCfg_port_addr_LSB 0x4
+#define QIB_6120_XGXSCfg_port_addr_RMASK 0x1F
+#define QIB_6120_XGXSCfg_mdd_30_LSB 0x3
+#define QIB_6120_XGXSCfg_mdd_30_RMASK 0x1
+#define QIB_6120_XGXSCfg_xcv_resetn_LSB 0x2
+#define QIB_6120_XGXSCfg_xcv_resetn_RMASK 0x1
+#define QIB_6120_XGXSCfg_Reserved1_LSB 0x1
+#define QIB_6120_XGXSCfg_Reserved1_RMASK 0x1
+#define QIB_6120_XGXSCfg_tx_rx_resetn_LSB 0x0
+#define QIB_6120_XGXSCfg_tx_rx_resetn_RMASK 0x1
+
+#define QIB_6120_LBIntCnt_OFFS 0x12000
+
+#define QIB_6120_LBFlowStallCnt_OFFS 0x12008
+
+#define QIB_6120_TxUnsupVLErrCnt_OFFS 0x12018
+
+#define QIB_6120_TxDataPktCnt_OFFS 0x12020
+
+#define QIB_6120_TxFlowPktCnt_OFFS 0x12028
+
+#define QIB_6120_TxDwordCnt_OFFS 0x12030
+
+#define QIB_6120_TxLenErrCnt_OFFS 0x12038
+
+#define QIB_6120_TxMaxMinLenErrCnt_OFFS 0x12040
+
+#define QIB_6120_TxUnderrunCnt_OFFS 0x12048
+
+#define QIB_6120_TxFlowStallCnt_OFFS 0x12050
+
+#define QIB_6120_TxDroppedPktCnt_OFFS 0x12058
+
+#define QIB_6120_RxDroppedPktCnt_OFFS 0x12060
+
+#define QIB_6120_RxDataPktCnt_OFFS 0x12068
+
+#define QIB_6120_RxFlowPktCnt_OFFS 0x12070
+
+#define QIB_6120_RxDwordCnt_OFFS 0x12078
+
+#define QIB_6120_RxLenErrCnt_OFFS 0x12080
+
+#define QIB_6120_RxMaxMinLenErrCnt_OFFS 0x12088
+
+#define QIB_6120_RxICRCErrCnt_OFFS 0x12090
+
+#define QIB_6120_RxVCRCErrCnt_OFFS 0x12098
+
+#define QIB_6120_RxFlowCtrlErrCnt_OFFS 0x120A0
+
+#define QIB_6120_RxBadFormatCnt_OFFS 0x120A8
+
+#define QIB_6120_RxLinkProblemCnt_OFFS 0x120B0
+
+#define QIB_6120_RxEBPCnt_OFFS 0x120B8
+
+#define QIB_6120_RxLPCRCErrCnt_OFFS 0x120C0
+
+#define QIB_6120_RxBufOvflCnt_OFFS 0x120C8
+
+#define QIB_6120_RxTIDFullErrCnt_OFFS 0x120D0
+
+#define QIB_6120_RxTIDValidErrCnt_OFFS 0x120D8
+
+#define QIB_6120_RxPKeyMismatchCnt_OFFS 0x120E0
+
+#define QIB_6120_RxP0HdrEgrOvflCnt_OFFS 0x120E8
+
+#define QIB_6120_IBStatusChangeCnt_OFFS 0x12140
+
+#define QIB_6120_IBLinkErrRecoveryCnt_OFFS 0x12148
+
+#define QIB_6120_IBLinkDownedCnt_OFFS 0x12150
+
+#define QIB_6120_IBSymbolErrCnt_OFFS 0x12158
+
+#define QIB_6120_PcieRetryBufDiagQwordCnt_OFFS 0x12170
+
+#define QIB_6120_RcvEgrArray0_OFFS 0x14000
+
+#define QIB_6120_RcvTIDArray0_OFFS 0x54000
+
+#define QIB_6120_PIOLaunchFIFO_OFFS 0x64000
+
+#define QIB_6120_SendPIOpbcCache_OFFS 0x64800
+
+#define QIB_6120_RcvBuf1_OFFS 0x72000
+
+#define QIB_6120_RcvBuf2_OFFS 0x75000
+
+#define QIB_6120_RcvFlags_OFFS 0x77000
+
+#define QIB_6120_RcvLookupBuf1_OFFS 0x79000
+
+#define QIB_6120_RcvDMABuf_OFFS 0x7B000
+
+#define QIB_6120_MiscRXEIntMem_OFFS 0x7C000
+
+#define QIB_6120_PCIERcvBuf_OFFS 0x80000
+
+#define QIB_6120_PCIERetryBuf_OFFS 0x82000
+
+#define QIB_6120_PCIERcvBufRdToWrAddr_OFFS 0x84000
+
+#define QIB_6120_PIOBuf0_MA_OFFS 0x100000
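[Editorial note, not part of the patch] Every register in this generated header follows the same naming convention: <name>_OFFS is the register's byte offset in the chip's memory-mapped space, <field>_LSB is the bit position where a field starts, and <field>_RMASK is the field's mask once the value has been right-justified. The stand-alone sketch below shows the usual way such macros are combined to pull a field out of a raw 64-bit register value; QIB_6120_FIELD and the sample readout are illustrative only.

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in copies of two generated macros so this sketch compiles on
     * its own; in the driver they come from qib_6120_regs.h. */
    #define QIB_6120_IBCStatus_LinkState_LSB 0x4
    #define QIB_6120_IBCStatus_LinkState_RMASK 0x7
    #define QIB_6120_IBCStatus_LinkTrainingState_LSB 0x0
    #define QIB_6120_IBCStatus_LinkTrainingState_RMASK 0xF

    /* Shift the raw register value down by the field's LSB, then AND with
     * the right-justified RMASK to isolate the field. */
    #define QIB_6120_FIELD(val, reg, fld) \
            (((val) >> QIB_6120_##reg##_##fld##_LSB) & \
             QIB_6120_##reg##_##fld##_RMASK)

    int main(void)
    {
            uint64_t ibcstatus = 0x25;      /* pretend raw IBCStatus readout */

            printf("LinkState=%u LinkTrainingState=%u\n",
                   (unsigned)QIB_6120_FIELD(ibcstatus, IBCStatus, LinkState),
                   (unsigned)QIB_6120_FIELD(ibcstatus, IBCStatus, LinkTrainingState));
            return 0;
    }

With a raw value of 0x25 the sketch prints LinkState=2 and LinkTrainingState=5.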
diff --git a/drivers/infiniband/hw/qib/qib_7220.h b/drivers/infiniband/hw/qib/qib_7220.h
new file mode 100644
index 000000000000..ea0bfd896f92
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_7220.h
@@ -0,0 +1,156 @@
+#ifndef _QIB_7220_H
+#define _QIB_7220_H
+/*
+ * Copyright (c) 2007, 2009, 2010 QLogic Corporation. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* Pull in the register definitions auto-generated from the hardware RTL. */
+#include "qib_7220_regs.h"
+
+/* The number of eager receive TIDs for context zero. */
+#define IBA7220_KRCVEGRCNT 2048U
+
+#define IB_7220_LT_STATE_CFGRCVFCFG 0x09
+#define IB_7220_LT_STATE_CFGWAITRMT 0x0a
+#define IB_7220_LT_STATE_TXREVLANES 0x0d
+#define IB_7220_LT_STATE_CFGENH 0x10
+
+struct qib_chip_specific {
+ u64 __iomem *cregbase;
+ u64 *cntrs;
+ u64 *portcntrs;
+ spinlock_t sdepb_lock; /* serdes EPB bus */
+ spinlock_t rcvmod_lock; /* protect rcvctrl shadow changes */
+ spinlock_t gpio_lock; /* RMW of shadows/regs for ExtCtrl and GPIO */
+ u64 hwerrmask;
+ u64 errormask;
+ u64 gpio_out; /* shadow of kr_gpio_out, for rmw ops */
+ u64 gpio_mask; /* shadow the gpio mask register */
+ u64 extctrl; /* shadow the gpio output enable, etc... */
+ u32 ncntrs;
+ u32 nportcntrs;
+ u32 cntrnamelen;
+ u32 portcntrnamelen;
+ u32 numctxts;
+ u32 rcvegrcnt;
+ u32 autoneg_tries;
+ u32 serdes_first_init_done;
+ u32 sdmabufcnt;
+ u32 lastbuf_for_pio;
+ u32 updthresh; /* current AvailUpdThld */
+ u32 updthresh_dflt; /* default AvailUpdThld */
+ int irq;
+ u8 presets_needed;
+ u8 relock_timer_active;
+ char emsgbuf[128];
+ char sdmamsgbuf[192];
+ char bitsmsgbuf[64];
+ struct timer_list relock_timer;
+ unsigned int relock_interval; /* in jiffies */
+};
+
+struct qib_chippport_specific {
+ struct qib_pportdata pportdata;
+ wait_queue_head_t autoneg_wait;
+ struct delayed_work autoneg_work;
+ struct timer_list chase_timer;
+ /*
+	 * These five fields are used to establish deltas for IB symbol
+	 * errors and link recovery errors. They can be reported on
+ * some chips during link negotiation prior to INIT, and with
+ * DDR when faking DDR negotiations with non-IBTA switches.
+ * The chip counters are adjusted at driver unload if there is
+ * a non-zero delta.
+ */
+ u64 ibdeltainprog;
+ u64 ibsymdelta;
+ u64 ibsymsnap;
+ u64 iblnkerrdelta;
+ u64 iblnkerrsnap;
+ u64 ibcctrl; /* kr_ibcctrl shadow */
+ u64 ibcddrctrl; /* kr_ibcddrctrl shadow */
+ u64 chase_end;
+ u32 last_delay_mult;
+};
+
+/*
+ * This header file provides the declarations and common definitions
+ * for (mostly) manipulation of the SerDes blocks within the IBA7220.
+ * The functions declared here should only be called from within other
+ * 7220-related files such as qib_iba7220.c or qib_sd7220.c.
+ */
+int qib_sd7220_presets(struct qib_devdata *dd);
+int qib_sd7220_init(struct qib_devdata *dd);
+int qib_sd7220_prog_ld(struct qib_devdata *dd, int sdnum, u8 *img,
+ int len, int offset);
+int qib_sd7220_prog_vfy(struct qib_devdata *dd, int sdnum, const u8 *img,
+ int len, int offset);
+void qib_sd7220_clr_ibpar(struct qib_devdata *);
+/*
+ * Below is used for the sdnum parameter, selecting one of the two
+ * sections used for PCIe, or the single SerDes used for IB, which is
+ * the only one currently used.
+ */
+#define IB_7220_SERDES 2
+
+int qib_sd7220_ib_load(struct qib_devdata *dd);
+int qib_sd7220_ib_vfy(struct qib_devdata *dd);
+
+static inline u32 qib_read_kreg32(const struct qib_devdata *dd,
+ const u16 regno)
+{
+ if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
+ return -1;
+ return readl((u32 __iomem *)&dd->kregbase[regno]);
+}
+
+static inline u64 qib_read_kreg64(const struct qib_devdata *dd,
+ const u16 regno)
+{
+ if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
+ return -1;
+
+ return readq(&dd->kregbase[regno]);
+}
+
+static inline void qib_write_kreg(const struct qib_devdata *dd,
+ const u16 regno, u64 value)
+{
+ if (dd->kregbase)
+ writeq(value, &dd->kregbase[regno]);
+}
+
+void set_7220_relock_poll(struct qib_devdata *, int);
+void shutdown_7220_relock_poll(struct qib_devdata *);
+void toggle_7220_rclkrls(struct qib_devdata *);
+
+
+#endif /* _QIB_7220_H */
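[Editorial note, not part of the patch] The inline accessors above index kregbase as an array of 64-bit words, so a byte offset from the generated register header has to be converted to a word index before it is passed as regno. The fragment below is a hedged usage sketch under that assumption; KREG_IDX and error_int_pending are illustrative names, not declarations from the patch.

    /* Convert a generated byte offset into the u64-word index expected by
     * qib_read_kreg64()/qib_write_kreg() above (illustrative helper). */
    #define KREG_IDX(regname)	(QIB_7220_##regname##_OFFS / sizeof(u64))

    /* Read IntStatus and report whether the summary Error bit is set. */
    static inline int error_int_pending(const struct qib_devdata *dd)
    {
            u64 val = qib_read_kreg64(dd, KREG_IDX(IntStatus));

            return (val >> QIB_7220_IntStatus_Error_LSB) &
                   QIB_7220_IntStatus_Error_RMASK;
    }

The same pattern applies to any of the generated QIB_7220_* registers: divide the _OFFS value by sizeof(u64) to get the index, read the register, then shift and mask with the field's _LSB/_RMASK pair.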
diff --git a/drivers/infiniband/hw/qib/qib_7220_regs.h b/drivers/infiniband/hw/qib/qib_7220_regs.h
new file mode 100644
index 000000000000..0da5bb750e52
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_7220_regs.h
@@ -0,0 +1,1496 @@
+/*
+ * Copyright (c) 2008, 2009, 2010 QLogic Corporation. All rights reserved.
+ *
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+/* This file is mechanically generated from RTL. Any hand-edits will be lost! */
+
+#define QIB_7220_Revision_OFFS 0x0
+#define QIB_7220_Revision_R_Simulator_LSB 0x3F
+#define QIB_7220_Revision_R_Simulator_RMASK 0x1
+#define QIB_7220_Revision_R_Emulation_LSB 0x3E
+#define QIB_7220_Revision_R_Emulation_RMASK 0x1
+#define QIB_7220_Revision_R_Emulation_Revcode_LSB 0x28
+#define QIB_7220_Revision_R_Emulation_Revcode_RMASK 0x3FFFFF
+#define QIB_7220_Revision_BoardID_LSB 0x20
+#define QIB_7220_Revision_BoardID_RMASK 0xFF
+#define QIB_7220_Revision_R_SW_LSB 0x18
+#define QIB_7220_Revision_R_SW_RMASK 0xFF
+#define QIB_7220_Revision_R_Arch_LSB 0x10
+#define QIB_7220_Revision_R_Arch_RMASK 0xFF
+#define QIB_7220_Revision_R_ChipRevMajor_LSB 0x8
+#define QIB_7220_Revision_R_ChipRevMajor_RMASK 0xFF
+#define QIB_7220_Revision_R_ChipRevMinor_LSB 0x0
+#define QIB_7220_Revision_R_ChipRevMinor_RMASK 0xFF
+
+#define QIB_7220_Control_OFFS 0x8
+#define QIB_7220_Control_SyncResetExceptPcieIRAMRST_LSB 0x7
+#define QIB_7220_Control_SyncResetExceptPcieIRAMRST_RMASK 0x1
+#define QIB_7220_Control_PCIECplQDiagEn_LSB 0x6
+#define QIB_7220_Control_PCIECplQDiagEn_RMASK 0x1
+#define QIB_7220_Control_Reserved_LSB 0x5
+#define QIB_7220_Control_Reserved_RMASK 0x1
+#define QIB_7220_Control_TxLatency_LSB 0x4
+#define QIB_7220_Control_TxLatency_RMASK 0x1
+#define QIB_7220_Control_PCIERetryBufDiagEn_LSB 0x3
+#define QIB_7220_Control_PCIERetryBufDiagEn_RMASK 0x1
+#define QIB_7220_Control_LinkEn_LSB 0x2
+#define QIB_7220_Control_LinkEn_RMASK 0x1
+#define QIB_7220_Control_FreezeMode_LSB 0x1
+#define QIB_7220_Control_FreezeMode_RMASK 0x1
+#define QIB_7220_Control_SyncReset_LSB 0x0
+#define QIB_7220_Control_SyncReset_RMASK 0x1
+
+#define QIB_7220_PageAlign_OFFS 0x10
+
+#define QIB_7220_PortCnt_OFFS 0x18
+
+#define QIB_7220_SendRegBase_OFFS 0x30
+
+#define QIB_7220_UserRegBase_OFFS 0x38
+
+#define QIB_7220_CntrRegBase_OFFS 0x40
+
+#define QIB_7220_Scratch_OFFS 0x48
+
+#define QIB_7220_IntMask_OFFS 0x68
+#define QIB_7220_IntMask_SDmaIntMask_LSB 0x3F
+#define QIB_7220_IntMask_SDmaIntMask_RMASK 0x1
+#define QIB_7220_IntMask_SDmaDisabledMasked_LSB 0x3E
+#define QIB_7220_IntMask_SDmaDisabledMasked_RMASK 0x1
+#define QIB_7220_IntMask_Reserved_LSB 0x31
+#define QIB_7220_IntMask_Reserved_RMASK 0x1FFF
+#define QIB_7220_IntMask_RcvUrg16IntMask_LSB 0x30
+#define QIB_7220_IntMask_RcvUrg16IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvUrg15IntMask_LSB 0x2F
+#define QIB_7220_IntMask_RcvUrg15IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvUrg14IntMask_LSB 0x2E
+#define QIB_7220_IntMask_RcvUrg14IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvUrg13IntMask_LSB 0x2D
+#define QIB_7220_IntMask_RcvUrg13IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvUrg12IntMask_LSB 0x2C
+#define QIB_7220_IntMask_RcvUrg12IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvUrg11IntMask_LSB 0x2B
+#define QIB_7220_IntMask_RcvUrg11IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvUrg10IntMask_LSB 0x2A
+#define QIB_7220_IntMask_RcvUrg10IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvUrg9IntMask_LSB 0x29
+#define QIB_7220_IntMask_RcvUrg9IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvUrg8IntMask_LSB 0x28
+#define QIB_7220_IntMask_RcvUrg8IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvUrg7IntMask_LSB 0x27
+#define QIB_7220_IntMask_RcvUrg7IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvUrg6IntMask_LSB 0x26
+#define QIB_7220_IntMask_RcvUrg6IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvUrg5IntMask_LSB 0x25
+#define QIB_7220_IntMask_RcvUrg5IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvUrg4IntMask_LSB 0x24
+#define QIB_7220_IntMask_RcvUrg4IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvUrg3IntMask_LSB 0x23
+#define QIB_7220_IntMask_RcvUrg3IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvUrg2IntMask_LSB 0x22
+#define QIB_7220_IntMask_RcvUrg2IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvUrg1IntMask_LSB 0x21
+#define QIB_7220_IntMask_RcvUrg1IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvUrg0IntMask_LSB 0x20
+#define QIB_7220_IntMask_RcvUrg0IntMask_RMASK 0x1
+#define QIB_7220_IntMask_ErrorIntMask_LSB 0x1F
+#define QIB_7220_IntMask_ErrorIntMask_RMASK 0x1
+#define QIB_7220_IntMask_PioSetIntMask_LSB 0x1E
+#define QIB_7220_IntMask_PioSetIntMask_RMASK 0x1
+#define QIB_7220_IntMask_PioBufAvailIntMask_LSB 0x1D
+#define QIB_7220_IntMask_PioBufAvailIntMask_RMASK 0x1
+#define QIB_7220_IntMask_assertGPIOIntMask_LSB 0x1C
+#define QIB_7220_IntMask_assertGPIOIntMask_RMASK 0x1
+#define QIB_7220_IntMask_IBSerdesTrimDoneIntMask_LSB 0x1B
+#define QIB_7220_IntMask_IBSerdesTrimDoneIntMask_RMASK 0x1
+#define QIB_7220_IntMask_JIntMask_LSB 0x1A
+#define QIB_7220_IntMask_JIntMask_RMASK 0x1
+#define QIB_7220_IntMask_Reserved1_LSB 0x11
+#define QIB_7220_IntMask_Reserved1_RMASK 0x1FF
+#define QIB_7220_IntMask_RcvAvail16IntMask_LSB 0x10
+#define QIB_7220_IntMask_RcvAvail16IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvAvail15IntMask_LSB 0xF
+#define QIB_7220_IntMask_RcvAvail15IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvAvail14IntMask_LSB 0xE
+#define QIB_7220_IntMask_RcvAvail14IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvAvail13IntMask_LSB 0xD
+#define QIB_7220_IntMask_RcvAvail13IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvAvail12IntMask_LSB 0xC
+#define QIB_7220_IntMask_RcvAvail12IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvAvail11IntMask_LSB 0xB
+#define QIB_7220_IntMask_RcvAvail11IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvAvail10IntMask_LSB 0xA
+#define QIB_7220_IntMask_RcvAvail10IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvAvail9IntMask_LSB 0x9
+#define QIB_7220_IntMask_RcvAvail9IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvAvail8IntMask_LSB 0x8
+#define QIB_7220_IntMask_RcvAvail8IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvAvail7IntMask_LSB 0x7
+#define QIB_7220_IntMask_RcvAvail7IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvAvail6IntMask_LSB 0x6
+#define QIB_7220_IntMask_RcvAvail6IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvAvail5IntMask_LSB 0x5
+#define QIB_7220_IntMask_RcvAvail5IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvAvail4IntMask_LSB 0x4
+#define QIB_7220_IntMask_RcvAvail4IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvAvail3IntMask_LSB 0x3
+#define QIB_7220_IntMask_RcvAvail3IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvAvail2IntMask_LSB 0x2
+#define QIB_7220_IntMask_RcvAvail2IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvAvail1IntMask_LSB 0x1
+#define QIB_7220_IntMask_RcvAvail1IntMask_RMASK 0x1
+#define QIB_7220_IntMask_RcvAvail0IntMask_LSB 0x0
+#define QIB_7220_IntMask_RcvAvail0IntMask_RMASK 0x1
+
+#define QIB_7220_IntStatus_OFFS 0x70
+#define QIB_7220_IntStatus_SDmaInt_LSB 0x3F
+#define QIB_7220_IntStatus_SDmaInt_RMASK 0x1
+#define QIB_7220_IntStatus_SDmaDisabled_LSB 0x3E
+#define QIB_7220_IntStatus_SDmaDisabled_RMASK 0x1
+#define QIB_7220_IntStatus_Reserved_LSB 0x31
+#define QIB_7220_IntStatus_Reserved_RMASK 0x1FFF
+#define QIB_7220_IntStatus_RcvUrg16_LSB 0x30
+#define QIB_7220_IntStatus_RcvUrg16_RMASK 0x1
+#define QIB_7220_IntStatus_RcvUrg15_LSB 0x2F
+#define QIB_7220_IntStatus_RcvUrg15_RMASK 0x1
+#define QIB_7220_IntStatus_RcvUrg14_LSB 0x2E
+#define QIB_7220_IntStatus_RcvUrg14_RMASK 0x1
+#define QIB_7220_IntStatus_RcvUrg13_LSB 0x2D
+#define QIB_7220_IntStatus_RcvUrg13_RMASK 0x1
+#define QIB_7220_IntStatus_RcvUrg12_LSB 0x2C
+#define QIB_7220_IntStatus_RcvUrg12_RMASK 0x1
+#define QIB_7220_IntStatus_RcvUrg11_LSB 0x2B
+#define QIB_7220_IntStatus_RcvUrg11_RMASK 0x1
+#define QIB_7220_IntStatus_RcvUrg10_LSB 0x2A
+#define QIB_7220_IntStatus_RcvUrg10_RMASK 0x1
+#define QIB_7220_IntStatus_RcvUrg9_LSB 0x29
+#define QIB_7220_IntStatus_RcvUrg9_RMASK 0x1
+#define QIB_7220_IntStatus_RcvUrg8_LSB 0x28
+#define QIB_7220_IntStatus_RcvUrg8_RMASK 0x1
+#define QIB_7220_IntStatus_RcvUrg7_LSB 0x27
+#define QIB_7220_IntStatus_RcvUrg7_RMASK 0x1
+#define QIB_7220_IntStatus_RcvUrg6_LSB 0x26
+#define QIB_7220_IntStatus_RcvUrg6_RMASK 0x1
+#define QIB_7220_IntStatus_RcvUrg5_LSB 0x25
+#define QIB_7220_IntStatus_RcvUrg5_RMASK 0x1
+#define QIB_7220_IntStatus_RcvUrg4_LSB 0x24
+#define QIB_7220_IntStatus_RcvUrg4_RMASK 0x1
+#define QIB_7220_IntStatus_RcvUrg3_LSB 0x23
+#define QIB_7220_IntStatus_RcvUrg3_RMASK 0x1
+#define QIB_7220_IntStatus_RcvUrg2_LSB 0x22
+#define QIB_7220_IntStatus_RcvUrg2_RMASK 0x1
+#define QIB_7220_IntStatus_RcvUrg1_LSB 0x21
+#define QIB_7220_IntStatus_RcvUrg1_RMASK 0x1
+#define QIB_7220_IntStatus_RcvUrg0_LSB 0x20
+#define QIB_7220_IntStatus_RcvUrg0_RMASK 0x1
+#define QIB_7220_IntStatus_Error_LSB 0x1F
+#define QIB_7220_IntStatus_Error_RMASK 0x1
+#define QIB_7220_IntStatus_PioSent_LSB 0x1E
+#define QIB_7220_IntStatus_PioSent_RMASK 0x1
+#define QIB_7220_IntStatus_PioBufAvail_LSB 0x1D
+#define QIB_7220_IntStatus_PioBufAvail_RMASK 0x1
+#define QIB_7220_IntStatus_assertGPIO_LSB 0x1C
+#define QIB_7220_IntStatus_assertGPIO_RMASK 0x1
+#define QIB_7220_IntStatus_IBSerdesTrimDone_LSB 0x1B
+#define QIB_7220_IntStatus_IBSerdesTrimDone_RMASK 0x1
+#define QIB_7220_IntStatus_JInt_LSB 0x1A
+#define QIB_7220_IntStatus_JInt_RMASK 0x1
+#define QIB_7220_IntStatus_Reserved1_LSB 0x11
+#define QIB_7220_IntStatus_Reserved1_RMASK 0x1FF
+#define QIB_7220_IntStatus_RcvAvail16_LSB 0x10
+#define QIB_7220_IntStatus_RcvAvail16_RMASK 0x1
+#define QIB_7220_IntStatus_RcvAvail15_LSB 0xF
+#define QIB_7220_IntStatus_RcvAvail15_RMASK 0x1
+#define QIB_7220_IntStatus_RcvAvail14_LSB 0xE
+#define QIB_7220_IntStatus_RcvAvail14_RMASK 0x1
+#define QIB_7220_IntStatus_RcvAvail13_LSB 0xD
+#define QIB_7220_IntStatus_RcvAvail13_RMASK 0x1
+#define QIB_7220_IntStatus_RcvAvail12_LSB 0xC
+#define QIB_7220_IntStatus_RcvAvail12_RMASK 0x1
+#define QIB_7220_IntStatus_RcvAvail11_LSB 0xB
+#define QIB_7220_IntStatus_RcvAvail11_RMASK 0x1
+#define QIB_7220_IntStatus_RcvAvail10_LSB 0xA
+#define QIB_7220_IntStatus_RcvAvail10_RMASK 0x1
+#define QIB_7220_IntStatus_RcvAvail9_LSB 0x9
+#define QIB_7220_IntStatus_RcvAvail9_RMASK 0x1
+#define QIB_7220_IntStatus_RcvAvail8_LSB 0x8
+#define QIB_7220_IntStatus_RcvAvail8_RMASK 0x1
+#define QIB_7220_IntStatus_RcvAvail7_LSB 0x7
+#define QIB_7220_IntStatus_RcvAvail7_RMASK 0x1
+#define QIB_7220_IntStatus_RcvAvail6_LSB 0x6
+#define QIB_7220_IntStatus_RcvAvail6_RMASK 0x1
+#define QIB_7220_IntStatus_RcvAvail5_LSB 0x5
+#define QIB_7220_IntStatus_RcvAvail5_RMASK 0x1
+#define QIB_7220_IntStatus_RcvAvail4_LSB 0x4
+#define QIB_7220_IntStatus_RcvAvail4_RMASK 0x1
+#define QIB_7220_IntStatus_RcvAvail3_LSB 0x3
+#define QIB_7220_IntStatus_RcvAvail3_RMASK 0x1
+#define QIB_7220_IntStatus_RcvAvail2_LSB 0x2
+#define QIB_7220_IntStatus_RcvAvail2_RMASK 0x1
+#define QIB_7220_IntStatus_RcvAvail1_LSB 0x1
+#define QIB_7220_IntStatus_RcvAvail1_RMASK 0x1
+#define QIB_7220_IntStatus_RcvAvail0_LSB 0x0
+#define QIB_7220_IntStatus_RcvAvail0_RMASK 0x1
+
+#define QIB_7220_IntClear_OFFS 0x78
+#define QIB_7220_IntClear_SDmaIntClear_LSB 0x3F
+#define QIB_7220_IntClear_SDmaIntClear_RMASK 0x1
+#define QIB_7220_IntClear_SDmaDisabledClear_LSB 0x3E
+#define QIB_7220_IntClear_SDmaDisabledClear_RMASK 0x1
+#define QIB_7220_IntClear_Reserved_LSB 0x31
+#define QIB_7220_IntClear_Reserved_RMASK 0x1FFF
+#define QIB_7220_IntClear_RcvUrg16IntClear_LSB 0x30
+#define QIB_7220_IntClear_RcvUrg16IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvUrg15IntClear_LSB 0x2F
+#define QIB_7220_IntClear_RcvUrg15IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvUrg14IntClear_LSB 0x2E
+#define QIB_7220_IntClear_RcvUrg14IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvUrg13IntClear_LSB 0x2D
+#define QIB_7220_IntClear_RcvUrg13IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvUrg12IntClear_LSB 0x2C
+#define QIB_7220_IntClear_RcvUrg12IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvUrg11IntClear_LSB 0x2B
+#define QIB_7220_IntClear_RcvUrg11IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvUrg10IntClear_LSB 0x2A
+#define QIB_7220_IntClear_RcvUrg10IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvUrg9IntClear_LSB 0x29
+#define QIB_7220_IntClear_RcvUrg9IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvUrg8IntClear_LSB 0x28
+#define QIB_7220_IntClear_RcvUrg8IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvUrg7IntClear_LSB 0x27
+#define QIB_7220_IntClear_RcvUrg7IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvUrg6IntClear_LSB 0x26
+#define QIB_7220_IntClear_RcvUrg6IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvUrg5IntClear_LSB 0x25
+#define QIB_7220_IntClear_RcvUrg5IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvUrg4IntClear_LSB 0x24
+#define QIB_7220_IntClear_RcvUrg4IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvUrg3IntClear_LSB 0x23
+#define QIB_7220_IntClear_RcvUrg3IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvUrg2IntClear_LSB 0x22
+#define QIB_7220_IntClear_RcvUrg2IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvUrg1IntClear_LSB 0x21
+#define QIB_7220_IntClear_RcvUrg1IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvUrg0IntClear_LSB 0x20
+#define QIB_7220_IntClear_RcvUrg0IntClear_RMASK 0x1
+#define QIB_7220_IntClear_ErrorIntClear_LSB 0x1F
+#define QIB_7220_IntClear_ErrorIntClear_RMASK 0x1
+#define QIB_7220_IntClear_PioSetIntClear_LSB 0x1E
+#define QIB_7220_IntClear_PioSetIntClear_RMASK 0x1
+#define QIB_7220_IntClear_PioBufAvailIntClear_LSB 0x1D
+#define QIB_7220_IntClear_PioBufAvailIntClear_RMASK 0x1
+#define QIB_7220_IntClear_assertGPIOIntClear_LSB 0x1C
+#define QIB_7220_IntClear_assertGPIOIntClear_RMASK 0x1
+#define QIB_7220_IntClear_IBSerdesTrimDoneClear_LSB 0x1B
+#define QIB_7220_IntClear_IBSerdesTrimDoneClear_RMASK 0x1
+#define QIB_7220_IntClear_JIntClear_LSB 0x1A
+#define QIB_7220_IntClear_JIntClear_RMASK 0x1
+#define QIB_7220_IntClear_Reserved1_LSB 0x11
+#define QIB_7220_IntClear_Reserved1_RMASK 0x1FF
+#define QIB_7220_IntClear_RcvAvail16IntClear_LSB 0x10
+#define QIB_7220_IntClear_RcvAvail16IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvAvail15IntClear_LSB 0xF
+#define QIB_7220_IntClear_RcvAvail15IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvAvail14IntClear_LSB 0xE
+#define QIB_7220_IntClear_RcvAvail14IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvAvail13IntClear_LSB 0xD
+#define QIB_7220_IntClear_RcvAvail13IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvAvail12IntClear_LSB 0xC
+#define QIB_7220_IntClear_RcvAvail12IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvAvail11IntClear_LSB 0xB
+#define QIB_7220_IntClear_RcvAvail11IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvAvail10IntClear_LSB 0xA
+#define QIB_7220_IntClear_RcvAvail10IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvAvail9IntClear_LSB 0x9
+#define QIB_7220_IntClear_RcvAvail9IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvAvail8IntClear_LSB 0x8
+#define QIB_7220_IntClear_RcvAvail8IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvAvail7IntClear_LSB 0x7
+#define QIB_7220_IntClear_RcvAvail7IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvAvail6IntClear_LSB 0x6
+#define QIB_7220_IntClear_RcvAvail6IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvAvail5IntClear_LSB 0x5
+#define QIB_7220_IntClear_RcvAvail5IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvAvail4IntClear_LSB 0x4
+#define QIB_7220_IntClear_RcvAvail4IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvAvail3IntClear_LSB 0x3
+#define QIB_7220_IntClear_RcvAvail3IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvAvail2IntClear_LSB 0x2
+#define QIB_7220_IntClear_RcvAvail2IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvAvail1IntClear_LSB 0x1
+#define QIB_7220_IntClear_RcvAvail1IntClear_RMASK 0x1
+#define QIB_7220_IntClear_RcvAvail0IntClear_LSB 0x0
+#define QIB_7220_IntClear_RcvAvail0IntClear_RMASK 0x1
+
+#define QIB_7220_ErrMask_OFFS 0x80
+#define QIB_7220_ErrMask_Reserved_LSB 0x36
+#define QIB_7220_ErrMask_Reserved_RMASK 0x3FF
+#define QIB_7220_ErrMask_InvalidEEPCmdMask_LSB 0x35
+#define QIB_7220_ErrMask_InvalidEEPCmdMask_RMASK 0x1
+#define QIB_7220_ErrMask_SDmaDescAddrMisalignErrMask_LSB 0x34
+#define QIB_7220_ErrMask_SDmaDescAddrMisalignErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_HardwareErrMask_LSB 0x33
+#define QIB_7220_ErrMask_HardwareErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_ResetNegatedMask_LSB 0x32
+#define QIB_7220_ErrMask_ResetNegatedMask_RMASK 0x1
+#define QIB_7220_ErrMask_InvalidAddrErrMask_LSB 0x31
+#define QIB_7220_ErrMask_InvalidAddrErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_IBStatusChangedMask_LSB 0x30
+#define QIB_7220_ErrMask_IBStatusChangedMask_RMASK 0x1
+#define QIB_7220_ErrMask_SDmaUnexpDataErrMask_LSB 0x2F
+#define QIB_7220_ErrMask_SDmaUnexpDataErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SDmaMissingDwErrMask_LSB 0x2E
+#define QIB_7220_ErrMask_SDmaMissingDwErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SDmaDwEnErrMask_LSB 0x2D
+#define QIB_7220_ErrMask_SDmaDwEnErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SDmaRpyTagErrMask_LSB 0x2C
+#define QIB_7220_ErrMask_SDmaRpyTagErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SDma1stDescErrMask_LSB 0x2B
+#define QIB_7220_ErrMask_SDma1stDescErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SDmaBaseErrMask_LSB 0x2A
+#define QIB_7220_ErrMask_SDmaBaseErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SDmaTailOutOfBoundErrMask_LSB 0x29
+#define QIB_7220_ErrMask_SDmaTailOutOfBoundErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SDmaOutOfBoundErrMask_LSB 0x28
+#define QIB_7220_ErrMask_SDmaOutOfBoundErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SDmaGenMismatchErrMask_LSB 0x27
+#define QIB_7220_ErrMask_SDmaGenMismatchErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SendBufMisuseErrMask_LSB 0x26
+#define QIB_7220_ErrMask_SendBufMisuseErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SendUnsupportedVLErrMask_LSB 0x25
+#define QIB_7220_ErrMask_SendUnsupportedVLErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SendUnexpectedPktNumErrMask_LSB 0x24
+#define QIB_7220_ErrMask_SendUnexpectedPktNumErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SendPioArmLaunchErrMask_LSB 0x23
+#define QIB_7220_ErrMask_SendPioArmLaunchErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SendDroppedDataPktErrMask_LSB 0x22
+#define QIB_7220_ErrMask_SendDroppedDataPktErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SendDroppedSmpPktErrMask_LSB 0x21
+#define QIB_7220_ErrMask_SendDroppedSmpPktErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SendPktLenErrMask_LSB 0x20
+#define QIB_7220_ErrMask_SendPktLenErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SendUnderRunErrMask_LSB 0x1F
+#define QIB_7220_ErrMask_SendUnderRunErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SendMaxPktLenErrMask_LSB 0x1E
+#define QIB_7220_ErrMask_SendMaxPktLenErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SendMinPktLenErrMask_LSB 0x1D
+#define QIB_7220_ErrMask_SendMinPktLenErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SDmaDisabledErrMask_LSB 0x1C
+#define QIB_7220_ErrMask_SDmaDisabledErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_SendSpecialTriggerErrMask_LSB 0x1B
+#define QIB_7220_ErrMask_SendSpecialTriggerErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_Reserved1_LSB 0x12
+#define QIB_7220_ErrMask_Reserved1_RMASK 0x1FF
+#define QIB_7220_ErrMask_RcvIBLostLinkErrMask_LSB 0x11
+#define QIB_7220_ErrMask_RcvIBLostLinkErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_RcvHdrErrMask_LSB 0x10
+#define QIB_7220_ErrMask_RcvHdrErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_RcvHdrLenErrMask_LSB 0xF
+#define QIB_7220_ErrMask_RcvHdrLenErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_RcvBadTidErrMask_LSB 0xE
+#define QIB_7220_ErrMask_RcvBadTidErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_RcvHdrFullErrMask_LSB 0xD
+#define QIB_7220_ErrMask_RcvHdrFullErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_RcvEgrFullErrMask_LSB 0xC
+#define QIB_7220_ErrMask_RcvEgrFullErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_RcvBadVersionErrMask_LSB 0xB
+#define QIB_7220_ErrMask_RcvBadVersionErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_RcvIBFlowErrMask_LSB 0xA
+#define QIB_7220_ErrMask_RcvIBFlowErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_RcvEBPErrMask_LSB 0x9
+#define QIB_7220_ErrMask_RcvEBPErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_RcvUnsupportedVLErrMask_LSB 0x8
+#define QIB_7220_ErrMask_RcvUnsupportedVLErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_RcvUnexpectedCharErrMask_LSB 0x7
+#define QIB_7220_ErrMask_RcvUnexpectedCharErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_RcvShortPktLenErrMask_LSB 0x6
+#define QIB_7220_ErrMask_RcvShortPktLenErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_RcvLongPktLenErrMask_LSB 0x5
+#define QIB_7220_ErrMask_RcvLongPktLenErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_RcvMaxPktLenErrMask_LSB 0x4
+#define QIB_7220_ErrMask_RcvMaxPktLenErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_RcvMinPktLenErrMask_LSB 0x3
+#define QIB_7220_ErrMask_RcvMinPktLenErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_RcvICRCErrMask_LSB 0x2
+#define QIB_7220_ErrMask_RcvICRCErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_RcvVCRCErrMask_LSB 0x1
+#define QIB_7220_ErrMask_RcvVCRCErrMask_RMASK 0x1
+#define QIB_7220_ErrMask_RcvFormatErrMask_LSB 0x0
+#define QIB_7220_ErrMask_RcvFormatErrMask_RMASK 0x1
+
+#define QIB_7220_ErrStatus_OFFS 0x88
+#define QIB_7220_ErrStatus_Reserved_LSB 0x36
+#define QIB_7220_ErrStatus_Reserved_RMASK 0x3FF
+#define QIB_7220_ErrStatus_InvalidEEPCmdErr_LSB 0x35
+#define QIB_7220_ErrStatus_InvalidEEPCmdErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SDmaDescAddrMisalignErr_LSB 0x34
+#define QIB_7220_ErrStatus_SDmaDescAddrMisalignErr_RMASK 0x1
+#define QIB_7220_ErrStatus_HardwareErr_LSB 0x33
+#define QIB_7220_ErrStatus_HardwareErr_RMASK 0x1
+#define QIB_7220_ErrStatus_ResetNegated_LSB 0x32
+#define QIB_7220_ErrStatus_ResetNegated_RMASK 0x1
+#define QIB_7220_ErrStatus_InvalidAddrErr_LSB 0x31
+#define QIB_7220_ErrStatus_InvalidAddrErr_RMASK 0x1
+#define QIB_7220_ErrStatus_IBStatusChanged_LSB 0x30
+#define QIB_7220_ErrStatus_IBStatusChanged_RMASK 0x1
+#define QIB_7220_ErrStatus_SDmaUnexpDataErr_LSB 0x2F
+#define QIB_7220_ErrStatus_SDmaUnexpDataErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SDmaMissingDwErr_LSB 0x2E
+#define QIB_7220_ErrStatus_SDmaMissingDwErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SDmaDwEnErr_LSB 0x2D
+#define QIB_7220_ErrStatus_SDmaDwEnErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SDmaRpyTagErr_LSB 0x2C
+#define QIB_7220_ErrStatus_SDmaRpyTagErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SDma1stDescErr_LSB 0x2B
+#define QIB_7220_ErrStatus_SDma1stDescErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SDmaBaseErr_LSB 0x2A
+#define QIB_7220_ErrStatus_SDmaBaseErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SDmaTailOutOfBoundErr_LSB 0x29
+#define QIB_7220_ErrStatus_SDmaTailOutOfBoundErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SDmaOutOfBoundErr_LSB 0x28
+#define QIB_7220_ErrStatus_SDmaOutOfBoundErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SDmaGenMismatchErr_LSB 0x27
+#define QIB_7220_ErrStatus_SDmaGenMismatchErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SendBufMisuseErr_LSB 0x26
+#define QIB_7220_ErrStatus_SendBufMisuseErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SendUnsupportedVLErr_LSB 0x25
+#define QIB_7220_ErrStatus_SendUnsupportedVLErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SendUnexpectedPktNumErr_LSB 0x24
+#define QIB_7220_ErrStatus_SendUnexpectedPktNumErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SendPioArmLaunchErr_LSB 0x23
+#define QIB_7220_ErrStatus_SendPioArmLaunchErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SendDroppedDataPktErr_LSB 0x22
+#define QIB_7220_ErrStatus_SendDroppedDataPktErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SendDroppedSmpPktErr_LSB 0x21
+#define QIB_7220_ErrStatus_SendDroppedSmpPktErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SendPktLenErr_LSB 0x20
+#define QIB_7220_ErrStatus_SendPktLenErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SendUnderRunErr_LSB 0x1F
+#define QIB_7220_ErrStatus_SendUnderRunErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SendMaxPktLenErr_LSB 0x1E
+#define QIB_7220_ErrStatus_SendMaxPktLenErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SendMinPktLenErr_LSB 0x1D
+#define QIB_7220_ErrStatus_SendMinPktLenErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SDmaDisabledErr_LSB 0x1C
+#define QIB_7220_ErrStatus_SDmaDisabledErr_RMASK 0x1
+#define QIB_7220_ErrStatus_SendSpecialTriggerErr_LSB 0x1B
+#define QIB_7220_ErrStatus_SendSpecialTriggerErr_RMASK 0x1
+#define QIB_7220_ErrStatus_Reserved1_LSB 0x12
+#define QIB_7220_ErrStatus_Reserved1_RMASK 0x1FF
+#define QIB_7220_ErrStatus_RcvIBLostLinkErr_LSB 0x11
+#define QIB_7220_ErrStatus_RcvIBLostLinkErr_RMASK 0x1
+#define QIB_7220_ErrStatus_RcvHdrErr_LSB 0x10
+#define QIB_7220_ErrStatus_RcvHdrErr_RMASK 0x1
+#define QIB_7220_ErrStatus_RcvHdrLenErr_LSB 0xF
+#define QIB_7220_ErrStatus_RcvHdrLenErr_RMASK 0x1
+#define QIB_7220_ErrStatus_RcvBadTidErr_LSB 0xE
+#define QIB_7220_ErrStatus_RcvBadTidErr_RMASK 0x1
+#define QIB_7220_ErrStatus_RcvHdrFullErr_LSB 0xD
+#define QIB_7220_ErrStatus_RcvHdrFullErr_RMASK 0x1
+#define QIB_7220_ErrStatus_RcvEgrFullErr_LSB 0xC
+#define QIB_7220_ErrStatus_RcvEgrFullErr_RMASK 0x1
+#define QIB_7220_ErrStatus_RcvBadVersionErr_LSB 0xB
+#define QIB_7220_ErrStatus_RcvBadVersionErr_RMASK 0x1
+#define QIB_7220_ErrStatus_RcvIBFlowErr_LSB 0xA
+#define QIB_7220_ErrStatus_RcvIBFlowErr_RMASK 0x1
+#define QIB_7220_ErrStatus_RcvEBPErr_LSB 0x9
+#define QIB_7220_ErrStatus_RcvEBPErr_RMASK 0x1
+#define QIB_7220_ErrStatus_RcvUnsupportedVLErr_LSB 0x8
+#define QIB_7220_ErrStatus_RcvUnsupportedVLErr_RMASK 0x1
+#define QIB_7220_ErrStatus_RcvUnexpectedCharErr_LSB 0x7
+#define QIB_7220_ErrStatus_RcvUnexpectedCharErr_RMASK 0x1
+#define QIB_7220_ErrStatus_RcvShortPktLenErr_LSB 0x6
+#define QIB_7220_ErrStatus_RcvShortPktLenErr_RMASK 0x1
+#define QIB_7220_ErrStatus_RcvLongPktLenErr_LSB 0x5
+#define QIB_7220_ErrStatus_RcvLongPktLenErr_RMASK 0x1
+#define QIB_7220_ErrStatus_RcvMaxPktLenErr_LSB 0x4
+#define QIB_7220_ErrStatus_RcvMaxPktLenErr_RMASK 0x1
+#define QIB_7220_ErrStatus_RcvMinPktLenErr_LSB 0x3
+#define QIB_7220_ErrStatus_RcvMinPktLenErr_RMASK 0x1
+#define QIB_7220_ErrStatus_RcvICRCErr_LSB 0x2
+#define QIB_7220_ErrStatus_RcvICRCErr_RMASK 0x1
+#define QIB_7220_ErrStatus_RcvVCRCErr_LSB 0x1
+#define QIB_7220_ErrStatus_RcvVCRCErr_RMASK 0x1
+#define QIB_7220_ErrStatus_RcvFormatErr_LSB 0x0
+#define QIB_7220_ErrStatus_RcvFormatErr_RMASK 0x1
+
+#define QIB_7220_ErrClear_OFFS 0x90
+#define QIB_7220_ErrClear_Reserved_LSB 0x36
+#define QIB_7220_ErrClear_Reserved_RMASK 0x3FF
+#define QIB_7220_ErrClear_InvalidEEPCmdErrClear_LSB 0x35
+#define QIB_7220_ErrClear_InvalidEEPCmdErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SDmaDescAddrMisalignErrClear_LSB 0x34
+#define QIB_7220_ErrClear_SDmaDescAddrMisalignErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_HardwareErrClear_LSB 0x33
+#define QIB_7220_ErrClear_HardwareErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_ResetNegatedClear_LSB 0x32
+#define QIB_7220_ErrClear_ResetNegatedClear_RMASK 0x1
+#define QIB_7220_ErrClear_InvalidAddrErrClear_LSB 0x31
+#define QIB_7220_ErrClear_InvalidAddrErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_IBStatusChangedClear_LSB 0x30
+#define QIB_7220_ErrClear_IBStatusChangedClear_RMASK 0x1
+#define QIB_7220_ErrClear_SDmaUnexpDataErrClear_LSB 0x2F
+#define QIB_7220_ErrClear_SDmaUnexpDataErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SDmaMissingDwErrClear_LSB 0x2E
+#define QIB_7220_ErrClear_SDmaMissingDwErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SDmaDwEnErrClear_LSB 0x2D
+#define QIB_7220_ErrClear_SDmaDwEnErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SDmaRpyTagErrClear_LSB 0x2C
+#define QIB_7220_ErrClear_SDmaRpyTagErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SDma1stDescErrClear_LSB 0x2B
+#define QIB_7220_ErrClear_SDma1stDescErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SDmaBaseErrClear_LSB 0x2A
+#define QIB_7220_ErrClear_SDmaBaseErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SDmaTailOutOfBoundErrClear_LSB 0x29
+#define QIB_7220_ErrClear_SDmaTailOutOfBoundErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SDmaOutOfBoundErrClear_LSB 0x28
+#define QIB_7220_ErrClear_SDmaOutOfBoundErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SDmaGenMismatchErrClear_LSB 0x27
+#define QIB_7220_ErrClear_SDmaGenMismatchErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SendBufMisuseErrClear_LSB 0x26
+#define QIB_7220_ErrClear_SendBufMisuseErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SendUnsupportedVLErrClear_LSB 0x25
+#define QIB_7220_ErrClear_SendUnsupportedVLErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SendUnexpectedPktNumErrClear_LSB 0x24
+#define QIB_7220_ErrClear_SendUnexpectedPktNumErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SendPioArmLaunchErrClear_LSB 0x23
+#define QIB_7220_ErrClear_SendPioArmLaunchErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SendDroppedDataPktErrClear_LSB 0x22
+#define QIB_7220_ErrClear_SendDroppedDataPktErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SendDroppedSmpPktErrClear_LSB 0x21
+#define QIB_7220_ErrClear_SendDroppedSmpPktErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SendPktLenErrClear_LSB 0x20
+#define QIB_7220_ErrClear_SendPktLenErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SendUnderRunErrClear_LSB 0x1F
+#define QIB_7220_ErrClear_SendUnderRunErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SendMaxPktLenErrClear_LSB 0x1E
+#define QIB_7220_ErrClear_SendMaxPktLenErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SendMinPktLenErrClear_LSB 0x1D
+#define QIB_7220_ErrClear_SendMinPktLenErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SDmaDisabledErrClear_LSB 0x1C
+#define QIB_7220_ErrClear_SDmaDisabledErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_SendSpecialTriggerErrClear_LSB 0x1B
+#define QIB_7220_ErrClear_SendSpecialTriggerErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_Reserved1_LSB 0x12
+#define QIB_7220_ErrClear_Reserved1_RMASK 0x1FF
+#define QIB_7220_ErrClear_RcvIBLostLinkErrClear_LSB 0x11
+#define QIB_7220_ErrClear_RcvIBLostLinkErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_RcvHdrErrClear_LSB 0x10
+#define QIB_7220_ErrClear_RcvHdrErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_RcvHdrLenErrClear_LSB 0xF
+#define QIB_7220_ErrClear_RcvHdrLenErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_RcvBadTidErrClear_LSB 0xE
+#define QIB_7220_ErrClear_RcvBadTidErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_RcvHdrFullErrClear_LSB 0xD
+#define QIB_7220_ErrClear_RcvHdrFullErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_RcvEgrFullErrClear_LSB 0xC
+#define QIB_7220_ErrClear_RcvEgrFullErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_RcvBadVersionErrClear_LSB 0xB
+#define QIB_7220_ErrClear_RcvBadVersionErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_RcvIBFlowErrClear_LSB 0xA
+#define QIB_7220_ErrClear_RcvIBFlowErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_RcvEBPErrClear_LSB 0x9
+#define QIB_7220_ErrClear_RcvEBPErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_RcvUnsupportedVLErrClear_LSB 0x8
+#define QIB_7220_ErrClear_RcvUnsupportedVLErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_RcvUnexpectedCharErrClear_LSB 0x7
+#define QIB_7220_ErrClear_RcvUnexpectedCharErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_RcvShortPktLenErrClear_LSB 0x6
+#define QIB_7220_ErrClear_RcvShortPktLenErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_RcvLongPktLenErrClear_LSB 0x5
+#define QIB_7220_ErrClear_RcvLongPktLenErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_RcvMaxPktLenErrClear_LSB 0x4
+#define QIB_7220_ErrClear_RcvMaxPktLenErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_RcvMinPktLenErrClear_LSB 0x3
+#define QIB_7220_ErrClear_RcvMinPktLenErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_RcvICRCErrClear_LSB 0x2
+#define QIB_7220_ErrClear_RcvICRCErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_RcvVCRCErrClear_LSB 0x1
+#define QIB_7220_ErrClear_RcvVCRCErrClear_RMASK 0x1
+#define QIB_7220_ErrClear_RcvFormatErrClear_LSB 0x0
+#define QIB_7220_ErrClear_RcvFormatErrClear_RMASK 0x1
+
+#define QIB_7220_HwErrMask_OFFS 0x98
+#define QIB_7220_HwErrMask_IBCBusFromSPCParityErrMask_LSB 0x3F
+#define QIB_7220_HwErrMask_IBCBusFromSPCParityErrMask_RMASK 0x1
+#define QIB_7220_HwErrMask_IBCBusToSPCParityErrMask_LSB 0x3E
+#define QIB_7220_HwErrMask_IBCBusToSPCParityErrMask_RMASK 0x1
+#define QIB_7220_HwErrMask_Clk_uC_PLLNotLockedMask_LSB 0x3D
+#define QIB_7220_HwErrMask_Clk_uC_PLLNotLockedMask_RMASK 0x1
+#define QIB_7220_HwErrMask_IBSerdesPClkNotDetectMask_LSB 0x3C
+#define QIB_7220_HwErrMask_IBSerdesPClkNotDetectMask_RMASK 0x1
+#define QIB_7220_HwErrMask_PCIESerdesQ3PClkNotDetectMask_LSB 0x3B
+#define QIB_7220_HwErrMask_PCIESerdesQ3PClkNotDetectMask_RMASK 0x1
+#define QIB_7220_HwErrMask_PCIESerdesQ2PClkNotDetectMask_LSB 0x3A
+#define QIB_7220_HwErrMask_PCIESerdesQ2PClkNotDetectMask_RMASK 0x1
+#define QIB_7220_HwErrMask_PCIESerdesQ1PClkNotDetectMask_LSB 0x39
+#define QIB_7220_HwErrMask_PCIESerdesQ1PClkNotDetectMask_RMASK 0x1
+#define QIB_7220_HwErrMask_PCIESerdesQ0PClkNotDetectMask_LSB 0x38
+#define QIB_7220_HwErrMask_PCIESerdesQ0PClkNotDetectMask_RMASK 0x1
+#define QIB_7220_HwErrMask_Reserved_LSB 0x37
+#define QIB_7220_HwErrMask_Reserved_RMASK 0x1
+#define QIB_7220_HwErrMask_PowerOnBISTFailedMask_LSB 0x36
+#define QIB_7220_HwErrMask_PowerOnBISTFailedMask_RMASK 0x1
+#define QIB_7220_HwErrMask_Reserved1_LSB 0x33
+#define QIB_7220_HwErrMask_Reserved1_RMASK 0x7
+#define QIB_7220_HwErrMask_RXEMemParityErrMask_LSB 0x2C
+#define QIB_7220_HwErrMask_RXEMemParityErrMask_RMASK 0x7F
+#define QIB_7220_HwErrMask_TXEMemParityErrMask_LSB 0x28
+#define QIB_7220_HwErrMask_TXEMemParityErrMask_RMASK 0xF
+#define QIB_7220_HwErrMask_DDSRXEQMemoryParityErrMask_LSB 0x27
+#define QIB_7220_HwErrMask_DDSRXEQMemoryParityErrMask_RMASK 0x1
+#define QIB_7220_HwErrMask_IB_uC_MemoryParityErrMask_LSB 0x26
+#define QIB_7220_HwErrMask_IB_uC_MemoryParityErrMask_RMASK 0x1
+#define QIB_7220_HwErrMask_PCIEOct1_uC_MemoryParityErrMask_LSB 0x25
+#define QIB_7220_HwErrMask_PCIEOct1_uC_MemoryParityErrMask_RMASK 0x1
+#define QIB_7220_HwErrMask_PCIEOct0_uC_MemoryParityErrMask_LSB 0x24
+#define QIB_7220_HwErrMask_PCIEOct0_uC_MemoryParityErrMask_RMASK 0x1
+#define QIB_7220_HwErrMask_Reserved2_LSB 0x22
+#define QIB_7220_HwErrMask_Reserved2_RMASK 0x3
+#define QIB_7220_HwErrMask_PCIeBusParityErrMask_LSB 0x1F
+#define QIB_7220_HwErrMask_PCIeBusParityErrMask_RMASK 0x7
+#define QIB_7220_HwErrMask_PcieCplTimeoutMask_LSB 0x1E
+#define QIB_7220_HwErrMask_PcieCplTimeoutMask_RMASK 0x1
+#define QIB_7220_HwErrMask_PoisonedTLPMask_LSB 0x1D
+#define QIB_7220_HwErrMask_PoisonedTLPMask_RMASK 0x1
+#define QIB_7220_HwErrMask_SDmaMemReadErrMask_LSB 0x1C
+#define QIB_7220_HwErrMask_SDmaMemReadErrMask_RMASK 0x1
+#define QIB_7220_HwErrMask_Reserved3_LSB 0x8
+#define QIB_7220_HwErrMask_Reserved3_RMASK 0xFFFFF
+#define QIB_7220_HwErrMask_PCIeMemParityErrMask_LSB 0x0
+#define QIB_7220_HwErrMask_PCIeMemParityErrMask_RMASK 0xFF
+
+#define QIB_7220_HwErrStatus_OFFS 0xA0
+#define QIB_7220_HwErrStatus_IBCBusFromSPCParityErr_LSB 0x3F
+#define QIB_7220_HwErrStatus_IBCBusFromSPCParityErr_RMASK 0x1
+#define QIB_7220_HwErrStatus_IBCBusToSPCParityErr_LSB 0x3E
+#define QIB_7220_HwErrStatus_IBCBusToSPCParityErr_RMASK 0x1
+#define QIB_7220_HwErrStatus_Clk_uC_PLLNotLocked_LSB 0x3D
+#define QIB_7220_HwErrStatus_Clk_uC_PLLNotLocked_RMASK 0x1
+#define QIB_7220_HwErrStatus_IBSerdesPClkNotDetect_LSB 0x3C
+#define QIB_7220_HwErrStatus_IBSerdesPClkNotDetect_RMASK 0x1
+#define QIB_7220_HwErrStatus_PCIESerdesQ3PClkNotDetect_LSB 0x3B
+#define QIB_7220_HwErrStatus_PCIESerdesQ3PClkNotDetect_RMASK 0x1
+#define QIB_7220_HwErrStatus_PCIESerdesQ2PClkNotDetect_LSB 0x3A
+#define QIB_7220_HwErrStatus_PCIESerdesQ2PClkNotDetect_RMASK 0x1
+#define QIB_7220_HwErrStatus_PCIESerdesQ1PClkNotDetect_LSB 0x39
+#define QIB_7220_HwErrStatus_PCIESerdesQ1PClkNotDetect_RMASK 0x1
+#define QIB_7220_HwErrStatus_PCIESerdesQ0PClkNotDetect_LSB 0x38
+#define QIB_7220_HwErrStatus_PCIESerdesQ0PClkNotDetect_RMASK 0x1
+#define QIB_7220_HwErrStatus_Reserved_LSB 0x37
+#define QIB_7220_HwErrStatus_Reserved_RMASK 0x1
+#define QIB_7220_HwErrStatus_PowerOnBISTFailed_LSB 0x36
+#define QIB_7220_HwErrStatus_PowerOnBISTFailed_RMASK 0x1
+#define QIB_7220_HwErrStatus_Reserved1_LSB 0x33
+#define QIB_7220_HwErrStatus_Reserved1_RMASK 0x7
+#define QIB_7220_HwErrStatus_RXEMemParity_LSB 0x2C
+#define QIB_7220_HwErrStatus_RXEMemParity_RMASK 0x7F
+#define QIB_7220_HwErrStatus_TXEMemParity_LSB 0x28
+#define QIB_7220_HwErrStatus_TXEMemParity_RMASK 0xF
+#define QIB_7220_HwErrStatus_DDSRXEQMemoryParityErr_LSB 0x27
+#define QIB_7220_HwErrStatus_DDSRXEQMemoryParityErr_RMASK 0x1
+#define QIB_7220_HwErrStatus_IB_uC_MemoryParityErr_LSB 0x26
+#define QIB_7220_HwErrStatus_IB_uC_MemoryParityErr_RMASK 0x1
+#define QIB_7220_HwErrStatus_PCIE_uC_Oct1MemoryParityErr_LSB 0x25
+#define QIB_7220_HwErrStatus_PCIE_uC_Oct1MemoryParityErr_RMASK 0x1
+#define QIB_7220_HwErrStatus_PCIE_uC_Oct0MemoryParityErr_LSB 0x24
+#define QIB_7220_HwErrStatus_PCIE_uC_Oct0MemoryParityErr_RMASK 0x1
+#define QIB_7220_HwErrStatus_Reserved2_LSB 0x22
+#define QIB_7220_HwErrStatus_Reserved2_RMASK 0x3
+#define QIB_7220_HwErrStatus_PCIeBusParity_LSB 0x1F
+#define QIB_7220_HwErrStatus_PCIeBusParity_RMASK 0x7
+#define QIB_7220_HwErrStatus_PcieCplTimeout_LSB 0x1E
+#define QIB_7220_HwErrStatus_PcieCplTimeout_RMASK 0x1
+#define QIB_7220_HwErrStatus_PoisenedTLP_LSB 0x1D
+#define QIB_7220_HwErrStatus_PoisenedTLP_RMASK 0x1
+#define QIB_7220_HwErrStatus_SDmaMemReadErr_LSB 0x1C
+#define QIB_7220_HwErrStatus_SDmaMemReadErr_RMASK 0x1
+#define QIB_7220_HwErrStatus_Reserved3_LSB 0x8
+#define QIB_7220_HwErrStatus_Reserved3_RMASK 0xFFFFF
+#define QIB_7220_HwErrStatus_PCIeMemParity_LSB 0x0
+#define QIB_7220_HwErrStatus_PCIeMemParity_RMASK 0xFF
+
+#define QIB_7220_HwErrClear_OFFS 0xA8
+#define QIB_7220_HwErrClear_IBCBusFromSPCParityErrClear_LSB 0x3F
+#define QIB_7220_HwErrClear_IBCBusFromSPCParityErrClear_RMASK 0x1
+#define QIB_7220_HwErrClear_IBCBusToSPCparityErrClear_LSB 0x3E
+#define QIB_7220_HwErrClear_IBCBusToSPCparityErrClear_RMASK 0x1
+#define QIB_7220_HwErrClear_Clk_uC_PLLNotLockedClear_LSB 0x3D
+#define QIB_7220_HwErrClear_Clk_uC_PLLNotLockedClear_RMASK 0x1
+#define QIB_7220_HwErrClear_IBSerdesPClkNotDetectClear_LSB 0x3C
+#define QIB_7220_HwErrClear_IBSerdesPClkNotDetectClear_RMASK 0x1
+#define QIB_7220_HwErrClear_PCIESerdesQ3PClkNotDetectClear_LSB 0x3B
+#define QIB_7220_HwErrClear_PCIESerdesQ3PClkNotDetectClear_RMASK 0x1
+#define QIB_7220_HwErrClear_PCIESerdesQ2PClkNotDetectClear_LSB 0x3A
+#define QIB_7220_HwErrClear_PCIESerdesQ2PClkNotDetectClear_RMASK 0x1
+#define QIB_7220_HwErrClear_PCIESerdesQ1PClkNotDetectClear_LSB 0x39
+#define QIB_7220_HwErrClear_PCIESerdesQ1PClkNotDetectClear_RMASK 0x1
+#define QIB_7220_HwErrClear_PCIESerdesQ0PClkNotDetectClear_LSB 0x38
+#define QIB_7220_HwErrClear_PCIESerdesQ0PClkNotDetectClear_RMASK 0x1
+#define QIB_7220_HwErrClear_Reserved_LSB 0x37
+#define QIB_7220_HwErrClear_Reserved_RMASK 0x1
+#define QIB_7220_HwErrClear_PowerOnBISTFailedClear_LSB 0x36
+#define QIB_7220_HwErrClear_PowerOnBISTFailedClear_RMASK 0x1
+#define QIB_7220_HwErrClear_Reserved1_LSB 0x33
+#define QIB_7220_HwErrClear_Reserved1_RMASK 0x7
+#define QIB_7220_HwErrClear_RXEMemParityClear_LSB 0x2C
+#define QIB_7220_HwErrClear_RXEMemParityClear_RMASK 0x7F
+#define QIB_7220_HwErrClear_TXEMemParityClear_LSB 0x28
+#define QIB_7220_HwErrClear_TXEMemParityClear_RMASK 0xF
+#define QIB_7220_HwErrClear_DDSRXEQMemoryParityErrClear_LSB 0x27
+#define QIB_7220_HwErrClear_DDSRXEQMemoryParityErrClear_RMASK 0x1
+#define QIB_7220_HwErrClear_IB_uC_MemoryParityErrClear_LSB 0x26
+#define QIB_7220_HwErrClear_IB_uC_MemoryParityErrClear_RMASK 0x1
+#define QIB_7220_HwErrClear_PCIE_uC_Oct1MemoryParityErrClear_LSB 0x25
+#define QIB_7220_HwErrClear_PCIE_uC_Oct1MemoryParityErrClear_RMASK 0x1
+#define QIB_7220_HwErrClear_PCIE_uC_Oct0MemoryParityErrClear_LSB 0x24
+#define QIB_7220_HwErrClear_PCIE_uC_Oct0MemoryParityErrClear_RMASK 0x1
+#define QIB_7220_HwErrClear_Reserved2_LSB 0x22
+#define QIB_7220_HwErrClear_Reserved2_RMASK 0x3
+#define QIB_7220_HwErrClear_PCIeBusParityClr_LSB 0x1F
+#define QIB_7220_HwErrClear_PCIeBusParityClr_RMASK 0x7
+#define QIB_7220_HwErrClear_PcieCplTimeoutClear_LSB 0x1E
+#define QIB_7220_HwErrClear_PcieCplTimeoutClear_RMASK 0x1
+#define QIB_7220_HwErrClear_PoisonedTLPClear_LSB 0x1D
+#define QIB_7220_HwErrClear_PoisonedTLPClear_RMASK 0x1
+#define QIB_7220_HwErrClear_SDmaMemReadErrClear_LSB 0x1C
+#define QIB_7220_HwErrClear_SDmaMemReadErrClear_RMASK 0x1
+#define QIB_7220_HwErrClear_Reserved3_LSB 0x8
+#define QIB_7220_HwErrClear_Reserved3_RMASK 0xFFFFF
+#define QIB_7220_HwErrClear_PCIeMemParityClr_LSB 0x0
+#define QIB_7220_HwErrClear_PCIeMemParityClr_RMASK 0xFF
+
+#define QIB_7220_HwDiagCtrl_OFFS 0xB0
+#define QIB_7220_HwDiagCtrl_ForceIBCBusFromSPCParityErr_LSB 0x3F
+#define QIB_7220_HwDiagCtrl_ForceIBCBusFromSPCParityErr_RMASK 0x1
+#define QIB_7220_HwDiagCtrl_ForceIBCBusToSPCParityErr_LSB 0x3E
+#define QIB_7220_HwDiagCtrl_ForceIBCBusToSPCParityErr_RMASK 0x1
+#define QIB_7220_HwDiagCtrl_CounterWrEnable_LSB 0x3D
+#define QIB_7220_HwDiagCtrl_CounterWrEnable_RMASK 0x1
+#define QIB_7220_HwDiagCtrl_CounterDisable_LSB 0x3C
+#define QIB_7220_HwDiagCtrl_CounterDisable_RMASK 0x1
+#define QIB_7220_HwDiagCtrl_Reserved_LSB 0x33
+#define QIB_7220_HwDiagCtrl_Reserved_RMASK 0x1FF
+#define QIB_7220_HwDiagCtrl_ForceRxMemParityErr_LSB 0x2C
+#define QIB_7220_HwDiagCtrl_ForceRxMemParityErr_RMASK 0x7F
+#define QIB_7220_HwDiagCtrl_ForceTxMemparityErr_LSB 0x28
+#define QIB_7220_HwDiagCtrl_ForceTxMemparityErr_RMASK 0xF
+#define QIB_7220_HwDiagCtrl_ForceDDSRXEQMemoryParityErr_LSB 0x27
+#define QIB_7220_HwDiagCtrl_ForceDDSRXEQMemoryParityErr_RMASK 0x1
+#define QIB_7220_HwDiagCtrl_ForceIB_uC_MemoryParityErr_LSB 0x26
+#define QIB_7220_HwDiagCtrl_ForceIB_uC_MemoryParityErr_RMASK 0x1
+#define QIB_7220_HwDiagCtrl_ForcePCIE_uC_Oct1MemoryParityErr_LSB 0x25
+#define QIB_7220_HwDiagCtrl_ForcePCIE_uC_Oct1MemoryParityErr_RMASK 0x1
+#define QIB_7220_HwDiagCtrl_ForcePCIE_uC_Oct0MemoryParityErr_LSB 0x24
+#define QIB_7220_HwDiagCtrl_ForcePCIE_uC_Oct0MemoryParityErr_RMASK 0x1
+#define QIB_7220_HwDiagCtrl_Reserved1_LSB 0x23
+#define QIB_7220_HwDiagCtrl_Reserved1_RMASK 0x1
+#define QIB_7220_HwDiagCtrl_forcePCIeBusParity_LSB 0x1F
+#define QIB_7220_HwDiagCtrl_forcePCIeBusParity_RMASK 0xF
+#define QIB_7220_HwDiagCtrl_Reserved2_LSB 0x8
+#define QIB_7220_HwDiagCtrl_Reserved2_RMASK 0x7FFFFF
+#define QIB_7220_HwDiagCtrl_forcePCIeMemParity_LSB 0x0
+#define QIB_7220_HwDiagCtrl_forcePCIeMemParity_RMASK 0xFF
+
+#define QIB_7220_REG_0000B8_OFFS 0xB8
+
+#define QIB_7220_IBCStatus_OFFS 0xC0
+#define QIB_7220_IBCStatus_TxCreditOk_LSB 0x1F
+#define QIB_7220_IBCStatus_TxCreditOk_RMASK 0x1
+#define QIB_7220_IBCStatus_TxReady_LSB 0x1E
+#define QIB_7220_IBCStatus_TxReady_RMASK 0x1
+#define QIB_7220_IBCStatus_Reserved_LSB 0xE
+#define QIB_7220_IBCStatus_Reserved_RMASK 0xFFFF
+#define QIB_7220_IBCStatus_IBTxLaneReversed_LSB 0xD
+#define QIB_7220_IBCStatus_IBTxLaneReversed_RMASK 0x1
+#define QIB_7220_IBCStatus_IBRxLaneReversed_LSB 0xC
+#define QIB_7220_IBCStatus_IBRxLaneReversed_RMASK 0x1
+#define QIB_7220_IBCStatus_IB_SERDES_TRIM_DONE_LSB 0xB
+#define QIB_7220_IBCStatus_IB_SERDES_TRIM_DONE_RMASK 0x1
+#define QIB_7220_IBCStatus_DDS_RXEQ_FAIL_LSB 0xA
+#define QIB_7220_IBCStatus_DDS_RXEQ_FAIL_RMASK 0x1
+#define QIB_7220_IBCStatus_LinkWidthActive_LSB 0x9
+#define QIB_7220_IBCStatus_LinkWidthActive_RMASK 0x1
+#define QIB_7220_IBCStatus_LinkSpeedActive_LSB 0x8
+#define QIB_7220_IBCStatus_LinkSpeedActive_RMASK 0x1
+#define QIB_7220_IBCStatus_LinkState_LSB 0x5
+#define QIB_7220_IBCStatus_LinkState_RMASK 0x7
+#define QIB_7220_IBCStatus_LinkTrainingState_LSB 0x0
+#define QIB_7220_IBCStatus_LinkTrainingState_RMASK 0x1F
+
+#define QIB_7220_IBCCtrl_OFFS 0xC8
+#define QIB_7220_IBCCtrl_Loopback_LSB 0x3F
+#define QIB_7220_IBCCtrl_Loopback_RMASK 0x1
+#define QIB_7220_IBCCtrl_LinkDownDefaultState_LSB 0x3E
+#define QIB_7220_IBCCtrl_LinkDownDefaultState_RMASK 0x1
+#define QIB_7220_IBCCtrl_Reserved_LSB 0x2B
+#define QIB_7220_IBCCtrl_Reserved_RMASK 0x7FFFF
+#define QIB_7220_IBCCtrl_CreditScale_LSB 0x28
+#define QIB_7220_IBCCtrl_CreditScale_RMASK 0x7
+#define QIB_7220_IBCCtrl_OverrunThreshold_LSB 0x24
+#define QIB_7220_IBCCtrl_OverrunThreshold_RMASK 0xF
+#define QIB_7220_IBCCtrl_PhyerrThreshold_LSB 0x20
+#define QIB_7220_IBCCtrl_PhyerrThreshold_RMASK 0xF
+#define QIB_7220_IBCCtrl_MaxPktLen_LSB 0x15
+#define QIB_7220_IBCCtrl_MaxPktLen_RMASK 0x7FF
+#define QIB_7220_IBCCtrl_LinkCmd_LSB 0x13
+#define QIB_7220_IBCCtrl_LinkCmd_RMASK 0x3
+#define QIB_7220_IBCCtrl_LinkInitCmd_LSB 0x10
+#define QIB_7220_IBCCtrl_LinkInitCmd_RMASK 0x7
+#define QIB_7220_IBCCtrl_FlowCtrlWaterMark_LSB 0x8
+#define QIB_7220_IBCCtrl_FlowCtrlWaterMark_RMASK 0xFF
+#define QIB_7220_IBCCtrl_FlowCtrlPeriod_LSB 0x0
+#define QIB_7220_IBCCtrl_FlowCtrlPeriod_RMASK 0xFF
+
+#define QIB_7220_EXTStatus_OFFS 0xD0
+#define QIB_7220_EXTStatus_GPIOIn_LSB 0x30
+#define QIB_7220_EXTStatus_GPIOIn_RMASK 0xFFFF
+#define QIB_7220_EXTStatus_Reserved_LSB 0x20
+#define QIB_7220_EXTStatus_Reserved_RMASK 0xFFFF
+#define QIB_7220_EXTStatus_Reserved1_LSB 0x10
+#define QIB_7220_EXTStatus_Reserved1_RMASK 0xFFFF
+#define QIB_7220_EXTStatus_MemBISTDisabled_LSB 0xF
+#define QIB_7220_EXTStatus_MemBISTDisabled_RMASK 0x1
+#define QIB_7220_EXTStatus_MemBISTEndTest_LSB 0xE
+#define QIB_7220_EXTStatus_MemBISTEndTest_RMASK 0x1
+#define QIB_7220_EXTStatus_Reserved2_LSB 0x0
+#define QIB_7220_EXTStatus_Reserved2_RMASK 0x3FFF
+
+#define QIB_7220_EXTCtrl_OFFS 0xD8
+#define QIB_7220_EXTCtrl_GPIOOe_LSB 0x30
+#define QIB_7220_EXTCtrl_GPIOOe_RMASK 0xFFFF
+#define QIB_7220_EXTCtrl_GPIOInvert_LSB 0x20
+#define QIB_7220_EXTCtrl_GPIOInvert_RMASK 0xFFFF
+#define QIB_7220_EXTCtrl_Reserved_LSB 0x4
+#define QIB_7220_EXTCtrl_Reserved_RMASK 0xFFFFFFF
+#define QIB_7220_EXTCtrl_LEDPriPortGreenOn_LSB 0x3
+#define QIB_7220_EXTCtrl_LEDPriPortGreenOn_RMASK 0x1
+#define QIB_7220_EXTCtrl_LEDPriPortYellowOn_LSB 0x2
+#define QIB_7220_EXTCtrl_LEDPriPortYellowOn_RMASK 0x1
+#define QIB_7220_EXTCtrl_LEDGblOkGreenOn_LSB 0x1
+#define QIB_7220_EXTCtrl_LEDGblOkGreenOn_RMASK 0x1
+#define QIB_7220_EXTCtrl_LEDGblErrRedOff_LSB 0x0
+#define QIB_7220_EXTCtrl_LEDGblErrRedOff_RMASK 0x1
+
+#define QIB_7220_GPIOOut_OFFS 0xE0
+
+#define QIB_7220_GPIOMask_OFFS 0xE8
+
+#define QIB_7220_GPIOStatus_OFFS 0xF0
+
+#define QIB_7220_GPIOClear_OFFS 0xF8
+
+#define QIB_7220_RcvCtrl_OFFS 0x100
+#define QIB_7220_RcvCtrl_Reserved_LSB 0x27
+#define QIB_7220_RcvCtrl_Reserved_RMASK 0x1FFFFFF
+#define QIB_7220_RcvCtrl_RcvQPMapEnable_LSB 0x26
+#define QIB_7220_RcvCtrl_RcvQPMapEnable_RMASK 0x1
+#define QIB_7220_RcvCtrl_PortCfg_LSB 0x24
+#define QIB_7220_RcvCtrl_PortCfg_RMASK 0x3
+#define QIB_7220_RcvCtrl_TailUpd_LSB 0x23
+#define QIB_7220_RcvCtrl_TailUpd_RMASK 0x1
+#define QIB_7220_RcvCtrl_RcvPartitionKeyDisable_LSB 0x22
+#define QIB_7220_RcvCtrl_RcvPartitionKeyDisable_RMASK 0x1
+#define QIB_7220_RcvCtrl_IntrAvail_LSB 0x11
+#define QIB_7220_RcvCtrl_IntrAvail_RMASK 0x1FFFF
+#define QIB_7220_RcvCtrl_PortEnable_LSB 0x0
+#define QIB_7220_RcvCtrl_PortEnable_RMASK 0x1FFFF
+
+#define QIB_7220_RcvBTHQP_OFFS 0x108
+#define QIB_7220_RcvBTHQP_Reserved_LSB 0x18
+#define QIB_7220_RcvBTHQP_Reserved_RMASK 0xFF
+#define QIB_7220_RcvBTHQP_RcvBTHQP_LSB 0x0
+#define QIB_7220_RcvBTHQP_RcvBTHQP_RMASK 0xFFFFFF
+
+#define QIB_7220_RcvHdrSize_OFFS 0x110
+
+#define QIB_7220_RcvHdrCnt_OFFS 0x118
+
+#define QIB_7220_RcvHdrEntSize_OFFS 0x120
+
+#define QIB_7220_RcvTIDBase_OFFS 0x128
+
+#define QIB_7220_RcvTIDCnt_OFFS 0x130
+
+#define QIB_7220_RcvEgrBase_OFFS 0x138
+
+#define QIB_7220_RcvEgrCnt_OFFS 0x140
+
+#define QIB_7220_RcvBufBase_OFFS 0x148
+
+#define QIB_7220_RcvBufSize_OFFS 0x150
+
+#define QIB_7220_RxIntMemBase_OFFS 0x158
+
+#define QIB_7220_RxIntMemSize_OFFS 0x160
+
+#define QIB_7220_RcvPartitionKey_OFFS 0x168
+
+#define QIB_7220_RcvQPMulticastPort_OFFS 0x170
+#define QIB_7220_RcvQPMulticastPort_Reserved_LSB 0x5
+#define QIB_7220_RcvQPMulticastPort_Reserved_RMASK 0x7FFFFFFFFFFFFFF
+#define QIB_7220_RcvQPMulticastPort_RcvQpMcPort_LSB 0x0
+#define QIB_7220_RcvQPMulticastPort_RcvQpMcPort_RMASK 0x1F
+
+#define QIB_7220_RcvPktLEDCnt_OFFS 0x178
+#define QIB_7220_RcvPktLEDCnt_ONperiod_LSB 0x20
+#define QIB_7220_RcvPktLEDCnt_ONperiod_RMASK 0xFFFFFFFF
+#define QIB_7220_RcvPktLEDCnt_OFFperiod_LSB 0x0
+#define QIB_7220_RcvPktLEDCnt_OFFperiod_RMASK 0xFFFFFFFF
+
+#define QIB_7220_IBCDDRCtrl_OFFS 0x180
+#define QIB_7220_IBCDDRCtrl_IB_DLID_MASK_LSB 0x30
+#define QIB_7220_IBCDDRCtrl_IB_DLID_MASK_RMASK 0xFFFF
+#define QIB_7220_IBCDDRCtrl_IB_DLID_LSB 0x20
+#define QIB_7220_IBCDDRCtrl_IB_DLID_RMASK 0xFFFF
+#define QIB_7220_IBCDDRCtrl_Reserved_LSB 0x1B
+#define QIB_7220_IBCDDRCtrl_Reserved_RMASK 0x1F
+#define QIB_7220_IBCDDRCtrl_HRTBT_REQ_LSB 0x1A
+#define QIB_7220_IBCDDRCtrl_HRTBT_REQ_RMASK 0x1
+#define QIB_7220_IBCDDRCtrl_HRTBT_PORT_LSB 0x12
+#define QIB_7220_IBCDDRCtrl_HRTBT_PORT_RMASK 0xFF
+#define QIB_7220_IBCDDRCtrl_HRTBT_AUTO_LSB 0x11
+#define QIB_7220_IBCDDRCtrl_HRTBT_AUTO_RMASK 0x1
+#define QIB_7220_IBCDDRCtrl_HRTBT_ENB_LSB 0x10
+#define QIB_7220_IBCDDRCtrl_HRTBT_ENB_RMASK 0x1
+#define QIB_7220_IBCDDRCtrl_SD_DDS_LSB 0xC
+#define QIB_7220_IBCDDRCtrl_SD_DDS_RMASK 0xF
+#define QIB_7220_IBCDDRCtrl_SD_DDSV_LSB 0xB
+#define QIB_7220_IBCDDRCtrl_SD_DDSV_RMASK 0x1
+#define QIB_7220_IBCDDRCtrl_SD_ADD_ENB_LSB 0xA
+#define QIB_7220_IBCDDRCtrl_SD_ADD_ENB_RMASK 0x1
+#define QIB_7220_IBCDDRCtrl_SD_RX_EQUAL_ENABLE_LSB 0x9
+#define QIB_7220_IBCDDRCtrl_SD_RX_EQUAL_ENABLE_RMASK 0x1
+#define QIB_7220_IBCDDRCtrl_IB_LANE_REV_SUPPORTED_LSB 0x8
+#define QIB_7220_IBCDDRCtrl_IB_LANE_REV_SUPPORTED_RMASK 0x1
+#define QIB_7220_IBCDDRCtrl_IB_POLARITY_REV_SUPP_LSB 0x7
+#define QIB_7220_IBCDDRCtrl_IB_POLARITY_REV_SUPP_RMASK 0x1
+#define QIB_7220_IBCDDRCtrl_IB_NUM_CHANNELS_LSB 0x5
+#define QIB_7220_IBCDDRCtrl_IB_NUM_CHANNELS_RMASK 0x3
+#define QIB_7220_IBCDDRCtrl_SD_SPEED_QDR_LSB 0x4
+#define QIB_7220_IBCDDRCtrl_SD_SPEED_QDR_RMASK 0x1
+#define QIB_7220_IBCDDRCtrl_SD_SPEED_DDR_LSB 0x3
+#define QIB_7220_IBCDDRCtrl_SD_SPEED_DDR_RMASK 0x1
+#define QIB_7220_IBCDDRCtrl_SD_SPEED_SDR_LSB 0x2
+#define QIB_7220_IBCDDRCtrl_SD_SPEED_SDR_RMASK 0x1
+#define QIB_7220_IBCDDRCtrl_SD_SPEED_LSB 0x1
+#define QIB_7220_IBCDDRCtrl_SD_SPEED_RMASK 0x1
+#define QIB_7220_IBCDDRCtrl_IB_ENHANCED_MODE_LSB 0x0
+#define QIB_7220_IBCDDRCtrl_IB_ENHANCED_MODE_RMASK 0x1
+
+#define QIB_7220_HRTBT_GUID_OFFS 0x188
+
+#define QIB_7220_IBCDDRCtrl2_OFFS 0x1A0
+#define QIB_7220_IBCDDRCtrl2_IB_BACK_PORCH_LSB 0x5
+#define QIB_7220_IBCDDRCtrl2_IB_BACK_PORCH_RMASK 0x1F
+#define QIB_7220_IBCDDRCtrl2_IB_FRONT_PORCH_LSB 0x0
+#define QIB_7220_IBCDDRCtrl2_IB_FRONT_PORCH_RMASK 0x1F
+
+#define QIB_7220_IBCDDRStatus_OFFS 0x1A8
+#define QIB_7220_IBCDDRStatus_heartbeat_timed_out_LSB 0x24
+#define QIB_7220_IBCDDRStatus_heartbeat_timed_out_RMASK 0x1
+#define QIB_7220_IBCDDRStatus_heartbeat_crosstalk_LSB 0x20
+#define QIB_7220_IBCDDRStatus_heartbeat_crosstalk_RMASK 0xF
+#define QIB_7220_IBCDDRStatus_RxEqLocalDevice_LSB 0x1E
+#define QIB_7220_IBCDDRStatus_RxEqLocalDevice_RMASK 0x3
+#define QIB_7220_IBCDDRStatus_ReqDDSLocalFromRmt_LSB 0x1A
+#define QIB_7220_IBCDDRStatus_ReqDDSLocalFromRmt_RMASK 0xF
+#define QIB_7220_IBCDDRStatus_LinkRoundTripLatency_LSB 0x0
+#define QIB_7220_IBCDDRStatus_LinkRoundTripLatency_RMASK 0x3FFFFFF
+
+#define QIB_7220_JIntReload_OFFS 0x1B0
+#define QIB_7220_JIntReload_J_limit_reload_LSB 0x10
+#define QIB_7220_JIntReload_J_limit_reload_RMASK 0xFFFF
+#define QIB_7220_JIntReload_J_reload_LSB 0x0
+#define QIB_7220_JIntReload_J_reload_RMASK 0xFFFF
+
+#define QIB_7220_IBNCModeCtrl_OFFS 0x1B8
+#define QIB_7220_IBNCModeCtrl_Reserved_LSB 0x1A
+#define QIB_7220_IBNCModeCtrl_Reserved_RMASK 0x3FFFFFFFFF
+#define QIB_7220_IBNCModeCtrl_TSMCode_TS2_LSB 0x11
+#define QIB_7220_IBNCModeCtrl_TSMCode_TS2_RMASK 0x1FF
+#define QIB_7220_IBNCModeCtrl_TSMCode_TS1_LSB 0x8
+#define QIB_7220_IBNCModeCtrl_TSMCode_TS1_RMASK 0x1FF
+#define QIB_7220_IBNCModeCtrl_Reserved1_LSB 0x3
+#define QIB_7220_IBNCModeCtrl_Reserved1_RMASK 0x1F
+#define QIB_7220_IBNCModeCtrl_TSMEnable_ignore_TSM_on_rx_LSB 0x2
+#define QIB_7220_IBNCModeCtrl_TSMEnable_ignore_TSM_on_rx_RMASK 0x1
+#define QIB_7220_IBNCModeCtrl_TSMEnable_send_TS2_LSB 0x1
+#define QIB_7220_IBNCModeCtrl_TSMEnable_send_TS2_RMASK 0x1
+#define QIB_7220_IBNCModeCtrl_TSMEnable_send_TS1_LSB 0x0
+#define QIB_7220_IBNCModeCtrl_TSMEnable_send_TS1_RMASK 0x1
+
+#define QIB_7220_SendCtrl_OFFS 0x1C0
+#define QIB_7220_SendCtrl_Disarm_LSB 0x1F
+#define QIB_7220_SendCtrl_Disarm_RMASK 0x1
+#define QIB_7220_SendCtrl_Reserved_LSB 0x1D
+#define QIB_7220_SendCtrl_Reserved_RMASK 0x3
+#define QIB_7220_SendCtrl_AvailUpdThld_LSB 0x18
+#define QIB_7220_SendCtrl_AvailUpdThld_RMASK 0x1F
+#define QIB_7220_SendCtrl_DisarmPIOBuf_LSB 0x10
+#define QIB_7220_SendCtrl_DisarmPIOBuf_RMASK 0xFF
+#define QIB_7220_SendCtrl_Reserved1_LSB 0xD
+#define QIB_7220_SendCtrl_Reserved1_RMASK 0x7
+#define QIB_7220_SendCtrl_SDmaHalt_LSB 0xC
+#define QIB_7220_SendCtrl_SDmaHalt_RMASK 0x1
+#define QIB_7220_SendCtrl_SDmaEnable_LSB 0xB
+#define QIB_7220_SendCtrl_SDmaEnable_RMASK 0x1
+#define QIB_7220_SendCtrl_SDmaSingleDescriptor_LSB 0xA
+#define QIB_7220_SendCtrl_SDmaSingleDescriptor_RMASK 0x1
+#define QIB_7220_SendCtrl_SDmaIntEnable_LSB 0x9
+#define QIB_7220_SendCtrl_SDmaIntEnable_RMASK 0x1
+#define QIB_7220_SendCtrl_Reserved2_LSB 0x5
+#define QIB_7220_SendCtrl_Reserved2_RMASK 0xF
+#define QIB_7220_SendCtrl_SSpecialTriggerEn_LSB 0x4
+#define QIB_7220_SendCtrl_SSpecialTriggerEn_RMASK 0x1
+#define QIB_7220_SendCtrl_SPioEnable_LSB 0x3
+#define QIB_7220_SendCtrl_SPioEnable_RMASK 0x1
+#define QIB_7220_SendCtrl_SendBufAvailUpd_LSB 0x2
+#define QIB_7220_SendCtrl_SendBufAvailUpd_RMASK 0x1
+#define QIB_7220_SendCtrl_SendIntBufAvail_LSB 0x1
+#define QIB_7220_SendCtrl_SendIntBufAvail_RMASK 0x1
+#define QIB_7220_SendCtrl_Abort_LSB 0x0
+#define QIB_7220_SendCtrl_Abort_RMASK 0x1
+
+#define QIB_7220_SendBufBase_OFFS 0x1C8
+#define QIB_7220_SendBufBase_Reserved_LSB 0x35
+#define QIB_7220_SendBufBase_Reserved_RMASK 0x7FF
+#define QIB_7220_SendBufBase_BaseAddr_LargePIO_LSB 0x20
+#define QIB_7220_SendBufBase_BaseAddr_LargePIO_RMASK 0x1FFFFF
+#define QIB_7220_SendBufBase_Reserved1_LSB 0x15
+#define QIB_7220_SendBufBase_Reserved1_RMASK 0x7FF
+#define QIB_7220_SendBufBase_BaseAddr_SmallPIO_LSB 0x0
+#define QIB_7220_SendBufBase_BaseAddr_SmallPIO_RMASK 0x1FFFFF
+
+#define QIB_7220_SendBufSize_OFFS 0x1D0
+#define QIB_7220_SendBufSize_Reserved_LSB 0x2D
+#define QIB_7220_SendBufSize_Reserved_RMASK 0xFFFFF
+#define QIB_7220_SendBufSize_Size_LargePIO_LSB 0x20
+#define QIB_7220_SendBufSize_Size_LargePIO_RMASK 0x1FFF
+#define QIB_7220_SendBufSize_Reserved1_LSB 0xC
+#define QIB_7220_SendBufSize_Reserved1_RMASK 0xFFFFF
+#define QIB_7220_SendBufSize_Size_SmallPIO_LSB 0x0
+#define QIB_7220_SendBufSize_Size_SmallPIO_RMASK 0xFFF
+
+#define QIB_7220_SendBufCnt_OFFS 0x1D8
+#define QIB_7220_SendBufCnt_Reserved_LSB 0x24
+#define QIB_7220_SendBufCnt_Reserved_RMASK 0xFFFFFFF
+#define QIB_7220_SendBufCnt_Num_LargeBuffers_LSB 0x20
+#define QIB_7220_SendBufCnt_Num_LargeBuffers_RMASK 0xF
+#define QIB_7220_SendBufCnt_Reserved1_LSB 0x9
+#define QIB_7220_SendBufCnt_Reserved1_RMASK 0x7FFFFF
+#define QIB_7220_SendBufCnt_Num_SmallBuffers_LSB 0x0
+#define QIB_7220_SendBufCnt_Num_SmallBuffers_RMASK 0x1FF
+
+#define QIB_7220_SendBufAvailAddr_OFFS 0x1E0
+#define QIB_7220_SendBufAvailAddr_SendBufAvailAddr_LSB 0x6
+#define QIB_7220_SendBufAvailAddr_SendBufAvailAddr_RMASK 0x3FFFFFFFF
+#define QIB_7220_SendBufAvailAddr_Reserved_LSB 0x0
+#define QIB_7220_SendBufAvailAddr_Reserved_RMASK 0x3F
+
+#define QIB_7220_TxIntMemBase_OFFS 0x1E8
+
+#define QIB_7220_TxIntMemSize_OFFS 0x1F0
+
+#define QIB_7220_SendDmaBase_OFFS 0x1F8
+#define QIB_7220_SendDmaBase_Reserved_LSB 0x30
+#define QIB_7220_SendDmaBase_Reserved_RMASK 0xFFFF
+#define QIB_7220_SendDmaBase_SendDmaBase_LSB 0x0
+#define QIB_7220_SendDmaBase_SendDmaBase_RMASK 0xFFFFFFFFFFFF
+
+#define QIB_7220_SendDmaLenGen_OFFS 0x200
+#define QIB_7220_SendDmaLenGen_Reserved_LSB 0x13
+#define QIB_7220_SendDmaLenGen_Reserved_RMASK 0x1FFFFFFFFFFF
+#define QIB_7220_SendDmaLenGen_Generation_LSB 0x10
+#define QIB_7220_SendDmaLenGen_Generation_MSB 0x12
+#define QIB_7220_SendDmaLenGen_Generation_RMASK 0x7
+#define QIB_7220_SendDmaLenGen_Length_LSB 0x0
+#define QIB_7220_SendDmaLenGen_Length_RMASK 0xFFFF
+
+#define QIB_7220_SendDmaTail_OFFS 0x208
+#define QIB_7220_SendDmaTail_Reserved_LSB 0x10
+#define QIB_7220_SendDmaTail_Reserved_RMASK 0xFFFFFFFFFFFF
+#define QIB_7220_SendDmaTail_SendDmaTail_LSB 0x0
+#define QIB_7220_SendDmaTail_SendDmaTail_RMASK 0xFFFF
+
+#define QIB_7220_SendDmaHead_OFFS 0x210
+#define QIB_7220_SendDmaHead_Reserved_LSB 0x30
+#define QIB_7220_SendDmaHead_Reserved_RMASK 0xFFFF
+#define QIB_7220_SendDmaHead_InternalSendDmaHead_LSB 0x20
+#define QIB_7220_SendDmaHead_InternalSendDmaHead_RMASK 0xFFFF
+#define QIB_7220_SendDmaHead_Reserved1_LSB 0x10
+#define QIB_7220_SendDmaHead_Reserved1_RMASK 0xFFFF
+#define QIB_7220_SendDmaHead_SendDmaHead_LSB 0x0
+#define QIB_7220_SendDmaHead_SendDmaHead_RMASK 0xFFFF
+
+#define QIB_7220_SendDmaHeadAddr_OFFS 0x218
+#define QIB_7220_SendDmaHeadAddr_Reserved_LSB 0x30
+#define QIB_7220_SendDmaHeadAddr_Reserved_RMASK 0xFFFF
+#define QIB_7220_SendDmaHeadAddr_SendDmaHeadAddr_LSB 0x0
+#define QIB_7220_SendDmaHeadAddr_SendDmaHeadAddr_RMASK 0xFFFFFFFFFFFF
+
+#define QIB_7220_SendDmaBufMask0_OFFS 0x220
+#define QIB_7220_SendDmaBufMask0_BufMask_63_0_LSB 0x0
+#define QIB_7220_SendDmaBufMask0_BufMask_63_0_RMASK 0x0
+
+#define QIB_7220_SendDmaStatus_OFFS 0x238
+#define QIB_7220_SendDmaStatus_ScoreBoardDrainInProg_LSB 0x3F
+#define QIB_7220_SendDmaStatus_ScoreBoardDrainInProg_RMASK 0x1
+#define QIB_7220_SendDmaStatus_AbortInProg_LSB 0x3E
+#define QIB_7220_SendDmaStatus_AbortInProg_RMASK 0x1
+#define QIB_7220_SendDmaStatus_InternalSDmaEnable_LSB 0x3D
+#define QIB_7220_SendDmaStatus_InternalSDmaEnable_RMASK 0x1
+#define QIB_7220_SendDmaStatus_ScbDescIndex_13_0_LSB 0x2F
+#define QIB_7220_SendDmaStatus_ScbDescIndex_13_0_RMASK 0x3FFF
+#define QIB_7220_SendDmaStatus_RpyLowAddr_6_0_LSB 0x28
+#define QIB_7220_SendDmaStatus_RpyLowAddr_6_0_RMASK 0x7F
+#define QIB_7220_SendDmaStatus_RpyTag_7_0_LSB 0x20
+#define QIB_7220_SendDmaStatus_RpyTag_7_0_RMASK 0xFF
+#define QIB_7220_SendDmaStatus_ScbFull_LSB 0x1F
+#define QIB_7220_SendDmaStatus_ScbFull_RMASK 0x1
+#define QIB_7220_SendDmaStatus_ScbEmpty_LSB 0x1E
+#define QIB_7220_SendDmaStatus_ScbEmpty_RMASK 0x1
+#define QIB_7220_SendDmaStatus_ScbEntryValid_LSB 0x1D
+#define QIB_7220_SendDmaStatus_ScbEntryValid_RMASK 0x1
+#define QIB_7220_SendDmaStatus_ScbFetchDescFlag_LSB 0x1C
+#define QIB_7220_SendDmaStatus_ScbFetchDescFlag_RMASK 0x1
+#define QIB_7220_SendDmaStatus_SplFifoReadyToGo_LSB 0x1B
+#define QIB_7220_SendDmaStatus_SplFifoReadyToGo_RMASK 0x1
+#define QIB_7220_SendDmaStatus_SplFifoDisarmed_LSB 0x1A
+#define QIB_7220_SendDmaStatus_SplFifoDisarmed_RMASK 0x1
+#define QIB_7220_SendDmaStatus_SplFifoEmpty_LSB 0x19
+#define QIB_7220_SendDmaStatus_SplFifoEmpty_RMASK 0x1
+#define QIB_7220_SendDmaStatus_SplFifoFull_LSB 0x18
+#define QIB_7220_SendDmaStatus_SplFifoFull_RMASK 0x1
+#define QIB_7220_SendDmaStatus_SplFifoBufNum_LSB 0x10
+#define QIB_7220_SendDmaStatus_SplFifoBufNum_RMASK 0xFF
+#define QIB_7220_SendDmaStatus_SplFifoDescIndex_LSB 0x0
+#define QIB_7220_SendDmaStatus_SplFifoDescIndex_RMASK 0xFFFF
+
+#define QIB_7220_SendBufErr0_OFFS 0x240
+#define QIB_7220_SendBufErr0_SendBufErr_63_0_LSB 0x0
+#define QIB_7220_SendBufErr0_SendBufErr_63_0_RMASK 0x0
+
+#define QIB_7220_RcvHdrAddr0_OFFS 0x270
+#define QIB_7220_RcvHdrAddr0_RcvHdrAddr0_LSB 0x2
+#define QIB_7220_RcvHdrAddr0_RcvHdrAddr0_RMASK 0x3FFFFFFFFF
+#define QIB_7220_RcvHdrAddr0_Reserved_LSB 0x0
+#define QIB_7220_RcvHdrAddr0_Reserved_RMASK 0x3
+
+#define QIB_7220_RcvHdrTailAddr0_OFFS 0x300
+#define QIB_7220_RcvHdrTailAddr0_RcvHdrTailAddr0_LSB 0x2
+#define QIB_7220_RcvHdrTailAddr0_RcvHdrTailAddr0_RMASK 0x3FFFFFFFFF
+#define QIB_7220_RcvHdrTailAddr0_Reserved_LSB 0x0
+#define QIB_7220_RcvHdrTailAddr0_Reserved_RMASK 0x3
+
+#define QIB_7220_ibsd_epb_access_ctrl_OFFS 0x3C0
+#define QIB_7220_ibsd_epb_access_ctrl_sw_ib_epb_req_granted_LSB 0x8
+#define QIB_7220_ibsd_epb_access_ctrl_sw_ib_epb_req_granted_RMASK 0x1
+#define QIB_7220_ibsd_epb_access_ctrl_Reserved_LSB 0x1
+#define QIB_7220_ibsd_epb_access_ctrl_Reserved_RMASK 0x7F
+#define QIB_7220_ibsd_epb_access_ctrl_sw_ib_epb_req_LSB 0x0
+#define QIB_7220_ibsd_epb_access_ctrl_sw_ib_epb_req_RMASK 0x1
+
+#define QIB_7220_ibsd_epb_transaction_reg_OFFS 0x3C8
+#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_rdy_LSB 0x1F
+#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_rdy_RMASK 0x1
+#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_req_error_LSB 0x1E
+#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_req_error_RMASK 0x1
+#define QIB_7220_ibsd_epb_transaction_reg_Reserved_LSB 0x1D
+#define QIB_7220_ibsd_epb_transaction_reg_Reserved_RMASK 0x1
+#define QIB_7220_ibsd_epb_transaction_reg_mem_data_parity_LSB 0x1C
+#define QIB_7220_ibsd_epb_transaction_reg_mem_data_parity_RMASK 0x1
+#define QIB_7220_ibsd_epb_transaction_reg_Reserved1_LSB 0x1B
+#define QIB_7220_ibsd_epb_transaction_reg_Reserved1_RMASK 0x1
+#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_cs_LSB 0x19
+#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_cs_RMASK 0x3
+#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_read_write_LSB 0x18
+#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_read_write_RMASK 0x1
+#define QIB_7220_ibsd_epb_transaction_reg_Reserved2_LSB 0x17
+#define QIB_7220_ibsd_epb_transaction_reg_Reserved2_RMASK 0x1
+#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_address_LSB 0x8
+#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_address_RMASK 0x7FFF
+#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_data_LSB 0x0
+#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_data_RMASK 0xFF
+
+#define QIB_7220_XGXSCfg_OFFS 0x3D8
+#define QIB_7220_XGXSCfg_sel_link_down_for_fctrl_lane_sync_reset_LSB 0x3F
+#define QIB_7220_XGXSCfg_sel_link_down_for_fctrl_lane_sync_reset_RMASK 0x1
+#define QIB_7220_XGXSCfg_Reserved_LSB 0x13
+#define QIB_7220_XGXSCfg_Reserved_RMASK 0xFFFFFFFFFFF
+#define QIB_7220_XGXSCfg_link_sync_mask_LSB 0x9
+#define QIB_7220_XGXSCfg_link_sync_mask_RMASK 0x3FF
+#define QIB_7220_XGXSCfg_Reserved1_LSB 0x3
+#define QIB_7220_XGXSCfg_Reserved1_RMASK 0x3F
+#define QIB_7220_XGXSCfg_xcv_reset_LSB 0x2
+#define QIB_7220_XGXSCfg_xcv_reset_RMASK 0x1
+#define QIB_7220_XGXSCfg_Reserved2_LSB 0x1
+#define QIB_7220_XGXSCfg_Reserved2_RMASK 0x1
+#define QIB_7220_XGXSCfg_tx_rx_reset_LSB 0x0
+#define QIB_7220_XGXSCfg_tx_rx_reset_RMASK 0x1
+
+#define QIB_7220_IBSerDesCtrl_OFFS 0x3E0
+#define QIB_7220_IBSerDesCtrl_Reserved_LSB 0x2D
+#define QIB_7220_IBSerDesCtrl_Reserved_RMASK 0x7FFFF
+#define QIB_7220_IBSerDesCtrl_INT_uC_LSB 0x2C
+#define QIB_7220_IBSerDesCtrl_INT_uC_RMASK 0x1
+#define QIB_7220_IBSerDesCtrl_CKSEL_uC_LSB 0x2A
+#define QIB_7220_IBSerDesCtrl_CKSEL_uC_RMASK 0x3
+#define QIB_7220_IBSerDesCtrl_PLLN_LSB 0x28
+#define QIB_7220_IBSerDesCtrl_PLLN_RMASK 0x3
+#define QIB_7220_IBSerDesCtrl_PLLM_LSB 0x25
+#define QIB_7220_IBSerDesCtrl_PLLM_RMASK 0x7
+#define QIB_7220_IBSerDesCtrl_TXOBPD_LSB 0x24
+#define QIB_7220_IBSerDesCtrl_TXOBPD_RMASK 0x1
+#define QIB_7220_IBSerDesCtrl_TWC_LSB 0x23
+#define QIB_7220_IBSerDesCtrl_TWC_RMASK 0x1
+#define QIB_7220_IBSerDesCtrl_RXIDLE_LSB 0x22
+#define QIB_7220_IBSerDesCtrl_RXIDLE_RMASK 0x1
+#define QIB_7220_IBSerDesCtrl_RXINV_LSB 0x21
+#define QIB_7220_IBSerDesCtrl_RXINV_RMASK 0x1
+#define QIB_7220_IBSerDesCtrl_TXINV_LSB 0x20
+#define QIB_7220_IBSerDesCtrl_TXINV_RMASK 0x1
+#define QIB_7220_IBSerDesCtrl_Reserved1_LSB 0x12
+#define QIB_7220_IBSerDesCtrl_Reserved1_RMASK 0x3FFF
+#define QIB_7220_IBSerDesCtrl_NumSerDesRegsToWrForRXEQ_LSB 0xD
+#define QIB_7220_IBSerDesCtrl_NumSerDesRegsToWrForRXEQ_RMASK 0x1F
+#define QIB_7220_IBSerDesCtrl_NumSerDesRegsToWrForDDS_LSB 0x8
+#define QIB_7220_IBSerDesCtrl_NumSerDesRegsToWrForDDS_RMASK 0x1F
+#define QIB_7220_IBSerDesCtrl_Reserved2_LSB 0x1
+#define QIB_7220_IBSerDesCtrl_Reserved2_RMASK 0x7F
+#define QIB_7220_IBSerDesCtrl_ResetIB_uC_Core_LSB 0x0
+#define QIB_7220_IBSerDesCtrl_ResetIB_uC_Core_RMASK 0x1
+
+#define QIB_7220_pciesd_epb_access_ctrl_OFFS 0x400
+#define QIB_7220_pciesd_epb_access_ctrl_sw_pcie_epb_req_granted_LSB 0x8
+#define QIB_7220_pciesd_epb_access_ctrl_sw_pcie_epb_req_granted_RMASK 0x1
+#define QIB_7220_pciesd_epb_access_ctrl_Reserved_LSB 0x3
+#define QIB_7220_pciesd_epb_access_ctrl_Reserved_RMASK 0x1F
+#define QIB_7220_pciesd_epb_access_ctrl_sw_pcieepb_star_en_LSB 0x1
+#define QIB_7220_pciesd_epb_access_ctrl_sw_pcieepb_star_en_RMASK 0x3
+#define QIB_7220_pciesd_epb_access_ctrl_sw_pcie_epb_req_LSB 0x0
+#define QIB_7220_pciesd_epb_access_ctrl_sw_pcie_epb_req_RMASK 0x1
+
+#define QIB_7220_pciesd_epb_transaction_reg_OFFS 0x408
+#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_rdy_LSB 0x1F
+#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_rdy_RMASK 0x1
+#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_req_error_LSB 0x1E
+#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_req_error_RMASK 0x1
+#define QIB_7220_pciesd_epb_transaction_reg_Reserved_LSB 0x1D
+#define QIB_7220_pciesd_epb_transaction_reg_Reserved_RMASK 0x1
+#define QIB_7220_pciesd_epb_transaction_reg_mem_data_parity_LSB 0x1C
+#define QIB_7220_pciesd_epb_transaction_reg_mem_data_parity_RMASK 0x1
+#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_cs_LSB 0x19
+#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_cs_RMASK 0x7
+#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_read_write_LSB 0x18
+#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_read_write_RMASK 0x1
+#define QIB_7220_pciesd_epb_transaction_reg_Reserved1_LSB 0x17
+#define QIB_7220_pciesd_epb_transaction_reg_Reserved1_RMASK 0x1
+#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_address_LSB 0x8
+#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_address_RMASK 0x7FFF
+#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_data_LSB 0x0
+#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_data_RMASK 0xFF
+
+#define QIB_7220_SerDes_DDSRXEQ0_OFFS 0x500
+#define QIB_7220_SerDes_DDSRXEQ0_reg_addr_LSB 0x4
+#define QIB_7220_SerDes_DDSRXEQ0_reg_addr_RMASK 0x3F
+#define QIB_7220_SerDes_DDSRXEQ0_element_num_LSB 0x0
+#define QIB_7220_SerDes_DDSRXEQ0_element_num_RMASK 0xF
+
+#define QIB_7220_LBIntCnt_OFFS 0x13000
+
+#define QIB_7220_LBFlowStallCnt_OFFS 0x13008
+
+#define QIB_7220_TxSDmaDescCnt_OFFS 0x13010
+
+#define QIB_7220_TxUnsupVLErrCnt_OFFS 0x13018
+
+#define QIB_7220_TxDataPktCnt_OFFS 0x13020
+
+#define QIB_7220_TxFlowPktCnt_OFFS 0x13028
+
+#define QIB_7220_TxDwordCnt_OFFS 0x13030
+
+#define QIB_7220_TxLenErrCnt_OFFS 0x13038
+
+#define QIB_7220_TxMaxMinLenErrCnt_OFFS 0x13040
+
+#define QIB_7220_TxUnderrunCnt_OFFS 0x13048
+
+#define QIB_7220_TxFlowStallCnt_OFFS 0x13050
+
+#define QIB_7220_TxDroppedPktCnt_OFFS 0x13058
+
+#define QIB_7220_RxDroppedPktCnt_OFFS 0x13060
+
+#define QIB_7220_RxDataPktCnt_OFFS 0x13068
+
+#define QIB_7220_RxFlowPktCnt_OFFS 0x13070
+
+#define QIB_7220_RxDwordCnt_OFFS 0x13078
+
+#define QIB_7220_RxLenErrCnt_OFFS 0x13080
+
+#define QIB_7220_RxMaxMinLenErrCnt_OFFS 0x13088
+
+#define QIB_7220_RxICRCErrCnt_OFFS 0x13090
+
+#define QIB_7220_RxVCRCErrCnt_OFFS 0x13098
+
+#define QIB_7220_RxFlowCtrlViolCnt_OFFS 0x130A0
+
+#define QIB_7220_RxVersionErrCnt_OFFS 0x130A8
+
+#define QIB_7220_RxLinkMalformCnt_OFFS 0x130B0
+
+#define QIB_7220_RxEBPCnt_OFFS 0x130B8
+
+#define QIB_7220_RxLPCRCErrCnt_OFFS 0x130C0
+
+#define QIB_7220_RxBufOvflCnt_OFFS 0x130C8
+
+#define QIB_7220_RxTIDFullErrCnt_OFFS 0x130D0
+
+#define QIB_7220_RxTIDValidErrCnt_OFFS 0x130D8
+
+#define QIB_7220_RxPKeyMismatchCnt_OFFS 0x130E0
+
+#define QIB_7220_RxP0HdrEgrOvflCnt_OFFS 0x130E8
+
+#define QIB_7220_IBStatusChangeCnt_OFFS 0x13170
+
+#define QIB_7220_IBLinkErrRecoveryCnt_OFFS 0x13178
+
+#define QIB_7220_IBLinkDownedCnt_OFFS 0x13180
+
+#define QIB_7220_IBSymbolErrCnt_OFFS 0x13188
+
+#define QIB_7220_RxVL15DroppedPktCnt_OFFS 0x13190
+
+#define QIB_7220_RxOtherLocalPhyErrCnt_OFFS 0x13198
+
+#define QIB_7220_PcieRetryBufDiagQwordCnt_OFFS 0x131A0
+
+#define QIB_7220_ExcessBufferOvflCnt_OFFS 0x131A8
+
+#define QIB_7220_LocalLinkIntegrityErrCnt_OFFS 0x131B0
+
+#define QIB_7220_RxVlErrCnt_OFFS 0x131B8
+
+#define QIB_7220_RxDlidFltrCnt_OFFS 0x131C0
+
+#define QIB_7220_CNT_0131C8_OFFS 0x131C8
+
+#define QIB_7220_PSStat_OFFS 0x13200
+
+#define QIB_7220_PSStart_OFFS 0x13208
+
+#define QIB_7220_PSInterval_OFFS 0x13210
+
+#define QIB_7220_PSRcvDataCount_OFFS 0x13218
+
+#define QIB_7220_PSRcvPktsCount_OFFS 0x13220
+
+#define QIB_7220_PSXmitDataCount_OFFS 0x13228
+
+#define QIB_7220_PSXmitPktsCount_OFFS 0x13230
+
+#define QIB_7220_PSXmitWaitCount_OFFS 0x13238
+
+#define QIB_7220_CNT_013240_OFFS 0x13240
+
+#define QIB_7220_RcvEgrArray_OFFS 0x14000
+
+#define QIB_7220_MEM_038000_OFFS 0x38000
+
+#define QIB_7220_RcvTIDArray0_OFFS 0x53000
+
+#define QIB_7220_PIOLaunchFIFO_OFFS 0x64000
+
+#define QIB_7220_MEM_064480_OFFS 0x64480
+
+#define QIB_7220_SendPIOpbcCache_OFFS 0x64800
+
+#define QIB_7220_MEM_064C80_OFFS 0x64C80
+
+#define QIB_7220_PreLaunchFIFO_OFFS 0x65000
+
+#define QIB_7220_MEM_065080_OFFS 0x65080
+
+#define QIB_7220_ScoreBoard_OFFS 0x65400
+
+#define QIB_7220_MEM_065440_OFFS 0x65440
+
+#define QIB_7220_DescriptorFIFO_OFFS 0x65800
+
+#define QIB_7220_MEM_065880_OFFS 0x65880
+
+#define QIB_7220_RcvBuf1_OFFS 0x72000
+
+#define QIB_7220_MEM_074800_OFFS 0x74800
+
+#define QIB_7220_RcvBuf2_OFFS 0x75000
+
+#define QIB_7220_MEM_076400_OFFS 0x76400
+
+#define QIB_7220_RcvFlags_OFFS 0x77000
+
+#define QIB_7220_MEM_078400_OFFS 0x78400
+
+#define QIB_7220_RcvLookupBuf1_OFFS 0x79000
+
+#define QIB_7220_MEM_07A400_OFFS 0x7A400
+
+#define QIB_7220_RcvDMADatBuf_OFFS 0x7B000
+
+#define QIB_7220_RcvDMAHdrBuf_OFFS 0x7B800
+
+#define QIB_7220_MiscRXEIntMem_OFFS 0x7C000
+
+#define QIB_7220_MEM_07D400_OFFS 0x7D400
+
+#define QIB_7220_PCIERcvBuf_OFFS 0x80000
+
+#define QIB_7220_PCIERetryBuf_OFFS 0x84000
+
+#define QIB_7220_PCIERcvBufRdToWrAddr_OFFS 0x88000
+
+#define QIB_7220_PCIECplBuf_OFFS 0x90000
+
+#define QIB_7220_IBSerDesMappTable_OFFS 0x94000
+
+#define QIB_7220_MEM_095000_OFFS 0x95000
+
+#define QIB_7220_SendBuf0_MA_OFFS 0x100000
+
+#define QIB_7220_MEM_1A0000_OFFS 0x1A0000
diff --git a/drivers/infiniband/hw/qib/qib_7322_regs.h b/drivers/infiniband/hw/qib/qib_7322_regs.h
new file mode 100644
index 000000000000..a97440ba924c
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_7322_regs.h
@@ -0,0 +1,3163 @@
+/*
+ * Copyright (c) 2008, 2009, 2010 QLogic Corporation. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* This file is mechanically generated from RTL. Any hand-edits will be lost! */
+
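Editor's note: the short sketch below is illustrative only and is not part of the diff or of the mechanically generated header. It shows the conventional way a driver consumes the *_OFFS/*_LSB/*_RMASK definitions in this file: read the 64-bit register at its _OFFS, shift right by the field's _LSB, and mask with the field's _RMASK. The qib_read_kreg64()-style accessor and the local variable names are assumptions made for the example, not taken from this patch.

	/* Minimal sketch, assuming a 64-bit register read accessor exists. */
	u64 rev     = qib_read_kreg64(dd, QIB_7322_Revision_OFFS);     /* hypothetical accessor */
	u32 boardid = (rev >> QIB_7322_Revision_BoardID_LSB) &
		      QIB_7322_Revision_BoardID_RMASK;                 /* 8-bit BoardID field */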
+#define QIB_7322_Revision_OFFS 0x0
+#define QIB_7322_Revision_DEF 0x0000000002010601
+#define QIB_7322_Revision_R_Simulator_LSB 0x3F
+#define QIB_7322_Revision_R_Simulator_MSB 0x3F
+#define QIB_7322_Revision_R_Simulator_RMASK 0x1
+#define QIB_7322_Revision_R_Emulation_LSB 0x3E
+#define QIB_7322_Revision_R_Emulation_MSB 0x3E
+#define QIB_7322_Revision_R_Emulation_RMASK 0x1
+#define QIB_7322_Revision_R_Emulation_Revcode_LSB 0x28
+#define QIB_7322_Revision_R_Emulation_Revcode_MSB 0x3D
+#define QIB_7322_Revision_R_Emulation_Revcode_RMASK 0x3FFFFF
+#define QIB_7322_Revision_BoardID_LSB 0x20
+#define QIB_7322_Revision_BoardID_MSB 0x27
+#define QIB_7322_Revision_BoardID_RMASK 0xFF
+#define QIB_7322_Revision_R_SW_LSB 0x18
+#define QIB_7322_Revision_R_SW_MSB 0x1F
+#define QIB_7322_Revision_R_SW_RMASK 0xFF
+#define QIB_7322_Revision_R_Arch_LSB 0x10
+#define QIB_7322_Revision_R_Arch_MSB 0x17
+#define QIB_7322_Revision_R_Arch_RMASK 0xFF
+#define QIB_7322_Revision_R_ChipRevMajor_LSB 0x8
+#define QIB_7322_Revision_R_ChipRevMajor_MSB 0xF
+#define QIB_7322_Revision_R_ChipRevMajor_RMASK 0xFF
+#define QIB_7322_Revision_R_ChipRevMinor_LSB 0x0
+#define QIB_7322_Revision_R_ChipRevMinor_MSB 0x7
+#define QIB_7322_Revision_R_ChipRevMinor_RMASK 0xFF
+
+#define QIB_7322_Control_OFFS 0x8
+#define QIB_7322_Control_DEF 0x0000000000000000
+#define QIB_7322_Control_PCIECplQDiagEn_LSB 0x6
+#define QIB_7322_Control_PCIECplQDiagEn_MSB 0x6
+#define QIB_7322_Control_PCIECplQDiagEn_RMASK 0x1
+#define QIB_7322_Control_PCIEPostQDiagEn_LSB 0x5
+#define QIB_7322_Control_PCIEPostQDiagEn_MSB 0x5
+#define QIB_7322_Control_PCIEPostQDiagEn_RMASK 0x1
+#define QIB_7322_Control_SDmaDescFetchPriorityEn_LSB 0x4
+#define QIB_7322_Control_SDmaDescFetchPriorityEn_MSB 0x4
+#define QIB_7322_Control_SDmaDescFetchPriorityEn_RMASK 0x1
+#define QIB_7322_Control_PCIERetryBufDiagEn_LSB 0x3
+#define QIB_7322_Control_PCIERetryBufDiagEn_MSB 0x3
+#define QIB_7322_Control_PCIERetryBufDiagEn_RMASK 0x1
+#define QIB_7322_Control_FreezeMode_LSB 0x1
+#define QIB_7322_Control_FreezeMode_MSB 0x1
+#define QIB_7322_Control_FreezeMode_RMASK 0x1
+#define QIB_7322_Control_SyncReset_LSB 0x0
+#define QIB_7322_Control_SyncReset_MSB 0x0
+#define QIB_7322_Control_SyncReset_RMASK 0x1
+
+#define QIB_7322_PageAlign_OFFS 0x10
+#define QIB_7322_PageAlign_DEF 0x0000000000001000
+
+#define QIB_7322_ContextCnt_OFFS 0x18
+#define QIB_7322_ContextCnt_DEF 0x0000000000000012
+
+#define QIB_7322_Scratch_OFFS 0x20
+#define QIB_7322_Scratch_DEF 0x0000000000000000
+
+#define QIB_7322_CntrRegBase_OFFS 0x28
+#define QIB_7322_CntrRegBase_DEF 0x0000000000011000
+
+#define QIB_7322_SendRegBase_OFFS 0x30
+#define QIB_7322_SendRegBase_DEF 0x0000000000003000
+
+#define QIB_7322_UserRegBase_OFFS 0x38
+#define QIB_7322_UserRegBase_DEF 0x0000000000200000
+
+#define QIB_7322_IntMask_OFFS 0x68
+#define QIB_7322_IntMask_DEF 0x0000000000000000
+#define QIB_7322_IntMask_SDmaIntMask_1_LSB 0x3F
+#define QIB_7322_IntMask_SDmaIntMask_1_MSB 0x3F
+#define QIB_7322_IntMask_SDmaIntMask_1_RMASK 0x1
+#define QIB_7322_IntMask_SDmaIntMask_0_LSB 0x3E
+#define QIB_7322_IntMask_SDmaIntMask_0_MSB 0x3E
+#define QIB_7322_IntMask_SDmaIntMask_0_RMASK 0x1
+#define QIB_7322_IntMask_SDmaProgressIntMask_1_LSB 0x3D
+#define QIB_7322_IntMask_SDmaProgressIntMask_1_MSB 0x3D
+#define QIB_7322_IntMask_SDmaProgressIntMask_1_RMASK 0x1
+#define QIB_7322_IntMask_SDmaProgressIntMask_0_LSB 0x3C
+#define QIB_7322_IntMask_SDmaProgressIntMask_0_MSB 0x3C
+#define QIB_7322_IntMask_SDmaProgressIntMask_0_RMASK 0x1
+#define QIB_7322_IntMask_SDmaIdleIntMask_1_LSB 0x3B
+#define QIB_7322_IntMask_SDmaIdleIntMask_1_MSB 0x3B
+#define QIB_7322_IntMask_SDmaIdleIntMask_1_RMASK 0x1
+#define QIB_7322_IntMask_SDmaIdleIntMask_0_LSB 0x3A
+#define QIB_7322_IntMask_SDmaIdleIntMask_0_MSB 0x3A
+#define QIB_7322_IntMask_SDmaIdleIntMask_0_RMASK 0x1
+#define QIB_7322_IntMask_SDmaCleanupDoneMask_1_LSB 0x39
+#define QIB_7322_IntMask_SDmaCleanupDoneMask_1_MSB 0x39
+#define QIB_7322_IntMask_SDmaCleanupDoneMask_1_RMASK 0x1
+#define QIB_7322_IntMask_SDmaCleanupDoneMask_0_LSB 0x38
+#define QIB_7322_IntMask_SDmaCleanupDoneMask_0_MSB 0x38
+#define QIB_7322_IntMask_SDmaCleanupDoneMask_0_RMASK 0x1
+#define QIB_7322_IntMask_RcvUrg17IntMask_LSB 0x31
+#define QIB_7322_IntMask_RcvUrg17IntMask_MSB 0x31
+#define QIB_7322_IntMask_RcvUrg17IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvUrg16IntMask_LSB 0x30
+#define QIB_7322_IntMask_RcvUrg16IntMask_MSB 0x30
+#define QIB_7322_IntMask_RcvUrg16IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvUrg15IntMask_LSB 0x2F
+#define QIB_7322_IntMask_RcvUrg15IntMask_MSB 0x2F
+#define QIB_7322_IntMask_RcvUrg15IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvUrg14IntMask_LSB 0x2E
+#define QIB_7322_IntMask_RcvUrg14IntMask_MSB 0x2E
+#define QIB_7322_IntMask_RcvUrg14IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvUrg13IntMask_LSB 0x2D
+#define QIB_7322_IntMask_RcvUrg13IntMask_MSB 0x2D
+#define QIB_7322_IntMask_RcvUrg13IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvUrg12IntMask_LSB 0x2C
+#define QIB_7322_IntMask_RcvUrg12IntMask_MSB 0x2C
+#define QIB_7322_IntMask_RcvUrg12IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvUrg11IntMask_LSB 0x2B
+#define QIB_7322_IntMask_RcvUrg11IntMask_MSB 0x2B
+#define QIB_7322_IntMask_RcvUrg11IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvUrg10IntMask_LSB 0x2A
+#define QIB_7322_IntMask_RcvUrg10IntMask_MSB 0x2A
+#define QIB_7322_IntMask_RcvUrg10IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvUrg9IntMask_LSB 0x29
+#define QIB_7322_IntMask_RcvUrg9IntMask_MSB 0x29
+#define QIB_7322_IntMask_RcvUrg9IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvUrg8IntMask_LSB 0x28
+#define QIB_7322_IntMask_RcvUrg8IntMask_MSB 0x28
+#define QIB_7322_IntMask_RcvUrg8IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvUrg7IntMask_LSB 0x27
+#define QIB_7322_IntMask_RcvUrg7IntMask_MSB 0x27
+#define QIB_7322_IntMask_RcvUrg7IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvUrg6IntMask_LSB 0x26
+#define QIB_7322_IntMask_RcvUrg6IntMask_MSB 0x26
+#define QIB_7322_IntMask_RcvUrg6IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvUrg5IntMask_LSB 0x25
+#define QIB_7322_IntMask_RcvUrg5IntMask_MSB 0x25
+#define QIB_7322_IntMask_RcvUrg5IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvUrg4IntMask_LSB 0x24
+#define QIB_7322_IntMask_RcvUrg4IntMask_MSB 0x24
+#define QIB_7322_IntMask_RcvUrg4IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvUrg3IntMask_LSB 0x23
+#define QIB_7322_IntMask_RcvUrg3IntMask_MSB 0x23
+#define QIB_7322_IntMask_RcvUrg3IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvUrg2IntMask_LSB 0x22
+#define QIB_7322_IntMask_RcvUrg2IntMask_MSB 0x22
+#define QIB_7322_IntMask_RcvUrg2IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvUrg1IntMask_LSB 0x21
+#define QIB_7322_IntMask_RcvUrg1IntMask_MSB 0x21
+#define QIB_7322_IntMask_RcvUrg1IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvUrg0IntMask_LSB 0x20
+#define QIB_7322_IntMask_RcvUrg0IntMask_MSB 0x20
+#define QIB_7322_IntMask_RcvUrg0IntMask_RMASK 0x1
+#define QIB_7322_IntMask_ErrIntMask_1_LSB 0x1F
+#define QIB_7322_IntMask_ErrIntMask_1_MSB 0x1F
+#define QIB_7322_IntMask_ErrIntMask_1_RMASK 0x1
+#define QIB_7322_IntMask_ErrIntMask_0_LSB 0x1E
+#define QIB_7322_IntMask_ErrIntMask_0_MSB 0x1E
+#define QIB_7322_IntMask_ErrIntMask_0_RMASK 0x1
+#define QIB_7322_IntMask_ErrIntMask_LSB 0x1D
+#define QIB_7322_IntMask_ErrIntMask_MSB 0x1D
+#define QIB_7322_IntMask_ErrIntMask_RMASK 0x1
+#define QIB_7322_IntMask_AssertGPIOIntMask_LSB 0x1C
+#define QIB_7322_IntMask_AssertGPIOIntMask_MSB 0x1C
+#define QIB_7322_IntMask_AssertGPIOIntMask_RMASK 0x1
+#define QIB_7322_IntMask_SendDoneIntMask_1_LSB 0x19
+#define QIB_7322_IntMask_SendDoneIntMask_1_MSB 0x19
+#define QIB_7322_IntMask_SendDoneIntMask_1_RMASK 0x1
+#define QIB_7322_IntMask_SendDoneIntMask_0_LSB 0x18
+#define QIB_7322_IntMask_SendDoneIntMask_0_MSB 0x18
+#define QIB_7322_IntMask_SendDoneIntMask_0_RMASK 0x1
+#define QIB_7322_IntMask_SendBufAvailIntMask_LSB 0x17
+#define QIB_7322_IntMask_SendBufAvailIntMask_MSB 0x17
+#define QIB_7322_IntMask_SendBufAvailIntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvAvail17IntMask_LSB 0x11
+#define QIB_7322_IntMask_RcvAvail17IntMask_MSB 0x11
+#define QIB_7322_IntMask_RcvAvail17IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvAvail16IntMask_LSB 0x10
+#define QIB_7322_IntMask_RcvAvail16IntMask_MSB 0x10
+#define QIB_7322_IntMask_RcvAvail16IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvAvail15IntMask_LSB 0xF
+#define QIB_7322_IntMask_RcvAvail15IntMask_MSB 0xF
+#define QIB_7322_IntMask_RcvAvail15IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvAvail14IntMask_LSB 0xE
+#define QIB_7322_IntMask_RcvAvail14IntMask_MSB 0xE
+#define QIB_7322_IntMask_RcvAvail14IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvAvail13IntMask_LSB 0xD
+#define QIB_7322_IntMask_RcvAvail13IntMask_MSB 0xD
+#define QIB_7322_IntMask_RcvAvail13IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvAvail12IntMask_LSB 0xC
+#define QIB_7322_IntMask_RcvAvail12IntMask_MSB 0xC
+#define QIB_7322_IntMask_RcvAvail12IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvAvail11IntMask_LSB 0xB
+#define QIB_7322_IntMask_RcvAvail11IntMask_MSB 0xB
+#define QIB_7322_IntMask_RcvAvail11IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvAvail10IntMask_LSB 0xA
+#define QIB_7322_IntMask_RcvAvail10IntMask_MSB 0xA
+#define QIB_7322_IntMask_RcvAvail10IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvAvail9IntMask_LSB 0x9
+#define QIB_7322_IntMask_RcvAvail9IntMask_MSB 0x9
+#define QIB_7322_IntMask_RcvAvail9IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvAvail8IntMask_LSB 0x8
+#define QIB_7322_IntMask_RcvAvail8IntMask_MSB 0x8
+#define QIB_7322_IntMask_RcvAvail8IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvAvail7IntMask_LSB 0x7
+#define QIB_7322_IntMask_RcvAvail7IntMask_MSB 0x7
+#define QIB_7322_IntMask_RcvAvail7IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvAvail6IntMask_LSB 0x6
+#define QIB_7322_IntMask_RcvAvail6IntMask_MSB 0x6
+#define QIB_7322_IntMask_RcvAvail6IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvAvail5IntMask_LSB 0x5
+#define QIB_7322_IntMask_RcvAvail5IntMask_MSB 0x5
+#define QIB_7322_IntMask_RcvAvail5IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvAvail4IntMask_LSB 0x4
+#define QIB_7322_IntMask_RcvAvail4IntMask_MSB 0x4
+#define QIB_7322_IntMask_RcvAvail4IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvAvail3IntMask_LSB 0x3
+#define QIB_7322_IntMask_RcvAvail3IntMask_MSB 0x3
+#define QIB_7322_IntMask_RcvAvail3IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvAvail2IntMask_LSB 0x2
+#define QIB_7322_IntMask_RcvAvail2IntMask_MSB 0x2
+#define QIB_7322_IntMask_RcvAvail2IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvAvail1IntMask_LSB 0x1
+#define QIB_7322_IntMask_RcvAvail1IntMask_MSB 0x1
+#define QIB_7322_IntMask_RcvAvail1IntMask_RMASK 0x1
+#define QIB_7322_IntMask_RcvAvail0IntMask_LSB 0x0
+#define QIB_7322_IntMask_RcvAvail0IntMask_MSB 0x0
+#define QIB_7322_IntMask_RcvAvail0IntMask_RMASK 0x1
+
+#define QIB_7322_IntStatus_OFFS 0x70
+#define QIB_7322_IntStatus_DEF 0x0000000000000000
+#define QIB_7322_IntStatus_SDmaInt_1_LSB 0x3F
+#define QIB_7322_IntStatus_SDmaInt_1_MSB 0x3F
+#define QIB_7322_IntStatus_SDmaInt_1_RMASK 0x1
+#define QIB_7322_IntStatus_SDmaInt_0_LSB 0x3E
+#define QIB_7322_IntStatus_SDmaInt_0_MSB 0x3E
+#define QIB_7322_IntStatus_SDmaInt_0_RMASK 0x1
+#define QIB_7322_IntStatus_SDmaProgressInt_1_LSB 0x3D
+#define QIB_7322_IntStatus_SDmaProgressInt_1_MSB 0x3D
+#define QIB_7322_IntStatus_SDmaProgressInt_1_RMASK 0x1
+#define QIB_7322_IntStatus_SDmaProgressInt_0_LSB 0x3C
+#define QIB_7322_IntStatus_SDmaProgressInt_0_MSB 0x3C
+#define QIB_7322_IntStatus_SDmaProgressInt_0_RMASK 0x1
+#define QIB_7322_IntStatus_SDmaIdleInt_1_LSB 0x3B
+#define QIB_7322_IntStatus_SDmaIdleInt_1_MSB 0x3B
+#define QIB_7322_IntStatus_SDmaIdleInt_1_RMASK 0x1
+#define QIB_7322_IntStatus_SDmaIdleInt_0_LSB 0x3A
+#define QIB_7322_IntStatus_SDmaIdleInt_0_MSB 0x3A
+#define QIB_7322_IntStatus_SDmaIdleInt_0_RMASK 0x1
+#define QIB_7322_IntStatus_SDmaCleanupDone_1_LSB 0x39
+#define QIB_7322_IntStatus_SDmaCleanupDone_1_MSB 0x39
+#define QIB_7322_IntStatus_SDmaCleanupDone_1_RMASK 0x1
+#define QIB_7322_IntStatus_SDmaCleanupDone_0_LSB 0x38
+#define QIB_7322_IntStatus_SDmaCleanupDone_0_MSB 0x38
+#define QIB_7322_IntStatus_SDmaCleanupDone_0_RMASK 0x1
+#define QIB_7322_IntStatus_RcvUrg17_LSB 0x31
+#define QIB_7322_IntStatus_RcvUrg17_MSB 0x31
+#define QIB_7322_IntStatus_RcvUrg17_RMASK 0x1
+#define QIB_7322_IntStatus_RcvUrg16_LSB 0x30
+#define QIB_7322_IntStatus_RcvUrg16_MSB 0x30
+#define QIB_7322_IntStatus_RcvUrg16_RMASK 0x1
+#define QIB_7322_IntStatus_RcvUrg15_LSB 0x2F
+#define QIB_7322_IntStatus_RcvUrg15_MSB 0x2F
+#define QIB_7322_IntStatus_RcvUrg15_RMASK 0x1
+#define QIB_7322_IntStatus_RcvUrg14_LSB 0x2E
+#define QIB_7322_IntStatus_RcvUrg14_MSB 0x2E
+#define QIB_7322_IntStatus_RcvUrg14_RMASK 0x1
+#define QIB_7322_IntStatus_RcvUrg13_LSB 0x2D
+#define QIB_7322_IntStatus_RcvUrg13_MSB 0x2D
+#define QIB_7322_IntStatus_RcvUrg13_RMASK 0x1
+#define QIB_7322_IntStatus_RcvUrg12_LSB 0x2C
+#define QIB_7322_IntStatus_RcvUrg12_MSB 0x2C
+#define QIB_7322_IntStatus_RcvUrg12_RMASK 0x1
+#define QIB_7322_IntStatus_RcvUrg11_LSB 0x2B
+#define QIB_7322_IntStatus_RcvUrg11_MSB 0x2B
+#define QIB_7322_IntStatus_RcvUrg11_RMASK 0x1
+#define QIB_7322_IntStatus_RcvUrg10_LSB 0x2A
+#define QIB_7322_IntStatus_RcvUrg10_MSB 0x2A
+#define QIB_7322_IntStatus_RcvUrg10_RMASK 0x1
+#define QIB_7322_IntStatus_RcvUrg9_LSB 0x29
+#define QIB_7322_IntStatus_RcvUrg9_MSB 0x29
+#define QIB_7322_IntStatus_RcvUrg9_RMASK 0x1
+#define QIB_7322_IntStatus_RcvUrg8_LSB 0x28
+#define QIB_7322_IntStatus_RcvUrg8_MSB 0x28
+#define QIB_7322_IntStatus_RcvUrg8_RMASK 0x1
+#define QIB_7322_IntStatus_RcvUrg7_LSB 0x27
+#define QIB_7322_IntStatus_RcvUrg7_MSB 0x27
+#define QIB_7322_IntStatus_RcvUrg7_RMASK 0x1
+#define QIB_7322_IntStatus_RcvUrg6_LSB 0x26
+#define QIB_7322_IntStatus_RcvUrg6_MSB 0x26
+#define QIB_7322_IntStatus_RcvUrg6_RMASK 0x1
+#define QIB_7322_IntStatus_RcvUrg5_LSB 0x25
+#define QIB_7322_IntStatus_RcvUrg5_MSB 0x25
+#define QIB_7322_IntStatus_RcvUrg5_RMASK 0x1
+#define QIB_7322_IntStatus_RcvUrg4_LSB 0x24
+#define QIB_7322_IntStatus_RcvUrg4_MSB 0x24
+#define QIB_7322_IntStatus_RcvUrg4_RMASK 0x1
+#define QIB_7322_IntStatus_RcvUrg3_LSB 0x23
+#define QIB_7322_IntStatus_RcvUrg3_MSB 0x23
+#define QIB_7322_IntStatus_RcvUrg3_RMASK 0x1
+#define QIB_7322_IntStatus_RcvUrg2_LSB 0x22
+#define QIB_7322_IntStatus_RcvUrg2_MSB 0x22
+#define QIB_7322_IntStatus_RcvUrg2_RMASK 0x1
+#define QIB_7322_IntStatus_RcvUrg1_LSB 0x21
+#define QIB_7322_IntStatus_RcvUrg1_MSB 0x21
+#define QIB_7322_IntStatus_RcvUrg1_RMASK 0x1
+#define QIB_7322_IntStatus_RcvUrg0_LSB 0x20
+#define QIB_7322_IntStatus_RcvUrg0_MSB 0x20
+#define QIB_7322_IntStatus_RcvUrg0_RMASK 0x1
+#define QIB_7322_IntStatus_Err_1_LSB 0x1F
+#define QIB_7322_IntStatus_Err_1_MSB 0x1F
+#define QIB_7322_IntStatus_Err_1_RMASK 0x1
+#define QIB_7322_IntStatus_Err_0_LSB 0x1E
+#define QIB_7322_IntStatus_Err_0_MSB 0x1E
+#define QIB_7322_IntStatus_Err_0_RMASK 0x1
+#define QIB_7322_IntStatus_Err_LSB 0x1D
+#define QIB_7322_IntStatus_Err_MSB 0x1D
+#define QIB_7322_IntStatus_Err_RMASK 0x1
+#define QIB_7322_IntStatus_AssertGPIO_LSB 0x1C
+#define QIB_7322_IntStatus_AssertGPIO_MSB 0x1C
+#define QIB_7322_IntStatus_AssertGPIO_RMASK 0x1
+#define QIB_7322_IntStatus_SendDone_1_LSB 0x19
+#define QIB_7322_IntStatus_SendDone_1_MSB 0x19
+#define QIB_7322_IntStatus_SendDone_1_RMASK 0x1
+#define QIB_7322_IntStatus_SendDone_0_LSB 0x18
+#define QIB_7322_IntStatus_SendDone_0_MSB 0x18
+#define QIB_7322_IntStatus_SendDone_0_RMASK 0x1
+#define QIB_7322_IntStatus_SendBufAvail_LSB 0x17
+#define QIB_7322_IntStatus_SendBufAvail_MSB 0x17
+#define QIB_7322_IntStatus_SendBufAvail_RMASK 0x1
+#define QIB_7322_IntStatus_RcvAvail17_LSB 0x11
+#define QIB_7322_IntStatus_RcvAvail17_MSB 0x11
+#define QIB_7322_IntStatus_RcvAvail17_RMASK 0x1
+#define QIB_7322_IntStatus_RcvAvail16_LSB 0x10
+#define QIB_7322_IntStatus_RcvAvail16_MSB 0x10
+#define QIB_7322_IntStatus_RcvAvail16_RMASK 0x1
+#define QIB_7322_IntStatus_RcvAvail15_LSB 0xF
+#define QIB_7322_IntStatus_RcvAvail15_MSB 0xF
+#define QIB_7322_IntStatus_RcvAvail15_RMASK 0x1
+#define QIB_7322_IntStatus_RcvAvail14_LSB 0xE
+#define QIB_7322_IntStatus_RcvAvail14_MSB 0xE
+#define QIB_7322_IntStatus_RcvAvail14_RMASK 0x1
+#define QIB_7322_IntStatus_RcvAvail13_LSB 0xD
+#define QIB_7322_IntStatus_RcvAvail13_MSB 0xD
+#define QIB_7322_IntStatus_RcvAvail13_RMASK 0x1
+#define QIB_7322_IntStatus_RcvAvail12_LSB 0xC
+#define QIB_7322_IntStatus_RcvAvail12_MSB 0xC
+#define QIB_7322_IntStatus_RcvAvail12_RMASK 0x1
+#define QIB_7322_IntStatus_RcvAvail11_LSB 0xB
+#define QIB_7322_IntStatus_RcvAvail11_MSB 0xB
+#define QIB_7322_IntStatus_RcvAvail11_RMASK 0x1
+#define QIB_7322_IntStatus_RcvAvail10_LSB 0xA
+#define QIB_7322_IntStatus_RcvAvail10_MSB 0xA
+#define QIB_7322_IntStatus_RcvAvail10_RMASK 0x1
+#define QIB_7322_IntStatus_RcvAvail9_LSB 0x9
+#define QIB_7322_IntStatus_RcvAvail9_MSB 0x9
+#define QIB_7322_IntStatus_RcvAvail9_RMASK 0x1
+#define QIB_7322_IntStatus_RcvAvail8_LSB 0x8
+#define QIB_7322_IntStatus_RcvAvail8_MSB 0x8
+#define QIB_7322_IntStatus_RcvAvail8_RMASK 0x1
+#define QIB_7322_IntStatus_RcvAvail7_LSB 0x7
+#define QIB_7322_IntStatus_RcvAvail7_MSB 0x7
+#define QIB_7322_IntStatus_RcvAvail7_RMASK 0x1
+#define QIB_7322_IntStatus_RcvAvail6_LSB 0x6
+#define QIB_7322_IntStatus_RcvAvail6_MSB 0x6
+#define QIB_7322_IntStatus_RcvAvail6_RMASK 0x1
+#define QIB_7322_IntStatus_RcvAvail5_LSB 0x5
+#define QIB_7322_IntStatus_RcvAvail5_MSB 0x5
+#define QIB_7322_IntStatus_RcvAvail5_RMASK 0x1
+#define QIB_7322_IntStatus_RcvAvail4_LSB 0x4
+#define QIB_7322_IntStatus_RcvAvail4_MSB 0x4
+#define QIB_7322_IntStatus_RcvAvail4_RMASK 0x1
+#define QIB_7322_IntStatus_RcvAvail3_LSB 0x3
+#define QIB_7322_IntStatus_RcvAvail3_MSB 0x3
+#define QIB_7322_IntStatus_RcvAvail3_RMASK 0x1
+#define QIB_7322_IntStatus_RcvAvail2_LSB 0x2
+#define QIB_7322_IntStatus_RcvAvail2_MSB 0x2
+#define QIB_7322_IntStatus_RcvAvail2_RMASK 0x1
+#define QIB_7322_IntStatus_RcvAvail1_LSB 0x1
+#define QIB_7322_IntStatus_RcvAvail1_MSB 0x1
+#define QIB_7322_IntStatus_RcvAvail1_RMASK 0x1
+#define QIB_7322_IntStatus_RcvAvail0_LSB 0x0
+#define QIB_7322_IntStatus_RcvAvail0_MSB 0x0
+#define QIB_7322_IntStatus_RcvAvail0_RMASK 0x1
+
+#define QIB_7322_IntClear_OFFS 0x78
+#define QIB_7322_IntClear_DEF 0x0000000000000000
+#define QIB_7322_IntClear_SDmaIntClear_1_LSB 0x3F
+#define QIB_7322_IntClear_SDmaIntClear_1_MSB 0x3F
+#define QIB_7322_IntClear_SDmaIntClear_1_RMASK 0x1
+#define QIB_7322_IntClear_SDmaIntClear_0_LSB 0x3E
+#define QIB_7322_IntClear_SDmaIntClear_0_MSB 0x3E
+#define QIB_7322_IntClear_SDmaIntClear_0_RMASK 0x1
+#define QIB_7322_IntClear_SDmaProgressIntClear_1_LSB 0x3D
+#define QIB_7322_IntClear_SDmaProgressIntClear_1_MSB 0x3D
+#define QIB_7322_IntClear_SDmaProgressIntClear_1_RMASK 0x1
+#define QIB_7322_IntClear_SDmaProgressIntClear_0_LSB 0x3C
+#define QIB_7322_IntClear_SDmaProgressIntClear_0_MSB 0x3C
+#define QIB_7322_IntClear_SDmaProgressIntClear_0_RMASK 0x1
+#define QIB_7322_IntClear_SDmaIdleIntClear_1_LSB 0x3B
+#define QIB_7322_IntClear_SDmaIdleIntClear_1_MSB 0x3B
+#define QIB_7322_IntClear_SDmaIdleIntClear_1_RMASK 0x1
+#define QIB_7322_IntClear_SDmaIdleIntClear_0_LSB 0x3A
+#define QIB_7322_IntClear_SDmaIdleIntClear_0_MSB 0x3A
+#define QIB_7322_IntClear_SDmaIdleIntClear_0_RMASK 0x1
+#define QIB_7322_IntClear_SDmaCleanupDoneClear_1_LSB 0x39
+#define QIB_7322_IntClear_SDmaCleanupDoneClear_1_MSB 0x39
+#define QIB_7322_IntClear_SDmaCleanupDoneClear_1_RMASK 0x1
+#define QIB_7322_IntClear_SDmaCleanupDoneClear_0_LSB 0x38
+#define QIB_7322_IntClear_SDmaCleanupDoneClear_0_MSB 0x38
+#define QIB_7322_IntClear_SDmaCleanupDoneClear_0_RMASK 0x1
+#define QIB_7322_IntClear_RcvUrg17IntClear_LSB 0x31
+#define QIB_7322_IntClear_RcvUrg17IntClear_MSB 0x31
+#define QIB_7322_IntClear_RcvUrg17IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvUrg16IntClear_LSB 0x30
+#define QIB_7322_IntClear_RcvUrg16IntClear_MSB 0x30
+#define QIB_7322_IntClear_RcvUrg16IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvUrg15IntClear_LSB 0x2F
+#define QIB_7322_IntClear_RcvUrg15IntClear_MSB 0x2F
+#define QIB_7322_IntClear_RcvUrg15IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvUrg14IntClear_LSB 0x2E
+#define QIB_7322_IntClear_RcvUrg14IntClear_MSB 0x2E
+#define QIB_7322_IntClear_RcvUrg14IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvUrg13IntClear_LSB 0x2D
+#define QIB_7322_IntClear_RcvUrg13IntClear_MSB 0x2D
+#define QIB_7322_IntClear_RcvUrg13IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvUrg12IntClear_LSB 0x2C
+#define QIB_7322_IntClear_RcvUrg12IntClear_MSB 0x2C
+#define QIB_7322_IntClear_RcvUrg12IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvUrg11IntClear_LSB 0x2B
+#define QIB_7322_IntClear_RcvUrg11IntClear_MSB 0x2B
+#define QIB_7322_IntClear_RcvUrg11IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvUrg10IntClear_LSB 0x2A
+#define QIB_7322_IntClear_RcvUrg10IntClear_MSB 0x2A
+#define QIB_7322_IntClear_RcvUrg10IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvUrg9IntClear_LSB 0x29
+#define QIB_7322_IntClear_RcvUrg9IntClear_MSB 0x29
+#define QIB_7322_IntClear_RcvUrg9IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvUrg8IntClear_LSB 0x28
+#define QIB_7322_IntClear_RcvUrg8IntClear_MSB 0x28
+#define QIB_7322_IntClear_RcvUrg8IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvUrg7IntClear_LSB 0x27
+#define QIB_7322_IntClear_RcvUrg7IntClear_MSB 0x27
+#define QIB_7322_IntClear_RcvUrg7IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvUrg6IntClear_LSB 0x26
+#define QIB_7322_IntClear_RcvUrg6IntClear_MSB 0x26
+#define QIB_7322_IntClear_RcvUrg6IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvUrg5IntClear_LSB 0x25
+#define QIB_7322_IntClear_RcvUrg5IntClear_MSB 0x25
+#define QIB_7322_IntClear_RcvUrg5IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvUrg4IntClear_LSB 0x24
+#define QIB_7322_IntClear_RcvUrg4IntClear_MSB 0x24
+#define QIB_7322_IntClear_RcvUrg4IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvUrg3IntClear_LSB 0x23
+#define QIB_7322_IntClear_RcvUrg3IntClear_MSB 0x23
+#define QIB_7322_IntClear_RcvUrg3IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvUrg2IntClear_LSB 0x22
+#define QIB_7322_IntClear_RcvUrg2IntClear_MSB 0x22
+#define QIB_7322_IntClear_RcvUrg2IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvUrg1IntClear_LSB 0x21
+#define QIB_7322_IntClear_RcvUrg1IntClear_MSB 0x21
+#define QIB_7322_IntClear_RcvUrg1IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvUrg0IntClear_LSB 0x20
+#define QIB_7322_IntClear_RcvUrg0IntClear_MSB 0x20
+#define QIB_7322_IntClear_RcvUrg0IntClear_RMASK 0x1
+#define QIB_7322_IntClear_ErrIntClear_1_LSB 0x1F
+#define QIB_7322_IntClear_ErrIntClear_1_MSB 0x1F
+#define QIB_7322_IntClear_ErrIntClear_1_RMASK 0x1
+#define QIB_7322_IntClear_ErrIntClear_0_LSB 0x1E
+#define QIB_7322_IntClear_ErrIntClear_0_MSB 0x1E
+#define QIB_7322_IntClear_ErrIntClear_0_RMASK 0x1
+#define QIB_7322_IntClear_ErrIntClear_LSB 0x1D
+#define QIB_7322_IntClear_ErrIntClear_MSB 0x1D
+#define QIB_7322_IntClear_ErrIntClear_RMASK 0x1
+#define QIB_7322_IntClear_AssertGPIOIntClear_LSB 0x1C
+#define QIB_7322_IntClear_AssertGPIOIntClear_MSB 0x1C
+#define QIB_7322_IntClear_AssertGPIOIntClear_RMASK 0x1
+#define QIB_7322_IntClear_SendDoneIntClear_1_LSB 0x19
+#define QIB_7322_IntClear_SendDoneIntClear_1_MSB 0x19
+#define QIB_7322_IntClear_SendDoneIntClear_1_RMASK 0x1
+#define QIB_7322_IntClear_SendDoneIntClear_0_LSB 0x18
+#define QIB_7322_IntClear_SendDoneIntClear_0_MSB 0x18
+#define QIB_7322_IntClear_SendDoneIntClear_0_RMASK 0x1
+#define QIB_7322_IntClear_SendBufAvailIntClear_LSB 0x17
+#define QIB_7322_IntClear_SendBufAvailIntClear_MSB 0x17
+#define QIB_7322_IntClear_SendBufAvailIntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvAvail17IntClear_LSB 0x11
+#define QIB_7322_IntClear_RcvAvail17IntClear_MSB 0x11
+#define QIB_7322_IntClear_RcvAvail17IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvAvail16IntClear_LSB 0x10
+#define QIB_7322_IntClear_RcvAvail16IntClear_MSB 0x10
+#define QIB_7322_IntClear_RcvAvail16IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvAvail15IntClear_LSB 0xF
+#define QIB_7322_IntClear_RcvAvail15IntClear_MSB 0xF
+#define QIB_7322_IntClear_RcvAvail15IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvAvail14IntClear_LSB 0xE
+#define QIB_7322_IntClear_RcvAvail14IntClear_MSB 0xE
+#define QIB_7322_IntClear_RcvAvail14IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvAvail13IntClear_LSB 0xD
+#define QIB_7322_IntClear_RcvAvail13IntClear_MSB 0xD
+#define QIB_7322_IntClear_RcvAvail13IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvAvail12IntClear_LSB 0xC
+#define QIB_7322_IntClear_RcvAvail12IntClear_MSB 0xC
+#define QIB_7322_IntClear_RcvAvail12IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvAvail11IntClear_LSB 0xB
+#define QIB_7322_IntClear_RcvAvail11IntClear_MSB 0xB
+#define QIB_7322_IntClear_RcvAvail11IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvAvail10IntClear_LSB 0xA
+#define QIB_7322_IntClear_RcvAvail10IntClear_MSB 0xA
+#define QIB_7322_IntClear_RcvAvail10IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvAvail9IntClear_LSB 0x9
+#define QIB_7322_IntClear_RcvAvail9IntClear_MSB 0x9
+#define QIB_7322_IntClear_RcvAvail9IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvAvail8IntClear_LSB 0x8
+#define QIB_7322_IntClear_RcvAvail8IntClear_MSB 0x8
+#define QIB_7322_IntClear_RcvAvail8IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvAvail7IntClear_LSB 0x7
+#define QIB_7322_IntClear_RcvAvail7IntClear_MSB 0x7
+#define QIB_7322_IntClear_RcvAvail7IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvAvail6IntClear_LSB 0x6
+#define QIB_7322_IntClear_RcvAvail6IntClear_MSB 0x6
+#define QIB_7322_IntClear_RcvAvail6IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvAvail5IntClear_LSB 0x5
+#define QIB_7322_IntClear_RcvAvail5IntClear_MSB 0x5
+#define QIB_7322_IntClear_RcvAvail5IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvAvail4IntClear_LSB 0x4
+#define QIB_7322_IntClear_RcvAvail4IntClear_MSB 0x4
+#define QIB_7322_IntClear_RcvAvail4IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvAvail3IntClear_LSB 0x3
+#define QIB_7322_IntClear_RcvAvail3IntClear_MSB 0x3
+#define QIB_7322_IntClear_RcvAvail3IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvAvail2IntClear_LSB 0x2
+#define QIB_7322_IntClear_RcvAvail2IntClear_MSB 0x2
+#define QIB_7322_IntClear_RcvAvail2IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvAvail1IntClear_LSB 0x1
+#define QIB_7322_IntClear_RcvAvail1IntClear_MSB 0x1
+#define QIB_7322_IntClear_RcvAvail1IntClear_RMASK 0x1
+#define QIB_7322_IntClear_RcvAvail0IntClear_LSB 0x0
+#define QIB_7322_IntClear_RcvAvail0IntClear_MSB 0x0
+#define QIB_7322_IntClear_RcvAvail0IntClear_RMASK 0x1
+
+#define QIB_7322_ErrMask_OFFS 0x80
+#define QIB_7322_ErrMask_DEF 0x0000000000000000
+#define QIB_7322_ErrMask_ResetNegatedMask_LSB 0x3F
+#define QIB_7322_ErrMask_ResetNegatedMask_MSB 0x3F
+#define QIB_7322_ErrMask_ResetNegatedMask_RMASK 0x1
+#define QIB_7322_ErrMask_HardwareErrMask_LSB 0x3E
+#define QIB_7322_ErrMask_HardwareErrMask_MSB 0x3E
+#define QIB_7322_ErrMask_HardwareErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_InvalidAddrErrMask_LSB 0x3D
+#define QIB_7322_ErrMask_InvalidAddrErrMask_MSB 0x3D
+#define QIB_7322_ErrMask_InvalidAddrErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_SDmaVL15ErrMask_LSB 0x38
+#define QIB_7322_ErrMask_SDmaVL15ErrMask_MSB 0x38
+#define QIB_7322_ErrMask_SDmaVL15ErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_SBufVL15MisUseErrMask_LSB 0x37
+#define QIB_7322_ErrMask_SBufVL15MisUseErrMask_MSB 0x37
+#define QIB_7322_ErrMask_SBufVL15MisUseErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_InvalidEEPCmdMask_LSB 0x35
+#define QIB_7322_ErrMask_InvalidEEPCmdMask_MSB 0x35
+#define QIB_7322_ErrMask_InvalidEEPCmdMask_RMASK 0x1
+#define QIB_7322_ErrMask_RcvContextShareErrMask_LSB 0x34
+#define QIB_7322_ErrMask_RcvContextShareErrMask_MSB 0x34
+#define QIB_7322_ErrMask_RcvContextShareErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_SendVLMismatchErrMask_LSB 0x24
+#define QIB_7322_ErrMask_SendVLMismatchErrMask_MSB 0x24
+#define QIB_7322_ErrMask_SendVLMismatchErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_SendArmLaunchErrMask_LSB 0x23
+#define QIB_7322_ErrMask_SendArmLaunchErrMask_MSB 0x23
+#define QIB_7322_ErrMask_SendArmLaunchErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_SendSpecialTriggerErrMask_LSB 0x1B
+#define QIB_7322_ErrMask_SendSpecialTriggerErrMask_MSB 0x1B
+#define QIB_7322_ErrMask_SendSpecialTriggerErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_SDmaWrongPortErrMask_LSB 0x1A
+#define QIB_7322_ErrMask_SDmaWrongPortErrMask_MSB 0x1A
+#define QIB_7322_ErrMask_SDmaWrongPortErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_SDmaBufMaskDuplicateErrMask_LSB 0x19
+#define QIB_7322_ErrMask_SDmaBufMaskDuplicateErrMask_MSB 0x19
+#define QIB_7322_ErrMask_SDmaBufMaskDuplicateErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_RcvHdrFullErrMask_LSB 0xD
+#define QIB_7322_ErrMask_RcvHdrFullErrMask_MSB 0xD
+#define QIB_7322_ErrMask_RcvHdrFullErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_RcvEgrFullErrMask_LSB 0xC
+#define QIB_7322_ErrMask_RcvEgrFullErrMask_MSB 0xC
+#define QIB_7322_ErrMask_RcvEgrFullErrMask_RMASK 0x1
+
+#define QIB_7322_ErrStatus_OFFS 0x88
+#define QIB_7322_ErrStatus_DEF 0x0000000000000000
+#define QIB_7322_ErrStatus_ResetNegated_LSB 0x3F
+#define QIB_7322_ErrStatus_ResetNegated_MSB 0x3F
+#define QIB_7322_ErrStatus_ResetNegated_RMASK 0x1
+#define QIB_7322_ErrStatus_HardwareErr_LSB 0x3E
+#define QIB_7322_ErrStatus_HardwareErr_MSB 0x3E
+#define QIB_7322_ErrStatus_HardwareErr_RMASK 0x1
+#define QIB_7322_ErrStatus_InvalidAddrErr_LSB 0x3D
+#define QIB_7322_ErrStatus_InvalidAddrErr_MSB 0x3D
+#define QIB_7322_ErrStatus_InvalidAddrErr_RMASK 0x1
+#define QIB_7322_ErrStatus_SDmaVL15Err_LSB 0x38
+#define QIB_7322_ErrStatus_SDmaVL15Err_MSB 0x38
+#define QIB_7322_ErrStatus_SDmaVL15Err_RMASK 0x1
+#define QIB_7322_ErrStatus_SBufVL15MisUseErr_LSB 0x37
+#define QIB_7322_ErrStatus_SBufVL15MisUseErr_MSB 0x37
+#define QIB_7322_ErrStatus_SBufVL15MisUseErr_RMASK 0x1
+#define QIB_7322_ErrStatus_InvalidEEPCmdErr_LSB 0x35
+#define QIB_7322_ErrStatus_InvalidEEPCmdErr_MSB 0x35
+#define QIB_7322_ErrStatus_InvalidEEPCmdErr_RMASK 0x1
+#define QIB_7322_ErrStatus_RcvContextShareErr_LSB 0x34
+#define QIB_7322_ErrStatus_RcvContextShareErr_MSB 0x34
+#define QIB_7322_ErrStatus_RcvContextShareErr_RMASK 0x1
+#define QIB_7322_ErrStatus_SendVLMismatchErr_LSB 0x24
+#define QIB_7322_ErrStatus_SendVLMismatchErr_MSB 0x24
+#define QIB_7322_ErrStatus_SendVLMismatchErr_RMASK 0x1
+#define QIB_7322_ErrStatus_SendArmLaunchErr_LSB 0x23
+#define QIB_7322_ErrStatus_SendArmLaunchErr_MSB 0x23
+#define QIB_7322_ErrStatus_SendArmLaunchErr_RMASK 0x1
+#define QIB_7322_ErrStatus_SendSpecialTriggerErr_LSB 0x1B
+#define QIB_7322_ErrStatus_SendSpecialTriggerErr_MSB 0x1B
+#define QIB_7322_ErrStatus_SendSpecialTriggerErr_RMASK 0x1
+#define QIB_7322_ErrStatus_SDmaWrongPortErr_LSB 0x1A
+#define QIB_7322_ErrStatus_SDmaWrongPortErr_MSB 0x1A
+#define QIB_7322_ErrStatus_SDmaWrongPortErr_RMASK 0x1
+#define QIB_7322_ErrStatus_SDmaBufMaskDuplicateErr_LSB 0x19
+#define QIB_7322_ErrStatus_SDmaBufMaskDuplicateErr_MSB 0x19
+#define QIB_7322_ErrStatus_SDmaBufMaskDuplicateErr_RMASK 0x1
+#define QIB_7322_ErrStatus_RcvHdrFullErr_LSB 0xD
+#define QIB_7322_ErrStatus_RcvHdrFullErr_MSB 0xD
+#define QIB_7322_ErrStatus_RcvHdrFullErr_RMASK 0x1
+#define QIB_7322_ErrStatus_RcvEgrFullErr_LSB 0xC
+#define QIB_7322_ErrStatus_RcvEgrFullErr_MSB 0xC
+#define QIB_7322_ErrStatus_RcvEgrFullErr_RMASK 0x1
+
+#define QIB_7322_ErrClear_OFFS 0x90
+#define QIB_7322_ErrClear_DEF 0x0000000000000000
+#define QIB_7322_ErrClear_ResetNegatedClear_LSB 0x3F
+#define QIB_7322_ErrClear_ResetNegatedClear_MSB 0x3F
+#define QIB_7322_ErrClear_ResetNegatedClear_RMASK 0x1
+#define QIB_7322_ErrClear_HardwareErrClear_LSB 0x3E
+#define QIB_7322_ErrClear_HardwareErrClear_MSB 0x3E
+#define QIB_7322_ErrClear_HardwareErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_InvalidAddrErrClear_LSB 0x3D
+#define QIB_7322_ErrClear_InvalidAddrErrClear_MSB 0x3D
+#define QIB_7322_ErrClear_InvalidAddrErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_SDmaVL15ErrClear_LSB 0x38
+#define QIB_7322_ErrClear_SDmaVL15ErrClear_MSB 0x38
+#define QIB_7322_ErrClear_SDmaVL15ErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_SBufVL15MisUseErrClear_LSB 0x37
+#define QIB_7322_ErrClear_SBufVL15MisUseErrClear_MSB 0x37
+#define QIB_7322_ErrClear_SBufVL15MisUseErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_InvalidEEPCmdErrClear_LSB 0x35
+#define QIB_7322_ErrClear_InvalidEEPCmdErrClear_MSB 0x35
+#define QIB_7322_ErrClear_InvalidEEPCmdErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_RcvContextShareErrClear_LSB 0x34
+#define QIB_7322_ErrClear_RcvContextShareErrClear_MSB 0x34
+#define QIB_7322_ErrClear_RcvContextShareErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_SendVLMismatchErrMask_LSB 0x24
+#define QIB_7322_ErrClear_SendVLMismatchErrMask_MSB 0x24
+#define QIB_7322_ErrClear_SendVLMismatchErrMask_RMASK 0x1
+#define QIB_7322_ErrClear_SendArmLaunchErrClear_LSB 0x23
+#define QIB_7322_ErrClear_SendArmLaunchErrClear_MSB 0x23
+#define QIB_7322_ErrClear_SendArmLaunchErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_SendSpecialTriggerErrClear_LSB 0x1B
+#define QIB_7322_ErrClear_SendSpecialTriggerErrClear_MSB 0x1B
+#define QIB_7322_ErrClear_SendSpecialTriggerErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_SDmaWrongPortErrClear_LSB 0x1A
+#define QIB_7322_ErrClear_SDmaWrongPortErrClear_MSB 0x1A
+#define QIB_7322_ErrClear_SDmaWrongPortErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_SDmaBufMaskDuplicateErrClear_LSB 0x19
+#define QIB_7322_ErrClear_SDmaBufMaskDuplicateErrClear_MSB 0x19
+#define QIB_7322_ErrClear_SDmaBufMaskDuplicateErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_RcvHdrFullErrClear_LSB 0xD
+#define QIB_7322_ErrClear_RcvHdrFullErrClear_MSB 0xD
+#define QIB_7322_ErrClear_RcvHdrFullErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_RcvEgrFullErrClear_LSB 0xC
+#define QIB_7322_ErrClear_RcvEgrFullErrClear_MSB 0xC
+#define QIB_7322_ErrClear_RcvEgrFullErrClear_RMASK 0x1
+
+#define QIB_7322_HwErrMask_OFFS 0x98
+#define QIB_7322_HwErrMask_DEF 0x0000000000000000
+#define QIB_7322_HwErrMask_IBSerdesPClkNotDetectMask_1_LSB 0x3F
+#define QIB_7322_HwErrMask_IBSerdesPClkNotDetectMask_1_MSB 0x3F
+#define QIB_7322_HwErrMask_IBSerdesPClkNotDetectMask_1_RMASK 0x1
+#define QIB_7322_HwErrMask_IBSerdesPClkNotDetectMask_0_LSB 0x3E
+#define QIB_7322_HwErrMask_IBSerdesPClkNotDetectMask_0_MSB 0x3E
+#define QIB_7322_HwErrMask_IBSerdesPClkNotDetectMask_0_RMASK 0x1
+#define QIB_7322_HwErrMask_PCIESerdesPClkNotDetectMask_LSB 0x37
+#define QIB_7322_HwErrMask_PCIESerdesPClkNotDetectMask_MSB 0x37
+#define QIB_7322_HwErrMask_PCIESerdesPClkNotDetectMask_RMASK 0x1
+#define QIB_7322_HwErrMask_PowerOnBISTFailedMask_LSB 0x36
+#define QIB_7322_HwErrMask_PowerOnBISTFailedMask_MSB 0x36
+#define QIB_7322_HwErrMask_PowerOnBISTFailedMask_RMASK 0x1
+#define QIB_7322_HwErrMask_TempsenseTholdReachedMask_LSB 0x35
+#define QIB_7322_HwErrMask_TempsenseTholdReachedMask_MSB 0x35
+#define QIB_7322_HwErrMask_TempsenseTholdReachedMask_RMASK 0x1
+#define QIB_7322_HwErrMask_MemoryErrMask_LSB 0x30
+#define QIB_7322_HwErrMask_MemoryErrMask_MSB 0x30
+#define QIB_7322_HwErrMask_MemoryErrMask_RMASK 0x1
+#define QIB_7322_HwErrMask_pcie_phy_txParityErr_LSB 0x22
+#define QIB_7322_HwErrMask_pcie_phy_txParityErr_MSB 0x22
+#define QIB_7322_HwErrMask_pcie_phy_txParityErr_RMASK 0x1
+#define QIB_7322_HwErrMask_PCIeBusParityErrMask_LSB 0x1F
+#define QIB_7322_HwErrMask_PCIeBusParityErrMask_MSB 0x21
+#define QIB_7322_HwErrMask_PCIeBusParityErrMask_RMASK 0x7
+#define QIB_7322_HwErrMask_PcieCplTimeoutMask_LSB 0x1E
+#define QIB_7322_HwErrMask_PcieCplTimeoutMask_MSB 0x1E
+#define QIB_7322_HwErrMask_PcieCplTimeoutMask_RMASK 0x1
+#define QIB_7322_HwErrMask_PciePoisonedTLPMask_LSB 0x1D
+#define QIB_7322_HwErrMask_PciePoisonedTLPMask_MSB 0x1D
+#define QIB_7322_HwErrMask_PciePoisonedTLPMask_RMASK 0x1
+#define QIB_7322_HwErrMask_SDmaMemReadErrMask_1_LSB 0x1C
+#define QIB_7322_HwErrMask_SDmaMemReadErrMask_1_MSB 0x1C
+#define QIB_7322_HwErrMask_SDmaMemReadErrMask_1_RMASK 0x1
+#define QIB_7322_HwErrMask_SDmaMemReadErrMask_0_LSB 0x1B
+#define QIB_7322_HwErrMask_SDmaMemReadErrMask_0_MSB 0x1B
+#define QIB_7322_HwErrMask_SDmaMemReadErrMask_0_RMASK 0x1
+#define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_1_LSB 0xF
+#define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_1_MSB 0xF
+#define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_1_RMASK 0x1
+#define QIB_7322_HwErrMask_statusValidNoEopMask_1_LSB 0xE
+#define QIB_7322_HwErrMask_statusValidNoEopMask_1_MSB 0xE
+#define QIB_7322_HwErrMask_statusValidNoEopMask_1_RMASK 0x1
+#define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_0_LSB 0xD
+#define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_0_MSB 0xD
+#define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_0_RMASK 0x1
+#define QIB_7322_HwErrMask_statusValidNoEopMask_0_LSB 0xC
+#define QIB_7322_HwErrMask_statusValidNoEopMask_0_MSB 0xC
+#define QIB_7322_HwErrMask_statusValidNoEopMask_0_RMASK 0x1
+#define QIB_7322_HwErrMask_LATriggeredMask_LSB 0xB
+#define QIB_7322_HwErrMask_LATriggeredMask_MSB 0xB
+#define QIB_7322_HwErrMask_LATriggeredMask_RMASK 0x1
+
+#define QIB_7322_HwErrStatus_OFFS 0xA0
+#define QIB_7322_HwErrStatus_DEF 0x0000000000000000
+#define QIB_7322_HwErrStatus_IBSerdesPClkNotDetect_1_LSB 0x3F
+#define QIB_7322_HwErrStatus_IBSerdesPClkNotDetect_1_MSB 0x3F
+#define QIB_7322_HwErrStatus_IBSerdesPClkNotDetect_1_RMASK 0x1
+#define QIB_7322_HwErrStatus_IBSerdesPClkNotDetect_0_LSB 0x3E
+#define QIB_7322_HwErrStatus_IBSerdesPClkNotDetect_0_MSB 0x3E
+#define QIB_7322_HwErrStatus_IBSerdesPClkNotDetect_0_RMASK 0x1
+#define QIB_7322_HwErrStatus_PCIESerdesPClkNotDetect_LSB 0x37
+#define QIB_7322_HwErrStatus_PCIESerdesPClkNotDetect_MSB 0x37
+#define QIB_7322_HwErrStatus_PCIESerdesPClkNotDetect_RMASK 0x1
+#define QIB_7322_HwErrStatus_PowerOnBISTFailed_LSB 0x36
+#define QIB_7322_HwErrStatus_PowerOnBISTFailed_MSB 0x36
+#define QIB_7322_HwErrStatus_PowerOnBISTFailed_RMASK 0x1
+#define QIB_7322_HwErrStatus_TempsenseTholdReached_LSB 0x35
+#define QIB_7322_HwErrStatus_TempsenseTholdReached_MSB 0x35
+#define QIB_7322_HwErrStatus_TempsenseTholdReached_RMASK 0x1
+#define QIB_7322_HwErrStatus_MemoryErr_LSB 0x30
+#define QIB_7322_HwErrStatus_MemoryErr_MSB 0x30
+#define QIB_7322_HwErrStatus_MemoryErr_RMASK 0x1
+#define QIB_7322_HwErrStatus_pcie_phy_txParityErr_LSB 0x22
+#define QIB_7322_HwErrStatus_pcie_phy_txParityErr_MSB 0x22
+#define QIB_7322_HwErrStatus_pcie_phy_txParityErr_RMASK 0x1
+#define QIB_7322_HwErrStatus_PCIeBusParity_LSB 0x1F
+#define QIB_7322_HwErrStatus_PCIeBusParity_MSB 0x21
+#define QIB_7322_HwErrStatus_PCIeBusParity_RMASK 0x7
+#define QIB_7322_HwErrStatus_PcieCplTimeout_LSB 0x1E
+#define QIB_7322_HwErrStatus_PcieCplTimeout_MSB 0x1E
+#define QIB_7322_HwErrStatus_PcieCplTimeout_RMASK 0x1
+#define QIB_7322_HwErrStatus_PciePoisonedTLP_LSB 0x1D
+#define QIB_7322_HwErrStatus_PciePoisonedTLP_MSB 0x1D
+#define QIB_7322_HwErrStatus_PciePoisonedTLP_RMASK 0x1
+#define QIB_7322_HwErrStatus_SDmaMemReadErr_1_LSB 0x1C
+#define QIB_7322_HwErrStatus_SDmaMemReadErr_1_MSB 0x1C
+#define QIB_7322_HwErrStatus_SDmaMemReadErr_1_RMASK 0x1
+#define QIB_7322_HwErrStatus_SDmaMemReadErr_0_LSB 0x1B
+#define QIB_7322_HwErrStatus_SDmaMemReadErr_0_MSB 0x1B
+#define QIB_7322_HwErrStatus_SDmaMemReadErr_0_RMASK 0x1
+#define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_1_LSB 0xF
+#define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_1_MSB 0xF
+#define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_1_RMASK 0x1
+#define QIB_7322_HwErrStatus_statusValidNoEop_1_LSB 0xE
+#define QIB_7322_HwErrStatus_statusValidNoEop_1_MSB 0xE
+#define QIB_7322_HwErrStatus_statusValidNoEop_1_RMASK 0x1
+#define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_0_LSB 0xD
+#define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_0_MSB 0xD
+#define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_0_RMASK 0x1
+#define QIB_7322_HwErrStatus_statusValidNoEop_0_LSB 0xC
+#define QIB_7322_HwErrStatus_statusValidNoEop_0_MSB 0xC
+#define QIB_7322_HwErrStatus_statusValidNoEop_0_RMASK 0x1
+#define QIB_7322_HwErrStatus_LATriggered_LSB 0xB
+#define QIB_7322_HwErrStatus_LATriggered_MSB 0xB
+#define QIB_7322_HwErrStatus_LATriggered_RMASK 0x1
+
+#define QIB_7322_HwErrClear_OFFS 0xA8
+#define QIB_7322_HwErrClear_DEF 0x0000000000000000
+#define QIB_7322_HwErrClear_IBSerdesPClkNotDetectClear_1_LSB 0x3F
+#define QIB_7322_HwErrClear_IBSerdesPClkNotDetectClear_1_MSB 0x3F
+#define QIB_7322_HwErrClear_IBSerdesPClkNotDetectClear_1_RMASK 0x1
+#define QIB_7322_HwErrClear_IBSerdesPClkNotDetectClear_0_LSB 0x3E
+#define QIB_7322_HwErrClear_IBSerdesPClkNotDetectClear_0_MSB 0x3E
+#define QIB_7322_HwErrClear_IBSerdesPClkNotDetectClear_0_RMASK 0x1
+#define QIB_7322_HwErrClear_PCIESerdesPClkNotDetectClear_LSB 0x37
+#define QIB_7322_HwErrClear_PCIESerdesPClkNotDetectClear_MSB 0x37
+#define QIB_7322_HwErrClear_PCIESerdesPClkNotDetectClear_RMASK 0x1
+#define QIB_7322_HwErrClear_PowerOnBISTFailedClear_LSB 0x36
+#define QIB_7322_HwErrClear_PowerOnBISTFailedClear_MSB 0x36
+#define QIB_7322_HwErrClear_PowerOnBISTFailedClear_RMASK 0x1
+#define QIB_7322_HwErrClear_TempsenseTholdReachedClear_LSB 0x35
+#define QIB_7322_HwErrClear_TempsenseTholdReachedClear_MSB 0x35
+#define QIB_7322_HwErrClear_TempsenseTholdReachedClear_RMASK 0x1
+#define QIB_7322_HwErrClear_MemoryErrClear_LSB 0x30
+#define QIB_7322_HwErrClear_MemoryErrClear_MSB 0x30
+#define QIB_7322_HwErrClear_MemoryErrClear_RMASK 0x1
+#define QIB_7322_HwErrClear_pcie_phy_txParityErr_LSB 0x22
+#define QIB_7322_HwErrClear_pcie_phy_txParityErr_MSB 0x22
+#define QIB_7322_HwErrClear_pcie_phy_txParityErr_RMASK 0x1
+#define QIB_7322_HwErrClear_PCIeBusParityClear_LSB 0x1F
+#define QIB_7322_HwErrClear_PCIeBusParityClear_MSB 0x21
+#define QIB_7322_HwErrClear_PCIeBusParityClear_RMASK 0x7
+#define QIB_7322_HwErrClear_PcieCplTimeoutClear_LSB 0x1E
+#define QIB_7322_HwErrClear_PcieCplTimeoutClear_MSB 0x1E
+#define QIB_7322_HwErrClear_PcieCplTimeoutClear_RMASK 0x1
+#define QIB_7322_HwErrClear_PciePoisonedTLPClear_LSB 0x1D
+#define QIB_7322_HwErrClear_PciePoisonedTLPClear_MSB 0x1D
+#define QIB_7322_HwErrClear_PciePoisonedTLPClear_RMASK 0x1
+#define QIB_7322_HwErrClear_SDmaMemReadErrClear_1_LSB 0x1C
+#define QIB_7322_HwErrClear_SDmaMemReadErrClear_1_MSB 0x1C
+#define QIB_7322_HwErrClear_SDmaMemReadErrClear_1_RMASK 0x1
+#define QIB_7322_HwErrClear_SDmaMemReadErrClear_0_LSB 0x1B
+#define QIB_7322_HwErrClear_SDmaMemReadErrClear_0_MSB 0x1B
+#define QIB_7322_HwErrClear_SDmaMemReadErrClear_0_RMASK 0x1
+#define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_1_LSB 0xF
+#define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_1_MSB 0xF
+#define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_1_RMASK 0x1
+#define QIB_7322_HwErrClear_IBCBusToSPCparityErrClear_1_LSB 0xE
+#define QIB_7322_HwErrClear_IBCBusToSPCparityErrClear_1_MSB 0xE
+#define QIB_7322_HwErrClear_IBCBusToSPCparityErrClear_1_RMASK 0x1
+#define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_0_LSB 0xD
+#define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_0_MSB 0xD
+#define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_0_RMASK 0x1
+#define QIB_7322_HwErrClear_IBCBusToSPCparityErrClear_0_LSB 0xC
+#define QIB_7322_HwErrClear_IBCBusToSPCparityErrClear_0_MSB 0xC
+#define QIB_7322_HwErrClear_IBCBusToSPCparityErrClear_0_RMASK 0x1
+#define QIB_7322_HwErrClear_LATriggeredClear_LSB 0xB
+#define QIB_7322_HwErrClear_LATriggeredClear_MSB 0xB
+#define QIB_7322_HwErrClear_LATriggeredClear_RMASK 0x1
+
+#define QIB_7322_HwDiagCtrl_OFFS 0xB0
+#define QIB_7322_HwDiagCtrl_DEF 0x0000000000000000
+#define QIB_7322_HwDiagCtrl_Diagnostic_LSB 0x3F
+#define QIB_7322_HwDiagCtrl_Diagnostic_MSB 0x3F
+#define QIB_7322_HwDiagCtrl_Diagnostic_RMASK 0x1
+#define QIB_7322_HwDiagCtrl_CounterWrEnable_LSB 0x3D
+#define QIB_7322_HwDiagCtrl_CounterWrEnable_MSB 0x3D
+#define QIB_7322_HwDiagCtrl_CounterWrEnable_RMASK 0x1
+#define QIB_7322_HwDiagCtrl_CounterDisable_LSB 0x3C
+#define QIB_7322_HwDiagCtrl_CounterDisable_MSB 0x3C
+#define QIB_7322_HwDiagCtrl_CounterDisable_RMASK 0x1
+#define QIB_7322_HwDiagCtrl_forcePCIeBusParity_LSB 0x1F
+#define QIB_7322_HwDiagCtrl_forcePCIeBusParity_MSB 0x22
+#define QIB_7322_HwDiagCtrl_forcePCIeBusParity_RMASK 0xF
+#define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_1_LSB 0xF
+#define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_1_MSB 0xF
+#define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_1_RMASK 0x1
+#define QIB_7322_HwDiagCtrl_ForcestatusValidNoEop_1_LSB 0xE
+#define QIB_7322_HwDiagCtrl_ForcestatusValidNoEop_1_MSB 0xE
+#define QIB_7322_HwDiagCtrl_ForcestatusValidNoEop_1_RMASK 0x1
+#define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_0_LSB 0xD
+#define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_0_MSB 0xD
+#define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_0_RMASK 0x1
+#define QIB_7322_HwDiagCtrl_ForcestatusValidNoEop_0_LSB 0xC
+#define QIB_7322_HwDiagCtrl_ForcestatusValidNoEop_0_MSB 0xC
+#define QIB_7322_HwDiagCtrl_ForcestatusValidNoEop_0_RMASK 0x1
+
+#define QIB_7322_EXTStatus_OFFS 0xC0
+#define QIB_7322_EXTStatus_DEF 0x000000000000X000
+#define QIB_7322_EXTStatus_GPIOIn_LSB 0x30
+#define QIB_7322_EXTStatus_GPIOIn_MSB 0x3F
+#define QIB_7322_EXTStatus_GPIOIn_RMASK 0xFFFF
+#define QIB_7322_EXTStatus_MemBISTDisabled_LSB 0xF
+#define QIB_7322_EXTStatus_MemBISTDisabled_MSB 0xF
+#define QIB_7322_EXTStatus_MemBISTDisabled_RMASK 0x1
+#define QIB_7322_EXTStatus_MemBISTEndTest_LSB 0xE
+#define QIB_7322_EXTStatus_MemBISTEndTest_MSB 0xE
+#define QIB_7322_EXTStatus_MemBISTEndTest_RMASK 0x1
+
+#define QIB_7322_EXTCtrl_OFFS 0xC8
+#define QIB_7322_EXTCtrl_DEF 0x0000000000000000
+#define QIB_7322_EXTCtrl_GPIOOe_LSB 0x30
+#define QIB_7322_EXTCtrl_GPIOOe_MSB 0x3F
+#define QIB_7322_EXTCtrl_GPIOOe_RMASK 0xFFFF
+#define QIB_7322_EXTCtrl_GPIOInvert_LSB 0x20
+#define QIB_7322_EXTCtrl_GPIOInvert_MSB 0x2F
+#define QIB_7322_EXTCtrl_GPIOInvert_RMASK 0xFFFF
+#define QIB_7322_EXTCtrl_LEDPort1GreenOn_LSB 0x3
+#define QIB_7322_EXTCtrl_LEDPort1GreenOn_MSB 0x3
+#define QIB_7322_EXTCtrl_LEDPort1GreenOn_RMASK 0x1
+#define QIB_7322_EXTCtrl_LEDPort1YellowOn_LSB 0x2
+#define QIB_7322_EXTCtrl_LEDPort1YellowOn_MSB 0x2
+#define QIB_7322_EXTCtrl_LEDPort1YellowOn_RMASK 0x1
+#define QIB_7322_EXTCtrl_LEDPort0GreenOn_LSB 0x1
+#define QIB_7322_EXTCtrl_LEDPort0GreenOn_MSB 0x1
+#define QIB_7322_EXTCtrl_LEDPort0GreenOn_RMASK 0x1
+#define QIB_7322_EXTCtrl_LEDPort0YellowOn_LSB 0x0
+#define QIB_7322_EXTCtrl_LEDPort0YellowOn_MSB 0x0
+#define QIB_7322_EXTCtrl_LEDPort0YellowOn_RMASK 0x1
+
+#define QIB_7322_GPIOOut_OFFS 0xE0
+#define QIB_7322_GPIOOut_DEF 0x0000000000000000
+
+#define QIB_7322_GPIOMask_OFFS 0xE8
+#define QIB_7322_GPIOMask_DEF 0x0000000000000000
+
+#define QIB_7322_GPIOStatus_OFFS 0xF0
+#define QIB_7322_GPIOStatus_DEF 0x0000000000000000
+
+#define QIB_7322_GPIOClear_OFFS 0xF8
+#define QIB_7322_GPIOClear_DEF 0x0000000000000000
+
+#define QIB_7322_RcvCtrl_OFFS 0x100
+#define QIB_7322_RcvCtrl_DEF 0x0000000000000000
+#define QIB_7322_RcvCtrl_TidReDirect_LSB 0x30
+#define QIB_7322_RcvCtrl_TidReDirect_MSB 0x3F
+#define QIB_7322_RcvCtrl_TidReDirect_RMASK 0xFFFF
+#define QIB_7322_RcvCtrl_TailUpd_LSB 0x2F
+#define QIB_7322_RcvCtrl_TailUpd_MSB 0x2F
+#define QIB_7322_RcvCtrl_TailUpd_RMASK 0x1
+#define QIB_7322_RcvCtrl_XrcTypeCode_LSB 0x2C
+#define QIB_7322_RcvCtrl_XrcTypeCode_MSB 0x2E
+#define QIB_7322_RcvCtrl_XrcTypeCode_RMASK 0x7
+#define QIB_7322_RcvCtrl_TidFlowEnable_LSB 0x2B
+#define QIB_7322_RcvCtrl_TidFlowEnable_MSB 0x2B
+#define QIB_7322_RcvCtrl_TidFlowEnable_RMASK 0x1
+#define QIB_7322_RcvCtrl_ContextCfg_LSB 0x29
+#define QIB_7322_RcvCtrl_ContextCfg_MSB 0x2A
+#define QIB_7322_RcvCtrl_ContextCfg_RMASK 0x3
+#define QIB_7322_RcvCtrl_IntrAvail_LSB 0x14
+#define QIB_7322_RcvCtrl_IntrAvail_MSB 0x25
+#define QIB_7322_RcvCtrl_IntrAvail_RMASK 0x3FFFF
+#define QIB_7322_RcvCtrl_dontDropRHQFull_LSB 0x0
+#define QIB_7322_RcvCtrl_dontDropRHQFull_MSB 0x11
+#define QIB_7322_RcvCtrl_dontDropRHQFull_RMASK 0x3FFFF
+
+#define QIB_7322_RcvHdrSize_OFFS 0x110
+#define QIB_7322_RcvHdrSize_DEF 0x0000000000000000
+
+#define QIB_7322_RcvHdrCnt_OFFS 0x118
+#define QIB_7322_RcvHdrCnt_DEF 0x0000000000000000
+
+#define QIB_7322_RcvHdrEntSize_OFFS 0x120
+#define QIB_7322_RcvHdrEntSize_DEF 0x0000000000000000
+
+#define QIB_7322_RcvTIDBase_OFFS 0x128
+#define QIB_7322_RcvTIDBase_DEF 0x0000000000050000
+
+#define QIB_7322_RcvTIDCnt_OFFS 0x130
+#define QIB_7322_RcvTIDCnt_DEF 0x0000000000000200
+
+#define QIB_7322_RcvEgrBase_OFFS 0x138
+#define QIB_7322_RcvEgrBase_DEF 0x0000000000014000
+
+#define QIB_7322_RcvEgrCnt_OFFS 0x140
+#define QIB_7322_RcvEgrCnt_DEF 0x0000000000001000
+
+#define QIB_7322_RcvBufBase_OFFS 0x148
+#define QIB_7322_RcvBufBase_DEF 0x0000000000080000
+
+#define QIB_7322_RcvBufSize_OFFS 0x150
+#define QIB_7322_RcvBufSize_DEF 0x0000000000005000
+
+#define QIB_7322_RxIntMemBase_OFFS 0x158
+#define QIB_7322_RxIntMemBase_DEF 0x0000000000077000
+
+#define QIB_7322_RxIntMemSize_OFFS 0x160
+#define QIB_7322_RxIntMemSize_DEF 0x0000000000007000
+
+#define QIB_7322_feature_mask_OFFS 0x190
+#define QIB_7322_feature_mask_DEF 0x00000000000000XX
+
+#define QIB_7322_active_feature_mask_OFFS 0x198
+#define QIB_7322_active_feature_mask_DEF 0x00000000000000XX
+#define QIB_7322_active_feature_mask_Port1_QDR_Enabled_LSB 0x5
+#define QIB_7322_active_feature_mask_Port1_QDR_Enabled_MSB 0x5
+#define QIB_7322_active_feature_mask_Port1_QDR_Enabled_RMASK 0x1
+#define QIB_7322_active_feature_mask_Port1_DDR_Enabled_LSB 0x4
+#define QIB_7322_active_feature_mask_Port1_DDR_Enabled_MSB 0x4
+#define QIB_7322_active_feature_mask_Port1_DDR_Enabled_RMASK 0x1
+#define QIB_7322_active_feature_mask_Port1_SDR_Enabled_LSB 0x3
+#define QIB_7322_active_feature_mask_Port1_SDR_Enabled_MSB 0x3
+#define QIB_7322_active_feature_mask_Port1_SDR_Enabled_RMASK 0x1
+#define QIB_7322_active_feature_mask_Port0_QDR_Enabled_LSB 0x2
+#define QIB_7322_active_feature_mask_Port0_QDR_Enabled_MSB 0x2
+#define QIB_7322_active_feature_mask_Port0_QDR_Enabled_RMASK 0x1
+#define QIB_7322_active_feature_mask_Port0_DDR_Enabled_LSB 0x1
+#define QIB_7322_active_feature_mask_Port0_DDR_Enabled_MSB 0x1
+#define QIB_7322_active_feature_mask_Port0_DDR_Enabled_RMASK 0x1
+#define QIB_7322_active_feature_mask_Port0_SDR_Enabled_LSB 0x0
+#define QIB_7322_active_feature_mask_Port0_SDR_Enabled_MSB 0x0
+#define QIB_7322_active_feature_mask_Port0_SDR_Enabled_RMASK 0x1
+
+#define QIB_7322_SendCtrl_OFFS 0x1C0
+#define QIB_7322_SendCtrl_DEF 0x0000000000000000
+#define QIB_7322_SendCtrl_Disarm_LSB 0x1F
+#define QIB_7322_SendCtrl_Disarm_MSB 0x1F
+#define QIB_7322_SendCtrl_Disarm_RMASK 0x1
+#define QIB_7322_SendCtrl_SendBufAvailPad64Byte_LSB 0x1D
+#define QIB_7322_SendCtrl_SendBufAvailPad64Byte_MSB 0x1D
+#define QIB_7322_SendCtrl_SendBufAvailPad64Byte_RMASK 0x1
+#define QIB_7322_SendCtrl_AvailUpdThld_LSB 0x18
+#define QIB_7322_SendCtrl_AvailUpdThld_MSB 0x1C
+#define QIB_7322_SendCtrl_AvailUpdThld_RMASK 0x1F
+#define QIB_7322_SendCtrl_DisarmSendBuf_LSB 0x10
+#define QIB_7322_SendCtrl_DisarmSendBuf_MSB 0x17
+#define QIB_7322_SendCtrl_DisarmSendBuf_RMASK 0xFF
+#define QIB_7322_SendCtrl_SpecialTriggerEn_LSB 0x4
+#define QIB_7322_SendCtrl_SpecialTriggerEn_MSB 0x4
+#define QIB_7322_SendCtrl_SpecialTriggerEn_RMASK 0x1
+#define QIB_7322_SendCtrl_SendBufAvailUpd_LSB 0x2
+#define QIB_7322_SendCtrl_SendBufAvailUpd_MSB 0x2
+#define QIB_7322_SendCtrl_SendBufAvailUpd_RMASK 0x1
+#define QIB_7322_SendCtrl_SendIntBufAvail_LSB 0x1
+#define QIB_7322_SendCtrl_SendIntBufAvail_MSB 0x1
+#define QIB_7322_SendCtrl_SendIntBufAvail_RMASK 0x1
+
+#define QIB_7322_SendBufBase_OFFS 0x1C8
+#define QIB_7322_SendBufBase_DEF 0x0018000000100000
+#define QIB_7322_SendBufBase_BaseAddr_LargePIO_LSB 0x20
+#define QIB_7322_SendBufBase_BaseAddr_LargePIO_MSB 0x34
+#define QIB_7322_SendBufBase_BaseAddr_LargePIO_RMASK 0x1FFFFF
+#define QIB_7322_SendBufBase_BaseAddr_SmallPIO_LSB 0x0
+#define QIB_7322_SendBufBase_BaseAddr_SmallPIO_MSB 0x14
+#define QIB_7322_SendBufBase_BaseAddr_SmallPIO_RMASK 0x1FFFFF
+
+#define QIB_7322_SendBufSize_OFFS 0x1D0
+#define QIB_7322_SendBufSize_DEF 0x0000108000000880
+#define QIB_7322_SendBufSize_Size_LargePIO_LSB 0x20
+#define QIB_7322_SendBufSize_Size_LargePIO_MSB 0x2C
+#define QIB_7322_SendBufSize_Size_LargePIO_RMASK 0x1FFF
+#define QIB_7322_SendBufSize_Size_SmallPIO_LSB 0x0
+#define QIB_7322_SendBufSize_Size_SmallPIO_MSB 0xB
+#define QIB_7322_SendBufSize_Size_SmallPIO_RMASK 0xFFF
+
+#define QIB_7322_SendBufCnt_OFFS 0x1D8
+#define QIB_7322_SendBufCnt_DEF 0x0000002000000080
+#define QIB_7322_SendBufCnt_Num_LargeBuffers_LSB 0x20
+#define QIB_7322_SendBufCnt_Num_LargeBuffers_MSB 0x25
+#define QIB_7322_SendBufCnt_Num_LargeBuffers_RMASK 0x3F
+#define QIB_7322_SendBufCnt_Num_SmallBuffers_LSB 0x0
+#define QIB_7322_SendBufCnt_Num_SmallBuffers_MSB 0x8
+#define QIB_7322_SendBufCnt_Num_SmallBuffers_RMASK 0x1FF
+
+#define QIB_7322_SendBufAvailAddr_OFFS 0x1E0
+#define QIB_7322_SendBufAvailAddr_DEF 0x0000000000000000
+#define QIB_7322_SendBufAvailAddr_SendBufAvailAddr_LSB 0x6
+#define QIB_7322_SendBufAvailAddr_SendBufAvailAddr_MSB 0x27
+#define QIB_7322_SendBufAvailAddr_SendBufAvailAddr_RMASK 0x3FFFFFFFF
+
+#define QIB_7322_SendBufErr0_OFFS 0x240
+#define QIB_7322_SendBufErr0_DEF 0x0000000000000000
+#define QIB_7322_SendBufErr0_SendBufErr_63_0_LSB 0x0
+#define QIB_7322_SendBufErr0_SendBufErr_63_0_MSB 0x3F
+#define QIB_7322_SendBufErr0_SendBufErr_63_0_RMASK 0x0
+
+#define QIB_7322_AvailUpdCount_OFFS 0x268
+#define QIB_7322_AvailUpdCount_DEF 0x0000000000000000
+#define QIB_7322_AvailUpdCount_AvailUpdCount_LSB 0x0
+#define QIB_7322_AvailUpdCount_AvailUpdCount_MSB 0x4
+#define QIB_7322_AvailUpdCount_AvailUpdCount_RMASK 0x1F
+
+#define QIB_7322_RcvHdrAddr0_OFFS 0x280
+#define QIB_7322_RcvHdrAddr0_DEF 0x0000000000000000
+#define QIB_7322_RcvHdrAddr0_RcvHdrAddr_LSB 0x2
+#define QIB_7322_RcvHdrAddr0_RcvHdrAddr_MSB 0x27
+#define QIB_7322_RcvHdrAddr0_RcvHdrAddr_RMASK 0x3FFFFFFFFF
+
+#define QIB_7322_RcvHdrTailAddr0_OFFS 0x340
+#define QIB_7322_RcvHdrTailAddr0_DEF 0x0000000000000000
+#define QIB_7322_RcvHdrTailAddr0_RcvHdrTailAddr_LSB 0x2
+#define QIB_7322_RcvHdrTailAddr0_RcvHdrTailAddr_MSB 0x27
+#define QIB_7322_RcvHdrTailAddr0_RcvHdrTailAddr_RMASK 0x3FFFFFFFFF
+
+#define QIB_7322_ahb_access_ctrl_OFFS 0x460
+#define QIB_7322_ahb_access_ctrl_DEF 0x0000000000000000
+#define QIB_7322_ahb_access_ctrl_sw_sel_ahb_trgt_LSB 0x1
+#define QIB_7322_ahb_access_ctrl_sw_sel_ahb_trgt_MSB 0x2
+#define QIB_7322_ahb_access_ctrl_sw_sel_ahb_trgt_RMASK 0x3
+#define QIB_7322_ahb_access_ctrl_sw_ahb_sel_LSB 0x0
+#define QIB_7322_ahb_access_ctrl_sw_ahb_sel_MSB 0x0
+#define QIB_7322_ahb_access_ctrl_sw_ahb_sel_RMASK 0x1
+
+#define QIB_7322_ahb_transaction_reg_OFFS 0x468
+#define QIB_7322_ahb_transaction_reg_DEF 0x0000000080000000
+#define QIB_7322_ahb_transaction_reg_ahb_data_LSB 0x20
+#define QIB_7322_ahb_transaction_reg_ahb_data_MSB 0x3F
+#define QIB_7322_ahb_transaction_reg_ahb_data_RMASK 0xFFFFFFFF
+#define QIB_7322_ahb_transaction_reg_ahb_rdy_LSB 0x1F
+#define QIB_7322_ahb_transaction_reg_ahb_rdy_MSB 0x1F
+#define QIB_7322_ahb_transaction_reg_ahb_rdy_RMASK 0x1
+#define QIB_7322_ahb_transaction_reg_ahb_req_err_LSB 0x1E
+#define QIB_7322_ahb_transaction_reg_ahb_req_err_MSB 0x1E
+#define QIB_7322_ahb_transaction_reg_ahb_req_err_RMASK 0x1
+#define QIB_7322_ahb_transaction_reg_write_not_read_LSB 0x1B
+#define QIB_7322_ahb_transaction_reg_write_not_read_MSB 0x1B
+#define QIB_7322_ahb_transaction_reg_write_not_read_RMASK 0x1
+#define QIB_7322_ahb_transaction_reg_ahb_address_LSB 0x10
+#define QIB_7322_ahb_transaction_reg_ahb_address_MSB 0x1A
+#define QIB_7322_ahb_transaction_reg_ahb_address_RMASK 0x7FF
+
+#define QIB_7322_SPC_JTAG_ACCESS_REG_OFFS 0x470
+#define QIB_7322_SPC_JTAG_ACCESS_REG_DEF 0x0000000000000001
+#define QIB_7322_SPC_JTAG_ACCESS_REG_SPC_JTAG_ACCESS_EN_LSB 0xA
+#define QIB_7322_SPC_JTAG_ACCESS_REG_SPC_JTAG_ACCESS_EN_MSB 0xA
+#define QIB_7322_SPC_JTAG_ACCESS_REG_SPC_JTAG_ACCESS_EN_RMASK 0x1
+#define QIB_7322_SPC_JTAG_ACCESS_REG_bist_en_LSB 0x5
+#define QIB_7322_SPC_JTAG_ACCESS_REG_bist_en_MSB 0x9
+#define QIB_7322_SPC_JTAG_ACCESS_REG_bist_en_RMASK 0x1F
+#define QIB_7322_SPC_JTAG_ACCESS_REG_opcode_LSB 0x3
+#define QIB_7322_SPC_JTAG_ACCESS_REG_opcode_MSB 0x4
+#define QIB_7322_SPC_JTAG_ACCESS_REG_opcode_RMASK 0x3
+#define QIB_7322_SPC_JTAG_ACCESS_REG_tdi_LSB 0x2
+#define QIB_7322_SPC_JTAG_ACCESS_REG_tdi_MSB 0x2
+#define QIB_7322_SPC_JTAG_ACCESS_REG_tdi_RMASK 0x1
+#define QIB_7322_SPC_JTAG_ACCESS_REG_tdo_LSB 0x1
+#define QIB_7322_SPC_JTAG_ACCESS_REG_tdo_MSB 0x1
+#define QIB_7322_SPC_JTAG_ACCESS_REG_tdo_RMASK 0x1
+#define QIB_7322_SPC_JTAG_ACCESS_REG_rdy_LSB 0x0
+#define QIB_7322_SPC_JTAG_ACCESS_REG_rdy_MSB 0x0
+#define QIB_7322_SPC_JTAG_ACCESS_REG_rdy_RMASK 0x1
+
+#define QIB_7322_SendCheckMask0_OFFS 0x4C0
+#define QIB_7322_SendCheckMask0_DEF 0x0000000000000000
+#define QIB_7322_SendCheckMask0_SendCheckMask_63_32_LSB 0x0
+#define QIB_7322_SendCheckMask0_SendCheckMask_63_32_MSB 0x3F
+#define QIB_7322_SendCheckMask0_SendCheckMask_63_32_RMASK 0x0
+
+#define QIB_7322_SendGRHCheckMask0_OFFS 0x4E0
+#define QIB_7322_SendGRHCheckMask0_DEF 0x0000000000000000
+#define QIB_7322_SendGRHCheckMask0_SendGRHCheckMask_63_32_LSB 0x0
+#define QIB_7322_SendGRHCheckMask0_SendGRHCheckMask_63_32_MSB 0x3F
+#define QIB_7322_SendGRHCheckMask0_SendGRHCheckMask_63_32_RMASK 0x0
+
+#define QIB_7322_SendIBPacketMask0_OFFS 0x500
+#define QIB_7322_SendIBPacketMask0_DEF 0x0000000000000000
+#define QIB_7322_SendIBPacketMask0_SendIBPacketMask_63_32_LSB 0x0
+#define QIB_7322_SendIBPacketMask0_SendIBPacketMask_63_32_MSB 0x3F
+#define QIB_7322_SendIBPacketMask0_SendIBPacketMask_63_32_RMASK 0x0
+
+#define QIB_7322_IntRedirect0_OFFS 0x540
+#define QIB_7322_IntRedirect0_DEF 0x0000000000000000
+#define QIB_7322_IntRedirect0_vec11_LSB 0x37
+#define QIB_7322_IntRedirect0_vec11_MSB 0x3B
+#define QIB_7322_IntRedirect0_vec11_RMASK 0x1F
+#define QIB_7322_IntRedirect0_vec10_LSB 0x32
+#define QIB_7322_IntRedirect0_vec10_MSB 0x36
+#define QIB_7322_IntRedirect0_vec10_RMASK 0x1F
+#define QIB_7322_IntRedirect0_vec9_LSB 0x2D
+#define QIB_7322_IntRedirect0_vec9_MSB 0x31
+#define QIB_7322_IntRedirect0_vec9_RMASK 0x1F
+#define QIB_7322_IntRedirect0_vec8_LSB 0x28
+#define QIB_7322_IntRedirect0_vec8_MSB 0x2C
+#define QIB_7322_IntRedirect0_vec8_RMASK 0x1F
+#define QIB_7322_IntRedirect0_vec7_LSB 0x23
+#define QIB_7322_IntRedirect0_vec7_MSB 0x27
+#define QIB_7322_IntRedirect0_vec7_RMASK 0x1F
+#define QIB_7322_IntRedirect0_vec6_LSB 0x1E
+#define QIB_7322_IntRedirect0_vec6_MSB 0x22
+#define QIB_7322_IntRedirect0_vec6_RMASK 0x1F
+#define QIB_7322_IntRedirect0_vec5_LSB 0x19
+#define QIB_7322_IntRedirect0_vec5_MSB 0x1D
+#define QIB_7322_IntRedirect0_vec5_RMASK 0x1F
+#define QIB_7322_IntRedirect0_vec4_LSB 0x14
+#define QIB_7322_IntRedirect0_vec4_MSB 0x18
+#define QIB_7322_IntRedirect0_vec4_RMASK 0x1F
+#define QIB_7322_IntRedirect0_vec3_LSB 0xF
+#define QIB_7322_IntRedirect0_vec3_MSB 0x13
+#define QIB_7322_IntRedirect0_vec3_RMASK 0x1F
+#define QIB_7322_IntRedirect0_vec2_LSB 0xA
+#define QIB_7322_IntRedirect0_vec2_MSB 0xE
+#define QIB_7322_IntRedirect0_vec2_RMASK 0x1F
+#define QIB_7322_IntRedirect0_vec1_LSB 0x5
+#define QIB_7322_IntRedirect0_vec1_MSB 0x9
+#define QIB_7322_IntRedirect0_vec1_RMASK 0x1F
+#define QIB_7322_IntRedirect0_vec0_LSB 0x0
+#define QIB_7322_IntRedirect0_vec0_MSB 0x4
+#define QIB_7322_IntRedirect0_vec0_RMASK 0x1F
+
+#define QIB_7322_Int_Granted_OFFS 0x570
+#define QIB_7322_Int_Granted_DEF 0x0000000000000000
+
+#define QIB_7322_vec_clr_without_int_OFFS 0x578
+#define QIB_7322_vec_clr_without_int_DEF 0x0000000000000000
+
+#define QIB_7322_DCACtrlA_OFFS 0x580
+#define QIB_7322_DCACtrlA_DEF 0x0000000000000000
+#define QIB_7322_DCACtrlA_SendDMAHead1DCAEnable_LSB 0x4
+#define QIB_7322_DCACtrlA_SendDMAHead1DCAEnable_MSB 0x4
+#define QIB_7322_DCACtrlA_SendDMAHead1DCAEnable_RMASK 0x1
+#define QIB_7322_DCACtrlA_SendDMAHead0DCAEnable_LSB 0x3
+#define QIB_7322_DCACtrlA_SendDMAHead0DCAEnable_MSB 0x3
+#define QIB_7322_DCACtrlA_SendDMAHead0DCAEnable_RMASK 0x1
+#define QIB_7322_DCACtrlA_RcvTailUpdDCAEnable_LSB 0x2
+#define QIB_7322_DCACtrlA_RcvTailUpdDCAEnable_MSB 0x2
+#define QIB_7322_DCACtrlA_RcvTailUpdDCAEnable_RMASK 0x1
+#define QIB_7322_DCACtrlA_EagerDCAEnable_LSB 0x1
+#define QIB_7322_DCACtrlA_EagerDCAEnable_MSB 0x1
+#define QIB_7322_DCACtrlA_EagerDCAEnable_RMASK 0x1
+#define QIB_7322_DCACtrlA_RcvHdrqDCAEnable_LSB 0x0
+#define QIB_7322_DCACtrlA_RcvHdrqDCAEnable_MSB 0x0
+#define QIB_7322_DCACtrlA_RcvHdrqDCAEnable_RMASK 0x1
+
+#define QIB_7322_DCACtrlB_OFFS 0x588
+#define QIB_7322_DCACtrlB_DEF 0x0000000000000000
+#define QIB_7322_DCACtrlB_RcvHdrq3DCAXfrCnt_LSB 0x36
+#define QIB_7322_DCACtrlB_RcvHdrq3DCAXfrCnt_MSB 0x3B
+#define QIB_7322_DCACtrlB_RcvHdrq3DCAXfrCnt_RMASK 0x3F
+#define QIB_7322_DCACtrlB_RcvHdrq3DCAOPH_LSB 0x2E
+#define QIB_7322_DCACtrlB_RcvHdrq3DCAOPH_MSB 0x35
+#define QIB_7322_DCACtrlB_RcvHdrq3DCAOPH_RMASK 0xFF
+#define QIB_7322_DCACtrlB_RcvHdrq2DCAXfrCnt_LSB 0x28
+#define QIB_7322_DCACtrlB_RcvHdrq2DCAXfrCnt_MSB 0x2D
+#define QIB_7322_DCACtrlB_RcvHdrq2DCAXfrCnt_RMASK 0x3F
+#define QIB_7322_DCACtrlB_RcvHdrq2DCAOPH_LSB 0x20
+#define QIB_7322_DCACtrlB_RcvHdrq2DCAOPH_MSB 0x27
+#define QIB_7322_DCACtrlB_RcvHdrq2DCAOPH_RMASK 0xFF
+#define QIB_7322_DCACtrlB_RcvHdrq1DCAXfrCnt_LSB 0x16
+#define QIB_7322_DCACtrlB_RcvHdrq1DCAXfrCnt_MSB 0x1B
+#define QIB_7322_DCACtrlB_RcvHdrq1DCAXfrCnt_RMASK 0x3F
+#define QIB_7322_DCACtrlB_RcvHdrq1DCAOPH_LSB 0xE
+#define QIB_7322_DCACtrlB_RcvHdrq1DCAOPH_MSB 0x15
+#define QIB_7322_DCACtrlB_RcvHdrq1DCAOPH_RMASK 0xFF
+#define QIB_7322_DCACtrlB_RcvHdrq0DCAXfrCnt_LSB 0x8
+#define QIB_7322_DCACtrlB_RcvHdrq0DCAXfrCnt_MSB 0xD
+#define QIB_7322_DCACtrlB_RcvHdrq0DCAXfrCnt_RMASK 0x3F
+#define QIB_7322_DCACtrlB_RcvHdrq0DCAOPH_LSB 0x0
+#define QIB_7322_DCACtrlB_RcvHdrq0DCAOPH_MSB 0x7
+#define QIB_7322_DCACtrlB_RcvHdrq0DCAOPH_RMASK 0xFF
+
+#define QIB_7322_DCACtrlC_OFFS 0x590
+#define QIB_7322_DCACtrlC_DEF 0x0000000000000000
+#define QIB_7322_DCACtrlC_RcvHdrq7DCAXfrCnt_LSB 0x36
+#define QIB_7322_DCACtrlC_RcvHdrq7DCAXfrCnt_MSB 0x3B
+#define QIB_7322_DCACtrlC_RcvHdrq7DCAXfrCnt_RMASK 0x3F
+#define QIB_7322_DCACtrlC_RcvHdrq7DCAOPH_LSB 0x2E
+#define QIB_7322_DCACtrlC_RcvHdrq7DCAOPH_MSB 0x35
+#define QIB_7322_DCACtrlC_RcvHdrq7DCAOPH_RMASK 0xFF
+#define QIB_7322_DCACtrlC_RcvHdrq6DCAXfrCnt_LSB 0x28
+#define QIB_7322_DCACtrlC_RcvHdrq6DCAXfrCnt_MSB 0x2D
+#define QIB_7322_DCACtrlC_RcvHdrq6DCAXfrCnt_RMASK 0x3F
+#define QIB_7322_DCACtrlC_RcvHdrq6DCAOPH_LSB 0x20
+#define QIB_7322_DCACtrlC_RcvHdrq6DCAOPH_MSB 0x27
+#define QIB_7322_DCACtrlC_RcvHdrq6DCAOPH_RMASK 0xFF
+#define QIB_7322_DCACtrlC_RcvHdrq5DCAXfrCnt_LSB 0x16
+#define QIB_7322_DCACtrlC_RcvHdrq5DCAXfrCnt_MSB 0x1B
+#define QIB_7322_DCACtrlC_RcvHdrq5DCAXfrCnt_RMASK 0x3F
+#define QIB_7322_DCACtrlC_RcvHdrq5DCAOPH_LSB 0xE
+#define QIB_7322_DCACtrlC_RcvHdrq5DCAOPH_MSB 0x15
+#define QIB_7322_DCACtrlC_RcvHdrq5DCAOPH_RMASK 0xFF
+#define QIB_7322_DCACtrlC_RcvHdrq4DCAXfrCnt_LSB 0x8
+#define QIB_7322_DCACtrlC_RcvHdrq4DCAXfrCnt_MSB 0xD
+#define QIB_7322_DCACtrlC_RcvHdrq4DCAXfrCnt_RMASK 0x3F
+#define QIB_7322_DCACtrlC_RcvHdrq4DCAOPH_LSB 0x0
+#define QIB_7322_DCACtrlC_RcvHdrq4DCAOPH_MSB 0x7
+#define QIB_7322_DCACtrlC_RcvHdrq4DCAOPH_RMASK 0xFF
+
+#define QIB_7322_DCACtrlD_OFFS 0x598
+#define QIB_7322_DCACtrlD_DEF 0x0000000000000000
+#define QIB_7322_DCACtrlD_RcvHdrq11DCAXfrCnt_LSB 0x36
+#define QIB_7322_DCACtrlD_RcvHdrq11DCAXfrCnt_MSB 0x3B
+#define QIB_7322_DCACtrlD_RcvHdrq11DCAXfrCnt_RMASK 0x3F
+#define QIB_7322_DCACtrlD_RcvHdrq11DCAOPH_LSB 0x2E
+#define QIB_7322_DCACtrlD_RcvHdrq11DCAOPH_MSB 0x35
+#define QIB_7322_DCACtrlD_RcvHdrq11DCAOPH_RMASK 0xFF
+#define QIB_7322_DCACtrlD_RcvHdrq10DCAXfrCnt_LSB 0x28
+#define QIB_7322_DCACtrlD_RcvHdrq10DCAXfrCnt_MSB 0x2D
+#define QIB_7322_DCACtrlD_RcvHdrq10DCAXfrCnt_RMASK 0x3F
+#define QIB_7322_DCACtrlD_RcvHdrq10DCAOPH_LSB 0x20
+#define QIB_7322_DCACtrlD_RcvHdrq10DCAOPH_MSB 0x27
+#define QIB_7322_DCACtrlD_RcvHdrq10DCAOPH_RMASK 0xFF
+#define QIB_7322_DCACtrlD_RcvHdrq9DCAXfrCnt_LSB 0x16
+#define QIB_7322_DCACtrlD_RcvHdrq9DCAXfrCnt_MSB 0x1B
+#define QIB_7322_DCACtrlD_RcvHdrq9DCAXfrCnt_RMASK 0x3F
+#define QIB_7322_DCACtrlD_RcvHdrq9DCAOPH_LSB 0xE
+#define QIB_7322_DCACtrlD_RcvHdrq9DCAOPH_MSB 0x15
+#define QIB_7322_DCACtrlD_RcvHdrq9DCAOPH_RMASK 0xFF
+#define QIB_7322_DCACtrlD_RcvHdrq8DCAXfrCnt_LSB 0x8
+#define QIB_7322_DCACtrlD_RcvHdrq8DCAXfrCnt_MSB 0xD
+#define QIB_7322_DCACtrlD_RcvHdrq8DCAXfrCnt_RMASK 0x3F
+#define QIB_7322_DCACtrlD_RcvHdrq8DCAOPH_LSB 0x0
+#define QIB_7322_DCACtrlD_RcvHdrq8DCAOPH_MSB 0x7
+#define QIB_7322_DCACtrlD_RcvHdrq8DCAOPH_RMASK 0xFF
+
+#define QIB_7322_DCACtrlE_OFFS 0x5A0
+#define QIB_7322_DCACtrlE_DEF 0x0000000000000000
+#define QIB_7322_DCACtrlE_RcvHdrq15DCAXfrCnt_LSB 0x36
+#define QIB_7322_DCACtrlE_RcvHdrq15DCAXfrCnt_MSB 0x3B
+#define QIB_7322_DCACtrlE_RcvHdrq15DCAXfrCnt_RMASK 0x3F
+#define QIB_7322_DCACtrlE_RcvHdrq15DCAOPH_LSB 0x2E
+#define QIB_7322_DCACtrlE_RcvHdrq15DCAOPH_MSB 0x35
+#define QIB_7322_DCACtrlE_RcvHdrq15DCAOPH_RMASK 0xFF
+#define QIB_7322_DCACtrlE_RcvHdrq14DCAXfrCnt_LSB 0x28
+#define QIB_7322_DCACtrlE_RcvHdrq14DCAXfrCnt_MSB 0x2D
+#define QIB_7322_DCACtrlE_RcvHdrq14DCAXfrCnt_RMASK 0x3F
+#define QIB_7322_DCACtrlE_RcvHdrq14DCAOPH_LSB 0x20
+#define QIB_7322_DCACtrlE_RcvHdrq14DCAOPH_MSB 0x27
+#define QIB_7322_DCACtrlE_RcvHdrq14DCAOPH_RMASK 0xFF
+#define QIB_7322_DCACtrlE_RcvHdrq13DCAXfrCnt_LSB 0x16
+#define QIB_7322_DCACtrlE_RcvHdrq13DCAXfrCnt_MSB 0x1B
+#define QIB_7322_DCACtrlE_RcvHdrq13DCAXfrCnt_RMASK 0x3F
+#define QIB_7322_DCACtrlE_RcvHdrq13DCAOPH_LSB 0xE
+#define QIB_7322_DCACtrlE_RcvHdrq13DCAOPH_MSB 0x15
+#define QIB_7322_DCACtrlE_RcvHdrq13DCAOPH_RMASK 0xFF
+#define QIB_7322_DCACtrlE_RcvHdrq12DCAXfrCnt_LSB 0x8
+#define QIB_7322_DCACtrlE_RcvHdrq12DCAXfrCnt_MSB 0xD
+#define QIB_7322_DCACtrlE_RcvHdrq12DCAXfrCnt_RMASK 0x3F
+#define QIB_7322_DCACtrlE_RcvHdrq12DCAOPH_LSB 0x0
+#define QIB_7322_DCACtrlE_RcvHdrq12DCAOPH_MSB 0x7
+#define QIB_7322_DCACtrlE_RcvHdrq12DCAOPH_RMASK 0xFF
+
+#define QIB_7322_DCACtrlF_OFFS 0x5A8
+#define QIB_7322_DCACtrlF_DEF 0x0000000000000000
+#define QIB_7322_DCACtrlF_SendDma1DCAOPH_LSB 0x28
+#define QIB_7322_DCACtrlF_SendDma1DCAOPH_MSB 0x2F
+#define QIB_7322_DCACtrlF_SendDma1DCAOPH_RMASK 0xFF
+#define QIB_7322_DCACtrlF_SendDma0DCAOPH_LSB 0x20
+#define QIB_7322_DCACtrlF_SendDma0DCAOPH_MSB 0x27
+#define QIB_7322_DCACtrlF_SendDma0DCAOPH_RMASK 0xFF
+#define QIB_7322_DCACtrlF_RcvHdrq17DCAXfrCnt_LSB 0x16
+#define QIB_7322_DCACtrlF_RcvHdrq17DCAXfrCnt_MSB 0x1B
+#define QIB_7322_DCACtrlF_RcvHdrq17DCAXfrCnt_RMASK 0x3F
+#define QIB_7322_DCACtrlF_RcvHdrq17DCAOPH_LSB 0xE
+#define QIB_7322_DCACtrlF_RcvHdrq17DCAOPH_MSB 0x15
+#define QIB_7322_DCACtrlF_RcvHdrq17DCAOPH_RMASK 0xFF
+#define QIB_7322_DCACtrlF_RcvHdrq16DCAXfrCnt_LSB 0x8
+#define QIB_7322_DCACtrlF_RcvHdrq16DCAXfrCnt_MSB 0xD
+#define QIB_7322_DCACtrlF_RcvHdrq16DCAXfrCnt_RMASK 0x3F
+#define QIB_7322_DCACtrlF_RcvHdrq16DCAOPH_LSB 0x0
+#define QIB_7322_DCACtrlF_RcvHdrq16DCAOPH_MSB 0x7
+#define QIB_7322_DCACtrlF_RcvHdrq16DCAOPH_RMASK 0xFF
+
+#define QIB_7322_RcvAvailTimeOut0_OFFS 0xC00
+#define QIB_7322_RcvAvailTimeOut0_DEF 0x0000000000000000
+#define QIB_7322_RcvAvailTimeOut0_RcvAvailTOCount_LSB 0x10
+#define QIB_7322_RcvAvailTimeOut0_RcvAvailTOCount_MSB 0x1F
+#define QIB_7322_RcvAvailTimeOut0_RcvAvailTOCount_RMASK 0xFFFF
+#define QIB_7322_RcvAvailTimeOut0_RcvAvailTOReload_LSB 0x0
+#define QIB_7322_RcvAvailTimeOut0_RcvAvailTOReload_MSB 0xF
+#define QIB_7322_RcvAvailTimeOut0_RcvAvailTOReload_RMASK 0xFFFF
+
+#define QIB_7322_CntrRegBase_0_OFFS 0x1028
+#define QIB_7322_CntrRegBase_0_DEF 0x0000000000012000
+
+#define QIB_7322_ErrMask_0_OFFS 0x1080
+#define QIB_7322_ErrMask_0_DEF 0x0000000000000000
+#define QIB_7322_ErrMask_0_IBStatusChangedMask_LSB 0x3A
+#define QIB_7322_ErrMask_0_IBStatusChangedMask_MSB 0x3A
+#define QIB_7322_ErrMask_0_IBStatusChangedMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SHeadersErrMask_LSB 0x39
+#define QIB_7322_ErrMask_0_SHeadersErrMask_MSB 0x39
+#define QIB_7322_ErrMask_0_SHeadersErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_VL15BufMisuseErrMask_LSB 0x36
+#define QIB_7322_ErrMask_0_VL15BufMisuseErrMask_MSB 0x36
+#define QIB_7322_ErrMask_0_VL15BufMisuseErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SDmaHaltErrMask_LSB 0x31
+#define QIB_7322_ErrMask_0_SDmaHaltErrMask_MSB 0x31
+#define QIB_7322_ErrMask_0_SDmaHaltErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SDmaDescAddrMisalignErrMask_LSB 0x30
+#define QIB_7322_ErrMask_0_SDmaDescAddrMisalignErrMask_MSB 0x30
+#define QIB_7322_ErrMask_0_SDmaDescAddrMisalignErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SDmaUnexpDataErrMask_LSB 0x2F
+#define QIB_7322_ErrMask_0_SDmaUnexpDataErrMask_MSB 0x2F
+#define QIB_7322_ErrMask_0_SDmaUnexpDataErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SDmaMissingDwErrMask_LSB 0x2E
+#define QIB_7322_ErrMask_0_SDmaMissingDwErrMask_MSB 0x2E
+#define QIB_7322_ErrMask_0_SDmaMissingDwErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SDmaDwEnErrMask_LSB 0x2D
+#define QIB_7322_ErrMask_0_SDmaDwEnErrMask_MSB 0x2D
+#define QIB_7322_ErrMask_0_SDmaDwEnErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SDmaRpyTagErrMask_LSB 0x2C
+#define QIB_7322_ErrMask_0_SDmaRpyTagErrMask_MSB 0x2C
+#define QIB_7322_ErrMask_0_SDmaRpyTagErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SDma1stDescErrMask_LSB 0x2B
+#define QIB_7322_ErrMask_0_SDma1stDescErrMask_MSB 0x2B
+#define QIB_7322_ErrMask_0_SDma1stDescErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SDmaBaseErrMask_LSB 0x2A
+#define QIB_7322_ErrMask_0_SDmaBaseErrMask_MSB 0x2A
+#define QIB_7322_ErrMask_0_SDmaBaseErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SDmaTailOutOfBoundErrMask_LSB 0x29
+#define QIB_7322_ErrMask_0_SDmaTailOutOfBoundErrMask_MSB 0x29
+#define QIB_7322_ErrMask_0_SDmaTailOutOfBoundErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SDmaOutOfBoundErrMask_LSB 0x28
+#define QIB_7322_ErrMask_0_SDmaOutOfBoundErrMask_MSB 0x28
+#define QIB_7322_ErrMask_0_SDmaOutOfBoundErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SDmaGenMismatchErrMask_LSB 0x27
+#define QIB_7322_ErrMask_0_SDmaGenMismatchErrMask_MSB 0x27
+#define QIB_7322_ErrMask_0_SDmaGenMismatchErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SendBufMisuseErrMask_LSB 0x26
+#define QIB_7322_ErrMask_0_SendBufMisuseErrMask_MSB 0x26
+#define QIB_7322_ErrMask_0_SendBufMisuseErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SendUnsupportedVLErrMask_LSB 0x25
+#define QIB_7322_ErrMask_0_SendUnsupportedVLErrMask_MSB 0x25
+#define QIB_7322_ErrMask_0_SendUnsupportedVLErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SendUnexpectedPktNumErrMask_LSB 0x24
+#define QIB_7322_ErrMask_0_SendUnexpectedPktNumErrMask_MSB 0x24
+#define QIB_7322_ErrMask_0_SendUnexpectedPktNumErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SendDroppedDataPktErrMask_LSB 0x22
+#define QIB_7322_ErrMask_0_SendDroppedDataPktErrMask_MSB 0x22
+#define QIB_7322_ErrMask_0_SendDroppedDataPktErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SendDroppedSmpPktErrMask_LSB 0x21
+#define QIB_7322_ErrMask_0_SendDroppedSmpPktErrMask_MSB 0x21
+#define QIB_7322_ErrMask_0_SendDroppedSmpPktErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SendPktLenErrMask_LSB 0x20
+#define QIB_7322_ErrMask_0_SendPktLenErrMask_MSB 0x20
+#define QIB_7322_ErrMask_0_SendPktLenErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SendUnderRunErrMask_LSB 0x1F
+#define QIB_7322_ErrMask_0_SendUnderRunErrMask_MSB 0x1F
+#define QIB_7322_ErrMask_0_SendUnderRunErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SendMaxPktLenErrMask_LSB 0x1E
+#define QIB_7322_ErrMask_0_SendMaxPktLenErrMask_MSB 0x1E
+#define QIB_7322_ErrMask_0_SendMaxPktLenErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_SendMinPktLenErrMask_LSB 0x1D
+#define QIB_7322_ErrMask_0_SendMinPktLenErrMask_MSB 0x1D
+#define QIB_7322_ErrMask_0_SendMinPktLenErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_RcvIBLostLinkErrMask_LSB 0x11
+#define QIB_7322_ErrMask_0_RcvIBLostLinkErrMask_MSB 0x11
+#define QIB_7322_ErrMask_0_RcvIBLostLinkErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_RcvHdrErrMask_LSB 0x10
+#define QIB_7322_ErrMask_0_RcvHdrErrMask_MSB 0x10
+#define QIB_7322_ErrMask_0_RcvHdrErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_RcvHdrLenErrMask_LSB 0xF
+#define QIB_7322_ErrMask_0_RcvHdrLenErrMask_MSB 0xF
+#define QIB_7322_ErrMask_0_RcvHdrLenErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_RcvBadTidErrMask_LSB 0xE
+#define QIB_7322_ErrMask_0_RcvBadTidErrMask_MSB 0xE
+#define QIB_7322_ErrMask_0_RcvBadTidErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_RcvBadVersionErrMask_LSB 0xB
+#define QIB_7322_ErrMask_0_RcvBadVersionErrMask_MSB 0xB
+#define QIB_7322_ErrMask_0_RcvBadVersionErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_RcvIBFlowErrMask_LSB 0xA
+#define QIB_7322_ErrMask_0_RcvIBFlowErrMask_MSB 0xA
+#define QIB_7322_ErrMask_0_RcvIBFlowErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_RcvEBPErrMask_LSB 0x9
+#define QIB_7322_ErrMask_0_RcvEBPErrMask_MSB 0x9
+#define QIB_7322_ErrMask_0_RcvEBPErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_RcvUnsupportedVLErrMask_LSB 0x8
+#define QIB_7322_ErrMask_0_RcvUnsupportedVLErrMask_MSB 0x8
+#define QIB_7322_ErrMask_0_RcvUnsupportedVLErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_RcvUnexpectedCharErrMask_LSB 0x7
+#define QIB_7322_ErrMask_0_RcvUnexpectedCharErrMask_MSB 0x7
+#define QIB_7322_ErrMask_0_RcvUnexpectedCharErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_RcvShortPktLenErrMask_LSB 0x6
+#define QIB_7322_ErrMask_0_RcvShortPktLenErrMask_MSB 0x6
+#define QIB_7322_ErrMask_0_RcvShortPktLenErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_RcvLongPktLenErrMask_LSB 0x5
+#define QIB_7322_ErrMask_0_RcvLongPktLenErrMask_MSB 0x5
+#define QIB_7322_ErrMask_0_RcvLongPktLenErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_RcvMaxPktLenErrMask_LSB 0x4
+#define QIB_7322_ErrMask_0_RcvMaxPktLenErrMask_MSB 0x4
+#define QIB_7322_ErrMask_0_RcvMaxPktLenErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_RcvMinPktLenErrMask_LSB 0x3
+#define QIB_7322_ErrMask_0_RcvMinPktLenErrMask_MSB 0x3
+#define QIB_7322_ErrMask_0_RcvMinPktLenErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_RcvICRCErrMask_LSB 0x2
+#define QIB_7322_ErrMask_0_RcvICRCErrMask_MSB 0x2
+#define QIB_7322_ErrMask_0_RcvICRCErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_RcvVCRCErrMask_LSB 0x1
+#define QIB_7322_ErrMask_0_RcvVCRCErrMask_MSB 0x1
+#define QIB_7322_ErrMask_0_RcvVCRCErrMask_RMASK 0x1
+#define QIB_7322_ErrMask_0_RcvFormatErrMask_LSB 0x0
+#define QIB_7322_ErrMask_0_RcvFormatErrMask_MSB 0x0
+#define QIB_7322_ErrMask_0_RcvFormatErrMask_RMASK 0x1
+
+#define QIB_7322_ErrStatus_0_OFFS 0x1088
+#define QIB_7322_ErrStatus_0_DEF 0x0000000000000000
+#define QIB_7322_ErrStatus_0_IBStatusChanged_LSB 0x3A
+#define QIB_7322_ErrStatus_0_IBStatusChanged_MSB 0x3A
+#define QIB_7322_ErrStatus_0_IBStatusChanged_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SHeadersErr_LSB 0x39
+#define QIB_7322_ErrStatus_0_SHeadersErr_MSB 0x39
+#define QIB_7322_ErrStatus_0_SHeadersErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_VL15BufMisuseErr_LSB 0x36
+#define QIB_7322_ErrStatus_0_VL15BufMisuseErr_MSB 0x36
+#define QIB_7322_ErrStatus_0_VL15BufMisuseErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SDmaHaltErr_LSB 0x31
+#define QIB_7322_ErrStatus_0_SDmaHaltErr_MSB 0x31
+#define QIB_7322_ErrStatus_0_SDmaHaltErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SDmaDescAddrMisalignErr_LSB 0x30
+#define QIB_7322_ErrStatus_0_SDmaDescAddrMisalignErr_MSB 0x30
+#define QIB_7322_ErrStatus_0_SDmaDescAddrMisalignErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SDmaUnexpDataErr_LSB 0x2F
+#define QIB_7322_ErrStatus_0_SDmaUnexpDataErr_MSB 0x2F
+#define QIB_7322_ErrStatus_0_SDmaUnexpDataErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SDmaMissingDwErr_LSB 0x2E
+#define QIB_7322_ErrStatus_0_SDmaMissingDwErr_MSB 0x2E
+#define QIB_7322_ErrStatus_0_SDmaMissingDwErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SDmaDwEnErr_LSB 0x2D
+#define QIB_7322_ErrStatus_0_SDmaDwEnErr_MSB 0x2D
+#define QIB_7322_ErrStatus_0_SDmaDwEnErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SDmaRpyTagErr_LSB 0x2C
+#define QIB_7322_ErrStatus_0_SDmaRpyTagErr_MSB 0x2C
+#define QIB_7322_ErrStatus_0_SDmaRpyTagErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SDma1stDescErr_LSB 0x2B
+#define QIB_7322_ErrStatus_0_SDma1stDescErr_MSB 0x2B
+#define QIB_7322_ErrStatus_0_SDma1stDescErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SDmaBaseErr_LSB 0x2A
+#define QIB_7322_ErrStatus_0_SDmaBaseErr_MSB 0x2A
+#define QIB_7322_ErrStatus_0_SDmaBaseErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SDmaTailOutOfBoundErr_LSB 0x29
+#define QIB_7322_ErrStatus_0_SDmaTailOutOfBoundErr_MSB 0x29
+#define QIB_7322_ErrStatus_0_SDmaTailOutOfBoundErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SDmaOutOfBoundErr_LSB 0x28
+#define QIB_7322_ErrStatus_0_SDmaOutOfBoundErr_MSB 0x28
+#define QIB_7322_ErrStatus_0_SDmaOutOfBoundErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SDmaGenMismatchErr_LSB 0x27
+#define QIB_7322_ErrStatus_0_SDmaGenMismatchErr_MSB 0x27
+#define QIB_7322_ErrStatus_0_SDmaGenMismatchErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SendBufMisuseErr_LSB 0x26
+#define QIB_7322_ErrStatus_0_SendBufMisuseErr_MSB 0x26
+#define QIB_7322_ErrStatus_0_SendBufMisuseErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SendUnsupportedVLErr_LSB 0x25
+#define QIB_7322_ErrStatus_0_SendUnsupportedVLErr_MSB 0x25
+#define QIB_7322_ErrStatus_0_SendUnsupportedVLErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SendUnexpectedPktNumErr_LSB 0x24
+#define QIB_7322_ErrStatus_0_SendUnexpectedPktNumErr_MSB 0x24
+#define QIB_7322_ErrStatus_0_SendUnexpectedPktNumErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SendDroppedDataPktErr_LSB 0x22
+#define QIB_7322_ErrStatus_0_SendDroppedDataPktErr_MSB 0x22
+#define QIB_7322_ErrStatus_0_SendDroppedDataPktErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SendDroppedSmpPktErr_LSB 0x21
+#define QIB_7322_ErrStatus_0_SendDroppedSmpPktErr_MSB 0x21
+#define QIB_7322_ErrStatus_0_SendDroppedSmpPktErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SendPktLenErr_LSB 0x20
+#define QIB_7322_ErrStatus_0_SendPktLenErr_MSB 0x20
+#define QIB_7322_ErrStatus_0_SendPktLenErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SendUnderRunErr_LSB 0x1F
+#define QIB_7322_ErrStatus_0_SendUnderRunErr_MSB 0x1F
+#define QIB_7322_ErrStatus_0_SendUnderRunErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SendMaxPktLenErr_LSB 0x1E
+#define QIB_7322_ErrStatus_0_SendMaxPktLenErr_MSB 0x1E
+#define QIB_7322_ErrStatus_0_SendMaxPktLenErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_SendMinPktLenErr_LSB 0x1D
+#define QIB_7322_ErrStatus_0_SendMinPktLenErr_MSB 0x1D
+#define QIB_7322_ErrStatus_0_SendMinPktLenErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvIBLostLinkErr_LSB 0x11
+#define QIB_7322_ErrStatus_0_RcvIBLostLinkErr_MSB 0x11
+#define QIB_7322_ErrStatus_0_RcvIBLostLinkErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvHdrErr_LSB 0x10
+#define QIB_7322_ErrStatus_0_RcvHdrErr_MSB 0x10
+#define QIB_7322_ErrStatus_0_RcvHdrErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvHdrLenErr_LSB 0xF
+#define QIB_7322_ErrStatus_0_RcvHdrLenErr_MSB 0xF
+#define QIB_7322_ErrStatus_0_RcvHdrLenErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvBadTidErr_LSB 0xE
+#define QIB_7322_ErrStatus_0_RcvBadTidErr_MSB 0xE
+#define QIB_7322_ErrStatus_0_RcvBadTidErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvBadVersionErr_LSB 0xB
+#define QIB_7322_ErrStatus_0_RcvBadVersionErr_MSB 0xB
+#define QIB_7322_ErrStatus_0_RcvBadVersionErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvIBFlowErr_LSB 0xA
+#define QIB_7322_ErrStatus_0_RcvIBFlowErr_MSB 0xA
+#define QIB_7322_ErrStatus_0_RcvIBFlowErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvEBPErr_LSB 0x9
+#define QIB_7322_ErrStatus_0_RcvEBPErr_MSB 0x9
+#define QIB_7322_ErrStatus_0_RcvEBPErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvUnsupportedVLErr_LSB 0x8
+#define QIB_7322_ErrStatus_0_RcvUnsupportedVLErr_MSB 0x8
+#define QIB_7322_ErrStatus_0_RcvUnsupportedVLErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvUnexpectedCharErr_LSB 0x7
+#define QIB_7322_ErrStatus_0_RcvUnexpectedCharErr_MSB 0x7
+#define QIB_7322_ErrStatus_0_RcvUnexpectedCharErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvShortPktLenErr_LSB 0x6
+#define QIB_7322_ErrStatus_0_RcvShortPktLenErr_MSB 0x6
+#define QIB_7322_ErrStatus_0_RcvShortPktLenErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvLongPktLenErr_LSB 0x5
+#define QIB_7322_ErrStatus_0_RcvLongPktLenErr_MSB 0x5
+#define QIB_7322_ErrStatus_0_RcvLongPktLenErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvMaxPktLenErr_LSB 0x4
+#define QIB_7322_ErrStatus_0_RcvMaxPktLenErr_MSB 0x4
+#define QIB_7322_ErrStatus_0_RcvMaxPktLenErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvMinPktLenErr_LSB 0x3
+#define QIB_7322_ErrStatus_0_RcvMinPktLenErr_MSB 0x3
+#define QIB_7322_ErrStatus_0_RcvMinPktLenErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvICRCErr_LSB 0x2
+#define QIB_7322_ErrStatus_0_RcvICRCErr_MSB 0x2
+#define QIB_7322_ErrStatus_0_RcvICRCErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvVCRCErr_LSB 0x1
+#define QIB_7322_ErrStatus_0_RcvVCRCErr_MSB 0x1
+#define QIB_7322_ErrStatus_0_RcvVCRCErr_RMASK 0x1
+#define QIB_7322_ErrStatus_0_RcvFormatErr_LSB 0x0
+#define QIB_7322_ErrStatus_0_RcvFormatErr_MSB 0x0
+#define QIB_7322_ErrStatus_0_RcvFormatErr_RMASK 0x1
+
+#define QIB_7322_ErrClear_0_OFFS 0x1090
+#define QIB_7322_ErrClear_0_DEF 0x0000000000000000
+#define QIB_7322_ErrClear_0_IBStatusChangedClear_LSB 0x3A
+#define QIB_7322_ErrClear_0_IBStatusChangedClear_MSB 0x3A
+#define QIB_7322_ErrClear_0_IBStatusChangedClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SHeadersErrClear_LSB 0x39
+#define QIB_7322_ErrClear_0_SHeadersErrClear_MSB 0x39
+#define QIB_7322_ErrClear_0_SHeadersErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_VL15BufMisuseErrClear_LSB 0x36
+#define QIB_7322_ErrClear_0_VL15BufMisuseErrClear_MSB 0x36
+#define QIB_7322_ErrClear_0_VL15BufMisuseErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SDmaHaltErrClear_LSB 0x31
+#define QIB_7322_ErrClear_0_SDmaHaltErrClear_MSB 0x31
+#define QIB_7322_ErrClear_0_SDmaHaltErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SDmaDescAddrMisalignErrClear_LSB 0x30
+#define QIB_7322_ErrClear_0_SDmaDescAddrMisalignErrClear_MSB 0x30
+#define QIB_7322_ErrClear_0_SDmaDescAddrMisalignErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SDmaUnexpDataErrClear_LSB 0x2F
+#define QIB_7322_ErrClear_0_SDmaUnexpDataErrClear_MSB 0x2F
+#define QIB_7322_ErrClear_0_SDmaUnexpDataErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SDmaMissingDwErrClear_LSB 0x2E
+#define QIB_7322_ErrClear_0_SDmaMissingDwErrClear_MSB 0x2E
+#define QIB_7322_ErrClear_0_SDmaMissingDwErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SDmaDwEnErrClear_LSB 0x2D
+#define QIB_7322_ErrClear_0_SDmaDwEnErrClear_MSB 0x2D
+#define QIB_7322_ErrClear_0_SDmaDwEnErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SDmaRpyTagErrClear_LSB 0x2C
+#define QIB_7322_ErrClear_0_SDmaRpyTagErrClear_MSB 0x2C
+#define QIB_7322_ErrClear_0_SDmaRpyTagErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SDma1stDescErrClear_LSB 0x2B
+#define QIB_7322_ErrClear_0_SDma1stDescErrClear_MSB 0x2B
+#define QIB_7322_ErrClear_0_SDma1stDescErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SDmaBaseErrClear_LSB 0x2A
+#define QIB_7322_ErrClear_0_SDmaBaseErrClear_MSB 0x2A
+#define QIB_7322_ErrClear_0_SDmaBaseErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SDmaTailOutOfBoundErrClear_LSB 0x29
+#define QIB_7322_ErrClear_0_SDmaTailOutOfBoundErrClear_MSB 0x29
+#define QIB_7322_ErrClear_0_SDmaTailOutOfBoundErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SDmaOutOfBoundErrClear_LSB 0x28
+#define QIB_7322_ErrClear_0_SDmaOutOfBoundErrClear_MSB 0x28
+#define QIB_7322_ErrClear_0_SDmaOutOfBoundErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SDmaGenMismatchErrClear_LSB 0x27
+#define QIB_7322_ErrClear_0_SDmaGenMismatchErrClear_MSB 0x27
+#define QIB_7322_ErrClear_0_SDmaGenMismatchErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SendBufMisuseErrClear_LSB 0x26
+#define QIB_7322_ErrClear_0_SendBufMisuseErrClear_MSB 0x26
+#define QIB_7322_ErrClear_0_SendBufMisuseErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SendUnsupportedVLErrClear_LSB 0x25
+#define QIB_7322_ErrClear_0_SendUnsupportedVLErrClear_MSB 0x25
+#define QIB_7322_ErrClear_0_SendUnsupportedVLErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SendUnexpectedPktNumErrClear_LSB 0x24
+#define QIB_7322_ErrClear_0_SendUnexpectedPktNumErrClear_MSB 0x24
+#define QIB_7322_ErrClear_0_SendUnexpectedPktNumErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SendDroppedDataPktErrClear_LSB 0x22
+#define QIB_7322_ErrClear_0_SendDroppedDataPktErrClear_MSB 0x22
+#define QIB_7322_ErrClear_0_SendDroppedDataPktErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SendDroppedSmpPktErrClear_LSB 0x21
+#define QIB_7322_ErrClear_0_SendDroppedSmpPktErrClear_MSB 0x21
+#define QIB_7322_ErrClear_0_SendDroppedSmpPktErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SendPktLenErrClear_LSB 0x20
+#define QIB_7322_ErrClear_0_SendPktLenErrClear_MSB 0x20
+#define QIB_7322_ErrClear_0_SendPktLenErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SendUnderRunErrClear_LSB 0x1F
+#define QIB_7322_ErrClear_0_SendUnderRunErrClear_MSB 0x1F
+#define QIB_7322_ErrClear_0_SendUnderRunErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SendMaxPktLenErrClear_LSB 0x1E
+#define QIB_7322_ErrClear_0_SendMaxPktLenErrClear_MSB 0x1E
+#define QIB_7322_ErrClear_0_SendMaxPktLenErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_SendMinPktLenErrClear_LSB 0x1D
+#define QIB_7322_ErrClear_0_SendMinPktLenErrClear_MSB 0x1D
+#define QIB_7322_ErrClear_0_SendMinPktLenErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvIBLostLinkErrClear_LSB 0x11
+#define QIB_7322_ErrClear_0_RcvIBLostLinkErrClear_MSB 0x11
+#define QIB_7322_ErrClear_0_RcvIBLostLinkErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvHdrErrClear_LSB 0x10
+#define QIB_7322_ErrClear_0_RcvHdrErrClear_MSB 0x10
+#define QIB_7322_ErrClear_0_RcvHdrErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvHdrLenErrClear_LSB 0xF
+#define QIB_7322_ErrClear_0_RcvHdrLenErrClear_MSB 0xF
+#define QIB_7322_ErrClear_0_RcvHdrLenErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvBadTidErrClear_LSB 0xE
+#define QIB_7322_ErrClear_0_RcvBadTidErrClear_MSB 0xE
+#define QIB_7322_ErrClear_0_RcvBadTidErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvBadVersionErrClear_LSB 0xB
+#define QIB_7322_ErrClear_0_RcvBadVersionErrClear_MSB 0xB
+#define QIB_7322_ErrClear_0_RcvBadVersionErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvIBFlowErrClear_LSB 0xA
+#define QIB_7322_ErrClear_0_RcvIBFlowErrClear_MSB 0xA
+#define QIB_7322_ErrClear_0_RcvIBFlowErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvEBPErrClear_LSB 0x9
+#define QIB_7322_ErrClear_0_RcvEBPErrClear_MSB 0x9
+#define QIB_7322_ErrClear_0_RcvEBPErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvUnsupportedVLErrClear_LSB 0x8
+#define QIB_7322_ErrClear_0_RcvUnsupportedVLErrClear_MSB 0x8
+#define QIB_7322_ErrClear_0_RcvUnsupportedVLErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvUnexpectedCharErrClear_LSB 0x7
+#define QIB_7322_ErrClear_0_RcvUnexpectedCharErrClear_MSB 0x7
+#define QIB_7322_ErrClear_0_RcvUnexpectedCharErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvShortPktLenErrClear_LSB 0x6
+#define QIB_7322_ErrClear_0_RcvShortPktLenErrClear_MSB 0x6
+#define QIB_7322_ErrClear_0_RcvShortPktLenErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvLongPktLenErrClear_LSB 0x5
+#define QIB_7322_ErrClear_0_RcvLongPktLenErrClear_MSB 0x5
+#define QIB_7322_ErrClear_0_RcvLongPktLenErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvMaxPktLenErrClear_LSB 0x4
+#define QIB_7322_ErrClear_0_RcvMaxPktLenErrClear_MSB 0x4
+#define QIB_7322_ErrClear_0_RcvMaxPktLenErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvMinPktLenErrClear_LSB 0x3
+#define QIB_7322_ErrClear_0_RcvMinPktLenErrClear_MSB 0x3
+#define QIB_7322_ErrClear_0_RcvMinPktLenErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvICRCErrClear_LSB 0x2
+#define QIB_7322_ErrClear_0_RcvICRCErrClear_MSB 0x2
+#define QIB_7322_ErrClear_0_RcvICRCErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvVCRCErrClear_LSB 0x1
+#define QIB_7322_ErrClear_0_RcvVCRCErrClear_MSB 0x1
+#define QIB_7322_ErrClear_0_RcvVCRCErrClear_RMASK 0x1
+#define QIB_7322_ErrClear_0_RcvFormatErrClear_LSB 0x0
+#define QIB_7322_ErrClear_0_RcvFormatErrClear_MSB 0x0
+#define QIB_7322_ErrClear_0_RcvFormatErrClear_RMASK 0x1
+
+#define QIB_7322_TXEStatus_0_OFFS 0x10B8
+#define QIB_7322_TXEStatus_0_DEF 0x0000000XC00080FF
+#define QIB_7322_TXEStatus_0_TXE_IBC_Idle_LSB 0x1F
+#define QIB_7322_TXEStatus_0_TXE_IBC_Idle_MSB 0x1F
+#define QIB_7322_TXEStatus_0_TXE_IBC_Idle_RMASK 0x1
+#define QIB_7322_TXEStatus_0_RmFifoEmpty_LSB 0x1E
+#define QIB_7322_TXEStatus_0_RmFifoEmpty_MSB 0x1E
+#define QIB_7322_TXEStatus_0_RmFifoEmpty_RMASK 0x1
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL15_LSB 0xF
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL15_MSB 0xF
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL15_RMASK 0x1
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL7_LSB 0x7
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL7_MSB 0x7
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL7_RMASK 0x1
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL6_LSB 0x6
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL6_MSB 0x6
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL6_RMASK 0x1
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL5_LSB 0x5
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL5_MSB 0x5
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL5_RMASK 0x1
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL4_LSB 0x4
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL4_MSB 0x4
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL4_RMASK 0x1
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL3_LSB 0x3
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL3_MSB 0x3
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL3_RMASK 0x1
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL2_LSB 0x2
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL2_MSB 0x2
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL2_RMASK 0x1
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL1_LSB 0x1
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL1_MSB 0x1
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL1_RMASK 0x1
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL0_LSB 0x0
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL0_MSB 0x0
+#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL0_RMASK 0x1
+
+#define QIB_7322_RcvCtrl_0_OFFS 0x1100
+#define QIB_7322_RcvCtrl_0_DEF 0x0000000000000000
+#define QIB_7322_RcvCtrl_0_RcvResetCredit_LSB 0x2A
+#define QIB_7322_RcvCtrl_0_RcvResetCredit_MSB 0x2A
+#define QIB_7322_RcvCtrl_0_RcvResetCredit_RMASK 0x1
+#define QIB_7322_RcvCtrl_0_RcvPartitionKeyDisable_LSB 0x29
+#define QIB_7322_RcvCtrl_0_RcvPartitionKeyDisable_MSB 0x29
+#define QIB_7322_RcvCtrl_0_RcvPartitionKeyDisable_RMASK 0x1
+#define QIB_7322_RcvCtrl_0_RcvQPMapEnable_LSB 0x28
+#define QIB_7322_RcvCtrl_0_RcvQPMapEnable_MSB 0x28
+#define QIB_7322_RcvCtrl_0_RcvQPMapEnable_RMASK 0x1
+#define QIB_7322_RcvCtrl_0_RcvIBPortEnable_LSB 0x27
+#define QIB_7322_RcvCtrl_0_RcvIBPortEnable_MSB 0x27
+#define QIB_7322_RcvCtrl_0_RcvIBPortEnable_RMASK 0x1
+#define QIB_7322_RcvCtrl_0_ContextEnableUser_LSB 0x2
+#define QIB_7322_RcvCtrl_0_ContextEnableUser_MSB 0x11
+#define QIB_7322_RcvCtrl_0_ContextEnableUser_RMASK 0xFFFF
+#define QIB_7322_RcvCtrl_0_ContextEnableKernel_LSB 0x0
+#define QIB_7322_RcvCtrl_0_ContextEnableKernel_MSB 0x0
+#define QIB_7322_RcvCtrl_0_ContextEnableKernel_RMASK 0x1
+
+#define QIB_7322_RcvBTHQP_0_OFFS 0x1108
+#define QIB_7322_RcvBTHQP_0_DEF 0x0000000000000000
+#define QIB_7322_RcvBTHQP_0_RcvBTHQP_LSB 0x0
+#define QIB_7322_RcvBTHQP_0_RcvBTHQP_MSB 0x17
+#define QIB_7322_RcvBTHQP_0_RcvBTHQP_RMASK 0xFFFFFF
+
+#define QIB_7322_RcvQPMapTableA_0_OFFS 0x1110
+#define QIB_7322_RcvQPMapTableA_0_DEF 0x0000000000000000
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext5_LSB 0x19
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext5_MSB 0x1D
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext5_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext4_LSB 0x14
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext4_MSB 0x18
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext4_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext3_LSB 0xF
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext3_MSB 0x13
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext3_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext2_LSB 0xA
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext2_MSB 0xE
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext2_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext1_LSB 0x5
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext1_MSB 0x9
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext1_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext0_LSB 0x0
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext0_MSB 0x4
+#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext0_RMASK 0x1F
+
+#define QIB_7322_RcvQPMapTableB_0_OFFS 0x1118
+#define QIB_7322_RcvQPMapTableB_0_DEF 0x0000000000000000
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext11_LSB 0x19
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext11_MSB 0x1D
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext11_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext10_LSB 0x14
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext10_MSB 0x18
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext10_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext9_LSB 0xF
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext9_MSB 0x13
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext9_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext8_LSB 0xA
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext8_MSB 0xE
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext8_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext7_LSB 0x5
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext7_MSB 0x9
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext7_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext6_LSB 0x0
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext6_MSB 0x4
+#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext6_RMASK 0x1F
+
+#define QIB_7322_RcvQPMapTableC_0_OFFS 0x1120
+#define QIB_7322_RcvQPMapTableC_0_DEF 0x0000000000000000
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext17_LSB 0x19
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext17_MSB 0x1D
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext17_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext16_LSB 0x14
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext16_MSB 0x18
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext16_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext15_LSB 0xF
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext15_MSB 0x13
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext15_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext14_LSB 0xA
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext14_MSB 0xE
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext14_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext13_LSB 0x5
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext13_MSB 0x9
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext13_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext12_LSB 0x0
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext12_MSB 0x4
+#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext12_RMASK 0x1F
+
+#define QIB_7322_RcvQPMapTableD_0_OFFS 0x1128
+#define QIB_7322_RcvQPMapTableD_0_DEF 0x0000000000000000
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext23_LSB 0x19
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext23_MSB 0x1D
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext23_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext22_LSB 0x14
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext22_MSB 0x18
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext22_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext21_LSB 0xF
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext21_MSB 0x13
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext21_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext20_LSB 0xA
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext20_MSB 0xE
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext20_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext19_LSB 0x5
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext19_MSB 0x9
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext19_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext18_LSB 0x0
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext18_MSB 0x4
+#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext18_RMASK 0x1F
+
+#define QIB_7322_RcvQPMapTableE_0_OFFS 0x1130
+#define QIB_7322_RcvQPMapTableE_0_DEF 0x0000000000000000
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext29_LSB 0x19
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext29_MSB 0x1D
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext29_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext28_LSB 0x14
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext28_MSB 0x18
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext28_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext27_LSB 0xF
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext27_MSB 0x13
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext27_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext26_LSB 0xA
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext26_MSB 0xE
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext26_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext25_LSB 0x5
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext25_MSB 0x9
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext25_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext24_LSB 0x0
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext24_MSB 0x4
+#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext24_RMASK 0x1F
+
+#define QIB_7322_RcvQPMapTableF_0_OFFS 0x1138
+#define QIB_7322_RcvQPMapTableF_0_DEF 0x0000000000000000
+#define QIB_7322_RcvQPMapTableF_0_RcvQPMapContext31_LSB 0x5
+#define QIB_7322_RcvQPMapTableF_0_RcvQPMapContext31_MSB 0x9
+#define QIB_7322_RcvQPMapTableF_0_RcvQPMapContext31_RMASK 0x1F
+#define QIB_7322_RcvQPMapTableF_0_RcvQPMapContext30_LSB 0x0
+#define QIB_7322_RcvQPMapTableF_0_RcvQPMapContext30_MSB 0x4
+#define QIB_7322_RcvQPMapTableF_0_RcvQPMapContext30_RMASK 0x1F
+
+#define QIB_7322_PSStat_0_OFFS 0x1140
+#define QIB_7322_PSStat_0_DEF 0x0000000000000000
+
+#define QIB_7322_PSStart_0_OFFS 0x1148
+#define QIB_7322_PSStart_0_DEF 0x0000000000000000
+
+#define QIB_7322_PSInterval_0_OFFS 0x1150
+#define QIB_7322_PSInterval_0_DEF 0x0000000000000000
+
+#define QIB_7322_RcvStatus_0_OFFS 0x1160
+#define QIB_7322_RcvStatus_0_DEF 0x0000000000000000
+#define QIB_7322_RcvStatus_0_DmaeqBlockingContext_LSB 0x1
+#define QIB_7322_RcvStatus_0_DmaeqBlockingContext_MSB 0x5
+#define QIB_7322_RcvStatus_0_DmaeqBlockingContext_RMASK 0x1F
+#define QIB_7322_RcvStatus_0_RxPktInProgress_LSB 0x0
+#define QIB_7322_RcvStatus_0_RxPktInProgress_MSB 0x0
+#define QIB_7322_RcvStatus_0_RxPktInProgress_RMASK 0x1
+
+#define QIB_7322_RcvPartitionKey_0_OFFS 0x1168
+#define QIB_7322_RcvPartitionKey_0_DEF 0x0000000000000000
+
+#define QIB_7322_RcvQPMulticastContext_0_OFFS 0x1170
+#define QIB_7322_RcvQPMulticastContext_0_DEF 0x0000000000000000
+#define QIB_7322_RcvQPMulticastContext_0_RcvQpMcContext_LSB 0x0
+#define QIB_7322_RcvQPMulticastContext_0_RcvQpMcContext_MSB 0x4
+#define QIB_7322_RcvQPMulticastContext_0_RcvQpMcContext_RMASK 0x1F
+
+#define QIB_7322_RcvPktLEDCnt_0_OFFS 0x1178
+#define QIB_7322_RcvPktLEDCnt_0_DEF 0x0000000000000000
+#define QIB_7322_RcvPktLEDCnt_0_ONperiod_LSB 0x20
+#define QIB_7322_RcvPktLEDCnt_0_ONperiod_MSB 0x3F
+#define QIB_7322_RcvPktLEDCnt_0_ONperiod_RMASK 0xFFFFFFFF
+#define QIB_7322_RcvPktLEDCnt_0_OFFperiod_LSB 0x0
+#define QIB_7322_RcvPktLEDCnt_0_OFFperiod_MSB 0x1F
+#define QIB_7322_RcvPktLEDCnt_0_OFFperiod_RMASK 0xFFFFFFFF
+
+#define QIB_7322_SendDmaIdleCnt_0_OFFS 0x1180
+#define QIB_7322_SendDmaIdleCnt_0_DEF 0x0000000000000000
+#define QIB_7322_SendDmaIdleCnt_0_SendDmaIdleCnt_LSB 0x0
+#define QIB_7322_SendDmaIdleCnt_0_SendDmaIdleCnt_MSB 0xF
+#define QIB_7322_SendDmaIdleCnt_0_SendDmaIdleCnt_RMASK 0xFFFF
+
+#define QIB_7322_SendDmaReloadCnt_0_OFFS 0x1188
+#define QIB_7322_SendDmaReloadCnt_0_DEF 0x0000000000000000
+#define QIB_7322_SendDmaReloadCnt_0_SendDmaReloadCnt_LSB 0x0
+#define QIB_7322_SendDmaReloadCnt_0_SendDmaReloadCnt_MSB 0xF
+#define QIB_7322_SendDmaReloadCnt_0_SendDmaReloadCnt_RMASK 0xFFFF
+
+#define QIB_7322_SendDmaDescCnt_0_OFFS 0x1190
+#define QIB_7322_SendDmaDescCnt_0_DEF 0x0000000000000000
+#define QIB_7322_SendDmaDescCnt_0_SendDmaDescCnt_LSB 0x0
+#define QIB_7322_SendDmaDescCnt_0_SendDmaDescCnt_MSB 0xF
+#define QIB_7322_SendDmaDescCnt_0_SendDmaDescCnt_RMASK 0xFFFF
+
+#define QIB_7322_SendCtrl_0_OFFS 0x11C0
+#define QIB_7322_SendCtrl_0_DEF 0x0000000000000000
+#define QIB_7322_SendCtrl_0_IBVLArbiterEn_LSB 0xF
+#define QIB_7322_SendCtrl_0_IBVLArbiterEn_MSB 0xF
+#define QIB_7322_SendCtrl_0_IBVLArbiterEn_RMASK 0x1
+#define QIB_7322_SendCtrl_0_TxeDrainRmFifo_LSB 0xE
+#define QIB_7322_SendCtrl_0_TxeDrainRmFifo_MSB 0xE
+#define QIB_7322_SendCtrl_0_TxeDrainRmFifo_RMASK 0x1
+#define QIB_7322_SendCtrl_0_TxeDrainLaFifo_LSB 0xD
+#define QIB_7322_SendCtrl_0_TxeDrainLaFifo_MSB 0xD
+#define QIB_7322_SendCtrl_0_TxeDrainLaFifo_RMASK 0x1
+#define QIB_7322_SendCtrl_0_SDmaHalt_LSB 0xC
+#define QIB_7322_SendCtrl_0_SDmaHalt_MSB 0xC
+#define QIB_7322_SendCtrl_0_SDmaHalt_RMASK 0x1
+#define QIB_7322_SendCtrl_0_SDmaEnable_LSB 0xB
+#define QIB_7322_SendCtrl_0_SDmaEnable_MSB 0xB
+#define QIB_7322_SendCtrl_0_SDmaEnable_RMASK 0x1
+#define QIB_7322_SendCtrl_0_SDmaSingleDescriptor_LSB 0xA
+#define QIB_7322_SendCtrl_0_SDmaSingleDescriptor_MSB 0xA
+#define QIB_7322_SendCtrl_0_SDmaSingleDescriptor_RMASK 0x1
+#define QIB_7322_SendCtrl_0_SDmaIntEnable_LSB 0x9
+#define QIB_7322_SendCtrl_0_SDmaIntEnable_MSB 0x9
+#define QIB_7322_SendCtrl_0_SDmaIntEnable_RMASK 0x1
+#define QIB_7322_SendCtrl_0_SDmaCleanup_LSB 0x8
+#define QIB_7322_SendCtrl_0_SDmaCleanup_MSB 0x8
+#define QIB_7322_SendCtrl_0_SDmaCleanup_RMASK 0x1
+#define QIB_7322_SendCtrl_0_ForceCreditUpToDate_LSB 0x7
+#define QIB_7322_SendCtrl_0_ForceCreditUpToDate_MSB 0x7
+#define QIB_7322_SendCtrl_0_ForceCreditUpToDate_RMASK 0x1
+#define QIB_7322_SendCtrl_0_SendEnable_LSB 0x3
+#define QIB_7322_SendCtrl_0_SendEnable_MSB 0x3
+#define QIB_7322_SendCtrl_0_SendEnable_RMASK 0x1
+#define QIB_7322_SendCtrl_0_TxeBypassIbc_LSB 0x1
+#define QIB_7322_SendCtrl_0_TxeBypassIbc_MSB 0x1
+#define QIB_7322_SendCtrl_0_TxeBypassIbc_RMASK 0x1
+#define QIB_7322_SendCtrl_0_TxeAbortIbc_LSB 0x0
+#define QIB_7322_SendCtrl_0_TxeAbortIbc_MSB 0x0
+#define QIB_7322_SendCtrl_0_TxeAbortIbc_RMASK 0x1
+
+#define QIB_7322_SendDmaBase_0_OFFS 0x11F8
+#define QIB_7322_SendDmaBase_0_DEF 0x0000000000000000
+#define QIB_7322_SendDmaBase_0_SendDmaBase_LSB 0x0
+#define QIB_7322_SendDmaBase_0_SendDmaBase_MSB 0x2F
+#define QIB_7322_SendDmaBase_0_SendDmaBase_RMASK 0xFFFFFFFFFFFF
+
+#define QIB_7322_SendDmaLenGen_0_OFFS 0x1200
+#define QIB_7322_SendDmaLenGen_0_DEF 0x0000000000000000
+#define QIB_7322_SendDmaLenGen_0_Generation_LSB 0x10
+#define QIB_7322_SendDmaLenGen_0_Generation_MSB 0x12
+#define QIB_7322_SendDmaLenGen_0_Generation_RMASK 0x7
+#define QIB_7322_SendDmaLenGen_0_Length_LSB 0x0
+#define QIB_7322_SendDmaLenGen_0_Length_MSB 0xF
+#define QIB_7322_SendDmaLenGen_0_Length_RMASK 0xFFFF
+
+#define QIB_7322_SendDmaTail_0_OFFS 0x1208
+#define QIB_7322_SendDmaTail_0_DEF 0x0000000000000000
+#define QIB_7322_SendDmaTail_0_SendDmaTail_LSB 0x0
+#define QIB_7322_SendDmaTail_0_SendDmaTail_MSB 0xF
+#define QIB_7322_SendDmaTail_0_SendDmaTail_RMASK 0xFFFF
+
+#define QIB_7322_SendDmaHead_0_OFFS 0x1210
+#define QIB_7322_SendDmaHead_0_DEF 0x0000000000000000
+#define QIB_7322_SendDmaHead_0_InternalSendDmaHead_LSB 0x20
+#define QIB_7322_SendDmaHead_0_InternalSendDmaHead_MSB 0x2F
+#define QIB_7322_SendDmaHead_0_InternalSendDmaHead_RMASK 0xFFFF
+#define QIB_7322_SendDmaHead_0_SendDmaHead_LSB 0x0
+#define QIB_7322_SendDmaHead_0_SendDmaHead_MSB 0xF
+#define QIB_7322_SendDmaHead_0_SendDmaHead_RMASK 0xFFFF
+
+#define QIB_7322_SendDmaHeadAddr_0_OFFS 0x1218
+#define QIB_7322_SendDmaHeadAddr_0_DEF 0x0000000000000000
+#define QIB_7322_SendDmaHeadAddr_0_SendDmaHeadAddr_LSB 0x0
+#define QIB_7322_SendDmaHeadAddr_0_SendDmaHeadAddr_MSB 0x2F
+#define QIB_7322_SendDmaHeadAddr_0_SendDmaHeadAddr_RMASK 0xFFFFFFFFFFFF
+
+#define QIB_7322_SendDmaBufMask0_0_OFFS 0x1220
+#define QIB_7322_SendDmaBufMask0_0_DEF 0x0000000000000000
+#define QIB_7322_SendDmaBufMask0_0_BufMask_63_0_LSB 0x0
+#define QIB_7322_SendDmaBufMask0_0_BufMask_63_0_MSB 0x3F
+#define QIB_7322_SendDmaBufMask0_0_BufMask_63_0_RMASK 0x0
+
+#define QIB_7322_SendDmaStatus_0_OFFS 0x1238
+#define QIB_7322_SendDmaStatus_0_DEF 0x0000000042000000
+#define QIB_7322_SendDmaStatus_0_ScoreBoardDrainInProg_LSB 0x3F
+#define QIB_7322_SendDmaStatus_0_ScoreBoardDrainInProg_MSB 0x3F
+#define QIB_7322_SendDmaStatus_0_ScoreBoardDrainInProg_RMASK 0x1
+#define QIB_7322_SendDmaStatus_0_HaltInProg_LSB 0x3E
+#define QIB_7322_SendDmaStatus_0_HaltInProg_MSB 0x3E
+#define QIB_7322_SendDmaStatus_0_HaltInProg_RMASK 0x1
+#define QIB_7322_SendDmaStatus_0_InternalSDmaHalt_LSB 0x3D
+#define QIB_7322_SendDmaStatus_0_InternalSDmaHalt_MSB 0x3D
+#define QIB_7322_SendDmaStatus_0_InternalSDmaHalt_RMASK 0x1
+#define QIB_7322_SendDmaStatus_0_ScbDescIndex_13_0_LSB 0x2F
+#define QIB_7322_SendDmaStatus_0_ScbDescIndex_13_0_MSB 0x3C
+#define QIB_7322_SendDmaStatus_0_ScbDescIndex_13_0_RMASK 0x3FFF
+#define QIB_7322_SendDmaStatus_0_RpyLowAddr_6_0_LSB 0x28
+#define QIB_7322_SendDmaStatus_0_RpyLowAddr_6_0_MSB 0x2E
+#define QIB_7322_SendDmaStatus_0_RpyLowAddr_6_0_RMASK 0x7F
+#define QIB_7322_SendDmaStatus_0_RpyTag_7_0_LSB 0x20
+#define QIB_7322_SendDmaStatus_0_RpyTag_7_0_MSB 0x27
+#define QIB_7322_SendDmaStatus_0_RpyTag_7_0_RMASK 0xFF
+#define QIB_7322_SendDmaStatus_0_ScbFull_LSB 0x1F
+#define QIB_7322_SendDmaStatus_0_ScbFull_MSB 0x1F
+#define QIB_7322_SendDmaStatus_0_ScbFull_RMASK 0x1
+#define QIB_7322_SendDmaStatus_0_ScbEmpty_LSB 0x1E
+#define QIB_7322_SendDmaStatus_0_ScbEmpty_MSB 0x1E
+#define QIB_7322_SendDmaStatus_0_ScbEmpty_RMASK 0x1
+#define QIB_7322_SendDmaStatus_0_ScbEntryValid_LSB 0x1D
+#define QIB_7322_SendDmaStatus_0_ScbEntryValid_MSB 0x1D
+#define QIB_7322_SendDmaStatus_0_ScbEntryValid_RMASK 0x1
+#define QIB_7322_SendDmaStatus_0_ScbFetchDescFlag_LSB 0x1C
+#define QIB_7322_SendDmaStatus_0_ScbFetchDescFlag_MSB 0x1C
+#define QIB_7322_SendDmaStatus_0_ScbFetchDescFlag_RMASK 0x1
+#define QIB_7322_SendDmaStatus_0_SplFifoReadyToGo_LSB 0x1B
+#define QIB_7322_SendDmaStatus_0_SplFifoReadyToGo_MSB 0x1B
+#define QIB_7322_SendDmaStatus_0_SplFifoReadyToGo_RMASK 0x1
+#define QIB_7322_SendDmaStatus_0_SplFifoDisarmed_LSB 0x1A
+#define QIB_7322_SendDmaStatus_0_SplFifoDisarmed_MSB 0x1A
+#define QIB_7322_SendDmaStatus_0_SplFifoDisarmed_RMASK 0x1
+#define QIB_7322_SendDmaStatus_0_SplFifoEmpty_LSB 0x19
+#define QIB_7322_SendDmaStatus_0_SplFifoEmpty_MSB 0x19
+#define QIB_7322_SendDmaStatus_0_SplFifoEmpty_RMASK 0x1
+#define QIB_7322_SendDmaStatus_0_SplFifoFull_LSB 0x18
+#define QIB_7322_SendDmaStatus_0_SplFifoFull_MSB 0x18
+#define QIB_7322_SendDmaStatus_0_SplFifoFull_RMASK 0x1
+#define QIB_7322_SendDmaStatus_0_SplFifoBufNum_LSB 0x10
+#define QIB_7322_SendDmaStatus_0_SplFifoBufNum_MSB 0x17
+#define QIB_7322_SendDmaStatus_0_SplFifoBufNum_RMASK 0xFF
+#define QIB_7322_SendDmaStatus_0_SplFifoDescIndex_LSB 0x0
+#define QIB_7322_SendDmaStatus_0_SplFifoDescIndex_MSB 0xF
+#define QIB_7322_SendDmaStatus_0_SplFifoDescIndex_RMASK 0xFFFF
+
+#define QIB_7322_SendDmaPriorityThld_0_OFFS 0x1258
+#define QIB_7322_SendDmaPriorityThld_0_DEF 0x0000000000000000
+#define QIB_7322_SendDmaPriorityThld_0_PriorityThreshold_LSB 0x0
+#define QIB_7322_SendDmaPriorityThld_0_PriorityThreshold_MSB 0x3
+#define QIB_7322_SendDmaPriorityThld_0_PriorityThreshold_RMASK 0xF
+
+#define QIB_7322_SendHdrErrSymptom_0_OFFS 0x1260
+#define QIB_7322_SendHdrErrSymptom_0_DEF 0x0000000000000000
+#define QIB_7322_SendHdrErrSymptom_0_NonKeyPacket_LSB 0x6
+#define QIB_7322_SendHdrErrSymptom_0_NonKeyPacket_MSB 0x6
+#define QIB_7322_SendHdrErrSymptom_0_NonKeyPacket_RMASK 0x1
+#define QIB_7322_SendHdrErrSymptom_0_GRHFail_LSB 0x5
+#define QIB_7322_SendHdrErrSymptom_0_GRHFail_MSB 0x5
+#define QIB_7322_SendHdrErrSymptom_0_GRHFail_RMASK 0x1
+#define QIB_7322_SendHdrErrSymptom_0_PkeyFail_LSB 0x4
+#define QIB_7322_SendHdrErrSymptom_0_PkeyFail_MSB 0x4
+#define QIB_7322_SendHdrErrSymptom_0_PkeyFail_RMASK 0x1
+#define QIB_7322_SendHdrErrSymptom_0_QPFail_LSB 0x3
+#define QIB_7322_SendHdrErrSymptom_0_QPFail_MSB 0x3
+#define QIB_7322_SendHdrErrSymptom_0_QPFail_RMASK 0x1
+#define QIB_7322_SendHdrErrSymptom_0_SLIDFail_LSB 0x2
+#define QIB_7322_SendHdrErrSymptom_0_SLIDFail_MSB 0x2
+#define QIB_7322_SendHdrErrSymptom_0_SLIDFail_RMASK 0x1
+#define QIB_7322_SendHdrErrSymptom_0_RawIPV6_LSB 0x1
+#define QIB_7322_SendHdrErrSymptom_0_RawIPV6_MSB 0x1
+#define QIB_7322_SendHdrErrSymptom_0_RawIPV6_RMASK 0x1
+#define QIB_7322_SendHdrErrSymptom_0_PacketTooSmall_LSB 0x0
+#define QIB_7322_SendHdrErrSymptom_0_PacketTooSmall_MSB 0x0
+#define QIB_7322_SendHdrErrSymptom_0_PacketTooSmall_RMASK 0x1
+
+#define QIB_7322_RxCreditVL0_0_OFFS 0x1280
+#define QIB_7322_RxCreditVL0_0_DEF 0x0000000000000000
+#define QIB_7322_RxCreditVL0_0_RxBufrConsumedVL_LSB 0x10
+#define QIB_7322_RxCreditVL0_0_RxBufrConsumedVL_MSB 0x1B
+#define QIB_7322_RxCreditVL0_0_RxBufrConsumedVL_RMASK 0xFFF
+#define QIB_7322_RxCreditVL0_0_RxMaxCreditVL_LSB 0x0
+#define QIB_7322_RxCreditVL0_0_RxMaxCreditVL_MSB 0xB
+#define QIB_7322_RxCreditVL0_0_RxMaxCreditVL_RMASK 0xFFF
+
+#define QIB_7322_SendDmaBufUsed0_0_OFFS 0x1480
+#define QIB_7322_SendDmaBufUsed0_0_DEF 0x0000000000000000
+#define QIB_7322_SendDmaBufUsed0_0_BufUsed_63_0_LSB 0x0
+#define QIB_7322_SendDmaBufUsed0_0_BufUsed_63_0_MSB 0x3F
+#define QIB_7322_SendDmaBufUsed0_0_BufUsed_63_0_RMASK 0x0
+
+#define QIB_7322_SendCheckControl_0_OFFS 0x14A8
+#define QIB_7322_SendCheckControl_0_DEF 0x0000000000000000
+#define QIB_7322_SendCheckControl_0_PKey_En_LSB 0x4
+#define QIB_7322_SendCheckControl_0_PKey_En_MSB 0x4
+#define QIB_7322_SendCheckControl_0_PKey_En_RMASK 0x1
+#define QIB_7322_SendCheckControl_0_BTHQP_En_LSB 0x3
+#define QIB_7322_SendCheckControl_0_BTHQP_En_MSB 0x3
+#define QIB_7322_SendCheckControl_0_BTHQP_En_RMASK 0x1
+#define QIB_7322_SendCheckControl_0_SLID_En_LSB 0x2
+#define QIB_7322_SendCheckControl_0_SLID_En_MSB 0x2
+#define QIB_7322_SendCheckControl_0_SLID_En_RMASK 0x1
+#define QIB_7322_SendCheckControl_0_RawIPV6_En_LSB 0x1
+#define QIB_7322_SendCheckControl_0_RawIPV6_En_MSB 0x1
+#define QIB_7322_SendCheckControl_0_RawIPV6_En_RMASK 0x1
+#define QIB_7322_SendCheckControl_0_PacketTooSmall_En_LSB 0x0
+#define QIB_7322_SendCheckControl_0_PacketTooSmall_En_MSB 0x0
+#define QIB_7322_SendCheckControl_0_PacketTooSmall_En_RMASK 0x1
+
+#define QIB_7322_SendIBSLIDMask_0_OFFS 0x14B0
+#define QIB_7322_SendIBSLIDMask_0_DEF 0x0000000000000000
+#define QIB_7322_SendIBSLIDMask_0_SendIBSLIDMask_15_0_LSB 0x0
+#define QIB_7322_SendIBSLIDMask_0_SendIBSLIDMask_15_0_MSB 0xF
+#define QIB_7322_SendIBSLIDMask_0_SendIBSLIDMask_15_0_RMASK 0xFFFF
+
+#define QIB_7322_SendIBSLIDAssign_0_OFFS 0x14B8
+#define QIB_7322_SendIBSLIDAssign_0_DEF 0x0000000000000000
+#define QIB_7322_SendIBSLIDAssign_0_SendIBSLIDAssign_15_0_LSB 0x0
+#define QIB_7322_SendIBSLIDAssign_0_SendIBSLIDAssign_15_0_MSB 0xF
+#define QIB_7322_SendIBSLIDAssign_0_SendIBSLIDAssign_15_0_RMASK 0xFFFF
+
+#define QIB_7322_IBCStatusA_0_OFFS 0x1540
+#define QIB_7322_IBCStatusA_0_DEF 0x0000000000000X02
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL7_LSB 0x27
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL7_MSB 0x27
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL7_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL6_LSB 0x26
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL6_MSB 0x26
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL6_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL5_LSB 0x25
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL5_MSB 0x25
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL5_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL4_LSB 0x24
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL4_MSB 0x24
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL4_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL3_LSB 0x23
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL3_MSB 0x23
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL3_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL2_LSB 0x22
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL2_MSB 0x22
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL2_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL1_LSB 0x21
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL1_MSB 0x21
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL1_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL0_LSB 0x20
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL0_MSB 0x20
+#define QIB_7322_IBCStatusA_0_TxCreditOk_VL0_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_TxReady_LSB 0x1E
+#define QIB_7322_IBCStatusA_0_TxReady_MSB 0x1E
+#define QIB_7322_IBCStatusA_0_TxReady_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_LinkSpeedQDR_LSB 0x1D
+#define QIB_7322_IBCStatusA_0_LinkSpeedQDR_MSB 0x1D
+#define QIB_7322_IBCStatusA_0_LinkSpeedQDR_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_ScrambleCapRemote_LSB 0xF
+#define QIB_7322_IBCStatusA_0_ScrambleCapRemote_MSB 0xF
+#define QIB_7322_IBCStatusA_0_ScrambleCapRemote_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_ScrambleEn_LSB 0xE
+#define QIB_7322_IBCStatusA_0_ScrambleEn_MSB 0xE
+#define QIB_7322_IBCStatusA_0_ScrambleEn_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_IBTxLaneReversed_LSB 0xD
+#define QIB_7322_IBCStatusA_0_IBTxLaneReversed_MSB 0xD
+#define QIB_7322_IBCStatusA_0_IBTxLaneReversed_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_IBRxLaneReversed_LSB 0xC
+#define QIB_7322_IBCStatusA_0_IBRxLaneReversed_MSB 0xC
+#define QIB_7322_IBCStatusA_0_IBRxLaneReversed_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_DDS_RXEQ_FAIL_LSB 0xA
+#define QIB_7322_IBCStatusA_0_DDS_RXEQ_FAIL_MSB 0xA
+#define QIB_7322_IBCStatusA_0_DDS_RXEQ_FAIL_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_LinkWidthActive_LSB 0x9
+#define QIB_7322_IBCStatusA_0_LinkWidthActive_MSB 0x9
+#define QIB_7322_IBCStatusA_0_LinkWidthActive_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_LinkSpeedActive_LSB 0x8
+#define QIB_7322_IBCStatusA_0_LinkSpeedActive_MSB 0x8
+#define QIB_7322_IBCStatusA_0_LinkSpeedActive_RMASK 0x1
+#define QIB_7322_IBCStatusA_0_LinkState_LSB 0x5
+#define QIB_7322_IBCStatusA_0_LinkState_MSB 0x7
+#define QIB_7322_IBCStatusA_0_LinkState_RMASK 0x7
+#define QIB_7322_IBCStatusA_0_LinkTrainingState_LSB 0x0
+#define QIB_7322_IBCStatusA_0_LinkTrainingState_MSB 0x4
+#define QIB_7322_IBCStatusA_0_LinkTrainingState_RMASK 0x1F
+
+#define QIB_7322_IBCStatusB_0_OFFS 0x1548
+#define QIB_7322_IBCStatusB_0_DEF 0x00000000XXXXXXXX
+#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_debug_LSB 0x27
+#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_debug_MSB 0x27
+#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_debug_RMASK 0x1
+#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_reached_threshold_LSB 0x26
+#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_reached_threshold_MSB 0x26
+#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_reached_threshold_RMASK 0x1
+#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_started_LSB 0x25
+#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_started_MSB 0x25
+#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_started_RMASK 0x1
+#define QIB_7322_IBCStatusB_0_heartbeat_timed_out_LSB 0x24
+#define QIB_7322_IBCStatusB_0_heartbeat_timed_out_MSB 0x24
+#define QIB_7322_IBCStatusB_0_heartbeat_timed_out_RMASK 0x1
+#define QIB_7322_IBCStatusB_0_heartbeat_crosstalk_LSB 0x20
+#define QIB_7322_IBCStatusB_0_heartbeat_crosstalk_MSB 0x23
+#define QIB_7322_IBCStatusB_0_heartbeat_crosstalk_RMASK 0xF
+#define QIB_7322_IBCStatusB_0_RxEqLocalDevice_LSB 0x1E
+#define QIB_7322_IBCStatusB_0_RxEqLocalDevice_MSB 0x1F
+#define QIB_7322_IBCStatusB_0_RxEqLocalDevice_RMASK 0x3
+#define QIB_7322_IBCStatusB_0_ReqDDSLocalFromRmt_LSB 0x1A
+#define QIB_7322_IBCStatusB_0_ReqDDSLocalFromRmt_MSB 0x1D
+#define QIB_7322_IBCStatusB_0_ReqDDSLocalFromRmt_RMASK 0xF
+#define QIB_7322_IBCStatusB_0_LinkRoundTripLatency_LSB 0x0
+#define QIB_7322_IBCStatusB_0_LinkRoundTripLatency_MSB 0x19
+#define QIB_7322_IBCStatusB_0_LinkRoundTripLatency_RMASK 0x3FFFFFF
+
+#define QIB_7322_IBCCtrlA_0_OFFS 0x1560
+#define QIB_7322_IBCCtrlA_0_DEF 0x0000000000000000
+#define QIB_7322_IBCCtrlA_0_Loopback_LSB 0x3F
+#define QIB_7322_IBCCtrlA_0_Loopback_MSB 0x3F
+#define QIB_7322_IBCCtrlA_0_Loopback_RMASK 0x1
+#define QIB_7322_IBCCtrlA_0_LinkDownDefaultState_LSB 0x3E
+#define QIB_7322_IBCCtrlA_0_LinkDownDefaultState_MSB 0x3E
+#define QIB_7322_IBCCtrlA_0_LinkDownDefaultState_RMASK 0x1
+#define QIB_7322_IBCCtrlA_0_IBLinkEn_LSB 0x3D
+#define QIB_7322_IBCCtrlA_0_IBLinkEn_MSB 0x3D
+#define QIB_7322_IBCCtrlA_0_IBLinkEn_RMASK 0x1
+#define QIB_7322_IBCCtrlA_0_IBStatIntReductionEn_LSB 0x3C
+#define QIB_7322_IBCCtrlA_0_IBStatIntReductionEn_MSB 0x3C
+#define QIB_7322_IBCCtrlA_0_IBStatIntReductionEn_RMASK 0x1
+#define QIB_7322_IBCCtrlA_0_NumVLane_LSB 0x30
+#define QIB_7322_IBCCtrlA_0_NumVLane_MSB 0x32
+#define QIB_7322_IBCCtrlA_0_NumVLane_RMASK 0x7
+#define QIB_7322_IBCCtrlA_0_OverrunThreshold_LSB 0x24
+#define QIB_7322_IBCCtrlA_0_OverrunThreshold_MSB 0x27
+#define QIB_7322_IBCCtrlA_0_OverrunThreshold_RMASK 0xF
+#define QIB_7322_IBCCtrlA_0_PhyerrThreshold_LSB 0x20
+#define QIB_7322_IBCCtrlA_0_PhyerrThreshold_MSB 0x23
+#define QIB_7322_IBCCtrlA_0_PhyerrThreshold_RMASK 0xF
+#define QIB_7322_IBCCtrlA_0_MaxPktLen_LSB 0x15
+#define QIB_7322_IBCCtrlA_0_MaxPktLen_MSB 0x1F
+#define QIB_7322_IBCCtrlA_0_MaxPktLen_RMASK 0x7FF
+#define QIB_7322_IBCCtrlA_0_LinkCmd_LSB 0x13
+#define QIB_7322_IBCCtrlA_0_LinkCmd_MSB 0x14
+#define QIB_7322_IBCCtrlA_0_LinkCmd_RMASK 0x3
+#define QIB_7322_IBCCtrlA_0_LinkInitCmd_LSB 0x10
+#define QIB_7322_IBCCtrlA_0_LinkInitCmd_MSB 0x12
+#define QIB_7322_IBCCtrlA_0_LinkInitCmd_RMASK 0x7
+#define QIB_7322_IBCCtrlA_0_FlowCtrlWaterMark_LSB 0x8
+#define QIB_7322_IBCCtrlA_0_FlowCtrlWaterMark_MSB 0xF
+#define QIB_7322_IBCCtrlA_0_FlowCtrlWaterMark_RMASK 0xFF
+#define QIB_7322_IBCCtrlA_0_FlowCtrlPeriod_LSB 0x0
+#define QIB_7322_IBCCtrlA_0_FlowCtrlPeriod_MSB 0x7
+#define QIB_7322_IBCCtrlA_0_FlowCtrlPeriod_RMASK 0xFF
+
+#define QIB_7322_IBCCtrlB_0_OFFS 0x1568
+#define QIB_7322_IBCCtrlB_0_DEF 0x00000000000305FF
+#define QIB_7322_IBCCtrlB_0_IB_DLID_MASK_LSB 0x30
+#define QIB_7322_IBCCtrlB_0_IB_DLID_MASK_MSB 0x3F
+#define QIB_7322_IBCCtrlB_0_IB_DLID_MASK_RMASK 0xFFFF
+#define QIB_7322_IBCCtrlB_0_IB_DLID_LSB 0x20
+#define QIB_7322_IBCCtrlB_0_IB_DLID_MSB 0x2F
+#define QIB_7322_IBCCtrlB_0_IB_DLID_RMASK 0xFFFF
+#define QIB_7322_IBCCtrlB_0_IB_ENABLE_FILT_DPKT_LSB 0x1B
+#define QIB_7322_IBCCtrlB_0_IB_ENABLE_FILT_DPKT_MSB 0x1B
+#define QIB_7322_IBCCtrlB_0_IB_ENABLE_FILT_DPKT_RMASK 0x1
+#define QIB_7322_IBCCtrlB_0_HRTBT_REQ_LSB 0x1A
+#define QIB_7322_IBCCtrlB_0_HRTBT_REQ_MSB 0x1A
+#define QIB_7322_IBCCtrlB_0_HRTBT_REQ_RMASK 0x1
+#define QIB_7322_IBCCtrlB_0_HRTBT_PORT_LSB 0x12
+#define QIB_7322_IBCCtrlB_0_HRTBT_PORT_MSB 0x19
+#define QIB_7322_IBCCtrlB_0_HRTBT_PORT_RMASK 0xFF
+#define QIB_7322_IBCCtrlB_0_HRTBT_AUTO_LSB 0x11
+#define QIB_7322_IBCCtrlB_0_HRTBT_AUTO_MSB 0x11
+#define QIB_7322_IBCCtrlB_0_HRTBT_AUTO_RMASK 0x1
+#define QIB_7322_IBCCtrlB_0_HRTBT_ENB_LSB 0x10
+#define QIB_7322_IBCCtrlB_0_HRTBT_ENB_MSB 0x10
+#define QIB_7322_IBCCtrlB_0_HRTBT_ENB_RMASK 0x1
+#define QIB_7322_IBCCtrlB_0_SD_DDS_LSB 0xC
+#define QIB_7322_IBCCtrlB_0_SD_DDS_MSB 0xF
+#define QIB_7322_IBCCtrlB_0_SD_DDS_RMASK 0xF
+#define QIB_7322_IBCCtrlB_0_SD_DDSV_LSB 0xB
+#define QIB_7322_IBCCtrlB_0_SD_DDSV_MSB 0xB
+#define QIB_7322_IBCCtrlB_0_SD_DDSV_RMASK 0x1
+#define QIB_7322_IBCCtrlB_0_SD_ADD_ENB_LSB 0xA
+#define QIB_7322_IBCCtrlB_0_SD_ADD_ENB_MSB 0xA
+#define QIB_7322_IBCCtrlB_0_SD_ADD_ENB_RMASK 0x1
+#define QIB_7322_IBCCtrlB_0_SD_RX_EQUAL_ENABLE_LSB 0x9
+#define QIB_7322_IBCCtrlB_0_SD_RX_EQUAL_ENABLE_MSB 0x9
+#define QIB_7322_IBCCtrlB_0_SD_RX_EQUAL_ENABLE_RMASK 0x1
+#define QIB_7322_IBCCtrlB_0_IB_LANE_REV_SUPPORTED_LSB 0x8
+#define QIB_7322_IBCCtrlB_0_IB_LANE_REV_SUPPORTED_MSB 0x8
+#define QIB_7322_IBCCtrlB_0_IB_LANE_REV_SUPPORTED_RMASK 0x1
+#define QIB_7322_IBCCtrlB_0_IB_POLARITY_REV_SUPP_LSB 0x7
+#define QIB_7322_IBCCtrlB_0_IB_POLARITY_REV_SUPP_MSB 0x7
+#define QIB_7322_IBCCtrlB_0_IB_POLARITY_REV_SUPP_RMASK 0x1
+#define QIB_7322_IBCCtrlB_0_IB_NUM_CHANNELS_LSB 0x5
+#define QIB_7322_IBCCtrlB_0_IB_NUM_CHANNELS_MSB 0x6
+#define QIB_7322_IBCCtrlB_0_IB_NUM_CHANNELS_RMASK 0x3
+#define QIB_7322_IBCCtrlB_0_SD_SPEED_QDR_LSB 0x4
+#define QIB_7322_IBCCtrlB_0_SD_SPEED_QDR_MSB 0x4
+#define QIB_7322_IBCCtrlB_0_SD_SPEED_QDR_RMASK 0x1
+#define QIB_7322_IBCCtrlB_0_SD_SPEED_DDR_LSB 0x3
+#define QIB_7322_IBCCtrlB_0_SD_SPEED_DDR_MSB 0x3
+#define QIB_7322_IBCCtrlB_0_SD_SPEED_DDR_RMASK 0x1
+#define QIB_7322_IBCCtrlB_0_SD_SPEED_SDR_LSB 0x2
+#define QIB_7322_IBCCtrlB_0_SD_SPEED_SDR_MSB 0x2
+#define QIB_7322_IBCCtrlB_0_SD_SPEED_SDR_RMASK 0x1
+#define QIB_7322_IBCCtrlB_0_SD_SPEED_LSB 0x1
+#define QIB_7322_IBCCtrlB_0_SD_SPEED_MSB 0x1
+#define QIB_7322_IBCCtrlB_0_SD_SPEED_RMASK 0x1
+#define QIB_7322_IBCCtrlB_0_IB_ENHANCED_MODE_LSB 0x0
+#define QIB_7322_IBCCtrlB_0_IB_ENHANCED_MODE_MSB 0x0
+#define QIB_7322_IBCCtrlB_0_IB_ENHANCED_MODE_RMASK 0x1
+
+#define QIB_7322_IBCCtrlC_0_OFFS 0x1570
+#define QIB_7322_IBCCtrlC_0_DEF 0x0000000000000301
+#define QIB_7322_IBCCtrlC_0_IB_BACK_PORCH_LSB 0x5
+#define QIB_7322_IBCCtrlC_0_IB_BACK_PORCH_MSB 0x9
+#define QIB_7322_IBCCtrlC_0_IB_BACK_PORCH_RMASK 0x1F
+#define QIB_7322_IBCCtrlC_0_IB_FRONT_PORCH_LSB 0x0
+#define QIB_7322_IBCCtrlC_0_IB_FRONT_PORCH_MSB 0x4
+#define QIB_7322_IBCCtrlC_0_IB_FRONT_PORCH_RMASK 0x1F
+
+#define QIB_7322_HRTBT_GUID_0_OFFS 0x1588
+#define QIB_7322_HRTBT_GUID_0_DEF 0x0000000000000000
+
+#define QIB_7322_IB_SDTEST_IF_TX_0_OFFS 0x1590
+#define QIB_7322_IB_SDTEST_IF_TX_0_DEF 0x0000000000000000
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_RX_CFG_LSB 0x30
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_RX_CFG_MSB 0x3F
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_RX_CFG_RMASK 0xFFFF
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_TX_CFG_LSB 0x20
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_TX_CFG_MSB 0x2F
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_TX_CFG_RMASK 0xFFFF
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_SPEED_LSB 0xD
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_SPEED_MSB 0xF
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_SPEED_RMASK 0x7
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_OPCODE_LSB 0xB
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_OPCODE_MSB 0xC
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_OPCODE_RMASK 0x3
+#define QIB_7322_IB_SDTEST_IF_TX_0_CREDIT_CHANGE_LSB 0x4
+#define QIB_7322_IB_SDTEST_IF_TX_0_CREDIT_CHANGE_MSB 0x4
+#define QIB_7322_IB_SDTEST_IF_TX_0_CREDIT_CHANGE_RMASK 0x1
+#define QIB_7322_IB_SDTEST_IF_TX_0_VL_CAP_LSB 0x2
+#define QIB_7322_IB_SDTEST_IF_TX_0_VL_CAP_MSB 0x3
+#define QIB_7322_IB_SDTEST_IF_TX_0_VL_CAP_RMASK 0x3
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_3_TX_VALID_LSB 0x1
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_3_TX_VALID_MSB 0x1
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_3_TX_VALID_RMASK 0x1
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_T_TX_VALID_LSB 0x0
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_T_TX_VALID_MSB 0x0
+#define QIB_7322_IB_SDTEST_IF_TX_0_TS_T_TX_VALID_RMASK 0x1
+
+#define QIB_7322_IB_SDTEST_IF_RX_0_OFFS 0x1598
+#define QIB_7322_IB_SDTEST_IF_RX_0_DEF 0x0000000000000000
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_RX_CFG_LSB 0x30
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_RX_CFG_MSB 0x3F
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_RX_CFG_RMASK 0xFFFF
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_TX_CFG_LSB 0x20
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_TX_CFG_MSB 0x2F
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_TX_CFG_RMASK 0xFFFF
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_B_LSB 0x18
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_B_MSB 0x1F
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_B_RMASK 0xFF
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_A_LSB 0x10
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_A_MSB 0x17
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_A_RMASK 0xFF
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_3_RX_VALID_LSB 0x1
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_3_RX_VALID_MSB 0x1
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_3_RX_VALID_RMASK 0x1
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_T_RX_VALID_LSB 0x0
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_T_RX_VALID_MSB 0x0
+#define QIB_7322_IB_SDTEST_IF_RX_0_TS_T_RX_VALID_RMASK 0x1
+
+#define QIB_7322_IBNCModeCtrl_0_OFFS 0x15B8
+#define QIB_7322_IBNCModeCtrl_0_DEF 0x0000000000000000
+#define QIB_7322_IBNCModeCtrl_0_ScrambleCapRemoteForce_LSB 0x22
+#define QIB_7322_IBNCModeCtrl_0_ScrambleCapRemoteForce_MSB 0x22
+#define QIB_7322_IBNCModeCtrl_0_ScrambleCapRemoteForce_RMASK 0x1
+#define QIB_7322_IBNCModeCtrl_0_ScrambleCapRemoteMask_LSB 0x21
+#define QIB_7322_IBNCModeCtrl_0_ScrambleCapRemoteMask_MSB 0x21
+#define QIB_7322_IBNCModeCtrl_0_ScrambleCapRemoteMask_RMASK 0x1
+#define QIB_7322_IBNCModeCtrl_0_ScrambleCapLocal_LSB 0x20
+#define QIB_7322_IBNCModeCtrl_0_ScrambleCapLocal_MSB 0x20
+#define QIB_7322_IBNCModeCtrl_0_ScrambleCapLocal_RMASK 0x1
+#define QIB_7322_IBNCModeCtrl_0_TSMCode_TS2_LSB 0x11
+#define QIB_7322_IBNCModeCtrl_0_TSMCode_TS2_MSB 0x19
+#define QIB_7322_IBNCModeCtrl_0_TSMCode_TS2_RMASK 0x1FF
+#define QIB_7322_IBNCModeCtrl_0_TSMCode_TS1_LSB 0x8
+#define QIB_7322_IBNCModeCtrl_0_TSMCode_TS1_MSB 0x10
+#define QIB_7322_IBNCModeCtrl_0_TSMCode_TS1_RMASK 0x1FF
+#define QIB_7322_IBNCModeCtrl_0_TSMEnable_ignore_TSM_on_rx_LSB 0x2
+#define QIB_7322_IBNCModeCtrl_0_TSMEnable_ignore_TSM_on_rx_MSB 0x2
+#define QIB_7322_IBNCModeCtrl_0_TSMEnable_ignore_TSM_on_rx_RMASK 0x1
+#define QIB_7322_IBNCModeCtrl_0_TSMEnable_send_TS2_LSB 0x1
+#define QIB_7322_IBNCModeCtrl_0_TSMEnable_send_TS2_MSB 0x1
+#define QIB_7322_IBNCModeCtrl_0_TSMEnable_send_TS2_RMASK 0x1
+#define QIB_7322_IBNCModeCtrl_0_TSMEnable_send_TS1_LSB 0x0
+#define QIB_7322_IBNCModeCtrl_0_TSMEnable_send_TS1_MSB 0x0
+#define QIB_7322_IBNCModeCtrl_0_TSMEnable_send_TS1_RMASK 0x1
+
+#define QIB_7322_IBSerdesStatus_0_OFFS 0x15D0
+#define QIB_7322_IBSerdesStatus_0_DEF 0x0000000000000000
+
+#define QIB_7322_IBPCSConfig_0_OFFS 0x15D8
+#define QIB_7322_IBPCSConfig_0_DEF 0x0000000000000007
+#define QIB_7322_IBPCSConfig_0_link_sync_mask_LSB 0x9
+#define QIB_7322_IBPCSConfig_0_link_sync_mask_MSB 0x12
+#define QIB_7322_IBPCSConfig_0_link_sync_mask_RMASK 0x3FF
+#define QIB_7322_IBPCSConfig_0_xcv_rreset_LSB 0x2
+#define QIB_7322_IBPCSConfig_0_xcv_rreset_MSB 0x2
+#define QIB_7322_IBPCSConfig_0_xcv_rreset_RMASK 0x1
+#define QIB_7322_IBPCSConfig_0_xcv_treset_LSB 0x1
+#define QIB_7322_IBPCSConfig_0_xcv_treset_MSB 0x1
+#define QIB_7322_IBPCSConfig_0_xcv_treset_RMASK 0x1
+#define QIB_7322_IBPCSConfig_0_tx_rx_reset_LSB 0x0
+#define QIB_7322_IBPCSConfig_0_tx_rx_reset_MSB 0x0
+#define QIB_7322_IBPCSConfig_0_tx_rx_reset_RMASK 0x1
+
+#define QIB_7322_IBSerdesCtrl_0_OFFS 0x15E0
+#define QIB_7322_IBSerdesCtrl_0_DEF 0x0000000000FFA00F
+#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_QDR_LSB 0x1A
+#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_QDR_MSB 0x1A
+#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_QDR_RMASK 0x1
+#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_DDR_LSB 0x19
+#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_DDR_MSB 0x19
+#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_DDR_RMASK 0x1
+#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_SDR_LSB 0x18
+#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_SDR_MSB 0x18
+#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_SDR_RMASK 0x1
+#define QIB_7322_IBSerdesCtrl_0_CHANNEL_RESET_N_LSB 0x14
+#define QIB_7322_IBSerdesCtrl_0_CHANNEL_RESET_N_MSB 0x17
+#define QIB_7322_IBSerdesCtrl_0_CHANNEL_RESET_N_RMASK 0xF
+#define QIB_7322_IBSerdesCtrl_0_CGMODE_LSB 0x10
+#define QIB_7322_IBSerdesCtrl_0_CGMODE_MSB 0x13
+#define QIB_7322_IBSerdesCtrl_0_CGMODE_RMASK 0xF
+#define QIB_7322_IBSerdesCtrl_0_IB_LAT_MODE_LSB 0xF
+#define QIB_7322_IBSerdesCtrl_0_IB_LAT_MODE_MSB 0xF
+#define QIB_7322_IBSerdesCtrl_0_IB_LAT_MODE_RMASK 0x1
+#define QIB_7322_IBSerdesCtrl_0_RXLOSEN_LSB 0xD
+#define QIB_7322_IBSerdesCtrl_0_RXLOSEN_MSB 0xD
+#define QIB_7322_IBSerdesCtrl_0_RXLOSEN_RMASK 0x1
+#define QIB_7322_IBSerdesCtrl_0_LPEN_LSB 0xC
+#define QIB_7322_IBSerdesCtrl_0_LPEN_MSB 0xC
+#define QIB_7322_IBSerdesCtrl_0_LPEN_RMASK 0x1
+#define QIB_7322_IBSerdesCtrl_0_PLLPD_LSB 0xB
+#define QIB_7322_IBSerdesCtrl_0_PLLPD_MSB 0xB
+#define QIB_7322_IBSerdesCtrl_0_PLLPD_RMASK 0x1
+#define QIB_7322_IBSerdesCtrl_0_TXPD_LSB 0xA
+#define QIB_7322_IBSerdesCtrl_0_TXPD_MSB 0xA
+#define QIB_7322_IBSerdesCtrl_0_TXPD_RMASK 0x1
+#define QIB_7322_IBSerdesCtrl_0_RXPD_LSB 0x9
+#define QIB_7322_IBSerdesCtrl_0_RXPD_MSB 0x9
+#define QIB_7322_IBSerdesCtrl_0_RXPD_RMASK 0x1
+#define QIB_7322_IBSerdesCtrl_0_TXIDLE_LSB 0x8
+#define QIB_7322_IBSerdesCtrl_0_TXIDLE_MSB 0x8
+#define QIB_7322_IBSerdesCtrl_0_TXIDLE_RMASK 0x1
+#define QIB_7322_IBSerdesCtrl_0_CMODE_LSB 0x0
+#define QIB_7322_IBSerdesCtrl_0_CMODE_MSB 0x6
+#define QIB_7322_IBSerdesCtrl_0_CMODE_RMASK 0x7F
+
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_OFFS 0x1600
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_DEF 0x0000000000000000
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_tx_override_deemphasis_select_LSB 0x1F
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_tx_override_deemphasis_select_MSB 0x1F
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_tx_override_deemphasis_select_RMASK 0x1
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_reset_tx_deemphasis_override_LSB 0x1E
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_reset_tx_deemphasis_override_MSB 0x1E
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_reset_tx_deemphasis_override_RMASK 0x1
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txampcntl_d2a_LSB 0xE
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txampcntl_d2a_MSB 0x11
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txampcntl_d2a_RMASK 0xF
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txc0_ena_LSB 0x9
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txc0_ena_MSB 0xD
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txc0_ena_RMASK 0x1F
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcp1_ena_LSB 0x5
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcp1_ena_MSB 0x8
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcp1_ena_RMASK 0xF
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcn1_xtra_emph0_LSB 0x3
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcn1_xtra_emph0_MSB 0x4
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcn1_xtra_emph0_RMASK 0x3
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcn1_ena_LSB 0x0
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcn1_ena_MSB 0x2
+#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcn1_ena_RMASK 0x7
+
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_OFFS 0x1640
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_DEF 0x0000000000000000
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch3_LSB 0x27
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch3_MSB 0x27
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch3_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch2_LSB 0x26
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch2_MSB 0x26
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch2_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch1_LSB 0x25
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch1_MSB 0x25
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch1_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch0_LSB 0x24
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch0_MSB 0x24
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch0_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch3_LSB 0x23
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch3_MSB 0x23
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch3_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch2_LSB 0x22
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch2_MSB 0x22
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch2_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch1_LSB 0x21
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch1_MSB 0x21
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch1_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch0_LSB 0x20
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch0_MSB 0x20
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch0_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch3_LSB 0x18
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch3_MSB 0x1F
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch3_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch2_LSB 0x10
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch2_MSB 0x17
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch2_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch1_LSB 0x8
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch1_MSB 0xF
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch1_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch0_LSB 0x0
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch0_MSB 0x7
+#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch0_RMASK 0xFF
+
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_OFFS 0x1648
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_DEF 0x0000000000000000
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch3_LSB 0x27
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch3_MSB 0x27
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch3_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch2_LSB 0x26
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch2_MSB 0x26
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch2_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch1_LSB 0x25
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch1_MSB 0x25
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch1_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch0_LSB 0x24
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch0_MSB 0x24
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch0_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch3_LSB 0x23
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch3_MSB 0x23
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch3_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch2_LSB 0x22
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch2_MSB 0x22
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch2_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch1_LSB 0x21
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch1_MSB 0x21
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch1_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch0_LSB 0x20
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch0_MSB 0x20
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch0_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch3_LSB 0x18
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch3_MSB 0x1F
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch3_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch2_LSB 0x10
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch2_MSB 0x17
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch2_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch1_LSB 0x8
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch1_MSB 0xF
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch1_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch0_LSB 0x0
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch0_MSB 0x7
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch0_RMASK 0xFF
+
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_OFFS 0x1650
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_DEF 0x0000000000000000
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch3_LSB 0x27
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch3_MSB 0x27
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch3_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch2_LSB 0x26
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch2_MSB 0x26
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch2_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch1_LSB 0x25
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch1_MSB 0x25
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch1_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch0_LSB 0x24
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch0_MSB 0x24
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch0_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch3_LSB 0x23
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch3_MSB 0x23
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch3_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch2_LSB 0x22
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch2_MSB 0x22
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch2_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch1_LSB 0x21
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch1_MSB 0x21
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch1_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch0_LSB 0x20
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch0_MSB 0x20
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch0_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch3_LSB 0x18
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch3_MSB 0x1F
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch3_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch2_LSB 0x10
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch2_MSB 0x17
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch2_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch1_LSB 0x8
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch1_MSB 0xF
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch1_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch0_LSB 0x0
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch0_MSB 0x7
+#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch0_RMASK 0xFF
+
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_OFFS 0x1658
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_DEF 0x0000000000000000
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch3_LSB 0x27
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch3_MSB 0x27
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch3_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch2_LSB 0x26
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch2_MSB 0x26
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch2_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch1_LSB 0x25
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch1_MSB 0x25
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch1_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch0_LSB 0x24
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch0_MSB 0x24
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch0_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch3_LSB 0x23
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch3_MSB 0x23
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch3_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch2_LSB 0x22
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch2_MSB 0x22
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch2_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch1_LSB 0x21
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch1_MSB 0x21
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch1_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch0_LSB 0x20
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch0_MSB 0x20
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch0_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch3_LSB 0x18
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch3_MSB 0x1F
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch3_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch2_LSB 0x10
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch2_MSB 0x17
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch2_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch1_LSB 0x8
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch1_MSB 0xF
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch1_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch0_LSB 0x0
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch0_MSB 0x7
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch0_RMASK 0xFF
+
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_OFFS 0x1660
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_DEF 0x0000000000000000
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch3_LSB 0x27
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch3_MSB 0x27
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch3_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch2_LSB 0x26
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch2_MSB 0x26
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch2_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch1_LSB 0x25
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch1_MSB 0x25
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch1_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch0_LSB 0x24
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch0_MSB 0x24
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch0_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch3_LSB 0x23
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch3_MSB 0x23
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch3_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch2_LSB 0x22
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch2_MSB 0x22
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch2_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch1_LSB 0x21
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch1_MSB 0x21
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch1_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch0_LSB 0x20
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch0_MSB 0x20
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch0_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch3_LSB 0x18
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch3_MSB 0x1F
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch3_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch2_LSB 0x10
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch2_MSB 0x17
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch2_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch1_LSB 0x8
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch1_MSB 0xF
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch1_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch0_LSB 0x0
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch0_MSB 0x7
+#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch0_RMASK 0xFF
+
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_OFFS 0x1668
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_DEF 0x0000000000000000
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch3_LSB 0x27
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch3_MSB 0x27
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch3_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch2_LSB 0x26
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch2_MSB 0x26
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch2_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch1_LSB 0x25
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch1_MSB 0x25
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch1_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch0_LSB 0x24
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch0_MSB 0x24
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch0_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch3_LSB 0x23
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch3_MSB 0x23
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch3_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch2_LSB 0x22
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch2_MSB 0x22
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch2_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch1_LSB 0x21
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch1_MSB 0x21
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch1_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch0_LSB 0x20
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch0_MSB 0x20
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch0_RMASK 0x1
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch3_LSB 0x18
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch3_MSB 0x1F
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch3_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch2_LSB 0x10
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch2_MSB 0x17
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch2_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch1_LSB 0x8
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch1_MSB 0xF
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch1_RMASK 0xFF
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch0_LSB 0x0
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch0_MSB 0x7
+#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch0_RMASK 0xFF
+
+#define QIB_7322_ADAPT_DISABLE_TIMER_THRESHOLD_0_OFFS 0x1670
+#define QIB_7322_ADAPT_DISABLE_TIMER_THRESHOLD_0_DEF 0x0000000000000000
+
+#define QIB_7322_HighPriorityLimit_0_OFFS 0x1BC0
+#define QIB_7322_HighPriorityLimit_0_DEF 0x0000000000000000
+#define QIB_7322_HighPriorityLimit_0_Limit_LSB 0x0
+#define QIB_7322_HighPriorityLimit_0_Limit_MSB 0x7
+#define QIB_7322_HighPriorityLimit_0_Limit_RMASK 0xFF
+
+#define QIB_7322_LowPriority0_0_OFFS 0x1C00
+#define QIB_7322_LowPriority0_0_DEF 0x0000000000000000
+#define QIB_7322_LowPriority0_0_VirtualLane_LSB 0x10
+#define QIB_7322_LowPriority0_0_VirtualLane_MSB 0x12
+#define QIB_7322_LowPriority0_0_VirtualLane_RMASK 0x7
+#define QIB_7322_LowPriority0_0_Weight_LSB 0x0
+#define QIB_7322_LowPriority0_0_Weight_MSB 0x7
+#define QIB_7322_LowPriority0_0_Weight_RMASK 0xFF
+
+#define QIB_7322_HighPriority0_0_OFFS 0x1E00
+#define QIB_7322_HighPriority0_0_DEF 0x0000000000000000
+#define QIB_7322_HighPriority0_0_VirtualLane_LSB 0x10
+#define QIB_7322_HighPriority0_0_VirtualLane_MSB 0x12
+#define QIB_7322_HighPriority0_0_VirtualLane_RMASK 0x7
+#define QIB_7322_HighPriority0_0_Weight_LSB 0x0
+#define QIB_7322_HighPriority0_0_Weight_MSB 0x7
+#define QIB_7322_HighPriority0_0_Weight_RMASK 0xFF
+
+#define QIB_7322_CntrRegBase_1_OFFS 0x2028
+#define QIB_7322_CntrRegBase_1_DEF 0x0000000000013000
+
+#define QIB_7322_RcvQPMulticastContext_1_OFFS 0x2170
+
+#define QIB_7322_SendCtrl_1_OFFS 0x21C0
+
+#define QIB_7322_SendBufAvail0_OFFS 0x3000
+#define QIB_7322_SendBufAvail0_DEF 0x0000000000000000
+#define QIB_7322_SendBufAvail0_SendBuf_31_0_LSB 0x0
+#define QIB_7322_SendBufAvail0_SendBuf_31_0_MSB 0x3F
+#define QIB_7322_SendBufAvail0_SendBuf_31_0_RMASK 0x0
+
+#define QIB_7322_MsixTable_OFFS 0x8000
+#define QIB_7322_MsixTable_DEF 0x0000000000000000
+
+#define QIB_7322_MsixPba_OFFS 0x9000
+#define QIB_7322_MsixPba_DEF 0x0000000000000000
+
+#define QIB_7322_LAMemory_OFFS 0xA000
+#define QIB_7322_LAMemory_DEF 0x0000000000000000
+
+#define QIB_7322_LBIntCnt_OFFS 0x11000
+#define QIB_7322_LBIntCnt_DEF 0x0000000000000000
+
+#define QIB_7322_LBFlowStallCnt_OFFS 0x11008
+#define QIB_7322_LBFlowStallCnt_DEF 0x0000000000000000
+
+#define QIB_7322_RxTIDFullErrCnt_OFFS 0x110D0
+#define QIB_7322_RxTIDFullErrCnt_DEF 0x0000000000000000
+
+#define QIB_7322_RxTIDValidErrCnt_OFFS 0x110D8
+#define QIB_7322_RxTIDValidErrCnt_DEF 0x0000000000000000
+
+#define QIB_7322_RxP0HdrEgrOvflCnt_OFFS 0x110E8
+#define QIB_7322_RxP0HdrEgrOvflCnt_DEF 0x0000000000000000
+
+#define QIB_7322_PcieRetryBufDiagQwordCnt_OFFS 0x111A0
+#define QIB_7322_PcieRetryBufDiagQwordCnt_DEF 0x0000000000000000
+
+#define QIB_7322_RxTidFlowDropCnt_OFFS 0x111E0
+#define QIB_7322_RxTidFlowDropCnt_DEF 0x0000000000000000
+
+#define QIB_7322_LBIntCnt_0_OFFS 0x12000
+#define QIB_7322_LBIntCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_TxCreditUpToDateTimeOut_0_OFFS 0x12008
+#define QIB_7322_TxCreditUpToDateTimeOut_0_DEF 0x0000000000000000
+
+#define QIB_7322_TxSDmaDescCnt_0_OFFS 0x12010
+#define QIB_7322_TxSDmaDescCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_TxUnsupVLErrCnt_0_OFFS 0x12018
+#define QIB_7322_TxUnsupVLErrCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_TxDataPktCnt_0_OFFS 0x12020
+#define QIB_7322_TxDataPktCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_TxFlowPktCnt_0_OFFS 0x12028
+#define QIB_7322_TxFlowPktCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_TxDwordCnt_0_OFFS 0x12030
+#define QIB_7322_TxDwordCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_TxLenErrCnt_0_OFFS 0x12038
+#define QIB_7322_TxLenErrCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_TxMaxMinLenErrCnt_0_OFFS 0x12040
+#define QIB_7322_TxMaxMinLenErrCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_TxUnderrunCnt_0_OFFS 0x12048
+#define QIB_7322_TxUnderrunCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_TxFlowStallCnt_0_OFFS 0x12050
+#define QIB_7322_TxFlowStallCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_TxDroppedPktCnt_0_OFFS 0x12058
+#define QIB_7322_TxDroppedPktCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxDroppedPktCnt_0_OFFS 0x12060
+#define QIB_7322_RxDroppedPktCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxDataPktCnt_0_OFFS 0x12068
+#define QIB_7322_RxDataPktCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxFlowPktCnt_0_OFFS 0x12070
+#define QIB_7322_RxFlowPktCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxDwordCnt_0_OFFS 0x12078
+#define QIB_7322_RxDwordCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxLenErrCnt_0_OFFS 0x12080
+#define QIB_7322_RxLenErrCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxMaxMinLenErrCnt_0_OFFS 0x12088
+#define QIB_7322_RxMaxMinLenErrCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxICRCErrCnt_0_OFFS 0x12090
+#define QIB_7322_RxICRCErrCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxVCRCErrCnt_0_OFFS 0x12098
+#define QIB_7322_RxVCRCErrCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxFlowCtrlViolCnt_0_OFFS 0x120A0
+#define QIB_7322_RxFlowCtrlViolCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxVersionErrCnt_0_OFFS 0x120A8
+#define QIB_7322_RxVersionErrCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxLinkMalformCnt_0_OFFS 0x120B0
+#define QIB_7322_RxLinkMalformCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxEBPCnt_0_OFFS 0x120B8
+#define QIB_7322_RxEBPCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxLPCRCErrCnt_0_OFFS 0x120C0
+#define QIB_7322_RxLPCRCErrCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxBufOvflCnt_0_OFFS 0x120C8
+#define QIB_7322_RxBufOvflCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxLenTruncateCnt_0_OFFS 0x120D0
+#define QIB_7322_RxLenTruncateCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxPKeyMismatchCnt_0_OFFS 0x120E0
+#define QIB_7322_RxPKeyMismatchCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_IBLinkDownedCnt_0_OFFS 0x12180
+#define QIB_7322_IBLinkDownedCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_IBSymbolErrCnt_0_OFFS 0x12188
+#define QIB_7322_IBSymbolErrCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_IBStatusChangeCnt_0_OFFS 0x12190
+#define QIB_7322_IBStatusChangeCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_IBLinkErrRecoveryCnt_0_OFFS 0x12198
+#define QIB_7322_IBLinkErrRecoveryCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_ExcessBufferOvflCnt_0_OFFS 0x121A8
+#define QIB_7322_ExcessBufferOvflCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_LocalLinkIntegrityErrCnt_0_OFFS 0x121B0
+#define QIB_7322_LocalLinkIntegrityErrCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxVlErrCnt_0_OFFS 0x121B8
+#define QIB_7322_RxVlErrCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxDlidFltrCnt_0_OFFS 0x121C0
+#define QIB_7322_RxDlidFltrCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxVL15DroppedPktCnt_0_OFFS 0x121C8
+#define QIB_7322_RxVL15DroppedPktCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxOtherLocalPhyErrCnt_0_OFFS 0x121D0
+#define QIB_7322_RxOtherLocalPhyErrCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_RxQPInvalidContextCnt_0_OFFS 0x121D8
+#define QIB_7322_RxQPInvalidContextCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_TxHeadersErrCnt_0_OFFS 0x121F8
+#define QIB_7322_TxHeadersErrCnt_0_DEF 0x0000000000000000
+
+#define QIB_7322_PSRcvDataCount_0_OFFS 0x12218
+#define QIB_7322_PSRcvDataCount_0_DEF 0x0000000000000000
+
+#define QIB_7322_PSRcvPktsCount_0_OFFS 0x12220
+#define QIB_7322_PSRcvPktsCount_0_DEF 0x0000000000000000
+
+#define QIB_7322_PSXmitDataCount_0_OFFS 0x12228
+#define QIB_7322_PSXmitDataCount_0_DEF 0x0000000000000000
+
+#define QIB_7322_PSXmitPktsCount_0_OFFS 0x12230
+#define QIB_7322_PSXmitPktsCount_0_DEF 0x0000000000000000
+
+#define QIB_7322_PSXmitWaitCount_0_OFFS 0x12238
+#define QIB_7322_PSXmitWaitCount_0_DEF 0x0000000000000000
+
+#define QIB_7322_LBIntCnt_1_OFFS 0x13000
+#define QIB_7322_LBIntCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_TxCreditUpToDateTimeOut_1_OFFS 0x13008
+#define QIB_7322_TxCreditUpToDateTimeOut_1_DEF 0x0000000000000000
+
+#define QIB_7322_TxSDmaDescCnt_1_OFFS 0x13010
+#define QIB_7322_TxSDmaDescCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_TxUnsupVLErrCnt_1_OFFS 0x13018
+#define QIB_7322_TxUnsupVLErrCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_TxDataPktCnt_1_OFFS 0x13020
+#define QIB_7322_TxDataPktCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_TxFlowPktCnt_1_OFFS 0x13028
+#define QIB_7322_TxFlowPktCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_TxDwordCnt_1_OFFS 0x13030
+#define QIB_7322_TxDwordCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_TxLenErrCnt_1_OFFS 0x13038
+#define QIB_7322_TxLenErrCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_TxMaxMinLenErrCnt_1_OFFS 0x13040
+#define QIB_7322_TxMaxMinLenErrCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_TxUnderrunCnt_1_OFFS 0x13048
+#define QIB_7322_TxUnderrunCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_TxFlowStallCnt_1_OFFS 0x13050
+#define QIB_7322_TxFlowStallCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_TxDroppedPktCnt_1_OFFS 0x13058
+#define QIB_7322_TxDroppedPktCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxDroppedPktCnt_1_OFFS 0x13060
+#define QIB_7322_RxDroppedPktCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxDataPktCnt_1_OFFS 0x13068
+#define QIB_7322_RxDataPktCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxFlowPktCnt_1_OFFS 0x13070
+#define QIB_7322_RxFlowPktCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxDwordCnt_1_OFFS 0x13078
+#define QIB_7322_RxDwordCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxLenErrCnt_1_OFFS 0x13080
+#define QIB_7322_RxLenErrCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxMaxMinLenErrCnt_1_OFFS 0x13088
+#define QIB_7322_RxMaxMinLenErrCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxICRCErrCnt_1_OFFS 0x13090
+#define QIB_7322_RxICRCErrCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxVCRCErrCnt_1_OFFS 0x13098
+#define QIB_7322_RxVCRCErrCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxFlowCtrlViolCnt_1_OFFS 0x130A0
+#define QIB_7322_RxFlowCtrlViolCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxVersionErrCnt_1_OFFS 0x130A8
+#define QIB_7322_RxVersionErrCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxLinkMalformCnt_1_OFFS 0x130B0
+#define QIB_7322_RxLinkMalformCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxEBPCnt_1_OFFS 0x130B8
+#define QIB_7322_RxEBPCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxLPCRCErrCnt_1_OFFS 0x130C0
+#define QIB_7322_RxLPCRCErrCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxBufOvflCnt_1_OFFS 0x130C8
+#define QIB_7322_RxBufOvflCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxLenTruncateCnt_1_OFFS 0x130D0
+#define QIB_7322_RxLenTruncateCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxPKeyMismatchCnt_1_OFFS 0x130E0
+#define QIB_7322_RxPKeyMismatchCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_IBLinkDownedCnt_1_OFFS 0x13180
+#define QIB_7322_IBLinkDownedCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_IBSymbolErrCnt_1_OFFS 0x13188
+#define QIB_7322_IBSymbolErrCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_IBStatusChangeCnt_1_OFFS 0x13190
+#define QIB_7322_IBStatusChangeCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_IBLinkErrRecoveryCnt_1_OFFS 0x13198
+#define QIB_7322_IBLinkErrRecoveryCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_ExcessBufferOvflCnt_1_OFFS 0x131A8
+#define QIB_7322_ExcessBufferOvflCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_LocalLinkIntegrityErrCnt_1_OFFS 0x131B0
+#define QIB_7322_LocalLinkIntegrityErrCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxVlErrCnt_1_OFFS 0x131B8
+#define QIB_7322_RxVlErrCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxDlidFltrCnt_1_OFFS 0x131C0
+#define QIB_7322_RxDlidFltrCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxVL15DroppedPktCnt_1_OFFS 0x131C8
+#define QIB_7322_RxVL15DroppedPktCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxOtherLocalPhyErrCnt_1_OFFS 0x131D0
+#define QIB_7322_RxOtherLocalPhyErrCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_RxQPInvalidContextCnt_1_OFFS 0x131D8
+#define QIB_7322_RxQPInvalidContextCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_TxHeadersErrCnt_1_OFFS 0x131F8
+#define QIB_7322_TxHeadersErrCnt_1_DEF 0x0000000000000000
+
+#define QIB_7322_PSRcvDataCount_1_OFFS 0x13218
+#define QIB_7322_PSRcvDataCount_1_DEF 0x0000000000000000
+
+#define QIB_7322_PSRcvPktsCount_1_OFFS 0x13220
+#define QIB_7322_PSRcvPktsCount_1_DEF 0x0000000000000000
+
+#define QIB_7322_PSXmitDataCount_1_OFFS 0x13228
+#define QIB_7322_PSXmitDataCount_1_DEF 0x0000000000000000
+
+#define QIB_7322_PSXmitPktsCount_1_OFFS 0x13230
+#define QIB_7322_PSXmitPktsCount_1_DEF 0x0000000000000000
+
+#define QIB_7322_PSXmitWaitCount_1_OFFS 0x13238
+#define QIB_7322_PSXmitWaitCount_1_DEF 0x0000000000000000
+
+#define QIB_7322_RcvEgrArray_OFFS 0x14000
+#define QIB_7322_RcvEgrArray_DEF 0x0000000000000000
+#define QIB_7322_RcvEgrArray_RT_BufSize_LSB 0x25
+#define QIB_7322_RcvEgrArray_RT_BufSize_MSB 0x27
+#define QIB_7322_RcvEgrArray_RT_BufSize_RMASK 0x7
+#define QIB_7322_RcvEgrArray_RT_Addr_LSB 0x0
+#define QIB_7322_RcvEgrArray_RT_Addr_MSB 0x24
+#define QIB_7322_RcvEgrArray_RT_Addr_RMASK 0x1FFFFFFFFF
+
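Aside: the RcvEgrArray entry just above is a compact example of how these *_LSB/*_RMASK pairs describe a packed layout — RT_BufSize occupies bits 37..39 and RT_Addr bits 0..36. A hedged sketch of composing such an entry follows; the encoding of the address field itself (units, alignment) is not specified in this excerpt, so both inputs are taken as already-encoded field values and the helper name is illustrative only.

#include <linux/types.h>

/* Illustrative only: pack a receive eager-buffer table entry from its fields. */
static inline u64 example_egr_entry(u64 bufsize_field, u64 addr_field)
{
	return ((bufsize_field & QIB_7322_RcvEgrArray_RT_BufSize_RMASK)
			<< QIB_7322_RcvEgrArray_RT_BufSize_LSB) |
	       ((addr_field & QIB_7322_RcvEgrArray_RT_Addr_RMASK)
			<< QIB_7322_RcvEgrArray_RT_Addr_LSB);
}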
+#define QIB_7322_RcvTIDArray0_OFFS 0x50000
+#define QIB_7322_RcvTIDArray0_DEF 0x0000000000000000
+#define QIB_7322_RcvTIDArray0_RT_BufSize_LSB 0x25
+#define QIB_7322_RcvTIDArray0_RT_BufSize_MSB 0x27
+#define QIB_7322_RcvTIDArray0_RT_BufSize_RMASK 0x7
+#define QIB_7322_RcvTIDArray0_RT_Addr_LSB 0x0
+#define QIB_7322_RcvTIDArray0_RT_Addr_MSB 0x24
+#define QIB_7322_RcvTIDArray0_RT_Addr_RMASK 0x1FFFFFFFFF
+
+#define QIB_7322_IBSD_DDS_MAP_TABLE_0_OFFS 0xD0000
+#define QIB_7322_IBSD_DDS_MAP_TABLE_0_DEF 0x0000000000000000
+
+#define QIB_7322_RcvHdrTail0_OFFS 0x200000
+#define QIB_7322_RcvHdrTail0_DEF 0x0000000000000000
+
+#define QIB_7322_RcvHdrHead0_OFFS 0x200008
+#define QIB_7322_RcvHdrHead0_DEF 0x0000000000000000
+#define QIB_7322_RcvHdrHead0_counter_LSB 0x20
+#define QIB_7322_RcvHdrHead0_counter_MSB 0x2F
+#define QIB_7322_RcvHdrHead0_counter_RMASK 0xFFFF
+#define QIB_7322_RcvHdrHead0_RcvHeadPointer_LSB 0x0
+#define QIB_7322_RcvHdrHead0_RcvHeadPointer_MSB 0x1F
+#define QIB_7322_RcvHdrHead0_RcvHeadPointer_RMASK 0xFFFFFFFF
+
+#define QIB_7322_RcvEgrIndexTail0_OFFS 0x200010
+#define QIB_7322_RcvEgrIndexTail0_DEF 0x0000000000000000
+
+#define QIB_7322_RcvEgrIndexHead0_OFFS 0x200018
+#define QIB_7322_RcvEgrIndexHead0_DEF 0x0000000000000000
+
+#define QIB_7322_RcvTIDFlowTable0_OFFS 0x201000
+#define QIB_7322_RcvTIDFlowTable0_DEF 0x0000000000000000
+#define QIB_7322_RcvTIDFlowTable0_GenMismatch_LSB 0x1C
+#define QIB_7322_RcvTIDFlowTable0_GenMismatch_MSB 0x1C
+#define QIB_7322_RcvTIDFlowTable0_GenMismatch_RMASK 0x1
+#define QIB_7322_RcvTIDFlowTable0_SeqMismatch_LSB 0x1B
+#define QIB_7322_RcvTIDFlowTable0_SeqMismatch_MSB 0x1B
+#define QIB_7322_RcvTIDFlowTable0_SeqMismatch_RMASK 0x1
+#define QIB_7322_RcvTIDFlowTable0_KeepOnGenErr_LSB 0x16
+#define QIB_7322_RcvTIDFlowTable0_KeepOnGenErr_MSB 0x16
+#define QIB_7322_RcvTIDFlowTable0_KeepOnGenErr_RMASK 0x1
+#define QIB_7322_RcvTIDFlowTable0_KeepAfterSeqErr_LSB 0x15
+#define QIB_7322_RcvTIDFlowTable0_KeepAfterSeqErr_MSB 0x15
+#define QIB_7322_RcvTIDFlowTable0_KeepAfterSeqErr_RMASK 0x1
+#define QIB_7322_RcvTIDFlowTable0_HdrSuppEnabled_LSB 0x14
+#define QIB_7322_RcvTIDFlowTable0_HdrSuppEnabled_MSB 0x14
+#define QIB_7322_RcvTIDFlowTable0_HdrSuppEnabled_RMASK 0x1
+#define QIB_7322_RcvTIDFlowTable0_FlowValid_LSB 0x13
+#define QIB_7322_RcvTIDFlowTable0_FlowValid_MSB 0x13
+#define QIB_7322_RcvTIDFlowTable0_FlowValid_RMASK 0x1
+#define QIB_7322_RcvTIDFlowTable0_GenVal_LSB 0xB
+#define QIB_7322_RcvTIDFlowTable0_GenVal_MSB 0x12
+#define QIB_7322_RcvTIDFlowTable0_GenVal_RMASK 0xFF
+#define QIB_7322_RcvTIDFlowTable0_SeqNum_LSB 0x0
+#define QIB_7322_RcvTIDFlowTable0_SeqNum_MSB 0xA
+#define QIB_7322_RcvTIDFlowTable0_SeqNum_RMASK 0x7FF
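The register blocks above all follow one generated pattern: a byte offset (_OFFS), a power-up default (_DEF), and an _LSB/_MSB/_RMASK triple per field, so field access is a plain shift-and-mask. A minimal sketch of token-pasting helpers in the style the qib driver uses to consume these definitions; the names below are illustrative and not part of the generated header.

#include <linux/types.h>

#define SYM_LSB(regname, fldname) (QIB_7322_##regname##_##fldname##_LSB)
#define SYM_RMASK(regname, fldname) \
	((u64)QIB_7322_##regname##_##fldname##_RMASK)
#define SYM_MASK(regname, fldname) \
	(SYM_RMASK(regname, fldname) << SYM_LSB(regname, fldname))
#define SYM_FIELD(value, regname, fldname) \
	(((value) >> SYM_LSB(regname, fldname)) & SYM_RMASK(regname, fldname))

/* Example: extract the LinkState field from a raw IBCStatusA_0 value. */
static inline u64 example_link_state(u64 ibcstatus_a)
{
	return SYM_FIELD(ibcstatus_a, IBCStatusA_0, LinkState);
}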
diff --git a/drivers/infiniband/hw/qib/qib_common.h b/drivers/infiniband/hw/qib/qib_common.h
new file mode 100644
index 000000000000..b3955ed8f794
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_common.h
@@ -0,0 +1,758 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
+ * All rights reserved.
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _QIB_COMMON_H
+#define _QIB_COMMON_H
+
+/*
+ * This file contains defines, structures, etc. that are used
+ * to communicate between kernel and user code.
+ */
+
+/* This is the IEEE-assigned OUI for QLogic Inc. QLogic_IB */
+#define QIB_SRC_OUI_1 0x00
+#define QIB_SRC_OUI_2 0x11
+#define QIB_SRC_OUI_3 0x75
+
+/* version of protocol header (known to chip also). In the long run,
+ * we should be able to generate and accept a range of version numbers;
+ * for now we only accept one, and it's compiled in.
+ */
+#define IPS_PROTO_VERSION 2
+
+/*
+ * These are compile time constants that you may want to enable or disable
+ * if you are trying to debug problems with code or performance.
+ * Define QIB_VERBOSE_TRACING as 1 if you want additional tracing in
+ * fastpath code.
+ * Define QIB_TRACE_REGWRITES as 1 if you want register writes to be
+ * traced in fastpath code.
+ * Define _QIB_TRACING as 0 if you want to remove all tracing in a
+ * compilation unit.
+ */
+
+/*
+ * The value in the BTH QP field that QLogic_IB uses to differentiate
+ * a qlogic_ib protocol IB packet vs standard IB transport.
+ * It needs to be even (0x656b78), because the LSB is sometimes
+ * used for the MSB of context. The change may cause a problem
+ * interoperating with older software.
+ */
+#define QIB_KD_QP 0x656b78
+
+/*
+ * These are the status bits readable (in ASCII form, as a 64-bit value)
+ * from the "status" sysfs file. For binary compatibility, values
+ * must remain as is; removed states can be reused for different
+ * purposes.
+ */
+#define QIB_STATUS_INITTED 0x1 /* basic initialization done */
+/* Chip has been found and initted */
+#define QIB_STATUS_CHIP_PRESENT 0x20
+/* IB link is at ACTIVE, usable for data traffic */
+#define QIB_STATUS_IB_READY 0x40
+/* link is configured, LID, MTU, etc. have been set */
+#define QIB_STATUS_IB_CONF 0x80
+/* A Fatal hardware error has occurred. */
+#define QIB_STATUS_HWERROR 0x200
+
+/*
+ * The list of usermode accessible registers. Also see Reg_* later in file.
+ */
+enum qib_ureg {
+ /* (RO) DMA RcvHdr to be used next. */
+ ur_rcvhdrtail = 0,
+ /* (RW) RcvHdr entry to be processed next by host. */
+ ur_rcvhdrhead = 1,
+ /* (RO) Index of next Eager index to use. */
+ ur_rcvegrindextail = 2,
+ /* (RW) Eager TID to be processed next */
+ ur_rcvegrindexhead = 3,
+ /* For internal use only; max register number. */
+ _QIB_UregMax
+};
+
+/* bit values for spi_runtime_flags */
+#define QIB_RUNTIME_PCIE 0x0002
+#define QIB_RUNTIME_FORCE_WC_ORDER 0x0004
+#define QIB_RUNTIME_RCVHDR_COPY 0x0008
+#define QIB_RUNTIME_MASTER 0x0010
+#define QIB_RUNTIME_RCHK 0x0020
+#define QIB_RUNTIME_NODMA_RTAIL 0x0080
+#define QIB_RUNTIME_SPECIAL_TRIGGER 0x0100
+#define QIB_RUNTIME_SDMA 0x0200
+#define QIB_RUNTIME_FORCE_PIOAVAIL 0x0400
+#define QIB_RUNTIME_PIO_REGSWAPPED 0x0800
+#define QIB_RUNTIME_CTXT_MSB_IN_QP 0x1000
+#define QIB_RUNTIME_CTXT_REDIRECT 0x2000
+#define QIB_RUNTIME_HDRSUPP 0x4000
+
+/*
+ * This structure is returned by qib_userinit() immediately after
+ * open to get implementation-specific info, and info specific to this
+ * instance.
+ *
+ * This struct must have explicit pad fields where type sizes
+ * may result in different alignments between 32 and 64 bit
+ * programs, since the 64 bit kernel requires the user code
+ * to have matching offsets.
+ */
+struct qib_base_info {
+ /* version of hardware, for feature checking. */
+ __u32 spi_hw_version;
+ /* version of software, for feature checking. */
+ __u32 spi_sw_version;
+ /* QLogic_IB context assigned, goes into sent packets */
+ __u16 spi_ctxt;
+ __u16 spi_subctxt;
+ /*
+	 * IB MTU; a packet's IB data must be less than this.
+ * The MTU is in bytes, and will be a multiple of 4 bytes.
+ */
+ __u32 spi_mtu;
+ /*
+ * Size of a PIO buffer. Any given packet's total size must be less
+ * than this (in words). Included is the starting control word, so
+ * if 513 is returned, then total pkt size is 512 words or less.
+ */
+ __u32 spi_piosize;
+ /* size of the TID cache in qlogic_ib, in entries */
+ __u32 spi_tidcnt;
+ /* size of the TID Eager list in qlogic_ib, in entries */
+ __u32 spi_tidegrcnt;
+ /* size of a single receive header queue entry in words. */
+ __u32 spi_rcvhdrent_size;
+ /*
+ * Count of receive header queue entries allocated.
+	 * This may be less than the spu_rcvhdrcnt passed in!
+ */
+ __u32 spi_rcvhdr_cnt;
+
+ /* per-chip and other runtime features bitmap (QIB_RUNTIME_*) */
+ __u32 spi_runtime_flags;
+
+ /* address where hardware receive header queue is mapped */
+ __u64 spi_rcvhdr_base;
+
+ /* user program. */
+
+ /* base address of eager TID receive buffers used by hardware. */
+ __u64 spi_rcv_egrbufs;
+
+ /* Allocated by initialization code, not by protocol. */
+
+ /*
+ * Size of each TID buffer in host memory, starting at
+ * spi_rcv_egrbufs. The buffers are virtually contiguous.
+ */
+ __u32 spi_rcv_egrbufsize;
+ /*
+	 * The special QP (queue pair) value that identifies a qlogic_ib
+ * protocol packet from standard IB packets. More, probably much
+ * more, to be added.
+ */
+ __u32 spi_qpair;
+
+ /*
+ * User register base for init code, not to be used directly by
+ * protocol or applications. Always points to chip registers,
+ * for normal or shared context.
+ */
+ __u64 spi_uregbase;
+ /*
+ * Maximum buffer size in bytes that can be used in a single TID
+ * entry (assuming the buffer is aligned to this boundary). This is
+	 * the minimum of what the hardware and software support. Guaranteed
+ * to be a power of 2.
+ */
+ __u32 spi_tid_maxsize;
+ /*
+ * alignment of each pio send buffer (byte count
+ * to add to spi_piobufbase to get to second buffer)
+ */
+ __u32 spi_pioalign;
+ /*
+ * The index of the first pio buffer available to this process;
+ * needed to do lookup in spi_pioavailaddr; not added to
+ * spi_piobufbase.
+ */
+ __u32 spi_pioindex;
+ /* number of buffers mapped for this process */
+ __u32 spi_piocnt;
+
+ /*
+ * Base address of writeonly pio buffers for this process.
+ * Each buffer has spi_piosize words, and is aligned on spi_pioalign
+ * boundaries. spi_piocnt buffers are mapped from this address
+ */
+ __u64 spi_piobufbase;
+
+ /*
+ * Base address of readonly memory copy of the pioavail registers.
+ * There are 2 bits for each buffer.
+ */
+ __u64 spi_pioavailaddr;
+
+ /*
+ * Address where driver updates a copy of the interface and driver
+ * status (QIB_STATUS_*) as a 64 bit value. It's followed by a
+ * link status qword (formerly combined with driver status), then a
+ * string indicating hardware error, if there was one.
+ */
+ __u64 spi_status;
+
+ /* number of chip ctxts available to user processes */
+ __u32 spi_nctxts;
+ __u16 spi_unit; /* unit number of chip we are using */
+ __u16 spi_port; /* IB port number we are using */
+ /* num bufs in each contiguous set */
+ __u32 spi_rcv_egrperchunk;
+ /* size in bytes of each contiguous set */
+ __u32 spi_rcv_egrchunksize;
+ /* total size of mmap to cover full rcvegrbuffers */
+ __u32 spi_rcv_egrbuftotlen;
+ __u32 spi_rhf_offset; /* dword offset in hdrqent for rcvhdr flags */
+ /* address of readonly memory copy of the rcvhdrq tail register. */
+ __u64 spi_rcvhdr_tailaddr;
+
+ /*
+ * shared memory pages for subctxts if ctxt is shared; these cover
+ * all the processes in the group sharing a single context.
+	 * All have enough space for the num_subcontexts value on this job.
+ */
+ __u64 spi_subctxt_uregbase;
+ __u64 spi_subctxt_rcvegrbuf;
+ __u64 spi_subctxt_rcvhdr_base;
+
+ /* shared memory page for send buffer disarm status */
+ __u64 spi_sendbuf_status;
+} __attribute__ ((aligned(8)));
+
+/*
+ * This version number is given to the driver by the user code during
+ * initialization in the spu_userversion field of qib_user_info, so
+ * the driver can check for compatibility with user code.
+ *
+ * The major version changes when data structures
+ * change in an incompatible way. The driver must be the same or higher
+ * for initialization to succeed. In some cases, a higher version
+ * driver will not interoperate with older software, and initialization
+ * will return an error.
+ */
+#define QIB_USER_SWMAJOR 1
+
+/*
+ * Minor version differences are always compatible
+ * within a major version; however, if the user software is newer
+ * than the driver software, some new features and/or structure fields
+ * may not be implemented; the user code must deal with this if it
+ * cares, or it must abort after initialization reports the difference.
+ */
+#define QIB_USER_SWMINOR 10
+
+#define QIB_USER_SWVERSION ((QIB_USER_SWMAJOR << 16) | QIB_USER_SWMINOR)
+
+#ifndef QIB_KERN_TYPE
+#define QIB_KERN_TYPE 0
+#define QIB_IDSTR "QLogic kernel.org driver"
+#endif
+
+/*
+ * Similarly, this is the kernel version going back to the user. It's
+ * slightly different, in that we want to tell if the driver was built as
+ * part of a QLogic release, or from the driver from openfabrics.org,
+ * kernel.org, or a standard distribution, for support reasons.
+ * The high bit is 0 for non-QLogic and 1 for QLogic-built/supplied.
+ *
+ * It's returned by the driver to the user code during initialization in the
+ * spi_sw_version field of qib_base_info, so the user code can in turn
+ * check for compatibility with the kernel.
+ */
+#define QIB_KERN_SWVERSION ((QIB_KERN_TYPE << 31) | QIB_USER_SWVERSION)
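+
+/*
+ * Illustrative sketch only, not part of the driver ABI: one way code on
+ * either side of the interface might compare a peer's swversion word
+ * against its own, given the encoding above.  The helper name is
+ * hypothetical; the high bit is the QLogic-built flag, not a version field.
+ */
+static inline int qib_swversion_major_matches(__u32 mine, __u32 theirs)
+{
+	__u32 my_major = (mine >> 16) & 0x7fff;
+	__u32 their_major = (theirs >> 16) & 0x7fff;
+
+	/* majors must match; the lower minor then limits available features */
+	return my_major == their_major;
+}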
+
+/*
+ * This structure is passed to qib_userinit() to tell the driver where
+ * user code buffers are, sizes, etc. The offsets and sizes of the
+ * fields must remain unchanged, for binary compatibility. It can
+ * be extended, provided userversion is changed so user code can tell, if needed.
+ */
+struct qib_user_info {
+ /*
+ * version of user software, to detect compatibility issues.
+ * Should be set to QIB_USER_SWVERSION.
+ */
+ __u32 spu_userversion;
+
+ __u32 _spu_unused2;
+
+ /* size of struct base_info to write to */
+ __u32 spu_base_info_size;
+
+ __u32 _spu_unused3;
+
+ /*
+ * If two or more processes wish to share a context, each process
+ * must set the spu_subctxt_cnt and spu_subctxt_id to the same
+ * values. The only restriction on the spu_subctxt_id is that
+ * it be unique for a given node.
+ */
+ __u16 spu_subctxt_cnt;
+ __u16 spu_subctxt_id;
+
+ __u32 spu_port; /* IB port requested by user if > 0 */
+
+ /*
+ * address of struct base_info to write to
+ */
+ __u64 spu_base_info;
+
+} __attribute__ ((aligned(8)));
+
+/* User commands. */
+
+/* 16 available, was: old set up userspace (for old user code) */
+#define QIB_CMD_CTXT_INFO 17 /* find out what resources we got */
+#define QIB_CMD_RECV_CTRL 18 /* control receipt of packets */
+#define QIB_CMD_TID_UPDATE 19 /* update expected TID entries */
+#define QIB_CMD_TID_FREE 20 /* free expected TID entries */
+#define QIB_CMD_SET_PART_KEY 21 /* add partition key */
+/* 22 available, was: return info on slave processes (for old user code) */
+#define QIB_CMD_ASSIGN_CTXT 23 /* allocate HCA and ctxt */
+#define QIB_CMD_USER_INIT 24 /* set up userspace */
+#define QIB_CMD_UNUSED_1 25
+#define QIB_CMD_UNUSED_2 26
+#define QIB_CMD_PIOAVAILUPD 27 /* force an update of PIOAvail reg */
+#define QIB_CMD_POLL_TYPE 28 /* set the kind of polling we want */
+#define QIB_CMD_ARMLAUNCH_CTRL 29 /* armlaunch detection control */
+/* 30 is unused */
+#define QIB_CMD_SDMA_INFLIGHT 31 /* sdma inflight counter request */
+#define QIB_CMD_SDMA_COMPLETE 32 /* sdma completion counter request */
+/* 33 available, was a testing feature */
+#define QIB_CMD_DISARM_BUFS 34 /* disarm send buffers w/ errors */
+#define QIB_CMD_ACK_EVENT 35 /* ack & clear bits */
+#define QIB_CMD_CPUS_LIST 36 /* list of cpus allocated, for pinned
+ * processes: qib_cpus_list */
+
+/*
+ * QIB_CMD_ACK_EVENT obsoletes QIB_CMD_DISARM_BUFS, but we keep it for
+ * compatibility with libraries from previous releases. The ACK_EVENT
+ * will take appropriate driver action (if any, just DISARM for now),
+ * then clear the bits passed in as part of the mask. These bits are
+ * in the first 64bit word at spi_sendbuf_status, and are passed to
+ * the driver in the event_mask union as well.
+ */
+#define _QIB_EVENT_DISARM_BUFS_BIT 0
+#define _QIB_EVENT_LINKDOWN_BIT 1
+#define _QIB_EVENT_LID_CHANGE_BIT 2
+#define _QIB_EVENT_LMC_CHANGE_BIT 3
+#define _QIB_EVENT_SL2VL_CHANGE_BIT 4
+#define _QIB_MAX_EVENT_BIT _QIB_EVENT_SL2VL_CHANGE_BIT
+
+#define QIB_EVENT_DISARM_BUFS_BIT (1UL << _QIB_EVENT_DISARM_BUFS_BIT)
+#define QIB_EVENT_LINKDOWN_BIT (1UL << _QIB_EVENT_LINKDOWN_BIT)
+#define QIB_EVENT_LID_CHANGE_BIT (1UL << _QIB_EVENT_LID_CHANGE_BIT)
+#define QIB_EVENT_LMC_CHANGE_BIT (1UL << _QIB_EVENT_LMC_CHANGE_BIT)
+#define QIB_EVENT_SL2VL_CHANGE_BIT (1UL << _QIB_EVENT_SL2VL_CHANGE_BIT)
+
+
+/*
+ * Poll types
+ */
+#define QIB_POLL_TYPE_ANYRCV 0x0
+#define QIB_POLL_TYPE_URGENT 0x1
+
+struct qib_ctxt_info {
+ __u16 num_active; /* number of active units */
+ __u16 unit; /* unit (chip) assigned to caller */
+ __u16 port; /* IB port assigned to caller (1-based) */
+ __u16 ctxt; /* ctxt on unit assigned to caller */
+ __u16 subctxt; /* subctxt on unit assigned to caller */
+ __u16 num_ctxts; /* number of ctxts available on unit */
+ __u16 num_subctxts; /* number of subctxts opened on ctxt */
+ __u16 rec_cpu; /* cpu # for affinity (ffff if none) */
+};
+
+struct qib_tid_info {
+ __u32 tidcnt;
+ /* make structure same size in 32 and 64 bit */
+ __u32 tid__unused;
+ /* virtual address of first page in transfer */
+ __u64 tidvaddr;
+ /* pointer (same size 32/64 bit) to __u16 tid array */
+ __u64 tidlist;
+
+ /*
+ * pointer (same size 32/64 bit) to bitmap of TIDs used
+ * for this call; checked for being large enough at open
+ */
+ __u64 tidmap;
+};
+
+struct qib_cmd {
+ __u32 type; /* command type */
+ union {
+ struct qib_tid_info tid_info;
+ struct qib_user_info user_info;
+
+ /*
+ * address in userspace where we should put the sdma
+ * inflight counter
+ */
+ __u64 sdma_inflight;
+ /*
+ * address in userspace where we should put the sdma
+ * completion counter
+ */
+ __u64 sdma_complete;
+ /* address in userspace of struct qib_ctxt_info to
+ write result to */
+ __u64 ctxt_info;
+ /* enable/disable receipt of packets */
+ __u32 recv_ctrl;
+ /* enable/disable armlaunch errors (non-zero to enable) */
+ __u32 armlaunch_ctrl;
+ /* partition key to set */
+ __u16 part_key;
+ /* user address of __u32 bitmask of active slaves */
+ __u64 slave_mask_addr;
+ /* type of polling we want */
+ __u16 poll_type;
+ /* back pressure enable bit for one particular context */
+ __u8 ctxt_bp;
+ /* qib_user_event_ack(), IPATH_EVENT_* bits */
+ __u64 event_mask;
+ } cmd;
+};
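+
+/*
+ * Minimal sketch, not part of this patch: how a user process might pack a
+ * qib_cmd asking for its context info.  Delivery of the command to the
+ * driver (historically a write() on the opened device file) is outside the
+ * scope of this header; only the packing is shown, and the helper name is
+ * hypothetical.
+ */
+static inline void qib_example_fill_ctxt_info_cmd(struct qib_cmd *cmd,
+						   struct qib_ctxt_info *info)
+{
+	cmd->type = QIB_CMD_CTXT_INFO;
+	/* pointers travel as 64-bit values so 32- and 64-bit layouts match */
+	cmd->cmd.ctxt_info = (__u64) (unsigned long) info;
+}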
+
+struct qib_iovec {
+ /* Pointer to data, but same size 32 and 64 bit */
+ __u64 iov_base;
+
+ /*
+ * Length of data; don't need 64 bits, but want
+ * qib_sendpkt to remain same size as before 32 bit changes, so...
+ */
+ __u64 iov_len;
+};
+
+/*
+ * Describes a single packet for send. Each packet can have one or more
+ * buffers, but the total length (exclusive of IB headers) must be less
+ * than the MTU, and if using the PIO method, entire packet length,
+ * including IB headers, must be less than the qib_piosize value (words).
+ * Use of this necessitates including sys/uio.h
+ */
+struct __qib_sendpkt {
+ __u32 sps_flags; /* flags for packet (TBD) */
+ __u32 sps_cnt; /* number of entries to use in sps_iov */
+ /* array of iov's describing packet. TEMPORARY */
+ struct qib_iovec sps_iov[4];
+};
+
+/*
+ * Diagnostics can send a packet by "writing" the following
+ * structs to the diag data special file. This allows a custom
+ * pbc (+ static rate) qword, so that special modes and deliberate
+ * changes to CRCs can be used. The elements were also re-ordered
+ * for better alignment and to avoid padding issues.
+ */
+#define _DIAG_XPKT_VERS 3
+struct qib_diag_xpkt {
+ __u16 version;
+ __u16 unit;
+ __u16 port;
+ __u16 len;
+ __u64 data;
+ __u64 pbc_wd;
+};
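+
+/*
+ * Minimal sketch, not part of this patch: filling in a diag packet request.
+ * The helper name is hypothetical; the constraints noted in the comments
+ * mirror the checks the driver applies when the struct is written to the
+ * diag data special file.
+ */
+static inline void qib_example_fill_diag_xpkt(struct qib_diag_xpkt *dp,
+					       __u16 unit, __u16 port,
+					       const void *payload, __u16 len)
+{
+	dp->version = _DIAG_XPKT_VERS;
+	dp->unit = unit;
+	dp->port = port;	/* 1-based IB port */
+	dp->len = len;		/* must be a multiple of 4 bytes */
+	dp->data = (__u64) (unsigned long) payload;
+	dp->pbc_wd = 0;		/* 0 lets the driver compute the PBC word */
+}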
+
+/*
+ * Data layout in I2C flash (for GUID, etc.)
+ * All fields are little-endian binary unless otherwise stated
+ */
+#define QIB_FLASH_VERSION 2
+struct qib_flash {
+ /* flash layout version (QIB_FLASH_VERSION) */
+ __u8 if_fversion;
+ /* checksum protecting if_length bytes */
+ __u8 if_csum;
+ /*
+ * valid length (in use, protected by if_csum), including
+	 * if_fversion and if_csum themselves
+ */
+ __u8 if_length;
+ /* the GUID, in network order */
+ __u8 if_guid[8];
+ /* number of GUIDs to use, starting from if_guid */
+ __u8 if_numguid;
+ /* the (last 10 characters of) board serial number, in ASCII */
+ char if_serial[12];
+ /* board mfg date (YYYYMMDD ASCII) */
+ char if_mfgdate[8];
+ /* last board rework/test date (YYYYMMDD ASCII) */
+ char if_testdate[8];
+ /* logging of error counts, TBD */
+ __u8 if_errcntp[4];
+ /* powered on hours, updated at driver unload */
+ __u8 if_powerhour[2];
+ /* ASCII free-form comment field */
+ char if_comment[32];
+ /* Backwards compatible prefix for longer QLogic Serial Numbers */
+ char if_sprefix[4];
+ /* 82 bytes used, min flash size is 128 bytes */
+ __u8 if_future[46];
+};
+
+/*
+ * These are the counters implemented in the chip, and are listed in order.
+ * The InterCaps naming is taken straight from the chip spec.
+ */
+struct qlogic_ib_counters {
+ __u64 LBIntCnt;
+ __u64 LBFlowStallCnt;
+ __u64 TxSDmaDescCnt; /* was Reserved1 */
+ __u64 TxUnsupVLErrCnt;
+ __u64 TxDataPktCnt;
+ __u64 TxFlowPktCnt;
+ __u64 TxDwordCnt;
+ __u64 TxLenErrCnt;
+ __u64 TxMaxMinLenErrCnt;
+ __u64 TxUnderrunCnt;
+ __u64 TxFlowStallCnt;
+ __u64 TxDroppedPktCnt;
+ __u64 RxDroppedPktCnt;
+ __u64 RxDataPktCnt;
+ __u64 RxFlowPktCnt;
+ __u64 RxDwordCnt;
+ __u64 RxLenErrCnt;
+ __u64 RxMaxMinLenErrCnt;
+ __u64 RxICRCErrCnt;
+ __u64 RxVCRCErrCnt;
+ __u64 RxFlowCtrlErrCnt;
+ __u64 RxBadFormatCnt;
+ __u64 RxLinkProblemCnt;
+ __u64 RxEBPCnt;
+ __u64 RxLPCRCErrCnt;
+ __u64 RxBufOvflCnt;
+ __u64 RxTIDFullErrCnt;
+ __u64 RxTIDValidErrCnt;
+ __u64 RxPKeyMismatchCnt;
+ __u64 RxP0HdrEgrOvflCnt;
+ __u64 RxP1HdrEgrOvflCnt;
+ __u64 RxP2HdrEgrOvflCnt;
+ __u64 RxP3HdrEgrOvflCnt;
+ __u64 RxP4HdrEgrOvflCnt;
+ __u64 RxP5HdrEgrOvflCnt;
+ __u64 RxP6HdrEgrOvflCnt;
+ __u64 RxP7HdrEgrOvflCnt;
+ __u64 RxP8HdrEgrOvflCnt;
+ __u64 RxP9HdrEgrOvflCnt;
+ __u64 RxP10HdrEgrOvflCnt;
+ __u64 RxP11HdrEgrOvflCnt;
+ __u64 RxP12HdrEgrOvflCnt;
+ __u64 RxP13HdrEgrOvflCnt;
+ __u64 RxP14HdrEgrOvflCnt;
+ __u64 RxP15HdrEgrOvflCnt;
+ __u64 RxP16HdrEgrOvflCnt;
+ __u64 IBStatusChangeCnt;
+ __u64 IBLinkErrRecoveryCnt;
+ __u64 IBLinkDownedCnt;
+ __u64 IBSymbolErrCnt;
+ __u64 RxVL15DroppedPktCnt;
+ __u64 RxOtherLocalPhyErrCnt;
+ __u64 PcieRetryBufDiagQwordCnt;
+ __u64 ExcessBufferOvflCnt;
+ __u64 LocalLinkIntegrityErrCnt;
+ __u64 RxVlErrCnt;
+ __u64 RxDlidFltrCnt;
+};
+
+/*
+ * The next set of defines are for packet headers, and chip register
+ * and memory bits that are visible to and/or used by user-mode software.
+ */
+
+/* RcvHdrFlags bits */
+#define QLOGIC_IB_RHF_LENGTH_MASK 0x7FF
+#define QLOGIC_IB_RHF_LENGTH_SHIFT 0
+#define QLOGIC_IB_RHF_RCVTYPE_MASK 0x7
+#define QLOGIC_IB_RHF_RCVTYPE_SHIFT 11
+#define QLOGIC_IB_RHF_EGRINDEX_MASK 0xFFF
+#define QLOGIC_IB_RHF_EGRINDEX_SHIFT 16
+#define QLOGIC_IB_RHF_SEQ_MASK 0xF
+#define QLOGIC_IB_RHF_SEQ_SHIFT 0
+#define QLOGIC_IB_RHF_HDRQ_OFFSET_MASK 0x7FF
+#define QLOGIC_IB_RHF_HDRQ_OFFSET_SHIFT 4
+#define QLOGIC_IB_RHF_H_ICRCERR 0x80000000
+#define QLOGIC_IB_RHF_H_VCRCERR 0x40000000
+#define QLOGIC_IB_RHF_H_PARITYERR 0x20000000
+#define QLOGIC_IB_RHF_H_LENERR 0x10000000
+#define QLOGIC_IB_RHF_H_MTUERR 0x08000000
+#define QLOGIC_IB_RHF_H_IHDRERR 0x04000000
+#define QLOGIC_IB_RHF_H_TIDERR 0x02000000
+#define QLOGIC_IB_RHF_H_MKERR 0x01000000
+#define QLOGIC_IB_RHF_H_IBERR 0x00800000
+#define QLOGIC_IB_RHF_H_ERR_MASK 0xFF800000
+#define QLOGIC_IB_RHF_L_USE_EGR 0x80000000
+#define QLOGIC_IB_RHF_L_SWA 0x00008000
+#define QLOGIC_IB_RHF_L_SWB 0x00004000
+
+/* qlogic_ib header fields */
+#define QLOGIC_IB_I_VERS_MASK 0xF
+#define QLOGIC_IB_I_VERS_SHIFT 28
+#define QLOGIC_IB_I_CTXT_MASK 0xF
+#define QLOGIC_IB_I_CTXT_SHIFT 24
+#define QLOGIC_IB_I_TID_MASK 0x7FF
+#define QLOGIC_IB_I_TID_SHIFT 13
+#define QLOGIC_IB_I_OFFSET_MASK 0x1FFF
+#define QLOGIC_IB_I_OFFSET_SHIFT 0
+
+/* K_PktFlags bits */
+#define QLOGIC_IB_KPF_INTR 0x1
+#define QLOGIC_IB_KPF_SUBCTXT_MASK 0x3
+#define QLOGIC_IB_KPF_SUBCTXT_SHIFT 1
+
+#define QLOGIC_IB_MAX_SUBCTXT 4
+
+/* SendPIO per-buffer control */
+#define QLOGIC_IB_SP_TEST 0x40
+#define QLOGIC_IB_SP_TESTEBP 0x20
+#define QLOGIC_IB_SP_TRIGGER_SHIFT 15
+
+/* SendPIOAvail bits */
+#define QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT 1
+#define QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT 0
+
+/* qlogic_ib header format */
+struct qib_header {
+ /*
+ * Version - 4 bits, Context - 4 bits, TID - 10 bits and Offset -
+ * 14 bits before ECO change ~28 Dec 03. After that, Vers 4,
+ * Context 4, TID 11, offset 13.
+ */
+ __le32 ver_ctxt_tid_offset;
+ __le16 chksum;
+ __le16 pkt_flags;
+};
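+
+/*
+ * Minimal sketch, not part of the ABI: packing ver_ctxt_tid_offset from its
+ * fields using the QLOGIC_IB_I_* masks and shifts above, as the inverse of
+ * the qib_hdrget_* accessors below.  The helper name is hypothetical.
+ */
+static inline __le32 qib_example_hdrput(__u32 vers, __u32 ctxt, __u32 tid,
+					__u32 offset)
+{
+	return __cpu_to_le32(
+		((vers & QLOGIC_IB_I_VERS_MASK) << QLOGIC_IB_I_VERS_SHIFT) |
+		((ctxt & QLOGIC_IB_I_CTXT_MASK) << QLOGIC_IB_I_CTXT_SHIFT) |
+		((tid & QLOGIC_IB_I_TID_MASK) << QLOGIC_IB_I_TID_SHIFT) |
+		((offset & QLOGIC_IB_I_OFFSET_MASK) <<
+		 QLOGIC_IB_I_OFFSET_SHIFT));
+}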
+
+/*
+ * qlogic_ib user message header format.
+ * This structure contains the first 4 fields common to all protocols
+ * that employ qlogic_ib.
+ */
+struct qib_message_header {
+ __be16 lrh[4];
+ __be32 bth[3];
+ /* fields below this point are in host byte order */
+ struct qib_header iph;
+ __u8 sub_opcode;
+};
+
+/* IB - LRH header consts */
+#define QIB_LRH_GRH 0x0003 /* 1. word of IB LRH - next header: GRH */
+#define QIB_LRH_BTH 0x0002 /* 1. word of IB LRH - next header: BTH */
+
+/* misc. */
+#define SIZE_OF_CRC 1
+
+#define QIB_DEFAULT_P_KEY 0xFFFF
+#define QIB_PERMISSIVE_LID 0xFFFF
+#define QIB_AETH_CREDIT_SHIFT 24
+#define QIB_AETH_CREDIT_MASK 0x1F
+#define QIB_AETH_CREDIT_INVAL 0x1F
+#define QIB_PSN_MASK 0xFFFFFF
+#define QIB_MSN_MASK 0xFFFFFF
+#define QIB_QPN_MASK 0xFFFFFF
+#define QIB_MULTICAST_LID_BASE 0xC000
+#define QIB_EAGER_TID_ID QLOGIC_IB_I_TID_MASK
+#define QIB_MULTICAST_QPN 0xFFFFFF
+
+/* Receive Header Queue: receive type (from qlogic_ib) */
+#define RCVHQ_RCV_TYPE_EXPECTED 0
+#define RCVHQ_RCV_TYPE_EAGER 1
+#define RCVHQ_RCV_TYPE_NON_KD 2
+#define RCVHQ_RCV_TYPE_ERROR 3
+
+#define QIB_HEADER_QUEUE_WORDS 9
+
+/* functions for extracting fields from rcvhdrq entries for the driver.
+ */
+static inline __u32 qib_hdrget_err_flags(const __le32 *rbuf)
+{
+ return __le32_to_cpu(rbuf[1]) & QLOGIC_IB_RHF_H_ERR_MASK;
+}
+
+static inline __u32 qib_hdrget_rcv_type(const __le32 *rbuf)
+{
+ return (__le32_to_cpu(rbuf[0]) >> QLOGIC_IB_RHF_RCVTYPE_SHIFT) &
+ QLOGIC_IB_RHF_RCVTYPE_MASK;
+}
+
+static inline __u32 qib_hdrget_length_in_bytes(const __le32 *rbuf)
+{
+ return ((__le32_to_cpu(rbuf[0]) >> QLOGIC_IB_RHF_LENGTH_SHIFT) &
+ QLOGIC_IB_RHF_LENGTH_MASK) << 2;
+}
+
+static inline __u32 qib_hdrget_index(const __le32 *rbuf)
+{
+ return (__le32_to_cpu(rbuf[0]) >> QLOGIC_IB_RHF_EGRINDEX_SHIFT) &
+ QLOGIC_IB_RHF_EGRINDEX_MASK;
+}
+
+static inline __u32 qib_hdrget_seq(const __le32 *rbuf)
+{
+ return (__le32_to_cpu(rbuf[1]) >> QLOGIC_IB_RHF_SEQ_SHIFT) &
+ QLOGIC_IB_RHF_SEQ_MASK;
+}
+
+static inline __u32 qib_hdrget_offset(const __le32 *rbuf)
+{
+ return (__le32_to_cpu(rbuf[1]) >> QLOGIC_IB_RHF_HDRQ_OFFSET_SHIFT) &
+ QLOGIC_IB_RHF_HDRQ_OFFSET_MASK;
+}
+
+static inline __u32 qib_hdrget_use_egr_buf(const __le32 *rbuf)
+{
+ return __le32_to_cpu(rbuf[0]) & QLOGIC_IB_RHF_L_USE_EGR;
+}
+
+static inline __u32 qib_hdrget_qib_ver(__le32 hdrword)
+{
+ return (__le32_to_cpu(hdrword) >> QLOGIC_IB_I_VERS_SHIFT) &
+ QLOGIC_IB_I_VERS_MASK;
+}
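+
+/*
+ * Sketch only, not part of the ABI: how the accessors above are typically
+ * combined when walking receive header queue entries.  "rhf" is assumed to
+ * point at the two RcvHdrFlags words, spi_rhf_offset dwords into the entry;
+ * the helper name is hypothetical.
+ */
+static inline int qib_example_rhf_ok(const __le32 *rhf)
+{
+	if (qib_hdrget_err_flags(rhf))
+		return 0;	/* one of the QLOGIC_IB_RHF_H_* errors is set */
+	return qib_hdrget_rcv_type(rhf) != RCVHQ_RCV_TYPE_ERROR;
+}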
+
+#endif /* _QIB_COMMON_H */
diff --git a/drivers/infiniband/hw/qib/qib_cq.c b/drivers/infiniband/hw/qib/qib_cq.c
new file mode 100644
index 000000000000..a86cbf880f98
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_cq.c
@@ -0,0 +1,484 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2010 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include "qib_verbs.h"
+
+/**
+ * qib_cq_enter - add a new entry to the completion queue
+ * @cq: completion queue
+ * @entry: work completion entry to add
+ * @solicited: true if @entry is a solicited entry
+ *
+ * This may be called with qp->s_lock held.
+ */
+void qib_cq_enter(struct qib_cq *cq, struct ib_wc *entry, int solicited)
+{
+ struct qib_cq_wc *wc;
+ unsigned long flags;
+ u32 head;
+ u32 next;
+
+ spin_lock_irqsave(&cq->lock, flags);
+
+ /*
+ * Note that the head pointer might be writable by user processes.
+ * Take care to verify it is a sane value.
+ */
+ wc = cq->queue;
+ head = wc->head;
+ if (head >= (unsigned) cq->ibcq.cqe) {
+ head = cq->ibcq.cqe;
+ next = 0;
+ } else
+ next = head + 1;
+ if (unlikely(next == wc->tail)) {
+ spin_unlock_irqrestore(&cq->lock, flags);
+ if (cq->ibcq.event_handler) {
+ struct ib_event ev;
+
+ ev.device = cq->ibcq.device;
+ ev.element.cq = &cq->ibcq;
+ ev.event = IB_EVENT_CQ_ERR;
+ cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
+ }
+ return;
+ }
+ if (cq->ip) {
+ wc->uqueue[head].wr_id = entry->wr_id;
+ wc->uqueue[head].status = entry->status;
+ wc->uqueue[head].opcode = entry->opcode;
+ wc->uqueue[head].vendor_err = entry->vendor_err;
+ wc->uqueue[head].byte_len = entry->byte_len;
+ wc->uqueue[head].ex.imm_data =
+ (__u32 __force)entry->ex.imm_data;
+ wc->uqueue[head].qp_num = entry->qp->qp_num;
+ wc->uqueue[head].src_qp = entry->src_qp;
+ wc->uqueue[head].wc_flags = entry->wc_flags;
+ wc->uqueue[head].pkey_index = entry->pkey_index;
+ wc->uqueue[head].slid = entry->slid;
+ wc->uqueue[head].sl = entry->sl;
+ wc->uqueue[head].dlid_path_bits = entry->dlid_path_bits;
+ wc->uqueue[head].port_num = entry->port_num;
+ /* Make sure entry is written before the head index. */
+ smp_wmb();
+ } else
+ wc->kqueue[head] = *entry;
+ wc->head = next;
+
+ if (cq->notify == IB_CQ_NEXT_COMP ||
+ (cq->notify == IB_CQ_SOLICITED && solicited)) {
+ cq->notify = IB_CQ_NONE;
+ cq->triggered++;
+ /*
+ * This will cause send_complete() to be called in
+ * another thread.
+ */
+ queue_work(qib_cq_wq, &cq->comptask);
+ }
+
+ spin_unlock_irqrestore(&cq->lock, flags);
+}
+
+/**
+ * qib_poll_cq - poll for work completion entries
+ * @ibcq: the completion queue to poll
+ * @num_entries: the maximum number of entries to return
+ * @entry: pointer to array where work completions are placed
+ *
+ * Returns the number of completion entries polled.
+ *
+ * This may be called from interrupt context. Also called by ib_poll_cq()
+ * in the generic verbs code.
+ */
+int qib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
+{
+ struct qib_cq *cq = to_icq(ibcq);
+ struct qib_cq_wc *wc;
+ unsigned long flags;
+ int npolled;
+ u32 tail;
+
+ /* The kernel can only poll a kernel completion queue */
+ if (cq->ip) {
+ npolled = -EINVAL;
+ goto bail;
+ }
+
+ spin_lock_irqsave(&cq->lock, flags);
+
+ wc = cq->queue;
+ tail = wc->tail;
+ if (tail > (u32) cq->ibcq.cqe)
+ tail = (u32) cq->ibcq.cqe;
+ for (npolled = 0; npolled < num_entries; ++npolled, ++entry) {
+ if (tail == wc->head)
+ break;
+ /* The kernel doesn't need a RMB since it has the lock. */
+ *entry = wc->kqueue[tail];
+ if (tail >= cq->ibcq.cqe)
+ tail = 0;
+ else
+ tail++;
+ }
+ wc->tail = tail;
+
+ spin_unlock_irqrestore(&cq->lock, flags);
+
+bail:
+ return npolled;
+}
+
+static void send_complete(struct work_struct *work)
+{
+ struct qib_cq *cq = container_of(work, struct qib_cq, comptask);
+
+ /*
+ * The completion handler will most likely rearm the notification
+ * and poll for all pending entries. If a new completion entry
+ * is added while we are in this routine, queue_work()
+ * won't call us again until we return so we check triggered to
+ * see if we need to call the handler again.
+ */
+ for (;;) {
+ u8 triggered = cq->triggered;
+
+ /*
+ * IPoIB connected mode assumes the callback is from a
+ * soft IRQ. We simulate this by blocking "bottom halves".
+ * See the implementation for ipoib_cm_handle_tx_wc(),
+ * netif_tx_lock_bh() and netif_tx_lock().
+ */
+ local_bh_disable();
+ cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
+ local_bh_enable();
+
+ if (cq->triggered == triggered)
+ return;
+ }
+}
+
+/**
+ * qib_create_cq - create a completion queue
+ * @ibdev: the device this completion queue is attached to
+ * @entries: the minimum size of the completion queue
+ * @comp_vector: unused by the QLogic_IB driver
+ * @context: unused by the QLogic_IB driver
+ * @udata: user data for libibverbs.so
+ *
+ * Returns a pointer to the completion queue or negative errno values
+ * for failure.
+ *
+ * Called by ib_create_cq() in the generic verbs code.
+ */
+struct ib_cq *qib_create_cq(struct ib_device *ibdev, int entries,
+ int comp_vector, struct ib_ucontext *context,
+ struct ib_udata *udata)
+{
+ struct qib_ibdev *dev = to_idev(ibdev);
+ struct qib_cq *cq;
+ struct qib_cq_wc *wc;
+ struct ib_cq *ret;
+ u32 sz;
+
+ if (entries < 1 || entries > ib_qib_max_cqes) {
+ ret = ERR_PTR(-EINVAL);
+ goto done;
+ }
+
+ /* Allocate the completion queue structure. */
+ cq = kmalloc(sizeof(*cq), GFP_KERNEL);
+ if (!cq) {
+ ret = ERR_PTR(-ENOMEM);
+ goto done;
+ }
+
+ /*
+ * Allocate the completion queue entries and head/tail pointers.
+ * This is allocated separately so that it can be resized and
+ * also mapped into user space.
+ * We need to use vmalloc() in order to support mmap and large
+ * numbers of entries.
+ */
+ sz = sizeof(*wc);
+ if (udata && udata->outlen >= sizeof(__u64))
+ sz += sizeof(struct ib_uverbs_wc) * (entries + 1);
+ else
+ sz += sizeof(struct ib_wc) * (entries + 1);
+ wc = vmalloc_user(sz);
+ if (!wc) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail_cq;
+ }
+
+ /*
+ * Return the address of the WC as the offset to mmap.
+ * See qib_mmap() for details.
+ */
+ if (udata && udata->outlen >= sizeof(__u64)) {
+ int err;
+
+ cq->ip = qib_create_mmap_info(dev, sz, context, wc);
+ if (!cq->ip) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail_wc;
+ }
+
+ err = ib_copy_to_udata(udata, &cq->ip->offset,
+ sizeof(cq->ip->offset));
+ if (err) {
+ ret = ERR_PTR(err);
+ goto bail_ip;
+ }
+ } else
+ cq->ip = NULL;
+
+ spin_lock(&dev->n_cqs_lock);
+ if (dev->n_cqs_allocated == ib_qib_max_cqs) {
+ spin_unlock(&dev->n_cqs_lock);
+ ret = ERR_PTR(-ENOMEM);
+ goto bail_ip;
+ }
+
+ dev->n_cqs_allocated++;
+ spin_unlock(&dev->n_cqs_lock);
+
+ if (cq->ip) {
+ spin_lock_irq(&dev->pending_lock);
+ list_add(&cq->ip->pending_mmaps, &dev->pending_mmaps);
+ spin_unlock_irq(&dev->pending_lock);
+ }
+
+ /*
+ * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.
+ * The number of entries should be >= the number requested or return
+ * an error.
+ */
+ cq->ibcq.cqe = entries;
+ cq->notify = IB_CQ_NONE;
+ cq->triggered = 0;
+ spin_lock_init(&cq->lock);
+ INIT_WORK(&cq->comptask, send_complete);
+ wc->head = 0;
+ wc->tail = 0;
+ cq->queue = wc;
+
+ ret = &cq->ibcq;
+
+ goto done;
+
+bail_ip:
+ kfree(cq->ip);
+bail_wc:
+ vfree(wc);
+bail_cq:
+ kfree(cq);
+done:
+ return ret;
+}
+
+/**
+ * qib_destroy_cq - destroy a completion queue
+ * @ibcq: the completion queue to destroy.
+ *
+ * Returns 0 for success.
+ *
+ * Called by ib_destroy_cq() in the generic verbs code.
+ */
+int qib_destroy_cq(struct ib_cq *ibcq)
+{
+ struct qib_ibdev *dev = to_idev(ibcq->device);
+ struct qib_cq *cq = to_icq(ibcq);
+
+ flush_work(&cq->comptask);
+ spin_lock(&dev->n_cqs_lock);
+ dev->n_cqs_allocated--;
+ spin_unlock(&dev->n_cqs_lock);
+ if (cq->ip)
+ kref_put(&cq->ip->ref, qib_release_mmap_info);
+ else
+ vfree(cq->queue);
+ kfree(cq);
+
+ return 0;
+}
+
+/**
+ * qib_req_notify_cq - change the notification type for a completion queue
+ * @ibcq: the completion queue
+ * @notify_flags: the type of notification to request
+ *
+ * Returns 0 for success.
+ *
+ * This may be called from interrupt context. Also called by
+ * ib_req_notify_cq() in the generic verbs code.
+ */
+int qib_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
+{
+ struct qib_cq *cq = to_icq(ibcq);
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&cq->lock, flags);
+ /*
+ * Don't change IB_CQ_NEXT_COMP to IB_CQ_SOLICITED but allow
+ * any other transitions (see C11-31 and C11-32 in ch. 11.4.2.2).
+ */
+ if (cq->notify != IB_CQ_NEXT_COMP)
+ cq->notify = notify_flags & IB_CQ_SOLICITED_MASK;
+
+ if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
+ cq->queue->head != cq->queue->tail)
+ ret = 1;
+
+ spin_unlock_irqrestore(&cq->lock, flags);
+
+ return ret;
+}
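+
+/*
+ * Sketch (assumption, not part of this patch) of the consumer-side pattern
+ * that the IB_CQ_REPORT_MISSED_EVENTS return value above enables; this
+ * would live in a ULP, not in this file, and the function name is
+ * hypothetical.
+ */
+static inline void qib_example_drain_cq(struct ib_cq *cq)
+{
+	struct ib_wc wc;
+
+	do {
+		while (ib_poll_cq(cq, 1, &wc) > 0)
+			; /* process the completion in wc here */
+	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
+				  IB_CQ_REPORT_MISSED_EVENTS) > 0);
+}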
+
+/**
+ * qib_resize_cq - change the size of the CQ
+ * @ibcq: the completion queue
+ * @cqe: the new minimum number of entries requested
+ * @udata: user data for libibverbs.so
+ *
+ * Returns 0 for success.
+ */
+int qib_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
+{
+ struct qib_cq *cq = to_icq(ibcq);
+ struct qib_cq_wc *old_wc;
+ struct qib_cq_wc *wc;
+ u32 head, tail, n;
+ int ret;
+ u32 sz;
+
+ if (cqe < 1 || cqe > ib_qib_max_cqes) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ /*
+ * Need to use vmalloc() if we want to support large #s of entries.
+ */
+ sz = sizeof(*wc);
+ if (udata && udata->outlen >= sizeof(__u64))
+ sz += sizeof(struct ib_uverbs_wc) * (cqe + 1);
+ else
+ sz += sizeof(struct ib_wc) * (cqe + 1);
+ wc = vmalloc_user(sz);
+ if (!wc) {
+ ret = -ENOMEM;
+ goto bail;
+ }
+
+ /* Check that we can write the offset to mmap. */
+ if (udata && udata->outlen >= sizeof(__u64)) {
+ __u64 offset = 0;
+
+ ret = ib_copy_to_udata(udata, &offset, sizeof(offset));
+ if (ret)
+ goto bail_free;
+ }
+
+ spin_lock_irq(&cq->lock);
+ /*
+ * Make sure head and tail are sane since they
+ * might be user writable.
+ */
+ old_wc = cq->queue;
+ head = old_wc->head;
+ if (head > (u32) cq->ibcq.cqe)
+ head = (u32) cq->ibcq.cqe;
+ tail = old_wc->tail;
+ if (tail > (u32) cq->ibcq.cqe)
+ tail = (u32) cq->ibcq.cqe;
+ if (head < tail)
+ n = cq->ibcq.cqe + 1 + head - tail;
+ else
+ n = head - tail;
+ if (unlikely((u32)cqe < n)) {
+ ret = -EINVAL;
+ goto bail_unlock;
+ }
+ for (n = 0; tail != head; n++) {
+ if (cq->ip)
+ wc->uqueue[n] = old_wc->uqueue[tail];
+ else
+ wc->kqueue[n] = old_wc->kqueue[tail];
+ if (tail == (u32) cq->ibcq.cqe)
+ tail = 0;
+ else
+ tail++;
+ }
+ cq->ibcq.cqe = cqe;
+ wc->head = n;
+ wc->tail = 0;
+ cq->queue = wc;
+ spin_unlock_irq(&cq->lock);
+
+ vfree(old_wc);
+
+ if (cq->ip) {
+ struct qib_ibdev *dev = to_idev(ibcq->device);
+ struct qib_mmap_info *ip = cq->ip;
+
+ qib_update_mmap_info(dev, ip, sz, wc);
+
+ /*
+ * Return the offset to mmap.
+ * See qib_mmap() for details.
+ */
+ if (udata && udata->outlen >= sizeof(__u64)) {
+ ret = ib_copy_to_udata(udata, &ip->offset,
+ sizeof(ip->offset));
+ if (ret)
+ goto bail;
+ }
+
+ spin_lock_irq(&dev->pending_lock);
+ if (list_empty(&ip->pending_mmaps))
+ list_add(&ip->pending_mmaps, &dev->pending_mmaps);
+ spin_unlock_irq(&dev->pending_lock);
+ }
+
+ ret = 0;
+ goto bail;
+
+bail_unlock:
+ spin_unlock_irq(&cq->lock);
+bail_free:
+ vfree(wc);
+bail:
+ return ret;
+}
diff --git a/drivers/infiniband/hw/qib/qib_diag.c b/drivers/infiniband/hw/qib/qib_diag.c
new file mode 100644
index 000000000000..ca98dd523752
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_diag.c
@@ -0,0 +1,894 @@
+/*
+ * Copyright (c) 2010 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * This file contains support for diagnostic functions. It is accessed by
+ * opening the qib_diag device, normally minor number 129. Diagnostic use
+ * of the QLogic_IB chip may render the chip or board unusable until the
+ * driver is unloaded, or in some cases, until the system is rebooted.
+ *
+ * Accesses to the chip through this interface are not similar to going
+ * through the /sys/bus/pci resource mmap interface.
+ */
+
+#include <linux/io.h>
+#include <linux/pci.h>
+#include <linux/poll.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+
+#include "qib.h"
+#include "qib_common.h"
+
+/*
+ * Each client that opens the diag device must read then write
+ * offset 0, to prevent lossage from random cat or od. diag_state
+ * sequences this "handshake".
+ */
+enum diag_state { UNUSED = 0, OPENED, INIT, READY };
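+
+/*
+ * Userspace-side sketch of the handshake above; it would live in a diag
+ * tool (including this header), not in this file, and assumes <fcntl.h>
+ * and <unistd.h> plus a caller-supplied device path.  Read 8 bytes at
+ * offset 0, write the same 8 bytes back, and only then issue arbitrary
+ * reads and writes.
+ */
+static int qib_example_diag_handshake(const char *path)
+{
+	__u64 handshake;
+	int fd = open(path, O_RDWR);
+
+	if (fd < 0)
+		return -1;
+	if (pread(fd, &handshake, sizeof(handshake), 0) != sizeof(handshake) ||
+	    pwrite(fd, &handshake, sizeof(handshake), 0) != sizeof(handshake)) {
+		close(fd);
+		return -1;
+	}
+	return fd;	/* subsequent reads/writes are now permitted */
+}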
+
+/* State for an individual client. PID so children cannot abuse handshake */
+static struct qib_diag_client {
+ struct qib_diag_client *next;
+ struct qib_devdata *dd;
+ pid_t pid;
+ enum diag_state state;
+} *client_pool;
+
+/*
+ * Get a client struct. Recycled if possible, else kmalloc.
+ * Must be called with qib_mutex held
+ */
+static struct qib_diag_client *get_client(struct qib_devdata *dd)
+{
+ struct qib_diag_client *dc;
+
+ dc = client_pool;
+ if (dc)
+		/* got one from the pool; remove it and use it */
+ client_pool = dc->next;
+ else
+ /* None in pool, alloc and init */
+		dc = kmalloc(sizeof(*dc), GFP_KERNEL);
+
+ if (dc) {
+ dc->next = NULL;
+ dc->dd = dd;
+ dc->pid = current->pid;
+ dc->state = OPENED;
+ }
+ return dc;
+}
+
+/*
+ * Return to pool. Must be called with qib_mutex held
+ */
+static void return_client(struct qib_diag_client *dc)
+{
+ struct qib_devdata *dd = dc->dd;
+ struct qib_diag_client *tdc, *rdc;
+
+ rdc = NULL;
+ if (dc == dd->diag_client) {
+ dd->diag_client = dc->next;
+ rdc = dc;
+ } else {
+ tdc = dc->dd->diag_client;
+ while (tdc) {
+ if (dc == tdc->next) {
+ tdc->next = dc->next;
+ rdc = dc;
+ break;
+ }
+ tdc = tdc->next;
+ }
+ }
+ if (rdc) {
+ rdc->state = UNUSED;
+ rdc->dd = NULL;
+ rdc->pid = 0;
+ rdc->next = client_pool;
+ client_pool = rdc;
+ }
+}
+
+static int qib_diag_open(struct inode *in, struct file *fp);
+static int qib_diag_release(struct inode *in, struct file *fp);
+static ssize_t qib_diag_read(struct file *fp, char __user *data,
+ size_t count, loff_t *off);
+static ssize_t qib_diag_write(struct file *fp, const char __user *data,
+ size_t count, loff_t *off);
+
+static const struct file_operations diag_file_ops = {
+ .owner = THIS_MODULE,
+ .write = qib_diag_write,
+ .read = qib_diag_read,
+ .open = qib_diag_open,
+ .release = qib_diag_release
+};
+
+static atomic_t diagpkt_count = ATOMIC_INIT(0);
+static struct cdev *diagpkt_cdev;
+static struct device *diagpkt_device;
+
+static ssize_t qib_diagpkt_write(struct file *fp, const char __user *data,
+ size_t count, loff_t *off);
+
+static const struct file_operations diagpkt_file_ops = {
+ .owner = THIS_MODULE,
+ .write = qib_diagpkt_write,
+};
+
+int qib_diag_add(struct qib_devdata *dd)
+{
+ char name[16];
+ int ret = 0;
+
+ if (atomic_inc_return(&diagpkt_count) == 1) {
+ ret = qib_cdev_init(QIB_DIAGPKT_MINOR, "ipath_diagpkt",
+ &diagpkt_file_ops, &diagpkt_cdev,
+ &diagpkt_device);
+ if (ret)
+ goto done;
+ }
+
+ snprintf(name, sizeof(name), "ipath_diag%d", dd->unit);
+ ret = qib_cdev_init(QIB_DIAG_MINOR_BASE + dd->unit, name,
+ &diag_file_ops, &dd->diag_cdev,
+ &dd->diag_device);
+done:
+ return ret;
+}
+
+static void qib_unregister_observers(struct qib_devdata *dd);
+
+void qib_diag_remove(struct qib_devdata *dd)
+{
+ struct qib_diag_client *dc;
+
+ if (atomic_dec_and_test(&diagpkt_count))
+ qib_cdev_cleanup(&diagpkt_cdev, &diagpkt_device);
+
+ qib_cdev_cleanup(&dd->diag_cdev, &dd->diag_device);
+
+ /*
+ * Return all diag_clients of this device. There should be none,
+ * as we are "guaranteed" that no clients are still open
+ */
+ while (dd->diag_client)
+ return_client(dd->diag_client);
+
+ /* Now clean up all unused client structs */
+ while (client_pool) {
+ dc = client_pool;
+ client_pool = dc->next;
+ kfree(dc);
+ }
+ /* Clean up observer list */
+ qib_unregister_observers(dd);
+}
+
+/* qib_remap_ioaddr32 - remap an offset into chip address space to __iomem *
+ *
+ * @dd: the qlogic_ib device
+ * @offs: the offset in chip-space
+ * @cntp: Pointer to max (byte) count for transfer starting at offset
+ * This returns a u32 __iomem * so it can be used for both 64 and 32-bit
+ * mapping. It is needed because with the use of PAT for control of
+ * write-combining, the logically contiguous address-space of the chip
+ * may be split into virtually non-contiguous spaces, with different
+ * attributes, which are then mapped to contiguous physical space
+ * based from the first BAR.
+ *
+ * The code below makes the same assumptions as were made in
+ * init_chip_wc_pat() (qib_init.c), copied here:
+ * Assumes chip address space looks like:
+ * - kregs + sregs + cregs + uregs (in any order)
+ * - piobufs (2K and 4K bufs in either order)
+ * or:
+ * - kregs + sregs + cregs (in any order)
+ * - piobufs (2K and 4K bufs in either order)
+ * - uregs
+ *
+ * If cntp is non-NULL, returns how many bytes from offset can be accessed
+ * Returns 0 if the offset is not mapped.
+ */
+static u32 __iomem *qib_remap_ioaddr32(struct qib_devdata *dd, u32 offset,
+ u32 *cntp)
+{
+ u32 kreglen;
+ u32 snd_bottom, snd_lim = 0;
+ u32 __iomem *krb32 = (u32 __iomem *)dd->kregbase;
+ u32 __iomem *map = NULL;
+ u32 cnt = 0;
+
+ /* First, simplest case, offset is within the first map. */
+ kreglen = (dd->kregend - dd->kregbase) * sizeof(u64);
+ if (offset < kreglen) {
+ map = krb32 + (offset / sizeof(u32));
+ cnt = kreglen - offset;
+ goto mapped;
+ }
+
+ /*
+ * Next check for user regs, the next most common case,
+ * and a cheap check because if they are not in the first map
+ * they are last in chip.
+ */
+ if (dd->userbase) {
+ /* If user regs mapped, they are after send, so set limit. */
+ u32 ulim = (dd->cfgctxts * dd->ureg_align) + dd->uregbase;
+ snd_lim = dd->uregbase;
+ krb32 = (u32 __iomem *)dd->userbase;
+ if (offset >= dd->uregbase && offset < ulim) {
+ map = krb32 + (offset - dd->uregbase) / sizeof(u32);
+ cnt = ulim - offset;
+ goto mapped;
+ }
+ }
+
+ /*
+ * Lastly, check for offset within Send Buffers.
+ * This is gnarly because struct devdata is deliberately vague
+ * about things like 7322 VL15 buffers, and we are not in
+ * chip-specific code here, so should not make many assumptions.
+ * The one we _do_ make is that the only chip that has more sndbufs
+ * than we admit is the 7322, and it has userregs above that, so
+ * we know the snd_lim.
+ */
+ /* Assume 2K buffers are first. */
+ snd_bottom = dd->pio2k_bufbase;
+ if (snd_lim == 0) {
+ u32 tot2k = dd->piobcnt2k * ALIGN(dd->piosize2k, dd->palign);
+ snd_lim = snd_bottom + tot2k;
+ }
+ /* If 4k buffers exist, account for them by bumping
+ * appropriate limit.
+ */
+ if (dd->piobcnt4k) {
+ u32 tot4k = dd->piobcnt4k * dd->align4k;
+ u32 offs4k = dd->piobufbase >> 32;
+ if (snd_bottom > offs4k)
+ snd_bottom = offs4k;
+ else {
+			/* 4k above 2k. Bump snd_lim, if needed */
+ if (!dd->userbase)
+ snd_lim = offs4k + tot4k;
+ }
+ }
+ /*
+ * Judgement call: can we ignore the space between SendBuffs and
+ * UserRegs, where we would like to see vl15 buffs, but not more?
+ */
+ if (offset >= snd_bottom && offset < snd_lim) {
+ offset -= snd_bottom;
+ map = (u32 __iomem *)dd->piobase + (offset / sizeof(u32));
+ cnt = snd_lim - offset;
+ }
+
+mapped:
+ if (cntp)
+ *cntp = cnt;
+ return map;
+}
+
+/*
+ * qib_read_umem64 - read a 64-bit quantity from the chip into user space
+ * @dd: the qlogic_ib device
+ * @uaddr: the location to store the data in user memory
+ * @regoffs: the offset from BAR0 (_NOT_ full pointer, anymore)
+ * @count: number of bytes to copy (multiple of 32 bits)
+ *
+ * This function also localizes all chip memory accesses.
+ * The copy should be written such that we read full cacheline packets
+ * from the chip. This is usually used for a single qword
+ *
+ * NOTE: This assumes the chip address is 64-bit aligned.
+ */
+static int qib_read_umem64(struct qib_devdata *dd, void __user *uaddr,
+ u32 regoffs, size_t count)
+{
+ const u64 __iomem *reg_addr;
+ const u64 __iomem *reg_end;
+ u32 limit;
+ int ret;
+
+ reg_addr = (const u64 __iomem *)qib_remap_ioaddr32(dd, regoffs, &limit);
+ if (reg_addr == NULL || limit == 0 || !(dd->flags & QIB_PRESENT)) {
+ ret = -EINVAL;
+ goto bail;
+ }
+ if (count >= limit)
+ count = limit;
+ reg_end = reg_addr + (count / sizeof(u64));
+
+ /* not very efficient, but it works for now */
+ while (reg_addr < reg_end) {
+ u64 data = readq(reg_addr);
+
+ if (copy_to_user(uaddr, &data, sizeof(u64))) {
+ ret = -EFAULT;
+ goto bail;
+ }
+ reg_addr++;
+ uaddr += sizeof(u64);
+ }
+ ret = 0;
+bail:
+ return ret;
+}
+
+/*
+ * qib_write_umem64 - write a 64-bit quantity to the chip from user space
+ * @dd: the qlogic_ib device
+ * @regoffs: the offset from BAR0 (_NOT_ full pointer, anymore)
+ * @uaddr: the source of the data in user memory
+ * @count: the number of bytes to copy (multiple of 32 bits)
+ *
+ * This is usually used for a single qword
+ * NOTE: This assumes the chip address is 64-bit aligned.
+ */
+
+static int qib_write_umem64(struct qib_devdata *dd, u32 regoffs,
+ const void __user *uaddr, size_t count)
+{
+ u64 __iomem *reg_addr;
+ const u64 __iomem *reg_end;
+ u32 limit;
+ int ret;
+
+ reg_addr = (u64 __iomem *)qib_remap_ioaddr32(dd, regoffs, &limit);
+ if (reg_addr == NULL || limit == 0 || !(dd->flags & QIB_PRESENT)) {
+ ret = -EINVAL;
+ goto bail;
+ }
+ if (count >= limit)
+ count = limit;
+ reg_end = reg_addr + (count / sizeof(u64));
+
+ /* not very efficient, but it works for now */
+ while (reg_addr < reg_end) {
+ u64 data;
+ if (copy_from_user(&data, uaddr, sizeof(data))) {
+ ret = -EFAULT;
+ goto bail;
+ }
+ writeq(data, reg_addr);
+
+ reg_addr++;
+ uaddr += sizeof(u64);
+ }
+ ret = 0;
+bail:
+ return ret;
+}
+
+/*
+ * qib_read_umem32 - read a 32-bit quantity from the chip into user space
+ * @dd: the qlogic_ib device
+ * @uaddr: the location to store the data in user memory
+ * @regoffs: the offset from BAR0 (_NOT_ full pointer, anymore)
+ * @count: number of bytes to copy
+ *
+ * read 32 bit values, not 64 bit; for memories that only
+ * support 32 bit reads; usually a single dword.
+ */
+static int qib_read_umem32(struct qib_devdata *dd, void __user *uaddr,
+ u32 regoffs, size_t count)
+{
+ const u32 __iomem *reg_addr;
+ const u32 __iomem *reg_end;
+ u32 limit;
+ int ret;
+
+ reg_addr = qib_remap_ioaddr32(dd, regoffs, &limit);
+ if (reg_addr == NULL || limit == 0 || !(dd->flags & QIB_PRESENT)) {
+ ret = -EINVAL;
+ goto bail;
+ }
+ if (count >= limit)
+ count = limit;
+ reg_end = reg_addr + (count / sizeof(u32));
+
+ /* not very efficient, but it works for now */
+ while (reg_addr < reg_end) {
+ u32 data = readl(reg_addr);
+
+ if (copy_to_user(uaddr, &data, sizeof(data))) {
+ ret = -EFAULT;
+ goto bail;
+ }
+
+ reg_addr++;
+ uaddr += sizeof(u32);
+
+ }
+ ret = 0;
+bail:
+ return ret;
+}
+
+/*
+ * qib_write_umem32 - write a 32-bit quantity to the chip from user space
+ * @dd: the qlogic_ib device
+ * @regoffs: the offset from BAR0 (_NOT_ full pointer, anymore)
+ * @uaddr: the source of the data in user memory
+ * @count: number of bytes to copy
+ *
+ * write 32 bit values, not 64 bit; for memories that only
+ * support 32 bit write; usually a single dword.
+ */
+
+static int qib_write_umem32(struct qib_devdata *dd, u32 regoffs,
+ const void __user *uaddr, size_t count)
+{
+ u32 __iomem *reg_addr;
+ const u32 __iomem *reg_end;
+ u32 limit;
+ int ret;
+
+ reg_addr = qib_remap_ioaddr32(dd, regoffs, &limit);
+ if (reg_addr == NULL || limit == 0 || !(dd->flags & QIB_PRESENT)) {
+ ret = -EINVAL;
+ goto bail;
+ }
+ if (count >= limit)
+ count = limit;
+ reg_end = reg_addr + (count / sizeof(u32));
+
+ while (reg_addr < reg_end) {
+ u32 data;
+
+ if (copy_from_user(&data, uaddr, sizeof(data))) {
+ ret = -EFAULT;
+ goto bail;
+ }
+ writel(data, reg_addr);
+
+ reg_addr++;
+ uaddr += sizeof(u32);
+ }
+ ret = 0;
+bail:
+ return ret;
+}
+
+static int qib_diag_open(struct inode *in, struct file *fp)
+{
+ int unit = iminor(in) - QIB_DIAG_MINOR_BASE;
+ struct qib_devdata *dd;
+ struct qib_diag_client *dc;
+ int ret;
+
+ mutex_lock(&qib_mutex);
+
+ dd = qib_lookup(unit);
+
+ if (dd == NULL || !(dd->flags & QIB_PRESENT) ||
+ !dd->kregbase) {
+ ret = -ENODEV;
+ goto bail;
+ }
+
+ dc = get_client(dd);
+ if (!dc) {
+ ret = -ENOMEM;
+ goto bail;
+ }
+ dc->next = dd->diag_client;
+ dd->diag_client = dc;
+ fp->private_data = dc;
+ ret = 0;
+bail:
+ mutex_unlock(&qib_mutex);
+
+ return ret;
+}
+
+/**
+ * qib_diagpkt_write - write an IB packet
+ * @fp: the diag data device file pointer
+ * @data: qib_diag_xpkt structure saying where to get the packet
+ * @count: size of data to write
+ * @off: unused by this code
+ */
+static ssize_t qib_diagpkt_write(struct file *fp,
+ const char __user *data,
+ size_t count, loff_t *off)
+{
+ u32 __iomem *piobuf;
+ u32 plen, clen, pbufn;
+ struct qib_diag_xpkt dp;
+ u32 *tmpbuf = NULL;
+ struct qib_devdata *dd;
+ struct qib_pportdata *ppd;
+ ssize_t ret = 0;
+
+ if (count != sizeof(dp)) {
+ ret = -EINVAL;
+ goto bail;
+ }
+ if (copy_from_user(&dp, data, sizeof(dp))) {
+ ret = -EFAULT;
+ goto bail;
+ }
+
+ dd = qib_lookup(dp.unit);
+ if (!dd || !(dd->flags & QIB_PRESENT) || !dd->kregbase) {
+ ret = -ENODEV;
+ goto bail;
+ }
+ if (!(dd->flags & QIB_INITTED)) {
+ /* no hardware, freeze, etc. */
+ ret = -ENODEV;
+ goto bail;
+ }
+
+ if (dp.version != _DIAG_XPKT_VERS) {
+ qib_dev_err(dd, "Invalid version %u for diagpkt_write\n",
+ dp.version);
+ ret = -EINVAL;
+ goto bail;
+ }
+ /* send count must be an exact number of dwords */
+ if (dp.len & 3) {
+ ret = -EINVAL;
+ goto bail;
+ }
+ if (!dp.port || dp.port > dd->num_pports) {
+ ret = -EINVAL;
+ goto bail;
+ }
+ ppd = &dd->pport[dp.port - 1];
+
+ /* need total length before first word written */
+ /* +1 word is for the qword padding */
+ plen = sizeof(u32) + dp.len;
+ clen = dp.len >> 2;
+
+ if ((plen + 4) > ppd->ibmaxlen) {
+ ret = -EINVAL;
+ goto bail; /* before writing pbc */
+ }
+ tmpbuf = vmalloc(plen);
+ if (!tmpbuf) {
+ qib_devinfo(dd->pcidev, "Unable to allocate tmp buffer, "
+ "failing\n");
+ ret = -ENOMEM;
+ goto bail;
+ }
+
+ if (copy_from_user(tmpbuf,
+ (const void __user *) (unsigned long) dp.data,
+ dp.len)) {
+ ret = -EFAULT;
+ goto bail;
+ }
+
+ plen >>= 2; /* in dwords */
+
+ if (dp.pbc_wd == 0)
+ dp.pbc_wd = plen;
+
+ piobuf = dd->f_getsendbuf(ppd, dp.pbc_wd, &pbufn);
+ if (!piobuf) {
+ ret = -EBUSY;
+ goto bail;
+ }
+ /* disarm it just to be extra sure */
+ dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(pbufn));
+
+ /* disable header check on pbufn for this packet */
+ dd->f_txchk_change(dd, pbufn, 1, TXCHK_CHG_TYPE_DIS1, NULL);
+
+ writeq(dp.pbc_wd, piobuf);
+ /*
+ * Copy all but the trigger word, then flush, so it's written
+ * to chip before trigger word, then write trigger word, then
+ * flush again, so packet is sent.
+ */
+ if (dd->flags & QIB_PIO_FLUSH_WC) {
+ qib_flush_wc();
+ qib_pio_copy(piobuf + 2, tmpbuf, clen - 1);
+ qib_flush_wc();
+ __raw_writel(tmpbuf[clen - 1], piobuf + clen + 1);
+ } else
+ qib_pio_copy(piobuf + 2, tmpbuf, clen);
+
+ if (dd->flags & QIB_USE_SPCL_TRIG) {
+ u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;
+
+ qib_flush_wc();
+ __raw_writel(0xaebecede, piobuf + spcl_off);
+ }
+
+ /*
+ * Ensure buffer is written to the chip, then re-enable
+ * header checks (if supported by chip). The txchk
+	 * code will ensure it is seen by the chip before returning.
+ */
+ qib_flush_wc();
+ qib_sendbuf_done(dd, pbufn);
+ dd->f_txchk_change(dd, pbufn, 1, TXCHK_CHG_TYPE_ENAB1, NULL);
+
+ ret = sizeof(dp);
+
+bail:
+ vfree(tmpbuf);
+ return ret;
+}
+
+static int qib_diag_release(struct inode *in, struct file *fp)
+{
+ mutex_lock(&qib_mutex);
+ return_client(fp->private_data);
+ fp->private_data = NULL;
+ mutex_unlock(&qib_mutex);
+ return 0;
+}
+
+/*
+ * Chip-specific code calls to register its interest in
+ * a specific range.
+ */
+struct diag_observer_list_elt {
+ struct diag_observer_list_elt *next;
+ const struct diag_observer *op;
+};
+
+int qib_register_observer(struct qib_devdata *dd,
+ const struct diag_observer *op)
+{
+	struct diag_observer_list_elt *olp;
+	unsigned long flags;
+	int ret = -EINVAL;
+
+	if (!dd || !op)
+		goto bail;
+	ret = -ENOMEM;
+	olp = vmalloc(sizeof(*olp));
+	if (!olp) {
+		printk(KERN_ERR QIB_DRV_NAME ": vmalloc for observer failed\n");
+		goto bail;
+	}
+
+	spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
+	olp->op = op;
+	olp->next = dd->diag_observer_list;
+	dd->diag_observer_list = olp;
+	spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags);
+	ret = 0;
+bail:
+ return ret;
+}
+
+/* Remove all registered observers when device is closed */
+static void qib_unregister_observers(struct qib_devdata *dd)
+{
+ struct diag_observer_list_elt *olp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
+ olp = dd->diag_observer_list;
+ while (olp) {
+ /* Pop one observer, let go of lock */
+ dd->diag_observer_list = olp->next;
+ spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags);
+ vfree(olp);
+ /* try again. */
+ spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
+ olp = dd->diag_observer_list;
+ }
+ spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags);
+}
+
+/*
+ * Find the observer, if any, for the specified address. Initial implementation
+ * is simple stack of observers. This must be called with diag transaction
+ * lock held.
+ */
+static const struct diag_observer *diag_get_observer(struct qib_devdata *dd,
+ u32 addr)
+{
+ struct diag_observer_list_elt *olp;
+ const struct diag_observer *op = NULL;
+
+ olp = dd->diag_observer_list;
+ while (olp) {
+ op = olp->op;
+ if (addr >= op->bottom && addr <= op->top)
+ break;
+ olp = olp->next;
+ }
+ if (!olp)
+ op = NULL;
+
+ return op;
+}
+
+static ssize_t qib_diag_read(struct file *fp, char __user *data,
+ size_t count, loff_t *off)
+{
+ struct qib_diag_client *dc = fp->private_data;
+ struct qib_devdata *dd = dc->dd;
+ void __iomem *kreg_base;
+ ssize_t ret;
+
+ if (dc->pid != current->pid) {
+ ret = -EPERM;
+ goto bail;
+ }
+
+ kreg_base = dd->kregbase;
+
+ if (count == 0)
+ ret = 0;
+ else if ((count % 4) || (*off % 4))
+ /* address or length is not 32-bit aligned, hence invalid */
+ ret = -EINVAL;
+ else if (dc->state < READY && (*off || count != 8))
+ ret = -EINVAL; /* prevent cat /dev/qib_diag* */
+ else {
+ unsigned long flags;
+ u64 data64 = 0;
+ int use_32;
+ const struct diag_observer *op;
+
+ use_32 = (count % 8) || (*off % 8);
+ ret = -1;
+ spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
+ /*
+ * Check for observer on this address range.
+ * we only support a single 32 or 64-bit read
+ * via observer, currently.
+ */
+ op = diag_get_observer(dd, *off);
+ if (op) {
+ u32 offset = *off;
+ ret = op->hook(dd, op, offset, &data64, 0, use_32);
+ }
+ /*
+ * We need to release lock before any copy_to_user(),
+ * whether implicit in qib_read_umem* or explicit below.
+ */
+ spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags);
+ if (!op) {
+ if (use_32)
+ /*
+ * Address or length is not 64-bit aligned;
+ * do 32-bit rd
+ */
+ ret = qib_read_umem32(dd, data, (u32) *off,
+ count);
+ else
+ ret = qib_read_umem64(dd, data, (u32) *off,
+ count);
+ } else if (ret == count) {
+ /* Below finishes case where observer existed */
+ ret = copy_to_user(data, &data64, use_32 ?
+ sizeof(u32) : sizeof(u64));
+ if (ret)
+ ret = -EFAULT;
+ }
+ }
+
+ if (ret >= 0) {
+ *off += count;
+ ret = count;
+ if (dc->state == OPENED)
+ dc->state = INIT;
+ }
+bail:
+ return ret;
+}
+
+static ssize_t qib_diag_write(struct file *fp, const char __user *data,
+ size_t count, loff_t *off)
+{
+ struct qib_diag_client *dc = fp->private_data;
+ struct qib_devdata *dd = dc->dd;
+ void __iomem *kreg_base;
+ ssize_t ret;
+
+ if (dc->pid != current->pid) {
+ ret = -EPERM;
+ goto bail;
+ }
+
+ kreg_base = dd->kregbase;
+
+ if (count == 0)
+ ret = 0;
+ else if ((count % 4) || (*off % 4))
+ /* address or length is not 32-bit aligned, hence invalid */
+ ret = -EINVAL;
+ else if (dc->state < READY &&
+ ((*off || count != 8) || dc->state != INIT))
+ /* No writes except second-step of init seq */
+ ret = -EINVAL; /* before any other write allowed */
+ else {
+ unsigned long flags;
+ const struct diag_observer *op = NULL;
+ int use_32 = (count % 8) || (*off % 8);
+
+ /*
+ * Check for observer on this address range.
+ * We only support a single 32 or 64-bit write
+ * via observer, currently. This helps, because
+ * we would otherwise have to jump through hoops
+ * to make "diag transaction" meaningful when we
+ * cannot do a copy_from_user while holding the lock.
+ */
+ if (count == 4 || count == 8) {
+ u64 data64;
+ u32 offset = *off;
+ ret = copy_from_user(&data64, data, count);
+ if (ret) {
+ ret = -EFAULT;
+ goto bail;
+ }
+ spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
+ op = diag_get_observer(dd, *off);
+ if (op)
+ ret = op->hook(dd, op, offset, &data64, ~0Ull,
+ use_32);
+ spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags);
+ }
+
+ if (!op) {
+ if (use_32)
+ /*
+ * Address or length is not 64-bit aligned;
+ * do 32-bit write
+ */
+ ret = qib_write_umem32(dd, (u32) *off, data,
+ count);
+ else
+ ret = qib_write_umem64(dd, (u32) *off, data,
+ count);
+ }
+ }
+
+ if (ret >= 0) {
+ *off += count;
+ ret = count;
+ if (dc->state == INIT)
+ dc->state = READY; /* all read/write OK now */
+ }
+bail:
+ return ret;
+}
diff --git a/drivers/infiniband/hw/qib/qib_dma.c b/drivers/infiniband/hw/qib/qib_dma.c
new file mode 100644
index 000000000000..2920bb39a65b
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_dma.c
@@ -0,0 +1,182 @@
+/*
+ * Copyright (c) 2006, 2009, 2010 QLogic, Corporation. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/types.h>
+#include <linux/scatterlist.h>
+
+#include "qib_verbs.h"
+
+#define BAD_DMA_ADDRESS ((u64) 0)
+
+/*
+ * The following functions implement driver specific replacements
+ * for the ib_dma_*() functions.
+ *
+ * These functions return kernel virtual addresses instead of
+ * device bus addresses since the driver uses the CPU to copy
+ * data instead of using hardware DMA.
+ */
+
+static int qib_mapping_error(struct ib_device *dev, u64 dma_addr)
+{
+ return dma_addr == BAD_DMA_ADDRESS;
+}
+
+static u64 qib_dma_map_single(struct ib_device *dev, void *cpu_addr,
+ size_t size, enum dma_data_direction direction)
+{
+ BUG_ON(!valid_dma_direction(direction));
+ return (u64) cpu_addr;
+}
+
+static void qib_dma_unmap_single(struct ib_device *dev, u64 addr, size_t size,
+ enum dma_data_direction direction)
+{
+ BUG_ON(!valid_dma_direction(direction));
+}
+
+static u64 qib_dma_map_page(struct ib_device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction)
+{
+ u64 addr;
+
+ BUG_ON(!valid_dma_direction(direction));
+
+ if (offset + size > PAGE_SIZE) {
+ addr = BAD_DMA_ADDRESS;
+ goto done;
+ }
+
+ addr = (u64) page_address(page);
+ if (addr)
+ addr += offset;
+ /* TODO: handle highmem pages */
+
+done:
+ return addr;
+}
+
+static void qib_dma_unmap_page(struct ib_device *dev, u64 addr, size_t size,
+ enum dma_data_direction direction)
+{
+ BUG_ON(!valid_dma_direction(direction));
+}
+
+static int qib_map_sg(struct ib_device *dev, struct scatterlist *sgl,
+ int nents, enum dma_data_direction direction)
+{
+ struct scatterlist *sg;
+ u64 addr;
+ int i;
+ int ret = nents;
+
+ BUG_ON(!valid_dma_direction(direction));
+
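+	/*
+	 * Check that every element has a kernel-mapped page; returning 0
+	 * signals a mapping failure to the caller (no partial mappings).
+	 */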
+ for_each_sg(sgl, sg, nents, i) {
+ addr = (u64) page_address(sg_page(sg));
+ /* TODO: handle highmem pages */
+ if (!addr) {
+ ret = 0;
+ break;
+ }
+ }
+ return ret;
+}
+
+static void qib_unmap_sg(struct ib_device *dev,
+ struct scatterlist *sg, int nents,
+ enum dma_data_direction direction)
+{
+ BUG_ON(!valid_dma_direction(direction));
+}
+
+static u64 qib_sg_dma_address(struct ib_device *dev, struct scatterlist *sg)
+{
+ u64 addr = (u64) page_address(sg_page(sg));
+
+ if (addr)
+ addr += sg->offset;
+ return addr;
+}
+
+static unsigned int qib_sg_dma_len(struct ib_device *dev,
+ struct scatterlist *sg)
+{
+ return sg->length;
+}
+
+static void qib_sync_single_for_cpu(struct ib_device *dev, u64 addr,
+ size_t size, enum dma_data_direction dir)
+{
+}
+
+static void qib_sync_single_for_device(struct ib_device *dev, u64 addr,
+ size_t size,
+ enum dma_data_direction dir)
+{
+}
+
+static void *qib_dma_alloc_coherent(struct ib_device *dev, size_t size,
+ u64 *dma_handle, gfp_t flag)
+{
+ struct page *p;
+ void *addr = NULL;
+
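+	/* no real DMA here: the kernel virtual address doubles as the handle */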
+ p = alloc_pages(flag, get_order(size));
+ if (p)
+ addr = page_address(p);
+ if (dma_handle)
+ *dma_handle = (u64) addr;
+ return addr;
+}
+
+static void qib_dma_free_coherent(struct ib_device *dev, size_t size,
+ void *cpu_addr, u64 dma_handle)
+{
+ free_pages((unsigned long) cpu_addr, get_order(size));
+}
+
+struct ib_dma_mapping_ops qib_dma_mapping_ops = {
+ .mapping_error = qib_mapping_error,
+ .map_single = qib_dma_map_single,
+ .unmap_single = qib_dma_unmap_single,
+ .map_page = qib_dma_map_page,
+ .unmap_page = qib_dma_unmap_page,
+ .map_sg = qib_map_sg,
+ .unmap_sg = qib_unmap_sg,
+ .dma_address = qib_sg_dma_address,
+ .dma_len = qib_sg_dma_len,
+ .sync_single_for_cpu = qib_sync_single_for_cpu,
+ .sync_single_for_device = qib_sync_single_for_device,
+ .alloc_coherent = qib_dma_alloc_coherent,
+ .free_coherent = qib_dma_free_coherent
+};
diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c
new file mode 100644
index 000000000000..f15ce076ac49
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_driver.c
@@ -0,0 +1,665 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/pci.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+
+#include "qib.h"
+
+/*
+ * The size has to be longer than this string, so we can append
+ * board/chip information to it in the init code.
+ */
+const char ib_qib_version[] = QIB_IDSTR "\n";
+
+DEFINE_SPINLOCK(qib_devs_lock);
+LIST_HEAD(qib_dev_list);
+DEFINE_MUTEX(qib_mutex); /* general driver use */
+
+unsigned qib_ibmtu;
+module_param_named(ibmtu, qib_ibmtu, uint, S_IRUGO);
+MODULE_PARM_DESC(ibmtu, "Set max IB MTU (0=2KB, 1=256, 2=512, ... 5=4096)");
+
+unsigned qib_compat_ddr_negotiate = 1;
+module_param_named(compat_ddr_negotiate, qib_compat_ddr_negotiate, uint,
+ S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(compat_ddr_negotiate,
+ "Attempt pre-IBTA 1.2 DDR speed negotiation");
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("QLogic <support@qlogic.com>");
+MODULE_DESCRIPTION("QLogic IB driver");
+
+/*
+ * QIB_PIO_MAXIBHDR is the max IB header size allowed for in our
+ * PIO send buffers. This is well beyond anything currently
+ * defined in the InfiniBand spec.
+ */
+#define QIB_PIO_MAXIBHDR 128
+
+struct qlogic_ib_stats qib_stats;
+
+const char *qib_get_unit_name(int unit)
+{
+ static char iname[16];
+
+ snprintf(iname, sizeof iname, "infinipath%u", unit);
+ return iname;
+}
+
+/*
+ * Return count of units with at least one port ACTIVE.
+ */
+int qib_count_active_units(void)
+{
+ struct qib_devdata *dd;
+ struct qib_pportdata *ppd;
+ unsigned long flags;
+ int pidx, nunits_active = 0;
+
+ spin_lock_irqsave(&qib_devs_lock, flags);
+ list_for_each_entry(dd, &qib_dev_list, list) {
+ if (!(dd->flags & QIB_PRESENT) || !dd->kregbase)
+ continue;
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ ppd = dd->pport + pidx;
+ if (ppd->lid && (ppd->lflags & (QIBL_LINKINIT |
+ QIBL_LINKARMED | QIBL_LINKACTIVE))) {
+ nunits_active++;
+ break;
+ }
+ }
+ }
+ spin_unlock_irqrestore(&qib_devs_lock, flags);
+ return nunits_active;
+}
+
+/*
+ * Return count of all units, optionally return in arguments
+ * the number of usable (present) units, and the number of
+ * ports that are up.
+ */
+int qib_count_units(int *npresentp, int *nupp)
+{
+ int nunits = 0, npresent = 0, nup = 0;
+ struct qib_devdata *dd;
+ unsigned long flags;
+ int pidx;
+ struct qib_pportdata *ppd;
+
+ spin_lock_irqsave(&qib_devs_lock, flags);
+
+ list_for_each_entry(dd, &qib_dev_list, list) {
+ nunits++;
+ if ((dd->flags & QIB_PRESENT) && dd->kregbase)
+ npresent++;
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ ppd = dd->pport + pidx;
+ if (ppd->lid && (ppd->lflags & (QIBL_LINKINIT |
+ QIBL_LINKARMED | QIBL_LINKACTIVE)))
+ nup++;
+ }
+ }
+
+ spin_unlock_irqrestore(&qib_devs_lock, flags);
+
+ if (npresentp)
+ *npresentp = npresent;
+ if (nupp)
+ *nupp = nup;
+
+ return nunits;
+}
+
+/**
+ * qib_wait_linkstate - wait for an IB link state change to occur
+ * @ppd: the qlogic_ib per-port data
+ * @state: the state to wait for
+ * @msecs: the number of milliseconds to wait
+ *
+ * Wait up to msecs milliseconds for an IB link state change to occur.
+ * For now, take the easy polling route. Currently used only by
+ * qib_set_linkstate. Returns 0 if the state is reached, otherwise
+ * -ETIMEDOUT. The state can have multiple bits set, to match any of
+ * several transitions.
+ */
+int qib_wait_linkstate(struct qib_pportdata *ppd, u32 state, int msecs)
+{
+ int ret;
+ unsigned long flags;
+
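+	/* only one waiter at a time; a non-zero state_wanted means busy */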
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ if (ppd->state_wanted) {
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ ret = -EBUSY;
+ goto bail;
+ }
+ ppd->state_wanted = state;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ wait_event_interruptible_timeout(ppd->state_wait,
+ (ppd->lflags & state),
+ msecs_to_jiffies(msecs));
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->state_wanted = 0;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+
+ if (!(ppd->lflags & state))
+ ret = -ETIMEDOUT;
+ else
+ ret = 0;
+bail:
+ return ret;
+}
+
+int qib_set_linkstate(struct qib_pportdata *ppd, u8 newstate)
+{
+ u32 lstate;
+ int ret;
+ struct qib_devdata *dd = ppd->dd;
+ unsigned long flags;
+
+ switch (newstate) {
+ case QIB_IB_LINKDOWN_ONLY:
+ dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
+ IB_LINKCMD_DOWN | IB_LINKINITCMD_NOP);
+ /* don't wait */
+ ret = 0;
+ goto bail;
+
+ case QIB_IB_LINKDOWN:
+ dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
+ IB_LINKCMD_DOWN | IB_LINKINITCMD_POLL);
+ /* don't wait */
+ ret = 0;
+ goto bail;
+
+ case QIB_IB_LINKDOWN_SLEEP:
+ dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
+ IB_LINKCMD_DOWN | IB_LINKINITCMD_SLEEP);
+ /* don't wait */
+ ret = 0;
+ goto bail;
+
+ case QIB_IB_LINKDOWN_DISABLE:
+ dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
+ IB_LINKCMD_DOWN | IB_LINKINITCMD_DISABLE);
+ /* don't wait */
+ ret = 0;
+ goto bail;
+
+ case QIB_IB_LINKARM:
+ if (ppd->lflags & QIBL_LINKARMED) {
+ ret = 0;
+ goto bail;
+ }
+ if (!(ppd->lflags & (QIBL_LINKINIT | QIBL_LINKACTIVE))) {
+ ret = -EINVAL;
+ goto bail;
+ }
+ /*
+ * Since the port can be ACTIVE when we ask for ARMED,
+ * clear QIBL_LINKV so we can wait for a transition.
+ * If the link isn't ARMED, then something else happened
+ * and there is no point waiting for ARMED.
+ */
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags &= ~QIBL_LINKV;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
+ IB_LINKCMD_ARMED | IB_LINKINITCMD_NOP);
+ lstate = QIBL_LINKV;
+ break;
+
+ case QIB_IB_LINKACTIVE:
+ if (ppd->lflags & QIBL_LINKACTIVE) {
+ ret = 0;
+ goto bail;
+ }
+ if (!(ppd->lflags & QIBL_LINKARMED)) {
+ ret = -EINVAL;
+ goto bail;
+ }
+ dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
+ IB_LINKCMD_ACTIVE | IB_LINKINITCMD_NOP);
+ lstate = QIBL_LINKACTIVE;
+ break;
+
+ default:
+ ret = -EINVAL;
+ goto bail;
+ }
+ ret = qib_wait_linkstate(ppd, lstate, 10);
+
+bail:
+ return ret;
+}
+
+/*
+ * Get the address of an eager buffer from its index (buffers are
+ * allocated in chunks, not contiguously).
+ */
+static inline void *qib_get_egrbuf(const struct qib_ctxtdata *rcd, u32 etail)
+{
+ const u32 chunk = etail / rcd->rcvegrbufs_perchunk;
+ const u32 idx = etail % rcd->rcvegrbufs_perchunk;
+
+ return rcd->rcvegrbuf[chunk] + idx * rcd->dd->rcvegrbufsize;
+}
+
+/*
+ * Returns 1 if the error was a CRC, else 0.
+ * Needed for some chips' synthesized error counters.
+ */
+static u32 qib_rcv_hdrerr(struct qib_pportdata *ppd, u32 ctxt,
+ u32 eflags, u32 l, u32 etail, __le32 *rhf_addr,
+ struct qib_message_header *hdr)
+{
+ u32 ret = 0;
+
+ if (eflags & (QLOGIC_IB_RHF_H_ICRCERR | QLOGIC_IB_RHF_H_VCRCERR))
+ ret = 1;
+ return ret;
+}
+
+/*
+ * qib_kreceive - receive a packet
+ * @rcd: the qlogic_ib context
+ * @llic: gets count of good packets needed to clear lli,
+ * (used with chips that need to track crcs for lli)
+ *
+ * Called from the interrupt handler for errors or a receive interrupt.
+ * Returns the number of CRC error packets, needed by some chips for
+ * local link integrity tracking. CRCs are adjusted down by following
+ * good packets, if any, and the count of good packets is also tracked.
+ */
+u32 qib_kreceive(struct qib_ctxtdata *rcd, u32 *llic, u32 *npkts)
+{
+ struct qib_devdata *dd = rcd->dd;
+ struct qib_pportdata *ppd = rcd->ppd;
+ __le32 *rhf_addr;
+ void *ebuf;
+ const u32 rsize = dd->rcvhdrentsize; /* words */
+ const u32 maxcnt = dd->rcvhdrcnt * rsize; /* words */
+ u32 etail = -1, l, hdrqtail;
+ struct qib_message_header *hdr;
+ u32 eflags, etype, tlen, i = 0, updegr = 0, crcs = 0;
+ int last;
+ u64 lval;
+ struct qib_qp *qp, *nqp;
+
+ l = rcd->head;
+ rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset;
+ if (dd->flags & QIB_NODMA_RTAIL) {
+ u32 seq = qib_hdrget_seq(rhf_addr);
+ if (seq != rcd->seq_cnt)
+ goto bail;
+ hdrqtail = 0;
+ } else {
+ hdrqtail = qib_get_rcvhdrtail(rcd);
+ if (l == hdrqtail)
+ goto bail;
+ smp_rmb(); /* prevent speculative reads of dma'ed hdrq */
+ }
+
+ for (last = 0, i = 1; !last; i += !last) {
+ hdr = dd->f_get_msgheader(dd, rhf_addr);
+ eflags = qib_hdrget_err_flags(rhf_addr);
+ etype = qib_hdrget_rcv_type(rhf_addr);
+ /* total length */
+ tlen = qib_hdrget_length_in_bytes(rhf_addr);
+ ebuf = NULL;
+ if ((dd->flags & QIB_NODMA_RTAIL) ?
+ qib_hdrget_use_egr_buf(rhf_addr) :
+ (etype != RCVHQ_RCV_TYPE_EXPECTED)) {
+ etail = qib_hdrget_index(rhf_addr);
+ updegr = 1;
+ if (tlen > sizeof(*hdr) ||
+ etype >= RCVHQ_RCV_TYPE_NON_KD)
+ ebuf = qib_get_egrbuf(rcd, etail);
+ }
+ if (!eflags) {
+ u16 lrh_len = be16_to_cpu(hdr->lrh[2]) << 2;
+
+ if (lrh_len != tlen) {
+ qib_stats.sps_lenerrs++;
+ goto move_along;
+ }
+ }
+ if (etype == RCVHQ_RCV_TYPE_NON_KD && !eflags &&
+ ebuf == NULL &&
+ tlen > (dd->rcvhdrentsize - 2 + 1 -
+ qib_hdrget_offset(rhf_addr)) << 2) {
+ goto move_along;
+ }
+
+ /*
+ * Both tiderr and qibhdrerr are set for all plain IB
+ * packets; only qibhdrerr should be set.
+ */
+ if (unlikely(eflags))
+ crcs += qib_rcv_hdrerr(ppd, rcd->ctxt, eflags, l,
+ etail, rhf_addr, hdr);
+ else if (etype == RCVHQ_RCV_TYPE_NON_KD) {
+ qib_ib_rcv(rcd, hdr, ebuf, tlen);
+ if (crcs)
+ crcs--;
+ else if (llic && *llic)
+ --*llic;
+ }
+move_along:
+ l += rsize;
+ if (l >= maxcnt)
+ l = 0;
+ rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset;
+ if (dd->flags & QIB_NODMA_RTAIL) {
+ u32 seq = qib_hdrget_seq(rhf_addr);
+
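+			/* expected sequence numbers run 1..13, then wrap */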
+ if (++rcd->seq_cnt > 13)
+ rcd->seq_cnt = 1;
+ if (seq != rcd->seq_cnt)
+ last = 1;
+ } else if (l == hdrqtail)
+ last = 1;
+ /*
+ * Update head regs etc., every 16 packets, if not last pkt,
+ * to help prevent rcvhdrq overflows, when many packets
+ * are processed and queue is nearly full.
+ * Don't request an interrupt for intermediate updates.
+ */
+ lval = l;
+ if (!last && !(i & 0xf)) {
+ dd->f_update_usrhead(rcd, lval, updegr, etail);
+ updegr = 0;
+ }
+ }
+
+ rcd->head = l;
+ rcd->pkt_count += i;
+
+ /*
+ * Iterate over all QPs waiting to respond.
+ * The list won't change since the IRQ is only run on one CPU.
+ */
+ list_for_each_entry_safe(qp, nqp, &rcd->qp_wait_list, rspwait) {
+ list_del_init(&qp->rspwait);
+ if (qp->r_flags & QIB_R_RSP_NAK) {
+ qp->r_flags &= ~QIB_R_RSP_NAK;
+ qib_send_rc_ack(qp);
+ }
+ if (qp->r_flags & QIB_R_RSP_SEND) {
+ unsigned long flags;
+
+ qp->r_flags &= ~QIB_R_RSP_SEND;
+ spin_lock_irqsave(&qp->s_lock, flags);
+ if (ib_qib_state_ops[qp->state] &
+ QIB_PROCESS_OR_FLUSH_SEND)
+ qib_schedule_send(qp);
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ }
+ if (atomic_dec_and_test(&qp->refcount))
+ wake_up(&qp->wait);
+ }
+
+bail:
+ /* Report number of packets consumed */
+ if (npkts)
+ *npkts = i;
+
+ /*
+ * Always write head at end, and setup rcv interrupt, even
+ * if no packets were processed.
+ */
+ lval = (u64)rcd->head | dd->rhdrhead_intr_off;
+ dd->f_update_usrhead(rcd, lval, updegr, etail);
+ return crcs;
+}
+
+/**
+ * qib_set_mtu - set the MTU
+ * @ppd: the per-port data
+ * @arg: the new MTU
+ *
+ * We can handle "any" incoming size, the issue here is whether we
+ * need to restrict our outgoing size. For now, we don't do any
+ * sanity checking on this, and we don't deal with what happens to
+ * programs that are already running when the size changes.
+ * NOTE: changing the MTU will usually cause the IBC to go back to
+ * link INIT state...
+ */
+int qib_set_mtu(struct qib_pportdata *ppd, u16 arg)
+{
+ u32 piosize;
+ int ret, chk;
+
+ if (arg != 256 && arg != 512 && arg != 1024 && arg != 2048 &&
+ arg != 4096) {
+ ret = -EINVAL;
+ goto bail;
+ }
+ chk = ib_mtu_enum_to_int(qib_ibmtu);
+ if (chk > 0 && arg > chk) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ piosize = ppd->ibmaxlen;
+ ppd->ibmtu = arg;
+
+ if (arg >= (piosize - QIB_PIO_MAXIBHDR)) {
+ /* Only if it's not the initial value (or reset to it) */
+ if (piosize != ppd->init_ibmaxlen) {
+ if (arg > piosize && arg <= ppd->init_ibmaxlen)
+ piosize = ppd->init_ibmaxlen - 2 * sizeof(u32);
+ ppd->ibmaxlen = piosize;
+ }
+ } else if ((arg + QIB_PIO_MAXIBHDR) != ppd->ibmaxlen) {
+ piosize = arg + QIB_PIO_MAXIBHDR - 2 * sizeof(u32);
+ ppd->ibmaxlen = piosize;
+ }
+
+ ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_MTU, 0);
+
+ ret = 0;
+
+bail:
+ return ret;
+}
+
+int qib_set_lid(struct qib_pportdata *ppd, u32 lid, u8 lmc)
+{
+ struct qib_devdata *dd = ppd->dd;
+ ppd->lid = lid;
+ ppd->lmc = lmc;
+
+ dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LIDLMC,
+ lid | (~((1U << lmc) - 1)) << 16);
+
+ qib_devinfo(dd->pcidev, "IB%u:%u got a lid: 0x%x\n",
+ dd->unit, ppd->port, lid);
+
+ return 0;
+}
+
+/*
+ * The following deal with the "obviously simple" task of overriding the
+ * state of the LEDs, which normally indicate link physical and logical
+ * status. The complications arise in dealing with different hardware
+ * mappings and the board-dependent routine being called from interrupts.
+ * And then there's the requirement to _flash_ them.
+ */
+#define LED_OVER_FREQ_SHIFT 8
+#define LED_OVER_FREQ_MASK (0xFF<<LED_OVER_FREQ_SHIFT)
+/* Below is "non-zero" to force override, but both actual LEDs are off */
+#define LED_OVER_BOTH_OFF (8)
+
+static void qib_run_led_override(unsigned long opaque)
+{
+ struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
+ struct qib_devdata *dd = ppd->dd;
+ int timeoff;
+ int ph_idx;
+
+ if (!(dd->flags & QIB_INITTED))
+ return;
+
+ ph_idx = ppd->led_override_phase++ & 1;
+ ppd->led_override = ppd->led_override_vals[ph_idx];
+ timeoff = ppd->led_override_timeoff;
+
+ dd->f_setextled(ppd, 1);
+ /*
+	 * Don't re-fire the timer if the user asked for it to be off; we let
+	 * it fire one more time after they turn it off, to simplify things.
+ */
+ if (ppd->led_override_vals[0] || ppd->led_override_vals[1])
+ mod_timer(&ppd->led_override_timer, jiffies + timeoff);
+}
+
+void qib_set_led_override(struct qib_pportdata *ppd, unsigned int val)
+{
+ struct qib_devdata *dd = ppd->dd;
+ int timeoff, freq;
+
+ if (!(dd->flags & QIB_INITTED))
+ return;
+
+ /* First check if we are blinking. If not, use 1HZ polling */
+ timeoff = HZ;
+ freq = (val & LED_OVER_FREQ_MASK) >> LED_OVER_FREQ_SHIFT;
+
+ if (freq) {
+ /* For blink, set each phase from one nybble of val */
+ ppd->led_override_vals[0] = val & 0xF;
+ ppd->led_override_vals[1] = (val >> 4) & 0xF;
+ timeoff = (HZ << 4)/freq;
+ } else {
+ /* Non-blink set both phases the same. */
+ ppd->led_override_vals[0] = val & 0xF;
+ ppd->led_override_vals[1] = val & 0xF;
+ }
+ ppd->led_override_timeoff = timeoff;
+
+ /*
+ * If the timer has not already been started, do so. Use a "quick"
+ * timeout so the function will be called soon, to look at our request.
+ */
+ if (atomic_inc_return(&ppd->led_override_timer_active) == 1) {
+ /* Need to start timer */
+ init_timer(&ppd->led_override_timer);
+ ppd->led_override_timer.function = qib_run_led_override;
+ ppd->led_override_timer.data = (unsigned long) ppd;
+ ppd->led_override_timer.expires = jiffies + 1;
+ add_timer(&ppd->led_override_timer);
+ } else {
+ if (ppd->led_override_vals[0] || ppd->led_override_vals[1])
+ mod_timer(&ppd->led_override_timer, jiffies + 1);
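+		/* drop the count we took above; the timer was already active */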
+ atomic_dec(&ppd->led_override_timer_active);
+ }
+}
+
+/**
+ * qib_reset_device - reset the chip if possible
+ * @unit: the device to reset
+ *
+ * Whether or not reset is successful, we attempt to re-initialize the chip
+ * (that is, much like a driver unload/reload). We clear the INITTED flag
+ * so that the various entry points will fail until we reinitialize. For
+ * now, we only allow this if no user contexts are open that use chip resources.
+ */
+int qib_reset_device(int unit)
+{
+ int ret, i;
+ struct qib_devdata *dd = qib_lookup(unit);
+ struct qib_pportdata *ppd;
+ unsigned long flags;
+ int pidx;
+
+ if (!dd) {
+ ret = -ENODEV;
+ goto bail;
+ }
+
+ qib_devinfo(dd->pcidev, "Reset on unit %u requested\n", unit);
+
+ if (!dd->kregbase || !(dd->flags & QIB_PRESENT)) {
+ qib_devinfo(dd->pcidev, "Invalid unit number %u or "
+ "not initialized or not present\n", unit);
+ ret = -ENXIO;
+ goto bail;
+ }
+
+ spin_lock_irqsave(&dd->uctxt_lock, flags);
+ if (dd->rcd)
+ for (i = dd->first_user_ctxt; i < dd->cfgctxts; i++) {
+ if (!dd->rcd[i] || !dd->rcd[i]->cnt)
+ continue;
+ spin_unlock_irqrestore(&dd->uctxt_lock, flags);
+ ret = -EBUSY;
+ goto bail;
+ }
+ spin_unlock_irqrestore(&dd->uctxt_lock, flags);
+
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ ppd = dd->pport + pidx;
+ if (atomic_read(&ppd->led_override_timer_active)) {
+ /* Need to stop LED timer, _then_ shut off LEDs */
+ del_timer_sync(&ppd->led_override_timer);
+ atomic_set(&ppd->led_override_timer_active, 0);
+ }
+
+ /* Shut off LEDs after we are sure timer is not running */
+ ppd->led_override = LED_OVER_BOTH_OFF;
+ dd->f_setextled(ppd, 0);
+ if (dd->flags & QIB_HAS_SEND_DMA)
+ qib_teardown_sdma(ppd);
+ }
+
+ ret = dd->f_reset(dd);
+ if (ret == 1)
+ ret = qib_init(dd, 1);
+ else
+ ret = -EAGAIN;
+ if (ret)
+ qib_dev_err(dd, "Reinitialize unit %u after "
+ "reset failed with %d\n", unit, ret);
+ else
+ qib_devinfo(dd->pcidev, "Reinitialized unit %u after "
+ "resetting\n", unit);
+
+bail:
+ return ret;
+}
diff --git a/drivers/infiniband/hw/qib/qib_eeprom.c b/drivers/infiniband/hw/qib/qib_eeprom.c
new file mode 100644
index 000000000000..92d9cfe98a68
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_eeprom.c
@@ -0,0 +1,451 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/vmalloc.h>
+
+#include "qib.h"
+
+/*
+ * Functions specific to the serial EEPROM on cards handled by ib_qib.
+ * The actual serial interface code is in qib_twsi.c; this file is a client of it.
+ */
+
+/**
+ * qib_eeprom_read - receives bytes from the eeprom via I2C
+ * @dd: the qlogic_ib device
+ * @eeprom_offset: address to read from
+ * @buff: where to store the result
+ * @len: number of bytes to receive
+ */
+int qib_eeprom_read(struct qib_devdata *dd, u8 eeprom_offset,
+ void *buff, int len)
+{
+ int ret;
+
+ ret = mutex_lock_interruptible(&dd->eep_lock);
+ if (!ret) {
+ ret = qib_twsi_reset(dd);
+ if (ret)
+ qib_dev_err(dd, "EEPROM Reset for read failed\n");
+ else
+ ret = qib_twsi_blk_rd(dd, dd->twsi_eeprom_dev,
+ eeprom_offset, buff, len);
+ mutex_unlock(&dd->eep_lock);
+ }
+
+ return ret;
+}
+
+/*
+ * Actually update the eeprom, first doing write enable if
+ * needed, then restoring write enable state.
+ * Must be called with eep_lock held
+ */
+static int eeprom_write_with_enable(struct qib_devdata *dd, u8 offset,
+ const void *buf, int len)
+{
+ int ret, pwen;
+
+ pwen = dd->f_eeprom_wen(dd, 1);
+ ret = qib_twsi_reset(dd);
+ if (ret)
+ qib_dev_err(dd, "EEPROM Reset for write failed\n");
+ else
+ ret = qib_twsi_blk_wr(dd, dd->twsi_eeprom_dev,
+ offset, buf, len);
+ dd->f_eeprom_wen(dd, pwen);
+ return ret;
+}
+
+/**
+ * qib_eeprom_write - writes data to the eeprom via I2C
+ * @dd: the qlogic_ib device
+ * @eeprom_offset: where to place data
+ * @buff: data to write
+ * @len: number of bytes to write
+ */
+int qib_eeprom_write(struct qib_devdata *dd, u8 eeprom_offset,
+ const void *buff, int len)
+{
+ int ret;
+
+ ret = mutex_lock_interruptible(&dd->eep_lock);
+ if (!ret) {
+ ret = eeprom_write_with_enable(dd, eeprom_offset, buff, len);
+ mutex_unlock(&dd->eep_lock);
+ }
+
+ return ret;
+}
+
+static u8 flash_csum(struct qib_flash *ifp, int adjust)
+{
+ u8 *ip = (u8 *) ifp;
+ u8 csum = 0, len;
+
+ /*
+ * Limit length checksummed to max length of actual data.
+ * Checksum of erased eeprom will still be bad, but we avoid
+ * reading past the end of the buffer we were passed.
+ */
+ len = ifp->if_length;
+ if (len > sizeof(struct qib_flash))
+ len = sizeof(struct qib_flash);
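+	/*
+	 * The stored checksum is the bitwise complement of the sum of all
+	 * other bytes: sum everything, back out if_csum, then invert.
+	 */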
+ while (len--)
+ csum += *ip++;
+ csum -= ifp->if_csum;
+ csum = ~csum;
+ if (adjust)
+ ifp->if_csum = csum;
+
+ return csum;
+}
+
+/**
+ * qib_get_eeprom_info - get the GUID et al. from the TWSI EEPROM device
+ * @dd: the qlogic_ib device
+ *
+ * We have the capability to use the nguid field, and get
+ * the guid from the first chip's flash, to use for all of them.
+ */
+void qib_get_eeprom_info(struct qib_devdata *dd)
+{
+ void *buf;
+ struct qib_flash *ifp;
+ __be64 guid;
+ int len, eep_stat;
+ u8 csum, *bguid;
+ int t = dd->unit;
+ struct qib_devdata *dd0 = qib_lookup(0);
+
+ if (t && dd0->nguid > 1 && t <= dd0->nguid) {
+ u8 oguid;
+ dd->base_guid = dd0->base_guid;
+ bguid = (u8 *) &dd->base_guid;
+
+ oguid = bguid[7];
+ bguid[7] += t;
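+		/*
+		 * If adding the unit number wrapped the low octet, propagate
+		 * the carry upward, but never into the OUI octets.
+		 */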
+ if (oguid > bguid[7]) {
+ if (bguid[6] == 0xff) {
+ if (bguid[5] == 0xff) {
+ qib_dev_err(dd, "Can't set %s GUID"
+ " from base, wraps to"
+ " OUI!\n",
+ qib_get_unit_name(t));
+ dd->base_guid = 0;
+ goto bail;
+ }
+ bguid[5]++;
+ }
+ bguid[6]++;
+ }
+ dd->nguid = 1;
+ goto bail;
+ }
+
+ /*
+ * Read full flash, not just currently used part, since it may have
+ * been written with a newer definition.
+	 */
+ len = sizeof(struct qib_flash);
+ buf = vmalloc(len);
+ if (!buf) {
+ qib_dev_err(dd, "Couldn't allocate memory to read %u "
+ "bytes from eeprom for GUID\n", len);
+ goto bail;
+ }
+
+ /*
+ * Use "public" eeprom read function, which does locking and
+ * figures out device. This will migrate to chip-specific.
+ */
+ eep_stat = qib_eeprom_read(dd, 0, buf, len);
+
+ if (eep_stat) {
+ qib_dev_err(dd, "Failed reading GUID from eeprom\n");
+ goto done;
+ }
+ ifp = (struct qib_flash *)buf;
+
+ csum = flash_csum(ifp, 0);
+ if (csum != ifp->if_csum) {
+ qib_devinfo(dd->pcidev, "Bad I2C flash checksum: "
+ "0x%x, not 0x%x\n", csum, ifp->if_csum);
+ goto done;
+ }
+ if (*(__be64 *) ifp->if_guid == cpu_to_be64(0) ||
+ *(__be64 *) ifp->if_guid == ~cpu_to_be64(0)) {
+ qib_dev_err(dd, "Invalid GUID %llx from flash; ignoring\n",
+ *(unsigned long long *) ifp->if_guid);
+ /* don't allow GUID if all 0 or all 1's */
+ goto done;
+ }
+
+ /* complain, but allow it */
+ if (*(u64 *) ifp->if_guid == 0x100007511000000ULL)
+ qib_devinfo(dd->pcidev, "Warning, GUID %llx is "
+ "default, probably not correct!\n",
+ *(unsigned long long *) ifp->if_guid);
+
+ bguid = ifp->if_guid;
+ if (!bguid[0] && !bguid[1] && !bguid[2]) {
+ /*
+ * Original incorrect GUID format in flash; fix in
+ * core copy, by shifting up 2 octets; don't need to
+ * change top octet, since both it and shifted are 0.
+ */
+ bguid[1] = bguid[3];
+ bguid[2] = bguid[4];
+ bguid[3] = 0;
+ bguid[4] = 0;
+ guid = *(__be64 *) ifp->if_guid;
+ } else
+ guid = *(__be64 *) ifp->if_guid;
+ dd->base_guid = guid;
+ dd->nguid = ifp->if_numguid;
+ /*
+ * Things are slightly complicated by the desire to transparently
+ * support both the Pathscale 10-digit serial number and the QLogic
+ * 13-character version.
+ */
+ if ((ifp->if_fversion > 1) && ifp->if_sprefix[0] &&
+ ((u8 *) ifp->if_sprefix)[0] != 0xFF) {
+ char *snp = dd->serial;
+
+ /*
+ * This board has a Serial-prefix, which is stored
+ * elsewhere for backward-compatibility.
+ */
+ memcpy(snp, ifp->if_sprefix, sizeof ifp->if_sprefix);
+ snp[sizeof ifp->if_sprefix] = '\0';
+ len = strlen(snp);
+ snp += len;
+ len = (sizeof dd->serial) - len;
+ if (len > sizeof ifp->if_serial)
+ len = sizeof ifp->if_serial;
+ memcpy(snp, ifp->if_serial, len);
+ } else
+ memcpy(dd->serial, ifp->if_serial,
+ sizeof ifp->if_serial);
+ if (!strstr(ifp->if_comment, "Tested successfully"))
+ qib_dev_err(dd, "Board SN %s did not pass functional "
+ "test: %s\n", dd->serial, ifp->if_comment);
+
+ memcpy(&dd->eep_st_errs, &ifp->if_errcntp, QIB_EEP_LOG_CNT);
+ /*
+ * Power-on (actually "active") hours are kept as little-endian value
+ * in EEPROM, but as seconds in a (possibly as small as 24-bit)
+ * atomic_t while running.
+ */
+ atomic_set(&dd->active_time, 0);
+ dd->eep_hrs = ifp->if_powerhour[0] | (ifp->if_powerhour[1] << 8);
+
+done:
+ vfree(buf);
+
+bail:;
+}
+
+/**
+ * qib_update_eeprom_log - copy active-time and error counters to eeprom
+ * @dd: the qlogic_ib device
+ *
+ * Although the time is kept as seconds in the qib_devdata struct, it is
+ * rounded to hours for re-write, as we have only 16 bits in EEPROM.
+ * First-cut code reads whole (expected) struct qib_flash, modifies,
+ * re-writes. Future direction: read/write only what we need, assuming
+ * that the EEPROM had to have been "good enough" for driver init, and
+ * if not, we aren't making it worse.
+ *
+ */
+int qib_update_eeprom_log(struct qib_devdata *dd)
+{
+ void *buf;
+ struct qib_flash *ifp;
+ int len, hi_water;
+ uint32_t new_time, new_hrs;
+ u8 csum;
+ int ret, idx;
+ unsigned long flags;
+
+ /* first, check if we actually need to do anything. */
+ ret = 0;
+ for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) {
+ if (dd->eep_st_new_errs[idx]) {
+ ret = 1;
+ break;
+ }
+ }
+ new_time = atomic_read(&dd->active_time);
+
+ if (ret == 0 && new_time < 3600)
+ goto bail;
+
+ /*
+ * The quick-check above determined that there is something worthy
+	 * of logging, so get the current contents for a more detailed update.
+	 * Read the full flash, not just the currently used part, since it
+	 * may have been written with a newer definition.
+ */
+ len = sizeof(struct qib_flash);
+ buf = vmalloc(len);
+ ret = 1;
+ if (!buf) {
+ qib_dev_err(dd, "Couldn't allocate memory to read %u "
+ "bytes from eeprom for logging\n", len);
+ goto bail;
+ }
+
+ /* Grab semaphore and read current EEPROM. If we get an
+ * error, let go, but if not, keep it until we finish write.
+ */
+ ret = mutex_lock_interruptible(&dd->eep_lock);
+ if (ret) {
+ qib_dev_err(dd, "Unable to acquire EEPROM for logging\n");
+ goto free_bail;
+ }
+ ret = qib_twsi_blk_rd(dd, dd->twsi_eeprom_dev, 0, buf, len);
+ if (ret) {
+ mutex_unlock(&dd->eep_lock);
+		qib_dev_err(dd, "Unable to read EEPROM for logging\n");
+ goto free_bail;
+ }
+ ifp = (struct qib_flash *)buf;
+
+ csum = flash_csum(ifp, 0);
+ if (csum != ifp->if_csum) {
+ mutex_unlock(&dd->eep_lock);
+ qib_dev_err(dd, "EEPROM cks err (0x%02X, S/B 0x%02X)\n",
+ csum, ifp->if_csum);
+ ret = 1;
+ goto free_bail;
+ }
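+	/*
+	 * hi_water tracks the highest flash offset we modify, so the final
+	 * write below only covers the dirty prefix of the image.
+	 */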
+ hi_water = 0;
+ spin_lock_irqsave(&dd->eep_st_lock, flags);
+ for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) {
+ int new_val = dd->eep_st_new_errs[idx];
+ if (new_val) {
+ /*
+ * If we have seen any errors, add to EEPROM values
+ * We need to saturate at 0xFF (255) and we also
+ * would need to adjust the checksum if we were
+ * trying to minimize EEPROM traffic
+ * Note that we add to actual current count in EEPROM,
+ * in case it was altered while we were running.
+ */
+ new_val += ifp->if_errcntp[idx];
+ if (new_val > 0xFF)
+ new_val = 0xFF;
+ if (ifp->if_errcntp[idx] != new_val) {
+ ifp->if_errcntp[idx] = new_val;
+ hi_water = offsetof(struct qib_flash,
+ if_errcntp) + idx;
+ }
+ /*
+ * update our shadow (used to minimize EEPROM
+ * traffic), to match what we are about to write.
+ */
+ dd->eep_st_errs[idx] = new_val;
+ dd->eep_st_new_errs[idx] = 0;
+ }
+ }
+ /*
+ * Now update active-time. We would like to round to the nearest hour
+ * but unless atomic_t are sure to be proper signed ints we cannot,
+ * because we need to account for what we "transfer" to EEPROM and
+ * if we log an hour at 31 minutes, then we would need to set
+ * active_time to -29 to accurately count the _next_ hour.
+ */
+ if (new_time >= 3600) {
+ new_hrs = new_time / 3600;
+ atomic_sub((new_hrs * 3600), &dd->active_time);
+ new_hrs += dd->eep_hrs;
+ if (new_hrs > 0xFFFF)
+ new_hrs = 0xFFFF;
+ dd->eep_hrs = new_hrs;
+ if ((new_hrs & 0xFF) != ifp->if_powerhour[0]) {
+ ifp->if_powerhour[0] = new_hrs & 0xFF;
+ hi_water = offsetof(struct qib_flash, if_powerhour);
+ }
+ if ((new_hrs >> 8) != ifp->if_powerhour[1]) {
+ ifp->if_powerhour[1] = new_hrs >> 8;
+ hi_water = offsetof(struct qib_flash, if_powerhour) + 1;
+ }
+ }
+ /*
+ * There is a tiny possibility that we could somehow fail to write
+ * the EEPROM after updating our shadows, but problems from holding
+ * the spinlock too long are a much bigger issue.
+ */
+ spin_unlock_irqrestore(&dd->eep_st_lock, flags);
+ if (hi_water) {
+		/* we made some change to the data, so update the checksum and write */
+ csum = flash_csum(ifp, 1);
+ ret = eeprom_write_with_enable(dd, 0, buf, hi_water + 1);
+ }
+ mutex_unlock(&dd->eep_lock);
+ if (ret)
+ qib_dev_err(dd, "Failed updating EEPROM\n");
+
+free_bail:
+ vfree(buf);
+bail:
+ return ret;
+}
+
+/**
+ * qib_inc_eeprom_err - increment one of the four error counters
+ * that are logged to EEPROM.
+ * @dd: the qlogic_ib device
+ * @eidx: 0..3, the counter to increment
+ * @incr: how much to add
+ *
+ * Each counter is 8-bits, and saturates at 255 (0xFF). They
+ * are copied to the EEPROM (aka flash) whenever qib_update_eeprom_log()
+ * is called, but it can only be called in a context that allows sleep.
+ * This function can be called even at interrupt level.
+ */
+void qib_inc_eeprom_err(struct qib_devdata *dd, u32 eidx, u32 incr)
+{
+ uint new_val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dd->eep_st_lock, flags);
+ new_val = dd->eep_st_new_errs[eidx] + incr;
+ if (new_val > 255)
+ new_val = 255;
+ dd->eep_st_new_errs[eidx] = new_val;
+ spin_unlock_irqrestore(&dd->eep_st_lock, flags);
+}
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
new file mode 100644
index 000000000000..a142a9eb5226
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -0,0 +1,2317 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
+ * All rights reserved.
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/pci.h>
+#include <linux/poll.h>
+#include <linux/cdev.h>
+#include <linux/swap.h>
+#include <linux/vmalloc.h>
+#include <linux/highmem.h>
+#include <linux/io.h>
+#include <linux/uio.h>
+#include <linux/jiffies.h>
+#include <asm/pgtable.h>
+#include <linux/delay.h>
+
+#include "qib.h"
+#include "qib_common.h"
+#include "qib_user_sdma.h"
+
+static int qib_open(struct inode *, struct file *);
+static int qib_close(struct inode *, struct file *);
+static ssize_t qib_write(struct file *, const char __user *, size_t, loff_t *);
+static ssize_t qib_aio_write(struct kiocb *, const struct iovec *,
+ unsigned long, loff_t);
+static unsigned int qib_poll(struct file *, struct poll_table_struct *);
+static int qib_mmapf(struct file *, struct vm_area_struct *);
+
+static const struct file_operations qib_file_ops = {
+ .owner = THIS_MODULE,
+ .write = qib_write,
+ .aio_write = qib_aio_write,
+ .open = qib_open,
+ .release = qib_close,
+ .poll = qib_poll,
+ .mmap = qib_mmapf
+};
+
+/*
+ * Convert kernel virtual addresses to physical addresses so they don't
+ * potentially conflict with the chip addresses used as mmap offsets.
+ * It doesn't really matter what mmap offset we use as long as we can
+ * interpret it correctly.
+ */
+static u64 cvt_kvaddr(void *p)
+{
+ struct page *page;
+ u64 paddr = 0;
+
+ page = vmalloc_to_page(p);
+ if (page)
+ paddr = page_to_pfn(page) << PAGE_SHIFT;
+
+ return paddr;
+}
+
+static int qib_get_base_info(struct file *fp, void __user *ubase,
+ size_t ubase_size)
+{
+ struct qib_ctxtdata *rcd = ctxt_fp(fp);
+ int ret = 0;
+ struct qib_base_info *kinfo = NULL;
+ struct qib_devdata *dd = rcd->dd;
+ struct qib_pportdata *ppd = rcd->ppd;
+ unsigned subctxt_cnt;
+ int shared, master;
+ size_t sz;
+
+ subctxt_cnt = rcd->subctxt_cnt;
+ if (!subctxt_cnt) {
+ shared = 0;
+ master = 0;
+ subctxt_cnt = 1;
+ } else {
+ shared = 1;
+ master = !subctxt_fp(fp);
+ }
+
+ sz = sizeof(*kinfo);
+ /* If context sharing is not requested, allow the old size structure */
+ if (!shared)
+ sz -= 7 * sizeof(u64);
+ if (ubase_size < sz) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ kinfo = kzalloc(sizeof(*kinfo), GFP_KERNEL);
+ if (kinfo == NULL) {
+ ret = -ENOMEM;
+ goto bail;
+ }
+
+ ret = dd->f_get_base_info(rcd, kinfo);
+ if (ret < 0)
+ goto bail;
+
+ kinfo->spi_rcvhdr_cnt = dd->rcvhdrcnt;
+ kinfo->spi_rcvhdrent_size = dd->rcvhdrentsize;
+ kinfo->spi_tidegrcnt = rcd->rcvegrcnt;
+ kinfo->spi_rcv_egrbufsize = dd->rcvegrbufsize;
+ /*
+ * have to mmap whole thing
+ */
+ kinfo->spi_rcv_egrbuftotlen =
+ rcd->rcvegrbuf_chunks * rcd->rcvegrbuf_size;
+ kinfo->spi_rcv_egrperchunk = rcd->rcvegrbufs_perchunk;
+ kinfo->spi_rcv_egrchunksize = kinfo->spi_rcv_egrbuftotlen /
+ rcd->rcvegrbuf_chunks;
+ kinfo->spi_tidcnt = dd->rcvtidcnt / subctxt_cnt;
+ if (master)
+ kinfo->spi_tidcnt += dd->rcvtidcnt % subctxt_cnt;
+ /*
+	 * For this use, this may be cfgctxts summed over all chips that
+	 * are configured and present.
+ */
+ kinfo->spi_nctxts = dd->cfgctxts;
+ /* unit (chip/board) our context is on */
+ kinfo->spi_unit = dd->unit;
+ kinfo->spi_port = ppd->port;
+ /* for now, only a single page */
+ kinfo->spi_tid_maxsize = PAGE_SIZE;
+
+ /*
+ * Doing this per context, and based on the skip value, etc. This has
+ * to be the actual buffer size, since the protocol code treats it
+ * as an array.
+ *
+ * These have to be set to user addresses in the user code via mmap.
+ * These values are used on return to user code for the mmap target
+ * addresses only. For 32 bit, same 44 bit address problem, so use
+ * the physical address, not virtual. Before 2.6.11, using the
+ * page_address() macro worked, but in 2.6.11, even that returns the
+ * full 64 bit address (upper bits all 1's). So far, using the
+ * physical addresses (or chip offsets, for chip mapping) works, but
+ * no doubt some future kernel release will change that, and we'll be
+ * on to yet another method of dealing with this.
+ * Normally only one of rcvhdr_tailaddr or rhf_offset is useful
+ * since the chips with non-zero rhf_offset don't normally
+ * enable tail register updates to host memory, but for testing,
+ * both can be enabled and used.
+ */
+ kinfo->spi_rcvhdr_base = (u64) rcd->rcvhdrq_phys;
+ kinfo->spi_rcvhdr_tailaddr = (u64) rcd->rcvhdrqtailaddr_phys;
+ kinfo->spi_rhf_offset = dd->rhf_offset;
+ kinfo->spi_rcv_egrbufs = (u64) rcd->rcvegr_phys;
+ kinfo->spi_pioavailaddr = (u64) dd->pioavailregs_phys;
+ /* setup per-unit (not port) status area for user programs */
+ kinfo->spi_status = (u64) kinfo->spi_pioavailaddr +
+ (char *) ppd->statusp -
+ (char *) dd->pioavailregs_dma;
+ kinfo->spi_uregbase = (u64) dd->uregbase + dd->ureg_align * rcd->ctxt;
+ if (!shared) {
+ kinfo->spi_piocnt = rcd->piocnt;
+ kinfo->spi_piobufbase = (u64) rcd->piobufs;
+ kinfo->spi_sendbuf_status = cvt_kvaddr(rcd->user_event_mask);
+ } else if (master) {
+ kinfo->spi_piocnt = (rcd->piocnt / subctxt_cnt) +
+ (rcd->piocnt % subctxt_cnt);
+ /* Master's PIO buffers are after all the slave's */
+ kinfo->spi_piobufbase = (u64) rcd->piobufs +
+ dd->palign *
+ (rcd->piocnt - kinfo->spi_piocnt);
+ } else {
+ unsigned slave = subctxt_fp(fp) - 1;
+
+ kinfo->spi_piocnt = rcd->piocnt / subctxt_cnt;
+ kinfo->spi_piobufbase = (u64) rcd->piobufs +
+ dd->palign * kinfo->spi_piocnt * slave;
+ }
+
+ if (shared) {
+ kinfo->spi_sendbuf_status =
+ cvt_kvaddr(&rcd->user_event_mask[subctxt_fp(fp)]);
+ /* only spi_subctxt_* fields should be set in this block! */
+ kinfo->spi_subctxt_uregbase = cvt_kvaddr(rcd->subctxt_uregbase);
+
+ kinfo->spi_subctxt_rcvegrbuf =
+ cvt_kvaddr(rcd->subctxt_rcvegrbuf);
+ kinfo->spi_subctxt_rcvhdr_base =
+ cvt_kvaddr(rcd->subctxt_rcvhdr_base);
+ }
+
+ /*
+ * All user buffers are 2KB buffers. If we ever support
+ * giving 4KB buffers to user processes, this will need some
+ * work. Can't use piobufbase directly, because it has
+ * both 2K and 4K buffer base values.
+ */
+ kinfo->spi_pioindex = (kinfo->spi_piobufbase - dd->pio2k_bufbase) /
+ dd->palign;
+ kinfo->spi_pioalign = dd->palign;
+ kinfo->spi_qpair = QIB_KD_QP;
+ /*
+ * user mode PIO buffers are always 2KB, even when 4KB can
+ * be received, and sent via the kernel; this is ibmaxlen
+ * for 2K MTU.
+ */
+ kinfo->spi_piosize = dd->piosize2k - 2 * sizeof(u32);
+ kinfo->spi_mtu = ppd->ibmaxlen; /* maxlen, not ibmtu */
+ kinfo->spi_ctxt = rcd->ctxt;
+ kinfo->spi_subctxt = subctxt_fp(fp);
+ kinfo->spi_sw_version = QIB_KERN_SWVERSION;
+ kinfo->spi_sw_version |= 1U << 31; /* QLogic-built, not kernel.org */
+ kinfo->spi_hw_version = dd->revision;
+
+ if (master)
+ kinfo->spi_runtime_flags |= QIB_RUNTIME_MASTER;
+
+ sz = (ubase_size < sizeof(*kinfo)) ? ubase_size : sizeof(*kinfo);
+ if (copy_to_user(ubase, kinfo, sz))
+ ret = -EFAULT;
+bail:
+ kfree(kinfo);
+ return ret;
+}
+
+/**
+ * qib_tid_update - update a context TID
+ * @rcd: the context
+ * @fp: the qib device file
+ * @ti: the TID information
+ *
+ * The new implementation as of Oct 2004 is that the driver assigns
+ * the tid and returns it to the caller. To reduce search time, we
+ * keep a cursor for each context, walking the shadow tid array to find
+ * one that's not in use.
+ *
+ * For now, if we can't allocate the full list, we fail, although
+ * in the long run, we'll allocate as many as we can, and the
+ * caller will deal with that by trying the remaining pages later.
+ * That means that when we fail, we have to mark the tids as not in
+ * use again, in our shadow copy.
+ *
+ * It's up to the caller to free the tids when they are done.
+ * We'll unlock the pages as they free them.
+ *
+ * Also, right now we are locking one page at a time, but since
+ * the intended use of this routine is for a single group of
+ * virtually contiguous pages, that should change to improve
+ * performance.
+ */
+static int qib_tid_update(struct qib_ctxtdata *rcd, struct file *fp,
+ const struct qib_tid_info *ti)
+{
+ int ret = 0, ntids;
+ u32 tid, ctxttid, cnt, i, tidcnt, tidoff;
+ u16 *tidlist;
+ struct qib_devdata *dd = rcd->dd;
+ u64 physaddr;
+ unsigned long vaddr;
+ u64 __iomem *tidbase;
+ unsigned long tidmap[8];
+ struct page **pagep = NULL;
+ unsigned subctxt = subctxt_fp(fp);
+
+ if (!dd->pageshadow) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ cnt = ti->tidcnt;
+ if (!cnt) {
+ ret = -EFAULT;
+ goto done;
+ }
+ ctxttid = rcd->ctxt * dd->rcvtidcnt;
+ if (!rcd->subctxt_cnt) {
+ tidcnt = dd->rcvtidcnt;
+ tid = rcd->tidcursor;
+ tidoff = 0;
+ } else if (!subctxt) {
+ tidcnt = (dd->rcvtidcnt / rcd->subctxt_cnt) +
+ (dd->rcvtidcnt % rcd->subctxt_cnt);
+ tidoff = dd->rcvtidcnt - tidcnt;
+ ctxttid += tidoff;
+ tid = tidcursor_fp(fp);
+ } else {
+ tidcnt = dd->rcvtidcnt / rcd->subctxt_cnt;
+ tidoff = tidcnt * (subctxt - 1);
+ ctxttid += tidoff;
+ tid = tidcursor_fp(fp);
+ }
+ if (cnt > tidcnt) {
+ /* make sure it all fits in tid_pg_list */
+ qib_devinfo(dd->pcidev, "Process tried to allocate %u "
+ "TIDs, only trying max (%u)\n", cnt, tidcnt);
+ cnt = tidcnt;
+ }
+ pagep = (struct page **) rcd->tid_pg_list;
+ tidlist = (u16 *) &pagep[dd->rcvtidcnt];
+ pagep += tidoff;
+ tidlist += tidoff;
+
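+	/* tidmap records the TIDs we program; used for the copy-out to the
+	 * user and for cleanup on error
+	 */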
+ memset(tidmap, 0, sizeof(tidmap));
+ /* before decrement; chip actual # */
+ ntids = tidcnt;
+ tidbase = (u64 __iomem *) (((char __iomem *) dd->kregbase) +
+ dd->rcvtidbase +
+ ctxttid * sizeof(*tidbase));
+
+ /* virtual address of first page in transfer */
+ vaddr = ti->tidvaddr;
+ if (!access_ok(VERIFY_WRITE, (void __user *) vaddr,
+ cnt * PAGE_SIZE)) {
+ ret = -EFAULT;
+ goto done;
+ }
+ ret = qib_get_user_pages(vaddr, cnt, pagep);
+ if (ret) {
+ /*
+ * if (ret == -EBUSY)
+ * We can't continue because the pagep array won't be
+ * initialized. This should never happen,
+ * unless perhaps the user has mpin'ed the pages
+ * themselves.
+ */
+ qib_devinfo(dd->pcidev,
+ "Failed to lock addr %p, %u pages: "
+ "errno %d\n", (void *) vaddr, cnt, -ret);
+ goto done;
+ }
+ for (i = 0; i < cnt; i++, vaddr += PAGE_SIZE) {
+ for (; ntids--; tid++) {
+ if (tid == tidcnt)
+ tid = 0;
+ if (!dd->pageshadow[ctxttid + tid])
+ break;
+ }
+ if (ntids < 0) {
+ /*
+ * Oops, wrapped all the way through their TIDs,
+ * and didn't have enough free; see comments at
+ * start of routine
+ */
+ i--; /* last tidlist[i] not filled in */
+ ret = -ENOMEM;
+ break;
+ }
+ tidlist[i] = tid + tidoff;
+ /* we "know" system pages and TID pages are same size */
+ dd->pageshadow[ctxttid + tid] = pagep[i];
+ dd->physshadow[ctxttid + tid] =
+ qib_map_page(dd->pcidev, pagep[i], 0, PAGE_SIZE,
+ PCI_DMA_FROMDEVICE);
+ /*
+ * don't need atomic or it's overhead
+ */
+ __set_bit(tid, tidmap);
+ physaddr = dd->physshadow[ctxttid + tid];
+ /* PERFORMANCE: below should almost certainly be cached */
+ dd->f_put_tid(dd, &tidbase[tid],
+ RCVHQ_RCV_TYPE_EXPECTED, physaddr);
+ /*
+ * don't check this tid in qib_ctxtshadow, since we
+ * just filled it in; start with the next one.
+ */
+ tid++;
+ }
+
+ if (ret) {
+ u32 limit;
+cleanup:
+ /* jump here if copy out of updated info failed... */
+ /* same code that's in qib_free_tid() */
+ limit = sizeof(tidmap) * BITS_PER_BYTE;
+ if (limit > tidcnt)
+ /* just in case size changes in future */
+ limit = tidcnt;
+ tid = find_first_bit((const unsigned long *)tidmap, limit);
+ for (; tid < limit; tid++) {
+ if (!test_bit(tid, tidmap))
+ continue;
+ if (dd->pageshadow[ctxttid + tid]) {
+ dma_addr_t phys;
+
+ phys = dd->physshadow[ctxttid + tid];
+ dd->physshadow[ctxttid + tid] = dd->tidinvalid;
+ /* PERFORMANCE: below should almost certainly
+ * be cached
+ */
+ dd->f_put_tid(dd, &tidbase[tid],
+ RCVHQ_RCV_TYPE_EXPECTED,
+ dd->tidinvalid);
+ pci_unmap_page(dd->pcidev, phys, PAGE_SIZE,
+ PCI_DMA_FROMDEVICE);
+ dd->pageshadow[ctxttid + tid] = NULL;
+ }
+ }
+ qib_release_user_pages(pagep, cnt);
+ } else {
+ /*
+ * Copy the updated array, with qib_tid's filled in, back
+		 * to user. Since we did the copy-in already, this "should
+		 * never fail". If it does, we have to clean up...
+ */
+ if (copy_to_user((void __user *)
+ (unsigned long) ti->tidlist,
+ tidlist, cnt * sizeof(*tidlist))) {
+ ret = -EFAULT;
+ goto cleanup;
+ }
+ if (copy_to_user((void __user *) (unsigned long) ti->tidmap,
+ tidmap, sizeof tidmap)) {
+ ret = -EFAULT;
+ goto cleanup;
+ }
+ if (tid == tidcnt)
+ tid = 0;
+ if (!rcd->subctxt_cnt)
+ rcd->tidcursor = tid;
+ else
+ tidcursor_fp(fp) = tid;
+ }
+
+done:
+ return ret;
+}
+
+/**
+ * qib_tid_free - free a context TID
+ * @rcd: the context
+ * @subctxt: the subcontext
+ * @ti: the TID info
+ *
+ * Right now we are unlocking one page at a time, but since
+ * the intended use of this routine is for a single group of
+ * virtually contiguous pages, that should change to improve
+ * performance. We check that the TID is in range for this context
+ * but otherwise don't check validity; if user has an error and
+ * frees the wrong tid, it's only their own data that can thereby
+ * be corrupted. We do check that the TID was in use, for sanity.
+ * We always use our idea of the saved address, not the address that
+ * they pass in to us.
+ */
+static int qib_tid_free(struct qib_ctxtdata *rcd, unsigned subctxt,
+ const struct qib_tid_info *ti)
+{
+ int ret = 0;
+ u32 tid, ctxttid, cnt, limit, tidcnt;
+ struct qib_devdata *dd = rcd->dd;
+ u64 __iomem *tidbase;
+ unsigned long tidmap[8];
+
+ if (!dd->pageshadow) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ if (copy_from_user(tidmap, (void __user *)(unsigned long)ti->tidmap,
+ sizeof tidmap)) {
+ ret = -EFAULT;
+ goto done;
+ }
+
+ ctxttid = rcd->ctxt * dd->rcvtidcnt;
+ if (!rcd->subctxt_cnt)
+ tidcnt = dd->rcvtidcnt;
+ else if (!subctxt) {
+ tidcnt = (dd->rcvtidcnt / rcd->subctxt_cnt) +
+ (dd->rcvtidcnt % rcd->subctxt_cnt);
+ ctxttid += dd->rcvtidcnt - tidcnt;
+ } else {
+ tidcnt = dd->rcvtidcnt / rcd->subctxt_cnt;
+ ctxttid += tidcnt * (subctxt - 1);
+ }
+ tidbase = (u64 __iomem *) ((char __iomem *)(dd->kregbase) +
+ dd->rcvtidbase +
+ ctxttid * sizeof(*tidbase));
+
+ limit = sizeof(tidmap) * BITS_PER_BYTE;
+ if (limit > tidcnt)
+ /* just in case size changes in future */
+ limit = tidcnt;
+ tid = find_first_bit(tidmap, limit);
+ for (cnt = 0; tid < limit; tid++) {
+ /*
+ * small optimization; if we detect a run of 3 or so without
+ * any set, use find_first_bit again. That's mainly to
+ * accelerate the case where we wrapped, so we have some at
+ * the beginning, and some at the end, and a big gap
+ * in the middle.
+ */
+ if (!test_bit(tid, tidmap))
+ continue;
+ cnt++;
+ if (dd->pageshadow[ctxttid + tid]) {
+ struct page *p;
+ dma_addr_t phys;
+
+ p = dd->pageshadow[ctxttid + tid];
+ dd->pageshadow[ctxttid + tid] = NULL;
+ phys = dd->physshadow[ctxttid + tid];
+ dd->physshadow[ctxttid + tid] = dd->tidinvalid;
+ /* PERFORMANCE: below should almost certainly be
+ * cached
+ */
+ dd->f_put_tid(dd, &tidbase[tid],
+ RCVHQ_RCV_TYPE_EXPECTED, dd->tidinvalid);
+ pci_unmap_page(dd->pcidev, phys, PAGE_SIZE,
+ PCI_DMA_FROMDEVICE);
+ qib_release_user_pages(&p, 1);
+ }
+ }
+done:
+ return ret;
+}
+
+/**
+ * qib_set_part_key - set a partition key
+ * @rcd: the context
+ * @key: the key
+ *
+ * We can have up to 4 active at a time (other than the default, which is
+ * always allowed). This is somewhat tricky, since multiple contexts may set
+ * the same key, so we reference count them, and clean up at exit. All 4
+ * partition keys are packed into a single qlogic_ib register. It's an
+ * error for a process to set the same pkey multiple times. We provide no
+ * mechanism to de-allocate a pkey at this time, we may eventually need to
+ * do that. I've used the atomic operations, and no locking, and only make
+ * a single pass through what's available. This should be more than
+ * adequate for some time. I'll think about spinlocks or the like if and as
+ * it's necessary.
+ */
+static int qib_set_part_key(struct qib_ctxtdata *rcd, u16 key)
+{
+ struct qib_pportdata *ppd = rcd->ppd;
+ int i, any = 0, pidx = -1;
+ u16 lkey = key & 0x7FFF;
+ int ret;
+
+ if (lkey == (QIB_DEFAULT_P_KEY & 0x7FFF)) {
+ /* nothing to do; this key always valid */
+ ret = 0;
+ goto bail;
+ }
+
+ if (!lkey) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ /*
+ * Set the full membership bit, because it has to be
+ * set in the register or the packet, and it seems
+ * cleaner to set in the register than to force all
+ * callers to set it.
+ */
+ key |= 0x8000;
+
+ for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) {
+ if (!rcd->pkeys[i] && pidx == -1)
+ pidx = i;
+ if (rcd->pkeys[i] == key) {
+ ret = -EEXIST;
+ goto bail;
+ }
+ }
+ if (pidx == -1) {
+ ret = -EBUSY;
+ goto bail;
+ }
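+	/*
+	 * Scan the port pkey table: take a reference on an existing match,
+	 * and count free slots in 'any' for the allocation pass below.
+	 */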
+ for (any = i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
+ if (!ppd->pkeys[i]) {
+ any++;
+ continue;
+ }
+ if (ppd->pkeys[i] == key) {
+ atomic_t *pkrefs = &ppd->pkeyrefs[i];
+
+ if (atomic_inc_return(pkrefs) > 1) {
+ rcd->pkeys[pidx] = key;
+ ret = 0;
+ goto bail;
+ } else {
+ /*
+ * lost race, decrement count, catch below
+ */
+ atomic_dec(pkrefs);
+ any++;
+ }
+ }
+ if ((ppd->pkeys[i] & 0x7FFF) == lkey) {
+ /*
+ * It makes no sense to have both the limited and
+ * full membership PKEY set at the same time since
+ * the unlimited one will disable the limited one.
+ */
+ ret = -EEXIST;
+ goto bail;
+ }
+ }
+ if (!any) {
+ ret = -EBUSY;
+ goto bail;
+ }
+ for (any = i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
+ if (!ppd->pkeys[i] &&
+ atomic_inc_return(&ppd->pkeyrefs[i]) == 1) {
+ rcd->pkeys[pidx] = key;
+ ppd->pkeys[i] = key;
+ (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0);
+ ret = 0;
+ goto bail;
+ }
+ }
+ ret = -EBUSY;
+
+bail:
+ return ret;
+}
+
+/**
+ * qib_manage_rcvq - manage a context's receive queue
+ * @rcd: the context
+ * @subctxt: the subcontext
+ * @start_stop: action to carry out
+ *
+ * start_stop == 0 disables receive on the context, for use in queue
+ * overflow conditions. start_stop == 1 re-enables, to be used to
+ * re-init the software copy of the head register
+ */
+static int qib_manage_rcvq(struct qib_ctxtdata *rcd, unsigned subctxt,
+ int start_stop)
+{
+ struct qib_devdata *dd = rcd->dd;
+ unsigned int rcvctrl_op;
+
+ if (subctxt)
+ goto bail;
+ /* atomically enable or disable receive for the ctxt. */
+ if (start_stop) {
+ /*
+ * On enable, force in-memory copy of the tail register to
+ * 0, so that protocol code doesn't have to worry about
+ * whether or not the chip has yet updated the in-memory
+ * copy or not on return from the system call. The chip
+ * always resets its tail register back to 0 on a
+ * transition from disabled to enabled.
+ */
+ if (rcd->rcvhdrtail_kvaddr)
+ qib_clear_rcvhdrtail(rcd);
+ rcvctrl_op = QIB_RCVCTRL_CTXT_ENB;
+ } else
+ rcvctrl_op = QIB_RCVCTRL_CTXT_DIS;
+ dd->f_rcvctrl(rcd->ppd, rcvctrl_op, rcd->ctxt);
+ /* always; new head should be equal to new tail; see above */
+bail:
+ return 0;
+}
+
+static void qib_clean_part_key(struct qib_ctxtdata *rcd,
+ struct qib_devdata *dd)
+{
+ int i, j, pchanged = 0;
+ u64 oldpkey;
+ struct qib_pportdata *ppd = rcd->ppd;
+
+ /* for debugging only */
+ oldpkey = (u64) ppd->pkeys[0] |
+ ((u64) ppd->pkeys[1] << 16) |
+ ((u64) ppd->pkeys[2] << 32) |
+ ((u64) ppd->pkeys[3] << 48);
+
+ for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) {
+ if (!rcd->pkeys[i])
+ continue;
+ for (j = 0; j < ARRAY_SIZE(ppd->pkeys); j++) {
+ /* check for match independent of the global bit */
+ if ((ppd->pkeys[j] & 0x7fff) !=
+ (rcd->pkeys[i] & 0x7fff))
+ continue;
+ if (atomic_dec_and_test(&ppd->pkeyrefs[j])) {
+ ppd->pkeys[j] = 0;
+ pchanged++;
+ }
+ break;
+ }
+ rcd->pkeys[i] = 0;
+ }
+ if (pchanged)
+ (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0);
+}
+
+/* common code for the mappings on dma_alloc_coherent mem */
+static int qib_mmap_mem(struct vm_area_struct *vma, struct qib_ctxtdata *rcd,
+ unsigned len, void *kvaddr, u32 write_ok, char *what)
+{
+ struct qib_devdata *dd = rcd->dd;
+ unsigned long pfn;
+ int ret;
+
+ if ((vma->vm_end - vma->vm_start) > len) {
+ qib_devinfo(dd->pcidev,
+ "FAIL on %s: len %lx > %x\n", what,
+ vma->vm_end - vma->vm_start, len);
+ ret = -EFAULT;
+ goto bail;
+ }
+
+ /*
+ * Shared context user code requires the rcvhdrq mapped r/w; others
+ * are allowed only a read-only mapping.
+ */
+ if (!write_ok) {
+ if (vma->vm_flags & VM_WRITE) {
+ qib_devinfo(dd->pcidev,
+ "%s must be mapped readonly\n", what);
+ ret = -EPERM;
+ goto bail;
+ }
+
+ /* don't allow them to later change with mprotect */
+ vma->vm_flags &= ~VM_MAYWRITE;
+ }
+
+ pfn = virt_to_phys(kvaddr) >> PAGE_SHIFT;
+ ret = remap_pfn_range(vma, vma->vm_start, pfn,
+ len, vma->vm_page_prot);
+ if (ret)
+ qib_devinfo(dd->pcidev, "%s ctxt%u mmap of %lx, %x "
+ "bytes failed: %d\n", what, rcd->ctxt,
+ pfn, len, ret);
+bail:
+ return ret;
+}
+
+static int mmap_ureg(struct vm_area_struct *vma, struct qib_devdata *dd,
+ u64 ureg)
+{
+ unsigned long phys;
+ unsigned long sz;
+ int ret;
+
+ /*
+ * This is real hardware, so use io_remap. This is the mechanism
+ * for the user process to update the head registers for their ctxt
+ * in the chip.
+ */
+ sz = dd->flags & QIB_HAS_HDRSUPP ? 2 * PAGE_SIZE : PAGE_SIZE;
+ if ((vma->vm_end - vma->vm_start) > sz) {
+ qib_devinfo(dd->pcidev, "FAIL mmap userreg: reqlen "
+ "%lx > PAGE\n", vma->vm_end - vma->vm_start);
+ ret = -EFAULT;
+ } else {
+ phys = dd->physaddr + ureg;
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
+ ret = io_remap_pfn_range(vma, vma->vm_start,
+ phys >> PAGE_SHIFT,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot);
+ }
+ return ret;
+}
+
+static int mmap_piobufs(struct vm_area_struct *vma,
+ struct qib_devdata *dd,
+ struct qib_ctxtdata *rcd,
+ unsigned piobufs, unsigned piocnt)
+{
+ unsigned long phys;
+ int ret;
+
+ /*
+ * When we map the PIO buffers in the chip, we want to map them as
+ * writeonly, no read possible; unfortunately, x86 doesn't allow
+ * for this in hardware, but we still prevent users from asking
+ * for it.
+ */
+ if ((vma->vm_end - vma->vm_start) > (piocnt * dd->palign)) {
+ qib_devinfo(dd->pcidev, "FAIL mmap piobufs: "
+ "reqlen %lx > PAGE\n",
+ vma->vm_end - vma->vm_start);
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ phys = dd->physaddr + piobufs;
+
+#if defined(__powerpc__)
+ /* There isn't a generic way to specify writethrough mappings */
+ pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
+ pgprot_val(vma->vm_page_prot) |= _PAGE_WRITETHRU;
+ pgprot_val(vma->vm_page_prot) &= ~_PAGE_GUARDED;
+#endif
+
+ /*
+ * don't allow them to later change to readable with mprotect (for when
+ * not initially mapped readable, as is normally the case)
+ */
+ vma->vm_flags &= ~VM_MAYREAD;
+ vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
+
+ if (qib_wc_pat)
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+ ret = io_remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot);
+bail:
+ return ret;
+}
+
+static int mmap_rcvegrbufs(struct vm_area_struct *vma,
+ struct qib_ctxtdata *rcd)
+{
+ struct qib_devdata *dd = rcd->dd;
+ unsigned long start, size;
+ size_t total_size, i;
+ unsigned long pfn;
+ int ret;
+
+ size = rcd->rcvegrbuf_size;
+ total_size = rcd->rcvegrbuf_chunks * size;
+ if ((vma->vm_end - vma->vm_start) > total_size) {
+ qib_devinfo(dd->pcidev, "FAIL on egr bufs: "
+ "reqlen %lx > actual %lx\n",
+ vma->vm_end - vma->vm_start,
+ (unsigned long) total_size);
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ if (vma->vm_flags & VM_WRITE) {
+ qib_devinfo(dd->pcidev, "Can't map eager buffers as "
+ "writable (flags=%lx)\n", vma->vm_flags);
+ ret = -EPERM;
+ goto bail;
+ }
+ /* don't allow them to later change to writeable with mprotect */
+ vma->vm_flags &= ~VM_MAYWRITE;
+
+ start = vma->vm_start;
+
+ for (i = 0; i < rcd->rcvegrbuf_chunks; i++, start += size) {
+ pfn = virt_to_phys(rcd->rcvegrbuf[i]) >> PAGE_SHIFT;
+ ret = remap_pfn_range(vma, start, pfn, size,
+ vma->vm_page_prot);
+ if (ret < 0)
+ goto bail;
+ }
+ ret = 0;
+
+bail:
+ return ret;
+}
+
+/*
+ * qib_file_vma_fault - handle a VMA page fault.
+ */
+static int qib_file_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct page *page;
+
+ page = vmalloc_to_page((void *)(vmf->pgoff << PAGE_SHIFT));
+ if (!page)
+ return VM_FAULT_SIGBUS;
+
+ get_page(page);
+ vmf->page = page;
+
+ return 0;
+}
+
+static struct vm_operations_struct qib_file_vm_ops = {
+ .fault = qib_file_vma_fault,
+};
+
+static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
+ struct qib_ctxtdata *rcd, unsigned subctxt)
+{
+ struct qib_devdata *dd = rcd->dd;
+ unsigned subctxt_cnt;
+ unsigned long len;
+ void *addr;
+ size_t size;
+ int ret = 0;
+
+ subctxt_cnt = rcd->subctxt_cnt;
+ size = rcd->rcvegrbuf_chunks * rcd->rcvegrbuf_size;
+
+ /*
+ * Each process has the subctxt uregbase, rcvhdrq, and
+ * rcvegrbufs mmapped both as one array covering all the
+ * processes and separately for this process.
+ */
+ if (pgaddr == cvt_kvaddr(rcd->subctxt_uregbase)) {
+ addr = rcd->subctxt_uregbase;
+ size = PAGE_SIZE * subctxt_cnt;
+ } else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvhdr_base)) {
+ addr = rcd->subctxt_rcvhdr_base;
+ size = rcd->rcvhdrq_size * subctxt_cnt;
+ } else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvegrbuf)) {
+ addr = rcd->subctxt_rcvegrbuf;
+ size *= subctxt_cnt;
+ } else if (pgaddr == cvt_kvaddr(rcd->subctxt_uregbase +
+ PAGE_SIZE * subctxt)) {
+ addr = rcd->subctxt_uregbase + PAGE_SIZE * subctxt;
+ size = PAGE_SIZE;
+ } else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvhdr_base +
+ rcd->rcvhdrq_size * subctxt)) {
+ addr = rcd->subctxt_rcvhdr_base +
+ rcd->rcvhdrq_size * subctxt;
+ size = rcd->rcvhdrq_size;
+ } else if (pgaddr == cvt_kvaddr(&rcd->user_event_mask[subctxt])) {
+ addr = rcd->user_event_mask;
+ size = PAGE_SIZE;
+ } else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvegrbuf +
+ size * subctxt)) {
+ addr = rcd->subctxt_rcvegrbuf + size * subctxt;
+ /* rcvegrbufs are read-only on the slave */
+ if (vma->vm_flags & VM_WRITE) {
+ qib_devinfo(dd->pcidev,
+ "Can't map eager buffers as "
+ "writable (flags=%lx)\n", vma->vm_flags);
+ ret = -EPERM;
+ goto bail;
+ }
+ /*
+ * Don't allow permission to later change to writeable
+ * with mprotect.
+ */
+ vma->vm_flags &= ~VM_MAYWRITE;
+ } else
+ goto bail;
+ len = vma->vm_end - vma->vm_start;
+ if (len > size) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ vma->vm_pgoff = (unsigned long) addr >> PAGE_SHIFT;
+ vma->vm_ops = &qib_file_vm_ops;
+ vma->vm_flags |= VM_RESERVED | VM_DONTEXPAND;
+ ret = 1;
+
+bail:
+ return ret;
+}
+
+/**
+ * qib_mmapf - mmap various structures into user space
+ * @fp: the file pointer
+ * @vma: the VM area
+ *
+ * We use this to have a shared buffer between the kernel and the user code
+ * for the rcvhdr queue, egr buffers, and the per-context user regs and pio
+ * buffers in the chip. We have the open and close entries so we can bump
+ * the ref count and keep the driver from being unloaded while still mapped.
+ */
+static int qib_mmapf(struct file *fp, struct vm_area_struct *vma)
+{
+ struct qib_ctxtdata *rcd;
+ struct qib_devdata *dd;
+ u64 pgaddr, ureg;
+ unsigned piobufs, piocnt;
+ int ret, match = 1;
+
+ rcd = ctxt_fp(fp);
+ if (!rcd || !(vma->vm_flags & VM_SHARED)) {
+ ret = -EINVAL;
+ goto bail;
+ }
+ dd = rcd->dd;
+
+ /*
+ * This is the qib_do_user_init() code, mapping the shared buffers
+ * and per-context user registers into the user process. The address
+ * referred to by vm_pgoff is the file offset passed via mmap().
+ * For shared contexts, this is the kernel vmalloc() address of the
+ * pages to share with the master.
+ * For non-shared or master ctxts, this is a physical address.
+ * We only do one mmap for each space mapped.
+ */
+ pgaddr = vma->vm_pgoff << PAGE_SHIFT;
+
+ /*
+ * Check for 0 in case one of the allocations failed, but user
+ * called mmap anyway.
+ */
+ if (!pgaddr) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ /*
+ * Physical addresses must fit in 40 bits for our hardware.
+ * Check for kernel virtual addresses first, anything else must
+ * match a HW or memory address.
+ */
+ ret = mmap_kvaddr(vma, pgaddr, rcd, subctxt_fp(fp));
+ if (ret) {
+ if (ret > 0)
+ ret = 0;
+ goto bail;
+ }
+
+ ureg = dd->uregbase + dd->ureg_align * rcd->ctxt;
+ if (!rcd->subctxt_cnt) {
+ /* ctxt is not shared */
+ piocnt = rcd->piocnt;
+ piobufs = rcd->piobufs;
+ } else if (!subctxt_fp(fp)) {
+ /* caller is the master */
+ piocnt = (rcd->piocnt / rcd->subctxt_cnt) +
+ (rcd->piocnt % rcd->subctxt_cnt);
+ piobufs = rcd->piobufs +
+ dd->palign * (rcd->piocnt - piocnt);
+ } else {
+ unsigned slave = subctxt_fp(fp) - 1;
+
+ /* caller is a slave */
+ piocnt = rcd->piocnt / rcd->subctxt_cnt;
+ piobufs = rcd->piobufs + dd->palign * piocnt * slave;
+ }
+
+ if (pgaddr == ureg)
+ ret = mmap_ureg(vma, dd, ureg);
+ else if (pgaddr == piobufs)
+ ret = mmap_piobufs(vma, dd, rcd, piobufs, piocnt);
+ else if (pgaddr == dd->pioavailregs_phys)
+ /* in-memory copy of pioavail registers */
+ ret = qib_mmap_mem(vma, rcd, PAGE_SIZE,
+ (void *) dd->pioavailregs_dma, 0,
+ "pioavail registers");
+ else if (pgaddr == rcd->rcvegr_phys)
+ ret = mmap_rcvegrbufs(vma, rcd);
+ else if (pgaddr == (u64) rcd->rcvhdrq_phys)
+ /*
+ * The rcvhdrq itself; multiple pages, contiguous
+ * from an i/o perspective. Shared contexts need
+ * to map r/w, so we allow writing.
+ */
+ ret = qib_mmap_mem(vma, rcd, rcd->rcvhdrq_size,
+ rcd->rcvhdrq, 1, "rcvhdrq");
+ else if (pgaddr == (u64) rcd->rcvhdrqtailaddr_phys)
+ /* in-memory copy of rcvhdrq tail register */
+ ret = qib_mmap_mem(vma, rcd, PAGE_SIZE,
+ rcd->rcvhdrtail_kvaddr, 0,
+ "rcvhdrq tail");
+ else
+ match = 0;
+ if (!match)
+ ret = -EINVAL;
+
+ vma->vm_private_data = NULL;
+
+ if (ret < 0)
+ qib_devinfo(dd->pcidev,
+ "mmap Failure %d: off %llx len %lx\n",
+ -ret, (unsigned long long)pgaddr,
+ vma->vm_end - vma->vm_start);
+bail:
+ return ret;
+}
+
+static unsigned int qib_poll_urgent(struct qib_ctxtdata *rcd,
+ struct file *fp,
+ struct poll_table_struct *pt)
+{
+ struct qib_devdata *dd = rcd->dd;
+ unsigned pollflag;
+
+ poll_wait(fp, &rcd->wait, pt);
+
+ spin_lock_irq(&dd->uctxt_lock);
+ if (rcd->urgent != rcd->urgent_poll) {
+ pollflag = POLLIN | POLLRDNORM;
+ rcd->urgent_poll = rcd->urgent;
+ } else {
+ pollflag = 0;
+ set_bit(QIB_CTXT_WAITING_URG, &rcd->flag);
+ }
+ spin_unlock_irq(&dd->uctxt_lock);
+
+ return pollflag;
+}
+
+static unsigned int qib_poll_next(struct qib_ctxtdata *rcd,
+ struct file *fp,
+ struct poll_table_struct *pt)
+{
+ struct qib_devdata *dd = rcd->dd;
+ unsigned pollflag;
+
+ poll_wait(fp, &rcd->wait, pt);
+
+ spin_lock_irq(&dd->uctxt_lock);
+ if (dd->f_hdrqempty(rcd)) {
+ set_bit(QIB_CTXT_WAITING_RCV, &rcd->flag);
+ dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_INTRAVAIL_ENB, rcd->ctxt);
+ pollflag = 0;
+ } else
+ pollflag = POLLIN | POLLRDNORM;
+ spin_unlock_irq(&dd->uctxt_lock);
+
+ return pollflag;
+}
+
+static unsigned int qib_poll(struct file *fp, struct poll_table_struct *pt)
+{
+ struct qib_ctxtdata *rcd;
+ unsigned pollflag;
+
+ rcd = ctxt_fp(fp);
+ if (!rcd)
+ pollflag = POLLERR;
+ else if (rcd->poll_type == QIB_POLL_TYPE_URGENT)
+ pollflag = qib_poll_urgent(rcd, fp, pt);
+ else if (rcd->poll_type == QIB_POLL_TYPE_ANYRCV)
+ pollflag = qib_poll_next(rcd, fp, pt);
+ else /* invalid */
+ pollflag = POLLERR;
+
+ return pollflag;
+}
+
+/*
+ * Check that userland and driver are compatible for subcontexts.
+ */
+static int qib_compatible_subctxts(int user_swmajor, int user_swminor)
+{
+ /* this code is written long-hand for clarity */
+ if (QIB_USER_SWMAJOR != user_swmajor) {
+ /* no promise of compatibility if major mismatch */
+ return 0;
+ }
+ if (QIB_USER_SWMAJOR == 1) {
+ switch (QIB_USER_SWMINOR) {
+ case 0:
+ case 1:
+ case 2:
+ /* no subctxt implementation so cannot be compatible */
+ return 0;
+ case 3:
+ /* 3 is only compatible with itself */
+ return user_swminor == 3;
+ default:
+ /* >= 4 are compatible (or are expected to be) */
+ return user_swminor >= 4;
+ }
+ }
+ /* make no promises yet for future major versions */
+ return 0;
+}
+
+static int init_subctxts(struct qib_devdata *dd,
+ struct qib_ctxtdata *rcd,
+ const struct qib_user_info *uinfo)
+{
+ int ret = 0;
+ unsigned num_subctxts;
+ size_t size;
+
+ /*
+ * If the user is requesting zero subctxts,
+ * skip the subctxt allocation.
+ */
+ if (uinfo->spu_subctxt_cnt <= 0)
+ goto bail;
+ num_subctxts = uinfo->spu_subctxt_cnt;
+
+ /* Check for subctxt compatibility */
+ if (!qib_compatible_subctxts(uinfo->spu_userversion >> 16,
+ uinfo->spu_userversion & 0xffff)) {
+ qib_devinfo(dd->pcidev,
+ "Mismatched user version (%d.%d) and driver "
+ "version (%d.%d) while context sharing. Ensure "
+ "that driver and library are from the same "
+ "release.\n",
+ (int) (uinfo->spu_userversion >> 16),
+ (int) (uinfo->spu_userversion & 0xffff),
+ QIB_USER_SWMAJOR, QIB_USER_SWMINOR);
+ goto bail;
+ }
+ if (num_subctxts > QLOGIC_IB_MAX_SUBCTXT) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ rcd->subctxt_uregbase = vmalloc_user(PAGE_SIZE * num_subctxts);
+ if (!rcd->subctxt_uregbase) {
+ ret = -ENOMEM;
+ goto bail;
+ }
+ /* Note: rcd->rcvhdrq_size isn't initialized yet. */
+ size = ALIGN(dd->rcvhdrcnt * dd->rcvhdrentsize *
+ sizeof(u32), PAGE_SIZE) * num_subctxts;
+ rcd->subctxt_rcvhdr_base = vmalloc_user(size);
+ if (!rcd->subctxt_rcvhdr_base) {
+ ret = -ENOMEM;
+ goto bail_ureg;
+ }
+
+ rcd->subctxt_rcvegrbuf = vmalloc_user(rcd->rcvegrbuf_chunks *
+ rcd->rcvegrbuf_size *
+ num_subctxts);
+ if (!rcd->subctxt_rcvegrbuf) {
+ ret = -ENOMEM;
+ goto bail_rhdr;
+ }
+
+ rcd->subctxt_cnt = uinfo->spu_subctxt_cnt;
+ rcd->subctxt_id = uinfo->spu_subctxt_id;
+ rcd->active_slaves = 1;
+ rcd->redirect_seq_cnt = 1;
+ set_bit(QIB_CTXT_MASTER_UNINIT, &rcd->flag);
+ goto bail;
+
+bail_rhdr:
+ vfree(rcd->subctxt_rcvhdr_base);
+bail_ureg:
+ vfree(rcd->subctxt_uregbase);
+ rcd->subctxt_uregbase = NULL;
+bail:
+ return ret;
+}
+
+static int setup_ctxt(struct qib_pportdata *ppd, int ctxt,
+ struct file *fp, const struct qib_user_info *uinfo)
+{
+ struct qib_devdata *dd = ppd->dd;
+ struct qib_ctxtdata *rcd;
+ void *ptmp = NULL;
+ int ret;
+
+ rcd = qib_create_ctxtdata(ppd, ctxt);
+
+ /*
+ * Allocate memory for use in qib_tid_update() at open to
+ * reduce cost of expected send setup per message segment
+ */
+ if (rcd)
+ ptmp = kmalloc(dd->rcvtidcnt * sizeof(u16) +
+ dd->rcvtidcnt * sizeof(struct page **),
+ GFP_KERNEL);
+
+ if (!rcd || !ptmp) {
+ qib_dev_err(dd, "Unable to allocate ctxtdata "
+ "memory, failing open\n");
+ ret = -ENOMEM;
+ goto bailerr;
+ }
+ rcd->userversion = uinfo->spu_userversion;
+ ret = init_subctxts(dd, rcd, uinfo);
+ if (ret)
+ goto bailerr;
+ rcd->tid_pg_list = ptmp;
+ rcd->pid = current->pid;
+ init_waitqueue_head(&dd->rcd[ctxt]->wait);
+ strlcpy(rcd->comm, current->comm, sizeof(rcd->comm));
+ ctxt_fp(fp) = rcd;
+ qib_stats.sps_ctxts++;
+ ret = 0;
+ goto bail;
+
+bailerr:
+ dd->rcd[ctxt] = NULL;
+ kfree(rcd);
+ kfree(ptmp);
+bail:
+ return ret;
+}
+
+static inline int usable(struct qib_pportdata *ppd, int active_only)
+{
+ struct qib_devdata *dd = ppd->dd;
+ u32 linkok = active_only ? QIBL_LINKACTIVE :
+ (QIBL_LINKINIT | QIBL_LINKARMED | QIBL_LINKACTIVE);
+
+ return dd && (dd->flags & QIB_PRESENT) && dd->kregbase && ppd->lid &&
+ (ppd->lflags & linkok);
+}
+
+static int find_free_ctxt(int unit, struct file *fp,
+ const struct qib_user_info *uinfo)
+{
+ struct qib_devdata *dd = qib_lookup(unit);
+ struct qib_pportdata *ppd = NULL;
+ int ret;
+ u32 ctxt;
+
+ if (!dd || (uinfo->spu_port && uinfo->spu_port > dd->num_pports)) {
+ ret = -ENODEV;
+ goto bail;
+ }
+
+ /*
+ * If the user requests a specific port, try only that port; else
+ * select the "best" port below, based on context.
+ */
+ if (uinfo->spu_port) {
+ ppd = dd->pport + uinfo->spu_port - 1;
+ if (!usable(ppd, 0)) {
+ ret = -ENETDOWN;
+ goto bail;
+ }
+ }
+
+ for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {
+ if (dd->rcd[ctxt])
+ continue;
+ /*
+ * The setting and clearing of user context rcd[x] is protected
+ * by the qib_mutex.
+ */
+ if (!ppd) {
+ /* choose port based on ctxt, if up, else 1st up */
+ ppd = dd->pport + (ctxt % dd->num_pports);
+ if (!usable(ppd, 0)) {
+ int i;
+ for (i = 0; i < dd->num_pports; i++) {
+ ppd = dd->pport + i;
+ if (usable(ppd, 0))
+ break;
+ }
+ if (i == dd->num_pports) {
+ ret = -ENETDOWN;
+ goto bail;
+ }
+ }
+ }
+ ret = setup_ctxt(ppd, ctxt, fp, uinfo);
+ goto bail;
+ }
+ ret = -EBUSY;
+
+bail:
+ return ret;
+}
+
+static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo)
+{
+ struct qib_pportdata *ppd;
+ int ret = 0, devmax;
+ int npresent, nup;
+ int ndev;
+ u32 port = uinfo->spu_port, ctxt;
+
+ devmax = qib_count_units(&npresent, &nup);
+
+ for (ndev = 0; ndev < devmax; ndev++) {
+ struct qib_devdata *dd = qib_lookup(ndev);
+
+ /* device portion of usable() */
+ if (!(dd && (dd->flags & QIB_PRESENT) && dd->kregbase))
+ continue;
+ for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {
+ if (dd->rcd[ctxt])
+ continue;
+ if (port) {
+ if (port > dd->num_pports)
+ continue;
+ ppd = dd->pport + port - 1;
+ if (!usable(ppd, 0))
+ continue;
+ } else {
+ /*
+ * choose port based on ctxt, if up, else
+ * first port that's up for multi-port HCA
+ */
+ ppd = dd->pport + (ctxt % dd->num_pports);
+ if (!usable(ppd, 0)) {
+ int j;
+
+ ppd = NULL;
+ for (j = 0; j < dd->num_pports &&
+ !ppd; j++)
+ if (usable(dd->pport + j, 0))
+ ppd = dd->pport + j;
+ if (!ppd)
+ continue; /* to next unit */
+ }
+ }
+ ret = setup_ctxt(ppd, ctxt, fp, uinfo);
+ goto done;
+ }
+ }
+
+ if (npresent) {
+ if (nup == 0)
+ ret = -ENETDOWN;
+ else
+ ret = -EBUSY;
+ } else
+ ret = -ENXIO;
+
+done:
+ return ret;
+}
+
+static int find_shared_ctxt(struct file *fp,
+ const struct qib_user_info *uinfo)
+{
+ int devmax, ndev, i;
+ int ret = 0;
+
+ devmax = qib_count_units(NULL, NULL);
+
+ for (ndev = 0; ndev < devmax; ndev++) {
+ struct qib_devdata *dd = qib_lookup(ndev);
+
+ /* device portion of usable() */
+ if (!(dd && (dd->flags & QIB_PRESENT) && dd->kregbase))
+ continue;
+ for (i = dd->first_user_ctxt; i < dd->cfgctxts; i++) {
+ struct qib_ctxtdata *rcd = dd->rcd[i];
+
+ /* Skip ctxts which are not yet open */
+ if (!rcd || !rcd->cnt)
+ continue;
+ /* Skip ctxt if it doesn't match the requested one */
+ if (rcd->subctxt_id != uinfo->spu_subctxt_id)
+ continue;
+ /* Verify the sharing process matches the master */
+ if (rcd->subctxt_cnt != uinfo->spu_subctxt_cnt ||
+ rcd->userversion != uinfo->spu_userversion ||
+ rcd->cnt >= rcd->subctxt_cnt) {
+ ret = -EINVAL;
+ goto done;
+ }
+ ctxt_fp(fp) = rcd;
+ subctxt_fp(fp) = rcd->cnt++;
+ rcd->subpid[subctxt_fp(fp)] = current->pid;
+ tidcursor_fp(fp) = 0;
+ rcd->active_slaves |= 1 << subctxt_fp(fp);
+ ret = 1;
+ goto done;
+ }
+ }
+
+done:
+ return ret;
+}
+
+static int qib_open(struct inode *in, struct file *fp)
+{
+ /* The real work is performed later in qib_assign_ctxt() */
+ fp->private_data = kzalloc(sizeof(struct qib_filedata), GFP_KERNEL);
+ if (fp->private_data) /* no cpu affinity by default */
+ ((struct qib_filedata *)fp->private_data)->rec_cpu_num = -1;
+ return fp->private_data ? 0 : -ENOMEM;
+}
+
+/*
+ * Get ctxt early, so we can set affinity prior to memory allocation.
+ */
+static int qib_assign_ctxt(struct file *fp, const struct qib_user_info *uinfo)
+{
+ int ret;
+ int i_minor;
+ unsigned swmajor, swminor;
+
+ /* Check to be sure we haven't already initialized this file */
+ if (ctxt_fp(fp)) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ /* for now, if major version is different, bail */
+ swmajor = uinfo->spu_userversion >> 16;
+ if (swmajor != QIB_USER_SWMAJOR) {
+ ret = -ENODEV;
+ goto done;
+ }
+
+ swminor = uinfo->spu_userversion & 0xffff;
+
+ mutex_lock(&qib_mutex);
+
+ if (qib_compatible_subctxts(swmajor, swminor) &&
+ uinfo->spu_subctxt_cnt) {
+ ret = find_shared_ctxt(fp, uinfo);
+ if (ret) {
+ if (ret > 0)
+ ret = 0;
+ goto done_chk_sdma;
+ }
+ }
+
+ i_minor = iminor(fp->f_dentry->d_inode) - QIB_USER_MINOR_BASE;
+ if (i_minor)
+ ret = find_free_ctxt(i_minor - 1, fp, uinfo);
+ else
+ ret = get_a_ctxt(fp, uinfo);
+
+done_chk_sdma:
+ if (!ret) {
+ struct qib_filedata *fd = fp->private_data;
+ const struct qib_ctxtdata *rcd = fd->rcd;
+ const struct qib_devdata *dd = rcd->dd;
+
+ if (dd->flags & QIB_HAS_SEND_DMA) {
+ fd->pq = qib_user_sdma_queue_create(&dd->pcidev->dev,
+ dd->unit,
+ rcd->ctxt,
+ fd->subctxt);
+ if (!fd->pq)
+ ret = -ENOMEM;
+ }
+
+ /*
+ * If the process has NOT already set its affinity, select and
+ * reserve a processor for it, as a rendezvous for all
+ * users of the driver. If they don't actually later
+ * set affinity to this cpu, or set it to some other cpu,
+ * it just means that sooner or later we don't recommend
+ * a cpu, and let the scheduler do its best.
+ */
+ if (!ret && cpus_weight(current->cpus_allowed) >=
+ qib_cpulist_count) {
+ int cpu;
+ cpu = find_first_zero_bit(qib_cpulist,
+ qib_cpulist_count);
+ if (cpu != qib_cpulist_count) {
+ __set_bit(cpu, qib_cpulist);
+ fd->rec_cpu_num = cpu;
+ }
+ } else if (cpus_weight(current->cpus_allowed) == 1 &&
+ test_bit(first_cpu(current->cpus_allowed),
+ qib_cpulist))
+ qib_devinfo(dd->pcidev, "%s PID %u affinity "
+ "set to cpu %d; already allocated\n",
+ current->comm, current->pid,
+ first_cpu(current->cpus_allowed));
+ }
+
+ mutex_unlock(&qib_mutex);
+
+done:
+ return ret;
+}
+
+
+static int qib_do_user_init(struct file *fp,
+ const struct qib_user_info *uinfo)
+{
+ int ret;
+ struct qib_ctxtdata *rcd = ctxt_fp(fp);
+ struct qib_devdata *dd;
+ unsigned uctxt;
+
+ /* Subctxts don't need to initialize anything since master did it. */
+ if (subctxt_fp(fp)) {
+ ret = wait_event_interruptible(rcd->wait,
+ !test_bit(QIB_CTXT_MASTER_UNINIT, &rcd->flag));
+ goto bail;
+ }
+
+ dd = rcd->dd;
+
+ /* some ctxts may get extra buffers; calculate that here */
+ uctxt = rcd->ctxt - dd->first_user_ctxt;
+ if (uctxt < dd->ctxts_extrabuf) {
+ rcd->piocnt = dd->pbufsctxt + 1;
+ rcd->pio_base = rcd->piocnt * uctxt;
+ } else {
+ rcd->piocnt = dd->pbufsctxt;
+ rcd->pio_base = rcd->piocnt * uctxt +
+ dd->ctxts_extrabuf;
+ }
+
+ /*
+ * All user buffers are 2KB buffers. If we ever support
+ * giving 4KB buffers to user processes, this will need some
+ * work. Can't use piobufbase directly, because it has
+ * both 2K and 4K buffer base values. So check and handle.
+ */
+ if ((rcd->pio_base + rcd->piocnt) > dd->piobcnt2k) {
+ if (rcd->pio_base >= dd->piobcnt2k) {
+ qib_dev_err(dd,
+ "%u:ctxt%u: no 2KB buffers available\n",
+ dd->unit, rcd->ctxt);
+ ret = -ENOBUFS;
+ goto bail;
+ }
+ rcd->piocnt = dd->piobcnt2k - rcd->pio_base;
+ qib_dev_err(dd, "Ctxt%u: would use 4KB bufs, using %u\n",
+ rcd->ctxt, rcd->piocnt);
+ }
+
+ rcd->piobufs = dd->pio2k_bufbase + rcd->pio_base * dd->palign;
+ qib_chg_pioavailkernel(dd, rcd->pio_base, rcd->piocnt,
+ TXCHK_CHG_TYPE_USER, rcd);
+ /*
+ * try to ensure that processes start up with consistent avail update
+ * for their own range, at least. If the system is very quiet, it might
+ * have the in-memory copy out of date at startup for this range of
+ * buffers, when a context gets re-used. Do after the chg_pioavail
+ * and before the rest of setup, so it's "almost certain" the dma
+ * will have occurred (can't 100% guarantee, but should be many
+ * decimals of 9s, with this ordering), given how much else happens
+ * after this.
+ */
+ dd->f_sendctrl(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
+
+ /*
+ * Now allocate the rcvhdr Q and eager TIDs; skip the TID
+ * array for the time being. If rcd->ctxt > chip-supported,
+ * we would need extra logic here to handle overflow
+ * through ctxt 0, someday.
+ */
+ ret = qib_create_rcvhdrq(dd, rcd);
+ if (!ret)
+ ret = qib_setup_eagerbufs(rcd);
+ if (ret)
+ goto bail_pio;
+
+ rcd->tidcursor = 0; /* start at beginning after open */
+
+ /* initialize poll variables... */
+ rcd->urgent = 0;
+ rcd->urgent_poll = 0;
+
+ /*
+ * Now enable the ctxt for receive.
+ * For chips that are set to DMA the tail register to memory
+ * when it changes (and when the update bit transitions from
+ * 0 to 1), we turn it off and then back on.
+ * This will (very briefly) affect any other open ctxts, but the
+ * duration is very short, and therefore isn't an issue. We
+ * explicitly set the in-memory tail copy to 0 beforehand, so we
+ * don't have to wait to be sure the DMA update has happened
+ * (chip resets head/tail to 0 on transition to enable).
+ */
+ if (rcd->rcvhdrtail_kvaddr)
+ qib_clear_rcvhdrtail(rcd);
+
+ dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_CTXT_ENB | QIB_RCVCTRL_TIDFLOW_ENB,
+ rcd->ctxt);
+
+ /* Notify any waiting slaves */
+ if (rcd->subctxt_cnt) {
+ clear_bit(QIB_CTXT_MASTER_UNINIT, &rcd->flag);
+ wake_up(&rcd->wait);
+ }
+ return 0;
+
+bail_pio:
+ qib_chg_pioavailkernel(dd, rcd->pio_base, rcd->piocnt,
+ TXCHK_CHG_TYPE_KERN, rcd);
+bail:
+ return ret;
+}
+
+/**
+ * unlock_expected_tids - unlock any expected TID entries context still had in use
+ * @rcd: the context
+ *
+ * We don't actually update the chip here, because we do a bulk update
+ * below, using f_clear_tids.
+ */
+static void unlock_expected_tids(struct qib_ctxtdata *rcd)
+{
+ struct qib_devdata *dd = rcd->dd;
+ int ctxt_tidbase = rcd->ctxt * dd->rcvtidcnt;
+ int i, cnt = 0, maxtid = ctxt_tidbase + dd->rcvtidcnt;
+
+ for (i = ctxt_tidbase; i < maxtid; i++) {
+ struct page *p = dd->pageshadow[i];
+ dma_addr_t phys;
+
+ if (!p)
+ continue;
+
+ phys = dd->physshadow[i];
+ dd->physshadow[i] = dd->tidinvalid;
+ dd->pageshadow[i] = NULL;
+ pci_unmap_page(dd->pcidev, phys, PAGE_SIZE,
+ PCI_DMA_FROMDEVICE);
+ qib_release_user_pages(&p, 1);
+ cnt++;
+ }
+}
+
+static int qib_close(struct inode *in, struct file *fp)
+{
+ int ret = 0;
+ struct qib_filedata *fd;
+ struct qib_ctxtdata *rcd;
+ struct qib_devdata *dd;
+ unsigned long flags;
+ unsigned ctxt;
+ pid_t pid;
+
+ mutex_lock(&qib_mutex);
+
+ fd = (struct qib_filedata *) fp->private_data;
+ fp->private_data = NULL;
+ rcd = fd->rcd;
+ if (!rcd) {
+ mutex_unlock(&qib_mutex);
+ goto bail;
+ }
+
+ dd = rcd->dd;
+
+ /* ensure all pio buffer writes in progress are flushed */
+ qib_flush_wc();
+
+ /* drain user sdma queue */
+ if (fd->pq) {
+ qib_user_sdma_queue_drain(rcd->ppd, fd->pq);
+ qib_user_sdma_queue_destroy(fd->pq);
+ }
+
+ if (fd->rec_cpu_num != -1)
+ __clear_bit(fd->rec_cpu_num, qib_cpulist);
+
+ if (--rcd->cnt) {
+ /*
+ * XXX If the master closes the context before the slave(s),
+ * revoke the mmap for the eager receive queue so
+ * the slave(s) don't wait for receive data forever.
+ */
+ rcd->active_slaves &= ~(1 << fd->subctxt);
+ rcd->subpid[fd->subctxt] = 0;
+ mutex_unlock(&qib_mutex);
+ goto bail;
+ }
+
+ /* early; no interrupt users after this */
+ spin_lock_irqsave(&dd->uctxt_lock, flags);
+ ctxt = rcd->ctxt;
+ dd->rcd[ctxt] = NULL;
+ pid = rcd->pid;
+ rcd->pid = 0;
+ spin_unlock_irqrestore(&dd->uctxt_lock, flags);
+
+ if (rcd->rcvwait_to || rcd->piowait_to ||
+ rcd->rcvnowait || rcd->pionowait) {
+ rcd->rcvwait_to = 0;
+ rcd->piowait_to = 0;
+ rcd->rcvnowait = 0;
+ rcd->pionowait = 0;
+ }
+ if (rcd->flag)
+ rcd->flag = 0;
+
+ if (dd->kregbase) {
+ /* atomically clear receive enable ctxt and intr avail. */
+ dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_CTXT_DIS |
+ QIB_RCVCTRL_INTRAVAIL_DIS, ctxt);
+
+ /* clean up the pkeys for this ctxt user */
+ qib_clean_part_key(rcd, dd);
+ qib_disarm_piobufs(dd, rcd->pio_base, rcd->piocnt);
+ qib_chg_pioavailkernel(dd, rcd->pio_base,
+ rcd->piocnt, TXCHK_CHG_TYPE_KERN, NULL);
+
+ dd->f_clear_tids(dd, rcd);
+
+ if (dd->pageshadow)
+ unlock_expected_tids(rcd);
+ qib_stats.sps_ctxts--;
+ }
+
+ mutex_unlock(&qib_mutex);
+ qib_free_ctxtdata(dd, rcd); /* after releasing the mutex */
+
+bail:
+ kfree(fd);
+ return ret;
+}
+
+static int qib_ctxt_info(struct file *fp, struct qib_ctxt_info __user *uinfo)
+{
+ struct qib_ctxt_info info;
+ int ret;
+ size_t sz;
+ struct qib_ctxtdata *rcd = ctxt_fp(fp);
+ struct qib_filedata *fd;
+
+ fd = (struct qib_filedata *) fp->private_data;
+
+ info.num_active = qib_count_active_units();
+ info.unit = rcd->dd->unit;
+ info.port = rcd->ppd->port;
+ info.ctxt = rcd->ctxt;
+ info.subctxt = subctxt_fp(fp);
+ /* Number of user ctxts available for this device. */
+ info.num_ctxts = rcd->dd->cfgctxts - rcd->dd->first_user_ctxt;
+ info.num_subctxts = rcd->subctxt_cnt;
+ info.rec_cpu = fd->rec_cpu_num;
+ sz = sizeof(info);
+
+ if (copy_to_user(uinfo, &info, sz)) {
+ ret = -EFAULT;
+ goto bail;
+ }
+ ret = 0;
+
+bail:
+ return ret;
+}
+
+static int qib_sdma_get_inflight(struct qib_user_sdma_queue *pq,
+ u32 __user *inflightp)
+{
+ const u32 val = qib_user_sdma_inflight_counter(pq);
+
+ if (put_user(val, inflightp))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int qib_sdma_get_complete(struct qib_pportdata *ppd,
+ struct qib_user_sdma_queue *pq,
+ u32 __user *completep)
+{
+ u32 val;
+ int err;
+
+ if (!pq)
+ return -EINVAL;
+
+ err = qib_user_sdma_make_progress(ppd, pq);
+ if (err < 0)
+ return err;
+
+ val = qib_user_sdma_complete_counter(pq);
+ if (put_user(val, completep))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int disarm_req_delay(struct qib_ctxtdata *rcd)
+{
+ int ret = 0;
+
+ if (!usable(rcd->ppd, 1)) {
+ int i;
+ /*
+ * If the link is down, or otherwise not usable, delay
+ * the caller up to 30 seconds, so we don't thrash
+ * in trying to get the chip back to ACTIVE, and
+ * set a flag so they make the call again.
+ */
+ if (rcd->user_event_mask) {
+ /*
+ * subctxt_cnt is 0 if not shared, so do base
+ * separately, first, then remaining subctxt, if any
+ */
+ set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
+ &rcd->user_event_mask[0]);
+ for (i = 1; i < rcd->subctxt_cnt; i++)
+ set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
+ &rcd->user_event_mask[i]);
+ }
+ for (i = 0; !usable(rcd->ppd, 1) && i < 300; i++)
+ msleep(100);
+ ret = -ENETDOWN;
+ }
+ return ret;
+}
+
+/*
+ * Find all user contexts in use, and set the specified bit in their
+ * event mask.
+ * See also find_ctxt() for a similar use, that is specific to send buffers.
+ */
+int qib_set_uevent_bits(struct qib_pportdata *ppd, const int evtbit)
+{
+ struct qib_ctxtdata *rcd;
+ unsigned ctxt;
+ int ret = 0;
+
+ spin_lock(&ppd->dd->uctxt_lock);
+ for (ctxt = ppd->dd->first_user_ctxt; ctxt < ppd->dd->cfgctxts;
+ ctxt++) {
+ rcd = ppd->dd->rcd[ctxt];
+ if (!rcd)
+ continue;
+ if (rcd->user_event_mask) {
+ int i;
+ /*
+ * subctxt_cnt is 0 if not shared, so do base
+ * separately, first, then remaining subctxt, if any
+ */
+ set_bit(evtbit, &rcd->user_event_mask[0]);
+ for (i = 1; i < rcd->subctxt_cnt; i++)
+ set_bit(evtbit, &rcd->user_event_mask[i]);
+ }
+ ret = 1;
+ break;
+ }
+ spin_unlock(&ppd->dd->uctxt_lock);
+
+ return ret;
+}
+
+/*
+ * Clear the event notifier events for this context.
+ * For the DISARM_BUFS case, we also take action (this obsoletes
+ * the older QIB_CMD_DISARM_BUFS, but we keep it for backwards
+ * compatibility).
+ * Other bits don't currently require actions, just atomically clear.
+ * User process then performs actions appropriate to bit having been
+ * set, if desired, and checks again in future.
+ */
+static int qib_user_event_ack(struct qib_ctxtdata *rcd, int subctxt,
+ unsigned long events)
+{
+ int ret = 0, i;
+
+ for (i = 0; i <= _QIB_MAX_EVENT_BIT; i++) {
+ if (!test_bit(i, &events))
+ continue;
+ if (i == _QIB_EVENT_DISARM_BUFS_BIT) {
+ (void)qib_disarm_piobufs_ifneeded(rcd);
+ ret = disarm_req_delay(rcd);
+ } else
+ clear_bit(i, &rcd->user_event_mask[subctxt]);
+ }
+ return ret;
+}
+
+static ssize_t qib_write(struct file *fp, const char __user *data,
+ size_t count, loff_t *off)
+{
+ const struct qib_cmd __user *ucmd;
+ struct qib_ctxtdata *rcd;
+ const void __user *src;
+ size_t consumed, copy = 0;
+ struct qib_cmd cmd;
+ ssize_t ret = 0;
+ void *dest;
+
+ if (count < sizeof(cmd.type)) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ ucmd = (const struct qib_cmd __user *) data;
+
+ if (copy_from_user(&cmd.type, &ucmd->type, sizeof(cmd.type))) {
+ ret = -EFAULT;
+ goto bail;
+ }
+
+ consumed = sizeof(cmd.type);
+
+ switch (cmd.type) {
+ case QIB_CMD_ASSIGN_CTXT:
+ case QIB_CMD_USER_INIT:
+ copy = sizeof(cmd.cmd.user_info);
+ dest = &cmd.cmd.user_info;
+ src = &ucmd->cmd.user_info;
+ break;
+
+ case QIB_CMD_RECV_CTRL:
+ copy = sizeof(cmd.cmd.recv_ctrl);
+ dest = &cmd.cmd.recv_ctrl;
+ src = &ucmd->cmd.recv_ctrl;
+ break;
+
+ case QIB_CMD_CTXT_INFO:
+ copy = sizeof(cmd.cmd.ctxt_info);
+ dest = &cmd.cmd.ctxt_info;
+ src = &ucmd->cmd.ctxt_info;
+ break;
+
+ case QIB_CMD_TID_UPDATE:
+ case QIB_CMD_TID_FREE:
+ copy = sizeof(cmd.cmd.tid_info);
+ dest = &cmd.cmd.tid_info;
+ src = &ucmd->cmd.tid_info;
+ break;
+
+ case QIB_CMD_SET_PART_KEY:
+ copy = sizeof(cmd.cmd.part_key);
+ dest = &cmd.cmd.part_key;
+ src = &ucmd->cmd.part_key;
+ break;
+
+ case QIB_CMD_DISARM_BUFS:
+ case QIB_CMD_PIOAVAILUPD: /* force an update of PIOAvail reg */
+ copy = 0;
+ src = NULL;
+ dest = NULL;
+ break;
+
+ case QIB_CMD_POLL_TYPE:
+ copy = sizeof(cmd.cmd.poll_type);
+ dest = &cmd.cmd.poll_type;
+ src = &ucmd->cmd.poll_type;
+ break;
+
+ case QIB_CMD_ARMLAUNCH_CTRL:
+ copy = sizeof(cmd.cmd.armlaunch_ctrl);
+ dest = &cmd.cmd.armlaunch_ctrl;
+ src = &ucmd->cmd.armlaunch_ctrl;
+ break;
+
+ case QIB_CMD_SDMA_INFLIGHT:
+ copy = sizeof(cmd.cmd.sdma_inflight);
+ dest = &cmd.cmd.sdma_inflight;
+ src = &ucmd->cmd.sdma_inflight;
+ break;
+
+ case QIB_CMD_SDMA_COMPLETE:
+ copy = sizeof(cmd.cmd.sdma_complete);
+ dest = &cmd.cmd.sdma_complete;
+ src = &ucmd->cmd.sdma_complete;
+ break;
+
+ case QIB_CMD_ACK_EVENT:
+ copy = sizeof(cmd.cmd.event_mask);
+ dest = &cmd.cmd.event_mask;
+ src = &ucmd->cmd.event_mask;
+ break;
+
+ default:
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ if (copy) {
+ if ((count - consumed) < copy) {
+ ret = -EINVAL;
+ goto bail;
+ }
+ if (copy_from_user(dest, src, copy)) {
+ ret = -EFAULT;
+ goto bail;
+ }
+ consumed += copy;
+ }
+
+ rcd = ctxt_fp(fp);
+ if (!rcd && cmd.type != QIB_CMD_ASSIGN_CTXT) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ switch (cmd.type) {
+ case QIB_CMD_ASSIGN_CTXT:
+ ret = qib_assign_ctxt(fp, &cmd.cmd.user_info);
+ if (ret)
+ goto bail;
+ break;
+
+ case QIB_CMD_USER_INIT:
+ ret = qib_do_user_init(fp, &cmd.cmd.user_info);
+ if (ret)
+ goto bail;
+ ret = qib_get_base_info(fp, (void __user *) (unsigned long)
+ cmd.cmd.user_info.spu_base_info,
+ cmd.cmd.user_info.spu_base_info_size);
+ break;
+
+ case QIB_CMD_RECV_CTRL:
+ ret = qib_manage_rcvq(rcd, subctxt_fp(fp), cmd.cmd.recv_ctrl);
+ break;
+
+ case QIB_CMD_CTXT_INFO:
+ ret = qib_ctxt_info(fp, (struct qib_ctxt_info __user *)
+ (unsigned long) cmd.cmd.ctxt_info);
+ break;
+
+ case QIB_CMD_TID_UPDATE:
+ ret = qib_tid_update(rcd, fp, &cmd.cmd.tid_info);
+ break;
+
+ case QIB_CMD_TID_FREE:
+ ret = qib_tid_free(rcd, subctxt_fp(fp), &cmd.cmd.tid_info);
+ break;
+
+ case QIB_CMD_SET_PART_KEY:
+ ret = qib_set_part_key(rcd, cmd.cmd.part_key);
+ break;
+
+ case QIB_CMD_DISARM_BUFS:
+ (void)qib_disarm_piobufs_ifneeded(rcd);
+ ret = disarm_req_delay(rcd);
+ break;
+
+ case QIB_CMD_PIOAVAILUPD:
+ qib_force_pio_avail_update(rcd->dd);
+ break;
+
+ case QIB_CMD_POLL_TYPE:
+ rcd->poll_type = cmd.cmd.poll_type;
+ break;
+
+ case QIB_CMD_ARMLAUNCH_CTRL:
+ rcd->dd->f_set_armlaunch(rcd->dd, cmd.cmd.armlaunch_ctrl);
+ break;
+
+ case QIB_CMD_SDMA_INFLIGHT:
+ ret = qib_sdma_get_inflight(user_sdma_queue_fp(fp),
+ (u32 __user *) (unsigned long)
+ cmd.cmd.sdma_inflight);
+ break;
+
+ case QIB_CMD_SDMA_COMPLETE:
+ ret = qib_sdma_get_complete(rcd->ppd,
+ user_sdma_queue_fp(fp),
+ (u32 __user *) (unsigned long)
+ cmd.cmd.sdma_complete);
+ break;
+
+ case QIB_CMD_ACK_EVENT:
+ ret = qib_user_event_ack(rcd, subctxt_fp(fp),
+ cmd.cmd.event_mask);
+ break;
+ }
+
+ if (ret >= 0)
+ ret = consumed;
+
+bail:
+ return ret;
+}
+
+static ssize_t qib_aio_write(struct kiocb *iocb, const struct iovec *iov,
+ unsigned long dim, loff_t off)
+{
+ struct qib_filedata *fp = iocb->ki_filp->private_data;
+ struct qib_ctxtdata *rcd = ctxt_fp(iocb->ki_filp);
+ struct qib_user_sdma_queue *pq = fp->pq;
+
+ if (!dim || !pq)
+ return -EINVAL;
+
+ return qib_user_sdma_writev(rcd, pq, iov, dim);
+}
+
+static struct class *qib_class;
+static dev_t qib_dev;
+
+int qib_cdev_init(int minor, const char *name,
+ const struct file_operations *fops,
+ struct cdev **cdevp, struct device **devp)
+{
+ const dev_t dev = MKDEV(MAJOR(qib_dev), minor);
+ struct cdev *cdev;
+ struct device *device = NULL;
+ int ret;
+
+ cdev = cdev_alloc();
+ if (!cdev) {
+ printk(KERN_ERR QIB_DRV_NAME
+ ": Could not allocate cdev for minor %d, %s\n",
+ minor, name);
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ cdev->owner = THIS_MODULE;
+ cdev->ops = fops;
+ kobject_set_name(&cdev->kobj, name);
+
+ ret = cdev_add(cdev, dev, 1);
+ if (ret < 0) {
+ printk(KERN_ERR QIB_DRV_NAME
+ ": Could not add cdev for minor %d, %s (err %d)\n",
+ minor, name, -ret);
+ goto err_cdev;
+ }
+
+ device = device_create(qib_class, NULL, dev, NULL, name);
+ if (!IS_ERR(device))
+ goto done;
+ ret = PTR_ERR(device);
+ device = NULL;
+ printk(KERN_ERR QIB_DRV_NAME ": Could not create "
+ "device for minor %d, %s (err %d)\n",
+ minor, name, -ret);
+err_cdev:
+ cdev_del(cdev);
+ cdev = NULL;
+done:
+ *cdevp = cdev;
+ *devp = device;
+ return ret;
+}
+
+void qib_cdev_cleanup(struct cdev **cdevp, struct device **devp)
+{
+ struct device *device = *devp;
+
+ if (device) {
+ device_unregister(device);
+ *devp = NULL;
+ }
+
+ if (*cdevp) {
+ cdev_del(*cdevp);
+ *cdevp = NULL;
+ }
+}
+
+static struct cdev *wildcard_cdev;
+static struct device *wildcard_device;
+
+int __init qib_dev_init(void)
+{
+ int ret;
+
+ ret = alloc_chrdev_region(&qib_dev, 0, QIB_NMINORS, QIB_DRV_NAME);
+ if (ret < 0) {
+ printk(KERN_ERR QIB_DRV_NAME ": Could not allocate "
+ "chrdev region (err %d)\n", -ret);
+ goto done;
+ }
+
+ qib_class = class_create(THIS_MODULE, "ipath");
+ if (IS_ERR(qib_class)) {
+ ret = PTR_ERR(qib_class);
+ printk(KERN_ERR QIB_DRV_NAME ": Could not create "
+ "device class (err %d)\n", -ret);
+ unregister_chrdev_region(qib_dev, QIB_NMINORS);
+ }
+
+done:
+ return ret;
+}
+
+void qib_dev_cleanup(void)
+{
+ if (qib_class) {
+ class_destroy(qib_class);
+ qib_class = NULL;
+ }
+
+ unregister_chrdev_region(qib_dev, QIB_NMINORS);
+}
+
+static atomic_t user_count = ATOMIC_INIT(0);
+
+static void qib_user_remove(struct qib_devdata *dd)
+{
+ if (atomic_dec_return(&user_count) == 0)
+ qib_cdev_cleanup(&wildcard_cdev, &wildcard_device);
+
+ qib_cdev_cleanup(&dd->user_cdev, &dd->user_device);
+}
+
+static int qib_user_add(struct qib_devdata *dd)
+{
+ char name[10];
+ int ret;
+
+ if (atomic_inc_return(&user_count) == 1) {
+ ret = qib_cdev_init(0, "ipath", &qib_file_ops,
+ &wildcard_cdev, &wildcard_device);
+ if (ret)
+ goto done;
+ }
+
+ snprintf(name, sizeof(name), "ipath%d", dd->unit);
+ ret = qib_cdev_init(dd->unit + 1, name, &qib_file_ops,
+ &dd->user_cdev, &dd->user_device);
+ if (ret)
+ qib_user_remove(dd);
+done:
+ return ret;
+}
+
+/*
+ * Create per-unit files in /dev
+ */
+int qib_device_create(struct qib_devdata *dd)
+{
+ int r, ret;
+
+ r = qib_user_add(dd);
+ ret = qib_diag_add(dd);
+ if (r && !ret)
+ ret = r;
+ return ret;
+}
+
+/*
+ * Remove per-unit files in /dev
+ * void, core kernel returns no errors for this stuff
+ */
+void qib_device_remove(struct qib_devdata *dd)
+{
+ qib_user_remove(dd);
+ qib_diag_remove(dd);
+}
diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
new file mode 100644
index 000000000000..edef8527eb34
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_fs.c
@@ -0,0 +1,618 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/mount.h>
+#include <linux/pagemap.h>
+#include <linux/init.h>
+#include <linux/namei.h>
+
+#include "qib.h"
+
+#define QIBFS_MAGIC 0x726a77
+
+static struct super_block *qib_super;
+
+#define private2dd(file) ((file)->f_dentry->d_inode->i_private)
+
+static int qibfs_mknod(struct inode *dir, struct dentry *dentry,
+ int mode, const struct file_operations *fops,
+ void *data)
+{
+ int error;
+ struct inode *inode = new_inode(dir->i_sb);
+
+ if (!inode) {
+ error = -EPERM;
+ goto bail;
+ }
+
+ inode->i_mode = mode;
+ inode->i_uid = 0;
+ inode->i_gid = 0;
+ inode->i_blocks = 0;
+ inode->i_atime = CURRENT_TIME;
+ inode->i_mtime = inode->i_atime;
+ inode->i_ctime = inode->i_atime;
+ inode->i_private = data;
+ if ((mode & S_IFMT) == S_IFDIR) {
+ inode->i_op = &simple_dir_inode_operations;
+ inc_nlink(inode);
+ inc_nlink(dir);
+ }
+
+ inode->i_fop = fops;
+
+ d_instantiate(dentry, inode);
+ error = 0;
+
+bail:
+ return error;
+}
+
+static int create_file(const char *name, mode_t mode,
+ struct dentry *parent, struct dentry **dentry,
+ const struct file_operations *fops, void *data)
+{
+ int error;
+
+ *dentry = NULL;
+ mutex_lock(&parent->d_inode->i_mutex);
+ *dentry = lookup_one_len(name, parent, strlen(name));
+ if (!IS_ERR(*dentry))
+ error = qibfs_mknod(parent->d_inode, *dentry,
+ mode, fops, data);
+ else
+ error = PTR_ERR(*dentry);
+ mutex_unlock(&parent->d_inode->i_mutex);
+
+ return error;
+}
+
+static ssize_t driver_stats_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return simple_read_from_buffer(buf, count, ppos, &qib_stats,
+ sizeof qib_stats);
+}
+
+/*
+ * Driver stats field names, one line per stat, single string. Used by
+ * programs like ipathstats to print the stats in a way which works for
+ * different versions of drivers, without changing program source.
+ * If qlogic_ib_stats changes, this needs to change. Names need to be
+ * 12 chars or less (w/o newline), for proper display by ipathstats utility.
+ */
+static const char qib_statnames[] =
+ "KernIntr\n"
+ "ErrorIntr\n"
+ "Tx_Errs\n"
+ "Rcv_Errs\n"
+ "H/W_Errs\n"
+ "NoPIOBufs\n"
+ "CtxtsOpen\n"
+ "RcvLen_Errs\n"
+ "EgrBufFull\n"
+ "EgrHdrFull\n"
+ ;
+
+static ssize_t driver_names_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return simple_read_from_buffer(buf, count, ppos, qib_statnames,
+ sizeof qib_statnames - 1); /* no null */
+}
+
+static const struct file_operations driver_ops[] = {
+ { .read = driver_stats_read, },
+ { .read = driver_names_read, },
+};
+
+/* read the per-device counters */
+static ssize_t dev_counters_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ u64 *counters;
+ size_t avail;
+ struct qib_devdata *dd = private2dd(file);
+
+ avail = dd->f_read_cntrs(dd, *ppos, NULL, &counters);
+ return simple_read_from_buffer(buf, count, ppos, counters, avail);
+}
+
+/* read the per-device counters */
+static ssize_t dev_names_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char *names;
+ size_t avail;
+ struct qib_devdata *dd = private2dd(file);
+
+ avail = dd->f_read_cntrs(dd, *ppos, &names, NULL);
+ return simple_read_from_buffer(buf, count, ppos, names, avail);
+}
+
+static const struct file_operations cntr_ops[] = {
+ { .read = dev_counters_read, },
+ { .read = dev_names_read, },
+};
+
+/*
+ * Could use file->f_dentry->d_inode->i_ino to figure out which file,
+ * instead of separate routine for each, but for now, this works...
+ */
+
+/* read the per-port names (same for each port) */
+static ssize_t portnames_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char *names;
+ size_t avail;
+ struct qib_devdata *dd = private2dd(file);
+
+ avail = dd->f_read_portcntrs(dd, *ppos, 0, &names, NULL);
+ return simple_read_from_buffer(buf, count, ppos, names, avail);
+}
+
+/* read the per-port counters for port 1 (pidx 0) */
+static ssize_t portcntrs_1_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ u64 *counters;
+ size_t avail;
+ struct qib_devdata *dd = private2dd(file);
+
+ avail = dd->f_read_portcntrs(dd, *ppos, 0, NULL, &counters);
+ return simple_read_from_buffer(buf, count, ppos, counters, avail);
+}
+
+/* read the per-port counters for port 2 (pidx 1) */
+static ssize_t portcntrs_2_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ u64 *counters;
+ size_t avail;
+ struct qib_devdata *dd = private2dd(file);
+
+ avail = dd->f_read_portcntrs(dd, *ppos, 1, NULL, &counters);
+ return simple_read_from_buffer(buf, count, ppos, counters, avail);
+}
+
+static const struct file_operations portcntr_ops[] = {
+ { .read = portnames_read, },
+ { .read = portcntrs_1_read, },
+ { .read = portcntrs_2_read, },
+};
+
+/*
+ * read the per-port QSFP data for port 1 (pidx 0)
+ */
+static ssize_t qsfp_1_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct qib_devdata *dd = private2dd(file);
+ char *tmp;
+ int ret;
+
+ tmp = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ ret = qib_qsfp_dump(dd->pport, tmp, PAGE_SIZE);
+ if (ret > 0)
+ ret = simple_read_from_buffer(buf, count, ppos, tmp, ret);
+ kfree(tmp);
+ return ret;
+}
+
+/*
+ * read the per-port QSFP data for port 2 (pidx 1)
+ */
+static ssize_t qsfp_2_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct qib_devdata *dd = private2dd(file);
+ char *tmp;
+ int ret;
+
+ if (dd->num_pports < 2)
+ return -ENODEV;
+
+ tmp = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ ret = qib_qsfp_dump(dd->pport + 1, tmp, PAGE_SIZE);
+ if (ret > 0)
+ ret = simple_read_from_buffer(buf, count, ppos, tmp, ret);
+ kfree(tmp);
+ return ret;
+}
+
+static const struct file_operations qsfp_ops[] = {
+ { .read = qsfp_1_read, },
+ { .read = qsfp_2_read, },
+};
+
+static ssize_t flash_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct qib_devdata *dd;
+ ssize_t ret;
+ loff_t pos;
+ char *tmp;
+
+ pos = *ppos;
+
+ if (pos < 0) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ if (pos >= sizeof(struct qib_flash)) {
+ ret = 0;
+ goto bail;
+ }
+
+ if (count > sizeof(struct qib_flash) - pos)
+ count = sizeof(struct qib_flash) - pos;
+
+ tmp = kmalloc(count, GFP_KERNEL);
+ if (!tmp) {
+ ret = -ENOMEM;
+ goto bail;
+ }
+
+ dd = private2dd(file);
+ if (qib_eeprom_read(dd, pos, tmp, count)) {
+ qib_dev_err(dd, "failed to read from flash\n");
+ ret = -ENXIO;
+ goto bail_tmp;
+ }
+
+ if (copy_to_user(buf, tmp, count)) {
+ ret = -EFAULT;
+ goto bail_tmp;
+ }
+
+ *ppos = pos + count;
+ ret = count;
+
+bail_tmp:
+ kfree(tmp);
+
+bail:
+ return ret;
+}
+
+static ssize_t flash_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct qib_devdata *dd;
+ ssize_t ret;
+ loff_t pos;
+ char *tmp;
+
+ pos = *ppos;
+
+ if (pos != 0) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ if (count != sizeof(struct qib_flash)) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ tmp = kmalloc(count, GFP_KERNEL);
+ if (!tmp) {
+ ret = -ENOMEM;
+ goto bail;
+ }
+
+ if (copy_from_user(tmp, buf, count)) {
+ ret = -EFAULT;
+ goto bail_tmp;
+ }
+
+ dd = private2dd(file);
+ if (qib_eeprom_write(dd, pos, tmp, count)) {
+ ret = -ENXIO;
+ qib_dev_err(dd, "failed to write to flash\n");
+ goto bail_tmp;
+ }
+
+ *ppos = pos + count;
+ ret = count;
+
+bail_tmp:
+ kfree(tmp);
+
+bail:
+ return ret;
+}
+
+static const struct file_operations flash_ops = {
+ .read = flash_read,
+ .write = flash_write,
+};
+
+static int add_cntr_files(struct super_block *sb, struct qib_devdata *dd)
+{
+ struct dentry *dir, *tmp;
+ char unit[10];
+ int ret, i;
+
+ /* create the per-unit directory */
+ snprintf(unit, sizeof unit, "%u", dd->unit);
+ ret = create_file(unit, S_IFDIR|S_IRUGO|S_IXUGO, sb->s_root, &dir,
+ &simple_dir_operations, dd);
+ if (ret) {
+ printk(KERN_ERR "create_file(%s) failed: %d\n", unit, ret);
+ goto bail;
+ }
+
+ /* create the files in the new directory */
+ ret = create_file("counters", S_IFREG|S_IRUGO, dir, &tmp,
+ &cntr_ops[0], dd);
+ if (ret) {
+ printk(KERN_ERR "create_file(%s/counters) failed: %d\n",
+ unit, ret);
+ goto bail;
+ }
+ ret = create_file("counter_names", S_IFREG|S_IRUGO, dir, &tmp,
+ &cntr_ops[1], dd);
+ if (ret) {
+ printk(KERN_ERR "create_file(%s/counter_names) failed: %d\n",
+ unit, ret);
+ goto bail;
+ }
+ ret = create_file("portcounter_names", S_IFREG|S_IRUGO, dir, &tmp,
+ &portcntr_ops[0], dd);
+ if (ret) {
+ printk(KERN_ERR "create_file(%s/%s) failed: %d\n",
+ unit, "portcounter_names", ret);
+ goto bail;
+ }
+ for (i = 1; i <= dd->num_pports; i++) {
+ char fname[24];
+
+ sprintf(fname, "port%dcounters", i);
+ /* create the files in the new directory */
+ ret = create_file(fname, S_IFREG|S_IRUGO, dir, &tmp,
+ &portcntr_ops[i], dd);
+ if (ret) {
+ printk(KERN_ERR "create_file(%s/%s) failed: %d\n",
+ unit, fname, ret);
+ goto bail;
+ }
+ if (!(dd->flags & QIB_HAS_QSFP))
+ continue;
+ sprintf(fname, "qsfp%d", i);
+ ret = create_file(fname, S_IFREG|S_IRUGO, dir, &tmp,
+ &qsfp_ops[i - 1], dd);
+ if (ret) {
+ printk(KERN_ERR "create_file(%s/%s) failed: %d\n",
+ unit, fname, ret);
+ goto bail;
+ }
+ }
+
+ ret = create_file("flash", S_IFREG|S_IWUSR|S_IRUGO, dir, &tmp,
+ &flash_ops, dd);
+ if (ret)
+ printk(KERN_ERR "create_file(%s/flash) failed: %d\n",
+ unit, ret);
+bail:
+ return ret;
+}
+
+static int remove_file(struct dentry *parent, char *name)
+{
+ struct dentry *tmp;
+ int ret;
+
+ tmp = lookup_one_len(name, parent, strlen(name));
+
+ if (IS_ERR(tmp)) {
+ ret = PTR_ERR(tmp);
+ goto bail;
+ }
+
+ spin_lock(&dcache_lock);
+ spin_lock(&tmp->d_lock);
+ if (!(d_unhashed(tmp) && tmp->d_inode)) {
+ dget_locked(tmp);
+ __d_drop(tmp);
+ spin_unlock(&tmp->d_lock);
+ spin_unlock(&dcache_lock);
+ simple_unlink(parent->d_inode, tmp);
+ } else {
+ spin_unlock(&tmp->d_lock);
+ spin_unlock(&dcache_lock);
+ }
+
+ ret = 0;
+bail:
+ /*
+ * We don't expect clients to care about the return value, but
+ * it's there if they need it.
+ */
+ return ret;
+}
+
+static int remove_device_files(struct super_block *sb,
+ struct qib_devdata *dd)
+{
+ struct dentry *dir, *root;
+ char unit[10];
+ int ret, i;
+
+ root = dget(sb->s_root);
+ mutex_lock(&root->d_inode->i_mutex);
+ snprintf(unit, sizeof unit, "%u", dd->unit);
+ dir = lookup_one_len(unit, root, strlen(unit));
+
+ if (IS_ERR(dir)) {
+ ret = PTR_ERR(dir);
+ printk(KERN_ERR "Lookup of %s failed\n", unit);
+ goto bail;
+ }
+
+ remove_file(dir, "counters");
+ remove_file(dir, "counter_names");
+ remove_file(dir, "portcounter_names");
+ for (i = 0; i < dd->num_pports; i++) {
+ char fname[24];
+
+ sprintf(fname, "port%dcounters", i + 1);
+ remove_file(dir, fname);
+ if (dd->flags & QIB_HAS_QSFP) {
+ sprintf(fname, "qsfp%d", i + 1);
+ remove_file(dir, fname);
+ }
+ }
+ remove_file(dir, "flash");
+ d_delete(dir);
+ ret = simple_rmdir(root->d_inode, dir);
+
+bail:
+ mutex_unlock(&root->d_inode->i_mutex);
+ dput(root);
+ return ret;
+}
+
+/*
+ * This fills everything in when the fs is mounted, to handle umount/mount
+ * after device init. The direct add_cntr_files() call handles adding
+ * them from the init code, when the fs is already mounted.
+ */
+static int qibfs_fill_super(struct super_block *sb, void *data, int silent)
+{
+ struct qib_devdata *dd, *tmp;
+ unsigned long flags;
+ int ret;
+
+ static struct tree_descr files[] = {
+ [2] = {"driver_stats", &driver_ops[0], S_IRUGO},
+ [3] = {"driver_stats_names", &driver_ops[1], S_IRUGO},
+ {""},
+ };
+
+ ret = simple_fill_super(sb, QIBFS_MAGIC, files);
+ if (ret) {
+ printk(KERN_ERR "simple_fill_super failed: %d\n", ret);
+ goto bail;
+ }
+
+ spin_lock_irqsave(&qib_devs_lock, flags);
+
+ list_for_each_entry_safe(dd, tmp, &qib_dev_list, list) {
+ spin_unlock_irqrestore(&qib_devs_lock, flags);
+ ret = add_cntr_files(sb, dd);
+ if (ret) {
+ deactivate_super(sb);
+ goto bail;
+ }
+ spin_lock_irqsave(&qib_devs_lock, flags);
+ }
+
+ spin_unlock_irqrestore(&qib_devs_lock, flags);
+
+bail:
+ return ret;
+}
+
+static int qibfs_get_sb(struct file_system_type *fs_type, int flags,
+ const char *dev_name, void *data, struct vfsmount *mnt)
+{
+ int ret = get_sb_single(fs_type, flags, data,
+ qibfs_fill_super, mnt);
+ if (ret >= 0)
+ qib_super = mnt->mnt_sb;
+ return ret;
+}
+
+static void qibfs_kill_super(struct super_block *s)
+{
+ kill_litter_super(s);
+ qib_super = NULL;
+}
+
+int qibfs_add(struct qib_devdata *dd)
+{
+ int ret;
+
+ /*
+ * On first unit initialized, qib_super will not yet exist
+ * because nobody has yet tried to mount the filesystem, so
+ * we can't consider that to be an error; if an error occurs
+ * during the mount, that will get a complaint, so this is OK.
+ * add_cntr_files() for all units is done at mount from
+ * qibfs_fill_super(), so one way or another, everything works.
+ */
+ if (qib_super == NULL)
+ ret = 0;
+ else
+ ret = add_cntr_files(qib_super, dd);
+ return ret;
+}
+
+int qibfs_remove(struct qib_devdata *dd)
+{
+ int ret = 0;
+
+ if (qib_super)
+ ret = remove_device_files(qib_super, dd);
+
+ return ret;
+}
+
+static struct file_system_type qibfs_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "ipathfs",
+ .get_sb = qibfs_get_sb,
+ .kill_sb = qibfs_kill_super,
+};
+
+int __init qib_init_qibfs(void)
+{
+ return register_filesystem(&qibfs_fs_type);
+}
+
+int __exit qib_exit_qibfs(void)
+{
+ return unregister_filesystem(&qibfs_fs_type);
+}
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
new file mode 100644
index 000000000000..1eadadc13da8
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_iba6120.c
@@ -0,0 +1,3576 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
+ * All rights reserved.
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+/*
+ * This file contains all of the code that is specific to the
+ * QLogic_IB 6120 PCIe chip.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <rdma/ib_verbs.h>
+
+#include "qib.h"
+#include "qib_6120_regs.h"
+
+static void qib_6120_setup_setextled(struct qib_pportdata *, u32);
+static void sendctrl_6120_mod(struct qib_pportdata *ppd, u32 op);
+static u8 qib_6120_phys_portstate(u64);
+static u32 qib_6120_iblink_state(u64);
+
+/*
+ * This file contains all the chip-specific register information and
+ * access functions for the QLogic_IB 6120 PCI-Express chip.
+ */
+
+/* KREG_IDX uses machine-generated #defines */
+#define KREG_IDX(regname) (QIB_6120_##regname##_OFFS / sizeof(u64))
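+/*
+ * For example, kr_extctrl below expands to
+ * (QIB_6120_EXTCtrl_OFFS / sizeof(u64)), i.e. the u64 word index of the
+ * ExtCtrl register within the kregbase[] mapping.
+ */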
+
+/* Use defines to tie machine-generated names to lower-case names */
+#define kr_extctrl KREG_IDX(EXTCtrl)
+#define kr_extstatus KREG_IDX(EXTStatus)
+#define kr_gpio_clear KREG_IDX(GPIOClear)
+#define kr_gpio_mask KREG_IDX(GPIOMask)
+#define kr_gpio_out KREG_IDX(GPIOOut)
+#define kr_gpio_status KREG_IDX(GPIOStatus)
+#define kr_rcvctrl KREG_IDX(RcvCtrl)
+#define kr_sendctrl KREG_IDX(SendCtrl)
+#define kr_partitionkey KREG_IDX(RcvPartitionKey)
+#define kr_hwdiagctrl KREG_IDX(HwDiagCtrl)
+#define kr_ibcstatus KREG_IDX(IBCStatus)
+#define kr_ibcctrl KREG_IDX(IBCCtrl)
+#define kr_sendbuffererror KREG_IDX(SendBufErr0)
+#define kr_rcvbthqp KREG_IDX(RcvBTHQP)
+#define kr_counterregbase KREG_IDX(CntrRegBase)
+#define kr_palign KREG_IDX(PageAlign)
+#define kr_rcvegrbase KREG_IDX(RcvEgrBase)
+#define kr_rcvegrcnt KREG_IDX(RcvEgrCnt)
+#define kr_rcvhdrcnt KREG_IDX(RcvHdrCnt)
+#define kr_rcvhdrentsize KREG_IDX(RcvHdrEntSize)
+#define kr_rcvhdrsize KREG_IDX(RcvHdrSize)
+#define kr_rcvtidbase KREG_IDX(RcvTIDBase)
+#define kr_rcvtidcnt KREG_IDX(RcvTIDCnt)
+#define kr_scratch KREG_IDX(Scratch)
+#define kr_sendctrl KREG_IDX(SendCtrl)
+#define kr_sendpioavailaddr KREG_IDX(SendPIOAvailAddr)
+#define kr_sendpiobufbase KREG_IDX(SendPIOBufBase)
+#define kr_sendpiobufcnt KREG_IDX(SendPIOBufCnt)
+#define kr_sendpiosize KREG_IDX(SendPIOSize)
+#define kr_sendregbase KREG_IDX(SendRegBase)
+#define kr_userregbase KREG_IDX(UserRegBase)
+#define kr_control KREG_IDX(Control)
+#define kr_intclear KREG_IDX(IntClear)
+#define kr_intmask KREG_IDX(IntMask)
+#define kr_intstatus KREG_IDX(IntStatus)
+#define kr_errclear KREG_IDX(ErrClear)
+#define kr_errmask KREG_IDX(ErrMask)
+#define kr_errstatus KREG_IDX(ErrStatus)
+#define kr_hwerrclear KREG_IDX(HwErrClear)
+#define kr_hwerrmask KREG_IDX(HwErrMask)
+#define kr_hwerrstatus KREG_IDX(HwErrStatus)
+#define kr_revision KREG_IDX(Revision)
+#define kr_portcnt KREG_IDX(PortCnt)
+#define kr_serdes_cfg0 KREG_IDX(SerdesCfg0)
+#define kr_serdes_cfg1 (kr_serdes_cfg0 + 1)
+#define kr_serdes_stat KREG_IDX(SerdesStat)
+#define kr_xgxs_cfg KREG_IDX(XGXSCfg)
+
+/* These must only be written via qib_write_kreg_ctxt() */
+#define kr_rcvhdraddr KREG_IDX(RcvHdrAddr0)
+#define kr_rcvhdrtailaddr KREG_IDX(RcvHdrTailAddr0)
+
+#define CREG_IDX(regname) ((QIB_6120_##regname##_OFFS - \
+ QIB_6120_LBIntCnt_OFFS) / sizeof(u64))
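+/*
+ * For example, cr_badformat below expands to
+ * ((QIB_6120_RxBadFormatCnt_OFFS - QIB_6120_LBIntCnt_OFFS) / sizeof(u64)),
+ * i.e. a u64 word index relative to the counter block, whose first
+ * register is LBIntCnt (so cr_lbint is index 0).
+ */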
+
+#define cr_badformat CREG_IDX(RxBadFormatCnt)
+#define cr_erricrc CREG_IDX(RxICRCErrCnt)
+#define cr_errlink CREG_IDX(RxLinkProblemCnt)
+#define cr_errlpcrc CREG_IDX(RxLPCRCErrCnt)
+#define cr_errpkey CREG_IDX(RxPKeyMismatchCnt)
+#define cr_rcvflowctrl_err CREG_IDX(RxFlowCtrlErrCnt)
+#define cr_err_rlen CREG_IDX(RxLenErrCnt)
+#define cr_errslen CREG_IDX(TxLenErrCnt)
+#define cr_errtidfull CREG_IDX(RxTIDFullErrCnt)
+#define cr_errtidvalid CREG_IDX(RxTIDValidErrCnt)
+#define cr_errvcrc CREG_IDX(RxVCRCErrCnt)
+#define cr_ibstatuschange CREG_IDX(IBStatusChangeCnt)
+#define cr_lbint CREG_IDX(LBIntCnt)
+#define cr_invalidrlen CREG_IDX(RxMaxMinLenErrCnt)
+#define cr_invalidslen CREG_IDX(TxMaxMinLenErrCnt)
+#define cr_lbflowstall CREG_IDX(LBFlowStallCnt)
+#define cr_pktrcv CREG_IDX(RxDataPktCnt)
+#define cr_pktrcvflowctrl CREG_IDX(RxFlowPktCnt)
+#define cr_pktsend CREG_IDX(TxDataPktCnt)
+#define cr_pktsendflow CREG_IDX(TxFlowPktCnt)
+#define cr_portovfl CREG_IDX(RxP0HdrEgrOvflCnt)
+#define cr_rcvebp CREG_IDX(RxEBPCnt)
+#define cr_rcvovfl CREG_IDX(RxBufOvflCnt)
+#define cr_senddropped CREG_IDX(TxDroppedPktCnt)
+#define cr_sendstall CREG_IDX(TxFlowStallCnt)
+#define cr_sendunderrun CREG_IDX(TxUnderrunCnt)
+#define cr_wordrcv CREG_IDX(RxDwordCnt)
+#define cr_wordsend CREG_IDX(TxDwordCnt)
+#define cr_txunsupvl CREG_IDX(TxUnsupVLErrCnt)
+#define cr_rxdroppkt CREG_IDX(RxDroppedPktCnt)
+#define cr_iblinkerrrecov CREG_IDX(IBLinkErrRecoveryCnt)
+#define cr_iblinkdown CREG_IDX(IBLinkDownedCnt)
+#define cr_ibsymbolerr CREG_IDX(IBSymbolErrCnt)
+
+#define SYM_RMASK(regname, fldname) ((u64) \
+ QIB_6120_##regname##_##fldname##_RMASK)
+#define SYM_MASK(regname, fldname) ((u64) \
+ QIB_6120_##regname##_##fldname##_RMASK << \
+ QIB_6120_##regname##_##fldname##_LSB)
+#define SYM_LSB(regname, fldname) (QIB_6120_##regname##_##fldname##_LSB)
+
+#define SYM_FIELD(value, regname, fldname) ((u64) \
+ (((value) >> SYM_LSB(regname, fldname)) & \
+ SYM_RMASK(regname, fldname)))
+#define ERR_MASK(fldname) SYM_MASK(ErrMask, fldname##Mask)
+#define HWE_MASK(fldname) SYM_MASK(HwErrMask, fldname##Mask)
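+/*
+ * For example, SYM_FIELD(val, XGXSCfg, polarity_inv) effectively expands
+ * to ((val >> QIB_6120_XGXSCfg_polarity_inv_LSB) &
+ * QIB_6120_XGXSCfg_polarity_inv_RMASK), and ERR_MASK(RcvEBPErr) is just
+ * SYM_MASK(ErrMask, RcvEBPErrMask).
+ */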
+
+/* link training states, from IBC */
+#define IB_6120_LT_STATE_DISABLED 0x00
+#define IB_6120_LT_STATE_LINKUP 0x01
+#define IB_6120_LT_STATE_POLLACTIVE 0x02
+#define IB_6120_LT_STATE_POLLQUIET 0x03
+#define IB_6120_LT_STATE_SLEEPDELAY 0x04
+#define IB_6120_LT_STATE_SLEEPQUIET 0x05
+#define IB_6120_LT_STATE_CFGDEBOUNCE 0x08
+#define IB_6120_LT_STATE_CFGRCVFCFG 0x09
+#define IB_6120_LT_STATE_CFGWAITRMT 0x0a
+#define IB_6120_LT_STATE_CFGIDLE 0x0b
+#define IB_6120_LT_STATE_RECOVERRETRAIN 0x0c
+#define IB_6120_LT_STATE_RECOVERWAITRMT 0x0e
+#define IB_6120_LT_STATE_RECOVERIDLE 0x0f
+
+/* link state machine states from IBC */
+#define IB_6120_L_STATE_DOWN 0x0
+#define IB_6120_L_STATE_INIT 0x1
+#define IB_6120_L_STATE_ARM 0x2
+#define IB_6120_L_STATE_ACTIVE 0x3
+#define IB_6120_L_STATE_ACT_DEFER 0x4
+
+static const u8 qib_6120_physportstate[0x20] = {
+ [IB_6120_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED,
+ [IB_6120_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP,
+ [IB_6120_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL,
+ [IB_6120_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL,
+ [IB_6120_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP,
+ [IB_6120_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP,
+ [IB_6120_LT_STATE_CFGDEBOUNCE] =
+ IB_PHYSPORTSTATE_CFG_TRAIN,
+ [IB_6120_LT_STATE_CFGRCVFCFG] =
+ IB_PHYSPORTSTATE_CFG_TRAIN,
+ [IB_6120_LT_STATE_CFGWAITRMT] =
+ IB_PHYSPORTSTATE_CFG_TRAIN,
+ [IB_6120_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_TRAIN,
+ [IB_6120_LT_STATE_RECOVERRETRAIN] =
+ IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
+ [IB_6120_LT_STATE_RECOVERWAITRMT] =
+ IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
+ [IB_6120_LT_STATE_RECOVERIDLE] =
+ IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
+ [0x10] = IB_PHYSPORTSTATE_CFG_TRAIN,
+ [0x11] = IB_PHYSPORTSTATE_CFG_TRAIN,
+ [0x12] = IB_PHYSPORTSTATE_CFG_TRAIN,
+ [0x13] = IB_PHYSPORTSTATE_CFG_TRAIN,
+ [0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
+ [0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
+ [0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
+ [0x17] = IB_PHYSPORTSTATE_CFG_TRAIN
+};
+
+
+struct qib_chip_specific {
+ u64 __iomem *cregbase;
+ u64 *cntrs;
+ u64 *portcntrs;
+ void *dummy_hdrq; /* used after ctxt close */
+ dma_addr_t dummy_hdrq_phys;
+ spinlock_t kernel_tid_lock; /* no back to back kernel TID writes */
+ spinlock_t user_tid_lock; /* no back to back user TID writes */
+ spinlock_t rcvmod_lock; /* protect rcvctrl shadow changes */
+ spinlock_t gpio_lock; /* RMW of shadows/regs for ExtCtrl and GPIO */
+ u64 hwerrmask;
+ u64 errormask;
+ u64 gpio_out; /* shadow of kr_gpio_out, for rmw ops */
+ u64 gpio_mask; /* shadow the gpio mask register */
+ u64 extctrl; /* shadow the gpio output enable, etc... */
+ /*
+ * these 5 fields are used to establish deltas for IB symbol
+ * errors and linkrecovery errors. They can be reported on
+ * some chips during link negotiation prior to INIT, and with
+ * DDR when faking DDR negotiations with non-IBTA switches.
+ * The chip counters are adjusted at driver unload if there is
+ * a non-zero delta.
+ */
+ u64 ibdeltainprog;
+ u64 ibsymdelta;
+ u64 ibsymsnap;
+ u64 iblnkerrdelta;
+ u64 iblnkerrsnap;
+ u64 ibcctrl; /* shadow for kr_ibcctrl */
+ u32 lastlinkrecov; /* link recovery issue */
+ int irq;
+ u32 cntrnamelen;
+ u32 portcntrnamelen;
+ u32 ncntrs;
+ u32 nportcntrs;
+ /* used with gpio interrupts to implement IB counters */
+ u32 rxfc_unsupvl_errs;
+ u32 overrun_thresh_errs;
+ /*
+ * these count only cases where _successive_ LocalLinkIntegrity
+ * errors were seen in the receive headers of IB standard packets
+ */
+ u32 lli_errs;
+ u32 lli_counter;
+ u64 lli_thresh;
+ u64 sword; /* total dwords sent (sample result) */
+ u64 rword; /* total dwords received (sample result) */
+ u64 spkts; /* total packets sent (sample result) */
+ u64 rpkts; /* total packets received (sample result) */
+ u64 xmit_wait; /* # of ticks no data sent (sample result) */
+ struct timer_list pma_timer;
+ char emsgbuf[128];
+ char bitsmsgbuf[64];
+ u8 pma_sample_status;
+};
+
+/* ibcctrl bits */
+#define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1
+/* cycle through TS1/TS2 till OK */
+#define QLOGIC_IB_IBCC_LINKINITCMD_POLL 2
+/* wait for TS1, then go on */
+#define QLOGIC_IB_IBCC_LINKINITCMD_SLEEP 3
+#define QLOGIC_IB_IBCC_LINKINITCMD_SHIFT 16
+
+#define QLOGIC_IB_IBCC_LINKCMD_DOWN 1 /* move to 0x11 */
+#define QLOGIC_IB_IBCC_LINKCMD_ARMED 2 /* move to 0x21 */
+#define QLOGIC_IB_IBCC_LINKCMD_ACTIVE 3 /* move to 0x31 */
+#define QLOGIC_IB_IBCC_LINKCMD_SHIFT 18
+
+/*
+ * We could have a single register get/put routine, that takes a group type,
+ * but this is somewhat clearer and cleaner. It also gives us some error
+ * checking. 64 bit register reads should always work, but are inefficient
+ * on opteron (the northbridge always generates 2 separate HT 32 bit reads),
+ * so we use kreg32 wherever possible. User register and counter register
+ * reads are always 32 bit reads, so only one form of those routines.
+ */
+
+/**
+ * qib_read_ureg32 - read 32-bit virtualized per-context register
+ * @dd: device
+ * @regno: register number
+ * @ctxt: context number
+ *
+ * Return the contents of a register that is virtualized to be per context.
+ * Returns 0 if the chip is not present or not usable (not distinguishable
+ * from valid contents at runtime; we may add a separate error variable
+ * at some point).
+ */
+static inline u32 qib_read_ureg32(const struct qib_devdata *dd,
+ enum qib_ureg regno, int ctxt)
+{
+ if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
+ return 0;
+
+ if (dd->userbase)
+ return readl(regno + (u64 __iomem *)
+ ((char __iomem *)dd->userbase +
+ dd->ureg_align * ctxt));
+ else
+ return readl(regno + (u64 __iomem *)
+ (dd->uregbase +
+ (char __iomem *)dd->kregbase +
+ dd->ureg_align * ctxt));
+}
+
+/**
+ * qib_write_ureg - write 32-bit virtualized per-context register
+ * @dd: device
+ * @regno: register number
+ * @value: value
+ * @ctxt: context
+ *
+ * Write the contents of a register that is virtualized to be per context.
+ */
+static inline void qib_write_ureg(const struct qib_devdata *dd,
+ enum qib_ureg regno, u64 value, int ctxt)
+{
+ u64 __iomem *ubase;
+ if (dd->userbase)
+ ubase = (u64 __iomem *)
+ ((char __iomem *) dd->userbase +
+ dd->ureg_align * ctxt);
+ else
+ ubase = (u64 __iomem *)
+ (dd->uregbase +
+ (char __iomem *) dd->kregbase +
+ dd->ureg_align * ctxt);
+
+ if (dd->kregbase && (dd->flags & QIB_PRESENT))
+ writeq(value, &ubase[regno]);
+}
+
+static inline u32 qib_read_kreg32(const struct qib_devdata *dd,
+ const u16 regno)
+{
+ if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
+ return -1;
+ return readl((u32 __iomem *)&dd->kregbase[regno]);
+}
+
+static inline u64 qib_read_kreg64(const struct qib_devdata *dd,
+ const u16 regno)
+{
+ if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
+ return -1;
+
+ return readq(&dd->kregbase[regno]);
+}
+
+static inline void qib_write_kreg(const struct qib_devdata *dd,
+ const u16 regno, u64 value)
+{
+ if (dd->kregbase && (dd->flags & QIB_PRESENT))
+ writeq(value, &dd->kregbase[regno]);
+}
+
+/**
+ * qib_write_kreg_ctxt - write a device's per-ctxt 64-bit kernel register
+ * @dd: the qlogic_ib device
+ * @regno: the register number to write
+ * @ctxt: the context containing the register
+ * @value: the value to write
+ */
+static inline void qib_write_kreg_ctxt(const struct qib_devdata *dd,
+ const u16 regno, unsigned ctxt,
+ u64 value)
+{
+ qib_write_kreg(dd, regno + ctxt, value);
+}
+
+static inline void write_6120_creg(const struct qib_devdata *dd,
+ u16 regno, u64 value)
+{
+ if (dd->cspec->cregbase && (dd->flags & QIB_PRESENT))
+ writeq(value, &dd->cspec->cregbase[regno]);
+}
+
+static inline u64 read_6120_creg(const struct qib_devdata *dd, u16 regno)
+{
+ if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
+ return 0;
+ return readq(&dd->cspec->cregbase[regno]);
+}
+
+static inline u32 read_6120_creg32(const struct qib_devdata *dd, u16 regno)
+{
+ if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
+ return 0;
+ return readl(&dd->cspec->cregbase[regno]);
+}
+
+/* kr_control bits */
+#define QLOGIC_IB_C_RESET 1U
+
+/* kr_intstatus, kr_intclear, kr_intmask bits */
+#define QLOGIC_IB_I_RCVURG_MASK ((1U << 5) - 1)
+#define QLOGIC_IB_I_RCVURG_SHIFT 0
+#define QLOGIC_IB_I_RCVAVAIL_MASK ((1U << 5) - 1)
+#define QLOGIC_IB_I_RCVAVAIL_SHIFT 12
+
+#define QLOGIC_IB_C_FREEZEMODE 0x00000002
+#define QLOGIC_IB_C_LINKENABLE 0x00000004
+#define QLOGIC_IB_I_ERROR 0x0000000080000000ULL
+#define QLOGIC_IB_I_SPIOSENT 0x0000000040000000ULL
+#define QLOGIC_IB_I_SPIOBUFAVAIL 0x0000000020000000ULL
+#define QLOGIC_IB_I_GPIO 0x0000000010000000ULL
+#define QLOGIC_IB_I_BITSEXTANT \
+ ((QLOGIC_IB_I_RCVURG_MASK << QLOGIC_IB_I_RCVURG_SHIFT) | \
+ (QLOGIC_IB_I_RCVAVAIL_MASK << \
+ QLOGIC_IB_I_RCVAVAIL_SHIFT) | \
+ QLOGIC_IB_I_ERROR | QLOGIC_IB_I_SPIOSENT | \
+ QLOGIC_IB_I_SPIOBUFAVAIL | QLOGIC_IB_I_GPIO)
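+/*
+ * With the shifts and masks above, QLOGIC_IB_I_BITSEXTANT covers bits
+ * 0-4 (urgent receive), 12-16 (receive available) and 28-31 (GPIO,
+ * pioavail, piosent, error), i.e. 0xf001f01fULL.
+ */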
+
+/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */
+#define QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK 0x000000000000003fULL
+#define QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT 0
+#define QLOGIC_IB_HWE_PCIEPOISONEDTLP 0x0000000010000000ULL
+#define QLOGIC_IB_HWE_PCIECPLTIMEOUT 0x0000000020000000ULL
+#define QLOGIC_IB_HWE_PCIEBUSPARITYXTLH 0x0000000040000000ULL
+#define QLOGIC_IB_HWE_PCIEBUSPARITYXADM 0x0000000080000000ULL
+#define QLOGIC_IB_HWE_PCIEBUSPARITYRADM 0x0000000100000000ULL
+#define QLOGIC_IB_HWE_COREPLL_FBSLIP 0x0080000000000000ULL
+#define QLOGIC_IB_HWE_COREPLL_RFSLIP 0x0100000000000000ULL
+#define QLOGIC_IB_HWE_PCIE1PLLFAILED 0x0400000000000000ULL
+#define QLOGIC_IB_HWE_PCIE0PLLFAILED 0x0800000000000000ULL
+#define QLOGIC_IB_HWE_SERDESPLLFAILED 0x1000000000000000ULL
+
+
+/* kr_extstatus bits */
+#define QLOGIC_IB_EXTS_FREQSEL 0x2
+#define QLOGIC_IB_EXTS_SERDESSEL 0x4
+#define QLOGIC_IB_EXTS_MEMBIST_ENDTEST 0x0000000000004000
+#define QLOGIC_IB_EXTS_MEMBIST_FOUND 0x0000000000008000
+
+/* kr_xgxsconfig bits */
+#define QLOGIC_IB_XGXS_RESET 0x5ULL
+
+#define _QIB_GPIO_SDA_NUM 1
+#define _QIB_GPIO_SCL_NUM 0
+
+/* Bits in GPIO for the added IB link interrupts */
+#define GPIO_RXUVL_BIT 3
+#define GPIO_OVRUN_BIT 4
+#define GPIO_LLI_BIT 5
+#define GPIO_ERRINTR_MASK 0x38
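+/*
+ * GPIO_ERRINTR_MASK (0x38) is simply bits 3, 4 and 5 above:
+ * (1 << GPIO_RXUVL_BIT) | (1 << GPIO_OVRUN_BIT) | (1 << GPIO_LLI_BIT).
+ */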
+
+
+#define QLOGIC_IB_RT_BUFSIZE_MASK 0xe0000000ULL
+#define QLOGIC_IB_RT_BUFSIZE_SHIFTVAL(tid) \
+ ((((tid) & QLOGIC_IB_RT_BUFSIZE_MASK) >> 29) + 11 - 1)
+#define QLOGIC_IB_RT_BUFSIZE(tid) (1 << QLOGIC_IB_RT_BUFSIZE_SHIFTVAL(tid))
+#define QLOGIC_IB_RT_IS_VALID(tid) \
+ (((tid) & QLOGIC_IB_RT_BUFSIZE_MASK) && \
+ ((((tid) & QLOGIC_IB_RT_BUFSIZE_MASK) != QLOGIC_IB_RT_BUFSIZE_MASK)))
+#define QLOGIC_IB_RT_ADDR_MASK 0x1FFFFFFFULL /* 29 bits valid */
+#define QLOGIC_IB_RT_ADDR_SHIFT 10
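+/*
+ * Example decode: bits 31:29 of a TID entry encode the receive buffer
+ * size as a power of two; an encoded value of 1 gives
+ * QLOGIC_IB_RT_BUFSIZE(tid) == 1 << (1 + 10) == 2048 bytes, while the
+ * all-ones encoding equals QLOGIC_IB_RT_BUFSIZE_MASK itself and is
+ * rejected by QLOGIC_IB_RT_IS_VALID().
+ */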
+
+#define QLOGIC_IB_R_INTRAVAIL_SHIFT 16
+#define QLOGIC_IB_R_TAILUPD_SHIFT 31
+#define IBA6120_R_PKEY_DIS_SHIFT 30
+
+#define PBC_6120_VL15_SEND_CTRL (1ULL << 31) /* pbc; VL15; link_buf only */
+
+#define IBCBUSFRSPCPARITYERR HWE_MASK(IBCBusFromSPCParityErr)
+#define IBCBUSTOSPCPARITYERR HWE_MASK(IBCBusToSPCParityErr)
+
+#define SYM_MASK_BIT(regname, fldname, bit) ((u64) \
+ ((1ULL << (SYM_LSB(regname, fldname) + (bit)))))
+
+#define TXEMEMPARITYERR_PIOBUF \
+ SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 0)
+#define TXEMEMPARITYERR_PIOPBC \
+ SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 1)
+#define TXEMEMPARITYERR_PIOLAUNCHFIFO \
+ SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 2)
+
+#define RXEMEMPARITYERR_RCVBUF \
+ SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 0)
+#define RXEMEMPARITYERR_LOOKUPQ \
+ SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 1)
+#define RXEMEMPARITYERR_EXPTID \
+ SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 2)
+#define RXEMEMPARITYERR_EAGERTID \
+ SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 3)
+#define RXEMEMPARITYERR_FLAGBUF \
+ SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 4)
+#define RXEMEMPARITYERR_DATAINFO \
+ SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 5)
+#define RXEMEMPARITYERR_HDRINFO \
+ SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 6)
+
+/* 6120 specific hardware errors... */
+static const struct qib_hwerror_msgs qib_6120_hwerror_msgs[] = {
+ /* generic hardware errors */
+ QLOGIC_IB_HWE_MSG(IBCBUSFRSPCPARITYERR, "QIB2IB Parity"),
+ QLOGIC_IB_HWE_MSG(IBCBUSTOSPCPARITYERR, "IB2QIB Parity"),
+
+ QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOBUF,
+ "TXE PIOBUF Memory Parity"),
+ QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOPBC,
+ "TXE PIOPBC Memory Parity"),
+ QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOLAUNCHFIFO,
+ "TXE PIOLAUNCHFIFO Memory Parity"),
+
+ QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_RCVBUF,
+ "RXE RCVBUF Memory Parity"),
+ QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_LOOKUPQ,
+ "RXE LOOKUPQ Memory Parity"),
+ QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_EAGERTID,
+ "RXE EAGERTID Memory Parity"),
+ QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_EXPTID,
+ "RXE EXPTID Memory Parity"),
+ QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_FLAGBUF,
+ "RXE FLAGBUF Memory Parity"),
+ QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_DATAINFO,
+ "RXE DATAINFO Memory Parity"),
+ QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_HDRINFO,
+ "RXE HDRINFO Memory Parity"),
+
+ /* chip-specific hardware errors */
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEPOISONEDTLP,
+ "PCIe Poisoned TLP"),
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIECPLTIMEOUT,
+ "PCIe completion timeout"),
+ /*
+ * In practice, it's unlikely that we'll see PCIe PLL, or bus
+ * parity or memory parity error failures, because most likely we
+ * won't be able to talk to the core of the chip. Nonetheless, we
+ * might see them, if they are in parts of the PCIe core that aren't
+ * essential.
+ */
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE1PLLFAILED,
+ "PCIePLL1"),
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE0PLLFAILED,
+ "PCIePLL0"),
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYXTLH,
+ "PCIe XTLH core parity"),
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYXADM,
+ "PCIe ADM TX core parity"),
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYRADM,
+ "PCIe ADM RX core parity"),
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_SERDESPLLFAILED,
+ "SerDes PLL"),
+};
+
+#define TXE_PIO_PARITY (TXEMEMPARITYERR_PIOBUF | TXEMEMPARITYERR_PIOPBC)
+#define _QIB_PLL_FAIL (QLOGIC_IB_HWE_COREPLL_FBSLIP | \
+ QLOGIC_IB_HWE_COREPLL_RFSLIP)
+
+ /* variables for sanity checking interrupt and errors */
+#define IB_HWE_BITSEXTANT \
+ (HWE_MASK(RXEMemParityErr) | \
+ HWE_MASK(TXEMemParityErr) | \
+ (QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK << \
+ QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) | \
+ QLOGIC_IB_HWE_PCIE1PLLFAILED | \
+ QLOGIC_IB_HWE_PCIE0PLLFAILED | \
+ QLOGIC_IB_HWE_PCIEPOISONEDTLP | \
+ QLOGIC_IB_HWE_PCIECPLTIMEOUT | \
+ QLOGIC_IB_HWE_PCIEBUSPARITYXTLH | \
+ QLOGIC_IB_HWE_PCIEBUSPARITYXADM | \
+ QLOGIC_IB_HWE_PCIEBUSPARITYRADM | \
+ HWE_MASK(PowerOnBISTFailed) | \
+ QLOGIC_IB_HWE_COREPLL_FBSLIP | \
+ QLOGIC_IB_HWE_COREPLL_RFSLIP | \
+ QLOGIC_IB_HWE_SERDESPLLFAILED | \
+ HWE_MASK(IBCBusToSPCParityErr) | \
+ HWE_MASK(IBCBusFromSPCParityErr))
+
+#define IB_E_BITSEXTANT \
+ (ERR_MASK(RcvFormatErr) | ERR_MASK(RcvVCRCErr) | \
+ ERR_MASK(RcvICRCErr) | ERR_MASK(RcvMinPktLenErr) | \
+ ERR_MASK(RcvMaxPktLenErr) | ERR_MASK(RcvLongPktLenErr) | \
+ ERR_MASK(RcvShortPktLenErr) | ERR_MASK(RcvUnexpectedCharErr) | \
+ ERR_MASK(RcvUnsupportedVLErr) | ERR_MASK(RcvEBPErr) | \
+ ERR_MASK(RcvIBFlowErr) | ERR_MASK(RcvBadVersionErr) | \
+ ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr) | \
+ ERR_MASK(RcvBadTidErr) | ERR_MASK(RcvHdrLenErr) | \
+ ERR_MASK(RcvHdrErr) | ERR_MASK(RcvIBLostLinkErr) | \
+ ERR_MASK(SendMinPktLenErr) | ERR_MASK(SendMaxPktLenErr) | \
+ ERR_MASK(SendUnderRunErr) | ERR_MASK(SendPktLenErr) | \
+ ERR_MASK(SendDroppedSmpPktErr) | \
+ ERR_MASK(SendDroppedDataPktErr) | \
+ ERR_MASK(SendPioArmLaunchErr) | \
+ ERR_MASK(SendUnexpectedPktNumErr) | \
+ ERR_MASK(SendUnsupportedVLErr) | ERR_MASK(IBStatusChanged) | \
+ ERR_MASK(InvalidAddrErr) | ERR_MASK(ResetNegated) | \
+ ERR_MASK(HardwareErr))
+
+#define QLOGIC_IB_E_PKTERRS ( \
+ ERR_MASK(SendPktLenErr) | \
+ ERR_MASK(SendDroppedDataPktErr) | \
+ ERR_MASK(RcvVCRCErr) | \
+ ERR_MASK(RcvICRCErr) | \
+ ERR_MASK(RcvShortPktLenErr) | \
+ ERR_MASK(RcvEBPErr))
+
+/* These are all rcv-related errors which we want to count for stats */
+#define E_SUM_PKTERRS \
+ (ERR_MASK(RcvHdrLenErr) | ERR_MASK(RcvBadTidErr) | \
+ ERR_MASK(RcvBadVersionErr) | ERR_MASK(RcvHdrErr) | \
+ ERR_MASK(RcvLongPktLenErr) | ERR_MASK(RcvShortPktLenErr) | \
+ ERR_MASK(RcvMaxPktLenErr) | ERR_MASK(RcvMinPktLenErr) | \
+ ERR_MASK(RcvFormatErr) | ERR_MASK(RcvUnsupportedVLErr) | \
+ ERR_MASK(RcvUnexpectedCharErr) | ERR_MASK(RcvEBPErr))
+
+/* These are all send-related errors which we want to count for stats */
+#define E_SUM_ERRS \
+ (ERR_MASK(SendPioArmLaunchErr) | \
+ ERR_MASK(SendUnexpectedPktNumErr) | \
+ ERR_MASK(SendDroppedDataPktErr) | \
+ ERR_MASK(SendDroppedSmpPktErr) | \
+ ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendUnsupportedVLErr) | \
+ ERR_MASK(SendMinPktLenErr) | ERR_MASK(SendPktLenErr) | \
+ ERR_MASK(InvalidAddrErr))
+
+/*
+ * This is similar to E_SUM_ERRS, but armlaunch errors can't be ignored:
+ * more could occur while we are still cleaning up, and those need to be
+ * cancelled as they happen.  Errors not related to freeze and cancelling
+ * buffers are not ignored either.
+ */
+#define E_SPKT_ERRS_IGNORE \
+ (ERR_MASK(SendDroppedDataPktErr) | \
+ ERR_MASK(SendDroppedSmpPktErr) | \
+ ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendMinPktLenErr) | \
+ ERR_MASK(SendPktLenErr))
+
+/*
+ * These are errors that can occur when the link changes state while
+ * a packet is being sent or received.  This doesn't cover things
+ * like EBP or VCRC that can be the result of the sender having the
+ * link change state, so we receive a "known bad" packet.
+ */
+#define E_SUM_LINK_PKTERRS \
+ (ERR_MASK(SendDroppedDataPktErr) | \
+ ERR_MASK(SendDroppedSmpPktErr) | \
+ ERR_MASK(SendMinPktLenErr) | ERR_MASK(SendPktLenErr) | \
+ ERR_MASK(RcvShortPktLenErr) | ERR_MASK(RcvMinPktLenErr) | \
+ ERR_MASK(RcvUnexpectedCharErr))
+
+static void qib_6120_put_tid_2(struct qib_devdata *, u64 __iomem *,
+ u32, unsigned long);
+
+/*
+ * On platforms using this chip, and not having ordered WC stores, we
+ * can get TXE parity errors due to speculative reads to the PIO buffers,
+ * and this, due to a chip issue, can result in (many) false parity error
+ * reports. So it's a debug print on those, and an info print on systems
+ * where the speculative reads don't occur.
+ */
+static void qib_6120_txe_recover(struct qib_devdata *dd)
+{
+ if (!qib_unordered_wc())
+ qib_devinfo(dd->pcidev,
+ "Recovering from TXE PIO parity error\n");
+}
+
+/* enable/disable chip from delivering interrupts */
+static void qib_6120_set_intr_state(struct qib_devdata *dd, u32 enable)
+{
+ if (enable) {
+ if (dd->flags & QIB_BADINTR)
+ return;
+ qib_write_kreg(dd, kr_intmask, ~0ULL);
+ /* force re-interrupt of any pending interrupts. */
+ qib_write_kreg(dd, kr_intclear, 0ULL);
+ } else
+ qib_write_kreg(dd, kr_intmask, 0ULL);
+}
+
+/*
+ * Try to cleanup as much as possible for anything that might have gone
+ * wrong while in freeze mode, such as pio buffers being written by user
+ * processes (causing armlaunch), send errors due to going into freeze mode,
+ * etc., and try to avoid causing extra interrupts while doing so.
+ * Forcibly update the in-memory pioavail register copies after cleanup
+ * because the chip won't do it while in freeze mode (the register values
+ * themselves are kept correct).
+ * Make sure that we don't lose any important interrupts by using the chip
+ * feature that says that writing 0 to a bit in *clear that is set in
+ * *status will cause an interrupt to be generated again (if allowed by
+ * the *mask value).
+ * This is in chip-specific code because of all of the register accesses,
+ * even though the details are similar on most chips
+ */
+static void qib_6120_clear_freeze(struct qib_devdata *dd)
+{
+ /* disable error interrupts, to avoid confusion */
+ qib_write_kreg(dd, kr_errmask, 0ULL);
+
+ /* also disable interrupts; errormask is sometimes overwritten */
+ qib_6120_set_intr_state(dd, 0);
+
+ qib_cancel_sends(dd->pport);
+
+ /* clear the freeze, and be sure chip saw it */
+ qib_write_kreg(dd, kr_control, dd->control);
+ qib_read_kreg32(dd, kr_scratch);
+
+ /* force in-memory update now we are out of freeze */
+ qib_force_pio_avail_update(dd);
+
+ /*
+ * force new interrupt if any hwerr, error or interrupt bits are
+ * still set, and clear "safe" send packet errors related to freeze
+ * and cancelling sends. Re-enable error interrupts before possible
+ * force of re-interrupt on pending interrupts.
+ */
+ qib_write_kreg(dd, kr_hwerrclear, 0ULL);
+ qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE);
+ qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
+ qib_6120_set_intr_state(dd, 1);
+}
+
+/**
+ * qib_handle_6120_hwerrors - display hardware errors.
+ * @dd: the qlogic_ib device
+ * @msg: the output buffer
+ * @msgl: the size of the output buffer
+ *
+ * Most hardware errors are catastrophic, but for right now we'll print
+ * them and continue.  Reuse the same message buffer as
+ * handle_6120_errors() to avoid excessive stack usage.
+ */
+static void qib_handle_6120_hwerrors(struct qib_devdata *dd, char *msg,
+ size_t msgl)
+{
+ u64 hwerrs;
+ u32 bits, ctrl;
+ int isfatal = 0;
+ char *bitsmsg;
+ int log_idx;
+
+ hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
+ if (!hwerrs)
+ return;
+ if (hwerrs == ~0ULL) {
+ qib_dev_err(dd, "Read of hardware error status failed "
+ "(all bits set); ignoring\n");
+ return;
+ }
+ qib_stats.sps_hwerrs++;
+
+ /* Always clear the error status register, except MEMBISTFAIL,
+ * regardless of whether we continue or stop using the chip.
+ * We want that set so we know it failed, even across driver reload.
+ * We'll still ignore it in the hwerrmask. We do this partly for
+ * diagnostics, but also for support */
+ qib_write_kreg(dd, kr_hwerrclear,
+ hwerrs & ~HWE_MASK(PowerOnBISTFailed));
+
+ hwerrs &= dd->cspec->hwerrmask;
+
+ /* We log some errors to EEPROM, check if we have any of those. */
+ for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
+ if (hwerrs & dd->eep_st_masks[log_idx].hwerrs_to_log)
+ qib_inc_eeprom_err(dd, log_idx, 1);
+
+ /*
+ * Make sure we get this much out, unless told to be quiet,
+ * or it's occurred within the last 5 seconds.
+ */
+ if (hwerrs & ~(TXE_PIO_PARITY | RXEMEMPARITYERR_EAGERTID))
+ qib_devinfo(dd->pcidev, "Hardware error: hwerr=0x%llx "
+ "(cleared)\n", (unsigned long long) hwerrs);
+
+ if (hwerrs & ~IB_HWE_BITSEXTANT)
+ qib_dev_err(dd, "hwerror interrupt with unknown errors "
+ "%llx set\n", (unsigned long long)
+ (hwerrs & ~IB_HWE_BITSEXTANT));
+
+ ctrl = qib_read_kreg32(dd, kr_control);
+ if ((ctrl & QLOGIC_IB_C_FREEZEMODE) && !dd->diag_client) {
+ /*
+ * Parity errors in send memory are recoverable,
+ * just cancel the send (if indicated in sendbuffererror),
+ * count the occurrence, unfreeze (if no other handled
+ * hardware error bits are set), and continue. They can
+ * occur if a processor speculative read is done to the PIO
+ * buffer while we are sending a packet, for example.
+ */
+ if (hwerrs & TXE_PIO_PARITY) {
+ qib_6120_txe_recover(dd);
+ hwerrs &= ~TXE_PIO_PARITY;
+ }
+
+ if (!hwerrs) {
+ static u32 freeze_cnt;
+
+ freeze_cnt++;
+ qib_6120_clear_freeze(dd);
+ } else
+ isfatal = 1;
+ }
+
+ *msg = '\0';
+
+ if (hwerrs & HWE_MASK(PowerOnBISTFailed)) {
+ isfatal = 1;
+ strlcat(msg, "[Memory BIST test failed, InfiniPath hardware"
+ " unusable]", msgl);
+ /* ignore from now on, so disable until driver reloaded */
+ dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed);
+ qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
+ }
+
+ qib_format_hwerrors(hwerrs, qib_6120_hwerror_msgs,
+ ARRAY_SIZE(qib_6120_hwerror_msgs), msg, msgl);
+
+ bitsmsg = dd->cspec->bitsmsgbuf;
+ if (hwerrs & (QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK <<
+ QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT)) {
+ bits = (u32) ((hwerrs >>
+ QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) &
+ QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK);
+ snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf,
+ "[PCIe Mem Parity Errs %x] ", bits);
+ strlcat(msg, bitsmsg, msgl);
+ }
+
+ if (hwerrs & _QIB_PLL_FAIL) {
+ isfatal = 1;
+ snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf,
+ "[PLL failed (%llx), InfiniPath hardware unusable]",
+ (unsigned long long) hwerrs & _QIB_PLL_FAIL);
+ strlcat(msg, bitsmsg, msgl);
+ /* ignore from now on, so disable until driver reloaded */
+ dd->cspec->hwerrmask &= ~(hwerrs & _QIB_PLL_FAIL);
+ qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
+ }
+
+ if (hwerrs & QLOGIC_IB_HWE_SERDESPLLFAILED) {
+ /*
+ * If it occurs, it is left masked since the external
+ * interface is unused
+ */
+ dd->cspec->hwerrmask &= ~QLOGIC_IB_HWE_SERDESPLLFAILED;
+ qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
+ }
+
+ if (hwerrs)
+ /*
+ * if any set that we aren't ignoring; only
+ * make the complaint once, in case it's stuck
+ * or recurring, and we get here multiple
+ * times.
+ */
+ qib_dev_err(dd, "%s hardware error\n", msg);
+ else
+ *msg = 0; /* recovered from all of them */
+
+ if (isfatal && !dd->diag_client) {
+ qib_dev_err(dd, "Fatal Hardware Error, no longer"
+ " usable, SN %.16s\n", dd->serial);
+ /*
+ * for /sys status file and user programs to print; if no
+ * trailing brace is copied, we'll know it was truncated.
+ */
+ if (dd->freezemsg)
+ snprintf(dd->freezemsg, dd->freezelen,
+ "{%s}", msg);
+ qib_disable_after_error(dd);
+ }
+}
+
+/*
+ * Decode the error status into strings, deciding whether to always
+ * print it or not depending on "normal packet errors" vs everything
+ * else. Return 1 if "real" errors, otherwise 0 if only packet
+ * errors, so caller can decide what to print with the string.
+ */
+static int qib_decode_6120_err(struct qib_devdata *dd, char *buf, size_t blen,
+ u64 err)
+{
+ int iserr = 1;
+
+ *buf = '\0';
+ if (err & QLOGIC_IB_E_PKTERRS) {
+ if (!(err & ~QLOGIC_IB_E_PKTERRS))
+ iserr = 0;
+ if ((err & ERR_MASK(RcvICRCErr)) &&
+ !(err&(ERR_MASK(RcvVCRCErr)|ERR_MASK(RcvEBPErr))))
+ strlcat(buf, "CRC ", blen);
+ if (!iserr)
+ goto done;
+ }
+ if (err & ERR_MASK(RcvHdrLenErr))
+ strlcat(buf, "rhdrlen ", blen);
+ if (err & ERR_MASK(RcvBadTidErr))
+ strlcat(buf, "rbadtid ", blen);
+ if (err & ERR_MASK(RcvBadVersionErr))
+ strlcat(buf, "rbadversion ", blen);
+ if (err & ERR_MASK(RcvHdrErr))
+ strlcat(buf, "rhdr ", blen);
+ if (err & ERR_MASK(RcvLongPktLenErr))
+ strlcat(buf, "rlongpktlen ", blen);
+ if (err & ERR_MASK(RcvMaxPktLenErr))
+ strlcat(buf, "rmaxpktlen ", blen);
+ if (err & ERR_MASK(RcvMinPktLenErr))
+ strlcat(buf, "rminpktlen ", blen);
+ if (err & ERR_MASK(SendMinPktLenErr))
+ strlcat(buf, "sminpktlen ", blen);
+ if (err & ERR_MASK(RcvFormatErr))
+ strlcat(buf, "rformaterr ", blen);
+ if (err & ERR_MASK(RcvUnsupportedVLErr))
+ strlcat(buf, "runsupvl ", blen);
+ if (err & ERR_MASK(RcvUnexpectedCharErr))
+ strlcat(buf, "runexpchar ", blen);
+ if (err & ERR_MASK(RcvIBFlowErr))
+ strlcat(buf, "ribflow ", blen);
+ if (err & ERR_MASK(SendUnderRunErr))
+ strlcat(buf, "sunderrun ", blen);
+ if (err & ERR_MASK(SendPioArmLaunchErr))
+ strlcat(buf, "spioarmlaunch ", blen);
+ if (err & ERR_MASK(SendUnexpectedPktNumErr))
+ strlcat(buf, "sunexperrpktnum ", blen);
+ if (err & ERR_MASK(SendDroppedSmpPktErr))
+ strlcat(buf, "sdroppedsmppkt ", blen);
+ if (err & ERR_MASK(SendMaxPktLenErr))
+ strlcat(buf, "smaxpktlen ", blen);
+ if (err & ERR_MASK(SendUnsupportedVLErr))
+ strlcat(buf, "sunsupVL ", blen);
+ if (err & ERR_MASK(InvalidAddrErr))
+ strlcat(buf, "invalidaddr ", blen);
+ if (err & ERR_MASK(RcvEgrFullErr))
+ strlcat(buf, "rcvegrfull ", blen);
+ if (err & ERR_MASK(RcvHdrFullErr))
+ strlcat(buf, "rcvhdrfull ", blen);
+ if (err & ERR_MASK(IBStatusChanged))
+ strlcat(buf, "ibcstatuschg ", blen);
+ if (err & ERR_MASK(RcvIBLostLinkErr))
+ strlcat(buf, "riblostlink ", blen);
+ if (err & ERR_MASK(HardwareErr))
+ strlcat(buf, "hardware ", blen);
+ if (err & ERR_MASK(ResetNegated))
+ strlcat(buf, "reset ", blen);
+done:
+ return iserr;
+}
+
+/*
+ * Called when we might have an error that is specific to a particular
+ * PIO buffer, and may need to cancel that buffer, so it can be re-used.
+ */
+static void qib_disarm_6120_senderrbufs(struct qib_pportdata *ppd)
+{
+ unsigned long sbuf[2];
+ struct qib_devdata *dd = ppd->dd;
+
+ /*
+ * It's possible that sendbuffererror could have bits set; might
+ * have already done this as a result of hardware error handling.
+ */
+ sbuf[0] = qib_read_kreg64(dd, kr_sendbuffererror);
+ sbuf[1] = qib_read_kreg64(dd, kr_sendbuffererror + 1);
+
+ if (sbuf[0] || sbuf[1])
+ qib_disarm_piobufs_set(dd, sbuf,
+ dd->piobcnt2k + dd->piobcnt4k);
+}
+
+static int chk_6120_linkrecovery(struct qib_devdata *dd, u64 ibcs)
+{
+ int ret = 1;
+ u32 ibstate = qib_6120_iblink_state(ibcs);
+ u32 linkrecov = read_6120_creg32(dd, cr_iblinkerrrecov);
+
+ if (linkrecov != dd->cspec->lastlinkrecov) {
+ /* and no more until active again */
+ dd->cspec->lastlinkrecov = 0;
+ qib_set_linkstate(dd->pport, QIB_IB_LINKDOWN);
+ ret = 0;
+ }
+ if (ibstate == IB_PORT_ACTIVE)
+ dd->cspec->lastlinkrecov =
+ read_6120_creg32(dd, cr_iblinkerrrecov);
+ return ret;
+}
+
+static void handle_6120_errors(struct qib_devdata *dd, u64 errs)
+{
+ char *msg;
+ u64 ignore_this_time = 0;
+ u64 iserr = 0;
+ int log_idx;
+ struct qib_pportdata *ppd = dd->pport;
+ u64 mask;
+
+ /* don't report errors that are masked */
+ errs &= dd->cspec->errormask;
+ msg = dd->cspec->emsgbuf;
+
+ /* do these first, they are most important */
+ if (errs & ERR_MASK(HardwareErr))
+ qib_handle_6120_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf);
+ else
+ for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
+ if (errs & dd->eep_st_masks[log_idx].errs_to_log)
+ qib_inc_eeprom_err(dd, log_idx, 1);
+
+ if (errs & ~IB_E_BITSEXTANT)
+ qib_dev_err(dd, "error interrupt with unknown errors "
+ "%llx set\n",
+ (unsigned long long) (errs & ~IB_E_BITSEXTANT));
+
+ if (errs & E_SUM_ERRS) {
+ qib_disarm_6120_senderrbufs(ppd);
+ if ((errs & E_SUM_LINK_PKTERRS) &&
+ !(ppd->lflags & QIBL_LINKACTIVE)) {
+ /*
+ * This can happen when trying to bring the link
+ * up, but the IB link changes state at the "wrong"
+ * time. The IB logic then complains that the packet
+ * isn't valid. We don't want to confuse people, so
+ * we just don't print them, except at debug
+ */
+ ignore_this_time = errs & E_SUM_LINK_PKTERRS;
+ }
+ } else if ((errs & E_SUM_LINK_PKTERRS) &&
+ !(ppd->lflags & QIBL_LINKACTIVE)) {
+ /*
+ * This can happen when SMA is trying to bring the link
+ * up, but the IB link changes state at the "wrong" time.
+ * The IB logic then complains that the packet isn't
+ * valid. We don't want to confuse people, so we just
+ * don't print them, except at debug
+ */
+ ignore_this_time = errs & E_SUM_LINK_PKTERRS;
+ }
+
+ qib_write_kreg(dd, kr_errclear, errs);
+
+ errs &= ~ignore_this_time;
+ if (!errs)
+ goto done;
+
+ /*
+ * The ones we mask off are handled specially below
+ * or above.
+ */
+ mask = ERR_MASK(IBStatusChanged) | ERR_MASK(RcvEgrFullErr) |
+ ERR_MASK(RcvHdrFullErr) | ERR_MASK(HardwareErr);
+ qib_decode_6120_err(dd, msg, sizeof dd->cspec->emsgbuf, errs & ~mask);
+
+ if (errs & E_SUM_PKTERRS)
+ qib_stats.sps_rcverrs++;
+ if (errs & E_SUM_ERRS)
+ qib_stats.sps_txerrs++;
+
+ iserr = errs & ~(E_SUM_PKTERRS | QLOGIC_IB_E_PKTERRS);
+
+ if (errs & ERR_MASK(IBStatusChanged)) {
+ u64 ibcs = qib_read_kreg64(dd, kr_ibcstatus);
+ u32 ibstate = qib_6120_iblink_state(ibcs);
+ int handle = 1;
+
+ if (ibstate != IB_PORT_INIT && dd->cspec->lastlinkrecov)
+ handle = chk_6120_linkrecovery(dd, ibcs);
+ /*
+ * Since going into a recovery state causes the link state
+ * to go down and since recovery is transitory, it is better
+ * if we "miss" ever seeing the link training state go into
+ * recovery (i.e., ignore this transition for link state
+ * special handling purposes) without updating lastibcstat.
+ */
+ if (handle && qib_6120_phys_portstate(ibcs) ==
+ IB_PHYSPORTSTATE_LINK_ERR_RECOVER)
+ handle = 0;
+ if (handle)
+ qib_handle_e_ibstatuschanged(ppd, ibcs);
+ }
+
+ if (errs & ERR_MASK(ResetNegated)) {
+ qib_dev_err(dd, "Got reset, requires re-init "
+ "(unload and reload driver)\n");
+ dd->flags &= ~QIB_INITTED; /* needs re-init */
+ /* mark as having had error */
+ *dd->devstatusp |= QIB_STATUS_HWERROR;
+ *dd->pport->statusp &= ~QIB_STATUS_IB_CONF;
+ }
+
+ if (*msg && iserr)
+ qib_dev_porterr(dd, ppd->port, "%s error\n", msg);
+
+ if (ppd->state_wanted & ppd->lflags)
+ wake_up_interruptible(&ppd->state_wait);
+
+ /*
+ * If there were hdrq or egrfull errors, wake up any processes
+ * waiting in poll. We used to try to check which contexts had
+ * the overflow, but given the cost of that and the chip reads
+ * to support it, it's better to just wake everybody up if we
+ * get an overflow; waiters can poll again if it's not them.
+ */
+ if (errs & (ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr))) {
+ qib_handle_urcv(dd, ~0U);
+ if (errs & ERR_MASK(RcvEgrFullErr))
+ qib_stats.sps_buffull++;
+ else
+ qib_stats.sps_hdrfull++;
+ }
+done:
+ return;
+}
+
+/**
+ * qib_6120_init_hwerrors - enable hardware errors
+ * @dd: the qlogic_ib device
+ *
+ * now that we have finished initializing everything that might reasonably
+ * cause a hardware error, and cleared those error bits as they occur,
+ * we can enable hardware errors in the mask (potentially enabling
+ * freeze mode), and enable hardware errors as errors (along with
+ * everything else) in errormask
+ */
+static void qib_6120_init_hwerrors(struct qib_devdata *dd)
+{
+ u64 val;
+ u64 extsval;
+
+ extsval = qib_read_kreg64(dd, kr_extstatus);
+
+ if (!(extsval & QLOGIC_IB_EXTS_MEMBIST_ENDTEST))
+ qib_dev_err(dd, "MemBIST did not complete!\n");
+
+ /* init so all hwerrors interrupt, and enter freeze, adjust below */
+ val = ~0ULL;
+ if (dd->minrev < 2) {
+ /*
+ * Avoid problem with internal interface bus parity
+ * checking. Fixed in Rev2.
+ */
+ val &= ~QLOGIC_IB_HWE_PCIEBUSPARITYRADM;
+ }
+ /* avoid the freeze mode issue caused by some Intel CPUs' speculative reads */
+ val &= ~TXEMEMPARITYERR_PIOBUF;
+
+ dd->cspec->hwerrmask = val;
+
+ qib_write_kreg(dd, kr_hwerrclear, ~HWE_MASK(PowerOnBISTFailed));
+ qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
+
+ /* clear all */
+ qib_write_kreg(dd, kr_errclear, ~0ULL);
+ /* enable errors that are masked, at least this first time. */
+ qib_write_kreg(dd, kr_errmask, ~0ULL);
+ dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask);
+ /* clear any interrupts up to this point (ints still not enabled) */
+ qib_write_kreg(dd, kr_intclear, ~0ULL);
+
+ qib_write_kreg(dd, kr_rcvbthqp,
+ dd->qpn_mask << (QIB_6120_RcvBTHQP_BTHQP_Mask_LSB - 1) |
+ QIB_KD_QP);
+}
+
+/*
+ * Disable and enable the armlaunch error. Used for PIO bandwidth testing
+ * on chips that are count-based, rather than trigger-based. There is no
+ * reference counting, but that's also fine, given the intended use.
+ * Only chip-specific because it's all register accesses
+ */
+static void qib_set_6120_armlaunch(struct qib_devdata *dd, u32 enable)
+{
+ if (enable) {
+ qib_write_kreg(dd, kr_errclear,
+ ERR_MASK(SendPioArmLaunchErr));
+ dd->cspec->errormask |= ERR_MASK(SendPioArmLaunchErr);
+ } else
+ dd->cspec->errormask &= ~ERR_MASK(SendPioArmLaunchErr);
+ qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
+}
+
+/*
+ * Formerly took parameter <which> in pre-shifted,
+ * pre-merged form with LinkCmd and LinkInitCmd
+ * together, and assuming the zero was NOP.
+ */
+static void qib_set_ib_6120_lstate(struct qib_pportdata *ppd, u16 linkcmd,
+ u16 linitcmd)
+{
+ u64 mod_wd;
+ struct qib_devdata *dd = ppd->dd;
+ unsigned long flags;
+
+ if (linitcmd == QLOGIC_IB_IBCC_LINKINITCMD_DISABLE) {
+ /*
+ * If we are told to disable, note that so link-recovery
+ * code does not attempt to bring us back up.
+ */
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags |= QIBL_IB_LINK_DISABLED;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ } else if (linitcmd || linkcmd == QLOGIC_IB_IBCC_LINKCMD_DOWN) {
+ /*
+ * Any other linkinitcmd will lead to LINKDOWN and then
+ * to INIT (if all is well), so clear flag to let
+ * link-recovery code attempt to bring us back up.
+ */
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags &= ~QIBL_IB_LINK_DISABLED;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ }
+
+ mod_wd = (linkcmd << QLOGIC_IB_IBCC_LINKCMD_SHIFT) |
+ (linitcmd << QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
+
+ qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl | mod_wd);
+ /* write to chip to prevent back-to-back writes of control reg */
+ qib_write_kreg(dd, kr_scratch, 0);
+}
+
+/**
+ * qib_6120_bringup_serdes - bring up the serdes
+ * @ppd: physical port on the qlogic_ib device
+ */
+static int qib_6120_bringup_serdes(struct qib_pportdata *ppd)
+{
+ struct qib_devdata *dd = ppd->dd;
+ u64 val, config1, prev_val, hwstat, ibc;
+
+ /* Put IBC in reset, sends disabled */
+ dd->control &= ~QLOGIC_IB_C_LINKENABLE;
+ qib_write_kreg(dd, kr_control, 0ULL);
+
+ dd->cspec->ibdeltainprog = 1;
+ dd->cspec->ibsymsnap = read_6120_creg32(dd, cr_ibsymbolerr);
+ dd->cspec->iblnkerrsnap = read_6120_creg32(dd, cr_iblinkerrrecov);
+
+ /* flowcontrolwatermark is in units of KBytes */
+ ibc = 0x5ULL << SYM_LSB(IBCCtrl, FlowCtrlWaterMark);
+ /*
+ * How often flowctrl sent. More or less in usecs; balance against
+ * watermark value, so that in theory senders always get a flow
+ * control update in time to not let the IB link go idle.
+ */
+ ibc |= 0x3ULL << SYM_LSB(IBCCtrl, FlowCtrlPeriod);
+ /* max error tolerance */
+ dd->cspec->lli_thresh = 0xf;
+ ibc |= (u64) dd->cspec->lli_thresh << SYM_LSB(IBCCtrl, PhyerrThreshold);
+ /* use "real" buffer space for */
+ ibc |= 4ULL << SYM_LSB(IBCCtrl, CreditScale);
+ /* IB credit flow control. */
+ ibc |= 0xfULL << SYM_LSB(IBCCtrl, OverrunThreshold);
+ /*
+ * set initial max size pkt IBC will send, including ICRC; it's the
+ * PIO buffer size in dwords, less 1; also see qib_set_mtu()
+ */
+ ibc |= ((u64)(ppd->ibmaxlen >> 2) + 1) << SYM_LSB(IBCCtrl, MaxPktLen);
+ dd->cspec->ibcctrl = ibc; /* without linkcmd or linkinitcmd! */
+
+ /* initially come up waiting for TS1, without sending anything. */
+ val = dd->cspec->ibcctrl | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
+ QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
+ qib_write_kreg(dd, kr_ibcctrl, val);
+
+ val = qib_read_kreg64(dd, kr_serdes_cfg0);
+ config1 = qib_read_kreg64(dd, kr_serdes_cfg1);
+
+ /*
+ * Force reset on, also set rxdetect enable. Must do before reading
+ * serdesstatus at least for simulation, or some of the bits in
+ * serdes status will come back as undefined and cause simulation
+ * failures
+ */
+ val |= SYM_MASK(SerdesCfg0, ResetPLL) |
+ SYM_MASK(SerdesCfg0, RxDetEnX) |
+ (SYM_MASK(SerdesCfg0, L1PwrDnA) |
+ SYM_MASK(SerdesCfg0, L1PwrDnB) |
+ SYM_MASK(SerdesCfg0, L1PwrDnC) |
+ SYM_MASK(SerdesCfg0, L1PwrDnD));
+ qib_write_kreg(dd, kr_serdes_cfg0, val);
+ /* be sure chip saw it */
+ qib_read_kreg64(dd, kr_scratch);
+ udelay(5); /* need pll reset set at least for a bit */
+ /*
+ * after PLL is reset, set the per-lane Resets and TxIdle and
+ * clear the PLL reset and rxdetect (to get falling edge).
+ * Leave L1PWR bits set (permanently)
+ */
+ val &= ~(SYM_MASK(SerdesCfg0, RxDetEnX) |
+ SYM_MASK(SerdesCfg0, ResetPLL) |
+ (SYM_MASK(SerdesCfg0, L1PwrDnA) |
+ SYM_MASK(SerdesCfg0, L1PwrDnB) |
+ SYM_MASK(SerdesCfg0, L1PwrDnC) |
+ SYM_MASK(SerdesCfg0, L1PwrDnD)));
+ val |= (SYM_MASK(SerdesCfg0, ResetA) |
+ SYM_MASK(SerdesCfg0, ResetB) |
+ SYM_MASK(SerdesCfg0, ResetC) |
+ SYM_MASK(SerdesCfg0, ResetD)) |
+ SYM_MASK(SerdesCfg0, TxIdeEnX);
+ qib_write_kreg(dd, kr_serdes_cfg0, val);
+ /* be sure chip saw it */
+ (void) qib_read_kreg64(dd, kr_scratch);
+ /* need PLL reset clear for at least 11 usec before lane
+ * resets cleared; give it a few more to be sure */
+ udelay(15);
+ val &= ~((SYM_MASK(SerdesCfg0, ResetA) |
+ SYM_MASK(SerdesCfg0, ResetB) |
+ SYM_MASK(SerdesCfg0, ResetC) |
+ SYM_MASK(SerdesCfg0, ResetD)) |
+ SYM_MASK(SerdesCfg0, TxIdeEnX));
+
+ qib_write_kreg(dd, kr_serdes_cfg0, val);
+ /* be sure chip saw it */
+ (void) qib_read_kreg64(dd, kr_scratch);
+
+ val = qib_read_kreg64(dd, kr_xgxs_cfg);
+ prev_val = val;
+ if (val & QLOGIC_IB_XGXS_RESET)
+ val &= ~QLOGIC_IB_XGXS_RESET;
+ if (SYM_FIELD(val, XGXSCfg, polarity_inv) != ppd->rx_pol_inv) {
+ /* need to compensate for Tx inversion in partner */
+ val &= ~SYM_MASK(XGXSCfg, polarity_inv);
+ val |= (u64)ppd->rx_pol_inv << SYM_LSB(XGXSCfg, polarity_inv);
+ }
+ if (val != prev_val)
+ qib_write_kreg(dd, kr_xgxs_cfg, val);
+
+ val = qib_read_kreg64(dd, kr_serdes_cfg0);
+
+ /* clear current and de-emphasis bits */
+ config1 &= ~0x0ffffffff00ULL;
+ /* set current to 20ma */
+ config1 |= 0x00000000000ULL;
+ /* set de-emphasis to -5.68dB */
+ config1 |= 0x0cccc000000ULL;
+ qib_write_kreg(dd, kr_serdes_cfg1, config1);
+
+ /* base and port guid same for single port */
+ ppd->guid = dd->base_guid;
+
+ /*
+ * the process of setting and un-resetting the serdes normally
+ * causes a serdes PLL error, so check for that and clear it
+ * here. Also clear the hwerr bit in errstatus, but not others.
+ */
+ hwstat = qib_read_kreg64(dd, kr_hwerrstatus);
+ if (hwstat) {
+ /* should just have PLL, clear all set, in any case */
+ if (hwstat & ~QLOGIC_IB_HWE_SERDESPLLFAILED)
+ qib_write_kreg(dd, kr_hwerrclear, hwstat);
+ qib_write_kreg(dd, kr_errclear, ERR_MASK(HardwareErr));
+ }
+
+ dd->control |= QLOGIC_IB_C_LINKENABLE;
+ dd->control &= ~QLOGIC_IB_C_FREEZEMODE;
+ qib_write_kreg(dd, kr_control, dd->control);
+
+ return 0;
+}
+
+/**
+ * qib_6120_quiet_serdes - set serdes to txidle
+ * @ppd: physical port of the qlogic_ib device
+ * Called when driver is being unloaded
+ */
+static void qib_6120_quiet_serdes(struct qib_pportdata *ppd)
+{
+ struct qib_devdata *dd = ppd->dd;
+ u64 val;
+
+ qib_set_ib_6120_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
+
+ /* disable IBC */
+ dd->control &= ~QLOGIC_IB_C_LINKENABLE;
+ qib_write_kreg(dd, kr_control,
+ dd->control | QLOGIC_IB_C_FREEZEMODE);
+
+ if (dd->cspec->ibsymdelta || dd->cspec->iblnkerrdelta ||
+ dd->cspec->ibdeltainprog) {
+ u64 diagc;
+
+ /* enable counter writes */
+ diagc = qib_read_kreg64(dd, kr_hwdiagctrl);
+ qib_write_kreg(dd, kr_hwdiagctrl,
+ diagc | SYM_MASK(HwDiagCtrl, CounterWrEnable));
+
+ if (dd->cspec->ibsymdelta || dd->cspec->ibdeltainprog) {
+ val = read_6120_creg32(dd, cr_ibsymbolerr);
+ if (dd->cspec->ibdeltainprog)
+ val -= val - dd->cspec->ibsymsnap;
+ val -= dd->cspec->ibsymdelta;
+ write_6120_creg(dd, cr_ibsymbolerr, val);
+ }
+ if (dd->cspec->iblnkerrdelta || dd->cspec->ibdeltainprog) {
+ val = read_6120_creg32(dd, cr_iblinkerrrecov);
+ if (dd->cspec->ibdeltainprog)
+ val -= val - dd->cspec->iblnkerrsnap;
+ val -= dd->cspec->iblnkerrdelta;
+ write_6120_creg(dd, cr_iblinkerrrecov, val);
+ }
+
+ /* and disable counter writes */
+ qib_write_kreg(dd, kr_hwdiagctrl, diagc);
+ }
+
+ val = qib_read_kreg64(dd, kr_serdes_cfg0);
+ val |= SYM_MASK(SerdesCfg0, TxIdeEnX);
+ qib_write_kreg(dd, kr_serdes_cfg0, val);
+}
+
+/**
+ * qib_6120_setup_setextled - set the state of the two external LEDs
+ * @ppd: physical port on the qlogic_ib device
+ * @on: whether the link is up or not
+ *
+ * The exact combo of LEDs if on is true is determined by looking
+ * at the ibcstatus.
+ *
+ * These LEDs indicate the physical and logical state of the IB link.
+ * For this chip (at least with recommended board pinouts), LED1
+ * is Yellow (logical state) and LED2 is Green (physical state).
+ *
+ * Note: We try to match the Mellanox HCA LED behavior as best
+ * we can. Green indicates physical link state is OK (something is
+ * plugged in, and we can train).
+ * Amber indicates the link is logically up (ACTIVE).
+ * Mellanox further blinks the amber LED to indicate data packet
+ * activity, but we have no hardware support for that, so it would
+ * require waking up every 10-20 msecs and checking the counters
+ * on the chip, and then turning the LED off if appropriate. That's
+ * visible overhead, so not something we will do.
+ */
+static void qib_6120_setup_setextled(struct qib_pportdata *ppd, u32 on)
+{
+ u64 extctl, val, lst, ltst;
+ unsigned long flags;
+ struct qib_devdata *dd = ppd->dd;
+
+ /*
+ * The diags use the LED to indicate diag info, so we leave
+ * the external LED alone when the diags are running.
+ */
+ if (dd->diag_client)
+ return;
+
+ /* Allow override of LED display for, e.g. Locating system in rack */
+ if (ppd->led_override) {
+ ltst = (ppd->led_override & QIB_LED_PHYS) ?
+ IB_PHYSPORTSTATE_LINKUP : IB_PHYSPORTSTATE_DISABLED;
+ lst = (ppd->led_override & QIB_LED_LOG) ?
+ IB_PORT_ACTIVE : IB_PORT_DOWN;
+ } else if (on) {
+ val = qib_read_kreg64(dd, kr_ibcstatus);
+ ltst = qib_6120_phys_portstate(val);
+ lst = qib_6120_iblink_state(val);
+ } else {
+ ltst = 0;
+ lst = 0;
+ }
+
+ spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
+ extctl = dd->cspec->extctrl & ~(SYM_MASK(EXTCtrl, LEDPriPortGreenOn) |
+ SYM_MASK(EXTCtrl, LEDPriPortYellowOn));
+
+ if (ltst == IB_PHYSPORTSTATE_LINKUP)
+ extctl |= SYM_MASK(EXTCtrl, LEDPriPortYellowOn);
+ if (lst == IB_PORT_ACTIVE)
+ extctl |= SYM_MASK(EXTCtrl, LEDPriPortGreenOn);
+ dd->cspec->extctrl = extctl;
+ qib_write_kreg(dd, kr_extctrl, extctl);
+ spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
+}
+
+static void qib_6120_free_irq(struct qib_devdata *dd)
+{
+ if (dd->cspec->irq) {
+ free_irq(dd->cspec->irq, dd);
+ dd->cspec->irq = 0;
+ }
+ qib_nomsi(dd);
+}
+
+/**
+ * qib_6120_setup_cleanup - clean up any per-chip chip-specific stuff
+ * @dd: the qlogic_ib device
+ *
+ * This is called during driver unload.
+ */
+static void qib_6120_setup_cleanup(struct qib_devdata *dd)
+{
+ qib_6120_free_irq(dd);
+ kfree(dd->cspec->cntrs);
+ kfree(dd->cspec->portcntrs);
+ if (dd->cspec->dummy_hdrq) {
+ dma_free_coherent(&dd->pcidev->dev,
+ ALIGN(dd->rcvhdrcnt *
+ dd->rcvhdrentsize *
+ sizeof(u32), PAGE_SIZE),
+ dd->cspec->dummy_hdrq,
+ dd->cspec->dummy_hdrq_phys);
+ dd->cspec->dummy_hdrq = NULL;
+ }
+}
+
+static void qib_wantpiobuf_6120_intr(struct qib_devdata *dd, u32 needint)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dd->sendctrl_lock, flags);
+ if (needint)
+ dd->sendctrl |= SYM_MASK(SendCtrl, PIOIntBufAvail);
+ else
+ dd->sendctrl &= ~SYM_MASK(SendCtrl, PIOIntBufAvail);
+ qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
+ qib_write_kreg(dd, kr_scratch, 0ULL);
+ spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
+}
+
+/*
+ * handle errors and unusual events first, separate function
+ * to improve cache hits for fast path interrupt handling
+ */
+static noinline void unlikely_6120_intr(struct qib_devdata *dd, u64 istat)
+{
+ if (unlikely(istat & ~QLOGIC_IB_I_BITSEXTANT))
+ qib_dev_err(dd, "interrupt with unknown interrupts %Lx set\n",
+ istat & ~QLOGIC_IB_I_BITSEXTANT);
+
+ if (istat & QLOGIC_IB_I_ERROR) {
+ u64 estat = 0;
+
+ qib_stats.sps_errints++;
+ estat = qib_read_kreg64(dd, kr_errstatus);
+ if (!estat)
+ qib_devinfo(dd->pcidev, "error interrupt (%Lx), "
+ "but no error bits set!\n", istat);
+ handle_6120_errors(dd, estat);
+ }
+
+ if (istat & QLOGIC_IB_I_GPIO) {
+ u32 gpiostatus;
+ u32 to_clear = 0;
+
+ /*
+ * GPIO_3..5 on IBA6120 Rev2 chips indicate
+ * errors that we need to count.
+ */
+ gpiostatus = qib_read_kreg32(dd, kr_gpio_status);
+ /* First the error-counter case. */
+ if (gpiostatus & GPIO_ERRINTR_MASK) {
+ /* want to clear the bits we see asserted. */
+ to_clear |= (gpiostatus & GPIO_ERRINTR_MASK);
+
+ /*
+ * Count appropriately, clear bits out of our copy,
+ * as they have been "handled".
+ */
+ if (gpiostatus & (1 << GPIO_RXUVL_BIT))
+ dd->cspec->rxfc_unsupvl_errs++;
+ if (gpiostatus & (1 << GPIO_OVRUN_BIT))
+ dd->cspec->overrun_thresh_errs++;
+ if (gpiostatus & (1 << GPIO_LLI_BIT))
+ dd->cspec->lli_errs++;
+ gpiostatus &= ~GPIO_ERRINTR_MASK;
+ }
+ if (gpiostatus) {
+ /*
+ * Some unexpected bits remain. If they could have
+ * caused the interrupt, complain and clear.
+ * To avoid repetition of this condition, also clear
+ * the mask. It is almost certainly due to error.
+ */
+ const u32 mask = qib_read_kreg32(dd, kr_gpio_mask);
+
+ /*
+ * Also check that the chip reflects our shadow,
+ * and report issues.  If they caused the interrupt,
+ * we will suppress it by refreshing from the shadow.
+ */
+ if (mask & gpiostatus) {
+ to_clear |= (gpiostatus & mask);
+ dd->cspec->gpio_mask &= ~(gpiostatus & mask);
+ qib_write_kreg(dd, kr_gpio_mask,
+ dd->cspec->gpio_mask);
+ }
+ }
+ if (to_clear)
+ qib_write_kreg(dd, kr_gpio_clear, (u64) to_clear);
+ }
+}
+
+static irqreturn_t qib_6120intr(int irq, void *data)
+{
+ struct qib_devdata *dd = data;
+ irqreturn_t ret;
+ u32 istat, ctxtrbits, rmask, crcs = 0;
+ unsigned i;
+
+ if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) {
+ /*
+ * This return value is not great, but we do not want the
+ * interrupt core code to remove our interrupt handler
+ * because we don't appear to be handling an interrupt
+ * during a chip reset.
+ */
+ ret = IRQ_HANDLED;
+ goto bail;
+ }
+
+ istat = qib_read_kreg32(dd, kr_intstatus);
+
+ if (unlikely(!istat)) {
+ ret = IRQ_NONE; /* not our interrupt, or already handled */
+ goto bail;
+ }
+ if (unlikely(istat == -1)) {
+ qib_bad_intrstatus(dd);
+ /* don't know if it was our interrupt or not */
+ ret = IRQ_NONE;
+ goto bail;
+ }
+
+ qib_stats.sps_ints++;
+ if (dd->int_counter != (u32) -1)
+ dd->int_counter++;
+
+ if (unlikely(istat & (~QLOGIC_IB_I_BITSEXTANT |
+ QLOGIC_IB_I_GPIO | QLOGIC_IB_I_ERROR)))
+ unlikely_6120_intr(dd, istat);
+
+ /*
+ * Clear the interrupt bits we found set, relatively early, so we
+ * "know" know the chip will have seen this by the time we process
+ * the queue, and will re-interrupt if necessary. The processor
+ * itself won't take the interrupt again until we return.
+ */
+ qib_write_kreg(dd, kr_intclear, istat);
+
+ /*
+ * Handle kernel receive queues before checking for pio buffers
+ * available since receives can overflow; piobuf waiters can afford
+ * a few extra cycles, since they were waiting anyway.
+ */
+ ctxtrbits = istat &
+ ((QLOGIC_IB_I_RCVAVAIL_MASK << QLOGIC_IB_I_RCVAVAIL_SHIFT) |
+ (QLOGIC_IB_I_RCVURG_MASK << QLOGIC_IB_I_RCVURG_SHIFT));
+ if (ctxtrbits) {
+ rmask = (1U << QLOGIC_IB_I_RCVAVAIL_SHIFT) |
+ (1U << QLOGIC_IB_I_RCVURG_SHIFT);
+ for (i = 0; i < dd->first_user_ctxt; i++) {
+ if (ctxtrbits & rmask) {
+ ctxtrbits &= ~rmask;
+ crcs += qib_kreceive(dd->rcd[i],
+ &dd->cspec->lli_counter,
+ NULL);
+ }
+ rmask <<= 1;
+ }
+ if (crcs) {
+ u32 cntr = dd->cspec->lli_counter;
+ cntr += crcs;
+ if (cntr) {
+ if (cntr > dd->cspec->lli_thresh) {
+ dd->cspec->lli_counter = 0;
+ dd->cspec->lli_errs++;
+ } else
+ dd->cspec->lli_counter += cntr;
+ }
+ }
+
+
+ if (ctxtrbits) {
+ ctxtrbits =
+ (ctxtrbits >> QLOGIC_IB_I_RCVAVAIL_SHIFT) |
+ (ctxtrbits >> QLOGIC_IB_I_RCVURG_SHIFT);
+ qib_handle_urcv(dd, ctxtrbits);
+ }
+ }
+
+ if ((istat & QLOGIC_IB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED))
+ qib_ib_piobufavail(dd);
+
+ ret = IRQ_HANDLED;
+bail:
+ return ret;
+}
+
+/*
+ * Set up our chip-specific interrupt handler.
+ * The interrupt type has already been set up, so
+ * we just need to do the registration and error checking.
+ */
+static void qib_setup_6120_interrupt(struct qib_devdata *dd)
+{
+ /*
+ * If the chip supports added error indication via GPIO pins,
+ * enable interrupts on those bits so the interrupt routine
+ * can count the events. Also set flag so interrupt routine
+ * can know they are expected.
+ */
+ if (SYM_FIELD(dd->revision, Revision_R,
+ ChipRevMinor) > 1) {
+ /* Rev2+ reports extra errors via internal GPIO pins */
+ dd->cspec->gpio_mask |= GPIO_ERRINTR_MASK;
+ qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
+ }
+
+ if (!dd->cspec->irq)
+ qib_dev_err(dd, "irq is 0, BIOS error? Interrupts won't "
+ "work\n");
+ else {
+ int ret;
+ ret = request_irq(dd->cspec->irq, qib_6120intr, 0,
+ QIB_DRV_NAME, dd);
+ if (ret)
+ qib_dev_err(dd, "Couldn't setup interrupt "
+ "(irq=%d): %d\n", dd->cspec->irq,
+ ret);
+ }
+}
+
+/**
+ * pe_boardname - fill in the board name
+ * @dd: the qlogic_ib device
+ *
+ * info is based on the board revision register
+ */
+static void pe_boardname(struct qib_devdata *dd)
+{
+ char *n;
+ u32 boardid, namelen;
+
+ boardid = SYM_FIELD(dd->revision, Revision,
+ BoardID);
+
+ switch (boardid) {
+ case 2:
+ n = "InfiniPath_QLE7140";
+ break;
+ default:
+ qib_dev_err(dd, "Unknown 6120 board with ID %u\n", boardid);
+ n = "Unknown_InfiniPath_6120";
+ break;
+ }
+ namelen = strlen(n) + 1;
+ dd->boardname = kmalloc(namelen, GFP_KERNEL);
+ if (!dd->boardname)
+ qib_dev_err(dd, "Failed allocation for board name: %s\n", n);
+ else
+ snprintf(dd->boardname, namelen, "%s", n);
+
+ if (dd->majrev != 4 || !dd->minrev || dd->minrev > 2)
+ qib_dev_err(dd, "Unsupported InfiniPath hardware revision "
+ "%u.%u!\n", dd->majrev, dd->minrev);
+
+ snprintf(dd->boardversion, sizeof(dd->boardversion),
+ "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n",
+ QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname,
+ (unsigned)SYM_FIELD(dd->revision, Revision_R, Arch),
+ dd->majrev, dd->minrev,
+ (unsigned)SYM_FIELD(dd->revision, Revision_R, SW));
+
+}
+
+/*
+ * This routine sleeps, so it can only be called from user context, not
+ * from interrupt context. If we need interrupt context, we can split
+ * it into two routines.
+ */
+static int qib_6120_setup_reset(struct qib_devdata *dd)
+{
+ u64 val;
+ int i;
+ int ret;
+ u16 cmdval;
+ u8 int_line, clinesz;
+
+ qib_pcie_getcmd(dd, &cmdval, &int_line, &clinesz);
+
+ /* Use ERROR so it shows up in logs, etc. */
+ qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit);
+
+ /* no interrupts till re-initted */
+ qib_6120_set_intr_state(dd, 0);
+
+ dd->cspec->ibdeltainprog = 0;
+ dd->cspec->ibsymdelta = 0;
+ dd->cspec->iblnkerrdelta = 0;
+
+ /*
+ * Keep chip from being accessed until we are ready. Use
+ * writeq() directly, to allow the write even though QIB_PRESENT
+	 * isn't set.
+ */
+ dd->flags &= ~(QIB_INITTED | QIB_PRESENT);
+ dd->int_counter = 0; /* so we check interrupts work again */
+ val = dd->control | QLOGIC_IB_C_RESET;
+ writeq(val, &dd->kregbase[kr_control]);
+ mb(); /* prevent compiler re-ordering around actual reset */
+
+ for (i = 1; i <= 5; i++) {
+ /*
+ * Allow MBIST, etc. to complete; longer on each retry.
+ * We sometimes get machine checks from bus timeout if no
+ * response, so for now, make it *really* long.
+ */
+ msleep(1000 + (1 + i) * 2000);
+
+ qib_pcie_reenable(dd, cmdval, int_line, clinesz);
+
+ /*
+ * Use readq directly, so we don't need to mark it as PRESENT
+ * until we get a successful indication that all is well.
+ */
+ val = readq(&dd->kregbase[kr_revision]);
+ if (val == dd->revision) {
+ dd->flags |= QIB_PRESENT; /* it's back */
+ ret = qib_reinit_intr(dd);
+ goto bail;
+ }
+ }
+ ret = 0; /* failed */
+
+bail:
+ if (ret) {
+ if (qib_pcie_params(dd, dd->lbus_width, NULL, NULL))
+ qib_dev_err(dd, "Reset failed to setup PCIe or "
+ "interrupts; continuing anyway\n");
+ /* clear the reset error, init error/hwerror mask */
+ qib_6120_init_hwerrors(dd);
+ /* for Rev2 error interrupts; nop for rev 1 */
+ qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
+ /* clear the reset error, init error/hwerror mask */
+ qib_6120_init_hwerrors(dd);
+ }
+ return ret;
+}
+
+/**
+ * qib_6120_put_tid - write a TID in chip
+ * @dd: the qlogic_ib device
+ * @tidptr: pointer to the expected TID (in chip) to update
+ * @type: RCVHQ_RCV_TYPE_EAGER (1) for eager, RCVHQ_RCV_TYPE_EXPECTED (0)
+ * for expected
+ * @pa: physical address of in memory buffer; tidinvalid if freeing
+ *
+ * This exists as a separate routine to allow for special locking etc.
+ * It's used both for the full cleanup on exit, and for the normal
+ * setup and teardown.
+ */
+static void qib_6120_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
+ u32 type, unsigned long pa)
+{
+ u32 __iomem *tidp32 = (u32 __iomem *)tidptr;
+ unsigned long flags;
+ int tidx;
+ spinlock_t *tidlockp; /* select appropriate spinlock */
+
+ if (!dd->kregbase)
+ return;
+
+ if (pa != dd->tidinvalid) {
+ if (pa & ((1U << 11) - 1)) {
+ qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
+ pa);
+ return;
+ }
+ pa >>= 11;
+ if (pa & ~QLOGIC_IB_RT_ADDR_MASK) {
+ qib_dev_err(dd, "Physical page address 0x%lx "
+ "larger than supported\n", pa);
+ return;
+ }
+
+ if (type == RCVHQ_RCV_TYPE_EAGER)
+ pa |= dd->tidtemplate;
+ else /* for now, always full 4KB page */
+ pa |= 2 << 29;
+ }
+
+ /*
+ * Avoid chip issue by writing the scratch register
+ * before and after the TID, and with an io write barrier.
+ * We use a spinlock around the writes, so they can't intermix
+ * with other TID (eager or expected) writes (the chip problem
+ * is triggered by back to back TID writes). Unfortunately, this
+ * call can be done from interrupt level for the ctxt 0 eager TIDs,
+ * so we have to use irqsave locks.
+ */
+ /*
+ * Assumes tidptr always > egrtidbase
+ * if type == RCVHQ_RCV_TYPE_EAGER.
+ */
+ tidx = tidptr - dd->egrtidbase;
+
+ tidlockp = (type == RCVHQ_RCV_TYPE_EAGER && tidx < dd->rcvhdrcnt)
+ ? &dd->cspec->kernel_tid_lock : &dd->cspec->user_tid_lock;
+ spin_lock_irqsave(tidlockp, flags);
+ qib_write_kreg(dd, kr_scratch, 0xfeeddeaf);
+ writel(pa, tidp32);
+ qib_write_kreg(dd, kr_scratch, 0xdeadbeef);
+ mmiowb();
+ spin_unlock_irqrestore(tidlockp, flags);
+}
+
+/**
+ * qib_6120_put_tid_2 - write a TID in chip, Revision 2 or higher
+ * @dd: the qlogic_ib device
+ * @tidptr: pointer to the expected TID (in chip) to update
+ * @type: RCVHQ_RCV_TYPE_EAGER (1) for eager, RCVHQ_RCV_TYPE_EXPECTED (0)
+ * for expected
+ * @pa: physical address of in memory buffer; tidinvalid if freeing
+ *
+ * This exists as a separate routine to allow for selection of the
+ * appropriate "flavor". The static calls in cleanup just use the
+ * revision-agnostic form, as they are not performance critical.
+ */
+static void qib_6120_put_tid_2(struct qib_devdata *dd, u64 __iomem *tidptr,
+ u32 type, unsigned long pa)
+{
+ u32 __iomem *tidp32 = (u32 __iomem *)tidptr;
+ u32 tidx;
+
+ if (!dd->kregbase)
+ return;
+
+ if (pa != dd->tidinvalid) {
+ if (pa & ((1U << 11) - 1)) {
+ qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
+ pa);
+ return;
+ }
+ pa >>= 11;
+ if (pa & ~QLOGIC_IB_RT_ADDR_MASK) {
+ qib_dev_err(dd, "Physical page address 0x%lx "
+ "larger than supported\n", pa);
+ return;
+ }
+
+ if (type == RCVHQ_RCV_TYPE_EAGER)
+ pa |= dd->tidtemplate;
+ else /* for now, always full 4KB page */
+ pa |= 2 << 29;
+ }
+ tidx = tidptr - dd->egrtidbase;
+ writel(pa, tidp32);
+ mmiowb();
+}
+
+
+/**
+ * qib_6120_clear_tids - clear all TID entries for a context, expected and eager
+ * @dd: the qlogic_ib device
+ * @rcd: the context
+ *
+ * clear all TID entries for a context, expected and eager.
+ * Used from qib_close(). On this chip, TIDs are only 32 bits,
+ * not 64, but they are still on 64 bit boundaries, so tidbase
+ * is declared as u64 * for the pointer math, even though we write 32 bits
+ */
+static void qib_6120_clear_tids(struct qib_devdata *dd,
+ struct qib_ctxtdata *rcd)
+{
+ u64 __iomem *tidbase;
+ unsigned long tidinv;
+ u32 ctxt;
+ int i;
+
+ if (!dd->kregbase || !rcd)
+ return;
+
+ ctxt = rcd->ctxt;
+
+ tidinv = dd->tidinvalid;
+ tidbase = (u64 __iomem *)
+ ((char __iomem *)(dd->kregbase) +
+ dd->rcvtidbase +
+ ctxt * dd->rcvtidcnt * sizeof(*tidbase));
+
+ for (i = 0; i < dd->rcvtidcnt; i++)
+ /* use func pointer because could be one of two funcs */
+ dd->f_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
+ tidinv);
+
+ tidbase = (u64 __iomem *)
+ ((char __iomem *)(dd->kregbase) +
+ dd->rcvegrbase +
+ rcd->rcvegr_tid_base * sizeof(*tidbase));
+
+ for (i = 0; i < rcd->rcvegrcnt; i++)
+ /* use func pointer because could be one of two funcs */
+ dd->f_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
+ tidinv);
+}
+
+/**
+ * qib_6120_tidtemplate - setup constants for TID updates
+ * @dd: the qlogic_ib device
+ *
+ * We setup stuff that we use a lot, to avoid calculating each time
+ */
+static void qib_6120_tidtemplate(struct qib_devdata *dd)
+{
+ u32 egrsize = dd->rcvegrbufsize;
+
+ /*
+ * For now, we always allocate 4KB buffers (at init) so we can
+ * receive max size packets. We may want a module parameter to
+	 * specify 2KB or 4KB and/or make it per-ctxt instead of per-device
+ * for those who want to reduce memory footprint. Note that the
+ * rcvhdrentsize size must be large enough to hold the largest
+ * IB header (currently 96 bytes) that we expect to handle (plus of
+ * course the 2 dwords of RHF).
+ */
+ if (egrsize == 2048)
+ dd->tidtemplate = 1U << 29;
+ else if (egrsize == 4096)
+ dd->tidtemplate = 2U << 29;
+ dd->tidinvalid = 0;
+}
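+
+/*
+ * Rough sketch of the resulting TID word, as implied by the code in
+ * qib_6120_put_tid() and qib_6120_tidtemplate() above (illustrative
+ * only, not taken from chip documentation):
+ *
+ *	tid = (pa >> 11) | size_code;
+ *
+ * where pa must be 2KB aligned and fit within QLOGIC_IB_RT_ADDR_MASK
+ * after the shift, and size_code (a name used only here) is
+ * dd->tidtemplate for eager buffers (1U << 29 for 2KB, 2U << 29 for
+ * 4KB) or 2 << 29 for expected TIDs.
+ */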
+
+int __attribute__((weak)) qib_unordered_wc(void)
+{
+ return 0;
+}
+
+/**
+ * qib_6120_get_base_info - set chip-specific flags for user code
+ * @rcd: the qlogic_ib ctxt
+ * @kinfo: qib_base_info pointer
+ *
+ * We set the PCIE flag because the lower bandwidth on PCIe vs
+ * HyperTransport can affect some user packet algorithms.
+ */
+static int qib_6120_get_base_info(struct qib_ctxtdata *rcd,
+ struct qib_base_info *kinfo)
+{
+ if (qib_unordered_wc())
+ kinfo->spi_runtime_flags |= QIB_RUNTIME_FORCE_WC_ORDER;
+
+ kinfo->spi_runtime_flags |= QIB_RUNTIME_PCIE |
+ QIB_RUNTIME_FORCE_PIOAVAIL | QIB_RUNTIME_PIO_REGSWAPPED;
+ return 0;
+}
+
+
+static struct qib_message_header *
+qib_6120_get_msgheader(struct qib_devdata *dd, __le32 *rhf_addr)
+{
+ return (struct qib_message_header *)
+ &rhf_addr[sizeof(u64) / sizeof(u32)];
+}
+
+static void qib_6120_config_ctxts(struct qib_devdata *dd)
+{
+ dd->ctxtcnt = qib_read_kreg32(dd, kr_portcnt);
+ if (qib_n_krcv_queues > 1) {
+ dd->first_user_ctxt = qib_n_krcv_queues * dd->num_pports;
+ if (dd->first_user_ctxt > dd->ctxtcnt)
+ dd->first_user_ctxt = dd->ctxtcnt;
+ dd->qpn_mask = dd->first_user_ctxt <= 2 ? 2 : 6;
+ } else
+ dd->first_user_ctxt = dd->num_pports;
+ dd->n_krcv_queues = dd->first_user_ctxt;
+}
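+
+/*
+ * Worked example of the context configuration above (illustrative
+ * values only): with the default qib_n_krcv_queues == 1, the single
+ * port gets one kernel receive context and first_user_ctxt ==
+ * num_pports == 1; with qib_n_krcv_queues == 2, first_user_ctxt
+ * becomes 2 and qpn_mask is set to 2, per the code above.
+ */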
+
+static void qib_update_6120_usrhead(struct qib_ctxtdata *rcd, u64 hd,
+ u32 updegr, u32 egrhd)
+{
+ qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
+ if (updegr)
+ qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
+}
+
+static u32 qib_6120_hdrqempty(struct qib_ctxtdata *rcd)
+{
+ u32 head, tail;
+
+ head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt);
+ if (rcd->rcvhdrtail_kvaddr)
+ tail = qib_get_rcvhdrtail(rcd);
+ else
+ tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt);
+ return head == tail;
+}
+
+/*
+ * Used when we close any ctxt, for DMA already in flight
+ * at close. Can't be done until we know hdrq size, so not
+ * early in chip init.
+ */
+static void alloc_dummy_hdrq(struct qib_devdata *dd)
+{
+ dd->cspec->dummy_hdrq = dma_alloc_coherent(&dd->pcidev->dev,
+ dd->rcd[0]->rcvhdrq_size,
+ &dd->cspec->dummy_hdrq_phys,
+ GFP_KERNEL | __GFP_COMP);
+ if (!dd->cspec->dummy_hdrq) {
+ qib_devinfo(dd->pcidev, "Couldn't allocate dummy hdrq\n");
+ /* fallback to just 0'ing */
+ dd->cspec->dummy_hdrq_phys = 0UL;
+ }
+}
+
+/*
+ * Modify the RCVCTRL register in a chip-specific way. This
+ * is a function because bit positions and (future) register
+ * locations are chip-specific, but the needed operations are
+ * generic. <op> is a bit-mask because we often want to
+ * do multiple modifications.
+ */
+static void rcvctrl_6120_mod(struct qib_pportdata *ppd, unsigned int op,
+ int ctxt)
+{
+ struct qib_devdata *dd = ppd->dd;
+ u64 mask, val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
+
+ if (op & QIB_RCVCTRL_TAILUPD_ENB)
+ dd->rcvctrl |= (1ULL << QLOGIC_IB_R_TAILUPD_SHIFT);
+ if (op & QIB_RCVCTRL_TAILUPD_DIS)
+ dd->rcvctrl &= ~(1ULL << QLOGIC_IB_R_TAILUPD_SHIFT);
+ if (op & QIB_RCVCTRL_PKEY_ENB)
+ dd->rcvctrl &= ~(1ULL << IBA6120_R_PKEY_DIS_SHIFT);
+ if (op & QIB_RCVCTRL_PKEY_DIS)
+ dd->rcvctrl |= (1ULL << IBA6120_R_PKEY_DIS_SHIFT);
+ if (ctxt < 0)
+ mask = (1ULL << dd->ctxtcnt) - 1;
+ else
+ mask = (1ULL << ctxt);
+ if (op & QIB_RCVCTRL_CTXT_ENB) {
+ /* always done for specific ctxt */
+ dd->rcvctrl |= (mask << SYM_LSB(RcvCtrl, PortEnable));
+ if (!(dd->flags & QIB_NODMA_RTAIL))
+ dd->rcvctrl |= 1ULL << QLOGIC_IB_R_TAILUPD_SHIFT;
+ /* Write these registers before the context is enabled. */
+ qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, ctxt,
+ dd->rcd[ctxt]->rcvhdrqtailaddr_phys);
+ qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt,
+ dd->rcd[ctxt]->rcvhdrq_phys);
+
+ if (ctxt == 0 && !dd->cspec->dummy_hdrq)
+ alloc_dummy_hdrq(dd);
+ }
+ if (op & QIB_RCVCTRL_CTXT_DIS)
+ dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, PortEnable));
+ if (op & QIB_RCVCTRL_INTRAVAIL_ENB)
+ dd->rcvctrl |= (mask << QLOGIC_IB_R_INTRAVAIL_SHIFT);
+ if (op & QIB_RCVCTRL_INTRAVAIL_DIS)
+ dd->rcvctrl &= ~(mask << QLOGIC_IB_R_INTRAVAIL_SHIFT);
+ qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
+ if ((op & QIB_RCVCTRL_INTRAVAIL_ENB) && dd->rhdrhead_intr_off) {
+ /* arm rcv interrupt */
+ val = qib_read_ureg32(dd, ur_rcvhdrhead, ctxt) |
+ dd->rhdrhead_intr_off;
+ qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
+ }
+ if (op & QIB_RCVCTRL_CTXT_ENB) {
+ /*
+ * Init the context registers also; if we were
+ * disabled, tail and head should both be zero
+ * already from the enable, but since we don't
+		 * know, we have to do it explicitly.
+ */
+ val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt);
+ qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt);
+
+ val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt);
+ dd->rcd[ctxt]->head = val;
+ /* If kctxt, interrupt on next receive. */
+ if (ctxt < dd->first_user_ctxt)
+ val |= dd->rhdrhead_intr_off;
+ qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
+ }
+ if (op & QIB_RCVCTRL_CTXT_DIS) {
+ /*
+ * Be paranoid, and never write 0's to these, just use an
+ * unused page. Of course,
+ * rcvhdraddr points to a large chunk of memory, so this
+ * could still trash things, but at least it won't trash
+ * page 0, and by disabling the ctxt, it should stop "soon",
+		 * even if a packet or two is already in flight after we
+ * disabled the ctxt. Only 6120 has this issue.
+ */
+ if (ctxt >= 0) {
+ qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, ctxt,
+ dd->cspec->dummy_hdrq_phys);
+ qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt,
+ dd->cspec->dummy_hdrq_phys);
+ } else {
+ unsigned i;
+
+ for (i = 0; i < dd->cfgctxts; i++) {
+ qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr,
+ i, dd->cspec->dummy_hdrq_phys);
+ qib_write_kreg_ctxt(dd, kr_rcvhdraddr,
+ i, dd->cspec->dummy_hdrq_phys);
+ }
+ }
+ }
+ spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
+}
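+
+/*
+ * Usage sketch (not an actual call site in this file; the real callers
+ * presumably live in the generic qib code): enabling kernel context 0
+ * with interrupt-on-available would look roughly like
+ *
+ *	rcvctrl_6120_mod(ppd, QIB_RCVCTRL_CTXT_ENB |
+ *			 QIB_RCVCTRL_INTRAVAIL_ENB, 0);
+ *
+ * while passing a negative ctxt builds a mask covering all configured
+ * contexts, e.g. for the INTRAVAIL or CTXT_DIS operations.
+ */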
+
+/*
+ * Modify the SENDCTRL register in a chip-specific way. This
+ * is a function because there may be multiple such registers with
+ * slightly different layouts. Only the operations actually used
+ * are implemented so far.
+ * The chip requires no back-to-back sendctrl writes, so we write the
+ * scratch register after writing sendctrl.
+ */
+static void sendctrl_6120_mod(struct qib_pportdata *ppd, u32 op)
+{
+ struct qib_devdata *dd = ppd->dd;
+ u64 tmp_dd_sendctrl;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dd->sendctrl_lock, flags);
+
+ /* First the ones that are "sticky", saved in shadow */
+ if (op & QIB_SENDCTRL_CLEAR)
+ dd->sendctrl = 0;
+ if (op & QIB_SENDCTRL_SEND_DIS)
+ dd->sendctrl &= ~SYM_MASK(SendCtrl, PIOEnable);
+ else if (op & QIB_SENDCTRL_SEND_ENB)
+ dd->sendctrl |= SYM_MASK(SendCtrl, PIOEnable);
+ if (op & QIB_SENDCTRL_AVAIL_DIS)
+ dd->sendctrl &= ~SYM_MASK(SendCtrl, PIOBufAvailUpd);
+ else if (op & QIB_SENDCTRL_AVAIL_ENB)
+ dd->sendctrl |= SYM_MASK(SendCtrl, PIOBufAvailUpd);
+
+ if (op & QIB_SENDCTRL_DISARM_ALL) {
+ u32 i, last;
+
+ tmp_dd_sendctrl = dd->sendctrl;
+ /*
+ * disarm any that are not yet launched, disabling sends
+ * and updates until done.
+ */
+ last = dd->piobcnt2k + dd->piobcnt4k;
+ tmp_dd_sendctrl &=
+ ~(SYM_MASK(SendCtrl, PIOEnable) |
+ SYM_MASK(SendCtrl, PIOBufAvailUpd));
+ for (i = 0; i < last; i++) {
+ qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl |
+ SYM_MASK(SendCtrl, Disarm) | i);
+ qib_write_kreg(dd, kr_scratch, 0);
+ }
+ }
+
+ tmp_dd_sendctrl = dd->sendctrl;
+
+ if (op & QIB_SENDCTRL_FLUSH)
+ tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Abort);
+ if (op & QIB_SENDCTRL_DISARM)
+ tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Disarm) |
+ ((op & QIB_6120_SendCtrl_DisarmPIOBuf_RMASK) <<
+ SYM_LSB(SendCtrl, DisarmPIOBuf));
+ if (op & QIB_SENDCTRL_AVAIL_BLIP)
+ tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, PIOBufAvailUpd);
+
+ qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl);
+ qib_write_kreg(dd, kr_scratch, 0);
+
+ if (op & QIB_SENDCTRL_AVAIL_BLIP) {
+ qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
+ qib_write_kreg(dd, kr_scratch, 0);
+ }
+
+ spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
+
+ if (op & QIB_SENDCTRL_FLUSH) {
+ u32 v;
+ /*
+ * ensure writes have hit chip, then do a few
+ * more reads, to allow DMA of pioavail registers
+ * to occur, so in-memory copy is in sync with
+ * the chip. Not always safe to sleep.
+ */
+ v = qib_read_kreg32(dd, kr_scratch);
+ qib_write_kreg(dd, kr_scratch, v);
+ v = qib_read_kreg32(dd, kr_scratch);
+ qib_write_kreg(dd, kr_scratch, v);
+ qib_read_kreg32(dd, kr_scratch);
+ }
+}
+
+/**
+ * qib_portcntr_6120 - read a per-port counter
+ * @ppd: the qlogic_ib pport
+ * @reg: the counter to snapshot
+ */
+static u64 qib_portcntr_6120(struct qib_pportdata *ppd, u32 reg)
+{
+ u64 ret = 0ULL;
+ struct qib_devdata *dd = ppd->dd;
+ u16 creg;
+ /* 0xffff for unimplemented or synthesized counters */
+ static const u16 xlator[] = {
+ [QIBPORTCNTR_PKTSEND] = cr_pktsend,
+ [QIBPORTCNTR_WORDSEND] = cr_wordsend,
+ [QIBPORTCNTR_PSXMITDATA] = 0xffff,
+ [QIBPORTCNTR_PSXMITPKTS] = 0xffff,
+ [QIBPORTCNTR_PSXMITWAIT] = 0xffff,
+ [QIBPORTCNTR_SENDSTALL] = cr_sendstall,
+ [QIBPORTCNTR_PKTRCV] = cr_pktrcv,
+ [QIBPORTCNTR_PSRCVDATA] = 0xffff,
+ [QIBPORTCNTR_PSRCVPKTS] = 0xffff,
+ [QIBPORTCNTR_RCVEBP] = cr_rcvebp,
+ [QIBPORTCNTR_RCVOVFL] = cr_rcvovfl,
+ [QIBPORTCNTR_WORDRCV] = cr_wordrcv,
+ [QIBPORTCNTR_RXDROPPKT] = cr_rxdroppkt,
+ [QIBPORTCNTR_RXLOCALPHYERR] = 0xffff,
+ [QIBPORTCNTR_RXVLERR] = 0xffff,
+ [QIBPORTCNTR_ERRICRC] = cr_erricrc,
+ [QIBPORTCNTR_ERRVCRC] = cr_errvcrc,
+ [QIBPORTCNTR_ERRLPCRC] = cr_errlpcrc,
+ [QIBPORTCNTR_BADFORMAT] = cr_badformat,
+ [QIBPORTCNTR_ERR_RLEN] = cr_err_rlen,
+ [QIBPORTCNTR_IBSYMBOLERR] = cr_ibsymbolerr,
+ [QIBPORTCNTR_INVALIDRLEN] = cr_invalidrlen,
+ [QIBPORTCNTR_UNSUPVL] = cr_txunsupvl,
+ [QIBPORTCNTR_EXCESSBUFOVFL] = 0xffff,
+ [QIBPORTCNTR_ERRLINK] = cr_errlink,
+ [QIBPORTCNTR_IBLINKDOWN] = cr_iblinkdown,
+ [QIBPORTCNTR_IBLINKERRRECOV] = cr_iblinkerrrecov,
+ [QIBPORTCNTR_LLI] = 0xffff,
+ [QIBPORTCNTR_PSINTERVAL] = 0xffff,
+ [QIBPORTCNTR_PSSTART] = 0xffff,
+ [QIBPORTCNTR_PSSTAT] = 0xffff,
+ [QIBPORTCNTR_VL15PKTDROP] = 0xffff,
+ [QIBPORTCNTR_ERRPKEY] = cr_errpkey,
+ [QIBPORTCNTR_KHDROVFL] = 0xffff,
+ };
+
+ if (reg >= ARRAY_SIZE(xlator)) {
+ qib_devinfo(ppd->dd->pcidev,
+ "Unimplemented portcounter %u\n", reg);
+ goto done;
+ }
+ creg = xlator[reg];
+
+	/* handle counter requests not implemented as chip counters */
+ if (reg == QIBPORTCNTR_LLI)
+ ret = dd->cspec->lli_errs;
+ else if (reg == QIBPORTCNTR_EXCESSBUFOVFL)
+ ret = dd->cspec->overrun_thresh_errs;
+ else if (reg == QIBPORTCNTR_KHDROVFL) {
+ int i;
+
+ /* sum over all kernel contexts */
+ for (i = 0; i < dd->first_user_ctxt; i++)
+ ret += read_6120_creg32(dd, cr_portovfl + i);
+ } else if (reg == QIBPORTCNTR_PSSTAT)
+ ret = dd->cspec->pma_sample_status;
+ if (creg == 0xffff)
+ goto done;
+
+ /*
+	 * only fast-incrementing counters are 64-bit; use 32-bit reads to
+	 * avoid two independent reads when on Opteron
+ */
+ if (creg == cr_wordsend || creg == cr_wordrcv ||
+ creg == cr_pktsend || creg == cr_pktrcv)
+ ret = read_6120_creg(dd, creg);
+ else
+ ret = read_6120_creg32(dd, creg);
+ if (creg == cr_ibsymbolerr) {
+ if (dd->cspec->ibdeltainprog)
+ ret -= ret - dd->cspec->ibsymsnap;
+ ret -= dd->cspec->ibsymdelta;
+ } else if (creg == cr_iblinkerrrecov) {
+ if (dd->cspec->ibdeltainprog)
+ ret -= ret - dd->cspec->iblnkerrsnap;
+ ret -= dd->cspec->iblnkerrdelta;
+ }
+ if (reg == QIBPORTCNTR_RXDROPPKT) /* add special cased count */
+ ret += dd->cspec->rxfc_unsupvl_errs;
+
+done:
+ return ret;
+}
+
+/*
+ * Device counter names (not port-specific), one line per stat,
+ * single string. Used by utilities like ipathstats to print the stats
+ * in a way which works for different versions of drivers, without changing
+ * the utility. Names need to be 12 chars or less (w/o newline), for proper
+ * display by utility.
+ * Non-error counters are first.
+ * Start of "error" conters is indicated by a leading "E " on the first
+ * "error" counter, and doesn't count in label length.
+ * The EgrOvfl list needs to be last so we truncate them at the configured
+ * context count for the device.
+ * cntr6120indices contains the corresponding register indices.
+ */
+static const char cntr6120names[] =
+ "Interrupts\n"
+ "HostBusStall\n"
+ "E RxTIDFull\n"
+ "RxTIDInvalid\n"
+ "Ctxt0EgrOvfl\n"
+ "Ctxt1EgrOvfl\n"
+ "Ctxt2EgrOvfl\n"
+ "Ctxt3EgrOvfl\n"
+ "Ctxt4EgrOvfl\n";
+
+static const size_t cntr6120indices[] = {
+ cr_lbint,
+ cr_lbflowstall,
+ cr_errtidfull,
+ cr_errtidvalid,
+ cr_portovfl + 0,
+ cr_portovfl + 1,
+ cr_portovfl + 2,
+ cr_portovfl + 3,
+ cr_portovfl + 4,
+};
+
+/*
+ * same as cntr6120names and cntr6120indices, but for port-specific counters.
+ * portcntr6120indices is somewhat complicated by some registers needing
+ * adjustments of various kinds, and those are ORed with _PORT_VIRT_FLAG
+ */
+static const char portcntr6120names[] =
+ "TxPkt\n"
+ "TxFlowPkt\n"
+ "TxWords\n"
+ "RxPkt\n"
+ "RxFlowPkt\n"
+ "RxWords\n"
+ "TxFlowStall\n"
+ "E IBStatusChng\n"
+ "IBLinkDown\n"
+ "IBLnkRecov\n"
+ "IBRxLinkErr\n"
+ "IBSymbolErr\n"
+ "RxLLIErr\n"
+ "RxBadFormat\n"
+ "RxBadLen\n"
+ "RxBufOvrfl\n"
+ "RxEBP\n"
+ "RxFlowCtlErr\n"
+ "RxICRCerr\n"
+ "RxLPCRCerr\n"
+ "RxVCRCerr\n"
+ "RxInvalLen\n"
+ "RxInvalPKey\n"
+ "RxPktDropped\n"
+ "TxBadLength\n"
+ "TxDropped\n"
+ "TxInvalLen\n"
+ "TxUnderrun\n"
+ "TxUnsupVL\n"
+ ;
+
+#define _PORT_VIRT_FLAG 0x8000 /* "virtual", need adjustments */
+static const size_t portcntr6120indices[] = {
+ QIBPORTCNTR_PKTSEND | _PORT_VIRT_FLAG,
+ cr_pktsendflow,
+ QIBPORTCNTR_WORDSEND | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_PKTRCV | _PORT_VIRT_FLAG,
+ cr_pktrcvflowctrl,
+ QIBPORTCNTR_WORDRCV | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_SENDSTALL | _PORT_VIRT_FLAG,
+ cr_ibstatuschange,
+ QIBPORTCNTR_IBLINKDOWN | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_IBLINKERRRECOV | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_ERRLINK | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_IBSYMBOLERR | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_LLI | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_BADFORMAT | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_ERR_RLEN | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_RCVOVFL | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_RCVEBP | _PORT_VIRT_FLAG,
+ cr_rcvflowctrl_err,
+ QIBPORTCNTR_ERRICRC | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_ERRLPCRC | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_ERRVCRC | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_INVALIDRLEN | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_ERRPKEY | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_RXDROPPKT | _PORT_VIRT_FLAG,
+ cr_invalidslen,
+ cr_senddropped,
+ cr_errslen,
+ cr_sendunderrun,
+ cr_txunsupvl,
+};
+
+/* do all the setup to make the counter reads efficient later */
+static void init_6120_cntrnames(struct qib_devdata *dd)
+{
+ int i, j = 0;
+ char *s;
+
+ for (i = 0, s = (char *)cntr6120names; s && j <= dd->cfgctxts;
+ i++) {
+ /* we always have at least one counter before the egrovfl */
+ if (!j && !strncmp("Ctxt0EgrOvfl", s + 1, 12))
+ j = 1;
+ s = strchr(s + 1, '\n');
+ if (s && j)
+ j++;
+ }
+ dd->cspec->ncntrs = i;
+ if (!s)
+ /* full list; size is without terminating null */
+ dd->cspec->cntrnamelen = sizeof(cntr6120names) - 1;
+ else
+ dd->cspec->cntrnamelen = 1 + s - cntr6120names;
+ dd->cspec->cntrs = kmalloc(dd->cspec->ncntrs
+ * sizeof(u64), GFP_KERNEL);
+ if (!dd->cspec->cntrs)
+ qib_dev_err(dd, "Failed allocation for counters\n");
+
+ for (i = 0, s = (char *)portcntr6120names; s; i++)
+ s = strchr(s + 1, '\n');
+ dd->cspec->nportcntrs = i - 1;
+ dd->cspec->portcntrnamelen = sizeof(portcntr6120names) - 1;
+ dd->cspec->portcntrs = kmalloc(dd->cspec->nportcntrs
+ * sizeof(u64), GFP_KERNEL);
+ if (!dd->cspec->portcntrs)
+ qib_dev_err(dd, "Failed allocation for portcounters\n");
+}
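+
+/*
+ * Worked example of the truncation above (illustrative numbers only):
+ * with dd->cfgctxts == 3, the scan stops after "Ctxt2EgrOvfl", so
+ * ncntrs == 7 and cntrnamelen covers only the first seven names; the
+ * remaining per-context EgrOvfl labels are never reported.
+ */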
+
+static u32 qib_read_6120cntrs(struct qib_devdata *dd, loff_t pos, char **namep,
+ u64 **cntrp)
+{
+ u32 ret;
+
+ if (namep) {
+ ret = dd->cspec->cntrnamelen;
+ if (pos >= ret)
+ ret = 0; /* final read after getting everything */
+ else
+ *namep = (char *)cntr6120names;
+ } else {
+ u64 *cntr = dd->cspec->cntrs;
+ int i;
+
+ ret = dd->cspec->ncntrs * sizeof(u64);
+ if (!cntr || pos >= ret) {
+ /* everything read, or couldn't get memory */
+ ret = 0;
+ goto done;
+ }
+ if (pos >= ret) {
+ ret = 0; /* final read after getting everything */
+ goto done;
+ }
+ *cntrp = cntr;
+ for (i = 0; i < dd->cspec->ncntrs; i++)
+ *cntr++ = read_6120_creg32(dd, cntr6120indices[i]);
+ }
+done:
+ return ret;
+}
+
+static u32 qib_read_6120portcntrs(struct qib_devdata *dd, loff_t pos, u32 port,
+ char **namep, u64 **cntrp)
+{
+ u32 ret;
+
+ if (namep) {
+ ret = dd->cspec->portcntrnamelen;
+ if (pos >= ret)
+ ret = 0; /* final read after getting everything */
+ else
+ *namep = (char *)portcntr6120names;
+ } else {
+ u64 *cntr = dd->cspec->portcntrs;
+ struct qib_pportdata *ppd = &dd->pport[port];
+ int i;
+
+ ret = dd->cspec->nportcntrs * sizeof(u64);
+ if (!cntr || pos >= ret) {
+ /* everything read, or couldn't get memory */
+ ret = 0;
+ goto done;
+ }
+ *cntrp = cntr;
+ for (i = 0; i < dd->cspec->nportcntrs; i++) {
+ if (portcntr6120indices[i] & _PORT_VIRT_FLAG)
+ *cntr++ = qib_portcntr_6120(ppd,
+ portcntr6120indices[i] &
+ ~_PORT_VIRT_FLAG);
+ else
+ *cntr++ = read_6120_creg32(dd,
+ portcntr6120indices[i]);
+ }
+ }
+done:
+ return ret;
+}
+
+static void qib_chk_6120_errormask(struct qib_devdata *dd)
+{
+ static u32 fixed;
+ u32 ctrl;
+ unsigned long errormask;
+ unsigned long hwerrs;
+
+ if (!dd->cspec->errormask || !(dd->flags & QIB_INITTED))
+ return;
+
+ errormask = qib_read_kreg64(dd, kr_errmask);
+
+ if (errormask == dd->cspec->errormask)
+ return;
+ fixed++;
+
+ hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
+ ctrl = qib_read_kreg32(dd, kr_control);
+
+ qib_write_kreg(dd, kr_errmask,
+ dd->cspec->errormask);
+
+ if ((hwerrs & dd->cspec->hwerrmask) ||
+ (ctrl & QLOGIC_IB_C_FREEZEMODE)) {
+ qib_write_kreg(dd, kr_hwerrclear, 0ULL);
+ qib_write_kreg(dd, kr_errclear, 0ULL);
+ /* force re-interrupt of pending events, just in case */
+ qib_write_kreg(dd, kr_intclear, 0ULL);
+ qib_devinfo(dd->pcidev,
+ "errormask fixed(%u) %lx->%lx, ctrl %x hwerr %lx\n",
+ fixed, errormask, (unsigned long)dd->cspec->errormask,
+ ctrl, hwerrs);
+ }
+}
+
+/**
+ * qib_get_6120_faststats - get word counters from chip before they overflow
+ * @opaque: contains a pointer to the qlogic_ib device qib_devdata
+ *
+ * This needs more work; in particular, a decision on whether we really
+ * need traffic_wds done the way it is.
+ * Called from add_timer.
+ */
+static void qib_get_6120_faststats(unsigned long opaque)
+{
+ struct qib_devdata *dd = (struct qib_devdata *) opaque;
+ struct qib_pportdata *ppd = dd->pport;
+ unsigned long flags;
+ u64 traffic_wds;
+
+ /*
+ * don't access the chip while running diags, or memory diags can
+ * fail
+ */
+ if (!(dd->flags & QIB_INITTED) || dd->diag_client)
+		/* but re-arm the timer, for diags case; won't hurt the other cases */
+ goto done;
+
+ /*
+ * We now try to maintain an activity timer, based on traffic
+ * exceeding a threshold, so we need to check the word-counts
+ * even if they are 64-bit.
+ */
+ traffic_wds = qib_portcntr_6120(ppd, cr_wordsend) +
+ qib_portcntr_6120(ppd, cr_wordrcv);
+ spin_lock_irqsave(&dd->eep_st_lock, flags);
+ traffic_wds -= dd->traffic_wds;
+ dd->traffic_wds += traffic_wds;
+ if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
+ atomic_add(5, &dd->active_time); /* S/B #define */
+ spin_unlock_irqrestore(&dd->eep_st_lock, flags);
+
+ qib_chk_6120_errormask(dd);
+done:
+ mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
+}
+
+/* no interrupt fallback for these chips */
+static int qib_6120_nointr_fallback(struct qib_devdata *dd)
+{
+ return 0;
+}
+
+/*
+ * reset the XGXS (between serdes and IBC). Slightly less intrusive
+ * than resetting the IBC or external link state, and useful in some
+ * cases to cause some retraining. To do this right, we reset IBC
+ * as well.
+ */
+static void qib_6120_xgxs_reset(struct qib_pportdata *ppd)
+{
+ u64 val, prev_val;
+ struct qib_devdata *dd = ppd->dd;
+
+ prev_val = qib_read_kreg64(dd, kr_xgxs_cfg);
+ val = prev_val | QLOGIC_IB_XGXS_RESET;
+ prev_val &= ~QLOGIC_IB_XGXS_RESET; /* be sure */
+ qib_write_kreg(dd, kr_control,
+ dd->control & ~QLOGIC_IB_C_LINKENABLE);
+ qib_write_kreg(dd, kr_xgxs_cfg, val);
+ qib_read_kreg32(dd, kr_scratch);
+ qib_write_kreg(dd, kr_xgxs_cfg, prev_val);
+ qib_write_kreg(dd, kr_control, dd->control);
+}
+
+static int qib_6120_get_ib_cfg(struct qib_pportdata *ppd, int which)
+{
+ int ret;
+
+ switch (which) {
+ case QIB_IB_CFG_LWID:
+ ret = ppd->link_width_active;
+ break;
+
+ case QIB_IB_CFG_SPD:
+ ret = ppd->link_speed_active;
+ break;
+
+ case QIB_IB_CFG_LWID_ENB:
+ ret = ppd->link_width_enabled;
+ break;
+
+ case QIB_IB_CFG_SPD_ENB:
+ ret = ppd->link_speed_enabled;
+ break;
+
+ case QIB_IB_CFG_OP_VLS:
+ ret = ppd->vls_operational;
+ break;
+
+ case QIB_IB_CFG_VL_HIGH_CAP:
+ ret = 0;
+ break;
+
+ case QIB_IB_CFG_VL_LOW_CAP:
+ ret = 0;
+ break;
+
+ case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
+ ret = SYM_FIELD(ppd->dd->cspec->ibcctrl, IBCCtrl,
+ OverrunThreshold);
+ break;
+
+ case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
+ ret = SYM_FIELD(ppd->dd->cspec->ibcctrl, IBCCtrl,
+ PhyerrThreshold);
+ break;
+
+ case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
+ /* will only take effect when the link state changes */
+ ret = (ppd->dd->cspec->ibcctrl &
+ SYM_MASK(IBCCtrl, LinkDownDefaultState)) ?
+ IB_LINKINITCMD_SLEEP : IB_LINKINITCMD_POLL;
+ break;
+
+ case QIB_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */
+ ret = 0; /* no heartbeat on this chip */
+ break;
+
+ case QIB_IB_CFG_PMA_TICKS:
+ ret = 250; /* 1 usec. */
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+/*
+ * We assume range checking is already done, if needed.
+ */
+static int qib_6120_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
+{
+ struct qib_devdata *dd = ppd->dd;
+ int ret = 0;
+ u64 val64;
+ u16 lcmd, licmd;
+
+ switch (which) {
+ case QIB_IB_CFG_LWID_ENB:
+ ppd->link_width_enabled = val;
+ break;
+
+ case QIB_IB_CFG_SPD_ENB:
+ ppd->link_speed_enabled = val;
+ break;
+
+ case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
+ val64 = SYM_FIELD(dd->cspec->ibcctrl, IBCCtrl,
+ OverrunThreshold);
+ if (val64 != val) {
+ dd->cspec->ibcctrl &=
+ ~SYM_MASK(IBCCtrl, OverrunThreshold);
+ dd->cspec->ibcctrl |= (u64) val <<
+ SYM_LSB(IBCCtrl, OverrunThreshold);
+ qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl);
+ qib_write_kreg(dd, kr_scratch, 0);
+ }
+ break;
+
+ case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
+ val64 = SYM_FIELD(dd->cspec->ibcctrl, IBCCtrl,
+ PhyerrThreshold);
+ if (val64 != val) {
+ dd->cspec->ibcctrl &=
+ ~SYM_MASK(IBCCtrl, PhyerrThreshold);
+ dd->cspec->ibcctrl |= (u64) val <<
+ SYM_LSB(IBCCtrl, PhyerrThreshold);
+ qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl);
+ qib_write_kreg(dd, kr_scratch, 0);
+ }
+ break;
+
+ case QIB_IB_CFG_PKEYS: /* update pkeys */
+ val64 = (u64) ppd->pkeys[0] | ((u64) ppd->pkeys[1] << 16) |
+ ((u64) ppd->pkeys[2] << 32) |
+ ((u64) ppd->pkeys[3] << 48);
+ qib_write_kreg(dd, kr_partitionkey, val64);
+ break;
+
+ case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
+ /* will only take effect when the link state changes */
+ if (val == IB_LINKINITCMD_POLL)
+ dd->cspec->ibcctrl &=
+ ~SYM_MASK(IBCCtrl, LinkDownDefaultState);
+ else /* SLEEP */
+ dd->cspec->ibcctrl |=
+ SYM_MASK(IBCCtrl, LinkDownDefaultState);
+ qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl);
+ qib_write_kreg(dd, kr_scratch, 0);
+ break;
+
+ case QIB_IB_CFG_MTU: /* update the MTU in IBC */
+ /*
+ * Update our housekeeping variables, and set IBC max
+ * size, same as init code; max IBC is max we allow in
+		 * buffer, less the qword pbc, plus 1 for ICRC, in dwords.
+ * Set even if it's unchanged, print debug message only
+ * on changes.
+ */
+ val = (ppd->ibmaxlen >> 2) + 1;
+ dd->cspec->ibcctrl &= ~SYM_MASK(IBCCtrl, MaxPktLen);
+ dd->cspec->ibcctrl |= (u64)val <<
+ SYM_LSB(IBCCtrl, MaxPktLen);
+ qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl);
+ qib_write_kreg(dd, kr_scratch, 0);
+ break;
+
+ case QIB_IB_CFG_LSTATE: /* set the IB link state */
+ switch (val & 0xffff0000) {
+ case IB_LINKCMD_DOWN:
+ lcmd = QLOGIC_IB_IBCC_LINKCMD_DOWN;
+ if (!dd->cspec->ibdeltainprog) {
+ dd->cspec->ibdeltainprog = 1;
+ dd->cspec->ibsymsnap =
+ read_6120_creg32(dd, cr_ibsymbolerr);
+ dd->cspec->iblnkerrsnap =
+ read_6120_creg32(dd, cr_iblinkerrrecov);
+ }
+ break;
+
+ case IB_LINKCMD_ARMED:
+ lcmd = QLOGIC_IB_IBCC_LINKCMD_ARMED;
+ break;
+
+ case IB_LINKCMD_ACTIVE:
+ lcmd = QLOGIC_IB_IBCC_LINKCMD_ACTIVE;
+ break;
+
+ default:
+ ret = -EINVAL;
+ qib_dev_err(dd, "bad linkcmd req 0x%x\n", val >> 16);
+ goto bail;
+ }
+ switch (val & 0xffff) {
+ case IB_LINKINITCMD_NOP:
+ licmd = 0;
+ break;
+
+ case IB_LINKINITCMD_POLL:
+ licmd = QLOGIC_IB_IBCC_LINKINITCMD_POLL;
+ break;
+
+ case IB_LINKINITCMD_SLEEP:
+ licmd = QLOGIC_IB_IBCC_LINKINITCMD_SLEEP;
+ break;
+
+ case IB_LINKINITCMD_DISABLE:
+ licmd = QLOGIC_IB_IBCC_LINKINITCMD_DISABLE;
+ break;
+
+ default:
+ ret = -EINVAL;
+ qib_dev_err(dd, "bad linkinitcmd req 0x%x\n",
+ val & 0xffff);
+ goto bail;
+ }
+ qib_set_ib_6120_lstate(ppd, lcmd, licmd);
+ goto bail;
+
+ case QIB_IB_CFG_HRTBT:
+ ret = -EINVAL;
+ break;
+
+ default:
+ ret = -EINVAL;
+ }
+bail:
+ return ret;
+}
+
+static int qib_6120_set_loopback(struct qib_pportdata *ppd, const char *what)
+{
+ int ret = 0;
+ if (!strncmp(what, "ibc", 3)) {
+ ppd->dd->cspec->ibcctrl |= SYM_MASK(IBCCtrl, Loopback);
+ qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
+ ppd->dd->unit, ppd->port);
+ } else if (!strncmp(what, "off", 3)) {
+ ppd->dd->cspec->ibcctrl &= ~SYM_MASK(IBCCtrl, Loopback);
+ qib_devinfo(ppd->dd->pcidev, "Disabling IB%u:%u IBC loopback "
+ "(normal)\n", ppd->dd->unit, ppd->port);
+ } else
+ ret = -EINVAL;
+ if (!ret) {
+ qib_write_kreg(ppd->dd, kr_ibcctrl, ppd->dd->cspec->ibcctrl);
+ qib_write_kreg(ppd->dd, kr_scratch, 0);
+ }
+ return ret;
+}
+
+static void pma_6120_timer(unsigned long data)
+{
+ struct qib_pportdata *ppd = (struct qib_pportdata *)data;
+ struct qib_chip_specific *cs = ppd->dd->cspec;
+ struct qib_ibport *ibp = &ppd->ibport_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ibp->lock, flags);
+ if (cs->pma_sample_status == IB_PMA_SAMPLE_STATUS_STARTED) {
+ cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_RUNNING;
+ qib_snapshot_counters(ppd, &cs->sword, &cs->rword,
+ &cs->spkts, &cs->rpkts, &cs->xmit_wait);
+ mod_timer(&cs->pma_timer,
+ jiffies + usecs_to_jiffies(ibp->pma_sample_interval));
+ } else if (cs->pma_sample_status == IB_PMA_SAMPLE_STATUS_RUNNING) {
+ u64 ta, tb, tc, td, te;
+
+ cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_DONE;
+ qib_snapshot_counters(ppd, &ta, &tb, &tc, &td, &te);
+
+ cs->sword = ta - cs->sword;
+ cs->rword = tb - cs->rword;
+ cs->spkts = tc - cs->spkts;
+ cs->rpkts = td - cs->rpkts;
+ cs->xmit_wait = te - cs->xmit_wait;
+ }
+ spin_unlock_irqrestore(&ibp->lock, flags);
+}
+
+/*
+ * Note that the caller has the ibp->lock held.
+ */
+static void qib_set_cntr_6120_sample(struct qib_pportdata *ppd, u32 intv,
+ u32 start)
+{
+ struct qib_chip_specific *cs = ppd->dd->cspec;
+
+ if (start && intv) {
+ cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_STARTED;
+ mod_timer(&cs->pma_timer, jiffies + usecs_to_jiffies(start));
+ } else if (intv) {
+ cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_RUNNING;
+ qib_snapshot_counters(ppd, &cs->sword, &cs->rword,
+ &cs->spkts, &cs->rpkts, &cs->xmit_wait);
+ mod_timer(&cs->pma_timer, jiffies + usecs_to_jiffies(intv));
+ } else {
+ cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_DONE;
+ cs->sword = 0;
+ cs->rword = 0;
+ cs->spkts = 0;
+ cs->rpkts = 0;
+ cs->xmit_wait = 0;
+ }
+}
+
+static u32 qib_6120_iblink_state(u64 ibcs)
+{
+ u32 state = (u32)SYM_FIELD(ibcs, IBCStatus, LinkState);
+
+ switch (state) {
+ case IB_6120_L_STATE_INIT:
+ state = IB_PORT_INIT;
+ break;
+ case IB_6120_L_STATE_ARM:
+ state = IB_PORT_ARMED;
+ break;
+ case IB_6120_L_STATE_ACTIVE:
+ /* fall through */
+ case IB_6120_L_STATE_ACT_DEFER:
+ state = IB_PORT_ACTIVE;
+ break;
+ default: /* fall through */
+ case IB_6120_L_STATE_DOWN:
+ state = IB_PORT_DOWN;
+ break;
+ }
+ return state;
+}
+
+/* returns the IBTA port state, rather than the IBC link training state */
+static u8 qib_6120_phys_portstate(u64 ibcs)
+{
+ u8 state = (u8)SYM_FIELD(ibcs, IBCStatus, LinkTrainingState);
+ return qib_6120_physportstate[state];
+}
+
+static int qib_6120_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags &= ~QIBL_IB_FORCE_NOTIFY;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+
+ if (ibup) {
+ if (ppd->dd->cspec->ibdeltainprog) {
+ ppd->dd->cspec->ibdeltainprog = 0;
+ ppd->dd->cspec->ibsymdelta +=
+ read_6120_creg32(ppd->dd, cr_ibsymbolerr) -
+ ppd->dd->cspec->ibsymsnap;
+ ppd->dd->cspec->iblnkerrdelta +=
+ read_6120_creg32(ppd->dd, cr_iblinkerrrecov) -
+ ppd->dd->cspec->iblnkerrsnap;
+ }
+ qib_hol_init(ppd);
+ } else {
+ ppd->dd->cspec->lli_counter = 0;
+ if (!ppd->dd->cspec->ibdeltainprog) {
+ ppd->dd->cspec->ibdeltainprog = 1;
+ ppd->dd->cspec->ibsymsnap =
+ read_6120_creg32(ppd->dd, cr_ibsymbolerr);
+ ppd->dd->cspec->iblnkerrsnap =
+ read_6120_creg32(ppd->dd, cr_iblinkerrrecov);
+ }
+ qib_hol_down(ppd);
+ }
+
+ qib_6120_setup_setextled(ppd, ibup);
+
+ return 0;
+}
+
+/* Does read/modify/write to appropriate registers to
+ * set output and direction bits selected by mask.
+ * These are in their canonical positions (e.g. lsb of
+ * dir will end up in D48 of extctrl on existing chips).
+ * Returns contents of GP Inputs.
+ */
+static int gpio_6120_mod(struct qib_devdata *dd, u32 out, u32 dir, u32 mask)
+{
+ u64 read_val, new_out;
+ unsigned long flags;
+
+ if (mask) {
+ /* some bits being written, lock access to GPIO */
+ dir &= mask;
+ out &= mask;
+ spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
+ dd->cspec->extctrl &= ~((u64)mask << SYM_LSB(EXTCtrl, GPIOOe));
+ dd->cspec->extctrl |= ((u64) dir << SYM_LSB(EXTCtrl, GPIOOe));
+ new_out = (dd->cspec->gpio_out & ~mask) | out;
+
+ qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
+ qib_write_kreg(dd, kr_gpio_out, new_out);
+ dd->cspec->gpio_out = new_out;
+ spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
+ }
+ /*
+ * It is unlikely that a read at this time would get valid
+ * data on a pin whose direction line was set in the same
+ * call to this function. We include the read here because
+ * that allows us to potentially combine a change on one pin with
+ * a read on another, and because the old code did something like
+ * this.
+ */
+ read_val = qib_read_kreg64(dd, kr_extstatus);
+ return SYM_FIELD(read_val, EXTStatus, GPIOIn);
+}
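+
+/*
+ * Usage sketch (hypothetical values, for illustration only): to drive
+ * GPIO pin 3 high as an output while leaving the other pins alone, a
+ * caller would pass out, dir and mask all set to (1 << 3):
+ *
+ *	gpio_6120_mod(dd, 1 << 3, 1 << 3, 1 << 3);
+ *
+ * while gpio_6120_mod(dd, 0, 0, 0) performs no writes and simply
+ * returns the current GPIO input pins from EXTStatus.
+ */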
+
+/*
+ * Read fundamental info we need to use the chip. These are
+ * the registers that describe chip capabilities, and are
+ * saved in shadow registers.
+ */
+static void get_6120_chip_params(struct qib_devdata *dd)
+{
+ u64 val;
+ u32 piobufs;
+ int mtu;
+
+ dd->uregbase = qib_read_kreg32(dd, kr_userregbase);
+
+ dd->rcvtidcnt = qib_read_kreg32(dd, kr_rcvtidcnt);
+ dd->rcvtidbase = qib_read_kreg32(dd, kr_rcvtidbase);
+ dd->rcvegrbase = qib_read_kreg32(dd, kr_rcvegrbase);
+ dd->palign = qib_read_kreg32(dd, kr_palign);
+ dd->piobufbase = qib_read_kreg64(dd, kr_sendpiobufbase);
+ dd->pio2k_bufbase = dd->piobufbase & 0xffffffff;
+
+ dd->rcvhdrcnt = qib_read_kreg32(dd, kr_rcvegrcnt);
+
+ val = qib_read_kreg64(dd, kr_sendpiosize);
+ dd->piosize2k = val & ~0U;
+ dd->piosize4k = val >> 32;
+
+ mtu = ib_mtu_enum_to_int(qib_ibmtu);
+ if (mtu == -1)
+ mtu = QIB_DEFAULT_MTU;
+ dd->pport->ibmtu = (u32)mtu;
+
+ val = qib_read_kreg64(dd, kr_sendpiobufcnt);
+ dd->piobcnt2k = val & ~0U;
+ dd->piobcnt4k = val >> 32;
+ /* these may be adjusted in init_chip_wc_pat() */
+ dd->pio2kbase = (u32 __iomem *)
+ (((char __iomem *)dd->kregbase) + dd->pio2k_bufbase);
+ if (dd->piobcnt4k) {
+ dd->pio4kbase = (u32 __iomem *)
+ (((char __iomem *) dd->kregbase) +
+ (dd->piobufbase >> 32));
+ /*
+ * 4K buffers take 2 pages; we use roundup just to be
+ * paranoid; we calculate it once here, rather than on
+		 * every buf allocation
+ */
+ dd->align4k = ALIGN(dd->piosize4k, dd->palign);
+ }
+
+ piobufs = dd->piobcnt4k + dd->piobcnt2k;
+
+ dd->pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) /
+ (sizeof(u64) * BITS_PER_BYTE / 2);
+}
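+
+/*
+ * Note on the pioavregs arithmetic above (illustrative numbers only):
+ * each 64-bit pioavail register carries 2 bits per send buffer, i.e.
+ * 32 buffers per register, so for example piobcnt2k + piobcnt4k == 136
+ * would yield pioavregs == ALIGN(136, 32) / 32 == 5.
+ */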
+
+/*
+ * The chip base addresses in cspec and cpspec have to be set
+ * after possible init_chip_wc_pat(), rather than in
+ * get_6120_chip_params(), so this is split out as a separate function.
+ */
+static void set_6120_baseaddrs(struct qib_devdata *dd)
+{
+ u32 cregbase;
+ cregbase = qib_read_kreg32(dd, kr_counterregbase);
+ dd->cspec->cregbase = (u64 __iomem *)
+ ((char __iomem *) dd->kregbase + cregbase);
+
+ dd->egrtidbase = (u64 __iomem *)
+ ((char __iomem *) dd->kregbase + dd->rcvegrbase);
+}
+
+/*
+ * Write the final few registers that depend on some of the
+ * init setup. Done late in init, just before bringing up
+ * the serdes.
+ */
+static int qib_late_6120_initreg(struct qib_devdata *dd)
+{
+ int ret = 0;
+ u64 val;
+
+ qib_write_kreg(dd, kr_rcvhdrentsize, dd->rcvhdrentsize);
+ qib_write_kreg(dd, kr_rcvhdrsize, dd->rcvhdrsize);
+ qib_write_kreg(dd, kr_rcvhdrcnt, dd->rcvhdrcnt);
+ qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys);
+ val = qib_read_kreg64(dd, kr_sendpioavailaddr);
+ if (val != dd->pioavailregs_phys) {
+ qib_dev_err(dd, "Catastrophic software error, "
+ "SendPIOAvailAddr written as %lx, "
+ "read back as %llx\n",
+ (unsigned long) dd->pioavailregs_phys,
+ (unsigned long long) val);
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+static int init_6120_variables(struct qib_devdata *dd)
+{
+ int ret = 0;
+ struct qib_pportdata *ppd;
+ u32 sbufs;
+
+ ppd = (struct qib_pportdata *)(dd + 1);
+ dd->pport = ppd;
+ dd->num_pports = 1;
+
+ dd->cspec = (struct qib_chip_specific *)(ppd + dd->num_pports);
+ ppd->cpspec = NULL; /* not used in this chip */
+
+ spin_lock_init(&dd->cspec->kernel_tid_lock);
+ spin_lock_init(&dd->cspec->user_tid_lock);
+ spin_lock_init(&dd->cspec->rcvmod_lock);
+ spin_lock_init(&dd->cspec->gpio_lock);
+
+ /* we haven't yet set QIB_PRESENT, so use read directly */
+ dd->revision = readq(&dd->kregbase[kr_revision]);
+
+ if ((dd->revision & 0xffffffffU) == 0xffffffffU) {
+ qib_dev_err(dd, "Revision register read failure, "
+ "giving up initialization\n");
+ ret = -ENODEV;
+ goto bail;
+ }
+ dd->flags |= QIB_PRESENT; /* now register routines work */
+
+ dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R,
+ ChipRevMajor);
+ dd->minrev = (u8) SYM_FIELD(dd->revision, Revision_R,
+ ChipRevMinor);
+
+ get_6120_chip_params(dd);
+ pe_boardname(dd); /* fill in boardname */
+
+ /*
+ * GPIO bits for TWSI data and clock,
+ * used for serial EEPROM.
+ */
+ dd->gpio_sda_num = _QIB_GPIO_SDA_NUM;
+ dd->gpio_scl_num = _QIB_GPIO_SCL_NUM;
+ dd->twsi_eeprom_dev = QIB_TWSI_NO_DEV;
+
+ if (qib_unordered_wc())
+ dd->flags |= QIB_PIO_FLUSH_WC;
+
+ /*
+ * EEPROM error log 0 is TXE Parity errors. 1 is RXE Parity.
+ * 2 is Some Misc, 3 is reserved for future.
+ */
+ dd->eep_st_masks[0].hwerrs_to_log = HWE_MASK(TXEMemParityErr);
+
+ /* Ignore errors in PIO/PBC on systems with unordered write-combining */
+ if (qib_unordered_wc())
+ dd->eep_st_masks[0].hwerrs_to_log &= ~TXE_PIO_PARITY;
+
+ dd->eep_st_masks[1].hwerrs_to_log = HWE_MASK(RXEMemParityErr);
+
+ dd->eep_st_masks[2].errs_to_log = ERR_MASK(ResetNegated);
+
+ qib_init_pportdata(ppd, dd, 0, 1);
+ ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
+ ppd->link_speed_supported = QIB_IB_SDR;
+ ppd->link_width_enabled = IB_WIDTH_4X;
+ ppd->link_speed_enabled = ppd->link_speed_supported;
+ /* these can't change for this chip, so set once */
+ ppd->link_width_active = ppd->link_width_enabled;
+ ppd->link_speed_active = ppd->link_speed_enabled;
+ ppd->vls_supported = IB_VL_VL0;
+ ppd->vls_operational = ppd->vls_supported;
+
+ dd->rcvhdrentsize = QIB_RCVHDR_ENTSIZE;
+ dd->rcvhdrsize = QIB_DFLT_RCVHDRSIZE;
+ dd->rhf_offset = 0;
+
+ /* we always allocate at least 2048 bytes for eager buffers */
+ ret = ib_mtu_enum_to_int(qib_ibmtu);
+ dd->rcvegrbufsize = ret != -1 ? max(ret, 2048) : QIB_DEFAULT_MTU;
+
+ qib_6120_tidtemplate(dd);
+
+ /*
+ * We can request a receive interrupt for 1 or
+ * more packets from current offset. For now, we set this
+ * up for a single packet.
+ */
+ dd->rhdrhead_intr_off = 1ULL << 32;
+
+ /* setup the stats timer; the add_timer is done at end of init */
+ init_timer(&dd->stats_timer);
+ dd->stats_timer.function = qib_get_6120_faststats;
+ dd->stats_timer.data = (unsigned long) dd;
+
+ init_timer(&dd->cspec->pma_timer);
+ dd->cspec->pma_timer.function = pma_6120_timer;
+ dd->cspec->pma_timer.data = (unsigned long) ppd;
+
+ dd->ureg_align = qib_read_kreg32(dd, kr_palign);
+
+ dd->piosize2kmax_dwords = dd->piosize2k >> 2;
+ qib_6120_config_ctxts(dd);
+ qib_set_ctxtcnt(dd);
+
+ if (qib_wc_pat) {
+ ret = init_chip_wc_pat(dd, 0);
+ if (ret)
+ goto bail;
+ }
+ set_6120_baseaddrs(dd); /* set chip access pointers now */
+
+ ret = 0;
+ if (qib_mini_init)
+ goto bail;
+
+ qib_num_cfg_vls = 1; /* if any 6120's, only one VL */
+
+ ret = qib_create_ctxts(dd);
+ init_6120_cntrnames(dd);
+
+ /* use all of 4KB buffers for the kernel, otherwise 16 */
+ sbufs = dd->piobcnt4k ? dd->piobcnt4k : 16;
+
+ dd->lastctxt_piobuf = dd->piobcnt2k + dd->piobcnt4k - sbufs;
+ dd->pbufsctxt = dd->lastctxt_piobuf /
+ (dd->cfgctxts - dd->first_user_ctxt);
+
+ if (ret)
+ goto bail;
+bail:
+ return ret;
+}
+
+/*
+ * For this chip, we want to use the same buffer every time
+ * when we are trying to bring the link up (they are always VL15
+ * packets). At that link state the packet should always go out immediately
+ * (or at least be discarded at the tx interface if the link is down).
+ * If it doesn't, and the buffer isn't available, that means some other
+ * sender has gotten ahead of us, and is preventing our packet from going
+ * out. In that case, we flush all packets, and try again. If that still
+ * fails, we fail the request, and hope things work the next time around.
+ *
+ * We don't need very complicated heuristics on whether the packet had
+ * time to go out or not, since even at SDR 1X, it goes out in very short
+ * time periods, covered by the chip reads done here and as part of the
+ * flush.
+ */
+static u32 __iomem *get_6120_link_buf(struct qib_pportdata *ppd, u32 *bnum)
+{
+ u32 __iomem *buf;
+ u32 lbuf = ppd->dd->piobcnt2k + ppd->dd->piobcnt4k - 1;
+
+ /*
+ * always blip to get avail list updated, since it's almost
+ * always needed, and is fairly cheap.
+ */
+ sendctrl_6120_mod(ppd->dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
+ qib_read_kreg64(ppd->dd, kr_scratch); /* extra chip flush */
+ buf = qib_getsendbuf_range(ppd->dd, bnum, lbuf, lbuf);
+ if (buf)
+ goto done;
+
+ sendctrl_6120_mod(ppd, QIB_SENDCTRL_DISARM_ALL | QIB_SENDCTRL_FLUSH |
+ QIB_SENDCTRL_AVAIL_BLIP);
+ ppd->dd->upd_pio_shadow = 1; /* update our idea of what's busy */
+ qib_read_kreg64(ppd->dd, kr_scratch); /* extra chip flush */
+ buf = qib_getsendbuf_range(ppd->dd, bnum, lbuf, lbuf);
+done:
+ return buf;
+}
+
+static u32 __iomem *qib_6120_getsendbuf(struct qib_pportdata *ppd, u64 pbc,
+ u32 *pbufnum)
+{
+ u32 first, last, plen = pbc & QIB_PBC_LENGTH_MASK;
+ struct qib_devdata *dd = ppd->dd;
+ u32 __iomem *buf;
+
+ if (((pbc >> 32) & PBC_6120_VL15_SEND_CTRL) &&
+ !(ppd->lflags & (QIBL_IB_AUTONEG_INPROG | QIBL_LINKACTIVE)))
+ buf = get_6120_link_buf(ppd, pbufnum);
+ else {
+
+ if ((plen + 1) > dd->piosize2kmax_dwords)
+ first = dd->piobcnt2k;
+ else
+ first = 0;
+ /* try 4k if all 2k busy, so same last for both sizes */
+ last = dd->piobcnt2k + dd->piobcnt4k - 1;
+ buf = qib_getsendbuf_range(dd, pbufnum, first, last);
+ }
+ return buf;
+}
+
+static int init_sdma_6120_regs(struct qib_pportdata *ppd)
+{
+ return -ENODEV;
+}
+
+static u16 qib_sdma_6120_gethead(struct qib_pportdata *ppd)
+{
+ return 0;
+}
+
+static int qib_sdma_6120_busy(struct qib_pportdata *ppd)
+{
+ return 0;
+}
+
+static void qib_sdma_update_6120_tail(struct qib_pportdata *ppd, u16 tail)
+{
+}
+
+static void qib_6120_sdma_sendctrl(struct qib_pportdata *ppd, unsigned op)
+{
+}
+
+static void qib_sdma_set_6120_desc_cnt(struct qib_pportdata *ppd, unsigned cnt)
+{
+}
+
+/*
+ * the pbc doesn't need a VL15 indicator, but we need it for link_buf.
+ * The chip ignores the bit if set.
+ */
+static u32 qib_6120_setpbc_control(struct qib_pportdata *ppd, u32 plen,
+ u8 srate, u8 vl)
+{
+ return vl == 15 ? PBC_6120_VL15_SEND_CTRL : 0;
+}
+
+static void qib_6120_initvl15_bufs(struct qib_devdata *dd)
+{
+}
+
+static void qib_6120_init_ctxt(struct qib_ctxtdata *rcd)
+{
+ rcd->rcvegrcnt = rcd->dd->rcvhdrcnt;
+ rcd->rcvegr_tid_base = rcd->ctxt * rcd->rcvegrcnt;
+}
+
+static void qib_6120_txchk_change(struct qib_devdata *dd, u32 start,
+ u32 len, u32 avail, struct qib_ctxtdata *rcd)
+{
+}
+
+static void writescratch(struct qib_devdata *dd, u32 val)
+{
+ (void) qib_write_kreg(dd, kr_scratch, val);
+}
+
+static int qib_6120_tempsense_rd(struct qib_devdata *dd, int regnum)
+{
+ return -ENXIO;
+}
+
+/* Dummy function, as 6120 boards never disable EEPROM Write */
+static int qib_6120_eeprom_wen(struct qib_devdata *dd, int wen)
+{
+ return 1;
+}
+
+/**
+ * qib_init_iba6120_funcs - set up the chip-specific function pointers
+ * @pdev: pci_dev of the qlogic_ib device
+ * @ent: pci_device_id matching this chip
+ *
+ * This is global, and is called directly at init to set up the
+ * chip-specific function pointers for later use.
+ *
+ * It also allocates/partially-inits the qib_devdata struct for
+ * this device.
+ */
+struct qib_devdata *qib_init_iba6120_funcs(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct qib_devdata *dd;
+ int ret;
+
+ dd = qib_alloc_devdata(pdev, sizeof(struct qib_pportdata) +
+ sizeof(struct qib_chip_specific));
+ if (IS_ERR(dd))
+ goto bail;
+
+ dd->f_bringup_serdes = qib_6120_bringup_serdes;
+ dd->f_cleanup = qib_6120_setup_cleanup;
+ dd->f_clear_tids = qib_6120_clear_tids;
+ dd->f_free_irq = qib_6120_free_irq;
+ dd->f_get_base_info = qib_6120_get_base_info;
+ dd->f_get_msgheader = qib_6120_get_msgheader;
+ dd->f_getsendbuf = qib_6120_getsendbuf;
+ dd->f_gpio_mod = gpio_6120_mod;
+ dd->f_eeprom_wen = qib_6120_eeprom_wen;
+ dd->f_hdrqempty = qib_6120_hdrqempty;
+ dd->f_ib_updown = qib_6120_ib_updown;
+ dd->f_init_ctxt = qib_6120_init_ctxt;
+ dd->f_initvl15_bufs = qib_6120_initvl15_bufs;
+ dd->f_intr_fallback = qib_6120_nointr_fallback;
+ dd->f_late_initreg = qib_late_6120_initreg;
+ dd->f_setpbc_control = qib_6120_setpbc_control;
+ dd->f_portcntr = qib_portcntr_6120;
+ dd->f_put_tid = (dd->minrev >= 2) ?
+ qib_6120_put_tid_2 :
+ qib_6120_put_tid;
+ dd->f_quiet_serdes = qib_6120_quiet_serdes;
+ dd->f_rcvctrl = rcvctrl_6120_mod;
+ dd->f_read_cntrs = qib_read_6120cntrs;
+ dd->f_read_portcntrs = qib_read_6120portcntrs;
+ dd->f_reset = qib_6120_setup_reset;
+ dd->f_init_sdma_regs = init_sdma_6120_regs;
+ dd->f_sdma_busy = qib_sdma_6120_busy;
+ dd->f_sdma_gethead = qib_sdma_6120_gethead;
+ dd->f_sdma_sendctrl = qib_6120_sdma_sendctrl;
+ dd->f_sdma_set_desc_cnt = qib_sdma_set_6120_desc_cnt;
+ dd->f_sdma_update_tail = qib_sdma_update_6120_tail;
+ dd->f_sendctrl = sendctrl_6120_mod;
+ dd->f_set_armlaunch = qib_set_6120_armlaunch;
+ dd->f_set_cntr_sample = qib_set_cntr_6120_sample;
+ dd->f_iblink_state = qib_6120_iblink_state;
+ dd->f_ibphys_portstate = qib_6120_phys_portstate;
+ dd->f_get_ib_cfg = qib_6120_get_ib_cfg;
+ dd->f_set_ib_cfg = qib_6120_set_ib_cfg;
+ dd->f_set_ib_loopback = qib_6120_set_loopback;
+ dd->f_set_intr_state = qib_6120_set_intr_state;
+ dd->f_setextled = qib_6120_setup_setextled;
+ dd->f_txchk_change = qib_6120_txchk_change;
+ dd->f_update_usrhead = qib_update_6120_usrhead;
+ dd->f_wantpiobuf_intr = qib_wantpiobuf_6120_intr;
+ dd->f_xgxs_reset = qib_6120_xgxs_reset;
+ dd->f_writescratch = writescratch;
+ dd->f_tempsense_rd = qib_6120_tempsense_rd;
+ /*
+ * Do remaining pcie setup and save pcie values in dd.
+ * Any error printing is already done by the init code.
+ * On return, we have the chip mapped and accessible,
+ * but chip registers are not set up until start of
+ * init_6120_variables.
+ */
+ ret = qib_pcie_ddinit(dd, pdev, ent);
+ if (ret < 0)
+ goto bail_free;
+
+ /* initialize chip-specific variables */
+ ret = init_6120_variables(dd);
+ if (ret)
+ goto bail_cleanup;
+
+ if (qib_mini_init)
+ goto bail;
+
+ if (qib_pcie_params(dd, 8, NULL, NULL))
+ qib_dev_err(dd, "Failed to setup PCIe or interrupts; "
+ "continuing anyway\n");
+ dd->cspec->irq = pdev->irq; /* save IRQ */
+
+ /* clear diagctrl register, in case diags were running and crashed */
+ qib_write_kreg(dd, kr_hwdiagctrl, 0);
+
+ if (qib_read_kreg64(dd, kr_hwerrstatus) &
+ QLOGIC_IB_HWE_SERDESPLLFAILED)
+ qib_write_kreg(dd, kr_hwerrclear,
+ QLOGIC_IB_HWE_SERDESPLLFAILED);
+
+ /* setup interrupt handler (interrupt type handled above) */
+ qib_setup_6120_interrupt(dd);
+ /* Note that qpn_mask is set by qib_6120_config_ctxts() first */
+ qib_6120_init_hwerrors(dd);
+
+ goto bail;
+
+bail_cleanup:
+ qib_pcie_ddcleanup(dd);
+bail_free:
+ qib_free_devdata(dd);
+ dd = ERR_PTR(ret);
+bail:
+ return dd;
+}
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c
new file mode 100644
index 000000000000..6fd8d74e7392
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_iba7220.c
@@ -0,0 +1,4618 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
+ * All rights reserved.
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+/*
+ * This file contains all of the code that is specific to the
+ * QLogic_IB 7220 chip (except that specific to the SerDes)
+ */
+
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <rdma/ib_verbs.h>
+
+#include "qib.h"
+#include "qib_7220.h"
+
+static void qib_setup_7220_setextled(struct qib_pportdata *, u32);
+static void qib_7220_handle_hwerrors(struct qib_devdata *, char *, size_t);
+static void sendctrl_7220_mod(struct qib_pportdata *ppd, u32 op);
+static u32 qib_7220_iblink_state(u64);
+static u8 qib_7220_phys_portstate(u64);
+static void qib_sdma_update_7220_tail(struct qib_pportdata *, u16);
+static void qib_set_ib_7220_lstate(struct qib_pportdata *, u16, u16);
+
+/*
+ * This file contains almost all the chip-specific register information and
+ * access functions for the QLogic QLogic_IB 7220 PCI-Express chip, with the
+ * exception of SerDes support, which is in qib_sd7220.c.
+ */
+
+/* Below uses machine-generated qib_chipnum_regs.h file */
+#define KREG_IDX(regname) (QIB_7220_##regname##_OFFS / sizeof(u64))
+
+/* Use defines to tie machine-generated names to lower-case names */
+#define kr_control KREG_IDX(Control)
+#define kr_counterregbase KREG_IDX(CntrRegBase)
+#define kr_errclear KREG_IDX(ErrClear)
+#define kr_errmask KREG_IDX(ErrMask)
+#define kr_errstatus KREG_IDX(ErrStatus)
+#define kr_extctrl KREG_IDX(EXTCtrl)
+#define kr_extstatus KREG_IDX(EXTStatus)
+#define kr_gpio_clear KREG_IDX(GPIOClear)
+#define kr_gpio_mask KREG_IDX(GPIOMask)
+#define kr_gpio_out KREG_IDX(GPIOOut)
+#define kr_gpio_status KREG_IDX(GPIOStatus)
+#define kr_hrtbt_guid KREG_IDX(HRTBT_GUID)
+#define kr_hwdiagctrl KREG_IDX(HwDiagCtrl)
+#define kr_hwerrclear KREG_IDX(HwErrClear)
+#define kr_hwerrmask KREG_IDX(HwErrMask)
+#define kr_hwerrstatus KREG_IDX(HwErrStatus)
+#define kr_ibcctrl KREG_IDX(IBCCtrl)
+#define kr_ibcddrctrl KREG_IDX(IBCDDRCtrl)
+#define kr_ibcddrstatus KREG_IDX(IBCDDRStatus)
+#define kr_ibcstatus KREG_IDX(IBCStatus)
+#define kr_ibserdesctrl KREG_IDX(IBSerDesCtrl)
+#define kr_intclear KREG_IDX(IntClear)
+#define kr_intmask KREG_IDX(IntMask)
+#define kr_intstatus KREG_IDX(IntStatus)
+#define kr_ncmodectrl KREG_IDX(IBNCModeCtrl)
+#define kr_palign KREG_IDX(PageAlign)
+#define kr_partitionkey KREG_IDX(RcvPartitionKey)
+#define kr_portcnt KREG_IDX(PortCnt)
+#define kr_rcvbthqp KREG_IDX(RcvBTHQP)
+#define kr_rcvctrl KREG_IDX(RcvCtrl)
+#define kr_rcvegrbase KREG_IDX(RcvEgrBase)
+#define kr_rcvegrcnt KREG_IDX(RcvEgrCnt)
+#define kr_rcvhdrcnt KREG_IDX(RcvHdrCnt)
+#define kr_rcvhdrentsize KREG_IDX(RcvHdrEntSize)
+#define kr_rcvhdrsize KREG_IDX(RcvHdrSize)
+#define kr_rcvpktledcnt KREG_IDX(RcvPktLEDCnt)
+#define kr_rcvtidbase KREG_IDX(RcvTIDBase)
+#define kr_rcvtidcnt KREG_IDX(RcvTIDCnt)
+#define kr_revision KREG_IDX(Revision)
+#define kr_scratch KREG_IDX(Scratch)
+#define kr_sendbuffererror KREG_IDX(SendBufErr0)
+#define kr_sendctrl KREG_IDX(SendCtrl)
+#define kr_senddmabase KREG_IDX(SendDmaBase)
+#define kr_senddmabufmask0 KREG_IDX(SendDmaBufMask0)
+#define kr_senddmabufmask1 (KREG_IDX(SendDmaBufMask0) + 1)
+#define kr_senddmabufmask2 (KREG_IDX(SendDmaBufMask0) + 2)
+#define kr_senddmahead KREG_IDX(SendDmaHead)
+#define kr_senddmaheadaddr KREG_IDX(SendDmaHeadAddr)
+#define kr_senddmalengen KREG_IDX(SendDmaLenGen)
+#define kr_senddmastatus KREG_IDX(SendDmaStatus)
+#define kr_senddmatail KREG_IDX(SendDmaTail)
+#define kr_sendpioavailaddr KREG_IDX(SendBufAvailAddr)
+#define kr_sendpiobufbase KREG_IDX(SendBufBase)
+#define kr_sendpiobufcnt KREG_IDX(SendBufCnt)
+#define kr_sendpiosize KREG_IDX(SendBufSize)
+#define kr_sendregbase KREG_IDX(SendRegBase)
+#define kr_userregbase KREG_IDX(UserRegBase)
+#define kr_xgxs_cfg KREG_IDX(XGXSCfg)
+
+/* These must only be written via qib_write_kreg_ctxt() */
+#define kr_rcvhdraddr KREG_IDX(RcvHdrAddr0)
+#define kr_rcvhdrtailaddr KREG_IDX(RcvHdrTailAddr0)
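+/*
+ * For clarity: the chip provides one copy of each of these registers per
+ * context, laid out as consecutive 64-bit registers, which is why
+ * qib_write_kreg_ctxt() below simply adds the context number to the
+ * register index.
+ */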
+
+
+#define CREG_IDX(regname) ((QIB_7220_##regname##_OFFS - \
+ QIB_7220_LBIntCnt_OFFS) / sizeof(u64))
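+/*
+ * Counter registers are indexed relative to LBIntCnt, so by construction
+ * CREG_IDX(LBIntCnt) == 0; the cr_* values below index directly into the
+ * cregbase[] array used by read_7220_creg() and write_7220_creg().
+ */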
+
+#define cr_badformat CREG_IDX(RxVersionErrCnt)
+#define cr_erricrc CREG_IDX(RxICRCErrCnt)
+#define cr_errlink CREG_IDX(RxLinkMalformCnt)
+#define cr_errlpcrc CREG_IDX(RxLPCRCErrCnt)
+#define cr_errpkey CREG_IDX(RxPKeyMismatchCnt)
+#define cr_rcvflowctrl_err CREG_IDX(RxFlowCtrlViolCnt)
+#define cr_err_rlen CREG_IDX(RxLenErrCnt)
+#define cr_errslen CREG_IDX(TxLenErrCnt)
+#define cr_errtidfull CREG_IDX(RxTIDFullErrCnt)
+#define cr_errtidvalid CREG_IDX(RxTIDValidErrCnt)
+#define cr_errvcrc CREG_IDX(RxVCRCErrCnt)
+#define cr_ibstatuschange CREG_IDX(IBStatusChangeCnt)
+#define cr_lbint CREG_IDX(LBIntCnt)
+#define cr_invalidrlen CREG_IDX(RxMaxMinLenErrCnt)
+#define cr_invalidslen CREG_IDX(TxMaxMinLenErrCnt)
+#define cr_lbflowstall CREG_IDX(LBFlowStallCnt)
+#define cr_pktrcv CREG_IDX(RxDataPktCnt)
+#define cr_pktrcvflowctrl CREG_IDX(RxFlowPktCnt)
+#define cr_pktsend CREG_IDX(TxDataPktCnt)
+#define cr_pktsendflow CREG_IDX(TxFlowPktCnt)
+#define cr_portovfl CREG_IDX(RxP0HdrEgrOvflCnt)
+#define cr_rcvebp CREG_IDX(RxEBPCnt)
+#define cr_rcvovfl CREG_IDX(RxBufOvflCnt)
+#define cr_senddropped CREG_IDX(TxDroppedPktCnt)
+#define cr_sendstall CREG_IDX(TxFlowStallCnt)
+#define cr_sendunderrun CREG_IDX(TxUnderrunCnt)
+#define cr_wordrcv CREG_IDX(RxDwordCnt)
+#define cr_wordsend CREG_IDX(TxDwordCnt)
+#define cr_txunsupvl CREG_IDX(TxUnsupVLErrCnt)
+#define cr_rxdroppkt CREG_IDX(RxDroppedPktCnt)
+#define cr_iblinkerrrecov CREG_IDX(IBLinkErrRecoveryCnt)
+#define cr_iblinkdown CREG_IDX(IBLinkDownedCnt)
+#define cr_ibsymbolerr CREG_IDX(IBSymbolErrCnt)
+#define cr_vl15droppedpkt CREG_IDX(RxVL15DroppedPktCnt)
+#define cr_rxotherlocalphyerr CREG_IDX(RxOtherLocalPhyErrCnt)
+#define cr_excessbufferovfl CREG_IDX(ExcessBufferOvflCnt)
+#define cr_locallinkintegrityerr CREG_IDX(LocalLinkIntegrityErrCnt)
+#define cr_rxvlerr CREG_IDX(RxVlErrCnt)
+#define cr_rxdlidfltr CREG_IDX(RxDlidFltrCnt)
+#define cr_psstat CREG_IDX(PSStat)
+#define cr_psstart CREG_IDX(PSStart)
+#define cr_psinterval CREG_IDX(PSInterval)
+#define cr_psrcvdatacount CREG_IDX(PSRcvDataCount)
+#define cr_psrcvpktscount CREG_IDX(PSRcvPktsCount)
+#define cr_psxmitdatacount CREG_IDX(PSXmitDataCount)
+#define cr_psxmitpktscount CREG_IDX(PSXmitPktsCount)
+#define cr_psxmitwaitcount CREG_IDX(PSXmitWaitCount)
+#define cr_txsdmadesc CREG_IDX(TxSDmaDescCnt)
+#define cr_pcieretrydiag CREG_IDX(PcieRetryBufDiagQwordCnt)
+
+#define SYM_RMASK(regname, fldname) ((u64) \
+ QIB_7220_##regname##_##fldname##_RMASK)
+#define SYM_MASK(regname, fldname) ((u64) \
+ QIB_7220_##regname##_##fldname##_RMASK << \
+ QIB_7220_##regname##_##fldname##_LSB)
+#define SYM_LSB(regname, fldname) (QIB_7220_##regname##_##fldname##_LSB)
+#define SYM_FIELD(value, regname, fldname) ((u64) \
+ (((value) >> SYM_LSB(regname, fldname)) & \
+ SYM_RMASK(regname, fldname)))
+#define ERR_MASK(fldname) SYM_MASK(ErrMask, fldname##Mask)
+#define HWE_MASK(fldname) SYM_MASK(HwErrMask, fldname##Mask)
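+/*
+ * As a worked example of how these expand (using a field actually used
+ * further below): SYM_MASK(SendCtrl, SDmaEnable) becomes
+ * QIB_7220_SendCtrl_SDmaEnable_RMASK << QIB_7220_SendCtrl_SDmaEnable_LSB,
+ * and SYM_FIELD(ibcs, IBCStatus, LinkTrainingState) shifts and masks that
+ * field out of a previously read IBCStatus value.
+ */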
+
+/* ibcctrl bits */
+#define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1
+/* cycle through TS1/TS2 till OK */
+#define QLOGIC_IB_IBCC_LINKINITCMD_POLL 2
+/* wait for TS1, then go on */
+#define QLOGIC_IB_IBCC_LINKINITCMD_SLEEP 3
+#define QLOGIC_IB_IBCC_LINKINITCMD_SHIFT 16
+
+#define QLOGIC_IB_IBCC_LINKCMD_DOWN 1 /* move to 0x11 */
+#define QLOGIC_IB_IBCC_LINKCMD_ARMED 2 /* move to 0x21 */
+#define QLOGIC_IB_IBCC_LINKCMD_ACTIVE 3 /* move to 0x31 */
+
+#define BLOB_7220_IBCHG 0x81
+
+/*
+ * We could have a single register get/put routine, that takes a group type,
+ * but this is somewhat clearer and cleaner. It also gives us some error
+ * checking. 64 bit register reads should always work, but are inefficient
+ * on opteron (the northbridge always generates 2 separate HT 32 bit reads),
+ * so we use kreg32 wherever possible. User register and counter register
+ * reads are always 32 bit reads, so only one form of those routines.
+ */
+
+/**
+ * qib_read_ureg32 - read 32-bit virtualized per-context register
+ * @dd: device
+ * @regno: register number
+ * @ctxt: context number
+ *
+ * Return the contents of a register that is virtualized to be per context.
+ * Returns -1 on errors (not distinguishable from valid contents at
+ * runtime; we may add a separate error variable at some point).
+ */
+static inline u32 qib_read_ureg32(const struct qib_devdata *dd,
+ enum qib_ureg regno, int ctxt)
+{
+ if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
+ return 0;
+
+ if (dd->userbase)
+ return readl(regno + (u64 __iomem *)
+ ((char __iomem *)dd->userbase +
+ dd->ureg_align * ctxt));
+ else
+ return readl(regno + (u64 __iomem *)
+ (dd->uregbase +
+ (char __iomem *)dd->kregbase +
+ dd->ureg_align * ctxt));
+}
+
+/**
+ * qib_write_ureg - write a 64-bit virtualized per-context register
+ * @dd: device
+ * @regno: register number
+ * @value: value
+ * @ctxt: context
+ *
+ * Write the contents of a register that is virtualized to be per context.
+ */
+static inline void qib_write_ureg(const struct qib_devdata *dd,
+ enum qib_ureg regno, u64 value, int ctxt)
+{
+ u64 __iomem *ubase;
+
+ if (dd->userbase)
+ ubase = (u64 __iomem *)
+ ((char __iomem *) dd->userbase +
+ dd->ureg_align * ctxt);
+ else
+ ubase = (u64 __iomem *)
+ (dd->uregbase +
+ (char __iomem *) dd->kregbase +
+ dd->ureg_align * ctxt);
+
+ if (dd->kregbase && (dd->flags & QIB_PRESENT))
+ writeq(value, &ubase[regno]);
+}
+
+/**
+ * qib_write_kreg_ctxt - write a device's per-ctxt 64-bit kernel register
+ * @dd: the qlogic_ib device
+ * @regno: the register number to write
+ * @ctxt: the context containing the register
+ * @value: the value to write
+ */
+static inline void qib_write_kreg_ctxt(const struct qib_devdata *dd,
+ const u16 regno, unsigned ctxt,
+ u64 value)
+{
+ qib_write_kreg(dd, regno + ctxt, value);
+}
+
+static inline void write_7220_creg(const struct qib_devdata *dd,
+ u16 regno, u64 value)
+{
+ if (dd->cspec->cregbase && (dd->flags & QIB_PRESENT))
+ writeq(value, &dd->cspec->cregbase[regno]);
+}
+
+static inline u64 read_7220_creg(const struct qib_devdata *dd, u16 regno)
+{
+ if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
+ return 0;
+ return readq(&dd->cspec->cregbase[regno]);
+}
+
+static inline u32 read_7220_creg32(const struct qib_devdata *dd, u16 regno)
+{
+ if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
+ return 0;
+ return readl(&dd->cspec->cregbase[regno]);
+}
+
+/* kr_revision bits */
+#define QLOGIC_IB_R_EMULATORREV_MASK ((1ULL << 22) - 1)
+#define QLOGIC_IB_R_EMULATORREV_SHIFT 40
+
+/* kr_control bits */
+#define QLOGIC_IB_C_RESET (1U << 7)
+
+/* kr_intstatus, kr_intclear, kr_intmask bits */
+#define QLOGIC_IB_I_RCVURG_MASK ((1ULL << 17) - 1)
+#define QLOGIC_IB_I_RCVURG_SHIFT 32
+#define QLOGIC_IB_I_RCVAVAIL_MASK ((1ULL << 17) - 1)
+#define QLOGIC_IB_I_RCVAVAIL_SHIFT 0
+#define QLOGIC_IB_I_SERDESTRIMDONE (1ULL << 27)
+
+#define QLOGIC_IB_C_FREEZEMODE 0x00000002
+#define QLOGIC_IB_C_LINKENABLE 0x00000004
+
+#define QLOGIC_IB_I_SDMAINT 0x8000000000000000ULL
+#define QLOGIC_IB_I_SDMADISABLED 0x4000000000000000ULL
+#define QLOGIC_IB_I_ERROR 0x0000000080000000ULL
+#define QLOGIC_IB_I_SPIOSENT 0x0000000040000000ULL
+#define QLOGIC_IB_I_SPIOBUFAVAIL 0x0000000020000000ULL
+#define QLOGIC_IB_I_GPIO 0x0000000010000000ULL
+
+/* variables for sanity checking interrupt and errors */
+#define QLOGIC_IB_I_BITSEXTANT \
+ (QLOGIC_IB_I_SDMAINT | QLOGIC_IB_I_SDMADISABLED | \
+ (QLOGIC_IB_I_RCVURG_MASK << QLOGIC_IB_I_RCVURG_SHIFT) | \
+ (QLOGIC_IB_I_RCVAVAIL_MASK << \
+ QLOGIC_IB_I_RCVAVAIL_SHIFT) | \
+ QLOGIC_IB_I_ERROR | QLOGIC_IB_I_SPIOSENT | \
+ QLOGIC_IB_I_SPIOBUFAVAIL | QLOGIC_IB_I_GPIO | \
+ QLOGIC_IB_I_SERDESTRIMDONE)
+
+#define IB_HWE_BITSEXTANT \
+ (HWE_MASK(RXEMemParityErr) | \
+ HWE_MASK(TXEMemParityErr) | \
+ (QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK << \
+ QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) | \
+ QLOGIC_IB_HWE_PCIE1PLLFAILED | \
+ QLOGIC_IB_HWE_PCIE0PLLFAILED | \
+ QLOGIC_IB_HWE_PCIEPOISONEDTLP | \
+ QLOGIC_IB_HWE_PCIECPLTIMEOUT | \
+ QLOGIC_IB_HWE_PCIEBUSPARITYXTLH | \
+ QLOGIC_IB_HWE_PCIEBUSPARITYXADM | \
+ QLOGIC_IB_HWE_PCIEBUSPARITYRADM | \
+ HWE_MASK(PowerOnBISTFailed) | \
+ QLOGIC_IB_HWE_COREPLL_FBSLIP | \
+ QLOGIC_IB_HWE_COREPLL_RFSLIP | \
+ QLOGIC_IB_HWE_SERDESPLLFAILED | \
+ HWE_MASK(IBCBusToSPCParityErr) | \
+ HWE_MASK(IBCBusFromSPCParityErr) | \
+ QLOGIC_IB_HWE_PCIECPLDATAQUEUEERR | \
+ QLOGIC_IB_HWE_PCIECPLHDRQUEUEERR | \
+ QLOGIC_IB_HWE_SDMAMEMREADERR | \
+ QLOGIC_IB_HWE_CLK_UC_PLLNOTLOCKED | \
+ QLOGIC_IB_HWE_PCIESERDESQ0PCLKNOTDETECT | \
+ QLOGIC_IB_HWE_PCIESERDESQ1PCLKNOTDETECT | \
+ QLOGIC_IB_HWE_PCIESERDESQ2PCLKNOTDETECT | \
+ QLOGIC_IB_HWE_PCIESERDESQ3PCLKNOTDETECT | \
+ QLOGIC_IB_HWE_DDSRXEQMEMORYPARITYERR | \
+ QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR | \
+ QLOGIC_IB_HWE_PCIE_UC_OCT0MEMORYPARITYERR | \
+ QLOGIC_IB_HWE_PCIE_UC_OCT1MEMORYPARITYERR)
+
+#define IB_E_BITSEXTANT \
+ (ERR_MASK(RcvFormatErr) | ERR_MASK(RcvVCRCErr) | \
+ ERR_MASK(RcvICRCErr) | ERR_MASK(RcvMinPktLenErr) | \
+ ERR_MASK(RcvMaxPktLenErr) | ERR_MASK(RcvLongPktLenErr) | \
+ ERR_MASK(RcvShortPktLenErr) | ERR_MASK(RcvUnexpectedCharErr) | \
+ ERR_MASK(RcvUnsupportedVLErr) | ERR_MASK(RcvEBPErr) | \
+ ERR_MASK(RcvIBFlowErr) | ERR_MASK(RcvBadVersionErr) | \
+ ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr) | \
+ ERR_MASK(RcvBadTidErr) | ERR_MASK(RcvHdrLenErr) | \
+ ERR_MASK(RcvHdrErr) | ERR_MASK(RcvIBLostLinkErr) | \
+ ERR_MASK(SendSpecialTriggerErr) | \
+ ERR_MASK(SDmaDisabledErr) | ERR_MASK(SendMinPktLenErr) | \
+ ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendUnderRunErr) | \
+ ERR_MASK(SendPktLenErr) | ERR_MASK(SendDroppedSmpPktErr) | \
+ ERR_MASK(SendDroppedDataPktErr) | \
+ ERR_MASK(SendPioArmLaunchErr) | \
+ ERR_MASK(SendUnexpectedPktNumErr) | \
+ ERR_MASK(SendUnsupportedVLErr) | ERR_MASK(SendBufMisuseErr) | \
+ ERR_MASK(SDmaGenMismatchErr) | ERR_MASK(SDmaOutOfBoundErr) | \
+ ERR_MASK(SDmaTailOutOfBoundErr) | ERR_MASK(SDmaBaseErr) | \
+ ERR_MASK(SDma1stDescErr) | ERR_MASK(SDmaRpyTagErr) | \
+ ERR_MASK(SDmaDwEnErr) | ERR_MASK(SDmaMissingDwErr) | \
+ ERR_MASK(SDmaUnexpDataErr) | \
+ ERR_MASK(IBStatusChanged) | ERR_MASK(InvalidAddrErr) | \
+ ERR_MASK(ResetNegated) | ERR_MASK(HardwareErr) | \
+ ERR_MASK(SDmaDescAddrMisalignErr) | \
+ ERR_MASK(InvalidEEPCmd))
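+
+/*
+ * The *_BITSEXTANT masks above enumerate every interrupt and error bit the
+ * driver knows about; the error handlers below warn with an "unknown
+ * errors" message when any bit outside these masks is seen.
+ */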
+
+/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */
+#define QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK 0x00000000000000ffULL
+#define QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT 0
+#define QLOGIC_IB_HWE_PCIEPOISONEDTLP 0x0000000010000000ULL
+#define QLOGIC_IB_HWE_PCIECPLTIMEOUT 0x0000000020000000ULL
+#define QLOGIC_IB_HWE_PCIEBUSPARITYXTLH 0x0000000040000000ULL
+#define QLOGIC_IB_HWE_PCIEBUSPARITYXADM 0x0000000080000000ULL
+#define QLOGIC_IB_HWE_PCIEBUSPARITYRADM 0x0000000100000000ULL
+#define QLOGIC_IB_HWE_COREPLL_FBSLIP 0x0080000000000000ULL
+#define QLOGIC_IB_HWE_COREPLL_RFSLIP 0x0100000000000000ULL
+#define QLOGIC_IB_HWE_PCIE1PLLFAILED 0x0400000000000000ULL
+#define QLOGIC_IB_HWE_PCIE0PLLFAILED 0x0800000000000000ULL
+#define QLOGIC_IB_HWE_SERDESPLLFAILED 0x1000000000000000ULL
+/* specific to this chip */
+#define QLOGIC_IB_HWE_PCIECPLDATAQUEUEERR 0x0000000000000040ULL
+#define QLOGIC_IB_HWE_PCIECPLHDRQUEUEERR 0x0000000000000080ULL
+#define QLOGIC_IB_HWE_SDMAMEMREADERR 0x0000000010000000ULL
+#define QLOGIC_IB_HWE_CLK_UC_PLLNOTLOCKED 0x2000000000000000ULL
+#define QLOGIC_IB_HWE_PCIESERDESQ0PCLKNOTDETECT 0x0100000000000000ULL
+#define QLOGIC_IB_HWE_PCIESERDESQ1PCLKNOTDETECT 0x0200000000000000ULL
+#define QLOGIC_IB_HWE_PCIESERDESQ2PCLKNOTDETECT 0x0400000000000000ULL
+#define QLOGIC_IB_HWE_PCIESERDESQ3PCLKNOTDETECT 0x0800000000000000ULL
+#define QLOGIC_IB_HWE_DDSRXEQMEMORYPARITYERR 0x0000008000000000ULL
+#define QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR 0x0000004000000000ULL
+#define QLOGIC_IB_HWE_PCIE_UC_OCT0MEMORYPARITYERR 0x0000001000000000ULL
+#define QLOGIC_IB_HWE_PCIE_UC_OCT1MEMORYPARITYERR 0x0000002000000000ULL
+
+#define IBA7220_IBCC_LINKCMD_SHIFT 19
+
+/* kr_ibcddrctrl bits */
+#define IBA7220_IBC_DLIDLMC_MASK 0xFFFFFFFFUL
+#define IBA7220_IBC_DLIDLMC_SHIFT 32
+
+#define IBA7220_IBC_HRTBT_MASK (SYM_RMASK(IBCDDRCtrl, HRTBT_AUTO) | \
+ SYM_RMASK(IBCDDRCtrl, HRTBT_ENB))
+#define IBA7220_IBC_HRTBT_SHIFT SYM_LSB(IBCDDRCtrl, HRTBT_ENB)
+
+#define IBA7220_IBC_LANE_REV_SUPPORTED (1<<8)
+#define IBA7220_IBC_LREV_MASK 1
+#define IBA7220_IBC_LREV_SHIFT 8
+#define IBA7220_IBC_RXPOL_MASK 1
+#define IBA7220_IBC_RXPOL_SHIFT 7
+#define IBA7220_IBC_WIDTH_SHIFT 5
+#define IBA7220_IBC_WIDTH_MASK 0x3
+#define IBA7220_IBC_WIDTH_1X_ONLY (0 << IBA7220_IBC_WIDTH_SHIFT)
+#define IBA7220_IBC_WIDTH_4X_ONLY (1 << IBA7220_IBC_WIDTH_SHIFT)
+#define IBA7220_IBC_WIDTH_AUTONEG (2 << IBA7220_IBC_WIDTH_SHIFT)
+#define IBA7220_IBC_SPEED_AUTONEG (1 << 1)
+#define IBA7220_IBC_SPEED_SDR (1 << 2)
+#define IBA7220_IBC_SPEED_DDR (1 << 3)
+#define IBA7220_IBC_SPEED_AUTONEG_MASK (0x7 << 1)
+#define IBA7220_IBC_IBTA_1_2_MASK (1)
+
+/* kr_ibcddrstatus */
+/* link latency shift is 0, don't bother defining */
+#define IBA7220_DDRSTAT_LINKLAT_MASK 0x3ffffff
+
+/* kr_extstatus bits */
+#define QLOGIC_IB_EXTS_FREQSEL 0x2
+#define QLOGIC_IB_EXTS_SERDESSEL 0x4
+#define QLOGIC_IB_EXTS_MEMBIST_ENDTEST 0x0000000000004000
+#define QLOGIC_IB_EXTS_MEMBIST_DISABLED 0x0000000000008000
+
+/* kr_xgxsconfig bits */
+#define QLOGIC_IB_XGXS_RESET 0x5ULL
+#define QLOGIC_IB_XGXS_FC_SAFE (1ULL << 63)
+
+/* kr_rcvpktledcnt */
+#define IBA7220_LEDBLINK_ON_SHIFT 32 /* 4ns period on after packet */
+#define IBA7220_LEDBLINK_OFF_SHIFT 0 /* 4ns period off before next on */
+
+#define _QIB_GPIO_SDA_NUM 1
+#define _QIB_GPIO_SCL_NUM 0
+#define QIB_TWSI_EEPROM_DEV 0xA2 /* All Production 7220 cards. */
+#define QIB_TWSI_TEMP_DEV 0x98
+
+/* HW counter clock is at 4nsec */
+#define QIB_7220_PSXMITWAIT_CHECK_RATE 4000
+
+#define IBA7220_R_INTRAVAIL_SHIFT 17
+#define IBA7220_R_PKEY_DIS_SHIFT 34
+#define IBA7220_R_TAILUPD_SHIFT 35
+#define IBA7220_R_CTXTCFG_SHIFT 36
+
+#define IBA7220_HDRHEAD_PKTINT_SHIFT 32 /* interrupt cnt in upper 32 bits */
+
+/*
+ * the size bits give us 2^N, in KB units. 0 marks as invalid,
+ * and 7 is reserved. We currently use only 2KB and 4KB
+ */
+#define IBA7220_TID_SZ_SHIFT 37 /* shift to 3bit size selector */
+#define IBA7220_TID_SZ_2K (1UL << IBA7220_TID_SZ_SHIFT) /* 2KB */
+#define IBA7220_TID_SZ_4K (2UL << IBA7220_TID_SZ_SHIFT) /* 4KB */
+#define IBA7220_TID_PA_SHIFT 11U /* TID addr in chip stored w/o low bits */
+#define PBC_7220_VL15_SEND (1ULL << 63) /* pbc; VL15, no credit check */
+#define PBC_7220_VL15_SEND_CTRL (1ULL << 31) /* control version of same */
+
+#define AUTONEG_TRIES 5 /* sequential retries to negotiate DDR */
+
+/* packet rate matching delay multiplier */
+static u8 rate_to_delay[2][2] = {
+ /* 1x, 4x */
+ { 8, 2 }, /* SDR */
+ { 4, 1 } /* DDR */
+};
+
+static u8 ib_rate_to_delay[IB_RATE_120_GBPS + 1] = {
+ [IB_RATE_2_5_GBPS] = 8,
+ [IB_RATE_5_GBPS] = 4,
+ [IB_RATE_10_GBPS] = 2,
+ [IB_RATE_20_GBPS] = 1
+};
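+
+/*
+ * Both tables above are delay multipliers for packet rate matching:
+ * rate_to_delay is indexed [speed][width], as the row/column comments
+ * indicate (e.g. DDR/4X gives 1, the fastest case, while SDR/1X gives 8),
+ * and ib_rate_to_delay maps the IB static-rate encoding to the same
+ * multiplier values.
+ */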
+
+#define IBA7220_LINKSPEED_SHIFT SYM_LSB(IBCStatus, LinkSpeedActive)
+#define IBA7220_LINKWIDTH_SHIFT SYM_LSB(IBCStatus, LinkWidthActive)
+
+/* link training states, from IBC */
+#define IB_7220_LT_STATE_DISABLED 0x00
+#define IB_7220_LT_STATE_LINKUP 0x01
+#define IB_7220_LT_STATE_POLLACTIVE 0x02
+#define IB_7220_LT_STATE_POLLQUIET 0x03
+#define IB_7220_LT_STATE_SLEEPDELAY 0x04
+#define IB_7220_LT_STATE_SLEEPQUIET 0x05
+#define IB_7220_LT_STATE_CFGDEBOUNCE 0x08
+#define IB_7220_LT_STATE_CFGRCVFCFG 0x09
+#define IB_7220_LT_STATE_CFGWAITRMT 0x0a
+#define IB_7220_LT_STATE_CFGIDLE 0x0b
+#define IB_7220_LT_STATE_RECOVERRETRAIN 0x0c
+#define IB_7220_LT_STATE_RECOVERWAITRMT 0x0e
+#define IB_7220_LT_STATE_RECOVERIDLE 0x0f
+
+/* link state machine states from IBC */
+#define IB_7220_L_STATE_DOWN 0x0
+#define IB_7220_L_STATE_INIT 0x1
+#define IB_7220_L_STATE_ARM 0x2
+#define IB_7220_L_STATE_ACTIVE 0x3
+#define IB_7220_L_STATE_ACT_DEFER 0x4
+
+static const u8 qib_7220_physportstate[0x20] = {
+ [IB_7220_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED,
+ [IB_7220_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP,
+ [IB_7220_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL,
+ [IB_7220_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL,
+ [IB_7220_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP,
+ [IB_7220_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP,
+ [IB_7220_LT_STATE_CFGDEBOUNCE] =
+ IB_PHYSPORTSTATE_CFG_TRAIN,
+ [IB_7220_LT_STATE_CFGRCVFCFG] =
+ IB_PHYSPORTSTATE_CFG_TRAIN,
+ [IB_7220_LT_STATE_CFGWAITRMT] =
+ IB_PHYSPORTSTATE_CFG_TRAIN,
+ [IB_7220_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_TRAIN,
+ [IB_7220_LT_STATE_RECOVERRETRAIN] =
+ IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
+ [IB_7220_LT_STATE_RECOVERWAITRMT] =
+ IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
+ [IB_7220_LT_STATE_RECOVERIDLE] =
+ IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
+ [0x10] = IB_PHYSPORTSTATE_CFG_TRAIN,
+ [0x11] = IB_PHYSPORTSTATE_CFG_TRAIN,
+ [0x12] = IB_PHYSPORTSTATE_CFG_TRAIN,
+ [0x13] = IB_PHYSPORTSTATE_CFG_TRAIN,
+ [0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
+ [0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
+ [0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
+ [0x17] = IB_PHYSPORTSTATE_CFG_TRAIN
+};
+
+int qib_special_trigger;
+module_param_named(special_trigger, qib_special_trigger, int, S_IRUGO);
+MODULE_PARM_DESC(special_trigger, "Enable SpecialTrigger arm/launch");
+
+#define IBCBUSFRSPCPARITYERR HWE_MASK(IBCBusFromSPCParityErr)
+#define IBCBUSTOSPCPARITYERR HWE_MASK(IBCBusToSPCParityErr)
+
+#define SYM_MASK_BIT(regname, fldname, bit) ((u64) \
+ (1ULL << (SYM_LSB(regname, fldname) + (bit))))
+
+#define TXEMEMPARITYERR_PIOBUF \
+ SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 0)
+#define TXEMEMPARITYERR_PIOPBC \
+ SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 1)
+#define TXEMEMPARITYERR_PIOLAUNCHFIFO \
+ SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 2)
+
+#define RXEMEMPARITYERR_RCVBUF \
+ SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 0)
+#define RXEMEMPARITYERR_LOOKUPQ \
+ SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 1)
+#define RXEMEMPARITYERR_EXPTID \
+ SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 2)
+#define RXEMEMPARITYERR_EAGERTID \
+ SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 3)
+#define RXEMEMPARITYERR_FLAGBUF \
+ SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 4)
+#define RXEMEMPARITYERR_DATAINFO \
+ SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 5)
+#define RXEMEMPARITYERR_HDRINFO \
+ SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 6)
+
+/* 7220 specific hardware errors... */
+static const struct qib_hwerror_msgs qib_7220_hwerror_msgs[] = {
+ /* generic hardware errors */
+ QLOGIC_IB_HWE_MSG(IBCBUSFRSPCPARITYERR, "QIB2IB Parity"),
+ QLOGIC_IB_HWE_MSG(IBCBUSTOSPCPARITYERR, "IB2QIB Parity"),
+
+ QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOBUF,
+ "TXE PIOBUF Memory Parity"),
+ QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOPBC,
+ "TXE PIOPBC Memory Parity"),
+ QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOLAUNCHFIFO,
+ "TXE PIOLAUNCHFIFO Memory Parity"),
+
+ QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_RCVBUF,
+ "RXE RCVBUF Memory Parity"),
+ QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_LOOKUPQ,
+ "RXE LOOKUPQ Memory Parity"),
+ QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_EAGERTID,
+ "RXE EAGERTID Memory Parity"),
+ QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_EXPTID,
+ "RXE EXPTID Memory Parity"),
+ QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_FLAGBUF,
+ "RXE FLAGBUF Memory Parity"),
+ QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_DATAINFO,
+ "RXE DATAINFO Memory Parity"),
+ QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_HDRINFO,
+ "RXE HDRINFO Memory Parity"),
+
+ /* chip-specific hardware errors */
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEPOISONEDTLP,
+ "PCIe Poisoned TLP"),
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIECPLTIMEOUT,
+ "PCIe completion timeout"),
+ /*
+ * In practice, it's unlikely that we'll see PCIe PLL, or bus
+ * parity or memory parity error failures, because most likely we
+ * won't be able to talk to the core of the chip. Nonetheless, we
+ * might see them, if they are in parts of the PCIe core that aren't
+ * essential.
+ */
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE1PLLFAILED,
+ "PCIePLL1"),
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE0PLLFAILED,
+ "PCIePLL0"),
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYXTLH,
+ "PCIe XTLH core parity"),
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYXADM,
+ "PCIe ADM TX core parity"),
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYRADM,
+ "PCIe ADM RX core parity"),
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_SERDESPLLFAILED,
+ "SerDes PLL"),
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIECPLDATAQUEUEERR,
+ "PCIe cpl header queue"),
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIECPLHDRQUEUEERR,
+ "PCIe cpl data queue"),
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_SDMAMEMREADERR,
+ "Send DMA memory read"),
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_CLK_UC_PLLNOTLOCKED,
+ "uC PLL clock not locked"),
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIESERDESQ0PCLKNOTDETECT,
+ "PCIe serdes Q0 no clock"),
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIESERDESQ1PCLKNOTDETECT,
+ "PCIe serdes Q1 no clock"),
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIESERDESQ2PCLKNOTDETECT,
+ "PCIe serdes Q2 no clock"),
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIESERDESQ3PCLKNOTDETECT,
+ "PCIe serdes Q3 no clock"),
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_DDSRXEQMEMORYPARITYERR,
+ "DDS RXEQ memory parity"),
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR,
+ "IB uC memory parity"),
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE_UC_OCT0MEMORYPARITYERR,
+ "PCIe uC oct0 memory parity"),
+ QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE_UC_OCT1MEMORYPARITYERR,
+ "PCIe uC oct1 memory parity"),
+};
+
+#define RXE_PARITY (RXEMEMPARITYERR_EAGERTID|RXEMEMPARITYERR_EXPTID)
+
+#define QLOGIC_IB_E_PKTERRS (\
+ ERR_MASK(SendPktLenErr) | \
+ ERR_MASK(SendDroppedDataPktErr) | \
+ ERR_MASK(RcvVCRCErr) | \
+ ERR_MASK(RcvICRCErr) | \
+ ERR_MASK(RcvShortPktLenErr) | \
+ ERR_MASK(RcvEBPErr))
+
+/* Convenience for decoding Send DMA errors */
+#define QLOGIC_IB_E_SDMAERRS ( \
+ ERR_MASK(SDmaGenMismatchErr) | \
+ ERR_MASK(SDmaOutOfBoundErr) | \
+ ERR_MASK(SDmaTailOutOfBoundErr) | ERR_MASK(SDmaBaseErr) | \
+ ERR_MASK(SDma1stDescErr) | ERR_MASK(SDmaRpyTagErr) | \
+ ERR_MASK(SDmaDwEnErr) | ERR_MASK(SDmaMissingDwErr) | \
+ ERR_MASK(SDmaUnexpDataErr) | \
+ ERR_MASK(SDmaDescAddrMisalignErr) | \
+ ERR_MASK(SDmaDisabledErr) | \
+ ERR_MASK(SendBufMisuseErr))
+
+/* These are all rcv-related errors which we want to count for stats */
+#define E_SUM_PKTERRS \
+ (ERR_MASK(RcvHdrLenErr) | ERR_MASK(RcvBadTidErr) | \
+ ERR_MASK(RcvBadVersionErr) | ERR_MASK(RcvHdrErr) | \
+ ERR_MASK(RcvLongPktLenErr) | ERR_MASK(RcvShortPktLenErr) | \
+ ERR_MASK(RcvMaxPktLenErr) | ERR_MASK(RcvMinPktLenErr) | \
+ ERR_MASK(RcvFormatErr) | ERR_MASK(RcvUnsupportedVLErr) | \
+ ERR_MASK(RcvUnexpectedCharErr) | ERR_MASK(RcvEBPErr))
+
+/* These are all send-related errors which we want to count for stats */
+#define E_SUM_ERRS \
+ (ERR_MASK(SendPioArmLaunchErr) | ERR_MASK(SendUnexpectedPktNumErr) | \
+ ERR_MASK(SendDroppedDataPktErr) | ERR_MASK(SendDroppedSmpPktErr) | \
+ ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendUnsupportedVLErr) | \
+ ERR_MASK(SendMinPktLenErr) | ERR_MASK(SendPktLenErr) | \
+ ERR_MASK(InvalidAddrErr))
+
+/*
+ * This is similar to E_SUM_ERRS, but only ignores errors that are a
+ * direct result of freeze mode and of cancelling buffers. Armlaunch
+ * errors are deliberately not ignored, because more of them can occur
+ * while we are still cleaning up, and those need to be cancelled as
+ * they happen.
+ */
+#define E_SPKT_ERRS_IGNORE \
+ (ERR_MASK(SendDroppedDataPktErr) | ERR_MASK(SendDroppedSmpPktErr) | \
+ ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendMinPktLenErr) | \
+ ERR_MASK(SendPktLenErr))
+
+/*
+ * these are errors that can occur when the link changes state while
+ * a packet is being sent or received. This doesn't cover things
+ * like EBP or VCRC that can be the result of a sending having the
+ * link change state, so we receive a "known bad" packet.
+ */
+#define E_SUM_LINK_PKTERRS \
+ (ERR_MASK(SendDroppedDataPktErr) | ERR_MASK(SendDroppedSmpPktErr) | \
+ ERR_MASK(SendMinPktLenErr) | ERR_MASK(SendPktLenErr) | \
+ ERR_MASK(RcvShortPktLenErr) | ERR_MASK(RcvMinPktLenErr) | \
+ ERR_MASK(RcvUnexpectedCharErr))
+
+static void autoneg_7220_work(struct work_struct *);
+static u32 __iomem *qib_7220_getsendbuf(struct qib_pportdata *, u64, u32 *);
+
+/*
+ * Called when we might have an error that is specific to a particular
+ * PIO buffer, and may need to cancel that buffer so it can be re-used;
+ * this path does not need to force an update of pioavail.
+ */
+static void qib_disarm_7220_senderrbufs(struct qib_pportdata *ppd)
+{
+ unsigned long sbuf[3];
+ struct qib_devdata *dd = ppd->dd;
+
+ /*
+ * It's possible that sendbuffererror could have bits set; might
+ * have already done this as a result of hardware error handling.
+ */
+ /* read these before writing errorclear */
+ sbuf[0] = qib_read_kreg64(dd, kr_sendbuffererror);
+ sbuf[1] = qib_read_kreg64(dd, kr_sendbuffererror + 1);
+ sbuf[2] = qib_read_kreg64(dd, kr_sendbuffererror + 2);
+
+ if (sbuf[0] || sbuf[1] || sbuf[2])
+ qib_disarm_piobufs_set(dd, sbuf,
+ dd->piobcnt2k + dd->piobcnt4k);
+}
+
+static void qib_7220_txe_recover(struct qib_devdata *dd)
+{
+ qib_devinfo(dd->pcidev, "Recovering from TXE PIO parity error\n");
+ qib_disarm_7220_senderrbufs(dd->pport);
+}
+
+/*
+ * This is called with interrupts disabled and sdma_lock held.
+ */
+static void qib_7220_sdma_sendctrl(struct qib_pportdata *ppd, unsigned op)
+{
+ struct qib_devdata *dd = ppd->dd;
+ u64 set_sendctrl = 0;
+ u64 clr_sendctrl = 0;
+
+ if (op & QIB_SDMA_SENDCTRL_OP_ENABLE)
+ set_sendctrl |= SYM_MASK(SendCtrl, SDmaEnable);
+ else
+ clr_sendctrl |= SYM_MASK(SendCtrl, SDmaEnable);
+
+ if (op & QIB_SDMA_SENDCTRL_OP_INTENABLE)
+ set_sendctrl |= SYM_MASK(SendCtrl, SDmaIntEnable);
+ else
+ clr_sendctrl |= SYM_MASK(SendCtrl, SDmaIntEnable);
+
+ if (op & QIB_SDMA_SENDCTRL_OP_HALT)
+ set_sendctrl |= SYM_MASK(SendCtrl, SDmaHalt);
+ else
+ clr_sendctrl |= SYM_MASK(SendCtrl, SDmaHalt);
+
+ spin_lock(&dd->sendctrl_lock);
+
+ dd->sendctrl |= set_sendctrl;
+ dd->sendctrl &= ~clr_sendctrl;
+
+ qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
+ qib_write_kreg(dd, kr_scratch, 0);
+
+ spin_unlock(&dd->sendctrl_lock);
+}
+
+static void qib_decode_7220_sdma_errs(struct qib_pportdata *ppd,
+ u64 err, char *buf, size_t blen)
+{
+ static const struct {
+ u64 err;
+ const char *msg;
+ } errs[] = {
+ { ERR_MASK(SDmaGenMismatchErr),
+ "SDmaGenMismatch" },
+ { ERR_MASK(SDmaOutOfBoundErr),
+ "SDmaOutOfBound" },
+ { ERR_MASK(SDmaTailOutOfBoundErr),
+ "SDmaTailOutOfBound" },
+ { ERR_MASK(SDmaBaseErr),
+ "SDmaBase" },
+ { ERR_MASK(SDma1stDescErr),
+ "SDma1stDesc" },
+ { ERR_MASK(SDmaRpyTagErr),
+ "SDmaRpyTag" },
+ { ERR_MASK(SDmaDwEnErr),
+ "SDmaDwEn" },
+ { ERR_MASK(SDmaMissingDwErr),
+ "SDmaMissingDw" },
+ { ERR_MASK(SDmaUnexpDataErr),
+ "SDmaUnexpData" },
+ { ERR_MASK(SDmaDescAddrMisalignErr),
+ "SDmaDescAddrMisalign" },
+ { ERR_MASK(SendBufMisuseErr),
+ "SendBufMisuse" },
+ { ERR_MASK(SDmaDisabledErr),
+ "SDmaDisabled" },
+ };
+ int i;
+ size_t bidx = 0;
+
+ for (i = 0; i < ARRAY_SIZE(errs); i++) {
+ if (err & errs[i].err)
+ bidx += scnprintf(buf + bidx, blen - bidx,
+ "%s ", errs[i].msg);
+ }
+}
+
+/*
+ * This is called as part of link down clean up so disarm and flush
+ * all send buffers so that SMP packets can be sent.
+ */
+static void qib_7220_sdma_hw_clean_up(struct qib_pportdata *ppd)
+{
+ /* This will trigger the Abort interrupt */
+ sendctrl_7220_mod(ppd, QIB_SENDCTRL_DISARM_ALL | QIB_SENDCTRL_FLUSH |
+ QIB_SENDCTRL_AVAIL_BLIP);
+ ppd->dd->upd_pio_shadow = 1; /* update our idea of what's busy */
+}
+
+static void qib_sdma_7220_setlengen(struct qib_pportdata *ppd)
+{
+ /*
+ * Set SendDmaLenGen and clear and set
+ * the MSB of the generation count to enable generation checking
+ * and load the internal generation counter.
+ */
+ qib_write_kreg(ppd->dd, kr_senddmalengen, ppd->sdma_descq_cnt);
+ qib_write_kreg(ppd->dd, kr_senddmalengen,
+ ppd->sdma_descq_cnt |
+ (1ULL << QIB_7220_SendDmaLenGen_Generation_MSB));
+}
+
+static void qib_7220_sdma_hw_start_up(struct qib_pportdata *ppd)
+{
+ qib_sdma_7220_setlengen(ppd);
+ qib_sdma_update_7220_tail(ppd, 0); /* Set SendDmaTail */
+ ppd->sdma_head_dma[0] = 0;
+}
+
+#define DISABLES_SDMA ( \
+ ERR_MASK(SDmaDisabledErr) | \
+ ERR_MASK(SDmaBaseErr) | \
+ ERR_MASK(SDmaTailOutOfBoundErr) | \
+ ERR_MASK(SDmaOutOfBoundErr) | \
+ ERR_MASK(SDma1stDescErr) | \
+ ERR_MASK(SDmaRpyTagErr) | \
+ ERR_MASK(SDmaGenMismatchErr) | \
+ ERR_MASK(SDmaDescAddrMisalignErr) | \
+ ERR_MASK(SDmaMissingDwErr) | \
+ ERR_MASK(SDmaDwEnErr))
+
+static void sdma_7220_errors(struct qib_pportdata *ppd, u64 errs)
+{
+ unsigned long flags;
+ struct qib_devdata *dd = ppd->dd;
+ char *msg;
+
+ errs &= QLOGIC_IB_E_SDMAERRS;
+
+ msg = dd->cspec->sdmamsgbuf;
+ qib_decode_7220_sdma_errs(ppd, errs, msg, sizeof dd->cspec->sdmamsgbuf);
+ spin_lock_irqsave(&ppd->sdma_lock, flags);
+
+ if (errs & ERR_MASK(SendBufMisuseErr)) {
+ unsigned long sbuf[3];
+
+ sbuf[0] = qib_read_kreg64(dd, kr_sendbuffererror);
+ sbuf[1] = qib_read_kreg64(dd, kr_sendbuffererror + 1);
+ sbuf[2] = qib_read_kreg64(dd, kr_sendbuffererror + 2);
+
+ qib_dev_err(ppd->dd,
+ "IB%u:%u SendBufMisuse: %04lx %016lx %016lx\n",
+ ppd->dd->unit, ppd->port, sbuf[2], sbuf[1],
+ sbuf[0]);
+ }
+
+ if (errs & ERR_MASK(SDmaUnexpDataErr))
+ qib_dev_err(dd, "IB%u:%u SDmaUnexpData\n", ppd->dd->unit,
+ ppd->port);
+
+ switch (ppd->sdma_state.current_state) {
+ case qib_sdma_state_s00_hw_down:
+ /* not expecting any interrupts */
+ break;
+
+ case qib_sdma_state_s10_hw_start_up_wait:
+ /* handled in intr path */
+ break;
+
+ case qib_sdma_state_s20_idle:
+ /* not expecting any interrupts */
+ break;
+
+ case qib_sdma_state_s30_sw_clean_up_wait:
+ /* not expecting any interrupts */
+ break;
+
+ case qib_sdma_state_s40_hw_clean_up_wait:
+ if (errs & ERR_MASK(SDmaDisabledErr))
+ __qib_sdma_process_event(ppd,
+ qib_sdma_event_e50_hw_cleaned);
+ break;
+
+ case qib_sdma_state_s50_hw_halt_wait:
+ /* handled in intr path */
+ break;
+
+ case qib_sdma_state_s99_running:
+ if (errs & DISABLES_SDMA)
+ __qib_sdma_process_event(ppd,
+ qib_sdma_event_e7220_err_halted);
+ break;
+ }
+
+ spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+}
+
+/*
+ * Decode the error status into strings, deciding whether to always
+ * print it or not, depending on "normal packet errors" vs everything
+ * else. Return 1 if "real" errors, otherwise 0 if only packet
+ * errors, so caller can decide what to print with the string.
+ */
+static int qib_decode_7220_err(struct qib_devdata *dd, char *buf, size_t blen,
+ u64 err)
+{
+ int iserr = 1;
+
+ *buf = '\0';
+ if (err & QLOGIC_IB_E_PKTERRS) {
+ if (!(err & ~QLOGIC_IB_E_PKTERRS))
+ iserr = 0;
+ if ((err & ERR_MASK(RcvICRCErr)) &&
+ !(err & (ERR_MASK(RcvVCRCErr) | ERR_MASK(RcvEBPErr))))
+ strlcat(buf, "CRC ", blen);
+ if (!iserr)
+ goto done;
+ }
+ if (err & ERR_MASK(RcvHdrLenErr))
+ strlcat(buf, "rhdrlen ", blen);
+ if (err & ERR_MASK(RcvBadTidErr))
+ strlcat(buf, "rbadtid ", blen);
+ if (err & ERR_MASK(RcvBadVersionErr))
+ strlcat(buf, "rbadversion ", blen);
+ if (err & ERR_MASK(RcvHdrErr))
+ strlcat(buf, "rhdr ", blen);
+ if (err & ERR_MASK(SendSpecialTriggerErr))
+ strlcat(buf, "sendspecialtrigger ", blen);
+ if (err & ERR_MASK(RcvLongPktLenErr))
+ strlcat(buf, "rlongpktlen ", blen);
+ if (err & ERR_MASK(RcvMaxPktLenErr))
+ strlcat(buf, "rmaxpktlen ", blen);
+ if (err & ERR_MASK(RcvMinPktLenErr))
+ strlcat(buf, "rminpktlen ", blen);
+ if (err & ERR_MASK(SendMinPktLenErr))
+ strlcat(buf, "sminpktlen ", blen);
+ if (err & ERR_MASK(RcvFormatErr))
+ strlcat(buf, "rformaterr ", blen);
+ if (err & ERR_MASK(RcvUnsupportedVLErr))
+ strlcat(buf, "runsupvl ", blen);
+ if (err & ERR_MASK(RcvUnexpectedCharErr))
+ strlcat(buf, "runexpchar ", blen);
+ if (err & ERR_MASK(RcvIBFlowErr))
+ strlcat(buf, "ribflow ", blen);
+ if (err & ERR_MASK(SendUnderRunErr))
+ strlcat(buf, "sunderrun ", blen);
+ if (err & ERR_MASK(SendPioArmLaunchErr))
+ strlcat(buf, "spioarmlaunch ", blen);
+ if (err & ERR_MASK(SendUnexpectedPktNumErr))
+ strlcat(buf, "sunexperrpktnum ", blen);
+ if (err & ERR_MASK(SendDroppedSmpPktErr))
+ strlcat(buf, "sdroppedsmppkt ", blen);
+ if (err & ERR_MASK(SendMaxPktLenErr))
+ strlcat(buf, "smaxpktlen ", blen);
+ if (err & ERR_MASK(SendUnsupportedVLErr))
+ strlcat(buf, "sunsupVL ", blen);
+ if (err & ERR_MASK(InvalidAddrErr))
+ strlcat(buf, "invalidaddr ", blen);
+ if (err & ERR_MASK(RcvEgrFullErr))
+ strlcat(buf, "rcvegrfull ", blen);
+ if (err & ERR_MASK(RcvHdrFullErr))
+ strlcat(buf, "rcvhdrfull ", blen);
+ if (err & ERR_MASK(IBStatusChanged))
+ strlcat(buf, "ibcstatuschg ", blen);
+ if (err & ERR_MASK(RcvIBLostLinkErr))
+ strlcat(buf, "riblostlink ", blen);
+ if (err & ERR_MASK(HardwareErr))
+ strlcat(buf, "hardware ", blen);
+ if (err & ERR_MASK(ResetNegated))
+ strlcat(buf, "reset ", blen);
+ if (err & QLOGIC_IB_E_SDMAERRS)
+ qib_decode_7220_sdma_errs(dd->pport, err, buf, blen);
+ if (err & ERR_MASK(InvalidEEPCmd))
+ strlcat(buf, "invalideepromcmd ", blen);
+done:
+ return iserr;
+}
+
+static void reenable_7220_chase(unsigned long opaque)
+{
+ struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
+ ppd->cpspec->chase_timer.expires = 0;
+ qib_set_ib_7220_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
+ QLOGIC_IB_IBCC_LINKINITCMD_POLL);
+}
+
+static void handle_7220_chase(struct qib_pportdata *ppd, u64 ibcst)
+{
+ u8 ibclt;
+ u64 tnow;
+
+ ibclt = (u8)SYM_FIELD(ibcst, IBCStatus, LinkTrainingState);
+
+ /*
+ * Detect and handle the state chase issue, where we can
+ * get stuck if we are unlucky on timing on both sides of
+ * the link. If we are, we disable, set a timer, and
+ * then re-enable.
+ */
+ switch (ibclt) {
+ case IB_7220_LT_STATE_CFGRCVFCFG:
+ case IB_7220_LT_STATE_CFGWAITRMT:
+ case IB_7220_LT_STATE_TXREVLANES:
+ case IB_7220_LT_STATE_CFGENH:
+ tnow = get_jiffies_64();
+ if (ppd->cpspec->chase_end &&
+ time_after64(tnow, ppd->cpspec->chase_end)) {
+ ppd->cpspec->chase_end = 0;
+ qib_set_ib_7220_lstate(ppd,
+ QLOGIC_IB_IBCC_LINKCMD_DOWN,
+ QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
+ ppd->cpspec->chase_timer.expires = jiffies +
+ QIB_CHASE_DIS_TIME;
+ add_timer(&ppd->cpspec->chase_timer);
+ } else if (!ppd->cpspec->chase_end)
+ ppd->cpspec->chase_end = tnow + QIB_CHASE_TIME;
+ break;
+
+ default:
+ ppd->cpspec->chase_end = 0;
+ break;
+ }
+}
+
+static void handle_7220_errors(struct qib_devdata *dd, u64 errs)
+{
+ char *msg;
+ u64 ignore_this_time = 0;
+ u64 iserr = 0;
+ int log_idx;
+ struct qib_pportdata *ppd = dd->pport;
+ u64 mask;
+
+ /* don't report errors that are masked */
+ errs &= dd->cspec->errormask;
+ msg = dd->cspec->emsgbuf;
+
+ /* do these first, they are most important */
+ if (errs & ERR_MASK(HardwareErr))
+ qib_7220_handle_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf);
+ else
+ for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
+ if (errs & dd->eep_st_masks[log_idx].errs_to_log)
+ qib_inc_eeprom_err(dd, log_idx, 1);
+
+ if (errs & QLOGIC_IB_E_SDMAERRS)
+ sdma_7220_errors(ppd, errs);
+
+ if (errs & ~IB_E_BITSEXTANT)
+ qib_dev_err(dd, "error interrupt with unknown errors "
+ "%llx set\n", (unsigned long long)
+ (errs & ~IB_E_BITSEXTANT));
+
+ if (errs & E_SUM_ERRS) {
+ qib_disarm_7220_senderrbufs(ppd);
+ if ((errs & E_SUM_LINK_PKTERRS) &&
+ !(ppd->lflags & QIBL_LINKACTIVE)) {
+ /*
+ * This can happen when trying to bring the link
+ * up, but the IB link changes state at the "wrong"
+ * time. The IB logic then complains that the packet
+ * isn't valid. We don't want to confuse people, so
+ * we just don't print them, except at debug
+ */
+ ignore_this_time = errs & E_SUM_LINK_PKTERRS;
+ }
+ } else if ((errs & E_SUM_LINK_PKTERRS) &&
+ !(ppd->lflags & QIBL_LINKACTIVE)) {
+ /*
+ * This can happen when SMA is trying to bring the link
+ * up, but the IB link changes state at the "wrong" time.
+ * The IB logic then complains that the packet isn't
+ * valid. We don't want to confuse people, so we just
+ * don't print them, except at debug
+ */
+ ignore_this_time = errs & E_SUM_LINK_PKTERRS;
+ }
+
+ qib_write_kreg(dd, kr_errclear, errs);
+
+ errs &= ~ignore_this_time;
+ if (!errs)
+ goto done;
+
+ /*
+ * The ones we mask off are handled specially below
+ * or above. Also mask SDMADISABLED by default as it
+ * is too chatty.
+ */
+ mask = ERR_MASK(IBStatusChanged) |
+ ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr) |
+ ERR_MASK(HardwareErr) | ERR_MASK(SDmaDisabledErr);
+
+ qib_decode_7220_err(dd, msg, sizeof dd->cspec->emsgbuf, errs & ~mask);
+
+ if (errs & E_SUM_PKTERRS)
+ qib_stats.sps_rcverrs++;
+ if (errs & E_SUM_ERRS)
+ qib_stats.sps_txerrs++;
+ iserr = errs & ~(E_SUM_PKTERRS | QLOGIC_IB_E_PKTERRS |
+ ERR_MASK(SDmaDisabledErr));
+
+ if (errs & ERR_MASK(IBStatusChanged)) {
+ u64 ibcs;
+
+ ibcs = qib_read_kreg64(dd, kr_ibcstatus);
+ if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
+ handle_7220_chase(ppd, ibcs);
+
+ /* Update our picture of width and speed from chip */
+ ppd->link_width_active =
+ ((ibcs >> IBA7220_LINKWIDTH_SHIFT) & 1) ?
+ IB_WIDTH_4X : IB_WIDTH_1X;
+ ppd->link_speed_active =
+ ((ibcs >> IBA7220_LINKSPEED_SHIFT) & 1) ?
+ QIB_IB_DDR : QIB_IB_SDR;
+
+ /*
+ * Since going into a recovery state causes the link state
+ * to go down and since recovery is transitory, it is better
+ * if we "miss" ever seeing the link training state go into
+ * recovery (i.e., ignore this transition for link state
+ * special handling purposes) without updating lastibcstat.
+ */
+ if (qib_7220_phys_portstate(ibcs) !=
+ IB_PHYSPORTSTATE_LINK_ERR_RECOVER)
+ qib_handle_e_ibstatuschanged(ppd, ibcs);
+ }
+
+ if (errs & ERR_MASK(ResetNegated)) {
+ qib_dev_err(dd, "Got reset, requires re-init "
+ "(unload and reload driver)\n");
+ dd->flags &= ~QIB_INITTED; /* needs re-init */
+ /* mark as having had error */
+ *dd->devstatusp |= QIB_STATUS_HWERROR;
+ *dd->pport->statusp &= ~QIB_STATUS_IB_CONF;
+ }
+
+ if (*msg && iserr)
+ qib_dev_porterr(dd, ppd->port, "%s error\n", msg);
+
+ if (ppd->state_wanted & ppd->lflags)
+ wake_up_interruptible(&ppd->state_wait);
+
+ /*
+ * If there were hdrq or egrfull errors, wake up any processes
+ * waiting in poll. We used to try to check which contexts had
+ * the overflow, but given the cost of that and the chip reads
+ * to support it, it's better to just wake everybody up if we
+ * get an overflow; waiters can poll again if it's not them.
+ */
+ if (errs & (ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr))) {
+ qib_handle_urcv(dd, ~0U);
+ if (errs & ERR_MASK(RcvEgrFullErr))
+ qib_stats.sps_buffull++;
+ else
+ qib_stats.sps_hdrfull++;
+ }
+done:
+ return;
+}
+
+/* enable/disable chip from delivering interrupts */
+static void qib_7220_set_intr_state(struct qib_devdata *dd, u32 enable)
+{
+ if (enable) {
+ if (dd->flags & QIB_BADINTR)
+ return;
+ qib_write_kreg(dd, kr_intmask, ~0ULL);
+ /* force re-interrupt of any pending interrupts. */
+ qib_write_kreg(dd, kr_intclear, 0ULL);
+ } else
+ qib_write_kreg(dd, kr_intmask, 0ULL);
+}
+
+/*
+ * Try to cleanup as much as possible for anything that might have gone
+ * wrong while in freeze mode, such as pio buffers being written by user
+ * processes (causing armlaunch), send errors due to going into freeze mode,
+ * etc., and try to avoid causing extra interrupts while doing so.
+ * Forcibly update the in-memory pioavail register copies after cleanup
+ * because the chip won't do it while in freeze mode (the register values
+ * themselves are kept correct).
+ * Make sure that we don't lose any important interrupts by using the chip
+ * feature that says that writing 0 to a bit in *clear that is set in
+ * *status will cause an interrupt to be generated again (if allowed by
+ * the *mask value).
+ * This is in chip-specific code because of all of the register accesses,
+ * even though the details are similar on most chips.
+ */
+static void qib_7220_clear_freeze(struct qib_devdata *dd)
+{
+ /* disable error interrupts, to avoid confusion */
+ qib_write_kreg(dd, kr_errmask, 0ULL);
+
+ /* also disable interrupts; errormask is sometimes overwritten */
+ qib_7220_set_intr_state(dd, 0);
+
+ qib_cancel_sends(dd->pport);
+
+ /* clear the freeze, and be sure chip saw it */
+ qib_write_kreg(dd, kr_control, dd->control);
+ qib_read_kreg32(dd, kr_scratch);
+
+ /* force in-memory update now we are out of freeze */
+ qib_force_pio_avail_update(dd);
+
+ /*
+ * force new interrupt if any hwerr, error or interrupt bits are
+ * still set, and clear "safe" send packet errors related to freeze
+ * and cancelling sends. Re-enable error interrupts before possible
+ * force of re-interrupt on pending interrupts.
+ */
+ qib_write_kreg(dd, kr_hwerrclear, 0ULL);
+ qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE);
+ qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
+ qib_7220_set_intr_state(dd, 1);
+}
+
+/**
+ * qib_7220_handle_hwerrors - display hardware errors.
+ * @dd: the qlogic_ib device
+ * @msg: the output buffer
+ * @msgl: the size of the output buffer
+ *
+ * Most hardware errors are catastrophic, but for right now we'll print
+ * them and continue. We reuse the same message buffer as
+ * handle_7220_errors() to avoid excessive stack use.
+ */
+static void qib_7220_handle_hwerrors(struct qib_devdata *dd, char *msg,
+ size_t msgl)
+{
+ u64 hwerrs;
+ u32 bits, ctrl;
+ int isfatal = 0;
+ char *bitsmsg;
+ int log_idx;
+
+ hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
+ if (!hwerrs)
+ goto bail;
+ if (hwerrs == ~0ULL) {
+ qib_dev_err(dd, "Read of hardware error status failed "
+ "(all bits set); ignoring\n");
+ goto bail;
+ }
+ qib_stats.sps_hwerrs++;
+
+ /*
+ * Always clear the error status register, except MEMBISTFAIL,
+ * regardless of whether we continue or stop using the chip.
+ * We want that set so we know it failed, even across driver reload.
+ * We'll still ignore it in the hwerrmask. We do this partly for
+ * diagnostics, but also for support.
+ */
+ qib_write_kreg(dd, kr_hwerrclear,
+ hwerrs & ~HWE_MASK(PowerOnBISTFailed));
+
+ hwerrs &= dd->cspec->hwerrmask;
+
+ /* We log some errors to EEPROM, check if we have any of those. */
+ for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
+ if (hwerrs & dd->eep_st_masks[log_idx].hwerrs_to_log)
+ qib_inc_eeprom_err(dd, log_idx, 1);
+ if (hwerrs & ~(TXEMEMPARITYERR_PIOBUF | TXEMEMPARITYERR_PIOPBC |
+ RXE_PARITY))
+ qib_devinfo(dd->pcidev, "Hardware error: hwerr=0x%llx "
+ "(cleared)\n", (unsigned long long) hwerrs);
+
+ if (hwerrs & ~IB_HWE_BITSEXTANT)
+ qib_dev_err(dd, "hwerror interrupt with unknown errors "
+ "%llx set\n", (unsigned long long)
+ (hwerrs & ~IB_HWE_BITSEXTANT));
+
+ if (hwerrs & QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR)
+ qib_sd7220_clr_ibpar(dd);
+
+ ctrl = qib_read_kreg32(dd, kr_control);
+ if ((ctrl & QLOGIC_IB_C_FREEZEMODE) && !dd->diag_client) {
+ /*
+ * Parity errors in send memory are recoverable by h/w
+ * just do housekeeping, exit freeze mode and continue.
+ */
+ if (hwerrs & (TXEMEMPARITYERR_PIOBUF |
+ TXEMEMPARITYERR_PIOPBC)) {
+ qib_7220_txe_recover(dd);
+ hwerrs &= ~(TXEMEMPARITYERR_PIOBUF |
+ TXEMEMPARITYERR_PIOPBC);
+ }
+ if (hwerrs)
+ isfatal = 1;
+ else
+ qib_7220_clear_freeze(dd);
+ }
+
+ *msg = '\0';
+
+ if (hwerrs & HWE_MASK(PowerOnBISTFailed)) {
+ isfatal = 1;
+ strlcat(msg, "[Memory BIST test failed, "
+ "InfiniPath hardware unusable]", msgl);
+ /* ignore from now on, so disable until driver reloaded */
+ dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed);
+ qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
+ }
+
+ qib_format_hwerrors(hwerrs, qib_7220_hwerror_msgs,
+ ARRAY_SIZE(qib_7220_hwerror_msgs), msg, msgl);
+
+ bitsmsg = dd->cspec->bitsmsgbuf;
+ if (hwerrs & (QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK <<
+ QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT)) {
+ bits = (u32) ((hwerrs >>
+ QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) &
+ QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK);
+ snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf,
+ "[PCIe Mem Parity Errs %x] ", bits);
+ strlcat(msg, bitsmsg, msgl);
+ }
+
+#define _QIB_PLL_FAIL (QLOGIC_IB_HWE_COREPLL_FBSLIP | \
+ QLOGIC_IB_HWE_COREPLL_RFSLIP)
+
+ if (hwerrs & _QIB_PLL_FAIL) {
+ isfatal = 1;
+ snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf,
+ "[PLL failed (%llx), InfiniPath hardware unusable]",
+ (unsigned long long) hwerrs & _QIB_PLL_FAIL);
+ strlcat(msg, bitsmsg, msgl);
+ /* ignore from now on, so disable until driver reloaded */
+ dd->cspec->hwerrmask &= ~(hwerrs & _QIB_PLL_FAIL);
+ qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
+ }
+
+ if (hwerrs & QLOGIC_IB_HWE_SERDESPLLFAILED) {
+ /*
+ * If it occurs, it is left masked since the external
+ * interface is unused.
+ */
+ dd->cspec->hwerrmask &= ~QLOGIC_IB_HWE_SERDESPLLFAILED;
+ qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
+ }
+
+ qib_dev_err(dd, "%s hardware error\n", msg);
+
+ if (isfatal && !dd->diag_client) {
+ qib_dev_err(dd, "Fatal Hardware Error, no longer"
+ " usable, SN %.16s\n", dd->serial);
+ /*
+ * For /sys status file and user programs to print; if no
+ * trailing brace is copied, we'll know it was truncated.
+ */
+ if (dd->freezemsg)
+ snprintf(dd->freezemsg, dd->freezelen,
+ "{%s}", msg);
+ qib_disable_after_error(dd);
+ }
+bail:;
+}
+
+/**
+ * qib_7220_init_hwerrors - enable hardware errors
+ * @dd: the qlogic_ib device
+ *
+ * now that we have finished initializing everything that might reasonably
+ * cause a hardware error, and cleared those error bits as they occur,
+ * we can enable hardware errors in the mask (potentially enabling
+ * freeze mode), and enable hardware errors as errors (along with
+ * everything else) in errormask
+ */
+static void qib_7220_init_hwerrors(struct qib_devdata *dd)
+{
+ u64 val;
+ u64 extsval;
+
+ extsval = qib_read_kreg64(dd, kr_extstatus);
+
+ if (!(extsval & (QLOGIC_IB_EXTS_MEMBIST_ENDTEST |
+ QLOGIC_IB_EXTS_MEMBIST_DISABLED)))
+ qib_dev_err(dd, "MemBIST did not complete!\n");
+ if (extsval & QLOGIC_IB_EXTS_MEMBIST_DISABLED)
+ qib_devinfo(dd->pcidev, "MemBIST is disabled.\n");
+
+ val = ~0ULL; /* default to all hwerrors becoming interrupts */
+
+ val &= ~QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR;
+ dd->cspec->hwerrmask = val;
+
+ qib_write_kreg(dd, kr_hwerrclear, ~HWE_MASK(PowerOnBISTFailed));
+ qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
+
+ /* clear all */
+ qib_write_kreg(dd, kr_errclear, ~0ULL);
+ /* enable errors that are masked, at least this first time. */
+ qib_write_kreg(dd, kr_errmask, ~0ULL);
+ dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask);
+ /* clear any interrupts up to this point (ints still not enabled) */
+ qib_write_kreg(dd, kr_intclear, ~0ULL);
+}
+
+/*
+ * Disable and enable the armlaunch error. Used for PIO bandwidth testing
+ * on chips that are count-based, rather than trigger-based. There is no
+ * reference counting, but that's also fine, given the intended use.
+ * Only chip-specific because it's all register accesses
+ */
+static void qib_set_7220_armlaunch(struct qib_devdata *dd, u32 enable)
+{
+ if (enable) {
+ qib_write_kreg(dd, kr_errclear, ERR_MASK(SendPioArmLaunchErr));
+ dd->cspec->errormask |= ERR_MASK(SendPioArmLaunchErr);
+ } else
+ dd->cspec->errormask &= ~ERR_MASK(SendPioArmLaunchErr);
+ qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
+}
+
+/*
+ * Formerly took parameter <which> in pre-shifted,
+ * pre-merged form with LinkCmd and LinkInitCmd
+ * together, and assuming the zero was NOP.
+ */
+static void qib_set_ib_7220_lstate(struct qib_pportdata *ppd, u16 linkcmd,
+ u16 linitcmd)
+{
+ u64 mod_wd;
+ struct qib_devdata *dd = ppd->dd;
+ unsigned long flags;
+
+ if (linitcmd == QLOGIC_IB_IBCC_LINKINITCMD_DISABLE) {
+ /*
+ * If we are told to disable, note that so link-recovery
+ * code does not attempt to bring us back up.
+ */
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags |= QIBL_IB_LINK_DISABLED;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ } else if (linitcmd || linkcmd == QLOGIC_IB_IBCC_LINKCMD_DOWN) {
+ /*
+ * Any other linkinitcmd will lead to LINKDOWN and then
+ * to INIT (if all is well), so clear flag to let
+ * link-recovery code attempt to bring us back up.
+ */
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags &= ~QIBL_IB_LINK_DISABLED;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ }
+
+ mod_wd = (linkcmd << IBA7220_IBCC_LINKCMD_SHIFT) |
+ (linitcmd << QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
+
+ qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl | mod_wd);
+ /* write to chip to prevent back-to-back writes of ibc reg */
+ qib_write_kreg(dd, kr_scratch, 0);
+}
+
+/*
+ * All detailed interaction with the SerDes has been moved to qib_sd7220.c
+ *
+ * The portion of IBA7220-specific bringup_serdes() that actually deals with
+ * registers and memory within the SerDes itself is qib_sd7220_init().
+ */
+
+/**
+ * qib_7220_bringup_serdes - bring up the serdes
+ * @ppd: physical port on the qlogic_ib device
+ */
+static int qib_7220_bringup_serdes(struct qib_pportdata *ppd)
+{
+ struct qib_devdata *dd = ppd->dd;
+ u64 val, prev_val, guid, ibc;
+ int ret = 0;
+
+ /* Put IBC in reset, sends disabled */
+ dd->control &= ~QLOGIC_IB_C_LINKENABLE;
+ qib_write_kreg(dd, kr_control, 0ULL);
+
+ if (qib_compat_ddr_negotiate) {
+ ppd->cpspec->ibdeltainprog = 1;
+ ppd->cpspec->ibsymsnap = read_7220_creg32(dd, cr_ibsymbolerr);
+ ppd->cpspec->iblnkerrsnap =
+ read_7220_creg32(dd, cr_iblinkerrrecov);
+ }
+
+ /* flowcontrolwatermark is in units of KBytes */
+ ibc = 0x5ULL << SYM_LSB(IBCCtrl, FlowCtrlWaterMark);
+ /*
+ * How often flowctrl sent. More or less in usecs; balance against
+ * watermark value, so that in theory senders always get a flow
+ * control update in time to not let the IB link go idle.
+ */
+ ibc |= 0x3ULL << SYM_LSB(IBCCtrl, FlowCtrlPeriod);
+ /* max error tolerance */
+ ibc |= 0xfULL << SYM_LSB(IBCCtrl, PhyerrThreshold);
+ /* use "real" buffer space for */
+ ibc |= 4ULL << SYM_LSB(IBCCtrl, CreditScale);
+ /* IB credit flow control. */
+ ibc |= 0xfULL << SYM_LSB(IBCCtrl, OverrunThreshold);
+ /*
+ * set initial max size pkt IBC will send, including ICRC; it's the
+ * PIO buffer size in dwords, less 1; also see qib_set_mtu()
+ */
+ ibc |= ((u64)(ppd->ibmaxlen >> 2) + 1) << SYM_LSB(IBCCtrl, MaxPktLen);
+ ppd->cpspec->ibcctrl = ibc; /* without linkcmd or linkinitcmd! */
+
+ /* initially come up waiting for TS1, without sending anything. */
+ val = ppd->cpspec->ibcctrl | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
+ QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
+ qib_write_kreg(dd, kr_ibcctrl, val);
+
+ if (!ppd->cpspec->ibcddrctrl) {
+ /* not on re-init after reset */
+ ppd->cpspec->ibcddrctrl = qib_read_kreg64(dd, kr_ibcddrctrl);
+
+ if (ppd->link_speed_enabled == (QIB_IB_SDR | QIB_IB_DDR))
+ ppd->cpspec->ibcddrctrl |=
+ IBA7220_IBC_SPEED_AUTONEG_MASK |
+ IBA7220_IBC_IBTA_1_2_MASK;
+ else
+ ppd->cpspec->ibcddrctrl |=
+ ppd->link_speed_enabled == QIB_IB_DDR ?
+ IBA7220_IBC_SPEED_DDR : IBA7220_IBC_SPEED_SDR;
+ if ((ppd->link_width_enabled & (IB_WIDTH_1X | IB_WIDTH_4X)) ==
+ (IB_WIDTH_1X | IB_WIDTH_4X))
+ ppd->cpspec->ibcddrctrl |= IBA7220_IBC_WIDTH_AUTONEG;
+ else
+ ppd->cpspec->ibcddrctrl |=
+ ppd->link_width_enabled == IB_WIDTH_4X ?
+ IBA7220_IBC_WIDTH_4X_ONLY :
+ IBA7220_IBC_WIDTH_1X_ONLY;
+
+ /* always enable these on driver reload, not sticky */
+ ppd->cpspec->ibcddrctrl |=
+ IBA7220_IBC_RXPOL_MASK << IBA7220_IBC_RXPOL_SHIFT;
+ ppd->cpspec->ibcddrctrl |=
+ IBA7220_IBC_HRTBT_MASK << IBA7220_IBC_HRTBT_SHIFT;
+
+ /* enable automatic lane reversal detection for receive */
+ ppd->cpspec->ibcddrctrl |= IBA7220_IBC_LANE_REV_SUPPORTED;
+ } else
+ /* write to chip to prevent back-to-back writes of ibc reg */
+ qib_write_kreg(dd, kr_scratch, 0);
+
+ qib_write_kreg(dd, kr_ibcddrctrl, ppd->cpspec->ibcddrctrl);
+ qib_write_kreg(dd, kr_scratch, 0);
+
+ qib_write_kreg(dd, kr_ncmodectrl, 0Ull);
+ qib_write_kreg(dd, kr_scratch, 0);
+
+ ret = qib_sd7220_init(dd);
+
+ val = qib_read_kreg64(dd, kr_xgxs_cfg);
+ prev_val = val;
+ val |= QLOGIC_IB_XGXS_FC_SAFE;
+ if (val != prev_val) {
+ qib_write_kreg(dd, kr_xgxs_cfg, val);
+ qib_read_kreg32(dd, kr_scratch);
+ }
+ if (val & QLOGIC_IB_XGXS_RESET)
+ val &= ~QLOGIC_IB_XGXS_RESET;
+ if (val != prev_val)
+ qib_write_kreg(dd, kr_xgxs_cfg, val);
+
+ /* first time through, set port guid */
+ if (!ppd->guid)
+ ppd->guid = dd->base_guid;
+ guid = be64_to_cpu(ppd->guid);
+
+ qib_write_kreg(dd, kr_hrtbt_guid, guid);
+ if (!ret) {
+ dd->control |= QLOGIC_IB_C_LINKENABLE;
+ qib_write_kreg(dd, kr_control, dd->control);
+ } else
+ /* write to chip to prevent back-to-back writes of ibc reg */
+ qib_write_kreg(dd, kr_scratch, 0);
+ return ret;
+}
+
+/**
+ * qib_7220_quiet_serdes - set serdes to txidle
+ * @ppd: physical port of the qlogic_ib device
+ * Called when driver is being unloaded
+ */
+static void qib_7220_quiet_serdes(struct qib_pportdata *ppd)
+{
+ u64 val;
+ struct qib_devdata *dd = ppd->dd;
+ unsigned long flags;
+
+ /* disable IBC */
+ dd->control &= ~QLOGIC_IB_C_LINKENABLE;
+ qib_write_kreg(dd, kr_control,
+ dd->control | QLOGIC_IB_C_FREEZEMODE);
+
+ ppd->cpspec->chase_end = 0;
+ if (ppd->cpspec->chase_timer.data) /* if initted */
+ del_timer_sync(&ppd->cpspec->chase_timer);
+
+ if (ppd->cpspec->ibsymdelta || ppd->cpspec->iblnkerrdelta ||
+ ppd->cpspec->ibdeltainprog) {
+ u64 diagc;
+
+ /* enable counter writes */
+ diagc = qib_read_kreg64(dd, kr_hwdiagctrl);
+ qib_write_kreg(dd, kr_hwdiagctrl,
+ diagc | SYM_MASK(HwDiagCtrl, CounterWrEnable));
+
+ if (ppd->cpspec->ibsymdelta || ppd->cpspec->ibdeltainprog) {
+ val = read_7220_creg32(dd, cr_ibsymbolerr);
+ if (ppd->cpspec->ibdeltainprog)
+ val -= val - ppd->cpspec->ibsymsnap;
+ val -= ppd->cpspec->ibsymdelta;
+ write_7220_creg(dd, cr_ibsymbolerr, val);
+ }
+ if (ppd->cpspec->iblnkerrdelta || ppd->cpspec->ibdeltainprog) {
+ val = read_7220_creg32(dd, cr_iblinkerrrecov);
+ if (ppd->cpspec->ibdeltainprog)
+ val -= val - ppd->cpspec->iblnkerrsnap;
+ val -= ppd->cpspec->iblnkerrdelta;
+ write_7220_creg(dd, cr_iblinkerrrecov, val);
+ }
+
+ /* and disable counter writes */
+ qib_write_kreg(dd, kr_hwdiagctrl, diagc);
+ }
+ qib_set_ib_7220_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
+
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ wake_up(&ppd->cpspec->autoneg_wait);
+ cancel_delayed_work(&ppd->cpspec->autoneg_work);
+ flush_scheduled_work();
+
+ shutdown_7220_relock_poll(ppd->dd);
+ val = qib_read_kreg64(ppd->dd, kr_xgxs_cfg);
+ val |= QLOGIC_IB_XGXS_RESET;
+ qib_write_kreg(ppd->dd, kr_xgxs_cfg, val);
+}
+
+/**
+ * qib_setup_7220_setextled - set the state of the two external LEDs
+ * @ppd: the qlogic_ib port
+ * @on: whether the link is up or not
+ *
+ * The exact combination of LEDs lit when @on is true is determined
+ * by looking at the ibcstatus.
+ *
+ * These LEDs indicate the physical and logical state of IB link.
+ * For this chip (at least with recommended board pinouts), LED1
+ * is Yellow (logical state) and LED2 is Green (physical state).
+ *
+ * Note: We try to match the Mellanox HCA LED behavior as best
+ * we can. Green indicates physical link state is OK (something is
+ * plugged in, and we can train).
+ * Amber indicates the link is logically up (ACTIVE).
+ * Mellanox further blinks the amber LED to indicate data packet
+ * activity, but we have no hardware support for that, so it would
+ * require waking up every 10-20 msecs and checking the counters
+ * on the chip, and then turning the LED off if appropriate. That's
+ * visible overhead, so not something we will do.
+ *
+ */
+static void qib_setup_7220_setextled(struct qib_pportdata *ppd, u32 on)
+{
+ struct qib_devdata *dd = ppd->dd;
+ u64 extctl, ledblink = 0, val, lst, ltst;
+ unsigned long flags;
+
+ /*
+ * The diags use the LED to indicate diag info, so we leave
+ * the external LED alone when the diags are running.
+ */
+ if (dd->diag_client)
+ return;
+
+ if (ppd->led_override) {
+ ltst = (ppd->led_override & QIB_LED_PHYS) ?
+			IB_PHYSPORTSTATE_LINKUP : IB_PHYSPORTSTATE_DISABLED;
+ lst = (ppd->led_override & QIB_LED_LOG) ?
+ IB_PORT_ACTIVE : IB_PORT_DOWN;
+ } else if (on) {
+ val = qib_read_kreg64(dd, kr_ibcstatus);
+ ltst = qib_7220_phys_portstate(val);
+ lst = qib_7220_iblink_state(val);
+ } else {
+ ltst = 0;
+ lst = 0;
+ }
+
+ spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
+ extctl = dd->cspec->extctrl & ~(SYM_MASK(EXTCtrl, LEDPriPortGreenOn) |
+ SYM_MASK(EXTCtrl, LEDPriPortYellowOn));
+ if (ltst == IB_PHYSPORTSTATE_LINKUP) {
+ extctl |= SYM_MASK(EXTCtrl, LEDPriPortGreenOn);
+ /*
+ * counts are in chip clock (4ns) periods.
+ * This is 1/16 sec (66.6ms) on,
+ * 3/16 sec (187.5 ms) off, with packets rcvd
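+		 * (66,600,000 ns / 4 ns = 16,650,000 cycles on;
+		 * 187,500,000 ns / 4 ns = 46,875,000 cycles off)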
+ */
+ ledblink = ((66600 * 1000UL / 4) << IBA7220_LEDBLINK_ON_SHIFT)
+ | ((187500 * 1000UL / 4) << IBA7220_LEDBLINK_OFF_SHIFT);
+ }
+ if (lst == IB_PORT_ACTIVE)
+ extctl |= SYM_MASK(EXTCtrl, LEDPriPortYellowOn);
+ dd->cspec->extctrl = extctl;
+ qib_write_kreg(dd, kr_extctrl, extctl);
+ spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
+
+ if (ledblink) /* blink the LED on packet receive */
+ qib_write_kreg(dd, kr_rcvpktledcnt, ledblink);
+}
+
+static void qib_7220_free_irq(struct qib_devdata *dd)
+{
+ if (dd->cspec->irq) {
+ free_irq(dd->cspec->irq, dd);
+ dd->cspec->irq = 0;
+ }
+ qib_nomsi(dd);
+}
+
+/*
+ * qib_setup_7220_cleanup - clean up any chip-specific stuff
+ * @dd: the qlogic_ib device
+ *
+ * This is called during driver unload.
+ *
+ */
+static void qib_setup_7220_cleanup(struct qib_devdata *dd)
+{
+ qib_7220_free_irq(dd);
+ kfree(dd->cspec->cntrs);
+ kfree(dd->cspec->portcntrs);
+}
+
+/*
+ * This is only called for SDmaInt.
+ * SDmaDisabled is handled on the error path.
+ */
+static void sdma_7220_intr(struct qib_pportdata *ppd, u64 istat)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ppd->sdma_lock, flags);
+
+ switch (ppd->sdma_state.current_state) {
+ case qib_sdma_state_s00_hw_down:
+ break;
+
+ case qib_sdma_state_s10_hw_start_up_wait:
+ __qib_sdma_process_event(ppd, qib_sdma_event_e20_hw_started);
+ break;
+
+ case qib_sdma_state_s20_idle:
+ break;
+
+ case qib_sdma_state_s30_sw_clean_up_wait:
+ break;
+
+ case qib_sdma_state_s40_hw_clean_up_wait:
+ break;
+
+ case qib_sdma_state_s50_hw_halt_wait:
+ __qib_sdma_process_event(ppd, qib_sdma_event_e60_hw_halted);
+ break;
+
+ case qib_sdma_state_s99_running:
+ /* too chatty to print here */
+ __qib_sdma_intr(ppd);
+ break;
+ }
+ spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+}
+
+static void qib_wantpiobuf_7220_intr(struct qib_devdata *dd, u32 needint)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dd->sendctrl_lock, flags);
+ if (needint) {
+ if (!(dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd)))
+ goto done;
+ /*
+		 * blip the availupd off; the next write will turn it on, so
+ * we ensure an avail update, regardless of threshold or
+ * buffers becoming free, whenever we want an interrupt
+ */
+ qib_write_kreg(dd, kr_sendctrl, dd->sendctrl &
+ ~SYM_MASK(SendCtrl, SendBufAvailUpd));
+ qib_write_kreg(dd, kr_scratch, 0ULL);
+ dd->sendctrl |= SYM_MASK(SendCtrl, SendIntBufAvail);
+ } else
+ dd->sendctrl &= ~SYM_MASK(SendCtrl, SendIntBufAvail);
+ qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
+ qib_write_kreg(dd, kr_scratch, 0ULL);
+done:
+ spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
+}
+
+/*
+ * Handle errors and unusual events first, separate function
+ * to improve cache hits for fast path interrupt handling.
+ */
+static noinline void unlikely_7220_intr(struct qib_devdata *dd, u64 istat)
+{
+ if (unlikely(istat & ~QLOGIC_IB_I_BITSEXTANT))
+ qib_dev_err(dd,
+ "interrupt with unknown interrupts %Lx set\n",
+ istat & ~QLOGIC_IB_I_BITSEXTANT);
+
+ if (istat & QLOGIC_IB_I_GPIO) {
+ u32 gpiostatus;
+
+ /*
+ * Boards for this chip currently don't use GPIO interrupts,
+ * so clear by writing GPIOstatus to GPIOclear, and complain
+		 * to alert the developer. To avoid endless repeats, clear
+ * the bits in the mask, since there is some kind of
+ * programming error or chip problem.
+ */
+ gpiostatus = qib_read_kreg32(dd, kr_gpio_status);
+ /*
+ * In theory, writing GPIOstatus to GPIOclear could
+ * have a bad side-effect on some diagnostic that wanted
+ * to poll for a status-change, but the various shadows
+ * make that problematic at best. Diags will just suppress
+ * all GPIO interrupts during such tests.
+ */
+ qib_write_kreg(dd, kr_gpio_clear, gpiostatus);
+
+ if (gpiostatus) {
+ const u32 mask = qib_read_kreg32(dd, kr_gpio_mask);
+ u32 gpio_irq = mask & gpiostatus;
+
+ /*
+ * A bit set in status and (chip) Mask register
+ * would cause an interrupt. Since we are not
+ * expecting any, report it. Also check that the
+ * chip reflects our shadow, report issues,
+ * and refresh from the shadow.
+ */
+ /*
+ * Clear any troublemakers, and update chip
+ * from shadow
+ */
+ dd->cspec->gpio_mask &= ~gpio_irq;
+ qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
+ }
+ }
+
+ if (istat & QLOGIC_IB_I_ERROR) {
+ u64 estat;
+
+ qib_stats.sps_errints++;
+ estat = qib_read_kreg64(dd, kr_errstatus);
+ if (!estat)
+ qib_devinfo(dd->pcidev, "error interrupt (%Lx), "
+ "but no error bits set!\n", istat);
+ else
+ handle_7220_errors(dd, estat);
+ }
+}
+
+static irqreturn_t qib_7220intr(int irq, void *data)
+{
+ struct qib_devdata *dd = data;
+ irqreturn_t ret;
+ u64 istat;
+ u64 ctxtrbits;
+ u64 rmask;
+ unsigned i;
+
+ if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) {
+ /*
+ * This return value is not great, but we do not want the
+ * interrupt core code to remove our interrupt handler
+ * because we don't appear to be handling an interrupt
+ * during a chip reset.
+ */
+ ret = IRQ_HANDLED;
+ goto bail;
+ }
+
+ istat = qib_read_kreg64(dd, kr_intstatus);
+
+ if (unlikely(!istat)) {
+ ret = IRQ_NONE; /* not our interrupt, or already handled */
+ goto bail;
+ }
+ if (unlikely(istat == -1)) {
+ qib_bad_intrstatus(dd);
+ /* don't know if it was our interrupt or not */
+ ret = IRQ_NONE;
+ goto bail;
+ }
+
+ qib_stats.sps_ints++;
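+	/*
+	 * Count interrupts; int_counter is what later lets the driver
+	 * tell whether interrupts are actually arriving (e.g. after a
+	 * reset, or when deciding to fall back from MSI to INTx).
+	 */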
+ if (dd->int_counter != (u32) -1)
+ dd->int_counter++;
+
+ if (unlikely(istat & (~QLOGIC_IB_I_BITSEXTANT |
+ QLOGIC_IB_I_GPIO | QLOGIC_IB_I_ERROR)))
+ unlikely_7220_intr(dd, istat);
+
+ /*
+ * Clear the interrupt bits we found set, relatively early, so we
+	 * "know" the chip will have seen this by the time we process
+ * the queue, and will re-interrupt if necessary. The processor
+ * itself won't take the interrupt again until we return.
+ */
+ qib_write_kreg(dd, kr_intclear, istat);
+
+ /*
+ * Handle kernel receive queues before checking for pio buffers
+ * available since receives can overflow; piobuf waiters can afford
+ * a few extra cycles, since they were waiting anyway.
+ */
+ ctxtrbits = istat &
+ ((QLOGIC_IB_I_RCVAVAIL_MASK << QLOGIC_IB_I_RCVAVAIL_SHIFT) |
+ (QLOGIC_IB_I_RCVURG_MASK << QLOGIC_IB_I_RCVURG_SHIFT));
+ if (ctxtrbits) {
+ rmask = (1ULL << QLOGIC_IB_I_RCVAVAIL_SHIFT) |
+ (1ULL << QLOGIC_IB_I_RCVURG_SHIFT);
+ for (i = 0; i < dd->first_user_ctxt; i++) {
+ if (ctxtrbits & rmask) {
+ ctxtrbits &= ~rmask;
+ qib_kreceive(dd->rcd[i], NULL, NULL);
+ }
+ rmask <<= 1;
+ }
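+		/* any bits still set belong to user contexts */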
+ if (ctxtrbits) {
+ ctxtrbits =
+ (ctxtrbits >> QLOGIC_IB_I_RCVAVAIL_SHIFT) |
+ (ctxtrbits >> QLOGIC_IB_I_RCVURG_SHIFT);
+ qib_handle_urcv(dd, ctxtrbits);
+ }
+ }
+
+ /* only call for SDmaInt */
+ if (istat & QLOGIC_IB_I_SDMAINT)
+ sdma_7220_intr(dd->pport, istat);
+
+ if ((istat & QLOGIC_IB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED))
+ qib_ib_piobufavail(dd);
+
+ ret = IRQ_HANDLED;
+bail:
+ return ret;
+}
+
+/*
+ * Set up our chip-specific interrupt handler.
+ * The interrupt type has already been setup, so
+ * we just need to do the registration and error checking.
+ * If we are using MSI interrupts, we may fall back to
+ * INTx later, if the interrupt handler doesn't get called
+ * within 1/2 second (see verify_interrupt()).
+ */
+static void qib_setup_7220_interrupt(struct qib_devdata *dd)
+{
+ if (!dd->cspec->irq)
+ qib_dev_err(dd, "irq is 0, BIOS error? Interrupts won't "
+ "work\n");
+ else {
+ int ret = request_irq(dd->cspec->irq, qib_7220intr,
+ dd->msi_lo ? 0 : IRQF_SHARED,
+ QIB_DRV_NAME, dd);
+
+ if (ret)
+ qib_dev_err(dd, "Couldn't setup %s interrupt "
+ "(irq=%d): %d\n", dd->msi_lo ?
+ "MSI" : "INTx", dd->cspec->irq, ret);
+ }
+}
+
+/**
+ * qib_7220_boardname - fill in the board name
+ * @dd: the qlogic_ib device
+ *
+ * info is based on the board revision register
+ */
+static void qib_7220_boardname(struct qib_devdata *dd)
+{
+ char *n;
+ u32 boardid, namelen;
+
+ boardid = SYM_FIELD(dd->revision, Revision,
+ BoardID);
+
+ switch (boardid) {
+ case 1:
+ n = "InfiniPath_QLE7240";
+ break;
+ case 2:
+ n = "InfiniPath_QLE7280";
+ break;
+ default:
+ qib_dev_err(dd, "Unknown 7220 board with ID %u\n", boardid);
+ n = "Unknown_InfiniPath_7220";
+ break;
+ }
+
+ namelen = strlen(n) + 1;
+ dd->boardname = kmalloc(namelen, GFP_KERNEL);
+ if (!dd->boardname)
+ qib_dev_err(dd, "Failed allocation for board name: %s\n", n);
+ else
+ snprintf(dd->boardname, namelen, "%s", n);
+
+ if (dd->majrev != 5 || !dd->minrev || dd->minrev > 2)
+ qib_dev_err(dd, "Unsupported InfiniPath hardware "
+ "revision %u.%u!\n",
+ dd->majrev, dd->minrev);
+
+ snprintf(dd->boardversion, sizeof(dd->boardversion),
+ "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n",
+ QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname,
+ (unsigned)SYM_FIELD(dd->revision, Revision_R, Arch),
+ dd->majrev, dd->minrev,
+ (unsigned)SYM_FIELD(dd->revision, Revision_R, SW));
+}
+
+/*
+ * This routine sleeps, so it can only be called from user context, not
+ * from interrupt context.
+ */
+static int qib_setup_7220_reset(struct qib_devdata *dd)
+{
+ u64 val;
+ int i;
+ int ret;
+ u16 cmdval;
+ u8 int_line, clinesz;
+ unsigned long flags;
+
+ qib_pcie_getcmd(dd, &cmdval, &int_line, &clinesz);
+
+ /* Use dev_err so it shows up in logs, etc. */
+ qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit);
+
+ /* no interrupts till re-initted */
+ qib_7220_set_intr_state(dd, 0);
+
+ dd->pport->cpspec->ibdeltainprog = 0;
+ dd->pport->cpspec->ibsymdelta = 0;
+ dd->pport->cpspec->iblnkerrdelta = 0;
+
+ /*
+ * Keep chip from being accessed until we are ready. Use
+ * writeq() directly, to allow the write even though QIB_PRESENT
+	 * isn't set.
+ */
+ dd->flags &= ~(QIB_INITTED | QIB_PRESENT);
+ dd->int_counter = 0; /* so we check interrupts work again */
+ val = dd->control | QLOGIC_IB_C_RESET;
+ writeq(val, &dd->kregbase[kr_control]);
+ mb(); /* prevent compiler reordering around actual reset */
+
+ for (i = 1; i <= 5; i++) {
+ /*
+ * Allow MBIST, etc. to complete; longer on each retry.
+ * We sometimes get machine checks from bus timeout if no
+ * response, so for now, make it *really* long.
+ */
+ msleep(1000 + (1 + i) * 2000);
+
+ qib_pcie_reenable(dd, cmdval, int_line, clinesz);
+
+ /*
+ * Use readq directly, so we don't need to mark it as PRESENT
+ * until we get a successful indication that all is well.
+ */
+ val = readq(&dd->kregbase[kr_revision]);
+ if (val == dd->revision) {
+ dd->flags |= QIB_PRESENT; /* it's back */
+ ret = qib_reinit_intr(dd);
+ goto bail;
+ }
+ }
+ ret = 0; /* failed */
+
+bail:
+ if (ret) {
+ if (qib_pcie_params(dd, dd->lbus_width, NULL, NULL))
+ qib_dev_err(dd, "Reset failed to setup PCIe or "
+ "interrupts; continuing anyway\n");
+
+ /* hold IBC in reset, no sends, etc till later */
+ qib_write_kreg(dd, kr_control, 0ULL);
+
+ /* clear the reset error, init error/hwerror mask */
+ qib_7220_init_hwerrors(dd);
+
+ /* do setup similar to speed or link-width changes */
+ if (dd->pport->cpspec->ibcddrctrl & IBA7220_IBC_IBTA_1_2_MASK)
+ dd->cspec->presets_needed = 1;
+ spin_lock_irqsave(&dd->pport->lflags_lock, flags);
+ dd->pport->lflags |= QIBL_IB_FORCE_NOTIFY;
+ dd->pport->lflags &= ~QIBL_IB_AUTONEG_FAILED;
+ spin_unlock_irqrestore(&dd->pport->lflags_lock, flags);
+ }
+
+ return ret;
+}
+
+/**
+ * qib_7220_put_tid - write a TID to the chip
+ * @dd: the qlogic_ib device
+ * @tidptr: pointer to the expected TID (in chip) to update
+ * @type: RCVHQ_RCV_TYPE_EAGER or RCVHQ_RCV_TYPE_EXPECTED
+ * @pa: physical address of in-memory buffer; tidinvalid if freeing
+ */
+static void qib_7220_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
+ u32 type, unsigned long pa)
+{
+ if (pa != dd->tidinvalid) {
+ u64 chippa = pa >> IBA7220_TID_PA_SHIFT;
+
+ /* paranoia checks */
+ if (pa != (chippa << IBA7220_TID_PA_SHIFT)) {
+ qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
+ pa);
+ return;
+ }
+ if (chippa >= (1UL << IBA7220_TID_SZ_SHIFT)) {
+ qib_dev_err(dd, "Physical page address 0x%lx "
+ "larger than supported\n", pa);
+ return;
+ }
+
+ if (type == RCVHQ_RCV_TYPE_EAGER)
+ chippa |= dd->tidtemplate;
+ else /* for now, always full 4KB page */
+ chippa |= IBA7220_TID_SZ_4K;
+ pa = chippa;
+ }
+ writeq(pa, tidptr);
+ mmiowb();
+}
+
+/**
+ * qib_7220_clear_tids - clear all TID entries for a ctxt, expected and eager
+ * @dd: the qlogic_ib device
+ * @rcd: the ctxt data
+ *
+ * clear all TID entries for a ctxt, expected and eager.
+ * Used from qib_close(). On this chip, TIDs are only 32 bits,
+ * not 64, but they are still on 64 bit boundaries, so tidbase
+ * is declared as u64 * for the pointer math, even though we write 32 bits
+ */
+static void qib_7220_clear_tids(struct qib_devdata *dd,
+ struct qib_ctxtdata *rcd)
+{
+ u64 __iomem *tidbase;
+ unsigned long tidinv;
+ u32 ctxt;
+ int i;
+
+ if (!dd->kregbase || !rcd)
+ return;
+
+ ctxt = rcd->ctxt;
+
+ tidinv = dd->tidinvalid;
+ tidbase = (u64 __iomem *)
+ ((char __iomem *)(dd->kregbase) +
+ dd->rcvtidbase +
+ ctxt * dd->rcvtidcnt * sizeof(*tidbase));
+
+ for (i = 0; i < dd->rcvtidcnt; i++)
+ qib_7220_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
+ tidinv);
+
+ tidbase = (u64 __iomem *)
+ ((char __iomem *)(dd->kregbase) +
+ dd->rcvegrbase +
+ rcd->rcvegr_tid_base * sizeof(*tidbase));
+
+ for (i = 0; i < rcd->rcvegrcnt; i++)
+ qib_7220_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
+ tidinv);
+}
+
+/**
+ * qib_7220_tidtemplate - setup constants for TID updates
+ * @dd: the qlogic_ib device
+ *
+ * We set up values that we use a lot, to avoid recalculating them each time
+ */
+static void qib_7220_tidtemplate(struct qib_devdata *dd)
+{
+ if (dd->rcvegrbufsize == 2048)
+ dd->tidtemplate = IBA7220_TID_SZ_2K;
+ else if (dd->rcvegrbufsize == 4096)
+ dd->tidtemplate = IBA7220_TID_SZ_4K;
+ dd->tidinvalid = 0;
+}
+
+/**
+ * qib_7220_get_base_info - set chip-specific flags for user code
+ * @rcd: the qlogic_ib ctxt
+ * @kinfo: qib_base_info pointer
+ *
+ * We set the PCIE flag because the lower bandwidth on PCIe vs
+ * HyperTransport can affect some user packet algorithms.
+ */
+static int qib_7220_get_base_info(struct qib_ctxtdata *rcd,
+ struct qib_base_info *kinfo)
+{
+ kinfo->spi_runtime_flags |= QIB_RUNTIME_PCIE |
+ QIB_RUNTIME_NODMA_RTAIL | QIB_RUNTIME_SDMA;
+
+ if (rcd->dd->flags & QIB_USE_SPCL_TRIG)
+ kinfo->spi_runtime_flags |= QIB_RUNTIME_SPECIAL_TRIGGER;
+
+ return 0;
+}
+
+static struct qib_message_header *
+qib_7220_get_msgheader(struct qib_devdata *dd, __le32 *rhf_addr)
+{
+ u32 offset = qib_hdrget_offset(rhf_addr);
+
+ return (struct qib_message_header *)
+ (rhf_addr - dd->rhf_offset + offset);
+}
+
+static void qib_7220_config_ctxts(struct qib_devdata *dd)
+{
+ unsigned long flags;
+ u32 nchipctxts;
+
+ nchipctxts = qib_read_kreg32(dd, kr_portcnt);
+ dd->cspec->numctxts = nchipctxts;
+ if (qib_n_krcv_queues > 1) {
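+		/* spread receive traffic across kernel contexts by QPN (6-bit mask) */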
+ dd->qpn_mask = 0x3f;
+ dd->first_user_ctxt = qib_n_krcv_queues * dd->num_pports;
+ if (dd->first_user_ctxt > nchipctxts)
+ dd->first_user_ctxt = nchipctxts;
+ } else
+ dd->first_user_ctxt = dd->num_pports;
+ dd->n_krcv_queues = dd->first_user_ctxt;
+
+ if (!qib_cfgctxts) {
+ int nctxts = dd->first_user_ctxt + num_online_cpus();
+
+ if (nctxts <= 5)
+ dd->ctxtcnt = 5;
+ else if (nctxts <= 9)
+ dd->ctxtcnt = 9;
+ else if (nctxts <= nchipctxts)
+ dd->ctxtcnt = nchipctxts;
+ } else if (qib_cfgctxts <= nchipctxts)
+ dd->ctxtcnt = qib_cfgctxts;
+ if (!dd->ctxtcnt) /* none of the above, set to max */
+ dd->ctxtcnt = nchipctxts;
+
+ /*
+ * Chip can be configured for 5, 9, or 17 ctxts, and choice
+ * affects number of eager TIDs per ctxt (1K, 2K, 4K).
+ * Lock to be paranoid about later motion, etc.
+ */
+ spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
+ if (dd->ctxtcnt > 9)
+ dd->rcvctrl |= 2ULL << IBA7220_R_CTXTCFG_SHIFT;
+ else if (dd->ctxtcnt > 5)
+ dd->rcvctrl |= 1ULL << IBA7220_R_CTXTCFG_SHIFT;
+ /* else configure for default 5 receive ctxts */
+ if (dd->qpn_mask)
+ dd->rcvctrl |= 1ULL << QIB_7220_RcvCtrl_RcvQPMapEnable_LSB;
+ qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
+ spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
+
+ /* kr_rcvegrcnt changes based on the number of contexts enabled */
+ dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt);
+ dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt, IBA7220_KRCVEGRCNT);
+}
+
+static int qib_7220_get_ib_cfg(struct qib_pportdata *ppd, int which)
+{
+ int lsb, ret = 0;
+ u64 maskr; /* right-justified mask */
+
+ switch (which) {
+ case QIB_IB_CFG_LWID_ENB: /* Get allowed Link-width */
+ ret = ppd->link_width_enabled;
+ goto done;
+
+ case QIB_IB_CFG_LWID: /* Get currently active Link-width */
+ ret = ppd->link_width_active;
+ goto done;
+
+ case QIB_IB_CFG_SPD_ENB: /* Get allowed Link speeds */
+ ret = ppd->link_speed_enabled;
+ goto done;
+
+ case QIB_IB_CFG_SPD: /* Get current Link spd */
+ ret = ppd->link_speed_active;
+ goto done;
+
+ case QIB_IB_CFG_RXPOL_ENB: /* Get Auto-RX-polarity enable */
+ lsb = IBA7220_IBC_RXPOL_SHIFT;
+ maskr = IBA7220_IBC_RXPOL_MASK;
+ break;
+
+ case QIB_IB_CFG_LREV_ENB: /* Get Auto-Lane-reversal enable */
+ lsb = IBA7220_IBC_LREV_SHIFT;
+ maskr = IBA7220_IBC_LREV_MASK;
+ break;
+
+ case QIB_IB_CFG_LINKLATENCY:
+ ret = qib_read_kreg64(ppd->dd, kr_ibcddrstatus)
+ & IBA7220_DDRSTAT_LINKLAT_MASK;
+ goto done;
+
+ case QIB_IB_CFG_OP_VLS:
+ ret = ppd->vls_operational;
+ goto done;
+
+ case QIB_IB_CFG_VL_HIGH_CAP:
+ ret = 0;
+ goto done;
+
+ case QIB_IB_CFG_VL_LOW_CAP:
+ ret = 0;
+ goto done;
+
+ case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
+ ret = SYM_FIELD(ppd->cpspec->ibcctrl, IBCCtrl,
+ OverrunThreshold);
+ goto done;
+
+ case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
+ ret = SYM_FIELD(ppd->cpspec->ibcctrl, IBCCtrl,
+ PhyerrThreshold);
+ goto done;
+
+ case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
+ /* will only take effect when the link state changes */
+ ret = (ppd->cpspec->ibcctrl &
+ SYM_MASK(IBCCtrl, LinkDownDefaultState)) ?
+ IB_LINKINITCMD_SLEEP : IB_LINKINITCMD_POLL;
+ goto done;
+
+ case QIB_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */
+ lsb = IBA7220_IBC_HRTBT_SHIFT;
+ maskr = IBA7220_IBC_HRTBT_MASK;
+ break;
+
+ case QIB_IB_CFG_PMA_TICKS:
+ /*
+		 * 0x00 = 10x link transfer rate, or 4 nsec for 2.5Gbs.
+ * Since the clock is always 250MHz, the value is 1 or 0.
+ */
+ ret = (ppd->link_speed_active == QIB_IB_DDR);
+ goto done;
+
+ default:
+ ret = -EINVAL;
+ goto done;
+ }
+ ret = (int)((ppd->cpspec->ibcddrctrl >> lsb) & maskr);
+done:
+ return ret;
+}
+
+static int qib_7220_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
+{
+ struct qib_devdata *dd = ppd->dd;
+ u64 maskr; /* right-justified mask */
+ int lsb, ret = 0, setforce = 0;
+ u16 lcmd, licmd;
+ unsigned long flags;
+
+ switch (which) {
+ case QIB_IB_CFG_LIDLMC:
+		 * Set LID and LMC. Combined to avoid possible hazard;
+		 * caller puts LMC in 16MSbits, DLID in 16LSbits of val.
+ * caller puts LMC in 16MSbits, DLID in 16LSbits of val
+ */
+ lsb = IBA7220_IBC_DLIDLMC_SHIFT;
+ maskr = IBA7220_IBC_DLIDLMC_MASK;
+ break;
+
+ case QIB_IB_CFG_LWID_ENB: /* set allowed Link-width */
+ /*
+ * As with speed, only write the actual register if
+		 * the link is currently down; otherwise it takes effect
+		 * on the next link change.
+ */
+ ppd->link_width_enabled = val;
+ if (!(ppd->lflags & QIBL_LINKDOWN))
+ goto bail;
+ /*
+ * We set the QIBL_IB_FORCE_NOTIFY bit so updown
+		 * will get called because we want to update
+ * link_width_active, and the change may not take
+ * effect for some time (if we are in POLL), so this
+ * flag will force the updown routine to be called
+ * on the next ibstatuschange down interrupt, even
+		 * if it's not a down->up transition.
+ */
+ val--; /* convert from IB to chip */
+ maskr = IBA7220_IBC_WIDTH_MASK;
+ lsb = IBA7220_IBC_WIDTH_SHIFT;
+ setforce = 1;
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ break;
+
+ case QIB_IB_CFG_SPD_ENB: /* set allowed Link speeds */
+ /*
+ * If we turn off IB1.2, need to preset SerDes defaults,
+ * but not right now. Set a flag for the next time
+ * we command the link down. As with width, only write the
+		 * actual register if the link is currently down; otherwise it
+		 * takes effect on the next link change. Since the setting is
+		 * being explicitly requested (via MAD or sysfs), clear autoneg
+ * failure status if speed autoneg is enabled.
+ */
+ ppd->link_speed_enabled = val;
+ if ((ppd->cpspec->ibcddrctrl & IBA7220_IBC_IBTA_1_2_MASK) &&
+ !(val & (val - 1)))
+ dd->cspec->presets_needed = 1;
+ if (!(ppd->lflags & QIBL_LINKDOWN))
+ goto bail;
+ /*
+ * We set the QIBL_IB_FORCE_NOTIFY bit so updown
+		 * will get called because we want to update
+ * link_speed_active, and the change may not take
+ * effect for some time (if we are in POLL), so this
+ * flag will force the updown routine to be called
+ * on the next ibstatuschange down interrupt, even
+		 * if it's not a down->up transition.
+ */
+ if (val == (QIB_IB_SDR | QIB_IB_DDR)) {
+ val = IBA7220_IBC_SPEED_AUTONEG_MASK |
+ IBA7220_IBC_IBTA_1_2_MASK;
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ } else
+ val = val == QIB_IB_DDR ?
+ IBA7220_IBC_SPEED_DDR : IBA7220_IBC_SPEED_SDR;
+ maskr = IBA7220_IBC_SPEED_AUTONEG_MASK |
+ IBA7220_IBC_IBTA_1_2_MASK;
+ /* IBTA 1.2 mode + speed bits are contiguous */
+ lsb = SYM_LSB(IBCDDRCtrl, IB_ENHANCED_MODE);
+ setforce = 1;
+ break;
+
+ case QIB_IB_CFG_RXPOL_ENB: /* set Auto-RX-polarity enable */
+ lsb = IBA7220_IBC_RXPOL_SHIFT;
+ maskr = IBA7220_IBC_RXPOL_MASK;
+ break;
+
+ case QIB_IB_CFG_LREV_ENB: /* set Auto-Lane-reversal enable */
+ lsb = IBA7220_IBC_LREV_SHIFT;
+ maskr = IBA7220_IBC_LREV_MASK;
+ break;
+
+ case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
+ maskr = SYM_FIELD(ppd->cpspec->ibcctrl, IBCCtrl,
+ OverrunThreshold);
+ if (maskr != val) {
+ ppd->cpspec->ibcctrl &=
+ ~SYM_MASK(IBCCtrl, OverrunThreshold);
+ ppd->cpspec->ibcctrl |= (u64) val <<
+ SYM_LSB(IBCCtrl, OverrunThreshold);
+ qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl);
+ qib_write_kreg(dd, kr_scratch, 0);
+ }
+ goto bail;
+
+ case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
+ maskr = SYM_FIELD(ppd->cpspec->ibcctrl, IBCCtrl,
+ PhyerrThreshold);
+ if (maskr != val) {
+ ppd->cpspec->ibcctrl &=
+ ~SYM_MASK(IBCCtrl, PhyerrThreshold);
+ ppd->cpspec->ibcctrl |= (u64) val <<
+ SYM_LSB(IBCCtrl, PhyerrThreshold);
+ qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl);
+ qib_write_kreg(dd, kr_scratch, 0);
+ }
+ goto bail;
+
+ case QIB_IB_CFG_PKEYS: /* update pkeys */
+ maskr = (u64) ppd->pkeys[0] | ((u64) ppd->pkeys[1] << 16) |
+ ((u64) ppd->pkeys[2] << 32) |
+ ((u64) ppd->pkeys[3] << 48);
+ qib_write_kreg(dd, kr_partitionkey, maskr);
+ goto bail;
+
+ case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
+ /* will only take effect when the link state changes */
+ if (val == IB_LINKINITCMD_POLL)
+ ppd->cpspec->ibcctrl &=
+ ~SYM_MASK(IBCCtrl, LinkDownDefaultState);
+ else /* SLEEP */
+ ppd->cpspec->ibcctrl |=
+ SYM_MASK(IBCCtrl, LinkDownDefaultState);
+ qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl);
+ qib_write_kreg(dd, kr_scratch, 0);
+ goto bail;
+
+ case QIB_IB_CFG_MTU: /* update the MTU in IBC */
+ /*
+ * Update our housekeeping variables, and set IBC max
+ * size, same as init code; max IBC is max we allow in
+ * buffer, less the qword pbc, plus 1 for ICRC, in dwords
+ * Set even if it's unchanged, print debug message only
+ * on changes.
+ */
+ val = (ppd->ibmaxlen >> 2) + 1;
+ ppd->cpspec->ibcctrl &= ~SYM_MASK(IBCCtrl, MaxPktLen);
+ ppd->cpspec->ibcctrl |= (u64)val << SYM_LSB(IBCCtrl, MaxPktLen);
+ qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl);
+ qib_write_kreg(dd, kr_scratch, 0);
+ goto bail;
+
+ case QIB_IB_CFG_LSTATE: /* set the IB link state */
+ switch (val & 0xffff0000) {
+ case IB_LINKCMD_DOWN:
+ lcmd = QLOGIC_IB_IBCC_LINKCMD_DOWN;
+ if (!ppd->cpspec->ibdeltainprog &&
+ qib_compat_ddr_negotiate) {
+ ppd->cpspec->ibdeltainprog = 1;
+ ppd->cpspec->ibsymsnap =
+ read_7220_creg32(dd, cr_ibsymbolerr);
+ ppd->cpspec->iblnkerrsnap =
+ read_7220_creg32(dd, cr_iblinkerrrecov);
+ }
+ break;
+
+ case IB_LINKCMD_ARMED:
+ lcmd = QLOGIC_IB_IBCC_LINKCMD_ARMED;
+ break;
+
+ case IB_LINKCMD_ACTIVE:
+ lcmd = QLOGIC_IB_IBCC_LINKCMD_ACTIVE;
+ break;
+
+ default:
+ ret = -EINVAL;
+ qib_dev_err(dd, "bad linkcmd req 0x%x\n", val >> 16);
+ goto bail;
+ }
+ switch (val & 0xffff) {
+ case IB_LINKINITCMD_NOP:
+ licmd = 0;
+ break;
+
+ case IB_LINKINITCMD_POLL:
+ licmd = QLOGIC_IB_IBCC_LINKINITCMD_POLL;
+ break;
+
+ case IB_LINKINITCMD_SLEEP:
+ licmd = QLOGIC_IB_IBCC_LINKINITCMD_SLEEP;
+ break;
+
+ case IB_LINKINITCMD_DISABLE:
+ licmd = QLOGIC_IB_IBCC_LINKINITCMD_DISABLE;
+ ppd->cpspec->chase_end = 0;
+ /*
+ * stop state chase counter and timer, if running.
+			 * wait for pending timer, but don't clear .data (ppd)!
+ */
+ if (ppd->cpspec->chase_timer.expires) {
+ del_timer_sync(&ppd->cpspec->chase_timer);
+ ppd->cpspec->chase_timer.expires = 0;
+ }
+ break;
+
+ default:
+ ret = -EINVAL;
+ qib_dev_err(dd, "bad linkinitcmd req 0x%x\n",
+ val & 0xffff);
+ goto bail;
+ }
+ qib_set_ib_7220_lstate(ppd, lcmd, licmd);
+ goto bail;
+
+ case QIB_IB_CFG_HRTBT: /* set Heartbeat off/enable/auto */
+ if (val > IBA7220_IBC_HRTBT_MASK) {
+ ret = -EINVAL;
+ goto bail;
+ }
+ lsb = IBA7220_IBC_HRTBT_SHIFT;
+ maskr = IBA7220_IBC_HRTBT_MASK;
+ break;
+
+ default:
+ ret = -EINVAL;
+ goto bail;
+ }
+ ppd->cpspec->ibcddrctrl &= ~(maskr << lsb);
+ ppd->cpspec->ibcddrctrl |= (((u64) val & maskr) << lsb);
+ qib_write_kreg(dd, kr_ibcddrctrl, ppd->cpspec->ibcddrctrl);
+ qib_write_kreg(dd, kr_scratch, 0);
+ if (setforce) {
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ }
+bail:
+ return ret;
+}
+
+static int qib_7220_set_loopback(struct qib_pportdata *ppd, const char *what)
+{
+ int ret = 0;
+ u64 val, ddr;
+
+ if (!strncmp(what, "ibc", 3)) {
+ ppd->cpspec->ibcctrl |= SYM_MASK(IBCCtrl, Loopback);
+ val = 0; /* disable heart beat, so link will come up */
+ qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
+ ppd->dd->unit, ppd->port);
+ } else if (!strncmp(what, "off", 3)) {
+ ppd->cpspec->ibcctrl &= ~SYM_MASK(IBCCtrl, Loopback);
+ /* enable heart beat again */
+ val = IBA7220_IBC_HRTBT_MASK << IBA7220_IBC_HRTBT_SHIFT;
+ qib_devinfo(ppd->dd->pcidev, "Disabling IB%u:%u IBC loopback "
+ "(normal)\n", ppd->dd->unit, ppd->port);
+ } else
+ ret = -EINVAL;
+ if (!ret) {
+ qib_write_kreg(ppd->dd, kr_ibcctrl, ppd->cpspec->ibcctrl);
+ ddr = ppd->cpspec->ibcddrctrl & ~(IBA7220_IBC_HRTBT_MASK
+ << IBA7220_IBC_HRTBT_SHIFT);
+ ppd->cpspec->ibcddrctrl = ddr | val;
+ qib_write_kreg(ppd->dd, kr_ibcddrctrl,
+ ppd->cpspec->ibcddrctrl);
+ qib_write_kreg(ppd->dd, kr_scratch, 0);
+ }
+ return ret;
+}
+
+static void qib_update_7220_usrhead(struct qib_ctxtdata *rcd, u64 hd,
+ u32 updegr, u32 egrhd)
+{
+ qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
+ if (updegr)
+ qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
+}
+
+static u32 qib_7220_hdrqempty(struct qib_ctxtdata *rcd)
+{
+ u32 head, tail;
+
+ head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt);
+ if (rcd->rcvhdrtail_kvaddr)
+ tail = qib_get_rcvhdrtail(rcd);
+ else
+ tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt);
+ return head == tail;
+}
+
+/*
+ * Modify the RCVCTRL register in a chip-specific way. This
+ * is a function because bit positions and (future) register
+ * location are chip-specific, but the needed operations are
+ * generic. <op> is a bit-mask because we often want to
+ * do multiple modifications.
+ */
+static void rcvctrl_7220_mod(struct qib_pportdata *ppd, unsigned int op,
+ int ctxt)
+{
+ struct qib_devdata *dd = ppd->dd;
+ u64 mask, val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
+ if (op & QIB_RCVCTRL_TAILUPD_ENB)
+ dd->rcvctrl |= (1ULL << IBA7220_R_TAILUPD_SHIFT);
+ if (op & QIB_RCVCTRL_TAILUPD_DIS)
+ dd->rcvctrl &= ~(1ULL << IBA7220_R_TAILUPD_SHIFT);
+ if (op & QIB_RCVCTRL_PKEY_ENB)
+ dd->rcvctrl &= ~(1ULL << IBA7220_R_PKEY_DIS_SHIFT);
+ if (op & QIB_RCVCTRL_PKEY_DIS)
+ dd->rcvctrl |= (1ULL << IBA7220_R_PKEY_DIS_SHIFT);
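+	/* a negative ctxt means operate on all configured contexts */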
+ if (ctxt < 0)
+ mask = (1ULL << dd->ctxtcnt) - 1;
+ else
+ mask = (1ULL << ctxt);
+ if (op & QIB_RCVCTRL_CTXT_ENB) {
+ /* always done for specific ctxt */
+ dd->rcvctrl |= (mask << SYM_LSB(RcvCtrl, PortEnable));
+ if (!(dd->flags & QIB_NODMA_RTAIL))
+ dd->rcvctrl |= 1ULL << IBA7220_R_TAILUPD_SHIFT;
+ /* Write these registers before the context is enabled. */
+ qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, ctxt,
+ dd->rcd[ctxt]->rcvhdrqtailaddr_phys);
+ qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt,
+ dd->rcd[ctxt]->rcvhdrq_phys);
+ dd->rcd[ctxt]->seq_cnt = 1;
+ }
+ if (op & QIB_RCVCTRL_CTXT_DIS)
+ dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, PortEnable));
+ if (op & QIB_RCVCTRL_INTRAVAIL_ENB)
+ dd->rcvctrl |= (mask << IBA7220_R_INTRAVAIL_SHIFT);
+ if (op & QIB_RCVCTRL_INTRAVAIL_DIS)
+ dd->rcvctrl &= ~(mask << IBA7220_R_INTRAVAIL_SHIFT);
+ qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
+ if ((op & QIB_RCVCTRL_INTRAVAIL_ENB) && dd->rhdrhead_intr_off) {
+ /* arm rcv interrupt */
+ val = qib_read_ureg32(dd, ur_rcvhdrhead, ctxt) |
+ dd->rhdrhead_intr_off;
+ qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
+ }
+ if (op & QIB_RCVCTRL_CTXT_ENB) {
+ /*
+ * Init the context registers also; if we were
+ * disabled, tail and head should both be zero
+ * already from the enable, but since we don't
+		 * know, we have to do it explicitly.
+ */
+ val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt);
+ qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt);
+
+ val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt);
+ dd->rcd[ctxt]->head = val;
+ /* If kctxt, interrupt on next receive. */
+ if (ctxt < dd->first_user_ctxt)
+ val |= dd->rhdrhead_intr_off;
+ qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
+ }
+ if (op & QIB_RCVCTRL_CTXT_DIS) {
+ if (ctxt >= 0) {
+ qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, ctxt, 0);
+ qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt, 0);
+ } else {
+ unsigned i;
+
+ for (i = 0; i < dd->cfgctxts; i++) {
+ qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr,
+ i, 0);
+ qib_write_kreg_ctxt(dd, kr_rcvhdraddr, i, 0);
+ }
+ }
+ }
+ spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
+}
+
+/*
+ * Modify the SENDCTRL register in a chip-specific way. This
+ * is a function because there may be multiple such registers with
+ * slightly different layouts. To start, we assume the
+ * "canonical" register layout of the first chips.
+ * The chip requires no back-to-back sendctrl writes, so write the
+ * scratch register after writing sendctrl.
+ */
+static void sendctrl_7220_mod(struct qib_pportdata *ppd, u32 op)
+{
+ struct qib_devdata *dd = ppd->dd;
+ u64 tmp_dd_sendctrl;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dd->sendctrl_lock, flags);
+
+ /* First the ones that are "sticky", saved in shadow */
+ if (op & QIB_SENDCTRL_CLEAR)
+ dd->sendctrl = 0;
+ if (op & QIB_SENDCTRL_SEND_DIS)
+ dd->sendctrl &= ~SYM_MASK(SendCtrl, SPioEnable);
+ else if (op & QIB_SENDCTRL_SEND_ENB) {
+ dd->sendctrl |= SYM_MASK(SendCtrl, SPioEnable);
+ if (dd->flags & QIB_USE_SPCL_TRIG)
+ dd->sendctrl |= SYM_MASK(SendCtrl,
+ SSpecialTriggerEn);
+ }
+ if (op & QIB_SENDCTRL_AVAIL_DIS)
+ dd->sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
+ else if (op & QIB_SENDCTRL_AVAIL_ENB)
+ dd->sendctrl |= SYM_MASK(SendCtrl, SendBufAvailUpd);
+
+ if (op & QIB_SENDCTRL_DISARM_ALL) {
+ u32 i, last;
+
+ tmp_dd_sendctrl = dd->sendctrl;
+ /*
+ * disarm any that are not yet launched, disabling sends
+ * and updates until done.
+ */
+ last = dd->piobcnt2k + dd->piobcnt4k;
+ tmp_dd_sendctrl &=
+ ~(SYM_MASK(SendCtrl, SPioEnable) |
+ SYM_MASK(SendCtrl, SendBufAvailUpd));
+ for (i = 0; i < last; i++) {
+ qib_write_kreg(dd, kr_sendctrl,
+ tmp_dd_sendctrl |
+ SYM_MASK(SendCtrl, Disarm) | i);
+ qib_write_kreg(dd, kr_scratch, 0);
+ }
+ }
+
+ tmp_dd_sendctrl = dd->sendctrl;
+
+ if (op & QIB_SENDCTRL_FLUSH)
+ tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Abort);
+ if (op & QIB_SENDCTRL_DISARM)
+ tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Disarm) |
+ ((op & QIB_7220_SendCtrl_DisarmPIOBuf_RMASK) <<
+ SYM_LSB(SendCtrl, DisarmPIOBuf));
+ if ((op & QIB_SENDCTRL_AVAIL_BLIP) &&
+ (dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd)))
+ tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
+
+ qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl);
+ qib_write_kreg(dd, kr_scratch, 0);
+
+ if (op & QIB_SENDCTRL_AVAIL_BLIP) {
+ qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
+ qib_write_kreg(dd, kr_scratch, 0);
+ }
+
+ spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
+
+ if (op & QIB_SENDCTRL_FLUSH) {
+ u32 v;
+ /*
+ * ensure writes have hit chip, then do a few
+ * more reads, to allow DMA of pioavail registers
+ * to occur, so in-memory copy is in sync with
+ * the chip. Not always safe to sleep.
+ */
+ v = qib_read_kreg32(dd, kr_scratch);
+ qib_write_kreg(dd, kr_scratch, v);
+ v = qib_read_kreg32(dd, kr_scratch);
+ qib_write_kreg(dd, kr_scratch, v);
+ qib_read_kreg32(dd, kr_scratch);
+ }
+}
+
+/**
+ * qib_portcntr_7220 - read a per-port counter
+ * @ppd: the qlogic_ib port
+ * @reg: the counter to snapshot
+ */
+static u64 qib_portcntr_7220(struct qib_pportdata *ppd, u32 reg)
+{
+ u64 ret = 0ULL;
+ struct qib_devdata *dd = ppd->dd;
+ u16 creg;
+ /* 0xffff for unimplemented or synthesized counters */
+ static const u16 xlator[] = {
+ [QIBPORTCNTR_PKTSEND] = cr_pktsend,
+ [QIBPORTCNTR_WORDSEND] = cr_wordsend,
+ [QIBPORTCNTR_PSXMITDATA] = cr_psxmitdatacount,
+ [QIBPORTCNTR_PSXMITPKTS] = cr_psxmitpktscount,
+ [QIBPORTCNTR_PSXMITWAIT] = cr_psxmitwaitcount,
+ [QIBPORTCNTR_SENDSTALL] = cr_sendstall,
+ [QIBPORTCNTR_PKTRCV] = cr_pktrcv,
+ [QIBPORTCNTR_PSRCVDATA] = cr_psrcvdatacount,
+ [QIBPORTCNTR_PSRCVPKTS] = cr_psrcvpktscount,
+ [QIBPORTCNTR_RCVEBP] = cr_rcvebp,
+ [QIBPORTCNTR_RCVOVFL] = cr_rcvovfl,
+ [QIBPORTCNTR_WORDRCV] = cr_wordrcv,
+ [QIBPORTCNTR_RXDROPPKT] = cr_rxdroppkt,
+ [QIBPORTCNTR_RXLOCALPHYERR] = cr_rxotherlocalphyerr,
+ [QIBPORTCNTR_RXVLERR] = cr_rxvlerr,
+ [QIBPORTCNTR_ERRICRC] = cr_erricrc,
+ [QIBPORTCNTR_ERRVCRC] = cr_errvcrc,
+ [QIBPORTCNTR_ERRLPCRC] = cr_errlpcrc,
+ [QIBPORTCNTR_BADFORMAT] = cr_badformat,
+ [QIBPORTCNTR_ERR_RLEN] = cr_err_rlen,
+ [QIBPORTCNTR_IBSYMBOLERR] = cr_ibsymbolerr,
+ [QIBPORTCNTR_INVALIDRLEN] = cr_invalidrlen,
+ [QIBPORTCNTR_UNSUPVL] = cr_txunsupvl,
+ [QIBPORTCNTR_EXCESSBUFOVFL] = cr_excessbufferovfl,
+ [QIBPORTCNTR_ERRLINK] = cr_errlink,
+ [QIBPORTCNTR_IBLINKDOWN] = cr_iblinkdown,
+ [QIBPORTCNTR_IBLINKERRRECOV] = cr_iblinkerrrecov,
+ [QIBPORTCNTR_LLI] = cr_locallinkintegrityerr,
+ [QIBPORTCNTR_PSINTERVAL] = cr_psinterval,
+ [QIBPORTCNTR_PSSTART] = cr_psstart,
+ [QIBPORTCNTR_PSSTAT] = cr_psstat,
+ [QIBPORTCNTR_VL15PKTDROP] = cr_vl15droppedpkt,
+ [QIBPORTCNTR_ERRPKEY] = cr_errpkey,
+ [QIBPORTCNTR_KHDROVFL] = 0xffff,
+ };
+
+ if (reg >= ARRAY_SIZE(xlator)) {
+ qib_devinfo(ppd->dd->pcidev,
+ "Unimplemented portcounter %u\n", reg);
+ goto done;
+ }
+ creg = xlator[reg];
+
+ if (reg == QIBPORTCNTR_KHDROVFL) {
+ int i;
+
+ /* sum over all kernel contexts */
+ for (i = 0; i < dd->first_user_ctxt; i++)
+ ret += read_7220_creg32(dd, cr_portovfl + i);
+ }
+ if (creg == 0xffff)
+ goto done;
+
+ /*
+	 * only fast-incrementing counters are 64 bit; use 32-bit reads to
+	 * avoid two independent reads when on Opteron
+ */
+ if ((creg == cr_wordsend || creg == cr_wordrcv ||
+ creg == cr_pktsend || creg == cr_pktrcv))
+ ret = read_7220_creg(dd, creg);
+ else
+ ret = read_7220_creg32(dd, creg);
+ if (creg == cr_ibsymbolerr) {
+ if (dd->pport->cpspec->ibdeltainprog)
+ ret -= ret - ppd->cpspec->ibsymsnap;
+ ret -= dd->pport->cpspec->ibsymdelta;
+ } else if (creg == cr_iblinkerrrecov) {
+ if (dd->pport->cpspec->ibdeltainprog)
+ ret -= ret - ppd->cpspec->iblnkerrsnap;
+ ret -= dd->pport->cpspec->iblnkerrdelta;
+ }
+done:
+ return ret;
+}
+
+/*
+ * Device counter names (not port-specific), one line per stat,
+ * single string. Used by utilities like ipathstats to print the stats
+ * in a way which works for different versions of drivers, without changing
+ * the utility. Names need to be 12 chars or less (w/o newline), for proper
+ * display by utility.
+ * Non-error counters are first.
+ * Start of "error" counters is indicated by a leading "E " on the first
+ * "error" counter, and doesn't count in label length.
+ * The EgrOvfl list needs to be last so we truncate them at the configured
+ * context count for the device.
+ * cntr7220indices contains the corresponding register indices.
+ */
+static const char cntr7220names[] =
+ "Interrupts\n"
+ "HostBusStall\n"
+ "E RxTIDFull\n"
+ "RxTIDInvalid\n"
+ "Ctxt0EgrOvfl\n"
+ "Ctxt1EgrOvfl\n"
+ "Ctxt2EgrOvfl\n"
+ "Ctxt3EgrOvfl\n"
+ "Ctxt4EgrOvfl\n"
+ "Ctxt5EgrOvfl\n"
+ "Ctxt6EgrOvfl\n"
+ "Ctxt7EgrOvfl\n"
+ "Ctxt8EgrOvfl\n"
+ "Ctxt9EgrOvfl\n"
+ "Ctx10EgrOvfl\n"
+ "Ctx11EgrOvfl\n"
+ "Ctx12EgrOvfl\n"
+ "Ctx13EgrOvfl\n"
+ "Ctx14EgrOvfl\n"
+ "Ctx15EgrOvfl\n"
+ "Ctx16EgrOvfl\n";
+
+static const size_t cntr7220indices[] = {
+ cr_lbint,
+ cr_lbflowstall,
+ cr_errtidfull,
+ cr_errtidvalid,
+ cr_portovfl + 0,
+ cr_portovfl + 1,
+ cr_portovfl + 2,
+ cr_portovfl + 3,
+ cr_portovfl + 4,
+ cr_portovfl + 5,
+ cr_portovfl + 6,
+ cr_portovfl + 7,
+ cr_portovfl + 8,
+ cr_portovfl + 9,
+ cr_portovfl + 10,
+ cr_portovfl + 11,
+ cr_portovfl + 12,
+ cr_portovfl + 13,
+ cr_portovfl + 14,
+ cr_portovfl + 15,
+ cr_portovfl + 16,
+};
+
+/*
+ * Same as cntr7220names and cntr7220indices, but for port-specific counters.
+ * portcntr7220indices is somewhat complicated by some registers needing
+ * adjustments of various kinds, and those are ORed with _PORT_VIRT_FLAG.
+ */
+static const char portcntr7220names[] =
+ "TxPkt\n"
+ "TxFlowPkt\n"
+ "TxWords\n"
+ "RxPkt\n"
+ "RxFlowPkt\n"
+ "RxWords\n"
+ "TxFlowStall\n"
+ "TxDmaDesc\n" /* 7220 and 7322-only */
+ "E RxDlidFltr\n" /* 7220 and 7322-only */
+ "IBStatusChng\n"
+ "IBLinkDown\n"
+ "IBLnkRecov\n"
+ "IBRxLinkErr\n"
+ "IBSymbolErr\n"
+ "RxLLIErr\n"
+ "RxBadFormat\n"
+ "RxBadLen\n"
+ "RxBufOvrfl\n"
+ "RxEBP\n"
+ "RxFlowCtlErr\n"
+ "RxICRCerr\n"
+ "RxLPCRCerr\n"
+ "RxVCRCerr\n"
+ "RxInvalLen\n"
+ "RxInvalPKey\n"
+ "RxPktDropped\n"
+ "TxBadLength\n"
+ "TxDropped\n"
+ "TxInvalLen\n"
+ "TxUnderrun\n"
+ "TxUnsupVL\n"
+ "RxLclPhyErr\n" /* 7220 and 7322-only */
+ "RxVL15Drop\n" /* 7220 and 7322-only */
+ "RxVlErr\n" /* 7220 and 7322-only */
+ "XcessBufOvfl\n" /* 7220 and 7322-only */
+ ;
+
+#define _PORT_VIRT_FLAG 0x8000 /* "virtual", need adjustments */
+static const size_t portcntr7220indices[] = {
+ QIBPORTCNTR_PKTSEND | _PORT_VIRT_FLAG,
+ cr_pktsendflow,
+ QIBPORTCNTR_WORDSEND | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_PKTRCV | _PORT_VIRT_FLAG,
+ cr_pktrcvflowctrl,
+ QIBPORTCNTR_WORDRCV | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_SENDSTALL | _PORT_VIRT_FLAG,
+ cr_txsdmadesc,
+ cr_rxdlidfltr,
+ cr_ibstatuschange,
+ QIBPORTCNTR_IBLINKDOWN | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_IBLINKERRRECOV | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_ERRLINK | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_IBSYMBOLERR | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_LLI | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_BADFORMAT | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_ERR_RLEN | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_RCVOVFL | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_RCVEBP | _PORT_VIRT_FLAG,
+ cr_rcvflowctrl_err,
+ QIBPORTCNTR_ERRICRC | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_ERRLPCRC | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_ERRVCRC | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_INVALIDRLEN | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_ERRPKEY | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_RXDROPPKT | _PORT_VIRT_FLAG,
+ cr_invalidslen,
+ cr_senddropped,
+ cr_errslen,
+ cr_sendunderrun,
+ cr_txunsupvl,
+ QIBPORTCNTR_RXLOCALPHYERR | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_VL15PKTDROP | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_RXVLERR | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_EXCESSBUFOVFL | _PORT_VIRT_FLAG,
+};
+
+/* do all the setup to make the counter reads efficient later */
+static void init_7220_cntrnames(struct qib_devdata *dd)
+{
+ int i, j = 0;
+ char *s;
+
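+	/*
+	 * Count names, stopping after the per-context EgrOvfl entry for
+	 * the last configured context, so unused contexts aren't shown.
+	 */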
+ for (i = 0, s = (char *)cntr7220names; s && j <= dd->cfgctxts;
+ i++) {
+ /* we always have at least one counter before the egrovfl */
+ if (!j && !strncmp("Ctxt0EgrOvfl", s + 1, 12))
+ j = 1;
+ s = strchr(s + 1, '\n');
+ if (s && j)
+ j++;
+ }
+ dd->cspec->ncntrs = i;
+ if (!s)
+ /* full list; size is without terminating null */
+ dd->cspec->cntrnamelen = sizeof(cntr7220names) - 1;
+ else
+ dd->cspec->cntrnamelen = 1 + s - cntr7220names;
+ dd->cspec->cntrs = kmalloc(dd->cspec->ncntrs
+ * sizeof(u64), GFP_KERNEL);
+ if (!dd->cspec->cntrs)
+ qib_dev_err(dd, "Failed allocation for counters\n");
+
+ for (i = 0, s = (char *)portcntr7220names; s; i++)
+ s = strchr(s + 1, '\n');
+ dd->cspec->nportcntrs = i - 1;
+ dd->cspec->portcntrnamelen = sizeof(portcntr7220names) - 1;
+ dd->cspec->portcntrs = kmalloc(dd->cspec->nportcntrs
+ * sizeof(u64), GFP_KERNEL);
+ if (!dd->cspec->portcntrs)
+ qib_dev_err(dd, "Failed allocation for portcounters\n");
+}
+
+static u32 qib_read_7220cntrs(struct qib_devdata *dd, loff_t pos, char **namep,
+ u64 **cntrp)
+{
+ u32 ret;
+
+ if (!dd->cspec->cntrs) {
+ ret = 0;
+ goto done;
+ }
+
+ if (namep) {
+ *namep = (char *)cntr7220names;
+ ret = dd->cspec->cntrnamelen;
+ if (pos >= ret)
+ ret = 0; /* final read after getting everything */
+ } else {
+ u64 *cntr = dd->cspec->cntrs;
+ int i;
+
+ ret = dd->cspec->ncntrs * sizeof(u64);
+ if (!cntr || pos >= ret) {
+ /* everything read, or couldn't get memory */
+ ret = 0;
+ goto done;
+ }
+
+ *cntrp = cntr;
+ for (i = 0; i < dd->cspec->ncntrs; i++)
+ *cntr++ = read_7220_creg32(dd, cntr7220indices[i]);
+ }
+done:
+ return ret;
+}
+
+static u32 qib_read_7220portcntrs(struct qib_devdata *dd, loff_t pos, u32 port,
+ char **namep, u64 **cntrp)
+{
+ u32 ret;
+
+ if (!dd->cspec->portcntrs) {
+ ret = 0;
+ goto done;
+ }
+ if (namep) {
+ *namep = (char *)portcntr7220names;
+ ret = dd->cspec->portcntrnamelen;
+ if (pos >= ret)
+ ret = 0; /* final read after getting everything */
+ } else {
+ u64 *cntr = dd->cspec->portcntrs;
+ struct qib_pportdata *ppd = &dd->pport[port];
+ int i;
+
+ ret = dd->cspec->nportcntrs * sizeof(u64);
+ if (!cntr || pos >= ret) {
+ /* everything read, or couldn't get memory */
+ ret = 0;
+ goto done;
+ }
+ *cntrp = cntr;
+ for (i = 0; i < dd->cspec->nportcntrs; i++) {
+ if (portcntr7220indices[i] & _PORT_VIRT_FLAG)
+ *cntr++ = qib_portcntr_7220(ppd,
+ portcntr7220indices[i] &
+ ~_PORT_VIRT_FLAG);
+ else
+ *cntr++ = read_7220_creg32(dd,
+ portcntr7220indices[i]);
+ }
+ }
+done:
+ return ret;
+}
+
+/**
+ * qib_get_7220_faststats - get word counters from chip before they overflow
+ * @opaque: contains a pointer to the qlogic_ib device qib_devdata
+ *
+ * This needs more work; in particular, a decision on whether we really
+ * need traffic_wds maintained the way it is.
+ * Called from add_timer.
+ */
+static void qib_get_7220_faststats(unsigned long opaque)
+{
+ struct qib_devdata *dd = (struct qib_devdata *) opaque;
+ struct qib_pportdata *ppd = dd->pport;
+ unsigned long flags;
+ u64 traffic_wds;
+
+ /*
+ * don't access the chip while running diags, or memory diags can
+ * fail
+ */
+ if (!(dd->flags & QIB_INITTED) || dd->diag_client)
+ /* but re-arm the timer, for diags case; won't hurt other */
+ goto done;
+
+ /*
+ * We now try to maintain an activity timer, based on traffic
+ * exceeding a threshold, so we need to check the word-counts
+ * even if they are 64-bit.
+ */
+ traffic_wds = qib_portcntr_7220(ppd, cr_wordsend) +
+ qib_portcntr_7220(ppd, cr_wordrcv);
+ spin_lock_irqsave(&dd->eep_st_lock, flags);
+ traffic_wds -= dd->traffic_wds;
+ dd->traffic_wds += traffic_wds;
+ if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
+ atomic_add(5, &dd->active_time); /* S/B #define */
+ spin_unlock_irqrestore(&dd->eep_st_lock, flags);
+done:
+ mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
+}
+
+/*
+ * If we are using MSI, try to fall back to INTx.
+ */
+static int qib_7220_intr_fallback(struct qib_devdata *dd)
+{
+ if (!dd->msi_lo)
+ return 0;
+
+ qib_devinfo(dd->pcidev, "MSI interrupt not detected,"
+ " trying INTx interrupts\n");
+ qib_7220_free_irq(dd);
+ qib_enable_intx(dd->pcidev);
+ /*
+ * Some newer kernels require free_irq before disable_msi,
+	 * and the irq can be changed during disable and INTx enable,
+	 * so we therefore need to use the pcidev->irq value,
+ * not our saved MSI value.
+ */
+ dd->cspec->irq = dd->pcidev->irq;
+ qib_setup_7220_interrupt(dd);
+ return 1;
+}
+
+/*
+ * Reset the XGXS (between serdes and IBC). Slightly less intrusive
+ * than resetting the IBC or external link state, and useful in some
+ * cases to cause some retraining. To do this right, we reset IBC
+ * as well.
+ */
+static void qib_7220_xgxs_reset(struct qib_pportdata *ppd)
+{
+ u64 val, prev_val;
+ struct qib_devdata *dd = ppd->dd;
+
+ prev_val = qib_read_kreg64(dd, kr_xgxs_cfg);
+ val = prev_val | QLOGIC_IB_XGXS_RESET;
+ prev_val &= ~QLOGIC_IB_XGXS_RESET; /* be sure */
+ qib_write_kreg(dd, kr_control,
+ dd->control & ~QLOGIC_IB_C_LINKENABLE);
+ qib_write_kreg(dd, kr_xgxs_cfg, val);
+ qib_read_kreg32(dd, kr_scratch);
+ qib_write_kreg(dd, kr_xgxs_cfg, prev_val);
+ qib_write_kreg(dd, kr_control, dd->control);
+}
+
+/*
+ * For this chip, we want to use the same buffer every time
+ * we are trying to bring the link up (they are always VL15
+ * packets). At that link state the packet should always go out immediately
+ * (or at least be discarded at the tx interface if the link is down).
+ * If it doesn't, and the buffer isn't available, that means some other
+ * sender has gotten ahead of us, and is preventing our packet from going
+ * out. In that case, we flush all packets, and try again. If that still
+ * fails, we fail the request, and hope things work the next time around.
+ *
+ * We don't need very complicated heuristics on whether the packet had
+ * time to go out or not, since even at SDR 1X, it goes out in very short
+ * time periods, covered by the chip reads done here and as part of the
+ * flush.
+ */
+static u32 __iomem *get_7220_link_buf(struct qib_pportdata *ppd, u32 *bnum)
+{
+ u32 __iomem *buf;
+ u32 lbuf = ppd->dd->cspec->lastbuf_for_pio;
+ int do_cleanup;
+ unsigned long flags;
+
+ /*
+ * always blip to get avail list updated, since it's almost
+ * always needed, and is fairly cheap.
+ */
+ sendctrl_7220_mod(ppd->dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
+ qib_read_kreg64(ppd->dd, kr_scratch); /* extra chip flush */
+ buf = qib_getsendbuf_range(ppd->dd, bnum, lbuf, lbuf);
+ if (buf)
+ goto done;
+
+ spin_lock_irqsave(&ppd->sdma_lock, flags);
+ if (ppd->sdma_state.current_state == qib_sdma_state_s20_idle &&
+ ppd->sdma_state.current_state != qib_sdma_state_s00_hw_down) {
+ __qib_sdma_process_event(ppd, qib_sdma_event_e00_go_hw_down);
+ do_cleanup = 0;
+ } else {
+ do_cleanup = 1;
+ qib_7220_sdma_hw_clean_up(ppd);
+ }
+ spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+
+ if (do_cleanup) {
+ qib_read_kreg64(ppd->dd, kr_scratch); /* extra chip flush */
+ buf = qib_getsendbuf_range(ppd->dd, bnum, lbuf, lbuf);
+ }
+done:
+ return buf;
+}
+
+/*
+ * This code for non-IBTA-compliant IB speed negotiation is only known to
+ * work for the SDR to DDR transition, and only between an HCA and a switch
+ * with recent firmware. It is based on observed heuristics, rather than
+ * actual knowledge of the non-compliant speed negotiation.
+ * It has a number of hard-coded fields, since the hope is to rewrite this
+ * when a spec is available on how the negotiation is intended to work.
+ */
+static void autoneg_7220_sendpkt(struct qib_pportdata *ppd, u32 *hdr,
+ u32 dcnt, u32 *data)
+{
+ int i;
+ u64 pbc;
+ u32 __iomem *piobuf;
+ u32 pnum;
+ struct qib_devdata *dd = ppd->dd;
+
+ i = 0;
+ pbc = 7 + dcnt + 1; /* 7 dword header, dword data, icrc */
+ pbc |= PBC_7220_VL15_SEND;
+ while (!(piobuf = get_7220_link_buf(ppd, &pnum))) {
+ if (i++ > 5)
+ return;
+ udelay(2);
+ }
+ sendctrl_7220_mod(dd->pport, QIB_SENDCTRL_DISARM_BUF(pnum));
+ writeq(pbc, piobuf);
+ qib_flush_wc();
+ qib_pio_copy(piobuf + 2, hdr, 7);
+ qib_pio_copy(piobuf + 9, data, dcnt);
+ if (dd->flags & QIB_USE_SPCL_TRIG) {
+ u32 spcl_off = (pnum >= dd->piobcnt2k) ? 2047 : 1023;
+
+ qib_flush_wc();
+ __raw_writel(0xaebecede, piobuf + spcl_off);
+ }
+ qib_flush_wc();
+ qib_sendbuf_done(dd, pnum);
+}
+
+/*
+ * _start packet gets sent twice at start, _done gets sent twice at end
+ */
+static void autoneg_7220_send(struct qib_pportdata *ppd, int which)
+{
+ struct qib_devdata *dd = ppd->dd;
+ static u32 swapped;
+ u32 dw, i, hcnt, dcnt, *data;
+ static u32 hdr[7] = { 0xf002ffff, 0x48ffff, 0x6400abba };
+ static u32 madpayload_start[0x40] = {
+ 0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
+ 0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x1, 0x1388, 0x15e, 0x1, /* rest 0's */
+ };
+ static u32 madpayload_done[0x40] = {
+ 0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
+ 0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x40000001, 0x1388, 0x15e, /* rest 0's */
+ };
+
+ dcnt = ARRAY_SIZE(madpayload_start);
+ hcnt = ARRAY_SIZE(hdr);
+ if (!swapped) {
+ /* for maintainability, do it at runtime */
+ for (i = 0; i < hcnt; i++) {
+ dw = (__force u32) cpu_to_be32(hdr[i]);
+ hdr[i] = dw;
+ }
+ for (i = 0; i < dcnt; i++) {
+ dw = (__force u32) cpu_to_be32(madpayload_start[i]);
+ madpayload_start[i] = dw;
+ dw = (__force u32) cpu_to_be32(madpayload_done[i]);
+ madpayload_done[i] = dw;
+ }
+ swapped = 1;
+ }
+
+ data = which ? madpayload_done : madpayload_start;
+
+ autoneg_7220_sendpkt(ppd, hdr, dcnt, data);
+ qib_read_kreg64(dd, kr_scratch);
+ udelay(2);
+ autoneg_7220_sendpkt(ppd, hdr, dcnt, data);
+ qib_read_kreg64(dd, kr_scratch);
+ udelay(2);
+}
+
+/*
+ * Do the absolute minimum to cause an IB speed change, and make it
+ * ready, but don't actually trigger the change. The caller will
+ * do that when ready (if link is in Polling training state, it will
+ * happen immediately, otherwise when link next goes down)
+ *
+ * This routine should only be used as part of the DDR autonegotiation
+ * code for devices that are not compliant with IB 1.2 (or code that
+ * fixes things up for same).
+ *
+ * When the link has gone down and autoneg is enabled, or autoneg has
+ * failed and we give up until next time, we set both speeds, and
+ * then we want IBTA enabled as well as "use max enabled speed".
+ */
+static void set_7220_ibspeed_fast(struct qib_pportdata *ppd, u32 speed)
+{
+ ppd->cpspec->ibcddrctrl &= ~(IBA7220_IBC_SPEED_AUTONEG_MASK |
+ IBA7220_IBC_IBTA_1_2_MASK);
+
+ if (speed == (QIB_IB_SDR | QIB_IB_DDR))
+ ppd->cpspec->ibcddrctrl |= IBA7220_IBC_SPEED_AUTONEG_MASK |
+ IBA7220_IBC_IBTA_1_2_MASK;
+ else
+ ppd->cpspec->ibcddrctrl |= speed == QIB_IB_DDR ?
+ IBA7220_IBC_SPEED_DDR : IBA7220_IBC_SPEED_SDR;
+
+ qib_write_kreg(ppd->dd, kr_ibcddrctrl, ppd->cpspec->ibcddrctrl);
+ qib_write_kreg(ppd->dd, kr_scratch, 0);
+}
+
+/*
+ * This routine is only used when we are not talking to another
+ * IB 1.2-compliant device that we think can do DDR.
+ * (This includes all existing switch chips as of Oct 2007.)
+ * 1.2-compliant devices go directly to DDR prior to reaching INIT
+ */
+static void try_7220_autoneg(struct qib_pportdata *ppd)
+{
+ unsigned long flags;
+
+ /*
+ * Required for older non-IB1.2 DDR switches. Newer
+ * non-IB-compliant switches don't need it, but so far,
+ * aren't bothered by it either. "Magic constant"
+ */
+ qib_write_kreg(ppd->dd, kr_ncmodectrl, 0x3b9dc07);
+
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags |= QIBL_IB_AUTONEG_INPROG;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ autoneg_7220_send(ppd, 0);
+ set_7220_ibspeed_fast(ppd, QIB_IB_DDR);
+
+ toggle_7220_rclkrls(ppd->dd);
+ /* 2 msec is minimum length of a poll cycle */
+ schedule_delayed_work(&ppd->cpspec->autoneg_work,
+ msecs_to_jiffies(2));
+}
+
+/*
+ * Handle the empirically determined mechanism for auto-negotiation
+ * of DDR speed with switches.
+ */
+static void autoneg_7220_work(struct work_struct *work)
+{
+ struct qib_pportdata *ppd;
+ struct qib_devdata *dd;
+ u64 startms;
+ u32 i;
+ unsigned long flags;
+
+ ppd = &container_of(work, struct qib_chippport_specific,
+ autoneg_work.work)->pportdata;
+ dd = ppd->dd;
+
+ startms = jiffies_to_msecs(jiffies);
+
+ /*
+	 * Busy wait for this first part; it should be at most a
+ * few hundred usec, since we scheduled ourselves for 2msec.
+ */
+ for (i = 0; i < 25; i++) {
+ if (SYM_FIELD(ppd->lastibcstat, IBCStatus, LinkTrainingState)
+ == IB_7220_LT_STATE_POLLQUIET) {
+ qib_set_linkstate(ppd, QIB_IB_LINKDOWN_DISABLE);
+ break;
+ }
+ udelay(100);
+ }
+
+ if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
+ goto done; /* we got there early or told to stop */
+
+ /* we expect this to timeout */
+ if (wait_event_timeout(ppd->cpspec->autoneg_wait,
+ !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
+ msecs_to_jiffies(90)))
+ goto done;
+
+ toggle_7220_rclkrls(dd);
+
+ /* we expect this to timeout */
+ if (wait_event_timeout(ppd->cpspec->autoneg_wait,
+ !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
+ msecs_to_jiffies(1700)))
+ goto done;
+
+ set_7220_ibspeed_fast(ppd, QIB_IB_SDR);
+ toggle_7220_rclkrls(dd);
+
+ /*
+ * Wait up to 250 msec for link to train and get to INIT at DDR;
+ * this should terminate early.
+ */
+ wait_event_timeout(ppd->cpspec->autoneg_wait,
+ !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
+ msecs_to_jiffies(250));
+done:
+ if (ppd->lflags & QIBL_IB_AUTONEG_INPROG) {
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
+ if (dd->cspec->autoneg_tries == AUTONEG_TRIES) {
+ ppd->lflags |= QIBL_IB_AUTONEG_FAILED;
+ dd->cspec->autoneg_tries = 0;
+ }
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ set_7220_ibspeed_fast(ppd, ppd->link_speed_enabled);
+ }
+}
+
+static u32 qib_7220_iblink_state(u64 ibcs)
+{
+ u32 state = (u32)SYM_FIELD(ibcs, IBCStatus, LinkState);
+
+ switch (state) {
+ case IB_7220_L_STATE_INIT:
+ state = IB_PORT_INIT;
+ break;
+ case IB_7220_L_STATE_ARM:
+ state = IB_PORT_ARMED;
+ break;
+ case IB_7220_L_STATE_ACTIVE:
+ /* fall through */
+ case IB_7220_L_STATE_ACT_DEFER:
+ state = IB_PORT_ACTIVE;
+ break;
+ default: /* fall through */
+ case IB_7220_L_STATE_DOWN:
+ state = IB_PORT_DOWN;
+ break;
+ }
+ return state;
+}
+
+/* returns the IBTA port state, rather than the IBC link training state */
+static u8 qib_7220_phys_portstate(u64 ibcs)
+{
+ u8 state = (u8)SYM_FIELD(ibcs, IBCStatus, LinkTrainingState);
+ return qib_7220_physportstate[state];
+}
+
+static int qib_7220_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
+{
+ int ret = 0, symadj = 0;
+ struct qib_devdata *dd = ppd->dd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags &= ~QIBL_IB_FORCE_NOTIFY;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+
+ if (!ibup) {
+ /*
+ * When the link goes down we don't want AEQ running, so it
+ * won't interfere with IBC training, etc., and we need
+ * to go back to the static SerDes preset values.
+ */
+ if (!(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
+ QIBL_IB_AUTONEG_INPROG)))
+ set_7220_ibspeed_fast(ppd, ppd->link_speed_enabled);
+ if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
+ qib_sd7220_presets(dd);
+ qib_cancel_sends(ppd); /* initial disarm, etc. */
+ spin_lock_irqsave(&ppd->sdma_lock, flags);
+ if (__qib_sdma_running(ppd))
+ __qib_sdma_process_event(ppd,
+ qib_sdma_event_e70_go_idle);
+ spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+ }
+		/* this might be better placed in qib_sd7220_presets() */
+ set_7220_relock_poll(dd, ibup);
+ } else {
+ if (qib_compat_ddr_negotiate &&
+ !(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
+ QIBL_IB_AUTONEG_INPROG)) &&
+ ppd->link_speed_active == QIB_IB_SDR &&
+ (ppd->link_speed_enabled & (QIB_IB_DDR | QIB_IB_SDR)) ==
+ (QIB_IB_DDR | QIB_IB_SDR) &&
+ dd->cspec->autoneg_tries < AUTONEG_TRIES) {
+ /* we are SDR, and DDR auto-negotiation enabled */
+ ++dd->cspec->autoneg_tries;
+ if (!ppd->cpspec->ibdeltainprog) {
+ ppd->cpspec->ibdeltainprog = 1;
+ ppd->cpspec->ibsymsnap = read_7220_creg32(dd,
+ cr_ibsymbolerr);
+ ppd->cpspec->iblnkerrsnap = read_7220_creg32(dd,
+ cr_iblinkerrrecov);
+ }
+ try_7220_autoneg(ppd);
+ ret = 1; /* no other IB status change processing */
+ } else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
+ ppd->link_speed_active == QIB_IB_SDR) {
+ autoneg_7220_send(ppd, 1);
+ set_7220_ibspeed_fast(ppd, QIB_IB_DDR);
+ udelay(2);
+ toggle_7220_rclkrls(dd);
+ ret = 1; /* no other IB status change processing */
+ } else {
+ if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
+ (ppd->link_speed_active & QIB_IB_DDR)) {
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags &= ~(QIBL_IB_AUTONEG_INPROG |
+ QIBL_IB_AUTONEG_FAILED);
+ spin_unlock_irqrestore(&ppd->lflags_lock,
+ flags);
+ dd->cspec->autoneg_tries = 0;
+ /* re-enable SDR, for next link down */
+ set_7220_ibspeed_fast(ppd,
+ ppd->link_speed_enabled);
+ wake_up(&ppd->cpspec->autoneg_wait);
+ symadj = 1;
+ } else if (ppd->lflags & QIBL_IB_AUTONEG_FAILED) {
+ /*
+ * Clear autoneg failure flag, and do setup
+ * so we'll try next time link goes down and
+ * back to INIT (possibly connected to a
+ * different device).
+ */
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
+ spin_unlock_irqrestore(&ppd->lflags_lock,
+ flags);
+ ppd->cpspec->ibcddrctrl |=
+ IBA7220_IBC_IBTA_1_2_MASK;
+ qib_write_kreg(dd, kr_ncmodectrl, 0);
+ symadj = 1;
+ }
+ }
+
+ if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
+ symadj = 1;
+
+ if (!ret) {
+ ppd->delay_mult = rate_to_delay
+ [(ibcs >> IBA7220_LINKSPEED_SHIFT) & 1]
+ [(ibcs >> IBA7220_LINKWIDTH_SHIFT) & 1];
+
+ set_7220_relock_poll(dd, ibup);
+ spin_lock_irqsave(&ppd->sdma_lock, flags);
+ /*
+ * Unlike 7322, the 7220 needs this, due to lack of
+ * interrupt in some cases when we have sdma active
+ * when the link goes down.
+ */
+ if (ppd->sdma_state.current_state !=
+ qib_sdma_state_s20_idle)
+ __qib_sdma_process_event(ppd,
+ qib_sdma_event_e00_go_hw_down);
+ spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+ }
+ }
+
+ if (symadj) {
+ if (ppd->cpspec->ibdeltainprog) {
+ ppd->cpspec->ibdeltainprog = 0;
+ ppd->cpspec->ibsymdelta += read_7220_creg32(ppd->dd,
+ cr_ibsymbolerr) - ppd->cpspec->ibsymsnap;
+ ppd->cpspec->iblnkerrdelta += read_7220_creg32(ppd->dd,
+ cr_iblinkerrrecov) - ppd->cpspec->iblnkerrsnap;
+ }
+ } else if (!ibup && qib_compat_ddr_negotiate &&
+ !ppd->cpspec->ibdeltainprog &&
+ !(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
+ ppd->cpspec->ibdeltainprog = 1;
+ ppd->cpspec->ibsymsnap = read_7220_creg32(ppd->dd,
+ cr_ibsymbolerr);
+ ppd->cpspec->iblnkerrsnap = read_7220_creg32(ppd->dd,
+ cr_iblinkerrrecov);
+ }
+
+ if (!ret)
+ qib_setup_7220_setextled(ppd, ibup);
+ return ret;
+}
+
+/*
+ * Does read/modify/write to appropriate registers to
+ * set output and direction bits selected by mask.
+ * These are in their canonical positions (e.g. lsb of
+ * dir will end up in D48 of extctrl on existing chips).
+ * Returns contents of GP Inputs.
+ */
+static int gpio_7220_mod(struct qib_devdata *dd, u32 out, u32 dir, u32 mask)
+{
+ u64 read_val, new_out;
+ unsigned long flags;
+
+ if (mask) {
+ /* some bits being written, lock access to GPIO */
+ dir &= mask;
+ out &= mask;
+ spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
+ dd->cspec->extctrl &= ~((u64)mask << SYM_LSB(EXTCtrl, GPIOOe));
+ dd->cspec->extctrl |= ((u64) dir << SYM_LSB(EXTCtrl, GPIOOe));
+ new_out = (dd->cspec->gpio_out & ~mask) | out;
+
+ qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
+ qib_write_kreg(dd, kr_gpio_out, new_out);
+ dd->cspec->gpio_out = new_out;
+ spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
+ }
+ /*
+ * It is unlikely that a read at this time would get valid
+ * data on a pin whose direction line was set in the same
+ * call to this function. We include the read here because
+ * that allows us to potentially combine a change on one pin with
+ * a read on another, and because the old code did something like
+ * this.
+ */
+ read_val = qib_read_kreg64(dd, kr_extstatus);
+ return SYM_FIELD(read_val, EXTStatus, GPIOIn);
+}
+
+/*
+ * Read fundamental info we need to use the chip. These are
+ * the registers that describe chip capabilities, and are
+ * saved in shadow registers.
+ */
+static void get_7220_chip_params(struct qib_devdata *dd)
+{
+ u64 val;
+ u32 piobufs;
+ int mtu;
+
+ dd->uregbase = qib_read_kreg32(dd, kr_userregbase);
+
+ dd->rcvtidcnt = qib_read_kreg32(dd, kr_rcvtidcnt);
+ dd->rcvtidbase = qib_read_kreg32(dd, kr_rcvtidbase);
+ dd->rcvegrbase = qib_read_kreg32(dd, kr_rcvegrbase);
+ dd->palign = qib_read_kreg32(dd, kr_palign);
+ dd->piobufbase = qib_read_kreg64(dd, kr_sendpiobufbase);
+ dd->pio2k_bufbase = dd->piobufbase & 0xffffffff;
+
+ val = qib_read_kreg64(dd, kr_sendpiosize);
+ dd->piosize2k = val & ~0U;
+ dd->piosize4k = val >> 32;
+
+ mtu = ib_mtu_enum_to_int(qib_ibmtu);
+ if (mtu == -1)
+ mtu = QIB_DEFAULT_MTU;
+ dd->pport->ibmtu = (u32)mtu;
+
+ val = qib_read_kreg64(dd, kr_sendpiobufcnt);
+ dd->piobcnt2k = val & ~0U;
+ dd->piobcnt4k = val >> 32;
+ /* these may be adjusted in init_chip_wc_pat() */
+ dd->pio2kbase = (u32 __iomem *)
+ ((char __iomem *) dd->kregbase + dd->pio2k_bufbase);
+ if (dd->piobcnt4k) {
+ dd->pio4kbase = (u32 __iomem *)
+ ((char __iomem *) dd->kregbase +
+ (dd->piobufbase >> 32));
+ /*
+ * 4K buffers take 2 pages; we use roundup just to be
+ * paranoid; we calculate it once here, rather than on
+		 * every buf allocate.
+ */
+ dd->align4k = ALIGN(dd->piosize4k, dd->palign);
+ }
+
+ piobufs = dd->piobcnt4k + dd->piobcnt2k;
+
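+	/*
+	 * Each 64-bit pioavail register carries 2 status bits per buffer,
+	 * i.e. 32 buffers per register, so round up to whole registers.
+	 */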
+ dd->pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) /
+ (sizeof(u64) * BITS_PER_BYTE / 2);
+}
+
+/*
+ * The chip base addresses in cspec and cpspec have to be set
+ * after possible init_chip_wc_pat(), rather than in
+ * get_7220_chip_params(), so split out as a separate function.
+ */
+static void set_7220_baseaddrs(struct qib_devdata *dd)
+{
+ u32 cregbase;
+ /* init after possible re-map in init_chip_wc_pat() */
+ cregbase = qib_read_kreg32(dd, kr_counterregbase);
+ dd->cspec->cregbase = (u64 __iomem *)
+ ((char __iomem *) dd->kregbase + cregbase);
+
+ dd->egrtidbase = (u64 __iomem *)
+ ((char __iomem *) dd->kregbase + dd->rcvegrbase);
+}
+
+
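+/*
+ * SendCtrl bits that are tracked in the dd->sendctrl shadow; the diag
+ * observer hook below keeps the shadow consistent when diags touch the
+ * register directly.
+ */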
+#define SENDCTRL_SHADOWED (SYM_MASK(SendCtrl, SendIntBufAvail) | \
+ SYM_MASK(SendCtrl, SPioEnable) | \
+ SYM_MASK(SendCtrl, SSpecialTriggerEn) | \
+ SYM_MASK(SendCtrl, SendBufAvailUpd) | \
+ SYM_MASK(SendCtrl, AvailUpdThld) | \
+ SYM_MASK(SendCtrl, SDmaEnable) | \
+ SYM_MASK(SendCtrl, SDmaIntEnable) | \
+ SYM_MASK(SendCtrl, SDmaHalt) | \
+ SYM_MASK(SendCtrl, SDmaSingleDescriptor))
+
+static int sendctrl_hook(struct qib_devdata *dd,
+ const struct diag_observer *op,
+ u32 offs, u64 *data, u64 mask, int only_32)
+{
+ unsigned long flags;
+ unsigned idx = offs / sizeof(u64);
+ u64 local_data, all_bits;
+
+ if (idx != kr_sendctrl) {
+ qib_dev_err(dd, "SendCtrl Hook called with offs %X, %s-bit\n",
+ offs, only_32 ? "32" : "64");
+ return 0;
+ }
+
+ all_bits = ~0ULL;
+ if (only_32)
+ all_bits >>= 32;
+ spin_lock_irqsave(&dd->sendctrl_lock, flags);
+ if ((mask & all_bits) != all_bits) {
+ /*
+ * At least some mask bits are zero, so we need
+ * to read. The judgement call is whether from
+ * reg or shadow. First-cut: read reg, and complain
+ * if any bits which should be shadowed are different
+ * from their shadowed value.
+ */
+ if (only_32)
+ local_data = (u64)qib_read_kreg32(dd, idx);
+ else
+ local_data = qib_read_kreg64(dd, idx);
+ qib_dev_err(dd, "Sendctrl -> %X, Shad -> %X\n",
+ (u32)local_data, (u32)dd->sendctrl);
+ if ((local_data & SENDCTRL_SHADOWED) !=
+ (dd->sendctrl & SENDCTRL_SHADOWED))
+ qib_dev_err(dd, "Sendctrl read: %X shadow is %X\n",
+ (u32)local_data, (u32) dd->sendctrl);
+ *data = (local_data & ~mask) | (*data & mask);
+ }
+ if (mask) {
+ /*
+ * At least some mask bits are one, so we need
+ * to write, but only shadow some bits.
+ */
+ u64 sval, tval; /* Shadowed, transient */
+
+ /*
+ * New shadow val is bits we don't want to touch,
+ * ORed with bits we do, that are intended for shadow.
+ */
+ sval = (dd->sendctrl & ~mask);
+ sval |= *data & SENDCTRL_SHADOWED & mask;
+ dd->sendctrl = sval;
+ tval = sval | (*data & ~SENDCTRL_SHADOWED & mask);
+ qib_dev_err(dd, "Sendctrl <- %X, Shad <- %X\n",
+ (u32)tval, (u32)sval);
+ qib_write_kreg(dd, kr_sendctrl, tval);
+		qib_write_kreg(dd, kr_scratch, 0ULL);
+ }
+ spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
+
+ return only_32 ? 4 : 8;
+}
+
+static const struct diag_observer sendctrl_observer = {
+ sendctrl_hook, kr_sendctrl * sizeof(u64),
+ kr_sendctrl * sizeof(u64)
+};
+
+/*
+ * write the final few registers that depend on some of the
+ * init setup. Done late in init, just before bringing up
+ * the serdes.
+ */
+static int qib_late_7220_initreg(struct qib_devdata *dd)
+{
+ int ret = 0;
+ u64 val;
+
+ qib_write_kreg(dd, kr_rcvhdrentsize, dd->rcvhdrentsize);
+ qib_write_kreg(dd, kr_rcvhdrsize, dd->rcvhdrsize);
+ qib_write_kreg(dd, kr_rcvhdrcnt, dd->rcvhdrcnt);
+ qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys);
+ val = qib_read_kreg64(dd, kr_sendpioavailaddr);
+ if (val != dd->pioavailregs_phys) {
+ qib_dev_err(dd, "Catastrophic software error, "
+ "SendPIOAvailAddr written as %lx, "
+ "read back as %llx\n",
+ (unsigned long) dd->pioavailregs_phys,
+ (unsigned long long) val);
+ ret = -EINVAL;
+ }
+ qib_register_observer(dd, &sendctrl_observer);
+ return ret;
+}
+
+static int qib_init_7220_variables(struct qib_devdata *dd)
+{
+ struct qib_chippport_specific *cpspec;
+ struct qib_pportdata *ppd;
+ int ret = 0;
+ u32 sbufs, updthresh;
+
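+	/*
+	 * The port-specific and chip-specific structures were allocated
+	 * immediately after the devdata in qib_init_iba7220_funcs();
+	 * carve them out of that space here.
+	 */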
+ cpspec = (struct qib_chippport_specific *)(dd + 1);
+ ppd = &cpspec->pportdata;
+ dd->pport = ppd;
+ dd->num_pports = 1;
+
+ dd->cspec = (struct qib_chip_specific *)(cpspec + dd->num_pports);
+ ppd->cpspec = cpspec;
+
+ spin_lock_init(&dd->cspec->sdepb_lock);
+ spin_lock_init(&dd->cspec->rcvmod_lock);
+ spin_lock_init(&dd->cspec->gpio_lock);
+
+ /* we haven't yet set QIB_PRESENT, so use read directly */
+ dd->revision = readq(&dd->kregbase[kr_revision]);
+
+ if ((dd->revision & 0xffffffffU) == 0xffffffffU) {
+ qib_dev_err(dd, "Revision register read failure, "
+ "giving up initialization\n");
+ ret = -ENODEV;
+ goto bail;
+ }
+ dd->flags |= QIB_PRESENT; /* now register routines work */
+
+ dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R,
+ ChipRevMajor);
+ dd->minrev = (u8) SYM_FIELD(dd->revision, Revision_R,
+ ChipRevMinor);
+
+ get_7220_chip_params(dd);
+ qib_7220_boardname(dd);
+
+ /*
+ * GPIO bits for TWSI data and clock,
+ * used for serial EEPROM.
+ */
+ dd->gpio_sda_num = _QIB_GPIO_SDA_NUM;
+ dd->gpio_scl_num = _QIB_GPIO_SCL_NUM;
+ dd->twsi_eeprom_dev = QIB_TWSI_EEPROM_DEV;
+
+ dd->flags |= QIB_HAS_INTX | QIB_HAS_LINK_LATENCY |
+ QIB_NODMA_RTAIL | QIB_HAS_THRESH_UPDATE;
+ dd->flags |= qib_special_trigger ?
+ QIB_USE_SPCL_TRIG : QIB_HAS_SEND_DMA;
+
+ /*
+ * EEPROM error log 0 is TXE Parity errors. 1 is RXE Parity.
+ * 2 is Some Misc, 3 is reserved for future.
+ */
+ dd->eep_st_masks[0].hwerrs_to_log = HWE_MASK(TXEMemParityErr);
+
+ dd->eep_st_masks[1].hwerrs_to_log = HWE_MASK(RXEMemParityErr);
+
+ dd->eep_st_masks[2].errs_to_log = ERR_MASK(ResetNegated);
+
+ init_waitqueue_head(&cpspec->autoneg_wait);
+ INIT_DELAYED_WORK(&cpspec->autoneg_work, autoneg_7220_work);
+
+ qib_init_pportdata(ppd, dd, 0, 1);
+ ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
+ ppd->link_speed_supported = QIB_IB_SDR | QIB_IB_DDR;
+
+ ppd->link_width_enabled = ppd->link_width_supported;
+ ppd->link_speed_enabled = ppd->link_speed_supported;
+ /*
+	 * Set the initial values to reasonable defaults; they will be
+	 * set for real when the link is up.
+ */
+ ppd->link_width_active = IB_WIDTH_4X;
+ ppd->link_speed_active = QIB_IB_SDR;
+ ppd->delay_mult = rate_to_delay[0][1];
+ ppd->vls_supported = IB_VL_VL0;
+ ppd->vls_operational = ppd->vls_supported;
+
+ if (!qib_mini_init)
+ qib_write_kreg(dd, kr_rcvbthqp, QIB_KD_QP);
+
+ init_timer(&ppd->cpspec->chase_timer);
+ ppd->cpspec->chase_timer.function = reenable_7220_chase;
+ ppd->cpspec->chase_timer.data = (unsigned long)ppd;
+
+ qib_num_cfg_vls = 1; /* if any 7220's, only one VL */
+
+ dd->rcvhdrentsize = QIB_RCVHDR_ENTSIZE;
+ dd->rcvhdrsize = QIB_DFLT_RCVHDRSIZE;
+ dd->rhf_offset =
+ dd->rcvhdrentsize - sizeof(u64) / sizeof(u32);
+
+ /* we always allocate at least 2048 bytes for eager buffers */
+ ret = ib_mtu_enum_to_int(qib_ibmtu);
+ dd->rcvegrbufsize = ret != -1 ? max(ret, 2048) : QIB_DEFAULT_MTU;
+
+ qib_7220_tidtemplate(dd);
+
+ /*
+ * We can request a receive interrupt for 1 or
+ * more packets from current offset. For now, we set this
+ * up for a single packet.
+ */
+ dd->rhdrhead_intr_off = 1ULL << 32;
+
+ /* setup the stats timer; the add_timer is done at end of init */
+ init_timer(&dd->stats_timer);
+ dd->stats_timer.function = qib_get_7220_faststats;
+ dd->stats_timer.data = (unsigned long) dd;
+ dd->stats_timer.expires = jiffies + ACTIVITY_TIMER * HZ;
+
+ /*
+ * Control[4] has been added to change the arbitration within
+ * the SDMA engine between favoring data fetches over descriptor
+ * fetches. qib_sdma_fetch_arb==0 gives data fetches priority.
+ */
+ if (qib_sdma_fetch_arb)
+ dd->control |= 1 << 4;
+
+ dd->ureg_align = 0x10000; /* 64KB alignment */
+
+ dd->piosize2kmax_dwords = (dd->piosize2k >> 2)-1;
+ qib_7220_config_ctxts(dd);
+ qib_set_ctxtcnt(dd); /* needed for PAT setup */
+
+ if (qib_wc_pat) {
+ ret = init_chip_wc_pat(dd, 0);
+ if (ret)
+ goto bail;
+ }
+ set_7220_baseaddrs(dd); /* set chip access pointers now */
+
+ ret = 0;
+ if (qib_mini_init)
+ goto bail;
+
+ ret = qib_create_ctxts(dd);
+ init_7220_cntrnames(dd);
+
+	/* Use all of the 4KB buffers for kernel SDMA, or zero if !SDMA.
+	 * Reserve the update threshold amount (or 3, whichever is greater)
+	 * for other kernel use, such as sending SMI, MAD, and ACKs,
+	 * unless we aren't enabling SDMA, in which case we want to use
+	 * all the 4k bufs for the kernel.
+	 * If this were less than the update threshold, we could wait
+	 * a long time for an update.  Coded this way because we
+	 * sometimes change the update threshold for various reasons,
+	 * and we want this to remain robust.
+ */
+ updthresh = 8U; /* update threshold */
+ if (dd->flags & QIB_HAS_SEND_DMA) {
+ dd->cspec->sdmabufcnt = dd->piobcnt4k;
+ sbufs = updthresh > 3 ? updthresh : 3;
+ } else {
+ dd->cspec->sdmabufcnt = 0;
+ sbufs = dd->piobcnt4k;
+ }
+
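+	/*
+	 * SDMA buffers occupy the top of the buffer range; kernel send
+	 * buffers sit just below them, and the remaining buffers are
+	 * divided among the user contexts.
+	 */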
+ dd->cspec->lastbuf_for_pio = dd->piobcnt2k + dd->piobcnt4k -
+ dd->cspec->sdmabufcnt;
+ dd->lastctxt_piobuf = dd->cspec->lastbuf_for_pio - sbufs;
+ dd->cspec->lastbuf_for_pio--; /* range is <= , not < */
+ dd->pbufsctxt = dd->lastctxt_piobuf /
+ (dd->cfgctxts - dd->first_user_ctxt);
+
+ /*
+	 * If we are at 16 user contexts, we will have only 7 sbufs
+	 * per context, so drop the update threshold to match.  We
+	 * want to update before we actually run out at low pbufs/ctxt,
+	 * so give ourselves some margin.
+ */
+ if ((dd->pbufsctxt - 2) < updthresh)
+ updthresh = dd->pbufsctxt - 2;
+
+ dd->cspec->updthresh_dflt = updthresh;
+ dd->cspec->updthresh = updthresh;
+
+ /* before full enable, no interrupts, no locking needed */
+ dd->sendctrl |= (updthresh & SYM_RMASK(SendCtrl, AvailUpdThld))
+ << SYM_LSB(SendCtrl, AvailUpdThld);
+
+ dd->psxmitwait_supported = 1;
+ dd->psxmitwait_check_rate = QIB_7220_PSXMITWAIT_CHECK_RATE;
+bail:
+ return ret;
+}
+
+static u32 __iomem *qib_7220_getsendbuf(struct qib_pportdata *ppd, u64 pbc,
+ u32 *pbufnum)
+{
+ u32 first, last, plen = pbc & QIB_PBC_LENGTH_MASK;
+ struct qib_devdata *dd = ppd->dd;
+ u32 __iomem *buf;
+
+ if (((pbc >> 32) & PBC_7220_VL15_SEND_CTRL) &&
+ !(ppd->lflags & (QIBL_IB_AUTONEG_INPROG | QIBL_LINKACTIVE)))
+ buf = get_7220_link_buf(ppd, pbufnum);
+ else {
+ if ((plen + 1) > dd->piosize2kmax_dwords)
+ first = dd->piobcnt2k;
+ else
+ first = 0;
+ /* try 4k if all 2k busy, so same last for both sizes */
+ last = dd->cspec->lastbuf_for_pio;
+ buf = qib_getsendbuf_range(dd, pbufnum, first, last);
+ }
+ return buf;
+}
+
+/* these 2 "counters" are really control registers, and are always RW */
+static void qib_set_cntr_7220_sample(struct qib_pportdata *ppd, u32 intv,
+ u32 start)
+{
+ write_7220_creg(ppd->dd, cr_psinterval, intv);
+ write_7220_creg(ppd->dd, cr_psstart, start);
+}
+
+/*
+ * NOTE: no real attempt is made to generalize the SDMA stuff.
+ * At some point "soon" we will have a new, more generalized
+ * SDMA interface, and then we'll clean this up.
+ */
+
+/* Must be called with sdma_lock held, or before init finished */
+static void qib_sdma_update_7220_tail(struct qib_pportdata *ppd, u16 tail)
+{
+ /* Commit writes to memory and advance the tail on the chip */
+ wmb();
+ ppd->sdma_descq_tail = tail;
+ qib_write_kreg(ppd->dd, kr_senddmatail, tail);
+}
+
+static void qib_sdma_set_7220_desc_cnt(struct qib_pportdata *ppd, unsigned cnt)
+{
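+	/* no-op on the 7220 */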
+}
+
+static struct sdma_set_state_action sdma_7220_action_table[] = {
+ [qib_sdma_state_s00_hw_down] = {
+ .op_enable = 0,
+ .op_intenable = 0,
+ .op_halt = 0,
+ .go_s99_running_tofalse = 1,
+ },
+ [qib_sdma_state_s10_hw_start_up_wait] = {
+ .op_enable = 1,
+ .op_intenable = 1,
+ .op_halt = 1,
+ },
+ [qib_sdma_state_s20_idle] = {
+ .op_enable = 1,
+ .op_intenable = 1,
+ .op_halt = 1,
+ },
+ [qib_sdma_state_s30_sw_clean_up_wait] = {
+ .op_enable = 0,
+ .op_intenable = 1,
+ .op_halt = 0,
+ },
+ [qib_sdma_state_s40_hw_clean_up_wait] = {
+ .op_enable = 1,
+ .op_intenable = 1,
+ .op_halt = 1,
+ },
+ [qib_sdma_state_s50_hw_halt_wait] = {
+ .op_enable = 1,
+ .op_intenable = 1,
+ .op_halt = 1,
+ },
+ [qib_sdma_state_s99_running] = {
+ .op_enable = 1,
+ .op_intenable = 1,
+ .op_halt = 0,
+ .go_s99_running_totrue = 1,
+ },
+};
+
+static void qib_7220_sdma_init_early(struct qib_pportdata *ppd)
+{
+ ppd->sdma_state.set_state_action = sdma_7220_action_table;
+}
+
+static int init_sdma_7220_regs(struct qib_pportdata *ppd)
+{
+ struct qib_devdata *dd = ppd->dd;
+ unsigned i, n;
+ u64 senddmabufmask[3] = { 0 };
+
+ /* Set SendDmaBase */
+ qib_write_kreg(dd, kr_senddmabase, ppd->sdma_descq_phys);
+ qib_sdma_7220_setlengen(ppd);
+ qib_sdma_update_7220_tail(ppd, 0); /* Set SendDmaTail */
+ /* Set SendDmaHeadAddr */
+ qib_write_kreg(dd, kr_senddmaheadaddr, ppd->sdma_head_phys);
+
+ /*
+ * Reserve all the former "kernel" piobufs, using high number range
+ * so we get as many 4K buffers as possible
+ */
+ n = dd->piobcnt2k + dd->piobcnt4k;
+ i = n - dd->cspec->sdmabufcnt;
+
+ for (; i < n; ++i) {
+ unsigned word = i / 64;
+ unsigned bit = i & 63;
+
+ BUG_ON(word >= 3);
+ senddmabufmask[word] |= 1ULL << bit;
+ }
+ qib_write_kreg(dd, kr_senddmabufmask0, senddmabufmask[0]);
+ qib_write_kreg(dd, kr_senddmabufmask1, senddmabufmask[1]);
+ qib_write_kreg(dd, kr_senddmabufmask2, senddmabufmask[2]);
+
+ ppd->sdma_state.first_sendbuf = i;
+ ppd->sdma_state.last_sendbuf = n;
+
+ return 0;
+}
+
+/* sdma_lock must be held */
+static u16 qib_sdma_7220_gethead(struct qib_pportdata *ppd)
+{
+ struct qib_devdata *dd = ppd->dd;
+ int sane;
+ int use_dmahead;
+ u16 swhead;
+ u16 swtail;
+ u16 cnt;
+ u16 hwhead;
+
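+	/*
+	 * Use the DMA'd head shadow in host memory when SDMA is running
+	 * and the chip supports it; otherwise read the chip register.
+	 * Either way, sanity-check the result against the sw head/tail.
+	 */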
+ use_dmahead = __qib_sdma_running(ppd) &&
+ (dd->flags & QIB_HAS_SDMA_TIMEOUT);
+retry:
+ hwhead = use_dmahead ?
+ (u16)le64_to_cpu(*ppd->sdma_head_dma) :
+ (u16)qib_read_kreg32(dd, kr_senddmahead);
+
+ swhead = ppd->sdma_descq_head;
+ swtail = ppd->sdma_descq_tail;
+ cnt = ppd->sdma_descq_cnt;
+
+ if (swhead < swtail) {
+ /* not wrapped */
+ sane = (hwhead >= swhead) & (hwhead <= swtail);
+ } else if (swhead > swtail) {
+ /* wrapped around */
+ sane = ((hwhead >= swhead) && (hwhead < cnt)) ||
+ (hwhead <= swtail);
+ } else {
+ /* empty */
+ sane = (hwhead == swhead);
+ }
+
+ if (unlikely(!sane)) {
+ if (use_dmahead) {
+ /* try one more time, directly from the register */
+ use_dmahead = 0;
+ goto retry;
+ }
+ /* assume no progress */
+ hwhead = swhead;
+ }
+
+ return hwhead;
+}
+
+static int qib_sdma_7220_busy(struct qib_pportdata *ppd)
+{
+ u64 hwstatus = qib_read_kreg64(ppd->dd, kr_senddmastatus);
+
+ return (hwstatus & SYM_MASK(SendDmaStatus, ScoreBoardDrainInProg)) ||
+ (hwstatus & SYM_MASK(SendDmaStatus, AbortInProg)) ||
+ (hwstatus & SYM_MASK(SendDmaStatus, InternalSDmaEnable)) ||
+ !(hwstatus & SYM_MASK(SendDmaStatus, ScbEmpty));
+}
+
+/*
+ * Compute the amount of delay before sending the next packet if the
+ * port's send rate differs from the static rate set for the QP.
+ * Since the delay affects this packet but the amount of the delay is
+ * based on the length of the previous packet, use the last delay computed
+ * and save the delay count for this packet to be used next time
+ * we get here.
+ */
+static u32 qib_7220_setpbc_control(struct qib_pportdata *ppd, u32 plen,
+ u8 srate, u8 vl)
+{
+ u8 snd_mult = ppd->delay_mult;
+ u8 rcv_mult = ib_rate_to_delay[srate];
+ u32 ret = ppd->cpspec->last_delay_mult;
+
+ ppd->cpspec->last_delay_mult = (rcv_mult > snd_mult) ?
+ (plen * (rcv_mult - snd_mult) + 1) >> 1 : 0;
+
+ /* Indicate VL15, if necessary */
+ if (vl == 15)
+ ret |= PBC_7220_VL15_SEND_CTRL;
+ return ret;
+}
+
+static void qib_7220_initvl15_bufs(struct qib_devdata *dd)
+{
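+	/* nothing to do on the 7220 */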
+}
+
+static void qib_7220_init_ctxt(struct qib_ctxtdata *rcd)
+{
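+	/*
+	 * Context 0 (kernel) gets the fixed kernel eager buffer count;
+	 * user contexts get the per-context count carved out above it.
+	 */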
+ if (!rcd->ctxt) {
+ rcd->rcvegrcnt = IBA7220_KRCVEGRCNT;
+ rcd->rcvegr_tid_base = 0;
+ } else {
+ rcd->rcvegrcnt = rcd->dd->cspec->rcvegrcnt;
+ rcd->rcvegr_tid_base = IBA7220_KRCVEGRCNT +
+ (rcd->ctxt - 1) * rcd->rcvegrcnt;
+ }
+}
+
+static void qib_7220_txchk_change(struct qib_devdata *dd, u32 start,
+ u32 len, u32 which, struct qib_ctxtdata *rcd)
+{
+ int i;
+ unsigned long flags;
+
+ switch (which) {
+ case TXCHK_CHG_TYPE_KERN:
+ /* see if we need to raise avail update threshold */
+ spin_lock_irqsave(&dd->uctxt_lock, flags);
+ for (i = dd->first_user_ctxt;
+ dd->cspec->updthresh != dd->cspec->updthresh_dflt
+ && i < dd->cfgctxts; i++)
+ if (dd->rcd[i] && dd->rcd[i]->subctxt_cnt &&
+ ((dd->rcd[i]->piocnt / dd->rcd[i]->subctxt_cnt) - 1)
+ < dd->cspec->updthresh_dflt)
+ break;
+ spin_unlock_irqrestore(&dd->uctxt_lock, flags);
+ if (i == dd->cfgctxts) {
+ spin_lock_irqsave(&dd->sendctrl_lock, flags);
+ dd->cspec->updthresh = dd->cspec->updthresh_dflt;
+ dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
+ dd->sendctrl |= (dd->cspec->updthresh &
+ SYM_RMASK(SendCtrl, AvailUpdThld)) <<
+ SYM_LSB(SendCtrl, AvailUpdThld);
+ spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
+ sendctrl_7220_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
+ }
+ break;
+ case TXCHK_CHG_TYPE_USER:
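+		/*
+		 * Lower the avail update threshold if this user context's
+		 * per-subcontext buffer count is smaller than it.
+		 */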
+ spin_lock_irqsave(&dd->sendctrl_lock, flags);
+ if (rcd && rcd->subctxt_cnt && ((rcd->piocnt
+ / rcd->subctxt_cnt) - 1) < dd->cspec->updthresh) {
+ dd->cspec->updthresh = (rcd->piocnt /
+ rcd->subctxt_cnt) - 1;
+ dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
+ dd->sendctrl |= (dd->cspec->updthresh &
+ SYM_RMASK(SendCtrl, AvailUpdThld))
+ << SYM_LSB(SendCtrl, AvailUpdThld);
+ spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
+ sendctrl_7220_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
+ } else
+ spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
+ break;
+ }
+}
+
+static void writescratch(struct qib_devdata *dd, u32 val)
+{
+ qib_write_kreg(dd, kr_scratch, val);
+}
+
+#define VALID_TS_RD_REG_MASK 0xBF
+/**
+ * qib_7220_tempsense_rd - read register of temp sensor via TWSI
+ * @dd: the qlogic_ib device
+ * @regnum: register to read from
+ *
+ * Returns reg contents (0..255) or < 0 for error
+ */
+static int qib_7220_tempsense_rd(struct qib_devdata *dd, int regnum)
+{
+ int ret;
+ u8 rdata;
+
+ if (regnum > 7) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ /* return a bogus value for (the one) register we do not have */
+ if (!((1 << regnum) & VALID_TS_RD_REG_MASK)) {
+ ret = 0;
+ goto bail;
+ }
+
+ ret = mutex_lock_interruptible(&dd->eep_lock);
+ if (ret)
+ goto bail;
+
+ ret = qib_twsi_blk_rd(dd, QIB_TWSI_TEMP_DEV, regnum, &rdata, 1);
+ if (!ret)
+ ret = rdata;
+
+ mutex_unlock(&dd->eep_lock);
+
+ /*
+ * There are three possibilities here:
+ * ret is actual value (0..255)
+ * ret is -ENXIO or -EINVAL from twsi code or this file
+ * ret is -EINTR from mutex_lock_interruptible.
+ */
+bail:
+ return ret;
+}
+
+/* Dummy function, as 7220 boards never disable EEPROM Write */
+static int qib_7220_eeprom_wen(struct qib_devdata *dd, int wen)
+{
+ return 1;
+}
+
+/**
+ * qib_init_iba7220_funcs - set up the chip-specific function pointers
+ * @pdev: the pci_dev for the qlogic_ib device
+ * @ent: pci_device_id struct for this dev
+ *
+ * This is global, and is called directly at init to set up the
+ * chip-specific function pointers for later use.
+ */
+struct qib_devdata *qib_init_iba7220_funcs(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct qib_devdata *dd;
+ int ret;
+ u32 boardid, minwidth;
+
+ dd = qib_alloc_devdata(pdev, sizeof(struct qib_chip_specific) +
+ sizeof(struct qib_chippport_specific));
+ if (IS_ERR(dd))
+ goto bail;
+
+ dd->f_bringup_serdes = qib_7220_bringup_serdes;
+ dd->f_cleanup = qib_setup_7220_cleanup;
+ dd->f_clear_tids = qib_7220_clear_tids;
+ dd->f_free_irq = qib_7220_free_irq;
+ dd->f_get_base_info = qib_7220_get_base_info;
+ dd->f_get_msgheader = qib_7220_get_msgheader;
+ dd->f_getsendbuf = qib_7220_getsendbuf;
+ dd->f_gpio_mod = gpio_7220_mod;
+ dd->f_eeprom_wen = qib_7220_eeprom_wen;
+ dd->f_hdrqempty = qib_7220_hdrqempty;
+ dd->f_ib_updown = qib_7220_ib_updown;
+ dd->f_init_ctxt = qib_7220_init_ctxt;
+ dd->f_initvl15_bufs = qib_7220_initvl15_bufs;
+ dd->f_intr_fallback = qib_7220_intr_fallback;
+ dd->f_late_initreg = qib_late_7220_initreg;
+ dd->f_setpbc_control = qib_7220_setpbc_control;
+ dd->f_portcntr = qib_portcntr_7220;
+ dd->f_put_tid = qib_7220_put_tid;
+ dd->f_quiet_serdes = qib_7220_quiet_serdes;
+ dd->f_rcvctrl = rcvctrl_7220_mod;
+ dd->f_read_cntrs = qib_read_7220cntrs;
+ dd->f_read_portcntrs = qib_read_7220portcntrs;
+ dd->f_reset = qib_setup_7220_reset;
+ dd->f_init_sdma_regs = init_sdma_7220_regs;
+ dd->f_sdma_busy = qib_sdma_7220_busy;
+ dd->f_sdma_gethead = qib_sdma_7220_gethead;
+ dd->f_sdma_sendctrl = qib_7220_sdma_sendctrl;
+ dd->f_sdma_set_desc_cnt = qib_sdma_set_7220_desc_cnt;
+ dd->f_sdma_update_tail = qib_sdma_update_7220_tail;
+ dd->f_sdma_hw_clean_up = qib_7220_sdma_hw_clean_up;
+ dd->f_sdma_hw_start_up = qib_7220_sdma_hw_start_up;
+ dd->f_sdma_init_early = qib_7220_sdma_init_early;
+ dd->f_sendctrl = sendctrl_7220_mod;
+ dd->f_set_armlaunch = qib_set_7220_armlaunch;
+ dd->f_set_cntr_sample = qib_set_cntr_7220_sample;
+ dd->f_iblink_state = qib_7220_iblink_state;
+ dd->f_ibphys_portstate = qib_7220_phys_portstate;
+ dd->f_get_ib_cfg = qib_7220_get_ib_cfg;
+ dd->f_set_ib_cfg = qib_7220_set_ib_cfg;
+ dd->f_set_ib_loopback = qib_7220_set_loopback;
+ dd->f_set_intr_state = qib_7220_set_intr_state;
+ dd->f_setextled = qib_setup_7220_setextled;
+ dd->f_txchk_change = qib_7220_txchk_change;
+ dd->f_update_usrhead = qib_update_7220_usrhead;
+ dd->f_wantpiobuf_intr = qib_wantpiobuf_7220_intr;
+ dd->f_xgxs_reset = qib_7220_xgxs_reset;
+ dd->f_writescratch = writescratch;
+ dd->f_tempsense_rd = qib_7220_tempsense_rd;
+ /*
+ * Do remaining pcie setup and save pcie values in dd.
+ * Any error printing is already done by the init code.
+ * On return, we have the chip mapped, but chip registers
+ * are not set up until start of qib_init_7220_variables.
+ */
+ ret = qib_pcie_ddinit(dd, pdev, ent);
+ if (ret < 0)
+ goto bail_free;
+
+ /* initialize chip-specific variables */
+ ret = qib_init_7220_variables(dd);
+ if (ret)
+ goto bail_cleanup;
+
+ if (qib_mini_init)
+ goto bail;
+
+ boardid = SYM_FIELD(dd->revision, Revision,
+ BoardID);
+ switch (boardid) {
+ case 0:
+ case 2:
+ case 10:
+ case 12:
+ minwidth = 16; /* x16 capable boards */
+ break;
+ default:
+ minwidth = 8; /* x8 capable boards */
+ break;
+ }
+ if (qib_pcie_params(dd, minwidth, NULL, NULL))
+ qib_dev_err(dd, "Failed to setup PCIe or interrupts; "
+ "continuing anyway\n");
+
+ /* save IRQ for possible later use */
+ dd->cspec->irq = pdev->irq;
+
+ if (qib_read_kreg64(dd, kr_hwerrstatus) &
+ QLOGIC_IB_HWE_SERDESPLLFAILED)
+ qib_write_kreg(dd, kr_hwerrclear,
+ QLOGIC_IB_HWE_SERDESPLLFAILED);
+
+ /* setup interrupt handler (interrupt type handled above) */
+ qib_setup_7220_interrupt(dd);
+ qib_7220_init_hwerrors(dd);
+
+ /* clear diagctrl register, in case diags were running and crashed */
+ qib_write_kreg(dd, kr_hwdiagctrl, 0);
+
+ goto bail;
+
+bail_cleanup:
+ qib_pcie_ddcleanup(dd);
+bail_free:
+ qib_free_devdata(dd);
+ dd = ERR_PTR(ret);
+bail:
+ return dd;
+}
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
new file mode 100644
index 000000000000..503992d9c5ce
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -0,0 +1,7645 @@
+/*
+ * Copyright (c) 2008, 2009, 2010 QLogic Corporation. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * This file contains all of the code that is specific to the
+ * InfiniPath 7322 chip
+ */
+
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/jiffies.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_smi.h>
+
+#include "qib.h"
+#include "qib_7322_regs.h"
+#include "qib_qsfp.h"
+
+#include "qib_mad.h"
+
+static void qib_setup_7322_setextled(struct qib_pportdata *, u32);
+static void qib_7322_handle_hwerrors(struct qib_devdata *, char *, size_t);
+static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op);
+static irqreturn_t qib_7322intr(int irq, void *data);
+static irqreturn_t qib_7322bufavail(int irq, void *data);
+static irqreturn_t sdma_intr(int irq, void *data);
+static irqreturn_t sdma_idle_intr(int irq, void *data);
+static irqreturn_t sdma_progress_intr(int irq, void *data);
+static irqreturn_t sdma_cleanup_intr(int irq, void *data);
+static void qib_7322_txchk_change(struct qib_devdata *, u32, u32, u32,
+ struct qib_ctxtdata *rcd);
+static u8 qib_7322_phys_portstate(u64);
+static u32 qib_7322_iblink_state(u64);
+static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
+ u16 linitcmd);
+static void force_h1(struct qib_pportdata *);
+static void adj_tx_serdes(struct qib_pportdata *);
+static u32 qib_7322_setpbc_control(struct qib_pportdata *, u32, u8, u8);
+static void qib_7322_mini_pcs_reset(struct qib_pportdata *);
+
+static u32 ahb_mod(struct qib_devdata *, int, int, int, u32, u32);
+static void ibsd_wr_allchans(struct qib_pportdata *, int, unsigned, unsigned);
+
+#define BMASK(msb, lsb) (((1 << ((msb) + 1 - (lsb))) - 1) << (lsb))
+
+/* LE2 serdes values for different cases */
+#define LE2_DEFAULT 5
+#define LE2_5m 4
+#define LE2_QME 0
+
+/* Below is special-purpose, so only really works for the IB SerDes blocks. */
+#define IBSD(hw_pidx) (hw_pidx + 2)
+
+/* these are variables for documentation and experimentation purposes */
+static const unsigned rcv_int_timeout = 375;
+static const unsigned rcv_int_count = 16;
+static const unsigned sdma_idle_cnt = 64;
+
+/* Time to stop altering Rx Equalization parameters, after link up. */
+#define RXEQ_DISABLE_MSECS 2500
+
+/*
+ * Number of VLs we are configured to use (to allow for more
+ * credits per vl, etc.)
+ */
+ushort qib_num_cfg_vls = 2;
+module_param_named(num_vls, qib_num_cfg_vls, ushort, S_IRUGO);
+MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
+
+static ushort qib_chase = 1;
+module_param_named(chase, qib_chase, ushort, S_IRUGO);
+MODULE_PARM_DESC(chase, "Enable state chase handling");
+
+static ushort qib_long_atten = 10; /* 10 dB ~= 5m length */
+module_param_named(long_attenuation, qib_long_atten, ushort, S_IRUGO);
+MODULE_PARM_DESC(long_attenuation, \
+ "attenuation cutoff (dB) for long copper cable setup");
+
+static ushort qib_singleport;
+module_param_named(singleport, qib_singleport, ushort, S_IRUGO);
+MODULE_PARM_DESC(singleport, "Use only IB port 1; more per-port buffer space");
+
+#define MAX_ATTEN_LEN 64 /* plenty for any real system */
+/* for read back, default index is ~5m copper cable */
+static char txselect_list[MAX_ATTEN_LEN] = "10";
+static struct kparam_string kp_txselect = {
+ .string = txselect_list,
+ .maxlen = MAX_ATTEN_LEN
+};
+static int setup_txselect(const char *, struct kernel_param *);
+module_param_call(txselect, setup_txselect, param_get_string,
+ &kp_txselect, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(txselect, \
+ "Tx serdes indices (for no QSFP or invalid QSFP data)");
+
+#define BOARD_QME7342 5
+#define BOARD_QMH7342 6
+#define IS_QMH(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
+ BOARD_QMH7342)
+#define IS_QME(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
+ BOARD_QME7342)
+
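+/*
+ * Convert byte offsets from the machine-generated register header into
+ * u64 register indices.
+ */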
+#define KREG_IDX(regname) (QIB_7322_##regname##_OFFS / sizeof(u64))
+
+#define KREG_IBPORT_IDX(regname) ((QIB_7322_##regname##_0_OFFS / sizeof(u64)))
+
+#define MASK_ACROSS(lsb, msb) \
+ (((1ULL << ((msb) + 1 - (lsb))) - 1) << (lsb))
+
+#define SYM_RMASK(regname, fldname) ((u64) \
+ QIB_7322_##regname##_##fldname##_RMASK)
+
+#define SYM_MASK(regname, fldname) ((u64) \
+ QIB_7322_##regname##_##fldname##_RMASK << \
+ QIB_7322_##regname##_##fldname##_LSB)
+
+#define SYM_FIELD(value, regname, fldname) ((u64) \
+ (((value) >> SYM_LSB(regname, fldname)) & \
+ SYM_RMASK(regname, fldname)))
+
+/* useful for things like LaFifoEmpty_0...7, TxCreditOK_0...7, etc. */
+#define SYM_FIELD_ACROSS(value, regname, fldname, nbits) \
+ (((value) >> SYM_LSB(regname, fldname)) & MASK_ACROSS(0, nbits))
+
+#define HWE_MASK(fldname) SYM_MASK(HwErrMask, fldname##Mask)
+#define ERR_MASK(fldname) SYM_MASK(ErrMask, fldname##Mask)
+#define ERR_MASK_N(fldname) SYM_MASK(ErrMask_0, fldname##Mask)
+#define INT_MASK(fldname) SYM_MASK(IntMask, fldname##IntMask)
+#define INT_MASK_P(fldname, port) SYM_MASK(IntMask, fldname##IntMask##_##port)
+/* Below because most, but not all, fields of IntMask have that full suffix */
+#define INT_MASK_PM(fldname, port) SYM_MASK(IntMask, fldname##Mask##_##port)
+
+
+#define SYM_LSB(regname, fldname) (QIB_7322_##regname##_##fldname##_LSB)
+
+/*
+ * The size bits give us 2^N, in KB units.  0 marks the entry as invalid,
+ * and 7 is reserved.  We currently use only 2KB and 4KB.
+ */
+#define IBA7322_TID_SZ_SHIFT QIB_7322_RcvTIDArray0_RT_BufSize_LSB
+#define IBA7322_TID_SZ_2K (1UL<<IBA7322_TID_SZ_SHIFT) /* 2KB */
+#define IBA7322_TID_SZ_4K (2UL<<IBA7322_TID_SZ_SHIFT) /* 4KB */
+#define IBA7322_TID_PA_SHIFT 11U /* TID addr in chip stored w/o low bits */
+
+#define SendIBSLIDAssignMask \
+ QIB_7322_SendIBSLIDAssign_0_SendIBSLIDAssign_15_0_RMASK
+#define SendIBSLMCMask \
+ QIB_7322_SendIBSLIDMask_0_SendIBSLIDMask_15_0_RMASK
+
+#define ExtLED_IB1_YEL SYM_MASK(EXTCtrl, LEDPort0YellowOn)
+#define ExtLED_IB1_GRN SYM_MASK(EXTCtrl, LEDPort0GreenOn)
+#define ExtLED_IB2_YEL SYM_MASK(EXTCtrl, LEDPort1YellowOn)
+#define ExtLED_IB2_GRN SYM_MASK(EXTCtrl, LEDPort1GreenOn)
+#define ExtLED_IB1_MASK (ExtLED_IB1_YEL | ExtLED_IB1_GRN)
+#define ExtLED_IB2_MASK (ExtLED_IB2_YEL | ExtLED_IB2_GRN)
+
+#define _QIB_GPIO_SDA_NUM 1
+#define _QIB_GPIO_SCL_NUM 0
+#define QIB_EEPROM_WEN_NUM 14
+#define QIB_TWSI_EEPROM_DEV 0xA2 /* All Production 7322 cards. */
+
+/* HW counter clock is at 4nsec */
+#define QIB_7322_PSXMITWAIT_CHECK_RATE 4000
+
+/* full speed IB port 1 only */
+#define PORT_SPD_CAP (QIB_IB_SDR | QIB_IB_DDR | QIB_IB_QDR)
+#define PORT_SPD_CAP_SHIFT 3
+
+/* full speed featuremask, both ports */
+#define DUAL_PORT_CAP (PORT_SPD_CAP | (PORT_SPD_CAP << PORT_SPD_CAP_SHIFT))
+
+/*
+ * This file contains almost all the chip-specific register information and
+ * access functions for the QLogic InfiniPath 7322 PCI-Express chip.
+ */
+
+/* Use defines to tie machine-generated names to lower-case names */
+#define kr_contextcnt KREG_IDX(ContextCnt)
+#define kr_control KREG_IDX(Control)
+#define kr_counterregbase KREG_IDX(CntrRegBase)
+#define kr_errclear KREG_IDX(ErrClear)
+#define kr_errmask KREG_IDX(ErrMask)
+#define kr_errstatus KREG_IDX(ErrStatus)
+#define kr_extctrl KREG_IDX(EXTCtrl)
+#define kr_extstatus KREG_IDX(EXTStatus)
+#define kr_gpio_clear KREG_IDX(GPIOClear)
+#define kr_gpio_mask KREG_IDX(GPIOMask)
+#define kr_gpio_out KREG_IDX(GPIOOut)
+#define kr_gpio_status KREG_IDX(GPIOStatus)
+#define kr_hwdiagctrl KREG_IDX(HwDiagCtrl)
+#define kr_debugportval KREG_IDX(DebugPortValueReg)
+#define kr_fmask KREG_IDX(feature_mask)
+#define kr_act_fmask KREG_IDX(active_feature_mask)
+#define kr_hwerrclear KREG_IDX(HwErrClear)
+#define kr_hwerrmask KREG_IDX(HwErrMask)
+#define kr_hwerrstatus KREG_IDX(HwErrStatus)
+#define kr_intclear KREG_IDX(IntClear)
+#define kr_intmask KREG_IDX(IntMask)
+#define kr_intredirect KREG_IDX(IntRedirect0)
+#define kr_intstatus KREG_IDX(IntStatus)
+#define kr_pagealign KREG_IDX(PageAlign)
+#define kr_rcvavailtimeout KREG_IDX(RcvAvailTimeOut0)
+#define kr_rcvctrl KREG_IDX(RcvCtrl) /* Common, but chip also has per-port */
+#define kr_rcvegrbase KREG_IDX(RcvEgrBase)
+#define kr_rcvegrcnt KREG_IDX(RcvEgrCnt)
+#define kr_rcvhdrcnt KREG_IDX(RcvHdrCnt)
+#define kr_rcvhdrentsize KREG_IDX(RcvHdrEntSize)
+#define kr_rcvhdrsize KREG_IDX(RcvHdrSize)
+#define kr_rcvtidbase KREG_IDX(RcvTIDBase)
+#define kr_rcvtidcnt KREG_IDX(RcvTIDCnt)
+#define kr_revision KREG_IDX(Revision)
+#define kr_scratch KREG_IDX(Scratch)
+#define kr_sendbuffererror KREG_IDX(SendBufErr0) /* and base for 1 and 2 */
+#define kr_sendcheckmask KREG_IDX(SendCheckMask0) /* and 1, 2 */
+#define kr_sendctrl KREG_IDX(SendCtrl)
+#define kr_sendgrhcheckmask KREG_IDX(SendGRHCheckMask0) /* and 1, 2 */
+#define kr_sendibpktmask KREG_IDX(SendIBPacketMask0) /* and 1, 2 */
+#define kr_sendpioavailaddr KREG_IDX(SendBufAvailAddr)
+#define kr_sendpiobufbase KREG_IDX(SendBufBase)
+#define kr_sendpiobufcnt KREG_IDX(SendBufCnt)
+#define kr_sendpiosize KREG_IDX(SendBufSize)
+#define kr_sendregbase KREG_IDX(SendRegBase)
+#define kr_sendbufavail0 KREG_IDX(SendBufAvail0)
+#define kr_userregbase KREG_IDX(UserRegBase)
+#define kr_intgranted KREG_IDX(Int_Granted)
+#define kr_vecclr_wo_int KREG_IDX(vec_clr_without_int)
+#define kr_intblocked KREG_IDX(IntBlocked)
+#define kr_r_access KREG_IDX(SPC_JTAG_ACCESS_REG)
+
+/*
+ * per-port kernel registers. Access only with qib_read_kreg_port()
+ * or qib_write_kreg_port()
+ */
+#define krp_errclear KREG_IBPORT_IDX(ErrClear)
+#define krp_errmask KREG_IBPORT_IDX(ErrMask)
+#define krp_errstatus KREG_IBPORT_IDX(ErrStatus)
+#define krp_highprio_0 KREG_IBPORT_IDX(HighPriority0)
+#define krp_highprio_limit KREG_IBPORT_IDX(HighPriorityLimit)
+#define krp_hrtbt_guid KREG_IBPORT_IDX(HRTBT_GUID)
+#define krp_ib_pcsconfig KREG_IBPORT_IDX(IBPCSConfig)
+#define krp_ibcctrl_a KREG_IBPORT_IDX(IBCCtrlA)
+#define krp_ibcctrl_b KREG_IBPORT_IDX(IBCCtrlB)
+#define krp_ibcctrl_c KREG_IBPORT_IDX(IBCCtrlC)
+#define krp_ibcstatus_a KREG_IBPORT_IDX(IBCStatusA)
+#define krp_ibcstatus_b KREG_IBPORT_IDX(IBCStatusB)
+#define krp_txestatus KREG_IBPORT_IDX(TXEStatus)
+#define krp_lowprio_0 KREG_IBPORT_IDX(LowPriority0)
+#define krp_ncmodectrl KREG_IBPORT_IDX(IBNCModeCtrl)
+#define krp_partitionkey KREG_IBPORT_IDX(RcvPartitionKey)
+#define krp_psinterval KREG_IBPORT_IDX(PSInterval)
+#define krp_psstart KREG_IBPORT_IDX(PSStart)
+#define krp_psstat KREG_IBPORT_IDX(PSStat)
+#define krp_rcvbthqp KREG_IBPORT_IDX(RcvBTHQP)
+#define krp_rcvctrl KREG_IBPORT_IDX(RcvCtrl)
+#define krp_rcvpktledcnt KREG_IBPORT_IDX(RcvPktLEDCnt)
+#define krp_rcvqpmaptable KREG_IBPORT_IDX(RcvQPMapTableA)
+#define krp_rxcreditvl0 KREG_IBPORT_IDX(RxCreditVL0)
+#define krp_rxcreditvl15 (KREG_IBPORT_IDX(RxCreditVL0)+15)
+#define krp_sendcheckcontrol KREG_IBPORT_IDX(SendCheckControl)
+#define krp_sendctrl KREG_IBPORT_IDX(SendCtrl)
+#define krp_senddmabase KREG_IBPORT_IDX(SendDmaBase)
+#define krp_senddmabufmask0 KREG_IBPORT_IDX(SendDmaBufMask0)
+#define krp_senddmabufmask1 (KREG_IBPORT_IDX(SendDmaBufMask0) + 1)
+#define krp_senddmabufmask2 (KREG_IBPORT_IDX(SendDmaBufMask0) + 2)
+#define krp_senddmabuf_use0 KREG_IBPORT_IDX(SendDmaBufUsed0)
+#define krp_senddmabuf_use1 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 1)
+#define krp_senddmabuf_use2 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 2)
+#define krp_senddmadesccnt KREG_IBPORT_IDX(SendDmaDescCnt)
+#define krp_senddmahead KREG_IBPORT_IDX(SendDmaHead)
+#define krp_senddmaheadaddr KREG_IBPORT_IDX(SendDmaHeadAddr)
+#define krp_senddmaidlecnt KREG_IBPORT_IDX(SendDmaIdleCnt)
+#define krp_senddmalengen KREG_IBPORT_IDX(SendDmaLenGen)
+#define krp_senddmaprioritythld KREG_IBPORT_IDX(SendDmaPriorityThld)
+#define krp_senddmareloadcnt KREG_IBPORT_IDX(SendDmaReloadCnt)
+#define krp_senddmastatus KREG_IBPORT_IDX(SendDmaStatus)
+#define krp_senddmatail KREG_IBPORT_IDX(SendDmaTail)
+#define krp_sendhdrsymptom KREG_IBPORT_IDX(SendHdrErrSymptom)
+#define krp_sendslid KREG_IBPORT_IDX(SendIBSLIDAssign)
+#define krp_sendslidmask KREG_IBPORT_IDX(SendIBSLIDMask)
+#define krp_ibsdtestiftx KREG_IBPORT_IDX(IB_SDTEST_IF_TX)
+#define krp_adapt_dis_timer KREG_IBPORT_IDX(ADAPT_DISABLE_TIMER_THRESHOLD)
+#define krp_tx_deemph_override KREG_IBPORT_IDX(IBSD_TX_DEEMPHASIS_OVERRIDE)
+#define krp_serdesctrl KREG_IBPORT_IDX(IBSerdesCtrl)
+
+/*
+ * Per-context kernel registers.  Access only with qib_read_kreg_ctxt()
+ * or qib_write_kreg_ctxt()
+ */
+#define krc_rcvhdraddr KREG_IDX(RcvHdrAddr0)
+#define krc_rcvhdrtailaddr KREG_IDX(RcvHdrTailAddr0)
+
+/*
+ * TID Flow table, per context. Reduces
+ * number of hdrq updates to one per flow (or on errors).
+ * context 0 and 1 share same memory, but have distinct
+ * addresses. Since for now, we never use expected sends
+ * on kernel contexts, we don't worry about that (we initialize
+ * those entries for ctxt 0/1 on driver load twice, for example).
+ */
+#define NUM_TIDFLOWS_CTXT 0x20 /* 0x20 per context; have to hardcode */
+#define ur_rcvflowtable (KREG_IDX(RcvTIDFlowTable0) - KREG_IDX(RcvHdrTail0))
+
+/* these are the error bits in the tid flows, and are W1C */
+#define TIDFLOW_ERRBITS ( \
+ (SYM_MASK(RcvTIDFlowTable0, GenMismatch) << \
+ SYM_LSB(RcvTIDFlowTable0, GenMismatch)) | \
+ (SYM_MASK(RcvTIDFlowTable0, SeqMismatch) << \
+ SYM_LSB(RcvTIDFlowTable0, SeqMismatch)))
+
+/* Most (not all) Counters are per-IBport.
+ * Requires that LBIntCnt is at offset 0 in the group.
+ */
+#define CREG_IDX(regname) \
+((QIB_7322_##regname##_0_OFFS - QIB_7322_LBIntCnt_OFFS) / sizeof(u64))
+
+#define crp_badformat CREG_IDX(RxVersionErrCnt)
+#define crp_err_rlen CREG_IDX(RxLenErrCnt)
+#define crp_erricrc CREG_IDX(RxICRCErrCnt)
+#define crp_errlink CREG_IDX(RxLinkMalformCnt)
+#define crp_errlpcrc CREG_IDX(RxLPCRCErrCnt)
+#define crp_errpkey CREG_IDX(RxPKeyMismatchCnt)
+#define crp_errvcrc CREG_IDX(RxVCRCErrCnt)
+#define crp_excessbufferovfl CREG_IDX(ExcessBufferOvflCnt)
+#define crp_iblinkdown CREG_IDX(IBLinkDownedCnt)
+#define crp_iblinkerrrecov CREG_IDX(IBLinkErrRecoveryCnt)
+#define crp_ibstatuschange CREG_IDX(IBStatusChangeCnt)
+#define crp_ibsymbolerr CREG_IDX(IBSymbolErrCnt)
+#define crp_invalidrlen CREG_IDX(RxMaxMinLenErrCnt)
+#define crp_locallinkintegrityerr CREG_IDX(LocalLinkIntegrityErrCnt)
+#define crp_pktrcv CREG_IDX(RxDataPktCnt)
+#define crp_pktrcvflowctrl CREG_IDX(RxFlowPktCnt)
+#define crp_pktsend CREG_IDX(TxDataPktCnt)
+#define crp_pktsendflow CREG_IDX(TxFlowPktCnt)
+#define crp_psrcvdatacount CREG_IDX(PSRcvDataCount)
+#define crp_psrcvpktscount CREG_IDX(PSRcvPktsCount)
+#define crp_psxmitdatacount CREG_IDX(PSXmitDataCount)
+#define crp_psxmitpktscount CREG_IDX(PSXmitPktsCount)
+#define crp_psxmitwaitcount CREG_IDX(PSXmitWaitCount)
+#define crp_rcvebp CREG_IDX(RxEBPCnt)
+#define crp_rcvflowctrlviol CREG_IDX(RxFlowCtrlViolCnt)
+#define crp_rcvovfl CREG_IDX(RxBufOvflCnt)
+#define crp_rxdlidfltr CREG_IDX(RxDlidFltrCnt)
+#define crp_rxdroppkt CREG_IDX(RxDroppedPktCnt)
+#define crp_rxotherlocalphyerr CREG_IDX(RxOtherLocalPhyErrCnt)
+#define crp_rxqpinvalidctxt CREG_IDX(RxQPInvalidContextCnt)
+#define crp_rxvlerr CREG_IDX(RxVlErrCnt)
+#define crp_sendstall CREG_IDX(TxFlowStallCnt)
+#define crp_txdroppedpkt CREG_IDX(TxDroppedPktCnt)
+#define crp_txhdrerr CREG_IDX(TxHeadersErrCnt)
+#define crp_txlenerr CREG_IDX(TxLenErrCnt)
+#define crp_txminmaxlenerr CREG_IDX(TxMaxMinLenErrCnt)
+#define crp_txsdmadesc CREG_IDX(TxSDmaDescCnt)
+#define crp_txunderrun CREG_IDX(TxUnderrunCnt)
+#define crp_txunsupvl CREG_IDX(TxUnsupVLErrCnt)
+#define crp_vl15droppedpkt CREG_IDX(RxVL15DroppedPktCnt)
+#define crp_wordrcv CREG_IDX(RxDwordCnt)
+#define crp_wordsend CREG_IDX(TxDwordCnt)
+#define crp_tx_creditstalls CREG_IDX(TxCreditUpToDateTimeOut)
+
+/* these are the (few) counters that are not port-specific */
+#define CREG_DEVIDX(regname) ((QIB_7322_##regname##_OFFS - \
+ QIB_7322_LBIntCnt_OFFS) / sizeof(u64))
+#define cr_base_egrovfl CREG_DEVIDX(RxP0HdrEgrOvflCnt)
+#define cr_lbint CREG_DEVIDX(LBIntCnt)
+#define cr_lbstall CREG_DEVIDX(LBFlowStallCnt)
+#define cr_pcieretrydiag CREG_DEVIDX(PcieRetryBufDiagQwordCnt)
+#define cr_rxtidflowdrop CREG_DEVIDX(RxTidFlowDropCnt)
+#define cr_tidfull CREG_DEVIDX(RxTIDFullErrCnt)
+#define cr_tidinvalid CREG_DEVIDX(RxTIDValidErrCnt)
+
+/* no chip register for # of IB ports supported, so define */
+#define NUM_IB_PORTS 2
+
+/* 1 VL15 buffer per hardware IB port, no register for this, so define */
+#define NUM_VL15_BUFS NUM_IB_PORTS
+
+/*
+ * Context 0 and 1 are special, and there is no chip register that
+ * defines this value, so we have to define it here.
+ * These are all allocated to either 0 or 1 for single port
+ * hardware configuration; otherwise each gets half.
+ */
+#define KCTXT0_EGRCNT 2048
+
+/* values for vl and port fields in PBC, 7322-specific */
+#define PBC_PORT_SEL_LSB 26
+#define PBC_PORT_SEL_RMASK 1
+#define PBC_VL_NUM_LSB 27
+#define PBC_VL_NUM_RMASK 7
+#define PBC_7322_VL15_SEND (1ULL << 63) /* pbc; VL15, no credit check */
+#define PBC_7322_VL15_SEND_CTRL (1ULL << 31) /* control version of same */
+
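+/*
+ * Per-packet delay multipliers, inversely proportional to the IB static
+ * rate relative to 40 Gb/s (e.g. 2.5 Gb/s -> 16, 40 Gb/s -> 1).
+ */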
+static u8 ib_rate_to_delay[IB_RATE_120_GBPS + 1] = {
+ [IB_RATE_2_5_GBPS] = 16,
+ [IB_RATE_5_GBPS] = 8,
+ [IB_RATE_10_GBPS] = 4,
+ [IB_RATE_20_GBPS] = 2,
+ [IB_RATE_30_GBPS] = 2,
+ [IB_RATE_40_GBPS] = 1
+};
+
+#define IBA7322_LINKSPEED_SHIFT SYM_LSB(IBCStatusA_0, LinkSpeedActive)
+#define IBA7322_LINKWIDTH_SHIFT SYM_LSB(IBCStatusA_0, LinkWidthActive)
+
+/* link training states, from IBC */
+#define IB_7322_LT_STATE_DISABLED 0x00
+#define IB_7322_LT_STATE_LINKUP 0x01
+#define IB_7322_LT_STATE_POLLACTIVE 0x02
+#define IB_7322_LT_STATE_POLLQUIET 0x03
+#define IB_7322_LT_STATE_SLEEPDELAY 0x04
+#define IB_7322_LT_STATE_SLEEPQUIET 0x05
+#define IB_7322_LT_STATE_CFGDEBOUNCE 0x08
+#define IB_7322_LT_STATE_CFGRCVFCFG 0x09
+#define IB_7322_LT_STATE_CFGWAITRMT 0x0a
+#define IB_7322_LT_STATE_CFGIDLE 0x0b
+#define IB_7322_LT_STATE_RECOVERRETRAIN 0x0c
+#define IB_7322_LT_STATE_TXREVLANES 0x0d
+#define IB_7322_LT_STATE_RECOVERWAITRMT 0x0e
+#define IB_7322_LT_STATE_RECOVERIDLE 0x0f
+#define IB_7322_LT_STATE_CFGENH 0x10
+#define IB_7322_LT_STATE_CFGTEST 0x11
+
+/* link state machine states from IBC */
+#define IB_7322_L_STATE_DOWN 0x0
+#define IB_7322_L_STATE_INIT 0x1
+#define IB_7322_L_STATE_ARM 0x2
+#define IB_7322_L_STATE_ACTIVE 0x3
+#define IB_7322_L_STATE_ACT_DEFER 0x4
+
+static const u8 qib_7322_physportstate[0x20] = {
+ [IB_7322_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED,
+ [IB_7322_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP,
+ [IB_7322_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL,
+ [IB_7322_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL,
+ [IB_7322_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP,
+ [IB_7322_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP,
+ [IB_7322_LT_STATE_CFGDEBOUNCE] = IB_PHYSPORTSTATE_CFG_TRAIN,
+ [IB_7322_LT_STATE_CFGRCVFCFG] =
+ IB_PHYSPORTSTATE_CFG_TRAIN,
+ [IB_7322_LT_STATE_CFGWAITRMT] =
+ IB_PHYSPORTSTATE_CFG_TRAIN,
+ [IB_7322_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_IDLE,
+ [IB_7322_LT_STATE_RECOVERRETRAIN] =
+ IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
+ [IB_7322_LT_STATE_RECOVERWAITRMT] =
+ IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
+ [IB_7322_LT_STATE_RECOVERIDLE] =
+ IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
+ [IB_7322_LT_STATE_CFGENH] = IB_PHYSPORTSTATE_CFG_ENH,
+ [IB_7322_LT_STATE_CFGTEST] = IB_PHYSPORTSTATE_CFG_TRAIN,
+ [0x12] = IB_PHYSPORTSTATE_CFG_TRAIN,
+ [0x13] = IB_PHYSPORTSTATE_CFG_WAIT_ENH,
+ [0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
+ [0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
+ [0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
+ [0x17] = IB_PHYSPORTSTATE_CFG_TRAIN
+};
+
+struct qib_chip_specific {
+ u64 __iomem *cregbase;
+ u64 *cntrs;
+ spinlock_t rcvmod_lock; /* protect rcvctrl shadow changes */
+ spinlock_t gpio_lock; /* RMW of shadows/regs for ExtCtrl and GPIO */
+ u64 main_int_mask; /* clear bits which have dedicated handlers */
+ u64 int_enable_mask; /* for per port interrupts in single port mode */
+ u64 errormask;
+ u64 hwerrmask;
+ u64 gpio_out; /* shadow of kr_gpio_out, for rmw ops */
+ u64 gpio_mask; /* shadow the gpio mask register */
+ u64 extctrl; /* shadow the gpio output enable, etc... */
+ u32 ncntrs;
+ u32 nportcntrs;
+ u32 cntrnamelen;
+ u32 portcntrnamelen;
+ u32 numctxts;
+ u32 rcvegrcnt;
+ u32 updthresh; /* current AvailUpdThld */
+ u32 updthresh_dflt; /* default AvailUpdThld */
+ u32 r1;
+ int irq;
+ u32 num_msix_entries;
+ u32 sdmabufcnt;
+ u32 lastbuf_for_pio;
+ u32 stay_in_freeze;
+ u32 recovery_ports_initted;
+ struct msix_entry *msix_entries;
+ void **msix_arg;
+ unsigned long *sendchkenable;
+ unsigned long *sendgrhchk;
+ unsigned long *sendibchk;
+ u32 rcvavail_timeout[18];
+ char emsgbuf[128]; /* for device error interrupt msg buffer */
+};
+
+/* Table of Tx Emphasis entries in "human readable" form. */
+struct txdds_ent {
+ u8 amp;
+ u8 pre;
+ u8 main;
+ u8 post;
+};
+
+struct vendor_txdds_ent {
+ u8 oui[QSFP_VOUI_LEN];
+ u8 *partnum;
+ struct txdds_ent sdr;
+ struct txdds_ent ddr;
+ struct txdds_ent qdr;
+};
+
+static void write_tx_serdes_param(struct qib_pportdata *, struct txdds_ent *);
+
+#define TXDDS_TABLE_SZ 16 /* number of entries per speed in onchip table */
+#define TXDDS_EXTRA_SZ 11 /* number of extra tx settings entries */
+#define SERDES_CHANS 4 /* yes, it's obvious, but one less magic number */
+
+#define H1_FORCE_VAL 8
+#define H1_FORCE_QME 1 /* may be overridden via setup_txselect() */
+#define H1_FORCE_QMH 7 /* may be overridden via setup_txselect() */
+
+/* The static and dynamic registers are paired, and the pairs indexed by spd */
+#define krp_static_adapt_dis(spd) (KREG_IBPORT_IDX(ADAPT_DISABLE_STATIC_SDR) \
+ + ((spd) * 2))
+
+#define QDR_DFE_DISABLE_DELAY 4000 /* msec after LINKUP */
+#define QDR_STATIC_ADAPT_DOWN 0xf0f0f0f0ULL /* link down, H1-H4 QDR adapts */
+#define QDR_STATIC_ADAPT_DOWN_R1 0ULL /* r1 link down, H1-H4 QDR adapts */
+#define QDR_STATIC_ADAPT_INIT 0xffffffffffULL /* up, disable H0,H1-8, LE */
+#define QDR_STATIC_ADAPT_INIT_R1 0xf0ffffffffULL /* r1 up, disable H0,H1-8 */
+
+struct qib_chippport_specific {
+ u64 __iomem *kpregbase;
+ u64 __iomem *cpregbase;
+ u64 *portcntrs;
+ struct qib_pportdata *ppd;
+ wait_queue_head_t autoneg_wait;
+ struct delayed_work autoneg_work;
+ struct delayed_work ipg_work;
+ struct timer_list chase_timer;
+ /*
+ * these 5 fields are used to establish deltas for IB symbol
+ * errors and linkrecovery errors. They can be reported on
+ * some chips during link negotiation prior to INIT, and with
+ * DDR when faking DDR negotiations with non-IBTA switches.
+ * The chip counters are adjusted at driver unload if there is
+ * a non-zero delta.
+ */
+ u64 ibdeltainprog;
+ u64 ibsymdelta;
+ u64 ibsymsnap;
+ u64 iblnkerrdelta;
+ u64 iblnkerrsnap;
+ u64 iblnkdownsnap;
+ u64 iblnkdowndelta;
+ u64 ibmalfdelta;
+ u64 ibmalfsnap;
+ u64 ibcctrl_a; /* krp_ibcctrl_a shadow */
+ u64 ibcctrl_b; /* krp_ibcctrl_b shadow */
+ u64 qdr_dfe_time;
+ u64 chase_end;
+ u32 autoneg_tries;
+ u32 recovery_init;
+ u32 qdr_dfe_on;
+ u32 qdr_reforce;
+ /*
+ * Per-bay per-channel rcv QMH H1 values and Tx values for QDR.
+ * entry zero is unused, to simplify indexing
+ */
+ u8 h1_val;
+ u8 no_eep; /* txselect table index to use if no qsfp info */
+ u8 ipg_tries;
+ u8 ibmalfusesnap;
+ struct qib_qsfp_data qsfp_data;
+ char epmsgbuf[192]; /* for port error interrupt msg buffer */
+};
+
+static struct {
+ const char *name;
+ irq_handler_t handler;
+ int lsb;
+ int port; /* 0 if not port-specific, else port # */
+} irq_table[] = {
+ { QIB_DRV_NAME, qib_7322intr, -1, 0 },
+ { QIB_DRV_NAME " (buf avail)", qib_7322bufavail,
+ SYM_LSB(IntStatus, SendBufAvail), 0 },
+ { QIB_DRV_NAME " (sdma 0)", sdma_intr,
+ SYM_LSB(IntStatus, SDmaInt_0), 1 },
+ { QIB_DRV_NAME " (sdma 1)", sdma_intr,
+ SYM_LSB(IntStatus, SDmaInt_1), 2 },
+ { QIB_DRV_NAME " (sdmaI 0)", sdma_idle_intr,
+ SYM_LSB(IntStatus, SDmaIdleInt_0), 1 },
+ { QIB_DRV_NAME " (sdmaI 1)", sdma_idle_intr,
+ SYM_LSB(IntStatus, SDmaIdleInt_1), 2 },
+ { QIB_DRV_NAME " (sdmaP 0)", sdma_progress_intr,
+ SYM_LSB(IntStatus, SDmaProgressInt_0), 1 },
+ { QIB_DRV_NAME " (sdmaP 1)", sdma_progress_intr,
+ SYM_LSB(IntStatus, SDmaProgressInt_1), 2 },
+ { QIB_DRV_NAME " (sdmaC 0)", sdma_cleanup_intr,
+ SYM_LSB(IntStatus, SDmaCleanupDone_0), 1 },
+ { QIB_DRV_NAME " (sdmaC 1)", sdma_cleanup_intr,
+ SYM_LSB(IntStatus, SDmaCleanupDone_1), 2 },
+};
+
+/* ibcctrl bits */
+#define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1
+/* cycle through TS1/TS2 till OK */
+#define QLOGIC_IB_IBCC_LINKINITCMD_POLL 2
+/* wait for TS1, then go on */
+#define QLOGIC_IB_IBCC_LINKINITCMD_SLEEP 3
+#define QLOGIC_IB_IBCC_LINKINITCMD_SHIFT 16
+
+#define QLOGIC_IB_IBCC_LINKCMD_DOWN 1 /* move to 0x11 */
+#define QLOGIC_IB_IBCC_LINKCMD_ARMED 2 /* move to 0x21 */
+#define QLOGIC_IB_IBCC_LINKCMD_ACTIVE 3 /* move to 0x31 */
+
+#define BLOB_7322_IBCHG 0x101
+
+static inline void qib_write_kreg(const struct qib_devdata *dd,
+ const u32 regno, u64 value);
+static inline u32 qib_read_kreg32(const struct qib_devdata *, const u32);
+static void write_7322_initregs(struct qib_devdata *);
+static void write_7322_init_portregs(struct qib_pportdata *);
+static void setup_7322_link_recovery(struct qib_pportdata *, u32);
+static void check_7322_rxe_status(struct qib_pportdata *);
+static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *, u64, u32 *);
+
+/**
+ * qib_read_ureg32 - read 32-bit virtualized per-context register
+ * @dd: device
+ * @regno: register number
+ * @ctxt: context number
+ *
+ * Return the contents of a register that is virtualized to be per context.
+ * Returns 0 if the device is not present or its registers are not
+ * mapped (not distinguishable from valid contents at runtime).
+ */
+static inline u32 qib_read_ureg32(const struct qib_devdata *dd,
+ enum qib_ureg regno, int ctxt)
+{
+ if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
+ return 0;
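+ /*
+ * Per-context user registers live at userbase when it is mapped,
+ * otherwise at kregbase + uregbase, spaced ureg_align bytes apart
+ * per context; regno indexes 64-bit words within that block.
+ */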
+ return readl(regno + (u64 __iomem *)(
+ (dd->ureg_align * ctxt) + (dd->userbase ?
+ (char __iomem *)dd->userbase :
+ (char __iomem *)dd->kregbase + dd->uregbase)));
+}
+
+/**
+ * qib_read_ureg - read virtualized per-context register
+ * @dd: device
+ * @regno: register number
+ * @ctxt: context number
+ *
+ * Return the contents of a register that is virtualized to be per context.
+ * Returns 0 if the device is not present or its registers are not
+ * mapped (not distinguishable from valid contents at runtime).
+ */
+static inline u64 qib_read_ureg(const struct qib_devdata *dd,
+ enum qib_ureg regno, int ctxt)
+{
+
+ if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
+ return 0;
+ return readq(regno + (u64 __iomem *)(
+ (dd->ureg_align * ctxt) + (dd->userbase ?
+ (char __iomem *)dd->userbase :
+ (char __iomem *)dd->kregbase + dd->uregbase)));
+}
+
+/**
+ * qib_write_ureg - write virtualized per-context register
+ * @dd: device
+ * @regno: register number
+ * @value: value
+ * @ctxt: context
+ *
+ * Write the contents of a register that is virtualized to be per context.
+ */
+static inline void qib_write_ureg(const struct qib_devdata *dd,
+ enum qib_ureg regno, u64 value, int ctxt)
+{
+ u64 __iomem *ubase;
+
+ if (dd->userbase)
+ ubase = (u64 __iomem *)
+ ((char __iomem *) dd->userbase +
+ dd->ureg_align * ctxt);
+ else
+ ubase = (u64 __iomem *)
+ (dd->uregbase +
+ (char __iomem *) dd->kregbase +
+ dd->ureg_align * ctxt);
+
+ if (dd->kregbase && (dd->flags & QIB_PRESENT))
+ writeq(value, &ubase[regno]);
+}
+
+static inline u32 qib_read_kreg32(const struct qib_devdata *dd,
+ const u32 regno)
+{
+ if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
+ return -1;
+ return readl((u32 __iomem *) &dd->kregbase[regno]);
+}
+
+static inline u64 qib_read_kreg64(const struct qib_devdata *dd,
+ const u32 regno)
+{
+ if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
+ return -1;
+ return readq(&dd->kregbase[regno]);
+}
+
+static inline void qib_write_kreg(const struct qib_devdata *dd,
+ const u32 regno, u64 value)
+{
+ if (dd->kregbase && (dd->flags & QIB_PRESENT))
+ writeq(value, &dd->kregbase[regno]);
+}
+
+/*
+ * Not many sanity checks for the port-specific kernel register routines,
+ * since they are only used when it's known to be safe.
+ */
+static inline u64 qib_read_kreg_port(const struct qib_pportdata *ppd,
+ const u16 regno)
+{
+ if (!ppd->cpspec->kpregbase || !(ppd->dd->flags & QIB_PRESENT))
+ return 0ULL;
+ return readq(&ppd->cpspec->kpregbase[regno]);
+}
+
+static inline void qib_write_kreg_port(const struct qib_pportdata *ppd,
+ const u16 regno, u64 value)
+{
+ if (ppd->cpspec && ppd->dd && ppd->cpspec->kpregbase &&
+ (ppd->dd->flags & QIB_PRESENT))
+ writeq(value, &ppd->cpspec->kpregbase[regno]);
+}
+
+/**
+ * qib_write_kreg_ctxt - write a device's per-ctxt 64-bit kernel register
+ * @dd: the qlogic_ib device
+ * @regno: the register number to write
+ * @ctxt: the context containing the register
+ * @value: the value to write
+ */
+static inline void qib_write_kreg_ctxt(const struct qib_devdata *dd,
+ const u16 regno, unsigned ctxt,
+ u64 value)
+{
+ qib_write_kreg(dd, regno + ctxt, value);
+}
+
+static inline u64 read_7322_creg(const struct qib_devdata *dd, u16 regno)
+{
+ if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
+ return 0;
+ return readq(&dd->cspec->cregbase[regno]);
+}
+
+static inline u32 read_7322_creg32(const struct qib_devdata *dd, u16 regno)
+{
+ if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
+ return 0;
+ return readl(&dd->cspec->cregbase[regno]);
+}
+
+static inline void write_7322_creg_port(const struct qib_pportdata *ppd,
+ u16 regno, u64 value)
+{
+ if (ppd->cpspec && ppd->cpspec->cpregbase &&
+ (ppd->dd->flags & QIB_PRESENT))
+ writeq(value, &ppd->cpspec->cpregbase[regno]);
+}
+
+static inline u64 read_7322_creg_port(const struct qib_pportdata *ppd,
+ u16 regno)
+{
+ if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
+ !(ppd->dd->flags & QIB_PRESENT))
+ return 0;
+ return readq(&ppd->cpspec->cpregbase[regno]);
+}
+
+static inline u32 read_7322_creg32_port(const struct qib_pportdata *ppd,
+ u16 regno)
+{
+ if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
+ !(ppd->dd->flags & QIB_PRESENT))
+ return 0;
+ return readl(&ppd->cpspec->cpregbase[regno]);
+}
+
+/* bits in Control register */
+#define QLOGIC_IB_C_RESET SYM_MASK(Control, SyncReset)
+#define QLOGIC_IB_C_SDMAFETCHPRIOEN SYM_MASK(Control, SDmaDescFetchPriorityEn)
+
+/* bits in general interrupt regs */
+#define QIB_I_RCVURG_LSB SYM_LSB(IntMask, RcvUrg0IntMask)
+#define QIB_I_RCVURG_RMASK MASK_ACROSS(0, 17)
+#define QIB_I_RCVURG_MASK (QIB_I_RCVURG_RMASK << QIB_I_RCVURG_LSB)
+#define QIB_I_RCVAVAIL_LSB SYM_LSB(IntMask, RcvAvail0IntMask)
+#define QIB_I_RCVAVAIL_RMASK MASK_ACROSS(0, 17)
+#define QIB_I_RCVAVAIL_MASK (QIB_I_RCVAVAIL_RMASK << QIB_I_RCVAVAIL_LSB)
+#define QIB_I_C_ERROR INT_MASK(Err)
+
+#define QIB_I_SPIOSENT (INT_MASK_P(SendDone, 0) | INT_MASK_P(SendDone, 1))
+#define QIB_I_SPIOBUFAVAIL INT_MASK(SendBufAvail)
+#define QIB_I_GPIO INT_MASK(AssertGPIO)
+#define QIB_I_P_SDMAINT(pidx) \
+ (INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
+ INT_MASK_P(SDmaProgress, pidx) | \
+ INT_MASK_PM(SDmaCleanupDone, pidx))
+
+/* Interrupt bits that are "per port" */
+#define QIB_I_P_BITSEXTANT(pidx) \
+ (INT_MASK_P(Err, pidx) | INT_MASK_P(SendDone, pidx) | \
+ INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
+ INT_MASK_P(SDmaProgress, pidx) | \
+ INT_MASK_PM(SDmaCleanupDone, pidx))
+
+/* Interrupt bits that are common to a device */
+/* currently unused: QIB_I_SPIOSENT */
+#define QIB_I_C_BITSEXTANT \
+ (QIB_I_RCVURG_MASK | QIB_I_RCVAVAIL_MASK | \
+ QIB_I_SPIOSENT | \
+ QIB_I_C_ERROR | QIB_I_SPIOBUFAVAIL | QIB_I_GPIO)
+
+#define QIB_I_BITSEXTANT (QIB_I_C_BITSEXTANT | \
+ QIB_I_P_BITSEXTANT(0) | QIB_I_P_BITSEXTANT(1))
+
+/*
+ * Error bits that are "per port".
+ */
+#define QIB_E_P_IBSTATUSCHANGED ERR_MASK_N(IBStatusChanged)
+#define QIB_E_P_SHDR ERR_MASK_N(SHeadersErr)
+#define QIB_E_P_VL15_BUF_MISUSE ERR_MASK_N(VL15BufMisuseErr)
+#define QIB_E_P_SND_BUF_MISUSE ERR_MASK_N(SendBufMisuseErr)
+#define QIB_E_P_SUNSUPVL ERR_MASK_N(SendUnsupportedVLErr)
+#define QIB_E_P_SUNEXP_PKTNUM ERR_MASK_N(SendUnexpectedPktNumErr)
+#define QIB_E_P_SDROP_DATA ERR_MASK_N(SendDroppedDataPktErr)
+#define QIB_E_P_SDROP_SMP ERR_MASK_N(SendDroppedSmpPktErr)
+#define QIB_E_P_SPKTLEN ERR_MASK_N(SendPktLenErr)
+#define QIB_E_P_SUNDERRUN ERR_MASK_N(SendUnderRunErr)
+#define QIB_E_P_SMAXPKTLEN ERR_MASK_N(SendMaxPktLenErr)
+#define QIB_E_P_SMINPKTLEN ERR_MASK_N(SendMinPktLenErr)
+#define QIB_E_P_RIBLOSTLINK ERR_MASK_N(RcvIBLostLinkErr)
+#define QIB_E_P_RHDR ERR_MASK_N(RcvHdrErr)
+#define QIB_E_P_RHDRLEN ERR_MASK_N(RcvHdrLenErr)
+#define QIB_E_P_RBADTID ERR_MASK_N(RcvBadTidErr)
+#define QIB_E_P_RBADVERSION ERR_MASK_N(RcvBadVersionErr)
+#define QIB_E_P_RIBFLOW ERR_MASK_N(RcvIBFlowErr)
+#define QIB_E_P_REBP ERR_MASK_N(RcvEBPErr)
+#define QIB_E_P_RUNSUPVL ERR_MASK_N(RcvUnsupportedVLErr)
+#define QIB_E_P_RUNEXPCHAR ERR_MASK_N(RcvUnexpectedCharErr)
+#define QIB_E_P_RSHORTPKTLEN ERR_MASK_N(RcvShortPktLenErr)
+#define QIB_E_P_RLONGPKTLEN ERR_MASK_N(RcvLongPktLenErr)
+#define QIB_E_P_RMAXPKTLEN ERR_MASK_N(RcvMaxPktLenErr)
+#define QIB_E_P_RMINPKTLEN ERR_MASK_N(RcvMinPktLenErr)
+#define QIB_E_P_RICRC ERR_MASK_N(RcvICRCErr)
+#define QIB_E_P_RVCRC ERR_MASK_N(RcvVCRCErr)
+#define QIB_E_P_RFORMATERR ERR_MASK_N(RcvFormatErr)
+
+#define QIB_E_P_SDMA1STDESC ERR_MASK_N(SDma1stDescErr)
+#define QIB_E_P_SDMABASE ERR_MASK_N(SDmaBaseErr)
+#define QIB_E_P_SDMADESCADDRMISALIGN ERR_MASK_N(SDmaDescAddrMisalignErr)
+#define QIB_E_P_SDMADWEN ERR_MASK_N(SDmaDwEnErr)
+#define QIB_E_P_SDMAGENMISMATCH ERR_MASK_N(SDmaGenMismatchErr)
+#define QIB_E_P_SDMAHALT ERR_MASK_N(SDmaHaltErr)
+#define QIB_E_P_SDMAMISSINGDW ERR_MASK_N(SDmaMissingDwErr)
+#define QIB_E_P_SDMAOUTOFBOUND ERR_MASK_N(SDmaOutOfBoundErr)
+#define QIB_E_P_SDMARPYTAG ERR_MASK_N(SDmaRpyTagErr)
+#define QIB_E_P_SDMATAILOUTOFBOUND ERR_MASK_N(SDmaTailOutOfBoundErr)
+#define QIB_E_P_SDMAUNEXPDATA ERR_MASK_N(SDmaUnexpDataErr)
+
+/* Error bits that are common to a device */
+#define QIB_E_RESET ERR_MASK(ResetNegated)
+#define QIB_E_HARDWARE ERR_MASK(HardwareErr)
+#define QIB_E_INVALIDADDR ERR_MASK(InvalidAddrErr)
+
+
+/*
+ * Per chip (rather than per-port) errors. Most either do
+ * nothing but trigger a print (because they self-recover, or
+ * always occur in tandem with other errors that handle the
+ * issue), or because they indicate errors with no recovery,
+ * but we want to know that they happened.
+ */
+#define QIB_E_SBUF_VL15_MISUSE ERR_MASK(SBufVL15MisUseErr)
+#define QIB_E_BADEEP ERR_MASK(InvalidEEPCmd)
+#define QIB_E_VLMISMATCH ERR_MASK(SendVLMismatchErr)
+#define QIB_E_ARMLAUNCH ERR_MASK(SendArmLaunchErr)
+#define QIB_E_SPCLTRIG ERR_MASK(SendSpecialTriggerErr)
+#define QIB_E_RRCVHDRFULL ERR_MASK(RcvHdrFullErr)
+#define QIB_E_RRCVEGRFULL ERR_MASK(RcvEgrFullErr)
+#define QIB_E_RCVCTXTSHARE ERR_MASK(RcvContextShareErr)
+
+/* SDMA chip errors (not per port)
+ * QIB_E_SDMA_BUF_DUP needs no special handling, because we will also get
+ * the SDMAHALT error immediately, so we just print the dup error via the
+ * E_AUTO mechanism. This is true of most of the per-port fatal errors
+ * as well, but since this is port-independent, by definition, it's
+ * handled a bit differently. SDMA_VL15 and SDMA_WRONG_PORT are per
+ * packet send errors, and so are handled in the same manner as other
+ * per-packet errors.
+ */
+#define QIB_E_SDMA_VL15 ERR_MASK(SDmaVL15Err)
+#define QIB_E_SDMA_WRONG_PORT ERR_MASK(SDmaWrongPortErr)
+#define QIB_E_SDMA_BUF_DUP ERR_MASK(SDmaBufMaskDuplicateErr)
+
+/*
+ * Below is functionally equivalent to the legacy QLOGIC_IB_E_PKTERRS;
+ * it is used to print "common" packet errors.
+ */
+#define QIB_E_P_PKTERRS (QIB_E_P_SPKTLEN |\
+ QIB_E_P_SDROP_DATA | QIB_E_P_RVCRC |\
+ QIB_E_P_RICRC | QIB_E_P_RSHORTPKTLEN |\
+ QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
+ QIB_E_P_REBP)
+
+/* Error bits that are packet-related (Receive, per-port) */
+#define QIB_E_P_RPKTERRS (\
+ QIB_E_P_RHDRLEN | QIB_E_P_RBADTID | \
+ QIB_E_P_RBADVERSION | QIB_E_P_RHDR | \
+ QIB_E_P_RLONGPKTLEN | QIB_E_P_RSHORTPKTLEN |\
+ QIB_E_P_RMAXPKTLEN | QIB_E_P_RMINPKTLEN | \
+ QIB_E_P_RFORMATERR | QIB_E_P_RUNSUPVL | \
+ QIB_E_P_RUNEXPCHAR | QIB_E_P_RIBFLOW | QIB_E_P_REBP)
+
+/*
+ * Error bits that are Send-related (per port)
+ * (ARMLAUNCH excluded from E_SPKTERRS because it gets special handling).
+ * All of these potentially need to have a buffer disarmed
+ */
+#define QIB_E_P_SPKTERRS (\
+ QIB_E_P_SUNEXP_PKTNUM |\
+ QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
+ QIB_E_P_SMAXPKTLEN |\
+ QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
+ QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN | \
+ QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNSUPVL)
+
+#define QIB_E_SPKTERRS ( \
+ QIB_E_SBUF_VL15_MISUSE | QIB_E_VLMISMATCH | \
+ ERR_MASK_N(SendUnsupportedVLErr) | \
+ QIB_E_SPCLTRIG | QIB_E_SDMA_VL15 | QIB_E_SDMA_WRONG_PORT)
+
+#define QIB_E_P_SDMAERRS ( \
+ QIB_E_P_SDMAHALT | \
+ QIB_E_P_SDMADESCADDRMISALIGN | \
+ QIB_E_P_SDMAUNEXPDATA | \
+ QIB_E_P_SDMAMISSINGDW | \
+ QIB_E_P_SDMADWEN | \
+ QIB_E_P_SDMARPYTAG | \
+ QIB_E_P_SDMA1STDESC | \
+ QIB_E_P_SDMABASE | \
+ QIB_E_P_SDMATAILOUTOFBOUND | \
+ QIB_E_P_SDMAOUTOFBOUND | \
+ QIB_E_P_SDMAGENMISMATCH)
+
+/*
+ * This sets some bits more than once, but makes it more obvious which
+ * bits are not handled under other categories, and the repeat definition
+ * is not a problem.
+ */
+#define QIB_E_P_BITSEXTANT ( \
+ QIB_E_P_SPKTERRS | QIB_E_P_PKTERRS | QIB_E_P_RPKTERRS | \
+ QIB_E_P_RIBLOSTLINK | QIB_E_P_IBSTATUSCHANGED | \
+ QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNDERRUN | \
+ QIB_E_P_SHDR | QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SDMAERRS \
+ )
+
+/*
+ * These are errors that can occur when the link
+ * changes state while a packet is being sent or received. This doesn't
+ * cover things like EBP or VCRC that can be the result of the sender
+ * having the link change state, so we receive a "known bad" packet.
+ * All of these are "per port", so renamed:
+ */
+#define QIB_E_P_LINK_PKTERRS (\
+ QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
+ QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN |\
+ QIB_E_P_RSHORTPKTLEN | QIB_E_P_RMINPKTLEN |\
+ QIB_E_P_RUNEXPCHAR)
+
+/*
+ * This sets some bits more than once, but makes it more obvious which
+ * bits are not handled under other categories (such as QIB_E_SPKTERRS),
+ * and the repeat definition is not a problem.
+ */
+#define QIB_E_C_BITSEXTANT (\
+ QIB_E_HARDWARE | QIB_E_INVALIDADDR | QIB_E_BADEEP |\
+ QIB_E_ARMLAUNCH | QIB_E_VLMISMATCH | QIB_E_RRCVHDRFULL |\
+ QIB_E_RRCVEGRFULL | QIB_E_RESET | QIB_E_SBUF_VL15_MISUSE)
+
+/* Likewise Neuter E_SPKT_ERRS_IGNORE */
+#define E_SPKT_ERRS_IGNORE 0
+
+#define QIB_EXTS_MEMBIST_DISABLED \
+ SYM_MASK(EXTStatus, MemBISTDisabled)
+#define QIB_EXTS_MEMBIST_ENDTEST \
+ SYM_MASK(EXTStatus, MemBISTEndTest)
+
+#define QIB_E_SPIOARMLAUNCH \
+ ERR_MASK(SendArmLaunchErr)
+
+#define IBA7322_IBCC_LINKINITCMD_MASK SYM_RMASK(IBCCtrlA_0, LinkInitCmd)
+#define IBA7322_IBCC_LINKCMD_SHIFT SYM_LSB(IBCCtrlA_0, LinkCmd)
+
+/*
+ * IBTA_1_2 is set when multiple speeds are enabled (normal),
+ * and also if forced QDR (only QDR enabled). It's enabled for the
+ * forced QDR case so that scrambling will be enabled by the TS3
+ * exchange, when supported by both sides of the link.
+ */
+#define IBA7322_IBC_IBTA_1_2_MASK SYM_MASK(IBCCtrlB_0, IB_ENHANCED_MODE)
+#define IBA7322_IBC_MAX_SPEED_MASK SYM_MASK(IBCCtrlB_0, SD_SPEED)
+#define IBA7322_IBC_SPEED_QDR SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR)
+#define IBA7322_IBC_SPEED_DDR SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR)
+#define IBA7322_IBC_SPEED_SDR SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR)
+#define IBA7322_IBC_SPEED_MASK (SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR) | \
+ SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR) | SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR))
+#define IBA7322_IBC_SPEED_LSB SYM_LSB(IBCCtrlB_0, SD_SPEED_SDR)
+
+#define IBA7322_LEDBLINK_OFF_SHIFT SYM_LSB(RcvPktLEDCnt_0, OFFperiod)
+#define IBA7322_LEDBLINK_ON_SHIFT SYM_LSB(RcvPktLEDCnt_0, ONperiod)
+
+#define IBA7322_IBC_WIDTH_AUTONEG SYM_MASK(IBCCtrlB_0, IB_NUM_CHANNELS)
+#define IBA7322_IBC_WIDTH_4X_ONLY (1<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS))
+#define IBA7322_IBC_WIDTH_1X_ONLY (0<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS))
+
+#define IBA7322_IBC_RXPOL_MASK SYM_MASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP)
+#define IBA7322_IBC_RXPOL_LSB SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP)
+#define IBA7322_IBC_HRTBT_MASK (SYM_MASK(IBCCtrlB_0, HRTBT_AUTO) | \
+ SYM_MASK(IBCCtrlB_0, HRTBT_ENB))
+#define IBA7322_IBC_HRTBT_RMASK (IBA7322_IBC_HRTBT_MASK >> \
+ SYM_LSB(IBCCtrlB_0, HRTBT_ENB))
+#define IBA7322_IBC_HRTBT_LSB SYM_LSB(IBCCtrlB_0, HRTBT_ENB)
+
+#define IBA7322_REDIRECT_VEC_PER_REG 12
+
+#define IBA7322_SENDCHK_PKEY SYM_MASK(SendCheckControl_0, PKey_En)
+#define IBA7322_SENDCHK_BTHQP SYM_MASK(SendCheckControl_0, BTHQP_En)
+#define IBA7322_SENDCHK_SLID SYM_MASK(SendCheckControl_0, SLID_En)
+#define IBA7322_SENDCHK_RAW_IPV6 SYM_MASK(SendCheckControl_0, RawIPV6_En)
+#define IBA7322_SENDCHK_MINSZ SYM_MASK(SendCheckControl_0, PacketTooSmall_En)
+
+#define AUTONEG_TRIES 3 /* sequential retries to negotiate DDR */
+
+#define HWE_AUTO(fldname) { .mask = SYM_MASK(HwErrMask, fldname##Mask), \
+ .msg = #fldname }
+#define HWE_AUTO_P(fldname, port) { .mask = SYM_MASK(HwErrMask, \
+ fldname##Mask##_##port), .msg = #fldname }
+static const struct qib_hwerror_msgs qib_7322_hwerror_msgs[] = {
+ HWE_AUTO_P(IBSerdesPClkNotDetect, 1),
+ HWE_AUTO_P(IBSerdesPClkNotDetect, 0),
+ HWE_AUTO(PCIESerdesPClkNotDetect),
+ HWE_AUTO(PowerOnBISTFailed),
+ HWE_AUTO(TempsenseTholdReached),
+ HWE_AUTO(MemoryErr),
+ HWE_AUTO(PCIeBusParityErr),
+ HWE_AUTO(PcieCplTimeout),
+ HWE_AUTO(PciePoisonedTLP),
+ HWE_AUTO_P(SDmaMemReadErr, 1),
+ HWE_AUTO_P(SDmaMemReadErr, 0),
+ HWE_AUTO_P(IBCBusFromSPCParityErr, 1),
+ HWE_AUTO_P(IBCBusFromSPCParityErr, 0),
+ HWE_AUTO_P(statusValidNoEop, 1),
+ HWE_AUTO_P(statusValidNoEop, 0),
+ HWE_AUTO(LATriggered),
+ { .mask = 0 }
+};
+
+#define E_AUTO(fldname) { .mask = SYM_MASK(ErrMask, fldname##Mask), \
+ .msg = #fldname }
+#define E_P_AUTO(fldname) { .mask = SYM_MASK(ErrMask_0, fldname##Mask), \
+ .msg = #fldname }
+static const struct qib_hwerror_msgs qib_7322error_msgs[] = {
+ E_AUTO(ResetNegated),
+ E_AUTO(HardwareErr),
+ E_AUTO(InvalidAddrErr),
+ E_AUTO(SDmaVL15Err),
+ E_AUTO(SBufVL15MisUseErr),
+ E_AUTO(InvalidEEPCmd),
+ E_AUTO(RcvContextShareErr),
+ E_AUTO(SendVLMismatchErr),
+ E_AUTO(SendArmLaunchErr),
+ E_AUTO(SendSpecialTriggerErr),
+ E_AUTO(SDmaWrongPortErr),
+ E_AUTO(SDmaBufMaskDuplicateErr),
+ E_AUTO(RcvHdrFullErr),
+ E_AUTO(RcvEgrFullErr),
+ { .mask = 0 }
+};
+
+static const struct qib_hwerror_msgs qib_7322p_error_msgs[] = {
+ E_P_AUTO(IBStatusChanged),
+ E_P_AUTO(SHeadersErr),
+ E_P_AUTO(VL15BufMisuseErr),
+ /*
+ * SDmaHaltErr is not really an error; make the message clearer.
+ */
+ {.mask = SYM_MASK(ErrMask_0, SDmaHaltErrMask), .msg = "SDmaHalted"},
+ E_P_AUTO(SDmaDescAddrMisalignErr),
+ E_P_AUTO(SDmaUnexpDataErr),
+ E_P_AUTO(SDmaMissingDwErr),
+ E_P_AUTO(SDmaDwEnErr),
+ E_P_AUTO(SDmaRpyTagErr),
+ E_P_AUTO(SDma1stDescErr),
+ E_P_AUTO(SDmaBaseErr),
+ E_P_AUTO(SDmaTailOutOfBoundErr),
+ E_P_AUTO(SDmaOutOfBoundErr),
+ E_P_AUTO(SDmaGenMismatchErr),
+ E_P_AUTO(SendBufMisuseErr),
+ E_P_AUTO(SendUnsupportedVLErr),
+ E_P_AUTO(SendUnexpectedPktNumErr),
+ E_P_AUTO(SendDroppedDataPktErr),
+ E_P_AUTO(SendDroppedSmpPktErr),
+ E_P_AUTO(SendPktLenErr),
+ E_P_AUTO(SendUnderRunErr),
+ E_P_AUTO(SendMaxPktLenErr),
+ E_P_AUTO(SendMinPktLenErr),
+ E_P_AUTO(RcvIBLostLinkErr),
+ E_P_AUTO(RcvHdrErr),
+ E_P_AUTO(RcvHdrLenErr),
+ E_P_AUTO(RcvBadTidErr),
+ E_P_AUTO(RcvBadVersionErr),
+ E_P_AUTO(RcvIBFlowErr),
+ E_P_AUTO(RcvEBPErr),
+ E_P_AUTO(RcvUnsupportedVLErr),
+ E_P_AUTO(RcvUnexpectedCharErr),
+ E_P_AUTO(RcvShortPktLenErr),
+ E_P_AUTO(RcvLongPktLenErr),
+ E_P_AUTO(RcvMaxPktLenErr),
+ E_P_AUTO(RcvMinPktLenErr),
+ E_P_AUTO(RcvICRCErr),
+ E_P_AUTO(RcvVCRCErr),
+ E_P_AUTO(RcvFormatErr),
+ { .mask = 0 }
+};
+
+/*
+ * Below generates "auto-message" for interrupts not specific to any port or
+ * context
+ */
+#define INTR_AUTO(fldname) { .mask = SYM_MASK(IntMask, fldname##Mask), \
+ .msg = #fldname }
+/* Below generates "auto-message" for interrupts specific to a port */
+#define INTR_AUTO_P(fldname) { .mask = MASK_ACROSS(\
+ SYM_LSB(IntMask, fldname##Mask##_0), \
+ SYM_LSB(IntMask, fldname##Mask##_1)), \
+ .msg = #fldname "_P" }
+/* For some reason, the SerDesTrimDone bits are reversed */
+#define INTR_AUTO_PI(fldname) { .mask = MASK_ACROSS(\
+ SYM_LSB(IntMask, fldname##Mask##_1), \
+ SYM_LSB(IntMask, fldname##Mask##_0)), \
+ .msg = #fldname "_P" }
+/*
+ * Below generates "auto-message" for interrupts specific to a context,
+ * with ctxt-number appended
+ */
+#define INTR_AUTO_C(fldname) { .mask = MASK_ACROSS(\
+ SYM_LSB(IntMask, fldname##0IntMask), \
+ SYM_LSB(IntMask, fldname##17IntMask)), \
+ .msg = #fldname "_C"}
+
+static const struct qib_hwerror_msgs qib_7322_intr_msgs[] = {
+ INTR_AUTO_P(SDmaInt),
+ INTR_AUTO_P(SDmaProgressInt),
+ INTR_AUTO_P(SDmaIdleInt),
+ INTR_AUTO_P(SDmaCleanupDone),
+ INTR_AUTO_C(RcvUrg),
+ INTR_AUTO_P(ErrInt),
+ INTR_AUTO(ErrInt), /* non-port-specific errs */
+ INTR_AUTO(AssertGPIOInt),
+ INTR_AUTO_P(SendDoneInt),
+ INTR_AUTO(SendBufAvailInt),
+ INTR_AUTO_C(RcvAvail),
+ { .mask = 0 }
+};
+
+#define TXSYMPTOM_AUTO_P(fldname) \
+ { .mask = SYM_MASK(SendHdrErrSymptom_0, fldname), .msg = #fldname }
+static const struct qib_hwerror_msgs hdrchk_msgs[] = {
+ TXSYMPTOM_AUTO_P(NonKeyPacket),
+ TXSYMPTOM_AUTO_P(GRHFail),
+ TXSYMPTOM_AUTO_P(PkeyFail),
+ TXSYMPTOM_AUTO_P(QPFail),
+ TXSYMPTOM_AUTO_P(SLIDFail),
+ TXSYMPTOM_AUTO_P(RawIPV6),
+ TXSYMPTOM_AUTO_P(PacketTooSmall),
+ { .mask = 0 }
+};
+
+#define IBA7322_HDRHEAD_PKTINT_SHIFT 32 /* interrupt cnt in upper 32 bits */
+
+/*
+ * Called when we might have an error that is specific to a particular
+ * PIO buffer, and may need to cancel that buffer so it can be re-used,
+ * without needing to force the update of pioavail.
+ */
+static void qib_disarm_7322_senderrbufs(struct qib_pportdata *ppd)
+{
+ struct qib_devdata *dd = ppd->dd;
+ u32 i;
+ int any;
+ u32 piobcnt = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
+ u32 regcnt = (piobcnt + BITS_PER_LONG - 1) / BITS_PER_LONG;
+ unsigned long sbuf[4];
+
+ /*
+ * It's possible that sendbuffererror could have bits set; we might
+ * have already done this as a result of hardware error handling.
+ */
+ any = 0;
+ for (i = 0; i < regcnt; ++i) {
+ sbuf[i] = qib_read_kreg64(dd, kr_sendbuffererror + i);
+ if (sbuf[i]) {
+ any = 1;
+ qib_write_kreg(dd, kr_sendbuffererror + i, sbuf[i]);
+ }
+ }
+
+ if (any)
+ qib_disarm_piobufs_set(dd, sbuf, piobcnt);
+}
+
+/* No txe_recover yet, if ever */
+
+/* No decode__errors yet */
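+/*
+ * Format the names of all error bits in "errs" that appear in the msp
+ * table into "msg", appending the bit's offset within a multi-bit mask
+ * (e.g. the port or context number), and a trailing hex value for any
+ * bits not covered by the table.
+ */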
+static void err_decode(char *msg, size_t len, u64 errs,
+ const struct qib_hwerror_msgs *msp)
+{
+ u64 these, lmask;
+ int took, multi, n = 0;
+
+ while (msp && msp->mask) {
+ multi = (msp->mask & (msp->mask - 1));
+ while (errs & msp->mask) {
+ these = (errs & msp->mask);
+ lmask = (these & (these - 1)) ^ these;
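+ /* lmask is now the lowest set bit remaining in this mask */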
+ if (len) {
+ if (n++) {
+ /* separate the strings */
+ *msg++ = ',';
+ len--;
+ }
+ took = scnprintf(msg, len, "%s", msp->msg);
+ len -= took;
+ msg += took;
+ }
+ errs &= ~lmask;
+ if (len && multi) {
+ /* More than one bit in this mask */
+ int idx = -1;
+
+ while (lmask & msp->mask) {
+ ++idx;
+ lmask >>= 1;
+ }
+ took = scnprintf(msg, len, "_%d", idx);
+ len -= took;
+ msg += took;
+ }
+ }
+ ++msp;
+ }
+ /* If some bits are left, show in hex. */
+ if (len && errs)
+ snprintf(msg, len, "%sMORE:%llX", n ? "," : "",
+ (unsigned long long) errs);
+}
+
+/* only called if r1 set */
+static void flush_fifo(struct qib_pportdata *ppd)
+{
+ struct qib_devdata *dd = ppd->dd;
+ u32 __iomem *piobuf;
+ u32 bufn;
+ u32 *hdr;
+ u64 pbc;
+ const unsigned hdrwords = 7;
+ static struct qib_ib_header ibhdr = {
+ .lrh[0] = cpu_to_be16(0xF000 | QIB_LRH_BTH),
+ .lrh[1] = IB_LID_PERMISSIVE,
+ .lrh[2] = cpu_to_be16(hdrwords + SIZE_OF_CRC),
+ .lrh[3] = IB_LID_PERMISSIVE,
+ .u.oth.bth[0] = cpu_to_be32(
+ (IB_OPCODE_UD_SEND_ONLY << 24) | QIB_DEFAULT_P_KEY),
+ .u.oth.bth[1] = cpu_to_be32(0),
+ .u.oth.bth[2] = cpu_to_be32(0),
+ .u.oth.u.ud.deth[0] = cpu_to_be32(0),
+ .u.oth.u.ud.deth[1] = cpu_to_be32(0),
+ };
+
+ /*
+ * Send a dummy VL15 packet to flush the launch FIFO.
+ * This will not actually be sent since the TxeBypassIbc bit is set.
+ */
+ pbc = PBC_7322_VL15_SEND |
+ (((u64)ppd->hw_pidx) << (PBC_PORT_SEL_LSB + 32)) |
+ (hdrwords + SIZE_OF_CRC);
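+ /* PBC: VL15 send, port select in the upper word, length in dwords */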
+ piobuf = qib_7322_getsendbuf(ppd, pbc, &bufn);
+ if (!piobuf)
+ return;
+ writeq(pbc, piobuf);
+ hdr = (u32 *) &ibhdr;
+ if (dd->flags & QIB_PIO_FLUSH_WC) {
+ qib_flush_wc();
+ qib_pio_copy(piobuf + 2, hdr, hdrwords - 1);
+ qib_flush_wc();
+ __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords + 1);
+ qib_flush_wc();
+ } else
+ qib_pio_copy(piobuf + 2, hdr, hdrwords);
+ qib_sendbuf_done(dd, bufn);
+}
+
+/*
+ * This is called with interrupts disabled and sdma_lock held.
+ */
+static void qib_7322_sdma_sendctrl(struct qib_pportdata *ppd, unsigned op)
+{
+ struct qib_devdata *dd = ppd->dd;
+ u64 set_sendctrl = 0;
+ u64 clr_sendctrl = 0;
+
+ if (op & QIB_SDMA_SENDCTRL_OP_ENABLE)
+ set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable);
+ else
+ clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable);
+
+ if (op & QIB_SDMA_SENDCTRL_OP_INTENABLE)
+ set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable);
+ else
+ clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable);
+
+ if (op & QIB_SDMA_SENDCTRL_OP_HALT)
+ set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt);
+ else
+ clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt);
+
+ if (op & QIB_SDMA_SENDCTRL_OP_DRAIN)
+ set_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) |
+ SYM_MASK(SendCtrl_0, TxeAbortIbc) |
+ SYM_MASK(SendCtrl_0, TxeDrainRmFifo);
+ else
+ clr_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) |
+ SYM_MASK(SendCtrl_0, TxeAbortIbc) |
+ SYM_MASK(SendCtrl_0, TxeDrainRmFifo);
+
+ spin_lock(&dd->sendctrl_lock);
+
+ /* If we are draining everything, block sends first */
+ if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) {
+ ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable);
+ qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
+ qib_write_kreg(dd, kr_scratch, 0);
+ }
+
+ ppd->p_sendctrl |= set_sendctrl;
+ ppd->p_sendctrl &= ~clr_sendctrl;
+
+ if (op & QIB_SDMA_SENDCTRL_OP_CLEANUP)
+ qib_write_kreg_port(ppd, krp_sendctrl,
+ ppd->p_sendctrl |
+ SYM_MASK(SendCtrl_0, SDmaCleanup));
+ else
+ qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
+ qib_write_kreg(dd, kr_scratch, 0);
+
+ if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) {
+ ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable);
+ qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
+ qib_write_kreg(dd, kr_scratch, 0);
+ }
+
+ spin_unlock(&dd->sendctrl_lock);
+
+ if ((op & QIB_SDMA_SENDCTRL_OP_DRAIN) && ppd->dd->cspec->r1)
+ flush_fifo(ppd);
+}
+
+static void qib_7322_sdma_hw_clean_up(struct qib_pportdata *ppd)
+{
+ __qib_sdma_process_event(ppd, qib_sdma_event_e50_hw_cleaned);
+}
+
+static void qib_sdma_7322_setlengen(struct qib_pportdata *ppd)
+{
+ /*
+ * Set SendDmaLenGen and clear, then set, the MSB of the
+ * generation count to enable generation checking
+ * and load the internal generation counter.
+ */
+ qib_write_kreg_port(ppd, krp_senddmalengen, ppd->sdma_descq_cnt);
+ qib_write_kreg_port(ppd, krp_senddmalengen,
+ ppd->sdma_descq_cnt |
+ (1ULL << QIB_7322_SendDmaLenGen_0_Generation_MSB));
+}
+
+/*
+ * Must be called with sdma_lock held, or before init finished.
+ */
+static void qib_sdma_update_7322_tail(struct qib_pportdata *ppd, u16 tail)
+{
+ /* Commit writes to memory and advance the tail on the chip */
+ wmb();
+ ppd->sdma_descq_tail = tail;
+ qib_write_kreg_port(ppd, krp_senddmatail, tail);
+}
+
+/*
+ * This is called with interrupts disabled and sdma_lock held.
+ */
+static void qib_7322_sdma_hw_start_up(struct qib_pportdata *ppd)
+{
+ /*
+ * Drain all FIFOs.
+ * The hardware doesn't require this but we do it so that verbs
+ * and user applications don't wait for link active to send stale
+ * data.
+ */
+ sendctrl_7322_mod(ppd, QIB_SENDCTRL_FLUSH);
+
+ qib_sdma_7322_setlengen(ppd);
+ qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */
+ ppd->sdma_head_dma[0] = 0;
+ qib_7322_sdma_sendctrl(ppd,
+ ppd->sdma_state.current_op | QIB_SDMA_SENDCTRL_OP_CLEANUP);
+}
+
+#define DISABLES_SDMA ( \
+ QIB_E_P_SDMAHALT | \
+ QIB_E_P_SDMADESCADDRMISALIGN | \
+ QIB_E_P_SDMAMISSINGDW | \
+ QIB_E_P_SDMADWEN | \
+ QIB_E_P_SDMARPYTAG | \
+ QIB_E_P_SDMA1STDESC | \
+ QIB_E_P_SDMABASE | \
+ QIB_E_P_SDMATAILOUTOFBOUND | \
+ QIB_E_P_SDMAOUTOFBOUND | \
+ QIB_E_P_SDMAGENMISMATCH)
+
+static void sdma_7322_p_errors(struct qib_pportdata *ppd, u64 errs)
+{
+ unsigned long flags;
+ struct qib_devdata *dd = ppd->dd;
+
+ errs &= QIB_E_P_SDMAERRS;
+
+ if (errs & QIB_E_P_SDMAUNEXPDATA)
+ qib_dev_err(dd, "IB%u:%u SDmaUnexpData\n", dd->unit,
+ ppd->port);
+
+ spin_lock_irqsave(&ppd->sdma_lock, flags);
+
+ switch (ppd->sdma_state.current_state) {
+ case qib_sdma_state_s00_hw_down:
+ break;
+
+ case qib_sdma_state_s10_hw_start_up_wait:
+ if (errs & QIB_E_P_SDMAHALT)
+ __qib_sdma_process_event(ppd,
+ qib_sdma_event_e20_hw_started);
+ break;
+
+ case qib_sdma_state_s20_idle:
+ break;
+
+ case qib_sdma_state_s30_sw_clean_up_wait:
+ break;
+
+ case qib_sdma_state_s40_hw_clean_up_wait:
+ if (errs & QIB_E_P_SDMAHALT)
+ __qib_sdma_process_event(ppd,
+ qib_sdma_event_e50_hw_cleaned);
+ break;
+
+ case qib_sdma_state_s50_hw_halt_wait:
+ if (errs & QIB_E_P_SDMAHALT)
+ __qib_sdma_process_event(ppd,
+ qib_sdma_event_e60_hw_halted);
+ break;
+
+ case qib_sdma_state_s99_running:
+ __qib_sdma_process_event(ppd, qib_sdma_event_e7322_err_halted);
+ __qib_sdma_process_event(ppd, qib_sdma_event_e60_hw_halted);
+ break;
+ }
+
+ spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+}
+
+/*
+ * handle per-device errors (not per-port errors)
+ */
+static noinline void handle_7322_errors(struct qib_devdata *dd)
+{
+ char *msg;
+ u64 iserr = 0;
+ u64 errs;
+ u64 mask;
+ int log_idx;
+
+ qib_stats.sps_errints++;
+ errs = qib_read_kreg64(dd, kr_errstatus);
+ if (!errs) {
+ qib_devinfo(dd->pcidev, "device error interrupt, "
+ "but no error bits set!\n");
+ goto done;
+ }
+
+ /* don't report errors that are masked */
+ errs &= dd->cspec->errormask;
+ msg = dd->cspec->emsgbuf;
+
+ /* do these first, they are most important */
+ if (errs & QIB_E_HARDWARE) {
+ *msg = '\0';
+ qib_7322_handle_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf);
+ } else
+ for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
+ if (errs & dd->eep_st_masks[log_idx].errs_to_log)
+ qib_inc_eeprom_err(dd, log_idx, 1);
+
+ if (errs & QIB_E_SPKTERRS) {
+ qib_disarm_7322_senderrbufs(dd->pport);
+ qib_stats.sps_txerrs++;
+ } else if (errs & QIB_E_INVALIDADDR)
+ qib_stats.sps_txerrs++;
+ else if (errs & QIB_E_ARMLAUNCH) {
+ qib_stats.sps_txerrs++;
+ qib_disarm_7322_senderrbufs(dd->pport);
+ }
+ qib_write_kreg(dd, kr_errclear, errs);
+
+ /*
+ * The ones we mask off are handled specially below
+ * or above. Also mask SDMADISABLED by default as it
+ * is too chatty.
+ */
+ mask = QIB_E_HARDWARE;
+ *msg = '\0';
+
+ err_decode(msg, sizeof dd->cspec->emsgbuf, errs & ~mask,
+ qib_7322error_msgs);
+
+ /*
+ * Getting reset is a tragedy for all ports. Mark the device
+ * _and_ the ports as "offline" in way meaningful to each.
+ */
+ if (errs & QIB_E_RESET) {
+ int pidx;
+
+ qib_dev_err(dd, "Got reset, requires re-init "
+ "(unload and reload driver)\n");
+ dd->flags &= ~QIB_INITTED; /* needs re-init */
+ /* mark as having had error */
+ *dd->devstatusp |= QIB_STATUS_HWERROR;
+ for (pidx = 0; pidx < dd->num_pports; ++pidx)
+ if (dd->pport[pidx].link_speed_supported)
+ *dd->pport[pidx].statusp &= ~QIB_STATUS_IB_CONF;
+ }
+
+ if (*msg && iserr)
+ qib_dev_err(dd, "%s error\n", msg);
+
+ /*
+ * If there were hdrq or egrfull errors, wake up any processes
+ * waiting in poll. We used to try to check which contexts had
+ * the overflow, but given the cost of that and the chip reads
+ * to support it, it's better to just wake everybody up if we
+ * get an overflow; waiters can poll again if it's not them.
+ */
+ if (errs & (ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr))) {
+ qib_handle_urcv(dd, ~0U);
+ if (errs & ERR_MASK(RcvEgrFullErr))
+ qib_stats.sps_buffull++;
+ else
+ qib_stats.sps_hdrfull++;
+ }
+
+done:
+ return;
+}
+
+static void reenable_chase(unsigned long opaque)
+{
+ struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
+
+ ppd->cpspec->chase_timer.expires = 0;
+ qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
+ QLOGIC_IB_IBCC_LINKINITCMD_POLL);
+}
+
+static void disable_chase(struct qib_pportdata *ppd, u64 tnow, u8 ibclt)
+{
+ ppd->cpspec->chase_end = 0;
+
+ if (!qib_chase)
+ return;
+
+ qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
+ QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
+ ppd->cpspec->chase_timer.expires = jiffies + QIB_CHASE_DIS_TIME;
+ add_timer(&ppd->cpspec->chase_timer);
+}
+
+static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst)
+{
+ u8 ibclt;
+ u64 tnow;
+
+ ibclt = (u8)SYM_FIELD(ibcst, IBCStatusA_0, LinkTrainingState);
+
+ /*
+ * Detect and handle the state chase issue, where we can
+ * get stuck if we are unlucky on timing on both sides of
+ * the link. If we are, we disable, set a timer, and
+ * then re-enable.
+ */
+ switch (ibclt) {
+ case IB_7322_LT_STATE_CFGRCVFCFG:
+ case IB_7322_LT_STATE_CFGWAITRMT:
+ case IB_7322_LT_STATE_TXREVLANES:
+ case IB_7322_LT_STATE_CFGENH:
+ tnow = get_jiffies_64();
+ if (ppd->cpspec->chase_end &&
+ time_after64(tnow, ppd->cpspec->chase_end))
+ disable_chase(ppd, tnow, ibclt);
+ else if (!ppd->cpspec->chase_end)
+ ppd->cpspec->chase_end = tnow + QIB_CHASE_TIME;
+ break;
+ default:
+ ppd->cpspec->chase_end = 0;
+ break;
+ }
+
+ if (ibclt == IB_7322_LT_STATE_CFGTEST &&
+ (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR))) {
+ force_h1(ppd);
+ ppd->cpspec->qdr_reforce = 1;
+ } else if (ppd->cpspec->qdr_reforce &&
+ (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) &&
+ (ibclt == IB_7322_LT_STATE_CFGENH ||
+ ibclt == IB_7322_LT_STATE_CFGIDLE ||
+ ibclt == IB_7322_LT_STATE_LINKUP))
+ force_h1(ppd);
+
+ if ((IS_QMH(ppd->dd) || IS_QME(ppd->dd)) &&
+ ppd->link_speed_enabled == QIB_IB_QDR &&
+ (ibclt == IB_7322_LT_STATE_CFGTEST ||
+ ibclt == IB_7322_LT_STATE_CFGENH ||
+ (ibclt >= IB_7322_LT_STATE_POLLACTIVE &&
+ ibclt <= IB_7322_LT_STATE_SLEEPQUIET)))
+ adj_tx_serdes(ppd);
+
+ if (!ppd->cpspec->qdr_dfe_on && ibclt != IB_7322_LT_STATE_LINKUP &&
+ ibclt <= IB_7322_LT_STATE_SLEEPQUIET) {
+ ppd->cpspec->qdr_dfe_on = 1;
+ ppd->cpspec->qdr_dfe_time = 0;
+ /* On link down, reenable QDR adaptation */
+ qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
+ ppd->dd->cspec->r1 ?
+ QDR_STATIC_ADAPT_DOWN_R1 :
+ QDR_STATIC_ADAPT_DOWN);
+ }
+}
+
+/*
+ * This is per-pport error handling.
+ * It will likely get its own MSIx interrupt (one for each port,
+ * although just a single handler).
+ */
+static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
+{
+ char *msg;
+ u64 ignore_this_time = 0, iserr = 0, errs, fmask;
+ struct qib_devdata *dd = ppd->dd;
+
+ /* do this as soon as possible */
+ fmask = qib_read_kreg64(dd, kr_act_fmask);
+ if (!fmask)
+ check_7322_rxe_status(ppd);
+
+ errs = qib_read_kreg_port(ppd, krp_errstatus);
+ if (!errs)
+ qib_devinfo(dd->pcidev,
+ "Port%d error interrupt, but no error bits set!\n",
+ ppd->port);
+ if (!fmask)
+ errs &= ~QIB_E_P_IBSTATUSCHANGED;
+ if (!errs)
+ goto done;
+
+ msg = ppd->cpspec->epmsgbuf;
+ *msg = '\0';
+
+ if (errs & ~QIB_E_P_BITSEXTANT) {
+ err_decode(msg, sizeof ppd->cpspec->epmsgbuf,
+ errs & ~QIB_E_P_BITSEXTANT, qib_7322p_error_msgs);
+ if (!*msg)
+ snprintf(msg, sizeof ppd->cpspec->epmsgbuf,
+ "no others");
+ qib_dev_porterr(dd, ppd->port, "error interrupt with unknown"
+ " errors 0x%016Lx set (and %s)\n",
+ (errs & ~QIB_E_P_BITSEXTANT), msg);
+ *msg = '\0';
+ }
+
+ if (errs & QIB_E_P_SHDR) {
+ u64 symptom;
+
+ /* determine cause, then write to clear */
+ symptom = qib_read_kreg_port(ppd, krp_sendhdrsymptom);
+ qib_write_kreg_port(ppd, krp_sendhdrsymptom, 0);
+ err_decode(msg, sizeof ppd->cpspec->epmsgbuf, symptom,
+ hdrchk_msgs);
+ *msg = '\0';
+ /* senderrbuf cleared in SPKTERRS below */
+ }
+
+ if (errs & QIB_E_P_SPKTERRS) {
+ if ((errs & QIB_E_P_LINK_PKTERRS) &&
+ !(ppd->lflags & QIBL_LINKACTIVE)) {
+ /*
+ * This can happen when trying to bring the link
+ * up, but the IB link changes state at the "wrong"
+ * time. The IB logic then complains that the packet
+ * isn't valid. We don't want to confuse people, so
+ * we just don't print them, except at debug.
+ */
+ err_decode(msg, sizeof ppd->cpspec->epmsgbuf,
+ (errs & QIB_E_P_LINK_PKTERRS),
+ qib_7322p_error_msgs);
+ *msg = '\0';
+ ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
+ }
+ qib_disarm_7322_senderrbufs(ppd);
+ } else if ((errs & QIB_E_P_LINK_PKTERRS) &&
+ !(ppd->lflags & QIBL_LINKACTIVE)) {
+ /*
+ * This can happen when SMA is trying to bring the link
+ * up, but the IB link changes state at the "wrong" time.
+ * The IB logic then complains that the packet isn't
+ * valid. We don't want to confuse people, so we just
+ * don't print them, except at debug.
+ */
+ err_decode(msg, sizeof ppd->cpspec->epmsgbuf, errs,
+ qib_7322p_error_msgs);
+ ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
+ *msg = '\0';
+ }
+
+ qib_write_kreg_port(ppd, krp_errclear, errs);
+
+ errs &= ~ignore_this_time;
+ if (!errs)
+ goto done;
+
+ if (errs & QIB_E_P_RPKTERRS)
+ qib_stats.sps_rcverrs++;
+ if (errs & QIB_E_P_SPKTERRS)
+ qib_stats.sps_txerrs++;
+
+ iserr = errs & ~(QIB_E_P_RPKTERRS | QIB_E_P_PKTERRS);
+
+ if (errs & QIB_E_P_SDMAERRS)
+ sdma_7322_p_errors(ppd, errs);
+
+ if (errs & QIB_E_P_IBSTATUSCHANGED) {
+ u64 ibcs;
+ u8 ltstate;
+
+ ibcs = qib_read_kreg_port(ppd, krp_ibcstatus_a);
+ ltstate = qib_7322_phys_portstate(ibcs);
+
+ if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
+ handle_serdes_issues(ppd, ibcs);
+ if (!(ppd->cpspec->ibcctrl_a &
+ SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn))) {
+ /*
+ * We got our interrupt, so init code should be
+ * happy and not try alternatives. Now squelch
+ * other "chatter" from link-negotiation (pre Init)
+ */
+ ppd->cpspec->ibcctrl_a |=
+ SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
+ qib_write_kreg_port(ppd, krp_ibcctrl_a,
+ ppd->cpspec->ibcctrl_a);
+ }
+
+ /* Update our picture of width and speed from chip */
+ ppd->link_width_active =
+ (ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) ?
+ IB_WIDTH_4X : IB_WIDTH_1X;
+ ppd->link_speed_active = (ibcs & SYM_MASK(IBCStatusA_0,
+ LinkSpeedQDR)) ? QIB_IB_QDR : (ibcs &
+ SYM_MASK(IBCStatusA_0, LinkSpeedActive)) ?
+ QIB_IB_DDR : QIB_IB_SDR;
+
+ if ((ppd->lflags & QIBL_IB_LINK_DISABLED) && ltstate !=
+ IB_PHYSPORTSTATE_DISABLED)
+ qib_set_ib_7322_lstate(ppd, 0,
+ QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
+ else
+ /*
+ * Since going into a recovery state causes the link
+ * state to go down and since recovery is transitory,
+ * it is better if we "miss" ever seeing the link
+ * training state go into recovery (i.e., ignore this
+ * transition for link state special handling purposes)
+ * without updating lastibcstat.
+ */
+ if (ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER &&
+ ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN &&
+ ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
+ ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
+ qib_handle_e_ibstatuschanged(ppd, ibcs);
+ }
+ if (*msg && iserr)
+ qib_dev_porterr(dd, ppd->port, "%s error\n", msg);
+
+ if (ppd->state_wanted & ppd->lflags)
+ wake_up_interruptible(&ppd->state_wait);
+done:
+ return;
+}
+
+/* enable/disable chip from delivering interrupts */
+static void qib_7322_set_intr_state(struct qib_devdata *dd, u32 enable)
+{
+ if (enable) {
+ if (dd->flags & QIB_BADINTR)
+ return;
+ qib_write_kreg(dd, kr_intmask, dd->cspec->int_enable_mask);
+ /* cause any pending enabled interrupts to be re-delivered */
+ qib_write_kreg(dd, kr_intclear, 0ULL);
+ if (dd->cspec->num_msix_entries) {
+ /* and same for MSIx */
+ u64 val = qib_read_kreg64(dd, kr_intgranted);
+ if (val)
+ qib_write_kreg(dd, kr_intgranted, val);
+ }
+ } else
+ qib_write_kreg(dd, kr_intmask, 0ULL);
+}
+
+/*
+ * Try to cleanup as much as possible for anything that might have gone
+ * wrong while in freeze mode, such as pio buffers being written by user
+ * processes (causing armlaunch), send errors due to going into freeze mode,
+ * etc., and try to avoid causing extra interrupts while doing so.
+ * Forcibly update the in-memory pioavail register copies after cleanup
+ * because the chip won't do it while in freeze mode (the register values
+ * themselves are kept correct).
+ * Make sure that we don't lose any important interrupts by using the chip
+ * feature that says that writing 0 to a bit in *clear that is set in
+ * *status will cause an interrupt to be generated again (if allowed by
+ * the *mask value).
+ * This is in chip-specific code because of all of the register accesses,
+ * even though the details are similar on most chips.
+ */
+static void qib_7322_clear_freeze(struct qib_devdata *dd)
+{
+ int pidx;
+
+ /* disable error interrupts, to avoid confusion */
+ qib_write_kreg(dd, kr_errmask, 0ULL);
+
+ for (pidx = 0; pidx < dd->num_pports; ++pidx)
+ if (dd->pport[pidx].link_speed_supported)
+ qib_write_kreg_port(dd->pport + pidx, krp_errmask,
+ 0ULL);
+
+ /* also disable interrupts; errormask is sometimes overwritten */
+ qib_7322_set_intr_state(dd, 0);
+
+ /* clear the freeze, and be sure chip saw it */
+ qib_write_kreg(dd, kr_control, dd->control);
+ qib_read_kreg32(dd, kr_scratch);
+
+ /*
+ * Force new interrupt if any hwerr, error or interrupt bits are
+ * still set, and clear "safe" send packet errors related to freeze
+ * and cancelling sends. Re-enable error interrupts before possible
+ * force of re-interrupt on pending interrupts.
+ */
+ qib_write_kreg(dd, kr_hwerrclear, 0ULL);
+ qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE);
+ qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
+ /* We need to purge per-port errs and reset mask, too */
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ if (!dd->pport[pidx].link_speed_supported)
+ continue;
+ qib_write_kreg_port(dd->pport + pidx, krp_errclear, ~0Ull);
+ qib_write_kreg_port(dd->pport + pidx, krp_errmask, ~0Ull);
+ }
+ qib_7322_set_intr_state(dd, 1);
+}
+
+/* no error handling to speak of */
+/**
+ * qib_7322_handle_hwerrors - display hardware errors.
+ * @dd: the qlogic_ib device
+ * @msg: the output buffer
+ * @msgl: the size of the output buffer
+ *
+ * Most hardware errors are catastrophic, but for right now,
+ * we'll print them and continue. We reuse the same message buffer as
+ * qib_handle_errors() to avoid excessive stack usage.
+ */
+static void qib_7322_handle_hwerrors(struct qib_devdata *dd, char *msg,
+ size_t msgl)
+{
+ u64 hwerrs;
+ u32 ctrl;
+ int isfatal = 0;
+
+ hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
+ if (!hwerrs)
+ goto bail;
+ if (hwerrs == ~0ULL) {
+ qib_dev_err(dd, "Read of hardware error status failed "
+ "(all bits set); ignoring\n");
+ goto bail;
+ }
+ qib_stats.sps_hwerrs++;
+
+ /* Always clear the error status register, except BIST fail */
+ qib_write_kreg(dd, kr_hwerrclear, hwerrs &
+ ~HWE_MASK(PowerOnBISTFailed));
+
+ hwerrs &= dd->cspec->hwerrmask;
+
+ /* no EEPROM logging, yet */
+
+ if (hwerrs)
+ qib_devinfo(dd->pcidev, "Hardware error: hwerr=0x%llx "
+ "(cleared)\n", (unsigned long long) hwerrs);
+
+ ctrl = qib_read_kreg32(dd, kr_control);
+ if ((ctrl & SYM_MASK(Control, FreezeMode)) && !dd->diag_client) {
+ /*
+ * No recovery yet...
+ */
+ if ((hwerrs & ~HWE_MASK(LATriggered)) ||
+ dd->cspec->stay_in_freeze) {
+ /*
+ * If any bits are set that we aren't ignoring, only make the
+ * complaint once, in case it's stuck or recurring,
+ * and we get here multiple times.
+ * Force the link down, so the switch knows, and
+ * the LEDs are turned off.
+ */
+ if (dd->flags & QIB_INITTED)
+ isfatal = 1;
+ } else
+ qib_7322_clear_freeze(dd);
+ }
+
+ if (hwerrs & HWE_MASK(PowerOnBISTFailed)) {
+ isfatal = 1;
+ strlcpy(msg, "[Memory BIST test failed, "
+ "InfiniPath hardware unusable]", msgl);
+ /* ignore from now on, so disable until driver reloaded */
+ dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed);
+ qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
+ }
+
+ err_decode(msg, msgl, hwerrs, qib_7322_hwerror_msgs);
+
+ /* Ignore esoteric PLL failures et al. */
+
+ qib_dev_err(dd, "%s hardware error\n", msg);
+
+ if (isfatal && !dd->diag_client) {
+ qib_dev_err(dd, "Fatal Hardware Error, no longer"
+ " usable, SN %.16s\n", dd->serial);
+ /*
+ * for /sys status file and user programs to print; if no
+ * trailing brace is copied, we'll know it was truncated.
+ */
+ if (dd->freezemsg)
+ snprintf(dd->freezemsg, dd->freezelen,
+ "{%s}", msg);
+ qib_disable_after_error(dd);
+ }
+bail:;
+}
+
+/**
+ * qib_7322_init_hwerrors - enable hardware errors
+ * @dd: the qlogic_ib device
+ *
+ * Now that we have finished initializing everything that might reasonably
+ * cause a hardware error, and cleared those error bits as they occur,
+ * we can enable hardware errors in the mask (potentially enabling
+ * freeze mode), and enable hardware errors as errors (along with
+ * everything else) in errormask
+ */
+static void qib_7322_init_hwerrors(struct qib_devdata *dd)
+{
+ int pidx;
+ u64 extsval;
+
+ extsval = qib_read_kreg64(dd, kr_extstatus);
+ if (!(extsval & (QIB_EXTS_MEMBIST_DISABLED |
+ QIB_EXTS_MEMBIST_ENDTEST)))
+ qib_dev_err(dd, "MemBIST did not complete!\n");
+
+ /* never clear BIST failure, so reported on each driver load */
+ qib_write_kreg(dd, kr_hwerrclear, ~HWE_MASK(PowerOnBISTFailed));
+ qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
+
+ /* clear all */
+ qib_write_kreg(dd, kr_errclear, ~0ULL);
+ /* enable errors that are masked, at least this first time. */
+ qib_write_kreg(dd, kr_errmask, ~0ULL);
+ dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask);
+ for (pidx = 0; pidx < dd->num_pports; ++pidx)
+ if (dd->pport[pidx].link_speed_supported)
+ qib_write_kreg_port(dd->pport + pidx, krp_errmask,
+ ~0ULL);
+}
+
+/*
+ * Disable and enable the armlaunch error. Used for PIO bandwidth testing
+ * on chips that are count-based, rather than trigger-based. There is no
+ * reference counting, but that's also fine, given the intended use.
+ * Only chip-specific because it's all register accesses
+ */
+static void qib_set_7322_armlaunch(struct qib_devdata *dd, u32 enable)
+{
+ if (enable) {
+ qib_write_kreg(dd, kr_errclear, QIB_E_SPIOARMLAUNCH);
+ dd->cspec->errormask |= QIB_E_SPIOARMLAUNCH;
+ } else
+ dd->cspec->errormask &= ~QIB_E_SPIOARMLAUNCH;
+ qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
+}
+
+/*
+ * Formerly took parameter <which> in pre-shifted,
+ * pre-merged form with LinkCmd and LinkInitCmd
+ * together, and assuming the zero was NOP.
+ */
+static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
+ u16 linitcmd)
+{
+ u64 mod_wd;
+ struct qib_devdata *dd = ppd->dd;
+ unsigned long flags;
+
+ if (linitcmd == QLOGIC_IB_IBCC_LINKINITCMD_DISABLE) {
+ /*
+ * If we are told to disable, note that so link-recovery
+ * code does not attempt to bring us back up.
+ * Also reset everything that we can, so we start
+ * completely clean when re-enabled (before we
+ * actually issue the disable to the IBC)
+ */
+ qib_7322_mini_pcs_reset(ppd);
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags |= QIBL_IB_LINK_DISABLED;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ } else if (linitcmd || linkcmd == QLOGIC_IB_IBCC_LINKCMD_DOWN) {
+ /*
+ * Any other linkinitcmd will lead to LINKDOWN and then
+ * to INIT (if all is well), so clear flag to let
+ * link-recovery code attempt to bring us back up.
+ */
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags &= ~QIBL_IB_LINK_DISABLED;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ /*
+ * Clear status change interrupt reduction so the
+ * new state is seen.
+ */
+ ppd->cpspec->ibcctrl_a &=
+ ~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
+ }
+
+ mod_wd = (linkcmd << IBA7322_IBCC_LINKCMD_SHIFT) |
+ (linitcmd << QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
+
+ qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a |
+ mod_wd);
+ /* write to chip to prevent back-to-back writes of ibc reg */
+ qib_write_kreg(dd, kr_scratch, 0);
+
+}
+
+/*
+ * The total RCV buffer memory is 64KB, used for both ports, and is
+ * in units of 64 bytes (same as IB flow control credit unit).
+ * The consumedVL unit in the same registers is in 32 byte units!
+ * So, a VL15 packet needs 4.50 IB credits, and 9 rx buffer chunks,
+ * and we can therefore allocate just 9 IB credits for 2 VL15 packets
+ * in krp_rxcreditvl15, rather than 10.
+ */
+#define RCV_BUF_UNITSZ 64
+#define NUM_RCV_BUF_UNITS(dd) ((64 * 1024) / (RCV_BUF_UNITSZ * dd->num_pports))
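+/* e.g. with both ports in use: (64 * 1024) / (64 * 2) = 512 units per port */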
+
+static void set_vls(struct qib_pportdata *ppd)
+{
+ int i, numvls, totcred, cred_vl, vl0extra;
+ struct qib_devdata *dd = ppd->dd;
+ u64 val;
+
+ numvls = qib_num_vls(ppd->vls_operational);
+
+ /*
+ * Set up per-VL credits. Below is a kluge based on these assumptions:
+ * 1) port is disabled at the time early_init is called.
+ * 2) give VL15 17 credits, for two max-plausible packets.
+ * 3) Give VL0-N the rest, with any rounding excess used for VL0
+ */
+ /* 2 VL15 packets @ 288 bytes each (including IB headers) */
+ totcred = NUM_RCV_BUF_UNITS(dd);
+ cred_vl = (2 * 288 + RCV_BUF_UNITSZ - 1) / RCV_BUF_UNITSZ;
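+ /* 2 * 288 bytes rounds up to 9 64-byte buffer units for VL15 */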
+ totcred -= cred_vl;
+ qib_write_kreg_port(ppd, krp_rxcreditvl15, (u64) cred_vl);
+ cred_vl = totcred / numvls;
+ vl0extra = totcred - cred_vl * numvls;
+ qib_write_kreg_port(ppd, krp_rxcreditvl0, cred_vl + vl0extra);
+ for (i = 1; i < numvls; i++)
+ qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, cred_vl);
+ for (; i < 8; i++) /* no buffer space for other VLs */
+ qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0);
+
+ /* Notify IBC that credits need to be recalculated */
+ val = qib_read_kreg_port(ppd, krp_ibsdtestiftx);
+ val |= SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE);
+ qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
+ qib_write_kreg(dd, kr_scratch, 0ULL);
+ val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE);
+ qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
+
+ for (i = 0; i < numvls; i++)
+ val = qib_read_kreg_port(ppd, krp_rxcreditvl0 + i);
+ val = qib_read_kreg_port(ppd, krp_rxcreditvl15);
+
+ /* Change the number of operational VLs */
+ ppd->cpspec->ibcctrl_a = (ppd->cpspec->ibcctrl_a &
+ ~SYM_MASK(IBCCtrlA_0, NumVLane)) |
+ ((u64)(numvls - 1) << SYM_LSB(IBCCtrlA_0, NumVLane));
+ qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
+ qib_write_kreg(dd, kr_scratch, 0ULL);
+}
+
+/*
+ * The code that deals with actual SerDes is in serdes_7322_init().
+ * Compared to the code for iba7220, it is minimal.
+ */
+static int serdes_7322_init(struct qib_pportdata *ppd);
+
+/**
+ * qib_7322_bringup_serdes - bring up the serdes
+ * @ppd: physical port on the qlogic_ib device
+ */
+static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
+{
+ struct qib_devdata *dd = ppd->dd;
+ u64 val, guid, ibc;
+ unsigned long flags;
+ int ret = 0;
+
+ /*
+ * SerDes model not in Pd, but still need to
+ * set up much of IBCCtrl and IBCDDRCtrl; move elsewhere
+ * eventually.
+ */
+ /* Put IBC in reset, sends disabled (should be in reset already) */
+ ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
+ qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
+ qib_write_kreg(dd, kr_scratch, 0ULL);
+
+ if (qib_compat_ddr_negotiate) {
+ ppd->cpspec->ibdeltainprog = 1;
+ ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
+ crp_ibsymbolerr);
+ ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
+ crp_iblinkerrrecov);
+ }
+
+ /* flowcontrolwatermark is in units of KBytes */
+ ibc = 0x5ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlWaterMark);
+ /*
+ * Flow control is sent this often, even if no changes in
+ * buffer space occur. Units are 128ns for this chip.
+ * Set to 3usec.
+ */
+ ibc |= 24ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlPeriod);
+ /* max error tolerance */
+ ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, PhyerrThreshold);
+ /* IB credit flow control. */
+ ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, OverrunThreshold);
+ /*
+ * set initial max size pkt IBC will send, including ICRC; it's the
+ * PIO buffer size in dwords, less 1; also see qib_set_mtu()
+ */
+ ibc |= ((u64)(ppd->ibmaxlen >> 2) + 1) <<
+ SYM_LSB(IBCCtrlA_0, MaxPktLen);
+ ppd->cpspec->ibcctrl_a = ibc; /* without linkcmd or linkinitcmd! */
+
+ /* initially come up waiting for TS1, without sending anything. */
+ val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
+ QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
+
+ /*
+ * Reset the PCS interface to the serdes (and also ibc, which is still
+ * in reset from above). Writes new value of ibcctrl_a as last step.
+ */
+ qib_7322_mini_pcs_reset(ppd);
+ qib_write_kreg(dd, kr_scratch, 0ULL);
+
+ if (!ppd->cpspec->ibcctrl_b) {
+ unsigned lse = ppd->link_speed_enabled;
+
+ /*
+ * Not on re-init after reset, establish shadow
+ * and force initial config.
+ */
+ ppd->cpspec->ibcctrl_b = qib_read_kreg_port(ppd,
+ krp_ibcctrl_b);
+ ppd->cpspec->ibcctrl_b &= ~(IBA7322_IBC_SPEED_QDR |
+ IBA7322_IBC_SPEED_DDR |
+ IBA7322_IBC_SPEED_SDR |
+ IBA7322_IBC_WIDTH_AUTONEG |
+ SYM_MASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED));
+ if (lse & (lse - 1)) /* Multiple speeds enabled */
+ ppd->cpspec->ibcctrl_b |=
+ (lse << IBA7322_IBC_SPEED_LSB) |
+ IBA7322_IBC_IBTA_1_2_MASK |
+ IBA7322_IBC_MAX_SPEED_MASK;
+ else
+ ppd->cpspec->ibcctrl_b |= (lse == QIB_IB_QDR) ?
+ IBA7322_IBC_SPEED_QDR |
+ IBA7322_IBC_IBTA_1_2_MASK :
+ (lse == QIB_IB_DDR) ?
+ IBA7322_IBC_SPEED_DDR :
+ IBA7322_IBC_SPEED_SDR;
+ if ((ppd->link_width_enabled & (IB_WIDTH_1X | IB_WIDTH_4X)) ==
+ (IB_WIDTH_1X | IB_WIDTH_4X))
+ ppd->cpspec->ibcctrl_b |= IBA7322_IBC_WIDTH_AUTONEG;
+ else
+ ppd->cpspec->ibcctrl_b |=
+ ppd->link_width_enabled == IB_WIDTH_4X ?
+ IBA7322_IBC_WIDTH_4X_ONLY :
+ IBA7322_IBC_WIDTH_1X_ONLY;
+
+ /* always enable these on driver reload, not sticky */
+ ppd->cpspec->ibcctrl_b |= (IBA7322_IBC_RXPOL_MASK |
+ IBA7322_IBC_HRTBT_MASK);
+ }
+ qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
+
+ /* setup so we have more time at CFGTEST to change H1 */
+ val = qib_read_kreg_port(ppd, krp_ibcctrl_c);
+ val &= ~SYM_MASK(IBCCtrlC_0, IB_FRONT_PORCH);
+ val |= 0xfULL << SYM_LSB(IBCCtrlC_0, IB_FRONT_PORCH);
+ qib_write_kreg_port(ppd, krp_ibcctrl_c, val);
+
+ serdes_7322_init(ppd);
+
+ guid = be64_to_cpu(ppd->guid);
+ if (!guid) {
+ if (dd->base_guid)
+ guid = be64_to_cpu(dd->base_guid) + ppd->port - 1;
+ ppd->guid = cpu_to_be64(guid);
+ }
+
+ qib_write_kreg_port(ppd, krp_hrtbt_guid, guid);
+ /* write to chip to prevent back-to-back writes of ibc reg */
+ qib_write_kreg(dd, kr_scratch, 0);
+
+ /* Enable port */
+ ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0, IBLinkEn);
+ set_vls(ppd);
+
+ /* be paranoid against later code motion, etc. */
+ spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
+ ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvIBPortEnable);
+ qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
+ spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
+
+ /* Also enable IBSTATUSCHG interrupt. */
+ val = qib_read_kreg_port(ppd, krp_errmask);
+ qib_write_kreg_port(ppd, krp_errmask,
+ val | ERR_MASK_N(IBStatusChanged));
+
+ /* Always zero until we start messing with SerDes for real */
+ return ret;
+}
+
+/**
+ * qib_7322_mini_quiet_serdes - set serdes to txidle
+ * @ppd: physical port on the qlogic_ib device
+ * Called when driver is being unloaded
+ */
+static void qib_7322_mini_quiet_serdes(struct qib_pportdata *ppd)
+{
+ u64 val;
+ unsigned long flags;
+
+ qib_set_ib_7322_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
+
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ wake_up(&ppd->cpspec->autoneg_wait);
+ cancel_delayed_work(&ppd->cpspec->autoneg_work);
+ if (ppd->dd->cspec->r1)
+ cancel_delayed_work(&ppd->cpspec->ipg_work);
+ flush_scheduled_work();
+
+ ppd->cpspec->chase_end = 0;
+ if (ppd->cpspec->chase_timer.data) /* if initted */
+ del_timer_sync(&ppd->cpspec->chase_timer);
+
+ /*
+ * Despite the name, actually disables IBC as well. Do it when
+ * we are as sure as possible that no more packets can be
+ * received, following the down and the PCS reset.
+ * The actual disabling happens in qib_7322_mini_pcs_reset(),
+ * along with the PCS being reset.
+ */
+ ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
+ qib_7322_mini_pcs_reset(ppd);
+
+ /*
+ * Update the adjusted counters so the adjustment persists
+ * across driver reload.
+ */
+ if (ppd->cpspec->ibsymdelta || ppd->cpspec->iblnkerrdelta ||
+ ppd->cpspec->ibdeltainprog || ppd->cpspec->iblnkdowndelta) {
+ struct qib_devdata *dd = ppd->dd;
+ u64 diagc;
+
+ /* enable counter writes */
+ diagc = qib_read_kreg64(dd, kr_hwdiagctrl);
+ qib_write_kreg(dd, kr_hwdiagctrl,
+ diagc | SYM_MASK(HwDiagCtrl, CounterWrEnable));
+
+ if (ppd->cpspec->ibsymdelta || ppd->cpspec->ibdeltainprog) {
+ val = read_7322_creg32_port(ppd, crp_ibsymbolerr);
+ if (ppd->cpspec->ibdeltainprog)
+ val -= val - ppd->cpspec->ibsymsnap;
+ val -= ppd->cpspec->ibsymdelta;
+ write_7322_creg_port(ppd, crp_ibsymbolerr, val);
+ }
+ if (ppd->cpspec->iblnkerrdelta || ppd->cpspec->ibdeltainprog) {
+ val = read_7322_creg32_port(ppd, crp_iblinkerrrecov);
+ if (ppd->cpspec->ibdeltainprog)
+ val -= val - ppd->cpspec->iblnkerrsnap;
+ val -= ppd->cpspec->iblnkerrdelta;
+ write_7322_creg_port(ppd, crp_iblinkerrrecov, val);
+ }
+ if (ppd->cpspec->iblnkdowndelta) {
+ val = read_7322_creg32_port(ppd, crp_iblinkdown);
+ val += ppd->cpspec->iblnkdowndelta;
+ write_7322_creg_port(ppd, crp_iblinkdown, val);
+ }
+ /*
+ * No need to save ibmalfdelta since IB perfcounters
+ * are cleared on driver reload.
+ */
+
+ /* and disable counter writes */
+ qib_write_kreg(dd, kr_hwdiagctrl, diagc);
+ }
+}
+
+/**
+ * qib_setup_7322_setextled - set the state of the two external LEDs
+ * @ppd: physical port on the qlogic_ib device
+ * @on: whether the link is up or not
+ *
+ * The exact combo of LEDs lit when @on is true is determined by
+ * looking at the ibcstatus.
+ *
+ * These LEDs indicate the physical and logical state of IB link.
+ * For this chip (at least with recommended board pinouts), LED1
+ * is Yellow (logical state) and LED2 is Green (physical state),
+ * is Yellow (logical state) and LED2 is Green (physical state).
+ * Note: We try to match the Mellanox HCA LED behavior as best
+ * we can. Green indicates physical link state is OK (something is
+ * plugged in, and we can train).
+ * Amber indicates the link is logically up (ACTIVE).
+ * Mellanox further blinks the amber LED to indicate data packet
+ * activity, but we have no hardware support for that, so it would
+ * require waking up every 10-20 msecs and checking the counters
+ * on the chip, and then turning the LED off if appropriate. That's
+ * visible overhead, so not something we will do.
+ */
+static void qib_setup_7322_setextled(struct qib_pportdata *ppd, u32 on)
+{
+ struct qib_devdata *dd = ppd->dd;
+ u64 extctl, ledblink = 0, val;
+ unsigned long flags;
+ int yel, grn;
+
+ /*
+ * The diags use the LED to indicate diag info, so we leave
+ * the external LED alone when the diags are running.
+ */
+ if (dd->diag_client)
+ return;
+
+	/* Allow override of LED display for, e.g., locating the system in a rack */
+ if (ppd->led_override) {
+ grn = (ppd->led_override & QIB_LED_PHYS);
+ yel = (ppd->led_override & QIB_LED_LOG);
+ } else if (on) {
+ val = qib_read_kreg_port(ppd, krp_ibcstatus_a);
+ grn = qib_7322_phys_portstate(val) ==
+ IB_PHYSPORTSTATE_LINKUP;
+ yel = qib_7322_iblink_state(val) == IB_PORT_ACTIVE;
+ } else {
+ grn = 0;
+ yel = 0;
+ }
+
+ spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
+ extctl = dd->cspec->extctrl & (ppd->port == 1 ?
+ ~ExtLED_IB1_MASK : ~ExtLED_IB2_MASK);
+ if (grn) {
+ extctl |= ppd->port == 1 ? ExtLED_IB1_GRN : ExtLED_IB2_GRN;
+ /*
+ * Counts are in chip clock (4ns) periods.
+		 * This is 66.6 ms on,
+		 * 3/16 sec (187.5 ms) off, with packets rcvd.
+ */
+ ledblink = ((66600 * 1000UL / 4) << IBA7322_LEDBLINK_ON_SHIFT) |
+ ((187500 * 1000UL / 4) << IBA7322_LEDBLINK_OFF_SHIFT);
+ }
+ if (yel)
+ extctl |= ppd->port == 1 ? ExtLED_IB1_YEL : ExtLED_IB2_YEL;
+ dd->cspec->extctrl = extctl;
+ qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
+ spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
+
+ if (ledblink) /* blink the LED on packet receive */
+ qib_write_kreg_port(ppd, krp_rcvpktledcnt, ledblink);
+}
+
+/*
+ * Disable MSIx interrupt if enabled, call generic MSIx code
+ * to cleanup, and clear pending MSIx interrupts.
+ * Used for fallback to INTx, after reset, and when MSIx setup fails.
+ */
+static void qib_7322_nomsix(struct qib_devdata *dd)
+{
+ u64 intgranted;
+ int n;
+
+ dd->cspec->main_int_mask = ~0ULL;
+ n = dd->cspec->num_msix_entries;
+ if (n) {
+ int i;
+
+ dd->cspec->num_msix_entries = 0;
+ for (i = 0; i < n; i++)
+ free_irq(dd->cspec->msix_entries[i].vector,
+ dd->cspec->msix_arg[i]);
+ qib_nomsix(dd);
+ }
+ /* make sure no MSIx interrupts are left pending */
+ intgranted = qib_read_kreg64(dd, kr_intgranted);
+ if (intgranted)
+ qib_write_kreg(dd, kr_intgranted, intgranted);
+}
+
+static void qib_7322_free_irq(struct qib_devdata *dd)
+{
+ if (dd->cspec->irq) {
+ free_irq(dd->cspec->irq, dd);
+ dd->cspec->irq = 0;
+ }
+ qib_7322_nomsix(dd);
+}
+
+static void qib_setup_7322_cleanup(struct qib_devdata *dd)
+{
+ int i;
+
+ qib_7322_free_irq(dd);
+ kfree(dd->cspec->cntrs);
+ kfree(dd->cspec->sendchkenable);
+ kfree(dd->cspec->sendgrhchk);
+ kfree(dd->cspec->sendibchk);
+ kfree(dd->cspec->msix_entries);
+ kfree(dd->cspec->msix_arg);
+ for (i = 0; i < dd->num_pports; i++) {
+ unsigned long flags;
+ u32 mask = QSFP_GPIO_MOD_PRS_N |
+ (QSFP_GPIO_MOD_PRS_N << QSFP_GPIO_PORT2_SHIFT);
+
+ kfree(dd->pport[i].cpspec->portcntrs);
+ if (dd->flags & QIB_HAS_QSFP) {
+ spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
+ dd->cspec->gpio_mask &= ~mask;
+ qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
+ spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
+ qib_qsfp_deinit(&dd->pport[i].cpspec->qsfp_data);
+ }
+ if (dd->pport[i].ibport_data.smi_ah)
+ ib_destroy_ah(&dd->pport[i].ibport_data.smi_ah->ibah);
+ }
+}
+
+/* handle SDMA interrupts */
+static void sdma_7322_intr(struct qib_devdata *dd, u64 istat)
+{
+ struct qib_pportdata *ppd0 = &dd->pport[0];
+ struct qib_pportdata *ppd1 = &dd->pport[1];
+ u64 intr0 = istat & (INT_MASK_P(SDma, 0) |
+ INT_MASK_P(SDmaIdle, 0) | INT_MASK_P(SDmaProgress, 0));
+ u64 intr1 = istat & (INT_MASK_P(SDma, 1) |
+ INT_MASK_P(SDmaIdle, 1) | INT_MASK_P(SDmaProgress, 1));
+
+ if (intr0)
+ qib_sdma_intr(ppd0);
+ if (intr1)
+ qib_sdma_intr(ppd1);
+
+ if (istat & INT_MASK_PM(SDmaCleanupDone, 0))
+ qib_sdma_process_event(ppd0, qib_sdma_event_e20_hw_started);
+ if (istat & INT_MASK_PM(SDmaCleanupDone, 1))
+ qib_sdma_process_event(ppd1, qib_sdma_event_e20_hw_started);
+}
+
+/*
+ * Set or clear the Send buffer available interrupt enable bit.
+ */
+static void qib_wantpiobuf_7322_intr(struct qib_devdata *dd, u32 needint)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dd->sendctrl_lock, flags);
+ if (needint)
+ dd->sendctrl |= SYM_MASK(SendCtrl, SendIntBufAvail);
+ else
+ dd->sendctrl &= ~SYM_MASK(SendCtrl, SendIntBufAvail);
+ qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
+ qib_write_kreg(dd, kr_scratch, 0ULL);
+ spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
+}
+
+/*
+ * Somehow got an interrupt with reserved bits set in interrupt status.
+ * Print a message so we know it happened, then clear them.
+ * Kept out of line to keep the mainline interrupt handler cache-friendly.
+ */
+static noinline void unknown_7322_ibits(struct qib_devdata *dd, u64 istat)
+{
+ u64 kills;
+
+	kills = istat & ~QIB_I_BITSEXTANT;
+	qib_dev_err(dd, "Clearing reserved interrupt(s) 0x%016llx\n",
+		    (unsigned long long) kills);
+ qib_write_kreg(dd, kr_intmask, (dd->cspec->int_enable_mask & ~kills));
+}
+
+/* keep mainline interrupt handler cache-friendly */
+static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd)
+{
+ u32 gpiostatus;
+ int handled = 0;
+ int pidx;
+
+ /*
+ * Boards for this chip currently don't use GPIO interrupts,
+ * so clear by writing GPIOstatus to GPIOclear, and complain
+ * to developer. To avoid endless repeats, clear
+ * the bits in the mask, since there is some kind of
+ * programming error or chip problem.
+ */
+ gpiostatus = qib_read_kreg32(dd, kr_gpio_status);
+ /*
+ * In theory, writing GPIOstatus to GPIOclear could
+ * have a bad side-effect on some diagnostic that wanted
+ * to poll for a status-change, but the various shadows
+ * make that problematic at best. Diags will just suppress
+ * all GPIO interrupts during such tests.
+ */
+ qib_write_kreg(dd, kr_gpio_clear, gpiostatus);
+ /*
+ * Check for QSFP MOD_PRS changes
+ * only works for single port if IB1 != pidx1
+ */
+ for (pidx = 0; pidx < dd->num_pports && (dd->flags & QIB_HAS_QSFP);
+ ++pidx) {
+ struct qib_pportdata *ppd;
+ struct qib_qsfp_data *qd;
+ u32 mask;
+ if (!dd->pport[pidx].link_speed_supported)
+ continue;
+ mask = QSFP_GPIO_MOD_PRS_N;
+ ppd = dd->pport + pidx;
+ mask <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
+ if (gpiostatus & dd->cspec->gpio_mask & mask) {
+ u64 pins;
+ qd = &ppd->cpspec->qsfp_data;
+ gpiostatus &= ~mask;
+ pins = qib_read_kreg64(dd, kr_extstatus);
+ pins >>= SYM_LSB(EXTStatus, GPIOIn);
+ if (!(pins & mask)) {
+ ++handled;
+ qd->t_insert = get_jiffies_64();
+ schedule_work(&qd->work);
+ }
+ }
+ }
+
+ if (gpiostatus && !handled) {
+ const u32 mask = qib_read_kreg32(dd, kr_gpio_mask);
+ u32 gpio_irq = mask & gpiostatus;
+
+ /*
+ * Clear any troublemakers, and update chip from shadow
+ */
+ dd->cspec->gpio_mask &= ~gpio_irq;
+ qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
+ }
+}
+
+/*
+ * Handle errors and unusual events first, separate function
+ * to improve cache hits for fast path interrupt handling.
+ */
+static noinline void unlikely_7322_intr(struct qib_devdata *dd, u64 istat)
+{
+ if (istat & ~QIB_I_BITSEXTANT)
+ unknown_7322_ibits(dd, istat);
+ if (istat & QIB_I_GPIO)
+ unknown_7322_gpio_intr(dd);
+ if (istat & QIB_I_C_ERROR)
+ handle_7322_errors(dd);
+ if (istat & INT_MASK_P(Err, 0) && dd->rcd[0])
+ handle_7322_p_errors(dd->rcd[0]->ppd);
+ if (istat & INT_MASK_P(Err, 1) && dd->rcd[1])
+ handle_7322_p_errors(dd->rcd[1]->ppd);
+}
+
+/*
+ * Dynamically adjust the rcv int timeout for a context based on incoming
+ * packet rate.
+ */
+static void adjust_rcv_timeout(struct qib_ctxtdata *rcd, int npkts)
+{
+ struct qib_devdata *dd = rcd->dd;
+ u32 timeout = dd->cspec->rcvavail_timeout[rcd->ctxt];
+
+ /*
+ * Dynamically adjust idle timeout on chip
+ * based on number of packets processed.
+ */
+ if (npkts < rcv_int_count && timeout > 2)
+ timeout >>= 1;
+ else if (npkts >= rcv_int_count && timeout < rcv_int_timeout)
+ timeout = min(timeout << 1, rcv_int_timeout);
+ else
+ return;
+
+ dd->cspec->rcvavail_timeout[rcd->ctxt] = timeout;
+ qib_write_kreg(dd, kr_rcvavailtimeout + rcd->ctxt, timeout);
+}
+
+/*
+ * This is the main interrupt handler.
+ * It will normally only be used for low frequency interrupts but may
+ * have to handle all interrupts if INTx is enabled or fewer than normal
+ * MSIx interrupts were allocated.
+ * This routine should ignore the interrupt bits for any of the
+ * dedicated MSIx handlers.
+ */
+static irqreturn_t qib_7322intr(int irq, void *data)
+{
+ struct qib_devdata *dd = data;
+ irqreturn_t ret;
+ u64 istat;
+ u64 ctxtrbits;
+ u64 rmask;
+ unsigned i;
+ u32 npkts;
+
+ if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) {
+ /*
+ * This return value is not great, but we do not want the
+ * interrupt core code to remove our interrupt handler
+ * because we don't appear to be handling an interrupt
+ * during a chip reset.
+ */
+ ret = IRQ_HANDLED;
+ goto bail;
+ }
+
+ istat = qib_read_kreg64(dd, kr_intstatus);
+
+ if (unlikely(istat == ~0ULL)) {
+ qib_bad_intrstatus(dd);
+ qib_dev_err(dd, "Interrupt status all f's, skipping\n");
+ /* don't know if it was our interrupt or not */
+ ret = IRQ_NONE;
+ goto bail;
+ }
+
+ istat &= dd->cspec->main_int_mask;
+ if (unlikely(!istat)) {
+ /* already handled, or shared and not us */
+ ret = IRQ_NONE;
+ goto bail;
+ }
+
+ qib_stats.sps_ints++;
+ if (dd->int_counter != (u32) -1)
+ dd->int_counter++;
+
+ /* handle "errors" of various kinds first, device ahead of port */
+ if (unlikely(istat & (~QIB_I_BITSEXTANT | QIB_I_GPIO |
+ QIB_I_C_ERROR | INT_MASK_P(Err, 0) |
+ INT_MASK_P(Err, 1))))
+ unlikely_7322_intr(dd, istat);
+
+ /*
+ * Clear the interrupt bits we found set, relatively early, so we
+	 * "know" the chip will have seen this by the time we process
+ * the queue, and will re-interrupt if necessary. The processor
+ * itself won't take the interrupt again until we return.
+ */
+ qib_write_kreg(dd, kr_intclear, istat);
+
+ /*
+ * Handle kernel receive queues before checking for pio buffers
+ * available since receives can overflow; piobuf waiters can afford
+ * a few extra cycles, since they were waiting anyway.
+ */
+ ctxtrbits = istat & (QIB_I_RCVAVAIL_MASK | QIB_I_RCVURG_MASK);
+ if (ctxtrbits) {
+ rmask = (1ULL << QIB_I_RCVAVAIL_LSB) |
+ (1ULL << QIB_I_RCVURG_LSB);
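+		/*
+		 * rmask covers the RcvAvail and RcvUrg bits for context 0;
+		 * shifting it left once per pass walks the kernel contexts.
+		 * Any bits left over belong to user contexts and are handed
+		 * to qib_handle_urcv() below.
+		 */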
+ for (i = 0; i < dd->first_user_ctxt; i++) {
+ if (ctxtrbits & rmask) {
+ ctxtrbits &= ~rmask;
+ if (dd->rcd[i]) {
+ qib_kreceive(dd->rcd[i], NULL, &npkts);
+ adjust_rcv_timeout(dd->rcd[i], npkts);
+ }
+ }
+ rmask <<= 1;
+ }
+ if (ctxtrbits) {
+ ctxtrbits = (ctxtrbits >> QIB_I_RCVAVAIL_LSB) |
+ (ctxtrbits >> QIB_I_RCVURG_LSB);
+ qib_handle_urcv(dd, ctxtrbits);
+ }
+ }
+
+ if (istat & (QIB_I_P_SDMAINT(0) | QIB_I_P_SDMAINT(1)))
+ sdma_7322_intr(dd, istat);
+
+ if ((istat & QIB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED))
+ qib_ib_piobufavail(dd);
+
+ ret = IRQ_HANDLED;
+bail:
+ return ret;
+}
+
+/*
+ * Dedicated receive packet available interrupt handler.
+ */
+static irqreturn_t qib_7322pintr(int irq, void *data)
+{
+ struct qib_ctxtdata *rcd = data;
+ struct qib_devdata *dd = rcd->dd;
+ u32 npkts;
+
+ if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
+ /*
+ * This return value is not great, but we do not want the
+ * interrupt core code to remove our interrupt handler
+ * because we don't appear to be handling an interrupt
+ * during a chip reset.
+ */
+ return IRQ_HANDLED;
+
+ qib_stats.sps_ints++;
+ if (dd->int_counter != (u32) -1)
+ dd->int_counter++;
+
+ /* Clear the interrupt bit we expect to be set. */
+ qib_write_kreg(dd, kr_intclear, ((1ULL << QIB_I_RCVAVAIL_LSB) |
+ (1ULL << QIB_I_RCVURG_LSB)) << rcd->ctxt);
+
+ qib_kreceive(rcd, NULL, &npkts);
+ adjust_rcv_timeout(rcd, npkts);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Dedicated Send buffer available interrupt handler.
+ */
+static irqreturn_t qib_7322bufavail(int irq, void *data)
+{
+ struct qib_devdata *dd = data;
+
+ if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
+ /*
+ * This return value is not great, but we do not want the
+ * interrupt core code to remove our interrupt handler
+ * because we don't appear to be handling an interrupt
+ * during a chip reset.
+ */
+ return IRQ_HANDLED;
+
+ qib_stats.sps_ints++;
+ if (dd->int_counter != (u32) -1)
+ dd->int_counter++;
+
+ /* Clear the interrupt bit we expect to be set. */
+ qib_write_kreg(dd, kr_intclear, QIB_I_SPIOBUFAVAIL);
+
+ /* qib_ib_piobufavail() will clear the want PIO interrupt if needed */
+ if (dd->flags & QIB_INITTED)
+ qib_ib_piobufavail(dd);
+ else
+ qib_wantpiobuf_7322_intr(dd, 0);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Dedicated Send DMA interrupt handler.
+ */
+static irqreturn_t sdma_intr(int irq, void *data)
+{
+ struct qib_pportdata *ppd = data;
+ struct qib_devdata *dd = ppd->dd;
+
+ if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
+ /*
+ * This return value is not great, but we do not want the
+ * interrupt core code to remove our interrupt handler
+ * because we don't appear to be handling an interrupt
+ * during a chip reset.
+ */
+ return IRQ_HANDLED;
+
+ qib_stats.sps_ints++;
+ if (dd->int_counter != (u32) -1)
+ dd->int_counter++;
+
+ /* Clear the interrupt bit we expect to be set. */
+ qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
+ INT_MASK_P(SDma, 1) : INT_MASK_P(SDma, 0));
+ qib_sdma_intr(ppd);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Dedicated Send DMA idle interrupt handler.
+ */
+static irqreturn_t sdma_idle_intr(int irq, void *data)
+{
+ struct qib_pportdata *ppd = data;
+ struct qib_devdata *dd = ppd->dd;
+
+ if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
+ /*
+ * This return value is not great, but we do not want the
+ * interrupt core code to remove our interrupt handler
+ * because we don't appear to be handling an interrupt
+ * during a chip reset.
+ */
+ return IRQ_HANDLED;
+
+ qib_stats.sps_ints++;
+ if (dd->int_counter != (u32) -1)
+ dd->int_counter++;
+
+ /* Clear the interrupt bit we expect to be set. */
+ qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
+ INT_MASK_P(SDmaIdle, 1) : INT_MASK_P(SDmaIdle, 0));
+ qib_sdma_intr(ppd);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Dedicated Send DMA progress interrupt handler.
+ */
+static irqreturn_t sdma_progress_intr(int irq, void *data)
+{
+ struct qib_pportdata *ppd = data;
+ struct qib_devdata *dd = ppd->dd;
+
+ if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
+ /*
+ * This return value is not great, but we do not want the
+ * interrupt core code to remove our interrupt handler
+ * because we don't appear to be handling an interrupt
+ * during a chip reset.
+ */
+ return IRQ_HANDLED;
+
+ qib_stats.sps_ints++;
+ if (dd->int_counter != (u32) -1)
+ dd->int_counter++;
+
+ /* Clear the interrupt bit we expect to be set. */
+ qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
+ INT_MASK_P(SDmaProgress, 1) :
+ INT_MASK_P(SDmaProgress, 0));
+ qib_sdma_intr(ppd);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Dedicated Send DMA cleanup interrupt handler.
+ */
+static irqreturn_t sdma_cleanup_intr(int irq, void *data)
+{
+ struct qib_pportdata *ppd = data;
+ struct qib_devdata *dd = ppd->dd;
+
+ if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
+ /*
+ * This return value is not great, but we do not want the
+ * interrupt core code to remove our interrupt handler
+ * because we don't appear to be handling an interrupt
+ * during a chip reset.
+ */
+ return IRQ_HANDLED;
+
+ qib_stats.sps_ints++;
+ if (dd->int_counter != (u32) -1)
+ dd->int_counter++;
+
+ /* Clear the interrupt bit we expect to be set. */
+ qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
+ INT_MASK_PM(SDmaCleanupDone, 1) :
+ INT_MASK_PM(SDmaCleanupDone, 0));
+ qib_sdma_process_event(ppd, qib_sdma_event_e20_hw_started);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Set up our chip-specific interrupt handler.
+ * The interrupt type has already been setup, so
+ * we just need to do the registration and error checking.
+ * If we are using MSIx interrupts, we may fall back to
+ * INTx later, if the interrupt handler doesn't get called
+ * within 1/2 second (see verify_interrupt()).
+ */
+static void qib_setup_7322_interrupt(struct qib_devdata *dd, int clearpend)
+{
+ int ret, i, msixnum;
+ u64 redirect[6];
+ u64 mask;
+
+ if (!dd->num_pports)
+ return;
+
+ if (clearpend) {
+ /*
+ * if not switching interrupt types, be sure interrupts are
+ * disabled, and then clear anything pending at this point,
+ * because we are starting clean.
+ */
+ qib_7322_set_intr_state(dd, 0);
+
+ /* clear the reset error, init error/hwerror mask */
+ qib_7322_init_hwerrors(dd);
+
+ /* clear any interrupt bits that might be set */
+ qib_write_kreg(dd, kr_intclear, ~0ULL);
+
+ /* make sure no pending MSIx intr, and clear diag reg */
+ qib_write_kreg(dd, kr_intgranted, ~0ULL);
+ qib_write_kreg(dd, kr_vecclr_wo_int, ~0ULL);
+ }
+
+ if (!dd->cspec->num_msix_entries) {
+ /* Try to get INTx interrupt */
+try_intx:
+ if (!dd->pcidev->irq) {
+ qib_dev_err(dd, "irq is 0, BIOS error? "
+ "Interrupts won't work\n");
+ goto bail;
+ }
+ ret = request_irq(dd->pcidev->irq, qib_7322intr,
+ IRQF_SHARED, QIB_DRV_NAME, dd);
+ if (ret) {
+ qib_dev_err(dd, "Couldn't setup INTx "
+ "interrupt (irq=%d): %d\n",
+ dd->pcidev->irq, ret);
+ goto bail;
+ }
+ dd->cspec->irq = dd->pcidev->irq;
+ dd->cspec->main_int_mask = ~0ULL;
+ goto bail;
+ }
+
+ /* Try to get MSIx interrupts */
+ memset(redirect, 0, sizeof redirect);
+ mask = ~0ULL;
+ msixnum = 0;
+ for (i = 0; msixnum < dd->cspec->num_msix_entries; i++) {
+ irq_handler_t handler;
+ const char *name;
+ void *arg;
+ u64 val;
+ int lsb, reg, sh;
+
+ if (i < ARRAY_SIZE(irq_table)) {
+ if (irq_table[i].port) {
+ /* skip if for a non-configured port */
+ if (irq_table[i].port > dd->num_pports)
+ continue;
+ arg = dd->pport + irq_table[i].port - 1;
+ } else
+ arg = dd;
+ lsb = irq_table[i].lsb;
+ handler = irq_table[i].handler;
+ name = irq_table[i].name;
+ } else {
+ unsigned ctxt;
+
+ ctxt = i - ARRAY_SIZE(irq_table);
+ /* per krcvq context receive interrupt */
+ arg = dd->rcd[ctxt];
+ if (!arg)
+ continue;
+ lsb = QIB_I_RCVAVAIL_LSB + ctxt;
+ handler = qib_7322pintr;
+ name = QIB_DRV_NAME " (kctx)";
+ }
+ ret = request_irq(dd->cspec->msix_entries[msixnum].vector,
+ handler, 0, name, arg);
+ if (ret) {
+ /*
+ * Shouldn't happen since the enable said we could
+ * have as many as we are trying to setup here.
+ */
+ qib_dev_err(dd, "Couldn't setup MSIx "
+ "interrupt (vec=%d, irq=%d): %d\n", msixnum,
+ dd->cspec->msix_entries[msixnum].vector,
+ ret);
+ qib_7322_nomsix(dd);
+ goto try_intx;
+ }
+ dd->cspec->msix_arg[msixnum] = arg;
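+		/*
+		 * Note which interrupt-status bit this vector services:
+		 * drop it from the mask the general handler will use, and
+		 * record the vector number in the redirection table entry
+		 * for that bit.
+		 */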
+ if (lsb >= 0) {
+ reg = lsb / IBA7322_REDIRECT_VEC_PER_REG;
+ sh = (lsb % IBA7322_REDIRECT_VEC_PER_REG) *
+ SYM_LSB(IntRedirect0, vec1);
+ mask &= ~(1ULL << lsb);
+ redirect[reg] |= ((u64) msixnum) << sh;
+ }
+ val = qib_read_kreg64(dd, 2 * msixnum + 1 +
+ (QIB_7322_MsixTable_OFFS / sizeof(u64)));
+ msixnum++;
+ }
+ /* Initialize the vector mapping */
+ for (i = 0; i < ARRAY_SIZE(redirect); i++)
+ qib_write_kreg(dd, kr_intredirect + i, redirect[i]);
+ dd->cspec->main_int_mask = mask;
+bail:;
+}
+
+/**
+ * qib_7322_boardname - fill in the board name and note features
+ * @dd: the qlogic_ib device
+ *
+ * info will be based on the board revision register
+ */
+static unsigned qib_7322_boardname(struct qib_devdata *dd)
+{
+ /* Will need enumeration of board-types here */
+ char *n;
+ u32 boardid, namelen;
+ unsigned features = DUAL_PORT_CAP;
+
+ boardid = SYM_FIELD(dd->revision, Revision, BoardID);
+
+ switch (boardid) {
+ case 0:
+ n = "InfiniPath_QLE7342_Emulation";
+ break;
+ case 1:
+ n = "InfiniPath_QLE7340";
+ dd->flags |= QIB_HAS_QSFP;
+ features = PORT_SPD_CAP;
+ break;
+ case 2:
+ n = "InfiniPath_QLE7342";
+ dd->flags |= QIB_HAS_QSFP;
+ break;
+ case 3:
+ n = "InfiniPath_QMI7342";
+ break;
+ case 4:
+ n = "InfiniPath_Unsupported7342";
+ qib_dev_err(dd, "Unsupported version of QMH7342\n");
+ features = 0;
+ break;
+ case BOARD_QMH7342:
+ n = "InfiniPath_QMH7342";
+ features = 0x24;
+ break;
+ case BOARD_QME7342:
+ n = "InfiniPath_QME7342";
+ break;
+ case 15:
+ n = "InfiniPath_QLE7342_TEST";
+ dd->flags |= QIB_HAS_QSFP;
+ break;
+ default:
+ n = "InfiniPath_QLE73xy_UNKNOWN";
+ qib_dev_err(dd, "Unknown 7322 board type %u\n", boardid);
+ break;
+ }
+ dd->board_atten = 1; /* index into txdds_Xdr */
+
+ namelen = strlen(n) + 1;
+ dd->boardname = kmalloc(namelen, GFP_KERNEL);
+ if (!dd->boardname)
+ qib_dev_err(dd, "Failed allocation for board name: %s\n", n);
+ else
+ snprintf(dd->boardname, namelen, "%s", n);
+
+ snprintf(dd->boardversion, sizeof(dd->boardversion),
+ "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n",
+ QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname,
+ (unsigned)SYM_FIELD(dd->revision, Revision_R, Arch),
+ dd->majrev, dd->minrev,
+ (unsigned)SYM_FIELD(dd->revision, Revision_R, SW));
+
+ if (qib_singleport && (features >> PORT_SPD_CAP_SHIFT) & PORT_SPD_CAP) {
+ qib_devinfo(dd->pcidev, "IB%u: Forced to single port mode"
+ " by module parameter\n", dd->unit);
+ features &= PORT_SPD_CAP;
+ }
+
+ return features;
+}
+
+/*
+ * This routine sleeps, so it can only be called from user context, not
+ * from interrupt context.
+ */
+static int qib_do_7322_reset(struct qib_devdata *dd)
+{
+ u64 val;
+ u64 *msix_vecsave;
+ int i, msix_entries, ret = 1;
+ u16 cmdval;
+ u8 int_line, clinesz;
+ unsigned long flags;
+
+ /* Use dev_err so it shows up in logs, etc. */
+ qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit);
+
+ qib_pcie_getcmd(dd, &cmdval, &int_line, &clinesz);
+
+ msix_entries = dd->cspec->num_msix_entries;
+
+ /* no interrupts till re-initted */
+ qib_7322_set_intr_state(dd, 0);
+
+ if (msix_entries) {
+ qib_7322_nomsix(dd);
+ /* can be up to 512 bytes, too big for stack */
+ msix_vecsave = kmalloc(2 * dd->cspec->num_msix_entries *
+ sizeof(u64), GFP_KERNEL);
+ if (!msix_vecsave)
+ qib_dev_err(dd, "No mem to save MSIx data\n");
+ } else
+ msix_vecsave = NULL;
+
+ /*
+ * Core PCI (as of 2.6.18) doesn't save or rewrite the full vector
+ * info that is set up by the BIOS, so we have to save and restore
+ * it ourselves. There is some risk something could change it,
+ * after we save it, but since we have disabled the MSIx, it
+ * shouldn't be touched...
+ */
+ for (i = 0; i < msix_entries; i++) {
+ u64 vecaddr, vecdata;
+ vecaddr = qib_read_kreg64(dd, 2 * i +
+ (QIB_7322_MsixTable_OFFS / sizeof(u64)));
+ vecdata = qib_read_kreg64(dd, 1 + 2 * i +
+ (QIB_7322_MsixTable_OFFS / sizeof(u64)));
+ if (msix_vecsave) {
+ msix_vecsave[2 * i] = vecaddr;
+ /* save it without the masked bit set */
+ msix_vecsave[1 + 2 * i] = vecdata & ~0x100000000ULL;
+ }
+ }
+
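+	/* Clear the counter-adjustment shadow state before the chip reset. */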
+ dd->pport->cpspec->ibdeltainprog = 0;
+ dd->pport->cpspec->ibsymdelta = 0;
+ dd->pport->cpspec->iblnkerrdelta = 0;
+ dd->pport->cpspec->ibmalfdelta = 0;
+ dd->int_counter = 0; /* so we check interrupts work again */
+
+ /*
+ * Keep chip from being accessed until we are ready. Use
+ * writeq() directly, to allow the write even though QIB_PRESENT
+	 * isn't set.
+ */
+ dd->flags &= ~(QIB_INITTED | QIB_PRESENT | QIB_BADINTR);
+ dd->flags |= QIB_DOING_RESET;
+ val = dd->control | QLOGIC_IB_C_RESET;
+ writeq(val, &dd->kregbase[kr_control]);
+
+ for (i = 1; i <= 5; i++) {
+ /*
+ * Allow MBIST, etc. to complete; longer on each retry.
+ * We sometimes get machine checks from bus timeout if no
+ * response, so for now, make it *really* long.
+ */
+ msleep(1000 + (1 + i) * 3000);
+
+ qib_pcie_reenable(dd, cmdval, int_line, clinesz);
+
+ /*
+ * Use readq directly, so we don't need to mark it as PRESENT
+ * until we get a successful indication that all is well.
+ */
+ val = readq(&dd->kregbase[kr_revision]);
+ if (val == dd->revision)
+ break;
+ if (i == 5) {
+ qib_dev_err(dd, "Failed to initialize after reset, "
+ "unusable\n");
+ ret = 0;
+ goto bail;
+ }
+ }
+
+ dd->flags |= QIB_PRESENT; /* it's back */
+
+ if (msix_entries) {
+ /* restore the MSIx vector address and data if saved above */
+ for (i = 0; i < msix_entries; i++) {
+ dd->cspec->msix_entries[i].entry = i;
+ if (!msix_vecsave || !msix_vecsave[2 * i])
+ continue;
+ qib_write_kreg(dd, 2 * i +
+ (QIB_7322_MsixTable_OFFS / sizeof(u64)),
+ msix_vecsave[2 * i]);
+ qib_write_kreg(dd, 1 + 2 * i +
+ (QIB_7322_MsixTable_OFFS / sizeof(u64)),
+ msix_vecsave[1 + 2 * i]);
+ }
+ }
+
+ /* initialize the remaining registers. */
+ for (i = 0; i < dd->num_pports; ++i)
+ write_7322_init_portregs(&dd->pport[i]);
+ write_7322_initregs(dd);
+
+ if (qib_pcie_params(dd, dd->lbus_width,
+ &dd->cspec->num_msix_entries,
+ dd->cspec->msix_entries))
+ qib_dev_err(dd, "Reset failed to setup PCIe or interrupts; "
+ "continuing anyway\n");
+
+ qib_setup_7322_interrupt(dd, 1);
+
+ for (i = 0; i < dd->num_pports; ++i) {
+ struct qib_pportdata *ppd = &dd->pport[i];
+
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
+ ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ }
+
+bail:
+ dd->flags &= ~QIB_DOING_RESET; /* OK or not, no longer resetting */
+ kfree(msix_vecsave);
+ return ret;
+}
+
+/**
+ * qib_7322_put_tid - write a TID to the chip
+ * @dd: the qlogic_ib device
+ * @tidptr: pointer to the expected TID (in chip) to update
+ * @type: RCVHQ_RCV_TYPE_EAGER or RCVHQ_RCV_TYPE_EXPECTED
+ * @pa: physical address of in memory buffer; tidinvalid if freeing
+ */
+static void qib_7322_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
+ u32 type, unsigned long pa)
+{
+ if (!(dd->flags & QIB_PRESENT))
+ return;
+ if (pa != dd->tidinvalid) {
+ u64 chippa = pa >> IBA7322_TID_PA_SHIFT;
+
+ /* paranoia checks */
+ if (pa != (chippa << IBA7322_TID_PA_SHIFT)) {
+ qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
+ pa);
+ return;
+ }
+ if (chippa >= (1UL << IBA7322_TID_SZ_SHIFT)) {
+ qib_dev_err(dd, "Physical page address 0x%lx "
+ "larger than supported\n", pa);
+ return;
+ }
+
+ if (type == RCVHQ_RCV_TYPE_EAGER)
+ chippa |= dd->tidtemplate;
+ else /* for now, always full 4KB page */
+ chippa |= IBA7322_TID_SZ_4K;
+ pa = chippa;
+ }
+ writeq(pa, tidptr);
+ mmiowb();
+}
+
+/**
+ * qib_7322_clear_tids - clear all TID entries for a ctxt, expected and eager
+ * @dd: the qlogic_ib device
+ * @rcd: the ctxt data
+ *
+ * clear all TID entries for a ctxt, expected and eager.
+ * Used from qib_close().
+ */
+static void qib_7322_clear_tids(struct qib_devdata *dd,
+ struct qib_ctxtdata *rcd)
+{
+ u64 __iomem *tidbase;
+ unsigned long tidinv;
+ u32 ctxt;
+ int i;
+
+ if (!dd->kregbase || !rcd)
+ return;
+
+ ctxt = rcd->ctxt;
+
+ tidinv = dd->tidinvalid;
+ tidbase = (u64 __iomem *)
+ ((char __iomem *) dd->kregbase +
+ dd->rcvtidbase +
+ ctxt * dd->rcvtidcnt * sizeof(*tidbase));
+
+ for (i = 0; i < dd->rcvtidcnt; i++)
+ qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
+ tidinv);
+
+ tidbase = (u64 __iomem *)
+ ((char __iomem *) dd->kregbase +
+ dd->rcvegrbase +
+ rcd->rcvegr_tid_base * sizeof(*tidbase));
+
+ for (i = 0; i < rcd->rcvegrcnt; i++)
+ qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
+ tidinv);
+}
+
+/**
+ * qib_7322_tidtemplate - setup constants for TID updates
+ * @dd: the qlogic_ib device
+ *
+ * We setup stuff that we use a lot, to avoid calculating each time
+ */
+static void qib_7322_tidtemplate(struct qib_devdata *dd)
+{
+ /*
+ * For now, we always allocate 4KB buffers (at init) so we can
+ * receive max size packets. We may want a module parameter to
+ * specify 2KB or 4KB and/or make it per port instead of per device
+ * for those who want to reduce memory footprint. Note that the
+ * rcvhdrentsize must be large enough to hold the largest
+ * IB header (currently 96 bytes) that we expect to handle (plus of
+ * course the 2 dwords of RHF).
+ */
+ if (dd->rcvegrbufsize == 2048)
+ dd->tidtemplate = IBA7322_TID_SZ_2K;
+ else if (dd->rcvegrbufsize == 4096)
+ dd->tidtemplate = IBA7322_TID_SZ_4K;
+ dd->tidinvalid = 0;
+}
+
+/**
+ * qib_7322_get_base_info - set chip-specific flags for user code
+ * @rcd: the qlogic_ib ctxt
+ * @kinfo: qib_base_info pointer
+ *
+ * We set the PCIE flag because the lower bandwidth on PCIe vs
+ * HyperTransport can affect some user packet algorithms.
+ */
+
+static int qib_7322_get_base_info(struct qib_ctxtdata *rcd,
+ struct qib_base_info *kinfo)
+{
+ kinfo->spi_runtime_flags |= QIB_RUNTIME_CTXT_MSB_IN_QP |
+ QIB_RUNTIME_PCIE | QIB_RUNTIME_NODMA_RTAIL |
+ QIB_RUNTIME_HDRSUPP | QIB_RUNTIME_SDMA;
+ if (rcd->dd->cspec->r1)
+ kinfo->spi_runtime_flags |= QIB_RUNTIME_RCHK;
+ if (rcd->dd->flags & QIB_USE_SPCL_TRIG)
+ kinfo->spi_runtime_flags |= QIB_RUNTIME_SPECIAL_TRIGGER;
+
+ return 0;
+}
+
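+/*
+ * Return a pointer to the message header of the packet whose receive
+ * header flags entry is at rhf_addr.
+ */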
+static struct qib_message_header *
+qib_7322_get_msgheader(struct qib_devdata *dd, __le32 *rhf_addr)
+{
+ u32 offset = qib_hdrget_offset(rhf_addr);
+
+ return (struct qib_message_header *)
+ (rhf_addr - dd->rhf_offset + offset);
+}
+
+/*
+ * Configure number of contexts.
+ */
+static void qib_7322_config_ctxts(struct qib_devdata *dd)
+{
+ unsigned long flags;
+ u32 nchipctxts;
+
+ nchipctxts = qib_read_kreg32(dd, kr_contextcnt);
+ dd->cspec->numctxts = nchipctxts;
+ if (qib_n_krcv_queues > 1 && dd->num_pports) {
+ /*
+ * Set the mask for which bits from the QPN are used
+ * to select a context number.
+ */
+ dd->qpn_mask = 0x3f;
+ dd->first_user_ctxt = NUM_IB_PORTS +
+ (qib_n_krcv_queues - 1) * dd->num_pports;
+ if (dd->first_user_ctxt > nchipctxts)
+ dd->first_user_ctxt = nchipctxts;
+ dd->n_krcv_queues = dd->first_user_ctxt / dd->num_pports;
+ } else {
+ dd->first_user_ctxt = NUM_IB_PORTS;
+ dd->n_krcv_queues = 1;
+ }
+
+ if (!qib_cfgctxts) {
+ int nctxts = dd->first_user_ctxt + num_online_cpus();
+
+ if (nctxts <= 6)
+ dd->ctxtcnt = 6;
+ else if (nctxts <= 10)
+ dd->ctxtcnt = 10;
+ else if (nctxts <= nchipctxts)
+ dd->ctxtcnt = nchipctxts;
+ } else if (qib_cfgctxts < dd->num_pports)
+ dd->ctxtcnt = dd->num_pports;
+ else if (qib_cfgctxts <= nchipctxts)
+ dd->ctxtcnt = qib_cfgctxts;
+ if (!dd->ctxtcnt) /* none of the above, set to max */
+ dd->ctxtcnt = nchipctxts;
+
+ /*
+ * Chip can be configured for 6, 10, or 18 ctxts, and choice
+ * affects number of eager TIDs per ctxt (1K, 2K, 4K).
+ * Lock to be paranoid about later motion, etc.
+ */
+ spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
+ if (dd->ctxtcnt > 10)
+ dd->rcvctrl |= 2ULL << SYM_LSB(RcvCtrl, ContextCfg);
+ else if (dd->ctxtcnt > 6)
+ dd->rcvctrl |= 1ULL << SYM_LSB(RcvCtrl, ContextCfg);
+ /* else configure for default 6 receive ctxts */
+
+ /* The XRC opcode is 5. */
+ dd->rcvctrl |= 5ULL << SYM_LSB(RcvCtrl, XrcTypeCode);
+
+ /*
+ * RcvCtrl *must* be written here so that the
+ * chip understands how to change rcvegrcnt below.
+ */
+ qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
+ spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
+
+ /* kr_rcvegrcnt changes based on the number of contexts enabled */
+ dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt);
+ dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt,
+ dd->num_pports > 1 ? 1024U : 2048U);
+}
+
+static int qib_7322_get_ib_cfg(struct qib_pportdata *ppd, int which)
+{
+
+ int lsb, ret = 0;
+ u64 maskr; /* right-justified mask */
+
+ switch (which) {
+
+ case QIB_IB_CFG_LWID_ENB: /* Get allowed Link-width */
+ ret = ppd->link_width_enabled;
+ goto done;
+
+ case QIB_IB_CFG_LWID: /* Get currently active Link-width */
+ ret = ppd->link_width_active;
+ goto done;
+
+ case QIB_IB_CFG_SPD_ENB: /* Get allowed Link speeds */
+ ret = ppd->link_speed_enabled;
+ goto done;
+
+ case QIB_IB_CFG_SPD: /* Get current Link spd */
+ ret = ppd->link_speed_active;
+ goto done;
+
+ case QIB_IB_CFG_RXPOL_ENB: /* Get Auto-RX-polarity enable */
+ lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
+ maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
+ break;
+
+ case QIB_IB_CFG_LREV_ENB: /* Get Auto-Lane-reversal enable */
+ lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
+ maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
+ break;
+
+ case QIB_IB_CFG_LINKLATENCY:
+ ret = qib_read_kreg_port(ppd, krp_ibcstatus_b) &
+ SYM_MASK(IBCStatusB_0, LinkRoundTripLatency);
+ goto done;
+
+ case QIB_IB_CFG_OP_VLS:
+ ret = ppd->vls_operational;
+ goto done;
+
+ case QIB_IB_CFG_VL_HIGH_CAP:
+ ret = 16;
+ goto done;
+
+ case QIB_IB_CFG_VL_LOW_CAP:
+ ret = 16;
+ goto done;
+
+ case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
+ ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
+ OverrunThreshold);
+ goto done;
+
+ case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
+ ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
+ PhyerrThreshold);
+ goto done;
+
+ case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
+ /* will only take effect when the link state changes */
+ ret = (ppd->cpspec->ibcctrl_a &
+ SYM_MASK(IBCCtrlA_0, LinkDownDefaultState)) ?
+ IB_LINKINITCMD_SLEEP : IB_LINKINITCMD_POLL;
+ goto done;
+
+ case QIB_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */
+ lsb = IBA7322_IBC_HRTBT_LSB;
+ maskr = IBA7322_IBC_HRTBT_RMASK; /* OR of AUTO and ENB */
+ break;
+
+ case QIB_IB_CFG_PMA_TICKS:
+ /*
+ * 0x00 = 10x link transfer rate or 4 nsec. for 2.5Gbs
+ * Since the clock is always 250MHz, the value is 3, 1 or 0.
+ */
+ if (ppd->link_speed_active == QIB_IB_QDR)
+ ret = 3;
+ else if (ppd->link_speed_active == QIB_IB_DDR)
+ ret = 1;
+ else
+ ret = 0;
+ goto done;
+
+ default:
+ ret = -EINVAL;
+ goto done;
+ }
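+	/*
+	 * Cases that break (rather than goto done) read the requested
+	 * field out of the ibcctrl_b shadow using the lsb/maskr pair
+	 * chosen above.
+	 */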
+ ret = (int)((ppd->cpspec->ibcctrl_b >> lsb) & maskr);
+done:
+ return ret;
+}
+
+/*
+ * Below again cribbed liberally from older version. Do not lean
+ * heavily on it.
+ */
+#define IBA7322_IBC_DLIDLMC_SHIFT QIB_7322_IBCCtrlB_0_IB_DLID_LSB
+#define IBA7322_IBC_DLIDLMC_MASK (QIB_7322_IBCCtrlB_0_IB_DLID_RMASK \
+ | (QIB_7322_IBCCtrlB_0_IB_DLID_MASK_RMASK << 16))
+
+static int qib_7322_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
+{
+ struct qib_devdata *dd = ppd->dd;
+ u64 maskr; /* right-justified mask */
+ int lsb, ret = 0;
+ u16 lcmd, licmd;
+ unsigned long flags;
+
+ switch (which) {
+ case QIB_IB_CFG_LIDLMC:
+ /*
+		 * Set LID and LMC. Combined to avoid possible hazard;
+		 * caller puts LMC in 16MSbits, DLID in 16LSbits of val.
+ */
+ lsb = IBA7322_IBC_DLIDLMC_SHIFT;
+ maskr = IBA7322_IBC_DLIDLMC_MASK;
+ /*
+ * For header-checking, the SLID in the packet will
+ * be masked with SendIBSLMCMask, and compared
+ * with SendIBSLIDAssignMask. Make sure we do not
+ * set any bits not covered by the mask, or we get
+ * false-positives.
+ */
+ qib_write_kreg_port(ppd, krp_sendslid,
+ val & (val >> 16) & SendIBSLIDAssignMask);
+ qib_write_kreg_port(ppd, krp_sendslidmask,
+ (val >> 16) & SendIBSLMCMask);
+ break;
+
+ case QIB_IB_CFG_LWID_ENB: /* set allowed Link-width */
+ ppd->link_width_enabled = val;
+ /* convert IB value to chip register value */
+ if (val == IB_WIDTH_1X)
+ val = 0;
+ else if (val == IB_WIDTH_4X)
+ val = 1;
+ else
+ val = 3;
+ maskr = SYM_RMASK(IBCCtrlB_0, IB_NUM_CHANNELS);
+ lsb = SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS);
+ break;
+
+ case QIB_IB_CFG_SPD_ENB: /* set allowed Link speeds */
+ /*
+ * As with width, only write the actual register if the
+ * link is currently down, otherwise takes effect on next
+		 * link change. Since setting is being explicitly requested
+ * (via MAD or sysfs), clear autoneg failure status if speed
+ * autoneg is enabled.
+ */
+ ppd->link_speed_enabled = val;
+ val <<= IBA7322_IBC_SPEED_LSB;
+ maskr = IBA7322_IBC_SPEED_MASK | IBA7322_IBC_IBTA_1_2_MASK |
+ IBA7322_IBC_MAX_SPEED_MASK;
+ if (val & (val - 1)) {
+		/* Multiple speeds enabled */
+ val |= IBA7322_IBC_IBTA_1_2_MASK |
+ IBA7322_IBC_MAX_SPEED_MASK;
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ } else if (val & IBA7322_IBC_SPEED_QDR)
+ val |= IBA7322_IBC_IBTA_1_2_MASK;
+ /* IBTA 1.2 mode + min/max + speed bits are contiguous */
+ lsb = SYM_LSB(IBCCtrlB_0, IB_ENHANCED_MODE);
+ break;
+
+ case QIB_IB_CFG_RXPOL_ENB: /* set Auto-RX-polarity enable */
+ lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
+ maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
+ break;
+
+ case QIB_IB_CFG_LREV_ENB: /* set Auto-Lane-reversal enable */
+ lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
+ maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
+ break;
+
+ case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
+ maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
+ OverrunThreshold);
+ if (maskr != val) {
+ ppd->cpspec->ibcctrl_a &=
+ ~SYM_MASK(IBCCtrlA_0, OverrunThreshold);
+ ppd->cpspec->ibcctrl_a |= (u64) val <<
+ SYM_LSB(IBCCtrlA_0, OverrunThreshold);
+ qib_write_kreg_port(ppd, krp_ibcctrl_a,
+ ppd->cpspec->ibcctrl_a);
+ qib_write_kreg(dd, kr_scratch, 0ULL);
+ }
+ goto bail;
+
+ case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
+ maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
+ PhyerrThreshold);
+ if (maskr != val) {
+ ppd->cpspec->ibcctrl_a &=
+ ~SYM_MASK(IBCCtrlA_0, PhyerrThreshold);
+ ppd->cpspec->ibcctrl_a |= (u64) val <<
+ SYM_LSB(IBCCtrlA_0, PhyerrThreshold);
+ qib_write_kreg_port(ppd, krp_ibcctrl_a,
+ ppd->cpspec->ibcctrl_a);
+ qib_write_kreg(dd, kr_scratch, 0ULL);
+ }
+ goto bail;
+
+ case QIB_IB_CFG_PKEYS: /* update pkeys */
+ maskr = (u64) ppd->pkeys[0] | ((u64) ppd->pkeys[1] << 16) |
+ ((u64) ppd->pkeys[2] << 32) |
+ ((u64) ppd->pkeys[3] << 48);
+ qib_write_kreg_port(ppd, krp_partitionkey, maskr);
+ goto bail;
+
+ case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
+ /* will only take effect when the link state changes */
+ if (val == IB_LINKINITCMD_POLL)
+ ppd->cpspec->ibcctrl_a &=
+ ~SYM_MASK(IBCCtrlA_0, LinkDownDefaultState);
+ else /* SLEEP */
+ ppd->cpspec->ibcctrl_a |=
+ SYM_MASK(IBCCtrlA_0, LinkDownDefaultState);
+ qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
+ qib_write_kreg(dd, kr_scratch, 0ULL);
+ goto bail;
+
+ case QIB_IB_CFG_MTU: /* update the MTU in IBC */
+ /*
+ * Update our housekeeping variables, and set IBC max
+ * size, same as init code; max IBC is max we allow in
+ * buffer, less the qword pbc, plus 1 for ICRC, in dwords
+ * Set even if it's unchanged, print debug message only
+ * on changes.
+ */
+ val = (ppd->ibmaxlen >> 2) + 1;
+ ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, MaxPktLen);
+ ppd->cpspec->ibcctrl_a |= (u64)val <<
+ SYM_LSB(IBCCtrlA_0, MaxPktLen);
+ qib_write_kreg_port(ppd, krp_ibcctrl_a,
+ ppd->cpspec->ibcctrl_a);
+ qib_write_kreg(dd, kr_scratch, 0ULL);
+ goto bail;
+
+ case QIB_IB_CFG_LSTATE: /* set the IB link state */
+ switch (val & 0xffff0000) {
+ case IB_LINKCMD_DOWN:
+ lcmd = QLOGIC_IB_IBCC_LINKCMD_DOWN;
+ ppd->cpspec->ibmalfusesnap = 1;
+ ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
+ crp_errlink);
+ if (!ppd->cpspec->ibdeltainprog &&
+ qib_compat_ddr_negotiate) {
+ ppd->cpspec->ibdeltainprog = 1;
+ ppd->cpspec->ibsymsnap =
+ read_7322_creg32_port(ppd,
+ crp_ibsymbolerr);
+ ppd->cpspec->iblnkerrsnap =
+ read_7322_creg32_port(ppd,
+ crp_iblinkerrrecov);
+ }
+ break;
+
+ case IB_LINKCMD_ARMED:
+ lcmd = QLOGIC_IB_IBCC_LINKCMD_ARMED;
+ if (ppd->cpspec->ibmalfusesnap) {
+ ppd->cpspec->ibmalfusesnap = 0;
+ ppd->cpspec->ibmalfdelta +=
+ read_7322_creg32_port(ppd,
+ crp_errlink) -
+ ppd->cpspec->ibmalfsnap;
+ }
+ break;
+
+ case IB_LINKCMD_ACTIVE:
+ lcmd = QLOGIC_IB_IBCC_LINKCMD_ACTIVE;
+ break;
+
+ default:
+ ret = -EINVAL;
+ qib_dev_err(dd, "bad linkcmd req 0x%x\n", val >> 16);
+ goto bail;
+ }
+ switch (val & 0xffff) {
+ case IB_LINKINITCMD_NOP:
+ licmd = 0;
+ break;
+
+ case IB_LINKINITCMD_POLL:
+ licmd = QLOGIC_IB_IBCC_LINKINITCMD_POLL;
+ break;
+
+ case IB_LINKINITCMD_SLEEP:
+ licmd = QLOGIC_IB_IBCC_LINKINITCMD_SLEEP;
+ break;
+
+ case IB_LINKINITCMD_DISABLE:
+ licmd = QLOGIC_IB_IBCC_LINKINITCMD_DISABLE;
+ ppd->cpspec->chase_end = 0;
+ /*
+ * stop state chase counter and timer, if running.
+			 * wait for pending timer, but don't clear .data (ppd)!
+ */
+ if (ppd->cpspec->chase_timer.expires) {
+ del_timer_sync(&ppd->cpspec->chase_timer);
+ ppd->cpspec->chase_timer.expires = 0;
+ }
+ break;
+
+ default:
+ ret = -EINVAL;
+ qib_dev_err(dd, "bad linkinitcmd req 0x%x\n",
+ val & 0xffff);
+ goto bail;
+ }
+ qib_set_ib_7322_lstate(ppd, lcmd, licmd);
+ goto bail;
+
+ case QIB_IB_CFG_OP_VLS:
+ if (ppd->vls_operational != val) {
+ ppd->vls_operational = val;
+ set_vls(ppd);
+ }
+ goto bail;
+
+ case QIB_IB_CFG_VL_HIGH_LIMIT:
+ qib_write_kreg_port(ppd, krp_highprio_limit, val);
+ goto bail;
+
+ case QIB_IB_CFG_HRTBT: /* set Heartbeat off/enable/auto */
+ if (val > 3) {
+ ret = -EINVAL;
+ goto bail;
+ }
+ lsb = IBA7322_IBC_HRTBT_LSB;
+ maskr = IBA7322_IBC_HRTBT_RMASK; /* OR of AUTO and ENB */
+ break;
+
+ case QIB_IB_CFG_PORT:
+ /* val is the port number of the switch we are connected to. */
+ if (ppd->dd->cspec->r1) {
+ cancel_delayed_work(&ppd->cpspec->ipg_work);
+ ppd->cpspec->ipg_tries = 0;
+ }
+ goto bail;
+
+ default:
+ ret = -EINVAL;
+ goto bail;
+ }
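+	/*
+	 * Cases that break (rather than goto bail) update the field in
+	 * the ibcctrl_b shadow, write it to the chip, and flush with a
+	 * scratch write, since back-to-back ibc register writes are not
+	 * allowed.
+	 */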
+ ppd->cpspec->ibcctrl_b &= ~(maskr << lsb);
+ ppd->cpspec->ibcctrl_b |= (((u64) val & maskr) << lsb);
+ qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
+ qib_write_kreg(dd, kr_scratch, 0);
+bail:
+ return ret;
+}
+
+static int qib_7322_set_loopback(struct qib_pportdata *ppd, const char *what)
+{
+ int ret = 0;
+ u64 val, ctrlb;
+
+ /* only IBC loopback, may add serdes and xgxs loopbacks later */
+ if (!strncmp(what, "ibc", 3)) {
+ ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0,
+ Loopback);
+ val = 0; /* disable heart beat, so link will come up */
+ qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
+ ppd->dd->unit, ppd->port);
+ } else if (!strncmp(what, "off", 3)) {
+ ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0,
+ Loopback);
+ /* enable heart beat again */
+ val = IBA7322_IBC_HRTBT_RMASK << IBA7322_IBC_HRTBT_LSB;
+ qib_devinfo(ppd->dd->pcidev, "Disabling IB%u:%u IBC loopback "
+ "(normal)\n", ppd->dd->unit, ppd->port);
+ } else
+ ret = -EINVAL;
+ if (!ret) {
+ qib_write_kreg_port(ppd, krp_ibcctrl_a,
+ ppd->cpspec->ibcctrl_a);
+ ctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_HRTBT_MASK
+ << IBA7322_IBC_HRTBT_LSB);
+ ppd->cpspec->ibcctrl_b = ctrlb | val;
+ qib_write_kreg_port(ppd, krp_ibcctrl_b,
+ ppd->cpspec->ibcctrl_b);
+ qib_write_kreg(ppd->dd, kr_scratch, 0);
+ }
+ return ret;
+}
+
+static void get_vl_weights(struct qib_pportdata *ppd, unsigned regno,
+ struct ib_vl_weight_elem *vl)
+{
+ unsigned i;
+
+ for (i = 0; i < 16; i++, regno++, vl++) {
+ u32 val = qib_read_kreg_port(ppd, regno);
+
+ vl->vl = (val >> SYM_LSB(LowPriority0_0, VirtualLane)) &
+ SYM_RMASK(LowPriority0_0, VirtualLane);
+ vl->weight = (val >> SYM_LSB(LowPriority0_0, Weight)) &
+ SYM_RMASK(LowPriority0_0, Weight);
+ }
+}
+
+static void set_vl_weights(struct qib_pportdata *ppd, unsigned regno,
+ struct ib_vl_weight_elem *vl)
+{
+ unsigned i;
+
+ for (i = 0; i < 16; i++, regno++, vl++) {
+ u64 val;
+
+ val = ((vl->vl & SYM_RMASK(LowPriority0_0, VirtualLane)) <<
+ SYM_LSB(LowPriority0_0, VirtualLane)) |
+ ((vl->weight & SYM_RMASK(LowPriority0_0, Weight)) <<
+ SYM_LSB(LowPriority0_0, Weight));
+ qib_write_kreg_port(ppd, regno, val);
+ }
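+	/* Make sure the IB VL arbiter is enabled now that weights are set. */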
+ if (!(ppd->p_sendctrl & SYM_MASK(SendCtrl_0, IBVLArbiterEn))) {
+ struct qib_devdata *dd = ppd->dd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dd->sendctrl_lock, flags);
+ ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, IBVLArbiterEn);
+ qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
+ qib_write_kreg(dd, kr_scratch, 0);
+ spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
+ }
+}
+
+static int qib_7322_get_ib_table(struct qib_pportdata *ppd, int which, void *t)
+{
+ switch (which) {
+ case QIB_IB_TBL_VL_HIGH_ARB:
+ get_vl_weights(ppd, krp_highprio_0, t);
+ break;
+
+ case QIB_IB_TBL_VL_LOW_ARB:
+ get_vl_weights(ppd, krp_lowprio_0, t);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int qib_7322_set_ib_table(struct qib_pportdata *ppd, int which, void *t)
+{
+ switch (which) {
+ case QIB_IB_TBL_VL_HIGH_ARB:
+ set_vl_weights(ppd, krp_highprio_0, t);
+ break;
+
+ case QIB_IB_TBL_VL_LOW_ARB:
+ set_vl_weights(ppd, krp_lowprio_0, t);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
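+/*
+ * Tell the chip how far software has consumed the rcv header queue for
+ * a context, and optionally advance the eager buffer index head as well.
+ */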
+static void qib_update_7322_usrhead(struct qib_ctxtdata *rcd, u64 hd,
+ u32 updegr, u32 egrhd)
+{
+ qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
+ qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
+ if (updegr)
+ qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
+}
+
+static u32 qib_7322_hdrqempty(struct qib_ctxtdata *rcd)
+{
+ u32 head, tail;
+
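+	/* The queue is empty when the head index has caught up with the tail. */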
+ head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt);
+ if (rcd->rcvhdrtail_kvaddr)
+ tail = qib_get_rcvhdrtail(rcd);
+ else
+ tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt);
+ return head == tail;
+}
+
+#define RCVCTRL_COMMON_MODS (QIB_RCVCTRL_CTXT_ENB | \
+ QIB_RCVCTRL_CTXT_DIS | \
+ QIB_RCVCTRL_TIDFLOW_ENB | \
+ QIB_RCVCTRL_TIDFLOW_DIS | \
+ QIB_RCVCTRL_TAILUPD_ENB | \
+ QIB_RCVCTRL_TAILUPD_DIS | \
+ QIB_RCVCTRL_INTRAVAIL_ENB | \
+ QIB_RCVCTRL_INTRAVAIL_DIS | \
+ QIB_RCVCTRL_BP_ENB | \
+ QIB_RCVCTRL_BP_DIS)
+
+#define RCVCTRL_PORT_MODS (QIB_RCVCTRL_CTXT_ENB | \
+ QIB_RCVCTRL_CTXT_DIS | \
+ QIB_RCVCTRL_PKEY_DIS | \
+ QIB_RCVCTRL_PKEY_ENB)
+
+/*
+ * Modify the RCVCTRL register in chip-specific way. This
+ * is a function because bit positions and (future) register
+ * locations are chip-specific, but the needed operations are
+ * generic. <op> is a bit-mask because we often want to
+ * do multiple modifications.
+ */
+static void rcvctrl_7322_mod(struct qib_pportdata *ppd, unsigned int op,
+ int ctxt)
+{
+ struct qib_devdata *dd = ppd->dd;
+ struct qib_ctxtdata *rcd;
+ u64 mask, val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
+
+ if (op & QIB_RCVCTRL_TIDFLOW_ENB)
+ dd->rcvctrl |= SYM_MASK(RcvCtrl, TidFlowEnable);
+ if (op & QIB_RCVCTRL_TIDFLOW_DIS)
+ dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TidFlowEnable);
+ if (op & QIB_RCVCTRL_TAILUPD_ENB)
+ dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd);
+ if (op & QIB_RCVCTRL_TAILUPD_DIS)
+ dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TailUpd);
+ if (op & QIB_RCVCTRL_PKEY_ENB)
+ ppd->p_rcvctrl &= ~SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable);
+ if (op & QIB_RCVCTRL_PKEY_DIS)
+ ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable);
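+	/* A negative ctxt means "all contexts"; build the context mask to match. */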
+ if (ctxt < 0) {
+ mask = (1ULL << dd->ctxtcnt) - 1;
+ rcd = NULL;
+ } else {
+ mask = (1ULL << ctxt);
+ rcd = dd->rcd[ctxt];
+ }
+ if ((op & QIB_RCVCTRL_CTXT_ENB) && rcd) {
+ ppd->p_rcvctrl |=
+ (mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel));
+ if (!(dd->flags & QIB_NODMA_RTAIL)) {
+ op |= QIB_RCVCTRL_TAILUPD_ENB; /* need reg write */
+ dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd);
+ }
+ /* Write these registers before the context is enabled. */
+ qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt,
+ rcd->rcvhdrqtailaddr_phys);
+ qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt,
+ rcd->rcvhdrq_phys);
+ rcd->seq_cnt = 1;
+ }
+ if (op & QIB_RCVCTRL_CTXT_DIS)
+ ppd->p_rcvctrl &=
+ ~(mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel));
+ if (op & QIB_RCVCTRL_BP_ENB)
+ dd->rcvctrl |= mask << SYM_LSB(RcvCtrl, dontDropRHQFull);
+ if (op & QIB_RCVCTRL_BP_DIS)
+ dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, dontDropRHQFull));
+ if (op & QIB_RCVCTRL_INTRAVAIL_ENB)
+ dd->rcvctrl |= (mask << SYM_LSB(RcvCtrl, IntrAvail));
+ if (op & QIB_RCVCTRL_INTRAVAIL_DIS)
+ dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, IntrAvail));
+ /*
+ * Decide which registers to write depending on the ops enabled.
+ * Special case is "flush" (no bits set at all)
+ * which needs to write both.
+ */
+ if (op == 0 || (op & RCVCTRL_COMMON_MODS))
+ qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
+ if (op == 0 || (op & RCVCTRL_PORT_MODS))
+ qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
+ if ((op & QIB_RCVCTRL_CTXT_ENB) && dd->rcd[ctxt]) {
+ /*
+ * Init the context registers also; if we were
+ * disabled, tail and head should both be zero
+ * already from the enable, but since we don't
+		 * know, we have to do it explicitly.
+ */
+ val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt);
+ qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt);
+
+ /* be sure enabling write seen; hd/tl should be 0 */
+ (void) qib_read_kreg32(dd, kr_scratch);
+ val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt);
+ dd->rcd[ctxt]->head = val;
+ /* If kctxt, interrupt on next receive. */
+ if (ctxt < dd->first_user_ctxt)
+ val |= dd->rhdrhead_intr_off;
+ qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
+ } else if ((op & QIB_RCVCTRL_INTRAVAIL_ENB) &&
+ dd->rcd[ctxt] && dd->rhdrhead_intr_off) {
+ /* arm rcv interrupt */
+ val = dd->rcd[ctxt]->head | dd->rhdrhead_intr_off;
+ qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
+ }
+ if (op & QIB_RCVCTRL_CTXT_DIS) {
+ unsigned f;
+
+ /* Now that the context is disabled, clear these registers. */
+ if (ctxt >= 0) {
+ qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt, 0);
+ qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt, 0);
+ for (f = 0; f < NUM_TIDFLOWS_CTXT; f++)
+ qib_write_ureg(dd, ur_rcvflowtable + f,
+ TIDFLOW_ERRBITS, ctxt);
+ } else {
+ unsigned i;
+
+ for (i = 0; i < dd->cfgctxts; i++) {
+ qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr,
+ i, 0);
+ qib_write_kreg_ctxt(dd, krc_rcvhdraddr, i, 0);
+ for (f = 0; f < NUM_TIDFLOWS_CTXT; f++)
+ qib_write_ureg(dd, ur_rcvflowtable + f,
+ TIDFLOW_ERRBITS, i);
+ }
+ }
+ }
+ spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
+}
+
+/*
+ * Modify the SENDCTRL register in chip-specific way. This
+ * is a function where there are multiple such registers with
+ * slightly different layouts.
+ * The chip doesn't allow back-to-back sendctrl writes, so write
+ * the scratch register after writing sendctrl.
+ *
+ * Which register is written depends on the operation.
+ * Most operate on the common register, while
+ * SEND_ENB and SEND_DIS operate on the per-port ones.
+ * SEND_ENB is included in common because it can change SPCL_TRIG
+ */
+#define SENDCTRL_COMMON_MODS (\
+ QIB_SENDCTRL_CLEAR | \
+ QIB_SENDCTRL_AVAIL_DIS | \
+ QIB_SENDCTRL_AVAIL_ENB | \
+ QIB_SENDCTRL_AVAIL_BLIP | \
+ QIB_SENDCTRL_DISARM | \
+ QIB_SENDCTRL_DISARM_ALL | \
+ QIB_SENDCTRL_SEND_ENB)
+
+#define SENDCTRL_PORT_MODS (\
+ QIB_SENDCTRL_CLEAR | \
+ QIB_SENDCTRL_SEND_ENB | \
+ QIB_SENDCTRL_SEND_DIS | \
+ QIB_SENDCTRL_FLUSH)
+
+static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op)
+{
+ struct qib_devdata *dd = ppd->dd;
+ u64 tmp_dd_sendctrl;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dd->sendctrl_lock, flags);
+
+ /* First the dd ones that are "sticky", saved in shadow */
+ if (op & QIB_SENDCTRL_CLEAR)
+ dd->sendctrl = 0;
+ if (op & QIB_SENDCTRL_AVAIL_DIS)
+ dd->sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
+ else if (op & QIB_SENDCTRL_AVAIL_ENB) {
+ dd->sendctrl |= SYM_MASK(SendCtrl, SendBufAvailUpd);
+ if (dd->flags & QIB_USE_SPCL_TRIG)
+ dd->sendctrl |= SYM_MASK(SendCtrl, SpecialTriggerEn);
+ }
+
+ /* Then the ppd ones that are "sticky", saved in shadow */
+ if (op & QIB_SENDCTRL_SEND_DIS)
+ ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable);
+ else if (op & QIB_SENDCTRL_SEND_ENB)
+ ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable);
+
+ if (op & QIB_SENDCTRL_DISARM_ALL) {
+ u32 i, last;
+
+ tmp_dd_sendctrl = dd->sendctrl;
+ last = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
+ /*
+ * Disarm any buffers that are not yet launched,
+ * disabling updates until done.
+ */
+ tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
+ for (i = 0; i < last; i++) {
+ qib_write_kreg(dd, kr_sendctrl,
+ tmp_dd_sendctrl |
+ SYM_MASK(SendCtrl, Disarm) | i);
+ qib_write_kreg(dd, kr_scratch, 0);
+ }
+ }
+
+ if (op & QIB_SENDCTRL_FLUSH) {
+ u64 tmp_ppd_sendctrl = ppd->p_sendctrl;
+
+ /*
+ * Now drain all the fifos. The Abort bit should never be
+ * needed, so for now, at least, we don't use it.
+ */
+ tmp_ppd_sendctrl |=
+ SYM_MASK(SendCtrl_0, TxeDrainRmFifo) |
+ SYM_MASK(SendCtrl_0, TxeDrainLaFifo) |
+ SYM_MASK(SendCtrl_0, TxeBypassIbc);
+ qib_write_kreg_port(ppd, krp_sendctrl, tmp_ppd_sendctrl);
+ qib_write_kreg(dd, kr_scratch, 0);
+ }
+
+ tmp_dd_sendctrl = dd->sendctrl;
+
+ if (op & QIB_SENDCTRL_DISARM)
+ tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Disarm) |
+ ((op & QIB_7322_SendCtrl_DisarmSendBuf_RMASK) <<
+ SYM_LSB(SendCtrl, DisarmSendBuf));
+ if ((op & QIB_SENDCTRL_AVAIL_BLIP) &&
+ (dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd)))
+ tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
+
+ if (op == 0 || (op & SENDCTRL_COMMON_MODS)) {
+ qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl);
+ qib_write_kreg(dd, kr_scratch, 0);
+ }
+
+ if (op == 0 || (op & SENDCTRL_PORT_MODS)) {
+ qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
+ qib_write_kreg(dd, kr_scratch, 0);
+ }
+
+ if (op & QIB_SENDCTRL_AVAIL_BLIP) {
+ qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
+ qib_write_kreg(dd, kr_scratch, 0);
+ }
+
+ spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
+
+ if (op & QIB_SENDCTRL_FLUSH) {
+ u32 v;
+ /*
+ * ensure writes have hit chip, then do a few
+ * more reads, to allow DMA of pioavail registers
+ * to occur, so in-memory copy is in sync with
+ * the chip. Not always safe to sleep.
+ */
+ v = qib_read_kreg32(dd, kr_scratch);
+ qib_write_kreg(dd, kr_scratch, v);
+ v = qib_read_kreg32(dd, kr_scratch);
+ qib_write_kreg(dd, kr_scratch, v);
+ qib_read_kreg32(dd, kr_scratch);
+ }
+}
+
+#define _PORT_VIRT_FLAG 0x8000U /* "virtual", need adjustments */
+#define _PORT_64BIT_FLAG 0x10000U /* not "virtual", but 64bit */
+#define _PORT_CNTR_IDXMASK 0x7fffU /* mask off flags above */
+
+/**
+ * qib_portcntr_7322 - read a per-port chip counter
+ * @ppd: the qlogic_ib pport
+ * @reg: the counter to read (not a chip offset)
+ */
+static u64 qib_portcntr_7322(struct qib_pportdata *ppd, u32 reg)
+{
+ struct qib_devdata *dd = ppd->dd;
+ u64 ret = 0ULL;
+ u16 creg;
+ /* 0xffff for unimplemented or synthesized counters */
+ static const u32 xlator[] = {
+ [QIBPORTCNTR_PKTSEND] = crp_pktsend | _PORT_64BIT_FLAG,
+ [QIBPORTCNTR_WORDSEND] = crp_wordsend | _PORT_64BIT_FLAG,
+ [QIBPORTCNTR_PSXMITDATA] = crp_psxmitdatacount,
+ [QIBPORTCNTR_PSXMITPKTS] = crp_psxmitpktscount,
+ [QIBPORTCNTR_PSXMITWAIT] = crp_psxmitwaitcount,
+ [QIBPORTCNTR_SENDSTALL] = crp_sendstall,
+ [QIBPORTCNTR_PKTRCV] = crp_pktrcv | _PORT_64BIT_FLAG,
+ [QIBPORTCNTR_PSRCVDATA] = crp_psrcvdatacount,
+ [QIBPORTCNTR_PSRCVPKTS] = crp_psrcvpktscount,
+ [QIBPORTCNTR_RCVEBP] = crp_rcvebp,
+ [QIBPORTCNTR_RCVOVFL] = crp_rcvovfl,
+ [QIBPORTCNTR_WORDRCV] = crp_wordrcv | _PORT_64BIT_FLAG,
+ [QIBPORTCNTR_RXDROPPKT] = 0xffff, /* not needed for 7322 */
+ [QIBPORTCNTR_RXLOCALPHYERR] = crp_rxotherlocalphyerr,
+ [QIBPORTCNTR_RXVLERR] = crp_rxvlerr,
+ [QIBPORTCNTR_ERRICRC] = crp_erricrc,
+ [QIBPORTCNTR_ERRVCRC] = crp_errvcrc,
+ [QIBPORTCNTR_ERRLPCRC] = crp_errlpcrc,
+ [QIBPORTCNTR_BADFORMAT] = crp_badformat,
+ [QIBPORTCNTR_ERR_RLEN] = crp_err_rlen,
+ [QIBPORTCNTR_IBSYMBOLERR] = crp_ibsymbolerr,
+ [QIBPORTCNTR_INVALIDRLEN] = crp_invalidrlen,
+ [QIBPORTCNTR_UNSUPVL] = crp_txunsupvl,
+ [QIBPORTCNTR_EXCESSBUFOVFL] = crp_excessbufferovfl,
+ [QIBPORTCNTR_ERRLINK] = crp_errlink,
+ [QIBPORTCNTR_IBLINKDOWN] = crp_iblinkdown,
+ [QIBPORTCNTR_IBLINKERRRECOV] = crp_iblinkerrrecov,
+ [QIBPORTCNTR_LLI] = crp_locallinkintegrityerr,
+ [QIBPORTCNTR_VL15PKTDROP] = crp_vl15droppedpkt,
+ [QIBPORTCNTR_ERRPKEY] = crp_errpkey,
+ /*
+ * the next 3 aren't really counters, but were implemented
+ * as counters in older chips, so still get accessed as
+ * though they were counters from this code.
+ */
+ [QIBPORTCNTR_PSINTERVAL] = krp_psinterval,
+ [QIBPORTCNTR_PSSTART] = krp_psstart,
+ [QIBPORTCNTR_PSSTAT] = krp_psstat,
+ /* pseudo-counter, summed for all ports */
+ [QIBPORTCNTR_KHDROVFL] = 0xffff,
+ };
+
+ if (reg >= ARRAY_SIZE(xlator)) {
+ qib_devinfo(ppd->dd->pcidev,
+ "Unimplemented portcounter %u\n", reg);
+ goto done;
+ }
+ creg = xlator[reg] & _PORT_CNTR_IDXMASK;
+
+ /* handle non-counters and special cases first */
+ if (reg == QIBPORTCNTR_KHDROVFL) {
+ int i;
+
+ /* sum over all kernel contexts (skip if mini_init) */
+ for (i = 0; dd->rcd && i < dd->first_user_ctxt; i++) {
+ struct qib_ctxtdata *rcd = dd->rcd[i];
+
+ if (!rcd || rcd->ppd != ppd)
+ continue;
+ ret += read_7322_creg32(dd, cr_base_egrovfl + i);
+ }
+ goto done;
+ } else if (reg == QIBPORTCNTR_RXDROPPKT) {
+ /*
+ * Used as part of the synthesis of port_rcv_errors
+ * in the verbs code for IBTA counters. Not needed for 7322,
+ * because all the errors are already counted by other cntrs.
+ */
+ goto done;
+ } else if (reg == QIBPORTCNTR_PSINTERVAL ||
+ reg == QIBPORTCNTR_PSSTART || reg == QIBPORTCNTR_PSSTAT) {
+ /* were counters in older chips, now per-port kernel regs */
+ ret = qib_read_kreg_port(ppd, creg);
+ goto done;
+ }
+
+ /*
+ * Only fast increment counters are 64 bits; use 32 bit reads to
+ * avoid two independent reads when on Opteron.
+ */
+ if (xlator[reg] & _PORT_64BIT_FLAG)
+ ret = read_7322_creg_port(ppd, creg);
+ else
+ ret = read_7322_creg32_port(ppd, creg);
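+	/*
+	 * Apply the snapshot/delta adjustments maintained by the link
+	 * up/down handling, so errors incurred during driver-initiated
+	 * link transitions (e.g. compat DDR autoneg) don't inflate the
+	 * counts reported to users.
+	 */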
+ if (creg == crp_ibsymbolerr) {
+ if (ppd->cpspec->ibdeltainprog)
+ ret -= ret - ppd->cpspec->ibsymsnap;
+ ret -= ppd->cpspec->ibsymdelta;
+ } else if (creg == crp_iblinkerrrecov) {
+ if (ppd->cpspec->ibdeltainprog)
+ ret -= ret - ppd->cpspec->iblnkerrsnap;
+ ret -= ppd->cpspec->iblnkerrdelta;
+ } else if (creg == crp_errlink)
+ ret -= ppd->cpspec->ibmalfdelta;
+ else if (creg == crp_iblinkdown)
+ ret += ppd->cpspec->iblnkdowndelta;
+done:
+ return ret;
+}
+
+/*
+ * Device counter names (not port-specific), one line per stat,
+ * single string. Used by utilities like ipathstats to print the stats
+ * in a way which works for different versions of drivers, without changing
+ * the utility. Names need to be 12 chars or less (w/o newline), for proper
+ * display by utility.
+ * Non-error counters are first.
+ * Start of "error" counters is indicated by a leading "E " on the first
+ * "error" counter, and doesn't count in label length.
+ * The EgrOvfl list needs to be last so we truncate them at the configured
+ * context count for the device.
+ * cntr7322indices contains the corresponding register indices.
+ */
+static const char cntr7322names[] =
+ "Interrupts\n"
+ "HostBusStall\n"
+ "E RxTIDFull\n"
+ "RxTIDInvalid\n"
+ "RxTIDFloDrop\n" /* 7322 only */
+ "Ctxt0EgrOvfl\n"
+ "Ctxt1EgrOvfl\n"
+ "Ctxt2EgrOvfl\n"
+ "Ctxt3EgrOvfl\n"
+ "Ctxt4EgrOvfl\n"
+ "Ctxt5EgrOvfl\n"
+ "Ctxt6EgrOvfl\n"
+ "Ctxt7EgrOvfl\n"
+ "Ctxt8EgrOvfl\n"
+ "Ctxt9EgrOvfl\n"
+ "Ctx10EgrOvfl\n"
+ "Ctx11EgrOvfl\n"
+ "Ctx12EgrOvfl\n"
+ "Ctx13EgrOvfl\n"
+ "Ctx14EgrOvfl\n"
+ "Ctx15EgrOvfl\n"
+ "Ctx16EgrOvfl\n"
+ "Ctx17EgrOvfl\n"
+ ;
+
+static const u32 cntr7322indices[] = {
+ cr_lbint | _PORT_64BIT_FLAG,
+ cr_lbstall | _PORT_64BIT_FLAG,
+ cr_tidfull,
+ cr_tidinvalid,
+ cr_rxtidflowdrop,
+ cr_base_egrovfl + 0,
+ cr_base_egrovfl + 1,
+ cr_base_egrovfl + 2,
+ cr_base_egrovfl + 3,
+ cr_base_egrovfl + 4,
+ cr_base_egrovfl + 5,
+ cr_base_egrovfl + 6,
+ cr_base_egrovfl + 7,
+ cr_base_egrovfl + 8,
+ cr_base_egrovfl + 9,
+ cr_base_egrovfl + 10,
+ cr_base_egrovfl + 11,
+ cr_base_egrovfl + 12,
+ cr_base_egrovfl + 13,
+ cr_base_egrovfl + 14,
+ cr_base_egrovfl + 15,
+ cr_base_egrovfl + 16,
+ cr_base_egrovfl + 17,
+};
+
+/*
+ * same as cntr7322names and cntr7322indices, but for port-specific counters.
+ * portcntr7322indices is somewhat complicated by some registers needing
+ * adjustments of various kinds, and those are ORed with _PORT_VIRT_FLAG
+ */
+static const char portcntr7322names[] =
+ "TxPkt\n"
+ "TxFlowPkt\n"
+ "TxWords\n"
+ "RxPkt\n"
+ "RxFlowPkt\n"
+ "RxWords\n"
+ "TxFlowStall\n"
+ "TxDmaDesc\n" /* 7220 and 7322-only */
+ "E RxDlidFltr\n" /* 7220 and 7322-only */
+ "IBStatusChng\n"
+ "IBLinkDown\n"
+ "IBLnkRecov\n"
+ "IBRxLinkErr\n"
+ "IBSymbolErr\n"
+ "RxLLIErr\n"
+ "RxBadFormat\n"
+ "RxBadLen\n"
+ "RxBufOvrfl\n"
+ "RxEBP\n"
+ "RxFlowCtlErr\n"
+ "RxICRCerr\n"
+ "RxLPCRCerr\n"
+ "RxVCRCerr\n"
+ "RxInvalLen\n"
+ "RxInvalPKey\n"
+ "RxPktDropped\n"
+ "TxBadLength\n"
+ "TxDropped\n"
+ "TxInvalLen\n"
+ "TxUnderrun\n"
+ "TxUnsupVL\n"
+ "RxLclPhyErr\n" /* 7220 and 7322-only from here down */
+ "RxVL15Drop\n"
+ "RxVlErr\n"
+ "XcessBufOvfl\n"
+ "RxQPBadCtxt\n" /* 7322-only from here down */
+ "TXBadHeader\n"
+ ;
+
+static const u32 portcntr7322indices[] = {
+ QIBPORTCNTR_PKTSEND | _PORT_VIRT_FLAG,
+ crp_pktsendflow,
+ QIBPORTCNTR_WORDSEND | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_PKTRCV | _PORT_VIRT_FLAG,
+ crp_pktrcvflowctrl,
+ QIBPORTCNTR_WORDRCV | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_SENDSTALL | _PORT_VIRT_FLAG,
+ crp_txsdmadesc | _PORT_64BIT_FLAG,
+ crp_rxdlidfltr,
+ crp_ibstatuschange,
+ QIBPORTCNTR_IBLINKDOWN | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_IBLINKERRRECOV | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_ERRLINK | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_IBSYMBOLERR | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_LLI | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_BADFORMAT | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_ERR_RLEN | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_RCVOVFL | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_RCVEBP | _PORT_VIRT_FLAG,
+ crp_rcvflowctrlviol,
+ QIBPORTCNTR_ERRICRC | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_ERRLPCRC | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_ERRVCRC | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_INVALIDRLEN | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_ERRPKEY | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_RXDROPPKT | _PORT_VIRT_FLAG,
+ crp_txminmaxlenerr,
+ crp_txdroppedpkt,
+ crp_txlenerr,
+ crp_txunderrun,
+ crp_txunsupvl,
+ QIBPORTCNTR_RXLOCALPHYERR | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_VL15PKTDROP | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_RXVLERR | _PORT_VIRT_FLAG,
+ QIBPORTCNTR_EXCESSBUFOVFL | _PORT_VIRT_FLAG,
+ crp_rxqpinvalidctxt,
+ crp_txhdrerr,
+};
+
+/* do all the setup to make the counter reads efficient later */
+static void init_7322_cntrnames(struct qib_devdata *dd)
+{
+ int i, j = 0;
+ char *s;
+
+ for (i = 0, s = (char *)cntr7322names; s && j <= dd->cfgctxts;
+ i++) {
+ /* we always have at least one counter before the egrovfl */
+ if (!j && !strncmp("Ctxt0EgrOvfl", s + 1, 12))
+ j = 1;
+ s = strchr(s + 1, '\n');
+ if (s && j)
+ j++;
+ }
+ dd->cspec->ncntrs = i;
+ if (!s)
+ /* full list; size is without terminating null */
+ dd->cspec->cntrnamelen = sizeof(cntr7322names) - 1;
+ else
+ dd->cspec->cntrnamelen = 1 + s - cntr7322names;
+ dd->cspec->cntrs = kmalloc(dd->cspec->ncntrs
+ * sizeof(u64), GFP_KERNEL);
+ if (!dd->cspec->cntrs)
+ qib_dev_err(dd, "Failed allocation for counters\n");
+
+ for (i = 0, s = (char *)portcntr7322names; s; i++)
+ s = strchr(s + 1, '\n');
+ dd->cspec->nportcntrs = i - 1;
+ dd->cspec->portcntrnamelen = sizeof(portcntr7322names) - 1;
+ for (i = 0; i < dd->num_pports; ++i) {
+ dd->pport[i].cpspec->portcntrs = kmalloc(dd->cspec->nportcntrs
+ * sizeof(u64), GFP_KERNEL);
+ if (!dd->pport[i].cpspec->portcntrs)
+ qib_dev_err(dd, "Failed allocation for"
+ " portcounters\n");
+ }
+}
+
+static u32 qib_read_7322cntrs(struct qib_devdata *dd, loff_t pos, char **namep,
+ u64 **cntrp)
+{
+ u32 ret;
+
+ if (namep) {
+ ret = dd->cspec->cntrnamelen;
+ if (pos >= ret)
+ ret = 0; /* final read after getting everything */
+ else
+ *namep = (char *) cntr7322names;
+ } else {
+ u64 *cntr = dd->cspec->cntrs;
+ int i;
+
+ ret = dd->cspec->ncntrs * sizeof(u64);
+ if (!cntr || pos >= ret) {
+ /* everything read, or couldn't get memory */
+ ret = 0;
+ goto done;
+ }
+ *cntrp = cntr;
+ for (i = 0; i < dd->cspec->ncntrs; i++)
+ if (cntr7322indices[i] & _PORT_64BIT_FLAG)
+ *cntr++ = read_7322_creg(dd,
+ cntr7322indices[i] &
+ _PORT_CNTR_IDXMASK);
+ else
+ *cntr++ = read_7322_creg32(dd,
+ cntr7322indices[i]);
+ }
+done:
+ return ret;
+}
+
+static u32 qib_read_7322portcntrs(struct qib_devdata *dd, loff_t pos, u32 port,
+ char **namep, u64 **cntrp)
+{
+ u32 ret;
+
+ if (namep) {
+ ret = dd->cspec->portcntrnamelen;
+ if (pos >= ret)
+ ret = 0; /* final read after getting everything */
+ else
+ *namep = (char *)portcntr7322names;
+ } else {
+ struct qib_pportdata *ppd = &dd->pport[port];
+ u64 *cntr = ppd->cpspec->portcntrs;
+ int i;
+
+ ret = dd->cspec->nportcntrs * sizeof(u64);
+ if (!cntr || pos >= ret) {
+ /* everything read, or couldn't get memory */
+ ret = 0;
+ goto done;
+ }
+ *cntrp = cntr;
+ for (i = 0; i < dd->cspec->nportcntrs; i++) {
+ if (portcntr7322indices[i] & _PORT_VIRT_FLAG)
+ *cntr++ = qib_portcntr_7322(ppd,
+ portcntr7322indices[i] &
+ _PORT_CNTR_IDXMASK);
+ else if (portcntr7322indices[i] & _PORT_64BIT_FLAG)
+ *cntr++ = read_7322_creg_port(ppd,
+ portcntr7322indices[i] &
+ _PORT_CNTR_IDXMASK);
+ else
+ *cntr++ = read_7322_creg32_port(ppd,
+ portcntr7322indices[i]);
+ }
+ }
+done:
+ return ret;
+}
+
+/**
+ * qib_get_7322_faststats - get word counters from chip before they overflow
+ * @opaque: contains a pointer to the qlogic_ib device qib_devdata
+ *
+ * VESTIGIAL: the IBA7322 has no "small fast counters", so the only
+ * real purpose of this function is to maintain the notion of
+ * "active time", which in turn is only logged into the eeprom,
+ * which we don't have, yet, for 7322-based boards.
+ *
+ * called from add_timer
+ */
+static void qib_get_7322_faststats(unsigned long opaque)
+{
+ struct qib_devdata *dd = (struct qib_devdata *) opaque;
+ struct qib_pportdata *ppd;
+ unsigned long flags;
+ u64 traffic_wds;
+ int pidx;
+
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ ppd = dd->pport + pidx;
+
+ /*
+		 * If the port isn't enabled, or isn't operational, or
+		 * diags are running (which can cause memory diags to fail),
+		 * skip this port this time.
+ */
+ if (!ppd->link_speed_supported || !(dd->flags & QIB_INITTED)
+ || dd->diag_client)
+ continue;
+
+ /*
+ * Maintain an activity timer, based on traffic
+ * exceeding a threshold, so we need to check the word-counts
+ * even if they are 64-bit.
+ */
+ traffic_wds = qib_portcntr_7322(ppd, QIBPORTCNTR_WORDRCV) +
+ qib_portcntr_7322(ppd, QIBPORTCNTR_WORDSEND);
+ spin_lock_irqsave(&ppd->dd->eep_st_lock, flags);
+ traffic_wds -= ppd->dd->traffic_wds;
+ ppd->dd->traffic_wds += traffic_wds;
+ if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
+ atomic_add(ACTIVITY_TIMER, &ppd->dd->active_time);
+ spin_unlock_irqrestore(&ppd->dd->eep_st_lock, flags);
+ if (ppd->cpspec->qdr_dfe_on && (ppd->link_speed_active &
+ QIB_IB_QDR) &&
+ (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
+ QIBL_LINKACTIVE)) &&
+ ppd->cpspec->qdr_dfe_time &&
+ time_after64(get_jiffies_64(), ppd->cpspec->qdr_dfe_time)) {
+ ppd->cpspec->qdr_dfe_on = 0;
+
+ qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
+ ppd->dd->cspec->r1 ?
+ QDR_STATIC_ADAPT_INIT_R1 :
+ QDR_STATIC_ADAPT_INIT);
+ force_h1(ppd);
+ }
+ }
+ mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
+}
+
+/*
+ * If we were using MSIx, try to fall back to INTx.
+ */
+static int qib_7322_intr_fallback(struct qib_devdata *dd)
+{
+ if (!dd->cspec->num_msix_entries)
+ return 0; /* already using INTx */
+
+ qib_devinfo(dd->pcidev, "MSIx interrupt not detected,"
+ " trying INTx interrupts\n");
+ qib_7322_nomsix(dd);
+ qib_enable_intx(dd->pcidev);
+ qib_setup_7322_interrupt(dd, 0);
+ return 1;
+}
+
+/*
+ * Reset the XGXS (between serdes and IBC). Slightly less intrusive
+ * than resetting the IBC or external link state, and useful in some
+ * cases to cause some retraining. To do this right, we reset IBC
+ * as well, then return to previous state (which may still be in reset).
+ * NOTE: some callers of this "know" this writes the current value
+ * of cpspec->ibcctrl_a as part of its operation, so if that changes,
+ * check all callers.
+ */
+static void qib_7322_mini_pcs_reset(struct qib_pportdata *ppd)
+{
+ u64 val;
+ struct qib_devdata *dd = ppd->dd;
+ const u64 reset_bits = SYM_MASK(IBPCSConfig_0, xcv_rreset) |
+ SYM_MASK(IBPCSConfig_0, xcv_treset) |
+ SYM_MASK(IBPCSConfig_0, tx_rx_reset);
+
+ val = qib_read_kreg_port(ppd, krp_ib_pcsconfig);
+ qib_write_kreg_port(ppd, krp_ibcctrl_a,
+ ppd->cpspec->ibcctrl_a &
+ ~SYM_MASK(IBCCtrlA_0, IBLinkEn));
+
+ qib_write_kreg_port(ppd, krp_ib_pcsconfig, val | reset_bits);
+ qib_read_kreg32(dd, kr_scratch);
+ qib_write_kreg_port(ppd, krp_ib_pcsconfig, val & ~reset_bits);
+ qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
+ qib_write_kreg(dd, kr_scratch, 0ULL);
+}
+
+/*
+ * This code for non-IBTA-compliant IB speed negotiation is only known to
+ * work for the SDR to DDR transition, and only between an HCA and a switch
+ * with recent firmware. It is based on observed heuristics, rather than
+ * actual knowledge of the non-compliant speed negotiation.
+ * It has a number of hard-coded fields, since the hope is to rewrite this
+ * when a spec is available on how the negotiation is intended to work.
+ */
+static void autoneg_7322_sendpkt(struct qib_pportdata *ppd, u32 *hdr,
+ u32 dcnt, u32 *data)
+{
+ int i;
+ u64 pbc;
+ u32 __iomem *piobuf;
+ u32 pnum, control, len;
+ struct qib_devdata *dd = ppd->dd;
+
+ i = 0;
+ len = 7 + dcnt + 1; /* 7 dword header, dword data, icrc */
+ control = qib_7322_setpbc_control(ppd, len, 0, 15);
+ pbc = ((u64) control << 32) | len;
+ while (!(piobuf = qib_7322_getsendbuf(ppd, pbc, &pnum))) {
+ if (i++ > 15)
+ return;
+ udelay(2);
+ }
+ /* disable header check on this packet, since it can't be valid */
+ dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_DIS1, NULL);
+ writeq(pbc, piobuf);
+ qib_flush_wc();
+ qib_pio_copy(piobuf + 2, hdr, 7);
+ qib_pio_copy(piobuf + 9, data, dcnt);
+ if (dd->flags & QIB_USE_SPCL_TRIG) {
+ u32 spcl_off = (pnum >= dd->piobcnt2k) ? 2047 : 1023;
+
+ qib_flush_wc();
+ __raw_writel(0xaebecede, piobuf + spcl_off);
+ }
+ qib_flush_wc();
+ qib_sendbuf_done(dd, pnum);
+ /* and re-enable hdr check */
+ dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_ENAB1, NULL);
+}
+
+/*
+ * _start packet gets sent twice at start, _done gets sent twice at end
+ */
+static void qib_autoneg_7322_send(struct qib_pportdata *ppd, int which)
+{
+ struct qib_devdata *dd = ppd->dd;
+ static u32 swapped;
+ u32 dw, i, hcnt, dcnt, *data;
+ static u32 hdr[7] = { 0xf002ffff, 0x48ffff, 0x6400abba };
+ static u32 madpayload_start[0x40] = {
+ 0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
+ 0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x1, 0x1388, 0x15e, 0x1, /* rest 0's */
+ };
+ static u32 madpayload_done[0x40] = {
+ 0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
+ 0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x40000001, 0x1388, 0x15e, /* rest 0's */
+ };
+
+ dcnt = ARRAY_SIZE(madpayload_start);
+ hcnt = ARRAY_SIZE(hdr);
+ if (!swapped) {
+ /* for maintainability, do it at runtime */
+ for (i = 0; i < hcnt; i++) {
+ dw = (__force u32) cpu_to_be32(hdr[i]);
+ hdr[i] = dw;
+ }
+ for (i = 0; i < dcnt; i++) {
+ dw = (__force u32) cpu_to_be32(madpayload_start[i]);
+ madpayload_start[i] = dw;
+ dw = (__force u32) cpu_to_be32(madpayload_done[i]);
+ madpayload_done[i] = dw;
+ }
+ swapped = 1;
+ }
+
+ data = which ? madpayload_done : madpayload_start;
+
+ autoneg_7322_sendpkt(ppd, hdr, dcnt, data);
+ qib_read_kreg64(dd, kr_scratch);
+ udelay(2);
+ autoneg_7322_sendpkt(ppd, hdr, dcnt, data);
+ qib_read_kreg64(dd, kr_scratch);
+ udelay(2);
+}
+
+/*
+ * Do the absolute minimum to cause an IB speed change, and make it
+ * ready, but don't actually trigger the change. The caller will
+ * do that when ready (if link is in Polling training state, it will
+ * happen immediately, otherwise when link next goes down)
+ *
+ * This routine should only be used as part of the DDR autonegotiation
+ * code for devices that are not compliant with IB 1.2 (or code that
+ * fixes things up for same).
+ *
+ * When the link has gone down and autoneg is enabled, or autoneg has
+ * failed and we give up until next time, we set both speeds, and
+ * then we want IBTA enabled as well as "use max enabled speed".
+ */
+static void set_7322_ibspeed_fast(struct qib_pportdata *ppd, u32 speed)
+{
+ u64 newctrlb;
+ newctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_SPEED_MASK |
+ IBA7322_IBC_IBTA_1_2_MASK |
+ IBA7322_IBC_MAX_SPEED_MASK);
+
+ if (speed & (speed - 1)) /* multiple speeds */
+ newctrlb |= (speed << IBA7322_IBC_SPEED_LSB) |
+ IBA7322_IBC_IBTA_1_2_MASK |
+ IBA7322_IBC_MAX_SPEED_MASK;
+ else
+ newctrlb |= speed == QIB_IB_QDR ?
+ IBA7322_IBC_SPEED_QDR | IBA7322_IBC_IBTA_1_2_MASK :
+ ((speed == QIB_IB_DDR ?
+ IBA7322_IBC_SPEED_DDR : IBA7322_IBC_SPEED_SDR));
+
+ if (newctrlb == ppd->cpspec->ibcctrl_b)
+ return;
+
+ ppd->cpspec->ibcctrl_b = newctrlb;
+ qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
+ qib_write_kreg(ppd->dd, kr_scratch, 0);
+}
+
+/*
+ * This routine is only used when we are not talking to another
+ * IB 1.2-compliant device that we think can do DDR.
+ * (This includes all existing switch chips as of Oct 2007.)
+ * 1.2-compliant devices go directly to DDR prior to reaching INIT
+ */
+static void try_7322_autoneg(struct qib_pportdata *ppd)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags |= QIBL_IB_AUTONEG_INPROG;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ qib_autoneg_7322_send(ppd, 0);
+ set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
+ qib_7322_mini_pcs_reset(ppd);
+ /* 2 msec is minimum length of a poll cycle */
+ schedule_delayed_work(&ppd->cpspec->autoneg_work,
+ msecs_to_jiffies(2));
+}
+
+/*
+ * Handle the empirically determined mechanism for auto-negotiation
+ * of DDR speed with switches.
+ */
+static void autoneg_7322_work(struct work_struct *work)
+{
+ struct qib_pportdata *ppd;
+ struct qib_devdata *dd;
+ u64 startms;
+ u32 i;
+ unsigned long flags;
+
+ ppd = container_of(work, struct qib_chippport_specific,
+ autoneg_work.work)->ppd;
+ dd = ppd->dd;
+
+ startms = jiffies_to_msecs(jiffies);
+
+ /*
+	 * Busy-wait for this first part; it should be at most a
+ * few hundred usec, since we scheduled ourselves for 2msec.
+ */
+ for (i = 0; i < 25; i++) {
+ if (SYM_FIELD(ppd->lastibcstat, IBCStatusA_0, LinkState)
+ == IB_7322_LT_STATE_POLLQUIET) {
+ qib_set_linkstate(ppd, QIB_IB_LINKDOWN_DISABLE);
+ break;
+ }
+ udelay(100);
+ }
+
+ if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
+ goto done; /* we got there early or told to stop */
+
+	/* we expect this to time out */
+ if (wait_event_timeout(ppd->cpspec->autoneg_wait,
+ !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
+ msecs_to_jiffies(90)))
+ goto done;
+ qib_7322_mini_pcs_reset(ppd);
+
+	/* we expect this to time out */
+ if (wait_event_timeout(ppd->cpspec->autoneg_wait,
+ !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
+ msecs_to_jiffies(1700)))
+ goto done;
+ qib_7322_mini_pcs_reset(ppd);
+
+ set_7322_ibspeed_fast(ppd, QIB_IB_SDR);
+
+ /*
+ * Wait up to 250 msec for link to train and get to INIT at DDR;
+ * this should terminate early.
+ */
+ wait_event_timeout(ppd->cpspec->autoneg_wait,
+ !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
+ msecs_to_jiffies(250));
+done:
+ if (ppd->lflags & QIBL_IB_AUTONEG_INPROG) {
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
+ if (ppd->cpspec->autoneg_tries == AUTONEG_TRIES) {
+ ppd->lflags |= QIBL_IB_AUTONEG_FAILED;
+ ppd->cpspec->autoneg_tries = 0;
+ }
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
+ }
+}
+
+/*
+ * This routine is used to request IPG set in the QLogic switch.
+ * Only called if r1.
+ */
+static void try_7322_ipg(struct qib_pportdata *ppd)
+{
+ struct qib_ibport *ibp = &ppd->ibport_data;
+ struct ib_mad_send_buf *send_buf;
+ struct ib_mad_agent *agent;
+ struct ib_smp *smp;
+ unsigned delay;
+ int ret;
+
+ agent = ibp->send_agent;
+ if (!agent)
+ goto retry;
+
+ send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR,
+ IB_MGMT_MAD_DATA, GFP_ATOMIC);
+ if (IS_ERR(send_buf))
+ goto retry;
+
+ if (!ibp->smi_ah) {
+ struct ib_ah_attr attr;
+ struct ib_ah *ah;
+
+ memset(&attr, 0, sizeof attr);
+ attr.dlid = be16_to_cpu(IB_LID_PERMISSIVE);
+ attr.port_num = ppd->port;
+ ah = ib_create_ah(ibp->qp0->ibqp.pd, &attr);
+ if (IS_ERR(ah))
+ ret = -EINVAL;
+ else {
+ send_buf->ah = ah;
+ ibp->smi_ah = to_iah(ah);
+ ret = 0;
+ }
+ } else {
+ send_buf->ah = &ibp->smi_ah->ibah;
+ ret = 0;
+ }
+
+ smp = send_buf->mad;
+ smp->base_version = IB_MGMT_BASE_VERSION;
+ smp->mgmt_class = IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE;
+ smp->class_version = 1;
+ smp->method = IB_MGMT_METHOD_SEND;
+ smp->hop_cnt = 1;
+ smp->attr_id = QIB_VENDOR_IPG;
+ smp->attr_mod = 0;
+
+ if (!ret)
+ ret = ib_post_send_mad(send_buf, NULL);
+ if (ret)
+ ib_free_send_mad(send_buf);
+retry:
+ delay = 2 << ppd->cpspec->ipg_tries;
+ schedule_delayed_work(&ppd->cpspec->ipg_work, msecs_to_jiffies(delay));
+}
+
+/*
+ * Timeout handler for setting IPG.
+ * Only called if r1.
+ */
+static void ipg_7322_work(struct work_struct *work)
+{
+ struct qib_pportdata *ppd;
+
+ ppd = container_of(work, struct qib_chippport_specific,
+ ipg_work.work)->ppd;
+ if ((ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED | QIBL_LINKACTIVE))
+ && ++ppd->cpspec->ipg_tries <= 10)
+ try_7322_ipg(ppd);
+}
+
+static u32 qib_7322_iblink_state(u64 ibcs)
+{
+ u32 state = (u32)SYM_FIELD(ibcs, IBCStatusA_0, LinkState);
+
+ switch (state) {
+ case IB_7322_L_STATE_INIT:
+ state = IB_PORT_INIT;
+ break;
+ case IB_7322_L_STATE_ARM:
+ state = IB_PORT_ARMED;
+ break;
+ case IB_7322_L_STATE_ACTIVE:
+ /* fall through */
+ case IB_7322_L_STATE_ACT_DEFER:
+ state = IB_PORT_ACTIVE;
+ break;
+ default: /* fall through */
+ case IB_7322_L_STATE_DOWN:
+ state = IB_PORT_DOWN;
+ break;
+ }
+ return state;
+}
+
+/* returns the IBTA port state, rather than the IBC link training state */
+static u8 qib_7322_phys_portstate(u64 ibcs)
+{
+ u8 state = (u8)SYM_FIELD(ibcs, IBCStatusA_0, LinkTrainingState);
+ return qib_7322_physportstate[state];
+}
+
+static int qib_7322_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
+{
+ int ret = 0, symadj = 0;
+ unsigned long flags;
+ int mult;
+
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags &= ~QIBL_IB_FORCE_NOTIFY;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+
+ /* Update our picture of width and speed from chip */
+ if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) {
+ ppd->link_speed_active = QIB_IB_QDR;
+ mult = 4;
+ } else if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedActive)) {
+ ppd->link_speed_active = QIB_IB_DDR;
+ mult = 2;
+ } else {
+ ppd->link_speed_active = QIB_IB_SDR;
+ mult = 1;
+ }
+ if (ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) {
+ ppd->link_width_active = IB_WIDTH_4X;
+ mult *= 4;
+ } else
+ ppd->link_width_active = IB_WIDTH_1X;
+ ppd->delay_mult = ib_rate_to_delay[mult_to_ib_rate(mult)];
+
+ if (!ibup) {
+ u64 clr;
+
+ /* Link went down. */
+ /* do IPG MAD again after linkdown, even if last time failed */
+ ppd->cpspec->ipg_tries = 0;
+ clr = qib_read_kreg_port(ppd, krp_ibcstatus_b) &
+ (SYM_MASK(IBCStatusB_0, heartbeat_timed_out) |
+ SYM_MASK(IBCStatusB_0, heartbeat_crosstalk));
+ if (clr)
+ qib_write_kreg_port(ppd, krp_ibcstatus_b, clr);
+ if (!(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
+ QIBL_IB_AUTONEG_INPROG)))
+ set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
+ if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
+ /* unlock the Tx settings, speed may change */
+ qib_write_kreg_port(ppd, krp_tx_deemph_override,
+ SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
+ reset_tx_deemphasis_override));
+ qib_cancel_sends(ppd);
+ /* on link down, ensure sane pcs state */
+ qib_7322_mini_pcs_reset(ppd);
+ spin_lock_irqsave(&ppd->sdma_lock, flags);
+ if (__qib_sdma_running(ppd))
+ __qib_sdma_process_event(ppd,
+ qib_sdma_event_e70_go_idle);
+ spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+ }
+ clr = read_7322_creg32_port(ppd, crp_iblinkdown);
+ if (clr == ppd->cpspec->iblnkdownsnap)
+ ppd->cpspec->iblnkdowndelta++;
+ } else {
+ if (qib_compat_ddr_negotiate &&
+ !(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
+ QIBL_IB_AUTONEG_INPROG)) &&
+ ppd->link_speed_active == QIB_IB_SDR &&
+ (ppd->link_speed_enabled & QIB_IB_DDR)
+ && ppd->cpspec->autoneg_tries < AUTONEG_TRIES) {
+ /* we are SDR, and auto-negotiation enabled */
+ ++ppd->cpspec->autoneg_tries;
+ if (!ppd->cpspec->ibdeltainprog) {
+ ppd->cpspec->ibdeltainprog = 1;
+ ppd->cpspec->ibsymdelta +=
+ read_7322_creg32_port(ppd,
+ crp_ibsymbolerr) -
+ ppd->cpspec->ibsymsnap;
+ ppd->cpspec->iblnkerrdelta +=
+ read_7322_creg32_port(ppd,
+ crp_iblinkerrrecov) -
+ ppd->cpspec->iblnkerrsnap;
+ }
+ try_7322_autoneg(ppd);
+ ret = 1; /* no other IB status change processing */
+ } else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
+ ppd->link_speed_active == QIB_IB_SDR) {
+ qib_autoneg_7322_send(ppd, 1);
+ set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
+ qib_7322_mini_pcs_reset(ppd);
+ udelay(2);
+ ret = 1; /* no other IB status change processing */
+ } else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
+ (ppd->link_speed_active & QIB_IB_DDR)) {
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags &= ~(QIBL_IB_AUTONEG_INPROG |
+ QIBL_IB_AUTONEG_FAILED);
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ ppd->cpspec->autoneg_tries = 0;
+ /* re-enable SDR, for next link down */
+ set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
+ wake_up(&ppd->cpspec->autoneg_wait);
+ symadj = 1;
+ } else if (ppd->lflags & QIBL_IB_AUTONEG_FAILED) {
+ /*
+ * Clear autoneg failure flag, and do setup
+ * so we'll try next time link goes down and
+ * back to INIT (possibly connected to a
+ * different device).
+ */
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ ppd->cpspec->ibcctrl_b |= IBA7322_IBC_IBTA_1_2_MASK;
+ symadj = 1;
+ }
+ if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
+ symadj = 1;
+ if (ppd->dd->cspec->r1 && ppd->cpspec->ipg_tries <= 10)
+ try_7322_ipg(ppd);
+ if (!ppd->cpspec->recovery_init)
+ setup_7322_link_recovery(ppd, 0);
+ ppd->cpspec->qdr_dfe_time = jiffies +
+ msecs_to_jiffies(QDR_DFE_DISABLE_DELAY);
+ }
+ ppd->cpspec->ibmalfusesnap = 0;
+ ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
+ crp_errlink);
+ }
+ if (symadj) {
+ ppd->cpspec->iblnkdownsnap =
+ read_7322_creg32_port(ppd, crp_iblinkdown);
+ if (ppd->cpspec->ibdeltainprog) {
+ ppd->cpspec->ibdeltainprog = 0;
+ ppd->cpspec->ibsymdelta += read_7322_creg32_port(ppd,
+ crp_ibsymbolerr) - ppd->cpspec->ibsymsnap;
+ ppd->cpspec->iblnkerrdelta += read_7322_creg32_port(ppd,
+ crp_iblinkerrrecov) - ppd->cpspec->iblnkerrsnap;
+ }
+ } else if (!ibup && qib_compat_ddr_negotiate &&
+ !ppd->cpspec->ibdeltainprog &&
+ !(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
+ ppd->cpspec->ibdeltainprog = 1;
+ ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
+ crp_ibsymbolerr);
+ ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
+ crp_iblinkerrrecov);
+ }
+
+ if (!ret)
+ qib_setup_7322_setextled(ppd, ibup);
+ return ret;
+}
+
+/*
+ * Does read/modify/write to appropriate registers to
+ * set output and direction bits selected by mask.
+ * These are in their canonical positions (e.g. lsb of
+ * dir will end up in D48 of extctrl on existing chips).
+ * Returns contents of GP Inputs.
+ */
+static int gpio_7322_mod(struct qib_devdata *dd, u32 out, u32 dir, u32 mask)
+{
+ u64 read_val, new_out;
+ unsigned long flags;
+
+ if (mask) {
+ /* some bits being written, lock access to GPIO */
+ dir &= mask;
+ out &= mask;
+ spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
+ dd->cspec->extctrl &= ~((u64)mask << SYM_LSB(EXTCtrl, GPIOOe));
+ dd->cspec->extctrl |= ((u64) dir << SYM_LSB(EXTCtrl, GPIOOe));
+ new_out = (dd->cspec->gpio_out & ~mask) | out;
+
+ qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
+ qib_write_kreg(dd, kr_gpio_out, new_out);
+ dd->cspec->gpio_out = new_out;
+ spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
+ }
+ /*
+ * It is unlikely that a read at this time would get valid
+ * data on a pin whose direction line was set in the same
+ * call to this function. We include the read here because
+ * that allows us to potentially combine a change on one pin with
+ * a read on another, and because the old code did something like
+ * this.
+ */
+ read_val = qib_read_kreg64(dd, kr_extstatus);
+ return SYM_FIELD(read_val, EXTStatus, GPIOIn);
+}
+
+/* Enable writes to config EEPROM, if possible. Returns previous state */
+static int qib_7322_eeprom_wen(struct qib_devdata *dd, int wen)
+{
+ int prev_wen;
+ u32 mask;
+
+ mask = 1 << QIB_EEPROM_WEN_NUM;
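+	/*
+	 * Read-only call first (mask == 0) to sample the pin; the WEN
+	 * pin is active-low, hence the inversion here and the 0-vs-mask
+	 * choice below.
+	 */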
+ prev_wen = ~gpio_7322_mod(dd, 0, 0, 0) >> QIB_EEPROM_WEN_NUM;
+ gpio_7322_mod(dd, wen ? 0 : mask, mask, mask);
+
+ return prev_wen & 1;
+}
+
+/*
+ * Read fundamental info we need to use the chip. These are
+ * the registers that describe chip capabilities, and are
+ * saved in shadow registers.
+ */
+static void get_7322_chip_params(struct qib_devdata *dd)
+{
+ u64 val;
+ u32 piobufs;
+ int mtu;
+
+ dd->palign = qib_read_kreg32(dd, kr_pagealign);
+
+ dd->uregbase = qib_read_kreg32(dd, kr_userregbase);
+
+ dd->rcvtidcnt = qib_read_kreg32(dd, kr_rcvtidcnt);
+ dd->rcvtidbase = qib_read_kreg32(dd, kr_rcvtidbase);
+ dd->rcvegrbase = qib_read_kreg32(dd, kr_rcvegrbase);
+ dd->piobufbase = qib_read_kreg64(dd, kr_sendpiobufbase);
+ dd->pio2k_bufbase = dd->piobufbase & 0xffffffff;
+
+ val = qib_read_kreg64(dd, kr_sendpiobufcnt);
+ dd->piobcnt2k = val & ~0U;
+ dd->piobcnt4k = val >> 32;
+ val = qib_read_kreg64(dd, kr_sendpiosize);
+ dd->piosize2k = val & ~0U;
+ dd->piosize4k = val >> 32;
+
+ mtu = ib_mtu_enum_to_int(qib_ibmtu);
+ if (mtu == -1)
+ mtu = QIB_DEFAULT_MTU;
+ dd->pport[0].ibmtu = (u32)mtu;
+ dd->pport[1].ibmtu = (u32)mtu;
+
+ /* these may be adjusted in init_chip_wc_pat() */
+ dd->pio2kbase = (u32 __iomem *)
+ ((char __iomem *) dd->kregbase + dd->pio2k_bufbase);
+ dd->pio4kbase = (u32 __iomem *)
+ ((char __iomem *) dd->kregbase +
+ (dd->piobufbase >> 32));
+ /*
+ * 4K buffers take 2 pages; we use roundup just to be
+ * paranoid; we calculate it once here, rather than on
+	 * every buf allocate.
+ */
+ dd->align4k = ALIGN(dd->piosize4k, dd->palign);
+
+ piobufs = dd->piobcnt4k + dd->piobcnt2k + NUM_VL15_BUFS;
+
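+	/* each 64-bit pioavail register covers 32 buffers (2 bits each) */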
+ dd->pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) /
+ (sizeof(u64) * BITS_PER_BYTE / 2);
+}
+
+/*
+ * The chip base addresses in cspec and cpspec have to be set
+ * after possible init_chip_wc_pat(), rather than in
+ * get_7322_chip_params(), so split out as separate function
+ */
+static void qib_7322_set_baseaddrs(struct qib_devdata *dd)
+{
+ u32 cregbase;
+ cregbase = qib_read_kreg32(dd, kr_counterregbase);
+
+ dd->cspec->cregbase = (u64 __iomem *)(cregbase +
+ (char __iomem *)dd->kregbase);
+
+ dd->egrtidbase = (u64 __iomem *)
+ ((char __iomem *) dd->kregbase + dd->rcvegrbase);
+
+ /* port registers are defined as relative to base of chip */
+ dd->pport[0].cpspec->kpregbase =
+ (u64 __iomem *)((char __iomem *)dd->kregbase);
+ dd->pport[1].cpspec->kpregbase =
+ (u64 __iomem *)(dd->palign +
+ (char __iomem *)dd->kregbase);
+ dd->pport[0].cpspec->cpregbase =
+ (u64 __iomem *)(qib_read_kreg_port(&dd->pport[0],
+ kr_counterregbase) + (char __iomem *)dd->kregbase);
+ dd->pport[1].cpspec->cpregbase =
+ (u64 __iomem *)(qib_read_kreg_port(&dd->pport[1],
+ kr_counterregbase) + (char __iomem *)dd->kregbase);
+}
+
+/*
+ * This is a fairly special-purpose observer, so we only support
+ * the port-specific parts of SendCtrl.
+ */
+
+#define SENDCTRL_SHADOWED (SYM_MASK(SendCtrl_0, SendEnable) | \
+ SYM_MASK(SendCtrl_0, SDmaEnable) | \
+ SYM_MASK(SendCtrl_0, SDmaIntEnable) | \
+ SYM_MASK(SendCtrl_0, SDmaSingleDescriptor) | \
+ SYM_MASK(SendCtrl_0, SDmaHalt) | \
+ SYM_MASK(SendCtrl_0, IBVLArbiterEn) | \
+ SYM_MASK(SendCtrl_0, ForceCreditUpToDate))
+
+static int sendctrl_hook(struct qib_devdata *dd,
+ const struct diag_observer *op, u32 offs,
+ u64 *data, u64 mask, int only_32)
+{
+ unsigned long flags;
+ unsigned idx;
+ unsigned pidx;
+ struct qib_pportdata *ppd = NULL;
+ u64 local_data, all_bits;
+
+ /*
+ * The fixed correspondence between Physical ports and pports is
+ * severed. We need to hunt for the ppd that corresponds
+ * to the offset we got. And we have to do that without admitting
+ * we know the stride, apparently.
+ */
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ u64 __iomem *psptr;
+ u32 psoffs;
+
+ ppd = dd->pport + pidx;
+ if (!ppd->cpspec->kpregbase)
+ continue;
+
+ psptr = ppd->cpspec->kpregbase + krp_sendctrl;
+ psoffs = (u32) (psptr - dd->kregbase) * sizeof(*psptr);
+ if (psoffs == offs)
+ break;
+ }
+
+ /* If pport is not being managed by driver, just avoid shadows. */
+ if (pidx >= dd->num_pports)
+ ppd = NULL;
+
+ /* In any case, "idx" is flat index in kreg space */
+ idx = offs / sizeof(u64);
+
+ all_bits = ~0ULL;
+ if (only_32)
+ all_bits >>= 32;
+
+ spin_lock_irqsave(&dd->sendctrl_lock, flags);
+ if (!ppd || (mask & all_bits) != all_bits) {
+ /*
+ * At least some mask bits are zero, so we need
+ * to read. The judgement call is whether from
+ * reg or shadow. First-cut: read reg, and complain
+ * if any bits which should be shadowed are different
+ * from their shadowed value.
+ */
+ if (only_32)
+ local_data = (u64)qib_read_kreg32(dd, idx);
+ else
+ local_data = qib_read_kreg64(dd, idx);
+ *data = (local_data & ~mask) | (*data & mask);
+ }
+ if (mask) {
+ /*
+ * At least some mask bits are one, so we need
+ * to write, but only shadow some bits.
+ */
+ u64 sval, tval; /* Shadowed, transient */
+
+ /*
+ * New shadow val is bits we don't want to touch,
+ * ORed with bits we do, that are intended for shadow.
+ */
+ if (ppd) {
+ sval = ppd->p_sendctrl & ~mask;
+ sval |= *data & SENDCTRL_SHADOWED & mask;
+ ppd->p_sendctrl = sval;
+ } else
+ sval = *data & SENDCTRL_SHADOWED & mask;
+ tval = sval | (*data & ~SENDCTRL_SHADOWED & mask);
+ qib_write_kreg(dd, idx, tval);
+		qib_write_kreg(dd, kr_scratch, 0ULL);
+ }
+ spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
+ return only_32 ? 4 : 8;
+}
+
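+/* each observer watches just the single 8-byte SendCtrl register for its port */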
+static const struct diag_observer sendctrl_0_observer = {
+ sendctrl_hook, KREG_IDX(SendCtrl_0) * sizeof(u64),
+ KREG_IDX(SendCtrl_0) * sizeof(u64)
+};
+
+static const struct diag_observer sendctrl_1_observer = {
+ sendctrl_hook, KREG_IDX(SendCtrl_1) * sizeof(u64),
+ KREG_IDX(SendCtrl_1) * sizeof(u64)
+};
+
+static ushort sdma_fetch_prio = 8;
+module_param_named(sdma_fetch_prio, sdma_fetch_prio, ushort, S_IRUGO);
+MODULE_PARM_DESC(sdma_fetch_prio, "SDMA descriptor fetch priority");
+
+/* Besides logging QSFP events, we set appropriate TxDDS values */
+static void init_txdds_table(struct qib_pportdata *ppd, int override);
+
+static void qsfp_7322_event(struct work_struct *work)
+{
+ struct qib_qsfp_data *qd;
+ struct qib_pportdata *ppd;
+ u64 pwrup;
+ int ret;
+ u32 le2;
+
+ qd = container_of(work, struct qib_qsfp_data, work);
+ ppd = qd->ppd;
+ pwrup = qd->t_insert + msecs_to_jiffies(QSFP_PWR_LAG_MSEC);
+
+ /*
+	 * Some QSFPs not only do not respond until the full power-up
+ * time, but may behave badly if we try. So hold off responding
+ * to insertion.
+ */
+ while (1) {
+ u64 now = get_jiffies_64();
+ if (time_after64(now, pwrup))
+ break;
+ msleep(1);
+ }
+ ret = qib_refresh_qsfp_cache(ppd, &qd->cache);
+ /*
+ * Need to change LE2 back to defaults if we couldn't
+ * read the cable type (to handle cable swaps), so do this
+ * even on failure to read cable information. We don't
+ * get here for QME, so IS_QME check not needed here.
+ */
+ le2 = (!ret && qd->cache.atten[1] >= qib_long_atten &&
+ !ppd->dd->cspec->r1 && QSFP_IS_CU(qd->cache.tech)) ?
+ LE2_5m : LE2_DEFAULT;
+ ibsd_wr_allchans(ppd, 13, (le2 << 7), BMASK(9, 7));
+ init_txdds_table(ppd, 0);
+}
+
+/*
+ * There is little we can do but complain to the user if QSFP
+ * initialization fails.
+ */
+static void qib_init_7322_qsfp(struct qib_pportdata *ppd)
+{
+ unsigned long flags;
+ struct qib_qsfp_data *qd = &ppd->cpspec->qsfp_data;
+ struct qib_devdata *dd = ppd->dd;
+ u64 mod_prs_bit = QSFP_GPIO_MOD_PRS_N;
+
+ mod_prs_bit <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
+ qd->ppd = ppd;
+ qib_qsfp_init(qd, qsfp_7322_event);
+ spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
+ dd->cspec->extctrl |= (mod_prs_bit << SYM_LSB(EXTCtrl, GPIOInvert));
+ dd->cspec->gpio_mask |= mod_prs_bit;
+ qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
+ qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
+ spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
+}
+
+/*
+ * called at device initialization time, and also if the txselect
+ * module parameter is changed. This is used for cables that don't
+ * have valid QSFP EEPROMs (not present, or attenuation is zero).
+ * We initialize to the default, then if there is a specific
+ * unit,port match, we use that (and set it immediately, for the
+ * current speed, if the link is at INIT or better).
+ * String format is "default# unit#,port#=# ... u,p=#", separators must
+ * be a SPACE character. A newline terminates. The u,p=# tuples may
+ * optionally have "u,p=#,#", where the final # is the H1 value.
+ * The last specific match is used (actually, all are used, but last
+ * one is the one that winds up set); if none at all, fall back on default.
+ */
+static void set_no_qsfp_atten(struct qib_devdata *dd, int change)
+{
+ char *nxt, *str;
+ u32 pidx, unit, port, deflt, h1;
+ unsigned long val;
+ int any = 0, seth1;
+
+ str = txselect_list;
+
+ /* default number is validated in setup_txselect() */
+ deflt = simple_strtoul(str, &nxt, 0);
+ for (pidx = 0; pidx < dd->num_pports; ++pidx)
+ dd->pport[pidx].cpspec->no_eep = deflt;
+
+ while (*nxt && nxt[1]) {
+ str = ++nxt;
+ unit = simple_strtoul(str, &nxt, 0);
+ if (nxt == str || !*nxt || *nxt != ',') {
+ while (*nxt && *nxt++ != ' ') /* skip to next, if any */
+ ;
+ continue;
+ }
+ str = ++nxt;
+ port = simple_strtoul(str, &nxt, 0);
+ if (nxt == str || *nxt != '=') {
+ while (*nxt && *nxt++ != ' ') /* skip to next, if any */
+ ;
+ continue;
+ }
+ str = ++nxt;
+ val = simple_strtoul(str, &nxt, 0);
+ if (nxt == str) {
+ while (*nxt && *nxt++ != ' ') /* skip to next, if any */
+ ;
+ continue;
+ }
+ if (val >= TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)
+ continue;
+ seth1 = 0;
+ h1 = 0; /* gcc thinks it might be used uninitted */
+ if (*nxt == ',' && nxt[1]) {
+ str = ++nxt;
+ h1 = (u32)simple_strtoul(str, &nxt, 0);
+ if (nxt == str)
+ while (*nxt && *nxt++ != ' ') /* skip */
+ ;
+ else
+ seth1 = 1;
+ }
+ for (pidx = 0; dd->unit == unit && pidx < dd->num_pports;
+ ++pidx) {
+ struct qib_pportdata *ppd = &dd->pport[pidx];
+
+ if (ppd->port != port || !ppd->link_speed_supported)
+ continue;
+ ppd->cpspec->no_eep = val;
+ /* now change the IBC and serdes, overriding generic */
+ init_txdds_table(ppd, 1);
+ any++;
+ }
+ if (*nxt == '\n')
+ break; /* done */
+ }
+ if (change && !any) {
+ /* no specific setting, use the default.
+ * Change the IBC and serdes, but since it's
+ * general, don't override specific settings.
+ */
+ for (pidx = 0; pidx < dd->num_pports; ++pidx)
+ if (dd->pport[pidx].link_speed_supported)
+ init_txdds_table(&dd->pport[pidx], 0);
+ }
+}
+
+/* handle the txselect parameter changing */
+static int setup_txselect(const char *str, struct kernel_param *kp)
+{
+ struct qib_devdata *dd;
+ unsigned long val;
+ char *n;
+ if (strlen(str) >= MAX_ATTEN_LEN) {
+ printk(KERN_INFO QIB_DRV_NAME " txselect_values string "
+ "too long\n");
+ return -ENOSPC;
+ }
+ val = simple_strtoul(str, &n, 0);
+ if (n == str || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)) {
+ printk(KERN_INFO QIB_DRV_NAME
+		       " txselect_values must start with a number < %d\n",
+ TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ);
+ return -EINVAL;
+ }
+ strcpy(txselect_list, str);
+
+ list_for_each_entry(dd, &qib_dev_list, list)
+ if (dd->deviceid == PCI_DEVICE_ID_QLOGIC_IB_7322)
+ set_no_qsfp_atten(dd, 1);
+ return 0;
+}
+
+/*
+ * Write the final few registers that depend on some of the
+ * init setup. Done late in init, just before bringing up
+ * the serdes.
+ */
+static int qib_late_7322_initreg(struct qib_devdata *dd)
+{
+ int ret = 0, n;
+ u64 val;
+
+ qib_write_kreg(dd, kr_rcvhdrentsize, dd->rcvhdrentsize);
+ qib_write_kreg(dd, kr_rcvhdrsize, dd->rcvhdrsize);
+ qib_write_kreg(dd, kr_rcvhdrcnt, dd->rcvhdrcnt);
+ qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys);
+ val = qib_read_kreg64(dd, kr_sendpioavailaddr);
+ if (val != dd->pioavailregs_phys) {
+ qib_dev_err(dd, "Catastrophic software error, "
+ "SendPIOAvailAddr written as %lx, "
+ "read back as %llx\n",
+ (unsigned long) dd->pioavailregs_phys,
+ (unsigned long long) val);
+ ret = -EINVAL;
+ }
+
+ n = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
+ qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_KERN, NULL);
+ /* driver sends get pkey, lid, etc. checking also, to catch bugs */
+ qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_ENAB1, NULL);
+
+ qib_register_observer(dd, &sendctrl_0_observer);
+ qib_register_observer(dd, &sendctrl_1_observer);
+
+ dd->control &= ~QLOGIC_IB_C_SDMAFETCHPRIOEN;
+ qib_write_kreg(dd, kr_control, dd->control);
+ /*
+ * Set SendDmaFetchPriority and init Tx params, including
+ * QSFP handler on boards that have QSFP.
+ * First set our default attenuation entry for cables that
+ * don't have valid attenuation.
+ */
+ set_no_qsfp_atten(dd, 0);
+ for (n = 0; n < dd->num_pports; ++n) {
+ struct qib_pportdata *ppd = dd->pport + n;
+
+ qib_write_kreg_port(ppd, krp_senddmaprioritythld,
+ sdma_fetch_prio & 0xf);
+ /* Initialize qsfp if present on board. */
+ if (dd->flags & QIB_HAS_QSFP)
+ qib_init_7322_qsfp(ppd);
+ }
+ dd->control |= QLOGIC_IB_C_SDMAFETCHPRIOEN;
+ qib_write_kreg(dd, kr_control, dd->control);
+
+ return ret;
+}
+
+/* per IB port errors. */
+#define SENDCTRL_PIBP (MASK_ACROSS(0, 1) | MASK_ACROSS(3, 3) | \
+ MASK_ACROSS(8, 15))
+#define RCVCTRL_PIBP (MASK_ACROSS(0, 17) | MASK_ACROSS(39, 41))
+#define ERRS_PIBP (MASK_ACROSS(57, 58) | MASK_ACROSS(54, 54) | \
+ MASK_ACROSS(36, 49) | MASK_ACROSS(29, 34) | MASK_ACROSS(14, 17) | \
+ MASK_ACROSS(0, 11))
+
+/*
+ * Write the initialization per-port registers that need to be done at
+ * driver load and after reset completes (i.e., that aren't done as part
+ * of other init procedures called from qib_init.c).
+ * Some of these should be redundant on reset, but play safe.
+ */
+static void write_7322_init_portregs(struct qib_pportdata *ppd)
+{
+ u64 val;
+ int i;
+
+ if (!ppd->link_speed_supported) {
+ /* no buffer credits for this port */
+ for (i = 1; i < 8; i++)
+ qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0);
+ qib_write_kreg_port(ppd, krp_ibcctrl_b, 0);
+ qib_write_kreg(ppd->dd, kr_scratch, 0);
+ return;
+ }
+
+ /*
+ * Set the number of supported virtual lanes in IBC,
+ * for flow control packet handling on unsupported VLs
+ */
+ val = qib_read_kreg_port(ppd, krp_ibsdtestiftx);
+ val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, VL_CAP);
+ val |= (u64)(ppd->vls_supported - 1) <<
+ SYM_LSB(IB_SDTEST_IF_TX_0, VL_CAP);
+ qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
+
+ qib_write_kreg_port(ppd, krp_rcvbthqp, QIB_KD_QP);
+
+ /* enable tx header checking */
+ qib_write_kreg_port(ppd, krp_sendcheckcontrol, IBA7322_SENDCHK_PKEY |
+ IBA7322_SENDCHK_BTHQP | IBA7322_SENDCHK_SLID |
+ IBA7322_SENDCHK_RAW_IPV6 | IBA7322_SENDCHK_MINSZ);
+
+ qib_write_kreg_port(ppd, krp_ncmodectrl,
+ SYM_MASK(IBNCModeCtrl_0, ScrambleCapLocal));
+
+ /*
+ * Unconditionally clear the bufmask bits. If SDMA is
+ * enabled, we'll set them appropriately later.
+ */
+ qib_write_kreg_port(ppd, krp_senddmabufmask0, 0);
+ qib_write_kreg_port(ppd, krp_senddmabufmask1, 0);
+ qib_write_kreg_port(ppd, krp_senddmabufmask2, 0);
+ if (ppd->dd->cspec->r1)
+ ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, ForceCreditUpToDate);
+}
+
+/*
+ * Write the initialization per-device registers that need to be done at
+ * driver load and after reset completes (i.e., that aren't done as part
+ * of other init procedures called from qib_init.c). Also write per-port
+ * registers that are affected by overall device config, such as QP mapping.
+ * Some of these should be redundant on reset, but play safe.
+ */
+static void write_7322_initregs(struct qib_devdata *dd)
+{
+ struct qib_pportdata *ppd;
+ int i, pidx;
+ u64 val;
+
+ /* Set Multicast QPs received by port 2 to map to context one. */
+ qib_write_kreg(dd, KREG_IDX(RcvQPMulticastContext_1), 1);
+
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ unsigned n, regno;
+ unsigned long flags;
+
+ if (!dd->qpn_mask || !dd->pport[pidx].link_speed_supported)
+ continue;
+
+ ppd = &dd->pport[pidx];
+
+ /* be paranoid against later code motion, etc. */
+ spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
+ ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvQPMapEnable);
+ spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
+
+ /* Initialize QP to context mapping */
+ regno = krp_rcvqpmaptable;
+ val = 0;
+ if (dd->num_pports > 1)
+ n = dd->first_user_ctxt / dd->num_pports;
+ else
+ n = dd->first_user_ctxt - 1;
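+		/* 32 map entries; six 5-bit context numbers packed per register */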
+ for (i = 0; i < 32; ) {
+ unsigned ctxt;
+
+ if (dd->num_pports > 1)
+ ctxt = (i % n) * dd->num_pports + pidx;
+ else if (i % n)
+ ctxt = (i % n) + 1;
+ else
+ ctxt = ppd->hw_pidx;
+ val |= ctxt << (5 * (i % 6));
+ i++;
+ if (i % 6 == 0) {
+ qib_write_kreg_port(ppd, regno, val);
+ val = 0;
+ regno++;
+ }
+ }
+ qib_write_kreg_port(ppd, regno, val);
+ }
+
+ /*
+	 * Set up interrupt mitigation for kernel contexts, but
+ * not user contexts (user contexts use interrupts when
+ * stalled waiting for any packet, so want those interrupts
+ * right away).
+ */
+ for (i = 0; i < dd->first_user_ctxt; i++) {
+ dd->cspec->rcvavail_timeout[i] = rcv_int_timeout;
+ qib_write_kreg(dd, kr_rcvavailtimeout + i, rcv_int_timeout);
+ }
+
+ /*
+ * Initialize as (disabled) rcvflow tables. Application code
+	 * will set up each flow as it uses the flow.
+ * Doesn't clear any of the error bits that might be set.
+ */
+ val = TIDFLOW_ERRBITS; /* these are W1C */
+ for (i = 0; i < dd->ctxtcnt; i++) {
+ int flow;
+ for (flow = 0; flow < NUM_TIDFLOWS_CTXT; flow++)
+ qib_write_ureg(dd, ur_rcvflowtable+flow, val, i);
+ }
+
+ /*
+	 * Dual-port cards init to dual-port recovery, single-port cards to
+	 * the one port. Dual-port cards may later adjust to 1 port,
+	 * and then back to dual port if both ports are connected.
+	 */
+ if (dd->num_pports)
+ setup_7322_link_recovery(dd->pport, dd->num_pports > 1);
+}
+
+static int qib_init_7322_variables(struct qib_devdata *dd)
+{
+ struct qib_pportdata *ppd;
+ unsigned features, pidx, sbufcnt;
+ int ret, mtu;
+ u32 sbufs, updthresh;
+
+ /* pport structs are contiguous, allocated after devdata */
+ ppd = (struct qib_pportdata *)(dd + 1);
+ dd->pport = ppd;
+ ppd[0].dd = dd;
+ ppd[1].dd = dd;
+
+ dd->cspec = (struct qib_chip_specific *)(ppd + 2);
+
+ ppd[0].cpspec = (struct qib_chippport_specific *)(dd->cspec + 1);
+ ppd[1].cpspec = &ppd[0].cpspec[1];
+ ppd[0].cpspec->ppd = &ppd[0]; /* for autoneg_7322_work() */
+ ppd[1].cpspec->ppd = &ppd[1]; /* for autoneg_7322_work() */
+
+ spin_lock_init(&dd->cspec->rcvmod_lock);
+ spin_lock_init(&dd->cspec->gpio_lock);
+
+ /* we haven't yet set QIB_PRESENT, so use read directly */
+ dd->revision = readq(&dd->kregbase[kr_revision]);
+
+ if ((dd->revision & 0xffffffffU) == 0xffffffffU) {
+ qib_dev_err(dd, "Revision register read failure, "
+ "giving up initialization\n");
+ ret = -ENODEV;
+ goto bail;
+ }
+ dd->flags |= QIB_PRESENT; /* now register routines work */
+
+ dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMajor);
+ dd->minrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMinor);
+ dd->cspec->r1 = dd->minrev == 1;
+
+ get_7322_chip_params(dd);
+ features = qib_7322_boardname(dd);
+
+ /* now that piobcnt2k and 4k set, we can allocate these */
+ sbufcnt = dd->piobcnt2k + dd->piobcnt4k +
+ NUM_VL15_BUFS + BITS_PER_LONG - 1;
+ sbufcnt /= BITS_PER_LONG;
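+	/* sbufcnt is now the number of longs needed for the per-buffer check bitmaps */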
+ dd->cspec->sendchkenable = kmalloc(sbufcnt *
+ sizeof(*dd->cspec->sendchkenable), GFP_KERNEL);
+ dd->cspec->sendgrhchk = kmalloc(sbufcnt *
+ sizeof(*dd->cspec->sendgrhchk), GFP_KERNEL);
+ dd->cspec->sendibchk = kmalloc(sbufcnt *
+ sizeof(*dd->cspec->sendibchk), GFP_KERNEL);
+ if (!dd->cspec->sendchkenable || !dd->cspec->sendgrhchk ||
+ !dd->cspec->sendibchk) {
+ qib_dev_err(dd, "Failed allocation for hdrchk bitmaps\n");
+ ret = -ENOMEM;
+ goto bail;
+ }
+
+ ppd = dd->pport;
+
+ /*
+ * GPIO bits for TWSI data and clock,
+ * used for serial EEPROM.
+ */
+ dd->gpio_sda_num = _QIB_GPIO_SDA_NUM;
+ dd->gpio_scl_num = _QIB_GPIO_SCL_NUM;
+ dd->twsi_eeprom_dev = QIB_TWSI_EEPROM_DEV;
+
+ dd->flags |= QIB_HAS_INTX | QIB_HAS_LINK_LATENCY |
+ QIB_NODMA_RTAIL | QIB_HAS_VLSUPP | QIB_HAS_HDRSUPP |
+ QIB_HAS_THRESH_UPDATE |
+ (sdma_idle_cnt ? QIB_HAS_SDMA_TIMEOUT : 0);
+ dd->flags |= qib_special_trigger ?
+ QIB_USE_SPCL_TRIG : QIB_HAS_SEND_DMA;
+
+ /*
+ * Setup initial values. These may change when PAT is enabled, but
+ * we need these to do initial chip register accesses.
+ */
+ qib_7322_set_baseaddrs(dd);
+
+ mtu = ib_mtu_enum_to_int(qib_ibmtu);
+ if (mtu == -1)
+ mtu = QIB_DEFAULT_MTU;
+
+ dd->cspec->int_enable_mask = QIB_I_BITSEXTANT;
+ /* all hwerrors become interrupts, unless special purposed */
+ dd->cspec->hwerrmask = ~0ULL;
+ /* link_recovery setup causes these errors, so ignore them,
+ * other than clearing them when they occur */
+ dd->cspec->hwerrmask &=
+ ~(SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_0) |
+ SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_1) |
+ HWE_MASK(LATriggered));
+
+ for (pidx = 0; pidx < NUM_IB_PORTS; ++pidx) {
+ struct qib_chippport_specific *cp = ppd->cpspec;
+ ppd->link_speed_supported = features & PORT_SPD_CAP;
+ features >>= PORT_SPD_CAP_SHIFT;
+ if (!ppd->link_speed_supported) {
+ /* single port mode (7340, or configured) */
+ dd->skip_kctxt_mask |= 1 << pidx;
+ if (pidx == 0) {
+ /* Make sure port is disabled. */
+ qib_write_kreg_port(ppd, krp_rcvctrl, 0);
+ qib_write_kreg_port(ppd, krp_ibcctrl_a, 0);
+ ppd[0] = ppd[1];
+ dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask,
+ IBSerdesPClkNotDetectMask_0)
+ | SYM_MASK(HwErrMask,
+ SDmaMemReadErrMask_0));
+ dd->cspec->int_enable_mask &= ~(
+ SYM_MASK(IntMask, SDmaCleanupDoneMask_0) |
+ SYM_MASK(IntMask, SDmaIdleIntMask_0) |
+ SYM_MASK(IntMask, SDmaProgressIntMask_0) |
+ SYM_MASK(IntMask, SDmaIntMask_0) |
+ SYM_MASK(IntMask, ErrIntMask_0) |
+ SYM_MASK(IntMask, SendDoneIntMask_0));
+ } else {
+ /* Make sure port is disabled. */
+ qib_write_kreg_port(ppd, krp_rcvctrl, 0);
+ qib_write_kreg_port(ppd, krp_ibcctrl_a, 0);
+ dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask,
+ IBSerdesPClkNotDetectMask_1)
+ | SYM_MASK(HwErrMask,
+ SDmaMemReadErrMask_1));
+ dd->cspec->int_enable_mask &= ~(
+ SYM_MASK(IntMask, SDmaCleanupDoneMask_1) |
+ SYM_MASK(IntMask, SDmaIdleIntMask_1) |
+ SYM_MASK(IntMask, SDmaProgressIntMask_1) |
+ SYM_MASK(IntMask, SDmaIntMask_1) |
+ SYM_MASK(IntMask, ErrIntMask_1) |
+ SYM_MASK(IntMask, SendDoneIntMask_1));
+ }
+ continue;
+ }
+
+ dd->num_pports++;
+ qib_init_pportdata(ppd, dd, pidx, dd->num_pports);
+
+ ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
+ ppd->link_width_enabled = IB_WIDTH_4X;
+ ppd->link_speed_enabled = ppd->link_speed_supported;
+ /*
+		 * Set the initial values to reasonable defaults; they will be
+		 * set for real when the link is up.
+ */
+ ppd->link_width_active = IB_WIDTH_4X;
+ ppd->link_speed_active = QIB_IB_SDR;
+ ppd->delay_mult = ib_rate_to_delay[IB_RATE_10_GBPS];
+ switch (qib_num_cfg_vls) {
+ case 1:
+ ppd->vls_supported = IB_VL_VL0;
+ break;
+ case 2:
+ ppd->vls_supported = IB_VL_VL0_1;
+ break;
+ default:
+ qib_devinfo(dd->pcidev,
+ "Invalid num_vls %u, using 4 VLs\n",
+ qib_num_cfg_vls);
+ qib_num_cfg_vls = 4;
+ /* fall through */
+ case 4:
+ ppd->vls_supported = IB_VL_VL0_3;
+ break;
+ case 8:
+ if (mtu <= 2048)
+ ppd->vls_supported = IB_VL_VL0_7;
+ else {
+ qib_devinfo(dd->pcidev,
+ "Invalid num_vls %u for MTU %d "
+ ", using 4 VLs\n",
+ qib_num_cfg_vls, mtu);
+ ppd->vls_supported = IB_VL_VL0_3;
+ qib_num_cfg_vls = 4;
+ }
+ break;
+ }
+ ppd->vls_operational = ppd->vls_supported;
+
+ init_waitqueue_head(&cp->autoneg_wait);
+ INIT_DELAYED_WORK(&cp->autoneg_work,
+ autoneg_7322_work);
+ if (ppd->dd->cspec->r1)
+ INIT_DELAYED_WORK(&cp->ipg_work, ipg_7322_work);
+
+ /*
+ * For Mez and similar cards, no qsfp info, so do
+ * the "cable info" setup here. Can be overridden
+ * in adapter-specific routines.
+ */
+ if (!(ppd->dd->flags & QIB_HAS_QSFP)) {
+ if (!IS_QMH(ppd->dd) && !IS_QME(ppd->dd))
+ qib_devinfo(ppd->dd->pcidev, "IB%u:%u: "
+ "Unknown mezzanine card type\n",
+ dd->unit, ppd->port);
+ cp->h1_val = IS_QMH(dd) ? H1_FORCE_QMH : H1_FORCE_QME;
+ /*
+ * Choose center value as default tx serdes setting
+ * until changed through module parameter.
+ */
+ ppd->cpspec->no_eep = IS_QMH(dd) ?
+ TXDDS_TABLE_SZ + 2 : TXDDS_TABLE_SZ + 4;
+ } else
+ cp->h1_val = H1_FORCE_VAL;
+
+ /* Avoid writes to chip for mini_init */
+ if (!qib_mini_init)
+ write_7322_init_portregs(ppd);
+
+ init_timer(&cp->chase_timer);
+ cp->chase_timer.function = reenable_chase;
+ cp->chase_timer.data = (unsigned long)ppd;
+
+ ppd++;
+ }
+
+ dd->rcvhdrentsize = QIB_RCVHDR_ENTSIZE;
+ dd->rcvhdrsize = QIB_DFLT_RCVHDRSIZE;
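+	/* receive header flags (RHF) occupy the last 2 dwords of each entry */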
+ dd->rhf_offset = dd->rcvhdrentsize - sizeof(u64) / sizeof(u32);
+
+ /* we always allocate at least 2048 bytes for eager buffers */
+ dd->rcvegrbufsize = max(mtu, 2048);
+
+ qib_7322_tidtemplate(dd);
+
+ /*
+ * We can request a receive interrupt for 1 or
+ * more packets from current offset.
+ */
+ dd->rhdrhead_intr_off =
+ (u64) rcv_int_count << IBA7322_HDRHEAD_PKTINT_SHIFT;
+
+ /* setup the stats timer; the add_timer is done at end of init */
+ init_timer(&dd->stats_timer);
+ dd->stats_timer.function = qib_get_7322_faststats;
+ dd->stats_timer.data = (unsigned long) dd;
+
+ dd->ureg_align = 0x10000; /* 64KB alignment */
+
+ dd->piosize2kmax_dwords = dd->piosize2k >> 2;
+
+ qib_7322_config_ctxts(dd);
+ qib_set_ctxtcnt(dd);
+
+ if (qib_wc_pat) {
+ ret = init_chip_wc_pat(dd, NUM_VL15_BUFS * dd->align4k);
+ if (ret)
+ goto bail;
+ }
+ qib_7322_set_baseaddrs(dd); /* set chip access pointers now */
+
+ ret = 0;
+ if (qib_mini_init)
+ goto bail;
+ if (!dd->num_pports) {
+ qib_dev_err(dd, "No ports enabled, giving up initialization\n");
+ goto bail; /* no error, so can still figure out why err */
+ }
+
+ write_7322_initregs(dd);
+ ret = qib_create_ctxts(dd);
+ init_7322_cntrnames(dd);
+
+ updthresh = 8U; /* update threshold */
+
+	/* Use all of the 4KB buffers for the kernel SDMA; zero if !SDMA.
+	 * Reserve the update threshold amount for other kernel use, such
+	 * as sending SMI, MAD, and ACKs, or 3, whichever is greater,
+	 * unless we aren't enabling SDMA, in which case we want to use
+	 * all the 4k bufs for the kernel.
+	 * If this were less than the update threshold, we could wait
+	 * a long time for an update. Coded this way because we
+	 * sometimes change the update threshold for various reasons,
+	 * and we want this to remain robust.
+	 */
+ if (dd->flags & QIB_HAS_SEND_DMA) {
+ dd->cspec->sdmabufcnt = dd->piobcnt4k;
+ sbufs = updthresh > 3 ? updthresh : 3;
+ } else {
+ dd->cspec->sdmabufcnt = 0;
+ sbufs = dd->piobcnt4k;
+ }
+ dd->cspec->lastbuf_for_pio = dd->piobcnt2k + dd->piobcnt4k -
+ dd->cspec->sdmabufcnt;
+ dd->lastctxt_piobuf = dd->cspec->lastbuf_for_pio - sbufs;
+ dd->cspec->lastbuf_for_pio--; /* range is <= , not < */
+ dd->pbufsctxt = (dd->cfgctxts > dd->first_user_ctxt) ?
+ dd->lastctxt_piobuf / (dd->cfgctxts - dd->first_user_ctxt) : 0;
+
+ /*
+ * If we have 16 user contexts, we will have 7 sbufs
+ * per context, so reduce the update threshold to match. We
+ * want to update before we actually run out, at low pbufs/ctxt
+ * so give ourselves some margin.
+ */
+ if (dd->pbufsctxt >= 2 && dd->pbufsctxt - 2 < updthresh)
+ updthresh = dd->pbufsctxt - 2;
+ dd->cspec->updthresh_dflt = updthresh;
+ dd->cspec->updthresh = updthresh;
+
+ /* before full enable, no interrupts, no locking needed */
+ dd->sendctrl |= ((updthresh & SYM_RMASK(SendCtrl, AvailUpdThld))
+ << SYM_LSB(SendCtrl, AvailUpdThld)) |
+ SYM_MASK(SendCtrl, SendBufAvailPad64Byte);
+
+ dd->psxmitwait_supported = 1;
+ dd->psxmitwait_check_rate = QIB_7322_PSXMITWAIT_CHECK_RATE;
+bail:
+ if (!dd->ctxtcnt)
+ dd->ctxtcnt = 1; /* for other initialization code */
+
+ return ret;
+}
+
+static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *ppd, u64 pbc,
+ u32 *pbufnum)
+{
+ u32 first, last, plen = pbc & QIB_PBC_LENGTH_MASK;
+ struct qib_devdata *dd = ppd->dd;
+
+ /* last is same for 2k and 4k, because we use 4k if all 2k busy */
+ if (pbc & PBC_7322_VL15_SEND) {
+ first = dd->piobcnt2k + dd->piobcnt4k + ppd->hw_pidx;
+ last = first;
+ } else {
+ if ((plen + 1) > dd->piosize2kmax_dwords)
+ first = dd->piobcnt2k;
+ else
+ first = 0;
+ last = dd->cspec->lastbuf_for_pio;
+ }
+ return qib_getsendbuf_range(dd, pbufnum, first, last);
+}
+
+static void qib_set_cntr_7322_sample(struct qib_pportdata *ppd, u32 intv,
+ u32 start)
+{
+ qib_write_kreg_port(ppd, krp_psinterval, intv);
+ qib_write_kreg_port(ppd, krp_psstart, start);
+}
+
+/*
+ * Must be called with sdma_lock held, or before init finished.
+ */
+static void qib_sdma_set_7322_desc_cnt(struct qib_pportdata *ppd, unsigned cnt)
+{
+ qib_write_kreg_port(ppd, krp_senddmadesccnt, cnt);
+}
+
+static struct sdma_set_state_action sdma_7322_action_table[] = {
+ [qib_sdma_state_s00_hw_down] = {
+ .go_s99_running_tofalse = 1,
+ .op_enable = 0,
+ .op_intenable = 0,
+ .op_halt = 0,
+ .op_drain = 0,
+ },
+ [qib_sdma_state_s10_hw_start_up_wait] = {
+ .op_enable = 0,
+ .op_intenable = 1,
+ .op_halt = 1,
+ .op_drain = 0,
+ },
+ [qib_sdma_state_s20_idle] = {
+ .op_enable = 1,
+ .op_intenable = 1,
+ .op_halt = 1,
+ .op_drain = 0,
+ },
+ [qib_sdma_state_s30_sw_clean_up_wait] = {
+ .op_enable = 0,
+ .op_intenable = 1,
+ .op_halt = 1,
+ .op_drain = 0,
+ },
+ [qib_sdma_state_s40_hw_clean_up_wait] = {
+ .op_enable = 1,
+ .op_intenable = 1,
+ .op_halt = 1,
+ .op_drain = 0,
+ },
+ [qib_sdma_state_s50_hw_halt_wait] = {
+ .op_enable = 1,
+ .op_intenable = 1,
+ .op_halt = 1,
+ .op_drain = 1,
+ },
+ [qib_sdma_state_s99_running] = {
+ .op_enable = 1,
+ .op_intenable = 1,
+ .op_halt = 0,
+ .op_drain = 0,
+ .go_s99_running_totrue = 1,
+ },
+};
+
+static void qib_7322_sdma_init_early(struct qib_pportdata *ppd)
+{
+ ppd->sdma_state.set_state_action = sdma_7322_action_table;
+}
+
+static int init_sdma_7322_regs(struct qib_pportdata *ppd)
+{
+ struct qib_devdata *dd = ppd->dd;
+ unsigned lastbuf, erstbuf;
+ u64 senddmabufmask[3] = { 0 };
+ int n, ret = 0;
+
+ qib_write_kreg_port(ppd, krp_senddmabase, ppd->sdma_descq_phys);
+ qib_sdma_7322_setlengen(ppd);
+ qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */
+ qib_write_kreg_port(ppd, krp_senddmareloadcnt, sdma_idle_cnt);
+ qib_write_kreg_port(ppd, krp_senddmadesccnt, 0);
+ qib_write_kreg_port(ppd, krp_senddmaheadaddr, ppd->sdma_head_phys);
+
+ if (dd->num_pports)
+ n = dd->cspec->sdmabufcnt / dd->num_pports; /* no remainder */
+ else
+ n = dd->cspec->sdmabufcnt; /* failsafe for init */
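+	/*
+	 * Each port takes a contiguous slice at the top of PIO buffer
+	 * space: on a dual-port card, port 1 gets the lower half of the
+	 * SDMA region and port 2 the upper half; a single-port card gets
+	 * the whole region.
+	 */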
+ erstbuf = (dd->piobcnt2k + dd->piobcnt4k) -
+ ((dd->num_pports == 1 || ppd->port == 2) ? n :
+ dd->cspec->sdmabufcnt);
+ lastbuf = erstbuf + n;
+
+ ppd->sdma_state.first_sendbuf = erstbuf;
+ ppd->sdma_state.last_sendbuf = lastbuf;
+ for (; erstbuf < lastbuf; ++erstbuf) {
+ unsigned word = erstbuf / BITS_PER_LONG;
+ unsigned bit = erstbuf & (BITS_PER_LONG - 1);
+
+ BUG_ON(word >= 3);
+ senddmabufmask[word] |= 1ULL << bit;
+ }
+ qib_write_kreg_port(ppd, krp_senddmabufmask0, senddmabufmask[0]);
+ qib_write_kreg_port(ppd, krp_senddmabufmask1, senddmabufmask[1]);
+ qib_write_kreg_port(ppd, krp_senddmabufmask2, senddmabufmask[2]);
+ return ret;
+}
+
+/* sdma_lock must be held */
+static u16 qib_sdma_7322_gethead(struct qib_pportdata *ppd)
+{
+ struct qib_devdata *dd = ppd->dd;
+ int sane;
+ int use_dmahead;
+ u16 swhead;
+ u16 swtail;
+ u16 cnt;
+ u16 hwhead;
+
+ use_dmahead = __qib_sdma_running(ppd) &&
+ (dd->flags & QIB_HAS_SDMA_TIMEOUT);
+retry:
+ hwhead = use_dmahead ?
+ (u16) le64_to_cpu(*ppd->sdma_head_dma) :
+ (u16) qib_read_kreg_port(ppd, krp_senddmahead);
+
+ swhead = ppd->sdma_descq_head;
+ swtail = ppd->sdma_descq_tail;
+ cnt = ppd->sdma_descq_cnt;
+
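+	/*
+	 * Sanity check: the hardware head must lie between the software
+	 * head and tail in the descriptor ring (allowing for wrap);
+	 * anything else means the value we read is stale or bogus.
+	 */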
+ if (swhead < swtail)
+ /* not wrapped */
+ sane = (hwhead >= swhead) & (hwhead <= swtail);
+ else if (swhead > swtail)
+ /* wrapped around */
+ sane = ((hwhead >= swhead) && (hwhead < cnt)) ||
+ (hwhead <= swtail);
+ else
+ /* empty */
+ sane = (hwhead == swhead);
+
+ if (unlikely(!sane)) {
+ if (use_dmahead) {
+ /* try one more time, directly from the register */
+ use_dmahead = 0;
+ goto retry;
+ }
+ /* proceed as if no progress */
+ hwhead = swhead;
+ }
+
+ return hwhead;
+}
+
+static int qib_sdma_7322_busy(struct qib_pportdata *ppd)
+{
+ u64 hwstatus = qib_read_kreg_port(ppd, krp_senddmastatus);
+
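+	/*
+	 * Busy unless the engine is internally halted with an empty
+	 * scoreboard and no halt or drain still in progress.
+	 */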
+ return (hwstatus & SYM_MASK(SendDmaStatus_0, ScoreBoardDrainInProg)) ||
+ (hwstatus & SYM_MASK(SendDmaStatus_0, HaltInProg)) ||
+ !(hwstatus & SYM_MASK(SendDmaStatus_0, InternalSDmaHalt)) ||
+ !(hwstatus & SYM_MASK(SendDmaStatus_0, ScbEmpty));
+}
+
+/*
+ * Compute the amount of delay before sending the next packet if the
+ * port's send rate differs from the static rate set for the QP.
+ * The delay affects the next packet and the amount of the delay is
+ * based on the length of this packet.
+ */
+static u32 qib_7322_setpbc_control(struct qib_pportdata *ppd, u32 plen,
+ u8 srate, u8 vl)
+{
+ u8 snd_mult = ppd->delay_mult;
+ u8 rcv_mult = ib_rate_to_delay[srate];
+ u32 ret;
+
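+	/* Delay is needed only when the QP static rate is slower than the port */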
+ ret = rcv_mult > snd_mult ? ((plen + 1) >> 1) * snd_mult : 0;
+
+ /* Indicate VL15, else set the VL in the control word */
+ if (vl == 15)
+ ret |= PBC_7322_VL15_SEND_CTRL;
+ else
+ ret |= vl << PBC_VL_NUM_LSB;
+ ret |= ((u32)(ppd->hw_pidx)) << PBC_PORT_SEL_LSB;
+
+ return ret;
+}
+
+/*
+ * Enable the per-port VL15 send buffers for use.
+ * They follow the rest of the buffers, without a config parameter.
+ * This was in initregs, but that is done before the shadow
+ * is set up, and this has to be done after the shadow is
+ * set up.
+ */
+static void qib_7322_initvl15_bufs(struct qib_devdata *dd)
+{
+ unsigned vl15bufs;
+
+ vl15bufs = dd->piobcnt2k + dd->piobcnt4k;
+ qib_chg_pioavailkernel(dd, vl15bufs, NUM_VL15_BUFS,
+ TXCHK_CHG_TYPE_KERN, NULL);
+}
+
+static void qib_7322_init_ctxt(struct qib_ctxtdata *rcd)
+{
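+	/*
+	 * The first NUM_IB_PORTS contexts are the per-port kernel
+	 * contexts; on a dual-port card they split the KCTXT0_EGRCNT
+	 * eager buffers between them.  User contexts follow, each with
+	 * the chip-specific eager count and a TID base after the kernel's.
+	 */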
+ if (rcd->ctxt < NUM_IB_PORTS) {
+ if (rcd->dd->num_pports > 1) {
+ rcd->rcvegrcnt = KCTXT0_EGRCNT / 2;
+ rcd->rcvegr_tid_base = rcd->ctxt ? rcd->rcvegrcnt : 0;
+ } else {
+ rcd->rcvegrcnt = KCTXT0_EGRCNT;
+ rcd->rcvegr_tid_base = 0;
+ }
+ } else {
+ rcd->rcvegrcnt = rcd->dd->cspec->rcvegrcnt;
+ rcd->rcvegr_tid_base = KCTXT0_EGRCNT +
+ (rcd->ctxt - NUM_IB_PORTS) * rcd->rcvegrcnt;
+ }
+}
+
+#define QTXSLEEPS 5000
+static void qib_7322_txchk_change(struct qib_devdata *dd, u32 start,
+ u32 len, u32 which, struct qib_ctxtdata *rcd)
+{
+ int i;
+ const int last = start + len - 1;
+ const int lastr = last / BITS_PER_LONG;
+ u32 sleeps = 0;
+ int wait = rcd != NULL;
+ unsigned long flags;
+
+ while (wait) {
+ unsigned long shadow;
+ int cstart, previ = -1;
+
+ /*
+ * when flipping from kernel to user, we can't change
+ * the checking type if the buffer is allocated to the
+ * driver. It's OK the other direction, because it's
+ * from close, and we have just disarm'ed all the
+ * buffers. All the kernel to kernel changes are also
+ * OK.
+ */
+ for (cstart = start; cstart <= last; cstart++) {
+ i = ((2 * cstart) + QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT)
+ / BITS_PER_LONG;
+ if (i != previ) {
+ shadow = (unsigned long)
+ le64_to_cpu(dd->pioavailregs_dma[i]);
+ previ = i;
+ }
+ if (test_bit(((2 * cstart) +
+ QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT)
+ % BITS_PER_LONG, &shadow))
+ break;
+ }
+
+ if (cstart > last)
+ break;
+
+ if (sleeps == QTXSLEEPS)
+ break;
+ /* make sure we see an updated copy next time around */
+ sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
+ sleeps++;
+ msleep(1);
+ }
+
+ switch (which) {
+ case TXCHK_CHG_TYPE_DIS1:
+ /*
+ * disable checking on a range; used by diags; just
+ * one buffer, but still written generically
+ */
+ for (i = start; i <= last; i++)
+ clear_bit(i, dd->cspec->sendchkenable);
+ break;
+
+ case TXCHK_CHG_TYPE_ENAB1:
+ /*
+ * (re)enable checking on a range; used by diags; just
+ * one buffer, but still written generically; read
+ * scratch to be sure buffer actually triggered, not
+ * just flushed from processor.
+ */
+ qib_read_kreg32(dd, kr_scratch);
+ for (i = start; i <= last; i++)
+ set_bit(i, dd->cspec->sendchkenable);
+ break;
+
+ case TXCHK_CHG_TYPE_KERN:
+ /* usable by kernel */
+ for (i = start; i <= last; i++) {
+ set_bit(i, dd->cspec->sendibchk);
+ clear_bit(i, dd->cspec->sendgrhchk);
+ }
+ spin_lock_irqsave(&dd->uctxt_lock, flags);
+ /* see if we need to raise avail update threshold */
+ for (i = dd->first_user_ctxt;
+ dd->cspec->updthresh != dd->cspec->updthresh_dflt
+ && i < dd->cfgctxts; i++)
+ if (dd->rcd[i] && dd->rcd[i]->subctxt_cnt &&
+ ((dd->rcd[i]->piocnt / dd->rcd[i]->subctxt_cnt) - 1)
+ < dd->cspec->updthresh_dflt)
+ break;
+ spin_unlock_irqrestore(&dd->uctxt_lock, flags);
+ if (i == dd->cfgctxts) {
+ spin_lock_irqsave(&dd->sendctrl_lock, flags);
+ dd->cspec->updthresh = dd->cspec->updthresh_dflt;
+ dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
+ dd->sendctrl |= (dd->cspec->updthresh &
+ SYM_RMASK(SendCtrl, AvailUpdThld)) <<
+ SYM_LSB(SendCtrl, AvailUpdThld);
+ spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
+ sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
+ }
+ break;
+
+ case TXCHK_CHG_TYPE_USER:
+ /* for user process */
+ for (i = start; i <= last; i++) {
+ clear_bit(i, dd->cspec->sendibchk);
+ set_bit(i, dd->cspec->sendgrhchk);
+ }
+ spin_lock_irqsave(&dd->sendctrl_lock, flags);
+ if (rcd && rcd->subctxt_cnt && ((rcd->piocnt
+ / rcd->subctxt_cnt) - 1) < dd->cspec->updthresh) {
+ dd->cspec->updthresh = (rcd->piocnt /
+ rcd->subctxt_cnt) - 1;
+ dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
+ dd->sendctrl |= (dd->cspec->updthresh &
+ SYM_RMASK(SendCtrl, AvailUpdThld))
+ << SYM_LSB(SendCtrl, AvailUpdThld);
+ spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
+ sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
+ } else
+ spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
+ break;
+
+ default:
+ break;
+ }
+
+ for (i = start / BITS_PER_LONG; which >= 2 && i <= lastr; ++i)
+ qib_write_kreg(dd, kr_sendcheckmask + i,
+ dd->cspec->sendchkenable[i]);
+
+ for (i = start / BITS_PER_LONG; which < 2 && i <= lastr; ++i) {
+ qib_write_kreg(dd, kr_sendgrhcheckmask + i,
+ dd->cspec->sendgrhchk[i]);
+ qib_write_kreg(dd, kr_sendibpktmask + i,
+ dd->cspec->sendibchk[i]);
+ }
+
+ /*
+ * Be sure whatever we did was seen by the chip and acted upon,
+ * before we return. Mostly important for which >= 2.
+ */
+ qib_read_kreg32(dd, kr_scratch);
+}
+
+
+/* useful for trigger analyzers, etc. */
+static void writescratch(struct qib_devdata *dd, u32 val)
+{
+ qib_write_kreg(dd, kr_scratch, val);
+}
+
+/* Dummy for now, use chip regs soon */
+static int qib_7322_tempsense_rd(struct qib_devdata *dd, int regnum)
+{
+ return -ENXIO;
+}
+
+/**
+ * qib_init_iba7322_funcs - set up the chip-specific function pointers
+ * @dev: the pci_dev for qlogic_ib device
+ * @ent: pci_device_id struct for this dev
+ *
+ * Also allocates, inits, and returns the devdata struct for this
+ * device instance
+ *
+ * This is global, and is called directly at init to set up the
+ * chip-specific function pointers for later use.
+ */
+struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct qib_devdata *dd;
+ int ret, i;
+ u32 tabsize, actual_cnt = 0;
+
+ dd = qib_alloc_devdata(pdev,
+ NUM_IB_PORTS * sizeof(struct qib_pportdata) +
+ sizeof(struct qib_chip_specific) +
+ NUM_IB_PORTS * sizeof(struct qib_chippport_specific));
+ if (IS_ERR(dd))
+ goto bail;
+
+ dd->f_bringup_serdes = qib_7322_bringup_serdes;
+ dd->f_cleanup = qib_setup_7322_cleanup;
+ dd->f_clear_tids = qib_7322_clear_tids;
+ dd->f_free_irq = qib_7322_free_irq;
+ dd->f_get_base_info = qib_7322_get_base_info;
+ dd->f_get_msgheader = qib_7322_get_msgheader;
+ dd->f_getsendbuf = qib_7322_getsendbuf;
+ dd->f_gpio_mod = gpio_7322_mod;
+ dd->f_eeprom_wen = qib_7322_eeprom_wen;
+ dd->f_hdrqempty = qib_7322_hdrqempty;
+ dd->f_ib_updown = qib_7322_ib_updown;
+ dd->f_init_ctxt = qib_7322_init_ctxt;
+ dd->f_initvl15_bufs = qib_7322_initvl15_bufs;
+ dd->f_intr_fallback = qib_7322_intr_fallback;
+ dd->f_late_initreg = qib_late_7322_initreg;
+ dd->f_setpbc_control = qib_7322_setpbc_control;
+ dd->f_portcntr = qib_portcntr_7322;
+ dd->f_put_tid = qib_7322_put_tid;
+ dd->f_quiet_serdes = qib_7322_mini_quiet_serdes;
+ dd->f_rcvctrl = rcvctrl_7322_mod;
+ dd->f_read_cntrs = qib_read_7322cntrs;
+ dd->f_read_portcntrs = qib_read_7322portcntrs;
+ dd->f_reset = qib_do_7322_reset;
+ dd->f_init_sdma_regs = init_sdma_7322_regs;
+ dd->f_sdma_busy = qib_sdma_7322_busy;
+ dd->f_sdma_gethead = qib_sdma_7322_gethead;
+ dd->f_sdma_sendctrl = qib_7322_sdma_sendctrl;
+ dd->f_sdma_set_desc_cnt = qib_sdma_set_7322_desc_cnt;
+ dd->f_sdma_update_tail = qib_sdma_update_7322_tail;
+ dd->f_sendctrl = sendctrl_7322_mod;
+ dd->f_set_armlaunch = qib_set_7322_armlaunch;
+ dd->f_set_cntr_sample = qib_set_cntr_7322_sample;
+ dd->f_iblink_state = qib_7322_iblink_state;
+ dd->f_ibphys_portstate = qib_7322_phys_portstate;
+ dd->f_get_ib_cfg = qib_7322_get_ib_cfg;
+ dd->f_set_ib_cfg = qib_7322_set_ib_cfg;
+ dd->f_set_ib_loopback = qib_7322_set_loopback;
+ dd->f_get_ib_table = qib_7322_get_ib_table;
+ dd->f_set_ib_table = qib_7322_set_ib_table;
+ dd->f_set_intr_state = qib_7322_set_intr_state;
+ dd->f_setextled = qib_setup_7322_setextled;
+ dd->f_txchk_change = qib_7322_txchk_change;
+ dd->f_update_usrhead = qib_update_7322_usrhead;
+ dd->f_wantpiobuf_intr = qib_wantpiobuf_7322_intr;
+ dd->f_xgxs_reset = qib_7322_mini_pcs_reset;
+ dd->f_sdma_hw_clean_up = qib_7322_sdma_hw_clean_up;
+ dd->f_sdma_hw_start_up = qib_7322_sdma_hw_start_up;
+ dd->f_sdma_init_early = qib_7322_sdma_init_early;
+ dd->f_writescratch = writescratch;
+ dd->f_tempsense_rd = qib_7322_tempsense_rd;
+ /*
+ * Do remaining PCIe setup and save PCIe values in dd.
+ * Any error printing is already done by the init code.
+ * On return, we have the chip mapped, but chip registers
+ * are not set up until start of qib_init_7322_variables.
+ */
+ ret = qib_pcie_ddinit(dd, pdev, ent);
+ if (ret < 0)
+ goto bail_free;
+
+ /* initialize chip-specific variables */
+ ret = qib_init_7322_variables(dd);
+ if (ret)
+ goto bail_cleanup;
+
+ if (qib_mini_init || !dd->num_pports)
+ goto bail;
+
+ /*
+ * Determine number of vectors we want; depends on port count
+ * and number of configured kernel receive queues actually used.
+ * Should also depend on whether sdma is enabled or not, but
+ * that's such a rare testing case it's not worth worrying about.
+ */
+ tabsize = dd->first_user_ctxt + ARRAY_SIZE(irq_table);
+ for (i = 0; i < tabsize; i++)
+ if ((i < ARRAY_SIZE(irq_table) &&
+ irq_table[i].port <= dd->num_pports) ||
+ (i >= ARRAY_SIZE(irq_table) &&
+ dd->rcd[i - ARRAY_SIZE(irq_table)]))
+ actual_cnt++;
+ tabsize = actual_cnt;
+ dd->cspec->msix_entries = kmalloc(tabsize *
+ sizeof(struct msix_entry), GFP_KERNEL);
+ dd->cspec->msix_arg = kmalloc(tabsize *
+ sizeof(void *), GFP_KERNEL);
+ if (!dd->cspec->msix_entries || !dd->cspec->msix_arg) {
+ qib_dev_err(dd, "No memory for MSIx table\n");
+ tabsize = 0;
+ }
+ for (i = 0; i < tabsize; i++)
+ dd->cspec->msix_entries[i].entry = i;
+
+ if (qib_pcie_params(dd, 8, &tabsize, dd->cspec->msix_entries))
+ qib_dev_err(dd, "Failed to setup PCIe or interrupts; "
+ "continuing anyway\n");
+ /* may be less than we wanted, if not enough available */
+ dd->cspec->num_msix_entries = tabsize;
+
+ /* setup interrupt handler */
+ qib_setup_7322_interrupt(dd, 1);
+
+ /* clear diagctrl register, in case diags were running and crashed */
+ qib_write_kreg(dd, kr_hwdiagctrl, 0);
+
+ goto bail;
+
+bail_cleanup:
+ qib_pcie_ddcleanup(dd);
+bail_free:
+ qib_free_devdata(dd);
+ dd = ERR_PTR(ret);
+bail:
+ return dd;
+}
+
+/*
+ * Set the table entry at the specified index from the specified table.
+ * There are 3 * TXDDS_TABLE_SZ entries in all per port, with the first
+ * TXDDS_TABLE_SZ for SDR, the next for DDR, and the last for QDR.
+ * 'idx' below addresses the correct entry, while its 4 LSBs select the
+ * corresponding entry (one of TXDDS_TABLE_SZ) from the selected table.
+ */
+#define DDS_ENT_AMP_LSB 14
+#define DDS_ENT_MAIN_LSB 9
+#define DDS_ENT_POST_LSB 5
+#define DDS_ENT_PRE_XTRA_LSB 3
+#define DDS_ENT_PRE_LSB 0
+
+/*
+ * Set one entry in the TxDDS table for spec'd port
+ * ridx picks one of the entries, while tp points
+ * to the appropriate table entry.
+ */
+static void set_txdds(struct qib_pportdata *ppd, int ridx,
+ const struct txdds_ent *tp)
+{
+ struct qib_devdata *dd = ppd->dd;
+ u32 pack_ent;
+ int regidx;
+
+ /* Get correct offset in chip-space, and in source table */
+ regidx = KREG_IBPORT_IDX(IBSD_DDS_MAP_TABLE) + ridx;
+ /*
+ * We do not use qib_write_kreg_port() because it was intended
+ * only for registers in the lower "port specific" pages.
+ * So do index calculation by hand.
+ */
+ if (ppd->hw_pidx)
+ regidx += (dd->palign / sizeof(u64));
+
+ pack_ent = tp->amp << DDS_ENT_AMP_LSB;
+ pack_ent |= tp->main << DDS_ENT_MAIN_LSB;
+ pack_ent |= tp->pre << DDS_ENT_PRE_LSB;
+ pack_ent |= tp->post << DDS_ENT_POST_LSB;
+ qib_write_kreg(dd, regidx, pack_ent);
+ /* Prevent back-to-back writes by hitting scratch */
+ qib_write_kreg(ppd->dd, kr_scratch, 0);
+}
+
+static const struct vendor_txdds_ent vendor_txdds[] = {
+ { /* Amphenol 1m 30awg NoEq */
+ { 0x41, 0x50, 0x48 }, "584470002 ",
+ { 10, 0, 0, 5 }, { 10, 0, 0, 9 }, { 7, 1, 0, 13 },
+ },
+ { /* Amphenol 3m 28awg NoEq */
+ { 0x41, 0x50, 0x48 }, "584470004 ",
+ { 0, 0, 0, 8 }, { 0, 0, 0, 11 }, { 0, 1, 7, 15 },
+ },
+ { /* Finisar 3m OM2 Optical */
+ { 0x00, 0x90, 0x65 }, "FCBG410QB1C03-QL",
+ { 0, 0, 0, 3 }, { 0, 0, 0, 4 }, { 0, 0, 0, 13 },
+ },
+ { /* Finisar 30m OM2 Optical */
+ { 0x00, 0x90, 0x65 }, "FCBG410QB1C30-QL",
+ { 0, 0, 0, 1 }, { 0, 0, 0, 5 }, { 0, 0, 0, 11 },
+ },
+ { /* Finisar Default OM2 Optical */
+ { 0x00, 0x90, 0x65 }, NULL,
+ { 0, 0, 0, 2 }, { 0, 0, 0, 5 }, { 0, 0, 0, 12 },
+ },
+ { /* Gore 1m 30awg NoEq */
+ { 0x00, 0x21, 0x77 }, "QSN3300-1 ",
+ { 0, 0, 0, 6 }, { 0, 0, 0, 9 }, { 0, 1, 0, 15 },
+ },
+ { /* Gore 2m 30awg NoEq */
+ { 0x00, 0x21, 0x77 }, "QSN3300-2 ",
+ { 0, 0, 0, 8 }, { 0, 0, 0, 10 }, { 0, 1, 7, 15 },
+ },
+ { /* Gore 1m 28awg NoEq */
+ { 0x00, 0x21, 0x77 }, "QSN3800-1 ",
+ { 0, 0, 0, 6 }, { 0, 0, 0, 8 }, { 0, 1, 0, 15 },
+ },
+ { /* Gore 3m 28awg NoEq */
+ { 0x00, 0x21, 0x77 }, "QSN3800-3 ",
+ { 0, 0, 0, 9 }, { 0, 0, 0, 13 }, { 0, 1, 7, 15 },
+ },
+ { /* Gore 5m 24awg Eq */
+ { 0x00, 0x21, 0x77 }, "QSN7000-5 ",
+ { 0, 0, 0, 7 }, { 0, 0, 0, 9 }, { 0, 1, 3, 15 },
+ },
+ { /* Gore 7m 24awg Eq */
+ { 0x00, 0x21, 0x77 }, "QSN7000-7 ",
+ { 0, 0, 0, 9 }, { 0, 0, 0, 11 }, { 0, 2, 6, 15 },
+ },
+ { /* Gore 5m 26awg Eq */
+ { 0x00, 0x21, 0x77 }, "QSN7600-5 ",
+ { 0, 0, 0, 8 }, { 0, 0, 0, 11 }, { 0, 1, 9, 13 },
+ },
+ { /* Gore 7m 26awg Eq */
+ { 0x00, 0x21, 0x77 }, "QSN7600-7 ",
+ { 0, 0, 0, 8 }, { 0, 0, 0, 11 }, { 10, 1, 8, 15 },
+ },
+ { /* Intersil 12m 24awg Active */
+ { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1224",
+ { 0, 0, 0, 2 }, { 0, 0, 0, 5 }, { 0, 3, 0, 9 },
+ },
+ { /* Intersil 10m 28awg Active */
+ { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1028",
+ { 0, 0, 0, 6 }, { 0, 0, 0, 4 }, { 0, 2, 0, 2 },
+ },
+ { /* Intersil 7m 30awg Active */
+ { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0730",
+ { 0, 0, 0, 6 }, { 0, 0, 0, 4 }, { 0, 1, 0, 3 },
+ },
+ { /* Intersil 5m 32awg Active */
+ { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0532",
+ { 0, 0, 0, 6 }, { 0, 0, 0, 6 }, { 0, 2, 0, 8 },
+ },
+ { /* Intersil Default Active */
+ { 0x00, 0x30, 0xB4 }, NULL,
+ { 0, 0, 0, 6 }, { 0, 0, 0, 5 }, { 0, 2, 0, 5 },
+ },
+ { /* Luxtera 20m Active Optical */
+ { 0x00, 0x25, 0x63 }, NULL,
+ { 0, 0, 0, 5 }, { 0, 0, 0, 8 }, { 0, 2, 0, 12 },
+ },
+ { /* Molex 1M Cu loopback */
+ { 0x00, 0x09, 0x3A }, "74763-0025 ",
+ { 2, 2, 6, 15 }, { 2, 2, 6, 15 }, { 2, 2, 6, 15 },
+ },
+ { /* Molex 2m 28awg NoEq */
+ { 0x00, 0x09, 0x3A }, "74757-2201 ",
+ { 0, 0, 0, 6 }, { 0, 0, 0, 9 }, { 0, 1, 1, 15 },
+ },
+};
+
+static const struct txdds_ent txdds_sdr[TXDDS_TABLE_SZ] = {
+ /* amp, pre, main, post */
+ { 2, 2, 15, 6 }, /* Loopback */
+ { 0, 0, 0, 1 }, /* 2 dB */
+ { 0, 0, 0, 2 }, /* 3 dB */
+ { 0, 0, 0, 3 }, /* 4 dB */
+ { 0, 0, 0, 4 }, /* 5 dB */
+ { 0, 0, 0, 5 }, /* 6 dB */
+ { 0, 0, 0, 6 }, /* 7 dB */
+ { 0, 0, 0, 7 }, /* 8 dB */
+ { 0, 0, 0, 8 }, /* 9 dB */
+ { 0, 0, 0, 9 }, /* 10 dB */
+ { 0, 0, 0, 10 }, /* 11 dB */
+ { 0, 0, 0, 11 }, /* 12 dB */
+ { 0, 0, 0, 12 }, /* 13 dB */
+ { 0, 0, 0, 13 }, /* 14 dB */
+ { 0, 0, 0, 14 }, /* 15 dB */
+ { 0, 0, 0, 15 }, /* 16 dB */
+};
+
+static const struct txdds_ent txdds_ddr[TXDDS_TABLE_SZ] = {
+ /* amp, pre, main, post */
+ { 2, 2, 15, 6 }, /* Loopback */
+ { 0, 0, 0, 8 }, /* 2 dB */
+ { 0, 0, 0, 8 }, /* 3 dB */
+ { 0, 0, 0, 9 }, /* 4 dB */
+ { 0, 0, 0, 9 }, /* 5 dB */
+ { 0, 0, 0, 10 }, /* 6 dB */
+ { 0, 0, 0, 10 }, /* 7 dB */
+ { 0, 0, 0, 11 }, /* 8 dB */
+ { 0, 0, 0, 11 }, /* 9 dB */
+ { 0, 0, 0, 12 }, /* 10 dB */
+ { 0, 0, 0, 12 }, /* 11 dB */
+ { 0, 0, 0, 13 }, /* 12 dB */
+ { 0, 0, 0, 13 }, /* 13 dB */
+ { 0, 0, 0, 14 }, /* 14 dB */
+ { 0, 0, 0, 14 }, /* 15 dB */
+ { 0, 0, 0, 15 }, /* 16 dB */
+};
+
+static const struct txdds_ent txdds_qdr[TXDDS_TABLE_SZ] = {
+ /* amp, pre, main, post */
+ { 2, 2, 15, 6 }, /* Loopback */
+ { 0, 1, 0, 7 }, /* 2 dB (also QMH7342) */
+ { 0, 1, 0, 9 }, /* 3 dB (also QMH7342) */
+ { 0, 1, 0, 11 }, /* 4 dB */
+ { 0, 1, 0, 13 }, /* 5 dB */
+ { 0, 1, 0, 15 }, /* 6 dB */
+ { 0, 1, 3, 15 }, /* 7 dB */
+ { 0, 1, 7, 15 }, /* 8 dB */
+ { 0, 1, 7, 15 }, /* 9 dB */
+ { 0, 1, 8, 15 }, /* 10 dB */
+ { 0, 1, 9, 15 }, /* 11 dB */
+ { 0, 1, 10, 15 }, /* 12 dB */
+ { 0, 2, 6, 15 }, /* 13 dB */
+ { 0, 2, 7, 15 }, /* 14 dB */
+ { 0, 2, 8, 15 }, /* 15 dB */
+ { 0, 2, 9, 15 }, /* 16 dB */
+};
+
+/*
+ * extra entries for use with txselect, for indices >= TXDDS_TABLE_SZ.
+ * These are mostly used for mez cards going through connectors
+ * and backplane traces, but can be used to add other "unusual"
+ * table values as well.
+ */
+static const struct txdds_ent txdds_extra_sdr[TXDDS_EXTRA_SZ] = {
+ /* amp, pre, main, post */
+ { 0, 0, 0, 1 }, /* QMH7342 backplane settings */
+ { 0, 0, 0, 1 }, /* QMH7342 backplane settings */
+ { 0, 0, 0, 2 }, /* QMH7342 backplane settings */
+ { 0, 0, 0, 2 }, /* QMH7342 backplane settings */
+ { 0, 0, 0, 11 }, /* QME7342 backplane settings */
+ { 0, 0, 0, 11 }, /* QME7342 backplane settings */
+ { 0, 0, 0, 11 }, /* QME7342 backplane settings */
+ { 0, 0, 0, 11 }, /* QME7342 backplane settings */
+ { 0, 0, 0, 11 }, /* QME7342 backplane settings */
+ { 0, 0, 0, 11 }, /* QME7342 backplane settings */
+ { 0, 0, 0, 11 }, /* QME7342 backplane settings */
+};
+
+static const struct txdds_ent txdds_extra_ddr[TXDDS_EXTRA_SZ] = {
+ /* amp, pre, main, post */
+ { 0, 0, 0, 7 }, /* QMH7342 backplane settings */
+ { 0, 0, 0, 7 }, /* QMH7342 backplane settings */
+ { 0, 0, 0, 8 }, /* QMH7342 backplane settings */
+ { 0, 0, 0, 8 }, /* QMH7342 backplane settings */
+ { 0, 0, 0, 13 }, /* QME7342 backplane settings */
+ { 0, 0, 0, 13 }, /* QME7342 backplane settings */
+ { 0, 0, 0, 13 }, /* QME7342 backplane settings */
+ { 0, 0, 0, 13 }, /* QME7342 backplane settings */
+ { 0, 0, 0, 13 }, /* QME7342 backplane settings */
+ { 0, 0, 0, 13 }, /* QME7342 backplane settings */
+ { 0, 0, 0, 13 }, /* QME7342 backplane settings */
+};
+
+static const struct txdds_ent txdds_extra_qdr[TXDDS_EXTRA_SZ] = {
+ /* amp, pre, main, post */
+ { 0, 1, 0, 4 }, /* QMH7342 backplane settings */
+ { 0, 1, 0, 5 }, /* QMH7342 backplane settings */
+ { 0, 1, 0, 6 }, /* QMH7342 backplane settings */
+ { 0, 1, 0, 8 }, /* QMH7342 backplane settings */
+ { 0, 1, 12, 10 }, /* QME7342 backplane setting */
+ { 0, 1, 12, 11 }, /* QME7342 backplane setting */
+ { 0, 1, 12, 12 }, /* QME7342 backplane setting */
+ { 0, 1, 12, 14 }, /* QME7342 backplane setting */
+ { 0, 1, 12, 6 }, /* QME7342 backplane setting */
+ { 0, 1, 12, 7 }, /* QME7342 backplane setting */
+ { 0, 1, 12, 8 }, /* QME7342 backplane setting */
+};
+
+static const struct txdds_ent *get_atten_table(const struct txdds_ent *txdds,
+ unsigned atten)
+{
+ /*
+ * The attenuation table starts at 2dB for entry 1,
+ * with entry 0 being the loopback entry.
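+	 * For example, a reported attenuation of 5 dB selects the
+	 * "5 dB" entry at index 4.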
+ */
+ if (atten <= 2)
+ atten = 1;
+ else if (atten > TXDDS_TABLE_SZ)
+ atten = TXDDS_TABLE_SZ - 1;
+ else
+ atten--;
+ return txdds + atten;
+}
+
+/*
+ * if override is set, the module parameter txselect has a value
+ * for this specific port, so use it, rather than our normal mechanism.
+ */
+static void find_best_ent(struct qib_pportdata *ppd,
+ const struct txdds_ent **sdr_dds,
+ const struct txdds_ent **ddr_dds,
+ const struct txdds_ent **qdr_dds, int override)
+{
+ struct qib_qsfp_cache *qd = &ppd->cpspec->qsfp_data.cache;
+ int idx;
+
+ /* Search table of known cables */
+ for (idx = 0; !override && idx < ARRAY_SIZE(vendor_txdds); ++idx) {
+ const struct vendor_txdds_ent *v = vendor_txdds + idx;
+
+ if (!memcmp(v->oui, qd->oui, QSFP_VOUI_LEN) &&
+ (!v->partnum ||
+ !memcmp(v->partnum, qd->partnum, QSFP_PN_LEN))) {
+ *sdr_dds = &v->sdr;
+ *ddr_dds = &v->ddr;
+ *qdr_dds = &v->qdr;
+ return;
+ }
+ }
+
+ /* Lookup serdes setting by cable type and attenuation */
+ if (!override && QSFP_IS_ACTIVE(qd->tech)) {
+ *sdr_dds = txdds_sdr + ppd->dd->board_atten;
+ *ddr_dds = txdds_ddr + ppd->dd->board_atten;
+ *qdr_dds = txdds_qdr + ppd->dd->board_atten;
+ return;
+ }
+
+ if (!override && QSFP_HAS_ATTEN(qd->tech) && (qd->atten[0] ||
+ qd->atten[1])) {
+ *sdr_dds = get_atten_table(txdds_sdr, qd->atten[0]);
+ *ddr_dds = get_atten_table(txdds_ddr, qd->atten[0]);
+ *qdr_dds = get_atten_table(txdds_qdr, qd->atten[1]);
+ return;
+ } else if (ppd->cpspec->no_eep < TXDDS_TABLE_SZ) {
+ /*
+ * If we have no (or incomplete) data from the cable
+ * EEPROM, or no QSFP, or override is set, use the
+		 * module parameter value to index into the attenuation
+ * table.
+ */
+ idx = ppd->cpspec->no_eep;
+ *sdr_dds = &txdds_sdr[idx];
+ *ddr_dds = &txdds_ddr[idx];
+ *qdr_dds = &txdds_qdr[idx];
+ } else if (ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)) {
+ /* similar to above, but index into the "extra" table. */
+ idx = ppd->cpspec->no_eep - TXDDS_TABLE_SZ;
+ *sdr_dds = &txdds_extra_sdr[idx];
+ *ddr_dds = &txdds_extra_ddr[idx];
+ *qdr_dds = &txdds_extra_qdr[idx];
+ } else {
+ /* this shouldn't happen, it's range checked */
+ *sdr_dds = txdds_sdr + qib_long_atten;
+ *ddr_dds = txdds_ddr + qib_long_atten;
+ *qdr_dds = txdds_qdr + qib_long_atten;
+ }
+}
+
+static void init_txdds_table(struct qib_pportdata *ppd, int override)
+{
+ const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds;
+ struct txdds_ent *dds;
+ int idx;
+ int single_ent = 0;
+
+ find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, override);
+
+ /* for mez cards or override, use the selected value for all entries */
+ if (!(ppd->dd->flags & QIB_HAS_QSFP) || override)
+ single_ent = 1;
+
+ /* Fill in the first entry with the best entry found. */
+ set_txdds(ppd, 0, sdr_dds);
+ set_txdds(ppd, TXDDS_TABLE_SZ, ddr_dds);
+ set_txdds(ppd, 2 * TXDDS_TABLE_SZ, qdr_dds);
+ if (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
+ QIBL_LINKACTIVE)) {
+ dds = (struct txdds_ent *)(ppd->link_speed_active ==
+ QIB_IB_QDR ? qdr_dds :
+ (ppd->link_speed_active ==
+ QIB_IB_DDR ? ddr_dds : sdr_dds));
+ write_tx_serdes_param(ppd, dds);
+ }
+
+ /* Fill in the remaining entries with the default table values. */
+ for (idx = 1; idx < ARRAY_SIZE(txdds_sdr); ++idx) {
+ set_txdds(ppd, idx, single_ent ? sdr_dds : txdds_sdr + idx);
+ set_txdds(ppd, idx + TXDDS_TABLE_SZ,
+ single_ent ? ddr_dds : txdds_ddr + idx);
+ set_txdds(ppd, idx + 2 * TXDDS_TABLE_SZ,
+ single_ent ? qdr_dds : txdds_qdr + idx);
+ }
+}
+
+#define KR_AHB_ACC KREG_IDX(ahb_access_ctrl)
+#define KR_AHB_TRANS KREG_IDX(ahb_transaction_reg)
+#define AHB_TRANS_RDY SYM_MASK(ahb_transaction_reg, ahb_rdy)
+#define AHB_ADDR_LSB SYM_LSB(ahb_transaction_reg, ahb_address)
+#define AHB_DATA_LSB SYM_LSB(ahb_transaction_reg, ahb_data)
+#define AHB_WR SYM_MASK(ahb_transaction_reg, write_not_read)
+#define AHB_TRANS_TRIES 10
+
+/*
+ * The chan argument is 0=chan0, 1=chan1, 2=pll, 3=chan2, 4=chan3,
+ * 5=subsystem, which is why most calls have "chan + (chan >> 1)"
+ * for the channel argument.
+ */
+static u32 ahb_mod(struct qib_devdata *dd, int quad, int chan, int addr,
+ u32 data, u32 mask)
+{
+ u32 rd_data, wr_data, sz_mask;
+ u64 trans, acc, prev_acc;
+ u32 ret = 0xBAD0BAD;
+ int tries;
+
+ prev_acc = qib_read_kreg64(dd, KR_AHB_ACC);
+ /* From this point on, make sure we return access */
+ acc = (quad << 1) | 1;
+ qib_write_kreg(dd, KR_AHB_ACC, acc);
+
+ for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
+ trans = qib_read_kreg64(dd, KR_AHB_TRANS);
+ if (trans & AHB_TRANS_RDY)
+ break;
+ }
+ if (tries >= AHB_TRANS_TRIES) {
+ qib_dev_err(dd, "No ahb_rdy in %d tries\n", AHB_TRANS_TRIES);
+ goto bail;
+ }
+
+ /* If mask is not all 1s, we need to read, but different SerDes
+ * entities have different sizes
+ */
+ sz_mask = (1UL << ((quad == 1) ? 32 : 16)) - 1;
+ wr_data = data & mask & sz_mask;
+ if ((~mask & sz_mask) != 0) {
+ trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1);
+ qib_write_kreg(dd, KR_AHB_TRANS, trans);
+
+ for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
+ trans = qib_read_kreg64(dd, KR_AHB_TRANS);
+ if (trans & AHB_TRANS_RDY)
+ break;
+ }
+ if (tries >= AHB_TRANS_TRIES) {
+ qib_dev_err(dd, "No Rd ahb_rdy in %d tries\n",
+ AHB_TRANS_TRIES);
+ goto bail;
+ }
+ /* Re-read in case host split reads and read data first */
+ trans = qib_read_kreg64(dd, KR_AHB_TRANS);
+ rd_data = (uint32_t)(trans >> AHB_DATA_LSB);
+ wr_data |= (rd_data & ~mask & sz_mask);
+ }
+
+ /* If mask is not zero, we need to write. */
+ if (mask & sz_mask) {
+ trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1);
+ trans |= ((uint64_t)wr_data << AHB_DATA_LSB);
+ trans |= AHB_WR;
+ qib_write_kreg(dd, KR_AHB_TRANS, trans);
+
+ for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
+ trans = qib_read_kreg64(dd, KR_AHB_TRANS);
+ if (trans & AHB_TRANS_RDY)
+ break;
+ }
+ if (tries >= AHB_TRANS_TRIES) {
+ qib_dev_err(dd, "No Wr ahb_rdy in %d tries\n",
+ AHB_TRANS_TRIES);
+ goto bail;
+ }
+ }
+ ret = wr_data;
+bail:
+ qib_write_kreg(dd, KR_AHB_ACC, prev_acc);
+ return ret;
+}
+
+static void ibsd_wr_allchans(struct qib_pportdata *ppd, int addr, unsigned data,
+ unsigned mask)
+{
+ struct qib_devdata *dd = ppd->dd;
+ int chan;
+ u32 rbc;
+
+ for (chan = 0; chan < SERDES_CHANS; ++chan) {
+ ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), addr,
+ data, mask);
+ rbc = ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
+ addr, 0, 0);
+ }
+}
+
+static int serdes_7322_init(struct qib_pportdata *ppd)
+{
+ u64 data;
+ u32 le_val;
+
+ /*
+ * Initialize the Tx DDS tables. Also done every QSFP event,
+ * for adapters with QSFP
+ */
+ init_txdds_table(ppd, 0);
+
+ /* ensure no tx overrides from earlier driver loads */
+ qib_write_kreg_port(ppd, krp_tx_deemph_override,
+ SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
+ reset_tx_deemphasis_override));
+
+ /* Patch some SerDes defaults to "Better for IB" */
+ /* Timing Loop Bandwidth: cdr_timing[11:9] = 0 */
+ ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9));
+
+ /* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */
+ ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11));
+ /* Enable LE2: rxle2en_r2a addr 13 bit [6] = 1 */
+ ibsd_wr_allchans(ppd, 13, (1 << 6), (1 << 6));
+
+ /* May be overridden in qsfp_7322_event */
+ le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT;
+ ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7));
+
+ /* enable LE1 adaptation for all but QME, which is disabled */
+ le_val = IS_QME(ppd->dd) ? 0 : 1;
+ ibsd_wr_allchans(ppd, 13, (le_val << 5), (1 << 5));
+
+ /* Clear cmode-override, may be set from older driver */
+ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);
+
+ /* Timing Recovery: rxtapsel addr 5 bits [9:8] = 0 */
+ ibsd_wr_allchans(ppd, 5, (0 << 8), BMASK(9, 8));
+
+ /* setup LoS params; these are subsystem, so chan == 5 */
+ /* LoS filter threshold_count on, ch 0-3, set to 8 */
+ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
+ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4));
+ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11));
+ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4));
+
+ /* LoS filter threshold_count off, ch 0-3, set to 4 */
+ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0));
+ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8));
+ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0));
+ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8));
+
+ /* LoS filter select enabled */
+ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15);
+
+ /* LoS target data: SDR=4, DDR=2, QDR=1 */
+ ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */
+ ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
+ ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */
+
+ data = qib_read_kreg_port(ppd, krp_serdesctrl);
+ qib_write_kreg_port(ppd, krp_serdesctrl, data |
+ SYM_MASK(IBSerdesCtrl_0, RXLOSEN));
+
+	/* rxbistena; set to 0 to avoid effects of it switching later */
+ ibsd_wr_allchans(ppd, 9, 0 << 15, 1 << 15);
+
+ /* Configure 4 DFE taps, and only they adapt */
+ ibsd_wr_allchans(ppd, 16, 0 << 0, BMASK(1, 0));
+
+ /* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */
+ le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac;
+ ibsd_wr_allchans(ppd, 21, le_val, 0xfffe);
+
+ /*
+ * Set receive adaptation mode. SDR and DDR adaptation are
+ * always on, and QDR is initially enabled; later disabled.
+ */
+ qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL);
+ qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL);
+ qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
+ ppd->dd->cspec->r1 ?
+ QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN);
+ ppd->cpspec->qdr_dfe_on = 1;
+
+ /* FLoop LOS gate: PPM filter enabled */
+ ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);
+
+ /* rx offset center enabled */
+ ibsd_wr_allchans(ppd, 12, 1 << 4, 1 << 4);
+
+ if (!ppd->dd->cspec->r1) {
+ ibsd_wr_allchans(ppd, 12, 1 << 12, 1 << 12);
+ ibsd_wr_allchans(ppd, 12, 2 << 8, 0x0f << 8);
+ }
+
+ /* Set the frequency loop bandwidth to 15 */
+ ibsd_wr_allchans(ppd, 2, 15 << 5, BMASK(8, 5));
+
+ return 0;
+}
+
+/* start adjust QMH serdes parameters */
+
+static void set_man_code(struct qib_pportdata *ppd, int chan, int code)
+{
+ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
+ 9, code << 9, 0x3f << 9);
+}
+
+static void set_man_mode_h1(struct qib_pportdata *ppd, int chan,
+ int enable, u32 tapenable)
+{
+ if (enable)
+ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
+ 1, 3 << 10, 0x1f << 10);
+ else
+ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
+ 1, 0, 0x1f << 10);
+}
+
+/* Set clock to 1, 0, 1, 0 */
+static void clock_man(struct qib_pportdata *ppd, int chan)
+{
+ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
+ 4, 0x4000, 0x4000);
+ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
+ 4, 0, 0x4000);
+ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
+ 4, 0x4000, 0x4000);
+ ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
+ 4, 0, 0x4000);
+}
+
+/*
+ * Write the current Tx serdes pre, post, main, amp settings into the serdes.
+ * The caller must pass the settings appropriate for the current speed,
+ * or not care if they are correct for the current speed.
+ */
+static void write_tx_serdes_param(struct qib_pportdata *ppd,
+ struct txdds_ent *txdds)
+{
+ u64 deemph;
+
+ deemph = qib_read_kreg_port(ppd, krp_tx_deemph_override);
+ /* field names for amp, main, post, pre, respectively */
+ deemph &= ~(SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txampcntl_d2a) |
+ SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txc0_ena) |
+ SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcp1_ena) |
+ SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcn1_ena));
+
+ deemph |= SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
+ tx_override_deemphasis_select);
+ deemph |= (txdds->amp & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
+ txampcntl_d2a)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
+ txampcntl_d2a);
+ deemph |= (txdds->main & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
+ txc0_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
+ txc0_ena);
+ deemph |= (txdds->post & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
+ txcp1_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
+ txcp1_ena);
+ deemph |= (txdds->pre & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
+ txcn1_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
+ txcn1_ena);
+ qib_write_kreg_port(ppd, krp_tx_deemph_override, deemph);
+}
+
+/*
+ * Set the parameters for mez cards on link bounce, so they are
+ * always exactly what was requested. Similar logic to init_txdds
+ * but does just the serdes.
+ */
+static void adj_tx_serdes(struct qib_pportdata *ppd)
+{
+ const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds;
+ struct txdds_ent *dds;
+
+ find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, 1);
+ dds = (struct txdds_ent *)(ppd->link_speed_active == QIB_IB_QDR ?
+ qdr_dds : (ppd->link_speed_active == QIB_IB_DDR ?
+ ddr_dds : sdr_dds));
+ write_tx_serdes_param(ppd, dds);
+}
+
+/* set QDR forced value for H1, if needed */
+static void force_h1(struct qib_pportdata *ppd)
+{
+ int chan;
+
+ ppd->cpspec->qdr_reforce = 0;
+ if (!ppd->dd->cspec->r1)
+ return;
+
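+	/*
+	 * For each channel: enter manual H1 mode, load the forced code,
+	 * clock it in, then drop back out of manual mode.
+	 */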
+ for (chan = 0; chan < SERDES_CHANS; chan++) {
+ set_man_mode_h1(ppd, chan, 1, 0);
+ set_man_code(ppd, chan, ppd->cpspec->h1_val);
+ clock_man(ppd, chan);
+ set_man_mode_h1(ppd, chan, 0, 0);
+ }
+}
+
+#define SJA_EN SYM_MASK(SPC_JTAG_ACCESS_REG, SPC_JTAG_ACCESS_EN)
+#define BISTEN_LSB SYM_LSB(SPC_JTAG_ACCESS_REG, bist_en)
+
+#define R_OPCODE_LSB 3
+#define R_OP_NOP 0
+#define R_OP_SHIFT 2
+#define R_OP_UPDATE 3
+#define R_TDI_LSB 2
+#define R_TDO_LSB 1
+#define R_RDY 1
+
+static int qib_r_grab(struct qib_devdata *dd)
+{
+ u64 val;
+ val = SJA_EN;
+ qib_write_kreg(dd, kr_r_access, val);
+ qib_read_kreg32(dd, kr_scratch);
+ return 0;
+}
+
+/* qib_r_wait_for_rdy() not only waits for the ready bit, it
+ * returns the current state of R_TDO
+ */
+static int qib_r_wait_for_rdy(struct qib_devdata *dd)
+{
+ u64 val;
+ int timeout;
+	for (timeout = 0; timeout < 100; ++timeout) {
+ val = qib_read_kreg32(dd, kr_r_access);
+ if (val & R_RDY)
+ return (val >> R_TDO_LSB) & 1;
+ }
+ return -1;
+}
+
+static int qib_r_shift(struct qib_devdata *dd, int bisten,
+ int len, u8 *inp, u8 *outp)
+{
+ u64 valbase, val;
+ int ret, pos;
+
+ valbase = SJA_EN | (bisten << BISTEN_LSB) |
+ (R_OP_SHIFT << R_OPCODE_LSB);
+ ret = qib_r_wait_for_rdy(dd);
+ if (ret < 0)
+ goto bail;
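+	/*
+	 * Shift 'len' bits through the selected chain, starting with bit 0
+	 * of the first byte: capture the TDO bit returned for the previous
+	 * cycle into outp (if requested) and drive the next TDI bit from
+	 * inp (if given).
+	 */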
+ for (pos = 0; pos < len; ++pos) {
+ val = valbase;
+ if (outp) {
+ outp[pos >> 3] &= ~(1 << (pos & 7));
+ outp[pos >> 3] |= (ret << (pos & 7));
+ }
+ if (inp) {
+ int tdi = inp[pos >> 3] >> (pos & 7);
+ val |= ((tdi & 1) << R_TDI_LSB);
+ }
+ qib_write_kreg(dd, kr_r_access, val);
+ qib_read_kreg32(dd, kr_scratch);
+ ret = qib_r_wait_for_rdy(dd);
+ if (ret < 0)
+ break;
+ }
+ /* Restore to NOP between operations. */
+ val = SJA_EN | (bisten << BISTEN_LSB);
+ qib_write_kreg(dd, kr_r_access, val);
+ qib_read_kreg32(dd, kr_scratch);
+ ret = qib_r_wait_for_rdy(dd);
+
+ if (ret >= 0)
+ ret = pos;
+bail:
+ return ret;
+}
+
+static int qib_r_update(struct qib_devdata *dd, int bisten)
+{
+ u64 val;
+ int ret;
+
+ val = SJA_EN | (bisten << BISTEN_LSB) | (R_OP_UPDATE << R_OPCODE_LSB);
+ ret = qib_r_wait_for_rdy(dd);
+ if (ret >= 0) {
+ qib_write_kreg(dd, kr_r_access, val);
+ qib_read_kreg32(dd, kr_scratch);
+ }
+ return ret;
+}
+
+#define BISTEN_PORT_SEL 15
+#define LEN_PORT_SEL 625
+#define BISTEN_AT 17
+#define LEN_AT 156
+#define BISTEN_ETM 16
+#define LEN_ETM 632
+
+#define BIT2BYTE(x) (((x) + BITS_PER_BYTE - 1) / BITS_PER_BYTE)
+
+/* these are common for all IB port use cases. */
+static u8 reset_at[BIT2BYTE(LEN_AT)] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
+};
+static u8 reset_atetm[BIT2BYTE(LEN_ETM)] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x80, 0xe3, 0x81, 0x73, 0x3c, 0x70, 0x8e,
+ 0x07, 0xce, 0xf1, 0xc0, 0x39, 0x1e, 0x38, 0xc7, 0x03, 0xe7,
+ 0x78, 0xe0, 0x1c, 0x0f, 0x9c, 0x7f, 0x80, 0x73, 0x0f, 0x70,
+ 0xde, 0x01, 0xce, 0x39, 0xc0, 0xf9, 0x06, 0x38, 0xd7, 0x00,
+ 0xe7, 0x19, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+};
+static u8 at[BIT2BYTE(LEN_AT)] = {
+ 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
+};
+
+/* used for IB1 or IB2, only one in use */
+static u8 atetm_1port[BIT2BYTE(LEN_ETM)] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x10, 0xf2, 0x80, 0x83, 0x1e, 0x38, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xc8, 0x03,
+ 0x07, 0x7b, 0xa0, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x18, 0x00,
+ 0x18, 0x00, 0x00, 0x00, 0x00, 0x4b, 0x00, 0x00, 0x00,
+};
+
+/* used when both IB1 and IB2 are in use */
+static u8 atetm_2port[BIT2BYTE(LEN_ETM)] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x79,
+ 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0xf8, 0x80, 0x83, 0x1e, 0x38, 0xe0, 0x03, 0x05,
+ 0x7b, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
+ 0xa2, 0x0f, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xd1, 0x07,
+ 0x02, 0x7c, 0x80, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x3e, 0x00,
+ 0x02, 0x00, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, 0x00,
+};
+
+/* used when only IB1 is in use */
+static u8 portsel_port1[BIT2BYTE(LEN_PORT_SEL)] = {
+ 0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13,
+ 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
+ 0x13, 0x78, 0x78, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
+ 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32,
+ 0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
+ 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
+ 0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+/* used when only IB2 is in use */
+static u8 portsel_port2[BIT2BYTE(LEN_PORT_SEL)] = {
+ 0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x39, 0x39,
+ 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x73, 0x32, 0x32, 0x32,
+ 0x32, 0x32, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
+ 0x39, 0x78, 0x78, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
+ 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x74, 0x32,
+ 0x32, 0x32, 0x32, 0x32, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a,
+ 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a,
+ 0x3a, 0x3a, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x01,
+};
+
+/* used when both IB1 and IB2 are in use */
+static u8 portsel_2port[BIT2BYTE(LEN_PORT_SEL)] = {
+ 0x32, 0xba, 0x54, 0x76, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13,
+ 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
+ 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
+ 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32,
+ 0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x3a,
+ 0x3a, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
+ 0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+/*
+ * Do setup to properly handle IB link recovery; if 'both' is set, we
+ * are initializing to cover both ports; otherwise we are initializing
+ * to cover a single port card, or the port has reached INIT and we may
+ * need to switch coverage types.
+ */
+static void setup_7322_link_recovery(struct qib_pportdata *ppd, u32 both)
+{
+ u8 *portsel, *etm;
+ struct qib_devdata *dd = ppd->dd;
+
+ if (!ppd->dd->cspec->r1)
+ return;
+ if (!both) {
+ dd->cspec->recovery_ports_initted++;
+ ppd->cpspec->recovery_init = 1;
+ }
+ if (!both && dd->cspec->recovery_ports_initted == 1) {
+ portsel = ppd->port == 1 ? portsel_port1 : portsel_port2;
+ etm = atetm_1port;
+ } else {
+ portsel = portsel_2port;
+ etm = atetm_2port;
+ }
+
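+	/*
+	 * Program the JTAG access scan chains: reset the ETM and AT
+	 * chains, load the port selection, then load the active AT and
+	 * ETM patterns.
+	 */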
+ if (qib_r_grab(dd) < 0 ||
+ qib_r_shift(dd, BISTEN_ETM, LEN_ETM, reset_atetm, NULL) < 0 ||
+ qib_r_update(dd, BISTEN_ETM) < 0 ||
+ qib_r_shift(dd, BISTEN_AT, LEN_AT, reset_at, NULL) < 0 ||
+ qib_r_update(dd, BISTEN_AT) < 0 ||
+ qib_r_shift(dd, BISTEN_PORT_SEL, LEN_PORT_SEL,
+ portsel, NULL) < 0 ||
+ qib_r_update(dd, BISTEN_PORT_SEL) < 0 ||
+ qib_r_shift(dd, BISTEN_AT, LEN_AT, at, NULL) < 0 ||
+ qib_r_update(dd, BISTEN_AT) < 0 ||
+ qib_r_shift(dd, BISTEN_ETM, LEN_ETM, etm, NULL) < 0 ||
+ qib_r_update(dd, BISTEN_ETM) < 0)
+ qib_dev_err(dd, "Failed IB link recovery setup\n");
+}
+
+static void check_7322_rxe_status(struct qib_pportdata *ppd)
+{
+ struct qib_devdata *dd = ppd->dd;
+ u64 fmask;
+
+ if (dd->cspec->recovery_ports_initted != 1)
+ return; /* rest doesn't apply to dualport */
+ qib_write_kreg(dd, kr_control, dd->control |
+ SYM_MASK(Control, FreezeMode));
+ (void)qib_read_kreg64(dd, kr_scratch);
+ udelay(3); /* ibcreset asserted 400ns, be sure that's over */
+ fmask = qib_read_kreg64(dd, kr_act_fmask);
+ if (!fmask) {
+ /*
+ * require a powercycle before we'll work again, and make
+ * sure we get no more interrupts, and don't turn off
+ * freeze.
+ */
+ ppd->dd->cspec->stay_in_freeze = 1;
+ qib_7322_set_intr_state(ppd->dd, 0);
+ qib_write_kreg(dd, kr_fmask, 0ULL);
+ qib_dev_err(dd, "HCA unusable until powercycled\n");
+ return; /* eventually reset */
+ }
+
+ qib_write_kreg(ppd->dd, kr_hwerrclear,
+ SYM_MASK(HwErrClear, IBSerdesPClkNotDetectClear_1));
+
+ /* don't do the full clear_freeze(), not needed for this */
+ qib_write_kreg(dd, kr_control, dd->control);
+ qib_read_kreg32(dd, kr_scratch);
+ /* take IBC out of reset */
+ if (ppd->link_speed_supported) {
+ ppd->cpspec->ibcctrl_a &=
+ ~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
+ qib_write_kreg_port(ppd, krp_ibcctrl_a,
+ ppd->cpspec->ibcctrl_a);
+ qib_read_kreg32(dd, kr_scratch);
+ if (ppd->lflags & QIBL_IB_LINK_DISABLED)
+ qib_set_ib_7322_lstate(ppd, 0,
+ QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
+ }
+}
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
new file mode 100644
index 000000000000..9b40f345ac3f
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -0,0 +1,1586 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
+ * All rights reserved.
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+#include <linux/delay.h>
+#include <linux/idr.h>
+
+#include "qib.h"
+#include "qib_common.h"
+
+/*
+ * Minimum number of buffers we want per user context, after the driver's own use.
+ */
+#define QIB_MIN_USER_CTXT_BUFCNT 7
+
+#define QLOGIC_IB_R_SOFTWARE_MASK 0xFF
+#define QLOGIC_IB_R_SOFTWARE_SHIFT 24
+#define QLOGIC_IB_R_EMULATOR_MASK (1ULL<<62)
+
+/*
+ * Number of ctxts we are configured to use (to allow for more pio
+ * buffers per ctxt, etc.) Zero means use chip value.
+ */
+ushort qib_cfgctxts;
+module_param_named(cfgctxts, qib_cfgctxts, ushort, S_IRUGO);
+MODULE_PARM_DESC(cfgctxts, "Set max number of contexts to use");
+
+/*
+ * If set, do not write to any regs if avoidable, hack to allow
+ * check for deranged default register values.
+ */
+ushort qib_mini_init;
+module_param_named(mini_init, qib_mini_init, ushort, S_IRUGO);
+MODULE_PARM_DESC(mini_init, "If set, do minimal diag init");
+
+unsigned qib_n_krcv_queues;
+module_param_named(krcvqs, qib_n_krcv_queues, uint, S_IRUGO);
+MODULE_PARM_DESC(krcvqs, "number of kernel receive queues per IB port");
+
+/*
+ * qib_wc_pat parameter:
+ * 0 is WC via MTRR
+ * 1 is WC via PAT
+ * If PAT initialization fails, code reverts back to MTRR
+ */
+unsigned qib_wc_pat = 1; /* default (1) is to use PAT, not MTRR */
+module_param_named(wc_pat, qib_wc_pat, uint, S_IRUGO);
+MODULE_PARM_DESC(wc_pat, "enable write-combining via PAT mechanism");
+
+struct workqueue_struct *qib_wq;
+struct workqueue_struct *qib_cq_wq;
+
+static void verify_interrupt(unsigned long);
+
+static struct idr qib_unit_table;
+u32 qib_cpulist_count;
+unsigned long *qib_cpulist;
+
+/* set number of contexts we'll actually use */
+void qib_set_ctxtcnt(struct qib_devdata *dd)
+{
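+	/*
+	 * Use the chip's context count unless the cfgctxts module
+	 * parameter asks for something sane: at least one context per
+	 * port and no more than the chip supports.
+	 */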
+ if (!qib_cfgctxts)
+ dd->cfgctxts = dd->ctxtcnt;
+ else if (qib_cfgctxts < dd->num_pports)
+ dd->cfgctxts = dd->ctxtcnt;
+ else if (qib_cfgctxts <= dd->ctxtcnt)
+ dd->cfgctxts = qib_cfgctxts;
+ else
+ dd->cfgctxts = dd->ctxtcnt;
+}
+
+/*
+ * Common code for creating the receive context array.
+ */
+int qib_create_ctxts(struct qib_devdata *dd)
+{
+ unsigned i;
+ int ret;
+
+ /*
+ * Allocate full ctxtcnt array, rather than just cfgctxts, because
+ * cleanup iterates across all possible ctxts.
+ */
+ dd->rcd = kzalloc(sizeof(*dd->rcd) * dd->ctxtcnt, GFP_KERNEL);
+ if (!dd->rcd) {
+ qib_dev_err(dd, "Unable to allocate ctxtdata array, "
+ "failing\n");
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ /* create (one or more) kctxt */
+ for (i = 0; i < dd->first_user_ctxt; ++i) {
+ struct qib_pportdata *ppd;
+ struct qib_ctxtdata *rcd;
+
+ if (dd->skip_kctxt_mask & (1 << i))
+ continue;
+
+ ppd = dd->pport + (i % dd->num_pports);
+ rcd = qib_create_ctxtdata(ppd, i);
+ if (!rcd) {
+ qib_dev_err(dd, "Unable to allocate ctxtdata"
+ " for Kernel ctxt, failing\n");
+ ret = -ENOMEM;
+ goto done;
+ }
+ rcd->pkeys[0] = QIB_DEFAULT_P_KEY;
+ rcd->seq_cnt = 1;
+ }
+ ret = 0;
+done:
+ return ret;
+}
+
+/*
+ * Common code for user and kernel context setup.
+ */
+struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *ppd, u32 ctxt)
+{
+ struct qib_devdata *dd = ppd->dd;
+ struct qib_ctxtdata *rcd;
+
+ rcd = kzalloc(sizeof(*rcd), GFP_KERNEL);
+ if (rcd) {
+ INIT_LIST_HEAD(&rcd->qp_wait_list);
+ rcd->ppd = ppd;
+ rcd->dd = dd;
+ rcd->cnt = 1;
+ rcd->ctxt = ctxt;
+ dd->rcd[ctxt] = rcd;
+
+ dd->f_init_ctxt(rcd);
+
+ /*
+ * To avoid wasting a lot of memory, we allocate 32KB chunks
+ * of physically contiguous memory, advance through it until
+ * used up and then allocate more. Of course, we need
+ * memory to store those extra pointers, now. 32KB seems to
+ * be the most that is "safe" under memory pressure
+ * (creating large files and then copying them over
+ * NFS while doing lots of MPI jobs). The OOM killer can
+ * get invoked, even though we say we can sleep and this can
+ * cause significant system problems....
+ */
+ rcd->rcvegrbuf_size = 0x8000;
+ rcd->rcvegrbufs_perchunk =
+ rcd->rcvegrbuf_size / dd->rcvegrbufsize;
+ rcd->rcvegrbuf_chunks = (rcd->rcvegrcnt +
+ rcd->rcvegrbufs_perchunk - 1) /
+ rcd->rcvegrbufs_perchunk;
+ }
+ return rcd;
+}
+
+/*
+ * Common code for initializing the physical port structure.
+ */
+void qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd,
+ u8 hw_pidx, u8 port)
+{
+ ppd->dd = dd;
+ ppd->hw_pidx = hw_pidx;
+ ppd->port = port; /* IB port number, not index */
+
+ spin_lock_init(&ppd->sdma_lock);
+ spin_lock_init(&ppd->lflags_lock);
+ init_waitqueue_head(&ppd->state_wait);
+
+ init_timer(&ppd->symerr_clear_timer);
+ ppd->symerr_clear_timer.function = qib_clear_symerror_on_linkup;
+ ppd->symerr_clear_timer.data = (unsigned long)ppd;
+}
+
+static int init_pioavailregs(struct qib_devdata *dd)
+{
+ int ret, pidx;
+ u64 *status_page;
+
+ dd->pioavailregs_dma = dma_alloc_coherent(
+ &dd->pcidev->dev, PAGE_SIZE, &dd->pioavailregs_phys,
+ GFP_KERNEL);
+ if (!dd->pioavailregs_dma) {
+ qib_dev_err(dd, "failed to allocate PIOavail reg area "
+ "in memory\n");
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ /*
+ * We really want L2 cache aligned, but for current CPUs of
+ * interest, they are the same.
+ */
+ status_page = (u64 *)
+ ((char *) dd->pioavailregs_dma +
+ ((2 * L1_CACHE_BYTES +
+ dd->pioavregs * sizeof(u64)) & ~L1_CACHE_BYTES));
+ /* device status comes first, for backwards compatibility */
+ dd->devstatusp = status_page;
+ *status_page++ = 0;
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ dd->pport[pidx].statusp = status_page;
+ *status_page++ = 0;
+ }
+
+ /*
+ * Setup buffer to hold freeze and other messages, accessible to
+ * apps, following statusp. This is per-unit, not per port.
+ */
+ dd->freezemsg = (char *) status_page;
+ *dd->freezemsg = 0;
+ /* length of msg buffer is "whatever is left" */
+ ret = (char *) status_page - (char *) dd->pioavailregs_dma;
+ dd->freezelen = PAGE_SIZE - ret;
+
+ ret = 0;
+
+done:
+ return ret;
+}
+
+/**
+ * init_shadow_tids - allocate the shadow TID array
+ * @dd: the qlogic_ib device
+ *
+ * allocate the shadow TID array, so we can qib_munlock previous
+ * entries. It may make more sense to move the pageshadow to the
+ * ctxt data structure, so we only allocate memory for ctxts actually
+ * in use, since it is 8k per ctxt now.
+ * We don't want failures here to prevent use of the driver/chip,
+ * so no return value.
+ */
+static void init_shadow_tids(struct qib_devdata *dd)
+{
+ struct page **pages;
+ dma_addr_t *addrs;
+
+ pages = vmalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *));
+ if (!pages) {
+ qib_dev_err(dd, "failed to allocate shadow page * "
+ "array, no expected sends!\n");
+ goto bail;
+ }
+
+ addrs = vmalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t));
+ if (!addrs) {
+ qib_dev_err(dd, "failed to allocate shadow dma handle "
+ "array, no expected sends!\n");
+ goto bail_free;
+ }
+
+ memset(pages, 0, dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *));
+ memset(addrs, 0, dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t));
+
+ dd->pageshadow = pages;
+ dd->physshadow = addrs;
+ return;
+
+bail_free:
+ vfree(pages);
+bail:
+ dd->pageshadow = NULL;
+}
+
+/*
+ * Do initialization for device that is only needed on
+ * first detect, not on resets.
+ */
+static int loadtime_init(struct qib_devdata *dd)
+{
+ int ret = 0;
+
+ if (((dd->revision >> QLOGIC_IB_R_SOFTWARE_SHIFT) &
+ QLOGIC_IB_R_SOFTWARE_MASK) != QIB_CHIP_SWVERSION) {
+ qib_dev_err(dd, "Driver only handles version %d, "
+			    "chip swversion is %d (%llx), failing\n",
+ QIB_CHIP_SWVERSION,
+ (int)(dd->revision >>
+ QLOGIC_IB_R_SOFTWARE_SHIFT) &
+ QLOGIC_IB_R_SOFTWARE_MASK,
+ (unsigned long long) dd->revision);
+ ret = -ENOSYS;
+ goto done;
+ }
+
+ if (dd->revision & QLOGIC_IB_R_EMULATOR_MASK)
+ qib_devinfo(dd->pcidev, "%s", dd->boardversion);
+
+ spin_lock_init(&dd->pioavail_lock);
+ spin_lock_init(&dd->sendctrl_lock);
+ spin_lock_init(&dd->uctxt_lock);
+ spin_lock_init(&dd->qib_diag_trans_lock);
+ spin_lock_init(&dd->eep_st_lock);
+ mutex_init(&dd->eep_lock);
+
+ if (qib_mini_init)
+ goto done;
+
+ ret = init_pioavailregs(dd);
+ init_shadow_tids(dd);
+
+ qib_get_eeprom_info(dd);
+
+ /* setup time (don't start yet) to verify we got interrupt */
+ init_timer(&dd->intrchk_timer);
+ dd->intrchk_timer.function = verify_interrupt;
+ dd->intrchk_timer.data = (unsigned long) dd;
+
+done:
+ return ret;
+}
+
+/**
+ * init_after_reset - re-initialize after a reset
+ * @dd: the qlogic_ib device
+ *
+ * sanity check at least some of the values after reset, and
+ * ensure no receive or transmit (explicitly, in case reset
+ * failed)
+ */
+static int init_after_reset(struct qib_devdata *dd)
+{
+ int i;
+
+ /*
+ * Ensure chip does no sends or receives, tail updates, or
+ * pioavail updates while we re-initialize. This is mostly
+ * for the driver data structures, not chip registers.
+ */
+ for (i = 0; i < dd->num_pports; ++i) {
+ /*
+ * ctxt == -1 means "all contexts". Only really safe for
+ * _dis_abling things, as here.
+ */
+ dd->f_rcvctrl(dd->pport + i, QIB_RCVCTRL_CTXT_DIS |
+ QIB_RCVCTRL_INTRAVAIL_DIS |
+ QIB_RCVCTRL_TAILUPD_DIS, -1);
+ /* Redundant across ports for some, but no big deal. */
+ dd->f_sendctrl(dd->pport + i, QIB_SENDCTRL_SEND_DIS |
+ QIB_SENDCTRL_AVAIL_DIS);
+ }
+
+ return 0;
+}
+
+static void enable_chip(struct qib_devdata *dd)
+{
+ u64 rcvmask;
+ int i;
+
+ /*
+ * Enable PIO send, and update of PIOavail regs to memory.
+ */
+ for (i = 0; i < dd->num_pports; ++i)
+ dd->f_sendctrl(dd->pport + i, QIB_SENDCTRL_SEND_ENB |
+ QIB_SENDCTRL_AVAIL_ENB);
+ /*
+ * Enable kernel ctxts' receive and receive interrupt.
+ * Other ctxts done as user opens and inits them.
+ */
+ rcvmask = QIB_RCVCTRL_CTXT_ENB | QIB_RCVCTRL_INTRAVAIL_ENB;
+ rcvmask |= (dd->flags & QIB_NODMA_RTAIL) ?
+ QIB_RCVCTRL_TAILUPD_DIS : QIB_RCVCTRL_TAILUPD_ENB;
+ for (i = 0; dd->rcd && i < dd->first_user_ctxt; ++i) {
+ struct qib_ctxtdata *rcd = dd->rcd[i];
+
+ if (rcd)
+ dd->f_rcvctrl(rcd->ppd, rcvmask, i);
+ }
+}
+
+static void verify_interrupt(unsigned long opaque)
+{
+ struct qib_devdata *dd = (struct qib_devdata *) opaque;
+
+ if (!dd)
+ return; /* being torn down */
+
+ /*
+ * If we don't have a lid or any interrupts, let the user know and
+ * don't bother checking again.
+ */
+ if (dd->int_counter == 0) {
+ if (!dd->f_intr_fallback(dd))
+ dev_err(&dd->pcidev->dev, "No interrupts detected, "
+ "not usable.\n");
+ else /* re-arm the timer to see if fallback works */
+ mod_timer(&dd->intrchk_timer, jiffies + HZ/2);
+ }
+}
+
+static void init_piobuf_state(struct qib_devdata *dd)
+{
+ int i, pidx;
+ u32 uctxts;
+
+ /*
+ * Ensure all buffers are free, and fifos empty. Buffers
+ * are common, so only do once for port 0.
+ *
+ * After enable and qib_chg_pioavailkernel so we can safely
+ * enable pioavail updates and PIOENABLE. After this, packets
+ * are ready and able to go out.
+ */
+ dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_ALL);
+ for (pidx = 0; pidx < dd->num_pports; ++pidx)
+ dd->f_sendctrl(dd->pport + pidx, QIB_SENDCTRL_FLUSH);
+
+ /*
+ * If not all sendbufs are used, add the one to each of the lower
+ * numbered contexts. pbufsctxt and lastctxt_piobuf are
+ * calculated in chip-specific code because it may cause some
+ * chip-specific adjustments to be made.
+ */
+ uctxts = dd->cfgctxts - dd->first_user_ctxt;
+ dd->ctxts_extrabuf = dd->pbufsctxt ?
+ dd->lastctxt_piobuf - (dd->pbufsctxt * uctxts) : 0;
+
+ /*
+ * Set up the shadow copies of the piobufavail registers,
+ * which we compare against the chip registers for now, and
+ * the in memory DMA'ed copies of the registers.
+ * By now pioavail updates to memory should have occurred, so
+ * copy them into our working/shadow registers; this is in
+ * case something went wrong with abort, but mostly to get the
+ * initial values of the generation bit correct.
+ */
+ for (i = 0; i < dd->pioavregs; i++) {
+ __le64 tmp;
+
+ tmp = dd->pioavailregs_dma[i];
+ /*
+ * Don't need to worry about pioavailkernel here
+ * because we will call qib_chg_pioavailkernel() later
+ * in initialization, to busy out buffers as needed.
+ */
+ dd->pioavailshadow[i] = le64_to_cpu(tmp);
+ }
+ while (i < ARRAY_SIZE(dd->pioavailshadow))
+ dd->pioavailshadow[i++] = 0; /* for debugging sanity */
+
+ /* after pioavailshadow is setup */
+ qib_chg_pioavailkernel(dd, 0, dd->piobcnt2k + dd->piobcnt4k,
+ TXCHK_CHG_TYPE_KERN, NULL);
+ dd->f_initvl15_bufs(dd);
+}
+
+/**
+ * qib_init - do the actual initialization sequence on the chip
+ * @dd: the qlogic_ib device
+ * @reinit: reinitializing, so don't allocate new memory
+ *
+ * Do the actual initialization sequence on the chip. This is done
+ * both from the init routine called from the PCI infrastructure, and
+ * when we reset the chip, or detect that it was reset internally,
+ * or it's administratively re-enabled.
+ *
+ * Memory allocation here and in called routines is only done in
+ * the first case (reinit == 0). We have to be careful, because even
+ * without memory allocation, we need to re-write all the chip registers
+ * TIDs, etc. after the reset or enable has completed.
+ */
+int qib_init(struct qib_devdata *dd, int reinit)
+{
+ int ret = 0, pidx, lastfail = 0;
+ u32 portok = 0;
+ unsigned i;
+ struct qib_ctxtdata *rcd;
+ struct qib_pportdata *ppd;
+ unsigned long flags;
+
+ /* Set linkstate to unknown, so we can watch for a transition. */
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ ppd = dd->pport + pidx;
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags &= ~(QIBL_LINKACTIVE | QIBL_LINKARMED |
+ QIBL_LINKDOWN | QIBL_LINKINIT |
+ QIBL_LINKV);
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ }
+
+ if (reinit)
+ ret = init_after_reset(dd);
+ else
+ ret = loadtime_init(dd);
+ if (ret)
+ goto done;
+
+ /* Bypass most chip-init, to get to device creation */
+ if (qib_mini_init)
+ return 0;
+
+ ret = dd->f_late_initreg(dd);
+ if (ret)
+ goto done;
+
+ /* dd->rcd can be NULL if early init failed */
+ for (i = 0; dd->rcd && i < dd->first_user_ctxt; ++i) {
+ /*
+ * Set up the (kernel) rcvhdr queue and egr TIDs. If doing
+ * re-init, the simplest way to handle this is to free
+ * existing, and re-allocate.
+ * Need to re-create rest of ctxt 0 ctxtdata as well.
+ */
+ rcd = dd->rcd[i];
+ if (!rcd)
+ continue;
+
+ lastfail = qib_create_rcvhdrq(dd, rcd);
+ if (!lastfail)
+ lastfail = qib_setup_eagerbufs(rcd);
+ if (lastfail) {
+ qib_dev_err(dd, "failed to allocate kernel ctxt's "
+ "rcvhdrq and/or egr bufs\n");
+ continue;
+ }
+ }
+
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ int mtu;
+ if (lastfail)
+ ret = lastfail;
+ ppd = dd->pport + pidx;
+ mtu = ib_mtu_enum_to_int(qib_ibmtu);
+ if (mtu == -1) {
+ mtu = QIB_DEFAULT_MTU;
+ qib_ibmtu = 0; /* don't leave invalid value */
+ }
+ /* set max we can ever have for this driver load */
+ ppd->init_ibmaxlen = min(mtu > 2048 ?
+ dd->piosize4k : dd->piosize2k,
+ dd->rcvegrbufsize +
+ (dd->rcvhdrentsize << 2));
+ /*
+ * Have to initialize ibmaxlen, but this will normally
+ * change immediately in qib_set_mtu().
+ */
+ ppd->ibmaxlen = ppd->init_ibmaxlen;
+ qib_set_mtu(ppd, mtu);
+
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags |= QIBL_IB_LINK_DISABLED;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+
+ lastfail = dd->f_bringup_serdes(ppd);
+ if (lastfail) {
+ qib_devinfo(dd->pcidev,
+ "Failed to bringup IB port %u\n", ppd->port);
+ lastfail = -ENETDOWN;
+ continue;
+ }
+
+ /* let link come up, and enable IBC */
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags &= ~QIBL_IB_LINK_DISABLED;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ portok++;
+ }
+
+ if (!portok) {
+ /* none of the ports initialized */
+ if (!ret && lastfail)
+ ret = lastfail;
+ else if (!ret)
+ ret = -ENETDOWN;
+ /* but continue on, so we can debug cause */
+ }
+
+ enable_chip(dd);
+
+ init_piobuf_state(dd);
+
+done:
+ if (!ret) {
+ /* chip is OK for user apps; mark it as initialized */
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ ppd = dd->pport + pidx;
+ /*
+ * Set status even if port serdes is not initialized
+ * so that diags will work.
+ */
+ *ppd->statusp |= QIB_STATUS_CHIP_PRESENT |
+ QIB_STATUS_INITTED;
+ if (!ppd->link_speed_enabled)
+ continue;
+ if (dd->flags & QIB_HAS_SEND_DMA)
+ ret = qib_setup_sdma(ppd);
+ init_timer(&ppd->hol_timer);
+ ppd->hol_timer.function = qib_hol_event;
+ ppd->hol_timer.data = (unsigned long)ppd;
+ ppd->hol_state = QIB_HOL_UP;
+ }
+
+ /* now we can enable all interrupts from the chip */
+ dd->f_set_intr_state(dd, 1);
+
+ /*
+ * Setup to verify we get an interrupt, and fallback
+ * to an alternate if necessary and possible.
+ */
+ mod_timer(&dd->intrchk_timer, jiffies + HZ/2);
+ /* start stats retrieval timer */
+ mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
+ }
+
+ /* if ret is non-zero, we probably should do some cleanup here... */
+ return ret;
+}
+
+/*
+ * These next two routines are placeholders in case we don't have per-arch
+ * code for controlling write combining. If explicit control of write
+ * combining is not available, performance will probably be awful.
+ */
+
+int __attribute__((weak)) qib_enable_wc(struct qib_devdata *dd)
+{
+ return -EOPNOTSUPP;
+}
+
+void __attribute__((weak)) qib_disable_wc(struct qib_devdata *dd)
+{
+}
+
+static inline struct qib_devdata *__qib_lookup(int unit)
+{
+ return idr_find(&qib_unit_table, unit);
+}
+
+struct qib_devdata *qib_lookup(int unit)
+{
+ struct qib_devdata *dd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&qib_devs_lock, flags);
+ dd = __qib_lookup(unit);
+ spin_unlock_irqrestore(&qib_devs_lock, flags);
+
+ return dd;
+}
+
+/*
+ * Stop the timers during unit shutdown, or after an error late
+ * in initialization.
+ */
+static void qib_stop_timers(struct qib_devdata *dd)
+{
+ struct qib_pportdata *ppd;
+ int pidx;
+
+ if (dd->stats_timer.data) {
+ del_timer_sync(&dd->stats_timer);
+ dd->stats_timer.data = 0;
+ }
+ if (dd->intrchk_timer.data) {
+ del_timer_sync(&dd->intrchk_timer);
+ dd->intrchk_timer.data = 0;
+ }
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ ppd = dd->pport + pidx;
+ if (ppd->hol_timer.data)
+ del_timer_sync(&ppd->hol_timer);
+ if (ppd->led_override_timer.data) {
+ del_timer_sync(&ppd->led_override_timer);
+ atomic_set(&ppd->led_override_timer_active, 0);
+ }
+ if (ppd->symerr_clear_timer.data)
+ del_timer_sync(&ppd->symerr_clear_timer);
+ }
+}
+
+/**
+ * qib_shutdown_device - shut down a device
+ * @dd: the qlogic_ib device
+ *
+ * This is called to make the device quiet when we are about to
+ * unload the driver, and also when the device is administratively
+ * disabled. It does not free any data structures.
+ * Everything it does has to be setup again by qib_init(dd, 1)
+ */
+static void qib_shutdown_device(struct qib_devdata *dd)
+{
+ struct qib_pportdata *ppd;
+ unsigned pidx;
+
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ ppd = dd->pport + pidx;
+
+ spin_lock_irq(&ppd->lflags_lock);
+ ppd->lflags &= ~(QIBL_LINKDOWN | QIBL_LINKINIT |
+ QIBL_LINKARMED | QIBL_LINKACTIVE |
+ QIBL_LINKV);
+ spin_unlock_irq(&ppd->lflags_lock);
+ *ppd->statusp &= ~(QIB_STATUS_IB_CONF | QIB_STATUS_IB_READY);
+ }
+ dd->flags &= ~QIB_INITTED;
+
+ /* mask interrupts, but not errors */
+ dd->f_set_intr_state(dd, 0);
+
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ ppd = dd->pport + pidx;
+ dd->f_rcvctrl(ppd, QIB_RCVCTRL_TAILUPD_DIS |
+ QIB_RCVCTRL_CTXT_DIS |
+ QIB_RCVCTRL_INTRAVAIL_DIS |
+ QIB_RCVCTRL_PKEY_ENB, -1);
+ /*
+ * Gracefully stop all sends allowing any in progress to
+ * trickle out first.
+ */
+ dd->f_sendctrl(ppd, QIB_SENDCTRL_CLEAR);
+ }
+
+ /*
+ * Enough for anything that's going to trickle out to have actually
+ * done so.
+ */
+ udelay(20);
+
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ ppd = dd->pport + pidx;
+ dd->f_setextled(ppd, 0); /* make sure LEDs are off */
+
+ if (dd->flags & QIB_HAS_SEND_DMA)
+ qib_teardown_sdma(ppd);
+
+ dd->f_sendctrl(ppd, QIB_SENDCTRL_AVAIL_DIS |
+ QIB_SENDCTRL_SEND_DIS);
+ /*
+ * Clear SerdesEnable.
+ * We can't count on interrupts since we are stopping.
+ */
+ dd->f_quiet_serdes(ppd);
+ }
+
+ qib_update_eeprom_log(dd);
+}
+
+/**
+ * qib_free_ctxtdata - free a context's allocated data
+ * @dd: the qlogic_ib device
+ * @rcd: the ctxtdata structure
+ *
+ * free up any allocated data for a context
+ * This should not touch anything that would affect a simultaneous
+ * re-allocation of context data, because it is called after qib_mutex
+ * is released (and can be called from reinit as well).
+ * It should never change any chip state, or global driver state.
+ */
+void qib_free_ctxtdata(struct qib_devdata *dd, struct qib_ctxtdata *rcd)
+{
+ if (!rcd)
+ return;
+
+ if (rcd->rcvhdrq) {
+ dma_free_coherent(&dd->pcidev->dev, rcd->rcvhdrq_size,
+ rcd->rcvhdrq, rcd->rcvhdrq_phys);
+ rcd->rcvhdrq = NULL;
+ if (rcd->rcvhdrtail_kvaddr) {
+ dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
+ rcd->rcvhdrtail_kvaddr,
+ rcd->rcvhdrqtailaddr_phys);
+ rcd->rcvhdrtail_kvaddr = NULL;
+ }
+ }
+ if (rcd->rcvegrbuf) {
+ unsigned e;
+
+ for (e = 0; e < rcd->rcvegrbuf_chunks; e++) {
+ void *base = rcd->rcvegrbuf[e];
+ size_t size = rcd->rcvegrbuf_size;
+
+ dma_free_coherent(&dd->pcidev->dev, size,
+ base, rcd->rcvegrbuf_phys[e]);
+ }
+ kfree(rcd->rcvegrbuf);
+ rcd->rcvegrbuf = NULL;
+ kfree(rcd->rcvegrbuf_phys);
+ rcd->rcvegrbuf_phys = NULL;
+ rcd->rcvegrbuf_chunks = 0;
+ }
+
+ kfree(rcd->tid_pg_list);
+ vfree(rcd->user_event_mask);
+ vfree(rcd->subctxt_uregbase);
+ vfree(rcd->subctxt_rcvegrbuf);
+ vfree(rcd->subctxt_rcvhdr_base);
+ kfree(rcd);
+}
+
+/*
+ * Perform a PIO buffer bandwidth write test, to verify proper system
+ * configuration. Even when all the setup calls work, occasionally
+ * BIOS or other issues can prevent write combining from working, or
+ * can cause other bandwidth problems to the chip.
+ *
+ * This test simply writes the same buffer over and over again, and
+ * measures close to the peak bandwidth to the chip (not testing
+ * data bandwidth to the wire). On chips that use an address-based
+ * trigger to send packets to the wire, this is easy. On chips that
+ * use a count to trigger, we want to make sure that the packet doesn't
+ * go out on the wire, or trigger flow control checks.
+ */
+static void qib_verify_pioperf(struct qib_devdata *dd)
+{
+ u32 pbnum, cnt, lcnt;
+ u32 __iomem *piobuf;
+ u32 *addr;
+ u64 msecs, emsecs;
+
+ piobuf = dd->f_getsendbuf(dd->pport, 0ULL, &pbnum);
+ if (!piobuf) {
+ qib_devinfo(dd->pcidev,
+ "No PIObufs for checking perf, skipping\n");
+ return;
+ }
+
+ /*
+ * Enough to give us a reasonable test, less than piobuf size, and
+ * likely multiple of store buffer length.
+ */
+ cnt = 1024;
+
+ addr = vmalloc(cnt);
+ if (!addr) {
+ qib_devinfo(dd->pcidev,
+ "Couldn't get memory for checking PIO perf,"
+ " skipping\n");
+ goto done;
+ }
+
+ preempt_disable(); /* we want reasonably accurate elapsed time */
+ msecs = 1 + jiffies_to_msecs(jiffies);
+ for (lcnt = 0; lcnt < 10000U; lcnt++) {
+ /* wait until we cross msec boundary */
+ if (jiffies_to_msecs(jiffies) >= msecs)
+ break;
+ udelay(1);
+ }
+
+ dd->f_set_armlaunch(dd, 0);
+
+ /*
+ * length 0, no dwords actually sent
+ */
+ writeq(0, piobuf);
+ qib_flush_wc();
+
+ /*
+ * This is only roughly accurate, since even with preempt we
+ * still take interrupts that could take a while. Running for
+ * >= 5 msec seems to get us "close enough" to accurate values.
+ */
+ msecs = jiffies_to_msecs(jiffies);
+ for (emsecs = lcnt = 0; emsecs <= 5UL; lcnt++) {
+ qib_pio_copy(piobuf + 64, addr, cnt >> 2);
+ emsecs = jiffies_to_msecs(jiffies) - msecs;
+ }
+
+ /* 1 GiB/sec, slightly over IB SDR line rate */
+ if (lcnt < (emsecs * 1024U))
+ qib_dev_err(dd,
+ "Performance problem: bandwidth to PIO buffers is "
+ "only %u MiB/sec\n",
+ lcnt / (u32) emsecs);
+
+ preempt_enable();
+
+ vfree(addr);
+
+done:
+ /* disarm piobuf, so it's available again */
+ dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(pbnum));
+ qib_sendbuf_done(dd, pbnum);
+ dd->f_set_armlaunch(dd, 1);
+}
+
+
+void qib_free_devdata(struct qib_devdata *dd)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&qib_devs_lock, flags);
+ idr_remove(&qib_unit_table, dd->unit);
+ list_del(&dd->list);
+ spin_unlock_irqrestore(&qib_devs_lock, flags);
+
+ ib_dealloc_device(&dd->verbs_dev.ibdev);
+}
+
+/*
+ * Allocate our primary per-unit data structure. Must be done via verbs
+ * allocator, because the verbs cleanup process both does cleanup and
+ * free of the data structure.
+ * "extra" is for chip-specific data.
+ *
+ * Use the idr mechanism to get a unit number for this unit.
+ */
+struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra)
+{
+ unsigned long flags;
+ struct qib_devdata *dd;
+ int ret;
+
+ if (!idr_pre_get(&qib_unit_table, GFP_KERNEL)) {
+ dd = ERR_PTR(-ENOMEM);
+ goto bail;
+ }
+
+ dd = (struct qib_devdata *) ib_alloc_device(sizeof(*dd) + extra);
+ if (!dd) {
+ dd = ERR_PTR(-ENOMEM);
+ goto bail;
+ }
+
+ spin_lock_irqsave(&qib_devs_lock, flags);
+ ret = idr_get_new(&qib_unit_table, dd, &dd->unit);
+ if (ret >= 0)
+ list_add(&dd->list, &qib_dev_list);
+ spin_unlock_irqrestore(&qib_devs_lock, flags);
+
+ if (ret < 0) {
+ qib_early_err(&pdev->dev,
+ "Could not allocate unit ID: error %d\n", -ret);
+ ib_dealloc_device(&dd->verbs_dev.ibdev);
+ dd = ERR_PTR(ret);
+ goto bail;
+ }
+
+ if (!qib_cpulist_count) {
+ u32 count = num_online_cpus();
+ qib_cpulist = kzalloc(BITS_TO_LONGS(count) *
+ sizeof(long), GFP_KERNEL);
+ if (qib_cpulist)
+ qib_cpulist_count = count;
+ else
+ qib_early_err(&pdev->dev, "Could not alloc cpulist "
+ "info, cpu affinity might be wrong\n");
+ }
+
+bail:
+ return dd;
+}
+
+/*
+ * Called from freeze mode handlers, and from PCI error
+ * reporting code. Should be paranoid about state of
+ * system and data structures.
+ */
+void qib_disable_after_error(struct qib_devdata *dd)
+{
+ if (dd->flags & QIB_INITTED) {
+ u32 pidx;
+
+ dd->flags &= ~QIB_INITTED;
+ if (dd->pport)
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ struct qib_pportdata *ppd;
+
+ ppd = dd->pport + pidx;
+ if (dd->flags & QIB_PRESENT) {
+ qib_set_linkstate(ppd,
+ QIB_IB_LINKDOWN_DISABLE);
+ dd->f_setextled(ppd, 0);
+ }
+ *ppd->statusp &= ~QIB_STATUS_IB_READY;
+ }
+ }
+
+ /*
+ * Mark as having had an error for driver, and also
+ * for /sys and status word mapped to user programs.
+ * This marks unit as not usable, until reset.
+ */
+ if (dd->devstatusp)
+ *dd->devstatusp |= QIB_STATUS_HWERROR;
+}
+
+static void __devexit qib_remove_one(struct pci_dev *);
+static int __devinit qib_init_one(struct pci_dev *,
+ const struct pci_device_id *);
+
+#define DRIVER_LOAD_MSG "QLogic " QIB_DRV_NAME " loaded: "
+#define PFX QIB_DRV_NAME ": "
+
+static const struct pci_device_id qib_pci_tbl[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_QLOGIC_IB_6120) },
+ { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_IB_7220) },
+ { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_IB_7322) },
+ { 0, }
+};
+
+MODULE_DEVICE_TABLE(pci, qib_pci_tbl);
+
+struct pci_driver qib_driver = {
+ .name = QIB_DRV_NAME,
+ .probe = qib_init_one,
+ .remove = __devexit_p(qib_remove_one),
+ .id_table = qib_pci_tbl,
+ .err_handler = &qib_pci_err_handler,
+};
+
+/*
+ * Do all the generic driver unit- and chip-independent memory
+ * allocation and initialization.
+ */
+static int __init qlogic_ib_init(void)
+{
+ int ret;
+
+ ret = qib_dev_init();
+ if (ret)
+ goto bail;
+
+ /*
+ * We create our own workqueue mainly because we want to be
+ * able to flush it when devices are being removed. We can't
+ * use schedule_work()/flush_scheduled_work() because both
+ * unregister_netdev() and linkwatch_event take the rtnl lock,
+ * so flush_scheduled_work() can deadlock during device
+ * removal.
+ */
+ qib_wq = create_workqueue("qib");
+ if (!qib_wq) {
+ ret = -ENOMEM;
+ goto bail_dev;
+ }
+
+ qib_cq_wq = create_workqueue("qib_cq");
+ if (!qib_cq_wq) {
+ ret = -ENOMEM;
+ goto bail_wq;
+ }
+
+ /*
+ * These must be called before the driver is registered with
+ * the PCI subsystem.
+ */
+ idr_init(&qib_unit_table);
+ if (!idr_pre_get(&qib_unit_table, GFP_KERNEL)) {
+ printk(KERN_ERR QIB_DRV_NAME ": idr_pre_get() failed\n");
+ ret = -ENOMEM;
+ goto bail_cq_wq;
+ }
+
+ ret = pci_register_driver(&qib_driver);
+ if (ret < 0) {
+ printk(KERN_ERR QIB_DRV_NAME
+ ": Unable to register driver: error %d\n", -ret);
+ goto bail_unit;
+ }
+
+ /* not fatal if it doesn't work */
+ if (qib_init_qibfs())
+ printk(KERN_ERR QIB_DRV_NAME ": Unable to register ipathfs\n");
+ goto bail; /* all OK */
+
+bail_unit:
+ idr_destroy(&qib_unit_table);
+bail_cq_wq:
+ destroy_workqueue(qib_cq_wq);
+bail_wq:
+ destroy_workqueue(qib_wq);
+bail_dev:
+ qib_dev_cleanup();
+bail:
+ return ret;
+}
+
+module_init(qlogic_ib_init);
+
+/*
+ * Do the non-unit driver cleanup, memory free, etc. at unload.
+ */
+static void __exit qlogic_ib_cleanup(void)
+{
+ int ret;
+
+ ret = qib_exit_qibfs();
+ if (ret)
+ printk(KERN_ERR QIB_DRV_NAME ": "
+ "Unable to cleanup counter filesystem: "
+ "error %d\n", -ret);
+
+ pci_unregister_driver(&qib_driver);
+
+ destroy_workqueue(qib_wq);
+ destroy_workqueue(qib_cq_wq);
+
+ qib_cpulist_count = 0;
+ kfree(qib_cpulist);
+
+ idr_destroy(&qib_unit_table);
+ qib_dev_cleanup();
+}
+
+module_exit(qlogic_ib_cleanup);
+
+/* this can only be called after a successful initialization */
+static void cleanup_device_data(struct qib_devdata *dd)
+{
+ int ctxt;
+ int pidx;
+ struct qib_ctxtdata **tmp;
+ unsigned long flags;
+
+ /* users can't do anything more with chip */
+ for (pidx = 0; pidx < dd->num_pports; ++pidx)
+ if (dd->pport[pidx].statusp)
+ *dd->pport[pidx].statusp &= ~QIB_STATUS_CHIP_PRESENT;
+
+ if (!qib_wc_pat)
+ qib_disable_wc(dd);
+
+ if (dd->pioavailregs_dma) {
+ dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
+ (void *) dd->pioavailregs_dma,
+ dd->pioavailregs_phys);
+ dd->pioavailregs_dma = NULL;
+ }
+
+ if (dd->pageshadow) {
+ struct page **tmpp = dd->pageshadow;
+ dma_addr_t *tmpd = dd->physshadow;
+ int i, cnt = 0;
+
+ for (ctxt = 0; ctxt < dd->cfgctxts; ctxt++) {
+ int ctxt_tidbase = ctxt * dd->rcvtidcnt;
+ int maxtid = ctxt_tidbase + dd->rcvtidcnt;
+
+ for (i = ctxt_tidbase; i < maxtid; i++) {
+ if (!tmpp[i])
+ continue;
+ pci_unmap_page(dd->pcidev, tmpd[i],
+ PAGE_SIZE, PCI_DMA_FROMDEVICE);
+ qib_release_user_pages(&tmpp[i], 1);
+ tmpp[i] = NULL;
+ cnt++;
+ }
+ }
+
+ tmpp = dd->pageshadow;
+ dd->pageshadow = NULL;
+ vfree(tmpp);
+ }
+
+ /*
+ * Free any resources still in use (usually just kernel contexts)
+ * at unload; we do for ctxtcnt, because that's what we allocate.
+ * We acquire lock to be really paranoid that rcd isn't being
+ * accessed from some interrupt-related code (that should not happen,
+ * but best to be sure).
+ */
+ spin_lock_irqsave(&dd->uctxt_lock, flags);
+ tmp = dd->rcd;
+ dd->rcd = NULL;
+ spin_unlock_irqrestore(&dd->uctxt_lock, flags);
+ for (ctxt = 0; tmp && ctxt < dd->ctxtcnt; ctxt++) {
+ struct qib_ctxtdata *rcd = tmp[ctxt];
+
+ tmp[ctxt] = NULL; /* debugging paranoia */
+ qib_free_ctxtdata(dd, rcd);
+ }
+ kfree(tmp);
+ kfree(dd->boardname);
+}
+
+/*
+ * Clean up on unit shutdown, or error during unit load after
+ * successful initialization.
+ */
+static void qib_postinit_cleanup(struct qib_devdata *dd)
+{
+ /*
+ * Clean up chip-specific stuff.
+ * We check for NULL here, because it's outside
+ * the kregbase check, and we need to call it
+ * after the free_irq. Thus it's possible that
+ * the function pointers were never initialized.
+ */
+ if (dd->f_cleanup)
+ dd->f_cleanup(dd);
+
+ qib_pcie_ddcleanup(dd);
+
+ cleanup_device_data(dd);
+
+ qib_free_devdata(dd);
+}
+
+static int __devinit qib_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int ret, j, pidx, initfail;
+ struct qib_devdata *dd = NULL;
+
+ ret = qib_pcie_init(pdev, ent);
+ if (ret)
+ goto bail;
+
+ /*
+ * Do device-specific initialiation, function table setup, dd
+ * allocation, etc.
+ */
+ switch (ent->device) {
+ case PCI_DEVICE_ID_QLOGIC_IB_6120:
+#ifdef CONFIG_PCI_MSI
+ dd = qib_init_iba6120_funcs(pdev, ent);
+#else
+ qib_early_err(&pdev->dev, "QLogic PCIE device 0x%x cannot "
+ "work if CONFIG_PCI_MSI is not enabled\n",
+ ent->device);
+#endif
+ break;
+
+ case PCI_DEVICE_ID_QLOGIC_IB_7220:
+ dd = qib_init_iba7220_funcs(pdev, ent);
+ break;
+
+ case PCI_DEVICE_ID_QLOGIC_IB_7322:
+ dd = qib_init_iba7322_funcs(pdev, ent);
+ break;
+
+ default:
+ qib_early_err(&pdev->dev, "Failing on unknown QLogic "
+ "deviceid 0x%x\n", ent->device);
+ ret = -ENODEV;
+ }
+
+ if (IS_ERR(dd))
+ ret = PTR_ERR(dd);
+ if (ret)
+ goto bail; /* error already printed */
+
+ /* do the generic initialization */
+ initfail = qib_init(dd, 0);
+
+ ret = qib_register_ib_device(dd);
+
+ /*
+ * Now ready for use. this should be cleared whenever we
+ * detect a reset, or initiate one. If earlier failure,
+ * we still create devices, so diags, etc. can be used
+ * to determine cause of problem.
+ */
+ if (!qib_mini_init && !initfail && !ret)
+ dd->flags |= QIB_INITTED;
+
+ j = qib_device_create(dd);
+ if (j)
+ qib_dev_err(dd, "Failed to create /dev devices: %d\n", -j);
+ j = qibfs_add(dd);
+ if (j)
+ qib_dev_err(dd, "Failed filesystem setup for counters: %d\n",
+ -j);
+
+ if (qib_mini_init || initfail || ret) {
+ qib_stop_timers(dd);
+ for (pidx = 0; pidx < dd->num_pports; ++pidx)
+ dd->f_quiet_serdes(dd->pport + pidx);
+ if (initfail)
+ ret = initfail;
+ goto bail;
+ }
+
+ if (!qib_wc_pat) {
+ ret = qib_enable_wc(dd);
+ if (ret) {
+ qib_dev_err(dd, "Write combining not enabled "
+ "(err %d): performance may be poor\n",
+ -ret);
+ ret = 0;
+ }
+ }
+
+ qib_verify_pioperf(dd);
+bail:
+ return ret;
+}
+
+static void __devexit qib_remove_one(struct pci_dev *pdev)
+{
+ struct qib_devdata *dd = pci_get_drvdata(pdev);
+ int ret;
+
+ /* unregister from IB core */
+ qib_unregister_ib_device(dd);
+
+ /*
+ * Disable the IB link, disable interrupts on the device,
+ * clear dma engines, etc.
+ */
+ if (!qib_mini_init)
+ qib_shutdown_device(dd);
+
+ qib_stop_timers(dd);
+
+ /* wait until all of our (qsfp) schedule_work() calls complete */
+ flush_scheduled_work();
+
+ ret = qibfs_remove(dd);
+ if (ret)
+ qib_dev_err(dd, "Failed counters filesystem cleanup: %d\n",
+ -ret);
+
+ qib_device_remove(dd);
+
+ qib_postinit_cleanup(dd);
+}
+
+/**
+ * qib_create_rcvhdrq - create a receive header queue
+ * @dd: the qlogic_ib device
+ * @rcd: the context data
+ *
+ * This must be contiguous memory (from an i/o perspective), and must be
+ * DMA'able (which means for some systems, it will go through an IOMMU,
+ * or be forced into a low address range).
+ */
+int qib_create_rcvhdrq(struct qib_devdata *dd, struct qib_ctxtdata *rcd)
+{
+ unsigned amt;
+
+ if (!rcd->rcvhdrq) {
+ dma_addr_t phys_hdrqtail;
+ gfp_t gfp_flags;
+
+ amt = ALIGN(dd->rcvhdrcnt * dd->rcvhdrentsize *
+ sizeof(u32), PAGE_SIZE);
+ gfp_flags = (rcd->ctxt >= dd->first_user_ctxt) ?
+ GFP_USER : GFP_KERNEL;
+ rcd->rcvhdrq = dma_alloc_coherent(
+ &dd->pcidev->dev, amt, &rcd->rcvhdrq_phys,
+ gfp_flags | __GFP_COMP);
+
+ if (!rcd->rcvhdrq) {
+ qib_dev_err(dd, "attempt to allocate %d bytes "
+ "for ctxt %u rcvhdrq failed\n",
+ amt, rcd->ctxt);
+ goto bail;
+ }
+
+ if (rcd->ctxt >= dd->first_user_ctxt) {
+ rcd->user_event_mask = vmalloc_user(PAGE_SIZE);
+ if (!rcd->user_event_mask)
+ goto bail_free_hdrq;
+ }
+
+ if (!(dd->flags & QIB_NODMA_RTAIL)) {
+ rcd->rcvhdrtail_kvaddr = dma_alloc_coherent(
+ &dd->pcidev->dev, PAGE_SIZE, &phys_hdrqtail,
+ gfp_flags);
+ if (!rcd->rcvhdrtail_kvaddr)
+ goto bail_free;
+ rcd->rcvhdrqtailaddr_phys = phys_hdrqtail;
+ }
+
+ rcd->rcvhdrq_size = amt;
+ }
+
+ /* clear for security and sanity on each use */
+ memset(rcd->rcvhdrq, 0, rcd->rcvhdrq_size);
+ if (rcd->rcvhdrtail_kvaddr)
+ memset(rcd->rcvhdrtail_kvaddr, 0, PAGE_SIZE);
+ return 0;
+
+bail_free:
+ qib_dev_err(dd, "attempt to allocate 1 page for ctxt %u "
+ "rcvhdrqtailaddr failed\n", rcd->ctxt);
+ vfree(rcd->user_event_mask);
+ rcd->user_event_mask = NULL;
+bail_free_hdrq:
+ dma_free_coherent(&dd->pcidev->dev, amt, rcd->rcvhdrq,
+ rcd->rcvhdrq_phys);
+ rcd->rcvhdrq = NULL;
+bail:
+ return -ENOMEM;
+}
+
+/**
+ * allocate eager buffers, both kernel and user contexts.
+ * @rcd: the context we are setting up.
+ *
+ * Allocate the eager TID buffers and program them into hip.
+ * They are no longer completely contiguous, we do multiple allocation
+ * calls. Otherwise we get the OOM code involved, by asking for too
+ * much per call, with disastrous results on some kernels.
+ */
+int qib_setup_eagerbufs(struct qib_ctxtdata *rcd)
+{
+ struct qib_devdata *dd = rcd->dd;
+ unsigned e, egrcnt, egrperchunk, chunk, egrsize, egroff;
+ size_t size;
+ gfp_t gfp_flags;
+
+ /*
+ * GFP_USER, but without GFP_FS, so buffer cache can be
+ * coalesced (we hope); otherwise, even at order 4,
+ * heavy filesystem activity makes these fail, and we can
+ * use compound pages.
+ */
+ gfp_flags = __GFP_WAIT | __GFP_IO | __GFP_COMP;
+
+ egrcnt = rcd->rcvegrcnt;
+ egroff = rcd->rcvegr_tid_base;
+ egrsize = dd->rcvegrbufsize;
+
+ chunk = rcd->rcvegrbuf_chunks;
+ egrperchunk = rcd->rcvegrbufs_perchunk;
+ size = rcd->rcvegrbuf_size;
+ if (!rcd->rcvegrbuf) {
+ rcd->rcvegrbuf =
+ kzalloc(chunk * sizeof(rcd->rcvegrbuf[0]),
+ GFP_KERNEL);
+ if (!rcd->rcvegrbuf)
+ goto bail;
+ }
+ if (!rcd->rcvegrbuf_phys) {
+ rcd->rcvegrbuf_phys =
+ kmalloc(chunk * sizeof(rcd->rcvegrbuf_phys[0]),
+ GFP_KERNEL);
+ if (!rcd->rcvegrbuf_phys)
+ goto bail_rcvegrbuf;
+ }
+ for (e = 0; e < rcd->rcvegrbuf_chunks; e++) {
+ if (rcd->rcvegrbuf[e])
+ continue;
+ rcd->rcvegrbuf[e] =
+ dma_alloc_coherent(&dd->pcidev->dev, size,
+ &rcd->rcvegrbuf_phys[e],
+ gfp_flags);
+ if (!rcd->rcvegrbuf[e])
+ goto bail_rcvegrbuf_phys;
+ }
+
+ rcd->rcvegr_phys = rcd->rcvegrbuf_phys[0];
+
+ for (e = chunk = 0; chunk < rcd->rcvegrbuf_chunks; chunk++) {
+ dma_addr_t pa = rcd->rcvegrbuf_phys[chunk];
+ unsigned i;
+
+ for (i = 0; e < egrcnt && i < egrperchunk; e++, i++) {
+ dd->f_put_tid(dd, e + egroff +
+ (u64 __iomem *)
+ ((char __iomem *)
+ dd->kregbase +
+ dd->rcvegrbase),
+ RCVHQ_RCV_TYPE_EAGER, pa);
+ pa += egrsize;
+ }
+ cond_resched(); /* don't hog the cpu */
+ }
+
+ return 0;
+
+bail_rcvegrbuf_phys:
+ for (e = 0; e < rcd->rcvegrbuf_chunks && rcd->rcvegrbuf[e]; e++)
+ dma_free_coherent(&dd->pcidev->dev, size,
+ rcd->rcvegrbuf[e], rcd->rcvegrbuf_phys[e]);
+ kfree(rcd->rcvegrbuf_phys);
+ rcd->rcvegrbuf_phys = NULL;
+bail_rcvegrbuf:
+ kfree(rcd->rcvegrbuf);
+ rcd->rcvegrbuf = NULL;
+bail:
+ return -ENOMEM;
+}
+
+int init_chip_wc_pat(struct qib_devdata *dd, u32 vl15buflen)
+{
+ u64 __iomem *qib_kregbase = NULL;
+ void __iomem *qib_piobase = NULL;
+ u64 __iomem *qib_userbase = NULL;
+ u64 qib_kreglen;
+ u64 qib_pio2koffset = dd->piobufbase & 0xffffffff;
+ u64 qib_pio4koffset = dd->piobufbase >> 32;
+ u64 qib_pio2klen = dd->piobcnt2k * dd->palign;
+ u64 qib_pio4klen = dd->piobcnt4k * dd->align4k;
+ u64 qib_physaddr = dd->physaddr;
+ u64 qib_piolen;
+ u64 qib_userlen = 0;
+
+ /*
+ * Free the old mapping because the kernel will try to reuse the
+ * old mapping and not create a new mapping with the
+ * write combining attribute.
+ */
+ iounmap(dd->kregbase);
+ dd->kregbase = NULL;
+
+ /*
+ * Assumes chip address space looks like:
+ * - kregs + sregs + cregs + uregs (in any order)
+ * - piobufs (2K and 4K bufs in either order)
+ * or:
+ * - kregs + sregs + cregs (in any order)
+ * - piobufs (2K and 4K bufs in either order)
+ * - uregs
+ */
+ if (dd->piobcnt4k == 0) {
+ qib_kreglen = qib_pio2koffset;
+ qib_piolen = qib_pio2klen;
+ } else if (qib_pio2koffset < qib_pio4koffset) {
+ qib_kreglen = qib_pio2koffset;
+ qib_piolen = qib_pio4koffset + qib_pio4klen - qib_kreglen;
+ } else {
+ qib_kreglen = qib_pio4koffset;
+ qib_piolen = qib_pio2koffset + qib_pio2klen - qib_kreglen;
+ }
+ qib_piolen += vl15buflen;
+ /* Map just the configured ports (not all hw ports) */
+ if (dd->uregbase > qib_kreglen)
+ qib_userlen = dd->ureg_align * dd->cfgctxts;
+
+ /* Sanity checks passed, now create the new mappings */
+ qib_kregbase = ioremap_nocache(qib_physaddr, qib_kreglen);
+ if (!qib_kregbase)
+ goto bail;
+
+ qib_piobase = ioremap_wc(qib_physaddr + qib_kreglen, qib_piolen);
+ if (!qib_piobase)
+ goto bail_kregbase;
+
+ if (qib_userlen) {
+ qib_userbase = ioremap_nocache(qib_physaddr + dd->uregbase,
+ qib_userlen);
+ if (!qib_userbase)
+ goto bail_piobase;
+ }
+
+ dd->kregbase = qib_kregbase;
+ dd->kregend = (u64 __iomem *)
+ ((char __iomem *) qib_kregbase + qib_kreglen);
+ dd->piobase = qib_piobase;
+ dd->pio2kbase = (void __iomem *)
+ (((char __iomem *) dd->piobase) +
+ qib_pio2koffset - qib_kreglen);
+ if (dd->piobcnt4k)
+ dd->pio4kbase = (void __iomem *)
+ (((char __iomem *) dd->piobase) +
+ qib_pio4koffset - qib_kreglen);
+ if (qib_userlen)
+ /* ureg will now be accessed relative to dd->userbase */
+ dd->userbase = qib_userbase;
+ return 0;
+
+bail_piobase:
+ iounmap(qib_piobase);
+bail_kregbase:
+ iounmap(qib_kregbase);
+bail:
+ return -ENOMEM;
+}
diff --git a/drivers/infiniband/hw/qib/qib_intr.c b/drivers/infiniband/hw/qib/qib_intr.c
new file mode 100644
index 000000000000..54a40828a106
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_intr.c
@@ -0,0 +1,236 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
+ * All rights reserved.
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+
+#include "qib.h"
+#include "qib_common.h"
+
+/**
+ * qib_format_hwmsg - format a single hwerror message
+ * @msg message buffer
+ * @msgl length of message buffer
+ * @hwmsg message to add to message buffer
+ */
+static void qib_format_hwmsg(char *msg, size_t msgl, const char *hwmsg)
+{
+ strlcat(msg, "[", msgl);
+ strlcat(msg, hwmsg, msgl);
+ strlcat(msg, "]", msgl);
+}
+
+/**
+ * qib_format_hwerrors - format hardware error messages for display
+ * @hwerrs hardware errors bit vector
+ * @hwerrmsgs hardware error descriptions
+ * @nhwerrmsgs number of hwerrmsgs
+ * @msg message buffer
+ * @msgl message buffer length
+ */
+void qib_format_hwerrors(u64 hwerrs, const struct qib_hwerror_msgs *hwerrmsgs,
+ size_t nhwerrmsgs, char *msg, size_t msgl)
+{
+ int i;
+
+ for (i = 0; i < nhwerrmsgs; i++)
+ if (hwerrs & hwerrmsgs[i].mask)
+ qib_format_hwmsg(msg, msgl, hwerrmsgs[i].msg);
+}
+
+static void signal_ib_event(struct qib_pportdata *ppd, enum ib_event_type ev)
+{
+ struct ib_event event;
+ struct qib_devdata *dd = ppd->dd;
+
+ event.device = &dd->verbs_dev.ibdev;
+ event.element.port_num = ppd->port;
+ event.event = ev;
+ ib_dispatch_event(&event);
+}
+
+void qib_handle_e_ibstatuschanged(struct qib_pportdata *ppd, u64 ibcs)
+{
+ struct qib_devdata *dd = ppd->dd;
+ unsigned long flags;
+ u32 lstate;
+ u8 ltstate;
+ enum ib_event_type ev = 0;
+
+ lstate = dd->f_iblink_state(ibcs); /* linkstate */
+ ltstate = dd->f_ibphys_portstate(ibcs);
+
+ /*
+ * If linkstate transitions into INIT from any of the various down
+ * states, or if it transitions from any of the up (INIT or better)
+ * states into any of the down states (except link recovery), then
+ * call the chip-specific code to take appropriate actions.
+ */
+ if (lstate >= IB_PORT_INIT && (ppd->lflags & QIBL_LINKDOWN) &&
+ ltstate == IB_PHYSPORTSTATE_LINKUP) {
+ /* transitioned to UP */
+ if (dd->f_ib_updown(ppd, 1, ibcs))
+ goto skip_ibchange; /* chip-code handled */
+ } else if (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
+ QIBL_LINKACTIVE | QIBL_IB_FORCE_NOTIFY)) {
+ if (ltstate != IB_PHYSPORTSTATE_LINKUP &&
+ ltstate <= IB_PHYSPORTSTATE_CFG_TRAIN &&
+ dd->f_ib_updown(ppd, 0, ibcs))
+ goto skip_ibchange; /* chip-code handled */
+ qib_set_uevent_bits(ppd, _QIB_EVENT_LINKDOWN_BIT);
+ }
+
+ if (lstate != IB_PORT_DOWN) {
+ /* lstate is INIT, ARMED, or ACTIVE */
+ if (lstate != IB_PORT_ACTIVE) {
+ *ppd->statusp &= ~QIB_STATUS_IB_READY;
+ if (ppd->lflags & QIBL_LINKACTIVE)
+ ev = IB_EVENT_PORT_ERR;
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ if (lstate == IB_PORT_ARMED) {
+ ppd->lflags |= QIBL_LINKARMED | QIBL_LINKV;
+ ppd->lflags &= ~(QIBL_LINKINIT |
+ QIBL_LINKDOWN | QIBL_LINKACTIVE);
+ } else {
+ ppd->lflags |= QIBL_LINKINIT | QIBL_LINKV;
+ ppd->lflags &= ~(QIBL_LINKARMED |
+ QIBL_LINKDOWN | QIBL_LINKACTIVE);
+ }
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ /* start a 75msec timer to clear symbol errors */
+ mod_timer(&ppd->symerr_clear_timer,
+ msecs_to_jiffies(75));
+ } else if (ltstate == IB_PHYSPORTSTATE_LINKUP) {
+ /* active, but not active defered */
+ qib_hol_up(ppd); /* useful only for 6120 now */
+ *ppd->statusp |=
+ QIB_STATUS_IB_READY | QIB_STATUS_IB_CONF;
+ qib_clear_symerror_on_linkup((unsigned long)ppd);
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags |= QIBL_LINKACTIVE | QIBL_LINKV;
+ ppd->lflags &= ~(QIBL_LINKINIT |
+ QIBL_LINKDOWN | QIBL_LINKARMED);
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ if (dd->flags & QIB_HAS_SEND_DMA)
+ qib_sdma_process_event(ppd,
+ qib_sdma_event_e30_go_running);
+ ev = IB_EVENT_PORT_ACTIVE;
+ dd->f_setextled(ppd, 1);
+ }
+ } else { /* down */
+ if (ppd->lflags & QIBL_LINKACTIVE)
+ ev = IB_EVENT_PORT_ERR;
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags |= QIBL_LINKDOWN | QIBL_LINKV;
+ ppd->lflags &= ~(QIBL_LINKINIT |
+ QIBL_LINKACTIVE | QIBL_LINKARMED);
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ *ppd->statusp &= ~QIB_STATUS_IB_READY;
+ }
+
+skip_ibchange:
+ ppd->lastibcstat = ibcs;
+ if (ev)
+ signal_ib_event(ppd, ev);
+ return;
+}
+
+void qib_clear_symerror_on_linkup(unsigned long opaque)
+{
+ struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
+
+ if (ppd->lflags & QIBL_LINKACTIVE)
+ return;
+
+ ppd->ibport_data.z_symbol_error_counter =
+ ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBSYMBOLERR);
+}
+
+/*
+ * Handle receive interrupts for user ctxts; this means a user
+ * process was waiting for a packet to arrive, and didn't want
+ * to poll.
+ */
+void qib_handle_urcv(struct qib_devdata *dd, u64 ctxtr)
+{
+ struct qib_ctxtdata *rcd;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&dd->uctxt_lock, flags);
+ for (i = dd->first_user_ctxt; dd->rcd && i < dd->cfgctxts; i++) {
+ if (!(ctxtr & (1ULL << i)))
+ continue;
+ rcd = dd->rcd[i];
+ if (!rcd || !rcd->cnt)
+ continue;
+
+ if (test_and_clear_bit(QIB_CTXT_WAITING_RCV, &rcd->flag)) {
+ wake_up_interruptible(&rcd->wait);
+ dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_INTRAVAIL_DIS,
+ rcd->ctxt);
+ } else if (test_and_clear_bit(QIB_CTXT_WAITING_URG,
+ &rcd->flag)) {
+ rcd->urgent++;
+ wake_up_interruptible(&rcd->wait);
+ }
+ }
+ spin_unlock_irqrestore(&dd->uctxt_lock, flags);
+}
+
+void qib_bad_intrstatus(struct qib_devdata *dd)
+{
+ static int allbits;
+
+ /* separate routine, for better optimization of qib_intr() */
+
+ /*
+ * We print the message and disable interrupts, in hope of
+ * having a better chance of debugging the problem.
+ */
+ qib_dev_err(dd, "Read of chip interrupt status failed"
+ " disabling interrupts\n");
+ if (allbits++) {
+ /* disable interrupt delivery, something is very wrong */
+ if (allbits == 2)
+ dd->f_set_intr_state(dd, 0);
+ if (allbits == 3) {
+ qib_dev_err(dd, "2nd bad interrupt status, "
+ "unregistering interrupts\n");
+ dd->flags |= QIB_BADINTR;
+ dd->flags &= ~QIB_INITTED;
+ dd->f_free_irq(dd);
+ }
+ }
+}
diff --git a/drivers/infiniband/hw/qib/qib_keys.c b/drivers/infiniband/hw/qib/qib_keys.c
new file mode 100644
index 000000000000..4b80eb153d57
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_keys.c
@@ -0,0 +1,328 @@
+/*
+ * Copyright (c) 2006, 2007, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "qib.h"
+
+/**
+ * qib_alloc_lkey - allocate an lkey
+ * @rkt: lkey table in which to allocate the lkey
+ * @mr: memory region that this lkey protects
+ *
+ * Returns 1 if successful, otherwise returns 0.
+ */
+
+int qib_alloc_lkey(struct qib_lkey_table *rkt, struct qib_mregion *mr)
+{
+ unsigned long flags;
+ u32 r;
+ u32 n;
+ int ret;
+
+ spin_lock_irqsave(&rkt->lock, flags);
+
+ /* Find the next available LKEY */
+ r = rkt->next;
+ n = r;
+ for (;;) {
+ if (rkt->table[r] == NULL)
+ break;
+ r = (r + 1) & (rkt->max - 1);
+ if (r == n) {
+ spin_unlock_irqrestore(&rkt->lock, flags);
+ ret = 0;
+ goto bail;
+ }
+ }
+ rkt->next = (r + 1) & (rkt->max - 1);
+ /*
+ * Make sure lkey is never zero which is reserved to indicate an
+ * unrestricted LKEY.
+ */
+ rkt->gen++;
+ mr->lkey = (r << (32 - ib_qib_lkey_table_size)) |
+ ((((1 << (24 - ib_qib_lkey_table_size)) - 1) & rkt->gen)
+ << 8);
+ if (mr->lkey == 0) {
+ mr->lkey |= 1 << 8;
+ rkt->gen++;
+ }
+ rkt->table[r] = mr;
+ spin_unlock_irqrestore(&rkt->lock, flags);
+
+ ret = 1;
+
+bail:
+ return ret;
+}
+
+/**
+ * qib_free_lkey - free an lkey
+ * @rkt: table from which to free the lkey
+ * @lkey: lkey id to free
+ */
+int qib_free_lkey(struct qib_ibdev *dev, struct qib_mregion *mr)
+{
+ unsigned long flags;
+ u32 lkey = mr->lkey;
+ u32 r;
+ int ret;
+
+ spin_lock_irqsave(&dev->lk_table.lock, flags);
+ if (lkey == 0) {
+ if (dev->dma_mr && dev->dma_mr == mr) {
+ ret = atomic_read(&dev->dma_mr->refcount);
+ if (!ret)
+ dev->dma_mr = NULL;
+ } else
+ ret = 0;
+ } else {
+ r = lkey >> (32 - ib_qib_lkey_table_size);
+ ret = atomic_read(&dev->lk_table.table[r]->refcount);
+ if (!ret)
+ dev->lk_table.table[r] = NULL;
+ }
+ spin_unlock_irqrestore(&dev->lk_table.lock, flags);
+
+ if (ret)
+ ret = -EBUSY;
+ return ret;
+}
+
+/**
+ * qib_lkey_ok - check IB SGE for validity and initialize
+ * @rkt: table containing lkey to check SGE against
+ * @isge: outgoing internal SGE
+ * @sge: SGE to check
+ * @acc: access flags
+ *
+ * Return 1 if valid and successful, otherwise returns 0.
+ *
+ * Check the IB SGE for validity and initialize our internal version
+ * of it.
+ */
+int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
+ struct qib_sge *isge, struct ib_sge *sge, int acc)
+{
+ struct qib_mregion *mr;
+ unsigned n, m;
+ size_t off;
+ int ret = 0;
+ unsigned long flags;
+
+ /*
+ * We use LKEY == zero for kernel virtual addresses
+ * (see qib_get_dma_mr and qib_dma.c).
+ */
+ spin_lock_irqsave(&rkt->lock, flags);
+ if (sge->lkey == 0) {
+ struct qib_ibdev *dev = to_idev(pd->ibpd.device);
+
+ if (pd->user)
+ goto bail;
+ if (!dev->dma_mr)
+ goto bail;
+ atomic_inc(&dev->dma_mr->refcount);
+ isge->mr = dev->dma_mr;
+ isge->vaddr = (void *) sge->addr;
+ isge->length = sge->length;
+ isge->sge_length = sge->length;
+ isge->m = 0;
+ isge->n = 0;
+ goto ok;
+ }
+ mr = rkt->table[(sge->lkey >> (32 - ib_qib_lkey_table_size))];
+ if (unlikely(mr == NULL || mr->lkey != sge->lkey ||
+ mr->pd != &pd->ibpd))
+ goto bail;
+
+ off = sge->addr - mr->user_base;
+ if (unlikely(sge->addr < mr->user_base ||
+ off + sge->length > mr->length ||
+ (mr->access_flags & acc) != acc))
+ goto bail;
+
+ off += mr->offset;
+ m = 0;
+ n = 0;
+ while (off >= mr->map[m]->segs[n].length) {
+ off -= mr->map[m]->segs[n].length;
+ n++;
+ if (n >= QIB_SEGSZ) {
+ m++;
+ n = 0;
+ }
+ }
+ atomic_inc(&mr->refcount);
+ isge->mr = mr;
+ isge->vaddr = mr->map[m]->segs[n].vaddr + off;
+ isge->length = mr->map[m]->segs[n].length - off;
+ isge->sge_length = sge->length;
+ isge->m = m;
+ isge->n = n;
+ok:
+ ret = 1;
+bail:
+ spin_unlock_irqrestore(&rkt->lock, flags);
+ return ret;
+}
+
+/**
+ * qib_rkey_ok - check the IB virtual address, length, and RKEY
+ * @dev: infiniband device
+ * @ss: SGE state
+ * @len: length of data
+ * @vaddr: virtual address to place data
+ * @rkey: rkey to check
+ * @acc: access flags
+ *
+ * Return 1 if successful, otherwise 0.
+ */
+int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
+ u32 len, u64 vaddr, u32 rkey, int acc)
+{
+ struct qib_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
+ struct qib_mregion *mr;
+ unsigned n, m;
+ size_t off;
+ int ret = 0;
+ unsigned long flags;
+
+ /*
+ * We use RKEY == zero for kernel virtual addresses
+ * (see qib_get_dma_mr and qib_dma.c).
+ */
+ spin_lock_irqsave(&rkt->lock, flags);
+ if (rkey == 0) {
+ struct qib_pd *pd = to_ipd(qp->ibqp.pd);
+ struct qib_ibdev *dev = to_idev(pd->ibpd.device);
+
+ if (pd->user)
+ goto bail;
+ if (!dev->dma_mr)
+ goto bail;
+ atomic_inc(&dev->dma_mr->refcount);
+ sge->mr = dev->dma_mr;
+ sge->vaddr = (void *) vaddr;
+ sge->length = len;
+ sge->sge_length = len;
+ sge->m = 0;
+ sge->n = 0;
+ goto ok;
+ }
+
+ mr = rkt->table[(rkey >> (32 - ib_qib_lkey_table_size))];
+ if (unlikely(mr == NULL || mr->lkey != rkey || qp->ibqp.pd != mr->pd))
+ goto bail;
+
+ off = vaddr - mr->iova;
+ if (unlikely(vaddr < mr->iova || off + len > mr->length ||
+ (mr->access_flags & acc) == 0))
+ goto bail;
+
+ off += mr->offset;
+ m = 0;
+ n = 0;
+ while (off >= mr->map[m]->segs[n].length) {
+ off -= mr->map[m]->segs[n].length;
+ n++;
+ if (n >= QIB_SEGSZ) {
+ m++;
+ n = 0;
+ }
+ }
+ atomic_inc(&mr->refcount);
+ sge->mr = mr;
+ sge->vaddr = mr->map[m]->segs[n].vaddr + off;
+ sge->length = mr->map[m]->segs[n].length - off;
+ sge->sge_length = len;
+ sge->m = m;
+ sge->n = n;
+ok:
+ ret = 1;
+bail:
+ spin_unlock_irqrestore(&rkt->lock, flags);
+ return ret;
+}
+
+/*
+ * Initialize the memory region specified by the work reqeust.
+ */
+int qib_fast_reg_mr(struct qib_qp *qp, struct ib_send_wr *wr)
+{
+ struct qib_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
+ struct qib_pd *pd = to_ipd(qp->ibqp.pd);
+ struct qib_mregion *mr;
+ u32 rkey = wr->wr.fast_reg.rkey;
+ unsigned i, n, m;
+ int ret = -EINVAL;
+ unsigned long flags;
+ u64 *page_list;
+ size_t ps;
+
+ spin_lock_irqsave(&rkt->lock, flags);
+ if (pd->user || rkey == 0)
+ goto bail;
+
+ mr = rkt->table[(rkey >> (32 - ib_qib_lkey_table_size))];
+ if (unlikely(mr == NULL || qp->ibqp.pd != mr->pd))
+ goto bail;
+
+ if (wr->wr.fast_reg.page_list_len > mr->max_segs)
+ goto bail;
+
+ ps = 1UL << wr->wr.fast_reg.page_shift;
+ if (wr->wr.fast_reg.length > ps * wr->wr.fast_reg.page_list_len)
+ goto bail;
+
+ mr->user_base = wr->wr.fast_reg.iova_start;
+ mr->iova = wr->wr.fast_reg.iova_start;
+ mr->lkey = rkey;
+ mr->length = wr->wr.fast_reg.length;
+ mr->access_flags = wr->wr.fast_reg.access_flags;
+ page_list = wr->wr.fast_reg.page_list->page_list;
+ m = 0;
+ n = 0;
+ for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
+ mr->map[m]->segs[n].vaddr = (void *) page_list[i];
+ mr->map[m]->segs[n].length = ps;
+ if (++n == QIB_SEGSZ) {
+ m++;
+ n = 0;
+ }
+ }
+
+ ret = 0;
+bail:
+ spin_unlock_irqrestore(&rkt->lock, flags);
+ return ret;
+}
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c
new file mode 100644
index 000000000000..94b0d1f3a8f0
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_mad.c
@@ -0,0 +1,2173 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
+ * All rights reserved.
+ * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <rdma/ib_smi.h>
+
+#include "qib.h"
+#include "qib_mad.h"
+
+static int reply(struct ib_smp *smp)
+{
+ /*
+ * The verbs framework will handle the directed/LID route
+ * packet changes.
+ */
+ smp->method = IB_MGMT_METHOD_GET_RESP;
+ if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
+ smp->status |= IB_SMP_DIRECTION;
+ return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
+}
+
+static void qib_send_trap(struct qib_ibport *ibp, void *data, unsigned len)
+{
+ struct ib_mad_send_buf *send_buf;
+ struct ib_mad_agent *agent;
+ struct ib_smp *smp;
+ int ret;
+ unsigned long flags;
+ unsigned long timeout;
+
+ agent = ibp->send_agent;
+ if (!agent)
+ return;
+
+ /* o14-3.2.1 */
+ if (!(ppd_from_ibp(ibp)->lflags & QIBL_LINKACTIVE))
+ return;
+
+ /* o14-2 */
+ if (ibp->trap_timeout && time_before(jiffies, ibp->trap_timeout))
+ return;
+
+ send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR,
+ IB_MGMT_MAD_DATA, GFP_ATOMIC);
+ if (IS_ERR(send_buf))
+ return;
+
+ smp = send_buf->mad;
+ smp->base_version = IB_MGMT_BASE_VERSION;
+ smp->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
+ smp->class_version = 1;
+ smp->method = IB_MGMT_METHOD_TRAP;
+ ibp->tid++;
+ smp->tid = cpu_to_be64(ibp->tid);
+ smp->attr_id = IB_SMP_ATTR_NOTICE;
+ /* o14-1: smp->mkey = 0; */
+ memcpy(smp->data, data, len);
+
+ spin_lock_irqsave(&ibp->lock, flags);
+ if (!ibp->sm_ah) {
+ if (ibp->sm_lid != be16_to_cpu(IB_LID_PERMISSIVE)) {
+ struct ib_ah *ah;
+ struct ib_ah_attr attr;
+
+ memset(&attr, 0, sizeof attr);
+ attr.dlid = ibp->sm_lid;
+ attr.port_num = ppd_from_ibp(ibp)->port;
+ ah = ib_create_ah(ibp->qp0->ibqp.pd, &attr);
+ if (IS_ERR(ah))
+ ret = -EINVAL;
+ else {
+ send_buf->ah = ah;
+ ibp->sm_ah = to_iah(ah);
+ ret = 0;
+ }
+ } else
+ ret = -EINVAL;
+ } else {
+ send_buf->ah = &ibp->sm_ah->ibah;
+ ret = 0;
+ }
+ spin_unlock_irqrestore(&ibp->lock, flags);
+
+ if (!ret)
+ ret = ib_post_send_mad(send_buf, NULL);
+ if (!ret) {
+ /* 4.096 usec. */
+ timeout = (4096 * (1UL << ibp->subnet_timeout)) / 1000;
+ ibp->trap_timeout = jiffies + usecs_to_jiffies(timeout);
+ } else {
+ ib_free_send_mad(send_buf);
+ ibp->trap_timeout = 0;
+ }
+}
+
+/*
+ * Send a bad [PQ]_Key trap (ch. 14.3.8).
+ */
+void qib_bad_pqkey(struct qib_ibport *ibp, __be16 trap_num, u32 key, u32 sl,
+ u32 qp1, u32 qp2, __be16 lid1, __be16 lid2)
+{
+ struct ib_mad_notice_attr data;
+
+ if (trap_num == IB_NOTICE_TRAP_BAD_PKEY)
+ ibp->pkey_violations++;
+ else
+ ibp->qkey_violations++;
+ ibp->n_pkt_drops++;
+
+ /* Send violation trap */
+ data.generic_type = IB_NOTICE_TYPE_SECURITY;
+ data.prod_type_msb = 0;
+ data.prod_type_lsb = IB_NOTICE_PROD_CA;
+ data.trap_num = trap_num;
+ data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
+ data.toggle_count = 0;
+ memset(&data.details, 0, sizeof data.details);
+ data.details.ntc_257_258.lid1 = lid1;
+ data.details.ntc_257_258.lid2 = lid2;
+ data.details.ntc_257_258.key = cpu_to_be32(key);
+ data.details.ntc_257_258.sl_qp1 = cpu_to_be32((sl << 28) | qp1);
+ data.details.ntc_257_258.qp2 = cpu_to_be32(qp2);
+
+ qib_send_trap(ibp, &data, sizeof data);
+}
+
+/*
+ * Send a bad M_Key trap (ch. 14.3.9).
+ */
+static void qib_bad_mkey(struct qib_ibport *ibp, struct ib_smp *smp)
+{
+ struct ib_mad_notice_attr data;
+
+ /* Send violation trap */
+ data.generic_type = IB_NOTICE_TYPE_SECURITY;
+ data.prod_type_msb = 0;
+ data.prod_type_lsb = IB_NOTICE_PROD_CA;
+ data.trap_num = IB_NOTICE_TRAP_BAD_MKEY;
+ data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
+ data.toggle_count = 0;
+ memset(&data.details, 0, sizeof data.details);
+ data.details.ntc_256.lid = data.issuer_lid;
+ data.details.ntc_256.method = smp->method;
+ data.details.ntc_256.attr_id = smp->attr_id;
+ data.details.ntc_256.attr_mod = smp->attr_mod;
+ data.details.ntc_256.mkey = smp->mkey;
+ if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
+ u8 hop_cnt;
+
+ data.details.ntc_256.dr_slid = smp->dr_slid;
+ data.details.ntc_256.dr_trunc_hop = IB_NOTICE_TRAP_DR_NOTICE;
+ hop_cnt = smp->hop_cnt;
+ if (hop_cnt > ARRAY_SIZE(data.details.ntc_256.dr_rtn_path)) {
+ data.details.ntc_256.dr_trunc_hop |=
+ IB_NOTICE_TRAP_DR_TRUNC;
+ hop_cnt = ARRAY_SIZE(data.details.ntc_256.dr_rtn_path);
+ }
+ data.details.ntc_256.dr_trunc_hop |= hop_cnt;
+ memcpy(data.details.ntc_256.dr_rtn_path, smp->return_path,
+ hop_cnt);
+ }
+
+ qib_send_trap(ibp, &data, sizeof data);
+}
+
+/*
+ * Send a Port Capability Mask Changed trap (ch. 14.3.11).
+ */
+void qib_cap_mask_chg(struct qib_ibport *ibp)
+{
+ struct ib_mad_notice_attr data;
+
+ data.generic_type = IB_NOTICE_TYPE_INFO;
+ data.prod_type_msb = 0;
+ data.prod_type_lsb = IB_NOTICE_PROD_CA;
+ data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG;
+ data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
+ data.toggle_count = 0;
+ memset(&data.details, 0, sizeof data.details);
+ data.details.ntc_144.lid = data.issuer_lid;
+ data.details.ntc_144.new_cap_mask = cpu_to_be32(ibp->port_cap_flags);
+
+ qib_send_trap(ibp, &data, sizeof data);
+}
+
+/*
+ * Send a System Image GUID Changed trap (ch. 14.3.12).
+ */
+void qib_sys_guid_chg(struct qib_ibport *ibp)
+{
+ struct ib_mad_notice_attr data;
+
+ data.generic_type = IB_NOTICE_TYPE_INFO;
+ data.prod_type_msb = 0;
+ data.prod_type_lsb = IB_NOTICE_PROD_CA;
+ data.trap_num = IB_NOTICE_TRAP_SYS_GUID_CHG;
+ data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
+ data.toggle_count = 0;
+ memset(&data.details, 0, sizeof data.details);
+ data.details.ntc_145.lid = data.issuer_lid;
+ data.details.ntc_145.new_sys_guid = ib_qib_sys_image_guid;
+
+ qib_send_trap(ibp, &data, sizeof data);
+}
+
+/*
+ * Send a Node Description Changed trap (ch. 14.3.13).
+ */
+void qib_node_desc_chg(struct qib_ibport *ibp)
+{
+ struct ib_mad_notice_attr data;
+
+ data.generic_type = IB_NOTICE_TYPE_INFO;
+ data.prod_type_msb = 0;
+ data.prod_type_lsb = IB_NOTICE_PROD_CA;
+ data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG;
+ data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
+ data.toggle_count = 0;
+ memset(&data.details, 0, sizeof data.details);
+ data.details.ntc_144.lid = data.issuer_lid;
+ data.details.ntc_144.local_changes = 1;
+ data.details.ntc_144.change_flags = IB_NOTICE_TRAP_NODE_DESC_CHG;
+
+ qib_send_trap(ibp, &data, sizeof data);
+}
+
+static int subn_get_nodedescription(struct ib_smp *smp,
+ struct ib_device *ibdev)
+{
+ if (smp->attr_mod)
+ smp->status |= IB_SMP_INVALID_FIELD;
+
+ memcpy(smp->data, ibdev->node_desc, sizeof(smp->data));
+
+ return reply(smp);
+}
+
+static int subn_get_nodeinfo(struct ib_smp *smp, struct ib_device *ibdev,
+ u8 port)
+{
+ struct ib_node_info *nip = (struct ib_node_info *)&smp->data;
+ struct qib_devdata *dd = dd_from_ibdev(ibdev);
+ u32 vendor, majrev, minrev;
+ unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */
+
+ /* GUID 0 is illegal */
+ if (smp->attr_mod || pidx >= dd->num_pports ||
+ dd->pport[pidx].guid == 0)
+ smp->status |= IB_SMP_INVALID_FIELD;
+ else
+ nip->port_guid = dd->pport[pidx].guid;
+
+ nip->base_version = 1;
+ nip->class_version = 1;
+ nip->node_type = 1; /* channel adapter */
+ nip->num_ports = ibdev->phys_port_cnt;
+ /* This is already in network order */
+ nip->sys_guid = ib_qib_sys_image_guid;
+ nip->node_guid = dd->pport->guid; /* Use first-port GUID as node */
+ nip->partition_cap = cpu_to_be16(qib_get_npkeys(dd));
+ nip->device_id = cpu_to_be16(dd->deviceid);
+ majrev = dd->majrev;
+ minrev = dd->minrev;
+ nip->revision = cpu_to_be32((majrev << 16) | minrev);
+ nip->local_port_num = port;
+ vendor = dd->vendorid;
+ nip->vendor_id[0] = QIB_SRC_OUI_1;
+ nip->vendor_id[1] = QIB_SRC_OUI_2;
+ nip->vendor_id[2] = QIB_SRC_OUI_3;
+
+ return reply(smp);
+}
+
+static int subn_get_guidinfo(struct ib_smp *smp, struct ib_device *ibdev,
+ u8 port)
+{
+ struct qib_devdata *dd = dd_from_ibdev(ibdev);
+ u32 startgx = 8 * be32_to_cpu(smp->attr_mod);
+ __be64 *p = (__be64 *) smp->data;
+ unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */
+
+ /* 32 blocks of 8 64-bit GUIDs per block */
+
+ memset(smp->data, 0, sizeof(smp->data));
+
+ if (startgx == 0 && pidx < dd->num_pports) {
+ struct qib_pportdata *ppd = dd->pport + pidx;
+ struct qib_ibport *ibp = &ppd->ibport_data;
+ __be64 g = ppd->guid;
+ unsigned i;
+
+ /* GUID 0 is illegal */
+ if (g == 0)
+ smp->status |= IB_SMP_INVALID_FIELD;
+ else {
+ /* The first is a copy of the read-only HW GUID. */
+ p[0] = g;
+ for (i = 1; i < QIB_GUIDS_PER_PORT; i++)
+ p[i] = ibp->guids[i - 1];
+ }
+ } else
+ smp->status |= IB_SMP_INVALID_FIELD;
+
+ return reply(smp);
+}
+
+static void set_link_width_enabled(struct qib_pportdata *ppd, u32 w)
+{
+ (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LWID_ENB, w);
+}
+
+static void set_link_speed_enabled(struct qib_pportdata *ppd, u32 s)
+{
+ (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_SPD_ENB, s);
+}
+
+static int get_overrunthreshold(struct qib_pportdata *ppd)
+{
+ return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_OVERRUN_THRESH);
+}
+
+/**
+ * set_overrunthreshold - set the overrun threshold
+ * @ppd: the physical port data
+ * @n: the new threshold
+ *
+ * Note that this will only take effect when the link state changes.
+ */
+static int set_overrunthreshold(struct qib_pportdata *ppd, unsigned n)
+{
+ (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OVERRUN_THRESH,
+ (u32)n);
+ return 0;
+}
+
+static int get_phyerrthreshold(struct qib_pportdata *ppd)
+{
+ return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_PHYERR_THRESH);
+}
+
+/**
+ * set_phyerrthreshold - set the physical error threshold
+ * @ppd: the physical port data
+ * @n: the new threshold
+ *
+ * Note that this will only take effect when the link state changes.
+ */
+static int set_phyerrthreshold(struct qib_pportdata *ppd, unsigned n)
+{
+ (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PHYERR_THRESH,
+ (u32)n);
+ return 0;
+}
+
+/**
+ * get_linkdowndefaultstate - get the default linkdown state
+ * @ppd: the physical port data
+ *
+ * Returns zero if the default is POLL, 1 if the default is SLEEP.
+ */
+static int get_linkdowndefaultstate(struct qib_pportdata *ppd)
+{
+ return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT) ==
+ IB_LINKINITCMD_SLEEP;
+}
+
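+/*
+ * check_mkey - enforce the M_Key on an incoming SMP
+ *
+ * An expired M_Key lease is cleared first.  If the M_Key is set and
+ * does not match (subject to the M_KeyProtectBits), the violation
+ * counter is bumped, the lease timer is started if needed, a bad
+ * M_Key trap is generated and the MAD is consumed without a reply.
+ */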
+static int check_mkey(struct qib_ibport *ibp, struct ib_smp *smp, int mad_flags)
+{
+ int ret = 0;
+
+ /* Is the mkey in the process of expiring? */
+ if (ibp->mkey_lease_timeout &&
+ time_after_eq(jiffies, ibp->mkey_lease_timeout)) {
+ /* Clear timeout and mkey protection field. */
+ ibp->mkey_lease_timeout = 0;
+ ibp->mkeyprot = 0;
+ }
+
+ /* M_Key checking depends on Portinfo:M_Key_protect_bits */
+ if ((mad_flags & IB_MAD_IGNORE_MKEY) == 0 && ibp->mkey != 0 &&
+ ibp->mkey != smp->mkey &&
+ (smp->method == IB_MGMT_METHOD_SET ||
+ smp->method == IB_MGMT_METHOD_TRAP_REPRESS ||
+ (smp->method == IB_MGMT_METHOD_GET && ibp->mkeyprot >= 2))) {
+ if (ibp->mkey_violations != 0xFFFF)
+ ++ibp->mkey_violations;
+ if (!ibp->mkey_lease_timeout && ibp->mkey_lease_period)
+ ibp->mkey_lease_timeout = jiffies +
+ ibp->mkey_lease_period * HZ;
+ /* Generate a trap notice. */
+ qib_bad_mkey(ibp, smp);
+ ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
+ } else if (ibp->mkey_lease_timeout)
+ ibp->mkey_lease_timeout = 0;
+
+ return ret;
+}
+
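+/*
+ * subn_get_portinfo - report PortInfo (see ch. 14.2.5.6)
+ *
+ * A non-zero attribute modifier addresses another local port, whose
+ * M_Key is checked before any data is returned.  The M_Key itself is
+ * only reported when the protection bits allow it.
+ */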
+static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
+ u8 port)
+{
+ struct qib_devdata *dd;
+ struct qib_pportdata *ppd;
+ struct qib_ibport *ibp;
+ struct ib_port_info *pip = (struct ib_port_info *)smp->data;
+ u16 lid;
+ u8 mtu;
+ int ret;
+ u32 state;
+ u32 port_num = be32_to_cpu(smp->attr_mod);
+
+ if (port_num == 0)
+ port_num = port;
+ else {
+ if (port_num > ibdev->phys_port_cnt) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ ret = reply(smp);
+ goto bail;
+ }
+ if (port_num != port) {
+ ibp = to_iport(ibdev, port_num);
+ ret = check_mkey(ibp, smp, 0);
+ if (ret)
+ goto bail;
+ }
+ }
+
+ dd = dd_from_ibdev(ibdev);
+ /* IB numbers ports from 1, hdw from 0 */
+ ppd = dd->pport + (port_num - 1);
+ ibp = &ppd->ibport_data;
+
+ /* Clear all fields. Only set the non-zero fields. */
+ memset(smp->data, 0, sizeof(smp->data));
+
+ /* Only return the mkey if the protection field allows it. */
+ if (smp->method == IB_MGMT_METHOD_SET || ibp->mkey == smp->mkey ||
+ ibp->mkeyprot == 0)
+ pip->mkey = ibp->mkey;
+ pip->gid_prefix = ibp->gid_prefix;
+ lid = ppd->lid;
+ pip->lid = lid ? cpu_to_be16(lid) : IB_LID_PERMISSIVE;
+ pip->sm_lid = cpu_to_be16(ibp->sm_lid);
+ pip->cap_mask = cpu_to_be32(ibp->port_cap_flags);
+ /* pip->diag_code; */
+ pip->mkey_lease_period = cpu_to_be16(ibp->mkey_lease_period);
+ pip->local_port_num = port;
+ pip->link_width_enabled = ppd->link_width_enabled;
+ pip->link_width_supported = ppd->link_width_supported;
+ pip->link_width_active = ppd->link_width_active;
+ state = dd->f_iblink_state(ppd->lastibcstat);
+ pip->linkspeed_portstate = ppd->link_speed_supported << 4 | state;
+
+ pip->portphysstate_linkdown =
+ (dd->f_ibphys_portstate(ppd->lastibcstat) << 4) |
+ (get_linkdowndefaultstate(ppd) ? 1 : 2);
+ pip->mkeyprot_resv_lmc = (ibp->mkeyprot << 6) | ppd->lmc;
+ pip->linkspeedactive_enabled = (ppd->link_speed_active << 4) |
+ ppd->link_speed_enabled;
+ switch (ppd->ibmtu) {
+ default: /* something is wrong; fall through */
+ case 4096:
+ mtu = IB_MTU_4096;
+ break;
+ case 2048:
+ mtu = IB_MTU_2048;
+ break;
+ case 1024:
+ mtu = IB_MTU_1024;
+ break;
+ case 512:
+ mtu = IB_MTU_512;
+ break;
+ case 256:
+ mtu = IB_MTU_256;
+ break;
+ }
+ pip->neighbormtu_mastersmsl = (mtu << 4) | ibp->sm_sl;
+ pip->vlcap_inittype = ppd->vls_supported << 4; /* InitType = 0 */
+ pip->vl_high_limit = ibp->vl_high_limit;
+ pip->vl_arb_high_cap =
+ dd->f_get_ib_cfg(ppd, QIB_IB_CFG_VL_HIGH_CAP);
+ pip->vl_arb_low_cap =
+ dd->f_get_ib_cfg(ppd, QIB_IB_CFG_VL_LOW_CAP);
+ /* InitTypeReply = 0 */
+ pip->inittypereply_mtucap = qib_ibmtu ? qib_ibmtu : IB_MTU_4096;
+ /* HCAs ignore VLStallCount and HOQLife */
+ /* pip->vlstallcnt_hoqlife; */
+ pip->operationalvl_pei_peo_fpi_fpo =
+ dd->f_get_ib_cfg(ppd, QIB_IB_CFG_OP_VLS) << 4;
+ pip->mkey_violations = cpu_to_be16(ibp->mkey_violations);
+ /* P_KeyViolations are counted by hardware. */
+ pip->pkey_violations = cpu_to_be16(ibp->pkey_violations);
+ pip->qkey_violations = cpu_to_be16(ibp->qkey_violations);
+ /* Only the hardware GUID is supported for now */
+ pip->guid_cap = QIB_GUIDS_PER_PORT;
+ pip->clientrereg_resv_subnetto = ibp->subnet_timeout;
+ /* 32.768 usec. response time (guessing) */
+ pip->resv_resptimevalue = 3;
+ pip->localphyerrors_overrunerrors =
+ (get_phyerrthreshold(ppd) << 4) |
+ get_overrunthreshold(ppd);
+ /* pip->max_credit_hint; */
+ if (ibp->port_cap_flags & IB_PORT_LINK_LATENCY_SUP) {
+ u32 v;
+
+ v = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_LINKLATENCY);
+ pip->link_roundtrip_latency[0] = v >> 16;
+ pip->link_roundtrip_latency[1] = v >> 8;
+ pip->link_roundtrip_latency[2] = v;
+ }
+
+ ret = reply(smp);
+
+bail:
+ return ret;
+}
+
+/**
+ * get_pkeys - return the PKEY table
+ * @dd: the qlogic_ib device
+ * @port: the IB port number
+ * @pkeys: the pkey table is placed here
+ */
+static int get_pkeys(struct qib_devdata *dd, u8 port, u16 *pkeys)
+{
+ struct qib_pportdata *ppd = dd->pport + port - 1;
+ /*
+ * always a kernel context, no locking needed.
+ * If we get here with ppd set up, no need to check
+ * that rcd is valid.
+ */
+ struct qib_ctxtdata *rcd = dd->rcd[ppd->hw_pidx];
+
+ memcpy(pkeys, rcd->pkeys, sizeof(rcd->pkeys));
+
+ return 0;
+}
+
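+/*
+ * subn_get_pkeytable - report the P_Key table (block 0 only)
+ *
+ * The entries are copied from the port's kernel context and converted
+ * to network byte order before the reply is sent.
+ */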
+static int subn_get_pkeytable(struct ib_smp *smp, struct ib_device *ibdev,
+ u8 port)
+{
+ u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
+ u16 *p = (u16 *) smp->data;
+ __be16 *q = (__be16 *) smp->data;
+
+ /* 64 blocks of 32 16-bit P_Key entries */
+
+ memset(smp->data, 0, sizeof(smp->data));
+ if (startpx == 0) {
+ struct qib_devdata *dd = dd_from_ibdev(ibdev);
+ unsigned i, n = qib_get_npkeys(dd);
+
+ get_pkeys(dd, port, p);
+
+ for (i = 0; i < n; i++)
+ q[i] = cpu_to_be16(p[i]);
+ } else
+ smp->status |= IB_SMP_INVALID_FIELD;
+
+ return reply(smp);
+}
+
+static int subn_set_guidinfo(struct ib_smp *smp, struct ib_device *ibdev,
+ u8 port)
+{
+ struct qib_devdata *dd = dd_from_ibdev(ibdev);
+ u32 startgx = 8 * be32_to_cpu(smp->attr_mod);
+ __be64 *p = (__be64 *) smp->data;
+ unsigned pidx = port - 1; /* IB numbers ports from 1, hdw from 0 */
+
+ /* 32 blocks of 8 64-bit GUIDs per block */
+
+ if (startgx == 0 && pidx < dd->num_pports) {
+ struct qib_pportdata *ppd = dd->pport + pidx;
+ struct qib_ibport *ibp = &ppd->ibport_data;
+ unsigned i;
+
+ /* The first entry is read-only. */
+ for (i = 1; i < QIB_GUIDS_PER_PORT; i++)
+ ibp->guids[i - 1] = p[i];
+ } else
+ smp->status |= IB_SMP_INVALID_FIELD;
+
+ /* The only GUID we support is the first read-only entry. */
+ return subn_get_guidinfo(smp, ibdev, port);
+}
+
+/**
+ * subn_set_portinfo - set port information
+ * @smp: the incoming SM packet
+ * @ibdev: the infiniband device
+ * @port: the port on the device
+ *
+ * Set Portinfo (see ch. 14.2.5.6).
+ */
+static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
+ u8 port)
+{
+ struct ib_port_info *pip = (struct ib_port_info *)smp->data;
+ struct ib_event event;
+ struct qib_devdata *dd;
+ struct qib_pportdata *ppd;
+ struct qib_ibport *ibp;
+ char clientrereg = 0;
+ unsigned long flags;
+ u16 lid, smlid;
+ u8 lwe;
+ u8 lse;
+ u8 state;
+ u8 vls;
+ u8 msl;
+ u16 lstate;
+ int ret, ore, mtu;
+ u32 port_num = be32_to_cpu(smp->attr_mod);
+
+ if (port_num == 0)
+ port_num = port;
+ else {
+ if (port_num > ibdev->phys_port_cnt)
+ goto err;
+ /* Port attributes can only be set on the receiving port */
+ if (port_num != port)
+ goto get_only;
+ }
+
+ dd = dd_from_ibdev(ibdev);
+ /* IB numbers ports from 1, hdw from 0 */
+ ppd = dd->pport + (port_num - 1);
+ ibp = &ppd->ibport_data;
+ event.device = ibdev;
+ event.element.port_num = port;
+
+ ibp->mkey = pip->mkey;
+ ibp->gid_prefix = pip->gid_prefix;
+ ibp->mkey_lease_period = be16_to_cpu(pip->mkey_lease_period);
+
+ lid = be16_to_cpu(pip->lid);
+ /* Must be a valid unicast LID address. */
+ if (lid == 0 || lid >= QIB_MULTICAST_LID_BASE)
+ goto err;
+ if (ppd->lid != lid || ppd->lmc != (pip->mkeyprot_resv_lmc & 7)) {
+ if (ppd->lid != lid)
+ qib_set_uevent_bits(ppd, _QIB_EVENT_LID_CHANGE_BIT);
+ if (ppd->lmc != (pip->mkeyprot_resv_lmc & 7))
+ qib_set_uevent_bits(ppd, _QIB_EVENT_LMC_CHANGE_BIT);
+ qib_set_lid(ppd, lid, pip->mkeyprot_resv_lmc & 7);
+ event.event = IB_EVENT_LID_CHANGE;
+ ib_dispatch_event(&event);
+ }
+
+ smlid = be16_to_cpu(pip->sm_lid);
+ msl = pip->neighbormtu_mastersmsl & 0xF;
+ /* Must be a valid unicast LID address. */
+ if (smlid == 0 || smlid >= QIB_MULTICAST_LID_BASE)
+ goto err;
+ if (smlid != ibp->sm_lid || msl != ibp->sm_sl) {
+ spin_lock_irqsave(&ibp->lock, flags);
+ if (ibp->sm_ah) {
+ if (smlid != ibp->sm_lid)
+ ibp->sm_ah->attr.dlid = smlid;
+ if (msl != ibp->sm_sl)
+ ibp->sm_ah->attr.sl = msl;
+ }
+ spin_unlock_irqrestore(&ibp->lock, flags);
+ if (smlid != ibp->sm_lid)
+ ibp->sm_lid = smlid;
+ if (msl != ibp->sm_sl)
+ ibp->sm_sl = msl;
+ event.event = IB_EVENT_SM_CHANGE;
+ ib_dispatch_event(&event);
+ }
+
+ /* Allow 1x or 4x to be set (see 14.2.6.6). */
+ lwe = pip->link_width_enabled;
+ if (lwe) {
+ if (lwe == 0xFF)
+ lwe = ppd->link_width_supported;
+ else if (lwe >= 16 || (lwe & ~ppd->link_width_supported))
+ goto err;
+ set_link_width_enabled(ppd, lwe);
+ }
+
+ lse = pip->linkspeedactive_enabled & 0xF;
+ if (lse) {
+ /*
+ * The IB 1.2 spec only allows link speed values of
+ * 1, 3, 5, 7 and 15; IB 1.2.1 extended this to allow
+ * specific speeds.
+ */
+ if (lse == 15)
+ lse = ppd->link_speed_supported;
+ else if (lse >= 8 || (lse & ~ppd->link_speed_supported))
+ goto err;
+ set_link_speed_enabled(ppd, lse);
+ }
+
+ /* Set link down default state. */
+ switch (pip->portphysstate_linkdown & 0xF) {
+ case 0: /* NOP */
+ break;
+ case 1: /* SLEEP */
+ (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT,
+ IB_LINKINITCMD_SLEEP);
+ break;
+ case 2: /* POLL */
+ (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT,
+ IB_LINKINITCMD_POLL);
+ break;
+ default:
+ goto err;
+ }
+
+ ibp->mkeyprot = pip->mkeyprot_resv_lmc >> 6;
+ ibp->vl_high_limit = pip->vl_high_limit;
+ (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_VL_HIGH_LIMIT,
+ ibp->vl_high_limit);
+
+ mtu = ib_mtu_enum_to_int((pip->neighbormtu_mastersmsl >> 4) & 0xF);
+ if (mtu == -1)
+ goto err;
+ qib_set_mtu(ppd, mtu);
+
+ /* Set operational VLs */
+ vls = (pip->operationalvl_pei_peo_fpi_fpo >> 4) & 0xF;
+ if (vls) {
+ if (vls > ppd->vls_supported)
+ goto err;
+ (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OP_VLS, vls);
+ }
+
+ if (pip->mkey_violations == 0)
+ ibp->mkey_violations = 0;
+
+ if (pip->pkey_violations == 0)
+ ibp->pkey_violations = 0;
+
+ if (pip->qkey_violations == 0)
+ ibp->qkey_violations = 0;
+
+ ore = pip->localphyerrors_overrunerrors;
+ if (set_phyerrthreshold(ppd, (ore >> 4) & 0xF))
+ goto err;
+
+ if (set_overrunthreshold(ppd, (ore & 0xF)))
+ goto err;
+
+ ibp->subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F;
+
+ if (pip->clientrereg_resv_subnetto & 0x80) {
+ clientrereg = 1;
+ event.event = IB_EVENT_CLIENT_REREGISTER;
+ ib_dispatch_event(&event);
+ }
+
+ /*
+ * Do the port state change now that the other link parameters
+ * have been set.
+ * Changing the port physical state only makes sense if the link
+ * is down or is being set to down.
+ */
+ state = pip->linkspeed_portstate & 0xF;
+ lstate = (pip->portphysstate_linkdown >> 4) & 0xF;
+ if (lstate && !(state == IB_PORT_DOWN || state == IB_PORT_NOP))
+ goto err;
+
+ /*
+ * Only state changes of DOWN, ARM, and ACTIVE are valid
+ * and must be in the correct state to take effect (see 7.2.6).
+ */
+ switch (state) {
+ case IB_PORT_NOP:
+ if (lstate == 0)
+ break;
+ /* FALLTHROUGH */
+ case IB_PORT_DOWN:
+ if (lstate == 0)
+ lstate = QIB_IB_LINKDOWN_ONLY;
+ else if (lstate == 1)
+ lstate = QIB_IB_LINKDOWN_SLEEP;
+ else if (lstate == 2)
+ lstate = QIB_IB_LINKDOWN;
+ else if (lstate == 3)
+ lstate = QIB_IB_LINKDOWN_DISABLE;
+ else
+ goto err;
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags &= ~QIBL_LINKV;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ qib_set_linkstate(ppd, lstate);
+ /*
+ * Don't send a reply if the response would be sent
+ * through the disabled port.
+ */
+ if (lstate == QIB_IB_LINKDOWN_DISABLE && smp->hop_cnt) {
+ ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
+ goto done;
+ }
+ qib_wait_linkstate(ppd, QIBL_LINKV, 10);
+ break;
+ case IB_PORT_ARMED:
+ qib_set_linkstate(ppd, QIB_IB_LINKARM);
+ break;
+ case IB_PORT_ACTIVE:
+ qib_set_linkstate(ppd, QIB_IB_LINKACTIVE);
+ break;
+ default:
+ /* XXX We have already partially updated our state! */
+ goto err;
+ }
+
+ ret = subn_get_portinfo(smp, ibdev, port);
+
+ if (clientrereg)
+ pip->clientrereg_resv_subnetto |= 0x80;
+
+ goto done;
+
+err:
+ smp->status |= IB_SMP_INVALID_FIELD;
+get_only:
+ ret = subn_get_portinfo(smp, ibdev, port);
+done:
+ return ret;
+}
+
+/**
+ * rm_pkey - decrement the reference count for the given PKEY
+ * @ppd: the physical port data
+ * @key: the PKEY value
+ *
+ * Return true if this was the last reference and the hardware table entry
+ * needs to be changed.
+ */
+static int rm_pkey(struct qib_pportdata *ppd, u16 key)
+{
+ int i;
+ int ret;
+
+ for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
+ if (ppd->pkeys[i] != key)
+ continue;
+ if (atomic_dec_and_test(&ppd->pkeyrefs[i])) {
+ ppd->pkeys[i] = 0;
+ ret = 1;
+ goto bail;
+ }
+ break;
+ }
+
+ ret = 0;
+
+bail:
+ return ret;
+}
+
+/**
+ * add_pkey - add the given PKEY to the hardware table
+ * @ppd: the physical port data
+ * @key: the PKEY
+ *
+ * Return an error code if unable to add the entry, zero if no change,
+ * or 1 if the hardware PKEY register needs to be updated.
+ */
+static int add_pkey(struct qib_pportdata *ppd, u16 key)
+{
+ int i;
+ u16 lkey = key & 0x7FFF;
+ int any = 0;
+ int ret;
+
+ if (lkey == 0x7FFF) {
+ ret = 0;
+ goto bail;
+ }
+
+ /* Look for an empty slot or a matching PKEY. */
+ for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
+ if (!ppd->pkeys[i]) {
+ any++;
+ continue;
+ }
+ /* If it matches exactly, try to increment the ref count */
+ if (ppd->pkeys[i] == key) {
+ if (atomic_inc_return(&ppd->pkeyrefs[i]) > 1) {
+ ret = 0;
+ goto bail;
+ }
+ /* Lost the race. Look for an empty slot below. */
+ atomic_dec(&ppd->pkeyrefs[i]);
+ any++;
+ }
+ /*
+ * It makes no sense to have both the limited and unlimited
+ * PKEY set at the same time since the unlimited one will
+ * disable the limited one.
+ */
+ if ((ppd->pkeys[i] & 0x7FFF) == lkey) {
+ ret = -EEXIST;
+ goto bail;
+ }
+ }
+ if (!any) {
+ ret = -EBUSY;
+ goto bail;
+ }
+ for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
+ if (!ppd->pkeys[i] &&
+ atomic_inc_return(&ppd->pkeyrefs[i]) == 1) {
+ /* for qibstats, etc. */
+ ppd->pkeys[i] = key;
+ ret = 1;
+ goto bail;
+ }
+ }
+ ret = -EBUSY;
+
+bail:
+ return ret;
+}
+
+/**
+ * set_pkeys - set the PKEY table for the port's kernel context
+ * @dd: the qlogic_ib device
+ * @port: the IB port number
+ * @pkeys: the PKEY table
+ */
+static int set_pkeys(struct qib_devdata *dd, u8 port, u16 *pkeys)
+{
+ struct qib_pportdata *ppd;
+ struct qib_ctxtdata *rcd;
+ int i;
+ int changed = 0;
+
+ /*
+ * IB port one/two always maps to context zero/one,
+ * always a kernel context, no locking needed.
+ * If we get here with ppd set up, no need to check
+ * that rcd is valid.
+ */
+ ppd = dd->pport + (port - 1);
+ rcd = dd->rcd[ppd->hw_pidx];
+
+ for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) {
+ u16 key = pkeys[i];
+ u16 okey = rcd->pkeys[i];
+
+ if (key == okey)
+ continue;
+ /*
+ * The value of this PKEY table entry is changing.
+ * Remove the old entry in the hardware's array of PKEYs.
+ */
+ if (okey & 0x7FFF)
+ changed |= rm_pkey(ppd, okey);
+ if (key & 0x7FFF) {
+ int ret = add_pkey(ppd, key);
+
+ if (ret < 0)
+ key = 0;
+ else
+ changed |= ret;
+ }
+ rcd->pkeys[i] = key;
+ }
+ if (changed) {
+ struct ib_event event;
+
+ (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0);
+
+ event.event = IB_EVENT_PKEY_CHANGE;
+ event.device = &dd->verbs_dev.ibdev;
+ event.element.port_num = 1;
+ ib_dispatch_event(&event);
+ }
+ return 0;
+}
+
+static int subn_set_pkeytable(struct ib_smp *smp, struct ib_device *ibdev,
+ u8 port)
+{
+ u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
+ __be16 *p = (__be16 *) smp->data;
+ u16 *q = (u16 *) smp->data;
+ struct qib_devdata *dd = dd_from_ibdev(ibdev);
+ unsigned i, n = qib_get_npkeys(dd);
+
+ for (i = 0; i < n; i++)
+ q[i] = be16_to_cpu(p[i]);
+
+ if (startpx != 0 || set_pkeys(dd, port, q) != 0)
+ smp->status |= IB_SMP_INVALID_FIELD;
+
+ return subn_get_pkeytable(smp, ibdev, port);
+}
+
+static int subn_get_sl_to_vl(struct ib_smp *smp, struct ib_device *ibdev,
+ u8 port)
+{
+ struct qib_ibport *ibp = to_iport(ibdev, port);
+ u8 *p = (u8 *) smp->data;
+ unsigned i;
+
+ memset(smp->data, 0, sizeof(smp->data));
+
+ if (!(ibp->port_cap_flags & IB_PORT_SL_MAP_SUP))
+ smp->status |= IB_SMP_UNSUP_METHOD;
+ else
+ for (i = 0; i < ARRAY_SIZE(ibp->sl_to_vl); i += 2)
+ *p++ = (ibp->sl_to_vl[i] << 4) | ibp->sl_to_vl[i + 1];
+
+ return reply(smp);
+}
+
+static int subn_set_sl_to_vl(struct ib_smp *smp, struct ib_device *ibdev,
+ u8 port)
+{
+ struct qib_ibport *ibp = to_iport(ibdev, port);
+ u8 *p = (u8 *) smp->data;
+ unsigned i;
+
+ if (!(ibp->port_cap_flags & IB_PORT_SL_MAP_SUP)) {
+ smp->status |= IB_SMP_UNSUP_METHOD;
+ return reply(smp);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(ibp->sl_to_vl); i += 2, p++) {
+ ibp->sl_to_vl[i] = *p >> 4;
+ ibp->sl_to_vl[i + 1] = *p & 0xF;
+ }
+ qib_set_uevent_bits(ppd_from_ibp(to_iport(ibdev, port)),
+ _QIB_EVENT_SL2VL_CHANGE_BIT);
+
+ return subn_get_sl_to_vl(smp, ibdev, port);
+}
+
+static int subn_get_vl_arb(struct ib_smp *smp, struct ib_device *ibdev,
+ u8 port)
+{
+ unsigned which = be32_to_cpu(smp->attr_mod) >> 16;
+ struct qib_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));
+
+ memset(smp->data, 0, sizeof(smp->data));
+
+ if (ppd->vls_supported == IB_VL_VL0)
+ smp->status |= IB_SMP_UNSUP_METHOD;
+ else if (which == IB_VLARB_LOWPRI_0_31)
+ (void) ppd->dd->f_get_ib_table(ppd, QIB_IB_TBL_VL_LOW_ARB,
+ smp->data);
+ else if (which == IB_VLARB_HIGHPRI_0_31)
+ (void) ppd->dd->f_get_ib_table(ppd, QIB_IB_TBL_VL_HIGH_ARB,
+ smp->data);
+ else
+ smp->status |= IB_SMP_INVALID_FIELD;
+
+ return reply(smp);
+}
+
+static int subn_set_vl_arb(struct ib_smp *smp, struct ib_device *ibdev,
+ u8 port)
+{
+ unsigned which = be32_to_cpu(smp->attr_mod) >> 16;
+ struct qib_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));
+
+ if (ppd->vls_supported == IB_VL_VL0)
+ smp->status |= IB_SMP_UNSUP_METHOD;
+ else if (which == IB_VLARB_LOWPRI_0_31)
+ (void) ppd->dd->f_set_ib_table(ppd, QIB_IB_TBL_VL_LOW_ARB,
+ smp->data);
+ else if (which == IB_VLARB_HIGHPRI_0_31)
+ (void) ppd->dd->f_set_ib_table(ppd, QIB_IB_TBL_VL_HIGH_ARB,
+ smp->data);
+ else
+ smp->status |= IB_SMP_INVALID_FIELD;
+
+ return subn_get_vl_arb(smp, ibdev, port);
+}
+
+static int subn_trap_repress(struct ib_smp *smp, struct ib_device *ibdev,
+ u8 port)
+{
+ /*
+ * For now, we only send the trap once so no need to process this.
+ * o13-6, o13-7,
+ * o14-3.a4 The SMA shall not send any message in response to a valid
+ * SubnTrapRepress() message.
+ */
+ return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
+}
+
+static int pma_get_classportinfo(struct ib_perf *pmp,
+ struct ib_device *ibdev)
+{
+ struct ib_pma_classportinfo *p =
+ (struct ib_pma_classportinfo *)pmp->data;
+ struct qib_devdata *dd = dd_from_ibdev(ibdev);
+
+ memset(pmp->data, 0, sizeof(pmp->data));
+
+ if (pmp->attr_mod != 0)
+ pmp->status |= IB_SMP_INVALID_FIELD;
+
+ /* Note that AllPortSelect is not valid */
+ p->base_version = 1;
+ p->class_version = 1;
+ p->cap_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
+ /*
+ * Set the most significant bit of CM2 to indicate support for
+ * congestion statistics
+ */
+ p->reserved[0] = dd->psxmitwait_supported << 7;
+ /*
+ * Expected response time is 4.096 usec. * 2^18 == 1.073741824 sec.
+ */
+ p->resp_time_value = 18;
+
+ return reply((struct ib_smp *) pmp);
+}
+
+static int pma_get_portsamplescontrol(struct ib_perf *pmp,
+ struct ib_device *ibdev, u8 port)
+{
+ struct ib_pma_portsamplescontrol *p =
+ (struct ib_pma_portsamplescontrol *)pmp->data;
+ struct qib_ibdev *dev = to_idev(ibdev);
+ struct qib_devdata *dd = dd_from_dev(dev);
+ struct qib_ibport *ibp = to_iport(ibdev, port);
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+ unsigned long flags;
+ u8 port_select = p->port_select;
+
+ memset(pmp->data, 0, sizeof(pmp->data));
+
+ p->port_select = port_select;
+ if (pmp->attr_mod != 0 || port_select != port) {
+ pmp->status |= IB_SMP_INVALID_FIELD;
+ goto bail;
+ }
+ spin_lock_irqsave(&ibp->lock, flags);
+ p->tick = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_PMA_TICKS);
+ p->sample_status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
+ p->counter_width = 4; /* 32 bit counters */
+ p->counter_mask0_9 = COUNTER_MASK0_9;
+ p->sample_start = cpu_to_be32(ibp->pma_sample_start);
+ p->sample_interval = cpu_to_be32(ibp->pma_sample_interval);
+ p->tag = cpu_to_be16(ibp->pma_tag);
+ p->counter_select[0] = ibp->pma_counter_select[0];
+ p->counter_select[1] = ibp->pma_counter_select[1];
+ p->counter_select[2] = ibp->pma_counter_select[2];
+ p->counter_select[3] = ibp->pma_counter_select[3];
+ p->counter_select[4] = ibp->pma_counter_select[4];
+ spin_unlock_irqrestore(&ibp->lock, flags);
+
+bail:
+ return reply((struct ib_smp *) pmp);
+}
+
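+/*
+ * pma_set_portsamplescontrol - arm a new sample interval
+ *
+ * The sample parameters are only latched when the previous sample has
+ * completed, or when a sample is running but the counters were owned
+ * by the congestion timer; otherwise the request is ignored and the
+ * current PortSamplesControl contents are returned.
+ */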
+static int pma_set_portsamplescontrol(struct ib_perf *pmp,
+ struct ib_device *ibdev, u8 port)
+{
+ struct ib_pma_portsamplescontrol *p =
+ (struct ib_pma_portsamplescontrol *)pmp->data;
+ struct qib_ibdev *dev = to_idev(ibdev);
+ struct qib_devdata *dd = dd_from_dev(dev);
+ struct qib_ibport *ibp = to_iport(ibdev, port);
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+ unsigned long flags;
+ u8 status, xmit_flags;
+ int ret;
+
+ if (pmp->attr_mod != 0 || p->port_select != port) {
+ pmp->status |= IB_SMP_INVALID_FIELD;
+ ret = reply((struct ib_smp *) pmp);
+ goto bail;
+ }
+
+ spin_lock_irqsave(&ibp->lock, flags);
+
+ /* Port Sampling code owns the PS* HW counters */
+ xmit_flags = ppd->cong_stats.flags;
+ ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_SAMPLE;
+ status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
+ if (status == IB_PMA_SAMPLE_STATUS_DONE ||
+ (status == IB_PMA_SAMPLE_STATUS_RUNNING &&
+ xmit_flags == IB_PMA_CONG_HW_CONTROL_TIMER)) {
+ ibp->pma_sample_start = be32_to_cpu(p->sample_start);
+ ibp->pma_sample_interval = be32_to_cpu(p->sample_interval);
+ ibp->pma_tag = be16_to_cpu(p->tag);
+ ibp->pma_counter_select[0] = p->counter_select[0];
+ ibp->pma_counter_select[1] = p->counter_select[1];
+ ibp->pma_counter_select[2] = p->counter_select[2];
+ ibp->pma_counter_select[3] = p->counter_select[3];
+ ibp->pma_counter_select[4] = p->counter_select[4];
+ dd->f_set_cntr_sample(ppd, ibp->pma_sample_interval,
+ ibp->pma_sample_start);
+ }
+ spin_unlock_irqrestore(&ibp->lock, flags);
+
+ ret = pma_get_portsamplescontrol(pmp, ibdev, port);
+
+bail:
+ return ret;
+}
+
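+/*
+ * get_counter - read the hardware counter behind a counter-select value
+ *
+ * Maps the PortSamplesControl counter-select codes onto the PS*
+ * hardware port counters; unknown selectors read as zero.
+ */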
+static u64 get_counter(struct qib_ibport *ibp, struct qib_pportdata *ppd,
+ __be16 sel)
+{
+ u64 ret;
+
+ switch (sel) {
+ case IB_PMA_PORT_XMIT_DATA:
+ ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITDATA);
+ break;
+ case IB_PMA_PORT_RCV_DATA:
+ ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSRCVDATA);
+ break;
+ case IB_PMA_PORT_XMIT_PKTS:
+ ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITPKTS);
+ break;
+ case IB_PMA_PORT_RCV_PKTS:
+ ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSRCVPKTS);
+ break;
+ case IB_PMA_PORT_XMIT_WAIT:
+ ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITWAIT);
+ break;
+ default:
+ ret = 0;
+ }
+
+ return ret;
+}
+
+/* This function assumes that the xmit_wait lock is already held */
+static u64 xmit_wait_get_value_delta(struct qib_pportdata *ppd)
+{
+ u32 delta;
+
+ delta = get_counter(&ppd->ibport_data, ppd,
+ IB_PMA_PORT_XMIT_WAIT);
+ return ppd->cong_stats.counter + delta;
+}
+
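+/*
+ * cache_hw_sample_counters - snapshot the PS* hardware counters
+ *
+ * Saves the five sample counters so PortSamplesResult(Ext) queries can
+ * still be answered after the sampling hardware is handed back to the
+ * congestion timer.
+ */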
+static void cache_hw_sample_counters(struct qib_pportdata *ppd)
+{
+ struct qib_ibport *ibp = &ppd->ibport_data;
+
+ ppd->cong_stats.counter_cache.psxmitdata =
+ get_counter(ibp, ppd, IB_PMA_PORT_XMIT_DATA);
+ ppd->cong_stats.counter_cache.psrcvdata =
+ get_counter(ibp, ppd, IB_PMA_PORT_RCV_DATA);
+ ppd->cong_stats.counter_cache.psxmitpkts =
+ get_counter(ibp, ppd, IB_PMA_PORT_XMIT_PKTS);
+ ppd->cong_stats.counter_cache.psrcvpkts =
+ get_counter(ibp, ppd, IB_PMA_PORT_RCV_PKTS);
+ ppd->cong_stats.counter_cache.psxmitwait =
+ get_counter(ibp, ppd, IB_PMA_PORT_XMIT_WAIT);
+}
+
+static u64 get_cache_hw_sample_counters(struct qib_pportdata *ppd,
+ __be16 sel)
+{
+ u64 ret;
+
+ switch (sel) {
+ case IB_PMA_PORT_XMIT_DATA:
+ ret = ppd->cong_stats.counter_cache.psxmitdata;
+ break;
+ case IB_PMA_PORT_RCV_DATA:
+ ret = ppd->cong_stats.counter_cache.psrcvdata;
+ break;
+ case IB_PMA_PORT_XMIT_PKTS:
+ ret = ppd->cong_stats.counter_cache.psxmitpkts;
+ break;
+ case IB_PMA_PORT_RCV_PKTS:
+ ret = ppd->cong_stats.counter_cache.psrcvpkts;
+ break;
+ case IB_PMA_PORT_XMIT_WAIT:
+ ret = ppd->cong_stats.counter_cache.psxmitwait;
+ break;
+ default:
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static int pma_get_portsamplesresult(struct ib_perf *pmp,
+ struct ib_device *ibdev, u8 port)
+{
+ struct ib_pma_portsamplesresult *p =
+ (struct ib_pma_portsamplesresult *)pmp->data;
+ struct qib_ibdev *dev = to_idev(ibdev);
+ struct qib_devdata *dd = dd_from_dev(dev);
+ struct qib_ibport *ibp = to_iport(ibdev, port);
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+ unsigned long flags;
+ u8 status;
+ int i;
+
+ memset(pmp->data, 0, sizeof(pmp->data));
+ spin_lock_irqsave(&ibp->lock, flags);
+ p->tag = cpu_to_be16(ibp->pma_tag);
+ if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_TIMER)
+ p->sample_status = IB_PMA_SAMPLE_STATUS_DONE;
+ else {
+ status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
+ p->sample_status = cpu_to_be16(status);
+ if (status == IB_PMA_SAMPLE_STATUS_DONE) {
+ cache_hw_sample_counters(ppd);
+ ppd->cong_stats.counter =
+ xmit_wait_get_value_delta(ppd);
+ dd->f_set_cntr_sample(ppd,
+ QIB_CONG_TIMER_PSINTERVAL, 0);
+ ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;
+ }
+ }
+ for (i = 0; i < ARRAY_SIZE(ibp->pma_counter_select); i++)
+ p->counter[i] = cpu_to_be32(
+ get_cache_hw_sample_counters(
+ ppd, ibp->pma_counter_select[i]));
+ spin_unlock_irqrestore(&ibp->lock, flags);
+
+ return reply((struct ib_smp *) pmp);
+}
+
+static int pma_get_portsamplesresult_ext(struct ib_perf *pmp,
+ struct ib_device *ibdev, u8 port)
+{
+ struct ib_pma_portsamplesresult_ext *p =
+ (struct ib_pma_portsamplesresult_ext *)pmp->data;
+ struct qib_ibdev *dev = to_idev(ibdev);
+ struct qib_devdata *dd = dd_from_dev(dev);
+ struct qib_ibport *ibp = to_iport(ibdev, port);
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+ unsigned long flags;
+ u8 status;
+ int i;
+
+ /* Port Sampling code owns the PS* HW counters */
+ memset(pmp->data, 0, sizeof(pmp->data));
+ spin_lock_irqsave(&ibp->lock, flags);
+ p->tag = cpu_to_be16(ibp->pma_tag);
+ if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_TIMER)
+ p->sample_status = IB_PMA_SAMPLE_STATUS_DONE;
+ else {
+ status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
+ p->sample_status = cpu_to_be16(status);
+ /* 64 bits */
+ p->extended_width = cpu_to_be32(0x80000000);
+ if (status == IB_PMA_SAMPLE_STATUS_DONE) {
+ cache_hw_sample_counters(ppd);
+ ppd->cong_stats.counter =
+ xmit_wait_get_value_delta(ppd);
+ dd->f_set_cntr_sample(ppd,
+ QIB_CONG_TIMER_PSINTERVAL, 0);
+ ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;
+ }
+ }
+ for (i = 0; i < ARRAY_SIZE(ibp->pma_counter_select); i++)
+ p->counter[i] = cpu_to_be64(
+ get_cache_hw_sample_counters(
+ ppd, ibp->pma_counter_select[i]));
+ spin_unlock_irqrestore(&ibp->lock, flags);
+
+ return reply((struct ib_smp *) pmp);
+}
+
+static int pma_get_portcounters(struct ib_perf *pmp,
+ struct ib_device *ibdev, u8 port)
+{
+ struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
+ pmp->data;
+ struct qib_ibport *ibp = to_iport(ibdev, port);
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+ struct qib_verbs_counters cntrs;
+ u8 port_select = p->port_select;
+
+ qib_get_counters(ppd, &cntrs);
+
+ /* Adjust counters for any resets done. */
+ cntrs.symbol_error_counter -= ibp->z_symbol_error_counter;
+ cntrs.link_error_recovery_counter -=
+ ibp->z_link_error_recovery_counter;
+ cntrs.link_downed_counter -= ibp->z_link_downed_counter;
+ cntrs.port_rcv_errors -= ibp->z_port_rcv_errors;
+ cntrs.port_rcv_remphys_errors -= ibp->z_port_rcv_remphys_errors;
+ cntrs.port_xmit_discards -= ibp->z_port_xmit_discards;
+ cntrs.port_xmit_data -= ibp->z_port_xmit_data;
+ cntrs.port_rcv_data -= ibp->z_port_rcv_data;
+ cntrs.port_xmit_packets -= ibp->z_port_xmit_packets;
+ cntrs.port_rcv_packets -= ibp->z_port_rcv_packets;
+ cntrs.local_link_integrity_errors -=
+ ibp->z_local_link_integrity_errors;
+ cntrs.excessive_buffer_overrun_errors -=
+ ibp->z_excessive_buffer_overrun_errors;
+ cntrs.vl15_dropped -= ibp->z_vl15_dropped;
+ cntrs.vl15_dropped += ibp->n_vl15_dropped;
+
+ memset(pmp->data, 0, sizeof(pmp->data));
+
+ p->port_select = port_select;
+ if (pmp->attr_mod != 0 || port_select != port)
+ pmp->status |= IB_SMP_INVALID_FIELD;
+
+ if (cntrs.symbol_error_counter > 0xFFFFUL)
+ p->symbol_error_counter = cpu_to_be16(0xFFFF);
+ else
+ p->symbol_error_counter =
+ cpu_to_be16((u16)cntrs.symbol_error_counter);
+ if (cntrs.link_error_recovery_counter > 0xFFUL)
+ p->link_error_recovery_counter = 0xFF;
+ else
+ p->link_error_recovery_counter =
+ (u8)cntrs.link_error_recovery_counter;
+ if (cntrs.link_downed_counter > 0xFFUL)
+ p->link_downed_counter = 0xFF;
+ else
+ p->link_downed_counter = (u8)cntrs.link_downed_counter;
+ if (cntrs.port_rcv_errors > 0xFFFFUL)
+ p->port_rcv_errors = cpu_to_be16(0xFFFF);
+ else
+ p->port_rcv_errors =
+ cpu_to_be16((u16) cntrs.port_rcv_errors);
+ if (cntrs.port_rcv_remphys_errors > 0xFFFFUL)
+ p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF);
+ else
+ p->port_rcv_remphys_errors =
+ cpu_to_be16((u16)cntrs.port_rcv_remphys_errors);
+ if (cntrs.port_xmit_discards > 0xFFFFUL)
+ p->port_xmit_discards = cpu_to_be16(0xFFFF);
+ else
+ p->port_xmit_discards =
+ cpu_to_be16((u16)cntrs.port_xmit_discards);
+ if (cntrs.local_link_integrity_errors > 0xFUL)
+ cntrs.local_link_integrity_errors = 0xFUL;
+ if (cntrs.excessive_buffer_overrun_errors > 0xFUL)
+ cntrs.excessive_buffer_overrun_errors = 0xFUL;
+ p->lli_ebor_errors = (cntrs.local_link_integrity_errors << 4) |
+ cntrs.excessive_buffer_overrun_errors;
+ if (cntrs.vl15_dropped > 0xFFFFUL)
+ p->vl15_dropped = cpu_to_be16(0xFFFF);
+ else
+ p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped);
+ if (cntrs.port_xmit_data > 0xFFFFFFFFUL)
+ p->port_xmit_data = cpu_to_be32(0xFFFFFFFF);
+ else
+ p->port_xmit_data = cpu_to_be32((u32)cntrs.port_xmit_data);
+ if (cntrs.port_rcv_data > 0xFFFFFFFFUL)
+ p->port_rcv_data = cpu_to_be32(0xFFFFFFFF);
+ else
+ p->port_rcv_data = cpu_to_be32((u32)cntrs.port_rcv_data);
+ if (cntrs.port_xmit_packets > 0xFFFFFFFFUL)
+ p->port_xmit_packets = cpu_to_be32(0xFFFFFFFF);
+ else
+ p->port_xmit_packets =
+ cpu_to_be32((u32)cntrs.port_xmit_packets);
+ if (cntrs.port_rcv_packets > 0xFFFFFFFFUL)
+ p->port_rcv_packets = cpu_to_be32(0xFFFFFFFF);
+ else
+ p->port_rcv_packets =
+ cpu_to_be32((u32) cntrs.port_rcv_packets);
+
+ return reply((struct ib_smp *) pmp);
+}
+
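+/*
+ * pma_get_portcounters_cong - report the vendor congestion counters
+ *
+ * The port is selected via the attribute modifier and the counters,
+ * including PortXmitWait, are returned as 64-bit values adjusted for
+ * any previous counter resets.
+ */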
+static int pma_get_portcounters_cong(struct ib_perf *pmp,
+ struct ib_device *ibdev, u8 port)
+{
+ /* Congestion PMA packets start at offset 24 not 64 */
+ struct ib_pma_portcounters_cong *p =
+ (struct ib_pma_portcounters_cong *)pmp->reserved;
+ struct qib_verbs_counters cntrs;
+ struct qib_ibport *ibp = to_iport(ibdev, port);
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+ struct qib_devdata *dd = dd_from_ppd(ppd);
+ u32 port_select = be32_to_cpu(pmp->attr_mod) & 0xFF;
+ u64 xmit_wait_counter;
+ unsigned long flags;
+
+ /*
+ * This check is performed only in the GET method because the
+ * SET method ends up calling this anyway.
+ */
+ if (!dd->psxmitwait_supported)
+ pmp->status |= IB_SMP_UNSUP_METH_ATTR;
+ if (port_select != port)
+ pmp->status |= IB_SMP_INVALID_FIELD;
+
+ qib_get_counters(ppd, &cntrs);
+ spin_lock_irqsave(&ppd->ibport_data.lock, flags);
+ xmit_wait_counter = xmit_wait_get_value_delta(ppd);
+ spin_unlock_irqrestore(&ppd->ibport_data.lock, flags);
+
+ /* Adjust counters for any resets done. */
+ cntrs.symbol_error_counter -= ibp->z_symbol_error_counter;
+ cntrs.link_error_recovery_counter -=
+ ibp->z_link_error_recovery_counter;
+ cntrs.link_downed_counter -= ibp->z_link_downed_counter;
+ cntrs.port_rcv_errors -= ibp->z_port_rcv_errors;
+ cntrs.port_rcv_remphys_errors -=
+ ibp->z_port_rcv_remphys_errors;
+ cntrs.port_xmit_discards -= ibp->z_port_xmit_discards;
+ cntrs.local_link_integrity_errors -=
+ ibp->z_local_link_integrity_errors;
+ cntrs.excessive_buffer_overrun_errors -=
+ ibp->z_excessive_buffer_overrun_errors;
+ cntrs.vl15_dropped -= ibp->z_vl15_dropped;
+ cntrs.vl15_dropped += ibp->n_vl15_dropped;
+ cntrs.port_xmit_data -= ibp->z_port_xmit_data;
+ cntrs.port_rcv_data -= ibp->z_port_rcv_data;
+ cntrs.port_xmit_packets -= ibp->z_port_xmit_packets;
+ cntrs.port_rcv_packets -= ibp->z_port_rcv_packets;
+
+ memset(pmp->reserved, 0, sizeof(pmp->reserved) +
+ sizeof(pmp->data));
+
+ /*
+ * Set top 3 bits to indicate interval in picoseconds in
+ * remaining bits.
+ */
+ p->port_check_rate =
+ cpu_to_be16((QIB_XMIT_RATE_PICO << 13) |
+ (dd->psxmitwait_check_rate &
+ ~(QIB_XMIT_RATE_PICO << 13)));
+ p->port_adr_events = cpu_to_be64(0);
+ p->port_xmit_wait = cpu_to_be64(xmit_wait_counter);
+ p->port_xmit_data = cpu_to_be64(cntrs.port_xmit_data);
+ p->port_rcv_data = cpu_to_be64(cntrs.port_rcv_data);
+ p->port_xmit_packets =
+ cpu_to_be64(cntrs.port_xmit_packets);
+ p->port_rcv_packets =
+ cpu_to_be64(cntrs.port_rcv_packets);
+ if (cntrs.symbol_error_counter > 0xFFFFUL)
+ p->symbol_error_counter = cpu_to_be16(0xFFFF);
+ else
+ p->symbol_error_counter =
+ cpu_to_be16(
+ (u16)cntrs.symbol_error_counter);
+ if (cntrs.link_error_recovery_counter > 0xFFUL)
+ p->link_error_recovery_counter = 0xFF;
+ else
+ p->link_error_recovery_counter =
+ (u8)cntrs.link_error_recovery_counter;
+ if (cntrs.link_downed_counter > 0xFFUL)
+ p->link_downed_counter = 0xFF;
+ else
+ p->link_downed_counter =
+ (u8)cntrs.link_downed_counter;
+ if (cntrs.port_rcv_errors > 0xFFFFUL)
+ p->port_rcv_errors = cpu_to_be16(0xFFFF);
+ else
+ p->port_rcv_errors =
+ cpu_to_be16((u16) cntrs.port_rcv_errors);
+ if (cntrs.port_rcv_remphys_errors > 0xFFFFUL)
+ p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF);
+ else
+ p->port_rcv_remphys_errors =
+ cpu_to_be16(
+ (u16)cntrs.port_rcv_remphys_errors);
+ if (cntrs.port_xmit_discards > 0xFFFFUL)
+ p->port_xmit_discards = cpu_to_be16(0xFFFF);
+ else
+ p->port_xmit_discards =
+ cpu_to_be16((u16)cntrs.port_xmit_discards);
+ if (cntrs.local_link_integrity_errors > 0xFUL)
+ cntrs.local_link_integrity_errors = 0xFUL;
+ if (cntrs.excessive_buffer_overrun_errors > 0xFUL)
+ cntrs.excessive_buffer_overrun_errors = 0xFUL;
+ p->lli_ebor_errors = (cntrs.local_link_integrity_errors << 4) |
+ cntrs.excessive_buffer_overrun_errors;
+ if (cntrs.vl15_dropped > 0xFFFFUL)
+ p->vl15_dropped = cpu_to_be16(0xFFFF);
+ else
+ p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped);
+
+ return reply((struct ib_smp *)pmp);
+}
+
+static int pma_get_portcounters_ext(struct ib_perf *pmp,
+ struct ib_device *ibdev, u8 port)
+{
+ struct ib_pma_portcounters_ext *p =
+ (struct ib_pma_portcounters_ext *)pmp->data;
+ struct qib_ibport *ibp = to_iport(ibdev, port);
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+ u64 swords, rwords, spkts, rpkts, xwait;
+ u8 port_select = p->port_select;
+
+ memset(pmp->data, 0, sizeof(pmp->data));
+
+ p->port_select = port_select;
+ if (pmp->attr_mod != 0 || port_select != port) {
+ pmp->status |= IB_SMP_INVALID_FIELD;
+ goto bail;
+ }
+
+ qib_snapshot_counters(ppd, &swords, &rwords, &spkts, &rpkts, &xwait);
+
+ /* Adjust counters for any resets done. */
+ swords -= ibp->z_port_xmit_data;
+ rwords -= ibp->z_port_rcv_data;
+ spkts -= ibp->z_port_xmit_packets;
+ rpkts -= ibp->z_port_rcv_packets;
+
+ p->port_xmit_data = cpu_to_be64(swords);
+ p->port_rcv_data = cpu_to_be64(rwords);
+ p->port_xmit_packets = cpu_to_be64(spkts);
+ p->port_rcv_packets = cpu_to_be64(rpkts);
+ p->port_unicast_xmit_packets = cpu_to_be64(ibp->n_unicast_xmit);
+ p->port_unicast_rcv_packets = cpu_to_be64(ibp->n_unicast_rcv);
+ p->port_multicast_xmit_packets = cpu_to_be64(ibp->n_multicast_xmit);
+ p->port_multicast_rcv_packets = cpu_to_be64(ibp->n_multicast_rcv);
+
+bail:
+ return reply((struct ib_smp *) pmp);
+}
+
+static int pma_set_portcounters(struct ib_perf *pmp,
+ struct ib_device *ibdev, u8 port)
+{
+ struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
+ pmp->data;
+ struct qib_ibport *ibp = to_iport(ibdev, port);
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+ struct qib_verbs_counters cntrs;
+
+ /*
+ * Since the HW doesn't support clearing counters, we save the
+ * current count and subtract it from future responses.
+ */
+ qib_get_counters(ppd, &cntrs);
+
+ if (p->counter_select & IB_PMA_SEL_SYMBOL_ERROR)
+ ibp->z_symbol_error_counter = cntrs.symbol_error_counter;
+
+ if (p->counter_select & IB_PMA_SEL_LINK_ERROR_RECOVERY)
+ ibp->z_link_error_recovery_counter =
+ cntrs.link_error_recovery_counter;
+
+ if (p->counter_select & IB_PMA_SEL_LINK_DOWNED)
+ ibp->z_link_downed_counter = cntrs.link_downed_counter;
+
+ if (p->counter_select & IB_PMA_SEL_PORT_RCV_ERRORS)
+ ibp->z_port_rcv_errors = cntrs.port_rcv_errors;
+
+ if (p->counter_select & IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS)
+ ibp->z_port_rcv_remphys_errors =
+ cntrs.port_rcv_remphys_errors;
+
+ if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DISCARDS)
+ ibp->z_port_xmit_discards = cntrs.port_xmit_discards;
+
+ if (p->counter_select & IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS)
+ ibp->z_local_link_integrity_errors =
+ cntrs.local_link_integrity_errors;
+
+ if (p->counter_select & IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS)
+ ibp->z_excessive_buffer_overrun_errors =
+ cntrs.excessive_buffer_overrun_errors;
+
+ if (p->counter_select & IB_PMA_SEL_PORT_VL15_DROPPED) {
+ ibp->n_vl15_dropped = 0;
+ ibp->z_vl15_dropped = cntrs.vl15_dropped;
+ }
+
+ if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DATA)
+ ibp->z_port_xmit_data = cntrs.port_xmit_data;
+
+ if (p->counter_select & IB_PMA_SEL_PORT_RCV_DATA)
+ ibp->z_port_rcv_data = cntrs.port_rcv_data;
+
+ if (p->counter_select & IB_PMA_SEL_PORT_XMIT_PACKETS)
+ ibp->z_port_xmit_packets = cntrs.port_xmit_packets;
+
+ if (p->counter_select & IB_PMA_SEL_PORT_RCV_PACKETS)
+ ibp->z_port_rcv_packets = cntrs.port_rcv_packets;
+
+ return pma_get_portcounters(pmp, ibdev, port);
+}
+
+static int pma_set_portcounters_cong(struct ib_perf *pmp,
+ struct ib_device *ibdev, u8 port)
+{
+ struct qib_ibport *ibp = to_iport(ibdev, port);
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+ struct qib_devdata *dd = dd_from_ppd(ppd);
+ struct qib_verbs_counters cntrs;
+ u32 counter_select = (be32_to_cpu(pmp->attr_mod) >> 24) & 0xFF;
+ int ret = 0;
+ unsigned long flags;
+
+ qib_get_counters(ppd, &cntrs);
+ /* Get counter values before we save them */
+ ret = pma_get_portcounters_cong(pmp, ibdev, port);
+
+ if (counter_select & IB_PMA_SEL_CONG_XMIT) {
+ spin_lock_irqsave(&ppd->ibport_data.lock, flags);
+ ppd->cong_stats.counter = 0;
+ dd->f_set_cntr_sample(ppd, QIB_CONG_TIMER_PSINTERVAL,
+ 0x0);
+ spin_unlock_irqrestore(&ppd->ibport_data.lock, flags);
+ }
+ if (counter_select & IB_PMA_SEL_CONG_PORT_DATA) {
+ ibp->z_port_xmit_data = cntrs.port_xmit_data;
+ ibp->z_port_rcv_data = cntrs.port_rcv_data;
+ ibp->z_port_xmit_packets = cntrs.port_xmit_packets;
+ ibp->z_port_rcv_packets = cntrs.port_rcv_packets;
+ }
+ if (counter_select & IB_PMA_SEL_CONG_ALL) {
+ ibp->z_symbol_error_counter =
+ cntrs.symbol_error_counter;
+ ibp->z_link_error_recovery_counter =
+ cntrs.link_error_recovery_counter;
+ ibp->z_link_downed_counter =
+ cntrs.link_downed_counter;
+ ibp->z_port_rcv_errors = cntrs.port_rcv_errors;
+ ibp->z_port_rcv_remphys_errors =
+ cntrs.port_rcv_remphys_errors;
+ ibp->z_port_xmit_discards =
+ cntrs.port_xmit_discards;
+ ibp->z_local_link_integrity_errors =
+ cntrs.local_link_integrity_errors;
+ ibp->z_excessive_buffer_overrun_errors =
+ cntrs.excessive_buffer_overrun_errors;
+ ibp->n_vl15_dropped = 0;
+ ibp->z_vl15_dropped = cntrs.vl15_dropped;
+ }
+
+ return ret;
+}
+
+static int pma_set_portcounters_ext(struct ib_perf *pmp,
+ struct ib_device *ibdev, u8 port)
+{
+ struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
+ pmp->data;
+ struct qib_ibport *ibp = to_iport(ibdev, port);
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+ u64 swords, rwords, spkts, rpkts, xwait;
+
+ qib_snapshot_counters(ppd, &swords, &rwords, &spkts, &rpkts, &xwait);
+
+ if (p->counter_select & IB_PMA_SELX_PORT_XMIT_DATA)
+ ibp->z_port_xmit_data = swords;
+
+ if (p->counter_select & IB_PMA_SELX_PORT_RCV_DATA)
+ ibp->z_port_rcv_data = rwords;
+
+ if (p->counter_select & IB_PMA_SELX_PORT_XMIT_PACKETS)
+ ibp->z_port_xmit_packets = spkts;
+
+ if (p->counter_select & IB_PMA_SELX_PORT_RCV_PACKETS)
+ ibp->z_port_rcv_packets = rpkts;
+
+ if (p->counter_select & IB_PMA_SELX_PORT_UNI_XMIT_PACKETS)
+ ibp->n_unicast_xmit = 0;
+
+ if (p->counter_select & IB_PMA_SELX_PORT_UNI_RCV_PACKETS)
+ ibp->n_unicast_rcv = 0;
+
+ if (p->counter_select & IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS)
+ ibp->n_multicast_xmit = 0;
+
+ if (p->counter_select & IB_PMA_SELX_PORT_MULTI_RCV_PACKETS)
+ ibp->n_multicast_rcv = 0;
+
+ return pma_get_portcounters_ext(pmp, ibdev, port);
+}
+
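+/*
+ * process_subn - dispatch an incoming subnet management packet
+ *
+ * Enforces the M_Key and then routes Get, Set and TrapRepress methods
+ * to the attribute handlers above; responses and traps are handed back
+ * to the ib_mad layer for normal processing.
+ */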
+static int process_subn(struct ib_device *ibdev, int mad_flags,
+ u8 port, struct ib_mad *in_mad,
+ struct ib_mad *out_mad)
+{
+ struct ib_smp *smp = (struct ib_smp *)out_mad;
+ struct qib_ibport *ibp = to_iport(ibdev, port);
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+ int ret;
+
+ *out_mad = *in_mad;
+ if (smp->class_version != 1) {
+ smp->status |= IB_SMP_UNSUP_VERSION;
+ ret = reply(smp);
+ goto bail;
+ }
+
+ ret = check_mkey(ibp, smp, mad_flags);
+ if (ret) {
+ u32 port_num = be32_to_cpu(smp->attr_mod);
+
+ /*
+ * For a Get/Set PortInfo addressed to another port, the
+ * M_Key on that port is normally only checked when the
+ * check on the receiving port succeeds.  Check it here as
+ * well so the error counters are incremented when the
+ * M_Key fails to match on *both* ports.
+ */
+ if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
+ (smp->method == IB_MGMT_METHOD_GET ||
+ smp->method == IB_MGMT_METHOD_SET) &&
+ port_num && port_num <= ibdev->phys_port_cnt &&
+ port != port_num)
+ (void) check_mkey(to_iport(ibdev, port_num), smp, 0);
+ goto bail;
+ }
+
+ switch (smp->method) {
+ case IB_MGMT_METHOD_GET:
+ switch (smp->attr_id) {
+ case IB_SMP_ATTR_NODE_DESC:
+ ret = subn_get_nodedescription(smp, ibdev);
+ goto bail;
+ case IB_SMP_ATTR_NODE_INFO:
+ ret = subn_get_nodeinfo(smp, ibdev, port);
+ goto bail;
+ case IB_SMP_ATTR_GUID_INFO:
+ ret = subn_get_guidinfo(smp, ibdev, port);
+ goto bail;
+ case IB_SMP_ATTR_PORT_INFO:
+ ret = subn_get_portinfo(smp, ibdev, port);
+ goto bail;
+ case IB_SMP_ATTR_PKEY_TABLE:
+ ret = subn_get_pkeytable(smp, ibdev, port);
+ goto bail;
+ case IB_SMP_ATTR_SL_TO_VL_TABLE:
+ ret = subn_get_sl_to_vl(smp, ibdev, port);
+ goto bail;
+ case IB_SMP_ATTR_VL_ARB_TABLE:
+ ret = subn_get_vl_arb(smp, ibdev, port);
+ goto bail;
+ case IB_SMP_ATTR_SM_INFO:
+ if (ibp->port_cap_flags & IB_PORT_SM_DISABLED) {
+ ret = IB_MAD_RESULT_SUCCESS |
+ IB_MAD_RESULT_CONSUMED;
+ goto bail;
+ }
+ if (ibp->port_cap_flags & IB_PORT_SM) {
+ ret = IB_MAD_RESULT_SUCCESS;
+ goto bail;
+ }
+ /* FALLTHROUGH */
+ default:
+ smp->status |= IB_SMP_UNSUP_METH_ATTR;
+ ret = reply(smp);
+ goto bail;
+ }
+
+ case IB_MGMT_METHOD_SET:
+ switch (smp->attr_id) {
+ case IB_SMP_ATTR_GUID_INFO:
+ ret = subn_set_guidinfo(smp, ibdev, port);
+ goto bail;
+ case IB_SMP_ATTR_PORT_INFO:
+ ret = subn_set_portinfo(smp, ibdev, port);
+ goto bail;
+ case IB_SMP_ATTR_PKEY_TABLE:
+ ret = subn_set_pkeytable(smp, ibdev, port);
+ goto bail;
+ case IB_SMP_ATTR_SL_TO_VL_TABLE:
+ ret = subn_set_sl_to_vl(smp, ibdev, port);
+ goto bail;
+ case IB_SMP_ATTR_VL_ARB_TABLE:
+ ret = subn_set_vl_arb(smp, ibdev, port);
+ goto bail;
+ case IB_SMP_ATTR_SM_INFO:
+ if (ibp->port_cap_flags & IB_PORT_SM_DISABLED) {
+ ret = IB_MAD_RESULT_SUCCESS |
+ IB_MAD_RESULT_CONSUMED;
+ goto bail;
+ }
+ if (ibp->port_cap_flags & IB_PORT_SM) {
+ ret = IB_MAD_RESULT_SUCCESS;
+ goto bail;
+ }
+ /* FALLTHROUGH */
+ default:
+ smp->status |= IB_SMP_UNSUP_METH_ATTR;
+ ret = reply(smp);
+ goto bail;
+ }
+
+ case IB_MGMT_METHOD_TRAP_REPRESS:
+ if (smp->attr_id == IB_SMP_ATTR_NOTICE)
+ ret = subn_trap_repress(smp, ibdev, port);
+ else {
+ smp->status |= IB_SMP_UNSUP_METH_ATTR;
+ ret = reply(smp);
+ }
+ goto bail;
+
+ case IB_MGMT_METHOD_TRAP:
+ case IB_MGMT_METHOD_REPORT:
+ case IB_MGMT_METHOD_REPORT_RESP:
+ case IB_MGMT_METHOD_GET_RESP:
+ /*
+ * The ib_mad module will call us to process responses
+ * before checking for other consumers.
+ * Just tell the caller to process it normally.
+ */
+ ret = IB_MAD_RESULT_SUCCESS;
+ goto bail;
+
+ case IB_MGMT_METHOD_SEND:
+ if (ib_get_smp_direction(smp) &&
+ smp->attr_id == QIB_VENDOR_IPG) {
+ ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PORT,
+ smp->data[0]);
+ ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
+ } else
+ ret = IB_MAD_RESULT_SUCCESS;
+ goto bail;
+
+ default:
+ smp->status |= IB_SMP_UNSUP_METHOD;
+ ret = reply(smp);
+ }
+
+bail:
+ return ret;
+}
+
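+/*
+ * process_perf - dispatch an incoming performance management packet
+ *
+ * Routes Get and Set methods to the PMA attribute handlers above;
+ * only class version 1 is supported.
+ */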
+static int process_perf(struct ib_device *ibdev, u8 port,
+ struct ib_mad *in_mad,
+ struct ib_mad *out_mad)
+{
+ struct ib_perf *pmp = (struct ib_perf *)out_mad;
+ int ret;
+
+ *out_mad = *in_mad;
+ if (pmp->class_version != 1) {
+ pmp->status |= IB_SMP_UNSUP_VERSION;
+ ret = reply((struct ib_smp *) pmp);
+ goto bail;
+ }
+
+ switch (pmp->method) {
+ case IB_MGMT_METHOD_GET:
+ switch (pmp->attr_id) {
+ case IB_PMA_CLASS_PORT_INFO:
+ ret = pma_get_classportinfo(pmp, ibdev);
+ goto bail;
+ case IB_PMA_PORT_SAMPLES_CONTROL:
+ ret = pma_get_portsamplescontrol(pmp, ibdev, port);
+ goto bail;
+ case IB_PMA_PORT_SAMPLES_RESULT:
+ ret = pma_get_portsamplesresult(pmp, ibdev, port);
+ goto bail;
+ case IB_PMA_PORT_SAMPLES_RESULT_EXT:
+ ret = pma_get_portsamplesresult_ext(pmp, ibdev, port);
+ goto bail;
+ case IB_PMA_PORT_COUNTERS:
+ ret = pma_get_portcounters(pmp, ibdev, port);
+ goto bail;
+ case IB_PMA_PORT_COUNTERS_EXT:
+ ret = pma_get_portcounters_ext(pmp, ibdev, port);
+ goto bail;
+ case IB_PMA_PORT_COUNTERS_CONG:
+ ret = pma_get_portcounters_cong(pmp, ibdev, port);
+ goto bail;
+ default:
+ pmp->status |= IB_SMP_UNSUP_METH_ATTR;
+ ret = reply((struct ib_smp *) pmp);
+ goto bail;
+ }
+
+ case IB_MGMT_METHOD_SET:
+ switch (pmp->attr_id) {
+ case IB_PMA_PORT_SAMPLES_CONTROL:
+ ret = pma_set_portsamplescontrol(pmp, ibdev, port);
+ goto bail;
+ case IB_PMA_PORT_COUNTERS:
+ ret = pma_set_portcounters(pmp, ibdev, port);
+ goto bail;
+ case IB_PMA_PORT_COUNTERS_EXT:
+ ret = pma_set_portcounters_ext(pmp, ibdev, port);
+ goto bail;
+ case IB_PMA_PORT_COUNTERS_CONG:
+ ret = pma_set_portcounters_cong(pmp, ibdev, port);
+ goto bail;
+ default:
+ pmp->status |= IB_SMP_UNSUP_METH_ATTR;
+ ret = reply((struct ib_smp *) pmp);
+ goto bail;
+ }
+
+ case IB_MGMT_METHOD_TRAP:
+ case IB_MGMT_METHOD_GET_RESP:
+ /*
+ * The ib_mad module will call us to process responses
+ * before checking for other consumers.
+ * Just tell the caller to process it normally.
+ */
+ ret = IB_MAD_RESULT_SUCCESS;
+ goto bail;
+
+ default:
+ pmp->status |= IB_SMP_UNSUP_METHOD;
+ ret = reply((struct ib_smp *) pmp);
+ }
+
+bail:
+ return ret;
+}
+
+/**
+ * qib_process_mad - process an incoming MAD packet
+ * @ibdev: the infiniband device this packet came in on
+ * @mad_flags: MAD flags
+ * @port: the port number this packet came in on
+ * @in_wc: the work completion entry for this packet
+ * @in_grh: the global route header for this packet
+ * @in_mad: the incoming MAD
+ * @out_mad: any outgoing MAD reply
+ *
+ * Returns IB_MAD_RESULT_SUCCESS if this is a MAD that we are not
+ * interested in processing.
+ *
+ * Note that the verbs framework has already done the MAD sanity checks,
+ * and hop count/pointer updating for IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
+ * MADs.
+ *
+ * This is called by the ib_mad module.
+ */
+int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
+ struct ib_wc *in_wc, struct ib_grh *in_grh,
+ struct ib_mad *in_mad, struct ib_mad *out_mad)
+{
+ int ret;
+
+ switch (in_mad->mad_hdr.mgmt_class) {
+ case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
+ case IB_MGMT_CLASS_SUBN_LID_ROUTED:
+ ret = process_subn(ibdev, mad_flags, port, in_mad, out_mad);
+ goto bail;
+
+ case IB_MGMT_CLASS_PERF_MGMT:
+ ret = process_perf(ibdev, port, in_mad, out_mad);
+ goto bail;
+
+ default:
+ ret = IB_MAD_RESULT_SUCCESS;
+ }
+
+bail:
+ return ret;
+}
+
+static void send_handler(struct ib_mad_agent *agent,
+ struct ib_mad_send_wc *mad_send_wc)
+{
+ ib_free_send_mad(mad_send_wc->send_buf);
+}
+
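+/*
+ * xmit_wait_timer_func - per-second congestion statistics timer
+ *
+ * If the PMA sampling code still owns the PS* counters, they are only
+ * reclaimed once the sample completes; otherwise the running
+ * PortXmitWait total is updated and a new sample interval is started.
+ * The timer re-arms itself one second later.
+ */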
+static void xmit_wait_timer_func(unsigned long opaque)
+{
+ struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
+ struct qib_devdata *dd = dd_from_ppd(ppd);
+ unsigned long flags;
+ u8 status;
+
+ spin_lock_irqsave(&ppd->ibport_data.lock, flags);
+ if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_SAMPLE) {
+ status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
+ if (status == IB_PMA_SAMPLE_STATUS_DONE) {
+ /* save counter cache */
+ cache_hw_sample_counters(ppd);
+ ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;
+ } else
+ goto done;
+ }
+ ppd->cong_stats.counter = xmit_wait_get_value_delta(ppd);
+ dd->f_set_cntr_sample(ppd, QIB_CONG_TIMER_PSINTERVAL, 0x0);
+done:
+ spin_unlock_irqrestore(&ppd->ibport_data.lock, flags);
+ mod_timer(&ppd->cong_stats.timer, jiffies + HZ);
+}
+
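+/*
+ * qib_create_agents - register the per-port MAD send agents
+ *
+ * Registers an SMI send agent for each port (the send handler simply
+ * frees the MAD) and starts the congestion statistics timer; on
+ * failure any agents already registered are torn down.
+ */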
+int qib_create_agents(struct qib_ibdev *dev)
+{
+ struct qib_devdata *dd = dd_from_dev(dev);
+ struct ib_mad_agent *agent;
+ struct qib_ibport *ibp;
+ int p;
+ int ret;
+
+ for (p = 0; p < dd->num_pports; p++) {
+ ibp = &dd->pport[p].ibport_data;
+ agent = ib_register_mad_agent(&dev->ibdev, p + 1, IB_QPT_SMI,
+ NULL, 0, send_handler,
+ NULL, NULL);
+ if (IS_ERR(agent)) {
+ ret = PTR_ERR(agent);
+ goto err;
+ }
+
+ /* Initialize xmit_wait structure */
+ dd->pport[p].cong_stats.counter = 0;
+ init_timer(&dd->pport[p].cong_stats.timer);
+ dd->pport[p].cong_stats.timer.function = xmit_wait_timer_func;
+ dd->pport[p].cong_stats.timer.data =
+ (unsigned long)(&dd->pport[p]);
+ dd->pport[p].cong_stats.timer.expires = 0;
+ add_timer(&dd->pport[p].cong_stats.timer);
+
+ ibp->send_agent = agent;
+ }
+
+ return 0;
+
+err:
+ for (p = 0; p < dd->num_pports; p++) {
+ ibp = &dd->pport[p].ibport_data;
+ if (ibp->send_agent) {
+ agent = ibp->send_agent;
+ ibp->send_agent = NULL;
+ ib_unregister_mad_agent(agent);
+ }
+ }
+
+ return ret;
+}
+
+void qib_free_agents(struct qib_ibdev *dev)
+{
+ struct qib_devdata *dd = dd_from_dev(dev);
+ struct ib_mad_agent *agent;
+ struct qib_ibport *ibp;
+ int p;
+
+ for (p = 0; p < dd->num_pports; p++) {
+ ibp = &dd->pport[p].ibport_data;
+ if (ibp->send_agent) {
+ agent = ibp->send_agent;
+ ibp->send_agent = NULL;
+ ib_unregister_mad_agent(agent);
+ }
+ if (ibp->sm_ah) {
+ ib_destroy_ah(&ibp->sm_ah->ibah);
+ ibp->sm_ah = NULL;
+ }
+ if (dd->pport[p].cong_stats.timer.data)
+ del_timer_sync(&dd->pport[p].cong_stats.timer);
+ }
+}
diff --git a/drivers/infiniband/hw/qib/qib_mad.h b/drivers/infiniband/hw/qib/qib_mad.h
new file mode 100644
index 000000000000..147aff9117d7
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_mad.h
@@ -0,0 +1,373 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
+ * All rights reserved.
+ * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#define IB_SMP_UNSUP_VERSION cpu_to_be16(0x0004)
+#define IB_SMP_UNSUP_METHOD cpu_to_be16(0x0008)
+#define IB_SMP_UNSUP_METH_ATTR cpu_to_be16(0x000C)
+#define IB_SMP_INVALID_FIELD cpu_to_be16(0x001C)
+
+struct ib_node_info {
+ u8 base_version;
+ u8 class_version;
+ u8 node_type;
+ u8 num_ports;
+ __be64 sys_guid;
+ __be64 node_guid;
+ __be64 port_guid;
+ __be16 partition_cap;
+ __be16 device_id;
+ __be32 revision;
+ u8 local_port_num;
+ u8 vendor_id[3];
+} __attribute__ ((packed));
+
+struct ib_mad_notice_attr {
+ u8 generic_type;
+ u8 prod_type_msb;
+ __be16 prod_type_lsb;
+ __be16 trap_num;
+ __be16 issuer_lid;
+ __be16 toggle_count;
+
+ union {
+ struct {
+ u8 details[54];
+ } raw_data;
+
+ struct {
+ __be16 reserved;
+ __be16 lid; /* where violation happened */
+ u8 port_num; /* where violation happened */
+ } __attribute__ ((packed)) ntc_129_131;
+
+ struct {
+ __be16 reserved;
+ __be16 lid; /* LID where change occurred */
+ u8 reserved2;
+ u8 local_changes; /* low bit - local changes */
+ __be32 new_cap_mask; /* new capability mask */
+ u8 reserved3;
+ u8 change_flags; /* low 3 bits only */
+ } __attribute__ ((packed)) ntc_144;
+
+ struct {
+ __be16 reserved;
+ __be16 lid; /* lid where sys guid changed */
+ __be16 reserved2;
+ __be64 new_sys_guid;
+ } __attribute__ ((packed)) ntc_145;
+
+ struct {
+ __be16 reserved;
+ __be16 lid;
+ __be16 dr_slid;
+ u8 method;
+ u8 reserved2;
+ __be16 attr_id;
+ __be32 attr_mod;
+ __be64 mkey;
+ u8 reserved3;
+ u8 dr_trunc_hop;
+ u8 dr_rtn_path[30];
+ } __attribute__ ((packed)) ntc_256;
+
+ struct {
+ __be16 reserved;
+ __be16 lid1;
+ __be16 lid2;
+ __be32 key;
+ __be32 sl_qp1; /* SL: high 4 bits */
+ __be32 qp2; /* high 8 bits reserved */
+ union ib_gid gid1;
+ union ib_gid gid2;
+ } __attribute__ ((packed)) ntc_257_258;
+
+ } details;
+};
+
+/*
+ * Generic trap/notice types
+ */
+#define IB_NOTICE_TYPE_FATAL 0x80
+#define IB_NOTICE_TYPE_URGENT 0x81
+#define IB_NOTICE_TYPE_SECURITY 0x82
+#define IB_NOTICE_TYPE_SM 0x83
+#define IB_NOTICE_TYPE_INFO 0x84
+
+/*
+ * Generic trap/notice producers
+ */
+#define IB_NOTICE_PROD_CA cpu_to_be16(1)
+#define IB_NOTICE_PROD_SWITCH cpu_to_be16(2)
+#define IB_NOTICE_PROD_ROUTER cpu_to_be16(3)
+#define IB_NOTICE_PROD_CLASS_MGR cpu_to_be16(4)
+
+/*
+ * Generic trap/notice numbers
+ */
+#define IB_NOTICE_TRAP_LLI_THRESH cpu_to_be16(129)
+#define IB_NOTICE_TRAP_EBO_THRESH cpu_to_be16(130)
+#define IB_NOTICE_TRAP_FLOW_UPDATE cpu_to_be16(131)
+#define IB_NOTICE_TRAP_CAP_MASK_CHG cpu_to_be16(144)
+#define IB_NOTICE_TRAP_SYS_GUID_CHG cpu_to_be16(145)
+#define IB_NOTICE_TRAP_BAD_MKEY cpu_to_be16(256)
+#define IB_NOTICE_TRAP_BAD_PKEY cpu_to_be16(257)
+#define IB_NOTICE_TRAP_BAD_QKEY cpu_to_be16(258)
+
+/*
+ * Repress trap/notice flags
+ */
+#define IB_NOTICE_REPRESS_LLI_THRESH (1 << 0)
+#define IB_NOTICE_REPRESS_EBO_THRESH (1 << 1)
+#define IB_NOTICE_REPRESS_FLOW_UPDATE (1 << 2)
+#define IB_NOTICE_REPRESS_CAP_MASK_CHG (1 << 3)
+#define IB_NOTICE_REPRESS_SYS_GUID_CHG (1 << 4)
+#define IB_NOTICE_REPRESS_BAD_MKEY (1 << 5)
+#define IB_NOTICE_REPRESS_BAD_PKEY (1 << 6)
+#define IB_NOTICE_REPRESS_BAD_QKEY (1 << 7)
+
+/*
+ * Generic trap/notice other local changes flags (trap 144).
+ */
+#define IB_NOTICE_TRAP_LSE_CHG 0x04 /* Link Speed Enable changed */
+#define IB_NOTICE_TRAP_LWE_CHG 0x02 /* Link Width Enable changed */
+#define IB_NOTICE_TRAP_NODE_DESC_CHG 0x01
+
+/*
+ * Generic trap/notice M_Key violation flags in dr_trunc_hop (trap 256).
+ */
+#define IB_NOTICE_TRAP_DR_NOTICE 0x80
+#define IB_NOTICE_TRAP_DR_TRUNC 0x40
+
+struct ib_vl_weight_elem {
+ u8 vl; /* Only low 4 bits, upper 4 bits reserved */
+ u8 weight;
+};
+
+#define IB_VLARB_LOWPRI_0_31 1
+#define IB_VLARB_LOWPRI_32_63 2
+#define IB_VLARB_HIGHPRI_0_31 3
+#define IB_VLARB_HIGHPRI_32_63 4
+
+/*
+ * PMA class portinfo capability mask bits
+ */
+#define IB_PMA_CLASS_CAP_ALLPORTSELECT cpu_to_be16(1 << 8)
+#define IB_PMA_CLASS_CAP_EXT_WIDTH cpu_to_be16(1 << 9)
+#define IB_PMA_CLASS_CAP_XMIT_WAIT cpu_to_be16(1 << 12)
+
+#define IB_PMA_CLASS_PORT_INFO cpu_to_be16(0x0001)
+#define IB_PMA_PORT_SAMPLES_CONTROL cpu_to_be16(0x0010)
+#define IB_PMA_PORT_SAMPLES_RESULT cpu_to_be16(0x0011)
+#define IB_PMA_PORT_COUNTERS cpu_to_be16(0x0012)
+#define IB_PMA_PORT_COUNTERS_EXT cpu_to_be16(0x001D)
+#define IB_PMA_PORT_SAMPLES_RESULT_EXT cpu_to_be16(0x001E)
+#define IB_PMA_PORT_COUNTERS_CONG cpu_to_be16(0xFF00)
+
+struct ib_perf {
+ u8 base_version;
+ u8 mgmt_class;
+ u8 class_version;
+ u8 method;
+ __be16 status;
+ __be16 unused;
+ __be64 tid;
+ __be16 attr_id;
+ __be16 resv;
+ __be32 attr_mod;
+ u8 reserved[40];
+ u8 data[192];
+} __attribute__ ((packed));
+
+struct ib_pma_classportinfo {
+ u8 base_version;
+ u8 class_version;
+ __be16 cap_mask;
+ u8 reserved[3];
+ u8 resp_time_value; /* only lower 5 bits */
+ union ib_gid redirect_gid;
+ __be32 redirect_tc_sl_fl; /* 8, 4, 20 bits respectively */
+ __be16 redirect_lid;
+ __be16 redirect_pkey;
+ __be32 redirect_qp; /* only lower 24 bits */
+ __be32 redirect_qkey;
+ union ib_gid trap_gid;
+ __be32 trap_tc_sl_fl; /* 8, 4, 20 bits respectively */
+ __be16 trap_lid;
+ __be16 trap_pkey;
+ __be32 trap_hl_qp; /* 8, 24 bits respectively */
+ __be32 trap_qkey;
+} __attribute__ ((packed));
+
+struct ib_pma_portsamplescontrol {
+ u8 opcode;
+ u8 port_select;
+ u8 tick;
+ u8 counter_width; /* only lower 3 bits */
+ __be32 counter_mask0_9; /* 2, 10 * 3, bits */
+ __be16 counter_mask10_14; /* 1, 5 * 3, bits */
+ u8 sample_mechanisms;
+ u8 sample_status; /* only lower 2 bits */
+ __be64 option_mask;
+ __be64 vendor_mask;
+ __be32 sample_start;
+ __be32 sample_interval;
+ __be16 tag;
+ __be16 counter_select[15];
+} __attribute__ ((packed));
+
+struct ib_pma_portsamplesresult {
+ __be16 tag;
+ __be16 sample_status; /* only lower 2 bits */
+ __be32 counter[15];
+} __attribute__ ((packed));
+
+struct ib_pma_portsamplesresult_ext {
+ __be16 tag;
+ __be16 sample_status; /* only lower 2 bits */
+ __be32 extended_width; /* only upper 2 bits */
+ __be64 counter[15];
+} __attribute__ ((packed));
+
+struct ib_pma_portcounters {
+ u8 reserved;
+ u8 port_select;
+ __be16 counter_select;
+ __be16 symbol_error_counter;
+ u8 link_error_recovery_counter;
+ u8 link_downed_counter;
+ __be16 port_rcv_errors;
+ __be16 port_rcv_remphys_errors;
+ __be16 port_rcv_switch_relay_errors;
+ __be16 port_xmit_discards;
+ u8 port_xmit_constraint_errors;
+ u8 port_rcv_constraint_errors;
+ u8 reserved1;
+ u8 lli_ebor_errors; /* 4, 4, bits */
+ __be16 reserved2;
+ __be16 vl15_dropped;
+ __be32 port_xmit_data;
+ __be32 port_rcv_data;
+ __be32 port_xmit_packets;
+ __be32 port_rcv_packets;
+} __attribute__ ((packed));
+
+struct ib_pma_portcounters_cong {
+ u8 reserved;
+ u8 reserved1;
+ __be16 port_check_rate;
+ __be16 symbol_error_counter;
+ u8 link_error_recovery_counter;
+ u8 link_downed_counter;
+ __be16 port_rcv_errors;
+ __be16 port_rcv_remphys_errors;
+ __be16 port_rcv_switch_relay_errors;
+ __be16 port_xmit_discards;
+ u8 port_xmit_constraint_errors;
+ u8 port_rcv_constraint_errors;
+ u8 reserved2;
+ u8 lli_ebor_errors; /* 4, 4, bits */
+ __be16 reserved3;
+ __be16 vl15_dropped;
+ __be64 port_xmit_data;
+ __be64 port_rcv_data;
+ __be64 port_xmit_packets;
+ __be64 port_rcv_packets;
+ __be64 port_xmit_wait;
+ __be64 port_adr_events;
+} __attribute__ ((packed));
+
+#define IB_PMA_CONG_HW_CONTROL_TIMER 0x00
+#define IB_PMA_CONG_HW_CONTROL_SAMPLE 0x01
+
+#define QIB_XMIT_RATE_UNSUPPORTED 0x0
+#define QIB_XMIT_RATE_PICO 0x7
+/* number of 4nsec cycles equaling 2secs */
+#define QIB_CONG_TIMER_PSINTERVAL 0x1DCD64EC
+
+#define IB_PMA_SEL_SYMBOL_ERROR cpu_to_be16(0x0001)
+#define IB_PMA_SEL_LINK_ERROR_RECOVERY cpu_to_be16(0x0002)
+#define IB_PMA_SEL_LINK_DOWNED cpu_to_be16(0x0004)
+#define IB_PMA_SEL_PORT_RCV_ERRORS cpu_to_be16(0x0008)
+#define IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS cpu_to_be16(0x0010)
+#define IB_PMA_SEL_PORT_XMIT_DISCARDS cpu_to_be16(0x0040)
+#define IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS cpu_to_be16(0x0200)
+#define IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS cpu_to_be16(0x0400)
+#define IB_PMA_SEL_PORT_VL15_DROPPED cpu_to_be16(0x0800)
+#define IB_PMA_SEL_PORT_XMIT_DATA cpu_to_be16(0x1000)
+#define IB_PMA_SEL_PORT_RCV_DATA cpu_to_be16(0x2000)
+#define IB_PMA_SEL_PORT_XMIT_PACKETS cpu_to_be16(0x4000)
+#define IB_PMA_SEL_PORT_RCV_PACKETS cpu_to_be16(0x8000)
+
+#define IB_PMA_SEL_CONG_ALL 0x01
+#define IB_PMA_SEL_CONG_PORT_DATA 0x02
+#define IB_PMA_SEL_CONG_XMIT 0x04
+#define IB_PMA_SEL_CONG_ROUTING 0x08
+
+struct ib_pma_portcounters_ext {
+ u8 reserved;
+ u8 port_select;
+ __be16 counter_select;
+ __be32 reserved1;
+ __be64 port_xmit_data;
+ __be64 port_rcv_data;
+ __be64 port_xmit_packets;
+ __be64 port_rcv_packets;
+ __be64 port_unicast_xmit_packets;
+ __be64 port_unicast_rcv_packets;
+ __be64 port_multicast_xmit_packets;
+ __be64 port_multicast_rcv_packets;
+} __attribute__ ((packed));
+
+#define IB_PMA_SELX_PORT_XMIT_DATA cpu_to_be16(0x0001)
+#define IB_PMA_SELX_PORT_RCV_DATA cpu_to_be16(0x0002)
+#define IB_PMA_SELX_PORT_XMIT_PACKETS cpu_to_be16(0x0004)
+#define IB_PMA_SELX_PORT_RCV_PACKETS cpu_to_be16(0x0008)
+#define IB_PMA_SELX_PORT_UNI_XMIT_PACKETS cpu_to_be16(0x0010)
+#define IB_PMA_SELX_PORT_UNI_RCV_PACKETS cpu_to_be16(0x0020)
+#define IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS cpu_to_be16(0x0040)
+#define IB_PMA_SELX_PORT_MULTI_RCV_PACKETS cpu_to_be16(0x0080)
+
+/*
+ * The PortSamplesControl.CounterMasks field is an array of 3 bit fields
+ * which specify the N'th counter's capabilities. See ch. 16.1.3.2.
+ * We support 5 counters which only count the mandatory quantities.
+ */
+#define COUNTER_MASK(q, n) (q << ((9 - n) * 3))
+#define COUNTER_MASK0_9 \
+ cpu_to_be32(COUNTER_MASK(1, 0) | \
+ COUNTER_MASK(1, 1) | \
+ COUNTER_MASK(1, 2) | \
+ COUNTER_MASK(1, 3) | \
+ COUNTER_MASK(1, 4))
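+
+/*
+ * Illustrative expansion (a sketch, not part of the MAD definitions above):
+ * COUNTER_MASK(1, 0) is 1 << 27 and COUNTER_MASK(1, 4) is 1 << 15, so
+ * COUNTER_MASK0_9 places the value 1 in the 3-bit fields for counters 0
+ * through 4, advertising the five mandatory counters in
+ * PortSamplesControl.CounterMasks.
+ */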
diff --git a/drivers/infiniband/hw/qib/qib_mmap.c b/drivers/infiniband/hw/qib/qib_mmap.c
new file mode 100644
index 000000000000..8b73a11d571c
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_mmap.c
@@ -0,0 +1,174 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+#include <linux/errno.h>
+#include <asm/pgtable.h>
+
+#include "qib_verbs.h"
+
+/**
+ * qib_release_mmap_info - free mmap info structure
+ * @ref: a pointer to the kref within struct qib_mmap_info
+ */
+void qib_release_mmap_info(struct kref *ref)
+{
+ struct qib_mmap_info *ip =
+ container_of(ref, struct qib_mmap_info, ref);
+ struct qib_ibdev *dev = to_idev(ip->context->device);
+
+ spin_lock_irq(&dev->pending_lock);
+ list_del(&ip->pending_mmaps);
+ spin_unlock_irq(&dev->pending_lock);
+
+ vfree(ip->obj);
+ kfree(ip);
+}
+
+/*
+ * qib_vma_open() and qib_vma_close() keep track of how many times the
+ * memory region is mapped, to avoid releasing it while it is still in use.
+ */
+static void qib_vma_open(struct vm_area_struct *vma)
+{
+ struct qib_mmap_info *ip = vma->vm_private_data;
+
+ kref_get(&ip->ref);
+}
+
+static void qib_vma_close(struct vm_area_struct *vma)
+{
+ struct qib_mmap_info *ip = vma->vm_private_data;
+
+ kref_put(&ip->ref, qib_release_mmap_info);
+}
+
+static struct vm_operations_struct qib_vm_ops = {
+ .open = qib_vma_open,
+ .close = qib_vma_close,
+};
+
+/**
+ * qib_mmap - create a new mmap region
+ * @context: the IB user context of the process making the mmap() call
+ * @vma: the VMA to be initialized
+ * Return zero if the mmap is OK. Otherwise, return an errno.
+ */
+int qib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
+{
+ struct qib_ibdev *dev = to_idev(context->device);
+ unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+ unsigned long size = vma->vm_end - vma->vm_start;
+ struct qib_mmap_info *ip, *pp;
+ int ret = -EINVAL;
+
+ /*
+ * Search the device's list of objects waiting for a mmap call.
+ * Normally, this list is very short since a call to create a
+ * CQ, QP, or SRQ is soon followed by a call to mmap().
+ */
+ spin_lock_irq(&dev->pending_lock);
+ list_for_each_entry_safe(ip, pp, &dev->pending_mmaps,
+ pending_mmaps) {
+ /* Only the creator is allowed to mmap the object */
+ if (context != ip->context || (__u64) offset != ip->offset)
+ continue;
+ /* Don't allow a mmap larger than the object. */
+ if (size > ip->size)
+ break;
+
+ list_del_init(&ip->pending_mmaps);
+ spin_unlock_irq(&dev->pending_lock);
+
+ ret = remap_vmalloc_range(vma, ip->obj, 0);
+ if (ret)
+ goto done;
+ vma->vm_ops = &qib_vm_ops;
+ vma->vm_private_data = ip;
+ qib_vma_open(vma);
+ goto done;
+ }
+ spin_unlock_irq(&dev->pending_lock);
+done:
+ return ret;
+}
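+
+/*
+ * Illustrative userspace counterpart (a sketch, with hypothetical names):
+ * after creating a CQ, QP, or SRQ, a user library would typically call
+ *   mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, dev_fd, offset)
+ * where "offset" is the value handed back at object creation time and
+ * matches ip->offset in the pending list searched above.
+ */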
+
+/*
+ * Allocate information for qib_mmap
+ */
+struct qib_mmap_info *qib_create_mmap_info(struct qib_ibdev *dev,
+ u32 size,
+ struct ib_ucontext *context,
+ void *obj)
+{
+ struct qib_mmap_info *ip;
+
+ ip = kmalloc(sizeof *ip, GFP_KERNEL);
+ if (!ip)
+ goto bail;
+
+ size = PAGE_ALIGN(size);
+
+ spin_lock_irq(&dev->mmap_offset_lock);
+ if (dev->mmap_offset == 0)
+ dev->mmap_offset = PAGE_SIZE;
+ ip->offset = dev->mmap_offset;
+ dev->mmap_offset += size;
+ spin_unlock_irq(&dev->mmap_offset_lock);
+
+ INIT_LIST_HEAD(&ip->pending_mmaps);
+ ip->size = size;
+ ip->context = context;
+ ip->obj = obj;
+ kref_init(&ip->ref);
+
+bail:
+ return ip;
+}
+
+void qib_update_mmap_info(struct qib_ibdev *dev, struct qib_mmap_info *ip,
+ u32 size, void *obj)
+{
+ size = PAGE_ALIGN(size);
+
+ spin_lock_irq(&dev->mmap_offset_lock);
+ if (dev->mmap_offset == 0)
+ dev->mmap_offset = PAGE_SIZE;
+ ip->offset = dev->mmap_offset;
+ dev->mmap_offset += size;
+ spin_unlock_irq(&dev->mmap_offset_lock);
+
+ ip->size = size;
+ ip->obj = obj;
+}
diff --git a/drivers/infiniband/hw/qib/qib_mr.c b/drivers/infiniband/hw/qib/qib_mr.c
new file mode 100644
index 000000000000..5f95f0f6385d
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_mr.c
@@ -0,0 +1,503 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <rdma/ib_umem.h>
+#include <rdma/ib_smi.h>
+
+#include "qib.h"
+
+/* Fast memory region */
+struct qib_fmr {
+ struct ib_fmr ibfmr;
+ u8 page_shift;
+ struct qib_mregion mr; /* must be last */
+};
+
+static inline struct qib_fmr *to_ifmr(struct ib_fmr *ibfmr)
+{
+ return container_of(ibfmr, struct qib_fmr, ibfmr);
+}
+
+/**
+ * qib_get_dma_mr - get a DMA memory region
+ * @pd: protection domain for this memory region
+ * @acc: access flags
+ *
+ * Returns the memory region on success, otherwise returns an errno.
+ * Note that all DMA addresses should be created via the
+ * struct ib_dma_mapping_ops functions (see qib_dma.c).
+ */
+struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc)
+{
+ struct qib_ibdev *dev = to_idev(pd->device);
+ struct qib_mr *mr;
+ struct ib_mr *ret;
+ unsigned long flags;
+
+ if (to_ipd(pd)->user) {
+ ret = ERR_PTR(-EPERM);
+ goto bail;
+ }
+
+ mr = kzalloc(sizeof *mr, GFP_KERNEL);
+ if (!mr) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail;
+ }
+
+ mr->mr.access_flags = acc;
+ atomic_set(&mr->mr.refcount, 0);
+
+ spin_lock_irqsave(&dev->lk_table.lock, flags);
+ if (!dev->dma_mr)
+ dev->dma_mr = &mr->mr;
+ spin_unlock_irqrestore(&dev->lk_table.lock, flags);
+
+ ret = &mr->ibmr;
+
+bail:
+ return ret;
+}
+
+static struct qib_mr *alloc_mr(int count, struct qib_lkey_table *lk_table)
+{
+ struct qib_mr *mr;
+ int m, i = 0;
+
+ /* Allocate struct plus pointers to first level page tables. */
+ m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ;
+ mr = kmalloc(sizeof *mr + m * sizeof mr->mr.map[0], GFP_KERNEL);
+ if (!mr)
+ goto done;
+
+ /* Allocate first level page tables. */
+ for (; i < m; i++) {
+ mr->mr.map[i] = kmalloc(sizeof *mr->mr.map[0], GFP_KERNEL);
+ if (!mr->mr.map[i])
+ goto bail;
+ }
+ mr->mr.mapsz = m;
+ mr->mr.max_segs = count;
+
+ /*
+ * ib_reg_phys_mr() will initialize mr->ibmr except for
+ * lkey and rkey.
+ */
+ if (!qib_alloc_lkey(lk_table, &mr->mr))
+ goto bail;
+ mr->ibmr.lkey = mr->mr.lkey;
+ mr->ibmr.rkey = mr->mr.lkey;
+
+ atomic_set(&mr->mr.refcount, 0);
+ goto done;
+
+bail:
+ while (i)
+ kfree(mr->mr.map[--i]);
+ kfree(mr);
+ mr = NULL;
+
+done:
+ return mr;
+}
+
+/**
+ * qib_reg_phys_mr - register a physical memory region
+ * @pd: protection domain for this memory region
+ * @buffer_list: pointer to the list of physical buffers to register
+ * @num_phys_buf: the number of physical buffers to register
+ * @iova_start: the starting address passed over IB which maps to this MR
+ *
+ * Returns the memory region on success, otherwise returns an errno.
+ */
+struct ib_mr *qib_reg_phys_mr(struct ib_pd *pd,
+ struct ib_phys_buf *buffer_list,
+ int num_phys_buf, int acc, u64 *iova_start)
+{
+ struct qib_mr *mr;
+ int n, m, i;
+ struct ib_mr *ret;
+
+ mr = alloc_mr(num_phys_buf, &to_idev(pd->device)->lk_table);
+ if (mr == NULL) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail;
+ }
+
+ mr->mr.pd = pd;
+ mr->mr.user_base = *iova_start;
+ mr->mr.iova = *iova_start;
+ mr->mr.length = 0;
+ mr->mr.offset = 0;
+ mr->mr.access_flags = acc;
+ mr->umem = NULL;
+
+ m = 0;
+ n = 0;
+ for (i = 0; i < num_phys_buf; i++) {
+ mr->mr.map[m]->segs[n].vaddr = (void *) buffer_list[i].addr;
+ mr->mr.map[m]->segs[n].length = buffer_list[i].size;
+ mr->mr.length += buffer_list[i].size;
+ n++;
+ if (n == QIB_SEGSZ) {
+ m++;
+ n = 0;
+ }
+ }
+
+ ret = &mr->ibmr;
+
+bail:
+ return ret;
+}
+
+/**
+ * qib_reg_user_mr - register a userspace memory region
+ * @pd: protection domain for this memory region
+ * @start: starting userspace address
+ * @length: length of region to register
+ * @virt_addr: virtual address to use (from HCA's point of view)
+ * @mr_access_flags: access flags for this memory region
+ * @udata: unused by the QLogic_IB driver
+ *
+ * Returns the memory region on success, otherwise returns an errno.
+ */
+struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+ u64 virt_addr, int mr_access_flags,
+ struct ib_udata *udata)
+{
+ struct qib_mr *mr;
+ struct ib_umem *umem;
+ struct ib_umem_chunk *chunk;
+ int n, m, i;
+ struct ib_mr *ret;
+
+ if (length == 0) {
+ ret = ERR_PTR(-EINVAL);
+ goto bail;
+ }
+
+ umem = ib_umem_get(pd->uobject->context, start, length,
+ mr_access_flags, 0);
+ if (IS_ERR(umem))
+ return (void *) umem;
+
+ n = 0;
+ list_for_each_entry(chunk, &umem->chunk_list, list)
+ n += chunk->nents;
+
+ mr = alloc_mr(n, &to_idev(pd->device)->lk_table);
+ if (!mr) {
+ ret = ERR_PTR(-ENOMEM);
+ ib_umem_release(umem);
+ goto bail;
+ }
+
+ mr->mr.pd = pd;
+ mr->mr.user_base = start;
+ mr->mr.iova = virt_addr;
+ mr->mr.length = length;
+ mr->mr.offset = umem->offset;
+ mr->mr.access_flags = mr_access_flags;
+ mr->umem = umem;
+
+ m = 0;
+ n = 0;
+ list_for_each_entry(chunk, &umem->chunk_list, list) {
+ for (i = 0; i < chunk->nents; i++) {
+ void *vaddr;
+
+ vaddr = page_address(sg_page(&chunk->page_list[i]));
+ if (!vaddr) {
+ ret = ERR_PTR(-EINVAL);
+ goto bail;
+ }
+ mr->mr.map[m]->segs[n].vaddr = vaddr;
+ mr->mr.map[m]->segs[n].length = umem->page_size;
+ n++;
+ if (n == QIB_SEGSZ) {
+ m++;
+ n = 0;
+ }
+ }
+ }
+ ret = &mr->ibmr;
+
+bail:
+ return ret;
+}
+
+/**
+ * qib_dereg_mr - unregister and free a memory region
+ * @ibmr: the memory region to free
+ *
+ * Returns 0 on success.
+ *
+ * Note that this is called to free MRs created by qib_get_dma_mr()
+ * or qib_reg_user_mr().
+ */
+int qib_dereg_mr(struct ib_mr *ibmr)
+{
+ struct qib_mr *mr = to_imr(ibmr);
+ struct qib_ibdev *dev = to_idev(ibmr->device);
+ int ret;
+ int i;
+
+ ret = qib_free_lkey(dev, &mr->mr);
+ if (ret)
+ return ret;
+
+ i = mr->mr.mapsz;
+ while (i)
+ kfree(mr->mr.map[--i]);
+ if (mr->umem)
+ ib_umem_release(mr->umem);
+ kfree(mr);
+ return 0;
+}
+
+/*
+ * Allocate a memory region usable with the
+ * IB_WR_FAST_REG_MR send work request.
+ *
+ * Return the memory region on success, otherwise return an errno.
+ */
+struct ib_mr *qib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len)
+{
+ struct qib_mr *mr;
+
+ mr = alloc_mr(max_page_list_len, &to_idev(pd->device)->lk_table);
+ if (mr == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ mr->mr.pd = pd;
+ mr->mr.user_base = 0;
+ mr->mr.iova = 0;
+ mr->mr.length = 0;
+ mr->mr.offset = 0;
+ mr->mr.access_flags = 0;
+ mr->umem = NULL;
+
+ return &mr->ibmr;
+}
+
+struct ib_fast_reg_page_list *
+qib_alloc_fast_reg_page_list(struct ib_device *ibdev, int page_list_len)
+{
+ unsigned size = page_list_len * sizeof(u64);
+ struct ib_fast_reg_page_list *pl;
+
+ if (size > PAGE_SIZE)
+ return ERR_PTR(-EINVAL);
+
+ pl = kmalloc(sizeof *pl, GFP_KERNEL);
+ if (!pl)
+ return ERR_PTR(-ENOMEM);
+
+ pl->page_list = kmalloc(size, GFP_KERNEL);
+ if (!pl->page_list)
+ goto err_free;
+
+ return pl;
+
+err_free:
+ kfree(pl);
+ return ERR_PTR(-ENOMEM);
+}
+
+void qib_free_fast_reg_page_list(struct ib_fast_reg_page_list *pl)
+{
+ kfree(pl->page_list);
+ kfree(pl);
+}
+
+/**
+ * qib_alloc_fmr - allocate a fast memory region
+ * @pd: the protection domain for this memory region
+ * @mr_access_flags: access flags for this memory region
+ * @fmr_attr: fast memory region attributes
+ *
+ * Returns the memory region on success, otherwise returns an errno.
+ */
+struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
+ struct ib_fmr_attr *fmr_attr)
+{
+ struct qib_fmr *fmr;
+ int m, i = 0;
+ struct ib_fmr *ret;
+
+ /* Allocate struct plus pointers to first level page tables. */
+ m = (fmr_attr->max_pages + QIB_SEGSZ - 1) / QIB_SEGSZ;
+ fmr = kmalloc(sizeof *fmr + m * sizeof fmr->mr.map[0], GFP_KERNEL);
+ if (!fmr)
+ goto bail;
+
+ /* Allocate first level page tables. */
+ for (; i < m; i++) {
+ fmr->mr.map[i] = kmalloc(sizeof *fmr->mr.map[0],
+ GFP_KERNEL);
+ if (!fmr->mr.map[i])
+ goto bail;
+ }
+ fmr->mr.mapsz = m;
+
+ /*
+ * ib_alloc_fmr() will initialize fmr->ibfmr except for lkey &
+ * rkey.
+ */
+ if (!qib_alloc_lkey(&to_idev(pd->device)->lk_table, &fmr->mr))
+ goto bail;
+ fmr->ibfmr.rkey = fmr->mr.lkey;
+ fmr->ibfmr.lkey = fmr->mr.lkey;
+ /*
+ * Resources are allocated but no valid mapping (RKEY can't be
+ * used).
+ */
+ fmr->mr.pd = pd;
+ fmr->mr.user_base = 0;
+ fmr->mr.iova = 0;
+ fmr->mr.length = 0;
+ fmr->mr.offset = 0;
+ fmr->mr.access_flags = mr_access_flags;
+ fmr->mr.max_segs = fmr_attr->max_pages;
+ fmr->page_shift = fmr_attr->page_shift;
+
+ atomic_set(&fmr->mr.refcount, 0);
+ ret = &fmr->ibfmr;
+ goto done;
+
+bail:
+ while (i)
+ kfree(fmr->mr.map[--i]);
+ kfree(fmr);
+ ret = ERR_PTR(-ENOMEM);
+
+done:
+ return ret;
+}
+
+/**
+ * qib_map_phys_fmr - set up a fast memory region
+ * @ibfmr: the fast memory region to set up
+ * @page_list: the list of pages to associate with the fast memory region
+ * @list_len: the number of pages to associate with the fast memory region
+ * @iova: the virtual address of the start of the fast memory region
+ *
+ * This may be called from interrupt context.
+ */
+
+int qib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
+ int list_len, u64 iova)
+{
+ struct qib_fmr *fmr = to_ifmr(ibfmr);
+ struct qib_lkey_table *rkt;
+ unsigned long flags;
+ int m, n, i;
+ u32 ps;
+ int ret;
+
+ if (atomic_read(&fmr->mr.refcount))
+ return -EBUSY;
+
+ if (list_len > fmr->mr.max_segs) {
+ ret = -EINVAL;
+ goto bail;
+ }
+ rkt = &to_idev(ibfmr->device)->lk_table;
+ spin_lock_irqsave(&rkt->lock, flags);
+ fmr->mr.user_base = iova;
+ fmr->mr.iova = iova;
+ ps = 1 << fmr->page_shift;
+ fmr->mr.length = list_len * ps;
+ m = 0;
+ n = 0;
+ for (i = 0; i < list_len; i++) {
+ fmr->mr.map[m]->segs[n].vaddr = (void *) page_list[i];
+ fmr->mr.map[m]->segs[n].length = ps;
+ if (++n == QIB_SEGSZ) {
+ m++;
+ n = 0;
+ }
+ }
+ spin_unlock_irqrestore(&rkt->lock, flags);
+ ret = 0;
+
+bail:
+ return ret;
+}
+
+/**
+ * qib_unmap_fmr - unmap fast memory regions
+ * @fmr_list: the list of fast memory regions to unmap
+ *
+ * Returns 0 on success.
+ */
+int qib_unmap_fmr(struct list_head *fmr_list)
+{
+ struct qib_fmr *fmr;
+ struct qib_lkey_table *rkt;
+ unsigned long flags;
+
+ list_for_each_entry(fmr, fmr_list, ibfmr.list) {
+ rkt = &to_idev(fmr->ibfmr.device)->lk_table;
+ spin_lock_irqsave(&rkt->lock, flags);
+ fmr->mr.user_base = 0;
+ fmr->mr.iova = 0;
+ fmr->mr.length = 0;
+ spin_unlock_irqrestore(&rkt->lock, flags);
+ }
+ return 0;
+}
+
+/**
+ * qib_dealloc_fmr - deallocate a fast memory region
+ * @ibfmr: the fast memory region to deallocate
+ *
+ * Returns 0 on success.
+ */
+int qib_dealloc_fmr(struct ib_fmr *ibfmr)
+{
+ struct qib_fmr *fmr = to_ifmr(ibfmr);
+ int ret;
+ int i;
+
+ ret = qib_free_lkey(to_idev(ibfmr->device), &fmr->mr);
+ if (ret)
+ return ret;
+
+ i = fmr->mr.mapsz;
+ while (i)
+ kfree(fmr->mr.map[--i]);
+ kfree(fmr);
+ return 0;
+}
diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c
new file mode 100644
index 000000000000..c926bf4541df
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_pcie.c
@@ -0,0 +1,738 @@
+/*
+ * Copyright (c) 2008, 2009 QLogic Corporation. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/pci.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/vmalloc.h>
+#include <linux/aer.h>
+
+#include "qib.h"
+
+/*
+ * This file contains PCIe utility routines that are common to the
+ * various QLogic InfiniPath adapters
+ */
+
+/*
+ * Code to adjust PCIe capabilities.
+ * To minimize the change footprint, we call it
+ * from qib_pcie_params, which every chip-specific
+ * file calls, even though this violates some
+ * expectations of harmlessness.
+ */
+static int qib_tune_pcie_caps(struct qib_devdata *);
+static int qib_tune_pcie_coalesce(struct qib_devdata *);
+
+/*
+ * Do all the common PCIe setup and initialization.
+ * devdata is not yet allocated, and is not allocated until after this
+ * routine returns success. Therefore qib_dev_err() can't be used for error
+ * printing.
+ */
+int qib_pcie_init(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ int ret;
+
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ /*
+ * This can happen (in theory) iff:
+ * We did a chip reset, and then failed to reprogram the
+ * BAR, or the chip reset due to an internal error. We then
+ * unloaded the driver and reloaded it.
+ *
+ * Both reset cases set the BAR back to initial state. For
+ * the latter case, the AER sticky error bit at offset 0x718
+ * should be set, but the Linux kernel doesn't yet know
+ * about that, it appears. If the original BAR was retained
+ * in the kernel data structures, this may be OK.
+ */
+ qib_early_err(&pdev->dev, "pci enable failed: error %d\n",
+ -ret);
+ goto done;
+ }
+
+ ret = pci_request_regions(pdev, QIB_DRV_NAME);
+ if (ret) {
+ qib_devinfo(pdev, "pci_request_regions fails: err %d\n", -ret);
+ goto bail;
+ }
+
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (ret) {
+ /*
+ * If the 64 bit setup fails, try 32 bit. Some systems
+ * with 2GB or less memory installed do not set up 64 bit
+ * maps.
+ */
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (ret) {
+ qib_devinfo(pdev, "Unable to set DMA mask: %d\n", ret);
+ goto bail;
+ }
+ ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ } else
+ ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (ret)
+ qib_early_err(&pdev->dev,
+ "Unable to set DMA consistent mask: %d\n", ret);
+
+ pci_set_master(pdev);
+ ret = pci_enable_pcie_error_reporting(pdev);
+ if (ret)
+ qib_early_err(&pdev->dev,
+ "Unable to enable pcie error reporting: %d\n",
+ ret);
+ goto done;
+
+bail:
+ pci_disable_device(pdev);
+ pci_release_regions(pdev);
+done:
+ return ret;
+}
+
+/*
+ * Do remaining PCIe setup, once dd is allocated, and save away
+ * fields required to re-initialize after a chip reset, or for
+ * various other purposes
+ */
+int qib_pcie_ddinit(struct qib_devdata *dd, struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ unsigned long len;
+ resource_size_t addr;
+
+ dd->pcidev = pdev;
+ pci_set_drvdata(pdev, dd);
+
+ addr = pci_resource_start(pdev, 0);
+ len = pci_resource_len(pdev, 0);
+
+#if defined(__powerpc__)
+ /* There isn't a generic way to specify writethrough mappings */
+ dd->kregbase = __ioremap(addr, len, _PAGE_NO_CACHE | _PAGE_WRITETHRU);
+#else
+ dd->kregbase = ioremap_nocache(addr, len);
+#endif
+
+ if (!dd->kregbase)
+ return -ENOMEM;
+
+ dd->kregend = (u64 __iomem *)((void __iomem *) dd->kregbase + len);
+ dd->physaddr = addr; /* used for io_remap, etc. */
+
+ /*
+ * Save BARs to rewrite after device reset. Save all 64 bits of
+ * BAR, just in case.
+ */
+ dd->pcibar0 = addr;
+ dd->pcibar1 = addr >> 32;
+ dd->deviceid = ent->device; /* save for later use */
+ dd->vendorid = ent->vendor;
+
+ return 0;
+}
+
+/*
+ * Do PCIe cleanup, after chip-specific cleanup, etc. Just prior
+ * to releasing the dd memory.
+ * Returns void, because the core PCIe cleanup routines it calls are
+ * themselves void.
+ */
+void qib_pcie_ddcleanup(struct qib_devdata *dd)
+{
+ u64 __iomem *base = (void __iomem *) dd->kregbase;
+
+ dd->kregbase = NULL;
+ iounmap(base);
+ if (dd->piobase)
+ iounmap(dd->piobase);
+ if (dd->userbase)
+ iounmap(dd->userbase);
+
+ pci_disable_device(dd->pcidev);
+ pci_release_regions(dd->pcidev);
+
+ pci_set_drvdata(dd->pcidev, NULL);
+}
+
+static void qib_msix_setup(struct qib_devdata *dd, int pos, u32 *msixcnt,
+ struct msix_entry *msix_entry)
+{
+ int ret;
+ u32 tabsize = 0;
+ u16 msix_flags;
+
+ pci_read_config_word(dd->pcidev, pos + PCI_MSIX_FLAGS, &msix_flags);
+ tabsize = 1 + (msix_flags & PCI_MSIX_FLAGS_QSIZE);
+ if (tabsize > *msixcnt)
+ tabsize = *msixcnt;
+ ret = pci_enable_msix(dd->pcidev, msix_entry, tabsize);
+ if (ret > 0) {
+ tabsize = ret;
+ ret = pci_enable_msix(dd->pcidev, msix_entry, tabsize);
+ }
+ if (ret) {
+ qib_dev_err(dd, "pci_enable_msix %d vectors failed: %d, "
+ "falling back to INTx\n", tabsize, ret);
+ tabsize = 0;
+ }
+ *msixcnt = tabsize;
+
+ if (ret)
+ qib_enable_intx(dd->pcidev);
+}
+
+/*
+ * We save the MSI lo and hi values, so we can restore them after
+ * chip reset (the kernel PCI infrastructure doesn't yet handle that
+ * correctly).
+ */
+static int qib_msi_setup(struct qib_devdata *dd, int pos)
+{
+ struct pci_dev *pdev = dd->pcidev;
+ u16 control;
+ int ret;
+
+ ret = pci_enable_msi(pdev);
+ if (ret)
+ qib_dev_err(dd, "pci_enable_msi failed: %d, "
+ "interrupts may not work\n", ret);
+ /* continue even if it fails, we may still be OK... */
+
+ pci_read_config_dword(pdev, pos + PCI_MSI_ADDRESS_LO,
+ &dd->msi_lo);
+ pci_read_config_dword(pdev, pos + PCI_MSI_ADDRESS_HI,
+ &dd->msi_hi);
+ pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &control);
+ /* now save the data (vector) info */
+ pci_read_config_word(pdev, pos + ((control & PCI_MSI_FLAGS_64BIT)
+ ? 12 : 8),
+ &dd->msi_data);
+ return ret;
+}
+
+int qib_pcie_params(struct qib_devdata *dd, u32 minw, u32 *nent,
+ struct msix_entry *entry)
+{
+ u16 linkstat, speed;
+ int pos = 0, pose, ret = 1;
+
+ pose = pci_find_capability(dd->pcidev, PCI_CAP_ID_EXP);
+ if (!pose) {
+ qib_dev_err(dd, "Can't find PCI Express capability!\n");
+ /* set up something... */
+ dd->lbus_width = 1;
+ dd->lbus_speed = 2500; /* Gen1, 2.5GHz */
+ goto bail;
+ }
+
+ pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSIX);
+ if (nent && *nent && pos) {
+ qib_msix_setup(dd, pos, nent, entry);
+ ret = 0; /* did it, either MSIx or INTx */
+ } else {
+ pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI);
+ if (pos)
+ ret = qib_msi_setup(dd, pos);
+ else
+ qib_dev_err(dd, "No PCI MSI or MSIx capability!\n");
+ }
+ if (!pos)
+ qib_enable_intx(dd->pcidev);
+
+ pci_read_config_word(dd->pcidev, pose + PCI_EXP_LNKSTA, &linkstat);
+ /*
+ * speed is bits 0-3, linkwidth is bits 4-8
+ * no defines for them in headers
+ */
+ speed = linkstat & 0xf;
+ linkstat >>= 4;
+ linkstat &= 0x1f;
+ dd->lbus_width = linkstat;
+
+ switch (speed) {
+ case 1:
+ dd->lbus_speed = 2500; /* Gen1, 2.5GHz */
+ break;
+ case 2:
+ dd->lbus_speed = 5000; /* Gen2, 5GHz */
+ break;
+ default: /* not defined, assume gen1 */
+ dd->lbus_speed = 2500;
+ break;
+ }
+
+ /*
+ * Check against expected pcie width and complain if "wrong"
+ * on first initialization, not afterwards (i.e., reset).
+ */
+ if (minw && linkstat < minw)
+ qib_dev_err(dd,
+ "PCIe width %u (x%u HCA), performance reduced\n",
+ linkstat, minw);
+
+ qib_tune_pcie_caps(dd);
+
+ qib_tune_pcie_coalesce(dd);
+
+bail:
+ /* fill in string, even on errors */
+ snprintf(dd->lbus_info, sizeof(dd->lbus_info),
+ "PCIe,%uMHz,x%u\n", dd->lbus_speed, dd->lbus_width);
+ return ret;
+}
+
+/*
+ * Setup pcie interrupt stuff again after a reset. I'd like to just call
+ * pci_enable_msi() again for msi, but when I do that,
+ * the MSI enable bit doesn't get set in the command word, and
+ * we switch to a different interrupt vector, which is confusing,
+ * so I instead just do it all inline. Perhaps somehow can tie this
+ * into the PCIe hotplug support at some point
+ */
+int qib_reinit_intr(struct qib_devdata *dd)
+{
+ int pos;
+ u16 control;
+ int ret = 0;
+
+ /* If we aren't using MSI, don't restore it */
+ if (!dd->msi_lo)
+ goto bail;
+
+ pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI);
+ if (!pos) {
+ qib_dev_err(dd, "Can't find MSI capability, "
+ "can't restore MSI settings\n");
+ ret = 0;
+ /* nothing special for MSIx, just MSI */
+ goto bail;
+ }
+ pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_LO,
+ dd->msi_lo);
+ pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_HI,
+ dd->msi_hi);
+ pci_read_config_word(dd->pcidev, pos + PCI_MSI_FLAGS, &control);
+ if (!(control & PCI_MSI_FLAGS_ENABLE)) {
+ control |= PCI_MSI_FLAGS_ENABLE;
+ pci_write_config_word(dd->pcidev, pos + PCI_MSI_FLAGS,
+ control);
+ }
+ /* now rewrite the data (vector) info */
+ pci_write_config_word(dd->pcidev, pos +
+ ((control & PCI_MSI_FLAGS_64BIT) ? 12 : 8),
+ dd->msi_data);
+ ret = 1;
+bail:
+ if (!ret && (dd->flags & QIB_HAS_INTX)) {
+ qib_enable_intx(dd->pcidev);
+ ret = 1;
+ }
+
+ /* and now set the pci master bit again */
+ pci_set_master(dd->pcidev);
+
+ return ret;
+}
+
+/*
+ * Disable msi interrupt if enabled, and clear msi_lo.
+ * This is used primarily for the fallback to INTx, but
+ * is also used in reinit after reset, and during cleanup.
+ */
+void qib_nomsi(struct qib_devdata *dd)
+{
+ dd->msi_lo = 0;
+ pci_disable_msi(dd->pcidev);
+}
+
+/*
+ * Same as qib_nomsi, but for MSIx.
+ */
+void qib_nomsix(struct qib_devdata *dd)
+{
+ pci_disable_msix(dd->pcidev);
+}
+
+/*
+ * Similar to pci_intx(pdev, 1), except that we make sure
+ * msi(x) is off.
+ */
+void qib_enable_intx(struct pci_dev *pdev)
+{
+ u16 cw, new;
+ int pos;
+
+ /* first, turn on INTx */
+ pci_read_config_word(pdev, PCI_COMMAND, &cw);
+ new = cw & ~PCI_COMMAND_INTX_DISABLE;
+ if (new != cw)
+ pci_write_config_word(pdev, PCI_COMMAND, new);
+
+ pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
+ if (pos) {
+ /* then turn off MSI */
+ pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &cw);
+ new = cw & ~PCI_MSI_FLAGS_ENABLE;
+ if (new != cw)
+ pci_write_config_word(pdev, pos + PCI_MSI_FLAGS, new);
+ }
+ pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
+ if (pos) {
+ /* then turn off MSIx */
+ pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS, &cw);
+ new = cw & ~PCI_MSIX_FLAGS_ENABLE;
+ if (new != cw)
+ pci_write_config_word(pdev, pos + PCI_MSIX_FLAGS, new);
+ }
+}
+
+/*
+ * These two routines are helper routines for the device reset code
+ * to move all the pcie code out of the chip-specific driver code.
+ */
+void qib_pcie_getcmd(struct qib_devdata *dd, u16 *cmd, u8 *iline, u8 *cline)
+{
+ pci_read_config_word(dd->pcidev, PCI_COMMAND, cmd);
+ pci_read_config_byte(dd->pcidev, PCI_INTERRUPT_LINE, iline);
+ pci_read_config_byte(dd->pcidev, PCI_CACHE_LINE_SIZE, cline);
+}
+
+void qib_pcie_reenable(struct qib_devdata *dd, u16 cmd, u8 iline, u8 cline)
+{
+ int r;
+ r = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0,
+ dd->pcibar0);
+ if (r)
+ qib_dev_err(dd, "rewrite of BAR0 failed: %d\n", r);
+ r = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_1,
+ dd->pcibar1);
+ if (r)
+ qib_dev_err(dd, "rewrite of BAR1 failed: %d\n", r);
+ /* now re-enable memory access, and restore cosmetic settings */
+ pci_write_config_word(dd->pcidev, PCI_COMMAND, cmd);
+ pci_write_config_byte(dd->pcidev, PCI_INTERRUPT_LINE, iline);
+ pci_write_config_byte(dd->pcidev, PCI_CACHE_LINE_SIZE, cline);
+ r = pci_enable_device(dd->pcidev);
+ if (r)
+ qib_dev_err(dd, "pci_enable_device failed after "
+ "reset: %d\n", r);
+}
+
+/* code to adjust PCIe capabilities. */
+
+static int fld2val(int wd, int mask)
+{
+ int lsbmask;
+
+ if (!mask)
+ return 0;
+ wd &= mask;
+ lsbmask = mask ^ (mask & (mask - 1));
+ wd /= lsbmask;
+ return wd;
+}
+
+static int val2fld(int wd, int mask)
+{
+ int lsbmask;
+
+ if (!mask)
+ return 0;
+ lsbmask = mask ^ (mask & (mask - 1));
+ wd *= lsbmask;
+ return wd;
+}
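+
+/*
+ * Worked example (illustrative only): with mask 0x00E0 (bits 7:5), lsbmask
+ * is 0x20, so fld2val(0x00A0, 0x00E0) returns 5 and val2fld(5, 0x00E0)
+ * returns 0x00A0, ready to be OR'd back into a register image.
+ */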
+
+static int qib_pcie_coalesce;
+module_param_named(pcie_coalesce, qib_pcie_coalesce, int, S_IRUGO);
+MODULE_PARM_DESC(pcie_coalesce, "tune PCIe coalescing on some Intel chipsets");
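+
+/*
+ * Usage sketch (assuming the driver module is loaded as ib_qib):
+ *   modprobe ib_qib pcie_coalesce=1
+ * The parameter is read-only at runtime (S_IRUGO), so it must be set at
+ * load time or on the kernel command line.
+ */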
+
+/*
+ * Enable PCIe completion and data coalescing, on Intel 5x00 and 7300
+ * chipsets. This is known to be unsafe for some revisions of some
+ * of these chipsets, with some BIOS settings, and enabling it on those
+ * systems may result in the system crashing, and/or data corruption.
+ */
+static int qib_tune_pcie_coalesce(struct qib_devdata *dd)
+{
+ int r;
+ struct pci_dev *parent;
+ int ppos;
+ u16 devid;
+ u32 mask, bits, val;
+
+ if (!qib_pcie_coalesce)
+ return 0;
+
+ /* Find out supported and configured values for parent (root) */
+ parent = dd->pcidev->bus->self;
+ if (parent->bus->parent) {
+ qib_devinfo(dd->pcidev, "Parent not root\n");
+ return 1;
+ }
+ ppos = pci_find_capability(parent, PCI_CAP_ID_EXP);
+ if (!ppos)
+ return 1;
+ if (parent->vendor != 0x8086)
+ return 1;
+
+ /*
+ * - bit 12: Max_rdcmp_Imt_EN: need to set to 1
+ * - bit 11: COALESCE_FORCE: need to set to 0
+ * - bit 10: COALESCE_EN: need to set to 1
+ * (with limitations on some chipsets)
+ *
+ * On the Intel 5000, 5100, and 7300 chipsets, there is
+ * also: - bit 25:24: COALESCE_MODE, need to set to 0
+ */
+ devid = parent->device;
+ if (devid >= 0x25e2 && devid <= 0x25fa) {
+ u8 rev;
+
+ /* 5000 P/V/X/Z */
+ pci_read_config_byte(parent, PCI_REVISION_ID, &rev);
+ if (rev <= 0xb2)
+ bits = 1U << 10;
+ else
+ bits = 7U << 10;
+ mask = (3U << 24) | (7U << 10);
+ } else if (devid >= 0x65e2 && devid <= 0x65fa) {
+ /* 5100 */
+ bits = 1U << 10;
+ mask = (3U << 24) | (7U << 10);
+ } else if (devid >= 0x4021 && devid <= 0x402e) {
+ /* 5400 */
+ bits = 7U << 10;
+ mask = 7U << 10;
+ } else if (devid >= 0x3604 && devid <= 0x360a) {
+ /* 7300 */
+ bits = 7U << 10;
+ mask = (3U << 24) | (7U << 10);
+ } else {
+ /* not one of the chipsets that we know about */
+ return 1;
+ }
+ pci_read_config_dword(parent, 0x48, &val);
+ val &= ~mask;
+ val |= bits;
+ r = pci_write_config_dword(parent, 0x48, val);
+ return 0;
+}
+
+/*
+ * BIOS may not set PCIe bus-utilization parameters for best performance.
+ * Check and optionally adjust them to maximize our throughput.
+ */
+static int qib_pcie_caps;
+module_param_named(pcie_caps, qib_pcie_caps, int, S_IRUGO);
+MODULE_PARM_DESC(pcie_caps, "Max PCIe tuning: Payload (4lsb), ReadReq (D4..7)");
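+
+/*
+ * Illustrative encoding (a sketch): each field is a PCIe size code where
+ * size = 2^(code + 7) bytes, so pcie_caps=0x51 caps MaxPayload at code 1
+ * (256 bytes) and MaxReadReq at code 5 (4096 bytes).
+ */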
+
+static int qib_tune_pcie_caps(struct qib_devdata *dd)
+{
+ int ret = 1; /* Assume the worst */
+ struct pci_dev *parent;
+ int ppos, epos;
+ u16 pcaps, pctl, ecaps, ectl;
+ int rc_sup, ep_sup;
+ int rc_cur, ep_cur;
+
+ /* Find out supported and configured values for parent (root) */
+ parent = dd->pcidev->bus->self;
+ if (parent->bus->parent) {
+ qib_devinfo(dd->pcidev, "Parent not root\n");
+ goto bail;
+ }
+ ppos = pci_find_capability(parent, PCI_CAP_ID_EXP);
+ if (ppos) {
+ pci_read_config_word(parent, ppos + PCI_EXP_DEVCAP, &pcaps);
+ pci_read_config_word(parent, ppos + PCI_EXP_DEVCTL, &pctl);
+ } else
+ goto bail;
+ /* Find out supported and configured values for endpoint (us) */
+ epos = pci_find_capability(dd->pcidev, PCI_CAP_ID_EXP);
+ if (epos) {
+ pci_read_config_word(dd->pcidev, epos + PCI_EXP_DEVCAP, &ecaps);
+ pci_read_config_word(dd->pcidev, epos + PCI_EXP_DEVCTL, &ectl);
+ } else
+ goto bail;
+ ret = 0;
+ /* Find max payload supported by root, endpoint */
+ rc_sup = fld2val(pcaps, PCI_EXP_DEVCAP_PAYLOAD);
+ ep_sup = fld2val(ecaps, PCI_EXP_DEVCAP_PAYLOAD);
+ if (rc_sup > ep_sup)
+ rc_sup = ep_sup;
+
+ rc_cur = fld2val(pctl, PCI_EXP_DEVCTL_PAYLOAD);
+ ep_cur = fld2val(ectl, PCI_EXP_DEVCTL_PAYLOAD);
+
+ /* If Supported greater than limit in module param, limit it */
+ if (rc_sup > (qib_pcie_caps & 7))
+ rc_sup = qib_pcie_caps & 7;
+ /* If less than (allowed, supported), bump root payload */
+ if (rc_sup > rc_cur) {
+ rc_cur = rc_sup;
+ pctl = (pctl & ~PCI_EXP_DEVCTL_PAYLOAD) |
+ val2fld(rc_cur, PCI_EXP_DEVCTL_PAYLOAD);
+ pci_write_config_word(parent, ppos + PCI_EXP_DEVCTL, pctl);
+ }
+ /* If less than (allowed, supported), bump endpoint payload */
+ if (rc_sup > ep_cur) {
+ ep_cur = rc_sup;
+ ectl = (ectl & ~PCI_EXP_DEVCTL_PAYLOAD) |
+ val2fld(ep_cur, PCI_EXP_DEVCTL_PAYLOAD);
+ pci_write_config_word(dd->pcidev, epos + PCI_EXP_DEVCTL, ectl);
+ }
+
+ /*
+ * Now the Read Request size.
+ * No field for max supported, but PCIe spec limits it to 4096,
+ * which is code '5' (log2(4096) - 7)
+ */
+ rc_sup = 5;
+ if (rc_sup > ((qib_pcie_caps >> 4) & 7))
+ rc_sup = (qib_pcie_caps >> 4) & 7;
+ rc_cur = fld2val(pctl, PCI_EXP_DEVCTL_READRQ);
+ ep_cur = fld2val(ectl, PCI_EXP_DEVCTL_READRQ);
+
+ if (rc_sup > rc_cur) {
+ rc_cur = rc_sup;
+ pctl = (pctl & ~PCI_EXP_DEVCTL_READRQ) |
+ val2fld(rc_cur, PCI_EXP_DEVCTL_READRQ);
+ pci_write_config_word(parent, ppos + PCI_EXP_DEVCTL, pctl);
+ }
+ if (rc_sup > ep_cur) {
+ ep_cur = rc_sup;
+ ectl = (ectl & ~PCI_EXP_DEVCTL_READRQ) |
+ val2fld(ep_cur, PCI_EXP_DEVCTL_READRQ);
+ pci_write_config_word(dd->pcidev, epos + PCI_EXP_DEVCTL, ectl);
+ }
+bail:
+ return ret;
+}
+/* End of PCIe capability tuning */
+
+/*
+ * Everything from here through the qib_pci_err_handler definition is
+ * invoked via the PCI error-handling infrastructure, registered with the
+ * PCI core.
+ */
+static pci_ers_result_t
+qib_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
+{
+ struct qib_devdata *dd = pci_get_drvdata(pdev);
+ pci_ers_result_t ret = PCI_ERS_RESULT_RECOVERED;
+
+ switch (state) {
+ case pci_channel_io_normal:
+ qib_devinfo(pdev, "State Normal, ignoring\n");
+ break;
+
+ case pci_channel_io_frozen:
+ qib_devinfo(pdev, "State Frozen, requesting reset\n");
+ pci_disable_device(pdev);
+ ret = PCI_ERS_RESULT_NEED_RESET;
+ break;
+
+ case pci_channel_io_perm_failure:
+ qib_devinfo(pdev, "State Permanent Failure, disabling\n");
+ if (dd) {
+ /* no more register accesses! */
+ dd->flags &= ~QIB_PRESENT;
+ qib_disable_after_error(dd);
+ }
+ /* else early, or other problem */
+ ret = PCI_ERS_RESULT_DISCONNECT;
+ break;
+
+ default: /* shouldn't happen */
+ qib_devinfo(pdev, "QIB PCI errors detected (state %d)\n",
+ state);
+ break;
+ }
+ return ret;
+}
+
+static pci_ers_result_t
+qib_pci_mmio_enabled(struct pci_dev *pdev)
+{
+ u64 words = 0U;
+ struct qib_devdata *dd = pci_get_drvdata(pdev);
+ pci_ers_result_t ret = PCI_ERS_RESULT_RECOVERED;
+
+ if (dd && dd->pport) {
+ words = dd->f_portcntr(dd->pport, QIBPORTCNTR_WORDRCV);
+ if (words == ~0ULL)
+ ret = PCI_ERS_RESULT_NEED_RESET;
+ }
+ qib_devinfo(pdev, "QIB mmio_enabled function called, "
+ "read wordscntr %Lx, returning %d\n", words, ret);
+ return ret;
+}
+
+static pci_ers_result_t
+qib_pci_slot_reset(struct pci_dev *pdev)
+{
+ qib_devinfo(pdev, "QIB link_reset function called, ignored\n");
+ return PCI_ERS_RESULT_CAN_RECOVER;
+}
+
+static pci_ers_result_t
+qib_pci_link_reset(struct pci_dev *pdev)
+{
+ qib_devinfo(pdev, "QIB link_reset function called, ignored\n");
+ return PCI_ERS_RESULT_CAN_RECOVER;
+}
+
+static void
+qib_pci_resume(struct pci_dev *pdev)
+{
+ struct qib_devdata *dd = pci_get_drvdata(pdev);
+ qib_devinfo(pdev, "QIB resume function called\n");
+ pci_cleanup_aer_uncorrect_error_status(pdev);
+ /*
+ * Running jobs will fail, since it's asynchronous
+ * unlike sysfs-requested reset. Better than
+ * doing nothing.
+ */
+ qib_init(dd, 1); /* same as re-init after reset */
+}
+
+struct pci_error_handlers qib_pci_err_handler = {
+ .error_detected = qib_pci_error_detected,
+ .mmio_enabled = qib_pci_mmio_enabled,
+ .link_reset = qib_pci_link_reset,
+ .slot_reset = qib_pci_slot_reset,
+ .resume = qib_pci_resume,
+};
diff --git a/drivers/infiniband/hw/qib/qib_pio_copy.c b/drivers/infiniband/hw/qib/qib_pio_copy.c
new file mode 100644
index 000000000000..10b8c444dd31
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_pio_copy.c
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2009 QLogic Corporation. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "qib.h"
+
+/**
+ * qib_pio_copy - copy data to MMIO space, in multiples of 32-bits
+ * @to: destination, in MMIO space (must be 64-bit aligned)
+ * @from: source (must be 64-bit aligned)
+ * @count: number of 32-bit quantities to copy
+ *
+ * Copy data from kernel space to MMIO space, in multiples of 32 bits at a
+ * time. Order of access is not guaranteed, nor is a memory barrier
+ * performed afterwards.
+ */
+void qib_pio_copy(void __iomem *to, const void *from, size_t count)
+{
+#ifdef CONFIG_64BIT
+ u64 __iomem *dst = to;
+ const u64 *src = from;
+ const u64 *end = src + (count >> 1);
+
+ while (src < end)
+ __raw_writeq(*src++, dst++);
+ if (count & 1)
+ __raw_writel(*(const u32 *)src, dst);
+#else
+ u32 __iomem *dst = to;
+ const u32 *src = from;
+ const u32 *end = src + count;
+
+ while (src < end)
+ __raw_writel(*src++, dst++);
+#endif
+}
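+
+/*
+ * Illustrative use (a sketch with hypothetical names): to push a packet
+ * header of "hdrwords" 32-bit words into a chip PIO send buffer one might
+ * write
+ *   qib_pio_copy(piobuf, hdr, hdrwords);
+ * followed by the driver's usual write barrier before arming the buffer,
+ * since qib_pio_copy() itself performs no ordering or barrier.
+ */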
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
new file mode 100644
index 000000000000..e0f65e39076b
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -0,0 +1,1255 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
+ * All rights reserved.
+ * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/err.h>
+#include <linux/vmalloc.h>
+
+#include "qib.h"
+
+#define BITS_PER_PAGE (PAGE_SIZE*BITS_PER_BYTE)
+#define BITS_PER_PAGE_MASK (BITS_PER_PAGE-1)
+
+static inline unsigned mk_qpn(struct qib_qpn_table *qpt,
+ struct qpn_map *map, unsigned off)
+{
+ return (map - qpt->map) * BITS_PER_PAGE + off;
+}
+
+static inline unsigned find_next_offset(struct qib_qpn_table *qpt,
+ struct qpn_map *map, unsigned off,
+ unsigned r)
+{
+ if (qpt->mask) {
+ off++;
+ if ((off & qpt->mask) >> 1 != r)
+ off = ((off & qpt->mask) ?
+ (off | qpt->mask) + 1 : off) | (r << 1);
+ } else
+ off = find_next_zero_bit(map->page, BITS_PER_PAGE, off);
+ return off;
+}
+
+/*
+ * Convert the AETH credit code into the number of credits.
+ */
+static u32 credit_table[31] = {
+ 0, /* 0 */
+ 1, /* 1 */
+ 2, /* 2 */
+ 3, /* 3 */
+ 4, /* 4 */
+ 6, /* 5 */
+ 8, /* 6 */
+ 12, /* 7 */
+ 16, /* 8 */
+ 24, /* 9 */
+ 32, /* A */
+ 48, /* B */
+ 64, /* C */
+ 96, /* D */
+ 128, /* E */
+ 192, /* F */
+ 256, /* 10 */
+ 384, /* 11 */
+ 512, /* 12 */
+ 768, /* 13 */
+ 1024, /* 14 */
+ 1536, /* 15 */
+ 2048, /* 16 */
+ 3072, /* 17 */
+ 4096, /* 18 */
+ 6144, /* 19 */
+ 8192, /* 1A */
+ 12288, /* 1B */
+ 16384, /* 1C */
+ 24576, /* 1D */
+ 32768 /* 1E */
+};
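+
+/*
+ * Illustrative reading of the table (not driver logic): an AETH credit
+ * code of 9 indexes credit_table[9] and therefore advertises 24 credits;
+ * the largest defined code, 0x1E, maps to 32768 credits.
+ */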
+
+static void get_map_page(struct qib_qpn_table *qpt, struct qpn_map *map)
+{
+ unsigned long page = get_zeroed_page(GFP_KERNEL);
+
+ /*
+ * Free the page if someone raced with us installing it.
+ */
+
+ spin_lock(&qpt->lock);
+ if (map->page)
+ free_page(page);
+ else
+ map->page = (void *)page;
+ spin_unlock(&qpt->lock);
+}
+
+/*
+ * Allocate the next available QPN or
+ * zero/one for QP type IB_QPT_SMI/IB_QPT_GSI.
+ */
+static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
+ enum ib_qp_type type, u8 port)
+{
+ u32 i, offset, max_scan, qpn;
+ struct qpn_map *map;
+ u32 ret;
+ int r;
+
+ if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
+ unsigned n;
+
+ ret = type == IB_QPT_GSI;
+ n = 1 << (ret + 2 * (port - 1));
+ spin_lock(&qpt->lock);
+ if (qpt->flags & n)
+ ret = -EINVAL;
+ else
+ qpt->flags |= n;
+ spin_unlock(&qpt->lock);
+ goto bail;
+ }
+
+ r = smp_processor_id();
+ if (r >= dd->n_krcv_queues)
+ r %= dd->n_krcv_queues;
+ qpn = qpt->last + 1;
+ if (qpn >= QPN_MAX)
+ qpn = 2;
+ if (qpt->mask && ((qpn & qpt->mask) >> 1) != r)
+ qpn = ((qpn & qpt->mask) ? (qpn | qpt->mask) + 1 : qpn) |
+ (r << 1);
+ offset = qpn & BITS_PER_PAGE_MASK;
+ map = &qpt->map[qpn / BITS_PER_PAGE];
+ max_scan = qpt->nmaps - !offset;
+ for (i = 0;;) {
+ if (unlikely(!map->page)) {
+ get_map_page(qpt, map);
+ if (unlikely(!map->page))
+ break;
+ }
+ do {
+ if (!test_and_set_bit(offset, map->page)) {
+ qpt->last = qpn;
+ ret = qpn;
+ goto bail;
+ }
+ offset = find_next_offset(qpt, map, offset, r);
+ qpn = mk_qpn(qpt, map, offset);
+ /*
+ * This test differs from alloc_pidmap().
+ * If find_next_offset() does find a zero
+ * bit, we don't need to check for QPN
+ * wrapping around past our starting QPN.
+ * We just need to be sure we don't loop
+ * forever.
+ */
+ } while (offset < BITS_PER_PAGE && qpn < QPN_MAX);
+ /*
+ * In order to keep the number of pages allocated to a
+ * minimum, we scan all the existing pages before increasing
+ * the size of the bitmap table.
+ */
+ if (++i > max_scan) {
+ if (qpt->nmaps == QPNMAP_ENTRIES)
+ break;
+ map = &qpt->map[qpt->nmaps++];
+ offset = qpt->mask ? (r << 1) : 0;
+ } else if (map < &qpt->map[qpt->nmaps]) {
+ ++map;
+ offset = qpt->mask ? (r << 1) : 0;
+ } else {
+ map = &qpt->map[0];
+ offset = qpt->mask ? (r << 1) : 2;
+ }
+ qpn = mk_qpn(qpt, map, offset);
+ }
+
+ ret = -ENOMEM;
+
+bail:
+ return ret;
+}
+
+static void free_qpn(struct qib_qpn_table *qpt, u32 qpn)
+{
+ struct qpn_map *map;
+
+ map = qpt->map + qpn / BITS_PER_PAGE;
+ if (map->page)
+ clear_bit(qpn & BITS_PER_PAGE_MASK, map->page);
+}
+
+/*
+ * Put the QP into the hash table.
+ * The hash table holds a reference to the QP.
+ */
+static void insert_qp(struct qib_ibdev *dev, struct qib_qp *qp)
+{
+ struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
+ unsigned n = qp->ibqp.qp_num % dev->qp_table_size;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->qpt_lock, flags);
+
+ if (qp->ibqp.qp_num == 0)
+ ibp->qp0 = qp;
+ else if (qp->ibqp.qp_num == 1)
+ ibp->qp1 = qp;
+ else {
+ qp->next = dev->qp_table[n];
+ dev->qp_table[n] = qp;
+ }
+ atomic_inc(&qp->refcount);
+
+ spin_unlock_irqrestore(&dev->qpt_lock, flags);
+}
+
+/*
+ * Remove the QP from the table so it can't be found asynchronously by
+ * the receive interrupt routine.
+ */
+static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp)
+{
+ struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
+ struct qib_qp *q, **qpp;
+ unsigned long flags;
+
+ qpp = &dev->qp_table[qp->ibqp.qp_num % dev->qp_table_size];
+
+ spin_lock_irqsave(&dev->qpt_lock, flags);
+
+ if (ibp->qp0 == qp) {
+ ibp->qp0 = NULL;
+ atomic_dec(&qp->refcount);
+ } else if (ibp->qp1 == qp) {
+ ibp->qp1 = NULL;
+ atomic_dec(&qp->refcount);
+ } else
+ for (; (q = *qpp) != NULL; qpp = &q->next)
+ if (q == qp) {
+ *qpp = qp->next;
+ qp->next = NULL;
+ atomic_dec(&qp->refcount);
+ break;
+ }
+
+ spin_unlock_irqrestore(&dev->qpt_lock, flags);
+}
+
+/**
+ * qib_free_all_qps - check for QPs still in use
+ * @dd: the device data structure
+ *
+ * There should not be any QPs still in use.
+ * Returns the number of QPs still in use.
+ */
+unsigned qib_free_all_qps(struct qib_devdata *dd)
+{
+ struct qib_ibdev *dev = &dd->verbs_dev;
+ unsigned long flags;
+ struct qib_qp *qp;
+ unsigned n, qp_inuse = 0;
+
+ for (n = 0; n < dd->num_pports; n++) {
+ struct qib_ibport *ibp = &dd->pport[n].ibport_data;
+
+ if (!qib_mcast_tree_empty(ibp))
+ qp_inuse++;
+ if (ibp->qp0)
+ qp_inuse++;
+ if (ibp->qp1)
+ qp_inuse++;
+ }
+
+ spin_lock_irqsave(&dev->qpt_lock, flags);
+ for (n = 0; n < dev->qp_table_size; n++) {
+ qp = dev->qp_table[n];
+ dev->qp_table[n] = NULL;
+
+ for (; qp; qp = qp->next)
+ qp_inuse++;
+ }
+ spin_unlock_irqrestore(&dev->qpt_lock, flags);
+
+ return qp_inuse;
+}
+
+/**
+ * qib_lookup_qpn - return the QP with the given QPN
+ * @ibp: the IB port on which to look up the QPN
+ * @qpn: the QP number to look up
+ *
+ * The caller is responsible for decrementing the QP reference count
+ * when done.
+ */
+struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn)
+{
+ struct qib_ibdev *dev = &ppd_from_ibp(ibp)->dd->verbs_dev;
+ unsigned long flags;
+ struct qib_qp *qp;
+
+ spin_lock_irqsave(&dev->qpt_lock, flags);
+
+ if (qpn == 0)
+ qp = ibp->qp0;
+ else if (qpn == 1)
+ qp = ibp->qp1;
+ else
+ for (qp = dev->qp_table[qpn % dev->qp_table_size]; qp;
+ qp = qp->next)
+ if (qp->ibqp.qp_num == qpn)
+ break;
+ if (qp)
+ atomic_inc(&qp->refcount);
+
+ spin_unlock_irqrestore(&dev->qpt_lock, flags);
+ return qp;
+}
+
+/**
+ * qib_reset_qp - initialize the QP state to the reset state
+ * @qp: the QP to reset
+ * @type: the QP type
+ */
+static void qib_reset_qp(struct qib_qp *qp, enum ib_qp_type type)
+{
+ qp->remote_qpn = 0;
+ qp->qkey = 0;
+ qp->qp_access_flags = 0;
+ atomic_set(&qp->s_dma_busy, 0);
+ qp->s_flags &= QIB_S_SIGNAL_REQ_WR;
+ qp->s_hdrwords = 0;
+ qp->s_wqe = NULL;
+ qp->s_draining = 0;
+ qp->s_next_psn = 0;
+ qp->s_last_psn = 0;
+ qp->s_sending_psn = 0;
+ qp->s_sending_hpsn = 0;
+ qp->s_psn = 0;
+ qp->r_psn = 0;
+ qp->r_msn = 0;
+ if (type == IB_QPT_RC) {
+ qp->s_state = IB_OPCODE_RC_SEND_LAST;
+ qp->r_state = IB_OPCODE_RC_SEND_LAST;
+ } else {
+ qp->s_state = IB_OPCODE_UC_SEND_LAST;
+ qp->r_state = IB_OPCODE_UC_SEND_LAST;
+ }
+ qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
+ qp->r_nak_state = 0;
+ qp->r_aflags = 0;
+ qp->r_flags = 0;
+ qp->s_head = 0;
+ qp->s_tail = 0;
+ qp->s_cur = 0;
+ qp->s_acked = 0;
+ qp->s_last = 0;
+ qp->s_ssn = 1;
+ qp->s_lsn = 0;
+ qp->s_mig_state = IB_MIG_MIGRATED;
+ memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue));
+ qp->r_head_ack_queue = 0;
+ qp->s_tail_ack_queue = 0;
+ qp->s_num_rd_atomic = 0;
+ if (qp->r_rq.wq) {
+ qp->r_rq.wq->head = 0;
+ qp->r_rq.wq->tail = 0;
+ }
+ qp->r_sge.num_sge = 0;
+}
+
+static void clear_mr_refs(struct qib_qp *qp, int clr_sends)
+{
+ unsigned n;
+
+ if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
+ while (qp->s_rdma_read_sge.num_sge) {
+ atomic_dec(&qp->s_rdma_read_sge.sge.mr->refcount);
+ if (--qp->s_rdma_read_sge.num_sge)
+ qp->s_rdma_read_sge.sge =
+ *qp->s_rdma_read_sge.sg_list++;
+ }
+
+ while (qp->r_sge.num_sge) {
+ atomic_dec(&qp->r_sge.sge.mr->refcount);
+ if (--qp->r_sge.num_sge)
+ qp->r_sge.sge = *qp->r_sge.sg_list++;
+ }
+
+ if (clr_sends) {
+ while (qp->s_last != qp->s_head) {
+ struct qib_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
+ unsigned i;
+
+ for (i = 0; i < wqe->wr.num_sge; i++) {
+ struct qib_sge *sge = &wqe->sg_list[i];
+
+ atomic_dec(&sge->mr->refcount);
+ }
+ if (qp->ibqp.qp_type == IB_QPT_UD ||
+ qp->ibqp.qp_type == IB_QPT_SMI ||
+ qp->ibqp.qp_type == IB_QPT_GSI)
+ atomic_dec(&to_iah(wqe->wr.wr.ud.ah)->refcount);
+ if (++qp->s_last >= qp->s_size)
+ qp->s_last = 0;
+ }
+ if (qp->s_rdma_mr) {
+ atomic_dec(&qp->s_rdma_mr->refcount);
+ qp->s_rdma_mr = NULL;
+ }
+ }
+
+ if (qp->ibqp.qp_type != IB_QPT_RC)
+ return;
+
+ for (n = 0; n < ARRAY_SIZE(qp->s_ack_queue); n++) {
+ struct qib_ack_entry *e = &qp->s_ack_queue[n];
+
+ if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST &&
+ e->rdma_sge.mr) {
+ atomic_dec(&e->rdma_sge.mr->refcount);
+ e->rdma_sge.mr = NULL;
+ }
+ }
+}
+
+/**
+ * qib_error_qp - put a QP into the error state
+ * @qp: the QP to put into the error state
+ * @err: the receive completion error to signal if a RWQE is active
+ *
+ * Flushes both send and receive work queues.
+ * Returns true if last WQE event should be generated.
+ * The QP s_lock should be held and interrupts disabled.
+ * If we are already in error state, just return.
+ */
+int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err)
+{
+ struct qib_ibdev *dev = to_idev(qp->ibqp.device);
+ struct ib_wc wc;
+ int ret = 0;
+
+ if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
+ goto bail;
+
+ qp->state = IB_QPS_ERR;
+
+ if (qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR)) {
+ qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR);
+ del_timer(&qp->s_timer);
+ }
+ spin_lock(&dev->pending_lock);
+ if (!list_empty(&qp->iowait) && !(qp->s_flags & QIB_S_BUSY)) {
+ qp->s_flags &= ~QIB_S_ANY_WAIT_IO;
+ list_del_init(&qp->iowait);
+ }
+ spin_unlock(&dev->pending_lock);
+
+ if (!(qp->s_flags & QIB_S_BUSY)) {
+ qp->s_hdrwords = 0;
+ if (qp->s_rdma_mr) {
+ atomic_dec(&qp->s_rdma_mr->refcount);
+ qp->s_rdma_mr = NULL;
+ }
+ if (qp->s_tx) {
+ qib_put_txreq(qp->s_tx);
+ qp->s_tx = NULL;
+ }
+ }
+
+ /* Schedule the sending tasklet to drain the send work queue. */
+ if (qp->s_last != qp->s_head)
+ qib_schedule_send(qp);
+
+ clear_mr_refs(qp, 0);
+
+ memset(&wc, 0, sizeof(wc));
+ wc.qp = &qp->ibqp;
+ wc.opcode = IB_WC_RECV;
+
+ if (test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) {
+ wc.wr_id = qp->r_wr_id;
+ wc.status = err;
+ qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
+ }
+ wc.status = IB_WC_WR_FLUSH_ERR;
+
+ if (qp->r_rq.wq) {
+ struct qib_rwq *wq;
+ u32 head;
+ u32 tail;
+
+ spin_lock(&qp->r_rq.lock);
+
+ /* sanity check pointers before trusting them */
+ wq = qp->r_rq.wq;
+ head = wq->head;
+ if (head >= qp->r_rq.size)
+ head = 0;
+ tail = wq->tail;
+ if (tail >= qp->r_rq.size)
+ tail = 0;
+ while (tail != head) {
+ wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
+ if (++tail >= qp->r_rq.size)
+ tail = 0;
+ qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
+ }
+ wq->tail = tail;
+
+ spin_unlock(&qp->r_rq.lock);
+ } else if (qp->ibqp.event_handler)
+ ret = 1;
+
+bail:
+ return ret;
+}
+
+/**
+ * qib_modify_qp - modify the attributes of a queue pair
+ * @ibqp: the queue pair whose attributes we're modifying
+ * @attr: the new attributes
+ * @attr_mask: the mask of attributes to modify
+ * @udata: user data for libibverbs.so
+ *
+ * Returns 0 on success, otherwise returns an errno.
+ */
+int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata)
+{
+ struct qib_ibdev *dev = to_idev(ibqp->device);
+ struct qib_qp *qp = to_iqp(ibqp);
+ enum ib_qp_state cur_state, new_state;
+ struct ib_event ev;
+ int lastwqe = 0;
+ int mig = 0;
+ int ret;
+ u32 pmtu = 0; /* for gcc warning only */
+
+ spin_lock_irq(&qp->r_lock);
+ spin_lock(&qp->s_lock);
+
+ cur_state = attr_mask & IB_QP_CUR_STATE ?
+ attr->cur_qp_state : qp->state;
+ new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
+
+ if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
+ attr_mask))
+ goto inval;
+
+ if (attr_mask & IB_QP_AV) {
+ if (attr->ah_attr.dlid >= QIB_MULTICAST_LID_BASE)
+ goto inval;
+ if (qib_check_ah(qp->ibqp.device, &attr->ah_attr))
+ goto inval;
+ }
+
+ if (attr_mask & IB_QP_ALT_PATH) {
+ if (attr->alt_ah_attr.dlid >= QIB_MULTICAST_LID_BASE)
+ goto inval;
+ if (qib_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
+ goto inval;
+ if (attr->alt_pkey_index >= qib_get_npkeys(dd_from_dev(dev)))
+ goto inval;
+ }
+
+ if (attr_mask & IB_QP_PKEY_INDEX)
+ if (attr->pkey_index >= qib_get_npkeys(dd_from_dev(dev)))
+ goto inval;
+
+ if (attr_mask & IB_QP_MIN_RNR_TIMER)
+ if (attr->min_rnr_timer > 31)
+ goto inval;
+
+ if (attr_mask & IB_QP_PORT)
+ if (qp->ibqp.qp_type == IB_QPT_SMI ||
+ qp->ibqp.qp_type == IB_QPT_GSI ||
+ attr->port_num == 0 ||
+ attr->port_num > ibqp->device->phys_port_cnt)
+ goto inval;
+
+ if (attr_mask & IB_QP_DEST_QPN)
+ if (attr->dest_qp_num > QIB_QPN_MASK)
+ goto inval;
+
+ if (attr_mask & IB_QP_RETRY_CNT)
+ if (attr->retry_cnt > 7)
+ goto inval;
+
+ if (attr_mask & IB_QP_RNR_RETRY)
+ if (attr->rnr_retry > 7)
+ goto inval;
+
+ /*
+	 * Don't allow invalid path_mtu values. It is OK to set it greater
+	 * than the active MTU (or even the max_cap, if we have tuned that
+	 * to a small MTU); we'll set qp->path_mtu to the lesser of the
+	 * requested attribute MTU and the active MTU, for packetizing
+	 * messages.
+ * Note that the QP port has to be set in INIT and MTU in RTR.
+ */
+ if (attr_mask & IB_QP_PATH_MTU) {
+ struct qib_devdata *dd = dd_from_dev(dev);
+ int mtu, pidx = qp->port_num - 1;
+
+ mtu = ib_mtu_enum_to_int(attr->path_mtu);
+ if (mtu == -1)
+ goto inval;
+ if (mtu > dd->pport[pidx].ibmtu) {
+ switch (dd->pport[pidx].ibmtu) {
+ case 4096:
+ pmtu = IB_MTU_4096;
+ break;
+ case 2048:
+ pmtu = IB_MTU_2048;
+ break;
+ case 1024:
+ pmtu = IB_MTU_1024;
+ break;
+ case 512:
+ pmtu = IB_MTU_512;
+ break;
+ case 256:
+ pmtu = IB_MTU_256;
+ break;
+ default:
+ pmtu = IB_MTU_2048;
+ }
+ } else
+ pmtu = attr->path_mtu;
+ }
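+
+	/*
+	 * Example: a request for IB_MTU_4096 on a port whose ibmtu is 2048
+	 * takes the clamping path above and leaves pmtu == IB_MTU_2048,
+	 * which is what gets stored in qp->path_mtu further down.
+	 */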
+
+ if (attr_mask & IB_QP_PATH_MIG_STATE) {
+ if (attr->path_mig_state == IB_MIG_REARM) {
+ if (qp->s_mig_state == IB_MIG_ARMED)
+ goto inval;
+ if (new_state != IB_QPS_RTS)
+ goto inval;
+ } else if (attr->path_mig_state == IB_MIG_MIGRATED) {
+ if (qp->s_mig_state == IB_MIG_REARM)
+ goto inval;
+ if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD)
+ goto inval;
+ if (qp->s_mig_state == IB_MIG_ARMED)
+ mig = 1;
+ } else
+ goto inval;
+ }
+
+ if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
+ if (attr->max_dest_rd_atomic > QIB_MAX_RDMA_ATOMIC)
+ goto inval;
+
+ switch (new_state) {
+ case IB_QPS_RESET:
+ if (qp->state != IB_QPS_RESET) {
+ qp->state = IB_QPS_RESET;
+ spin_lock(&dev->pending_lock);
+ if (!list_empty(&qp->iowait))
+ list_del_init(&qp->iowait);
+ spin_unlock(&dev->pending_lock);
+ qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT);
+ spin_unlock(&qp->s_lock);
+ spin_unlock_irq(&qp->r_lock);
+ /* Stop the sending work queue and retry timer */
+ cancel_work_sync(&qp->s_work);
+ del_timer_sync(&qp->s_timer);
+ wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
+ if (qp->s_tx) {
+ qib_put_txreq(qp->s_tx);
+ qp->s_tx = NULL;
+ }
+ remove_qp(dev, qp);
+ wait_event(qp->wait, !atomic_read(&qp->refcount));
+ spin_lock_irq(&qp->r_lock);
+ spin_lock(&qp->s_lock);
+ clear_mr_refs(qp, 1);
+ qib_reset_qp(qp, ibqp->qp_type);
+ }
+ break;
+
+ case IB_QPS_RTR:
+ /* Allow event to retrigger if QP set to RTR more than once */
+ qp->r_flags &= ~QIB_R_COMM_EST;
+ qp->state = new_state;
+ break;
+
+ case IB_QPS_SQD:
+ qp->s_draining = qp->s_last != qp->s_cur;
+ qp->state = new_state;
+ break;
+
+ case IB_QPS_SQE:
+ if (qp->ibqp.qp_type == IB_QPT_RC)
+ goto inval;
+ qp->state = new_state;
+ break;
+
+ case IB_QPS_ERR:
+ lastwqe = qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);
+ break;
+
+ default:
+ qp->state = new_state;
+ break;
+ }
+
+ if (attr_mask & IB_QP_PKEY_INDEX)
+ qp->s_pkey_index = attr->pkey_index;
+
+ if (attr_mask & IB_QP_PORT)
+ qp->port_num = attr->port_num;
+
+ if (attr_mask & IB_QP_DEST_QPN)
+ qp->remote_qpn = attr->dest_qp_num;
+
+ if (attr_mask & IB_QP_SQ_PSN) {
+ qp->s_next_psn = attr->sq_psn & QIB_PSN_MASK;
+ qp->s_psn = qp->s_next_psn;
+ qp->s_sending_psn = qp->s_next_psn;
+ qp->s_last_psn = qp->s_next_psn - 1;
+ qp->s_sending_hpsn = qp->s_last_psn;
+ }
+
+ if (attr_mask & IB_QP_RQ_PSN)
+ qp->r_psn = attr->rq_psn & QIB_PSN_MASK;
+
+ if (attr_mask & IB_QP_ACCESS_FLAGS)
+ qp->qp_access_flags = attr->qp_access_flags;
+
+ if (attr_mask & IB_QP_AV) {
+ qp->remote_ah_attr = attr->ah_attr;
+ qp->s_srate = attr->ah_attr.static_rate;
+ }
+
+ if (attr_mask & IB_QP_ALT_PATH) {
+ qp->alt_ah_attr = attr->alt_ah_attr;
+ qp->s_alt_pkey_index = attr->alt_pkey_index;
+ }
+
+ if (attr_mask & IB_QP_PATH_MIG_STATE) {
+ qp->s_mig_state = attr->path_mig_state;
+ if (mig) {
+ qp->remote_ah_attr = qp->alt_ah_attr;
+ qp->port_num = qp->alt_ah_attr.port_num;
+ qp->s_pkey_index = qp->s_alt_pkey_index;
+ }
+ }
+
+ if (attr_mask & IB_QP_PATH_MTU)
+ qp->path_mtu = pmtu;
+
+ if (attr_mask & IB_QP_RETRY_CNT) {
+ qp->s_retry_cnt = attr->retry_cnt;
+ qp->s_retry = attr->retry_cnt;
+ }
+
+ if (attr_mask & IB_QP_RNR_RETRY) {
+ qp->s_rnr_retry_cnt = attr->rnr_retry;
+ qp->s_rnr_retry = attr->rnr_retry;
+ }
+
+ if (attr_mask & IB_QP_MIN_RNR_TIMER)
+ qp->r_min_rnr_timer = attr->min_rnr_timer;
+
+ if (attr_mask & IB_QP_TIMEOUT)
+ qp->timeout = attr->timeout;
+
+ if (attr_mask & IB_QP_QKEY)
+ qp->qkey = attr->qkey;
+
+ if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
+ qp->r_max_rd_atomic = attr->max_dest_rd_atomic;
+
+ if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
+ qp->s_max_rd_atomic = attr->max_rd_atomic;
+
+ spin_unlock(&qp->s_lock);
+ spin_unlock_irq(&qp->r_lock);
+
+ if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
+ insert_qp(dev, qp);
+
+ if (lastwqe) {
+ ev.device = qp->ibqp.device;
+ ev.element.qp = &qp->ibqp;
+ ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
+ qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
+ }
+ if (mig) {
+ ev.device = qp->ibqp.device;
+ ev.element.qp = &qp->ibqp;
+ ev.event = IB_EVENT_PATH_MIG;
+ qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
+ }
+ ret = 0;
+ goto bail;
+
+inval:
+ spin_unlock(&qp->s_lock);
+ spin_unlock_irq(&qp->r_lock);
+ ret = -EINVAL;
+
+bail:
+ return ret;
+}
+
+int qib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_qp_init_attr *init_attr)
+{
+ struct qib_qp *qp = to_iqp(ibqp);
+
+ attr->qp_state = qp->state;
+ attr->cur_qp_state = attr->qp_state;
+ attr->path_mtu = qp->path_mtu;
+ attr->path_mig_state = qp->s_mig_state;
+ attr->qkey = qp->qkey;
+ attr->rq_psn = qp->r_psn & QIB_PSN_MASK;
+ attr->sq_psn = qp->s_next_psn & QIB_PSN_MASK;
+ attr->dest_qp_num = qp->remote_qpn;
+ attr->qp_access_flags = qp->qp_access_flags;
+ attr->cap.max_send_wr = qp->s_size - 1;
+ attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
+ attr->cap.max_send_sge = qp->s_max_sge;
+ attr->cap.max_recv_sge = qp->r_rq.max_sge;
+ attr->cap.max_inline_data = 0;
+ attr->ah_attr = qp->remote_ah_attr;
+ attr->alt_ah_attr = qp->alt_ah_attr;
+ attr->pkey_index = qp->s_pkey_index;
+ attr->alt_pkey_index = qp->s_alt_pkey_index;
+ attr->en_sqd_async_notify = 0;
+ attr->sq_draining = qp->s_draining;
+ attr->max_rd_atomic = qp->s_max_rd_atomic;
+ attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
+ attr->min_rnr_timer = qp->r_min_rnr_timer;
+ attr->port_num = qp->port_num;
+ attr->timeout = qp->timeout;
+ attr->retry_cnt = qp->s_retry_cnt;
+ attr->rnr_retry = qp->s_rnr_retry_cnt;
+ attr->alt_port_num = qp->alt_ah_attr.port_num;
+ attr->alt_timeout = qp->alt_timeout;
+
+ init_attr->event_handler = qp->ibqp.event_handler;
+ init_attr->qp_context = qp->ibqp.qp_context;
+ init_attr->send_cq = qp->ibqp.send_cq;
+ init_attr->recv_cq = qp->ibqp.recv_cq;
+ init_attr->srq = qp->ibqp.srq;
+ init_attr->cap = attr->cap;
+ if (qp->s_flags & QIB_S_SIGNAL_REQ_WR)
+ init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
+ else
+ init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
+ init_attr->qp_type = qp->ibqp.qp_type;
+ init_attr->port_num = qp->port_num;
+ return 0;
+}
+
+/**
+ * qib_compute_aeth - compute the AETH (syndrome + MSN)
+ * @qp: the queue pair to compute the AETH for
+ *
+ * Returns the AETH.
+ */
+__be32 qib_compute_aeth(struct qib_qp *qp)
+{
+ u32 aeth = qp->r_msn & QIB_MSN_MASK;
+
+ if (qp->ibqp.srq) {
+ /*
+ * Shared receive queues don't generate credits.
+ * Set the credit field to the invalid value.
+ */
+ aeth |= QIB_AETH_CREDIT_INVAL << QIB_AETH_CREDIT_SHIFT;
+ } else {
+ u32 min, max, x;
+ u32 credits;
+ struct qib_rwq *wq = qp->r_rq.wq;
+ u32 head;
+ u32 tail;
+
+ /* sanity check pointers before trusting them */
+ head = wq->head;
+ if (head >= qp->r_rq.size)
+ head = 0;
+ tail = wq->tail;
+ if (tail >= qp->r_rq.size)
+ tail = 0;
+ /*
+ * Compute the number of credits available (RWQEs).
+		 * XXX Not holding the r_rq.lock here, so there is a small
+		 * chance that the pair of reads is not atomic.
+ */
+ credits = head - tail;
+ if ((int)credits < 0)
+ credits += qp->r_rq.size;
+ /*
+ * Binary search the credit table to find the code to
+ * use.
+ */
+ min = 0;
+ max = 31;
+ for (;;) {
+ x = (min + max) / 2;
+ if (credit_table[x] == credits)
+ break;
+ if (credit_table[x] > credits)
+ max = x;
+ else if (min == x)
+ break;
+ else
+ min = x;
+ }
+ aeth |= x << QIB_AETH_CREDIT_SHIFT;
+ }
+ return cpu_to_be32(aeth);
+}
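+
+/*
+ * Encoding example (illustrative): with a handful of RWQEs available,
+ * the binary search above selects the largest credit_table[] entry that
+ * does not exceed the count, and the AETH is
+ *     (credit code << QIB_AETH_CREDIT_SHIFT) | (r_msn & QIB_MSN_MASK)
+ * so a single 32-bit field carries both the receive credit and the MSN.
+ * qib_get_credit() below performs the inverse of this encoding.
+ */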
+
+/**
+ * qib_create_qp - create a queue pair for a device
+ * @ibpd: the protection domain whose device we create the queue pair for
+ * @init_attr: the attributes of the queue pair
+ * @udata: user data for libibverbs.so
+ *
+ * Returns the queue pair on success, otherwise returns an errno.
+ *
+ * Called by the ib_create_qp() core verbs function.
+ */
+struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ struct qib_qp *qp;
+ int err;
+ struct qib_swqe *swq = NULL;
+ struct qib_ibdev *dev;
+ struct qib_devdata *dd;
+ size_t sz;
+ size_t sg_list_sz;
+ struct ib_qp *ret;
+
+ if (init_attr->cap.max_send_sge > ib_qib_max_sges ||
+ init_attr->cap.max_send_wr > ib_qib_max_qp_wrs) {
+ ret = ERR_PTR(-EINVAL);
+ goto bail;
+ }
+
+ /* Check receive queue parameters if no SRQ is specified. */
+ if (!init_attr->srq) {
+ if (init_attr->cap.max_recv_sge > ib_qib_max_sges ||
+ init_attr->cap.max_recv_wr > ib_qib_max_qp_wrs) {
+ ret = ERR_PTR(-EINVAL);
+ goto bail;
+ }
+ if (init_attr->cap.max_send_sge +
+ init_attr->cap.max_send_wr +
+ init_attr->cap.max_recv_sge +
+ init_attr->cap.max_recv_wr == 0) {
+ ret = ERR_PTR(-EINVAL);
+ goto bail;
+ }
+ }
+
+ switch (init_attr->qp_type) {
+ case IB_QPT_SMI:
+ case IB_QPT_GSI:
+ if (init_attr->port_num == 0 ||
+ init_attr->port_num > ibpd->device->phys_port_cnt) {
+ ret = ERR_PTR(-EINVAL);
+ goto bail;
+ }
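+		/* FALLTHROUGH */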
+ case IB_QPT_UC:
+ case IB_QPT_RC:
+ case IB_QPT_UD:
+ sz = sizeof(struct qib_sge) *
+ init_attr->cap.max_send_sge +
+ sizeof(struct qib_swqe);
+ swq = vmalloc((init_attr->cap.max_send_wr + 1) * sz);
+ if (swq == NULL) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail;
+ }
+ sz = sizeof(*qp);
+ sg_list_sz = 0;
+ if (init_attr->srq) {
+ struct qib_srq *srq = to_isrq(init_attr->srq);
+
+ if (srq->rq.max_sge > 1)
+ sg_list_sz = sizeof(*qp->r_sg_list) *
+ (srq->rq.max_sge - 1);
+ } else if (init_attr->cap.max_recv_sge > 1)
+ sg_list_sz = sizeof(*qp->r_sg_list) *
+ (init_attr->cap.max_recv_sge - 1);
+ qp = kzalloc(sz + sg_list_sz, GFP_KERNEL);
+ if (!qp) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail_swq;
+ }
+ if (init_attr->srq)
+ sz = 0;
+ else {
+ qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
+ qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
+ sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
+ sizeof(struct qib_rwqe);
+ qp->r_rq.wq = vmalloc_user(sizeof(struct qib_rwq) +
+ qp->r_rq.size * sz);
+ if (!qp->r_rq.wq) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail_qp;
+ }
+ }
+
+ /*
+ * ib_create_qp() will initialize qp->ibqp
+ * except for qp->ibqp.qp_num.
+ */
+ spin_lock_init(&qp->r_lock);
+ spin_lock_init(&qp->s_lock);
+ spin_lock_init(&qp->r_rq.lock);
+ atomic_set(&qp->refcount, 0);
+ init_waitqueue_head(&qp->wait);
+ init_waitqueue_head(&qp->wait_dma);
+ init_timer(&qp->s_timer);
+ qp->s_timer.data = (unsigned long)qp;
+ INIT_WORK(&qp->s_work, qib_do_send);
+ INIT_LIST_HEAD(&qp->iowait);
+ INIT_LIST_HEAD(&qp->rspwait);
+ qp->state = IB_QPS_RESET;
+ qp->s_wq = swq;
+ qp->s_size = init_attr->cap.max_send_wr + 1;
+ qp->s_max_sge = init_attr->cap.max_send_sge;
+ if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
+ qp->s_flags = QIB_S_SIGNAL_REQ_WR;
+ dev = to_idev(ibpd->device);
+ dd = dd_from_dev(dev);
+ err = alloc_qpn(dd, &dev->qpn_table, init_attr->qp_type,
+ init_attr->port_num);
+ if (err < 0) {
+ ret = ERR_PTR(err);
+ vfree(qp->r_rq.wq);
+ goto bail_qp;
+ }
+ qp->ibqp.qp_num = err;
+ qp->port_num = init_attr->port_num;
+ qp->processor_id = smp_processor_id();
+ qib_reset_qp(qp, init_attr->qp_type);
+ break;
+
+ default:
+ /* Don't support raw QPs */
+ ret = ERR_PTR(-ENOSYS);
+ goto bail;
+ }
+
+ init_attr->cap.max_inline_data = 0;
+
+ /*
+ * Return the address of the RWQ as the offset to mmap.
+ * See qib_mmap() for details.
+ */
+ if (udata && udata->outlen >= sizeof(__u64)) {
+ if (!qp->r_rq.wq) {
+ __u64 offset = 0;
+
+ err = ib_copy_to_udata(udata, &offset,
+ sizeof(offset));
+ if (err) {
+ ret = ERR_PTR(err);
+ goto bail_ip;
+ }
+ } else {
+ u32 s = sizeof(struct qib_rwq) + qp->r_rq.size * sz;
+
+ qp->ip = qib_create_mmap_info(dev, s,
+ ibpd->uobject->context,
+ qp->r_rq.wq);
+ if (!qp->ip) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail_ip;
+ }
+
+ err = ib_copy_to_udata(udata, &(qp->ip->offset),
+ sizeof(qp->ip->offset));
+ if (err) {
+ ret = ERR_PTR(err);
+ goto bail_ip;
+ }
+ }
+ }
+
+ spin_lock(&dev->n_qps_lock);
+ if (dev->n_qps_allocated == ib_qib_max_qps) {
+ spin_unlock(&dev->n_qps_lock);
+ ret = ERR_PTR(-ENOMEM);
+ goto bail_ip;
+ }
+
+ dev->n_qps_allocated++;
+ spin_unlock(&dev->n_qps_lock);
+
+ if (qp->ip) {
+ spin_lock_irq(&dev->pending_lock);
+ list_add(&qp->ip->pending_mmaps, &dev->pending_mmaps);
+ spin_unlock_irq(&dev->pending_lock);
+ }
+
+ ret = &qp->ibqp;
+ goto bail;
+
+bail_ip:
+ if (qp->ip)
+ kref_put(&qp->ip->ref, qib_release_mmap_info);
+ else
+ vfree(qp->r_rq.wq);
+ free_qpn(&dev->qpn_table, qp->ibqp.qp_num);
+bail_qp:
+ kfree(qp);
+bail_swq:
+ vfree(swq);
+bail:
+ return ret;
+}
+
+/**
+ * qib_destroy_qp - destroy a queue pair
+ * @ibqp: the queue pair to destroy
+ *
+ * Returns 0 on success.
+ *
+ * Note that this can be called while the QP is actively sending or
+ * receiving!
+ */
+int qib_destroy_qp(struct ib_qp *ibqp)
+{
+ struct qib_qp *qp = to_iqp(ibqp);
+ struct qib_ibdev *dev = to_idev(ibqp->device);
+
+ /* Make sure HW and driver activity is stopped. */
+ spin_lock_irq(&qp->s_lock);
+ if (qp->state != IB_QPS_RESET) {
+ qp->state = IB_QPS_RESET;
+ spin_lock(&dev->pending_lock);
+ if (!list_empty(&qp->iowait))
+ list_del_init(&qp->iowait);
+ spin_unlock(&dev->pending_lock);
+ qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT);
+ spin_unlock_irq(&qp->s_lock);
+ cancel_work_sync(&qp->s_work);
+ del_timer_sync(&qp->s_timer);
+ wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
+ if (qp->s_tx) {
+ qib_put_txreq(qp->s_tx);
+ qp->s_tx = NULL;
+ }
+ remove_qp(dev, qp);
+ wait_event(qp->wait, !atomic_read(&qp->refcount));
+ clear_mr_refs(qp, 1);
+ } else
+ spin_unlock_irq(&qp->s_lock);
+
+	/* all users cleaned up, mark the QPN available again */
+ free_qpn(&dev->qpn_table, qp->ibqp.qp_num);
+ spin_lock(&dev->n_qps_lock);
+ dev->n_qps_allocated--;
+ spin_unlock(&dev->n_qps_lock);
+
+ if (qp->ip)
+ kref_put(&qp->ip->ref, qib_release_mmap_info);
+ else
+ vfree(qp->r_rq.wq);
+ vfree(qp->s_wq);
+ kfree(qp);
+ return 0;
+}
+
+/**
+ * qib_init_qpn_table - initialize the QP number table for a device
+ * @dd: the qlogic_ib device
+ * @qpt: the QPN table
+ */
+void qib_init_qpn_table(struct qib_devdata *dd, struct qib_qpn_table *qpt)
+{
+ spin_lock_init(&qpt->lock);
+ qpt->last = 1; /* start with QPN 2 */
+ qpt->nmaps = 1;
+ qpt->mask = dd->qpn_mask;
+}
+
+/**
+ * qib_free_qpn_table - free the QP number table for a device
+ * @qpt: the QPN table
+ */
+void qib_free_qpn_table(struct qib_qpn_table *qpt)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(qpt->map); i++)
+ if (qpt->map[i].page)
+ free_page((unsigned long) qpt->map[i].page);
+}
+
+/**
+ * qib_get_credit - handle a credit update contained in an AETH
+ * @qp: the QP whose send credit to update
+ * @aeth: the Acknowledge Extended Transport Header
+ *
+ * The QP s_lock should be held.
+ */
+void qib_get_credit(struct qib_qp *qp, u32 aeth)
+{
+ u32 credit = (aeth >> QIB_AETH_CREDIT_SHIFT) & QIB_AETH_CREDIT_MASK;
+
+ /*
+ * If the credit is invalid, we can send
+ * as many packets as we like. Otherwise, we have to
+ * honor the credit field.
+ */
+ if (credit == QIB_AETH_CREDIT_INVAL) {
+ if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) {
+ qp->s_flags |= QIB_S_UNLIMITED_CREDIT;
+ if (qp->s_flags & QIB_S_WAIT_SSN_CREDIT) {
+ qp->s_flags &= ~QIB_S_WAIT_SSN_CREDIT;
+ qib_schedule_send(qp);
+ }
+ }
+ } else if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) {
+ /* Compute new LSN (i.e., MSN + credit) */
+ credit = (aeth + credit_table[credit]) & QIB_MSN_MASK;
+ if (qib_cmp24(credit, qp->s_lsn) > 0) {
+ qp->s_lsn = credit;
+ if (qp->s_flags & QIB_S_WAIT_SSN_CREDIT) {
+ qp->s_flags &= ~QIB_S_WAIT_SSN_CREDIT;
+ qib_schedule_send(qp);
+ }
+ }
+ }
+}
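+
+/*
+ * Decode example (illustrative, assuming the IBTA AETH layout of a
+ * credit/syndrome field above a 24-bit MSN): an AETH carrying credit
+ * code 3 and MSN 16 moves s_lsn to (16 + credit_table[3]) mod 2^24,
+ * while the reserved "invalid" code instead switches the QP to
+ * unlimited credit as handled above.
+ */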
diff --git a/drivers/infiniband/hw/qib/qib_qsfp.c b/drivers/infiniband/hw/qib/qib_qsfp.c
new file mode 100644
index 000000000000..35b3604b691d
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_qsfp.c
@@ -0,0 +1,564 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/vmalloc.h>
+
+#include "qib.h"
+#include "qib_qsfp.h"
+
+/*
+ * QSFP support for ib_qib driver, using "Two Wire Serial Interface" driver
+ * in qib_twsi.c
+ */
+#define QSFP_MAX_RETRY 4
+
+static int qsfp_read(struct qib_pportdata *ppd, int addr, void *bp, int len)
+{
+ struct qib_devdata *dd = ppd->dd;
+ u32 out, mask;
+ int ret, cnt, pass = 0;
+ int stuck = 0;
+ u8 *buff = bp;
+
+ ret = mutex_lock_interruptible(&dd->eep_lock);
+ if (ret)
+ goto no_unlock;
+
+ if (dd->twsi_eeprom_dev == QIB_TWSI_NO_DEV) {
+ ret = -ENXIO;
+ goto bail;
+ }
+
+ /*
+ * We presume, if we are called at all, that this board has
+ * QSFP. This is on the same i2c chain as the legacy parts,
+ * but only responds if the module is selected via GPIO pins.
+ * Further, there are very long setup and hold requirements
+ * on MODSEL.
+ */
+ mask = QSFP_GPIO_MOD_SEL_N | QSFP_GPIO_MOD_RST_N | QSFP_GPIO_LP_MODE;
+ out = QSFP_GPIO_MOD_RST_N | QSFP_GPIO_LP_MODE;
+ if (ppd->hw_pidx) {
+ mask <<= QSFP_GPIO_PORT2_SHIFT;
+ out <<= QSFP_GPIO_PORT2_SHIFT;
+ }
+
+ dd->f_gpio_mod(dd, out, mask, mask);
+
+ /*
+	 * Module could take up to 2 msec to respond to MOD_SEL, and there
+ * is no way to tell if it is ready, so we must wait.
+ */
+ msleep(2);
+
+ /* Make sure TWSI bus is in sane state. */
+ ret = qib_twsi_reset(dd);
+ if (ret) {
+ qib_dev_porterr(dd, ppd->port,
+ "QSFP interface Reset for read failed\n");
+ ret = -EIO;
+ stuck = 1;
+ goto deselect;
+ }
+
+ /* All QSFP modules are at A0 */
+
+ cnt = 0;
+ while (cnt < len) {
+ unsigned in_page;
+ int wlen = len - cnt;
+ in_page = addr % QSFP_PAGESIZE;
+ if ((in_page + wlen) > QSFP_PAGESIZE)
+ wlen = QSFP_PAGESIZE - in_page;
+ ret = qib_twsi_blk_rd(dd, QSFP_DEV, addr, buff + cnt, wlen);
+		/* Some QSFPs fail the first try. Retry as an experiment */
+ if (ret && cnt == 0 && ++pass < QSFP_MAX_RETRY)
+ continue;
+ if (ret) {
+			/* qib_twsi_blk_rd() returns 1 on error, else 0 */
+ ret = -EIO;
+ goto deselect;
+ }
+ addr += wlen;
+ cnt += wlen;
+ }
+ ret = cnt;
+
+deselect:
+ /*
+	 * Module could take up to 10 usec after transfer before
+ * ready to respond to MOD_SEL negation, and there is no way
+ * to tell if it is ready, so we must wait.
+ */
+ udelay(10);
+	/* set QSFP MODSEL, RST, LP all high */
+ dd->f_gpio_mod(dd, mask, mask, mask);
+
+ /*
+	 * Module could take up to 2 msec to respond to MOD_SEL
+	 * going away, and there is no way to tell if it is ready,
+	 * so we must wait.
+ */
+ if (stuck)
+ qib_dev_err(dd, "QSFP interface bus stuck non-idle\n");
+
+ if (pass >= QSFP_MAX_RETRY && ret)
+ qib_dev_porterr(dd, ppd->port, "QSFP failed even retrying\n");
+ else if (pass)
+ qib_dev_porterr(dd, ppd->port, "QSFP retries: %d\n", pass);
+
+ msleep(2);
+
+bail:
+ mutex_unlock(&dd->eep_lock);
+
+no_unlock:
+ return ret;
+}
+
+/*
+ * qsfp_write
+ * We do not ordinarily write the QSFP, but this is needed to select
+ * the page on non-flat QSFPs, and possibly later unusual cases
+ */
+static int qib_qsfp_write(struct qib_pportdata *ppd, int addr, void *bp,
+ int len)
+{
+ struct qib_devdata *dd = ppd->dd;
+ u32 out, mask;
+ int ret, cnt;
+ u8 *buff = bp;
+
+ ret = mutex_lock_interruptible(&dd->eep_lock);
+ if (ret)
+ goto no_unlock;
+
+ if (dd->twsi_eeprom_dev == QIB_TWSI_NO_DEV) {
+ ret = -ENXIO;
+ goto bail;
+ }
+
+ /*
+ * We presume, if we are called at all, that this board has
+ * QSFP. This is on the same i2c chain as the legacy parts,
+ * but only responds if the module is selected via GPIO pins.
+ * Further, there are very long setup and hold requirements
+ * on MODSEL.
+ */
+ mask = QSFP_GPIO_MOD_SEL_N | QSFP_GPIO_MOD_RST_N | QSFP_GPIO_LP_MODE;
+ out = QSFP_GPIO_MOD_RST_N | QSFP_GPIO_LP_MODE;
+ if (ppd->hw_pidx) {
+ mask <<= QSFP_GPIO_PORT2_SHIFT;
+ out <<= QSFP_GPIO_PORT2_SHIFT;
+ }
+ dd->f_gpio_mod(dd, out, mask, mask);
+
+ /*
+	 * Module could take up to 2 msec to respond to MOD_SEL,
+ * and there is no way to tell if it is ready, so we must wait.
+ */
+ msleep(2);
+
+ /* Make sure TWSI bus is in sane state. */
+ ret = qib_twsi_reset(dd);
+ if (ret) {
+ qib_dev_porterr(dd, ppd->port,
+ "QSFP interface Reset for write failed\n");
+ ret = -EIO;
+ goto deselect;
+ }
+
+ /* All QSFP modules are at A0 */
+
+ cnt = 0;
+ while (cnt < len) {
+ unsigned in_page;
+ int wlen = len - cnt;
+ in_page = addr % QSFP_PAGESIZE;
+ if ((in_page + wlen) > QSFP_PAGESIZE)
+ wlen = QSFP_PAGESIZE - in_page;
+ ret = qib_twsi_blk_wr(dd, QSFP_DEV, addr, buff + cnt, wlen);
+ if (ret) {
+			/* qib_twsi_blk_wr() returns 1 on error, else 0 */
+ ret = -EIO;
+ goto deselect;
+ }
+ addr += wlen;
+ cnt += wlen;
+ }
+ ret = cnt;
+
+deselect:
+ /*
+	 * Module could take up to 10 usec after transfer before
+ * ready to respond to MOD_SEL negation, and there is no way
+ * to tell if it is ready, so we must wait.
+ */
+ udelay(10);
+ /* set QSFP MODSEL, RST, LP high */
+ dd->f_gpio_mod(dd, mask, mask, mask);
+ /*
+	 * Module could take up to 2 msec to respond to MOD_SEL
+	 * going away, and there is no way to tell if it is ready,
+	 * so we must wait.
+ */
+ msleep(2);
+
+bail:
+ mutex_unlock(&dd->eep_lock);
+
+no_unlock:
+ return ret;
+}
+
+/*
+ * For validation, we want to check the checksums, even of the
+ * fields we do not otherwise use. This function reads the bytes from
+ * <first> to <next-1> and returns the 8lsbs of the sum, or <0 for errors
+ */
+static int qsfp_cks(struct qib_pportdata *ppd, int first, int next)
+{
+ int ret;
+ u16 cks;
+ u8 bval;
+
+ cks = 0;
+ while (first < next) {
+ ret = qsfp_read(ppd, first, &bval, 1);
+ if (ret < 0)
+ goto bail;
+ cks += bval;
+ ++first;
+ }
+ ret = cks & 0xFF;
+bail:
+ return ret;
+
+}
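+
+/*
+ * Example: if bytes 129..145 sum to 0x0123, qsfp_cks() returns 0x23.
+ * qib_refresh_qsfp_cache() below folds that into the same running sum it
+ * keeps for the fields it reads individually, and compares the low byte
+ * of the total against the CC byte stored in the module.
+ */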
+
+int qib_refresh_qsfp_cache(struct qib_pportdata *ppd, struct qib_qsfp_cache *cp)
+{
+ int ret;
+ int idx;
+ u16 cks;
+ u32 mask;
+ u8 peek[4];
+
+ /* ensure sane contents on invalid reads, for cable swaps */
+ memset(cp, 0, sizeof(*cp));
+
+ mask = QSFP_GPIO_MOD_PRS_N;
+ if (ppd->hw_pidx)
+ mask <<= QSFP_GPIO_PORT2_SHIFT;
+
+ ret = ppd->dd->f_gpio_mod(ppd->dd, 0, 0, 0);
+ if (ret & mask) {
+ ret = -ENODEV;
+ goto bail;
+ }
+
+ ret = qsfp_read(ppd, 0, peek, 3);
+ if (ret < 0)
+ goto bail;
+ if ((peek[0] & 0xFE) != 0x0C)
+ qib_dev_porterr(ppd->dd, ppd->port,
+ "QSFP byte0 is 0x%02X, S/B 0x0C/D\n", peek[0]);
+
+ if ((peek[2] & 2) == 0) {
+ /*
+ * If cable is paged, rather than "flat memory", we need to
+		 * set the page to zero, even if it already appears to be zero.
+ */
+ u8 poke = 0;
+ ret = qib_qsfp_write(ppd, 127, &poke, 1);
+ udelay(50);
+ if (ret != 1) {
+ qib_dev_porterr(ppd->dd, ppd->port,
+ "Failed QSFP Page set\n");
+ goto bail;
+ }
+ }
+
+ ret = qsfp_read(ppd, QSFP_MOD_ID_OFFS, &cp->id, 1);
+ if (ret < 0)
+ goto bail;
+ if ((cp->id & 0xFE) != 0x0C)
+ qib_dev_porterr(ppd->dd, ppd->port,
+ "QSFP ID byte is 0x%02X, S/B 0x0C/D\n", cp->id);
+ cks = cp->id;
+
+ ret = qsfp_read(ppd, QSFP_MOD_PWR_OFFS, &cp->pwr, 1);
+ if (ret < 0)
+ goto bail;
+ cks += cp->pwr;
+
+ ret = qsfp_cks(ppd, QSFP_MOD_PWR_OFFS + 1, QSFP_MOD_LEN_OFFS);
+ if (ret < 0)
+ goto bail;
+ cks += ret;
+
+ ret = qsfp_read(ppd, QSFP_MOD_LEN_OFFS, &cp->len, 1);
+ if (ret < 0)
+ goto bail;
+ cks += cp->len;
+
+ ret = qsfp_read(ppd, QSFP_MOD_TECH_OFFS, &cp->tech, 1);
+ if (ret < 0)
+ goto bail;
+ cks += cp->tech;
+
+ ret = qsfp_read(ppd, QSFP_VEND_OFFS, &cp->vendor, QSFP_VEND_LEN);
+ if (ret < 0)
+ goto bail;
+ for (idx = 0; idx < QSFP_VEND_LEN; ++idx)
+ cks += cp->vendor[idx];
+
+ ret = qsfp_read(ppd, QSFP_IBXCV_OFFS, &cp->xt_xcv, 1);
+ if (ret < 0)
+ goto bail;
+ cks += cp->xt_xcv;
+
+ ret = qsfp_read(ppd, QSFP_VOUI_OFFS, &cp->oui, QSFP_VOUI_LEN);
+ if (ret < 0)
+ goto bail;
+ for (idx = 0; idx < QSFP_VOUI_LEN; ++idx)
+ cks += cp->oui[idx];
+
+ ret = qsfp_read(ppd, QSFP_PN_OFFS, &cp->partnum, QSFP_PN_LEN);
+ if (ret < 0)
+ goto bail;
+ for (idx = 0; idx < QSFP_PN_LEN; ++idx)
+ cks += cp->partnum[idx];
+
+ ret = qsfp_read(ppd, QSFP_REV_OFFS, &cp->rev, QSFP_REV_LEN);
+ if (ret < 0)
+ goto bail;
+ for (idx = 0; idx < QSFP_REV_LEN; ++idx)
+ cks += cp->rev[idx];
+
+ ret = qsfp_read(ppd, QSFP_ATTEN_OFFS, &cp->atten, QSFP_ATTEN_LEN);
+ if (ret < 0)
+ goto bail;
+ for (idx = 0; idx < QSFP_ATTEN_LEN; ++idx)
+ cks += cp->atten[idx];
+
+ ret = qsfp_cks(ppd, QSFP_ATTEN_OFFS + QSFP_ATTEN_LEN, QSFP_CC_OFFS);
+ if (ret < 0)
+ goto bail;
+ cks += ret;
+
+ cks &= 0xFF;
+ ret = qsfp_read(ppd, QSFP_CC_OFFS, &cp->cks1, 1);
+ if (ret < 0)
+ goto bail;
+ if (cks != cp->cks1)
+ qib_dev_porterr(ppd->dd, ppd->port,
+ "QSFP cks1 is %02X, computed %02X\n", cp->cks1,
+ cks);
+
+ /* Second checksum covers 192 to (serial, date, lot) */
+ ret = qsfp_cks(ppd, QSFP_CC_OFFS + 1, QSFP_SN_OFFS);
+ if (ret < 0)
+ goto bail;
+ cks = ret;
+
+ ret = qsfp_read(ppd, QSFP_SN_OFFS, &cp->serial, QSFP_SN_LEN);
+ if (ret < 0)
+ goto bail;
+ for (idx = 0; idx < QSFP_SN_LEN; ++idx)
+ cks += cp->serial[idx];
+
+ ret = qsfp_read(ppd, QSFP_DATE_OFFS, &cp->date, QSFP_DATE_LEN);
+ if (ret < 0)
+ goto bail;
+ for (idx = 0; idx < QSFP_DATE_LEN; ++idx)
+ cks += cp->date[idx];
+
+ ret = qsfp_read(ppd, QSFP_LOT_OFFS, &cp->lot, QSFP_LOT_LEN);
+ if (ret < 0)
+ goto bail;
+ for (idx = 0; idx < QSFP_LOT_LEN; ++idx)
+ cks += cp->lot[idx];
+
+ ret = qsfp_cks(ppd, QSFP_LOT_OFFS + QSFP_LOT_LEN, QSFP_CC_EXT_OFFS);
+ if (ret < 0)
+ goto bail;
+ cks += ret;
+
+ ret = qsfp_read(ppd, QSFP_CC_EXT_OFFS, &cp->cks2, 1);
+ if (ret < 0)
+ goto bail;
+ cks &= 0xFF;
+ if (cks != cp->cks2)
+ qib_dev_porterr(ppd->dd, ppd->port,
+ "QSFP cks2 is %02X, computed %02X\n", cp->cks2,
+ cks);
+ return 0;
+
+bail:
+ cp->id = 0;
+ return ret;
+}
+
+const char * const qib_qsfp_devtech[16] = {
+ "850nm VCSEL", "1310nm VCSEL", "1550nm VCSEL", "1310nm FP",
+ "1310nm DFB", "1550nm DFB", "1310nm EML", "1550nm EML",
+ "Cu Misc", "1490nm DFB", "Cu NoEq", "Cu Eq",
+ "Undef", "Cu Active BothEq", "Cu FarEq", "Cu NearEq"
+};
+
+#define QSFP_DUMP_CHUNK 16 /* Holds longest string */
+#define QSFP_DEFAULT_HDR_CNT 224
+
+static const char *pwr_codes = "1.5W2.0W2.5W3.5W";
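+
+/*
+ * QSFP_PWR() (from qib_qsfp.h) yields the power class 0..3, and
+ * qib_qsfp_dump() below prints pwr_codes + 4 * class with a "%.3s"
+ * format, so e.g. class 1 selects "2.0" and is shown as "PWR:2.0W".
+ */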
+
+/*
+ * Initialize structures that control access to QSFP. Called once per port
+ * on cards that support QSFP.
+ */
+void qib_qsfp_init(struct qib_qsfp_data *qd,
+ void (*fevent)(struct work_struct *))
+{
+ u32 mask, highs;
+ int pins;
+
+ struct qib_devdata *dd = qd->ppd->dd;
+
+ /* Initialize work struct for later QSFP events */
+ INIT_WORK(&qd->work, fevent);
+
+ /*
+ * Later, we may want more validation. For now, just set up pins and
+ * blip reset. If module is present, call qib_refresh_qsfp_cache(),
+ * to do further init.
+ */
+ mask = QSFP_GPIO_MOD_SEL_N | QSFP_GPIO_MOD_RST_N | QSFP_GPIO_LP_MODE;
+ highs = mask - QSFP_GPIO_MOD_RST_N;
+ if (qd->ppd->hw_pidx) {
+ mask <<= QSFP_GPIO_PORT2_SHIFT;
+ highs <<= QSFP_GPIO_PORT2_SHIFT;
+ }
+ dd->f_gpio_mod(dd, highs, mask, mask);
+ udelay(20); /* Generous RST dwell */
+
+ dd->f_gpio_mod(dd, mask, mask, mask);
+ /* Spec says module can take up to two seconds! */
+ mask = QSFP_GPIO_MOD_PRS_N;
+ if (qd->ppd->hw_pidx)
+ mask <<= QSFP_GPIO_PORT2_SHIFT;
+
+ /* Do not try to wait here. Better to let event handle it */
+ pins = dd->f_gpio_mod(dd, 0, 0, 0);
+ if (pins & mask)
+ goto bail;
+ /* We see a module, but it may be unwise to look yet. Just schedule */
+ qd->t_insert = get_jiffies_64();
+ schedule_work(&qd->work);
+bail:
+ return;
+}
+
+void qib_qsfp_deinit(struct qib_qsfp_data *qd)
+{
+ /*
+	 * There is nothing to do here for now.  Our work is scheduled with
+	 * schedule_work(), and flush_scheduled_work() from remove_one will
+	 * block until all work set up with schedule_work() completes.
+ */
+}
+
+int qib_qsfp_dump(struct qib_pportdata *ppd, char *buf, int len)
+{
+ struct qib_qsfp_cache cd;
+ u8 bin_buff[QSFP_DUMP_CHUNK];
+ char lenstr[6];
+ int sofar, ret;
+ int bidx = 0;
+
+ sofar = 0;
+ ret = qib_refresh_qsfp_cache(ppd, &cd);
+ if (ret < 0)
+ goto bail;
+
+ lenstr[0] = ' ';
+ lenstr[1] = '\0';
+ if (QSFP_IS_CU(cd.tech))
+ sprintf(lenstr, "%dM ", cd.len);
+
+ sofar += scnprintf(buf + sofar, len - sofar, "PWR:%.3sW\n", pwr_codes +
+ (QSFP_PWR(cd.pwr) * 4));
+
+ sofar += scnprintf(buf + sofar, len - sofar, "TECH:%s%s\n", lenstr,
+ qib_qsfp_devtech[cd.tech >> 4]);
+
+ sofar += scnprintf(buf + sofar, len - sofar, "Vendor:%.*s\n",
+ QSFP_VEND_LEN, cd.vendor);
+
+ sofar += scnprintf(buf + sofar, len - sofar, "OUI:%06X\n",
+ QSFP_OUI(cd.oui));
+
+ sofar += scnprintf(buf + sofar, len - sofar, "Part#:%.*s\n",
+ QSFP_PN_LEN, cd.partnum);
+ sofar += scnprintf(buf + sofar, len - sofar, "Rev:%.*s\n",
+ QSFP_REV_LEN, cd.rev);
+ if (QSFP_IS_CU(cd.tech))
+ sofar += scnprintf(buf + sofar, len - sofar, "Atten:%d, %d\n",
+ QSFP_ATTEN_SDR(cd.atten),
+ QSFP_ATTEN_DDR(cd.atten));
+ sofar += scnprintf(buf + sofar, len - sofar, "Serial:%.*s\n",
+ QSFP_SN_LEN, cd.serial);
+ sofar += scnprintf(buf + sofar, len - sofar, "Date:%.*s\n",
+ QSFP_DATE_LEN, cd.date);
+ sofar += scnprintf(buf + sofar, len - sofar, "Lot:%.*s\n",
+			   QSFP_LOT_LEN, cd.lot);
+
+ while (bidx < QSFP_DEFAULT_HDR_CNT) {
+ int iidx;
+ ret = qsfp_read(ppd, bidx, bin_buff, QSFP_DUMP_CHUNK);
+ if (ret < 0)
+ goto bail;
+ for (iidx = 0; iidx < ret; ++iidx) {
+ sofar += scnprintf(buf + sofar, len-sofar, " %02X",
+ bin_buff[iidx]);
+ }
+ sofar += scnprintf(buf + sofar, len - sofar, "\n");
+ bidx += QSFP_DUMP_CHUNK;
+ }
+ ret = sofar;
+bail:
+ return ret;
+}
diff --git a/drivers/infiniband/hw/qib/qib_qsfp.h b/drivers/infiniband/hw/qib/qib_qsfp.h
new file mode 100644
index 000000000000..19b527bafd57
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_qsfp.h
@@ -0,0 +1,184 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+/* QSFP support common definitions, for ib_qib driver */
+
+#define QSFP_DEV 0xA0
+#define QSFP_PWR_LAG_MSEC 2000
+
+/*
+ * Below are masks for various QSFP signals, for Port 1.
+ * Port2 equivalents are shifted by QSFP_GPIO_PORT2_SHIFT.
+ * _N means asserted low
+ */
+#define QSFP_GPIO_MOD_SEL_N (4)
+#define QSFP_GPIO_MOD_PRS_N (8)
+#define QSFP_GPIO_INT_N (0x10)
+#define QSFP_GPIO_MOD_RST_N (0x20)
+#define QSFP_GPIO_LP_MODE (0x40)
+#define QSFP_GPIO_PORT2_SHIFT 5
+
+#define QSFP_PAGESIZE 128
+/* Defined fields that QLogic requires of qualified cables */
+/* Byte 0 is Identifier, not checked */
+/* Byte 1 is reserved "status MSB" */
+/* Byte 2 is "status LSB" We only care that D2 "Flat Mem" is set. */
+/*
+ * Rest of first 128 not used, although 127 is reserved for page select
+ * if module is not "Flat memory".
+ */
+/* Byte 128 is Identifier: must be 0x0c for QSFP, or 0x0d for QSFP+ */
+#define QSFP_MOD_ID_OFFS 128
+/*
+ * Byte 129 is "Extended Identifier". We only care about D7,D6: Power class
+ * 0:1.5W, 1:2.0W, 2:2.5W, 3:3.5W
+ */
+#define QSFP_MOD_PWR_OFFS 129
+/* Byte 130 is Connector type. Not QLogic req'd */
+/* Bytes 131..138 are Transceiver types, bit maps for various tech, none IB */
+/* Byte 139 is encoding. code 0x01 is 8b10b. Not QLogic req'd */
+/* Byte 140 is nominal bit-rate, in units of 100 Mbits/sec. Not QLogic req'd */
+/* Byte 141 is Extended Rate Select. Not QLogic req'd */
+/* Bytes 142..145 are lengths for various fiber types. Not QLogic req'd */
+/* Byte 146 is length for Copper. Units of 1 meter */
+#define QSFP_MOD_LEN_OFFS 146
+/*
+ * Byte 147 is Device technology. D0..3 not QLogic req'd
+ * D4..7 select from 15 choices, translated by table:
+ */
+#define QSFP_MOD_TECH_OFFS 147
+extern const char *const qib_qsfp_devtech[16];
+/* Active Equalization includes fiber, copper full EQ, and copper near Eq */
+#define QSFP_IS_ACTIVE(tech) ((0xA2FF >> ((tech) >> 4)) & 1)
+/* Attenuation should be valid for copper other than full/near Eq */
+#define QSFP_HAS_ATTEN(tech) ((0x4D00 >> ((tech) >> 4)) & 1)
+/* Length is only valid if technology is "copper" */
+#define QSFP_IS_CU(tech) ((0xED00 >> ((tech) >> 4)) & 1)
+#define QSFP_TECH_1490 9
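+
+/*
+ * Example: a tech byte of 0xA0 has high nibble 10 ("Cu NoEq" in
+ * qib_qsfp_devtech[]), so QSFP_IS_CU() tests bit 10 of 0xED00 and
+ * reports copper, while QSFP_IS_ACTIVE() tests bit 10 of 0xA2FF and
+ * reports a passive cable.
+ */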
+
+#define QSFP_OUI(oui) (((unsigned)oui[0] << 16) | ((unsigned)oui[1] << 8) | \
+ oui[2])
+#define QSFP_OUI_AMPHENOL 0x415048
+#define QSFP_OUI_FINISAR 0x009065
+#define QSFP_OUI_GORE 0x002177
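+
+/*
+ * Example: an OUI of { 0x00, 0x21, 0x77 } packs to 0x002177, i.e.
+ * QSFP_OUI_GORE above.
+ */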
+
+/* Bytes 148..163 are Vendor Name, Left-justified Blank-filled */
+#define QSFP_VEND_OFFS 148
+#define QSFP_VEND_LEN 16
+/* Byte 164 is IB Extended transceiver codes. Bits D0..3 are SDR,DDR,QDR,EDR */
+#define QSFP_IBXCV_OFFS 164
+/* Bytes 165..167 are Vendor OUI number */
+#define QSFP_VOUI_OFFS 165
+#define QSFP_VOUI_LEN 3
+/* Bytes 168..183 are Vendor Part Number, string */
+#define QSFP_PN_OFFS 168
+#define QSFP_PN_LEN 16
+/* Bytes 184,185 are Vendor Rev. Left Justified, Blank-filled */
+#define QSFP_REV_OFFS 184
+#define QSFP_REV_LEN 2
+/*
+ * Bytes 186,187 are Wavelength, if Optical. Not QLogic req'd
+ * If copper, they are attenuation in dB:
+ * Byte 186 is at 2.5Gb/sec (SDR), Byte 187 at 5.0Gb/sec (DDR)
+ */
+#define QSFP_ATTEN_OFFS 186
+#define QSFP_ATTEN_LEN 2
+/* Bytes 188,189 are Wavelength tolerance, not QLogic req'd */
+/* Byte 190 is Max Case Temp. Not QLogic req'd */
+/* Byte 191 is LSB of sum of bytes 128..190. Not QLogic req'd */
+#define QSFP_CC_OFFS 191
+/* Bytes 192..195 are Options implemented in qsfp. Not Qlogic req'd */
+/* Bytes 196..211 are Serial Number, String */
+#define QSFP_SN_OFFS 196
+#define QSFP_SN_LEN 16
+/* Bytes 212..219 are date-code YYMMDD (MM==1 for Jan) */
+#define QSFP_DATE_OFFS 212
+#define QSFP_DATE_LEN 6
+/* Bytes 218,219 are optional lot-code, string */
+#define QSFP_LOT_OFFS 218
+#define QSFP_LOT_LEN 2
+/* Bytes 220, 221 indicate monitoring options, Not QLogic req'd */
+/* Byte 223 is LSB of sum of bytes 192..222 */
+#define QSFP_CC_EXT_OFFS 223
+
+/*
+ * struct qib_qsfp_data encapsulates state of QSFP device for one port.
+ * it will be part of port-chip-specific data if a board supports QSFP.
+ *
+ * Since multiple board-types use QSFP, and their pport_data structs
+ * differ (in the chip-specific section), we need a pointer to its head.
+ *
+ * Avoiding premature optimization, we will have one work_struct per port,
+ * and let the (increasingly inaccurately named) eep_lock arbitrate
+ * access to common resources.
+ *
+ */
+
+/*
+ * Hold the parts of the onboard EEPROM that we care about, so we aren't
+ * constantly bit-banging the interface to re-read them
+ */
+struct qib_qsfp_cache {
+ u8 id; /* must be 0x0C or 0x0D; 0 indicates invalid EEPROM read */
+ u8 pwr; /* in D6,7 */
+ u8 len; /* in meters, Cu only */
+ u8 tech;
+ char vendor[QSFP_VEND_LEN];
+	u8 xt_xcv; /* Ext. transceiver codes, 4 LSBs are IB speeds supported */
+ u8 oui[QSFP_VOUI_LEN];
+ u8 partnum[QSFP_PN_LEN];
+ u8 rev[QSFP_REV_LEN];
+ u8 atten[QSFP_ATTEN_LEN];
+ u8 cks1; /* Checksum of bytes 128..190 */
+ u8 serial[QSFP_SN_LEN];
+ u8 date[QSFP_DATE_LEN];
+ u8 lot[QSFP_LOT_LEN];
+	u8 cks2; /* Checksum of bytes 192..222 */
+};
+
+#define QSFP_PWR(pbyte) (((pbyte) >> 6) & 3)
+#define QSFP_ATTEN_SDR(attenarray) (attenarray[0])
+#define QSFP_ATTEN_DDR(attenarray) (attenarray[1])
+
+struct qib_qsfp_data {
+ /* Helps to find our way */
+ struct qib_pportdata *ppd;
+ struct work_struct work;
+ struct qib_qsfp_cache cache;
+ u64 t_insert;
+};
+
+extern int qib_refresh_qsfp_cache(struct qib_pportdata *ppd,
+ struct qib_qsfp_cache *cp);
+extern void qib_qsfp_init(struct qib_qsfp_data *qd,
+ void (*fevent)(struct work_struct *));
+extern void qib_qsfp_deinit(struct qib_qsfp_data *qd);
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
new file mode 100644
index 000000000000..40c0a373719c
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -0,0 +1,2288 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/io.h>
+
+#include "qib.h"
+
+/* cut down ridiculously long IB macro names */
+#define OP(x) IB_OPCODE_RC_##x
+
+static void rc_timeout(unsigned long arg);
+
+static u32 restart_sge(struct qib_sge_state *ss, struct qib_swqe *wqe,
+ u32 psn, u32 pmtu)
+{
+ u32 len;
+
+ len = ((psn - wqe->psn) & QIB_PSN_MASK) * pmtu;
+ ss->sge = wqe->sg_list[0];
+ ss->sg_list = wqe->sg_list + 1;
+ ss->num_sge = wqe->wr.num_sge;
+ ss->total_len = wqe->length;
+ qib_skip_sge(ss, len, 0);
+ return wqe->length - len;
+}
+
+static void start_timer(struct qib_qp *qp)
+{
+ qp->s_flags |= QIB_S_TIMER;
+ qp->s_timer.function = rc_timeout;
+ /* 4.096 usec. * (1 << qp->timeout) */
+ qp->s_timer.expires = jiffies +
+ usecs_to_jiffies((4096UL * (1UL << qp->timeout)) / 1000UL);
+ add_timer(&qp->s_timer);
+}
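+
+/*
+ * Example: with qp->timeout == 14 the interval above is
+ * 4096 ns * 2^14 = 67,108,864 ns, i.e. roughly 67 ms, passed to
+ * usecs_to_jiffies() as 67108 usec.
+ */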
+
+/**
+ * qib_make_rc_ack - construct a response packet (ACK, NAK, or RDMA read)
+ * @dev: the device for this QP
+ * @qp: a pointer to the QP
+ * @ohdr: a pointer to the IB header being constructed
+ * @pmtu: the path MTU
+ *
+ * Return 1 if constructed; otherwise, return 0.
+ * Note that we are on the responder's side of the QP context.
+ * Note the QP s_lock must be held.
+ */
+static int qib_make_rc_ack(struct qib_ibdev *dev, struct qib_qp *qp,
+ struct qib_other_headers *ohdr, u32 pmtu)
+{
+ struct qib_ack_entry *e;
+ u32 hwords;
+ u32 len;
+ u32 bth0;
+ u32 bth2;
+
+ /* Don't send an ACK if we aren't supposed to. */
+ if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
+ goto bail;
+
+ /* header size in 32-bit words LRH+BTH = (8+12)/4. */
+ hwords = 5;
+
+ switch (qp->s_ack_state) {
+ case OP(RDMA_READ_RESPONSE_LAST):
+ case OP(RDMA_READ_RESPONSE_ONLY):
+ e = &qp->s_ack_queue[qp->s_tail_ack_queue];
+ if (e->rdma_sge.mr) {
+ atomic_dec(&e->rdma_sge.mr->refcount);
+ e->rdma_sge.mr = NULL;
+ }
+ /* FALLTHROUGH */
+ case OP(ATOMIC_ACKNOWLEDGE):
+ /*
+ * We can increment the tail pointer now that the last
+ * response has been sent instead of only being
+ * constructed.
+ */
+ if (++qp->s_tail_ack_queue > QIB_MAX_RDMA_ATOMIC)
+ qp->s_tail_ack_queue = 0;
+ /* FALLTHROUGH */
+ case OP(SEND_ONLY):
+ case OP(ACKNOWLEDGE):
+ /* Check for no next entry in the queue. */
+ if (qp->r_head_ack_queue == qp->s_tail_ack_queue) {
+ if (qp->s_flags & QIB_S_ACK_PENDING)
+ goto normal;
+ goto bail;
+ }
+
+ e = &qp->s_ack_queue[qp->s_tail_ack_queue];
+ if (e->opcode == OP(RDMA_READ_REQUEST)) {
+ /*
+ * If a RDMA read response is being resent and
+ * we haven't seen the duplicate request yet,
+ * then stop sending the remaining responses the
+ * responder has seen until the requester resends it.
+ */
+ len = e->rdma_sge.sge_length;
+ if (len && !e->rdma_sge.mr) {
+ qp->s_tail_ack_queue = qp->r_head_ack_queue;
+ goto bail;
+ }
+ /* Copy SGE state in case we need to resend */
+ qp->s_rdma_mr = e->rdma_sge.mr;
+ if (qp->s_rdma_mr)
+ atomic_inc(&qp->s_rdma_mr->refcount);
+ qp->s_ack_rdma_sge.sge = e->rdma_sge;
+ qp->s_ack_rdma_sge.num_sge = 1;
+ qp->s_cur_sge = &qp->s_ack_rdma_sge;
+ if (len > pmtu) {
+ len = pmtu;
+ qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
+ } else {
+ qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
+ e->sent = 1;
+ }
+ ohdr->u.aeth = qib_compute_aeth(qp);
+ hwords++;
+ qp->s_ack_rdma_psn = e->psn;
+ bth2 = qp->s_ack_rdma_psn++ & QIB_PSN_MASK;
+ } else {
+ /* COMPARE_SWAP or FETCH_ADD */
+ qp->s_cur_sge = NULL;
+ len = 0;
+ qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
+ ohdr->u.at.aeth = qib_compute_aeth(qp);
+ ohdr->u.at.atomic_ack_eth[0] =
+ cpu_to_be32(e->atomic_data >> 32);
+ ohdr->u.at.atomic_ack_eth[1] =
+ cpu_to_be32(e->atomic_data);
+ hwords += sizeof(ohdr->u.at) / sizeof(u32);
+ bth2 = e->psn & QIB_PSN_MASK;
+ e->sent = 1;
+ }
+ bth0 = qp->s_ack_state << 24;
+ break;
+
+ case OP(RDMA_READ_RESPONSE_FIRST):
+ qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
+ /* FALLTHROUGH */
+ case OP(RDMA_READ_RESPONSE_MIDDLE):
+ qp->s_cur_sge = &qp->s_ack_rdma_sge;
+ qp->s_rdma_mr = qp->s_ack_rdma_sge.sge.mr;
+ if (qp->s_rdma_mr)
+ atomic_inc(&qp->s_rdma_mr->refcount);
+ len = qp->s_ack_rdma_sge.sge.sge_length;
+ if (len > pmtu)
+ len = pmtu;
+ else {
+ ohdr->u.aeth = qib_compute_aeth(qp);
+ hwords++;
+ qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
+ e = &qp->s_ack_queue[qp->s_tail_ack_queue];
+ e->sent = 1;
+ }
+ bth0 = qp->s_ack_state << 24;
+ bth2 = qp->s_ack_rdma_psn++ & QIB_PSN_MASK;
+ break;
+
+ default:
+normal:
+ /*
+ * Send a regular ACK.
+ * Set the s_ack_state so we wait until after sending
+ * the ACK before setting s_ack_state to ACKNOWLEDGE
+ * (see above).
+ */
+ qp->s_ack_state = OP(SEND_ONLY);
+ qp->s_flags &= ~QIB_S_ACK_PENDING;
+ qp->s_cur_sge = NULL;
+ if (qp->s_nak_state)
+ ohdr->u.aeth =
+ cpu_to_be32((qp->r_msn & QIB_MSN_MASK) |
+ (qp->s_nak_state <<
+ QIB_AETH_CREDIT_SHIFT));
+ else
+ ohdr->u.aeth = qib_compute_aeth(qp);
+ hwords++;
+ len = 0;
+ bth0 = OP(ACKNOWLEDGE) << 24;
+ bth2 = qp->s_ack_psn & QIB_PSN_MASK;
+ }
+ qp->s_rdma_ack_cnt++;
+ qp->s_hdrwords = hwords;
+ qp->s_cur_size = len;
+ qib_make_ruc_header(qp, ohdr, bth0, bth2);
+ return 1;
+
+bail:
+ qp->s_ack_state = OP(ACKNOWLEDGE);
+ qp->s_flags &= ~(QIB_S_RESP_PENDING | QIB_S_ACK_PENDING);
+ return 0;
+}
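+
+/*
+ * Note on the ack queue: the wrap test above implies s_ack_queue[] is a
+ * ring of QIB_MAX_RDMA_ATOMIC + 1 entries, so r_head_ack_queue ==
+ * s_tail_ack_queue can unambiguously mean "no pending responses" even
+ * when the maximum number of RDMA read/atomic requests is outstanding.
+ */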
+
+/**
+ * qib_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC)
+ * @qp: a pointer to the QP
+ *
+ * Return 1 if constructed; otherwise, return 0.
+ */
+int qib_make_rc_req(struct qib_qp *qp)
+{
+ struct qib_ibdev *dev = to_idev(qp->ibqp.device);
+ struct qib_other_headers *ohdr;
+ struct qib_sge_state *ss;
+ struct qib_swqe *wqe;
+ u32 hwords;
+ u32 len;
+ u32 bth0;
+ u32 bth2;
+ u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
+ char newreq;
+ unsigned long flags;
+ int ret = 0;
+ int delta;
+
+ ohdr = &qp->s_hdr.u.oth;
+ if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
+ ohdr = &qp->s_hdr.u.l.oth;
+
+ /*
+ * The lock is needed to synchronize between the sending tasklet,
+ * the receive interrupt handler, and timeout resends.
+ */
+ spin_lock_irqsave(&qp->s_lock, flags);
+
+ /* Sending responses has higher priority over sending requests. */
+ if ((qp->s_flags & QIB_S_RESP_PENDING) &&
+ qib_make_rc_ack(dev, qp, ohdr, pmtu))
+ goto done;
+
+ if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_SEND_OK)) {
+ if (!(ib_qib_state_ops[qp->state] & QIB_FLUSH_SEND))
+ goto bail;
+ /* We are in the error state, flush the work request. */
+ if (qp->s_last == qp->s_head)
+ goto bail;
+ /* If DMAs are in progress, we can't flush immediately. */
+ if (atomic_read(&qp->s_dma_busy)) {
+ qp->s_flags |= QIB_S_WAIT_DMA;
+ goto bail;
+ }
+ wqe = get_swqe_ptr(qp, qp->s_last);
+ while (qp->s_last != qp->s_acked) {
+ qib_send_complete(qp, wqe, IB_WC_SUCCESS);
+ if (++qp->s_last >= qp->s_size)
+ qp->s_last = 0;
+ wqe = get_swqe_ptr(qp, qp->s_last);
+ }
+ qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
+ goto done;
+ }
+
+ if (qp->s_flags & (QIB_S_WAIT_RNR | QIB_S_WAIT_ACK))
+ goto bail;
+
+ if (qib_cmp24(qp->s_psn, qp->s_sending_hpsn) <= 0) {
+ if (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) {
+ qp->s_flags |= QIB_S_WAIT_PSN;
+ goto bail;
+ }
+ qp->s_sending_psn = qp->s_psn;
+ qp->s_sending_hpsn = qp->s_psn - 1;
+ }
+
+ /* header size in 32-bit words LRH+BTH = (8+12)/4. */
+ hwords = 5;
+ bth0 = 0;
+
+ /* Send a request. */
+ wqe = get_swqe_ptr(qp, qp->s_cur);
+ switch (qp->s_state) {
+ default:
+ if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_NEXT_SEND_OK))
+ goto bail;
+ /*
+ * Resend an old request or start a new one.
+ *
+ * We keep track of the current SWQE so that
+ * we don't reset the "furthest progress" state
+ * if we need to back up.
+ */
+ newreq = 0;
+ if (qp->s_cur == qp->s_tail) {
+ /* Check if send work queue is empty. */
+ if (qp->s_tail == qp->s_head)
+ goto bail;
+ /*
+ * If a fence is requested, wait for previous
+ * RDMA read and atomic operations to finish.
+ */
+ if ((wqe->wr.send_flags & IB_SEND_FENCE) &&
+ qp->s_num_rd_atomic) {
+ qp->s_flags |= QIB_S_WAIT_FENCE;
+ goto bail;
+ }
+ wqe->psn = qp->s_next_psn;
+ newreq = 1;
+ }
+ /*
+ * Note that we have to be careful not to modify the
+ * original work request since we may need to resend
+ * it.
+ */
+ len = wqe->length;
+ ss = &qp->s_sge;
+ bth2 = qp->s_psn & QIB_PSN_MASK;
+ switch (wqe->wr.opcode) {
+ case IB_WR_SEND:
+ case IB_WR_SEND_WITH_IMM:
+ /* If no credit, return. */
+ if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT) &&
+ qib_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) {
+ qp->s_flags |= QIB_S_WAIT_SSN_CREDIT;
+ goto bail;
+ }
+ wqe->lpsn = wqe->psn;
+ if (len > pmtu) {
+ wqe->lpsn += (len - 1) / pmtu;
+ qp->s_state = OP(SEND_FIRST);
+ len = pmtu;
+ break;
+ }
+ if (wqe->wr.opcode == IB_WR_SEND)
+ qp->s_state = OP(SEND_ONLY);
+ else {
+ qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
+ /* Immediate data comes after the BTH */
+ ohdr->u.imm_data = wqe->wr.ex.imm_data;
+ hwords += 1;
+ }
+ if (wqe->wr.send_flags & IB_SEND_SOLICITED)
+ bth0 |= IB_BTH_SOLICITED;
+ bth2 |= IB_BTH_REQ_ACK;
+ if (++qp->s_cur == qp->s_size)
+ qp->s_cur = 0;
+ break;
+
+ case IB_WR_RDMA_WRITE:
+ if (newreq && !(qp->s_flags & QIB_S_UNLIMITED_CREDIT))
+ qp->s_lsn++;
+ /* FALLTHROUGH */
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ /* If no credit, return. */
+ if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT) &&
+ qib_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) {
+ qp->s_flags |= QIB_S_WAIT_SSN_CREDIT;
+ goto bail;
+ }
+ ohdr->u.rc.reth.vaddr =
+ cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
+ ohdr->u.rc.reth.rkey =
+ cpu_to_be32(wqe->wr.wr.rdma.rkey);
+ ohdr->u.rc.reth.length = cpu_to_be32(len);
+ hwords += sizeof(struct ib_reth) / sizeof(u32);
+ wqe->lpsn = wqe->psn;
+ if (len > pmtu) {
+ wqe->lpsn += (len - 1) / pmtu;
+ qp->s_state = OP(RDMA_WRITE_FIRST);
+ len = pmtu;
+ break;
+ }
+ if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
+ qp->s_state = OP(RDMA_WRITE_ONLY);
+ else {
+ qp->s_state =
+ OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
+ /* Immediate data comes after RETH */
+ ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
+ hwords += 1;
+ if (wqe->wr.send_flags & IB_SEND_SOLICITED)
+ bth0 |= IB_BTH_SOLICITED;
+ }
+ bth2 |= IB_BTH_REQ_ACK;
+ if (++qp->s_cur == qp->s_size)
+ qp->s_cur = 0;
+ break;
+
+ case IB_WR_RDMA_READ:
+ /*
+ * Don't allow more operations to be started
+ * than the QP limits allow.
+ */
+ if (newreq) {
+ if (qp->s_num_rd_atomic >=
+ qp->s_max_rd_atomic) {
+ qp->s_flags |= QIB_S_WAIT_RDMAR;
+ goto bail;
+ }
+ qp->s_num_rd_atomic++;
+ if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT))
+ qp->s_lsn++;
+ /*
+ * Adjust s_next_psn to count the
+ * expected number of responses.
+ */
+ if (len > pmtu)
+ qp->s_next_psn += (len - 1) / pmtu;
+ wqe->lpsn = qp->s_next_psn++;
+ }
+ ohdr->u.rc.reth.vaddr =
+ cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
+ ohdr->u.rc.reth.rkey =
+ cpu_to_be32(wqe->wr.wr.rdma.rkey);
+ ohdr->u.rc.reth.length = cpu_to_be32(len);
+ qp->s_state = OP(RDMA_READ_REQUEST);
+ hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
+ ss = NULL;
+ len = 0;
+ bth2 |= IB_BTH_REQ_ACK;
+ if (++qp->s_cur == qp->s_size)
+ qp->s_cur = 0;
+ break;
+
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ /*
+ * Don't allow more operations to be started
+ * than the QP limits allow.
+ */
+ if (newreq) {
+ if (qp->s_num_rd_atomic >=
+ qp->s_max_rd_atomic) {
+ qp->s_flags |= QIB_S_WAIT_RDMAR;
+ goto bail;
+ }
+ qp->s_num_rd_atomic++;
+ if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT))
+ qp->s_lsn++;
+ wqe->lpsn = wqe->psn;
+ }
+ if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
+ qp->s_state = OP(COMPARE_SWAP);
+ ohdr->u.atomic_eth.swap_data = cpu_to_be64(
+ wqe->wr.wr.atomic.swap);
+ ohdr->u.atomic_eth.compare_data = cpu_to_be64(
+ wqe->wr.wr.atomic.compare_add);
+ } else {
+ qp->s_state = OP(FETCH_ADD);
+ ohdr->u.atomic_eth.swap_data = cpu_to_be64(
+ wqe->wr.wr.atomic.compare_add);
+ ohdr->u.atomic_eth.compare_data = 0;
+ }
+ ohdr->u.atomic_eth.vaddr[0] = cpu_to_be32(
+ wqe->wr.wr.atomic.remote_addr >> 32);
+ ohdr->u.atomic_eth.vaddr[1] = cpu_to_be32(
+ wqe->wr.wr.atomic.remote_addr);
+ ohdr->u.atomic_eth.rkey = cpu_to_be32(
+ wqe->wr.wr.atomic.rkey);
+ hwords += sizeof(struct ib_atomic_eth) / sizeof(u32);
+ ss = NULL;
+ len = 0;
+ bth2 |= IB_BTH_REQ_ACK;
+ if (++qp->s_cur == qp->s_size)
+ qp->s_cur = 0;
+ break;
+
+ default:
+ goto bail;
+ }
+ qp->s_sge.sge = wqe->sg_list[0];
+ qp->s_sge.sg_list = wqe->sg_list + 1;
+ qp->s_sge.num_sge = wqe->wr.num_sge;
+ qp->s_sge.total_len = wqe->length;
+ qp->s_len = wqe->length;
+ if (newreq) {
+ qp->s_tail++;
+ if (qp->s_tail >= qp->s_size)
+ qp->s_tail = 0;
+ }
+ if (wqe->wr.opcode == IB_WR_RDMA_READ)
+ qp->s_psn = wqe->lpsn + 1;
+ else {
+ qp->s_psn++;
+ if (qib_cmp24(qp->s_psn, qp->s_next_psn) > 0)
+ qp->s_next_psn = qp->s_psn;
+ }
+ break;
+
+ case OP(RDMA_READ_RESPONSE_FIRST):
+ /*
+ * qp->s_state is normally set to the opcode of the
+ * last packet constructed for new requests and therefore
+ * is never set to RDMA read response.
+ * RDMA_READ_RESPONSE_FIRST is used by the ACK processing
+ * thread to indicate a SEND needs to be restarted from an
+ * earlier PSN without interfering with the sending thread.
+ * See qib_restart_rc().
+ */
+ qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
+ /* FALLTHROUGH */
+ case OP(SEND_FIRST):
+ qp->s_state = OP(SEND_MIDDLE);
+ /* FALLTHROUGH */
+ case OP(SEND_MIDDLE):
+ bth2 = qp->s_psn++ & QIB_PSN_MASK;
+ if (qib_cmp24(qp->s_psn, qp->s_next_psn) > 0)
+ qp->s_next_psn = qp->s_psn;
+ ss = &qp->s_sge;
+ len = qp->s_len;
+ if (len > pmtu) {
+ len = pmtu;
+ break;
+ }
+ if (wqe->wr.opcode == IB_WR_SEND)
+ qp->s_state = OP(SEND_LAST);
+ else {
+ qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
+ /* Immediate data comes after the BTH */
+ ohdr->u.imm_data = wqe->wr.ex.imm_data;
+ hwords += 1;
+ }
+ if (wqe->wr.send_flags & IB_SEND_SOLICITED)
+ bth0 |= IB_BTH_SOLICITED;
+ bth2 |= IB_BTH_REQ_ACK;
+ qp->s_cur++;
+ if (qp->s_cur >= qp->s_size)
+ qp->s_cur = 0;
+ break;
+
+ case OP(RDMA_READ_RESPONSE_LAST):
+ /*
+ * qp->s_state is normally set to the opcode of the
+ * last packet constructed for new requests and therefore
+ * is never set to RDMA read response.
+ * RDMA_READ_RESPONSE_LAST is used by the ACK processing
+ * thread to indicate a RDMA write needs to be restarted from
+ * an earlier PSN without interfering with the sending thread.
+ * See qib_restart_rc().
+ */
+ qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
+ /* FALLTHROUGH */
+ case OP(RDMA_WRITE_FIRST):
+ qp->s_state = OP(RDMA_WRITE_MIDDLE);
+ /* FALLTHROUGH */
+ case OP(RDMA_WRITE_MIDDLE):
+ bth2 = qp->s_psn++ & QIB_PSN_MASK;
+ if (qib_cmp24(qp->s_psn, qp->s_next_psn) > 0)
+ qp->s_next_psn = qp->s_psn;
+ ss = &qp->s_sge;
+ len = qp->s_len;
+ if (len > pmtu) {
+ len = pmtu;
+ break;
+ }
+ if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
+ qp->s_state = OP(RDMA_WRITE_LAST);
+ else {
+ qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
+ /* Immediate data comes after the BTH */
+ ohdr->u.imm_data = wqe->wr.ex.imm_data;
+ hwords += 1;
+ if (wqe->wr.send_flags & IB_SEND_SOLICITED)
+ bth0 |= IB_BTH_SOLICITED;
+ }
+ bth2 |= IB_BTH_REQ_ACK;
+ qp->s_cur++;
+ if (qp->s_cur >= qp->s_size)
+ qp->s_cur = 0;
+ break;
+
+ case OP(RDMA_READ_RESPONSE_MIDDLE):
+ /*
+ * qp->s_state is normally set to the opcode of the
+ * last packet constructed for new requests and therefore
+ * is never set to RDMA read response.
+ * RDMA_READ_RESPONSE_MIDDLE is used by the ACK processing
+ * thread to indicate a RDMA read needs to be restarted from
+ * an earlier PSN without interfering with the sending thread.
+ * See qib_restart_rc().
+ */
+ len = ((qp->s_psn - wqe->psn) & QIB_PSN_MASK) * pmtu;
+ ohdr->u.rc.reth.vaddr =
+ cpu_to_be64(wqe->wr.wr.rdma.remote_addr + len);
+ ohdr->u.rc.reth.rkey =
+ cpu_to_be32(wqe->wr.wr.rdma.rkey);
+ ohdr->u.rc.reth.length = cpu_to_be32(wqe->length - len);
+ qp->s_state = OP(RDMA_READ_REQUEST);
+ hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
+ bth2 = (qp->s_psn & QIB_PSN_MASK) | IB_BTH_REQ_ACK;
+ qp->s_psn = wqe->lpsn + 1;
+ ss = NULL;
+ len = 0;
+ qp->s_cur++;
+ if (qp->s_cur == qp->s_size)
+ qp->s_cur = 0;
+ break;
+ }
+ qp->s_sending_hpsn = bth2;
+ delta = (((int) bth2 - (int) wqe->psn) << 8) >> 8;
+ if (delta && delta % QIB_PSN_CREDIT == 0)
+ bth2 |= IB_BTH_REQ_ACK;
+ if (qp->s_flags & QIB_S_SEND_ONE) {
+ qp->s_flags &= ~QIB_S_SEND_ONE;
+ qp->s_flags |= QIB_S_WAIT_ACK;
+ bth2 |= IB_BTH_REQ_ACK;
+ }
+ qp->s_len -= len;
+ qp->s_hdrwords = hwords;
+ qp->s_cur_sge = ss;
+ qp->s_cur_size = len;
+ qib_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24), bth2);
+done:
+ ret = 1;
+ goto unlock;
+
+bail:
+ qp->s_flags &= ~QIB_S_BUSY;
+unlock:
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ return ret;
+}
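
The request-building path above does all of its sequencing with 24-bit PSNs compared with wraparound (the qib_cmp24() calls and the sign-extended delta computation near the end of the function). A minimal standalone sketch of that comparison, assuming the helper follows the usual sign-extension trick; its real definition lives in a driver header that is not part of this hunk.

#include <stdint.h>
#include <stdio.h>

/* Compare two 24-bit PSNs with wraparound by sign-extending their
 * 24-bit difference to 32 bits; a negative result means 'a' comes
 * before 'b'.  Relies on arithmetic right shift of signed values,
 * just like the delta computation in the function above.
 */
static int cmp24(uint32_t a, uint32_t b)
{
        return (int32_t)((a - b) << 8) >> 8;
}

int main(void)
{
        printf("%d\n", cmp24(0x000002, 0xfffffe)); /* 4: 'a' is just past the wrap */
        printf("%d\n", cmp24(0x000010, 0x000020)); /* -16: 'a' precedes 'b' */
        return 0;
}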
+
+/**
+ * qib_send_rc_ack - Construct an ACK packet and send it
+ * @qp: a pointer to the QP
+ *
+ * This is called from qib_rc_rcv() and qib_kreceive().
+ * Note that RDMA reads and atomics are handled in the
+ * send side QP state and tasklet.
+ */
+void qib_send_rc_ack(struct qib_qp *qp)
+{
+ struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
+ struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+ u64 pbc;
+ u16 lrh0;
+ u32 bth0;
+ u32 hwords;
+ u32 pbufn;
+ u32 __iomem *piobuf;
+ struct qib_ib_header hdr;
+ struct qib_other_headers *ohdr;
+ u32 control;
+ unsigned long flags;
+
+ spin_lock_irqsave(&qp->s_lock, flags);
+
+ if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
+ goto unlock;
+
+ /* Don't send ACK or NAK if a RDMA read or atomic is pending. */
+ if ((qp->s_flags & QIB_S_RESP_PENDING) || qp->s_rdma_ack_cnt)
+ goto queue_ack;
+
+ /* Construct the header with s_lock held so APM doesn't change it. */
+ ohdr = &hdr.u.oth;
+ lrh0 = QIB_LRH_BTH;
+ /* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4. */
+ hwords = 6;
+ if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
+ hwords += qib_make_grh(ibp, &hdr.u.l.grh,
+ &qp->remote_ah_attr.grh, hwords, 0);
+ ohdr = &hdr.u.l.oth;
+ lrh0 = QIB_LRH_GRH;
+ }
+ /* read pkey_index w/o lock (it's atomic) */
+ bth0 = qib_get_pkey(ibp, qp->s_pkey_index) | (OP(ACKNOWLEDGE) << 24);
+ if (qp->s_mig_state == IB_MIG_MIGRATED)
+ bth0 |= IB_BTH_MIG_REQ;
+ if (qp->r_nak_state)
+ ohdr->u.aeth = cpu_to_be32((qp->r_msn & QIB_MSN_MASK) |
+ (qp->r_nak_state <<
+ QIB_AETH_CREDIT_SHIFT));
+ else
+ ohdr->u.aeth = qib_compute_aeth(qp);
+ lrh0 |= ibp->sl_to_vl[qp->remote_ah_attr.sl] << 12 |
+ qp->remote_ah_attr.sl << 4;
+ hdr.lrh[0] = cpu_to_be16(lrh0);
+ hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
+ hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
+ hdr.lrh[3] = cpu_to_be16(ppd->lid | qp->remote_ah_attr.src_path_bits);
+ ohdr->bth[0] = cpu_to_be32(bth0);
+ ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
+ ohdr->bth[2] = cpu_to_be32(qp->r_ack_psn & QIB_PSN_MASK);
+
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+
+ /* Don't try to send ACKs if the link isn't ACTIVE */
+ if (!(ppd->lflags & QIBL_LINKACTIVE))
+ goto done;
+
+ control = dd->f_setpbc_control(ppd, hwords + SIZE_OF_CRC,
+ qp->s_srate, lrh0 >> 12);
+ /* length is + 1 for the control dword */
+ pbc = ((u64) control << 32) | (hwords + 1);
+
+ piobuf = dd->f_getsendbuf(ppd, pbc, &pbufn);
+ if (!piobuf) {
+ /*
+ * We are out of PIO buffers at the moment.
+ * Pass responsibility for sending the ACK to the
+ * send tasklet so that when a PIO buffer becomes
+ * available, the ACK is sent ahead of other outgoing
+ * packets.
+ */
+ spin_lock_irqsave(&qp->s_lock, flags);
+ goto queue_ack;
+ }
+
+ /*
+ * Write the pbc.
+ * We have to flush after the PBC for correctness
+ * on some cpus or the WC buffer can be written out of order.
+ */
+ writeq(pbc, piobuf);
+
+ if (dd->flags & QIB_PIO_FLUSH_WC) {
+ u32 *hdrp = (u32 *) &hdr;
+
+ qib_flush_wc();
+ qib_pio_copy(piobuf + 2, hdrp, hwords - 1);
+ qib_flush_wc();
+ __raw_writel(hdrp[hwords - 1], piobuf + hwords + 1);
+ } else
+ qib_pio_copy(piobuf + 2, (u32 *) &hdr, hwords);
+
+ if (dd->flags & QIB_USE_SPCL_TRIG) {
+ u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;
+
+ qib_flush_wc();
+ __raw_writel(0xaebecede, piobuf + spcl_off);
+ }
+
+ qib_flush_wc();
+ qib_sendbuf_done(dd, pbufn);
+
+ ibp->n_unicast_xmit++;
+ goto done;
+
+queue_ack:
+ if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
+ ibp->n_rc_qacks++;
+ qp->s_flags |= QIB_S_ACK_PENDING | QIB_S_RESP_PENDING;
+ qp->s_nak_state = qp->r_nak_state;
+ qp->s_ack_psn = qp->r_ack_psn;
+
+ /* Schedule the send tasklet. */
+ qib_schedule_send(qp);
+ }
+unlock:
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+done:
+ return;
+}
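
The ACK builder above forms the AETH from the MSN plus either the NAK state or a computed credit before byte-swapping it onto the wire. The driver's QIB_MSN_MASK / QIB_AETH_CREDIT_SHIFT macros are defined in headers outside this hunk, so the sketch below assumes the standard InfiniBand AETH layout: an 8-bit syndrome in the top byte over a 24-bit MSN.

#include <stdint.h>

/* Pack a host-order AETH: syndrome (credit code or NAK state) in bits
 * 31:24, message sequence number in bits 23:0.  Mask and shift values
 * are assumptions standing in for the driver's macros.
 */
static uint32_t make_aeth(uint32_t msn, uint8_t syndrome)
{
        return (msn & 0x00ffffffu) | ((uint32_t)syndrome << 24);
}

The result is what cpu_to_be32() then converts for the wire, as in the code above.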
+
+/**
+ * reset_psn - reset the QP state to send starting from PSN
+ * @qp: the QP
+ * @psn: the packet sequence number to restart at
+ *
+ * This is called from qib_rc_rcv() to process an incoming RC ACK
+ * for the given QP.
+ * Called at interrupt level with the QP s_lock held.
+ */
+static void reset_psn(struct qib_qp *qp, u32 psn)
+{
+ u32 n = qp->s_acked;
+ struct qib_swqe *wqe = get_swqe_ptr(qp, n);
+ u32 opcode;
+
+ qp->s_cur = n;
+
+ /*
+ * If we are starting the request from the beginning,
+ * let the normal send code handle initialization.
+ */
+ if (qib_cmp24(psn, wqe->psn) <= 0) {
+ qp->s_state = OP(SEND_LAST);
+ goto done;
+ }
+
+ /* Find the work request opcode corresponding to the given PSN. */
+ opcode = wqe->wr.opcode;
+ for (;;) {
+ int diff;
+
+ if (++n == qp->s_size)
+ n = 0;
+ if (n == qp->s_tail)
+ break;
+ wqe = get_swqe_ptr(qp, n);
+ diff = qib_cmp24(psn, wqe->psn);
+ if (diff < 0)
+ break;
+ qp->s_cur = n;
+ /*
+ * If we are starting the request from the beginning,
+ * let the normal send code handle initialization.
+ */
+ if (diff == 0) {
+ qp->s_state = OP(SEND_LAST);
+ goto done;
+ }
+ opcode = wqe->wr.opcode;
+ }
+
+ /*
+ * Set the state to restart in the middle of a request.
+ * Don't change the s_sge, s_cur_sge, or s_cur_size.
+ * See qib_make_rc_req().
+ */
+ switch (opcode) {
+ case IB_WR_SEND:
+ case IB_WR_SEND_WITH_IMM:
+ qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
+ break;
+
+ case IB_WR_RDMA_WRITE:
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
+ break;
+
+ case IB_WR_RDMA_READ:
+ qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
+ break;
+
+ default:
+ /*
+ * This case shouldn't happen since it's only
+ * one PSN per req.
+ */
+ qp->s_state = OP(SEND_LAST);
+ }
+done:
+ qp->s_psn = psn;
+ /*
+ * Set QIB_S_WAIT_PSN as qib_rc_complete() may start the timer
+ * asynchronously before the send tasklet can get scheduled.
+ * Doing it in qib_make_rc_req() is too late.
+ */
+ if ((qib_cmp24(qp->s_psn, qp->s_sending_hpsn) <= 0) &&
+ (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0))
+ qp->s_flags |= QIB_S_WAIT_PSN;
+}
+
+/*
+ * Back up requester to resend the last un-ACKed request.
+ * The QP s_lock should be held and interrupts disabled.
+ */
+static void qib_restart_rc(struct qib_qp *qp, u32 psn, int wait)
+{
+ struct qib_swqe *wqe = get_swqe_ptr(qp, qp->s_acked);
+ struct qib_ibport *ibp;
+
+ if (qp->s_retry == 0) {
+ if (qp->s_mig_state == IB_MIG_ARMED) {
+ qib_migrate_qp(qp);
+ qp->s_retry = qp->s_retry_cnt;
+ } else if (qp->s_last == qp->s_acked) {
+ qib_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
+ qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);
+ return;
+ } else /* XXX need to handle delayed completion */
+ return;
+ } else
+ qp->s_retry--;
+
+ ibp = to_iport(qp->ibqp.device, qp->port_num);
+ if (wqe->wr.opcode == IB_WR_RDMA_READ)
+ ibp->n_rc_resends++;
+ else
+ ibp->n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK;
+
+ qp->s_flags &= ~(QIB_S_WAIT_FENCE | QIB_S_WAIT_RDMAR |
+ QIB_S_WAIT_SSN_CREDIT | QIB_S_WAIT_PSN |
+ QIB_S_WAIT_ACK);
+ if (wait)
+ qp->s_flags |= QIB_S_SEND_ONE;
+ reset_psn(qp, psn);
+}
+
+/*
+ * This is called from s_timer for missing responses.
+ */
+static void rc_timeout(unsigned long arg)
+{
+ struct qib_qp *qp = (struct qib_qp *)arg;
+ struct qib_ibport *ibp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&qp->s_lock, flags);
+ if (qp->s_flags & QIB_S_TIMER) {
+ ibp = to_iport(qp->ibqp.device, qp->port_num);
+ ibp->n_rc_timeouts++;
+ qp->s_flags &= ~QIB_S_TIMER;
+ del_timer(&qp->s_timer);
+ qib_restart_rc(qp, qp->s_last_psn + 1, 1);
+ qib_schedule_send(qp);
+ }
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+}
+
+/*
+ * This is called from s_timer for RNR timeouts.
+ */
+void qib_rc_rnr_retry(unsigned long arg)
+{
+ struct qib_qp *qp = (struct qib_qp *)arg;
+ unsigned long flags;
+
+ spin_lock_irqsave(&qp->s_lock, flags);
+ if (qp->s_flags & QIB_S_WAIT_RNR) {
+ qp->s_flags &= ~QIB_S_WAIT_RNR;
+ del_timer(&qp->s_timer);
+ qib_schedule_send(qp);
+ }
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+}
+
+/*
+ * Set qp->s_sending_psn to the next PSN after the given one.
+ * This would be psn+1 except when RDMA reads are present.
+ */
+static void reset_sending_psn(struct qib_qp *qp, u32 psn)
+{
+ struct qib_swqe *wqe;
+ u32 n = qp->s_last;
+
+ /* Find the work request corresponding to the given PSN. */
+ for (;;) {
+ wqe = get_swqe_ptr(qp, n);
+ if (qib_cmp24(psn, wqe->lpsn) <= 0) {
+ if (wqe->wr.opcode == IB_WR_RDMA_READ)
+ qp->s_sending_psn = wqe->lpsn + 1;
+ else
+ qp->s_sending_psn = psn + 1;
+ break;
+ }
+ if (++n == qp->s_size)
+ n = 0;
+ if (n == qp->s_tail)
+ break;
+ }
+}
+
+/*
+ * This should be called with the QP s_lock held and interrupts disabled.
+ */
+void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr)
+{
+ struct qib_other_headers *ohdr;
+ struct qib_swqe *wqe;
+ struct ib_wc wc;
+ unsigned i;
+ u32 opcode;
+ u32 psn;
+
+ if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_OR_FLUSH_SEND))
+ return;
+
+ /* Find out where the BTH is */
+ if ((be16_to_cpu(hdr->lrh[0]) & 3) == QIB_LRH_BTH)
+ ohdr = &hdr->u.oth;
+ else
+ ohdr = &hdr->u.l.oth;
+
+ opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
+ if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
+ opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
+ WARN_ON(!qp->s_rdma_ack_cnt);
+ qp->s_rdma_ack_cnt--;
+ return;
+ }
+
+ psn = be32_to_cpu(ohdr->bth[2]);
+ reset_sending_psn(qp, psn);
+
+ /*
+ * Start timer after a packet requesting an ACK has been sent and
+ * there are still requests that haven't been acked.
+ */
+ if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail &&
+ !(qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR | QIB_S_WAIT_PSN)))
+ start_timer(qp);
+
+ while (qp->s_last != qp->s_acked) {
+ wqe = get_swqe_ptr(qp, qp->s_last);
+ if (qib_cmp24(wqe->lpsn, qp->s_sending_psn) >= 0 &&
+ qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
+ break;
+ for (i = 0; i < wqe->wr.num_sge; i++) {
+ struct qib_sge *sge = &wqe->sg_list[i];
+
+ atomic_dec(&sge->mr->refcount);
+ }
+ /* Post a send completion queue entry if requested. */
+ if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
+ (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
+ memset(&wc, 0, sizeof wc);
+ wc.wr_id = wqe->wr.wr_id;
+ wc.status = IB_WC_SUCCESS;
+ wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
+ wc.byte_len = wqe->length;
+ wc.qp = &qp->ibqp;
+ qib_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);
+ }
+ if (++qp->s_last >= qp->s_size)
+ qp->s_last = 0;
+ }
+ /*
+ * If we were waiting for sends to complete before resending,
+ * and they are now complete, restart sending.
+ */
+ if (qp->s_flags & QIB_S_WAIT_PSN &&
+ qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
+ qp->s_flags &= ~QIB_S_WAIT_PSN;
+ qp->s_sending_psn = qp->s_psn;
+ qp->s_sending_hpsn = qp->s_psn - 1;
+ qib_schedule_send(qp);
+ }
+}
+
+static inline void update_last_psn(struct qib_qp *qp, u32 psn)
+{
+ qp->s_last_psn = psn;
+}
+
+/*
+ * Generate a SWQE completion.
+ * This is similar to qib_send_complete but has to check to be sure
+ * that the SGEs are not being referenced if the SWQE is being resent.
+ */
+static struct qib_swqe *do_rc_completion(struct qib_qp *qp,
+ struct qib_swqe *wqe,
+ struct qib_ibport *ibp)
+{
+ struct ib_wc wc;
+ unsigned i;
+
+ /*
+ * Don't decrement refcount and don't generate a
+ * completion if the SWQE is being resent until the send
+ * is finished.
+ */
+ if (qib_cmp24(wqe->lpsn, qp->s_sending_psn) < 0 ||
+ qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
+ for (i = 0; i < wqe->wr.num_sge; i++) {
+ struct qib_sge *sge = &wqe->sg_list[i];
+
+ atomic_dec(&sge->mr->refcount);
+ }
+ /* Post a send completion queue entry if requested. */
+ if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
+ (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
+ memset(&wc, 0, sizeof wc);
+ wc.wr_id = wqe->wr.wr_id;
+ wc.status = IB_WC_SUCCESS;
+ wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
+ wc.byte_len = wqe->length;
+ wc.qp = &qp->ibqp;
+ qib_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);
+ }
+ if (++qp->s_last >= qp->s_size)
+ qp->s_last = 0;
+ } else
+ ibp->n_rc_delayed_comp++;
+
+ qp->s_retry = qp->s_retry_cnt;
+ update_last_psn(qp, wqe->lpsn);
+
+ /*
+ * If we are completing a request which is in the process of
+ * being resent, we can stop resending it since we know the
+ * responder has already seen it.
+ */
+ if (qp->s_acked == qp->s_cur) {
+ if (++qp->s_cur >= qp->s_size)
+ qp->s_cur = 0;
+ qp->s_acked = qp->s_cur;
+ wqe = get_swqe_ptr(qp, qp->s_cur);
+ if (qp->s_acked != qp->s_tail) {
+ qp->s_state = OP(SEND_LAST);
+ qp->s_psn = wqe->psn;
+ }
+ } else {
+ if (++qp->s_acked >= qp->s_size)
+ qp->s_acked = 0;
+ if (qp->state == IB_QPS_SQD && qp->s_acked == qp->s_cur)
+ qp->s_draining = 0;
+ wqe = get_swqe_ptr(qp, qp->s_acked);
+ }
+ return wqe;
+}
+
+/**
+ * do_rc_ack - process an incoming RC ACK
+ * @qp: the QP the ACK came in on
+ * @psn: the packet sequence number of the ACK
+ * @opcode: the opcode of the request that resulted in the ACK
+ *
+ * This is called from qib_rc_rcv_resp() to process an incoming RC ACK
+ * for the given QP.
+ * Called at interrupt level with the QP s_lock held.
+ * Returns 1 if OK, 0 if current operation should be aborted (NAK).
+ */
+static int do_rc_ack(struct qib_qp *qp, u32 aeth, u32 psn, int opcode,
+ u64 val, struct qib_ctxtdata *rcd)
+{
+ struct qib_ibport *ibp;
+ enum ib_wc_status status;
+ struct qib_swqe *wqe;
+ int ret = 0;
+ u32 ack_psn;
+ int diff;
+
+ /* Remove QP from retry timer */
+ if (qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR)) {
+ qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR);
+ del_timer(&qp->s_timer);
+ }
+
+ /*
+ * Note that NAKs implicitly ACK outstanding SEND and RDMA write
+ * requests and implicitly NAK RDMA read and atomic requests issued
+ * before the NAK'ed request. The MSN won't include the NAK'ed
+ * request but will include the ACK'ed request(s).
+ */
+ ack_psn = psn;
+ if (aeth >> 29)
+ ack_psn--;
+ wqe = get_swqe_ptr(qp, qp->s_acked);
+ ibp = to_iport(qp->ibqp.device, qp->port_num);
+
+ /*
+ * The MSN might be for a later WQE than the PSN indicates so
+ * only complete WQEs that the PSN finishes.
+ */
+ while ((diff = qib_cmp24(ack_psn, wqe->lpsn)) >= 0) {
+ /*
+ * RDMA_READ_RESPONSE_ONLY is a special case since
+ * we want to generate completion events for everything
+ * before the RDMA read, copy the data, then generate
+ * the completion for the read.
+ */
+ if (wqe->wr.opcode == IB_WR_RDMA_READ &&
+ opcode == OP(RDMA_READ_RESPONSE_ONLY) &&
+ diff == 0) {
+ ret = 1;
+ goto bail;
+ }
+ /*
+ * If this request is a RDMA read or atomic, and the ACK is
+ * for a later operation, this ACK NAKs the RDMA read or
+ * atomic. In other words, only a RDMA_READ_LAST or ONLY
+ * can ACK a RDMA read and likewise for atomic ops. Note
+ * that the NAK case can only happen if relaxed ordering is
+ * used and requests are sent after an RDMA read or atomic
+ * is sent but before the response is received.
+ */
+ if ((wqe->wr.opcode == IB_WR_RDMA_READ &&
+ (opcode != OP(RDMA_READ_RESPONSE_LAST) || diff != 0)) ||
+ ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
+ wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
+ (opcode != OP(ATOMIC_ACKNOWLEDGE) || diff != 0))) {
+ /* Retry this request. */
+ if (!(qp->r_flags & QIB_R_RDMAR_SEQ)) {
+ qp->r_flags |= QIB_R_RDMAR_SEQ;
+ qib_restart_rc(qp, qp->s_last_psn + 1, 0);
+ if (list_empty(&qp->rspwait)) {
+ qp->r_flags |= QIB_R_RSP_SEND;
+ atomic_inc(&qp->refcount);
+ list_add_tail(&qp->rspwait,
+ &rcd->qp_wait_list);
+ }
+ }
+ /*
+ * No need to process the ACK/NAK since we are
+ * restarting an earlier request.
+ */
+ goto bail;
+ }
+ if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
+ wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
+ u64 *vaddr = wqe->sg_list[0].vaddr;
+ *vaddr = val;
+ }
+ if (qp->s_num_rd_atomic &&
+ (wqe->wr.opcode == IB_WR_RDMA_READ ||
+ wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
+ wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) {
+ qp->s_num_rd_atomic--;
+ /* Restart sending task if fence is complete */
+ if ((qp->s_flags & QIB_S_WAIT_FENCE) &&
+ !qp->s_num_rd_atomic) {
+ qp->s_flags &= ~(QIB_S_WAIT_FENCE |
+ QIB_S_WAIT_ACK);
+ qib_schedule_send(qp);
+ } else if (qp->s_flags & QIB_S_WAIT_RDMAR) {
+ qp->s_flags &= ~(QIB_S_WAIT_RDMAR |
+ QIB_S_WAIT_ACK);
+ qib_schedule_send(qp);
+ }
+ }
+ wqe = do_rc_completion(qp, wqe, ibp);
+ if (qp->s_acked == qp->s_tail)
+ break;
+ }
+
+ switch (aeth >> 29) {
+ case 0: /* ACK */
+ ibp->n_rc_acks++;
+ if (qp->s_acked != qp->s_tail) {
+ /*
+ * We are expecting more ACKs so
+ * reset the retransmit timer.
+ */
+ start_timer(qp);
+ /*
+ * We can stop resending the earlier packets and
+ * continue with the next packet the receiver wants.
+ */
+ if (qib_cmp24(qp->s_psn, psn) <= 0)
+ reset_psn(qp, psn + 1);
+ } else if (qib_cmp24(qp->s_psn, psn) <= 0) {
+ qp->s_state = OP(SEND_LAST);
+ qp->s_psn = psn + 1;
+ }
+ if (qp->s_flags & QIB_S_WAIT_ACK) {
+ qp->s_flags &= ~QIB_S_WAIT_ACK;
+ qib_schedule_send(qp);
+ }
+ qib_get_credit(qp, aeth);
+ qp->s_rnr_retry = qp->s_rnr_retry_cnt;
+ qp->s_retry = qp->s_retry_cnt;
+ update_last_psn(qp, psn);
+ ret = 1;
+ goto bail;
+
+ case 1: /* RNR NAK */
+ ibp->n_rnr_naks++;
+ if (qp->s_acked == qp->s_tail)
+ goto bail;
+ if (qp->s_flags & QIB_S_WAIT_RNR)
+ goto bail;
+ if (qp->s_rnr_retry == 0) {
+ status = IB_WC_RNR_RETRY_EXC_ERR;
+ goto class_b;
+ }
+ if (qp->s_rnr_retry_cnt < 7)
+ qp->s_rnr_retry--;
+
+ /* The last valid PSN is the previous PSN. */
+ update_last_psn(qp, psn - 1);
+
+ ibp->n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK;
+
+ reset_psn(qp, psn);
+
+ qp->s_flags &= ~(QIB_S_WAIT_SSN_CREDIT | QIB_S_WAIT_ACK);
+ qp->s_flags |= QIB_S_WAIT_RNR;
+ qp->s_timer.function = qib_rc_rnr_retry;
+ qp->s_timer.expires = jiffies + usecs_to_jiffies(
+ ib_qib_rnr_table[(aeth >> QIB_AETH_CREDIT_SHIFT) &
+ QIB_AETH_CREDIT_MASK]);
+ add_timer(&qp->s_timer);
+ goto bail;
+
+ case 3: /* NAK */
+ if (qp->s_acked == qp->s_tail)
+ goto bail;
+ /* The last valid PSN is the previous PSN. */
+ update_last_psn(qp, psn - 1);
+ switch ((aeth >> QIB_AETH_CREDIT_SHIFT) &
+ QIB_AETH_CREDIT_MASK) {
+ case 0: /* PSN sequence error */
+ ibp->n_seq_naks++;
+ /*
+ * Back up to the responder's expected PSN.
+ * Note that we might get a NAK in the middle of an
+ * RDMA READ response which terminates the RDMA
+ * READ.
+ */
+ qib_restart_rc(qp, psn, 0);
+ qib_schedule_send(qp);
+ break;
+
+ case 1: /* Invalid Request */
+ status = IB_WC_REM_INV_REQ_ERR;
+ ibp->n_other_naks++;
+ goto class_b;
+
+ case 2: /* Remote Access Error */
+ status = IB_WC_REM_ACCESS_ERR;
+ ibp->n_other_naks++;
+ goto class_b;
+
+ case 3: /* Remote Operation Error */
+ status = IB_WC_REM_OP_ERR;
+ ibp->n_other_naks++;
+class_b:
+ if (qp->s_last == qp->s_acked) {
+ qib_send_complete(qp, wqe, status);
+ qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);
+ }
+ break;
+
+ default:
+ /* Ignore other reserved NAK error codes */
+ goto reserved;
+ }
+ qp->s_retry = qp->s_retry_cnt;
+ qp->s_rnr_retry = qp->s_rnr_retry_cnt;
+ goto bail;
+
+ default: /* 2: reserved */
+reserved:
+ /* Ignore reserved NAK codes. */
+ goto bail;
+ }
+
+bail:
+ return ret;
+}
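
The switch above classifies the AETH by its top three bits and, for RNR NAKs and NAKs, pulls the 5-bit code out of the low syndrome bits. A standalone decoder that mirrors that logic, with the shift and mask written as literals (the driver's macro names come from headers not shown in this hunk):

#include <stdint.h>

enum aeth_kind { AETH_ACK, AETH_RNR_NAK, AETH_RESERVED, AETH_NAK };

/* Classify a host-order AETH the way do_rc_ack() does; for the two
 * NAK flavors, *code receives the 5-bit syndrome code.
 */
static enum aeth_kind classify_aeth(uint32_t aeth, uint8_t *code)
{
        switch (aeth >> 29) {
        case 0:                                 /* ACK */
                return AETH_ACK;
        case 1:                                 /* RNR NAK */
                *code = (aeth >> 24) & 0x1f;    /* RNR timer code */
                return AETH_RNR_NAK;
        case 3:                                 /* NAK */
                *code = (aeth >> 24) & 0x1f;    /* seq err, inv req, ... */
                return AETH_NAK;
        default:                                /* reserved */
                return AETH_RESERVED;
        }
}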
+
+/*
+ * We have seen an out of sequence RDMA read middle or last packet.
+ * This ACKs SENDs and RDMA writes up to the first RDMA read or atomic SWQE.
+ */
+static void rdma_seq_err(struct qib_qp *qp, struct qib_ibport *ibp, u32 psn,
+ struct qib_ctxtdata *rcd)
+{
+ struct qib_swqe *wqe;
+
+ /* Remove QP from retry timer */
+ if (qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR)) {
+ qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR);
+ del_timer(&qp->s_timer);
+ }
+
+ wqe = get_swqe_ptr(qp, qp->s_acked);
+
+ while (qib_cmp24(psn, wqe->lpsn) > 0) {
+ if (wqe->wr.opcode == IB_WR_RDMA_READ ||
+ wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
+ wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
+ break;
+ wqe = do_rc_completion(qp, wqe, ibp);
+ }
+
+ ibp->n_rdma_seq++;
+ qp->r_flags |= QIB_R_RDMAR_SEQ;
+ qib_restart_rc(qp, qp->s_last_psn + 1, 0);
+ if (list_empty(&qp->rspwait)) {
+ qp->r_flags |= QIB_R_RSP_SEND;
+ atomic_inc(&qp->refcount);
+ list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
+ }
+}
+
+/**
+ * qib_rc_rcv_resp - process an incoming RC response packet
+ * @ibp: the port this packet came in on
+ * @ohdr: the other headers for this packet
+ * @data: the packet data
+ * @tlen: the packet length
+ * @qp: the QP for this packet
+ * @opcode: the opcode for this packet
+ * @psn: the packet sequence number for this packet
+ * @hdrsize: the header length
+ * @pmtu: the path MTU
+ *
+ * This is called from qib_rc_rcv() to process an incoming RC response
+ * packet for the given QP.
+ * Called at interrupt level.
+ */
+static void qib_rc_rcv_resp(struct qib_ibport *ibp,
+ struct qib_other_headers *ohdr,
+ void *data, u32 tlen,
+ struct qib_qp *qp,
+ u32 opcode,
+ u32 psn, u32 hdrsize, u32 pmtu,
+ struct qib_ctxtdata *rcd)
+{
+ struct qib_swqe *wqe;
+ enum ib_wc_status status;
+ unsigned long flags;
+ int diff;
+ u32 pad;
+ u32 aeth;
+ u64 val;
+
+ spin_lock_irqsave(&qp->s_lock, flags);
+
+ /* Double check we can process this now that we hold the s_lock. */
+ if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
+ goto ack_done;
+
+ /* Ignore invalid responses. */
+ if (qib_cmp24(psn, qp->s_next_psn) >= 0)
+ goto ack_done;
+
+ /* Ignore duplicate responses. */
+ diff = qib_cmp24(psn, qp->s_last_psn);
+ if (unlikely(diff <= 0)) {
+ /* Update credits for "ghost" ACKs */
+ if (diff == 0 && opcode == OP(ACKNOWLEDGE)) {
+ aeth = be32_to_cpu(ohdr->u.aeth);
+ if ((aeth >> 29) == 0)
+ qib_get_credit(qp, aeth);
+ }
+ goto ack_done;
+ }
+
+ /*
+ * Skip everything other than the PSN we expect, if we are waiting
+ * for a reply to a restarted RDMA read or atomic op.
+ */
+ if (qp->r_flags & QIB_R_RDMAR_SEQ) {
+ if (qib_cmp24(psn, qp->s_last_psn + 1) != 0)
+ goto ack_done;
+ qp->r_flags &= ~QIB_R_RDMAR_SEQ;
+ }
+
+ if (unlikely(qp->s_acked == qp->s_tail))
+ goto ack_done;
+ wqe = get_swqe_ptr(qp, qp->s_acked);
+ status = IB_WC_SUCCESS;
+
+ switch (opcode) {
+ case OP(ACKNOWLEDGE):
+ case OP(ATOMIC_ACKNOWLEDGE):
+ case OP(RDMA_READ_RESPONSE_FIRST):
+ aeth = be32_to_cpu(ohdr->u.aeth);
+ if (opcode == OP(ATOMIC_ACKNOWLEDGE)) {
+ __be32 *p = ohdr->u.at.atomic_ack_eth;
+
+ val = ((u64) be32_to_cpu(p[0]) << 32) |
+ be32_to_cpu(p[1]);
+ } else
+ val = 0;
+ if (!do_rc_ack(qp, aeth, psn, opcode, val, rcd) ||
+ opcode != OP(RDMA_READ_RESPONSE_FIRST))
+ goto ack_done;
+ hdrsize += 4;
+ wqe = get_swqe_ptr(qp, qp->s_acked);
+ if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
+ goto ack_op_err;
+ /*
+ * If this is a response to a resent RDMA read, we
+ * have to be careful to copy the data to the right
+ * location.
+ */
+ qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
+ wqe, psn, pmtu);
+ goto read_middle;
+
+ case OP(RDMA_READ_RESPONSE_MIDDLE):
+ /* no AETH, no ACK */
+ if (unlikely(qib_cmp24(psn, qp->s_last_psn + 1)))
+ goto ack_seq_err;
+ if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
+ goto ack_op_err;
+read_middle:
+ if (unlikely(tlen != (hdrsize + pmtu + 4)))
+ goto ack_len_err;
+ if (unlikely(pmtu >= qp->s_rdma_read_len))
+ goto ack_len_err;
+
+ /*
+ * We got a response so update the timeout.
+ * 4.096 usec. * (1 << qp->timeout)
+ */
+ qp->s_flags |= QIB_S_TIMER;
+ mod_timer(&qp->s_timer, jiffies +
+ usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
+ 1000UL));
+ if (qp->s_flags & QIB_S_WAIT_ACK) {
+ qp->s_flags &= ~QIB_S_WAIT_ACK;
+ qib_schedule_send(qp);
+ }
+
+ if (opcode == OP(RDMA_READ_RESPONSE_MIDDLE))
+ qp->s_retry = qp->s_retry_cnt;
+
+ /*
+ * Update the RDMA receive state but do the copy w/o
+ * holding the locks and blocking interrupts.
+ */
+ qp->s_rdma_read_len -= pmtu;
+ update_last_psn(qp, psn);
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ qib_copy_sge(&qp->s_rdma_read_sge, data, pmtu, 0);
+ goto bail;
+
+ case OP(RDMA_READ_RESPONSE_ONLY):
+ aeth = be32_to_cpu(ohdr->u.aeth);
+ if (!do_rc_ack(qp, aeth, psn, opcode, 0, rcd))
+ goto ack_done;
+ /* Get the number of bytes the message was padded by. */
+ pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
+ /*
+ * Check that the data size is >= 0 && <= pmtu.
+ * Remember to account for the AETH header (4) and
+ * ICRC (4).
+ */
+ if (unlikely(tlen < (hdrsize + pad + 8)))
+ goto ack_len_err;
+ /*
+ * If this is a response to a resent RDMA read, we
+ * have to be careful to copy the data to the right
+ * location.
+ */
+ wqe = get_swqe_ptr(qp, qp->s_acked);
+ qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
+ wqe, psn, pmtu);
+ goto read_last;
+
+ case OP(RDMA_READ_RESPONSE_LAST):
+ /* ACKs READ req. */
+ if (unlikely(qib_cmp24(psn, qp->s_last_psn + 1)))
+ goto ack_seq_err;
+ if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
+ goto ack_op_err;
+ /* Get the number of bytes the message was padded by. */
+ pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
+ /*
+ * Check that the data size is >= 1 && <= pmtu.
+ * Remember to account for the AETH header (4) and
+ * ICRC (4).
+ */
+ if (unlikely(tlen <= (hdrsize + pad + 8)))
+ goto ack_len_err;
+read_last:
+ tlen -= hdrsize + pad + 8;
+ if (unlikely(tlen != qp->s_rdma_read_len))
+ goto ack_len_err;
+ aeth = be32_to_cpu(ohdr->u.aeth);
+ qib_copy_sge(&qp->s_rdma_read_sge, data, tlen, 0);
+ WARN_ON(qp->s_rdma_read_sge.num_sge);
+ (void) do_rc_ack(qp, aeth, psn,
+ OP(RDMA_READ_RESPONSE_LAST), 0, rcd);
+ goto ack_done;
+ }
+
+ack_op_err:
+ status = IB_WC_LOC_QP_OP_ERR;
+ goto ack_err;
+
+ack_seq_err:
+ rdma_seq_err(qp, ibp, psn, rcd);
+ goto ack_done;
+
+ack_len_err:
+ status = IB_WC_LOC_LEN_ERR;
+ack_err:
+ if (qp->s_last == qp->s_acked) {
+ qib_send_complete(qp, wqe, status);
+ qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);
+ }
+ack_done:
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+bail:
+ return;
+}
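
The response path above re-arms the retransmit timer from the QP's 5-bit timeout exponent; the (4096 * (1 << timeout)) / 1000 expression is the IBTA local ACK timeout of 4.096 microseconds scaled by 2^timeout. A small worked version of that arithmetic:

#include <stdint.h>
#include <stdio.h>

/* Local ACK timeout in microseconds for a 5-bit timeout exponent,
 * matching the expression used when the timer is re-armed above.
 */
static uint64_t rc_timeout_usecs(unsigned int timeout)
{
        return (4096ull << timeout) / 1000;     /* 4.096 us * 2^timeout */
}

int main(void)
{
        printf("%llu\n", (unsigned long long)rc_timeout_usecs(14)); /* ~67108 us */
        return 0;
}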
+
+/**
+ * qib_rc_rcv_error - process an incoming duplicate or error RC packet
+ * @ohdr: the other headers for this packet
+ * @data: the packet data
+ * @qp: the QP for this packet
+ * @opcode: the opcode for this packet
+ * @psn: the packet sequence number for this packet
+ * @diff: the difference between the PSN and the expected PSN
+ *
+ * This is called from qib_rc_rcv() to process an unexpected
+ * incoming RC packet for the given QP.
+ * Called at interrupt level.
+ * Return 1 if no more processing is needed; otherwise return 0 to
+ * schedule a response to be sent.
+ */
+static int qib_rc_rcv_error(struct qib_other_headers *ohdr,
+ void *data,
+ struct qib_qp *qp,
+ u32 opcode,
+ u32 psn,
+ int diff,
+ struct qib_ctxtdata *rcd)
+{
+ struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
+ struct qib_ack_entry *e;
+ unsigned long flags;
+ u8 i, prev;
+ int old_req;
+
+ if (diff > 0) {
+ /*
+ * Packet sequence error.
+ * A NAK will ACK earlier sends and RDMA writes.
+ * Don't queue the NAK if we already sent one.
+ */
+ if (!qp->r_nak_state) {
+ ibp->n_rc_seqnak++;
+ qp->r_nak_state = IB_NAK_PSN_ERROR;
+ /* Use the expected PSN. */
+ qp->r_ack_psn = qp->r_psn;
+ /*
+ * Wait to send the sequence NAK until all packets
+ * in the receive queue have been processed.
+ * Otherwise, we end up propagating congestion.
+ */
+ if (list_empty(&qp->rspwait)) {
+ qp->r_flags |= QIB_R_RSP_NAK;
+ atomic_inc(&qp->refcount);
+ list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
+ }
+ }
+ goto done;
+ }
+
+ /*
+ * Handle a duplicate request. Don't re-execute SEND, RDMA
+ * write or atomic op. Don't NAK errors, just silently drop
+ * the duplicate request. Note that r_sge, r_len, and
+ * r_rcv_len may be in use so don't modify them.
+ *
+ * We are supposed to ACK the earliest duplicate PSN but we
+ * can coalesce an outstanding duplicate ACK. We have to
+ * send the earliest so that RDMA reads can be restarted at
+ * the requester's expected PSN.
+ *
+ * First, find where this duplicate PSN falls within the
+ * ACKs previously sent.
+ * old_req is true if there is an older response that is scheduled
+ * to be sent before sending this one.
+ */
+ e = NULL;
+ old_req = 1;
+ ibp->n_rc_dupreq++;
+
+ spin_lock_irqsave(&qp->s_lock, flags);
+ /* Double check we can process this now that we hold the s_lock. */
+ if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
+ goto unlock_done;
+
+ for (i = qp->r_head_ack_queue; ; i = prev) {
+ if (i == qp->s_tail_ack_queue)
+ old_req = 0;
+ if (i)
+ prev = i - 1;
+ else
+ prev = QIB_MAX_RDMA_ATOMIC;
+ if (prev == qp->r_head_ack_queue) {
+ e = NULL;
+ break;
+ }
+ e = &qp->s_ack_queue[prev];
+ if (!e->opcode) {
+ e = NULL;
+ break;
+ }
+ if (qib_cmp24(psn, e->psn) >= 0) {
+ if (prev == qp->s_tail_ack_queue &&
+ qib_cmp24(psn, e->lpsn) <= 0)
+ old_req = 0;
+ break;
+ }
+ }
+ switch (opcode) {
+ case OP(RDMA_READ_REQUEST): {
+ struct ib_reth *reth;
+ u32 offset;
+ u32 len;
+
+ /*
+ * If we didn't find the RDMA read request in the ack queue,
+ * we can ignore this request.
+ */
+ if (!e || e->opcode != OP(RDMA_READ_REQUEST))
+ goto unlock_done;
+ /* RETH comes after BTH */
+ reth = &ohdr->u.rc.reth;
+ /*
+ * Address range must be a subset of the original
+ * request and start on pmtu boundaries.
+ * We reuse the old ack_queue slot since the requester
+ * should not back up and request an earlier PSN for the
+ * same request.
+ */
+ offset = ((psn - e->psn) & QIB_PSN_MASK) *
+ ib_mtu_enum_to_int(qp->path_mtu);
+ len = be32_to_cpu(reth->length);
+ if (unlikely(offset + len != e->rdma_sge.sge_length))
+ goto unlock_done;
+ if (e->rdma_sge.mr) {
+ atomic_dec(&e->rdma_sge.mr->refcount);
+ e->rdma_sge.mr = NULL;
+ }
+ if (len != 0) {
+ u32 rkey = be32_to_cpu(reth->rkey);
+ u64 vaddr = be64_to_cpu(reth->vaddr);
+ int ok;
+
+ ok = qib_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey,
+ IB_ACCESS_REMOTE_READ);
+ if (unlikely(!ok))
+ goto unlock_done;
+ } else {
+ e->rdma_sge.vaddr = NULL;
+ e->rdma_sge.length = 0;
+ e->rdma_sge.sge_length = 0;
+ }
+ e->psn = psn;
+ if (old_req)
+ goto unlock_done;
+ qp->s_tail_ack_queue = prev;
+ break;
+ }
+
+ case OP(COMPARE_SWAP):
+ case OP(FETCH_ADD): {
+ /*
+ * If we didn't find the atomic request in the ack queue
+ * or the send tasklet is already backed up to send an
+ * earlier entry, we can ignore this request.
+ */
+ if (!e || e->opcode != (u8) opcode || old_req)
+ goto unlock_done;
+ qp->s_tail_ack_queue = prev;
+ break;
+ }
+
+ default:
+ /*
+ * Ignore this operation if it doesn't request an ACK
+ * or an earlier RDMA read or atomic is going to be resent.
+ */
+ if (!(psn & IB_BTH_REQ_ACK) || old_req)
+ goto unlock_done;
+ /*
+ * Resend the most recent ACK if this request is
+ * after all the previous RDMA reads and atomics.
+ */
+ if (i == qp->r_head_ack_queue) {
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ qp->r_nak_state = 0;
+ qp->r_ack_psn = qp->r_psn - 1;
+ goto send_ack;
+ }
+ /*
+ * Try to send a simple ACK to work around a Mellanox bug
+ * which doesn't accept a RDMA read response or atomic
+ * response as an ACK for earlier SENDs or RDMA writes.
+ */
+ if (!(qp->s_flags & QIB_S_RESP_PENDING)) {
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ qp->r_nak_state = 0;
+ qp->r_ack_psn = qp->s_ack_queue[i].psn - 1;
+ goto send_ack;
+ }
+ /*
+ * Resend the RDMA read or atomic op which
+ * ACKs this duplicate request.
+ */
+ qp->s_tail_ack_queue = i;
+ break;
+ }
+ qp->s_ack_state = OP(ACKNOWLEDGE);
+ qp->s_flags |= QIB_S_RESP_PENDING;
+ qp->r_nak_state = 0;
+ qib_schedule_send(qp);
+
+unlock_done:
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+done:
+ return 1;
+
+send_ack:
+ return 0;
+}
+
+void qib_rc_error(struct qib_qp *qp, enum ib_wc_status err)
+{
+ unsigned long flags;
+ int lastwqe;
+
+ spin_lock_irqsave(&qp->s_lock, flags);
+ lastwqe = qib_error_qp(qp, err);
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+
+ if (lastwqe) {
+ struct ib_event ev;
+
+ ev.device = qp->ibqp.device;
+ ev.element.qp = &qp->ibqp;
+ ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
+ qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
+ }
+}
+
+static inline void qib_update_ack_queue(struct qib_qp *qp, unsigned n)
+{
+ unsigned next;
+
+ next = n + 1;
+ if (next > QIB_MAX_RDMA_ATOMIC)
+ next = 0;
+ qp->s_tail_ack_queue = next;
+ qp->s_ack_state = OP(ACKNOWLEDGE);
+}
+
+/**
+ * qib_rc_rcv - process an incoming RC packet
+ * @rcd: the context pointer
+ * @hdr: the header of this packet
+ * @has_grh: true if the header has a GRH
+ * @data: the packet data
+ * @tlen: the packet length
+ * @qp: the QP for this packet
+ *
+ * This is called from qib_qp_rcv() to process an incoming RC packet
+ * for the given QP.
+ * Called at interrupt level.
+ */
+void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
+ int has_grh, void *data, u32 tlen, struct qib_qp *qp)
+{
+ struct qib_ibport *ibp = &rcd->ppd->ibport_data;
+ struct qib_other_headers *ohdr;
+ u32 opcode;
+ u32 hdrsize;
+ u32 psn;
+ u32 pad;
+ struct ib_wc wc;
+ u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
+ int diff;
+ struct ib_reth *reth;
+ unsigned long flags;
+ int ret;
+
+ /* Check for GRH */
+ if (!has_grh) {
+ ohdr = &hdr->u.oth;
+ hdrsize = 8 + 12; /* LRH + BTH */
+ } else {
+ ohdr = &hdr->u.l.oth;
+ hdrsize = 8 + 40 + 12; /* LRH + GRH + BTH */
+ }
+
+ opcode = be32_to_cpu(ohdr->bth[0]);
+ spin_lock_irqsave(&qp->s_lock, flags);
+ if (qib_ruc_check_hdr(ibp, hdr, has_grh, qp, opcode))
+ goto sunlock;
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+
+ psn = be32_to_cpu(ohdr->bth[2]);
+ opcode >>= 24;
+
+ /* Prevent simultaneous processing after APM on different CPUs */
+ spin_lock(&qp->r_lock);
+
+ /*
+ * Process responses (ACKs) before anything else. Note that the
+ * packet sequence number will be for something in the send work
+ * queue rather than the expected receive packet sequence number.
+ * In other words, this QP is the requester.
+ */
+ if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
+ opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
+ qib_rc_rcv_resp(ibp, ohdr, data, tlen, qp, opcode, psn,
+ hdrsize, pmtu, rcd);
+ goto runlock;
+ }
+
+ /* Compute 24 bits worth of difference. */
+ diff = qib_cmp24(psn, qp->r_psn);
+ if (unlikely(diff)) {
+ if (qib_rc_rcv_error(ohdr, data, qp, opcode, psn, diff, rcd))
+ goto runlock;
+ goto send_ack;
+ }
+
+ /* Check for opcode sequence errors. */
+ switch (qp->r_state) {
+ case OP(SEND_FIRST):
+ case OP(SEND_MIDDLE):
+ if (opcode == OP(SEND_MIDDLE) ||
+ opcode == OP(SEND_LAST) ||
+ opcode == OP(SEND_LAST_WITH_IMMEDIATE))
+ break;
+ goto nack_inv;
+
+ case OP(RDMA_WRITE_FIRST):
+ case OP(RDMA_WRITE_MIDDLE):
+ if (opcode == OP(RDMA_WRITE_MIDDLE) ||
+ opcode == OP(RDMA_WRITE_LAST) ||
+ opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
+ break;
+ goto nack_inv;
+
+ default:
+ if (opcode == OP(SEND_MIDDLE) ||
+ opcode == OP(SEND_LAST) ||
+ opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
+ opcode == OP(RDMA_WRITE_MIDDLE) ||
+ opcode == OP(RDMA_WRITE_LAST) ||
+ opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
+ goto nack_inv;
+ /*
+ * Note that it is up to the requester to not send a new
+ * RDMA read or atomic operation before receiving an ACK
+ * for the previous operation.
+ */
+ break;
+ }
+
+ memset(&wc, 0, sizeof wc);
+
+ if (qp->state == IB_QPS_RTR && !(qp->r_flags & QIB_R_COMM_EST)) {
+ qp->r_flags |= QIB_R_COMM_EST;
+ if (qp->ibqp.event_handler) {
+ struct ib_event ev;
+
+ ev.device = qp->ibqp.device;
+ ev.element.qp = &qp->ibqp;
+ ev.event = IB_EVENT_COMM_EST;
+ qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
+ }
+ }
+
+ /* OK, process the packet. */
+ switch (opcode) {
+ case OP(SEND_FIRST):
+ ret = qib_get_rwqe(qp, 0);
+ if (ret < 0)
+ goto nack_op_err;
+ if (!ret)
+ goto rnr_nak;
+ qp->r_rcv_len = 0;
+ /* FALLTHROUGH */
+ case OP(SEND_MIDDLE):
+ case OP(RDMA_WRITE_MIDDLE):
+send_middle:
+ /* Check for invalid length PMTU or posted rwqe len. */
+ if (unlikely(tlen != (hdrsize + pmtu + 4)))
+ goto nack_inv;
+ qp->r_rcv_len += pmtu;
+ if (unlikely(qp->r_rcv_len > qp->r_len))
+ goto nack_inv;
+ qib_copy_sge(&qp->r_sge, data, pmtu, 1);
+ break;
+
+ case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
+ /* consume RWQE */
+ ret = qib_get_rwqe(qp, 1);
+ if (ret < 0)
+ goto nack_op_err;
+ if (!ret)
+ goto rnr_nak;
+ goto send_last_imm;
+
+ case OP(SEND_ONLY):
+ case OP(SEND_ONLY_WITH_IMMEDIATE):
+ ret = qib_get_rwqe(qp, 0);
+ if (ret < 0)
+ goto nack_op_err;
+ if (!ret)
+ goto rnr_nak;
+ qp->r_rcv_len = 0;
+ if (opcode == OP(SEND_ONLY))
+ goto send_last;
+ /* FALLTHROUGH */
+ case OP(SEND_LAST_WITH_IMMEDIATE):
+send_last_imm:
+ wc.ex.imm_data = ohdr->u.imm_data;
+ hdrsize += 4;
+ wc.wc_flags = IB_WC_WITH_IMM;
+ /* FALLTHROUGH */
+ case OP(SEND_LAST):
+ case OP(RDMA_WRITE_LAST):
+send_last:
+ /* Get the number of bytes the message was padded by. */
+ pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
+ /* Check for invalid length. */
+ /* XXX LAST len should be >= 1 */
+ if (unlikely(tlen < (hdrsize + pad + 4)))
+ goto nack_inv;
+ /* Don't count the CRC. */
+ tlen -= (hdrsize + pad + 4);
+ wc.byte_len = tlen + qp->r_rcv_len;
+ if (unlikely(wc.byte_len > qp->r_len))
+ goto nack_inv;
+ qib_copy_sge(&qp->r_sge, data, tlen, 1);
+ while (qp->r_sge.num_sge) {
+ atomic_dec(&qp->r_sge.sge.mr->refcount);
+ if (--qp->r_sge.num_sge)
+ qp->r_sge.sge = *qp->r_sge.sg_list++;
+ }
+ qp->r_msn++;
+ if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
+ break;
+ wc.wr_id = qp->r_wr_id;
+ wc.status = IB_WC_SUCCESS;
+ if (opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE) ||
+ opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
+ wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
+ else
+ wc.opcode = IB_WC_RECV;
+ wc.qp = &qp->ibqp;
+ wc.src_qp = qp->remote_qpn;
+ wc.slid = qp->remote_ah_attr.dlid;
+ wc.sl = qp->remote_ah_attr.sl;
+ /* Signal completion event if the solicited bit is set. */
+ qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
+ (ohdr->bth[0] &
+ cpu_to_be32(IB_BTH_SOLICITED)) != 0);
+ break;
+
+ case OP(RDMA_WRITE_FIRST):
+ case OP(RDMA_WRITE_ONLY):
+ case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
+ if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
+ goto nack_inv;
+ /* consume RWQE */
+ reth = &ohdr->u.rc.reth;
+ hdrsize += sizeof(*reth);
+ qp->r_len = be32_to_cpu(reth->length);
+ qp->r_rcv_len = 0;
+ qp->r_sge.sg_list = NULL;
+ if (qp->r_len != 0) {
+ u32 rkey = be32_to_cpu(reth->rkey);
+ u64 vaddr = be64_to_cpu(reth->vaddr);
+ int ok;
+
+ /* Check rkey & NAK */
+ ok = qib_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, vaddr,
+ rkey, IB_ACCESS_REMOTE_WRITE);
+ if (unlikely(!ok))
+ goto nack_acc;
+ qp->r_sge.num_sge = 1;
+ } else {
+ qp->r_sge.num_sge = 0;
+ qp->r_sge.sge.mr = NULL;
+ qp->r_sge.sge.vaddr = NULL;
+ qp->r_sge.sge.length = 0;
+ qp->r_sge.sge.sge_length = 0;
+ }
+ if (opcode == OP(RDMA_WRITE_FIRST))
+ goto send_middle;
+ else if (opcode == OP(RDMA_WRITE_ONLY))
+ goto send_last;
+ ret = qib_get_rwqe(qp, 1);
+ if (ret < 0)
+ goto nack_op_err;
+ if (!ret)
+ goto rnr_nak;
+ goto send_last_imm;
+
+ case OP(RDMA_READ_REQUEST): {
+ struct qib_ack_entry *e;
+ u32 len;
+ u8 next;
+
+ if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
+ goto nack_inv;
+ next = qp->r_head_ack_queue + 1;
+ /* s_ack_queue is size QIB_MAX_RDMA_ATOMIC+1 so use > not >= */
+ if (next > QIB_MAX_RDMA_ATOMIC)
+ next = 0;
+ spin_lock_irqsave(&qp->s_lock, flags);
+ /* Double check we can process this while holding the s_lock. */
+ if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
+ goto srunlock;
+ if (unlikely(next == qp->s_tail_ack_queue)) {
+ if (!qp->s_ack_queue[next].sent)
+ goto nack_inv_unlck;
+ qib_update_ack_queue(qp, next);
+ }
+ e = &qp->s_ack_queue[qp->r_head_ack_queue];
+ if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
+ atomic_dec(&e->rdma_sge.mr->refcount);
+ e->rdma_sge.mr = NULL;
+ }
+ reth = &ohdr->u.rc.reth;
+ len = be32_to_cpu(reth->length);
+ if (len) {
+ u32 rkey = be32_to_cpu(reth->rkey);
+ u64 vaddr = be64_to_cpu(reth->vaddr);
+ int ok;
+
+ /* Check rkey & NAK */
+ ok = qib_rkey_ok(qp, &e->rdma_sge, len, vaddr,
+ rkey, IB_ACCESS_REMOTE_READ);
+ if (unlikely(!ok))
+ goto nack_acc_unlck;
+ /*
+ * Update the next expected PSN. We add 1 later
+ * below, so only add the remainder here.
+ */
+ if (len > pmtu)
+ qp->r_psn += (len - 1) / pmtu;
+ } else {
+ e->rdma_sge.mr = NULL;
+ e->rdma_sge.vaddr = NULL;
+ e->rdma_sge.length = 0;
+ e->rdma_sge.sge_length = 0;
+ }
+ e->opcode = opcode;
+ e->sent = 0;
+ e->psn = psn;
+ e->lpsn = qp->r_psn;
+ /*
+ * We need to increment the MSN here instead of when we
+ * finish sending the result since a duplicate request would
+ * increment it more than once.
+ */
+ qp->r_msn++;
+ qp->r_psn++;
+ qp->r_state = opcode;
+ qp->r_nak_state = 0;
+ qp->r_head_ack_queue = next;
+
+ /* Schedule the send tasklet. */
+ qp->s_flags |= QIB_S_RESP_PENDING;
+ qib_schedule_send(qp);
+
+ goto srunlock;
+ }
+
+ case OP(COMPARE_SWAP):
+ case OP(FETCH_ADD): {
+ struct ib_atomic_eth *ateth;
+ struct qib_ack_entry *e;
+ u64 vaddr;
+ atomic64_t *maddr;
+ u64 sdata;
+ u32 rkey;
+ u8 next;
+
+ if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
+ goto nack_inv;
+ next = qp->r_head_ack_queue + 1;
+ if (next > QIB_MAX_RDMA_ATOMIC)
+ next = 0;
+ spin_lock_irqsave(&qp->s_lock, flags);
+ /* Double check we can process this while holding the s_lock. */
+ if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
+ goto srunlock;
+ if (unlikely(next == qp->s_tail_ack_queue)) {
+ if (!qp->s_ack_queue[next].sent)
+ goto nack_inv_unlck;
+ qib_update_ack_queue(qp, next);
+ }
+ e = &qp->s_ack_queue[qp->r_head_ack_queue];
+ if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
+ atomic_dec(&e->rdma_sge.mr->refcount);
+ e->rdma_sge.mr = NULL;
+ }
+ ateth = &ohdr->u.atomic_eth;
+ vaddr = ((u64) be32_to_cpu(ateth->vaddr[0]) << 32) |
+ be32_to_cpu(ateth->vaddr[1]);
+ if (unlikely(vaddr & (sizeof(u64) - 1)))
+ goto nack_inv_unlck;
+ rkey = be32_to_cpu(ateth->rkey);
+ /* Check rkey & NAK */
+ if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
+ vaddr, rkey,
+ IB_ACCESS_REMOTE_ATOMIC)))
+ goto nack_acc_unlck;
+ /* Perform atomic OP and save result. */
+ maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
+ sdata = be64_to_cpu(ateth->swap_data);
+ e->atomic_data = (opcode == OP(FETCH_ADD)) ?
+ (u64) atomic64_add_return(sdata, maddr) - sdata :
+ (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
+ be64_to_cpu(ateth->compare_data),
+ sdata);
+ atomic_dec(&qp->r_sge.sge.mr->refcount);
+ qp->r_sge.num_sge = 0;
+ e->opcode = opcode;
+ e->sent = 0;
+ e->psn = psn;
+ e->lpsn = psn;
+ qp->r_msn++;
+ qp->r_psn++;
+ qp->r_state = opcode;
+ qp->r_nak_state = 0;
+ qp->r_head_ack_queue = next;
+
+ /* Schedule the send tasklet. */
+ qp->s_flags |= QIB_S_RESP_PENDING;
+ qib_schedule_send(qp);
+
+ goto srunlock;
+ }
+
+ default:
+ /* NAK unknown opcodes. */
+ goto nack_inv;
+ }
+ qp->r_psn++;
+ qp->r_state = opcode;
+ qp->r_ack_psn = psn;
+ qp->r_nak_state = 0;
+ /* Send an ACK if requested or required. */
+ if (psn & (1 << 31))
+ goto send_ack;
+ goto runlock;
+
+rnr_nak:
+ qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer;
+ qp->r_ack_psn = qp->r_psn;
+ /* Queue RNR NAK for later */
+ if (list_empty(&qp->rspwait)) {
+ qp->r_flags |= QIB_R_RSP_NAK;
+ atomic_inc(&qp->refcount);
+ list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
+ }
+ goto runlock;
+
+nack_op_err:
+ qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
+ qp->r_nak_state = IB_NAK_REMOTE_OPERATIONAL_ERROR;
+ qp->r_ack_psn = qp->r_psn;
+ /* Queue NAK for later */
+ if (list_empty(&qp->rspwait)) {
+ qp->r_flags |= QIB_R_RSP_NAK;
+ atomic_inc(&qp->refcount);
+ list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
+ }
+ goto runlock;
+
+nack_inv_unlck:
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+nack_inv:
+ qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
+ qp->r_nak_state = IB_NAK_INVALID_REQUEST;
+ qp->r_ack_psn = qp->r_psn;
+ /* Queue NAK for later */
+ if (list_empty(&qp->rspwait)) {
+ qp->r_flags |= QIB_R_RSP_NAK;
+ atomic_inc(&qp->refcount);
+ list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
+ }
+ goto runlock;
+
+nack_acc_unlck:
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+nack_acc:
+ qib_rc_error(qp, IB_WC_LOC_PROT_ERR);
+ qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
+ qp->r_ack_psn = qp->r_psn;
+send_ack:
+ qib_send_rc_ack(qp);
+runlock:
+ spin_unlock(&qp->r_lock);
+ return;
+
+srunlock:
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ spin_unlock(&qp->r_lock);
+ return;
+
+sunlock:
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+}
diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c
new file mode 100644
index 000000000000..eb78d9367f06
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_ruc.c
@@ -0,0 +1,817 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/spinlock.h>
+
+#include "qib.h"
+#include "qib_mad.h"
+
+/*
+ * Convert the AETH RNR timeout code into the number of microseconds.
+ */
+const u32 ib_qib_rnr_table[32] = {
+ 655360, /* 00: 655.36 */
+ 10, /* 01: .01 */
+ 20, /* 02: .02 */
+ 30, /* 03: .03 */
+ 40, /* 04: .04 */
+ 60, /* 05: .06 */
+ 80, /* 06: .08 */
+ 120, /* 07: .12 */
+ 160, /* 08: .16 */
+ 240, /* 09: .24 */
+ 320, /* 0A: .32 */
+ 480, /* 0B: .48 */
+ 640, /* 0C: .64 */
+ 960, /* 0D: .96 */
+ 1280, /* 0E: 1.28 */
+ 1920, /* 0F: 1.92 */
+ 2560, /* 10: 2.56 */
+ 3840, /* 11: 3.84 */
+ 5120, /* 12: 5.12 */
+ 7680, /* 13: 7.68 */
+ 10240, /* 14: 10.24 */
+ 15360, /* 15: 15.36 */
+ 20480, /* 16: 20.48 */
+ 30720, /* 17: 30.72 */
+ 40960, /* 18: 40.96 */
+ 61440, /* 19: 61.44 */
+ 81920, /* 1A: 81.92 */
+ 122880, /* 1B: 122.88 */
+ 163840, /* 1C: 163.84 */
+ 245760, /* 1D: 245.76 */
+ 327680, /* 1E: 327.68 */
+ 491520 /* 1F: 491.52 */
+};
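
The RNR NAK branch in qib_rc.c above indexes this table with the low five bits of the AETH syndrome and feeds the result to usecs_to_jiffies() before re-arming the timer. A minimal sketch of the lookup, with literal shift and mask values standing in for the driver's macros (an assumption, since those headers are not in this hunk):

#include <stdint.h>

/* Microseconds to wait before retrying after an RNR NAK; 'table' is a
 * 32-entry RNR timeout table such as the one above.
 */
static uint32_t rnr_delay_usecs(const uint32_t table[32], uint32_t aeth)
{
        return table[(aeth >> 24) & 0x1f];
}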
+
+/*
+ * Validate a RWQE and fill in the SGE state.
+ * Return 1 if OK.
+ */
+static int qib_init_sge(struct qib_qp *qp, struct qib_rwqe *wqe)
+{
+ int i, j, ret;
+ struct ib_wc wc;
+ struct qib_lkey_table *rkt;
+ struct qib_pd *pd;
+ struct qib_sge_state *ss;
+
+ rkt = &to_idev(qp->ibqp.device)->lk_table;
+ pd = to_ipd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
+ ss = &qp->r_sge;
+ ss->sg_list = qp->r_sg_list;
+ qp->r_len = 0;
+ for (i = j = 0; i < wqe->num_sge; i++) {
+ if (wqe->sg_list[i].length == 0)
+ continue;
+ /* Check LKEY */
+ if (!qib_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
+ &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE))
+ goto bad_lkey;
+ qp->r_len += wqe->sg_list[i].length;
+ j++;
+ }
+ ss->num_sge = j;
+ ss->total_len = qp->r_len;
+ ret = 1;
+ goto bail;
+
+bad_lkey:
+ while (j) {
+ struct qib_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;
+
+ atomic_dec(&sge->mr->refcount);
+ }
+ ss->num_sge = 0;
+ memset(&wc, 0, sizeof(wc));
+ wc.wr_id = wqe->wr_id;
+ wc.status = IB_WC_LOC_PROT_ERR;
+ wc.opcode = IB_WC_RECV;
+ wc.qp = &qp->ibqp;
+ /* Signal solicited completion event. */
+ qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
+ ret = 0;
+bail:
+ return ret;
+}
+
+/**
+ * qib_get_rwqe - copy the next RWQE into the QP's RWQE
+ * @qp: the QP
+ * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
+ *
+ * Return -1 if there is a local error, 0 if no RWQE is available,
+ * otherwise return 1.
+ *
+ * Can be called from interrupt level.
+ */
+int qib_get_rwqe(struct qib_qp *qp, int wr_id_only)
+{
+ unsigned long flags;
+ struct qib_rq *rq;
+ struct qib_rwq *wq;
+ struct qib_srq *srq;
+ struct qib_rwqe *wqe;
+ void (*handler)(struct ib_event *, void *);
+ u32 tail;
+ int ret;
+
+ if (qp->ibqp.srq) {
+ srq = to_isrq(qp->ibqp.srq);
+ handler = srq->ibsrq.event_handler;
+ rq = &srq->rq;
+ } else {
+ srq = NULL;
+ handler = NULL;
+ rq = &qp->r_rq;
+ }
+
+ spin_lock_irqsave(&rq->lock, flags);
+ if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
+ ret = 0;
+ goto unlock;
+ }
+
+ wq = rq->wq;
+ tail = wq->tail;
+ /* Validate tail before using it since it is user writable. */
+ if (tail >= rq->size)
+ tail = 0;
+ if (unlikely(tail == wq->head)) {
+ ret = 0;
+ goto unlock;
+ }
+ /* Make sure entry is read after head index is read. */
+ smp_rmb();
+ wqe = get_rwqe_ptr(rq, tail);
+ /*
+ * Even though we update the tail index in memory, the verbs
+ * consumer is not supposed to post more entries until a
+ * completion is generated.
+ */
+ if (++tail >= rq->size)
+ tail = 0;
+ wq->tail = tail;
+ if (!wr_id_only && !qib_init_sge(qp, wqe)) {
+ ret = -1;
+ goto unlock;
+ }
+ qp->r_wr_id = wqe->wr_id;
+
+ ret = 1;
+ set_bit(QIB_R_WRID_VALID, &qp->r_aflags);
+ if (handler) {
+ u32 n;
+
+ /*
+ * Validate head pointer value and compute
+ * the number of remaining WQEs.
+ */
+ n = wq->head;
+ if (n >= rq->size)
+ n = 0;
+ if (n < tail)
+ n += rq->size - tail;
+ else
+ n -= tail;
+ if (n < srq->limit) {
+ struct ib_event ev;
+
+ srq->limit = 0;
+ spin_unlock_irqrestore(&rq->lock, flags);
+ ev.device = qp->ibqp.device;
+ ev.element.srq = qp->ibqp.srq;
+ ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
+ handler(&ev, srq->ibsrq.srq_context);
+ goto bail;
+ }
+ }
+unlock:
+ spin_unlock_irqrestore(&rq->lock, flags);
+bail:
+ return ret;
+}
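
When an SRQ is armed with a limit, qib_get_rwqe() recomputes how many receive WQEs are still posted using plain circular-queue arithmetic on the user-writable head and the just-advanced tail. A tiny standalone sketch of that computation (illustrative names only):

#include <stdio.h>

static unsigned int rq_count(unsigned int head, unsigned int tail,
                             unsigned int size)
{
    /* head and tail are assumed already clamped to [0, size) */
    return head >= tail ? head - tail : head + size - tail;
}

int main(void)
{
    /* e.g. size 8, head wrapped past tail */
    printf("%u\n", rq_count(2, 6, 8));  /* prints 4 */
    return 0;
}
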
+
+/*
+ * Switch to alternate path.
+ * The QP s_lock should be held and interrupts disabled.
+ */
+void qib_migrate_qp(struct qib_qp *qp)
+{
+ struct ib_event ev;
+
+ qp->s_mig_state = IB_MIG_MIGRATED;
+ qp->remote_ah_attr = qp->alt_ah_attr;
+ qp->port_num = qp->alt_ah_attr.port_num;
+ qp->s_pkey_index = qp->s_alt_pkey_index;
+
+ ev.device = qp->ibqp.device;
+ ev.element.qp = &qp->ibqp;
+ ev.event = IB_EVENT_PATH_MIG;
+ qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
+}
+
+static __be64 get_sguid(struct qib_ibport *ibp, unsigned index)
+{
+ if (!index) {
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+
+ return ppd->guid;
+ } else
+ return ibp->guids[index - 1];
+}
+
+static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id)
+{
+ return (gid->global.interface_id == id &&
+ (gid->global.subnet_prefix == gid_prefix ||
+ gid->global.subnet_prefix == IB_DEFAULT_GID_PREFIX));
+}
+
+/*
+ * qib_ruc_check_hdr - validate an incoming packet header against the QP's
+ * current (or alternate) path information
+ *
+ * This should be called with the QP s_lock held.
+ */
+int qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr,
+ int has_grh, struct qib_qp *qp, u32 bth0)
+{
+ __be64 guid;
+
+ if (qp->s_mig_state == IB_MIG_ARMED && (bth0 & IB_BTH_MIG_REQ)) {
+ if (!has_grh) {
+ if (qp->alt_ah_attr.ah_flags & IB_AH_GRH)
+ goto err;
+ } else {
+ if (!(qp->alt_ah_attr.ah_flags & IB_AH_GRH))
+ goto err;
+ guid = get_sguid(ibp, qp->alt_ah_attr.grh.sgid_index);
+ if (!gid_ok(&hdr->u.l.grh.dgid, ibp->gid_prefix, guid))
+ goto err;
+ if (!gid_ok(&hdr->u.l.grh.sgid,
+ qp->alt_ah_attr.grh.dgid.global.subnet_prefix,
+ qp->alt_ah_attr.grh.dgid.global.interface_id))
+ goto err;
+ }
+ if (!qib_pkey_ok((u16)bth0,
+ qib_get_pkey(ibp, qp->s_alt_pkey_index))) {
+ qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY,
+ (u16)bth0,
+ (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
+ 0, qp->ibqp.qp_num,
+ hdr->lrh[3], hdr->lrh[1]);
+ goto err;
+ }
+ /* Validate the SLID. See Ch. 9.6.1.5 and 17.2.8 */
+ if (be16_to_cpu(hdr->lrh[3]) != qp->alt_ah_attr.dlid ||
+ ppd_from_ibp(ibp)->port != qp->alt_ah_attr.port_num)
+ goto err;
+ qib_migrate_qp(qp);
+ } else {
+ if (!has_grh) {
+ if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
+ goto err;
+ } else {
+ if (!(qp->remote_ah_attr.ah_flags & IB_AH_GRH))
+ goto err;
+ guid = get_sguid(ibp,
+ qp->remote_ah_attr.grh.sgid_index);
+ if (!gid_ok(&hdr->u.l.grh.dgid, ibp->gid_prefix, guid))
+ goto err;
+ if (!gid_ok(&hdr->u.l.grh.sgid,
+ qp->remote_ah_attr.grh.dgid.global.subnet_prefix,
+ qp->remote_ah_attr.grh.dgid.global.interface_id))
+ goto err;
+ }
+ if (!qib_pkey_ok((u16)bth0,
+ qib_get_pkey(ibp, qp->s_pkey_index))) {
+ qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY,
+ (u16)bth0,
+ (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
+ 0, qp->ibqp.qp_num,
+ hdr->lrh[3], hdr->lrh[1]);
+ goto err;
+ }
+ /* Validate the SLID. See Ch. 9.6.1.5 */
+ if (be16_to_cpu(hdr->lrh[3]) != qp->remote_ah_attr.dlid ||
+ ppd_from_ibp(ibp)->port != qp->port_num)
+ goto err;
+ if (qp->s_mig_state == IB_MIG_REARM &&
+ !(bth0 & IB_BTH_MIG_REQ))
+ qp->s_mig_state = IB_MIG_ARMED;
+ }
+
+ return 0;
+
+err:
+ return 1;
+}
+
+/**
+ * qib_ruc_loopback - handle UC and RC loopback requests
+ * @sqp: the sending QP
+ *
+ * This is called from qib_do_send() to
+ * forward a WQE addressed to the same HCA.
+ * Note that although we are single threaded due to the tasklet, we still
+ * have to protect against post_send(). We don't have to worry about
+ * receive interrupts since this is a connected protocol and all packets
+ * will pass through here.
+ */
+static void qib_ruc_loopback(struct qib_qp *sqp)
+{
+ struct qib_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
+ struct qib_qp *qp;
+ struct qib_swqe *wqe;
+ struct qib_sge *sge;
+ unsigned long flags;
+ struct ib_wc wc;
+ u64 sdata;
+ atomic64_t *maddr;
+ enum ib_wc_status send_status;
+ int release;
+ int ret;
+
+ /*
+ * Note that we check the responder QP state after
+ * checking the requester's state.
+ */
+ qp = qib_lookup_qpn(ibp, sqp->remote_qpn);
+
+ spin_lock_irqsave(&sqp->s_lock, flags);
+
+ /* Return if we are already busy processing a work request. */
+ if ((sqp->s_flags & (QIB_S_BUSY | QIB_S_ANY_WAIT)) ||
+ !(ib_qib_state_ops[sqp->state] & QIB_PROCESS_OR_FLUSH_SEND))
+ goto unlock;
+
+ sqp->s_flags |= QIB_S_BUSY;
+
+again:
+ if (sqp->s_last == sqp->s_head)
+ goto clr_busy;
+ wqe = get_swqe_ptr(sqp, sqp->s_last);
+
+ /* Return if it is not OK to start a new work request. */
+ if (!(ib_qib_state_ops[sqp->state] & QIB_PROCESS_NEXT_SEND_OK)) {
+ if (!(ib_qib_state_ops[sqp->state] & QIB_FLUSH_SEND))
+ goto clr_busy;
+ /* We are in the error state, flush the work request. */
+ send_status = IB_WC_WR_FLUSH_ERR;
+ goto flush_send;
+ }
+
+ /*
+ * We can rely on the entry not changing without the s_lock
+ * being held until we update s_last.
+ * We increment s_cur to indicate s_last is in progress.
+ */
+ if (sqp->s_last == sqp->s_cur) {
+ if (++sqp->s_cur >= sqp->s_size)
+ sqp->s_cur = 0;
+ }
+ spin_unlock_irqrestore(&sqp->s_lock, flags);
+
+ if (!qp || !(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) ||
+ qp->ibqp.qp_type != sqp->ibqp.qp_type) {
+ ibp->n_pkt_drops++;
+ /*
+ * For RC, the requester would timeout and retry so
+ * shortcut the timeouts and just signal too many retries.
+ */
+ if (sqp->ibqp.qp_type == IB_QPT_RC)
+ send_status = IB_WC_RETRY_EXC_ERR;
+ else
+ send_status = IB_WC_SUCCESS;
+ goto serr;
+ }
+
+ memset(&wc, 0, sizeof wc);
+ send_status = IB_WC_SUCCESS;
+
+ release = 1;
+ sqp->s_sge.sge = wqe->sg_list[0];
+ sqp->s_sge.sg_list = wqe->sg_list + 1;
+ sqp->s_sge.num_sge = wqe->wr.num_sge;
+ sqp->s_len = wqe->length;
+ switch (wqe->wr.opcode) {
+ case IB_WR_SEND_WITH_IMM:
+ wc.wc_flags = IB_WC_WITH_IMM;
+ wc.ex.imm_data = wqe->wr.ex.imm_data;
+ /* FALLTHROUGH */
+ case IB_WR_SEND:
+ ret = qib_get_rwqe(qp, 0);
+ if (ret < 0)
+ goto op_err;
+ if (!ret)
+ goto rnr_nak;
+ break;
+
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
+ goto inv_err;
+ wc.wc_flags = IB_WC_WITH_IMM;
+ wc.ex.imm_data = wqe->wr.ex.imm_data;
+ ret = qib_get_rwqe(qp, 1);
+ if (ret < 0)
+ goto op_err;
+ if (!ret)
+ goto rnr_nak;
+ /* FALLTHROUGH */
+ case IB_WR_RDMA_WRITE:
+ if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
+ goto inv_err;
+ if (wqe->length == 0)
+ break;
+ if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
+ wqe->wr.wr.rdma.remote_addr,
+ wqe->wr.wr.rdma.rkey,
+ IB_ACCESS_REMOTE_WRITE)))
+ goto acc_err;
+ qp->r_sge.sg_list = NULL;
+ qp->r_sge.num_sge = 1;
+ qp->r_sge.total_len = wqe->length;
+ break;
+
+ case IB_WR_RDMA_READ:
+ if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
+ goto inv_err;
+ if (unlikely(!qib_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
+ wqe->wr.wr.rdma.remote_addr,
+ wqe->wr.wr.rdma.rkey,
+ IB_ACCESS_REMOTE_READ)))
+ goto acc_err;
+ release = 0;
+ sqp->s_sge.sg_list = NULL;
+ sqp->s_sge.num_sge = 1;
+ qp->r_sge.sge = wqe->sg_list[0];
+ qp->r_sge.sg_list = wqe->sg_list + 1;
+ qp->r_sge.num_sge = wqe->wr.num_sge;
+ qp->r_sge.total_len = wqe->length;
+ break;
+
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
+ goto inv_err;
+ if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
+ wqe->wr.wr.atomic.remote_addr,
+ wqe->wr.wr.atomic.rkey,
+ IB_ACCESS_REMOTE_ATOMIC)))
+ goto acc_err;
+ /* Perform atomic OP and save result. */
+ maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
+ sdata = wqe->wr.wr.atomic.compare_add;
+ *(u64 *) sqp->s_sge.sge.vaddr =
+ (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
+ (u64) atomic64_add_return(sdata, maddr) - sdata :
+ (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
+ sdata, wqe->wr.wr.atomic.swap);
+ atomic_dec(&qp->r_sge.sge.mr->refcount);
+ qp->r_sge.num_sge = 0;
+ goto send_comp;
+
+ default:
+ send_status = IB_WC_LOC_QP_OP_ERR;
+ goto serr;
+ }
+
+ sge = &sqp->s_sge.sge;
+ while (sqp->s_len) {
+ u32 len = sqp->s_len;
+
+ if (len > sge->length)
+ len = sge->length;
+ if (len > sge->sge_length)
+ len = sge->sge_length;
+ BUG_ON(len == 0);
+ qib_copy_sge(&qp->r_sge, sge->vaddr, len, release);
+ sge->vaddr += len;
+ sge->length -= len;
+ sge->sge_length -= len;
+ if (sge->sge_length == 0) {
+ if (!release)
+ atomic_dec(&sge->mr->refcount);
+ if (--sqp->s_sge.num_sge)
+ *sge = *sqp->s_sge.sg_list++;
+ } else if (sge->length == 0 && sge->mr->lkey) {
+ if (++sge->n >= QIB_SEGSZ) {
+ if (++sge->m >= sge->mr->mapsz)
+ break;
+ sge->n = 0;
+ }
+ sge->vaddr =
+ sge->mr->map[sge->m]->segs[sge->n].vaddr;
+ sge->length =
+ sge->mr->map[sge->m]->segs[sge->n].length;
+ }
+ sqp->s_len -= len;
+ }
+ if (release)
+ while (qp->r_sge.num_sge) {
+ atomic_dec(&qp->r_sge.sge.mr->refcount);
+ if (--qp->r_sge.num_sge)
+ qp->r_sge.sge = *qp->r_sge.sg_list++;
+ }
+
+ if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
+ goto send_comp;
+
+ if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
+ wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
+ else
+ wc.opcode = IB_WC_RECV;
+ wc.wr_id = qp->r_wr_id;
+ wc.status = IB_WC_SUCCESS;
+ wc.byte_len = wqe->length;
+ wc.qp = &qp->ibqp;
+ wc.src_qp = qp->remote_qpn;
+ wc.slid = qp->remote_ah_attr.dlid;
+ wc.sl = qp->remote_ah_attr.sl;
+ wc.port_num = 1;
+ /* Signal completion event if the solicited bit is set. */
+ qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
+ wqe->wr.send_flags & IB_SEND_SOLICITED);
+
+send_comp:
+ spin_lock_irqsave(&sqp->s_lock, flags);
+ ibp->n_loop_pkts++;
+flush_send:
+ sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
+ qib_send_complete(sqp, wqe, send_status);
+ goto again;
+
+rnr_nak:
+ /* Handle RNR NAK */
+ if (qp->ibqp.qp_type == IB_QPT_UC)
+ goto send_comp;
+ ibp->n_rnr_naks++;
+ /*
+ * Note: we don't need the s_lock held since the BUSY flag
+ * makes this single threaded.
+ */
+ if (sqp->s_rnr_retry == 0) {
+ send_status = IB_WC_RNR_RETRY_EXC_ERR;
+ goto serr;
+ }
+ if (sqp->s_rnr_retry_cnt < 7)
+ sqp->s_rnr_retry--;
+ spin_lock_irqsave(&sqp->s_lock, flags);
+ if (!(ib_qib_state_ops[sqp->state] & QIB_PROCESS_RECV_OK))
+ goto clr_busy;
+ sqp->s_flags |= QIB_S_WAIT_RNR;
+ sqp->s_timer.function = qib_rc_rnr_retry;
+ sqp->s_timer.expires = jiffies +
+ usecs_to_jiffies(ib_qib_rnr_table[qp->r_min_rnr_timer]);
+ add_timer(&sqp->s_timer);
+ goto clr_busy;
+
+op_err:
+ send_status = IB_WC_REM_OP_ERR;
+ wc.status = IB_WC_LOC_QP_OP_ERR;
+ goto err;
+
+inv_err:
+ send_status = IB_WC_REM_INV_REQ_ERR;
+ wc.status = IB_WC_LOC_QP_OP_ERR;
+ goto err;
+
+acc_err:
+ send_status = IB_WC_REM_ACCESS_ERR;
+ wc.status = IB_WC_LOC_PROT_ERR;
+err:
+ /* responder goes to error state */
+ qib_rc_error(qp, wc.status);
+
+serr:
+ spin_lock_irqsave(&sqp->s_lock, flags);
+ qib_send_complete(sqp, wqe, send_status);
+ if (sqp->ibqp.qp_type == IB_QPT_RC) {
+ int lastwqe = qib_error_qp(sqp, IB_WC_WR_FLUSH_ERR);
+
+ sqp->s_flags &= ~QIB_S_BUSY;
+ spin_unlock_irqrestore(&sqp->s_lock, flags);
+ if (lastwqe) {
+ struct ib_event ev;
+
+ ev.device = sqp->ibqp.device;
+ ev.element.qp = &sqp->ibqp;
+ ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
+ sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
+ }
+ goto done;
+ }
+clr_busy:
+ sqp->s_flags &= ~QIB_S_BUSY;
+unlock:
+ spin_unlock_irqrestore(&sqp->s_lock, flags);
+done:
+ if (qp && atomic_dec_and_test(&qp->refcount))
+ wake_up(&qp->wait);
+}
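
The atomic branch above emulates the two IB atomic operations in host memory: Fetch&Add returns the old value and adds the operand, while Compare&Swap returns the old value and stores the swap operand only when the old value matches the compare operand. A user-space sketch of those semantics with C11 atomics (not the kernel primitives used above):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t fetch_add(_Atomic uint64_t *p, uint64_t add)
{
    return atomic_fetch_add(p, add);        /* old value goes back to the requester */
}

static uint64_t cmp_swap(_Atomic uint64_t *p, uint64_t compare, uint64_t swap)
{
    uint64_t old = compare;

    /* on failure "old" is updated to the current value, which is
     * exactly what the responder must return in either case */
    atomic_compare_exchange_strong(p, &old, swap);
    return old;
}

int main(void)
{
    _Atomic uint64_t mem = 10;

    printf("%llu\n", (unsigned long long)fetch_add(&mem, 5));     /* 10, mem becomes 15 */
    printf("%llu\n", (unsigned long long)cmp_swap(&mem, 15, 99)); /* 15, mem becomes 99 */
    return 0;
}
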
+
+/**
+ * qib_make_grh - construct a GRH header
+ * @ibp: a pointer to the IB port
+ * @hdr: a pointer to the GRH header being constructed
+ * @grh: the global route address to send to
+ * @hwords: the number of 32 bit words of header being sent
+ * @nwords: the number of 32 bit words of data being sent
+ *
+ * Return the size of the header in 32 bit words.
+ */
+u32 qib_make_grh(struct qib_ibport *ibp, struct ib_grh *hdr,
+ struct ib_global_route *grh, u32 hwords, u32 nwords)
+{
+ hdr->version_tclass_flow =
+ cpu_to_be32((IB_GRH_VERSION << IB_GRH_VERSION_SHIFT) |
+ (grh->traffic_class << IB_GRH_TCLASS_SHIFT) |
+ (grh->flow_label << IB_GRH_FLOW_SHIFT));
+ hdr->paylen = cpu_to_be16((hwords - 2 + nwords + SIZE_OF_CRC) << 2);
+ /* next_hdr is defined by C8-7 in ch. 8.4.1 */
+ hdr->next_hdr = IB_GRH_NEXT_HDR;
+ hdr->hop_limit = grh->hop_limit;
+ /* The SGID is 32-bit aligned. */
+ hdr->sgid.global.subnet_prefix = ibp->gid_prefix;
+ hdr->sgid.global.interface_id = grh->sgid_index ?
+ ibp->guids[grh->sgid_index - 1] : ppd_from_ibp(ibp)->guid;
+ hdr->dgid = grh->dgid;
+
+ /* GRH header size in 32-bit words. */
+ return sizeof(struct ib_grh) / sizeof(u32);
+}
+
+void qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr,
+ u32 bth0, u32 bth2)
+{
+ struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
+ u16 lrh0;
+ u32 nwords;
+ u32 extra_bytes;
+
+ /* Construct the header. */
+ extra_bytes = -qp->s_cur_size & 3;
+ nwords = (qp->s_cur_size + extra_bytes) >> 2;
+ lrh0 = QIB_LRH_BTH;
+ if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
+ qp->s_hdrwords += qib_make_grh(ibp, &qp->s_hdr.u.l.grh,
+ &qp->remote_ah_attr.grh,
+ qp->s_hdrwords, nwords);
+ lrh0 = QIB_LRH_GRH;
+ }
+ lrh0 |= ibp->sl_to_vl[qp->remote_ah_attr.sl] << 12 |
+ qp->remote_ah_attr.sl << 4;
+ qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
+ qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
+ qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
+ qp->s_hdr.lrh[3] = cpu_to_be16(ppd_from_ibp(ibp)->lid |
+ qp->remote_ah_attr.src_path_bits);
+ bth0 |= qib_get_pkey(ibp, qp->s_pkey_index);
+ bth0 |= extra_bytes << 20;
+ if (qp->s_mig_state == IB_MIG_MIGRATED)
+ bth0 |= IB_BTH_MIG_REQ;
+ ohdr->bth[0] = cpu_to_be32(bth0);
+ ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
+ ohdr->bth[2] = cpu_to_be32(bth2);
+}
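
Two small details above are worth spelling out: the payload is padded to a 32-bit boundary, with "-len & 3" yielding the pad-byte count that also lands in the BTH PadCnt field (bits 20-21), and the LRH packet length counts header words plus payload words plus the ICRC. A minimal sketch of that arithmetic, assuming SIZE_OF_CRC is one 32-bit word:

#include <stdio.h>

#define CRC_WORDS 1     /* stands in for SIZE_OF_CRC (the 32-bit ICRC) */

int main(void)
{
    unsigned int len = 13;                  /* payload bytes for the example */
    unsigned int pad = (0u - len) & 3;      /* 3 pad bytes, same as "-len & 3" */
    unsigned int nwords = (len + pad) >> 2; /* 4 payload dwords */
    unsigned int hdrwords = 5;              /* assumed LRH+BTH word count */

    printf("pad=%u payload_dwords=%u lrh_pktlen=%u\n",
           pad, nwords, hdrwords + nwords + CRC_WORDS);
    return 0;
}
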
+
+/**
+ * qib_do_send - perform a send on a QP
+ * @work: contains a pointer to the QP
+ *
+ * Process entries in the send work queue until credit or queue is
+ * exhausted. Only allow one CPU to send a packet per QP (tasklet).
+ * Otherwise, two threads could send packets out of order.
+ */
+void qib_do_send(struct work_struct *work)
+{
+ struct qib_qp *qp = container_of(work, struct qib_qp, s_work);
+ struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+ int (*make_req)(struct qib_qp *qp);
+ unsigned long flags;
+
+ if ((qp->ibqp.qp_type == IB_QPT_RC ||
+ qp->ibqp.qp_type == IB_QPT_UC) &&
+ (qp->remote_ah_attr.dlid & ~((1 << ppd->lmc) - 1)) == ppd->lid) {
+ qib_ruc_loopback(qp);
+ return;
+ }
+
+ if (qp->ibqp.qp_type == IB_QPT_RC)
+ make_req = qib_make_rc_req;
+ else if (qp->ibqp.qp_type == IB_QPT_UC)
+ make_req = qib_make_uc_req;
+ else
+ make_req = qib_make_ud_req;
+
+ spin_lock_irqsave(&qp->s_lock, flags);
+
+ /* Return if we are already busy processing a work request. */
+ if (!qib_send_ok(qp)) {
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ return;
+ }
+
+ qp->s_flags |= QIB_S_BUSY;
+
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+
+ do {
+ /* Check for a constructed packet to be sent. */
+ if (qp->s_hdrwords != 0) {
+ /*
+ * If the packet cannot be sent now, return and
+ * the send tasklet will be woken up later.
+ */
+ if (qib_verbs_send(qp, &qp->s_hdr, qp->s_hdrwords,
+ qp->s_cur_sge, qp->s_cur_size))
+ break;
+ /* Record that s_hdr is empty. */
+ qp->s_hdrwords = 0;
+ }
+ } while (make_req(qp));
+}
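
The first test in qib_do_send() decides between the loopback path and the wire: with LID Mask Control (LMC), a port answers to 2^lmc consecutive LIDs starting at its base LID, so clearing the low lmc bits of the destination LID and comparing against the base LID detects a self-addressed RC/UC packet. A small sketch of that check (illustrative, and it assumes the base LID is LMC-aligned as the spec requires):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool is_local_lid(uint16_t dlid, uint16_t base_lid, unsigned int lmc)
{
    return (dlid & ~((1u << lmc) - 1)) == base_lid;
}

int main(void)
{
    /* base LID 0x20 with lmc=2 covers LIDs 0x20..0x23 */
    printf("%d %d\n", is_local_lid(0x22, 0x20, 2), is_local_lid(0x24, 0x20, 2));
    return 0;
}
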
+
+/*
+ * This should be called with s_lock held.
+ */
+void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe,
+ enum ib_wc_status status)
+{
+ u32 old_last, last;
+ unsigned i;
+
+ if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_OR_FLUSH_SEND))
+ return;
+
+ for (i = 0; i < wqe->wr.num_sge; i++) {
+ struct qib_sge *sge = &wqe->sg_list[i];
+
+ atomic_dec(&sge->mr->refcount);
+ }
+ if (qp->ibqp.qp_type == IB_QPT_UD ||
+ qp->ibqp.qp_type == IB_QPT_SMI ||
+ qp->ibqp.qp_type == IB_QPT_GSI)
+ atomic_dec(&to_iah(wqe->wr.wr.ud.ah)->refcount);
+
+ /* See ch. 11.2.4.1 and 10.7.3.1 */
+ if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
+ (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
+ status != IB_WC_SUCCESS) {
+ struct ib_wc wc;
+
+ memset(&wc, 0, sizeof wc);
+ wc.wr_id = wqe->wr.wr_id;
+ wc.status = status;
+ wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
+ wc.qp = &qp->ibqp;
+ if (status == IB_WC_SUCCESS)
+ wc.byte_len = wqe->length;
+ qib_cq_enter(to_icq(qp->ibqp.send_cq), &wc,
+ status != IB_WC_SUCCESS);
+ }
+
+ last = qp->s_last;
+ old_last = last;
+ if (++last >= qp->s_size)
+ last = 0;
+ qp->s_last = last;
+ if (qp->s_acked == old_last)
+ qp->s_acked = last;
+ if (qp->s_cur == old_last)
+ qp->s_cur = last;
+ if (qp->s_tail == old_last)
+ qp->s_tail = last;
+ if (qp->state == IB_QPS_SQD && last == qp->s_cur)
+ qp->s_draining = 0;
+}
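
qib_send_complete() applies the standard completion rule (IBTA ch. 11.2.4.1): when the QP was created for selective signaling, a send WQE produces a CQ entry only if the work request set IB_SEND_SIGNALED or it completed in error; otherwise every WQE is signaled. A compact restatement of that predicate, with hypothetical names:

#include <stdbool.h>
#include <stdio.h>

/* qp_signal_all: the QP was created without selective ("requested") signaling */
static bool needs_cqe(bool qp_signal_all, bool wr_signaled, bool failed)
{
    return qp_signal_all || wr_signaled || failed;
}

int main(void)
{
    printf("%d %d\n", needs_cqe(false, false, false),   /* suppressed */
           needs_cqe(false, false, true));              /* errors always signal */
    return 0;
}
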
diff --git a/drivers/infiniband/hw/ipath/ipath_sd7220.c b/drivers/infiniband/hw/qib/qib_sd7220.c
index 2a68d9f624dd..0aeed0e74cb6 100644
--- a/drivers/infiniband/hw/ipath/ipath_sd7220.c
+++ b/drivers/infiniband/hw/qib/qib_sd7220.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@@ -32,22 +32,40 @@
*/
/*
* This file contains all of the code that is specific to the SerDes
- * on the InfiniPath 7220 chip.
+ * on the QLogic_IB 7220 chip.
*/
#include <linux/pci.h>
#include <linux/delay.h>
-#include "ipath_kernel.h"
-#include "ipath_registers.h"
-#include "ipath_7220.h"
+#include "qib.h"
+#include "qib_7220.h"
+
+/*
+ * Same as in qib_iba7220.c, but just the registers needed here.
+ * Could move whole set to qib_7220.h, but decided better to keep
+ * local.
+ */
+#define KREG_IDX(regname) (QIB_7220_##regname##_OFFS / sizeof(u64))
+#define kr_hwerrclear KREG_IDX(HwErrClear)
+#define kr_hwerrmask KREG_IDX(HwErrMask)
+#define kr_hwerrstatus KREG_IDX(HwErrStatus)
+#define kr_ibcstatus KREG_IDX(IBCStatus)
+#define kr_ibserdesctrl KREG_IDX(IBSerDesCtrl)
+#define kr_scratch KREG_IDX(Scratch)
+#define kr_xgxs_cfg KREG_IDX(XGXSCfg)
+/* these are used only here, not in qib_iba7220.c */
+#define kr_ibsd_epb_access_ctrl KREG_IDX(ibsd_epb_access_ctrl)
+#define kr_ibsd_epb_transaction_reg KREG_IDX(ibsd_epb_transaction_reg)
+#define kr_pciesd_epb_transaction_reg KREG_IDX(pciesd_epb_transaction_reg)
+#define kr_pciesd_epb_access_ctrl KREG_IDX(pciesd_epb_access_ctrl)
+#define kr_serdes_ddsrxeq0 KREG_IDX(SerDes_DDSRXEQ0)
/*
* The IBSerDesMappTable is a memory that holds values to be stored in
- * various SerDes registers by IBC. It is not part of the normal kregs
- * map and is used in exactly one place, hence the #define below.
+ * various SerDes registers by IBC.
*/
-#define KR_IBSerDesMappTable (0x94000 / (sizeof(uint64_t)))
+#define kr_serdes_maptable KREG_IDX(IBSerDesMappTable)
/*
* Below used for sdnum parameter, selecting one of the two sections
@@ -71,42 +89,37 @@
#define EPB_GLOBAL_WR (1U << (EPB_ADDR_SHF + 8))
/* Forward declarations. */
-static int ipath_sd7220_reg_mod(struct ipath_devdata *dd, int sdnum, u32 loc,
- u32 data, u32 mask);
-static int ibsd_mod_allchnls(struct ipath_devdata *dd, int loc, int val,
+static int qib_sd7220_reg_mod(struct qib_devdata *dd, int sdnum, u32 loc,
+ u32 data, u32 mask);
+static int ibsd_mod_allchnls(struct qib_devdata *dd, int loc, int val,
int mask);
-static int ipath_sd_trimdone_poll(struct ipath_devdata *dd);
-static void ipath_sd_trimdone_monitor(struct ipath_devdata *dd,
- const char *where);
-static int ipath_sd_setvals(struct ipath_devdata *dd);
-static int ipath_sd_early(struct ipath_devdata *dd);
-static int ipath_sd_dactrim(struct ipath_devdata *dd);
-/* Set the registers that IBC may muck with to their default "preset" values */
-int ipath_sd7220_presets(struct ipath_devdata *dd);
-static int ipath_internal_presets(struct ipath_devdata *dd);
+static int qib_sd_trimdone_poll(struct qib_devdata *dd);
+static void qib_sd_trimdone_monitor(struct qib_devdata *dd, const char *where);
+static int qib_sd_setvals(struct qib_devdata *dd);
+static int qib_sd_early(struct qib_devdata *dd);
+static int qib_sd_dactrim(struct qib_devdata *dd);
+static int qib_internal_presets(struct qib_devdata *dd);
/* Tweak the register (CMUCTRL5) that contains the TRIMSELF controls */
-static int ipath_sd_trimself(struct ipath_devdata *dd, int val);
-static int epb_access(struct ipath_devdata *dd, int sdnum, int claim);
-
-void ipath_set_relock_poll(struct ipath_devdata *dd, int ibup);
+static int qib_sd_trimself(struct qib_devdata *dd, int val);
+static int epb_access(struct qib_devdata *dd, int sdnum, int claim);
/*
* Below keeps track of whether the "once per power-on" initialization has
* been done, because uC code Version 1.32.17 or higher allows the uC to
* be reset at will, and Automatic Equalization may require it. So the
- * state of the reset "pin", as reflected in was_reset parameter to
- * ipath_sd7220_init() is no longer valid. Instead, we check for the
+ * state of the reset "pin", is no longer valid. Instead, we check for the
* actual uC code having been loaded.
*/
-static int ipath_ibsd_ucode_loaded(struct ipath_devdata *dd)
+static int qib_ibsd_ucode_loaded(struct qib_pportdata *ppd)
{
- if (!dd->serdes_first_init_done && (ipath_sd7220_ib_vfy(dd) > 0))
- dd->serdes_first_init_done = 1;
- return dd->serdes_first_init_done;
+ struct qib_devdata *dd = ppd->dd;
+ if (!dd->cspec->serdes_first_init_done && (qib_sd7220_ib_vfy(dd) > 0))
+ dd->cspec->serdes_first_init_done = 1;
+ return dd->cspec->serdes_first_init_done;
}
-/* repeat #define for local use. "Real" #define is in ipath_iba7220.c */
-#define INFINIPATH_HWE_IB_UC_MEMORYPARITYERR 0x0000004000000000ULL
+/* repeat #define for local use. "Real" #define is in qib_iba7220.c */
+#define QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR 0x0000004000000000ULL
#define IB_MPREG5 (EPB_LOC(6, 0, 0xE) | (1L << EPB_IB_UC_CS_SHF))
#define IB_MPREG6 (EPB_LOC(6, 0, 0xF) | (1U << EPB_IB_UC_CS_SHF))
#define UC_PAR_CLR_D 8
@@ -114,25 +127,25 @@ static int ipath_ibsd_ucode_loaded(struct ipath_devdata *dd)
#define IB_CTRL2(chn) (EPB_LOC(chn, 7, 3) | EPB_IB_QUAD0_CS)
#define START_EQ1(chan) EPB_LOC(chan, 7, 0x27)
-void ipath_sd7220_clr_ibpar(struct ipath_devdata *dd)
+void qib_sd7220_clr_ibpar(struct qib_devdata *dd)
{
int ret;
/* clear, then re-enable parity errs */
- ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6,
+ ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6,
UC_PAR_CLR_D, UC_PAR_CLR_M);
if (ret < 0) {
- ipath_dev_err(dd, "Failed clearing IBSerDes Parity err\n");
+ qib_dev_err(dd, "Failed clearing IBSerDes Parity err\n");
goto bail;
}
- ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0,
+ ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0,
UC_PAR_CLR_M);
- ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
+ qib_read_kreg32(dd, kr_scratch);
udelay(4);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
- INFINIPATH_HWE_IB_UC_MEMORYPARITYERR);
- ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
+ qib_write_kreg(dd, kr_hwerrclear,
+ QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR);
+ qib_read_kreg32(dd, kr_scratch);
bail:
return;
}
@@ -146,7 +159,7 @@ bail:
#define IB_PGUDP(chn) (EPB_LOC((chn), 2, 1) | EPB_IB_QUAD0_CS)
#define IB_CMUDONE(chn) (EPB_LOC((chn), 7, 0xF) | EPB_IB_QUAD0_CS)
-static int ipath_resync_ibepb(struct ipath_devdata *dd)
+static int qib_resync_ibepb(struct qib_devdata *dd)
{
int ret, pat, tries, chn;
u32 loc;
@@ -155,43 +168,42 @@ static int ipath_resync_ibepb(struct ipath_devdata *dd)
chn = 0;
for (tries = 0; tries < (4 * IBSD_RESYNC_TRIES); ++tries) {
loc = IB_PGUDP(chn);
- ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0);
+ ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0);
if (ret < 0) {
- ipath_dev_err(dd, "Failed read in resync\n");
+ qib_dev_err(dd, "Failed read in resync\n");
continue;
}
if (ret != 0xF0 && ret != 0x55 && tries == 0)
- ipath_dev_err(dd, "unexpected pattern in resync\n");
+ qib_dev_err(dd, "unexpected pattern in resync\n");
pat = ret ^ 0xA5; /* alternate F0 and 55 */
- ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, loc, pat, 0xFF);
+ ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, pat, 0xFF);
if (ret < 0) {
- ipath_dev_err(dd, "Failed write in resync\n");
+ qib_dev_err(dd, "Failed write in resync\n");
continue;
}
- ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0);
+ ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0);
if (ret < 0) {
- ipath_dev_err(dd, "Failed re-read in resync\n");
+ qib_dev_err(dd, "Failed re-read in resync\n");
continue;
}
if (ret != pat) {
- ipath_dev_err(dd, "Failed compare1 in resync\n");
+ qib_dev_err(dd, "Failed compare1 in resync\n");
continue;
}
loc = IB_CMUDONE(chn);
- ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0);
+ ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0);
if (ret < 0) {
- ipath_dev_err(dd, "Failed CMUDONE rd in resync\n");
+ qib_dev_err(dd, "Failed CMUDONE rd in resync\n");
continue;
}
if ((ret & 0x70) != ((chn << 4) | 0x40)) {
- ipath_dev_err(dd, "Bad CMUDONE value %02X, chn %d\n",
- ret, chn);
+ qib_dev_err(dd, "Bad CMUDONE value %02X, chn %d\n",
+ ret, chn);
continue;
}
if (++chn == 4)
break; /* Success */
}
- ipath_cdbg(VERBOSE, "Resync in %d tries\n", tries);
return (ret > 0) ? 0 : ret;
}
@@ -199,32 +211,32 @@ static int ipath_resync_ibepb(struct ipath_devdata *dd)
* Localize the stuff that should be done to change IB uC reset
* returns <0 for errors.
*/
-static int ipath_ibsd_reset(struct ipath_devdata *dd, int assert_rst)
+static int qib_ibsd_reset(struct qib_devdata *dd, int assert_rst)
{
u64 rst_val;
int ret = 0;
unsigned long flags;
- rst_val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibserdesctrl);
+ rst_val = qib_read_kreg64(dd, kr_ibserdesctrl);
if (assert_rst) {
/*
* Vendor recommends "interrupting" uC before reset, to
* minimize possible glitches.
*/
- spin_lock_irqsave(&dd->ipath_sdepb_lock, flags);
+ spin_lock_irqsave(&dd->cspec->sdepb_lock, flags);
epb_access(dd, IB_7220_SERDES, 1);
rst_val |= 1ULL;
/* Squelch possible parity error from _asserting_ reset */
- ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
- dd->ipath_hwerrmask &
- ~INFINIPATH_HWE_IB_UC_MEMORYPARITYERR);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_ibserdesctrl, rst_val);
+ qib_write_kreg(dd, kr_hwerrmask,
+ dd->cspec->hwerrmask &
+ ~QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR);
+ qib_write_kreg(dd, kr_ibserdesctrl, rst_val);
/* flush write, delay to ensure it took effect */
- ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
+ qib_read_kreg32(dd, kr_scratch);
udelay(2);
/* once it's reset, can remove interrupt */
epb_access(dd, IB_7220_SERDES, -1);
- spin_unlock_irqrestore(&dd->ipath_sdepb_lock, flags);
+ spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags);
} else {
/*
* Before we de-assert reset, we need to deal with
@@ -235,46 +247,46 @@ static int ipath_ibsd_reset(struct ipath_devdata *dd, int assert_rst)
*/
u64 val;
rst_val &= ~(1ULL);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
- dd->ipath_hwerrmask &
- ~INFINIPATH_HWE_IB_UC_MEMORYPARITYERR);
+ qib_write_kreg(dd, kr_hwerrmask,
+ dd->cspec->hwerrmask &
+ ~QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR);
- ret = ipath_resync_ibepb(dd);
+ ret = qib_resync_ibepb(dd);
if (ret < 0)
- ipath_dev_err(dd, "unable to re-sync IB EPB\n");
+ qib_dev_err(dd, "unable to re-sync IB EPB\n");
/* set uC control regs to suppress parity errs */
- ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG5, 1, 1);
+ ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG5, 1, 1);
if (ret < 0)
goto bail;
/* IB uC code past Version 1.32.17 allow suppression of wdog */
- ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0x80,
+ ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0x80,
0x80);
if (ret < 0) {
- ipath_dev_err(dd, "Failed to set WDOG disable\n");
+ qib_dev_err(dd, "Failed to set WDOG disable\n");
goto bail;
}
- ipath_write_kreg(dd, dd->ipath_kregs->kr_ibserdesctrl, rst_val);
+ qib_write_kreg(dd, kr_ibserdesctrl, rst_val);
/* flush write, delay for startup */
- ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
+ qib_read_kreg32(dd, kr_scratch);
udelay(1);
/* clear, then re-enable parity errs */
- ipath_sd7220_clr_ibpar(dd);
- val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus);
- if (val & INFINIPATH_HWE_IB_UC_MEMORYPARITYERR) {
- ipath_dev_err(dd, "IBUC Parity still set after RST\n");
- dd->ipath_hwerrmask &=
- ~INFINIPATH_HWE_IB_UC_MEMORYPARITYERR;
+ qib_sd7220_clr_ibpar(dd);
+ val = qib_read_kreg64(dd, kr_hwerrstatus);
+ if (val & QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR) {
+ qib_dev_err(dd, "IBUC Parity still set after RST\n");
+ dd->cspec->hwerrmask &=
+ ~QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR;
}
- ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
- dd->ipath_hwerrmask);
+ qib_write_kreg(dd, kr_hwerrmask,
+ dd->cspec->hwerrmask);
}
bail:
return ret;
}
-static void ipath_sd_trimdone_monitor(struct ipath_devdata *dd,
+static void qib_sd_trimdone_monitor(struct qib_devdata *dd,
const char *where)
{
int ret, chn, baduns;
@@ -286,69 +298,71 @@ static void ipath_sd_trimdone_monitor(struct ipath_devdata *dd,
/* give time for reset to settle out in EPB */
udelay(2);
- ret = ipath_resync_ibepb(dd);
+ ret = qib_resync_ibepb(dd);
if (ret < 0)
- ipath_dev_err(dd, "not able to re-sync IB EPB (%s)\n", where);
+ qib_dev_err(dd, "not able to re-sync IB EPB (%s)\n", where);
/* Do "sacrificial read" to get EPB in sane state after reset */
- ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, IB_CTRL2(0), 0, 0);
+ ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_CTRL2(0), 0, 0);
if (ret < 0)
- ipath_dev_err(dd, "Failed TRIMDONE 1st read, (%s)\n", where);
+ qib_dev_err(dd, "Failed TRIMDONE 1st read, (%s)\n", where);
/* Check/show "summary" Trim-done bit in IBCStatus */
- val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
- if (val & (1ULL << 11))
- ipath_cdbg(VERBOSE, "IBCS TRIMDONE set (%s)\n", where);
- else
- ipath_dev_err(dd, "IBCS TRIMDONE clear (%s)\n", where);
-
+ val = qib_read_kreg64(dd, kr_ibcstatus);
+ if (!(val & (1ULL << 11)))
+ qib_dev_err(dd, "IBCS TRIMDONE clear (%s)\n", where);
+ /*
+ * Do "dummy read/mod/wr" to get EPB in sane state after reset
+ * The default value for MPREG6 is 0.
+ */
udelay(2);
- ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0x80, 0x80);
+ ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0x80, 0x80);
if (ret < 0)
- ipath_dev_err(dd, "Failed Dummy RMW, (%s)\n", where);
+ qib_dev_err(dd, "Failed Dummy RMW, (%s)\n", where);
udelay(10);
baduns = 0;
for (chn = 3; chn >= 0; --chn) {
/* Read CTRL reg for each channel to check TRIMDONE */
- ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES,
+ ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
IB_CTRL2(chn), 0, 0);
if (ret < 0)
- ipath_dev_err(dd, "Failed checking TRIMDONE, chn %d"
- " (%s)\n", chn, where);
+ qib_dev_err(dd, "Failed checking TRIMDONE, chn %d"
+ " (%s)\n", chn, where);
if (!(ret & 0x10)) {
int probe;
+
baduns |= (1 << chn);
- ipath_dev_err(dd, "TRIMDONE cleared on chn %d (%02X)."
+ qib_dev_err(dd, "TRIMDONE cleared on chn %d (%02X)."
" (%s)\n", chn, ret, where);
- probe = ipath_sd7220_reg_mod(dd, IB_7220_SERDES,
+ probe = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
IB_PGUDP(0), 0, 0);
- ipath_dev_err(dd, "probe is %d (%02X)\n",
+ qib_dev_err(dd, "probe is %d (%02X)\n",
probe, probe);
- probe = ipath_sd7220_reg_mod(dd, IB_7220_SERDES,
+ probe = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
IB_CTRL2(chn), 0, 0);
- ipath_dev_err(dd, "re-read: %d (%02X)\n",
+ qib_dev_err(dd, "re-read: %d (%02X)\n",
probe, probe);
- ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES,
+ ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
IB_CTRL2(chn), 0x10, 0x10);
if (ret < 0)
- ipath_dev_err(dd,
+ qib_dev_err(dd,
"Err on TRIMDONE rewrite1\n");
}
}
for (chn = 3; chn >= 0; --chn) {
/* Read CTRL reg for each channel to check TRIMDONE */
if (baduns & (1 << chn)) {
- ipath_dev_err(dd,
+ qib_dev_err(dd,
"Reseting TRIMDONE on chn %d (%s)\n",
chn, where);
- ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES,
+ ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
IB_CTRL2(chn), 0x10, 0x10);
if (ret < 0)
- ipath_dev_err(dd, "Failed re-setting "
+ qib_dev_err(dd, "Failed re-setting "
"TRIMDONE, chn %d (%s)\n",
chn, where);
}
@@ -361,96 +375,86 @@ static void ipath_sd_trimdone_monitor(struct ipath_devdata *dd,
* Post IB uC code version 1.32.17, was_reset being 1 is not really
* informative, so we double-check.
*/
-int ipath_sd7220_init(struct ipath_devdata *dd, int was_reset)
+int qib_sd7220_init(struct qib_devdata *dd)
{
int ret = 1; /* default to failure */
- int first_reset;
- int val_stat;
+ int first_reset, was_reset;
+ /* SERDES MPU reset recorded in D0 */
+ was_reset = (qib_read_kreg64(dd, kr_ibserdesctrl) & 1);
if (!was_reset) {
/* entered with reset not asserted, we need to do it */
- ipath_ibsd_reset(dd, 1);
- ipath_sd_trimdone_monitor(dd, "Driver-reload");
+ qib_ibsd_reset(dd, 1);
+ qib_sd_trimdone_monitor(dd, "Driver-reload");
}
-
/* Substitute our deduced value for was_reset */
- ret = ipath_ibsd_ucode_loaded(dd);
- if (ret < 0) {
- ret = 1;
- goto done;
- }
- first_reset = !ret; /* First reset if IBSD uCode not yet loaded */
+ ret = qib_ibsd_ucode_loaded(dd->pport);
+ if (ret < 0)
+ goto bail;
+ first_reset = !ret; /* First reset if IBSD uCode not yet loaded */
/*
* Alter some regs per vendor latest doc, reset-defaults
* are not right for IB.
*/
- ret = ipath_sd_early(dd);
+ ret = qib_sd_early(dd);
if (ret < 0) {
- ipath_dev_err(dd, "Failed to set IB SERDES early defaults\n");
- ret = 1;
- goto done;
+ qib_dev_err(dd, "Failed to set IB SERDES early defaults\n");
+ goto bail;
}
-
/*
* Set DAC manual trim IB.
* We only do this once after chip has been reset (usually
* same as once per system boot).
*/
if (first_reset) {
- ret = ipath_sd_dactrim(dd);
+ ret = qib_sd_dactrim(dd);
if (ret < 0) {
- ipath_dev_err(dd, "Failed IB SERDES DAC trim\n");
- ret = 1;
- goto done;
+ qib_dev_err(dd, "Failed IB SERDES DAC trim\n");
+ goto bail;
}
}
-
/*
* Set various registers (DDS and RXEQ) that will be
* controlled by IBC (in 1.2 mode) to reasonable preset values
* Calling the "internal" version avoids the "check for needed"
* and "trimdone monitor" that might be counter-productive.
*/
- ret = ipath_internal_presets(dd);
+ ret = qib_internal_presets(dd);
if (ret < 0) {
- ipath_dev_err(dd, "Failed to set IB SERDES presets\n");
- ret = 1;
- goto done;
+ qib_dev_err(dd, "Failed to set IB SERDES presets\n");
+ goto bail;
}
- ret = ipath_sd_trimself(dd, 0x80);
+ ret = qib_sd_trimself(dd, 0x80);
if (ret < 0) {
- ipath_dev_err(dd, "Failed to set IB SERDES TRIMSELF\n");
- ret = 1;
- goto done;
+ qib_dev_err(dd, "Failed to set IB SERDES TRIMSELF\n");
+ goto bail;
}
/* Load image, then try to verify */
- ret = 0; /* Assume success */
+ ret = 0; /* Assume success */
if (first_reset) {
int vfy;
int trim_done;
- ipath_dbg("SerDes uC was reset, reloading PRAM\n");
- ret = ipath_sd7220_ib_load(dd);
+
+ ret = qib_sd7220_ib_load(dd);
if (ret < 0) {
- ipath_dev_err(dd, "Failed to load IB SERDES image\n");
- ret = 1;
- goto done;
- }
+ qib_dev_err(dd, "Failed to load IB SERDES image\n");
+ goto bail;
+ } else {
+ /* Loaded image, try to verify */
+ vfy = qib_sd7220_ib_vfy(dd);
+ if (vfy != ret) {
+ qib_dev_err(dd, "SERDES PRAM VFY failed\n");
+ goto bail;
+ } /* end if verified */
+ } /* end if loaded */
- /* Loaded image, try to verify */
- vfy = ipath_sd7220_ib_vfy(dd);
- if (vfy != ret) {
- ipath_dev_err(dd, "SERDES PRAM VFY failed\n");
- ret = 1;
- goto done;
- }
/*
* Loaded and verified. Almost good...
* hold "success" in ret
*/
ret = 0;
-
/*
* Prev steps all worked, continue bringup
* De-assert RESET to uC, only in first reset, to allow
@@ -461,45 +465,47 @@ int ipath_sd7220_init(struct ipath_devdata *dd, int was_reset)
*/
ret = ibsd_mod_allchnls(dd, START_EQ1(0), 0, 0x38);
if (ret < 0) {
- ipath_dev_err(dd, "Failed clearing START_EQ1\n");
- ret = 1;
- goto done;
+ qib_dev_err(dd, "Failed clearing START_EQ1\n");
+ goto bail;
}
- ipath_ibsd_reset(dd, 0);
+ qib_ibsd_reset(dd, 0);
/*
* If this is not the first reset, trimdone should be set
- * already.
+ * already. This may need to be re-checked.
*/
- trim_done = ipath_sd_trimdone_poll(dd);
+ trim_done = qib_sd_trimdone_poll(dd);
/*
* Whether or not trimdone succeeded, we need to put the
* uC back into reset to avoid a possible fight with the
* IBC state-machine.
*/
- ipath_ibsd_reset(dd, 1);
+ qib_ibsd_reset(dd, 1);
if (!trim_done) {
- ipath_dev_err(dd, "No TRIMDONE seen\n");
- ret = 1;
- goto done;
+ qib_dev_err(dd, "No TRIMDONE seen\n");
+ goto bail;
}
-
- ipath_sd_trimdone_monitor(dd, "First-reset");
+ /*
+ * DEBUG: check each time we reset if trimdone bits have
+ * gotten cleared, and re-set them.
+ */
+ qib_sd_trimdone_monitor(dd, "First-reset");
/* Remember so we do not re-do the load, dactrim, etc. */
- dd->serdes_first_init_done = 1;
+ dd->cspec->serdes_first_init_done = 1;
}
/*
- * Setup for channel training and load values for
+ * setup for channel training and load values for
* RxEq and DDS in tables used by IBC in IB1.2 mode
*/
-
- val_stat = ipath_sd_setvals(dd);
- if (val_stat < 0)
- ret = 1;
+ ret = 0;
+ if (qib_sd_setvals(dd) >= 0)
+ goto done;
+bail:
+ ret = 1;
done:
/* start relock timer regardless, but start at 1 second */
- ipath_set_relock_poll(dd, -1);
+ set_7220_relock_poll(dd, -1);
return ret;
}
@@ -517,7 +523,7 @@ done:
* the "claim" parameter is >0 to claim, <0 to release, 0 to query.
* Returns <0 for errors, >0 if we had ownership, else 0.
*/
-static int epb_access(struct ipath_devdata *dd, int sdnum, int claim)
+static int epb_access(struct qib_devdata *dd, int sdnum, int claim)
{
u16 acc;
u64 accval;
@@ -525,28 +531,30 @@ static int epb_access(struct ipath_devdata *dd, int sdnum, int claim)
u64 oct_sel = 0;
switch (sdnum) {
- case IB_7220_SERDES :
+ case IB_7220_SERDES:
/*
* The IB SERDES "ownership" is fairly simple. A single each
* request/grant.
*/
- acc = dd->ipath_kregs->kr_ib_epbacc;
+ acc = kr_ibsd_epb_access_ctrl;
break;
- case PCIE_SERDES0 :
- case PCIE_SERDES1 :
+
+ case PCIE_SERDES0:
+ case PCIE_SERDES1:
/* PCIe SERDES has two "octants", need to select which */
- acc = dd->ipath_kregs->kr_pcie_epbacc;
+ acc = kr_pciesd_epb_access_ctrl;
oct_sel = (2 << (sdnum - PCIE_SERDES0));
break;
- default :
+
+ default:
return 0;
}
/* Make sure any outstanding transaction was seen */
- ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
+ qib_read_kreg32(dd, kr_scratch);
udelay(15);
- accval = ipath_read_kreg32(dd, acc);
+ accval = qib_read_kreg32(dd, acc);
owned = !!(accval & EPB_ACC_GNT);
if (claim < 0) {
@@ -557,22 +565,22 @@ static int epb_access(struct ipath_devdata *dd, int sdnum, int claim)
* Both should be clear
*/
u64 newval = 0;
- ipath_write_kreg(dd, acc, newval);
+ qib_write_kreg(dd, acc, newval);
/* First read after write is not trustworthy */
- pollval = ipath_read_kreg32(dd, acc);
+ pollval = qib_read_kreg32(dd, acc);
udelay(5);
- pollval = ipath_read_kreg32(dd, acc);
+ pollval = qib_read_kreg32(dd, acc);
if (pollval & EPB_ACC_GNT)
owned = -1;
} else if (claim > 0) {
/* Need to claim */
u64 pollval;
u64 newval = EPB_ACC_REQ | oct_sel;
- ipath_write_kreg(dd, acc, newval);
+ qib_write_kreg(dd, acc, newval);
/* First read after write is not trustworthy */
- pollval = ipath_read_kreg32(dd, acc);
+ pollval = qib_read_kreg32(dd, acc);
udelay(5);
- pollval = ipath_read_kreg32(dd, acc);
+ pollval = qib_read_kreg32(dd, acc);
if (!(pollval & EPB_ACC_GNT))
owned = -1;
}
@@ -582,18 +590,17 @@ static int epb_access(struct ipath_devdata *dd, int sdnum, int claim)
/*
* Lemma to deal with race condition of write..read to epb regs
*/
-static int epb_trans(struct ipath_devdata *dd, u16 reg, u64 i_val, u64 *o_vp)
+static int epb_trans(struct qib_devdata *dd, u16 reg, u64 i_val, u64 *o_vp)
{
int tries;
u64 transval;
-
- ipath_write_kreg(dd, reg, i_val);
+ qib_write_kreg(dd, reg, i_val);
/* Throw away first read, as RDY bit may be stale */
- transval = ipath_read_kreg64(dd, reg);
+ transval = qib_read_kreg64(dd, reg);
for (tries = EPB_TRANS_TRIES; tries; --tries) {
- transval = ipath_read_kreg32(dd, reg);
+ transval = qib_read_kreg32(dd, reg);
if (transval & EPB_TRANS_RDY)
break;
udelay(5);
@@ -606,21 +613,20 @@ static int epb_trans(struct ipath_devdata *dd, u16 reg, u64 i_val, u64 *o_vp)
}
/**
- *
- * ipath_sd7220_reg_mod - modify SERDES register
- * @dd: the infinipath device
+ * qib_sd7220_reg_mod - modify SERDES register
+ * @dd: the qlogic_ib device
* @sdnum: which SERDES to access
* @loc: location - channel, element, register, as packed by EPB_LOC() macro.
* @wd: Write Data - value to set in register
* @mask: ones where data should be spliced into reg.
*
- * Basic register read/modify/write, with un-needed accesses elided. That is,
+ * Basic register read/modify/write, with un-needed accesses elided. That is,
* a mask of zero will prevent write, while a mask of 0xFF will prevent read.
* returns current (presumed, if a write was done) contents of selected
* register, or <0 if errors.
*/
-static int ipath_sd7220_reg_mod(struct ipath_devdata *dd, int sdnum, u32 loc,
- u32 wd, u32 mask)
+static int qib_sd7220_reg_mod(struct qib_devdata *dd, int sdnum, u32 loc,
+ u32 wd, u32 mask)
{
u16 trans;
u64 transval;
@@ -629,14 +635,16 @@ static int ipath_sd7220_reg_mod(struct ipath_devdata *dd, int sdnum, u32 loc,
unsigned long flags;
switch (sdnum) {
- case IB_7220_SERDES :
- trans = dd->ipath_kregs->kr_ib_epbtrans;
+ case IB_7220_SERDES:
+ trans = kr_ibsd_epb_transaction_reg;
break;
- case PCIE_SERDES0 :
- case PCIE_SERDES1 :
- trans = dd->ipath_kregs->kr_pcie_epbtrans;
+
+ case PCIE_SERDES0:
+ case PCIE_SERDES1:
+ trans = kr_pciesd_epb_transaction_reg;
break;
- default :
+
+ default:
return -1;
}
@@ -644,23 +652,23 @@ static int ipath_sd7220_reg_mod(struct ipath_devdata *dd, int sdnum, u32 loc,
* All access is locked in software (vs other host threads) and
* hardware (vs uC access).
*/
- spin_lock_irqsave(&dd->ipath_sdepb_lock, flags);
+ spin_lock_irqsave(&dd->cspec->sdepb_lock, flags);
owned = epb_access(dd, sdnum, 1);
if (owned < 0) {
- spin_unlock_irqrestore(&dd->ipath_sdepb_lock, flags);
+ spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags);
return -1;
}
ret = 0;
for (tries = EPB_TRANS_TRIES; tries; --tries) {
- transval = ipath_read_kreg32(dd, trans);
+ transval = qib_read_kreg32(dd, trans);
if (transval & EPB_TRANS_RDY)
break;
udelay(5);
}
if (tries > 0) {
- tries = 1; /* to make read-skip work */
+ tries = 1; /* to make read-skip work */
if (mask != 0xFF) {
/*
* Not a pure write, so need to read.
@@ -688,7 +696,7 @@ static int ipath_sd7220_reg_mod(struct ipath_devdata *dd, int sdnum, u32 loc,
else
ret = transval & EPB_DATA_MASK;
- spin_unlock_irqrestore(&dd->ipath_sdepb_lock, flags);
+ spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags);
if (tries <= 0)
ret = -1;
return ret;
@@ -707,7 +715,7 @@ static int ipath_sd7220_reg_mod(struct ipath_devdata *dd, int sdnum, u32 loc,
#define EPB_RAMDATA EPB_LOC(6, 0, 5)
/* Transfer date to/from uC Program RAM of IB or PCIe SerDes */
-static int ipath_sd7220_ram_xfer(struct ipath_devdata *dd, int sdnum, u32 loc,
+static int qib_sd7220_ram_xfer(struct qib_devdata *dd, int sdnum, u32 loc,
u8 *buf, int cnt, int rd_notwr)
{
u16 trans;
@@ -723,29 +731,28 @@ static int ipath_sd7220_ram_xfer(struct ipath_devdata *dd, int sdnum, u32 loc,
/* Pick appropriate transaction reg and "Chip select" for this serdes */
switch (sdnum) {
- case IB_7220_SERDES :
+ case IB_7220_SERDES:
csbit = 1ULL << EPB_IB_UC_CS_SHF;
- trans = dd->ipath_kregs->kr_ib_epbtrans;
+ trans = kr_ibsd_epb_transaction_reg;
break;
- case PCIE_SERDES0 :
- case PCIE_SERDES1 :
+
+ case PCIE_SERDES0:
+ case PCIE_SERDES1:
/* PCIe SERDES has uC "chip select" in different bit, too */
csbit = 1ULL << EPB_PCIE_UC_CS_SHF;
- trans = dd->ipath_kregs->kr_pcie_epbtrans;
+ trans = kr_pciesd_epb_transaction_reg;
break;
- default :
+
+ default:
return -1;
}
op = rd_notwr ? "Rd" : "Wr";
- spin_lock_irqsave(&dd->ipath_sdepb_lock, flags);
+ spin_lock_irqsave(&dd->cspec->sdepb_lock, flags);
owned = epb_access(dd, sdnum, 1);
if (owned < 0) {
- spin_unlock_irqrestore(&dd->ipath_sdepb_lock, flags);
- ipath_dbg("Could not get %s access to %s EPB: %X, loc %X\n",
- op, (sdnum == IB_7220_SERDES) ? "IB" : "PCIe",
- owned, loc);
+ spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags);
return -1;
}
@@ -758,16 +765,14 @@ static int ipath_sd7220_ram_xfer(struct ipath_devdata *dd, int sdnum, u32 loc,
*/
addr = loc & 0x1FFF;
for (tries = EPB_TRANS_TRIES; tries; --tries) {
- transval = ipath_read_kreg32(dd, trans);
+ transval = qib_read_kreg32(dd, trans);
if (transval & EPB_TRANS_RDY)
break;
udelay(5);
}
sofar = 0;
- if (tries <= 0)
- ipath_dbg("No initial RDY on EPB access request\n");
- else {
+ if (tries > 0) {
/*
* Every "memory" access is doubly-indirect.
* We set two bytes of address, then read/write
@@ -778,8 +783,6 @@ static int ipath_sd7220_ram_xfer(struct ipath_devdata *dd, int sdnum, u32 loc,
transval = csbit | EPB_UC_CTL |
(rd_notwr ? EPB_ROM_R : EPB_ROM_W);
tries = epb_trans(dd, trans, transval, &transval);
- if (tries <= 0)
- ipath_dbg("No EPB response to uC %s cmd\n", op);
while (tries > 0 && sofar < cnt) {
if (!sofar) {
/* Only set address at start of chunk */
@@ -787,18 +790,14 @@ static int ipath_sd7220_ram_xfer(struct ipath_devdata *dd, int sdnum, u32 loc,
transval = csbit | EPB_MADDRH | addrbyte;
tries = epb_trans(dd, trans, transval,
&transval);
- if (tries <= 0) {
- ipath_dbg("No EPB response ADDRH\n");
+ if (tries <= 0)
break;
- }
addrbyte = (addr + sofar) & 0xFF;
transval = csbit | EPB_MADDRL | addrbyte;
tries = epb_trans(dd, trans, transval,
&transval);
- if (tries <= 0) {
- ipath_dbg("No EPB response ADDRL\n");
+ if (tries <= 0)
break;
- }
}
if (rd_notwr)
@@ -806,10 +805,8 @@ static int ipath_sd7220_ram_xfer(struct ipath_devdata *dd, int sdnum, u32 loc,
else
transval = csbit | EPB_ROMDATA | buf[sofar];
tries = epb_trans(dd, trans, transval, &transval);
- if (tries <= 0) {
- ipath_dbg("No EPB response DATA\n");
+ if (tries <= 0)
break;
- }
if (rd_notwr)
buf[sofar] = transval & EPB_DATA_MASK;
++sofar;
@@ -817,8 +814,6 @@ static int ipath_sd7220_ram_xfer(struct ipath_devdata *dd, int sdnum, u32 loc,
/* Finally, clear control-bit for Read or Write */
transval = csbit | EPB_UC_CTL;
tries = epb_trans(dd, trans, transval, &transval);
- if (tries <= 0)
- ipath_dbg("No EPB response to drop of uC %s cmd\n", op);
}
ret = sofar;
@@ -826,18 +821,16 @@ static int ipath_sd7220_ram_xfer(struct ipath_devdata *dd, int sdnum, u32 loc,
if (epb_access(dd, sdnum, -1) < 0)
ret = -1;
- spin_unlock_irqrestore(&dd->ipath_sdepb_lock, flags);
- if (tries <= 0) {
- ipath_dbg("SERDES PRAM %s failed after %d bytes\n", op, sofar);
+ spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags);
+ if (tries <= 0)
ret = -1;
- }
return ret;
}
#define PROG_CHUNK 64
-int ipath_sd7220_prog_ld(struct ipath_devdata *dd, int sdnum,
- u8 *img, int len, int offset)
+int qib_sd7220_prog_ld(struct qib_devdata *dd, int sdnum,
+ u8 *img, int len, int offset)
{
int cnt, sofar, req;
@@ -846,7 +839,7 @@ int ipath_sd7220_prog_ld(struct ipath_devdata *dd, int sdnum,
req = len - sofar;
if (req > PROG_CHUNK)
req = PROG_CHUNK;
- cnt = ipath_sd7220_ram_xfer(dd, sdnum, offset + sofar,
+ cnt = qib_sd7220_ram_xfer(dd, sdnum, offset + sofar,
img + sofar, req, 0);
if (cnt < req) {
sofar = -1;
@@ -860,8 +853,8 @@ int ipath_sd7220_prog_ld(struct ipath_devdata *dd, int sdnum,
#define VFY_CHUNK 64
#define SD_PRAM_ERROR_LIMIT 42
-int ipath_sd7220_prog_vfy(struct ipath_devdata *dd, int sdnum,
- const u8 *img, int len, int offset)
+int qib_sd7220_prog_vfy(struct qib_devdata *dd, int sdnum,
+ const u8 *img, int len, int offset)
{
int cnt, sofar, req, idx, errors;
unsigned char readback[VFY_CHUNK];
@@ -872,7 +865,7 @@ int ipath_sd7220_prog_vfy(struct ipath_devdata *dd, int sdnum,
req = len - sofar;
if (req > VFY_CHUNK)
req = VFY_CHUNK;
- cnt = ipath_sd7220_ram_xfer(dd, sdnum, sofar + offset,
+ cnt = qib_sd7220_ram_xfer(dd, sdnum, sofar + offset,
readback, req, 1);
if (cnt < req) {
/* failed in read itself */
@@ -888,11 +881,13 @@ int ipath_sd7220_prog_vfy(struct ipath_devdata *dd, int sdnum,
return errors ? -errors : sofar;
}
-/* IRQ not set up at this point in init, so we poll. */
+/*
+ * IRQ not set up at this point in init, so we poll.
+ */
#define IB_SERDES_TRIM_DONE (1ULL << 11)
#define TRIM_TMO (30)
-static int ipath_sd_trimdone_poll(struct ipath_devdata *dd)
+static int qib_sd_trimdone_poll(struct qib_devdata *dd)
{
int trim_tmo, ret;
uint64_t val;
@@ -903,16 +898,15 @@ static int ipath_sd_trimdone_poll(struct ipath_devdata *dd)
*/
ret = 0;
for (trim_tmo = 0; trim_tmo < TRIM_TMO; ++trim_tmo) {
- val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
+ val = qib_read_kreg64(dd, kr_ibcstatus);
if (val & IB_SERDES_TRIM_DONE) {
- ipath_cdbg(VERBOSE, "TRIMDONE after %d\n", trim_tmo);
ret = 1;
break;
}
msleep(10);
}
if (trim_tmo >= TRIM_TMO) {
- ipath_dev_err(dd, "No TRIMDONE in %d tries\n", trim_tmo);
+ qib_dev_err(dd, "No TRIMDONE in %d tries\n", trim_tmo);
ret = 0;
}
return ret;
@@ -964,8 +958,7 @@ static struct dds_init {
};
/*
- * Next, values related to Receive Equalization.
- * In comments, FDR (Full) is IB DDR, HDR (Half) is IB SDR
+ * Now the RXEQ section of the table.
*/
/* Hardware packs an element number and register address thus: */
#define RXEQ_INIT_RDESC(elt, addr) (((elt) & 0xF) | ((addr) << 4))
@@ -981,23 +974,23 @@ static struct dds_init {
#define RXEQ_SDR_ZCNT 23
static struct rxeq_init {
- u16 rdesc; /* in form used in SerDesDDSRXEQ */
+ u16 rdesc; /* in form used in SerDesDDSRXEQ */
u8 rdata[4];
} rxeq_init_vals[] = {
/* Set Rcv Eq. to Preset node */
RXEQ_VAL_ALL(7, 0x27, 0x10),
/* Set DFELTHFDR/HDR thresholds */
- RXEQ_VAL(7, 8, 0, 0, 0, 0), /* FDR */
+ RXEQ_VAL(7, 8, 0, 0, 0, 0), /* FDR, was 0, 1, 2, 3 */
RXEQ_VAL(7, 0x21, 0, 0, 0, 0), /* HDR */
- /* Set TLTHFDR/HDR threshold */
- RXEQ_VAL(7, 9, 2, 2, 2, 2), /* FDR */
- RXEQ_VAL(7, 0x23, 2, 2, 2, 2), /* HDR */
+ /* Set TLTHFDR/HDR threshold */
+ RXEQ_VAL(7, 9, 2, 2, 2, 2), /* FDR, was 0, 2, 4, 6 */
+ RXEQ_VAL(7, 0x23, 2, 2, 2, 2), /* HDR, was 0, 1, 2, 3 */
/* Set Preamp setting 2 (ZFR/ZCNT) */
- RXEQ_VAL(7, 0x1B, 12, 12, 12, 12), /* FDR */
- RXEQ_VAL(7, 0x1C, 12, 12, 12, 12), /* HDR */
+ RXEQ_VAL(7, 0x1B, 12, 12, 12, 12), /* FDR, was 12, 16, 20, 24 */
+ RXEQ_VAL(7, 0x1C, 12, 12, 12, 12), /* HDR, was 12, 16, 20, 24 */
/* Set Preamp DC gain and Setting 1 (GFR/GHR) */
- RXEQ_VAL(7, 0x1E, 0x10, 0x10, 0x10, 0x10), /* FDR */
- RXEQ_VAL(7, 0x1F, 0x10, 0x10, 0x10, 0x10), /* HDR */
+ RXEQ_VAL(7, 0x1E, 16, 16, 16, 16), /* FDR, was 16, 17, 18, 20 */
+ RXEQ_VAL(7, 0x1F, 16, 16, 16, 16), /* HDR, was 16, 17, 18, 20 */
/* Toggle RELOCK (in VCDL_CTRL0) to lock to data */
RXEQ_VAL_ALL(6, 6, 0x20), /* Set D5 High */
RXEQ_VAL_ALL(6, 6, 0), /* Set D5 Low */
@@ -1007,27 +1000,27 @@ static struct rxeq_init {
#define DDS_ROWS (16)
#define RXEQ_ROWS ARRAY_SIZE(rxeq_init_vals)
-static int ipath_sd_setvals(struct ipath_devdata *dd)
+static int qib_sd_setvals(struct qib_devdata *dd)
{
int idx, midx;
- int min_idx; /* Minimum index for this portion of table */
+ int min_idx; /* Minimum index for this portion of table */
uint32_t dds_reg_map;
u64 __iomem *taddr, *iaddr;
uint64_t data;
uint64_t sdctl;
- taddr = dd->ipath_kregbase + KR_IBSerDesMappTable;
- iaddr = dd->ipath_kregbase + dd->ipath_kregs->kr_ib_ddsrxeq;
+ taddr = dd->kregbase + kr_serdes_maptable;
+ iaddr = dd->kregbase + kr_serdes_ddsrxeq0;
/*
* Init the DDS section of the table.
* Each "row" of the table provokes NUM_DDS_REG writes, to the
* registers indicated in DDS_REG_MAP.
*/
- sdctl = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibserdesctrl);
+ sdctl = qib_read_kreg64(dd, kr_ibserdesctrl);
sdctl = (sdctl & ~(0x1f << 8)) | (NUM_DDS_REGS << 8);
sdctl = (sdctl & ~(0x1f << 13)) | (RXEQ_ROWS << 13);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_ibserdesctrl, sdctl);
+ qib_write_kreg(dd, kr_ibserdesctrl, sdctl);
/*
* Iterate down table within loop for each register to store.
@@ -1037,21 +1030,21 @@ static int ipath_sd_setvals(struct ipath_devdata *dd)
data = ((dds_reg_map & 0xF) << 4) | TX_FAST_ELT;
writeq(data, iaddr + idx);
mmiowb();
- ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
+ qib_read_kreg32(dd, kr_scratch);
dds_reg_map >>= 4;
for (midx = 0; midx < DDS_ROWS; ++midx) {
u64 __iomem *daddr = taddr + ((midx << 4) + idx);
data = dds_init_vals[midx].reg_vals[idx];
writeq(data, daddr);
mmiowb();
- ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
+ qib_read_kreg32(dd, kr_scratch);
} /* End inner for (vals for this reg, each row) */
} /* end outer for (regs to be stored) */
/*
- * Init the RXEQ section of the table. As explained above the table
- * rxeq_init_vals[], this runs in a different order, as the pattern
- * of register references is more complex, but there are only
+ * Init the RXEQ section of the table.
+ * This runs in a different order, as the pattern of
+ * register references is more complex, but there are only
* four "data" values per register.
*/
min_idx = idx; /* RXEQ indices pick up where DDS left off */
@@ -1066,13 +1059,13 @@ static int ipath_sd_setvals(struct ipath_devdata *dd)
/* Store the next RXEQ register address */
writeq(rxeq_init_vals[idx].rdesc, iaddr + didx);
mmiowb();
- ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
+ qib_read_kreg32(dd, kr_scratch);
/* Iterate through RXEQ values */
for (vidx = 0; vidx < 4; vidx++) {
data = rxeq_init_vals[idx].rdata[vidx];
writeq(data, taddr + (vidx << 6) + idx);
mmiowb();
- ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
+ qib_read_kreg32(dd, kr_scratch);
}
} /* end outer for (Reg-writes for RXEQ) */
return 0;
@@ -1085,33 +1078,18 @@ static int ipath_sd_setvals(struct ipath_devdata *dd)
#define VCDL_CTRL2(chan) EPB_LOC(chan, 6, 8)
#define START_EQ2(chan) EPB_LOC(chan, 7, 0x28)
-static int ibsd_sto_noisy(struct ipath_devdata *dd, int loc, int val, int mask)
-{
- int ret = -1;
- int sloc; /* shifted loc, for messages */
-
- loc |= (1U << EPB_IB_QUAD0_CS_SHF);
- sloc = loc >> EPB_ADDR_SHF;
-
- ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, loc, val, mask);
- if (ret < 0)
- ipath_dev_err(dd, "Write failed: elt %d,"
- " addr 0x%X, chnl %d, val 0x%02X, mask 0x%02X\n",
- (sloc & 0xF), (sloc >> 9) & 0x3f, (sloc >> 4) & 7,
- val & 0xFF, mask & 0xFF);
- return ret;
-}
-
/*
* Repeat a "store" across all channels of the IB SerDes.
* Although nominally it inherits the "read value" of the last
* channel it modified, the only really useful return is <0 for
* failure, >= 0 for success. The parameter 'loc' is assumed to
- * be the location for the channel-0 copy of the register to
- * be modified.
+ * be the location in some channel of the register to be modified
+ * The caller can specify use of the "gang write" option of EPB,
+ * in which case we use the specified channel data for any fields
+ * not explicitly written.
*/
-static int ibsd_mod_allchnls(struct ipath_devdata *dd, int loc, int val,
- int mask)
+static int ibsd_mod_allchnls(struct qib_devdata *dd, int loc, int val,
+ int mask)
{
int ret = -1;
int chnl;
@@ -1126,24 +1104,27 @@ static int ibsd_mod_allchnls(struct ipath_devdata *dd, int loc, int val,
loc |= (1U << EPB_IB_QUAD0_CS_SHF);
chnl = (loc >> (4 + EPB_ADDR_SHF)) & 7;
if (mask != 0xFF) {
- ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES,
- loc & ~EPB_GLOBAL_WR, 0, 0);
+ ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
+ loc & ~EPB_GLOBAL_WR, 0, 0);
if (ret < 0) {
int sloc = loc >> EPB_ADDR_SHF;
- ipath_dev_err(dd, "pre-read failed: elt %d,"
- " addr 0x%X, chnl %d\n", (sloc & 0xF),
- (sloc >> 9) & 0x3f, chnl);
+
+ qib_dev_err(dd, "pre-read failed: elt %d,"
+ " addr 0x%X, chnl %d\n",
+ (sloc & 0xF),
+ (sloc >> 9) & 0x3f, chnl);
return ret;
}
val = (ret & ~mask) | (val & mask);
}
loc &= ~(7 << (4+EPB_ADDR_SHF));
- ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, loc, val, 0xFF);
+ ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, val, 0xFF);
if (ret < 0) {
int sloc = loc >> EPB_ADDR_SHF;
- ipath_dev_err(dd, "Global WR failed: elt %d,"
- " addr 0x%X, val %02X\n",
- (sloc & 0xF), (sloc >> 9) & 0x3f, val);
+
+ qib_dev_err(dd, "Global WR failed: elt %d,"
+ " addr 0x%X, val %02X\n",
+ (sloc & 0xF), (sloc >> 9) & 0x3f, val);
}
return ret;
}
@@ -1151,16 +1132,17 @@ static int ibsd_mod_allchnls(struct ipath_devdata *dd, int loc, int val,
loc &= ~(7 << (4+EPB_ADDR_SHF));
loc |= (1U << EPB_IB_QUAD0_CS_SHF);
for (chnl = 0; chnl < 4; ++chnl) {
- int cloc;
- cloc = loc | (chnl << (4+EPB_ADDR_SHF));
- ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, cloc, val, mask);
+ int cloc = loc | (chnl << (4+EPB_ADDR_SHF));
+
+ ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, cloc, val, mask);
if (ret < 0) {
int sloc = loc >> EPB_ADDR_SHF;
- ipath_dev_err(dd, "Write failed: elt %d,"
- " addr 0x%X, chnl %d, val 0x%02X,"
- " mask 0x%02X\n",
- (sloc & 0xF), (sloc >> 9) & 0x3f, chnl,
- val & 0xFF, mask & 0xFF);
+
+ qib_dev_err(dd, "Write failed: elt %d,"
+ " addr 0x%X, chnl %d, val 0x%02X,"
+ " mask 0x%02X\n",
+ (sloc & 0xF), (sloc >> 9) & 0x3f, chnl,
+ val & 0xFF, mask & 0xFF);
break;
}
}
@@ -1171,7 +1153,7 @@ static int ibsd_mod_allchnls(struct ipath_devdata *dd, int loc, int val,
* Set the Tx values normally modified by IBC in IB1.2 mode to default
* values, as gotten from first row of init table.
*/
-static int set_dds_vals(struct ipath_devdata *dd, struct dds_init *ddi)
+static int set_dds_vals(struct qib_devdata *dd, struct dds_init *ddi)
{
int ret;
int idx, reg, data;
@@ -1194,7 +1176,7 @@ static int set_dds_vals(struct ipath_devdata *dd, struct dds_init *ddi)
* Set the Rx values normally modified by IBC in IB1.2 mode to default
* values, as gotten from selected column of init table.
*/
-static int set_rxeq_vals(struct ipath_devdata *dd, int vsel)
+static int set_rxeq_vals(struct qib_devdata *dd, int vsel)
{
int ret;
int ridx;
@@ -1202,6 +1184,7 @@ static int set_rxeq_vals(struct ipath_devdata *dd, int vsel)
for (ridx = 0; ridx < cnt; ++ridx) {
int elt, reg, val, loc;
+
elt = rxeq_init_vals[ridx].rdesc & 0xF;
reg = rxeq_init_vals[ridx].rdesc >> 4;
loc = EPB_LOC(0, elt, reg);
@@ -1217,83 +1200,66 @@ static int set_rxeq_vals(struct ipath_devdata *dd, int vsel)
/*
* Set the default values (row 0) for DDR Driver Demphasis.
* we do this initially and whenever we turn off IB-1.2
+ *
* The "default" values for Rx equalization are also stored to
* SerDes registers. Formerly (and still default), we used set 2.
* For experimenting with cables and link-partners, we allow changing
* that via a module parameter.
*/
-static unsigned ipath_rxeq_set = 2;
-module_param_named(rxeq_default_set, ipath_rxeq_set, uint,
- S_IWUSR | S_IRUGO);
+static unsigned qib_rxeq_set = 2;
+module_param_named(rxeq_default_set, qib_rxeq_set, uint,
+ S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(rxeq_default_set,
- "Which set [0..3] of Rx Equalization values is default");
+ "Which set [0..3] of Rx Equalization values is default");
-static int ipath_internal_presets(struct ipath_devdata *dd)
+static int qib_internal_presets(struct qib_devdata *dd)
{
int ret = 0;
ret = set_dds_vals(dd, dds_init_vals + DDS_3M);
if (ret < 0)
- ipath_dev_err(dd, "Failed to set default DDS values\n");
- ret = set_rxeq_vals(dd, ipath_rxeq_set & 3);
+ qib_dev_err(dd, "Failed to set default DDS values\n");
+ ret = set_rxeq_vals(dd, qib_rxeq_set & 3);
if (ret < 0)
- ipath_dev_err(dd, "Failed to set default RXEQ values\n");
+ qib_dev_err(dd, "Failed to set default RXEQ values\n");
return ret;
}
-int ipath_sd7220_presets(struct ipath_devdata *dd)
+int qib_sd7220_presets(struct qib_devdata *dd)
{
int ret = 0;
- if (!dd->ipath_presets_needed)
+ if (!dd->cspec->presets_needed)
return ret;
- dd->ipath_presets_needed = 0;
+ dd->cspec->presets_needed = 0;
/* Assert uC reset, so we don't clash with it. */
- ipath_ibsd_reset(dd, 1);
+ qib_ibsd_reset(dd, 1);
udelay(2);
- ipath_sd_trimdone_monitor(dd, "link-down");
+ qib_sd_trimdone_monitor(dd, "link-down");
- ret = ipath_internal_presets(dd);
-return ret;
+ ret = qib_internal_presets(dd);
+ return ret;
}
-static int ipath_sd_trimself(struct ipath_devdata *dd, int val)
+static int qib_sd_trimself(struct qib_devdata *dd, int val)
{
- return ibsd_sto_noisy(dd, CMUCTRL5, val, 0xFF);
+ int loc = CMUCTRL5 | (1U << EPB_IB_QUAD0_CS_SHF);
+
+ return qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, val, 0xFF);
}
-static int ipath_sd_early(struct ipath_devdata *dd)
+static int qib_sd_early(struct qib_devdata *dd)
{
- int ret = -1; /* Default failed */
- int chnl;
+ int ret;
- for (chnl = 0; chnl < 4; ++chnl) {
- ret = ibsd_sto_noisy(dd, RXHSCTRL0(chnl), 0xD4, 0xFF);
- if (ret < 0)
- goto bail;
- }
- for (chnl = 0; chnl < 4; ++chnl) {
- ret = ibsd_sto_noisy(dd, VCDL_DAC2(chnl), 0x2D, 0xFF);
- if (ret < 0)
- goto bail;
- }
- /* more fine-tuning of what will be default */
- for (chnl = 0; chnl < 4; ++chnl) {
- ret = ibsd_sto_noisy(dd, VCDL_CTRL2(chnl), 3, 0xF);
- if (ret < 0)
- goto bail;
- }
- for (chnl = 0; chnl < 4; ++chnl) {
- ret = ibsd_sto_noisy(dd, START_EQ1(chnl), 0x10, 0xFF);
- if (ret < 0)
- goto bail;
- }
- for (chnl = 0; chnl < 4; ++chnl) {
- ret = ibsd_sto_noisy(dd, START_EQ2(chnl), 0x30, 0xFF);
- if (ret < 0)
- goto bail;
- }
+ ret = ibsd_mod_allchnls(dd, RXHSCTRL0(0) | EPB_GLOBAL_WR, 0xD4, 0xFF);
+ if (ret < 0)
+ goto bail;
+ ret = ibsd_mod_allchnls(dd, START_EQ1(0) | EPB_GLOBAL_WR, 0x10, 0xFF);
+ if (ret < 0)
+ goto bail;
+ ret = ibsd_mod_allchnls(dd, START_EQ2(0) | EPB_GLOBAL_WR, 0x30, 0xFF);
bail:
return ret;
}
@@ -1302,50 +1268,53 @@ bail:
#define LDOUTCTRL1(chnl) EPB_LOC(chnl, 7, 6)
#define RXHSSTATUS(chnl) EPB_LOC(chnl, 6, 0xF)
-static int ipath_sd_dactrim(struct ipath_devdata *dd)
+static int qib_sd_dactrim(struct qib_devdata *dd)
{
- int ret = -1; /* Default failed */
- int chnl;
+ int ret;
+
+ ret = ibsd_mod_allchnls(dd, VCDL_DAC2(0) | EPB_GLOBAL_WR, 0x2D, 0xFF);
+ if (ret < 0)
+ goto bail;
+
+ /* more fine-tuning of what will be default */
+ ret = ibsd_mod_allchnls(dd, VCDL_CTRL2(0), 3, 0xF);
+ if (ret < 0)
+ goto bail;
+
+ ret = ibsd_mod_allchnls(dd, BACTRL(0) | EPB_GLOBAL_WR, 0x40, 0xFF);
+ if (ret < 0)
+ goto bail;
+
+ ret = ibsd_mod_allchnls(dd, LDOUTCTRL1(0) | EPB_GLOBAL_WR, 0x04, 0xFF);
+ if (ret < 0)
+ goto bail;
+
+ ret = ibsd_mod_allchnls(dd, RXHSSTATUS(0) | EPB_GLOBAL_WR, 0x04, 0xFF);
+ if (ret < 0)
+ goto bail;
- for (chnl = 0; chnl < 4; ++chnl) {
- ret = ibsd_sto_noisy(dd, BACTRL(chnl), 0x40, 0xFF);
- if (ret < 0)
- goto bail;
- }
- for (chnl = 0; chnl < 4; ++chnl) {
- ret = ibsd_sto_noisy(dd, LDOUTCTRL1(chnl), 0x04, 0xFF);
- if (ret < 0)
- goto bail;
- }
- for (chnl = 0; chnl < 4; ++chnl) {
- ret = ibsd_sto_noisy(dd, RXHSSTATUS(chnl), 0x04, 0xFF);
- if (ret < 0)
- goto bail;
- }
/*
- * delay for max possible number of steps, with slop.
+ * Delay for max possible number of steps, with slop.
* Each step is about 4usec.
*/
udelay(415);
- for (chnl = 0; chnl < 4; ++chnl) {
- ret = ibsd_sto_noisy(dd, LDOUTCTRL1(chnl), 0x00, 0xFF);
- if (ret < 0)
- goto bail;
- }
+
+ ret = ibsd_mod_allchnls(dd, LDOUTCTRL1(0) | EPB_GLOBAL_WR, 0x00, 0xFF);
+
bail:
return ret;
}
#define RELOCK_FIRST_MS 3
#define RXLSPPM(chan) EPB_LOC(chan, 0, 2)
-void ipath_toggle_rclkrls(struct ipath_devdata *dd)
+void toggle_7220_rclkrls(struct qib_devdata *dd)
{
int loc = RXLSPPM(0) | EPB_GLOBAL_WR;
int ret;
ret = ibsd_mod_allchnls(dd, loc, 0, 0x80);
if (ret < 0)
- ipath_dev_err(dd, "RCLKRLS failed to clear D7\n");
+ qib_dev_err(dd, "RCLKRLS failed to clear D7\n");
else {
udelay(1);
ibsd_mod_allchnls(dd, loc, 0x80, 0x80);
@@ -1354,109 +1323,91 @@ void ipath_toggle_rclkrls(struct ipath_devdata *dd)
udelay(1);
ret = ibsd_mod_allchnls(dd, loc, 0, 0x80);
if (ret < 0)
- ipath_dev_err(dd, "RCLKRLS failed to clear D7\n");
+ qib_dev_err(dd, "RCLKRLS failed to clear D7\n");
else {
udelay(1);
ibsd_mod_allchnls(dd, loc, 0x80, 0x80);
}
/* Now reset xgxs and IBC to complete the recovery */
- dd->ipath_f_xgxs_reset(dd);
+ dd->f_xgxs_reset(dd->pport);
}
/*
* Shut down the timer that polls for relock occasions, if needed
- * this is "hooked" from ipath_7220_quiet_serdes(), which is called
- * just before ipath_shutdown_device() in ipath_driver.c shuts down all
+ * this is "hooked" from qib_7220_quiet_serdes(), which is called
+ * just before qib_shutdown_device() in qib_driver.c shuts down all
* the other timers
*/
-void ipath_shutdown_relock_poll(struct ipath_devdata *dd)
+void shutdown_7220_relock_poll(struct qib_devdata *dd)
{
- struct ipath_relock *irp = &dd->ipath_relock_singleton;
- if (atomic_read(&irp->ipath_relock_timer_active)) {
- del_timer_sync(&irp->ipath_relock_timer);
- atomic_set(&irp->ipath_relock_timer_active, 0);
- }
+ if (dd->cspec->relock_timer_active)
+ del_timer_sync(&dd->cspec->relock_timer);
}
-static unsigned ipath_relock_by_timer = 1;
-module_param_named(relock_by_timer, ipath_relock_by_timer, uint,
- S_IWUSR | S_IRUGO);
+static unsigned qib_relock_by_timer = 1;
+module_param_named(relock_by_timer, qib_relock_by_timer, uint,
+ S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(relock_by_timer, "Allow relock attempt if link not up");
-static void ipath_run_relock(unsigned long opaque)
+static void qib_run_relock(unsigned long opaque)
{
- struct ipath_devdata *dd = (struct ipath_devdata *)opaque;
- struct ipath_relock *irp = &dd->ipath_relock_singleton;
- u64 val, ltstate;
-
- if (!(dd->ipath_flags & IPATH_INITTED)) {
- /* Not yet up, just reenable the timer for later */
- irp->ipath_relock_interval = HZ;
- mod_timer(&irp->ipath_relock_timer, jiffies + HZ);
- return;
- }
+ struct qib_devdata *dd = (struct qib_devdata *)opaque;
+ struct qib_pportdata *ppd = dd->pport;
+ struct qib_chip_specific *cs = dd->cspec;
+ int timeoff;
/*
- * Check link-training state for "stuck" state.
+ * Check link-training state for "stuck" state, when down.
* if found, try relock and schedule another try at
* exponentially growing delay, maxed at one second.
* if not stuck, our work is done.
*/
- val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
- ltstate = ipath_ib_linktrstate(dd, val);
-
- if (ltstate <= INFINIPATH_IBCS_LT_STATE_CFGWAITRMT
- && ltstate != INFINIPATH_IBCS_LT_STATE_LINKUP) {
- int timeoff;
- /* Not up yet. Try again, if allowed by module-param */
- if (ipath_relock_by_timer) {
- if (dd->ipath_flags & IPATH_IB_AUTONEG_INPROG)
- ipath_cdbg(VERBOSE, "Skip RELOCK in AUTONEG\n");
- else if (!(dd->ipath_flags & IPATH_IB_LINK_DISABLED)) {
- ipath_cdbg(VERBOSE, "RELOCK\n");
- ipath_toggle_rclkrls(dd);
- }
+ if ((dd->flags & QIB_INITTED) && !(ppd->lflags &
+ (QIBL_IB_AUTONEG_INPROG | QIBL_LINKINIT | QIBL_LINKARMED |
+ QIBL_LINKACTIVE))) {
+ if (qib_relock_by_timer) {
+ if (!(ppd->lflags & QIBL_IB_LINK_DISABLED))
+ toggle_7220_rclkrls(dd);
}
/* re-set timer for next check */
- timeoff = irp->ipath_relock_interval << 1;
+ timeoff = cs->relock_interval << 1;
if (timeoff > HZ)
timeoff = HZ;
- irp->ipath_relock_interval = timeoff;
-
- mod_timer(&irp->ipath_relock_timer, jiffies + timeoff);
- } else {
- /* Up, so no more need to check so often */
- mod_timer(&irp->ipath_relock_timer, jiffies + HZ);
- }
+ cs->relock_interval = timeoff;
+ } else
+ timeoff = HZ;
+ mod_timer(&cs->relock_timer, jiffies + timeoff);
}
-void ipath_set_relock_poll(struct ipath_devdata *dd, int ibup)
+void set_7220_relock_poll(struct qib_devdata *dd, int ibup)
{
- struct ipath_relock *irp = &dd->ipath_relock_singleton;
+ struct qib_chip_specific *cs = dd->cspec;
- if (ibup > 0) {
- /* we are now up, so relax timer to 1 second interval */
- if (atomic_read(&irp->ipath_relock_timer_active))
- mod_timer(&irp->ipath_relock_timer, jiffies + HZ);
+ if (ibup) {
+ /* We are now up, relax timer to 1 second interval */
+ if (cs->relock_timer_active) {
+ cs->relock_interval = HZ;
+ mod_timer(&cs->relock_timer, jiffies + HZ);
+ }
} else {
/* Transition to down, (re-)set timer to short interval. */
- int timeout;
- timeout = (HZ * ((ibup == -1) ? 1000 : RELOCK_FIRST_MS))/1000;
+ unsigned int timeout;
+
+ timeout = msecs_to_jiffies(RELOCK_FIRST_MS);
if (timeout == 0)
timeout = 1;
/* If timer has not yet been started, do so. */
- if (atomic_inc_return(&irp->ipath_relock_timer_active) == 1) {
- init_timer(&irp->ipath_relock_timer);
- irp->ipath_relock_timer.function = ipath_run_relock;
- irp->ipath_relock_timer.data = (unsigned long) dd;
- irp->ipath_relock_interval = timeout;
- irp->ipath_relock_timer.expires = jiffies + timeout;
- add_timer(&irp->ipath_relock_timer);
+ if (!cs->relock_timer_active) {
+ cs->relock_timer_active = 1;
+ init_timer(&cs->relock_timer);
+ cs->relock_timer.function = qib_run_relock;
+ cs->relock_timer.data = (unsigned long) dd;
+ cs->relock_interval = timeout;
+ cs->relock_timer.expires = jiffies + timeout;
+ add_timer(&cs->relock_timer);
} else {
- irp->ipath_relock_interval = timeout;
- mod_timer(&irp->ipath_relock_timer, jiffies + timeout);
- atomic_dec(&irp->ipath_relock_timer_active);
+ cs->relock_interval = timeout;
+ mod_timer(&cs->relock_timer, jiffies + timeout);
}
}
}
-
diff --git a/drivers/infiniband/hw/ipath/ipath_sd7220_img.c b/drivers/infiniband/hw/qib/qib_sd7220_img.c
index 5ef59da9270a..a1118fbd2370 100644
--- a/drivers/infiniband/hw/ipath/ipath_sd7220_img.c
+++ b/drivers/infiniband/hw/qib/qib_sd7220_img.c
@@ -38,11 +38,10 @@
#include <linux/pci.h>
#include <linux/delay.h>
-#include "ipath_kernel.h"
-#include "ipath_registers.h"
-#include "ipath_7220.h"
+#include "qib.h"
+#include "qib_7220.h"
-static unsigned char ipath_sd7220_ib_img[] = {
+static unsigned char qib_sd7220_ib_img[] = {
/*0000*/0x02, 0x0A, 0x29, 0x02, 0x0A, 0x87, 0xE5, 0xE6,
0x30, 0xE6, 0x04, 0x7F, 0x01, 0x80, 0x02, 0x7F,
/*0010*/0x00, 0xE5, 0xE2, 0x30, 0xE4, 0x04, 0x7E, 0x01,
@@ -1069,14 +1068,14 @@ static unsigned char ipath_sd7220_ib_img[] = {
0x01, 0x20, 0x11, 0x00, 0x04, 0x20, 0x00, 0x81
};
-int ipath_sd7220_ib_load(struct ipath_devdata *dd)
+int qib_sd7220_ib_load(struct qib_devdata *dd)
{
- return ipath_sd7220_prog_ld(dd, IB_7220_SERDES, ipath_sd7220_ib_img,
- sizeof(ipath_sd7220_ib_img), 0);
+ return qib_sd7220_prog_ld(dd, IB_7220_SERDES, qib_sd7220_ib_img,
+ sizeof(qib_sd7220_ib_img), 0);
}
-int ipath_sd7220_ib_vfy(struct ipath_devdata *dd)
+int qib_sd7220_ib_vfy(struct qib_devdata *dd)
{
- return ipath_sd7220_prog_vfy(dd, IB_7220_SERDES, ipath_sd7220_ib_img,
- sizeof(ipath_sd7220_ib_img), 0);
+ return qib_sd7220_prog_vfy(dd, IB_7220_SERDES, qib_sd7220_ib_img,
+ sizeof(qib_sd7220_ib_img), 0);
}
diff --git a/drivers/infiniband/hw/qib/qib_sdma.c b/drivers/infiniband/hw/qib/qib_sdma.c
new file mode 100644
index 000000000000..b8456881f7f6
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_sdma.c
@@ -0,0 +1,973 @@
+/*
+ * Copyright (c) 2007, 2008, 2009, 2010 QLogic Corporation. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/netdevice.h>
+
+#include "qib.h"
+#include "qib_common.h"
+
+/* default pio off, sdma on */
+static ushort sdma_descq_cnt = 256;
+module_param_named(sdma_descq_cnt, sdma_descq_cnt, ushort, S_IRUGO);
+MODULE_PARM_DESC(sdma_descq_cnt, "Number of SDMA descq entries");
+
+/*
+ * Bits defined in the send DMA descriptor.
+ */
+#define SDMA_DESC_LAST (1ULL << 11)
+#define SDMA_DESC_FIRST (1ULL << 12)
+#define SDMA_DESC_DMA_HEAD (1ULL << 13)
+#define SDMA_DESC_USE_LARGE_BUF (1ULL << 14)
+#define SDMA_DESC_INTR (1ULL << 15)
+#define SDMA_DESC_COUNT_LSB 16
+#define SDMA_DESC_GEN_LSB 30
+
+char *qib_sdma_state_names[] = {
+ [qib_sdma_state_s00_hw_down] = "s00_HwDown",
+ [qib_sdma_state_s10_hw_start_up_wait] = "s10_HwStartUpWait",
+ [qib_sdma_state_s20_idle] = "s20_Idle",
+ [qib_sdma_state_s30_sw_clean_up_wait] = "s30_SwCleanUpWait",
+ [qib_sdma_state_s40_hw_clean_up_wait] = "s40_HwCleanUpWait",
+ [qib_sdma_state_s50_hw_halt_wait] = "s50_HwHaltWait",
+ [qib_sdma_state_s99_running] = "s99_Running",
+};
+
+char *qib_sdma_event_names[] = {
+ [qib_sdma_event_e00_go_hw_down] = "e00_GoHwDown",
+ [qib_sdma_event_e10_go_hw_start] = "e10_GoHwStart",
+ [qib_sdma_event_e20_hw_started] = "e20_HwStarted",
+ [qib_sdma_event_e30_go_running] = "e30_GoRunning",
+ [qib_sdma_event_e40_sw_cleaned] = "e40_SwCleaned",
+ [qib_sdma_event_e50_hw_cleaned] = "e50_HwCleaned",
+ [qib_sdma_event_e60_hw_halted] = "e60_HwHalted",
+ [qib_sdma_event_e70_go_idle] = "e70_GoIdle",
+ [qib_sdma_event_e7220_err_halted] = "e7220_ErrHalted",
+ [qib_sdma_event_e7322_err_halted] = "e7322_ErrHalted",
+ [qib_sdma_event_e90_timer_tick] = "e90_TimerTick",
+};
+
+/* declare all statics here rather than keep sorting */
+static int alloc_sdma(struct qib_pportdata *);
+static void sdma_complete(struct kref *);
+static void sdma_finalput(struct qib_sdma_state *);
+static void sdma_get(struct qib_sdma_state *);
+static void sdma_put(struct qib_sdma_state *);
+static void sdma_set_state(struct qib_pportdata *, enum qib_sdma_states);
+static void sdma_start_sw_clean_up(struct qib_pportdata *);
+static void sdma_sw_clean_up_task(unsigned long);
+static void unmap_desc(struct qib_pportdata *, unsigned);
+
+static void sdma_get(struct qib_sdma_state *ss)
+{
+ kref_get(&ss->kref);
+}
+
+static void sdma_complete(struct kref *kref)
+{
+ struct qib_sdma_state *ss =
+ container_of(kref, struct qib_sdma_state, kref);
+
+ complete(&ss->comp);
+}
+
+static void sdma_put(struct qib_sdma_state *ss)
+{
+ kref_put(&ss->kref, sdma_complete);
+}
+
+static void sdma_finalput(struct qib_sdma_state *ss)
+{
+ sdma_put(ss);
+ wait_for_completion(&ss->comp);
+}
+
+/*
+ * Complete all the sdma requests on the active list, in the correct
+ * order, and with appropriate processing. Called when cleaning up
+ * after sdma shutdown, and when new sdma requests are submitted for
+ * a link that is down. This matches what is done for requests
+ * that complete normally; it's just the full list.
+ *
+ * Must be called with sdma_lock held
+ */
+static void clear_sdma_activelist(struct qib_pportdata *ppd)
+{
+ struct qib_sdma_txreq *txp, *txp_next;
+
+ list_for_each_entry_safe(txp, txp_next, &ppd->sdma_activelist, list) {
+ list_del_init(&txp->list);
+ if (txp->flags & QIB_SDMA_TXREQ_F_FREEDESC) {
+ unsigned idx;
+
+ idx = txp->start_idx;
+ while (idx != txp->next_descq_idx) {
+ unmap_desc(ppd, idx);
+ if (++idx == ppd->sdma_descq_cnt)
+ idx = 0;
+ }
+ }
+ if (txp->callback)
+ (*txp->callback)(txp, QIB_SDMA_TXREQ_S_ABORTED);
+ }
+}
+
+static void sdma_sw_clean_up_task(unsigned long opaque)
+{
+ struct qib_pportdata *ppd = (struct qib_pportdata *) opaque;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ppd->sdma_lock, flags);
+
+ /*
+ * At this point, the following should always be true:
+ * - We are halted, so no more descriptors are getting retired.
+ * - We are not running, so no one is submitting new work.
+ * - Only we can send the e40_sw_cleaned, so we can't start
+ * running again until we say so. So, the active list and
+ * descq are ours to play with.
+ */
+
+ /* Process all retired requests. */
+ qib_sdma_make_progress(ppd);
+
+ clear_sdma_activelist(ppd);
+
+ /*
+ * Resync count of added and removed. It is VERY important that
+ * sdma_descq_removed NEVER decrement - user_sdma depends on it.
+ */
+ ppd->sdma_descq_removed = ppd->sdma_descq_added;
+
+ /*
+ * Reset our notion of head and tail.
+ * Note that the HW registers will be reset when switching states
+ * due to calling __qib_sdma_process_event() below.
+ */
+ ppd->sdma_descq_tail = 0;
+ ppd->sdma_descq_head = 0;
+ ppd->sdma_head_dma[0] = 0;
+ ppd->sdma_generation = 0;
+
+ __qib_sdma_process_event(ppd, qib_sdma_event_e40_sw_cleaned);
+
+ spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+}
+
+/*
+ * This is called when changing to state qib_sdma_state_s10_hw_start_up_wait
+ * as a result of send buffer errors or send DMA descriptor errors.
+ * We want to disarm the buffers in these cases.
+ */
+static void sdma_hw_start_up(struct qib_pportdata *ppd)
+{
+ struct qib_sdma_state *ss = &ppd->sdma_state;
+ unsigned bufno;
+
+ for (bufno = ss->first_sendbuf; bufno < ss->last_sendbuf; ++bufno)
+ ppd->dd->f_sendctrl(ppd, QIB_SENDCTRL_DISARM_BUF(bufno));
+
+ ppd->dd->f_sdma_hw_start_up(ppd);
+}
+
+static void sdma_sw_tear_down(struct qib_pportdata *ppd)
+{
+ struct qib_sdma_state *ss = &ppd->sdma_state;
+
+ /* Releasing this reference means the state machine has stopped. */
+ sdma_put(ss);
+}
+
+static void sdma_start_sw_clean_up(struct qib_pportdata *ppd)
+{
+ tasklet_hi_schedule(&ppd->sdma_sw_clean_up_task);
+}
+
+static void sdma_set_state(struct qib_pportdata *ppd,
+ enum qib_sdma_states next_state)
+{
+ struct qib_sdma_state *ss = &ppd->sdma_state;
+ struct sdma_set_state_action *action = ss->set_state_action;
+ unsigned op = 0;
+
+ /* debugging bookkeeping */
+ ss->previous_state = ss->current_state;
+ ss->previous_op = ss->current_op;
+
+ ss->current_state = next_state;
+
+ if (action[next_state].op_enable)
+ op |= QIB_SDMA_SENDCTRL_OP_ENABLE;
+
+ if (action[next_state].op_intenable)
+ op |= QIB_SDMA_SENDCTRL_OP_INTENABLE;
+
+ if (action[next_state].op_halt)
+ op |= QIB_SDMA_SENDCTRL_OP_HALT;
+
+ if (action[next_state].op_drain)
+ op |= QIB_SDMA_SENDCTRL_OP_DRAIN;
+
+ if (action[next_state].go_s99_running_tofalse)
+ ss->go_s99_running = 0;
+
+ if (action[next_state].go_s99_running_totrue)
+ ss->go_s99_running = 1;
+
+ ss->current_op = op;
+
+ ppd->dd->f_sdma_sendctrl(ppd, ss->current_op);
+}
+
+static void unmap_desc(struct qib_pportdata *ppd, unsigned head)
+{
+ __le64 *descqp = &ppd->sdma_descq[head].qw[0];
+ u64 desc[2];
+ dma_addr_t addr;
+ size_t len;
+
+ desc[0] = le64_to_cpu(descqp[0]);
+ desc[1] = le64_to_cpu(descqp[1]);
+
+ addr = (desc[1] << 32) | (desc[0] >> 32);
+ len = (desc[0] >> 14) & (0x7ffULL << 2);
+ dma_unmap_single(&ppd->dd->pcidev->dev, addr, len, DMA_TO_DEVICE);
+}
+
+static int alloc_sdma(struct qib_pportdata *ppd)
+{
+ ppd->sdma_descq_cnt = sdma_descq_cnt;
+ if (!ppd->sdma_descq_cnt)
+ ppd->sdma_descq_cnt = 256;
+
+ /* Allocate memory for SendDMA descriptor FIFO */
+ ppd->sdma_descq = dma_alloc_coherent(&ppd->dd->pcidev->dev,
+ ppd->sdma_descq_cnt * sizeof(u64[2]), &ppd->sdma_descq_phys,
+ GFP_KERNEL);
+
+ if (!ppd->sdma_descq) {
+ qib_dev_err(ppd->dd, "failed to allocate SendDMA descriptor "
+ "FIFO memory\n");
+ goto bail;
+ }
+
+ /* Allocate memory for DMA of head register to memory */
+ ppd->sdma_head_dma = dma_alloc_coherent(&ppd->dd->pcidev->dev,
+ PAGE_SIZE, &ppd->sdma_head_phys, GFP_KERNEL);
+ if (!ppd->sdma_head_dma) {
+ qib_dev_err(ppd->dd, "failed to allocate SendDMA "
+ "head memory\n");
+ goto cleanup_descq;
+ }
+ ppd->sdma_head_dma[0] = 0;
+ return 0;
+
+cleanup_descq:
+ dma_free_coherent(&ppd->dd->pcidev->dev,
+ ppd->sdma_descq_cnt * sizeof(u64[2]), (void *)ppd->sdma_descq,
+ ppd->sdma_descq_phys);
+ ppd->sdma_descq = NULL;
+ ppd->sdma_descq_phys = 0;
+bail:
+ ppd->sdma_descq_cnt = 0;
+ return -ENOMEM;
+}
+
+static void free_sdma(struct qib_pportdata *ppd)
+{
+ struct qib_devdata *dd = ppd->dd;
+
+ if (ppd->sdma_head_dma) {
+ dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
+ (void *)ppd->sdma_head_dma,
+ ppd->sdma_head_phys);
+ ppd->sdma_head_dma = NULL;
+ ppd->sdma_head_phys = 0;
+ }
+
+ if (ppd->sdma_descq) {
+ dma_free_coherent(&dd->pcidev->dev,
+ ppd->sdma_descq_cnt * sizeof(u64[2]),
+ ppd->sdma_descq, ppd->sdma_descq_phys);
+ ppd->sdma_descq = NULL;
+ ppd->sdma_descq_phys = 0;
+ }
+}
+
+static inline void make_sdma_desc(struct qib_pportdata *ppd,
+ u64 *sdmadesc, u64 addr, u64 dwlen,
+ u64 dwoffset)
+{
+
+ WARN_ON(addr & 3);
+ /* SDmaPhyAddr[47:32] */
+ sdmadesc[1] = addr >> 32;
+ /* SDmaPhyAddr[31:0] */
+ sdmadesc[0] = (addr & 0xfffffffcULL) << 32;
+ /* SDmaGeneration[1:0] */
+ sdmadesc[0] |= (ppd->sdma_generation & 3ULL) <<
+ SDMA_DESC_GEN_LSB;
+ /* SDmaDwordCount[10:0] */
+ sdmadesc[0] |= (dwlen & 0x7ffULL) << SDMA_DESC_COUNT_LSB;
+ /* SDmaBufOffset[12:2] */
+ sdmadesc[0] |= dwoffset & 0x7ffULL;
+}
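Editorial note, not part of the commit: the bit layout documented inside
make_sdma_desc() above can be sanity-checked with a small userspace sketch.
The input values below (generation 1, a 64-dword buffer at DMA address
0x12345678, zero dword offset) are illustrative assumptions; only the bit
positions come from the code and #defines above.

	/*
	 * Standalone illustration of the SDMA descriptor packing.
	 * Mirrors make_sdma_desc(); not driver code.
	 */
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t addr = 0x12345678ULL;	/* 4-byte-aligned DMA address */
		uint64_t dwlen = 0x40;		/* 64 dwords = 256 bytes */
		uint64_t gen = 1;		/* current ring generation */
		uint64_t desc[2];

		desc[1] = addr >> 32;			/* SDmaPhyAddr[47:32] */
		desc[0] = (addr & 0xfffffffcULL) << 32;	/* SDmaPhyAddr[31:0] */
		desc[0] |= (gen & 3ULL) << 30;		/* SDmaGeneration[1:0] */
		desc[0] |= (dwlen & 0x7ffULL) << 16;	/* SDmaDwordCount[10:0] */
		/* dword offset is 0, so the SDmaBufOffset bits stay clear */

		printf("desc[0]=%#llx desc[1]=%#llx\n",
		       (unsigned long long)desc[0],
		       (unsigned long long)desc[1]);
		return 0;
	}

Running it prints desc[0]=0x1234567840400000 and desc[1]=0: the low address
word lands in the upper half of the first qword, the generation in bits
31:30 and the dword count in bits 26:16, as the field comments above say.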
+
+/* sdma_lock must be held */
+int qib_sdma_make_progress(struct qib_pportdata *ppd)
+{
+ struct list_head *lp = NULL;
+ struct qib_sdma_txreq *txp = NULL;
+ struct qib_devdata *dd = ppd->dd;
+ int progress = 0;
+ u16 hwhead;
+ u16 idx = 0;
+
+ hwhead = dd->f_sdma_gethead(ppd);
+
+ /* The reason for some of the complexity of this code is that
+ * not all descriptors have corresponding txps. So, we have to
+ * be able to skip over descs until we wander into the range of
+ * the next txp on the list.
+ */
+
+ if (!list_empty(&ppd->sdma_activelist)) {
+ lp = ppd->sdma_activelist.next;
+ txp = list_entry(lp, struct qib_sdma_txreq, list);
+ idx = txp->start_idx;
+ }
+
+ while (ppd->sdma_descq_head != hwhead) {
+ /* if desc is part of this txp, unmap if needed */
+ if (txp && (txp->flags & QIB_SDMA_TXREQ_F_FREEDESC) &&
+ (idx == ppd->sdma_descq_head)) {
+ unmap_desc(ppd, ppd->sdma_descq_head);
+ if (++idx == ppd->sdma_descq_cnt)
+ idx = 0;
+ }
+
+ /* increment dequeued desc count */
+ ppd->sdma_descq_removed++;
+
+ /* advance head, wrap if needed */
+ if (++ppd->sdma_descq_head == ppd->sdma_descq_cnt)
+ ppd->sdma_descq_head = 0;
+
+ /* if now past this txp's descs, do the callback */
+ if (txp && txp->next_descq_idx == ppd->sdma_descq_head) {
+ /* remove from active list */
+ list_del_init(&txp->list);
+ if (txp->callback)
+ (*txp->callback)(txp, QIB_SDMA_TXREQ_S_OK);
+ /* see if there is another txp */
+ if (list_empty(&ppd->sdma_activelist))
+ txp = NULL;
+ else {
+ lp = ppd->sdma_activelist.next;
+ txp = list_entry(lp, struct qib_sdma_txreq,
+ list);
+ idx = txp->start_idx;
+ }
+ }
+ progress = 1;
+ }
+ if (progress)
+ qib_verbs_sdma_desc_avail(ppd, qib_sdma_descq_freecnt(ppd));
+ return progress;
+}
+
+/*
+ * This is called from interrupt context.
+ */
+void qib_sdma_intr(struct qib_pportdata *ppd)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ppd->sdma_lock, flags);
+
+ __qib_sdma_intr(ppd);
+
+ spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+}
+
+void __qib_sdma_intr(struct qib_pportdata *ppd)
+{
+ if (__qib_sdma_running(ppd))
+ qib_sdma_make_progress(ppd);
+}
+
+int qib_setup_sdma(struct qib_pportdata *ppd)
+{
+ struct qib_devdata *dd = ppd->dd;
+ unsigned long flags;
+ int ret = 0;
+
+ ret = alloc_sdma(ppd);
+ if (ret)
+ goto bail;
+
+ /* set consistent sdma state */
+ ppd->dd->f_sdma_init_early(ppd);
+ spin_lock_irqsave(&ppd->sdma_lock, flags);
+ sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
+ spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+
+ /* set up reference counting */
+ kref_init(&ppd->sdma_state.kref);
+ init_completion(&ppd->sdma_state.comp);
+
+ ppd->sdma_generation = 0;
+ ppd->sdma_descq_head = 0;
+ ppd->sdma_descq_removed = 0;
+ ppd->sdma_descq_added = 0;
+
+ INIT_LIST_HEAD(&ppd->sdma_activelist);
+
+ tasklet_init(&ppd->sdma_sw_clean_up_task, sdma_sw_clean_up_task,
+ (unsigned long)ppd);
+
+ ret = dd->f_init_sdma_regs(ppd);
+ if (ret)
+ goto bail_alloc;
+
+ qib_sdma_process_event(ppd, qib_sdma_event_e10_go_hw_start);
+
+ return 0;
+
+bail_alloc:
+ qib_teardown_sdma(ppd);
+bail:
+ return ret;
+}
+
+void qib_teardown_sdma(struct qib_pportdata *ppd)
+{
+ qib_sdma_process_event(ppd, qib_sdma_event_e00_go_hw_down);
+
+ /*
+ * This waits for the state machine to exit so it is not
+ * necessary to kill the sdma_sw_clean_up_task to make sure
+ * it is not running.
+ */
+ sdma_finalput(&ppd->sdma_state);
+
+ free_sdma(ppd);
+}
+
+int qib_sdma_running(struct qib_pportdata *ppd)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&ppd->sdma_lock, flags);
+ ret = __qib_sdma_running(ppd);
+ spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+
+ return ret;
+}
+
+/*
+ * Complete a request when sdma is not running; it is likely the only
+ * request, but to simplify the code, always queue it, then process the
+ * full activelist. We process the entire list to ensure that this
+ * particular request does get its callback, but in the correct order.
+ * Must be called with sdma_lock held
+ */
+static void complete_sdma_err_req(struct qib_pportdata *ppd,
+ struct qib_verbs_txreq *tx)
+{
+ atomic_inc(&tx->qp->s_dma_busy);
+ /* no sdma descriptors, so no unmap_desc */
+ tx->txreq.start_idx = 0;
+ tx->txreq.next_descq_idx = 0;
+ list_add_tail(&tx->txreq.list, &ppd->sdma_activelist);
+ clear_sdma_activelist(ppd);
+}
+
+/*
+ * This function queues one IB packet onto the send DMA queue per call.
+ * The caller is responsible for checking:
+ * 1) The number of send DMA descriptor entries is less than the size of
+ * the descriptor queue.
+ * 2) The IB SGE addresses and lengths are 32-bit aligned
+ * (except possibly the last SGE's length)
+ * 3) The SGE addresses are suitable for passing to dma_map_single().
+ */
+int qib_sdma_verbs_send(struct qib_pportdata *ppd,
+ struct qib_sge_state *ss, u32 dwords,
+ struct qib_verbs_txreq *tx)
+{
+ unsigned long flags;
+ struct qib_sge *sge;
+ struct qib_qp *qp;
+ int ret = 0;
+ u16 tail;
+ __le64 *descqp;
+ u64 sdmadesc[2];
+ u32 dwoffset;
+ dma_addr_t addr;
+
+ spin_lock_irqsave(&ppd->sdma_lock, flags);
+
+retry:
+ if (unlikely(!__qib_sdma_running(ppd))) {
+ complete_sdma_err_req(ppd, tx);
+ goto unlock;
+ }
+
+ if (tx->txreq.sg_count > qib_sdma_descq_freecnt(ppd)) {
+ if (qib_sdma_make_progress(ppd))
+ goto retry;
+ if (ppd->dd->flags & QIB_HAS_SDMA_TIMEOUT)
+ ppd->dd->f_sdma_set_desc_cnt(ppd,
+ ppd->sdma_descq_cnt / 2);
+ goto busy;
+ }
+
+ dwoffset = tx->hdr_dwords;
+ make_sdma_desc(ppd, sdmadesc, (u64) tx->txreq.addr, dwoffset, 0);
+
+ sdmadesc[0] |= SDMA_DESC_FIRST;
+ if (tx->txreq.flags & QIB_SDMA_TXREQ_F_USELARGEBUF)
+ sdmadesc[0] |= SDMA_DESC_USE_LARGE_BUF;
+
+ /* write to the descq */
+ tail = ppd->sdma_descq_tail;
+ descqp = &ppd->sdma_descq[tail].qw[0];
+ *descqp++ = cpu_to_le64(sdmadesc[0]);
+ *descqp++ = cpu_to_le64(sdmadesc[1]);
+
+ /* increment the tail */
+ if (++tail == ppd->sdma_descq_cnt) {
+ tail = 0;
+ descqp = &ppd->sdma_descq[0].qw[0];
+ ++ppd->sdma_generation;
+ }
+
+ tx->txreq.start_idx = tail;
+
+ sge = &ss->sge;
+ while (dwords) {
+ u32 dw;
+ u32 len;
+
+ len = dwords << 2;
+ if (len > sge->length)
+ len = sge->length;
+ if (len > sge->sge_length)
+ len = sge->sge_length;
+ BUG_ON(len == 0);
+ dw = (len + 3) >> 2;
+ addr = dma_map_single(&ppd->dd->pcidev->dev, sge->vaddr,
+ dw << 2, DMA_TO_DEVICE);
+ if (dma_mapping_error(&ppd->dd->pcidev->dev, addr))
+ goto unmap;
+ sdmadesc[0] = 0;
+ make_sdma_desc(ppd, sdmadesc, (u64) addr, dw, dwoffset);
+ /* SDmaUseLargeBuf has to be set in every descriptor */
+ if (tx->txreq.flags & QIB_SDMA_TXREQ_F_USELARGEBUF)
+ sdmadesc[0] |= SDMA_DESC_USE_LARGE_BUF;
+ /* write to the descq */
+ *descqp++ = cpu_to_le64(sdmadesc[0]);
+ *descqp++ = cpu_to_le64(sdmadesc[1]);
+
+ /* increment the tail */
+ if (++tail == ppd->sdma_descq_cnt) {
+ tail = 0;
+ descqp = &ppd->sdma_descq[0].qw[0];
+ ++ppd->sdma_generation;
+ }
+ sge->vaddr += len;
+ sge->length -= len;
+ sge->sge_length -= len;
+ if (sge->sge_length == 0) {
+ if (--ss->num_sge)
+ *sge = *ss->sg_list++;
+ } else if (sge->length == 0 && sge->mr->lkey) {
+ if (++sge->n >= QIB_SEGSZ) {
+ if (++sge->m >= sge->mr->mapsz)
+ break;
+ sge->n = 0;
+ }
+ sge->vaddr =
+ sge->mr->map[sge->m]->segs[sge->n].vaddr;
+ sge->length =
+ sge->mr->map[sge->m]->segs[sge->n].length;
+ }
+
+ dwoffset += dw;
+ dwords -= dw;
+ }
+
+ if (!tail)
+ descqp = &ppd->sdma_descq[ppd->sdma_descq_cnt].qw[0];
+ descqp -= 2;
+ descqp[0] |= cpu_to_le64(SDMA_DESC_LAST);
+ if (tx->txreq.flags & QIB_SDMA_TXREQ_F_HEADTOHOST)
+ descqp[0] |= cpu_to_le64(SDMA_DESC_DMA_HEAD);
+ if (tx->txreq.flags & QIB_SDMA_TXREQ_F_INTREQ)
+ descqp[0] |= cpu_to_le64(SDMA_DESC_INTR);
+
+ atomic_inc(&tx->qp->s_dma_busy);
+ tx->txreq.next_descq_idx = tail;
+ ppd->dd->f_sdma_update_tail(ppd, tail);
+ ppd->sdma_descq_added += tx->txreq.sg_count;
+ list_add_tail(&tx->txreq.list, &ppd->sdma_activelist);
+ goto unlock;
+
+unmap:
+ for (;;) {
+ if (!tail)
+ tail = ppd->sdma_descq_cnt - 1;
+ else
+ tail--;
+ if (tail == ppd->sdma_descq_tail)
+ break;
+ unmap_desc(ppd, tail);
+ }
+ qp = tx->qp;
+ qib_put_txreq(tx);
+ spin_lock(&qp->s_lock);
+ if (qp->ibqp.qp_type == IB_QPT_RC) {
+ /* XXX what about error sending RDMA read responses? */
+ if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)
+ qib_error_qp(qp, IB_WC_GENERAL_ERR);
+ } else if (qp->s_wqe)
+ qib_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR);
+ spin_unlock(&qp->s_lock);
+ /* return zero to process the next send work request */
+ goto unlock;
+
+busy:
+ qp = tx->qp;
+ spin_lock(&qp->s_lock);
+ if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
+ struct qib_ibdev *dev;
+
+ /*
+ * If we couldn't queue the DMA request, save the info
+ * and try again later rather than destroying the
+ * buffer and undoing the side effects of the copy.
+ */
+ tx->ss = ss;
+ tx->dwords = dwords;
+ qp->s_tx = tx;
+ dev = &ppd->dd->verbs_dev;
+ spin_lock(&dev->pending_lock);
+ if (list_empty(&qp->iowait)) {
+ struct qib_ibport *ibp;
+
+ ibp = &ppd->ibport_data;
+ ibp->n_dmawait++;
+ qp->s_flags |= QIB_S_WAIT_DMA_DESC;
+ list_add_tail(&qp->iowait, &dev->dmawait);
+ }
+ spin_unlock(&dev->pending_lock);
+ qp->s_flags &= ~QIB_S_BUSY;
+ spin_unlock(&qp->s_lock);
+ ret = -EBUSY;
+ } else {
+ spin_unlock(&qp->s_lock);
+ qib_put_txreq(tx);
+ }
+unlock:
+ spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+ return ret;
+}
+
+void qib_sdma_process_event(struct qib_pportdata *ppd,
+ enum qib_sdma_events event)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ppd->sdma_lock, flags);
+
+ __qib_sdma_process_event(ppd, event);
+
+ if (ppd->sdma_state.current_state == qib_sdma_state_s99_running)
+ qib_verbs_sdma_desc_avail(ppd, qib_sdma_descq_freecnt(ppd));
+
+ spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+}
+
+void __qib_sdma_process_event(struct qib_pportdata *ppd,
+ enum qib_sdma_events event)
+{
+ struct qib_sdma_state *ss = &ppd->sdma_state;
+
+ switch (ss->current_state) {
+ case qib_sdma_state_s00_hw_down:
+ switch (event) {
+ case qib_sdma_event_e00_go_hw_down:
+ break;
+ case qib_sdma_event_e30_go_running:
+ /*
+ * If down, but running requested (usually the result
+ * of link up), then we need to start up.
+ * This can happen, e.g., when hw down is requested while
+ * bringing the link up with traffic active on the 7220.
+ */
+ ss->go_s99_running = 1;
+ /* fall through and start dma engine */
+ case qib_sdma_event_e10_go_hw_start:
+ /* This reference means the state machine is started */
+ sdma_get(&ppd->sdma_state);
+ sdma_set_state(ppd,
+ qib_sdma_state_s10_hw_start_up_wait);
+ break;
+ case qib_sdma_event_e20_hw_started:
+ break;
+ case qib_sdma_event_e40_sw_cleaned:
+ sdma_sw_tear_down(ppd);
+ break;
+ case qib_sdma_event_e50_hw_cleaned:
+ break;
+ case qib_sdma_event_e60_hw_halted:
+ break;
+ case qib_sdma_event_e70_go_idle:
+ break;
+ case qib_sdma_event_e7220_err_halted:
+ break;
+ case qib_sdma_event_e7322_err_halted:
+ break;
+ case qib_sdma_event_e90_timer_tick:
+ break;
+ }
+ break;
+
+ case qib_sdma_state_s10_hw_start_up_wait:
+ switch (event) {
+ case qib_sdma_event_e00_go_hw_down:
+ sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
+ sdma_sw_tear_down(ppd);
+ break;
+ case qib_sdma_event_e10_go_hw_start:
+ break;
+ case qib_sdma_event_e20_hw_started:
+ sdma_set_state(ppd, ss->go_s99_running ?
+ qib_sdma_state_s99_running :
+ qib_sdma_state_s20_idle);
+ break;
+ case qib_sdma_event_e30_go_running:
+ ss->go_s99_running = 1;
+ break;
+ case qib_sdma_event_e40_sw_cleaned:
+ break;
+ case qib_sdma_event_e50_hw_cleaned:
+ break;
+ case qib_sdma_event_e60_hw_halted:
+ break;
+ case qib_sdma_event_e70_go_idle:
+ ss->go_s99_running = 0;
+ break;
+ case qib_sdma_event_e7220_err_halted:
+ break;
+ case qib_sdma_event_e7322_err_halted:
+ break;
+ case qib_sdma_event_e90_timer_tick:
+ break;
+ }
+ break;
+
+ case qib_sdma_state_s20_idle:
+ switch (event) {
+ case qib_sdma_event_e00_go_hw_down:
+ sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
+ sdma_sw_tear_down(ppd);
+ break;
+ case qib_sdma_event_e10_go_hw_start:
+ break;
+ case qib_sdma_event_e20_hw_started:
+ break;
+ case qib_sdma_event_e30_go_running:
+ sdma_set_state(ppd, qib_sdma_state_s99_running);
+ ss->go_s99_running = 1;
+ break;
+ case qib_sdma_event_e40_sw_cleaned:
+ break;
+ case qib_sdma_event_e50_hw_cleaned:
+ break;
+ case qib_sdma_event_e60_hw_halted:
+ break;
+ case qib_sdma_event_e70_go_idle:
+ break;
+ case qib_sdma_event_e7220_err_halted:
+ break;
+ case qib_sdma_event_e7322_err_halted:
+ break;
+ case qib_sdma_event_e90_timer_tick:
+ break;
+ }
+ break;
+
+ case qib_sdma_state_s30_sw_clean_up_wait:
+ switch (event) {
+ case qib_sdma_event_e00_go_hw_down:
+ sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
+ break;
+ case qib_sdma_event_e10_go_hw_start:
+ break;
+ case qib_sdma_event_e20_hw_started:
+ break;
+ case qib_sdma_event_e30_go_running:
+ ss->go_s99_running = 1;
+ break;
+ case qib_sdma_event_e40_sw_cleaned:
+ sdma_set_state(ppd,
+ qib_sdma_state_s10_hw_start_up_wait);
+ sdma_hw_start_up(ppd);
+ break;
+ case qib_sdma_event_e50_hw_cleaned:
+ break;
+ case qib_sdma_event_e60_hw_halted:
+ break;
+ case qib_sdma_event_e70_go_idle:
+ ss->go_s99_running = 0;
+ break;
+ case qib_sdma_event_e7220_err_halted:
+ break;
+ case qib_sdma_event_e7322_err_halted:
+ break;
+ case qib_sdma_event_e90_timer_tick:
+ break;
+ }
+ break;
+
+ case qib_sdma_state_s40_hw_clean_up_wait:
+ switch (event) {
+ case qib_sdma_event_e00_go_hw_down:
+ sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
+ sdma_start_sw_clean_up(ppd);
+ break;
+ case qib_sdma_event_e10_go_hw_start:
+ break;
+ case qib_sdma_event_e20_hw_started:
+ break;
+ case qib_sdma_event_e30_go_running:
+ ss->go_s99_running = 1;
+ break;
+ case qib_sdma_event_e40_sw_cleaned:
+ break;
+ case qib_sdma_event_e50_hw_cleaned:
+ sdma_set_state(ppd,
+ qib_sdma_state_s30_sw_clean_up_wait);
+ sdma_start_sw_clean_up(ppd);
+ break;
+ case qib_sdma_event_e60_hw_halted:
+ break;
+ case qib_sdma_event_e70_go_idle:
+ ss->go_s99_running = 0;
+ break;
+ case qib_sdma_event_e7220_err_halted:
+ break;
+ case qib_sdma_event_e7322_err_halted:
+ break;
+ case qib_sdma_event_e90_timer_tick:
+ break;
+ }
+ break;
+
+ case qib_sdma_state_s50_hw_halt_wait:
+ switch (event) {
+ case qib_sdma_event_e00_go_hw_down:
+ sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
+ sdma_start_sw_clean_up(ppd);
+ break;
+ case qib_sdma_event_e10_go_hw_start:
+ break;
+ case qib_sdma_event_e20_hw_started:
+ break;
+ case qib_sdma_event_e30_go_running:
+ ss->go_s99_running = 1;
+ break;
+ case qib_sdma_event_e40_sw_cleaned:
+ break;
+ case qib_sdma_event_e50_hw_cleaned:
+ break;
+ case qib_sdma_event_e60_hw_halted:
+ sdma_set_state(ppd,
+ qib_sdma_state_s40_hw_clean_up_wait);
+ ppd->dd->f_sdma_hw_clean_up(ppd);
+ break;
+ case qib_sdma_event_e70_go_idle:
+ ss->go_s99_running = 0;
+ break;
+ case qib_sdma_event_e7220_err_halted:
+ break;
+ case qib_sdma_event_e7322_err_halted:
+ break;
+ case qib_sdma_event_e90_timer_tick:
+ break;
+ }
+ break;
+
+ case qib_sdma_state_s99_running:
+ switch (event) {
+ case qib_sdma_event_e00_go_hw_down:
+ sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
+ sdma_start_sw_clean_up(ppd);
+ break;
+ case qib_sdma_event_e10_go_hw_start:
+ break;
+ case qib_sdma_event_e20_hw_started:
+ break;
+ case qib_sdma_event_e30_go_running:
+ break;
+ case qib_sdma_event_e40_sw_cleaned:
+ break;
+ case qib_sdma_event_e50_hw_cleaned:
+ break;
+ case qib_sdma_event_e60_hw_halted:
+ sdma_set_state(ppd,
+ qib_sdma_state_s30_sw_clean_up_wait);
+ sdma_start_sw_clean_up(ppd);
+ break;
+ case qib_sdma_event_e70_go_idle:
+ sdma_set_state(ppd, qib_sdma_state_s50_hw_halt_wait);
+ ss->go_s99_running = 0;
+ break;
+ case qib_sdma_event_e7220_err_halted:
+ sdma_set_state(ppd,
+ qib_sdma_state_s30_sw_clean_up_wait);
+ sdma_start_sw_clean_up(ppd);
+ break;
+ case qib_sdma_event_e7322_err_halted:
+ sdma_set_state(ppd, qib_sdma_state_s50_hw_halt_wait);
+ break;
+ case qib_sdma_event_e90_timer_tick:
+ break;
+ }
+ break;
+ }
+
+ ss->last_event = event;
+}
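Editorial note, not part of the commit: the SDMA state machine above is
advanced only through qib_sdma_process_event() / __qib_sdma_process_event();
the chip-specific code that generates the events lives outside this file. A
minimal, hypothetical sketch of two such call sites (the handler names are
invented for illustration; only the event names and entry point are real):

	/* A 7220 SDMA error halts the engine: s99_running moves to
	 * s30_sw_clean_up_wait, the cleanup tasklet runs, and
	 * e40_sw_cleaned restarts the hardware. */
	static void example_sdma_err_intr(struct qib_pportdata *ppd)
	{
		qib_sdma_process_event(ppd, qib_sdma_event_e7220_err_halted);
	}

	/* Link-up handling asks for the running state; from s20_idle
	 * (or the s10 start-up wait) this leads to s99_running. */
	static void example_link_up(struct qib_pportdata *ppd)
	{
		qib_sdma_process_event(ppd, qib_sdma_event_e30_go_running);
	}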
diff --git a/drivers/infiniband/hw/qib/qib_srq.c b/drivers/infiniband/hw/qib/qib_srq.c
new file mode 100644
index 000000000000..c3ec8efc2ed8
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_srq.c
@@ -0,0 +1,375 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include "qib_verbs.h"
+
+/**
+ * qib_post_srq_receive - post a receive on a shared receive queue
+ * @ibsrq: the SRQ to post the receive on
+ * @wr: the list of work requests to post
+ * @bad_wr: A pointer to the first WR to cause a problem is put here
+ *
+ * This may be called from interrupt context.
+ */
+int qib_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr)
+{
+ struct qib_srq *srq = to_isrq(ibsrq);
+ struct qib_rwq *wq;
+ unsigned long flags;
+ int ret;
+
+ for (; wr; wr = wr->next) {
+ struct qib_rwqe *wqe;
+ u32 next;
+ int i;
+
+ if ((unsigned) wr->num_sge > srq->rq.max_sge) {
+ *bad_wr = wr;
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ spin_lock_irqsave(&srq->rq.lock, flags);
+ wq = srq->rq.wq;
+ next = wq->head + 1;
+ if (next >= srq->rq.size)
+ next = 0;
+ if (next == wq->tail) {
+ spin_unlock_irqrestore(&srq->rq.lock, flags);
+ *bad_wr = wr;
+ ret = -ENOMEM;
+ goto bail;
+ }
+
+ wqe = get_rwqe_ptr(&srq->rq, wq->head);
+ wqe->wr_id = wr->wr_id;
+ wqe->num_sge = wr->num_sge;
+ for (i = 0; i < wr->num_sge; i++)
+ wqe->sg_list[i] = wr->sg_list[i];
+ /* Make sure queue entry is written before the head index. */
+ smp_wmb();
+ wq->head = next;
+ spin_unlock_irqrestore(&srq->rq.lock, flags);
+ }
+ ret = 0;
+
+bail:
+ return ret;
+}
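Editorial note, not part of the commit: qib_post_srq_receive() treats the
ring as full when advancing the head would land on the tail, so one slot is
always left unused to distinguish full from empty. A hypothetical helper
(no such function exists in the driver) that makes the free-slot arithmetic
explicit:

	/*
	 * Illustrative only: receive WQEs that can still be posted to an
	 * SRQ ring of 'size' slots, given the head/tail convention used
	 * in qib_post_srq_receive() above.
	 */
	static u32 example_srq_free_entries(const struct qib_rwq *wq, u32 size)
	{
		u32 head = wq->head;
		u32 tail = wq->tail;

		/* one slot is sacrificed so that full != empty */
		if (head >= tail)
			return size - (head - tail) - 1;
		return tail - head - 1;
	}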
+
+/**
+ * qib_create_srq - create a shared receive queue
+ * @ibpd: the protection domain of the SRQ to create
+ * @srq_init_attr: the attributes of the SRQ
+ * @udata: data from libibverbs when creating a user SRQ
+ */
+struct ib_srq *qib_create_srq(struct ib_pd *ibpd,
+ struct ib_srq_init_attr *srq_init_attr,
+ struct ib_udata *udata)
+{
+ struct qib_ibdev *dev = to_idev(ibpd->device);
+ struct qib_srq *srq;
+ u32 sz;
+ struct ib_srq *ret;
+
+ if (srq_init_attr->attr.max_sge == 0 ||
+ srq_init_attr->attr.max_sge > ib_qib_max_srq_sges ||
+ srq_init_attr->attr.max_wr == 0 ||
+ srq_init_attr->attr.max_wr > ib_qib_max_srq_wrs) {
+ ret = ERR_PTR(-EINVAL);
+ goto done;
+ }
+
+ srq = kmalloc(sizeof(*srq), GFP_KERNEL);
+ if (!srq) {
+ ret = ERR_PTR(-ENOMEM);
+ goto done;
+ }
+
+ /*
+ * Need to use vmalloc() if we want to support large #s of entries.
+ */
+ srq->rq.size = srq_init_attr->attr.max_wr + 1;
+ srq->rq.max_sge = srq_init_attr->attr.max_sge;
+ sz = sizeof(struct ib_sge) * srq->rq.max_sge +
+ sizeof(struct qib_rwqe);
+ srq->rq.wq = vmalloc_user(sizeof(struct qib_rwq) + srq->rq.size * sz);
+ if (!srq->rq.wq) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail_srq;
+ }
+
+ /*
+ * Return the address of the RWQ as the offset to mmap.
+ * See qib_mmap() for details.
+ */
+ if (udata && udata->outlen >= sizeof(__u64)) {
+ int err;
+ u32 s = sizeof(struct qib_rwq) + srq->rq.size * sz;
+
+ srq->ip =
+ qib_create_mmap_info(dev, s, ibpd->uobject->context,
+ srq->rq.wq);
+ if (!srq->ip) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail_wq;
+ }
+
+ err = ib_copy_to_udata(udata, &srq->ip->offset,
+ sizeof(srq->ip->offset));
+ if (err) {
+ ret = ERR_PTR(err);
+ goto bail_ip;
+ }
+ } else
+ srq->ip = NULL;
+
+ /*
+ * ib_create_srq() will initialize srq->ibsrq.
+ */
+ spin_lock_init(&srq->rq.lock);
+ srq->rq.wq->head = 0;
+ srq->rq.wq->tail = 0;
+ srq->limit = srq_init_attr->attr.srq_limit;
+
+ spin_lock(&dev->n_srqs_lock);
+ if (dev->n_srqs_allocated == ib_qib_max_srqs) {
+ spin_unlock(&dev->n_srqs_lock);
+ ret = ERR_PTR(-ENOMEM);
+ goto bail_ip;
+ }
+
+ dev->n_srqs_allocated++;
+ spin_unlock(&dev->n_srqs_lock);
+
+ if (srq->ip) {
+ spin_lock_irq(&dev->pending_lock);
+ list_add(&srq->ip->pending_mmaps, &dev->pending_mmaps);
+ spin_unlock_irq(&dev->pending_lock);
+ }
+
+ ret = &srq->ibsrq;
+ goto done;
+
+bail_ip:
+ kfree(srq->ip);
+bail_wq:
+ vfree(srq->rq.wq);
+bail_srq:
+ kfree(srq);
+done:
+ return ret;
+}
+
+/**
+ * qib_modify_srq - modify a shared receive queue
+ * @ibsrq: the SRQ to modify
+ * @attr: the new attributes of the SRQ
+ * @attr_mask: indicates which attributes to modify
+ * @udata: user data for libibverbs.so
+ */
+int qib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
+ enum ib_srq_attr_mask attr_mask,
+ struct ib_udata *udata)
+{
+ struct qib_srq *srq = to_isrq(ibsrq);
+ struct qib_rwq *wq;
+ int ret = 0;
+
+ if (attr_mask & IB_SRQ_MAX_WR) {
+ struct qib_rwq *owq;
+ struct qib_rwqe *p;
+ u32 sz, size, n, head, tail;
+
+ /* Check that the requested sizes are below the limits. */
+ if ((attr->max_wr > ib_qib_max_srq_wrs) ||
+ ((attr_mask & IB_SRQ_LIMIT) ?
+ attr->srq_limit : srq->limit) > attr->max_wr) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ sz = sizeof(struct qib_rwqe) +
+ srq->rq.max_sge * sizeof(struct ib_sge);
+ size = attr->max_wr + 1;
+ wq = vmalloc_user(sizeof(struct qib_rwq) + size * sz);
+ if (!wq) {
+ ret = -ENOMEM;
+ goto bail;
+ }
+
+ /* Check that we can write the offset to mmap. */
+ if (udata && udata->inlen >= sizeof(__u64)) {
+ __u64 offset_addr;
+ __u64 offset = 0;
+
+ ret = ib_copy_from_udata(&offset_addr, udata,
+ sizeof(offset_addr));
+ if (ret)
+ goto bail_free;
+ udata->outbuf =
+ (void __user *) (unsigned long) offset_addr;
+ ret = ib_copy_to_udata(udata, &offset,
+ sizeof(offset));
+ if (ret)
+ goto bail_free;
+ }
+
+ spin_lock_irq(&srq->rq.lock);
+ /*
+ * validate head and tail pointer values and compute
+ * the number of remaining WQEs.
+ */
+ owq = srq->rq.wq;
+ head = owq->head;
+ tail = owq->tail;
+ if (head >= srq->rq.size || tail >= srq->rq.size) {
+ ret = -EINVAL;
+ goto bail_unlock;
+ }
+ n = head;
+ if (n < tail)
+ n += srq->rq.size - tail;
+ else
+ n -= tail;
+ if (size <= n) {
+ ret = -EINVAL;
+ goto bail_unlock;
+ }
+ n = 0;
+ p = wq->wq;
+ while (tail != head) {
+ struct qib_rwqe *wqe;
+ int i;
+
+ wqe = get_rwqe_ptr(&srq->rq, tail);
+ p->wr_id = wqe->wr_id;
+ p->num_sge = wqe->num_sge;
+ for (i = 0; i < wqe->num_sge; i++)
+ p->sg_list[i] = wqe->sg_list[i];
+ n++;
+ p = (struct qib_rwqe *)((char *) p + sz);
+ if (++tail >= srq->rq.size)
+ tail = 0;
+ }
+ srq->rq.wq = wq;
+ srq->rq.size = size;
+ wq->head = n;
+ wq->tail = 0;
+ if (attr_mask & IB_SRQ_LIMIT)
+ srq->limit = attr->srq_limit;
+ spin_unlock_irq(&srq->rq.lock);
+
+ vfree(owq);
+
+ if (srq->ip) {
+ struct qib_mmap_info *ip = srq->ip;
+ struct qib_ibdev *dev = to_idev(srq->ibsrq.device);
+ u32 s = sizeof(struct qib_rwq) + size * sz;
+
+ qib_update_mmap_info(dev, ip, s, wq);
+
+ /*
+ * Return the offset to mmap.
+ * See qib_mmap() for details.
+ */
+ if (udata && udata->inlen >= sizeof(__u64)) {
+ ret = ib_copy_to_udata(udata, &ip->offset,
+ sizeof(ip->offset));
+ if (ret)
+ goto bail;
+ }
+
+ /*
+ * Put user mapping info onto the pending list
+ * unless it already is on the list.
+ */
+ spin_lock_irq(&dev->pending_lock);
+ if (list_empty(&ip->pending_mmaps))
+ list_add(&ip->pending_mmaps,
+ &dev->pending_mmaps);
+ spin_unlock_irq(&dev->pending_lock);
+ }
+ } else if (attr_mask & IB_SRQ_LIMIT) {
+ spin_lock_irq(&srq->rq.lock);
+ if (attr->srq_limit >= srq->rq.size)
+ ret = -EINVAL;
+ else
+ srq->limit = attr->srq_limit;
+ spin_unlock_irq(&srq->rq.lock);
+ }
+ goto bail;
+
+bail_unlock:
+ spin_unlock_irq(&srq->rq.lock);
+bail_free:
+ vfree(wq);
+bail:
+ return ret;
+}
+
+int qib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
+{
+ struct qib_srq *srq = to_isrq(ibsrq);
+
+ attr->max_wr = srq->rq.size - 1;
+ attr->max_sge = srq->rq.max_sge;
+ attr->srq_limit = srq->limit;
+ return 0;
+}
+
+/**
+ * qib_destroy_srq - destroy a shared receive queue
+ * @ibsrq: the SRQ to destroy
+ */
+int qib_destroy_srq(struct ib_srq *ibsrq)
+{
+ struct qib_srq *srq = to_isrq(ibsrq);
+ struct qib_ibdev *dev = to_idev(ibsrq->device);
+
+ spin_lock(&dev->n_srqs_lock);
+ dev->n_srqs_allocated--;
+ spin_unlock(&dev->n_srqs_lock);
+ if (srq->ip)
+ kref_put(&srq->ip->ref, qib_release_mmap_info);
+ else
+ vfree(srq->rq.wq);
+ kfree(srq);
+
+ return 0;
+}
diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c
new file mode 100644
index 000000000000..dab4d9f4a2cc
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_sysfs.c
@@ -0,0 +1,691 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/ctype.h>
+
+#include "qib.h"
+
+/**
+ * qib_parse_ushort - parse an unsigned short value in an arbitrary base
+ * @str: the string containing the number
+ * @valp: where to put the result
+ *
+ * Returns the number of bytes consumed, or negative value on error.
+ */
+static int qib_parse_ushort(const char *str, unsigned short *valp)
+{
+ unsigned long val;
+ char *end;
+ int ret;
+
+ if (!isdigit(str[0])) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ val = simple_strtoul(str, &end, 0);
+
+ if (val > 0xffff) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ *valp = val;
+
+ ret = end + 1 - str;
+ if (ret == 0)
+ ret = -EINVAL;
+
+bail:
+ return ret;
+}
+
+/* start of per-port functions */
+/*
+ * Get/Set heartbeat enable. OR of 1=enabled, 2=auto
+ */
+static ssize_t show_hrtbt_enb(struct qib_pportdata *ppd, char *buf)
+{
+ struct qib_devdata *dd = ppd->dd;
+ int ret;
+
+ ret = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_HRTBT);
+ ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret);
+ return ret;
+}
+
+static ssize_t store_hrtbt_enb(struct qib_pportdata *ppd, const char *buf,
+ size_t count)
+{
+ struct qib_devdata *dd = ppd->dd;
+ int ret;
+ u16 val;
+
+ ret = qib_parse_ushort(buf, &val);
+
+ /*
+ * Set the "intentional" heartbeat enable per either of
+ * "Enable" and "Auto", as these are normally set together.
+ * This bit is consulted when leaving loopback mode,
+ * because entering loopback mode overrides it and automatically
+ * disables heartbeat.
+ */
+ if (ret >= 0)
+ ret = dd->f_set_ib_cfg(ppd, QIB_IB_CFG_HRTBT, val);
+ if (ret < 0)
+ qib_dev_err(dd, "attempt to set invalid Heartbeat enable\n");
+ return ret < 0 ? ret : count;
+}
+
+static ssize_t store_loopback(struct qib_pportdata *ppd, const char *buf,
+ size_t count)
+{
+ struct qib_devdata *dd = ppd->dd;
+ int ret = count, r;
+
+ r = dd->f_set_ib_loopback(ppd, buf);
+ if (r < 0)
+ ret = r;
+
+ return ret;
+}
+
+static ssize_t store_led_override(struct qib_pportdata *ppd, const char *buf,
+ size_t count)
+{
+ struct qib_devdata *dd = ppd->dd;
+ int ret;
+ u16 val;
+
+ ret = qib_parse_ushort(buf, &val);
+ if (ret > 0)
+ qib_set_led_override(ppd, val);
+ else
+ qib_dev_err(dd, "attempt to set invalid LED override\n");
+ return ret < 0 ? ret : count;
+}
+
+static ssize_t show_status(struct qib_pportdata *ppd, char *buf)
+{
+ ssize_t ret;
+
+ if (!ppd->statusp)
+ ret = -EINVAL;
+ else
+ ret = scnprintf(buf, PAGE_SIZE, "0x%llx\n",
+ (unsigned long long) *(ppd->statusp));
+ return ret;
+}
+
+/*
+ * For userland compatibility, these offsets must remain fixed.
+ * They are strings for QIB_STATUS_*
+ */
+static const char *qib_status_str[] = {
+ "Initted",
+ "",
+ "",
+ "",
+ "",
+ "Present",
+ "IB_link_up",
+ "IB_configured",
+ "",
+ "Fatal_Hardware_Error",
+ NULL,
+};
+
+static ssize_t show_status_str(struct qib_pportdata *ppd, char *buf)
+{
+ int i, any;
+ u64 s;
+ ssize_t ret;
+
+ if (!ppd->statusp) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ s = *(ppd->statusp);
+ *buf = '\0';
+ for (any = i = 0; s && qib_status_str[i]; i++) {
+ if (s & 1) {
+ /* if overflow */
+ if (any && strlcat(buf, " ", PAGE_SIZE) >= PAGE_SIZE)
+ break;
+ if (strlcat(buf, qib_status_str[i], PAGE_SIZE) >=
+ PAGE_SIZE)
+ break;
+ any = 1;
+ }
+ s >>= 1;
+ }
+ if (any)
+ strlcat(buf, "\n", PAGE_SIZE);
+
+ ret = strlen(buf);
+
+bail:
+ return ret;
+}
+
+/* end of per-port functions */
+
+/*
+ * Start of per-port file structures and support code
+ * Because we are fitting into other infrastructure, we have to supply the
+ * full set of kobject/sysfs_ops structures and routines.
+ */
+#define QIB_PORT_ATTR(name, mode, show, store) \
+ static struct qib_port_attr qib_port_attr_##name = \
+ __ATTR(name, mode, show, store)
+
+struct qib_port_attr {
+ struct attribute attr;
+ ssize_t (*show)(struct qib_pportdata *, char *);
+ ssize_t (*store)(struct qib_pportdata *, const char *, size_t);
+};
+
+QIB_PORT_ATTR(loopback, S_IWUSR, NULL, store_loopback);
+QIB_PORT_ATTR(led_override, S_IWUSR, NULL, store_led_override);
+QIB_PORT_ATTR(hrtbt_enable, S_IWUSR | S_IRUGO, show_hrtbt_enb,
+ store_hrtbt_enb);
+QIB_PORT_ATTR(status, S_IRUGO, show_status, NULL);
+QIB_PORT_ATTR(status_str, S_IRUGO, show_status_str, NULL);
+
+static struct attribute *port_default_attributes[] = {
+ &qib_port_attr_loopback.attr,
+ &qib_port_attr_led_override.attr,
+ &qib_port_attr_hrtbt_enable.attr,
+ &qib_port_attr_status.attr,
+ &qib_port_attr_status_str.attr,
+ NULL
+};
+
+static ssize_t qib_portattr_show(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ struct qib_port_attr *pattr =
+ container_of(attr, struct qib_port_attr, attr);
+ struct qib_pportdata *ppd =
+ container_of(kobj, struct qib_pportdata, pport_kobj);
+
+ return pattr->show(ppd, buf);
+}
+
+static ssize_t qib_portattr_store(struct kobject *kobj,
+ struct attribute *attr, const char *buf, size_t len)
+{
+ struct qib_port_attr *pattr =
+ container_of(attr, struct qib_port_attr, attr);
+ struct qib_pportdata *ppd =
+ container_of(kobj, struct qib_pportdata, pport_kobj);
+
+ return pattr->store(ppd, buf, len);
+}
+
+static void qib_port_release(struct kobject *kobj)
+{
+ /* nothing to do since memory is freed by qib_free_devdata() */
+}
+
+static const struct sysfs_ops qib_port_ops = {
+ .show = qib_portattr_show,
+ .store = qib_portattr_store,
+};
+
+static struct kobj_type qib_port_ktype = {
+ .release = qib_port_release,
+ .sysfs_ops = &qib_port_ops,
+ .default_attrs = port_default_attributes
+};
+
+/* Start sl2vl */
+
+#define QIB_SL2VL_ATTR(N) \
+ static struct qib_sl2vl_attr qib_sl2vl_attr_##N = { \
+ .attr = { .name = __stringify(N), .mode = 0444 }, \
+ .sl = N \
+ }
+
+struct qib_sl2vl_attr {
+ struct attribute attr;
+ int sl;
+};
+
+QIB_SL2VL_ATTR(0);
+QIB_SL2VL_ATTR(1);
+QIB_SL2VL_ATTR(2);
+QIB_SL2VL_ATTR(3);
+QIB_SL2VL_ATTR(4);
+QIB_SL2VL_ATTR(5);
+QIB_SL2VL_ATTR(6);
+QIB_SL2VL_ATTR(7);
+QIB_SL2VL_ATTR(8);
+QIB_SL2VL_ATTR(9);
+QIB_SL2VL_ATTR(10);
+QIB_SL2VL_ATTR(11);
+QIB_SL2VL_ATTR(12);
+QIB_SL2VL_ATTR(13);
+QIB_SL2VL_ATTR(14);
+QIB_SL2VL_ATTR(15);
+
+static struct attribute *sl2vl_default_attributes[] = {
+ &qib_sl2vl_attr_0.attr,
+ &qib_sl2vl_attr_1.attr,
+ &qib_sl2vl_attr_2.attr,
+ &qib_sl2vl_attr_3.attr,
+ &qib_sl2vl_attr_4.attr,
+ &qib_sl2vl_attr_5.attr,
+ &qib_sl2vl_attr_6.attr,
+ &qib_sl2vl_attr_7.attr,
+ &qib_sl2vl_attr_8.attr,
+ &qib_sl2vl_attr_9.attr,
+ &qib_sl2vl_attr_10.attr,
+ &qib_sl2vl_attr_11.attr,
+ &qib_sl2vl_attr_12.attr,
+ &qib_sl2vl_attr_13.attr,
+ &qib_sl2vl_attr_14.attr,
+ &qib_sl2vl_attr_15.attr,
+ NULL
+};
+
+static ssize_t sl2vl_attr_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct qib_sl2vl_attr *sattr =
+ container_of(attr, struct qib_sl2vl_attr, attr);
+ struct qib_pportdata *ppd =
+ container_of(kobj, struct qib_pportdata, sl2vl_kobj);
+ struct qib_ibport *qibp = &ppd->ibport_data;
+
+ return sprintf(buf, "%u\n", qibp->sl_to_vl[sattr->sl]);
+}
+
+static const struct sysfs_ops qib_sl2vl_ops = {
+ .show = sl2vl_attr_show,
+};
+
+static struct kobj_type qib_sl2vl_ktype = {
+ .release = qib_port_release,
+ .sysfs_ops = &qib_sl2vl_ops,
+ .default_attrs = sl2vl_default_attributes
+};
+
+/* End sl2vl */
+
+/* Start diag_counters */
+
+#define QIB_DIAGC_ATTR(N) \
+ static struct qib_diagc_attr qib_diagc_attr_##N = { \
+ .attr = { .name = __stringify(N), .mode = 0444 }, \
+ .counter = offsetof(struct qib_ibport, n_##N) \
+ }
+
+struct qib_diagc_attr {
+ struct attribute attr;
+ size_t counter;
+};
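+/*
+ * Each QIB_DIAGC_ATTR(x) below exposes the per-port counter
+ * qib_ibport.n_<x> as a read-only file named "x"; diagc_attr_show()
+ * uses the stored offsetof() to read the u32 at that offset, e.g.
+ * QIB_DIAGC_ATTR(rc_resends) reports ibport_data.n_rc_resends.
+ */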
+
+QIB_DIAGC_ATTR(rc_resends);
+QIB_DIAGC_ATTR(rc_acks);
+QIB_DIAGC_ATTR(rc_qacks);
+QIB_DIAGC_ATTR(rc_delayed_comp);
+QIB_DIAGC_ATTR(seq_naks);
+QIB_DIAGC_ATTR(rdma_seq);
+QIB_DIAGC_ATTR(rnr_naks);
+QIB_DIAGC_ATTR(other_naks);
+QIB_DIAGC_ATTR(rc_timeouts);
+QIB_DIAGC_ATTR(loop_pkts);
+QIB_DIAGC_ATTR(pkt_drops);
+QIB_DIAGC_ATTR(dmawait);
+QIB_DIAGC_ATTR(unaligned);
+QIB_DIAGC_ATTR(rc_dupreq);
+QIB_DIAGC_ATTR(rc_seqnak);
+
+static struct attribute *diagc_default_attributes[] = {
+ &qib_diagc_attr_rc_resends.attr,
+ &qib_diagc_attr_rc_acks.attr,
+ &qib_diagc_attr_rc_qacks.attr,
+ &qib_diagc_attr_rc_delayed_comp.attr,
+ &qib_diagc_attr_seq_naks.attr,
+ &qib_diagc_attr_rdma_seq.attr,
+ &qib_diagc_attr_rnr_naks.attr,
+ &qib_diagc_attr_other_naks.attr,
+ &qib_diagc_attr_rc_timeouts.attr,
+ &qib_diagc_attr_loop_pkts.attr,
+ &qib_diagc_attr_pkt_drops.attr,
+ &qib_diagc_attr_dmawait.attr,
+ &qib_diagc_attr_unaligned.attr,
+ &qib_diagc_attr_rc_dupreq.attr,
+ &qib_diagc_attr_rc_seqnak.attr,
+ NULL
+};
+
+static ssize_t diagc_attr_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct qib_diagc_attr *dattr =
+ container_of(attr, struct qib_diagc_attr, attr);
+ struct qib_pportdata *ppd =
+ container_of(kobj, struct qib_pportdata, diagc_kobj);
+ struct qib_ibport *qibp = &ppd->ibport_data;
+
+ return sprintf(buf, "%u\n", *(u32 *)((char *)qibp + dattr->counter));
+}
+
+static const struct sysfs_ops qib_diagc_ops = {
+ .show = diagc_attr_show,
+};
+
+static struct kobj_type qib_diagc_ktype = {
+ .release = qib_port_release,
+ .sysfs_ops = &qib_diagc_ops,
+ .default_attrs = diagc_default_attributes
+};
+
+/* End diag_counters */
+
+/* end of per-port file structures and support code */
+
+/*
+ * Start of per-unit (or driver, in some cases, but replicated
+ * per unit) functions (these get a device *)
+ */
+static ssize_t show_rev(struct device *device, struct device_attribute *attr,
+ char *buf)
+{
+ struct qib_ibdev *dev =
+ container_of(device, struct qib_ibdev, ibdev.dev);
+
+ return sprintf(buf, "%x\n", dd_from_dev(dev)->minrev);
+}
+
+static ssize_t show_hca(struct device *device, struct device_attribute *attr,
+ char *buf)
+{
+ struct qib_ibdev *dev =
+ container_of(device, struct qib_ibdev, ibdev.dev);
+ struct qib_devdata *dd = dd_from_dev(dev);
+ int ret;
+
+ if (!dd->boardname)
+ ret = -EINVAL;
+ else
+ ret = scnprintf(buf, PAGE_SIZE, "%s\n", dd->boardname);
+ return ret;
+}
+
+static ssize_t show_version(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ /* The string printed here is already newline-terminated. */
+ return scnprintf(buf, PAGE_SIZE, "%s", (char *)ib_qib_version);
+}
+
+static ssize_t show_boardversion(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct qib_ibdev *dev =
+ container_of(device, struct qib_ibdev, ibdev.dev);
+ struct qib_devdata *dd = dd_from_dev(dev);
+
+ /* The string printed here is already newline-terminated. */
+ return scnprintf(buf, PAGE_SIZE, "%s", dd->boardversion);
+}
+
+
+static ssize_t show_localbus_info(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct qib_ibdev *dev =
+ container_of(device, struct qib_ibdev, ibdev.dev);
+ struct qib_devdata *dd = dd_from_dev(dev);
+
+ /* The string printed here is already newline-terminated. */
+ return scnprintf(buf, PAGE_SIZE, "%s", dd->lbus_info);
+}
+
+
+static ssize_t show_nctxts(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct qib_ibdev *dev =
+ container_of(device, struct qib_ibdev, ibdev.dev);
+ struct qib_devdata *dd = dd_from_dev(dev);
+
+ /* Return the number of user ports (contexts) available. */
+ return scnprintf(buf, PAGE_SIZE, "%u\n", dd->cfgctxts -
+ dd->first_user_ctxt);
+}
+
+static ssize_t show_serial(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct qib_ibdev *dev =
+ container_of(device, struct qib_ibdev, ibdev.dev);
+ struct qib_devdata *dd = dd_from_dev(dev);
+
+ buf[sizeof dd->serial] = '\0';
+ memcpy(buf, dd->serial, sizeof dd->serial);
+ strcat(buf, "\n");
+ return strlen(buf);
+}
+
+static ssize_t store_chip_reset(struct device *device,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct qib_ibdev *dev =
+ container_of(device, struct qib_ibdev, ibdev.dev);
+ struct qib_devdata *dd = dd_from_dev(dev);
+ int ret;
+
+ if (count < 5 || memcmp(buf, "reset", 5) || !dd->diag_client) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ ret = qib_reset_device(dd->unit);
+bail:
+ return ret < 0 ? ret : count;
+}
+
+static ssize_t show_logged_errs(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct qib_ibdev *dev =
+ container_of(device, struct qib_ibdev, ibdev.dev);
+ struct qib_devdata *dd = dd_from_dev(dev);
+ int idx, count;
+
+ /* force consistency with actual EEPROM */
+ if (qib_update_eeprom_log(dd) != 0)
+ return -ENXIO;
+
+ count = 0;
+ for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) {
+ count += scnprintf(buf + count, PAGE_SIZE - count, "%d%c",
+ dd->eep_st_errs[idx],
+ idx == (QIB_EEP_LOG_CNT - 1) ? '\n' : ' ');
+ }
+
+ return count;
+}
+
+/*
+ * Dump tempsense registers in decimal, to ease shell scripts.
+ */
+static ssize_t show_tempsense(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct qib_ibdev *dev =
+ container_of(device, struct qib_ibdev, ibdev.dev);
+ struct qib_devdata *dd = dd_from_dev(dev);
+ int ret;
+ int idx;
+ u8 regvals[8];
+
+ ret = -ENXIO;
+ for (idx = 0; idx < 8; ++idx) {
+ if (idx == 6)
+ continue;
+ ret = dd->f_tempsense_rd(dd, idx);
+ if (ret < 0)
+ break;
+ regvals[idx] = ret;
+ }
+ if (idx == 8)
+ ret = scnprintf(buf, PAGE_SIZE, "%d %d %02X %02X %d %d\n",
+ *(signed char *)(regvals),
+ *(signed char *)(regvals + 1),
+ regvals[2], regvals[3],
+ *(signed char *)(regvals + 5),
+ *(signed char *)(regvals + 7));
+ return ret;
+}
+
+/*
+ * end of per-unit (or driver, in some cases, but replicated
+ * per unit) functions
+ */
+
+/* start of per-unit file structures and support code */
+static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
+static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
+static DEVICE_ATTR(board_id, S_IRUGO, show_hca, NULL);
+static DEVICE_ATTR(version, S_IRUGO, show_version, NULL);
+static DEVICE_ATTR(nctxts, S_IRUGO, show_nctxts, NULL);
+static DEVICE_ATTR(serial, S_IRUGO, show_serial, NULL);
+static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL);
+static DEVICE_ATTR(logged_errors, S_IRUGO, show_logged_errs, NULL);
+static DEVICE_ATTR(tempsense, S_IRUGO, show_tempsense, NULL);
+static DEVICE_ATTR(localbus_info, S_IRUGO, show_localbus_info, NULL);
+static DEVICE_ATTR(chip_reset, S_IWUSR, NULL, store_chip_reset);
+
+static struct device_attribute *qib_attributes[] = {
+ &dev_attr_hw_rev,
+ &dev_attr_hca_type,
+ &dev_attr_board_id,
+ &dev_attr_version,
+ &dev_attr_nctxts,
+ &dev_attr_serial,
+ &dev_attr_boardversion,
+ &dev_attr_logged_errors,
+ &dev_attr_tempsense,
+ &dev_attr_localbus_info,
+ &dev_attr_chip_reset,
+};
+
+int qib_create_port_files(struct ib_device *ibdev, u8 port_num,
+ struct kobject *kobj)
+{
+ struct qib_pportdata *ppd;
+ struct qib_devdata *dd = dd_from_ibdev(ibdev);
+ int ret;
+
+ if (!port_num || port_num > dd->num_pports) {
+ qib_dev_err(dd, "Skipping infiniband class with "
+ "invalid port %u\n", port_num);
+ ret = -ENODEV;
+ goto bail;
+ }
+ ppd = &dd->pport[port_num - 1];
+
+ ret = kobject_init_and_add(&ppd->pport_kobj, &qib_port_ktype, kobj,
+ "linkcontrol");
+ if (ret) {
+ qib_dev_err(dd, "Skipping linkcontrol sysfs info, "
+ "(err %d) port %u\n", ret, port_num);
+ goto bail;
+ }
+ kobject_uevent(&ppd->pport_kobj, KOBJ_ADD);
+
+ ret = kobject_init_and_add(&ppd->sl2vl_kobj, &qib_sl2vl_ktype, kobj,
+ "sl2vl");
+ if (ret) {
+ qib_dev_err(dd, "Skipping sl2vl sysfs info, "
+ "(err %d) port %u\n", ret, port_num);
+ goto bail_sl;
+ }
+ kobject_uevent(&ppd->sl2vl_kobj, KOBJ_ADD);
+
+ ret = kobject_init_and_add(&ppd->diagc_kobj, &qib_diagc_ktype, kobj,
+ "diag_counters");
+ if (ret) {
+ qib_dev_err(dd, "Skipping diag_counters sysfs info, "
+ "(err %d) port %u\n", ret, port_num);
+ goto bail_diagc;
+ }
+ kobject_uevent(&ppd->diagc_kobj, KOBJ_ADD);
+
+ return 0;
+
+bail_diagc:
+ kobject_put(&ppd->sl2vl_kobj);
+bail_sl:
+ kobject_put(&ppd->pport_kobj);
+bail:
+ return ret;
+}
+
+/*
+ * Register and create our files in /sys/class/infiniband.
+ */
+int qib_verbs_register_sysfs(struct qib_devdata *dd)
+{
+ struct ib_device *dev = &dd->verbs_dev.ibdev;
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(qib_attributes); ++i) {
+ ret = device_create_file(&dev->dev, qib_attributes[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * Unregister and remove our files in /sys/class/infiniband.
+ */
+void qib_verbs_unregister_sysfs(struct qib_devdata *dd)
+{
+ struct qib_pportdata *ppd;
+ int i;
+
+ for (i = 0; i < dd->num_pports; i++) {
+ ppd = &dd->pport[i];
+ /* put all per-port kobjects created in qib_create_port_files() */
+ kobject_put(&ppd->pport_kobj);
+ kobject_put(&ppd->sl2vl_kobj);
+ kobject_put(&ppd->diagc_kobj);
+ }
+}
diff --git a/drivers/infiniband/hw/qib/qib_twsi.c b/drivers/infiniband/hw/qib/qib_twsi.c
new file mode 100644
index 000000000000..6f31ca5039db
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_twsi.c
@@ -0,0 +1,498 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/vmalloc.h>
+
+#include "qib.h"
+
+/*
+ * QLogic_IB "Two Wire Serial Interface" driver.
+ * Originally written for a not-quite-i2c serial eeprom, which is
+ * still used on some supported boards. Later boards have added a
+ * variety of other uses, most board-specific, so the bit-banging
+ * part has been split off to this file, while the other parts
+ * have been moved to chip-specific files.
+ *
+ * We have also dropped all pretense of a fully generic interface
+ * (e.g. pretending we don't know whether '1' is the higher voltage),
+ * as the restrictions of the generic i2c interface (e.g. no access
+ * from the driver itself) make it unsuitable for this use.
+ */
+
+#define READ_CMD 1
+#define WRITE_CMD 0
+
+/**
+ * i2c_wait_for_writes - wait for a write
+ * @dd: the qlogic_ib device
+ *
+ * We use this instead of udelay directly, so we can make sure
+ * that previous register writes have been flushed all the way
+ * to the chip. Since we are delaying anyway, the cost doesn't
+ * hurt, and it makes the bit twiddling more regular.
+ */
+static void i2c_wait_for_writes(struct qib_devdata *dd)
+{
+ /*
+ * implicit read of EXTStatus is as good as explicit
+ * read of scratch, if all we want to do is flush
+ * writes.
+ */
+ dd->f_gpio_mod(dd, 0, 0, 0);
+ rmb(); /* inlined, so prevent compiler reordering */
+}
+
+/*
+ * QSFP modules are allowed to hold SCL low for 500uSec. Allow twice that
+ * for "almost compliant" modules
+ */
+#define SCL_WAIT_USEC 1000
+
+/*
+ * TWSI_BUF_WAIT_USEC is the time the bus must be free between a STOP or
+ * ACK and the next START. Should be 20, but some chips need more.
+ */
+#define TWSI_BUF_WAIT_USEC 60
+
+static void scl_out(struct qib_devdata *dd, u8 bit)
+{
+ u32 mask;
+
+ udelay(1);
+
+ mask = 1UL << dd->gpio_scl_num;
+
+ /* SCL is meant to be open-drain, so never set "OUT", just DIR */
+ dd->f_gpio_mod(dd, 0, bit ? 0 : mask, mask);
+
+ /*
+ * Allow for slow slaves with a simple delay on the
+ * falling edge, and sample on the rising edge.
+ */
+ if (!bit)
+ udelay(2);
+ else {
+ int rise_usec;
+ for (rise_usec = SCL_WAIT_USEC; rise_usec > 0; rise_usec -= 2) {
+ if (mask & dd->f_gpio_mod(dd, 0, 0, 0))
+ break;
+ udelay(2);
+ }
+ if (rise_usec <= 0)
+ qib_dev_err(dd, "SCL interface stuck low > %d uSec\n",
+ SCL_WAIT_USEC);
+ }
+ i2c_wait_for_writes(dd);
+}
+
+static void sda_out(struct qib_devdata *dd, u8 bit)
+{
+ u32 mask;
+
+ mask = 1UL << dd->gpio_sda_num;
+
+ /* SDA is meant to be open-drain, so never set "OUT", just DIR */
+ dd->f_gpio_mod(dd, 0, bit ? 0 : mask, mask);
+
+ i2c_wait_for_writes(dd);
+ udelay(2);
+}
+
+static u8 sda_in(struct qib_devdata *dd, int wait)
+{
+ int bnum;
+ u32 read_val, mask;
+
+ bnum = dd->gpio_sda_num;
+ mask = (1UL << bnum);
+ /* SDA is meant to be open-drain, so never set "OUT", just DIR */
+ dd->f_gpio_mod(dd, 0, 0, mask);
+ read_val = dd->f_gpio_mod(dd, 0, 0, 0);
+ if (wait)
+ i2c_wait_for_writes(dd);
+ return (read_val & mask) >> bnum;
+}
+
+/**
+ * i2c_ackrcv - see if ack following write is true
+ * @dd: the qlogic_ib device
+ */
+static int i2c_ackrcv(struct qib_devdata *dd)
+{
+ u8 ack_received;
+
+ /* AT ENTRY SCL = LOW */
+ /* change direction, ignore data */
+ ack_received = sda_in(dd, 1);
+ scl_out(dd, 1);
+ ack_received = sda_in(dd, 1) == 0;
+ scl_out(dd, 0);
+ return ack_received;
+}
+
+static void stop_cmd(struct qib_devdata *dd);
+
+/**
+ * rd_byte - read a byte, sending STOP on last, else ACK
+ * @dd: the qlogic_ib device
+ * @last: non-zero to send STOP after this byte, otherwise send ACK
+ *
+ * Returns the byte shifted out of the device
+ */
+static int rd_byte(struct qib_devdata *dd, int last)
+{
+ int bit_cntr, data;
+
+ data = 0;
+
+ for (bit_cntr = 7; bit_cntr >= 0; --bit_cntr) {
+ data <<= 1;
+ scl_out(dd, 1);
+ data |= sda_in(dd, 0);
+ scl_out(dd, 0);
+ }
+ if (last) {
+ scl_out(dd, 1);
+ stop_cmd(dd);
+ } else {
+ sda_out(dd, 0);
+ scl_out(dd, 1);
+ scl_out(dd, 0);
+ sda_out(dd, 1);
+ }
+ return data;
+}
+
+/**
+ * wr_byte - write a byte, one bit at a time
+ * @dd: the qlogic_ib device
+ * @data: the byte to write
+ *
+ * Returns 0 if we got the following ack, otherwise 1
+ */
+static int wr_byte(struct qib_devdata *dd, u8 data)
+{
+ int bit_cntr;
+ u8 bit;
+
+ for (bit_cntr = 7; bit_cntr >= 0; bit_cntr--) {
+ bit = (data >> bit_cntr) & 1;
+ sda_out(dd, bit);
+ scl_out(dd, 1);
+ scl_out(dd, 0);
+ }
+ return (!i2c_ackrcv(dd)) ? 1 : 0;
+}
+
+/*
+ * issue TWSI start sequence:
+ * (both clock/data high, clock high, data low while clock is high)
+ */
+static void start_seq(struct qib_devdata *dd)
+{
+ sda_out(dd, 1);
+ scl_out(dd, 1);
+ sda_out(dd, 0);
+ udelay(1);
+ scl_out(dd, 0);
+}
+
+/**
+ * stop_seq - transmit the stop sequence
+ * @dd: the qlogic_ib device
+ *
+ * (both clock/data low, clock high, data high while clock is high)
+ */
+static void stop_seq(struct qib_devdata *dd)
+{
+ scl_out(dd, 0);
+ sda_out(dd, 0);
+ scl_out(dd, 1);
+ sda_out(dd, 1);
+}
+
+/**
+ * stop_cmd - transmit the stop condition
+ * @dd: the qlogic_ib device
+ *
+ * (both clock/data low, clock high, data high while clock is high)
+ */
+static void stop_cmd(struct qib_devdata *dd)
+{
+ stop_seq(dd);
+ udelay(TWSI_BUF_WAIT_USEC);
+}
+
+/**
+ * qib_twsi_reset - reset I2C communication
+ * @dd: the qlogic_ib device
+ */
+
+int qib_twsi_reset(struct qib_devdata *dd)
+{
+ int clock_cycles_left = 9;
+ int was_high = 0;
+ u32 pins, mask;
+
+ /* Both SCL and SDA should be high. If not, there
+ * is something wrong.
+ */
+ mask = (1UL << dd->gpio_scl_num) | (1UL << dd->gpio_sda_num);
+
+ /*
+ * Force pins to desired innocuous state.
+ * This is the default power-on state with out=0 and dir=0,
+ * so the pins are tri-stated and should float high (barring HW problems).
+ */
+ dd->f_gpio_mod(dd, 0, 0, mask);
+
+ /*
+ * Clock nine times to get all listeners into a sane state.
+ * If SDA never goes high, we are wedged.
+ * One vendor recommends then issuing START followed by STOP.
+ * We cannot use our "normal" functions to do that, because
+ * if SCL drops between them, another vendor's part will
+ * wedge, dropping SDA and keeping it low forever, at the end of
+ * the next transaction (even if it was not the device addressed).
+ * So our START and STOP take place with SCL held high.
+ */
+ while (clock_cycles_left--) {
+ scl_out(dd, 0);
+ scl_out(dd, 1);
+ /* Note whether SDA is high, but keep clocking to sync the slave */
+ was_high |= sda_in(dd, 0);
+ }
+
+ if (was_high) {
+ /*
+ * We saw a high, which we hope means the slave is sync'd.
+ * Issue START, STOP, pause for T_BUF.
+ */
+
+ pins = dd->f_gpio_mod(dd, 0, 0, 0);
+ if ((pins & mask) != mask)
+ qib_dev_err(dd, "GPIO pins not at rest: %d\n",
+ pins & mask);
+ /* Drop SDA to issue START */
+ udelay(1); /* Guarantee .6 uSec setup */
+ sda_out(dd, 0);
+ udelay(1); /* Guarantee .6 uSec hold */
+ /* At this point, SCL is high, SDA low. Raise SDA for STOP */
+ sda_out(dd, 1);
+ udelay(TWSI_BUF_WAIT_USEC);
+ }
+
+ return !was_high;
+}
+
+#define QIB_TWSI_START 0x100
+#define QIB_TWSI_STOP 0x200
+
+/* Write byte to TWSI, optionally prefixed with START or suffixed with
+ * STOP.
+ * Returns 0 if OK (ACK received), otherwise non-zero.
+ */
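+/*
+ * Illustrative call sequence (not part of the driver), mirroring how the
+ * block routines below drive a real I2C device: the device code goes out
+ * with a START, then the offset and data bytes, then a STOP:
+ *
+ *   if (qib_twsi_wr(dd, dev | WRITE_CMD, QIB_TWSI_START))
+ *       goto failed;        /* no ACK from device */
+ *   if (qib_twsi_wr(dd, offset, 0) || qib_twsi_wr(dd, byte, 0))
+ *       goto failed;
+ *   stop_cmd(dd);           /* STOP plus t_buf delay */
+ */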
+static int qib_twsi_wr(struct qib_devdata *dd, int data, int flags)
+{
+ int ret = 1;
+ if (flags & QIB_TWSI_START)
+ start_seq(dd);
+
+ ret = wr_byte(dd, data); /* Leaves SCL low (from i2c_ackrcv()) */
+
+ if (flags & QIB_TWSI_STOP)
+ stop_cmd(dd);
+ return ret;
+}
+
+/* Added functionality for IBA7220-based cards */
+#define QIB_TEMP_DEV 0x98
+
+/*
+ * qib_twsi_blk_rd
+ * Formerly called qib_eeprom_internal_read, and only used for eeprom,
+ * but now the general interface for data transfer from twsi devices.
+ * One vestige of its former role is that it recognizes a device
+ * QIB_TWSI_NO_DEV and does the correct operation for the legacy part,
+ * which responded to all TWSI device codes, interpreting them as an
+ * address within the device. For all other devices on boards handled by
+ * this driver, the device code is followed by a one-byte "address" which
+ * selects the "register" or "offset" within the device from which data
+ * should be read.
+ */
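+/*
+ * Illustrative sketch (not in the driver): a hypothetical caller pulling
+ * two bytes starting at register 2 of a device at TWSI address 0x98
+ * (QIB_TEMP_DEV, defined above) might do:
+ *
+ *   u8 tmp[2];
+ *   if (qib_twsi_blk_rd(dd, QIB_TEMP_DEV, 2, tmp, sizeof(tmp)))
+ *       qib_dev_err(dd, "TWSI read failed\n");
+ */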
+int qib_twsi_blk_rd(struct qib_devdata *dd, int dev, int addr,
+ void *buffer, int len)
+{
+ int ret;
+ u8 *bp = buffer;
+
+ ret = 1;
+
+ if (dev == QIB_TWSI_NO_DEV) {
+ /* legacy not-really-I2C */
+ addr = (addr << 1) | READ_CMD;
+ ret = qib_twsi_wr(dd, addr, QIB_TWSI_START);
+ } else {
+ /* Actual I2C */
+ ret = qib_twsi_wr(dd, dev | WRITE_CMD, QIB_TWSI_START);
+ if (ret) {
+ stop_cmd(dd);
+ ret = 1;
+ goto bail;
+ }
+ /*
+ * SFF spec claims we do _not_ stop after the addr
+ * but simply issue a start with the "read" dev-addr.
+ * Since we are implicitly waiting for ACK here,
+ * we need t_buf (nominally 20uSec) before that start,
+ * and cannot rely on the delay built into the STOP.
+ */
+ ret = qib_twsi_wr(dd, addr, 0);
+ udelay(TWSI_BUF_WAIT_USEC);
+
+ if (ret) {
+ qib_dev_err(dd,
+ "Failed to write interface read addr %02X\n",
+ addr);
+ ret = 1;
+ goto bail;
+ }
+ ret = qib_twsi_wr(dd, dev | READ_CMD, QIB_TWSI_START);
+ }
+ if (ret) {
+ stop_cmd(dd);
+ ret = 1;
+ goto bail;
+ }
+
+ /*
+ * Block devices keep clocking data out as long as we ack,
+ * automatically incrementing the address. Some have "pages"
+ * whose boundaries will not be crossed, but the handling
+ * of these is left to the caller, who is in a better
+ * position to know.
+ */
+ while (len-- > 0) {
+ /*
+ * Get and store data, sending ACK if length remaining,
+ * else STOP
+ */
+ *bp++ = rd_byte(dd, !len);
+ }
+
+ ret = 0;
+
+bail:
+ return ret;
+}
+
+/*
+ * qib_twsi_blk_wr
+ * Formerly called qib_eeprom_internal_write, and only used for eeprom,
+ * but now the general interface for data transfer to twsi devices.
+ * One vestige of its former role is that it recognizes a device
+ * QIB_TWSI_NO_DEV and does the correct operation for the legacy part,
+ * which responded to all TWSI device codes, interpreting them as an
+ * address within the device. For all other devices on boards handled by
+ * this driver, the device code is followed by a one-byte "address" which
+ * selects the "register" or "offset" within the device to which data
+ * should be written.
+ */
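+/*
+ * Writes below go out in chunks of at most 4 bytes (sub_len), presumably
+ * to stay within the smallest supported device write page; after each
+ * chunk the routine polls by re-issuing a read START until the device
+ * acks, i.e. until its internal write cycle has completed.
+ */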
+int qib_twsi_blk_wr(struct qib_devdata *dd, int dev, int addr,
+ const void *buffer, int len)
+{
+ int sub_len;
+ const u8 *bp = buffer;
+ int max_wait_time, i;
+ int ret = 1;
+
+ while (len > 0) {
+ if (dev == QIB_TWSI_NO_DEV) {
+ if (qib_twsi_wr(dd, (addr << 1) | WRITE_CMD,
+ QIB_TWSI_START)) {
+ goto failed_write;
+ }
+ } else {
+ /* Real I2C */
+ if (qib_twsi_wr(dd, dev | WRITE_CMD, QIB_TWSI_START))
+ goto failed_write;
+ ret = qib_twsi_wr(dd, addr, 0);
+ if (ret) {
+ qib_dev_err(dd, "Failed to write interface"
+ " write addr %02X\n", addr);
+ goto failed_write;
+ }
+ }
+
+ sub_len = min(len, 4);
+ addr += sub_len;
+ len -= sub_len;
+
+ for (i = 0; i < sub_len; i++)
+ if (qib_twsi_wr(dd, *bp++, 0))
+ goto failed_write;
+
+ stop_cmd(dd);
+
+ /*
+ * Wait for the write to complete by waiting for a successful
+ * read (the chip replies with a zero after the write cmd
+ * completes, and before it writes to the eeprom).
+ * The startcmd for the read will fail the ack until
+ * the writes have completed. We do this inline to avoid
+ * the debug prints that are in the real read routine
+ * if the startcmd fails.
+ * We also use the proper device address, so it doesn't matter
+ * whether we have a real eeprom_dev; the legacy part accepts any address.
+ */
+ max_wait_time = 100;
+ while (qib_twsi_wr(dd, dev | READ_CMD, QIB_TWSI_START)) {
+ stop_cmd(dd);
+ if (!--max_wait_time)
+ goto failed_write;
+ }
+ /* now read (and ignore) the resulting byte */
+ rd_byte(dd, 1);
+ }
+
+ ret = 0;
+ goto bail;
+
+failed_write:
+ stop_cmd(dd);
+ ret = 1;
+
+bail:
+ return ret;
+}
diff --git a/drivers/infiniband/hw/qib/qib_tx.c b/drivers/infiniband/hw/qib/qib_tx.c
new file mode 100644
index 000000000000..f7eb1ddff5f3
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_tx.c
@@ -0,0 +1,557 @@
+/*
+ * Copyright (c) 2008, 2009, 2010 QLogic Corporation. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/pci.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+
+#include "qib.h"
+
+static unsigned qib_hol_timeout_ms = 3000;
+module_param_named(hol_timeout_ms, qib_hol_timeout_ms, uint, S_IRUGO);
+MODULE_PARM_DESC(hol_timeout_ms,
+ "duration of user app suspension after link failure");
+
+unsigned qib_sdma_fetch_arb = 1;
+module_param_named(fetch_arb, qib_sdma_fetch_arb, uint, S_IRUGO);
+MODULE_PARM_DESC(fetch_arb, "IBA7220: change SDMA descriptor arbitration");
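+/*
+ * Both parameters are read-only at runtime (S_IRUGO), so they would be
+ * set at load time, e.g. "modprobe ib_qib hol_timeout_ms=5000" (assuming
+ * the usual ib_qib module name).
+ */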
+
+/**
+ * qib_disarm_piobufs - cancel a range of PIO buffers
+ * @dd: the qlogic_ib device
+ * @first: the first PIO buffer to cancel
+ * @cnt: the number of PIO buffers to cancel
+ *
+ * Cancel a range of PIO buffers. Used at user process close,
+ * in case it died while writing to a PIO buffer.
+ */
+void qib_disarm_piobufs(struct qib_devdata *dd, unsigned first, unsigned cnt)
+{
+ unsigned long flags;
+ unsigned i;
+ unsigned last;
+
+ last = first + cnt;
+ spin_lock_irqsave(&dd->pioavail_lock, flags);
+ for (i = first; i < last; i++) {
+ __clear_bit(i, dd->pio_need_disarm);
+ dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(i));
+ }
+ spin_unlock_irqrestore(&dd->pioavail_lock, flags);
+}
+
+/*
+ * This is called by a user process when it sees the DISARM_BUFS event
+ * bit is set.
+ */
+int qib_disarm_piobufs_ifneeded(struct qib_ctxtdata *rcd)
+{
+ struct qib_devdata *dd = rcd->dd;
+ unsigned i;
+ unsigned last;
+ unsigned n = 0;
+
+ last = rcd->pio_base + rcd->piocnt;
+ /*
+ * Don't need uctxt_lock here, since user has called in to us.
+ * Clear at start in case more interrupts set bits while we
+ * are disarming
+ */
+ if (rcd->user_event_mask) {
+ /*
+ * subctxt_cnt is 0 if not shared, so do base
+ * separately, first, then remaining subctxt, if any
+ */
+ clear_bit(_QIB_EVENT_DISARM_BUFS_BIT, &rcd->user_event_mask[0]);
+ for (i = 1; i < rcd->subctxt_cnt; i++)
+ clear_bit(_QIB_EVENT_DISARM_BUFS_BIT,
+ &rcd->user_event_mask[i]);
+ }
+ spin_lock_irq(&dd->pioavail_lock);
+ for (i = rcd->pio_base; i < last; i++) {
+ if (__test_and_clear_bit(i, dd->pio_need_disarm)) {
+ n++;
+ dd->f_sendctrl(rcd->ppd, QIB_SENDCTRL_DISARM_BUF(i));
+ }
+ }
+ spin_unlock_irq(&dd->pioavail_lock);
+ return 0;
+}
+
+static struct qib_pportdata *is_sdma_buf(struct qib_devdata *dd, unsigned i)
+{
+ struct qib_pportdata *ppd;
+ unsigned pidx;
+
+ for (pidx = 0; pidx < dd->num_pports; pidx++) {
+ ppd = dd->pport + pidx;
+ if (i >= ppd->sdma_state.first_sendbuf &&
+ i < ppd->sdma_state.last_sendbuf)
+ return ppd;
+ }
+ return NULL;
+}
+
+/*
+ * Return true if send buffer is being used by a user context.
+ * Sets _QIB_EVENT_DISARM_BUFS_BIT in user_event_mask as a side effect
+ */
+static int find_ctxt(struct qib_devdata *dd, unsigned bufn)
+{
+ struct qib_ctxtdata *rcd;
+ unsigned ctxt;
+ int ret = 0;
+
+ spin_lock(&dd->uctxt_lock);
+ for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {
+ rcd = dd->rcd[ctxt];
+ if (!rcd || bufn < rcd->pio_base ||
+ bufn >= rcd->pio_base + rcd->piocnt)
+ continue;
+ if (rcd->user_event_mask) {
+ int i;
+ /*
+ * subctxt_cnt is 0 if not shared, so do base
+ * separately, first, then remaining subctxt, if any
+ */
+ set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
+ &rcd->user_event_mask[0]);
+ for (i = 1; i < rcd->subctxt_cnt; i++)
+ set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
+ &rcd->user_event_mask[i]);
+ }
+ ret = 1;
+ break;
+ }
+ spin_unlock(&dd->uctxt_lock);
+
+ return ret;
+}
+
+/*
+ * Disarm a set of send buffers. If the buffer might be actively being
+ * written to, mark the buffer to be disarmed later when it is not being
+ * written to.
+ *
+ * This should only be called from the IRQ error handler.
+ */
+void qib_disarm_piobufs_set(struct qib_devdata *dd, unsigned long *mask,
+ unsigned cnt)
+{
+ struct qib_pportdata *ppd, *pppd[dd->num_pports];
+ unsigned i;
+ unsigned long flags;
+
+ for (i = 0; i < dd->num_pports; i++)
+ pppd[i] = NULL;
+
+ for (i = 0; i < cnt; i++) {
+ int which;
+ if (!test_bit(i, mask))
+ continue;
+ /*
+ * If the buffer is owned by the DMA hardware,
+ * reset the DMA engine.
+ */
+ ppd = is_sdma_buf(dd, i);
+ if (ppd) {
+ pppd[ppd->port] = ppd;
+ continue;
+ }
+ /*
+ * If the kernel is writing the buffer or the buffer is
+ * owned by a user process, we can't clear it yet.
+ */
+ spin_lock_irqsave(&dd->pioavail_lock, flags);
+ if (test_bit(i, dd->pio_writing) ||
+ (!test_bit(i << 1, dd->pioavailkernel) &&
+ find_ctxt(dd, i))) {
+ __set_bit(i, dd->pio_need_disarm);
+ which = 0;
+ } else {
+ which = 1;
+ dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(i));
+ }
+ spin_unlock_irqrestore(&dd->pioavail_lock, flags);
+ }
+
+ /* do cancel_sends once per port that had sdma piobufs in error */
+ for (i = 0; i < dd->num_pports; i++)
+ if (pppd[i])
+ qib_cancel_sends(pppd[i]);
+}
+
+/**
+ * update_send_bufs - update shadow copy of the PIO availability map
+ * @dd: the qlogic_ib device
+ *
+ * called whenever our local copy indicates we have run out of send buffers
+ */
+static void update_send_bufs(struct qib_devdata *dd)
+{
+ unsigned long flags;
+ unsigned i;
+ const unsigned piobregs = dd->pioavregs;
+
+ /*
+ * If the generation (check) bits have changed, then we update the
+ * busy bit for the corresponding PIO buffer. This algorithm will
+ * modify positions to the value they already have in some cases
+ * (i.e., no change), but it's faster than changing only the bits
+ * that have changed.
+ *
+ * We would like to do this atomically, to avoid spinlocks in the
+ * critical send path, but that's not really possible, given the
+ * type of changes, and that this routine could be called on
+ * multiple CPUs simultaneously, so we lock in this routine only,
+ * to avoid conflicting updates; all we change is the shadow, and
+ * it's a single 64-bit memory location, so by definition the update
+ * is atomic in terms of what other CPUs can see in testing the
+ * bits. The spin_lock overhead isn't too bad, since it only
+ * happens when all buffers are in use, so only CPU overhead, not
+ * latency or bandwidth is affected.
+ */
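+ /*
+ * Shadow layout (see qib_getsendbuf_range() and qib_chg_pioavailkernel()):
+ * each send buffer i uses two adjacent bits, the generation/check bit at
+ * 2*i and the busy bit at 2*i+1, so each 64-bit availability register
+ * covers 32 buffers.
+ */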
+ if (!dd->pioavailregs_dma)
+ return;
+ spin_lock_irqsave(&dd->pioavail_lock, flags);
+ for (i = 0; i < piobregs; i++) {
+ u64 pchbusy, pchg, piov, pnew;
+
+ piov = le64_to_cpu(dd->pioavailregs_dma[i]);
+ pchg = dd->pioavailkernel[i] &
+ ~(dd->pioavailshadow[i] ^ piov);
+ pchbusy = pchg << QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT;
+ if (pchg && (pchbusy & dd->pioavailshadow[i])) {
+ pnew = dd->pioavailshadow[i] & ~pchbusy;
+ pnew |= piov & pchbusy;
+ dd->pioavailshadow[i] = pnew;
+ }
+ }
+ spin_unlock_irqrestore(&dd->pioavail_lock, flags);
+}
+
+/*
+ * Debugging code and stats updates if no pio buffers available.
+ */
+static noinline void no_send_bufs(struct qib_devdata *dd)
+{
+ dd->upd_pio_shadow = 1;
+
+ /* not atomic, but if we lose a stat count in a while, that's OK */
+ qib_stats.sps_nopiobufs++;
+}
+
+/*
+ * Common code for normal driver send buffer allocation, and reserved
+ * allocation.
+ *
+ * Do appropriate marking as busy, etc.
+ * Returns buffer pointer if one is found, otherwise NULL.
+ */
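+/*
+ * A successful return also marks buffer *pbufnum as being written
+ * (dd->pio_writing); the caller is expected to call qib_sendbuf_done()
+ * once it has finished filling the buffer, so any pending disarm can
+ * then be applied.
+ */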
+u32 __iomem *qib_getsendbuf_range(struct qib_devdata *dd, u32 *pbufnum,
+ u32 first, u32 last)
+{
+ unsigned i, j, updated = 0;
+ unsigned nbufs;
+ unsigned long flags;
+ unsigned long *shadow = dd->pioavailshadow;
+ u32 __iomem *buf;
+
+ if (!(dd->flags & QIB_PRESENT))
+ return NULL;
+
+ nbufs = last - first + 1; /* number in range to check */
+ if (dd->upd_pio_shadow) {
+ /*
+ * Minor optimization. If we had no buffers on last call,
+ * start out by doing the update; continue and do scan even
+ * if no buffers were updated, to be paranoid.
+ */
+ update_send_bufs(dd);
+ updated++;
+ }
+ i = first;
+rescan:
+ /*
+ * While test_and_set_bit() is atomic, we do that and then the
+ * change_bit(), and the pair is not. See if this is the cause
+ * of the remaining armlaunch errors.
+ */
+ spin_lock_irqsave(&dd->pioavail_lock, flags);
+ for (j = 0; j < nbufs; j++, i++) {
+ if (i > last)
+ i = first;
+ if (__test_and_set_bit((2 * i) + 1, shadow))
+ continue;
+ /* flip generation bit */
+ __change_bit(2 * i, shadow);
+ /* remember that the buffer can be written to now */
+ __set_bit(i, dd->pio_writing);
+ break;
+ }
+ spin_unlock_irqrestore(&dd->pioavail_lock, flags);
+
+ if (j == nbufs) {
+ if (!updated) {
+ /*
+ * First time through; shadow exhausted, but there may
+ * be buffers available; try an update and then rescan.
+ */
+ update_send_bufs(dd);
+ updated++;
+ i = first;
+ goto rescan;
+ }
+ no_send_bufs(dd);
+ buf = NULL;
+ } else {
+ if (i < dd->piobcnt2k)
+ buf = (u32 __iomem *)(dd->pio2kbase +
+ i * dd->palign);
+ else
+ buf = (u32 __iomem *)(dd->pio4kbase +
+ (i - dd->piobcnt2k) * dd->align4k);
+ if (pbufnum)
+ *pbufnum = i;
+ dd->upd_pio_shadow = 0;
+ }
+
+ return buf;
+}
+
+/*
+ * Record that the caller is finished writing to the buffer so we don't
+ * disarm it while it is being written and disarm it now if needed.
+ */
+void qib_sendbuf_done(struct qib_devdata *dd, unsigned n)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dd->pioavail_lock, flags);
+ __clear_bit(n, dd->pio_writing);
+ if (__test_and_clear_bit(n, dd->pio_need_disarm))
+ dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(n));
+ spin_unlock_irqrestore(&dd->pioavail_lock, flags);
+}
+
+/**
+ * qib_chg_pioavailkernel - change which send buffers are available for kernel
+ * @dd: the qlogic_ib device
+ * @start: the starting send buffer number
+ * @len: the number of send buffers
+ * @avail: true if the buffers are available for kernel use, false otherwise
+ * @rcd: the context associated with the buffers (passed to f_txchk_change())
+ */
+void qib_chg_pioavailkernel(struct qib_devdata *dd, unsigned start,
+ unsigned len, u32 avail, struct qib_ctxtdata *rcd)
+{
+ unsigned long flags;
+ unsigned end;
+ unsigned ostart = start;
+
+ /* There are two bits per send buffer (busy and generation) */
+ start *= 2;
+ end = start + len * 2;
+
+ spin_lock_irqsave(&dd->pioavail_lock, flags);
+ /* Set or clear the busy bit in the shadow. */
+ while (start < end) {
+ if (avail) {
+ unsigned long dma;
+ int i;
+
+ /*
+ * The BUSY bit will never be set, because we disarm
+ * the user buffers before we hand them back to the
+ * kernel. We do have to make sure the generation
+ * bit is set correctly in shadow, since it could
+ * have changed many times while allocated to user.
+ * We can't use the bitmap functions on the full
+ * dma array because it is always little-endian, so
+ * we have to flip to host-order first.
+ * BITS_PER_LONG is slightly wrong, since it's
+ * always 64 bits per register in the chip...
+ * We only work on 64-bit kernels, so that's OK.
+ */
+ i = start / BITS_PER_LONG;
+ __clear_bit(QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT + start,
+ dd->pioavailshadow);
+ dma = (unsigned long)
+ le64_to_cpu(dd->pioavailregs_dma[i]);
+ if (test_bit((QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT +
+ start) % BITS_PER_LONG, &dma))
+ __set_bit(QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT +
+ start, dd->pioavailshadow);
+ else
+ __clear_bit(QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT
+ + start, dd->pioavailshadow);
+ __set_bit(start, dd->pioavailkernel);
+ } else {
+ __set_bit(start + QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT,
+ dd->pioavailshadow);
+ __clear_bit(start, dd->pioavailkernel);
+ }
+ start += 2;
+ }
+
+ spin_unlock_irqrestore(&dd->pioavail_lock, flags);
+
+ dd->f_txchk_change(dd, ostart, len, avail, rcd);
+}
+
+/*
+ * Flush all sends that might be in the ready to send state, as well as any
+ * that are in the process of being sent. Used whenever we need to be
+ * sure the send side is idle. Cleans up all buffer state by canceling
+ * all pio buffers, and issuing an abort, which cleans up anything in the
+ * launch fifo. The cancel is superfluous on some chip versions, but
+ * it's safer to always do it.
+ * PIOAvail bits are updated by the chip as if a normal send had happened.
+ */
+void qib_cancel_sends(struct qib_pportdata *ppd)
+{
+ struct qib_devdata *dd = ppd->dd;
+ struct qib_ctxtdata *rcd;
+ unsigned long flags;
+ unsigned ctxt;
+ unsigned i;
+ unsigned last;
+
+ /*
+ * Tell PSM to disarm buffers again before trying to reuse them.
+ * We need to be sure the rcd doesn't change out from under us
+ * while we do so. We hold the two locks sequentially. We might
+ * needlessly set some need_disarm bits as a result, if the
+ * context is closed after we release the uctxt_lock, but that's
+ * fairly benign, and safer than nesting the locks.
+ */
+ for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {
+ spin_lock_irqsave(&dd->uctxt_lock, flags);
+ rcd = dd->rcd[ctxt];
+ if (rcd && rcd->ppd == ppd) {
+ last = rcd->pio_base + rcd->piocnt;
+ if (rcd->user_event_mask) {
+ /*
+ * subctxt_cnt is 0 if not shared, so do base
+ * separately, first, then remaining subctxt,
+ * if any
+ */
+ set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
+ &rcd->user_event_mask[0]);
+ for (i = 1; i < rcd->subctxt_cnt; i++)
+ set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
+ &rcd->user_event_mask[i]);
+ }
+ i = rcd->pio_base;
+ spin_unlock_irqrestore(&dd->uctxt_lock, flags);
+ spin_lock_irqsave(&dd->pioavail_lock, flags);
+ for (; i < last; i++)
+ __set_bit(i, dd->pio_need_disarm);
+ spin_unlock_irqrestore(&dd->pioavail_lock, flags);
+ } else
+ spin_unlock_irqrestore(&dd->uctxt_lock, flags);
+ }
+
+ if (!(dd->flags & QIB_HAS_SEND_DMA))
+ dd->f_sendctrl(ppd, QIB_SENDCTRL_DISARM_ALL |
+ QIB_SENDCTRL_FLUSH);
+}
+
+/*
+ * Force an update of in-memory copy of the pioavail registers, when
+ * needed for any of a variety of reasons.
+ * If already off, this routine is a nop, on the assumption that the
+ * caller (or set of callers) will "do the right thing".
+ * This is a per-device operation, so just the first port.
+ */
+void qib_force_pio_avail_update(struct qib_devdata *dd)
+{
+ dd->f_sendctrl(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
+}
+
+void qib_hol_down(struct qib_pportdata *ppd)
+{
+ /*
+ * Cancel sends when the link goes DOWN so that we aren't doing it
+ * at INIT when we might be trying to send SMI packets.
+ */
+ if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
+ qib_cancel_sends(ppd);
+}
+
+/*
+ * Link is at INIT.
+ * We start the HoL timer so we can detect stuck packets blocking SMP replies.
+ * Timer may already be running, so use mod_timer, not add_timer.
+ */
+void qib_hol_init(struct qib_pportdata *ppd)
+{
+ if (ppd->hol_state != QIB_HOL_INIT) {
+ ppd->hol_state = QIB_HOL_INIT;
+ mod_timer(&ppd->hol_timer,
+ jiffies + msecs_to_jiffies(qib_hol_timeout_ms));
+ }
+}
+
+/*
+ * Link is up, continue any user processes, and ensure timer
+ * is a nop, if running. Let timer keep running, if set; it
+ * will nop when it sees the link is up.
+ */
+void qib_hol_up(struct qib_pportdata *ppd)
+{
+ ppd->hol_state = QIB_HOL_UP;
+}
+
+/*
+ * This is only called via the timer.
+ */
+void qib_hol_event(unsigned long opaque)
+{
+ struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
+
+ /* If hardware error, etc, skip. */
+ if (!(ppd->dd->flags & QIB_INITTED))
+ return;
+
+ if (ppd->hol_state != QIB_HOL_UP) {
+ /*
+ * Try to flush sends in case a stuck packet is blocking
+ * SMP replies.
+ */
+ qib_hol_down(ppd);
+ mod_timer(&ppd->hol_timer,
+ jiffies + msecs_to_jiffies(qib_hol_timeout_ms));
+ }
+}
diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c
new file mode 100644
index 000000000000..6c7fe78cca64
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_uc.c
@@ -0,0 +1,555 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
+ * All rights reserved.
+ * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "qib.h"
+
+/* cut down ridiculously long IB macro names */
+#define OP(x) IB_OPCODE_UC_##x
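+/* e.g. OP(SEND_FIRST) expands to IB_OPCODE_UC_SEND_FIRST */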
+
+/**
+ * qib_make_uc_req - construct a request packet (SEND, RDMA write)
+ * @qp: a pointer to the QP
+ *
+ * Return 1 if constructed; otherwise, return 0.
+ */
+int qib_make_uc_req(struct qib_qp *qp)
+{
+ struct qib_other_headers *ohdr;
+ struct qib_swqe *wqe;
+ unsigned long flags;
+ u32 hwords;
+ u32 bth0;
+ u32 len;
+ u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
+ int ret = 0;
+
+ spin_lock_irqsave(&qp->s_lock, flags);
+
+ if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_SEND_OK)) {
+ if (!(ib_qib_state_ops[qp->state] & QIB_FLUSH_SEND))
+ goto bail;
+ /* We are in the error state, flush the work request. */
+ if (qp->s_last == qp->s_head)
+ goto bail;
+ /* If DMAs are in progress, we can't flush immediately. */
+ if (atomic_read(&qp->s_dma_busy)) {
+ qp->s_flags |= QIB_S_WAIT_DMA;
+ goto bail;
+ }
+ wqe = get_swqe_ptr(qp, qp->s_last);
+ qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
+ goto done;
+ }
+
+ ohdr = &qp->s_hdr.u.oth;
+ if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
+ ohdr = &qp->s_hdr.u.l.oth;
+
+ /* header size in 32-bit words LRH+BTH = (8+12)/4. */
+ hwords = 5;
+ bth0 = 0;
+
+ /* Get the next send request. */
+ wqe = get_swqe_ptr(qp, qp->s_cur);
+ qp->s_wqe = NULL;
+ switch (qp->s_state) {
+ default:
+ if (!(ib_qib_state_ops[qp->state] &
+ QIB_PROCESS_NEXT_SEND_OK))
+ goto bail;
+ /* Check if send work queue is empty. */
+ if (qp->s_cur == qp->s_head)
+ goto bail;
+ /*
+ * Start a new request.
+ */
+ wqe->psn = qp->s_next_psn;
+ qp->s_psn = qp->s_next_psn;
+ qp->s_sge.sge = wqe->sg_list[0];
+ qp->s_sge.sg_list = wqe->sg_list + 1;
+ qp->s_sge.num_sge = wqe->wr.num_sge;
+ qp->s_sge.total_len = wqe->length;
+ len = wqe->length;
+ qp->s_len = len;
+ switch (wqe->wr.opcode) {
+ case IB_WR_SEND:
+ case IB_WR_SEND_WITH_IMM:
+ if (len > pmtu) {
+ qp->s_state = OP(SEND_FIRST);
+ len = pmtu;
+ break;
+ }
+ if (wqe->wr.opcode == IB_WR_SEND)
+ qp->s_state = OP(SEND_ONLY);
+ else {
+ qp->s_state =
+ OP(SEND_ONLY_WITH_IMMEDIATE);
+ /* Immediate data comes after the BTH */
+ ohdr->u.imm_data = wqe->wr.ex.imm_data;
+ hwords += 1;
+ }
+ if (wqe->wr.send_flags & IB_SEND_SOLICITED)
+ bth0 |= IB_BTH_SOLICITED;
+ qp->s_wqe = wqe;
+ if (++qp->s_cur >= qp->s_size)
+ qp->s_cur = 0;
+ break;
+
+ case IB_WR_RDMA_WRITE:
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ ohdr->u.rc.reth.vaddr =
+ cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
+ ohdr->u.rc.reth.rkey =
+ cpu_to_be32(wqe->wr.wr.rdma.rkey);
+ ohdr->u.rc.reth.length = cpu_to_be32(len);
+ hwords += sizeof(struct ib_reth) / 4;
+ if (len > pmtu) {
+ qp->s_state = OP(RDMA_WRITE_FIRST);
+ len = pmtu;
+ break;
+ }
+ if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
+ qp->s_state = OP(RDMA_WRITE_ONLY);
+ else {
+ qp->s_state =
+ OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
+ /* Immediate data comes after the RETH */
+ ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
+ hwords += 1;
+ if (wqe->wr.send_flags & IB_SEND_SOLICITED)
+ bth0 |= IB_BTH_SOLICITED;
+ }
+ qp->s_wqe = wqe;
+ if (++qp->s_cur >= qp->s_size)
+ qp->s_cur = 0;
+ break;
+
+ default:
+ goto bail;
+ }
+ break;
+
+ case OP(SEND_FIRST):
+ qp->s_state = OP(SEND_MIDDLE);
+ /* FALLTHROUGH */
+ case OP(SEND_MIDDLE):
+ len = qp->s_len;
+ if (len > pmtu) {
+ len = pmtu;
+ break;
+ }
+ if (wqe->wr.opcode == IB_WR_SEND)
+ qp->s_state = OP(SEND_LAST);
+ else {
+ qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
+ /* Immediate data comes after the BTH */
+ ohdr->u.imm_data = wqe->wr.ex.imm_data;
+ hwords += 1;
+ }
+ if (wqe->wr.send_flags & IB_SEND_SOLICITED)
+ bth0 |= IB_BTH_SOLICITED;
+ qp->s_wqe = wqe;
+ if (++qp->s_cur >= qp->s_size)
+ qp->s_cur = 0;
+ break;
+
+ case OP(RDMA_WRITE_FIRST):
+ qp->s_state = OP(RDMA_WRITE_MIDDLE);
+ /* FALLTHROUGH */
+ case OP(RDMA_WRITE_MIDDLE):
+ len = qp->s_len;
+ if (len > pmtu) {
+ len = pmtu;
+ break;
+ }
+ if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
+ qp->s_state = OP(RDMA_WRITE_LAST);
+ else {
+ qp->s_state =
+ OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
+ /* Immediate data comes after the BTH */
+ ohdr->u.imm_data = wqe->wr.ex.imm_data;
+ hwords += 1;
+ if (wqe->wr.send_flags & IB_SEND_SOLICITED)
+ bth0 |= IB_BTH_SOLICITED;
+ }
+ qp->s_wqe = wqe;
+ if (++qp->s_cur >= qp->s_size)
+ qp->s_cur = 0;
+ break;
+ }
+ qp->s_len -= len;
+ qp->s_hdrwords = hwords;
+ qp->s_cur_sge = &qp->s_sge;
+ qp->s_cur_size = len;
+ qib_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24),
+ qp->s_next_psn++ & QIB_PSN_MASK);
+done:
+ ret = 1;
+ goto unlock;
+
+bail:
+ qp->s_flags &= ~QIB_S_BUSY;
+unlock:
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ return ret;
+}
+
+/**
+ * qib_uc_rcv - handle an incoming UC packet
+ * @ibp: the port the packet came in on
+ * @hdr: the header of the packet
+ * @has_grh: true if the packet has a GRH
+ * @data: the packet data
+ * @tlen: the length of the packet
+ * @qp: the QP for this packet.
+ *
+ * This is called from qib_qp_rcv() to process an incoming UC packet
+ * for the given QP.
+ * Called at interrupt level.
+ */
+void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
+ int has_grh, void *data, u32 tlen, struct qib_qp *qp)
+{
+ struct qib_other_headers *ohdr;
+ unsigned long flags;
+ u32 opcode;
+ u32 hdrsize;
+ u32 psn;
+ u32 pad;
+ struct ib_wc wc;
+ u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
+ struct ib_reth *reth;
+ int ret;
+
+ /* Check for GRH */
+ if (!has_grh) {
+ ohdr = &hdr->u.oth;
+ hdrsize = 8 + 12; /* LRH + BTH */
+ } else {
+ ohdr = &hdr->u.l.oth;
+ hdrsize = 8 + 40 + 12; /* LRH + GRH + BTH */
+ }
+
+ opcode = be32_to_cpu(ohdr->bth[0]);
+ spin_lock_irqsave(&qp->s_lock, flags);
+ if (qib_ruc_check_hdr(ibp, hdr, has_grh, qp, opcode))
+ goto sunlock;
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+
+ psn = be32_to_cpu(ohdr->bth[2]);
+ opcode >>= 24;
+ memset(&wc, 0, sizeof wc);
+
+ /* Prevent simultaneous processing after APM on different CPUs */
+ spin_lock(&qp->r_lock);
+
+ /* Compare the PSN versus the expected PSN. */
+ if (unlikely(qib_cmp24(psn, qp->r_psn) != 0)) {
+ /*
+ * Handle a sequence error.
+ * Silently drop any current message.
+ */
+ qp->r_psn = psn;
+inv:
+ if (qp->r_state == OP(SEND_FIRST) ||
+ qp->r_state == OP(SEND_MIDDLE)) {
+ set_bit(QIB_R_REWIND_SGE, &qp->r_aflags);
+ qp->r_sge.num_sge = 0;
+ } else
+ while (qp->r_sge.num_sge) {
+ atomic_dec(&qp->r_sge.sge.mr->refcount);
+ if (--qp->r_sge.num_sge)
+ qp->r_sge.sge = *qp->r_sge.sg_list++;
+ }
+ qp->r_state = OP(SEND_LAST);
+ switch (opcode) {
+ case OP(SEND_FIRST):
+ case OP(SEND_ONLY):
+ case OP(SEND_ONLY_WITH_IMMEDIATE):
+ goto send_first;
+
+ case OP(RDMA_WRITE_FIRST):
+ case OP(RDMA_WRITE_ONLY):
+ case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
+ goto rdma_first;
+
+ default:
+ goto drop;
+ }
+ }
+
+ /* Check for opcode sequence errors. */
+ switch (qp->r_state) {
+ case OP(SEND_FIRST):
+ case OP(SEND_MIDDLE):
+ if (opcode == OP(SEND_MIDDLE) ||
+ opcode == OP(SEND_LAST) ||
+ opcode == OP(SEND_LAST_WITH_IMMEDIATE))
+ break;
+ goto inv;
+
+ case OP(RDMA_WRITE_FIRST):
+ case OP(RDMA_WRITE_MIDDLE):
+ if (opcode == OP(RDMA_WRITE_MIDDLE) ||
+ opcode == OP(RDMA_WRITE_LAST) ||
+ opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
+ break;
+ goto inv;
+
+ default:
+ if (opcode == OP(SEND_FIRST) ||
+ opcode == OP(SEND_ONLY) ||
+ opcode == OP(SEND_ONLY_WITH_IMMEDIATE) ||
+ opcode == OP(RDMA_WRITE_FIRST) ||
+ opcode == OP(RDMA_WRITE_ONLY) ||
+ opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
+ break;
+ goto inv;
+ }
+
+ if (qp->state == IB_QPS_RTR && !(qp->r_flags & QIB_R_COMM_EST)) {
+ qp->r_flags |= QIB_R_COMM_EST;
+ if (qp->ibqp.event_handler) {
+ struct ib_event ev;
+
+ ev.device = qp->ibqp.device;
+ ev.element.qp = &qp->ibqp;
+ ev.event = IB_EVENT_COMM_EST;
+ qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
+ }
+ }
+
+ /* OK, process the packet. */
+ switch (opcode) {
+ case OP(SEND_FIRST):
+ case OP(SEND_ONLY):
+ case OP(SEND_ONLY_WITH_IMMEDIATE):
+send_first:
+ if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
+ qp->r_sge = qp->s_rdma_read_sge;
+ else {
+ ret = qib_get_rwqe(qp, 0);
+ if (ret < 0)
+ goto op_err;
+ if (!ret)
+ goto drop;
+ /*
+ * qp->s_rdma_read_sge will be the owner
+ * of the mr references.
+ */
+ qp->s_rdma_read_sge = qp->r_sge;
+ }
+ qp->r_rcv_len = 0;
+ if (opcode == OP(SEND_ONLY))
+ goto send_last;
+ else if (opcode == OP(SEND_ONLY_WITH_IMMEDIATE))
+ goto send_last_imm;
+ /* FALLTHROUGH */
+ case OP(SEND_MIDDLE):
+ /* Check for invalid length PMTU or posted rwqe len. */
+ if (unlikely(tlen != (hdrsize + pmtu + 4)))
+ goto rewind;
+ qp->r_rcv_len += pmtu;
+ if (unlikely(qp->r_rcv_len > qp->r_len))
+ goto rewind;
+ qib_copy_sge(&qp->r_sge, data, pmtu, 0);
+ break;
+
+ case OP(SEND_LAST_WITH_IMMEDIATE):
+send_last_imm:
+ wc.ex.imm_data = ohdr->u.imm_data;
+ hdrsize += 4;
+ wc.wc_flags = IB_WC_WITH_IMM;
+ /* FALLTHROUGH */
+ case OP(SEND_LAST):
+send_last:
+ /* Get the number of bytes the message was padded by. */
+ pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
+ /* Check for invalid length. */
+ /* XXX LAST len should be >= 1 */
+ if (unlikely(tlen < (hdrsize + pad + 4)))
+ goto rewind;
+ /* Don't count the CRC. */
+ tlen -= (hdrsize + pad + 4);
+ wc.byte_len = tlen + qp->r_rcv_len;
+ if (unlikely(wc.byte_len > qp->r_len))
+ goto rewind;
+ wc.opcode = IB_WC_RECV;
+last_imm:
+ qib_copy_sge(&qp->r_sge, data, tlen, 0);
+ while (qp->s_rdma_read_sge.num_sge) {
+ atomic_dec(&qp->s_rdma_read_sge.sge.mr->refcount);
+ if (--qp->s_rdma_read_sge.num_sge)
+ qp->s_rdma_read_sge.sge =
+ *qp->s_rdma_read_sge.sg_list++;
+ }
+ wc.wr_id = qp->r_wr_id;
+ wc.status = IB_WC_SUCCESS;
+ wc.qp = &qp->ibqp;
+ wc.src_qp = qp->remote_qpn;
+ wc.slid = qp->remote_ah_attr.dlid;
+ wc.sl = qp->remote_ah_attr.sl;
+ /* Signal completion event if the solicited bit is set. */
+ qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
+ (ohdr->bth[0] &
+ cpu_to_be32(IB_BTH_SOLICITED)) != 0);
+ break;
+
+ case OP(RDMA_WRITE_FIRST):
+ case OP(RDMA_WRITE_ONLY):
+ case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE): /* consume RWQE */
+rdma_first:
+ if (unlikely(!(qp->qp_access_flags &
+ IB_ACCESS_REMOTE_WRITE))) {
+ goto drop;
+ }
+ reth = &ohdr->u.rc.reth;
+ hdrsize += sizeof(*reth);
+ qp->r_len = be32_to_cpu(reth->length);
+ qp->r_rcv_len = 0;
+ qp->r_sge.sg_list = NULL;
+ if (qp->r_len != 0) {
+ u32 rkey = be32_to_cpu(reth->rkey);
+ u64 vaddr = be64_to_cpu(reth->vaddr);
+ int ok;
+
+ /* Check rkey */
+ ok = qib_rkey_ok(qp, &qp->r_sge.sge, qp->r_len,
+ vaddr, rkey, IB_ACCESS_REMOTE_WRITE);
+ if (unlikely(!ok))
+ goto drop;
+ qp->r_sge.num_sge = 1;
+ } else {
+ qp->r_sge.num_sge = 0;
+ qp->r_sge.sge.mr = NULL;
+ qp->r_sge.sge.vaddr = NULL;
+ qp->r_sge.sge.length = 0;
+ qp->r_sge.sge.sge_length = 0;
+ }
+ if (opcode == OP(RDMA_WRITE_ONLY))
+ goto rdma_last;
+ else if (opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
+ goto rdma_last_imm;
+ /* FALLTHROUGH */
+ case OP(RDMA_WRITE_MIDDLE):
+ /* Check for invalid length PMTU or posted rwqe len. */
+ if (unlikely(tlen != (hdrsize + pmtu + 4)))
+ goto drop;
+ qp->r_rcv_len += pmtu;
+ if (unlikely(qp->r_rcv_len > qp->r_len))
+ goto drop;
+ qib_copy_sge(&qp->r_sge, data, pmtu, 1);
+ break;
+
+ case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
+rdma_last_imm:
+ wc.ex.imm_data = ohdr->u.imm_data;
+ hdrsize += 4;
+ wc.wc_flags = IB_WC_WITH_IMM;
+
+ /* Get the number of bytes the message was padded by. */
+ pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
+ /* Check for invalid length. */
+ /* XXX LAST len should be >= 1 */
+ if (unlikely(tlen < (hdrsize + pad + 4)))
+ goto drop;
+ /* Don't count the CRC. */
+ tlen -= (hdrsize + pad + 4);
+ if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
+ goto drop;
+ if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
+ while (qp->s_rdma_read_sge.num_sge) {
+ atomic_dec(&qp->s_rdma_read_sge.sge.mr->
+ refcount);
+ if (--qp->s_rdma_read_sge.num_sge)
+ qp->s_rdma_read_sge.sge =
+ *qp->s_rdma_read_sge.sg_list++;
+ }
+ else {
+ ret = qib_get_rwqe(qp, 1);
+ if (ret < 0)
+ goto op_err;
+ if (!ret)
+ goto drop;
+ }
+ wc.byte_len = qp->r_len;
+ wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
+ goto last_imm;
+
+ case OP(RDMA_WRITE_LAST):
+rdma_last:
+ /* Get the number of bytes the message was padded by. */
+ pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
+ /* Check for invalid length. */
+ /* XXX LAST len should be >= 1 */
+ if (unlikely(tlen < (hdrsize + pad + 4)))
+ goto drop;
+ /* Don't count the CRC. */
+ tlen -= (hdrsize + pad + 4);
+ if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
+ goto drop;
+ qib_copy_sge(&qp->r_sge, data, tlen, 1);
+ while (qp->r_sge.num_sge) {
+ atomic_dec(&qp->r_sge.sge.mr->refcount);
+ if (--qp->r_sge.num_sge)
+ qp->r_sge.sge = *qp->r_sge.sg_list++;
+ }
+ break;
+
+ default:
+ /* Drop packet for unknown opcodes. */
+ goto drop;
+ }
+ qp->r_psn++;
+ qp->r_state = opcode;
+ spin_unlock(&qp->r_lock);
+ return;
+
+rewind:
+ set_bit(QIB_R_REWIND_SGE, &qp->r_aflags);
+ qp->r_sge.num_sge = 0;
+drop:
+ ibp->n_pkt_drops++;
+ spin_unlock(&qp->r_lock);
+ return;
+
+op_err:
+ qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
+ spin_unlock(&qp->r_lock);
+ return;
+
+sunlock:
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+}
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
new file mode 100644
index 000000000000..c838cda73347
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -0,0 +1,607 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <rdma/ib_smi.h>
+
+#include "qib.h"
+#include "qib_mad.h"
+
+/**
+ * qib_ud_loopback - handle send on loopback QPs
+ * @sqp: the sending QP
+ * @swqe: the send work request
+ *
+ * This is called from qib_make_ud_req() to forward a WQE addressed
+ * to the same HCA.
+ * Note that the receive interrupt handler may be calling qib_ud_rcv()
+ * while this is being called.
+ */
+static void qib_ud_loopback(struct qib_qp *sqp, struct qib_swqe *swqe)
+{
+ struct qib_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
+ struct qib_pportdata *ppd;
+ struct qib_qp *qp;
+ struct ib_ah_attr *ah_attr;
+ unsigned long flags;
+ struct qib_sge_state ssge;
+ struct qib_sge *sge;
+ struct ib_wc wc;
+ u32 length;
+
+ qp = qib_lookup_qpn(ibp, swqe->wr.wr.ud.remote_qpn);
+ if (!qp) {
+ ibp->n_pkt_drops++;
+ return;
+ }
+ if (qp->ibqp.qp_type != sqp->ibqp.qp_type ||
+ !(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
+ ibp->n_pkt_drops++;
+ goto drop;
+ }
+
+ ah_attr = &to_iah(swqe->wr.wr.ud.ah)->attr;
+ ppd = ppd_from_ibp(ibp);
+
+ if (qp->ibqp.qp_num > 1) {
+ u16 pkey1;
+ u16 pkey2;
+ u16 lid;
+
+ pkey1 = qib_get_pkey(ibp, sqp->s_pkey_index);
+ pkey2 = qib_get_pkey(ibp, qp->s_pkey_index);
+ if (unlikely(!qib_pkey_ok(pkey1, pkey2))) {
+ lid = ppd->lid | (ah_attr->src_path_bits &
+ ((1 << ppd->lmc) - 1));
+ qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY, pkey1,
+ ah_attr->sl,
+ sqp->ibqp.qp_num, qp->ibqp.qp_num,
+ cpu_to_be16(lid),
+ cpu_to_be16(ah_attr->dlid));
+ goto drop;
+ }
+ }
+
+ /*
+ * Check that the qkey matches (except for QP0, see 9.6.1.4.1).
+ * Qkeys with the high order bit set mean use the
+ * qkey from the QP context instead of the WR (see 10.2.5).
+ */
+ if (qp->ibqp.qp_num) {
+ u32 qkey;
+
+ qkey = (int)swqe->wr.wr.ud.remote_qkey < 0 ?
+ sqp->qkey : swqe->wr.wr.ud.remote_qkey;
+ if (unlikely(qkey != qp->qkey)) {
+ u16 lid;
+
+ lid = ppd->lid | (ah_attr->src_path_bits &
+ ((1 << ppd->lmc) - 1));
+ qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_QKEY, qkey,
+ ah_attr->sl,
+ sqp->ibqp.qp_num, qp->ibqp.qp_num,
+ cpu_to_be16(lid),
+ cpu_to_be16(ah_attr->dlid));
+ goto drop;
+ }
+ }
+
+ /*
+	 * A GRH is expected to precede the data even if not
+ * present on the wire.
+ */
+ length = swqe->length;
+ memset(&wc, 0, sizeof wc);
+ wc.byte_len = length + sizeof(struct ib_grh);
+
+ if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
+ wc.wc_flags = IB_WC_WITH_IMM;
+ wc.ex.imm_data = swqe->wr.ex.imm_data;
+ }
+
+ spin_lock_irqsave(&qp->r_lock, flags);
+
+ /*
+ * Get the next work request entry to find where to put the data.
+ */
+ if (qp->r_flags & QIB_R_REUSE_SGE)
+ qp->r_flags &= ~QIB_R_REUSE_SGE;
+ else {
+ int ret;
+
+ ret = qib_get_rwqe(qp, 0);
+ if (ret < 0) {
+ qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
+ goto bail_unlock;
+ }
+ if (!ret) {
+ if (qp->ibqp.qp_num == 0)
+ ibp->n_vl15_dropped++;
+ goto bail_unlock;
+ }
+ }
+ /* Silently drop packets which are too big. */
+ if (unlikely(wc.byte_len > qp->r_len)) {
+ qp->r_flags |= QIB_R_REUSE_SGE;
+ ibp->n_pkt_drops++;
+ goto bail_unlock;
+ }
+
+ if (ah_attr->ah_flags & IB_AH_GRH) {
+ qib_copy_sge(&qp->r_sge, &ah_attr->grh,
+ sizeof(struct ib_grh), 1);
+ wc.wc_flags |= IB_WC_GRH;
+ } else
+ qib_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1);
+ ssge.sg_list = swqe->sg_list + 1;
+ ssge.sge = *swqe->sg_list;
+ ssge.num_sge = swqe->wr.num_sge;
+ sge = &ssge.sge;
+ while (length) {
+ u32 len = sge->length;
+
+ if (len > length)
+ len = length;
+ if (len > sge->sge_length)
+ len = sge->sge_length;
+ BUG_ON(len == 0);
+ qib_copy_sge(&qp->r_sge, sge->vaddr, len, 1);
+ sge->vaddr += len;
+ sge->length -= len;
+ sge->sge_length -= len;
+ if (sge->sge_length == 0) {
+ if (--ssge.num_sge)
+ *sge = *ssge.sg_list++;
+ } else if (sge->length == 0 && sge->mr->lkey) {
+ if (++sge->n >= QIB_SEGSZ) {
+ if (++sge->m >= sge->mr->mapsz)
+ break;
+ sge->n = 0;
+ }
+ sge->vaddr =
+ sge->mr->map[sge->m]->segs[sge->n].vaddr;
+ sge->length =
+ sge->mr->map[sge->m]->segs[sge->n].length;
+ }
+ length -= len;
+ }
+ while (qp->r_sge.num_sge) {
+ atomic_dec(&qp->r_sge.sge.mr->refcount);
+ if (--qp->r_sge.num_sge)
+ qp->r_sge.sge = *qp->r_sge.sg_list++;
+ }
+ if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
+ goto bail_unlock;
+ wc.wr_id = qp->r_wr_id;
+ wc.status = IB_WC_SUCCESS;
+ wc.opcode = IB_WC_RECV;
+ wc.qp = &qp->ibqp;
+ wc.src_qp = sqp->ibqp.qp_num;
+ wc.pkey_index = qp->ibqp.qp_type == IB_QPT_GSI ?
+ swqe->wr.wr.ud.pkey_index : 0;
+ wc.slid = ppd->lid | (ah_attr->src_path_bits & ((1 << ppd->lmc) - 1));
+ wc.sl = ah_attr->sl;
+ wc.dlid_path_bits = ah_attr->dlid & ((1 << ppd->lmc) - 1);
+ wc.port_num = qp->port_num;
+ /* Signal completion event if the solicited bit is set. */
+ qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
+ swqe->wr.send_flags & IB_SEND_SOLICITED);
+ ibp->n_loop_pkts++;
+bail_unlock:
+ spin_unlock_irqrestore(&qp->r_lock, flags);
+drop:
+ if (atomic_dec_and_test(&qp->refcount))
+ wake_up(&qp->wait);
+}
+
+/**
+ * qib_make_ud_req - construct a UD request packet
+ * @qp: the QP
+ *
+ * Return 1 if constructed; otherwise, return 0.
+ */
+int qib_make_ud_req(struct qib_qp *qp)
+{
+ struct qib_other_headers *ohdr;
+ struct ib_ah_attr *ah_attr;
+ struct qib_pportdata *ppd;
+ struct qib_ibport *ibp;
+ struct qib_swqe *wqe;
+ unsigned long flags;
+ u32 nwords;
+ u32 extra_bytes;
+ u32 bth0;
+ u16 lrh0;
+ u16 lid;
+ int ret = 0;
+ int next_cur;
+
+ spin_lock_irqsave(&qp->s_lock, flags);
+
+ if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_NEXT_SEND_OK)) {
+ if (!(ib_qib_state_ops[qp->state] & QIB_FLUSH_SEND))
+ goto bail;
+ /* We are in the error state, flush the work request. */
+ if (qp->s_last == qp->s_head)
+ goto bail;
+ /* If DMAs are in progress, we can't flush immediately. */
+ if (atomic_read(&qp->s_dma_busy)) {
+ qp->s_flags |= QIB_S_WAIT_DMA;
+ goto bail;
+ }
+ wqe = get_swqe_ptr(qp, qp->s_last);
+ qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
+ goto done;
+ }
+
+ if (qp->s_cur == qp->s_head)
+ goto bail;
+
+ wqe = get_swqe_ptr(qp, qp->s_cur);
+ next_cur = qp->s_cur + 1;
+ if (next_cur >= qp->s_size)
+ next_cur = 0;
+
+ /* Construct the header. */
+ ibp = to_iport(qp->ibqp.device, qp->port_num);
+ ppd = ppd_from_ibp(ibp);
+ ah_attr = &to_iah(wqe->wr.wr.ud.ah)->attr;
+ if (ah_attr->dlid >= QIB_MULTICAST_LID_BASE) {
+ if (ah_attr->dlid != QIB_PERMISSIVE_LID)
+ ibp->n_multicast_xmit++;
+ else
+ ibp->n_unicast_xmit++;
+ } else {
+ ibp->n_unicast_xmit++;
+ lid = ah_attr->dlid & ~((1 << ppd->lmc) - 1);
+ if (unlikely(lid == ppd->lid)) {
+ /*
+ * If DMAs are in progress, we can't generate
+ * a completion for the loopback packet since
+ * it would be out of order.
+ * XXX Instead of waiting, we could queue a
+ * zero length descriptor so we get a callback.
+ */
+ if (atomic_read(&qp->s_dma_busy)) {
+ qp->s_flags |= QIB_S_WAIT_DMA;
+ goto bail;
+ }
+ qp->s_cur = next_cur;
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ qib_ud_loopback(qp, wqe);
+ spin_lock_irqsave(&qp->s_lock, flags);
+ qib_send_complete(qp, wqe, IB_WC_SUCCESS);
+ goto done;
+ }
+ }
+
+ qp->s_cur = next_cur;
+ extra_bytes = -wqe->length & 3;
+ nwords = (wqe->length + extra_bytes) >> 2;
+
+ /* header size in 32-bit words LRH+BTH+DETH = (8+12+8)/4. */
+ qp->s_hdrwords = 7;
+ qp->s_cur_size = wqe->length;
+ qp->s_cur_sge = &qp->s_sge;
+ qp->s_srate = ah_attr->static_rate;
+ qp->s_wqe = wqe;
+ qp->s_sge.sge = wqe->sg_list[0];
+ qp->s_sge.sg_list = wqe->sg_list + 1;
+ qp->s_sge.num_sge = wqe->wr.num_sge;
+ qp->s_sge.total_len = wqe->length;
+
+ if (ah_attr->ah_flags & IB_AH_GRH) {
+ /* Header size in 32-bit words. */
+ qp->s_hdrwords += qib_make_grh(ibp, &qp->s_hdr.u.l.grh,
+ &ah_attr->grh,
+ qp->s_hdrwords, nwords);
+ lrh0 = QIB_LRH_GRH;
+ ohdr = &qp->s_hdr.u.l.oth;
+ /*
+		 * Don't worry about sending to locally attached multicast
+		 * QPs; the spec leaves what happens unspecified.
+ */
+ } else {
+ /* Header size in 32-bit words. */
+ lrh0 = QIB_LRH_BTH;
+ ohdr = &qp->s_hdr.u.oth;
+ }
+ if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
+ qp->s_hdrwords++;
+ ohdr->u.ud.imm_data = wqe->wr.ex.imm_data;
+ bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24;
+ } else
+ bth0 = IB_OPCODE_UD_SEND_ONLY << 24;
+ lrh0 |= ah_attr->sl << 4;
+ if (qp->ibqp.qp_type == IB_QPT_SMI)
+ lrh0 |= 0xF000; /* Set VL (see ch. 13.5.3.1) */
+ else
+ lrh0 |= ibp->sl_to_vl[ah_attr->sl] << 12;
+ qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
+ qp->s_hdr.lrh[1] = cpu_to_be16(ah_attr->dlid); /* DEST LID */
+ qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
+ lid = ppd->lid;
+ if (lid) {
+ lid |= ah_attr->src_path_bits & ((1 << ppd->lmc) - 1);
+ qp->s_hdr.lrh[3] = cpu_to_be16(lid);
+ } else
+ qp->s_hdr.lrh[3] = IB_LID_PERMISSIVE;
+ if (wqe->wr.send_flags & IB_SEND_SOLICITED)
+ bth0 |= IB_BTH_SOLICITED;
+ bth0 |= extra_bytes << 20;
+ bth0 |= qp->ibqp.qp_type == IB_QPT_SMI ? QIB_DEFAULT_P_KEY :
+ qib_get_pkey(ibp, qp->ibqp.qp_type == IB_QPT_GSI ?
+ wqe->wr.wr.ud.pkey_index : qp->s_pkey_index);
+ ohdr->bth[0] = cpu_to_be32(bth0);
+ /*
+ * Use the multicast QP if the destination LID is a multicast LID.
+ */
+ ohdr->bth[1] = ah_attr->dlid >= QIB_MULTICAST_LID_BASE &&
+ ah_attr->dlid != QIB_PERMISSIVE_LID ?
+ cpu_to_be32(QIB_MULTICAST_QPN) :
+ cpu_to_be32(wqe->wr.wr.ud.remote_qpn);
+ ohdr->bth[2] = cpu_to_be32(qp->s_next_psn++ & QIB_PSN_MASK);
+ /*
+ * Qkeys with the high order bit set mean use the
+ * qkey from the QP context instead of the WR (see 10.2.5).
+ */
+ ohdr->u.ud.deth[0] = cpu_to_be32((int)wqe->wr.wr.ud.remote_qkey < 0 ?
+ qp->qkey : wqe->wr.wr.ud.remote_qkey);
+ ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num);
+
+done:
+ ret = 1;
+ goto unlock;
+
+bail:
+ qp->s_flags &= ~QIB_S_BUSY;
+unlock:
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ return ret;
+}
+
+static unsigned qib_lookup_pkey(struct qib_ibport *ibp, u16 pkey)
+{
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+ struct qib_devdata *dd = ppd->dd;
+ unsigned ctxt = ppd->hw_pidx;
+ unsigned i;
+
+ pkey &= 0x7fff; /* remove limited/full membership bit */
+
+ for (i = 0; i < ARRAY_SIZE(dd->rcd[ctxt]->pkeys); ++i)
+ if ((dd->rcd[ctxt]->pkeys[i] & 0x7fff) == pkey)
+ return i;
+
+ /*
+	 * We should not get here; it means the hardware failed to validate pkeys.
+ * Punt and return index 0.
+ */
+ return 0;
+}
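qib_lookup_pkey() masks off bit 15 because that bit only encodes limited versus full partition membership, not the partition itself. As a hedged aside (the driver's own qib_pkey_ok() helper is defined elsewhere, not in this file), the matching rule that the P_Key checks in this file rely on looks roughly like this:

#include <stdint.h>

/* Illustrative sketch of the P_Key compatibility rule: the low 15 bits must
 * match and be non-zero, and at least one of the two keys must carry the
 * full-membership bit (bit 15). */
static int pkeys_compatible(uint16_t pkey1, uint16_t pkey2)
{
	uint16_t p1 = pkey1 & 0x7fff;
	uint16_t p2 = pkey2 & 0x7fff;

	return p1 && p1 == p2 && ((pkey1 & 0x8000) || (pkey2 & 0x8000));
}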
+
+/**
+ * qib_ud_rcv - receive an incoming UD packet
+ * @ibp: the port the packet came in on
+ * @hdr: the packet header
+ * @has_grh: true if the packet has a GRH
+ * @data: the packet data
+ * @tlen: the packet length
+ * @qp: the QP the packet came on
+ *
+ * This is called from qib_qp_rcv() to process an incoming UD packet
+ * for the given QP.
+ * Called at interrupt level.
+ */
+void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
+ int has_grh, void *data, u32 tlen, struct qib_qp *qp)
+{
+ struct qib_other_headers *ohdr;
+ int opcode;
+ u32 hdrsize;
+ u32 pad;
+ struct ib_wc wc;
+ u32 qkey;
+ u32 src_qp;
+ u16 dlid;
+
+ /* Check for GRH */
+ if (!has_grh) {
+ ohdr = &hdr->u.oth;
+ hdrsize = 8 + 12 + 8; /* LRH + BTH + DETH */
+ } else {
+ ohdr = &hdr->u.l.oth;
+ hdrsize = 8 + 40 + 12 + 8; /* LRH + GRH + BTH + DETH */
+ }
+ qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
+ src_qp = be32_to_cpu(ohdr->u.ud.deth[1]) & QIB_QPN_MASK;
+
+ /* Get the number of bytes the message was padded by. */
+ pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
+ if (unlikely(tlen < (hdrsize + pad + 4))) {
+ /* Drop incomplete packets. */
+ ibp->n_pkt_drops++;
+ goto bail;
+ }
+ tlen -= hdrsize + pad + 4;
+
+ /*
+ * Check that the permissive LID is only used on QP0
+ * and the QKEY matches (see 9.6.1.4.1 and 9.6.1.5.1).
+ */
+ if (qp->ibqp.qp_num) {
+ if (unlikely(hdr->lrh[1] == IB_LID_PERMISSIVE ||
+ hdr->lrh[3] == IB_LID_PERMISSIVE)) {
+ ibp->n_pkt_drops++;
+ goto bail;
+ }
+ if (qp->ibqp.qp_num > 1) {
+ u16 pkey1, pkey2;
+
+ pkey1 = be32_to_cpu(ohdr->bth[0]);
+ pkey2 = qib_get_pkey(ibp, qp->s_pkey_index);
+ if (unlikely(!qib_pkey_ok(pkey1, pkey2))) {
+ qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY,
+ pkey1,
+ (be16_to_cpu(hdr->lrh[0]) >> 4) &
+ 0xF,
+ src_qp, qp->ibqp.qp_num,
+ hdr->lrh[3], hdr->lrh[1]);
+ goto bail;
+ }
+ }
+ if (unlikely(qkey != qp->qkey)) {
+ qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_QKEY, qkey,
+ (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
+ src_qp, qp->ibqp.qp_num,
+ hdr->lrh[3], hdr->lrh[1]);
+ goto bail;
+ }
+ /* Drop invalid MAD packets (see 13.5.3.1). */
+ if (unlikely(qp->ibqp.qp_num == 1 &&
+ (tlen != 256 ||
+ (be16_to_cpu(hdr->lrh[0]) >> 12) == 15))) {
+ ibp->n_pkt_drops++;
+ goto bail;
+ }
+ } else {
+ struct ib_smp *smp;
+
+ /* Drop invalid MAD packets (see 13.5.3.1). */
+ if (tlen != 256 || (be16_to_cpu(hdr->lrh[0]) >> 12) != 15) {
+ ibp->n_pkt_drops++;
+ goto bail;
+ }
+ smp = (struct ib_smp *) data;
+ if ((hdr->lrh[1] == IB_LID_PERMISSIVE ||
+ hdr->lrh[3] == IB_LID_PERMISSIVE) &&
+ smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
+ ibp->n_pkt_drops++;
+ goto bail;
+ }
+ }
+
+ /*
+	 * The opcode is in the low byte when it's in network order
+ * (top byte when in host order).
+ */
+ opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
+ if (qp->ibqp.qp_num > 1 &&
+ opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
+ wc.ex.imm_data = ohdr->u.ud.imm_data;
+ wc.wc_flags = IB_WC_WITH_IMM;
+ hdrsize += sizeof(u32);
+ } else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
+ wc.ex.imm_data = 0;
+ wc.wc_flags = 0;
+ } else {
+ ibp->n_pkt_drops++;
+ goto bail;
+ }
+
+ /*
+	 * A GRH is expected to precede the data even if not
+ * present on the wire.
+ */
+ wc.byte_len = tlen + sizeof(struct ib_grh);
+
+ /*
+ * We need to serialize getting a receive work queue entry and
+ * generating a completion for it against QPs sending to this QP
+ * locally.
+ */
+ spin_lock(&qp->r_lock);
+
+ /*
+ * Get the next work request entry to find where to put the data.
+ */
+ if (qp->r_flags & QIB_R_REUSE_SGE)
+ qp->r_flags &= ~QIB_R_REUSE_SGE;
+ else {
+ int ret;
+
+ ret = qib_get_rwqe(qp, 0);
+ if (ret < 0) {
+ qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
+ goto bail_unlock;
+ }
+ if (!ret) {
+ if (qp->ibqp.qp_num == 0)
+ ibp->n_vl15_dropped++;
+ goto bail_unlock;
+ }
+ }
+ /* Silently drop packets which are too big. */
+ if (unlikely(wc.byte_len > qp->r_len)) {
+ qp->r_flags |= QIB_R_REUSE_SGE;
+ ibp->n_pkt_drops++;
+ goto bail_unlock;
+ }
+ if (has_grh) {
+ qib_copy_sge(&qp->r_sge, &hdr->u.l.grh,
+ sizeof(struct ib_grh), 1);
+ wc.wc_flags |= IB_WC_GRH;
+ } else
+ qib_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1);
+ qib_copy_sge(&qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh), 1);
+ while (qp->r_sge.num_sge) {
+ atomic_dec(&qp->r_sge.sge.mr->refcount);
+ if (--qp->r_sge.num_sge)
+ qp->r_sge.sge = *qp->r_sge.sg_list++;
+ }
+ if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
+ goto bail_unlock;
+ wc.wr_id = qp->r_wr_id;
+ wc.status = IB_WC_SUCCESS;
+ wc.opcode = IB_WC_RECV;
+ wc.vendor_err = 0;
+ wc.qp = &qp->ibqp;
+ wc.src_qp = src_qp;
+ wc.pkey_index = qp->ibqp.qp_type == IB_QPT_GSI ?
+ qib_lookup_pkey(ibp, be32_to_cpu(ohdr->bth[0])) : 0;
+ wc.slid = be16_to_cpu(hdr->lrh[3]);
+ wc.sl = (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF;
+ dlid = be16_to_cpu(hdr->lrh[1]);
+ /*
+ * Save the LMC lower bits if the destination LID is a unicast LID.
+ */
+ wc.dlid_path_bits = dlid >= QIB_MULTICAST_LID_BASE ? 0 :
+ dlid & ((1 << ppd_from_ibp(ibp)->lmc) - 1);
+ wc.port_num = qp->port_num;
+ /* Signal completion event if the solicited bit is set. */
+ qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
+ (ohdr->bth[0] &
+ cpu_to_be32(IB_BTH_SOLICITED)) != 0);
+bail_unlock:
+ spin_unlock(&qp->r_lock);
+bail:;
+}
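The receive handlers above repeatedly derive the payload size from the total packet length, the header size, the BTH pad count, and the 4-byte ICRC. A minimal sketch of that arithmetic, assuming bth0 has already been converted to host byte order (the helper name is illustrative, not part of the patch):

#include <stdint.h>

/* Returns the payload length in bytes, or -1 if the packet is too short to
 * hold its headers, pad bytes and ICRC (the drop case in the code above). */
static int ud_payload_len(uint32_t tlen, uint32_t hdrsize, uint32_t bth0_host)
{
	uint32_t pad = (bth0_host >> 20) & 3;		/* BTH PadCnt field */
	uint32_t overhead = hdrsize + pad + 4;		/* headers + pad + ICRC */

	if (tlen < overhead)
		return -1;
	return (int)(tlen - overhead);
}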
diff --git a/drivers/infiniband/hw/qib/qib_user_pages.c b/drivers/infiniband/hw/qib/qib_user_pages.c
new file mode 100644
index 000000000000..d7a26c1d4f37
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_user_pages.c
@@ -0,0 +1,157 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/mm.h>
+#include <linux/device.h>
+
+#include "qib.h"
+
+static void __qib_release_user_pages(struct page **p, size_t num_pages,
+ int dirty)
+{
+ size_t i;
+
+ for (i = 0; i < num_pages; i++) {
+ if (dirty)
+ set_page_dirty_lock(p[i]);
+ put_page(p[i]);
+ }
+}
+
+/*
+ * Call with current->mm->mmap_sem held.
+ */
+static int __get_user_pages(unsigned long start_page, size_t num_pages,
+ struct page **p, struct vm_area_struct **vma)
+{
+ unsigned long lock_limit;
+ size_t got;
+ int ret;
+
+ lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+
+ if (num_pages > lock_limit && !capable(CAP_IPC_LOCK)) {
+ ret = -ENOMEM;
+ goto bail;
+ }
+
+ for (got = 0; got < num_pages; got += ret) {
+ ret = get_user_pages(current, current->mm,
+ start_page + got * PAGE_SIZE,
+ num_pages - got, 1, 1,
+ p + got, vma);
+ if (ret < 0)
+ goto bail_release;
+ }
+
+ current->mm->locked_vm += num_pages;
+
+ ret = 0;
+ goto bail;
+
+bail_release:
+ __qib_release_user_pages(p, got, 0);
+bail:
+ return ret;
+}
+
+/**
+ * qib_map_page - a safety wrapper around pci_map_page()
+ *
+ * A dma_addr of all 0's is interpreted by the chip as "disabled".
+ * Unfortunately, it can also be a valid dma_addr returned on some
+ * architectures.
+ *
+ * The powerpc iommu assigns dma_addrs in ascending order, so we don't
+ * have to bother with retries or mapping a dummy page to ensure we
+ * don't just get the same mapping again.
+ *
+ * I'm sure we won't be so lucky with other IOMMUs, so FIXME.
+ */
+dma_addr_t qib_map_page(struct pci_dev *hwdev, struct page *page,
+ unsigned long offset, size_t size, int direction)
+{
+ dma_addr_t phys;
+
+ phys = pci_map_page(hwdev, page, offset, size, direction);
+
+ if (phys == 0) {
+ pci_unmap_page(hwdev, phys, size, direction);
+ phys = pci_map_page(hwdev, page, offset, size, direction);
+ /*
+ * FIXME: If we get 0 again, we should keep this page,
+ * map another, then free the 0 page.
+ */
+ }
+
+ return phys;
+}
+
+/**
+ * qib_get_user_pages - lock user pages into memory
+ * @start_page: the start page
+ * @num_pages: the number of pages
+ * @p: the output page structures
+ *
+ * This function takes a given start page (page aligned user virtual
+ * address) and pins it and the following specified number of pages. For
+ * now, num_pages is always 1, but that will probably change at some point
+ * (because caller is doing expected sends on a single virtually contiguous
+ * buffer, so we can do all pages at once).
+ */
+int qib_get_user_pages(unsigned long start_page, size_t num_pages,
+ struct page **p)
+{
+ int ret;
+
+ down_write(&current->mm->mmap_sem);
+
+ ret = __get_user_pages(start_page, num_pages, p, NULL);
+
+ up_write(&current->mm->mmap_sem);
+
+ return ret;
+}
+
+void qib_release_user_pages(struct page **p, size_t num_pages)
+{
+ if (current->mm) /* during close after signal, mm can be NULL */
+ down_write(&current->mm->mmap_sem);
+
+ __qib_release_user_pages(p, num_pages, 1);
+
+ if (current->mm) {
+ current->mm->locked_vm -= num_pages;
+ up_write(&current->mm->mmap_sem);
+ }
+}
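For orientation, a hedged sketch of how a caller elsewhere in the driver is expected to pair these two helpers; the function and variable names below are illustrative, and only the qib_get_user_pages()/qib_release_user_pages() calls come from this file:

/* Illustrative caller: pin one page-aligned user buffer, then unpin it. */
static int example_pin_one_page(unsigned long page_aligned_uaddr)
{
	struct page *page;
	int ret;

	ret = qib_get_user_pages(page_aligned_uaddr, 1, &page);
	if (ret)
		return ret;	/* e.g. -ENOMEM when over RLIMIT_MEMLOCK */

	/* ... program the pinned page into the hardware here ... */

	qib_release_user_pages(&page, 1);	/* dirties and unpins the page */
	return 0;
}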
diff --git a/drivers/infiniband/hw/qib/qib_user_sdma.c b/drivers/infiniband/hw/qib/qib_user_sdma.c
new file mode 100644
index 000000000000..4c19e06b5e85
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_user_sdma.c
@@ -0,0 +1,897 @@
+/*
+ * Copyright (c) 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/mm.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/dmapool.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/highmem.h>
+#include <linux/io.h>
+#include <linux/uio.h>
+#include <linux/rbtree.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+
+#include "qib.h"
+#include "qib_user_sdma.h"
+
+/* minimum size of header */
+#define QIB_USER_SDMA_MIN_HEADER_LENGTH 64
+/* expected size of headers (for dma_pool) */
+#define QIB_USER_SDMA_EXP_HEADER_LENGTH 64
+/* attempt to drain the queue for 5secs */
+#define QIB_USER_SDMA_DRAIN_TIMEOUT 500
+
+struct qib_user_sdma_pkt {
+ u8 naddr; /* dimension of addr (1..3) ... */
+ u32 counter; /* sdma pkts queued counter for this entry */
+ u64 added; /* global descq number of entries */
+
+ struct {
+ u32 offset; /* offset for kvaddr, addr */
+ u32 length; /* length in page */
+ u8 put_page; /* should we put_page? */
+ u8 dma_mapped; /* is page dma_mapped? */
+ struct page *page; /* may be NULL (coherent mem) */
+ void *kvaddr; /* FIXME: only for pio hack */
+ dma_addr_t addr;
+ } addr[4]; /* max pages, any more and we coalesce */
+ struct list_head list; /* list element */
+};
+
+struct qib_user_sdma_queue {
+ /*
+ * pkts sent to dma engine are queued on this
+ * list head. the type of the elements of this
+ * list are struct qib_user_sdma_pkt...
+ */
+ struct list_head sent;
+
+ /* headers with expected length are allocated from here... */
+ char header_cache_name[64];
+ struct dma_pool *header_cache;
+
+ /* packets are allocated from the slab cache... */
+ char pkt_slab_name[64];
+ struct kmem_cache *pkt_slab;
+
+ /* as packets go on the queued queue, they are counted... */
+ u32 counter;
+ u32 sent_counter;
+
+ /* dma page table */
+ struct rb_root dma_pages_root;
+
+ /* protect everything above... */
+ struct mutex lock;
+};
+
+struct qib_user_sdma_queue *
+qib_user_sdma_queue_create(struct device *dev, int unit, int ctxt, int sctxt)
+{
+ struct qib_user_sdma_queue *pq =
+ kmalloc(sizeof(struct qib_user_sdma_queue), GFP_KERNEL);
+
+ if (!pq)
+ goto done;
+
+ pq->counter = 0;
+ pq->sent_counter = 0;
+ INIT_LIST_HEAD(&pq->sent);
+
+ mutex_init(&pq->lock);
+
+ snprintf(pq->pkt_slab_name, sizeof(pq->pkt_slab_name),
+ "qib-user-sdma-pkts-%u-%02u.%02u", unit, ctxt, sctxt);
+ pq->pkt_slab = kmem_cache_create(pq->pkt_slab_name,
+ sizeof(struct qib_user_sdma_pkt),
+ 0, 0, NULL);
+
+ if (!pq->pkt_slab)
+ goto err_kfree;
+
+ snprintf(pq->header_cache_name, sizeof(pq->header_cache_name),
+ "qib-user-sdma-headers-%u-%02u.%02u", unit, ctxt, sctxt);
+ pq->header_cache = dma_pool_create(pq->header_cache_name,
+ dev,
+ QIB_USER_SDMA_EXP_HEADER_LENGTH,
+ 4, 0);
+ if (!pq->header_cache)
+ goto err_slab;
+
+ pq->dma_pages_root = RB_ROOT;
+
+ goto done;
+
+err_slab:
+ kmem_cache_destroy(pq->pkt_slab);
+err_kfree:
+ kfree(pq);
+ pq = NULL;
+
+done:
+ return pq;
+}
+
+static void qib_user_sdma_init_frag(struct qib_user_sdma_pkt *pkt,
+ int i, size_t offset, size_t len,
+ int put_page, int dma_mapped,
+ struct page *page,
+ void *kvaddr, dma_addr_t dma_addr)
+{
+ pkt->addr[i].offset = offset;
+ pkt->addr[i].length = len;
+ pkt->addr[i].put_page = put_page;
+ pkt->addr[i].dma_mapped = dma_mapped;
+ pkt->addr[i].page = page;
+ pkt->addr[i].kvaddr = kvaddr;
+ pkt->addr[i].addr = dma_addr;
+}
+
+static void qib_user_sdma_init_header(struct qib_user_sdma_pkt *pkt,
+ u32 counter, size_t offset,
+ size_t len, int dma_mapped,
+ struct page *page,
+ void *kvaddr, dma_addr_t dma_addr)
+{
+ pkt->naddr = 1;
+ pkt->counter = counter;
+ qib_user_sdma_init_frag(pkt, 0, offset, len, 0, dma_mapped, page,
+ kvaddr, dma_addr);
+}
+
+/* we have too many pages in the iovec, coalesce to a single page */
+static int qib_user_sdma_coalesce(const struct qib_devdata *dd,
+ struct qib_user_sdma_pkt *pkt,
+ const struct iovec *iov,
+ unsigned long niov)
+{
+ int ret = 0;
+ struct page *page = alloc_page(GFP_KERNEL);
+ void *mpage_save;
+ char *mpage;
+ int i;
+ int len = 0;
+ dma_addr_t dma_addr;
+
+ if (!page) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ mpage = kmap(page);
+ mpage_save = mpage;
+ for (i = 0; i < niov; i++) {
+ int cfur;
+
+ cfur = copy_from_user(mpage,
+ iov[i].iov_base, iov[i].iov_len);
+ if (cfur) {
+ ret = -EFAULT;
+ goto free_unmap;
+ }
+
+ mpage += iov[i].iov_len;
+ len += iov[i].iov_len;
+ }
+
+ dma_addr = dma_map_page(&dd->pcidev->dev, page, 0, len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
+ ret = -ENOMEM;
+ goto free_unmap;
+ }
+
+ qib_user_sdma_init_frag(pkt, 1, 0, len, 0, 1, page, mpage_save,
+ dma_addr);
+ pkt->naddr = 2;
+
+ goto done;
+
+free_unmap:
+ kunmap(page);
+ __free_page(page);
+done:
+ return ret;
+}
+
+/*
+ * How many pages in this iovec element?
+ */
+static int qib_user_sdma_num_pages(const struct iovec *iov)
+{
+ const unsigned long addr = (unsigned long) iov->iov_base;
+ const unsigned long len = iov->iov_len;
+ const unsigned long spage = addr & PAGE_MASK;
+ const unsigned long epage = (addr + len - 1) & PAGE_MASK;
+
+ return 1 + ((epage - spage) >> PAGE_SHIFT);
+}
+
+/*
+ * Truncate length to page boundary.
+ */
+static int qib_user_sdma_page_length(unsigned long addr, unsigned long len)
+{
+ const unsigned long offset = addr & ~PAGE_MASK;
+
+ return ((offset + len) > PAGE_SIZE) ? (PAGE_SIZE - offset) : len;
+}
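A quick worked example of the two page-math helpers above, assuming 4 KiB pages: an iovec that starts 0xF00 bytes into a page and is 0x300 bytes long touches two pages, and its first fragment is truncated to the 0x100 bytes remaining in the first page. The user-space analogues below only restate the arithmetic and are not part of the patch:

#include <assert.h>

#define EX_PAGE_SIZE  4096UL
#define EX_PAGE_MASK  (~(EX_PAGE_SIZE - 1))

/* Illustrative analogue of qib_user_sdma_num_pages(). */
static unsigned long ex_num_pages(unsigned long addr, unsigned long len)
{
	unsigned long spage = addr & EX_PAGE_MASK;
	unsigned long epage = (addr + len - 1) & EX_PAGE_MASK;

	return 1 + ((epage - spage) / EX_PAGE_SIZE);
}

/* Illustrative analogue of qib_user_sdma_page_length(). */
static unsigned long ex_page_length(unsigned long addr, unsigned long len)
{
	unsigned long offset = addr & ~EX_PAGE_MASK;

	return (offset + len > EX_PAGE_SIZE) ? EX_PAGE_SIZE - offset : len;
}

int main(void)
{
	assert(ex_num_pages(0x10F00, 0x300) == 2);	/* crosses one boundary */
	assert(ex_page_length(0x10F00, 0x300) == 0x100);	/* first fragment */
	return 0;
}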
+
+static void qib_user_sdma_free_pkt_frag(struct device *dev,
+ struct qib_user_sdma_queue *pq,
+ struct qib_user_sdma_pkt *pkt,
+ int frag)
+{
+ const int i = frag;
+
+ if (pkt->addr[i].page) {
+ if (pkt->addr[i].dma_mapped)
+ dma_unmap_page(dev,
+ pkt->addr[i].addr,
+ pkt->addr[i].length,
+ DMA_TO_DEVICE);
+
+ if (pkt->addr[i].kvaddr)
+ kunmap(pkt->addr[i].page);
+
+ if (pkt->addr[i].put_page)
+ put_page(pkt->addr[i].page);
+ else
+ __free_page(pkt->addr[i].page);
+ } else if (pkt->addr[i].kvaddr)
+ /* free coherent mem from cache... */
+ dma_pool_free(pq->header_cache,
+ pkt->addr[i].kvaddr, pkt->addr[i].addr);
+}
+
+/* return number of pages pinned... */
+static int qib_user_sdma_pin_pages(const struct qib_devdata *dd,
+ struct qib_user_sdma_pkt *pkt,
+ unsigned long addr, int tlen, int npages)
+{
+ struct page *pages[2];
+ int j;
+ int ret;
+
+ ret = get_user_pages(current, current->mm, addr,
+ npages, 0, 1, pages, NULL);
+
+ if (ret != npages) {
+ int i;
+
+ for (i = 0; i < ret; i++)
+ put_page(pages[i]);
+
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ for (j = 0; j < npages; j++) {
+ /* map the pages... */
+ const int flen = qib_user_sdma_page_length(addr, tlen);
+ dma_addr_t dma_addr =
+ dma_map_page(&dd->pcidev->dev,
+ pages[j], 0, flen, DMA_TO_DEVICE);
+ unsigned long fofs = addr & ~PAGE_MASK;
+
+ if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ qib_user_sdma_init_frag(pkt, pkt->naddr, fofs, flen, 1, 1,
+ pages[j], kmap(pages[j]), dma_addr);
+
+ pkt->naddr++;
+ addr += flen;
+ tlen -= flen;
+ }
+
+done:
+ return ret;
+}
+
+static int qib_user_sdma_pin_pkt(const struct qib_devdata *dd,
+ struct qib_user_sdma_queue *pq,
+ struct qib_user_sdma_pkt *pkt,
+ const struct iovec *iov,
+ unsigned long niov)
+{
+ int ret = 0;
+ unsigned long idx;
+
+ for (idx = 0; idx < niov; idx++) {
+ const int npages = qib_user_sdma_num_pages(iov + idx);
+ const unsigned long addr = (unsigned long) iov[idx].iov_base;
+
+ ret = qib_user_sdma_pin_pages(dd, pkt, addr,
+ iov[idx].iov_len, npages);
+ if (ret < 0)
+ goto free_pkt;
+ }
+
+ goto done;
+
+free_pkt:
+ for (idx = 0; idx < pkt->naddr; idx++)
+ qib_user_sdma_free_pkt_frag(&dd->pcidev->dev, pq, pkt, idx);
+
+done:
+ return ret;
+}
+
+static int qib_user_sdma_init_payload(const struct qib_devdata *dd,
+ struct qib_user_sdma_queue *pq,
+ struct qib_user_sdma_pkt *pkt,
+ const struct iovec *iov,
+ unsigned long niov, int npages)
+{
+ int ret = 0;
+
+ if (npages >= ARRAY_SIZE(pkt->addr))
+ ret = qib_user_sdma_coalesce(dd, pkt, iov, niov);
+ else
+ ret = qib_user_sdma_pin_pkt(dd, pq, pkt, iov, niov);
+
+ return ret;
+}
+
+/* free a packet list and its per-fragment resources */
+static void qib_user_sdma_free_pkt_list(struct device *dev,
+ struct qib_user_sdma_queue *pq,
+ struct list_head *list)
+{
+ struct qib_user_sdma_pkt *pkt, *pkt_next;
+
+ list_for_each_entry_safe(pkt, pkt_next, list, list) {
+ int i;
+
+ for (i = 0; i < pkt->naddr; i++)
+ qib_user_sdma_free_pkt_frag(dev, pq, pkt, i);
+
+ kmem_cache_free(pq->pkt_slab, pkt);
+ }
+}
+
+/*
+ * copy headers, coalesce etc -- pq->lock must be held
+ *
+ * we queue all the packets to list, returning the
+ * number of bytes total. list must be empty initially,
+ * as, if there is an error we clean it...
+ */
+static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
+ struct qib_user_sdma_queue *pq,
+ struct list_head *list,
+ const struct iovec *iov,
+ unsigned long niov,
+ int maxpkts)
+{
+ unsigned long idx = 0;
+ int ret = 0;
+ int npkts = 0;
+ struct page *page = NULL;
+ __le32 *pbc;
+ dma_addr_t dma_addr;
+ struct qib_user_sdma_pkt *pkt = NULL;
+ size_t len;
+ size_t nw;
+ u32 counter = pq->counter;
+ int dma_mapped = 0;
+
+ while (idx < niov && npkts < maxpkts) {
+ const unsigned long addr = (unsigned long) iov[idx].iov_base;
+ const unsigned long idx_save = idx;
+ unsigned pktnw;
+ unsigned pktnwc;
+ int nfrags = 0;
+ int npages = 0;
+ int cfur;
+
+ dma_mapped = 0;
+ len = iov[idx].iov_len;
+ nw = len >> 2;
+ page = NULL;
+
+ pkt = kmem_cache_alloc(pq->pkt_slab, GFP_KERNEL);
+ if (!pkt) {
+ ret = -ENOMEM;
+ goto free_list;
+ }
+
+ if (len < QIB_USER_SDMA_MIN_HEADER_LENGTH ||
+ len > PAGE_SIZE || len & 3 || addr & 3) {
+ ret = -EINVAL;
+ goto free_pkt;
+ }
+
+ if (len == QIB_USER_SDMA_EXP_HEADER_LENGTH)
+ pbc = dma_pool_alloc(pq->header_cache, GFP_KERNEL,
+ &dma_addr);
+ else
+ pbc = NULL;
+
+ if (!pbc) {
+ page = alloc_page(GFP_KERNEL);
+ if (!page) {
+ ret = -ENOMEM;
+ goto free_pkt;
+ }
+ pbc = kmap(page);
+ }
+
+ cfur = copy_from_user(pbc, iov[idx].iov_base, len);
+ if (cfur) {
+ ret = -EFAULT;
+ goto free_pbc;
+ }
+
+ /*
+		 * This assignment is a bit strange; it's because the
+		 * PBC counts the number of 32 bit words in the full
+		 * packet _except_ the first word of the PBC itself
+		 * (a worked example follows this function)...
+ */
+ pktnwc = nw - 1;
+
+ /*
+ * pktnw computation yields the number of 32 bit words
+		 * that the caller has indicated in the PBC.  Note that
+		 * this is one less than the total number of words that
+		 * go to the send DMA engine, as the first 32 bit word
+ * of the PBC itself is not counted. Armed with this count,
+ * we can verify that the packet is consistent with the
+ * iovec lengths.
+ */
+ pktnw = le32_to_cpu(*pbc) & QIB_PBC_LENGTH_MASK;
+ if (pktnw < pktnwc || pktnw > pktnwc + (PAGE_SIZE >> 2)) {
+ ret = -EINVAL;
+ goto free_pbc;
+ }
+
+ idx++;
+ while (pktnwc < pktnw && idx < niov) {
+ const size_t slen = iov[idx].iov_len;
+ const unsigned long faddr =
+ (unsigned long) iov[idx].iov_base;
+
+ if (slen & 3 || faddr & 3 || !slen ||
+ slen > PAGE_SIZE) {
+ ret = -EINVAL;
+ goto free_pbc;
+ }
+
+ npages++;
+ if ((faddr & PAGE_MASK) !=
+ ((faddr + slen - 1) & PAGE_MASK))
+ npages++;
+
+ pktnwc += slen >> 2;
+ idx++;
+ nfrags++;
+ }
+
+ if (pktnwc != pktnw) {
+ ret = -EINVAL;
+ goto free_pbc;
+ }
+
+ if (page) {
+ dma_addr = dma_map_page(&dd->pcidev->dev,
+ page, 0, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
+ ret = -ENOMEM;
+ goto free_pbc;
+ }
+
+ dma_mapped = 1;
+ }
+
+ qib_user_sdma_init_header(pkt, counter, 0, len, dma_mapped,
+ page, pbc, dma_addr);
+
+ if (nfrags) {
+ ret = qib_user_sdma_init_payload(dd, pq, pkt,
+ iov + idx_save + 1,
+ nfrags, npages);
+ if (ret < 0)
+ goto free_pbc_dma;
+ }
+
+ counter++;
+ npkts++;
+
+ list_add_tail(&pkt->list, list);
+ }
+
+ ret = idx;
+ goto done;
+
+free_pbc_dma:
+ if (dma_mapped)
+ dma_unmap_page(&dd->pcidev->dev, dma_addr, len, DMA_TO_DEVICE);
+free_pbc:
+ if (page) {
+ kunmap(page);
+ __free_page(page);
+ } else
+ dma_pool_free(pq->header_cache, pbc, dma_addr);
+free_pkt:
+ kmem_cache_free(pq->pkt_slab, pkt);
+free_list:
+ qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, list);
+done:
+ return ret;
+}
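The worked example promised above: with a 64-byte header iovec the driver counts 16 dwords minus the uncounted first PBC word, so pktnwc starts at 15; a single 1024-byte payload iovec adds 256 dwords, so the PBC must declare 271 dwords for the packet to be accepted. A hedged user-space restatement of that check (pbc_dwords is assumed to already be the masked, host-order PBC length field; the function name is illustrative):

#include <assert.h>
#include <stdint.h>

/* Illustrative mirror of the pktnw/pktnwc accounting in the function above. */
static int pbc_length_consistent(uint32_t pbc_dwords, uint32_t header_bytes,
				 const uint32_t *payload_bytes, int nfrags)
{
	uint32_t counted = (header_bytes >> 2) - 1;	/* PBC word 0 not counted */
	int i;

	for (i = 0; i < nfrags; i++)
		counted += payload_bytes[i] >> 2;

	return counted == pbc_dwords;
}

int main(void)
{
	uint32_t frags[] = { 1024 };

	assert(pbc_length_consistent(271, 64, frags, 1));	/* 15 + 256 */
	return 0;
}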
+
+static void qib_user_sdma_set_complete_counter(struct qib_user_sdma_queue *pq,
+ u32 c)
+{
+ pq->sent_counter = c;
+}
+
+/* try to clean out queue -- needs pq->lock */
+static int qib_user_sdma_queue_clean(struct qib_pportdata *ppd,
+ struct qib_user_sdma_queue *pq)
+{
+ struct qib_devdata *dd = ppd->dd;
+ struct list_head free_list;
+ struct qib_user_sdma_pkt *pkt;
+ struct qib_user_sdma_pkt *pkt_prev;
+ int ret = 0;
+
+ INIT_LIST_HEAD(&free_list);
+
+ list_for_each_entry_safe(pkt, pkt_prev, &pq->sent, list) {
+ s64 descd = ppd->sdma_descq_removed - pkt->added;
+
+ if (descd < 0)
+ break;
+
+ list_move_tail(&pkt->list, &free_list);
+
+ /* one more packet cleaned */
+ ret++;
+ }
+
+ if (!list_empty(&free_list)) {
+ u32 counter;
+
+ pkt = list_entry(free_list.prev,
+ struct qib_user_sdma_pkt, list);
+ counter = pkt->counter;
+
+ qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
+ qib_user_sdma_set_complete_counter(pq, counter);
+ }
+
+ return ret;
+}
+
+void qib_user_sdma_queue_destroy(struct qib_user_sdma_queue *pq)
+{
+ if (!pq)
+ return;
+
+ kmem_cache_destroy(pq->pkt_slab);
+ dma_pool_destroy(pq->header_cache);
+ kfree(pq);
+}
+
+/* clean descriptor queue, returns > 0 if some elements cleaned */
+static int qib_user_sdma_hwqueue_clean(struct qib_pportdata *ppd)
+{
+ int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ppd->sdma_lock, flags);
+ ret = qib_sdma_make_progress(ppd);
+ spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+
+ return ret;
+}
+
+/* we're in close, drain packets so that we can cleanup successfully... */
+void qib_user_sdma_queue_drain(struct qib_pportdata *ppd,
+ struct qib_user_sdma_queue *pq)
+{
+ struct qib_devdata *dd = ppd->dd;
+ int i;
+
+ if (!pq)
+ return;
+
+ for (i = 0; i < QIB_USER_SDMA_DRAIN_TIMEOUT; i++) {
+ mutex_lock(&pq->lock);
+ if (list_empty(&pq->sent)) {
+ mutex_unlock(&pq->lock);
+ break;
+ }
+ qib_user_sdma_hwqueue_clean(ppd);
+ qib_user_sdma_queue_clean(ppd, pq);
+ mutex_unlock(&pq->lock);
+ msleep(10);
+ }
+
+ if (!list_empty(&pq->sent)) {
+ struct list_head free_list;
+
+ qib_dev_err(dd, "user sdma lists not empty: forcing!\n");
+ INIT_LIST_HEAD(&free_list);
+ mutex_lock(&pq->lock);
+ list_splice_init(&pq->sent, &free_list);
+ qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
+ mutex_unlock(&pq->lock);
+ }
+}
+
+static inline __le64 qib_sdma_make_desc0(struct qib_pportdata *ppd,
+ u64 addr, u64 dwlen, u64 dwoffset)
+{
+ u8 tmpgen;
+
+ tmpgen = ppd->sdma_generation;
+
+ return cpu_to_le64(/* SDmaPhyAddr[31:0] */
+ ((addr & 0xfffffffcULL) << 32) |
+ /* SDmaGeneration[1:0] */
+ ((tmpgen & 3ULL) << 30) |
+ /* SDmaDwordCount[10:0] */
+ ((dwlen & 0x7ffULL) << 16) |
+ /* SDmaBufOffset[12:2] */
+ (dwoffset & 0x7ffULL));
+}
+
+static inline __le64 qib_sdma_make_first_desc0(__le64 descq)
+{
+ return descq | cpu_to_le64(1ULL << 12);
+}
+
+static inline __le64 qib_sdma_make_last_desc0(__le64 descq)
+{
+ /* last */ /* dma head */
+ return descq | cpu_to_le64(1ULL << 11 | 1ULL << 13);
+}
+
+static inline __le64 qib_sdma_make_desc1(u64 addr)
+{
+ /* SDmaPhyAddr[47:32] */
+ return cpu_to_le64(addr >> 32);
+}
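A hedged restatement of how the helpers above pack the first 64-bit descriptor word; the field names come from the comments in qib_sdma_make_desc0(), the bit positions from its shifts and masks, and the standalone function below is illustrative only:

#include <stdint.h>

/* qword0 layout as used above:
 *   [63:32] SDmaPhyAddr[31:0]   (low half of the DMA address, 4-byte aligned)
 *   [31:30] SDmaGeneration[1:0]
 *   [26:16] SDmaDwordCount[10:0]
 *   [14]    "large buffer" flag  (set later in qib_user_sdma_push_pkts())
 *   [13]    DMA-head update      (last descriptor only)
 *   [12]    first descriptor of a packet
 *   [11]    last descriptor of a packet
 *   [10:0]  SDmaBufOffset[12:2]  (dword offset within the packet)
 */
static uint64_t ex_make_desc0(uint64_t addr, uint8_t gen, uint16_t dwlen,
			      uint16_t dwoffset, int first, int last)
{
	uint64_t d = ((addr & 0xfffffffcULL) << 32) |
		     ((uint64_t)(gen & 3) << 30) |
		     ((uint64_t)(dwlen & 0x7ff) << 16) |
		     (dwoffset & 0x7ff);

	if (first)
		d |= 1ULL << 12;
	if (last)
		d |= 1ULL << 11 | 1ULL << 13;
	return d;
}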
+
+static void qib_user_sdma_send_frag(struct qib_pportdata *ppd,
+ struct qib_user_sdma_pkt *pkt, int idx,
+ unsigned ofs, u16 tail)
+{
+ const u64 addr = (u64) pkt->addr[idx].addr +
+ (u64) pkt->addr[idx].offset;
+ const u64 dwlen = (u64) pkt->addr[idx].length / 4;
+ __le64 *descqp;
+ __le64 descq0;
+
+ descqp = &ppd->sdma_descq[tail].qw[0];
+
+ descq0 = qib_sdma_make_desc0(ppd, addr, dwlen, ofs);
+ if (idx == 0)
+ descq0 = qib_sdma_make_first_desc0(descq0);
+ if (idx == pkt->naddr - 1)
+ descq0 = qib_sdma_make_last_desc0(descq0);
+
+ descqp[0] = descq0;
+ descqp[1] = qib_sdma_make_desc1(addr);
+}
+
+/* pq->lock must be held, get packets on the wire... */
+static int qib_user_sdma_push_pkts(struct qib_pportdata *ppd,
+ struct qib_user_sdma_queue *pq,
+ struct list_head *pktlist)
+{
+ struct qib_devdata *dd = ppd->dd;
+ int ret = 0;
+ unsigned long flags;
+ u16 tail;
+ u8 generation;
+ u64 descq_added;
+
+ if (list_empty(pktlist))
+ return 0;
+
+ if (unlikely(!(ppd->lflags & QIBL_LINKACTIVE)))
+ return -ECOMM;
+
+ spin_lock_irqsave(&ppd->sdma_lock, flags);
+
+ /* keep a copy for restoring purposes in case of problems */
+ generation = ppd->sdma_generation;
+ descq_added = ppd->sdma_descq_added;
+
+ if (unlikely(!__qib_sdma_running(ppd))) {
+ ret = -ECOMM;
+ goto unlock;
+ }
+
+ tail = ppd->sdma_descq_tail;
+ while (!list_empty(pktlist)) {
+ struct qib_user_sdma_pkt *pkt =
+ list_entry(pktlist->next, struct qib_user_sdma_pkt,
+ list);
+ int i;
+ unsigned ofs = 0;
+ u16 dtail = tail;
+
+ if (pkt->naddr > qib_sdma_descq_freecnt(ppd))
+ goto unlock_check_tail;
+
+ for (i = 0; i < pkt->naddr; i++) {
+ qib_user_sdma_send_frag(ppd, pkt, i, ofs, tail);
+ ofs += pkt->addr[i].length >> 2;
+
+ if (++tail == ppd->sdma_descq_cnt) {
+ tail = 0;
+ ++ppd->sdma_generation;
+ }
+ }
+
+ if ((ofs << 2) > ppd->ibmaxlen) {
+ ret = -EMSGSIZE;
+ goto unlock;
+ }
+
+ /*
+ * If the packet is >= 2KB mtu equivalent, we have to use
+ * the large buffers, and have to mark each descriptor as
+ * part of a large buffer packet.
+ */
+ if (ofs > dd->piosize2kmax_dwords) {
+ for (i = 0; i < pkt->naddr; i++) {
+ ppd->sdma_descq[dtail].qw[0] |=
+ cpu_to_le64(1ULL << 14);
+ if (++dtail == ppd->sdma_descq_cnt)
+ dtail = 0;
+ }
+ }
+
+ ppd->sdma_descq_added += pkt->naddr;
+ pkt->added = ppd->sdma_descq_added;
+ list_move_tail(&pkt->list, &pq->sent);
+ ret++;
+ }
+
+unlock_check_tail:
+ /* advance the tail on the chip if necessary */
+ if (ppd->sdma_descq_tail != tail)
+ dd->f_sdma_update_tail(ppd, tail);
+
+unlock:
+ if (unlikely(ret < 0)) {
+ ppd->sdma_generation = generation;
+ ppd->sdma_descq_added = descq_added;
+ }
+ spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+
+ return ret;
+}
+
+int qib_user_sdma_writev(struct qib_ctxtdata *rcd,
+ struct qib_user_sdma_queue *pq,
+ const struct iovec *iov,
+ unsigned long dim)
+{
+ struct qib_devdata *dd = rcd->dd;
+ struct qib_pportdata *ppd = rcd->ppd;
+ int ret = 0;
+ struct list_head list;
+ int npkts = 0;
+
+ INIT_LIST_HEAD(&list);
+
+ mutex_lock(&pq->lock);
+
+ /* why not -ECOMM like qib_user_sdma_push_pkts() below? */
+ if (!qib_sdma_running(ppd))
+ goto done_unlock;
+
+ if (ppd->sdma_descq_added != ppd->sdma_descq_removed) {
+ qib_user_sdma_hwqueue_clean(ppd);
+ qib_user_sdma_queue_clean(ppd, pq);
+ }
+
+ while (dim) {
+ const int mxp = 8;
+
+ down_write(&current->mm->mmap_sem);
+ ret = qib_user_sdma_queue_pkts(dd, pq, &list, iov, dim, mxp);
+ up_write(&current->mm->mmap_sem);
+
+ if (ret <= 0)
+ goto done_unlock;
+ else {
+ dim -= ret;
+ iov += ret;
+ }
+
+ /* force packets onto the sdma hw queue... */
+ if (!list_empty(&list)) {
+ /*
+ * Lazily clean hw queue. the 4 is a guess of about
+ * how many sdma descriptors a packet will take (it
+ * doesn't have to be perfect).
+ */
+ if (qib_sdma_descq_freecnt(ppd) < ret * 4) {
+ qib_user_sdma_hwqueue_clean(ppd);
+ qib_user_sdma_queue_clean(ppd, pq);
+ }
+
+ ret = qib_user_sdma_push_pkts(ppd, pq, &list);
+ if (ret < 0)
+ goto done_unlock;
+ else {
+ npkts += ret;
+ pq->counter += ret;
+
+ if (!list_empty(&list))
+ goto done_unlock;
+ }
+ }
+ }
+
+done_unlock:
+ if (!list_empty(&list))
+ qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &list);
+ mutex_unlock(&pq->lock);
+
+ return (ret < 0) ? ret : npkts;
+}
+
+int qib_user_sdma_make_progress(struct qib_pportdata *ppd,
+ struct qib_user_sdma_queue *pq)
+{
+ int ret = 0;
+
+ mutex_lock(&pq->lock);
+ qib_user_sdma_hwqueue_clean(ppd);
+ ret = qib_user_sdma_queue_clean(ppd, pq);
+ mutex_unlock(&pq->lock);
+
+ return ret;
+}
+
+u32 qib_user_sdma_complete_counter(const struct qib_user_sdma_queue *pq)
+{
+ return pq ? pq->sent_counter : 0;
+}
+
+u32 qib_user_sdma_inflight_counter(struct qib_user_sdma_queue *pq)
+{
+ return pq ? pq->counter : 0;
+}
diff --git a/drivers/infiniband/hw/qib/qib_user_sdma.h b/drivers/infiniband/hw/qib/qib_user_sdma.h
new file mode 100644
index 000000000000..ce8cbaf6a5c2
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_user_sdma.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2007, 2008 QLogic Corporation. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/device.h>
+
+struct qib_user_sdma_queue;
+
+struct qib_user_sdma_queue *
+qib_user_sdma_queue_create(struct device *dev, int unit, int port, int sport);
+void qib_user_sdma_queue_destroy(struct qib_user_sdma_queue *pq);
+
+int qib_user_sdma_writev(struct qib_ctxtdata *pd,
+ struct qib_user_sdma_queue *pq,
+ const struct iovec *iov,
+ unsigned long dim);
+
+int qib_user_sdma_make_progress(struct qib_pportdata *ppd,
+ struct qib_user_sdma_queue *pq);
+
+void qib_user_sdma_queue_drain(struct qib_pportdata *ppd,
+ struct qib_user_sdma_queue *pq);
+
+u32 qib_user_sdma_complete_counter(const struct qib_user_sdma_queue *pq);
+u32 qib_user_sdma_inflight_counter(struct qib_user_sdma_queue *pq);
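Taken together, the declarations above imply a simple lifecycle for a per-context user SDMA queue. A hedged sketch of that sequence (the device/context identifiers are assumed to be supplied by the caller, error handling is trimmed, and the function name is illustrative):

/* Illustrative lifecycle: create a queue, push one writev, drain, destroy. */
static int example_sdma_lifecycle(struct qib_ctxtdata *rcd, struct device *dev,
				  int unit, int ctxt, int subctxt,
				  const struct iovec *iov, unsigned long dim)
{
	struct qib_user_sdma_queue *pq;
	int ret;

	pq = qib_user_sdma_queue_create(dev, unit, ctxt, subctxt);
	if (!pq)
		return -ENOMEM;

	ret = qib_user_sdma_writev(rcd, pq, iov, dim);	/* >= 0: packets queued */

	qib_user_sdma_queue_drain(rcd->ppd, pq);
	qib_user_sdma_queue_destroy(pq);
	return ret < 0 ? ret : 0;
}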
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
new file mode 100644
index 000000000000..cda8f4173d23
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -0,0 +1,2248 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
+ * All rights reserved.
+ * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <rdma/ib_mad.h>
+#include <rdma/ib_user_verbs.h>
+#include <linux/io.h>
+#include <linux/utsname.h>
+#include <linux/rculist.h>
+#include <linux/mm.h>
+
+#include "qib.h"
+#include "qib_common.h"
+
+static unsigned int ib_qib_qp_table_size = 251;
+module_param_named(qp_table_size, ib_qib_qp_table_size, uint, S_IRUGO);
+MODULE_PARM_DESC(qp_table_size, "QP table size");
+
+unsigned int ib_qib_lkey_table_size = 16;
+module_param_named(lkey_table_size, ib_qib_lkey_table_size, uint,
+ S_IRUGO);
+MODULE_PARM_DESC(lkey_table_size,
+ "LKEY table size in bits (2^n, 1 <= n <= 23)");
+
+static unsigned int ib_qib_max_pds = 0xFFFF;
+module_param_named(max_pds, ib_qib_max_pds, uint, S_IRUGO);
+MODULE_PARM_DESC(max_pds,
+ "Maximum number of protection domains to support");
+
+static unsigned int ib_qib_max_ahs = 0xFFFF;
+module_param_named(max_ahs, ib_qib_max_ahs, uint, S_IRUGO);
+MODULE_PARM_DESC(max_ahs, "Maximum number of address handles to support");
+
+unsigned int ib_qib_max_cqes = 0x2FFFF;
+module_param_named(max_cqes, ib_qib_max_cqes, uint, S_IRUGO);
+MODULE_PARM_DESC(max_cqes,
+ "Maximum number of completion queue entries to support");
+
+unsigned int ib_qib_max_cqs = 0x1FFFF;
+module_param_named(max_cqs, ib_qib_max_cqs, uint, S_IRUGO);
+MODULE_PARM_DESC(max_cqs, "Maximum number of completion queues to support");
+
+unsigned int ib_qib_max_qp_wrs = 0x3FFF;
+module_param_named(max_qp_wrs, ib_qib_max_qp_wrs, uint, S_IRUGO);
+MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support");
+
+unsigned int ib_qib_max_qps = 16384;
+module_param_named(max_qps, ib_qib_max_qps, uint, S_IRUGO);
+MODULE_PARM_DESC(max_qps, "Maximum number of QPs to support");
+
+unsigned int ib_qib_max_sges = 0x60;
+module_param_named(max_sges, ib_qib_max_sges, uint, S_IRUGO);
+MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support");
+
+unsigned int ib_qib_max_mcast_grps = 16384;
+module_param_named(max_mcast_grps, ib_qib_max_mcast_grps, uint, S_IRUGO);
+MODULE_PARM_DESC(max_mcast_grps,
+ "Maximum number of multicast groups to support");
+
+unsigned int ib_qib_max_mcast_qp_attached = 16;
+module_param_named(max_mcast_qp_attached, ib_qib_max_mcast_qp_attached,
+ uint, S_IRUGO);
+MODULE_PARM_DESC(max_mcast_qp_attached,
+ "Maximum number of attached QPs to support");
+
+unsigned int ib_qib_max_srqs = 1024;
+module_param_named(max_srqs, ib_qib_max_srqs, uint, S_IRUGO);
+MODULE_PARM_DESC(max_srqs, "Maximum number of SRQs to support");
+
+unsigned int ib_qib_max_srq_sges = 128;
+module_param_named(max_srq_sges, ib_qib_max_srq_sges, uint, S_IRUGO);
+MODULE_PARM_DESC(max_srq_sges, "Maximum number of SRQ SGEs to support");
+
+unsigned int ib_qib_max_srq_wrs = 0x1FFFF;
+module_param_named(max_srq_wrs, ib_qib_max_srq_wrs, uint, S_IRUGO);
+MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs to support");
+
+static unsigned int ib_qib_disable_sma;
+module_param_named(disable_sma, ib_qib_disable_sma, uint, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(disable_sma, "Disable the SMA");
+
+/*
+ * Note that it is OK to post send work requests in the SQE and ERR
+ * states; qib_do_send() will process them and generate error
+ * completions as per IB 1.2 C10-96.
+ */
+const int ib_qib_state_ops[IB_QPS_ERR + 1] = {
+ [IB_QPS_RESET] = 0,
+ [IB_QPS_INIT] = QIB_POST_RECV_OK,
+ [IB_QPS_RTR] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK,
+ [IB_QPS_RTS] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK |
+ QIB_POST_SEND_OK | QIB_PROCESS_SEND_OK |
+ QIB_PROCESS_NEXT_SEND_OK,
+ [IB_QPS_SQD] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK |
+ QIB_POST_SEND_OK | QIB_PROCESS_SEND_OK,
+ [IB_QPS_SQE] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK |
+ QIB_POST_SEND_OK | QIB_FLUSH_SEND,
+ [IB_QPS_ERR] = QIB_POST_RECV_OK | QIB_FLUSH_RECV |
+ QIB_POST_SEND_OK | QIB_FLUSH_SEND,
+};
+
+struct qib_ucontext {
+ struct ib_ucontext ibucontext;
+};
+
+static inline struct qib_ucontext *to_iucontext(struct ib_ucontext
+ *ibucontext)
+{
+ return container_of(ibucontext, struct qib_ucontext, ibucontext);
+}
+
+/*
+ * Translate ib_wr_opcode into ib_wc_opcode.
+ */
+const enum ib_wc_opcode ib_qib_wc_opcode[] = {
+ [IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
+ [IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
+ [IB_WR_SEND] = IB_WC_SEND,
+ [IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
+ [IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
+ [IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
+ [IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD
+};
+
+/*
+ * System image GUID.
+ */
+__be64 ib_qib_sys_image_guid;
+
+/**
+ * qib_copy_sge - copy data to SGE memory
+ * @ss: the SGE state
+ * @data: the data to copy
+ * @length: the length of the data
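+ * @release: if true, drop the MR reference for each SGE once it is consumed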
+ */
+void qib_copy_sge(struct qib_sge_state *ss, void *data, u32 length, int release)
+{
+ struct qib_sge *sge = &ss->sge;
+
+ while (length) {
+ u32 len = sge->length;
+
+ if (len > length)
+ len = length;
+ if (len > sge->sge_length)
+ len = sge->sge_length;
+ BUG_ON(len == 0);
+ memcpy(sge->vaddr, data, len);
+ sge->vaddr += len;
+ sge->length -= len;
+ sge->sge_length -= len;
+ if (sge->sge_length == 0) {
+ if (release)
+ atomic_dec(&sge->mr->refcount);
+ if (--ss->num_sge)
+ *sge = *ss->sg_list++;
+ } else if (sge->length == 0 && sge->mr->lkey) {
+ if (++sge->n >= QIB_SEGSZ) {
+ if (++sge->m >= sge->mr->mapsz)
+ break;
+ sge->n = 0;
+ }
+ sge->vaddr =
+ sge->mr->map[sge->m]->segs[sge->n].vaddr;
+ sge->length =
+ sge->mr->map[sge->m]->segs[sge->n].length;
+ }
+ data += len;
+ length -= len;
+ }
+}
+
+/**
+ * qib_skip_sge - skip over SGE memory - XXX almost dup of prev func
+ * @ss: the SGE state
+ * @length: the number of bytes to skip
+ */
+void qib_skip_sge(struct qib_sge_state *ss, u32 length, int release)
+{
+ struct qib_sge *sge = &ss->sge;
+
+ while (length) {
+ u32 len = sge->length;
+
+ if (len > length)
+ len = length;
+ if (len > sge->sge_length)
+ len = sge->sge_length;
+ BUG_ON(len == 0);
+ sge->vaddr += len;
+ sge->length -= len;
+ sge->sge_length -= len;
+ if (sge->sge_length == 0) {
+ if (release)
+ atomic_dec(&sge->mr->refcount);
+ if (--ss->num_sge)
+ *sge = *ss->sg_list++;
+ } else if (sge->length == 0 && sge->mr->lkey) {
+ if (++sge->n >= QIB_SEGSZ) {
+ if (++sge->m >= sge->mr->mapsz)
+ break;
+ sge->n = 0;
+ }
+ sge->vaddr =
+ sge->mr->map[sge->m]->segs[sge->n].vaddr;
+ sge->length =
+ sge->mr->map[sge->m]->segs[sge->n].length;
+ }
+ length -= len;
+ }
+}
+
+/*
+ * Count the number of DMA descriptors needed to send length bytes of data.
+ * Don't modify the qib_sge_state to get the count.
+ * Return zero if any of the segments is not aligned.
+ */
+static u32 qib_count_sge(struct qib_sge_state *ss, u32 length)
+{
+ struct qib_sge *sg_list = ss->sg_list;
+ struct qib_sge sge = ss->sge;
+ u8 num_sge = ss->num_sge;
+ u32 ndesc = 1; /* count the header */
+
+ while (length) {
+ u32 len = sge.length;
+
+ if (len > length)
+ len = length;
+ if (len > sge.sge_length)
+ len = sge.sge_length;
+ BUG_ON(len == 0);
+ if (((long) sge.vaddr & (sizeof(u32) - 1)) ||
+ (len != length && (len & (sizeof(u32) - 1)))) {
+ ndesc = 0;
+ break;
+ }
+ ndesc++;
+ sge.vaddr += len;
+ sge.length -= len;
+ sge.sge_length -= len;
+ if (sge.sge_length == 0) {
+ if (--num_sge)
+ sge = *sg_list++;
+ } else if (sge.length == 0 && sge.mr->lkey) {
+ if (++sge.n >= QIB_SEGSZ) {
+ if (++sge.m >= sge.mr->mapsz)
+ break;
+ sge.n = 0;
+ }
+ sge.vaddr =
+ sge.mr->map[sge.m]->segs[sge.n].vaddr;
+ sge.length =
+ sge.mr->map[sge.m]->segs[sge.n].length;
+ }
+ length -= len;
+ }
+ return ndesc;
+}
+
+/*
+ * Copy from the SGEs to the data buffer.
+ */
+static void qib_copy_from_sge(void *data, struct qib_sge_state *ss, u32 length)
+{
+ struct qib_sge *sge = &ss->sge;
+
+ while (length) {
+ u32 len = sge->length;
+
+ if (len > length)
+ len = length;
+ if (len > sge->sge_length)
+ len = sge->sge_length;
+ BUG_ON(len == 0);
+ memcpy(data, sge->vaddr, len);
+ sge->vaddr += len;
+ sge->length -= len;
+ sge->sge_length -= len;
+ if (sge->sge_length == 0) {
+ if (--ss->num_sge)
+ *sge = *ss->sg_list++;
+ } else if (sge->length == 0 && sge->mr->lkey) {
+ if (++sge->n >= QIB_SEGSZ) {
+ if (++sge->m >= sge->mr->mapsz)
+ break;
+ sge->n = 0;
+ }
+ sge->vaddr =
+ sge->mr->map[sge->m]->segs[sge->n].vaddr;
+ sge->length =
+ sge->mr->map[sge->m]->segs[sge->n].length;
+ }
+ data += len;
+ length -= len;
+ }
+}
+
+/**
+ * qib_post_one_send - post one RC, UC, or UD send work request
+ * @qp: the QP to post on
+ * @wr: the work request to send
+ */
+static int qib_post_one_send(struct qib_qp *qp, struct ib_send_wr *wr)
+{
+ struct qib_swqe *wqe;
+ u32 next;
+ int i;
+ int j;
+ int acc;
+ int ret;
+ unsigned long flags;
+ struct qib_lkey_table *rkt;
+ struct qib_pd *pd;
+
+ spin_lock_irqsave(&qp->s_lock, flags);
+
+ /* Check that state is OK to post send. */
+ if (unlikely(!(ib_qib_state_ops[qp->state] & QIB_POST_SEND_OK)))
+ goto bail_inval;
+
+ /* IB spec says that num_sge == 0 is OK. */
+ if (wr->num_sge > qp->s_max_sge)
+ goto bail_inval;
+
+ /*
+	 * Don't allow RDMA reads or atomic operations on UC, and
+	 * reject undefined opcodes.
+ * Make sure buffer is large enough to hold the result for atomics.
+ */
+ if (wr->opcode == IB_WR_FAST_REG_MR) {
+ if (qib_fast_reg_mr(qp, wr))
+ goto bail_inval;
+ } else if (qp->ibqp.qp_type == IB_QPT_UC) {
+ if ((unsigned) wr->opcode >= IB_WR_RDMA_READ)
+ goto bail_inval;
+ } else if (qp->ibqp.qp_type != IB_QPT_RC) {
+ /* Check IB_QPT_SMI, IB_QPT_GSI, IB_QPT_UD opcode */
+ if (wr->opcode != IB_WR_SEND &&
+ wr->opcode != IB_WR_SEND_WITH_IMM)
+ goto bail_inval;
+ /* Check UD destination address PD */
+ if (qp->ibqp.pd != wr->wr.ud.ah->pd)
+ goto bail_inval;
+ } else if ((unsigned) wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD)
+ goto bail_inval;
+ else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP &&
+ (wr->num_sge == 0 ||
+ wr->sg_list[0].length < sizeof(u64) ||
+ wr->sg_list[0].addr & (sizeof(u64) - 1)))
+ goto bail_inval;
+ else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic)
+ goto bail_inval;
+
+ next = qp->s_head + 1;
+ if (next >= qp->s_size)
+ next = 0;
+ if (next == qp->s_last) {
+ ret = -ENOMEM;
+ goto bail;
+ }
+
+ rkt = &to_idev(qp->ibqp.device)->lk_table;
+ pd = to_ipd(qp->ibqp.pd);
+ wqe = get_swqe_ptr(qp, qp->s_head);
+ wqe->wr = *wr;
+ wqe->length = 0;
+ j = 0;
+ if (wr->num_sge) {
+ acc = wr->opcode >= IB_WR_RDMA_READ ?
+ IB_ACCESS_LOCAL_WRITE : 0;
+ for (i = 0; i < wr->num_sge; i++) {
+ u32 length = wr->sg_list[i].length;
+ int ok;
+
+ if (length == 0)
+ continue;
+ ok = qib_lkey_ok(rkt, pd, &wqe->sg_list[j],
+ &wr->sg_list[i], acc);
+ if (!ok)
+ goto bail_inval_free;
+ wqe->length += length;
+ j++;
+ }
+ wqe->wr.num_sge = j;
+ }
+ if (qp->ibqp.qp_type == IB_QPT_UC ||
+ qp->ibqp.qp_type == IB_QPT_RC) {
+ if (wqe->length > 0x80000000U)
+ goto bail_inval_free;
+ } else if (wqe->length > (dd_from_ibdev(qp->ibqp.device)->pport +
+ qp->port_num - 1)->ibmtu)
+ goto bail_inval_free;
+ else
+ atomic_inc(&to_iah(wr->wr.ud.ah)->refcount);
+ wqe->ssn = qp->s_ssn++;
+ qp->s_head = next;
+
+ ret = 0;
+ goto bail;
+
+bail_inval_free:
+ while (j) {
+ struct qib_sge *sge = &wqe->sg_list[--j];
+
+ atomic_dec(&sge->mr->refcount);
+ }
+bail_inval:
+ ret = -EINVAL;
+bail:
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ return ret;
+}
+
+/**
+ * qib_post_send - post a send on a QP
+ * @ibqp: the QP to post the send on
+ * @wr: the list of work requests to post
+ * @bad_wr: the first bad WR is put here
+ *
+ * This may be called from interrupt context.
+ */
+static int qib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+ struct ib_send_wr **bad_wr)
+{
+ struct qib_qp *qp = to_iqp(ibqp);
+ int err = 0;
+
+ for (; wr; wr = wr->next) {
+ err = qib_post_one_send(qp, wr);
+ if (err) {
+ *bad_wr = wr;
+ goto bail;
+ }
+ }
+
+ /* Try to do the send work in the caller's context. */
+ qib_do_send(&qp->s_work);
+
+bail:
+ return err;
+}
+
+/**
+ * qib_post_receive - post a receive on a QP
+ * @ibqp: the QP to post the receive on
+ * @wr: the list of work requests to post
+ * @bad_wr: the first bad WR is put here
+ *
+ * This may be called from interrupt context.
+ */
+static int qib_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr)
+{
+ struct qib_qp *qp = to_iqp(ibqp);
+ struct qib_rwq *wq = qp->r_rq.wq;
+ unsigned long flags;
+ int ret;
+
+ /* Check that state is OK to post receive. */
+ if (!(ib_qib_state_ops[qp->state] & QIB_POST_RECV_OK) || !wq) {
+ *bad_wr = wr;
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ for (; wr; wr = wr->next) {
+ struct qib_rwqe *wqe;
+ u32 next;
+ int i;
+
+ if ((unsigned) wr->num_sge > qp->r_rq.max_sge) {
+ *bad_wr = wr;
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ spin_lock_irqsave(&qp->r_rq.lock, flags);
+ next = wq->head + 1;
+ if (next >= qp->r_rq.size)
+ next = 0;
+ if (next == wq->tail) {
+ spin_unlock_irqrestore(&qp->r_rq.lock, flags);
+ *bad_wr = wr;
+ ret = -ENOMEM;
+ goto bail;
+ }
+
+ wqe = get_rwqe_ptr(&qp->r_rq, wq->head);
+ wqe->wr_id = wr->wr_id;
+ wqe->num_sge = wr->num_sge;
+ for (i = 0; i < wr->num_sge; i++)
+ wqe->sg_list[i] = wr->sg_list[i];
+ /* Make sure queue entry is written before the head index. */
+ smp_wmb();
+ wq->head = next;
+ spin_unlock_irqrestore(&qp->r_rq.lock, flags);
+ }
+ ret = 0;
+
+bail:
+ return ret;
+}
+
+/**
+ * qib_qp_rcv - process an incoming packet on a QP
+ * @rcd: the context pointer
+ * @hdr: the packet header
+ * @has_grh: true if the packet has a GRH
+ * @data: the packet data
+ * @tlen: the packet length
+ * @qp: the QP the packet came on
+ *
+ * This is called from qib_ib_rcv() to process an incoming packet
+ * for the given QP.
+ * Called at interrupt level.
+ */
+static void qib_qp_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
+ int has_grh, void *data, u32 tlen, struct qib_qp *qp)
+{
+ struct qib_ibport *ibp = &rcd->ppd->ibport_data;
+
+ /* Check for valid receive state. */
+ if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
+ ibp->n_pkt_drops++;
+ return;
+ }
+
+ switch (qp->ibqp.qp_type) {
+ case IB_QPT_SMI:
+ case IB_QPT_GSI:
+ if (ib_qib_disable_sma)
+ break;
+ /* FALLTHROUGH */
+ case IB_QPT_UD:
+ qib_ud_rcv(ibp, hdr, has_grh, data, tlen, qp);
+ break;
+
+ case IB_QPT_RC:
+ qib_rc_rcv(rcd, hdr, has_grh, data, tlen, qp);
+ break;
+
+ case IB_QPT_UC:
+ qib_uc_rcv(ibp, hdr, has_grh, data, tlen, qp);
+ break;
+
+ default:
+ break;
+ }
+}
+
+/**
+ * qib_ib_rcv - process an incoming packet
+ * @rcd: the context pointer
+ * @rhdr: the header of the packet
+ * @data: the packet payload
+ * @tlen: the packet length
+ *
+ * This is called from qib_kreceive() to process an incoming packet at
+ * interrupt level. Tlen is the length of the header + data + CRC in bytes.
+ */
+void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen)
+{
+ struct qib_pportdata *ppd = rcd->ppd;
+ struct qib_ibport *ibp = &ppd->ibport_data;
+ struct qib_ib_header *hdr = rhdr;
+ struct qib_other_headers *ohdr;
+ struct qib_qp *qp;
+ u32 qp_num;
+ int lnh;
+ u8 opcode;
+ u16 lid;
+
+ /* 24 == LRH+BTH+CRC */
+ if (unlikely(tlen < 24))
+ goto drop;
+
+ /* Check for a valid destination LID (see ch. 7.11.1). */
+ lid = be16_to_cpu(hdr->lrh[1]);
+ if (lid < QIB_MULTICAST_LID_BASE) {
+ lid &= ~((1 << ppd->lmc) - 1);
+ if (unlikely(lid != ppd->lid))
+ goto drop;
+ }
+
+ /* Check for GRH */
+ lnh = be16_to_cpu(hdr->lrh[0]) & 3;
+ if (lnh == QIB_LRH_BTH)
+ ohdr = &hdr->u.oth;
+ else if (lnh == QIB_LRH_GRH) {
+ u32 vtf;
+
+ ohdr = &hdr->u.l.oth;
+ if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR)
+ goto drop;
+ vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow);
+ if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
+ goto drop;
+ } else
+ goto drop;
+
+ opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
+ ibp->opstats[opcode & 0x7f].n_bytes += tlen;
+ ibp->opstats[opcode & 0x7f].n_packets++;
+
+ /* Get the destination QP number. */
+ qp_num = be32_to_cpu(ohdr->bth[1]) & QIB_QPN_MASK;
+ if (qp_num == QIB_MULTICAST_QPN) {
+ struct qib_mcast *mcast;
+ struct qib_mcast_qp *p;
+
+ if (lnh != QIB_LRH_GRH)
+ goto drop;
+ mcast = qib_mcast_find(ibp, &hdr->u.l.grh.dgid);
+ if (mcast == NULL)
+ goto drop;
+ ibp->n_multicast_rcv++;
+ list_for_each_entry_rcu(p, &mcast->qp_list, list)
+ qib_qp_rcv(rcd, hdr, 1, data, tlen, p->qp);
+ /*
+ * Notify qib_multicast_detach() if it is waiting for us
+ * to finish.
+ */
+ if (atomic_dec_return(&mcast->refcount) <= 1)
+ wake_up(&mcast->wait);
+ } else {
+ qp = qib_lookup_qpn(ibp, qp_num);
+ if (!qp)
+ goto drop;
+ ibp->n_unicast_rcv++;
+ qib_qp_rcv(rcd, hdr, lnh == QIB_LRH_GRH, data, tlen, qp);
+ /*
+ * Notify qib_destroy_qp() if it is waiting
+ * for us to finish.
+ */
+ if (atomic_dec_and_test(&qp->refcount))
+ wake_up(&qp->wait);
+ }
+ return;
+
+drop:
+ ibp->n_pkt_drops++;
+}
+
+/*
+ * This is called from a timer to check for QPs
+ * which need kernel memory in order to send a packet.
+ */
+static void mem_timer(unsigned long data)
+{
+ struct qib_ibdev *dev = (struct qib_ibdev *) data;
+ struct list_head *list = &dev->memwait;
+ struct qib_qp *qp = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->pending_lock, flags);
+ if (!list_empty(list)) {
+ qp = list_entry(list->next, struct qib_qp, iowait);
+ list_del_init(&qp->iowait);
+ atomic_inc(&qp->refcount);
+ if (!list_empty(list))
+ mod_timer(&dev->mem_timer, jiffies + 1);
+ }
+ spin_unlock_irqrestore(&dev->pending_lock, flags);
+
+ if (qp) {
+ spin_lock_irqsave(&qp->s_lock, flags);
+ if (qp->s_flags & QIB_S_WAIT_KMEM) {
+ qp->s_flags &= ~QIB_S_WAIT_KMEM;
+ qib_schedule_send(qp);
+ }
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ if (atomic_dec_and_test(&qp->refcount))
+ wake_up(&qp->wait);
+ }
+}
+
+static void update_sge(struct qib_sge_state *ss, u32 length)
+{
+ struct qib_sge *sge = &ss->sge;
+
+ sge->vaddr += length;
+ sge->length -= length;
+ sge->sge_length -= length;
+ if (sge->sge_length == 0) {
+ if (--ss->num_sge)
+ *sge = *ss->sg_list++;
+ } else if (sge->length == 0 && sge->mr->lkey) {
+ if (++sge->n >= QIB_SEGSZ) {
+ if (++sge->m >= sge->mr->mapsz)
+ return;
+ sge->n = 0;
+ }
+ sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
+ sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
+ }
+}
+
+#ifdef __LITTLE_ENDIAN
+static inline u32 get_upper_bits(u32 data, u32 shift)
+{
+ return data >> shift;
+}
+
+static inline u32 set_upper_bits(u32 data, u32 shift)
+{
+ return data << shift;
+}
+
+static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
+{
+ data <<= ((sizeof(u32) - n) * BITS_PER_BYTE);
+ data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
+ return data;
+}
+#else
+static inline u32 get_upper_bits(u32 data, u32 shift)
+{
+ return data << shift;
+}
+
+static inline u32 set_upper_bits(u32 data, u32 shift)
+{
+ return data >> shift;
+}
+
+static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
+{
+ data >>= ((sizeof(u32) - n) * BITS_PER_BYTE);
+ data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
+ return data;
+}
+#endif
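The helpers above exist so copy_io() can merge payload bytes that are not 32-bit aligned into the aligned dword writes that the PIO buffer requires; the little-endian and big-endian variants differ only in which end of the word counts as "upper". The following standalone sketch (not part of the patch; the values and the little-endian assumption are illustrative only) shows how two byte pairs from consecutive SGEs end up in one aligned word:

#include <stdint.h>
#include <stdio.h>

#define BITS_PER_BYTE 8
typedef uint32_t u32;

/* little-endian versions of the helpers above */
static u32 set_upper_bits(u32 data, u32 shift) { return data << shift; }
static u32 clear_upper_bytes(u32 data, u32 n, u32 off)
{
	data <<= ((sizeof(u32) - n) * BITS_PER_BYTE);
	data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
	return data;
}

int main(void)
{
	u32 first  = 0x0000bbaa;	/* bytes aa bb left over from one SGE */
	u32 second = 0x0000ddcc;	/* bytes cc dd from the next SGE */
	u32 word;

	word  = clear_upper_bytes(first, 2, 0);			/* aa bb -- -- */
	word |= set_upper_bits(second, 2 * BITS_PER_BYTE);	/* aa bb cc dd */
	printf("0x%08x\n", word);	/* 0xddccbbaa on a little-endian host */
	return 0;
}

Storing the merged value with a single aligned write puts the bytes aa bb cc dd in order, which is the effect a single __raw_writel() of `data` has in copy_io() below.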
+
+static void copy_io(u32 __iomem *piobuf, struct qib_sge_state *ss,
+ u32 length, unsigned flush_wc)
+{
+ u32 extra = 0;
+ u32 data = 0;
+ u32 last;
+
+ while (1) {
+ u32 len = ss->sge.length;
+ u32 off;
+
+ if (len > length)
+ len = length;
+ if (len > ss->sge.sge_length)
+ len = ss->sge.sge_length;
+ BUG_ON(len == 0);
+ /* If the source address is not aligned, try to align it. */
+ off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);
+ if (off) {
+ u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr &
+ ~(sizeof(u32) - 1));
+ u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE);
+ u32 y;
+
+ y = sizeof(u32) - off;
+ if (len > y)
+ len = y;
+ if (len + extra >= sizeof(u32)) {
+ data |= set_upper_bits(v, extra *
+ BITS_PER_BYTE);
+ len = sizeof(u32) - extra;
+ if (len == length) {
+ last = data;
+ break;
+ }
+ __raw_writel(data, piobuf);
+ piobuf++;
+ extra = 0;
+ data = 0;
+ } else {
+ /* Clear unused upper bytes */
+ data |= clear_upper_bytes(v, len, extra);
+ if (len == length) {
+ last = data;
+ break;
+ }
+ extra += len;
+ }
+ } else if (extra) {
+ /* Source address is aligned. */
+ u32 *addr = (u32 *) ss->sge.vaddr;
+ int shift = extra * BITS_PER_BYTE;
+ int ushift = 32 - shift;
+ u32 l = len;
+
+ while (l >= sizeof(u32)) {
+ u32 v = *addr;
+
+ data |= set_upper_bits(v, shift);
+ __raw_writel(data, piobuf);
+ data = get_upper_bits(v, ushift);
+ piobuf++;
+ addr++;
+ l -= sizeof(u32);
+ }
+			/*
+			 * A partial dword (l < 4 bytes) remains to be
+			 * merged with the buffered 'extra' bytes.
+			 */
+ if (l) {
+ u32 v = *addr;
+
+ if (l + extra >= sizeof(u32)) {
+ data |= set_upper_bits(v, shift);
+ len -= l + extra - sizeof(u32);
+ if (len == length) {
+ last = data;
+ break;
+ }
+ __raw_writel(data, piobuf);
+ piobuf++;
+ extra = 0;
+ data = 0;
+ } else {
+ /* Clear unused upper bytes */
+ data |= clear_upper_bytes(v, l, extra);
+ if (len == length) {
+ last = data;
+ break;
+ }
+ extra += l;
+ }
+ } else if (len == length) {
+ last = data;
+ break;
+ }
+ } else if (len == length) {
+ u32 w;
+
+ /*
+ * Need to round up for the last dword in the
+ * packet.
+ */
+ w = (len + 3) >> 2;
+ qib_pio_copy(piobuf, ss->sge.vaddr, w - 1);
+ piobuf += w - 1;
+ last = ((u32 *) ss->sge.vaddr)[w - 1];
+ break;
+ } else {
+ u32 w = len >> 2;
+
+ qib_pio_copy(piobuf, ss->sge.vaddr, w);
+ piobuf += w;
+
+ extra = len & (sizeof(u32) - 1);
+ if (extra) {
+ u32 v = ((u32 *) ss->sge.vaddr)[w];
+
+ /* Clear unused upper bytes */
+ data = clear_upper_bytes(v, extra, 0);
+ }
+ }
+ update_sge(ss, len);
+ length -= len;
+ }
+ /* Update address before sending packet. */
+ update_sge(ss, length);
+ if (flush_wc) {
+		/* must flush everything early, before the trigger word */
+ qib_flush_wc();
+ __raw_writel(last, piobuf);
+ /* be sure trigger word is written */
+ qib_flush_wc();
+ } else
+ __raw_writel(last, piobuf);
+}
+
+static struct qib_verbs_txreq *get_txreq(struct qib_ibdev *dev,
+ struct qib_qp *qp, int *retp)
+{
+ struct qib_verbs_txreq *tx;
+ unsigned long flags;
+
+ spin_lock_irqsave(&qp->s_lock, flags);
+ spin_lock(&dev->pending_lock);
+
+ if (!list_empty(&dev->txreq_free)) {
+ struct list_head *l = dev->txreq_free.next;
+
+ list_del(l);
+ tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
+ *retp = 0;
+ } else {
+ if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK &&
+ list_empty(&qp->iowait)) {
+ dev->n_txwait++;
+ qp->s_flags |= QIB_S_WAIT_TX;
+ list_add_tail(&qp->iowait, &dev->txwait);
+ }
+ tx = NULL;
+ qp->s_flags &= ~QIB_S_BUSY;
+ *retp = -EBUSY;
+ }
+
+ spin_unlock(&dev->pending_lock);
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+
+ return tx;
+}
+
+void qib_put_txreq(struct qib_verbs_txreq *tx)
+{
+ struct qib_ibdev *dev;
+ struct qib_qp *qp;
+ unsigned long flags;
+
+ qp = tx->qp;
+ dev = to_idev(qp->ibqp.device);
+
+ if (atomic_dec_and_test(&qp->refcount))
+ wake_up(&qp->wait);
+ if (tx->mr) {
+ atomic_dec(&tx->mr->refcount);
+ tx->mr = NULL;
+ }
+ if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF) {
+ tx->txreq.flags &= ~QIB_SDMA_TXREQ_F_FREEBUF;
+ dma_unmap_single(&dd_from_dev(dev)->pcidev->dev,
+ tx->txreq.addr, tx->hdr_dwords << 2,
+ DMA_TO_DEVICE);
+ kfree(tx->align_buf);
+ }
+
+ spin_lock_irqsave(&dev->pending_lock, flags);
+
+ /* Put struct back on free list */
+ list_add(&tx->txreq.list, &dev->txreq_free);
+
+ if (!list_empty(&dev->txwait)) {
+ /* Wake up first QP wanting a free struct */
+ qp = list_entry(dev->txwait.next, struct qib_qp, iowait);
+ list_del_init(&qp->iowait);
+ atomic_inc(&qp->refcount);
+ spin_unlock_irqrestore(&dev->pending_lock, flags);
+
+ spin_lock_irqsave(&qp->s_lock, flags);
+ if (qp->s_flags & QIB_S_WAIT_TX) {
+ qp->s_flags &= ~QIB_S_WAIT_TX;
+ qib_schedule_send(qp);
+ }
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+
+ if (atomic_dec_and_test(&qp->refcount))
+ wake_up(&qp->wait);
+ } else
+ spin_unlock_irqrestore(&dev->pending_lock, flags);
+}
+
+/*
+ * This is called when there are send DMA descriptors that might be
+ * available.
+ *
+ * This is called with ppd->sdma_lock held.
+ */
+void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail)
+{
+ struct qib_qp *qp, *nqp;
+ struct qib_qp *qps[20];
+ struct qib_ibdev *dev;
+ unsigned i, n;
+
+ n = 0;
+ dev = &ppd->dd->verbs_dev;
+ spin_lock(&dev->pending_lock);
+
+ /* Search wait list for first QP wanting DMA descriptors. */
+ list_for_each_entry_safe(qp, nqp, &dev->dmawait, iowait) {
+ if (qp->port_num != ppd->port)
+ continue;
+ if (n == ARRAY_SIZE(qps))
+ break;
+ if (qp->s_tx->txreq.sg_count > avail)
+ break;
+ avail -= qp->s_tx->txreq.sg_count;
+ list_del_init(&qp->iowait);
+ atomic_inc(&qp->refcount);
+ qps[n++] = qp;
+ }
+
+ spin_unlock(&dev->pending_lock);
+
+ for (i = 0; i < n; i++) {
+ qp = qps[i];
+ spin_lock(&qp->s_lock);
+ if (qp->s_flags & QIB_S_WAIT_DMA_DESC) {
+ qp->s_flags &= ~QIB_S_WAIT_DMA_DESC;
+ qib_schedule_send(qp);
+ }
+ spin_unlock(&qp->s_lock);
+ if (atomic_dec_and_test(&qp->refcount))
+ wake_up(&qp->wait);
+ }
+}
+
+/*
+ * This is called with ppd->sdma_lock held.
+ */
+static void sdma_complete(struct qib_sdma_txreq *cookie, int status)
+{
+ struct qib_verbs_txreq *tx =
+ container_of(cookie, struct qib_verbs_txreq, txreq);
+ struct qib_qp *qp = tx->qp;
+
+ spin_lock(&qp->s_lock);
+ if (tx->wqe)
+ qib_send_complete(qp, tx->wqe, IB_WC_SUCCESS);
+ else if (qp->ibqp.qp_type == IB_QPT_RC) {
+ struct qib_ib_header *hdr;
+
+ if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF)
+ hdr = &tx->align_buf->hdr;
+ else {
+ struct qib_ibdev *dev = to_idev(qp->ibqp.device);
+
+ hdr = &dev->pio_hdrs[tx->hdr_inx].hdr;
+ }
+ qib_rc_send_complete(qp, hdr);
+ }
+ if (atomic_dec_and_test(&qp->s_dma_busy)) {
+ if (qp->state == IB_QPS_RESET)
+ wake_up(&qp->wait_dma);
+ else if (qp->s_flags & QIB_S_WAIT_DMA) {
+ qp->s_flags &= ~QIB_S_WAIT_DMA;
+ qib_schedule_send(qp);
+ }
+ }
+ spin_unlock(&qp->s_lock);
+
+ qib_put_txreq(tx);
+}
+
+static int wait_kmem(struct qib_ibdev *dev, struct qib_qp *qp)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&qp->s_lock, flags);
+ if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
+ spin_lock(&dev->pending_lock);
+ if (list_empty(&qp->iowait)) {
+ if (list_empty(&dev->memwait))
+ mod_timer(&dev->mem_timer, jiffies + 1);
+ qp->s_flags |= QIB_S_WAIT_KMEM;
+ list_add_tail(&qp->iowait, &dev->memwait);
+ }
+ spin_unlock(&dev->pending_lock);
+ qp->s_flags &= ~QIB_S_BUSY;
+ ret = -EBUSY;
+ }
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+
+ return ret;
+}
+
+static int qib_verbs_send_dma(struct qib_qp *qp, struct qib_ib_header *hdr,
+ u32 hdrwords, struct qib_sge_state *ss, u32 len,
+ u32 plen, u32 dwords)
+{
+ struct qib_ibdev *dev = to_idev(qp->ibqp.device);
+ struct qib_devdata *dd = dd_from_dev(dev);
+ struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+ struct qib_verbs_txreq *tx;
+ struct qib_pio_header *phdr;
+ u32 control;
+ u32 ndesc;
+ int ret;
+
+ tx = qp->s_tx;
+ if (tx) {
+ qp->s_tx = NULL;
+ /* resend previously constructed packet */
+ ret = qib_sdma_verbs_send(ppd, tx->ss, tx->dwords, tx);
+ goto bail;
+ }
+
+ tx = get_txreq(dev, qp, &ret);
+ if (!tx)
+ goto bail;
+
+ control = dd->f_setpbc_control(ppd, plen, qp->s_srate,
+ be16_to_cpu(hdr->lrh[0]) >> 12);
+ tx->qp = qp;
+ atomic_inc(&qp->refcount);
+ tx->wqe = qp->s_wqe;
+ tx->mr = qp->s_rdma_mr;
+ if (qp->s_rdma_mr)
+ qp->s_rdma_mr = NULL;
+ tx->txreq.callback = sdma_complete;
+ if (dd->flags & QIB_HAS_SDMA_TIMEOUT)
+ tx->txreq.flags = QIB_SDMA_TXREQ_F_HEADTOHOST;
+ else
+ tx->txreq.flags = QIB_SDMA_TXREQ_F_INTREQ;
+ if (plen + 1 > dd->piosize2kmax_dwords)
+ tx->txreq.flags |= QIB_SDMA_TXREQ_F_USELARGEBUF;
+
+ if (len) {
+ /*
+ * Don't try to DMA if it takes more descriptors than
+ * the queue holds.
+ */
+ ndesc = qib_count_sge(ss, len);
+ if (ndesc >= ppd->sdma_descq_cnt)
+ ndesc = 0;
+ } else
+ ndesc = 1;
+ if (ndesc) {
+ phdr = &dev->pio_hdrs[tx->hdr_inx];
+ phdr->pbc[0] = cpu_to_le32(plen);
+ phdr->pbc[1] = cpu_to_le32(control);
+ memcpy(&phdr->hdr, hdr, hdrwords << 2);
+ tx->txreq.flags |= QIB_SDMA_TXREQ_F_FREEDESC;
+ tx->txreq.sg_count = ndesc;
+ tx->txreq.addr = dev->pio_hdrs_phys +
+ tx->hdr_inx * sizeof(struct qib_pio_header);
+ tx->hdr_dwords = hdrwords + 2; /* add PBC length */
+ ret = qib_sdma_verbs_send(ppd, ss, dwords, tx);
+ goto bail;
+ }
+
+ /* Allocate a buffer and copy the header and payload to it. */
+ tx->hdr_dwords = plen + 1;
+ phdr = kmalloc(tx->hdr_dwords << 2, GFP_ATOMIC);
+ if (!phdr)
+ goto err_tx;
+ phdr->pbc[0] = cpu_to_le32(plen);
+ phdr->pbc[1] = cpu_to_le32(control);
+ memcpy(&phdr->hdr, hdr, hdrwords << 2);
+ qib_copy_from_sge((u32 *) &phdr->hdr + hdrwords, ss, len);
+
+ tx->txreq.addr = dma_map_single(&dd->pcidev->dev, phdr,
+ tx->hdr_dwords << 2, DMA_TO_DEVICE);
+ if (dma_mapping_error(&dd->pcidev->dev, tx->txreq.addr))
+ goto map_err;
+ tx->align_buf = phdr;
+ tx->txreq.flags |= QIB_SDMA_TXREQ_F_FREEBUF;
+ tx->txreq.sg_count = 1;
+ ret = qib_sdma_verbs_send(ppd, NULL, 0, tx);
+ goto unaligned;
+
+map_err:
+ kfree(phdr);
+err_tx:
+ qib_put_txreq(tx);
+ ret = wait_kmem(dev, qp);
+unaligned:
+ ibp->n_unaligned++;
+bail:
+ return ret;
+}
+
+/*
+ * If we are now in the error state, return zero to flush the
+ * send work request.
+ */
+static int no_bufs_available(struct qib_qp *qp)
+{
+ struct qib_ibdev *dev = to_idev(qp->ibqp.device);
+ struct qib_devdata *dd;
+ unsigned long flags;
+ int ret = 0;
+
+ /*
+	 * Note that as soon as dd->f_wantpiobuf_intr() is called and
+ * possibly before it returns, qib_ib_piobufavail()
+ * could be called. Therefore, put QP on the I/O wait list before
+ * enabling the PIO avail interrupt.
+ */
+ spin_lock_irqsave(&qp->s_lock, flags);
+ if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
+ spin_lock(&dev->pending_lock);
+ if (list_empty(&qp->iowait)) {
+ dev->n_piowait++;
+ qp->s_flags |= QIB_S_WAIT_PIO;
+ list_add_tail(&qp->iowait, &dev->piowait);
+ dd = dd_from_dev(dev);
+ dd->f_wantpiobuf_intr(dd, 1);
+ }
+ spin_unlock(&dev->pending_lock);
+ qp->s_flags &= ~QIB_S_BUSY;
+ ret = -EBUSY;
+ }
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ return ret;
+}
+
+static int qib_verbs_send_pio(struct qib_qp *qp, struct qib_ib_header *ibhdr,
+ u32 hdrwords, struct qib_sge_state *ss, u32 len,
+ u32 plen, u32 dwords)
+{
+ struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
+ struct qib_pportdata *ppd = dd->pport + qp->port_num - 1;
+ u32 *hdr = (u32 *) ibhdr;
+ u32 __iomem *piobuf_orig;
+ u32 __iomem *piobuf;
+ u64 pbc;
+ unsigned long flags;
+ unsigned flush_wc;
+ u32 control;
+ u32 pbufn;
+
+ control = dd->f_setpbc_control(ppd, plen, qp->s_srate,
+ be16_to_cpu(ibhdr->lrh[0]) >> 12);
+ pbc = ((u64) control << 32) | plen;
+ piobuf = dd->f_getsendbuf(ppd, pbc, &pbufn);
+ if (unlikely(piobuf == NULL))
+ return no_bufs_available(qp);
+
+ /*
+ * Write the pbc.
+ * We have to flush after the PBC for correctness on some cpus
+ * or WC buffer can be written out of order.
+ */
+ writeq(pbc, piobuf);
+ piobuf_orig = piobuf;
+ piobuf += 2;
+
+ flush_wc = dd->flags & QIB_PIO_FLUSH_WC;
+ if (len == 0) {
+ /*
+ * If there is just the header portion, must flush before
+ * writing last word of header for correctness, and after
+ * the last header word (trigger word).
+ */
+ if (flush_wc) {
+ qib_flush_wc();
+ qib_pio_copy(piobuf, hdr, hdrwords - 1);
+ qib_flush_wc();
+ __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1);
+ qib_flush_wc();
+ } else
+ qib_pio_copy(piobuf, hdr, hdrwords);
+ goto done;
+ }
+
+ if (flush_wc)
+ qib_flush_wc();
+ qib_pio_copy(piobuf, hdr, hdrwords);
+ piobuf += hdrwords;
+
+ /* The common case is aligned and contained in one segment. */
+ if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
+ !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
+ u32 *addr = (u32 *) ss->sge.vaddr;
+
+ /* Update address before sending packet. */
+ update_sge(ss, len);
+ if (flush_wc) {
+ qib_pio_copy(piobuf, addr, dwords - 1);
+			/* must flush everything early, before the trigger word */
+ qib_flush_wc();
+ __raw_writel(addr[dwords - 1], piobuf + dwords - 1);
+ /* be sure trigger word is written */
+ qib_flush_wc();
+ } else
+ qib_pio_copy(piobuf, addr, dwords);
+ goto done;
+ }
+ copy_io(piobuf, ss, len, flush_wc);
+done:
+ if (dd->flags & QIB_USE_SPCL_TRIG) {
+ u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;
+ qib_flush_wc();
+ __raw_writel(0xaebecede, piobuf_orig + spcl_off);
+ }
+ qib_sendbuf_done(dd, pbufn);
+ if (qp->s_rdma_mr) {
+ atomic_dec(&qp->s_rdma_mr->refcount);
+ qp->s_rdma_mr = NULL;
+ }
+ if (qp->s_wqe) {
+ spin_lock_irqsave(&qp->s_lock, flags);
+ qib_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS);
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ } else if (qp->ibqp.qp_type == IB_QPT_RC) {
+ spin_lock_irqsave(&qp->s_lock, flags);
+ qib_rc_send_complete(qp, ibhdr);
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ }
+ return 0;
+}
+
+/**
+ * qib_verbs_send - send a packet
+ * @qp: the QP to send on
+ * @hdr: the packet header
+ * @hdrwords: the number of 32-bit words in the header
+ * @ss: the SGE to send
+ * @len: the length of the packet in bytes
+ *
+ * Return zero if packet is sent or queued OK.
+ * Return non-zero and clear the QIB_S_BUSY flag in qp->s_flags otherwise.
+ */
+int qib_verbs_send(struct qib_qp *qp, struct qib_ib_header *hdr,
+ u32 hdrwords, struct qib_sge_state *ss, u32 len)
+{
+ struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
+ u32 plen;
+ int ret;
+ u32 dwords = (len + 3) >> 2;
+
+ /*
+ * Calculate the send buffer trigger address.
+	 * The +1 accounts for the PBC control dword that follows the PBC length.
+ */
+ plen = hdrwords + dwords + 1;
+
+ /*
+ * VL15 packets (IB_QPT_SMI) will always use PIO, so we
+ * can defer SDMA restart until link goes ACTIVE without
+ * worrying about just how we got there.
+ */
+ if (qp->ibqp.qp_type == IB_QPT_SMI ||
+ !(dd->flags & QIB_HAS_SEND_DMA))
+ ret = qib_verbs_send_pio(qp, hdr, hdrwords, ss, len,
+ plen, dwords);
+ else
+ ret = qib_verbs_send_dma(qp, hdr, hdrwords, ss, len,
+ plen, dwords);
+
+ return ret;
+}
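To make the dword arithmetic above concrete, here is a tiny standalone sketch (the header and payload sizes are hypothetical, not taken from the driver) of how dwords and plen are derived before choosing the PIO or SDMA path:

#include <stdio.h>
typedef unsigned int u32;

int main(void)
{
	u32 hdrwords = 7;			/* hypothetical 28-byte IB header */
	u32 len = 9;				/* hypothetical 9-byte payload */
	u32 dwords = (len + 3) >> 2;		/* payload rounded up to dwords: 3 */
	u32 plen = hdrwords + dwords + 1;	/* plus the PBC control dword: 11 */

	printf("dwords=%u plen=%u\n", dwords, plen);
	return 0;
}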
+
+int qib_snapshot_counters(struct qib_pportdata *ppd, u64 *swords,
+ u64 *rwords, u64 *spkts, u64 *rpkts,
+ u64 *xmit_wait)
+{
+ int ret;
+ struct qib_devdata *dd = ppd->dd;
+
+ if (!(dd->flags & QIB_PRESENT)) {
+ /* no hardware, freeze, etc. */
+ ret = -EINVAL;
+ goto bail;
+ }
+ *swords = dd->f_portcntr(ppd, QIBPORTCNTR_WORDSEND);
+ *rwords = dd->f_portcntr(ppd, QIBPORTCNTR_WORDRCV);
+ *spkts = dd->f_portcntr(ppd, QIBPORTCNTR_PKTSEND);
+ *rpkts = dd->f_portcntr(ppd, QIBPORTCNTR_PKTRCV);
+ *xmit_wait = dd->f_portcntr(ppd, QIBPORTCNTR_SENDSTALL);
+
+ ret = 0;
+
+bail:
+ return ret;
+}
+
+/**
+ * qib_get_counters - get various chip counters
+ * @ppd: the physical port of the qlogic_ib device
+ * @cntrs: counters are placed here
+ *
+ * Return the counters needed by recv_pma_get_portcounters().
+ */
+int qib_get_counters(struct qib_pportdata *ppd,
+ struct qib_verbs_counters *cntrs)
+{
+ int ret;
+
+ if (!(ppd->dd->flags & QIB_PRESENT)) {
+ /* no hardware, freeze, etc. */
+ ret = -EINVAL;
+ goto bail;
+ }
+ cntrs->symbol_error_counter =
+ ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBSYMBOLERR);
+ cntrs->link_error_recovery_counter =
+ ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBLINKERRRECOV);
+ /*
+ * The link downed counter counts when the other side downs the
+ * connection. We add in the number of times we downed the link
+ * due to local link integrity errors to compensate.
+ */
+ cntrs->link_downed_counter =
+ ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBLINKDOWN);
+ cntrs->port_rcv_errors =
+ ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXDROPPKT) +
+ ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RCVOVFL) +
+ ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERR_RLEN) +
+ ppd->dd->f_portcntr(ppd, QIBPORTCNTR_INVALIDRLEN) +
+ ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRLINK) +
+ ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRICRC) +
+ ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRVCRC) +
+ ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRLPCRC) +
+ ppd->dd->f_portcntr(ppd, QIBPORTCNTR_BADFORMAT);
+ cntrs->port_rcv_errors +=
+ ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXLOCALPHYERR);
+ cntrs->port_rcv_errors +=
+ ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXVLERR);
+ cntrs->port_rcv_remphys_errors =
+ ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RCVEBP);
+ cntrs->port_xmit_discards =
+ ppd->dd->f_portcntr(ppd, QIBPORTCNTR_UNSUPVL);
+ cntrs->port_xmit_data = ppd->dd->f_portcntr(ppd,
+ QIBPORTCNTR_WORDSEND);
+ cntrs->port_rcv_data = ppd->dd->f_portcntr(ppd,
+ QIBPORTCNTR_WORDRCV);
+ cntrs->port_xmit_packets = ppd->dd->f_portcntr(ppd,
+ QIBPORTCNTR_PKTSEND);
+ cntrs->port_rcv_packets = ppd->dd->f_portcntr(ppd,
+ QIBPORTCNTR_PKTRCV);
+ cntrs->local_link_integrity_errors =
+ ppd->dd->f_portcntr(ppd, QIBPORTCNTR_LLI);
+ cntrs->excessive_buffer_overrun_errors =
+ ppd->dd->f_portcntr(ppd, QIBPORTCNTR_EXCESSBUFOVFL);
+ cntrs->vl15_dropped =
+ ppd->dd->f_portcntr(ppd, QIBPORTCNTR_VL15PKTDROP);
+
+ ret = 0;
+
+bail:
+ return ret;
+}
+
+/**
+ * qib_ib_piobufavail - callback when a PIO buffer is available
+ * @dd: the device pointer
+ *
+ * This is called from qib_intr() at interrupt level when a PIO buffer is
+ * available after qib_verbs_send() returned an error that no buffers were
+ * available. Disable the interrupt if there are no more QPs waiting.
+ */
+void qib_ib_piobufavail(struct qib_devdata *dd)
+{
+ struct qib_ibdev *dev = &dd->verbs_dev;
+ struct list_head *list;
+ struct qib_qp *qps[5];
+ struct qib_qp *qp;
+ unsigned long flags;
+ unsigned i, n;
+
+ list = &dev->piowait;
+ n = 0;
+
+ /*
+ * Note: checking that the piowait list is empty and clearing
+ * the buffer available interrupt needs to be atomic or we
+ * could end up with QPs on the wait list with the interrupt
+ * disabled.
+ */
+ spin_lock_irqsave(&dev->pending_lock, flags);
+ while (!list_empty(list)) {
+ if (n == ARRAY_SIZE(qps))
+ goto full;
+ qp = list_entry(list->next, struct qib_qp, iowait);
+ list_del_init(&qp->iowait);
+ atomic_inc(&qp->refcount);
+ qps[n++] = qp;
+ }
+ dd->f_wantpiobuf_intr(dd, 0);
+full:
+ spin_unlock_irqrestore(&dev->pending_lock, flags);
+
+ for (i = 0; i < n; i++) {
+ qp = qps[i];
+
+ spin_lock_irqsave(&qp->s_lock, flags);
+ if (qp->s_flags & QIB_S_WAIT_PIO) {
+ qp->s_flags &= ~QIB_S_WAIT_PIO;
+ qib_schedule_send(qp);
+ }
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+
+ /* Notify qib_destroy_qp() if it is waiting. */
+ if (atomic_dec_and_test(&qp->refcount))
+ wake_up(&qp->wait);
+ }
+}
+
+static int qib_query_device(struct ib_device *ibdev,
+ struct ib_device_attr *props)
+{
+ struct qib_devdata *dd = dd_from_ibdev(ibdev);
+ struct qib_ibdev *dev = to_idev(ibdev);
+
+ memset(props, 0, sizeof(*props));
+
+ props->device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
+ IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
+ IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN |
+ IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE;
+ props->page_size_cap = PAGE_SIZE;
+ props->vendor_id =
+ QIB_SRC_OUI_1 << 16 | QIB_SRC_OUI_2 << 8 | QIB_SRC_OUI_3;
+ props->vendor_part_id = dd->deviceid;
+ props->hw_ver = dd->minrev;
+ props->sys_image_guid = ib_qib_sys_image_guid;
+ props->max_mr_size = ~0ULL;
+ props->max_qp = ib_qib_max_qps;
+ props->max_qp_wr = ib_qib_max_qp_wrs;
+ props->max_sge = ib_qib_max_sges;
+ props->max_cq = ib_qib_max_cqs;
+ props->max_ah = ib_qib_max_ahs;
+ props->max_cqe = ib_qib_max_cqes;
+ props->max_mr = dev->lk_table.max;
+ props->max_fmr = dev->lk_table.max;
+ props->max_map_per_fmr = 32767;
+ props->max_pd = ib_qib_max_pds;
+ props->max_qp_rd_atom = QIB_MAX_RDMA_ATOMIC;
+ props->max_qp_init_rd_atom = 255;
+ /* props->max_res_rd_atom */
+ props->max_srq = ib_qib_max_srqs;
+ props->max_srq_wr = ib_qib_max_srq_wrs;
+ props->max_srq_sge = ib_qib_max_srq_sges;
+ /* props->local_ca_ack_delay */
+ props->atomic_cap = IB_ATOMIC_GLOB;
+ props->max_pkeys = qib_get_npkeys(dd);
+ props->max_mcast_grp = ib_qib_max_mcast_grps;
+ props->max_mcast_qp_attach = ib_qib_max_mcast_qp_attached;
+ props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
+ props->max_mcast_grp;
+
+ return 0;
+}
+
+static int qib_query_port(struct ib_device *ibdev, u8 port,
+ struct ib_port_attr *props)
+{
+ struct qib_devdata *dd = dd_from_ibdev(ibdev);
+ struct qib_ibport *ibp = to_iport(ibdev, port);
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+ enum ib_mtu mtu;
+ u16 lid = ppd->lid;
+
+ memset(props, 0, sizeof(*props));
+ props->lid = lid ? lid : be16_to_cpu(IB_LID_PERMISSIVE);
+ props->lmc = ppd->lmc;
+ props->sm_lid = ibp->sm_lid;
+ props->sm_sl = ibp->sm_sl;
+ props->state = dd->f_iblink_state(ppd->lastibcstat);
+ props->phys_state = dd->f_ibphys_portstate(ppd->lastibcstat);
+ props->port_cap_flags = ibp->port_cap_flags;
+ props->gid_tbl_len = QIB_GUIDS_PER_PORT;
+ props->max_msg_sz = 0x80000000;
+ props->pkey_tbl_len = qib_get_npkeys(dd);
+ props->bad_pkey_cntr = ibp->pkey_violations;
+ props->qkey_viol_cntr = ibp->qkey_violations;
+ props->active_width = ppd->link_width_active;
+ /* See rate_show() */
+ props->active_speed = ppd->link_speed_active;
+ props->max_vl_num = qib_num_vls(ppd->vls_supported);
+ props->init_type_reply = 0;
+
+ props->max_mtu = qib_ibmtu ? qib_ibmtu : IB_MTU_4096;
+ switch (ppd->ibmtu) {
+ case 4096:
+ mtu = IB_MTU_4096;
+ break;
+ case 2048:
+ mtu = IB_MTU_2048;
+ break;
+ case 1024:
+ mtu = IB_MTU_1024;
+ break;
+ case 512:
+ mtu = IB_MTU_512;
+ break;
+ case 256:
+ mtu = IB_MTU_256;
+ break;
+ default:
+ mtu = IB_MTU_2048;
+ }
+ props->active_mtu = mtu;
+ props->subnet_timeout = ibp->subnet_timeout;
+
+ return 0;
+}
+
+static int qib_modify_device(struct ib_device *device,
+ int device_modify_mask,
+ struct ib_device_modify *device_modify)
+{
+ struct qib_devdata *dd = dd_from_ibdev(device);
+ unsigned i;
+ int ret;
+
+ if (device_modify_mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
+ IB_DEVICE_MODIFY_NODE_DESC)) {
+ ret = -EOPNOTSUPP;
+ goto bail;
+ }
+
+ if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC) {
+ memcpy(device->node_desc, device_modify->node_desc, 64);
+ for (i = 0; i < dd->num_pports; i++) {
+ struct qib_ibport *ibp = &dd->pport[i].ibport_data;
+
+ qib_node_desc_chg(ibp);
+ }
+ }
+
+ if (device_modify_mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID) {
+ ib_qib_sys_image_guid =
+ cpu_to_be64(device_modify->sys_image_guid);
+ for (i = 0; i < dd->num_pports; i++) {
+ struct qib_ibport *ibp = &dd->pport[i].ibport_data;
+
+ qib_sys_guid_chg(ibp);
+ }
+ }
+
+ ret = 0;
+
+bail:
+ return ret;
+}
+
+static int qib_modify_port(struct ib_device *ibdev, u8 port,
+ int port_modify_mask, struct ib_port_modify *props)
+{
+ struct qib_ibport *ibp = to_iport(ibdev, port);
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+
+ ibp->port_cap_flags |= props->set_port_cap_mask;
+ ibp->port_cap_flags &= ~props->clr_port_cap_mask;
+ if (props->set_port_cap_mask || props->clr_port_cap_mask)
+ qib_cap_mask_chg(ibp);
+ if (port_modify_mask & IB_PORT_SHUTDOWN)
+ qib_set_linkstate(ppd, QIB_IB_LINKDOWN);
+ if (port_modify_mask & IB_PORT_RESET_QKEY_CNTR)
+ ibp->qkey_violations = 0;
+ return 0;
+}
+
+static int qib_query_gid(struct ib_device *ibdev, u8 port,
+ int index, union ib_gid *gid)
+{
+ struct qib_devdata *dd = dd_from_ibdev(ibdev);
+ int ret = 0;
+
+ if (!port || port > dd->num_pports)
+ ret = -EINVAL;
+ else {
+ struct qib_ibport *ibp = to_iport(ibdev, port);
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+
+ gid->global.subnet_prefix = ibp->gid_prefix;
+ if (index == 0)
+ gid->global.interface_id = ppd->guid;
+ else if (index < QIB_GUIDS_PER_PORT)
+ gid->global.interface_id = ibp->guids[index - 1];
+ else
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static struct ib_pd *qib_alloc_pd(struct ib_device *ibdev,
+ struct ib_ucontext *context,
+ struct ib_udata *udata)
+{
+ struct qib_ibdev *dev = to_idev(ibdev);
+ struct qib_pd *pd;
+ struct ib_pd *ret;
+
+ /*
+ * This is actually totally arbitrary. Some correctness tests
+ * assume there's a maximum number of PDs that can be allocated.
+ * We don't actually have this limit, but we fail the test if
+ * we allow allocations of more than we report for this value.
+ */
+
+ pd = kmalloc(sizeof *pd, GFP_KERNEL);
+ if (!pd) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail;
+ }
+
+ spin_lock(&dev->n_pds_lock);
+ if (dev->n_pds_allocated == ib_qib_max_pds) {
+ spin_unlock(&dev->n_pds_lock);
+ kfree(pd);
+ ret = ERR_PTR(-ENOMEM);
+ goto bail;
+ }
+
+ dev->n_pds_allocated++;
+ spin_unlock(&dev->n_pds_lock);
+
+ /* ib_alloc_pd() will initialize pd->ibpd. */
+ pd->user = udata != NULL;
+
+ ret = &pd->ibpd;
+
+bail:
+ return ret;
+}
+
+static int qib_dealloc_pd(struct ib_pd *ibpd)
+{
+ struct qib_pd *pd = to_ipd(ibpd);
+ struct qib_ibdev *dev = to_idev(ibpd->device);
+
+ spin_lock(&dev->n_pds_lock);
+ dev->n_pds_allocated--;
+ spin_unlock(&dev->n_pds_lock);
+
+ kfree(pd);
+
+ return 0;
+}
+
+int qib_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr)
+{
+ /* A multicast address requires a GRH (see ch. 8.4.1). */
+ if (ah_attr->dlid >= QIB_MULTICAST_LID_BASE &&
+ ah_attr->dlid != QIB_PERMISSIVE_LID &&
+ !(ah_attr->ah_flags & IB_AH_GRH))
+ goto bail;
+ if ((ah_attr->ah_flags & IB_AH_GRH) &&
+ ah_attr->grh.sgid_index >= QIB_GUIDS_PER_PORT)
+ goto bail;
+ if (ah_attr->dlid == 0)
+ goto bail;
+ if (ah_attr->port_num < 1 ||
+ ah_attr->port_num > ibdev->phys_port_cnt)
+ goto bail;
+ if (ah_attr->static_rate != IB_RATE_PORT_CURRENT &&
+ ib_rate_to_mult(ah_attr->static_rate) < 0)
+ goto bail;
+ if (ah_attr->sl > 15)
+ goto bail;
+ return 0;
+bail:
+ return -EINVAL;
+}
+
+/**
+ * qib_create_ah - create an address handle
+ * @pd: the protection domain
+ * @ah_attr: the attributes of the AH
+ *
+ * This may be called from interrupt context.
+ */
+static struct ib_ah *qib_create_ah(struct ib_pd *pd,
+ struct ib_ah_attr *ah_attr)
+{
+ struct qib_ah *ah;
+ struct ib_ah *ret;
+ struct qib_ibdev *dev = to_idev(pd->device);
+ unsigned long flags;
+
+ if (qib_check_ah(pd->device, ah_attr)) {
+ ret = ERR_PTR(-EINVAL);
+ goto bail;
+ }
+
+ ah = kmalloc(sizeof *ah, GFP_ATOMIC);
+ if (!ah) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail;
+ }
+
+ spin_lock_irqsave(&dev->n_ahs_lock, flags);
+ if (dev->n_ahs_allocated == ib_qib_max_ahs) {
+ spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
+ kfree(ah);
+ ret = ERR_PTR(-ENOMEM);
+ goto bail;
+ }
+
+ dev->n_ahs_allocated++;
+ spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
+
+ /* ib_create_ah() will initialize ah->ibah. */
+ ah->attr = *ah_attr;
+ atomic_set(&ah->refcount, 0);
+
+ ret = &ah->ibah;
+
+bail:
+ return ret;
+}
+
+/**
+ * qib_destroy_ah - destroy an address handle
+ * @ibah: the AH to destroy
+ *
+ * This may be called from interrupt context.
+ */
+static int qib_destroy_ah(struct ib_ah *ibah)
+{
+ struct qib_ibdev *dev = to_idev(ibah->device);
+ struct qib_ah *ah = to_iah(ibah);
+ unsigned long flags;
+
+ if (atomic_read(&ah->refcount) != 0)
+ return -EBUSY;
+
+ spin_lock_irqsave(&dev->n_ahs_lock, flags);
+ dev->n_ahs_allocated--;
+ spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
+
+ kfree(ah);
+
+ return 0;
+}
+
+static int qib_modify_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
+{
+ struct qib_ah *ah = to_iah(ibah);
+
+ if (qib_check_ah(ibah->device, ah_attr))
+ return -EINVAL;
+
+ ah->attr = *ah_attr;
+
+ return 0;
+}
+
+static int qib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
+{
+ struct qib_ah *ah = to_iah(ibah);
+
+ *ah_attr = ah->attr;
+
+ return 0;
+}
+
+/**
+ * qib_get_npkeys - return the size of the PKEY table for context 0
+ * @dd: the qlogic_ib device
+ */
+unsigned qib_get_npkeys(struct qib_devdata *dd)
+{
+ return ARRAY_SIZE(dd->rcd[0]->pkeys);
+}
+
+/*
+ * Return the indexed PKEY from the port PKEY table.
+ * No need to validate rcd[ctxt]; the port is set up if we are here.
+ */
+unsigned qib_get_pkey(struct qib_ibport *ibp, unsigned index)
+{
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+ struct qib_devdata *dd = ppd->dd;
+ unsigned ctxt = ppd->hw_pidx;
+ unsigned ret;
+
+ /* dd->rcd null if mini_init or some init failures */
+ if (!dd->rcd || index >= ARRAY_SIZE(dd->rcd[ctxt]->pkeys))
+ ret = 0;
+ else
+ ret = dd->rcd[ctxt]->pkeys[index];
+
+ return ret;
+}
+
+static int qib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
+ u16 *pkey)
+{
+ struct qib_devdata *dd = dd_from_ibdev(ibdev);
+ int ret;
+
+ if (index >= qib_get_npkeys(dd)) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ *pkey = qib_get_pkey(to_iport(ibdev, port), index);
+ ret = 0;
+
+bail:
+ return ret;
+}
+
+/**
+ * qib_alloc_ucontext - allocate a ucontext
+ * @ibdev: the infiniband device
+ * @udata: not used by the QLogic_IB driver
+ */
+
+static struct ib_ucontext *qib_alloc_ucontext(struct ib_device *ibdev,
+ struct ib_udata *udata)
+{
+ struct qib_ucontext *context;
+ struct ib_ucontext *ret;
+
+ context = kmalloc(sizeof *context, GFP_KERNEL);
+ if (!context) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail;
+ }
+
+ ret = &context->ibucontext;
+
+bail:
+ return ret;
+}
+
+static int qib_dealloc_ucontext(struct ib_ucontext *context)
+{
+ kfree(to_iucontext(context));
+ return 0;
+}
+
+static void init_ibport(struct qib_pportdata *ppd)
+{
+ struct qib_verbs_counters cntrs;
+ struct qib_ibport *ibp = &ppd->ibport_data;
+
+ spin_lock_init(&ibp->lock);
+ /* Set the prefix to the default value (see ch. 4.1.1) */
+ ibp->gid_prefix = IB_DEFAULT_GID_PREFIX;
+ ibp->sm_lid = be16_to_cpu(IB_LID_PERMISSIVE);
+ ibp->port_cap_flags = IB_PORT_SYS_IMAGE_GUID_SUP |
+ IB_PORT_CLIENT_REG_SUP | IB_PORT_SL_MAP_SUP |
+ IB_PORT_TRAP_SUP | IB_PORT_AUTO_MIGR_SUP |
+ IB_PORT_DR_NOTICE_SUP | IB_PORT_CAP_MASK_NOTICE_SUP |
+ IB_PORT_OTHER_LOCAL_CHANGES_SUP;
+ if (ppd->dd->flags & QIB_HAS_LINK_LATENCY)
+ ibp->port_cap_flags |= IB_PORT_LINK_LATENCY_SUP;
+ ibp->pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA;
+ ibp->pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
+ ibp->pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
+ ibp->pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
+ ibp->pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;
+
+ /* Snapshot current HW counters to "clear" them. */
+ qib_get_counters(ppd, &cntrs);
+ ibp->z_symbol_error_counter = cntrs.symbol_error_counter;
+ ibp->z_link_error_recovery_counter =
+ cntrs.link_error_recovery_counter;
+ ibp->z_link_downed_counter = cntrs.link_downed_counter;
+ ibp->z_port_rcv_errors = cntrs.port_rcv_errors;
+ ibp->z_port_rcv_remphys_errors = cntrs.port_rcv_remphys_errors;
+ ibp->z_port_xmit_discards = cntrs.port_xmit_discards;
+ ibp->z_port_xmit_data = cntrs.port_xmit_data;
+ ibp->z_port_rcv_data = cntrs.port_rcv_data;
+ ibp->z_port_xmit_packets = cntrs.port_xmit_packets;
+ ibp->z_port_rcv_packets = cntrs.port_rcv_packets;
+ ibp->z_local_link_integrity_errors =
+ cntrs.local_link_integrity_errors;
+ ibp->z_excessive_buffer_overrun_errors =
+ cntrs.excessive_buffer_overrun_errors;
+ ibp->z_vl15_dropped = cntrs.vl15_dropped;
+}
+
+/**
+ * qib_register_ib_device - register our device with the infiniband core
+ * @dd: the device data structure
+ * Return 0 on success or a negative errno on failure.
+ */
+int qib_register_ib_device(struct qib_devdata *dd)
+{
+ struct qib_ibdev *dev = &dd->verbs_dev;
+ struct ib_device *ibdev = &dev->ibdev;
+ struct qib_pportdata *ppd = dd->pport;
+ unsigned i, lk_tab_size;
+ int ret;
+
+ dev->qp_table_size = ib_qib_qp_table_size;
+ dev->qp_table = kzalloc(dev->qp_table_size * sizeof *dev->qp_table,
+ GFP_KERNEL);
+ if (!dev->qp_table) {
+ ret = -ENOMEM;
+ goto err_qpt;
+ }
+
+ for (i = 0; i < dd->num_pports; i++)
+ init_ibport(ppd + i);
+
+ /* Only need to initialize non-zero fields. */
+ spin_lock_init(&dev->qpt_lock);
+ spin_lock_init(&dev->n_pds_lock);
+ spin_lock_init(&dev->n_ahs_lock);
+ spin_lock_init(&dev->n_cqs_lock);
+ spin_lock_init(&dev->n_qps_lock);
+ spin_lock_init(&dev->n_srqs_lock);
+ spin_lock_init(&dev->n_mcast_grps_lock);
+ init_timer(&dev->mem_timer);
+ dev->mem_timer.function = mem_timer;
+ dev->mem_timer.data = (unsigned long) dev;
+
+ qib_init_qpn_table(dd, &dev->qpn_table);
+
+ /*
+ * The top ib_qib_lkey_table_size bits are used to index the
+ * table. The lower 8 bits can be owned by the user (copied from
+ * the LKEY). The remaining bits act as a generation number or tag.
+ */
+ spin_lock_init(&dev->lk_table.lock);
+ dev->lk_table.max = 1 << ib_qib_lkey_table_size;
+ lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table);
+ dev->lk_table.table = (struct qib_mregion **)
+ __get_free_pages(GFP_KERNEL, get_order(lk_tab_size));
+ if (dev->lk_table.table == NULL) {
+ ret = -ENOMEM;
+ goto err_lk;
+ }
+ memset(dev->lk_table.table, 0, lk_tab_size);
+ INIT_LIST_HEAD(&dev->pending_mmaps);
+ spin_lock_init(&dev->pending_lock);
+ dev->mmap_offset = PAGE_SIZE;
+ spin_lock_init(&dev->mmap_offset_lock);
+ INIT_LIST_HEAD(&dev->piowait);
+ INIT_LIST_HEAD(&dev->dmawait);
+ INIT_LIST_HEAD(&dev->txwait);
+ INIT_LIST_HEAD(&dev->memwait);
+ INIT_LIST_HEAD(&dev->txreq_free);
+
+ if (ppd->sdma_descq_cnt) {
+ dev->pio_hdrs = dma_alloc_coherent(&dd->pcidev->dev,
+ ppd->sdma_descq_cnt *
+ sizeof(struct qib_pio_header),
+ &dev->pio_hdrs_phys,
+ GFP_KERNEL);
+ if (!dev->pio_hdrs) {
+ ret = -ENOMEM;
+ goto err_hdrs;
+ }
+ }
+
+ for (i = 0; i < ppd->sdma_descq_cnt; i++) {
+ struct qib_verbs_txreq *tx;
+
+ tx = kzalloc(sizeof *tx, GFP_KERNEL);
+ if (!tx) {
+ ret = -ENOMEM;
+ goto err_tx;
+ }
+ tx->hdr_inx = i;
+ list_add(&tx->txreq.list, &dev->txreq_free);
+ }
+
+ /*
+ * The system image GUID is supposed to be the same for all
+ * IB HCAs in a single system but since there can be other
+ * device types in the system, we can't be sure this is unique.
+ */
+ if (!ib_qib_sys_image_guid)
+ ib_qib_sys_image_guid = ppd->guid;
+
+ strlcpy(ibdev->name, "qib%d", IB_DEVICE_NAME_MAX);
+ ibdev->owner = THIS_MODULE;
+ ibdev->node_guid = ppd->guid;
+ ibdev->uverbs_abi_ver = QIB_UVERBS_ABI_VERSION;
+ ibdev->uverbs_cmd_mask =
+ (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
+ (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
+ (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_AH) |
+ (1ull << IB_USER_VERBS_CMD_MODIFY_AH) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_AH) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_AH) |
+ (1ull << IB_USER_VERBS_CMD_REG_MR) |
+ (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
+ (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
+ (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
+ (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
+ (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
+ (1ull << IB_USER_VERBS_CMD_POST_SEND) |
+ (1ull << IB_USER_VERBS_CMD_POST_RECV) |
+ (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
+ (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
+ (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
+ (1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);
+ ibdev->node_type = RDMA_NODE_IB_CA;
+ ibdev->phys_port_cnt = dd->num_pports;
+ ibdev->num_comp_vectors = 1;
+ ibdev->dma_device = &dd->pcidev->dev;
+ ibdev->query_device = qib_query_device;
+ ibdev->modify_device = qib_modify_device;
+ ibdev->query_port = qib_query_port;
+ ibdev->modify_port = qib_modify_port;
+ ibdev->query_pkey = qib_query_pkey;
+ ibdev->query_gid = qib_query_gid;
+ ibdev->alloc_ucontext = qib_alloc_ucontext;
+ ibdev->dealloc_ucontext = qib_dealloc_ucontext;
+ ibdev->alloc_pd = qib_alloc_pd;
+ ibdev->dealloc_pd = qib_dealloc_pd;
+ ibdev->create_ah = qib_create_ah;
+ ibdev->destroy_ah = qib_destroy_ah;
+ ibdev->modify_ah = qib_modify_ah;
+ ibdev->query_ah = qib_query_ah;
+ ibdev->create_srq = qib_create_srq;
+ ibdev->modify_srq = qib_modify_srq;
+ ibdev->query_srq = qib_query_srq;
+ ibdev->destroy_srq = qib_destroy_srq;
+ ibdev->create_qp = qib_create_qp;
+ ibdev->modify_qp = qib_modify_qp;
+ ibdev->query_qp = qib_query_qp;
+ ibdev->destroy_qp = qib_destroy_qp;
+ ibdev->post_send = qib_post_send;
+ ibdev->post_recv = qib_post_receive;
+ ibdev->post_srq_recv = qib_post_srq_receive;
+ ibdev->create_cq = qib_create_cq;
+ ibdev->destroy_cq = qib_destroy_cq;
+ ibdev->resize_cq = qib_resize_cq;
+ ibdev->poll_cq = qib_poll_cq;
+ ibdev->req_notify_cq = qib_req_notify_cq;
+ ibdev->get_dma_mr = qib_get_dma_mr;
+ ibdev->reg_phys_mr = qib_reg_phys_mr;
+ ibdev->reg_user_mr = qib_reg_user_mr;
+ ibdev->dereg_mr = qib_dereg_mr;
+ ibdev->alloc_fast_reg_mr = qib_alloc_fast_reg_mr;
+ ibdev->alloc_fast_reg_page_list = qib_alloc_fast_reg_page_list;
+ ibdev->free_fast_reg_page_list = qib_free_fast_reg_page_list;
+ ibdev->alloc_fmr = qib_alloc_fmr;
+ ibdev->map_phys_fmr = qib_map_phys_fmr;
+ ibdev->unmap_fmr = qib_unmap_fmr;
+ ibdev->dealloc_fmr = qib_dealloc_fmr;
+ ibdev->attach_mcast = qib_multicast_attach;
+ ibdev->detach_mcast = qib_multicast_detach;
+ ibdev->process_mad = qib_process_mad;
+ ibdev->mmap = qib_mmap;
+ ibdev->dma_ops = &qib_dma_mapping_ops;
+
+ snprintf(ibdev->node_desc, sizeof(ibdev->node_desc),
+ QIB_IDSTR " %s", init_utsname()->nodename);
+
+ ret = ib_register_device(ibdev, qib_create_port_files);
+ if (ret)
+ goto err_reg;
+
+ ret = qib_create_agents(dev);
+ if (ret)
+ goto err_agents;
+
+	ret = qib_verbs_register_sysfs(dd);
+	if (ret)
+		goto err_class;
+
+ goto bail;
+
+err_class:
+ qib_free_agents(dev);
+err_agents:
+ ib_unregister_device(ibdev);
+err_reg:
+err_tx:
+ while (!list_empty(&dev->txreq_free)) {
+ struct list_head *l = dev->txreq_free.next;
+ struct qib_verbs_txreq *tx;
+
+ list_del(l);
+ tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
+ kfree(tx);
+ }
+ if (ppd->sdma_descq_cnt)
+ dma_free_coherent(&dd->pcidev->dev,
+ ppd->sdma_descq_cnt *
+ sizeof(struct qib_pio_header),
+ dev->pio_hdrs, dev->pio_hdrs_phys);
+err_hdrs:
+ free_pages((unsigned long) dev->lk_table.table, get_order(lk_tab_size));
+err_lk:
+ kfree(dev->qp_table);
+err_qpt:
+ qib_dev_err(dd, "cannot register verbs: %d!\n", -ret);
+bail:
+ return ret;
+}
+
+void qib_unregister_ib_device(struct qib_devdata *dd)
+{
+ struct qib_ibdev *dev = &dd->verbs_dev;
+ struct ib_device *ibdev = &dev->ibdev;
+ u32 qps_inuse;
+ unsigned lk_tab_size;
+
+ qib_verbs_unregister_sysfs(dd);
+
+ qib_free_agents(dev);
+
+ ib_unregister_device(ibdev);
+
+ if (!list_empty(&dev->piowait))
+ qib_dev_err(dd, "piowait list not empty!\n");
+ if (!list_empty(&dev->dmawait))
+ qib_dev_err(dd, "dmawait list not empty!\n");
+ if (!list_empty(&dev->txwait))
+ qib_dev_err(dd, "txwait list not empty!\n");
+ if (!list_empty(&dev->memwait))
+ qib_dev_err(dd, "memwait list not empty!\n");
+ if (dev->dma_mr)
+ qib_dev_err(dd, "DMA MR not NULL!\n");
+
+ qps_inuse = qib_free_all_qps(dd);
+ if (qps_inuse)
+ qib_dev_err(dd, "QP memory leak! %u still in use\n",
+ qps_inuse);
+
+ del_timer_sync(&dev->mem_timer);
+ qib_free_qpn_table(&dev->qpn_table);
+ while (!list_empty(&dev->txreq_free)) {
+ struct list_head *l = dev->txreq_free.next;
+ struct qib_verbs_txreq *tx;
+
+ list_del(l);
+ tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
+ kfree(tx);
+ }
+ if (dd->pport->sdma_descq_cnt)
+ dma_free_coherent(&dd->pcidev->dev,
+ dd->pport->sdma_descq_cnt *
+ sizeof(struct qib_pio_header),
+ dev->pio_hdrs, dev->pio_hdrs_phys);
+ lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table);
+ free_pages((unsigned long) dev->lk_table.table,
+ get_order(lk_tab_size));
+ kfree(dev->qp_table);
+}
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
new file mode 100644
index 000000000000..bd57c1273225
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -0,0 +1,1100 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
+ * All rights reserved.
+ * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef QIB_VERBS_H
+#define QIB_VERBS_H
+
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/kref.h>
+#include <linux/workqueue.h>
+#include <rdma/ib_pack.h>
+#include <rdma/ib_user_verbs.h>
+
+struct qib_ctxtdata;
+struct qib_pportdata;
+struct qib_devdata;
+struct qib_verbs_txreq;
+
+#define QIB_MAX_RDMA_ATOMIC 16
+#define QIB_GUIDS_PER_PORT 5
+
+#define QPN_MAX (1 << 24)
+#define QPNMAP_ENTRIES (QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)
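Assuming 4 KiB pages, QPNMAP_ENTRIES works out to (1 << 24) / 4096 / 8 = 512; that is, a bitmap with one bit per possible QPN needs 512 page-sized chunks.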
+
+/*
+ * Increment this value if any changes that break userspace ABI
+ * compatibility are made.
+ */
+#define QIB_UVERBS_ABI_VERSION 2
+
+/*
+ * Define an ib_cq_notify value that is not valid so we know when CQ
+ * notifications are armed.
+ */
+#define IB_CQ_NONE (IB_CQ_NEXT_COMP + 1)
+
+#define IB_SEQ_NAK (3 << 29)
+
+/* AETH NAK opcode values */
+#define IB_RNR_NAK 0x20
+#define IB_NAK_PSN_ERROR 0x60
+#define IB_NAK_INVALID_REQUEST 0x61
+#define IB_NAK_REMOTE_ACCESS_ERROR 0x62
+#define IB_NAK_REMOTE_OPERATIONAL_ERROR 0x63
+#define IB_NAK_INVALID_RD_REQUEST 0x64
+
+/* Flags for checking QP state (see ib_qib_state_ops[]) */
+#define QIB_POST_SEND_OK 0x01
+#define QIB_POST_RECV_OK 0x02
+#define QIB_PROCESS_RECV_OK 0x04
+#define QIB_PROCESS_SEND_OK 0x08
+#define QIB_PROCESS_NEXT_SEND_OK 0x10
+#define QIB_FLUSH_SEND 0x20
+#define QIB_FLUSH_RECV 0x40
+#define QIB_PROCESS_OR_FLUSH_SEND \
+ (QIB_PROCESS_SEND_OK | QIB_FLUSH_SEND)
+
+/* IB Performance Manager status values */
+#define IB_PMA_SAMPLE_STATUS_DONE 0x00
+#define IB_PMA_SAMPLE_STATUS_STARTED 0x01
+#define IB_PMA_SAMPLE_STATUS_RUNNING 0x02
+
+/* Mandatory IB performance counter select values. */
+#define IB_PMA_PORT_XMIT_DATA cpu_to_be16(0x0001)
+#define IB_PMA_PORT_RCV_DATA cpu_to_be16(0x0002)
+#define IB_PMA_PORT_XMIT_PKTS cpu_to_be16(0x0003)
+#define IB_PMA_PORT_RCV_PKTS cpu_to_be16(0x0004)
+#define IB_PMA_PORT_XMIT_WAIT cpu_to_be16(0x0005)
+
+#define QIB_VENDOR_IPG cpu_to_be16(0xFFA0)
+
+#define IB_BTH_REQ_ACK (1 << 31)
+#define IB_BTH_SOLICITED (1 << 23)
+#define IB_BTH_MIG_REQ (1 << 22)
+
+/* XXX Should be defined in ib_verbs.h enum ib_port_cap_flags */
+#define IB_PORT_OTHER_LOCAL_CHANGES_SUP (1 << 26)
+
+#define IB_GRH_VERSION 6
+#define IB_GRH_VERSION_MASK 0xF
+#define IB_GRH_VERSION_SHIFT 28
+#define IB_GRH_TCLASS_MASK 0xFF
+#define IB_GRH_TCLASS_SHIFT 20
+#define IB_GRH_FLOW_MASK 0xFFFFF
+#define IB_GRH_FLOW_SHIFT 0
+#define IB_GRH_NEXT_HDR 0x1B
+
+#define IB_DEFAULT_GID_PREFIX cpu_to_be64(0xfe80000000000000ULL)
+
+/* Values for set/get portinfo VLCap OperationalVLs */
+#define IB_VL_VL0 1
+#define IB_VL_VL0_1 2
+#define IB_VL_VL0_3 3
+#define IB_VL_VL0_7 4
+#define IB_VL_VL0_14 5
+
+static inline int qib_num_vls(int vls)
+{
+ switch (vls) {
+ default:
+ case IB_VL_VL0:
+ return 1;
+ case IB_VL_VL0_1:
+ return 2;
+ case IB_VL_VL0_3:
+ return 4;
+ case IB_VL_VL0_7:
+ return 8;
+ case IB_VL_VL0_14:
+ return 15;
+ }
+}
+
+struct ib_reth {
+ __be64 vaddr;
+ __be32 rkey;
+ __be32 length;
+} __attribute__ ((packed));
+
+struct ib_atomic_eth {
+ __be32 vaddr[2]; /* unaligned so access as 2 32-bit words */
+ __be32 rkey;
+ __be64 swap_data;
+ __be64 compare_data;
+} __attribute__ ((packed));
+
+struct qib_other_headers {
+ __be32 bth[3];
+ union {
+ struct {
+ __be32 deth[2];
+ __be32 imm_data;
+ } ud;
+ struct {
+ struct ib_reth reth;
+ __be32 imm_data;
+ } rc;
+ struct {
+ __be32 aeth;
+ __be32 atomic_ack_eth[2];
+ } at;
+ __be32 imm_data;
+ __be32 aeth;
+ struct ib_atomic_eth atomic_eth;
+ } u;
+} __attribute__ ((packed));
+
+/*
+ * Note that UD packets with a GRH header are 8+40+12+8 = 68 bytes
+ * long (72 w/ imm_data). Only the first 56 bytes of the IB header
+ * will be in the eager header buffer. The remaining 12 or 16 bytes
+ * are in the data buffer.
+ */
+struct qib_ib_header {
+ __be16 lrh[4];
+ union {
+ struct {
+ struct ib_grh grh;
+ struct qib_other_headers oth;
+ } l;
+ struct qib_other_headers oth;
+ } u;
+} __attribute__ ((packed));
+
+struct qib_pio_header {
+ __le32 pbc[2];
+ struct qib_ib_header hdr;
+} __attribute__ ((packed));
+
+/*
+ * There is one struct qib_mcast for each multicast GID.
+ * All attached QPs are then stored as a list of
+ * struct qib_mcast_qp.
+ */
+struct qib_mcast_qp {
+ struct list_head list;
+ struct qib_qp *qp;
+};
+
+struct qib_mcast {
+ struct rb_node rb_node;
+ union ib_gid mgid;
+ struct list_head qp_list;
+ wait_queue_head_t wait;
+ atomic_t refcount;
+ int n_attached;
+};
+
+/* Protection domain */
+struct qib_pd {
+ struct ib_pd ibpd;
+ int user; /* non-zero if created from user space */
+};
+
+/* Address Handle */
+struct qib_ah {
+ struct ib_ah ibah;
+ struct ib_ah_attr attr;
+ atomic_t refcount;
+};
+
+/*
+ * This structure is used by qib_mmap() to validate an offset
+ * when an mmap() request is made. The vm_area_struct then uses
+ * this as its vm_private_data.
+ */
+struct qib_mmap_info {
+ struct list_head pending_mmaps;
+ struct ib_ucontext *context;
+ void *obj;
+ __u64 offset;
+ struct kref ref;
+ unsigned size;
+};
+
+/*
+ * This structure is used to contain the head pointer, tail pointer,
+ * and completion queue entries as a single memory allocation so
+ * it can be mmap'ed into user space.
+ */
+struct qib_cq_wc {
+ u32 head; /* index of next entry to fill */
+ u32 tail; /* index of next ib_poll_cq() entry */
+ union {
+ /* these are actually size ibcq.cqe + 1 */
+ struct ib_uverbs_wc uqueue[0];
+ struct ib_wc kqueue[0];
+ };
+};
+
+/*
+ * The completion queue structure.
+ */
+struct qib_cq {
+ struct ib_cq ibcq;
+ struct work_struct comptask;
+ spinlock_t lock; /* protect changes in this struct */
+ u8 notify;
+ u8 triggered;
+ struct qib_cq_wc *queue;
+ struct qib_mmap_info *ip;
+};
+
+/*
+ * A segment is a linear region of low physical memory.
+ * XXX Maybe we should use phys addr here and kmap()/kunmap().
+ * Used by the verbs layer.
+ */
+struct qib_seg {
+ void *vaddr;
+ size_t length;
+};
+
+/* The number of qib_segs that fit in a page. */
+#define QIB_SEGSZ (PAGE_SIZE / sizeof(struct qib_seg))
+
+struct qib_segarray {
+ struct qib_seg segs[QIB_SEGSZ];
+};
+
+struct qib_mregion {
+ struct ib_pd *pd; /* shares refcnt of ibmr.pd */
+ u64 user_base; /* User's address for this region */
+ u64 iova; /* IB start address of this region */
+ size_t length;
+ u32 lkey;
+ u32 offset; /* offset (bytes) to start of region */
+ int access_flags;
+ u32 max_segs; /* number of qib_segs in all the arrays */
+ u32 mapsz; /* size of the map array */
+ atomic_t refcount;
+ struct qib_segarray *map[0]; /* the segments */
+};
+
+/*
+ * These keep track of the copy progress within a memory region.
+ * Used by the verbs layer.
+ */
+struct qib_sge {
+ struct qib_mregion *mr;
+ void *vaddr; /* kernel virtual address of segment */
+ u32 sge_length; /* length of the SGE */
+ u32 length; /* remaining length of the segment */
+ u16 m; /* current index: mr->map[m] */
+ u16 n; /* current index: mr->map[m]->segs[n] */
+};
+
+/* Memory region */
+struct qib_mr {
+ struct ib_mr ibmr;
+ struct ib_umem *umem;
+ struct qib_mregion mr; /* must be last */
+};
+
+/*
+ * Send work request queue entry.
+ * The size of the sg_list is determined when the QP is created and stored
+ * in qp->s_max_sge.
+ */
+struct qib_swqe {
+ struct ib_send_wr wr; /* don't use wr.sg_list */
+ u32 psn; /* first packet sequence number */
+ u32 lpsn; /* last packet sequence number */
+ u32 ssn; /* send sequence number */
+ u32 length; /* total length of data in sg_list */
+ struct qib_sge sg_list[0];
+};
+
+/*
+ * Receive work request queue entry.
+ * The size of the sg_list is determined when the QP (or SRQ) is created
+ * and stored in qp->r_rq.max_sge (or srq->rq.max_sge).
+ */
+struct qib_rwqe {
+ u64 wr_id;
+ u8 num_sge;
+ struct ib_sge sg_list[0];
+};
+
+/*
+ * This structure is used to contain the head pointer, tail pointer,
+ * and receive work queue entries as a single memory allocation so
+ * it can be mmap'ed into user space.
+ * Note that the wq array elements are variable size so you can't
+ * just index into the array to get the N'th element;
+ * use get_rwqe_ptr() instead.
+ */
+struct qib_rwq {
+ u32 head; /* new work requests posted to the head */
+ u32 tail; /* receives pull requests from here. */
+ struct qib_rwqe wq[0];
+};
+
+struct qib_rq {
+ struct qib_rwq *wq;
+ spinlock_t lock; /* protect changes in this struct */
+ u32 size; /* size of RWQE array */
+ u8 max_sge;
+};
+
+struct qib_srq {
+ struct ib_srq ibsrq;
+ struct qib_rq rq;
+ struct qib_mmap_info *ip;
+ /* send signal when number of RWQEs < limit */
+ u32 limit;
+};
+
+struct qib_sge_state {
+ struct qib_sge *sg_list; /* next SGE to be used if any */
+ struct qib_sge sge; /* progress state for the current SGE */
+ u32 total_len;
+ u8 num_sge;
+};
+
+/*
+ * This structure holds the information that the send tasklet needs
+ * to send an RDMA read response or atomic operation.
+ */
+struct qib_ack_entry {
+ u8 opcode;
+ u8 sent;
+ u32 psn;
+ u32 lpsn;
+ union {
+ struct qib_sge rdma_sge;
+ u64 atomic_data;
+ };
+};
+
+/*
+ * Variables prefixed with s_ are for the requester (sender).
+ * Variables prefixed with r_ are for the responder (receiver).
+ * Variables prefixed with ack_ are for responder replies.
+ *
+ * Common variables are protected by both r_rq.lock and s_lock, taken in
+ * that order, which only happens in modify_qp() or when changing the QP
+ * 'state'.
+ */
+struct qib_qp {
+ struct ib_qp ibqp;
+ struct qib_qp *next; /* link list for QPN hash table */
+ struct qib_qp *timer_next; /* link list for qib_ib_timer() */
+ struct list_head iowait; /* link for wait PIO buf */
+	struct list_head rspwait;       /* link for waiting to respond */
+ struct ib_ah_attr remote_ah_attr;
+ struct ib_ah_attr alt_ah_attr;
+ struct qib_ib_header s_hdr; /* next packet header to send */
+ atomic_t refcount;
+ wait_queue_head_t wait;
+ wait_queue_head_t wait_dma;
+ struct timer_list s_timer;
+ struct work_struct s_work;
+ struct qib_mmap_info *ip;
+ struct qib_sge_state *s_cur_sge;
+ struct qib_verbs_txreq *s_tx;
+ struct qib_mregion *s_rdma_mr;
+ struct qib_sge_state s_sge; /* current send request data */
+ struct qib_ack_entry s_ack_queue[QIB_MAX_RDMA_ATOMIC + 1];
+ struct qib_sge_state s_ack_rdma_sge;
+ struct qib_sge_state s_rdma_read_sge;
+ struct qib_sge_state r_sge; /* current receive data */
+ spinlock_t r_lock; /* used for APM */
+ spinlock_t s_lock;
+ atomic_t s_dma_busy;
+ unsigned processor_id; /* Processor ID QP is bound to */
+ u32 s_flags;
+ u32 s_cur_size; /* size of send packet in bytes */
+ u32 s_len; /* total length of s_sge */
+ u32 s_rdma_read_len; /* total length of s_rdma_read_sge */
+ u32 s_next_psn; /* PSN for next request */
+ u32 s_last_psn; /* last response PSN processed */
+ u32 s_sending_psn; /* lowest PSN that is being sent */
+ u32 s_sending_hpsn; /* highest PSN that is being sent */
+ u32 s_psn; /* current packet sequence number */
+ u32 s_ack_rdma_psn; /* PSN for sending RDMA read responses */
+ u32 s_ack_psn; /* PSN for acking sends and RDMA writes */
+ u32 s_rnr_timeout; /* number of milliseconds for RNR timeout */
+ u32 r_ack_psn; /* PSN for next ACK or atomic ACK */
+ u64 r_wr_id; /* ID for current receive WQE */
+ unsigned long r_aflags;
+ u32 r_len; /* total length of r_sge */
+ u32 r_rcv_len; /* receive data len processed */
+ u32 r_psn; /* expected rcv packet sequence number */
+ u32 r_msn; /* message sequence number */
+ u16 s_hdrwords; /* size of s_hdr in 32 bit words */
+ u16 s_rdma_ack_cnt;
+ u8 state; /* QP state */
+ u8 s_state; /* opcode of last packet sent */
+ u8 s_ack_state; /* opcode of packet to ACK */
+ u8 s_nak_state; /* non-zero if NAK is pending */
+ u8 r_state; /* opcode of last packet received */
+ u8 r_nak_state; /* non-zero if NAK is pending */
+ u8 r_min_rnr_timer; /* retry timeout value for RNR NAKs */
+ u8 r_flags;
+ u8 r_max_rd_atomic; /* max number of RDMA read/atomic to receive */
+ u8 r_head_ack_queue; /* index into s_ack_queue[] */
+ u8 qp_access_flags;
+ u8 s_max_sge; /* size of s_wq->sg_list */
+ u8 s_retry_cnt; /* number of times to retry */
+ u8 s_rnr_retry_cnt;
+ u8 s_retry; /* requester retry counter */
+ u8 s_rnr_retry; /* requester RNR retry counter */
+ u8 s_pkey_index; /* PKEY index to use */
+ u8 s_alt_pkey_index; /* Alternate path PKEY index to use */
+ u8 s_max_rd_atomic; /* max number of RDMA read/atomic to send */
+ u8 s_num_rd_atomic; /* number of RDMA read/atomic pending */
+ u8 s_tail_ack_queue; /* index into s_ack_queue[] */
+ u8 s_srate;
+ u8 s_draining;
+ u8 s_mig_state;
+ u8 timeout; /* Timeout for this QP */
+ u8 alt_timeout; /* Alternate path timeout for this QP */
+ u8 port_num;
+ enum ib_mtu path_mtu;
+ u32 remote_qpn;
+ u32 qkey; /* QKEY for this QP (for UD or RD) */
+ u32 s_size; /* send work queue size */
+ u32 s_head; /* new entries added here */
+ u32 s_tail; /* next entry to process */
+ u32 s_cur; /* current work queue entry */
+ u32 s_acked; /* last un-ACK'ed entry */
+ u32 s_last; /* last completed entry */
+ u32 s_ssn; /* SSN of tail entry */
+ u32 s_lsn; /* limit sequence number (credit) */
+ struct qib_swqe *s_wq; /* send work queue */
+ struct qib_swqe *s_wqe;
+ struct qib_rq r_rq; /* receive work queue */
+ struct qib_sge r_sg_list[0]; /* verified SGEs */
+};
+
+/*
+ * Atomic bit definitions for r_aflags.
+ */
+#define QIB_R_WRID_VALID 0
+#define QIB_R_REWIND_SGE 1
+
+/*
+ * Bit definitions for r_flags.
+ */
+#define QIB_R_REUSE_SGE 0x01
+#define QIB_R_RDMAR_SEQ 0x02
+#define QIB_R_RSP_NAK 0x04
+#define QIB_R_RSP_SEND 0x08
+#define QIB_R_COMM_EST 0x10
+
+/*
+ * Bit definitions for s_flags.
+ *
+ * QIB_S_SIGNAL_REQ_WR - set if QP send WRs contain completion signaled
+ * QIB_S_BUSY - send tasklet is processing the QP
+ * QIB_S_TIMER - the RC retry timer is active
+ * QIB_S_ACK_PENDING - an ACK is waiting to be sent after RDMA read/atomics
+ * QIB_S_WAIT_FENCE - waiting for all prior RDMA read or atomic SWQEs
+ * before processing the next SWQE
+ * QIB_S_WAIT_RDMAR - waiting for an RDMA read or atomic SWQE to complete
+ * before processing the next SWQE
+ * QIB_S_WAIT_RNR - waiting for RNR timeout
+ * QIB_S_WAIT_SSN_CREDIT - waiting for RC credits to process next SWQE
+ * QIB_S_WAIT_DMA - waiting for the send DMA queue to drain before generating
+ * the next send completion entry not sent via send DMA
+ * QIB_S_WAIT_PIO - waiting for a send buffer to be available
+ * QIB_S_WAIT_TX - waiting for a struct qib_verbs_txreq to be available
+ * QIB_S_WAIT_DMA_DESC - waiting for DMA descriptors to be available
+ * QIB_S_WAIT_KMEM - waiting for kernel memory to be available
+ * QIB_S_WAIT_PSN - waiting for a packet to exit the send DMA queue
+ * QIB_S_WAIT_ACK - waiting for an ACK packet before sending more requests
+ * QIB_S_SEND_ONE - send one packet, request ACK, then wait for ACK
+ */
+#define QIB_S_SIGNAL_REQ_WR 0x0001
+#define QIB_S_BUSY 0x0002
+#define QIB_S_TIMER 0x0004
+#define QIB_S_RESP_PENDING 0x0008
+#define QIB_S_ACK_PENDING 0x0010
+#define QIB_S_WAIT_FENCE 0x0020
+#define QIB_S_WAIT_RDMAR 0x0040
+#define QIB_S_WAIT_RNR 0x0080
+#define QIB_S_WAIT_SSN_CREDIT 0x0100
+#define QIB_S_WAIT_DMA 0x0200
+#define QIB_S_WAIT_PIO 0x0400
+#define QIB_S_WAIT_TX 0x0800
+#define QIB_S_WAIT_DMA_DESC 0x1000
+#define QIB_S_WAIT_KMEM 0x2000
+#define QIB_S_WAIT_PSN 0x4000
+#define QIB_S_WAIT_ACK 0x8000
+#define QIB_S_SEND_ONE 0x10000
+#define QIB_S_UNLIMITED_CREDIT 0x20000
+
+/*
+ * Wait flags that would prevent any packet type from being sent.
+ */
+#define QIB_S_ANY_WAIT_IO (QIB_S_WAIT_PIO | QIB_S_WAIT_TX | \
+ QIB_S_WAIT_DMA_DESC | QIB_S_WAIT_KMEM)
+
+/*
+ * Wait flags that would prevent send work requests from making progress.
+ */
+#define QIB_S_ANY_WAIT_SEND (QIB_S_WAIT_FENCE | QIB_S_WAIT_RDMAR | \
+ QIB_S_WAIT_RNR | QIB_S_WAIT_SSN_CREDIT | QIB_S_WAIT_DMA | \
+ QIB_S_WAIT_PSN | QIB_S_WAIT_ACK)
+
+#define QIB_S_ANY_WAIT (QIB_S_ANY_WAIT_IO | QIB_S_ANY_WAIT_SEND)
+
+#define QIB_PSN_CREDIT 16
+
+/*
+ * Since struct qib_swqe is not a fixed size, we can't simply index into
+ * struct qib_qp.s_wq. This function does the array index computation.
+ */
+static inline struct qib_swqe *get_swqe_ptr(struct qib_qp *qp,
+ unsigned n)
+{
+ return (struct qib_swqe *)((char *)qp->s_wq +
+ (sizeof(struct qib_swqe) +
+ qp->s_max_sge *
+ sizeof(struct qib_sge)) * n);
+}
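+
+/*
+ * Editorial illustration (not part of the original driver source):
+ * each send WQE occupies sizeof(struct qib_swqe) plus s_max_sge
+ * struct qib_sge entries, so with s_max_sge == 2 the byte offset of
+ * entry n is (sizeof(struct qib_swqe) + 2 * sizeof(struct qib_sge)) * n.
+ * Callers would typically index through this helper, e.g.
+ *	wqe = get_swqe_ptr(qp, qp->s_head);
+ * rather than indexing qp->s_wq directly.
+ */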
+
+/*
+ * Since struct qib_rwqe is not a fixed size, we can't simply index into
+ * struct qib_rwq.wq. This function does the array index computation.
+ */
+static inline struct qib_rwqe *get_rwqe_ptr(struct qib_rq *rq, unsigned n)
+{
+ return (struct qib_rwqe *)
+ ((char *) rq->wq->wq +
+ (sizeof(struct qib_rwqe) +
+ rq->max_sge * sizeof(struct ib_sge)) * n);
+}
+
+/*
+ * QPN-map pages start out as NULL; they are allocated upon
+ * first use and are never deallocated. This way,
+ * large bitmaps are not allocated unless large numbers of QPs are used.
+ */
+struct qpn_map {
+ void *page;
+};
+
+struct qib_qpn_table {
+ spinlock_t lock; /* protect changes in this struct */
+ unsigned flags; /* flags for QP0/1 allocated for each port */
+ u32 last; /* last QP number allocated */
+ u32 nmaps; /* size of the map table */
+ u16 limit;
+ u16 mask;
+ /* bit map of free QP numbers other than 0/1 */
+ struct qpn_map map[QPNMAP_ENTRIES];
+};
+
+struct qib_lkey_table {
+ spinlock_t lock; /* protect changes in this struct */
+ u32 next; /* next unused index (speeds search) */
+ u32 gen; /* generation count */
+ u32 max; /* size of the table */
+ struct qib_mregion **table;
+};
+
+struct qib_opcode_stats {
+ u64 n_packets; /* number of packets */
+ u64 n_bytes; /* total number of bytes */
+};
+
+struct qib_ibport {
+ struct qib_qp *qp0;
+ struct qib_qp *qp1;
+ struct ib_mad_agent *send_agent; /* agent for SMI (traps) */
+ struct qib_ah *sm_ah;
+ struct qib_ah *smi_ah;
+ struct rb_root mcast_tree;
+ spinlock_t lock; /* protect changes in this struct */
+
+ /* non-zero when timer is set */
+ unsigned long mkey_lease_timeout;
+ unsigned long trap_timeout;
+ __be64 gid_prefix; /* in network order */
+ __be64 mkey;
+ __be64 guids[QIB_GUIDS_PER_PORT - 1]; /* writable GUIDs */
+ u64 tid; /* TID for traps */
+ u64 n_unicast_xmit; /* total unicast packets sent */
+ u64 n_unicast_rcv; /* total unicast packets received */
+ u64 n_multicast_xmit; /* total multicast packets sent */
+ u64 n_multicast_rcv; /* total multicast packets received */
+ u64 z_symbol_error_counter; /* starting count for PMA */
+ u64 z_link_error_recovery_counter; /* starting count for PMA */
+ u64 z_link_downed_counter; /* starting count for PMA */
+ u64 z_port_rcv_errors; /* starting count for PMA */
+ u64 z_port_rcv_remphys_errors; /* starting count for PMA */
+ u64 z_port_xmit_discards; /* starting count for PMA */
+ u64 z_port_xmit_data; /* starting count for PMA */
+ u64 z_port_rcv_data; /* starting count for PMA */
+ u64 z_port_xmit_packets; /* starting count for PMA */
+ u64 z_port_rcv_packets; /* starting count for PMA */
+ u32 z_local_link_integrity_errors; /* starting count for PMA */
+ u32 z_excessive_buffer_overrun_errors; /* starting count for PMA */
+ u32 z_vl15_dropped; /* starting count for PMA */
+ u32 n_rc_resends;
+ u32 n_rc_acks;
+ u32 n_rc_qacks;
+ u32 n_rc_delayed_comp;
+ u32 n_seq_naks;
+ u32 n_rdma_seq;
+ u32 n_rnr_naks;
+ u32 n_other_naks;
+ u32 n_loop_pkts;
+ u32 n_pkt_drops;
+ u32 n_vl15_dropped;
+ u32 n_rc_timeouts;
+ u32 n_dmawait;
+ u32 n_unaligned;
+ u32 n_rc_dupreq;
+ u32 n_rc_seqnak;
+ u32 port_cap_flags;
+ u32 pma_sample_start;
+ u32 pma_sample_interval;
+ __be16 pma_counter_select[5];
+ u16 pma_tag;
+ u16 pkey_violations;
+ u16 qkey_violations;
+ u16 mkey_violations;
+ u16 mkey_lease_period;
+ u16 sm_lid;
+ u16 repress_traps;
+ u8 sm_sl;
+ u8 mkeyprot;
+ u8 subnet_timeout;
+ u8 vl_high_limit;
+ u8 sl_to_vl[16];
+
+ struct qib_opcode_stats opstats[128];
+};
+
+struct qib_ibdev {
+ struct ib_device ibdev;
+ struct list_head pending_mmaps;
+ spinlock_t mmap_offset_lock; /* protect mmap_offset */
+ u32 mmap_offset;
+ struct qib_mregion *dma_mr;
+
+ /* QP numbers are shared by all IB ports */
+ struct qib_qpn_table qpn_table;
+ struct qib_lkey_table lk_table;
+ struct list_head piowait; /* list for wait PIO buf */
+ struct list_head dmawait; /* list for wait DMA */
+ struct list_head txwait; /* list for wait qib_verbs_txreq */
+ struct list_head memwait; /* list for wait kernel memory */
+ struct list_head txreq_free;
+ struct timer_list mem_timer;
+ struct qib_qp **qp_table;
+ struct qib_pio_header *pio_hdrs;
+ dma_addr_t pio_hdrs_phys;
+ /* list of QPs waiting for RNR timer */
+ spinlock_t pending_lock; /* protect wait lists, PMA counters, etc. */
+ unsigned qp_table_size; /* size of the hash table */
+ spinlock_t qpt_lock;
+
+ u32 n_piowait;
+ u32 n_txwait;
+
+ u32 n_pds_allocated; /* number of PDs allocated for device */
+ spinlock_t n_pds_lock;
+ u32 n_ahs_allocated; /* number of AHs allocated for device */
+ spinlock_t n_ahs_lock;
+ u32 n_cqs_allocated; /* number of CQs allocated for device */
+ spinlock_t n_cqs_lock;
+ u32 n_qps_allocated; /* number of QPs allocated for device */
+ spinlock_t n_qps_lock;
+ u32 n_srqs_allocated; /* number of SRQs allocated for device */
+ spinlock_t n_srqs_lock;
+ u32 n_mcast_grps_allocated; /* number of mcast groups allocated */
+ spinlock_t n_mcast_grps_lock;
+};
+
+struct qib_verbs_counters {
+ u64 symbol_error_counter;
+ u64 link_error_recovery_counter;
+ u64 link_downed_counter;
+ u64 port_rcv_errors;
+ u64 port_rcv_remphys_errors;
+ u64 port_xmit_discards;
+ u64 port_xmit_data;
+ u64 port_rcv_data;
+ u64 port_xmit_packets;
+ u64 port_rcv_packets;
+ u32 local_link_integrity_errors;
+ u32 excessive_buffer_overrun_errors;
+ u32 vl15_dropped;
+};
+
+static inline struct qib_mr *to_imr(struct ib_mr *ibmr)
+{
+ return container_of(ibmr, struct qib_mr, ibmr);
+}
+
+static inline struct qib_pd *to_ipd(struct ib_pd *ibpd)
+{
+ return container_of(ibpd, struct qib_pd, ibpd);
+}
+
+static inline struct qib_ah *to_iah(struct ib_ah *ibah)
+{
+ return container_of(ibah, struct qib_ah, ibah);
+}
+
+static inline struct qib_cq *to_icq(struct ib_cq *ibcq)
+{
+ return container_of(ibcq, struct qib_cq, ibcq);
+}
+
+static inline struct qib_srq *to_isrq(struct ib_srq *ibsrq)
+{
+ return container_of(ibsrq, struct qib_srq, ibsrq);
+}
+
+static inline struct qib_qp *to_iqp(struct ib_qp *ibqp)
+{
+ return container_of(ibqp, struct qib_qp, ibqp);
+}
+
+static inline struct qib_ibdev *to_idev(struct ib_device *ibdev)
+{
+ return container_of(ibdev, struct qib_ibdev, ibdev);
+}
+
+/*
+ * Send if not busy or waiting for I/O and either
+ * an RC response is pending or we can process send work requests.
+ */
+static inline int qib_send_ok(struct qib_qp *qp)
+{
+ return !(qp->s_flags & (QIB_S_BUSY | QIB_S_ANY_WAIT_IO)) &&
+ (qp->s_hdrwords || (qp->s_flags & QIB_S_RESP_PENDING) ||
+ !(qp->s_flags & QIB_S_ANY_WAIT_SEND));
+}
+
+extern struct workqueue_struct *qib_wq;
+extern struct workqueue_struct *qib_cq_wq;
+
+/*
+ * This must be called with s_lock held.
+ */
+static inline void qib_schedule_send(struct qib_qp *qp)
+{
+ if (qib_send_ok(qp)) {
+ if (qp->processor_id == smp_processor_id())
+ queue_work(qib_wq, &qp->s_work);
+ else
+ queue_work_on(qp->processor_id,
+ qib_wq, &qp->s_work);
+ }
+}
+
+static inline int qib_pkey_ok(u16 pkey1, u16 pkey2)
+{
+ u16 p1 = pkey1 & 0x7FFF;
+ u16 p2 = pkey2 & 0x7FFF;
+
+ /*
+ * Low 15 bits must be non-zero and match, and
+ * one of the two must be a full member.
+ */
+ return p1 && p1 == p2 && ((__s16)pkey1 < 0 || (__s16)pkey2 < 0);
+}
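+
+/*
+ * Worked example (editorial, not from the original source): pkeys
+ * 0xFFFF and 0x7FFF share the same low 15 bits and the first has the
+ * full-member bit set, so qib_pkey_ok() returns non-zero; comparing
+ * 0x7FFF with 0x7FFF (two limited members) returns zero.
+ */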
+
+void qib_bad_pqkey(struct qib_ibport *ibp, __be16 trap_num, u32 key, u32 sl,
+ u32 qp1, u32 qp2, __be16 lid1, __be16 lid2);
+void qib_cap_mask_chg(struct qib_ibport *ibp);
+void qib_sys_guid_chg(struct qib_ibport *ibp);
+void qib_node_desc_chg(struct qib_ibport *ibp);
+int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+ struct ib_wc *in_wc, struct ib_grh *in_grh,
+ struct ib_mad *in_mad, struct ib_mad *out_mad);
+int qib_create_agents(struct qib_ibdev *dev);
+void qib_free_agents(struct qib_ibdev *dev);
+
+/*
+ * Compare the lower 24 bits of the two values.
+ * Returns an integer less than, equal to, or greater than zero.
+ */
+static inline int qib_cmp24(u32 a, u32 b)
+{
+ return (((int) a) - ((int) b)) << 8;
+}
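+
+/*
+ * Worked example (editorial, not from the original source): the shift
+ * discards the upper 8 bits so only the 24-bit PSN difference decides
+ * the sign.  For a = 0x000001 and b = 0xFFFFFF, (a - b) << 8 yields a
+ * small positive value on the two's-complement machines the kernel
+ * targets, so 'a' compares as newer than 'b' across the 24-bit wrap.
+ */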
+
+struct qib_mcast *qib_mcast_find(struct qib_ibport *ibp, union ib_gid *mgid);
+
+int qib_snapshot_counters(struct qib_pportdata *ppd, u64 *swords,
+ u64 *rwords, u64 *spkts, u64 *rpkts,
+ u64 *xmit_wait);
+
+int qib_get_counters(struct qib_pportdata *ppd,
+ struct qib_verbs_counters *cntrs);
+
+int qib_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
+
+int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
+
+int qib_mcast_tree_empty(struct qib_ibport *ibp);
+
+__be32 qib_compute_aeth(struct qib_qp *qp);
+
+struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn);
+
+struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata);
+
+int qib_destroy_qp(struct ib_qp *ibqp);
+
+int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err);
+
+int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata);
+
+int qib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_qp_init_attr *init_attr);
+
+unsigned qib_free_all_qps(struct qib_devdata *dd);
+
+void qib_init_qpn_table(struct qib_devdata *dd, struct qib_qpn_table *qpt);
+
+void qib_free_qpn_table(struct qib_qpn_table *qpt);
+
+void qib_get_credit(struct qib_qp *qp, u32 aeth);
+
+unsigned qib_pkt_delay(u32 plen, u8 snd_mult, u8 rcv_mult);
+
+void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail);
+
+void qib_put_txreq(struct qib_verbs_txreq *tx);
+
+int qib_verbs_send(struct qib_qp *qp, struct qib_ib_header *hdr,
+ u32 hdrwords, struct qib_sge_state *ss, u32 len);
+
+void qib_copy_sge(struct qib_sge_state *ss, void *data, u32 length,
+ int release);
+
+void qib_skip_sge(struct qib_sge_state *ss, u32 length, int release);
+
+void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
+ int has_grh, void *data, u32 tlen, struct qib_qp *qp);
+
+void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
+ int has_grh, void *data, u32 tlen, struct qib_qp *qp);
+
+int qib_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr);
+
+void qib_rc_rnr_retry(unsigned long arg);
+
+void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr);
+
+void qib_rc_error(struct qib_qp *qp, enum ib_wc_status err);
+
+int qib_post_ud_send(struct qib_qp *qp, struct ib_send_wr *wr);
+
+void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
+ int has_grh, void *data, u32 tlen, struct qib_qp *qp);
+
+int qib_alloc_lkey(struct qib_lkey_table *rkt, struct qib_mregion *mr);
+
+int qib_free_lkey(struct qib_ibdev *dev, struct qib_mregion *mr);
+
+int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
+ struct qib_sge *isge, struct ib_sge *sge, int acc);
+
+int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
+ u32 len, u64 vaddr, u32 rkey, int acc);
+
+int qib_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr);
+
+struct ib_srq *qib_create_srq(struct ib_pd *ibpd,
+ struct ib_srq_init_attr *srq_init_attr,
+ struct ib_udata *udata);
+
+int qib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
+ enum ib_srq_attr_mask attr_mask,
+ struct ib_udata *udata);
+
+int qib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr);
+
+int qib_destroy_srq(struct ib_srq *ibsrq);
+
+void qib_cq_enter(struct qib_cq *cq, struct ib_wc *entry, int sig);
+
+int qib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
+
+struct ib_cq *qib_create_cq(struct ib_device *ibdev, int entries,
+ int comp_vector, struct ib_ucontext *context,
+ struct ib_udata *udata);
+
+int qib_destroy_cq(struct ib_cq *ibcq);
+
+int qib_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags);
+
+int qib_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
+
+struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc);
+
+struct ib_mr *qib_reg_phys_mr(struct ib_pd *pd,
+ struct ib_phys_buf *buffer_list,
+ int num_phys_buf, int acc, u64 *iova_start);
+
+struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+ u64 virt_addr, int mr_access_flags,
+ struct ib_udata *udata);
+
+int qib_dereg_mr(struct ib_mr *ibmr);
+
+struct ib_mr *qib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len);
+
+struct ib_fast_reg_page_list *qib_alloc_fast_reg_page_list(
+ struct ib_device *ibdev, int page_list_len);
+
+void qib_free_fast_reg_page_list(struct ib_fast_reg_page_list *pl);
+
+int qib_fast_reg_mr(struct qib_qp *qp, struct ib_send_wr *wr);
+
+struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
+ struct ib_fmr_attr *fmr_attr);
+
+int qib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
+ int list_len, u64 iova);
+
+int qib_unmap_fmr(struct list_head *fmr_list);
+
+int qib_dealloc_fmr(struct ib_fmr *ibfmr);
+
+void qib_release_mmap_info(struct kref *ref);
+
+struct qib_mmap_info *qib_create_mmap_info(struct qib_ibdev *dev, u32 size,
+ struct ib_ucontext *context,
+ void *obj);
+
+void qib_update_mmap_info(struct qib_ibdev *dev, struct qib_mmap_info *ip,
+ u32 size, void *obj);
+
+int qib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
+
+int qib_get_rwqe(struct qib_qp *qp, int wr_id_only);
+
+void qib_migrate_qp(struct qib_qp *qp);
+
+int qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr,
+ int has_grh, struct qib_qp *qp, u32 bth0);
+
+u32 qib_make_grh(struct qib_ibport *ibp, struct ib_grh *hdr,
+ struct ib_global_route *grh, u32 hwords, u32 nwords);
+
+void qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr,
+ u32 bth0, u32 bth2);
+
+void qib_do_send(struct work_struct *work);
+
+void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe,
+ enum ib_wc_status status);
+
+void qib_send_rc_ack(struct qib_qp *qp);
+
+int qib_make_rc_req(struct qib_qp *qp);
+
+int qib_make_uc_req(struct qib_qp *qp);
+
+int qib_make_ud_req(struct qib_qp *qp);
+
+int qib_register_ib_device(struct qib_devdata *);
+
+void qib_unregister_ib_device(struct qib_devdata *);
+
+void qib_ib_rcv(struct qib_ctxtdata *, void *, void *, u32);
+
+void qib_ib_piobufavail(struct qib_devdata *);
+
+unsigned qib_get_npkeys(struct qib_devdata *);
+
+unsigned qib_get_pkey(struct qib_ibport *, unsigned);
+
+extern const enum ib_wc_opcode ib_qib_wc_opcode[];
+
+/*
+ * Below are the HCA-independent IB PhysPortState values returned
+ * by the f_ibphys_portstate() routine.
+ */
+#define IB_PHYSPORTSTATE_SLEEP 1
+#define IB_PHYSPORTSTATE_POLL 2
+#define IB_PHYSPORTSTATE_DISABLED 3
+#define IB_PHYSPORTSTATE_CFG_TRAIN 4
+#define IB_PHYSPORTSTATE_LINKUP 5
+#define IB_PHYSPORTSTATE_LINK_ERR_RECOVER 6
+#define IB_PHYSPORTSTATE_CFG_DEBOUNCE 8
+#define IB_PHYSPORTSTATE_CFG_IDLE 0xB
+#define IB_PHYSPORTSTATE_RECOVERY_RETRAIN 0xC
+#define IB_PHYSPORTSTATE_RECOVERY_WAITRMT 0xE
+#define IB_PHYSPORTSTATE_RECOVERY_IDLE 0xF
+#define IB_PHYSPORTSTATE_CFG_ENH 0x10
+#define IB_PHYSPORTSTATE_CFG_WAIT_ENH 0x13
+
+extern const int ib_qib_state_ops[];
+
+extern __be64 ib_qib_sys_image_guid; /* in network order */
+
+extern unsigned int ib_qib_lkey_table_size;
+
+extern unsigned int ib_qib_max_cqes;
+
+extern unsigned int ib_qib_max_cqs;
+
+extern unsigned int ib_qib_max_qp_wrs;
+
+extern unsigned int ib_qib_max_qps;
+
+extern unsigned int ib_qib_max_sges;
+
+extern unsigned int ib_qib_max_mcast_grps;
+
+extern unsigned int ib_qib_max_mcast_qp_attached;
+
+extern unsigned int ib_qib_max_srqs;
+
+extern unsigned int ib_qib_max_srq_sges;
+
+extern unsigned int ib_qib_max_srq_wrs;
+
+extern const u32 ib_qib_rnr_table[];
+
+extern struct ib_dma_mapping_ops qib_dma_mapping_ops;
+
+#endif /* QIB_VERBS_H */
diff --git a/drivers/infiniband/hw/qib/qib_verbs_mcast.c b/drivers/infiniband/hw/qib/qib_verbs_mcast.c
new file mode 100644
index 000000000000..dabb697b1c2a
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_verbs_mcast.c
@@ -0,0 +1,368 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/rculist.h>
+
+#include "qib.h"
+
+/**
+ * qib_mcast_qp_alloc - alloc a struct to link a QP to mcast GID struct
+ * @qp: the QP to link
+ */
+static struct qib_mcast_qp *qib_mcast_qp_alloc(struct qib_qp *qp)
+{
+ struct qib_mcast_qp *mqp;
+
+ mqp = kmalloc(sizeof *mqp, GFP_KERNEL);
+ if (!mqp)
+ goto bail;
+
+ mqp->qp = qp;
+ atomic_inc(&qp->refcount);
+
+bail:
+ return mqp;
+}
+
+static void qib_mcast_qp_free(struct qib_mcast_qp *mqp)
+{
+ struct qib_qp *qp = mqp->qp;
+
+ /* Notify qib_destroy_qp() if it is waiting. */
+ if (atomic_dec_and_test(&qp->refcount))
+ wake_up(&qp->wait);
+
+ kfree(mqp);
+}
+
+/**
+ * qib_mcast_alloc - allocate the multicast GID structure
+ * @mgid: the multicast GID
+ *
+ * A list of QPs will be attached to this structure.
+ */
+static struct qib_mcast *qib_mcast_alloc(union ib_gid *mgid)
+{
+ struct qib_mcast *mcast;
+
+ mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
+ if (!mcast)
+ goto bail;
+
+ mcast->mgid = *mgid;
+ INIT_LIST_HEAD(&mcast->qp_list);
+ init_waitqueue_head(&mcast->wait);
+ atomic_set(&mcast->refcount, 0);
+ mcast->n_attached = 0;
+
+bail:
+ return mcast;
+}
+
+static void qib_mcast_free(struct qib_mcast *mcast)
+{
+ struct qib_mcast_qp *p, *tmp;
+
+ list_for_each_entry_safe(p, tmp, &mcast->qp_list, list)
+ qib_mcast_qp_free(p);
+
+ kfree(mcast);
+}
+
+/**
+ * qib_mcast_find - search the global table for the given multicast GID
+ * @ibp: the IB port structure
+ * @mgid: the multicast GID to search for
+ *
+ * Returns NULL if not found.
+ *
+ * The caller is responsible for decrementing the reference count if found.
+ */
+struct qib_mcast *qib_mcast_find(struct qib_ibport *ibp, union ib_gid *mgid)
+{
+ struct rb_node *n;
+ unsigned long flags;
+ struct qib_mcast *mcast;
+
+ spin_lock_irqsave(&ibp->lock, flags);
+ n = ibp->mcast_tree.rb_node;
+ while (n) {
+ int ret;
+
+ mcast = rb_entry(n, struct qib_mcast, rb_node);
+
+ ret = memcmp(mgid->raw, mcast->mgid.raw,
+ sizeof(union ib_gid));
+ if (ret < 0)
+ n = n->rb_left;
+ else if (ret > 0)
+ n = n->rb_right;
+ else {
+ atomic_inc(&mcast->refcount);
+ spin_unlock_irqrestore(&ibp->lock, flags);
+ goto bail;
+ }
+ }
+ spin_unlock_irqrestore(&ibp->lock, flags);
+
+ mcast = NULL;
+
+bail:
+ return mcast;
+}
+
+/**
+ * qib_mcast_add - insert mcast GID into table and attach QP struct
+ * @mcast: the mcast GID table
+ * @mqp: the QP to attach
+ *
+ * Return zero if both were added. Return EEXIST if the GID was already in
+ * the table but the QP was added. Return ESRCH if the QP was already
+ * attached and neither structure was added.
+ */
+static int qib_mcast_add(struct qib_ibdev *dev, struct qib_ibport *ibp,
+ struct qib_mcast *mcast, struct qib_mcast_qp *mqp)
+{
+ struct rb_node **n = &ibp->mcast_tree.rb_node;
+ struct rb_node *pn = NULL;
+ int ret;
+
+ spin_lock_irq(&ibp->lock);
+
+ while (*n) {
+ struct qib_mcast *tmcast;
+ struct qib_mcast_qp *p;
+
+ pn = *n;
+ tmcast = rb_entry(pn, struct qib_mcast, rb_node);
+
+ ret = memcmp(mcast->mgid.raw, tmcast->mgid.raw,
+ sizeof(union ib_gid));
+ if (ret < 0) {
+ n = &pn->rb_left;
+ continue;
+ }
+ if (ret > 0) {
+ n = &pn->rb_right;
+ continue;
+ }
+
+ /* Search the QP list to see if this is already there. */
+ list_for_each_entry_rcu(p, &tmcast->qp_list, list) {
+ if (p->qp == mqp->qp) {
+ ret = ESRCH;
+ goto bail;
+ }
+ }
+ if (tmcast->n_attached == ib_qib_max_mcast_qp_attached) {
+ ret = ENOMEM;
+ goto bail;
+ }
+
+ tmcast->n_attached++;
+
+ list_add_tail_rcu(&mqp->list, &tmcast->qp_list);
+ ret = EEXIST;
+ goto bail;
+ }
+
+ spin_lock(&dev->n_mcast_grps_lock);
+ if (dev->n_mcast_grps_allocated == ib_qib_max_mcast_grps) {
+ spin_unlock(&dev->n_mcast_grps_lock);
+ ret = ENOMEM;
+ goto bail;
+ }
+
+ dev->n_mcast_grps_allocated++;
+ spin_unlock(&dev->n_mcast_grps_lock);
+
+ mcast->n_attached++;
+
+ list_add_tail_rcu(&mqp->list, &mcast->qp_list);
+
+ atomic_inc(&mcast->refcount);
+ rb_link_node(&mcast->rb_node, pn, n);
+ rb_insert_color(&mcast->rb_node, &ibp->mcast_tree);
+
+ ret = 0;
+
+bail:
+ spin_unlock_irq(&ibp->lock);
+
+ return ret;
+}
+
+int qib_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+{
+ struct qib_qp *qp = to_iqp(ibqp);
+ struct qib_ibdev *dev = to_idev(ibqp->device);
+ struct qib_ibport *ibp;
+ struct qib_mcast *mcast;
+ struct qib_mcast_qp *mqp;
+ int ret;
+
+ if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ /*
+	 * Allocate data structures here since it's better to do this outside
+	 * of spin locks, and they will most likely be needed.
+ */
+ mcast = qib_mcast_alloc(gid);
+ if (mcast == NULL) {
+ ret = -ENOMEM;
+ goto bail;
+ }
+ mqp = qib_mcast_qp_alloc(qp);
+ if (mqp == NULL) {
+ qib_mcast_free(mcast);
+ ret = -ENOMEM;
+ goto bail;
+ }
+ ibp = to_iport(ibqp->device, qp->port_num);
+ switch (qib_mcast_add(dev, ibp, mcast, mqp)) {
+ case ESRCH:
+ /* Neither was used: OK to attach the same QP twice. */
+ qib_mcast_qp_free(mqp);
+ qib_mcast_free(mcast);
+ break;
+
+ case EEXIST: /* The mcast wasn't used */
+ qib_mcast_free(mcast);
+ break;
+
+ case ENOMEM:
+ /* Exceeded the maximum number of mcast groups. */
+ qib_mcast_qp_free(mqp);
+ qib_mcast_free(mcast);
+ ret = -ENOMEM;
+ goto bail;
+
+ default:
+ break;
+ }
+
+ ret = 0;
+
+bail:
+ return ret;
+}
+
+int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+{
+ struct qib_qp *qp = to_iqp(ibqp);
+ struct qib_ibdev *dev = to_idev(ibqp->device);
+ struct qib_ibport *ibp = to_iport(ibqp->device, qp->port_num);
+ struct qib_mcast *mcast = NULL;
+ struct qib_mcast_qp *p, *tmp;
+ struct rb_node *n;
+ int last = 0;
+ int ret;
+
+ if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ spin_lock_irq(&ibp->lock);
+
+ /* Find the GID in the mcast table. */
+ n = ibp->mcast_tree.rb_node;
+ while (1) {
+ if (n == NULL) {
+ spin_unlock_irq(&ibp->lock);
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ mcast = rb_entry(n, struct qib_mcast, rb_node);
+ ret = memcmp(gid->raw, mcast->mgid.raw,
+ sizeof(union ib_gid));
+ if (ret < 0)
+ n = n->rb_left;
+ else if (ret > 0)
+ n = n->rb_right;
+ else
+ break;
+ }
+
+ /* Search the QP list. */
+ list_for_each_entry_safe(p, tmp, &mcast->qp_list, list) {
+ if (p->qp != qp)
+ continue;
+ /*
+ * We found it, so remove it, but don't poison the forward
+ * link until we are sure there are no list walkers.
+ */
+ list_del_rcu(&p->list);
+ mcast->n_attached--;
+
+ /* If this was the last attached QP, remove the GID too. */
+ if (list_empty(&mcast->qp_list)) {
+ rb_erase(&mcast->rb_node, &ibp->mcast_tree);
+ last = 1;
+ }
+ break;
+ }
+
+ spin_unlock_irq(&ibp->lock);
+
+ if (p) {
+ /*
+ * Wait for any list walkers to finish before freeing the
+ * list element.
+ */
+ wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
+ qib_mcast_qp_free(p);
+ }
+ if (last) {
+ atomic_dec(&mcast->refcount);
+ wait_event(mcast->wait, !atomic_read(&mcast->refcount));
+ qib_mcast_free(mcast);
+ spin_lock_irq(&dev->n_mcast_grps_lock);
+ dev->n_mcast_grps_allocated--;
+ spin_unlock_irq(&dev->n_mcast_grps_lock);
+ }
+
+ ret = 0;
+
+bail:
+ return ret;
+}
+
+int qib_mcast_tree_empty(struct qib_ibport *ibp)
+{
+ return ibp->mcast_tree.rb_node == NULL;
+}
diff --git a/drivers/infiniband/hw/ipath/ipath_7220.h b/drivers/infiniband/hw/qib/qib_wc_ppc64.c
index 74fa5cc5131d..673cf4c22ebd 100644
--- a/drivers/infiniband/hw/ipath/ipath_7220.h
+++ b/drivers/infiniband/hw/qib/qib_wc_ppc64.c
@@ -1,7 +1,5 @@
-#ifndef _IPATH_7220_H
-#define _IPATH_7220_H
/*
- * Copyright (c) 2007 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -33,25 +31,32 @@
*/
/*
- * This header file provides the declarations and common definitions
- * for (mostly) manipulation of the SerDes blocks within the IBA7220.
- * the functions declared should only be called from within other
- * 7220-related files such as ipath_iba7220.c or ipath_sd7220.c.
+ * This file is conditionally built on PowerPC only. Otherwise weak symbol
+ * versions of the functions exported from here are used.
*/
-int ipath_sd7220_presets(struct ipath_devdata *dd);
-int ipath_sd7220_init(struct ipath_devdata *dd, int was_reset);
-int ipath_sd7220_prog_ld(struct ipath_devdata *dd, int sdnum, u8 *img,
- int len, int offset);
-int ipath_sd7220_prog_vfy(struct ipath_devdata *dd, int sdnum, const u8 *img,
- int len, int offset);
-/*
- * Below used for sdnum parameter, selecting one of the two sections
- * used for PCIe, or the single SerDes used for IB, which is the
- * only one currently used
- */
-#define IB_7220_SERDES 2
-int ipath_sd7220_ib_load(struct ipath_devdata *dd);
-int ipath_sd7220_ib_vfy(struct ipath_devdata *dd);
+#include "qib.h"
-#endif /* _IPATH_7220_H */
+/**
+ * qib_enable_wc - enable write combining for MMIO writes to the device
+ * @dd: qlogic_ib device
+ *
+ * Nothing to do on PowerPC, so just return without error.
+ */
+int qib_enable_wc(struct qib_devdata *dd)
+{
+ return 0;
+}
+
+/**
+ * qib_unordered_wc - indicate whether write combining is unordered
+ *
+ * Because our performance depends on our ability to do write
+ * combining mmio writes in the most efficient way, we need to
+ * know if we are on a processor that may reorder stores when
+ * write combining.
+ */
+int qib_unordered_wc(void)
+{
+ return 1;
+}
diff --git a/drivers/infiniband/hw/qib/qib_wc_x86_64.c b/drivers/infiniband/hw/qib/qib_wc_x86_64.c
new file mode 100644
index 000000000000..561b8bca4060
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_wc_x86_64.c
@@ -0,0 +1,171 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * This file is conditionally built on x86_64 only. Otherwise weak symbol
+ * versions of the functions exported from here are used.
+ */
+
+#include <linux/pci.h>
+#include <asm/mtrr.h>
+#include <asm/processor.h>
+
+#include "qib.h"
+
+/**
+ * qib_enable_wc - enable write combining for MMIO writes to the device
+ * @dd: qlogic_ib device
+ *
+ * This routine is x86_64-specific; it twiddles the CPU's MTRRs to enable
+ * write combining.
+ */
+int qib_enable_wc(struct qib_devdata *dd)
+{
+ int ret = 0;
+ u64 pioaddr, piolen;
+ unsigned bits;
+ const unsigned long addr = pci_resource_start(dd->pcidev, 0);
+ const size_t len = pci_resource_len(dd->pcidev, 0);
+
+ /*
+ * Set the PIO buffers to be WCCOMB, so we get HT bursts to the
+ * chip. Linux (possibly the hardware) requires it to be on a power
+ * of 2 address matching the length (which has to be a power of 2).
+ * For rev1, that means the base address, for rev2, it will be just
+ * the PIO buffers themselves.
+ * For chips with two sets of buffers, the calculations are
+ * somewhat more complicated; we need to sum, and the piobufbase
+ * register has both offsets, 2K in low 32 bits, 4K in high 32 bits.
+ * The buffers are still packed, so a single range covers both.
+ */
+ if (dd->piobcnt2k && dd->piobcnt4k) {
+ /* 2 sizes for chip */
+ unsigned long pio2kbase, pio4kbase;
+ pio2kbase = dd->piobufbase & 0xffffffffUL;
+ pio4kbase = (dd->piobufbase >> 32) & 0xffffffffUL;
+ if (pio2kbase < pio4kbase) {
+ /* all current chips */
+ pioaddr = addr + pio2kbase;
+ piolen = pio4kbase - pio2kbase +
+ dd->piobcnt4k * dd->align4k;
+ } else {
+ pioaddr = addr + pio4kbase;
+ piolen = pio2kbase - pio4kbase +
+ dd->piobcnt2k * dd->palign;
+ }
+ } else { /* single buffer size (2K, currently) */
+ pioaddr = addr + dd->piobufbase;
+ piolen = dd->piobcnt2k * dd->palign +
+ dd->piobcnt4k * dd->align4k;
+ }
+
+ for (bits = 0; !(piolen & (1ULL << bits)); bits++)
+ /* do nothing */ ;
+
+ if (piolen != (1ULL << bits)) {
+ piolen >>= bits;
+ while (piolen >>= 1)
+ bits++;
+ piolen = 1ULL << (bits + 1);
+ }
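+	/*
+	 * (Editorial note: the block above rounds piolen up to the next
+	 * power of two, e.g. a 0x6000-byte region becomes 0x8000, since
+	 * an MTRR requires a power-of-2 size.)
+	 */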
+ if (pioaddr & (piolen - 1)) {
+ u64 atmp;
+ atmp = pioaddr & ~(piolen - 1);
+ if (atmp < addr || (atmp + piolen) > (addr + len)) {
+ qib_dev_err(dd, "No way to align address/size "
+ "(%llx/%llx), no WC mtrr\n",
+ (unsigned long long) atmp,
+ (unsigned long long) piolen << 1);
+ ret = -ENODEV;
+ } else {
+ pioaddr = atmp;
+ piolen <<= 1;
+ }
+ }
+
+ if (!ret) {
+ int cookie;
+
+ cookie = mtrr_add(pioaddr, piolen, MTRR_TYPE_WRCOMB, 0);
+	if (cookie < 0) {
+		qib_devinfo(dd->pcidev,
+			"mtrr_add() WC for PIO bufs failed (%d)\n",
+			cookie);
+		ret = -EINVAL;
+	} else {
+ dd->wc_cookie = cookie;
+ dd->wc_base = (unsigned long) pioaddr;
+ dd->wc_len = (unsigned long) piolen;
+ }
+ }
+
+ return ret;
+}
+
+/**
+ * qib_disable_wc - disable write combining for MMIO writes to the device
+ * @dd: qlogic_ib device
+ */
+void qib_disable_wc(struct qib_devdata *dd)
+{
+ if (dd->wc_cookie) {
+ int r;
+
+ r = mtrr_del(dd->wc_cookie, dd->wc_base,
+ dd->wc_len);
+ if (r < 0)
+ qib_devinfo(dd->pcidev,
+ "mtrr_del(%lx, %lx, %lx) failed: %d\n",
+ dd->wc_cookie, dd->wc_base,
+ dd->wc_len, r);
+ dd->wc_cookie = 0; /* even on failure */
+ }
+}
+
+/**
+ * qib_unordered_wc - indicate whether write combining is unordered
+ *
+ * Because our performance depends on our ability to do write combining mmio
+ * writes in the most efficient way, we need to know if we are on an Intel
+ * or AMD x86_64 processor. AMD x86_64 processors flush WC buffers out in
+ * the order completed, and so no special flushing is required to get
+ * correct ordering. Intel processors, however, will flush write buffers
+ * out in "random" orders, and so explicit ordering is needed at times.
+ */
+int qib_unordered_wc(void)
+{
+ return boot_cpu_data.x86_vendor != X86_VENDOR_AMD;
+}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
index d10b4ec68d28..40e858492f90 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
@@ -49,6 +49,25 @@ static u32 ipoib_get_rx_csum(struct net_device *dev)
!test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
}
+static int ipoib_set_tso(struct net_device *dev, u32 data)
+{
+ struct ipoib_dev_priv *priv = netdev_priv(dev);
+
+ if (data) {
+ if (!test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags) &&
+ (dev->features & NETIF_F_SG) &&
+ (priv->hca_caps & IB_DEVICE_UD_TSO)) {
+ dev->features |= NETIF_F_TSO;
+ } else {
+ ipoib_warn(priv, "can't set TSO on\n");
+ return -EOPNOTSUPP;
+ }
+ } else
+ dev->features &= ~NETIF_F_TSO;
+
+ return 0;
+}
+
static int ipoib_get_coalesce(struct net_device *dev,
struct ethtool_coalesce *coal)
{
@@ -131,6 +150,7 @@ static void ipoib_get_ethtool_stats(struct net_device *dev,
static const struct ethtool_ops ipoib_ethtool_ops = {
.get_drvinfo = ipoib_get_drvinfo,
.get_rx_csum = ipoib_get_rx_csum,
+ .set_tso = ipoib_set_tso,
.get_coalesce = ipoib_get_coalesce,
.set_coalesce = ipoib_set_coalesce,
.get_flags = ethtool_op_get_flags,
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index b166bb75753d..3871ac663554 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -768,11 +768,8 @@ void ipoib_mcast_dev_flush(struct net_device *dev)
}
}
-static int ipoib_mcast_addr_is_valid(const u8 *addr, unsigned int addrlen,
- const u8 *broadcast)
+static int ipoib_mcast_addr_is_valid(const u8 *addr, const u8 *broadcast)
{
- if (addrlen != INFINIBAND_ALEN)
- return 0;
/* reserved QPN, prefix, scope */
if (memcmp(addr, broadcast, 6))
return 0;
@@ -787,7 +784,7 @@ void ipoib_mcast_restart_task(struct work_struct *work)
struct ipoib_dev_priv *priv =
container_of(work, struct ipoib_dev_priv, restart_task);
struct net_device *dev = priv->dev;
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
struct ipoib_mcast *mcast, *tmcast;
LIST_HEAD(remove_list);
unsigned long flags;
@@ -812,15 +809,13 @@ void ipoib_mcast_restart_task(struct work_struct *work)
clear_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags);
/* Mark all of the entries that are found or don't exist */
- netdev_for_each_mc_addr(mclist, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
union ib_gid mgid;
- if (!ipoib_mcast_addr_is_valid(mclist->dmi_addr,
- mclist->dmi_addrlen,
- dev->broadcast))
+ if (!ipoib_mcast_addr_is_valid(ha->addr, dev->broadcast))
continue;
- memcpy(mgid.raw, mclist->dmi_addr + 4, sizeof mgid);
+ memcpy(mgid.raw, ha->addr + 4, sizeof mgid);
mcast = __ipoib_mcast_find(dev, &mgid);
if (!mcast || test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 93399dff0c6f..7b2fc98e2f2b 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -325,7 +325,7 @@ iscsi_iser_conn_destroy(struct iscsi_cls_conn *cls_conn)
*/
if (ib_conn) {
ib_conn->iser_conn = NULL;
- iser_conn_put(ib_conn);
+ iser_conn_put(ib_conn, 1); /* deref iscsi/ib conn unbinding */
}
}
@@ -357,11 +357,12 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
/* binds the iSER connection retrieved from the previously
* connected ep_handle to the iSCSI layer connection. exchanges
* connection pointers */
- iser_err("binding iscsi conn %p to iser_conn %p\n",conn,ib_conn);
+ iser_err("binding iscsi/iser conn %p %p to ib_conn %p\n",
+ conn, conn->dd_data, ib_conn);
iser_conn = conn->dd_data;
ib_conn->iser_conn = iser_conn;
iser_conn->ib_conn = ib_conn;
- iser_conn_get(ib_conn);
+ iser_conn_get(ib_conn); /* ref iscsi/ib conn binding */
return 0;
}
@@ -382,7 +383,7 @@ iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
* There is no unbind event so the stop callback
* must release the ref from the bind.
*/
- iser_conn_put(ib_conn);
+ iser_conn_put(ib_conn, 1); /* deref iscsi/ib conn unbinding */
}
iser_conn->ib_conn = NULL;
}
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 036934cdcb92..f1df01567bb6 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -232,6 +232,7 @@ struct iser_device {
struct ib_cq *tx_cq;
struct ib_mr *mr;
struct tasklet_struct cq_tasklet;
+ struct ib_event_handler event_handler;
struct list_head ig_list; /* entry in ig devices list */
int refcount;
};
@@ -246,7 +247,6 @@ struct iser_conn {
struct rdma_cm_id *cma_id; /* CMA ID */
struct ib_qp *qp; /* QP */
struct ib_fmr_pool *fmr_pool; /* pool of IB FMRs */
- int disc_evt_flag; /* disconn event delivered */
wait_queue_head_t wait; /* waitq for conn/disconn */
int post_recv_buf_count; /* posted rx count */
atomic_t post_send_buf_count; /* posted tx count */
@@ -320,7 +320,7 @@ void iser_conn_init(struct iser_conn *ib_conn);
void iser_conn_get(struct iser_conn *ib_conn);
-void iser_conn_put(struct iser_conn *ib_conn);
+int iser_conn_put(struct iser_conn *ib_conn, int destroy_cma_id_allowed);
void iser_conn_terminate(struct iser_conn *ib_conn);
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index b89d76b39a13..9876865732f7 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -54,6 +54,13 @@ static void iser_qp_event_callback(struct ib_event *cause, void *context)
iser_err("got qp event %d\n",cause->event);
}
+static void iser_event_handler(struct ib_event_handler *handler,
+ struct ib_event *event)
+{
+ iser_err("async event %d on device %s port %d\n", event->event,
+ event->device->name, event->element.port_num);
+}
+
/**
* iser_create_device_ib_res - creates Protection Domain (PD), Completion
* Queue (CQ), DMA Memory Region (DMA MR) with the device associated with
@@ -96,8 +103,15 @@ static int iser_create_device_ib_res(struct iser_device *device)
if (IS_ERR(device->mr))
goto dma_mr_err;
+ INIT_IB_EVENT_HANDLER(&device->event_handler, device->ib_device,
+ iser_event_handler);
+ if (ib_register_event_handler(&device->event_handler))
+ goto handler_err;
+
return 0;
+handler_err:
+ ib_dereg_mr(device->mr);
dma_mr_err:
tasklet_kill(&device->cq_tasklet);
cq_arm_err:
@@ -120,7 +134,7 @@ static void iser_free_device_ib_res(struct iser_device *device)
BUG_ON(device->mr == NULL);
tasklet_kill(&device->cq_tasklet);
-
+ (void)ib_unregister_event_handler(&device->event_handler);
(void)ib_dereg_mr(device->mr);
(void)ib_destroy_cq(device->tx_cq);
(void)ib_destroy_cq(device->rx_cq);
@@ -149,10 +163,8 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
device = ib_conn->device;
ib_conn->login_buf = kmalloc(ISER_RX_LOGIN_SIZE, GFP_KERNEL);
- if (!ib_conn->login_buf) {
- goto alloc_err;
- ret = -ENOMEM;
- }
+ if (!ib_conn->login_buf)
+ goto out_err;
ib_conn->login_dma = ib_dma_map_single(ib_conn->device->ib_device,
(void *)ib_conn->login_buf, ISER_RX_LOGIN_SIZE,
@@ -161,10 +173,9 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
ib_conn->page_vec = kmalloc(sizeof(struct iser_page_vec) +
(sizeof(u64) * (ISCSI_ISER_SG_TABLESIZE +1)),
GFP_KERNEL);
- if (!ib_conn->page_vec) {
- ret = -ENOMEM;
- goto alloc_err;
- }
+ if (!ib_conn->page_vec)
+ goto out_err;
+
ib_conn->page_vec->pages = (u64 *) (ib_conn->page_vec + 1);
params.page_shift = SHIFT_4K;
@@ -184,7 +195,8 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
ib_conn->fmr_pool = ib_create_fmr_pool(device->pd, &params);
if (IS_ERR(ib_conn->fmr_pool)) {
ret = PTR_ERR(ib_conn->fmr_pool);
- goto fmr_pool_err;
+ ib_conn->fmr_pool = NULL;
+ goto out_err;
}
memset(&init_attr, 0, sizeof init_attr);
@@ -202,7 +214,7 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
ret = rdma_create_qp(ib_conn->cma_id, device->pd, &init_attr);
if (ret)
- goto qp_err;
+ goto out_err;
ib_conn->qp = ib_conn->cma_id->qp;
iser_err("setting conn %p cma_id %p: fmr_pool %p qp %p\n",
@@ -210,12 +222,7 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
ib_conn->fmr_pool, ib_conn->cma_id->qp);
return ret;
-qp_err:
- (void)ib_destroy_fmr_pool(ib_conn->fmr_pool);
-fmr_pool_err:
- kfree(ib_conn->page_vec);
- kfree(ib_conn->login_buf);
-alloc_err:
+out_err:
iser_err("unable to alloc mem or create resource, err %d\n", ret);
return ret;
}
@@ -224,7 +231,7 @@ alloc_err:
* releases the FMR pool, QP and CMA ID objects, returns 0 on success,
* -1 on failure
*/
-static int iser_free_ib_conn_res(struct iser_conn *ib_conn)
+static int iser_free_ib_conn_res(struct iser_conn *ib_conn, int can_destroy_id)
{
BUG_ON(ib_conn == NULL);
@@ -239,7 +246,8 @@ static int iser_free_ib_conn_res(struct iser_conn *ib_conn)
if (ib_conn->qp != NULL)
rdma_destroy_qp(ib_conn->cma_id);
- if (ib_conn->cma_id != NULL)
+	/* when called from the CMA handler, the caller lets the CMA destroy the id */
+ if (ib_conn->cma_id != NULL && can_destroy_id)
rdma_destroy_id(ib_conn->cma_id);
ib_conn->fmr_pool = NULL;
@@ -317,7 +325,7 @@ static int iser_conn_state_comp_exch(struct iser_conn *ib_conn,
/**
* Frees all conn objects and deallocs conn descriptor
*/
-static void iser_conn_release(struct iser_conn *ib_conn)
+static void iser_conn_release(struct iser_conn *ib_conn, int can_destroy_id)
{
struct iser_device *device = ib_conn->device;
@@ -327,13 +335,11 @@ static void iser_conn_release(struct iser_conn *ib_conn)
list_del(&ib_conn->conn_list);
mutex_unlock(&ig.connlist_mutex);
iser_free_rx_descriptors(ib_conn);
- iser_free_ib_conn_res(ib_conn);
+ iser_free_ib_conn_res(ib_conn, can_destroy_id);
ib_conn->device = NULL;
/* on EVENT_ADDR_ERROR there's no device yet for this conn */
if (device != NULL)
iser_device_try_release(device);
- if (ib_conn->iser_conn)
- ib_conn->iser_conn->ib_conn = NULL;
iscsi_destroy_endpoint(ib_conn->ep);
}
@@ -342,10 +348,13 @@ void iser_conn_get(struct iser_conn *ib_conn)
atomic_inc(&ib_conn->refcount);
}
-void iser_conn_put(struct iser_conn *ib_conn)
+int iser_conn_put(struct iser_conn *ib_conn, int can_destroy_id)
{
- if (atomic_dec_and_test(&ib_conn->refcount))
- iser_conn_release(ib_conn);
+ if (atomic_dec_and_test(&ib_conn->refcount)) {
+ iser_conn_release(ib_conn, can_destroy_id);
+ return 1;
+ }
+ return 0;
}
/**
@@ -369,19 +378,20 @@ void iser_conn_terminate(struct iser_conn *ib_conn)
wait_event_interruptible(ib_conn->wait,
ib_conn->state == ISER_CONN_DOWN);
- iser_conn_put(ib_conn);
+ iser_conn_put(ib_conn, 1); /* deref ib conn deallocate */
}
-static void iser_connect_error(struct rdma_cm_id *cma_id)
+static int iser_connect_error(struct rdma_cm_id *cma_id)
{
struct iser_conn *ib_conn;
ib_conn = (struct iser_conn *)cma_id->context;
ib_conn->state = ISER_CONN_DOWN;
wake_up_interruptible(&ib_conn->wait);
+ return iser_conn_put(ib_conn, 0); /* deref ib conn's cma id */
}
-static void iser_addr_handler(struct rdma_cm_id *cma_id)
+static int iser_addr_handler(struct rdma_cm_id *cma_id)
{
struct iser_device *device;
struct iser_conn *ib_conn;
@@ -390,8 +400,7 @@ static void iser_addr_handler(struct rdma_cm_id *cma_id)
device = iser_device_find_by_ib_device(cma_id);
if (!device) {
iser_err("device lookup/creation failed\n");
- iser_connect_error(cma_id);
- return;
+ return iser_connect_error(cma_id);
}
ib_conn = (struct iser_conn *)cma_id->context;
@@ -400,11 +409,13 @@ static void iser_addr_handler(struct rdma_cm_id *cma_id)
ret = rdma_resolve_route(cma_id, 1000);
if (ret) {
iser_err("resolve route failed: %d\n", ret);
- iser_connect_error(cma_id);
+ return iser_connect_error(cma_id);
}
+
+ return 0;
}
-static void iser_route_handler(struct rdma_cm_id *cma_id)
+static int iser_route_handler(struct rdma_cm_id *cma_id)
{
struct rdma_conn_param conn_param;
int ret;
@@ -425,9 +436,9 @@ static void iser_route_handler(struct rdma_cm_id *cma_id)
goto failure;
}
- return;
+ return 0;
failure:
- iser_connect_error(cma_id);
+ return iser_connect_error(cma_id);
}
static void iser_connected_handler(struct rdma_cm_id *cma_id)
@@ -439,12 +450,12 @@ static void iser_connected_handler(struct rdma_cm_id *cma_id)
wake_up_interruptible(&ib_conn->wait);
}
-static void iser_disconnected_handler(struct rdma_cm_id *cma_id)
+static int iser_disconnected_handler(struct rdma_cm_id *cma_id)
{
struct iser_conn *ib_conn;
+ int ret;
ib_conn = (struct iser_conn *)cma_id->context;
- ib_conn->disc_evt_flag = 1;
/* getting here when the state is UP means that the conn is being *
* terminated asynchronously from the iSCSI layer's perspective. */
@@ -459,20 +470,24 @@ static void iser_disconnected_handler(struct rdma_cm_id *cma_id)
ib_conn->state = ISER_CONN_DOWN;
wake_up_interruptible(&ib_conn->wait);
}
+
+ ret = iser_conn_put(ib_conn, 0); /* deref ib conn's cma id */
+ return ret;
}
static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
int ret = 0;
- iser_err("event %d conn %p id %p\n",event->event,cma_id->context,cma_id);
+ iser_err("event %d status %d conn %p id %p\n",
+ event->event, event->status, cma_id->context, cma_id);
switch (event->event) {
case RDMA_CM_EVENT_ADDR_RESOLVED:
- iser_addr_handler(cma_id);
+ ret = iser_addr_handler(cma_id);
break;
case RDMA_CM_EVENT_ROUTE_RESOLVED:
- iser_route_handler(cma_id);
+ ret = iser_route_handler(cma_id);
break;
case RDMA_CM_EVENT_ESTABLISHED:
iser_connected_handler(cma_id);
@@ -482,13 +497,12 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *eve
case RDMA_CM_EVENT_CONNECT_ERROR:
case RDMA_CM_EVENT_UNREACHABLE:
case RDMA_CM_EVENT_REJECTED:
- iser_err("event: %d, error: %d\n", event->event, event->status);
- iser_connect_error(cma_id);
+ ret = iser_connect_error(cma_id);
break;
case RDMA_CM_EVENT_DISCONNECTED:
case RDMA_CM_EVENT_DEVICE_REMOVAL:
case RDMA_CM_EVENT_ADDR_CHANGE:
- iser_disconnected_handler(cma_id);
+ ret = iser_disconnected_handler(cma_id);
break;
default:
iser_err("Unexpected RDMA CM event (%d)\n", event->event);
@@ -503,7 +517,7 @@ void iser_conn_init(struct iser_conn *ib_conn)
init_waitqueue_head(&ib_conn->wait);
ib_conn->post_recv_buf_count = 0;
atomic_set(&ib_conn->post_send_buf_count, 0);
- atomic_set(&ib_conn->refcount, 1);
+ atomic_set(&ib_conn->refcount, 1); /* ref ib conn allocation */
INIT_LIST_HEAD(&ib_conn->conn_list);
spin_lock_init(&ib_conn->lock);
}
@@ -531,6 +545,7 @@ int iser_connect(struct iser_conn *ib_conn,
ib_conn->state = ISER_CONN_PENDING;
+ iser_conn_get(ib_conn); /* ref ib conn's cma id */
ib_conn->cma_id = rdma_create_id(iser_cma_handler,
(void *)ib_conn,
RDMA_PS_TCP);
@@ -568,7 +583,7 @@ id_failure:
addr_failure:
ib_conn->state = ISER_CONN_DOWN;
connect_failure:
- iser_conn_release(ib_conn);
+ iser_conn_release(ib_conn, 1);
return err;
}
@@ -737,12 +752,10 @@ static void iser_handle_comp_error(struct iser_tx_desc *desc,
iscsi_conn_failure(ib_conn->iser_conn->iscsi_conn,
ISCSI_ERR_CONN_FAILED);
- /* complete the termination process if disconnect event was delivered *
- * note there are no more non completed posts to the QP */
- if (ib_conn->disc_evt_flag) {
- ib_conn->state = ISER_CONN_DOWN;
- wake_up_interruptible(&ib_conn->wait);
- }
+ /* there are no more uncompleted posts to the QP, so complete the
+ * termination process without waiting for the disconnect event */
+ ib_conn->state = ISER_CONN_DOWN;
+ wake_up_interruptible(&ib_conn->wait);
}
}
diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
index 7e18bcf05a66..46239e47a260 100644
--- a/drivers/input/gameport/gameport.c
+++ b/drivers/input/gameport/gameport.c
@@ -59,11 +59,11 @@ static unsigned int get_time_pit(void)
unsigned long flags;
unsigned int count;
- spin_lock_irqsave(&i8253_lock, flags);
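+ /* i8253_lock is a raw_spinlock_t here, so the raw_ lock variants are required */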
+ raw_spin_lock_irqsave(&i8253_lock, flags);
outb_p(0x00, 0x43);
count = inb_p(0x40);
count |= inb_p(0x40) << 8;
- spin_unlock_irqrestore(&i8253_lock, flags);
+ raw_spin_unlock_irqrestore(&i8253_lock, flags);
return count;
}
diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c
index 423e0e6031ab..34157bb97ed6 100644
--- a/drivers/input/joydev.c
+++ b/drivers/input/joydev.c
@@ -47,15 +47,15 @@ struct joydev {
struct mutex mutex;
struct device dev;
- struct js_corr corr[ABS_MAX + 1];
+ struct js_corr corr[ABS_CNT];
struct JS_DATA_SAVE_TYPE glue;
int nabs;
int nkey;
__u16 keymap[KEY_MAX - BTN_MISC + 1];
__u16 keypam[KEY_MAX - BTN_MISC + 1];
- __u8 absmap[ABS_MAX + 1];
- __u8 abspam[ABS_MAX + 1];
- __s16 abs[ABS_MAX + 1];
+ __u8 absmap[ABS_CNT];
+ __u8 abspam[ABS_CNT];
+ __s16 abs[ABS_CNT];
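+ /* ABS_CNT is ABS_MAX + 1, so these arrays are sized by count rather than by the highest axis code */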
};
struct joydev_client {
@@ -826,7 +826,7 @@ static int joydev_connect(struct input_handler *handler, struct input_dev *dev,
joydev->handle.handler = handler;
joydev->handle.private = joydev;
- for (i = 0; i < ABS_MAX + 1; i++)
+ for (i = 0; i < ABS_CNT; i++)
if (test_bit(i, dev->absbit)) {
joydev->absmap[i] = joydev->nabs;
joydev->abspam[joydev->nabs] = i;
diff --git a/drivers/input/joystick/analog.c b/drivers/input/joystick/analog.c
index 1c0b529c06aa..4afe0a3b4884 100644
--- a/drivers/input/joystick/analog.c
+++ b/drivers/input/joystick/analog.c
@@ -146,11 +146,11 @@ static unsigned int get_time_pit(void)
unsigned long flags;
unsigned int count;
- spin_lock_irqsave(&i8253_lock, flags);
+ raw_spin_lock_irqsave(&i8253_lock, flags);
outb_p(0x00, 0x43);
count = inb_p(0x40);
count |= inb_p(0x40) << 8;
- spin_unlock_irqrestore(&i8253_lock, flags);
+ raw_spin_unlock_irqrestore(&i8253_lock, flags);
return count;
}
diff --git a/drivers/input/joystick/iforce/iforce-main.c b/drivers/input/joystick/iforce/iforce-main.c
index b1edd778639c..405febd94f24 100644
--- a/drivers/input/joystick/iforce/iforce-main.c
+++ b/drivers/input/joystick/iforce/iforce-main.c
@@ -54,6 +54,9 @@ static signed short btn_avb_wheel[] =
static signed short abs_joystick[] =
{ ABS_X, ABS_Y, ABS_THROTTLE, ABS_HAT0X, ABS_HAT0Y, -1 };
+static signed short abs_joystick_rudder[] =
+{ ABS_X, ABS_Y, ABS_THROTTLE, ABS_RUDDER, ABS_HAT0X, ABS_HAT0Y, -1 };
+
static signed short abs_avb_pegasus[] =
{ ABS_X, ABS_Y, ABS_THROTTLE, ABS_RUDDER, ABS_HAT0X, ABS_HAT0Y,
ABS_HAT1X, ABS_HAT1Y, -1 };
@@ -76,8 +79,9 @@ static struct iforce_device iforce_device[] = {
{ 0x061c, 0xc0a4, "ACT LABS Force RS", btn_wheel, abs_wheel, ff_iforce }, //?
{ 0x061c, 0xc084, "ACT LABS Force RS", btn_wheel, abs_wheel, ff_iforce },
{ 0x06f8, 0x0001, "Guillemot Race Leader Force Feedback", btn_wheel, abs_wheel, ff_iforce }, //?
+ { 0x06f8, 0x0001, "Guillemot Jet Leader Force Feedback", btn_joystick, abs_joystick_rudder, ff_iforce },
{ 0x06f8, 0x0004, "Guillemot Force Feedback Racing Wheel", btn_wheel, abs_wheel, ff_iforce }, //?
- { 0x06f8, 0x0004, "Gullemot Jet Leader 3D", btn_joystick, abs_joystick, ff_iforce }, //?
+ { 0x06f8, 0xa302, "Guillemot Jet Leader 3D", btn_joystick, abs_joystick, ff_iforce }, //?
{ 0x06d6, 0x29bc, "Trust Force Feedback Race Master", btn_wheel, abs_wheel, ff_iforce },
{ 0x0000, 0x0000, "Unknown I-Force Device [%04x:%04x]", btn_joystick, abs_joystick, ff_iforce }
};
diff --git a/drivers/input/joystick/iforce/iforce-usb.c b/drivers/input/joystick/iforce/iforce-usb.c
index b41303d3ec54..6c96631ae5d9 100644
--- a/drivers/input/joystick/iforce/iforce-usb.c
+++ b/drivers/input/joystick/iforce/iforce-usb.c
@@ -212,6 +212,7 @@ static struct usb_device_id iforce_usb_ids [] = {
{ USB_DEVICE(0x061c, 0xc0a4) }, /* ACT LABS Force RS */
{ USB_DEVICE(0x061c, 0xc084) }, /* ACT LABS Force RS */
{ USB_DEVICE(0x06f8, 0x0001) }, /* Guillemot Race Leader Force Feedback */
+ { USB_DEVICE(0x06f8, 0x0003) }, /* Guillemot Jet Leader Force Feedback */
{ USB_DEVICE(0x06f8, 0x0004) }, /* Guillemot Force Feedback Racing Wheel */
{ USB_DEVICE(0x06f8, 0xa302) }, /* Guillemot Jet Leader 3D */
{ } /* Terminating entry */
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 9b3353b404da..c1087ce4cef9 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -533,8 +533,8 @@ static int xpad_init_output(struct usb_interface *intf, struct usb_xpad *xpad)
if (xpad->xtype != XTYPE_XBOX360 && xpad->xtype != XTYPE_XBOX)
return 0;
- xpad->odata = usb_buffer_alloc(xpad->udev, XPAD_PKT_LEN,
- GFP_KERNEL, &xpad->odata_dma);
+ xpad->odata = usb_alloc_coherent(xpad->udev, XPAD_PKT_LEN,
+ GFP_KERNEL, &xpad->odata_dma);
if (!xpad->odata)
goto fail1;
@@ -554,7 +554,7 @@ static int xpad_init_output(struct usb_interface *intf, struct usb_xpad *xpad)
return 0;
- fail2: usb_buffer_free(xpad->udev, XPAD_PKT_LEN, xpad->odata, xpad->odata_dma);
+ fail2: usb_free_coherent(xpad->udev, XPAD_PKT_LEN, xpad->odata, xpad->odata_dma);
fail1: return error;
}
@@ -568,7 +568,7 @@ static void xpad_deinit_output(struct usb_xpad *xpad)
{
if (xpad->xtype == XTYPE_XBOX360 || xpad->xtype == XTYPE_XBOX) {
usb_free_urb(xpad->irq_out);
- usb_buffer_free(xpad->udev, XPAD_PKT_LEN,
+ usb_free_coherent(xpad->udev, XPAD_PKT_LEN,
xpad->odata, xpad->odata_dma);
}
}
@@ -788,8 +788,8 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
if (!xpad || !input_dev)
goto fail1;
- xpad->idata = usb_buffer_alloc(udev, XPAD_PKT_LEN,
- GFP_KERNEL, &xpad->idata_dma);
+ xpad->idata = usb_alloc_coherent(udev, XPAD_PKT_LEN,
+ GFP_KERNEL, &xpad->idata_dma);
if (!xpad->idata)
goto fail1;
@@ -942,7 +942,7 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
fail5: usb_kill_urb(xpad->irq_in);
fail4: usb_free_urb(xpad->irq_in);
fail3: xpad_deinit_output(xpad);
- fail2: usb_buffer_free(udev, XPAD_PKT_LEN, xpad->idata, xpad->idata_dma);
+ fail2: usb_free_coherent(udev, XPAD_PKT_LEN, xpad->idata, xpad->idata_dma);
fail1: input_free_device(input_dev);
kfree(xpad);
return error;
@@ -964,7 +964,7 @@ static void xpad_disconnect(struct usb_interface *intf)
usb_kill_urb(xpad->irq_in);
}
usb_free_urb(xpad->irq_in);
- usb_buffer_free(xpad->udev, XPAD_PKT_LEN,
+ usb_free_coherent(xpad->udev, XPAD_PKT_LEN,
xpad->idata, xpad->idata_dma);
kfree(xpad);
}
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 64c102355f53..d8fa5d724c57 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -73,7 +73,7 @@ config KEYBOARD_ATKBD
default y
select SERIO
select SERIO_LIBPS2
- select SERIO_I8042 if X86
+ select SERIO_I8042 if X86 && !X86_MRST
select SERIO_GSCPS2 if GSC
help
Say Y here if you want to use a standard AT or PS/2 keyboard. Usually
@@ -143,19 +143,6 @@ config KEYBOARD_BFIN
To compile this driver as a module, choose M here: the
module will be called bf54x-keys.
-config KEYBOARD_CORGI
- tristate "Corgi keyboard (deprecated)"
- depends on PXA_SHARPSL
- help
- Say Y here to enable the keyboard on the Sharp Zaurus SL-C7xx
- series of PDAs.
-
- This driver is now deprecated, use generic GPIO based matrix
- keyboard driver instead.
-
- To compile this driver as a module, choose M here: the
- module will be called corgikbd.
-
config KEYBOARD_LKKBD
tristate "DECstation/VAXstation LK201/LK401 keyboard"
select SERIO
@@ -192,6 +179,22 @@ config KEYBOARD_GPIO
To compile this driver as a module, choose M here: the
module will be called gpio_keys.
+config KEYBOARD_TCA6416
+ tristate "TCA6416 Keypad Support"
+ depends on I2C
+ help
+ This driver implements basic keypad functionality
+ for keys connected through a TCA6416 IO expander.
+
+ Say Y here if your device has keys connected to a
+ TCA6416 IO expander. Your board-specific setup logic
+ must also provide pin-mask details (of which TCA6416 pins
+ are used for the keypad).
+
+ If enabled, the complete TCA6416 device will be managed through
+ this driver.
+
+
config KEYBOARD_MATRIX
tristate "GPIO driven matrix keypad support"
depends on GENERIC_GPIO
@@ -339,19 +342,6 @@ config KEYBOARD_PXA930_ROTARY
To compile this driver as a module, choose M here: the
module will be called pxa930_rotary.
-config KEYBOARD_SPITZ
- tristate "Spitz keyboard (deprecated)"
- depends on PXA_SHARPSL
- help
- Say Y here to enable the keyboard on the Sharp Zaurus SL-C1000,
- SL-C3000 and Sl-C3100 series of PDAs.
-
- This driver is now deprecated, use generic GPIO based matrix
- keyboard driver instead.
-
- To compile this driver as a module, choose M here: the
- module will be called spitzkbd.
-
config KEYBOARD_STOWAWAY
tristate "Stowaway keyboard"
select SERIO
@@ -414,28 +404,6 @@ config KEYBOARD_TWL4030
To compile this driver as a module, choose M here: the
module will be called twl4030_keypad.
-config KEYBOARD_TOSA
- tristate "Tosa keyboard (deprecated)"
- depends on MACH_TOSA
- help
- Say Y here to enable the keyboard on the Sharp Zaurus SL-6000x (Tosa)
-
- This driver is now deprecated, use generic GPIO based matrix
- keyboard driver instead.
-
- To compile this driver as a module, choose M here: the
- module will be called tosakbd.
-
-config KEYBOARD_TOSA_USE_EXT_KEYCODES
- bool "Tosa keyboard: use extended keycodes"
- depends on KEYBOARD_TOSA
- help
- Say Y here to enable the tosa keyboard driver to generate extended
- (>= 127) keycodes. Be aware, that they can't be correctly interpreted
- by either console keyboard driver or by Kdrive keybd driver.
-
- Say Y only if you know, what you are doing!
-
config KEYBOARD_XTKBD
tristate "XT keyboard"
select SERIO
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
index 706c6b5ed5f4..4596d0c6f922 100644
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -11,10 +11,10 @@ obj-$(CONFIG_KEYBOARD_AMIGA) += amikbd.o
obj-$(CONFIG_KEYBOARD_ATARI) += atakbd.o
obj-$(CONFIG_KEYBOARD_ATKBD) += atkbd.o
obj-$(CONFIG_KEYBOARD_BFIN) += bf54x-keys.o
-obj-$(CONFIG_KEYBOARD_CORGI) += corgikbd.o
obj-$(CONFIG_KEYBOARD_DAVINCI) += davinci_keyscan.o
obj-$(CONFIG_KEYBOARD_EP93XX) += ep93xx_keypad.o
obj-$(CONFIG_KEYBOARD_GPIO) += gpio_keys.o
+obj-$(CONFIG_KEYBOARD_TCA6416) += tca6416-keypad.o
obj-$(CONFIG_KEYBOARD_HIL) += hil_kbd.o
obj-$(CONFIG_KEYBOARD_HIL_OLD) += hilkbd.o
obj-$(CONFIG_KEYBOARD_IMX) += imx_keypad.o
@@ -33,10 +33,8 @@ obj-$(CONFIG_KEYBOARD_PXA27x) += pxa27x_keypad.o
obj-$(CONFIG_KEYBOARD_PXA930_ROTARY) += pxa930_rotary.o
obj-$(CONFIG_KEYBOARD_QT2160) += qt2160.o
obj-$(CONFIG_KEYBOARD_SH_KEYSC) += sh_keysc.o
-obj-$(CONFIG_KEYBOARD_SPITZ) += spitzkbd.o
obj-$(CONFIG_KEYBOARD_STOWAWAY) += stowaway.o
obj-$(CONFIG_KEYBOARD_SUNKBD) += sunkbd.o
-obj-$(CONFIG_KEYBOARD_TOSA) += tosakbd.o
obj-$(CONFIG_KEYBOARD_TWL4030) += twl4030_keypad.o
obj-$(CONFIG_KEYBOARD_XTKBD) += xtkbd.o
obj-$(CONFIG_KEYBOARD_W90P910) += w90p910_keypad.o
diff --git a/drivers/input/keyboard/amikbd.c b/drivers/input/keyboard/amikbd.c
index 35149ec455a9..79172af164f2 100644
--- a/drivers/input/keyboard/amikbd.c
+++ b/drivers/input/keyboard/amikbd.c
@@ -35,6 +35,7 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/keyboard.h>
+#include <linux/platform_device.h>
#include <asm/amigaints.h>
#include <asm/amigahw.h>
@@ -154,10 +155,9 @@ static const char *amikbd_messages[8] = {
[7] = KERN_WARNING "amikbd: keyboard interrupt\n"
};
-static struct input_dev *amikbd_dev;
-
-static irqreturn_t amikbd_interrupt(int irq, void *dummy)
+static irqreturn_t amikbd_interrupt(int irq, void *data)
{
+ struct input_dev *dev = data;
unsigned char scancode, down;
scancode = ~ciaa.sdr; /* get and invert scancode (keyboard is active low) */
@@ -170,47 +170,42 @@ static irqreturn_t amikbd_interrupt(int irq, void *dummy)
if (scancode < 0x78) { /* scancodes < 0x78 are keys */
if (scancode == 98) { /* CapsLock is a toggle switch key on Amiga */
- input_report_key(amikbd_dev, scancode, 1);
- input_report_key(amikbd_dev, scancode, 0);
+ input_report_key(dev, scancode, 1);
+ input_report_key(dev, scancode, 0);
} else {
- input_report_key(amikbd_dev, scancode, down);
+ input_report_key(dev, scancode, down);
}
- input_sync(amikbd_dev);
+ input_sync(dev);
} else /* scancodes >= 0x78 are error codes */
printk(amikbd_messages[scancode - 0x78]);
return IRQ_HANDLED;
}
-static int __init amikbd_init(void)
+static int __init amikbd_probe(struct platform_device *pdev)
{
+ struct input_dev *dev;
int i, j, err;
- if (!AMIGAHW_PRESENT(AMI_KEYBOARD))
- return -ENODEV;
-
- if (!request_mem_region(CIAA_PHYSADDR-1+0xb00, 0x100, "amikeyb"))
- return -EBUSY;
-
- amikbd_dev = input_allocate_device();
- if (!amikbd_dev) {
- printk(KERN_ERR "amikbd: not enough memory for input device\n");
- err = -ENOMEM;
- goto fail1;
+ dev = input_allocate_device();
+ if (!dev) {
+ dev_err(&pdev->dev, "Not enough memory for input device\n");
+ return -ENOMEM;
}
- amikbd_dev->name = "Amiga Keyboard";
- amikbd_dev->phys = "amikbd/input0";
- amikbd_dev->id.bustype = BUS_AMIGA;
- amikbd_dev->id.vendor = 0x0001;
- amikbd_dev->id.product = 0x0001;
- amikbd_dev->id.version = 0x0100;
+ dev->name = pdev->name;
+ dev->phys = "amikbd/input0";
+ dev->id.bustype = BUS_AMIGA;
+ dev->id.vendor = 0x0001;
+ dev->id.product = 0x0001;
+ dev->id.version = 0x0100;
+ dev->dev.parent = &pdev->dev;
- amikbd_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP);
+ dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP);
for (i = 0; i < 0x78; i++)
- set_bit(i, amikbd_dev->keybit);
+ set_bit(i, dev->keybit);
for (i = 0; i < MAX_NR_KEYMAPS; i++) {
static u_short temp_map[NR_KEYS] __initdata;
@@ -229,30 +224,54 @@ static int __init amikbd_init(void)
memcpy(key_maps[i], temp_map, sizeof(temp_map));
}
ciaa.cra &= ~0x41; /* serial data in, turn off TA */
- if (request_irq(IRQ_AMIGA_CIAA_SP, amikbd_interrupt, 0, "amikbd",
- amikbd_interrupt)) {
- err = -EBUSY;
+ err = request_irq(IRQ_AMIGA_CIAA_SP, amikbd_interrupt, 0, "amikbd",
+ dev);
+ if (err)
goto fail2;
- }
- err = input_register_device(amikbd_dev);
+ err = input_register_device(dev);
if (err)
goto fail3;
+ platform_set_drvdata(pdev, dev);
+
return 0;
- fail3: free_irq(IRQ_AMIGA_CIAA_SP, amikbd_interrupt);
- fail2: input_free_device(amikbd_dev);
- fail1: release_mem_region(CIAA_PHYSADDR - 1 + 0xb00, 0x100);
+ fail3: free_irq(IRQ_AMIGA_CIAA_SP, dev);
+ fail2: input_free_device(dev);
return err;
}
-static void __exit amikbd_exit(void)
+static int __exit amikbd_remove(struct platform_device *pdev)
+{
+ struct input_dev *dev = platform_get_drvdata(pdev);
+
+ platform_set_drvdata(pdev, NULL);
+ free_irq(IRQ_AMIGA_CIAA_SP, dev);
+ input_unregister_device(dev);
+ return 0;
+}
+
+static struct platform_driver amikbd_driver = {
+ .remove = __exit_p(amikbd_remove),
+ .driver = {
+ .name = "amiga-keyboard",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init amikbd_init(void)
{
- free_irq(IRQ_AMIGA_CIAA_SP, amikbd_interrupt);
- input_unregister_device(amikbd_dev);
- release_mem_region(CIAA_PHYSADDR - 1 + 0xb00, 0x100);
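+ /* amikbd_probe() is __init, so the driver is registered with platform_driver_probe() */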
+ return platform_driver_probe(&amikbd_driver, amikbd_probe);
}
module_init(amikbd_init);
+
+static void __exit amikbd_exit(void)
+{
+ platform_driver_unregister(&amikbd_driver);
+}
+
module_exit(amikbd_exit);
+
+MODULE_ALIAS("platform:amiga-keyboard");
diff --git a/drivers/input/keyboard/corgikbd.c b/drivers/input/keyboard/corgikbd.c
deleted file mode 100644
index 634af6a8e6b3..000000000000
--- a/drivers/input/keyboard/corgikbd.c
+++ /dev/null
@@ -1,414 +0,0 @@
-/*
- * Keyboard driver for Sharp Corgi models (SL-C7xx)
- *
- * Copyright (c) 2004-2005 Richard Purdie
- *
- * Based on xtkbd.c/locomkbd.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/delay.h>
-#include <linux/platform_device.h>
-#include <linux/init.h>
-#include <linux/input.h>
-#include <linux/interrupt.h>
-#include <linux/jiffies.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-
-#include <mach/corgi.h>
-#include <mach/pxa2xx-gpio.h>
-#include <asm/hardware/scoop.h>
-
-#define KB_ROWS 8
-#define KB_COLS 12
-#define KB_ROWMASK(r) (1 << (r))
-#define SCANCODE(r,c) ( ((r)<<4) + (c) + 1 )
-/* zero code, 124 scancodes */
-#define NR_SCANCODES ( SCANCODE(KB_ROWS-1,KB_COLS-1) +1 +1 )
-
-#define SCAN_INTERVAL (50) /* ms */
-#define HINGE_SCAN_INTERVAL (250) /* ms */
-
-#define CORGI_KEY_CALENDER KEY_F1
-#define CORGI_KEY_ADDRESS KEY_F2
-#define CORGI_KEY_FN KEY_F3
-#define CORGI_KEY_CANCEL KEY_F4
-#define CORGI_KEY_OFF KEY_SUSPEND
-#define CORGI_KEY_EXOK KEY_F5
-#define CORGI_KEY_EXCANCEL KEY_F6
-#define CORGI_KEY_EXJOGDOWN KEY_F7
-#define CORGI_KEY_EXJOGUP KEY_F8
-#define CORGI_KEY_JAP1 KEY_LEFTCTRL
-#define CORGI_KEY_JAP2 KEY_LEFTALT
-#define CORGI_KEY_MAIL KEY_F10
-#define CORGI_KEY_OK KEY_F11
-#define CORGI_KEY_MENU KEY_F12
-
-static unsigned char corgikbd_keycode[NR_SCANCODES] = {
- 0, /* 0 */
- 0, KEY_1, KEY_3, KEY_5, KEY_6, KEY_7, KEY_9, KEY_0, KEY_BACKSPACE, 0, 0, 0, 0, 0, 0, 0, /* 1-16 */
- 0, KEY_2, KEY_4, KEY_R, KEY_Y, KEY_8, KEY_I, KEY_O, KEY_P, 0, 0, 0, 0, 0, 0, 0, /* 17-32 */
- KEY_TAB, KEY_Q, KEY_E, KEY_T, KEY_G, KEY_U, KEY_J, KEY_K, 0, 0, 0, 0, 0, 0, 0, 0, /* 33-48 */
- CORGI_KEY_CALENDER, KEY_W, KEY_S, KEY_F, KEY_V, KEY_H, KEY_M, KEY_L, 0, KEY_RIGHTSHIFT, 0, 0, 0, 0, 0, 0, /* 49-64 */
- CORGI_KEY_ADDRESS, KEY_A, KEY_D, KEY_C, KEY_B, KEY_N, KEY_DOT, 0, KEY_ENTER, 0, KEY_LEFTSHIFT, 0, 0, 0, 0, 0, /* 65-80 */
- CORGI_KEY_MAIL, KEY_Z, KEY_X, KEY_MINUS, KEY_SPACE, KEY_COMMA, 0, KEY_UP, 0, 0, 0, CORGI_KEY_FN, 0, 0, 0, 0, /* 81-96 */
- KEY_SYSRQ, CORGI_KEY_JAP1, CORGI_KEY_JAP2, CORGI_KEY_CANCEL, CORGI_KEY_OK, CORGI_KEY_MENU, KEY_LEFT, KEY_DOWN, KEY_RIGHT, 0, 0, 0, 0, 0, 0, 0, /* 97-112 */
- CORGI_KEY_OFF, CORGI_KEY_EXOK, CORGI_KEY_EXCANCEL, CORGI_KEY_EXJOGDOWN, CORGI_KEY_EXJOGUP, 0, 0, 0, 0, 0, 0, 0, /* 113-124 */
-};
-
-
-struct corgikbd {
- unsigned char keycode[ARRAY_SIZE(corgikbd_keycode)];
- struct input_dev *input;
-
- spinlock_t lock;
- struct timer_list timer;
- struct timer_list htimer;
-
- unsigned int suspended;
- unsigned long suspend_jiffies;
-};
-
-#define KB_DISCHARGE_DELAY 10
-#define KB_ACTIVATE_DELAY 10
-
-/* Helper functions for reading the keyboard matrix
- * Note: We should really be using the generic gpio functions to alter
- * GPDR but it requires a function call per GPIO bit which is
- * excessive when we need to access 12 bits at once, multiple times.
- * These functions must be called within local_irq_save()/local_irq_restore()
- * or similar.
- */
-static inline void corgikbd_discharge_all(void)
-{
- /* STROBE All HiZ */
- GPCR2 = CORGI_GPIO_ALL_STROBE_BIT;
- GPDR2 &= ~CORGI_GPIO_ALL_STROBE_BIT;
-}
-
-static inline void corgikbd_activate_all(void)
-{
- /* STROBE ALL -> High */
- GPSR2 = CORGI_GPIO_ALL_STROBE_BIT;
- GPDR2 |= CORGI_GPIO_ALL_STROBE_BIT;
-
- udelay(KB_DISCHARGE_DELAY);
-
- /* Clear any interrupts we may have triggered when altering the GPIO lines */
- GEDR1 = CORGI_GPIO_HIGH_SENSE_BIT;
- GEDR2 = CORGI_GPIO_LOW_SENSE_BIT;
-}
-
-static inline void corgikbd_activate_col(int col)
-{
- /* STROBE col -> High, not col -> HiZ */
- GPSR2 = CORGI_GPIO_STROBE_BIT(col);
- GPDR2 = (GPDR2 & ~CORGI_GPIO_ALL_STROBE_BIT) | CORGI_GPIO_STROBE_BIT(col);
-}
-
-static inline void corgikbd_reset_col(int col)
-{
- /* STROBE col -> Low */
- GPCR2 = CORGI_GPIO_STROBE_BIT(col);
- /* STROBE col -> out, not col -> HiZ */
- GPDR2 = (GPDR2 & ~CORGI_GPIO_ALL_STROBE_BIT) | CORGI_GPIO_STROBE_BIT(col);
-}
-
-#define GET_ROWS_STATUS(c) (((GPLR1 & CORGI_GPIO_HIGH_SENSE_BIT) >> CORGI_GPIO_HIGH_SENSE_RSHIFT) | ((GPLR2 & CORGI_GPIO_LOW_SENSE_BIT) << CORGI_GPIO_LOW_SENSE_LSHIFT))
-
-/*
- * The corgi keyboard only generates interrupts when a key is pressed.
- * When a key is pressed, we enable a timer which then scans the
- * keyboard to detect when the key is released.
- */
-
-/* Scan the hardware keyboard and push any changes up through the input layer */
-static void corgikbd_scankeyboard(struct corgikbd *corgikbd_data)
-{
- unsigned int row, col, rowd;
- unsigned long flags;
- unsigned int num_pressed;
-
- if (corgikbd_data->suspended)
- return;
-
- spin_lock_irqsave(&corgikbd_data->lock, flags);
-
- num_pressed = 0;
- for (col = 0; col < KB_COLS; col++) {
- /*
- * Discharge the output driver capacitatance
- * in the keyboard matrix. (Yes it is significant..)
- */
-
- corgikbd_discharge_all();
- udelay(KB_DISCHARGE_DELAY);
-
- corgikbd_activate_col(col);
- udelay(KB_ACTIVATE_DELAY);
-
- rowd = GET_ROWS_STATUS(col);
- for (row = 0; row < KB_ROWS; row++) {
- unsigned int scancode, pressed;
-
- scancode = SCANCODE(row, col);
- pressed = rowd & KB_ROWMASK(row);
-
- input_report_key(corgikbd_data->input, corgikbd_data->keycode[scancode], pressed);
-
- if (pressed)
- num_pressed++;
-
- if (pressed && (corgikbd_data->keycode[scancode] == CORGI_KEY_OFF)
- && time_after(jiffies, corgikbd_data->suspend_jiffies + HZ)) {
- input_event(corgikbd_data->input, EV_PWR, CORGI_KEY_OFF, 1);
- corgikbd_data->suspend_jiffies=jiffies;
- }
- }
- corgikbd_reset_col(col);
- }
-
- corgikbd_activate_all();
-
- input_sync(corgikbd_data->input);
-
- /* if any keys are pressed, enable the timer */
- if (num_pressed)
- mod_timer(&corgikbd_data->timer, jiffies + msecs_to_jiffies(SCAN_INTERVAL));
-
- spin_unlock_irqrestore(&corgikbd_data->lock, flags);
-}
-
-/*
- * corgi keyboard interrupt handler.
- */
-static irqreturn_t corgikbd_interrupt(int irq, void *dev_id)
-{
- struct corgikbd *corgikbd_data = dev_id;
-
- if (!timer_pending(&corgikbd_data->timer)) {
- /** wait chattering delay **/
- udelay(20);
- corgikbd_scankeyboard(corgikbd_data);
- }
-
- return IRQ_HANDLED;
-}
-
-/*
- * corgi timer checking for released keys
- */
-static void corgikbd_timer_callback(unsigned long data)
-{
- struct corgikbd *corgikbd_data = (struct corgikbd *) data;
- corgikbd_scankeyboard(corgikbd_data);
-}
-
-/*
- * The hinge switches generate no interrupt so they need to be
- * monitored by a timer.
- *
- * We debounce the switches and pass them to the input system.
- *
- * gprr == 0x00 - Keyboard with Landscape Screen
- * 0x08 - No Keyboard with Portrait Screen
- * 0x0c - Keyboard and Screen Closed
- */
-
-#define READ_GPIO_BIT(x) (GPLR(x) & GPIO_bit(x))
-#define HINGE_STABLE_COUNT 2
-static int sharpsl_hinge_state;
-static int hinge_count;
-
-static void corgikbd_hinge_timer(unsigned long data)
-{
- struct corgikbd *corgikbd_data = (struct corgikbd *) data;
- unsigned long gprr;
- unsigned long flags;
-
- gprr = read_scoop_reg(&corgiscoop_device.dev, SCOOP_GPRR) & (CORGI_SCP_SWA | CORGI_SCP_SWB);
- gprr |= (READ_GPIO_BIT(CORGI_GPIO_AK_INT) != 0);
- if (gprr != sharpsl_hinge_state) {
- hinge_count = 0;
- sharpsl_hinge_state = gprr;
- } else if (hinge_count < HINGE_STABLE_COUNT) {
- hinge_count++;
- if (hinge_count >= HINGE_STABLE_COUNT) {
- spin_lock_irqsave(&corgikbd_data->lock, flags);
-
- input_report_switch(corgikbd_data->input, SW_LID, ((sharpsl_hinge_state & CORGI_SCP_SWA) != 0));
- input_report_switch(corgikbd_data->input, SW_TABLET_MODE, ((sharpsl_hinge_state & CORGI_SCP_SWB) != 0));
- input_report_switch(corgikbd_data->input, SW_HEADPHONE_INSERT, (READ_GPIO_BIT(CORGI_GPIO_AK_INT) != 0));
- input_sync(corgikbd_data->input);
-
- spin_unlock_irqrestore(&corgikbd_data->lock, flags);
- }
- }
- mod_timer(&corgikbd_data->htimer, jiffies + msecs_to_jiffies(HINGE_SCAN_INTERVAL));
-}
-
-#ifdef CONFIG_PM
-static int corgikbd_suspend(struct platform_device *dev, pm_message_t state)
-{
- int i;
- struct corgikbd *corgikbd = platform_get_drvdata(dev);
-
- corgikbd->suspended = 1;
- /* strobe 0 is the power key so this can't be made an input for
- powersaving therefore i = 1 */
- for (i = 1; i < CORGI_KEY_STROBE_NUM; i++)
- pxa_gpio_mode(CORGI_GPIO_KEY_STROBE(i) | GPIO_IN);
-
- return 0;
-}
-
-static int corgikbd_resume(struct platform_device *dev)
-{
- int i;
- struct corgikbd *corgikbd = platform_get_drvdata(dev);
-
- for (i = 1; i < CORGI_KEY_STROBE_NUM; i++)
- pxa_gpio_mode(CORGI_GPIO_KEY_STROBE(i) | GPIO_OUT | GPIO_DFLT_HIGH);
-
- /* Upon resume, ignore the suspend key for a short while */
- corgikbd->suspend_jiffies=jiffies;
- corgikbd->suspended = 0;
-
- return 0;
-}
-#else
-#define corgikbd_suspend NULL
-#define corgikbd_resume NULL
-#endif
-
-static int __devinit corgikbd_probe(struct platform_device *pdev)
-{
- struct corgikbd *corgikbd;
- struct input_dev *input_dev;
- int i, err = -ENOMEM;
-
- corgikbd = kzalloc(sizeof(struct corgikbd), GFP_KERNEL);
- input_dev = input_allocate_device();
- if (!corgikbd || !input_dev)
- goto fail;
-
- platform_set_drvdata(pdev, corgikbd);
-
- corgikbd->input = input_dev;
- spin_lock_init(&corgikbd->lock);
-
- /* Init Keyboard rescan timer */
- init_timer(&corgikbd->timer);
- corgikbd->timer.function = corgikbd_timer_callback;
- corgikbd->timer.data = (unsigned long) corgikbd;
-
- /* Init Hinge Timer */
- init_timer(&corgikbd->htimer);
- corgikbd->htimer.function = corgikbd_hinge_timer;
- corgikbd->htimer.data = (unsigned long) corgikbd;
-
- corgikbd->suspend_jiffies=jiffies;
-
- memcpy(corgikbd->keycode, corgikbd_keycode, sizeof(corgikbd->keycode));
-
- input_dev->name = "Corgi Keyboard";
- input_dev->phys = "corgikbd/input0";
- input_dev->id.bustype = BUS_HOST;
- input_dev->id.vendor = 0x0001;
- input_dev->id.product = 0x0001;
- input_dev->id.version = 0x0100;
- input_dev->dev.parent = &pdev->dev;
-
- input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP) |
- BIT_MASK(EV_PWR) | BIT_MASK(EV_SW);
- input_dev->keycode = corgikbd->keycode;
- input_dev->keycodesize = sizeof(unsigned char);
- input_dev->keycodemax = ARRAY_SIZE(corgikbd_keycode);
-
- for (i = 0; i < ARRAY_SIZE(corgikbd_keycode); i++)
- set_bit(corgikbd->keycode[i], input_dev->keybit);
- clear_bit(0, input_dev->keybit);
- set_bit(SW_LID, input_dev->swbit);
- set_bit(SW_TABLET_MODE, input_dev->swbit);
- set_bit(SW_HEADPHONE_INSERT, input_dev->swbit);
-
- err = input_register_device(corgikbd->input);
- if (err)
- goto fail;
-
- mod_timer(&corgikbd->htimer, jiffies + msecs_to_jiffies(HINGE_SCAN_INTERVAL));
-
- /* Setup sense interrupts - RisingEdge Detect, sense lines as inputs */
- for (i = 0; i < CORGI_KEY_SENSE_NUM; i++) {
- pxa_gpio_mode(CORGI_GPIO_KEY_SENSE(i) | GPIO_IN);
- if (request_irq(CORGI_IRQ_GPIO_KEY_SENSE(i), corgikbd_interrupt,
- IRQF_DISABLED | IRQF_TRIGGER_RISING,
- "corgikbd", corgikbd))
- printk(KERN_WARNING "corgikbd: Can't get IRQ: %d!\n", i);
- }
-
- /* Set Strobe lines as outputs - set high */
- for (i = 0; i < CORGI_KEY_STROBE_NUM; i++)
- pxa_gpio_mode(CORGI_GPIO_KEY_STROBE(i) | GPIO_OUT | GPIO_DFLT_HIGH);
-
- /* Setup the headphone jack as an input */
- pxa_gpio_mode(CORGI_GPIO_AK_INT | GPIO_IN);
-
- return 0;
-
- fail: input_free_device(input_dev);
- kfree(corgikbd);
- return err;
-}
-
-static int __devexit corgikbd_remove(struct platform_device *pdev)
-{
- int i;
- struct corgikbd *corgikbd = platform_get_drvdata(pdev);
-
- for (i = 0; i < CORGI_KEY_SENSE_NUM; i++)
- free_irq(CORGI_IRQ_GPIO_KEY_SENSE(i), corgikbd);
-
- del_timer_sync(&corgikbd->htimer);
- del_timer_sync(&corgikbd->timer);
-
- input_unregister_device(corgikbd->input);
-
- kfree(corgikbd);
-
- return 0;
-}
-
-static struct platform_driver corgikbd_driver = {
- .probe = corgikbd_probe,
- .remove = __devexit_p(corgikbd_remove),
- .suspend = corgikbd_suspend,
- .resume = corgikbd_resume,
- .driver = {
- .name = "corgi-keyboard",
- .owner = THIS_MODULE,
- },
-};
-
-static int __init corgikbd_init(void)
-{
- return platform_driver_register(&corgikbd_driver);
-}
-
-static void __exit corgikbd_exit(void)
-{
- platform_driver_unregister(&corgikbd_driver);
-}
-
-module_init(corgikbd_init);
-module_exit(corgikbd_exit);
-
-MODULE_AUTHOR("Richard Purdie <rpurdie@rpsys.net>");
-MODULE_DESCRIPTION("Corgi Keyboard Driver");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:corgi-keyboard");
diff --git a/drivers/input/keyboard/lm8323.c b/drivers/input/keyboard/lm8323.c
index 60ac4684f875..bc696931fed7 100644
--- a/drivers/input/keyboard/lm8323.c
+++ b/drivers/input/keyboard/lm8323.c
@@ -670,8 +670,6 @@ static int __devinit lm8323_probe(struct i2c_client *client,
goto fail1;
}
- i2c_set_clientdata(client, lm);
-
lm->client = client;
lm->idev = idev;
mutex_init(&lm->lock);
@@ -753,6 +751,8 @@ static int __devinit lm8323_probe(struct i2c_client *client,
goto fail4;
}
+ i2c_set_clientdata(client, lm);
+
device_init_wakeup(&client->dev, 1);
enable_irq_wake(client->irq);
@@ -778,6 +778,8 @@ static int __devexit lm8323_remove(struct i2c_client *client)
struct lm8323_chip *lm = i2c_get_clientdata(client);
int i;
+ i2c_set_clientdata(client, NULL);
+
disable_irq_wake(client->irq);
free_irq(client->irq, lm);
cancel_work_sync(&lm->work);
diff --git a/drivers/input/keyboard/spitzkbd.c b/drivers/input/keyboard/spitzkbd.c
deleted file mode 100644
index 13967422658c..000000000000
--- a/drivers/input/keyboard/spitzkbd.c
+++ /dev/null
@@ -1,496 +0,0 @@
-/*
- * Keyboard driver for Sharp Spitz, Borzoi and Akita (SL-Cxx00 series)
- *
- * Copyright (c) 2005 Richard Purdie
- *
- * Based on corgikbd.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/delay.h>
-#include <linux/platform_device.h>
-#include <linux/init.h>
-#include <linux/input.h>
-#include <linux/interrupt.h>
-#include <linux/jiffies.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-
-#include <mach/spitz.h>
-#include <mach/pxa2xx-gpio.h>
-
-#define KB_ROWS 7
-#define KB_COLS 11
-#define KB_ROWMASK(r) (1 << (r))
-#define SCANCODE(r,c) (((r)<<4) + (c) + 1)
-#define NR_SCANCODES ((KB_ROWS<<4) + 1)
-
-#define SCAN_INTERVAL (50) /* ms */
-#define HINGE_SCAN_INTERVAL (150) /* ms */
-
-#define SPITZ_KEY_CALENDER KEY_F1
-#define SPITZ_KEY_ADDRESS KEY_F2
-#define SPITZ_KEY_FN KEY_F3
-#define SPITZ_KEY_CANCEL KEY_F4
-#define SPITZ_KEY_EXOK KEY_F5
-#define SPITZ_KEY_EXCANCEL KEY_F6
-#define SPITZ_KEY_EXJOGDOWN KEY_F7
-#define SPITZ_KEY_EXJOGUP KEY_F8
-#define SPITZ_KEY_JAP1 KEY_LEFTALT
-#define SPITZ_KEY_JAP2 KEY_RIGHTCTRL
-#define SPITZ_KEY_SYNC KEY_F9
-#define SPITZ_KEY_MAIL KEY_F10
-#define SPITZ_KEY_OK KEY_F11
-#define SPITZ_KEY_MENU KEY_F12
-
-static unsigned char spitzkbd_keycode[NR_SCANCODES] = {
- 0, /* 0 */
- KEY_LEFTCTRL, KEY_1, KEY_3, KEY_5, KEY_6, KEY_7, KEY_9, KEY_0, KEY_BACKSPACE, SPITZ_KEY_EXOK, SPITZ_KEY_EXCANCEL, 0, 0, 0, 0, 0, /* 1-16 */
- 0, KEY_2, KEY_4, KEY_R, KEY_Y, KEY_8, KEY_I, KEY_O, KEY_P, SPITZ_KEY_EXJOGDOWN, SPITZ_KEY_EXJOGUP, 0, 0, 0, 0, 0, /* 17-32 */
- KEY_TAB, KEY_Q, KEY_E, KEY_T, KEY_G, KEY_U, KEY_J, KEY_K, 0, 0, 0, 0, 0, 0, 0, 0, /* 33-48 */
- SPITZ_KEY_ADDRESS, KEY_W, KEY_S, KEY_F, KEY_V, KEY_H, KEY_M, KEY_L, 0, KEY_RIGHTSHIFT, 0, 0, 0, 0, 0, 0, /* 49-64 */
- SPITZ_KEY_CALENDER, KEY_A, KEY_D, KEY_C, KEY_B, KEY_N, KEY_DOT, 0, KEY_ENTER, KEY_LEFTSHIFT, 0, 0, 0, 0, 0, 0, /* 65-80 */
- SPITZ_KEY_MAIL, KEY_Z, KEY_X, KEY_MINUS, KEY_SPACE, KEY_COMMA, 0, KEY_UP, 0, 0, SPITZ_KEY_FN, 0, 0, 0, 0, 0, /* 81-96 */
- KEY_SYSRQ, SPITZ_KEY_JAP1, SPITZ_KEY_JAP2, SPITZ_KEY_CANCEL, SPITZ_KEY_OK, SPITZ_KEY_MENU, KEY_LEFT, KEY_DOWN, KEY_RIGHT, 0, 0, 0, 0, 0, 0, 0 /* 97-112 */
-};
-
-static int spitz_strobes[] = {
- SPITZ_GPIO_KEY_STROBE0,
- SPITZ_GPIO_KEY_STROBE1,
- SPITZ_GPIO_KEY_STROBE2,
- SPITZ_GPIO_KEY_STROBE3,
- SPITZ_GPIO_KEY_STROBE4,
- SPITZ_GPIO_KEY_STROBE5,
- SPITZ_GPIO_KEY_STROBE6,
- SPITZ_GPIO_KEY_STROBE7,
- SPITZ_GPIO_KEY_STROBE8,
- SPITZ_GPIO_KEY_STROBE9,
- SPITZ_GPIO_KEY_STROBE10,
-};
-
-static int spitz_senses[] = {
- SPITZ_GPIO_KEY_SENSE0,
- SPITZ_GPIO_KEY_SENSE1,
- SPITZ_GPIO_KEY_SENSE2,
- SPITZ_GPIO_KEY_SENSE3,
- SPITZ_GPIO_KEY_SENSE4,
- SPITZ_GPIO_KEY_SENSE5,
- SPITZ_GPIO_KEY_SENSE6,
-};
-
-struct spitzkbd {
- unsigned char keycode[ARRAY_SIZE(spitzkbd_keycode)];
- struct input_dev *input;
- char phys[32];
-
- spinlock_t lock;
- struct timer_list timer;
- struct timer_list htimer;
-
- unsigned int suspended;
- unsigned long suspend_jiffies;
-};
-
-#define KB_DISCHARGE_DELAY 10
-#define KB_ACTIVATE_DELAY 10
-
-/* Helper functions for reading the keyboard matrix
- * Note: We should really be using the generic gpio functions to alter
- * GPDR but it requires a function call per GPIO bit which is
- * excessive when we need to access 11 bits at once, multiple times.
- * These functions must be called within local_irq_save()/local_irq_restore()
- * or similar.
- */
-static inline void spitzkbd_discharge_all(void)
-{
- /* STROBE All HiZ */
- GPCR0 = SPITZ_GPIO_G0_STROBE_BIT;
- GPDR0 &= ~SPITZ_GPIO_G0_STROBE_BIT;
- GPCR1 = SPITZ_GPIO_G1_STROBE_BIT;
- GPDR1 &= ~SPITZ_GPIO_G1_STROBE_BIT;
- GPCR2 = SPITZ_GPIO_G2_STROBE_BIT;
- GPDR2 &= ~SPITZ_GPIO_G2_STROBE_BIT;
- GPCR3 = SPITZ_GPIO_G3_STROBE_BIT;
- GPDR3 &= ~SPITZ_GPIO_G3_STROBE_BIT;
-}
-
-static inline void spitzkbd_activate_all(void)
-{
- /* STROBE ALL -> High */
- GPSR0 = SPITZ_GPIO_G0_STROBE_BIT;
- GPDR0 |= SPITZ_GPIO_G0_STROBE_BIT;
- GPSR1 = SPITZ_GPIO_G1_STROBE_BIT;
- GPDR1 |= SPITZ_GPIO_G1_STROBE_BIT;
- GPSR2 = SPITZ_GPIO_G2_STROBE_BIT;
- GPDR2 |= SPITZ_GPIO_G2_STROBE_BIT;
- GPSR3 = SPITZ_GPIO_G3_STROBE_BIT;
- GPDR3 |= SPITZ_GPIO_G3_STROBE_BIT;
-
- udelay(KB_DISCHARGE_DELAY);
-
- /* Clear any interrupts we may have triggered when altering the GPIO lines */
- GEDR0 = SPITZ_GPIO_G0_SENSE_BIT;
- GEDR1 = SPITZ_GPIO_G1_SENSE_BIT;
- GEDR2 = SPITZ_GPIO_G2_SENSE_BIT;
- GEDR3 = SPITZ_GPIO_G3_SENSE_BIT;
-}
-
-static inline void spitzkbd_activate_col(int col)
-{
- int gpio = spitz_strobes[col];
- GPDR0 &= ~SPITZ_GPIO_G0_STROBE_BIT;
- GPDR1 &= ~SPITZ_GPIO_G1_STROBE_BIT;
- GPDR2 &= ~SPITZ_GPIO_G2_STROBE_BIT;
- GPDR3 &= ~SPITZ_GPIO_G3_STROBE_BIT;
- GPSR(gpio) = GPIO_bit(gpio);
- GPDR(gpio) |= GPIO_bit(gpio);
-}
-
-static inline void spitzkbd_reset_col(int col)
-{
- int gpio = spitz_strobes[col];
- GPDR0 &= ~SPITZ_GPIO_G0_STROBE_BIT;
- GPDR1 &= ~SPITZ_GPIO_G1_STROBE_BIT;
- GPDR2 &= ~SPITZ_GPIO_G2_STROBE_BIT;
- GPDR3 &= ~SPITZ_GPIO_G3_STROBE_BIT;
- GPCR(gpio) = GPIO_bit(gpio);
- GPDR(gpio) |= GPIO_bit(gpio);
-}
-
-static inline int spitzkbd_get_row_status(int col)
-{
- return ((GPLR0 >> 12) & 0x01) | ((GPLR0 >> 16) & 0x02)
- | ((GPLR2 >> 25) & 0x04) | ((GPLR1 << 1) & 0x08)
- | ((GPLR1 >> 0) & 0x10) | ((GPLR1 >> 1) & 0x60);
-}
-
-/*
- * The spitz keyboard only generates interrupts when a key is pressed.
- * When a key is pressed, we enable a timer which then scans the
- * keyboard to detect when the key is released.
- */
-
-/* Scan the hardware keyboard and push any changes up through the input layer */
-static void spitzkbd_scankeyboard(struct spitzkbd *spitzkbd_data)
-{
- unsigned int row, col, rowd;
- unsigned long flags;
- unsigned int num_pressed, pwrkey = ((GPLR(SPITZ_GPIO_ON_KEY) & GPIO_bit(SPITZ_GPIO_ON_KEY)) != 0);
-
- if (spitzkbd_data->suspended)
- return;
-
- spin_lock_irqsave(&spitzkbd_data->lock, flags);
-
- num_pressed = 0;
- for (col = 0; col < KB_COLS; col++) {
- /*
- * Discharge the output driver capacitatance
- * in the keyboard matrix. (Yes it is significant..)
- */
-
- spitzkbd_discharge_all();
- udelay(KB_DISCHARGE_DELAY);
-
- spitzkbd_activate_col(col);
- udelay(KB_ACTIVATE_DELAY);
-
- rowd = spitzkbd_get_row_status(col);
- for (row = 0; row < KB_ROWS; row++) {
- unsigned int scancode, pressed;
-
- scancode = SCANCODE(row, col);
- pressed = rowd & KB_ROWMASK(row);
-
- input_report_key(spitzkbd_data->input, spitzkbd_data->keycode[scancode], pressed);
-
- if (pressed)
- num_pressed++;
- }
- spitzkbd_reset_col(col);
- }
-
- spitzkbd_activate_all();
-
- input_report_key(spitzkbd_data->input, SPITZ_KEY_SYNC, (GPLR(SPITZ_GPIO_SYNC) & GPIO_bit(SPITZ_GPIO_SYNC)) != 0 );
- input_report_key(spitzkbd_data->input, KEY_SUSPEND, pwrkey);
-
- if (pwrkey && time_after(jiffies, spitzkbd_data->suspend_jiffies + msecs_to_jiffies(1000))) {
- input_event(spitzkbd_data->input, EV_PWR, KEY_SUSPEND, 1);
- spitzkbd_data->suspend_jiffies = jiffies;
- }
-
- input_sync(spitzkbd_data->input);
-
- /* if any keys are pressed, enable the timer */
- if (num_pressed)
- mod_timer(&spitzkbd_data->timer, jiffies + msecs_to_jiffies(SCAN_INTERVAL));
-
- spin_unlock_irqrestore(&spitzkbd_data->lock, flags);
-}
-
-/*
- * spitz keyboard interrupt handler.
- */
-static irqreturn_t spitzkbd_interrupt(int irq, void *dev_id)
-{
- struct spitzkbd *spitzkbd_data = dev_id;
-
- if (!timer_pending(&spitzkbd_data->timer)) {
- /** wait chattering delay **/
- udelay(20);
- spitzkbd_scankeyboard(spitzkbd_data);
- }
-
- return IRQ_HANDLED;
-}
-
-/*
- * spitz timer checking for released keys
- */
-static void spitzkbd_timer_callback(unsigned long data)
-{
- struct spitzkbd *spitzkbd_data = (struct spitzkbd *) data;
-
- spitzkbd_scankeyboard(spitzkbd_data);
-}
-
-/*
- * The hinge switches generate an interrupt.
- * We debounce the switches and pass them to the input system.
- */
-
-static irqreturn_t spitzkbd_hinge_isr(int irq, void *dev_id)
-{
- struct spitzkbd *spitzkbd_data = dev_id;
-
- if (!timer_pending(&spitzkbd_data->htimer))
- mod_timer(&spitzkbd_data->htimer, jiffies + msecs_to_jiffies(HINGE_SCAN_INTERVAL));
-
- return IRQ_HANDLED;
-}
-
-#define HINGE_STABLE_COUNT 2
-static int sharpsl_hinge_state;
-static int hinge_count;
-
-static void spitzkbd_hinge_timer(unsigned long data)
-{
- struct spitzkbd *spitzkbd_data = (struct spitzkbd *) data;
- unsigned long state;
- unsigned long flags;
-
- state = GPLR(SPITZ_GPIO_SWA) & (GPIO_bit(SPITZ_GPIO_SWA)|GPIO_bit(SPITZ_GPIO_SWB));
- state |= (GPLR(SPITZ_GPIO_AK_INT) & GPIO_bit(SPITZ_GPIO_AK_INT));
- if (state != sharpsl_hinge_state) {
- hinge_count = 0;
- sharpsl_hinge_state = state;
- } else if (hinge_count < HINGE_STABLE_COUNT) {
- hinge_count++;
- }
-
- if (hinge_count >= HINGE_STABLE_COUNT) {
- spin_lock_irqsave(&spitzkbd_data->lock, flags);
-
- input_report_switch(spitzkbd_data->input, SW_LID, ((GPLR(SPITZ_GPIO_SWA) & GPIO_bit(SPITZ_GPIO_SWA)) != 0));
- input_report_switch(spitzkbd_data->input, SW_TABLET_MODE, ((GPLR(SPITZ_GPIO_SWB) & GPIO_bit(SPITZ_GPIO_SWB)) != 0));
- input_report_switch(spitzkbd_data->input, SW_HEADPHONE_INSERT, ((GPLR(SPITZ_GPIO_AK_INT) & GPIO_bit(SPITZ_GPIO_AK_INT)) != 0));
- input_sync(spitzkbd_data->input);
-
- spin_unlock_irqrestore(&spitzkbd_data->lock, flags);
- } else {
- mod_timer(&spitzkbd_data->htimer, jiffies + msecs_to_jiffies(HINGE_SCAN_INTERVAL));
- }
-}
-
-#ifdef CONFIG_PM
-static int spitzkbd_suspend(struct platform_device *dev, pm_message_t state)
-{
- int i;
- struct spitzkbd *spitzkbd = platform_get_drvdata(dev);
- spitzkbd->suspended = 1;
-
- /* Set Strobe lines as inputs - *except* strobe line 0 leave this
- enabled so we can detect a power button press for resume */
- for (i = 1; i < SPITZ_KEY_STROBE_NUM; i++)
- pxa_gpio_mode(spitz_strobes[i] | GPIO_IN);
-
- return 0;
-}
-
-static int spitzkbd_resume(struct platform_device *dev)
-{
- int i;
- struct spitzkbd *spitzkbd = platform_get_drvdata(dev);
-
- for (i = 0; i < SPITZ_KEY_STROBE_NUM; i++)
- pxa_gpio_mode(spitz_strobes[i] | GPIO_OUT | GPIO_DFLT_HIGH);
-
- /* Upon resume, ignore the suspend key for a short while */
- spitzkbd->suspend_jiffies = jiffies;
- spitzkbd->suspended = 0;
-
- return 0;
-}
-#else
-#define spitzkbd_suspend NULL
-#define spitzkbd_resume NULL
-#endif
-
-static int __devinit spitzkbd_probe(struct platform_device *dev)
-{
- struct spitzkbd *spitzkbd;
- struct input_dev *input_dev;
- int i, err = -ENOMEM;
-
- spitzkbd = kzalloc(sizeof(struct spitzkbd), GFP_KERNEL);
- input_dev = input_allocate_device();
- if (!spitzkbd || !input_dev)
- goto fail;
-
- platform_set_drvdata(dev, spitzkbd);
- strcpy(spitzkbd->phys, "spitzkbd/input0");
-
- spin_lock_init(&spitzkbd->lock);
-
- /* Init Keyboard rescan timer */
- init_timer(&spitzkbd->timer);
- spitzkbd->timer.function = spitzkbd_timer_callback;
- spitzkbd->timer.data = (unsigned long) spitzkbd;
-
- /* Init Hinge Timer */
- init_timer(&spitzkbd->htimer);
- spitzkbd->htimer.function = spitzkbd_hinge_timer;
- spitzkbd->htimer.data = (unsigned long) spitzkbd;
-
- spitzkbd->suspend_jiffies = jiffies;
-
- spitzkbd->input = input_dev;
-
- input_dev->name = "Spitz Keyboard";
- input_dev->phys = spitzkbd->phys;
- input_dev->dev.parent = &dev->dev;
-
- input_dev->id.bustype = BUS_HOST;
- input_dev->id.vendor = 0x0001;
- input_dev->id.product = 0x0001;
- input_dev->id.version = 0x0100;
-
- input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP) |
- BIT_MASK(EV_PWR) | BIT_MASK(EV_SW);
- input_dev->keycode = spitzkbd->keycode;
- input_dev->keycodesize = sizeof(unsigned char);
- input_dev->keycodemax = ARRAY_SIZE(spitzkbd_keycode);
-
- memcpy(spitzkbd->keycode, spitzkbd_keycode, sizeof(spitzkbd->keycode));
- for (i = 0; i < ARRAY_SIZE(spitzkbd_keycode); i++)
- set_bit(spitzkbd->keycode[i], input_dev->keybit);
- clear_bit(0, input_dev->keybit);
- set_bit(KEY_SUSPEND, input_dev->keybit);
- set_bit(SW_LID, input_dev->swbit);
- set_bit(SW_TABLET_MODE, input_dev->swbit);
- set_bit(SW_HEADPHONE_INSERT, input_dev->swbit);
-
- err = input_register_device(input_dev);
- if (err)
- goto fail;
-
- mod_timer(&spitzkbd->htimer, jiffies + msecs_to_jiffies(HINGE_SCAN_INTERVAL));
-
- /* Setup sense interrupts - RisingEdge Detect, sense lines as inputs */
- for (i = 0; i < SPITZ_KEY_SENSE_NUM; i++) {
- pxa_gpio_mode(spitz_senses[i] | GPIO_IN);
- if (request_irq(IRQ_GPIO(spitz_senses[i]), spitzkbd_interrupt,
- IRQF_DISABLED|IRQF_TRIGGER_RISING,
- "Spitzkbd Sense", spitzkbd))
- printk(KERN_WARNING "spitzkbd: Can't get Sense IRQ: %d!\n", i);
- }
-
- /* Set Strobe lines as outputs - set high */
- for (i = 0; i < SPITZ_KEY_STROBE_NUM; i++)
- pxa_gpio_mode(spitz_strobes[i] | GPIO_OUT | GPIO_DFLT_HIGH);
-
- pxa_gpio_mode(SPITZ_GPIO_SYNC | GPIO_IN);
- pxa_gpio_mode(SPITZ_GPIO_ON_KEY | GPIO_IN);
- pxa_gpio_mode(SPITZ_GPIO_SWA | GPIO_IN);
- pxa_gpio_mode(SPITZ_GPIO_SWB | GPIO_IN);
-
- request_irq(SPITZ_IRQ_GPIO_SYNC, spitzkbd_interrupt,
- IRQF_DISABLED | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
- "Spitzkbd Sync", spitzkbd);
- request_irq(SPITZ_IRQ_GPIO_ON_KEY, spitzkbd_interrupt,
- IRQF_DISABLED | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
- "Spitzkbd PwrOn", spitzkbd);
- request_irq(SPITZ_IRQ_GPIO_SWA, spitzkbd_hinge_isr,
- IRQF_DISABLED | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
- "Spitzkbd SWA", spitzkbd);
- request_irq(SPITZ_IRQ_GPIO_SWB, spitzkbd_hinge_isr,
- IRQF_DISABLED | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
- "Spitzkbd SWB", spitzkbd);
- request_irq(SPITZ_IRQ_GPIO_AK_INT, spitzkbd_hinge_isr,
- IRQF_DISABLED | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
- "Spitzkbd HP", spitzkbd);
-
- return 0;
-
- fail: input_free_device(input_dev);
- kfree(spitzkbd);
- return err;
-}
-
-static int __devexit spitzkbd_remove(struct platform_device *dev)
-{
- int i;
- struct spitzkbd *spitzkbd = platform_get_drvdata(dev);
-
- for (i = 0; i < SPITZ_KEY_SENSE_NUM; i++)
- free_irq(IRQ_GPIO(spitz_senses[i]), spitzkbd);
-
- free_irq(SPITZ_IRQ_GPIO_SYNC, spitzkbd);
- free_irq(SPITZ_IRQ_GPIO_ON_KEY, spitzkbd);
- free_irq(SPITZ_IRQ_GPIO_SWA, spitzkbd);
- free_irq(SPITZ_IRQ_GPIO_SWB, spitzkbd);
- free_irq(SPITZ_IRQ_GPIO_AK_INT, spitzkbd);
-
- del_timer_sync(&spitzkbd->htimer);
- del_timer_sync(&spitzkbd->timer);
-
- input_unregister_device(spitzkbd->input);
-
- kfree(spitzkbd);
-
- return 0;
-}
-
-static struct platform_driver spitzkbd_driver = {
- .probe = spitzkbd_probe,
- .remove = __devexit_p(spitzkbd_remove),
- .suspend = spitzkbd_suspend,
- .resume = spitzkbd_resume,
- .driver = {
- .name = "spitz-keyboard",
- .owner = THIS_MODULE,
- },
-};
-
-static int __init spitzkbd_init(void)
-{
- return platform_driver_register(&spitzkbd_driver);
-}
-
-static void __exit spitzkbd_exit(void)
-{
- platform_driver_unregister(&spitzkbd_driver);
-}
-
-module_init(spitzkbd_init);
-module_exit(spitzkbd_exit);
-
-MODULE_AUTHOR("Richard Purdie <rpurdie@rpsys.net>");
-MODULE_DESCRIPTION("Spitz Keyboard Driver");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:spitz-keyboard");
diff --git a/drivers/input/keyboard/tca6416-keypad.c b/drivers/input/keyboard/tca6416-keypad.c
new file mode 100644
index 000000000000..493c93f25e2a
--- /dev/null
+++ b/drivers/input/keyboard/tca6416-keypad.c
@@ -0,0 +1,349 @@
+/*
+ * Driver for keys on TCA6416 I2C IO expander
+ *
+ * Copyright (C) 2010 Texas Instruments
+ *
+ * Author : Sriramakrishnan.A.G. <srk@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/tca6416_keypad.h>
+
+#define TCA6416_INPUT 0
+#define TCA6416_OUTPUT 1
+#define TCA6416_INVERT 2
+#define TCA6416_DIRECTION 3
+
+static const struct i2c_device_id tca6416_id[] = {
+ { "tca6416-keys", 16, },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tca6416_id);
+
+struct tca6416_drv_data {
+ struct input_dev *input;
+ struct tca6416_button data[0];
+};
+
+struct tca6416_keypad_chip {
+ uint16_t reg_output;
+ uint16_t reg_direction;
+ uint16_t reg_input;
+
+ struct i2c_client *client;
+ struct input_dev *input;
+ struct delayed_work dwork;
+ u16 pinmask;
+ int irqnum;
+ bool use_polling;
+ struct tca6416_button buttons[0];
+};
+
+static int tca6416_write_reg(struct tca6416_keypad_chip *chip, int reg, u16 val)
+{
+ int error;
+
+ error = i2c_smbus_write_word_data(chip->client, reg << 1, val);
+ if (error < 0) {
+ dev_err(&chip->client->dev,
+ "%s failed, reg: %d, val: %d, error: %d\n",
+ __func__, reg, val, error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int tca6416_read_reg(struct tca6416_keypad_chip *chip, int reg, u16 *val)
+{
+ int retval;
+
+ retval = i2c_smbus_read_word_data(chip->client, reg << 1);
+ if (retval < 0) {
+ dev_err(&chip->client->dev, "%s failed, reg: %d, error: %d\n",
+ __func__, reg, retval);
+ return retval;
+ }
+
+ *val = (u16)retval;
+ return 0;
+}
+
+static void tca6416_keys_scan(struct tca6416_keypad_chip *chip)
+{
+ struct input_dev *input = chip->input;
+ u16 reg_val, val;
+ int error, i, pin_index;
+
+ error = tca6416_read_reg(chip, TCA6416_INPUT, &reg_val);
+ if (error)
+ return;
+
+ reg_val &= chip->pinmask;
+
+ /* Figure out which lines have changed */
+ val = reg_val ^ chip->reg_input;
+ chip->reg_input = reg_val;
+
+ for (i = 0, pin_index = 0; i < 16; i++) {
+ if (val & (1 << i)) {
+ struct tca6416_button *button = &chip->buttons[pin_index];
+ unsigned int type = button->type ?: EV_KEY;
+ int state = ((reg_val & (1 << i)) ? 1 : 0)
+ ^ button->active_low;
+
+ input_event(input, type, button->code, !!state);
+ input_sync(input);
+ }
+
+ if (chip->pinmask & (1 << i))
+ pin_index++;
+ }
+}
+
+/*
+ * This is a threaded IRQ handler and it can (and will) sleep.
+ */
+static irqreturn_t tca6416_keys_isr(int irq, void *dev_id)
+{
+ struct tca6416_keypad_chip *chip = dev_id;
+
+ tca6416_keys_scan(chip);
+
+ return IRQ_HANDLED;
+}
+
+static void tca6416_keys_work_func(struct work_struct *work)
+{
+ struct tca6416_keypad_chip *chip =
+ container_of(work, struct tca6416_keypad_chip, dwork.work);
+
+ tca6416_keys_scan(chip);
+ schedule_delayed_work(&chip->dwork, msecs_to_jiffies(100));
+}
+
+static int tca6416_keys_open(struct input_dev *dev)
+{
+ struct tca6416_keypad_chip *chip = input_get_drvdata(dev);
+
+ /* Get initial device state in case it has switches */
+ tca6416_keys_scan(chip);
+
+ if (chip->use_polling)
+ schedule_delayed_work(&chip->dwork, msecs_to_jiffies(100));
+ else
+ enable_irq(chip->irqnum);
+
+ return 0;
+}
+
+static void tca6416_keys_close(struct input_dev *dev)
+{
+ struct tca6416_keypad_chip *chip = input_get_drvdata(dev);
+
+ if (chip->use_polling)
+ cancel_delayed_work_sync(&chip->dwork);
+ else
+ disable_irq(chip->irqnum);
+}
+
+static int __devinit tca6416_setup_registers(struct tca6416_keypad_chip *chip)
+{
+ int error;
+
+ error = tca6416_read_reg(chip, TCA6416_OUTPUT, &chip->reg_output);
+ if (error)
+ return error;
+
+ error = tca6416_read_reg(chip, TCA6416_DIRECTION, &chip->reg_direction);
+ if (error)
+ return error;
+
+ /* ensure that keypad pins are set to input */
+ error = tca6416_write_reg(chip, TCA6416_DIRECTION,
+ chip->reg_direction | chip->pinmask);
+ if (error)
+ return error;
+
+ error = tca6416_read_reg(chip, TCA6416_DIRECTION, &chip->reg_direction);
+ if (error)
+ return error;
+
+ error = tca6416_read_reg(chip, TCA6416_INPUT, &chip->reg_input);
+ if (error)
+ return error;
+
+ chip->reg_input &= chip->pinmask;
+
+ return 0;
+}
+
+static int __devinit tca6416_keypad_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct tca6416_keys_platform_data *pdata;
+ struct tca6416_keypad_chip *chip;
+ struct input_dev *input;
+ int error;
+ int i;
+
+ /* Check functionality */
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE)) {
+ dev_err(&client->dev, "%s adapter not supported\n",
+ dev_driver_string(&client->adapter->dev));
+ return -ENODEV;
+ }
+
+ pdata = client->dev.platform_data;
+ if (!pdata) {
+ dev_dbg(&client->dev, "no platform data\n");
+ return -EINVAL;
+ }
+
+ chip = kzalloc(sizeof(struct tca6416_keypad_chip) +
+ pdata->nbuttons * sizeof(struct tca6416_button),
+ GFP_KERNEL);
+ input = input_allocate_device();
+ if (!chip || !input) {
+ error = -ENOMEM;
+ goto fail1;
+ }
+
+ chip->client = client;
+ chip->input = input;
+ chip->pinmask = pdata->pinmask;
+ chip->use_polling = pdata->use_polling;
+
+ INIT_DELAYED_WORK(&chip->dwork, tca6416_keys_work_func);
+
+ input->phys = "tca6416-keys/input0";
+ input->name = client->name;
+ input->dev.parent = &client->dev;
+
+ input->open = tca6416_keys_open;
+ input->close = tca6416_keys_close;
+
+ input->id.bustype = BUS_HOST;
+ input->id.vendor = 0x0001;
+ input->id.product = 0x0001;
+ input->id.version = 0x0100;
+
+ /* Enable auto repeat feature of Linux input subsystem */
+ if (pdata->rep)
+ __set_bit(EV_REP, input->evbit);
+
+ for (i = 0; i < pdata->nbuttons; i++) {
+ unsigned int type;
+
+ chip->buttons[i] = pdata->buttons[i];
+ type = (pdata->buttons[i].type) ?: EV_KEY;
+ input_set_capability(input, type, pdata->buttons[i].code);
+ }
+
+ input_set_drvdata(input, chip);
+
+ /*
+ * Initialize cached registers from their original values.
+ * We can't share this chip with another i2c master.
+ */
+ error = tca6416_setup_registers(chip);
+ if (error)
+ goto fail1;
+
+ if (!chip->use_polling) {
+ if (pdata->irq_is_gpio)
+ chip->irqnum = gpio_to_irq(client->irq);
+ else
+ chip->irqnum = client->irq;
+
+ error = request_threaded_irq(chip->irqnum, NULL,
+ tca6416_keys_isr,
+ IRQF_TRIGGER_FALLING,
+ "tca6416-keypad", chip);
+ if (error) {
+ dev_dbg(&client->dev,
+ "Unable to claim irq %d; error %d\n",
+ chip->irqnum, error);
+ goto fail1;
+ }
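+ /* keep the interrupt disabled until the input device is opened; tca6416_keys_open() enables it */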
+ disable_irq(chip->irqnum);
+ }
+
+ error = input_register_device(input);
+ if (error) {
+ dev_dbg(&client->dev,
+ "Unable to register input device, error: %d\n", error);
+ goto fail2;
+ }
+
+ i2c_set_clientdata(client, chip);
+
+ return 0;
+
+fail2:
+ if (!chip->use_polling) {
+ free_irq(chip->irqnum, chip);
+ enable_irq(chip->irqnum);
+ }
+fail1:
+ input_free_device(input);
+ kfree(chip);
+ return error;
+}
+
+static int __devexit tca6416_keypad_remove(struct i2c_client *client)
+{
+ struct tca6416_keypad_chip *chip = i2c_get_clientdata(client);
+
+ if (!chip->use_polling) {
+ free_irq(chip->irqnum, chip);
+ enable_irq(chip->irqnum);
+ }
+
+ input_unregister_device(chip->input);
+ kfree(chip);
+
+ i2c_set_clientdata(client, NULL);
+
+ return 0;
+}
+
+
+static struct i2c_driver tca6416_keypad_driver = {
+ .driver = {
+ .name = "tca6416-keypad",
+ },
+ .probe = tca6416_keypad_probe,
+ .remove = __devexit_p(tca6416_keypad_remove),
+ .id_table = tca6416_id,
+};
+
+static int __init tca6416_keypad_init(void)
+{
+ return i2c_add_driver(&tca6416_keypad_driver);
+}
+
+subsys_initcall(tca6416_keypad_init);
+
+static void __exit tca6416_keypad_exit(void)
+{
+ i2c_del_driver(&tca6416_keypad_driver);
+}
+module_exit(tca6416_keypad_exit);
+
+MODULE_AUTHOR("Sriramakrishnan <srk@ti.com>");
+MODULE_DESCRIPTION("Keypad driver over tca6146 IO expander");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/keyboard/tosakbd.c b/drivers/input/keyboard/tosakbd.c
deleted file mode 100644
index 3910f269cfc8..000000000000
--- a/drivers/input/keyboard/tosakbd.c
+++ /dev/null
@@ -1,431 +0,0 @@
-/*
- * Keyboard driver for Sharp Tosa models (SL-6000x)
- *
- * Copyright (c) 2005 Dirk Opfer
- * Copyright (c) 2007 Dmitry Baryshkov
- *
- * Based on xtkbd.c/locomkbd.c/corgikbd.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/input.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/slab.h>
-
-#include <mach/gpio.h>
-#include <mach/tosa.h>
-
-#define KB_ROWMASK(r) (1 << (r))
-#define SCANCODE(r, c) (((r)<<4) + (c) + 1)
-#define NR_SCANCODES SCANCODE(TOSA_KEY_SENSE_NUM - 1, TOSA_KEY_STROBE_NUM - 1) + 1
-
-#define SCAN_INTERVAL (HZ/10)
-
-#define KB_DISCHARGE_DELAY 10
-#define KB_ACTIVATE_DELAY 10
-
-static unsigned short tosakbd_keycode[NR_SCANCODES] = {
-0,
-0, KEY_W, 0, 0, 0, KEY_K, KEY_BACKSPACE, KEY_P,
-0, 0, 0, 0, 0, 0, 0, 0,
-KEY_Q, KEY_E, KEY_T, KEY_Y, 0, KEY_O, KEY_I, KEY_COMMA,
-0, 0, 0, 0, 0, 0, 0, 0,
-KEY_A, KEY_D, KEY_G, KEY_U, 0, KEY_L, KEY_ENTER, KEY_DOT,
-0, 0, 0, 0, 0, 0, 0, 0,
-KEY_Z, KEY_C, KEY_V, KEY_J, TOSA_KEY_ADDRESSBOOK, TOSA_KEY_CANCEL, TOSA_KEY_CENTER, TOSA_KEY_OK,
-KEY_LEFTSHIFT, 0, 0, 0, 0, 0, 0, 0,
-KEY_S, KEY_R, KEY_B, KEY_N, TOSA_KEY_CALENDAR, TOSA_KEY_HOMEPAGE, KEY_LEFTCTRL, TOSA_KEY_LIGHT,
-0, KEY_RIGHTSHIFT, 0, 0, 0, 0, 0, 0,
-KEY_TAB, KEY_SLASH, KEY_H, KEY_M, TOSA_KEY_MENU, 0, KEY_UP, 0,
-0, 0, TOSA_KEY_FN, 0, 0, 0, 0, 0,
-KEY_X, KEY_F, KEY_SPACE, KEY_APOSTROPHE, TOSA_KEY_MAIL, KEY_LEFT, KEY_DOWN, KEY_RIGHT,
-0, 0, 0,
-};
-
-struct tosakbd {
- unsigned short keycode[ARRAY_SIZE(tosakbd_keycode)];
- struct input_dev *input;
- bool suspended;
- spinlock_t lock; /* protect kbd scanning */
- struct timer_list timer;
-};
-
-
-/* Helper functions for reading the keyboard matrix
- * Note: We should really be using the generic gpio functions to alter
- * GPDR but it requires a function call per GPIO bit which is
- * excessive when we need to access 12 bits at once, multiple times.
- * These functions must be called within local_irq_save()/local_irq_restore()
- * or similar.
- */
-#define GET_ROWS_STATUS(c) ((GPLR2 & TOSA_GPIO_ALL_SENSE_BIT) >> TOSA_GPIO_ALL_SENSE_RSHIFT)
-
-static inline void tosakbd_discharge_all(void)
-{
- /* STROBE All HiZ */
- GPCR1 = TOSA_GPIO_HIGH_STROBE_BIT;
- GPDR1 &= ~TOSA_GPIO_HIGH_STROBE_BIT;
- GPCR2 = TOSA_GPIO_LOW_STROBE_BIT;
- GPDR2 &= ~TOSA_GPIO_LOW_STROBE_BIT;
-}
-
-static inline void tosakbd_activate_all(void)
-{
- /* STROBE ALL -> High */
- GPSR1 = TOSA_GPIO_HIGH_STROBE_BIT;
- GPDR1 |= TOSA_GPIO_HIGH_STROBE_BIT;
- GPSR2 = TOSA_GPIO_LOW_STROBE_BIT;
- GPDR2 |= TOSA_GPIO_LOW_STROBE_BIT;
-
- udelay(KB_DISCHARGE_DELAY);
-
- /* STATE CLEAR */
- GEDR2 |= TOSA_GPIO_ALL_SENSE_BIT;
-}
-
-static inline void tosakbd_activate_col(int col)
-{
- if (col <= 5) {
- /* STROBE col -> High, not col -> HiZ */
- GPSR1 = TOSA_GPIO_STROBE_BIT(col);
- GPDR1 = (GPDR1 & ~TOSA_GPIO_HIGH_STROBE_BIT) | TOSA_GPIO_STROBE_BIT(col);
- } else {
- /* STROBE col -> High, not col -> HiZ */
- GPSR2 = TOSA_GPIO_STROBE_BIT(col);
- GPDR2 = (GPDR2 & ~TOSA_GPIO_LOW_STROBE_BIT) | TOSA_GPIO_STROBE_BIT(col);
- }
-}
-
-static inline void tosakbd_reset_col(int col)
-{
- if (col <= 5) {
- /* STROBE col -> Low */
- GPCR1 = TOSA_GPIO_STROBE_BIT(col);
- /* STROBE col -> out, not col -> HiZ */
- GPDR1 = (GPDR1 & ~TOSA_GPIO_HIGH_STROBE_BIT) | TOSA_GPIO_STROBE_BIT(col);
- } else {
- /* STROBE col -> Low */
- GPCR2 = TOSA_GPIO_STROBE_BIT(col);
- /* STROBE col -> out, not col -> HiZ */
- GPDR2 = (GPDR2 & ~TOSA_GPIO_LOW_STROBE_BIT) | TOSA_GPIO_STROBE_BIT(col);
- }
-}
-/*
- * The tosa keyboard only generates interrupts when a key is pressed.
- * So when a key is pressed, we enable a timer. This timer scans the
- * keyboard, and this is how we detect when the key is released.
- */
-
-/* Scan the hardware keyboard and push any changes up through the input layer */
-static void tosakbd_scankeyboard(struct platform_device *dev)
-{
- struct tosakbd *tosakbd = platform_get_drvdata(dev);
- unsigned int row, col, rowd;
- unsigned long flags;
- unsigned int num_pressed = 0;
-
- spin_lock_irqsave(&tosakbd->lock, flags);
-
- if (tosakbd->suspended)
- goto out;
-
- for (col = 0; col < TOSA_KEY_STROBE_NUM; col++) {
- /*
- * Discharge the output driver capacitatance
- * in the keyboard matrix. (Yes it is significant..)
- */
- tosakbd_discharge_all();
- udelay(KB_DISCHARGE_DELAY);
-
- tosakbd_activate_col(col);
- udelay(KB_ACTIVATE_DELAY);
-
- rowd = GET_ROWS_STATUS(col);
-
- for (row = 0; row < TOSA_KEY_SENSE_NUM; row++) {
- unsigned int scancode, pressed;
- scancode = SCANCODE(row, col);
- pressed = rowd & KB_ROWMASK(row);
-
- if (pressed && !tosakbd->keycode[scancode])
- dev_warn(&dev->dev,
- "unhandled scancode: 0x%02x\n",
- scancode);
-
- input_report_key(tosakbd->input,
- tosakbd->keycode[scancode],
- pressed);
- if (pressed)
- num_pressed++;
- }
-
- tosakbd_reset_col(col);
- }
-
- tosakbd_activate_all();
-
- input_sync(tosakbd->input);
-
- /* if any keys are pressed, enable the timer */
- if (num_pressed)
- mod_timer(&tosakbd->timer, jiffies + SCAN_INTERVAL);
-
- out:
- spin_unlock_irqrestore(&tosakbd->lock, flags);
-}
-
-/*
- * tosa keyboard interrupt handler.
- */
-static irqreturn_t tosakbd_interrupt(int irq, void *__dev)
-{
- struct platform_device *dev = __dev;
- struct tosakbd *tosakbd = platform_get_drvdata(dev);
-
- if (!timer_pending(&tosakbd->timer)) {
- /** wait chattering delay **/
- udelay(20);
- tosakbd_scankeyboard(dev);
- }
-
- return IRQ_HANDLED;
-}
-
-/*
- * tosa timer checking for released keys
- */
-static void tosakbd_timer_callback(unsigned long __dev)
-{
- struct platform_device *dev = (struct platform_device *)__dev;
-
- tosakbd_scankeyboard(dev);
-}
-
-#ifdef CONFIG_PM
-static int tosakbd_suspend(struct platform_device *dev, pm_message_t state)
-{
- struct tosakbd *tosakbd = platform_get_drvdata(dev);
- unsigned long flags;
-
- spin_lock_irqsave(&tosakbd->lock, flags);
- tosakbd->suspended = true;
- spin_unlock_irqrestore(&tosakbd->lock, flags);
-
- del_timer_sync(&tosakbd->timer);
-
- return 0;
-}
-
-static int tosakbd_resume(struct platform_device *dev)
-{
- struct tosakbd *tosakbd = platform_get_drvdata(dev);
-
- tosakbd->suspended = false;
- tosakbd_scankeyboard(dev);
-
- return 0;
-}
-#else
-#define tosakbd_suspend NULL
-#define tosakbd_resume NULL
-#endif
-
-static int __devinit tosakbd_probe(struct platform_device *pdev) {
-
- int i;
- struct tosakbd *tosakbd;
- struct input_dev *input_dev;
- int error;
-
- tosakbd = kzalloc(sizeof(struct tosakbd), GFP_KERNEL);
- if (!tosakbd)
- return -ENOMEM;
-
- input_dev = input_allocate_device();
- if (!input_dev) {
- kfree(tosakbd);
- return -ENOMEM;
- }
-
- platform_set_drvdata(pdev, tosakbd);
-
- spin_lock_init(&tosakbd->lock);
-
- /* Init Keyboard rescan timer */
- init_timer(&tosakbd->timer);
- tosakbd->timer.function = tosakbd_timer_callback;
- tosakbd->timer.data = (unsigned long) pdev;
-
- tosakbd->input = input_dev;
-
- input_set_drvdata(input_dev, tosakbd);
- input_dev->name = "Tosa Keyboard";
- input_dev->phys = "tosakbd/input0";
- input_dev->dev.parent = &pdev->dev;
-
- input_dev->id.bustype = BUS_HOST;
- input_dev->id.vendor = 0x0001;
- input_dev->id.product = 0x0001;
- input_dev->id.version = 0x0100;
-
- input_dev->evbit[0] = BIT(EV_KEY) | BIT(EV_REP);
- input_dev->keycode = tosakbd->keycode;
- input_dev->keycodesize = sizeof(tosakbd->keycode[0]);
- input_dev->keycodemax = ARRAY_SIZE(tosakbd_keycode);
-
- memcpy(tosakbd->keycode, tosakbd_keycode, sizeof(tosakbd_keycode));
-
- for (i = 0; i < ARRAY_SIZE(tosakbd_keycode); i++)
- __set_bit(tosakbd->keycode[i], input_dev->keybit);
- __clear_bit(KEY_RESERVED, input_dev->keybit);
-
- /* Setup sense interrupts - RisingEdge Detect, sense lines as inputs */
- for (i = 0; i < TOSA_KEY_SENSE_NUM; i++) {
- int gpio = TOSA_GPIO_KEY_SENSE(i);
- int irq;
- error = gpio_request(gpio, "tosakbd");
- if (error < 0) {
- printk(KERN_ERR "tosakbd: failed to request GPIO %d, "
- " error %d\n", gpio, error);
- goto fail;
- }
-
- error = gpio_direction_input(TOSA_GPIO_KEY_SENSE(i));
- if (error < 0) {
- printk(KERN_ERR "tosakbd: failed to configure input"
- " direction for GPIO %d, error %d\n",
- gpio, error);
- gpio_free(gpio);
- goto fail;
- }
-
- irq = gpio_to_irq(gpio);
- if (irq < 0) {
- error = irq;
- printk(KERN_ERR "gpio-keys: Unable to get irq number"
- " for GPIO %d, error %d\n",
- gpio, error);
- gpio_free(gpio);
- goto fail;
- }
-
- error = request_irq(irq, tosakbd_interrupt,
- IRQF_DISABLED | IRQF_TRIGGER_RISING,
- "tosakbd", pdev);
-
- if (error) {
- printk("tosakbd: Can't get IRQ: %d: error %d!\n",
- irq, error);
- gpio_free(gpio);
- goto fail;
- }
- }
-
- /* Set Strobe lines as outputs - set high */
- for (i = 0; i < TOSA_KEY_STROBE_NUM; i++) {
- int gpio = TOSA_GPIO_KEY_STROBE(i);
- error = gpio_request(gpio, "tosakbd");
- if (error < 0) {
- printk(KERN_ERR "tosakbd: failed to request GPIO %d, "
- " error %d\n", gpio, error);
- goto fail2;
- }
-
- error = gpio_direction_output(gpio, 1);
- if (error < 0) {
- printk(KERN_ERR "tosakbd: failed to configure input"
- " direction for GPIO %d, error %d\n",
- gpio, error);
- gpio_free(gpio);
- goto fail2;
- }
-
- }
-
- error = input_register_device(input_dev);
- if (error) {
- printk(KERN_ERR "tosakbd: Unable to register input device, "
- "error: %d\n", error);
- goto fail2;
- }
-
- printk(KERN_INFO "input: Tosa Keyboard Registered\n");
-
- return 0;
-
-fail2:
- while (--i >= 0)
- gpio_free(TOSA_GPIO_KEY_STROBE(i));
-
- i = TOSA_KEY_SENSE_NUM;
-fail:
- while (--i >= 0) {
- free_irq(gpio_to_irq(TOSA_GPIO_KEY_SENSE(i)), pdev);
- gpio_free(TOSA_GPIO_KEY_SENSE(i));
- }
-
- platform_set_drvdata(pdev, NULL);
- input_free_device(input_dev);
- kfree(tosakbd);
-
- return error;
-}
-
-static int __devexit tosakbd_remove(struct platform_device *dev)
-{
- int i;
- struct tosakbd *tosakbd = platform_get_drvdata(dev);
-
- for (i = 0; i < TOSA_KEY_STROBE_NUM; i++)
- gpio_free(TOSA_GPIO_KEY_STROBE(i));
-
- for (i = 0; i < TOSA_KEY_SENSE_NUM; i++) {
- free_irq(gpio_to_irq(TOSA_GPIO_KEY_SENSE(i)), dev);
- gpio_free(TOSA_GPIO_KEY_SENSE(i));
- }
-
- del_timer_sync(&tosakbd->timer);
-
- input_unregister_device(tosakbd->input);
-
- kfree(tosakbd);
-
- return 0;
-}
-
-static struct platform_driver tosakbd_driver = {
- .probe = tosakbd_probe,
- .remove = __devexit_p(tosakbd_remove),
- .suspend = tosakbd_suspend,
- .resume = tosakbd_resume,
- .driver = {
- .name = "tosa-keyboard",
- .owner = THIS_MODULE,
- },
-};
-
-static int __devinit tosakbd_init(void)
-{
- return platform_driver_register(&tosakbd_driver);
-}
-
-static void __exit tosakbd_exit(void)
-{
- platform_driver_unregister(&tosakbd_driver);
-}
-
-module_init(tosakbd_init);
-module_exit(tosakbd_exit);
-
-MODULE_AUTHOR("Dirk Opfer <Dirk@Opfer-Online.de>");
-MODULE_DESCRIPTION("Tosa Keyboard Driver");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:tosa-keyboard");
diff --git a/drivers/input/misc/88pm860x_onkey.c b/drivers/input/misc/88pm860x_onkey.c
index 40dabd8487b5..4cc82826ea6b 100644
--- a/drivers/input/misc/88pm860x_onkey.c
+++ b/drivers/input/misc/88pm860x_onkey.c
@@ -87,7 +87,6 @@ static int __devinit pm860x_onkey_probe(struct platform_device *pdev)
info->idev->phys = "88pm860x_on/input0";
info->idev->id.bustype = BUS_I2C;
info->idev->dev.parent = &pdev->dev;
- info->irq = irq;
info->idev->evbit[0] = BIT_MASK(EV_KEY);
info->idev->keybit[BIT_WORD(KEY_POWER)] = BIT_MASK(KEY_POWER);
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 23140a3bb8e0..c44b9eafc556 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -22,6 +22,36 @@ config INPUT_88PM860X_ONKEY
To compile this driver as a module, choose M here: the module
will be called 88pm860x_onkey.
+config INPUT_AD714X
+ tristate "Analog Devices AD714x Capacitance Touch Sensor"
+ help
+ Say Y here if you want to support an AD7142/3/7/8/7A touch sensor.
+
+ You should select a bus connection too.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad714x.
+
+config INPUT_AD714X_I2C
+ tristate "support I2C bus connection"
+ depends on INPUT_AD714X && I2C
+ default y
+ help
+ Say Y here if you have AD7142/AD7147 hooked to an I2C bus.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad714x-i2c.
+
+config INPUT_AD714X_SPI
+ tristate "support SPI bus connection"
+ depends on INPUT_AD714X && SPI
+ default y
+ help
+ Say Y here if you have AD7142/AD7147 hooked to a SPI bus.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad714x-spi.
+
config INPUT_PCSPKR
tristate "PC Speaker support"
depends on PCSPKR_PLATFORM
@@ -50,6 +80,16 @@ config INPUT_M68K_BEEP
tristate "M68k Beeper support"
depends on M68K
+config INPUT_MAX8925_ONKEY
+ tristate "MAX8925 ONKEY support"
+ depends on MFD_MAX8925
+ help
+ Support the ONKEY of MAX8925 PMICs as an input device
+ reporting power button status.
+
+ To compile this driver as a module, choose M here: the module
+ will be called max8925_onkey.
+
config INPUT_APANEL
tristate "Fujitsu Lifebook Application Panel buttons"
depends on X86 && I2C && LEDS_CLASS
@@ -277,6 +317,16 @@ config INPUT_PCF50633_PMU
Say Y to include support for delivering PMU events via input
layer on NXP PCF50633.
+config INPUT_PCF8574
+ tristate "PCF8574 Keypad input device"
+ depends on I2C && EXPERIMENTAL
+ help
+ Say Y here if you want to support a keypad connected via I2C
+ with a PCF8574.
+
+ To compile this driver as a module, choose M here: the
+ module will be called pcf8574_keypad.
+
config INPUT_GPIO_ROTARY_ENCODER
tristate "Rotary encoders connected to GPIO pins"
depends on GPIOLIB && GENERIC_GPIO
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index 7e95a5d474dc..71fe57d8023f 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -5,6 +5,9 @@
# Each configuration option enables a list of files.
obj-$(CONFIG_INPUT_88PM860X_ONKEY) += 88pm860x_onkey.o
+obj-$(CONFIG_INPUT_AD714X) += ad714x.o
+obj-$(CONFIG_INPUT_AD714X_I2C) += ad714x-i2c.o
+obj-$(CONFIG_INPUT_AD714X_SPI) += ad714x-spi.o
obj-$(CONFIG_INPUT_APANEL) += apanel.o
obj-$(CONFIG_INPUT_ATI_REMOTE) += ati_remote.o
obj-$(CONFIG_INPUT_ATI_REMOTE2) += ati_remote2.o
@@ -17,8 +20,10 @@ obj-$(CONFIG_HP_SDC_RTC) += hp_sdc_rtc.o
obj-$(CONFIG_INPUT_IXP4XX_BEEPER) += ixp4xx-beeper.o
obj-$(CONFIG_INPUT_KEYSPAN_REMOTE) += keyspan_remote.o
obj-$(CONFIG_INPUT_M68K_BEEP) += m68kspkr.o
+obj-$(CONFIG_INPUT_MAX8925_ONKEY) += max8925_onkey.o
obj-$(CONFIG_INPUT_PCAP) += pcap_keys.o
obj-$(CONFIG_INPUT_PCF50633_PMU) += pcf50633-input.o
+obj-$(CONFIG_INPUT_PCF8574) += pcf8574_keypad.o
obj-$(CONFIG_INPUT_PCSPKR) += pcspkr.o
obj-$(CONFIG_INPUT_POWERMATE) += powermate.o
obj-$(CONFIG_INPUT_RB532_BUTTON) += rb532_button.o
diff --git a/drivers/input/misc/ad714x-i2c.c b/drivers/input/misc/ad714x-i2c.c
new file mode 100644
index 000000000000..e9adbe49f6a4
--- /dev/null
+++ b/drivers/input/misc/ad714x-i2c.c
@@ -0,0 +1,140 @@
+/*
+ * AD714X CapTouch Programmable Controller driver (I2C bus)
+ *
+ * Copyright 2009 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/input.h> /* BUS_I2C */
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include "ad714x.h"
+
+#ifdef CONFIG_PM
+static int ad714x_i2c_suspend(struct i2c_client *client, pm_message_t message)
+{
+ return ad714x_disable(i2c_get_clientdata(client));
+}
+
+static int ad714x_i2c_resume(struct i2c_client *client)
+{
+ return ad714x_enable(i2c_get_clientdata(client));
+}
+#else
+# define ad714x_i2c_suspend NULL
+# define ad714x_i2c_resume NULL
+#endif
+
+static int ad714x_i2c_write(struct device *dev, unsigned short reg,
+ unsigned short data)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ int ret = 0;
+ u8 *_reg = (u8 *)&reg;
+ u8 *_data = (u8 *)&data;
+
+ u8 tx[4] = {
+ _reg[1],
+ _reg[0],
+ _data[1],
+ _data[0]
+ };
+
+ ret = i2c_master_send(client, tx, 4);
+ if (ret < 0)
+ dev_err(&client->dev, "I2C write error\n");
+
+ return ret;
+}
+
+static int ad714x_i2c_read(struct device *dev, unsigned short reg,
+ unsigned short *data)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ int ret = 0;
+ u8 *_reg = (u8 *)&reg;
+ u8 *_data = (u8 *)data;
+
+ u8 tx[2] = {
+ _reg[1],
+ _reg[0]
+ };
+ u8 rx[2];
+
+ ret = i2c_master_send(client, tx, 2);
+ if (ret >= 0)
+ ret = i2c_master_recv(client, rx, 2);
+
+ if (unlikely(ret < 0)) {
+ dev_err(&client->dev, "I2C read error\n");
+ } else {
+ _data[0] = rx[1];
+ _data[1] = rx[0];
+ }
+
+ return ret;
+}
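As an illustration (not part of the diff above): the two accessors build their bus frames by casting the 16-bit register address and data to u8 pointers and emitting byte [1] before byte [0], which on a little-endian host puts the most significant byte on the wire first. A hedged, endianness-independent sketch of the same write frame, using made-up values:

	/* Sketch only: same 4-byte frame as ad714x_i2c_write() would send for a
	 * hypothetical write of 0x0FFF to register 0x0017.
	 */
	static void ad714x_i2c_frame_sketch(unsigned char tx[4])
	{
		unsigned short reg = 0x0017, data = 0x0FFF;	/* hypothetical */

		tx[0] = reg >> 8;	/* register address, high byte */
		tx[1] = reg & 0xff;	/* register address, low byte  */
		tx[2] = data >> 8;	/* data, high byte */
		tx[3] = data & 0xff;	/* data, low byte  */
		/* tx[] now holds { 0x00, 0x17, 0x0f, 0xff } */
	}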
+
+static int __devinit ad714x_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct ad714x_chip *chip;
+
+ chip = ad714x_probe(&client->dev, BUS_I2C, client->irq,
+ ad714x_i2c_read, ad714x_i2c_write);
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+
+ i2c_set_clientdata(client, chip);
+
+ return 0;
+}
+
+static int __devexit ad714x_i2c_remove(struct i2c_client *client)
+{
+ struct ad714x_chip *chip = i2c_get_clientdata(client);
+
+ ad714x_remove(chip);
+ i2c_set_clientdata(client, NULL);
+
+ return 0;
+}
+
+static const struct i2c_device_id ad714x_id[] = {
+ { "ad7142_captouch", 0 },
+ { "ad7143_captouch", 0 },
+ { "ad7147_captouch", 0 },
+ { "ad7147a_captouch", 0 },
+ { "ad7148_captouch", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, ad714x_id);
+
+static struct i2c_driver ad714x_i2c_driver = {
+ .driver = {
+ .name = "ad714x_captouch",
+ },
+ .probe = ad714x_i2c_probe,
+ .remove = __devexit_p(ad714x_i2c_remove),
+ .suspend = ad714x_i2c_suspend,
+ .resume = ad714x_i2c_resume,
+ .id_table = ad714x_id,
+};
+
+static __init int ad714x_i2c_init(void)
+{
+ return i2c_add_driver(&ad714x_i2c_driver);
+}
+module_init(ad714x_i2c_init);
+
+static __exit void ad714x_i2c_exit(void)
+{
+ i2c_del_driver(&ad714x_i2c_driver);
+}
+module_exit(ad714x_i2c_exit);
+
+MODULE_DESCRIPTION("Analog Devices AD714X Capacitance Touch Sensor I2C Bus Driver");
+MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/misc/ad714x-spi.c b/drivers/input/misc/ad714x-spi.c
new file mode 100644
index 000000000000..7f8dedfd1bfe
--- /dev/null
+++ b/drivers/input/misc/ad714x-spi.c
@@ -0,0 +1,103 @@
+/*
+ * AD714X CapTouch Programmable Controller driver (SPI bus)
+ *
+ * Copyright 2009 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/input.h> /* BUS_I2C */
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+#include <linux/types.h>
+#include "ad714x.h"
+
+#define AD714x_SPI_CMD_PREFIX 0xE000 /* bits 15:11 */
+#define AD714x_SPI_READ BIT(10)
+
+#ifdef CONFIG_PM
+static int ad714x_spi_suspend(struct spi_device *spi, pm_message_t message)
+{
+ return ad714x_disable(spi_get_drvdata(spi));
+}
+
+static int ad714x_spi_resume(struct spi_device *spi)
+{
+ return ad714x_enable(spi_get_drvdata(spi));
+}
+#else
+# define ad714x_spi_suspend NULL
+# define ad714x_spi_resume NULL
+#endif
+
+static int ad714x_spi_read(struct device *dev, unsigned short reg,
+ unsigned short *data)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ unsigned short tx = AD714x_SPI_CMD_PREFIX | AD714x_SPI_READ | reg;
+
+ return spi_write_then_read(spi, (u8 *)&tx, 2, (u8 *)data, 2);
+}
+
+static int ad714x_spi_write(struct device *dev, unsigned short reg,
+ unsigned short data)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ unsigned short tx[2] = {
+ AD714x_SPI_CMD_PREFIX | reg,
+ data
+ };
+
+ return spi_write(spi, (u8 *)tx, 4);
+}
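As an illustration (not part of the diff above): each SPI access is packed into 16-bit command words, with bits 15:11 carrying AD714x_SPI_CMD_PREFIX, bit 10 set for reads, and the low bits holding the register address. A hedged worked example, using register 0x17 purely as an example value:

	/* Sketch only: the command word ad714x_spi_read() would place in 'tx'
	 * for register 0x17.
	 */
	static unsigned short ad714x_spi_read_cmd_sketch(void)
	{
		return 0xE000		/* AD714x_SPI_CMD_PREFIX, bits 15:11 */
		     | (1 << 10)	/* AD714x_SPI_READ */
		     | 0x17;		/* register address -> 0xE417 */
	}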
+
+static int __devinit ad714x_spi_probe(struct spi_device *spi)
+{
+ struct ad714x_chip *chip;
+
+ chip = ad714x_probe(&spi->dev, BUS_SPI, spi->irq,
+ ad714x_spi_read, ad714x_spi_write);
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+
+ spi_set_drvdata(spi, chip);
+
+ return 0;
+}
+
+static int __devexit ad714x_spi_remove(struct spi_device *spi)
+{
+ struct ad714x_chip *chip = spi_get_drvdata(spi);
+
+ ad714x_remove(chip);
+ spi_set_drvdata(spi, NULL);
+
+ return 0;
+}
+
+static struct spi_driver ad714x_spi_driver = {
+ .driver = {
+ .name = "ad714x_captouch",
+ .owner = THIS_MODULE,
+ },
+ .probe = ad714x_spi_probe,
+ .remove = __devexit_p(ad714x_spi_remove),
+ .suspend = ad714x_spi_suspend,
+ .resume = ad714x_spi_resume,
+};
+
+static __init int ad714x_spi_init(void)
+{
+ return spi_register_driver(&ad714x_spi_driver);
+}
+module_init(ad714x_spi_init);
+
+static __exit void ad714x_spi_exit(void)
+{
+ spi_unregister_driver(&ad714x_spi_driver);
+}
+module_exit(ad714x_spi_exit);
+
+MODULE_DESCRIPTION("Analog Devices AD714X Capacitance Touch Sensor SPI Bus Driver");
+MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/misc/ad714x.c b/drivers/input/misc/ad714x.c
new file mode 100644
index 000000000000..0fe27baf5e72
--- /dev/null
+++ b/drivers/input/misc/ad714x.c
@@ -0,0 +1,1347 @@
+/*
+ * AD714X CapTouch Programmable Controller driver supporting AD7142/3/7/8/7A
+ *
+ * Copyright 2009 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/input/ad714x.h>
+#include "ad714x.h"
+
+#define AD714X_PWR_CTRL 0x0
+#define AD714X_STG_CAL_EN_REG 0x1
+#define AD714X_AMB_COMP_CTRL0_REG 0x2
+#define AD714X_PARTID_REG 0x17
+#define AD7142_PARTID 0xE620
+#define AD7143_PARTID 0xE630
+#define AD7147_PARTID 0x1470
+#define AD7148_PARTID 0x1480
+#define AD714X_STAGECFG_REG 0x80
+#define AD714X_SYSCFG_REG 0x0
+
+#define STG_LOW_INT_EN_REG 0x5
+#define STG_HIGH_INT_EN_REG 0x6
+#define STG_COM_INT_EN_REG 0x7
+#define STG_LOW_INT_STA_REG 0x8
+#define STG_HIGH_INT_STA_REG 0x9
+#define STG_COM_INT_STA_REG 0xA
+
+#define CDC_RESULT_S0 0xB
+#define CDC_RESULT_S1 0xC
+#define CDC_RESULT_S2 0xD
+#define CDC_RESULT_S3 0xE
+#define CDC_RESULT_S4 0xF
+#define CDC_RESULT_S5 0x10
+#define CDC_RESULT_S6 0x11
+#define CDC_RESULT_S7 0x12
+#define CDC_RESULT_S8 0x13
+#define CDC_RESULT_S9 0x14
+#define CDC_RESULT_S10 0x15
+#define CDC_RESULT_S11 0x16
+
+#define STAGE0_AMBIENT 0xF1
+#define STAGE1_AMBIENT 0x115
+#define STAGE2_AMBIENT 0x139
+#define STAGE3_AMBIENT 0x15D
+#define STAGE4_AMBIENT 0x181
+#define STAGE5_AMBIENT 0x1A5
+#define STAGE6_AMBIENT 0x1C9
+#define STAGE7_AMBIENT 0x1ED
+#define STAGE8_AMBIENT 0x211
+#define STAGE9_AMBIENT 0x234
+#define STAGE10_AMBIENT 0x259
+#define STAGE11_AMBIENT 0x27D
+
+#define PER_STAGE_REG_NUM 36
+#define STAGE_NUM 12
+#define STAGE_CFGREG_NUM 8
+#define SYS_CFGREG_NUM 8
+
+/*
+ * driver information which will be used to maintain the software flow
+ */
+enum ad714x_device_state { IDLE, JITTER, ACTIVE, SPACE };
+
+struct ad714x_slider_drv {
+ int highest_stage;
+ int abs_pos;
+ int flt_pos;
+ enum ad714x_device_state state;
+ struct input_dev *input;
+};
+
+struct ad714x_wheel_drv {
+ int abs_pos;
+ int flt_pos;
+ int pre_mean_value;
+ int pre_highest_stage;
+ int pre_mean_value_no_offset;
+ int mean_value;
+ int mean_value_no_offset;
+ int pos_offset;
+ int pos_ratio;
+ int highest_stage;
+ enum ad714x_device_state state;
+ struct input_dev *input;
+};
+
+struct ad714x_touchpad_drv {
+ int x_highest_stage;
+ int x_flt_pos;
+ int x_abs_pos;
+ int y_highest_stage;
+ int y_flt_pos;
+ int y_abs_pos;
+ int left_ep;
+ int left_ep_val;
+ int right_ep;
+ int right_ep_val;
+ int top_ep;
+ int top_ep_val;
+ int bottom_ep;
+ int bottom_ep_val;
+ enum ad714x_device_state state;
+ struct input_dev *input;
+};
+
+struct ad714x_button_drv {
+ enum ad714x_device_state state;
+ /*
+ * Unlike slider/wheel/touchpad, all buttons point to
+ * the same input_dev instance
+ */
+ struct input_dev *input;
+};
+
+struct ad714x_driver_data {
+ struct ad714x_slider_drv *slider;
+ struct ad714x_wheel_drv *wheel;
+ struct ad714x_touchpad_drv *touchpad;
+ struct ad714x_button_drv *button;
+};
+
+/*
+ * per-chip state; this structure is used as the private data
+ * of the SPI/I2C device
+ */
+struct ad714x_chip {
+ unsigned short h_state;
+ unsigned short l_state;
+ unsigned short c_state;
+ unsigned short adc_reg[STAGE_NUM];
+ unsigned short amb_reg[STAGE_NUM];
+ unsigned short sensor_val[STAGE_NUM];
+
+ struct ad714x_platform_data *hw;
+ struct ad714x_driver_data *sw;
+
+ int irq;
+ struct device *dev;
+ ad714x_read_t read;
+ ad714x_write_t write;
+
+ struct mutex mutex;
+
+ unsigned product;
+ unsigned version;
+};
+
+static void ad714x_use_com_int(struct ad714x_chip *ad714x,
+ int start_stage, int end_stage)
+{
+ unsigned short data;
+ unsigned short mask;
+
+ mask = ((1 << (end_stage + 1)) - 1) - (1 << start_stage);
+
+ ad714x->read(ad714x->dev, STG_COM_INT_EN_REG, &data);
+ data |= 1 << start_stage;
+ ad714x->write(ad714x->dev, STG_COM_INT_EN_REG, data);
+
+ ad714x->read(ad714x->dev, STG_HIGH_INT_EN_REG, &data);
+ data &= ~mask;
+ ad714x->write(ad714x->dev, STG_HIGH_INT_EN_REG, data);
+}
+
+static void ad714x_use_thr_int(struct ad714x_chip *ad714x,
+ int start_stage, int end_stage)
+{
+ unsigned short data;
+ unsigned short mask;
+
+ mask = ((1 << (end_stage + 1)) - 1) - (1 << start_stage);
+
+ ad714x->read(ad714x->dev, STG_COM_INT_EN_REG, &data);
+ data &= ~(1 << start_stage);
+ ad714x->write(ad714x->dev, STG_COM_INT_EN_REG, data);
+
+ ad714x->read(ad714x->dev, STG_HIGH_INT_EN_REG, &data);
+ data |= mask;
+ ad714x->write(ad714x->dev, STG_HIGH_INT_EN_REG, data);
+}
+
+static int ad714x_cal_highest_stage(struct ad714x_chip *ad714x,
+ int start_stage, int end_stage)
+{
+ int max_res = 0;
+ int max_idx = 0;
+ int i;
+
+ for (i = start_stage; i <= end_stage; i++) {
+ if (ad714x->sensor_val[i] > max_res) {
+ max_res = ad714x->sensor_val[i];
+ max_idx = i;
+ }
+ }
+
+ return max_idx;
+}
+
+static int ad714x_cal_abs_pos(struct ad714x_chip *ad714x,
+ int start_stage, int end_stage,
+ int highest_stage, int max_coord)
+{
+ int a_param, b_param;
+
+ if (highest_stage == start_stage) {
+ a_param = ad714x->sensor_val[start_stage + 1];
+ b_param = ad714x->sensor_val[start_stage] +
+ ad714x->sensor_val[start_stage + 1];
+ } else if (highest_stage == end_stage) {
+ a_param = ad714x->sensor_val[end_stage] *
+ (end_stage - start_stage) +
+ ad714x->sensor_val[end_stage - 1] *
+ (end_stage - start_stage - 1);
+ b_param = ad714x->sensor_val[end_stage] +
+ ad714x->sensor_val[end_stage - 1];
+ } else {
+ a_param = ad714x->sensor_val[highest_stage] *
+ (highest_stage - start_stage) +
+ ad714x->sensor_val[highest_stage - 1] *
+ (highest_stage - start_stage - 1) +
+ ad714x->sensor_val[highest_stage + 1] *
+ (highest_stage - start_stage + 1);
+ b_param = ad714x->sensor_val[highest_stage] +
+ ad714x->sensor_val[highest_stage - 1] +
+ ad714x->sensor_val[highest_stage + 1];
+ }
+
+ return (max_coord / (end_stage - start_stage)) * a_param / b_param;
+}
+
+/*
+ * One button can be connected to multiple positive and negative CDC inputs;
+ * multiple buttons can share the same positive/negative input of one CDC.
+ */
+static void ad714x_button_state_machine(struct ad714x_chip *ad714x, int idx)
+{
+ struct ad714x_button_plat *hw = &ad714x->hw->button[idx];
+ struct ad714x_button_drv *sw = &ad714x->sw->button[idx];
+
+ switch (sw->state) {
+ case IDLE:
+ if (((ad714x->h_state & hw->h_mask) == hw->h_mask) &&
+ ((ad714x->l_state & hw->l_mask) == hw->l_mask)) {
+ dev_dbg(ad714x->dev, "button %d touched\n", idx);
+ input_report_key(sw->input, hw->keycode, 1);
+ input_sync(sw->input);
+ sw->state = ACTIVE;
+ }
+ break;
+
+ case ACTIVE:
+ if (((ad714x->h_state & hw->h_mask) != hw->h_mask) ||
+ ((ad714x->l_state & hw->l_mask) != hw->l_mask)) {
+ dev_dbg(ad714x->dev, "button %d released\n", idx);
+ input_report_key(sw->input, hw->keycode, 0);
+ input_sync(sw->input);
+ sw->state = IDLE;
+ }
+ break;
+
+ default:
+ break;
+ }
+}
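As an illustration (not part of the diff above): the masks come from platform data, and a button is reported pressed only while every stage in its h_mask is above its high threshold and every stage in its l_mask is below its low threshold. A minimal sketch of that press condition, with made-up masks:

	/* Sketch only: the IDLE-case press test above as a standalone predicate.
	 * Mask values are hypothetical.
	 */
	static int ad714x_button_pressed_sketch(unsigned short h_state,
						unsigned short l_state)
	{
		unsigned short h_mask = 0x0003;	/* stages 0 and 1 above high threshold */
		unsigned short l_mask = 0x0000;	/* no low-threshold requirement */

		return ((h_state & h_mask) == h_mask) &&
		       ((l_state & l_mask) == l_mask);
	}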
+
+/*
+ * The response of a sensor is defined by the absolute number of codes
+ * between the current CDC value and the ambient value.
+ */
+static void ad714x_slider_cal_sensor_val(struct ad714x_chip *ad714x, int idx)
+{
+ struct ad714x_slider_plat *hw = &ad714x->hw->slider[idx];
+ int i;
+
+ for (i = hw->start_stage; i <= hw->end_stage; i++) {
+ ad714x->read(ad714x->dev, CDC_RESULT_S0 + i,
+ &ad714x->adc_reg[i]);
+ ad714x->read(ad714x->dev,
+ STAGE0_AMBIENT + i * PER_STAGE_REG_NUM,
+ &ad714x->amb_reg[i]);
+
+ ad714x->sensor_val[i] = abs(ad714x->adc_reg[i] -
+ ad714x->amb_reg[i]);
+ }
+}
+
+static void ad714x_slider_cal_highest_stage(struct ad714x_chip *ad714x, int idx)
+{
+ struct ad714x_slider_plat *hw = &ad714x->hw->slider[idx];
+ struct ad714x_slider_drv *sw = &ad714x->sw->slider[idx];
+
+ sw->highest_stage = ad714x_cal_highest_stage(ad714x, hw->start_stage,
+ hw->end_stage);
+
+ dev_dbg(ad714x->dev, "slider %d highest_stage:%d\n", idx,
+ sw->highest_stage);
+}
+
+/*
+ * The formula is straightforward: it uses the sensor with the
+ * highest response and the two adjacent ones.
+ * When sensor 0 has the highest response, only sensor 0 and sensor 1
+ * are used in the calculation. Similarly, when the last sensor has the
+ * highest response, only the last sensor and the second-to-last sensor
+ * are used in the calculation.
+ *
+ * For i= idx_of_peak_Sensor-1 to i= idx_of_peak_Sensor+1
+ * v += Sensor response(i)*i
+ * w += Sensor response(i)
+ * POS=(Number_of_Positions_Wanted/(Number_of_Sensors_Used-1)) *(v/w)
+ */
+static void ad714x_slider_cal_abs_pos(struct ad714x_chip *ad714x, int idx)
+{
+ struct ad714x_slider_plat *hw = &ad714x->hw->slider[idx];
+ struct ad714x_slider_drv *sw = &ad714x->sw->slider[idx];
+
+ sw->abs_pos = ad714x_cal_abs_pos(ad714x, hw->start_stage, hw->end_stage,
+ sw->highest_stage, hw->max_coord);
+
+ dev_dbg(ad714x->dev, "slider %d absolute position:%d\n", idx,
+ sw->abs_pos);
+}
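As a hedged numerical illustration (not part of the diff above) of the weighted average implemented by ad714x_cal_abs_pos(), with all stage responses invented:

	/* Hypothetical worked example for ad714x_cal_abs_pos():
	 *   start_stage = 0, end_stage = 7, max_coord = 4095,
	 *   highest_stage = 3, sensor_val[2..4] = { 100, 300, 100 }
	 *
	 *   a_param = 300 * 3 + 100 * 2 + 100 * 4 = 1500
	 *   b_param = 300 + 100 + 100             = 500
	 *   abs_pos = (4095 / 7) * 1500 / 500     = 1755
	 */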
+
+/*
+ * To minimise the impact of noise on the algorithm, ADI developed a
+ * routine that filters the CDC results after they have been read by the
+ * host processor. The filter used is an Infinite Impulse Response (IIR)
+ * filter, implemented in software, that attenuates the noise on the CDC
+ * results:
+ * Filtered_CDC_result = (Filtered_CDC_result * (10 - Coefficient) +
+ * Latest_CDC_result * Coefficient)/10
+ */
+static void ad714x_slider_cal_flt_pos(struct ad714x_chip *ad714x, int idx)
+{
+ struct ad714x_slider_drv *sw = &ad714x->sw->slider[idx];
+
+ sw->flt_pos = (sw->flt_pos * (10 - 4) +
+ sw->abs_pos * 4)/10;
+
+ dev_dbg(ad714x->dev, "slider %d filter position:%d\n", idx,
+ sw->flt_pos);
+}
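With the coefficient of 4 hard-coded above, one filter step behaves as follows (values invented for illustration, not part of the diff):

	/* Hypothetical worked example of one IIR step (coefficient 4):
	 *   previous flt_pos = 1000, new abs_pos = 2000
	 *   flt_pos = (1000 * (10 - 4) + 2000 * 4) / 10 = 1400
	 * i.e. each scan moves the filtered position 40% of the way towards
	 * the newest absolute position.
	 */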
+
+static void ad714x_slider_use_com_int(struct ad714x_chip *ad714x, int idx)
+{
+ struct ad714x_slider_plat *hw = &ad714x->hw->slider[idx];
+
+ ad714x_use_com_int(ad714x, hw->start_stage, hw->end_stage);
+}
+
+static void ad714x_slider_use_thr_int(struct ad714x_chip *ad714x, int idx)
+{
+ struct ad714x_slider_plat *hw = &ad714x->hw->slider[idx];
+
+ ad714x_use_thr_int(ad714x, hw->start_stage, hw->end_stage);
+}
+
+static void ad714x_slider_state_machine(struct ad714x_chip *ad714x, int idx)
+{
+ struct ad714x_slider_plat *hw = &ad714x->hw->slider[idx];
+ struct ad714x_slider_drv *sw = &ad714x->sw->slider[idx];
+ unsigned short h_state, c_state;
+ unsigned short mask;
+
+ mask = ((1 << (hw->end_stage + 1)) - 1) - ((1 << hw->start_stage) - 1);
+
+ h_state = ad714x->h_state & mask;
+ c_state = ad714x->c_state & mask;
+
+ switch (sw->state) {
+ case IDLE:
+ if (h_state) {
+ sw->state = JITTER;
+ /* In End of Conversion interrupt mode, the AD714X
+ * continuously generates hardware interrupts.
+ */
+ ad714x_slider_use_com_int(ad714x, idx);
+ dev_dbg(ad714x->dev, "slider %d touched\n", idx);
+ }
+ break;
+
+ case JITTER:
+ if (c_state == mask) {
+ ad714x_slider_cal_sensor_val(ad714x, idx);
+ ad714x_slider_cal_highest_stage(ad714x, idx);
+ ad714x_slider_cal_abs_pos(ad714x, idx);
+ sw->flt_pos = sw->abs_pos;
+ sw->state = ACTIVE;
+ }
+ break;
+
+ case ACTIVE:
+ if (c_state == mask) {
+ if (h_state) {
+ ad714x_slider_cal_sensor_val(ad714x, idx);
+ ad714x_slider_cal_highest_stage(ad714x, idx);
+ ad714x_slider_cal_abs_pos(ad714x, idx);
+ ad714x_slider_cal_flt_pos(ad714x, idx);
+
+ input_report_abs(sw->input, ABS_X, sw->flt_pos);
+ input_report_key(sw->input, BTN_TOUCH, 1);
+ } else {
+ /* When the user lifts off the sensor, configure
+ * the AD714X back to threshold interrupt mode.
+ */
+ ad714x_slider_use_thr_int(ad714x, idx);
+ sw->state = IDLE;
+ input_report_key(sw->input, BTN_TOUCH, 0);
+ dev_dbg(ad714x->dev, "slider %d released\n",
+ idx);
+ }
+ input_sync(sw->input);
+ }
+ break;
+
+ default:
+ break;
+ }
+}
+
+/*
+ * When the scroll wheel is activated, we compute the absolute position based
+ * on the sensor values. To calculate the position, we first determine the
+ * sensor that has the greatest response among the 8 sensors that constitute
+ * the scrollwheel. Then we determine the 2 sensors on either side of the
+ * sensor with the highest response and apply weights to these sensors.
+ */
+static void ad714x_wheel_cal_highest_stage(struct ad714x_chip *ad714x, int idx)
+{
+ struct ad714x_wheel_plat *hw = &ad714x->hw->wheel[idx];
+ struct ad714x_wheel_drv *sw = &ad714x->sw->wheel[idx];
+
+ sw->pre_highest_stage = sw->highest_stage;
+ sw->highest_stage = ad714x_cal_highest_stage(ad714x, hw->start_stage,
+ hw->end_stage);
+
+ dev_dbg(ad714x->dev, "wheel %d highest_stage:%d\n", idx,
+ sw->highest_stage);
+}
+
+static void ad714x_wheel_cal_sensor_val(struct ad714x_chip *ad714x, int idx)
+{
+ struct ad714x_wheel_plat *hw = &ad714x->hw->wheel[idx];
+ int i;
+
+ for (i = hw->start_stage; i <= hw->end_stage; i++) {
+ ad714x->read(ad714x->dev, CDC_RESULT_S0 + i,
+ &ad714x->adc_reg[i]);
+ ad714x->read(ad714x->dev,
+ STAGE0_AMBIENT + i * PER_STAGE_REG_NUM,
+ &ad714x->amb_reg[i]);
+ if (ad714x->adc_reg[i] > ad714x->amb_reg[i])
+ ad714x->sensor_val[i] = ad714x->adc_reg[i] -
+ ad714x->amb_reg[i];
+ else
+ ad714x->sensor_val[i] = 0;
+ }
+}
+
+/*
+ * When the scroll wheel is activated, we compute the absolute position based
+ * on the sensor values. To calculate the position, we first determine the
+ * sensor that has the greatest response among the 8 sensors that constitute
+ * the scrollwheel. Then we determine the 2 sensors on either side of the
+ * sensor with the highest response and apply weights to these sensors. The
+ * result of this computation gives us the mean value, which is defined by the
+ * following formula:
+ * For i= second_before_highest_stage to i= second_after_highest_stage
+ * v += Sensor response(i)*WEIGHT*(i+3)
+ * w += Sensor response(i)
+ * Mean_Value=v/w
+ * pos_on_scrollwheel = (Mean_Value - position_offset) / position_ratio
+ */
+
+#define WEIGHT_FACTOR 30
+/* This constant prevents the "PositionOffset" from reaching a big value */
+#define OFFSET_POSITION_CLAMP 120
+static void ad714x_wheel_cal_abs_pos(struct ad714x_chip *ad714x, int idx)
+{
+ struct ad714x_wheel_plat *hw = &ad714x->hw->wheel[idx];
+ struct ad714x_wheel_drv *sw = &ad714x->sw->wheel[idx];
+ int stage_num = hw->end_stage - hw->start_stage + 1;
+ int second_before, first_before, highest, first_after, second_after;
+ int a_param, b_param;
+
+ /* Calculate Mean value */
+
+ second_before = (sw->highest_stage + stage_num - 2) % stage_num;
+ first_before = (sw->highest_stage + stage_num - 1) % stage_num;
+ highest = sw->highest_stage;
+ first_after = (sw->highest_stage + stage_num + 1) % stage_num;
+ second_after = (sw->highest_stage + stage_num + 2) % stage_num;
+
+ if (((sw->highest_stage - hw->start_stage) > 1) &&
+ ((hw->end_stage - sw->highest_stage) > 1)) {
+ a_param = ad714x->sensor_val[second_before] *
+ (second_before - hw->start_stage + 3) +
+ ad714x->sensor_val[first_before] *
+ (second_before - hw->start_stage + 3) +
+ ad714x->sensor_val[highest] *
+ (second_before - hw->start_stage + 3) +
+ ad714x->sensor_val[first_after] *
+ (first_after - hw->start_stage + 3) +
+ ad714x->sensor_val[second_after] *
+ (second_after - hw->start_stage + 3);
+ } else {
+ a_param = ad714x->sensor_val[second_before] *
+ (second_before - hw->start_stage + 1) +
+ ad714x->sensor_val[first_before] *
+ (second_before - hw->start_stage + 2) +
+ ad714x->sensor_val[highest] *
+ (second_before - hw->start_stage + 3) +
+ ad714x->sensor_val[first_after] *
+ (first_after - hw->start_stage + 4) +
+ ad714x->sensor_val[second_after] *
+ (second_after - hw->start_stage + 5);
+ }
+ a_param *= WEIGHT_FACTOR;
+
+ b_param = ad714x->sensor_val[second_before] +
+ ad714x->sensor_val[first_before] +
+ ad714x->sensor_val[highest] +
+ ad714x->sensor_val[first_after] +
+ ad714x->sensor_val[second_after];
+
+ sw->pre_mean_value = sw->mean_value;
+ sw->mean_value = a_param / b_param;
+
+ /* Calculate the offset */
+
+ if ((sw->pre_highest_stage == hw->end_stage) &&
+ (sw->highest_stage == hw->start_stage))
+ sw->pos_offset = sw->mean_value;
+ else if ((sw->pre_highest_stage == hw->start_stage) &&
+ (sw->highest_stage == hw->end_stage))
+ sw->pos_offset = sw->pre_mean_value;
+
+ if (sw->pos_offset > OFFSET_POSITION_CLAMP)
+ sw->pos_offset = OFFSET_POSITION_CLAMP;
+
+ /* Calculate the mean value without the offset */
+
+ sw->pre_mean_value_no_offset = sw->mean_value_no_offset;
+ sw->mean_value_no_offset = sw->mean_value - sw->pos_offset;
+ if (sw->mean_value_no_offset < 0)
+ sw->mean_value_no_offset = 0;
+
+ /* Calculate ratio to scale down to NUMBER_OF_WANTED_POSITIONS */
+
+ if ((sw->pre_highest_stage == hw->end_stage) &&
+ (sw->highest_stage == hw->start_stage))
+ sw->pos_ratio = (sw->pre_mean_value_no_offset * 100) /
+ hw->max_coord;
+ else if ((sw->pre_highest_stage == hw->start_stage) &&
+ (sw->highest_stage == hw->end_stage))
+ sw->pos_ratio = (sw->mean_value_no_offset * 100) /
+ hw->max_coord;
+ sw->abs_pos = (sw->mean_value_no_offset * 100) / sw->pos_ratio;
+ if (sw->abs_pos > hw->max_coord)
+ sw->abs_pos = hw->max_coord;
+}
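The modular indexing at the top of this function is what lets the wheel wrap around. A hedged example (not part of the diff), assuming the wheel occupies stages 0..7:

	/* Hypothetical example of the wrap-around indexing above
	 * (stage_num = 8, peak on stage 0):
	 *   second_before = (0 + 8 - 2) % 8 = 6
	 *   first_before  = (0 + 8 - 1) % 8 = 7
	 *   first_after   = (0 + 8 + 1) % 8 = 1
	 *   second_after  = (0 + 8 + 2) % 8 = 2
	 * The "before" neighbours come from the far end of the wheel, which is
	 * what keeps the reported position continuous across the seam.
	 */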
+
+static void ad714x_wheel_cal_flt_pos(struct ad714x_chip *ad714x, int idx)
+{
+ struct ad714x_wheel_plat *hw = &ad714x->hw->wheel[idx];
+ struct ad714x_wheel_drv *sw = &ad714x->sw->wheel[idx];
+ if (((sw->pre_highest_stage == hw->end_stage) &&
+ (sw->highest_stage == hw->start_stage)) ||
+ ((sw->pre_highest_stage == hw->start_stage) &&
+ (sw->highest_stage == hw->end_stage)))
+ sw->flt_pos = sw->abs_pos;
+ else
+ sw->flt_pos = ((sw->flt_pos * 30) + (sw->abs_pos * 71)) / 100;
+
+ if (sw->flt_pos > hw->max_coord)
+ sw->flt_pos = hw->max_coord;
+}
+
+static void ad714x_wheel_use_com_int(struct ad714x_chip *ad714x, int idx)
+{
+ struct ad714x_wheel_plat *hw = &ad714x->hw->wheel[idx];
+
+ ad714x_use_com_int(ad714x, hw->start_stage, hw->end_stage);
+}
+
+static void ad714x_wheel_use_thr_int(struct ad714x_chip *ad714x, int idx)
+{
+ struct ad714x_wheel_plat *hw = &ad714x->hw->wheel[idx];
+
+ ad714x_use_thr_int(ad714x, hw->start_stage, hw->end_stage);
+}
+
+static void ad714x_wheel_state_machine(struct ad714x_chip *ad714x, int idx)
+{
+ struct ad714x_wheel_plat *hw = &ad714x->hw->wheel[idx];
+ struct ad714x_wheel_drv *sw = &ad714x->sw->wheel[idx];
+ unsigned short h_state, c_state;
+ unsigned short mask;
+
+ mask = ((1 << (hw->end_stage + 1)) - 1) - ((1 << hw->start_stage) - 1);
+
+ h_state = ad714x->h_state & mask;
+ c_state = ad714x->c_state & mask;
+
+ switch (sw->state) {
+ case IDLE:
+ if (h_state) {
+ sw->state = JITTER;
+ /* In End of Conversion interrupt mode, the AD714X
+ * continuously generates hardware interrupts.
+ */
+ ad714x_wheel_use_com_int(ad714x, idx);
+ dev_dbg(ad714x->dev, "wheel %d touched\n", idx);
+ }
+ break;
+
+ case JITTER:
+ if (c_state == mask) {
+ ad714x_wheel_cal_sensor_val(ad714x, idx);
+ ad714x_wheel_cal_highest_stage(ad714x, idx);
+ ad714x_wheel_cal_abs_pos(ad714x, idx);
+ sw->flt_pos = sw->abs_pos;
+ sw->state = ACTIVE;
+ }
+ break;
+
+ case ACTIVE:
+ if (c_state == mask) {
+ if (h_state) {
+ ad714x_wheel_cal_sensor_val(ad714x, idx);
+ ad714x_wheel_cal_highest_stage(ad714x, idx);
+ ad714x_wheel_cal_abs_pos(ad714x, idx);
+ ad714x_wheel_cal_flt_pos(ad714x, idx);
+
+ input_report_abs(sw->input, ABS_WHEEL,
+ sw->abs_pos);
+ input_report_key(sw->input, BTN_TOUCH, 1);
+ } else {
+ /* When the user lifts off the sensor, configure
+ * the AD714X back to threshold interrupt mode.
+ */
+ ad714x_wheel_use_thr_int(ad714x, idx);
+ sw->state = IDLE;
+ input_report_key(sw->input, BTN_TOUCH, 0);
+
+ dev_dbg(ad714x->dev, "wheel %d released\n",
+ idx);
+ }
+ input_sync(sw->input);
+ }
+ break;
+
+ default:
+ break;
+ }
+}
+
+static void touchpad_cal_sensor_val(struct ad714x_chip *ad714x, int idx)
+{
+ struct ad714x_touchpad_plat *hw = &ad714x->hw->touchpad[idx];
+ int i;
+
+ for (i = hw->x_start_stage; i <= hw->x_end_stage; i++) {
+ ad714x->read(ad714x->dev, CDC_RESULT_S0 + i,
+ &ad714x->adc_reg[i]);
+ ad714x->read(ad714x->dev,
+ STAGE0_AMBIENT + i * PER_STAGE_REG_NUM,
+ &ad714x->amb_reg[i]);
+ if (ad714x->adc_reg[i] > ad714x->amb_reg[i])
+ ad714x->sensor_val[i] = ad714x->adc_reg[i] -
+ ad714x->amb_reg[i];
+ else
+ ad714x->sensor_val[i] = 0;
+ }
+}
+
+static void touchpad_cal_highest_stage(struct ad714x_chip *ad714x, int idx)
+{
+ struct ad714x_touchpad_plat *hw = &ad714x->hw->touchpad[idx];
+ struct ad714x_touchpad_drv *sw = &ad714x->sw->touchpad[idx];
+
+ sw->x_highest_stage = ad714x_cal_highest_stage(ad714x,
+ hw->x_start_stage, hw->x_end_stage);
+ sw->y_highest_stage = ad714x_cal_highest_stage(ad714x,
+ hw->y_start_stage, hw->y_end_stage);
+
+ dev_dbg(ad714x->dev,
+ "touchpad %d x_highest_stage:%d, y_highest_stage:%d\n",
+ idx, sw->x_highest_stage, sw->y_highest_stage);
+}
+
+/*
+ * If 2 fingers are touching the sensor then 2 peaks can be observed in the
+ * distribution.
+ * The arithmetic does not yet support computing absolute coordinates for
+ * multi-touch.
+ */
+static int touchpad_check_second_peak(struct ad714x_chip *ad714x, int idx)
+{
+ struct ad714x_touchpad_plat *hw = &ad714x->hw->touchpad[idx];
+ struct ad714x_touchpad_drv *sw = &ad714x->sw->touchpad[idx];
+ int i;
+
+ for (i = hw->x_start_stage; i < sw->x_highest_stage; i++) {
+ if ((ad714x->sensor_val[i] - ad714x->sensor_val[i + 1])
+ > (ad714x->sensor_val[i + 1] / 10))
+ return 1;
+ }
+
+ for (i = sw->x_highest_stage; i < hw->x_end_stage; i++) {
+ if ((ad714x->sensor_val[i + 1] - ad714x->sensor_val[i])
+ > (ad714x->sensor_val[i] / 10))
+ return 1;
+ }
+
+ for (i = hw->y_start_stage; i < sw->y_highest_stage; i++) {
+ if ((ad714x->sensor_val[i] - ad714x->sensor_val[i + 1])
+ > (ad714x->sensor_val[i + 1] / 10))
+ return 1;
+ }
+
+ for (i = sw->y_highest_stage; i < hw->y_end_stage; i++) {
+ if ((ad714x->sensor_val[i + 1] - ad714x->sensor_val[i])
+ > (ad714x->sensor_val[i] / 10))
+ return 1;
+ }
+
+ return 0;
+}
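A hedged numerical illustration of the 10% test above, with invented stage readings (not part of the diff):

	/* Hypothetical example: with the x stages reading
	 *   { 50, 200, 60, 400, 150 } and the peak at stage 3,
	 * the loop over stages before the peak hits i = 1:
	 *   sensor_val[1] - sensor_val[2] = 140  >  sensor_val[2] / 10 = 6
	 * so the rise-and-fall at stage 1 counts as a second peak and the scan
	 * is treated as a probable two-finger touch.
	 */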
+
+/*
+ * If only one finger is used to activate the touch pad then only 1 peak will be
+ * registered in the distribution. This peak and the 2 adjacent sensors will be
+ * used in the calculation of the absolute position. This prevents hand
+ * shadows from affecting the absolute position calculation.
+ */
+static void touchpad_cal_abs_pos(struct ad714x_chip *ad714x, int idx)
+{
+ struct ad714x_touchpad_plat *hw = &ad714x->hw->touchpad[idx];
+ struct ad714x_touchpad_drv *sw = &ad714x->sw->touchpad[idx];
+
+ sw->x_abs_pos = ad714x_cal_abs_pos(ad714x, hw->x_start_stage,
+ hw->x_end_stage, sw->x_highest_stage, hw->x_max_coord);
+ sw->y_abs_pos = ad714x_cal_abs_pos(ad714x, hw->y_start_stage,
+ hw->y_end_stage, sw->y_highest_stage, hw->y_max_coord);
+
+ dev_dbg(ad714x->dev, "touchpad %d absolute position:(%d, %d)\n", idx,
+ sw->x_abs_pos, sw->y_abs_pos);
+}
+
+static void touchpad_cal_flt_pos(struct ad714x_chip *ad714x, int idx)
+{
+ struct ad714x_touchpad_drv *sw = &ad714x->sw->touchpad[idx];
+
+ sw->x_flt_pos = (sw->x_flt_pos * (10 - 4) +
+ sw->x_abs_pos * 4)/10;
+ sw->y_flt_pos = (sw->y_flt_pos * (10 - 4) +
+ sw->y_abs_pos * 4)/10;
+
+ dev_dbg(ad714x->dev, "touchpad %d filter position:(%d, %d)\n",
+ idx, sw->x_flt_pos, sw->y_flt_pos);
+}
+
+/*
+ * To prevent distortion from showing in the absolute position, it is
+ * necessary to detect the end points. When endpoints are detected, the
+ * driver stops updating the status variables with absolute positions.
+ * End points are detected on the 4 edges of the touchpad sensor. The
+ * method to detect them is the same for all 4.
+ * To detect the end points, the firmware computes the difference, in
+ * percent, between the sensor on the edge and the adjacent one; using a
+ * percentage makes the end point detection independent of the pressure.
+ */
+
+#define LEFT_END_POINT_DETECTION_LEVEL 550
+#define RIGHT_END_POINT_DETECTION_LEVEL 750
+#define LEFT_RIGHT_END_POINT_DEAVTIVALION_LEVEL 850
+#define TOP_END_POINT_DETECTION_LEVEL 550
+#define BOTTOM_END_POINT_DETECTION_LEVEL 950
+#define TOP_BOTTOM_END_POINT_DEAVTIVALION_LEVEL 700
+static int touchpad_check_endpoint(struct ad714x_chip *ad714x, int idx)
+{
+ struct ad714x_touchpad_plat *hw = &ad714x->hw->touchpad[idx];
+ struct ad714x_touchpad_drv *sw = &ad714x->sw->touchpad[idx];
+ int percent_sensor_diff;
+
+ /* left endpoint detect */
+ percent_sensor_diff = (ad714x->sensor_val[hw->x_start_stage] -
+ ad714x->sensor_val[hw->x_start_stage + 1]) * 100 /
+ ad714x->sensor_val[hw->x_start_stage + 1];
+ if (!sw->left_ep) {
+ if (percent_sensor_diff >= LEFT_END_POINT_DETECTION_LEVEL) {
+ sw->left_ep = 1;
+ sw->left_ep_val =
+ ad714x->sensor_val[hw->x_start_stage + 1];
+ }
+ } else {
+ if ((percent_sensor_diff < LEFT_END_POINT_DETECTION_LEVEL) &&
+ (ad714x->sensor_val[hw->x_start_stage + 1] >
+ LEFT_RIGHT_END_POINT_DEAVTIVALION_LEVEL + sw->left_ep_val))
+ sw->left_ep = 0;
+ }
+
+ /* right endpoint detect */
+ percent_sensor_diff = (ad714x->sensor_val[hw->x_end_stage] -
+ ad714x->sensor_val[hw->x_end_stage - 1]) * 100 /
+ ad714x->sensor_val[hw->x_end_stage - 1];
+ if (!sw->right_ep) {
+ if (percent_sensor_diff >= RIGHT_END_POINT_DETECTION_LEVEL) {
+ sw->right_ep = 1;
+ sw->right_ep_val =
+ ad714x->sensor_val[hw->x_end_stage - 1];
+ }
+ } else {
+ if ((percent_sensor_diff < RIGHT_END_POINT_DETECTION_LEVEL) &&
+ (ad714x->sensor_val[hw->x_end_stage - 1] >
+ LEFT_RIGHT_END_POINT_DEAVTIVALION_LEVEL + sw->right_ep_val))
+ sw->right_ep = 0;
+ }
+
+ /* top endpoint detect */
+ percent_sensor_diff = (ad714x->sensor_val[hw->y_start_stage] -
+ ad714x->sensor_val[hw->y_start_stage + 1]) * 100 /
+ ad714x->sensor_val[hw->y_start_stage + 1];
+ if (!sw->top_ep) {
+ if (percent_sensor_diff >= TOP_END_POINT_DETECTION_LEVEL) {
+ sw->top_ep = 1;
+ sw->top_ep_val =
+ ad714x->sensor_val[hw->y_start_stage + 1];
+ }
+ } else {
+ if ((percent_sensor_diff < TOP_END_POINT_DETECTION_LEVEL) &&
+ (ad714x->sensor_val[hw->y_start_stage + 1] >
+ TOP_BOTTOM_END_POINT_DEAVTIVALION_LEVEL + sw->top_ep_val))
+ sw->top_ep = 0;
+ }
+
+ /* bottom endpoint detect */
+ percent_sensor_diff = (ad714x->sensor_val[hw->y_end_stage] -
+ ad714x->sensor_val[hw->y_end_stage - 1]) * 100 /
+ ad714x->sensor_val[hw->y_end_stage - 1];
+ if (!sw->bottom_ep) {
+ if (percent_sensor_diff >= BOTTOM_END_POINT_DETECTION_LEVEL) {
+ sw->bottom_ep = 1;
+ sw->bottom_ep_val =
+ ad714x->sensor_val[hw->y_end_stage - 1];
+ }
+ } else {
+ if ((percent_sensor_diff < BOTTOM_END_POINT_DETECTION_LEVEL) &&
+ (ad714x->sensor_val[hw->y_end_stage - 1] >
+ TOP_BOTTOM_END_POINT_DEAVTIVALION_LEVEL + sw->bottom_ep_val))
+ sw->bottom_ep = 0;
+ }
+
+ return sw->left_ep || sw->right_ep || sw->top_ep || sw->bottom_ep;
+}
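A hedged numerical illustration of the left-edge test above, with invented readings (not part of the diff):

	/* Hypothetical example: the edge stage reads 800 and its neighbour
	 * reads 100, so
	 *   percent_sensor_diff = (800 - 100) * 100 / 100 = 700 >= 550
	 * and left_ep latches with left_ep_val = 100. It is released only once
	 * the difference drops below 550 and the neighbour rises above
	 * LEFT_RIGHT_END_POINT_DEAVTIVALION_LEVEL + 100 = 950.
	 */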
+
+static void touchpad_use_com_int(struct ad714x_chip *ad714x, int idx)
+{
+ struct ad714x_touchpad_plat *hw = &ad714x->hw->touchpad[idx];
+
+ ad714x_use_com_int(ad714x, hw->x_start_stage, hw->x_end_stage);
+}
+
+static void touchpad_use_thr_int(struct ad714x_chip *ad714x, int idx)
+{
+ struct ad714x_touchpad_plat *hw = &ad714x->hw->touchpad[idx];
+
+ ad714x_use_thr_int(ad714x, hw->x_start_stage, hw->x_end_stage);
+ ad714x_use_thr_int(ad714x, hw->y_start_stage, hw->y_end_stage);
+}
+
+static void ad714x_touchpad_state_machine(struct ad714x_chip *ad714x, int idx)
+{
+ struct ad714x_touchpad_plat *hw = &ad714x->hw->touchpad[idx];
+ struct ad714x_touchpad_drv *sw = &ad714x->sw->touchpad[idx];
+ unsigned short h_state, c_state;
+ unsigned short mask;
+
+ mask = (((1 << (hw->x_end_stage + 1)) - 1) -
+ ((1 << hw->x_start_stage) - 1)) +
+ (((1 << (hw->y_end_stage + 1)) - 1) -
+ ((1 << hw->y_start_stage) - 1));
+
+ h_state = ad714x->h_state & mask;
+ c_state = ad714x->c_state & mask;
+
+ switch (sw->state) {
+ case IDLE:
+ if (h_state) {
+ sw->state = JITTER;
+ /* In End of Conversion interrupt mode, the AD714X
+ * continuously generates hardware interrupts.
+ */
+ touchpad_use_com_int(ad714x, idx);
+ dev_dbg(ad714x->dev, "touchpad %d touched\n", idx);
+ }
+ break;
+
+ case JITTER:
+ if (c_state == mask) {
+ touchpad_cal_sensor_val(ad714x, idx);
+ touchpad_cal_highest_stage(ad714x, idx);
+ if ((!touchpad_check_second_peak(ad714x, idx)) &&
+ (!touchpad_check_endpoint(ad714x, idx))) {
+ dev_dbg(ad714x->dev,
+ "touchpad%d, 2 fingers or endpoint\n",
+ idx);
+ touchpad_cal_abs_pos(ad714x, idx);
+ sw->x_flt_pos = sw->x_abs_pos;
+ sw->y_flt_pos = sw->y_abs_pos;
+ sw->state = ACTIVE;
+ }
+ }
+ break;
+
+ case ACTIVE:
+ if (c_state == mask) {
+ if (h_state) {
+ touchpad_cal_sensor_val(ad714x, idx);
+ touchpad_cal_highest_stage(ad714x, idx);
+ if ((!touchpad_check_second_peak(ad714x, idx))
+ && (!touchpad_check_endpoint(ad714x, idx))) {
+ touchpad_cal_abs_pos(ad714x, idx);
+ touchpad_cal_flt_pos(ad714x, idx);
+ input_report_abs(sw->input, ABS_X,
+ sw->x_flt_pos);
+ input_report_abs(sw->input, ABS_Y,
+ sw->y_flt_pos);
+ input_report_key(sw->input, BTN_TOUCH,
+ 1);
+ }
+ } else {
+ /* When the user lifts off the sensor, configure
+ * the AD714X back to threshold interrupt mode.
+ */
+ touchpad_use_thr_int(ad714x, idx);
+ sw->state = IDLE;
+ input_report_key(sw->input, BTN_TOUCH, 0);
+ dev_dbg(ad714x->dev, "touchpad %d released\n",
+ idx);
+ }
+ input_sync(sw->input);
+ }
+ break;
+
+ default:
+ break;
+ }
+}
+
+static int ad714x_hw_detect(struct ad714x_chip *ad714x)
+{
+ unsigned short data;
+
+ ad714x->read(ad714x->dev, AD714X_PARTID_REG, &data);
+ switch (data & 0xFFF0) {
+ case AD7142_PARTID:
+ ad714x->product = 0x7142;
+ ad714x->version = data & 0xF;
+ dev_info(ad714x->dev, "found AD7142 captouch, rev:%d\n",
+ ad714x->version);
+ return 0;
+
+ case AD7143_PARTID:
+ ad714x->product = 0x7143;
+ ad714x->version = data & 0xF;
+ dev_info(ad714x->dev, "found AD7143 captouch, rev:%d\n",
+ ad714x->version);
+ return 0;
+
+ case AD7147_PARTID:
+ ad714x->product = 0x7147;
+ ad714x->version = data & 0xF;
+ dev_info(ad714x->dev, "found AD7147(A) captouch, rev:%d\n",
+ ad714x->version);
+ return 0;
+
+ case AD7148_PARTID:
+ ad714x->product = 0x7148;
+ ad714x->version = data & 0xF;
+ dev_info(ad714x->dev, "found AD7148 captouch, rev:%d\n",
+ ad714x->version);
+ return 0;
+
+ default:
+ dev_err(ad714x->dev,
+ "fail to detect AD714X captouch, read ID is %04x\n",
+ data);
+ return -ENODEV;
+ }
+}
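A hedged example of the decode above, with an invented register value (not part of the diff):

	/* Hypothetical example: a part-ID read of 0x1472 decodes as
	 *   data & 0xFFF0 = 0x1470  ->  AD7147(A)
	 *   data & 0x000F = 0x2     ->  silicon revision 2
	 */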
+
+static void ad714x_hw_init(struct ad714x_chip *ad714x)
+{
+ int i, j;
+ unsigned short reg_base;
+ unsigned short data;
+
+ /* configuration CDC and interrupts */
+
+ for (i = 0; i < STAGE_NUM; i++) {
+ reg_base = AD714X_STAGECFG_REG + i * STAGE_CFGREG_NUM;
+ for (j = 0; j < STAGE_CFGREG_NUM; j++)
+ ad714x->write(ad714x->dev, reg_base + j,
+ ad714x->hw->stage_cfg_reg[i][j]);
+ }
+
+ for (i = 0; i < SYS_CFGREG_NUM; i++)
+ ad714x->write(ad714x->dev, AD714X_SYSCFG_REG + i,
+ ad714x->hw->sys_cfg_reg[i]);
+ for (i = 0; i < SYS_CFGREG_NUM; i++)
+ ad714x->read(ad714x->dev, AD714X_SYSCFG_REG + i,
+ &data);
+
+ ad714x->write(ad714x->dev, AD714X_STG_CAL_EN_REG, 0xFFF);
+
+ /* clear all interrupts */
+ ad714x->read(ad714x->dev, STG_LOW_INT_STA_REG, &data);
+ ad714x->read(ad714x->dev, STG_HIGH_INT_STA_REG, &data);
+ ad714x->read(ad714x->dev, STG_COM_INT_STA_REG, &data);
+}
+
+static irqreturn_t ad714x_interrupt_thread(int irq, void *data)
+{
+ struct ad714x_chip *ad714x = data;
+ int i;
+
+ mutex_lock(&ad714x->mutex);
+
+ ad714x->read(ad714x->dev, STG_LOW_INT_STA_REG, &ad714x->l_state);
+ ad714x->read(ad714x->dev, STG_HIGH_INT_STA_REG, &ad714x->h_state);
+ ad714x->read(ad714x->dev, STG_COM_INT_STA_REG, &ad714x->c_state);
+
+ for (i = 0; i < ad714x->hw->button_num; i++)
+ ad714x_button_state_machine(ad714x, i);
+ for (i = 0; i < ad714x->hw->slider_num; i++)
+ ad714x_slider_state_machine(ad714x, i);
+ for (i = 0; i < ad714x->hw->wheel_num; i++)
+ ad714x_wheel_state_machine(ad714x, i);
+ for (i = 0; i < ad714x->hw->touchpad_num; i++)
+ ad714x_touchpad_state_machine(ad714x, i);
+
+ mutex_unlock(&ad714x->mutex);
+
+ return IRQ_HANDLED;
+}
+
+#define MAX_DEVICE_NUM 8
+struct ad714x_chip *ad714x_probe(struct device *dev, u16 bus_type, int irq,
+ ad714x_read_t read, ad714x_write_t write)
+{
+ int i, alloc_idx;
+ int error;
+ struct input_dev *input[MAX_DEVICE_NUM];
+
+ struct ad714x_platform_data *plat_data = dev->platform_data;
+ struct ad714x_chip *ad714x;
+ void *drv_mem;
+
+ struct ad714x_button_drv *bt_drv;
+ struct ad714x_slider_drv *sd_drv;
+ struct ad714x_wheel_drv *wl_drv;
+ struct ad714x_touchpad_drv *tp_drv;
+
+
+ if (irq <= 0) {
+ dev_err(dev, "IRQ not configured!\n");
+ error = -EINVAL;
+ goto err_out;
+ }
+
+ if (dev->platform_data == NULL) {
+ dev_err(dev, "platform data for ad714x doesn't exist\n");
+ error = -EINVAL;
+ goto err_out;
+ }
+
+ ad714x = kzalloc(sizeof(*ad714x) + sizeof(*ad714x->sw) +
+ sizeof(*sd_drv) * plat_data->slider_num +
+ sizeof(*wl_drv) * plat_data->wheel_num +
+ sizeof(*tp_drv) * plat_data->touchpad_num +
+ sizeof(*bt_drv) * plat_data->button_num, GFP_KERNEL);
+ if (!ad714x) {
+ error = -ENOMEM;
+ goto err_out;
+ }
+
+ ad714x->hw = plat_data;
+
+ drv_mem = ad714x + 1;
+ ad714x->sw = drv_mem;
+ drv_mem += sizeof(*ad714x->sw);
+ ad714x->sw->slider = sd_drv = drv_mem;
+ drv_mem += sizeof(*sd_drv) * ad714x->hw->slider_num;
+ ad714x->sw->wheel = wl_drv = drv_mem;
+ drv_mem += sizeof(*wl_drv) * ad714x->hw->wheel_num;
+ ad714x->sw->touchpad = tp_drv = drv_mem;
+ drv_mem += sizeof(*tp_drv) * ad714x->hw->touchpad_num;
+ ad714x->sw->button = bt_drv = drv_mem;
+ drv_mem += sizeof(*bt_drv) * ad714x->hw->button_num;
+
+ ad714x->read = read;
+ ad714x->write = write;
+ ad714x->irq = irq;
+ ad714x->dev = dev;
+
+ error = ad714x_hw_detect(ad714x);
+ if (error)
+ goto err_free_mem;
+
+ /* initialize and request sw/hw resources */
+
+ ad714x_hw_init(ad714x);
+ mutex_init(&ad714x->mutex);
+
+ /*
+ * Allocate and register AD714X input device
+ */
+ alloc_idx = 0;
+
+ /* a slider uses one input_dev instance */
+ if (ad714x->hw->slider_num > 0) {
+ struct ad714x_slider_plat *sd_plat = ad714x->hw->slider;
+
+ for (i = 0; i < ad714x->hw->slider_num; i++) {
+ sd_drv[i].input = input[alloc_idx] = input_allocate_device();
+ if (!input[alloc_idx]) {
+ error = -ENOMEM;
+ goto err_free_dev;
+ }
+
+ __set_bit(EV_ABS, input[alloc_idx]->evbit);
+ __set_bit(EV_KEY, input[alloc_idx]->evbit);
+ __set_bit(ABS_X, input[alloc_idx]->absbit);
+ __set_bit(BTN_TOUCH, input[alloc_idx]->keybit);
+ input_set_abs_params(input[alloc_idx],
+ ABS_X, 0, sd_plat->max_coord, 0, 0);
+
+ input[alloc_idx]->id.bustype = bus_type;
+ input[alloc_idx]->id.product = ad714x->product;
+ input[alloc_idx]->id.version = ad714x->version;
+
+ error = input_register_device(input[alloc_idx]);
+ if (error)
+ goto err_free_dev;
+
+ alloc_idx++;
+ }
+ }
+
+ /* a wheel uses one input_dev instance */
+ if (ad714x->hw->wheel_num > 0) {
+ struct ad714x_wheel_plat *wl_plat = ad714x->hw->wheel;
+
+ for (i = 0; i < ad714x->hw->wheel_num; i++) {
+ wl_drv[i].input = input[alloc_idx] = input_allocate_device();
+ if (!input[alloc_idx]) {
+ error = -ENOMEM;
+ goto err_free_dev;
+ }
+
+ __set_bit(EV_KEY, input[alloc_idx]->evbit);
+ __set_bit(EV_ABS, input[alloc_idx]->evbit);
+ __set_bit(ABS_WHEEL, input[alloc_idx]->absbit);
+ __set_bit(BTN_TOUCH, input[alloc_idx]->keybit);
+ input_set_abs_params(input[alloc_idx],
+ ABS_WHEEL, 0, wl_plat->max_coord, 0, 0);
+
+ input[alloc_idx]->id.bustype = bus_type;
+ input[alloc_idx]->id.product = ad714x->product;
+ input[alloc_idx]->id.version = ad714x->version;
+
+ error = input_register_device(input[alloc_idx]);
+ if (error)
+ goto err_free_dev;
+
+ alloc_idx++;
+ }
+ }
+
+ /* a touchpad uses one input_dev instance */
+ if (ad714x->hw->touchpad_num > 0) {
+ struct ad714x_touchpad_plat *tp_plat = ad714x->hw->touchpad;
+
+ for (i = 0; i < ad714x->hw->touchpad_num; i++) {
+ tp_drv[i].input = input[alloc_idx] = input_allocate_device();
+ if (!input[alloc_idx]) {
+ error = -ENOMEM;
+ goto err_free_dev;
+ }
+
+ __set_bit(EV_ABS, input[alloc_idx]->evbit);
+ __set_bit(EV_KEY, input[alloc_idx]->evbit);
+ __set_bit(ABS_X, input[alloc_idx]->absbit);
+ __set_bit(ABS_Y, input[alloc_idx]->absbit);
+ __set_bit(BTN_TOUCH, input[alloc_idx]->keybit);
+ input_set_abs_params(input[alloc_idx],
+ ABS_X, 0, tp_plat->x_max_coord, 0, 0);
+ input_set_abs_params(input[alloc_idx],
+ ABS_Y, 0, tp_plat->y_max_coord, 0, 0);
+
+ input[alloc_idx]->id.bustype = bus_type;
+ input[alloc_idx]->id.product = ad714x->product;
+ input[alloc_idx]->id.version = ad714x->version;
+
+ error = input_register_device(input[alloc_idx]);
+ if (error)
+ goto err_free_dev;
+
+ alloc_idx++;
+ }
+ }
+
+ /* all buttons use one input node */
+ if (ad714x->hw->button_num > 0) {
+ struct ad714x_button_plat *bt_plat = ad714x->hw->button;
+
+ input[alloc_idx] = input_allocate_device();
+ if (!input[alloc_idx]) {
+ error = -ENOMEM;
+ goto err_free_dev;
+ }
+
+ __set_bit(EV_KEY, input[alloc_idx]->evbit);
+ for (i = 0; i < ad714x->hw->button_num; i++) {
+ bt_drv[i].input = input[alloc_idx];
+ __set_bit(bt_plat[i].keycode, input[alloc_idx]->keybit);
+ }
+
+ input[alloc_idx]->id.bustype = bus_type;
+ input[alloc_idx]->id.product = ad714x->product;
+ input[alloc_idx]->id.version = ad714x->version;
+
+ error = input_register_device(input[alloc_idx]);
+ if (error)
+ goto err_free_dev;
+
+ alloc_idx++;
+ }
+
+ error = request_threaded_irq(ad714x->irq, NULL, ad714x_interrupt_thread,
+ IRQF_TRIGGER_FALLING, "ad714x_captouch", ad714x);
+ if (error) {
+ dev_err(dev, "can't allocate irq %d\n", ad714x->irq);
+ goto err_unreg_dev;
+ }
+
+ return ad714x;
+
+ err_free_dev:
+ dev_err(dev, "failed to setup AD714x input device %i\n", alloc_idx);
+ input_free_device(input[alloc_idx]);
+ err_unreg_dev:
+ while (--alloc_idx >= 0)
+ input_unregister_device(input[alloc_idx]);
+ err_free_mem:
+ kfree(ad714x);
+ err_out:
+ return ERR_PTR(error);
+}
+EXPORT_SYMBOL(ad714x_probe);
+
+void ad714x_remove(struct ad714x_chip *ad714x)
+{
+ struct ad714x_platform_data *hw = ad714x->hw;
+ struct ad714x_driver_data *sw = ad714x->sw;
+ int i;
+
+ free_irq(ad714x->irq, ad714x);
+
+ /* unregister and free all input devices */
+
+ for (i = 0; i < hw->slider_num; i++)
+ input_unregister_device(sw->slider[i].input);
+
+ for (i = 0; i < hw->wheel_num; i++)
+ input_unregister_device(sw->wheel[i].input);
+
+ for (i = 0; i < hw->touchpad_num; i++)
+ input_unregister_device(sw->touchpad[i].input);
+
+ if (hw->button_num)
+ input_unregister_device(sw->button[0].input);
+
+ kfree(ad714x);
+}
+EXPORT_SYMBOL(ad714x_remove);
+
+#ifdef CONFIG_PM
+int ad714x_disable(struct ad714x_chip *ad714x)
+{
+ unsigned short data;
+
+ dev_dbg(ad714x->dev, "%s enter\n", __func__);
+
+ mutex_lock(&ad714x->mutex);
+
+ data = ad714x->hw->sys_cfg_reg[AD714X_PWR_CTRL] | 0x3;
+ ad714x->write(ad714x->dev, AD714X_PWR_CTRL, data);
+
+ mutex_unlock(&ad714x->mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL(ad714x_disable);
+
+int ad714x_enable(struct ad714x_chip *ad714x)
+{
+ unsigned short data;
+
+ dev_dbg(ad714x->dev, "%s enter\n", __func__);
+
+ mutex_lock(&ad714x->mutex);
+
+ /* resume to non-shutdown mode */
+
+ ad714x->write(ad714x->dev, AD714X_PWR_CTRL,
+ ad714x->hw->sys_cfg_reg[AD714X_PWR_CTRL]);
+
+ /* make sure the interrupt output line is not low level after resume,
+ * otherwise we will get no chance to enter falling-edge irq again
+ */
+
+ ad714x->read(ad714x->dev, STG_LOW_INT_STA_REG, &data);
+ ad714x->read(ad714x->dev, STG_HIGH_INT_STA_REG, &data);
+ ad714x->read(ad714x->dev, STG_COM_INT_STA_REG, &data);
+
+ mutex_unlock(&ad714x->mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL(ad714x_enable);
+#endif
+
+MODULE_DESCRIPTION("Analog Devices AD714X Capacitance Touch Sensor Driver");
+MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/misc/ad714x.h b/drivers/input/misc/ad714x.h
new file mode 100644
index 000000000000..45c54fb13f07
--- /dev/null
+++ b/drivers/input/misc/ad714x.h
@@ -0,0 +1,26 @@
+/*
+ * AD714X CapTouch Programmable Controller driver (bus interfaces)
+ *
+ * Copyright 2009 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef _AD714X_H_
+#define _AD714X_H_
+
+#include <linux/types.h>
+
+struct device;
+struct ad714x_chip;
+
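+/*
+ * 16-bit register accessors; each bus glue driver (I2C or SPI) supplies
+ * its own implementation and hands it to ad714x_probe().
+ */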
+typedef int (*ad714x_read_t)(struct device *, unsigned short, unsigned short *);
+typedef int (*ad714x_write_t)(struct device *, unsigned short, unsigned short);
+
+int ad714x_disable(struct ad714x_chip *ad714x);
+int ad714x_enable(struct ad714x_chip *ad714x);
+struct ad714x_chip *ad714x_probe(struct device *dev, u16 bus_type, int irq,
+ ad714x_read_t read, ad714x_write_t write);
+void ad714x_remove(struct ad714x_chip *ad714x);
+
+#endif
diff --git a/drivers/input/misc/ati_remote.c b/drivers/input/misc/ati_remote.c
index e8bbc619f6df..bce57129afba 100644
--- a/drivers/input/misc/ati_remote.c
+++ b/drivers/input/misc/ati_remote.c
@@ -624,13 +624,13 @@ static void ati_remote_irq_in(struct urb *urb)
static int ati_remote_alloc_buffers(struct usb_device *udev,
struct ati_remote *ati_remote)
{
- ati_remote->inbuf = usb_buffer_alloc(udev, DATA_BUFSIZE, GFP_ATOMIC,
- &ati_remote->inbuf_dma);
+ ati_remote->inbuf = usb_alloc_coherent(udev, DATA_BUFSIZE, GFP_ATOMIC,
+ &ati_remote->inbuf_dma);
if (!ati_remote->inbuf)
return -1;
- ati_remote->outbuf = usb_buffer_alloc(udev, DATA_BUFSIZE, GFP_ATOMIC,
- &ati_remote->outbuf_dma);
+ ati_remote->outbuf = usb_alloc_coherent(udev, DATA_BUFSIZE, GFP_ATOMIC,
+ &ati_remote->outbuf_dma);
if (!ati_remote->outbuf)
return -1;
@@ -653,10 +653,10 @@ static void ati_remote_free_buffers(struct ati_remote *ati_remote)
usb_free_urb(ati_remote->irq_urb);
usb_free_urb(ati_remote->out_urb);
- usb_buffer_free(ati_remote->udev, DATA_BUFSIZE,
+ usb_free_coherent(ati_remote->udev, DATA_BUFSIZE,
ati_remote->inbuf, ati_remote->inbuf_dma);
- usb_buffer_free(ati_remote->udev, DATA_BUFSIZE,
+ usb_free_coherent(ati_remote->udev, DATA_BUFSIZE,
ati_remote->outbuf, ati_remote->outbuf_dma);
}
diff --git a/drivers/input/misc/ati_remote2.c b/drivers/input/misc/ati_remote2.c
index 2124b99378bb..e148749b5851 100644
--- a/drivers/input/misc/ati_remote2.c
+++ b/drivers/input/misc/ati_remote2.c
@@ -589,7 +589,7 @@ static int ati_remote2_urb_init(struct ati_remote2 *ar2)
int i, pipe, maxp;
for (i = 0; i < 2; i++) {
- ar2->buf[i] = usb_buffer_alloc(udev, 4, GFP_KERNEL, &ar2->buf_dma[i]);
+ ar2->buf[i] = usb_alloc_coherent(udev, 4, GFP_KERNEL, &ar2->buf_dma[i]);
if (!ar2->buf[i])
return -ENOMEM;
@@ -617,7 +617,7 @@ static void ati_remote2_urb_cleanup(struct ati_remote2 *ar2)
for (i = 0; i < 2; i++) {
usb_free_urb(ar2->urb[i]);
- usb_buffer_free(ar2->udev, 4, ar2->buf[i], ar2->buf_dma[i]);
+ usb_free_coherent(ar2->udev, 4, ar2->buf[i], ar2->buf_dma[i]);
}
}
diff --git a/drivers/input/misc/cm109.c b/drivers/input/misc/cm109.c
index 86457feccfc4..2b0eba6619bd 100644
--- a/drivers/input/misc/cm109.c
+++ b/drivers/input/misc/cm109.c
@@ -102,7 +102,6 @@ struct cm109_dev {
struct cm109_ctl_packet *ctl_data;
dma_addr_t ctl_dma;
struct usb_ctrlrequest *ctl_req;
- dma_addr_t ctl_req_dma;
struct urb *urb_ctl;
/*
* The 3 bitfields below are protected by ctl_submit_lock.
@@ -629,15 +628,13 @@ static const struct usb_device_id cm109_usb_table[] = {
static void cm109_usb_cleanup(struct cm109_dev *dev)
{
- if (dev->ctl_req)
- usb_buffer_free(dev->udev, sizeof(*(dev->ctl_req)),
- dev->ctl_req, dev->ctl_req_dma);
+ kfree(dev->ctl_req);
if (dev->ctl_data)
- usb_buffer_free(dev->udev, USB_PKT_LEN,
- dev->ctl_data, dev->ctl_dma);
+ usb_free_coherent(dev->udev, USB_PKT_LEN,
+ dev->ctl_data, dev->ctl_dma);
if (dev->irq_data)
- usb_buffer_free(dev->udev, USB_PKT_LEN,
- dev->irq_data, dev->irq_dma);
+ usb_free_coherent(dev->udev, USB_PKT_LEN,
+ dev->irq_data, dev->irq_dma);
usb_free_urb(dev->urb_irq); /* parameter validation in core/urb */
usb_free_urb(dev->urb_ctl); /* parameter validation in core/urb */
@@ -686,18 +683,17 @@ static int cm109_usb_probe(struct usb_interface *intf,
goto err_out;
/* allocate usb buffers */
- dev->irq_data = usb_buffer_alloc(udev, USB_PKT_LEN,
- GFP_KERNEL, &dev->irq_dma);
+ dev->irq_data = usb_alloc_coherent(udev, USB_PKT_LEN,
+ GFP_KERNEL, &dev->irq_dma);
if (!dev->irq_data)
goto err_out;
- dev->ctl_data = usb_buffer_alloc(udev, USB_PKT_LEN,
- GFP_KERNEL, &dev->ctl_dma);
+ dev->ctl_data = usb_alloc_coherent(udev, USB_PKT_LEN,
+ GFP_KERNEL, &dev->ctl_dma);
if (!dev->ctl_data)
goto err_out;
- dev->ctl_req = usb_buffer_alloc(udev, sizeof(*(dev->ctl_req)),
- GFP_KERNEL, &dev->ctl_req_dma);
+ dev->ctl_req = kmalloc(sizeof(*(dev->ctl_req)), GFP_KERNEL);
if (!dev->ctl_req)
goto err_out;
@@ -735,10 +731,8 @@ static int cm109_usb_probe(struct usb_interface *intf,
usb_fill_control_urb(dev->urb_ctl, udev, usb_sndctrlpipe(udev, 0),
(void *)dev->ctl_req, dev->ctl_data, USB_PKT_LEN,
cm109_urb_ctl_callback, dev);
- dev->urb_ctl->setup_dma = dev->ctl_req_dma;
dev->urb_ctl->transfer_dma = dev->ctl_dma;
- dev->urb_ctl->transfer_flags |= URB_NO_SETUP_DMA_MAP |
- URB_NO_TRANSFER_DMA_MAP;
+ dev->urb_ctl->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
dev->urb_ctl->dev = udev;
/* find out the physical bus location */
diff --git a/drivers/input/misc/hp_sdc_rtc.c b/drivers/input/misc/hp_sdc_rtc.c
index ad730e15afc0..e00a1cc79c0a 100644
--- a/drivers/input/misc/hp_sdc_rtc.c
+++ b/drivers/input/misc/hp_sdc_rtc.c
@@ -43,6 +43,7 @@
#include <linux/proc_fs.h>
#include <linux/poll.h>
#include <linux/rtc.h>
+#include <linux/smp_lock.h>
#include <linux/semaphore.h>
MODULE_AUTHOR("Brian S. Julin <bri@calyx.com>");
@@ -64,8 +65,8 @@ static DECLARE_WAIT_QUEUE_HEAD(hp_sdc_rtc_wait);
static ssize_t hp_sdc_rtc_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos);
-static int hp_sdc_rtc_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg);
+static long hp_sdc_rtc_unlocked_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg);
static unsigned int hp_sdc_rtc_poll(struct file *file, poll_table *wait);
@@ -512,7 +513,7 @@ static int hp_sdc_rtc_read_proc(char *page, char **start, off_t off,
return len;
}
-static int hp_sdc_rtc_ioctl(struct inode *inode, struct file *file,
+static int hp_sdc_rtc_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
#if 1
@@ -659,14 +660,27 @@ static int hp_sdc_rtc_ioctl(struct inode *inode, struct file *file,
#endif
}
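+/*
+ * The legacy .ioctl entry point is being phased out; wrap the old handler
+ * and take the BKL explicitly here as a stop-gap until this driver gets
+ * proper locking of its own.
+ */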
+static long hp_sdc_rtc_unlocked_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ int ret;
+
+ lock_kernel();
+ ret = hp_sdc_rtc_ioctl(file, cmd, arg);
+ unlock_kernel();
+
+ return ret;
+}
+
+
static const struct file_operations hp_sdc_rtc_fops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .read = hp_sdc_rtc_read,
- .poll = hp_sdc_rtc_poll,
- .ioctl = hp_sdc_rtc_ioctl,
- .open = hp_sdc_rtc_open,
- .fasync = hp_sdc_rtc_fasync,
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .read = hp_sdc_rtc_read,
+ .poll = hp_sdc_rtc_poll,
+	.unlocked_ioctl	= hp_sdc_rtc_unlocked_ioctl,
+ .open = hp_sdc_rtc_open,
+ .fasync = hp_sdc_rtc_fasync,
};
static struct miscdevice hp_sdc_rtc_dev = {
diff --git a/drivers/input/misc/keyspan_remote.c b/drivers/input/misc/keyspan_remote.c
index 86afdd1fdf9d..a93c525475c6 100644
--- a/drivers/input/misc/keyspan_remote.c
+++ b/drivers/input/misc/keyspan_remote.c
@@ -464,7 +464,7 @@ static int keyspan_probe(struct usb_interface *interface, const struct usb_devic
remote->in_endpoint = endpoint;
remote->toggle = -1; /* Set to -1 so we will always not match the toggle from the first remote message. */
- remote->in_buffer = usb_buffer_alloc(udev, RECV_SIZE, GFP_ATOMIC, &remote->in_dma);
+ remote->in_buffer = usb_alloc_coherent(udev, RECV_SIZE, GFP_ATOMIC, &remote->in_dma);
if (!remote->in_buffer) {
error = -ENOMEM;
goto fail1;
@@ -543,7 +543,7 @@ static int keyspan_probe(struct usb_interface *interface, const struct usb_devic
return 0;
fail3: usb_free_urb(remote->irq_urb);
- fail2: usb_buffer_free(udev, RECV_SIZE, remote->in_buffer, remote->in_dma);
+ fail2: usb_free_coherent(udev, RECV_SIZE, remote->in_buffer, remote->in_dma);
fail1: kfree(remote);
input_free_device(input_dev);
@@ -564,7 +564,7 @@ static void keyspan_disconnect(struct usb_interface *interface)
input_unregister_device(remote->input);
usb_kill_urb(remote->irq_urb);
usb_free_urb(remote->irq_urb);
- usb_buffer_free(remote->udev, RECV_SIZE, remote->in_buffer, remote->in_dma);
+ usb_free_coherent(remote->udev, RECV_SIZE, remote->in_buffer, remote->in_dma);
kfree(remote);
}
}
diff --git a/drivers/input/misc/max8925_onkey.c b/drivers/input/misc/max8925_onkey.c
new file mode 100644
index 000000000000..80af44608018
--- /dev/null
+++ b/drivers/input/misc/max8925_onkey.c
@@ -0,0 +1,148 @@
+/**
+ * max8925_onkey.c - MAX8925 ONKEY driver
+ *
+ * Copyright (C) 2009 Marvell International Ltd.
+ * Haojian Zhuang <haojian.zhuang@marvell.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of this
+ * archive for more details.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/max8925.h>
+#include <linux/slab.h>
+
+#define HARDRESET_EN (1 << 7)
+#define PWREN_EN (1 << 7)
+
+struct max8925_onkey_info {
+ struct input_dev *idev;
+ struct i2c_client *i2c;
+ int irq;
+};
+
+/*
+ * MAX8925 gives us an interrupt when ONKEY is held for 3 seconds.
+ * max8925_set_bits() operates on the I2C bus and may sleep, so the
+ * work is done in a threaded IRQ handler.
+ */
+static irqreturn_t max8925_onkey_handler(int irq, void *data)
+{
+ struct max8925_onkey_info *info = data;
+
+ input_report_key(info->idev, KEY_POWER, 1);
+ input_sync(info->idev);
+
+ /* Enable hardreset to halt if system isn't shutdown on time */
+ max8925_set_bits(info->i2c, MAX8925_SYSENSEL,
+ HARDRESET_EN, HARDRESET_EN);
+
+ return IRQ_HANDLED;
+}
+
+static int __devinit max8925_onkey_probe(struct platform_device *pdev)
+{
+ struct max8925_chip *chip = dev_get_drvdata(pdev->dev.parent);
+ struct max8925_onkey_info *info;
+ int error;
+
+ info = kzalloc(sizeof(struct max8925_onkey_info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->i2c = chip->i2c;
+ info->irq = chip->irq_base + MAX8925_IRQ_GPM_SW_3SEC;
+
+ info->idev = input_allocate_device();
+ if (!info->idev) {
+ dev_err(chip->dev, "Failed to allocate input dev\n");
+ error = -ENOMEM;
+ goto out_input;
+ }
+
+ info->idev->name = "max8925_on";
+ info->idev->phys = "max8925_on/input0";
+ info->idev->id.bustype = BUS_I2C;
+ info->idev->dev.parent = &pdev->dev;
+ info->idev->evbit[0] = BIT_MASK(EV_KEY);
+ info->idev->keybit[BIT_WORD(KEY_POWER)] = BIT_MASK(KEY_POWER);
+
+ error = request_threaded_irq(info->irq, NULL, max8925_onkey_handler,
+ IRQF_ONESHOT, "onkey", info);
+ if (error < 0) {
+ dev_err(chip->dev, "Failed to request IRQ: #%d: %d\n",
+ info->irq, error);
+ goto out_irq;
+ }
+
+ error = input_register_device(info->idev);
+ if (error) {
+ dev_err(chip->dev, "Can't register input device: %d\n", error);
+ goto out;
+ }
+
+ platform_set_drvdata(pdev, info);
+
+ return 0;
+
+out:
+ free_irq(info->irq, info);
+out_irq:
+ input_free_device(info->idev);
+out_input:
+ kfree(info);
+ return error;
+}
+
+static int __devexit max8925_onkey_remove(struct platform_device *pdev)
+{
+ struct max8925_onkey_info *info = platform_get_drvdata(pdev);
+
+ free_irq(info->irq, info);
+ input_unregister_device(info->idev);
+ kfree(info);
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver max8925_onkey_driver = {
+ .driver = {
+ .name = "max8925-onkey",
+ .owner = THIS_MODULE,
+ },
+ .probe = max8925_onkey_probe,
+ .remove = __devexit_p(max8925_onkey_remove),
+};
+
+static int __init max8925_onkey_init(void)
+{
+ return platform_driver_register(&max8925_onkey_driver);
+}
+module_init(max8925_onkey_init);
+
+static void __exit max8925_onkey_exit(void)
+{
+ platform_driver_unregister(&max8925_onkey_driver);
+}
+module_exit(max8925_onkey_exit);
+
+MODULE_DESCRIPTION("Maxim MAX8925 ONKEY driver");
+MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/misc/pcf8574_keypad.c b/drivers/input/misc/pcf8574_keypad.c
new file mode 100644
index 000000000000..5c3ac4e0b055
--- /dev/null
+++ b/drivers/input/misc/pcf8574_keypad.c
@@ -0,0 +1,227 @@
+/*
+ * Driver for a keypad w/16 buttons connected to a PCF8574 I2C I/O expander
+ *
+ * Copyright 2005-2008 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+
+#define DRV_NAME "pcf8574_keypad"
+
+static const unsigned char pcf8574_kp_btncode[] = {
+ [0] = KEY_RESERVED,
+ [1] = KEY_ENTER,
+ [2] = KEY_BACKSLASH,
+ [3] = KEY_0,
+ [4] = KEY_RIGHTBRACE,
+ [5] = KEY_C,
+ [6] = KEY_9,
+ [7] = KEY_8,
+ [8] = KEY_7,
+ [9] = KEY_B,
+ [10] = KEY_6,
+ [11] = KEY_5,
+ [12] = KEY_4,
+ [13] = KEY_A,
+ [14] = KEY_3,
+ [15] = KEY_2,
+ [16] = KEY_1
+};
+
+struct kp_data {
+ unsigned short btncode[ARRAY_SIZE(pcf8574_kp_btncode)];
+ struct input_dev *idev;
+ struct i2c_client *client;
+ char name[64];
+ char phys[32];
+ unsigned char laststate;
+};
+
+static short read_state(struct kp_data *lp)
+{
+ unsigned char x, y, a, b;
+
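+	/*
+	 * Scan the matrix in two passes: drive one nibble of the expander
+	 * port low and read back which line of the other nibble a pressed
+	 * key pulls low, then swap nibbles.  The two bit positions are
+	 * combined into a single index into the keycode table.
+	 */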
+ i2c_smbus_write_byte(lp->client, 240);
+ x = 0xF & (~(i2c_smbus_read_byte(lp->client) >> 4));
+
+ i2c_smbus_write_byte(lp->client, 15);
+ y = 0xF & (~i2c_smbus_read_byte(lp->client));
+
+ for (a = 0; x > 0; a++)
+ x = x >> 1;
+ for (b = 0; y > 0; b++)
+ y = y >> 1;
+
+ return ((a - 1) * 4) + b;
+}
+
+static irqreturn_t pcf8574_kp_irq_handler(int irq, void *dev_id)
+{
+ struct kp_data *lp = dev_id;
+ unsigned char nextstate = read_state(lp);
+
+ if (lp->laststate != nextstate) {
+		int key_down = nextstate < ARRAY_SIZE(lp->btncode);
+ unsigned short keycode = key_down ?
+ lp->btncode[nextstate] : lp->btncode[lp->laststate];
+
+ input_report_key(lp->idev, keycode, key_down);
+ input_sync(lp->idev);
+
+ lp->laststate = nextstate;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int __devinit pcf8574_kp_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+ int i, ret;
+ struct input_dev *idev;
+ struct kp_data *lp;
+
+ if (i2c_smbus_write_byte(client, 240) < 0) {
+ dev_err(&client->dev, "probe: write fail\n");
+ return -ENODEV;
+ }
+
+ lp = kzalloc(sizeof(*lp), GFP_KERNEL);
+ if (!lp)
+ return -ENOMEM;
+
+ idev = input_allocate_device();
+ if (!idev) {
+ dev_err(&client->dev, "Can't allocate input device\n");
+ ret = -ENOMEM;
+ goto fail_allocate;
+ }
+
+ lp->idev = idev;
+ lp->client = client;
+
+ idev->evbit[0] = BIT_MASK(EV_KEY);
+ idev->keycode = lp->btncode;
+ idev->keycodesize = sizeof(lp->btncode[0]);
+ idev->keycodemax = ARRAY_SIZE(lp->btncode);
+
+ for (i = 0; i < ARRAY_SIZE(pcf8574_kp_btncode); i++) {
+ lp->btncode[i] = pcf8574_kp_btncode[i];
+ __set_bit(lp->btncode[i] & KEY_MAX, idev->keybit);
+ }
+
+ sprintf(lp->name, DRV_NAME);
+ sprintf(lp->phys, "kp_data/input0");
+
+ idev->name = lp->name;
+ idev->phys = lp->phys;
+ idev->id.bustype = BUS_I2C;
+ idev->id.vendor = 0x0001;
+ idev->id.product = 0x0001;
+ idev->id.version = 0x0100;
+
+ input_set_drvdata(idev, lp);
+
+ ret = input_register_device(idev);
+ if (ret) {
+ dev_err(&client->dev, "input_register_device() failed\n");
+ goto fail_register;
+ }
+
+ lp->laststate = read_state(lp);
+
+ ret = request_threaded_irq(client->irq, NULL, pcf8574_kp_irq_handler,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ DRV_NAME, lp);
+ if (ret) {
+ dev_err(&client->dev, "IRQ %d is not free\n", client->irq);
+ goto fail_irq;
+ }
+
+ i2c_set_clientdata(client, lp);
+ return 0;
+
+ fail_irq:
+ input_unregister_device(idev);
+ fail_register:
+ input_set_drvdata(idev, NULL);
+ input_free_device(idev);
+ fail_allocate:
+ kfree(lp);
+
+ return ret;
+}
+
+static int __devexit pcf8574_kp_remove(struct i2c_client *client)
+{
+ struct kp_data *lp = i2c_get_clientdata(client);
+
+ free_irq(client->irq, lp);
+
+ input_unregister_device(lp->idev);
+ kfree(lp);
+
+ i2c_set_clientdata(client, NULL);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int pcf8574_kp_resume(struct i2c_client *client)
+{
+ enable_irq(client->irq);
+
+ return 0;
+}
+
+static int pcf8574_kp_suspend(struct i2c_client *client, pm_message_t mesg)
+{
+ disable_irq(client->irq);
+
+ return 0;
+}
+#else
+# define pcf8574_kp_resume NULL
+# define pcf8574_kp_suspend NULL
+#endif
+
+static const struct i2c_device_id pcf8574_kp_id[] = {
+ { DRV_NAME, 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, pcf8574_kp_id);
+
+static struct i2c_driver pcf8574_kp_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = pcf8574_kp_probe,
+ .remove = __devexit_p(pcf8574_kp_remove),
+ .suspend = pcf8574_kp_suspend,
+ .resume = pcf8574_kp_resume,
+ .id_table = pcf8574_kp_id,
+};
+
+static int __init pcf8574_kp_init(void)
+{
+ return i2c_add_driver(&pcf8574_kp_driver);
+}
+module_init(pcf8574_kp_init);
+
+static void __exit pcf8574_kp_exit(void)
+{
+ i2c_del_driver(&pcf8574_kp_driver);
+}
+module_exit(pcf8574_kp_exit);
+
+MODULE_AUTHOR("Michael Hennerich");
+MODULE_DESCRIPTION("Keypad input driver for 16 keys connected to PCF8574");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/misc/pcspkr.c b/drivers/input/misc/pcspkr.c
index ea4e1fd12651..f080dd31499b 100644
--- a/drivers/input/misc/pcspkr.c
+++ b/drivers/input/misc/pcspkr.c
@@ -30,7 +30,7 @@ MODULE_ALIAS("platform:pcspkr");
#include <asm/i8253.h>
#else
#include <asm/8253pit.h>
-static DEFINE_SPINLOCK(i8253_lock);
+static DEFINE_RAW_SPINLOCK(i8253_lock);
#endif
static int pcspkr_event(struct input_dev *dev, unsigned int type, unsigned int code, int value)
@@ -50,7 +50,7 @@ static int pcspkr_event(struct input_dev *dev, unsigned int type, unsigned int c
if (value > 20 && value < 32767)
count = PIT_TICK_RATE / value;
- spin_lock_irqsave(&i8253_lock, flags);
+ raw_spin_lock_irqsave(&i8253_lock, flags);
if (count) {
/* set command for counter 2, 2 byte write */
@@ -65,7 +65,7 @@ static int pcspkr_event(struct input_dev *dev, unsigned int type, unsigned int c
outb(inb_p(0x61) & 0xFC, 0x61);
}
- spin_unlock_irqrestore(&i8253_lock, flags);
+ raw_spin_unlock_irqrestore(&i8253_lock, flags);
return 0;
}
diff --git a/drivers/input/misc/powermate.c b/drivers/input/misc/powermate.c
index 668913d12044..bf170f6b4422 100644
--- a/drivers/input/misc/powermate.c
+++ b/drivers/input/misc/powermate.c
@@ -64,7 +64,6 @@ struct powermate_device {
dma_addr_t data_dma;
struct urb *irq, *config;
struct usb_ctrlrequest *configcr;
- dma_addr_t configcr_dma;
struct usb_device *udev;
struct input_dev *input;
spinlock_t lock;
@@ -182,8 +181,6 @@ static void powermate_sync_state(struct powermate_device *pm)
usb_fill_control_urb(pm->config, pm->udev, usb_sndctrlpipe(pm->udev, 0),
(void *) pm->configcr, NULL, 0,
powermate_config_complete, pm);
- pm->config->setup_dma = pm->configcr_dma;
- pm->config->transfer_flags |= URB_NO_SETUP_DMA_MAP;
if (usb_submit_urb(pm->config, GFP_ATOMIC))
printk(KERN_ERR "powermate: usb_submit_urb(config) failed");
@@ -276,13 +273,12 @@ static int powermate_input_event(struct input_dev *dev, unsigned int type, unsig
static int powermate_alloc_buffers(struct usb_device *udev, struct powermate_device *pm)
{
- pm->data = usb_buffer_alloc(udev, POWERMATE_PAYLOAD_SIZE_MAX,
- GFP_ATOMIC, &pm->data_dma);
+ pm->data = usb_alloc_coherent(udev, POWERMATE_PAYLOAD_SIZE_MAX,
+ GFP_ATOMIC, &pm->data_dma);
if (!pm->data)
return -1;
- pm->configcr = usb_buffer_alloc(udev, sizeof(*(pm->configcr)),
- GFP_ATOMIC, &pm->configcr_dma);
+ pm->configcr = kmalloc(sizeof(*(pm->configcr)), GFP_KERNEL);
if (!pm->configcr)
return -1;
@@ -291,10 +287,9 @@ static int powermate_alloc_buffers(struct usb_device *udev, struct powermate_dev
static void powermate_free_buffers(struct usb_device *udev, struct powermate_device *pm)
{
- usb_buffer_free(udev, POWERMATE_PAYLOAD_SIZE_MAX,
- pm->data, pm->data_dma);
- usb_buffer_free(udev, sizeof(*(pm->configcr)),
- pm->configcr, pm->configcr_dma);
+ usb_free_coherent(udev, POWERMATE_PAYLOAD_SIZE_MAX,
+ pm->data, pm->data_dma);
+ kfree(pm->configcr);
}
/* Called whenever a USB device matching one in our supported devices table is connected */
diff --git a/drivers/input/misc/sparcspkr.c b/drivers/input/misc/sparcspkr.c
index 0d45422f8095..1dacae4b43f0 100644
--- a/drivers/input/misc/sparcspkr.c
+++ b/drivers/input/misc/sparcspkr.c
@@ -259,8 +259,11 @@ static const struct of_device_id bbc_beep_match[] = {
};
static struct of_platform_driver bbc_beep_driver = {
- .name = "bbcbeep",
- .match_table = bbc_beep_match,
+ .driver = {
+ .name = "bbcbeep",
+ .owner = THIS_MODULE,
+ .of_match_table = bbc_beep_match,
+ },
.probe = bbc_beep_probe,
.remove = __devexit_p(bbc_remove),
.shutdown = sparcspkr_shutdown,
@@ -338,8 +341,11 @@ static const struct of_device_id grover_beep_match[] = {
};
static struct of_platform_driver grover_beep_driver = {
- .name = "groverbeep",
- .match_table = grover_beep_match,
+ .driver = {
+ .name = "groverbeep",
+ .owner = THIS_MODULE,
+ .of_match_table = grover_beep_match,
+ },
.probe = grover_beep_probe,
.remove = __devexit_p(grover_remove),
.shutdown = sparcspkr_shutdown,
diff --git a/drivers/input/misc/twl4030-vibra.c b/drivers/input/misc/twl4030-vibra.c
index fee9eac8e04a..4f9b2afc24e8 100644
--- a/drivers/input/misc/twl4030-vibra.c
+++ b/drivers/input/misc/twl4030-vibra.c
@@ -90,8 +90,8 @@ static void vibra_disable(struct vibra_info *info)
twl_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE,
(reg & ~TWL4030_VIBRA_EN), TWL4030_REG_VIBRA_CTL);
- twl4030_codec_disable_resource(TWL4030_CODEC_RES_POWER);
twl4030_codec_disable_resource(TWL4030_CODEC_RES_APLL);
+ twl4030_codec_disable_resource(TWL4030_CODEC_RES_POWER);
info->enabled = false;
}
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index 1477466076ad..b71eb55f2dbc 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -300,7 +300,7 @@ static int uinput_validate_absbits(struct input_dev *dev)
unsigned int cnt;
int retval = 0;
- for (cnt = 0; cnt < ABS_MAX + 1; cnt++) {
+ for (cnt = 0; cnt < ABS_CNT; cnt++) {
if (!test_bit(cnt, dev->absbit))
continue;
@@ -387,7 +387,7 @@ static int uinput_setup_device(struct uinput_device *udev, const char __user *bu
dev->id.product = user_dev->id.product;
dev->id.version = user_dev->id.version;
- size = sizeof(int) * (ABS_MAX + 1);
+ size = sizeof(int) * ABS_CNT;
memcpy(dev->absmax, user_dev->absmax, size);
memcpy(dev->absmin, user_dev->absmin, size);
memcpy(dev->absfuzz, user_dev->absfuzz, size);
diff --git a/drivers/input/misc/wistron_btns.c b/drivers/input/misc/wistron_btns.c
index 04d5a4a3181f..4dac8b79fcd4 100644
--- a/drivers/input/misc/wistron_btns.c
+++ b/drivers/input/misc/wistron_btns.c
@@ -983,11 +983,11 @@ static int __init copy_keymap(void)
for (key = keymap; key->type != KE_END; key++)
length++;
- new_keymap = kmalloc(length * sizeof(struct key_entry), GFP_KERNEL);
+ new_keymap = kmemdup(keymap, length * sizeof(struct key_entry),
+ GFP_KERNEL);
if (!new_keymap)
return -ENOMEM;
- memcpy(new_keymap, keymap, length * sizeof(struct key_entry));
keymap = new_keymap;
return 0;
diff --git a/drivers/input/misc/yealink.c b/drivers/input/misc/yealink.c
index 93a22ac0f88c..41201c6b5e68 100644
--- a/drivers/input/misc/yealink.c
+++ b/drivers/input/misc/yealink.c
@@ -111,7 +111,6 @@ struct yealink_dev {
struct yld_ctl_packet *ctl_data;
dma_addr_t ctl_dma;
struct usb_ctrlrequest *ctl_req;
- dma_addr_t ctl_req_dma;
struct urb *urb_ctl;
char phys[64]; /* physical device path */
@@ -836,12 +835,9 @@ static int usb_cleanup(struct yealink_dev *yld, int err)
usb_free_urb(yld->urb_irq);
usb_free_urb(yld->urb_ctl);
- usb_buffer_free(yld->udev, sizeof(*(yld->ctl_req)),
- yld->ctl_req, yld->ctl_req_dma);
- usb_buffer_free(yld->udev, USB_PKT_LEN,
- yld->ctl_data, yld->ctl_dma);
- usb_buffer_free(yld->udev, USB_PKT_LEN,
- yld->irq_data, yld->irq_dma);
+ kfree(yld->ctl_req);
+ usb_free_coherent(yld->udev, USB_PKT_LEN, yld->ctl_data, yld->ctl_dma);
+ usb_free_coherent(yld->udev, USB_PKT_LEN, yld->irq_data, yld->irq_dma);
kfree(yld);
return err;
@@ -886,18 +882,17 @@ static int usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
return usb_cleanup(yld, -ENOMEM);
/* allocate usb buffers */
- yld->irq_data = usb_buffer_alloc(udev, USB_PKT_LEN,
- GFP_ATOMIC, &yld->irq_dma);
+ yld->irq_data = usb_alloc_coherent(udev, USB_PKT_LEN,
+ GFP_ATOMIC, &yld->irq_dma);
if (yld->irq_data == NULL)
return usb_cleanup(yld, -ENOMEM);
- yld->ctl_data = usb_buffer_alloc(udev, USB_PKT_LEN,
- GFP_ATOMIC, &yld->ctl_dma);
+ yld->ctl_data = usb_alloc_coherent(udev, USB_PKT_LEN,
+ GFP_ATOMIC, &yld->ctl_dma);
if (!yld->ctl_data)
return usb_cleanup(yld, -ENOMEM);
- yld->ctl_req = usb_buffer_alloc(udev, sizeof(*(yld->ctl_req)),
- GFP_ATOMIC, &yld->ctl_req_dma);
+ yld->ctl_req = kmalloc(sizeof(*(yld->ctl_req)), GFP_KERNEL);
if (yld->ctl_req == NULL)
return usb_cleanup(yld, -ENOMEM);
@@ -936,10 +931,8 @@ static int usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
usb_fill_control_urb(yld->urb_ctl, udev, usb_sndctrlpipe(udev, 0),
(void *)yld->ctl_req, yld->ctl_data, USB_PKT_LEN,
urb_ctl_callback, yld);
- yld->urb_ctl->setup_dma = yld->ctl_req_dma;
yld->urb_ctl->transfer_dma = yld->ctl_dma;
- yld->urb_ctl->transfer_flags |= URB_NO_SETUP_DMA_MAP |
- URB_NO_TRANSFER_DMA_MAP;
+ yld->urb_ctl->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
yld->urb_ctl->dev = udev;
/* find out the physical bus location */
diff --git a/drivers/input/mouse/Kconfig b/drivers/input/mouse/Kconfig
index c714ca2407f8..eeb58c1cac16 100644
--- a/drivers/input/mouse/Kconfig
+++ b/drivers/input/mouse/Kconfig
@@ -17,7 +17,7 @@ config MOUSE_PS2
default y
select SERIO
select SERIO_LIBPS2
- select SERIO_I8042 if X86
+ select SERIO_I8042 if X86 && !X86_MRST
select SERIO_GSCPS2 if GSC
help
Say Y here if you have a PS/2 mouse connected to your system. This
diff --git a/drivers/input/mouse/amimouse.c b/drivers/input/mouse/amimouse.c
index a185ac78a42c..ff5f61a0fd3a 100644
--- a/drivers/input/mouse/amimouse.c
+++ b/drivers/input/mouse/amimouse.c
@@ -21,6 +21,7 @@
#include <linux/init.h>
#include <linux/input.h>
#include <linux/interrupt.h>
+#include <linux/platform_device.h>
#include <asm/irq.h>
#include <asm/setup.h>
@@ -34,10 +35,10 @@ MODULE_DESCRIPTION("Amiga mouse driver");
MODULE_LICENSE("GPL");
static int amimouse_lastx, amimouse_lasty;
-static struct input_dev *amimouse_dev;
-static irqreturn_t amimouse_interrupt(int irq, void *dummy)
+static irqreturn_t amimouse_interrupt(int irq, void *data)
{
+ struct input_dev *dev = data;
unsigned short joy0dat, potgor;
int nx, ny, dx, dy;
@@ -59,14 +60,14 @@ static irqreturn_t amimouse_interrupt(int irq, void *dummy)
potgor = amiga_custom.potgor;
- input_report_rel(amimouse_dev, REL_X, dx);
- input_report_rel(amimouse_dev, REL_Y, dy);
+ input_report_rel(dev, REL_X, dx);
+ input_report_rel(dev, REL_Y, dy);
- input_report_key(amimouse_dev, BTN_LEFT, ciaa.pra & 0x40);
- input_report_key(amimouse_dev, BTN_MIDDLE, potgor & 0x0100);
- input_report_key(amimouse_dev, BTN_RIGHT, potgor & 0x0400);
+ input_report_key(dev, BTN_LEFT, ciaa.pra & 0x40);
+ input_report_key(dev, BTN_MIDDLE, potgor & 0x0100);
+ input_report_key(dev, BTN_RIGHT, potgor & 0x0400);
- input_sync(amimouse_dev);
+ input_sync(dev);
return IRQ_HANDLED;
}
@@ -74,63 +75,90 @@ static irqreturn_t amimouse_interrupt(int irq, void *dummy)
static int amimouse_open(struct input_dev *dev)
{
unsigned short joy0dat;
+ int error;
joy0dat = amiga_custom.joy0dat;
amimouse_lastx = joy0dat & 0xff;
amimouse_lasty = joy0dat >> 8;
- if (request_irq(IRQ_AMIGA_VERTB, amimouse_interrupt, 0, "amimouse", amimouse_interrupt)) {
- printk(KERN_ERR "amimouse.c: Can't allocate irq %d\n", IRQ_AMIGA_VERTB);
- return -EBUSY;
- }
+ error = request_irq(IRQ_AMIGA_VERTB, amimouse_interrupt, 0, "amimouse",
+ dev);
+ if (error)
+ dev_err(&dev->dev, "Can't allocate irq %d\n", IRQ_AMIGA_VERTB);
- return 0;
+ return error;
}
static void amimouse_close(struct input_dev *dev)
{
- free_irq(IRQ_AMIGA_VERTB, amimouse_interrupt);
+ free_irq(IRQ_AMIGA_VERTB, dev);
}
-static int __init amimouse_init(void)
+static int __init amimouse_probe(struct platform_device *pdev)
{
int err;
+ struct input_dev *dev;
- if (!MACH_IS_AMIGA || !AMIGAHW_PRESENT(AMI_MOUSE))
- return -ENODEV;
-
- amimouse_dev = input_allocate_device();
- if (!amimouse_dev)
+ dev = input_allocate_device();
+ if (!dev)
return -ENOMEM;
- amimouse_dev->name = "Amiga mouse";
- amimouse_dev->phys = "amimouse/input0";
- amimouse_dev->id.bustype = BUS_AMIGA;
- amimouse_dev->id.vendor = 0x0001;
- amimouse_dev->id.product = 0x0002;
- amimouse_dev->id.version = 0x0100;
+ dev->name = pdev->name;
+ dev->phys = "amimouse/input0";
+ dev->id.bustype = BUS_AMIGA;
+ dev->id.vendor = 0x0001;
+ dev->id.product = 0x0002;
+ dev->id.version = 0x0100;
- amimouse_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REL);
- amimouse_dev->relbit[0] = BIT_MASK(REL_X) | BIT_MASK(REL_Y);
- amimouse_dev->keybit[BIT_WORD(BTN_LEFT)] = BIT_MASK(BTN_LEFT) |
+ dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REL);
+ dev->relbit[0] = BIT_MASK(REL_X) | BIT_MASK(REL_Y);
+ dev->keybit[BIT_WORD(BTN_LEFT)] = BIT_MASK(BTN_LEFT) |
BIT_MASK(BTN_MIDDLE) | BIT_MASK(BTN_RIGHT);
- amimouse_dev->open = amimouse_open;
- amimouse_dev->close = amimouse_close;
+ dev->open = amimouse_open;
+ dev->close = amimouse_close;
+ dev->dev.parent = &pdev->dev;
- err = input_register_device(amimouse_dev);
+ err = input_register_device(dev);
if (err) {
- input_free_device(amimouse_dev);
+ input_free_device(dev);
return err;
}
+ platform_set_drvdata(pdev, dev);
+
return 0;
}
-static void __exit amimouse_exit(void)
+static int __exit amimouse_remove(struct platform_device *pdev)
{
- input_unregister_device(amimouse_dev);
+ struct input_dev *dev = platform_get_drvdata(pdev);
+
+ platform_set_drvdata(pdev, NULL);
+ input_unregister_device(dev);
+ return 0;
+}
+
+static struct platform_driver amimouse_driver = {
+ .remove = __exit_p(amimouse_remove),
+ .driver = {
+ .name = "amiga-mouse",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init amimouse_init(void)
+{
+ return platform_driver_probe(&amimouse_driver, amimouse_probe);
}
module_init(amimouse_init);
+
+static void __exit amimouse_exit(void)
+{
+ platform_driver_unregister(&amimouse_driver);
+}
+
module_exit(amimouse_exit);
+
+MODULE_ALIAS("platform:amiga-mouse");
diff --git a/drivers/input/mouse/appletouch.c b/drivers/input/mouse/appletouch.c
index 53ec7ddd1826..05edd75abca0 100644
--- a/drivers/input/mouse/appletouch.c
+++ b/drivers/input/mouse/appletouch.c
@@ -806,8 +806,8 @@ static int atp_probe(struct usb_interface *iface,
if (!dev->urb)
goto err_free_devs;
- dev->data = usb_buffer_alloc(dev->udev, dev->info->datalen, GFP_KERNEL,
- &dev->urb->transfer_dma);
+ dev->data = usb_alloc_coherent(dev->udev, dev->info->datalen, GFP_KERNEL,
+ &dev->urb->transfer_dma);
if (!dev->data)
goto err_free_urb;
@@ -862,8 +862,8 @@ static int atp_probe(struct usb_interface *iface,
return 0;
err_free_buffer:
- usb_buffer_free(dev->udev, dev->info->datalen,
- dev->data, dev->urb->transfer_dma);
+ usb_free_coherent(dev->udev, dev->info->datalen,
+ dev->data, dev->urb->transfer_dma);
err_free_urb:
usb_free_urb(dev->urb);
err_free_devs:
@@ -881,8 +881,8 @@ static void atp_disconnect(struct usb_interface *iface)
if (dev) {
usb_kill_urb(dev->urb);
input_unregister_device(dev->input);
- usb_buffer_free(dev->udev, dev->info->datalen,
- dev->data, dev->urb->transfer_dma);
+ usb_free_coherent(dev->udev, dev->info->datalen,
+ dev->data, dev->urb->transfer_dma);
usb_free_urb(dev->urb);
kfree(dev);
}
diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c
index b89879bd860f..6dedded27222 100644
--- a/drivers/input/mouse/bcm5974.c
+++ b/drivers/input/mouse/bcm5974.c
@@ -715,15 +715,15 @@ static int bcm5974_probe(struct usb_interface *iface,
if (!dev->tp_urb)
goto err_free_bt_urb;
- dev->bt_data = usb_buffer_alloc(dev->udev,
- dev->cfg.bt_datalen, GFP_KERNEL,
- &dev->bt_urb->transfer_dma);
+ dev->bt_data = usb_alloc_coherent(dev->udev,
+ dev->cfg.bt_datalen, GFP_KERNEL,
+ &dev->bt_urb->transfer_dma);
if (!dev->bt_data)
goto err_free_urb;
- dev->tp_data = usb_buffer_alloc(dev->udev,
- dev->cfg.tp_datalen, GFP_KERNEL,
- &dev->tp_urb->transfer_dma);
+ dev->tp_data = usb_alloc_coherent(dev->udev,
+ dev->cfg.tp_datalen, GFP_KERNEL,
+ &dev->tp_urb->transfer_dma);
if (!dev->tp_data)
goto err_free_bt_buffer;
@@ -765,10 +765,10 @@ static int bcm5974_probe(struct usb_interface *iface,
return 0;
err_free_buffer:
- usb_buffer_free(dev->udev, dev->cfg.tp_datalen,
+ usb_free_coherent(dev->udev, dev->cfg.tp_datalen,
dev->tp_data, dev->tp_urb->transfer_dma);
err_free_bt_buffer:
- usb_buffer_free(dev->udev, dev->cfg.bt_datalen,
+ usb_free_coherent(dev->udev, dev->cfg.bt_datalen,
dev->bt_data, dev->bt_urb->transfer_dma);
err_free_urb:
usb_free_urb(dev->tp_urb);
@@ -788,10 +788,10 @@ static void bcm5974_disconnect(struct usb_interface *iface)
usb_set_intfdata(iface, NULL);
input_unregister_device(dev->input);
- usb_buffer_free(dev->udev, dev->cfg.tp_datalen,
- dev->tp_data, dev->tp_urb->transfer_dma);
- usb_buffer_free(dev->udev, dev->cfg.bt_datalen,
- dev->bt_data, dev->bt_urb->transfer_dma);
+ usb_free_coherent(dev->udev, dev->cfg.tp_datalen,
+ dev->tp_data, dev->tp_urb->transfer_dma);
+ usb_free_coherent(dev->udev, dev->cfg.bt_datalen,
+ dev->bt_data, dev->bt_urb->transfer_dma);
usb_free_urb(dev->tp_urb);
usb_free_urb(dev->bt_urb);
kfree(dev);
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 0520c2e19927..b18862b2a70e 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -10,6 +10,8 @@
* Trademarks are the property of their respective owners.
*/
+#define pr_fmt(fmt) KBUILD_BASENAME ": " fmt
+
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -19,10 +21,10 @@
#include "psmouse.h"
#include "elantech.h"
-#define elantech_debug(format, arg...) \
- do { \
- if (etd->debug) \
- printk(KERN_DEBUG format, ##arg); \
+#define elantech_debug(fmt, ...) \
+ do { \
+ if (etd->debug) \
+ printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); \
} while (0)
static bool force_elantech;
@@ -37,7 +39,7 @@ static int synaptics_send_cmd(struct psmouse *psmouse, unsigned char c,
{
if (psmouse_sliced_command(psmouse, c) ||
ps2_command(&psmouse->ps2dev, param, PSMOUSE_CMD_GETINFO)) {
- pr_err("elantech.c: synaptics_send_cmd query 0x%02x failed.\n", c);
+ pr_err("synaptics_send_cmd query 0x%02x failed.\n", c);
return -1;
}
@@ -60,13 +62,13 @@ static int elantech_ps2_command(struct psmouse *psmouse,
if (rc == 0)
break;
tries--;
- elantech_debug("elantech.c: retrying ps2 command 0x%02x (%d).\n",
- command, tries);
+ elantech_debug("retrying ps2 command 0x%02x (%d).\n",
+ command, tries);
msleep(ETP_PS2_COMMAND_DELAY);
} while (tries > 0);
if (rc)
- pr_err("elantech.c: ps2 command 0x%02x failed.\n", command);
+ pr_err("ps2 command 0x%02x failed.\n", command);
return rc;
}
@@ -108,7 +110,7 @@ static int elantech_read_reg(struct psmouse *psmouse, unsigned char reg,
}
if (rc)
- pr_err("elantech.c: failed to read register 0x%02x.\n", reg);
+ pr_err("failed to read register 0x%02x.\n", reg);
else
*val = param[0];
@@ -154,7 +156,7 @@ static int elantech_write_reg(struct psmouse *psmouse, unsigned char reg,
}
if (rc)
- pr_err("elantech.c: failed to write register 0x%02x with value 0x%02x.\n",
+ pr_err("failed to write register 0x%02x with value 0x%02x.\n",
reg, val);
return rc;
@@ -167,7 +169,7 @@ static void elantech_packet_dump(unsigned char *packet, int size)
{
int i;
- printk(KERN_DEBUG "elantech.c: PS/2 packet [");
+ printk(KERN_DEBUG pr_fmt("PS/2 packet ["));
for (i = 0; i < size; i++)
printk("%s0x%02x ", (i) ? ", " : " ", packet[i]);
printk("]\n");
@@ -185,7 +187,7 @@ static void elantech_report_absolute_v1(struct psmouse *psmouse)
int fingers;
static int old_fingers;
- if (etd->fw_version_maj == 0x01) {
+ if (etd->fw_version < 0x020000) {
/*
* byte 0: D U p1 p2 1 p3 R L
* byte 1: f 0 th tw x9 x8 y9 y8
@@ -203,7 +205,7 @@ static void elantech_report_absolute_v1(struct psmouse *psmouse)
if (etd->jumpy_cursor) {
/* Discard packets that are likely to have bogus coordinates */
if (fingers > old_fingers) {
- elantech_debug("elantech.c: discarding packet\n");
+ elantech_debug("discarding packet\n");
goto discard_packet_v1;
}
}
@@ -227,7 +229,7 @@ static void elantech_report_absolute_v1(struct psmouse *psmouse)
input_report_key(dev, BTN_LEFT, packet[0] & 0x01);
input_report_key(dev, BTN_RIGHT, packet[0] & 0x02);
- if ((etd->fw_version_maj == 0x01) &&
+ if (etd->fw_version < 0x020000 &&
(etd->capabilities & ETP_CAP_HAS_ROCKER)) {
/* rocker up */
input_report_key(dev, BTN_FORWARD, packet[0] & 0x40);
@@ -321,7 +323,7 @@ static int elantech_check_parity_v1(struct psmouse *psmouse)
unsigned char p1, p2, p3;
/* Parity bits are placed differently */
- if (etd->fw_version_maj == 0x01) {
+ if (etd->fw_version < 0x020000) {
/* byte 0: D U p1 p2 1 p3 R L */
p1 = (packet[0] & 0x20) >> 5;
p2 = (packet[0] & 0x10) >> 4;
@@ -413,23 +415,21 @@ static int elantech_set_absolute_mode(struct psmouse *psmouse)
if (rc == 0)
break;
tries--;
- elantech_debug("elantech.c: retrying read (%d).\n",
- tries);
+ elantech_debug("retrying read (%d).\n", tries);
msleep(ETP_READ_BACK_DELAY);
} while (tries > 0);
if (rc) {
- pr_err("elantech.c: failed to read back register 0x10.\n");
+ pr_err("failed to read back register 0x10.\n");
} else if (etd->hw_version == 1 &&
!(val & ETP_R10_ABSOLUTE_MODE)) {
- pr_err("elantech.c: touchpad refuses "
- "to switch to absolute mode.\n");
+ pr_err("touchpad refuses to switch to absolute mode.\n");
rc = -1;
}
}
if (rc)
- pr_err("elantech.c: failed to initialise registers.\n");
+ pr_err("failed to initialise registers.\n");
return rc;
}
@@ -457,7 +457,7 @@ static void elantech_set_input_params(struct psmouse *psmouse)
switch (etd->hw_version) {
case 1:
/* Rocker button */
- if ((etd->fw_version_maj == 0x01) &&
+ if (etd->fw_version < 0x020000 &&
(etd->capabilities & ETP_CAP_HAS_ROCKER)) {
__set_bit(BTN_FORWARD, dev->keybit);
__set_bit(BTN_BACK, dev->keybit);
@@ -575,6 +575,24 @@ static struct attribute_group elantech_attr_group = {
.attrs = elantech_attrs,
};
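+/*
+ * Sanity check the result of the firmware version query.  A third byte
+ * that matches one of the standard PS/2 sample rates most likely comes
+ * from a device merely echoing generic mouse data rather than from a
+ * real Elantech touchpad, so such responses are rejected; old firmware
+ * reporting param[1] == 0 is still accepted.
+ */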
+static bool elantech_is_signature_valid(const unsigned char *param)
+{
+ static const unsigned char rates[] = { 200, 100, 80, 60, 40, 20, 10 };
+ int i;
+
+ if (param[0] == 0)
+ return false;
+
+ if (param[1] == 0)
+ return true;
+
+ for (i = 0; i < ARRAY_SIZE(rates); i++)
+ if (param[2] == rates[i])
+ return false;
+
+ return true;
+}
+
/*
* Use magic knock to detect Elantech touchpad
*/
@@ -590,7 +608,7 @@ int elantech_detect(struct psmouse *psmouse, bool set_properties)
ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) ||
ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) ||
ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO)) {
- pr_debug("elantech.c: sending Elantech magic knock failed.\n");
+ pr_debug("sending Elantech magic knock failed.\n");
return -1;
}
@@ -599,8 +617,7 @@ int elantech_detect(struct psmouse *psmouse, bool set_properties)
* set of magic numbers
*/
if (param[0] != 0x3c || param[1] != 0x03 || param[2] != 0xc8) {
- pr_debug("elantech.c: "
- "unexpected magic knock result 0x%02x, 0x%02x, 0x%02x.\n",
+ pr_debug("unexpected magic knock result 0x%02x, 0x%02x, 0x%02x.\n",
param[0], param[1], param[2]);
return -1;
}
@@ -611,20 +628,20 @@ int elantech_detect(struct psmouse *psmouse, bool set_properties)
* to Elantech magic knock and there might be more.
*/
if (synaptics_send_cmd(psmouse, ETP_FW_VERSION_QUERY, param)) {
- pr_debug("elantech.c: failed to query firmware version.\n");
+ pr_debug("failed to query firmware version.\n");
return -1;
}
- pr_debug("elantech.c: Elantech version query result 0x%02x, 0x%02x, 0x%02x.\n",
+ pr_debug("Elantech version query result 0x%02x, 0x%02x, 0x%02x.\n",
param[0], param[1], param[2]);
- if (param[0] == 0 || param[1] != 0) {
+ if (!elantech_is_signature_valid(param)) {
if (!force_elantech) {
- pr_debug("elantech.c: Probably not a real Elantech touchpad. Aborting.\n");
+ pr_debug("Probably not a real Elantech touchpad. Aborting.\n");
return -1;
}
- pr_debug("elantech.c: Probably not a real Elantech touchpad. Enabling anyway due to force_elantech.\n");
+ pr_debug("Probably not a real Elantech touchpad. Enabling anyway due to force_elantech.\n");
}
if (set_properties) {
@@ -655,7 +672,7 @@ static int elantech_reconnect(struct psmouse *psmouse)
return -1;
if (elantech_set_absolute_mode(psmouse)) {
- pr_err("elantech.c: failed to put touchpad back into absolute mode.\n");
+ pr_err("failed to put touchpad back into absolute mode.\n");
return -1;
}
@@ -683,18 +700,17 @@ int elantech_init(struct psmouse *psmouse)
* Do the version query again so we can store the result
*/
if (synaptics_send_cmd(psmouse, ETP_FW_VERSION_QUERY, param)) {
- pr_err("elantech.c: failed to query firmware version.\n");
+ pr_err("failed to query firmware version.\n");
goto init_fail;
}
- etd->fw_version_maj = param[0];
- etd->fw_version_min = param[2];
+
+ etd->fw_version = (param[0] << 16) | (param[1] << 8) | param[2];
/*
* Assume every version greater than this is new EeePC style
* hardware with 6 byte packets
*/
- if ((etd->fw_version_maj == 0x02 && etd->fw_version_min >= 0x30) ||
- etd->fw_version_maj > 0x02) {
+ if (etd->fw_version >= 0x020030) {
etd->hw_version = 2;
/* For now show extra debug information */
etd->debug = 1;
@@ -704,14 +720,15 @@ int elantech_init(struct psmouse *psmouse)
etd->hw_version = 1;
etd->paritycheck = 1;
}
- pr_info("elantech.c: assuming hardware version %d, firmware version %d.%d\n",
- etd->hw_version, etd->fw_version_maj, etd->fw_version_min);
+
+ pr_info("assuming hardware version %d, firmware version %d.%d.%d\n",
+ etd->hw_version, param[0], param[1], param[2]);
if (synaptics_send_cmd(psmouse, ETP_CAPABILITIES_QUERY, param)) {
- pr_err("elantech.c: failed to query capabilities.\n");
+ pr_err("failed to query capabilities.\n");
goto init_fail;
}
- pr_info("elantech.c: Synaptics capabilities query result 0x%02x, 0x%02x, 0x%02x.\n",
+ pr_info("Synaptics capabilities query result 0x%02x, 0x%02x, 0x%02x.\n",
param[0], param[1], param[2]);
etd->capabilities = param[0];
@@ -720,14 +737,13 @@ int elantech_init(struct psmouse *psmouse)
* a touch action starts causing the mouse cursor or scrolled page
* to jump. Enable a workaround.
*/
- if (etd->fw_version_maj == 0x02 && etd->fw_version_min == 0x22) {
- pr_info("elantech.c: firmware version 2.34 detected, "
- "enabling jumpy cursor workaround\n");
+ if (etd->fw_version == 0x020022) {
+ pr_info("firmware version 2.0.34 detected, enabling jumpy cursor workaround\n");
etd->jumpy_cursor = 1;
}
if (elantech_set_absolute_mode(psmouse)) {
- pr_err("elantech.c: failed to put touchpad into absolute mode.\n");
+ pr_err("failed to put touchpad into absolute mode.\n");
goto init_fail;
}
@@ -736,8 +752,7 @@ int elantech_init(struct psmouse *psmouse)
error = sysfs_create_group(&psmouse->ps2dev.serio->dev.kobj,
&elantech_attr_group);
if (error) {
- pr_err("elantech.c: failed to create sysfs attributes, error: %d.\n",
- error);
+ pr_err("failed to create sysfs attributes, error: %d.\n", error);
goto init_fail;
}
diff --git a/drivers/input/mouse/elantech.h b/drivers/input/mouse/elantech.h
index feac5f7af966..ac57bde1bb9f 100644
--- a/drivers/input/mouse/elantech.h
+++ b/drivers/input/mouse/elantech.h
@@ -100,11 +100,10 @@ struct elantech_data {
unsigned char reg_26;
unsigned char debug;
unsigned char capabilities;
- unsigned char fw_version_maj;
- unsigned char fw_version_min;
- unsigned char hw_version;
unsigned char paritycheck;
unsigned char jumpy_cursor;
+ unsigned char hw_version;
+ unsigned int fw_version;
unsigned char parity[256];
};
diff --git a/drivers/input/mouse/hgpk.c b/drivers/input/mouse/hgpk.c
index 08d66d820d2b..1d2205b24800 100644
--- a/drivers/input/mouse/hgpk.c
+++ b/drivers/input/mouse/hgpk.c
@@ -40,8 +40,8 @@
#include "psmouse.h"
#include "hgpk.h"
-static int tpdebug;
-module_param(tpdebug, int, 0644);
+static bool tpdebug;
+module_param(tpdebug, bool, 0644);
MODULE_PARM_DESC(tpdebug, "enable debugging, dumping packets to KERN_DEBUG.");
static int recalib_delta = 100;
diff --git a/drivers/input/mouse/logips2pp.c b/drivers/input/mouse/logips2pp.c
index 543c240a85f2..c9983aee9082 100644
--- a/drivers/input/mouse/logips2pp.c
+++ b/drivers/input/mouse/logips2pp.c
@@ -56,36 +56,36 @@ static psmouse_ret_t ps2pp_process_byte(struct psmouse *psmouse)
/* Logitech extended packet */
switch ((packet[1] >> 4) | (packet[0] & 0x30)) {
- case 0x0d: /* Mouse extra info */
+ case 0x0d: /* Mouse extra info */
- input_report_rel(dev, packet[2] & 0x80 ? REL_HWHEEL : REL_WHEEL,
- (int) (packet[2] & 8) - (int) (packet[2] & 7));
- input_report_key(dev, BTN_SIDE, (packet[2] >> 4) & 1);
- input_report_key(dev, BTN_EXTRA, (packet[2] >> 5) & 1);
+ input_report_rel(dev, packet[2] & 0x80 ? REL_HWHEEL : REL_WHEEL,
+ (int) (packet[2] & 8) - (int) (packet[2] & 7));
+ input_report_key(dev, BTN_SIDE, (packet[2] >> 4) & 1);
+ input_report_key(dev, BTN_EXTRA, (packet[2] >> 5) & 1);
- break;
+ break;
- case 0x0e: /* buttons 4, 5, 6, 7, 8, 9, 10 info */
+ case 0x0e: /* buttons 4, 5, 6, 7, 8, 9, 10 info */
- input_report_key(dev, BTN_SIDE, (packet[2]) & 1);
- input_report_key(dev, BTN_EXTRA, (packet[2] >> 1) & 1);
- input_report_key(dev, BTN_BACK, (packet[2] >> 3) & 1);
- input_report_key(dev, BTN_FORWARD, (packet[2] >> 4) & 1);
- input_report_key(dev, BTN_TASK, (packet[2] >> 2) & 1);
+ input_report_key(dev, BTN_SIDE, (packet[2]) & 1);
+ input_report_key(dev, BTN_EXTRA, (packet[2] >> 1) & 1);
+ input_report_key(dev, BTN_BACK, (packet[2] >> 3) & 1);
+ input_report_key(dev, BTN_FORWARD, (packet[2] >> 4) & 1);
+ input_report_key(dev, BTN_TASK, (packet[2] >> 2) & 1);
- break;
+ break;
- case 0x0f: /* TouchPad extra info */
+ case 0x0f: /* TouchPad extra info */
- input_report_rel(dev, packet[2] & 0x08 ? REL_HWHEEL : REL_WHEEL,
- (int) ((packet[2] >> 4) & 8) - (int) ((packet[2] >> 4) & 7));
- packet[0] = packet[2] | 0x08;
- break;
+ input_report_rel(dev, packet[2] & 0x08 ? REL_HWHEEL : REL_WHEEL,
+ (int) ((packet[2] >> 4) & 8) - (int) ((packet[2] >> 4) & 7));
+ packet[0] = packet[2] | 0x08;
+ break;
#ifdef DEBUG
- default:
- printk(KERN_WARNING "psmouse.c: Received PS2++ packet #%x, but don't know how to handle.\n",
- (packet[1] >> 4) | (packet[0] & 0x30));
+ default:
+ printk(KERN_WARNING "psmouse.c: Received PS2++ packet #%x, but don't know how to handle.\n",
+ (packet[1] >> 4) | (packet[0] & 0x30));
#endif
}
} else {
@@ -250,7 +250,6 @@ static const struct ps2pp_info *get_model_info(unsigned char model)
if (model == ps2pp_list[i].model)
return &ps2pp_list[i];
- printk(KERN_WARNING "logips2pp: Detected unknown logitech mouse model %d\n", model);
return NULL;
}
@@ -285,31 +284,32 @@ static void ps2pp_set_model_properties(struct psmouse *psmouse,
__set_bit(REL_HWHEEL, input_dev->relbit);
switch (model_info->kind) {
- case PS2PP_KIND_WHEEL:
- psmouse->name = "Wheel Mouse";
- break;
-
- case PS2PP_KIND_MX:
- psmouse->name = "MX Mouse";
- break;
- case PS2PP_KIND_TP3:
- psmouse->name = "TouchPad 3";
- break;
-
- case PS2PP_KIND_TRACKMAN:
- psmouse->name = "TrackMan";
- break;
-
- default:
- /*
- * Set name to "Mouse" only when using PS2++,
- * otherwise let other protocols define suitable
- * name
- */
- if (using_ps2pp)
- psmouse->name = "Mouse";
- break;
+ case PS2PP_KIND_WHEEL:
+ psmouse->name = "Wheel Mouse";
+ break;
+
+ case PS2PP_KIND_MX:
+ psmouse->name = "MX Mouse";
+ break;
+
+ case PS2PP_KIND_TP3:
+ psmouse->name = "TouchPad 3";
+ break;
+
+ case PS2PP_KIND_TRACKMAN:
+ psmouse->name = "TrackMan";
+ break;
+
+ default:
+ /*
+ * Set name to "Mouse" only when using PS2++,
+ * otherwise let other protocols define suitable
+ * name
+ */
+ if (using_ps2pp)
+ psmouse->name = "Mouse";
+ break;
}
}
@@ -343,7 +343,8 @@ int ps2pp_init(struct psmouse *psmouse, bool set_properties)
if (!model || !buttons)
return -1;
- if ((model_info = get_model_info(model)) != NULL) {
+ model_info = get_model_info(model);
+ if (model_info) {
/*
* Do Logitech PS2++ / PS2T++ magic init.
@@ -379,6 +380,9 @@ int ps2pp_init(struct psmouse *psmouse, bool set_properties)
use_ps2pp = true;
}
}
+
+ } else {
+ printk(KERN_WARNING "logips2pp: Detected unknown logitech mouse model %d\n", model);
}
if (set_properties) {
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index cbc807264940..979c50215282 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -147,18 +147,18 @@ static psmouse_ret_t psmouse_process_byte(struct psmouse *psmouse)
if (psmouse->type == PSMOUSE_IMEX) {
switch (packet[3] & 0xC0) {
- case 0x80: /* vertical scroll on IntelliMouse Explorer 4.0 */
- input_report_rel(dev, REL_WHEEL, (int) (packet[3] & 32) - (int) (packet[3] & 31));
- break;
- case 0x40: /* horizontal scroll on IntelliMouse Explorer 4.0 */
- input_report_rel(dev, REL_HWHEEL, (int) (packet[3] & 32) - (int) (packet[3] & 31));
- break;
- case 0x00:
- case 0xC0:
- input_report_rel(dev, REL_WHEEL, (int) (packet[3] & 8) - (int) (packet[3] & 7));
- input_report_key(dev, BTN_SIDE, (packet[3] >> 4) & 1);
- input_report_key(dev, BTN_EXTRA, (packet[3] >> 5) & 1);
- break;
+ case 0x80: /* vertical scroll on IntelliMouse Explorer 4.0 */
+ input_report_rel(dev, REL_WHEEL, (int) (packet[3] & 32) - (int) (packet[3] & 31));
+ break;
+ case 0x40: /* horizontal scroll on IntelliMouse Explorer 4.0 */
+ input_report_rel(dev, REL_HWHEEL, (int) (packet[3] & 32) - (int) (packet[3] & 31));
+ break;
+ case 0x00:
+ case 0xC0:
+ input_report_rel(dev, REL_WHEEL, (int) (packet[3] & 8) - (int) (packet[3] & 7));
+ input_report_key(dev, BTN_SIDE, (packet[3] >> 4) & 1);
+ input_report_key(dev, BTN_EXTRA, (packet[3] >> 5) & 1);
+ break;
}
}
@@ -247,31 +247,31 @@ static int psmouse_handle_byte(struct psmouse *psmouse)
psmouse_ret_t rc = psmouse->protocol_handler(psmouse);
switch (rc) {
- case PSMOUSE_BAD_DATA:
- if (psmouse->state == PSMOUSE_ACTIVATED) {
- printk(KERN_WARNING "psmouse.c: %s at %s lost sync at byte %d\n",
- psmouse->name, psmouse->phys, psmouse->pktcnt);
- if (++psmouse->out_of_sync_cnt == psmouse->resetafter) {
- __psmouse_set_state(psmouse, PSMOUSE_IGNORE);
- printk(KERN_NOTICE "psmouse.c: issuing reconnect request\n");
- serio_reconnect(psmouse->ps2dev.serio);
- return -1;
- }
+ case PSMOUSE_BAD_DATA:
+ if (psmouse->state == PSMOUSE_ACTIVATED) {
+ printk(KERN_WARNING "psmouse.c: %s at %s lost sync at byte %d\n",
+ psmouse->name, psmouse->phys, psmouse->pktcnt);
+ if (++psmouse->out_of_sync_cnt == psmouse->resetafter) {
+ __psmouse_set_state(psmouse, PSMOUSE_IGNORE);
+ printk(KERN_NOTICE "psmouse.c: issuing reconnect request\n");
+ serio_reconnect(psmouse->ps2dev.serio);
+ return -1;
}
- psmouse->pktcnt = 0;
- break;
-
- case PSMOUSE_FULL_PACKET:
- psmouse->pktcnt = 0;
- if (psmouse->out_of_sync_cnt) {
- psmouse->out_of_sync_cnt = 0;
- printk(KERN_NOTICE "psmouse.c: %s at %s - driver resynched.\n",
- psmouse->name, psmouse->phys);
- }
- break;
+ }
+ psmouse->pktcnt = 0;
+ break;
+
+ case PSMOUSE_FULL_PACKET:
+ psmouse->pktcnt = 0;
+ if (psmouse->out_of_sync_cnt) {
+ psmouse->out_of_sync_cnt = 0;
+ printk(KERN_NOTICE "psmouse.c: %s at %s - driver resynched.\n",
+ psmouse->name, psmouse->phys);
+ }
+ break;
- case PSMOUSE_GOOD_DATA:
- break;
+ case PSMOUSE_GOOD_DATA:
+ break;
}
return 0;
}
@@ -1245,7 +1245,7 @@ static int psmouse_switch_protocol(struct psmouse *psmouse,
psmouse->pktsize = 3;
if (proto && (proto->detect || proto->init)) {
- if (proto->detect && proto->detect(psmouse, 1) < 0)
+ if (proto->detect && proto->detect(psmouse, true) < 0)
return -1;
if (proto->init && proto->init(psmouse) < 0)
@@ -1394,6 +1394,7 @@ static int psmouse_reconnect(struct serio *serio)
struct psmouse *psmouse = serio_get_drvdata(serio);
struct psmouse *parent = NULL;
struct serio_driver *drv = serio->drv;
+ unsigned char type;
int rc = -1;
if (!drv || !psmouse) {
@@ -1413,10 +1414,15 @@ static int psmouse_reconnect(struct serio *serio)
if (psmouse->reconnect) {
if (psmouse->reconnect(psmouse))
goto out;
- } else if (psmouse_probe(psmouse) < 0 ||
- psmouse->type != psmouse_extensions(psmouse,
- psmouse_max_proto, false)) {
- goto out;
+ } else {
+ psmouse_reset(psmouse);
+
+ if (psmouse_probe(psmouse) < 0)
+ goto out;
+
+ type = psmouse_extensions(psmouse, psmouse_max_proto, false);
+ if (psmouse->type != type)
+ goto out;
}
/* ok, the device type (and capabilities) match the old one,
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index ebd7a99efeae..40cea334ad13 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -36,6 +36,8 @@
* The x/y limits are taken from the Synaptics TouchPad interfacing Guide,
* section 2.3.2, which says that they should be valid regardless of the
* actual size of the sensor.
+ * Note that newer firmware allows querying the device for the maximum
+ * usable coordinates.
*/
#define XMIN_NOMINAL 1472
#define XMAX_NOMINAL 5472
@@ -194,23 +196,33 @@ static int synaptics_identify(struct psmouse *psmouse)
}
/*
- * Read touchpad resolution
+ * Read touchpad resolution and maximum reported coordinates
* Resolution is left zero if touchpad does not support the query
*/
static int synaptics_resolution(struct psmouse *psmouse)
{
struct synaptics_data *priv = psmouse->private;
unsigned char res[3];
+ unsigned char max[3];
if (SYN_ID_MAJOR(priv->identity) < 4)
		return 0;
- if (synaptics_send_cmd(psmouse, SYN_QUE_RESOLUTION, res))
- return 0;
+ if (synaptics_send_cmd(psmouse, SYN_QUE_RESOLUTION, res) == 0) {
+ if (res[0] != 0 && (res[1] & 0x80) && res[2] != 0) {
+ priv->x_res = res[0]; /* x resolution in units/mm */
+ priv->y_res = res[2]; /* y resolution in units/mm */
+ }
+ }
- if ((res[0] != 0) && (res[1] & 0x80) && (res[2] != 0)) {
- priv->x_res = res[0]; /* x resolution in units/mm */
- priv->y_res = res[2]; /* y resolution in units/mm */
+ if (SYN_EXT_CAP_REQUESTS(priv->capabilities) >= 5 &&
+ SYN_CAP_MAX_DIMENSIONS(priv->ext_cap_0c)) {
+ if (synaptics_send_cmd(psmouse, SYN_QUE_EXT_DIMENSIONS, max)) {
+ printk(KERN_ERR "Synaptics claims to have dimensions query,"
+ " but I'm not able to read it.\n");
+ } else {
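+			/* unpack the maximum X/Y coordinates reported by the firmware */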
+ priv->x_max = (max[0] << 5) | ((max[1] & 0x0f) << 1);
+ priv->y_max = (max[2] << 5) | ((max[1] & 0xf0) >> 3);
+ }
}
return 0;
@@ -520,19 +532,20 @@ static int synaptics_validate_byte(unsigned char packet[], int idx, unsigned cha
return 0;
switch (pkt_type) {
- case SYN_NEWABS:
- case SYN_NEWABS_RELAXED:
- return (packet[idx] & newabs_rel_mask[idx]) == newabs_rslt[idx];
- case SYN_NEWABS_STRICT:
- return (packet[idx] & newabs_mask[idx]) == newabs_rslt[idx];
+ case SYN_NEWABS:
+ case SYN_NEWABS_RELAXED:
+ return (packet[idx] & newabs_rel_mask[idx]) == newabs_rslt[idx];
- case SYN_OLDABS:
- return (packet[idx] & oldabs_mask[idx]) == oldabs_rslt[idx];
+ case SYN_NEWABS_STRICT:
+ return (packet[idx] & newabs_mask[idx]) == newabs_rslt[idx];
- default:
- printk(KERN_ERR "synaptics: unknown packet type %d\n", pkt_type);
- return 0;
+ case SYN_OLDABS:
+ return (packet[idx] & oldabs_mask[idx]) == oldabs_rslt[idx];
+
+ default:
+ printk(KERN_ERR "synaptics: unknown packet type %d\n", pkt_type);
+ return 0;
}
}
@@ -578,8 +591,10 @@ static void set_input_params(struct input_dev *dev, struct synaptics_data *priv)
int i;
__set_bit(EV_ABS, dev->evbit);
- input_set_abs_params(dev, ABS_X, XMIN_NOMINAL, XMAX_NOMINAL, 0, 0);
- input_set_abs_params(dev, ABS_Y, YMIN_NOMINAL, YMAX_NOMINAL, 0, 0);
+ input_set_abs_params(dev, ABS_X,
+ XMIN_NOMINAL, priv->x_max ?: XMAX_NOMINAL, 0, 0);
+ input_set_abs_params(dev, ABS_Y,
+ YMIN_NOMINAL, priv->y_max ?: YMAX_NOMINAL, 0, 0);
input_set_abs_params(dev, ABS_PRESSURE, 0, 255, 0, 0);
__set_bit(ABS_TOOL_WIDTH, dev->absbit);
diff --git a/drivers/input/mouse/synaptics.h b/drivers/input/mouse/synaptics.h
index ae37c5d162a4..7d4d5e12c0df 100644
--- a/drivers/input/mouse/synaptics.h
+++ b/drivers/input/mouse/synaptics.h
@@ -19,6 +19,7 @@
#define SYN_QUE_RESOLUTION 0x08
#define SYN_QUE_EXT_CAPAB 0x09
#define SYN_QUE_EXT_CAPAB_0C 0x0c
+#define SYN_QUE_EXT_DIMENSIONS 0x0d
/* synatics modes */
#define SYN_BIT_ABSOLUTE_MODE (1 << 7)
@@ -51,6 +52,7 @@
#define SYN_CAP_MULTI_BUTTON_NO(ec) (((ec) & 0x00f000) >> 12)
#define SYN_CAP_PRODUCT_ID(ec) (((ec) & 0xff0000) >> 16)
#define SYN_CAP_CLICKPAD(ex0c) ((ex0c) & 0x100100)
+#define SYN_CAP_MAX_DIMENSIONS(ex0c) ((ex0c) & 0x020000)
/* synaptics modes query bits */
#define SYN_MODE_ABSOLUTE(m) ((m) & (1 << 7))
@@ -101,8 +103,8 @@ struct synaptics_data {
unsigned long int ext_cap; /* Extended Capabilities */
unsigned long int ext_cap_0c; /* Ext Caps from 0x0c query */
unsigned long int identity; /* Identification */
- int x_res; /* X resolution in units/mm */
- int y_res; /* Y resolution in units/mm */
+ unsigned int x_res, y_res; /* X/Y resolution in units/mm */
+ unsigned int x_max, y_max; /* Max dimensions (from FW) */
unsigned char pkt_type; /* packet type - old, new, etc */
unsigned char mode; /* current mode byte */
diff --git a/drivers/input/serio/Kconfig b/drivers/input/serio/Kconfig
index 7e319d65ec57..f34f1dbeb577 100644
--- a/drivers/input/serio/Kconfig
+++ b/drivers/input/serio/Kconfig
@@ -209,4 +209,20 @@ config SERIO_ALTERA_PS2
To compile this driver as a module, choose M here: the
module will be called altera_ps2.
+config SERIO_AMS_DELTA
+ tristate "Amstrad Delta (E3) mailboard support"
+ depends on MACH_AMS_DELTA
+ default y
+ select AMS_DELTA_FIQ
+ ---help---
+ Say Y here if you have an E3 and want to use its mailboard,
+ or any standard AT keyboard connected to the mailboard port.
+
+ When used with the E3 mailboard, a non-standard key table
+ must be loaded from userspace, for example with the keymap
+ helper utility provided by udev extras.
+
+ To compile this driver as a module, choose M here;
+ the module will be called ams_delta_serio.
+
endif
diff --git a/drivers/input/serio/Makefile b/drivers/input/serio/Makefile
index bf945f789d05..84c80bf7185e 100644
--- a/drivers/input/serio/Makefile
+++ b/drivers/input/serio/Makefile
@@ -21,5 +21,6 @@ obj-$(CONFIG_SERIO_PCIPS2) += pcips2.o
obj-$(CONFIG_SERIO_MACEPS2) += maceps2.o
obj-$(CONFIG_SERIO_LIBPS2) += libps2.o
obj-$(CONFIG_SERIO_RAW) += serio_raw.o
+obj-$(CONFIG_SERIO_AMS_DELTA) += ams_delta_serio.o
obj-$(CONFIG_SERIO_XILINX_XPS_PS2) += xilinx_ps2.o
obj-$(CONFIG_SERIO_ALTERA_PS2) += altera_ps2.o
diff --git a/drivers/input/serio/ams_delta_serio.c b/drivers/input/serio/ams_delta_serio.c
new file mode 100644
index 000000000000..8f1770e1e08b
--- /dev/null
+++ b/drivers/input/serio/ams_delta_serio.c
@@ -0,0 +1,177 @@
+/*
+ * Amstrad E3 (Delta) keyboard port driver
+ *
+ * Copyright (c) 2006 Matt Callow
+ * Copyright (c) 2010 Janusz Krzysztofik
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * Thanks to Cliff Lawson for his help
+ *
+ * The Amstrad Delta keyboard (aka mailboard) uses normal PC-AT style serial
+ * transmission. The keyboard port is formed of two GPIO lines, for clock
+ * and data. Due to strict timing requirements of the interface,
+ * the serial data stream is read and processed by a FIQ handler.
+ * The resulting words are fetched by this driver from a circular buffer.
+ *
+ * Standard AT keyboard driver (atkbd) is used for handling the keyboard data.
+ * However, when used with the E3 mailboard that produces non-standard
+ * scancodes, a custom key table must be prepared and loaded from userspace.
+ */
+#include <linux/gpio.h>
+#include <linux/irq.h>
+#include <linux/serio.h>
+#include <linux/slab.h>
+
+#include <asm/mach-types.h>
+#include <plat/board-ams-delta.h>
+
+#include <mach/ams-delta-fiq.h>
+
+MODULE_AUTHOR("Matt Callow");
+MODULE_DESCRIPTION("AMS Delta (E3) keyboard port driver");
+MODULE_LICENSE("GPL");
+
+static struct serio *ams_delta_serio;
+
+static int check_data(int data)
+{
+ int i, parity = 0;
+
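+ /* each word is an 11-bit AT frame: start bit, 8 data bits, odd parity, stop bit */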
+ /* check valid stop bit */
+ if (!(data & 0x400)) {
+ dev_warn(&ams_delta_serio->dev,
+ "invalid stop bit, data=0x%X\n",
+ data);
+ return SERIO_FRAME;
+ }
+ /* calculate the parity */
+ for (i = 1; i < 10; i++) {
+ if (data & (1 << i))
+ parity++;
+ }
+ /* it should be odd */
+ if (!(parity & 0x01)) {
+ dev_warn(&ams_delta_serio->dev,
+ "paritiy check failed, data=0x%X parity=0x%X\n",
+ data, parity);
+ return SERIO_PARITY;
+ }
+ return 0;
+}
+
+static irqreturn_t ams_delta_serio_interrupt(int irq, void *dev_id)
+{
+ int *circ_buff = &fiq_buffer[FIQ_CIRC_BUFF];
+ int data, dfl;
+ u8 scancode;
+
+ fiq_buffer[FIQ_IRQ_PEND] = 0;
+
+ /*
+ * Read data from the circular buffer, check it
+ * and then pass it on to serio
+ */
+ while (fiq_buffer[FIQ_KEYS_CNT] > 0) {
+
+ data = circ_buff[fiq_buffer[FIQ_HEAD_OFFSET]++];
+ fiq_buffer[FIQ_KEYS_CNT]--;
+ if (fiq_buffer[FIQ_HEAD_OFFSET] == fiq_buffer[FIQ_BUF_LEN])
+ fiq_buffer[FIQ_HEAD_OFFSET] = 0;
+
+ dfl = check_data(data);
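+ /* bits 1-8 of the received word carry the scancode */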
+ scancode = (u8) (data >> 1) & 0xFF;
+ serio_interrupt(ams_delta_serio, scancode, dfl);
+ }
+ return IRQ_HANDLED;
+}
+
+static int ams_delta_serio_open(struct serio *serio)
+{
+ /* enable keyboard */
+ ams_delta_latch2_write(AMD_DELTA_LATCH2_KEYBRD_PWR,
+ AMD_DELTA_LATCH2_KEYBRD_PWR);
+
+ return 0;
+}
+
+static void ams_delta_serio_close(struct serio *serio)
+{
+ /* disable keyboard */
+ ams_delta_latch2_write(AMD_DELTA_LATCH2_KEYBRD_PWR, 0);
+}
+
+static int __init ams_delta_serio_init(void)
+{
+ int err;
+
+ if (!machine_is_ams_delta())
+ return -ENODEV;
+
+ ams_delta_serio = kzalloc(sizeof(struct serio), GFP_KERNEL);
+ if (!ams_delta_serio)
+ return -ENOMEM;
+
+ ams_delta_serio->id.type = SERIO_8042;
+ ams_delta_serio->open = ams_delta_serio_open;
+ ams_delta_serio->close = ams_delta_serio_close;
+ strlcpy(ams_delta_serio->name, "AMS DELTA keyboard adapter",
+ sizeof(ams_delta_serio->name));
+ strlcpy(ams_delta_serio->phys, "GPIO/serio0",
+ sizeof(ams_delta_serio->phys));
+
+ err = gpio_request(AMS_DELTA_GPIO_PIN_KEYBRD_DATA, "serio-data");
+ if (err) {
+ pr_err("ams_delta_serio: Couldn't request gpio pin for data\n");
+ goto serio;
+ }
+ gpio_direction_input(AMS_DELTA_GPIO_PIN_KEYBRD_DATA);
+
+ err = gpio_request(AMS_DELTA_GPIO_PIN_KEYBRD_CLK, "serio-clock");
+ if (err) {
+ pr_err("ams_delta_serio: couldn't request gpio pin for clock\n");
+ goto gpio_data;
+ }
+ gpio_direction_input(AMS_DELTA_GPIO_PIN_KEYBRD_CLK);
+
+ err = request_irq(gpio_to_irq(AMS_DELTA_GPIO_PIN_KEYBRD_CLK),
+ ams_delta_serio_interrupt, IRQ_TYPE_EDGE_RISING,
+ "ams-delta-serio", 0);
+ if (err < 0) {
+ pr_err("ams_delta_serio: couldn't request gpio interrupt %d\n",
+ gpio_to_irq(AMS_DELTA_GPIO_PIN_KEYBRD_CLK));
+ goto gpio_clk;
+ }
+ /*
+ * Since GPIO register handling for keyboard clock pin is performed
+ * at FIQ level, switch back from edge to simple interrupt handler
+ * to avoid bad interaction.
+ */
+ set_irq_handler(gpio_to_irq(AMS_DELTA_GPIO_PIN_KEYBRD_CLK),
+ handle_simple_irq);
+
+ serio_register_port(ams_delta_serio);
+ dev_info(&ams_delta_serio->dev, "%s\n", ams_delta_serio->name);
+
+ return 0;
+gpio_clk:
+ gpio_free(AMS_DELTA_GPIO_PIN_KEYBRD_CLK);
+gpio_data:
+ gpio_free(AMS_DELTA_GPIO_PIN_KEYBRD_DATA);
+serio:
+ kfree(ams_delta_serio);
+ return err;
+}
+module_init(ams_delta_serio_init);
+
+static void __exit ams_delta_serio_exit(void)
+{
+ serio_unregister_port(ams_delta_serio);
+ free_irq(OMAP_GPIO_IRQ(AMS_DELTA_GPIO_PIN_KEYBRD_CLK), 0);
+ gpio_free(AMS_DELTA_GPIO_PIN_KEYBRD_CLK);
+ gpio_free(AMS_DELTA_GPIO_PIN_KEYBRD_DATA);
+ kfree(ams_delta_serio);
+}
+module_exit(ams_delta_serio_exit);
diff --git a/drivers/input/serio/i8042-sparcio.h b/drivers/input/serio/i8042-sparcio.h
index 5071af2c0604..04e32f2d1241 100644
--- a/drivers/input/serio/i8042-sparcio.h
+++ b/drivers/input/serio/i8042-sparcio.h
@@ -51,7 +51,7 @@ static inline void i8042_write_command(int val)
static int __devinit sparc_i8042_probe(struct of_device *op, const struct of_device_id *match)
{
- struct device_node *dp = op->node;
+ struct device_node *dp = op->dev.of_node;
dp = dp->child;
while (dp) {
@@ -96,8 +96,11 @@ static const struct of_device_id sparc_i8042_match[] = {
MODULE_DEVICE_TABLE(of, sparc_i8042_match);
static struct of_platform_driver sparc_i8042_driver = {
- .name = "i8042",
- .match_table = sparc_i8042_match,
+ .driver = {
+ .name = "i8042",
+ .owner = THIS_MODULE,
+ .of_match_table = sparc_i8042_match,
+ },
.probe = sparc_i8042_probe,
.remove = __devexit_p(sparc_i8042_remove),
};
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index ead0494721d0..6168469ad1a6 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -660,8 +660,21 @@ static int i8042_pnp_aux_probe(struct pnp_dev *dev, const struct pnp_device_id *
}
static struct pnp_device_id pnp_kbd_devids[] = {
+ { .id = "PNP0300", .driver_data = 0 },
+ { .id = "PNP0301", .driver_data = 0 },
+ { .id = "PNP0302", .driver_data = 0 },
{ .id = "PNP0303", .driver_data = 0 },
+ { .id = "PNP0304", .driver_data = 0 },
+ { .id = "PNP0305", .driver_data = 0 },
+ { .id = "PNP0306", .driver_data = 0 },
+ { .id = "PNP0309", .driver_data = 0 },
+ { .id = "PNP030a", .driver_data = 0 },
{ .id = "PNP030b", .driver_data = 0 },
+ { .id = "PNP0320", .driver_data = 0 },
+ { .id = "PNP0343", .driver_data = 0 },
+ { .id = "PNP0344", .driver_data = 0 },
+ { .id = "PNP0345", .driver_data = 0 },
+ { .id = "CPQA0D7", .driver_data = 0 },
{ .id = "", },
};
@@ -672,6 +685,7 @@ static struct pnp_driver i8042_pnp_kbd_driver = {
};
static struct pnp_device_id pnp_aux_devids[] = {
+ { .id = "AUI0200", .driver_data = 0 },
{ .id = "FJC6000", .driver_data = 0 },
{ .id = "FJC6001", .driver_data = 0 },
{ .id = "PNP0f03", .driver_data = 0 },
diff --git a/drivers/input/serio/xilinx_ps2.c b/drivers/input/serio/xilinx_ps2.c
index f84f8e32e3f1..e2c028d2638f 100644
--- a/drivers/input/serio/xilinx_ps2.c
+++ b/drivers/input/serio/xilinx_ps2.c
@@ -244,17 +244,17 @@ static int __devinit xps2_of_probe(struct of_device *ofdev,
int error;
dev_info(dev, "Device Tree Probing \'%s\'\n",
- ofdev->node->name);
+ ofdev->dev.of_node->name);
/* Get iospace for the device */
- error = of_address_to_resource(ofdev->node, 0, &r_mem);
+ error = of_address_to_resource(ofdev->dev.of_node, 0, &r_mem);
if (error) {
dev_err(dev, "invalid address\n");
return error;
}
/* Get IRQ for the device */
- if (of_irq_to_resource(ofdev->node, 0, &r_irq) == NO_IRQ) {
+ if (of_irq_to_resource(ofdev->dev.of_node, 0, &r_irq) == NO_IRQ) {
dev_err(dev, "no IRQ found\n");
return -ENODEV;
}
@@ -342,7 +342,7 @@ static int __devexit xps2_of_remove(struct of_device *of_dev)
iounmap(drvdata->base_address);
/* Get iospace of the device */
- if (of_address_to_resource(of_dev->node, 0, &r_mem))
+ if (of_address_to_resource(of_dev->dev.of_node, 0, &r_mem))
dev_err(dev, "invalid address\n");
else
release_mem_region(r_mem.start, resource_size(&r_mem));
@@ -362,8 +362,11 @@ static const struct of_device_id xps2_of_match[] __devinitconst = {
MODULE_DEVICE_TABLE(of, xps2_of_match);
static struct of_platform_driver xps2_of_driver = {
- .name = DRIVER_NAME,
- .match_table = xps2_of_match,
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = xps2_of_match,
+ },
.probe = xps2_of_probe,
.remove = __devexit_p(xps2_of_remove),
};
diff --git a/drivers/input/tablet/acecad.c b/drivers/input/tablet/acecad.c
index 670c61c5a516..aea9a9399a36 100644
--- a/drivers/input/tablet/acecad.c
+++ b/drivers/input/tablet/acecad.c
@@ -66,18 +66,18 @@ static void usb_acecad_irq(struct urb *urb)
int prox, status;
switch (urb->status) {
- case 0:
- /* success */
- break;
- case -ECONNRESET:
- case -ENOENT:
- case -ESHUTDOWN:
- /* this urb is terminated, clean up */
- dbg("%s - urb shutting down with status: %d", __func__, urb->status);
- return;
- default:
- dbg("%s - nonzero urb status received: %d", __func__, urb->status);
- goto resubmit;
+ case 0:
+ /* success */
+ break;
+ case -ECONNRESET:
+ case -ENOENT:
+ case -ESHUTDOWN:
+ /* this urb is terminated, clean up */
+ dbg("%s - urb shutting down with status: %d", __func__, urb->status);
+ return;
+ default:
+ dbg("%s - nonzero urb status received: %d", __func__, urb->status);
+ goto resubmit;
}
prox = (data[0] & 0x04) >> 2;
@@ -135,7 +135,7 @@ static int usb_acecad_probe(struct usb_interface *intf, const struct usb_device_
struct usb_acecad *acecad;
struct input_dev *input_dev;
int pipe, maxp;
- int err = -ENOMEM;
+ int err;
if (interface->desc.bNumEndpoints != 1)
return -ENODEV;
@@ -155,7 +155,7 @@ static int usb_acecad_probe(struct usb_interface *intf, const struct usb_device_
goto fail1;
}
- acecad->data = usb_buffer_alloc(dev, 8, GFP_KERNEL, &acecad->data_dma);
+ acecad->data = usb_alloc_coherent(dev, 8, GFP_KERNEL, &acecad->data_dma);
if (!acecad->data) {
err= -ENOMEM;
goto fail1;
@@ -193,40 +193,34 @@ static int usb_acecad_probe(struct usb_interface *intf, const struct usb_device_
input_dev->close = usb_acecad_close;
input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
- input_dev->absbit[0] = BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) |
- BIT_MASK(ABS_PRESSURE);
- input_dev->keybit[BIT_WORD(BTN_LEFT)] = BIT_MASK(BTN_LEFT) |
- BIT_MASK(BTN_RIGHT) | BIT_MASK(BTN_MIDDLE);
input_dev->keybit[BIT_WORD(BTN_DIGI)] = BIT_MASK(BTN_TOOL_PEN) |
BIT_MASK(BTN_TOUCH) | BIT_MASK(BTN_STYLUS) |
BIT_MASK(BTN_STYLUS2);
switch (id->driver_info) {
- case 0:
- input_dev->absmax[ABS_X] = 5000;
- input_dev->absmax[ABS_Y] = 3750;
- input_dev->absmax[ABS_PRESSURE] = 512;
- if (!strlen(acecad->name))
- snprintf(acecad->name, sizeof(acecad->name),
- "USB Acecad Flair Tablet %04x:%04x",
- le16_to_cpu(dev->descriptor.idVendor),
- le16_to_cpu(dev->descriptor.idProduct));
- break;
- case 1:
- input_dev->absmax[ABS_X] = 3000;
- input_dev->absmax[ABS_Y] = 2250;
- input_dev->absmax[ABS_PRESSURE] = 1024;
- if (!strlen(acecad->name))
- snprintf(acecad->name, sizeof(acecad->name),
- "USB Acecad 302 Tablet %04x:%04x",
- le16_to_cpu(dev->descriptor.idVendor),
- le16_to_cpu(dev->descriptor.idProduct));
- break;
+ case 0:
+ input_set_abs_params(input_dev, ABS_X, 0, 5000, 4, 0);
+ input_set_abs_params(input_dev, ABS_Y, 0, 3750, 4, 0);
+ input_set_abs_params(input_dev, ABS_PRESSURE, 0, 512, 0, 0);
+ if (!strlen(acecad->name))
+ snprintf(acecad->name, sizeof(acecad->name),
+ "USB Acecad Flair Tablet %04x:%04x",
+ le16_to_cpu(dev->descriptor.idVendor),
+ le16_to_cpu(dev->descriptor.idProduct));
+ break;
+
+ case 1:
+ input_set_abs_params(input_dev, ABS_X, 0, 3000, 4, 0);
+ input_set_abs_params(input_dev, ABS_Y, 0, 2250, 4, 0);
+ input_set_abs_params(input_dev, ABS_PRESSURE, 0, 1024, 0, 0);
+ if (!strlen(acecad->name))
+ snprintf(acecad->name, sizeof(acecad->name),
+ "USB Acecad 302 Tablet %04x:%04x",
+ le16_to_cpu(dev->descriptor.idVendor),
+ le16_to_cpu(dev->descriptor.idProduct));
+ break;
}
- input_dev->absfuzz[ABS_X] = 4;
- input_dev->absfuzz[ABS_Y] = 4;
-
usb_fill_int_urb(acecad->irq, dev, pipe,
acecad->data, maxp > 8 ? 8 : maxp,
usb_acecad_irq, acecad, endpoint->bInterval);
@@ -241,7 +235,7 @@ static int usb_acecad_probe(struct usb_interface *intf, const struct usb_device_
return 0;
- fail2: usb_buffer_free(dev, 8, acecad->data, acecad->data_dma);
+ fail2: usb_free_coherent(dev, 8, acecad->data, acecad->data_dma);
fail1: input_free_device(input_dev);
kfree(acecad);
return err;
@@ -252,13 +246,11 @@ static void usb_acecad_disconnect(struct usb_interface *intf)
struct usb_acecad *acecad = usb_get_intfdata(intf);
usb_set_intfdata(intf, NULL);
- if (acecad) {
- usb_kill_urb(acecad->irq);
- input_unregister_device(acecad->input);
- usb_free_urb(acecad->irq);
- usb_buffer_free(interface_to_usbdev(intf), 10, acecad->data, acecad->data_dma);
- kfree(acecad);
- }
+
+ input_unregister_device(acecad->input);
+ usb_free_urb(acecad->irq);
+ usb_free_coherent(acecad->usbdev, 8, acecad->data, acecad->data_dma);
+ kfree(acecad);
}
static struct usb_device_id usb_acecad_id_table [] = {
diff --git a/drivers/input/tablet/aiptek.c b/drivers/input/tablet/aiptek.c
index 4be039d7dcad..51b80b08d467 100644
--- a/drivers/input/tablet/aiptek.c
+++ b/drivers/input/tablet/aiptek.c
@@ -1711,8 +1711,8 @@ aiptek_probe(struct usb_interface *intf, const struct usb_device_id *id)
goto fail1;
}
- aiptek->data = usb_buffer_alloc(usbdev, AIPTEK_PACKET_LENGTH,
- GFP_ATOMIC, &aiptek->data_dma);
+ aiptek->data = usb_alloc_coherent(usbdev, AIPTEK_PACKET_LENGTH,
+ GFP_ATOMIC, &aiptek->data_dma);
if (!aiptek->data) {
dev_warn(&intf->dev, "cannot allocate usb buffer\n");
goto fail1;
@@ -1884,8 +1884,8 @@ aiptek_probe(struct usb_interface *intf, const struct usb_device_id *id)
fail4: sysfs_remove_group(&intf->dev.kobj, &aiptek_attribute_group);
fail3: usb_free_urb(aiptek->urb);
- fail2: usb_buffer_free(usbdev, AIPTEK_PACKET_LENGTH, aiptek->data,
- aiptek->data_dma);
+ fail2: usb_free_coherent(usbdev, AIPTEK_PACKET_LENGTH, aiptek->data,
+ aiptek->data_dma);
fail1: usb_set_intfdata(intf, NULL);
input_free_device(inputdev);
kfree(aiptek);
@@ -1909,9 +1909,9 @@ static void aiptek_disconnect(struct usb_interface *intf)
input_unregister_device(aiptek->inputdev);
sysfs_remove_group(&intf->dev.kobj, &aiptek_attribute_group);
usb_free_urb(aiptek->urb);
- usb_buffer_free(interface_to_usbdev(intf),
- AIPTEK_PACKET_LENGTH,
- aiptek->data, aiptek->data_dma);
+ usb_free_coherent(interface_to_usbdev(intf),
+ AIPTEK_PACKET_LENGTH,
+ aiptek->data, aiptek->data_dma);
kfree(aiptek);
}
}
diff --git a/drivers/input/tablet/gtco.c b/drivers/input/tablet/gtco.c
index 866a9ee1af1a..8ea6afe2e992 100644
--- a/drivers/input/tablet/gtco.c
+++ b/drivers/input/tablet/gtco.c
@@ -850,8 +850,8 @@ static int gtco_probe(struct usb_interface *usbinterface,
gtco->usbdev = usb_get_dev(interface_to_usbdev(usbinterface));
/* Allocate some data for incoming reports */
- gtco->buffer = usb_buffer_alloc(gtco->usbdev, REPORT_MAX_SIZE,
- GFP_KERNEL, &gtco->buf_dma);
+ gtco->buffer = usb_alloc_coherent(gtco->usbdev, REPORT_MAX_SIZE,
+ GFP_KERNEL, &gtco->buf_dma);
if (!gtco->buffer) {
err("No more memory for us buffers");
error = -ENOMEM;
@@ -982,8 +982,8 @@ static int gtco_probe(struct usb_interface *usbinterface,
err_free_urb:
usb_free_urb(gtco->urbinfo);
err_free_buf:
- usb_buffer_free(gtco->usbdev, REPORT_MAX_SIZE,
- gtco->buffer, gtco->buf_dma);
+ usb_free_coherent(gtco->usbdev, REPORT_MAX_SIZE,
+ gtco->buffer, gtco->buf_dma);
err_free_devs:
input_free_device(input_dev);
kfree(gtco);
@@ -1005,8 +1005,8 @@ static void gtco_disconnect(struct usb_interface *interface)
input_unregister_device(gtco->inputdevice);
usb_kill_urb(gtco->urbinfo);
usb_free_urb(gtco->urbinfo);
- usb_buffer_free(gtco->usbdev, REPORT_MAX_SIZE,
- gtco->buffer, gtco->buf_dma);
+ usb_free_coherent(gtco->usbdev, REPORT_MAX_SIZE,
+ gtco->buffer, gtco->buf_dma);
kfree(gtco);
}
diff --git a/drivers/input/tablet/kbtab.c b/drivers/input/tablet/kbtab.c
index 6682b17bf844..290f4e57b589 100644
--- a/drivers/input/tablet/kbtab.c
+++ b/drivers/input/tablet/kbtab.c
@@ -34,10 +34,6 @@ struct kbtab {
struct input_dev *dev;
struct usb_device *usbdev;
struct urb *irq;
- int x, y;
- int button;
- int pressure;
- __u32 serial[2];
char phys[32];
};
@@ -46,6 +42,7 @@ static void kbtab_irq(struct urb *urb)
struct kbtab *kbtab = urb->context;
unsigned char *data = kbtab->data;
struct input_dev *dev = kbtab->dev;
+ int pressure;
int retval;
switch (urb->status) {
@@ -63,31 +60,27 @@ static void kbtab_irq(struct urb *urb)
goto exit;
}
- kbtab->x = get_unaligned_le16(&data[1]);
- kbtab->y = get_unaligned_le16(&data[3]);
-
- kbtab->pressure = (data[5]);
input_report_key(dev, BTN_TOOL_PEN, 1);
- input_report_abs(dev, ABS_X, kbtab->x);
- input_report_abs(dev, ABS_Y, kbtab->y);
+ input_report_abs(dev, ABS_X, get_unaligned_le16(&data[1]));
+ input_report_abs(dev, ABS_Y, get_unaligned_le16(&data[3]));
/*input_report_key(dev, BTN_TOUCH , data[0] & 0x01);*/
input_report_key(dev, BTN_RIGHT, data[0] & 0x02);
- if (-1 == kb_pressure_click) {
- input_report_abs(dev, ABS_PRESSURE, kbtab->pressure);
- } else {
- input_report_key(dev, BTN_LEFT, (kbtab->pressure > kb_pressure_click) ? 1 : 0);
- };
+ pressure = data[5];
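+ /* report raw pressure unless kb_pressure_click sets a click threshold */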
+ if (kb_pressure_click == -1)
+ input_report_abs(dev, ABS_PRESSURE, pressure);
+ else
+ input_report_key(dev, BTN_LEFT, pressure > kb_pressure_click ? 1 : 0);
input_sync(dev);
exit:
- retval = usb_submit_urb (urb, GFP_ATOMIC);
+ retval = usb_submit_urb(urb, GFP_ATOMIC);
if (retval)
- err ("%s - usb_submit_urb failed with result %d",
+ err("%s - usb_submit_urb failed with result %d",
__func__, retval);
}
@@ -129,7 +122,7 @@ static int kbtab_probe(struct usb_interface *intf, const struct usb_device_id *i
if (!kbtab || !input_dev)
goto fail1;
- kbtab->data = usb_buffer_alloc(dev, 8, GFP_KERNEL, &kbtab->data_dma);
+ kbtab->data = usb_alloc_coherent(dev, 8, GFP_KERNEL, &kbtab->data_dma);
if (!kbtab->data)
goto fail1;
@@ -153,13 +146,11 @@ static int kbtab_probe(struct usb_interface *intf, const struct usb_device_id *i
input_dev->open = kbtab_open;
input_dev->close = kbtab_close;
- input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS) |
- BIT_MASK(EV_MSC);
- input_dev->keybit[BIT_WORD(BTN_LEFT)] |= BIT_MASK(BTN_LEFT) |
- BIT_MASK(BTN_RIGHT) | BIT_MASK(BTN_MIDDLE);
- input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOOL_PEN) |
- BIT_MASK(BTN_TOUCH);
- input_dev->mscbit[0] |= BIT_MASK(MSC_SERIAL);
+ input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
+ input_dev->keybit[BIT_WORD(BTN_LEFT)] |=
+ BIT_MASK(BTN_LEFT) | BIT_MASK(BTN_RIGHT);
+ input_dev->keybit[BIT_WORD(BTN_DIGI)] |=
+ BIT_MASK(BTN_TOOL_PEN) | BIT_MASK(BTN_TOUCH);
input_set_abs_params(input_dev, ABS_X, 0, 0x2000, 4, 0);
input_set_abs_params(input_dev, ABS_Y, 0, 0x1750, 4, 0);
input_set_abs_params(input_dev, ABS_PRESSURE, 0, 0xff, 0, 0);
@@ -182,7 +173,7 @@ static int kbtab_probe(struct usb_interface *intf, const struct usb_device_id *i
return 0;
fail3: usb_free_urb(kbtab->irq);
- fail2: usb_buffer_free(dev, 10, kbtab->data, kbtab->data_dma);
+ fail2: usb_free_coherent(dev, 8, kbtab->data, kbtab->data_dma);
fail1: input_free_device(input_dev);
kfree(kbtab);
return error;
@@ -193,13 +184,11 @@ static void kbtab_disconnect(struct usb_interface *intf)
struct kbtab *kbtab = usb_get_intfdata(intf);
usb_set_intfdata(intf, NULL);
- if (kbtab) {
- usb_kill_urb(kbtab->irq);
- input_unregister_device(kbtab->dev);
- usb_free_urb(kbtab->irq);
- usb_buffer_free(interface_to_usbdev(intf), 10, kbtab->data, kbtab->data_dma);
- kfree(kbtab);
- }
+
+ input_unregister_device(kbtab->dev);
+ usb_free_urb(kbtab->irq);
+ usb_free_coherent(kbtab->usbdev, 8, kbtab->data, kbtab->data_dma);
+ kfree(kbtab);
}
static struct usb_driver kbtab_driver = {
diff --git a/drivers/input/tablet/wacom.h b/drivers/input/tablet/wacom.h
index 8fef1b689c69..284dfaab6b8c 100644
--- a/drivers/input/tablet/wacom.h
+++ b/drivers/input/tablet/wacom.h
@@ -106,44 +106,18 @@ MODULE_LICENSE(DRIVER_LICENSE);
struct wacom {
dma_addr_t data_dma;
- struct input_dev *dev;
struct usb_device *usbdev;
struct usb_interface *intf;
struct urb *irq;
- struct wacom_wac *wacom_wac;
+ struct wacom_wac wacom_wac;
struct mutex lock;
- unsigned int open:1;
+ bool open;
char phys[32];
};
-struct wacom_combo {
- struct wacom *wacom;
- struct urb *urb;
-};
-
extern const struct usb_device_id wacom_ids[];
-extern int wacom_wac_irq(struct wacom_wac * wacom_wac, void * wcombo);
-extern void wacom_report_abs(void *wcombo, unsigned int abs_type, int abs_data);
-extern void wacom_report_rel(void *wcombo, unsigned int rel_type, int rel_data);
-extern void wacom_report_key(void *wcombo, unsigned int key_type, int key_data);
-extern void wacom_input_event(void *wcombo, unsigned int type, unsigned int code, int value);
-extern void wacom_input_sync(void *wcombo);
-extern void wacom_init_input_dev(struct input_dev *input_dev, struct wacom_wac *wacom_wac);
-extern void input_dev_g4(struct input_dev *input_dev, struct wacom_wac *wacom_wac);
-extern void input_dev_g(struct input_dev *input_dev, struct wacom_wac *wacom_wac);
-extern void input_dev_i3s(struct input_dev *input_dev, struct wacom_wac *wacom_wac);
-extern void input_dev_i3(struct input_dev *input_dev, struct wacom_wac *wacom_wac);
-extern void input_dev_i(struct input_dev *input_dev, struct wacom_wac *wacom_wac);
-extern void input_dev_i4s(struct input_dev *input_dev, struct wacom_wac *wacom_wac);
-extern void input_dev_i4(struct input_dev *input_dev, struct wacom_wac *wacom_wac);
-extern void input_dev_pl(struct input_dev *input_dev, struct wacom_wac *wacom_wac);
-extern void input_dev_pt(struct input_dev *input_dev, struct wacom_wac *wacom_wac);
-extern void input_dev_tpc(struct input_dev *input_dev, struct wacom_wac *wacom_wac);
-extern void input_dev_tpc2fg(struct input_dev *input_dev, struct wacom_wac *wacom_wac);
-extern void input_dev_mo(struct input_dev *input_dev, struct wacom_wac *wacom_wac);
-extern void input_dev_bee(struct input_dev *input_dev, struct wacom_wac *wacom_wac);
-extern __u16 wacom_le16_to_cpu(unsigned char *data);
-extern __u16 wacom_be16_to_cpu(unsigned char *data);
-
+void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len);
+void wacom_setup_input_capabilities(struct input_dev *input_dev,
+ struct wacom_wac *wacom_wac);
#endif
diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
index f46502589e4e..2dc0c07c0469 100644
--- a/drivers/input/tablet/wacom_sys.c
+++ b/drivers/input/tablet/wacom_sys.c
@@ -11,8 +11,8 @@
* (at your option) any later version.
*/
-#include "wacom.h"
#include "wacom_wac.h"
+#include "wacom.h"
/* defines to get HID report descriptor */
#define HID_DEVICET_HID (USB_TYPE_CLASS | 0x01)
@@ -70,15 +70,9 @@ static int usb_set_report(struct usb_interface *intf, unsigned char type,
buf, size, 1000);
}
-static struct input_dev * get_input_dev(struct wacom_combo *wcombo)
-{
- return wcombo->wacom->dev;
-}
-
static void wacom_sys_irq(struct urb *urb)
{
struct wacom *wacom = urb->context;
- struct wacom_combo wcombo;
int retval;
switch (urb->status) {
@@ -96,59 +90,16 @@ static void wacom_sys_irq(struct urb *urb)
goto exit;
}
- wcombo.wacom = wacom;
- wcombo.urb = urb;
-
- if (wacom_wac_irq(wacom->wacom_wac, (void *)&wcombo))
- input_sync(get_input_dev(&wcombo));
+ wacom_wac_irq(&wacom->wacom_wac, urb->actual_length);
exit:
usb_mark_last_busy(wacom->usbdev);
- retval = usb_submit_urb (urb, GFP_ATOMIC);
+ retval = usb_submit_urb(urb, GFP_ATOMIC);
if (retval)
err ("%s - usb_submit_urb failed with result %d",
__func__, retval);
}
-void wacom_report_key(void *wcombo, unsigned int key_type, int key_data)
-{
- input_report_key(get_input_dev((struct wacom_combo *)wcombo), key_type, key_data);
-}
-
-void wacom_report_abs(void *wcombo, unsigned int abs_type, int abs_data)
-{
- input_report_abs(get_input_dev((struct wacom_combo *)wcombo), abs_type, abs_data);
-}
-
-void wacom_report_rel(void *wcombo, unsigned int rel_type, int rel_data)
-{
- input_report_rel(get_input_dev((struct wacom_combo *)wcombo), rel_type, rel_data);
-}
-
-void wacom_input_event(void *wcombo, unsigned int type, unsigned int code, int value)
-{
- input_event(get_input_dev((struct wacom_combo *)wcombo), type, code, value);
-}
-
-__u16 wacom_be16_to_cpu(unsigned char *data)
-{
- __u16 value;
- value = be16_to_cpu(*(__be16 *) data);
- return value;
-}
-
-__u16 wacom_le16_to_cpu(unsigned char *data)
-{
- __u16 value;
- value = le16_to_cpu(*(__le16 *) data);
- return value;
-}
-
-void wacom_input_sync(void *wcombo)
-{
- input_sync(get_input_dev((struct wacom_combo *)wcombo));
-}
-
static int wacom_open(struct input_dev *dev)
{
struct wacom *wacom = input_get_drvdata(dev);
@@ -168,7 +119,7 @@ static int wacom_open(struct input_dev *dev)
return -EIO;
}
- wacom->open = 1;
+ wacom->open = true;
wacom->intf->needs_remote_wakeup = 1;
mutex_unlock(&wacom->lock);
@@ -181,128 +132,11 @@ static void wacom_close(struct input_dev *dev)
mutex_lock(&wacom->lock);
usb_kill_urb(wacom->irq);
- wacom->open = 0;
+ wacom->open = false;
wacom->intf->needs_remote_wakeup = 0;
mutex_unlock(&wacom->lock);
}
-void input_dev_mo(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
-{
- input_dev->keybit[BIT_WORD(BTN_MISC)] |= BIT_MASK(BTN_1) |
- BIT_MASK(BTN_5);
- input_set_abs_params(input_dev, ABS_WHEEL, 0, 71, 0, 0);
-}
-
-void input_dev_g4(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
-{
- input_dev->evbit[0] |= BIT_MASK(EV_MSC);
- input_dev->mscbit[0] |= BIT_MASK(MSC_SERIAL);
- input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOOL_FINGER);
- input_dev->keybit[BIT_WORD(BTN_MISC)] |= BIT_MASK(BTN_0) |
- BIT_MASK(BTN_4);
-}
-
-void input_dev_g(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
-{
- input_dev->evbit[0] |= BIT_MASK(EV_REL);
- input_dev->relbit[0] |= BIT_MASK(REL_WHEEL);
- input_dev->keybit[BIT_WORD(BTN_MOUSE)] |= BIT_MASK(BTN_LEFT) |
- BIT_MASK(BTN_RIGHT) | BIT_MASK(BTN_MIDDLE);
- input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOOL_RUBBER) |
- BIT_MASK(BTN_TOOL_PEN) | BIT_MASK(BTN_STYLUS) |
- BIT_MASK(BTN_TOOL_MOUSE) | BIT_MASK(BTN_STYLUS2);
- input_set_abs_params(input_dev, ABS_DISTANCE,
- 0, wacom_wac->features.distance_max, 0, 0);
-}
-
-void input_dev_i3s(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
-{
- input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOOL_FINGER);
- input_dev->keybit[BIT_WORD(BTN_MISC)] |= BIT_MASK(BTN_0) |
- BIT_MASK(BTN_1) | BIT_MASK(BTN_2) | BIT_MASK(BTN_3);
- input_set_abs_params(input_dev, ABS_RX, 0, 4096, 0, 0);
- input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
-}
-
-void input_dev_i3(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
-{
- input_dev->keybit[BIT_WORD(BTN_MISC)] |= BIT_MASK(BTN_4) |
- BIT_MASK(BTN_5) | BIT_MASK(BTN_6) | BIT_MASK(BTN_7);
- input_set_abs_params(input_dev, ABS_RY, 0, 4096, 0, 0);
-}
-
-void input_dev_i4s(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
-{
- input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOOL_FINGER);
- input_dev->keybit[BIT_WORD(BTN_MISC)] |= BIT_MASK(BTN_0) | BIT_MASK(BTN_1) | BIT_MASK(BTN_2) | BIT_MASK(BTN_3);
- input_dev->keybit[BIT_WORD(BTN_MISC)] |= BIT_MASK(BTN_4) | BIT_MASK(BTN_5) | BIT_MASK(BTN_6);
- input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
-}
-
-void input_dev_i4(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
-{
- input_dev->keybit[BIT_WORD(BTN_MISC)] |= BIT_MASK(BTN_7) | BIT_MASK(BTN_8);
-}
-
-void input_dev_bee(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
-{
- input_dev->keybit[BIT_WORD(BTN_MISC)] |= BIT_MASK(BTN_8) | BIT_MASK(BTN_9);
-}
-
-void input_dev_i(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
-{
- input_dev->evbit[0] |= BIT_MASK(EV_MSC) | BIT_MASK(EV_REL);
- input_dev->mscbit[0] |= BIT_MASK(MSC_SERIAL);
- input_dev->relbit[0] |= BIT_MASK(REL_WHEEL);
- input_dev->keybit[BIT_WORD(BTN_MOUSE)] |= BIT_MASK(BTN_LEFT) |
- BIT_MASK(BTN_RIGHT) | BIT_MASK(BTN_MIDDLE) |
- BIT_MASK(BTN_SIDE) | BIT_MASK(BTN_EXTRA);
- input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOOL_RUBBER) |
- BIT_MASK(BTN_TOOL_PEN) | BIT_MASK(BTN_STYLUS) |
- BIT_MASK(BTN_TOOL_MOUSE) | BIT_MASK(BTN_TOOL_BRUSH) |
- BIT_MASK(BTN_TOOL_PENCIL) | BIT_MASK(BTN_TOOL_AIRBRUSH) |
- BIT_MASK(BTN_TOOL_LENS) | BIT_MASK(BTN_STYLUS2);
- input_set_abs_params(input_dev, ABS_DISTANCE,
- 0, wacom_wac->features.distance_max, 0, 0);
- input_set_abs_params(input_dev, ABS_WHEEL, 0, 1023, 0, 0);
- input_set_abs_params(input_dev, ABS_TILT_X, 0, 127, 0, 0);
- input_set_abs_params(input_dev, ABS_TILT_Y, 0, 127, 0, 0);
- input_set_abs_params(input_dev, ABS_RZ, -900, 899, 0, 0);
- input_set_abs_params(input_dev, ABS_THROTTLE, -1023, 1023, 0, 0);
-}
-
-void input_dev_pl(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
-{
- input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOOL_PEN) |
- BIT_MASK(BTN_STYLUS) | BIT_MASK(BTN_STYLUS2);
-}
-
-void input_dev_pt(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
-{
- input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOOL_RUBBER);
-}
-
-void input_dev_tpc(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
-{
- struct wacom_features *features = &wacom_wac->features;
-
- if (features->device_type == BTN_TOOL_DOUBLETAP ||
- features->device_type == BTN_TOOL_TRIPLETAP) {
- input_set_abs_params(input_dev, ABS_RX, 0, features->x_phy, 0, 0);
- input_set_abs_params(input_dev, ABS_RY, 0, features->y_phy, 0, 0);
- __set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit);
- }
-}
-
-void input_dev_tpc2fg(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
-{
- if (wacom_wac->features.device_type == BTN_TOOL_TRIPLETAP) {
- input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOOL_TRIPLETAP);
- input_dev->evbit[0] |= BIT_MASK(EV_MSC);
- input_dev->mscbit[0] |= BIT_MASK(MSC_SERIAL);
- }
-}
-
static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hid_desc,
struct wacom_features *features)
{
@@ -362,9 +196,9 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi
features->device_type = BTN_TOOL_TRIPLETAP;
}
features->x_max =
- wacom_le16_to_cpu(&report[i + 3]);
+ get_unaligned_le16(&report[i + 3]);
features->x_phy =
- wacom_le16_to_cpu(&report[i + 6]);
+ get_unaligned_le16(&report[i + 6]);
features->unit = report[i + 9];
features->unitExpo = report[i + 11];
i += 12;
@@ -374,7 +208,7 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi
features->pktlen = WACOM_PKGLEN_GRAPHIRE;
features->device_type = BTN_TOOL_PEN;
features->x_max =
- wacom_le16_to_cpu(&report[i + 3]);
+ get_unaligned_le16(&report[i + 3]);
i += 4;
}
} else if (usage == WCM_DIGITIZER) {
@@ -396,15 +230,15 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi
features->pktlen = WACOM_PKGLEN_TPC2FG;
features->device_type = BTN_TOOL_TRIPLETAP;
features->y_max =
- wacom_le16_to_cpu(&report[i + 3]);
+ get_unaligned_le16(&report[i + 3]);
features->y_phy =
- wacom_le16_to_cpu(&report[i + 6]);
+ get_unaligned_le16(&report[i + 6]);
i += 7;
} else {
features->y_max =
features->x_max;
features->y_phy =
- wacom_le16_to_cpu(&report[i + 3]);
+ get_unaligned_le16(&report[i + 3]);
i += 4;
}
} else if (pen) {
@@ -413,7 +247,7 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi
features->pktlen = WACOM_PKGLEN_GRAPHIRE;
features->device_type = BTN_TOOL_PEN;
features->y_max =
- wacom_le16_to_cpu(&report[i + 3]);
+ get_unaligned_le16(&report[i + 3]);
i += 4;
}
}
@@ -432,7 +266,7 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi
case HID_USAGE_UNDEFINED:
if (usage == WCM_DESKTOP && finger) /* capacity */
features->pressure_max =
- wacom_le16_to_cpu(&report[i + 3]);
+ get_unaligned_le16(&report[i + 3]);
i += 4;
break;
}
@@ -528,6 +362,81 @@ static int wacom_retrieve_hid_descriptor(struct usb_interface *intf,
return error;
}
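+
+/*
+ * Shared state for tablets that expose more than one USB interface (e.g. pen
+ * and touch); one kref-counted wacom_usbdev_data is kept per struct usb_device.
+ */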
+struct wacom_usbdev_data {
+ struct list_head list;
+ struct kref kref;
+ struct usb_device *dev;
+ struct wacom_shared shared;
+};
+
+static LIST_HEAD(wacom_udev_list);
+static DEFINE_MUTEX(wacom_udev_list_lock);
+
+static struct wacom_usbdev_data *wacom_get_usbdev_data(struct usb_device *dev)
+{
+ struct wacom_usbdev_data *data;
+
+ list_for_each_entry(data, &wacom_udev_list, list) {
+ if (data->dev == dev) {
+ kref_get(&data->kref);
+ return data;
+ }
+ }
+
+ return NULL;
+}
+
+static int wacom_add_shared_data(struct wacom_wac *wacom,
+ struct usb_device *dev)
+{
+ struct wacom_usbdev_data *data;
+ int retval = 0;
+
+ mutex_lock(&wacom_udev_list_lock);
+
+ data = wacom_get_usbdev_data(dev);
+ if (!data) {
+ data = kzalloc(sizeof(struct wacom_usbdev_data), GFP_KERNEL);
+ if (!data) {
+ retval = -ENOMEM;
+ goto out;
+ }
+
+ kref_init(&data->kref);
+ data->dev = dev;
+ list_add_tail(&data->list, &wacom_udev_list);
+ }
+
+ wacom->shared = &data->shared;
+
+out:
+ mutex_unlock(&wacom_udev_list_lock);
+ return retval;
+}
+
+static void wacom_release_shared_data(struct kref *kref)
+{
+ struct wacom_usbdev_data *data =
+ container_of(kref, struct wacom_usbdev_data, kref);
+
+ mutex_lock(&wacom_udev_list_lock);
+ list_del(&data->list);
+ mutex_unlock(&wacom_udev_list_lock);
+
+ kfree(data);
+}
+
+static void wacom_remove_shared_data(struct wacom_wac *wacom)
+{
+ struct wacom_usbdev_data *data;
+
+ if (wacom->shared) {
+ data = container_of(wacom->shared, struct wacom_usbdev_data, shared);
+ kref_put(&data->kref, wacom_release_shared_data);
+ wacom->shared = NULL;
+ }
+}
+
static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
struct usb_device *dev = interface_to_usbdev(intf);
@@ -542,13 +451,13 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
return -EINVAL;
wacom = kzalloc(sizeof(struct wacom), GFP_KERNEL);
- wacom_wac = kzalloc(sizeof(struct wacom_wac), GFP_KERNEL);
input_dev = input_allocate_device();
- if (!wacom || !input_dev || !wacom_wac) {
+ if (!wacom || !input_dev) {
error = -ENOMEM;
goto fail1;
}
+ wacom_wac = &wacom->wacom_wac;
wacom_wac->features = *((struct wacom_features *)id->driver_info);
features = &wacom_wac->features;
if (features->pktlen > WACOM_PKGLEN_MAX) {
@@ -556,8 +465,8 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
goto fail1;
}
- wacom_wac->data = usb_buffer_alloc(dev, WACOM_PKGLEN_MAX,
- GFP_KERNEL, &wacom->data_dma);
+ wacom_wac->data = usb_alloc_coherent(dev, WACOM_PKGLEN_MAX,
+ GFP_KERNEL, &wacom->data_dma);
if (!wacom_wac->data) {
error = -ENOMEM;
goto fail1;
@@ -570,20 +479,12 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
}
wacom->usbdev = dev;
- wacom->dev = input_dev;
wacom->intf = intf;
mutex_init(&wacom->lock);
usb_make_path(dev, wacom->phys, sizeof(wacom->phys));
strlcat(wacom->phys, "/input0", sizeof(wacom->phys));
- usb_to_input_id(dev, &input_dev->id);
-
- input_dev->dev.parent = &intf->dev;
-
- input_set_drvdata(input_dev, wacom);
-
- input_dev->open = wacom_open;
- input_dev->close = wacom_close;
+ wacom_wac->input = input_dev;
endpoint = &intf->cur_altsetting->endpoint[0].desc;
@@ -600,20 +501,21 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
features->device_type == BTN_TOOL_PEN ?
" Pen" : " Finger",
sizeof(wacom_wac->name));
+
+ error = wacom_add_shared_data(wacom_wac, dev);
+ if (error)
+ goto fail3;
}
input_dev->name = wacom_wac->name;
- wacom->wacom_wac = wacom_wac;
-
- input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
- input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOUCH);
-
- input_set_abs_params(input_dev, ABS_X, 0, features->x_max, 4, 0);
- input_set_abs_params(input_dev, ABS_Y, 0, features->y_max, 4, 0);
- input_set_abs_params(input_dev, ABS_PRESSURE, 0, features->pressure_max, 0, 0);
- input_dev->absbit[BIT_WORD(ABS_MISC)] |= BIT_MASK(ABS_MISC);
+ input_dev->dev.parent = &intf->dev;
+ input_dev->open = wacom_open;
+ input_dev->close = wacom_close;
+ usb_to_input_id(dev, &input_dev->id);
+ input_set_drvdata(input_dev, wacom);
- wacom_init_input_dev(input_dev, wacom_wac);
+ wacom_setup_input_capabilities(input_dev, wacom_wac);
usb_fill_int_urb(wacom->irq, dev,
usb_rcvintpipe(dev, endpoint->bEndpointAddress),
@@ -622,9 +524,9 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
wacom->irq->transfer_dma = wacom->data_dma;
wacom->irq->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
- error = input_register_device(wacom->dev);
+ error = input_register_device(input_dev);
if (error)
- goto fail3;
+ goto fail4;
/* Note that if query fails it is not a hard failure */
wacom_query_tablet_data(intf, features);
@@ -632,11 +534,11 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
usb_set_intfdata(intf, wacom);
return 0;
+ fail4: wacom_remove_shared_data(wacom_wac);
fail3: usb_free_urb(wacom->irq);
- fail2: usb_buffer_free(dev, WACOM_PKGLEN_MAX, wacom_wac->data, wacom->data_dma);
+ fail2: usb_free_coherent(dev, WACOM_PKGLEN_MAX, wacom_wac->data, wacom->data_dma);
fail1: input_free_device(input_dev);
kfree(wacom);
- kfree(wacom_wac);
return error;
}
@@ -647,11 +549,11 @@ static void wacom_disconnect(struct usb_interface *intf)
usb_set_intfdata(intf, NULL);
usb_kill_urb(wacom->irq);
- input_unregister_device(wacom->dev);
+ input_unregister_device(wacom->wacom_wac.input);
usb_free_urb(wacom->irq);
- usb_buffer_free(interface_to_usbdev(intf), WACOM_PKGLEN_MAX,
- wacom->wacom_wac->data, wacom->data_dma);
- kfree(wacom->wacom_wac);
+ usb_free_coherent(interface_to_usbdev(intf), WACOM_PKGLEN_MAX,
+ wacom->wacom_wac.data, wacom->data_dma);
+ wacom_remove_shared_data(&wacom->wacom_wac);
kfree(wacom);
}
@@ -669,7 +571,7 @@ static int wacom_suspend(struct usb_interface *intf, pm_message_t message)
static int wacom_resume(struct usb_interface *intf)
{
struct wacom *wacom = usb_get_intfdata(intf);
- struct wacom_features *features = &wacom->wacom_wac->features;
+ struct wacom_features *features = &wacom->wacom_wac.features;
int rv;
mutex_lock(&wacom->lock);
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index 4a852d815c68..847fd0135bcf 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -11,52 +11,58 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
-#include "wacom.h"
+
#include "wacom_wac.h"
+#include "wacom.h"
-static int wacom_penpartner_irq(struct wacom_wac *wacom, void *wcombo)
+static int wacom_penpartner_irq(struct wacom_wac *wacom)
{
unsigned char *data = wacom->data;
+ struct input_dev *input = wacom->input;
switch (data[0]) {
- case 1:
- if (data[5] & 0x80) {
- wacom->tool[0] = (data[5] & 0x20) ? BTN_TOOL_RUBBER : BTN_TOOL_PEN;
- wacom->id[0] = (data[5] & 0x20) ? ERASER_DEVICE_ID : STYLUS_DEVICE_ID;
- wacom_report_key(wcombo, wacom->tool[0], 1);
- wacom_report_abs(wcombo, ABS_MISC, wacom->id[0]); /* report tool id */
- wacom_report_abs(wcombo, ABS_X, wacom_le16_to_cpu(&data[1]));
- wacom_report_abs(wcombo, ABS_Y, wacom_le16_to_cpu(&data[3]));
- wacom_report_abs(wcombo, ABS_PRESSURE, (signed char)data[6] + 127);
- wacom_report_key(wcombo, BTN_TOUCH, ((signed char)data[6] > -127));
- wacom_report_key(wcombo, BTN_STYLUS, (data[5] & 0x40));
- } else {
- wacom_report_key(wcombo, wacom->tool[0], 0);
- wacom_report_abs(wcombo, ABS_MISC, 0); /* report tool id */
- wacom_report_abs(wcombo, ABS_PRESSURE, -1);
- wacom_report_key(wcombo, BTN_TOUCH, 0);
- }
- break;
- case 2:
- wacom_report_key(wcombo, BTN_TOOL_PEN, 1);
- wacom_report_abs(wcombo, ABS_MISC, STYLUS_DEVICE_ID); /* report tool id */
- wacom_report_abs(wcombo, ABS_X, wacom_le16_to_cpu(&data[1]));
- wacom_report_abs(wcombo, ABS_Y, wacom_le16_to_cpu(&data[3]));
- wacom_report_abs(wcombo, ABS_PRESSURE, (signed char)data[6] + 127);
- wacom_report_key(wcombo, BTN_TOUCH, ((signed char)data[6] > -80) && !(data[5] & 0x20));
- wacom_report_key(wcombo, BTN_STYLUS, (data[5] & 0x40));
- break;
- default:
- printk(KERN_INFO "wacom_penpartner_irq: received unknown report #%d\n", data[0]);
- return 0;
+ case 1:
+ if (data[5] & 0x80) {
+ wacom->tool[0] = (data[5] & 0x20) ? BTN_TOOL_RUBBER : BTN_TOOL_PEN;
+ wacom->id[0] = (data[5] & 0x20) ? ERASER_DEVICE_ID : STYLUS_DEVICE_ID;
+ input_report_key(input, wacom->tool[0], 1);
+ input_report_abs(input, ABS_MISC, wacom->id[0]); /* report tool id */
+ input_report_abs(input, ABS_X, get_unaligned_le16(&data[1]));
+ input_report_abs(input, ABS_Y, get_unaligned_le16(&data[3]));
+ input_report_abs(input, ABS_PRESSURE, (signed char)data[6] + 127);
+ input_report_key(input, BTN_TOUCH, ((signed char)data[6] > -127));
+ input_report_key(input, BTN_STYLUS, (data[5] & 0x40));
+ } else {
+ input_report_key(input, wacom->tool[0], 0);
+ input_report_abs(input, ABS_MISC, 0); /* report tool id */
+ input_report_abs(input, ABS_PRESSURE, -1);
+ input_report_key(input, BTN_TOUCH, 0);
+ }
+ break;
+
+ case 2:
+ input_report_key(input, BTN_TOOL_PEN, 1);
+ input_report_abs(input, ABS_MISC, STYLUS_DEVICE_ID); /* report tool id */
+ input_report_abs(input, ABS_X, get_unaligned_le16(&data[1]));
+ input_report_abs(input, ABS_Y, get_unaligned_le16(&data[3]));
+ input_report_abs(input, ABS_PRESSURE, (signed char)data[6] + 127);
+ input_report_key(input, BTN_TOUCH, ((signed char)data[6] > -80) && !(data[5] & 0x20));
+ input_report_key(input, BTN_STYLUS, (data[5] & 0x40));
+ break;
+
+ default:
+ printk(KERN_INFO "wacom_penpartner_irq: received unknown report #%d\n", data[0]);
+ return 0;
}
+
return 1;
}
-static int wacom_pl_irq(struct wacom_wac *wacom, void *wcombo)
+static int wacom_pl_irq(struct wacom_wac *wacom)
{
struct wacom_features *features = &wacom->features;
unsigned char *data = wacom->data;
+ struct input_dev *input = wacom->input;
int prox, pressure;
if (data[0] != WACOM_REPORT_PENABLED) {
@@ -90,8 +96,8 @@ static int wacom_pl_irq(struct wacom_wac *wacom, void *wcombo)
/* was entered with stylus2 pressed */
if (wacom->tool[1] == BTN_TOOL_RUBBER && !(data[4] & 0x20)) {
/* report out proximity for previous tool */
- wacom_report_key(wcombo, wacom->tool[1], 0);
- wacom_input_sync(wcombo);
+ input_report_key(input, wacom->tool[1], 0);
+ input_sync(input);
wacom->tool[1] = BTN_TOOL_PEN;
return 0;
}
@@ -101,32 +107,33 @@ static int wacom_pl_irq(struct wacom_wac *wacom, void *wcombo)
wacom->tool[1] = BTN_TOOL_PEN;
wacom->id[0] = STYLUS_DEVICE_ID;
}
- wacom_report_key(wcombo, wacom->tool[1], prox); /* report in proximity for tool */
- wacom_report_abs(wcombo, ABS_MISC, wacom->id[0]); /* report tool id */
- wacom_report_abs(wcombo, ABS_X, data[3] | (data[2] << 7) | ((data[1] & 0x03) << 14));
- wacom_report_abs(wcombo, ABS_Y, data[6] | (data[5] << 7) | ((data[4] & 0x03) << 14));
- wacom_report_abs(wcombo, ABS_PRESSURE, pressure);
-
- wacom_report_key(wcombo, BTN_TOUCH, data[4] & 0x08);
- wacom_report_key(wcombo, BTN_STYLUS, data[4] & 0x10);
+ input_report_key(input, wacom->tool[1], prox); /* report in proximity for tool */
+ input_report_abs(input, ABS_MISC, wacom->id[0]); /* report tool id */
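+ /* x/y are assembled from bits spread across three bytes of the packet */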
+ input_report_abs(input, ABS_X, data[3] | (data[2] << 7) | ((data[1] & 0x03) << 14));
+ input_report_abs(input, ABS_Y, data[6] | (data[5] << 7) | ((data[4] & 0x03) << 14));
+ input_report_abs(input, ABS_PRESSURE, pressure);
+
+ input_report_key(input, BTN_TOUCH, data[4] & 0x08);
+ input_report_key(input, BTN_STYLUS, data[4] & 0x10);
/* Only allow the stylus2 button to be reported for the pen tool. */
- wacom_report_key(wcombo, BTN_STYLUS2, (wacom->tool[1] == BTN_TOOL_PEN) && (data[4] & 0x20));
+ input_report_key(input, BTN_STYLUS2, (wacom->tool[1] == BTN_TOOL_PEN) && (data[4] & 0x20));
} else {
/* report proximity-out of a (valid) tool */
if (wacom->tool[1] != BTN_TOOL_RUBBER) {
/* Unknown tool selected default to pen tool */
wacom->tool[1] = BTN_TOOL_PEN;
}
- wacom_report_key(wcombo, wacom->tool[1], prox);
+ input_report_key(input, wacom->tool[1], prox);
}
wacom->tool[0] = prox; /* Save proximity state */
return 1;
}
-static int wacom_ptu_irq(struct wacom_wac *wacom, void *wcombo)
+static int wacom_ptu_irq(struct wacom_wac *wacom)
{
unsigned char *data = wacom->data;
+ struct input_dev *input = wacom->input;
if (data[0] != WACOM_REPORT_PENABLED) {
printk(KERN_INFO "wacom_ptu_irq: received unknown report #%d\n", data[0]);
@@ -134,40 +141,41 @@ static int wacom_ptu_irq(struct wacom_wac *wacom, void *wcombo)
}
if (data[1] & 0x04) {
- wacom_report_key(wcombo, BTN_TOOL_RUBBER, data[1] & 0x20);
- wacom_report_key(wcombo, BTN_TOUCH, data[1] & 0x08);
+ input_report_key(input, BTN_TOOL_RUBBER, data[1] & 0x20);
+ input_report_key(input, BTN_TOUCH, data[1] & 0x08);
wacom->id[0] = ERASER_DEVICE_ID;
} else {
- wacom_report_key(wcombo, BTN_TOOL_PEN, data[1] & 0x20);
- wacom_report_key(wcombo, BTN_TOUCH, data[1] & 0x01);
+ input_report_key(input, BTN_TOOL_PEN, data[1] & 0x20);
+ input_report_key(input, BTN_TOUCH, data[1] & 0x01);
wacom->id[0] = STYLUS_DEVICE_ID;
}
- wacom_report_abs(wcombo, ABS_MISC, wacom->id[0]); /* report tool id */
- wacom_report_abs(wcombo, ABS_X, wacom_le16_to_cpu(&data[2]));
- wacom_report_abs(wcombo, ABS_Y, wacom_le16_to_cpu(&data[4]));
- wacom_report_abs(wcombo, ABS_PRESSURE, wacom_le16_to_cpu(&data[6]));
- wacom_report_key(wcombo, BTN_STYLUS, data[1] & 0x02);
- wacom_report_key(wcombo, BTN_STYLUS2, data[1] & 0x10);
+ input_report_abs(input, ABS_MISC, wacom->id[0]); /* report tool id */
+ input_report_abs(input, ABS_X, le16_to_cpup((__le16 *)&data[2]));
+ input_report_abs(input, ABS_Y, le16_to_cpup((__le16 *)&data[4]));
+ input_report_abs(input, ABS_PRESSURE, le16_to_cpup((__le16 *)&data[6]));
+ input_report_key(input, BTN_STYLUS, data[1] & 0x02);
+ input_report_key(input, BTN_STYLUS2, data[1] & 0x10);
return 1;
}
-static int wacom_graphire_irq(struct wacom_wac *wacom, void *wcombo)
+static int wacom_graphire_irq(struct wacom_wac *wacom)
{
struct wacom_features *features = &wacom->features;
unsigned char *data = wacom->data;
- int x, y, rw;
- static int penData = 0;
+ struct input_dev *input = wacom->input;
+ int prox;
+ int rw = 0;
+ int retval = 0;
if (data[0] != WACOM_REPORT_PENABLED) {
dbg("wacom_graphire_irq: received unknown report #%d", data[0]);
- return 0;
+ goto exit;
}
- if (data[1] & 0x80) {
- /* in prox and not a pad data */
- penData = 1;
-
- switch ((data[1] >> 5) & 3) {
+ prox = data[1] & 0x80;
+ if (prox || wacom->id[0]) {
+ if (prox) {
+ switch ((data[1] >> 5) & 3) {
case 0: /* Pen */
wacom->tool[0] = BTN_TOOL_PEN;
@@ -180,128 +188,89 @@ static int wacom_graphire_irq(struct wacom_wac *wacom, void *wcombo)
break;
case 2: /* Mouse with wheel */
- wacom_report_key(wcombo, BTN_MIDDLE, data[1] & 0x04);
- if (features->type == WACOM_G4 || features->type == WACOM_MO) {
- rw = data[7] & 0x04 ? (data[7] & 0x03)-4 : (data[7] & 0x03);
- wacom_report_rel(wcombo, REL_WHEEL, -rw);
- } else
- wacom_report_rel(wcombo, REL_WHEEL, -(signed char) data[6]);
+ input_report_key(input, BTN_MIDDLE, data[1] & 0x04);
/* fall through */
case 3: /* Mouse without wheel */
wacom->tool[0] = BTN_TOOL_MOUSE;
wacom->id[0] = CURSOR_DEVICE_ID;
- wacom_report_key(wcombo, BTN_LEFT, data[1] & 0x01);
- wacom_report_key(wcombo, BTN_RIGHT, data[1] & 0x02);
- if (features->type == WACOM_G4 || features->type == WACOM_MO)
- wacom_report_abs(wcombo, ABS_DISTANCE, data[6] & 0x3f);
- else
- wacom_report_abs(wcombo, ABS_DISTANCE, data[7] & 0x3f);
break;
+ }
}
- x = wacom_le16_to_cpu(&data[2]);
- y = wacom_le16_to_cpu(&data[4]);
- wacom_report_abs(wcombo, ABS_X, x);
- wacom_report_abs(wcombo, ABS_Y, y);
+ input_report_abs(input, ABS_X, le16_to_cpup((__le16 *)&data[2]));
+ input_report_abs(input, ABS_Y, le16_to_cpup((__le16 *)&data[4]));
if (wacom->tool[0] != BTN_TOOL_MOUSE) {
- wacom_report_abs(wcombo, ABS_PRESSURE, data[6] | ((data[7] & 0x01) << 8));
- wacom_report_key(wcombo, BTN_TOUCH, data[1] & 0x01);
- wacom_report_key(wcombo, BTN_STYLUS, data[1] & 0x02);
- wacom_report_key(wcombo, BTN_STYLUS2, data[1] & 0x04);
- }
- wacom_report_abs(wcombo, ABS_MISC, wacom->id[0]); /* report tool id */
- wacom_report_key(wcombo, wacom->tool[0], 1);
- } else if (wacom->id[0]) {
- wacom_report_abs(wcombo, ABS_X, 0);
- wacom_report_abs(wcombo, ABS_Y, 0);
- if (wacom->tool[0] == BTN_TOOL_MOUSE) {
- wacom_report_key(wcombo, BTN_LEFT, 0);
- wacom_report_key(wcombo, BTN_RIGHT, 0);
- wacom_report_abs(wcombo, ABS_DISTANCE, 0);
+ input_report_abs(input, ABS_PRESSURE, data[6] | ((data[7] & 0x01) << 8));
+ input_report_key(input, BTN_TOUCH, data[1] & 0x01);
+ input_report_key(input, BTN_STYLUS, data[1] & 0x02);
+ input_report_key(input, BTN_STYLUS2, data[1] & 0x04);
} else {
- wacom_report_abs(wcombo, ABS_PRESSURE, 0);
- wacom_report_key(wcombo, BTN_TOUCH, 0);
- wacom_report_key(wcombo, BTN_STYLUS, 0);
- wacom_report_key(wcombo, BTN_STYLUS2, 0);
+ input_report_key(input, BTN_LEFT, data[1] & 0x01);
+ input_report_key(input, BTN_RIGHT, data[1] & 0x02);
+ if (features->type == WACOM_G4 ||
+ features->type == WACOM_MO) {
+ input_report_abs(input, ABS_DISTANCE, data[6] & 0x3f);
+ rw = (signed)(data[7] & 0x04) - (data[7] & 0x03);
+ } else {
+ input_report_abs(input, ABS_DISTANCE, data[7] & 0x3f);
+ rw = -(signed)data[6];
+ }
+ input_report_rel(input, REL_WHEEL, rw);
}
- wacom->id[0] = 0;
- wacom_report_abs(wcombo, ABS_MISC, 0); /* reset tool id */
- wacom_report_key(wcombo, wacom->tool[0], 0);
+
+ if (!prox)
+ wacom->id[0] = 0;
+ input_report_abs(input, ABS_MISC, wacom->id[0]); /* report tool id */
+ input_report_key(input, wacom->tool[0], prox);
+ input_sync(input); /* sync last event */
}
/* send pad data */
switch (features->type) {
- case WACOM_G4:
- if (data[7] & 0xf8) {
- if (penData) {
- wacom_input_sync(wcombo); /* sync last event */
- if (!wacom->id[0])
- penData = 0;
- }
+ case WACOM_G4:
+ prox = data[7] & 0xf8;
+ if (prox || wacom->id[1]) {
wacom->id[1] = PAD_DEVICE_ID;
- wacom_report_key(wcombo, BTN_0, (data[7] & 0x40));
- wacom_report_key(wcombo, BTN_4, (data[7] & 0x80));
+ input_report_key(input, BTN_0, (data[7] & 0x40));
+ input_report_key(input, BTN_4, (data[7] & 0x80));
rw = ((data[7] & 0x18) >> 3) - ((data[7] & 0x20) >> 3);
- wacom_report_rel(wcombo, REL_WHEEL, rw);
- wacom_report_key(wcombo, BTN_TOOL_FINGER, 0xf0);
- wacom_report_abs(wcombo, ABS_MISC, wacom->id[1]);
- wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xf0);
- } else if (wacom->id[1]) {
- if (penData) {
- wacom_input_sync(wcombo); /* sync last event */
- if (!wacom->id[0])
- penData = 0;
- }
- wacom->id[1] = 0;
- wacom_report_key(wcombo, BTN_0, (data[7] & 0x40));
- wacom_report_key(wcombo, BTN_4, (data[7] & 0x80));
- wacom_report_rel(wcombo, REL_WHEEL, 0);
- wacom_report_key(wcombo, BTN_TOOL_FINGER, 0);
- wacom_report_abs(wcombo, ABS_MISC, 0);
- wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xf0);
+ input_report_rel(input, REL_WHEEL, rw);
+ input_report_key(input, BTN_TOOL_FINGER, 0xf0);
+ if (!prox)
+ wacom->id[1] = 0;
+ input_report_abs(input, ABS_MISC, wacom->id[1]);
+ input_event(input, EV_MSC, MSC_SERIAL, 0xf0);
+ retval = 1;
}
break;
- case WACOM_MO:
- if ((data[7] & 0xf8) || (data[8] & 0xff)) {
- if (penData) {
- wacom_input_sync(wcombo); /* sync last event */
- if (!wacom->id[0])
- penData = 0;
- }
+
+ case WACOM_MO:
+ prox = (data[7] & 0xf8) || data[8];
+ if (prox || wacom->id[1]) {
wacom->id[1] = PAD_DEVICE_ID;
- wacom_report_key(wcombo, BTN_0, (data[7] & 0x08));
- wacom_report_key(wcombo, BTN_1, (data[7] & 0x20));
- wacom_report_key(wcombo, BTN_4, (data[7] & 0x10));
- wacom_report_key(wcombo, BTN_5, (data[7] & 0x40));
- wacom_report_abs(wcombo, ABS_WHEEL, (data[8] & 0x7f));
- wacom_report_key(wcombo, BTN_TOOL_FINGER, 0xf0);
- wacom_report_abs(wcombo, ABS_MISC, wacom->id[1]);
- wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xf0);
- } else if (wacom->id[1]) {
- if (penData) {
- wacom_input_sync(wcombo); /* sync last event */
- if (!wacom->id[0])
- penData = 0;
- }
- wacom->id[1] = 0;
- wacom_report_key(wcombo, BTN_0, (data[7] & 0x08));
- wacom_report_key(wcombo, BTN_1, (data[7] & 0x20));
- wacom_report_key(wcombo, BTN_4, (data[7] & 0x10));
- wacom_report_key(wcombo, BTN_5, (data[7] & 0x40));
- wacom_report_abs(wcombo, ABS_WHEEL, (data[8] & 0x7f));
- wacom_report_key(wcombo, BTN_TOOL_FINGER, 0);
- wacom_report_abs(wcombo, ABS_MISC, 0);
- wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xf0);
+ input_report_key(input, BTN_0, (data[7] & 0x08));
+ input_report_key(input, BTN_1, (data[7] & 0x20));
+ input_report_key(input, BTN_4, (data[7] & 0x10));
+ input_report_key(input, BTN_5, (data[7] & 0x40));
+ input_report_abs(input, ABS_WHEEL, (data[8] & 0x7f));
+ input_report_key(input, BTN_TOOL_FINGER, 0xf0);
+ if (!prox)
+ wacom->id[1] = 0;
+ input_report_abs(input, ABS_MISC, wacom->id[1]);
+ input_event(input, EV_MSC, MSC_SERIAL, 0xf0);
}
+ retval = 1;
break;
}
- return 1;
+exit:
+ return retval;
}
-static int wacom_intuos_inout(struct wacom_wac *wacom, void *wcombo)
+static int wacom_intuos_inout(struct wacom_wac *wacom)
{
struct wacom_features *features = &wacom->features;
unsigned char *data = wacom->data;
+ struct input_dev *input = wacom->input;
int idx = 0;
/* tool number */
@@ -316,64 +285,73 @@ static int wacom_intuos_inout(struct wacom_wac *wacom, void *wcombo)
(data[6] << 4) + (data[7] >> 4);
wacom->id[idx] = (data[2] << 4) | (data[3] >> 4);
+
switch (wacom->id[idx]) {
- case 0x812: /* Inking pen */
- case 0x801: /* Intuos3 Inking pen */
- case 0x20802: /* Intuos4 Classic Pen */
- case 0x012:
- wacom->tool[idx] = BTN_TOOL_PENCIL;
- break;
- case 0x822: /* Pen */
- case 0x842:
- case 0x852:
- case 0x823: /* Intuos3 Grip Pen */
- case 0x813: /* Intuos3 Classic Pen */
- case 0x885: /* Intuos3 Marker Pen */
- case 0x802: /* Intuos4 Grip Pen Eraser */
- case 0x804: /* Intuos4 Marker Pen */
- case 0x40802: /* Intuos4 Classic Pen */
- case 0x022:
- wacom->tool[idx] = BTN_TOOL_PEN;
- break;
- case 0x832: /* Stroke pen */
- case 0x032:
- wacom->tool[idx] = BTN_TOOL_BRUSH;
- break;
- case 0x007: /* Mouse 4D and 2D */
- case 0x09c:
- case 0x094:
- case 0x017: /* Intuos3 2D Mouse */
- case 0x806: /* Intuos4 Mouse */
- wacom->tool[idx] = BTN_TOOL_MOUSE;
- break;
- case 0x096: /* Lens cursor */
- case 0x097: /* Intuos3 Lens cursor */
- case 0x006: /* Intuos4 Lens cursor */
- wacom->tool[idx] = BTN_TOOL_LENS;
- break;
- case 0x82a: /* Eraser */
- case 0x85a:
- case 0x91a:
- case 0xd1a:
- case 0x0fa:
- case 0x82b: /* Intuos3 Grip Pen Eraser */
- case 0x81b: /* Intuos3 Classic Pen Eraser */
- case 0x91b: /* Intuos3 Airbrush Eraser */
- case 0x80c: /* Intuos4 Marker Pen Eraser */
- case 0x80a: /* Intuos4 Grip Pen Eraser */
- case 0x4080a: /* Intuos4 Classic Pen Eraser */
- case 0x90a: /* Intuos4 Airbrush Eraser */
- wacom->tool[idx] = BTN_TOOL_RUBBER;
- break;
- case 0xd12:
- case 0x912:
- case 0x112:
- case 0x913: /* Intuos3 Airbrush */
- case 0x902: /* Intuos4 Airbrush */
- wacom->tool[idx] = BTN_TOOL_AIRBRUSH;
- break;
- default: /* Unknown tool */
- wacom->tool[idx] = BTN_TOOL_PEN;
+ case 0x812: /* Inking pen */
+ case 0x801: /* Intuos3 Inking pen */
+ case 0x20802: /* Intuos4 Classic Pen */
+ case 0x012:
+ wacom->tool[idx] = BTN_TOOL_PENCIL;
+ break;
+
+ case 0x822: /* Pen */
+ case 0x842:
+ case 0x852:
+ case 0x823: /* Intuos3 Grip Pen */
+ case 0x813: /* Intuos3 Classic Pen */
+ case 0x885: /* Intuos3 Marker Pen */
+ case 0x802: /* Intuos4 Grip Pen Eraser */
+ case 0x804: /* Intuos4 Marker Pen */
+ case 0x40802: /* Intuos4 Classic Pen */
+ case 0x022:
+ wacom->tool[idx] = BTN_TOOL_PEN;
+ break;
+
+ case 0x832: /* Stroke pen */
+ case 0x032:
+ wacom->tool[idx] = BTN_TOOL_BRUSH;
+ break;
+
+ case 0x007: /* Mouse 4D and 2D */
+ case 0x09c:
+ case 0x094:
+ case 0x017: /* Intuos3 2D Mouse */
+ case 0x806: /* Intuos4 Mouse */
+ wacom->tool[idx] = BTN_TOOL_MOUSE;
+ break;
+
+ case 0x096: /* Lens cursor */
+ case 0x097: /* Intuos3 Lens cursor */
+ case 0x006: /* Intuos4 Lens cursor */
+ wacom->tool[idx] = BTN_TOOL_LENS;
+ break;
+
+ case 0x82a: /* Eraser */
+ case 0x85a:
+ case 0x91a:
+ case 0xd1a:
+ case 0x0fa:
+ case 0x82b: /* Intuos3 Grip Pen Eraser */
+ case 0x81b: /* Intuos3 Classic Pen Eraser */
+ case 0x91b: /* Intuos3 Airbrush Eraser */
+ case 0x80c: /* Intuos4 Marker Pen Eraser */
+ case 0x80a: /* Intuos4 Grip Pen Eraser */
+ case 0x4080a: /* Intuos4 Classic Pen Eraser */
+ case 0x90a: /* Intuos4 Airbrush Eraser */
+ wacom->tool[idx] = BTN_TOOL_RUBBER;
+ break;
+
+ case 0xd12:
+ case 0x912:
+ case 0x112:
+ case 0x913: /* Intuos3 Airbrush */
+ case 0x902: /* Intuos4 Airbrush */
+ wacom->tool[idx] = BTN_TOOL_AIRBRUSH;
+ break;
+
+ default: /* Unknown tool */
+ wacom->tool[idx] = BTN_TOOL_PEN;
+ break;
}
return 1;
}
@@ -384,41 +362,42 @@ static int wacom_intuos_inout(struct wacom_wac *wacom, void *wcombo)
* Reset all states otherwise we lose the initial states
* when in-prox next time
*/
- wacom_report_abs(wcombo, ABS_X, 0);
- wacom_report_abs(wcombo, ABS_Y, 0);
- wacom_report_abs(wcombo, ABS_DISTANCE, 0);
- wacom_report_abs(wcombo, ABS_TILT_X, 0);
- wacom_report_abs(wcombo, ABS_TILT_Y, 0);
+ input_report_abs(input, ABS_X, 0);
+ input_report_abs(input, ABS_Y, 0);
+ input_report_abs(input, ABS_DISTANCE, 0);
+ input_report_abs(input, ABS_TILT_X, 0);
+ input_report_abs(input, ABS_TILT_Y, 0);
if (wacom->tool[idx] >= BTN_TOOL_MOUSE) {
- wacom_report_key(wcombo, BTN_LEFT, 0);
- wacom_report_key(wcombo, BTN_MIDDLE, 0);
- wacom_report_key(wcombo, BTN_RIGHT, 0);
- wacom_report_key(wcombo, BTN_SIDE, 0);
- wacom_report_key(wcombo, BTN_EXTRA, 0);
- wacom_report_abs(wcombo, ABS_THROTTLE, 0);
- wacom_report_abs(wcombo, ABS_RZ, 0);
+ input_report_key(input, BTN_LEFT, 0);
+ input_report_key(input, BTN_MIDDLE, 0);
+ input_report_key(input, BTN_RIGHT, 0);
+ input_report_key(input, BTN_SIDE, 0);
+ input_report_key(input, BTN_EXTRA, 0);
+ input_report_abs(input, ABS_THROTTLE, 0);
+ input_report_abs(input, ABS_RZ, 0);
} else {
- wacom_report_abs(wcombo, ABS_PRESSURE, 0);
- wacom_report_key(wcombo, BTN_STYLUS, 0);
- wacom_report_key(wcombo, BTN_STYLUS2, 0);
- wacom_report_key(wcombo, BTN_TOUCH, 0);
- wacom_report_abs(wcombo, ABS_WHEEL, 0);
+ input_report_abs(input, ABS_PRESSURE, 0);
+ input_report_key(input, BTN_STYLUS, 0);
+ input_report_key(input, BTN_STYLUS2, 0);
+ input_report_key(input, BTN_TOUCH, 0);
+ input_report_abs(input, ABS_WHEEL, 0);
if (features->type >= INTUOS3S)
- wacom_report_abs(wcombo, ABS_Z, 0);
+ input_report_abs(input, ABS_Z, 0);
}
- wacom_report_key(wcombo, wacom->tool[idx], 0);
- wacom_report_abs(wcombo, ABS_MISC, 0); /* reset tool id */
- wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, wacom->serial[idx]);
+ input_report_key(input, wacom->tool[idx], 0);
+ input_report_abs(input, ABS_MISC, 0); /* reset tool id */
+ input_event(input, EV_MSC, MSC_SERIAL, wacom->serial[idx]);
wacom->id[idx] = 0;
return 2;
}
return 0;
}
-static void wacom_intuos_general(struct wacom_wac *wacom, void *wcombo)
+static void wacom_intuos_general(struct wacom_wac *wacom)
{
struct wacom_features *features = &wacom->features;
unsigned char *data = wacom->data;
+ struct input_dev *input = wacom->input;
unsigned int t;
/* general pen packet */
@@ -426,30 +405,30 @@ static void wacom_intuos_general(struct wacom_wac *wacom, void *wcombo)
t = (data[6] << 2) | ((data[7] >> 6) & 3);
if (features->type >= INTUOS4S && features->type <= INTUOS4L)
t = (t << 1) | (data[1] & 1);
- wacom_report_abs(wcombo, ABS_PRESSURE, t);
- wacom_report_abs(wcombo, ABS_TILT_X,
+ input_report_abs(input, ABS_PRESSURE, t);
+ input_report_abs(input, ABS_TILT_X,
((data[7] << 1) & 0x7e) | (data[8] >> 7));
- wacom_report_abs(wcombo, ABS_TILT_Y, data[8] & 0x7f);
- wacom_report_key(wcombo, BTN_STYLUS, data[1] & 2);
- wacom_report_key(wcombo, BTN_STYLUS2, data[1] & 4);
- wacom_report_key(wcombo, BTN_TOUCH, t > 10);
+ input_report_abs(input, ABS_TILT_Y, data[8] & 0x7f);
+ input_report_key(input, BTN_STYLUS, data[1] & 2);
+ input_report_key(input, BTN_STYLUS2, data[1] & 4);
+ input_report_key(input, BTN_TOUCH, t > 10);
}
/* airbrush second packet */
if ((data[1] & 0xbc) == 0xb4) {
- wacom_report_abs(wcombo, ABS_WHEEL,
+ input_report_abs(input, ABS_WHEEL,
(data[6] << 2) | ((data[7] >> 6) & 3));
- wacom_report_abs(wcombo, ABS_TILT_X,
+ input_report_abs(input, ABS_TILT_X,
((data[7] << 1) & 0x7e) | (data[8] >> 7));
- wacom_report_abs(wcombo, ABS_TILT_Y, data[8] & 0x7f);
+ input_report_abs(input, ABS_TILT_Y, data[8] & 0x7f);
}
- return;
}
-static int wacom_intuos_irq(struct wacom_wac *wacom, void *wcombo)
+static int wacom_intuos_irq(struct wacom_wac *wacom)
{
struct wacom_features *features = &wacom->features;
unsigned char *data = wacom->data;
+ struct input_dev *input = wacom->input;
unsigned int t;
int idx = 0, result;
@@ -470,61 +449,61 @@ static int wacom_intuos_irq(struct wacom_wac *wacom, void *wcombo)
wacom->tool[1] = BTN_TOOL_FINGER;
if (features->type >= INTUOS4S && features->type <= INTUOS4L) {
- wacom_report_key(wcombo, BTN_0, (data[2] & 0x01));
- wacom_report_key(wcombo, BTN_1, (data[3] & 0x01));
- wacom_report_key(wcombo, BTN_2, (data[3] & 0x02));
- wacom_report_key(wcombo, BTN_3, (data[3] & 0x04));
- wacom_report_key(wcombo, BTN_4, (data[3] & 0x08));
- wacom_report_key(wcombo, BTN_5, (data[3] & 0x10));
- wacom_report_key(wcombo, BTN_6, (data[3] & 0x20));
+ input_report_key(input, BTN_0, (data[2] & 0x01));
+ input_report_key(input, BTN_1, (data[3] & 0x01));
+ input_report_key(input, BTN_2, (data[3] & 0x02));
+ input_report_key(input, BTN_3, (data[3] & 0x04));
+ input_report_key(input, BTN_4, (data[3] & 0x08));
+ input_report_key(input, BTN_5, (data[3] & 0x10));
+ input_report_key(input, BTN_6, (data[3] & 0x20));
if (data[1] & 0x80) {
- wacom_report_abs(wcombo, ABS_WHEEL, (data[1] & 0x7f));
+ input_report_abs(input, ABS_WHEEL, (data[1] & 0x7f));
} else {
/* Out of proximity, clear wheel value. */
- wacom_report_abs(wcombo, ABS_WHEEL, 0);
+ input_report_abs(input, ABS_WHEEL, 0);
}
if (features->type != INTUOS4S) {
- wacom_report_key(wcombo, BTN_7, (data[3] & 0x40));
- wacom_report_key(wcombo, BTN_8, (data[3] & 0x80));
+ input_report_key(input, BTN_7, (data[3] & 0x40));
+ input_report_key(input, BTN_8, (data[3] & 0x80));
}
if (data[1] | (data[2] & 0x01) | data[3]) {
- wacom_report_key(wcombo, wacom->tool[1], 1);
- wacom_report_abs(wcombo, ABS_MISC, PAD_DEVICE_ID);
+ input_report_key(input, wacom->tool[1], 1);
+ input_report_abs(input, ABS_MISC, PAD_DEVICE_ID);
} else {
- wacom_report_key(wcombo, wacom->tool[1], 0);
- wacom_report_abs(wcombo, ABS_MISC, 0);
+ input_report_key(input, wacom->tool[1], 0);
+ input_report_abs(input, ABS_MISC, 0);
}
} else {
- wacom_report_key(wcombo, BTN_0, (data[5] & 0x01));
- wacom_report_key(wcombo, BTN_1, (data[5] & 0x02));
- wacom_report_key(wcombo, BTN_2, (data[5] & 0x04));
- wacom_report_key(wcombo, BTN_3, (data[5] & 0x08));
- wacom_report_key(wcombo, BTN_4, (data[6] & 0x01));
- wacom_report_key(wcombo, BTN_5, (data[6] & 0x02));
- wacom_report_key(wcombo, BTN_6, (data[6] & 0x04));
- wacom_report_key(wcombo, BTN_7, (data[6] & 0x08));
- wacom_report_key(wcombo, BTN_8, (data[5] & 0x10));
- wacom_report_key(wcombo, BTN_9, (data[6] & 0x10));
- wacom_report_abs(wcombo, ABS_RX, ((data[1] & 0x1f) << 8) | data[2]);
- wacom_report_abs(wcombo, ABS_RY, ((data[3] & 0x1f) << 8) | data[4]);
+ input_report_key(input, BTN_0, (data[5] & 0x01));
+ input_report_key(input, BTN_1, (data[5] & 0x02));
+ input_report_key(input, BTN_2, (data[5] & 0x04));
+ input_report_key(input, BTN_3, (data[5] & 0x08));
+ input_report_key(input, BTN_4, (data[6] & 0x01));
+ input_report_key(input, BTN_5, (data[6] & 0x02));
+ input_report_key(input, BTN_6, (data[6] & 0x04));
+ input_report_key(input, BTN_7, (data[6] & 0x08));
+ input_report_key(input, BTN_8, (data[5] & 0x10));
+ input_report_key(input, BTN_9, (data[6] & 0x10));
+ input_report_abs(input, ABS_RX, ((data[1] & 0x1f) << 8) | data[2]);
+ input_report_abs(input, ABS_RY, ((data[3] & 0x1f) << 8) | data[4]);
if ((data[5] & 0x1f) | (data[6] & 0x1f) | (data[1] & 0x1f) |
data[2] | (data[3] & 0x1f) | data[4]) {
- wacom_report_key(wcombo, wacom->tool[1], 1);
- wacom_report_abs(wcombo, ABS_MISC, PAD_DEVICE_ID);
+ input_report_key(input, wacom->tool[1], 1);
+ input_report_abs(input, ABS_MISC, PAD_DEVICE_ID);
} else {
- wacom_report_key(wcombo, wacom->tool[1], 0);
- wacom_report_abs(wcombo, ABS_MISC, 0);
+ input_report_key(input, wacom->tool[1], 0);
+ input_report_abs(input, ABS_MISC, 0);
}
}
- wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xffffffff);
+ input_event(input, EV_MSC, MSC_SERIAL, 0xffffffff);
return 1;
}
/* process in/out prox events */
- result = wacom_intuos_inout(wacom, wcombo);
+ result = wacom_intuos_inout(wacom);
if (result)
- return result-1;
+ return result - 1;
/* don't proceed if we don't know the ID */
if (!wacom->id[idx])
@@ -545,17 +524,17 @@ static int wacom_intuos_irq(struct wacom_wac *wacom, void *wcombo)
return 0;
if (features->type >= INTUOS3S) {
- wacom_report_abs(wcombo, ABS_X, (data[2] << 9) | (data[3] << 1) | ((data[9] >> 1) & 1));
- wacom_report_abs(wcombo, ABS_Y, (data[4] << 9) | (data[5] << 1) | (data[9] & 1));
- wacom_report_abs(wcombo, ABS_DISTANCE, ((data[9] >> 2) & 0x3f));
+ input_report_abs(input, ABS_X, (data[2] << 9) | (data[3] << 1) | ((data[9] >> 1) & 1));
+ input_report_abs(input, ABS_Y, (data[4] << 9) | (data[5] << 1) | (data[9] & 1));
+ input_report_abs(input, ABS_DISTANCE, ((data[9] >> 2) & 0x3f));
} else {
- wacom_report_abs(wcombo, ABS_X, wacom_be16_to_cpu(&data[2]));
- wacom_report_abs(wcombo, ABS_Y, wacom_be16_to_cpu(&data[4]));
- wacom_report_abs(wcombo, ABS_DISTANCE, ((data[9] >> 3) & 0x1f));
+ input_report_abs(input, ABS_X, be16_to_cpup((__be16 *)&data[2]));
+ input_report_abs(input, ABS_Y, be16_to_cpup((__be16 *)&data[4]));
+ input_report_abs(input, ABS_DISTANCE, ((data[9] >> 3) & 0x1f));
}
/* process general packets */
- wacom_intuos_general(wacom, wcombo);
+ wacom_intuos_general(wacom);
/* 4D mouse, 2D mouse, marker pen rotation, tilt mouse, or Lens cursor packets */
if ((data[1] & 0xbc) == 0xa8 || (data[1] & 0xbe) == 0xb0 || (data[1] & 0xbc) == 0xac) {
@@ -567,174 +546,191 @@ static int wacom_intuos_irq(struct wacom_wac *wacom, void *wcombo)
t = (data[6] << 3) | ((data[7] >> 5) & 7);
t = (data[7] & 0x20) ? ((t > 900) ? ((t-1) / 2 - 1350) :
((t-1) / 2 + 450)) : (450 - t / 2) ;
- wacom_report_abs(wcombo, ABS_Z, t);
+ input_report_abs(input, ABS_Z, t);
} else {
/* 4D mouse rotation packet */
t = (data[6] << 3) | ((data[7] >> 5) & 7);
- wacom_report_abs(wcombo, ABS_RZ, (data[7] & 0x20) ?
+ input_report_abs(input, ABS_RZ, (data[7] & 0x20) ?
((t - 1) / 2) : -t / 2);
}
} else if (!(data[1] & 0x10) && features->type < INTUOS3S) {
/* 4D mouse packet */
- wacom_report_key(wcombo, BTN_LEFT, data[8] & 0x01);
- wacom_report_key(wcombo, BTN_MIDDLE, data[8] & 0x02);
- wacom_report_key(wcombo, BTN_RIGHT, data[8] & 0x04);
+ input_report_key(input, BTN_LEFT, data[8] & 0x01);
+ input_report_key(input, BTN_MIDDLE, data[8] & 0x02);
+ input_report_key(input, BTN_RIGHT, data[8] & 0x04);
- wacom_report_key(wcombo, BTN_SIDE, data[8] & 0x20);
- wacom_report_key(wcombo, BTN_EXTRA, data[8] & 0x10);
+ input_report_key(input, BTN_SIDE, data[8] & 0x20);
+ input_report_key(input, BTN_EXTRA, data[8] & 0x10);
t = (data[6] << 2) | ((data[7] >> 6) & 3);
- wacom_report_abs(wcombo, ABS_THROTTLE, (data[8] & 0x08) ? -t : t);
+ input_report_abs(input, ABS_THROTTLE, (data[8] & 0x08) ? -t : t);
} else if (wacom->tool[idx] == BTN_TOOL_MOUSE) {
/* I4 mouse */
if (features->type >= INTUOS4S && features->type <= INTUOS4L) {
- wacom_report_key(wcombo, BTN_LEFT, data[6] & 0x01);
- wacom_report_key(wcombo, BTN_MIDDLE, data[6] & 0x02);
- wacom_report_key(wcombo, BTN_RIGHT, data[6] & 0x04);
- wacom_report_rel(wcombo, REL_WHEEL, ((data[7] & 0x80) >> 7)
+ input_report_key(input, BTN_LEFT, data[6] & 0x01);
+ input_report_key(input, BTN_MIDDLE, data[6] & 0x02);
+ input_report_key(input, BTN_RIGHT, data[6] & 0x04);
+ input_report_rel(input, REL_WHEEL, ((data[7] & 0x80) >> 7)
- ((data[7] & 0x40) >> 6));
- wacom_report_key(wcombo, BTN_SIDE, data[6] & 0x08);
- wacom_report_key(wcombo, BTN_EXTRA, data[6] & 0x10);
+ input_report_key(input, BTN_SIDE, data[6] & 0x08);
+ input_report_key(input, BTN_EXTRA, data[6] & 0x10);
- wacom_report_abs(wcombo, ABS_TILT_X,
+ input_report_abs(input, ABS_TILT_X,
((data[7] << 1) & 0x7e) | (data[8] >> 7));
- wacom_report_abs(wcombo, ABS_TILT_Y, data[8] & 0x7f);
+ input_report_abs(input, ABS_TILT_Y, data[8] & 0x7f);
} else {
/* 2D mouse packet */
- wacom_report_key(wcombo, BTN_LEFT, data[8] & 0x04);
- wacom_report_key(wcombo, BTN_MIDDLE, data[8] & 0x08);
- wacom_report_key(wcombo, BTN_RIGHT, data[8] & 0x10);
- wacom_report_rel(wcombo, REL_WHEEL, (data[8] & 0x01)
+ input_report_key(input, BTN_LEFT, data[8] & 0x04);
+ input_report_key(input, BTN_MIDDLE, data[8] & 0x08);
+ input_report_key(input, BTN_RIGHT, data[8] & 0x10);
+ input_report_rel(input, REL_WHEEL, (data[8] & 0x01)
- ((data[8] & 0x02) >> 1));
/* I3 2D mouse side buttons */
if (features->type >= INTUOS3S && features->type <= INTUOS3L) {
- wacom_report_key(wcombo, BTN_SIDE, data[8] & 0x40);
- wacom_report_key(wcombo, BTN_EXTRA, data[8] & 0x20);
+ input_report_key(input, BTN_SIDE, data[8] & 0x40);
+ input_report_key(input, BTN_EXTRA, data[8] & 0x20);
}
}
} else if ((features->type < INTUOS3S || features->type == INTUOS3L ||
features->type == INTUOS4L) &&
wacom->tool[idx] == BTN_TOOL_LENS) {
/* Lens cursor packets */
- wacom_report_key(wcombo, BTN_LEFT, data[8] & 0x01);
- wacom_report_key(wcombo, BTN_MIDDLE, data[8] & 0x02);
- wacom_report_key(wcombo, BTN_RIGHT, data[8] & 0x04);
- wacom_report_key(wcombo, BTN_SIDE, data[8] & 0x10);
- wacom_report_key(wcombo, BTN_EXTRA, data[8] & 0x08);
+ input_report_key(input, BTN_LEFT, data[8] & 0x01);
+ input_report_key(input, BTN_MIDDLE, data[8] & 0x02);
+ input_report_key(input, BTN_RIGHT, data[8] & 0x04);
+ input_report_key(input, BTN_SIDE, data[8] & 0x10);
+ input_report_key(input, BTN_EXTRA, data[8] & 0x08);
}
}
- wacom_report_abs(wcombo, ABS_MISC, wacom->id[idx]); /* report tool id */
- wacom_report_key(wcombo, wacom->tool[idx], 1);
- wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, wacom->serial[idx]);
+ input_report_abs(input, ABS_MISC, wacom->id[idx]); /* report tool id */
+ input_report_key(input, wacom->tool[idx], 1);
+ input_event(input, EV_MSC, MSC_SERIAL, wacom->serial[idx]);
return 1;
}
-static void wacom_tpc_finger_in(struct wacom_wac *wacom, void *wcombo, char *data, int idx)
+static void wacom_tpc_finger_in(struct wacom_wac *wacom, char *data, int idx)
{
- wacom_report_abs(wcombo, ABS_X,
- (data[2 + idx * 2] & 0xff) | ((data[3 + idx * 2] & 0x7f) << 8));
- wacom_report_abs(wcombo, ABS_Y,
- (data[6 + idx * 2] & 0xff) | ((data[7 + idx * 2] & 0x7f) << 8));
- wacom_report_abs(wcombo, ABS_MISC, wacom->id[0]);
- wacom_report_key(wcombo, wacom->tool[idx], 1);
- if (idx)
- wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xf0);
- else
- wacom_report_key(wcombo, BTN_TOUCH, 1);
+ struct input_dev *input = wacom->input;
+ int finger = idx + 1;
+ int x = le16_to_cpup((__le16 *)&data[finger * 2]) & 0x7fff;
+ int y = le16_to_cpup((__le16 *)&data[4 + finger * 2]) & 0x7fff;
+
+ /*
+ * Work around input core suppressing "duplicate" events since
+ * we are abusing ABS_X/ABS_Y to transmit multi-finger data.
+ * This should go away once we switch to true multitouch
+ * protocol.
+ */
+ if (wacom->last_finger != finger) {
+ if (x == input->abs[ABS_X])
+ x++;
+
+ if (y == input->abs[ABS_Y])
+ y++;
+ }
+
+ input_report_abs(input, ABS_X, x);
+ input_report_abs(input, ABS_Y, y);
+ input_report_abs(input, ABS_MISC, wacom->id[0]);
+ input_report_key(input, wacom->tool[finger], 1);
+ if (!idx)
+ input_report_key(input, BTN_TOUCH, 1);
+ input_event(input, EV_MSC, MSC_SERIAL, finger);
+ input_sync(wacom->input);
+
+ wacom->last_finger = finger;
}
-static void wacom_tpc_touch_out(struct wacom_wac *wacom, void *wcombo, int idx)
+static void wacom_tpc_touch_out(struct wacom_wac *wacom, int idx)
{
- wacom_report_abs(wcombo, ABS_X, 0);
- wacom_report_abs(wcombo, ABS_Y, 0);
- wacom_report_abs(wcombo, ABS_MISC, 0);
- wacom_report_key(wcombo, wacom->tool[idx], 0);
- if (idx)
- wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xf0);
- else
- wacom_report_key(wcombo, BTN_TOUCH, 0);
- return;
+ struct input_dev *input = wacom->input;
+ int finger = idx + 1;
+
+ input_report_abs(input, ABS_X, 0);
+ input_report_abs(input, ABS_Y, 0);
+ input_report_abs(input, ABS_MISC, 0);
+ input_report_key(input, wacom->tool[finger], 0);
+ if (!idx)
+ input_report_key(input, BTN_TOUCH, 0);
+ input_event(input, EV_MSC, MSC_SERIAL, finger);
+ input_sync(input);
}
-static void wacom_tpc_touch_in(struct wacom_wac *wacom, void *wcombo)
+static void wacom_tpc_touch_in(struct wacom_wac *wacom, size_t len)
{
char *data = wacom->data;
- struct urb *urb = ((struct wacom_combo *)wcombo)->urb;
- static int firstFinger = 0;
- static int secondFinger = 0;
+ struct input_dev *input = wacom->input;
- wacom->tool[0] = BTN_TOOL_DOUBLETAP;
+ wacom->tool[1] = BTN_TOOL_DOUBLETAP;
wacom->id[0] = TOUCH_DEVICE_ID;
- wacom->tool[1] = BTN_TOOL_TRIPLETAP;
+ wacom->tool[2] = BTN_TOOL_TRIPLETAP;
+
+ if (len != WACOM_PKGLEN_TPC1FG) {
- if (urb->actual_length != WACOM_PKGLEN_TPC1FG) {
switch (data[0]) {
- case WACOM_REPORT_TPC1FG:
- wacom_report_abs(wcombo, ABS_X, wacom_le16_to_cpu(&data[2]));
- wacom_report_abs(wcombo, ABS_Y, wacom_le16_to_cpu(&data[4]));
- wacom_report_abs(wcombo, ABS_PRESSURE, wacom_le16_to_cpu(&data[6]));
- wacom_report_key(wcombo, BTN_TOUCH, wacom_le16_to_cpu(&data[6]));
- wacom_report_abs(wcombo, ABS_MISC, wacom->id[0]);
- wacom_report_key(wcombo, wacom->tool[0], 1);
- break;
- case WACOM_REPORT_TPC2FG:
- /* keep this byte to send proper out-prox event */
- wacom->id[1] = data[1] & 0x03;
-
- if (data[1] & 0x01) {
- wacom_tpc_finger_in(wacom, wcombo, data, 0);
- firstFinger = 1;
- } else if (firstFinger) {
- wacom_tpc_touch_out(wacom, wcombo, 0);
- }
- if (data[1] & 0x02) {
- /* sync first finger data */
- if (firstFinger)
- wacom_input_sync(wcombo);
+ case WACOM_REPORT_TPC1FG:
+ input_report_abs(input, ABS_X, le16_to_cpup((__le16 *)&data[2]));
+ input_report_abs(input, ABS_Y, le16_to_cpup((__le16 *)&data[4]));
+ input_report_abs(input, ABS_PRESSURE, le16_to_cpup((__le16 *)&data[6]));
+ input_report_key(input, BTN_TOUCH, le16_to_cpup((__le16 *)&data[6]));
+ input_report_abs(input, ABS_MISC, wacom->id[0]);
+ input_report_key(input, wacom->tool[1], 1);
+ input_sync(input);
+ break;
- wacom_tpc_finger_in(wacom, wcombo, data, 1);
- secondFinger = 1;
- } else if (secondFinger) {
- /* sync first finger data */
- if (firstFinger)
- wacom_input_sync(wcombo);
+ case WACOM_REPORT_TPC2FG:
+ if (data[1] & 0x01)
+ wacom_tpc_finger_in(wacom, data, 0);
+ else if (wacom->id[1] & 0x01)
+ wacom_tpc_touch_out(wacom, 0);
- wacom_tpc_touch_out(wacom, wcombo, 1);
- secondFinger = 0;
- }
- if (!(data[1] & 0x01))
- firstFinger = 0;
- break;
+ if (data[1] & 0x02)
+ wacom_tpc_finger_in(wacom, data, 1);
+ else if (wacom->id[1] & 0x02)
+ wacom_tpc_touch_out(wacom, 1);
+ break;
}
} else {
- wacom_report_abs(wcombo, ABS_X, wacom_le16_to_cpu(&data[1]));
- wacom_report_abs(wcombo, ABS_Y, wacom_le16_to_cpu(&data[3]));
- wacom_report_key(wcombo, BTN_TOUCH, 1);
- wacom_report_abs(wcombo, ABS_MISC, wacom->id[0]);
- wacom_report_key(wcombo, wacom->tool[0], 1);
+ input_report_abs(input, ABS_X, get_unaligned_le16(&data[1]));
+ input_report_abs(input, ABS_Y, get_unaligned_le16(&data[3]));
+ input_report_key(input, BTN_TOUCH, 1);
+ input_report_abs(input, ABS_MISC, wacom->id[1]);
+ input_report_key(input, wacom->tool[1], 1);
+ input_sync(input);
}
- return;
}
-static int wacom_tpc_irq(struct wacom_wac *wacom, void *wcombo)
+static int wacom_tpc_irq(struct wacom_wac *wacom, size_t len)
{
struct wacom_features *features = &wacom->features;
char *data = wacom->data;
- int prox = 0, pressure, idx = -1;
- static int stylusInProx, touchInProx = 1, touchOut;
- struct urb *urb = ((struct wacom_combo *)wcombo)->urb;
+ struct input_dev *input = wacom->input;
+ int prox = 0, pressure;
+ int retval = 0;
dbg("wacom_tpc_irq: received report #%d", data[0]);
- if (urb->actual_length == WACOM_PKGLEN_TPC1FG || /* single touch */
+ if (len == WACOM_PKGLEN_TPC1FG || /* single touch */
data[0] == WACOM_REPORT_TPC1FG || /* single touch */
data[0] == WACOM_REPORT_TPC2FG) { /* 2FG touch */
- if (urb->actual_length == WACOM_PKGLEN_TPC1FG) { /* with touch */
+
+ if (wacom->shared->stylus_in_proximity) {
+ if (wacom->id[1] & 0x01)
+ wacom_tpc_touch_out(wacom, 0);
+
+ if (wacom->id[1] & 0x02)
+ wacom_tpc_touch_out(wacom, 1);
+
+ wacom->id[1] = 0;
+ return 0;
+ }
+
+ if (len == WACOM_PKGLEN_TPC1FG) { /* with touch */
prox = data[0] & 0x01;
} else { /* with capacity */
if (data[0] == WACOM_REPORT_TPC1FG)
@@ -745,168 +741,264 @@ static int wacom_tpc_irq(struct wacom_wac *wacom, void *wcombo)
prox = data[1] & 0x03;
}
- if (!stylusInProx) { /* stylus not in prox */
- if (prox) {
- if (touchInProx) {
- wacom_tpc_touch_in(wacom, wcombo);
- touchOut = 1;
- return 1;
- }
- } else {
+ if (prox) {
+ if (!wacom->id[1])
+ wacom->last_finger = 1;
+ wacom_tpc_touch_in(wacom, len);
+ } else {
+ if (data[0] == WACOM_REPORT_TPC2FG) {
/* 2FGT out-prox */
- if (data[0] == WACOM_REPORT_TPC2FG) {
- idx = (wacom->id[1] & 0x01) - 1;
- if (idx == 0) {
- wacom_tpc_touch_out(wacom, wcombo, idx);
- /* sync first finger event */
- if (wacom->id[1] & 0x02)
- wacom_input_sync(wcombo);
- }
- idx = (wacom->id[1] & 0x02) - 1;
- if (idx == 1)
- wacom_tpc_touch_out(wacom, wcombo, idx);
- } else /* one finger touch */
- wacom_tpc_touch_out(wacom, wcombo, 0);
- touchOut = 0;
- touchInProx = 1;
- return 1;
- }
- } else if (touchOut || !prox) { /* force touch out-prox */
- wacom_tpc_touch_out(wacom, wcombo, 0);
- touchOut = 0;
- touchInProx = 1;
- return 1;
+ if (wacom->id[1] & 0x01)
+ wacom_tpc_touch_out(wacom, 0);
+
+ if (wacom->id[1] & 0x02)
+ wacom_tpc_touch_out(wacom, 1);
+ } else
+ /* one finger touch */
+ wacom_tpc_touch_out(wacom, 0);
+
+ wacom->id[0] = 0;
}
+ /* keep prox bit to send proper out-prox event */
+ wacom->id[1] = prox;
} else if (data[0] == WACOM_REPORT_PENABLED) { /* Penabled */
prox = data[1] & 0x20;
- touchInProx = 0;
+ if (!wacom->shared->stylus_in_proximity) { /* first in prox */
+ /* Going into proximity select tool */
+ wacom->tool[0] = (data[1] & 0x0c) ? BTN_TOOL_RUBBER : BTN_TOOL_PEN;
+ if (wacom->tool[0] == BTN_TOOL_PEN)
+ wacom->id[0] = STYLUS_DEVICE_ID;
+ else
+ wacom->id[0] = ERASER_DEVICE_ID;
- if (prox) { /* in prox */
- if (!wacom->id[0]) {
- /* Going into proximity select tool */
- wacom->tool[0] = (data[1] & 0x0c) ? BTN_TOOL_RUBBER : BTN_TOOL_PEN;
- if (wacom->tool[0] == BTN_TOOL_PEN)
- wacom->id[0] = STYLUS_DEVICE_ID;
- else
- wacom->id[0] = ERASER_DEVICE_ID;
- }
- wacom_report_key(wcombo, BTN_STYLUS, data[1] & 0x02);
- wacom_report_key(wcombo, BTN_STYLUS2, data[1] & 0x10);
- wacom_report_abs(wcombo, ABS_X, wacom_le16_to_cpu(&data[2]));
- wacom_report_abs(wcombo, ABS_Y, wacom_le16_to_cpu(&data[4]));
- pressure = ((data[7] & 0x01) << 8) | data[6];
- if (pressure < 0)
- pressure = features->pressure_max + pressure + 1;
- wacom_report_abs(wcombo, ABS_PRESSURE, pressure);
- wacom_report_key(wcombo, BTN_TOUCH, data[1] & 0x05);
- } else {
- wacom_report_abs(wcombo, ABS_X, 0);
- wacom_report_abs(wcombo, ABS_Y, 0);
- wacom_report_abs(wcombo, ABS_PRESSURE, 0);
- wacom_report_key(wcombo, BTN_STYLUS, 0);
- wacom_report_key(wcombo, BTN_STYLUS2, 0);
- wacom_report_key(wcombo, BTN_TOUCH, 0);
+ wacom->shared->stylus_in_proximity = true;
+ }
+ input_report_key(input, BTN_STYLUS, data[1] & 0x02);
+ input_report_key(input, BTN_STYLUS2, data[1] & 0x10);
+ input_report_abs(input, ABS_X, le16_to_cpup((__le16 *)&data[2]));
+ input_report_abs(input, ABS_Y, le16_to_cpup((__le16 *)&data[4]));
+ pressure = ((data[7] & 0x01) << 8) | data[6];
+ if (pressure < 0)
+ pressure = features->pressure_max + pressure + 1;
+ input_report_abs(input, ABS_PRESSURE, pressure);
+ input_report_key(input, BTN_TOUCH, data[1] & 0x05);
+ if (!prox) { /* out-prox */
wacom->id[0] = 0;
- /* pen is out so touch can be enabled now */
- touchInProx = 1;
+ wacom->shared->stylus_in_proximity = false;
}
- wacom_report_key(wcombo, wacom->tool[0], prox);
- wacom_report_abs(wcombo, ABS_MISC, wacom->id[0]);
- stylusInProx = prox;
- return 1;
+ input_report_key(input, wacom->tool[0], prox);
+ input_report_abs(input, ABS_MISC, wacom->id[0]);
+ retval = 1;
}
- return 0;
+ return retval;
}
-int wacom_wac_irq(struct wacom_wac *wacom_wac, void *wcombo)
+void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len)
{
+ bool sync;
+
switch (wacom_wac->features.type) {
- case PENPARTNER:
- return wacom_penpartner_irq(wacom_wac, wcombo);
-
- case PL:
- return wacom_pl_irq(wacom_wac, wcombo);
-
- case WACOM_G4:
- case GRAPHIRE:
- case WACOM_MO:
- return wacom_graphire_irq(wacom_wac, wcombo);
-
- case PTU:
- return wacom_ptu_irq(wacom_wac, wcombo);
-
- case INTUOS:
- case INTUOS3S:
- case INTUOS3:
- case INTUOS3L:
- case INTUOS4S:
- case INTUOS4:
- case INTUOS4L:
- case CINTIQ:
- case WACOM_BEE:
- return wacom_intuos_irq(wacom_wac, wcombo);
-
- case TABLETPC:
- case TABLETPC2FG:
- return wacom_tpc_irq(wacom_wac, wcombo);
-
- default:
- return 0;
+ case PENPARTNER:
+ sync = wacom_penpartner_irq(wacom_wac);
+ break;
+
+ case PL:
+ sync = wacom_pl_irq(wacom_wac);
+ break;
+
+ case WACOM_G4:
+ case GRAPHIRE:
+ case WACOM_MO:
+ sync = wacom_graphire_irq(wacom_wac);
+ break;
+
+ case PTU:
+ sync = wacom_ptu_irq(wacom_wac);
+ break;
+
+ case INTUOS:
+ case INTUOS3S:
+ case INTUOS3:
+ case INTUOS3L:
+ case INTUOS4S:
+ case INTUOS4:
+ case INTUOS4L:
+ case CINTIQ:
+ case WACOM_BEE:
+ sync = wacom_intuos_irq(wacom_wac);
+ break;
+
+ case TABLETPC:
+ case TABLETPC2FG:
+ sync = wacom_tpc_irq(wacom_wac, len);
+ break;
+
+ default:
+ sync = false;
+ break;
}
- return 0;
+
+ if (sync)
+ input_sync(wacom_wac->input);
+}
+
+static void wacom_setup_intuos(struct wacom_wac *wacom_wac)
+{
+ struct input_dev *input_dev = wacom_wac->input;
+
+ input_set_capability(input_dev, EV_MSC, MSC_SERIAL);
+ input_set_capability(input_dev, EV_REL, REL_WHEEL);
+
+ __set_bit(BTN_LEFT, input_dev->keybit);
+ __set_bit(BTN_RIGHT, input_dev->keybit);
+ __set_bit(BTN_MIDDLE, input_dev->keybit);
+ __set_bit(BTN_SIDE, input_dev->keybit);
+ __set_bit(BTN_EXTRA, input_dev->keybit);
+
+ __set_bit(BTN_TOOL_RUBBER, input_dev->keybit);
+ __set_bit(BTN_TOOL_PEN, input_dev->keybit);
+ __set_bit(BTN_TOOL_MOUSE, input_dev->keybit);
+ __set_bit(BTN_TOOL_BRUSH, input_dev->keybit);
+ __set_bit(BTN_TOOL_PENCIL, input_dev->keybit);
+ __set_bit(BTN_TOOL_AIRBRUSH, input_dev->keybit);
+ __set_bit(BTN_TOOL_LENS, input_dev->keybit);
+ __set_bit(BTN_STYLUS, input_dev->keybit);
+ __set_bit(BTN_STYLUS2, input_dev->keybit);
+
+ input_set_abs_params(input_dev, ABS_DISTANCE,
+ 0, wacom_wac->features.distance_max, 0, 0);
+ input_set_abs_params(input_dev, ABS_WHEEL, 0, 1023, 0, 0);
+ input_set_abs_params(input_dev, ABS_TILT_X, 0, 127, 0, 0);
+ input_set_abs_params(input_dev, ABS_TILT_Y, 0, 127, 0, 0);
+ input_set_abs_params(input_dev, ABS_RZ, -900, 899, 0, 0);
+ input_set_abs_params(input_dev, ABS_THROTTLE, -1023, 1023, 0, 0);
}
-void wacom_init_input_dev(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
+void wacom_setup_input_capabilities(struct input_dev *input_dev,
+ struct wacom_wac *wacom_wac)
{
+ struct wacom_features *features = &wacom_wac->features;
+ int i;
+
+ input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
+
+ __set_bit(BTN_TOUCH, input_dev->keybit);
+
+ input_set_abs_params(input_dev, ABS_X, 0, features->x_max, 4, 0);
+ input_set_abs_params(input_dev, ABS_Y, 0, features->y_max, 4, 0);
+ input_set_abs_params(input_dev, ABS_PRESSURE, 0, features->pressure_max, 0, 0);
+
+ __set_bit(ABS_MISC, input_dev->absbit);
+
switch (wacom_wac->features.type) {
- case WACOM_MO:
- input_dev_mo(input_dev, wacom_wac);
- case WACOM_G4:
- input_dev_g4(input_dev, wacom_wac);
- /* fall through */
- case GRAPHIRE:
- input_dev_g(input_dev, wacom_wac);
- break;
- case WACOM_BEE:
- input_dev_bee(input_dev, wacom_wac);
- case INTUOS3:
- case INTUOS3L:
- case CINTIQ:
- input_dev_i3(input_dev, wacom_wac);
- /* fall through */
- case INTUOS3S:
- input_dev_i3s(input_dev, wacom_wac);
- /* fall through */
- case INTUOS:
- input_dev_i(input_dev, wacom_wac);
- break;
- case INTUOS4:
- case INTUOS4L:
- input_dev_i4(input_dev, wacom_wac);
- /* fall through */
- case INTUOS4S:
- input_dev_i4s(input_dev, wacom_wac);
- input_dev_i(input_dev, wacom_wac);
- break;
- case TABLETPC2FG:
- input_dev_tpc2fg(input_dev, wacom_wac);
- /* fall through */
- case TABLETPC:
- input_dev_tpc(input_dev, wacom_wac);
- if (wacom_wac->features.device_type != BTN_TOOL_PEN)
- break; /* no need to process stylus stuff */
-
- /* fall through */
- case PL:
- case PTU:
- input_dev_pl(input_dev, wacom_wac);
- /* fall through */
- case PENPARTNER:
- input_dev_pt(input_dev, wacom_wac);
- break;
+ case WACOM_MO:
+ __set_bit(BTN_1, input_dev->keybit);
+ __set_bit(BTN_5, input_dev->keybit);
+
+ input_set_abs_params(input_dev, ABS_WHEEL, 0, 71, 0, 0);
+ /* fall through */
+
+ case WACOM_G4:
+ input_set_capability(input_dev, EV_MSC, MSC_SERIAL);
+
+ __set_bit(BTN_TOOL_FINGER, input_dev->keybit);
+ __set_bit(BTN_0, input_dev->keybit);
+ __set_bit(BTN_4, input_dev->keybit);
+ /* fall through */
+
+ case GRAPHIRE:
+ input_set_capability(input_dev, EV_REL, REL_WHEEL);
+
+ __set_bit(BTN_LEFT, input_dev->keybit);
+ __set_bit(BTN_RIGHT, input_dev->keybit);
+ __set_bit(BTN_MIDDLE, input_dev->keybit);
+
+ __set_bit(BTN_TOOL_RUBBER, input_dev->keybit);
+ __set_bit(BTN_TOOL_PEN, input_dev->keybit);
+ __set_bit(BTN_TOOL_MOUSE, input_dev->keybit);
+ __set_bit(BTN_STYLUS, input_dev->keybit);
+ __set_bit(BTN_STYLUS2, input_dev->keybit);
+ break;
+
+ case WACOM_BEE:
+ __set_bit(BTN_8, input_dev->keybit);
+ __set_bit(BTN_9, input_dev->keybit);
+ /* fall through */
+
+ case INTUOS3:
+ case INTUOS3L:
+ case CINTIQ:
+ __set_bit(BTN_4, input_dev->keybit);
+ __set_bit(BTN_5, input_dev->keybit);
+ __set_bit(BTN_6, input_dev->keybit);
+ __set_bit(BTN_7, input_dev->keybit);
+
+ input_set_abs_params(input_dev, ABS_RY, 0, 4096, 0, 0);
+ /* fall through */
+
+ case INTUOS3S:
+ __set_bit(BTN_0, input_dev->keybit);
+ __set_bit(BTN_1, input_dev->keybit);
+ __set_bit(BTN_2, input_dev->keybit);
+ __set_bit(BTN_3, input_dev->keybit);
+
+ __set_bit(BTN_TOOL_FINGER, input_dev->keybit);
+
+ input_set_abs_params(input_dev, ABS_RX, 0, 4096, 0, 0);
+ input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
+ /* fall through */
+
+ case INTUOS:
+ wacom_setup_intuos(wacom_wac);
+ break;
+
+ case INTUOS4:
+ case INTUOS4L:
+ __set_bit(BTN_7, input_dev->keybit);
+ __set_bit(BTN_8, input_dev->keybit);
+ /* fall through */
+
+ case INTUOS4S:
+ for (i = 0; i < 7; i++)
+ __set_bit(BTN_0 + i, input_dev->keybit);
+ __set_bit(BTN_TOOL_FINGER, input_dev->keybit);
+
+ input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
+ wacom_setup_intuos(wacom_wac);
+ break;
+
+ case TABLETPC2FG:
+ if (features->device_type == BTN_TOOL_TRIPLETAP) {
+ __set_bit(BTN_TOOL_TRIPLETAP, input_dev->keybit);
+ input_set_capability(input_dev, EV_MSC, MSC_SERIAL);
+ }
+ /* fall through */
+
+ case TABLETPC:
+ if (features->device_type == BTN_TOOL_DOUBLETAP ||
+ features->device_type == BTN_TOOL_TRIPLETAP) {
+ input_set_abs_params(input_dev, ABS_RX, 0, features->x_phy, 0, 0);
+ input_set_abs_params(input_dev, ABS_RY, 0, features->y_phy, 0, 0);
+ __set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit);
+ }
+
+ if (features->device_type != BTN_TOOL_PEN)
+ break; /* no need to process stylus stuff */
+
+ /* fall through */
+
+ case PL:
+ case PTU:
+ __set_bit(BTN_TOOL_PEN, input_dev->keybit);
+ __set_bit(BTN_STYLUS, input_dev->keybit);
+ __set_bit(BTN_STYLUS2, input_dev->keybit);
+ /* fall through */
+
+ case PENPARTNER:
+ __set_bit(BTN_TOOL_RUBBER, input_dev->keybit);
+ break;
}
- return;
}
static const struct wacom_features wacom_features_0x00 =
diff --git a/drivers/input/tablet/wacom_wac.h b/drivers/input/tablet/wacom_wac.h
index b50cf04e61a8..063f1af3204f 100644
--- a/drivers/input/tablet/wacom_wac.h
+++ b/drivers/input/tablet/wacom_wac.h
@@ -9,6 +9,8 @@
#ifndef WACOM_WAC_H
#define WACOM_WAC_H
+#include <linux/types.h>
+
/* maximum packet length for USB devices */
#define WACOM_PKGLEN_MAX 32
@@ -71,13 +73,20 @@ struct wacom_features {
unsigned char unitExpo;
};
+struct wacom_shared {
+ bool stylus_in_proximity;
+};
+
struct wacom_wac {
char name[64];
unsigned char *data;
- int tool[2];
- int id[2];
+ int tool[3];
+ int id[3];
__u32 serial[2];
+ int last_finger;
struct wacom_features features;
+ struct wacom_shared *shared;
+ struct input_dev *input;
};
#endif
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 8a8fa4d2d6a8..6703c6b9800a 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -99,22 +99,6 @@ config TOUCHSCREEN_BITSY
To compile this driver as a module, choose M here: the
module will be called h3600_ts_input.
-config TOUCHSCREEN_CORGI
- tristate "SharpSL (Corgi and Spitz series) touchscreen driver (DEPRECATED)"
- depends on PXA_SHARPSL
- select CORGI_SSP_DEPRECATED
- help
- Say Y here to enable the driver for the touchscreen on the
- Sharp SL-C7xx and SL-Cxx00 series of PDAs.
-
- If unsure, say N.
-
- To compile this driver as a module, choose M here: the
- module will be called corgi_ts.
-
- NOTE: this driver is deprecated, try enable SPI and generic
- ADS7846-based touchscreen driver.
-
config TOUCHSCREEN_DA9034
tristate "Touchscreen support for Dialog Semiconductor DA9034"
depends on PMIC_DA903X
@@ -135,6 +119,18 @@ config TOUCHSCREEN_DYNAPRO
To compile this driver as a module, choose M here: the
module will be called dynapro.
+config TOUCHSCREEN_HAMPSHIRE
+ tristate "Hampshire serial touchscreen"
+ select SERIO
+ help
+ Say Y here if you have a Hampshire serial touchscreen connected to
+ your system.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called hampshire.
+
config TOUCHSCREEN_EETI
tristate "EETI touchscreen panel support"
depends on I2C
@@ -158,8 +154,8 @@ config TOUCHSCREEN_FUJITSU
module will be called fujitsu-ts.
config TOUCHSCREEN_S3C2410
- tristate "Samsung S3C2410 touchscreen input driver"
- depends on ARCH_S3C2410
+ tristate "Samsung S3C2410/generic touchscreen input driver"
+ depends on ARCH_S3C2410 || SAMSUNG_DEV_TS
select S3C24XX_ADC
help
Say Y here if you have the s3c2410 touchscreen.
@@ -594,4 +590,17 @@ config TOUCHSCREEN_PCAP
To compile this driver as a module, choose M here: the
module will be called pcap_ts.
+
+config TOUCHSCREEN_TPS6507X
+ tristate "TPS6507x based touchscreens"
+ depends on I2C
+ help
+ Say Y here if you have a TPS6507x based touchscreen
+ controller.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called tps6507x_ts.
+
endif
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index 7fef7d5cca23..497964a7a214 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -12,8 +12,8 @@ obj-$(CONFIG_TOUCHSCREEN_AD7879) += ad7879.o
obj-$(CONFIG_TOUCHSCREEN_ADS7846) += ads7846.o
obj-$(CONFIG_TOUCHSCREEN_ATMEL_TSADCC) += atmel_tsadcc.o
obj-$(CONFIG_TOUCHSCREEN_BITSY) += h3600_ts_input.o
-obj-$(CONFIG_TOUCHSCREEN_CORGI) += corgi_ts.o
obj-$(CONFIG_TOUCHSCREEN_DYNAPRO) += dynapro.o
+obj-$(CONFIG_TOUCHSCREEN_HAMPSHIRE) += hampshire.o
obj-$(CONFIG_TOUCHSCREEN_GUNZE) += gunze.o
obj-$(CONFIG_TOUCHSCREEN_EETI) += eeti_ts.o
obj-$(CONFIG_TOUCHSCREEN_ELO) += elo.o
@@ -46,3 +46,4 @@ obj-$(CONFIG_TOUCHSCREEN_WM97XX_ATMEL) += atmel-wm97xx.o
obj-$(CONFIG_TOUCHSCREEN_WM97XX_MAINSTONE) += mainstone-wm97xx.o
obj-$(CONFIG_TOUCHSCREEN_WM97XX_ZYLONITE) += zylonite-wm97xx.o
obj-$(CONFIG_TOUCHSCREEN_W90X900) += w90p910_ts.o
+obj-$(CONFIG_TOUCHSCREEN_TPS6507X) += tps6507x-ts.o
diff --git a/drivers/input/touchscreen/ad7877.c b/drivers/input/touchscreen/ad7877.c
index e019d53d1ab4..0d2d7e54b465 100644
--- a/drivers/input/touchscreen/ad7877.c
+++ b/drivers/input/touchscreen/ad7877.c
@@ -156,9 +156,14 @@ struct ser_req {
u16 reset;
u16 ref_on;
u16 command;
- u16 sample;
struct spi_message msg;
struct spi_transfer xfer[6];
+
+ /*
+ * DMA (thus cache coherency maintenance) requires the
+ * transfer buffers to live in their own cache lines.
+ */
+ u16 sample ____cacheline_aligned;
};
struct ad7877 {
@@ -182,8 +187,6 @@ struct ad7877 {
u8 averaging;
u8 pen_down_acc_interval;
- u16 conversion_data[AD7877_NR_SENSE];
-
struct spi_transfer xfer[AD7877_NR_SENSE + 2];
struct spi_message msg;
@@ -195,6 +198,12 @@ struct ad7877 {
spinlock_t lock;
struct timer_list timer; /* P: lock */
unsigned pending:1; /* P: lock */
+
+ /*
+ * DMA (thus cache coherency maintenance) requires the
+ * transfer buffers to live in their own cache lines.
+ */
+ u16 conversion_data[AD7877_NR_SENSE] ____cacheline_aligned;
};
static int gpio3;
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index 532279cda0e4..634f6f6b9b13 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -1163,8 +1163,8 @@ static int __devinit ads7846_probe(struct spi_device *spi)
ts->reg = regulator_get(&spi->dev, "vcc");
if (IS_ERR(ts->reg)) {
- dev_err(&spi->dev, "unable to get regulator: %ld\n",
- PTR_ERR(ts->reg));
+ err = PTR_ERR(ts->reg);
+ dev_err(&spi->dev, "unable to get regulator: %ld\n", err);
goto err_free_gpio;
}
diff --git a/drivers/input/touchscreen/corgi_ts.c b/drivers/input/touchscreen/corgi_ts.c
deleted file mode 100644
index 94a1919d439d..000000000000
--- a/drivers/input/touchscreen/corgi_ts.c
+++ /dev/null
@@ -1,385 +0,0 @@
-/*
- * Touchscreen driver for Sharp SL-C7xx and SL-Cxx00 models
- *
- * Copyright (c) 2004-2005 Richard Purdie
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-
-#include <linux/delay.h>
-#include <linux/platform_device.h>
-#include <linux/init.h>
-#include <linux/input.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/irq.h>
-
-#include <mach/sharpsl.h>
-#include <mach/hardware.h>
-#include <mach/pxa2xx-gpio.h>
-
-
-#define PWR_MODE_ACTIVE 0
-#define PWR_MODE_SUSPEND 1
-
-#define X_AXIS_MAX 3830
-#define X_AXIS_MIN 150
-#define Y_AXIS_MAX 3830
-#define Y_AXIS_MIN 190
-#define PRESSURE_MIN 0
-#define PRESSURE_MAX 15000
-
-struct ts_event {
- short pressure;
- short x;
- short y;
-};
-
-struct corgi_ts {
- struct input_dev *input;
- struct timer_list timer;
- struct ts_event tc;
- int pendown;
- int power_mode;
- int irq_gpio;
- struct corgits_machinfo *machinfo;
-};
-
-#ifdef CONFIG_PXA25x
-#define CCNT(a) asm volatile ("mrc p14, 0, %0, C1, C0, 0" : "=r"(a))
-#define PMNC_GET(x) asm volatile ("mrc p14, 0, %0, C0, C0, 0" : "=r"(x))
-#define PMNC_SET(x) asm volatile ("mcr p14, 0, %0, C0, C0, 0" : : "r"(x))
-#endif
-#ifdef CONFIG_PXA27x
-#define CCNT(a) asm volatile ("mrc p14, 0, %0, C1, C1, 0" : "=r"(a))
-#define PMNC_GET(x) asm volatile ("mrc p14, 0, %0, C0, C1, 0" : "=r"(x))
-#define PMNC_SET(x) asm volatile ("mcr p14, 0, %0, C0, C1, 0" : : "r"(x))
-#endif
-
-/* ADS7846 Touch Screen Controller bit definitions */
-#define ADSCTRL_PD0 (1u << 0) /* PD0 */
-#define ADSCTRL_PD1 (1u << 1) /* PD1 */
-#define ADSCTRL_DFR (1u << 2) /* SER/DFR */
-#define ADSCTRL_MOD (1u << 3) /* Mode */
-#define ADSCTRL_ADR_SH 4 /* Address setting */
-#define ADSCTRL_STS (1u << 7) /* Start Bit */
-
-/* External Functions */
-extern unsigned int get_clk_frequency_khz(int info);
-
-static unsigned long calc_waittime(struct corgi_ts *corgi_ts)
-{
- unsigned long hsync_invperiod = corgi_ts->machinfo->get_hsync_invperiod();
-
- if (hsync_invperiod)
- return get_clk_frequency_khz(0)*1000/hsync_invperiod;
- else
- return 0;
-}
-
-static int sync_receive_data_send_cmd(struct corgi_ts *corgi_ts, int doRecive, int doSend,
- unsigned int address, unsigned long wait_time)
-{
- unsigned long timer1 = 0, timer2, pmnc = 0;
- int pos = 0;
-
- if (wait_time && doSend) {
- PMNC_GET(pmnc);
- if (!(pmnc & 0x01))
- PMNC_SET(0x01);
-
- /* polling HSync */
- corgi_ts->machinfo->wait_hsync();
- /* get CCNT */
- CCNT(timer1);
- }
-
- if (doRecive)
- pos = corgi_ssp_ads7846_get();
-
- if (doSend) {
- int cmd = ADSCTRL_PD0 | ADSCTRL_PD1 | (address << ADSCTRL_ADR_SH) | ADSCTRL_STS;
- /* dummy command */
- corgi_ssp_ads7846_put(cmd);
- corgi_ssp_ads7846_get();
-
- if (wait_time) {
- /* Wait after HSync */
- CCNT(timer2);
- if (timer2-timer1 > wait_time) {
- /* too slow - timeout, try again */
- corgi_ts->machinfo->wait_hsync();
- /* get CCNT */
- CCNT(timer1);
- /* Wait after HSync */
- CCNT(timer2);
- }
- while (timer2 - timer1 < wait_time)
- CCNT(timer2);
- }
- corgi_ssp_ads7846_put(cmd);
- if (wait_time && !(pmnc & 0x01))
- PMNC_SET(pmnc);
- }
- return pos;
-}
-
-static int read_xydata(struct corgi_ts *corgi_ts)
-{
- unsigned int x, y, z1, z2;
- unsigned long flags, wait_time;
-
- /* critical section */
- local_irq_save(flags);
- corgi_ssp_ads7846_lock();
- wait_time = calc_waittime(corgi_ts);
-
- /* Y-axis */
- sync_receive_data_send_cmd(corgi_ts, 0, 1, 1u, wait_time);
-
- /* Y-axis */
- sync_receive_data_send_cmd(corgi_ts, 1, 1, 1u, wait_time);
-
- /* X-axis */
- y = sync_receive_data_send_cmd(corgi_ts, 1, 1, 5u, wait_time);
-
- /* Z1 */
- x = sync_receive_data_send_cmd(corgi_ts, 1, 1, 3u, wait_time);
-
- /* Z2 */
- z1 = sync_receive_data_send_cmd(corgi_ts, 1, 1, 4u, wait_time);
- z2 = sync_receive_data_send_cmd(corgi_ts, 1, 0, 4u, wait_time);
-
- /* Power-Down Enable */
- corgi_ssp_ads7846_put((1u << ADSCTRL_ADR_SH) | ADSCTRL_STS);
- corgi_ssp_ads7846_get();
-
- corgi_ssp_ads7846_unlock();
- local_irq_restore(flags);
-
- if (x== 0 || y == 0 || z1 == 0 || (x * (z2 - z1) / z1) >= 15000) {
- corgi_ts->tc.pressure = 0;
- return 0;
- }
-
- corgi_ts->tc.x = x;
- corgi_ts->tc.y = y;
- corgi_ts->tc.pressure = (x * (z2 - z1)) / z1;
- return 1;
-}
-
-static void new_data(struct corgi_ts *corgi_ts)
-{
- struct input_dev *dev = corgi_ts->input;
-
- if (corgi_ts->power_mode != PWR_MODE_ACTIVE)
- return;
-
- if (!corgi_ts->tc.pressure && corgi_ts->pendown == 0)
- return;
-
- input_report_abs(dev, ABS_X, corgi_ts->tc.x);
- input_report_abs(dev, ABS_Y, corgi_ts->tc.y);
- input_report_abs(dev, ABS_PRESSURE, corgi_ts->tc.pressure);
- input_report_key(dev, BTN_TOUCH, corgi_ts->pendown);
- input_sync(dev);
-}
-
-static void ts_interrupt_main(struct corgi_ts *corgi_ts, int isTimer)
-{
- if ((GPLR(IRQ_TO_GPIO(corgi_ts->irq_gpio)) & GPIO_bit(IRQ_TO_GPIO(corgi_ts->irq_gpio))) == 0) {
- /* Disable Interrupt */
- set_irq_type(corgi_ts->irq_gpio, IRQ_TYPE_NONE);
- if (read_xydata(corgi_ts)) {
- corgi_ts->pendown = 1;
- new_data(corgi_ts);
- }
- mod_timer(&corgi_ts->timer, jiffies + HZ / 100);
- } else {
- if (corgi_ts->pendown == 1 || corgi_ts->pendown == 2) {
- mod_timer(&corgi_ts->timer, jiffies + HZ / 100);
- corgi_ts->pendown++;
- return;
- }
-
- if (corgi_ts->pendown) {
- corgi_ts->tc.pressure = 0;
- new_data(corgi_ts);
- }
-
- /* Enable Falling Edge */
- set_irq_type(corgi_ts->irq_gpio, IRQ_TYPE_EDGE_FALLING);
- corgi_ts->pendown = 0;
- }
-}
-
-static void corgi_ts_timer(unsigned long data)
-{
- struct corgi_ts *corgits_data = (struct corgi_ts *) data;
-
- ts_interrupt_main(corgits_data, 1);
-}
-
-static irqreturn_t ts_interrupt(int irq, void *dev_id)
-{
- struct corgi_ts *corgits_data = dev_id;
-
- ts_interrupt_main(corgits_data, 0);
- return IRQ_HANDLED;
-}
-
-#ifdef CONFIG_PM
-static int corgits_suspend(struct platform_device *dev, pm_message_t state)
-{
- struct corgi_ts *corgi_ts = platform_get_drvdata(dev);
-
- if (corgi_ts->pendown) {
- del_timer_sync(&corgi_ts->timer);
- corgi_ts->tc.pressure = 0;
- new_data(corgi_ts);
- corgi_ts->pendown = 0;
- }
- corgi_ts->power_mode = PWR_MODE_SUSPEND;
-
- corgi_ssp_ads7846_putget((1u << ADSCTRL_ADR_SH) | ADSCTRL_STS);
-
- return 0;
-}
-
-static int corgits_resume(struct platform_device *dev)
-{
- struct corgi_ts *corgi_ts = platform_get_drvdata(dev);
-
- corgi_ssp_ads7846_putget((4u << ADSCTRL_ADR_SH) | ADSCTRL_STS);
- /* Enable Falling Edge */
- set_irq_type(corgi_ts->irq_gpio, IRQ_TYPE_EDGE_FALLING);
- corgi_ts->power_mode = PWR_MODE_ACTIVE;
-
- return 0;
-}
-#else
-#define corgits_suspend NULL
-#define corgits_resume NULL
-#endif
-
-static int __devinit corgits_probe(struct platform_device *pdev)
-{
- struct corgi_ts *corgi_ts;
- struct input_dev *input_dev;
- int err = -ENOMEM;
-
- corgi_ts = kzalloc(sizeof(struct corgi_ts), GFP_KERNEL);
- input_dev = input_allocate_device();
- if (!corgi_ts || !input_dev)
- goto fail1;
-
- platform_set_drvdata(pdev, corgi_ts);
-
- corgi_ts->machinfo = pdev->dev.platform_data;
- corgi_ts->irq_gpio = platform_get_irq(pdev, 0);
-
- if (corgi_ts->irq_gpio < 0) {
- err = -ENODEV;
- goto fail1;
- }
-
- corgi_ts->input = input_dev;
-
- init_timer(&corgi_ts->timer);
- corgi_ts->timer.data = (unsigned long) corgi_ts;
- corgi_ts->timer.function = corgi_ts_timer;
-
- input_dev->name = "Corgi Touchscreen";
- input_dev->phys = "corgits/input0";
- input_dev->id.bustype = BUS_HOST;
- input_dev->id.vendor = 0x0001;
- input_dev->id.product = 0x0002;
- input_dev->id.version = 0x0100;
- input_dev->dev.parent = &pdev->dev;
-
- input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
- input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
- input_set_abs_params(input_dev, ABS_X, X_AXIS_MIN, X_AXIS_MAX, 0, 0);
- input_set_abs_params(input_dev, ABS_Y, Y_AXIS_MIN, Y_AXIS_MAX, 0, 0);
- input_set_abs_params(input_dev, ABS_PRESSURE, PRESSURE_MIN, PRESSURE_MAX, 0, 0);
-
- pxa_gpio_mode(IRQ_TO_GPIO(corgi_ts->irq_gpio) | GPIO_IN);
-
- /* Initiaize ADS7846 Difference Reference mode */
- corgi_ssp_ads7846_putget((1u << ADSCTRL_ADR_SH) | ADSCTRL_STS);
- mdelay(5);
- corgi_ssp_ads7846_putget((3u << ADSCTRL_ADR_SH) | ADSCTRL_STS);
- mdelay(5);
- corgi_ssp_ads7846_putget((4u << ADSCTRL_ADR_SH) | ADSCTRL_STS);
- mdelay(5);
- corgi_ssp_ads7846_putget((5u << ADSCTRL_ADR_SH) | ADSCTRL_STS);
- mdelay(5);
-
- if (request_irq(corgi_ts->irq_gpio, ts_interrupt, IRQF_DISABLED, "ts", corgi_ts)) {
- err = -EBUSY;
- goto fail1;
- }
-
- err = input_register_device(corgi_ts->input);
- if (err)
- goto fail2;
-
- corgi_ts->power_mode = PWR_MODE_ACTIVE;
-
- /* Enable Falling Edge */
- set_irq_type(corgi_ts->irq_gpio, IRQ_TYPE_EDGE_FALLING);
-
- return 0;
-
- fail2: free_irq(corgi_ts->irq_gpio, corgi_ts);
- fail1: input_free_device(input_dev);
- kfree(corgi_ts);
- return err;
-}
-
-static int __devexit corgits_remove(struct platform_device *pdev)
-{
- struct corgi_ts *corgi_ts = platform_get_drvdata(pdev);
-
- free_irq(corgi_ts->irq_gpio, corgi_ts);
- del_timer_sync(&corgi_ts->timer);
- corgi_ts->machinfo->put_hsync();
- input_unregister_device(corgi_ts->input);
- kfree(corgi_ts);
-
- return 0;
-}
-
-static struct platform_driver corgits_driver = {
- .probe = corgits_probe,
- .remove = __devexit_p(corgits_remove),
- .suspend = corgits_suspend,
- .resume = corgits_resume,
- .driver = {
- .name = "corgi-ts",
- .owner = THIS_MODULE,
- },
-};
-
-static int __init corgits_init(void)
-{
- return platform_driver_register(&corgits_driver);
-}
-
-static void __exit corgits_exit(void)
-{
- platform_driver_unregister(&corgits_driver);
-}
-
-module_init(corgits_init);
-module_exit(corgits_exit);
-
-MODULE_AUTHOR("Richard Purdie <rpurdie@rpsys.net>");
-MODULE_DESCRIPTION("Corgi TouchScreen Driver");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:corgi-ts");
diff --git a/drivers/input/touchscreen/hampshire.c b/drivers/input/touchscreen/hampshire.c
new file mode 100644
index 000000000000..2da6cc31bb21
--- /dev/null
+++ b/drivers/input/touchscreen/hampshire.c
@@ -0,0 +1,205 @@
+/*
+ * Hampshire serial touchscreen driver
+ *
+ * Copyright (c) 2010 Adam Bennett
+ * Based on the dynapro driver (c) Tias Guns
+ *
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+/*
+ * 2010/04/08 Adam Bennett <abennett72@gmail.com>
+ * Copied dynapro.c and edited for Hampshire 4-byte protocol
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/input.h>
+#include <linux/serio.h>
+#include <linux/init.h>
+
+#define DRIVER_DESC "Hampshire serial touchscreen driver"
+
+MODULE_AUTHOR("Adam Bennett <abennett72@gmail.com>");
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
+
+/*
+ * Definitions & global arrays.
+ */
+
+#define HAMPSHIRE_FORMAT_TOUCH_BIT 0x40
+#define HAMPSHIRE_FORMAT_LENGTH 4
+#define HAMPSHIRE_RESPONSE_BEGIN_BYTE 0x80
+
+#define HAMPSHIRE_MIN_XC 0
+#define HAMPSHIRE_MAX_XC 0x1000
+#define HAMPSHIRE_MIN_YC 0
+#define HAMPSHIRE_MAX_YC 0x1000
+
+#define HAMPSHIRE_GET_XC(data) (((data[3] & 0x0c) >> 2) | (data[1] << 2) | ((data[0] & 0x38) << 6))
+#define HAMPSHIRE_GET_YC(data) ((data[3] & 0x03) | (data[2] << 2) | ((data[0] & 0x07) << 9))
+#define HAMPSHIRE_GET_TOUCHED(data) (HAMPSHIRE_FORMAT_TOUCH_BIT & data[0])
+
+/*
+ * Per-touchscreen data.
+ */
+
+struct hampshire {
+ struct input_dev *dev;
+ struct serio *serio;
+ int idx;
+ unsigned char data[HAMPSHIRE_FORMAT_LENGTH];
+ char phys[32];
+};
+
+static void hampshire_process_data(struct hampshire *phampshire)
+{
+ struct input_dev *dev = phampshire->dev;
+
+ if (HAMPSHIRE_FORMAT_LENGTH == ++phampshire->idx) {
+ input_report_abs(dev, ABS_X, HAMPSHIRE_GET_XC(phampshire->data));
+ input_report_abs(dev, ABS_Y, HAMPSHIRE_GET_YC(phampshire->data));
+ input_report_key(dev, BTN_TOUCH,
+ HAMPSHIRE_GET_TOUCHED(phampshire->data));
+ input_sync(dev);
+
+ phampshire->idx = 0;
+ }
+}
+
+static irqreturn_t hampshire_interrupt(struct serio *serio,
+ unsigned char data, unsigned int flags)
+{
+ struct hampshire *phampshire = serio_get_drvdata(serio);
+
+ phampshire->data[phampshire->idx] = data;
+
+ if (HAMPSHIRE_RESPONSE_BEGIN_BYTE & phampshire->data[0])
+ hampshire_process_data(phampshire);
+ else
+ dev_dbg(&serio->dev, "unknown/unsynchronized data: %x\n",
+ phampshire->data[0]);
+
+ return IRQ_HANDLED;
+}
+
+static void hampshire_disconnect(struct serio *serio)
+{
+ struct hampshire *phampshire = serio_get_drvdata(serio);
+
+ input_get_device(phampshire->dev);
+ input_unregister_device(phampshire->dev);
+ serio_close(serio);
+ serio_set_drvdata(serio, NULL);
+ input_put_device(phampshire->dev);
+ kfree(phampshire);
+}
+
+/*
+ * hampshire_connect() is the routine that is called when someone adds a
+ * new serio device that supports hampshire protocol and registers it as
+ * an input device. This is usually accomplished using inputattach.
+ */
+
+static int hampshire_connect(struct serio *serio, struct serio_driver *drv)
+{
+ struct hampshire *phampshire;
+ struct input_dev *input_dev;
+ int err;
+
+ phampshire = kzalloc(sizeof(struct hampshire), GFP_KERNEL);
+ input_dev = input_allocate_device();
+ if (!phampshire || !input_dev) {
+ err = -ENOMEM;
+ goto fail1;
+ }
+
+ phampshire->serio = serio;
+ phampshire->dev = input_dev;
+ snprintf(phampshire->phys, sizeof(phampshire->phys),
+ "%s/input0", serio->phys);
+
+ input_dev->name = "Hampshire Serial TouchScreen";
+ input_dev->phys = phampshire->phys;
+ input_dev->id.bustype = BUS_RS232;
+ input_dev->id.vendor = SERIO_HAMPSHIRE;
+ input_dev->id.product = 0;
+ input_dev->id.version = 0x0001;
+ input_dev->dev.parent = &serio->dev;
+ input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
+ input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
+ input_set_abs_params(phampshire->dev, ABS_X,
+ HAMPSHIRE_MIN_XC, HAMPSHIRE_MAX_XC, 0, 0);
+ input_set_abs_params(phampshire->dev, ABS_Y,
+ HAMPSHIRE_MIN_YC, HAMPSHIRE_MAX_YC, 0, 0);
+
+ serio_set_drvdata(serio, phampshire);
+
+ err = serio_open(serio, drv);
+ if (err)
+ goto fail2;
+
+ err = input_register_device(phampshire->dev);
+ if (err)
+ goto fail3;
+
+ return 0;
+
+ fail3: serio_close(serio);
+ fail2: serio_set_drvdata(serio, NULL);
+ fail1: input_free_device(input_dev);
+ kfree(phampshire);
+ return err;
+}
+
+/*
+ * The serio driver structure.
+ */
+
+static struct serio_device_id hampshire_serio_ids[] = {
+ {
+ .type = SERIO_RS232,
+ .proto = SERIO_HAMPSHIRE,
+ .id = SERIO_ANY,
+ .extra = SERIO_ANY,
+ },
+ { 0 }
+};
+
+MODULE_DEVICE_TABLE(serio, hampshire_serio_ids);
+
+static struct serio_driver hampshire_drv = {
+ .driver = {
+ .name = "hampshire",
+ },
+ .description = DRIVER_DESC,
+ .id_table = hampshire_serio_ids,
+ .interrupt = hampshire_interrupt,
+ .connect = hampshire_connect,
+ .disconnect = hampshire_disconnect,
+};
+
+/*
+ * The functions for inserting/removing us as a module.
+ */
+
+static int __init hampshire_init(void)
+{
+ return serio_register_driver(&hampshire_drv);
+}
+
+static void __exit hampshire_exit(void)
+{
+ serio_unregister_driver(&hampshire_drv);
+}
+
+module_init(hampshire_init);
+module_exit(hampshire_exit);
diff --git a/drivers/input/touchscreen/s3c2410_ts.c b/drivers/input/touchscreen/s3c2410_ts.c
index 98a7d1279486..ac5d0f9b0cb1 100644
--- a/drivers/input/touchscreen/s3c2410_ts.c
+++ b/drivers/input/touchscreen/s3c2410_ts.c
@@ -37,9 +37,7 @@
#include <plat/adc.h>
#include <plat/regs-adc.h>
-
-#include <mach/regs-gpio.h>
-#include <mach/ts.h>
+#include <plat/ts.h>
#define TSC_SLEEP (S3C2410_ADCTSC_PULL_UP_DISABLE | S3C2410_ADCTSC_XY_PST(0))
@@ -57,6 +55,8 @@
S3C2410_ADCTSC_AUTO_PST | \
S3C2410_ADCTSC_XY_PST(0))
+#define FEAT_PEN_IRQ (1 << 0) /* HAS ADCCLRINTPNDNUP */
+
/* Per-touchscreen data. */
/**
@@ -71,6 +71,7 @@
* @irq_tc: The interrupt number for pen up/down interrupt
* @count: The number of samples collected.
* @shift: The log2 of the maximum count to read in one go.
+ * @features: The features supported by the TSADC module.
*/
struct s3c2410ts {
struct s3c_adc_client *client;
@@ -83,26 +84,12 @@ struct s3c2410ts {
int irq_tc;
int count;
int shift;
+ int features;
};
static struct s3c2410ts ts;
/**
- * s3c2410_ts_connect - configure gpio for s3c2410 systems
- *
- * Configure the GPIO for the S3C2410 system, where we have external FETs
- * connected to the device (later systems such as the S3C2440 integrate
- * these into the device).
-*/
-static inline void s3c2410_ts_connect(void)
-{
- s3c2410_gpio_cfgpin(S3C2410_GPG(12), S3C2410_GPG12_XMON);
- s3c2410_gpio_cfgpin(S3C2410_GPG(13), S3C2410_GPG13_nXPON);
- s3c2410_gpio_cfgpin(S3C2410_GPG(14), S3C2410_GPG14_YMON);
- s3c2410_gpio_cfgpin(S3C2410_GPG(15), S3C2410_GPG15_nYPON);
-}
-
-/**
* get_down - return the down state of the pen
* @data0: The data read from ADCDAT0 register.
* @data1: The data read from ADCDAT1 register.
@@ -188,6 +175,11 @@ static irqreturn_t stylus_irq(int irq, void *dev_id)
else
dev_info(ts.dev, "%s: count=%d\n", __func__, ts.count);
+ if (ts.features & FEAT_PEN_IRQ) {
+ /* Clear pen down/up interrupt */
+ writel(0x0, ts.io + S3C64XX_ADCCLRINTPNDNUP);
+ }
+
return IRQ_HANDLED;
}
@@ -296,9 +288,9 @@ static int __devinit s3c2410ts_probe(struct platform_device *pdev)
goto err_clk;
}
- /* Configure the touchscreen external FETs on the S3C2410 */
- if (!platform_get_device_id(pdev)->driver_data)
- s3c2410_ts_connect();
+ /* initialise the GPIO */
+ if (info->cfg_gpio)
+ info->cfg_gpio(to_platform_device(ts.dev));
ts.client = s3c_adc_register(pdev, s3c24xx_ts_select,
s3c24xx_ts_conversion, 1);
@@ -334,6 +326,7 @@ static int __devinit s3c2410ts_probe(struct platform_device *pdev)
ts.input->id.version = 0x0102;
ts.shift = info->oversampling_shift;
+ ts.features = platform_get_device_id(pdev)->driver_data;
ret = request_irq(ts.irq_tc, stylus_irq, IRQF_DISABLED,
"s3c2410_ts_pen", ts.input);
@@ -421,14 +414,15 @@ static struct dev_pm_ops s3c_ts_pmops = {
static struct platform_device_id s3cts_driver_ids[] = {
{ "s3c2410-ts", 0 },
- { "s3c2440-ts", 1 },
+ { "s3c2440-ts", 0 },
+ { "s3c64xx-ts", FEAT_PEN_IRQ },
{ }
};
MODULE_DEVICE_TABLE(platform, s3cts_driver_ids);
static struct platform_driver s3c_ts_driver = {
.driver = {
- .name = "s3c24xx-ts",
+ .name = "samsung-ts",
.owner = THIS_MODULE,
#ifdef CONFIG_PM
.pm = &s3c_ts_pmops,
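
The s3c2410_ts change above repurposes the platform_device_id driver_data field: instead of a bare chip index it now carries a feature bitmask (FEAT_PEN_IRQ), which probe copies into ts.features and the IRQ handler tests before touching S3C64XX_ADCCLRINTPNDNUP. A minimal sketch of the general pattern, with hypothetical device names:

#include <linux/module.h>
#include <linux/platform_device.h>

#define MY_FEAT_PEN_IRQ		(1 << 0)	/* variant has a pen up/down ack register */

static struct platform_device_id my_ts_ids[] = {
	{ "plain-ts", 0 },
	{ "fancy-ts", MY_FEAT_PEN_IRQ },
	{ }
};
MODULE_DEVICE_TABLE(platform, my_ts_ids);

static int my_ts_probe(struct platform_device *pdev)
{
	unsigned long features = platform_get_device_id(pdev)->driver_data;

	if (features & MY_FEAT_PEN_IRQ) {
		/* only the "fancy-ts" variant needs the extra ack write */
	}
	return 0;
}
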
diff --git a/drivers/input/touchscreen/tps6507x-ts.c b/drivers/input/touchscreen/tps6507x-ts.c
new file mode 100644
index 000000000000..5de80a1a730b
--- /dev/null
+++ b/drivers/input/touchscreen/tps6507x-ts.c
@@ -0,0 +1,400 @@
+/*
+ * drivers/input/touchscreen/tps6507x-ts.c
+ *
+ * Touchscreen driver for the tps6507x chip.
+ *
+ * Copyright (c) 2009 RidgeRun (todd.fischer@ridgerun.com)
+ *
+ * Credits:
+ *
+ * Using code from tsc2007, MtekVision Co., Ltd.
+ *
+ * For licensing details see kernel-base/COPYING
+ *
+ * TPS65070, TPS65073, TPS650731, and TPS650732 support
+ * a 10-bit touch screen interface.
+ */
+
+#include <linux/module.h>
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+#include <linux/input.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/tps6507x.h>
+#include <linux/input/tps6507x-ts.h>
+#include <linux/delay.h>
+
+#define TSC_DEFAULT_POLL_PERIOD 30 /* ms */
+#define TPS_DEFAULT_MIN_PRESSURE 0x30
+#define MAX_10BIT ((1 << 10) - 1)
+
+#define TPS6507X_ADCONFIG_CONVERT_TS (TPS6507X_ADCONFIG_AD_ENABLE | \
+ TPS6507X_ADCONFIG_START_CONVERSION | \
+ TPS6507X_ADCONFIG_INPUT_REAL_TSC)
+#define TPS6507X_ADCONFIG_POWER_DOWN_TS (TPS6507X_ADCONFIG_INPUT_REAL_TSC)
+
+struct ts_event {
+ u16 x;
+ u16 y;
+ u16 pressure;
+};
+
+struct tps6507x_ts {
+ struct input_dev *input_dev;
+ struct device *dev;
+ char phys[32];
+ struct workqueue_struct *wq;
+ struct delayed_work work;
+ unsigned polling; /* polling is active */
+ struct ts_event tc;
+ struct tps6507x_dev *mfd;
+ u16 model;
+ unsigned pendown;
+ int irq;
+ void (*clear_penirq)(void);
+ unsigned long poll_period; /* ms */
+ u16 min_pressure;
+ int vref; /* non-zero to leave vref on */
+};
+
+static int tps6507x_read_u8(struct tps6507x_ts *tsc, u8 reg, u8 *data)
+{
+ int err;
+
+ err = tsc->mfd->read_dev(tsc->mfd, reg, 1, data);
+
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int tps6507x_write_u8(struct tps6507x_ts *tsc, u8 reg, u8 data)
+{
+ return tsc->mfd->write_dev(tsc->mfd, reg, 1, &data);
+}
+
+static s32 tps6507x_adc_conversion(struct tps6507x_ts *tsc,
+ u8 tsc_mode, u16 *value)
+{
+ s32 ret;
+ u8 adc_status;
+ u8 result;
+
+ /* Route input signal to A/D converter */
+
+ ret = tps6507x_write_u8(tsc, TPS6507X_REG_TSCMODE, tsc_mode);
+ if (ret) {
+ dev_err(tsc->dev, "TSC mode read failed\n");
+ goto err;
+ }
+
+ /* Start A/D conversion */
+
+ ret = tps6507x_write_u8(tsc, TPS6507X_REG_ADCONFIG,
+ TPS6507X_ADCONFIG_CONVERT_TS);
+ if (ret) {
+ dev_err(tsc->dev, "ADC config write failed\n");
+ return ret;
+ }
+
+ do {
+ ret = tps6507x_read_u8(tsc, TPS6507X_REG_ADCONFIG,
+ &adc_status);
+ if (ret) {
+ dev_err(tsc->dev, "ADC config read failed\n");
+ goto err;
+ }
+ } while (adc_status & TPS6507X_ADCONFIG_START_CONVERSION);
+
+ ret = tps6507x_read_u8(tsc, TPS6507X_REG_ADRESULT_2, &result);
+ if (ret) {
+ dev_err(tsc->dev, "ADC result 2 read failed\n");
+ goto err;
+ }
+
+ *value = (result & TPS6507X_REG_ADRESULT_2_MASK) << 8;
+
+ ret = tps6507x_read_u8(tsc, TPS6507X_REG_ADRESULT_1, &result);
+ if (ret) {
+ dev_err(tsc->dev, "ADC result 1 read failed\n");
+ goto err;
+ }
+
+ *value |= result;
+
+ dev_dbg(tsc->dev, "TSC channel %d = 0x%X\n", tsc_mode, *value);
+
+err:
+ return ret;
+}
+
+/* Need to call tps6507x_adc_standby() after using A/D converter for the
+ * touch screen interrupt to work properly.
+ */
+
+static s32 tps6507x_adc_standby(struct tps6507x_ts *tsc)
+{
+ s32 ret;
+ s32 loops = 0;
+ u8 val;
+
+ ret = tps6507x_write_u8(tsc, TPS6507X_REG_ADCONFIG,
+ TPS6507X_ADCONFIG_INPUT_TSC);
+ if (ret)
+ return ret;
+
+ ret = tps6507x_write_u8(tsc, TPS6507X_REG_TSCMODE,
+ TPS6507X_TSCMODE_STANDBY);
+ if (ret)
+ return ret;
+
+ ret = tps6507x_read_u8(tsc, TPS6507X_REG_INT, &val);
+ if (ret)
+ return ret;
+
+ while (val & TPS6507X_REG_TSC_INT) {
+ mdelay(10);
+ ret = tps6507x_read_u8(tsc, TPS6507X_REG_INT, &val);
+ if (ret)
+ return ret;
+ loops++;
+ }
+
+ return ret;
+}
+
+static void tps6507x_ts_handler(struct work_struct *work)
+{
+ struct tps6507x_ts *tsc = container_of(work,
+ struct tps6507x_ts, work.work);
+ struct input_dev *input_dev = tsc->input_dev;
+ int pendown;
+ int schd;
+ int poll = 0;
+ s32 ret;
+
+ ret = tps6507x_adc_conversion(tsc, TPS6507X_TSCMODE_PRESSURE,
+ &tsc->tc.pressure);
+ if (ret)
+ goto done;
+
+ pendown = tsc->tc.pressure > tsc->min_pressure;
+
+ if (unlikely(!pendown && tsc->pendown)) {
+ dev_dbg(tsc->dev, "UP\n");
+ input_report_key(input_dev, BTN_TOUCH, 0);
+ input_report_abs(input_dev, ABS_PRESSURE, 0);
+ input_sync(input_dev);
+ tsc->pendown = 0;
+ }
+
+ if (pendown) {
+
+ if (!tsc->pendown) {
+ dev_dbg(tsc->dev, "DOWN\n");
+ input_report_key(input_dev, BTN_TOUCH, 1);
+ } else
+ dev_dbg(tsc->dev, "still down\n");
+
+ ret = tps6507x_adc_conversion(tsc, TPS6507X_TSCMODE_X_POSITION,
+ &tsc->tc.x);
+ if (ret)
+ goto done;
+
+ ret = tps6507x_adc_conversion(tsc, TPS6507X_TSCMODE_Y_POSITION,
+ &tsc->tc.y);
+ if (ret)
+ goto done;
+
+ input_report_abs(input_dev, ABS_X, tsc->tc.x);
+ input_report_abs(input_dev, ABS_Y, tsc->tc.y);
+ input_report_abs(input_dev, ABS_PRESSURE, tsc->tc.pressure);
+ input_sync(input_dev);
+ tsc->pendown = 1;
+ poll = 1;
+ }
+
+done:
+ /* always poll if not using interrupts */
+ poll = 1;
+
+ if (poll) {
+ schd = queue_delayed_work(tsc->wq, &tsc->work,
+ tsc->poll_period * HZ / 1000);
+ if (schd)
+ tsc->polling = 1;
+ else {
+ tsc->polling = 0;
+ dev_err(tsc->dev, "re-schedule failed\n");
+ }
+ } else
+ tsc->polling = 0;
+
+ ret = tps6507x_adc_standby(tsc);
+}
+
+static int tps6507x_ts_probe(struct platform_device *pdev)
+{
+ int error;
+ struct tps6507x_ts *tsc;
+ struct tps6507x_dev *tps6507x_dev = dev_get_drvdata(pdev->dev.parent);
+ struct touchscreen_init_data *init_data;
+ struct input_dev *input_dev;
+ struct tps6507x_board *tps_board;
+ int schd;
+
+ /**
+ * tps_board points to pmic related constants
+ * coming from the board-evm file.
+ */
+
+ tps_board = (struct tps6507x_board *)tps6507x_dev->dev->platform_data;
+
+ if (!tps_board) {
+ dev_err(tps6507x_dev->dev,
+ "Could not find tps6507x platform data\n");
+ return -EIO;
+ }
+
+ /**
+ * init_data points to array of regulator_init structures
+ * coming from the board-evm file.
+ */
+
+ init_data = tps_board->tps6507x_ts_init_data;
+
+ tsc = kzalloc(sizeof(struct tps6507x_ts), GFP_KERNEL);
+ if (!tsc) {
+ dev_err(tps6507x_dev->dev, "failed to allocate driver data\n");
+ error = -ENOMEM;
+ goto err0;
+ }
+
+ tps6507x_dev->ts = tsc;
+ tsc->mfd = tps6507x_dev;
+ tsc->dev = tps6507x_dev->dev;
+ input_dev = input_allocate_device();
+ if (!input_dev) {
+ dev_err(tsc->dev, "Failed to allocate input device.\n");
+ error = -ENOMEM;
+ goto err1;
+ }
+
+ input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
+ input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
+
+ input_set_abs_params(input_dev, ABS_X, 0, MAX_10BIT, 0, 0);
+ input_set_abs_params(input_dev, ABS_Y, 0, MAX_10BIT, 0, 0);
+ input_set_abs_params(input_dev, ABS_PRESSURE, 0, MAX_10BIT, 0, 0);
+
+ input_dev->name = "TPS6507x Touchscreen";
+ input_dev->id.bustype = BUS_I2C;
+ input_dev->dev.parent = tsc->dev;
+
+ snprintf(tsc->phys, sizeof(tsc->phys),
+ "%s/input0", dev_name(tsc->dev));
+ input_dev->phys = tsc->phys;
+
+ dev_dbg(tsc->dev, "device: %s\n", input_dev->phys);
+
+ input_set_drvdata(input_dev, tsc);
+
+ tsc->input_dev = input_dev;
+
+ INIT_DELAYED_WORK(&tsc->work, tps6507x_ts_handler);
+ tsc->wq = create_workqueue("TPS6507x Touchscreen");
+
+ if (init_data) {
+ tsc->poll_period = init_data->poll_period;
+ tsc->vref = init_data->vref;
+ tsc->min_pressure = init_data->min_pressure;
+ input_dev->id.vendor = init_data->vendor;
+ input_dev->id.product = init_data->product;
+ input_dev->id.version = init_data->version;
+ } else {
+ tsc->poll_period = TSC_DEFAULT_POLL_PERIOD;
+ tsc->min_pressure = TPS_DEFAULT_MIN_PRESSURE;
+ }
+
+ error = tps6507x_adc_standby(tsc);
+ if (error)
+ goto err2;
+
+ error = input_register_device(input_dev);
+ if (error)
+ goto err2;
+
+ schd = queue_delayed_work(tsc->wq, &tsc->work,
+ tsc->poll_period * HZ / 1000);
+
+ if (schd)
+ tsc->polling = 1;
+ else {
+ tsc->polling = 0;
+ dev_err(tsc->dev, "schedule failed\n");
+ goto err2;
+ }
+
+ return 0;
+
+err2:
+ cancel_delayed_work(&tsc->work);
+ flush_workqueue(tsc->wq);
+ destroy_workqueue(tsc->wq);
+ tsc->wq = NULL;
+ input_free_device(input_dev);
+err1:
+ kfree(tsc);
+ tps6507x_dev->ts = NULL;
+err0:
+ return error;
+}
+
+static int __devexit tps6507x_ts_remove(struct platform_device *pdev)
+{
+ struct tps6507x_dev *tps6507x_dev = platform_get_drvdata(pdev);
+ struct tps6507x_ts *tsc = tps6507x_dev->ts;
+ struct input_dev *input_dev;
+
+ if (!tsc)
+ return 0;
+
+ input_dev = tsc->input_dev;
+
+ cancel_delayed_work(&tsc->work);
+ flush_workqueue(tsc->wq);
+ destroy_workqueue(tsc->wq);
+ tsc->wq = NULL;
+
+ input_free_device(input_dev);
+
+ tps6507x_dev->ts = NULL;
+ kfree(tsc);
+
+ return 0;
+}
+
+static struct platform_driver tps6507x_ts_driver = {
+ .driver = {
+ .name = "tps6507x-ts",
+ .owner = THIS_MODULE,
+ },
+ .probe = tps6507x_ts_probe,
+ .remove = __devexit_p(tps6507x_ts_remove),
+};
+
+static int __init tps6507x_ts_init(void)
+{
+ return platform_driver_register(&tps6507x_ts_driver);
+}
+module_init(tps6507x_ts_init);
+
+static void __exit tps6507x_ts_exit(void)
+{
+ platform_driver_unregister(&tps6507x_ts_driver);
+}
+module_exit(tps6507x_ts_exit);
+
+MODULE_AUTHOR("Todd Fischer <todd.fischer@ridgerun.com>");
+MODULE_DESCRIPTION("TPS6507x - TouchScreen driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:tps6507x-tsc");
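
The driver above polls by re-queueing a delayed work item, converting its period with the open-coded "poll_period * HZ / 1000". A short, illustrative sketch of the same re-arm loop using msecs_to_jiffies(), the usual way to express that conversion (the structure and function names here are made up, not from the driver):

#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct my_poller {
	struct delayed_work work;
	unsigned long period_ms;
};

static void my_poll_fn(struct work_struct *work)
{
	struct my_poller *p = container_of(work, struct my_poller, work.work);

	/* ... sample the hardware, report input events ... */

	/* re-arm: msecs_to_jiffies() replaces the open-coded "* HZ / 1000" */
	schedule_delayed_work(&p->work, msecs_to_jiffies(p->period_ms));
}
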
diff --git a/drivers/input/touchscreen/tsc2007.c b/drivers/input/touchscreen/tsc2007.c
index be23780e8a3e..769b479fcaa6 100644
--- a/drivers/input/touchscreen/tsc2007.c
+++ b/drivers/input/touchscreen/tsc2007.c
@@ -347,6 +347,8 @@ static int __devexit tsc2007_remove(struct i2c_client *client)
struct tsc2007 *ts = i2c_get_clientdata(client);
struct tsc2007_platform_data *pdata = client->dev.platform_data;
+ i2c_set_clientdata(client, NULL);
+
tsc2007_free_irq(ts);
if (pdata->exit_platform_hw)
diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c
index 99330bbdbac7..567d57215c28 100644
--- a/drivers/input/touchscreen/usbtouchscreen.c
+++ b/drivers/input/touchscreen/usbtouchscreen.c
@@ -811,12 +811,11 @@ static int nexio_init(struct usbtouch_usb *usbtouch)
priv = usbtouch->priv;
- priv->ack_buf = kmalloc(sizeof(nexio_ack_pkt), GFP_KERNEL);
+ priv->ack_buf = kmemdup(nexio_ack_pkt, sizeof(nexio_ack_pkt),
+ GFP_KERNEL);
if (!priv->ack_buf)
goto err_priv;
- memcpy(priv->ack_buf, nexio_ack_pkt, sizeof(nexio_ack_pkt));
-
priv->ack = usb_alloc_urb(0, GFP_KERNEL);
if (!priv->ack) {
dbg("%s - usb_alloc_urb failed: usbtouch->ack", __func__);
@@ -858,6 +857,11 @@ static int nexio_read_data(struct usbtouch_usb *usbtouch, unsigned char *pkt)
if ((pkt[0] & 0xe0) != 0xe0)
return 0;
+ if (be16_to_cpu(packet->data_len) > 0xff)
+ packet->data_len = cpu_to_be16(be16_to_cpu(packet->data_len) - 0x100);
+ if (be16_to_cpu(packet->x_len) > 0xff)
+ packet->x_len = cpu_to_be16(be16_to_cpu(packet->x_len) - 0x80);
+
/* send ACK */
ret = usb_submit_urb(priv->ack, GFP_ATOMIC);
@@ -1113,7 +1117,7 @@ static struct usbtouch_device_info usbtouch_dev_info[] = {
#ifdef CONFIG_TOUCHSCREEN_USB_NEXIO
[DEVTYPE_NEXIO] = {
- .rept_size = 128,
+ .rept_size = 1024,
.irq_always = true,
.read_data = nexio_read_data,
.init = nexio_init,
@@ -1291,8 +1295,8 @@ static void usbtouch_close(struct input_dev *input)
static void usbtouch_free_buffers(struct usb_device *udev,
struct usbtouch_usb *usbtouch)
{
- usb_buffer_free(udev, usbtouch->type->rept_size,
- usbtouch->data, usbtouch->data_dma);
+ usb_free_coherent(udev, usbtouch->type->rept_size,
+ usbtouch->data, usbtouch->data_dma);
kfree(usbtouch->buffer);
}
@@ -1336,8 +1340,8 @@ static int usbtouch_probe(struct usb_interface *intf,
if (!type->process_pkt)
type->process_pkt = usbtouch_process_pkt;
- usbtouch->data = usb_buffer_alloc(udev, type->rept_size,
- GFP_KERNEL, &usbtouch->data_dma);
+ usbtouch->data = usb_alloc_coherent(udev, type->rept_size,
+ GFP_KERNEL, &usbtouch->data_dma);
if (!usbtouch->data)
goto out_free;
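
Two mechanical conversions appear in this usbtouchscreen hunk: kmalloc() followed by memcpy() collapsed into kmemdup(), and usb_buffer_alloc()/usb_buffer_free() renamed to usb_alloc_coherent()/usb_free_coherent(). A tiny sketch of the kmemdup() form, with a made-up packet:

#include <linux/slab.h>
#include <linux/string.h>

static const unsigned char ack_pkt[] = { 0xaa, 0x02, 0x00, 0x2e, 0x0a };

static unsigned char *make_ack_copy(void)
{
	/* one call allocates and copies; NULL is returned on allocation failure */
	return kmemdup(ack_pkt, sizeof(ack_pkt), GFP_KERNEL);
}
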
diff --git a/drivers/input/touchscreen/wm97xx-core.c b/drivers/input/touchscreen/wm97xx-core.c
index 5109bf3dd858..cbfef1ea7e30 100644
--- a/drivers/input/touchscreen/wm97xx-core.c
+++ b/drivers/input/touchscreen/wm97xx-core.c
@@ -200,7 +200,7 @@ void wm97xx_set_gpio(struct wm97xx *wm, u32 gpio,
mutex_lock(&wm->codec_mutex);
reg = wm97xx_reg_read(wm, AC97_GPIO_STATUS);
- if (status & WM97XX_GPIO_HIGH)
+ if (status == WM97XX_GPIO_HIGH)
reg |= gpio;
else
reg &= ~gpio;
diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
index ee5837522f5a..0cabe31f26df 100644
--- a/drivers/isdn/capi/capi.c
+++ b/drivers/isdn/capi/capi.c
@@ -787,8 +787,7 @@ capi_poll(struct file *file, poll_table * wait)
}
static int
-capi_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg)
+capi_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct capidev *cdev = file->private_data;
capi_ioctl_struct data;
@@ -981,6 +980,18 @@ register_out:
}
}
+static long
+capi_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ int ret;
+
+ lock_kernel();
+ ret = capi_ioctl(file, cmd, arg);
+ unlock_kernel();
+
+ return ret;
+}
+
static int capi_open(struct inode *inode, struct file *file)
{
struct capidev *cdev;
@@ -1026,7 +1037,7 @@ static const struct file_operations capi_fops =
.read = capi_read,
.write = capi_write,
.poll = capi_poll,
- .ioctl = capi_ioctl,
+ .unlocked_ioctl = capi_unlocked_ioctl,
.open = capi_open,
.release = capi_release,
};
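
The capi.c hunk is one instance of the tree-wide .ioctl to .unlocked_ioctl conversion: the handler loses its inode argument and the big kernel lock is taken explicitly in a thin wrapper instead of implicitly by the VFS. A generic sketch of that wrapper shape (the smp_lock.h header supplies lock_kernel()/unlock_kernel() on kernels of this vintage; names are illustrative):

#include <linux/fs.h>
#include <linux/smp_lock.h>

static int my_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	/* device-specific command handling, still written assuming BKL protection */
	return 0;
}

static long my_unlocked_ioctl(struct file *file, unsigned int cmd,
			      unsigned long arg)
{
	long ret;

	lock_kernel();
	ret = my_ioctl(file, cmd, arg);
	unlock_kernel();

	return ret;
}

static const struct file_operations my_fops = {
	.unlocked_ioctl	= my_unlocked_ioctl,
};
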
diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c
index bd00dceacaf0..bde3c88b8b27 100644
--- a/drivers/isdn/capi/kcapi.c
+++ b/drivers/isdn/capi/kcapi.c
@@ -1147,6 +1147,12 @@ load_unlock_out:
if (ctr->state == CAPI_CTR_DETECTED)
goto reset_unlock_out;
+ if (ctr->reset_ctr == NULL) {
+ printk(KERN_DEBUG "kcapi: reset: no reset function\n");
+ retval = -ESRCH;
+ goto reset_unlock_out;
+ }
+
ctr->reset_ctr(ctr);
retval = wait_on_ctr_state(ctr, CAPI_CTR_DETECTED);
diff --git a/drivers/isdn/divert/divert_procfs.c b/drivers/isdn/divert/divert_procfs.c
index 9f49d9065791..c53e2417e7d4 100644
--- a/drivers/isdn/divert/divert_procfs.c
+++ b/drivers/isdn/divert/divert_procfs.c
@@ -20,6 +20,7 @@
#include <linux/sched.h>
#include <linux/isdnif.h>
#include <net/net_namespace.h>
+#include <linux/smp_lock.h>
#include "isdn_divert.h"
@@ -177,9 +178,7 @@ isdn_divert_close(struct inode *ino, struct file *filep)
/*********/
/* IOCTL */
/*********/
-static int
-isdn_divert_ioctl(struct inode *inode, struct file *file,
- uint cmd, ulong arg)
+static int isdn_divert_ioctl_unlocked(struct file *file, uint cmd, ulong arg)
{
divert_ioctl dioctl;
int i;
@@ -258,6 +257,17 @@ isdn_divert_ioctl(struct inode *inode, struct file *file,
return copy_to_user((void __user *)arg, &dioctl, sizeof(dioctl)) ? -EFAULT : 0;
} /* isdn_divert_ioctl */
+static long isdn_divert_ioctl(struct file *file, uint cmd, ulong arg)
+{
+ long ret;
+
+ lock_kernel();
+ ret = isdn_divert_ioctl_unlocked(file, cmd, arg);
+ unlock_kernel();
+
+ return ret;
+}
+
static const struct file_operations isdn_fops =
{
.owner = THIS_MODULE,
@@ -265,7 +275,7 @@ static const struct file_operations isdn_fops =
.read = isdn_divert_read,
.write = isdn_divert_write,
.poll = isdn_divert_poll,
- .ioctl = isdn_divert_ioctl,
+ .unlocked_ioctl = isdn_divert_ioctl,
.open = isdn_divert_open,
.release = isdn_divert_close,
};
diff --git a/drivers/isdn/gigaset/capi.c b/drivers/isdn/gigaset/capi.c
index 964a55fb1486..8f78f15c8ef7 100644
--- a/drivers/isdn/gigaset/capi.c
+++ b/drivers/isdn/gigaset/capi.c
@@ -170,17 +170,6 @@ static inline void ignore_cstruct_param(struct cardstate *cs, _cstruct param,
}
/*
- * convert hex to binary
- */
-static inline u8 hex2bin(char c)
-{
- int result = c & 0x0f;
- if (c & 0x40)
- result += 9;
- return result;
-}
-
-/*
* convert an IE from Gigaset hex string to ETSI binary representation
* including length byte
* return value: result length, -1 on error
@@ -191,7 +180,7 @@ static int encode_ie(char *in, u8 *out, int maxlen)
while (*in) {
if (!isxdigit(in[0]) || !isxdigit(in[1]) || l >= maxlen)
return -1;
- out[++l] = (hex2bin(in[0]) << 4) + hex2bin(in[1]);
+ out[++l] = (hex_to_bin(in[0]) << 4) + hex_to_bin(in[1]);
in += 2;
}
out[0] = l;
@@ -933,30 +922,6 @@ void gigaset_isdn_stop(struct cardstate *cs)
*/
/*
- * load firmware
- */
-static int gigaset_load_firmware(struct capi_ctr *ctr, capiloaddata *data)
-{
- struct cardstate *cs = ctr->driverdata;
-
- /* AVM specific operation, not needed for Gigaset -- ignore */
- dev_notice(cs->dev, "load_firmware ignored\n");
-
- return 0;
-}
-
-/*
- * reset (deactivate) controller
- */
-static void gigaset_reset_ctr(struct capi_ctr *ctr)
-{
- struct cardstate *cs = ctr->driverdata;
-
- /* AVM specific operation, not needed for Gigaset -- ignore */
- dev_notice(cs->dev, "reset_ctr ignored\n");
-}
-
-/*
* register CAPI application
*/
static void gigaset_register_appl(struct capi_ctr *ctr, u16 appl,
@@ -2213,8 +2178,8 @@ int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid)
iif->ctr.driverdata = cs;
strncpy(iif->ctr.name, isdnid, sizeof(iif->ctr.name));
iif->ctr.driver_name = "gigaset";
- iif->ctr.load_firmware = gigaset_load_firmware;
- iif->ctr.reset_ctr = gigaset_reset_ctr;
+ iif->ctr.load_firmware = NULL;
+ iif->ctr.reset_ctr = NULL;
iif->ctr.register_appl = gigaset_register_appl;
iif->ctr.release_appl = gigaset_release_appl;
iif->ctr.send_message = gigaset_send_message;
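
The gigaset hunk drops a private hex2bin() helper in favour of the generic hex_to_bin() from <linux/kernel.h>, which converts one hex digit to its value (and returns a negative value for a non-hex character; encode_ie() does not need that check because it validates with isxdigit() first). A sketch of the digit-pair decode it is used for:

#include <linux/kernel.h>

/* decode two already-validated hex characters into one byte */
static u8 decode_hex_pair(char hi, char lo)
{
	return (hex_to_bin(hi) << 4) | hex_to_bin(lo);
}
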
diff --git a/drivers/isdn/hardware/avm/avm_cs.c b/drivers/isdn/hardware/avm/avm_cs.c
index 94b796d84053..f410d0eb2fef 100644
--- a/drivers/isdn/hardware/avm/avm_cs.c
+++ b/drivers/isdn/hardware/avm/avm_cs.c
@@ -13,7 +13,6 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/ptrace.h>
-#include <linux/slab.h>
#include <linux/string.h>
#include <linux/tty.h>
#include <linux/serial.h>
@@ -61,31 +60,6 @@ static void avmcs_release(struct pcmcia_device *link);
static void avmcs_detach(struct pcmcia_device *p_dev);
-/*
- A linked list of "instances" of the skeleton device. Each actual
- PCMCIA card corresponds to one device instance, and is described
- by one struct pcmcia_device structure (defined in ds.h).
-
- You may not want to use a linked list for this -- for example, the
- memory card driver uses an array of struct pcmcia_device pointers, where minor
- device numbers are used to derive the corresponding array index.
-*/
-
-/*
- A driver needs to provide a dev_node_t structure for each device
- on a card. In some cases, there is only one device per card (for
- example, ethernet cards, modems). In other cases, there may be
- many actual or logical devices (SCSI adapters, memory cards with
- multiple partitions). The dev_node_t structures need to be kept
- in a linked list starting at the 'dev' field of a struct pcmcia_device
- structure. We allocate them in the card's private data structure,
- because they generally can't be allocated dynamically.
-*/
-
-typedef struct local_info_t {
- dev_node_t node;
-} local_info_t;
-
/*======================================================================
avmcs_attach() creates an "instance" of the driver, allocating
@@ -100,32 +74,19 @@ typedef struct local_info_t {
static int avmcs_probe(struct pcmcia_device *p_dev)
{
- local_info_t *local;
/* The io structure describes IO port mapping */
p_dev->io.NumPorts1 = 16;
p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
p_dev->io.NumPorts2 = 0;
- /* Interrupt setup */
- p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
-
/* General socket configuration */
p_dev->conf.Attributes = CONF_ENABLE_IRQ;
p_dev->conf.IntType = INT_MEMORY_AND_IO;
p_dev->conf.ConfigIndex = 1;
p_dev->conf.Present = PRESENT_OPTION;
- /* Allocate space for private device-specific data */
- local = kzalloc(sizeof(local_info_t), GFP_KERNEL);
- if (!local)
- goto err;
- p_dev->priv = local;
-
return avmcs_config(p_dev);
-
- err:
- return -ENOMEM;
} /* avmcs_attach */
/*======================================================================
@@ -140,7 +101,6 @@ static int avmcs_probe(struct pcmcia_device *p_dev)
static void avmcs_detach(struct pcmcia_device *link)
{
avmcs_release(link);
- kfree(link->priv);
} /* avmcs_detach */
/*======================================================================
@@ -171,14 +131,11 @@ static int avmcs_configcheck(struct pcmcia_device *p_dev,
static int avmcs_config(struct pcmcia_device *link)
{
- local_info_t *dev;
- int i;
+ int i = -1;
char devname[128];
int cardtype;
int (*addcard)(unsigned int port, unsigned irq);
- dev = link->priv;
-
devname[0] = 0;
if (link->prod_id[1])
strlcpy(devname, link->prod_id[1], sizeof(devname));
@@ -190,11 +147,7 @@ static int avmcs_config(struct pcmcia_device *link)
return -ENODEV;
do {
- /*
- * allocate an interrupt line
- */
- i = pcmcia_request_irq(link, &link->irq);
- if (i != 0) {
+ if (!link->irq) {
/* undo */
pcmcia_disable_device(link);
break;
@@ -211,15 +164,11 @@ static int avmcs_config(struct pcmcia_device *link)
} while (0);
- /* At this point, the dev_node_t structure(s) should be
- initialized and arranged in a linked list at link->dev. */
-
if (devname[0]) {
char *s = strrchr(devname, ' ');
if (!s)
s = devname;
else s++;
- strcpy(dev->node.dev_name, s);
if (strcmp("M1", s) == 0) {
cardtype = AVM_CARDTYPE_M1;
} else if (strcmp("M2", s) == 0) {
@@ -227,14 +176,8 @@ static int avmcs_config(struct pcmcia_device *link)
} else {
cardtype = AVM_CARDTYPE_B1;
}
- } else {
- strcpy(dev->node.dev_name, "b1");
+ } else
cardtype = AVM_CARDTYPE_B1;
- }
-
- dev->node.major = 64;
- dev->node.minor = 0;
- link->dev_node = &dev->node;
/* If any step failed, release any partially configured state */
if (i != 0) {
@@ -249,13 +192,12 @@ static int avmcs_config(struct pcmcia_device *link)
default:
case AVM_CARDTYPE_B1: addcard = b1pcmcia_addcard_b1; break;
}
- if ((i = (*addcard)(link->io.BasePort1, link->irq.AssignedIRQ)) < 0) {
- printk(KERN_ERR "avm_cs: failed to add AVM-%s-Controller at i/o %#x, irq %d\n",
- dev->node.dev_name, link->io.BasePort1, link->irq.AssignedIRQ);
- avmcs_release(link);
- return -ENODEV;
+ if ((i = (*addcard)(link->io.BasePort1, link->irq)) < 0) {
+ dev_err(&link->dev, "avm_cs: failed to add AVM-Controller at i/o %#x, irq %d\n",
+ link->io.BasePort1, link->irq);
+ avmcs_release(link);
+ return -ENODEV;
}
- dev->node.minor = i;
return 0;
} /* avmcs_config */
@@ -270,7 +212,7 @@ static int avmcs_config(struct pcmcia_device *link)
static void avmcs_release(struct pcmcia_device *link)
{
- b1pcmcia_delcard(link->io.BasePort1, link->irq.AssignedIRQ);
+ b1pcmcia_delcard(link->io.BasePort1, link->irq);
pcmcia_disable_device(link);
} /* avmcs_release */
diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c
index 75e71b5d9215..095ed76ebe80 100644
--- a/drivers/isdn/hardware/mISDN/hfcmulti.c
+++ b/drivers/isdn/hardware/mISDN/hfcmulti.c
@@ -117,7 +117,7 @@
* NOTE: only one mode value must be given for every card.
* -> See hfc_multi.h for HFC_IO_MODE_* values
* By default, the IO mode is pci memory IO (MEMIO).
- * Some cards requre specific IO mode, so it cannot be changed.
+ * Some cards require specific IO mode, so it cannot be changed.
* It may be usefull to set IO mode to register io (REGIO) to solve
* PCI bridge problems.
* If unsure, don't give this parameter.
diff --git a/drivers/isdn/hisax/avma1_cs.c b/drivers/isdn/hisax/avma1_cs.c
index 8d1d63a02b34..a80a7617f16f 100644
--- a/drivers/isdn/hisax/avma1_cs.c
+++ b/drivers/isdn/hisax/avma1_cs.c
@@ -62,31 +62,6 @@ static void avma1cs_release(struct pcmcia_device *link);
static void avma1cs_detach(struct pcmcia_device *p_dev) __devexit ;
-/*
- A linked list of "instances" of the skeleton device. Each actual
- PCMCIA card corresponds to one device instance, and is described
- by one struct pcmcia_device structure (defined in ds.h).
-
- You may not want to use a linked list for this -- for example, the
- memory card driver uses an array of struct pcmcia_device pointers, where minor
- device numbers are used to derive the corresponding array index.
-*/
-
-/*
- A driver needs to provide a dev_node_t structure for each device
- on a card. In some cases, there is only one device per card (for
- example, ethernet cards, modems). In other cases, there may be
- many actual or logical devices (SCSI adapters, memory cards with
- multiple partitions). The dev_node_t structures need to be kept
- in a linked list starting at the 'dev' field of a struct pcmcia_device
- structure. We allocate them in the card's private data structure,
- because they generally can't be allocated dynamically.
-*/
-
-typedef struct local_info_t {
- dev_node_t node;
-} local_info_t;
-
/*======================================================================
avma1cs_attach() creates an "instance" of the driver, allocating
@@ -101,17 +76,8 @@ typedef struct local_info_t {
static int __devinit avma1cs_probe(struct pcmcia_device *p_dev)
{
- local_info_t *local;
-
dev_dbg(&p_dev->dev, "avma1cs_attach()\n");
- /* Allocate space for private device-specific data */
- local = kzalloc(sizeof(local_info_t), GFP_KERNEL);
- if (!local)
- return -ENOMEM;
-
- p_dev->priv = local;
-
/* The io structure describes IO port mapping */
p_dev->io.NumPorts1 = 16;
p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
@@ -119,9 +85,6 @@ static int __devinit avma1cs_probe(struct pcmcia_device *p_dev)
p_dev->io.Attributes2 = IO_DATA_PATH_WIDTH_16;
p_dev->io.IOAddrLines = 5;
- /* Interrupt setup */
- p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
-
/* General socket configuration */
p_dev->conf.Attributes = CONF_ENABLE_IRQ;
p_dev->conf.IntType = INT_MEMORY_AND_IO;
@@ -176,14 +139,11 @@ static int avma1cs_configcheck(struct pcmcia_device *p_dev,
static int __devinit avma1cs_config(struct pcmcia_device *link)
{
- local_info_t *dev;
- int i;
+ int i = -1;
char devname[128];
IsdnCard_t icard;
int busy = 0;
- dev = link->priv;
-
dev_dbg(&link->dev, "avma1cs_config(0x%p)\n", link);
devname[0] = 0;
@@ -197,8 +157,7 @@ static int __devinit avma1cs_config(struct pcmcia_device *link)
/*
* allocate an interrupt line
*/
- i = pcmcia_request_irq(link, &link->irq);
- if (i != 0) {
+ if (!link->irq) {
/* undo */
pcmcia_disable_device(link);
break;
@@ -215,14 +174,6 @@ static int __devinit avma1cs_config(struct pcmcia_device *link)
} while (0);
- /* At this point, the dev_node_t structure(s) should be
- initialized and arranged in a linked list at link->dev. */
-
- strcpy(dev->node.dev_name, "A1");
- dev->node.major = 45;
- dev->node.minor = 0;
- link->dev_node = &dev->node;
-
/* If any step failed, release any partially configured state */
if (i != 0) {
avma1cs_release(link);
@@ -230,9 +181,9 @@ static int __devinit avma1cs_config(struct pcmcia_device *link)
}
printk(KERN_NOTICE "avma1_cs: checking at i/o %#x, irq %d\n",
- link->io.BasePort1, link->irq.AssignedIRQ);
+ link->io.BasePort1, link->irq);
- icard.para[0] = link->irq.AssignedIRQ;
+ icard.para[0] = link->irq;
icard.para[1] = link->io.BasePort1;
icard.protocol = isdnprot;
icard.typ = ISDN_CTYPE_A1_PCMCIA;
@@ -243,7 +194,7 @@ static int __devinit avma1cs_config(struct pcmcia_device *link)
avma1cs_release(link);
return -ENODEV;
}
- dev->node.minor = i;
+ link->priv = (void *) (unsigned long) i;
return 0;
} /* avma1cs_config */
@@ -258,12 +209,12 @@ static int __devinit avma1cs_config(struct pcmcia_device *link)
static void avma1cs_release(struct pcmcia_device *link)
{
- local_info_t *local = link->priv;
+ unsigned long minor = (unsigned long) link->priv;
dev_dbg(&link->dev, "avma1cs_release(0x%p)\n", link);
/* now unregister function with hisax */
- HiSax_closecard(local->node.minor);
+ HiSax_closecard(minor);
pcmcia_disable_device(link);
} /* avma1cs_release */
diff --git a/drivers/isdn/hisax/elsa_cs.c b/drivers/isdn/hisax/elsa_cs.c
index c9f2279e21f5..218927e3a4ea 100644
--- a/drivers/isdn/hisax/elsa_cs.c
+++ b/drivers/isdn/hisax/elsa_cs.c
@@ -87,24 +87,8 @@ static void elsa_cs_release(struct pcmcia_device *link);
static void elsa_cs_detach(struct pcmcia_device *p_dev) __devexit;
-/*
- A driver needs to provide a dev_node_t structure for each device
- on a card. In some cases, there is only one device per card (for
- example, ethernet cards, modems). In other cases, there may be
- many actual or logical devices (SCSI adapters, memory cards with
- multiple partitions). The dev_node_t structures need to be kept
- in a linked list starting at the 'dev' field of a struct pcmcia_device
- structure. We allocate them in the card's private data structure,
- because they generally shouldn't be allocated dynamically.
- In this case, we also provide a flag to indicate if a device is
- "stopped" due to a power management event, or card ejection. The
- device IO routines can use a flag like this to throttle IO to a
- card that is not ready to accept it.
-*/
-
typedef struct local_info_t {
struct pcmcia_device *p_dev;
- dev_node_t node;
int busy;
int cardnr;
} local_info_t;
@@ -136,10 +120,6 @@ static int __devinit elsa_cs_probe(struct pcmcia_device *link)
local->cardnr = -1;
- /* Interrupt setup */
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
- link->irq.Handler = NULL;
-
/*
General socket configuration defaults can go here. In this
client, we assume very little, and rely on the CIS for almost
@@ -223,28 +203,18 @@ static int __devinit elsa_cs_config(struct pcmcia_device *link)
if (i != 0)
goto failed;
- i = pcmcia_request_irq(link, &link->irq);
- if (i != 0) {
- link->irq.AssignedIRQ = 0;
+ if (!link->irq)
goto failed;
- }
i = pcmcia_request_configuration(link, &link->conf);
if (i != 0)
goto failed;
- /* At this point, the dev_node_t structure(s) should be
- initialized and arranged in a linked list at link->dev. *//* */
- sprintf(dev->node.dev_name, "elsa");
- dev->node.major = dev->node.minor = 0x0;
-
- link->dev_node = &dev->node;
-
/* Finally, report what we've done */
- printk(KERN_INFO "%s: index 0x%02x: ",
- dev->node.dev_name, link->conf.ConfigIndex);
+ dev_info(&link->dev, "index 0x%02x: ",
+ link->conf.ConfigIndex);
if (link->conf.Attributes & CONF_ENABLE_IRQ)
- printk(", irq %d", link->irq.AssignedIRQ);
+ printk(", irq %d", link->irq);
if (link->io.NumPorts1)
printk(", io 0x%04x-0x%04x", link->io.BasePort1,
link->io.BasePort1+link->io.NumPorts1-1);
@@ -253,7 +223,7 @@ static int __devinit elsa_cs_config(struct pcmcia_device *link)
link->io.BasePort2+link->io.NumPorts2-1);
printk("\n");
- icard.para[0] = link->irq.AssignedIRQ;
+ icard.para[0] = link->irq;
icard.para[1] = link->io.BasePort1;
icard.protocol = protocol;
icard.typ = ISDN_CTYPE_ELSA_PCMCIA;
diff --git a/drivers/isdn/hisax/hfc4s8s_l1.c b/drivers/isdn/hisax/hfc4s8s_l1.c
index 051b44e2556c..384d5118e325 100644
--- a/drivers/isdn/hisax/hfc4s8s_l1.c
+++ b/drivers/isdn/hisax/hfc4s8s_l1.c
@@ -310,7 +310,7 @@ wait_busy(hfc4s8s_hw * a)
/******************************************************/
/* function to read critical counter registers that */
-/* may be udpated by the chip during read */
+/* may be updated by the chip during read */
/******************************************************/
static u_char
Read_hfc8_stable(hfc4s8s_hw * hw, int reg)
diff --git a/drivers/isdn/hisax/hisax_fcpcipnp.c b/drivers/isdn/hisax/hisax_fcpcipnp.c
index 1925118122f8..8b0a7d86b30f 100644
--- a/drivers/isdn/hisax/hisax_fcpcipnp.c
+++ b/drivers/isdn/hisax/hisax_fcpcipnp.c
@@ -74,9 +74,10 @@ static struct pnp_device_id fcpnp_ids[] __devinitdata = {
.id = "AVM0900",
.driver_data = (unsigned long) "Fritz!Card PnP",
},
+ { .id = "" }
};
-MODULE_DEVICE_TABLE(isapnp, fcpnp_ids);
+MODULE_DEVICE_TABLE(pnp, fcpnp_ids);
#endif
static int protocol = 2; /* EURO-ISDN Default */
diff --git a/drivers/isdn/hisax/sedlbauer_cs.c b/drivers/isdn/hisax/sedlbauer_cs.c
index 71b3ddef03bb..1f4feaab21af 100644
--- a/drivers/isdn/hisax/sedlbauer_cs.c
+++ b/drivers/isdn/hisax/sedlbauer_cs.c
@@ -87,32 +87,8 @@ static void sedlbauer_release(struct pcmcia_device *link);
static void sedlbauer_detach(struct pcmcia_device *p_dev) __devexit;
-/*
- You'll also need to prototype all the functions that will actually
- be used to talk to your device. See 'memory_cs' for a good example
- of a fully self-sufficient driver; the other drivers rely more or
- less on other parts of the kernel.
-*/
-
-/*
- A driver needs to provide a dev_node_t structure for each device
- on a card. In some cases, there is only one device per card (for
- example, ethernet cards, modems). In other cases, there may be
- many actual or logical devices (SCSI adapters, memory cards with
- multiple partitions). The dev_node_t structures need to be kept
- in a linked list starting at the 'dev' field of a struct pcmcia_device
- structure. We allocate them in the card's private data structure,
- because they generally shouldn't be allocated dynamically.
-
- In this case, we also provide a flag to indicate if a device is
- "stopped" due to a power management event, or card ejection. The
- device IO routines can use a flag like this to throttle IO to a
- card that is not ready to accept it.
-*/
-
typedef struct local_info_t {
struct pcmcia_device *p_dev;
- dev_node_t node;
int stop;
int cardnr;
} local_info_t;
@@ -143,10 +119,6 @@ static int __devinit sedlbauer_probe(struct pcmcia_device *link)
local->p_dev = link;
link->priv = local;
- /* Interrupt setup */
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
- link->irq.Handler = NULL;
-
/*
General socket configuration defaults can go here. In this
client, we assume very little, and rely on the CIS for almost
@@ -227,9 +199,7 @@ static int sedlbauer_config_check(struct pcmcia_device *p_dev,
else if (dflt->vpp1.present & (1<<CISTPL_POWER_VNOM))
p_dev->conf.Vpp = dflt->vpp1.param[CISTPL_POWER_VNOM]/10000;
- /* Do we need to allocate an interrupt? */
- if (cfg->irq.IRQInfo1 || dflt->irq.IRQInfo1)
- p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
+ p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
/* IO window settings */
p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0;
@@ -285,7 +255,6 @@ static int sedlbauer_config_check(struct pcmcia_device *p_dev,
static int __devinit sedlbauer_config(struct pcmcia_device *link)
{
- local_info_t *dev = link->priv;
win_req_t *req;
int ret;
IsdnCard_t icard;
@@ -313,17 +282,6 @@ static int __devinit sedlbauer_config(struct pcmcia_device *link)
goto failed;
/*
- Allocate an interrupt line. Note that this does not assign a
- handler to the interrupt, unless the 'Handler' member of the
- irq structure is initialized.
- */
- if (link->conf.Attributes & CONF_ENABLE_IRQ) {
- ret = pcmcia_request_irq(link, &link->irq);
- if (ret)
- goto failed;
- }
-
- /*
This actually configures the PCMCIA socket -- setting up
the I/O windows and the interrupt mapping, and putting the
card and host interface into "Memory and IO" mode.
@@ -332,21 +290,13 @@ static int __devinit sedlbauer_config(struct pcmcia_device *link)
if (ret)
goto failed;
- /*
- At this point, the dev_node_t structure(s) need to be
- initialized and arranged in a linked list at link->dev.
- */
- sprintf(dev->node.dev_name, "sedlbauer");
- dev->node.major = dev->node.minor = 0;
- link->dev_node = &dev->node;
-
/* Finally, report what we've done */
- printk(KERN_INFO "%s: index 0x%02x:",
- dev->node.dev_name, link->conf.ConfigIndex);
+ dev_info(&link->dev, "index 0x%02x:",
+ link->conf.ConfigIndex);
if (link->conf.Vpp)
printk(", Vpp %d.%d", link->conf.Vpp/10, link->conf.Vpp%10);
if (link->conf.Attributes & CONF_ENABLE_IRQ)
- printk(", irq %d", link->irq.AssignedIRQ);
+ printk(", irq %d", link->irq);
if (link->io.NumPorts1)
printk(", io 0x%04x-0x%04x", link->io.BasePort1,
link->io.BasePort1+link->io.NumPorts1-1);
@@ -358,7 +308,7 @@ static int __devinit sedlbauer_config(struct pcmcia_device *link)
req->Base+req->Size-1);
printk("\n");
- icard.para[0] = link->irq.AssignedIRQ;
+ icard.para[0] = link->irq;
icard.para[1] = link->io.BasePort1;
icard.protocol = protocol;
icard.typ = ISDN_CTYPE_SEDLBAUER_PCMCIA;
diff --git a/drivers/isdn/hisax/teles_cs.c b/drivers/isdn/hisax/teles_cs.c
index d010a0da8e19..5771955cc532 100644
--- a/drivers/isdn/hisax/teles_cs.c
+++ b/drivers/isdn/hisax/teles_cs.c
@@ -68,34 +68,8 @@ static void teles_cs_release(struct pcmcia_device *link);
static void teles_detach(struct pcmcia_device *p_dev) __devexit ;
-/*
- A linked list of "instances" of the teles_cs device. Each actual
- PCMCIA card corresponds to one device instance, and is described
- by one struct pcmcia_device structure (defined in ds.h).
-
- You may not want to use a linked list for this -- for example, the
- memory card driver uses an array of struct pcmcia_device pointers, where minor
- device numbers are used to derive the corresponding array index.
-*/
-
-/*
- A driver needs to provide a dev_node_t structure for each device
- on a card. In some cases, there is only one device per card (for
- example, ethernet cards, modems). In other cases, there may be
- many actual or logical devices (SCSI adapters, memory cards with
- multiple partitions). The dev_node_t structures need to be kept
- in a linked list starting at the 'dev' field of a struct pcmcia_device
- structure. We allocate them in the card's private data structure,
- because they generally shouldn't be allocated dynamically.
- In this case, we also provide a flag to indicate if a device is
- "stopped" due to a power management event, or card ejection. The
- device IO routines can use a flag like this to throttle IO to a
- card that is not ready to accept it.
-*/
-
typedef struct local_info_t {
struct pcmcia_device *p_dev;
- dev_node_t node;
int busy;
int cardnr;
} local_info_t;
@@ -126,10 +100,6 @@ static int __devinit teles_probe(struct pcmcia_device *link)
local->p_dev = link;
link->priv = local;
- /* Interrupt setup */
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
- link->irq.Handler = NULL;
-
/*
General socket configuration defaults can go here. In this
client, we assume very little, and rely on the CIS for almost
@@ -213,28 +183,18 @@ static int __devinit teles_cs_config(struct pcmcia_device *link)
if (i != 0)
goto cs_failed;
- i = pcmcia_request_irq(link, &link->irq);
- if (i != 0) {
- link->irq.AssignedIRQ = 0;
+ if (!link->irq)
goto cs_failed;
- }
i = pcmcia_request_configuration(link, &link->conf);
if (i != 0)
goto cs_failed;
- /* At this point, the dev_node_t structure(s) should be
- initialized and arranged in a linked list at link->dev. *//* */
- sprintf(dev->node.dev_name, "teles");
- dev->node.major = dev->node.minor = 0x0;
-
- link->dev_node = &dev->node;
-
/* Finally, report what we've done */
- printk(KERN_INFO "%s: index 0x%02x:",
- dev->node.dev_name, link->conf.ConfigIndex);
+ dev_info(&link->dev, "index 0x%02x:",
+ link->conf.ConfigIndex);
if (link->conf.Attributes & CONF_ENABLE_IRQ)
- printk(", irq %d", link->irq.AssignedIRQ);
+ printk(", irq %d", link->irq);
if (link->io.NumPorts1)
printk(", io 0x%04x-0x%04x", link->io.BasePort1,
link->io.BasePort1+link->io.NumPorts1-1);
@@ -243,7 +203,7 @@ static int __devinit teles_cs_config(struct pcmcia_device *link)
link->io.BasePort2+link->io.NumPorts2-1);
printk("\n");
- icard.para[0] = link->irq.AssignedIRQ;
+ icard.para[0] = link->irq;
icard.para[1] = link->io.BasePort1;
icard.protocol = protocol;
icard.typ = ISDN_CTYPE_TELESPCMCIA;
diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
index 70044ee4b228..a44cdb492ea9 100644
--- a/drivers/isdn/i4l/isdn_common.c
+++ b/drivers/isdn/i4l/isdn_common.c
@@ -1272,9 +1272,9 @@ isdn_poll(struct file *file, poll_table * wait)
static int
-isdn_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
+isdn_ioctl(struct file *file, uint cmd, ulong arg)
{
- uint minor = iminor(inode);
+ uint minor = iminor(file->f_path.dentry->d_inode);
isdn_ctrl c;
int drvidx;
int chidx;
@@ -1722,6 +1722,18 @@ isdn_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
#undef cfg
}
+static long
+isdn_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ int ret;
+
+ lock_kernel();
+ ret = isdn_ioctl(file, cmd, arg);
+ unlock_kernel();
+
+ return ret;
+}
+
/*
* Open the device code.
*/
@@ -1838,7 +1850,7 @@ static const struct file_operations isdn_fops =
.read = isdn_read,
.write = isdn_write,
.poll = isdn_poll,
- .ioctl = isdn_ioctl,
+ .unlocked_ioctl = isdn_unlocked_ioctl,
.open = isdn_open,
.release = isdn_close,
};
diff --git a/drivers/isdn/i4l/isdn_x25iface.c b/drivers/isdn/i4l/isdn_x25iface.c
index efcf1f9327e5..fd10d7c785d4 100644
--- a/drivers/isdn/i4l/isdn_x25iface.c
+++ b/drivers/isdn/i4l/isdn_x25iface.c
@@ -194,7 +194,7 @@ static int isdn_x25iface_receive(struct concap_proto *cprot, struct sk_buff *skb
if ( ( (ix25_pdata_t*) (cprot->proto_data) )
-> state == WAN_CONNECTED ){
if( skb_push(skb, 1)){
- skb -> data[0]=0x00;
+ skb->data[0] = X25_IFACE_DATA;
skb->protocol = x25_type_trans(skb, cprot->net_dev);
netif_rx(skb);
return 0;
@@ -224,7 +224,7 @@ static int isdn_x25iface_connect_ind(struct concap_proto *cprot)
skb = dev_alloc_skb(1);
if( skb ){
- *( skb_put(skb, 1) ) = 0x01;
+ *(skb_put(skb, 1)) = X25_IFACE_CONNECT;
skb->protocol = x25_type_trans(skb, cprot->net_dev);
netif_rx(skb);
return 0;
@@ -253,7 +253,7 @@ static int isdn_x25iface_disconn_ind(struct concap_proto *cprot)
*state_p = WAN_DISCONNECTED;
skb = dev_alloc_skb(1);
if( skb ){
- *( skb_put(skb, 1) ) = 0x02;
+ *(skb_put(skb, 1)) = X25_IFACE_DISCONNECT;
skb->protocol = x25_type_trans(skb, cprot->net_dev);
netif_rx(skb);
return 0;
@@ -272,9 +272,10 @@ static int isdn_x25iface_xmit(struct concap_proto *cprot, struct sk_buff *skb)
unsigned char firstbyte = skb->data[0];
enum wan_states *state = &((ix25_pdata_t*)cprot->proto_data)->state;
int ret = 0;
- IX25DEBUG( "isdn_x25iface_xmit: %s first=%x state=%d \n", MY_DEVNAME(cprot -> net_dev), firstbyte, *state );
+ IX25DEBUG("isdn_x25iface_xmit: %s first=%x state=%d\n",
+ MY_DEVNAME(cprot->net_dev), firstbyte, *state);
switch ( firstbyte ){
- case 0x00: /* dl_data request */
+ case X25_IFACE_DATA:
if( *state == WAN_CONNECTED ){
skb_pull(skb, 1);
cprot -> net_dev -> trans_start = jiffies;
@@ -285,7 +286,7 @@ static int isdn_x25iface_xmit(struct concap_proto *cprot, struct sk_buff *skb)
}
illegal_state_warn( *state, firstbyte );
break;
- case 0x01: /* dl_connect request */
+ case X25_IFACE_CONNECT:
if( *state == WAN_DISCONNECTED ){
*state = WAN_CONNECTING;
ret = cprot -> dops -> connect_req(cprot);
@@ -298,7 +299,7 @@ static int isdn_x25iface_xmit(struct concap_proto *cprot, struct sk_buff *skb)
illegal_state_warn( *state, firstbyte );
}
break;
- case 0x02: /* dl_disconnect request */
+ case X25_IFACE_DISCONNECT:
switch ( *state ){
case WAN_DISCONNECTED:
/* Should not happen. However, give upper layer a
@@ -318,7 +319,7 @@ static int isdn_x25iface_xmit(struct concap_proto *cprot, struct sk_buff *skb)
illegal_state_warn( *state, firstbyte );
}
break;
- case 0x03: /* changing lapb parameters requested */
+ case X25_IFACE_PARAMS:
printk(KERN_WARNING "isdn_x25iface_xmit: setting of lapb"
" options not yet supported\n");
break;
diff --git a/drivers/isdn/mISDN/timerdev.c b/drivers/isdn/mISDN/timerdev.c
index 8785004e85e0..81048b8ed8ad 100644
--- a/drivers/isdn/mISDN/timerdev.c
+++ b/drivers/isdn/mISDN/timerdev.c
@@ -24,6 +24,7 @@
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mISDNif.h>
+#include <linux/smp_lock.h>
#include "core.h"
static u_int *debug;
@@ -97,8 +98,6 @@ mISDN_read(struct file *filep, char __user *buf, size_t count, loff_t *off)
if (*debug & DEBUG_TIMER)
printk(KERN_DEBUG "%s(%p, %p, %d, %p)\n", __func__,
filep, buf, (int)count, off);
- if (*off != filep->f_pos)
- return -ESPIPE;
if (list_empty(&dev->expired) && (dev->work == 0)) {
if (filep->f_flags & O_NONBLOCK)
@@ -215,9 +214,8 @@ unlock:
return ret;
}
-static int
-mISDN_ioctl(struct inode *inode, struct file *filep, unsigned int cmd,
- unsigned long arg)
+static long
+mISDN_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
struct mISDNtimerdev *dev = filep->private_data;
int id, tout, ret = 0;
@@ -226,6 +224,7 @@ mISDN_ioctl(struct inode *inode, struct file *filep, unsigned int cmd,
if (*debug & DEBUG_TIMER)
printk(KERN_DEBUG "%s(%p, %x, %lx)\n", __func__,
filep, cmd, arg);
+ lock_kernel();
switch (cmd) {
case IMADDTIMER:
if (get_user(tout, (int __user *)arg)) {
@@ -257,13 +256,14 @@ mISDN_ioctl(struct inode *inode, struct file *filep, unsigned int cmd,
default:
ret = -EINVAL;
}
+ unlock_kernel();
return ret;
}
static const struct file_operations mISDN_fops = {
.read = mISDN_read,
.poll = mISDN_poll,
- .ioctl = mISDN_ioctl,
+ .unlocked_ioctl = mISDN_ioctl,
.open = mISDN_open,
.release = mISDN_close,
};
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 505eb64c329c..81bf25e67ce1 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -21,7 +21,7 @@ comment "LED drivers"
config LEDS_88PM860X
tristate "LED Support for Marvell 88PM860x PMIC"
- depends on LEDS_CLASS && MFD_88PM860X
+ depends on MFD_88PM860X
help
This option enables support for on-chip LED drivers found on Marvell
Semiconductor 88PM8606 PMIC.
@@ -67,6 +67,16 @@ config LEDS_NET48XX
This option enables support for the Soekris net4801 and net4826 error
LED.
+config LEDS_NET5501
+ tristate "LED Support for Soekris net5501 series Error LED"
+ depends on LEDS_TRIGGERS
+ depends on X86 && LEDS_GPIO_PLATFORM && GPIO_CS5535
+ select LEDS_TRIGGER_DEFAULT_ON
+ default n
+ help
+ Add support for the Soekris net5501 board (detection, error LED
+ and GPIO).
+
config LEDS_FSG
tristate "LED Support for the Freecom FSG-3"
depends on MACH_FSG
@@ -285,6 +295,13 @@ config LEDS_DELL_NETBOOKS
This adds support for the Latitude 2100 and similar
notebooks that have an external LED.
+config LEDS_MC13783
+ tristate "LED Support for MC13783 PMIC"
+ depends on MFD_MC13783
+ help
+ This option enables support for the on-chip LED drivers found
+ on the Freescale Semiconductor MC13783 PMIC.
+
config LEDS_TRIGGERS
bool "LED Trigger support"
help
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 0cd8b9957380..2493de499374 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_LEDS_MIKROTIK_RB532) += leds-rb532.o
obj-$(CONFIG_LEDS_S3C24XX) += leds-s3c24xx.o
obj-$(CONFIG_LEDS_AMS_DELTA) += leds-ams-delta.o
obj-$(CONFIG_LEDS_NET48XX) += leds-net48xx.o
+obj-$(CONFIG_LEDS_NET5501) += leds-net5501.o
obj-$(CONFIG_LEDS_WRAP) += leds-wrap.o
obj-$(CONFIG_LEDS_ALIX2) += leds-alix2.o
obj-$(CONFIG_LEDS_H1940) += leds-h1940.o
@@ -35,6 +36,7 @@ obj-$(CONFIG_LEDS_INTEL_SS4200) += leds-ss4200.o
obj-$(CONFIG_LEDS_LT3593) += leds-lt3593.o
obj-$(CONFIG_LEDS_ADP5520) += leds-adp5520.o
obj-$(CONFIG_LEDS_DELL_NETBOOKS) += dell-led.o
+obj-$(CONFIG_LEDS_MC13783) += leds-mc13783.o
# LED SPI Drivers
obj-$(CONFIG_LEDS_DAC124S085) += leds-dac124s085.o
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index 69e7d86a5143..260660076507 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -74,7 +74,7 @@ static ssize_t led_max_brightness_show(struct device *dev,
static struct device_attribute led_class_attrs[] = {
__ATTR(brightness, 0644, led_brightness_show, led_brightness_store),
- __ATTR(max_brightness, 0644, led_max_brightness_show, NULL),
+ __ATTR(max_brightness, 0444, led_max_brightness_show, NULL),
#ifdef CONFIG_LEDS_TRIGGERS
__ATTR(trigger, 0644, led_trigger_show, led_trigger_store),
#endif
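
The led-class change narrows max_brightness from mode 0644 to 0444: the attribute has no store method, so the write bit only invited writes that fail with -EIO. A small sketch of declaring a read-only device attribute; __ATTR_RO() could be used instead when the show routine follows the <name>_show naming convention (which led_max_brightness_show above does not):

#include <linux/device.h>
#include <linux/kernel.h>

static ssize_t max_brightness_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", 255);	/* placeholder value for the sketch */
}

/* read-only: mode 0444 and no store callback */
static struct device_attribute my_attr =
	__ATTR(max_brightness, 0444, max_brightness_show, NULL);
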
diff --git a/drivers/leds/leds-88pm860x.c b/drivers/leds/leds-88pm860x.c
index 16a60c06c96c..b7677106cff8 100644
--- a/drivers/leds/leds-88pm860x.c
+++ b/drivers/leds/leds-88pm860x.c
@@ -256,8 +256,10 @@ static int pm860x_led_probe(struct platform_device *pdev)
if (pdev->dev.parent->platform_data) {
pm860x_pdata = pdev->dev.parent->platform_data;
pdata = pm860x_pdata->led;
- } else
- pdata = NULL;
+ } else {
+ dev_err(&pdev->dev, "missing platform data\n");
+ return -EINVAL;
+ }
data = kzalloc(sizeof(struct pm860x_led), GFP_KERNEL);
if (data == NULL)
@@ -268,8 +270,11 @@ static int pm860x_led_probe(struct platform_device *pdev)
data->i2c = (chip->id == CHIP_PM8606) ? chip->client : chip->companion;
data->iset = pdata->iset;
data->port = __check_device(pdata, data->name);
- if (data->port < 0)
+ if (data->port < 0) {
+ dev_err(&pdev->dev, "check device failed\n");
+ kfree(data);
return -EINVAL;
+ }
data->current_brightness = 0;
data->cdev.name = data->name;
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index c6e4b772b757..cc22eeefa10b 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -26,7 +26,8 @@ struct gpio_led_data {
u8 new_level;
u8 can_sleep;
u8 active_low;
- int (*platform_gpio_blink_set)(unsigned gpio,
+ u8 blinking;
+ int (*platform_gpio_blink_set)(unsigned gpio, int state,
unsigned long *delay_on, unsigned long *delay_off);
};
@@ -35,7 +36,13 @@ static void gpio_led_work(struct work_struct *work)
struct gpio_led_data *led_dat =
container_of(work, struct gpio_led_data, work);
- gpio_set_value_cansleep(led_dat->gpio, led_dat->new_level);
+ if (led_dat->blinking) {
+ led_dat->platform_gpio_blink_set(led_dat->gpio,
+ led_dat->new_level,
+ NULL, NULL);
+ led_dat->blinking = 0;
+ } else
+ gpio_set_value_cansleep(led_dat->gpio, led_dat->new_level);
}
static void gpio_led_set(struct led_classdev *led_cdev,
@@ -60,8 +67,14 @@ static void gpio_led_set(struct led_classdev *led_cdev,
if (led_dat->can_sleep) {
led_dat->new_level = level;
schedule_work(&led_dat->work);
- } else
- gpio_set_value(led_dat->gpio, level);
+ } else {
+ if (led_dat->blinking) {
+ led_dat->platform_gpio_blink_set(led_dat->gpio, level,
+ NULL, NULL);
+ led_dat->blinking = 0;
+ } else
+ gpio_set_value(led_dat->gpio, level);
+ }
}
static int gpio_blink_set(struct led_classdev *led_cdev,
@@ -70,12 +83,14 @@ static int gpio_blink_set(struct led_classdev *led_cdev,
struct gpio_led_data *led_dat =
container_of(led_cdev, struct gpio_led_data, cdev);
- return led_dat->platform_gpio_blink_set(led_dat->gpio, delay_on, delay_off);
+ led_dat->blinking = 1;
+ return led_dat->platform_gpio_blink_set(led_dat->gpio, GPIO_LED_BLINK,
+ delay_on, delay_off);
}
static int __devinit create_gpio_led(const struct gpio_led *template,
struct gpio_led_data *led_dat, struct device *parent,
- int (*blink_set)(unsigned, unsigned long *, unsigned long *))
+ int (*blink_set)(unsigned, int, unsigned long *, unsigned long *))
{
int ret, state;
@@ -97,6 +112,7 @@ static int __devinit create_gpio_led(const struct gpio_led *template,
led_dat->gpio = template->gpio;
led_dat->can_sleep = gpio_cansleep(template->gpio);
led_dat->active_low = template->active_low;
+ led_dat->blinking = 0;
if (blink_set) {
led_dat->platform_gpio_blink_set = blink_set;
led_dat->cdev.blink_set = gpio_blink_set;
@@ -113,7 +129,7 @@ static int __devinit create_gpio_led(const struct gpio_led *template,
ret = gpio_direction_output(led_dat->gpio, led_dat->active_low ^ state);
if (ret < 0)
goto err;
-
+
INIT_WORK(&led_dat->work, gpio_led_work);
ret = led_classdev_register(parent, &led_dat->cdev);
@@ -211,7 +227,7 @@ struct gpio_led_of_platform_data {
static int __devinit of_gpio_leds_probe(struct of_device *ofdev,
const struct of_device_id *match)
{
- struct device_node *np = ofdev->node, *child;
+ struct device_node *np = ofdev->dev.of_node, *child;
struct gpio_led_of_platform_data *pdata;
int count = 0, ret;
@@ -291,8 +307,8 @@ static struct of_platform_driver of_gpio_leds_driver = {
.driver = {
.name = "of_gpio_leds",
.owner = THIS_MODULE,
+ .of_match_table = of_gpio_leds_match,
},
- .match_table = of_gpio_leds_match,
.probe = of_gpio_leds_probe,
.remove = __devexit_p(of_gpio_leds_remove),
};
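
The leds-gpio rework above widens the board-supplied blink_set callback with a state argument, so the same hook can start hardware blinking (state GPIO_LED_BLINK, with delay pointers) and later be called again with a plain on/off level and NULL delays when brightness is written while blinking. A rough sketch of a board callback under that contract, assuming the GPIO_LED_BLINK constant introduced alongside this interface and a purely hypothetical hardware blink engine:

#include <linux/leds.h>
#include <linux/gpio.h>

static int board_led_blink_set(unsigned gpio, int state,
			       unsigned long *delay_on,
			       unsigned long *delay_off)
{
	if (state == GPIO_LED_BLINK) {
		/* hand the pin over to the (hypothetical) hardware blink engine */
		return 0;
	}

	/* blinking cancelled: drive the pin to the requested steady level */
	gpio_set_value(gpio, state);
	return 0;
}
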
diff --git a/drivers/leds/leds-lp3944.c b/drivers/leds/leds-lp3944.c
index 8d5ecceba181..932a58da76c4 100644
--- a/drivers/leds/leds-lp3944.c
+++ b/drivers/leds/leds-lp3944.c
@@ -379,6 +379,7 @@ static int __devinit lp3944_probe(struct i2c_client *client,
{
struct lp3944_platform_data *lp3944_pdata = client->dev.platform_data;
struct lp3944_data *data;
+ int err;
if (lp3944_pdata == NULL) {
dev_err(&client->dev, "no platform data\n");
@@ -401,9 +402,13 @@ static int __devinit lp3944_probe(struct i2c_client *client,
mutex_init(&data->lock);
- dev_info(&client->dev, "lp3944 enabled\n");
+ err = lp3944_configure(client, data, lp3944_pdata);
+ if (err < 0) {
+ kfree(data);
+ return err;
+ }
- lp3944_configure(client, data, lp3944_pdata);
+ dev_info(&client->dev, "lp3944 enabled\n");
return 0;
}
diff --git a/drivers/leds/leds-mc13783.c b/drivers/leds/leds-mc13783.c
new file mode 100644
index 000000000000..f05bb08d0f09
--- /dev/null
+++ b/drivers/leds/leds-mc13783.c
@@ -0,0 +1,403 @@
+/*
+ * LEDs driver for Freescale MC13783
+ *
+ * Copyright (C) 2010 Philippe Rétornaz
+ *
+ * Based on leds-da903x:
+ * Copyright (C) 2008 Compulab, Ltd.
+ * Mike Rapoport <mike@compulab.co.il>
+ *
+ * Copyright (C) 2006-2008 Marvell International Ltd.
+ * Eric Miao <eric.miao@marvell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/leds.h>
+#include <linux/workqueue.h>
+#include <linux/mfd/mc13783.h>
+#include <linux/slab.h>
+
+struct mc13783_led {
+ struct led_classdev cdev;
+ struct work_struct work;
+ struct mc13783 *master;
+ enum led_brightness new_brightness;
+ int id;
+};
+
+#define MC13783_REG_LED_CONTROL_0 51
+#define MC13783_LED_C0_ENABLE_BIT (1 << 0)
+#define MC13783_LED_C0_TRIODE_MD_BIT (1 << 7)
+#define MC13783_LED_C0_TRIODE_AD_BIT (1 << 8)
+#define MC13783_LED_C0_TRIODE_KP_BIT (1 << 9)
+#define MC13783_LED_C0_BOOST_BIT (1 << 10)
+#define MC13783_LED_C0_ABMODE_MASK 0x7
+#define MC13783_LED_C0_ABMODE 11
+#define MC13783_LED_C0_ABREF_MASK 0x3
+#define MC13783_LED_C0_ABREF 14
+
+#define MC13783_REG_LED_CONTROL_1 52
+#define MC13783_LED_C1_TC1HALF_BIT (1 << 18)
+
+#define MC13783_REG_LED_CONTROL_2 53
+#define MC13783_LED_C2_BL_P_MASK 0xf
+#define MC13783_LED_C2_MD_P 9
+#define MC13783_LED_C2_AD_P 13
+#define MC13783_LED_C2_KP_P 17
+#define MC13783_LED_C2_BL_C_MASK 0x7
+#define MC13783_LED_C2_MD_C 0
+#define MC13783_LED_C2_AD_C 3
+#define MC13783_LED_C2_KP_C 6
+
+#define MC13783_REG_LED_CONTROL_3 54
+#define MC13783_LED_C3_TC_P 6
+#define MC13783_LED_C3_TC_P_MASK 0x1f
+
+#define MC13783_REG_LED_CONTROL_4 55
+#define MC13783_REG_LED_CONTROL_5 56
+
+#define MC13783_LED_Cx_PERIOD 21
+#define MC13783_LED_Cx_PERIOD_MASK 0x3
+#define MC13783_LED_Cx_SLEWLIM_BIT (1 << 23)
+#define MC13783_LED_Cx_TRIODE_TC_BIT (1 << 23)
+#define MC13783_LED_Cx_TC_C_MASK 0x3
+
+static void mc13783_led_work(struct work_struct *work)
+{
+ struct mc13783_led *led = container_of(work, struct mc13783_led, work);
+ int reg = 0;
+ int mask = 0;
+ int value = 0;
+ int bank, off, shift;
+
+ switch (led->id) {
+ case MC13783_LED_MD:
+ reg = MC13783_REG_LED_CONTROL_2;
+ mask = MC13783_LED_C2_BL_P_MASK << MC13783_LED_C2_MD_P;
+ value = (led->new_brightness >> 4) << MC13783_LED_C2_MD_P;
+ break;
+ case MC13783_LED_AD:
+ reg = MC13783_REG_LED_CONTROL_2;
+ mask = MC13783_LED_C2_BL_P_MASK << MC13783_LED_C2_AD_P;
+ value = (led->new_brightness >> 4) << MC13783_LED_C2_AD_P;
+ break;
+ case MC13783_LED_KP:
+ reg = MC13783_REG_LED_CONTROL_2;
+ mask = MC13783_LED_C2_BL_P_MASK << MC13783_LED_C2_KP_P;
+ value = (led->new_brightness >> 4) << MC13783_LED_C2_KP_P;
+ break;
+ case MC13783_LED_R1:
+ case MC13783_LED_G1:
+ case MC13783_LED_B1:
+ case MC13783_LED_R2:
+ case MC13783_LED_G2:
+ case MC13783_LED_B2:
+ case MC13783_LED_R3:
+ case MC13783_LED_G3:
+ case MC13783_LED_B3:
+ off = led->id - MC13783_LED_R1;
+ bank = off/3;
+ reg = MC13783_REG_LED_CONTROL_3 + off/3;
+ shift = (off - bank * 3) * 5 + MC13783_LED_C3_TC_P;
+ value = (led->new_brightness >> 3) << shift;
+ mask = MC13783_LED_C3_TC_P_MASK << shift;
+ break;
+ }
+
+ mc13783_lock(led->master);
+
+ mc13783_reg_rmw(led->master, reg, mask, value);
+
+ mc13783_unlock(led->master);
+}
+
+static void mc13783_led_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct mc13783_led *led;
+
+ led = container_of(led_cdev, struct mc13783_led, cdev);
+ led->new_brightness = value;
+ schedule_work(&led->work);
+}
+
+static int __devinit mc13783_led_setup(struct mc13783_led *led, int max_current)
+{
+ int shift = 0;
+ int mask = 0;
+ int value = 0;
+ int reg = 0;
+ int ret, bank;
+
+ switch (led->id) {
+ case MC13783_LED_MD:
+ shift = MC13783_LED_C2_MD_C;
+ mask = MC13783_LED_C2_BL_C_MASK;
+ value = max_current & MC13783_LED_C2_BL_C_MASK;
+ reg = MC13783_REG_LED_CONTROL_2;
+ break;
+ case MC13783_LED_AD:
+ shift = MC13783_LED_C2_AD_C;
+ mask = MC13783_LED_C2_BL_C_MASK;
+ value = max_current & MC13783_LED_C2_BL_C_MASK;
+ reg = MC13783_REG_LED_CONTROL_2;
+ break;
+ case MC13783_LED_KP:
+ shift = MC13783_LED_C2_KP_C;
+ mask = MC13783_LED_C2_BL_C_MASK;
+ value = max_current & MC13783_LED_C2_BL_C_MASK;
+ reg = MC13783_REG_LED_CONTROL_2;
+ break;
+ case MC13783_LED_R1:
+ case MC13783_LED_G1:
+ case MC13783_LED_B1:
+ case MC13783_LED_R2:
+ case MC13783_LED_G2:
+ case MC13783_LED_B2:
+ case MC13783_LED_R3:
+ case MC13783_LED_G3:
+ case MC13783_LED_B3:
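+ /* Tri-color LEDs: 2-bit current-limit field per LED, three LEDs per bank register */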
+ bank = (led->id - MC13783_LED_R1)/3;
+ reg = MC13783_REG_LED_CONTROL_3 + bank;
+ shift = ((led->id - MC13783_LED_R1) - bank * 3) * 2;
+ mask = MC13783_LED_Cx_TC_C_MASK;
+ value = max_current & MC13783_LED_Cx_TC_C_MASK;
+ break;
+ }
+
+ mc13783_lock(led->master);
+
+ ret = mc13783_reg_rmw(led->master, reg, mask << shift,
+ value << shift);
+
+ mc13783_unlock(led->master);
+ return ret;
+}
+
+static int __devinit mc13783_leds_prepare(struct platform_device *pdev)
+{
+ struct mc13783_leds_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct mc13783 *dev = dev_get_drvdata(pdev->dev.parent);
+ int ret = 0;
+ int reg = 0;
+
+ mc13783_lock(dev);
+
+ if (pdata->flags & MC13783_LED_TC1HALF)
+ reg |= MC13783_LED_C1_TC1HALF_BIT;
+
+ if (pdata->flags & MC13783_LED_SLEWLIMTC)
+ reg |= MC13783_LED_Cx_SLEWLIM_BIT;
+
+ ret = mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_1, reg);
+ if (ret)
+ goto out;
+
+ reg = (pdata->bl_period & MC13783_LED_Cx_PERIOD_MASK) <<
+ MC13783_LED_Cx_PERIOD;
+
+ if (pdata->flags & MC13783_LED_SLEWLIMBL)
+ reg |= MC13783_LED_Cx_SLEWLIM_BIT;
+
+ ret = mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_2, reg);
+ if (ret)
+ goto out;
+
+ reg = (pdata->tc1_period & MC13783_LED_Cx_PERIOD_MASK) <<
+ MC13783_LED_Cx_PERIOD;
+
+ if (pdata->flags & MC13783_LED_TRIODE_TC1)
+ reg |= MC13783_LED_Cx_TRIODE_TC_BIT;
+
+ ret = mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_3, reg);
+ if (ret)
+ goto out;
+
+ reg = (pdata->tc2_period & MC13783_LED_Cx_PERIOD_MASK) <<
+ MC13783_LED_Cx_PERIOD;
+
+ if (pdata->flags & MC13783_LED_TRIODE_TC2)
+ reg |= MC13783_LED_Cx_TRIODE_TC_BIT;
+
+ ret = mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_4, reg);
+ if (ret)
+ goto out;
+
+ reg = (pdata->tc3_period & MC13783_LED_Cx_PERIOD_MASK) <<
+ MC13783_LED_Cx_PERIOD;
+
+ if (pdata->flags & MC13783_LED_TRIODE_TC3)
+ reg |= MC13783_LED_Cx_TRIODE_TC_BIT;
+
+ ret = mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_5, reg);
+ if (ret)
+ goto out;
+
+ reg = MC13783_LED_C0_ENABLE_BIT;
+ if (pdata->flags & MC13783_LED_TRIODE_MD)
+ reg |= MC13783_LED_C0_TRIODE_MD_BIT;
+ if (pdata->flags & MC13783_LED_TRIODE_AD)
+ reg |= MC13783_LED_C0_TRIODE_AD_BIT;
+ if (pdata->flags & MC13783_LED_TRIODE_KP)
+ reg |= MC13783_LED_C0_TRIODE_KP_BIT;
+ if (pdata->flags & MC13783_LED_BOOST_EN)
+ reg |= MC13783_LED_C0_BOOST_BIT;
+
+ reg |= (pdata->abmode & MC13783_LED_C0_ABMODE_MASK) <<
+ MC13783_LED_C0_ABMODE;
+ reg |= (pdata->abref & MC13783_LED_C0_ABREF_MASK) <<
+ MC13783_LED_C0_ABREF;
+
+ ret = mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_0, reg);
+
+out:
+ mc13783_unlock(dev);
+ return ret;
+}
+
+static int __devinit mc13783_led_probe(struct platform_device *pdev)
+{
+ struct mc13783_leds_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct mc13783_led_platform_data *led_cur;
+ struct mc13783_led *led, *led_dat;
+ int ret, i;
+ int init_led = 0;
+
+ if (pdata == NULL) {
+ dev_err(&pdev->dev, "missing platform data\n");
+ return -ENODEV;
+ }
+
+ if (pdata->num_leds < 1 || pdata->num_leds > MC13783_LED_MAX) {
+ dev_err(&pdev->dev, "Invalid led count %d\n", pdata->num_leds);
+ return -EINVAL;
+ }
+
+ led = kzalloc(sizeof(*led) * pdata->num_leds, GFP_KERNEL);
+ if (led == NULL) {
+ dev_err(&pdev->dev, "failed to alloc memory\n");
+ return -ENOMEM;
+ }
+
+ ret = mc13783_leds_prepare(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to init led driver\n");
+ goto err_free;
+ }
+
+ for (i = 0; i < pdata->num_leds; i++) {
+ led_dat = &led[i];
+ led_cur = &pdata->led[i];
+
+ if (led_cur->id > MC13783_LED_MAX || led_cur->id < 0) {
+ dev_err(&pdev->dev, "invalid id %d\n", led_cur->id);
+ ret = -EINVAL;
+ goto err_register;
+ }
+
+ if (init_led & (1 << led_cur->id)) {
+ dev_err(&pdev->dev, "led %d already initialized\n",
+ led_cur->id);
+ ret = -EINVAL;
+ goto err_register;
+ }
+
+ init_led |= 1 << led_cur->id;
+ led_dat->cdev.name = led_cur->name;
+ led_dat->cdev.default_trigger = led_cur->default_trigger;
+ led_dat->cdev.brightness_set = mc13783_led_set;
+ led_dat->cdev.brightness = LED_OFF;
+ led_dat->id = led_cur->id;
+ led_dat->master = dev_get_drvdata(pdev->dev.parent);
+
+ INIT_WORK(&led_dat->work, mc13783_led_work);
+
+ ret = led_classdev_register(pdev->dev.parent, &led_dat->cdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register led %d\n",
+ led_dat->id);
+ goto err_register;
+ }
+
+ ret = mc13783_led_setup(led_dat, led_cur->max_current);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to init led %d\n",
+ led_dat->id);
+ i++;
+ goto err_register;
+ }
+ }
+
+ platform_set_drvdata(pdev, led);
+ return 0;
+
+err_register:
+ for (i = i - 1; i >= 0; i--) {
+ led_classdev_unregister(&led[i].cdev);
+ cancel_work_sync(&led[i].work);
+ }
+
+err_free:
+ kfree(led);
+ return ret;
+}
+
+static int __devexit mc13783_led_remove(struct platform_device *pdev)
+{
+ struct mc13783_leds_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct mc13783_led *led = platform_get_drvdata(pdev);
+ struct mc13783 *dev = dev_get_drvdata(pdev->dev.parent);
+ int i;
+
+ for (i = 0; i < pdata->num_leds; i++) {
+ led_classdev_unregister(&led[i].cdev);
+ cancel_work_sync(&led[i].work);
+ }
+
+ mc13783_lock(dev);
+
+ mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_0, 0);
+ mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_1, 0);
+ mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_2, 0);
+ mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_3, 0);
+ mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_4, 0);
+ mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_5, 0);
+
+ mc13783_unlock(dev);
+
+ kfree(led);
+ return 0;
+}
+
+static struct platform_driver mc13783_led_driver = {
+ .driver = {
+ .name = "mc13783-led",
+ .owner = THIS_MODULE,
+ },
+ .probe = mc13783_led_probe,
+ .remove = __devexit_p(mc13783_led_remove),
+};
+
+static int __init mc13783_led_init(void)
+{
+ return platform_driver_register(&mc13783_led_driver);
+}
+module_init(mc13783_led_init);
+
+static void __exit mc13783_led_exit(void)
+{
+ platform_driver_unregister(&mc13783_led_driver);
+}
+module_exit(mc13783_led_exit);
+
+MODULE_DESCRIPTION("LEDs driver for Freescale MC13783 PMIC");
+MODULE_AUTHOR("Philippe Retornaz <philippe.retornaz@epfl.ch>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:mc13783-led");
diff --git a/drivers/leds/leds-net5501.c b/drivers/leds/leds-net5501.c
new file mode 100644
index 000000000000..3063f591f0dc
--- /dev/null
+++ b/drivers/leds/leds-net5501.c
@@ -0,0 +1,94 @@
+/*
+ * Soekris board support code
+ *
+ * Copyright (C) 2008-2009 Tower Technologies
+ * Written by Alessandro Zummo <a.zummo@towertech.it>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/string.h>
+#include <linux/leds.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+
+#include <asm/geode.h>
+
+static struct gpio_led net5501_leds[] = {
+ {
+ .name = "error",
+ .gpio = 6,
+ .default_trigger = "default-on",
+ },
+};
+
+static struct gpio_led_platform_data net5501_leds_data = {
+ .num_leds = ARRAY_SIZE(net5501_leds),
+ .leds = net5501_leds,
+};
+
+static struct platform_device net5501_leds_dev = {
+ .name = "leds-gpio",
+ .id = -1,
+ .dev.platform_data = &net5501_leds_data,
+};
+
+static void __init init_net5501(void)
+{
+ platform_device_register(&net5501_leds_dev);
+}
+
+struct soekris_board {
+ u16 offset;
+ char *sig;
+ u8 len;
+ void (*init)(void);
+};
+
+static struct soekris_board __initdata boards[] = {
+ { 0xb7b, "net5501", 7, init_net5501 }, /* net5501 v1.33/1.33c */
+ { 0xb1f, "net5501", 7, init_net5501 }, /* net5501 v1.32i */
+};
+
+static int __init soekris_init(void)
+{
+ int i;
+ unsigned char *rombase, *bios;
+
+ if (!is_geode())
+ return 0;
+
+ rombase = ioremap(0xffff0000, 0xffff);
+ if (!rombase) {
+ printk(KERN_INFO "Soekris net5501 LED driver failed to get rombase\n");
+ return 0;
+ }
+
+ bios = rombase + 0x20; /* null terminated */
+
+ if (strncmp(bios, "comBIOS", 7))
+ goto unmap;
+
+ for (i = 0; i < ARRAY_SIZE(boards); i++) {
+ unsigned char *model = rombase + boards[i].offset;
+
+ if (strncmp(model, boards[i].sig, boards[i].len) == 0) {
+ printk(KERN_INFO "Soekris %s: %s\n", model, bios);
+
+ if (boards[i].init)
+ boards[i].init();
+ break;
+ }
+ }
+
+unmap:
+ iounmap(rombase);
+ return 0;
+}
+
+arch_initcall(soekris_init);
diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
index 51477ec71391..a688293abd0b 100644
--- a/drivers/leds/leds-ss4200.c
+++ b/drivers/leds/leds-ss4200.c
@@ -534,7 +534,7 @@ static int __init nas_gpio_init(void)
set_power_light_amber_noblink();
return 0;
out_err:
- for (; i >= 0; i--)
+ for (i--; i >= 0; i--)
unregister_nasgpio_led(i);
pci_unregister_driver(&nas_gpio_pci_driver);
return ret;
diff --git a/drivers/macintosh/macio-adb.c b/drivers/macintosh/macio-adb.c
index 79119f56e82d..bd6da7a9c55b 100644
--- a/drivers/macintosh/macio-adb.c
+++ b/drivers/macintosh/macio-adb.c
@@ -155,6 +155,7 @@ static int macio_adb_reset_bus(void)
while ((in_8(&adb->ctrl.r) & ADB_RST) != 0) {
if (--timeout == 0) {
out_8(&adb->ctrl.r, in_8(&adb->ctrl.r) & ~ADB_RST);
+ spin_unlock_irqrestore(&macio_lock, flags);
return -1;
}
}
diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
index 26a303a1d1ab..97147804a49c 100644
--- a/drivers/macintosh/macio_asic.c
+++ b/drivers/macintosh/macio_asic.c
@@ -39,14 +39,12 @@ static struct macio_chip *macio_on_hold;
static int macio_bus_match(struct device *dev, struct device_driver *drv)
{
- struct macio_dev * macio_dev = to_macio_device(dev);
- struct macio_driver * macio_drv = to_macio_driver(drv);
- const struct of_device_id * matches = macio_drv->match_table;
+ const struct of_device_id * matches = drv->of_match_table;
if (!matches)
return 0;
- return of_match_device(matches, &macio_dev->ofdev) != NULL;
+ return of_match_device(matches, dev) != NULL;
}
struct macio_dev *macio_dev_get(struct macio_dev *dev)
@@ -84,7 +82,7 @@ static int macio_device_probe(struct device *dev)
macio_dev_get(macio_dev);
- match = of_match_device(drv->match_table, &macio_dev->ofdev);
+ match = of_match_device(drv->driver.of_match_table, dev);
if (match)
error = drv->probe(macio_dev, match);
if (error)
@@ -248,7 +246,7 @@ static void macio_create_fixup_irq(struct macio_dev *dev, int index,
static void macio_add_missing_resources(struct macio_dev *dev)
{
- struct device_node *np = dev->ofdev.node;
+ struct device_node *np = dev->ofdev.dev.of_node;
unsigned int irq_base;
/* Gatwick has some missing interrupts on child nodes */
@@ -289,7 +287,7 @@ static void macio_add_missing_resources(struct macio_dev *dev)
static void macio_setup_interrupts(struct macio_dev *dev)
{
- struct device_node *np = dev->ofdev.node;
+ struct device_node *np = dev->ofdev.dev.of_node;
unsigned int irq;
int i = 0, j = 0;
@@ -317,7 +315,7 @@ static void macio_setup_interrupts(struct macio_dev *dev)
static void macio_setup_resources(struct macio_dev *dev,
struct resource *parent_res)
{
- struct device_node *np = dev->ofdev.node;
+ struct device_node *np = dev->ofdev.dev.of_node;
struct resource r;
int index;
@@ -373,9 +371,9 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip,
dev->bus = &chip->lbus;
dev->media_bay = in_bay;
- dev->ofdev.node = np;
- dev->ofdev.dma_mask = 0xffffffffUL;
- dev->ofdev.dev.dma_mask = &dev->ofdev.dma_mask;
+ dev->ofdev.dev.of_node = np;
+ dev->ofdev.archdata.dma_mask = 0xffffffffUL;
+ dev->ofdev.dev.dma_mask = &dev->ofdev.archdata.dma_mask;
dev->ofdev.dev.parent = parent;
dev->ofdev.dev.bus = &macio_bus_type;
dev->ofdev.dev.release = macio_release_dev;
@@ -494,9 +492,9 @@ static void macio_pci_add_devices(struct macio_chip *chip)
}
/* Add media bay devices if any */
+ pnode = mbdev ? mbdev->ofdev.dev.of_node : NULL;
if (mbdev)
- for (np = NULL; (np = of_get_next_child(mbdev->ofdev.node, np))
- != NULL;) {
+ for (np = NULL; (np = of_get_next_child(pnode, np)) != NULL;) {
if (macio_skip_device(np))
continue;
of_node_get(np);
@@ -506,9 +504,9 @@ static void macio_pci_add_devices(struct macio_chip *chip)
}
/* Add serial ports if any */
+ pnode = sdev ? sdev->ofdev.dev.of_node : NULL;
if (sdev) {
- for (np = NULL; (np = of_get_next_child(sdev->ofdev.node, np))
- != NULL;) {
+ for (np = NULL; (np = of_get_next_child(pnode, np)) != NULL;) {
if (macio_skip_device(np))
continue;
of_node_get(np);
diff --git a/drivers/macintosh/macio_sysfs.c b/drivers/macintosh/macio_sysfs.c
index 9e9453b58425..6999ce59fd10 100644
--- a/drivers/macintosh/macio_sysfs.c
+++ b/drivers/macintosh/macio_sysfs.c
@@ -9,7 +9,7 @@ field##_show (struct device *dev, struct device_attribute *attr, \
char *buf) \
{ \
struct macio_dev *mdev = to_macio_device (dev); \
- return sprintf (buf, format_string, mdev->ofdev.node->field); \
+ return sprintf (buf, format_string, mdev->ofdev.dev.of_node->field); \
}
static ssize_t
@@ -21,7 +21,7 @@ compatible_show (struct device *dev, struct device_attribute *attr, char *buf)
int length = 0;
of = &to_macio_device (dev)->ofdev;
- compat = of_get_property(of->node, "compatible", &cplen);
+ compat = of_get_property(of->dev.of_node, "compatible", &cplen);
if (!compat) {
*buf = '\0';
return 0;
@@ -58,7 +58,7 @@ static ssize_t devspec_show(struct device *dev,
struct of_device *ofdev;
ofdev = to_of_device(dev);
- return sprintf(buf, "%s\n", ofdev->node->full_name);
+ return sprintf(buf, "%s\n", ofdev->dev.of_node->full_name);
}
macio_config_of_attr (name, "%s\n");
diff --git a/drivers/macintosh/mediabay.c b/drivers/macintosh/mediabay.c
index 08002b88f342..288acce76b74 100644
--- a/drivers/macintosh/mediabay.c
+++ b/drivers/macintosh/mediabay.c
@@ -564,7 +564,7 @@ static int __devinit media_bay_attach(struct macio_dev *mdev, const struct of_de
unsigned long base;
int i;
- ofnode = mdev->ofdev.node;
+ ofnode = mdev->ofdev.dev.of_node;
if (macio_resource_count(mdev) < 1)
return -ENODEV;
diff --git a/drivers/macintosh/nvram.c b/drivers/macintosh/nvram.c
index c876349c32de..a271c8218d82 100644
--- a/drivers/macintosh/nvram.c
+++ b/drivers/macintosh/nvram.c
@@ -100,7 +100,7 @@ const struct file_operations nvram_fops = {
.llseek = nvram_llseek,
.read = read_nvram,
.write = write_nvram,
- .ioctl = nvram_ioctl,
+ .unlocked_ioctl = nvram_ioctl,
};
static struct miscdevice nvram_dev = {
diff --git a/drivers/macintosh/rack-meter.c b/drivers/macintosh/rack-meter.c
index 7c54d80c4fb2..12946c5f583f 100644
--- a/drivers/macintosh/rack-meter.c
+++ b/drivers/macintosh/rack-meter.c
@@ -375,7 +375,7 @@ static int __devinit rackmeter_probe(struct macio_dev* mdev,
pr_debug("rackmeter_probe()\n");
/* Get i2s-a node */
- while ((i2s = of_get_next_child(mdev->ofdev.node, i2s)) != NULL)
+ while ((i2s = of_get_next_child(mdev->ofdev.dev.of_node, i2s)) != NULL)
if (strcmp(i2s->name, "i2s-a") == 0)
break;
if (i2s == NULL) {
@@ -431,7 +431,7 @@ static int __devinit rackmeter_probe(struct macio_dev* mdev,
of_address_to_resource(i2s, 1, &rdma)) {
printk(KERN_ERR
"rackmeter: found match but lacks resources: %s",
- mdev->ofdev.node->full_name);
+ mdev->ofdev.dev.of_node->full_name);
rc = -ENXIO;
goto bail_free;
}
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index 888448cf7f1f..2506c957712e 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -671,8 +671,11 @@ static const struct of_device_id smu_platform_match[] =
static struct of_platform_driver smu_of_platform_driver =
{
- .name = "smu",
- .match_table = smu_platform_match,
+ .driver = {
+ .name = "smu",
+ .owner = THIS_MODULE,
+ .of_match_table = smu_platform_match,
+ },
.probe = smu_platform_probe,
};
@@ -1183,8 +1186,10 @@ static ssize_t smu_read_command(struct file *file, struct smu_private *pp,
return -EOVERFLOW;
spin_lock_irqsave(&pp->lock, flags);
if (pp->cmd.status == 1) {
- if (file->f_flags & O_NONBLOCK)
+ if (file->f_flags & O_NONBLOCK) {
+ spin_unlock_irqrestore(&pp->lock, flags);
return -EAGAIN;
+ }
add_wait_queue(&pp->wait, &wait);
for (;;) {
set_current_state(TASK_INTERRUPTIBLE);
diff --git a/drivers/macintosh/therm_adt746x.c b/drivers/macintosh/therm_adt746x.c
index c42eeb43042d..16d82f17ae82 100644
--- a/drivers/macintosh/therm_adt746x.c
+++ b/drivers/macintosh/therm_adt746x.c
@@ -182,6 +182,7 @@ remove_thermostat(struct i2c_client *client)
thermostat = NULL;
+ i2c_set_clientdata(client, NULL);
kfree(th);
return 0;
@@ -399,6 +400,7 @@ static int probe_thermostat(struct i2c_client *client,
rc = read_reg(th, CONFIG_REG);
if (rc < 0) {
dev_err(&client->dev, "Thermostat failed to read config!\n");
+ i2c_set_clientdata(client, NULL);
kfree(th);
return -ENODEV;
}
diff --git a/drivers/macintosh/therm_pm72.c b/drivers/macintosh/therm_pm72.c
index b18fa948f3d1..e60605bd0ea9 100644
--- a/drivers/macintosh/therm_pm72.c
+++ b/drivers/macintosh/therm_pm72.c
@@ -2215,7 +2215,7 @@ static int fcu_of_probe(struct of_device* dev, const struct of_device_id *match)
state = state_detached;
/* Lookup the fans in the device tree */
- fcu_lookup_fans(dev->node);
+ fcu_lookup_fans(dev->dev.of_node);
/* Add the driver */
return i2c_add_driver(&therm_pm72_driver);
@@ -2238,8 +2238,11 @@ static const struct of_device_id fcu_match[] =
static struct of_platform_driver fcu_of_platform_driver =
{
- .name = "temperature",
- .match_table = fcu_match,
+ .driver = {
+ .name = "temperature",
+ .owner = THIS_MODULE,
+ .of_match_table = fcu_match,
+ },
.probe = fcu_of_probe,
.remove = fcu_of_remove
};
diff --git a/drivers/macintosh/therm_windtunnel.c b/drivers/macintosh/therm_windtunnel.c
index 0839770e4ec5..5c9367acf0cf 100644
--- a/drivers/macintosh/therm_windtunnel.c
+++ b/drivers/macintosh/therm_windtunnel.c
@@ -463,8 +463,11 @@ static const struct of_device_id therm_of_match[] = {{
};
static struct of_platform_driver therm_of_driver = {
- .name = "temperature",
- .match_table = therm_of_match,
+ .driver = {
+ .name = "temperature",
+ .owner = THIS_MODULE,
+ .of_match_table = therm_of_match,
+ },
.probe = therm_of_probe,
.remove = therm_of_remove,
};
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index 42764849eb78..3d4fc0f7b00b 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -2273,8 +2273,7 @@ static int register_pmu_pm_ops(void)
device_initcall(register_pmu_pm_ops);
#endif
-static int
-pmu_ioctl(struct inode * inode, struct file *filp,
+static int pmu_ioctl(struct file *filp,
u_int cmd, u_long arg)
{
__u32 __user *argp = (__u32 __user *)arg;
@@ -2337,11 +2336,23 @@ pmu_ioctl(struct inode * inode, struct file *filp,
return error;
}
+static long pmu_unlocked_ioctl(struct file *filp,
+ u_int cmd, u_long arg)
+{
+ int ret;
+
+ lock_kernel();
+ ret = pmu_ioctl(filp, cmd, arg);
+ unlock_kernel();
+
+ return ret;
+}
+
static const struct file_operations pmu_device_fops = {
.read = pmu_read,
.write = pmu_write,
.poll = pmu_fpoll,
- .ioctl = pmu_ioctl,
+ .unlocked_ioctl = pmu_unlocked_ioctl,
.open = pmu_open,
.release = pmu_release,
};
diff --git a/drivers/macintosh/windfarm_pm81.c b/drivers/macintosh/windfarm_pm81.c
index 565d5b2adc95..749d174b0dc6 100644
--- a/drivers/macintosh/windfarm_pm81.c
+++ b/drivers/macintosh/windfarm_pm81.c
@@ -188,7 +188,7 @@ struct wf_smu_sys_fans_state {
};
/*
- * Configs for SMU Sytem Fan control loop
+ * Configs for SMU System Fan control loop
*/
static struct wf_smu_sys_fans_param wf_smu_sys_all_params[] = {
/* Model ID 2 */
@@ -757,10 +757,8 @@ static int __devexit wf_smu_remove(struct platform_device *ddev)
wf_put_control(cpufreq_clamp);
/* Destroy control loops state structures */
- if (wf_smu_sys_fans)
- kfree(wf_smu_sys_fans);
- if (wf_smu_cpu_fans)
- kfree(wf_smu_cpu_fans);
+ kfree(wf_smu_sys_fans);
+ kfree(wf_smu_cpu_fans);
return 0;
}
diff --git a/drivers/macintosh/windfarm_pm91.c b/drivers/macintosh/windfarm_pm91.c
index bea99168ff35..344273235124 100644
--- a/drivers/macintosh/windfarm_pm91.c
+++ b/drivers/macintosh/windfarm_pm91.c
@@ -687,12 +687,9 @@ static int __devexit wf_smu_remove(struct platform_device *ddev)
wf_put_control(cpufreq_clamp);
/* Destroy control loops state structures */
- if (wf_smu_slots_fans)
- kfree(wf_smu_cpu_fans);
- if (wf_smu_drive_fans)
- kfree(wf_smu_cpu_fans);
- if (wf_smu_cpu_fans)
- kfree(wf_smu_cpu_fans);
+ kfree(wf_smu_slots_fans);
+ kfree(wf_smu_drive_fans);
+ kfree(wf_smu_cpu_fans);
return 0;
}
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index acb3a4e404ff..4a6feac8c94a 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -100,8 +100,8 @@ config MD_RAID1
If unsure, say Y.
config MD_RAID10
- tristate "RAID-10 (mirrored striping) mode (EXPERIMENTAL)"
- depends on BLK_DEV_MD && EXPERIMENTAL
+ tristate "RAID-10 (mirrored striping) mode"
+ depends on BLK_DEV_MD
---help---
RAID-10 provides a combination of striping (RAID-0) and
mirroring (RAID-1) with easier configuration and more flexible
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 26ac8aad0b19..1742435ce3ae 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -505,7 +505,7 @@ void bitmap_update_sb(struct bitmap *bitmap)
return;
}
spin_unlock_irqrestore(&bitmap->lock, flags);
- sb = (bitmap_super_t *)kmap_atomic(bitmap->sb_page, KM_USER0);
+ sb = kmap_atomic(bitmap->sb_page, KM_USER0);
sb->events = cpu_to_le64(bitmap->mddev->events);
if (bitmap->mddev->events < bitmap->events_cleared) {
/* rocking back to read-only */
@@ -526,7 +526,7 @@ void bitmap_print_sb(struct bitmap *bitmap)
if (!bitmap || !bitmap->sb_page)
return;
- sb = (bitmap_super_t *)kmap_atomic(bitmap->sb_page, KM_USER0);
+ sb = kmap_atomic(bitmap->sb_page, KM_USER0);
printk(KERN_DEBUG "%s: bitmap file superblock:\n", bmname(bitmap));
printk(KERN_DEBUG " magic: %08x\n", le32_to_cpu(sb->magic));
printk(KERN_DEBUG " version: %d\n", le32_to_cpu(sb->version));
@@ -575,7 +575,7 @@ static int bitmap_read_sb(struct bitmap *bitmap)
return err;
}
- sb = (bitmap_super_t *)kmap_atomic(bitmap->sb_page, KM_USER0);
+ sb = kmap_atomic(bitmap->sb_page, KM_USER0);
chunksize = le32_to_cpu(sb->chunksize);
daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ;
@@ -661,7 +661,7 @@ static int bitmap_mask_state(struct bitmap *bitmap, enum bitmap_state bits,
return 0;
}
spin_unlock_irqrestore(&bitmap->lock, flags);
- sb = (bitmap_super_t *)kmap_atomic(bitmap->sb_page, KM_USER0);
+ sb = kmap_atomic(bitmap->sb_page, KM_USER0);
old = le32_to_cpu(sb->state) & bits;
switch (op) {
case MASK_SET: sb->state |= cpu_to_le32(bits);
@@ -1292,9 +1292,14 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect
if (!bitmap) return 0;
if (behind) {
+ int bw;
atomic_inc(&bitmap->behind_writes);
+ bw = atomic_read(&bitmap->behind_writes);
+ if (bw > bitmap->behind_writes_used)
+ bitmap->behind_writes_used = bw;
+
PRINTK(KERN_DEBUG "inc write-behind count %d/%d\n",
- atomic_read(&bitmap->behind_writes), bitmap->max_write_behind);
+ bw, bitmap->max_write_behind);
}
while (sectors) {
@@ -1351,7 +1356,8 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto
{
if (!bitmap) return;
if (behind) {
- atomic_dec(&bitmap->behind_writes);
+ if (atomic_dec_and_test(&bitmap->behind_writes))
+ wake_up(&bitmap->behind_wait);
PRINTK(KERN_DEBUG "dec write-behind count %d/%d\n",
atomic_read(&bitmap->behind_writes), bitmap->max_write_behind);
}
@@ -1675,12 +1681,13 @@ int bitmap_create(mddev_t *mddev)
atomic_set(&bitmap->pending_writes, 0);
init_waitqueue_head(&bitmap->write_wait);
init_waitqueue_head(&bitmap->overflow_wait);
+ init_waitqueue_head(&bitmap->behind_wait);
bitmap->mddev = mddev;
- bm = sysfs_get_dirent(mddev->kobj.sd, "bitmap");
+ bm = sysfs_get_dirent(mddev->kobj.sd, NULL, "bitmap");
if (bm) {
- bitmap->sysfs_can_clear = sysfs_get_dirent(bm, "can_clear");
+ bitmap->sysfs_can_clear = sysfs_get_dirent(bm, NULL, "can_clear");
sysfs_put(bm);
} else
bitmap->sysfs_can_clear = NULL;
@@ -1692,7 +1699,7 @@ int bitmap_create(mddev_t *mddev)
* and bypass the page cache, we must sync the file
* first.
*/
- vfs_fsync(file, file->f_dentry, 1);
+ vfs_fsync(file, 1);
}
/* read superblock from bitmap file (this sets mddev->bitmap_info.chunksize) */
if (!mddev->bitmap_info.external)
@@ -2006,6 +2013,27 @@ static ssize_t can_clear_store(mddev_t *mddev, const char *buf, size_t len)
static struct md_sysfs_entry bitmap_can_clear =
__ATTR(can_clear, S_IRUGO|S_IWUSR, can_clear_show, can_clear_store);
+static ssize_t
+behind_writes_used_show(mddev_t *mddev, char *page)
+{
+ if (mddev->bitmap == NULL)
+ return sprintf(page, "0\n");
+ return sprintf(page, "%lu\n",
+ mddev->bitmap->behind_writes_used);
+}
+
+static ssize_t
+behind_writes_used_reset(mddev_t *mddev, const char *buf, size_t len)
+{
+ if (mddev->bitmap)
+ mddev->bitmap->behind_writes_used = 0;
+ return len;
+}
+
+static struct md_sysfs_entry max_backlog_used =
+__ATTR(max_backlog_used, S_IRUGO | S_IWUSR,
+ behind_writes_used_show, behind_writes_used_reset);
+
static struct attribute *md_bitmap_attrs[] = {
&bitmap_location.attr,
&bitmap_timeout.attr,
@@ -2013,6 +2041,7 @@ static struct attribute *md_bitmap_attrs[] = {
&bitmap_chunksize.attr,
&bitmap_metadata.attr,
&bitmap_can_clear.attr,
+ &max_backlog_used.attr,
NULL
};
struct attribute_group md_bitmap_group = {
diff --git a/drivers/md/bitmap.h b/drivers/md/bitmap.h
index cb821d76d1b4..3797dea4723a 100644
--- a/drivers/md/bitmap.h
+++ b/drivers/md/bitmap.h
@@ -227,6 +227,7 @@ struct bitmap {
int allclean;
atomic_t behind_writes;
+ unsigned long behind_writes_used; /* highest actual value at runtime */
/*
* the bitmap daemon - periodically wakes up and sweeps the bitmap
@@ -239,6 +240,7 @@ struct bitmap {
atomic_t pending_writes; /* pending writes to the bitmap file */
wait_queue_head_t write_wait;
wait_queue_head_t overflow_wait;
+ wait_queue_head_t behind_wait;
struct sysfs_dirent *sysfs_can_clear;
};
diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c
index 8e3850b98cca..1a8987884614 100644
--- a/drivers/md/faulty.c
+++ b/drivers/md/faulty.c
@@ -169,10 +169,9 @@ static void add_sector(conf_t *conf, sector_t start, int mode)
conf->nfaults = n+1;
}
-static int make_request(struct request_queue *q, struct bio *bio)
+static int make_request(mddev_t *mddev, struct bio *bio)
{
- mddev_t *mddev = q->queuedata;
- conf_t *conf = (conf_t*)mddev->private;
+ conf_t *conf = mddev->private;
int failit = 0;
if (bio_data_dir(bio) == WRITE) {
@@ -225,7 +224,7 @@ static int make_request(struct request_queue *q, struct bio *bio)
static void status(struct seq_file *seq, mddev_t *mddev)
{
- conf_t *conf = (conf_t*)mddev->private;
+ conf_t *conf = mddev->private;
int n;
if ((n=atomic_read(&conf->counters[WriteTransient])) != 0)
@@ -328,7 +327,7 @@ static int run(mddev_t *mddev)
static int stop(mddev_t *mddev)
{
- conf_t *conf = (conf_t *)mddev->private;
+ conf_t *conf = mddev->private;
kfree(conf);
mddev->private = NULL;
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 09437e958235..7e0e057db9a7 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -159,7 +159,8 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
sector_t sectors;
if (j < 0 || j >= raid_disks || disk->rdev) {
- printk("linear: disk numbering problem. Aborting!\n");
+ printk(KERN_ERR "md/linear:%s: disk numbering problem. Aborting!\n",
+ mdname(mddev));
goto out;
}
@@ -187,7 +188,8 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
}
if (cnt != raid_disks) {
- printk("linear: not enough drives present. Aborting!\n");
+ printk(KERN_ERR "md/linear:%s: not enough drives present. Aborting!\n",
+ mdname(mddev));
goto out;
}
@@ -282,29 +284,21 @@ static int linear_stop (mddev_t *mddev)
rcu_barrier();
blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
kfree(conf);
+ mddev->private = NULL;
return 0;
}
-static int linear_make_request (struct request_queue *q, struct bio *bio)
+static int linear_make_request (mddev_t *mddev, struct bio *bio)
{
- const int rw = bio_data_dir(bio);
- mddev_t *mddev = q->queuedata;
dev_info_t *tmp_dev;
sector_t start_sector;
- int cpu;
if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
md_barrier_request(mddev, bio);
return 0;
}
- cpu = part_stat_lock();
- part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
- part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
- bio_sectors(bio));
- part_stat_unlock();
-
rcu_read_lock();
tmp_dev = which_dev(mddev, bio->bi_sector);
start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors;
@@ -314,12 +308,14 @@ static int linear_make_request (struct request_queue *q, struct bio *bio)
|| (bio->bi_sector < start_sector))) {
char b[BDEVNAME_SIZE];
- printk("linear_make_request: Sector %llu out of bounds on "
- "dev %s: %llu sectors, offset %llu\n",
- (unsigned long long)bio->bi_sector,
- bdevname(tmp_dev->rdev->bdev, b),
- (unsigned long long)tmp_dev->rdev->sectors,
- (unsigned long long)start_sector);
+ printk(KERN_ERR
+ "md/linear:%s: make_request: Sector %llu out of bounds on "
+ "dev %s: %llu sectors, offset %llu\n",
+ mdname(mddev),
+ (unsigned long long)bio->bi_sector,
+ bdevname(tmp_dev->rdev->bdev, b),
+ (unsigned long long)tmp_dev->rdev->sectors,
+ (unsigned long long)start_sector);
rcu_read_unlock();
bio_io_error(bio);
return 0;
@@ -336,9 +332,9 @@ static int linear_make_request (struct request_queue *q, struct bio *bio)
bp = bio_split(bio, end_sector - bio->bi_sector);
- if (linear_make_request(q, &bp->bio1))
+ if (linear_make_request(mddev, &bp->bio1))
generic_make_request(&bp->bio1);
- if (linear_make_request(q, &bp->bio2))
+ if (linear_make_request(mddev, &bp->bio2))
generic_make_request(&bp->bio2);
bio_pair_release(bp);
return 0;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index cefd63daff31..46b3a044eadf 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -215,8 +215,11 @@ static DEFINE_SPINLOCK(all_mddevs_lock);
*/
static int md_make_request(struct request_queue *q, struct bio *bio)
{
+ const int rw = bio_data_dir(bio);
mddev_t *mddev = q->queuedata;
int rv;
+ int cpu;
+
if (mddev == NULL || mddev->pers == NULL) {
bio_io_error(bio);
return 0;
@@ -237,13 +240,27 @@ static int md_make_request(struct request_queue *q, struct bio *bio)
}
atomic_inc(&mddev->active_io);
rcu_read_unlock();
- rv = mddev->pers->make_request(q, bio);
+
+ rv = mddev->pers->make_request(mddev, bio);
+
+ cpu = part_stat_lock();
+ part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
+ part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
+ bio_sectors(bio));
+ part_stat_unlock();
+
if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
wake_up(&mddev->sb_wait);
return rv;
}
+/* mddev_suspend makes sure no new requests are submitted
+ * to the device, and that any requests that have been submitted
+ * are completely handled.
+ * Once ->stop is called and completes, the module will be completely
+ * unused.
+ */
static void mddev_suspend(mddev_t *mddev)
{
BUG_ON(mddev->suspended);
@@ -251,13 +268,6 @@ static void mddev_suspend(mddev_t *mddev)
synchronize_rcu();
wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
mddev->pers->quiesce(mddev, 1);
- md_unregister_thread(mddev->thread);
- mddev->thread = NULL;
- /* we now know that no code is executing in the personality module,
- * except possibly the tail end of a ->bi_end_io function, but that
- * is certain to complete before the module has a chance to get
- * unloaded
- */
}
static void mddev_resume(mddev_t *mddev)
@@ -344,7 +354,7 @@ static void md_submit_barrier(struct work_struct *ws)
bio_endio(bio, 0);
else {
bio->bi_rw &= ~(1<<BIO_RW_BARRIER);
- if (mddev->pers->make_request(mddev->queue, bio))
+ if (mddev->pers->make_request(mddev, bio))
generic_make_request(bio);
mddev->barrier = POST_REQUEST_BARRIER;
submit_barriers(mddev);
@@ -406,6 +416,27 @@ static void mddev_put(mddev_t *mddev)
spin_unlock(&all_mddevs_lock);
}
+static void mddev_init(mddev_t *mddev)
+{
+ mutex_init(&mddev->open_mutex);
+ mutex_init(&mddev->reconfig_mutex);
+ mutex_init(&mddev->bitmap_info.mutex);
+ INIT_LIST_HEAD(&mddev->disks);
+ INIT_LIST_HEAD(&mddev->all_mddevs);
+ init_timer(&mddev->safemode_timer);
+ atomic_set(&mddev->active, 1);
+ atomic_set(&mddev->openers, 0);
+ atomic_set(&mddev->active_io, 0);
+ spin_lock_init(&mddev->write_lock);
+ atomic_set(&mddev->flush_pending, 0);
+ init_waitqueue_head(&mddev->sb_wait);
+ init_waitqueue_head(&mddev->recovery_wait);
+ mddev->reshape_position = MaxSector;
+ mddev->resync_min = 0;
+ mddev->resync_max = MaxSector;
+ mddev->level = LEVEL_NONE;
+}
+
static mddev_t * mddev_find(dev_t unit)
{
mddev_t *mddev, *new = NULL;
@@ -472,23 +503,7 @@ static mddev_t * mddev_find(dev_t unit)
else
new->md_minor = MINOR(unit) >> MdpMinorShift;
- mutex_init(&new->open_mutex);
- mutex_init(&new->reconfig_mutex);
- mutex_init(&new->bitmap_info.mutex);
- INIT_LIST_HEAD(&new->disks);
- INIT_LIST_HEAD(&new->all_mddevs);
- init_timer(&new->safemode_timer);
- atomic_set(&new->active, 1);
- atomic_set(&new->openers, 0);
- atomic_set(&new->active_io, 0);
- spin_lock_init(&new->write_lock);
- atomic_set(&new->flush_pending, 0);
- init_waitqueue_head(&new->sb_wait);
- init_waitqueue_head(&new->recovery_wait);
- new->reshape_position = MaxSector;
- new->resync_min = 0;
- new->resync_max = MaxSector;
- new->level = LEVEL_NONE;
+ mddev_init(new);
goto retry;
}
@@ -508,9 +523,36 @@ static inline int mddev_trylock(mddev_t * mddev)
return mutex_trylock(&mddev->reconfig_mutex);
}
-static inline void mddev_unlock(mddev_t * mddev)
+static struct attribute_group md_redundancy_group;
+
+static void mddev_unlock(mddev_t * mddev)
{
- mutex_unlock(&mddev->reconfig_mutex);
+ if (mddev->to_remove) {
+ /* These cannot be removed under reconfig_mutex as
+ * an access to the files will try to take reconfig_mutex
+ * while holding the file unremovable, which leads to
+ * a deadlock.
+ * So hold open_mutex instead - we are allowed to take
+ * it while holding reconfig_mutex, and md_run can
+ * use it to wait for the remove to complete.
+ */
+ struct attribute_group *to_remove = mddev->to_remove;
+ mddev->to_remove = NULL;
+ mutex_lock(&mddev->open_mutex);
+ mutex_unlock(&mddev->reconfig_mutex);
+
+ if (to_remove != &md_redundancy_group)
+ sysfs_remove_group(&mddev->kobj, to_remove);
+ if (mddev->pers == NULL ||
+ mddev->pers->sync_request == NULL) {
+ sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
+ if (mddev->sysfs_action)
+ sysfs_put(mddev->sysfs_action);
+ mddev->sysfs_action = NULL;
+ }
+ mutex_unlock(&mddev->open_mutex);
+ } else
+ mutex_unlock(&mddev->reconfig_mutex);
md_wakeup_thread(mddev->thread);
}
@@ -1029,10 +1071,13 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
mddev->bitmap_info.default_offset;
} else if (mddev->pers == NULL) {
- /* Insist on good event counter while assembling */
+ /* Insist on good event counter while assembling, except
+ * for spares (which don't need an event count) */
++ev1;
- if (ev1 < mddev->events)
- return -EINVAL;
+ if (sb->disks[rdev->desc_nr].state & (
+ (1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE)))
+ if (ev1 < mddev->events)
+ return -EINVAL;
} else if (mddev->bitmap) {
/* if adding to array with a bitmap, then we can accept an
* older device ... but not too old.
@@ -1428,10 +1473,14 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
}
} else if (mddev->pers == NULL) {
- /* Insist of good event counter while assembling */
+ /* Insist on good event counter while assembling, except for
+ * spares (which don't need an event count) */
++ev1;
- if (ev1 < mddev->events)
- return -EINVAL;
+ if (rdev->desc_nr >= 0 &&
+ rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
+ le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < 0xfffe)
+ if (ev1 < mddev->events)
+ return -EINVAL;
} else if (mddev->bitmap) {
/* If adding to array with a bitmap, then we can accept an
* older device, but not too old.
@@ -1766,7 +1815,7 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
kobject_del(&rdev->kobj);
goto fail;
}
- rdev->sysfs_state = sysfs_get_dirent(rdev->kobj.sd, "state");
+ rdev->sysfs_state = sysfs_get_dirent(rdev->kobj.sd, NULL, "state");
list_add_rcu(&rdev->same_set, &mddev->disks);
bd_claim_by_disk(rdev->bdev, rdev->bdev->bd_holder, mddev->gendisk);
@@ -2047,7 +2096,6 @@ static void sync_sbs(mddev_t * mddev, int nospares)
if (rdev->sb_events == mddev->events ||
(nospares &&
rdev->raid_disk < 0 &&
- (rdev->sb_events&1)==0 &&
rdev->sb_events+1 == mddev->events)) {
/* Don't update this superblock */
rdev->sb_loaded = 2;
@@ -2100,28 +2148,14 @@ repeat:
* and 'events' is odd, we can roll back to the previous clean state */
if (nospares
&& (mddev->in_sync && mddev->recovery_cp == MaxSector)
- && (mddev->events & 1)
- && mddev->events != 1)
+ && mddev->can_decrease_events
+ && mddev->events != 1) {
mddev->events--;
- else {
+ mddev->can_decrease_events = 0;
+ } else {
/* otherwise we have to go forward and ... */
mddev->events ++;
- if (!mddev->in_sync || mddev->recovery_cp != MaxSector) { /* not clean */
- /* .. if the array isn't clean, an 'even' event must also go
- * to spares. */
- if ((mddev->events&1)==0) {
- nospares = 0;
- sync_req = 2; /* force a second update to get the
- * even/odd in sync */
- }
- } else {
- /* otherwise an 'odd' event must go to spares */
- if ((mddev->events&1)) {
- nospares = 0;
- sync_req = 2; /* force a second update to get the
- * even/odd in sync */
- }
- }
+ mddev->can_decrease_events = nospares;
}
if (!mddev->events) {
@@ -2365,6 +2399,7 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
return err;
sprintf(nm, "rd%d", rdev->raid_disk);
sysfs_remove_link(&rdev->mddev->kobj, nm);
+ rdev->raid_disk = -1;
set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
md_wakeup_thread(rdev->mddev->thread);
} else if (rdev->mddev->pers) {
@@ -2780,8 +2815,9 @@ static void analyze_sbs(mddev_t * mddev)
i = 0;
rdev_for_each(rdev, tmp, mddev) {
- if (rdev->desc_nr >= mddev->max_disks ||
- i > mddev->max_disks) {
+ if (mddev->max_disks &&
+ (rdev->desc_nr >= mddev->max_disks ||
+ i > mddev->max_disks)) {
printk(KERN_WARNING
"md: %s: %s: only %d devices permitted\n",
mdname(mddev), bdevname(rdev->bdev, b),
@@ -2897,9 +2933,10 @@ level_show(mddev_t *mddev, char *page)
static ssize_t
level_store(mddev_t *mddev, const char *buf, size_t len)
{
- char level[16];
+ char clevel[16];
ssize_t rv = len;
struct mdk_personality *pers;
+ long level;
void *priv;
mdk_rdev_t *rdev;
@@ -2932,19 +2969,22 @@ level_store(mddev_t *mddev, const char *buf, size_t len)
}
/* Now find the new personality */
- if (len == 0 || len >= sizeof(level))
+ if (len == 0 || len >= sizeof(clevel))
return -EINVAL;
- strncpy(level, buf, len);
- if (level[len-1] == '\n')
+ strncpy(clevel, buf, len);
+ if (clevel[len-1] == '\n')
len--;
- level[len] = 0;
+ clevel[len] = 0;
+ if (strict_strtol(clevel, 10, &level))
+ level = LEVEL_NONE;
- request_module("md-%s", level);
+ if (request_module("md-%s", clevel) != 0)
+ request_module("md-level-%s", clevel);
spin_lock(&pers_lock);
- pers = find_pers(LEVEL_NONE, level);
+ pers = find_pers(level, clevel);
if (!pers || !try_module_get(pers->owner)) {
spin_unlock(&pers_lock);
- printk(KERN_WARNING "md: personality %s not loaded\n", level);
+ printk(KERN_WARNING "md: personality %s not loaded\n", clevel);
return -EINVAL;
}
spin_unlock(&pers_lock);
@@ -2957,7 +2997,7 @@ level_store(mddev_t *mddev, const char *buf, size_t len)
if (!pers->takeover) {
module_put(pers->owner);
printk(KERN_WARNING "md: %s: %s does not support personality takeover\n",
- mdname(mddev), level);
+ mdname(mddev), clevel);
return -EINVAL;
}
@@ -2973,13 +3013,44 @@ level_store(mddev_t *mddev, const char *buf, size_t len)
mddev->delta_disks = 0;
module_put(pers->owner);
printk(KERN_WARNING "md: %s: %s would not accept array\n",
- mdname(mddev), level);
+ mdname(mddev), clevel);
return PTR_ERR(priv);
}
/* Looks like we have a winner */
mddev_suspend(mddev);
mddev->pers->stop(mddev);
+
+ if (mddev->pers->sync_request == NULL &&
+ pers->sync_request != NULL) {
+ /* need to add the md_redundancy_group */
+ if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
+ printk(KERN_WARNING
+ "md: cannot register extra attributes for %s\n",
+ mdname(mddev));
+ mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, NULL, "sync_action");
+ }
+ if (mddev->pers->sync_request != NULL &&
+ pers->sync_request == NULL) {
+ /* need to remove the md_redundancy_group */
+ if (mddev->to_remove == NULL)
+ mddev->to_remove = &md_redundancy_group;
+ }
+
+ if (mddev->pers->sync_request == NULL &&
+ mddev->external) {
+ /* We are converting from a no-redundancy array
+ * to a redundancy array and metadata is managed
+ * externally so we need to be sure that writes
+ * won't block due to a need to transition
+ * clean->dirty
+ * until external management is started.
+ */
+ mddev->in_sync = 0;
+ mddev->safemode_delay = 0;
+ mddev->safemode = 0;
+ }
+
module_put(mddev->pers->owner);
/* Invalidate devices that are now superfluous */
list_for_each_entry(rdev, &mddev->disks, same_set)
@@ -2994,11 +3065,20 @@ level_store(mddev_t *mddev, const char *buf, size_t len)
mddev->layout = mddev->new_layout;
mddev->chunk_sectors = mddev->new_chunk_sectors;
mddev->delta_disks = 0;
+ if (mddev->pers->sync_request == NULL) {
+ /* this is now an array without redundancy, so
+ * it must always be in_sync
+ */
+ mddev->in_sync = 1;
+ del_timer_sync(&mddev->safemode_timer);
+ }
pers->run(mddev);
mddev_resume(mddev);
set_bit(MD_CHANGE_DEVS, &mddev->flags);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
+ sysfs_notify(&mddev->kobj, NULL, "level");
+ md_new_event(mddev);
return rv;
}
@@ -3237,6 +3317,7 @@ array_state_show(mddev_t *mddev, char *page)
}
static int do_md_stop(mddev_t * mddev, int ro, int is_open);
+static int md_set_readonly(mddev_t * mddev, int is_open);
static int do_md_run(mddev_t * mddev);
static int restart_array(mddev_t *mddev);
@@ -3267,7 +3348,7 @@ array_state_store(mddev_t *mddev, const char *buf, size_t len)
break; /* not supported yet */
case readonly:
if (mddev->pers)
- err = do_md_stop(mddev, 1, 0);
+ err = md_set_readonly(mddev, 0);
else {
mddev->ro = 1;
set_disk_ro(mddev->gendisk, 1);
@@ -3277,7 +3358,7 @@ array_state_store(mddev_t *mddev, const char *buf, size_t len)
case read_auto:
if (mddev->pers) {
if (mddev->ro == 0)
- err = do_md_stop(mddev, 1, 0);
+ err = md_set_readonly(mddev, 0);
else if (mddev->ro == 1)
err = restart_array(mddev);
if (err == 0) {
@@ -4082,15 +4163,6 @@ static void mddev_delayed_delete(struct work_struct *ws)
{
mddev_t *mddev = container_of(ws, mddev_t, del_work);
- if (mddev->private) {
- sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
- if (mddev->private != (void*)1)
- sysfs_remove_group(&mddev->kobj, mddev->private);
- if (mddev->sysfs_action)
- sysfs_put(mddev->sysfs_action);
- mddev->sysfs_action = NULL;
- mddev->private = NULL;
- }
sysfs_remove_group(&mddev->kobj, &md_bitmap_group);
kobject_del(&mddev->kobj);
kobject_put(&mddev->kobj);
@@ -4189,7 +4261,7 @@ static int md_alloc(dev_t dev, char *name)
mutex_unlock(&disks_mutex);
if (!error) {
kobject_uevent(&mddev->kobj, KOBJ_ADD);
- mddev->sysfs_state = sysfs_get_dirent(mddev->kobj.sd, "array_state");
+ mddev->sysfs_state = sysfs_get_dirent(mddev->kobj.sd, NULL, "array_state");
}
mddev_put(mddev);
return error;
@@ -4234,11 +4306,10 @@ static void md_safemode_timeout(unsigned long data)
static int start_dirty_degraded;
-static int do_md_run(mddev_t * mddev)
+static int md_run(mddev_t *mddev)
{
int err;
mdk_rdev_t *rdev;
- struct gendisk *disk;
struct mdk_personality *pers;
if (list_empty(&mddev->disks))
@@ -4248,6 +4319,13 @@ static int do_md_run(mddev_t * mddev)
if (mddev->pers)
return -EBUSY;
+ /* These two calls synchronise us with the
+ * sysfs_remove_group calls in mddev_unlock,
+ * so they must have completed.
+ */
+ mutex_lock(&mddev->open_mutex);
+ mutex_unlock(&mddev->open_mutex);
+
/*
* Analyze all RAID superblock(s)
*/
@@ -4296,8 +4374,6 @@ static int do_md_run(mddev_t * mddev)
sysfs_notify_dirent(rdev->sysfs_state);
}
- disk = mddev->gendisk;
-
spin_lock(&pers_lock);
pers = find_pers(mddev->level, mddev->clevel);
if (!pers || !try_module_get(pers->owner)) {
@@ -4398,7 +4474,7 @@ static int do_md_run(mddev_t * mddev)
printk(KERN_WARNING
"md: cannot register extra attributes for %s\n",
mdname(mddev));
- mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action");
+ mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, NULL, "sync_action");
} else if (mddev->ro == 2) /* auto-readonly not meaningful */
mddev->ro = 0;
@@ -4425,22 +4501,32 @@ static int do_md_run(mddev_t * mddev)
if (mddev->flags)
md_update_sb(mddev, 0);
- set_capacity(disk, mddev->array_sectors);
-
md_wakeup_thread(mddev->thread);
md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
- revalidate_disk(mddev->gendisk);
- mddev->changed = 1;
md_new_event(mddev);
sysfs_notify_dirent(mddev->sysfs_state);
if (mddev->sysfs_action)
sysfs_notify_dirent(mddev->sysfs_action);
sysfs_notify(&mddev->kobj, NULL, "degraded");
- kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
return 0;
}
+static int do_md_run(mddev_t *mddev)
+{
+ int err;
+
+ err = md_run(mddev);
+ if (err)
+ goto out;
+
+ set_capacity(mddev->gendisk, mddev->array_sectors);
+ revalidate_disk(mddev->gendisk);
+ kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
+out:
+ return err;
+}
+
static int restart_array(mddev_t *mddev)
{
struct gendisk *disk = mddev->gendisk;
@@ -4491,9 +4577,110 @@ void restore_bitmap_write_access(struct file *file)
spin_unlock(&inode->i_lock);
}
+static void md_clean(mddev_t *mddev)
+{
+ mddev->array_sectors = 0;
+ mddev->external_size = 0;
+ mddev->dev_sectors = 0;
+ mddev->raid_disks = 0;
+ mddev->recovery_cp = 0;
+ mddev->resync_min = 0;
+ mddev->resync_max = MaxSector;
+ mddev->reshape_position = MaxSector;
+ mddev->external = 0;
+ mddev->persistent = 0;
+ mddev->level = LEVEL_NONE;
+ mddev->clevel[0] = 0;
+ mddev->flags = 0;
+ mddev->ro = 0;
+ mddev->metadata_type[0] = 0;
+ mddev->chunk_sectors = 0;
+ mddev->ctime = mddev->utime = 0;
+ mddev->layout = 0;
+ mddev->max_disks = 0;
+ mddev->events = 0;
+ mddev->can_decrease_events = 0;
+ mddev->delta_disks = 0;
+ mddev->new_level = LEVEL_NONE;
+ mddev->new_layout = 0;
+ mddev->new_chunk_sectors = 0;
+ mddev->curr_resync = 0;
+ mddev->resync_mismatches = 0;
+ mddev->suspend_lo = mddev->suspend_hi = 0;
+ mddev->sync_speed_min = mddev->sync_speed_max = 0;
+ mddev->recovery = 0;
+ mddev->in_sync = 0;
+ mddev->degraded = 0;
+ mddev->barriers_work = 0;
+ mddev->safemode = 0;
+ mddev->bitmap_info.offset = 0;
+ mddev->bitmap_info.default_offset = 0;
+ mddev->bitmap_info.chunksize = 0;
+ mddev->bitmap_info.daemon_sleep = 0;
+ mddev->bitmap_info.max_write_behind = 0;
+}
+
+static void md_stop_writes(mddev_t *mddev)
+{
+ if (mddev->sync_thread) {
+ set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+ md_unregister_thread(mddev->sync_thread);
+ mddev->sync_thread = NULL;
+ }
+
+ del_timer_sync(&mddev->safemode_timer);
+
+ bitmap_flush(mddev);
+ md_super_wait(mddev);
+
+ if (!mddev->in_sync || mddev->flags) {
+ /* mark array as shutdown cleanly */
+ mddev->in_sync = 1;
+ md_update_sb(mddev, 1);
+ }
+}
+
+static void md_stop(mddev_t *mddev)
+{
+ md_stop_writes(mddev);
+
+ mddev->pers->stop(mddev);
+ if (mddev->pers->sync_request && mddev->to_remove == NULL)
+ mddev->to_remove = &md_redundancy_group;
+ module_put(mddev->pers->owner);
+ mddev->pers = NULL;
+ clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+}
+
+static int md_set_readonly(mddev_t *mddev, int is_open)
+{
+ int err = 0;
+ mutex_lock(&mddev->open_mutex);
+ if (atomic_read(&mddev->openers) > is_open) {
+ printk("md: %s still in use.\n", mdname(mddev));
+ err = -EBUSY;
+ goto out;
+ }
+ if (mddev->pers) {
+ md_stop_writes(mddev);
+
+ err = -ENXIO;
+ if (mddev->ro==1)
+ goto out;
+ mddev->ro = 1;
+ set_disk_ro(mddev->gendisk, 1);
+ clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ sysfs_notify_dirent(mddev->sysfs_state);
+ err = 0;
+ }
+out:
+ mutex_unlock(&mddev->open_mutex);
+ return err;
+}
+
/* mode:
* 0 - completely stop and dis-assemble array
- * 1 - switch to readonly
* 2 - stop but do not disassemble array
*/
static int do_md_stop(mddev_t * mddev, int mode, int is_open)
@@ -4508,64 +4695,32 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
err = -EBUSY;
} else if (mddev->pers) {
- if (mddev->sync_thread) {
- set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
- set_bit(MD_RECOVERY_INTR, &mddev->recovery);
- md_unregister_thread(mddev->sync_thread);
- mddev->sync_thread = NULL;
- }
-
- del_timer_sync(&mddev->safemode_timer);
+ if (mddev->ro)
+ set_disk_ro(disk, 0);
- switch(mode) {
- case 1: /* readonly */
- err = -ENXIO;
- if (mddev->ro==1)
- goto out;
- mddev->ro = 1;
- break;
- case 0: /* disassemble */
- case 2: /* stop */
- bitmap_flush(mddev);
- md_super_wait(mddev);
- if (mddev->ro)
- set_disk_ro(disk, 0);
+ md_stop(mddev);
+ mddev->queue->merge_bvec_fn = NULL;
+ mddev->queue->unplug_fn = NULL;
+ mddev->queue->backing_dev_info.congested_fn = NULL;
- mddev->pers->stop(mddev);
- mddev->queue->merge_bvec_fn = NULL;
- mddev->queue->unplug_fn = NULL;
- mddev->queue->backing_dev_info.congested_fn = NULL;
- module_put(mddev->pers->owner);
- if (mddev->pers->sync_request && mddev->private == NULL)
- mddev->private = (void*)1;
- mddev->pers = NULL;
- /* tell userspace to handle 'inactive' */
- sysfs_notify_dirent(mddev->sysfs_state);
+ /* tell userspace to handle 'inactive' */
+ sysfs_notify_dirent(mddev->sysfs_state);
- list_for_each_entry(rdev, &mddev->disks, same_set)
- if (rdev->raid_disk >= 0) {
- char nm[20];
- sprintf(nm, "rd%d", rdev->raid_disk);
- sysfs_remove_link(&mddev->kobj, nm);
- }
+ list_for_each_entry(rdev, &mddev->disks, same_set)
+ if (rdev->raid_disk >= 0) {
+ char nm[20];
+ sprintf(nm, "rd%d", rdev->raid_disk);
+ sysfs_remove_link(&mddev->kobj, nm);
+ }
- set_capacity(disk, 0);
- mddev->changed = 1;
+ set_capacity(disk, 0);
+ revalidate_disk(disk);
- if (mddev->ro)
- mddev->ro = 0;
- }
- if (!mddev->in_sync || mddev->flags) {
- /* mark array as shutdown cleanly */
- mddev->in_sync = 1;
- md_update_sb(mddev, 1);
- }
- if (mode == 1)
- set_disk_ro(disk, 1);
- clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ if (mddev->ro)
+ mddev->ro = 0;
+
err = 0;
}
-out:
mutex_unlock(&mddev->open_mutex);
if (err)
return err;
@@ -4586,52 +4741,12 @@ out:
export_array(mddev);
- mddev->array_sectors = 0;
- mddev->external_size = 0;
- mddev->dev_sectors = 0;
- mddev->raid_disks = 0;
- mddev->recovery_cp = 0;
- mddev->resync_min = 0;
- mddev->resync_max = MaxSector;
- mddev->reshape_position = MaxSector;
- mddev->external = 0;
- mddev->persistent = 0;
- mddev->level = LEVEL_NONE;
- mddev->clevel[0] = 0;
- mddev->flags = 0;
- mddev->ro = 0;
- mddev->metadata_type[0] = 0;
- mddev->chunk_sectors = 0;
- mddev->ctime = mddev->utime = 0;
- mddev->layout = 0;
- mddev->max_disks = 0;
- mddev->events = 0;
- mddev->delta_disks = 0;
- mddev->new_level = LEVEL_NONE;
- mddev->new_layout = 0;
- mddev->new_chunk_sectors = 0;
- mddev->curr_resync = 0;
- mddev->resync_mismatches = 0;
- mddev->suspend_lo = mddev->suspend_hi = 0;
- mddev->sync_speed_min = mddev->sync_speed_max = 0;
- mddev->recovery = 0;
- mddev->in_sync = 0;
- mddev->changed = 0;
- mddev->degraded = 0;
- mddev->barriers_work = 0;
- mddev->safemode = 0;
- mddev->bitmap_info.offset = 0;
- mddev->bitmap_info.default_offset = 0;
- mddev->bitmap_info.chunksize = 0;
- mddev->bitmap_info.daemon_sleep = 0;
- mddev->bitmap_info.max_write_behind = 0;
+ md_clean(mddev);
kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
if (mddev->hold_active == UNTIL_STOP)
mddev->hold_active = 0;
- } else if (mddev->pers)
- printk(KERN_INFO "md: %s switched to read-only mode.\n",
- mdname(mddev));
+ }
err = 0;
blk_integrity_unregister(disk);
md_new_event(mddev);
@@ -5349,7 +5464,7 @@ static int update_raid_disks(mddev_t *mddev, int raid_disks)
if (mddev->pers->check_reshape == NULL)
return -EINVAL;
if (raid_disks <= 0 ||
- raid_disks >= mddev->max_disks)
+ (mddev->max_disks && raid_disks >= mddev->max_disks))
return -EINVAL;
if (mddev->sync_thread || mddev->reshape_position != MaxSector)
return -EBUSY;
@@ -5486,7 +5601,7 @@ static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
geo->heads = 2;
geo->sectors = 4;
- geo->cylinders = get_capacity(mddev->gendisk) / 8;
+ geo->cylinders = mddev->array_sectors / 8;
return 0;
}
@@ -5496,6 +5611,7 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
int err = 0;
void __user *argp = (void __user *)arg;
mddev_t *mddev = NULL;
+ int ro;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
@@ -5628,9 +5744,37 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
goto done_unlock;
case STOP_ARRAY_RO:
- err = do_md_stop(mddev, 1, 1);
+ err = md_set_readonly(mddev, 1);
goto done_unlock;
+ case BLKROSET:
+ if (get_user(ro, (int __user *)(arg))) {
+ err = -EFAULT;
+ goto done_unlock;
+ }
+ err = -EINVAL;
+
+ /* if the bdev is going readonly the value of mddev->ro
+ * does not matter, no writes are coming
+ */
+ if (ro)
+ goto done_unlock;
+
+ /* are we already prepared for writes? */
+ if (mddev->ro != 1)
+ goto done_unlock;
+
+ /* transitioning to readauto need only happen for
+ * arrays that call md_write_start
+ */
+ if (mddev->pers) {
+ err = restart_array(mddev);
+ if (err == 0) {
+ mddev->ro = 2;
+ set_disk_ro(mddev->gendisk, 0);
+ }
+ }
+ goto done_unlock;
}
/*
@@ -5751,7 +5895,6 @@ static int md_open(struct block_device *bdev, fmode_t mode)
atomic_inc(&mddev->openers);
mutex_unlock(&mddev->open_mutex);
- check_disk_change(bdev);
out:
return err;
}
@@ -5766,21 +5909,6 @@ static int md_release(struct gendisk *disk, fmode_t mode)
return 0;
}
-
-static int md_media_changed(struct gendisk *disk)
-{
- mddev_t *mddev = disk->private_data;
-
- return mddev->changed;
-}
-
-static int md_revalidate(struct gendisk *disk)
-{
- mddev_t *mddev = disk->private_data;
-
- mddev->changed = 0;
- return 0;
-}
static const struct block_device_operations md_fops =
{
.owner = THIS_MODULE,
@@ -5791,8 +5919,6 @@ static const struct block_device_operations md_fops =
.compat_ioctl = md_compat_ioctl,
#endif
.getgeo = md_getgeo,
- .media_changed = md_media_changed,
- .revalidate_disk= md_revalidate,
};
static int md_thread(void * arg)
@@ -5906,7 +6032,7 @@ void md_error(mddev_t *mddev, mdk_rdev_t *rdev)
mddev->pers->error_handler(mddev,rdev);
if (mddev->degraded)
set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
- set_bit(StateChanged, &rdev->flags);
+ sysfs_notify_dirent(rdev->sysfs_state);
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
@@ -6898,11 +7024,6 @@ void md_check_recovery(mddev_t *mddev)
if (mddev->flags)
md_update_sb(mddev, 0);
- list_for_each_entry(rdev, &mddev->disks, same_set)
- if (test_and_clear_bit(StateChanged, &rdev->flags))
- sysfs_notify_dirent(rdev->sysfs_state);
-
-
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
!test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
/* resync/recovery still happening */
@@ -7039,7 +7160,7 @@ static int md_notify_reboot(struct notifier_block *this,
* appears to still be in use. Hence
* the '100'.
*/
- do_md_stop(mddev, 1, 100);
+ md_set_readonly(mddev, 100);
mddev_unlock(mddev);
}
/*
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 8e4c75c00d46..7ab5ea155452 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -74,9 +74,6 @@ struct mdk_rdev_s
#define Blocked 8 /* An error occurred on an externally
* managed array, don't allow writes
* until it is cleared */
-#define StateChanged 9 /* Faulty or Blocked has changed during
- * interrupt, so it needs to be
- * notified by the thread */
wait_queue_head_t blocked_wait;
int desc_nr; /* descriptor index in the superblock */
@@ -153,6 +150,12 @@ struct mddev_s
int external_size; /* size managed
* externally */
__u64 events;
+ /* If the last 'event' was simply a clean->dirty transition, and
+ * we didn't write it to the spares, then it is safe and simple
+ * to just decrement the event count on a dirty->clean transition.
+ * So we record that possibility here.
+ */
+ int can_decrease_events;
char uuid[16];
@@ -240,7 +243,6 @@ struct mddev_s
atomic_t active; /* general refcount */
atomic_t openers; /* number of active opens */
- int changed; /* true if we might need to reread partition info */
int degraded; /* whether md should consider
* adding a spare
*/
@@ -279,9 +281,6 @@ struct mddev_s
atomic_t writes_pending;
struct request_queue *queue; /* for plugging ... */
- atomic_t write_behind; /* outstanding async IO */
- unsigned int max_write_behind; /* 0 = sync */
-
struct bitmap *bitmap; /* the bitmap for the device */
struct {
struct file *file; /* the bitmap file */
@@ -305,6 +304,7 @@ struct mddev_s
atomic_t max_corr_read_errors; /* max read retries */
struct list_head all_mddevs;
+ struct attribute_group *to_remove;
/* Generic barrier handling.
* If there is a pending barrier request, all other
* writes are blocked while the devices are flushed.
@@ -336,7 +336,7 @@ struct mdk_personality
int level;
struct list_head list;
struct module *owner;
- int (*make_request)(struct request_queue *q, struct bio *bio);
+ int (*make_request)(mddev_t *mddev, struct bio *bio);
int (*run)(mddev_t *mddev);
int (*stop)(mddev_t *mddev);
void (*status)(struct seq_file *seq, mddev_t *mddev);
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 789bf535d29c..410fb60699ac 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -85,7 +85,7 @@ static void multipath_end_bh_io (struct multipath_bh *mp_bh, int err)
static void multipath_end_request(struct bio *bio, int error)
{
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
- struct multipath_bh * mp_bh = (struct multipath_bh *)(bio->bi_private);
+ struct multipath_bh *mp_bh = bio->bi_private;
multipath_conf_t *conf = mp_bh->mddev->private;
mdk_rdev_t *rdev = conf->multipaths[mp_bh->path].rdev;
@@ -136,14 +136,11 @@ static void multipath_unplug(struct request_queue *q)
}
-static int multipath_make_request (struct request_queue *q, struct bio * bio)
+static int multipath_make_request(mddev_t *mddev, struct bio * bio)
{
- mddev_t *mddev = q->queuedata;
multipath_conf_t *conf = mddev->private;
struct multipath_bh * mp_bh;
struct multipath_info *multipath;
- const int rw = bio_data_dir(bio);
- int cpu;
if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
md_barrier_request(mddev, bio);
@@ -155,12 +152,6 @@ static int multipath_make_request (struct request_queue *q, struct bio * bio)
mp_bh->master_bio = bio;
mp_bh->mddev = mddev;
- cpu = part_stat_lock();
- part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
- part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
- bio_sectors(bio));
- part_stat_unlock();
-
mp_bh->path = multipath_map(conf);
if (mp_bh->path < 0) {
bio_endio(bio, -EIO);
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index c3bec024612e..e70f004c99e8 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -23,15 +23,17 @@
#include <linux/slab.h>
#include "md.h"
#include "raid0.h"
+#include "raid5.h"
static void raid0_unplug(struct request_queue *q)
{
mddev_t *mddev = q->queuedata;
raid0_conf_t *conf = mddev->private;
mdk_rdev_t **devlist = conf->devlist;
+ int raid_disks = conf->strip_zone[0].nb_dev;
int i;
- for (i=0; i<mddev->raid_disks; i++) {
+ for (i=0; i < raid_disks; i++) {
struct request_queue *r_queue = bdev_get_queue(devlist[i]->bdev);
blk_unplug(r_queue);
@@ -43,12 +45,13 @@ static int raid0_congested(void *data, int bits)
mddev_t *mddev = data;
raid0_conf_t *conf = mddev->private;
mdk_rdev_t **devlist = conf->devlist;
+ int raid_disks = conf->strip_zone[0].nb_dev;
int i, ret = 0;
if (mddev_congested(mddev, bits))
return 1;
- for (i = 0; i < mddev->raid_disks && !ret ; i++) {
+ for (i = 0; i < raid_disks && !ret ; i++) {
struct request_queue *q = bdev_get_queue(devlist[i]->bdev);
ret |= bdi_congested(&q->backing_dev_info, bits);
@@ -66,16 +69,17 @@ static void dump_zones(mddev_t *mddev)
sector_t zone_start = 0;
char b[BDEVNAME_SIZE];
raid0_conf_t *conf = mddev->private;
+ int raid_disks = conf->strip_zone[0].nb_dev;
printk(KERN_INFO "******* %s configuration *********\n",
mdname(mddev));
h = 0;
for (j = 0; j < conf->nr_strip_zones; j++) {
printk(KERN_INFO "zone%d=[", j);
for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
- printk("%s/",
- bdevname(conf->devlist[j*mddev->raid_disks
+ printk(KERN_CONT "%s/",
+ bdevname(conf->devlist[j*raid_disks
+ k]->bdev, b));
- printk("]\n");
+ printk(KERN_CONT "]\n");
zone_size = conf->strip_zone[j].zone_end - zone_start;
printk(KERN_INFO " zone offset=%llukb "
@@ -88,7 +92,7 @@ static void dump_zones(mddev_t *mddev)
printk(KERN_INFO "**********************************\n\n");
}
-static int create_strip_zones(mddev_t *mddev)
+static int create_strip_zones(mddev_t *mddev, raid0_conf_t **private_conf)
{
int i, c, err;
sector_t curr_zone_end, sectors;
@@ -101,8 +105,9 @@ static int create_strip_zones(mddev_t *mddev)
if (!conf)
return -ENOMEM;
list_for_each_entry(rdev1, &mddev->disks, same_set) {
- printk(KERN_INFO "raid0: looking at %s\n",
- bdevname(rdev1->bdev,b));
+ printk(KERN_INFO "md/raid0:%s: looking at %s\n",
+ mdname(mddev),
+ bdevname(rdev1->bdev, b));
c = 0;
/* round size to chunk_size */
@@ -111,14 +116,16 @@ static int create_strip_zones(mddev_t *mddev)
rdev1->sectors = sectors * mddev->chunk_sectors;
list_for_each_entry(rdev2, &mddev->disks, same_set) {
- printk(KERN_INFO "raid0: comparing %s(%llu)",
+ printk(KERN_INFO "md/raid0:%s: comparing %s(%llu)",
+ mdname(mddev),
bdevname(rdev1->bdev,b),
(unsigned long long)rdev1->sectors);
- printk(KERN_INFO " with %s(%llu)\n",
+ printk(KERN_CONT " with %s(%llu)\n",
bdevname(rdev2->bdev,b),
(unsigned long long)rdev2->sectors);
if (rdev2 == rdev1) {
- printk(KERN_INFO "raid0: END\n");
+ printk(KERN_INFO "md/raid0:%s: END\n",
+ mdname(mddev));
break;
}
if (rdev2->sectors == rdev1->sectors) {
@@ -126,20 +133,24 @@ static int create_strip_zones(mddev_t *mddev)
* Not unique, don't count it as a new
* group
*/
- printk(KERN_INFO "raid0: EQUAL\n");
+ printk(KERN_INFO "md/raid0:%s: EQUAL\n",
+ mdname(mddev));
c = 1;
break;
}
- printk(KERN_INFO "raid0: NOT EQUAL\n");
+ printk(KERN_INFO "md/raid0:%s: NOT EQUAL\n",
+ mdname(mddev));
}
if (!c) {
- printk(KERN_INFO "raid0: ==> UNIQUE\n");
+ printk(KERN_INFO "md/raid0:%s: ==> UNIQUE\n",
+ mdname(mddev));
conf->nr_strip_zones++;
- printk(KERN_INFO "raid0: %d zones\n",
- conf->nr_strip_zones);
+ printk(KERN_INFO "md/raid0:%s: %d zones\n",
+ mdname(mddev), conf->nr_strip_zones);
}
}
- printk(KERN_INFO "raid0: FINAL %d zones\n", conf->nr_strip_zones);
+ printk(KERN_INFO "md/raid0:%s: FINAL %d zones\n",
+ mdname(mddev), conf->nr_strip_zones);
err = -ENOMEM;
conf->strip_zone = kzalloc(sizeof(struct strip_zone)*
conf->nr_strip_zones, GFP_KERNEL);
@@ -162,14 +173,18 @@ static int create_strip_zones(mddev_t *mddev)
list_for_each_entry(rdev1, &mddev->disks, same_set) {
int j = rdev1->raid_disk;
+ if (mddev->level == 10)
+ /* taking over a raid10-n2 array */
+ j /= 2;
+
if (j < 0 || j >= mddev->raid_disks) {
- printk(KERN_ERR "raid0: bad disk number %d - "
- "aborting!\n", j);
+ printk(KERN_ERR "md/raid0:%s: bad disk number %d - "
+ "aborting!\n", mdname(mddev), j);
goto abort;
}
if (dev[j]) {
- printk(KERN_ERR "raid0: multiple devices for %d - "
- "aborting!\n", j);
+ printk(KERN_ERR "md/raid0:%s: multiple devices for %d - "
+ "aborting!\n", mdname(mddev), j);
goto abort;
}
dev[j] = rdev1;
@@ -191,8 +206,8 @@ static int create_strip_zones(mddev_t *mddev)
cnt++;
}
if (cnt != mddev->raid_disks) {
- printk(KERN_ERR "raid0: too few disks (%d of %d) - "
- "aborting!\n", cnt, mddev->raid_disks);
+ printk(KERN_ERR "md/raid0:%s: too few disks (%d of %d) - "
+ "aborting!\n", mdname(mddev), cnt, mddev->raid_disks);
goto abort;
}
zone->nb_dev = cnt;
@@ -208,39 +223,44 @@ static int create_strip_zones(mddev_t *mddev)
zone = conf->strip_zone + i;
dev = conf->devlist + i * mddev->raid_disks;
- printk(KERN_INFO "raid0: zone %d\n", i);
+ printk(KERN_INFO "md/raid0:%s: zone %d\n",
+ mdname(mddev), i);
zone->dev_start = smallest->sectors;
smallest = NULL;
c = 0;
for (j=0; j<cnt; j++) {
rdev = conf->devlist[j];
- printk(KERN_INFO "raid0: checking %s ...",
- bdevname(rdev->bdev, b));
+ printk(KERN_INFO "md/raid0:%s: checking %s ...",
+ mdname(mddev),
+ bdevname(rdev->bdev, b));
if (rdev->sectors <= zone->dev_start) {
- printk(KERN_INFO " nope.\n");
+ printk(KERN_CONT " nope.\n");
continue;
}
- printk(KERN_INFO " contained as device %d\n", c);
+ printk(KERN_CONT " contained as device %d\n", c);
dev[c] = rdev;
c++;
if (!smallest || rdev->sectors < smallest->sectors) {
smallest = rdev;
- printk(KERN_INFO " (%llu) is smallest!.\n",
- (unsigned long long)rdev->sectors);
+ printk(KERN_INFO "md/raid0:%s: (%llu) is smallest!.\n",
+ mdname(mddev),
+ (unsigned long long)rdev->sectors);
}
}
zone->nb_dev = c;
sectors = (smallest->sectors - zone->dev_start) * c;
- printk(KERN_INFO "raid0: zone->nb_dev: %d, sectors: %llu\n",
- zone->nb_dev, (unsigned long long)sectors);
+ printk(KERN_INFO "md/raid0:%s: zone->nb_dev: %d, sectors: %llu\n",
+ mdname(mddev),
+ zone->nb_dev, (unsigned long long)sectors);
curr_zone_end += sectors;
zone->zone_end = curr_zone_end;
- printk(KERN_INFO "raid0: current zone start: %llu\n",
- (unsigned long long)smallest->sectors);
+ printk(KERN_INFO "md/raid0:%s: current zone start: %llu\n",
+ mdname(mddev),
+ (unsigned long long)smallest->sectors);
}
mddev->queue->unplug_fn = raid0_unplug;
mddev->queue->backing_dev_info.congested_fn = raid0_congested;
@@ -251,7 +271,7 @@ static int create_strip_zones(mddev_t *mddev)
* chunk size is a multiple of that sector size
*/
if ((mddev->chunk_sectors << 9) % queue_logical_block_size(mddev->queue)) {
- printk(KERN_ERR "%s chunk_size of %d not valid\n",
+ printk(KERN_ERR "md/raid0:%s: chunk_size of %d not valid\n",
mdname(mddev),
mddev->chunk_sectors << 9);
goto abort;
@@ -261,14 +281,15 @@ static int create_strip_zones(mddev_t *mddev)
blk_queue_io_opt(mddev->queue,
(mddev->chunk_sectors << 9) * mddev->raid_disks);
- printk(KERN_INFO "raid0: done.\n");
- mddev->private = conf;
+ printk(KERN_INFO "md/raid0:%s: done.\n", mdname(mddev));
+ *private_conf = conf;
+
return 0;
abort:
kfree(conf->strip_zone);
kfree(conf->devlist);
kfree(conf);
- mddev->private = NULL;
+ *private_conf = NULL;
return err;
}
@@ -319,10 +340,12 @@ static sector_t raid0_size(mddev_t *mddev, sector_t sectors, int raid_disks)
static int raid0_run(mddev_t *mddev)
{
+ raid0_conf_t *conf;
int ret;
if (mddev->chunk_sectors == 0) {
- printk(KERN_ERR "md/raid0: chunk size must be set.\n");
+ printk(KERN_ERR "md/raid0:%s: chunk size must be set.\n",
+ mdname(mddev));
return -EINVAL;
}
if (md_check_no_bitmap(mddev))
@@ -330,15 +353,27 @@ static int raid0_run(mddev_t *mddev)
blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
mddev->queue->queue_lock = &mddev->queue->__queue_lock;
- ret = create_strip_zones(mddev);
- if (ret < 0)
- return ret;
+ /* if private is not null, we are here after takeover */
+ if (mddev->private == NULL) {
+ ret = create_strip_zones(mddev, &conf);
+ if (ret < 0)
+ return ret;
+ mddev->private = conf;
+ }
+ conf = mddev->private;
+ if (conf->scale_raid_disks) {
+ int i;
+ for (i=0; i < conf->strip_zone[0].nb_dev; i++)
+ conf->devlist[i]->raid_disk /= conf->scale_raid_disks;
+ /* FIXME update sysfs rd links */
+ }
/* calculate array device size */
md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));
- printk(KERN_INFO "raid0 : md_size is %llu sectors.\n",
- (unsigned long long)mddev->array_sectors);
+ printk(KERN_INFO "md/raid0:%s: md_size is %llu sectors.\n",
+ mdname(mddev),
+ (unsigned long long)mddev->array_sectors);
/* calculate the max read-ahead size.
* For read-ahead of large files to be effective, we need to
* readahead at least twice a whole stripe. i.e. number of devices
@@ -402,6 +437,7 @@ static mdk_rdev_t *map_sector(mddev_t *mddev, struct strip_zone *zone,
unsigned int sect_in_chunk;
sector_t chunk;
raid0_conf_t *conf = mddev->private;
+ int raid_disks = conf->strip_zone[0].nb_dev;
unsigned int chunk_sects = mddev->chunk_sectors;
if (is_power_of_2(chunk_sects)) {
@@ -424,7 +460,7 @@ static mdk_rdev_t *map_sector(mddev_t *mddev, struct strip_zone *zone,
* + the position in the chunk
*/
*sector_offset = (chunk * chunk_sects) + sect_in_chunk;
- return conf->devlist[(zone - conf->strip_zone)*mddev->raid_disks
+ return conf->devlist[(zone - conf->strip_zone)*raid_disks
+ sector_div(sector, zone->nb_dev)];
}
@@ -444,27 +480,18 @@ static inline int is_io_in_chunk_boundary(mddev_t *mddev,
}
}
-static int raid0_make_request(struct request_queue *q, struct bio *bio)
+static int raid0_make_request(mddev_t *mddev, struct bio *bio)
{
- mddev_t *mddev = q->queuedata;
unsigned int chunk_sects;
sector_t sector_offset;
struct strip_zone *zone;
mdk_rdev_t *tmp_dev;
- const int rw = bio_data_dir(bio);
- int cpu;
if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
md_barrier_request(mddev, bio);
return 0;
}
- cpu = part_stat_lock();
- part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
- part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
- bio_sectors(bio));
- part_stat_unlock();
-
chunk_sects = mddev->chunk_sectors;
if (unlikely(!is_io_in_chunk_boundary(mddev, chunk_sects, bio))) {
sector_t sector = bio->bi_sector;
@@ -482,9 +509,9 @@ static int raid0_make_request(struct request_queue *q, struct bio *bio)
else
bp = bio_split(bio, chunk_sects -
sector_div(sector, chunk_sects));
- if (raid0_make_request(q, &bp->bio1))
+ if (raid0_make_request(mddev, &bp->bio1))
generic_make_request(&bp->bio1);
- if (raid0_make_request(q, &bp->bio2))
+ if (raid0_make_request(mddev, &bp->bio2))
generic_make_request(&bp->bio2);
bio_pair_release(bp);
@@ -504,9 +531,10 @@ static int raid0_make_request(struct request_queue *q, struct bio *bio)
return 1;
bad_map:
- printk("raid0_make_request bug: can't convert block across chunks"
- " or bigger than %dk %llu %d\n", chunk_sects / 2,
- (unsigned long long)bio->bi_sector, bio->bi_size >> 10);
+ printk("md/raid0:%s: make_request bug: can't convert block across chunks"
+ " or bigger than %dk %llu %d\n",
+ mdname(mddev), chunk_sects / 2,
+ (unsigned long long)bio->bi_sector, bio->bi_size >> 10);
bio_io_error(bio);
return 0;
@@ -519,6 +547,7 @@ static void raid0_status(struct seq_file *seq, mddev_t *mddev)
int j, k, h;
char b[BDEVNAME_SIZE];
raid0_conf_t *conf = mddev->private;
+ int raid_disks = conf->strip_zone[0].nb_dev;
sector_t zone_size;
sector_t zone_start = 0;
@@ -529,7 +558,7 @@ static void raid0_status(struct seq_file *seq, mddev_t *mddev)
seq_printf(seq, "=[");
for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
seq_printf(seq, "%s/", bdevname(
- conf->devlist[j*mddev->raid_disks + k]
+ conf->devlist[j*raid_disks + k]
->bdev, b));
zone_size = conf->strip_zone[j].zone_end - zone_start;
@@ -544,6 +573,104 @@ static void raid0_status(struct seq_file *seq, mddev_t *mddev)
return;
}
+static void *raid0_takeover_raid5(mddev_t *mddev)
+{
+ mdk_rdev_t *rdev;
+ raid0_conf_t *priv_conf;
+
+ if (mddev->degraded != 1) {
+ printk(KERN_ERR "md/raid0:%s: raid5 must be degraded! Degraded disks: %d\n",
+ mdname(mddev),
+ mddev->degraded);
+ return ERR_PTR(-EINVAL);
+ }
+
+ list_for_each_entry(rdev, &mddev->disks, same_set) {
+ /* check slot number for a disk */
+ if (rdev->raid_disk == mddev->raid_disks-1) {
+ printk(KERN_ERR "md/raid0:%s: raid5 must have missing parity disk!\n",
+ mdname(mddev));
+ return ERR_PTR(-EINVAL);
+ }
+ }
+
+ /* Set new parameters */
+ mddev->new_level = 0;
+ mddev->new_chunk_sectors = mddev->chunk_sectors;
+ mddev->raid_disks--;
+ mddev->delta_disks = -1;
+ /* make sure it will be not marked as dirty */
+ mddev->recovery_cp = MaxSector;
+
+ create_strip_zones(mddev, &priv_conf);
+ return priv_conf;
+}
+
+static void *raid0_takeover_raid10(mddev_t *mddev)
+{
+ raid0_conf_t *priv_conf;
+
+ /* Check layout:
+ * - far_copies must be 1
+ * - near_copies must be 2
+ * - disks number must be even
+ * - all mirrors must be already degraded
+ */
+ if (mddev->layout != ((1 << 8) + 2)) {
+ printk(KERN_ERR "md/raid0:%s:: Raid0 cannot takover layout: 0x%x\n",
+ mdname(mddev),
+ mddev->layout);
+ return ERR_PTR(-EINVAL);
+ }
+ if (mddev->raid_disks & 1) {
+ printk(KERN_ERR "md/raid0:%s: Raid0 cannot takover Raid10 with odd disk number.\n",
+ mdname(mddev));
+ return ERR_PTR(-EINVAL);
+ }
+ if (mddev->degraded != (mddev->raid_disks>>1)) {
+ printk(KERN_ERR "md/raid0:%s: All mirrors must be already degraded!\n",
+ mdname(mddev));
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* Set new parameters */
+ mddev->new_level = 0;
+ mddev->new_chunk_sectors = mddev->chunk_sectors;
+ mddev->delta_disks = - mddev->raid_disks / 2;
+ mddev->raid_disks += mddev->delta_disks;
+ mddev->degraded = 0;
+ /* make sure it will be not marked as dirty */
+ mddev->recovery_cp = MaxSector;
+
+ create_strip_zones(mddev, &priv_conf);
+ priv_conf->scale_raid_disks = 2;
+ return priv_conf;
+}
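
The (1 << 8) + 2 constant above is the packed raid10 layout word for the only geometry this takeover accepts. A hypothetical helper (not part of the patch) that decodes the word the way raid10's setup_conf() does, near copies in bits 0-7, far copies in bits 8-15, far-offset flag in bit 16, shows the check is simply near=2, far=1, no offset:

/* Hypothetical helper, for illustration only: decode an md raid10
 * layout word as raid10's setup_conf() does and test for near-2. */
static inline int raid10_layout_is_near2(int layout)
{
	int nc = layout & 255;		/* near copies */
	int fc = (layout >> 8) & 255;	/* far copies */
	int fo = layout & (1 << 16);	/* far-offset flag */

	return nc == 2 && fc == 1 && !fo;	/* == (1 << 8) + 2 */
}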
+
+static void *raid0_takeover(mddev_t *mddev)
+{
+ /* raid0 can take over:
+ * raid5 - providing it is Raid4 layout and one disk is faulty
+ * raid10 - assuming we have all necessary active disks
+ */
+ if (mddev->level == 5) {
+ if (mddev->layout == ALGORITHM_PARITY_N)
+ return raid0_takeover_raid5(mddev);
+
+ printk(KERN_ERR "md/raid0:%s: Raid can only takeover Raid5 with layout: %d\n",
+ mdname(mddev), ALGORITHM_PARITY_N);
+ }
+
+ if (mddev->level == 10)
+ return raid0_takeover_raid10(mddev);
+
+ return ERR_PTR(-EINVAL);
+}
+
+static void raid0_quiesce(mddev_t *mddev, int state)
+{
+}
+
static struct mdk_personality raid0_personality=
{
.name = "raid0",
@@ -554,6 +681,8 @@ static struct mdk_personality raid0_personality=
.stop = raid0_stop,
.status = raid0_status,
.size = raid0_size,
+ .takeover = raid0_takeover,
+ .quiesce = raid0_quiesce,
};
static int __init raid0_init (void)
diff --git a/drivers/md/raid0.h b/drivers/md/raid0.h
index 91f8e876ee64..d724e664ca4d 100644
--- a/drivers/md/raid0.h
+++ b/drivers/md/raid0.h
@@ -13,6 +13,9 @@ struct raid0_private_data
struct strip_zone *strip_zone;
mdk_rdev_t **devlist; /* lists of rdevs, pointed to by strip_zone->dev */
int nr_strip_zones;
+ int scale_raid_disks; /* divide rdev->raid_disks by this in run()
+ * to handle conversion from raid10
+ */
};
typedef struct raid0_private_data raid0_conf_t;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index e59b10e66edb..a948da8012de 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -263,7 +263,7 @@ static inline void update_head_pos(int disk, r1bio_t *r1_bio)
static void raid1_end_read_request(struct bio *bio, int error)
{
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
- r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
+ r1bio_t *r1_bio = bio->bi_private;
int mirror;
conf_t *conf = r1_bio->mddev->private;
@@ -297,7 +297,8 @@ static void raid1_end_read_request(struct bio *bio, int error)
*/
char b[BDEVNAME_SIZE];
if (printk_ratelimit())
- printk(KERN_ERR "raid1: %s: rescheduling sector %llu\n",
+ printk(KERN_ERR "md/raid1:%s: %s: rescheduling sector %llu\n",
+ mdname(conf->mddev),
bdevname(conf->mirrors[mirror].rdev->bdev,b), (unsigned long long)r1_bio->sector);
reschedule_retry(r1_bio);
}
@@ -308,7 +309,7 @@ static void raid1_end_read_request(struct bio *bio, int error)
static void raid1_end_write_request(struct bio *bio, int error)
{
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
- r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
+ r1bio_t *r1_bio = bio->bi_private;
int mirror, behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
conf_t *conf = r1_bio->mddev->private;
struct bio *to_put = NULL;
@@ -418,7 +419,7 @@ static void raid1_end_write_request(struct bio *bio, int error)
*/
static int read_balance(conf_t *conf, r1bio_t *r1_bio)
{
- const unsigned long this_sector = r1_bio->sector;
+ const sector_t this_sector = r1_bio->sector;
int new_disk = conf->last_used, disk = new_disk;
int wonly_disk = -1;
const int sectors = r1_bio->sectors;
@@ -434,7 +435,7 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
retry:
if (conf->mddev->recovery_cp < MaxSector &&
(this_sector + sectors >= conf->next_resync)) {
- /* Choose the first operation device, for consistancy */
+ /* Choose the first operational device, for consistency */
new_disk = 0;
for (rdev = rcu_dereference(conf->mirrors[new_disk].rdev);
@@ -774,9 +775,8 @@ do_sync_io:
return NULL;
}
-static int make_request(struct request_queue *q, struct bio * bio)
+static int make_request(mddev_t *mddev, struct bio * bio)
{
- mddev_t *mddev = q->queuedata;
conf_t *conf = mddev->private;
mirror_info_t *mirror;
r1bio_t *r1_bio;
@@ -788,7 +788,6 @@ static int make_request(struct request_queue *q, struct bio * bio)
struct page **behind_pages = NULL;
const int rw = bio_data_dir(bio);
const bool do_sync = bio_rw_flagged(bio, BIO_RW_SYNCIO);
- int cpu;
bool do_barriers;
mdk_rdev_t *blocked_rdev;
@@ -834,12 +833,6 @@ static int make_request(struct request_queue *q, struct bio * bio)
bitmap = mddev->bitmap;
- cpu = part_stat_lock();
- part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
- part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
- bio_sectors(bio));
- part_stat_unlock();
-
/*
* make_request() can abort the operation when READA is being
* used and no empty request is available.
@@ -866,6 +859,15 @@ static int make_request(struct request_queue *q, struct bio * bio)
}
mirror = conf->mirrors + rdisk;
+ if (test_bit(WriteMostly, &mirror->rdev->flags) &&
+ bitmap) {
+ /* Reading from a write-mostly device must
+ * take care not to over-take any writes
+ * that are 'behind'
+ */
+ wait_event(bitmap->behind_wait,
+ atomic_read(&bitmap->behind_writes) == 0);
+ }
r1_bio->read_disk = rdisk;
read_bio = bio_clone(bio, GFP_NOIO);
@@ -912,9 +914,10 @@ static int make_request(struct request_queue *q, struct bio * bio)
if (test_bit(Faulty, &rdev->flags)) {
rdev_dec_pending(rdev, mddev);
r1_bio->bios[i] = NULL;
- } else
+ } else {
r1_bio->bios[i] = bio;
- targets++;
+ targets++;
+ }
} else
r1_bio->bios[i] = NULL;
}
@@ -942,10 +945,14 @@ static int make_request(struct request_queue *q, struct bio * bio)
set_bit(R1BIO_Degraded, &r1_bio->state);
}
- /* do behind I/O ? */
+ /* do behind I/O ?
+ * Not if there are too many, or cannot allocate memory,
+ * or a reader on WriteMostly is waiting for behind writes
+ * to flush */
if (bitmap &&
(atomic_read(&bitmap->behind_writes)
< mddev->bitmap_info.max_write_behind) &&
+ !waitqueue_active(&bitmap->behind_wait) &&
(behind_pages = alloc_behind_pages(bio)) != NULL)
set_bit(R1BIO_BehindIO, &r1_bio->state);
@@ -1070,21 +1077,22 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
} else
set_bit(Faulty, &rdev->flags);
set_bit(MD_CHANGE_DEVS, &mddev->flags);
- printk(KERN_ALERT "raid1: Disk failure on %s, disabling device.\n"
- "raid1: Operation continuing on %d devices.\n",
- bdevname(rdev->bdev,b), conf->raid_disks - mddev->degraded);
+ printk(KERN_ALERT "md/raid1:%s: Disk failure on %s, disabling device.\n"
+ KERN_ALERT "md/raid1:%s: Operation continuing on %d devices.\n",
+ mdname(mddev), bdevname(rdev->bdev, b),
+ mdname(mddev), conf->raid_disks - mddev->degraded);
}
static void print_conf(conf_t *conf)
{
int i;
- printk("RAID1 conf printout:\n");
+ printk(KERN_DEBUG "RAID1 conf printout:\n");
if (!conf) {
- printk("(!conf)\n");
+ printk(KERN_DEBUG "(!conf)\n");
return;
}
- printk(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
+ printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
conf->raid_disks);
rcu_read_lock();
@@ -1092,7 +1100,7 @@ static void print_conf(conf_t *conf)
char b[BDEVNAME_SIZE];
mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
if (rdev)
- printk(" disk %d, wo:%d, o:%d, dev:%s\n",
+ printk(KERN_DEBUG " disk %d, wo:%d, o:%d, dev:%s\n",
i, !test_bit(In_sync, &rdev->flags),
!test_bit(Faulty, &rdev->flags),
bdevname(rdev->bdev,b));
@@ -1223,7 +1231,7 @@ abort:
static void end_sync_read(struct bio *bio, int error)
{
- r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
+ r1bio_t *r1_bio = bio->bi_private;
int i;
for (i=r1_bio->mddev->raid_disks; i--; )
@@ -1246,7 +1254,7 @@ static void end_sync_read(struct bio *bio, int error)
static void end_sync_write(struct bio *bio, int error)
{
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
- r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
+ r1bio_t *r1_bio = bio->bi_private;
mddev_t *mddev = r1_bio->mddev;
conf_t *conf = mddev->private;
int i;
@@ -1453,9 +1461,10 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
char b[BDEVNAME_SIZE];
/* Cannot read from anywhere, array is toast */
md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev);
- printk(KERN_ALERT "raid1: %s: unrecoverable I/O read error"
+ printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O read error"
" for block %llu\n",
- bdevname(bio->bi_bdev,b),
+ mdname(mddev),
+ bdevname(bio->bi_bdev, b),
(unsigned long long)r1_bio->sector);
md_done_sync(mddev, r1_bio->sectors, 0);
put_buf(r1_bio);
@@ -1577,7 +1586,7 @@ static void fix_read_error(conf_t *conf, int read_disk,
else {
atomic_add(s, &rdev->corrected_errors);
printk(KERN_INFO
- "raid1:%s: read error corrected "
+ "md/raid1:%s: read error corrected "
"(%d sectors at %llu on %s)\n",
mdname(mddev), s,
(unsigned long long)(sect +
@@ -1682,8 +1691,9 @@ static void raid1d(mddev_t *mddev)
bio = r1_bio->bios[r1_bio->read_disk];
if ((disk=read_balance(conf, r1_bio)) == -1) {
- printk(KERN_ALERT "raid1: %s: unrecoverable I/O"
+ printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O"
" read error for block %llu\n",
+ mdname(mddev),
bdevname(bio->bi_bdev,b),
(unsigned long long)r1_bio->sector);
raid_end_bio_io(r1_bio);
@@ -1697,10 +1707,11 @@ static void raid1d(mddev_t *mddev)
r1_bio->bios[r1_bio->read_disk] = bio;
rdev = conf->mirrors[disk].rdev;
if (printk_ratelimit())
- printk(KERN_ERR "raid1: %s: redirecting sector %llu to"
- " another mirror\n",
- bdevname(rdev->bdev,b),
- (unsigned long long)r1_bio->sector);
+ printk(KERN_ERR "md/raid1:%s: redirecting sector %llu to"
+ " other mirror: %s\n",
+ mdname(mddev),
+ (unsigned long long)r1_bio->sector,
+ bdevname(rdev->bdev,b));
bio->bi_sector = r1_bio->sector + rdev->data_offset;
bio->bi_bdev = rdev->bdev;
bio->bi_end_io = raid1_end_read_request;
@@ -1755,13 +1766,8 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
int still_degraded = 0;
if (!conf->r1buf_pool)
- {
-/*
- printk("sync start - bitmap %p\n", mddev->bitmap);
-*/
if (init_resync(conf))
return 0;
- }
max_sector = mddev->dev_sectors;
if (sector_nr >= max_sector) {
@@ -2042,7 +2048,7 @@ static conf_t *setup_conf(mddev_t *mddev)
err = -EIO;
if (conf->last_used < 0) {
- printk(KERN_ERR "raid1: no operational mirrors for %s\n",
+ printk(KERN_ERR "md/raid1:%s: no operational mirrors\n",
mdname(mddev));
goto abort;
}
@@ -2050,7 +2056,7 @@ static conf_t *setup_conf(mddev_t *mddev)
conf->thread = md_register_thread(raid1d, mddev, NULL);
if (!conf->thread) {
printk(KERN_ERR
- "raid1: couldn't allocate thread for %s\n",
+ "md/raid1:%s: couldn't allocate thread\n",
mdname(mddev));
goto abort;
}
@@ -2076,12 +2082,12 @@ static int run(mddev_t *mddev)
mdk_rdev_t *rdev;
if (mddev->level != 1) {
- printk("raid1: %s: raid level not set to mirroring (%d)\n",
+ printk(KERN_ERR "md/raid1:%s: raid level not set to mirroring (%d)\n",
mdname(mddev), mddev->level);
return -EIO;
}
if (mddev->reshape_position != MaxSector) {
- printk("raid1: %s: reshape_position set but not supported\n",
+ printk(KERN_ERR "md/raid1:%s: reshape_position set but not supported\n",
mdname(mddev));
return -EIO;
}
@@ -2124,11 +2130,11 @@ static int run(mddev_t *mddev)
mddev->recovery_cp = MaxSector;
if (mddev->recovery_cp != MaxSector)
- printk(KERN_NOTICE "raid1: %s is not clean"
+ printk(KERN_NOTICE "md/raid1:%s: not clean"
" -- starting background reconstruction\n",
mdname(mddev));
printk(KERN_INFO
- "raid1: raid set %s active with %d out of %d mirrors\n",
+ "md/raid1:%s: active with %d out of %d mirrors\n",
mdname(mddev), mddev->raid_disks - mddev->degraded,
mddev->raid_disks);
@@ -2152,15 +2158,14 @@ static int stop(mddev_t *mddev)
{
conf_t *conf = mddev->private;
struct bitmap *bitmap = mddev->bitmap;
- int behind_wait = 0;
/* wait for behind writes to complete */
- while (bitmap && atomic_read(&bitmap->behind_writes) > 0) {
- behind_wait++;
- printk(KERN_INFO "raid1: behind writes in progress on device %s, waiting to stop (%d)\n", mdname(mddev), behind_wait);
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(HZ); /* wait a second */
+ if (bitmap && atomic_read(&bitmap->behind_writes) > 0) {
+ printk(KERN_INFO "md/raid1:%s: behind writes in progress - waiting to stop.\n",
+ mdname(mddev));
/* need to kick something here to make sure I/O goes? */
+ wait_event(bitmap->behind_wait,
+ atomic_read(&bitmap->behind_writes) == 0);
}
raise_barrier(conf);
@@ -2191,7 +2196,6 @@ static int raid1_resize(mddev_t *mddev, sector_t sectors)
if (mddev->array_sectors > raid1_size(mddev, sectors, 0))
return -EINVAL;
set_capacity(mddev->gendisk, mddev->array_sectors);
- mddev->changed = 1;
revalidate_disk(mddev->gendisk);
if (sectors > mddev->dev_sectors &&
mddev->recovery_cp == MaxSector) {
@@ -2286,9 +2290,9 @@ static int raid1_reshape(mddev_t *mddev)
if (sysfs_create_link(&mddev->kobj,
&rdev->kobj, nm))
printk(KERN_WARNING
- "md/raid1: cannot register "
- "%s for %s\n",
- nm, mdname(mddev));
+ "md/raid1:%s: cannot register "
+ "%s\n",
+ mdname(mddev), nm);
}
if (rdev)
newmirrors[d2++].rdev = rdev;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index e2766d8251a1..03724992cdf2 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -24,6 +24,7 @@
#include <linux/seq_file.h>
#include "md.h"
#include "raid10.h"
+#include "raid0.h"
#include "bitmap.h"
/*
@@ -255,7 +256,7 @@ static inline void update_head_pos(int slot, r10bio_t *r10_bio)
static void raid10_end_read_request(struct bio *bio, int error)
{
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
- r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
+ r10bio_t *r10_bio = bio->bi_private;
int slot, dev;
conf_t *conf = r10_bio->mddev->private;
@@ -285,7 +286,8 @@ static void raid10_end_read_request(struct bio *bio, int error)
*/
char b[BDEVNAME_SIZE];
if (printk_ratelimit())
- printk(KERN_ERR "raid10: %s: rescheduling sector %llu\n",
+ printk(KERN_ERR "md/raid10:%s: %s: rescheduling sector %llu\n",
+ mdname(conf->mddev),
bdevname(conf->mirrors[dev].rdev->bdev,b), (unsigned long long)r10_bio->sector);
reschedule_retry(r10_bio);
}
@@ -296,7 +298,7 @@ static void raid10_end_read_request(struct bio *bio, int error)
static void raid10_end_write_request(struct bio *bio, int error)
{
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
- r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
+ r10bio_t *r10_bio = bio->bi_private;
int slot, dev;
conf_t *conf = r10_bio->mddev->private;
@@ -494,7 +496,7 @@ static int raid10_mergeable_bvec(struct request_queue *q,
*/
static int read_balance(conf_t *conf, r10bio_t *r10_bio)
{
- const unsigned long this_sector = r10_bio->sector;
+ const sector_t this_sector = r10_bio->sector;
int disk, slot, nslot;
const int sectors = r10_bio->sectors;
sector_t new_distance, current_distance;
@@ -601,7 +603,7 @@ static void unplug_slaves(mddev_t *mddev)
int i;
rcu_read_lock();
- for (i=0; i<mddev->raid_disks; i++) {
+ for (i=0; i < conf->raid_disks; i++) {
mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
@@ -635,7 +637,7 @@ static int raid10_congested(void *data, int bits)
if (mddev_congested(mddev, bits))
return 1;
rcu_read_lock();
- for (i = 0; i < mddev->raid_disks && ret == 0; i++) {
+ for (i = 0; i < conf->raid_disks && ret == 0; i++) {
mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
if (rdev && !test_bit(Faulty, &rdev->flags)) {
struct request_queue *q = bdev_get_queue(rdev->bdev);
@@ -788,14 +790,12 @@ static void unfreeze_array(conf_t *conf)
spin_unlock_irq(&conf->resync_lock);
}
-static int make_request(struct request_queue *q, struct bio * bio)
+static int make_request(mddev_t *mddev, struct bio * bio)
{
- mddev_t *mddev = q->queuedata;
conf_t *conf = mddev->private;
mirror_info_t *mirror;
r10bio_t *r10_bio;
struct bio *read_bio;
- int cpu;
int i;
int chunk_sects = conf->chunk_mask + 1;
const int rw = bio_data_dir(bio);
@@ -825,16 +825,16 @@ static int make_request(struct request_queue *q, struct bio * bio)
*/
bp = bio_split(bio,
chunk_sects - (bio->bi_sector & (chunk_sects - 1)) );
- if (make_request(q, &bp->bio1))
+ if (make_request(mddev, &bp->bio1))
generic_make_request(&bp->bio1);
- if (make_request(q, &bp->bio2))
+ if (make_request(mddev, &bp->bio2))
generic_make_request(&bp->bio2);
bio_pair_release(bp);
return 0;
bad_map:
- printk("raid10_make_request bug: can't convert block across chunks"
- " or bigger than %dk %llu %d\n", chunk_sects/2,
+ printk("md/raid10:%s: make_request bug: can't convert block across chunks"
+ " or bigger than %dk %llu %d\n", mdname(mddev), chunk_sects/2,
(unsigned long long)bio->bi_sector, bio->bi_size >> 10);
bio_io_error(bio);
@@ -850,12 +850,6 @@ static int make_request(struct request_queue *q, struct bio * bio)
*/
wait_barrier(conf);
- cpu = part_stat_lock();
- part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
- part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
- bio_sectors(bio));
- part_stat_unlock();
-
r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
r10_bio->master_bio = bio;
@@ -1039,9 +1033,10 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
}
set_bit(Faulty, &rdev->flags);
set_bit(MD_CHANGE_DEVS, &mddev->flags);
- printk(KERN_ALERT "raid10: Disk failure on %s, disabling device.\n"
- "raid10: Operation continuing on %d devices.\n",
- bdevname(rdev->bdev,b), conf->raid_disks - mddev->degraded);
+ printk(KERN_ALERT "md/raid10:%s: Disk failure on %s, disabling device.\n"
+ KERN_ALERT "md/raid10:%s: Operation continuing on %d devices.\n",
+ mdname(mddev), bdevname(rdev->bdev, b),
+ mdname(mddev), conf->raid_disks - mddev->degraded);
}
static void print_conf(conf_t *conf)
@@ -1049,19 +1044,19 @@ static void print_conf(conf_t *conf)
int i;
mirror_info_t *tmp;
- printk("RAID10 conf printout:\n");
+ printk(KERN_DEBUG "RAID10 conf printout:\n");
if (!conf) {
- printk("(!conf)\n");
+ printk(KERN_DEBUG "(!conf)\n");
return;
}
- printk(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
+ printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
conf->raid_disks);
for (i = 0; i < conf->raid_disks; i++) {
char b[BDEVNAME_SIZE];
tmp = conf->mirrors + i;
if (tmp->rdev)
- printk(" disk %d, wo:%d, o:%d, dev:%s\n",
+ printk(KERN_DEBUG " disk %d, wo:%d, o:%d, dev:%s\n",
i, !test_bit(In_sync, &tmp->rdev->flags),
!test_bit(Faulty, &tmp->rdev->flags),
bdevname(tmp->rdev->bdev,b));
@@ -1132,7 +1127,7 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
int mirror;
mirror_info_t *p;
int first = 0;
- int last = mddev->raid_disks - 1;
+ int last = conf->raid_disks - 1;
if (mddev->recovery_cp < MaxSector)
/* only hot-add to in-sync arrays, as recovery is
@@ -1224,7 +1219,7 @@ abort:
static void end_sync_read(struct bio *bio, int error)
{
- r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
+ r10bio_t *r10_bio = bio->bi_private;
conf_t *conf = r10_bio->mddev->private;
int i,d;
@@ -1261,7 +1256,7 @@ static void end_sync_read(struct bio *bio, int error)
static void end_sync_write(struct bio *bio, int error)
{
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
- r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
+ r10bio_t *r10_bio = bio->bi_private;
mddev_t *mddev = r10_bio->mddev;
conf_t *conf = mddev->private;
int i,d;
@@ -1510,13 +1505,14 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
if (cur_read_error_count > max_read_errors) {
rcu_read_unlock();
printk(KERN_NOTICE
- "raid10: %s: Raid device exceeded "
+ "md/raid10:%s: %s: Raid device exceeded "
"read_error threshold "
"[cur %d:max %d]\n",
+ mdname(mddev),
b, cur_read_error_count, max_read_errors);
printk(KERN_NOTICE
- "raid10: %s: Failing raid "
- "device\n", b);
+ "md/raid10:%s: %s: Failing raid "
+ "device\n", mdname(mddev), b);
md_error(mddev, conf->mirrors[d].rdev);
return;
}
@@ -1586,15 +1582,16 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
== 0) {
/* Well, this device is dead */
printk(KERN_NOTICE
- "raid10:%s: read correction "
+ "md/raid10:%s: read correction "
"write failed"
" (%d sectors at %llu on %s)\n",
mdname(mddev), s,
(unsigned long long)(sect+
rdev->data_offset),
bdevname(rdev->bdev, b));
- printk(KERN_NOTICE "raid10:%s: failing "
+ printk(KERN_NOTICE "md/raid10:%s: %s: failing "
"drive\n",
+ mdname(mddev),
bdevname(rdev->bdev, b));
md_error(mddev, rdev);
}
@@ -1622,20 +1619,21 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
READ) == 0) {
/* Well, this device is dead */
printk(KERN_NOTICE
- "raid10:%s: unable to read back "
+ "md/raid10:%s: unable to read back "
"corrected sectors"
" (%d sectors at %llu on %s)\n",
mdname(mddev), s,
(unsigned long long)(sect+
rdev->data_offset),
bdevname(rdev->bdev, b));
- printk(KERN_NOTICE "raid10:%s: failing drive\n",
+ printk(KERN_NOTICE "md/raid10:%s: %s: failing drive\n",
+ mdname(mddev),
bdevname(rdev->bdev, b));
md_error(mddev, rdev);
} else {
printk(KERN_INFO
- "raid10:%s: read error corrected"
+ "md/raid10:%s: read error corrected"
" (%d sectors at %llu on %s)\n",
mdname(mddev), s,
(unsigned long long)(sect+
@@ -1710,8 +1708,9 @@ static void raid10d(mddev_t *mddev)
mddev->ro ? IO_BLOCKED : NULL;
mirror = read_balance(conf, r10_bio);
if (mirror == -1) {
- printk(KERN_ALERT "raid10: %s: unrecoverable I/O"
+ printk(KERN_ALERT "md/raid10:%s: %s: unrecoverable I/O"
" read error for block %llu\n",
+ mdname(mddev),
bdevname(bio->bi_bdev,b),
(unsigned long long)r10_bio->sector);
raid_end_bio_io(r10_bio);
@@ -1721,8 +1720,9 @@ static void raid10d(mddev_t *mddev)
bio_put(bio);
rdev = conf->mirrors[mirror].rdev;
if (printk_ratelimit())
- printk(KERN_ERR "raid10: %s: redirecting sector %llu to"
+ printk(KERN_ERR "md/raid10:%s: %s: redirecting sector %llu to"
" another mirror\n",
+ mdname(mddev),
bdevname(rdev->bdev,b),
(unsigned long long)r10_bio->sector);
bio = bio_clone(r10_bio->master_bio, GFP_NOIO);
@@ -1980,7 +1980,8 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
r10_bio = rb2;
if (!test_and_set_bit(MD_RECOVERY_INTR,
&mddev->recovery))
- printk(KERN_INFO "raid10: %s: insufficient working devices for recovery.\n",
+ printk(KERN_INFO "md/raid10:%s: insufficient "
+ "working devices for recovery.\n",
mdname(mddev));
break;
}
@@ -2140,9 +2141,9 @@ raid10_size(mddev_t *mddev, sector_t sectors, int raid_disks)
conf_t *conf = mddev->private;
if (!raid_disks)
- raid_disks = mddev->raid_disks;
+ raid_disks = conf->raid_disks;
if (!sectors)
- sectors = mddev->dev_sectors;
+ sectors = conf->dev_sectors;
size = sectors >> conf->chunk_shift;
sector_div(size, conf->far_copies);
@@ -2152,62 +2153,61 @@ raid10_size(mddev_t *mddev, sector_t sectors, int raid_disks)
return size << conf->chunk_shift;
}
-static int run(mddev_t *mddev)
+
+static conf_t *setup_conf(mddev_t *mddev)
{
- conf_t *conf;
- int i, disk_idx, chunk_size;
- mirror_info_t *disk;
- mdk_rdev_t *rdev;
+ conf_t *conf = NULL;
int nc, fc, fo;
sector_t stride, size;
+ int err = -EINVAL;
if (mddev->chunk_sectors < (PAGE_SIZE >> 9) ||
!is_power_of_2(mddev->chunk_sectors)) {
- printk(KERN_ERR "md/raid10: chunk size must be "
- "at least PAGE_SIZE(%ld) and be a power of 2.\n", PAGE_SIZE);
- return -EINVAL;
+ printk(KERN_ERR "md/raid10:%s: chunk size must be "
+ "at least PAGE_SIZE(%ld) and be a power of 2.\n",
+ mdname(mddev), PAGE_SIZE);
+ goto out;
}
nc = mddev->layout & 255;
fc = (mddev->layout >> 8) & 255;
fo = mddev->layout & (1<<16);
+
if ((nc*fc) <2 || (nc*fc) > mddev->raid_disks ||
(mddev->layout >> 17)) {
- printk(KERN_ERR "raid10: %s: unsupported raid10 layout: 0x%8x\n",
+ printk(KERN_ERR "md/raid10:%s: unsupported raid10 layout: 0x%8x\n",
mdname(mddev), mddev->layout);
goto out;
}
- /*
- * copy the already verified devices into our private RAID10
- * bookkeeping area. [whatever we allocate in run(),
- * should be freed in stop()]
- */
+
+ err = -ENOMEM;
conf = kzalloc(sizeof(conf_t), GFP_KERNEL);
- mddev->private = conf;
- if (!conf) {
- printk(KERN_ERR "raid10: couldn't allocate memory for %s\n",
- mdname(mddev));
+ if (!conf)
goto out;
- }
+
conf->mirrors = kzalloc(sizeof(struct mirror_info)*mddev->raid_disks,
- GFP_KERNEL);
- if (!conf->mirrors) {
- printk(KERN_ERR "raid10: couldn't allocate memory for %s\n",
- mdname(mddev));
- goto out_free_conf;
- }
+ GFP_KERNEL);
+ if (!conf->mirrors)
+ goto out;
conf->tmppage = alloc_page(GFP_KERNEL);
if (!conf->tmppage)
- goto out_free_conf;
+ goto out;
+
conf->raid_disks = mddev->raid_disks;
conf->near_copies = nc;
conf->far_copies = fc;
conf->copies = nc*fc;
conf->far_offset = fo;
- conf->chunk_mask = mddev->chunk_sectors - 1;
- conf->chunk_shift = ffz(~mddev->chunk_sectors);
+ conf->chunk_mask = mddev->new_chunk_sectors - 1;
+ conf->chunk_shift = ffz(~mddev->new_chunk_sectors);
+
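+ /* Since non-power-of-two chunk sizes were rejected above, ffz(~x) is
+ * just log2(x): e.g. a 64-sector (32 KiB) chunk gives chunk_shift = 6
+ * and chunk_mask = 63. */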
+ conf->r10bio_pool = mempool_create(NR_RAID10_BIOS, r10bio_pool_alloc,
+ r10bio_pool_free, conf);
+ if (!conf->r10bio_pool)
+ goto out;
+
size = mddev->dev_sectors >> conf->chunk_shift;
sector_div(size, fc);
size = size * conf->raid_disks;
@@ -2221,7 +2221,8 @@ static int run(mddev_t *mddev)
*/
stride += conf->raid_disks - 1;
sector_div(stride, conf->raid_disks);
- mddev->dev_sectors = stride << conf->chunk_shift;
+
+ conf->dev_sectors = stride << conf->chunk_shift;
if (fo)
stride = 1;
@@ -2229,18 +2230,63 @@ static int run(mddev_t *mddev)
sector_div(stride, fc);
conf->stride = stride << conf->chunk_shift;
- conf->r10bio_pool = mempool_create(NR_RAID10_BIOS, r10bio_pool_alloc,
- r10bio_pool_free, conf);
- if (!conf->r10bio_pool) {
- printk(KERN_ERR "raid10: couldn't allocate memory for %s\n",
- mdname(mddev));
- goto out_free_conf;
- }
- conf->mddev = mddev;
spin_lock_init(&conf->device_lock);
+ INIT_LIST_HEAD(&conf->retry_list);
+
+ spin_lock_init(&conf->resync_lock);
+ init_waitqueue_head(&conf->wait_barrier);
+
+ conf->thread = md_register_thread(raid10d, mddev, NULL);
+ if (!conf->thread)
+ goto out;
+
+ conf->scale_disks = 0;
+ conf->mddev = mddev;
+ return conf;
+
+ out:
+ printk(KERN_ERR "md/raid10:%s: couldn't allocate memory.\n",
+ mdname(mddev));
+ if (conf) {
+ if (conf->r10bio_pool)
+ mempool_destroy(conf->r10bio_pool);
+ kfree(conf->mirrors);
+ safe_put_page(conf->tmppage);
+ kfree(conf);
+ }
+ return ERR_PTR(err);
+}
+
+static int run(mddev_t *mddev)
+{
+ conf_t *conf;
+ int i, disk_idx, chunk_size;
+ mirror_info_t *disk;
+ mdk_rdev_t *rdev;
+ sector_t size;
+
+ /*
+ * copy the already verified devices into our private RAID10
+ * bookkeeping area. [whatever we allocate in run(),
+ * should be freed in stop()]
+ */
+
+ if (mddev->private == NULL) {
+ conf = setup_conf(mddev);
+ if (IS_ERR(conf))
+ return PTR_ERR(conf);
+ mddev->private = conf;
+ }
+ conf = mddev->private;
+ if (!conf)
+ goto out;
+
mddev->queue->queue_lock = &conf->device_lock;
+ mddev->thread = conf->thread;
+ conf->thread = NULL;
+
chunk_size = mddev->chunk_sectors << 9;
blk_queue_io_min(mddev->queue, chunk_size);
if (conf->raid_disks % conf->near_copies)
@@ -2251,9 +2297,14 @@ static int run(mddev_t *mddev)
list_for_each_entry(rdev, &mddev->disks, same_set) {
disk_idx = rdev->raid_disk;
- if (disk_idx >= mddev->raid_disks
+ if (disk_idx >= conf->raid_disks
|| disk_idx < 0)
continue;
+ if (conf->scale_disks) {
+ disk_idx *= conf->scale_disks;
+ rdev->raid_disk = disk_idx;
+ /* MOVE 'rd%d' link !! */
+ }
disk = conf->mirrors + disk_idx;
disk->rdev = rdev;
@@ -2271,14 +2322,9 @@ static int run(mddev_t *mddev)
disk->head_position = 0;
}
- INIT_LIST_HEAD(&conf->retry_list);
-
- spin_lock_init(&conf->resync_lock);
- init_waitqueue_head(&conf->wait_barrier);
-
/* need to check that every block has at least one working mirror */
if (!enough(conf)) {
- printk(KERN_ERR "raid10: not enough operational mirrors for %s\n",
+ printk(KERN_ERR "md/raid10:%s: not enough operational mirrors.\n",
mdname(mddev));
goto out_free_conf;
}
@@ -2297,28 +2343,21 @@ static int run(mddev_t *mddev)
}
}
-
- mddev->thread = md_register_thread(raid10d, mddev, NULL);
- if (!mddev->thread) {
- printk(KERN_ERR
- "raid10: couldn't allocate thread for %s\n",
- mdname(mddev));
- goto out_free_conf;
- }
-
if (mddev->recovery_cp != MaxSector)
- printk(KERN_NOTICE "raid10: %s is not clean"
+ printk(KERN_NOTICE "md/raid10:%s: not clean"
" -- starting background reconstruction\n",
mdname(mddev));
printk(KERN_INFO
- "raid10: raid set %s active with %d out of %d devices\n",
- mdname(mddev), mddev->raid_disks - mddev->degraded,
- mddev->raid_disks);
+ "md/raid10:%s: active with %d out of %d devices\n",
+ mdname(mddev), conf->raid_disks - mddev->degraded,
+ conf->raid_disks);
/*
* Ok, everything is just fine now
*/
- md_set_array_sectors(mddev, raid10_size(mddev, 0, 0));
- mddev->resync_max_sectors = raid10_size(mddev, 0, 0);
+ mddev->dev_sectors = conf->dev_sectors;
+ size = raid10_size(mddev, 0, 0);
+ md_set_array_sectors(mddev, size);
+ mddev->resync_max_sectors = size;
mddev->queue->unplug_fn = raid10_unplug;
mddev->queue->backing_dev_info.congested_fn = raid10_congested;
@@ -2336,7 +2375,7 @@ static int run(mddev_t *mddev)
mddev->queue->backing_dev_info.ra_pages = 2* stripe;
}
- if (conf->near_copies < mddev->raid_disks)
+ if (conf->near_copies < conf->raid_disks)
blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec);
md_integrity_register(mddev);
return 0;
@@ -2348,6 +2387,7 @@ out_free_conf:
kfree(conf->mirrors);
kfree(conf);
mddev->private = NULL;
+ md_unregister_thread(mddev->thread);
out:
return -EIO;
}
@@ -2384,6 +2424,61 @@ static void raid10_quiesce(mddev_t *mddev, int state)
}
}
+static void *raid10_takeover_raid0(mddev_t *mddev)
+{
+ mdk_rdev_t *rdev;
+ conf_t *conf;
+
+ if (mddev->degraded > 0) {
+ printk(KERN_ERR "md/raid10:%s: Error: degraded raid0!\n",
+ mdname(mddev));
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* Update slot numbers to obtain
+ * degraded raid10 with missing mirrors
+ */
+ list_for_each_entry(rdev, &mddev->disks, same_set) {
+ rdev->raid_disk *= 2;
+ }
+
+ /* Set new parameters */
+ mddev->new_level = 10;
+ /* new layout: far_copies = 1, near_copies = 2 */
+ mddev->new_layout = (1<<8) + 2;
+ mddev->new_chunk_sectors = mddev->chunk_sectors;
+ mddev->delta_disks = mddev->raid_disks;
+ mddev->degraded = mddev->raid_disks;
+ mddev->raid_disks *= 2;
+ /* make sure it will be not marked as dirty */
+ mddev->recovery_cp = MaxSector;
+
+ conf = setup_conf(mddev);
+ conf->scale_disks = 2;
+ return conf;
+}
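
To make the renumbering concrete: a hypothetical 3-drive raid0 (slots 0, 1, 2) comes out of this takeover as a 6-slot near-2 raid10 with the data copies in slots 0, 2 and 4 and the mirror slots 1, 3 and 5 missing, which is why degraded is set to the old raid_disks value just before raid_disks is doubled.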
+
+static void *raid10_takeover(mddev_t *mddev)
+{
+ struct raid0_private_data *raid0_priv;
+
+ /* raid10 can take over:
+ * raid0 - providing it has only two drives
+ */
+ if (mddev->level == 0) {
+ /* for raid0 takeover only one zone is supported */
+ raid0_priv = mddev->private;
+ if (raid0_priv->nr_strip_zones > 1) {
+ printk(KERN_ERR "md/raid10:%s: cannot takeover raid 0"
+ " with more than one zone.\n",
+ mdname(mddev));
+ return ERR_PTR(-EINVAL);
+ }
+ return raid10_takeover_raid0(mddev);
+ }
+ return ERR_PTR(-EINVAL);
+}
+
static struct mdk_personality raid10_personality =
{
.name = "raid10",
@@ -2400,6 +2495,7 @@ static struct mdk_personality raid10_personality =
.sync_request = sync_request,
.quiesce = raid10_quiesce,
.size = raid10_size,
+ .takeover = raid10_takeover,
};
static int __init raid_init(void)
diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h
index 59cd1efb8d30..3824a087e17c 100644
--- a/drivers/md/raid10.h
+++ b/drivers/md/raid10.h
@@ -33,9 +33,16 @@ struct r10_private_data_s {
* 1 stripe.
*/
+ sector_t dev_sectors; /* temp copy of mddev->dev_sectors */
+
int chunk_shift; /* shift from chunks to sectors */
sector_t chunk_mask;
+ int scale_disks; /* When starting array, multiply
+ * each ->raid_disk by this.
+ * Need for raid0->raid10 migration
+ */
+
struct list_head retry_list;
/* queue pending writes and submit them on unplug */
struct bio_list pending_bio_list;
@@ -57,6 +64,11 @@ struct r10_private_data_s {
mempool_t *r10bio_pool;
mempool_t *r10buf_pool;
struct page *tmppage;
+
+ /* When taking over an array from a different personality, we store
+ * the new thread here until we fully activate the array.
+ */
+ struct mdk_thread_s *thread;
};
typedef struct r10_private_data_s conf_t;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 15348c393b5d..d2c0f94fa37d 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -53,6 +53,7 @@
#include <linux/slab.h>
#include "md.h"
#include "raid5.h"
+#include "raid0.h"
#include "bitmap.h"
/*
@@ -1509,7 +1510,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
set_bit(R5_UPTODATE, &sh->dev[i].flags);
if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
rdev = conf->disks[i].rdev;
- printk_rl(KERN_INFO "raid5:%s: read error corrected"
+ printk_rl(KERN_INFO "md/raid:%s: read error corrected"
" (%lu sectors at %llu on %s)\n",
mdname(conf->mddev), STRIPE_SECTORS,
(unsigned long long)(sh->sector
@@ -1529,7 +1530,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
atomic_inc(&rdev->read_errors);
if (conf->mddev->degraded >= conf->max_degraded)
printk_rl(KERN_WARNING
- "raid5:%s: read error not correctable "
+ "md/raid:%s: read error not correctable "
"(sector %llu on %s).\n",
mdname(conf->mddev),
(unsigned long long)(sh->sector
@@ -1538,7 +1539,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
else if (test_bit(R5_ReWrite, &sh->dev[i].flags))
/* Oh, no!!! */
printk_rl(KERN_WARNING
- "raid5:%s: read error NOT corrected!! "
+ "md/raid:%s: read error NOT corrected!! "
"(sector %llu on %s).\n",
mdname(conf->mddev),
(unsigned long long)(sh->sector
@@ -1547,7 +1548,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
else if (atomic_read(&rdev->read_errors)
> conf->max_nr_stripes)
printk(KERN_WARNING
- "raid5:%s: Too many read errors, failing device %s.\n",
+ "md/raid:%s: Too many read errors, failing device %s.\n",
mdname(conf->mddev), bdn);
else
retry = 1;
@@ -1619,8 +1620,8 @@ static void raid5_build_block(struct stripe_head *sh, int i, int previous)
static void error(mddev_t *mddev, mdk_rdev_t *rdev)
{
char b[BDEVNAME_SIZE];
- raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
- pr_debug("raid5: error called\n");
+ raid5_conf_t *conf = mddev->private;
+ pr_debug("raid456: error called\n");
if (!test_bit(Faulty, &rdev->flags)) {
set_bit(MD_CHANGE_DEVS, &mddev->flags);
@@ -1636,9 +1637,13 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
}
set_bit(Faulty, &rdev->flags);
printk(KERN_ALERT
- "raid5: Disk failure on %s, disabling device.\n"
- "raid5: Operation continuing on %d devices.\n",
- bdevname(rdev->bdev,b), conf->raid_disks - mddev->degraded);
+ "md/raid:%s: Disk failure on %s, disabling device.\n"
+ KERN_ALERT
+ "md/raid:%s: Operation continuing on %d devices.\n",
+ mdname(mddev),
+ bdevname(rdev->bdev, b),
+ mdname(mddev),
+ conf->raid_disks - mddev->degraded);
}
}
@@ -1714,8 +1719,6 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
pd_idx = data_disks;
break;
default:
- printk(KERN_ERR "raid5: unsupported algorithm %d\n",
- algorithm);
BUG();
}
break;
@@ -1832,10 +1835,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
qd_idx = raid_disks - 1;
break;
-
default:
- printk(KERN_CRIT "raid6: unsupported algorithm %d\n",
- algorithm);
BUG();
}
break;
@@ -1898,8 +1898,6 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
case ALGORITHM_PARITY_N:
break;
default:
- printk(KERN_ERR "raid5: unsupported algorithm %d\n",
- algorithm);
BUG();
}
break;
@@ -1958,8 +1956,6 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
i -= 1;
break;
default:
- printk(KERN_CRIT "raid6: unsupported algorithm %d\n",
- algorithm);
BUG();
}
break;
@@ -1972,7 +1968,8 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
previous, &dummy1, &sh2);
if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx
|| sh2.qd_idx != sh->qd_idx) {
- printk(KERN_ERR "compute_blocknr: map not correct\n");
+ printk(KERN_ERR "md/raid:%s: compute_blocknr: map not correct\n",
+ mdname(conf->mddev));
return 0;
}
return r_sector;
@@ -3709,10 +3706,10 @@ static void raid5_align_endio(struct bio *bi, int error)
bio_put(bi);
- mddev = raid_bi->bi_bdev->bd_disk->queue->queuedata;
- conf = mddev->private;
rdev = (void*)raid_bi->bi_next;
raid_bi->bi_next = NULL;
+ mddev = rdev->mddev;
+ conf = mddev->private;
rdev_dec_pending(rdev, conf->mddev);
@@ -3749,9 +3746,8 @@ static int bio_fits_rdev(struct bio *bi)
}
-static int chunk_aligned_read(struct request_queue *q, struct bio * raid_bio)
+static int chunk_aligned_read(mddev_t *mddev, struct bio * raid_bio)
{
- mddev_t *mddev = q->queuedata;
raid5_conf_t *conf = mddev->private;
int dd_idx;
struct bio* align_bi;
@@ -3866,16 +3862,15 @@ static struct stripe_head *__get_priority_stripe(raid5_conf_t *conf)
return sh;
}
-static int make_request(struct request_queue *q, struct bio * bi)
+static int make_request(mddev_t *mddev, struct bio * bi)
{
- mddev_t *mddev = q->queuedata;
raid5_conf_t *conf = mddev->private;
int dd_idx;
sector_t new_sector;
sector_t logical_sector, last_sector;
struct stripe_head *sh;
const int rw = bio_data_dir(bi);
- int cpu, remaining;
+ int remaining;
if (unlikely(bio_rw_flagged(bi, BIO_RW_BARRIER))) {
/* Drain all pending writes. We only really need
@@ -3890,15 +3885,9 @@ static int make_request(struct request_queue *q, struct bio * bi)
md_write_start(mddev, bi);
- cpu = part_stat_lock();
- part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
- part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
- bio_sectors(bi));
- part_stat_unlock();
-
if (rw == READ &&
mddev->reshape_position == MaxSector &&
- chunk_aligned_read(q,bi))
+ chunk_aligned_read(mddev,bi))
return 0;
logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
@@ -3946,7 +3935,7 @@ static int make_request(struct request_queue *q, struct bio * bi)
new_sector = raid5_compute_sector(conf, logical_sector,
previous,
&dd_idx, NULL);
- pr_debug("raid5: make_request, sector %llu logical %llu\n",
+ pr_debug("raid456: make_request, sector %llu logical %llu\n",
(unsigned long long)new_sector,
(unsigned long long)logical_sector);
@@ -4054,7 +4043,7 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
* As the reads complete, handle_stripe will copy the data
* into the destination stripe and release that stripe.
*/
- raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
+ raid5_conf_t *conf = mddev->private;
struct stripe_head *sh;
sector_t first_sector, last_sector;
int raid_disks = conf->previous_raid_disks;
@@ -4263,7 +4252,7 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
/* FIXME go_faster isn't used */
static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
{
- raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
+ raid5_conf_t *conf = mddev->private;
struct stripe_head *sh;
sector_t max_sector = mddev->dev_sectors;
int sync_blocks;
@@ -4656,7 +4645,7 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
kfree(percpu->scribble);
pr_err("%s: failed memory allocation for cpu%ld\n",
__func__, cpu);
- return NOTIFY_BAD;
+ return notifier_from_errno(-ENOMEM);
}
break;
case CPU_DEAD:
@@ -4725,7 +4714,7 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
if (mddev->new_level != 5
&& mddev->new_level != 4
&& mddev->new_level != 6) {
- printk(KERN_ERR "raid5: %s: raid level not set to 4/5/6 (%d)\n",
+ printk(KERN_ERR "md/raid:%s: raid level not set to 4/5/6 (%d)\n",
mdname(mddev), mddev->new_level);
return ERR_PTR(-EIO);
}
@@ -4733,12 +4722,12 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
&& !algorithm_valid_raid5(mddev->new_layout)) ||
(mddev->new_level == 6
&& !algorithm_valid_raid6(mddev->new_layout))) {
- printk(KERN_ERR "raid5: %s: layout %d not supported\n",
+ printk(KERN_ERR "md/raid:%s: layout %d not supported\n",
mdname(mddev), mddev->new_layout);
return ERR_PTR(-EIO);
}
if (mddev->new_level == 6 && mddev->raid_disks < 4) {
- printk(KERN_ERR "raid6: not enough configured devices for %s (%d, minimum 4)\n",
+ printk(KERN_ERR "md/raid:%s: not enough configured devices (%d, minimum 4)\n",
mdname(mddev), mddev->raid_disks);
return ERR_PTR(-EINVAL);
}
@@ -4746,8 +4735,8 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
if (!mddev->new_chunk_sectors ||
(mddev->new_chunk_sectors << 9) % PAGE_SIZE ||
!is_power_of_2(mddev->new_chunk_sectors)) {
- printk(KERN_ERR "raid5: invalid chunk size %d for %s\n",
- mddev->new_chunk_sectors << 9, mdname(mddev));
+ printk(KERN_ERR "md/raid:%s: invalid chunk size %d\n",
+ mdname(mddev), mddev->new_chunk_sectors << 9);
return ERR_PTR(-EINVAL);
}
@@ -4789,7 +4778,7 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
if (raid5_alloc_percpu(conf) != 0)
goto abort;
- pr_debug("raid5: run(%s) called.\n", mdname(mddev));
+ pr_debug("raid456: run(%s) called.\n", mdname(mddev));
list_for_each_entry(rdev, &mddev->disks, same_set) {
raid_disk = rdev->raid_disk;
@@ -4802,9 +4791,9 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
if (test_bit(In_sync, &rdev->flags)) {
char b[BDEVNAME_SIZE];
- printk(KERN_INFO "raid5: device %s operational as raid"
- " disk %d\n", bdevname(rdev->bdev,b),
- raid_disk);
+ printk(KERN_INFO "md/raid:%s: device %s operational as raid"
+ " disk %d\n",
+ mdname(mddev), bdevname(rdev->bdev, b), raid_disk);
} else
/* Cannot rely on bitmap to complete recovery */
conf->fullsync = 1;
@@ -4828,16 +4817,17 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
if (grow_stripes(conf, conf->max_nr_stripes)) {
printk(KERN_ERR
- "raid5: couldn't allocate %dkB for buffers\n", memory);
+ "md/raid:%s: couldn't allocate %dkB for buffers\n",
+ mdname(mddev), memory);
goto abort;
} else
- printk(KERN_INFO "raid5: allocated %dkB for %s\n",
- memory, mdname(mddev));
+ printk(KERN_INFO "md/raid:%s: allocated %dkB\n",
+ mdname(mddev), memory);
conf->thread = md_register_thread(raid5d, mddev, NULL);
if (!conf->thread) {
printk(KERN_ERR
- "raid5: couldn't allocate thread for %s\n",
+ "md/raid:%s: couldn't allocate thread.\n",
mdname(mddev));
goto abort;
}
@@ -4888,7 +4878,7 @@ static int run(mddev_t *mddev)
sector_t reshape_offset = 0;
if (mddev->recovery_cp != MaxSector)
- printk(KERN_NOTICE "raid5: %s is not clean"
+ printk(KERN_NOTICE "md/raid:%s: not clean"
" -- starting background reconstruction\n",
mdname(mddev));
if (mddev->reshape_position != MaxSector) {
@@ -4902,7 +4892,7 @@ static int run(mddev_t *mddev)
int max_degraded = (mddev->level == 6 ? 2 : 1);
if (mddev->new_level != mddev->level) {
- printk(KERN_ERR "raid5: %s: unsupported reshape "
+ printk(KERN_ERR "md/raid:%s: unsupported reshape "
"required - aborting.\n",
mdname(mddev));
return -EINVAL;
@@ -4915,8 +4905,8 @@ static int run(mddev_t *mddev)
here_new = mddev->reshape_position;
if (sector_div(here_new, mddev->new_chunk_sectors *
(mddev->raid_disks - max_degraded))) {
- printk(KERN_ERR "raid5: reshape_position not "
- "on a stripe boundary\n");
+ printk(KERN_ERR "md/raid:%s: reshape_position not "
+ "on a stripe boundary\n", mdname(mddev));
return -EINVAL;
}
reshape_offset = here_new * mddev->new_chunk_sectors;
@@ -4937,8 +4927,9 @@ static int run(mddev_t *mddev)
if ((here_new * mddev->new_chunk_sectors !=
here_old * mddev->chunk_sectors) ||
mddev->ro == 0) {
- printk(KERN_ERR "raid5: in-place reshape must be started"
- " in read-only mode - aborting\n");
+ printk(KERN_ERR "md/raid:%s: in-place reshape must be started"
+ " in read-only mode - aborting\n",
+ mdname(mddev));
return -EINVAL;
}
} else if (mddev->delta_disks < 0
@@ -4947,11 +4938,13 @@ static int run(mddev_t *mddev)
: (here_new * mddev->new_chunk_sectors >=
here_old * mddev->chunk_sectors)) {
/* Reading from the same stripe as writing to - bad */
- printk(KERN_ERR "raid5: reshape_position too early for "
- "auto-recovery - aborting.\n");
+ printk(KERN_ERR "md/raid:%s: reshape_position too early for "
+ "auto-recovery - aborting.\n",
+ mdname(mddev));
return -EINVAL;
}
- printk(KERN_INFO "raid5: reshape will continue\n");
+ printk(KERN_INFO "md/raid:%s: reshape will continue\n",
+ mdname(mddev));
/* OK, we should be able to continue; */
} else {
BUG_ON(mddev->level != mddev->new_level);
@@ -4993,18 +4986,6 @@ static int run(mddev_t *mddev)
mddev->minor_version > 90)
rdev->recovery_offset = reshape_offset;
- printk("%d: w=%d pa=%d pr=%d m=%d a=%d r=%d op1=%d op2=%d\n",
- rdev->raid_disk, working_disks, conf->prev_algo,
- conf->previous_raid_disks, conf->max_degraded,
- conf->algorithm, conf->raid_disks,
- only_parity(rdev->raid_disk,
- conf->prev_algo,
- conf->previous_raid_disks,
- conf->max_degraded),
- only_parity(rdev->raid_disk,
- conf->algorithm,
- conf->raid_disks,
- conf->max_degraded));
if (rdev->recovery_offset < reshape_offset) {
/* We need to check old and new layout */
if (!only_parity(rdev->raid_disk,
@@ -5025,7 +5006,7 @@ static int run(mddev_t *mddev)
- working_disks);
if (mddev->degraded > conf->max_degraded) {
- printk(KERN_ERR "raid5: not enough operational devices for %s"
+ printk(KERN_ERR "md/raid:%s: not enough operational devices"
" (%d/%d failed)\n",
mdname(mddev), mddev->degraded, conf->raid_disks);
goto abort;
@@ -5039,32 +5020,32 @@ static int run(mddev_t *mddev)
mddev->recovery_cp != MaxSector) {
if (mddev->ok_start_degraded)
printk(KERN_WARNING
- "raid5: starting dirty degraded array: %s"
- "- data corruption possible.\n",
+ "md/raid:%s: starting dirty degraded array"
+ " - data corruption possible.\n",
mdname(mddev));
else {
printk(KERN_ERR
- "raid5: cannot start dirty degraded array for %s\n",
+ "md/raid:%s: cannot start dirty degraded array.\n",
mdname(mddev));
goto abort;
}
}
if (mddev->degraded == 0)
- printk("raid5: raid level %d set %s active with %d out of %d"
- " devices, algorithm %d\n", conf->level, mdname(mddev),
+ printk(KERN_INFO "md/raid:%s: raid level %d active with %d out of %d"
+ " devices, algorithm %d\n", mdname(mddev), conf->level,
mddev->raid_disks-mddev->degraded, mddev->raid_disks,
mddev->new_layout);
else
- printk(KERN_ALERT "raid5: raid level %d set %s active with %d"
- " out of %d devices, algorithm %d\n", conf->level,
- mdname(mddev), mddev->raid_disks - mddev->degraded,
- mddev->raid_disks, mddev->new_layout);
+ printk(KERN_ALERT "md/raid:%s: raid level %d active with %d"
+ " out of %d devices, algorithm %d\n",
+ mdname(mddev), conf->level,
+ mddev->raid_disks - mddev->degraded,
+ mddev->raid_disks, mddev->new_layout);
print_raid5_conf(conf);
if (conf->reshape_progress != MaxSector) {
- printk("...ok start reshape thread\n");
conf->reshape_safe = conf->reshape_progress;
atomic_set(&conf->reshape_stripes, 0);
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
@@ -5087,9 +5068,11 @@ static int run(mddev_t *mddev)
}
/* Ok, everything is just fine now */
- if (sysfs_create_group(&mddev->kobj, &raid5_attrs_group))
+ if (mddev->to_remove == &raid5_attrs_group)
+ mddev->to_remove = NULL;
+ else if (sysfs_create_group(&mddev->kobj, &raid5_attrs_group))
printk(KERN_WARNING
- "raid5: failed to create sysfs attributes for %s\n",
+ "md/raid:%s: failed to create sysfs attributes.\n",
mdname(mddev));
mddev->queue->queue_lock = &conf->device_lock;
@@ -5119,22 +5102,21 @@ abort:
free_conf(conf);
}
mddev->private = NULL;
- printk(KERN_ALERT "raid5: failed to run raid set %s\n", mdname(mddev));
+ printk(KERN_ALERT "md/raid:%s: failed to run raid set.\n", mdname(mddev));
return -EIO;
}
-
-
static int stop(mddev_t *mddev)
{
- raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
+ raid5_conf_t *conf = mddev->private;
md_unregister_thread(mddev->thread);
mddev->thread = NULL;
mddev->queue->backing_dev_info.congested_fn = NULL;
blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
free_conf(conf);
- mddev->private = &raid5_attrs_group;
+ mddev->private = NULL;
+ mddev->to_remove = &raid5_attrs_group;
return 0;
}
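(Observation on the teardown change, not part of the patch text: stop() no longer stashes the attribute group pointer in mddev->private; it records it in mddev->to_remove instead, and run() earlier clears to_remove when the same group is about to be reused. The sysfs group is then removed later by the md core, presumably so the group is never torn down from a context that already holds the sysfs/reconfiguration locks.)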
@@ -5175,7 +5157,7 @@ static void printall(struct seq_file *seq, raid5_conf_t *conf)
static void status(struct seq_file *seq, mddev_t *mddev)
{
- raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
+ raid5_conf_t *conf = mddev->private;
int i;
seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level,
@@ -5197,21 +5179,22 @@ static void print_raid5_conf (raid5_conf_t *conf)
int i;
struct disk_info *tmp;
- printk("RAID5 conf printout:\n");
+ printk(KERN_DEBUG "RAID conf printout:\n");
if (!conf) {
printk("(conf==NULL)\n");
return;
}
- printk(" --- rd:%d wd:%d\n", conf->raid_disks,
- conf->raid_disks - conf->mddev->degraded);
+ printk(KERN_DEBUG " --- level:%d rd:%d wd:%d\n", conf->level,
+ conf->raid_disks,
+ conf->raid_disks - conf->mddev->degraded);
for (i = 0; i < conf->raid_disks; i++) {
char b[BDEVNAME_SIZE];
tmp = conf->disks + i;
if (tmp->rdev)
- printk(" disk %d, o:%d, dev:%s\n",
- i, !test_bit(Faulty, &tmp->rdev->flags),
- bdevname(tmp->rdev->bdev,b));
+ printk(KERN_DEBUG " disk %d, o:%d, dev:%s\n",
+ i, !test_bit(Faulty, &tmp->rdev->flags),
+ bdevname(tmp->rdev->bdev, b));
}
}
@@ -5334,7 +5317,6 @@ static int raid5_resize(mddev_t *mddev, sector_t sectors)
raid5_size(mddev, sectors, mddev->raid_disks))
return -EINVAL;
set_capacity(mddev->gendisk, mddev->array_sectors);
- mddev->changed = 1;
revalidate_disk(mddev->gendisk);
if (sectors > mddev->dev_sectors && mddev->recovery_cp == MaxSector) {
mddev->recovery_cp = mddev->dev_sectors;
@@ -5360,7 +5342,8 @@ static int check_stripe_cache(mddev_t *mddev)
> conf->max_nr_stripes ||
((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4
> conf->max_nr_stripes) {
- printk(KERN_WARNING "raid5: reshape: not enough stripes. Needed %lu\n",
+ printk(KERN_WARNING "md/raid:%s: reshape: not enough stripes. Needed %lu\n",
+ mdname(mddev),
((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9)
/ STRIPE_SIZE)*4);
return 0;
@@ -5431,7 +5414,7 @@ static int raid5_start_reshape(mddev_t *mddev)
*/
if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks)
< mddev->array_sectors) {
- printk(KERN_ERR "md: %s: array size must be reduced "
+ printk(KERN_ERR "md/raid:%s: array size must be reduced "
"before number of disks\n", mdname(mddev));
return -EINVAL;
}
@@ -5469,9 +5452,9 @@ static int raid5_start_reshape(mddev_t *mddev)
if (sysfs_create_link(&mddev->kobj,
&rdev->kobj, nm))
printk(KERN_WARNING
- "raid5: failed to create "
- " link %s for %s\n",
- nm, mdname(mddev));
+ "md/raid:%s: failed to create "
+ " link %s\n",
+ mdname(mddev), nm);
} else
break;
}
@@ -5548,7 +5531,6 @@ static void raid5_finish_reshape(mddev_t *mddev)
if (mddev->delta_disks > 0) {
md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
set_capacity(mddev->gendisk, mddev->array_sectors);
- mddev->changed = 1;
revalidate_disk(mddev->gendisk);
} else {
int d;
@@ -5613,6 +5595,29 @@ static void raid5_quiesce(mddev_t *mddev, int state)
}
+static void *raid45_takeover_raid0(mddev_t *mddev, int level)
+{
+ struct raid0_private_data *raid0_priv = mddev->private;
+
+ /* for raid0 takeover only one zone is supported */
+ if (raid0_priv->nr_strip_zones > 1) {
+ printk(KERN_ERR "md/raid:%s: cannot takeover raid0 with more than one zone.\n",
+ mdname(mddev));
+ return ERR_PTR(-EINVAL);
+ }
+
+ mddev->new_level = level;
+ mddev->new_layout = ALGORITHM_PARITY_N;
+ mddev->new_chunk_sectors = mddev->chunk_sectors;
+ mddev->raid_disks += 1;
+ mddev->delta_disks = 1;
+ /* make sure it will not be marked as dirty */
+ mddev->recovery_cp = MaxSector;
+
+ return setup_conf(mddev);
+}
+
+
static void *raid5_takeover_raid1(mddev_t *mddev)
{
int chunksect;
@@ -5737,12 +5742,13 @@ static int raid6_check_reshape(mddev_t *mddev)
static void *raid5_takeover(mddev_t *mddev)
{
/* raid5 can take over:
- * raid0 - if all devices are the same - make it a raid4 layout
+ * raid0 - if there is only one strip zone - make it a raid4 layout
* raid1 - if there are two drives. We need to know the chunk size
* raid4 - trivial - just use a raid4 layout.
* raid6 - Providing it is a *_6 layout
*/
-
+ if (mddev->level == 0)
+ return raid45_takeover_raid0(mddev, 5);
if (mddev->level == 1)
return raid5_takeover_raid1(mddev);
if (mddev->level == 4) {
@@ -5756,6 +5762,22 @@ static void *raid5_takeover(mddev_t *mddev)
return ERR_PTR(-EINVAL);
}
+static void *raid4_takeover(mddev_t *mddev)
+{
+ /* raid4 can take over:
+ * raid0 - if there is only one strip zone
+ * raid5 - if layout is right
+ */
+ if (mddev->level == 0)
+ return raid45_takeover_raid0(mddev, 4);
+ if (mddev->level == 5 &&
+ mddev->layout == ALGORITHM_PARITY_N) {
+ mddev->new_layout = 0;
+ mddev->new_level = 4;
+ return setup_conf(mddev);
+ }
+ return ERR_PTR(-EINVAL);
+}
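(Usage illustration, not part of the patch: with a ->takeover hook on both personalities, a level change such as `mdadm --grow /dev/md0 --level=5` on a single-zone raid0 array, where the device name is a placeholder, should end up in raid45_takeover_raid0(). mdadm requests this by writing the new level to the array's md "level" sysfs attribute, after which the md core invokes the target personality's ->takeover().)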
static struct mdk_personality raid5_personality;
@@ -5871,6 +5893,7 @@ static struct mdk_personality raid4_personality =
.start_reshape = raid5_start_reshape,
.finish_reshape = raid5_finish_reshape,
.quiesce = raid5_quiesce,
+ .takeover = raid4_takeover,
};
static int __init raid5_init(void)
diff --git a/drivers/media/IR/Kconfig b/drivers/media/IR/Kconfig
index 4dde7d180a32..195c6cf359f6 100644
--- a/drivers/media/IR/Kconfig
+++ b/drivers/media/IR/Kconfig
@@ -7,3 +7,62 @@ config VIDEO_IR
tristate
depends on IR_CORE
default IR_CORE
+
+source "drivers/media/IR/keymaps/Kconfig"
+
+config IR_NEC_DECODER
+ tristate "Enable IR raw decoder for the NEC protocol"
+ depends on IR_CORE
+ default y
+
+ ---help---
+ Enable this option if you have an infrared remote control which
+ uses the NEC protocol, and you need software decoding support.
+
+config IR_RC5_DECODER
+ tristate "Enable IR raw decoder for the RC-5 protocol"
+ depends on IR_CORE
+ default y
+
+ ---help---
+ Enable this option if you have an infrared remote control which
+ uses the RC-5 protocol, and you need software decoding support.
+
+config IR_RC6_DECODER
+ tristate "Enable IR raw decoder for the RC6 protocol"
+ depends on IR_CORE
+ default y
+
+ ---help---
+ Enable this option if you have an infrared remote control which
+ uses the RC6 protocol, and you need software decoding support.
+
+config IR_JVC_DECODER
+ tristate "Enable IR raw decoder for the JVC protocol"
+ depends on IR_CORE
+ default y
+
+ ---help---
+ Enable this option if you have an infrared remote control which
+ uses the JVC protocol, and you need software decoding support.
+
+config IR_SONY_DECODER
+ tristate "Enable IR raw decoder for the Sony protocol"
+ depends on IR_CORE
+ default y
+
+ ---help---
+ Enable this option if you have an infrared remote control which
+ uses the Sony protocol, and you need software decoding support.
+
+config IR_IMON
+ tristate "SoundGraph iMON Receiver and Display"
+ depends on USB_ARCH_HAS_HCD
+ depends on IR_CORE
+ select USB
+ ---help---
+ Say Y here if you want to use a SoundGraph iMON (aka Antec Veris)
+ IR Receiver and/or LCD/VFD/VGA display.
+
+ To compile this driver as a module, choose M here: the
+ module will be called imon.
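(Configuration note, not part of the patch: since the new decoder options are tristate, each protocol decoder can be built in or built as its own module. For example, an illustrative configuration with CONFIG_IR_CORE=y and CONFIG_IR_NEC_DECODER=m produces an ir-nec-decoder.ko module, matching the obj-$(CONFIG_IR_NEC_DECODER) rule added to the Makefile below.)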
diff --git a/drivers/media/IR/Makefile b/drivers/media/IR/Makefile
index 171890e7a41d..b998fcced2e4 100644
--- a/drivers/media/IR/Makefile
+++ b/drivers/media/IR/Makefile
@@ -1,5 +1,15 @@
-ir-common-objs := ir-functions.o ir-keymaps.o
-ir-core-objs := ir-keytable.o ir-sysfs.o
+ir-common-objs := ir-functions.o
+ir-core-objs := ir-keytable.o ir-sysfs.o ir-raw-event.o rc-map.o
+
+obj-y += keymaps/
obj-$(CONFIG_IR_CORE) += ir-core.o
obj-$(CONFIG_VIDEO_IR) += ir-common.o
+obj-$(CONFIG_IR_NEC_DECODER) += ir-nec-decoder.o
+obj-$(CONFIG_IR_RC5_DECODER) += ir-rc5-decoder.o
+obj-$(CONFIG_IR_RC6_DECODER) += ir-rc6-decoder.o
+obj-$(CONFIG_IR_JVC_DECODER) += ir-jvc-decoder.o
+obj-$(CONFIG_IR_SONY_DECODER) += ir-sony-decoder.o
+
+# stand-alone IR receivers/transmitters
+obj-$(CONFIG_IR_IMON) += imon.o
diff --git a/drivers/media/IR/imon.c b/drivers/media/IR/imon.c
new file mode 100644
index 000000000000..5e2045670004
--- /dev/null
+++ b/drivers/media/IR/imon.c
@@ -0,0 +1,2396 @@
+/*
+ * imon.c: input and display driver for SoundGraph iMON IR/VFD/LCD
+ *
+ * Copyright(C) 2009 Jarod Wilson <jarod@wilsonet.com>
+ * Portions based on the original lirc_imon driver,
+ * Copyright(C) 2004 Venky Raju(dev@venky.ws)
+ *
+ * Huge thanks to R. Geoff Newbury for invaluable debugging on the
+ * 0xffdc iMON devices, and for sending me one to hack on, without
+ * which the support for them wouldn't be nearly as good. Thanks
+ * also to the numerous 0xffdc device owners that tested auto-config
+ * support for me and provided debug dumps from their devices.
+ *
+ * imon is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include <linux/input.h>
+#include <linux/usb.h>
+#include <linux/usb/input.h>
+#include <media/ir-core.h>
+
+#include <linux/time.h>
+#include <linux/timer.h>
+
+#define MOD_AUTHOR "Jarod Wilson <jarod@wilsonet.com>"
+#define MOD_DESC "Driver for SoundGraph iMON MultiMedia IR/Display"
+#define MOD_NAME "imon"
+#define MOD_VERSION "0.9.1"
+
+#define DISPLAY_MINOR_BASE 144
+#define DEVICE_NAME "lcd%d"
+
+#define BUF_CHUNK_SIZE 8
+#define BUF_SIZE 128
+
+#define BIT_DURATION 250 /* each bit received is 250us */
+
+#define IMON_CLOCK_ENABLE_PACKETS 2
+
+/*** P R O T O T Y P E S ***/
+
+/* USB Callback prototypes */
+static int imon_probe(struct usb_interface *interface,
+ const struct usb_device_id *id);
+static void imon_disconnect(struct usb_interface *interface);
+static void usb_rx_callback_intf0(struct urb *urb);
+static void usb_rx_callback_intf1(struct urb *urb);
+static void usb_tx_callback(struct urb *urb);
+
+/* suspend/resume support */
+static int imon_resume(struct usb_interface *intf);
+static int imon_suspend(struct usb_interface *intf, pm_message_t message);
+
+/* Display file_operations function prototypes */
+static int display_open(struct inode *inode, struct file *file);
+static int display_close(struct inode *inode, struct file *file);
+
+/* VFD write operation */
+static ssize_t vfd_write(struct file *file, const char *buf,
+ size_t n_bytes, loff_t *pos);
+
+/* LCD file_operations override function prototypes */
+static ssize_t lcd_write(struct file *file, const char *buf,
+ size_t n_bytes, loff_t *pos);
+
+/*** G L O B A L S ***/
+
+struct imon_context {
+ struct device *dev;
+ struct ir_dev_props *props;
+ struct ir_input_dev *ir;
+ /* Newer devices have two interfaces */
+ struct usb_device *usbdev_intf0;
+ struct usb_device *usbdev_intf1;
+
+ bool display_supported; /* not all controllers do */
+ bool display_isopen; /* display port has been opened */
+ bool rf_isassociating; /* RF remote associating */
+ bool dev_present_intf0; /* USB device presence, interface 0 */
+ bool dev_present_intf1; /* USB device presence, interface 1 */
+
+ struct mutex lock; /* to lock this object */
+ wait_queue_head_t remove_ok; /* For unexpected USB disconnects */
+
+ struct usb_endpoint_descriptor *rx_endpoint_intf0;
+ struct usb_endpoint_descriptor *rx_endpoint_intf1;
+ struct usb_endpoint_descriptor *tx_endpoint;
+ struct urb *rx_urb_intf0;
+ struct urb *rx_urb_intf1;
+ struct urb *tx_urb;
+ bool tx_control;
+ unsigned char usb_rx_buf[8];
+ unsigned char usb_tx_buf[8];
+
+ struct tx_t {
+ unsigned char data_buf[35]; /* user data buffer */
+ struct completion finished; /* wait for write to finish */
+ bool busy; /* write in progress */
+ int status; /* status of tx completion */
+ } tx;
+
+ u16 vendor; /* usb vendor ID */
+ u16 product; /* usb product ID */
+
+ struct input_dev *idev; /* input device for remote */
+ struct input_dev *touch; /* input device for touchscreen */
+
+ u32 kc; /* current input keycode */
+ u32 last_keycode; /* last reported input keycode */
+ u64 ir_type; /* iMON or MCE (RC6) IR protocol? */
+ u8 mce_toggle_bit; /* last mce toggle bit */
+ bool release_code; /* some keys send a release code */
+
+ u8 display_type; /* store the display type */
+ bool pad_mouse; /* toggle kbd(0)/mouse(1) mode */
+
+ char name_idev[128]; /* input device name */
+ char phys_idev[64]; /* input device phys path */
+ struct timer_list itimer; /* input device timer, need for rc6 */
+
+ char name_touch[128]; /* touch screen name */
+ char phys_touch[64]; /* touch screen phys path */
+ struct timer_list ttimer; /* touch screen timer */
+ int touch_x; /* x coordinate on touchscreen */
+ int touch_y; /* y coordinate on touchscreen */
+};
+
+#define TOUCH_TIMEOUT (HZ/30)
+
+/* vfd character device file operations */
+static const struct file_operations vfd_fops = {
+ .owner = THIS_MODULE,
+ .open = &display_open,
+ .write = &vfd_write,
+ .release = &display_close
+};
+
+/* lcd character device file operations */
+static const struct file_operations lcd_fops = {
+ .owner = THIS_MODULE,
+ .open = &display_open,
+ .write = &lcd_write,
+ .release = &display_close
+};
+
+enum {
+ IMON_DISPLAY_TYPE_AUTO = 0,
+ IMON_DISPLAY_TYPE_VFD = 1,
+ IMON_DISPLAY_TYPE_LCD = 2,
+ IMON_DISPLAY_TYPE_VGA = 3,
+ IMON_DISPLAY_TYPE_NONE = 4,
+};
+
+enum {
+ IMON_KEY_IMON = 0,
+ IMON_KEY_MCE = 1,
+ IMON_KEY_PANEL = 2,
+};
+
+/*
+ * USB Device ID for iMON USB Control Boards
+ *
+ * The Windows drivers contain 6 different inf files, more or less one for
+ * each new device until the 0x0034-0x0046 devices, which all use the same
+ * driver. Some of the devices in the 34-46 range haven't been definitively
+ * identified yet. Early devices have either a TriGem Computer, Inc. or a
+ * Samsung vendor ID (0x0aa8 and 0x04e8 respectively), while all later
+ * devices use the SoundGraph vendor ID (0x15c2). This driver only supports
+ * the ffdc and later devices, which do onboard decoding.
+ */
+static struct usb_device_id imon_usb_id_table[] = {
+ /*
+ * Several devices with this same device ID, all use iMON_PAD.inf
+ * SoundGraph iMON PAD (IR & VFD)
+ * SoundGraph iMON PAD (IR & LCD)
+ * SoundGraph iMON Knob (IR only)
+ */
+ { USB_DEVICE(0x15c2, 0xffdc) },
+
+ /*
+ * Newer devices, all driven by the latest iMON Windows driver, full
+ * list of device IDs extracted via 'strings Setup/data1.hdr |grep 15c2'
+ * Need user input to fill in details on unknown devices.
+ */
+ /* SoundGraph iMON OEM Touch LCD (IR & 7" VGA LCD) */
+ { USB_DEVICE(0x15c2, 0x0034) },
+ /* SoundGraph iMON OEM Touch LCD (IR & 4.3" VGA LCD) */
+ { USB_DEVICE(0x15c2, 0x0035) },
+ /* SoundGraph iMON OEM VFD (IR & VFD) */
+ { USB_DEVICE(0x15c2, 0x0036) },
+ /* device specifics unknown */
+ { USB_DEVICE(0x15c2, 0x0037) },
+ /* SoundGraph iMON OEM LCD (IR & LCD) */
+ { USB_DEVICE(0x15c2, 0x0038) },
+ /* SoundGraph iMON UltraBay (IR & LCD) */
+ { USB_DEVICE(0x15c2, 0x0039) },
+ /* device specifics unknown */
+ { USB_DEVICE(0x15c2, 0x003a) },
+ /* device specifics unknown */
+ { USB_DEVICE(0x15c2, 0x003b) },
+ /* SoundGraph iMON OEM Inside (IR only) */
+ { USB_DEVICE(0x15c2, 0x003c) },
+ /* device specifics unknown */
+ { USB_DEVICE(0x15c2, 0x003d) },
+ /* device specifics unknown */
+ { USB_DEVICE(0x15c2, 0x003e) },
+ /* device specifics unknown */
+ { USB_DEVICE(0x15c2, 0x003f) },
+ /* device specifics unknown */
+ { USB_DEVICE(0x15c2, 0x0040) },
+ /* SoundGraph iMON MINI (IR only) */
+ { USB_DEVICE(0x15c2, 0x0041) },
+ /* Antec Veris Multimedia Station EZ External (IR only) */
+ { USB_DEVICE(0x15c2, 0x0042) },
+ /* Antec Veris Multimedia Station Basic Internal (IR only) */
+ { USB_DEVICE(0x15c2, 0x0043) },
+ /* Antec Veris Multimedia Station Elite (IR & VFD) */
+ { USB_DEVICE(0x15c2, 0x0044) },
+ /* Antec Veris Multimedia Station Premiere (IR & LCD) */
+ { USB_DEVICE(0x15c2, 0x0045) },
+ /* device specifics unknown */
+ { USB_DEVICE(0x15c2, 0x0046) },
+ {}
+};
+
+/* USB Device data */
+static struct usb_driver imon_driver = {
+ .name = MOD_NAME,
+ .probe = imon_probe,
+ .disconnect = imon_disconnect,
+ .suspend = imon_suspend,
+ .resume = imon_resume,
+ .id_table = imon_usb_id_table,
+};
+
+static struct usb_class_driver imon_vfd_class = {
+ .name = DEVICE_NAME,
+ .fops = &vfd_fops,
+ .minor_base = DISPLAY_MINOR_BASE,
+};
+
+static struct usb_class_driver imon_lcd_class = {
+ .name = DEVICE_NAME,
+ .fops = &lcd_fops,
+ .minor_base = DISPLAY_MINOR_BASE,
+};
+
+/* imon receiver front panel/knob key table */
+static const struct {
+ u64 hw_code;
+ u32 keycode;
+} imon_panel_key_table[] = {
+ { 0x000000000f00ffeell, KEY_PROG1 }, /* Go */
+ { 0x000000001f00ffeell, KEY_AUDIO },
+ { 0x000000002000ffeell, KEY_VIDEO },
+ { 0x000000002100ffeell, KEY_CAMERA },
+ { 0x000000002700ffeell, KEY_DVD },
+ { 0x000000002300ffeell, KEY_TV },
+ { 0x000000000500ffeell, KEY_PREVIOUS },
+ { 0x000000000700ffeell, KEY_REWIND },
+ { 0x000000000400ffeell, KEY_STOP },
+ { 0x000000003c00ffeell, KEY_PLAYPAUSE },
+ { 0x000000000800ffeell, KEY_FASTFORWARD },
+ { 0x000000000600ffeell, KEY_NEXT },
+ { 0x000000010000ffeell, KEY_RIGHT },
+ { 0x000001000000ffeell, KEY_LEFT },
+ { 0x000000003d00ffeell, KEY_SELECT },
+ { 0x000100000000ffeell, KEY_VOLUMEUP },
+ { 0x010000000000ffeell, KEY_VOLUMEDOWN },
+ { 0x000000000100ffeell, KEY_MUTE },
+ /* iMON Knob values */
+ { 0x000100ffffffffeell, KEY_VOLUMEUP },
+ { 0x010000ffffffffeell, KEY_VOLUMEDOWN },
+ { 0x000008ffffffffeell, KEY_MUTE },
+};
+
+/* to prevent races between open() and disconnect(), probing, etc */
+static DEFINE_MUTEX(driver_lock);
+
+/* Module bookkeeping bits */
+MODULE_AUTHOR(MOD_AUTHOR);
+MODULE_DESCRIPTION(MOD_DESC);
+MODULE_VERSION(MOD_VERSION);
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(usb, imon_usb_id_table);
+
+static bool debug;
+module_param(debug, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "Debug messages: 0=no, 1=yes (default: no)");
+
+/* lcd, vfd, vga or none? should be auto-detected, but can be overridden... */
+static int display_type;
+module_param(display_type, int, S_IRUGO);
+MODULE_PARM_DESC(display_type, "Type of attached display. 0=autodetect, "
+ "1=vfd, 2=lcd, 3=vga, 4=none (default: autodetect)");
+
+static int pad_stabilize = 1;
+module_param(pad_stabilize, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(pad_stabilize, "Apply stabilization algorithm to iMON PAD "
+ "presses in arrow key mode. 0=disable, 1=enable (default).");
+
+/*
+ * In certain use cases, mouse mode isn't really helpful, and could actually
+ * cause confusion, so allow disabling it when the IR device is open.
+ */
+static bool nomouse;
+module_param(nomouse, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(nomouse, "Disable mouse input device mode when IR device is "
+ "open. 0=don't disable, 1=disable. (default: don't disable)");
+
+/* threshold at which a pad push registers as an arrow key in kbd mode */
+static int pad_thresh;
+module_param(pad_thresh, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(pad_thresh, "Threshold at which a pad push registers as an "
+ "arrow key in kbd mode (default: 28)");
+
+
+static void free_imon_context(struct imon_context *ictx)
+{
+ struct device *dev = ictx->dev;
+
+ usb_free_urb(ictx->tx_urb);
+ usb_free_urb(ictx->rx_urb_intf0);
+ usb_free_urb(ictx->rx_urb_intf1);
+ kfree(ictx);
+
+ dev_dbg(dev, "%s: iMON context freed\n", __func__);
+}
+
+/**
+ * Called when the Display device (e.g. /dev/lcd0)
+ * is opened by the application.
+ */
+static int display_open(struct inode *inode, struct file *file)
+{
+ struct usb_interface *interface;
+ struct imon_context *ictx = NULL;
+ int subminor;
+ int retval = 0;
+
+ /* prevent races with disconnect */
+ mutex_lock(&driver_lock);
+
+ subminor = iminor(inode);
+ interface = usb_find_interface(&imon_driver, subminor);
+ if (!interface) {
+ err("%s: could not find interface for minor %d",
+ __func__, subminor);
+ retval = -ENODEV;
+ goto exit;
+ }
+ ictx = usb_get_intfdata(interface);
+
+ if (!ictx) {
+ err("%s: no context found for minor %d", __func__, subminor);
+ retval = -ENODEV;
+ goto exit;
+ }
+
+ mutex_lock(&ictx->lock);
+
+ if (!ictx->display_supported) {
+ err("%s: display not supported by device", __func__);
+ retval = -ENODEV;
+ } else if (ictx->display_isopen) {
+ err("%s: display port is already open", __func__);
+ retval = -EBUSY;
+ } else {
+ ictx->display_isopen = 1;
+ file->private_data = ictx;
+ dev_dbg(ictx->dev, "display port opened\n");
+ }
+
+ mutex_unlock(&ictx->lock);
+
+exit:
+ mutex_unlock(&driver_lock);
+ return retval;
+}
+
+/**
+ * Called when the display device (e.g. /dev/lcd0)
+ * is closed by the application.
+ */
+static int display_close(struct inode *inode, struct file *file)
+{
+ struct imon_context *ictx = NULL;
+ int retval = 0;
+
+ ictx = (struct imon_context *)file->private_data;
+
+ if (!ictx) {
+ err("%s: no context for device", __func__);
+ return -ENODEV;
+ }
+
+ mutex_lock(&ictx->lock);
+
+ if (!ictx->display_supported) {
+ err("%s: display not supported by device", __func__);
+ retval = -ENODEV;
+ } else if (!ictx->display_isopen) {
+ err("%s: display is not open", __func__);
+ retval = -EIO;
+ } else {
+ ictx->display_isopen = 0;
+ dev_dbg(ictx->dev, "display port closed\n");
+ if (!ictx->dev_present_intf0) {
+ /*
+ * Device disconnected before close and IR port is not
+ * open. If IR port is open, context will be deleted by
+ * ir_close.
+ */
+ mutex_unlock(&ictx->lock);
+ free_imon_context(ictx);
+ return retval;
+ }
+ }
+
+ mutex_unlock(&ictx->lock);
+ return retval;
+}
+
+/**
+ * Sends a packet to the device -- this function must be called
+ * with ictx->lock held.
+ */
+static int send_packet(struct imon_context *ictx)
+{
+ unsigned int pipe;
+ unsigned long timeout;
+ int interval = 0;
+ int retval = 0;
+ struct usb_ctrlrequest *control_req = NULL;
+
+ /* Check if we need to use control or interrupt urb */
+ if (!ictx->tx_control) {
+ pipe = usb_sndintpipe(ictx->usbdev_intf0,
+ ictx->tx_endpoint->bEndpointAddress);
+ interval = ictx->tx_endpoint->bInterval;
+
+ usb_fill_int_urb(ictx->tx_urb, ictx->usbdev_intf0, pipe,
+ ictx->usb_tx_buf,
+ sizeof(ictx->usb_tx_buf),
+ usb_tx_callback, ictx, interval);
+
+ ictx->tx_urb->actual_length = 0;
+ } else {
+ /* fill request into kmalloc'ed space: */
+ control_req = kmalloc(sizeof(struct usb_ctrlrequest),
+ GFP_KERNEL);
+ if (control_req == NULL)
+ return -ENOMEM;
+
+ /* setup packet is '21 09 0200 0001 0008' */
+ control_req->bRequestType = 0x21;
+ control_req->bRequest = 0x09;
+ control_req->wValue = cpu_to_le16(0x0200);
+ control_req->wIndex = cpu_to_le16(0x0001);
+ control_req->wLength = cpu_to_le16(0x0008);
+
+ /* control pipe is endpoint 0x00 */
+ pipe = usb_sndctrlpipe(ictx->usbdev_intf0, 0);
+
+ /* build the control urb */
+ usb_fill_control_urb(ictx->tx_urb, ictx->usbdev_intf0,
+ pipe, (unsigned char *)control_req,
+ ictx->usb_tx_buf,
+ sizeof(ictx->usb_tx_buf),
+ usb_tx_callback, ictx);
+ ictx->tx_urb->actual_length = 0;
+ }
+
+ init_completion(&ictx->tx.finished);
+ ictx->tx.busy = 1;
+ smp_rmb(); /* ensure later readers know we're busy */
+
+ retval = usb_submit_urb(ictx->tx_urb, GFP_KERNEL);
+ if (retval) {
+ ictx->tx.busy = 0;
+ smp_rmb(); /* ensure later readers know we're not busy */
+ err("%s: error submitting urb(%d)", __func__, retval);
+ } else {
+ /* Wait for transmission to complete (or abort) */
+ mutex_unlock(&ictx->lock);
+ retval = wait_for_completion_interruptible(
+ &ictx->tx.finished);
+ if (retval)
+ err("%s: task interrupted", __func__);
+ mutex_lock(&ictx->lock);
+
+ retval = ictx->tx.status;
+ if (retval)
+ err("%s: packet tx failed (%d)", __func__, retval);
+ }
+
+ kfree(control_req);
+
+ /*
+ * Induce a mandatory 5ms delay before returning, as otherwise,
+ * send_packet can get called so rapidly as to overwhelm the device,
+ * particularly on faster systems and/or those with quirky usb.
+ */
+ timeout = msecs_to_jiffies(5);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(timeout);
+
+ return retval;
+}
+
+/**
+ * Sends an associate packet to the iMON 2.4G.
+ *
+ * This might not be such a good idea, since it has an id collision with
+ * some versions of the "IR & VFD" combo. The only way to determine if it
+ * is an RF version is to look at the product description string (which
+ * we currently do not fetch).
+ */
+static int send_associate_24g(struct imon_context *ictx)
+{
+ int retval;
+ const unsigned char packet[8] = { 0x01, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x20 };
+
+ if (!ictx) {
+ err("%s: no context for device", __func__);
+ return -ENODEV;
+ }
+
+ if (!ictx->dev_present_intf0) {
+ err("%s: no iMON device present", __func__);
+ return -ENODEV;
+ }
+
+ memcpy(ictx->usb_tx_buf, packet, sizeof(packet));
+ retval = send_packet(ictx);
+
+ return retval;
+}
+
+/**
+ * Sends packets to setup and show clock on iMON display
+ *
+ * Arguments: year - last 2 digits of year, month - 1..12,
+ * day - 1..31, dow - day of the week (0-Sun...6-Sat),
+ * hour - 0..23, minute - 0..59, second - 0..59
+ */
+static int send_set_imon_clock(struct imon_context *ictx,
+ unsigned int year, unsigned int month,
+ unsigned int day, unsigned int dow,
+ unsigned int hour, unsigned int minute,
+ unsigned int second)
+{
+ unsigned char clock_enable_pkt[IMON_CLOCK_ENABLE_PACKETS][8];
+ int retval = 0;
+ int i;
+
+ if (!ictx) {
+ err("%s: no context for device", __func__);
+ return -ENODEV;
+ }
+
+ switch (ictx->display_type) {
+ case IMON_DISPLAY_TYPE_LCD:
+ clock_enable_pkt[0][0] = 0x80;
+ clock_enable_pkt[0][1] = year;
+ clock_enable_pkt[0][2] = month-1;
+ clock_enable_pkt[0][3] = day;
+ clock_enable_pkt[0][4] = hour;
+ clock_enable_pkt[0][5] = minute;
+ clock_enable_pkt[0][6] = second;
+
+ clock_enable_pkt[1][0] = 0x80;
+ clock_enable_pkt[1][1] = 0;
+ clock_enable_pkt[1][2] = 0;
+ clock_enable_pkt[1][3] = 0;
+ clock_enable_pkt[1][4] = 0;
+ clock_enable_pkt[1][5] = 0;
+ clock_enable_pkt[1][6] = 0;
+
+ if (ictx->product == 0xffdc) {
+ clock_enable_pkt[0][7] = 0x50;
+ clock_enable_pkt[1][7] = 0x51;
+ } else {
+ clock_enable_pkt[0][7] = 0x88;
+ clock_enable_pkt[1][7] = 0x8a;
+ }
+
+ break;
+
+ case IMON_DISPLAY_TYPE_VFD:
+ clock_enable_pkt[0][0] = year;
+ clock_enable_pkt[0][1] = month-1;
+ clock_enable_pkt[0][2] = day;
+ clock_enable_pkt[0][3] = dow;
+ clock_enable_pkt[0][4] = hour;
+ clock_enable_pkt[0][5] = minute;
+ clock_enable_pkt[0][6] = second;
+ clock_enable_pkt[0][7] = 0x40;
+
+ clock_enable_pkt[1][0] = 0;
+ clock_enable_pkt[1][1] = 0;
+ clock_enable_pkt[1][2] = 1;
+ clock_enable_pkt[1][3] = 0;
+ clock_enable_pkt[1][4] = 0;
+ clock_enable_pkt[1][5] = 0;
+ clock_enable_pkt[1][6] = 0;
+ clock_enable_pkt[1][7] = 0x42;
+
+ break;
+
+ default:
+ return -ENODEV;
+ }
+
+ for (i = 0; i < IMON_CLOCK_ENABLE_PACKETS; i++) {
+ memcpy(ictx->usb_tx_buf, clock_enable_pkt[i], 8);
+ retval = send_packet(ictx);
+ if (retval) {
+ err("%s: send_packet failed for packet %d",
+ __func__, i);
+ break;
+ }
+ }
+
+ return retval;
+}
+
+/**
+ * These are the sysfs functions to handle the association on the iMON 2.4G LT.
+ */
+static ssize_t show_associate_remote(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct imon_context *ictx = dev_get_drvdata(d);
+
+ if (!ictx)
+ return -ENODEV;
+
+ mutex_lock(&ictx->lock);
+ if (ictx->rf_isassociating)
+ strcpy(buf, "associating\n");
+ else
+ strcpy(buf, "closed\n");
+
+ dev_info(d, "Visit http://www.lirc.org/html/imon-24g.html for "
+ "instructions on how to associate your iMON 2.4G DT/LT "
+ "remote\n");
+ mutex_unlock(&ictx->lock);
+ return strlen(buf);
+}
+
+static ssize_t store_associate_remote(struct device *d,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct imon_context *ictx;
+
+ ictx = dev_get_drvdata(d);
+
+ if (!ictx)
+ return -ENODEV;
+
+ mutex_lock(&ictx->lock);
+ ictx->rf_isassociating = 1;
+ send_associate_24g(ictx);
+ mutex_unlock(&ictx->lock);
+
+ return count;
+}
+
+/**
+ * sysfs functions to control internal imon clock
+ */
+static ssize_t show_imon_clock(struct device *d,
+ struct device_attribute *attr, char *buf)
+{
+ struct imon_context *ictx = dev_get_drvdata(d);
+ size_t len;
+
+ if (!ictx)
+ return -ENODEV;
+
+ mutex_lock(&ictx->lock);
+
+ if (!ictx->display_supported) {
+ len = snprintf(buf, PAGE_SIZE, "Not supported.");
+ } else {
+ len = snprintf(buf, PAGE_SIZE,
+ "To set the clock on your iMON display:\n"
+ "# date \"+%%y %%m %%d %%w %%H %%M %%S\" > imon_clock\n"
+ "%s", ictx->display_isopen ?
+ "\nNOTE: imon device must be closed\n" : "");
+ }
+
+ mutex_unlock(&ictx->lock);
+
+ return len;
+}
+
+static ssize_t store_imon_clock(struct device *d,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct imon_context *ictx = dev_get_drvdata(d);
+ ssize_t retval;
+ unsigned int year, month, day, dow, hour, minute, second;
+
+ if (!ictx)
+ return -ENODEV;
+
+ mutex_lock(&ictx->lock);
+
+ if (!ictx->display_supported) {
+ retval = -ENODEV;
+ goto exit;
+ } else if (ictx->display_isopen) {
+ retval = -EBUSY;
+ goto exit;
+ }
+
+ if (sscanf(buf, "%u %u %u %u %u %u %u", &year, &month, &day, &dow,
+ &hour, &minute, &second) != 7) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ if ((month < 1 || month > 12) ||
+ (day < 1 || day > 31) || (dow > 6) ||
+ (hour > 23) || (minute > 59) || (second > 59)) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ retval = send_set_imon_clock(ictx, year, month, day, dow,
+ hour, minute, second);
+ if (retval)
+ goto exit;
+
+ retval = count;
+exit:
+ mutex_unlock(&ictx->lock);
+
+ return retval;
+}
+
+
+static DEVICE_ATTR(imon_clock, S_IWUSR | S_IRUGO, show_imon_clock,
+ store_imon_clock);
+
+static DEVICE_ATTR(associate_remote, S_IWUSR | S_IRUGO, show_associate_remote,
+ store_associate_remote);
+
+static struct attribute *imon_display_sysfs_entries[] = {
+ &dev_attr_imon_clock.attr,
+ NULL
+};
+
+static struct attribute_group imon_display_attribute_group = {
+ .attrs = imon_display_sysfs_entries
+};
+
+static struct attribute *imon_rf_sysfs_entries[] = {
+ &dev_attr_associate_remote.attr,
+ NULL
+};
+
+static struct attribute_group imon_rf_attribute_group = {
+ .attrs = imon_rf_sysfs_entries
+};
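For illustration, a small userspace sketch (not part of this patch) that feeds store_imon_clock() the seven space-separated integers its sscanf() expects. The sysfs attribute path is device-specific, so it is taken from the command line here rather than hard-coded:

#include <stdio.h>
#include <time.h>

int main(int argc, char *argv[])
{
	time_t now = time(NULL);
	struct tm tm;
	FILE *f;

	if (argc < 2)
		return 1;
	localtime_r(&now, &tm);
	f = fopen(argv[1], "w");	/* e.g. the device's imon_clock attribute */
	if (!f)
		return 1;
	/* year (last two digits), month 1..12, day, day of week, hour, minute, second */
	fprintf(f, "%d %d %d %d %d %d %d\n",
		tm.tm_year % 100, tm.tm_mon + 1, tm.tm_mday, tm.tm_wday,
		tm.tm_hour, tm.tm_min, tm.tm_sec);
	fclose(f);
	return 0;
}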
+
+/**
+ * Writes data to the VFD. The iMON VFD is 2x16 characters
+ * and requires data in 5 consecutive USB interrupt packets,
+ * each packet but the last carrying 7 bytes.
+ *
+ * I don't know if the VFD board supports features such as
+ * scrolling, clearing rows, blanking, etc., so the caller
+ * must provide a full screen of data. If fewer
+ * than 32 bytes are provided, spaces will be appended to
+ * generate a full screen.
+ */
+static ssize_t vfd_write(struct file *file, const char *buf,
+ size_t n_bytes, loff_t *pos)
+{
+ int i;
+ int offset;
+ int seq;
+ int retval = 0;
+ struct imon_context *ictx;
+ const unsigned char vfd_packet6[] = {
+ 0x01, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF };
+
+ ictx = (struct imon_context *)file->private_data;
+ if (!ictx) {
+ err("%s: no context for device", __func__);
+ return -ENODEV;
+ }
+
+ mutex_lock(&ictx->lock);
+
+ if (!ictx->dev_present_intf0) {
+ err("%s: no iMON device present", __func__);
+ retval = -ENODEV;
+ goto exit;
+ }
+
+ if (n_bytes <= 0 || n_bytes > 32) {
+ err("%s: invalid payload size", __func__);
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ if (copy_from_user(ictx->tx.data_buf, buf, n_bytes)) {
+ retval = -EFAULT;
+ goto exit;
+ }
+
+ /* Pad with spaces */
+ for (i = n_bytes; i < 32; ++i)
+ ictx->tx.data_buf[i] = ' ';
+
+ for (i = 32; i < 35; ++i)
+ ictx->tx.data_buf[i] = 0xFF;
+
+ offset = 0;
+ seq = 0;
+
+ do {
+ memcpy(ictx->usb_tx_buf, ictx->tx.data_buf + offset, 7);
+ ictx->usb_tx_buf[7] = (unsigned char) seq;
+
+ retval = send_packet(ictx);
+ if (retval) {
+ err("%s: send packet failed for packet #%d",
+ __func__, seq/2);
+ goto exit;
+ } else {
+ seq += 2;
+ offset += 7;
+ }
+
+ } while (offset < 35);
+
+ /* Send packet #6 */
+ memcpy(ictx->usb_tx_buf, &vfd_packet6, sizeof(vfd_packet6));
+ ictx->usb_tx_buf[7] = (unsigned char) seq;
+ retval = send_packet(ictx);
+ if (retval)
+ err("%s: send packet failed for packet #%d",
+ __func__, seq / 2);
+
+exit:
+ mutex_unlock(&ictx->lock);
+
+ return (!retval) ? n_bytes : retval;
+}
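As a usage sketch (not part of this patch), the VFD can be driven through the character device registered for it. The /dev/lcd0 node name is an assumption based on DEVICE_NAME and the minor numbering, and writes shorter than 32 bytes are padded with spaces by vfd_write() above:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char msg[] = "Hello iMON VFD";
	/* assumed node name; the minor comes from DISPLAY_MINOR_BASE */
	int fd = open("/dev/lcd0", O_WRONLY);

	if (fd < 0)
		return 1;
	/* up to 32 bytes per write; vfd_write() pads the remainder with spaces */
	if (write(fd, msg, strlen(msg)) < 0) {
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}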
+
+/**
+ * Writes data to the LCD. The iMON OEM LCD screen expects 8-byte
+ * packets. Each write must supply exactly 8 raw bytes, which are
+ * passed to the display as-is (the underlying binary packet format
+ * is a bit complicated).
+ *
+ * The device itself is not a "traditional" text-mode display. It's
+ * actually a 16x96 pixel bitmap display. That means if you want to
+ * display text, you've got to have your own "font" and translate the
+ * text into bitmaps for display. This is really flexible (you can
+ * display whatever diacritics you need, and so on), but it's also
+ * a lot more complicated than most LCDs...
+ */
+static ssize_t lcd_write(struct file *file, const char *buf,
+ size_t n_bytes, loff_t *pos)
+{
+ int retval = 0;
+ struct imon_context *ictx;
+
+ ictx = (struct imon_context *)file->private_data;
+ if (!ictx) {
+ err("%s: no context for device", __func__);
+ return -ENODEV;
+ }
+
+ mutex_lock(&ictx->lock);
+
+ if (!ictx->display_supported) {
+ err("%s: no iMON display present", __func__);
+ retval = -ENODEV;
+ goto exit;
+ }
+
+ if (n_bytes != 8) {
+ err("%s: invalid payload size: %d (expecting 8)",
+ __func__, (int) n_bytes);
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ if (copy_from_user(ictx->usb_tx_buf, buf, 8)) {
+ retval = -EFAULT;
+ goto exit;
+ }
+
+ retval = send_packet(ictx);
+ if (retval) {
+ err("%s: send packet failed!", __func__);
+ goto exit;
+ } else {
+ dev_dbg(ictx->dev, "%s: write %d bytes to LCD\n",
+ __func__, (int) n_bytes);
+ }
+exit:
+ mutex_unlock(&ictx->lock);
+ return (!retval) ? n_bytes : retval;
+}
+
+/**
+ * Callback function for USB core API: transmit data
+ */
+static void usb_tx_callback(struct urb *urb)
+{
+ struct imon_context *ictx;
+
+ if (!urb)
+ return;
+ ictx = (struct imon_context *)urb->context;
+ if (!ictx)
+ return;
+
+ ictx->tx.status = urb->status;
+
+ /* notify waiters that write has finished */
+ ictx->tx.busy = 0;
+ smp_rmb(); /* ensure later readers know we're not busy */
+ complete(&ictx->tx.finished);
+}
+
+/**
+ * mce/rc6 keypresses have no distinct release code, use timer
+ */
+static void imon_mce_timeout(unsigned long data)
+{
+ struct imon_context *ictx = (struct imon_context *)data;
+
+ input_report_key(ictx->idev, ictx->last_keycode, 0);
+ input_sync(ictx->idev);
+}
+
+/**
+ * report touchscreen input
+ */
+static void imon_touch_display_timeout(unsigned long data)
+{
+ struct imon_context *ictx = (struct imon_context *)data;
+
+ if (ictx->display_type != IMON_DISPLAY_TYPE_VGA)
+ return;
+
+ input_report_abs(ictx->touch, ABS_X, ictx->touch_x);
+ input_report_abs(ictx->touch, ABS_Y, ictx->touch_y);
+ input_report_key(ictx->touch, BTN_TOUCH, 0x00);
+ input_sync(ictx->touch);
+}
+
+/**
+ * iMON IR receivers support two different signal sets -- those used by
+ * the iMON remotes, and those used by the Windows MCE remotes (which is
+ * really just RC-6), but only one or the other at a time, as the signals
+ * are decoded onboard the receiver.
+ */
+int imon_ir_change_protocol(void *priv, u64 ir_type)
+{
+ int retval;
+ struct imon_context *ictx = priv;
+ struct device *dev = ictx->dev;
+ bool pad_mouse;
+ unsigned char ir_proto_packet[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x86 };
+
+ if (ir_type && !(ir_type & ictx->props->allowed_protos))
+ dev_warn(dev, "Looks like you're trying to use an IR protocol "
+ "this device does not support\n");
+
+ switch (ir_type) {
+ case IR_TYPE_RC6:
+ dev_dbg(dev, "Configuring IR receiver for MCE protocol\n");
+ ir_proto_packet[0] = 0x01;
+ pad_mouse = false;
+ init_timer(&ictx->itimer);
+ ictx->itimer.data = (unsigned long)ictx;
+ ictx->itimer.function = imon_mce_timeout;
+ break;
+ case IR_TYPE_UNKNOWN:
+ case IR_TYPE_OTHER:
+ dev_dbg(dev, "Configuring IR receiver for iMON protocol\n");
+ if (pad_stabilize)
+ pad_mouse = true;
+ else {
+ dev_dbg(dev, "PAD stabilize functionality disabled\n");
+ pad_mouse = false;
+ }
+ /* ir_proto_packet[0] = 0x00; // already the default */
+ ir_type = IR_TYPE_OTHER;
+ break;
+ default:
+ dev_warn(dev, "Unsupported IR protocol specified, overriding "
+ "to iMON IR protocol\n");
+ if (pad_stabilize)
+ pad_mouse = true;
+ else {
+ dev_dbg(dev, "PAD stabilize functionality disabled\n");
+ pad_mouse = false;
+ }
+ /* ir_proto_packet[0] = 0x00; // already the default */
+ ir_type = IR_TYPE_OTHER;
+ break;
+ }
+
+ memcpy(ictx->usb_tx_buf, &ir_proto_packet, sizeof(ir_proto_packet));
+
+ retval = send_packet(ictx);
+ if (retval)
+ goto out;
+
+ ictx->ir_type = ir_type;
+ ictx->pad_mouse = pad_mouse;
+
+out:
+ return retval;
+}
+
+static inline int tv2int(const struct timeval *a, const struct timeval *b)
+{
+ int usecs = 0;
+ int sec = 0;
+
+ if (b->tv_usec > a->tv_usec) {
+ usecs = 1000000;
+ sec--;
+ }
+
+ usecs += a->tv_usec - b->tv_usec;
+
+ sec += a->tv_sec - b->tv_sec;
+ sec *= 1000;
+ usecs /= 1000;
+ sec += usecs;
+
+ if (sec < 0)
+ sec = 1000;
+
+ return sec;
+}
+
+/**
+ * The directional pad behaves a bit differently, depending on whether this is
+ * one of the older ffdc devices or a newer device. Newer devices appear to
+ * have a higher resolution matrix for more precise mouse movement, but it
+ * makes things overly sensitive in keyboard mode, so we do some interesting
+ * contortions to make it less touchy. Older devices run through the same
+ * routine with shorter timeout and a smaller threshold.
+ */
+static int stabilize(int a, int b, u16 timeout, u16 threshold)
+{
+ struct timeval ct;
+ static struct timeval prev_time = {0, 0};
+ static struct timeval hit_time = {0, 0};
+ static int x, y, prev_result, hits;
+ int result = 0;
+ int msec, msec_hit;
+
+ do_gettimeofday(&ct);
+ msec = tv2int(&ct, &prev_time);
+ msec_hit = tv2int(&ct, &hit_time);
+
+ if (msec > 100) {
+ x = 0;
+ y = 0;
+ hits = 0;
+ }
+
+ x += a;
+ y += b;
+
+ prev_time = ct;
+
+ if (abs(x) > threshold || abs(y) > threshold) {
+ if (abs(y) > abs(x))
+ result = (y > 0) ? 0x7F : 0x80;
+ else
+ result = (x > 0) ? 0x7F00 : 0x8000;
+
+ x = 0;
+ y = 0;
+
+ if (result == prev_result) {
+ hits++;
+
+ if (hits > 3) {
+ switch (result) {
+ case 0x7F:
+ y = 17 * threshold / 30;
+ break;
+ case 0x80:
+ y -= 17 * threshold / 30;
+ break;
+ case 0x7F00:
+ x = 17 * threshold / 30;
+ break;
+ case 0x8000:
+ x -= 17 * threshold / 30;
+ break;
+ }
+ }
+
+ if (hits == 2 && msec_hit < timeout) {
+ result = 0;
+ hits = 1;
+ }
+ } else {
+ prev_result = result;
+ hits = 1;
+ hit_time = ct;
+ }
+ }
+
+ return result;
+}
+
+static u32 imon_remote_key_lookup(struct imon_context *ictx, u32 hw_code)
+{
+ u32 scancode = be32_to_cpu(hw_code);
+ u32 keycode;
+ u32 release;
+ bool is_release_code = false;
+
+ /* Look for the initial press of a button */
+ keycode = ir_g_keycode_from_table(ictx->idev, scancode);
+
+ /* Look for the release of a button */
+ if (keycode == KEY_RESERVED) {
+ release = scancode & ~0x4000;
+ keycode = ir_g_keycode_from_table(ictx->idev, release);
+ if (keycode != KEY_RESERVED)
+ is_release_code = true;
+ }
+
+ ictx->release_code = is_release_code;
+
+ return keycode;
+}
+
+static u32 imon_mce_key_lookup(struct imon_context *ictx, u32 hw_code)
+{
+ u32 scancode = be32_to_cpu(hw_code);
+ u32 keycode;
+
+#define MCE_KEY_MASK 0x7000
+#define MCE_TOGGLE_BIT 0x8000
+
+ /*
+ * On some receivers, mce keys decode to 0x8000f04xx and 0x8000f84xx
+ * (the toggle bit flipping between alternating key presses), while
+ * on other receivers, we see 0x8000f74xx and 0x8000ff4xx. To keep
+ * the table trim, we always or in the bits to look up 0x8000ff4xx,
+ * but we can't or them into all codes, as some keys are decoded in
+ * a different way w/o the same use of the toggle bit...
+ */
+ if ((scancode >> 24) & 0x80)
+ scancode = scancode | MCE_KEY_MASK | MCE_TOGGLE_BIT;
+
+ keycode = ir_g_keycode_from_table(ictx->idev, scancode);
+
+ return keycode;
+}
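To make the masking concrete, here is a standalone check with hypothetical scancode values (only the low 16 bits matter for the OR): two captures of the same key that differ only in the toggle bit normalize to the same lookup value:

#include <assert.h>
#include <stdint.h>

#define MCE_KEY_MASK	0x7000
#define MCE_TOGGLE_BIT	0x8000

int main(void)
{
	uint32_t press_a = 0x800f0416;	/* hypothetical scancode, toggle bit clear */
	uint32_t press_b = 0x800f8416;	/* same key with the toggle bit set */

	/* both become 0x800ff416, so a single keymap entry covers both variants */
	assert((press_a | MCE_KEY_MASK | MCE_TOGGLE_BIT) ==
	       (press_b | MCE_KEY_MASK | MCE_TOGGLE_BIT));
	return 0;
}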
+
+static u32 imon_panel_key_lookup(u64 hw_code)
+{
+ int i;
+ u64 code = be64_to_cpu(hw_code);
+ u32 keycode = KEY_RESERVED;
+
+ for (i = 0; i < ARRAY_SIZE(imon_panel_key_table); i++) {
+ if (imon_panel_key_table[i].hw_code == (code | 0xffee)) {
+ keycode = imon_panel_key_table[i].keycode;
+ break;
+ }
+ }
+
+ return keycode;
+}
+
+static bool imon_mouse_event(struct imon_context *ictx,
+ unsigned char *buf, int len)
+{
+ char rel_x = 0x00, rel_y = 0x00;
+ u8 right_shift = 1;
+ bool mouse_input = 1;
+ int dir = 0;
+
+ /* newer iMON device PAD or mouse button */
+ if (ictx->product != 0xffdc && (buf[0] & 0x01) && len == 5) {
+ rel_x = buf[2];
+ rel_y = buf[3];
+ right_shift = 1;
+ /* 0xffdc iMON PAD or mouse button input */
+ } else if (ictx->product == 0xffdc && (buf[0] & 0x40) &&
+ !((buf[1] & 0x01) || ((buf[1] >> 2) & 0x01))) {
+ rel_x = (buf[1] & 0x08) | (buf[1] & 0x10) >> 2 |
+ (buf[1] & 0x20) >> 4 | (buf[1] & 0x40) >> 6;
+ if (buf[0] & 0x02)
+ rel_x |= ~0x0f;
+ rel_x = rel_x + rel_x / 2;
+ rel_y = (buf[2] & 0x08) | (buf[2] & 0x10) >> 2 |
+ (buf[2] & 0x20) >> 4 | (buf[2] & 0x40) >> 6;
+ if (buf[0] & 0x01)
+ rel_y |= ~0x0f;
+ rel_y = rel_y + rel_y / 2;
+ right_shift = 2;
+ /* some ffdc devices decode mouse buttons differently... */
+ } else if (ictx->product == 0xffdc && (buf[0] == 0x68)) {
+ right_shift = 2;
+ /* ch+/- buttons, which we use for an emulated scroll wheel */
+ } else if (ictx->kc == KEY_CHANNELUP && (buf[2] & 0x40) != 0x40) {
+ dir = 1;
+ } else if (ictx->kc == KEY_CHANNELDOWN && (buf[2] & 0x40) != 0x40) {
+ dir = -1;
+ } else
+ mouse_input = 0;
+
+ if (mouse_input) {
+ dev_dbg(ictx->dev, "sending mouse data via input subsystem\n");
+
+ if (dir) {
+ input_report_rel(ictx->idev, REL_WHEEL, dir);
+ } else if (rel_x || rel_y) {
+ input_report_rel(ictx->idev, REL_X, rel_x);
+ input_report_rel(ictx->idev, REL_Y, rel_y);
+ } else {
+ input_report_key(ictx->idev, BTN_LEFT, buf[1] & 0x1);
+ input_report_key(ictx->idev, BTN_RIGHT,
+ buf[1] >> right_shift & 0x1);
+ }
+ input_sync(ictx->idev);
+ ictx->last_keycode = ictx->kc;
+ }
+
+ return mouse_input;
+}
+
+static void imon_touch_event(struct imon_context *ictx, unsigned char *buf)
+{
+ mod_timer(&ictx->ttimer, jiffies + TOUCH_TIMEOUT);
+ ictx->touch_x = (buf[0] << 4) | (buf[1] >> 4);
+ ictx->touch_y = 0xfff - ((buf[2] << 4) | (buf[1] & 0xf));
+ input_report_abs(ictx->touch, ABS_X, ictx->touch_x);
+ input_report_abs(ictx->touch, ABS_Y, ictx->touch_y);
+ input_report_key(ictx->touch, BTN_TOUCH, 0x01);
+ input_sync(ictx->touch);
+}
+
+static void imon_pad_to_keys(struct imon_context *ictx, unsigned char *buf)
+{
+ int dir = 0;
+ char rel_x = 0x00, rel_y = 0x00;
+ u16 timeout, threshold;
+ u64 temp_key;
+ u32 remote_key;
+
+ /*
+ * The imon directional pad functions more like a touchpad. Bytes 3 & 4
+ * contain a position coordinate (x,y), with each component ranging
+ * from -14 to 14. We want to down-sample this to only 4 discrete values
+ * for up/down/left/right arrow keys. Also, when you get too close to
+ * diagonals, it has a tendency to jump back and forth, so let's try to
+ * ignore movement when it gets too close.
+ */
+ if (ictx->product != 0xffdc) {
+ /* first, pad to 8 bytes so it conforms with everything else */
+ buf[5] = buf[6] = buf[7] = 0;
+ timeout = 500; /* in msecs */
+ /* (2*threshold) x (2*threshold) square */
+ threshold = pad_thresh ? pad_thresh : 28;
+ rel_x = buf[2];
+ rel_y = buf[3];
+
+ if (ictx->ir_type == IR_TYPE_OTHER && pad_stabilize) {
+ if ((buf[1] == 0) && ((rel_x != 0) || (rel_y != 0))) {
+ dir = stabilize((int)rel_x, (int)rel_y,
+ timeout, threshold);
+ if (!dir) {
+ ictx->kc = KEY_UNKNOWN;
+ return;
+ }
+ buf[2] = dir & 0xFF;
+ buf[3] = (dir >> 8) & 0xFF;
+ memcpy(&temp_key, buf, sizeof(temp_key));
+ remote_key = (u32) (le64_to_cpu(temp_key)
+ & 0xffffffff);
+ ictx->kc = imon_remote_key_lookup(ictx,
+ remote_key);
+ }
+ } else {
+ if (abs(rel_y) > abs(rel_x)) {
+ buf[2] = (rel_y > 0) ? 0x7F : 0x80;
+ buf[3] = 0;
+ ictx->kc = (rel_y > 0) ? KEY_DOWN : KEY_UP;
+ } else {
+ buf[2] = 0;
+ buf[3] = (rel_x > 0) ? 0x7F : 0x80;
+ ictx->kc = (rel_x > 0) ? KEY_RIGHT : KEY_LEFT;
+ }
+ }
+
+ /*
+ * Handle on-board decoded pad events for e.g. older VFD/iMON-Pad
+ * device (15c2:ffdc). The remote generates various codes from
+ * 0x68nnnnB7 to 0x6AnnnnB7, the left mouse button generates
+ * 0x688301b7 and the right one 0x688481b7. All other keys generate
+ * 0x2nnnnnnn. Position coordinate is encoded in buf[1] and buf[2] with
+ * reversed endianness. Extract direction from buffer, swap the endianness,
+ * adjust sign and feed the values into stabilize(). The resulting codes
+ * will be 0x01008000, 0x01007F00, which match the newer devices.
+ */
+ } else {
+ timeout = 10; /* in msecs */
+ /* (2*threshold) x (2*threshold) square */
+ threshold = pad_thresh ? pad_thresh : 15;
+
+ /* buf[1] is x */
+ rel_x = (buf[1] & 0x08) | (buf[1] & 0x10) >> 2 |
+ (buf[1] & 0x20) >> 4 | (buf[1] & 0x40) >> 6;
+ if (buf[0] & 0x02)
+ rel_x |= ~0x10+1;
+ /* buf[2] is y */
+ rel_y = (buf[2] & 0x08) | (buf[2] & 0x10) >> 2 |
+ (buf[2] & 0x20) >> 4 | (buf[2] & 0x40) >> 6;
+ if (buf[0] & 0x01)
+ rel_y |= ~0x10+1;
+
+ buf[0] = 0x01;
+ buf[1] = buf[4] = buf[5] = buf[6] = buf[7] = 0;
+
+ if (ictx->ir_type == IR_TYPE_OTHER && pad_stabilize) {
+ dir = stabilize((int)rel_x, (int)rel_y,
+ timeout, threshold);
+ if (!dir) {
+ ictx->kc = KEY_UNKNOWN;
+ return;
+ }
+ buf[2] = dir & 0xFF;
+ buf[3] = (dir >> 8) & 0xFF;
+ memcpy(&temp_key, buf, sizeof(temp_key));
+ remote_key = (u32) (le64_to_cpu(temp_key) & 0xffffffff);
+ ictx->kc = imon_remote_key_lookup(ictx, remote_key);
+ } else {
+ if (abs(rel_y) > abs(rel_x)) {
+ buf[2] = (rel_y > 0) ? 0x7F : 0x80;
+ buf[3] = 0;
+ ictx->kc = (rel_y > 0) ? KEY_DOWN : KEY_UP;
+ } else {
+ buf[2] = 0;
+ buf[3] = (rel_x > 0) ? 0x7F : 0x80;
+ ictx->kc = (rel_x > 0) ? KEY_RIGHT : KEY_LEFT;
+ }
+ }
+ }
+}
+
+static int imon_parse_press_type(struct imon_context *ictx,
+ unsigned char *buf, u8 ktype)
+{
+ int press_type = 0;
+ int rep_delay = ictx->idev->rep[REP_DELAY];
+ int rep_period = ictx->idev->rep[REP_PERIOD];
+
+ /* key release of 0x02XXXXXX key */
+ if (ictx->kc == KEY_RESERVED && buf[0] == 0x02 && buf[3] == 0x00)
+ ictx->kc = ictx->last_keycode;
+
+ /* mouse button release on (some) 0xffdc devices */
+ else if (ictx->kc == KEY_RESERVED && buf[0] == 0x68 && buf[1] == 0x82 &&
+ buf[2] == 0x81 && buf[3] == 0xb7)
+ ictx->kc = ictx->last_keycode;
+
+ /* mouse button release on (some other) 0xffdc devices */
+ else if (ictx->kc == KEY_RESERVED && buf[0] == 0x01 && buf[1] == 0x00 &&
+ buf[2] == 0x81 && buf[3] == 0xb7)
+ ictx->kc = ictx->last_keycode;
+
+ /* mce-specific button handling */
+ else if (ktype == IMON_KEY_MCE) {
+ /* initial press */
+ if (ictx->kc != ictx->last_keycode
+ || buf[2] != ictx->mce_toggle_bit) {
+ ictx->last_keycode = ictx->kc;
+ ictx->mce_toggle_bit = buf[2];
+ press_type = 1;
+ mod_timer(&ictx->itimer,
+ jiffies + msecs_to_jiffies(rep_delay));
+ /* repeat */
+ } else {
+ press_type = 2;
+ mod_timer(&ictx->itimer,
+ jiffies + msecs_to_jiffies(rep_period));
+ }
+
+ /* incoherent or irrelevant data */
+ } else if (ictx->kc == KEY_RESERVED)
+ press_type = -EINVAL;
+
+ /* key release of 0xXXXXXXb7 key */
+ else if (ictx->release_code)
+ press_type = 0;
+
+ /* this is a button press */
+ else
+ press_type = 1;
+
+ return press_type;
+}
+
+/**
+ * Process the incoming packet
+ */
+static void imon_incoming_packet(struct imon_context *ictx,
+ struct urb *urb, int intf)
+{
+ int len = urb->actual_length;
+ unsigned char *buf = urb->transfer_buffer;
+ struct device *dev = ictx->dev;
+ u32 kc;
+ bool norelease = 0;
+ int i;
+ u64 temp_key;
+ u64 panel_key = 0;
+ u32 remote_key = 0;
+ struct input_dev *idev = NULL;
+ int press_type = 0;
+ int msec;
+ struct timeval t;
+ static struct timeval prev_time = { 0, 0 };
+ u8 ktype = IMON_KEY_IMON;
+
+ idev = ictx->idev;
+
+ /* filter out junk data on the older 0xffdc imon devices */
+ if ((buf[0] == 0xff) && (buf[7] == 0xff))
+ return;
+
+ /* Figure out what key was pressed */
+ memcpy(&temp_key, buf, sizeof(temp_key));
+ if (len == 8 && buf[7] == 0xee) {
+ ktype = IMON_KEY_PANEL;
+ panel_key = le64_to_cpu(temp_key);
+ kc = imon_panel_key_lookup(panel_key);
+ } else {
+ remote_key = (u32) (le64_to_cpu(temp_key) & 0xffffffff);
+ if (ictx->ir_type == IR_TYPE_RC6) {
+ if (buf[0] == 0x80)
+ ktype = IMON_KEY_MCE;
+ kc = imon_mce_key_lookup(ictx, remote_key);
+ } else
+ kc = imon_remote_key_lookup(ictx, remote_key);
+ }
+
+ /* keyboard/mouse mode toggle button */
+ if (kc == KEY_KEYBOARD && !ictx->release_code) {
+ ictx->last_keycode = kc;
+ if (!nomouse) {
+ ictx->pad_mouse = ~(ictx->pad_mouse) & 0x1;
+ dev_dbg(dev, "toggling to %s mode\n",
+ ictx->pad_mouse ? "mouse" : "keyboard");
+ return;
+ } else {
+ ictx->pad_mouse = 0;
+ dev_dbg(dev, "mouse mode disabled, passing key value\n");
+ }
+ }
+
+ ictx->kc = kc;
+
+ /* send touchscreen events through input subsystem if touchscreen data */
+ if (ictx->display_type == IMON_DISPLAY_TYPE_VGA && len == 8 &&
+ buf[7] == 0x86) {
+ imon_touch_event(ictx, buf);
+
+ /* look for mouse events with pad in mouse mode */
+ } else if (ictx->pad_mouse) {
+ if (imon_mouse_event(ictx, buf, len))
+ return;
+ }
+
+ /* Now for some special handling to convert pad input to arrow keys */
+ if (((len == 5) && (buf[0] == 0x01) && (buf[4] == 0x00)) ||
+ ((len == 8) && (buf[0] & 0x40) &&
+ !(buf[1] & 0x1 || buf[1] >> 2 & 0x1))) {
+ len = 8;
+ imon_pad_to_keys(ictx, buf);
+ norelease = 1;
+ }
+
+ if (debug) {
+ printk(KERN_INFO "intf%d decoded packet: ", intf);
+ for (i = 0; i < len; ++i)
+ printk("%02x ", buf[i]);
+ printk("\n");
+ }
+
+ press_type = imon_parse_press_type(ictx, buf, ktype);
+ if (press_type < 0)
+ goto not_input_data;
+
+ if (ictx->kc == KEY_UNKNOWN)
+ goto unknown_key;
+
+ /* KEY_MUTE repeats from MCE and knob need to be suppressed */
+ if ((ictx->kc == KEY_MUTE && ictx->kc == ictx->last_keycode)
+ && (buf[7] == 0xee || ktype == IMON_KEY_MCE)) {
+ do_gettimeofday(&t);
+ msec = tv2int(&t, &prev_time);
+ prev_time = t;
+ if (msec < idev->rep[REP_DELAY])
+ return;
+ }
+
+ input_report_key(idev, ictx->kc, press_type);
+ input_sync(idev);
+
+ /* panel keys and some remote keys don't generate a release */
+ if (panel_key || norelease) {
+ input_report_key(idev, ictx->kc, 0);
+ input_sync(idev);
+ }
+
+ ictx->last_keycode = ictx->kc;
+
+ return;
+
+unknown_key:
+ dev_info(dev, "%s: unknown keypress, code 0x%llx\n", __func__,
+ (panel_key ? be64_to_cpu(panel_key) :
+ be32_to_cpu(remote_key)));
+ return;
+
+not_input_data:
+ if (len != 8) {
+ dev_warn(dev, "imon %s: invalid incoming packet "
+ "size (len = %d, intf%d)\n", __func__, len, intf);
+ return;
+ }
+
+ /* iMON 2.4G associate frame */
+ if (buf[0] == 0x00 &&
+ buf[2] == 0xFF && /* REFID */
+ buf[3] == 0xFF &&
+ buf[4] == 0xFF &&
+ buf[5] == 0xFF && /* iMON 2.4G */
+ ((buf[6] == 0x4E && buf[7] == 0xDF) || /* LT */
+ (buf[6] == 0x5E && buf[7] == 0xDF))) { /* DT */
+ dev_warn(dev, "%s: remote associated refid=%02X\n",
+ __func__, buf[1]);
+ ictx->rf_isassociating = 0;
+ }
+}
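
The KEY_MUTE special case above rate-limits repeats by comparing the time since the previous packet against the input device's REP_DELAY; tv2int() is defined earlier in imon.c (outside this hunk) and, judging from that comparison, returns the difference between two timevals in milliseconds. A minimal sketch of such a helper, under that assumption:

#include <linux/time.h>

/* Sketch of a timeval difference in milliseconds, matching how tv2int()
 * appears to be used above (adequate for the sub-second delays compared
 * against idev->rep[REP_DELAY]). */
static int example_tv_diff_msec(const struct timeval *now,
				const struct timeval *then)
{
	return (now->tv_sec - then->tv_sec) * 1000 +
	       (now->tv_usec - then->tv_usec) / 1000;
}
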
+
+/*
+ * Callback function for USB core API: receive data
+ */
+static void usb_rx_callback_intf0(struct urb *urb)
+{
+ struct imon_context *ictx;
+ int intfnum = 0;
+
+ if (!urb)
+ return;
+
+ ictx = (struct imon_context *)urb->context;
+ if (!ictx)
+ return;
+
+ switch (urb->status) {
+ case -ENOENT: /* usbcore unlink successful! */
+ return;
+
+ case -ESHUTDOWN: /* transport endpoint was shut down */
+ break;
+
+ case 0:
+ imon_incoming_packet(ictx, urb, intfnum);
+ break;
+
+ default:
+ dev_warn(ictx->dev, "imon %s: status(%d): ignored\n",
+ __func__, urb->status);
+ break;
+ }
+
+ usb_submit_urb(ictx->rx_urb_intf0, GFP_ATOMIC);
+}
+
+static void usb_rx_callback_intf1(struct urb *urb)
+{
+ struct imon_context *ictx;
+ int intfnum = 1;
+
+ if (!urb)
+ return;
+
+ ictx = (struct imon_context *)urb->context;
+ if (!ictx)
+ return;
+
+ switch (urb->status) {
+ case -ENOENT: /* usbcore unlink successful! */
+ return;
+
+ case -ESHUTDOWN: /* transport endpoint was shut down */
+ break;
+
+ case 0:
+ imon_incoming_packet(ictx, urb, intfnum);
+ break;
+
+ default:
+ dev_warn(ictx->dev, "imon %s: status(%d): ignored\n",
+ __func__, urb->status);
+ break;
+ }
+
+ usb_submit_urb(ictx->rx_urb_intf1, GFP_ATOMIC);
+}
+
+static struct input_dev *imon_init_idev(struct imon_context *ictx)
+{
+ struct input_dev *idev;
+ struct ir_dev_props *props;
+ struct ir_input_dev *ir;
+ int ret, i;
+
+ idev = input_allocate_device();
+ if (!idev) {
+ dev_err(ictx->dev, "remote input dev allocation failed\n");
+ goto idev_alloc_failed;
+ }
+
+ props = kzalloc(sizeof(struct ir_dev_props), GFP_KERNEL);
+ if (!props) {
+ dev_err(ictx->dev, "remote ir dev props allocation failed\n");
+ goto props_alloc_failed;
+ }
+
+ ir = kzalloc(sizeof(struct ir_input_dev), GFP_KERNEL);
+ if (!ir) {
+ dev_err(ictx->dev, "remote ir input dev allocation failed\n");
+ goto ir_dev_alloc_failed;
+ }
+
+ snprintf(ictx->name_idev, sizeof(ictx->name_idev),
+ "iMON Remote (%04x:%04x)", ictx->vendor, ictx->product);
+ idev->name = ictx->name_idev;
+
+ usb_make_path(ictx->usbdev_intf0, ictx->phys_idev,
+ sizeof(ictx->phys_idev));
+ strlcat(ictx->phys_idev, "/input0", sizeof(ictx->phys_idev));
+ idev->phys = ictx->phys_idev;
+
+ idev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP) | BIT_MASK(EV_REL);
+
+ idev->keybit[BIT_WORD(BTN_MOUSE)] =
+ BIT_MASK(BTN_LEFT) | BIT_MASK(BTN_RIGHT);
+ idev->relbit[0] = BIT_MASK(REL_X) | BIT_MASK(REL_Y) |
+ BIT_MASK(REL_WHEEL);
+
+ /* panel and/or knob code support */
+ for (i = 0; i < ARRAY_SIZE(imon_panel_key_table); i++) {
+ u32 kc = imon_panel_key_table[i].keycode;
+ __set_bit(kc, idev->keybit);
+ }
+
+ props->priv = ictx;
+ props->driver_type = RC_DRIVER_SCANCODE;
+ /* IR_TYPE_OTHER maps to iMON PAD remote, IR_TYPE_RC6 to MCE remote */
+ props->allowed_protos = IR_TYPE_OTHER | IR_TYPE_RC6;
+ props->change_protocol = imon_ir_change_protocol;
+ ictx->props = props;
+
+ ictx->ir = ir;
+ memcpy(&ir->dev, ictx->dev, sizeof(struct device));
+
+ usb_to_input_id(ictx->usbdev_intf0, &idev->id);
+ idev->dev.parent = ictx->dev;
+
+ input_set_drvdata(idev, ir);
+
+ ret = ir_input_register(idev, RC_MAP_IMON_PAD, props, MOD_NAME);
+ if (ret < 0) {
+ dev_err(ictx->dev, "remote input dev register failed\n");
+ goto idev_register_failed;
+ }
+
+ return idev;
+
+idev_register_failed:
+ kfree(ir);
+ir_dev_alloc_failed:
+ kfree(props);
+props_alloc_failed:
+ input_free_device(idev);
+idev_alloc_failed:
+
+ return NULL;
+}
+
+static struct input_dev *imon_init_touch(struct imon_context *ictx)
+{
+ struct input_dev *touch;
+ int ret;
+
+ touch = input_allocate_device();
+ if (!touch) {
+ dev_err(ictx->dev, "touchscreen input dev allocation failed\n");
+ goto touch_alloc_failed;
+ }
+
+ snprintf(ictx->name_touch, sizeof(ictx->name_touch),
+ "iMON USB Touchscreen (%04x:%04x)",
+ ictx->vendor, ictx->product);
+ touch->name = ictx->name_touch;
+
+ usb_make_path(ictx->usbdev_intf1, ictx->phys_touch,
+ sizeof(ictx->phys_touch));
+ strlcat(ictx->phys_touch, "/input1", sizeof(ictx->phys_touch));
+ touch->phys = ictx->phys_touch;
+
+ touch->evbit[0] =
+ BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
+ touch->keybit[BIT_WORD(BTN_TOUCH)] =
+ BIT_MASK(BTN_TOUCH);
+ input_set_abs_params(touch, ABS_X,
+ 0x00, 0xfff, 0, 0);
+ input_set_abs_params(touch, ABS_Y,
+ 0x00, 0xfff, 0, 0);
+
+ input_set_drvdata(touch, ictx);
+
+ usb_to_input_id(ictx->usbdev_intf1, &touch->id);
+ touch->dev.parent = ictx->dev;
+ ret = input_register_device(touch);
+ if (ret < 0) {
+ dev_info(ictx->dev, "touchscreen input dev register failed\n");
+ goto touch_register_failed;
+ }
+
+ return touch;
+
+touch_register_failed:
+ input_free_device(touch);
+
+touch_alloc_failed:
+ return NULL;
+}
+
+static bool imon_find_endpoints(struct imon_context *ictx,
+ struct usb_host_interface *iface_desc)
+{
+ struct usb_endpoint_descriptor *ep;
+ struct usb_endpoint_descriptor *rx_endpoint = NULL;
+ struct usb_endpoint_descriptor *tx_endpoint = NULL;
+ int ifnum = iface_desc->desc.bInterfaceNumber;
+ int num_endpts = iface_desc->desc.bNumEndpoints;
+ int i, ep_dir, ep_type;
+ bool ir_ep_found = 0;
+ bool display_ep_found = 0;
+ bool tx_control = 0;
+
+ /*
+ * Scan the endpoint list and set:
+ * first input endpoint = IR endpoint
+ * first output endpoint = display endpoint
+ */
+ for (i = 0; i < num_endpts && !(ir_ep_found && display_ep_found); ++i) {
+ ep = &iface_desc->endpoint[i].desc;
+ ep_dir = ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK;
+ ep_type = ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
+
+ if (!ir_ep_found && ep_dir == USB_DIR_IN &&
+ ep_type == USB_ENDPOINT_XFER_INT) {
+
+ rx_endpoint = ep;
+ ir_ep_found = 1;
+ dev_dbg(ictx->dev, "%s: found IR endpoint\n", __func__);
+
+ } else if (!display_ep_found && ep_dir == USB_DIR_OUT &&
+ ep_type == USB_ENDPOINT_XFER_INT) {
+ tx_endpoint = ep;
+ display_ep_found = 1;
+ dev_dbg(ictx->dev, "%s: found display endpoint\n", __func__);
+ }
+ }
+
+ if (ifnum == 0) {
+ ictx->rx_endpoint_intf0 = rx_endpoint;
+ /*
+ * tx is used to send characters to lcd/vfd, associate RF
+ * remotes, set IR protocol, and maybe more...
+ */
+ ictx->tx_endpoint = tx_endpoint;
+ } else {
+ ictx->rx_endpoint_intf1 = rx_endpoint;
+ }
+
+ /*
+ * If we didn't find a display endpoint, this is probably one of the
+ * newer iMON devices that use control urb instead of interrupt
+ */
+ if (!display_ep_found) {
+ tx_control = 1;
+ display_ep_found = 1;
+ dev_dbg(ictx->dev, "%s: device uses control endpoint, not "
+ "interface OUT endpoint\n", __func__);
+ }
+
+ /*
+ * Some iMON receivers have no display. Unfortunately, it seems
+ * that SoundGraph recycles device IDs between devices both with
+ * and without... :\
+ */
+ if (ictx->display_type == IMON_DISPLAY_TYPE_NONE) {
+ display_ep_found = 0;
+ dev_dbg(ictx->dev, "%s: device has no display\n", __func__);
+ }
+
+ /*
+ * iMON Touch devices have a VGA touchscreen, but no "display", as
+ * that refers to e.g. /dev/lcd0 (a character device LCD or VFD).
+ */
+ if (ictx->display_type == IMON_DISPLAY_TYPE_VGA) {
+ display_ep_found = 0;
+ dev_dbg(ictx->dev, "%s: iMON Touch device found\n", __func__);
+ }
+
+ /* Input endpoint is mandatory */
+ if (!ir_ep_found)
+ err("%s: no valid input (IR) endpoint found.", __func__);
+
+ ictx->tx_control = tx_control;
+
+ if (display_ep_found)
+ ictx->display_supported = true;
+
+ return ir_ep_found;
+
+}
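
The endpoint scan above classifies endpoints by masking bEndpointAddress and bmAttributes by hand. The USB core's usb_endpoint_is_int_in()/usb_endpoint_is_int_out() helpers from <linux/usb.h> encapsulate the same checks; a sketch of an equivalent scan using them (illustrative only, not how this patch does it):

#include <linux/usb.h>

/* Sketch: pick the first interrupt-IN (IR data) and interrupt-OUT (display)
 * endpoints of an interface.  Caller initializes *rx and *tx to NULL. */
static bool example_find_int_endpoints(struct usb_host_interface *iface_desc,
				       struct usb_endpoint_descriptor **rx,
				       struct usb_endpoint_descriptor **tx)
{
	int i;

	for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) {
		struct usb_endpoint_descriptor *ep =
			&iface_desc->endpoint[i].desc;

		if (!*rx && usb_endpoint_is_int_in(ep))
			*rx = ep;
		else if (!*tx && usb_endpoint_is_int_out(ep))
			*tx = ep;
	}

	return *rx != NULL;	/* the IR input endpoint is mandatory */
}
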
+
+static struct imon_context *imon_init_intf0(struct usb_interface *intf)
+{
+ struct imon_context *ictx;
+ struct urb *rx_urb;
+ struct urb *tx_urb;
+ struct device *dev = &intf->dev;
+ struct usb_host_interface *iface_desc;
+ int ret = -ENOMEM;
+
+ ictx = kzalloc(sizeof(struct imon_context), GFP_KERNEL);
+ if (!ictx) {
+ dev_err(dev, "%s: kzalloc failed for context", __func__);
+ goto exit;
+ }
+ rx_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!rx_urb) {
+ dev_err(dev, "%s: usb_alloc_urb failed for IR urb", __func__);
+ goto rx_urb_alloc_failed;
+ }
+ tx_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!tx_urb) {
+ dev_err(dev, "%s: usb_alloc_urb failed for display urb",
+ __func__);
+ goto tx_urb_alloc_failed;
+ }
+
+ mutex_init(&ictx->lock);
+
+ mutex_lock(&ictx->lock);
+
+ ictx->dev = dev;
+ ictx->usbdev_intf0 = usb_get_dev(interface_to_usbdev(intf));
+ ictx->dev_present_intf0 = 1;
+ ictx->rx_urb_intf0 = rx_urb;
+ ictx->tx_urb = tx_urb;
+
+ ictx->vendor = le16_to_cpu(ictx->usbdev_intf0->descriptor.idVendor);
+ ictx->product = le16_to_cpu(ictx->usbdev_intf0->descriptor.idProduct);
+
+ ret = -ENODEV;
+ iface_desc = intf->cur_altsetting;
+ if (!imon_find_endpoints(ictx, iface_desc)) {
+ goto find_endpoint_failed;
+ }
+
+ ictx->idev = imon_init_idev(ictx);
+ if (!ictx->idev) {
+ dev_err(dev, "%s: input device setup failed\n", __func__);
+ goto idev_setup_failed;
+ }
+
+ usb_fill_int_urb(ictx->rx_urb_intf0, ictx->usbdev_intf0,
+ usb_rcvintpipe(ictx->usbdev_intf0,
+ ictx->rx_endpoint_intf0->bEndpointAddress),
+ ictx->usb_rx_buf, sizeof(ictx->usb_rx_buf),
+ usb_rx_callback_intf0, ictx,
+ ictx->rx_endpoint_intf0->bInterval);
+
+ ret = usb_submit_urb(ictx->rx_urb_intf0, GFP_KERNEL);
+ if (ret) {
+ err("%s: usb_submit_urb failed for intf0 (%d)",
+ __func__, ret);
+ goto urb_submit_failed;
+ }
+
+ return ictx;
+
+urb_submit_failed:
+ input_unregister_device(ictx->idev);
+idev_setup_failed:
+find_endpoint_failed:
+ mutex_unlock(&ictx->lock);
+ usb_free_urb(tx_urb);
+tx_urb_alloc_failed:
+ usb_free_urb(rx_urb);
+rx_urb_alloc_failed:
+ kfree(ictx);
+exit:
+ dev_err(dev, "unable to initialize intf0, err %d\n", ret);
+
+ return NULL;
+}
+
+static struct imon_context *imon_init_intf1(struct usb_interface *intf,
+ struct imon_context *ictx)
+{
+ struct urb *rx_urb;
+ struct usb_host_interface *iface_desc;
+ int ret = -ENOMEM;
+
+ rx_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!rx_urb) {
+ err("%s: usb_alloc_urb failed for IR urb", __func__);
+ goto rx_urb_alloc_failed;
+ }
+
+ mutex_lock(&ictx->lock);
+
+ if (ictx->display_type == IMON_DISPLAY_TYPE_VGA) {
+ init_timer(&ictx->ttimer);
+ ictx->ttimer.data = (unsigned long)ictx;
+ ictx->ttimer.function = imon_touch_display_timeout;
+ }
+
+ ictx->usbdev_intf1 = usb_get_dev(interface_to_usbdev(intf));
+ ictx->dev_present_intf1 = 1;
+ ictx->rx_urb_intf1 = rx_urb;
+
+ ret = -ENODEV;
+ iface_desc = intf->cur_altsetting;
+ if (!imon_find_endpoints(ictx, iface_desc))
+ goto find_endpoint_failed;
+
+ if (ictx->display_type == IMON_DISPLAY_TYPE_VGA) {
+ ictx->touch = imon_init_touch(ictx);
+ if (!ictx->touch)
+ goto touch_setup_failed;
+ } else
+ ictx->touch = NULL;
+
+ usb_fill_int_urb(ictx->rx_urb_intf1, ictx->usbdev_intf1,
+ usb_rcvintpipe(ictx->usbdev_intf1,
+ ictx->rx_endpoint_intf1->bEndpointAddress),
+ ictx->usb_rx_buf, sizeof(ictx->usb_rx_buf),
+ usb_rx_callback_intf1, ictx,
+ ictx->rx_endpoint_intf1->bInterval);
+
+ ret = usb_submit_urb(ictx->rx_urb_intf1, GFP_KERNEL);
+
+ if (ret) {
+ err("%s: usb_submit_urb failed for intf1 (%d)",
+ __func__, ret);
+ goto urb_submit_failed;
+ }
+
+ return ictx;
+
+urb_submit_failed:
+ if (ictx->touch)
+ input_unregister_device(ictx->touch);
+touch_setup_failed:
+find_endpoint_failed:
+ mutex_unlock(&ictx->lock);
+ usb_free_urb(rx_urb);
+rx_urb_alloc_failed:
+ dev_err(ictx->dev, "unable to initialize intf0, err %d\n", ret);
+
+ return NULL;
+}
+
+/*
+ * The 0x15c2:0xffdc device ID was used for umpteen different imon
+ * devices, and all of them constantly spew interrupts, even when there
+ * is no actual data to report. However, byte 6 of this buffer looks like
+ * it's unique across device variants, so we're trying to key off that to
+ * figure out which display type (if any) and what IR protocol the device
+ * actually supports. These devices have their IR protocol hard-coded into
+ * their firmware; they can't be changed on the fly like the newer hardware.
+ */
+static void imon_get_ffdc_type(struct imon_context *ictx)
+{
+ u8 ffdc_cfg_byte = ictx->usb_rx_buf[6];
+ u8 detected_display_type = IMON_DISPLAY_TYPE_NONE;
+ u64 allowed_protos = IR_TYPE_OTHER;
+
+ switch (ffdc_cfg_byte) {
+ /* iMON Knob, no display, iMON IR + vol knob */
+ case 0x21:
+ dev_info(ictx->dev, "0xffdc iMON Knob, iMON IR");
+ ictx->display_supported = false;
+ break;
+ /* iMON VFD, no IR (does have a vol knob, though) */
+ case 0x35:
+ dev_info(ictx->dev, "0xffdc iMON VFD + knob, no IR");
+ detected_display_type = IMON_DISPLAY_TYPE_VFD;
+ break;
+ /* iMON VFD, iMON IR */
+ case 0x24:
+ case 0x85:
+ dev_info(ictx->dev, "0xffdc iMON VFD, iMON IR");
+ detected_display_type = IMON_DISPLAY_TYPE_VFD;
+ break;
+ /* iMON LCD, MCE IR */
+ case 0x9f:
+ dev_info(ictx->dev, "0xffdc iMON LCD, MCE IR");
+ detected_display_type = IMON_DISPLAY_TYPE_LCD;
+ allowed_protos = IR_TYPE_RC6;
+ break;
+ default:
+ dev_info(ictx->dev, "Unknown 0xffdc device, "
+ "defaulting to VFD and iMON IR");
+ detected_display_type = IMON_DISPLAY_TYPE_VFD;
+ break;
+ }
+
+ printk(KERN_CONT " (id 0x%02x)\n", ffdc_cfg_byte);
+
+ ictx->display_type = detected_display_type;
+ ictx->props->allowed_protos = allowed_protos;
+ ictx->ir_type = allowed_protos;
+}
+
+static void imon_set_display_type(struct imon_context *ictx,
+ struct usb_interface *intf)
+{
+ u8 configured_display_type = IMON_DISPLAY_TYPE_VFD;
+
+ /*
+ * Try to auto-detect the type of display if the user hasn't set
+ * it by hand via the display_type modparam. Default is VFD.
+ */
+
+ if (display_type == IMON_DISPLAY_TYPE_AUTO) {
+ switch (ictx->product) {
+ case 0xffdc:
+ /* set in imon_get_ffdc_type() */
+ configured_display_type = ictx->display_type;
+ break;
+ case 0x0034:
+ case 0x0035:
+ configured_display_type = IMON_DISPLAY_TYPE_VGA;
+ break;
+ case 0x0038:
+ case 0x0039:
+ case 0x0045:
+ configured_display_type = IMON_DISPLAY_TYPE_LCD;
+ break;
+ case 0x003c:
+ case 0x0041:
+ case 0x0042:
+ case 0x0043:
+ configured_display_type = IMON_DISPLAY_TYPE_NONE;
+ ictx->display_supported = false;
+ break;
+ case 0x0036:
+ case 0x0044:
+ default:
+ configured_display_type = IMON_DISPLAY_TYPE_VFD;
+ break;
+ }
+ } else {
+ configured_display_type = display_type;
+ if (display_type == IMON_DISPLAY_TYPE_NONE)
+ ictx->display_supported = false;
+ else
+ ictx->display_supported = true;
+ dev_info(ictx->dev, "%s: overriding display type to %d via "
+ "modparam\n", __func__, display_type);
+ }
+
+ ictx->display_type = configured_display_type;
+}
+
+static void imon_init_display(struct imon_context *ictx,
+ struct usb_interface *intf)
+{
+ int ret;
+
+ dev_dbg(ictx->dev, "Registering iMON display with sysfs\n");
+
+ /* set up sysfs entry for built-in clock */
+ ret = sysfs_create_group(&intf->dev.kobj,
+ &imon_display_attribute_group);
+ if (ret)
+ dev_err(ictx->dev, "Could not create display sysfs "
+ "entries(%d)", ret);
+
+ if (ictx->display_type == IMON_DISPLAY_TYPE_LCD)
+ ret = usb_register_dev(intf, &imon_lcd_class);
+ else
+ ret = usb_register_dev(intf, &imon_vfd_class);
+ if (ret)
+ /* Not a fatal error, so ignore */
+ dev_info(ictx->dev, "could not get a minor number for "
+ "display\n");
+
+}
+
+/*
+ * Callback function for USB core API: Probe
+ */
+static int __devinit imon_probe(struct usb_interface *interface,
+ const struct usb_device_id *id)
+{
+ struct usb_device *usbdev = NULL;
+ struct usb_host_interface *iface_desc = NULL;
+ struct usb_interface *first_if;
+ struct device *dev = &interface->dev;
+ int ifnum, code_length, sysfs_err;
+ int ret = 0;
+ struct imon_context *ictx = NULL;
+ struct imon_context *first_if_ctx = NULL;
+ u16 vendor, product;
+ const unsigned char fp_packet[] = { 0x40, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x88 };
+
+ code_length = BUF_CHUNK_SIZE * 8;
+
+ usbdev = usb_get_dev(interface_to_usbdev(interface));
+ iface_desc = interface->cur_altsetting;
+ ifnum = iface_desc->desc.bInterfaceNumber;
+ vendor = le16_to_cpu(usbdev->descriptor.idVendor);
+ product = le16_to_cpu(usbdev->descriptor.idProduct);
+
+ dev_dbg(dev, "%s: found iMON device (%04x:%04x, intf%d)\n",
+ __func__, vendor, product, ifnum);
+
+ /* prevent races probing devices w/multiple interfaces */
+ mutex_lock(&driver_lock);
+
+ first_if = usb_ifnum_to_if(usbdev, 0);
+ first_if_ctx = (struct imon_context *)usb_get_intfdata(first_if);
+
+ if (ifnum == 0) {
+ ictx = imon_init_intf0(interface);
+ if (!ictx) {
+ err("%s: failed to initialize context!\n", __func__);
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ if (product == 0xffdc) {
+ /* RF products *also* use 0xffdc... sigh... */
+ sysfs_err = sysfs_create_group(&interface->dev.kobj,
+ &imon_rf_attribute_group);
+ if (sysfs_err)
+ err("%s: Could not create RF sysfs entries(%d)",
+ __func__, sysfs_err);
+ }
+
+ } else {
+ /* this is the secondary interface on the device */
+ ictx = imon_init_intf1(interface, first_if_ctx);
+ if (!ictx) {
+ err("%s: failed to attach to context!\n", __func__);
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ }
+
+ usb_set_intfdata(interface, ictx);
+
+ if (ifnum == 0) {
+ /* Enable front-panel buttons and/or knobs */
+ memcpy(ictx->usb_tx_buf, &fp_packet, sizeof(fp_packet));
+ ret = send_packet(ictx);
+ /* Not fatal, but warn about it */
+ if (ret)
+ dev_info(dev, "failed to enable panel buttons "
+ "and/or knobs\n");
+
+ if (product == 0xffdc)
+ imon_get_ffdc_type(ictx);
+
+ imon_set_display_type(ictx, interface);
+
+ if (ictx->display_supported)
+ imon_init_display(ictx, interface);
+ }
+
+ /* set IR protocol/remote type */
+ ret = imon_ir_change_protocol(ictx, ictx->ir_type);
+ if (ret) {
+ dev_warn(dev, "%s: failed to set IR protocol, falling back "
+ "to standard iMON protocol mode\n", __func__);
+ ictx->ir_type = IR_TYPE_OTHER;
+ }
+
+ dev_info(dev, "iMON device (%04x:%04x, intf%d) on "
+ "usb<%d:%d> initialized\n", vendor, product, ifnum,
+ usbdev->bus->busnum, usbdev->devnum);
+
+ mutex_unlock(&ictx->lock);
+ mutex_unlock(&driver_lock);
+
+ return 0;
+
+fail:
+ mutex_unlock(&driver_lock);
+ dev_err(dev, "unable to register, err %d\n", ret);
+
+ return ret;
+}
+
+/*
+ * Callback function for USB core API: disconnect
+ */
+static void __devexit imon_disconnect(struct usb_interface *interface)
+{
+ struct imon_context *ictx;
+ struct device *dev;
+ int ifnum;
+
+ /* prevent races with multi-interface device probing and display_open */
+ mutex_lock(&driver_lock);
+
+ ictx = usb_get_intfdata(interface);
+ dev = ictx->dev;
+ ifnum = interface->cur_altsetting->desc.bInterfaceNumber;
+
+ mutex_lock(&ictx->lock);
+
+ /*
+ * sysfs_remove_group is safe to call even if sysfs_create_group
+ * hasn't been called
+ */
+ sysfs_remove_group(&interface->dev.kobj,
+ &imon_display_attribute_group);
+ sysfs_remove_group(&interface->dev.kobj,
+ &imon_rf_attribute_group);
+
+ usb_set_intfdata(interface, NULL);
+
+ /* Abort ongoing write */
+ if (ictx->tx.busy) {
+ usb_kill_urb(ictx->tx_urb);
+ complete_all(&ictx->tx.finished);
+ }
+
+ if (ifnum == 0) {
+ ictx->dev_present_intf0 = 0;
+ usb_kill_urb(ictx->rx_urb_intf0);
+ input_unregister_device(ictx->idev);
+ if (ictx->display_supported) {
+ if (ictx->display_type == IMON_DISPLAY_TYPE_LCD)
+ usb_deregister_dev(interface, &imon_lcd_class);
+ else
+ usb_deregister_dev(interface, &imon_vfd_class);
+ }
+ } else {
+ ictx->dev_present_intf1 = 0;
+ usb_kill_urb(ictx->rx_urb_intf1);
+ if (ictx->display_type == IMON_DISPLAY_TYPE_VGA)
+ input_unregister_device(ictx->touch);
+ }
+
+ if (!ictx->dev_present_intf0 && !ictx->dev_present_intf1) {
+ if (ictx->display_type == IMON_DISPLAY_TYPE_VGA)
+ del_timer_sync(&ictx->ttimer);
+ mutex_unlock(&ictx->lock);
+ if (!ictx->display_isopen)
+ free_imon_context(ictx);
+ } else {
+ if (ictx->ir_type == IR_TYPE_RC6)
+ del_timer_sync(&ictx->itimer);
+ mutex_unlock(&ictx->lock);
+ }
+
+ mutex_unlock(&driver_lock);
+
+ dev_dbg(dev, "%s: iMON device (intf%d) disconnected\n",
+ __func__, ifnum);
+}
+
+static int imon_suspend(struct usb_interface *intf, pm_message_t message)
+{
+ struct imon_context *ictx = usb_get_intfdata(intf);
+ int ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
+
+ if (ifnum == 0)
+ usb_kill_urb(ictx->rx_urb_intf0);
+ else
+ usb_kill_urb(ictx->rx_urb_intf1);
+
+ return 0;
+}
+
+static int imon_resume(struct usb_interface *intf)
+{
+ int rc = 0;
+ struct imon_context *ictx = usb_get_intfdata(intf);
+ int ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
+
+ if (ifnum == 0) {
+ usb_fill_int_urb(ictx->rx_urb_intf0, ictx->usbdev_intf0,
+ usb_rcvintpipe(ictx->usbdev_intf0,
+ ictx->rx_endpoint_intf0->bEndpointAddress),
+ ictx->usb_rx_buf, sizeof(ictx->usb_rx_buf),
+ usb_rx_callback_intf0, ictx,
+ ictx->rx_endpoint_intf0->bInterval);
+
+ rc = usb_submit_urb(ictx->rx_urb_intf0, GFP_ATOMIC);
+
+ } else {
+ usb_fill_int_urb(ictx->rx_urb_intf1, ictx->usbdev_intf1,
+ usb_rcvintpipe(ictx->usbdev_intf1,
+ ictx->rx_endpoint_intf1->bEndpointAddress),
+ ictx->usb_rx_buf, sizeof(ictx->usb_rx_buf),
+ usb_rx_callback_intf1, ictx,
+ ictx->rx_endpoint_intf1->bInterval);
+
+ rc = usb_submit_urb(ictx->rx_urb_intf1, GFP_ATOMIC);
+ }
+
+ return rc;
+}
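
imon_init() below simply registers imon_driver with the USB core; the struct itself is defined earlier in imon.c (outside this hunk) and wires the probe/disconnect/suspend/resume callbacks above to the core. A hedged sketch of what such a definition typically looks like (the id-table name is hypothetical; the real table lives earlier in imon.c):

/* Illustrative only: the real 'imon_driver' and its usb_device_id table are
 * defined earlier in imon.c and are not part of this hunk. */
static struct usb_driver example_imon_driver = {
	.name		= MOD_NAME,		/* defined earlier in imon.c */
	.probe		= imon_probe,
	.disconnect	= __devexit_p(imon_disconnect),
	.suspend	= imon_suspend,
	.resume		= imon_resume,
	.id_table	= example_imon_usb_id_table,	/* hypothetical name */
};
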
+
+static int __init imon_init(void)
+{
+ int rc;
+
+ rc = usb_register(&imon_driver);
+ if (rc) {
+ err("%s: usb register failed(%d)", __func__, rc);
+ rc = -ENODEV;
+ }
+
+ return rc;
+}
+
+static void __exit imon_exit(void)
+{
+ usb_deregister(&imon_driver);
+}
+
+module_init(imon_init);
+module_exit(imon_exit);
diff --git a/drivers/media/IR/ir-core-priv.h b/drivers/media/IR/ir-core-priv.h
new file mode 100644
index 000000000000..9a5e65a471a5
--- /dev/null
+++ b/drivers/media/IR/ir-core-priv.h
@@ -0,0 +1,126 @@
+/*
+ * Remote Controller core raw events header
+ *
+ * Copyright (C) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IR_RAW_EVENT
+#define _IR_RAW_EVENT
+
+#include <linux/slab.h>
+#include <media/ir-core.h>
+
+struct ir_raw_handler {
+ struct list_head list;
+
+ int (*decode)(struct input_dev *input_dev, struct ir_raw_event event);
+ int (*raw_register)(struct input_dev *input_dev);
+ int (*raw_unregister)(struct input_dev *input_dev);
+};
+
+struct ir_raw_event_ctrl {
+ struct work_struct rx_work; /* for the rx decoding workqueue */
+ struct kfifo kfifo; /* fifo for the pulse/space durations */
+ ktime_t last_event; /* when last event occurred */
+ enum raw_event_type last_type; /* last event type */
+ struct input_dev *input_dev; /* pointer to the parent input_dev */
+};
+
+/* helper functions and macros for IR decoders */
+static inline bool geq_margin(unsigned d1, unsigned d2, unsigned margin)
+{
+ return d1 > (d2 - margin);
+}
+
+static inline bool eq_margin(unsigned d1, unsigned d2, unsigned margin)
+{
+ return ((d1 > (d2 - margin)) && (d1 < (d2 + margin)));
+}
+
+static inline bool is_transition(struct ir_raw_event *x, struct ir_raw_event *y)
+{
+ return x->pulse != y->pulse;
+}
+
+static inline void decrease_duration(struct ir_raw_event *ev, unsigned duration)
+{
+ if (duration > ev->duration)
+ ev->duration = 0;
+ else
+ ev->duration -= duration;
+}
+
+#define TO_US(duration) (((duration) + 500) / 1000)
+#define TO_STR(is_pulse) ((is_pulse) ? "pulse" : "space")
+#define IS_RESET(ev) (ev.duration == 0)
+
+/*
+ * Routines from ir-sysfs.c - Meant to be called only internally inside
+ * ir-core
+ */
+
+int ir_register_class(struct input_dev *input_dev);
+void ir_unregister_class(struct input_dev *input_dev);
+
+/*
+ * Routines from ir-raw-event.c to be used internally and by decoders
+ */
+int ir_raw_event_register(struct input_dev *input_dev);
+void ir_raw_event_unregister(struct input_dev *input_dev);
+int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler);
+void ir_raw_handler_unregister(struct ir_raw_handler *ir_raw_handler);
+void ir_raw_init(void);
+
+
+/*
+ * Decoder initialization code
+ *
+ * This load logic is called during ir-core init and automatically loads
+ * the compiled decoders for use with IR raw events
+ */
+
+/* from ir-nec-decoder.c */
+#ifdef CONFIG_IR_NEC_DECODER_MODULE
+#define load_nec_decode() request_module("ir-nec-decoder")
+#else
+#define load_nec_decode() 0
+#endif
+
+/* from ir-rc5-decoder.c */
+#ifdef CONFIG_IR_RC5_DECODER_MODULE
+#define load_rc5_decode() request_module("ir-rc5-decoder")
+#else
+#define load_rc5_decode() 0
+#endif
+
+/* from ir-rc6-decoder.c */
+#ifdef CONFIG_IR_RC6_DECODER_MODULE
+#define load_rc6_decode() request_module("ir-rc6-decoder")
+#else
+#define load_rc6_decode() 0
+#endif
+
+/* from ir-jvc-decoder.c */
+#ifdef CONFIG_IR_JVC_DECODER_MODULE
+#define load_jvc_decode() request_module("ir-jvc-decoder")
+#else
+#define load_jvc_decode() 0
+#endif
+
+/* from ir-sony-decoder.c */
+#ifdef CONFIG_IR_SONY_DECODER_MODULE
+#define load_sony_decode() request_module("ir-sony-decoder")
+#else
+#define load_sony_decode() 0
+#endif
+
+#endif /* _IR_RAW_EVENT */
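
The eq_margin()/geq_margin() helpers above implement simple windowed comparisons of durations in nanoseconds; the decoders call them with a margin of half a protocol unit. A short worked example with the JVC timings used below (JVC_UNIT is 525000 ns, so the header pulse is nominally 8.4 ms):

#include <linux/types.h>

/* Worked example: is a measured 8.55 ms pulse an acceptable JVC header?
 *   eq_margin(8550000, 16 * 525000, 525000 / 2)
 *     = (8550000 > 8400000 - 262500) && (8550000 < 8400000 + 262500)
 *     = true, so the decoder advances to STATE_HEADER_SPACE. */
static inline bool example_jvc_header_ok(unsigned int duration_ns)
{
	return duration_ns > 16 * 525000 - 262500 &&
	       duration_ns < 16 * 525000 + 262500;
}
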
diff --git a/drivers/media/IR/ir-functions.c b/drivers/media/IR/ir-functions.c
index ab06919ad5fc..db591e421887 100644
--- a/drivers/media/IR/ir-functions.c
+++ b/drivers/media/IR/ir-functions.c
@@ -24,6 +24,7 @@
#include <linux/string.h>
#include <linux/jiffies.h>
#include <media/ir-common.h>
+#include "ir-core-priv.h"
/* -------------------------------------------------------------------------- */
diff --git a/drivers/media/IR/ir-jvc-decoder.c b/drivers/media/IR/ir-jvc-decoder.c
new file mode 100644
index 000000000000..0b804944cbb0
--- /dev/null
+++ b/drivers/media/IR/ir-jvc-decoder.c
@@ -0,0 +1,320 @@
+/* ir-jvc-decoder.c - handle JVC IR Pulse/Space protocol
+ *
+ * Copyright (C) 2010 by David Härdeman <david@hardeman.nu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitrev.h>
+#include "ir-core-priv.h"
+
+#define JVC_NBITS 16 /* dev(8) + func(8) */
+#define JVC_UNIT 525000 /* ns */
+#define JVC_HEADER_PULSE (16 * JVC_UNIT) /* lack of header -> repeat */
+#define JVC_HEADER_SPACE (8 * JVC_UNIT)
+#define JVC_BIT_PULSE (1 * JVC_UNIT)
+#define JVC_BIT_0_SPACE (1 * JVC_UNIT)
+#define JVC_BIT_1_SPACE (3 * JVC_UNIT)
+#define JVC_TRAILER_PULSE (1 * JVC_UNIT)
+#define JVC_TRAILER_SPACE (35 * JVC_UNIT)
+
+/* Used to register jvc_decoder clients */
+static LIST_HEAD(decoder_list);
+static DEFINE_SPINLOCK(decoder_lock);
+
+enum jvc_state {
+ STATE_INACTIVE,
+ STATE_HEADER_SPACE,
+ STATE_BIT_PULSE,
+ STATE_BIT_SPACE,
+ STATE_TRAILER_PULSE,
+ STATE_TRAILER_SPACE,
+};
+
+struct decoder_data {
+ struct list_head list;
+ struct ir_input_dev *ir_dev;
+ int enabled:1;
+
+ /* State machine control */
+ enum jvc_state state;
+ u16 jvc_bits;
+ u16 jvc_old_bits;
+ unsigned count;
+ bool first;
+ bool toggle;
+};
+
+
+/**
+ * get_decoder_data() - gets decoder data
+ * @ir_dev: ir input device
+ *
+ * Returns the struct decoder_data that corresponds to a device
+ */
+static struct decoder_data *get_decoder_data(struct ir_input_dev *ir_dev)
+{
+ struct decoder_data *data = NULL;
+
+ spin_lock(&decoder_lock);
+ list_for_each_entry(data, &decoder_list, list) {
+ if (data->ir_dev == ir_dev)
+ break;
+ }
+ spin_unlock(&decoder_lock);
+ return data;
+}
+
+static ssize_t store_enabled(struct device *d,
+ struct device_attribute *mattr,
+ const char *buf,
+ size_t len)
+{
+ unsigned long value;
+ struct ir_input_dev *ir_dev = dev_get_drvdata(d);
+ struct decoder_data *data = get_decoder_data(ir_dev);
+
+ if (!data)
+ return -EINVAL;
+
+ if (strict_strtoul(buf, 10, &value) || value > 1)
+ return -EINVAL;
+
+ data->enabled = value;
+
+ return len;
+}
+
+static ssize_t show_enabled(struct device *d,
+ struct device_attribute *mattr, char *buf)
+{
+ struct ir_input_dev *ir_dev = dev_get_drvdata(d);
+ struct decoder_data *data = get_decoder_data(ir_dev);
+
+ if (!data)
+ return -EINVAL;
+
+ if (data->enabled)
+ return sprintf(buf, "1\n");
+ else
+ return sprintf(buf, "0\n");
+}
+
+static DEVICE_ATTR(enabled, S_IRUGO | S_IWUSR, show_enabled, store_enabled);
+
+static struct attribute *decoder_attributes[] = {
+ &dev_attr_enabled.attr,
+ NULL
+};
+
+static struct attribute_group decoder_attribute_group = {
+ .name = "jvc_decoder",
+ .attrs = decoder_attributes,
+};
+
+/**
+ * ir_jvc_decode() - Decode one JVC pulse or space
+ * @input_dev: the struct input_dev descriptor of the device
+ * @ev: the struct ir_raw_event descriptor of the pulse/space
+ *
+ * This function returns -EINVAL if the pulse violates the state machine
+ */
+static int ir_jvc_decode(struct input_dev *input_dev, struct ir_raw_event ev)
+{
+ struct decoder_data *data;
+ struct ir_input_dev *ir_dev = input_get_drvdata(input_dev);
+
+ data = get_decoder_data(ir_dev);
+ if (!data)
+ return -EINVAL;
+
+ if (!data->enabled)
+ return 0;
+
+ if (IS_RESET(ev)) {
+ data->state = STATE_INACTIVE;
+ return 0;
+ }
+
+ if (!geq_margin(ev.duration, JVC_UNIT, JVC_UNIT / 2))
+ goto out;
+
+ IR_dprintk(2, "JVC decode started at state %d (%uus %s)\n",
+ data->state, TO_US(ev.duration), TO_STR(ev.pulse));
+
+ switch (data->state) {
+
+ case STATE_INACTIVE:
+ if (!ev.pulse)
+ break;
+
+ if (!eq_margin(ev.duration, JVC_HEADER_PULSE, JVC_UNIT / 2))
+ break;
+
+ data->count = 0;
+ data->first = true;
+ data->toggle = !data->toggle;
+ data->state = STATE_HEADER_SPACE;
+ return 0;
+
+ case STATE_HEADER_SPACE:
+ if (ev.pulse)
+ break;
+
+ if (!eq_margin(ev.duration, JVC_HEADER_SPACE, JVC_UNIT / 2))
+ break;
+
+ data->state = STATE_BIT_PULSE;
+ return 0;
+
+ case STATE_BIT_PULSE:
+ if (!ev.pulse)
+ break;
+
+ if (!eq_margin(ev.duration, JVC_BIT_PULSE, JVC_UNIT / 2))
+ break;
+
+ data->state = STATE_BIT_SPACE;
+ return 0;
+
+ case STATE_BIT_SPACE:
+ if (ev.pulse)
+ break;
+
+ data->jvc_bits <<= 1;
+ if (eq_margin(ev.duration, JVC_BIT_1_SPACE, JVC_UNIT / 2)) {
+ data->jvc_bits |= 1;
+ decrease_duration(&ev, JVC_BIT_1_SPACE);
+ } else if (eq_margin(ev.duration, JVC_BIT_0_SPACE, JVC_UNIT / 2))
+ decrease_duration(&ev, JVC_BIT_0_SPACE);
+ else
+ break;
+ data->count++;
+
+ if (data->count == JVC_NBITS)
+ data->state = STATE_TRAILER_PULSE;
+ else
+ data->state = STATE_BIT_PULSE;
+ return 0;
+
+ case STATE_TRAILER_PULSE:
+ if (!ev.pulse)
+ break;
+
+ if (!eq_margin(ev.duration, JVC_TRAILER_PULSE, JVC_UNIT / 2))
+ break;
+
+ data->state = STATE_TRAILER_SPACE;
+ return 0;
+
+ case STATE_TRAILER_SPACE:
+ if (ev.pulse)
+ break;
+
+ if (!geq_margin(ev.duration, JVC_TRAILER_SPACE, JVC_UNIT / 2))
+ break;
+
+ if (data->first) {
+ u32 scancode;
+ scancode = (bitrev8((data->jvc_bits >> 8) & 0xff) << 8) |
+ (bitrev8((data->jvc_bits >> 0) & 0xff) << 0);
+ IR_dprintk(1, "JVC scancode 0x%04x\n", scancode);
+ ir_keydown(input_dev, scancode, data->toggle);
+ data->first = false;
+ data->jvc_old_bits = data->jvc_bits;
+ } else if (data->jvc_bits == data->jvc_old_bits) {
+ IR_dprintk(1, "JVC repeat\n");
+ ir_repeat(input_dev);
+ } else {
+ IR_dprintk(1, "JVC invalid repeat msg\n");
+ break;
+ }
+
+ data->count = 0;
+ data->state = STATE_BIT_PULSE;
+ return 0;
+ }
+
+out:
+ IR_dprintk(1, "JVC decode failed at state %d (%uus %s)\n",
+ data->state, TO_US(ev.duration), TO_STR(ev.pulse));
+ data->state = STATE_INACTIVE;
+ return -EINVAL;
+}
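
The scancode assembly in STATE_TRAILER_SPACE reverses each byte with bitrev8() because the JVC protocol transmits the address and command bytes least-significant bit first, while the shift-and-or in STATE_BIT_SPACE accumulates the received bits most-significant bit first. A small worked example (bitrev8() comes from <linux/bitrev.h>):

#include <linux/bitrev.h>
#include <linux/types.h>

/* Example: if the 16 accumulated bits are 0xc080, then
 *   bitrev8(0xc0) = 0x03 (address) and bitrev8(0x80) = 0x01 (command),
 * giving the scancode 0x0301, exactly as assembled above. */
static u32 example_jvc_scancode(u16 jvc_bits)
{
	return (bitrev8((jvc_bits >> 8) & 0xff) << 8) |
		bitrev8(jvc_bits & 0xff);
}
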
+
+static int ir_jvc_register(struct input_dev *input_dev)
+{
+ struct ir_input_dev *ir_dev = input_get_drvdata(input_dev);
+ struct decoder_data *data;
+ int rc;
+
+ rc = sysfs_create_group(&ir_dev->dev.kobj, &decoder_attribute_group);
+ if (rc < 0)
+ return rc;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ sysfs_remove_group(&ir_dev->dev.kobj, &decoder_attribute_group);
+ return -ENOMEM;
+ }
+
+ data->ir_dev = ir_dev;
+ data->enabled = 1;
+
+ spin_lock(&decoder_lock);
+ list_add_tail(&data->list, &decoder_list);
+ spin_unlock(&decoder_lock);
+
+ return 0;
+}
+
+static int ir_jvc_unregister(struct input_dev *input_dev)
+{
+ struct ir_input_dev *ir_dev = input_get_drvdata(input_dev);
+ struct decoder_data *data;
+
+ data = get_decoder_data(ir_dev);
+ if (!data)
+ return 0;
+
+ sysfs_remove_group(&ir_dev->dev.kobj, &decoder_attribute_group);
+
+ spin_lock(&decoder_lock);
+ list_del(&data->list);
+ spin_unlock(&decoder_lock);
+
+ return 0;
+}
+
+static struct ir_raw_handler jvc_handler = {
+ .decode = ir_jvc_decode,
+ .raw_register = ir_jvc_register,
+ .raw_unregister = ir_jvc_unregister,
+};
+
+static int __init ir_jvc_decode_init(void)
+{
+ ir_raw_handler_register(&jvc_handler);
+
+ printk(KERN_INFO "IR JVC protocol handler initialized\n");
+ return 0;
+}
+
+static void __exit ir_jvc_decode_exit(void)
+{
+ ir_raw_handler_unregister(&jvc_handler);
+}
+
+module_init(ir_jvc_decode_init);
+module_exit(ir_jvc_decode_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Härdeman <david@hardeman.nu>");
+MODULE_DESCRIPTION("JVC IR protocol decoder");
diff --git a/drivers/media/IR/ir-keymaps.c b/drivers/media/IR/ir-keymaps.c
deleted file mode 100644
index 0efdefe75f32..000000000000
--- a/drivers/media/IR/ir-keymaps.c
+++ /dev/null
@@ -1,3494 +0,0 @@
-/*
- Keytables for supported remote controls, used on drivers/media
- devices.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-*/
-
-/*
- * NOTICE FOR DEVELOPERS:
- * The IR mappings should be as close as possible to what's
- * specified at:
- * http://linuxtv.org/wiki/index.php/Remote_Controllers
- */
-#include <linux/module.h>
-
-#include <linux/input.h>
-#include <media/ir-common.h>
-
-/* empty keytable, can be used as placeholder for not-yet created keytables */
-static struct ir_scancode ir_codes_empty[] = {
- { 0x2a, KEY_COFFEE },
-};
-
-struct ir_scancode_table ir_codes_empty_table = {
- .scan = ir_codes_empty,
- .size = ARRAY_SIZE(ir_codes_empty),
-};
-EXPORT_SYMBOL_GPL(ir_codes_empty_table);
-
-/* Michal Majchrowicz <mmajchrowicz@gmail.com> */
-static struct ir_scancode ir_codes_proteus_2309[] = {
- /* numeric */
- { 0x00, KEY_0 },
- { 0x01, KEY_1 },
- { 0x02, KEY_2 },
- { 0x03, KEY_3 },
- { 0x04, KEY_4 },
- { 0x05, KEY_5 },
- { 0x06, KEY_6 },
- { 0x07, KEY_7 },
- { 0x08, KEY_8 },
- { 0x09, KEY_9 },
-
- { 0x5c, KEY_POWER }, /* power */
- { 0x20, KEY_ZOOM }, /* full screen */
- { 0x0f, KEY_BACKSPACE }, /* recall */
- { 0x1b, KEY_ENTER }, /* mute */
- { 0x41, KEY_RECORD }, /* record */
- { 0x43, KEY_STOP }, /* stop */
- { 0x16, KEY_S },
- { 0x1a, KEY_POWER2 }, /* off */
- { 0x2e, KEY_RED },
- { 0x1f, KEY_CHANNELDOWN }, /* channel - */
- { 0x1c, KEY_CHANNELUP }, /* channel + */
- { 0x10, KEY_VOLUMEDOWN }, /* volume - */
- { 0x1e, KEY_VOLUMEUP }, /* volume + */
- { 0x14, KEY_F1 },
-};
-
-struct ir_scancode_table ir_codes_proteus_2309_table = {
- .scan = ir_codes_proteus_2309,
- .size = ARRAY_SIZE(ir_codes_proteus_2309),
-};
-EXPORT_SYMBOL_GPL(ir_codes_proteus_2309_table);
-
-/* Matt Jesson <dvb@jesson.eclipse.co.uk */
-static struct ir_scancode ir_codes_avermedia_dvbt[] = {
- { 0x28, KEY_0 }, /* '0' / 'enter' */
- { 0x22, KEY_1 }, /* '1' */
- { 0x12, KEY_2 }, /* '2' / 'up arrow' */
- { 0x32, KEY_3 }, /* '3' */
- { 0x24, KEY_4 }, /* '4' / 'left arrow' */
- { 0x14, KEY_5 }, /* '5' */
- { 0x34, KEY_6 }, /* '6' / 'right arrow' */
- { 0x26, KEY_7 }, /* '7' */
- { 0x16, KEY_8 }, /* '8' / 'down arrow' */
- { 0x36, KEY_9 }, /* '9' */
-
- { 0x20, KEY_LIST }, /* 'source' */
- { 0x10, KEY_TEXT }, /* 'teletext' */
- { 0x00, KEY_POWER }, /* 'power' */
- { 0x04, KEY_AUDIO }, /* 'audio' */
- { 0x06, KEY_ZOOM }, /* 'full screen' */
- { 0x18, KEY_VIDEO }, /* 'display' */
- { 0x38, KEY_SEARCH }, /* 'loop' */
- { 0x08, KEY_INFO }, /* 'preview' */
- { 0x2a, KEY_REWIND }, /* 'backward <<' */
- { 0x1a, KEY_FASTFORWARD }, /* 'forward >>' */
- { 0x3a, KEY_RECORD }, /* 'capture' */
- { 0x0a, KEY_MUTE }, /* 'mute' */
- { 0x2c, KEY_RECORD }, /* 'record' */
- { 0x1c, KEY_PAUSE }, /* 'pause' */
- { 0x3c, KEY_STOP }, /* 'stop' */
- { 0x0c, KEY_PLAY }, /* 'play' */
- { 0x2e, KEY_RED }, /* 'red' */
- { 0x01, KEY_BLUE }, /* 'blue' / 'cancel' */
- { 0x0e, KEY_YELLOW }, /* 'yellow' / 'ok' */
- { 0x21, KEY_GREEN }, /* 'green' */
- { 0x11, KEY_CHANNELDOWN }, /* 'channel -' */
- { 0x31, KEY_CHANNELUP }, /* 'channel +' */
- { 0x1e, KEY_VOLUMEDOWN }, /* 'volume -' */
- { 0x3e, KEY_VOLUMEUP }, /* 'volume +' */
-};
-
-struct ir_scancode_table ir_codes_avermedia_dvbt_table = {
- .scan = ir_codes_avermedia_dvbt,
- .size = ARRAY_SIZE(ir_codes_avermedia_dvbt),
-};
-EXPORT_SYMBOL_GPL(ir_codes_avermedia_dvbt_table);
-
-/* Mauro Carvalho Chehab <mchehab@infradead.org> */
-static struct ir_scancode ir_codes_avermedia_m135a[] = {
- { 0x00, KEY_POWER2 },
- { 0x2e, KEY_DOT }, /* '.' */
- { 0x01, KEY_MODE }, /* TV/FM */
-
- { 0x05, KEY_1 },
- { 0x06, KEY_2 },
- { 0x07, KEY_3 },
- { 0x09, KEY_4 },
- { 0x0a, KEY_5 },
- { 0x0b, KEY_6 },
- { 0x0d, KEY_7 },
- { 0x0e, KEY_8 },
- { 0x0f, KEY_9 },
- { 0x11, KEY_0 },
-
- { 0x13, KEY_RIGHT }, /* -> */
- { 0x12, KEY_LEFT }, /* <- */
-
- { 0x17, KEY_SLEEP }, /* Capturar Imagem */
- { 0x10, KEY_SHUFFLE }, /* Amostra */
-
- /* FIXME: The keys bellow aren't ok */
-
- { 0x43, KEY_CHANNELUP },
- { 0x42, KEY_CHANNELDOWN },
- { 0x1f, KEY_VOLUMEUP },
- { 0x1e, KEY_VOLUMEDOWN },
- { 0x0c, KEY_ENTER },
-
- { 0x14, KEY_MUTE },
- { 0x08, KEY_AUDIO },
-
- { 0x03, KEY_TEXT },
- { 0x04, KEY_EPG },
- { 0x2b, KEY_TV2 }, /* TV2 */
-
- { 0x1d, KEY_RED },
- { 0x1c, KEY_YELLOW },
- { 0x41, KEY_GREEN },
- { 0x40, KEY_BLUE },
-
- { 0x1a, KEY_PLAYPAUSE },
- { 0x19, KEY_RECORD },
- { 0x18, KEY_PLAY },
- { 0x1b, KEY_STOP },
-};
-
-struct ir_scancode_table ir_codes_avermedia_m135a_table = {
- .scan = ir_codes_avermedia_m135a,
- .size = ARRAY_SIZE(ir_codes_avermedia_m135a),
-};
-EXPORT_SYMBOL_GPL(ir_codes_avermedia_m135a_table);
-
-/* Oldrich Jedlicka <oldium.pro@seznam.cz> */
-static struct ir_scancode ir_codes_avermedia_cardbus[] = {
- { 0x00, KEY_POWER },
- { 0x01, KEY_TUNER }, /* TV/FM */
- { 0x03, KEY_TEXT }, /* Teletext */
- { 0x04, KEY_EPG },
- { 0x05, KEY_1 },
- { 0x06, KEY_2 },
- { 0x07, KEY_3 },
- { 0x08, KEY_AUDIO },
- { 0x09, KEY_4 },
- { 0x0a, KEY_5 },
- { 0x0b, KEY_6 },
- { 0x0c, KEY_ZOOM }, /* Full screen */
- { 0x0d, KEY_7 },
- { 0x0e, KEY_8 },
- { 0x0f, KEY_9 },
- { 0x10, KEY_PAGEUP }, /* 16-CH PREV */
- { 0x11, KEY_0 },
- { 0x12, KEY_INFO },
- { 0x13, KEY_AGAIN }, /* CH RTN - channel return */
- { 0x14, KEY_MUTE },
- { 0x15, KEY_EDIT }, /* Autoscan */
- { 0x17, KEY_SAVE }, /* Screenshot */
- { 0x18, KEY_PLAYPAUSE },
- { 0x19, KEY_RECORD },
- { 0x1a, KEY_PLAY },
- { 0x1b, KEY_STOP },
- { 0x1c, KEY_FASTFORWARD },
- { 0x1d, KEY_REWIND },
- { 0x1e, KEY_VOLUMEDOWN },
- { 0x1f, KEY_VOLUMEUP },
- { 0x22, KEY_SLEEP }, /* Sleep */
- { 0x23, KEY_ZOOM }, /* Aspect */
- { 0x26, KEY_SCREEN }, /* Pos */
- { 0x27, KEY_ANGLE }, /* Size */
- { 0x28, KEY_SELECT }, /* Select */
- { 0x29, KEY_BLUE }, /* Blue/Picture */
- { 0x2a, KEY_BACKSPACE }, /* Back */
- { 0x2b, KEY_MEDIA }, /* PIP (Picture-in-picture) */
- { 0x2c, KEY_DOWN },
- { 0x2e, KEY_DOT },
- { 0x2f, KEY_TV }, /* Live TV */
- { 0x32, KEY_LEFT },
- { 0x33, KEY_CLEAR }, /* Clear */
- { 0x35, KEY_RED }, /* Red/TV */
- { 0x36, KEY_UP },
- { 0x37, KEY_HOME }, /* Home */
- { 0x39, KEY_GREEN }, /* Green/Video */
- { 0x3d, KEY_YELLOW }, /* Yellow/Music */
- { 0x3e, KEY_OK }, /* Ok */
- { 0x3f, KEY_RIGHT },
- { 0x40, KEY_NEXT }, /* Next */
- { 0x41, KEY_PREVIOUS }, /* Previous */
- { 0x42, KEY_CHANNELDOWN }, /* Channel down */
- { 0x43, KEY_CHANNELUP }, /* Channel up */
-};
-
-struct ir_scancode_table ir_codes_avermedia_cardbus_table = {
- .scan = ir_codes_avermedia_cardbus,
- .size = ARRAY_SIZE(ir_codes_avermedia_cardbus),
-};
-EXPORT_SYMBOL_GPL(ir_codes_avermedia_cardbus_table);
-
-/* Attila Kondoros <attila.kondoros@chello.hu> */
-static struct ir_scancode ir_codes_apac_viewcomp[] = {
-
- { 0x01, KEY_1 },
- { 0x02, KEY_2 },
- { 0x03, KEY_3 },
- { 0x04, KEY_4 },
- { 0x05, KEY_5 },
- { 0x06, KEY_6 },
- { 0x07, KEY_7 },
- { 0x08, KEY_8 },
- { 0x09, KEY_9 },
- { 0x00, KEY_0 },
- { 0x17, KEY_LAST }, /* +100 */
- { 0x0a, KEY_LIST }, /* recall */
-
-
- { 0x1c, KEY_TUNER }, /* TV/FM */
- { 0x15, KEY_SEARCH }, /* scan */
- { 0x12, KEY_POWER }, /* power */
- { 0x1f, KEY_VOLUMEDOWN }, /* vol up */
- { 0x1b, KEY_VOLUMEUP }, /* vol down */
- { 0x1e, KEY_CHANNELDOWN }, /* chn up */
- { 0x1a, KEY_CHANNELUP }, /* chn down */
-
- { 0x11, KEY_VIDEO }, /* video */
- { 0x0f, KEY_ZOOM }, /* full screen */
- { 0x13, KEY_MUTE }, /* mute/unmute */
- { 0x10, KEY_TEXT }, /* min */
-
- { 0x0d, KEY_STOP }, /* freeze */
- { 0x0e, KEY_RECORD }, /* record */
- { 0x1d, KEY_PLAYPAUSE }, /* stop */
- { 0x19, KEY_PLAY }, /* play */
-
- { 0x16, KEY_GOTO }, /* osd */
- { 0x14, KEY_REFRESH }, /* default */
- { 0x0c, KEY_KPPLUS }, /* fine tune >>>> */
- { 0x18, KEY_KPMINUS }, /* fine tune <<<< */
-};
-
-struct ir_scancode_table ir_codes_apac_viewcomp_table = {
- .scan = ir_codes_apac_viewcomp,
- .size = ARRAY_SIZE(ir_codes_apac_viewcomp),
-};
-EXPORT_SYMBOL_GPL(ir_codes_apac_viewcomp_table);
-
-/* ---------------------------------------------------------------------- */
-
-static struct ir_scancode ir_codes_pixelview[] = {
-
- { 0x1e, KEY_POWER }, /* power */
- { 0x07, KEY_MEDIA }, /* source */
- { 0x1c, KEY_SEARCH }, /* scan */
-
-
- { 0x03, KEY_TUNER }, /* TV/FM */
-
- { 0x00, KEY_RECORD },
- { 0x08, KEY_STOP },
- { 0x11, KEY_PLAY },
-
- { 0x1a, KEY_PLAYPAUSE }, /* freeze */
- { 0x19, KEY_ZOOM }, /* zoom */
- { 0x0f, KEY_TEXT }, /* min */
-
- { 0x01, KEY_1 },
- { 0x0b, KEY_2 },
- { 0x1b, KEY_3 },
- { 0x05, KEY_4 },
- { 0x09, KEY_5 },
- { 0x15, KEY_6 },
- { 0x06, KEY_7 },
- { 0x0a, KEY_8 },
- { 0x12, KEY_9 },
- { 0x02, KEY_0 },
- { 0x10, KEY_LAST }, /* +100 */
- { 0x13, KEY_LIST }, /* recall */
-
- { 0x1f, KEY_CHANNELUP }, /* chn down */
- { 0x17, KEY_CHANNELDOWN }, /* chn up */
- { 0x16, KEY_VOLUMEUP }, /* vol down */
- { 0x14, KEY_VOLUMEDOWN }, /* vol up */
-
- { 0x04, KEY_KPMINUS }, /* <<< */
- { 0x0e, KEY_SETUP }, /* function */
- { 0x0c, KEY_KPPLUS }, /* >>> */
-
- { 0x0d, KEY_GOTO }, /* mts */
- { 0x1d, KEY_REFRESH }, /* reset */
- { 0x18, KEY_MUTE }, /* mute/unmute */
-};
-
-struct ir_scancode_table ir_codes_pixelview_table = {
- .scan = ir_codes_pixelview,
- .size = ARRAY_SIZE(ir_codes_pixelview),
-};
-EXPORT_SYMBOL_GPL(ir_codes_pixelview_table);
-
-/*
- Mauro Carvalho Chehab <mchehab@infradead.org>
- present on PV MPEG 8000GT
- */
-static struct ir_scancode ir_codes_pixelview_new[] = {
- { 0x3c, KEY_TIME }, /* Timeshift */
- { 0x12, KEY_POWER },
-
- { 0x3d, KEY_1 },
- { 0x38, KEY_2 },
- { 0x18, KEY_3 },
- { 0x35, KEY_4 },
- { 0x39, KEY_5 },
- { 0x15, KEY_6 },
- { 0x36, KEY_7 },
- { 0x3a, KEY_8 },
- { 0x1e, KEY_9 },
- { 0x3e, KEY_0 },
-
- { 0x1c, KEY_AGAIN }, /* LOOP */
- { 0x3f, KEY_MEDIA }, /* Source */
- { 0x1f, KEY_LAST }, /* +100 */
- { 0x1b, KEY_MUTE },
-
- { 0x17, KEY_CHANNELDOWN },
- { 0x16, KEY_CHANNELUP },
- { 0x10, KEY_VOLUMEUP },
- { 0x14, KEY_VOLUMEDOWN },
- { 0x13, KEY_ZOOM },
-
- { 0x19, KEY_CAMERA }, /* SNAPSHOT */
- { 0x1a, KEY_SEARCH }, /* scan */
-
- { 0x37, KEY_REWIND }, /* << */
- { 0x32, KEY_RECORD }, /* o (red) */
- { 0x33, KEY_FORWARD }, /* >> */
- { 0x11, KEY_STOP }, /* square */
- { 0x3b, KEY_PLAY }, /* > */
- { 0x30, KEY_PLAYPAUSE }, /* || */
-
- { 0x31, KEY_TV },
- { 0x34, KEY_RADIO },
-};
-
-struct ir_scancode_table ir_codes_pixelview_new_table = {
- .scan = ir_codes_pixelview_new,
- .size = ARRAY_SIZE(ir_codes_pixelview_new),
-};
-EXPORT_SYMBOL_GPL(ir_codes_pixelview_new_table);
-
-static struct ir_scancode ir_codes_nebula[] = {
- { 0x00, KEY_0 },
- { 0x01, KEY_1 },
- { 0x02, KEY_2 },
- { 0x03, KEY_3 },
- { 0x04, KEY_4 },
- { 0x05, KEY_5 },
- { 0x06, KEY_6 },
- { 0x07, KEY_7 },
- { 0x08, KEY_8 },
- { 0x09, KEY_9 },
- { 0x0a, KEY_TV },
- { 0x0b, KEY_AUX },
- { 0x0c, KEY_DVD },
- { 0x0d, KEY_POWER },
- { 0x0e, KEY_MHP }, /* labelled 'Picture' */
- { 0x0f, KEY_AUDIO },
- { 0x10, KEY_INFO },
- { 0x11, KEY_F13 }, /* 16:9 */
- { 0x12, KEY_F14 }, /* 14:9 */
- { 0x13, KEY_EPG },
- { 0x14, KEY_EXIT },
- { 0x15, KEY_MENU },
- { 0x16, KEY_UP },
- { 0x17, KEY_DOWN },
- { 0x18, KEY_LEFT },
- { 0x19, KEY_RIGHT },
- { 0x1a, KEY_ENTER },
- { 0x1b, KEY_CHANNELUP },
- { 0x1c, KEY_CHANNELDOWN },
- { 0x1d, KEY_VOLUMEUP },
- { 0x1e, KEY_VOLUMEDOWN },
- { 0x1f, KEY_RED },
- { 0x20, KEY_GREEN },
- { 0x21, KEY_YELLOW },
- { 0x22, KEY_BLUE },
- { 0x23, KEY_SUBTITLE },
- { 0x24, KEY_F15 }, /* AD */
- { 0x25, KEY_TEXT },
- { 0x26, KEY_MUTE },
- { 0x27, KEY_REWIND },
- { 0x28, KEY_STOP },
- { 0x29, KEY_PLAY },
- { 0x2a, KEY_FASTFORWARD },
- { 0x2b, KEY_F16 }, /* chapter */
- { 0x2c, KEY_PAUSE },
- { 0x2d, KEY_PLAY },
- { 0x2e, KEY_RECORD },
- { 0x2f, KEY_F17 }, /* picture in picture */
- { 0x30, KEY_KPPLUS }, /* zoom in */
- { 0x31, KEY_KPMINUS }, /* zoom out */
- { 0x32, KEY_F18 }, /* capture */
- { 0x33, KEY_F19 }, /* web */
- { 0x34, KEY_EMAIL },
- { 0x35, KEY_PHONE },
- { 0x36, KEY_PC },
-};
-
-struct ir_scancode_table ir_codes_nebula_table = {
- .scan = ir_codes_nebula,
- .size = ARRAY_SIZE(ir_codes_nebula),
-};
-EXPORT_SYMBOL_GPL(ir_codes_nebula_table);
-
-/* DigitalNow DNTV Live DVB-T Remote */
-static struct ir_scancode ir_codes_dntv_live_dvb_t[] = {
- { 0x00, KEY_ESC }, /* 'go up a level?' */
- /* Keys 0 to 9 */
- { 0x0a, KEY_0 },
- { 0x01, KEY_1 },
- { 0x02, KEY_2 },
- { 0x03, KEY_3 },
- { 0x04, KEY_4 },
- { 0x05, KEY_5 },
- { 0x06, KEY_6 },
- { 0x07, KEY_7 },
- { 0x08, KEY_8 },
- { 0x09, KEY_9 },
-
- { 0x0b, KEY_TUNER }, /* tv/fm */
- { 0x0c, KEY_SEARCH }, /* scan */
- { 0x0d, KEY_STOP },
- { 0x0e, KEY_PAUSE },
- { 0x0f, KEY_LIST }, /* source */
-
- { 0x10, KEY_MUTE },
- { 0x11, KEY_REWIND }, /* backward << */
- { 0x12, KEY_POWER },
- { 0x13, KEY_CAMERA }, /* snap */
- { 0x14, KEY_AUDIO }, /* stereo */
- { 0x15, KEY_CLEAR }, /* reset */
- { 0x16, KEY_PLAY },
- { 0x17, KEY_ENTER },
- { 0x18, KEY_ZOOM }, /* full screen */
- { 0x19, KEY_FASTFORWARD }, /* forward >> */
- { 0x1a, KEY_CHANNELUP },
- { 0x1b, KEY_VOLUMEUP },
- { 0x1c, KEY_INFO }, /* preview */
- { 0x1d, KEY_RECORD }, /* record */
- { 0x1e, KEY_CHANNELDOWN },
- { 0x1f, KEY_VOLUMEDOWN },
-};
-
-struct ir_scancode_table ir_codes_dntv_live_dvb_t_table = {
- .scan = ir_codes_dntv_live_dvb_t,
- .size = ARRAY_SIZE(ir_codes_dntv_live_dvb_t),
-};
-EXPORT_SYMBOL_GPL(ir_codes_dntv_live_dvb_t_table);
-
-/* ---------------------------------------------------------------------- */
-
-/* IO-DATA BCTV7E Remote */
-static struct ir_scancode ir_codes_iodata_bctv7e[] = {
- { 0x40, KEY_TV },
- { 0x20, KEY_RADIO }, /* FM */
- { 0x60, KEY_EPG },
- { 0x00, KEY_POWER },
-
- /* Keys 0 to 9 */
- { 0x44, KEY_0 }, /* 10 */
- { 0x50, KEY_1 },
- { 0x30, KEY_2 },
- { 0x70, KEY_3 },
- { 0x48, KEY_4 },
- { 0x28, KEY_5 },
- { 0x68, KEY_6 },
- { 0x58, KEY_7 },
- { 0x38, KEY_8 },
- { 0x78, KEY_9 },
-
- { 0x10, KEY_L }, /* Live */
- { 0x08, KEY_TIME }, /* Time Shift */
-
- { 0x18, KEY_PLAYPAUSE }, /* Play */
-
- { 0x24, KEY_ENTER }, /* 11 */
- { 0x64, KEY_ESC }, /* 12 */
- { 0x04, KEY_M }, /* Multi */
-
- { 0x54, KEY_VIDEO },
- { 0x34, KEY_CHANNELUP },
- { 0x74, KEY_VOLUMEUP },
- { 0x14, KEY_MUTE },
-
- { 0x4c, KEY_VCR }, /* SVIDEO */
- { 0x2c, KEY_CHANNELDOWN },
- { 0x6c, KEY_VOLUMEDOWN },
- { 0x0c, KEY_ZOOM },
-
- { 0x5c, KEY_PAUSE },
- { 0x3c, KEY_RED }, /* || (red) */
- { 0x7c, KEY_RECORD }, /* recording */
- { 0x1c, KEY_STOP },
-
- { 0x41, KEY_REWIND }, /* backward << */
- { 0x21, KEY_PLAY },
- { 0x61, KEY_FASTFORWARD }, /* forward >> */
- { 0x01, KEY_NEXT }, /* skip >| */
-};
-
-struct ir_scancode_table ir_codes_iodata_bctv7e_table = {
- .scan = ir_codes_iodata_bctv7e,
- .size = ARRAY_SIZE(ir_codes_iodata_bctv7e),
-};
-EXPORT_SYMBOL_GPL(ir_codes_iodata_bctv7e_table);
-
-/* ---------------------------------------------------------------------- */
-
-/* ADS Tech Instant TV DVB-T PCI Remote */
-static struct ir_scancode ir_codes_adstech_dvb_t_pci[] = {
- /* Keys 0 to 9 */
- { 0x4d, KEY_0 },
- { 0x57, KEY_1 },
- { 0x4f, KEY_2 },
- { 0x53, KEY_3 },
- { 0x56, KEY_4 },
- { 0x4e, KEY_5 },
- { 0x5e, KEY_6 },
- { 0x54, KEY_7 },
- { 0x4c, KEY_8 },
- { 0x5c, KEY_9 },
-
- { 0x5b, KEY_POWER },
- { 0x5f, KEY_MUTE },
- { 0x55, KEY_GOTO },
- { 0x5d, KEY_SEARCH },
- { 0x17, KEY_EPG }, /* Guide */
- { 0x1f, KEY_MENU },
- { 0x0f, KEY_UP },
- { 0x46, KEY_DOWN },
- { 0x16, KEY_LEFT },
- { 0x1e, KEY_RIGHT },
- { 0x0e, KEY_SELECT }, /* Enter */
- { 0x5a, KEY_INFO },
- { 0x52, KEY_EXIT },
- { 0x59, KEY_PREVIOUS },
- { 0x51, KEY_NEXT },
- { 0x58, KEY_REWIND },
- { 0x50, KEY_FORWARD },
- { 0x44, KEY_PLAYPAUSE },
- { 0x07, KEY_STOP },
- { 0x1b, KEY_RECORD },
- { 0x13, KEY_TUNER }, /* Live */
- { 0x0a, KEY_A },
- { 0x12, KEY_B },
- { 0x03, KEY_PROG1 }, /* 1 */
- { 0x01, KEY_PROG2 }, /* 2 */
- { 0x00, KEY_PROG3 }, /* 3 */
- { 0x06, KEY_DVD },
- { 0x48, KEY_AUX }, /* Photo */
- { 0x40, KEY_VIDEO },
- { 0x19, KEY_AUDIO }, /* Music */
- { 0x0b, KEY_CHANNELUP },
- { 0x08, KEY_CHANNELDOWN },
- { 0x15, KEY_VOLUMEUP },
- { 0x1c, KEY_VOLUMEDOWN },
-};
-
-struct ir_scancode_table ir_codes_adstech_dvb_t_pci_table = {
- .scan = ir_codes_adstech_dvb_t_pci,
- .size = ARRAY_SIZE(ir_codes_adstech_dvb_t_pci),
-};
-EXPORT_SYMBOL_GPL(ir_codes_adstech_dvb_t_pci_table);
-
-/* ---------------------------------------------------------------------- */
-
-/* MSI TV@nywhere MASTER remote */
-
-static struct ir_scancode ir_codes_msi_tvanywhere[] = {
- /* Keys 0 to 9 */
- { 0x00, KEY_0 },
- { 0x01, KEY_1 },
- { 0x02, KEY_2 },
- { 0x03, KEY_3 },
- { 0x04, KEY_4 },
- { 0x05, KEY_5 },
- { 0x06, KEY_6 },
- { 0x07, KEY_7 },
- { 0x08, KEY_8 },
- { 0x09, KEY_9 },
-
- { 0x0c, KEY_MUTE },
- { 0x0f, KEY_SCREEN }, /* Full Screen */
- { 0x10, KEY_FN }, /* Funtion */
- { 0x11, KEY_TIME }, /* Time shift */
- { 0x12, KEY_POWER },
- { 0x13, KEY_MEDIA }, /* MTS */
- { 0x14, KEY_SLOW },
- { 0x16, KEY_REWIND }, /* backward << */
- { 0x17, KEY_ENTER }, /* Return */
- { 0x18, KEY_FASTFORWARD }, /* forward >> */
- { 0x1a, KEY_CHANNELUP },
- { 0x1b, KEY_VOLUMEUP },
- { 0x1e, KEY_CHANNELDOWN },
- { 0x1f, KEY_VOLUMEDOWN },
-};
-
-struct ir_scancode_table ir_codes_msi_tvanywhere_table = {
- .scan = ir_codes_msi_tvanywhere,
- .size = ARRAY_SIZE(ir_codes_msi_tvanywhere),
-};
-EXPORT_SYMBOL_GPL(ir_codes_msi_tvanywhere_table);
-
-/* ---------------------------------------------------------------------- */
-
-/*
- Keycodes for remote on the MSI TV@nywhere Plus. The controller IC on the card
- is marked "KS003". The controller is I2C at address 0x30, but does not seem
- to respond to probes until a read is performed from a valid device.
- I don't know why...
-
- Note: This remote may be of similar or identical design to the
- Pixelview remote (?). The raw codes and duplicate button codes
- appear to be the same.
-
- Henry Wong <henry@stuffedcow.net>
- Some changes to formatting and keycodes by Mark Schultz <n9xmj@yahoo.com>
-
-*/
-
-static struct ir_scancode ir_codes_msi_tvanywhere_plus[] = {
-
-/* ---- Remote Button Layout ----
-
- POWER SOURCE SCAN MUTE
- TV/FM 1 2 3
- |> 4 5 6
- <| 7 8 9
- ^^UP 0 + RECALL
- vvDN RECORD STOP PLAY
-
- MINIMIZE ZOOM
-
- CH+
- VOL- VOL+
- CH-
-
- SNAPSHOT MTS
-
- << FUNC >> RESET
-*/
-
- { 0x01, KEY_1 }, /* 1 */
- { 0x0b, KEY_2 }, /* 2 */
- { 0x1b, KEY_3 }, /* 3 */
- { 0x05, KEY_4 }, /* 4 */
- { 0x09, KEY_5 }, /* 5 */
- { 0x15, KEY_6 }, /* 6 */
- { 0x06, KEY_7 }, /* 7 */
- { 0x0a, KEY_8 }, /* 8 */
- { 0x12, KEY_9 }, /* 9 */
- { 0x02, KEY_0 }, /* 0 */
- { 0x10, KEY_KPPLUS }, /* + */
- { 0x13, KEY_AGAIN }, /* Recall */
-
- { 0x1e, KEY_POWER }, /* Power */
- { 0x07, KEY_TUNER }, /* Source */
- { 0x1c, KEY_SEARCH }, /* Scan */
- { 0x18, KEY_MUTE }, /* Mute */
-
- { 0x03, KEY_RADIO }, /* TV/FM */
- /* The next four keys are duplicates that appear to send the
- same IR code as Ch+, Ch-, >>, and << . The raw code assigned
- to them is the actual code + 0x20 - they will never be
- detected as such unless some way is discovered to distinguish
- these buttons from those that have the same code. */
- { 0x3f, KEY_RIGHT }, /* |> and Ch+ */
- { 0x37, KEY_LEFT }, /* <| and Ch- */
- { 0x2c, KEY_UP }, /* ^^Up and >> */
- { 0x24, KEY_DOWN }, /* vvDn and << */
-
- { 0x00, KEY_RECORD }, /* Record */
- { 0x08, KEY_STOP }, /* Stop */
- { 0x11, KEY_PLAY }, /* Play */
-
- { 0x0f, KEY_CLOSE }, /* Minimize */
- { 0x19, KEY_ZOOM }, /* Zoom */
- { 0x1a, KEY_CAMERA }, /* Snapshot */
- { 0x0d, KEY_LANGUAGE }, /* MTS */
-
- { 0x14, KEY_VOLUMEDOWN }, /* Vol- */
- { 0x16, KEY_VOLUMEUP }, /* Vol+ */
- { 0x17, KEY_CHANNELDOWN }, /* Ch- */
- { 0x1f, KEY_CHANNELUP }, /* Ch+ */
-
- { 0x04, KEY_REWIND }, /* << */
- { 0x0e, KEY_MENU }, /* Function */
- { 0x0c, KEY_FASTFORWARD }, /* >> */
- { 0x1d, KEY_RESTART }, /* Reset */
-};
-
-struct ir_scancode_table ir_codes_msi_tvanywhere_plus_table = {
- .scan = ir_codes_msi_tvanywhere_plus,
- .size = ARRAY_SIZE(ir_codes_msi_tvanywhere_plus),
-};
-EXPORT_SYMBOL_GPL(ir_codes_msi_tvanywhere_plus_table);
-
-/* ---------------------------------------------------------------------- */
-
-/* Cinergy 1400 DVB-T */
-static struct ir_scancode ir_codes_cinergy_1400[] = {
- { 0x01, KEY_POWER },
- { 0x02, KEY_1 },
- { 0x03, KEY_2 },
- { 0x04, KEY_3 },
- { 0x05, KEY_4 },
- { 0x06, KEY_5 },
- { 0x07, KEY_6 },
- { 0x08, KEY_7 },
- { 0x09, KEY_8 },
- { 0x0a, KEY_9 },
- { 0x0c, KEY_0 },
-
- { 0x0b, KEY_VIDEO },
- { 0x0d, KEY_REFRESH },
- { 0x0e, KEY_SELECT },
- { 0x0f, KEY_EPG },
- { 0x10, KEY_UP },
- { 0x11, KEY_LEFT },
- { 0x12, KEY_OK },
- { 0x13, KEY_RIGHT },
- { 0x14, KEY_DOWN },
- { 0x15, KEY_TEXT },
- { 0x16, KEY_INFO },
-
- { 0x17, KEY_RED },
- { 0x18, KEY_GREEN },
- { 0x19, KEY_YELLOW },
- { 0x1a, KEY_BLUE },
-
- { 0x1b, KEY_CHANNELUP },
- { 0x1c, KEY_VOLUMEUP },
- { 0x1d, KEY_MUTE },
- { 0x1e, KEY_VOLUMEDOWN },
- { 0x1f, KEY_CHANNELDOWN },
-
- { 0x40, KEY_PAUSE },
- { 0x4c, KEY_PLAY },
- { 0x58, KEY_RECORD },
- { 0x54, KEY_PREVIOUS },
- { 0x48, KEY_STOP },
- { 0x5c, KEY_NEXT },
-};
-
-struct ir_scancode_table ir_codes_cinergy_1400_table = {
- .scan = ir_codes_cinergy_1400,
- .size = ARRAY_SIZE(ir_codes_cinergy_1400),
-};
-EXPORT_SYMBOL_GPL(ir_codes_cinergy_1400_table);
-
-/* ---------------------------------------------------------------------- */
-
-/* AVERTV STUDIO 303 Remote */
-static struct ir_scancode ir_codes_avertv_303[] = {
- { 0x2a, KEY_1 },
- { 0x32, KEY_2 },
- { 0x3a, KEY_3 },
- { 0x4a, KEY_4 },
- { 0x52, KEY_5 },
- { 0x5a, KEY_6 },
- { 0x6a, KEY_7 },
- { 0x72, KEY_8 },
- { 0x7a, KEY_9 },
- { 0x0e, KEY_0 },
-
- { 0x02, KEY_POWER },
- { 0x22, KEY_VIDEO },
- { 0x42, KEY_AUDIO },
- { 0x62, KEY_ZOOM },
- { 0x0a, KEY_TV },
- { 0x12, KEY_CD },
- { 0x1a, KEY_TEXT },
-
- { 0x16, KEY_SUBTITLE },
- { 0x1e, KEY_REWIND },
- { 0x06, KEY_PRINT },
-
- { 0x2e, KEY_SEARCH },
- { 0x36, KEY_SLEEP },
- { 0x3e, KEY_SHUFFLE },
- { 0x26, KEY_MUTE },
-
- { 0x4e, KEY_RECORD },
- { 0x56, KEY_PAUSE },
- { 0x5e, KEY_STOP },
- { 0x46, KEY_PLAY },
-
- { 0x6e, KEY_RED },
- { 0x0b, KEY_GREEN },
- { 0x66, KEY_YELLOW },
- { 0x03, KEY_BLUE },
-
- { 0x76, KEY_LEFT },
- { 0x7e, KEY_RIGHT },
- { 0x13, KEY_DOWN },
- { 0x1b, KEY_UP },
-};
-
-struct ir_scancode_table ir_codes_avertv_303_table = {
- .scan = ir_codes_avertv_303,
- .size = ARRAY_SIZE(ir_codes_avertv_303),
-};
-EXPORT_SYMBOL_GPL(ir_codes_avertv_303_table);
-
-/* ---------------------------------------------------------------------- */
-
-/* DigitalNow DNTV Live! DVB-T Pro Remote */
-static struct ir_scancode ir_codes_dntv_live_dvbt_pro[] = {
- { 0x16, KEY_POWER },
- { 0x5b, KEY_HOME },
-
- { 0x55, KEY_TV }, /* live tv */
- { 0x58, KEY_TUNER }, /* digital Radio */
- { 0x5a, KEY_RADIO }, /* FM radio */
- { 0x59, KEY_DVD }, /* dvd menu */
- { 0x03, KEY_1 },
- { 0x01, KEY_2 },
- { 0x06, KEY_3 },
- { 0x09, KEY_4 },
- { 0x1d, KEY_5 },
- { 0x1f, KEY_6 },
- { 0x0d, KEY_7 },
- { 0x19, KEY_8 },
- { 0x1b, KEY_9 },
- { 0x0c, KEY_CANCEL },
- { 0x15, KEY_0 },
- { 0x4a, KEY_CLEAR },
- { 0x13, KEY_BACK },
- { 0x00, KEY_TAB },
- { 0x4b, KEY_UP },
- { 0x4e, KEY_LEFT },
- { 0x4f, KEY_OK },
- { 0x52, KEY_RIGHT },
- { 0x51, KEY_DOWN },
- { 0x1e, KEY_VOLUMEUP },
- { 0x0a, KEY_VOLUMEDOWN },
- { 0x02, KEY_CHANNELDOWN },
- { 0x05, KEY_CHANNELUP },
- { 0x11, KEY_RECORD },
- { 0x14, KEY_PLAY },
- { 0x4c, KEY_PAUSE },
- { 0x1a, KEY_STOP },
- { 0x40, KEY_REWIND },
- { 0x12, KEY_FASTFORWARD },
- { 0x41, KEY_PREVIOUSSONG }, /* replay |< */
- { 0x42, KEY_NEXTSONG }, /* skip >| */
- { 0x54, KEY_CAMERA }, /* capture */
- { 0x50, KEY_LANGUAGE }, /* sap */
- { 0x47, KEY_TV2 }, /* pip */
- { 0x4d, KEY_SCREEN },
- { 0x43, KEY_SUBTITLE },
- { 0x10, KEY_MUTE },
- { 0x49, KEY_AUDIO }, /* l/r */
- { 0x07, KEY_SLEEP },
- { 0x08, KEY_VIDEO }, /* a/v */
- { 0x0e, KEY_PREVIOUS }, /* recall */
- { 0x45, KEY_ZOOM }, /* zoom + */
- { 0x46, KEY_ANGLE }, /* zoom - */
- { 0x56, KEY_RED },
- { 0x57, KEY_GREEN },
- { 0x5c, KEY_YELLOW },
- { 0x5d, KEY_BLUE },
-};
-
-struct ir_scancode_table ir_codes_dntv_live_dvbt_pro_table = {
- .scan = ir_codes_dntv_live_dvbt_pro,
- .size = ARRAY_SIZE(ir_codes_dntv_live_dvbt_pro),
-};
-EXPORT_SYMBOL_GPL(ir_codes_dntv_live_dvbt_pro_table);
-
-static struct ir_scancode ir_codes_em_terratec[] = {
- { 0x01, KEY_CHANNEL },
- { 0x02, KEY_SELECT },
- { 0x03, KEY_MUTE },
- { 0x04, KEY_POWER },
- { 0x05, KEY_1 },
- { 0x06, KEY_2 },
- { 0x07, KEY_3 },
- { 0x08, KEY_CHANNELUP },
- { 0x09, KEY_4 },
- { 0x0a, KEY_5 },
- { 0x0b, KEY_6 },
- { 0x0c, KEY_CHANNELDOWN },
- { 0x0d, KEY_7 },
- { 0x0e, KEY_8 },
- { 0x0f, KEY_9 },
- { 0x10, KEY_VOLUMEUP },
- { 0x11, KEY_0 },
- { 0x12, KEY_MENU },
- { 0x13, KEY_PRINT },
- { 0x14, KEY_VOLUMEDOWN },
- { 0x16, KEY_PAUSE },
- { 0x18, KEY_RECORD },
- { 0x19, KEY_REWIND },
- { 0x1a, KEY_PLAY },
- { 0x1b, KEY_FORWARD },
- { 0x1c, KEY_BACKSPACE },
- { 0x1e, KEY_STOP },
- { 0x40, KEY_ZOOM },
-};
-
-struct ir_scancode_table ir_codes_em_terratec_table = {
- .scan = ir_codes_em_terratec,
- .size = ARRAY_SIZE(ir_codes_em_terratec),
-};
-EXPORT_SYMBOL_GPL(ir_codes_em_terratec_table);
-
-static struct ir_scancode ir_codes_pinnacle_grey[] = {
- { 0x3a, KEY_0 },
- { 0x31, KEY_1 },
- { 0x32, KEY_2 },
- { 0x33, KEY_3 },
- { 0x34, KEY_4 },
- { 0x35, KEY_5 },
- { 0x36, KEY_6 },
- { 0x37, KEY_7 },
- { 0x38, KEY_8 },
- { 0x39, KEY_9 },
-
- { 0x2f, KEY_POWER },
-
- { 0x2e, KEY_P },
- { 0x1f, KEY_L },
- { 0x2b, KEY_I },
-
- { 0x2d, KEY_SCREEN },
- { 0x1e, KEY_ZOOM },
- { 0x1b, KEY_VOLUMEUP },
- { 0x0f, KEY_VOLUMEDOWN },
- { 0x17, KEY_CHANNELUP },
- { 0x1c, KEY_CHANNELDOWN },
- { 0x25, KEY_INFO },
-
- { 0x3c, KEY_MUTE },
-
- { 0x3d, KEY_LEFT },
- { 0x3b, KEY_RIGHT },
-
- { 0x3f, KEY_UP },
- { 0x3e, KEY_DOWN },
- { 0x1a, KEY_ENTER },
-
- { 0x1d, KEY_MENU },
- { 0x19, KEY_AGAIN },
- { 0x16, KEY_PREVIOUSSONG },
- { 0x13, KEY_NEXTSONG },
- { 0x15, KEY_PAUSE },
- { 0x0e, KEY_REWIND },
- { 0x0d, KEY_PLAY },
- { 0x0b, KEY_STOP },
- { 0x07, KEY_FORWARD },
- { 0x27, KEY_RECORD },
- { 0x26, KEY_TUNER },
- { 0x29, KEY_TEXT },
- { 0x2a, KEY_MEDIA },
- { 0x18, KEY_EPG },
-};
-
-struct ir_scancode_table ir_codes_pinnacle_grey_table = {
- .scan = ir_codes_pinnacle_grey,
- .size = ARRAY_SIZE(ir_codes_pinnacle_grey),
-};
-EXPORT_SYMBOL_GPL(ir_codes_pinnacle_grey_table);
-
-static struct ir_scancode ir_codes_flyvideo[] = {
- { 0x0f, KEY_0 },
- { 0x03, KEY_1 },
- { 0x04, KEY_2 },
- { 0x05, KEY_3 },
- { 0x07, KEY_4 },
- { 0x08, KEY_5 },
- { 0x09, KEY_6 },
- { 0x0b, KEY_7 },
- { 0x0c, KEY_8 },
- { 0x0d, KEY_9 },
-
- { 0x0e, KEY_MODE }, /* Air/Cable */
- { 0x11, KEY_VIDEO }, /* Video */
- { 0x15, KEY_AUDIO }, /* Audio */
- { 0x00, KEY_POWER }, /* Power */
- { 0x18, KEY_TUNER }, /* AV Source */
- { 0x02, KEY_ZOOM }, /* Fullscreen */
- { 0x1a, KEY_LANGUAGE }, /* Stereo */
- { 0x1b, KEY_MUTE }, /* Mute */
- { 0x14, KEY_VOLUMEUP }, /* Volume + */
- { 0x17, KEY_VOLUMEDOWN },/* Volume - */
- { 0x12, KEY_CHANNELUP },/* Channel + */
- { 0x13, KEY_CHANNELDOWN },/* Channel - */
- { 0x06, KEY_AGAIN }, /* Recall */
- { 0x10, KEY_ENTER }, /* Enter */
-
- { 0x19, KEY_BACK }, /* Rewind ( <<< ) */
- { 0x1f, KEY_FORWARD }, /* Forward ( >>> ) */
- { 0x0a, KEY_ANGLE }, /* no label, may be used as the PAUSE button */
-};
-
-struct ir_scancode_table ir_codes_flyvideo_table = {
- .scan = ir_codes_flyvideo,
- .size = ARRAY_SIZE(ir_codes_flyvideo),
-};
-EXPORT_SYMBOL_GPL(ir_codes_flyvideo_table);
-
-static struct ir_scancode ir_codes_flydvb[] = {
- { 0x01, KEY_ZOOM }, /* Full Screen */
- { 0x00, KEY_POWER }, /* Power */
-
- { 0x03, KEY_1 },
- { 0x04, KEY_2 },
- { 0x05, KEY_3 },
- { 0x07, KEY_4 },
- { 0x08, KEY_5 },
- { 0x09, KEY_6 },
- { 0x0b, KEY_7 },
- { 0x0c, KEY_8 },
- { 0x0d, KEY_9 },
- { 0x06, KEY_AGAIN }, /* Recall */
- { 0x0f, KEY_0 },
- { 0x10, KEY_MUTE }, /* Mute */
- { 0x02, KEY_RADIO }, /* TV/Radio */
- { 0x1b, KEY_LANGUAGE }, /* SAP (Second Audio Program) */
-
- { 0x14, KEY_VOLUMEUP }, /* VOL+ */
- { 0x17, KEY_VOLUMEDOWN }, /* VOL- */
- { 0x12, KEY_CHANNELUP }, /* CH+ */
- { 0x13, KEY_CHANNELDOWN }, /* CH- */
- { 0x1d, KEY_ENTER }, /* Enter */
-
- { 0x1a, KEY_MODE }, /* PIP */
- { 0x18, KEY_TUNER }, /* Source */
-
- { 0x1e, KEY_RECORD }, /* Record/Pause */
- { 0x15, KEY_ANGLE }, /* Swap (no label on key) */
- { 0x1c, KEY_PAUSE }, /* Timeshift/Pause */
- { 0x19, KEY_BACK }, /* Rewind << */
- { 0x0a, KEY_PLAYPAUSE }, /* Play/Pause */
- { 0x1f, KEY_FORWARD }, /* Forward >> */
- { 0x16, KEY_PREVIOUS }, /* Back |<< */
- { 0x11, KEY_STOP }, /* Stop */
- { 0x0e, KEY_NEXT }, /* End >>| */
-};
-
-struct ir_scancode_table ir_codes_flydvb_table = {
- .scan = ir_codes_flydvb,
- .size = ARRAY_SIZE(ir_codes_flydvb),
-};
-EXPORT_SYMBOL_GPL(ir_codes_flydvb_table);
-
-static struct ir_scancode ir_codes_cinergy[] = {
- { 0x00, KEY_0 },
- { 0x01, KEY_1 },
- { 0x02, KEY_2 },
- { 0x03, KEY_3 },
- { 0x04, KEY_4 },
- { 0x05, KEY_5 },
- { 0x06, KEY_6 },
- { 0x07, KEY_7 },
- { 0x08, KEY_8 },
- { 0x09, KEY_9 },
-
- { 0x0a, KEY_POWER },
- { 0x0b, KEY_PROG1 }, /* app */
- { 0x0c, KEY_ZOOM }, /* zoom/fullscreen */
- { 0x0d, KEY_CHANNELUP }, /* channel */
- { 0x0e, KEY_CHANNELDOWN }, /* channel- */
- { 0x0f, KEY_VOLUMEUP },
- { 0x10, KEY_VOLUMEDOWN },
- { 0x11, KEY_TUNER }, /* AV */
- { 0x12, KEY_NUMLOCK }, /* -/-- */
- { 0x13, KEY_AUDIO }, /* audio */
- { 0x14, KEY_MUTE },
- { 0x15, KEY_UP },
- { 0x16, KEY_DOWN },
- { 0x17, KEY_LEFT },
- { 0x18, KEY_RIGHT },
- { 0x19, BTN_LEFT, },
- { 0x1a, BTN_RIGHT, },
- { 0x1b, KEY_WWW }, /* text */
- { 0x1c, KEY_REWIND },
- { 0x1d, KEY_FORWARD },
- { 0x1e, KEY_RECORD },
- { 0x1f, KEY_PLAY },
- { 0x20, KEY_PREVIOUSSONG },
- { 0x21, KEY_NEXTSONG },
- { 0x22, KEY_PAUSE },
- { 0x23, KEY_STOP },
-};
-
-struct ir_scancode_table ir_codes_cinergy_table = {
- .scan = ir_codes_cinergy,
- .size = ARRAY_SIZE(ir_codes_cinergy),
-};
-EXPORT_SYMBOL_GPL(ir_codes_cinergy_table);
-
-/* Alfons Geser <a.geser@cox.net>
- * updates from Job D. R. Borges <jobdrb@ig.com.br> */
-static struct ir_scancode ir_codes_eztv[] = {
- { 0x12, KEY_POWER },
- { 0x01, KEY_TV }, /* DVR */
- { 0x15, KEY_DVD }, /* DVD */
- { 0x17, KEY_AUDIO }, /* music */
- /* DVR mode / DVD mode / music mode */
-
- { 0x1b, KEY_MUTE }, /* mute */
- { 0x02, KEY_LANGUAGE }, /* MTS/SAP / audio / autoseek */
- { 0x1e, KEY_SUBTITLE }, /* closed captioning / subtitle / seek */
- { 0x16, KEY_ZOOM }, /* full screen */
- { 0x1c, KEY_VIDEO }, /* video source / eject / delall */
- { 0x1d, KEY_RESTART }, /* playback / angle / del */
- { 0x2f, KEY_SEARCH }, /* scan / menu / playlist */
- { 0x30, KEY_CHANNEL }, /* CH surfing / bookmark / memo */
-
- { 0x31, KEY_HELP }, /* help */
- { 0x32, KEY_MODE }, /* num/memo */
- { 0x33, KEY_ESC }, /* cancel */
-
- { 0x0c, KEY_UP }, /* up */
- { 0x10, KEY_DOWN }, /* down */
- { 0x08, KEY_LEFT }, /* left */
- { 0x04, KEY_RIGHT }, /* right */
- { 0x03, KEY_SELECT }, /* select */
-
- { 0x1f, KEY_REWIND }, /* rewind */
- { 0x20, KEY_PLAYPAUSE },/* play/pause */
- { 0x29, KEY_FORWARD }, /* forward */
- { 0x14, KEY_AGAIN }, /* repeat */
- { 0x2b, KEY_RECORD }, /* recording */
- { 0x2c, KEY_STOP }, /* stop */
- { 0x2d, KEY_PLAY }, /* play */
- { 0x2e, KEY_CAMERA }, /* snapshot / shuffle */
-
- { 0x00, KEY_0 },
- { 0x05, KEY_1 },
- { 0x06, KEY_2 },
- { 0x07, KEY_3 },
- { 0x09, KEY_4 },
- { 0x0a, KEY_5 },
- { 0x0b, KEY_6 },
- { 0x0d, KEY_7 },
- { 0x0e, KEY_8 },
- { 0x0f, KEY_9 },
-
- { 0x2a, KEY_VOLUMEUP },
- { 0x11, KEY_VOLUMEDOWN },
- { 0x18, KEY_CHANNELUP },/* CH.tracking up */
- { 0x19, KEY_CHANNELDOWN },/* CH.tracking down */
-
- { 0x13, KEY_ENTER }, /* enter */
- { 0x21, KEY_DOT }, /* . (decimal dot) */
-};
-
-struct ir_scancode_table ir_codes_eztv_table = {
- .scan = ir_codes_eztv,
- .size = ARRAY_SIZE(ir_codes_eztv),
-};
-EXPORT_SYMBOL_GPL(ir_codes_eztv_table);
-
-/* Alex Hermann <gaaf@gmx.net> */
-static struct ir_scancode ir_codes_avermedia[] = {
- { 0x28, KEY_1 },
- { 0x18, KEY_2 },
- { 0x38, KEY_3 },
- { 0x24, KEY_4 },
- { 0x14, KEY_5 },
- { 0x34, KEY_6 },
- { 0x2c, KEY_7 },
- { 0x1c, KEY_8 },
- { 0x3c, KEY_9 },
- { 0x22, KEY_0 },
-
- { 0x20, KEY_TV }, /* TV/FM */
- { 0x10, KEY_CD }, /* CD */
- { 0x30, KEY_TEXT }, /* TELETEXT */
- { 0x00, KEY_POWER }, /* POWER */
-
- { 0x08, KEY_VIDEO }, /* VIDEO */
- { 0x04, KEY_AUDIO }, /* AUDIO */
- { 0x0c, KEY_ZOOM }, /* FULL SCREEN */
-
- { 0x12, KEY_SUBTITLE }, /* DISPLAY */
- { 0x32, KEY_REWIND }, /* LOOP */
- { 0x02, KEY_PRINT }, /* PREVIEW */
-
- { 0x2a, KEY_SEARCH }, /* AUTOSCAN */
- { 0x1a, KEY_SLEEP }, /* FREEZE */
- { 0x3a, KEY_CAMERA }, /* SNAPSHOT */
- { 0x0a, KEY_MUTE }, /* MUTE */
-
- { 0x26, KEY_RECORD }, /* RECORD */
- { 0x16, KEY_PAUSE }, /* PAUSE */
- { 0x36, KEY_STOP }, /* STOP */
- { 0x06, KEY_PLAY }, /* PLAY */
-
- { 0x2e, KEY_RED }, /* RED */
- { 0x21, KEY_GREEN }, /* GREEN */
- { 0x0e, KEY_YELLOW }, /* YELLOW */
- { 0x01, KEY_BLUE }, /* BLUE */
-
- { 0x1e, KEY_VOLUMEDOWN }, /* VOLUME- */
- { 0x3e, KEY_VOLUMEUP }, /* VOLUME+ */
- { 0x11, KEY_CHANNELDOWN }, /* CHANNEL/PAGE- */
- { 0x31, KEY_CHANNELUP } /* CHANNEL/PAGE+ */
-};
-
-struct ir_scancode_table ir_codes_avermedia_table = {
- .scan = ir_codes_avermedia,
- .size = ARRAY_SIZE(ir_codes_avermedia),
-};
-EXPORT_SYMBOL_GPL(ir_codes_avermedia_table);
-
-static struct ir_scancode ir_codes_videomate_tv_pvr[] = {
- { 0x14, KEY_MUTE },
- { 0x24, KEY_ZOOM },
-
- { 0x01, KEY_DVD },
- { 0x23, KEY_RADIO },
- { 0x00, KEY_TV },
-
- { 0x0a, KEY_REWIND },
- { 0x08, KEY_PLAYPAUSE },
- { 0x0f, KEY_FORWARD },
-
- { 0x02, KEY_PREVIOUS },
- { 0x07, KEY_STOP },
- { 0x06, KEY_NEXT },
-
- { 0x0c, KEY_UP },
- { 0x0e, KEY_DOWN },
- { 0x0b, KEY_LEFT },
- { 0x0d, KEY_RIGHT },
- { 0x11, KEY_OK },
-
- { 0x03, KEY_MENU },
- { 0x09, KEY_SETUP },
- { 0x05, KEY_VIDEO },
- { 0x22, KEY_CHANNEL },
-
- { 0x12, KEY_VOLUMEUP },
- { 0x15, KEY_VOLUMEDOWN },
- { 0x10, KEY_CHANNELUP },
- { 0x13, KEY_CHANNELDOWN },
-
- { 0x04, KEY_RECORD },
-
- { 0x16, KEY_1 },
- { 0x17, KEY_2 },
- { 0x18, KEY_3 },
- { 0x19, KEY_4 },
- { 0x1a, KEY_5 },
- { 0x1b, KEY_6 },
- { 0x1c, KEY_7 },
- { 0x1d, KEY_8 },
- { 0x1e, KEY_9 },
- { 0x1f, KEY_0 },
-
- { 0x20, KEY_LANGUAGE },
- { 0x21, KEY_SLEEP },
-};
-
-struct ir_scancode_table ir_codes_videomate_tv_pvr_table = {
- .scan = ir_codes_videomate_tv_pvr,
- .size = ARRAY_SIZE(ir_codes_videomate_tv_pvr),
-};
-EXPORT_SYMBOL_GPL(ir_codes_videomate_tv_pvr_table);
-
-/* Michael Tokarev <mjt@tls.msk.ru>
- http://www.corpit.ru/mjt/beholdTV/remote_control.jpg
- keytable is used by MANLI MTV00[0x0c] and BeholdTV 40[13] at
- least, and probably other cards too.
- The "ascii-art picture" below (in comments, first row
- is the keycode in hex, and subsequent row(s) shows
- the button labels (several variants when appropriate)
- helps to descide which keycodes to assign to the buttons.
- */
-static struct ir_scancode ir_codes_manli[] = {
-
- /* 0x1c 0x12 *
- * FUNCTION POWER *
- * FM (|) *
- * */
- { 0x1c, KEY_RADIO }, /*XXX*/
- { 0x12, KEY_POWER },
-
- /* 0x01 0x02 0x03 *
- * 1 2 3 *
- * *
- * 0x04 0x05 0x06 *
- * 4 5 6 *
- * *
- * 0x07 0x08 0x09 *
- * 7 8 9 *
- * */
- { 0x01, KEY_1 },
- { 0x02, KEY_2 },
- { 0x03, KEY_3 },
- { 0x04, KEY_4 },
- { 0x05, KEY_5 },
- { 0x06, KEY_6 },
- { 0x07, KEY_7 },
- { 0x08, KEY_8 },
- { 0x09, KEY_9 },
-
- /* 0x0a 0x00 0x17 *
- * RECALL 0 +100 *
- * PLUS *
- * */
- { 0x0a, KEY_AGAIN }, /*XXX KEY_REWIND? */
- { 0x00, KEY_0 },
- { 0x17, KEY_DIGITS }, /*XXX*/
-
- /* 0x14 0x10 *
- * MENU INFO *
- * OSD */
- { 0x14, KEY_MENU },
- { 0x10, KEY_INFO },
-
- /* 0x0b *
- * Up *
- * *
- * 0x18 0x16 0x0c *
- * Left Ok Right *
- * *
- * 0x15 *
- * Down *
- * */
- { 0x0b, KEY_UP },
- { 0x18, KEY_LEFT },
- { 0x16, KEY_OK }, /*XXX KEY_SELECT? KEY_ENTER? */
- { 0x0c, KEY_RIGHT },
- { 0x15, KEY_DOWN },
-
- /* 0x11 0x0d *
- * TV/AV MODE *
- * SOURCE STEREO *
- * */
- { 0x11, KEY_TV }, /*XXX*/
- { 0x0d, KEY_MODE }, /*XXX there's no KEY_STEREO */
-
- /* 0x0f 0x1b 0x1a *
- * AUDIO Vol+ Chan+ *
- * TIMESHIFT??? *
- * *
- * 0x0e 0x1f 0x1e *
- * SLEEP Vol- Chan- *
- * */
- { 0x0f, KEY_AUDIO },
- { 0x1b, KEY_VOLUMEUP },
- { 0x1a, KEY_CHANNELUP },
- { 0x0e, KEY_TIME },
- { 0x1f, KEY_VOLUMEDOWN },
- { 0x1e, KEY_CHANNELDOWN },
-
- /* 0x13 0x19 *
- * MUTE SNAPSHOT*
- * */
- { 0x13, KEY_MUTE },
- { 0x19, KEY_CAMERA },
-
- /* 0x1d unused ? */
-};
-
-struct ir_scancode_table ir_codes_manli_table = {
- .scan = ir_codes_manli,
- .size = ARRAY_SIZE(ir_codes_manli),
-};
-EXPORT_SYMBOL_GPL(ir_codes_manli_table);
-
-/* Mike Baikov <mike@baikov.com> */
-static struct ir_scancode ir_codes_gotview7135[] = {
-
- { 0x11, KEY_POWER },
- { 0x35, KEY_TV },
- { 0x1b, KEY_0 },
- { 0x29, KEY_1 },
- { 0x19, KEY_2 },
- { 0x39, KEY_3 },
- { 0x1f, KEY_4 },
- { 0x2c, KEY_5 },
- { 0x21, KEY_6 },
- { 0x24, KEY_7 },
- { 0x18, KEY_8 },
- { 0x2b, KEY_9 },
- { 0x3b, KEY_AGAIN }, /* LOOP */
- { 0x06, KEY_AUDIO },
- { 0x31, KEY_PRINT }, /* PREVIEW */
- { 0x3e, KEY_VIDEO },
- { 0x10, KEY_CHANNELUP },
- { 0x20, KEY_CHANNELDOWN },
- { 0x0c, KEY_VOLUMEDOWN },
- { 0x28, KEY_VOLUMEUP },
- { 0x08, KEY_MUTE },
- { 0x26, KEY_SEARCH }, /* SCAN */
- { 0x3f, KEY_CAMERA }, /* SNAPSHOT */
- { 0x12, KEY_RECORD },
- { 0x32, KEY_STOP },
- { 0x3c, KEY_PLAY },
- { 0x1d, KEY_REWIND },
- { 0x2d, KEY_PAUSE },
- { 0x0d, KEY_FORWARD },
- { 0x05, KEY_ZOOM }, /*FULL*/
-
- { 0x2a, KEY_F21 }, /* LIVE TIMESHIFT */
- { 0x0e, KEY_F22 }, /* MIN TIMESHIFT */
- { 0x1e, KEY_TIME }, /* TIMESHIFT */
- { 0x38, KEY_F24 }, /* NORMAL TIMESHIFT */
-};
-
-struct ir_scancode_table ir_codes_gotview7135_table = {
- .scan = ir_codes_gotview7135,
- .size = ARRAY_SIZE(ir_codes_gotview7135),
-};
-EXPORT_SYMBOL_GPL(ir_codes_gotview7135_table);
-
-static struct ir_scancode ir_codes_purpletv[] = {
- { 0x03, KEY_POWER },
- { 0x6f, KEY_MUTE },
- { 0x10, KEY_BACKSPACE }, /* Recall */
-
- { 0x11, KEY_0 },
- { 0x04, KEY_1 },
- { 0x05, KEY_2 },
- { 0x06, KEY_3 },
- { 0x08, KEY_4 },
- { 0x09, KEY_5 },
- { 0x0a, KEY_6 },
- { 0x0c, KEY_7 },
- { 0x0d, KEY_8 },
- { 0x0e, KEY_9 },
- { 0x12, KEY_DOT }, /* 100+ */
-
- { 0x07, KEY_VOLUMEUP },
- { 0x0b, KEY_VOLUMEDOWN },
- { 0x1a, KEY_KPPLUS },
- { 0x18, KEY_KPMINUS },
- { 0x15, KEY_UP },
- { 0x1d, KEY_DOWN },
- { 0x0f, KEY_CHANNELUP },
- { 0x13, KEY_CHANNELDOWN },
- { 0x48, KEY_ZOOM },
-
- { 0x1b, KEY_VIDEO }, /* Video source */
- { 0x1f, KEY_CAMERA }, /* Snapshot */
- { 0x49, KEY_LANGUAGE }, /* MTS Select */
- { 0x19, KEY_SEARCH }, /* Auto Scan */
-
- { 0x4b, KEY_RECORD },
- { 0x46, KEY_PLAY },
- { 0x45, KEY_PAUSE }, /* Pause */
- { 0x44, KEY_STOP },
- { 0x43, KEY_TIME }, /* Time Shift */
- { 0x17, KEY_CHANNEL }, /* SURF CH */
- { 0x40, KEY_FORWARD }, /* Forward ? */
- { 0x42, KEY_REWIND }, /* Backward ? */
-
-};
-
-struct ir_scancode_table ir_codes_purpletv_table = {
- .scan = ir_codes_purpletv,
- .size = ARRAY_SIZE(ir_codes_purpletv),
-};
-EXPORT_SYMBOL_GPL(ir_codes_purpletv_table);
-
-/* Mapping for the 28 key remote control as seen at
- http://www.sednacomputer.com/photo/cardbus-tv.jpg
- Pavel Mihaylov <bin@bash.info>
- Also for the remote bundled with Kozumi KTV-01C card */
-static struct ir_scancode ir_codes_pctv_sedna[] = {
- { 0x00, KEY_0 },
- { 0x01, KEY_1 },
- { 0x02, KEY_2 },
- { 0x03, KEY_3 },
- { 0x04, KEY_4 },
- { 0x05, KEY_5 },
- { 0x06, KEY_6 },
- { 0x07, KEY_7 },
- { 0x08, KEY_8 },
- { 0x09, KEY_9 },
-
- { 0x0a, KEY_AGAIN }, /* Recall */
- { 0x0b, KEY_CHANNELUP },
- { 0x0c, KEY_VOLUMEUP },
- { 0x0d, KEY_MODE }, /* Stereo */
- { 0x0e, KEY_STOP },
- { 0x0f, KEY_PREVIOUSSONG },
- { 0x10, KEY_ZOOM },
- { 0x11, KEY_TUNER }, /* Source */
- { 0x12, KEY_POWER },
- { 0x13, KEY_MUTE },
- { 0x15, KEY_CHANNELDOWN },
- { 0x18, KEY_VOLUMEDOWN },
- { 0x19, KEY_CAMERA }, /* Snapshot */
- { 0x1a, KEY_NEXTSONG },
- { 0x1b, KEY_TIME }, /* Time Shift */
- { 0x1c, KEY_RADIO }, /* FM Radio */
- { 0x1d, KEY_RECORD },
- { 0x1e, KEY_PAUSE },
- /* additional codes for Kozumi's remote */
- { 0x14, KEY_INFO }, /* OSD */
- { 0x16, KEY_OK }, /* OK */
- { 0x17, KEY_DIGITS }, /* Plus */
- { 0x1f, KEY_PLAY }, /* Play */
-};
-
-struct ir_scancode_table ir_codes_pctv_sedna_table = {
- .scan = ir_codes_pctv_sedna,
- .size = ARRAY_SIZE(ir_codes_pctv_sedna),
-};
-EXPORT_SYMBOL_GPL(ir_codes_pctv_sedna_table);
-
-/* Mark Phalan <phalanm@o2.ie> */
-static struct ir_scancode ir_codes_pv951[] = {
- { 0x00, KEY_0 },
- { 0x01, KEY_1 },
- { 0x02, KEY_2 },
- { 0x03, KEY_3 },
- { 0x04, KEY_4 },
- { 0x05, KEY_5 },
- { 0x06, KEY_6 },
- { 0x07, KEY_7 },
- { 0x08, KEY_8 },
- { 0x09, KEY_9 },
-
- { 0x12, KEY_POWER },
- { 0x10, KEY_MUTE },
- { 0x1f, KEY_VOLUMEDOWN },
- { 0x1b, KEY_VOLUMEUP },
- { 0x1a, KEY_CHANNELUP },
- { 0x1e, KEY_CHANNELDOWN },
- { 0x0e, KEY_PAGEUP },
- { 0x1d, KEY_PAGEDOWN },
- { 0x13, KEY_SOUND },
-
- { 0x18, KEY_KPPLUSMINUS }, /* CH +/- */
- { 0x16, KEY_SUBTITLE }, /* CC */
- { 0x0d, KEY_TEXT }, /* TTX */
- { 0x0b, KEY_TV }, /* AIR/CBL */
- { 0x11, KEY_PC }, /* PC/TV */
- { 0x17, KEY_OK }, /* CH RTN */
- { 0x19, KEY_MODE }, /* FUNC */
- { 0x0c, KEY_SEARCH }, /* AUTOSCAN */
-
- /* Not sure what to do with these ones! */
- { 0x0f, KEY_SELECT }, /* SOURCE */
- { 0x0a, KEY_KPPLUS }, /* +100 */
- { 0x14, KEY_EQUAL }, /* SYNC */
- { 0x1c, KEY_MEDIA }, /* PC/TV */
-};
-
-struct ir_scancode_table ir_codes_pv951_table = {
- .scan = ir_codes_pv951,
- .size = ARRAY_SIZE(ir_codes_pv951),
-};
-EXPORT_SYMBOL_GPL(ir_codes_pv951_table);
-
-/* generic RC5 keytable */
-/* see http://users.pandora.be/nenya/electronics/rc5/codes00.htm */
-/* used by old (black) Hauppauge remotes */
-static struct ir_scancode ir_codes_rc5_tv[] = {
- /* Keys 0 to 9 */
- { 0x00, KEY_0 },
- { 0x01, KEY_1 },
- { 0x02, KEY_2 },
- { 0x03, KEY_3 },
- { 0x04, KEY_4 },
- { 0x05, KEY_5 },
- { 0x06, KEY_6 },
- { 0x07, KEY_7 },
- { 0x08, KEY_8 },
- { 0x09, KEY_9 },
-
- { 0x0b, KEY_CHANNEL }, /* channel / program (japan: 11) */
- { 0x0c, KEY_POWER }, /* standby */
- { 0x0d, KEY_MUTE }, /* mute / demute */
- { 0x0f, KEY_TV }, /* display */
- { 0x10, KEY_VOLUMEUP },
- { 0x11, KEY_VOLUMEDOWN },
- { 0x12, KEY_BRIGHTNESSUP },
- { 0x13, KEY_BRIGHTNESSDOWN },
- { 0x1e, KEY_SEARCH }, /* search + */
- { 0x20, KEY_CHANNELUP }, /* channel / program + */
- { 0x21, KEY_CHANNELDOWN }, /* channel / program - */
- { 0x22, KEY_CHANNEL }, /* alt / channel */
- { 0x23, KEY_LANGUAGE }, /* 1st / 2nd language */
- { 0x26, KEY_SLEEP }, /* sleeptimer */
- { 0x2e, KEY_MENU }, /* 2nd controls (USA: menu) */
- { 0x30, KEY_PAUSE },
- { 0x32, KEY_REWIND },
- { 0x33, KEY_GOTO },
- { 0x35, KEY_PLAY },
- { 0x36, KEY_STOP },
- { 0x37, KEY_RECORD }, /* recording */
- { 0x3c, KEY_TEXT }, /* teletext submode (Japan: 12) */
- { 0x3d, KEY_SUSPEND }, /* system standby */
-
-};
-
-struct ir_scancode_table ir_codes_rc5_tv_table = {
- .scan = ir_codes_rc5_tv,
- .size = ARRAY_SIZE(ir_codes_rc5_tv),
-};
-EXPORT_SYMBOL_GPL(ir_codes_rc5_tv_table);
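All of the scancodes in the generic RC5 table above fit in six bits, which is consistent with them being the RC5 command field: an RC5 frame carries two start bits, a toggle bit, a 5-bit system address and a 6-bit command. A small sketch of pulling those fields out of an assembled 14-bit frame is below, under that assumption; the helper names are illustrative only.

#include <stdint.h>

/* Illustrative field extraction for an assembled 14-bit RC5 frame:
 * bits 13-12 start, bit 11 toggle, bits 10-6 address, bits 5-0 command. */
static inline uint8_t rc5_command(uint16_t frame)
{
        return frame & 0x3f;            /* low 6 bits: command */
}

static inline uint8_t rc5_address(uint16_t frame)
{
        return (frame >> 6) & 0x1f;     /* 5-bit system address */
}

static inline uint8_t rc5_toggle(uint16_t frame)
{
        return (frame >> 11) & 0x1;     /* toggle bit */
}

The old black Hauppauge remote's volume-up button, 0x10 in the table above, is then just command 16 within whatever system address that remote transmits.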
-
-/* Table for Leadtek Winfast Remote Controls - used by both bttv and cx88 */
-static struct ir_scancode ir_codes_winfast[] = {
- /* Keys 0 to 9 */
- { 0x12, KEY_0 },
- { 0x05, KEY_1 },
- { 0x06, KEY_2 },
- { 0x07, KEY_3 },
- { 0x09, KEY_4 },
- { 0x0a, KEY_5 },
- { 0x0b, KEY_6 },
- { 0x0d, KEY_7 },
- { 0x0e, KEY_8 },
- { 0x0f, KEY_9 },
-
- { 0x00, KEY_POWER },
- { 0x1b, KEY_AUDIO }, /* Audio Source */
- { 0x02, KEY_TUNER }, /* TV/FM, not on Y0400052 */
- { 0x1e, KEY_VIDEO }, /* Video Source */
- { 0x16, KEY_INFO }, /* Display information */
- { 0x04, KEY_VOLUMEUP },
- { 0x08, KEY_VOLUMEDOWN },
- { 0x0c, KEY_CHANNELUP },
- { 0x10, KEY_CHANNELDOWN },
- { 0x03, KEY_ZOOM }, /* fullscreen */
- { 0x1f, KEY_TEXT }, /* closed caption/teletext */
- { 0x20, KEY_SLEEP },
- { 0x29, KEY_CLEAR }, /* boss key */
- { 0x14, KEY_MUTE },
- { 0x2b, KEY_RED },
- { 0x2c, KEY_GREEN },
- { 0x2d, KEY_YELLOW },
- { 0x2e, KEY_BLUE },
- { 0x18, KEY_KPPLUS }, /* fine tune + , not on Y040052 */
- { 0x19, KEY_KPMINUS }, /* fine tune - , not on Y040052 */
- { 0x2a, KEY_MEDIA }, /* PIP (Picture in Picture) */
- { 0x21, KEY_DOT },
- { 0x13, KEY_ENTER },
- { 0x11, KEY_LAST }, /* Recall (last channel) */
- { 0x22, KEY_PREVIOUS },
- { 0x23, KEY_PLAYPAUSE },
- { 0x24, KEY_NEXT },
- { 0x25, KEY_TIME }, /* Time Shifting */
- { 0x26, KEY_STOP },
- { 0x27, KEY_RECORD },
- { 0x28, KEY_SAVE }, /* Screenshot */
- { 0x2f, KEY_MENU },
- { 0x30, KEY_CANCEL },
- { 0x31, KEY_CHANNEL }, /* Channel Surf */
- { 0x32, KEY_SUBTITLE },
- { 0x33, KEY_LANGUAGE },
- { 0x34, KEY_REWIND },
- { 0x35, KEY_FASTFORWARD },
- { 0x36, KEY_TV },
- { 0x37, KEY_RADIO }, /* FM */
- { 0x38, KEY_DVD },
-
- { 0x1a, KEY_MODE}, /* change to MCE mode on Y04G0051 */
- { 0x3e, KEY_F21 }, /* MCE +VOL, on Y04G0033 */
- { 0x3a, KEY_F22 }, /* MCE -VOL, on Y04G0033 */
- { 0x3b, KEY_F23 }, /* MCE +CH, on Y04G0033 */
- { 0x3f, KEY_F24 } /* MCE -CH, on Y04G0033 */
-};
-
-struct ir_scancode_table ir_codes_winfast_table = {
- .scan = ir_codes_winfast,
- .size = ARRAY_SIZE(ir_codes_winfast),
-};
-EXPORT_SYMBOL_GPL(ir_codes_winfast_table);
-
-static struct ir_scancode ir_codes_pinnacle_color[] = {
- { 0x59, KEY_MUTE },
- { 0x4a, KEY_POWER },
-
- { 0x18, KEY_TEXT },
- { 0x26, KEY_TV },
- { 0x3d, KEY_PRINT },
-
- { 0x48, KEY_RED },
- { 0x04, KEY_GREEN },
- { 0x11, KEY_YELLOW },
- { 0x00, KEY_BLUE },
-
- { 0x2d, KEY_VOLUMEUP },
- { 0x1e, KEY_VOLUMEDOWN },
-
- { 0x49, KEY_MENU },
-
- { 0x16, KEY_CHANNELUP },
- { 0x17, KEY_CHANNELDOWN },
-
- { 0x20, KEY_UP },
- { 0x21, KEY_DOWN },
- { 0x22, KEY_LEFT },
- { 0x23, KEY_RIGHT },
- { 0x0d, KEY_SELECT },
-
- { 0x08, KEY_BACK },
- { 0x07, KEY_REFRESH },
-
- { 0x2f, KEY_ZOOM },
- { 0x29, KEY_RECORD },
-
- { 0x4b, KEY_PAUSE },
- { 0x4d, KEY_REWIND },
- { 0x2e, KEY_PLAY },
- { 0x4e, KEY_FORWARD },
- { 0x53, KEY_PREVIOUS },
- { 0x4c, KEY_STOP },
- { 0x54, KEY_NEXT },
-
- { 0x69, KEY_0 },
- { 0x6a, KEY_1 },
- { 0x6b, KEY_2 },
- { 0x6c, KEY_3 },
- { 0x6d, KEY_4 },
- { 0x6e, KEY_5 },
- { 0x6f, KEY_6 },
- { 0x70, KEY_7 },
- { 0x71, KEY_8 },
- { 0x72, KEY_9 },
-
- { 0x74, KEY_CHANNEL },
- { 0x0a, KEY_BACKSPACE },
-};
-
-struct ir_scancode_table ir_codes_pinnacle_color_table = {
- .scan = ir_codes_pinnacle_color,
- .size = ARRAY_SIZE(ir_codes_pinnacle_color),
-};
-EXPORT_SYMBOL_GPL(ir_codes_pinnacle_color_table);
-
-/* Hauppauge: the newer, gray remotes (there seem to be multiple
- * slightly different versions), shipped with cx88+ivtv cards.
- * Almost RC5 coding, but with some non-standard keys */
-static struct ir_scancode ir_codes_hauppauge_new[] = {
- /* Keys 0 to 9 */
- { 0x00, KEY_0 },
- { 0x01, KEY_1 },
- { 0x02, KEY_2 },
- { 0x03, KEY_3 },
- { 0x04, KEY_4 },
- { 0x05, KEY_5 },
- { 0x06, KEY_6 },
- { 0x07, KEY_7 },
- { 0x08, KEY_8 },
- { 0x09, KEY_9 },
-
- { 0x0a, KEY_TEXT }, /* keypad asterisk as well */
- { 0x0b, KEY_RED }, /* red button */
- { 0x0c, KEY_RADIO },
- { 0x0d, KEY_MENU },
- { 0x0e, KEY_SUBTITLE }, /* also the # key */
- { 0x0f, KEY_MUTE },
- { 0x10, KEY_VOLUMEUP },
- { 0x11, KEY_VOLUMEDOWN },
- { 0x12, KEY_PREVIOUS }, /* previous channel */
- { 0x14, KEY_UP },
- { 0x15, KEY_DOWN },
- { 0x16, KEY_LEFT },
- { 0x17, KEY_RIGHT },
- { 0x18, KEY_VIDEO }, /* Videos */
- { 0x19, KEY_AUDIO }, /* Music */
- /* 0x1a: Pictures - mapped to KEY_MHP (presumably "Multimedia
- Home Platform"), since there is no "PICTURES" key in input.h
- */
- { 0x1a, KEY_MHP },
-
- { 0x1b, KEY_EPG }, /* Guide */
- { 0x1c, KEY_TV },
- { 0x1e, KEY_NEXTSONG }, /* skip >| */
- { 0x1f, KEY_EXIT }, /* back/exit */
- { 0x20, KEY_CHANNELUP }, /* channel / program + */
- { 0x21, KEY_CHANNELDOWN }, /* channel / program - */
- { 0x22, KEY_CHANNEL }, /* source (old black remote) */
- { 0x24, KEY_PREVIOUSSONG }, /* replay |< */
- { 0x25, KEY_ENTER }, /* OK */
- { 0x26, KEY_SLEEP }, /* minimize (old black remote) */
- { 0x29, KEY_BLUE }, /* blue key */
- { 0x2e, KEY_GREEN }, /* green button */
- { 0x30, KEY_PAUSE }, /* pause */
- { 0x32, KEY_REWIND }, /* backward << */
- { 0x34, KEY_FASTFORWARD }, /* forward >> */
- { 0x35, KEY_PLAY },
- { 0x36, KEY_STOP },
- { 0x37, KEY_RECORD }, /* recording */
- { 0x38, KEY_YELLOW }, /* yellow key */
- { 0x3b, KEY_SELECT }, /* top right button */
- { 0x3c, KEY_ZOOM }, /* full */
- { 0x3d, KEY_POWER }, /* system power (green button) */
-};
-
-struct ir_scancode_table ir_codes_hauppauge_new_table = {
- .scan = ir_codes_hauppauge_new,
- .size = ARRAY_SIZE(ir_codes_hauppauge_new),
-};
-EXPORT_SYMBOL_GPL(ir_codes_hauppauge_new_table);
-
-static struct ir_scancode ir_codes_npgtech[] = {
- { 0x1d, KEY_SWITCHVIDEOMODE }, /* switch inputs */
- { 0x2a, KEY_FRONT },
-
- { 0x3e, KEY_1 },
- { 0x02, KEY_2 },
- { 0x06, KEY_3 },
- { 0x0a, KEY_4 },
- { 0x0e, KEY_5 },
- { 0x12, KEY_6 },
- { 0x16, KEY_7 },
- { 0x1a, KEY_8 },
- { 0x1e, KEY_9 },
- { 0x3a, KEY_0 },
- { 0x22, KEY_NUMLOCK }, /* -/-- */
- { 0x20, KEY_REFRESH },
-
- { 0x03, KEY_BRIGHTNESSDOWN },
- { 0x28, KEY_AUDIO },
- { 0x3c, KEY_CHANNELUP },
- { 0x3f, KEY_VOLUMEDOWN },
- { 0x2e, KEY_MUTE },
- { 0x3b, KEY_VOLUMEUP },
- { 0x00, KEY_CHANNELDOWN },
- { 0x07, KEY_BRIGHTNESSUP },
- { 0x2c, KEY_TEXT },
-
- { 0x37, KEY_RECORD },
- { 0x17, KEY_PLAY },
- { 0x13, KEY_PAUSE },
- { 0x26, KEY_STOP },
- { 0x18, KEY_FASTFORWARD },
- { 0x14, KEY_REWIND },
- { 0x33, KEY_ZOOM },
- { 0x32, KEY_KEYBOARD },
- { 0x30, KEY_GOTO }, /* Pointing arrow */
- { 0x36, KEY_MACRO }, /* Maximize/Minimize (yellow) */
- { 0x0b, KEY_RADIO },
- { 0x10, KEY_POWER },
-
-};
-
-struct ir_scancode_table ir_codes_npgtech_table = {
- .scan = ir_codes_npgtech,
- .size = ARRAY_SIZE(ir_codes_npgtech),
-};
-EXPORT_SYMBOL_GPL(ir_codes_npgtech_table);
-
-/* Norwood Micro (non-Pro) TV Tuner
- By Peter Naulls <peter@chocky.org>
- Key comments are the functions given in the manual */
-static struct ir_scancode ir_codes_norwood[] = {
- /* Keys 0 to 9 */
- { 0x20, KEY_0 },
- { 0x21, KEY_1 },
- { 0x22, KEY_2 },
- { 0x23, KEY_3 },
- { 0x24, KEY_4 },
- { 0x25, KEY_5 },
- { 0x26, KEY_6 },
- { 0x27, KEY_7 },
- { 0x28, KEY_8 },
- { 0x29, KEY_9 },
-
- { 0x78, KEY_TUNER }, /* Video Source */
- { 0x2c, KEY_EXIT }, /* Open/Close software */
- { 0x2a, KEY_SELECT }, /* 2 Digit Select */
- { 0x69, KEY_AGAIN }, /* Recall */
-
- { 0x32, KEY_BRIGHTNESSUP }, /* Brightness increase */
- { 0x33, KEY_BRIGHTNESSDOWN }, /* Brightness decrease */
- { 0x6b, KEY_KPPLUS }, /* (not named >>>>>) */
- { 0x6c, KEY_KPMINUS }, /* (not named <<<<<) */
-
- { 0x2d, KEY_MUTE }, /* Mute */
- { 0x30, KEY_VOLUMEUP }, /* Volume up */
- { 0x31, KEY_VOLUMEDOWN }, /* Volume down */
- { 0x60, KEY_CHANNELUP }, /* Channel up */
- { 0x61, KEY_CHANNELDOWN }, /* Channel down */
-
- { 0x3f, KEY_RECORD }, /* Record */
- { 0x37, KEY_PLAY }, /* Play */
- { 0x36, KEY_PAUSE }, /* Pause */
- { 0x2b, KEY_STOP }, /* Stop */
- { 0x67, KEY_FASTFORWARD }, /* Forward */
- { 0x66, KEY_REWIND }, /* Rewind */
- { 0x3e, KEY_SEARCH }, /* Auto Scan */
- { 0x2e, KEY_CAMERA }, /* Capture Video */
- { 0x6d, KEY_MENU }, /* Show/Hide Control */
- { 0x2f, KEY_ZOOM }, /* Full Screen */
- { 0x34, KEY_RADIO }, /* FM */
- { 0x65, KEY_POWER }, /* Computer power */
-};
-
-struct ir_scancode_table ir_codes_norwood_table = {
- .scan = ir_codes_norwood,
- .size = ARRAY_SIZE(ir_codes_norwood),
-};
-EXPORT_SYMBOL_GPL(ir_codes_norwood_table);
-
-/* From reading the following remotes:
- * Zenith Universal 7 / TV Mode 807 / VCR Mode 837
- * Hauppauge (from NOVA-CI-s box product)
- * This is a "middle of the road" approach, differences are noted
- */
-static struct ir_scancode ir_codes_budget_ci_old[] = {
- { 0x00, KEY_0 },
- { 0x01, KEY_1 },
- { 0x02, KEY_2 },
- { 0x03, KEY_3 },
- { 0x04, KEY_4 },
- { 0x05, KEY_5 },
- { 0x06, KEY_6 },
- { 0x07, KEY_7 },
- { 0x08, KEY_8 },
- { 0x09, KEY_9 },
- { 0x0a, KEY_ENTER },
- { 0x0b, KEY_RED },
- { 0x0c, KEY_POWER }, /* RADIO on Hauppauge */
- { 0x0d, KEY_MUTE },
- { 0x0f, KEY_A }, /* TV on Hauppauge */
- { 0x10, KEY_VOLUMEUP },
- { 0x11, KEY_VOLUMEDOWN },
- { 0x14, KEY_B },
- { 0x1c, KEY_UP },
- { 0x1d, KEY_DOWN },
- { 0x1e, KEY_OPTION }, /* RESERVED on Hauppauge */
- { 0x1f, KEY_BREAK },
- { 0x20, KEY_CHANNELUP },
- { 0x21, KEY_CHANNELDOWN },
- { 0x22, KEY_PREVIOUS }, /* Prev Ch on Zenith, SOURCE on Hauppauge */
- { 0x24, KEY_RESTART },
- { 0x25, KEY_OK },
- { 0x26, KEY_CYCLEWINDOWS }, /* MINIMIZE on Hauppauge */
- { 0x28, KEY_ENTER }, /* VCR mode on Zenith */
- { 0x29, KEY_PAUSE },
- { 0x2b, KEY_RIGHT },
- { 0x2c, KEY_LEFT },
- { 0x2e, KEY_MENU }, /* FULL SCREEN on Hauppauge */
- { 0x30, KEY_SLOW },
- { 0x31, KEY_PREVIOUS }, /* VCR mode on Zenith */
- { 0x32, KEY_REWIND },
- { 0x34, KEY_FASTFORWARD },
- { 0x35, KEY_PLAY },
- { 0x36, KEY_STOP },
- { 0x37, KEY_RECORD },
- { 0x38, KEY_TUNER }, /* TV/VCR on Zenith */
- { 0x3a, KEY_C },
- { 0x3c, KEY_EXIT },
- { 0x3d, KEY_POWER2 },
- { 0x3e, KEY_TUNER },
-};
-
-struct ir_scancode_table ir_codes_budget_ci_old_table = {
- .scan = ir_codes_budget_ci_old,
- .size = ARRAY_SIZE(ir_codes_budget_ci_old),
-};
-EXPORT_SYMBOL_GPL(ir_codes_budget_ci_old_table);
-
-/*
- * Marc Fargas <telenieko@telenieko.com>
- * this is the remote control that comes with the asus p7131
- * which has a label saying it is "Model PC-39"
- */
-static struct ir_scancode ir_codes_asus_pc39[] = {
- /* Keys 0 to 9 */
- { 0x15, KEY_0 },
- { 0x29, KEY_1 },
- { 0x2d, KEY_2 },
- { 0x2b, KEY_3 },
- { 0x09, KEY_4 },
- { 0x0d, KEY_5 },
- { 0x0b, KEY_6 },
- { 0x31, KEY_7 },
- { 0x35, KEY_8 },
- { 0x33, KEY_9 },
-
- { 0x3e, KEY_RADIO }, /* radio */
- { 0x03, KEY_MENU }, /* dvd/menu */
- { 0x2a, KEY_VOLUMEUP },
- { 0x19, KEY_VOLUMEDOWN },
- { 0x37, KEY_UP },
- { 0x3b, KEY_DOWN },
- { 0x27, KEY_LEFT },
- { 0x2f, KEY_RIGHT },
- { 0x25, KEY_VIDEO }, /* video */
- { 0x39, KEY_AUDIO }, /* music */
-
- { 0x21, KEY_TV }, /* tv */
- { 0x1d, KEY_EXIT }, /* back */
- { 0x0a, KEY_CHANNELUP }, /* channel / program + */
- { 0x1b, KEY_CHANNELDOWN }, /* channel / program - */
- { 0x1a, KEY_ENTER }, /* enter */
-
- { 0x06, KEY_PAUSE }, /* play/pause */
- { 0x1e, KEY_PREVIOUS }, /* rew */
- { 0x26, KEY_NEXT }, /* forward */
- { 0x0e, KEY_REWIND }, /* backward << */
- { 0x3a, KEY_FASTFORWARD }, /* forward >> */
- { 0x36, KEY_STOP },
- { 0x2e, KEY_RECORD }, /* recording */
- { 0x16, KEY_POWER }, /* the button that reads "close" */
-
- { 0x11, KEY_ZOOM }, /* full screen */
- { 0x13, KEY_MACRO }, /* recall */
- { 0x23, KEY_HOME }, /* home */
- { 0x05, KEY_PVR }, /* picture */
- { 0x3d, KEY_MUTE }, /* mute */
- { 0x01, KEY_DVD }, /* dvd */
-};
-
-struct ir_scancode_table ir_codes_asus_pc39_table = {
- .scan = ir_codes_asus_pc39,
- .size = ARRAY_SIZE(ir_codes_asus_pc39),
-};
-EXPORT_SYMBOL_GPL(ir_codes_asus_pc39_table);
-
-
-/* Encore ENLTV-FM - black plastic, white front cover with white glowing buttons
- Juan Pablo Sormani <sorman@gmail.com> */
-static struct ir_scancode ir_codes_encore_enltv[] = {
-
- /* The Power button does nothing, not even in the Windows app,
- although it does send data (used for BIOS wakeup?) */
- { 0x0d, KEY_MUTE },
-
- { 0x1e, KEY_TV },
- { 0x00, KEY_VIDEO },
- { 0x01, KEY_AUDIO }, /* music */
- { 0x02, KEY_MHP }, /* picture */
-
- { 0x1f, KEY_1 },
- { 0x03, KEY_2 },
- { 0x04, KEY_3 },
- { 0x05, KEY_4 },
- { 0x1c, KEY_5 },
- { 0x06, KEY_6 },
- { 0x07, KEY_7 },
- { 0x08, KEY_8 },
- { 0x1d, KEY_9 },
- { 0x0a, KEY_0 },
-
- { 0x09, KEY_LIST }, /* -/-- */
- { 0x0b, KEY_LAST }, /* recall */
-
- { 0x14, KEY_HOME }, /* win start menu */
- { 0x15, KEY_EXIT }, /* exit */
- { 0x16, KEY_CHANNELUP }, /* UP */
- { 0x12, KEY_CHANNELDOWN }, /* DOWN */
- { 0x0c, KEY_VOLUMEUP }, /* RIGHT */
- { 0x17, KEY_VOLUMEDOWN }, /* LEFT */
-
- { 0x18, KEY_ENTER }, /* OK */
-
- { 0x0e, KEY_ESC },
- { 0x13, KEY_CYCLEWINDOWS }, /* desktop */
- { 0x11, KEY_TAB },
- { 0x19, KEY_SWITCHVIDEOMODE }, /* switch */
-
- { 0x1a, KEY_MENU },
- { 0x1b, KEY_ZOOM }, /* fullscreen */
- { 0x44, KEY_TIME }, /* time shift */
- { 0x40, KEY_MODE }, /* source */
-
- { 0x5a, KEY_RECORD },
- { 0x42, KEY_PLAY }, /* play/pause */
- { 0x45, KEY_STOP },
- { 0x43, KEY_CAMERA }, /* camera icon */
-
- { 0x48, KEY_REWIND },
- { 0x4a, KEY_FASTFORWARD },
- { 0x49, KEY_PREVIOUS },
- { 0x4b, KEY_NEXT },
-
- { 0x4c, KEY_FAVORITES }, /* tv wall */
- { 0x4d, KEY_SOUND }, /* DVD sound */
- { 0x4e, KEY_LANGUAGE }, /* DVD lang */
- { 0x4f, KEY_TEXT }, /* DVD text */
-
- { 0x50, KEY_SLEEP }, /* shutdown */
- { 0x51, KEY_MODE }, /* stereo > main */
- { 0x52, KEY_SELECT }, /* stereo > sap */
- { 0x53, KEY_PROG1 }, /* teletext */
-
-
- { 0x59, KEY_RED }, /* AP1 */
- { 0x41, KEY_GREEN }, /* AP2 */
- { 0x47, KEY_YELLOW }, /* AP3 */
- { 0x57, KEY_BLUE }, /* AP4 */
-};
-
-struct ir_scancode_table ir_codes_encore_enltv_table = {
- .scan = ir_codes_encore_enltv,
- .size = ARRAY_SIZE(ir_codes_encore_enltv),
-};
-EXPORT_SYMBOL_GPL(ir_codes_encore_enltv_table);
-
-/* Encore ENLTV2-FM - silver plastic - "Wand Media" written at the bottom
- Mauro Carvalho Chehab <mchehab@infradead.org> */
-static struct ir_scancode ir_codes_encore_enltv2[] = {
- { 0x4c, KEY_POWER2 },
- { 0x4a, KEY_TUNER },
- { 0x40, KEY_1 },
- { 0x60, KEY_2 },
- { 0x50, KEY_3 },
- { 0x70, KEY_4 },
- { 0x48, KEY_5 },
- { 0x68, KEY_6 },
- { 0x58, KEY_7 },
- { 0x78, KEY_8 },
- { 0x44, KEY_9 },
- { 0x54, KEY_0 },
-
- { 0x64, KEY_LAST }, /* +100 */
- { 0x4e, KEY_AGAIN }, /* Recall */
-
- { 0x6c, KEY_SWITCHVIDEOMODE }, /* Video Source */
- { 0x5e, KEY_MENU },
- { 0x56, KEY_SCREEN },
- { 0x7a, KEY_SETUP },
-
- { 0x46, KEY_MUTE },
- { 0x5c, KEY_MODE }, /* Stereo */
- { 0x74, KEY_INFO },
- { 0x7c, KEY_CLEAR },
-
- { 0x55, KEY_UP },
- { 0x49, KEY_DOWN },
- { 0x7e, KEY_LEFT },
- { 0x59, KEY_RIGHT },
- { 0x6a, KEY_ENTER },
-
- { 0x42, KEY_VOLUMEUP },
- { 0x62, KEY_VOLUMEDOWN },
- { 0x52, KEY_CHANNELUP },
- { 0x72, KEY_CHANNELDOWN },
-
- { 0x41, KEY_RECORD },
- { 0x51, KEY_CAMERA }, /* Snapshot */
- { 0x75, KEY_TIME }, /* Timeshift */
- { 0x71, KEY_TV2 }, /* PIP */
-
- { 0x45, KEY_REWIND },
- { 0x6f, KEY_PAUSE },
- { 0x7d, KEY_FORWARD },
- { 0x79, KEY_STOP },
-};
-
-struct ir_scancode_table ir_codes_encore_enltv2_table = {
- .scan = ir_codes_encore_enltv2,
- .size = ARRAY_SIZE(ir_codes_encore_enltv2),
-};
-EXPORT_SYMBOL_GPL(ir_codes_encore_enltv2_table);
-
-/* for the Technotrend 1500 bundled remotes (grey and black): */
-static struct ir_scancode ir_codes_tt_1500[] = {
- { 0x01, KEY_POWER },
- { 0x02, KEY_SHUFFLE }, /* ? double-arrow key */
- { 0x03, KEY_1 },
- { 0x04, KEY_2 },
- { 0x05, KEY_3 },
- { 0x06, KEY_4 },
- { 0x07, KEY_5 },
- { 0x08, KEY_6 },
- { 0x09, KEY_7 },
- { 0x0a, KEY_8 },
- { 0x0b, KEY_9 },
- { 0x0c, KEY_0 },
- { 0x0d, KEY_UP },
- { 0x0e, KEY_LEFT },
- { 0x0f, KEY_OK },
- { 0x10, KEY_RIGHT },
- { 0x11, KEY_DOWN },
- { 0x12, KEY_INFO },
- { 0x13, KEY_EXIT },
- { 0x14, KEY_RED },
- { 0x15, KEY_GREEN },
- { 0x16, KEY_YELLOW },
- { 0x17, KEY_BLUE },
- { 0x18, KEY_MUTE },
- { 0x19, KEY_TEXT },
- { 0x1a, KEY_MODE }, /* ? TV/Radio */
- { 0x21, KEY_OPTION },
- { 0x22, KEY_EPG },
- { 0x23, KEY_CHANNELUP },
- { 0x24, KEY_CHANNELDOWN },
- { 0x25, KEY_VOLUMEUP },
- { 0x26, KEY_VOLUMEDOWN },
- { 0x27, KEY_SETUP },
- { 0x3a, KEY_RECORD }, /* these keys are only in the black remote */
- { 0x3b, KEY_PLAY },
- { 0x3c, KEY_STOP },
- { 0x3d, KEY_REWIND },
- { 0x3e, KEY_PAUSE },
- { 0x3f, KEY_FORWARD },
-};
-
-struct ir_scancode_table ir_codes_tt_1500_table = {
- .scan = ir_codes_tt_1500,
- .size = ARRAY_SIZE(ir_codes_tt_1500),
-};
-EXPORT_SYMBOL_GPL(ir_codes_tt_1500_table);
-
-/* DViCO FUSION HDTV MCE remote */
-static struct ir_scancode ir_codes_fusionhdtv_mce[] = {
-
- { 0x0b, KEY_1 },
- { 0x17, KEY_2 },
- { 0x1b, KEY_3 },
- { 0x07, KEY_4 },
- { 0x50, KEY_5 },
- { 0x54, KEY_6 },
- { 0x48, KEY_7 },
- { 0x4c, KEY_8 },
- { 0x58, KEY_9 },
- { 0x03, KEY_0 },
-
- { 0x5e, KEY_OK },
- { 0x51, KEY_UP },
- { 0x53, KEY_DOWN },
- { 0x5b, KEY_LEFT },
- { 0x5f, KEY_RIGHT },
-
- { 0x02, KEY_TV }, /* Labeled DTV on remote */
- { 0x0e, KEY_MP3 },
- { 0x1a, KEY_DVD },
- { 0x1e, KEY_FAVORITES }, /* Labeled CPF on remote */
- { 0x16, KEY_SETUP },
- { 0x46, KEY_POWER2 }, /* TV On/Off button on remote */
- { 0x0a, KEY_EPG }, /* Labeled Guide on remote */
-
- { 0x49, KEY_BACK },
- { 0x59, KEY_INFO }, /* Labeled MORE on remote */
- { 0x4d, KEY_MENU }, /* Labeled DVDMENU on remote */
- { 0x55, KEY_CYCLEWINDOWS }, /* Labeled ALT-TAB on remote */
-
- { 0x0f, KEY_PREVIOUSSONG }, /* Labeled |<< REPLAY on remote */
- { 0x12, KEY_NEXTSONG }, /* Labeled >>| SKIP on remote */
- { 0x42, KEY_ENTER }, /* Labeled START with a green
- MS windows logo on remote */
-
- { 0x15, KEY_VOLUMEUP },
- { 0x05, KEY_VOLUMEDOWN },
- { 0x11, KEY_CHANNELUP },
- { 0x09, KEY_CHANNELDOWN },
-
- { 0x52, KEY_CAMERA },
- { 0x5a, KEY_TUNER },
- { 0x19, KEY_OPEN },
-
- { 0x13, KEY_MODE }, /* 4:3 16:9 select */
- { 0x1f, KEY_ZOOM },
-
- { 0x43, KEY_REWIND },
- { 0x47, KEY_PLAYPAUSE },
- { 0x4f, KEY_FASTFORWARD },
- { 0x57, KEY_MUTE },
- { 0x0d, KEY_STOP },
- { 0x01, KEY_RECORD },
- { 0x4e, KEY_POWER },
-};
-
-struct ir_scancode_table ir_codes_fusionhdtv_mce_table = {
- .scan = ir_codes_fusionhdtv_mce,
- .size = ARRAY_SIZE(ir_codes_fusionhdtv_mce),
-};
-EXPORT_SYMBOL_GPL(ir_codes_fusionhdtv_mce_table);
-
-/* Pinnacle PCTV HD 800i mini remote */
-static struct ir_scancode ir_codes_pinnacle_pctv_hd[] = {
-
- { 0x0f, KEY_1 },
- { 0x15, KEY_2 },
- { 0x10, KEY_3 },
- { 0x18, KEY_4 },
- { 0x1b, KEY_5 },
- { 0x1e, KEY_6 },
- { 0x11, KEY_7 },
- { 0x21, KEY_8 },
- { 0x12, KEY_9 },
- { 0x27, KEY_0 },
-
- { 0x24, KEY_ZOOM },
- { 0x2a, KEY_SUBTITLE },
-
- { 0x00, KEY_MUTE },
- { 0x01, KEY_ENTER }, /* Pinnacle Logo */
- { 0x39, KEY_POWER },
-
- { 0x03, KEY_VOLUMEUP },
- { 0x09, KEY_VOLUMEDOWN },
- { 0x06, KEY_CHANNELUP },
- { 0x0c, KEY_CHANNELDOWN },
-
- { 0x2d, KEY_REWIND },
- { 0x30, KEY_PLAYPAUSE },
- { 0x33, KEY_FASTFORWARD },
- { 0x3c, KEY_STOP },
- { 0x36, KEY_RECORD },
- { 0x3f, KEY_EPG }, /* Labeled "?" */
-};
-
-struct ir_scancode_table ir_codes_pinnacle_pctv_hd_table = {
- .scan = ir_codes_pinnacle_pctv_hd,
- .size = ARRAY_SIZE(ir_codes_pinnacle_pctv_hd),
-};
-EXPORT_SYMBOL_GPL(ir_codes_pinnacle_pctv_hd_table);
-
-/*
- * Igor Kuznetsov <igk72@ya.ru>
- * Andrey J. Melnikov <temnota@kmv.ru>
- *
- * Keytable is used by BeholdTV 60x series, M6 series at
- * least, and probably other cards too.
- * The "ascii-art picture" below (in comments, first row
- * is the keycode in hex, and subsequent row(s) shows
- * the button labels (several variants when appropriate)
- * helps to descide which keycodes to assign to the buttons.
- */
-static struct ir_scancode ir_codes_behold[] = {
-
- /* 0x1c 0x12 *
- * TV/FM POWER *
- * */
- { 0x1c, KEY_TUNER }, /* XXX KEY_TV / KEY_RADIO */
- { 0x12, KEY_POWER },
-
- /* 0x01 0x02 0x03 *
- * 1 2 3 *
- * *
- * 0x04 0x05 0x06 *
- * 4 5 6 *
- * *
- * 0x07 0x08 0x09 *
- * 7 8 9 *
- * */
- { 0x01, KEY_1 },
- { 0x02, KEY_2 },
- { 0x03, KEY_3 },
- { 0x04, KEY_4 },
- { 0x05, KEY_5 },
- { 0x06, KEY_6 },
- { 0x07, KEY_7 },
- { 0x08, KEY_8 },
- { 0x09, KEY_9 },
-
- /* 0x0a 0x00 0x17 *
- * RECALL 0 MODE *
- * */
- { 0x0a, KEY_AGAIN },
- { 0x00, KEY_0 },
- { 0x17, KEY_MODE },
-
- /* 0x14 0x10 *
- * ASPECT FULLSCREEN *
- * */
- { 0x14, KEY_SCREEN },
- { 0x10, KEY_ZOOM },
-
- /* 0x0b *
- * Up *
- * *
- * 0x18 0x16 0x0c *
- * Left Ok Right *
- * *
- * 0x15 *
- * Down *
- * */
- { 0x0b, KEY_CHANNELUP },
- { 0x18, KEY_VOLUMEDOWN },
- { 0x16, KEY_OK }, /* XXX KEY_ENTER */
- { 0x0c, KEY_VOLUMEUP },
- { 0x15, KEY_CHANNELDOWN },
-
- /* 0x11 0x0d *
- * MUTE INFO *
- * */
- { 0x11, KEY_MUTE },
- { 0x0d, KEY_INFO },
-
- /* 0x0f 0x1b 0x1a *
- * RECORD PLAY/PAUSE STOP *
- * *
- * 0x0e 0x1f 0x1e *
- *TELETEXT AUDIO SOURCE *
- * RED YELLOW *
- * */
- { 0x0f, KEY_RECORD },
- { 0x1b, KEY_PLAYPAUSE },
- { 0x1a, KEY_STOP },
- { 0x0e, KEY_TEXT },
- { 0x1f, KEY_RED }, /*XXX KEY_AUDIO */
- { 0x1e, KEY_YELLOW }, /*XXX KEY_SOURCE */
-
- /* 0x1d 0x13 0x19 *
- * SLEEP PREVIEW DVB *
- * GREEN BLUE *
- * */
- { 0x1d, KEY_SLEEP },
- { 0x13, KEY_GREEN },
- { 0x19, KEY_BLUE }, /* XXX KEY_SAT */
-
- /* 0x58 0x5c *
- * FREEZE SNAPSHOT *
- * */
- { 0x58, KEY_SLOW },
- { 0x5c, KEY_CAMERA },
-
-};
-
-struct ir_scancode_table ir_codes_behold_table = {
- .scan = ir_codes_behold,
- .size = ARRAY_SIZE(ir_codes_behold),
-};
-EXPORT_SYMBOL_GPL(ir_codes_behold_table);
-
-/* Beholder Intl. Ltd. 2008
- * Dmitry Belimov d.belimov@google.com
- * Keytable is used by BeholdTV Columbus
- * The "ascii-art picture" below (in comments, first row
- * is the keycode in hex, and subsequent row(s) shows
- * the button labels (several variants when appropriate)
- * helps to descide which keycodes to assign to the buttons.
- */
-static struct ir_scancode ir_codes_behold_columbus[] = {
-
- /* 0x13 0x11 0x1C 0x12 *
- * Mute Source TV/FM Power *
- * */
-
- { 0x13, KEY_MUTE },
- { 0x11, KEY_PROPS },
- { 0x1C, KEY_TUNER }, /* KEY_TV/KEY_RADIO */
- { 0x12, KEY_POWER },
-
- /* 0x01 0x02 0x03 0x0D *
- * 1 2 3 Stereo *
- * *
- * 0x04 0x05 0x06 0x19 *
- * 4 5 6 Snapshot *
- * *
- * 0x07 0x08 0x09 0x10 *
- * 7 8 9 Zoom *
- * */
- { 0x01, KEY_1 },
- { 0x02, KEY_2 },
- { 0x03, KEY_3 },
- { 0x0D, KEY_SETUP }, /* Setup key */
- { 0x04, KEY_4 },
- { 0x05, KEY_5 },
- { 0x06, KEY_6 },
- { 0x19, KEY_CAMERA }, /* Snapshot key */
- { 0x07, KEY_7 },
- { 0x08, KEY_8 },
- { 0x09, KEY_9 },
- { 0x10, KEY_ZOOM },
-
- /* 0x0A 0x00 0x0B 0x0C *
- * RECALL 0 ChannelUp VolumeUp *
- * */
- { 0x0A, KEY_AGAIN },
- { 0x00, KEY_0 },
- { 0x0B, KEY_CHANNELUP },
- { 0x0C, KEY_VOLUMEUP },
-
- /* 0x1B 0x1D 0x15 0x18 *
- * Timeshift Record ChannelDown VolumeDown *
- * */
-
- { 0x1B, KEY_TIME },
- { 0x1D, KEY_RECORD },
- { 0x15, KEY_CHANNELDOWN },
- { 0x18, KEY_VOLUMEDOWN },
-
- /* 0x0E 0x1E 0x0F 0x1A *
- * Stop Pause Previous Next *
- * */
-
- { 0x0E, KEY_STOP },
- { 0x1E, KEY_PAUSE },
- { 0x0F, KEY_PREVIOUS },
- { 0x1A, KEY_NEXT },
-
-};
-
-struct ir_scancode_table ir_codes_behold_columbus_table = {
- .scan = ir_codes_behold_columbus,
- .size = ARRAY_SIZE(ir_codes_behold_columbus),
-};
-EXPORT_SYMBOL_GPL(ir_codes_behold_columbus_table);
-
-/*
- * Remote control for the Genius TVGO A11MCE
- * Adrian Pardini <pardo.bsso@gmail.com>
- */
-static struct ir_scancode ir_codes_genius_tvgo_a11mce[] = {
- /* Keys 0 to 9 */
- { 0x48, KEY_0 },
- { 0x09, KEY_1 },
- { 0x1d, KEY_2 },
- { 0x1f, KEY_3 },
- { 0x19, KEY_4 },
- { 0x1b, KEY_5 },
- { 0x11, KEY_6 },
- { 0x17, KEY_7 },
- { 0x12, KEY_8 },
- { 0x16, KEY_9 },
-
- { 0x54, KEY_RECORD }, /* recording */
- { 0x06, KEY_MUTE }, /* mute */
- { 0x10, KEY_POWER },
- { 0x40, KEY_LAST }, /* recall */
- { 0x4c, KEY_CHANNELUP }, /* channel / program + */
- { 0x00, KEY_CHANNELDOWN }, /* channel / program - */
- { 0x0d, KEY_VOLUMEUP },
- { 0x15, KEY_VOLUMEDOWN },
- { 0x4d, KEY_OK }, /* also labeled as Pause */
- { 0x1c, KEY_ZOOM }, /* full screen and Stop */
- { 0x02, KEY_MODE }, /* AV Source or Rewind */
- { 0x04, KEY_LIST }, /* -/-- */
- /* small arrows above numbers */
- { 0x1a, KEY_NEXT }, /* also Fast Forward */
- { 0x0e, KEY_PREVIOUS }, /* also Rewind */
- /* these are in a rather non-standard layout and have
- an alternate name written on them */
- { 0x1e, KEY_UP }, /* Video Setting */
- { 0x0a, KEY_DOWN }, /* Video Default */
- { 0x05, KEY_CAMERA }, /* Snapshot */
- { 0x0c, KEY_RIGHT }, /* Hide Panel */
- /* Four buttons without label */
- { 0x49, KEY_RED },
- { 0x0b, KEY_GREEN },
- { 0x13, KEY_YELLOW },
- { 0x50, KEY_BLUE },
-};
-
-struct ir_scancode_table ir_codes_genius_tvgo_a11mce_table = {
- .scan = ir_codes_genius_tvgo_a11mce,
- .size = ARRAY_SIZE(ir_codes_genius_tvgo_a11mce),
-};
-EXPORT_SYMBOL_GPL(ir_codes_genius_tvgo_a11mce_table);
-
-/*
- * Remote control for Powercolor Real Angel 330
- * Daniel Fraga <fragabr@gmail.com>
- */
-static struct ir_scancode ir_codes_powercolor_real_angel[] = {
- { 0x38, KEY_SWITCHVIDEOMODE }, /* switch inputs */
- { 0x0c, KEY_MEDIA }, /* Turn ON/OFF App */
- { 0x00, KEY_0 },
- { 0x01, KEY_1 },
- { 0x02, KEY_2 },
- { 0x03, KEY_3 },
- { 0x04, KEY_4 },
- { 0x05, KEY_5 },
- { 0x06, KEY_6 },
- { 0x07, KEY_7 },
- { 0x08, KEY_8 },
- { 0x09, KEY_9 },
- { 0x0a, KEY_DIGITS }, /* single, double, triple digit */
- { 0x29, KEY_PREVIOUS }, /* previous channel */
- { 0x12, KEY_BRIGHTNESSUP },
- { 0x13, KEY_BRIGHTNESSDOWN },
- { 0x2b, KEY_MODE }, /* stereo/mono */
- { 0x2c, KEY_TEXT }, /* teletext */
- { 0x20, KEY_CHANNELUP }, /* channel up */
- { 0x21, KEY_CHANNELDOWN }, /* channel down */
- { 0x10, KEY_VOLUMEUP }, /* volume up */
- { 0x11, KEY_VOLUMEDOWN }, /* volume down */
- { 0x0d, KEY_MUTE },
- { 0x1f, KEY_RECORD },
- { 0x17, KEY_PLAY },
- { 0x16, KEY_PAUSE },
- { 0x0b, KEY_STOP },
- { 0x27, KEY_FASTFORWARD },
- { 0x26, KEY_REWIND },
- { 0x1e, KEY_SEARCH }, /* autoscan */
- { 0x0e, KEY_CAMERA }, /* snapshot */
- { 0x2d, KEY_SETUP },
- { 0x0f, KEY_SCREEN }, /* full screen */
- { 0x14, KEY_RADIO }, /* FM radio */
- { 0x25, KEY_POWER }, /* power */
-};
-
-struct ir_scancode_table ir_codes_powercolor_real_angel_table = {
- .scan = ir_codes_powercolor_real_angel,
- .size = ARRAY_SIZE(ir_codes_powercolor_real_angel),
-};
-EXPORT_SYMBOL_GPL(ir_codes_powercolor_real_angel_table);
-
-/* Kworld Plus TV Analog Lite PCI IR
- Mauro Carvalho Chehab <mchehab@infradead.org>
- */
-static struct ir_scancode ir_codes_kworld_plus_tv_analog[] = {
- { 0x0c, KEY_PROG1 }, /* Kworld key */
- { 0x16, KEY_CLOSECD }, /* -> ) */
- { 0x1d, KEY_POWER2 },
-
- { 0x00, KEY_1 },
- { 0x01, KEY_2 },
- { 0x02, KEY_3 }, /* Two keys have the same code: 3 and left */
- { 0x03, KEY_4 }, /* Two keys have the same code: 4 and right */
- { 0x04, KEY_5 },
- { 0x05, KEY_6 },
- { 0x06, KEY_7 },
- { 0x07, KEY_8 },
- { 0x08, KEY_9 },
- { 0x0a, KEY_0 },
-
- { 0x09, KEY_AGAIN },
- { 0x14, KEY_MUTE },
-
- { 0x20, KEY_UP },
- { 0x21, KEY_DOWN },
- { 0x0b, KEY_ENTER },
-
- { 0x10, KEY_CHANNELUP },
- { 0x11, KEY_CHANNELDOWN },
-
- /* Couldn't map key left/key right, since those
- conflict with the '3' and '4' scancodes.
- I don't know what the original driver does.
- */
-
- { 0x13, KEY_VOLUMEUP },
- { 0x12, KEY_VOLUMEDOWN },
-
- /* The lower part of the IR remote:
- there are several duplicated keycodes there,
- and most of them conflict with the digits,
- so add mappings only for the unused scancodes.
- Somehow the original driver has a way to tell
- these buttons apart, but it doesn't seem to be
- via some GPIO, and it is not related to the time
- between keyup and keydown.
- */
- { 0x19, KEY_TIME}, /* Timeshift */
- { 0x1a, KEY_STOP},
- { 0x1b, KEY_RECORD},
-
- { 0x22, KEY_TEXT},
-
- { 0x15, KEY_AUDIO}, /* ((*)) */
- { 0x0f, KEY_ZOOM},
- { 0x1c, KEY_CAMERA}, /* snapshot */
-
- { 0x18, KEY_RED}, /* B */
- { 0x23, KEY_GREEN}, /* C */
-};
-struct ir_scancode_table ir_codes_kworld_plus_tv_analog_table = {
- .scan = ir_codes_kworld_plus_tv_analog,
- .size = ARRAY_SIZE(ir_codes_kworld_plus_tv_analog),
-};
-EXPORT_SYMBOL_GPL(ir_codes_kworld_plus_tv_analog_table);
-
-/* Kaiomy TVnPC U2
- Mauro Carvalho Chehab <mchehab@infradead.org>
- */
-static struct ir_scancode ir_codes_kaiomy[] = {
- { 0x43, KEY_POWER2},
- { 0x01, KEY_LIST},
- { 0x0b, KEY_ZOOM},
- { 0x03, KEY_POWER},
-
- { 0x04, KEY_1},
- { 0x08, KEY_2},
- { 0x02, KEY_3},
-
- { 0x0f, KEY_4},
- { 0x05, KEY_5},
- { 0x06, KEY_6},
-
- { 0x0c, KEY_7},
- { 0x0d, KEY_8},
- { 0x0a, KEY_9},
-
- { 0x11, KEY_0},
-
- { 0x09, KEY_CHANNELUP},
- { 0x07, KEY_CHANNELDOWN},
-
- { 0x0e, KEY_VOLUMEUP},
- { 0x13, KEY_VOLUMEDOWN},
-
- { 0x10, KEY_HOME},
- { 0x12, KEY_ENTER},
-
- { 0x14, KEY_RECORD},
- { 0x15, KEY_STOP},
- { 0x16, KEY_PLAY},
- { 0x17, KEY_MUTE},
-
- { 0x18, KEY_UP},
- { 0x19, KEY_DOWN},
- { 0x1a, KEY_LEFT},
- { 0x1b, KEY_RIGHT},
-
- { 0x1c, KEY_RED},
- { 0x1d, KEY_GREEN},
- { 0x1e, KEY_YELLOW},
- { 0x1f, KEY_BLUE},
-};
-struct ir_scancode_table ir_codes_kaiomy_table = {
- .scan = ir_codes_kaiomy,
- .size = ARRAY_SIZE(ir_codes_kaiomy),
-};
-EXPORT_SYMBOL_GPL(ir_codes_kaiomy_table);
-
-static struct ir_scancode ir_codes_avermedia_a16d[] = {
- { 0x20, KEY_LIST},
- { 0x00, KEY_POWER},
- { 0x28, KEY_1},
- { 0x18, KEY_2},
- { 0x38, KEY_3},
- { 0x24, KEY_4},
- { 0x14, KEY_5},
- { 0x34, KEY_6},
- { 0x2c, KEY_7},
- { 0x1c, KEY_8},
- { 0x3c, KEY_9},
- { 0x12, KEY_SUBTITLE},
- { 0x22, KEY_0},
- { 0x32, KEY_REWIND},
- { 0x3a, KEY_SHUFFLE},
- { 0x02, KEY_PRINT},
- { 0x11, KEY_CHANNELDOWN},
- { 0x31, KEY_CHANNELUP},
- { 0x0c, KEY_ZOOM},
- { 0x1e, KEY_VOLUMEDOWN},
- { 0x3e, KEY_VOLUMEUP},
- { 0x0a, KEY_MUTE},
- { 0x04, KEY_AUDIO},
- { 0x26, KEY_RECORD},
- { 0x06, KEY_PLAY},
- { 0x36, KEY_STOP},
- { 0x16, KEY_PAUSE},
- { 0x2e, KEY_REWIND},
- { 0x0e, KEY_FASTFORWARD},
- { 0x30, KEY_TEXT},
- { 0x21, KEY_GREEN},
- { 0x01, KEY_BLUE},
- { 0x08, KEY_EPG},
- { 0x2a, KEY_MENU},
-};
-struct ir_scancode_table ir_codes_avermedia_a16d_table = {
- .scan = ir_codes_avermedia_a16d,
- .size = ARRAY_SIZE(ir_codes_avermedia_a16d),
-};
-EXPORT_SYMBOL_GPL(ir_codes_avermedia_a16d_table);
-
-/* Encore ENLTV-FM v5.3
- Mauro Carvalho Chehab <mchehab@infradead.org>
- */
-static struct ir_scancode ir_codes_encore_enltv_fm53[] = {
- { 0x10, KEY_POWER2},
- { 0x06, KEY_MUTE},
-
- { 0x09, KEY_1},
- { 0x1d, KEY_2},
- { 0x1f, KEY_3},
- { 0x19, KEY_4},
- { 0x1b, KEY_5},
- { 0x11, KEY_6},
- { 0x17, KEY_7},
- { 0x12, KEY_8},
- { 0x16, KEY_9},
- { 0x48, KEY_0},
-
- { 0x04, KEY_LIST}, /* -/-- */
- { 0x40, KEY_LAST}, /* recall */
-
- { 0x02, KEY_MODE}, /* TV/AV */
- { 0x05, KEY_CAMERA}, /* SNAPSHOT */
-
- { 0x4c, KEY_CHANNELUP}, /* UP */
- { 0x00, KEY_CHANNELDOWN}, /* DOWN */
- { 0x0d, KEY_VOLUMEUP}, /* RIGHT */
- { 0x15, KEY_VOLUMEDOWN}, /* LEFT */
- { 0x49, KEY_ENTER}, /* OK */
-
- { 0x54, KEY_RECORD},
- { 0x4d, KEY_PLAY}, /* pause */
-
- { 0x1e, KEY_MENU}, /* video setting */
- { 0x0e, KEY_RIGHT}, /* <- */
- { 0x1a, KEY_LEFT}, /* -> */
-
- { 0x0a, KEY_CLEAR}, /* video default */
- { 0x0c, KEY_ZOOM}, /* hide panel */
- { 0x47, KEY_SLEEP}, /* shutdown */
-};
-struct ir_scancode_table ir_codes_encore_enltv_fm53_table = {
- .scan = ir_codes_encore_enltv_fm53,
- .size = ARRAY_SIZE(ir_codes_encore_enltv_fm53),
-};
-EXPORT_SYMBOL_GPL(ir_codes_encore_enltv_fm53_table);
-
-/* Zogis Real Audio 220 - 32 keys IR */
-static struct ir_scancode ir_codes_real_audio_220_32_keys[] = {
- { 0x1c, KEY_RADIO},
- { 0x12, KEY_POWER2},
-
- { 0x01, KEY_1},
- { 0x02, KEY_2},
- { 0x03, KEY_3},
- { 0x04, KEY_4},
- { 0x05, KEY_5},
- { 0x06, KEY_6},
- { 0x07, KEY_7},
- { 0x08, KEY_8},
- { 0x09, KEY_9},
- { 0x00, KEY_0},
-
- { 0x0c, KEY_VOLUMEUP},
- { 0x18, KEY_VOLUMEDOWN},
- { 0x0b, KEY_CHANNELUP},
- { 0x15, KEY_CHANNELDOWN},
- { 0x16, KEY_ENTER},
-
- { 0x11, KEY_LIST}, /* Source */
- { 0x0d, KEY_AUDIO}, /* stereo */
-
- { 0x0f, KEY_PREVIOUS}, /* Prev */
- { 0x1b, KEY_TIME}, /* Timeshift */
- { 0x1a, KEY_NEXT}, /* Next */
-
- { 0x0e, KEY_STOP},
- { 0x1f, KEY_PLAY},
- { 0x1e, KEY_PLAYPAUSE}, /* Pause */
-
- { 0x1d, KEY_RECORD},
- { 0x13, KEY_MUTE},
- { 0x19, KEY_CAMERA}, /* Snapshot */
-
-};
-struct ir_scancode_table ir_codes_real_audio_220_32_keys_table = {
- .scan = ir_codes_real_audio_220_32_keys,
- .size = ARRAY_SIZE(ir_codes_real_audio_220_32_keys),
-};
-EXPORT_SYMBOL_GPL(ir_codes_real_audio_220_32_keys_table);
-
-/* ATI TV Wonder HD 600 USB
- Devin Heitmueller <devin.heitmueller@gmail.com>
- */
-static struct ir_scancode ir_codes_ati_tv_wonder_hd_600[] = {
- { 0x00, KEY_RECORD}, /* Row 1 */
- { 0x01, KEY_PLAYPAUSE},
- { 0x02, KEY_STOP},
- { 0x03, KEY_POWER},
- { 0x04, KEY_PREVIOUS}, /* Row 2 */
- { 0x05, KEY_REWIND},
- { 0x06, KEY_FORWARD},
- { 0x07, KEY_NEXT},
- { 0x08, KEY_EPG}, /* Row 3 */
- { 0x09, KEY_HOME},
- { 0x0a, KEY_MENU},
- { 0x0b, KEY_CHANNELUP},
- { 0x0c, KEY_BACK}, /* Row 4 */
- { 0x0d, KEY_UP},
- { 0x0e, KEY_INFO},
- { 0x0f, KEY_CHANNELDOWN},
- { 0x10, KEY_LEFT}, /* Row 5 */
- { 0x11, KEY_SELECT},
- { 0x12, KEY_RIGHT},
- { 0x13, KEY_VOLUMEUP},
- { 0x14, KEY_LAST}, /* Row 6 */
- { 0x15, KEY_DOWN},
- { 0x16, KEY_MUTE},
- { 0x17, KEY_VOLUMEDOWN},
-};
-struct ir_scancode_table ir_codes_ati_tv_wonder_hd_600_table = {
- .scan = ir_codes_ati_tv_wonder_hd_600,
- .size = ARRAY_SIZE(ir_codes_ati_tv_wonder_hd_600),
-};
-EXPORT_SYMBOL_GPL(ir_codes_ati_tv_wonder_hd_600_table);
-
-/* DVBWorld remotes
- Igor M. Liplianin <liplianin@me.by>
- */
-static struct ir_scancode ir_codes_dm1105_nec[] = {
- { 0x0a, KEY_POWER2}, /* power */
- { 0x0c, KEY_MUTE}, /* mute */
- { 0x11, KEY_1},
- { 0x12, KEY_2},
- { 0x13, KEY_3},
- { 0x14, KEY_4},
- { 0x15, KEY_5},
- { 0x16, KEY_6},
- { 0x17, KEY_7},
- { 0x18, KEY_8},
- { 0x19, KEY_9},
- { 0x10, KEY_0},
- { 0x1c, KEY_CHANNELUP}, /* ch+ */
- { 0x0f, KEY_CHANNELDOWN}, /* ch- */
- { 0x1a, KEY_VOLUMEUP}, /* vol+ */
- { 0x0e, KEY_VOLUMEDOWN}, /* vol- */
- { 0x04, KEY_RECORD}, /* rec */
- { 0x09, KEY_CHANNEL}, /* fav */
- { 0x08, KEY_BACKSPACE}, /* rewind */
- { 0x07, KEY_FASTFORWARD}, /* fast */
- { 0x0b, KEY_PAUSE}, /* pause */
- { 0x02, KEY_ESC}, /* cancel */
- { 0x03, KEY_TAB}, /* tab */
- { 0x00, KEY_UP}, /* up */
- { 0x1f, KEY_ENTER}, /* ok */
- { 0x01, KEY_DOWN}, /* down */
- { 0x05, KEY_RECORD}, /* cap */
- { 0x06, KEY_STOP}, /* stop */
- { 0x40, KEY_ZOOM}, /* full */
- { 0x1e, KEY_TV}, /* tvmode */
- { 0x1b, KEY_B}, /* recall */
-};
-struct ir_scancode_table ir_codes_dm1105_nec_table = {
- .scan = ir_codes_dm1105_nec,
- .size = ARRAY_SIZE(ir_codes_dm1105_nec),
-};
-EXPORT_SYMBOL_GPL(ir_codes_dm1105_nec_table);
-
-static struct ir_scancode ir_codes_tevii_nec[] = {
- { 0x0a, KEY_POWER2},
- { 0x0c, KEY_MUTE},
- { 0x11, KEY_1},
- { 0x12, KEY_2},
- { 0x13, KEY_3},
- { 0x14, KEY_4},
- { 0x15, KEY_5},
- { 0x16, KEY_6},
- { 0x17, KEY_7},
- { 0x18, KEY_8},
- { 0x19, KEY_9},
- { 0x10, KEY_0},
- { 0x1c, KEY_MENU},
- { 0x0f, KEY_VOLUMEDOWN},
- { 0x1a, KEY_LAST},
- { 0x0e, KEY_OPEN},
- { 0x04, KEY_RECORD},
- { 0x09, KEY_VOLUMEUP},
- { 0x08, KEY_CHANNELUP},
- { 0x07, KEY_PVR},
- { 0x0b, KEY_TIME},
- { 0x02, KEY_RIGHT},
- { 0x03, KEY_LEFT},
- { 0x00, KEY_UP},
- { 0x1f, KEY_OK},
- { 0x01, KEY_DOWN},
- { 0x05, KEY_TUNER},
- { 0x06, KEY_CHANNELDOWN},
- { 0x40, KEY_PLAYPAUSE},
- { 0x1e, KEY_REWIND},
- { 0x1b, KEY_FAVORITES},
- { 0x1d, KEY_BACK},
- { 0x4d, KEY_FASTFORWARD},
- { 0x44, KEY_EPG},
- { 0x4c, KEY_INFO},
- { 0x41, KEY_AB},
- { 0x43, KEY_AUDIO},
- { 0x45, KEY_SUBTITLE},
- { 0x4a, KEY_LIST},
- { 0x46, KEY_F1},
- { 0x47, KEY_F2},
- { 0x5e, KEY_F3},
- { 0x5c, KEY_F4},
- { 0x52, KEY_F5},
- { 0x5a, KEY_F6},
- { 0x56, KEY_MODE},
- { 0x58, KEY_SWITCHVIDEOMODE},
-};
-struct ir_scancode_table ir_codes_tevii_nec_table = {
- .scan = ir_codes_tevii_nec,
- .size = ARRAY_SIZE(ir_codes_tevii_nec),
-};
-EXPORT_SYMBOL_GPL(ir_codes_tevii_nec_table);
-
-static struct ir_scancode ir_codes_tbs_nec[] = {
- { 0x04, KEY_POWER2}, /*power*/
- { 0x14, KEY_MUTE}, /*mute*/
- { 0x07, KEY_1},
- { 0x06, KEY_2},
- { 0x05, KEY_3},
- { 0x0b, KEY_4},
- { 0x0a, KEY_5},
- { 0x09, KEY_6},
- { 0x0f, KEY_7},
- { 0x0e, KEY_8},
- { 0x0d, KEY_9},
- { 0x12, KEY_0},
- { 0x16, KEY_CHANNELUP}, /*ch+*/
- { 0x11, KEY_CHANNELDOWN},/*ch-*/
- { 0x13, KEY_VOLUMEUP}, /*vol+*/
- { 0x0c, KEY_VOLUMEDOWN},/*vol-*/
- { 0x03, KEY_RECORD}, /*rec*/
- { 0x18, KEY_PAUSE}, /*pause*/
- { 0x19, KEY_OK}, /*ok*/
- { 0x1a, KEY_CAMERA}, /* snapshot */
- { 0x01, KEY_UP},
- { 0x10, KEY_LEFT},
- { 0x02, KEY_RIGHT},
- { 0x08, KEY_DOWN},
- { 0x15, KEY_FAVORITES},
- { 0x17, KEY_SUBTITLE},
- { 0x1d, KEY_ZOOM},
- { 0x1f, KEY_EXIT},
- { 0x1e, KEY_MENU},
- { 0x1c, KEY_EPG},
- { 0x00, KEY_PREVIOUS},
- { 0x1b, KEY_MODE},
-};
-struct ir_scancode_table ir_codes_tbs_nec_table = {
- .scan = ir_codes_tbs_nec,
- .size = ARRAY_SIZE(ir_codes_tbs_nec),
-};
-EXPORT_SYMBOL_GPL(ir_codes_tbs_nec_table);
-
-/* Terratec Cinergy Hybrid T USB XS
- Devin Heitmueller <dheitmueller@linuxtv.org>
- */
-static struct ir_scancode ir_codes_terratec_cinergy_xs[] = {
- { 0x41, KEY_HOME},
- { 0x01, KEY_POWER},
- { 0x42, KEY_MENU},
- { 0x02, KEY_1},
- { 0x03, KEY_2},
- { 0x04, KEY_3},
- { 0x43, KEY_SUBTITLE},
- { 0x05, KEY_4},
- { 0x06, KEY_5},
- { 0x07, KEY_6},
- { 0x44, KEY_TEXT},
- { 0x08, KEY_7},
- { 0x09, KEY_8},
- { 0x0a, KEY_9},
- { 0x45, KEY_DELETE},
- { 0x0b, KEY_TUNER},
- { 0x0c, KEY_0},
- { 0x0d, KEY_MODE},
- { 0x46, KEY_TV},
- { 0x47, KEY_DVD},
- { 0x49, KEY_VIDEO},
- { 0x4b, KEY_AUX},
- { 0x10, KEY_UP},
- { 0x11, KEY_LEFT},
- { 0x12, KEY_OK},
- { 0x13, KEY_RIGHT},
- { 0x14, KEY_DOWN},
- { 0x0f, KEY_EPG},
- { 0x16, KEY_INFO},
- { 0x4d, KEY_BACKSPACE},
- { 0x1c, KEY_VOLUMEUP},
- { 0x4c, KEY_PLAY},
- { 0x1b, KEY_CHANNELUP},
- { 0x1e, KEY_VOLUMEDOWN},
- { 0x1d, KEY_MUTE},
- { 0x1f, KEY_CHANNELDOWN},
- { 0x17, KEY_RED},
- { 0x18, KEY_GREEN},
- { 0x19, KEY_YELLOW},
- { 0x1a, KEY_BLUE},
- { 0x58, KEY_RECORD},
- { 0x48, KEY_STOP},
- { 0x40, KEY_PAUSE},
- { 0x54, KEY_LAST},
- { 0x4e, KEY_REWIND},
- { 0x4f, KEY_FASTFORWARD},
- { 0x5c, KEY_NEXT},
-};
-struct ir_scancode_table ir_codes_terratec_cinergy_xs_table = {
- .scan = ir_codes_terratec_cinergy_xs,
- .size = ARRAY_SIZE(ir_codes_terratec_cinergy_xs),
-};
-EXPORT_SYMBOL_GPL(ir_codes_terratec_cinergy_xs_table);
-
-/* EVGA inDtube
- Devin Heitmueller <devin.heitmueller@gmail.com>
- */
-static struct ir_scancode ir_codes_evga_indtube[] = {
- { 0x12, KEY_POWER},
- { 0x02, KEY_MODE}, /* TV */
- { 0x14, KEY_MUTE},
- { 0x1a, KEY_CHANNELUP},
- { 0x16, KEY_TV2}, /* PIP */
- { 0x1d, KEY_VOLUMEUP},
- { 0x05, KEY_CHANNELDOWN},
- { 0x0f, KEY_PLAYPAUSE},
- { 0x19, KEY_VOLUMEDOWN},
- { 0x1c, KEY_REWIND},
- { 0x0d, KEY_RECORD},
- { 0x18, KEY_FORWARD},
- { 0x1e, KEY_PREVIOUS},
- { 0x1b, KEY_STOP},
- { 0x1f, KEY_NEXT},
- { 0x13, KEY_CAMERA},
-};
-struct ir_scancode_table ir_codes_evga_indtube_table = {
- .scan = ir_codes_evga_indtube,
- .size = ARRAY_SIZE(ir_codes_evga_indtube),
-};
-EXPORT_SYMBOL_GPL(ir_codes_evga_indtube_table);
-
-static struct ir_scancode ir_codes_videomate_s350[] = {
- { 0x00, KEY_TV},
- { 0x01, KEY_DVD},
- { 0x04, KEY_RECORD},
- { 0x05, KEY_VIDEO}, /* TV/Video */
- { 0x07, KEY_STOP},
- { 0x08, KEY_PLAYPAUSE},
- { 0x0a, KEY_REWIND},
- { 0x0f, KEY_FASTFORWARD},
- { 0x10, KEY_CHANNELUP},
- { 0x12, KEY_VOLUMEUP},
- { 0x13, KEY_CHANNELDOWN},
- { 0x14, KEY_MUTE},
- { 0x15, KEY_VOLUMEDOWN},
- { 0x16, KEY_1},
- { 0x17, KEY_2},
- { 0x18, KEY_3},
- { 0x19, KEY_4},
- { 0x1a, KEY_5},
- { 0x1b, KEY_6},
- { 0x1c, KEY_7},
- { 0x1d, KEY_8},
- { 0x1e, KEY_9},
- { 0x1f, KEY_0},
- { 0x21, KEY_SLEEP},
- { 0x24, KEY_ZOOM},
- { 0x25, KEY_LAST}, /* Recall */
- { 0x26, KEY_SUBTITLE}, /* CC */
- { 0x27, KEY_LANGUAGE}, /* MTS */
- { 0x29, KEY_CHANNEL}, /* SURF */
- { 0x2b, KEY_A},
- { 0x2c, KEY_B},
- { 0x2f, KEY_CAMERA}, /* Snapshot */
- { 0x23, KEY_RADIO},
- { 0x02, KEY_PREVIOUSSONG},
- { 0x06, KEY_NEXTSONG},
- { 0x03, KEY_EPG},
- { 0x09, KEY_SETUP},
- { 0x22, KEY_BACKSPACE},
- { 0x0c, KEY_UP},
- { 0x0e, KEY_DOWN},
- { 0x0b, KEY_LEFT},
- { 0x0d, KEY_RIGHT},
- { 0x11, KEY_ENTER},
- { 0x20, KEY_TEXT},
-};
-struct ir_scancode_table ir_codes_videomate_s350_table = {
- .scan = ir_codes_videomate_s350,
- .size = ARRAY_SIZE(ir_codes_videomate_s350),
-};
-EXPORT_SYMBOL_GPL(ir_codes_videomate_s350_table);
-
-/* GADMEI UTV330+ RM008Z remote
- Shine Liu <shinel@foxmail.com>
- */
-static struct ir_scancode ir_codes_gadmei_rm008z[] = {
- { 0x14, KEY_POWER2}, /* POWER OFF */
- { 0x0c, KEY_MUTE}, /* MUTE */
-
- { 0x18, KEY_TV}, /* TV */
- { 0x0e, KEY_VIDEO}, /* AV */
- { 0x0b, KEY_AUDIO}, /* SV */
- { 0x0f, KEY_RADIO}, /* FM */
-
- { 0x00, KEY_1},
- { 0x01, KEY_2},
- { 0x02, KEY_3},
- { 0x03, KEY_4},
- { 0x04, KEY_5},
- { 0x05, KEY_6},
- { 0x06, KEY_7},
- { 0x07, KEY_8},
- { 0x08, KEY_9},
- { 0x09, KEY_0},
- { 0x0a, KEY_INFO}, /* OSD */
- { 0x1c, KEY_BACKSPACE}, /* LAST */
-
- { 0x0d, KEY_PLAY}, /* PLAY */
- { 0x1e, KEY_CAMERA}, /* SNAPSHOT */
- { 0x1a, KEY_RECORD}, /* RECORD */
- { 0x17, KEY_STOP}, /* STOP */
-
- { 0x1f, KEY_UP}, /* UP */
- { 0x44, KEY_DOWN}, /* DOWN */
- { 0x46, KEY_TAB}, /* BACK */
- { 0x4a, KEY_ZOOM}, /* FULLSECREEN */
-
- { 0x10, KEY_VOLUMEUP}, /* VOLUMEUP */
- { 0x11, KEY_VOLUMEDOWN}, /* VOLUMEDOWN */
- { 0x12, KEY_CHANNELUP}, /* CHANNELUP */
- { 0x13, KEY_CHANNELDOWN}, /* CHANNELDOWN */
- { 0x15, KEY_ENTER}, /* OK */
-};
-struct ir_scancode_table ir_codes_gadmei_rm008z_table = {
- .scan = ir_codes_gadmei_rm008z,
- .size = ARRAY_SIZE(ir_codes_gadmei_rm008z),
-};
-EXPORT_SYMBOL_GPL(ir_codes_gadmei_rm008z_table);
-
-/*************************************************************
- * COMPLETE SCANCODE TABLES
- * Instead of just a partial scancode, the tables bellow
- * contains the complete scancode and the receiver protocol
- *************************************************************/
-
-/*
- * Hauppauge:the newer, gray remotes (seems there are multiple
- * slightly different versions), shipped with cx88+ivtv cards.
- *
- * This table contains the complete RC5 code, instead of just the data part
- */
-static struct ir_scancode ir_codes_rc5_hauppauge_new[] = {
- /* Keys 0 to 9 */
- { 0x1e00, KEY_0 },
- { 0x1e01, KEY_1 },
- { 0x1e02, KEY_2 },
- { 0x1e03, KEY_3 },
- { 0x1e04, KEY_4 },
- { 0x1e05, KEY_5 },
- { 0x1e06, KEY_6 },
- { 0x1e07, KEY_7 },
- { 0x1e08, KEY_8 },
- { 0x1e09, KEY_9 },
-
- { 0x1e0a, KEY_TEXT }, /* keypad asterisk as well */
- { 0x1e0b, KEY_RED }, /* red button */
- { 0x1e0c, KEY_RADIO },
- { 0x1e0d, KEY_MENU },
- { 0x1e0e, KEY_SUBTITLE }, /* also the # key */
- { 0x1e0f, KEY_MUTE },
- { 0x1e10, KEY_VOLUMEUP },
- { 0x1e11, KEY_VOLUMEDOWN },
- { 0x1e12, KEY_PREVIOUS }, /* previous channel */
- { 0x1e14, KEY_UP },
- { 0x1e15, KEY_DOWN },
- { 0x1e16, KEY_LEFT },
- { 0x1e17, KEY_RIGHT },
- { 0x1e18, KEY_VIDEO }, /* Videos */
- { 0x1e19, KEY_AUDIO }, /* Music */
- /* 0x1e1a: Pictures - presume this means
- "Multimedia Home Platform" -
- no "PICTURES" key in input.h
- */
- { 0x1e1a, KEY_MHP },
-
- { 0x1e1b, KEY_EPG }, /* Guide */
- { 0x1e1c, KEY_TV },
- { 0x1e1e, KEY_NEXTSONG }, /* skip >| */
- { 0x1e1f, KEY_EXIT }, /* back/exit */
- { 0x1e20, KEY_CHANNELUP }, /* channel / program + */
- { 0x1e21, KEY_CHANNELDOWN }, /* channel / program - */
- { 0x1e22, KEY_CHANNEL }, /* source (old black remote) */
- { 0x1e24, KEY_PREVIOUSSONG }, /* replay |< */
- { 0x1e25, KEY_ENTER }, /* OK */
- { 0x1e26, KEY_SLEEP }, /* minimize (old black remote) */
- { 0x1e29, KEY_BLUE }, /* blue key */
- { 0x1e2e, KEY_GREEN }, /* green button */
- { 0x1e30, KEY_PAUSE }, /* pause */
- { 0x1e32, KEY_REWIND }, /* backward << */
- { 0x1e34, KEY_FASTFORWARD }, /* forward >> */
- { 0x1e35, KEY_PLAY },
- { 0x1e36, KEY_STOP },
- { 0x1e37, KEY_RECORD }, /* recording */
- { 0x1e38, KEY_YELLOW }, /* yellow key */
- { 0x1e3b, KEY_SELECT }, /* top right button */
- { 0x1e3c, KEY_ZOOM }, /* full */
- { 0x1e3d, KEY_POWER }, /* system power (green button) */
-};
-
-struct ir_scancode_table ir_codes_rc5_hauppauge_new_table = {
- .scan = ir_codes_rc5_hauppauge_new,
- .size = ARRAY_SIZE(ir_codes_rc5_hauppauge_new),
- .ir_type = IR_TYPE_RC5,
-};
-EXPORT_SYMBOL_GPL(ir_codes_rc5_hauppauge_new_table);
-
-/* Terratec Cinergy Hybrid T USB XS FM
- Mauro Carvalho Chehab <mchehab@redhat.com>
- */
-static struct ir_scancode ir_codes_nec_terratec_cinergy_xs[] = {
- { 0x1441, KEY_HOME},
- { 0x1401, KEY_POWER2},
-
- { 0x1442, KEY_MENU}, /* DVD menu */
- { 0x1443, KEY_SUBTITLE},
- { 0x1444, KEY_TEXT}, /* Teletext */
- { 0x1445, KEY_DELETE},
-
- { 0x1402, KEY_1},
- { 0x1403, KEY_2},
- { 0x1404, KEY_3},
- { 0x1405, KEY_4},
- { 0x1406, KEY_5},
- { 0x1407, KEY_6},
- { 0x1408, KEY_7},
- { 0x1409, KEY_8},
- { 0x140a, KEY_9},
- { 0x140c, KEY_0},
-
- { 0x140b, KEY_TUNER}, /* AV */
- { 0x140d, KEY_MODE}, /* A.B */
-
- { 0x1446, KEY_TV},
- { 0x1447, KEY_DVD},
- { 0x1449, KEY_VIDEO},
- { 0x144a, KEY_RADIO}, /* Music */
- { 0x144b, KEY_CAMERA}, /* PIC */
-
- { 0x1410, KEY_UP},
- { 0x1411, KEY_LEFT},
- { 0x1412, KEY_OK},
- { 0x1413, KEY_RIGHT},
- { 0x1414, KEY_DOWN},
-
- { 0x140f, KEY_EPG},
- { 0x1416, KEY_INFO},
- { 0x144d, KEY_BACKSPACE},
-
- { 0x141c, KEY_VOLUMEUP},
- { 0x141e, KEY_VOLUMEDOWN},
-
- { 0x144c, KEY_PLAY},
- { 0x141d, KEY_MUTE},
-
- { 0x141b, KEY_CHANNELUP},
- { 0x141f, KEY_CHANNELDOWN},
-
- { 0x1417, KEY_RED},
- { 0x1418, KEY_GREEN},
- { 0x1419, KEY_YELLOW},
- { 0x141a, KEY_BLUE},
-
- { 0x1458, KEY_RECORD},
- { 0x1448, KEY_STOP},
- { 0x1440, KEY_PAUSE},
-
- { 0x1454, KEY_LAST},
- { 0x144e, KEY_REWIND},
- { 0x144f, KEY_FASTFORWARD},
- { 0x145c, KEY_NEXT},
-};
-struct ir_scancode_table ir_codes_nec_terratec_cinergy_xs_table = {
- .scan = ir_codes_nec_terratec_cinergy_xs,
- .size = ARRAY_SIZE(ir_codes_nec_terratec_cinergy_xs),
- .ir_type = IR_TYPE_NEC,
-};
-EXPORT_SYMBOL_GPL(ir_codes_nec_terratec_cinergy_xs_table);
-
-
-/* Leadtek Winfast TV USB II Deluxe remote
- Magnus Alm <magnus.alm@gmail.com>
- */
-static struct ir_scancode ir_codes_winfast_usbii_deluxe[] = {
- { 0x62, KEY_0},
- { 0x75, KEY_1},
- { 0x76, KEY_2},
- { 0x77, KEY_3},
- { 0x79, KEY_4},
- { 0x7a, KEY_5},
- { 0x7b, KEY_6},
- { 0x7d, KEY_7},
- { 0x7e, KEY_8},
- { 0x7f, KEY_9},
-
- { 0x38, KEY_CAMERA}, /* SNAPSHOT */
- { 0x37, KEY_RECORD}, /* RECORD */
- { 0x35, KEY_TIME}, /* TIMESHIFT */
-
- { 0x74, KEY_VOLUMEUP}, /* VOLUMEUP */
- { 0x78, KEY_VOLUMEDOWN}, /* VOLUMEDOWN */
- { 0x64, KEY_MUTE}, /* MUTE */
-
- { 0x21, KEY_CHANNEL}, /* SURF */
- { 0x7c, KEY_CHANNELUP}, /* CHANNELUP */
- { 0x60, KEY_CHANNELDOWN}, /* CHANNELDOWN */
- { 0x61, KEY_LAST}, /* LAST CHANNEL (RECALL) */
-
- { 0x72, KEY_VIDEO}, /* INPUT MODES (TV/FM) */
-
- { 0x70, KEY_POWER2}, /* TV ON/OFF */
-
- { 0x39, KEY_CYCLEWINDOWS}, /* MINIMIZE (BOSS) */
- { 0x3a, KEY_NEW}, /* PIP */
- { 0x73, KEY_ZOOM}, /* FULLSECREEN */
-
- { 0x66, KEY_INFO}, /* OSD (DISPLAY) */
-
- { 0x31, KEY_DOT}, /* '.' */
- { 0x63, KEY_ENTER}, /* ENTER */
-
-};
-struct ir_scancode_table ir_codes_winfast_usbii_deluxe_table = {
- .scan = ir_codes_winfast_usbii_deluxe,
- .size = ARRAY_SIZE(ir_codes_winfast_usbii_deluxe),
-};
-EXPORT_SYMBOL_GPL(ir_codes_winfast_usbii_deluxe_table);
-
-/* Kworld 315U
- */
-static struct ir_scancode ir_codes_kworld_315u[] = {
- { 0x6143, KEY_POWER },
- { 0x6101, KEY_TUNER }, /* source */
- { 0x610b, KEY_ZOOM },
- { 0x6103, KEY_POWER2 }, /* shutdown */
-
- { 0x6104, KEY_1 },
- { 0x6108, KEY_2 },
- { 0x6102, KEY_3 },
- { 0x6109, KEY_CHANNELUP },
-
- { 0x610f, KEY_4 },
- { 0x6105, KEY_5 },
- { 0x6106, KEY_6 },
- { 0x6107, KEY_CHANNELDOWN },
-
- { 0x610c, KEY_7 },
- { 0x610d, KEY_8 },
- { 0x610a, KEY_9 },
- { 0x610e, KEY_VOLUMEUP },
-
- { 0x6110, KEY_LAST },
- { 0x6111, KEY_0 },
- { 0x6112, KEY_ENTER },
- { 0x6113, KEY_VOLUMEDOWN },
-
- { 0x6114, KEY_RECORD },
- { 0x6115, KEY_STOP },
- { 0x6116, KEY_PLAY },
- { 0x6117, KEY_MUTE },
-
- { 0x6118, KEY_UP },
- { 0x6119, KEY_DOWN },
- { 0x611a, KEY_LEFT },
- { 0x611b, KEY_RIGHT },
-
- { 0x611c, KEY_RED },
- { 0x611d, KEY_GREEN },
- { 0x611e, KEY_YELLOW },
- { 0x611f, KEY_BLUE },
-};
-
-struct ir_scancode_table ir_codes_kworld_315u_table = {
- .scan = ir_codes_kworld_315u,
- .size = ARRAY_SIZE(ir_codes_kworld_315u),
- .ir_type = IR_TYPE_NEC,
-};
-EXPORT_SYMBOL_GPL(ir_codes_kworld_315u_table);
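The "complete scancode" tables directly above bundle the protocol address and the command into one number. A minimal sketch of that packing, using entries from the tables themselves (the helper below is hypothetical, not driver code):

/*
 * Hypothetical illustration of how the full scancodes above appear to be
 * packed: protocol address/system byte in the high byte, command in the
 * low byte.
 */
static inline u16 full_scancode(u8 address, u8 command)
{
	return ((u16)address << 8) | command;
}

/*
 * full_scancode(0x1e, 0x3d) == 0x1e3d -> KEY_POWER in the RC5 Hauppauge table
 * full_scancode(0x61, 0x17) == 0x6117 -> KEY_MUTE  in the NEC KWorld 315U table
 */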
diff --git a/drivers/media/IR/ir-keytable.c b/drivers/media/IR/ir-keytable.c
index bfca26d51827..9374a006f43d 100644
--- a/drivers/media/IR/ir-keytable.c
+++ b/drivers/media/IR/ir-keytable.c
@@ -1,4 +1,4 @@
-/* ir-register.c - handle IR scancode->keycode tables
+/* ir-keytable.c - handle IR scancode->keycode tables
*
* Copyright (C) 2009 by Mauro Carvalho Chehab <mchehab@redhat.com>
*
@@ -15,384 +15,408 @@
#include <linux/input.h>
#include <linux/slab.h>
-#include <media/ir-common.h>
+#include "ir-core-priv.h"
-#define IR_TAB_MIN_SIZE 32
-#define IR_TAB_MAX_SIZE 1024
+/* Sizes are in bytes, 256 bytes allows for 32 entries on x64 */
+#define IR_TAB_MIN_SIZE 256
+#define IR_TAB_MAX_SIZE 8192
+
+/* FIXME: IR_KEYPRESS_TIMEOUT should be protocol specific */
+#define IR_KEYPRESS_TIMEOUT 250
/**
- * ir_seek_table() - returns the element order on the table
- * @rc_tab: the ir_scancode_table with the keymap to be used
- * @scancode: the scancode that we're seeking
+ * ir_resize_table() - resizes a scancode table if necessary
+ * @rc_tab: the ir_scancode_table to resize
+ * @return: zero on success or a negative error code
*
- * This routine is used by the input routines when a key is pressed at the
- * IR. The scancode is received and needs to be converted into a keycode.
- * If the key is not found, it returns KEY_UNKNOWN. Otherwise, returns the
- * corresponding keycode from the table.
+ * This routine will shrink the ir_scancode_table if it has lots of
+ * unused entries and grow it if it is full.
*/
-static int ir_seek_table(struct ir_scancode_table *rc_tab, u32 scancode)
+static int ir_resize_table(struct ir_scancode_table *rc_tab)
{
- int rc;
- unsigned long flags;
- struct ir_scancode *keymap = rc_tab->scan;
+ unsigned int oldalloc = rc_tab->alloc;
+ unsigned int newalloc = oldalloc;
+ struct ir_scancode *oldscan = rc_tab->scan;
+ struct ir_scancode *newscan;
+
+ if (rc_tab->size == rc_tab->len) {
+ /* All entries in use -> grow keytable */
+ if (rc_tab->alloc >= IR_TAB_MAX_SIZE)
+ return -ENOMEM;
- spin_lock_irqsave(&rc_tab->lock, flags);
+ newalloc *= 2;
+ IR_dprintk(1, "Growing table to %u bytes\n", newalloc);
+ }
- /* FIXME: replace it by a binary search */
+ if ((rc_tab->len * 3 < rc_tab->size) && (oldalloc > IR_TAB_MIN_SIZE)) {
+ /* Less than 1/3 of entries in use -> shrink keytable */
+ newalloc /= 2;
+ IR_dprintk(1, "Shrinking table to %u bytes\n", newalloc);
+ }
- for (rc = 0; rc < rc_tab->size; rc++)
- if (keymap[rc].scancode == scancode)
- goto exit;
+ if (newalloc == oldalloc)
+ return 0;
- /* Not found */
- rc = -EINVAL;
+ newscan = kmalloc(newalloc, GFP_ATOMIC);
+ if (!newscan) {
+ IR_dprintk(1, "Failed to kmalloc %u bytes\n", newalloc);
+ return -ENOMEM;
+ }
-exit:
- spin_unlock_irqrestore(&rc_tab->lock, flags);
- return rc;
+ memcpy(newscan, rc_tab->scan, rc_tab->len * sizeof(struct ir_scancode));
+ rc_tab->scan = newscan;
+ rc_tab->alloc = newalloc;
+ rc_tab->size = rc_tab->alloc / sizeof(struct ir_scancode);
+ kfree(oldscan);
+ return 0;
}
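In rough numbers (taking the "256 bytes allows for 32 entries" comment above at face value, i.e. an 8-byte struct ir_scancode), the resizing policy works out as sketched below:

/*
 * Sizing sketch, derived from the constants and checks above:
 *   - the allocation is doubled whenever every slot is in use,
 *     up to IR_TAB_MAX_SIZE = 8192 bytes (1024 entries);
 *   - it is halved once fewer than a third of the slots are occupied,
 *     but never below IR_TAB_MIN_SIZE = 256 bytes (32 entries).
 */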
/**
- * ir_roundup_tablesize() - gets an optimum value for the table size
- * @n_elems: minimum number of entries to store keycodes
- *
- * This routine is used to choose the keycode table size.
+ * ir_do_setkeycode() - internal function to set a keycode in the
+ * scancode->keycode table
+ * @dev: the struct input_dev device descriptor
+ * @rc_tab: the struct ir_scancode_table to set the keycode in
+ * @scancode: the scancode for the ir command
+ * @keycode: the keycode for the ir command
+ * @resize: whether the keytable may be shrunk
+ * @return: a negative error code if the keycode could not be inserted, otherwise zero.
*
- * In order to have some empty space for new keycodes,
- * and knowing in advance that kmalloc allocates only power of two
- * segments, it optimizes the allocated space to have some spare space
- * for those new keycodes by using the maximum number of entries that
- * will be effectively be allocated by kmalloc.
- * In order to reduce the quantity of table resizes, it has a minimum
- * table size of IR_TAB_MIN_SIZE.
+ * This routine is used internally to manipulate the scancode->keycode table.
+ * The caller has to hold @rc_tab->lock.
*/
-static int ir_roundup_tablesize(int n_elems)
+static int ir_do_setkeycode(struct input_dev *dev,
+ struct ir_scancode_table *rc_tab,
+ unsigned scancode, unsigned keycode,
+ bool resize)
{
- size_t size;
-
- if (n_elems < IR_TAB_MIN_SIZE)
- n_elems = IR_TAB_MIN_SIZE;
+ unsigned int i;
+ int old_keycode = KEY_RESERVED;
+ struct ir_input_dev *ir_dev = input_get_drvdata(dev);
/*
- * As kmalloc only allocates sizes of power of two, get as
- * much entries as possible for the allocated memory segment
+ * Unfortunately, some hardware-based IR decoders don't provide
+ * all bits for the complete IR code. In general, they provide only
+ * the command part of the IR code. Yet, since the provided remote can be
+ * replaced with a different one, the core still needs to allow loading IR
+ * tables from other remotes. So, apply the driver's scancode mask, if any.
*/
- size = roundup_pow_of_two(n_elems * sizeof(struct ir_scancode));
- n_elems = size / sizeof(struct ir_scancode);
+ if (ir_dev->props && ir_dev->props->scanmask) {
+ scancode &= ir_dev->props->scanmask;
+ }
- return n_elems;
+ /* First check if we already have a mapping for this ir command */
+ for (i = 0; i < rc_tab->len; i++) {
+ /* Keytable is sorted from lowest to highest scancode */
+ if (rc_tab->scan[i].scancode > scancode)
+ break;
+ else if (rc_tab->scan[i].scancode < scancode)
+ continue;
+
+ old_keycode = rc_tab->scan[i].keycode;
+ rc_tab->scan[i].keycode = keycode;
+
+ /* Did the user wish to remove the mapping? */
+ if (keycode == KEY_RESERVED || keycode == KEY_UNKNOWN) {
+ IR_dprintk(1, "#%d: Deleting scan 0x%04x\n",
+ i, scancode);
+ rc_tab->len--;
+ memmove(&rc_tab->scan[i], &rc_tab->scan[i + 1],
+ (rc_tab->len - i) * sizeof(struct ir_scancode));
+ }
+
+ /* Possibly shrink the keytable, failure is not a problem */
+ ir_resize_table(rc_tab);
+ break;
+ }
+
+ if (old_keycode == KEY_RESERVED && keycode != KEY_RESERVED) {
+ /* No previous mapping found, we might need to grow the table */
+ if (resize && ir_resize_table(rc_tab))
+ return -ENOMEM;
+
+ IR_dprintk(1, "#%d: New scan 0x%04x with key 0x%04x\n",
+ i, scancode, keycode);
+
+ /* i is the proper index to insert our new keycode */
+ memmove(&rc_tab->scan[i + 1], &rc_tab->scan[i],
+ (rc_tab->len - i) * sizeof(struct ir_scancode));
+ rc_tab->scan[i].scancode = scancode;
+ rc_tab->scan[i].keycode = keycode;
+ rc_tab->len++;
+ set_bit(keycode, dev->keybit);
+ } else {
+ IR_dprintk(1, "#%d: Replacing scan 0x%04x with key 0x%04x\n",
+ i, scancode, keycode);
+ /* A previous mapping was updated... */
+ clear_bit(old_keycode, dev->keybit);
+ /* ...but another scancode might use the same keycode */
+ for (i = 0; i < rc_tab->len; i++) {
+ if (rc_tab->scan[i].keycode == old_keycode) {
+ set_bit(old_keycode, dev->keybit);
+ break;
+ }
+ }
+ }
+
+ return 0;
}
/**
- * ir_copy_table() - copies a keytable, discarding the unused entries
- * @destin: destin table
- * @origin: origin table
+ * ir_setkeycode() - set a keycode in the scancode->keycode table
+ * @dev: the struct input_dev device descriptor
+ * @scancode: the desired scancode
+ * @keycode: the keycode to be assigned to the scancode
+ * @return: a negative error code if the keycode could not be set, otherwise zero.
*
- * Copies all entries where the keycode is not KEY_UNKNOWN/KEY_RESERVED
- * Also copies table size and table protocol.
- * NOTE: It shouldn't copy the lock field
+ * This routine is used to handle evdev EVIOCSKEY ioctl.
*/
-
-static int ir_copy_table(struct ir_scancode_table *destin,
- const struct ir_scancode_table *origin)
+static int ir_setkeycode(struct input_dev *dev,
+ unsigned int scancode, unsigned int keycode)
{
- int i, j = 0;
-
- for (i = 0; i < origin->size; i++) {
- if (origin->scan[i].keycode == KEY_UNKNOWN ||
- origin->scan[i].keycode == KEY_RESERVED)
- continue;
+ int rc;
+ unsigned long flags;
+ struct ir_input_dev *ir_dev = input_get_drvdata(dev);
+ struct ir_scancode_table *rc_tab = &ir_dev->rc_tab;
- memcpy(&destin->scan[j], &origin->scan[i], sizeof(struct ir_scancode));
- j++;
- }
- destin->size = j;
- destin->ir_type = origin->ir_type;
+ spin_lock_irqsave(&rc_tab->lock, flags);
+ rc = ir_do_setkeycode(dev, rc_tab, scancode, keycode, true);
+ spin_unlock_irqrestore(&rc_tab->lock, flags);
+ return rc;
+}
- IR_dprintk(1, "Copied %d scancodes to the new keycode table\n", destin->size);
+/**
+ * ir_setkeytable() - sets several entries in the scancode->keycode table
+ * @dev: the struct input_dev device descriptor
+ * @to: the struct ir_scancode_table to copy entries to
+ * @from: the struct ir_scancode_table to copy entries from
+ * @return: a negative error code if any keycode could not be inserted, otherwise zero.
+ *
+ * This routine is used to handle table initialization.
+ */
+static int ir_setkeytable(struct input_dev *dev,
+ struct ir_scancode_table *to,
+ const struct ir_scancode_table *from)
+{
+ struct ir_input_dev *ir_dev = input_get_drvdata(dev);
+ struct ir_scancode_table *rc_tab = &ir_dev->rc_tab;
+ unsigned long flags;
+ unsigned int i;
+ int rc = 0;
- return 0;
+ spin_lock_irqsave(&rc_tab->lock, flags);
+ for (i = 0; i < from->size; i++) {
+ rc = ir_do_setkeycode(dev, to, from->scan[i].scancode,
+ from->scan[i].keycode, false);
+ if (rc)
+ break;
+ }
+ spin_unlock_irqrestore(&rc_tab->lock, flags);
+ return rc;
}
/**
- * ir_getkeycode() - get a keycode at the evdev scancode ->keycode table
+ * ir_getkeycode() - get a keycode from the scancode->keycode table
* @dev: the struct input_dev device descriptor
* @scancode: the desired scancode
- * @keycode: the keycode to be retorned.
+ * @keycode: used to return the keycode, if found, or KEY_RESERVED
+ * @return: always returns zero.
*
* This routine is used to handle evdev EVIOCGKEY ioctl.
- * If the key is not found, returns -EINVAL, otherwise, returns 0.
*/
static int ir_getkeycode(struct input_dev *dev,
unsigned int scancode, unsigned int *keycode)
{
- int elem;
+ int start, end, mid;
+ unsigned long flags;
+ int key = KEY_RESERVED;
struct ir_input_dev *ir_dev = input_get_drvdata(dev);
struct ir_scancode_table *rc_tab = &ir_dev->rc_tab;
- elem = ir_seek_table(rc_tab, scancode);
- if (elem >= 0) {
- *keycode = rc_tab->scan[elem].keycode;
- return 0;
+ spin_lock_irqsave(&rc_tab->lock, flags);
+ start = 0;
+ end = rc_tab->len - 1;
+ while (start <= end) {
+ mid = (start + end) / 2;
+ if (rc_tab->scan[mid].scancode < scancode)
+ start = mid + 1;
+ else if (rc_tab->scan[mid].scancode > scancode)
+ end = mid - 1;
+ else {
+ key = rc_tab->scan[mid].keycode;
+ break;
+ }
}
+ spin_unlock_irqrestore(&rc_tab->lock, flags);
- /*
- * Scancode not found and table can't be expanded
- */
- if (elem < 0 && rc_tab->size == IR_TAB_MAX_SIZE)
- return -EINVAL;
+ if (key == KEY_RESERVED)
+ IR_dprintk(1, "unknown key for scancode 0x%04x\n",
+ scancode);
- /*
- * If is there extra space, returns KEY_RESERVED,
- * otherwise, input core won't let ir_setkeycode to work
- */
- *keycode = KEY_RESERVED;
+ *keycode = key;
return 0;
}
/**
- * ir_is_resize_needed() - Check if the table needs rezise
- * @table: keycode table that may need to resize
- * @n_elems: minimum number of entries to store keycodes
- *
- * Considering that kmalloc uses power of two storage areas, this
- * routine detects if the real alloced size will change. If not, it
- * just returns without doing nothing. Otherwise, it will extend or
- * reduce the table size to meet the new needs.
+ * ir_g_keycode_from_table() - gets the keycode that corresponds to a scancode
+ * @input_dev: the struct input_dev descriptor of the device
+ * @scancode: the scancode that we're seeking
*
- * It returns 0 if no resize is needed, 1 otherwise.
+ * This routine is used by the input routines when a key is pressed at the
+ * IR. The scancode is received and needs to be converted into a keycode.
+ * If the key is not found, it returns KEY_RESERVED. Otherwise, returns the
+ * corresponding keycode from the table.
*/
-static int ir_is_resize_needed(struct ir_scancode_table *table, int n_elems)
+u32 ir_g_keycode_from_table(struct input_dev *dev, u32 scancode)
{
- int cur_size = ir_roundup_tablesize(table->size);
- int new_size = ir_roundup_tablesize(n_elems);
-
- if (cur_size == new_size)
- return 0;
+ int keycode;
- /* Resize is needed */
- return 1;
+ ir_getkeycode(dev, scancode, &keycode);
+ if (keycode != KEY_RESERVED)
+ IR_dprintk(1, "%s: scancode 0x%04x keycode 0x%02x\n",
+ dev->name, scancode, keycode);
+ return keycode;
}
+EXPORT_SYMBOL_GPL(ir_g_keycode_from_table);
/**
- * ir_delete_key() - remove a keycode from the table
- * @rc_tab: keycode table
- * @elem: element to be removed
+ * ir_keyup() - generates input event to cleanup a key press
+ * @ir: the struct ir_input_dev descriptor of the device
*
+ * This routine is used to signal that a key has been released on the
+ * remote control. It reports a keyup input event via input_report_key().
*/
-static void ir_delete_key(struct ir_scancode_table *rc_tab, int elem)
+static void ir_keyup(struct ir_input_dev *ir)
{
- unsigned long flags = 0;
- int newsize = rc_tab->size - 1;
- int resize = ir_is_resize_needed(rc_tab, newsize);
- struct ir_scancode *oldkeymap = rc_tab->scan;
- struct ir_scancode *newkeymap = NULL;
-
- if (resize)
- newkeymap = kzalloc(ir_roundup_tablesize(newsize) *
- sizeof(*newkeymap), GFP_ATOMIC);
-
- /* There's no memory for resize. Keep the old table */
- if (!resize || !newkeymap) {
- newkeymap = oldkeymap;
-
- /* We'll modify the live table. Lock it */
- spin_lock_irqsave(&rc_tab->lock, flags);
- }
+ if (!ir->keypressed)
+ return;
- /*
- * Copy the elements before the one that will be deleted
- * if (!resize), both oldkeymap and newkeymap points
- * to the same place, so, there's no need to copy
- */
- if (resize && elem > 0)
- memcpy(newkeymap, oldkeymap,
- elem * sizeof(*newkeymap));
+ IR_dprintk(1, "keyup key 0x%04x\n", ir->last_keycode);
+ input_report_key(ir->input_dev, ir->last_keycode, 0);
+ input_sync(ir->input_dev);
+ ir->keypressed = false;
+}
+
+/**
+ * ir_timer_keyup() - generates a keyup event after a timeout
+ * @cookie: a pointer to struct ir_input_dev passed to setup_timer()
+ *
+ * This routine will generate a keyup event some time after a keydown event
+ * is generated when no further activity has been detected.
+ */
+static void ir_timer_keyup(unsigned long cookie)
+{
+ struct ir_input_dev *ir = (struct ir_input_dev *)cookie;
+ unsigned long flags;
/*
- * Copy the other elements overwriting the element to be removed
- * This operation applies to both resize and non-resize case
+ * ir->keyup_jiffies is used to prevent a race condition if a
+ * hardware interrupt occurs at this point and the keyup timer
+ * event is moved further into the future as a result.
+ *
+ * The timer will then be reactivated and this function called
+ * again in the future. We need to exit gracefully in that case
+ * to allow the input subsystem to do its auto-repeat magic or
+ * a keyup event might follow immediately after the keydown.
*/
- if (elem < newsize)
- memcpy(&newkeymap[elem], &oldkeymap[elem + 1],
- (newsize - elem) * sizeof(*newkeymap));
-
- if (resize) {
- /*
- * As the copy happened to a temporary table, only here
- * it needs to lock while replacing the table pointers
- * to use the new table
- */
- spin_lock_irqsave(&rc_tab->lock, flags);
- rc_tab->size = newsize;
- rc_tab->scan = newkeymap;
- spin_unlock_irqrestore(&rc_tab->lock, flags);
-
- /* Frees the old keytable */
- kfree(oldkeymap);
- } else {
- rc_tab->size = newsize;
- spin_unlock_irqrestore(&rc_tab->lock, flags);
- }
+ spin_lock_irqsave(&ir->keylock, flags);
+ if (time_is_after_eq_jiffies(ir->keyup_jiffies))
+ ir_keyup(ir);
+ spin_unlock_irqrestore(&ir->keylock, flags);
}
/**
- * ir_insert_key() - insert a keycode at the table
- * @rc_tab: keycode table
- * @scancode: the desired scancode
- * @keycode: the keycode to be retorned.
+ * ir_repeat() - notifies the IR core that a key is still pressed
+ * @dev: the struct input_dev descriptor of the device
*
+ * This routine is used by IR decoders when a repeat message which does
+ * not include the necessary bits to reproduce the scancode has been
+ * received.
*/
-static int ir_insert_key(struct ir_scancode_table *rc_tab,
- int scancode, int keycode)
+void ir_repeat(struct input_dev *dev)
{
unsigned long flags;
- int elem = rc_tab->size;
- int newsize = rc_tab->size + 1;
- int resize = ir_is_resize_needed(rc_tab, newsize);
- struct ir_scancode *oldkeymap = rc_tab->scan;
- struct ir_scancode *newkeymap;
-
- if (resize) {
- newkeymap = kzalloc(ir_roundup_tablesize(newsize) *
- sizeof(*newkeymap), GFP_ATOMIC);
- if (!newkeymap)
- return -ENOMEM;
+ struct ir_input_dev *ir = input_get_drvdata(dev);
- memcpy(newkeymap, oldkeymap,
- rc_tab->size * sizeof(*newkeymap));
- } else
- newkeymap = oldkeymap;
+ spin_lock_irqsave(&ir->keylock, flags);
- /* Stores the new code at the table */
- IR_dprintk(1, "#%d: New scan 0x%04x with key 0x%04x\n",
- rc_tab->size, scancode, keycode);
+ if (!ir->keypressed)
+ goto out;
- spin_lock_irqsave(&rc_tab->lock, flags);
- rc_tab->size = newsize;
- if (resize) {
- rc_tab->scan = newkeymap;
- kfree(oldkeymap);
- }
- newkeymap[elem].scancode = scancode;
- newkeymap[elem].keycode = keycode;
- spin_unlock_irqrestore(&rc_tab->lock, flags);
+ ir->keyup_jiffies = jiffies + msecs_to_jiffies(IR_KEYPRESS_TIMEOUT);
+ mod_timer(&ir->timer_keyup, ir->keyup_jiffies);
- return 0;
+out:
+ spin_unlock_irqrestore(&ir->keylock, flags);
}
+EXPORT_SYMBOL_GPL(ir_repeat);
/**
- * ir_setkeycode() - set a keycode at the evdev scancode ->keycode table
- * @dev: the struct input_dev device descriptor
- * @scancode: the desired scancode
- * @keycode: the keycode to be retorned.
+ * ir_keydown() - generates input event for a key press
+ * @dev: the struct input_dev descriptor of the device
+ * @scancode: the scancode that we're seeking
+ * @toggle: the toggle value (protocol dependent, if the protocol doesn't
+ * support toggle values, this should be set to zero)
*
- * This routine is used to handle evdev EVIOCSKEY ioctl.
- * There's one caveat here: how can we increase the size of the table?
- * If the key is not found, returns -EINVAL, otherwise, returns 0.
+ * This routine is used by the input routines when a key is pressed at the
+ * IR. It gets the keycode for a scancode and reports an input event via
+ * input_report_key().
*/
-static int ir_setkeycode(struct input_dev *dev,
- unsigned int scancode, unsigned int keycode)
+void ir_keydown(struct input_dev *dev, int scancode, u8 toggle)
{
- int rc = 0;
- struct ir_input_dev *ir_dev = input_get_drvdata(dev);
- struct ir_scancode_table *rc_tab = &ir_dev->rc_tab;
- struct ir_scancode *keymap = rc_tab->scan;
unsigned long flags;
+ struct ir_input_dev *ir = input_get_drvdata(dev);
- /*
- * Handle keycode table deletions
- *
- * If userspace is adding a KEY_UNKNOWN or KEY_RESERVED,
- * deal as a trial to remove an existing scancode attribution
- * if table become too big, reduce it to save space
- */
- if (keycode == KEY_UNKNOWN || keycode == KEY_RESERVED) {
- rc = ir_seek_table(rc_tab, scancode);
- if (rc < 0)
- return 0;
-
- IR_dprintk(1, "#%d: Deleting scan 0x%04x\n", rc, scancode);
- clear_bit(keymap[rc].keycode, dev->keybit);
- ir_delete_key(rc_tab, rc);
-
- return 0;
- }
+ u32 keycode = ir_g_keycode_from_table(dev, scancode);
- /*
- * Handle keycode replacements
- *
- * If the scancode exists, just replace by the new value
- */
- rc = ir_seek_table(rc_tab, scancode);
- if (rc >= 0) {
- IR_dprintk(1, "#%d: Replacing scan 0x%04x with key 0x%04x\n",
- rc, scancode, keycode);
+ spin_lock_irqsave(&ir->keylock, flags);
- clear_bit(keymap[rc].keycode, dev->keybit);
+ /* Repeat event? */
+ if (ir->keypressed &&
+ ir->last_scancode == scancode &&
+ ir->last_toggle == toggle)
+ goto set_timer;
- spin_lock_irqsave(&rc_tab->lock, flags);
- keymap[rc].keycode = keycode;
- spin_unlock_irqrestore(&rc_tab->lock, flags);
+ /* Release old keypress */
+ ir_keyup(ir);
- set_bit(keycode, dev->keybit);
+ ir->last_scancode = scancode;
+ ir->last_toggle = toggle;
+ ir->last_keycode = keycode;
- return 0;
- }
+ if (keycode == KEY_RESERVED)
+ goto out;
- /*
- * Handle new scancode inserts
- *
- * reallocate table if needed and insert a new keycode
- */
+ /* Register a keypress */
+ ir->keypressed = true;
+ IR_dprintk(1, "%s: key down event, key 0x%04x, scancode 0x%04x\n",
+ dev->name, keycode, scancode);
+ input_report_key(dev, ir->last_keycode, 1);
+ input_sync(dev);
- /* Avoid growing the table indefinitely */
- if (rc_tab->size + 1 > IR_TAB_MAX_SIZE)
- return -EINVAL;
-
- rc = ir_insert_key(rc_tab, scancode, keycode);
- if (rc < 0)
- return rc;
- set_bit(keycode, dev->keybit);
-
- return 0;
+set_timer:
+ ir->keyup_jiffies = jiffies + msecs_to_jiffies(IR_KEYPRESS_TIMEOUT);
+ mod_timer(&ir->timer_keyup, ir->keyup_jiffies);
+out:
+ spin_unlock_irqrestore(&ir->keylock, flags);
}
+EXPORT_SYMBOL_GPL(ir_keydown);
-/**
- * ir_g_keycode_from_table() - gets the keycode that corresponds to a scancode
- * @input_dev: the struct input_dev descriptor of the device
- * @scancode: the scancode that we're seeking
- *
- * This routine is used by the input routines when a key is pressed at the
- * IR. The scancode is received and needs to be converted into a keycode.
- * If the key is not found, it returns KEY_UNKNOWN. Otherwise, returns the
- * corresponding keycode from the table.
- */
-u32 ir_g_keycode_from_table(struct input_dev *dev, u32 scancode)
+static int ir_open(struct input_dev *input_dev)
{
- struct ir_input_dev *ir_dev = input_get_drvdata(dev);
- struct ir_scancode_table *rc_tab = &ir_dev->rc_tab;
- struct ir_scancode *keymap = rc_tab->scan;
- int elem;
+ struct ir_input_dev *ir_dev = input_get_drvdata(input_dev);
- elem = ir_seek_table(rc_tab, scancode);
- if (elem >= 0) {
- IR_dprintk(1, "%s: scancode 0x%04x keycode 0x%02x\n",
- dev->name, scancode, keymap[elem].keycode);
-
- return rc_tab->scan[elem].keycode;
- }
+ return ir_dev->props->open(ir_dev->props->priv);
+}
- printk(KERN_INFO "%s: unknown key for scancode 0x%04x\n",
- dev->name, scancode);
+static void ir_close(struct input_dev *input_dev)
+{
+ struct ir_input_dev *ir_dev = input_get_drvdata(input_dev);
- /* Reports userspace that an unknown keycode were got */
- return KEY_RESERVED;
+ ir_dev->props->close(ir_dev->props->priv);
}
-EXPORT_SYMBOL_GPL(ir_g_keycode_from_table);
/**
- * ir_input_register() - sets the IR keycode table and add the handlers
+ * __ir_input_register() - sets the IR keycode table and adds the handlers
* for keymap table get/set
* @input_dev: the struct input_dev descriptor of the device
* @rc_tab: the struct ir_scancode_table table of scancode/keymap
@@ -402,13 +426,13 @@ EXPORT_SYMBOL_GPL(ir_g_keycode_from_table);
* It will register the input/evdev interface for the device and
 * register the sysfs code for the IR class
*/
-int ir_input_register(struct input_dev *input_dev,
+int __ir_input_register(struct input_dev *input_dev,
const struct ir_scancode_table *rc_tab,
- const struct ir_dev_props *props)
+ const struct ir_dev_props *props,
+ const char *driver_name)
{
struct ir_input_dev *ir_dev;
- struct ir_scancode *keymap = rc_tab->scan;
- int i, rc;
+ int rc;
if (rc_tab->scan == NULL || !rc_tab->size)
return -EINVAL;
@@ -417,57 +441,77 @@ int ir_input_register(struct input_dev *input_dev,
if (!ir_dev)
return -ENOMEM;
- spin_lock_init(&ir_dev->rc_tab.lock);
-
- ir_dev->rc_tab.size = ir_roundup_tablesize(rc_tab->size);
- ir_dev->rc_tab.scan = kzalloc(ir_dev->rc_tab.size *
- sizeof(struct ir_scancode), GFP_KERNEL);
- if (!ir_dev->rc_tab.scan) {
- kfree(ir_dev);
- return -ENOMEM;
+ ir_dev->driver_name = kasprintf(GFP_KERNEL, "%s", driver_name);
+ if (!ir_dev->driver_name) {
+ rc = -ENOMEM;
+ goto out_dev;
}
- IR_dprintk(1, "Allocated space for %d keycode entries (%zd bytes)\n",
- ir_dev->rc_tab.size,
- ir_dev->rc_tab.size * sizeof(ir_dev->rc_tab.scan));
+ input_dev->getkeycode = ir_getkeycode;
+ input_dev->setkeycode = ir_setkeycode;
+ input_set_drvdata(input_dev, ir_dev);
+ ir_dev->input_dev = input_dev;
- ir_copy_table(&ir_dev->rc_tab, rc_tab);
- ir_dev->props = props;
+ spin_lock_init(&ir_dev->rc_tab.lock);
+ spin_lock_init(&ir_dev->keylock);
+ setup_timer(&ir_dev->timer_keyup, ir_timer_keyup, (unsigned long)ir_dev);
+
+ ir_dev->rc_tab.name = rc_tab->name;
+ ir_dev->rc_tab.ir_type = rc_tab->ir_type;
+ ir_dev->rc_tab.alloc = roundup_pow_of_two(rc_tab->size *
+ sizeof(struct ir_scancode));
+ ir_dev->rc_tab.scan = kmalloc(ir_dev->rc_tab.alloc, GFP_KERNEL);
+ ir_dev->rc_tab.size = ir_dev->rc_tab.alloc / sizeof(struct ir_scancode);
+ if (props) {
+ ir_dev->props = props;
+ if (props->open)
+ input_dev->open = ir_open;
+ if (props->close)
+ input_dev->close = ir_close;
+ }
- /* set the bits for the keys */
- IR_dprintk(1, "key map size: %d\n", rc_tab->size);
- for (i = 0; i < rc_tab->size; i++) {
- IR_dprintk(1, "#%d: setting bit for keycode 0x%04x\n",
- i, keymap[i].keycode);
- set_bit(keymap[i].keycode, input_dev->keybit);
+ if (!ir_dev->rc_tab.scan) {
+ rc = -ENOMEM;
+ goto out_name;
}
- clear_bit(0, input_dev->keybit);
+
+ IR_dprintk(1, "Allocated space for %u keycode entries (%u bytes)\n",
+ ir_dev->rc_tab.size, ir_dev->rc_tab.alloc);
set_bit(EV_KEY, input_dev->evbit);
+ set_bit(EV_REP, input_dev->evbit);
- input_dev->getkeycode = ir_getkeycode;
- input_dev->setkeycode = ir_setkeycode;
- input_set_drvdata(input_dev, ir_dev);
+ if (ir_setkeytable(input_dev, &ir_dev->rc_tab, rc_tab)) {
+ rc = -ENOMEM;
+ goto out_table;
+ }
- rc = input_register_device(input_dev);
+ rc = ir_register_class(input_dev);
if (rc < 0)
- goto err;
+ goto out_table;
- rc = ir_register_class(input_dev);
- if (rc < 0) {
- input_unregister_device(input_dev);
- goto err;
+ if (ir_dev->props->driver_type == RC_DRIVER_IR_RAW) {
+ rc = ir_raw_event_register(input_dev);
+ if (rc < 0)
+ goto out_event;
}
+ IR_dprintk(1, "Registered input device on %s for %s remote.\n",
+ driver_name, rc_tab->name);
+
return 0;
-err:
- kfree(rc_tab->scan);
+out_event:
+ ir_unregister_class(input_dev);
+out_table:
+ kfree(ir_dev->rc_tab.scan);
+out_name:
+ kfree(ir_dev->driver_name);
+out_dev:
kfree(ir_dev);
- input_set_drvdata(input_dev, NULL);
return rc;
}
-EXPORT_SYMBOL_GPL(ir_input_register);
+EXPORT_SYMBOL_GPL(__ir_input_register);
/**
* ir_input_unregister() - unregisters IR and frees resources
@@ -475,9 +519,9 @@ EXPORT_SYMBOL_GPL(ir_input_register);
* This routine is used to free memory and de-register interfaces.
*/
-void ir_input_unregister(struct input_dev *dev)
+void ir_input_unregister(struct input_dev *input_dev)
{
- struct ir_input_dev *ir_dev = input_get_drvdata(dev);
+ struct ir_input_dev *ir_dev = input_get_drvdata(input_dev);
struct ir_scancode_table *rc_tab;
if (!ir_dev)
@@ -485,15 +529,18 @@ void ir_input_unregister(struct input_dev *dev)
IR_dprintk(1, "Freed keycode table\n");
+ del_timer_sync(&ir_dev->timer_keyup);
+ if (ir_dev->props->driver_type == RC_DRIVER_IR_RAW)
+ ir_raw_event_unregister(input_dev);
rc_tab = &ir_dev->rc_tab;
rc_tab->size = 0;
kfree(rc_tab->scan);
rc_tab->scan = NULL;
- ir_unregister_class(dev);
+ ir_unregister_class(input_dev);
+ kfree(ir_dev->driver_name);
kfree(ir_dev);
- input_unregister_device(dev);
}
EXPORT_SYMBOL_GPL(ir_input_unregister);
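A minimal, hypothetical usage sketch of the reworked interface (the keymap, props structure and driver name below are made up for illustration; RC_DRIVER_SCANCODE is assumed to be the non-raw counterpart of the RC_DRIVER_IR_RAW value tested above):

static struct ir_scancode my_keys[] = {
	{ 0x1e3d, KEY_POWER },		/* a complete RC5 scancode, as in the tables above */
};

static struct ir_scancode_table my_rc_tab = {
	.scan	 = my_keys,
	.size	 = ARRAY_SIZE(my_keys),
	.ir_type = IR_TYPE_RC5,
	.name	 = "rc-my-remote",	/* shows up in the "Registered input device" message */
};

static struct ir_dev_props my_props = {
	.driver_type = RC_DRIVER_SCANCODE,	/* assumption: hardware delivers ready-made scancodes */
};

static int my_probe(struct input_dev *input_dev)
{
	/* the last argument becomes ir_dev->driver_name */
	return __ir_input_register(input_dev, &my_rc_tab, &my_props, "my-bridge");
}

/* e.g. from the bridge driver's interrupt handler, once a scancode arrives: */
static void my_report(struct input_dev *input_dev, u32 scancode)
{
	ir_keydown(input_dev, scancode, 0);	/* 0: no toggle bit wired up in this sketch */
}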
diff --git a/drivers/media/IR/ir-nec-decoder.c b/drivers/media/IR/ir-nec-decoder.c
new file mode 100644
index 000000000000..ba79233112ef
--- /dev/null
+++ b/drivers/media/IR/ir-nec-decoder.c
@@ -0,0 +1,328 @@
+/* ir-nec-decoder.c - handle NEC IR Pulse/Space protocol
+ *
+ * Copyright (C) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitrev.h>
+#include "ir-core-priv.h"
+
+#define NEC_NBITS 32
+#define NEC_UNIT 562500 /* ns */
+#define NEC_HEADER_PULSE (16 * NEC_UNIT)
+#define NECX_HEADER_PULSE (8 * NEC_UNIT) /* Less common NEC variant */
+#define NEC_HEADER_SPACE (8 * NEC_UNIT)
+#define NEC_REPEAT_SPACE (4 * NEC_UNIT) /* 2.25 ms, shorter than the 4.5 ms header space */
+#define NEC_BIT_PULSE (1 * NEC_UNIT)
+#define NEC_BIT_0_SPACE (1 * NEC_UNIT)
+#define NEC_BIT_1_SPACE (3 * NEC_UNIT)
+#define NEC_TRAILER_PULSE (1 * NEC_UNIT)
+#define NEC_TRAILER_SPACE (10 * NEC_UNIT) /* even longer in reality */
+
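For orientation, the nominal NEC timings that follow from the definitions above (a sketch, not part of the decoder itself):

/*
 * NEC_UNIT = 562.5 us, so nominally:
 *   header:  16 units pulse + 8 units space  =  9 ms + 4.5 ms
 *   bit "0":  1 unit  pulse + 1 unit  space  =  1.125 ms total
 *   bit "1":  1 unit  pulse + 3 units space  =  2.25 ms total
 *   trailer: 1 unit pulse, then at least 10 units of silence
 * A standard 32-bit frame lasts about 68 ms, since the complement bytes
 * fix the number of "1" bits at sixteen.
 */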
+/* Used to register nec_decoder clients */
+static LIST_HEAD(decoder_list);
+static DEFINE_SPINLOCK(decoder_lock);
+
+enum nec_state {
+ STATE_INACTIVE,
+ STATE_HEADER_SPACE,
+ STATE_BIT_PULSE,
+ STATE_BIT_SPACE,
+ STATE_TRAILER_PULSE,
+ STATE_TRAILER_SPACE,
+};
+
+struct decoder_data {
+ struct list_head list;
+ struct ir_input_dev *ir_dev;
+ int enabled:1;
+
+ /* State machine control */
+ enum nec_state state;
+ u32 nec_bits;
+ unsigned count;
+};
+
+
+/**
+ * get_decoder_data() - gets decoder data
+ * @ir_dev: the struct ir_input_dev descriptor of the device
+ *
+ * Returns the struct decoder_data that corresponds to a device
+ */
+static struct decoder_data *get_decoder_data(struct ir_input_dev *ir_dev)
+{
+ struct decoder_data *data = NULL;
+
+ spin_lock(&decoder_lock);
+ list_for_each_entry(data, &decoder_list, list) {
+ if (data->ir_dev == ir_dev)
+ break;
+ }
+ spin_unlock(&decoder_lock);
+ return data;
+}
+
+static ssize_t store_enabled(struct device *d,
+ struct device_attribute *mattr,
+ const char *buf,
+ size_t len)
+{
+ unsigned long value;
+ struct ir_input_dev *ir_dev = dev_get_drvdata(d);
+ struct decoder_data *data = get_decoder_data(ir_dev);
+
+ if (!data)
+ return -EINVAL;
+
+ if (strict_strtoul(buf, 10, &value) || value > 1)
+ return -EINVAL;
+
+ data->enabled = value;
+
+ return len;
+}
+
+static ssize_t show_enabled(struct device *d,
+ struct device_attribute *mattr, char *buf)
+{
+ struct ir_input_dev *ir_dev = dev_get_drvdata(d);
+ struct decoder_data *data = get_decoder_data(ir_dev);
+
+ if (!data)
+ return -EINVAL;
+
+ if (data->enabled)
+ return sprintf(buf, "1\n");
+ else
+ return sprintf(buf, "0\n");
+}
+
+static DEVICE_ATTR(enabled, S_IRUGO | S_IWUSR, show_enabled, store_enabled);
+
+static struct attribute *decoder_attributes[] = {
+ &dev_attr_enabled.attr,
+ NULL
+};
+
+static struct attribute_group decoder_attribute_group = {
+ .name = "nec_decoder",
+ .attrs = decoder_attributes,
+};
+
+/**
+ * ir_nec_decode() - Decode one NEC pulse or space
+ * @input_dev: the struct input_dev descriptor of the device
+ * @ev: the struct ir_raw_event descriptor of the pulse/space
+ *
+ * This function returns -EINVAL if the pulse violates the state machine
+ */
+static int ir_nec_decode(struct input_dev *input_dev, struct ir_raw_event ev)
+{
+ struct decoder_data *data;
+ struct ir_input_dev *ir_dev = input_get_drvdata(input_dev);
+ u32 scancode;
+ u8 address, not_address, command, not_command;
+
+ data = get_decoder_data(ir_dev);
+ if (!data)
+ return -EINVAL;
+
+ if (!data->enabled)
+ return 0;
+
+ if (IS_RESET(ev)) {
+ data->state = STATE_INACTIVE;
+ return 0;
+ }
+
+ IR_dprintk(2, "NEC decode started at state %d (%uus %s)\n",
+ data->state, TO_US(ev.duration), TO_STR(ev.pulse));
+
+ switch (data->state) {
+
+ case STATE_INACTIVE:
+ if (!ev.pulse)
+ break;
+
+ if (!eq_margin(ev.duration, NEC_HEADER_PULSE, NEC_UNIT / 2) &&
+ !eq_margin(ev.duration, NECX_HEADER_PULSE, NEC_UNIT / 2))
+ break;
+
+ data->count = 0;
+ data->state = STATE_HEADER_SPACE;
+ return 0;
+
+ case STATE_HEADER_SPACE:
+ if (ev.pulse)
+ break;
+
+ if (eq_margin(ev.duration, NEC_HEADER_SPACE, NEC_UNIT / 2)) {
+ data->state = STATE_BIT_PULSE;
+ return 0;
+ } else if (eq_margin(ev.duration, NEC_REPEAT_SPACE, NEC_UNIT / 2)) {
+ ir_repeat(input_dev);
+ IR_dprintk(1, "Repeat last key\n");
+ data->state = STATE_TRAILER_PULSE;
+ return 0;
+ }
+
+ break;
+
+ case STATE_BIT_PULSE:
+ if (!ev.pulse)
+ break;
+
+ if (!eq_margin(ev.duration, NEC_BIT_PULSE, NEC_UNIT / 2))
+ break;
+
+ data->state = STATE_BIT_SPACE;
+ return 0;
+
+ case STATE_BIT_SPACE:
+ if (ev.pulse)
+ break;
+
+ data->nec_bits <<= 1;
+ if (eq_margin(ev.duration, NEC_BIT_1_SPACE, NEC_UNIT / 2))
+ data->nec_bits |= 1;
+ else if (!eq_margin(ev.duration, NEC_BIT_0_SPACE, NEC_UNIT / 2))
+ break;
+ data->count++;
+
+ if (data->count == NEC_NBITS)
+ data->state = STATE_TRAILER_PULSE;
+ else
+ data->state = STATE_BIT_PULSE;
+
+ return 0;
+
+ case STATE_TRAILER_PULSE:
+ if (!ev.pulse)
+ break;
+
+ if (!eq_margin(ev.duration, NEC_TRAILER_PULSE, NEC_UNIT / 2))
+ break;
+
+ data->state = STATE_TRAILER_SPACE;
+ return 0;
+
+ case STATE_TRAILER_SPACE:
+ if (ev.pulse)
+ break;
+
+ if (!geq_margin(ev.duration, NEC_TRAILER_SPACE, NEC_UNIT / 2))
+ break;
+
+ address = bitrev8((data->nec_bits >> 24) & 0xff);
+ not_address = bitrev8((data->nec_bits >> 16) & 0xff);
+ command = bitrev8((data->nec_bits >> 8) & 0xff);
+ not_command = bitrev8((data->nec_bits >> 0) & 0xff);
+
+ if ((command ^ not_command) != 0xff) {
+ IR_dprintk(1, "NEC checksum error: received 0x%08x\n",
+ data->nec_bits);
+ break;
+ }
+
+ if ((address ^ not_address) != 0xff) {
+ /* Extended NEC */
+ scancode = address << 16 |
+ not_address << 8 |
+ command;
+ IR_dprintk(1, "NEC (Ext) scancode 0x%06x\n", scancode);
+ } else {
+ /* Normal NEC */
+ scancode = address << 8 | command;
+ IR_dprintk(1, "NEC scancode 0x%04x\n", scancode);
+ }
+
+ ir_keydown(input_dev, scancode, 0);
+ data->state = STATE_INACTIVE;
+ return 0;
+ }
+
+ IR_dprintk(1, "NEC decode failed at state %d (%uus %s)\n",
+ data->state, TO_US(ev.duration), TO_STR(ev.pulse));
+ data->state = STATE_INACTIVE;
+ return -EINVAL;
+}
+
+static int ir_nec_register(struct input_dev *input_dev)
+{
+ struct ir_input_dev *ir_dev = input_get_drvdata(input_dev);
+ struct decoder_data *data;
+ int rc;
+
+ rc = sysfs_create_group(&ir_dev->dev.kobj, &decoder_attribute_group);
+ if (rc < 0)
+ return rc;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ sysfs_remove_group(&ir_dev->dev.kobj, &decoder_attribute_group);
+ return -ENOMEM;
+ }
+
+ data->ir_dev = ir_dev;
+ data->enabled = 1;
+
+ spin_lock(&decoder_lock);
+ list_add_tail(&data->list, &decoder_list);
+ spin_unlock(&decoder_lock);
+
+ return 0;
+}
+
+static int ir_nec_unregister(struct input_dev *input_dev)
+{
+ struct ir_input_dev *ir_dev = input_get_drvdata(input_dev);
+ static struct decoder_data *data;
+
+ data = get_decoder_data(ir_dev);
+ if (!data)
+ return 0;
+
+ sysfs_remove_group(&ir_dev->dev.kobj, &decoder_attribute_group);
+
+ spin_lock(&decoder_lock);
+ list_del(&data->list);
+ spin_unlock(&decoder_lock);
+
+ return 0;
+}
+
+static struct ir_raw_handler nec_handler = {
+ .decode = ir_nec_decode,
+ .raw_register = ir_nec_register,
+ .raw_unregister = ir_nec_unregister,
+};
+
+static int __init ir_nec_decode_init(void)
+{
+ ir_raw_handler_register(&nec_handler);
+
+ printk(KERN_INFO "IR NEC protocol handler initialized\n");
+ return 0;
+}
+
+static void __exit ir_nec_decode_exit(void)
+{
+ ir_raw_handler_unregister(&nec_handler);
+}
+
+module_init(ir_nec_decode_init);
+module_exit(ir_nec_decode_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)");
+MODULE_DESCRIPTION("NEC IR protocol decoder");
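A worked example of the decode path above, tied back to one of the NEC keymaps earlier in this patch (illustrative only):

/*
 * Pressing MUTE on the KWorld 315U remote (ir_codes_kworld_315u above,
 * entry { 0x6117, KEY_MUTE }) means NEC address 0x61, command 0x17.
 * The bytes go out LSB-first as address, ~address, command, ~command,
 * so the state machine accumulates:
 *
 *   data->nec_bits = 0x8679e817
 *     bitrev8(0x86) = 0x61   address
 *     bitrev8(0x79) = 0x9e   ~address  (0x61 ^ 0x9e == 0xff -> normal NEC)
 *     bitrev8(0xe8) = 0x17   command
 *     bitrev8(0x17) = 0xe8   ~command  (checksum ok)
 *
 *   scancode = 0x61 << 8 | 0x17 = 0x6117, which ir_keydown() then looks
 *   up in the keytable and reports as KEY_MUTE.
 */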
diff --git a/drivers/media/IR/ir-raw-event.c b/drivers/media/IR/ir-raw-event.c
new file mode 100644
index 000000000000..ea68a3f2effa
--- /dev/null
+++ b/drivers/media/IR/ir-raw-event.c
@@ -0,0 +1,251 @@
+/* ir-raw-event.c - handle IR Pulse/Space event
+ *
+ * Copyright (C) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/workqueue.h>
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include "ir-core-priv.h"
+
+/* Define the max number of pulse/space transitions to buffer */
+#define MAX_IR_EVENT_SIZE 512
+
+/* Used to handle IR raw handler extensions */
+static LIST_HEAD(ir_raw_handler_list);
+static DEFINE_SPINLOCK(ir_raw_handler_lock);
+
+/**
+ * RUN_DECODER() - runs an operation on all IR decoders
+ * @ops: IR raw handler operation to be called
+ * @arg: arguments to be passed to the callback
+ *
+ * Calls ir_raw_handler::ops for all registered IR handlers. It prevents
+ * decoder addition/removal while running by holding the
+ * ir_raw_handler_lock spinlock. If a callback returns an error, the loop
+ * stops there; otherwise the sum of the return codes is returned.
+ */
+#define RUN_DECODER(ops, ...) ({ \
+ struct ir_raw_handler *_ir_raw_handler; \
+ int _sumrc = 0, _rc; \
+ spin_lock(&ir_raw_handler_lock); \
+ list_for_each_entry(_ir_raw_handler, &ir_raw_handler_list, list) { \
+ if (_ir_raw_handler->ops) { \
+ _rc = _ir_raw_handler->ops(__VA_ARGS__); \
+ if (_rc < 0) \
+ break; \
+ _sumrc += _rc; \
+ } \
+ } \
+ spin_unlock(&ir_raw_handler_lock); \
+ _sumrc; \
+})
+
+#ifdef MODULE
+/* Used to load the decoders */
+static struct work_struct wq_load;
+#endif
+
+static void ir_raw_event_work(struct work_struct *work)
+{
+ struct ir_raw_event ev;
+ struct ir_raw_event_ctrl *raw =
+ container_of(work, struct ir_raw_event_ctrl, rx_work);
+
+ while (kfifo_out(&raw->kfifo, &ev, sizeof(ev)) == sizeof(ev))
+ RUN_DECODER(decode, raw->input_dev, ev);
+}
+
+int ir_raw_event_register(struct input_dev *input_dev)
+{
+ struct ir_input_dev *ir = input_get_drvdata(input_dev);
+ int rc;
+
+ ir->raw = kzalloc(sizeof(*ir->raw), GFP_KERNEL);
+ if (!ir->raw)
+ return -ENOMEM;
+
+ ir->raw->input_dev = input_dev;
+ INIT_WORK(&ir->raw->rx_work, ir_raw_event_work);
+
+ rc = kfifo_alloc(&ir->raw->kfifo, sizeof(s64) * MAX_IR_EVENT_SIZE,
+ GFP_KERNEL);
+ if (rc < 0) {
+ kfree(ir->raw);
+ ir->raw = NULL;
+ return rc;
+ }
+
+ rc = RUN_DECODER(raw_register, input_dev);
+ if (rc < 0) {
+ kfifo_free(&ir->raw->kfifo);
+ kfree(ir->raw);
+ ir->raw = NULL;
+ return rc;
+ }
+
+ return rc;
+}
+
+void ir_raw_event_unregister(struct input_dev *input_dev)
+{
+ struct ir_input_dev *ir = input_get_drvdata(input_dev);
+
+ if (!ir->raw)
+ return;
+
+ cancel_work_sync(&ir->raw->rx_work);
+ RUN_DECODER(raw_unregister, input_dev);
+
+ kfifo_free(&ir->raw->kfifo);
+ kfree(ir->raw);
+ ir->raw = NULL;
+}
+
+/**
+ * ir_raw_event_store() - pass a pulse/space duration to the raw ir decoders
+ * @input_dev: the struct input_dev device descriptor
+ * @ev: the struct ir_raw_event descriptor of the pulse/space
+ *
+ * This routine (which may be called from an interrupt context) stores a
+ * pulse/space duration for the raw ir decoding state machines. Pulses and
+ * spaces are distinguished by the ev->pulse flag, and an event with a zero
+ * duration resets the decoding state machines.
+ */
+int ir_raw_event_store(struct input_dev *input_dev, struct ir_raw_event *ev)
+{
+ struct ir_input_dev *ir = input_get_drvdata(input_dev);
+
+ if (!ir->raw)
+ return -EINVAL;
+
+ if (kfifo_in(&ir->raw->kfifo, ev, sizeof(*ev)) != sizeof(*ev))
+ return -ENOMEM;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ir_raw_event_store);
+
+/**
+ * ir_raw_event_store_edge() - notify raw ir decoders of the start of a pulse/space
+ * @input_dev: the struct input_dev device descriptor
+ * @type: the type of the event that has occurred
+ *
+ * This routine (which may be called from an interrupt context) is used to
+ * store the beginning of an ir pulse or space (or the start/end of ir
+ * reception) for the raw ir decoding state machines. This is used by
+ * hardware which does not provide durations directly but only interrupts
+ * (or similar events) on state change.
+ */
+int ir_raw_event_store_edge(struct input_dev *input_dev, enum raw_event_type type)
+{
+ struct ir_input_dev *ir = input_get_drvdata(input_dev);
+ ktime_t now;
+ s64 delta; /* ns */
+ struct ir_raw_event ev;
+ int rc = 0;
+
+ if (!ir->raw)
+ return -EINVAL;
+
+ now = ktime_get();
+ delta = ktime_to_ns(ktime_sub(now, ir->raw->last_event));
+
+ /* Check for a long duration since the last event, or whether we're
+ * being called for the first time; note that delta can't
+ * possibly be negative.
+ */
+ ev.duration = 0;
+ if (delta > IR_MAX_DURATION || !ir->raw->last_type)
+ type |= IR_START_EVENT;
+ else
+ ev.duration = delta;
+
+ if (type & IR_START_EVENT)
+ ir_raw_event_reset(input_dev);
+ else if (ir->raw->last_type & IR_SPACE) {
+ ev.pulse = false;
+ rc = ir_raw_event_store(input_dev, &ev);
+ } else if (ir->raw->last_type & IR_PULSE) {
+ ev.pulse = true;
+ rc = ir_raw_event_store(input_dev, &ev);
+ } else
+ return 0;
+
+ ir->raw->last_event = now;
+ ir->raw->last_type = type;
+ return rc;
+}
+EXPORT_SYMBOL_GPL(ir_raw_event_store_edge);
+
+/**
+ * ir_raw_event_handle() - schedules the decoding of stored ir data
+ * @input_dev: the struct input_dev device descriptor
+ *
+ * This routine will signal the workqueue to start decoding stored ir data.
+ */
+void ir_raw_event_handle(struct input_dev *input_dev)
+{
+ struct ir_input_dev *ir = input_get_drvdata(input_dev);
+
+ if (!ir->raw)
+ return;
+
+ schedule_work(&ir->raw->rx_work);
+}
+EXPORT_SYMBOL_GPL(ir_raw_event_handle);
+
+/*
+ * Extension interface - used to register the IR decoders
+ */
+
+int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler)
+{
+ spin_lock(&ir_raw_handler_lock);
+ list_add_tail(&ir_raw_handler->list, &ir_raw_handler_list);
+ spin_unlock(&ir_raw_handler_lock);
+ return 0;
+}
+EXPORT_SYMBOL(ir_raw_handler_register);
+
+void ir_raw_handler_unregister(struct ir_raw_handler *ir_raw_handler)
+{
+ spin_lock(&ir_raw_handler_lock);
+ list_del(&ir_raw_handler->list);
+ spin_unlock(&ir_raw_handler_lock);
+}
+EXPORT_SYMBOL(ir_raw_handler_unregister);
+
+#ifdef MODULE
+static void init_decoders(struct work_struct *work)
+{
+ /* Load the decoder modules */
+
+ load_nec_decode();
+ load_rc5_decode();
+ load_rc6_decode();
+ load_jvc_decode();
+ load_sony_decode();
+
+ /* If needed, we may later add some init code here. In that case,
+ the CONFIG_MODULE test at ir-core.h will need to be changed as well.
+ */
+}
+#endif
+
+void ir_raw_init(void)
+{
+#ifdef MODULE
+ INIT_WORK(&wq_load, init_decoders);
+ schedule_work(&wq_load);
+#endif
+}
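A hypothetical sketch of how an edge-interrupt driven receiver would feed this layer (the register read is made up; IR_PULSE and IR_SPACE are assumed to be the raw_event_type flags that ir_raw_event_store_edge() tests above):

static irqreturn_t my_ir_isr(int irq, void *dev_id)
{
	struct input_dev *input_dev = dev_id;
	bool line_active = my_read_rx_line();	/* hypothetical: receiver level after the edge */

	/* the period that starts now is a pulse if the line just went active */
	ir_raw_event_store_edge(input_dev,
				line_active ? IR_PULSE : IR_SPACE);

	/* let the workqueue run the registered decoders outside irq context */
	ir_raw_event_handle(input_dev);

	return IRQ_HANDLED;
}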
diff --git a/drivers/media/IR/ir-rc5-decoder.c b/drivers/media/IR/ir-rc5-decoder.c
new file mode 100644
index 000000000000..23cdb1b1a3bc
--- /dev/null
+++ b/drivers/media/IR/ir-rc5-decoder.c
@@ -0,0 +1,324 @@
+/* ir-rc5-decoder.c - handle RC5(x) IR Pulse/Space protocol
+ *
+ * Copyright (C) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * This code handles the 14-bit RC5 protocol and the 20-bit RC5x protocol.
+ * There are other variants that use a different number of bits;
+ * those are currently unsupported.
+ * It assumes a carrier of 36 kHz, with a total of 14/20 bits, where
+ * the first two bits are start bits and the third one is a filler bit.
+ */
+
+#include "ir-core-priv.h"
+
+#define RC5_NBITS 14
+#define RC5X_NBITS 20
+#define CHECK_RC5X_NBITS 8
+#define RC5_UNIT 888888 /* ns */
+#define RC5_BIT_START (1 * RC5_UNIT)
+#define RC5_BIT_END (1 * RC5_UNIT)
+#define RC5X_SPACE (4 * RC5_UNIT)
+
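For orientation, the RC5 timing numbers implied by the definitions above (a sketch, not part of the decoder):

/*
 * RC5_UNIT = 888.888 us = 32 cycles of the 36 kHz carrier.  Every bit is
 * Manchester-coded as one unit at one level plus one unit at the other,
 * so a bit lasts ~1.78 ms, a 14-bit RC5 frame ~24.9 ms, and a 20-bit RC5x
 * frame ~35.6 ms plus the 4-unit (~3.6 ms) RC5X_SPACE gap after the first
 * 8 bits.  The state machine below therefore only has to tell "about one
 * unit" from "about two units" (and the longer RC5x gap).
 */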
+/* Used to register rc5_decoder clients */
+static LIST_HEAD(decoder_list);
+static DEFINE_SPINLOCK(decoder_lock);
+
+enum rc5_state {
+ STATE_INACTIVE,
+ STATE_BIT_START,
+ STATE_BIT_END,
+ STATE_CHECK_RC5X,
+ STATE_FINISHED,
+};
+
+struct decoder_data {
+ struct list_head list;
+ struct ir_input_dev *ir_dev;
+ int enabled:1;
+
+ /* State machine control */
+ enum rc5_state state;
+ u32 rc5_bits;
+ struct ir_raw_event prev_ev;
+ unsigned count;
+ unsigned wanted_bits;
+};
+
+
+/**
+ * get_decoder_data() - gets decoder data
+ * @input_dev: input device
+ *
+ * Returns the struct decoder_data that corresponds to a device
+ */
+
+static struct decoder_data *get_decoder_data(struct ir_input_dev *ir_dev)
+{
+ struct decoder_data *data = NULL;
+
+ spin_lock(&decoder_lock);
+ list_for_each_entry(data, &decoder_list, list) {
+ if (data->ir_dev == ir_dev)
+ break;
+ }
+ spin_unlock(&decoder_lock);
+ return data;
+}
+
+static ssize_t store_enabled(struct device *d,
+ struct device_attribute *mattr,
+ const char *buf,
+ size_t len)
+{
+ unsigned long value;
+ struct ir_input_dev *ir_dev = dev_get_drvdata(d);
+ struct decoder_data *data = get_decoder_data(ir_dev);
+
+ if (!data)
+ return -EINVAL;
+
+ if (strict_strtoul(buf, 10, &value) || value > 1)
+ return -EINVAL;
+
+ data->enabled = value;
+
+ return len;
+}
+
+static ssize_t show_enabled(struct device *d,
+ struct device_attribute *mattr, char *buf)
+{
+ struct ir_input_dev *ir_dev = dev_get_drvdata(d);
+ struct decoder_data *data = get_decoder_data(ir_dev);
+
+ if (!data)
+ return -EINVAL;
+
+ if (data->enabled)
+ return sprintf(buf, "1\n");
+ else
+ return sprintf(buf, "0\n");
+}
+
+static DEVICE_ATTR(enabled, S_IRUGO | S_IWUSR, show_enabled, store_enabled);
+
+static struct attribute *decoder_attributes[] = {
+ &dev_attr_enabled.attr,
+ NULL
+};
+
+static struct attribute_group decoder_attribute_group = {
+ .name = "rc5_decoder",
+ .attrs = decoder_attributes,
+};
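+
+/*
+ * The group above appears under the rc class device, so, assuming the
+ * receiver registered as rc0, this decoder can be disabled at runtime
+ * with e.g.: echo 0 > /sys/class/rc/rc0/rc5_decoder/enabled
+ */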
+
+/**
+ * ir_rc5_decode() - Decode one RC-5 pulse or space
+ * @input_dev: the struct input_dev descriptor of the device
+ * @ev: the struct ir_raw_event descriptor of the pulse/space
+ *
+ * This function returns -EINVAL if the pulse violates the state machine
+ */
+static int ir_rc5_decode(struct input_dev *input_dev, struct ir_raw_event ev)
+{
+ struct decoder_data *data;
+ struct ir_input_dev *ir_dev = input_get_drvdata(input_dev);
+ u8 toggle;
+ u32 scancode;
+
+ data = get_decoder_data(ir_dev);
+ if (!data)
+ return -EINVAL;
+
+ if (!data->enabled)
+ return 0;
+
+ if (IS_RESET(ev)) {
+ data->state = STATE_INACTIVE;
+ return 0;
+ }
+
+ if (!geq_margin(ev.duration, RC5_UNIT, RC5_UNIT / 2))
+ goto out;
+
+again:
+ IR_dprintk(2, "RC5(x) decode started at state %i (%uus %s)\n",
+ data->state, TO_US(ev.duration), TO_STR(ev.pulse));
+
+ if (!geq_margin(ev.duration, RC5_UNIT, RC5_UNIT / 2))
+ return 0;
+
+ switch (data->state) {
+
+ case STATE_INACTIVE:
+ if (!ev.pulse)
+ break;
+
+ data->state = STATE_BIT_START;
+ data->count = 1;
+ /* We just need enough bits to get to STATE_CHECK_RC5X */
+ data->wanted_bits = RC5X_NBITS;
+ decrease_duration(&ev, RC5_BIT_START);
+ goto again;
+
+ case STATE_BIT_START:
+ if (!eq_margin(ev.duration, RC5_BIT_START, RC5_UNIT / 2))
+ break;
+
+ data->rc5_bits <<= 1;
+ if (!ev.pulse)
+ data->rc5_bits |= 1;
+ data->count++;
+ data->prev_ev = ev;
+ data->state = STATE_BIT_END;
+ return 0;
+
+ case STATE_BIT_END:
+ if (!is_transition(&ev, &data->prev_ev))
+ break;
+
+ if (data->count == data->wanted_bits)
+ data->state = STATE_FINISHED;
+ else if (data->count == CHECK_RC5X_NBITS)
+ data->state = STATE_CHECK_RC5X;
+ else
+ data->state = STATE_BIT_START;
+
+ decrease_duration(&ev, RC5_BIT_END);
+ goto again;
+
+ case STATE_CHECK_RC5X:
+ if (!ev.pulse && geq_margin(ev.duration, RC5X_SPACE, RC5_UNIT / 2)) {
+ /* RC5X */
+ data->wanted_bits = RC5X_NBITS;
+ decrease_duration(&ev, RC5X_SPACE);
+ } else {
+ /* RC5 */
+ data->wanted_bits = RC5_NBITS;
+ }
+ data->state = STATE_BIT_START;
+ goto again;
+
+ case STATE_FINISHED:
+ if (ev.pulse)
+ break;
+
+ if (data->wanted_bits == RC5X_NBITS) {
+ /* RC5X */
+ u8 xdata, command, system;
+ xdata = (data->rc5_bits & 0x0003F) >> 0;
+ command = (data->rc5_bits & 0x00FC0) >> 6;
+ system = (data->rc5_bits & 0x1F000) >> 12;
+ toggle = (data->rc5_bits & 0x20000) ? 1 : 0;
+ command += (data->rc5_bits & 0x01000) ? 0 : 0x40;
+ scancode = system << 16 | command << 8 | xdata;
+
+ IR_dprintk(1, "RC5X scancode 0x%06x (toggle: %u)\n",
+ scancode, toggle);
+
+ } else {
+ /* RC5 */
+ u8 command, system;
+ command = (data->rc5_bits & 0x0003F) >> 0;
+ system = (data->rc5_bits & 0x007C0) >> 6;
+ toggle = (data->rc5_bits & 0x00800) ? 1 : 0;
+ command += (data->rc5_bits & 0x01000) ? 0 : 0x40;
+ scancode = system << 8 | command;
+
+ IR_dprintk(1, "RC5 scancode 0x%04x (toggle: %u)\n",
+ scancode, toggle);
+ }
+
+ ir_keydown(input_dev, scancode, toggle);
+ data->state = STATE_INACTIVE;
+ return 0;
+ }
+
+out:
+ IR_dprintk(1, "RC5(x) decode failed at state %i (%uus %s)\n",
+ data->state, TO_US(ev.duration), TO_STR(ev.pulse));
+ data->state = STATE_INACTIVE;
+ return -EINVAL;
+}
+
+static int ir_rc5_register(struct input_dev *input_dev)
+{
+ struct ir_input_dev *ir_dev = input_get_drvdata(input_dev);
+ struct decoder_data *data;
+ int rc;
+
+ rc = sysfs_create_group(&ir_dev->dev.kobj, &decoder_attribute_group);
+ if (rc < 0)
+ return rc;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ sysfs_remove_group(&ir_dev->dev.kobj, &decoder_attribute_group);
+ return -ENOMEM;
+ }
+
+ data->ir_dev = ir_dev;
+ data->enabled = 1;
+
+ spin_lock(&decoder_lock);
+ list_add_tail(&data->list, &decoder_list);
+ spin_unlock(&decoder_lock);
+
+ return 0;
+}
+
+static int ir_rc5_unregister(struct input_dev *input_dev)
+{
+ struct ir_input_dev *ir_dev = input_get_drvdata(input_dev);
+ struct decoder_data *data;
+
+ data = get_decoder_data(ir_dev);
+ if (!data)
+ return 0;
+
+ sysfs_remove_group(&ir_dev->dev.kobj, &decoder_attribute_group);
+
+ spin_lock(&decoder_lock);
+ list_del(&data->list);
+ spin_unlock(&decoder_lock);
+
+ return 0;
+}
+
+static struct ir_raw_handler rc5_handler = {
+ .decode = ir_rc5_decode,
+ .raw_register = ir_rc5_register,
+ .raw_unregister = ir_rc5_unregister,
+};
+
+static int __init ir_rc5_decode_init(void)
+{
+ ir_raw_handler_register(&rc5_handler);
+
+ printk(KERN_INFO "IR RC5(x) protocol handler initialized\n");
+ return 0;
+}
+
+static void __exit ir_rc5_decode_exit(void)
+{
+ ir_raw_handler_unregister(&rc5_handler);
+}
+
+module_init(ir_rc5_decode_init);
+module_exit(ir_rc5_decode_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)");
+MODULE_DESCRIPTION("RC5(x) IR protocol decoder");
diff --git a/drivers/media/IR/ir-rc6-decoder.c b/drivers/media/IR/ir-rc6-decoder.c
new file mode 100644
index 000000000000..2bf479f4f1bc
--- /dev/null
+++ b/drivers/media/IR/ir-rc6-decoder.c
@@ -0,0 +1,419 @@
+/* ir-rc6-decoder.c - A decoder for the RC6 IR protocol
+ *
+ * Copyright (C) 2010 by David Härdeman <david@hardeman.nu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "ir-core-priv.h"
+
+/*
+ * This decoder currently supports:
+ * RC6-0-16 (standard toggle bit in header)
+ * RC6-6A-24 (no toggle bit)
+ * RC6-6A-32 (MCE version with toggle bit in body)
+ */
+
+#define RC6_UNIT 444444 /* ns */
+#define RC6_HEADER_NBITS 4 /* not including toggle bit */
+#define RC6_0_NBITS 16
+#define RC6_6A_SMALL_NBITS 24
+#define RC6_6A_LARGE_NBITS 32
+#define RC6_PREFIX_PULSE (6 * RC6_UNIT)
+#define RC6_PREFIX_SPACE (2 * RC6_UNIT)
+#define RC6_BIT_START (1 * RC6_UNIT)
+#define RC6_BIT_END (1 * RC6_UNIT)
+#define RC6_TOGGLE_START (2 * RC6_UNIT)
+#define RC6_TOGGLE_END (2 * RC6_UNIT)
+#define RC6_MODE_MASK 0x07 /* for the header bits */
+#define RC6_STARTBIT_MASK 0x08 /* for the header bits */
+#define RC6_6A_MCE_TOGGLE_MASK 0x8000 /* for the body bits */
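+
+/*
+ * RC6 also uses a 36 kHz carrier; one unit is 16 carrier cycles:
+ * 16 / 36000 Hz is roughly 444.4 us, hence RC6_UNIT above.
+ */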
+
+/* Used to register rc6_decoder clients */
+static LIST_HEAD(decoder_list);
+static DEFINE_SPINLOCK(decoder_lock);
+
+enum rc6_mode {
+ RC6_MODE_0,
+ RC6_MODE_6A,
+ RC6_MODE_UNKNOWN,
+};
+
+enum rc6_state {
+ STATE_INACTIVE,
+ STATE_PREFIX_SPACE,
+ STATE_HEADER_BIT_START,
+ STATE_HEADER_BIT_END,
+ STATE_TOGGLE_START,
+ STATE_TOGGLE_END,
+ STATE_BODY_BIT_START,
+ STATE_BODY_BIT_END,
+ STATE_FINISHED,
+};
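+
+/*
+ * An RC6 frame starts with a 6 unit leader pulse and a 2 unit space,
+ * followed by a start bit and 3 mode bits (the "header"), a double
+ * width toggle bit and then the body bits. The states above map to
+ * those phases; ir_rc6_decode() below walks them one pulse/space at
+ * a time.
+ */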
+
+struct decoder_data {
+ struct list_head list;
+ struct ir_input_dev *ir_dev;
+ bool enabled;
+
+ /* State machine control */
+ enum rc6_state state;
+ u8 header;
+ u32 body;
+ struct ir_raw_event prev_ev;
+ bool toggle;
+ unsigned count;
+ unsigned wanted_bits;
+};
+
+
+/**
+ * get_decoder_data() - gets decoder data
+ * @input_dev: input device
+ *
+ * Returns the struct decoder_data that corresponds to a device
+ */
+static struct decoder_data *get_decoder_data(struct ir_input_dev *ir_dev)
+{
+ struct decoder_data *data = NULL;
+
+ spin_lock(&decoder_lock);
+ list_for_each_entry(data, &decoder_list, list) {
+ if (data->ir_dev == ir_dev)
+ break;
+ }
+ spin_unlock(&decoder_lock);
+ return data;
+}
+
+static ssize_t store_enabled(struct device *d,
+ struct device_attribute *mattr,
+ const char *buf,
+ size_t len)
+{
+ unsigned long value;
+ struct ir_input_dev *ir_dev = dev_get_drvdata(d);
+ struct decoder_data *data = get_decoder_data(ir_dev);
+
+ if (!data)
+ return -EINVAL;
+
+ if (strict_strtoul(buf, 10, &value) || value > 1)
+ return -EINVAL;
+
+ data->enabled = value;
+
+ return len;
+}
+
+static ssize_t show_enabled(struct device *d,
+ struct device_attribute *mattr, char *buf)
+{
+ struct ir_input_dev *ir_dev = dev_get_drvdata(d);
+ struct decoder_data *data = get_decoder_data(ir_dev);
+
+ if (!data)
+ return -EINVAL;
+
+ if (data->enabled)
+ return sprintf(buf, "1\n");
+ else
+ return sprintf(buf, "0\n");
+}
+
+static DEVICE_ATTR(enabled, S_IRUGO | S_IWUSR, show_enabled, store_enabled);
+
+static struct attribute *decoder_attributes[] = {
+ &dev_attr_enabled.attr,
+ NULL
+};
+
+static struct attribute_group decoder_attribute_group = {
+ .name = "rc6_decoder",
+ .attrs = decoder_attributes,
+};
+
+static enum rc6_mode rc6_mode(struct decoder_data *data)
+{
+ switch (data->header & RC6_MODE_MASK) {
+ case 0:
+ return RC6_MODE_0;
+ case 6:
+ if (!data->toggle)
+ return RC6_MODE_6A;
+ /* fall through */
+ default:
+ return RC6_MODE_UNKNOWN;
+ }
+}
+
+/**
+ * ir_rc6_decode() - Decode one RC6 pulse or space
+ * @input_dev: the struct input_dev descriptor of the device
+ * @ev: the struct ir_raw_event descriptor of the pulse/space
+ *
+ * This function returns -EINVAL if the pulse violates the state machine
+ */
+static int ir_rc6_decode(struct input_dev *input_dev, struct ir_raw_event ev)
+{
+ struct decoder_data *data;
+ struct ir_input_dev *ir_dev = input_get_drvdata(input_dev);
+ u32 scancode;
+ u8 toggle;
+
+ data = get_decoder_data(ir_dev);
+ if (!data)
+ return -EINVAL;
+
+ if (!data->enabled)
+ return 0;
+
+ if (IS_RESET(ev)) {
+ data->state = STATE_INACTIVE;
+ return 0;
+ }
+
+ if (!geq_margin(ev.duration, RC6_UNIT, RC6_UNIT / 2))
+ goto out;
+
+again:
+ IR_dprintk(2, "RC6 decode started at state %i (%uus %s)\n",
+ data->state, TO_US(ev.duration), TO_STR(ev.pulse));
+
+ if (!geq_margin(ev.duration, RC6_UNIT, RC6_UNIT / 2))
+ return 0;
+
+ switch (data->state) {
+
+ case STATE_INACTIVE:
+ if (!ev.pulse)
+ break;
+
+ /*
+ * Note: larger margin on first pulse since each RC6_UNIT
+ * is quite short and some hardware takes some time to
+ * adjust to the signal
+ */
+ if (!eq_margin(ev.duration, RC6_PREFIX_PULSE, RC6_UNIT))
+ break;
+
+ data->state = STATE_PREFIX_SPACE;
+ data->count = 0;
+ return 0;
+
+ case STATE_PREFIX_SPACE:
+ if (ev.pulse)
+ break;
+
+ if (!eq_margin(ev.duration, RC6_PREFIX_SPACE, RC6_UNIT / 2))
+ break;
+
+ data->state = STATE_HEADER_BIT_START;
+ return 0;
+
+ case STATE_HEADER_BIT_START:
+ if (!eq_margin(ev.duration, RC6_BIT_START, RC6_UNIT / 2))
+ break;
+
+ data->header <<= 1;
+ if (ev.pulse)
+ data->header |= 1;
+ data->count++;
+ data->prev_ev = ev;
+ data->state = STATE_HEADER_BIT_END;
+ return 0;
+
+ case STATE_HEADER_BIT_END:
+ if (!is_transition(&ev, &data->prev_ev))
+ break;
+
+ if (data->count == RC6_HEADER_NBITS)
+ data->state = STATE_TOGGLE_START;
+ else
+ data->state = STATE_HEADER_BIT_START;
+
+ decrease_duration(&ev, RC6_BIT_END);
+ goto again;
+
+ case STATE_TOGGLE_START:
+ if (!eq_margin(ev.duration, RC6_TOGGLE_START, RC6_UNIT / 2))
+ break;
+
+ data->toggle = ev.pulse;
+ data->prev_ev = ev;
+ data->state = STATE_TOGGLE_END;
+ return 0;
+
+ case STATE_TOGGLE_END:
+ if (!is_transition(&ev, &data->prev_ev) ||
+ !geq_margin(ev.duration, RC6_TOGGLE_END, RC6_UNIT / 2))
+ break;
+
+ if (!(data->header & RC6_STARTBIT_MASK)) {
+ IR_dprintk(1, "RC6 invalid start bit\n");
+ break;
+ }
+
+ data->state = STATE_BODY_BIT_START;
+ data->prev_ev = ev;
+ decrease_duration(&ev, RC6_TOGGLE_END);
+ data->count = 0;
+
+ switch (rc6_mode(data)) {
+ case RC6_MODE_0:
+ data->wanted_bits = RC6_0_NBITS;
+ break;
+ case RC6_MODE_6A:
+ /*
+ * This might look weird, but we basically check the value
+ * of the first body bit to determine the number of bits in
+ * mode 6A.
+ */
+ if ((!ev.pulse && !geq_margin(ev.duration, RC6_UNIT, RC6_UNIT / 2)) ||
+ geq_margin(ev.duration, RC6_UNIT, RC6_UNIT / 2))
+ data->wanted_bits = RC6_6A_LARGE_NBITS;
+ else
+ data->wanted_bits = RC6_6A_SMALL_NBITS;
+ break;
+ default:
+ IR_dprintk(1, "RC6 unknown mode\n");
+ goto out;
+ }
+ goto again;
+
+ case STATE_BODY_BIT_START:
+ if (!eq_margin(ev.duration, RC6_BIT_START, RC6_UNIT / 2))
+ break;
+
+ data->body <<= 1;
+ if (ev.pulse)
+ data->body |= 1;
+ data->count++;
+ data->prev_ev = ev;
+
+ data->state = STATE_BODY_BIT_END;
+ return 0;
+
+ case STATE_BODY_BIT_END:
+ if (!is_transition(&ev, &data->prev_ev))
+ break;
+
+ if (data->count == data->wanted_bits)
+ data->state = STATE_FINISHED;
+ else
+ data->state = STATE_BODY_BIT_START;
+
+ decrease_duration(&ev, RC6_BIT_END);
+ goto again;
+
+ case STATE_FINISHED:
+ if (ev.pulse)
+ break;
+
+ switch (rc6_mode(data)) {
+ case RC6_MODE_0:
+ scancode = data->body & 0xffff;
+ toggle = data->toggle;
+ IR_dprintk(1, "RC6(0) scancode 0x%04x (toggle: %u)\n",
+ scancode, toggle);
+ break;
+ case RC6_MODE_6A:
+ if (data->wanted_bits == RC6_6A_LARGE_NBITS) {
+ toggle = data->body & RC6_6A_MCE_TOGGLE_MASK ? 1 : 0;
+ scancode = data->body & ~RC6_6A_MCE_TOGGLE_MASK;
+ } else {
+ toggle = 0;
+ scancode = data->body & 0xffffff;
+ }
+
+ IR_dprintk(1, "RC6(6A) scancode 0x%08x (toggle: %u)\n",
+ scancode, toggle);
+ break;
+ default:
+ IR_dprintk(1, "RC6 unknown mode\n");
+ goto out;
+ }
+
+ ir_keydown(input_dev, scancode, toggle);
+ data->state = STATE_INACTIVE;
+ return 0;
+ }
+
+out:
+ IR_dprintk(1, "RC6 decode failed at state %i (%uus %s)\n",
+ data->state, TO_US(ev.duration), TO_STR(ev.pulse));
+ data->state = STATE_INACTIVE;
+ return -EINVAL;
+}
+
+static int ir_rc6_register(struct input_dev *input_dev)
+{
+ struct ir_input_dev *ir_dev = input_get_drvdata(input_dev);
+ struct decoder_data *data;
+ int rc;
+
+ rc = sysfs_create_group(&ir_dev->dev.kobj, &decoder_attribute_group);
+ if (rc < 0)
+ return rc;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ sysfs_remove_group(&ir_dev->dev.kobj, &decoder_attribute_group);
+ return -ENOMEM;
+ }
+
+ data->ir_dev = ir_dev;
+ data->enabled = 1;
+
+ spin_lock(&decoder_lock);
+ list_add_tail(&data->list, &decoder_list);
+ spin_unlock(&decoder_lock);
+
+ return 0;
+}
+
+static int ir_rc6_unregister(struct input_dev *input_dev)
+{
+ struct ir_input_dev *ir_dev = input_get_drvdata(input_dev);
+ struct decoder_data *data;
+
+ data = get_decoder_data(ir_dev);
+ if (!data)
+ return 0;
+
+ sysfs_remove_group(&ir_dev->dev.kobj, &decoder_attribute_group);
+
+ spin_lock(&decoder_lock);
+ list_del(&data->list);
+ spin_unlock(&decoder_lock);
+
+ return 0;
+}
+
+static struct ir_raw_handler rc6_handler = {
+ .decode = ir_rc6_decode,
+ .raw_register = ir_rc6_register,
+ .raw_unregister = ir_rc6_unregister,
+};
+
+static int __init ir_rc6_decode_init(void)
+{
+ ir_raw_handler_register(&rc6_handler);
+
+ printk(KERN_INFO "IR RC6 protocol handler initialized\n");
+ return 0;
+}
+
+static void __exit ir_rc6_decode_exit(void)
+{
+ ir_raw_handler_unregister(&rc6_handler);
+}
+
+module_init(ir_rc6_decode_init);
+module_exit(ir_rc6_decode_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Härdeman <david@hardeman.nu>");
+MODULE_DESCRIPTION("RC6 IR protocol decoder");
diff --git a/drivers/media/IR/ir-sony-decoder.c b/drivers/media/IR/ir-sony-decoder.c
new file mode 100644
index 000000000000..9f440c5c060d
--- /dev/null
+++ b/drivers/media/IR/ir-sony-decoder.c
@@ -0,0 +1,312 @@
+/* ir-sony-decoder.c - handle Sony IR Pulse/Space protocol
+ *
+ * Copyright (C) 2010 by David Härdeman <david@hardeman.nu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitrev.h>
+#include "ir-core-priv.h"
+
+#define SONY_UNIT 600000 /* ns */
+#define SONY_HEADER_PULSE (4 * SONY_UNIT)
+#define SONY_HEADER_SPACE (1 * SONY_UNIT)
+#define SONY_BIT_0_PULSE (1 * SONY_UNIT)
+#define SONY_BIT_1_PULSE (2 * SONY_UNIT)
+#define SONY_BIT_SPACE (1 * SONY_UNIT)
+#define SONY_TRAILER_SPACE (10 * SONY_UNIT) /* minimum */
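+
+/*
+ * Sony SIRC timing in units of SONY_UNIT (600 us): the header is a
+ * 4 unit pulse followed by a 1 unit space; a "0" bit is a 1 unit pulse
+ * and a "1" bit a 2 unit pulse, each followed by a 1 unit space. The
+ * 12, 15 and 20 bit variants are told apart by the bit count in the
+ * state machine below.
+ */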
+
+/* Used to register sony_decoder clients */
+static LIST_HEAD(decoder_list);
+static DEFINE_SPINLOCK(decoder_lock);
+
+enum sony_state {
+ STATE_INACTIVE,
+ STATE_HEADER_SPACE,
+ STATE_BIT_PULSE,
+ STATE_BIT_SPACE,
+ STATE_FINISHED,
+};
+
+struct decoder_data {
+ struct list_head list;
+ struct ir_input_dev *ir_dev;
+ bool enabled;
+
+ /* State machine control */
+ enum sony_state state;
+ u32 sony_bits;
+ unsigned count;
+};
+
+
+/**
+ * get_decoder_data() - gets decoder data
+ * @input_dev: input device
+ *
+ * Returns the struct decoder_data that corresponds to a device
+ */
+static struct decoder_data *get_decoder_data(struct ir_input_dev *ir_dev)
+{
+ struct decoder_data *data = NULL;
+
+ spin_lock(&decoder_lock);
+ list_for_each_entry(data, &decoder_list, list) {
+ if (data->ir_dev == ir_dev)
+ break;
+ }
+ spin_unlock(&decoder_lock);
+ return data;
+}
+
+static ssize_t store_enabled(struct device *d,
+ struct device_attribute *mattr,
+ const char *buf,
+ size_t len)
+{
+ unsigned long value;
+ struct ir_input_dev *ir_dev = dev_get_drvdata(d);
+ struct decoder_data *data = get_decoder_data(ir_dev);
+
+ if (!data)
+ return -EINVAL;
+
+ if (strict_strtoul(buf, 10, &value) || value > 1)
+ return -EINVAL;
+
+ data->enabled = value;
+
+ return len;
+}
+
+static ssize_t show_enabled(struct device *d,
+ struct device_attribute *mattr, char *buf)
+{
+ struct ir_input_dev *ir_dev = dev_get_drvdata(d);
+ struct decoder_data *data = get_decoder_data(ir_dev);
+
+ if (!data)
+ return -EINVAL;
+
+ if (data->enabled)
+ return sprintf(buf, "1\n");
+ else
+ return sprintf(buf, "0\n");
+}
+
+static DEVICE_ATTR(enabled, S_IRUGO | S_IWUSR, show_enabled, store_enabled);
+
+static struct attribute *decoder_attributes[] = {
+ &dev_attr_enabled.attr,
+ NULL
+};
+
+static struct attribute_group decoder_attribute_group = {
+ .name = "sony_decoder",
+ .attrs = decoder_attributes,
+};
+
+/**
+ * ir_sony_decode() - Decode one Sony pulse or space
+ * @input_dev: the struct input_dev descriptor of the device
+ * @ev: the struct ir_raw_event descriptor of the pulse/space
+ *
+ * This function returns -EINVAL if the pulse violates the state machine
+ */
+static int ir_sony_decode(struct input_dev *input_dev, struct ir_raw_event ev)
+{
+ struct decoder_data *data;
+ struct ir_input_dev *ir_dev = input_get_drvdata(input_dev);
+ u32 scancode;
+ u8 device, subdevice, function;
+
+ data = get_decoder_data(ir_dev);
+ if (!data)
+ return -EINVAL;
+
+ if (!data->enabled)
+ return 0;
+
+ if (IS_RESET(ev)) {
+ data->state = STATE_INACTIVE;
+ return 0;
+ }
+
+ if (!geq_margin(ev.duration, SONY_UNIT, SONY_UNIT / 2))
+ goto out;
+
+ IR_dprintk(2, "Sony decode started at state %d (%uus %s)\n",
+ data->state, TO_US(ev.duration), TO_STR(ev.pulse));
+
+ switch (data->state) {
+
+ case STATE_INACTIVE:
+ if (!ev.pulse)
+ break;
+
+ if (!eq_margin(ev.duration, SONY_HEADER_PULSE, SONY_UNIT / 2))
+ break;
+
+ data->count = 0;
+ data->state = STATE_HEADER_SPACE;
+ return 0;
+
+ case STATE_HEADER_SPACE:
+ if (ev.pulse)
+ break;
+
+ if (!eq_margin(ev.duration, SONY_HEADER_SPACE, SONY_UNIT / 2))
+ break;
+
+ data->state = STATE_BIT_PULSE;
+ return 0;
+
+ case STATE_BIT_PULSE:
+ if (!ev.pulse)
+ break;
+
+ data->sony_bits <<= 1;
+ if (eq_margin(ev.duration, SONY_BIT_1_PULSE, SONY_UNIT / 2))
+ data->sony_bits |= 1;
+ else if (!eq_margin(ev.duration, SONY_BIT_0_PULSE, SONY_UNIT / 2))
+ break;
+
+ data->count++;
+ data->state = STATE_BIT_SPACE;
+ return 0;
+
+ case STATE_BIT_SPACE:
+ if (ev.pulse)
+ break;
+
+ if (!geq_margin(ev.duration, SONY_BIT_SPACE, SONY_UNIT / 2))
+ break;
+
+ decrease_duration(&ev, SONY_BIT_SPACE);
+
+ if (!geq_margin(ev.duration, SONY_UNIT, SONY_UNIT / 2)) {
+ data->state = STATE_BIT_PULSE;
+ return 0;
+ }
+
+ data->state = STATE_FINISHED;
+ /* Fall through */
+
+ case STATE_FINISHED:
+ if (ev.pulse)
+ break;
+
+ if (!geq_margin(ev.duration, SONY_TRAILER_SPACE, SONY_UNIT / 2))
+ break;
+
+ switch (data->count) {
+ case 12:
+ device = bitrev8((data->sony_bits << 3) & 0xF8);
+ subdevice = 0;
+ function = bitrev8((data->sony_bits >> 4) & 0xFE);
+ break;
+ case 15:
+ device = bitrev8((data->sony_bits >> 0) & 0xFF);
+ subdevice = 0;
+ function = bitrev8((data->sony_bits >> 7) & 0xFD);
+ break;
+ case 20:
+ device = bitrev8((data->sony_bits >> 5) & 0xF8);
+ subdevice = bitrev8((data->sony_bits >> 0) & 0xFF);
+ function = bitrev8((data->sony_bits >> 12) & 0xFE);
+ break;
+ default:
+ IR_dprintk(1, "Sony invalid bitcount %u\n", data->count);
+ goto out;
+ }
+
+ scancode = device << 16 | subdevice << 8 | function;
+ IR_dprintk(1, "Sony(%u) scancode 0x%05x\n", data->count, scancode);
+ ir_keydown(input_dev, scancode, 0);
+ data->state = STATE_INACTIVE;
+ return 0;
+ }
+
+out:
+ IR_dprintk(1, "Sony decode failed at state %d (%uus %s)\n",
+ data->state, TO_US(ev.duration), TO_STR(ev.pulse));
+ data->state = STATE_INACTIVE;
+ return -EINVAL;
+}
+
+static int ir_sony_register(struct input_dev *input_dev)
+{
+ struct ir_input_dev *ir_dev = input_get_drvdata(input_dev);
+ struct decoder_data *data;
+ int rc;
+
+ rc = sysfs_create_group(&ir_dev->dev.kobj, &decoder_attribute_group);
+ if (rc < 0)
+ return rc;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ sysfs_remove_group(&ir_dev->dev.kobj, &decoder_attribute_group);
+ return -ENOMEM;
+ }
+
+ data->ir_dev = ir_dev;
+ data->enabled = 1;
+
+ spin_lock(&decoder_lock);
+ list_add_tail(&data->list, &decoder_list);
+ spin_unlock(&decoder_lock);
+
+ return 0;
+}
+
+static int ir_sony_unregister(struct input_dev *input_dev)
+{
+ struct ir_input_dev *ir_dev = input_get_drvdata(input_dev);
+ struct decoder_data *data;
+
+ data = get_decoder_data(ir_dev);
+ if (!data)
+ return 0;
+
+ sysfs_remove_group(&ir_dev->dev.kobj, &decoder_attribute_group);
+
+ spin_lock(&decoder_lock);
+ list_del(&data->list);
+ spin_unlock(&decoder_lock);
+
+ return 0;
+}
+
+static struct ir_raw_handler sony_handler = {
+ .decode = ir_sony_decode,
+ .raw_register = ir_sony_register,
+ .raw_unregister = ir_sony_unregister,
+};
+
+static int __init ir_sony_decode_init(void)
+{
+ ir_raw_handler_register(&sony_handler);
+
+ printk(KERN_INFO "IR Sony protocol handler initialized\n");
+ return 0;
+}
+
+static void __exit ir_sony_decode_exit(void)
+{
+ ir_raw_handler_unregister(&sony_handler);
+}
+
+module_init(ir_sony_decode_init);
+module_exit(ir_sony_decode_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Härdeman <david@hardeman.nu>");
+MODULE_DESCRIPTION("Sony IR protocol decoder");
diff --git a/drivers/media/IR/ir-sysfs.c b/drivers/media/IR/ir-sysfs.c
index e14e6c486b52..d7da63e16c92 100644
--- a/drivers/media/IR/ir-sysfs.c
+++ b/drivers/media/IR/ir-sysfs.c
@@ -1,6 +1,6 @@
-/* ir-register.c - handle IR scancode->keycode tables
+/* ir-sysfs.c - sysfs interface for RC devices (/sys/class/rc)
*
- * Copyright (C) 2009 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ * Copyright (C) 2009-2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -15,15 +15,23 @@
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/device.h>
-#include <media/ir-core.h>
+#include "ir-core-priv.h"
#define IRRCV_NUM_DEVICES 256
/* bit array to represent IR sysfs device number */
static unsigned long ir_core_dev_number;
-/* class for /sys/class/irrcv */
-static struct class *ir_input_class;
+/* class for /sys/class/rc */
+static char *ir_devnode(struct device *dev, mode_t *mode)
+{
+ return kasprintf(GFP_KERNEL, "rc/%s", dev_name(dev));
+}
+
+static struct class ir_input_class = {
+ .name = "rc",
+ .devnode = ir_devnode,
+};
/**
* show_protocol() - shows the current IR protocol
@@ -32,7 +40,7 @@ static struct class *ir_input_class;
* @buf: a pointer to the output buffer
*
* This routine is a callback routine for input read the IR protocol type.
- * it is trigged by reading /sys/class/irrcv/irrcv?/current_protocol.
+ * it is triggered by reading /sys/class/rc/rc?/protocol.
* It returns the protocol name, as understood by the driver.
*/
static ssize_t show_protocol(struct device *d,
@@ -48,13 +56,17 @@ static ssize_t show_protocol(struct device *d,
if (ir_type == IR_TYPE_UNKNOWN)
s = "Unknown";
else if (ir_type == IR_TYPE_RC5)
- s = "RC-5";
- else if (ir_type == IR_TYPE_PD)
- s = "Pulse/distance";
+ s = "rc-5";
else if (ir_type == IR_TYPE_NEC)
- s = "NEC";
+ s = "nec";
+ else if (ir_type == IR_TYPE_RC6)
+ s = "rc6";
+ else if (ir_type == IR_TYPE_JVC)
+ s = "jvc";
+ else if (ir_type == IR_TYPE_SONY)
+ s = "sony";
else
- s = "Other";
+ s = "other";
return sprintf(buf, "%s\n", s);
}
@@ -67,7 +79,7 @@ static ssize_t show_protocol(struct device *d,
* @len: length of the input buffer
*
* This routine is a callback routine for changing the IR protocol type.
- * it is trigged by reading /sys/class/irrcv/irrcv?/current_protocol.
+ * it is triggered by writing /sys/class/rc/rc?/protocol.
* It changes the IR the protocol name, if the IR type is recognized
* by the driver.
* If an unknown protocol name is used, returns -EINVAL.
@@ -78,23 +90,24 @@ static ssize_t store_protocol(struct device *d,
size_t len)
{
struct ir_input_dev *ir_dev = dev_get_drvdata(d);
- u64 ir_type = IR_TYPE_UNKNOWN;
+ u64 ir_type = 0;
int rc = -EINVAL;
unsigned long flags;
char *buf;
- buf = strsep((char **) &data, "\n");
-
- if (!strcasecmp(buf, "rc-5"))
- ir_type = IR_TYPE_RC5;
- else if (!strcasecmp(buf, "pd"))
- ir_type = IR_TYPE_PD;
- else if (!strcasecmp(buf, "nec"))
- ir_type = IR_TYPE_NEC;
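+ /*
+ * Several protocol names may be given at once, separated by spaces,
+ * e.g. (assuming the receiver registered as rc0):
+ * echo "rc5 nec" > /sys/class/rc/rc0/protocol
+ */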
+ while ((buf = strsep((char **) &data, " \n")) != NULL) {
+ if (!strcasecmp(buf, "rc-5") || !strcasecmp(buf, "rc5"))
+ ir_type |= IR_TYPE_RC5;
+ if (!strcasecmp(buf, "nec"))
+ ir_type |= IR_TYPE_NEC;
+ if (!strcasecmp(buf, "jvc"))
+ ir_type |= IR_TYPE_JVC;
+ if (!strcasecmp(buf, "sony"))
+ ir_type |= IR_TYPE_SONY;
+ }
- if (ir_type == IR_TYPE_UNKNOWN) {
- IR_dprintk(1, "Error setting protocol to %lld\n",
- (long long)ir_type);
+ if (!ir_type) {
+ IR_dprintk(1, "Unknown protocol\n");
return -EINVAL;
}
@@ -112,25 +125,87 @@ static ssize_t store_protocol(struct device *d,
ir_dev->rc_tab.ir_type = ir_type;
spin_unlock_irqrestore(&ir_dev->rc_tab.lock, flags);
- IR_dprintk(1, "Current protocol is %lld\n",
+ IR_dprintk(1, "Current protocol(s) is(are) %lld\n",
(long long)ir_type);
return len;
}
+static ssize_t show_supported_protocols(struct device *d,
+ struct device_attribute *mattr, char *buf)
+{
+ char *orgbuf = buf;
+ struct ir_input_dev *ir_dev = dev_get_drvdata(d);
+
+ /* FIXME: doesn't support multiple protocols at the same time */
+ if (ir_dev->props->allowed_protos == IR_TYPE_UNKNOWN)
+ buf += sprintf(buf, "unknown ");
+ if (ir_dev->props->allowed_protos & IR_TYPE_RC5)
+ buf += sprintf(buf, "rc-5 ");
+ if (ir_dev->props->allowed_protos & IR_TYPE_NEC)
+ buf += sprintf(buf, "nec ");
+ if (buf == orgbuf)
+ buf += sprintf(buf, "other ");
+
+ buf += sprintf(buf - 1, "\n");
+
+ return buf - orgbuf;
+}
+
+#define ADD_HOTPLUG_VAR(fmt, val...) \
+ do { \
+ int err = add_uevent_var(env, fmt, val); \
+ if (err) \
+ return err; \
+ } while (0)
+
+static int ir_dev_uevent(struct device *device, struct kobj_uevent_env *env)
+{
+ struct ir_input_dev *ir_dev = dev_get_drvdata(device);
+
+ if (ir_dev->rc_tab.name)
+ ADD_HOTPLUG_VAR("NAME=%s", ir_dev->rc_tab.name);
+ if (ir_dev->driver_name)
+ ADD_HOTPLUG_VAR("DRV_NAME=%s", ir_dev->driver_name);
+
+ return 0;
+}
+
/*
* Static device attribute struct with the sysfs attributes for IR's
*/
-static DEVICE_ATTR(current_protocol, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(protocol, S_IRUGO | S_IWUSR,
show_protocol, store_protocol);
-static struct attribute *ir_dev_attrs[] = {
- &dev_attr_current_protocol.attr,
+static DEVICE_ATTR(supported_protocols, S_IRUGO,
+ show_supported_protocols, NULL);
+
+static struct attribute *ir_hw_dev_attrs[] = {
+ &dev_attr_protocol.attr,
+ &dev_attr_supported_protocols.attr,
NULL,
};
+static struct attribute_group ir_hw_dev_attr_grp = {
+ .attrs = ir_hw_dev_attrs,
+};
+
+static const struct attribute_group *ir_hw_dev_attr_groups[] = {
+ &ir_hw_dev_attr_grp,
+ NULL
+};
+
+static struct device_type rc_dev_type = {
+ .groups = ir_hw_dev_attr_groups,
+ .uevent = ir_dev_uevent,
+};
+
+static struct device_type ir_raw_dev_type = {
+ .uevent = ir_dev_uevent,
+};
+
/**
- * ir_register_class() - creates the sysfs for /sys/class/irrcv/irrcv?
+ * ir_register_class() - creates the sysfs for /sys/class/rc/rc?
* @input_dev: the struct input_dev descriptor of the device
*
* This routine is used to register the syfs code for IR class
@@ -138,8 +213,7 @@ static struct attribute *ir_dev_attrs[] = {
int ir_register_class(struct input_dev *input_dev)
{
int rc;
- struct kobject *kobj;
-
+ const char *path;
struct ir_input_dev *ir_dev = input_get_drvdata(input_dev);
int devno = find_first_zero_bit(&ir_core_dev_number,
IRRCV_NUM_DEVICES);
@@ -147,19 +221,36 @@ int ir_register_class(struct input_dev *input_dev)
if (unlikely(devno < 0))
return devno;
- ir_dev->attr.attrs = ir_dev_attrs;
- ir_dev->class_dev = device_create(ir_input_class, NULL,
- input_dev->dev.devt, ir_dev,
- "irrcv%d", devno);
- kobj = &ir_dev->class_dev->kobj;
-
- printk(KERN_WARNING "Creating IR device %s\n", kobject_name(kobj));
- rc = sysfs_create_group(kobj, &ir_dev->attr);
- if (unlikely(rc < 0)) {
- device_destroy(ir_input_class, input_dev->dev.devt);
- return -ENOMEM;
+ if (ir_dev->props->driver_type == RC_DRIVER_SCANCODE)
+ ir_dev->dev.type = &rc_dev_type;
+ else
+ ir_dev->dev.type = &ir_raw_dev_type;
+
+ ir_dev->dev.class = &ir_input_class;
+ ir_dev->dev.parent = input_dev->dev.parent;
+ dev_set_name(&ir_dev->dev, "rc%d", devno);
+ dev_set_drvdata(&ir_dev->dev, ir_dev);
+ rc = device_register(&ir_dev->dev);
+ if (rc)
+ return rc;
+
+
+ input_dev->dev.parent = &ir_dev->dev;
+ rc = input_register_device(input_dev);
+ if (rc < 0) {
+ device_del(&ir_dev->dev);
+ return rc;
}
+ __module_get(THIS_MODULE);
+
+ path = kobject_get_path(&ir_dev->dev.kobj, GFP_KERNEL);
+ printk(KERN_INFO "%s: %s as %s\n",
+ dev_name(&ir_dev->dev),
+ input_dev->name ? input_dev->name : "Unspecified device",
+ path ? path : "N/A");
+ kfree(path);
+
ir_dev->devno = devno;
set_bit(devno, &ir_core_dev_number);
@@ -168,7 +259,7 @@ int ir_register_class(struct input_dev *input_dev)
/**
* ir_unregister_class() - removes the sysfs for sysfs for
- * /sys/class/irrcv/irrcv?
+ * /sys/class/rc/rc?
* @input_dev: the struct input_dev descriptor of the device
*
* This routine is used to unregister the syfs code for IR class
@@ -176,36 +267,35 @@ int ir_register_class(struct input_dev *input_dev)
void ir_unregister_class(struct input_dev *input_dev)
{
struct ir_input_dev *ir_dev = input_get_drvdata(input_dev);
- struct kobject *kobj;
clear_bit(ir_dev->devno, &ir_core_dev_number);
+ input_unregister_device(input_dev);
+ device_del(&ir_dev->dev);
- kobj = &ir_dev->class_dev->kobj;
-
- sysfs_remove_group(kobj, &ir_dev->attr);
- device_destroy(ir_input_class, input_dev->dev.devt);
-
- kfree(ir_dev->attr.name);
+ module_put(THIS_MODULE);
}
/*
- * Init/exit code for the module. Basically, creates/removes /sys/class/irrcv
+ * Init/exit code for the module. Basically, creates/removes /sys/class/rc
*/
static int __init ir_core_init(void)
{
- ir_input_class = class_create(THIS_MODULE, "irrcv");
- if (IS_ERR(ir_input_class)) {
- printk(KERN_ERR "ir_core: unable to register irrcv class\n");
- return PTR_ERR(ir_input_class);
+ int rc = class_register(&ir_input_class);
+ if (rc) {
+ printk(KERN_ERR "ir_core: unable to register rc class\n");
+ return rc;
}
+ /* Initialize/load the decoders/keymap code that will be used */
+ ir_raw_init();
+
return 0;
}
static void __exit ir_core_exit(void)
{
- class_destroy(ir_input_class);
+ class_unregister(&ir_input_class);
}
module_init(ir_core_init);
diff --git a/drivers/media/IR/keymaps/Kconfig b/drivers/media/IR/keymaps/Kconfig
new file mode 100644
index 000000000000..14b22f58f823
--- /dev/null
+++ b/drivers/media/IR/keymaps/Kconfig
@@ -0,0 +1,15 @@
+config RC_MAP
+ tristate "Compile Remote Controller keymap modules"
+ depends on IR_CORE
+ default y
+
+ ---help---
+ This option enables the compilation of lots of Remote
+ Controller tables. They are short tables, but if you
+ don't use a remote controller, or prefer to load the
+ tables from userspace, you should disable it.
+
+ The ir-keytable program, available in the v4l-utils
+ package, provides the same RC maps for loading from
+ userspace. It is available at
+ http://git.linuxtv.org/v4l-utils
diff --git a/drivers/media/IR/keymaps/Makefile b/drivers/media/IR/keymaps/Makefile
new file mode 100644
index 000000000000..ec25258a955f
--- /dev/null
+++ b/drivers/media/IR/keymaps/Makefile
@@ -0,0 +1,67 @@
+obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \
+ rc-apac-viewcomp.o \
+ rc-asus-pc39.o \
+ rc-ati-tv-wonder-hd-600.o \
+ rc-avermedia-a16d.o \
+ rc-avermedia.o \
+ rc-avermedia-cardbus.o \
+ rc-avermedia-dvbt.o \
+ rc-avermedia-m135a-rm-jx.o \
+ rc-avertv-303.o \
+ rc-behold.o \
+ rc-behold-columbus.o \
+ rc-budget-ci-old.o \
+ rc-cinergy-1400.o \
+ rc-cinergy.o \
+ rc-dm1105-nec.o \
+ rc-dntv-live-dvb-t.o \
+ rc-dntv-live-dvbt-pro.o \
+ rc-empty.o \
+ rc-em-terratec.o \
+ rc-encore-enltv2.o \
+ rc-encore-enltv.o \
+ rc-encore-enltv-fm53.o \
+ rc-evga-indtube.o \
+ rc-eztv.o \
+ rc-flydvb.o \
+ rc-flyvideo.o \
+ rc-fusionhdtv-mce.o \
+ rc-gadmei-rm008z.o \
+ rc-genius-tvgo-a11mce.o \
+ rc-gotview7135.o \
+ rc-hauppauge-new.o \
+ rc-imon-mce.o \
+ rc-imon-pad.o \
+ rc-iodata-bctv7e.o \
+ rc-kaiomy.o \
+ rc-kworld-315u.o \
+ rc-kworld-plus-tv-analog.o \
+ rc-manli.o \
+ rc-msi-tvanywhere.o \
+ rc-msi-tvanywhere-plus.o \
+ rc-nebula.o \
+ rc-nec-terratec-cinergy-xs.o \
+ rc-norwood.o \
+ rc-npgtech.o \
+ rc-pctv-sedna.o \
+ rc-pinnacle-color.o \
+ rc-pinnacle-grey.o \
+ rc-pinnacle-pctv-hd.o \
+ rc-pixelview.o \
+ rc-pixelview-mk12.o \
+ rc-pixelview-new.o \
+ rc-powercolor-real-angel.o \
+ rc-proteus-2309.o \
+ rc-purpletv.o \
+ rc-pv951.o \
+ rc-rc5-hauppauge-new.o \
+ rc-rc5-tv.o \
+ rc-real-audio-220-32-keys.o \
+ rc-tbs-nec.o \
+ rc-terratec-cinergy-xs.o \
+ rc-tevii-nec.o \
+ rc-tt-1500.o \
+ rc-videomate-s350.o \
+ rc-videomate-tv-pvr.o \
+ rc-winfast.o \
+ rc-winfast-usbii-deluxe.o
diff --git a/drivers/media/IR/keymaps/rc-adstech-dvb-t-pci.c b/drivers/media/IR/keymaps/rc-adstech-dvb-t-pci.c
new file mode 100644
index 000000000000..b17283176ecd
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-adstech-dvb-t-pci.c
@@ -0,0 +1,89 @@
+/* adstech-dvb-t-pci.h - Keytable for adstech_dvb_t_pci Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* ADS Tech Instant TV DVB-T PCI Remote */
+
+static struct ir_scancode adstech_dvb_t_pci[] = {
+ /* Keys 0 to 9 */
+ { 0x4d, KEY_0 },
+ { 0x57, KEY_1 },
+ { 0x4f, KEY_2 },
+ { 0x53, KEY_3 },
+ { 0x56, KEY_4 },
+ { 0x4e, KEY_5 },
+ { 0x5e, KEY_6 },
+ { 0x54, KEY_7 },
+ { 0x4c, KEY_8 },
+ { 0x5c, KEY_9 },
+
+ { 0x5b, KEY_POWER },
+ { 0x5f, KEY_MUTE },
+ { 0x55, KEY_GOTO },
+ { 0x5d, KEY_SEARCH },
+ { 0x17, KEY_EPG }, /* Guide */
+ { 0x1f, KEY_MENU },
+ { 0x0f, KEY_UP },
+ { 0x46, KEY_DOWN },
+ { 0x16, KEY_LEFT },
+ { 0x1e, KEY_RIGHT },
+ { 0x0e, KEY_SELECT }, /* Enter */
+ { 0x5a, KEY_INFO },
+ { 0x52, KEY_EXIT },
+ { 0x59, KEY_PREVIOUS },
+ { 0x51, KEY_NEXT },
+ { 0x58, KEY_REWIND },
+ { 0x50, KEY_FORWARD },
+ { 0x44, KEY_PLAYPAUSE },
+ { 0x07, KEY_STOP },
+ { 0x1b, KEY_RECORD },
+ { 0x13, KEY_TUNER }, /* Live */
+ { 0x0a, KEY_A },
+ { 0x12, KEY_B },
+ { 0x03, KEY_PROG1 }, /* 1 */
+ { 0x01, KEY_PROG2 }, /* 2 */
+ { 0x00, KEY_PROG3 }, /* 3 */
+ { 0x06, KEY_DVD },
+ { 0x48, KEY_AUX }, /* Photo */
+ { 0x40, KEY_VIDEO },
+ { 0x19, KEY_AUDIO }, /* Music */
+ { 0x0b, KEY_CHANNELUP },
+ { 0x08, KEY_CHANNELDOWN },
+ { 0x15, KEY_VOLUMEUP },
+ { 0x1c, KEY_VOLUMEDOWN },
+};
+
+static struct rc_keymap adstech_dvb_t_pci_map = {
+ .map = {
+ .scan = adstech_dvb_t_pci,
+ .size = ARRAY_SIZE(adstech_dvb_t_pci),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_ADSTECH_DVB_T_PCI,
+ }
+};
+
+static int __init init_rc_map_adstech_dvb_t_pci(void)
+{
+ return ir_register_map(&adstech_dvb_t_pci_map);
+}
+
+static void __exit exit_rc_map_adstech_dvb_t_pci(void)
+{
+ ir_unregister_map(&adstech_dvb_t_pci_map);
+}
+
+module_init(init_rc_map_adstech_dvb_t_pci)
+module_exit(exit_rc_map_adstech_dvb_t_pci)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-apac-viewcomp.c b/drivers/media/IR/keymaps/rc-apac-viewcomp.c
new file mode 100644
index 000000000000..0ef2b562baf0
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-apac-viewcomp.c
@@ -0,0 +1,80 @@
+/* apac-viewcomp.h - Keytable for apac_viewcomp Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* Attila Kondoros <attila.kondoros@chello.hu> */
+
+static struct ir_scancode apac_viewcomp[] = {
+
+ { 0x01, KEY_1 },
+ { 0x02, KEY_2 },
+ { 0x03, KEY_3 },
+ { 0x04, KEY_4 },
+ { 0x05, KEY_5 },
+ { 0x06, KEY_6 },
+ { 0x07, KEY_7 },
+ { 0x08, KEY_8 },
+ { 0x09, KEY_9 },
+ { 0x00, KEY_0 },
+ { 0x17, KEY_LAST }, /* +100 */
+ { 0x0a, KEY_LIST }, /* recall */
+
+
+ { 0x1c, KEY_TUNER }, /* TV/FM */
+ { 0x15, KEY_SEARCH }, /* scan */
+ { 0x12, KEY_POWER }, /* power */
+ { 0x1f, KEY_VOLUMEDOWN }, /* vol up */
+ { 0x1b, KEY_VOLUMEUP }, /* vol down */
+ { 0x1e, KEY_CHANNELDOWN }, /* chn up */
+ { 0x1a, KEY_CHANNELUP }, /* chn down */
+
+ { 0x11, KEY_VIDEO }, /* video */
+ { 0x0f, KEY_ZOOM }, /* full screen */
+ { 0x13, KEY_MUTE }, /* mute/unmute */
+ { 0x10, KEY_TEXT }, /* min */
+
+ { 0x0d, KEY_STOP }, /* freeze */
+ { 0x0e, KEY_RECORD }, /* record */
+ { 0x1d, KEY_PLAYPAUSE }, /* stop */
+ { 0x19, KEY_PLAY }, /* play */
+
+ { 0x16, KEY_GOTO }, /* osd */
+ { 0x14, KEY_REFRESH }, /* default */
+ { 0x0c, KEY_KPPLUS }, /* fine tune >>>> */
+ { 0x18, KEY_KPMINUS }, /* fine tune <<<< */
+};
+
+static struct rc_keymap apac_viewcomp_map = {
+ .map = {
+ .scan = apac_viewcomp,
+ .size = ARRAY_SIZE(apac_viewcomp),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_APAC_VIEWCOMP,
+ }
+};
+
+static int __init init_rc_map_apac_viewcomp(void)
+{
+ return ir_register_map(&apac_viewcomp_map);
+}
+
+static void __exit exit_rc_map_apac_viewcomp(void)
+{
+ ir_unregister_map(&apac_viewcomp_map);
+}
+
+module_init(init_rc_map_apac_viewcomp)
+module_exit(exit_rc_map_apac_viewcomp)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-asus-pc39.c b/drivers/media/IR/keymaps/rc-asus-pc39.c
new file mode 100644
index 000000000000..2aa068cd6c75
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-asus-pc39.c
@@ -0,0 +1,91 @@
+/* asus-pc39.h - Keytable for asus_pc39 Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/*
+ * Marc Fargas <telenieko@telenieko.com>
+ * this is the remote control that comes with the asus p7131,
+ * which has a label saying it is "Model PC-39"
+ */
+
+static struct ir_scancode asus_pc39[] = {
+ /* Keys 0 to 9 */
+ { 0x15, KEY_0 },
+ { 0x29, KEY_1 },
+ { 0x2d, KEY_2 },
+ { 0x2b, KEY_3 },
+ { 0x09, KEY_4 },
+ { 0x0d, KEY_5 },
+ { 0x0b, KEY_6 },
+ { 0x31, KEY_7 },
+ { 0x35, KEY_8 },
+ { 0x33, KEY_9 },
+
+ { 0x3e, KEY_RADIO }, /* radio */
+ { 0x03, KEY_MENU }, /* dvd/menu */
+ { 0x2a, KEY_VOLUMEUP },
+ { 0x19, KEY_VOLUMEDOWN },
+ { 0x37, KEY_UP },
+ { 0x3b, KEY_DOWN },
+ { 0x27, KEY_LEFT },
+ { 0x2f, KEY_RIGHT },
+ { 0x25, KEY_VIDEO }, /* video */
+ { 0x39, KEY_AUDIO }, /* music */
+
+ { 0x21, KEY_TV }, /* tv */
+ { 0x1d, KEY_EXIT }, /* back */
+ { 0x0a, KEY_CHANNELUP }, /* channel / program + */
+ { 0x1b, KEY_CHANNELDOWN }, /* channel / program - */
+ { 0x1a, KEY_ENTER }, /* enter */
+
+ { 0x06, KEY_PAUSE }, /* play/pause */
+ { 0x1e, KEY_PREVIOUS }, /* rew */
+ { 0x26, KEY_NEXT }, /* forward */
+ { 0x0e, KEY_REWIND }, /* backward << */
+ { 0x3a, KEY_FASTFORWARD }, /* forward >> */
+ { 0x36, KEY_STOP },
+ { 0x2e, KEY_RECORD }, /* recording */
+ { 0x16, KEY_POWER }, /* the button that reads "close" */
+
+ { 0x11, KEY_ZOOM }, /* full screen */
+ { 0x13, KEY_MACRO }, /* recall */
+ { 0x23, KEY_HOME }, /* home */
+ { 0x05, KEY_PVR }, /* picture */
+ { 0x3d, KEY_MUTE }, /* mute */
+ { 0x01, KEY_DVD }, /* dvd */
+};
+
+static struct rc_keymap asus_pc39_map = {
+ .map = {
+ .scan = asus_pc39,
+ .size = ARRAY_SIZE(asus_pc39),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_ASUS_PC39,
+ }
+};
+
+static int __init init_rc_map_asus_pc39(void)
+{
+ return ir_register_map(&asus_pc39_map);
+}
+
+static void __exit exit_rc_map_asus_pc39(void)
+{
+ ir_unregister_map(&asus_pc39_map);
+}
+
+module_init(init_rc_map_asus_pc39)
+module_exit(exit_rc_map_asus_pc39)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-ati-tv-wonder-hd-600.c b/drivers/media/IR/keymaps/rc-ati-tv-wonder-hd-600.c
new file mode 100644
index 000000000000..8edfd293d010
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-ati-tv-wonder-hd-600.c
@@ -0,0 +1,69 @@
+/* ati-tv-wonder-hd-600.h - Keytable for ati_tv_wonder_hd_600 Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* ATI TV Wonder HD 600 USB
+ Devin Heitmueller <devin.heitmueller@gmail.com>
+ */
+
+static struct ir_scancode ati_tv_wonder_hd_600[] = {
+ { 0x00, KEY_RECORD}, /* Row 1 */
+ { 0x01, KEY_PLAYPAUSE},
+ { 0x02, KEY_STOP},
+ { 0x03, KEY_POWER},
+ { 0x04, KEY_PREVIOUS}, /* Row 2 */
+ { 0x05, KEY_REWIND},
+ { 0x06, KEY_FORWARD},
+ { 0x07, KEY_NEXT},
+ { 0x08, KEY_EPG}, /* Row 3 */
+ { 0x09, KEY_HOME},
+ { 0x0a, KEY_MENU},
+ { 0x0b, KEY_CHANNELUP},
+ { 0x0c, KEY_BACK}, /* Row 4 */
+ { 0x0d, KEY_UP},
+ { 0x0e, KEY_INFO},
+ { 0x0f, KEY_CHANNELDOWN},
+ { 0x10, KEY_LEFT}, /* Row 5 */
+ { 0x11, KEY_SELECT},
+ { 0x12, KEY_RIGHT},
+ { 0x13, KEY_VOLUMEUP},
+ { 0x14, KEY_LAST}, /* Row 6 */
+ { 0x15, KEY_DOWN},
+ { 0x16, KEY_MUTE},
+ { 0x17, KEY_VOLUMEDOWN},
+};
+
+static struct rc_keymap ati_tv_wonder_hd_600_map = {
+ .map = {
+ .scan = ati_tv_wonder_hd_600,
+ .size = ARRAY_SIZE(ati_tv_wonder_hd_600),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_ATI_TV_WONDER_HD_600,
+ }
+};
+
+static int __init init_rc_map_ati_tv_wonder_hd_600(void)
+{
+ return ir_register_map(&ati_tv_wonder_hd_600_map);
+}
+
+static void __exit exit_rc_map_ati_tv_wonder_hd_600(void)
+{
+ ir_unregister_map(&ati_tv_wonder_hd_600_map);
+}
+
+module_init(init_rc_map_ati_tv_wonder_hd_600)
+module_exit(exit_rc_map_ati_tv_wonder_hd_600)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-avermedia-a16d.c b/drivers/media/IR/keymaps/rc-avermedia-a16d.c
new file mode 100644
index 000000000000..12f043587f2e
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-avermedia-a16d.c
@@ -0,0 +1,75 @@
+/* avermedia-a16d.h - Keytable for avermedia_a16d Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+static struct ir_scancode avermedia_a16d[] = {
+ { 0x20, KEY_LIST},
+ { 0x00, KEY_POWER},
+ { 0x28, KEY_1},
+ { 0x18, KEY_2},
+ { 0x38, KEY_3},
+ { 0x24, KEY_4},
+ { 0x14, KEY_5},
+ { 0x34, KEY_6},
+ { 0x2c, KEY_7},
+ { 0x1c, KEY_8},
+ { 0x3c, KEY_9},
+ { 0x12, KEY_SUBTITLE},
+ { 0x22, KEY_0},
+ { 0x32, KEY_REWIND},
+ { 0x3a, KEY_SHUFFLE},
+ { 0x02, KEY_PRINT},
+ { 0x11, KEY_CHANNELDOWN},
+ { 0x31, KEY_CHANNELUP},
+ { 0x0c, KEY_ZOOM},
+ { 0x1e, KEY_VOLUMEDOWN},
+ { 0x3e, KEY_VOLUMEUP},
+ { 0x0a, KEY_MUTE},
+ { 0x04, KEY_AUDIO},
+ { 0x26, KEY_RECORD},
+ { 0x06, KEY_PLAY},
+ { 0x36, KEY_STOP},
+ { 0x16, KEY_PAUSE},
+ { 0x2e, KEY_REWIND},
+ { 0x0e, KEY_FASTFORWARD},
+ { 0x30, KEY_TEXT},
+ { 0x21, KEY_GREEN},
+ { 0x01, KEY_BLUE},
+ { 0x08, KEY_EPG},
+ { 0x2a, KEY_MENU},
+};
+
+static struct rc_keymap avermedia_a16d_map = {
+ .map = {
+ .scan = avermedia_a16d,
+ .size = ARRAY_SIZE(avermedia_a16d),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_AVERMEDIA_A16D,
+ }
+};
+
+static int __init init_rc_map_avermedia_a16d(void)
+{
+ return ir_register_map(&avermedia_a16d_map);
+}
+
+static void __exit exit_rc_map_avermedia_a16d(void)
+{
+ ir_unregister_map(&avermedia_a16d_map);
+}
+
+module_init(init_rc_map_avermedia_a16d)
+module_exit(exit_rc_map_avermedia_a16d)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-avermedia-cardbus.c b/drivers/media/IR/keymaps/rc-avermedia-cardbus.c
new file mode 100644
index 000000000000..2a945b02e8ca
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-avermedia-cardbus.c
@@ -0,0 +1,97 @@
+/* avermedia-cardbus.h - Keytable for avermedia_cardbus Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* Oldrich Jedlicka <oldium.pro@seznam.cz> */
+
+static struct ir_scancode avermedia_cardbus[] = {
+ { 0x00, KEY_POWER },
+ { 0x01, KEY_TUNER }, /* TV/FM */
+ { 0x03, KEY_TEXT }, /* Teletext */
+ { 0x04, KEY_EPG },
+ { 0x05, KEY_1 },
+ { 0x06, KEY_2 },
+ { 0x07, KEY_3 },
+ { 0x08, KEY_AUDIO },
+ { 0x09, KEY_4 },
+ { 0x0a, KEY_5 },
+ { 0x0b, KEY_6 },
+ { 0x0c, KEY_ZOOM }, /* Full screen */
+ { 0x0d, KEY_7 },
+ { 0x0e, KEY_8 },
+ { 0x0f, KEY_9 },
+ { 0x10, KEY_PAGEUP }, /* 16-CH PREV */
+ { 0x11, KEY_0 },
+ { 0x12, KEY_INFO },
+ { 0x13, KEY_AGAIN }, /* CH RTN - channel return */
+ { 0x14, KEY_MUTE },
+ { 0x15, KEY_EDIT }, /* Autoscan */
+ { 0x17, KEY_SAVE }, /* Screenshot */
+ { 0x18, KEY_PLAYPAUSE },
+ { 0x19, KEY_RECORD },
+ { 0x1a, KEY_PLAY },
+ { 0x1b, KEY_STOP },
+ { 0x1c, KEY_FASTFORWARD },
+ { 0x1d, KEY_REWIND },
+ { 0x1e, KEY_VOLUMEDOWN },
+ { 0x1f, KEY_VOLUMEUP },
+ { 0x22, KEY_SLEEP }, /* Sleep */
+ { 0x23, KEY_ZOOM }, /* Aspect */
+ { 0x26, KEY_SCREEN }, /* Pos */
+ { 0x27, KEY_ANGLE }, /* Size */
+ { 0x28, KEY_SELECT }, /* Select */
+ { 0x29, KEY_BLUE }, /* Blue/Picture */
+ { 0x2a, KEY_BACKSPACE }, /* Back */
+ { 0x2b, KEY_MEDIA }, /* PIP (Picture-in-picture) */
+ { 0x2c, KEY_DOWN },
+ { 0x2e, KEY_DOT },
+ { 0x2f, KEY_TV }, /* Live TV */
+ { 0x32, KEY_LEFT },
+ { 0x33, KEY_CLEAR }, /* Clear */
+ { 0x35, KEY_RED }, /* Red/TV */
+ { 0x36, KEY_UP },
+ { 0x37, KEY_HOME }, /* Home */
+ { 0x39, KEY_GREEN }, /* Green/Video */
+ { 0x3d, KEY_YELLOW }, /* Yellow/Music */
+ { 0x3e, KEY_OK }, /* Ok */
+ { 0x3f, KEY_RIGHT },
+ { 0x40, KEY_NEXT }, /* Next */
+ { 0x41, KEY_PREVIOUS }, /* Previous */
+ { 0x42, KEY_CHANNELDOWN }, /* Channel down */
+ { 0x43, KEY_CHANNELUP }, /* Channel up */
+};
+
+static struct rc_keymap avermedia_cardbus_map = {
+ .map = {
+ .scan = avermedia_cardbus,
+ .size = ARRAY_SIZE(avermedia_cardbus),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_AVERMEDIA_CARDBUS,
+ }
+};
+
+static int __init init_rc_map_avermedia_cardbus(void)
+{
+ return ir_register_map(&avermedia_cardbus_map);
+}
+
+static void __exit exit_rc_map_avermedia_cardbus(void)
+{
+ ir_unregister_map(&avermedia_cardbus_map);
+}
+
+module_init(init_rc_map_avermedia_cardbus)
+module_exit(exit_rc_map_avermedia_cardbus)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-avermedia-dvbt.c b/drivers/media/IR/keymaps/rc-avermedia-dvbt.c
new file mode 100644
index 000000000000..39dde6222875
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-avermedia-dvbt.c
@@ -0,0 +1,78 @@
+/* avermedia-dvbt.h - Keytable for avermedia_dvbt Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* Matt Jesson <dvb@jesson.eclipse.co.uk> */
+
+static struct ir_scancode avermedia_dvbt[] = {
+ { 0x28, KEY_0 }, /* '0' / 'enter' */
+ { 0x22, KEY_1 }, /* '1' */
+ { 0x12, KEY_2 }, /* '2' / 'up arrow' */
+ { 0x32, KEY_3 }, /* '3' */
+ { 0x24, KEY_4 }, /* '4' / 'left arrow' */
+ { 0x14, KEY_5 }, /* '5' */
+ { 0x34, KEY_6 }, /* '6' / 'right arrow' */
+ { 0x26, KEY_7 }, /* '7' */
+ { 0x16, KEY_8 }, /* '8' / 'down arrow' */
+ { 0x36, KEY_9 }, /* '9' */
+
+ { 0x20, KEY_LIST }, /* 'source' */
+ { 0x10, KEY_TEXT }, /* 'teletext' */
+ { 0x00, KEY_POWER }, /* 'power' */
+ { 0x04, KEY_AUDIO }, /* 'audio' */
+ { 0x06, KEY_ZOOM }, /* 'full screen' */
+ { 0x18, KEY_VIDEO }, /* 'display' */
+ { 0x38, KEY_SEARCH }, /* 'loop' */
+ { 0x08, KEY_INFO }, /* 'preview' */
+ { 0x2a, KEY_REWIND }, /* 'backward <<' */
+ { 0x1a, KEY_FASTFORWARD }, /* 'forward >>' */
+ { 0x3a, KEY_RECORD }, /* 'capture' */
+ { 0x0a, KEY_MUTE }, /* 'mute' */
+ { 0x2c, KEY_RECORD }, /* 'record' */
+ { 0x1c, KEY_PAUSE }, /* 'pause' */
+ { 0x3c, KEY_STOP }, /* 'stop' */
+ { 0x0c, KEY_PLAY }, /* 'play' */
+ { 0x2e, KEY_RED }, /* 'red' */
+ { 0x01, KEY_BLUE }, /* 'blue' / 'cancel' */
+ { 0x0e, KEY_YELLOW }, /* 'yellow' / 'ok' */
+ { 0x21, KEY_GREEN }, /* 'green' */
+ { 0x11, KEY_CHANNELDOWN }, /* 'channel -' */
+ { 0x31, KEY_CHANNELUP }, /* 'channel +' */
+ { 0x1e, KEY_VOLUMEDOWN }, /* 'volume -' */
+ { 0x3e, KEY_VOLUMEUP }, /* 'volume +' */
+};
+
+static struct rc_keymap avermedia_dvbt_map = {
+ .map = {
+ .scan = avermedia_dvbt,
+ .size = ARRAY_SIZE(avermedia_dvbt),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_AVERMEDIA_DVBT,
+ }
+};
+
+static int __init init_rc_map_avermedia_dvbt(void)
+{
+ return ir_register_map(&avermedia_dvbt_map);
+}
+
+static void __exit exit_rc_map_avermedia_dvbt(void)
+{
+ ir_unregister_map(&avermedia_dvbt_map);
+}
+
+module_init(init_rc_map_avermedia_dvbt)
+module_exit(exit_rc_map_avermedia_dvbt)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-avermedia-m135a-rm-jx.c b/drivers/media/IR/keymaps/rc-avermedia-m135a-rm-jx.c
new file mode 100644
index 000000000000..101e7ea85941
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-avermedia-m135a-rm-jx.c
@@ -0,0 +1,90 @@
+/* avermedia-m135a-rm-jx.h - Keytable for avermedia_m135a_rm_jx Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/*
+ * Avermedia M135A with IR model RM-JX
+ * The same codes exist on both Positivo (BR) and original IR
+ * Mauro Carvalho Chehab <mchehab@infradead.org>
+ */
+
+static struct ir_scancode avermedia_m135a_rm_jx[] = {
+ { 0x0200, KEY_POWER2 },
+ { 0x022e, KEY_DOT }, /* '.' */
+ { 0x0201, KEY_MODE }, /* TV/FM or SOURCE */
+
+ { 0x0205, KEY_1 },
+ { 0x0206, KEY_2 },
+ { 0x0207, KEY_3 },
+ { 0x0209, KEY_4 },
+ { 0x020a, KEY_5 },
+ { 0x020b, KEY_6 },
+ { 0x020d, KEY_7 },
+ { 0x020e, KEY_8 },
+ { 0x020f, KEY_9 },
+ { 0x0211, KEY_0 },
+
+ { 0x0213, KEY_RIGHT }, /* -> or L */
+ { 0x0212, KEY_LEFT }, /* <- or R */
+
+ { 0x0217, KEY_SLEEP }, /* Capturar Imagem or Snapshot */
+ { 0x0210, KEY_SHUFFLE }, /* Amostra or 16 chan prev */
+
+ { 0x0303, KEY_CHANNELUP },
+ { 0x0302, KEY_CHANNELDOWN },
+ { 0x021f, KEY_VOLUMEUP },
+ { 0x021e, KEY_VOLUMEDOWN },
+ { 0x020c, KEY_ENTER }, /* Full Screen */
+
+ { 0x0214, KEY_MUTE },
+ { 0x0208, KEY_AUDIO },
+
+ { 0x0203, KEY_TEXT }, /* Teletext */
+ { 0x0204, KEY_EPG },
+ { 0x022b, KEY_TV2 }, /* TV2 or PIP */
+
+ { 0x021d, KEY_RED },
+ { 0x021c, KEY_YELLOW },
+ { 0x0301, KEY_GREEN },
+ { 0x0300, KEY_BLUE },
+
+ { 0x021a, KEY_PLAYPAUSE },
+ { 0x0219, KEY_RECORD },
+ { 0x0218, KEY_PLAY },
+ { 0x021b, KEY_STOP },
+};
+
+static struct rc_keymap avermedia_m135a_rm_jx_map = {
+ .map = {
+ .scan = avermedia_m135a_rm_jx,
+ .size = ARRAY_SIZE(avermedia_m135a_rm_jx),
+ .ir_type = IR_TYPE_NEC,
+ .name = RC_MAP_AVERMEDIA_M135A_RM_JX,
+ }
+};
+
+static int __init init_rc_map_avermedia_m135a_rm_jx(void)
+{
+ return ir_register_map(&avermedia_m135a_rm_jx_map);
+}
+
+static void __exit exit_rc_map_avermedia_m135a_rm_jx(void)
+{
+ ir_unregister_map(&avermedia_m135a_rm_jx_map);
+}
+
+module_init(init_rc_map_avermedia_m135a_rm_jx)
+module_exit(exit_rc_map_avermedia_m135a_rm_jx)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-avermedia.c b/drivers/media/IR/keymaps/rc-avermedia.c
new file mode 100644
index 000000000000..21effd5bfb0d
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-avermedia.c
@@ -0,0 +1,86 @@
+/* avermedia.h - Keytable for avermedia Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* Alex Hermann <gaaf@gmx.net> */
+
+static struct ir_scancode avermedia[] = {
+ { 0x28, KEY_1 },
+ { 0x18, KEY_2 },
+ { 0x38, KEY_3 },
+ { 0x24, KEY_4 },
+ { 0x14, KEY_5 },
+ { 0x34, KEY_6 },
+ { 0x2c, KEY_7 },
+ { 0x1c, KEY_8 },
+ { 0x3c, KEY_9 },
+ { 0x22, KEY_0 },
+
+ { 0x20, KEY_TV }, /* TV/FM */
+ { 0x10, KEY_CD }, /* CD */
+ { 0x30, KEY_TEXT }, /* TELETEXT */
+ { 0x00, KEY_POWER }, /* POWER */
+
+ { 0x08, KEY_VIDEO }, /* VIDEO */
+ { 0x04, KEY_AUDIO }, /* AUDIO */
+ { 0x0c, KEY_ZOOM }, /* FULL SCREEN */
+
+ { 0x12, KEY_SUBTITLE }, /* DISPLAY */
+ { 0x32, KEY_REWIND }, /* LOOP */
+ { 0x02, KEY_PRINT }, /* PREVIEW */
+
+ { 0x2a, KEY_SEARCH }, /* AUTOSCAN */
+ { 0x1a, KEY_SLEEP }, /* FREEZE */
+ { 0x3a, KEY_CAMERA }, /* SNAPSHOT */
+ { 0x0a, KEY_MUTE }, /* MUTE */
+
+ { 0x26, KEY_RECORD }, /* RECORD */
+ { 0x16, KEY_PAUSE }, /* PAUSE */
+ { 0x36, KEY_STOP }, /* STOP */
+ { 0x06, KEY_PLAY }, /* PLAY */
+
+ { 0x2e, KEY_RED }, /* RED */
+ { 0x21, KEY_GREEN }, /* GREEN */
+ { 0x0e, KEY_YELLOW }, /* YELLOW */
+ { 0x01, KEY_BLUE }, /* BLUE */
+
+ { 0x1e, KEY_VOLUMEDOWN }, /* VOLUME- */
+ { 0x3e, KEY_VOLUMEUP }, /* VOLUME+ */
+ { 0x11, KEY_CHANNELDOWN }, /* CHANNEL/PAGE- */
+ { 0x31, KEY_CHANNELUP } /* CHANNEL/PAGE+ */
+};
+
+static struct rc_keymap avermedia_map = {
+ .map = {
+ .scan = avermedia,
+ .size = ARRAY_SIZE(avermedia),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_AVERMEDIA,
+ }
+};
+
+static int __init init_rc_map_avermedia(void)
+{
+ return ir_register_map(&avermedia_map);
+}
+
+static void __exit exit_rc_map_avermedia(void)
+{
+ ir_unregister_map(&avermedia_map);
+}
+
+module_init(init_rc_map_avermedia)
+module_exit(exit_rc_map_avermedia)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-avertv-303.c b/drivers/media/IR/keymaps/rc-avertv-303.c
new file mode 100644
index 000000000000..971c59d6f9d6
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-avertv-303.c
@@ -0,0 +1,85 @@
+/* avertv-303.h - Keytable for avertv_303 Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* AVERTV STUDIO 303 Remote */
+
+static struct ir_scancode avertv_303[] = {
+ { 0x2a, KEY_1 },
+ { 0x32, KEY_2 },
+ { 0x3a, KEY_3 },
+ { 0x4a, KEY_4 },
+ { 0x52, KEY_5 },
+ { 0x5a, KEY_6 },
+ { 0x6a, KEY_7 },
+ { 0x72, KEY_8 },
+ { 0x7a, KEY_9 },
+ { 0x0e, KEY_0 },
+
+ { 0x02, KEY_POWER },
+ { 0x22, KEY_VIDEO },
+ { 0x42, KEY_AUDIO },
+ { 0x62, KEY_ZOOM },
+ { 0x0a, KEY_TV },
+ { 0x12, KEY_CD },
+ { 0x1a, KEY_TEXT },
+
+ { 0x16, KEY_SUBTITLE },
+ { 0x1e, KEY_REWIND },
+ { 0x06, KEY_PRINT },
+
+ { 0x2e, KEY_SEARCH },
+ { 0x36, KEY_SLEEP },
+ { 0x3e, KEY_SHUFFLE },
+ { 0x26, KEY_MUTE },
+
+ { 0x4e, KEY_RECORD },
+ { 0x56, KEY_PAUSE },
+ { 0x5e, KEY_STOP },
+ { 0x46, KEY_PLAY },
+
+ { 0x6e, KEY_RED },
+ { 0x0b, KEY_GREEN },
+ { 0x66, KEY_YELLOW },
+ { 0x03, KEY_BLUE },
+
+ { 0x76, KEY_LEFT },
+ { 0x7e, KEY_RIGHT },
+ { 0x13, KEY_DOWN },
+ { 0x1b, KEY_UP },
+};
+
+static struct rc_keymap avertv_303_map = {
+ .map = {
+ .scan = avertv_303,
+ .size = ARRAY_SIZE(avertv_303),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_AVERTV_303,
+ }
+};
+
+static int __init init_rc_map_avertv_303(void)
+{
+ return ir_register_map(&avertv_303_map);
+}
+
+static void __exit exit_rc_map_avertv_303(void)
+{
+ ir_unregister_map(&avertv_303_map);
+}
+
+module_init(init_rc_map_avertv_303)
+module_exit(exit_rc_map_avertv_303)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-behold-columbus.c b/drivers/media/IR/keymaps/rc-behold-columbus.c
new file mode 100644
index 000000000000..9f56c98fef5b
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-behold-columbus.c
@@ -0,0 +1,108 @@
+/* behold-columbus.h - Keytable for behold_columbus Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* Beholder Intl. Ltd. 2008
+ * Dmitry Belimov d.belimov@google.com
+ * Keytable is used by BeholdTV Columbus
+ * The "ascii-art picture" below (in comments, first row
+ * is the keycode in hex, and subsequent row(s) shows
+ * the button labels (several variants when appropriate)
+ * helps to descide which keycodes to assign to the buttons.
+ */
+
+static struct ir_scancode behold_columbus[] = {
+
+ /* 0x13 0x11 0x1C 0x12 *
+ * Mute Source TV/FM Power *
+ * */
+
+ { 0x13, KEY_MUTE },
+ { 0x11, KEY_PROPS },
+ { 0x1C, KEY_TUNER }, /* KEY_TV/KEY_RADIO */
+ { 0x12, KEY_POWER },
+
+ /* 0x01 0x02 0x03 0x0D *
+ * 1 2 3 Stereo *
+ * *
+ * 0x04 0x05 0x06 0x19 *
+ * 4 5 6 Snapshot *
+ * *
+ * 0x07 0x08 0x09 0x10 *
+ * 7 8 9 Zoom *
+ * */
+ { 0x01, KEY_1 },
+ { 0x02, KEY_2 },
+ { 0x03, KEY_3 },
+ { 0x0D, KEY_SETUP }, /* Setup key */
+ { 0x04, KEY_4 },
+ { 0x05, KEY_5 },
+ { 0x06, KEY_6 },
+ { 0x19, KEY_CAMERA }, /* Snapshot key */
+ { 0x07, KEY_7 },
+ { 0x08, KEY_8 },
+ { 0x09, KEY_9 },
+ { 0x10, KEY_ZOOM },
+
+ /* 0x0A 0x00 0x0B 0x0C *
+ * RECALL 0 ChannelUp VolumeUp *
+ * */
+ { 0x0A, KEY_AGAIN },
+ { 0x00, KEY_0 },
+ { 0x0B, KEY_CHANNELUP },
+ { 0x0C, KEY_VOLUMEUP },
+
+ /* 0x1B 0x1D 0x15 0x18 *
+ * Timeshift Record ChannelDown VolumeDown *
+ * */
+
+ { 0x1B, KEY_TIME },
+ { 0x1D, KEY_RECORD },
+ { 0x15, KEY_CHANNELDOWN },
+ { 0x18, KEY_VOLUMEDOWN },
+
+ /* 0x0E 0x1E 0x0F 0x1A *
+ * Stop Pause Previous Next *
+ * */
+
+ { 0x0E, KEY_STOP },
+ { 0x1E, KEY_PAUSE },
+ { 0x0F, KEY_PREVIOUS },
+ { 0x1A, KEY_NEXT },
+
+};
+
+static struct rc_keymap behold_columbus_map = {
+ .map = {
+ .scan = behold_columbus,
+ .size = ARRAY_SIZE(behold_columbus),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_BEHOLD_COLUMBUS,
+ }
+};
+
+static int __init init_rc_map_behold_columbus(void)
+{
+ return ir_register_map(&behold_columbus_map);
+}
+
+static void __exit exit_rc_map_behold_columbus(void)
+{
+ ir_unregister_map(&behold_columbus_map);
+}
+
+module_init(init_rc_map_behold_columbus)
+module_exit(exit_rc_map_behold_columbus)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-behold.c b/drivers/media/IR/keymaps/rc-behold.c
new file mode 100644
index 000000000000..abc140b2098b
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-behold.c
@@ -0,0 +1,141 @@
+/* behold.h - Keytable for behold Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/*
+ * Igor Kuznetsov <igk72@ya.ru>
+ * Andrey J. Melnikov <temnota@kmv.ru>
+ *
+ * Keytable is used by BeholdTV 60x series, M6 series at
+ * least, and probably other cards too.
+ * The "ascii-art picture" below (in comments, first row
+ * is the keycode in hex, and subsequent row(s) shows
+ * the button labels (several variants when appropriate)
+ * helps to descide which keycodes to assign to the buttons.
+ */
+
+static struct ir_scancode behold[] = {
+
+ /* 0x1c 0x12 *
+ * TV/FM POWER *
+ * */
+ { 0x1c, KEY_TUNER }, /* XXX KEY_TV / KEY_RADIO */
+ { 0x12, KEY_POWER },
+
+ /* 0x01 0x02 0x03 *
+ * 1 2 3 *
+ * *
+ * 0x04 0x05 0x06 *
+ * 4 5 6 *
+ * *
+ * 0x07 0x08 0x09 *
+ * 7 8 9 *
+ * */
+ { 0x01, KEY_1 },
+ { 0x02, KEY_2 },
+ { 0x03, KEY_3 },
+ { 0x04, KEY_4 },
+ { 0x05, KEY_5 },
+ { 0x06, KEY_6 },
+ { 0x07, KEY_7 },
+ { 0x08, KEY_8 },
+ { 0x09, KEY_9 },
+
+ /* 0x0a 0x00 0x17 *
+ * RECALL 0 MODE *
+ * */
+ { 0x0a, KEY_AGAIN },
+ { 0x00, KEY_0 },
+ { 0x17, KEY_MODE },
+
+ /* 0x14 0x10 *
+ * ASPECT FULLSCREEN *
+ * */
+ { 0x14, KEY_SCREEN },
+ { 0x10, KEY_ZOOM },
+
+ /* 0x0b *
+ * Up *
+ * *
+ * 0x18 0x16 0x0c *
+ * Left Ok Right *
+ * *
+ * 0x15 *
+ * Down *
+ * */
+ { 0x0b, KEY_CHANNELUP },
+ { 0x18, KEY_VOLUMEDOWN },
+ { 0x16, KEY_OK }, /* XXX KEY_ENTER */
+ { 0x0c, KEY_VOLUMEUP },
+ { 0x15, KEY_CHANNELDOWN },
+
+ /* 0x11 0x0d *
+ * MUTE INFO *
+ * */
+ { 0x11, KEY_MUTE },
+ { 0x0d, KEY_INFO },
+
+ /* 0x0f 0x1b 0x1a *
+ * RECORD PLAY/PAUSE STOP *
+ * *
+ * 0x0e 0x1f 0x1e *
+ *TELETEXT AUDIO SOURCE *
+ * RED YELLOW *
+ * */
+ { 0x0f, KEY_RECORD },
+ { 0x1b, KEY_PLAYPAUSE },
+ { 0x1a, KEY_STOP },
+ { 0x0e, KEY_TEXT },
+ { 0x1f, KEY_RED }, /* XXX KEY_AUDIO */
+ { 0x1e, KEY_YELLOW }, /* XXX KEY_SOURCE */
+
+ /* 0x1d 0x13 0x19 *
+ * SLEEP PREVIEW DVB *
+ * GREEN BLUE *
+ * */
+ { 0x1d, KEY_SLEEP },
+ { 0x13, KEY_GREEN },
+ { 0x19, KEY_BLUE }, /* XXX KEY_SAT */
+
+ /* 0x58 0x5c *
+ * FREEZE SNAPSHOT *
+ * */
+ { 0x58, KEY_SLOW },
+ { 0x5c, KEY_CAMERA },
+
+};
+
+static struct rc_keymap behold_map = {
+ .map = {
+ .scan = behold,
+ .size = ARRAY_SIZE(behold),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_BEHOLD,
+ }
+};
+
+static int __init init_rc_map_behold(void)
+{
+ return ir_register_map(&behold_map);
+}
+
+static void __exit exit_rc_map_behold(void)
+{
+ ir_unregister_map(&behold_map);
+}
+
+module_init(init_rc_map_behold)
+module_exit(exit_rc_map_behold)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-budget-ci-old.c b/drivers/media/IR/keymaps/rc-budget-ci-old.c
new file mode 100644
index 000000000000..64c2ac913338
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-budget-ci-old.c
@@ -0,0 +1,92 @@
+/* budget-ci-old.h - Keytable for budget_ci_old Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* From reading the following remotes:
+ * Zenith Universal 7 / TV Mode 807 / VCR Mode 837
+ * Hauppauge (from NOVA-CI-s box product)
+ * This is a "middle of the road" approach, differences are noted
+ */
+
+static struct ir_scancode budget_ci_old[] = {
+ { 0x00, KEY_0 },
+ { 0x01, KEY_1 },
+ { 0x02, KEY_2 },
+ { 0x03, KEY_3 },
+ { 0x04, KEY_4 },
+ { 0x05, KEY_5 },
+ { 0x06, KEY_6 },
+ { 0x07, KEY_7 },
+ { 0x08, KEY_8 },
+ { 0x09, KEY_9 },
+ { 0x0a, KEY_ENTER },
+ { 0x0b, KEY_RED },
+ { 0x0c, KEY_POWER }, /* RADIO on Hauppauge */
+ { 0x0d, KEY_MUTE },
+ { 0x0f, KEY_A }, /* TV on Hauppauge */
+ { 0x10, KEY_VOLUMEUP },
+ { 0x11, KEY_VOLUMEDOWN },
+ { 0x14, KEY_B },
+ { 0x1c, KEY_UP },
+ { 0x1d, KEY_DOWN },
+ { 0x1e, KEY_OPTION }, /* RESERVED on Hauppauge */
+ { 0x1f, KEY_BREAK },
+ { 0x20, KEY_CHANNELUP },
+ { 0x21, KEY_CHANNELDOWN },
+ { 0x22, KEY_PREVIOUS }, /* Prev Ch on Zenith, SOURCE on Hauppauge */
+ { 0x24, KEY_RESTART },
+ { 0x25, KEY_OK },
+ { 0x26, KEY_CYCLEWINDOWS }, /* MINIMIZE on Hauppauge */
+ { 0x28, KEY_ENTER }, /* VCR mode on Zenith */
+ { 0x29, KEY_PAUSE },
+ { 0x2b, KEY_RIGHT },
+ { 0x2c, KEY_LEFT },
+ { 0x2e, KEY_MENU }, /* FULL SCREEN on Hauppauge */
+ { 0x30, KEY_SLOW },
+ { 0x31, KEY_PREVIOUS }, /* VCR mode on Zenith */
+ { 0x32, KEY_REWIND },
+ { 0x34, KEY_FASTFORWARD },
+ { 0x35, KEY_PLAY },
+ { 0x36, KEY_STOP },
+ { 0x37, KEY_RECORD },
+ { 0x38, KEY_TUNER }, /* TV/VCR on Zenith */
+ { 0x3a, KEY_C },
+ { 0x3c, KEY_EXIT },
+ { 0x3d, KEY_POWER2 },
+ { 0x3e, KEY_TUNER },
+};
+
+static struct rc_keymap budget_ci_old_map = {
+ .map = {
+ .scan = budget_ci_old,
+ .size = ARRAY_SIZE(budget_ci_old),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_BUDGET_CI_OLD,
+ }
+};
+
+static int __init init_rc_map_budget_ci_old(void)
+{
+ return ir_register_map(&budget_ci_old_map);
+}
+
+static void __exit exit_rc_map_budget_ci_old(void)
+{
+ ir_unregister_map(&budget_ci_old_map);
+}
+
+module_init(init_rc_map_budget_ci_old)
+module_exit(exit_rc_map_budget_ci_old)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-cinergy-1400.c b/drivers/media/IR/keymaps/rc-cinergy-1400.c
new file mode 100644
index 000000000000..074f2c2c2c61
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-cinergy-1400.c
@@ -0,0 +1,84 @@
+/* cinergy-1400.h - Keytable for cinergy_1400 Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* Cinergy 1400 DVB-T */
+
+static struct ir_scancode cinergy_1400[] = {
+ { 0x01, KEY_POWER },
+ { 0x02, KEY_1 },
+ { 0x03, KEY_2 },
+ { 0x04, KEY_3 },
+ { 0x05, KEY_4 },
+ { 0x06, KEY_5 },
+ { 0x07, KEY_6 },
+ { 0x08, KEY_7 },
+ { 0x09, KEY_8 },
+ { 0x0a, KEY_9 },
+ { 0x0c, KEY_0 },
+
+ { 0x0b, KEY_VIDEO },
+ { 0x0d, KEY_REFRESH },
+ { 0x0e, KEY_SELECT },
+ { 0x0f, KEY_EPG },
+ { 0x10, KEY_UP },
+ { 0x11, KEY_LEFT },
+ { 0x12, KEY_OK },
+ { 0x13, KEY_RIGHT },
+ { 0x14, KEY_DOWN },
+ { 0x15, KEY_TEXT },
+ { 0x16, KEY_INFO },
+
+ { 0x17, KEY_RED },
+ { 0x18, KEY_GREEN },
+ { 0x19, KEY_YELLOW },
+ { 0x1a, KEY_BLUE },
+
+ { 0x1b, KEY_CHANNELUP },
+ { 0x1c, KEY_VOLUMEUP },
+ { 0x1d, KEY_MUTE },
+ { 0x1e, KEY_VOLUMEDOWN },
+ { 0x1f, KEY_CHANNELDOWN },
+
+ { 0x40, KEY_PAUSE },
+ { 0x4c, KEY_PLAY },
+ { 0x58, KEY_RECORD },
+ { 0x54, KEY_PREVIOUS },
+ { 0x48, KEY_STOP },
+ { 0x5c, KEY_NEXT },
+};
+
+static struct rc_keymap cinergy_1400_map = {
+ .map = {
+ .scan = cinergy_1400,
+ .size = ARRAY_SIZE(cinergy_1400),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_CINERGY_1400,
+ }
+};
+
+static int __init init_rc_map_cinergy_1400(void)
+{
+ return ir_register_map(&cinergy_1400_map);
+}
+
+static void __exit exit_rc_map_cinergy_1400(void)
+{
+ ir_unregister_map(&cinergy_1400_map);
+}
+
+module_init(init_rc_map_cinergy_1400)
+module_exit(exit_rc_map_cinergy_1400)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-cinergy.c b/drivers/media/IR/keymaps/rc-cinergy.c
new file mode 100644
index 000000000000..cf84c3dba742
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-cinergy.c
@@ -0,0 +1,78 @@
+/* cinergy.h - Keytable for cinergy Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+static struct ir_scancode cinergy[] = {
+ { 0x00, KEY_0 },
+ { 0x01, KEY_1 },
+ { 0x02, KEY_2 },
+ { 0x03, KEY_3 },
+ { 0x04, KEY_4 },
+ { 0x05, KEY_5 },
+ { 0x06, KEY_6 },
+ { 0x07, KEY_7 },
+ { 0x08, KEY_8 },
+ { 0x09, KEY_9 },
+
+ { 0x0a, KEY_POWER },
+ { 0x0b, KEY_PROG1 }, /* app */
+ { 0x0c, KEY_ZOOM }, /* zoom/fullscreen */
+ { 0x0d, KEY_CHANNELUP }, /* channel */
+ { 0x0e, KEY_CHANNELDOWN }, /* channel- */
+ { 0x0f, KEY_VOLUMEUP },
+ { 0x10, KEY_VOLUMEDOWN },
+ { 0x11, KEY_TUNER }, /* AV */
+ { 0x12, KEY_NUMLOCK }, /* -/-- */
+ { 0x13, KEY_AUDIO }, /* audio */
+ { 0x14, KEY_MUTE },
+ { 0x15, KEY_UP },
+ { 0x16, KEY_DOWN },
+ { 0x17, KEY_LEFT },
+ { 0x18, KEY_RIGHT },
+ { 0x19, BTN_LEFT, },
+ { 0x1a, BTN_RIGHT, },
+ { 0x1b, KEY_WWW }, /* text */
+ { 0x1c, KEY_REWIND },
+ { 0x1d, KEY_FORWARD },
+ { 0x1e, KEY_RECORD },
+ { 0x1f, KEY_PLAY },
+ { 0x20, KEY_PREVIOUSSONG },
+ { 0x21, KEY_NEXTSONG },
+ { 0x22, KEY_PAUSE },
+ { 0x23, KEY_STOP },
+};
+
+static struct rc_keymap cinergy_map = {
+ .map = {
+ .scan = cinergy,
+ .size = ARRAY_SIZE(cinergy),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_CINERGY,
+ }
+};
+
+static int __init init_rc_map_cinergy(void)
+{
+ return ir_register_map(&cinergy_map);
+}
+
+static void __exit exit_rc_map_cinergy(void)
+{
+ ir_unregister_map(&cinergy_map);
+}
+
+module_init(init_rc_map_cinergy)
+module_exit(exit_rc_map_cinergy)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-dm1105-nec.c b/drivers/media/IR/keymaps/rc-dm1105-nec.c
new file mode 100644
index 000000000000..90684d0efea3
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-dm1105-nec.c
@@ -0,0 +1,76 @@
+/* dm1105-nec.h - Keytable for dm1105_nec Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* DVBWorld remotes
+ Igor M. Liplianin <liplianin@me.by>
+ */
+
+static struct ir_scancode dm1105_nec[] = {
+ { 0x0a, KEY_POWER2}, /* power */
+ { 0x0c, KEY_MUTE}, /* mute */
+ { 0x11, KEY_1},
+ { 0x12, KEY_2},
+ { 0x13, KEY_3},
+ { 0x14, KEY_4},
+ { 0x15, KEY_5},
+ { 0x16, KEY_6},
+ { 0x17, KEY_7},
+ { 0x18, KEY_8},
+ { 0x19, KEY_9},
+ { 0x10, KEY_0},
+ { 0x1c, KEY_CHANNELUP}, /* ch+ */
+ { 0x0f, KEY_CHANNELDOWN}, /* ch- */
+ { 0x1a, KEY_VOLUMEUP}, /* vol+ */
+ { 0x0e, KEY_VOLUMEDOWN}, /* vol- */
+ { 0x04, KEY_RECORD}, /* rec */
+ { 0x09, KEY_CHANNEL}, /* fav */
+ { 0x08, KEY_BACKSPACE}, /* rewind */
+ { 0x07, KEY_FASTFORWARD}, /* fast */
+ { 0x0b, KEY_PAUSE}, /* pause */
+ { 0x02, KEY_ESC}, /* cancel */
+ { 0x03, KEY_TAB}, /* tab */
+ { 0x00, KEY_UP}, /* up */
+ { 0x1f, KEY_ENTER}, /* ok */
+ { 0x01, KEY_DOWN}, /* down */
+ { 0x05, KEY_RECORD}, /* cap */
+ { 0x06, KEY_STOP}, /* stop */
+ { 0x40, KEY_ZOOM}, /* full */
+ { 0x1e, KEY_TV}, /* tvmode */
+ { 0x1b, KEY_B}, /* recall */
+};
+
+static struct rc_keymap dm1105_nec_map = {
+ .map = {
+ .scan = dm1105_nec,
+ .size = ARRAY_SIZE(dm1105_nec),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_DM1105_NEC,
+ }
+};
+
+static int __init init_rc_map_dm1105_nec(void)
+{
+ return ir_register_map(&dm1105_nec_map);
+}
+
+static void __exit exit_rc_map_dm1105_nec(void)
+{
+ ir_unregister_map(&dm1105_nec_map);
+}
+
+module_init(init_rc_map_dm1105_nec)
+module_exit(exit_rc_map_dm1105_nec)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-dntv-live-dvb-t.c b/drivers/media/IR/keymaps/rc-dntv-live-dvb-t.c
new file mode 100644
index 000000000000..8a4027af964a
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-dntv-live-dvb-t.c
@@ -0,0 +1,78 @@
+/* dntv-live-dvb-t.h - Keytable for dntv_live_dvb_t Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* DigitalNow DNTV Live DVB-T Remote */
+
+static struct ir_scancode dntv_live_dvb_t[] = {
+ { 0x00, KEY_ESC }, /* 'go up a level?' */
+ /* Keys 0 to 9 */
+ { 0x0a, KEY_0 },
+ { 0x01, KEY_1 },
+ { 0x02, KEY_2 },
+ { 0x03, KEY_3 },
+ { 0x04, KEY_4 },
+ { 0x05, KEY_5 },
+ { 0x06, KEY_6 },
+ { 0x07, KEY_7 },
+ { 0x08, KEY_8 },
+ { 0x09, KEY_9 },
+
+ { 0x0b, KEY_TUNER }, /* tv/fm */
+ { 0x0c, KEY_SEARCH }, /* scan */
+ { 0x0d, KEY_STOP },
+ { 0x0e, KEY_PAUSE },
+ { 0x0f, KEY_LIST }, /* source */
+
+ { 0x10, KEY_MUTE },
+ { 0x11, KEY_REWIND }, /* backward << */
+ { 0x12, KEY_POWER },
+ { 0x13, KEY_CAMERA }, /* snap */
+ { 0x14, KEY_AUDIO }, /* stereo */
+ { 0x15, KEY_CLEAR }, /* reset */
+ { 0x16, KEY_PLAY },
+ { 0x17, KEY_ENTER },
+ { 0x18, KEY_ZOOM }, /* full screen */
+ { 0x19, KEY_FASTFORWARD }, /* forward >> */
+ { 0x1a, KEY_CHANNELUP },
+ { 0x1b, KEY_VOLUMEUP },
+ { 0x1c, KEY_INFO }, /* preview */
+ { 0x1d, KEY_RECORD }, /* record */
+ { 0x1e, KEY_CHANNELDOWN },
+ { 0x1f, KEY_VOLUMEDOWN },
+};
+
+static struct rc_keymap dntv_live_dvb_t_map = {
+ .map = {
+ .scan = dntv_live_dvb_t,
+ .size = ARRAY_SIZE(dntv_live_dvb_t),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_DNTV_LIVE_DVB_T,
+ }
+};
+
+static int __init init_rc_map_dntv_live_dvb_t(void)
+{
+ return ir_register_map(&dntv_live_dvb_t_map);
+}
+
+static void __exit exit_rc_map_dntv_live_dvb_t(void)
+{
+ ir_unregister_map(&dntv_live_dvb_t_map);
+}
+
+module_init(init_rc_map_dntv_live_dvb_t)
+module_exit(exit_rc_map_dntv_live_dvb_t)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-dntv-live-dvbt-pro.c b/drivers/media/IR/keymaps/rc-dntv-live-dvbt-pro.c
new file mode 100644
index 000000000000..6f4d60764d59
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-dntv-live-dvbt-pro.c
@@ -0,0 +1,97 @@
+/* dntv-live-dvbt-pro.h - Keytable for dntv_live_dvbt_pro Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* DigitalNow DNTV Live! DVB-T Pro Remote */
+
+static struct ir_scancode dntv_live_dvbt_pro[] = {
+ { 0x16, KEY_POWER },
+ { 0x5b, KEY_HOME },
+
+ { 0x55, KEY_TV }, /* live tv */
+ { 0x58, KEY_TUNER }, /* digital Radio */
+ { 0x5a, KEY_RADIO }, /* FM radio */
+ { 0x59, KEY_DVD }, /* dvd menu */
+ { 0x03, KEY_1 },
+ { 0x01, KEY_2 },
+ { 0x06, KEY_3 },
+ { 0x09, KEY_4 },
+ { 0x1d, KEY_5 },
+ { 0x1f, KEY_6 },
+ { 0x0d, KEY_7 },
+ { 0x19, KEY_8 },
+ { 0x1b, KEY_9 },
+ { 0x0c, KEY_CANCEL },
+ { 0x15, KEY_0 },
+ { 0x4a, KEY_CLEAR },
+ { 0x13, KEY_BACK },
+ { 0x00, KEY_TAB },
+ { 0x4b, KEY_UP },
+ { 0x4e, KEY_LEFT },
+ { 0x4f, KEY_OK },
+ { 0x52, KEY_RIGHT },
+ { 0x51, KEY_DOWN },
+ { 0x1e, KEY_VOLUMEUP },
+ { 0x0a, KEY_VOLUMEDOWN },
+ { 0x02, KEY_CHANNELDOWN },
+ { 0x05, KEY_CHANNELUP },
+ { 0x11, KEY_RECORD },
+ { 0x14, KEY_PLAY },
+ { 0x4c, KEY_PAUSE },
+ { 0x1a, KEY_STOP },
+ { 0x40, KEY_REWIND },
+ { 0x12, KEY_FASTFORWARD },
+ { 0x41, KEY_PREVIOUSSONG }, /* replay |< */
+ { 0x42, KEY_NEXTSONG }, /* skip >| */
+ { 0x54, KEY_CAMERA }, /* capture */
+ { 0x50, KEY_LANGUAGE }, /* sap */
+ { 0x47, KEY_TV2 }, /* pip */
+ { 0x4d, KEY_SCREEN },
+ { 0x43, KEY_SUBTITLE },
+ { 0x10, KEY_MUTE },
+ { 0x49, KEY_AUDIO }, /* l/r */
+ { 0x07, KEY_SLEEP },
+ { 0x08, KEY_VIDEO }, /* a/v */
+ { 0x0e, KEY_PREVIOUS }, /* recall */
+ { 0x45, KEY_ZOOM }, /* zoom + */
+ { 0x46, KEY_ANGLE }, /* zoom - */
+ { 0x56, KEY_RED },
+ { 0x57, KEY_GREEN },
+ { 0x5c, KEY_YELLOW },
+ { 0x5d, KEY_BLUE },
+};
+
+static struct rc_keymap dntv_live_dvbt_pro_map = {
+ .map = {
+ .scan = dntv_live_dvbt_pro,
+ .size = ARRAY_SIZE(dntv_live_dvbt_pro),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_DNTV_LIVE_DVBT_PRO,
+ }
+};
+
+static int __init init_rc_map_dntv_live_dvbt_pro(void)
+{
+ return ir_register_map(&dntv_live_dvbt_pro_map);
+}
+
+static void __exit exit_rc_map_dntv_live_dvbt_pro(void)
+{
+ ir_unregister_map(&dntv_live_dvbt_pro_map);
+}
+
+module_init(init_rc_map_dntv_live_dvbt_pro)
+module_exit(exit_rc_map_dntv_live_dvbt_pro)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-em-terratec.c b/drivers/media/IR/keymaps/rc-em-terratec.c
new file mode 100644
index 000000000000..3130c9c29e6b
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-em-terratec.c
@@ -0,0 +1,69 @@
+/* em-terratec.h - Keytable for em_terratec Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+static struct ir_scancode em_terratec[] = {
+ { 0x01, KEY_CHANNEL },
+ { 0x02, KEY_SELECT },
+ { 0x03, KEY_MUTE },
+ { 0x04, KEY_POWER },
+ { 0x05, KEY_1 },
+ { 0x06, KEY_2 },
+ { 0x07, KEY_3 },
+ { 0x08, KEY_CHANNELUP },
+ { 0x09, KEY_4 },
+ { 0x0a, KEY_5 },
+ { 0x0b, KEY_6 },
+ { 0x0c, KEY_CHANNELDOWN },
+ { 0x0d, KEY_7 },
+ { 0x0e, KEY_8 },
+ { 0x0f, KEY_9 },
+ { 0x10, KEY_VOLUMEUP },
+ { 0x11, KEY_0 },
+ { 0x12, KEY_MENU },
+ { 0x13, KEY_PRINT },
+ { 0x14, KEY_VOLUMEDOWN },
+ { 0x16, KEY_PAUSE },
+ { 0x18, KEY_RECORD },
+ { 0x19, KEY_REWIND },
+ { 0x1a, KEY_PLAY },
+ { 0x1b, KEY_FORWARD },
+ { 0x1c, KEY_BACKSPACE },
+ { 0x1e, KEY_STOP },
+ { 0x40, KEY_ZOOM },
+};
+
+static struct rc_keymap em_terratec_map = {
+ .map = {
+ .scan = em_terratec,
+ .size = ARRAY_SIZE(em_terratec),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_EM_TERRATEC,
+ }
+};
+
+static int __init init_rc_map_em_terratec(void)
+{
+ return ir_register_map(&em_terratec_map);
+}
+
+static void __exit exit_rc_map_em_terratec(void)
+{
+ ir_unregister_map(&em_terratec_map);
+}
+
+module_init(init_rc_map_em_terratec)
+module_exit(exit_rc_map_em_terratec)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-empty.c b/drivers/media/IR/keymaps/rc-empty.c
new file mode 100644
index 000000000000..3b338d84b476
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-empty.c
@@ -0,0 +1,44 @@
+/* empty.h - Keytable for empty Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* empty keytable; can be used as a placeholder for not-yet-created keytables */
+
+static struct ir_scancode empty[] = {
+ { 0x2a, KEY_COFFEE },
+};
+
+static struct rc_keymap empty_map = {
+ .map = {
+ .scan = empty,
+ .size = ARRAY_SIZE(empty),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_EMPTY,
+ }
+};
+
+static int __init init_rc_map_empty(void)
+{
+ return ir_register_map(&empty_map);
+}
+
+static void __exit exit_rc_map_empty(void)
+{
+ ir_unregister_map(&empty_map);
+}
+
+module_init(init_rc_map_empty)
+module_exit(exit_rc_map_empty)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-encore-enltv-fm53.c b/drivers/media/IR/keymaps/rc-encore-enltv-fm53.c
new file mode 100644
index 000000000000..4b816967877e
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-encore-enltv-fm53.c
@@ -0,0 +1,81 @@
+/* encore-enltv-fm53.h - Keytable for encore_enltv_fm53 Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* Encore ENLTV-FM v5.3
+ Mauro Carvalho Chehab <mchehab@infradead.org>
+ */
+
+static struct ir_scancode encore_enltv_fm53[] = {
+ { 0x10, KEY_POWER2},
+ { 0x06, KEY_MUTE},
+
+ { 0x09, KEY_1},
+ { 0x1d, KEY_2},
+ { 0x1f, KEY_3},
+ { 0x19, KEY_4},
+ { 0x1b, KEY_5},
+ { 0x11, KEY_6},
+ { 0x17, KEY_7},
+ { 0x12, KEY_8},
+ { 0x16, KEY_9},
+ { 0x48, KEY_0},
+
+ { 0x04, KEY_LIST}, /* -/-- */
+ { 0x40, KEY_LAST}, /* recall */
+
+ { 0x02, KEY_MODE}, /* TV/AV */
+ { 0x05, KEY_CAMERA}, /* SNAPSHOT */
+
+ { 0x4c, KEY_CHANNELUP}, /* UP */
+ { 0x00, KEY_CHANNELDOWN}, /* DOWN */
+ { 0x0d, KEY_VOLUMEUP}, /* RIGHT */
+ { 0x15, KEY_VOLUMEDOWN}, /* LEFT */
+ { 0x49, KEY_ENTER}, /* OK */
+
+ { 0x54, KEY_RECORD},
+ { 0x4d, KEY_PLAY}, /* pause */
+
+ { 0x1e, KEY_MENU}, /* video setting */
+ { 0x0e, KEY_RIGHT}, /* <- */
+ { 0x1a, KEY_LEFT}, /* -> */
+
+ { 0x0a, KEY_CLEAR}, /* video default */
+ { 0x0c, KEY_ZOOM}, /* hide panel */
+ { 0x47, KEY_SLEEP}, /* shutdown */
+};
+
+static struct rc_keymap encore_enltv_fm53_map = {
+ .map = {
+ .scan = encore_enltv_fm53,
+ .size = ARRAY_SIZE(encore_enltv_fm53),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_ENCORE_ENLTV_FM53,
+ }
+};
+
+static int __init init_rc_map_encore_enltv_fm53(void)
+{
+ return ir_register_map(&encore_enltv_fm53_map);
+}
+
+static void __exit exit_rc_map_encore_enltv_fm53(void)
+{
+ ir_unregister_map(&encore_enltv_fm53_map);
+}
+
+module_init(init_rc_map_encore_enltv_fm53)
+module_exit(exit_rc_map_encore_enltv_fm53)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-encore-enltv.c b/drivers/media/IR/keymaps/rc-encore-enltv.c
new file mode 100644
index 000000000000..9fabffd28cc9
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-encore-enltv.c
@@ -0,0 +1,112 @@
+/* encore-enltv.h - Keytable for encore_enltv Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* Encore ENLTV-FM - black plastic, white front cover with white glowing buttons
+ Juan Pablo Sormani <sorman@gmail.com> */
+
+static struct ir_scancode encore_enltv[] = {
+
+ /* The Power button does nothing, not even in the Windows app,
+ although it sends data (used for BIOS wakeup?) */
+ { 0x0d, KEY_MUTE },
+
+ { 0x1e, KEY_TV },
+ { 0x00, KEY_VIDEO },
+ { 0x01, KEY_AUDIO }, /* music */
+ { 0x02, KEY_MHP }, /* picture */
+
+ { 0x1f, KEY_1 },
+ { 0x03, KEY_2 },
+ { 0x04, KEY_3 },
+ { 0x05, KEY_4 },
+ { 0x1c, KEY_5 },
+ { 0x06, KEY_6 },
+ { 0x07, KEY_7 },
+ { 0x08, KEY_8 },
+ { 0x1d, KEY_9 },
+ { 0x0a, KEY_0 },
+
+ { 0x09, KEY_LIST }, /* -/-- */
+ { 0x0b, KEY_LAST }, /* recall */
+
+ { 0x14, KEY_HOME }, /* win start menu */
+ { 0x15, KEY_EXIT }, /* exit */
+ { 0x16, KEY_CHANNELUP }, /* UP */
+ { 0x12, KEY_CHANNELDOWN }, /* DOWN */
+ { 0x0c, KEY_VOLUMEUP }, /* RIGHT */
+ { 0x17, KEY_VOLUMEDOWN }, /* LEFT */
+
+ { 0x18, KEY_ENTER }, /* OK */
+
+ { 0x0e, KEY_ESC },
+ { 0x13, KEY_CYCLEWINDOWS }, /* desktop */
+ { 0x11, KEY_TAB },
+ { 0x19, KEY_SWITCHVIDEOMODE }, /* switch */
+
+ { 0x1a, KEY_MENU },
+ { 0x1b, KEY_ZOOM }, /* fullscreen */
+ { 0x44, KEY_TIME }, /* time shift */
+ { 0x40, KEY_MODE }, /* source */
+
+ { 0x5a, KEY_RECORD },
+ { 0x42, KEY_PLAY }, /* play/pause */
+ { 0x45, KEY_STOP },
+ { 0x43, KEY_CAMERA }, /* camera icon */
+
+ { 0x48, KEY_REWIND },
+ { 0x4a, KEY_FASTFORWARD },
+ { 0x49, KEY_PREVIOUS },
+ { 0x4b, KEY_NEXT },
+
+ { 0x4c, KEY_FAVORITES }, /* tv wall */
+ { 0x4d, KEY_SOUND }, /* DVD sound */
+ { 0x4e, KEY_LANGUAGE }, /* DVD lang */
+ { 0x4f, KEY_TEXT }, /* DVD text */
+
+ { 0x50, KEY_SLEEP }, /* shutdown */
+ { 0x51, KEY_MODE }, /* stereo > main */
+ { 0x52, KEY_SELECT }, /* stereo > sap */
+ { 0x53, KEY_PROG1 }, /* teletext */
+
+
+ { 0x59, KEY_RED }, /* AP1 */
+ { 0x41, KEY_GREEN }, /* AP2 */
+ { 0x47, KEY_YELLOW }, /* AP3 */
+ { 0x57, KEY_BLUE }, /* AP4 */
+};
+
+static struct rc_keymap encore_enltv_map = {
+ .map = {
+ .scan = encore_enltv,
+ .size = ARRAY_SIZE(encore_enltv),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_ENCORE_ENLTV,
+ }
+};
+
+static int __init init_rc_map_encore_enltv(void)
+{
+ return ir_register_map(&encore_enltv_map);
+}
+
+static void __exit exit_rc_map_encore_enltv(void)
+{
+ ir_unregister_map(&encore_enltv_map);
+}
+
+module_init(init_rc_map_encore_enltv)
+module_exit(exit_rc_map_encore_enltv)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-encore-enltv2.c b/drivers/media/IR/keymaps/rc-encore-enltv2.c
new file mode 100644
index 000000000000..efefd5166618
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-encore-enltv2.c
@@ -0,0 +1,90 @@
+/* encore-enltv2.h - Keytable for encore_enltv2 Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* Encore ENLTV2-FM - silver plastic - "Wand Media" written at the bottom
+ Mauro Carvalho Chehab <mchehab@infradead.org> */
+
+static struct ir_scancode encore_enltv2[] = {
+ { 0x4c, KEY_POWER2 },
+ { 0x4a, KEY_TUNER },
+ { 0x40, KEY_1 },
+ { 0x60, KEY_2 },
+ { 0x50, KEY_3 },
+ { 0x70, KEY_4 },
+ { 0x48, KEY_5 },
+ { 0x68, KEY_6 },
+ { 0x58, KEY_7 },
+ { 0x78, KEY_8 },
+ { 0x44, KEY_9 },
+ { 0x54, KEY_0 },
+
+ { 0x64, KEY_LAST }, /* +100 */
+ { 0x4e, KEY_AGAIN }, /* Recall */
+
+ { 0x6c, KEY_SWITCHVIDEOMODE }, /* Video Source */
+ { 0x5e, KEY_MENU },
+ { 0x56, KEY_SCREEN },
+ { 0x7a, KEY_SETUP },
+
+ { 0x46, KEY_MUTE },
+ { 0x5c, KEY_MODE }, /* Stereo */
+ { 0x74, KEY_INFO },
+ { 0x7c, KEY_CLEAR },
+
+ { 0x55, KEY_UP },
+ { 0x49, KEY_DOWN },
+ { 0x7e, KEY_LEFT },
+ { 0x59, KEY_RIGHT },
+ { 0x6a, KEY_ENTER },
+
+ { 0x42, KEY_VOLUMEUP },
+ { 0x62, KEY_VOLUMEDOWN },
+ { 0x52, KEY_CHANNELUP },
+ { 0x72, KEY_CHANNELDOWN },
+
+ { 0x41, KEY_RECORD },
+ { 0x51, KEY_CAMERA }, /* Snapshot */
+ { 0x75, KEY_TIME }, /* Timeshift */
+ { 0x71, KEY_TV2 }, /* PIP */
+
+ { 0x45, KEY_REWIND },
+ { 0x6f, KEY_PAUSE },
+ { 0x7d, KEY_FORWARD },
+ { 0x79, KEY_STOP },
+};
+
+static struct rc_keymap encore_enltv2_map = {
+ .map = {
+ .scan = encore_enltv2,
+ .size = ARRAY_SIZE(encore_enltv2),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_ENCORE_ENLTV2,
+ }
+};
+
+static int __init init_rc_map_encore_enltv2(void)
+{
+ return ir_register_map(&encore_enltv2_map);
+}
+
+static void __exit exit_rc_map_encore_enltv2(void)
+{
+ ir_unregister_map(&encore_enltv2_map);
+}
+
+module_init(init_rc_map_encore_enltv2)
+module_exit(exit_rc_map_encore_enltv2)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-evga-indtube.c b/drivers/media/IR/keymaps/rc-evga-indtube.c
new file mode 100644
index 000000000000..3f3fb13813b3
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-evga-indtube.c
@@ -0,0 +1,61 @@
+/* evga-indtube.h - Keytable for evga_indtube Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* EVGA inDtube
+ Devin Heitmueller <devin.heitmueller@gmail.com>
+ */
+
+static struct ir_scancode evga_indtube[] = {
+ { 0x12, KEY_POWER},
+ { 0x02, KEY_MODE}, /* TV */
+ { 0x14, KEY_MUTE},
+ { 0x1a, KEY_CHANNELUP},
+ { 0x16, KEY_TV2}, /* PIP */
+ { 0x1d, KEY_VOLUMEUP},
+ { 0x05, KEY_CHANNELDOWN},
+ { 0x0f, KEY_PLAYPAUSE},
+ { 0x19, KEY_VOLUMEDOWN},
+ { 0x1c, KEY_REWIND},
+ { 0x0d, KEY_RECORD},
+ { 0x18, KEY_FORWARD},
+ { 0x1e, KEY_PREVIOUS},
+ { 0x1b, KEY_STOP},
+ { 0x1f, KEY_NEXT},
+ { 0x13, KEY_CAMERA},
+};
+
+static struct rc_keymap evga_indtube_map = {
+ .map = {
+ .scan = evga_indtube,
+ .size = ARRAY_SIZE(evga_indtube),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_EVGA_INDTUBE,
+ }
+};
+
+static int __init init_rc_map_evga_indtube(void)
+{
+ return ir_register_map(&evga_indtube_map);
+}
+
+static void __exit exit_rc_map_evga_indtube(void)
+{
+ ir_unregister_map(&evga_indtube_map);
+}
+
+module_init(init_rc_map_evga_indtube)
+module_exit(exit_rc_map_evga_indtube)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-eztv.c b/drivers/media/IR/keymaps/rc-eztv.c
new file mode 100644
index 000000000000..660907a78db9
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-eztv.c
@@ -0,0 +1,96 @@
+/* eztv.h - Keytable for eztv Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* Alfons Geser <a.geser@cox.net>
+ * updates from Job D. R. Borges <jobdrb@ig.com.br> */
+
+static struct ir_scancode eztv[] = {
+ { 0x12, KEY_POWER },
+ { 0x01, KEY_TV }, /* DVR */
+ { 0x15, KEY_DVD }, /* DVD */
+ { 0x17, KEY_AUDIO }, /* music */
+ /* DVR mode / DVD mode / music mode */
+
+ { 0x1b, KEY_MUTE }, /* mute */
+ { 0x02, KEY_LANGUAGE }, /* MTS/SAP / audio / autoseek */
+ { 0x1e, KEY_SUBTITLE }, /* closed captioning / subtitle / seek */
+ { 0x16, KEY_ZOOM }, /* full screen */
+ { 0x1c, KEY_VIDEO }, /* video source / eject / delall */
+ { 0x1d, KEY_RESTART }, /* playback / angle / del */
+ { 0x2f, KEY_SEARCH }, /* scan / menu / playlist */
+ { 0x30, KEY_CHANNEL }, /* CH surfing / bookmark / memo */
+
+ { 0x31, KEY_HELP }, /* help */
+ { 0x32, KEY_MODE }, /* num/memo */
+ { 0x33, KEY_ESC }, /* cancel */
+
+ { 0x0c, KEY_UP }, /* up */
+ { 0x10, KEY_DOWN }, /* down */
+ { 0x08, KEY_LEFT }, /* left */
+ { 0x04, KEY_RIGHT }, /* right */
+ { 0x03, KEY_SELECT }, /* select */
+
+ { 0x1f, KEY_REWIND }, /* rewind */
+ { 0x20, KEY_PLAYPAUSE },/* play/pause */
+ { 0x29, KEY_FORWARD }, /* forward */
+ { 0x14, KEY_AGAIN }, /* repeat */
+ { 0x2b, KEY_RECORD }, /* recording */
+ { 0x2c, KEY_STOP }, /* stop */
+ { 0x2d, KEY_PLAY }, /* play */
+ { 0x2e, KEY_CAMERA }, /* snapshot / shuffle */
+
+ { 0x00, KEY_0 },
+ { 0x05, KEY_1 },
+ { 0x06, KEY_2 },
+ { 0x07, KEY_3 },
+ { 0x09, KEY_4 },
+ { 0x0a, KEY_5 },
+ { 0x0b, KEY_6 },
+ { 0x0d, KEY_7 },
+ { 0x0e, KEY_8 },
+ { 0x0f, KEY_9 },
+
+ { 0x2a, KEY_VOLUMEUP },
+ { 0x11, KEY_VOLUMEDOWN },
+ { 0x18, KEY_CHANNELUP },/* CH.tracking up */
+ { 0x19, KEY_CHANNELDOWN },/* CH.tracking down */
+
+ { 0x13, KEY_ENTER }, /* enter */
+ { 0x21, KEY_DOT }, /* . (decimal dot) */
+};
+
+static struct rc_keymap eztv_map = {
+ .map = {
+ .scan = eztv,
+ .size = ARRAY_SIZE(eztv),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_EZTV,
+ }
+};
+
+static int __init init_rc_map_eztv(void)
+{
+ return ir_register_map(&eztv_map);
+}
+
+static void __exit exit_rc_map_eztv(void)
+{
+ ir_unregister_map(&eztv_map);
+}
+
+module_init(init_rc_map_eztv)
+module_exit(exit_rc_map_eztv)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-flydvb.c b/drivers/media/IR/keymaps/rc-flydvb.c
new file mode 100644
index 000000000000..a173c81035f4
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-flydvb.c
@@ -0,0 +1,77 @@
+/* flydvb.h - Keytable for flydvb Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+static struct ir_scancode flydvb[] = {
+ { 0x01, KEY_ZOOM }, /* Full Screen */
+ { 0x00, KEY_POWER }, /* Power */
+
+ { 0x03, KEY_1 },
+ { 0x04, KEY_2 },
+ { 0x05, KEY_3 },
+ { 0x07, KEY_4 },
+ { 0x08, KEY_5 },
+ { 0x09, KEY_6 },
+ { 0x0b, KEY_7 },
+ { 0x0c, KEY_8 },
+ { 0x0d, KEY_9 },
+ { 0x06, KEY_AGAIN }, /* Recall */
+ { 0x0f, KEY_0 },
+ { 0x10, KEY_MUTE }, /* Mute */
+ { 0x02, KEY_RADIO }, /* TV/Radio */
+ { 0x1b, KEY_LANGUAGE }, /* SAP (Second Audio Program) */
+
+ { 0x14, KEY_VOLUMEUP }, /* VOL+ */
+ { 0x17, KEY_VOLUMEDOWN }, /* VOL- */
+ { 0x12, KEY_CHANNELUP }, /* CH+ */
+ { 0x13, KEY_CHANNELDOWN }, /* CH- */
+ { 0x1d, KEY_ENTER }, /* Enter */
+
+ { 0x1a, KEY_MODE }, /* PIP */
+ { 0x18, KEY_TUNER }, /* Source */
+
+ { 0x1e, KEY_RECORD }, /* Record/Pause */
+ { 0x15, KEY_ANGLE }, /* Swap (no label on key) */
+ { 0x1c, KEY_PAUSE }, /* Timeshift/Pause */
+ { 0x19, KEY_BACK }, /* Rewind << */
+ { 0x0a, KEY_PLAYPAUSE }, /* Play/Pause */
+ { 0x1f, KEY_FORWARD }, /* Forward >> */
+ { 0x16, KEY_PREVIOUS }, /* Back |<< */
+ { 0x11, KEY_STOP }, /* Stop */
+ { 0x0e, KEY_NEXT }, /* End >>| */
+};
+
+static struct rc_keymap flydvb_map = {
+ .map = {
+ .scan = flydvb,
+ .size = ARRAY_SIZE(flydvb),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_FLYDVB,
+ }
+};
+
+static int __init init_rc_map_flydvb(void)
+{
+ return ir_register_map(&flydvb_map);
+}
+
+static void __exit exit_rc_map_flydvb(void)
+{
+ ir_unregister_map(&flydvb_map);
+}
+
+module_init(init_rc_map_flydvb)
+module_exit(exit_rc_map_flydvb)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-flyvideo.c b/drivers/media/IR/keymaps/rc-flyvideo.c
new file mode 100644
index 000000000000..9c73043cbdba
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-flyvideo.c
@@ -0,0 +1,70 @@
+/* flyvideo.h - Keytable for flyvideo Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+static struct ir_scancode flyvideo[] = {
+ { 0x0f, KEY_0 },
+ { 0x03, KEY_1 },
+ { 0x04, KEY_2 },
+ { 0x05, KEY_3 },
+ { 0x07, KEY_4 },
+ { 0x08, KEY_5 },
+ { 0x09, KEY_6 },
+ { 0x0b, KEY_7 },
+ { 0x0c, KEY_8 },
+ { 0x0d, KEY_9 },
+
+ { 0x0e, KEY_MODE }, /* Air/Cable */
+ { 0x11, KEY_VIDEO }, /* Video */
+ { 0x15, KEY_AUDIO }, /* Audio */
+ { 0x00, KEY_POWER }, /* Power */
+ { 0x18, KEY_TUNER }, /* AV Source */
+ { 0x02, KEY_ZOOM }, /* Fullscreen */
+ { 0x1a, KEY_LANGUAGE }, /* Stereo */
+ { 0x1b, KEY_MUTE }, /* Mute */
+ { 0x14, KEY_VOLUMEUP }, /* Volume + */
+ { 0x17, KEY_VOLUMEDOWN },/* Volume - */
+ { 0x12, KEY_CHANNELUP },/* Channel + */
+ { 0x13, KEY_CHANNELDOWN },/* Channel - */
+ { 0x06, KEY_AGAIN }, /* Recall */
+ { 0x10, KEY_ENTER }, /* Enter */
+
+ { 0x19, KEY_BACK }, /* Rewind ( <<< ) */
+ { 0x1f, KEY_FORWARD }, /* Forward ( >>> ) */
+ { 0x0a, KEY_ANGLE }, /* no label, may be used as the PAUSE button */
+};
+
+static struct rc_keymap flyvideo_map = {
+ .map = {
+ .scan = flyvideo,
+ .size = ARRAY_SIZE(flyvideo),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_FLYVIDEO,
+ }
+};
+
+static int __init init_rc_map_flyvideo(void)
+{
+ return ir_register_map(&flyvideo_map);
+}
+
+static void __exit exit_rc_map_flyvideo(void)
+{
+ ir_unregister_map(&flyvideo_map);
+}
+
+module_init(init_rc_map_flyvideo)
+module_exit(exit_rc_map_flyvideo)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-fusionhdtv-mce.c b/drivers/media/IR/keymaps/rc-fusionhdtv-mce.c
new file mode 100644
index 000000000000..cdb10389b10e
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-fusionhdtv-mce.c
@@ -0,0 +1,98 @@
+/* fusionhdtv-mce.h - Keytable for fusionhdtv_mce Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* DViCO FUSION HDTV MCE remote */
+
+static struct ir_scancode fusionhdtv_mce[] = {
+
+ { 0x0b, KEY_1 },
+ { 0x17, KEY_2 },
+ { 0x1b, KEY_3 },
+ { 0x07, KEY_4 },
+ { 0x50, KEY_5 },
+ { 0x54, KEY_6 },
+ { 0x48, KEY_7 },
+ { 0x4c, KEY_8 },
+ { 0x58, KEY_9 },
+ { 0x03, KEY_0 },
+
+ { 0x5e, KEY_OK },
+ { 0x51, KEY_UP },
+ { 0x53, KEY_DOWN },
+ { 0x5b, KEY_LEFT },
+ { 0x5f, KEY_RIGHT },
+
+ { 0x02, KEY_TV }, /* Labeled DTV on remote */
+ { 0x0e, KEY_MP3 },
+ { 0x1a, KEY_DVD },
+ { 0x1e, KEY_FAVORITES }, /* Labeled CPF on remote */
+ { 0x16, KEY_SETUP },
+ { 0x46, KEY_POWER2 }, /* TV On/Off button on remote */
+ { 0x0a, KEY_EPG }, /* Labeled Guide on remote */
+
+ { 0x49, KEY_BACK },
+ { 0x59, KEY_INFO }, /* Labeled MORE on remote */
+ { 0x4d, KEY_MENU }, /* Labeled DVDMENU on remote */
+ { 0x55, KEY_CYCLEWINDOWS }, /* Labeled ALT-TAB on remote */
+
+ { 0x0f, KEY_PREVIOUSSONG }, /* Labeled |<< REPLAY on remote */
+ { 0x12, KEY_NEXTSONG }, /* Labeled >>| SKIP on remote */
+ { 0x42, KEY_ENTER }, /* Labeled START with a green
+ MS windows logo on remote */
+
+ { 0x15, KEY_VOLUMEUP },
+ { 0x05, KEY_VOLUMEDOWN },
+ { 0x11, KEY_CHANNELUP },
+ { 0x09, KEY_CHANNELDOWN },
+
+ { 0x52, KEY_CAMERA },
+ { 0x5a, KEY_TUNER },
+ { 0x19, KEY_OPEN },
+
+ { 0x13, KEY_MODE }, /* 4:3 16:9 select */
+ { 0x1f, KEY_ZOOM },
+
+ { 0x43, KEY_REWIND },
+ { 0x47, KEY_PLAYPAUSE },
+ { 0x4f, KEY_FASTFORWARD },
+ { 0x57, KEY_MUTE },
+ { 0x0d, KEY_STOP },
+ { 0x01, KEY_RECORD },
+ { 0x4e, KEY_POWER },
+};
+
+static struct rc_keymap fusionhdtv_mce_map = {
+ .map = {
+ .scan = fusionhdtv_mce,
+ .size = ARRAY_SIZE(fusionhdtv_mce),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_FUSIONHDTV_MCE,
+ }
+};
+
+static int __init init_rc_map_fusionhdtv_mce(void)
+{
+ return ir_register_map(&fusionhdtv_mce_map);
+}
+
+static void __exit exit_rc_map_fusionhdtv_mce(void)
+{
+ ir_unregister_map(&fusionhdtv_mce_map);
+}
+
+module_init(init_rc_map_fusionhdtv_mce)
+module_exit(exit_rc_map_fusionhdtv_mce)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-gadmei-rm008z.c b/drivers/media/IR/keymaps/rc-gadmei-rm008z.c
new file mode 100644
index 000000000000..c16c0d1263ac
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-gadmei-rm008z.c
@@ -0,0 +1,81 @@
+/* gadmei-rm008z.h - Keytable for gadmei_rm008z Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* GADMEI UTV330+ RM008Z remote
+ Shine Liu <shinel@foxmail.com>
+ */
+
+static struct ir_scancode gadmei_rm008z[] = {
+ { 0x14, KEY_POWER2}, /* POWER OFF */
+ { 0x0c, KEY_MUTE}, /* MUTE */
+
+ { 0x18, KEY_TV}, /* TV */
+ { 0x0e, KEY_VIDEO}, /* AV */
+ { 0x0b, KEY_AUDIO}, /* SV */
+ { 0x0f, KEY_RADIO}, /* FM */
+
+ { 0x00, KEY_1},
+ { 0x01, KEY_2},
+ { 0x02, KEY_3},
+ { 0x03, KEY_4},
+ { 0x04, KEY_5},
+ { 0x05, KEY_6},
+ { 0x06, KEY_7},
+ { 0x07, KEY_8},
+ { 0x08, KEY_9},
+ { 0x09, KEY_0},
+ { 0x0a, KEY_INFO}, /* OSD */
+ { 0x1c, KEY_BACKSPACE}, /* LAST */
+
+ { 0x0d, KEY_PLAY}, /* PLAY */
+ { 0x1e, KEY_CAMERA}, /* SNAPSHOT */
+ { 0x1a, KEY_RECORD}, /* RECORD */
+ { 0x17, KEY_STOP}, /* STOP */
+
+ { 0x1f, KEY_UP}, /* UP */
+ { 0x44, KEY_DOWN}, /* DOWN */
+ { 0x46, KEY_TAB}, /* BACK */
+ { 0x4a, KEY_ZOOM}, /* FULLSCREEN */
+
+ { 0x10, KEY_VOLUMEUP}, /* VOLUMEUP */
+ { 0x11, KEY_VOLUMEDOWN}, /* VOLUMEDOWN */
+ { 0x12, KEY_CHANNELUP}, /* CHANNELUP */
+ { 0x13, KEY_CHANNELDOWN}, /* CHANNELDOWN */
+ { 0x15, KEY_ENTER}, /* OK */
+};
+
+static struct rc_keymap gadmei_rm008z_map = {
+ .map = {
+ .scan = gadmei_rm008z,
+ .size = ARRAY_SIZE(gadmei_rm008z),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_GADMEI_RM008Z,
+ }
+};
+
+static int __init init_rc_map_gadmei_rm008z(void)
+{
+ return ir_register_map(&gadmei_rm008z_map);
+}
+
+static void __exit exit_rc_map_gadmei_rm008z(void)
+{
+ ir_unregister_map(&gadmei_rm008z_map);
+}
+
+module_init(init_rc_map_gadmei_rm008z)
+module_exit(exit_rc_map_gadmei_rm008z)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-genius-tvgo-a11mce.c b/drivers/media/IR/keymaps/rc-genius-tvgo-a11mce.c
new file mode 100644
index 000000000000..89f8e384e52a
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-genius-tvgo-a11mce.c
@@ -0,0 +1,84 @@
+/* genius-tvgo-a11mce.h - Keytable for genius_tvgo_a11mce Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/*
+ * Remote control for the Genius TVGO A11MCE
+ * Adrian Pardini <pardo.bsso@gmail.com>
+ */
+
+static struct ir_scancode genius_tvgo_a11mce[] = {
+ /* Keys 0 to 9 */
+ { 0x48, KEY_0 },
+ { 0x09, KEY_1 },
+ { 0x1d, KEY_2 },
+ { 0x1f, KEY_3 },
+ { 0x19, KEY_4 },
+ { 0x1b, KEY_5 },
+ { 0x11, KEY_6 },
+ { 0x17, KEY_7 },
+ { 0x12, KEY_8 },
+ { 0x16, KEY_9 },
+
+ { 0x54, KEY_RECORD }, /* recording */
+ { 0x06, KEY_MUTE }, /* mute */
+ { 0x10, KEY_POWER },
+ { 0x40, KEY_LAST }, /* recall */
+ { 0x4c, KEY_CHANNELUP }, /* channel / program + */
+ { 0x00, KEY_CHANNELDOWN }, /* channel / program - */
+ { 0x0d, KEY_VOLUMEUP },
+ { 0x15, KEY_VOLUMEDOWN },
+ { 0x4d, KEY_OK }, /* also labeled as Pause */
+ { 0x1c, KEY_ZOOM }, /* full screen and Stop */
+ { 0x02, KEY_MODE }, /* AV Source or Rewind */
+ { 0x04, KEY_LIST }, /* -/-- */
+ /* small arrows above numbers */
+ { 0x1a, KEY_NEXT }, /* also Fast Forward */
+ { 0x0e, KEY_PREVIOUS }, /* also Rewind */
+ /* these are in a rather non-standard layout and have
+ an alternate name written on them */
+ { 0x1e, KEY_UP }, /* Video Setting */
+ { 0x0a, KEY_DOWN }, /* Video Default */
+ { 0x05, KEY_CAMERA }, /* Snapshot */
+ { 0x0c, KEY_RIGHT }, /* Hide Panel */
+ /* Four buttons without label */
+ { 0x49, KEY_RED },
+ { 0x0b, KEY_GREEN },
+ { 0x13, KEY_YELLOW },
+ { 0x50, KEY_BLUE },
+};
+
+static struct rc_keymap genius_tvgo_a11mce_map = {
+ .map = {
+ .scan = genius_tvgo_a11mce,
+ .size = ARRAY_SIZE(genius_tvgo_a11mce),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_GENIUS_TVGO_A11MCE,
+ }
+};
+
+static int __init init_rc_map_genius_tvgo_a11mce(void)
+{
+ return ir_register_map(&genius_tvgo_a11mce_map);
+}
+
+static void __exit exit_rc_map_genius_tvgo_a11mce(void)
+{
+ ir_unregister_map(&genius_tvgo_a11mce_map);
+}
+
+module_init(init_rc_map_genius_tvgo_a11mce)
+module_exit(exit_rc_map_genius_tvgo_a11mce)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-gotview7135.c b/drivers/media/IR/keymaps/rc-gotview7135.c
new file mode 100644
index 000000000000..52f025bb35f6
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-gotview7135.c
@@ -0,0 +1,79 @@
+/* gotview7135.h - Keytable for gotview7135 Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* Mike Baikov <mike@baikov.com> */
+
+static struct ir_scancode gotview7135[] = {
+
+ { 0x11, KEY_POWER },
+ { 0x35, KEY_TV },
+ { 0x1b, KEY_0 },
+ { 0x29, KEY_1 },
+ { 0x19, KEY_2 },
+ { 0x39, KEY_3 },
+ { 0x1f, KEY_4 },
+ { 0x2c, KEY_5 },
+ { 0x21, KEY_6 },
+ { 0x24, KEY_7 },
+ { 0x18, KEY_8 },
+ { 0x2b, KEY_9 },
+ { 0x3b, KEY_AGAIN }, /* LOOP */
+ { 0x06, KEY_AUDIO },
+ { 0x31, KEY_PRINT }, /* PREVIEW */
+ { 0x3e, KEY_VIDEO },
+ { 0x10, KEY_CHANNELUP },
+ { 0x20, KEY_CHANNELDOWN },
+ { 0x0c, KEY_VOLUMEDOWN },
+ { 0x28, KEY_VOLUMEUP },
+ { 0x08, KEY_MUTE },
+ { 0x26, KEY_SEARCH }, /* SCAN */
+ { 0x3f, KEY_CAMERA }, /* SNAPSHOT */
+ { 0x12, KEY_RECORD },
+ { 0x32, KEY_STOP },
+ { 0x3c, KEY_PLAY },
+ { 0x1d, KEY_REWIND },
+ { 0x2d, KEY_PAUSE },
+ { 0x0d, KEY_FORWARD },
+ { 0x05, KEY_ZOOM }, /* FULL */
+
+ { 0x2a, KEY_F21 }, /* LIVE TIMESHIFT */
+ { 0x0e, KEY_F22 }, /* MIN TIMESHIFT */
+ { 0x1e, KEY_TIME }, /* TIMESHIFT */
+ { 0x38, KEY_F24 }, /* NORMAL TIMESHIFT */
+};
+
+static struct rc_keymap gotview7135_map = {
+ .map = {
+ .scan = gotview7135,
+ .size = ARRAY_SIZE(gotview7135),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_GOTVIEW7135,
+ }
+};
+
+static int __init init_rc_map_gotview7135(void)
+{
+ return ir_register_map(&gotview7135_map);
+}
+
+static void __exit exit_rc_map_gotview7135(void)
+{
+ ir_unregister_map(&gotview7135_map);
+}
+
+module_init(init_rc_map_gotview7135)
+module_exit(exit_rc_map_gotview7135)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-hauppauge-new.c b/drivers/media/IR/keymaps/rc-hauppauge-new.c
new file mode 100644
index 000000000000..c6f8cd7c5186
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-hauppauge-new.c
@@ -0,0 +1,100 @@
+/* hauppauge-new.h - Keytable for hauppauge_new Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* Hauppauge: the newer, gray remotes (there seem to be multiple
+ * slightly different versions), shipped with cx88+ivtv cards.
+ * Almost RC5 coding, but with some non-standard keys */
+
+static struct ir_scancode hauppauge_new[] = {
+ /* Keys 0 to 9 */
+ { 0x00, KEY_0 },
+ { 0x01, KEY_1 },
+ { 0x02, KEY_2 },
+ { 0x03, KEY_3 },
+ { 0x04, KEY_4 },
+ { 0x05, KEY_5 },
+ { 0x06, KEY_6 },
+ { 0x07, KEY_7 },
+ { 0x08, KEY_8 },
+ { 0x09, KEY_9 },
+
+ { 0x0a, KEY_TEXT }, /* keypad asterisk as well */
+ { 0x0b, KEY_RED }, /* red button */
+ { 0x0c, KEY_RADIO },
+ { 0x0d, KEY_MENU },
+ { 0x0e, KEY_SUBTITLE }, /* also the # key */
+ { 0x0f, KEY_MUTE },
+ { 0x10, KEY_VOLUMEUP },
+ { 0x11, KEY_VOLUMEDOWN },
+ { 0x12, KEY_PREVIOUS }, /* previous channel */
+ { 0x14, KEY_UP },
+ { 0x15, KEY_DOWN },
+ { 0x16, KEY_LEFT },
+ { 0x17, KEY_RIGHT },
+ { 0x18, KEY_VIDEO }, /* Videos */
+ { 0x19, KEY_AUDIO }, /* Music */
+ /* 0x1a: Pictures - presume this means
+ "Multimedia Home Platform" -
+ no "PICTURES" key in input.h
+ */
+ { 0x1a, KEY_MHP },
+
+ { 0x1b, KEY_EPG }, /* Guide */
+ { 0x1c, KEY_TV },
+ { 0x1e, KEY_NEXTSONG }, /* skip >| */
+ { 0x1f, KEY_EXIT }, /* back/exit */
+ { 0x20, KEY_CHANNELUP }, /* channel / program + */
+ { 0x21, KEY_CHANNELDOWN }, /* channel / program - */
+ { 0x22, KEY_CHANNEL }, /* source (old black remote) */
+ { 0x24, KEY_PREVIOUSSONG }, /* replay |< */
+ { 0x25, KEY_ENTER }, /* OK */
+ { 0x26, KEY_SLEEP }, /* minimize (old black remote) */
+ { 0x29, KEY_BLUE }, /* blue key */
+ { 0x2e, KEY_GREEN }, /* green button */
+ { 0x30, KEY_PAUSE }, /* pause */
+ { 0x32, KEY_REWIND }, /* backward << */
+ { 0x34, KEY_FASTFORWARD }, /* forward >> */
+ { 0x35, KEY_PLAY },
+ { 0x36, KEY_STOP },
+ { 0x37, KEY_RECORD }, /* recording */
+ { 0x38, KEY_YELLOW }, /* yellow key */
+ { 0x3b, KEY_SELECT }, /* top right button */
+ { 0x3c, KEY_ZOOM }, /* full */
+ { 0x3d, KEY_POWER }, /* system power (green button) */
+};
+
+static struct rc_keymap hauppauge_new_map = {
+ .map = {
+ .scan = hauppauge_new,
+ .size = ARRAY_SIZE(hauppauge_new),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_HAUPPAUGE_NEW,
+ }
+};
+
+static int __init init_rc_map_hauppauge_new(void)
+{
+ return ir_register_map(&hauppauge_new_map);
+}
+
+static void __exit exit_rc_map_hauppauge_new(void)
+{
+ ir_unregister_map(&hauppauge_new_map);
+}
+
+module_init(init_rc_map_hauppauge_new)
+module_exit(exit_rc_map_hauppauge_new)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-imon-mce.c b/drivers/media/IR/keymaps/rc-imon-mce.c
new file mode 100644
index 000000000000..e49f350e3a0d
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-imon-mce.c
@@ -0,0 +1,142 @@
+/* rc-imon-mce.c - Keytable for Windows Media Center RC-6 remotes for use
+ * with the SoundGraph iMON/Antec Veris hardware IR decoder
+ *
+ * Copyright (c) 2010 by Jarod Wilson <jarod@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* mce-mode imon mce remote key table */
+static struct ir_scancode imon_mce[] = {
+ /* keys sorted mostly by frequency of use to optimize lookups */
+ { 0x800ff415, KEY_REWIND },
+ { 0x800ff414, KEY_FASTFORWARD },
+ { 0x800ff41b, KEY_PREVIOUS },
+ { 0x800ff41a, KEY_NEXT },
+
+ { 0x800ff416, KEY_PLAY },
+ { 0x800ff418, KEY_PAUSE },
+ { 0x800ff419, KEY_STOP },
+ { 0x800ff417, KEY_RECORD },
+
+ { 0x02000052, KEY_UP },
+ { 0x02000051, KEY_DOWN },
+ { 0x02000050, KEY_LEFT },
+ { 0x0200004f, KEY_RIGHT },
+
+ { 0x800ff41e, KEY_UP },
+ { 0x800ff41f, KEY_DOWN },
+ { 0x800ff420, KEY_LEFT },
+ { 0x800ff421, KEY_RIGHT },
+
+ /* 0x800ff40b also KEY_NUMERIC_POUND on some receivers */
+ { 0x800ff40b, KEY_ENTER },
+ { 0x02000028, KEY_ENTER },
+/* the OK and Enter buttons decode to the same value on some remotes
+ { 0x02000028, KEY_OK }, */
+ { 0x800ff422, KEY_OK },
+ { 0x0200002a, KEY_EXIT },
+ { 0x800ff423, KEY_EXIT },
+ { 0x02000029, KEY_DELETE },
+ /* 0x800ff40a also KEY_NUMERIC_STAR on some receivers */
+ { 0x800ff40a, KEY_DELETE },
+
+ { 0x800ff40e, KEY_MUTE },
+ { 0x800ff410, KEY_VOLUMEUP },
+ { 0x800ff411, KEY_VOLUMEDOWN },
+ { 0x800ff412, KEY_CHANNELUP },
+ { 0x800ff413, KEY_CHANNELDOWN },
+
+ { 0x0200001e, KEY_NUMERIC_1 },
+ { 0x0200001f, KEY_NUMERIC_2 },
+ { 0x02000020, KEY_NUMERIC_3 },
+ { 0x02000021, KEY_NUMERIC_4 },
+ { 0x02000022, KEY_NUMERIC_5 },
+ { 0x02000023, KEY_NUMERIC_6 },
+ { 0x02000024, KEY_NUMERIC_7 },
+ { 0x02000025, KEY_NUMERIC_8 },
+ { 0x02000026, KEY_NUMERIC_9 },
+ { 0x02000027, KEY_NUMERIC_0 },
+
+ { 0x800ff401, KEY_NUMERIC_1 },
+ { 0x800ff402, KEY_NUMERIC_2 },
+ { 0x800ff403, KEY_NUMERIC_3 },
+ { 0x800ff404, KEY_NUMERIC_4 },
+ { 0x800ff405, KEY_NUMERIC_5 },
+ { 0x800ff406, KEY_NUMERIC_6 },
+ { 0x800ff407, KEY_NUMERIC_7 },
+ { 0x800ff408, KEY_NUMERIC_8 },
+ { 0x800ff409, KEY_NUMERIC_9 },
+ { 0x800ff400, KEY_NUMERIC_0 },
+
+ { 0x02200025, KEY_NUMERIC_STAR },
+ { 0x02200020, KEY_NUMERIC_POUND },
+ /* 0x800ff41d also KEY_BLUE on some receivers */
+ { 0x800ff41d, KEY_NUMERIC_STAR },
+ /* 0x800ff41c also KEY_PREVIOUS on some receivers */
+ { 0x800ff41c, KEY_NUMERIC_POUND },
+
+ { 0x800ff446, KEY_TV },
+ { 0x800ff447, KEY_AUDIO }, /* My Music */
+ { 0x800ff448, KEY_PVR }, /* RecordedTV */
+ { 0x800ff449, KEY_CAMERA },
+ { 0x800ff44a, KEY_VIDEO },
+ /* 0x800ff424 also KEY_MENU on some receivers */
+ { 0x800ff424, KEY_DVD },
+ /* 0x800ff425 also KEY_GREEN on some receivers */
+ { 0x800ff425, KEY_TUNER }, /* LiveTV */
+ { 0x800ff450, KEY_RADIO },
+
+ { 0x800ff44c, KEY_LANGUAGE },
+ { 0x800ff427, KEY_ZOOM }, /* Aspect */
+
+ { 0x800ff45b, KEY_RED },
+ { 0x800ff45c, KEY_GREEN },
+ { 0x800ff45d, KEY_YELLOW },
+ { 0x800ff45e, KEY_BLUE },
+
+ { 0x800ff466, KEY_RED },
+ /* { 0x800ff425, KEY_GREEN }, */
+ { 0x800ff468, KEY_YELLOW },
+ /* { 0x800ff41d, KEY_BLUE }, */
+
+ { 0x800ff40f, KEY_INFO },
+ { 0x800ff426, KEY_EPG }, /* Guide */
+ { 0x800ff45a, KEY_SUBTITLE }, /* Caption/Teletext */
+ { 0x800ff44d, KEY_TITLE },
+
+ { 0x800ff40c, KEY_POWER },
+ { 0x800ff40d, KEY_PROG1 }, /* Windows MCE button */
+
+};
+
+static struct rc_keymap imon_mce_map = {
+ .map = {
+ .scan = imon_mce,
+ .size = ARRAY_SIZE(imon_mce),
+ /* it's RC6, but with a hardware decoder */
+ .ir_type = IR_TYPE_RC6,
+ .name = RC_MAP_IMON_MCE,
+ }
+};
+
+static int __init init_rc_map_imon_mce(void)
+{
+ return ir_register_map(&imon_mce_map);
+}
+
+static void __exit exit_rc_map_imon_mce(void)
+{
+ ir_unregister_map(&imon_mce_map);
+}
+
+module_init(init_rc_map_imon_mce)
+module_exit(exit_rc_map_imon_mce)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jarod Wilson <jarod@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-imon-pad.c b/drivers/media/IR/keymaps/rc-imon-pad.c
new file mode 100644
index 000000000000..bc4db72f02e6
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-imon-pad.c
@@ -0,0 +1,156 @@
+/* rc-imon-pad.c - Keytable for SoundGraph iMON PAD and Antec Veris
+ * RM-200 Remote Control
+ *
+ * Copyright (c) 2010 by Jarod Wilson <jarod@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/*
+ * standard imon remote key table, which isn't really entirely
+ * "standard", as different receivers decode the same key on the
+ * same remote to different hex codes, and the silkscreened names
+ * vary a bit between the SoundGraph and Antec remotes... ugh.
+ */
+static struct ir_scancode imon_pad[] = {
+ /* keys sorted mostly by frequency of use to optimize lookups */
+ { 0x2a8195b7, KEY_REWIND },
+ { 0x298315b7, KEY_REWIND },
+ { 0x2b8115b7, KEY_FASTFORWARD },
+ { 0x2b8315b7, KEY_FASTFORWARD },
+ { 0x2b9115b7, KEY_PREVIOUS },
+ { 0x298195b7, KEY_NEXT },
+
+ { 0x2a8115b7, KEY_PLAY },
+ { 0x2a8315b7, KEY_PLAY },
+ { 0x2a9115b7, KEY_PAUSE },
+ { 0x2b9715b7, KEY_STOP },
+ { 0x298115b7, KEY_RECORD },
+
+ { 0x01008000, KEY_UP },
+ { 0x01007f00, KEY_DOWN },
+ { 0x01000080, KEY_LEFT },
+ { 0x0100007f, KEY_RIGHT },
+
+ { 0x2aa515b7, KEY_UP },
+ { 0x289515b7, KEY_DOWN },
+ { 0x29a515b7, KEY_LEFT },
+ { 0x2ba515b7, KEY_RIGHT },
+
+ { 0x0200002c, KEY_SPACE }, /* Select/Space */
+ { 0x2a9315b7, KEY_SPACE }, /* Select/Space */
+ { 0x02000028, KEY_ENTER },
+ { 0x28a195b7, KEY_ENTER },
+ { 0x288195b7, KEY_EXIT },
+ { 0x02000029, KEY_ESC },
+ { 0x2bb715b7, KEY_ESC },
+ { 0x0200002a, KEY_BACKSPACE },
+ { 0x28a115b7, KEY_BACKSPACE },
+
+ { 0x2b9595b7, KEY_MUTE },
+ { 0x28a395b7, KEY_VOLUMEUP },
+ { 0x28a595b7, KEY_VOLUMEDOWN },
+ { 0x289395b7, KEY_CHANNELUP },
+ { 0x288795b7, KEY_CHANNELDOWN },
+
+ { 0x0200001e, KEY_NUMERIC_1 },
+ { 0x0200001f, KEY_NUMERIC_2 },
+ { 0x02000020, KEY_NUMERIC_3 },
+ { 0x02000021, KEY_NUMERIC_4 },
+ { 0x02000022, KEY_NUMERIC_5 },
+ { 0x02000023, KEY_NUMERIC_6 },
+ { 0x02000024, KEY_NUMERIC_7 },
+ { 0x02000025, KEY_NUMERIC_8 },
+ { 0x02000026, KEY_NUMERIC_9 },
+ { 0x02000027, KEY_NUMERIC_0 },
+
+ { 0x28b595b7, KEY_NUMERIC_1 },
+ { 0x2bb195b7, KEY_NUMERIC_2 },
+ { 0x28b195b7, KEY_NUMERIC_3 },
+ { 0x2a8595b7, KEY_NUMERIC_4 },
+ { 0x299595b7, KEY_NUMERIC_5 },
+ { 0x2aa595b7, KEY_NUMERIC_6 },
+ { 0x2b9395b7, KEY_NUMERIC_7 },
+ { 0x2a8515b7, KEY_NUMERIC_8 },
+ { 0x2aa115b7, KEY_NUMERIC_9 },
+ { 0x2ba595b7, KEY_NUMERIC_0 },
+
+ { 0x02200025, KEY_NUMERIC_STAR },
+ { 0x28b515b7, KEY_NUMERIC_STAR },
+ { 0x02200020, KEY_NUMERIC_POUND },
+ { 0x29a115b7, KEY_NUMERIC_POUND },
+
+ { 0x2b8515b7, KEY_VIDEO },
+ { 0x299195b7, KEY_AUDIO },
+ { 0x2ba115b7, KEY_CAMERA },
+ { 0x28a515b7, KEY_TV },
+ { 0x29a395b7, KEY_DVD },
+ { 0x29a295b7, KEY_DVD },
+
+ /* the Menu key between DVD and Subtitle on the RM-200... */
+ { 0x2ba385b7, KEY_MENU },
+ { 0x2ba395b7, KEY_MENU },
+
+ { 0x288515b7, KEY_BOOKMARKS },
+ { 0x2ab715b7, KEY_MEDIA }, /* Thumbnail */
+ { 0x298595b7, KEY_SUBTITLE },
+ { 0x2b8595b7, KEY_LANGUAGE },
+
+ { 0x29a595b7, KEY_ZOOM },
+ { 0x2aa395b7, KEY_SCREEN }, /* FullScreen */
+
+ { 0x299115b7, KEY_KEYBOARD },
+ { 0x299135b7, KEY_KEYBOARD },
+
+ { 0x01010000, BTN_LEFT },
+ { 0x01020000, BTN_RIGHT },
+ { 0x01010080, BTN_LEFT },
+ { 0x01020080, BTN_RIGHT },
+ { 0x688301b7, BTN_LEFT },
+ { 0x688481b7, BTN_RIGHT },
+
+ { 0x2a9395b7, KEY_CYCLEWINDOWS }, /* TaskSwitcher */
+ { 0x2b8395b7, KEY_TIME }, /* Timer */
+
+ { 0x289115b7, KEY_POWER },
+ { 0x29b195b7, KEY_EJECTCD }, /* the one next to play */
+ { 0x299395b7, KEY_EJECTCLOSECD }, /* eject (by TaskSw) */
+
+ { 0x02800000, KEY_CONTEXT_MENU }, /* Left Menu */
+ { 0x2b8195b7, KEY_CONTEXT_MENU }, /* Left Menu*/
+ { 0x02000065, KEY_COMPOSE }, /* RightMenu */
+ { 0x28b715b7, KEY_COMPOSE }, /* RightMenu */
+ { 0x2ab195b7, KEY_PROG1 }, /* Go or MultiMon */
+ { 0x29b715b7, KEY_DASHBOARD }, /* AppLauncher */
+};
+
+static struct rc_keymap imon_pad_map = {
+ .map = {
+ .scan = imon_pad,
+ .size = ARRAY_SIZE(imon_pad),
+ /* actual protocol details unknown, hardware decoder */
+ .ir_type = IR_TYPE_OTHER,
+ .name = RC_MAP_IMON_PAD,
+ }
+};
+
+static int __init init_rc_map_imon_pad(void)
+{
+ return ir_register_map(&imon_pad_map);
+}
+
+static void __exit exit_rc_map_imon_pad(void)
+{
+ ir_unregister_map(&imon_pad_map);
+}
+
+module_init(init_rc_map_imon_pad)
+module_exit(exit_rc_map_imon_pad)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jarod Wilson <jarod@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-iodata-bctv7e.c b/drivers/media/IR/keymaps/rc-iodata-bctv7e.c
new file mode 100644
index 000000000000..ef6600259fc0
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-iodata-bctv7e.c
@@ -0,0 +1,88 @@
+/* iodata-bctv7e.h - Keytable for iodata_bctv7e Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* IO-DATA BCTV7E Remote */
+
+static struct ir_scancode iodata_bctv7e[] = {
+ { 0x40, KEY_TV },
+ { 0x20, KEY_RADIO }, /* FM */
+ { 0x60, KEY_EPG },
+ { 0x00, KEY_POWER },
+
+ /* Keys 0 to 9 */
+ { 0x44, KEY_0 }, /* 10 */
+ { 0x50, KEY_1 },
+ { 0x30, KEY_2 },
+ { 0x70, KEY_3 },
+ { 0x48, KEY_4 },
+ { 0x28, KEY_5 },
+ { 0x68, KEY_6 },
+ { 0x58, KEY_7 },
+ { 0x38, KEY_8 },
+ { 0x78, KEY_9 },
+
+ { 0x10, KEY_L }, /* Live */
+ { 0x08, KEY_TIME }, /* Time Shift */
+
+ { 0x18, KEY_PLAYPAUSE }, /* Play */
+
+ { 0x24, KEY_ENTER }, /* 11 */
+ { 0x64, KEY_ESC }, /* 12 */
+ { 0x04, KEY_M }, /* Multi */
+
+ { 0x54, KEY_VIDEO },
+ { 0x34, KEY_CHANNELUP },
+ { 0x74, KEY_VOLUMEUP },
+ { 0x14, KEY_MUTE },
+
+ { 0x4c, KEY_VCR }, /* SVIDEO */
+ { 0x2c, KEY_CHANNELDOWN },
+ { 0x6c, KEY_VOLUMEDOWN },
+ { 0x0c, KEY_ZOOM },
+
+ { 0x5c, KEY_PAUSE },
+ { 0x3c, KEY_RED }, /* || (red) */
+ { 0x7c, KEY_RECORD }, /* recording */
+ { 0x1c, KEY_STOP },
+
+ { 0x41, KEY_REWIND }, /* backward << */
+ { 0x21, KEY_PLAY },
+ { 0x61, KEY_FASTFORWARD }, /* forward >> */
+ { 0x01, KEY_NEXT }, /* skip >| */
+};
+
+static struct rc_keymap iodata_bctv7e_map = {
+ .map = {
+ .scan = iodata_bctv7e,
+ .size = ARRAY_SIZE(iodata_bctv7e),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_IODATA_BCTV7E,
+ }
+};
+
+static int __init init_rc_map_iodata_bctv7e(void)
+{
+ return ir_register_map(&iodata_bctv7e_map);
+}
+
+static void __exit exit_rc_map_iodata_bctv7e(void)
+{
+ ir_unregister_map(&iodata_bctv7e_map);
+}
+
+module_init(init_rc_map_iodata_bctv7e)
+module_exit(exit_rc_map_iodata_bctv7e)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-kaiomy.c b/drivers/media/IR/keymaps/rc-kaiomy.c
new file mode 100644
index 000000000000..4c7883ba0f15
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-kaiomy.c
@@ -0,0 +1,87 @@
+/* kaiomy.h - Keytable for kaiomy Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* Kaiomy TVnPC U2
+ Mauro Carvalho Chehab <mchehab@infradead.org>
+ */
+
+static struct ir_scancode kaiomy[] = {
+ { 0x43, KEY_POWER2},
+ { 0x01, KEY_LIST},
+ { 0x0b, KEY_ZOOM},
+ { 0x03, KEY_POWER},
+
+ { 0x04, KEY_1},
+ { 0x08, KEY_2},
+ { 0x02, KEY_3},
+
+ { 0x0f, KEY_4},
+ { 0x05, KEY_5},
+ { 0x06, KEY_6},
+
+ { 0x0c, KEY_7},
+ { 0x0d, KEY_8},
+ { 0x0a, KEY_9},
+
+ { 0x11, KEY_0},
+
+ { 0x09, KEY_CHANNELUP},
+ { 0x07, KEY_CHANNELDOWN},
+
+ { 0x0e, KEY_VOLUMEUP},
+ { 0x13, KEY_VOLUMEDOWN},
+
+ { 0x10, KEY_HOME},
+ { 0x12, KEY_ENTER},
+
+ { 0x14, KEY_RECORD},
+ { 0x15, KEY_STOP},
+ { 0x16, KEY_PLAY},
+ { 0x17, KEY_MUTE},
+
+ { 0x18, KEY_UP},
+ { 0x19, KEY_DOWN},
+ { 0x1a, KEY_LEFT},
+ { 0x1b, KEY_RIGHT},
+
+ { 0x1c, KEY_RED},
+ { 0x1d, KEY_GREEN},
+ { 0x1e, KEY_YELLOW},
+ { 0x1f, KEY_BLUE},
+};
+
+static struct rc_keymap kaiomy_map = {
+ .map = {
+ .scan = kaiomy,
+ .size = ARRAY_SIZE(kaiomy),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_KAIOMY,
+ }
+};
+
+static int __init init_rc_map_kaiomy(void)
+{
+ return ir_register_map(&kaiomy_map);
+}
+
+static void __exit exit_rc_map_kaiomy(void)
+{
+ ir_unregister_map(&kaiomy_map);
+}
+
+module_init(init_rc_map_kaiomy)
+module_exit(exit_rc_map_kaiomy)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-kworld-315u.c b/drivers/media/IR/keymaps/rc-kworld-315u.c
new file mode 100644
index 000000000000..618c817374e6
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-kworld-315u.c
@@ -0,0 +1,83 @@
+/* kworld-315u.h - Keytable for kworld_315u Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* Kworld 315U
+ */
+
+static struct ir_scancode kworld_315u[] = {
+ { 0x6143, KEY_POWER },
+ { 0x6101, KEY_TUNER }, /* source */
+ { 0x610b, KEY_ZOOM },
+ { 0x6103, KEY_POWER2 }, /* shutdown */
+
+ { 0x6104, KEY_1 },
+ { 0x6108, KEY_2 },
+ { 0x6102, KEY_3 },
+ { 0x6109, KEY_CHANNELUP },
+
+ { 0x610f, KEY_4 },
+ { 0x6105, KEY_5 },
+ { 0x6106, KEY_6 },
+ { 0x6107, KEY_CHANNELDOWN },
+
+ { 0x610c, KEY_7 },
+ { 0x610d, KEY_8 },
+ { 0x610a, KEY_9 },
+ { 0x610e, KEY_VOLUMEUP },
+
+ { 0x6110, KEY_LAST },
+ { 0x6111, KEY_0 },
+ { 0x6112, KEY_ENTER },
+ { 0x6113, KEY_VOLUMEDOWN },
+
+ { 0x6114, KEY_RECORD },
+ { 0x6115, KEY_STOP },
+ { 0x6116, KEY_PLAY },
+ { 0x6117, KEY_MUTE },
+
+ { 0x6118, KEY_UP },
+ { 0x6119, KEY_DOWN },
+ { 0x611a, KEY_LEFT },
+ { 0x611b, KEY_RIGHT },
+
+ { 0x611c, KEY_RED },
+ { 0x611d, KEY_GREEN },
+ { 0x611e, KEY_YELLOW },
+ { 0x611f, KEY_BLUE },
+};
+
+static struct rc_keymap kworld_315u_map = {
+ .map = {
+ .scan = kworld_315u,
+ .size = ARRAY_SIZE(kworld_315u),
+ .ir_type = IR_TYPE_NEC,
+ .name = RC_MAP_KWORLD_315U,
+ }
+};
+
+static int __init init_rc_map_kworld_315u(void)
+{
+ return ir_register_map(&kworld_315u_map);
+}
+
+static void __exit exit_rc_map_kworld_315u(void)
+{
+ ir_unregister_map(&kworld_315u_map);
+}
+
+module_init(init_rc_map_kworld_315u)
+module_exit(exit_rc_map_kworld_315u)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-kworld-plus-tv-analog.c b/drivers/media/IR/keymaps/rc-kworld-plus-tv-analog.c
new file mode 100644
index 000000000000..366732f1f7b7
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-kworld-plus-tv-analog.c
@@ -0,0 +1,99 @@
+/* kworld-plus-tv-analog.h - Keytable for kworld_plus_tv_analog Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* Kworld Plus TV Analog Lite PCI IR
+ Mauro Carvalho Chehab <mchehab@infradead.org>
+ */
+
+static struct ir_scancode kworld_plus_tv_analog[] = {
+ { 0x0c, KEY_PROG1 }, /* Kworld key */
+ { 0x16, KEY_CLOSECD }, /* -> ) */
+ { 0x1d, KEY_POWER2 },
+
+ { 0x00, KEY_1 },
+ { 0x01, KEY_2 },
+ { 0x02, KEY_3 }, /* Two keys have the same code: 3 and left */
+ { 0x03, KEY_4 }, /* Two keys have the same code: 4 and right */
+ { 0x04, KEY_5 },
+ { 0x05, KEY_6 },
+ { 0x06, KEY_7 },
+ { 0x07, KEY_8 },
+ { 0x08, KEY_9 },
+ { 0x0a, KEY_0 },
+
+ { 0x09, KEY_AGAIN },
+ { 0x14, KEY_MUTE },
+
+ { 0x20, KEY_UP },
+ { 0x21, KEY_DOWN },
+ { 0x0b, KEY_ENTER },
+
+ { 0x10, KEY_CHANNELUP },
+ { 0x11, KEY_CHANNELDOWN },
+
+ /* Couldn't map key left/key right since those
+ conflict with the '3' and '4' scancodes;
+ it is unclear what the original driver does.
+ */
+
+ { 0x13, KEY_VOLUMEUP },
+ { 0x12, KEY_VOLUMEDOWN },
+
+ /* The lower part of the remote.
+ There are several duplicated keycodes here,
+ most of which conflict with the digits.
+ Add mappings only for the unused scancodes.
+ Somehow the original driver can tell these apart,
+ but it doesn't seem to be done via a GPIO.
+ Also, it is not related to the time between keyup
+ and keydown.
+ */
+ { 0x19, KEY_TIME}, /* Timeshift */
+ { 0x1a, KEY_STOP},
+ { 0x1b, KEY_RECORD},
+
+ { 0x22, KEY_TEXT},
+
+ { 0x15, KEY_AUDIO}, /* ((*)) */
+ { 0x0f, KEY_ZOOM},
+ { 0x1c, KEY_CAMERA}, /* snapshot */
+
+ { 0x18, KEY_RED}, /* B */
+ { 0x23, KEY_GREEN}, /* C */
+};
+
+static struct rc_keymap kworld_plus_tv_analog_map = {
+ .map = {
+ .scan = kworld_plus_tv_analog,
+ .size = ARRAY_SIZE(kworld_plus_tv_analog),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_KWORLD_PLUS_TV_ANALOG,
+ }
+};
+
+static int __init init_rc_map_kworld_plus_tv_analog(void)
+{
+ return ir_register_map(&kworld_plus_tv_analog_map);
+}
+
+static void __exit exit_rc_map_kworld_plus_tv_analog(void)
+{
+ ir_unregister_map(&kworld_plus_tv_analog_map);
+}
+
+module_init(init_rc_map_kworld_plus_tv_analog)
+module_exit(exit_rc_map_kworld_plus_tv_analog)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-manli.c b/drivers/media/IR/keymaps/rc-manli.c
new file mode 100644
index 000000000000..1e9fbfa90a1e
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-manli.c
@@ -0,0 +1,135 @@
+/* manli.h - Keytable for manli Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* Michael Tokarev <mjt@tls.msk.ru>
+ http://www.corpit.ru/mjt/beholdTV/remote_control.jpg
+ keytable is used by MANLI MTV00[12] and BeholdTV 40[13] at
+ least, and probably other cards too.
+ The "ascii-art picture" below (in comments, first row
+ is the keycode in hex, and subsequent row(s) shows
+ the button labels (several variants when appropriate)
+ helps to descide which keycodes to assign to the buttons.
+ */
+
+static struct ir_scancode manli[] = {
+
+ /* 0x1c 0x12 *
+ * FUNCTION POWER *
+ * FM (|) *
+ * */
+ { 0x1c, KEY_RADIO }, /*XXX*/
+ { 0x12, KEY_POWER },
+
+ /* 0x01 0x02 0x03 *
+ * 1 2 3 *
+ * *
+ * 0x04 0x05 0x06 *
+ * 4 5 6 *
+ * *
+ * 0x07 0x08 0x09 *
+ * 7 8 9 *
+ * */
+ { 0x01, KEY_1 },
+ { 0x02, KEY_2 },
+ { 0x03, KEY_3 },
+ { 0x04, KEY_4 },
+ { 0x05, KEY_5 },
+ { 0x06, KEY_6 },
+ { 0x07, KEY_7 },
+ { 0x08, KEY_8 },
+ { 0x09, KEY_9 },
+
+ /* 0x0a 0x00 0x17 *
+ * RECALL 0 +100 *
+ * PLUS *
+ * */
+ { 0x0a, KEY_AGAIN }, /*XXX KEY_REWIND? */
+ { 0x00, KEY_0 },
+ { 0x17, KEY_DIGITS }, /*XXX*/
+
+ /* 0x14 0x10 *
+ * MENU INFO *
+ * OSD */
+ { 0x14, KEY_MENU },
+ { 0x10, KEY_INFO },
+
+ /* 0x0b *
+ * Up *
+ * *
+ * 0x18 0x16 0x0c *
+ * Left Ok Right *
+ * *
+ * 0x15 *
+ * Down *
+ * */
+ { 0x0b, KEY_UP },
+ { 0x18, KEY_LEFT },
+ { 0x16, KEY_OK }, /*XXX KEY_SELECT? KEY_ENTER? */
+ { 0x0c, KEY_RIGHT },
+ { 0x15, KEY_DOWN },
+
+ /* 0x11 0x0d *
+ * TV/AV MODE *
+ * SOURCE STEREO *
+ * */
+ { 0x11, KEY_TV }, /*XXX*/
+ { 0x0d, KEY_MODE }, /*XXX there's no KEY_STEREO */
+
+ /* 0x0f 0x1b 0x1a *
+ * AUDIO Vol+ Chan+ *
+ * TIMESHIFT??? *
+ * *
+ * 0x0e 0x1f 0x1e *
+ * SLEEP Vol- Chan- *
+ * */
+ { 0x0f, KEY_AUDIO },
+ { 0x1b, KEY_VOLUMEUP },
+ { 0x1a, KEY_CHANNELUP },
+ { 0x0e, KEY_TIME },
+ { 0x1f, KEY_VOLUMEDOWN },
+ { 0x1e, KEY_CHANNELDOWN },
+
+ /* 0x13 0x19 *
+ * MUTE SNAPSHOT*
+ * */
+ { 0x13, KEY_MUTE },
+ { 0x19, KEY_CAMERA },
+
+ /* 0x1d unused ? */
+};
+
+static struct rc_keymap manli_map = {
+ .map = {
+ .scan = manli,
+ .size = ARRAY_SIZE(manli),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_MANLI,
+ }
+};
+
+static int __init init_rc_map_manli(void)
+{
+ return ir_register_map(&manli_map);
+}
+
+static void __exit exit_rc_map_manli(void)
+{
+ ir_unregister_map(&manli_map);
+}
+
+module_init(init_rc_map_manli)
+module_exit(exit_rc_map_manli)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-msi-tvanywhere-plus.c b/drivers/media/IR/keymaps/rc-msi-tvanywhere-plus.c
new file mode 100644
index 000000000000..eb8e42c18ff9
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-msi-tvanywhere-plus.c
@@ -0,0 +1,123 @@
+/* msi-tvanywhere-plus.h - Keytable for msi_tvanywhere_plus Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/*
+ Keycodes for remote on the MSI TV@nywhere Plus. The controller IC on the card
+ is marked "KS003". The controller is I2C at address 0x30, but does not seem
+ to respond to probes until a read is performed from a valid device.
+ I don't know why...
+
+ Note: This remote may be of similar or identical design to the
+ Pixelview remote (?). The raw codes and duplicate button codes
+ appear to be the same.
+
+ Henry Wong <henry@stuffedcow.net>
+ Some changes to formatting and keycodes by Mark Schultz <n9xmj@yahoo.com>
+*/
+
+static struct ir_scancode msi_tvanywhere_plus[] = {
+
+/* ---- Remote Button Layout ----
+
+ POWER SOURCE SCAN MUTE
+ TV/FM 1 2 3
+ |> 4 5 6
+ <| 7 8 9
+ ^^UP 0 + RECALL
+ vvDN RECORD STOP PLAY
+
+ MINIMIZE ZOOM
+
+ CH+
+ VOL- VOL+
+ CH-
+
+ SNAPSHOT MTS
+
+ << FUNC >> RESET
+*/
+
+ { 0x01, KEY_1 }, /* 1 */
+ { 0x0b, KEY_2 }, /* 2 */
+ { 0x1b, KEY_3 }, /* 3 */
+ { 0x05, KEY_4 }, /* 4 */
+ { 0x09, KEY_5 }, /* 5 */
+ { 0x15, KEY_6 }, /* 6 */
+ { 0x06, KEY_7 }, /* 7 */
+ { 0x0a, KEY_8 }, /* 8 */
+ { 0x12, KEY_9 }, /* 9 */
+ { 0x02, KEY_0 }, /* 0 */
+ { 0x10, KEY_KPPLUS }, /* + */
+ { 0x13, KEY_AGAIN }, /* Recall */
+
+ { 0x1e, KEY_POWER }, /* Power */
+ { 0x07, KEY_TUNER }, /* Source */
+ { 0x1c, KEY_SEARCH }, /* Scan */
+ { 0x18, KEY_MUTE }, /* Mute */
+
+ { 0x03, KEY_RADIO }, /* TV/FM */
+ /* The next four keys are duplicates that appear to send the
+ same IR code as Ch+, Ch-, >>, and << . The raw code assigned
+ to them is the actual code + 0x20 - they will never be
+ detected as such unless some way is discovered to distinguish
+ these buttons from those that have the same code. */
+ { 0x3f, KEY_RIGHT }, /* |> and Ch+ */
+ { 0x37, KEY_LEFT }, /* <| and Ch- */
+ { 0x2c, KEY_UP }, /* ^^Up and >> */
+ { 0x24, KEY_DOWN }, /* vvDn and << */
+
+ { 0x00, KEY_RECORD }, /* Record */
+ { 0x08, KEY_STOP }, /* Stop */
+ { 0x11, KEY_PLAY }, /* Play */
+
+ { 0x0f, KEY_CLOSE }, /* Minimize */
+ { 0x19, KEY_ZOOM }, /* Zoom */
+ { 0x1a, KEY_CAMERA }, /* Snapshot */
+ { 0x0d, KEY_LANGUAGE }, /* MTS */
+
+ { 0x14, KEY_VOLUMEDOWN }, /* Vol- */
+ { 0x16, KEY_VOLUMEUP }, /* Vol+ */
+ { 0x17, KEY_CHANNELDOWN }, /* Ch- */
+ { 0x1f, KEY_CHANNELUP }, /* Ch+ */
+
+ { 0x04, KEY_REWIND }, /* << */
+ { 0x0e, KEY_MENU }, /* Function */
+ { 0x0c, KEY_FASTFORWARD }, /* >> */
+ { 0x1d, KEY_RESTART }, /* Reset */
+};
+
+static struct rc_keymap msi_tvanywhere_plus_map = {
+ .map = {
+ .scan = msi_tvanywhere_plus,
+ .size = ARRAY_SIZE(msi_tvanywhere_plus),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_MSI_TVANYWHERE_PLUS,
+ }
+};
+
+static int __init init_rc_map_msi_tvanywhere_plus(void)
+{
+ return ir_register_map(&msi_tvanywhere_plus_map);
+}
+
+static void __exit exit_rc_map_msi_tvanywhere_plus(void)
+{
+ ir_unregister_map(&msi_tvanywhere_plus_map);
+}
+
+module_init(init_rc_map_msi_tvanywhere_plus)
+module_exit(exit_rc_map_msi_tvanywhere_plus)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-msi-tvanywhere.c b/drivers/media/IR/keymaps/rc-msi-tvanywhere.c
new file mode 100644
index 000000000000..ef411854f067
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-msi-tvanywhere.c
@@ -0,0 +1,69 @@
+/* msi-tvanywhere.h - Keytable for msi_tvanywhere Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* MSI TV@nywhere MASTER remote */
+
+static struct ir_scancode msi_tvanywhere[] = {
+ /* Keys 0 to 9 */
+ { 0x00, KEY_0 },
+ { 0x01, KEY_1 },
+ { 0x02, KEY_2 },
+ { 0x03, KEY_3 },
+ { 0x04, KEY_4 },
+ { 0x05, KEY_5 },
+ { 0x06, KEY_6 },
+ { 0x07, KEY_7 },
+ { 0x08, KEY_8 },
+ { 0x09, KEY_9 },
+
+ { 0x0c, KEY_MUTE },
+ { 0x0f, KEY_SCREEN }, /* Full Screen */
+ { 0x10, KEY_FN }, /* Function */
+ { 0x11, KEY_TIME }, /* Time shift */
+ { 0x12, KEY_POWER },
+ { 0x13, KEY_MEDIA }, /* MTS */
+ { 0x14, KEY_SLOW },
+ { 0x16, KEY_REWIND }, /* backward << */
+ { 0x17, KEY_ENTER }, /* Return */
+ { 0x18, KEY_FASTFORWARD }, /* forward >> */
+ { 0x1a, KEY_CHANNELUP },
+ { 0x1b, KEY_VOLUMEUP },
+ { 0x1e, KEY_CHANNELDOWN },
+ { 0x1f, KEY_VOLUMEDOWN },
+};
+
+static struct rc_keymap msi_tvanywhere_map = {
+ .map = {
+ .scan = msi_tvanywhere,
+ .size = ARRAY_SIZE(msi_tvanywhere),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_MSI_TVANYWHERE,
+ }
+};
+
+static int __init init_rc_map_msi_tvanywhere(void)
+{
+ return ir_register_map(&msi_tvanywhere_map);
+}
+
+static void __exit exit_rc_map_msi_tvanywhere(void)
+{
+ ir_unregister_map(&msi_tvanywhere_map);
+}
+
+module_init(init_rc_map_msi_tvanywhere)
+module_exit(exit_rc_map_msi_tvanywhere)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-nebula.c b/drivers/media/IR/keymaps/rc-nebula.c
new file mode 100644
index 000000000000..ccc50eb402ec
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-nebula.c
@@ -0,0 +1,96 @@
+/* nebula.h - Keytable for nebula Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+static struct ir_scancode nebula[] = {
+ { 0x00, KEY_0 },
+ { 0x01, KEY_1 },
+ { 0x02, KEY_2 },
+ { 0x03, KEY_3 },
+ { 0x04, KEY_4 },
+ { 0x05, KEY_5 },
+ { 0x06, KEY_6 },
+ { 0x07, KEY_7 },
+ { 0x08, KEY_8 },
+ { 0x09, KEY_9 },
+ { 0x0a, KEY_TV },
+ { 0x0b, KEY_AUX },
+ { 0x0c, KEY_DVD },
+ { 0x0d, KEY_POWER },
+ { 0x0e, KEY_MHP }, /* labelled 'Picture' */
+ { 0x0f, KEY_AUDIO },
+ { 0x10, KEY_INFO },
+ { 0x11, KEY_F13 }, /* 16:9 */
+ { 0x12, KEY_F14 }, /* 14:9 */
+ { 0x13, KEY_EPG },
+ { 0x14, KEY_EXIT },
+ { 0x15, KEY_MENU },
+ { 0x16, KEY_UP },
+ { 0x17, KEY_DOWN },
+ { 0x18, KEY_LEFT },
+ { 0x19, KEY_RIGHT },
+ { 0x1a, KEY_ENTER },
+ { 0x1b, KEY_CHANNELUP },
+ { 0x1c, KEY_CHANNELDOWN },
+ { 0x1d, KEY_VOLUMEUP },
+ { 0x1e, KEY_VOLUMEDOWN },
+ { 0x1f, KEY_RED },
+ { 0x20, KEY_GREEN },
+ { 0x21, KEY_YELLOW },
+ { 0x22, KEY_BLUE },
+ { 0x23, KEY_SUBTITLE },
+ { 0x24, KEY_F15 }, /* AD */
+ { 0x25, KEY_TEXT },
+ { 0x26, KEY_MUTE },
+ { 0x27, KEY_REWIND },
+ { 0x28, KEY_STOP },
+ { 0x29, KEY_PLAY },
+ { 0x2a, KEY_FASTFORWARD },
+ { 0x2b, KEY_F16 }, /* chapter */
+ { 0x2c, KEY_PAUSE },
+ { 0x2d, KEY_PLAY },
+ { 0x2e, KEY_RECORD },
+ { 0x2f, KEY_F17 }, /* picture in picture */
+ { 0x30, KEY_KPPLUS }, /* zoom in */
+ { 0x31, KEY_KPMINUS }, /* zoom out */
+ { 0x32, KEY_F18 }, /* capture */
+ { 0x33, KEY_F19 }, /* web */
+ { 0x34, KEY_EMAIL },
+ { 0x35, KEY_PHONE },
+ { 0x36, KEY_PC },
+};
+
+static struct rc_keymap nebula_map = {
+ .map = {
+ .scan = nebula,
+ .size = ARRAY_SIZE(nebula),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_NEBULA,
+ }
+};
+
+static int __init init_rc_map_nebula(void)
+{
+ return ir_register_map(&nebula_map);
+}
+
+static void __exit exit_rc_map_nebula(void)
+{
+ ir_unregister_map(&nebula_map);
+}
+
+module_init(init_rc_map_nebula)
+module_exit(exit_rc_map_nebula)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-nec-terratec-cinergy-xs.c b/drivers/media/IR/keymaps/rc-nec-terratec-cinergy-xs.c
new file mode 100644
index 000000000000..e1b54d20db60
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-nec-terratec-cinergy-xs.c
@@ -0,0 +1,105 @@
+/* nec-terratec-cinergy-xs.h - Keytable for nec_terratec_cinergy_xs Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* Terratec Cinergy Hybrid T USB XS FM
+ Mauro Carvalho Chehab <mchehab@redhat.com>
+ */
+
+static struct ir_scancode nec_terratec_cinergy_xs[] = {
+ { 0x1441, KEY_HOME},
+ { 0x1401, KEY_POWER2},
+
+ { 0x1442, KEY_MENU}, /* DVD menu */
+ { 0x1443, KEY_SUBTITLE},
+ { 0x1444, KEY_TEXT}, /* Teletext */
+ { 0x1445, KEY_DELETE},
+
+ { 0x1402, KEY_1},
+ { 0x1403, KEY_2},
+ { 0x1404, KEY_3},
+ { 0x1405, KEY_4},
+ { 0x1406, KEY_5},
+ { 0x1407, KEY_6},
+ { 0x1408, KEY_7},
+ { 0x1409, KEY_8},
+ { 0x140a, KEY_9},
+ { 0x140c, KEY_0},
+
+ { 0x140b, KEY_TUNER}, /* AV */
+ { 0x140d, KEY_MODE}, /* A.B */
+
+ { 0x1446, KEY_TV},
+ { 0x1447, KEY_DVD},
+ { 0x1449, KEY_VIDEO},
+ { 0x144a, KEY_RADIO}, /* Music */
+ { 0x144b, KEY_CAMERA}, /* PIC */
+
+ { 0x1410, KEY_UP},
+ { 0x1411, KEY_LEFT},
+ { 0x1412, KEY_OK},
+ { 0x1413, KEY_RIGHT},
+ { 0x1414, KEY_DOWN},
+
+ { 0x140f, KEY_EPG},
+ { 0x1416, KEY_INFO},
+ { 0x144d, KEY_BACKSPACE},
+
+ { 0x141c, KEY_VOLUMEUP},
+ { 0x141e, KEY_VOLUMEDOWN},
+
+ { 0x144c, KEY_PLAY},
+ { 0x141d, KEY_MUTE},
+
+ { 0x141b, KEY_CHANNELUP},
+ { 0x141f, KEY_CHANNELDOWN},
+
+ { 0x1417, KEY_RED},
+ { 0x1418, KEY_GREEN},
+ { 0x1419, KEY_YELLOW},
+ { 0x141a, KEY_BLUE},
+
+ { 0x1458, KEY_RECORD},
+ { 0x1448, KEY_STOP},
+ { 0x1440, KEY_PAUSE},
+
+ { 0x1454, KEY_LAST},
+ { 0x144e, KEY_REWIND},
+ { 0x144f, KEY_FASTFORWARD},
+ { 0x145c, KEY_NEXT},
+};
+
+static struct rc_keymap nec_terratec_cinergy_xs_map = {
+ .map = {
+ .scan = nec_terratec_cinergy_xs,
+ .size = ARRAY_SIZE(nec_terratec_cinergy_xs),
+ .ir_type = IR_TYPE_NEC,
+ .name = RC_MAP_NEC_TERRATEC_CINERGY_XS,
+ }
+};
+
+static int __init init_rc_map_nec_terratec_cinergy_xs(void)
+{
+ return ir_register_map(&nec_terratec_cinergy_xs_map);
+}
+
+static void __exit exit_rc_map_nec_terratec_cinergy_xs(void)
+{
+ ir_unregister_map(&nec_terratec_cinergy_xs_map);
+}
+
+module_init(init_rc_map_nec_terratec_cinergy_xs)
+module_exit(exit_rc_map_nec_terratec_cinergy_xs)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-norwood.c b/drivers/media/IR/keymaps/rc-norwood.c
new file mode 100644
index 000000000000..e5849a6b3f05
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-norwood.c
@@ -0,0 +1,85 @@
+/* norwood.h - Keytable for norwood Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* Norwood Micro (non-Pro) TV Tuner
+ By Peter Naulls <peter@chocky.org>
+ Key comments are the functions given in the manual */
+
+static struct ir_scancode norwood[] = {
+ /* Keys 0 to 9 */
+ { 0x20, KEY_0 },
+ { 0x21, KEY_1 },
+ { 0x22, KEY_2 },
+ { 0x23, KEY_3 },
+ { 0x24, KEY_4 },
+ { 0x25, KEY_5 },
+ { 0x26, KEY_6 },
+ { 0x27, KEY_7 },
+ { 0x28, KEY_8 },
+ { 0x29, KEY_9 },
+
+ { 0x78, KEY_TUNER }, /* Video Source */
+ { 0x2c, KEY_EXIT }, /* Open/Close software */
+ { 0x2a, KEY_SELECT }, /* 2 Digit Select */
+ { 0x69, KEY_AGAIN }, /* Recall */
+
+ { 0x32, KEY_BRIGHTNESSUP }, /* Brightness increase */
+ { 0x33, KEY_BRIGHTNESSDOWN }, /* Brightness decrease */
+ { 0x6b, KEY_KPPLUS }, /* (not named >>>>>) */
+ { 0x6c, KEY_KPMINUS }, /* (not named <<<<<) */
+
+ { 0x2d, KEY_MUTE }, /* Mute */
+ { 0x30, KEY_VOLUMEUP }, /* Volume up */
+ { 0x31, KEY_VOLUMEDOWN }, /* Volume down */
+ { 0x60, KEY_CHANNELUP }, /* Channel up */
+ { 0x61, KEY_CHANNELDOWN }, /* Channel down */
+
+ { 0x3f, KEY_RECORD }, /* Record */
+ { 0x37, KEY_PLAY }, /* Play */
+ { 0x36, KEY_PAUSE }, /* Pause */
+ { 0x2b, KEY_STOP }, /* Stop */
+ { 0x67, KEY_FASTFORWARD }, /* Forward */
+ { 0x66, KEY_REWIND }, /* Rewind */
+ { 0x3e, KEY_SEARCH }, /* Auto Scan */
+ { 0x2e, KEY_CAMERA }, /* Capture Video */
+ { 0x6d, KEY_MENU }, /* Show/Hide Control */
+ { 0x2f, KEY_ZOOM }, /* Full Screen */
+ { 0x34, KEY_RADIO }, /* FM */
+ { 0x65, KEY_POWER }, /* Computer power */
+};
+
+static struct rc_keymap norwood_map = {
+ .map = {
+ .scan = norwood,
+ .size = ARRAY_SIZE(norwood),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_NORWOOD,
+ }
+};
+
+static int __init init_rc_map_norwood(void)
+{
+ return ir_register_map(&norwood_map);
+}
+
+static void __exit exit_rc_map_norwood(void)
+{
+ ir_unregister_map(&norwood_map);
+}
+
+module_init(init_rc_map_norwood)
+module_exit(exit_rc_map_norwood)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-npgtech.c b/drivers/media/IR/keymaps/rc-npgtech.c
new file mode 100644
index 000000000000..b9ece1e90296
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-npgtech.c
@@ -0,0 +1,80 @@
+/* npgtech.h - Keytable for npgtech Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+static struct ir_scancode npgtech[] = {
+ { 0x1d, KEY_SWITCHVIDEOMODE }, /* switch inputs */
+ { 0x2a, KEY_FRONT },
+
+ { 0x3e, KEY_1 },
+ { 0x02, KEY_2 },
+ { 0x06, KEY_3 },
+ { 0x0a, KEY_4 },
+ { 0x0e, KEY_5 },
+ { 0x12, KEY_6 },
+ { 0x16, KEY_7 },
+ { 0x1a, KEY_8 },
+ { 0x1e, KEY_9 },
+ { 0x3a, KEY_0 },
+ { 0x22, KEY_NUMLOCK }, /* -/-- */
+ { 0x20, KEY_REFRESH },
+
+ { 0x03, KEY_BRIGHTNESSDOWN },
+ { 0x28, KEY_AUDIO },
+ { 0x3c, KEY_CHANNELUP },
+ { 0x3f, KEY_VOLUMEDOWN },
+ { 0x2e, KEY_MUTE },
+ { 0x3b, KEY_VOLUMEUP },
+ { 0x00, KEY_CHANNELDOWN },
+ { 0x07, KEY_BRIGHTNESSUP },
+ { 0x2c, KEY_TEXT },
+
+ { 0x37, KEY_RECORD },
+ { 0x17, KEY_PLAY },
+ { 0x13, KEY_PAUSE },
+ { 0x26, KEY_STOP },
+ { 0x18, KEY_FASTFORWARD },
+ { 0x14, KEY_REWIND },
+ { 0x33, KEY_ZOOM },
+ { 0x32, KEY_KEYBOARD },
+ { 0x30, KEY_GOTO }, /* Pointing arrow */
+ { 0x36, KEY_MACRO }, /* Maximize/Minimize (yellow) */
+ { 0x0b, KEY_RADIO },
+ { 0x10, KEY_POWER },
+
+};
+
+static struct rc_keymap npgtech_map = {
+ .map = {
+ .scan = npgtech,
+ .size = ARRAY_SIZE(npgtech),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_NPGTECH,
+ }
+};
+
+static int __init init_rc_map_npgtech(void)
+{
+ return ir_register_map(&npgtech_map);
+}
+
+static void __exit exit_rc_map_npgtech(void)
+{
+ ir_unregister_map(&npgtech_map);
+}
+
+module_init(init_rc_map_npgtech)
+module_exit(exit_rc_map_npgtech)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-pctv-sedna.c b/drivers/media/IR/keymaps/rc-pctv-sedna.c
new file mode 100644
index 000000000000..4129bb44a25b
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-pctv-sedna.c
@@ -0,0 +1,80 @@
+/* pctv-sedna.h - Keytable for pctv_sedna Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* Mapping for the 28-key remote control as seen at
+ http://www.sednacomputer.com/photo/cardbus-tv.jpg
+ Pavel Mihaylov <bin@bash.info>
+ Also for the remote bundled with Kozumi KTV-01C card */
+
+static struct ir_scancode pctv_sedna[] = {
+ { 0x00, KEY_0 },
+ { 0x01, KEY_1 },
+ { 0x02, KEY_2 },
+ { 0x03, KEY_3 },
+ { 0x04, KEY_4 },
+ { 0x05, KEY_5 },
+ { 0x06, KEY_6 },
+ { 0x07, KEY_7 },
+ { 0x08, KEY_8 },
+ { 0x09, KEY_9 },
+
+ { 0x0a, KEY_AGAIN }, /* Recall */
+ { 0x0b, KEY_CHANNELUP },
+ { 0x0c, KEY_VOLUMEUP },
+ { 0x0d, KEY_MODE }, /* Stereo */
+ { 0x0e, KEY_STOP },
+ { 0x0f, KEY_PREVIOUSSONG },
+ { 0x10, KEY_ZOOM },
+ { 0x11, KEY_TUNER }, /* Source */
+ { 0x12, KEY_POWER },
+ { 0x13, KEY_MUTE },
+ { 0x15, KEY_CHANNELDOWN },
+ { 0x18, KEY_VOLUMEDOWN },
+ { 0x19, KEY_CAMERA }, /* Snapshot */
+ { 0x1a, KEY_NEXTSONG },
+ { 0x1b, KEY_TIME }, /* Time Shift */
+ { 0x1c, KEY_RADIO }, /* FM Radio */
+ { 0x1d, KEY_RECORD },
+ { 0x1e, KEY_PAUSE },
+ /* additional codes for Kozumi's remote */
+ { 0x14, KEY_INFO }, /* OSD */
+ { 0x16, KEY_OK }, /* OK */
+ { 0x17, KEY_DIGITS }, /* Plus */
+ { 0x1f, KEY_PLAY }, /* Play */
+};
+
+static struct rc_keymap pctv_sedna_map = {
+ .map = {
+ .scan = pctv_sedna,
+ .size = ARRAY_SIZE(pctv_sedna),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_PCTV_SEDNA,
+ }
+};
+
+static int __init init_rc_map_pctv_sedna(void)
+{
+ return ir_register_map(&pctv_sedna_map);
+}
+
+static void __exit exit_rc_map_pctv_sedna(void)
+{
+ ir_unregister_map(&pctv_sedna_map);
+}
+
+module_init(init_rc_map_pctv_sedna)
+module_exit(exit_rc_map_pctv_sedna)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-pinnacle-color.c b/drivers/media/IR/keymaps/rc-pinnacle-color.c
new file mode 100644
index 000000000000..326e023ce126
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-pinnacle-color.c
@@ -0,0 +1,94 @@
+/* pinnacle-color.h - Keytable for pinnacle_color Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+static struct ir_scancode pinnacle_color[] = {
+ { 0x59, KEY_MUTE },
+ { 0x4a, KEY_POWER },
+
+ { 0x18, KEY_TEXT },
+ { 0x26, KEY_TV },
+ { 0x3d, KEY_PRINT },
+
+ { 0x48, KEY_RED },
+ { 0x04, KEY_GREEN },
+ { 0x11, KEY_YELLOW },
+ { 0x00, KEY_BLUE },
+
+ { 0x2d, KEY_VOLUMEUP },
+ { 0x1e, KEY_VOLUMEDOWN },
+
+ { 0x49, KEY_MENU },
+
+ { 0x16, KEY_CHANNELUP },
+ { 0x17, KEY_CHANNELDOWN },
+
+ { 0x20, KEY_UP },
+ { 0x21, KEY_DOWN },
+ { 0x22, KEY_LEFT },
+ { 0x23, KEY_RIGHT },
+ { 0x0d, KEY_SELECT },
+
+ { 0x08, KEY_BACK },
+ { 0x07, KEY_REFRESH },
+
+ { 0x2f, KEY_ZOOM },
+ { 0x29, KEY_RECORD },
+
+ { 0x4b, KEY_PAUSE },
+ { 0x4d, KEY_REWIND },
+ { 0x2e, KEY_PLAY },
+ { 0x4e, KEY_FORWARD },
+ { 0x53, KEY_PREVIOUS },
+ { 0x4c, KEY_STOP },
+ { 0x54, KEY_NEXT },
+
+ { 0x69, KEY_0 },
+ { 0x6a, KEY_1 },
+ { 0x6b, KEY_2 },
+ { 0x6c, KEY_3 },
+ { 0x6d, KEY_4 },
+ { 0x6e, KEY_5 },
+ { 0x6f, KEY_6 },
+ { 0x70, KEY_7 },
+ { 0x71, KEY_8 },
+ { 0x72, KEY_9 },
+
+ { 0x74, KEY_CHANNEL },
+ { 0x0a, KEY_BACKSPACE },
+};
+
+static struct rc_keymap pinnacle_color_map = {
+ .map = {
+ .scan = pinnacle_color,
+ .size = ARRAY_SIZE(pinnacle_color),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_PINNACLE_COLOR,
+ }
+};
+
+static int __init init_rc_map_pinnacle_color(void)
+{
+ return ir_register_map(&pinnacle_color_map);
+}
+
+static void __exit exit_rc_map_pinnacle_color(void)
+{
+ ir_unregister_map(&pinnacle_color_map);
+}
+
+module_init(init_rc_map_pinnacle_color)
+module_exit(exit_rc_map_pinnacle_color)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-pinnacle-grey.c b/drivers/media/IR/keymaps/rc-pinnacle-grey.c
new file mode 100644
index 000000000000..14cb772515c6
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-pinnacle-grey.c
@@ -0,0 +1,89 @@
+/* pinnacle-grey.h - Keytable for pinnacle_grey Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+static struct ir_scancode pinnacle_grey[] = {
+ { 0x3a, KEY_0 },
+ { 0x31, KEY_1 },
+ { 0x32, KEY_2 },
+ { 0x33, KEY_3 },
+ { 0x34, KEY_4 },
+ { 0x35, KEY_5 },
+ { 0x36, KEY_6 },
+ { 0x37, KEY_7 },
+ { 0x38, KEY_8 },
+ { 0x39, KEY_9 },
+
+ { 0x2f, KEY_POWER },
+
+ { 0x2e, KEY_P },
+ { 0x1f, KEY_L },
+ { 0x2b, KEY_I },
+
+ { 0x2d, KEY_SCREEN },
+ { 0x1e, KEY_ZOOM },
+ { 0x1b, KEY_VOLUMEUP },
+ { 0x0f, KEY_VOLUMEDOWN },
+ { 0x17, KEY_CHANNELUP },
+ { 0x1c, KEY_CHANNELDOWN },
+ { 0x25, KEY_INFO },
+
+ { 0x3c, KEY_MUTE },
+
+ { 0x3d, KEY_LEFT },
+ { 0x3b, KEY_RIGHT },
+
+ { 0x3f, KEY_UP },
+ { 0x3e, KEY_DOWN },
+ { 0x1a, KEY_ENTER },
+
+ { 0x1d, KEY_MENU },
+ { 0x19, KEY_AGAIN },
+ { 0x16, KEY_PREVIOUSSONG },
+ { 0x13, KEY_NEXTSONG },
+ { 0x15, KEY_PAUSE },
+ { 0x0e, KEY_REWIND },
+ { 0x0d, KEY_PLAY },
+ { 0x0b, KEY_STOP },
+ { 0x07, KEY_FORWARD },
+ { 0x27, KEY_RECORD },
+ { 0x26, KEY_TUNER },
+ { 0x29, KEY_TEXT },
+ { 0x2a, KEY_MEDIA },
+ { 0x18, KEY_EPG },
+};
+
+static struct rc_keymap pinnacle_grey_map = {
+ .map = {
+ .scan = pinnacle_grey,
+ .size = ARRAY_SIZE(pinnacle_grey),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_PINNACLE_GREY,
+ }
+};
+
+static int __init init_rc_map_pinnacle_grey(void)
+{
+ return ir_register_map(&pinnacle_grey_map);
+}
+
+static void __exit exit_rc_map_pinnacle_grey(void)
+{
+ ir_unregister_map(&pinnacle_grey_map);
+}
+
+module_init(init_rc_map_pinnacle_grey)
+module_exit(exit_rc_map_pinnacle_grey)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-pinnacle-pctv-hd.c b/drivers/media/IR/keymaps/rc-pinnacle-pctv-hd.c
new file mode 100644
index 000000000000..835bf4ef8de7
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-pinnacle-pctv-hd.c
@@ -0,0 +1,73 @@
+/* pinnacle-pctv-hd.h - Keytable for pinnacle_pctv_hd Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* Pinnacle PCTV HD 800i mini remote */
+
+static struct ir_scancode pinnacle_pctv_hd[] = {
+
+ { 0x0f, KEY_1 },
+ { 0x15, KEY_2 },
+ { 0x10, KEY_3 },
+ { 0x18, KEY_4 },
+ { 0x1b, KEY_5 },
+ { 0x1e, KEY_6 },
+ { 0x11, KEY_7 },
+ { 0x21, KEY_8 },
+ { 0x12, KEY_9 },
+ { 0x27, KEY_0 },
+
+ { 0x24, KEY_ZOOM },
+ { 0x2a, KEY_SUBTITLE },
+
+ { 0x00, KEY_MUTE },
+ { 0x01, KEY_ENTER }, /* Pinnacle Logo */
+ { 0x39, KEY_POWER },
+
+ { 0x03, KEY_VOLUMEUP },
+ { 0x09, KEY_VOLUMEDOWN },
+ { 0x06, KEY_CHANNELUP },
+ { 0x0c, KEY_CHANNELDOWN },
+
+ { 0x2d, KEY_REWIND },
+ { 0x30, KEY_PLAYPAUSE },
+ { 0x33, KEY_FASTFORWARD },
+ { 0x3c, KEY_STOP },
+ { 0x36, KEY_RECORD },
+ { 0x3f, KEY_EPG }, /* Labeled "?" */
+};
+
+static struct rc_keymap pinnacle_pctv_hd_map = {
+ .map = {
+ .scan = pinnacle_pctv_hd,
+ .size = ARRAY_SIZE(pinnacle_pctv_hd),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_PINNACLE_PCTV_HD,
+ }
+};
+
+static int __init init_rc_map_pinnacle_pctv_hd(void)
+{
+ return ir_register_map(&pinnacle_pctv_hd_map);
+}
+
+static void __exit exit_rc_map_pinnacle_pctv_hd(void)
+{
+ ir_unregister_map(&pinnacle_pctv_hd_map);
+}
+
+module_init(init_rc_map_pinnacle_pctv_hd)
+module_exit(exit_rc_map_pinnacle_pctv_hd)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-pixelview-mk12.c b/drivers/media/IR/keymaps/rc-pixelview-mk12.c
new file mode 100644
index 000000000000..5a735d569a8b
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-pixelview-mk12.c
@@ -0,0 +1,83 @@
+/* rc-pixelview-mk12.h - Keytable for pixelview Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/*
+ * Keytable for MK-F12 IR remote provided together with Pixelview
+ * Ultra Pro Remote Controller. Uses NEC extended format.
+ */
+static struct ir_scancode pixelview_mk12[] = {
+ { 0x866b03, KEY_TUNER }, /* Timeshift */
+ { 0x866b1e, KEY_POWER2 }, /* power */
+
+ { 0x866b01, KEY_1 },
+ { 0x866b0b, KEY_2 },
+ { 0x866b1b, KEY_3 },
+ { 0x866b05, KEY_4 },
+ { 0x866b09, KEY_5 },
+ { 0x866b15, KEY_6 },
+ { 0x866b06, KEY_7 },
+ { 0x866b0a, KEY_8 },
+ { 0x866b12, KEY_9 },
+ { 0x866b02, KEY_0 },
+
+ { 0x866b13, KEY_AGAIN }, /* loop */
+ { 0x866b10, KEY_DIGITS }, /* +100 */
+
+ { 0x866b00, KEY_MEDIA }, /* source */
+ { 0x866b18, KEY_MUTE }, /* mute */
+ { 0x866b19, KEY_CAMERA }, /* snapshot */
+ { 0x866b1a, KEY_SEARCH }, /* scan */
+
+ { 0x866b16, KEY_CHANNELUP }, /* chn + */
+ { 0x866b14, KEY_CHANNELDOWN }, /* chn - */
+ { 0x866b1f, KEY_VOLUMEUP }, /* vol + */
+ { 0x866b17, KEY_VOLUMEDOWN }, /* vol - */
+ { 0x866b1c, KEY_ZOOM }, /* zoom */
+
+ { 0x866b04, KEY_REWIND },
+ { 0x866b0e, KEY_RECORD },
+ { 0x866b0c, KEY_FORWARD },
+
+ { 0x866b1d, KEY_STOP },
+ { 0x866b08, KEY_PLAY },
+ { 0x866b0f, KEY_PAUSE },
+
+ { 0x866b0d, KEY_TV },
+ { 0x866b07, KEY_RADIO }, /* FM */
+};
+
+static struct rc_keymap pixelview_map = {
+ .map = {
+ .scan = pixelview_mk12,
+ .size = ARRAY_SIZE(pixelview_mk12),
+ .ir_type = IR_TYPE_NEC,
+ .name = RC_MAP_PIXELVIEW_MK12,
+ }
+};
+
+static int __init init_rc_map_pixelview(void)
+{
+ return ir_register_map(&pixelview_map);
+}
+
+static void __exit exit_rc_map_pixelview(void)
+{
+ ir_unregister_map(&pixelview_map);
+}
+
+module_init(init_rc_map_pixelview)
+module_exit(exit_rc_map_pixelview)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-pixelview-new.c b/drivers/media/IR/keymaps/rc-pixelview-new.c
new file mode 100644
index 000000000000..7bbbbf5735e6
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-pixelview-new.c
@@ -0,0 +1,83 @@
+/* pixelview-new.h - Keytable for pixelview_new Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/*
+ Mauro Carvalho Chehab <mchehab@infradead.org>
+ present on PV MPEG 8000GT
+ */
+
+static struct ir_scancode pixelview_new[] = {
+ { 0x3c, KEY_TIME }, /* Timeshift */
+ { 0x12, KEY_POWER },
+
+ { 0x3d, KEY_1 },
+ { 0x38, KEY_2 },
+ { 0x18, KEY_3 },
+ { 0x35, KEY_4 },
+ { 0x39, KEY_5 },
+ { 0x15, KEY_6 },
+ { 0x36, KEY_7 },
+ { 0x3a, KEY_8 },
+ { 0x1e, KEY_9 },
+ { 0x3e, KEY_0 },
+
+ { 0x1c, KEY_AGAIN }, /* LOOP */
+ { 0x3f, KEY_MEDIA }, /* Source */
+ { 0x1f, KEY_LAST }, /* +100 */
+ { 0x1b, KEY_MUTE },
+
+ { 0x17, KEY_CHANNELDOWN },
+ { 0x16, KEY_CHANNELUP },
+ { 0x10, KEY_VOLUMEUP },
+ { 0x14, KEY_VOLUMEDOWN },
+ { 0x13, KEY_ZOOM },
+
+ { 0x19, KEY_CAMERA }, /* SNAPSHOT */
+ { 0x1a, KEY_SEARCH }, /* scan */
+
+ { 0x37, KEY_REWIND }, /* << */
+ { 0x32, KEY_RECORD }, /* o (red) */
+ { 0x33, KEY_FORWARD }, /* >> */
+ { 0x11, KEY_STOP }, /* square */
+ { 0x3b, KEY_PLAY }, /* > */
+ { 0x30, KEY_PLAYPAUSE }, /* || */
+
+ { 0x31, KEY_TV },
+ { 0x34, KEY_RADIO },
+};
+
+static struct rc_keymap pixelview_new_map = {
+ .map = {
+ .scan = pixelview_new,
+ .size = ARRAY_SIZE(pixelview_new),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_PIXELVIEW_NEW,
+ }
+};
+
+static int __init init_rc_map_pixelview_new(void)
+{
+ return ir_register_map(&pixelview_new_map);
+}
+
+static void __exit exit_rc_map_pixelview_new(void)
+{
+ ir_unregister_map(&pixelview_new_map);
+}
+
+module_init(init_rc_map_pixelview_new)
+module_exit(exit_rc_map_pixelview_new)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-pixelview.c b/drivers/media/IR/keymaps/rc-pixelview.c
new file mode 100644
index 000000000000..82ff12e182a0
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-pixelview.c
@@ -0,0 +1,82 @@
+/* pixelview.h - Keytable for pixelview Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+static struct ir_scancode pixelview[] = {
+
+ { 0x1e, KEY_POWER }, /* power */
+ { 0x07, KEY_MEDIA }, /* source */
+ { 0x1c, KEY_SEARCH }, /* scan */
+
+
+ { 0x03, KEY_TUNER }, /* TV/FM */
+
+ { 0x00, KEY_RECORD },
+ { 0x08, KEY_STOP },
+ { 0x11, KEY_PLAY },
+
+ { 0x1a, KEY_PLAYPAUSE }, /* freeze */
+ { 0x19, KEY_ZOOM }, /* zoom */
+ { 0x0f, KEY_TEXT }, /* min */
+
+ { 0x01, KEY_1 },
+ { 0x0b, KEY_2 },
+ { 0x1b, KEY_3 },
+ { 0x05, KEY_4 },
+ { 0x09, KEY_5 },
+ { 0x15, KEY_6 },
+ { 0x06, KEY_7 },
+ { 0x0a, KEY_8 },
+ { 0x12, KEY_9 },
+ { 0x02, KEY_0 },
+ { 0x10, KEY_LAST }, /* +100 */
+ { 0x13, KEY_LIST }, /* recall */
+
+ { 0x1f, KEY_CHANNELUP }, /* chn down */
+ { 0x17, KEY_CHANNELDOWN }, /* chn up */
+ { 0x16, KEY_VOLUMEUP }, /* vol down */
+ { 0x14, KEY_VOLUMEDOWN }, /* vol up */
+
+ { 0x04, KEY_KPMINUS }, /* <<< */
+ { 0x0e, KEY_SETUP }, /* function */
+ { 0x0c, KEY_KPPLUS }, /* >>> */
+
+ { 0x0d, KEY_GOTO }, /* mts */
+ { 0x1d, KEY_REFRESH }, /* reset */
+ { 0x18, KEY_MUTE }, /* mute/unmute */
+};
+
+static struct rc_keymap pixelview_map = {
+ .map = {
+ .scan = pixelview,
+ .size = ARRAY_SIZE(pixelview),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_PIXELVIEW,
+ }
+};
+
+static int __init init_rc_map_pixelview(void)
+{
+ return ir_register_map(&pixelview_map);
+}
+
+static void __exit exit_rc_map_pixelview(void)
+{
+ ir_unregister_map(&pixelview_map);
+}
+
+module_init(init_rc_map_pixelview)
+module_exit(exit_rc_map_pixelview)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-powercolor-real-angel.c b/drivers/media/IR/keymaps/rc-powercolor-real-angel.c
new file mode 100644
index 000000000000..7cef8190a224
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-powercolor-real-angel.c
@@ -0,0 +1,81 @@
+/* powercolor-real-angel.h - Keytable for powercolor_real_angel Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/*
+ * Remote control for Powercolor Real Angel 330
+ * Daniel Fraga <fragabr@gmail.com>
+ */
+
+static struct ir_scancode powercolor_real_angel[] = {
+ { 0x38, KEY_SWITCHVIDEOMODE }, /* switch inputs */
+ { 0x0c, KEY_MEDIA }, /* Turn ON/OFF App */
+ { 0x00, KEY_0 },
+ { 0x01, KEY_1 },
+ { 0x02, KEY_2 },
+ { 0x03, KEY_3 },
+ { 0x04, KEY_4 },
+ { 0x05, KEY_5 },
+ { 0x06, KEY_6 },
+ { 0x07, KEY_7 },
+ { 0x08, KEY_8 },
+ { 0x09, KEY_9 },
+ { 0x0a, KEY_DIGITS }, /* single, double, triple digit */
+ { 0x29, KEY_PREVIOUS }, /* previous channel */
+ { 0x12, KEY_BRIGHTNESSUP },
+ { 0x13, KEY_BRIGHTNESSDOWN },
+ { 0x2b, KEY_MODE }, /* stereo/mono */
+ { 0x2c, KEY_TEXT }, /* teletext */
+ { 0x20, KEY_CHANNELUP }, /* channel up */
+ { 0x21, KEY_CHANNELDOWN }, /* channel down */
+ { 0x10, KEY_VOLUMEUP }, /* volume up */
+ { 0x11, KEY_VOLUMEDOWN }, /* volume down */
+ { 0x0d, KEY_MUTE },
+ { 0x1f, KEY_RECORD },
+ { 0x17, KEY_PLAY },
+ { 0x16, KEY_PAUSE },
+ { 0x0b, KEY_STOP },
+ { 0x27, KEY_FASTFORWARD },
+ { 0x26, KEY_REWIND },
+ { 0x1e, KEY_SEARCH }, /* autoscan */
+ { 0x0e, KEY_CAMERA }, /* snapshot */
+ { 0x2d, KEY_SETUP },
+ { 0x0f, KEY_SCREEN }, /* full screen */
+ { 0x14, KEY_RADIO }, /* FM radio */
+ { 0x25, KEY_POWER }, /* power */
+};
+
+static struct rc_keymap powercolor_real_angel_map = {
+ .map = {
+ .scan = powercolor_real_angel,
+ .size = ARRAY_SIZE(powercolor_real_angel),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_POWERCOLOR_REAL_ANGEL,
+ }
+};
+
+static int __init init_rc_map_powercolor_real_angel(void)
+{
+ return ir_register_map(&powercolor_real_angel_map);
+}
+
+static void __exit exit_rc_map_powercolor_real_angel(void)
+{
+ ir_unregister_map(&powercolor_real_angel_map);
+}
+
+module_init(init_rc_map_powercolor_real_angel)
+module_exit(exit_rc_map_powercolor_real_angel)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-proteus-2309.c b/drivers/media/IR/keymaps/rc-proteus-2309.c
new file mode 100644
index 000000000000..22e92d39dee5
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-proteus-2309.c
@@ -0,0 +1,69 @@
+/* proteus-2309.h - Keytable for proteus_2309 Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* Michal Majchrowicz <mmajchrowicz@gmail.com> */
+
+static struct ir_scancode proteus_2309[] = {
+ /* numeric */
+ { 0x00, KEY_0 },
+ { 0x01, KEY_1 },
+ { 0x02, KEY_2 },
+ { 0x03, KEY_3 },
+ { 0x04, KEY_4 },
+ { 0x05, KEY_5 },
+ { 0x06, KEY_6 },
+ { 0x07, KEY_7 },
+ { 0x08, KEY_8 },
+ { 0x09, KEY_9 },
+
+ { 0x5c, KEY_POWER }, /* power */
+ { 0x20, KEY_ZOOM }, /* full screen */
+ { 0x0f, KEY_BACKSPACE }, /* recall */
+ { 0x1b, KEY_ENTER }, /* mute */
+ { 0x41, KEY_RECORD }, /* record */
+ { 0x43, KEY_STOP }, /* stop */
+ { 0x16, KEY_S },
+ { 0x1a, KEY_POWER2 }, /* off */
+ { 0x2e, KEY_RED },
+ { 0x1f, KEY_CHANNELDOWN }, /* channel - */
+ { 0x1c, KEY_CHANNELUP }, /* channel + */
+ { 0x10, KEY_VOLUMEDOWN }, /* volume - */
+ { 0x1e, KEY_VOLUMEUP }, /* volume + */
+ { 0x14, KEY_F1 },
+};
+
+static struct rc_keymap proteus_2309_map = {
+ .map = {
+ .scan = proteus_2309,
+ .size = ARRAY_SIZE(proteus_2309),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_PROTEUS_2309,
+ }
+};
+
+static int __init init_rc_map_proteus_2309(void)
+{
+ return ir_register_map(&proteus_2309_map);
+}
+
+static void __exit exit_rc_map_proteus_2309(void)
+{
+ ir_unregister_map(&proteus_2309_map);
+}
+
+module_init(init_rc_map_proteus_2309)
+module_exit(exit_rc_map_proteus_2309)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-purpletv.c b/drivers/media/IR/keymaps/rc-purpletv.c
new file mode 100644
index 000000000000..4e20fc2269f7
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-purpletv.c
@@ -0,0 +1,81 @@
+/* purpletv.h - Keytable for purpletv Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+static struct ir_scancode purpletv[] = {
+ { 0x03, KEY_POWER },
+ { 0x6f, KEY_MUTE },
+ { 0x10, KEY_BACKSPACE }, /* Recall */
+
+ { 0x11, KEY_0 },
+ { 0x04, KEY_1 },
+ { 0x05, KEY_2 },
+ { 0x06, KEY_3 },
+ { 0x08, KEY_4 },
+ { 0x09, KEY_5 },
+ { 0x0a, KEY_6 },
+ { 0x0c, KEY_7 },
+ { 0x0d, KEY_8 },
+ { 0x0e, KEY_9 },
+ { 0x12, KEY_DOT }, /* 100+ */
+
+ { 0x07, KEY_VOLUMEUP },
+ { 0x0b, KEY_VOLUMEDOWN },
+ { 0x1a, KEY_KPPLUS },
+ { 0x18, KEY_KPMINUS },
+ { 0x15, KEY_UP },
+ { 0x1d, KEY_DOWN },
+ { 0x0f, KEY_CHANNELUP },
+ { 0x13, KEY_CHANNELDOWN },
+ { 0x48, KEY_ZOOM },
+
+ { 0x1b, KEY_VIDEO }, /* Video source */
+ { 0x1f, KEY_CAMERA }, /* Snapshot */
+ { 0x49, KEY_LANGUAGE }, /* MTS Select */
+ { 0x19, KEY_SEARCH }, /* Auto Scan */
+
+ { 0x4b, KEY_RECORD },
+ { 0x46, KEY_PLAY },
+ { 0x45, KEY_PAUSE }, /* Pause */
+ { 0x44, KEY_STOP },
+ { 0x43, KEY_TIME }, /* Time Shift */
+ { 0x17, KEY_CHANNEL }, /* SURF CH */
+ { 0x40, KEY_FORWARD }, /* Forward ? */
+ { 0x42, KEY_REWIND }, /* Backward ? */
+
+};
+
+static struct rc_keymap purpletv_map = {
+ .map = {
+ .scan = purpletv,
+ .size = ARRAY_SIZE(purpletv),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_PURPLETV,
+ }
+};
+
+static int __init init_rc_map_purpletv(void)
+{
+ return ir_register_map(&purpletv_map);
+}
+
+static void __exit exit_rc_map_purpletv(void)
+{
+ ir_unregister_map(&purpletv_map);
+}
+
+module_init(init_rc_map_purpletv)
+module_exit(exit_rc_map_purpletv)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-pv951.c b/drivers/media/IR/keymaps/rc-pv951.c
new file mode 100644
index 000000000000..36679e706cf3
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-pv951.c
@@ -0,0 +1,78 @@
+/* pv951.h - Keytable for pv951 Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* Mark Phalan <phalanm@o2.ie> */
+
+static struct ir_scancode pv951[] = {
+ { 0x00, KEY_0 },
+ { 0x01, KEY_1 },
+ { 0x02, KEY_2 },
+ { 0x03, KEY_3 },
+ { 0x04, KEY_4 },
+ { 0x05, KEY_5 },
+ { 0x06, KEY_6 },
+ { 0x07, KEY_7 },
+ { 0x08, KEY_8 },
+ { 0x09, KEY_9 },
+
+ { 0x12, KEY_POWER },
+ { 0x10, KEY_MUTE },
+ { 0x1f, KEY_VOLUMEDOWN },
+ { 0x1b, KEY_VOLUMEUP },
+ { 0x1a, KEY_CHANNELUP },
+ { 0x1e, KEY_CHANNELDOWN },
+ { 0x0e, KEY_PAGEUP },
+ { 0x1d, KEY_PAGEDOWN },
+ { 0x13, KEY_SOUND },
+
+ { 0x18, KEY_KPPLUSMINUS }, /* CH +/- */
+ { 0x16, KEY_SUBTITLE }, /* CC */
+ { 0x0d, KEY_TEXT }, /* TTX */
+ { 0x0b, KEY_TV }, /* AIR/CBL */
+ { 0x11, KEY_PC }, /* PC/TV */
+ { 0x17, KEY_OK }, /* CH RTN */
+ { 0x19, KEY_MODE }, /* FUNC */
+ { 0x0c, KEY_SEARCH }, /* AUTOSCAN */
+
+ /* Not sure what to do with these ones! */
+ { 0x0f, KEY_SELECT }, /* SOURCE */
+ { 0x0a, KEY_KPPLUS }, /* +100 */
+ { 0x14, KEY_EQUAL }, /* SYNC */
+ { 0x1c, KEY_MEDIA }, /* PC/TV */
+};
+
+static struct rc_keymap pv951_map = {
+ .map = {
+ .scan = pv951,
+ .size = ARRAY_SIZE(pv951),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_PV951,
+ }
+};
+
+static int __init init_rc_map_pv951(void)
+{
+ return ir_register_map(&pv951_map);
+}
+
+static void __exit exit_rc_map_pv951(void)
+{
+ ir_unregister_map(&pv951_map);
+}
+
+module_init(init_rc_map_pv951)
+module_exit(exit_rc_map_pv951)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-rc5-hauppauge-new.c b/drivers/media/IR/keymaps/rc-rc5-hauppauge-new.c
new file mode 100644
index 000000000000..cc6b8f548747
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-rc5-hauppauge-new.c
@@ -0,0 +1,103 @@
+/* rc5-hauppauge-new.h - Keytable for rc5_hauppauge_new Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/*
+ * Hauppauge: the newer, gray remotes (seems there are multiple
+ * slightly different versions), shipped with cx88+ivtv cards.
+ *
+ * This table contains the complete RC5 code, instead of just the data part
+ */
+
+static struct ir_scancode rc5_hauppauge_new[] = {
+ /* Keys 0 to 9 */
+ { 0x1e00, KEY_0 },
+ { 0x1e01, KEY_1 },
+ { 0x1e02, KEY_2 },
+ { 0x1e03, KEY_3 },
+ { 0x1e04, KEY_4 },
+ { 0x1e05, KEY_5 },
+ { 0x1e06, KEY_6 },
+ { 0x1e07, KEY_7 },
+ { 0x1e08, KEY_8 },
+ { 0x1e09, KEY_9 },
+
+ { 0x1e0a, KEY_TEXT }, /* keypad asterisk as well */
+ { 0x1e0b, KEY_RED }, /* red button */
+ { 0x1e0c, KEY_RADIO },
+ { 0x1e0d, KEY_MENU },
+ { 0x1e0e, KEY_SUBTITLE }, /* also the # key */
+ { 0x1e0f, KEY_MUTE },
+ { 0x1e10, KEY_VOLUMEUP },
+ { 0x1e11, KEY_VOLUMEDOWN },
+ { 0x1e12, KEY_PREVIOUS }, /* previous channel */
+ { 0x1e14, KEY_UP },
+ { 0x1e15, KEY_DOWN },
+ { 0x1e16, KEY_LEFT },
+ { 0x1e17, KEY_RIGHT },
+ { 0x1e18, KEY_VIDEO }, /* Videos */
+ { 0x1e19, KEY_AUDIO }, /* Music */
+ /* 0x1e1a: Pictures - presume this means
+ "Multimedia Home Platform" -
+ no "PICTURES" key in input.h
+ */
+ { 0x1e1a, KEY_MHP },
+
+ { 0x1e1b, KEY_EPG }, /* Guide */
+ { 0x1e1c, KEY_TV },
+ { 0x1e1e, KEY_NEXTSONG }, /* skip >| */
+ { 0x1e1f, KEY_EXIT }, /* back/exit */
+ { 0x1e20, KEY_CHANNELUP }, /* channel / program + */
+ { 0x1e21, KEY_CHANNELDOWN }, /* channel / program - */
+ { 0x1e22, KEY_CHANNEL }, /* source (old black remote) */
+ { 0x1e24, KEY_PREVIOUSSONG }, /* replay |< */
+ { 0x1e25, KEY_ENTER }, /* OK */
+ { 0x1e26, KEY_SLEEP }, /* minimize (old black remote) */
+ { 0x1e29, KEY_BLUE }, /* blue key */
+ { 0x1e2e, KEY_GREEN }, /* green button */
+ { 0x1e30, KEY_PAUSE }, /* pause */
+ { 0x1e32, KEY_REWIND }, /* backward << */
+ { 0x1e34, KEY_FASTFORWARD }, /* forward >> */
+ { 0x1e35, KEY_PLAY },
+ { 0x1e36, KEY_STOP },
+ { 0x1e37, KEY_RECORD }, /* recording */
+ { 0x1e38, KEY_YELLOW }, /* yellow key */
+ { 0x1e3b, KEY_SELECT }, /* top right button */
+ { 0x1e3c, KEY_ZOOM }, /* full */
+ { 0x1e3d, KEY_POWER }, /* system power (green button) */
+};
+
+static struct rc_keymap rc5_hauppauge_new_map = {
+ .map = {
+ .scan = rc5_hauppauge_new,
+ .size = ARRAY_SIZE(rc5_hauppauge_new),
+ .ir_type = IR_TYPE_RC5,
+ .name = RC_MAP_RC5_HAUPPAUGE_NEW,
+ }
+};
+
+static int __init init_rc_map_rc5_hauppauge_new(void)
+{
+ return ir_register_map(&rc5_hauppauge_new_map);
+}
+
+static void __exit exit_rc_map_rc5_hauppauge_new(void)
+{
+ ir_unregister_map(&rc5_hauppauge_new_map);
+}
+
+module_init(init_rc_map_rc5_hauppauge_new)
+module_exit(exit_rc_map_rc5_hauppauge_new)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
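The table above stores the complete RC5 code (system address plus command) rather than just the command byte, as its comment notes. A minimal sketch of how such a scancode splits into its two RC5 fields, assuming the 8-bit layout used by the entries above; these helpers are illustrative and not part of this patch:

/* Illustrative helpers, not part of this patch: split a full RC5
 * scancode such as 0x1e0b into its system/address byte (0x1e) and
 * command byte (0x0b), matching the table layout above. */
#include <linux/types.h>

static inline unsigned char rc5_hauppauge_system(u32 scancode)
{
	return (scancode >> 8) & 0xff;	/* 0x1e for this grey Hauppauge remote */
}

static inline unsigned char rc5_hauppauge_command(u32 scancode)
{
	return scancode & 0xff;		/* e.g. 0x0b maps to KEY_RED above */
}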
diff --git a/drivers/media/IR/keymaps/rc-rc5-tv.c b/drivers/media/IR/keymaps/rc-rc5-tv.c
new file mode 100644
index 000000000000..73cce2f8ddfb
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-rc5-tv.c
@@ -0,0 +1,81 @@
+/* rc5-tv.h - Keytable for rc5_tv Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* generic RC5 keytable */
+/* see http://users.pandora.be/nenya/electronics/rc5/codes00.htm */
+/* used by old (black) Hauppauge remotes */
+
+static struct ir_scancode rc5_tv[] = {
+ /* Keys 0 to 9 */
+ { 0x00, KEY_0 },
+ { 0x01, KEY_1 },
+ { 0x02, KEY_2 },
+ { 0x03, KEY_3 },
+ { 0x04, KEY_4 },
+ { 0x05, KEY_5 },
+ { 0x06, KEY_6 },
+ { 0x07, KEY_7 },
+ { 0x08, KEY_8 },
+ { 0x09, KEY_9 },
+
+ { 0x0b, KEY_CHANNEL }, /* channel / program (japan: 11) */
+ { 0x0c, KEY_POWER }, /* standby */
+ { 0x0d, KEY_MUTE }, /* mute / demute */
+ { 0x0f, KEY_TV }, /* display */
+ { 0x10, KEY_VOLUMEUP },
+ { 0x11, KEY_VOLUMEDOWN },
+ { 0x12, KEY_BRIGHTNESSUP },
+ { 0x13, KEY_BRIGHTNESSDOWN },
+ { 0x1e, KEY_SEARCH }, /* search + */
+ { 0x20, KEY_CHANNELUP }, /* channel / program + */
+ { 0x21, KEY_CHANNELDOWN }, /* channel / program - */
+ { 0x22, KEY_CHANNEL }, /* alt / channel */
+ { 0x23, KEY_LANGUAGE }, /* 1st / 2nd language */
+ { 0x26, KEY_SLEEP }, /* sleeptimer */
+ { 0x2e, KEY_MENU }, /* 2nd controls (USA: menu) */
+ { 0x30, KEY_PAUSE },
+ { 0x32, KEY_REWIND },
+ { 0x33, KEY_GOTO },
+ { 0x35, KEY_PLAY },
+ { 0x36, KEY_STOP },
+ { 0x37, KEY_RECORD }, /* recording */
+ { 0x3c, KEY_TEXT }, /* teletext submode (Japan: 12) */
+ { 0x3d, KEY_SUSPEND }, /* system standby */
+
+};
+
+static struct rc_keymap rc5_tv_map = {
+ .map = {
+ .scan = rc5_tv,
+ .size = ARRAY_SIZE(rc5_tv),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_RC5_TV,
+ }
+};
+
+static int __init init_rc_map_rc5_tv(void)
+{
+ return ir_register_map(&rc5_tv_map);
+}
+
+static void __exit exit_rc_map_rc5_tv(void)
+{
+ ir_unregister_map(&rc5_tv_map);
+}
+
+module_init(init_rc_map_rc5_tv)
+module_exit(exit_rc_map_rc5_tv)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-real-audio-220-32-keys.c b/drivers/media/IR/keymaps/rc-real-audio-220-32-keys.c
new file mode 100644
index 000000000000..ab1a6d2baf72
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-real-audio-220-32-keys.c
@@ -0,0 +1,78 @@
+/* real-audio-220-32-keys.h - Keytable for real_audio_220_32_keys Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* Zogis Real Audio 220 - 32 keys IR */
+
+static struct ir_scancode real_audio_220_32_keys[] = {
+ { 0x1c, KEY_RADIO},
+ { 0x12, KEY_POWER2},
+
+ { 0x01, KEY_1},
+ { 0x02, KEY_2},
+ { 0x03, KEY_3},
+ { 0x04, KEY_4},
+ { 0x05, KEY_5},
+ { 0x06, KEY_6},
+ { 0x07, KEY_7},
+ { 0x08, KEY_8},
+ { 0x09, KEY_9},
+ { 0x00, KEY_0},
+
+ { 0x0c, KEY_VOLUMEUP},
+ { 0x18, KEY_VOLUMEDOWN},
+ { 0x0b, KEY_CHANNELUP},
+ { 0x15, KEY_CHANNELDOWN},
+ { 0x16, KEY_ENTER},
+
+ { 0x11, KEY_LIST}, /* Source */
+ { 0x0d, KEY_AUDIO}, /* stereo */
+
+ { 0x0f, KEY_PREVIOUS}, /* Prev */
+ { 0x1b, KEY_TIME}, /* Timeshift */
+ { 0x1a, KEY_NEXT}, /* Next */
+
+ { 0x0e, KEY_STOP},
+ { 0x1f, KEY_PLAY},
+ { 0x1e, KEY_PLAYPAUSE}, /* Pause */
+
+ { 0x1d, KEY_RECORD},
+ { 0x13, KEY_MUTE},
+ { 0x19, KEY_CAMERA}, /* Snapshot */
+
+};
+
+static struct rc_keymap real_audio_220_32_keys_map = {
+ .map = {
+ .scan = real_audio_220_32_keys,
+ .size = ARRAY_SIZE(real_audio_220_32_keys),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_REAL_AUDIO_220_32_KEYS,
+ }
+};
+
+static int __init init_rc_map_real_audio_220_32_keys(void)
+{
+ return ir_register_map(&real_audio_220_32_keys_map);
+}
+
+static void __exit exit_rc_map_real_audio_220_32_keys(void)
+{
+ ir_unregister_map(&real_audio_220_32_keys_map);
+}
+
+module_init(init_rc_map_real_audio_220_32_keys)
+module_exit(exit_rc_map_real_audio_220_32_keys)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-tbs-nec.c b/drivers/media/IR/keymaps/rc-tbs-nec.c
new file mode 100644
index 000000000000..3309631e6f80
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-tbs-nec.c
@@ -0,0 +1,73 @@
+/* tbs-nec.h - Keytable for tbs_nec Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+static struct ir_scancode tbs_nec[] = {
+ { 0x04, KEY_POWER2}, /*power*/
+ { 0x14, KEY_MUTE}, /*mute*/
+ { 0x07, KEY_1},
+ { 0x06, KEY_2},
+ { 0x05, KEY_3},
+ { 0x0b, KEY_4},
+ { 0x0a, KEY_5},
+ { 0x09, KEY_6},
+ { 0x0f, KEY_7},
+ { 0x0e, KEY_8},
+ { 0x0d, KEY_9},
+ { 0x12, KEY_0},
+ { 0x16, KEY_CHANNELUP}, /*ch+*/
+ { 0x11, KEY_CHANNELDOWN},/*ch-*/
+ { 0x13, KEY_VOLUMEUP}, /*vol+*/
+ { 0x0c, KEY_VOLUMEDOWN},/*vol-*/
+ { 0x03, KEY_RECORD}, /*rec*/
+ { 0x18, KEY_PAUSE}, /*pause*/
+ { 0x19, KEY_OK}, /*ok*/
+ { 0x1a, KEY_CAMERA}, /* snapshot */
+ { 0x01, KEY_UP},
+ { 0x10, KEY_LEFT},
+ { 0x02, KEY_RIGHT},
+ { 0x08, KEY_DOWN},
+ { 0x15, KEY_FAVORITES},
+ { 0x17, KEY_SUBTITLE},
+ { 0x1d, KEY_ZOOM},
+ { 0x1f, KEY_EXIT},
+ { 0x1e, KEY_MENU},
+ { 0x1c, KEY_EPG},
+ { 0x00, KEY_PREVIOUS},
+ { 0x1b, KEY_MODE},
+};
+
+static struct rc_keymap tbs_nec_map = {
+ .map = {
+ .scan = tbs_nec,
+ .size = ARRAY_SIZE(tbs_nec),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_TBS_NEC,
+ }
+};
+
+static int __init init_rc_map_tbs_nec(void)
+{
+ return ir_register_map(&tbs_nec_map);
+}
+
+static void __exit exit_rc_map_tbs_nec(void)
+{
+ ir_unregister_map(&tbs_nec_map);
+}
+
+module_init(init_rc_map_tbs_nec)
+module_exit(exit_rc_map_tbs_nec)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-terratec-cinergy-xs.c b/drivers/media/IR/keymaps/rc-terratec-cinergy-xs.c
new file mode 100644
index 000000000000..5326a0b444c1
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-terratec-cinergy-xs.c
@@ -0,0 +1,92 @@
+/* terratec-cinergy-xs.h - Keytable for terratec_cinergy_xs Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* Terratec Cinergy Hybrid T USB XS
+ Devin Heitmueller <dheitmueller@linuxtv.org>
+ */
+
+static struct ir_scancode terratec_cinergy_xs[] = {
+ { 0x41, KEY_HOME},
+ { 0x01, KEY_POWER},
+ { 0x42, KEY_MENU},
+ { 0x02, KEY_1},
+ { 0x03, KEY_2},
+ { 0x04, KEY_3},
+ { 0x43, KEY_SUBTITLE},
+ { 0x05, KEY_4},
+ { 0x06, KEY_5},
+ { 0x07, KEY_6},
+ { 0x44, KEY_TEXT},
+ { 0x08, KEY_7},
+ { 0x09, KEY_8},
+ { 0x0a, KEY_9},
+ { 0x45, KEY_DELETE},
+ { 0x0b, KEY_TUNER},
+ { 0x0c, KEY_0},
+ { 0x0d, KEY_MODE},
+ { 0x46, KEY_TV},
+ { 0x47, KEY_DVD},
+ { 0x49, KEY_VIDEO},
+ { 0x4b, KEY_AUX},
+ { 0x10, KEY_UP},
+ { 0x11, KEY_LEFT},
+ { 0x12, KEY_OK},
+ { 0x13, KEY_RIGHT},
+ { 0x14, KEY_DOWN},
+ { 0x0f, KEY_EPG},
+ { 0x16, KEY_INFO},
+ { 0x4d, KEY_BACKSPACE},
+ { 0x1c, KEY_VOLUMEUP},
+ { 0x4c, KEY_PLAY},
+ { 0x1b, KEY_CHANNELUP},
+ { 0x1e, KEY_VOLUMEDOWN},
+ { 0x1d, KEY_MUTE},
+ { 0x1f, KEY_CHANNELDOWN},
+ { 0x17, KEY_RED},
+ { 0x18, KEY_GREEN},
+ { 0x19, KEY_YELLOW},
+ { 0x1a, KEY_BLUE},
+ { 0x58, KEY_RECORD},
+ { 0x48, KEY_STOP},
+ { 0x40, KEY_PAUSE},
+ { 0x54, KEY_LAST},
+ { 0x4e, KEY_REWIND},
+ { 0x4f, KEY_FASTFORWARD},
+ { 0x5c, KEY_NEXT},
+};
+
+static struct rc_keymap terratec_cinergy_xs_map = {
+ .map = {
+ .scan = terratec_cinergy_xs,
+ .size = ARRAY_SIZE(terratec_cinergy_xs),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_TERRATEC_CINERGY_XS,
+ }
+};
+
+static int __init init_rc_map_terratec_cinergy_xs(void)
+{
+ return ir_register_map(&terratec_cinergy_xs_map);
+}
+
+static void __exit exit_rc_map_terratec_cinergy_xs(void)
+{
+ ir_unregister_map(&terratec_cinergy_xs_map);
+}
+
+module_init(init_rc_map_terratec_cinergy_xs)
+module_exit(exit_rc_map_terratec_cinergy_xs)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-tevii-nec.c b/drivers/media/IR/keymaps/rc-tevii-nec.c
new file mode 100644
index 000000000000..e30d411c07bb
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-tevii-nec.c
@@ -0,0 +1,88 @@
+/* tevii-nec.h - Keytable for tevii_nec Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+static struct ir_scancode tevii_nec[] = {
+ { 0x0a, KEY_POWER2},
+ { 0x0c, KEY_MUTE},
+ { 0x11, KEY_1},
+ { 0x12, KEY_2},
+ { 0x13, KEY_3},
+ { 0x14, KEY_4},
+ { 0x15, KEY_5},
+ { 0x16, KEY_6},
+ { 0x17, KEY_7},
+ { 0x18, KEY_8},
+ { 0x19, KEY_9},
+ { 0x10, KEY_0},
+ { 0x1c, KEY_MENU},
+ { 0x0f, KEY_VOLUMEDOWN},
+ { 0x1a, KEY_LAST},
+ { 0x0e, KEY_OPEN},
+ { 0x04, KEY_RECORD},
+ { 0x09, KEY_VOLUMEUP},
+ { 0x08, KEY_CHANNELUP},
+ { 0x07, KEY_PVR},
+ { 0x0b, KEY_TIME},
+ { 0x02, KEY_RIGHT},
+ { 0x03, KEY_LEFT},
+ { 0x00, KEY_UP},
+ { 0x1f, KEY_OK},
+ { 0x01, KEY_DOWN},
+ { 0x05, KEY_TUNER},
+ { 0x06, KEY_CHANNELDOWN},
+ { 0x40, KEY_PLAYPAUSE},
+ { 0x1e, KEY_REWIND},
+ { 0x1b, KEY_FAVORITES},
+ { 0x1d, KEY_BACK},
+ { 0x4d, KEY_FASTFORWARD},
+ { 0x44, KEY_EPG},
+ { 0x4c, KEY_INFO},
+ { 0x41, KEY_AB},
+ { 0x43, KEY_AUDIO},
+ { 0x45, KEY_SUBTITLE},
+ { 0x4a, KEY_LIST},
+ { 0x46, KEY_F1},
+ { 0x47, KEY_F2},
+ { 0x5e, KEY_F3},
+ { 0x5c, KEY_F4},
+ { 0x52, KEY_F5},
+ { 0x5a, KEY_F6},
+ { 0x56, KEY_MODE},
+ { 0x58, KEY_SWITCHVIDEOMODE},
+};
+
+static struct rc_keymap tevii_nec_map = {
+ .map = {
+ .scan = tevii_nec,
+ .size = ARRAY_SIZE(tevii_nec),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_TEVII_NEC,
+ }
+};
+
+static int __init init_rc_map_tevii_nec(void)
+{
+ return ir_register_map(&tevii_nec_map);
+}
+
+static void __exit exit_rc_map_tevii_nec(void)
+{
+ ir_unregister_map(&tevii_nec_map);
+}
+
+module_init(init_rc_map_tevii_nec)
+module_exit(exit_rc_map_tevii_nec)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-tt-1500.c b/drivers/media/IR/keymaps/rc-tt-1500.c
new file mode 100644
index 000000000000..bc88de011d5d
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-tt-1500.c
@@ -0,0 +1,82 @@
+/* tt-1500.h - Keytable for tt_1500 Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* for the Technotrend 1500 bundled remotes (grey and black): */
+
+static struct ir_scancode tt_1500[] = {
+ { 0x01, KEY_POWER },
+ { 0x02, KEY_SHUFFLE }, /* ? double-arrow key */
+ { 0x03, KEY_1 },
+ { 0x04, KEY_2 },
+ { 0x05, KEY_3 },
+ { 0x06, KEY_4 },
+ { 0x07, KEY_5 },
+ { 0x08, KEY_6 },
+ { 0x09, KEY_7 },
+ { 0x0a, KEY_8 },
+ { 0x0b, KEY_9 },
+ { 0x0c, KEY_0 },
+ { 0x0d, KEY_UP },
+ { 0x0e, KEY_LEFT },
+ { 0x0f, KEY_OK },
+ { 0x10, KEY_RIGHT },
+ { 0x11, KEY_DOWN },
+ { 0x12, KEY_INFO },
+ { 0x13, KEY_EXIT },
+ { 0x14, KEY_RED },
+ { 0x15, KEY_GREEN },
+ { 0x16, KEY_YELLOW },
+ { 0x17, KEY_BLUE },
+ { 0x18, KEY_MUTE },
+ { 0x19, KEY_TEXT },
+ { 0x1a, KEY_MODE }, /* ? TV/Radio */
+ { 0x21, KEY_OPTION },
+ { 0x22, KEY_EPG },
+ { 0x23, KEY_CHANNELUP },
+ { 0x24, KEY_CHANNELDOWN },
+ { 0x25, KEY_VOLUMEUP },
+ { 0x26, KEY_VOLUMEDOWN },
+ { 0x27, KEY_SETUP },
+ { 0x3a, KEY_RECORD }, /* these keys are only in the black remote */
+ { 0x3b, KEY_PLAY },
+ { 0x3c, KEY_STOP },
+ { 0x3d, KEY_REWIND },
+ { 0x3e, KEY_PAUSE },
+ { 0x3f, KEY_FORWARD },
+};
+
+static struct rc_keymap tt_1500_map = {
+ .map = {
+ .scan = tt_1500,
+ .size = ARRAY_SIZE(tt_1500),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_TT_1500,
+ }
+};
+
+static int __init init_rc_map_tt_1500(void)
+{
+ return ir_register_map(&tt_1500_map);
+}
+
+static void __exit exit_rc_map_tt_1500(void)
+{
+ ir_unregister_map(&tt_1500_map);
+}
+
+module_init(init_rc_map_tt_1500)
+module_exit(exit_rc_map_tt_1500)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-videomate-s350.c b/drivers/media/IR/keymaps/rc-videomate-s350.c
new file mode 100644
index 000000000000..4df7fcd1d2fc
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-videomate-s350.c
@@ -0,0 +1,85 @@
+/* videomate-s350.h - Keytable for videomate_s350 Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+static struct ir_scancode videomate_s350[] = {
+ { 0x00, KEY_TV},
+ { 0x01, KEY_DVD},
+ { 0x04, KEY_RECORD},
+ { 0x05, KEY_VIDEO}, /* TV/Video */
+ { 0x07, KEY_STOP},
+ { 0x08, KEY_PLAYPAUSE},
+ { 0x0a, KEY_REWIND},
+ { 0x0f, KEY_FASTFORWARD},
+ { 0x10, KEY_CHANNELUP},
+ { 0x12, KEY_VOLUMEUP},
+ { 0x13, KEY_CHANNELDOWN},
+ { 0x14, KEY_MUTE},
+ { 0x15, KEY_VOLUMEDOWN},
+ { 0x16, KEY_1},
+ { 0x17, KEY_2},
+ { 0x18, KEY_3},
+ { 0x19, KEY_4},
+ { 0x1a, KEY_5},
+ { 0x1b, KEY_6},
+ { 0x1c, KEY_7},
+ { 0x1d, KEY_8},
+ { 0x1e, KEY_9},
+ { 0x1f, KEY_0},
+ { 0x21, KEY_SLEEP},
+ { 0x24, KEY_ZOOM},
+ { 0x25, KEY_LAST}, /* Recall */
+ { 0x26, KEY_SUBTITLE}, /* CC */
+ { 0x27, KEY_LANGUAGE}, /* MTS */
+ { 0x29, KEY_CHANNEL}, /* SURF */
+ { 0x2b, KEY_A},
+ { 0x2c, KEY_B},
+ { 0x2f, KEY_CAMERA}, /* Snapshot */
+ { 0x23, KEY_RADIO},
+ { 0x02, KEY_PREVIOUSSONG},
+ { 0x06, KEY_NEXTSONG},
+ { 0x03, KEY_EPG},
+ { 0x09, KEY_SETUP},
+ { 0x22, KEY_BACKSPACE},
+ { 0x0c, KEY_UP},
+ { 0x0e, KEY_DOWN},
+ { 0x0b, KEY_LEFT},
+ { 0x0d, KEY_RIGHT},
+ { 0x11, KEY_ENTER},
+ { 0x20, KEY_TEXT},
+};
+
+static struct rc_keymap videomate_s350_map = {
+ .map = {
+ .scan = videomate_s350,
+ .size = ARRAY_SIZE(videomate_s350),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_VIDEOMATE_S350,
+ }
+};
+
+static int __init init_rc_map_videomate_s350(void)
+{
+ return ir_register_map(&videomate_s350_map);
+}
+
+static void __exit exit_rc_map_videomate_s350(void)
+{
+ ir_unregister_map(&videomate_s350_map);
+}
+
+module_init(init_rc_map_videomate_s350)
+module_exit(exit_rc_map_videomate_s350)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-videomate-tv-pvr.c b/drivers/media/IR/keymaps/rc-videomate-tv-pvr.c
new file mode 100644
index 000000000000..776b0a638d87
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-videomate-tv-pvr.c
@@ -0,0 +1,87 @@
+/* videomate-tv-pvr.h - Keytable for videomate_tv_pvr Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+static struct ir_scancode videomate_tv_pvr[] = {
+ { 0x14, KEY_MUTE },
+ { 0x24, KEY_ZOOM },
+
+ { 0x01, KEY_DVD },
+ { 0x23, KEY_RADIO },
+ { 0x00, KEY_TV },
+
+ { 0x0a, KEY_REWIND },
+ { 0x08, KEY_PLAYPAUSE },
+ { 0x0f, KEY_FORWARD },
+
+ { 0x02, KEY_PREVIOUS },
+ { 0x07, KEY_STOP },
+ { 0x06, KEY_NEXT },
+
+ { 0x0c, KEY_UP },
+ { 0x0e, KEY_DOWN },
+ { 0x0b, KEY_LEFT },
+ { 0x0d, KEY_RIGHT },
+ { 0x11, KEY_OK },
+
+ { 0x03, KEY_MENU },
+ { 0x09, KEY_SETUP },
+ { 0x05, KEY_VIDEO },
+ { 0x22, KEY_CHANNEL },
+
+ { 0x12, KEY_VOLUMEUP },
+ { 0x15, KEY_VOLUMEDOWN },
+ { 0x10, KEY_CHANNELUP },
+ { 0x13, KEY_CHANNELDOWN },
+
+ { 0x04, KEY_RECORD },
+
+ { 0x16, KEY_1 },
+ { 0x17, KEY_2 },
+ { 0x18, KEY_3 },
+ { 0x19, KEY_4 },
+ { 0x1a, KEY_5 },
+ { 0x1b, KEY_6 },
+ { 0x1c, KEY_7 },
+ { 0x1d, KEY_8 },
+ { 0x1e, KEY_9 },
+ { 0x1f, KEY_0 },
+
+ { 0x20, KEY_LANGUAGE },
+ { 0x21, KEY_SLEEP },
+};
+
+static struct rc_keymap videomate_tv_pvr_map = {
+ .map = {
+ .scan = videomate_tv_pvr,
+ .size = ARRAY_SIZE(videomate_tv_pvr),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_VIDEOMATE_TV_PVR,
+ }
+};
+
+static int __init init_rc_map_videomate_tv_pvr(void)
+{
+ return ir_register_map(&videomate_tv_pvr_map);
+}
+
+static void __exit exit_rc_map_videomate_tv_pvr(void)
+{
+ ir_unregister_map(&videomate_tv_pvr_map);
+}
+
+module_init(init_rc_map_videomate_tv_pvr)
+module_exit(exit_rc_map_videomate_tv_pvr)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-winfast-usbii-deluxe.c b/drivers/media/IR/keymaps/rc-winfast-usbii-deluxe.c
new file mode 100644
index 000000000000..9d2d550aaa90
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-winfast-usbii-deluxe.c
@@ -0,0 +1,82 @@
+/* winfast-usbii-deluxe.h - Keytable for winfast_usbii_deluxe Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* Leadtek Winfast TV USB II Deluxe remote
+ Magnus Alm <magnus.alm@gmail.com>
+ */
+
+static struct ir_scancode winfast_usbii_deluxe[] = {
+ { 0x62, KEY_0},
+ { 0x75, KEY_1},
+ { 0x76, KEY_2},
+ { 0x77, KEY_3},
+ { 0x79, KEY_4},
+ { 0x7a, KEY_5},
+ { 0x7b, KEY_6},
+ { 0x7d, KEY_7},
+ { 0x7e, KEY_8},
+ { 0x7f, KEY_9},
+
+ { 0x38, KEY_CAMERA}, /* SNAPSHOT */
+ { 0x37, KEY_RECORD}, /* RECORD */
+ { 0x35, KEY_TIME}, /* TIMESHIFT */
+
+ { 0x74, KEY_VOLUMEUP}, /* VOLUMEUP */
+ { 0x78, KEY_VOLUMEDOWN}, /* VOLUMEDOWN */
+ { 0x64, KEY_MUTE}, /* MUTE */
+
+ { 0x21, KEY_CHANNEL}, /* SURF */
+ { 0x7c, KEY_CHANNELUP}, /* CHANNELUP */
+ { 0x60, KEY_CHANNELDOWN}, /* CHANNELDOWN */
+ { 0x61, KEY_LAST}, /* LAST CHANNEL (RECALL) */
+
+ { 0x72, KEY_VIDEO}, /* INPUT MODES (TV/FM) */
+
+ { 0x70, KEY_POWER2}, /* TV ON/OFF */
+
+ { 0x39, KEY_CYCLEWINDOWS}, /* MINIMIZE (BOSS) */
+ { 0x3a, KEY_NEW}, /* PIP */
+ { 0x73, KEY_ZOOM}, /* FULLSCREEN */
+
+ { 0x66, KEY_INFO}, /* OSD (DISPLAY) */
+
+ { 0x31, KEY_DOT}, /* '.' */
+ { 0x63, KEY_ENTER}, /* ENTER */
+
+};
+
+static struct rc_keymap winfast_usbii_deluxe_map = {
+ .map = {
+ .scan = winfast_usbii_deluxe,
+ .size = ARRAY_SIZE(winfast_usbii_deluxe),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_WINFAST_USBII_DELUXE,
+ }
+};
+
+static int __init init_rc_map_winfast_usbii_deluxe(void)
+{
+ return ir_register_map(&winfast_usbii_deluxe_map);
+}
+
+static void __exit exit_rc_map_winfast_usbii_deluxe(void)
+{
+ ir_unregister_map(&winfast_usbii_deluxe_map);
+}
+
+module_init(init_rc_map_winfast_usbii_deluxe)
+module_exit(exit_rc_map_winfast_usbii_deluxe)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/keymaps/rc-winfast.c b/drivers/media/IR/keymaps/rc-winfast.c
new file mode 100644
index 000000000000..0e90a3bd9499
--- /dev/null
+++ b/drivers/media/IR/keymaps/rc-winfast.c
@@ -0,0 +1,102 @@
+/* winfast.h - Keytable for winfast Remote Controller
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+
+/* Table for Leadtek Winfast Remote Controls - used by both bttv and cx88 */
+
+static struct ir_scancode winfast[] = {
+ /* Keys 0 to 9 */
+ { 0x12, KEY_0 },
+ { 0x05, KEY_1 },
+ { 0x06, KEY_2 },
+ { 0x07, KEY_3 },
+ { 0x09, KEY_4 },
+ { 0x0a, KEY_5 },
+ { 0x0b, KEY_6 },
+ { 0x0d, KEY_7 },
+ { 0x0e, KEY_8 },
+ { 0x0f, KEY_9 },
+
+ { 0x00, KEY_POWER },
+ { 0x1b, KEY_AUDIO }, /* Audio Source */
+ { 0x02, KEY_TUNER }, /* TV/FM, not on Y0400052 */
+ { 0x1e, KEY_VIDEO }, /* Video Source */
+ { 0x16, KEY_INFO }, /* Display information */
+ { 0x04, KEY_VOLUMEUP },
+ { 0x08, KEY_VOLUMEDOWN },
+ { 0x0c, KEY_CHANNELUP },
+ { 0x10, KEY_CHANNELDOWN },
+ { 0x03, KEY_ZOOM }, /* fullscreen */
+ { 0x1f, KEY_TEXT }, /* closed caption/teletext */
+ { 0x20, KEY_SLEEP },
+ { 0x29, KEY_CLEAR }, /* boss key */
+ { 0x14, KEY_MUTE },
+ { 0x2b, KEY_RED },
+ { 0x2c, KEY_GREEN },
+ { 0x2d, KEY_YELLOW },
+ { 0x2e, KEY_BLUE },
+ { 0x18, KEY_KPPLUS }, /* fine tune + , not on Y040052 */
+ { 0x19, KEY_KPMINUS }, /* fine tune - , not on Y040052 */
+ { 0x2a, KEY_MEDIA }, /* PIP (Picture in Picture) */
+ { 0x21, KEY_DOT },
+ { 0x13, KEY_ENTER },
+ { 0x11, KEY_LAST }, /* Recall (last channel) */
+ { 0x22, KEY_PREVIOUS },
+ { 0x23, KEY_PLAYPAUSE },
+ { 0x24, KEY_NEXT },
+ { 0x25, KEY_TIME }, /* Time Shifting */
+ { 0x26, KEY_STOP },
+ { 0x27, KEY_RECORD },
+ { 0x28, KEY_SAVE }, /* Screenshot */
+ { 0x2f, KEY_MENU },
+ { 0x30, KEY_CANCEL },
+ { 0x31, KEY_CHANNEL }, /* Channel Surf */
+ { 0x32, KEY_SUBTITLE },
+ { 0x33, KEY_LANGUAGE },
+ { 0x34, KEY_REWIND },
+ { 0x35, KEY_FASTFORWARD },
+ { 0x36, KEY_TV },
+ { 0x37, KEY_RADIO }, /* FM */
+ { 0x38, KEY_DVD },
+
+ { 0x1a, KEY_MODE}, /* change to MCE mode on Y04G0051 */
+ { 0x3e, KEY_F21 }, /* MCE +VOL, on Y04G0033 */
+ { 0x3a, KEY_F22 }, /* MCE -VOL, on Y04G0033 */
+ { 0x3b, KEY_F23 }, /* MCE +CH, on Y04G0033 */
+ { 0x3f, KEY_F24 } /* MCE -CH, on Y04G0033 */
+};
+
+static struct rc_keymap winfast_map = {
+ .map = {
+ .scan = winfast,
+ .size = ARRAY_SIZE(winfast),
+ .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */
+ .name = RC_MAP_WINFAST,
+ }
+};
+
+static int __init init_rc_map_winfast(void)
+{
+ return ir_register_map(&winfast_map);
+}
+
+static void __exit exit_rc_map_winfast(void)
+{
+ ir_unregister_map(&winfast_map);
+}
+
+module_init(init_rc_map_winfast)
+module_exit(exit_rc_map_winfast)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/IR/rc-map.c b/drivers/media/IR/rc-map.c
new file mode 100644
index 000000000000..46a8f1524b5b
--- /dev/null
+++ b/drivers/media/IR/rc-map.c
@@ -0,0 +1,84 @@
+/* rc-map.c - handle IR keymap registration and lookup
+ *
+ * Copyright (C) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <media/ir-core.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+
+/* Used to handle IR raw handler extensions */
+static LIST_HEAD(rc_map_list);
+static DEFINE_SPINLOCK(rc_map_lock);
+
+static struct rc_keymap *seek_rc_map(const char *name)
+{
+ struct rc_keymap *map = NULL;
+
+ spin_lock(&rc_map_lock);
+ list_for_each_entry(map, &rc_map_list, list) {
+ if (!strcmp(name, map->map.name)) {
+ spin_unlock(&rc_map_lock);
+ return map;
+ }
+ }
+ spin_unlock(&rc_map_lock);
+
+ return NULL;
+}
+
+struct ir_scancode_table *get_rc_map(const char *name)
+{
+
+ struct rc_keymap *map;
+
+ map = seek_rc_map(name);
+#ifdef MODULE
+ if (!map) {
+ int rc = request_module(name);
+ if (rc < 0) {
+ printk(KERN_ERR "Couldn't load IR keymap %s\n", name);
+ return NULL;
+ }
+ msleep(20); /* Give some time for IR to register */
+
+ map = seek_rc_map(name);
+ }
+#endif
+ if (!map) {
+ printk(KERN_ERR "IR keymap %s not found\n", name);
+ return NULL;
+ }
+
+ printk(KERN_INFO "Registered IR keymap %s\n", map->map.name);
+
+ return &map->map;
+}
+EXPORT_SYMBOL_GPL(get_rc_map);
+
+int ir_register_map(struct rc_keymap *map)
+{
+ spin_lock(&rc_map_lock);
+ list_add_tail(&map->list, &rc_map_list);
+ spin_unlock(&rc_map_lock);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ir_register_map);
+
+void ir_unregister_map(struct rc_keymap *map)
+{
+ spin_lock(&rc_map_lock);
+ list_del(&map->list);
+ spin_unlock(&rc_map_lock);
+}
+EXPORT_SYMBOL_GPL(ir_unregister_map);
+
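rc-map.c is the piece the keymap modules above plug into: ir_register_map() adds a table to rc_map_list, and get_rc_map() looks a table up by name, loading its module on demand when keymaps are built modular. A minimal sketch of a consumer, assuming a hypothetical bridge-driver helper; only get_rc_map() and the RC_MAP_* name come from this patch:

/* Illustrative consumer of get_rc_map(); the function name and error
 * handling are assumptions, not part of this patch. */
#include <media/ir-core.h>
#include <media/rc-map.h>

static int example_attach_keymap(void)
{
	struct ir_scancode_table *map;

	/* Look the table up by name; when keymaps are modules this may
	 * request_module() and retry, as implemented in get_rc_map(). */
	map = get_rc_map(RC_MAP_PIXELVIEW);
	if (!map)
		return -ENODEV;

	/* map->scan and map->size now describe the scancode/keycode
	 * pairs that can be loaded into the input device's keytable. */
	return 0;
}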
diff --git a/drivers/media/common/tuners/tuner-xc2028.c b/drivers/media/common/tuners/tuner-xc2028.c
index 96d61707f501..b6ce528e1889 100644
--- a/drivers/media/common/tuners/tuner-xc2028.c
+++ b/drivers/media/common/tuners/tuner-xc2028.c
@@ -100,6 +100,8 @@ struct xc2028_data {
if (size != _rc) \
tuner_info("i2c output error: rc = %d (should be %d)\n",\
_rc, (int)size); \
+ if (priv->ctrl.msleep) \
+ msleep(priv->ctrl.msleep); \
_rc; \
})
@@ -119,6 +121,8 @@ struct xc2028_data {
if (isize != _rc) \
tuner_err("i2c input error: rc = %d (should be %d)\n", \
_rc, (int)isize); \
+ if (priv->ctrl.msleep) \
+ msleep(priv->ctrl.msleep); \
_rc; \
})
@@ -129,8 +133,8 @@ struct xc2028_data {
(_rc = tuner_i2c_xfer_send(&priv->i2c_props, \
_val, sizeof(_val)))) { \
tuner_err("Error on line %d: %d\n", __LINE__, _rc); \
- } else \
- msleep(10); \
+ } else if (priv->ctrl.msleep) \
+ msleep(priv->ctrl.msleep); \
_rc; \
})
@@ -809,10 +813,20 @@ check_device:
hwmodel, (version & 0xf000) >> 12, (version & 0xf00) >> 8,
(version & 0xf0) >> 4, version & 0xf);
+
+ if (priv->ctrl.read_not_reliable)
+ goto read_not_reliable;
+
/* Check firmware version against what we downloaded. */
if (priv->firm_version != ((version & 0xf0) << 4 | (version & 0x0f))) {
- tuner_err("Incorrect readback of firmware version.\n");
- goto fail;
+ if (!priv->ctrl.read_not_reliable) {
+ tuner_err("Incorrect readback of firmware version.\n");
+ goto fail;
+ } else {
+ tuner_err("Returned an incorrect version. However, "
+ "read is not reliable enough. Ignoring it.\n");
+ hwmodel = 3028;
+ }
}
/* Check that the tuner hardware model remains consistent over time. */
@@ -826,6 +840,7 @@ check_device:
goto fail;
}
+read_not_reliable:
memcpy(&priv->cur_fw, &new_fw, sizeof(priv->cur_fw));
/*
@@ -996,6 +1011,8 @@ static int generic_set_freq(struct dvb_frontend *fe, u32 freq /* in HZ */,
The reset CLK is needed only with tm6000.
Driver should work fine even if this fails.
*/
+ if (priv->ctrl.msleep)
+ msleep(priv->ctrl.msleep);
do_tuner_callback(fe, XC2028_RESET_CLK, 1);
msleep(10);
diff --git a/drivers/media/common/tuners/tuner-xc2028.h b/drivers/media/common/tuners/tuner-xc2028.h
index a90c35d50add..9778c96a5006 100644
--- a/drivers/media/common/tuners/tuner-xc2028.h
+++ b/drivers/media/common/tuners/tuner-xc2028.h
@@ -33,12 +33,14 @@ enum firmware_type {
struct xc2028_ctrl {
char *fname;
int max_len;
+ int msleep;
unsigned int scode_table;
unsigned int mts :1;
unsigned int input1:1;
unsigned int vhfbw7:1;
unsigned int uhfbw8:1;
unsigned int disable_power_mgmt:1;
+ unsigned int read_not_reliable:1;
unsigned int demod;
enum firmware_type type:2;
};
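The two fields added to xc2028_ctrl above give boards a per-card i2c settle delay and a way to tolerate tuners whose firmware version cannot be read back reliably. A hedged sketch of a card configuration using them; the variable name and values are illustrative assumptions, not taken from this patch:

/* Illustrative xc2028 control block; values are assumptions. */
static struct xc2028_ctrl example_xc2028_ctrl = {
	.msleep            = 10, /* sleep 10 ms after each i2c transfer */
	.read_not_reliable = 1,  /* accept a bad firmware-version readback */
};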
diff --git a/drivers/media/dvb/bt8xx/dst.c b/drivers/media/dvb/bt8xx/dst.c
index 8b0cde38984d..248a2a9d8416 100644
--- a/drivers/media/dvb/bt8xx/dst.c
+++ b/drivers/media/dvb/bt8xx/dst.c
@@ -930,7 +930,6 @@ static int dst_fw_ver(struct dst_state *state)
dprintk(verbose, DST_INFO, 1, "Unsupported Command");
return -1;
}
- memset(&state->fw_version, '\0', 8);
memcpy(&state->fw_version, &state->rxbuffer, 8);
dprintk(verbose, DST_ERROR, 1, "Firmware Ver = %x.%x Build = %02x, on %x:%x, %x-%x-20%02x",
state->fw_version[0] >> 4, state->fw_version[0] & 0x0f,
@@ -1053,7 +1052,6 @@ static int dst_get_tuner_info(struct dst_state *state)
goto force;
}
}
- memset(&state->board_info, '\0', 8);
memcpy(&state->board_info, &state->rxbuffer, 8);
if (state->type_flags & DST_TYPE_HAS_MULTI_FE) {
dprintk(verbose, DST_ERROR, 1, "DST type has TS=188");
diff --git a/drivers/media/dvb/dm1105/dm1105.c b/drivers/media/dvb/dm1105/dm1105.c
index b6d46961a99e..b762e561a6d5 100644
--- a/drivers/media/dvb/dm1105/dm1105.c
+++ b/drivers/media/dvb/dm1105/dm1105.c
@@ -28,7 +28,7 @@
#include <linux/dma-mapping.h>
#include <linux/input.h>
#include <linux/slab.h>
-#include <media/ir-common.h>
+#include <media/ir-core.h>
#include "demux.h"
#include "dmxdev.h"
@@ -46,6 +46,8 @@
#include "z0194a.h"
#include "ds3000.h"
+#define MODULE_NAME "dm1105"
+
#define UNSET (-1U)
#define DM1105_BOARD_NOAUTO UNSET
@@ -265,7 +267,6 @@ static void dm1105_card_list(struct pci_dev *pci)
/* infrared remote control */
struct infrared {
struct input_dev *input_dev;
- struct ir_input_state ir;
char input_phys[32];
struct work_struct work;
u32 ir_command;
@@ -531,8 +532,7 @@ static void dm1105_emit_key(struct work_struct *work)
data = (ircom >> 8) & 0x7f;
- ir_input_keydown(ir->input_dev, &ir->ir, data);
- ir_input_nokey(ir->input_dev, &ir->ir);
+ ir_keydown(ir->input_dev, data, 0);
}
/* work handler */
@@ -594,8 +594,7 @@ static irqreturn_t dm1105_irq(int irq, void *dev_id)
int __devinit dm1105_ir_init(struct dm1105_dev *dm1105)
{
struct input_dev *input_dev;
- struct ir_scancode_table *ir_codes = &ir_codes_dm1105_nec_table;
- u64 ir_type = IR_TYPE_OTHER;
+ char *ir_codes = NULL;
int err = -ENOMEM;
input_dev = input_allocate_device();
@@ -606,12 +605,6 @@ int __devinit dm1105_ir_init(struct dm1105_dev *dm1105)
snprintf(dm1105->ir.input_phys, sizeof(dm1105->ir.input_phys),
"pci-%s/ir0", pci_name(dm1105->pdev));
- err = ir_input_init(input_dev, &dm1105->ir.ir, ir_type);
- if (err < 0) {
- input_free_device(input_dev);
- return err;
- }
-
input_dev->name = "DVB on-card IR receiver";
input_dev->phys = dm1105->ir.input_phys;
input_dev->id.bustype = BUS_PCI;
@@ -628,9 +621,13 @@ int __devinit dm1105_ir_init(struct dm1105_dev *dm1105)
INIT_WORK(&dm1105->ir.work, dm1105_emit_key);
- err = ir_input_register(input_dev, ir_codes, NULL);
+ err = ir_input_register(input_dev, ir_codes, NULL, MODULE_NAME);
+ if (err < 0) {
+ input_free_device(input_dev);
+ return err;
+ }
- return err;
+ return 0;
}
void __devexit dm1105_ir_exit(struct dm1105_dev *dm1105)
diff --git a/drivers/media/dvb/dvb-core/dmxdev.c b/drivers/media/dvb/dvb-core/dmxdev.c
index 9ddc57909d49..425862ffb285 100644
--- a/drivers/media/dvb/dvb-core/dmxdev.c
+++ b/drivers/media/dvb/dvb-core/dmxdev.c
@@ -25,6 +25,7 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
+#include <linux/smp_lock.h>
#include <linux/poll.h>
#include <linux/ioctl.h>
#include <linux/wait.h>
@@ -963,7 +964,7 @@ dvb_demux_read(struct file *file, char __user *buf, size_t count,
return ret;
}
-static int dvb_demux_do_ioctl(struct inode *inode, struct file *file,
+static int dvb_demux_do_ioctl(struct file *file,
unsigned int cmd, void *parg)
{
struct dmxdev_filter *dmxdevfilter = file->private_data;
@@ -1084,10 +1085,16 @@ static int dvb_demux_do_ioctl(struct inode *inode, struct file *file,
return ret;
}
-static int dvb_demux_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg)
+static long dvb_demux_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
{
- return dvb_usercopy(inode, file, cmd, arg, dvb_demux_do_ioctl);
+ int ret;
+
+ lock_kernel();
+ ret = dvb_usercopy(file, cmd, arg, dvb_demux_do_ioctl);
+ unlock_kernel();
+
+ return ret;
}
static unsigned int dvb_demux_poll(struct file *file, poll_table *wait)
@@ -1139,7 +1146,7 @@ static int dvb_demux_release(struct inode *inode, struct file *file)
static const struct file_operations dvb_demux_fops = {
.owner = THIS_MODULE,
.read = dvb_demux_read,
- .ioctl = dvb_demux_ioctl,
+ .unlocked_ioctl = dvb_demux_ioctl,
.open = dvb_demux_open,
.release = dvb_demux_release,
.poll = dvb_demux_poll,
@@ -1152,7 +1159,7 @@ static struct dvb_device dvbdev_demux = {
.fops = &dvb_demux_fops
};
-static int dvb_dvr_do_ioctl(struct inode *inode, struct file *file,
+static int dvb_dvr_do_ioctl(struct file *file,
unsigned int cmd, void *parg)
{
struct dvb_device *dvbdev = file->private_data;
@@ -1176,10 +1183,16 @@ static int dvb_dvr_do_ioctl(struct inode *inode, struct file *file,
return ret;
}
-static int dvb_dvr_ioctl(struct inode *inode, struct file *file,
+static long dvb_dvr_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
- return dvb_usercopy(inode, file, cmd, arg, dvb_dvr_do_ioctl);
+ int ret;
+
+ lock_kernel();
+ ret = dvb_usercopy(file, cmd, arg, dvb_dvr_do_ioctl);
+ unlock_kernel();
+
+ return ret;
}
static unsigned int dvb_dvr_poll(struct file *file, poll_table *wait)
@@ -1208,7 +1221,7 @@ static const struct file_operations dvb_dvr_fops = {
.owner = THIS_MODULE,
.read = dvb_dvr_read,
.write = dvb_dvr_write,
- .ioctl = dvb_dvr_ioctl,
+ .unlocked_ioctl = dvb_dvr_ioctl,
.open = dvb_dvr_open,
.release = dvb_dvr_release,
.poll = dvb_dvr_poll,
diff --git a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
index cb22da53bfb0..ef259a0718ac 100644
--- a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
+++ b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
@@ -36,6 +36,7 @@
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
+#include <linux/smp_lock.h>
#include <linux/kthread.h>
#include "dvb_ca_en50221.h"
@@ -1181,7 +1182,7 @@ static int dvb_ca_en50221_thread(void *data)
*
* @return 0 on success, <0 on error.
*/
-static int dvb_ca_en50221_io_do_ioctl(struct inode *inode, struct file *file,
+static int dvb_ca_en50221_io_do_ioctl(struct file *file,
unsigned int cmd, void *parg)
{
struct dvb_device *dvbdev = file->private_data;
@@ -1255,10 +1256,16 @@ static int dvb_ca_en50221_io_do_ioctl(struct inode *inode, struct file *file,
*
* @return 0 on success, <0 on error.
*/
-static int dvb_ca_en50221_io_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg)
+static long dvb_ca_en50221_io_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
{
- return dvb_usercopy(inode, file, cmd, arg, dvb_ca_en50221_io_do_ioctl);
+ int ret;
+
+ lock_kernel();
+ ret = dvb_usercopy(file, cmd, arg, dvb_ca_en50221_io_do_ioctl);
+ unlock_kernel();
+
+ return ret;
}
@@ -1611,7 +1618,7 @@ static const struct file_operations dvb_ca_fops = {
.owner = THIS_MODULE,
.read = dvb_ca_en50221_io_read,
.write = dvb_ca_en50221_io_write,
- .ioctl = dvb_ca_en50221_io_ioctl,
+ .unlocked_ioctl = dvb_ca_en50221_io_ioctl,
.open = dvb_ca_en50221_io_open,
.release = dvb_ca_en50221_io_release,
.poll = dvb_ca_en50221_io_poll,
diff --git a/drivers/media/dvb/dvb-core/dvb_demux.c b/drivers/media/dvb/dvb-core/dvb_demux.c
index 67f189b7aa1f..977ddba3e235 100644
--- a/drivers/media/dvb/dvb-core/dvb_demux.c
+++ b/drivers/media/dvb/dvb-core/dvb_demux.c
@@ -426,7 +426,7 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf)
};
};
- if (demux->cnt_storage) {
+ if (demux->cnt_storage && dvb_demux_tscheck) {
/* check pkt counter */
if (pid < MAX_PID) {
if (buf[1] & 0x80)
@@ -1248,12 +1248,9 @@ int dvb_dmx_init(struct dvb_demux *dvbdemux)
dvbdemux->feed[i].index = i;
}
- if (dvb_demux_tscheck) {
- dvbdemux->cnt_storage = vmalloc(MAX_PID + 1);
-
- if (!dvbdemux->cnt_storage)
- printk(KERN_WARNING "Couldn't allocate memory for TS/TEI check. Disabling it\n");
- }
+ dvbdemux->cnt_storage = vmalloc(MAX_PID + 1);
+ if (!dvbdemux->cnt_storage)
+ printk(KERN_WARNING "Couldn't allocate memory for TS/TEI check. Disabling it\n");
INIT_LIST_HEAD(&dvbdemux->frontend_list);
diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.c b/drivers/media/dvb/dvb-core/dvb_frontend.c
index 55ea260572bf..44ae89ecef94 100644
--- a/drivers/media/dvb/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb/dvb-core/dvb_frontend.c
@@ -36,6 +36,7 @@
#include <linux/list.h>
#include <linux/freezer.h>
#include <linux/jiffies.h>
+#include <linux/smp_lock.h>
#include <linux/kthread.h>
#include <asm/processor.h>
@@ -95,6 +96,10 @@ MODULE_PARM_DESC(dvb_mfe_wait_time, "Wait up to <mfe_wait_time> seconds on open(
* FESTATE_LOSTLOCK. When the lock has been lost, and we're searching it again.
*/
+#define DVB_FE_NO_EXIT 0
+#define DVB_FE_NORMAL_EXIT 1
+#define DVB_FE_DEVICE_REMOVED 2
+
static DEFINE_MUTEX(frontend_mutex);
struct dvb_frontend_private {
@@ -497,7 +502,7 @@ static int dvb_frontend_is_exiting(struct dvb_frontend *fe)
{
struct dvb_frontend_private *fepriv = fe->frontend_priv;
- if (fepriv->exit)
+ if (fepriv->exit != DVB_FE_NO_EXIT)
return 1;
if (fepriv->dvbdev->writers == 1)
@@ -559,7 +564,7 @@ restart:
if (kthread_should_stop() || dvb_frontend_is_exiting(fe)) {
/* got signal or quitting */
- fepriv->exit = 1;
+ fepriv->exit = DVB_FE_NORMAL_EXIT;
break;
}
@@ -673,7 +678,10 @@ restart:
}
fepriv->thread = NULL;
- fepriv->exit = 0;
+ if (kthread_should_stop())
+ fepriv->exit = DVB_FE_DEVICE_REMOVED;
+ else
+ fepriv->exit = DVB_FE_NO_EXIT;
mb();
dvb_frontend_wakeup(fe);
@@ -686,7 +694,7 @@ static void dvb_frontend_stop(struct dvb_frontend *fe)
dprintk ("%s\n", __func__);
- fepriv->exit = 1;
+ fepriv->exit = DVB_FE_NORMAL_EXIT;
mb();
if (!fepriv->thread)
@@ -755,7 +763,7 @@ static int dvb_frontend_start(struct dvb_frontend *fe)
dprintk ("%s\n", __func__);
if (fepriv->thread) {
- if (!fepriv->exit)
+ if (fepriv->exit == DVB_FE_NO_EXIT)
return 0;
else
dvb_frontend_stop (fe);
@@ -767,7 +775,7 @@ static int dvb_frontend_start(struct dvb_frontend *fe)
return -EINTR;
fepriv->state = FESTATE_IDLE;
- fepriv->exit = 0;
+ fepriv->exit = DVB_FE_NO_EXIT;
fepriv->thread = NULL;
mb();
@@ -1188,14 +1196,14 @@ static void dtv_property_cache_submit(struct dvb_frontend *fe)
}
}
-static int dvb_frontend_ioctl_legacy(struct inode *inode, struct file *file,
+static int dvb_frontend_ioctl_legacy(struct file *file,
unsigned int cmd, void *parg);
-static int dvb_frontend_ioctl_properties(struct inode *inode, struct file *file,
+static int dvb_frontend_ioctl_properties(struct file *file,
unsigned int cmd, void *parg);
static int dtv_property_process_get(struct dvb_frontend *fe,
struct dtv_property *tvp,
- struct inode *inode, struct file *file)
+ struct file *file)
{
int r = 0;
@@ -1328,7 +1336,6 @@ static int dtv_property_process_get(struct dvb_frontend *fe,
static int dtv_property_process_set(struct dvb_frontend *fe,
struct dtv_property *tvp,
- struct inode *inode,
struct file *file)
{
int r = 0;
@@ -1359,7 +1366,7 @@ static int dtv_property_process_set(struct dvb_frontend *fe,
dprintk("%s() Finalised property cache\n", __func__);
dtv_property_cache_submit(fe);
- r |= dvb_frontend_ioctl_legacy(inode, file, FE_SET_FRONTEND,
+ r |= dvb_frontend_ioctl_legacy(file, FE_SET_FRONTEND,
&fepriv->parameters);
break;
case DTV_FREQUENCY:
@@ -1391,12 +1398,12 @@ static int dtv_property_process_set(struct dvb_frontend *fe,
break;
case DTV_VOLTAGE:
fe->dtv_property_cache.voltage = tvp->u.data;
- r = dvb_frontend_ioctl_legacy(inode, file, FE_SET_VOLTAGE,
+ r = dvb_frontend_ioctl_legacy(file, FE_SET_VOLTAGE,
(void *)fe->dtv_property_cache.voltage);
break;
case DTV_TONE:
fe->dtv_property_cache.sectone = tvp->u.data;
- r = dvb_frontend_ioctl_legacy(inode, file, FE_SET_TONE,
+ r = dvb_frontend_ioctl_legacy(file, FE_SET_TONE,
(void *)fe->dtv_property_cache.sectone);
break;
case DTV_CODE_RATE_HP:
@@ -1480,7 +1487,7 @@ static int dtv_property_process_set(struct dvb_frontend *fe,
return r;
}
-static int dvb_frontend_ioctl(struct inode *inode, struct file *file,
+static int dvb_frontend_ioctl(struct file *file,
unsigned int cmd, void *parg)
{
struct dvb_device *dvbdev = file->private_data;
@@ -1490,7 +1497,7 @@ static int dvb_frontend_ioctl(struct inode *inode, struct file *file,
dprintk("%s (%d)\n", __func__, _IOC_NR(cmd));
- if (fepriv->exit)
+ if (fepriv->exit != DVB_FE_NO_EXIT)
return -ENODEV;
if ((file->f_flags & O_ACCMODE) == O_RDONLY &&
@@ -1502,17 +1509,17 @@ static int dvb_frontend_ioctl(struct inode *inode, struct file *file,
return -ERESTARTSYS;
if ((cmd == FE_SET_PROPERTY) || (cmd == FE_GET_PROPERTY))
- err = dvb_frontend_ioctl_properties(inode, file, cmd, parg);
+ err = dvb_frontend_ioctl_properties(file, cmd, parg);
else {
fe->dtv_property_cache.state = DTV_UNDEFINED;
- err = dvb_frontend_ioctl_legacy(inode, file, cmd, parg);
+ err = dvb_frontend_ioctl_legacy(file, cmd, parg);
}
up(&fepriv->sem);
return err;
}
-static int dvb_frontend_ioctl_properties(struct inode *inode, struct file *file,
+static int dvb_frontend_ioctl_properties(struct file *file,
unsigned int cmd, void *parg)
{
struct dvb_device *dvbdev = file->private_data;
@@ -1548,7 +1555,7 @@ static int dvb_frontend_ioctl_properties(struct inode *inode, struct file *file,
}
for (i = 0; i < tvps->num; i++) {
- (tvp + i)->result = dtv_property_process_set(fe, tvp + i, inode, file);
+ (tvp + i)->result = dtv_property_process_set(fe, tvp + i, file);
err |= (tvp + i)->result;
}
@@ -1580,7 +1587,7 @@ static int dvb_frontend_ioctl_properties(struct inode *inode, struct file *file,
}
for (i = 0; i < tvps->num; i++) {
- (tvp + i)->result = dtv_property_process_get(fe, tvp + i, inode, file);
+ (tvp + i)->result = dtv_property_process_get(fe, tvp + i, file);
err |= (tvp + i)->result;
}
@@ -1597,7 +1604,7 @@ out:
return err;
}
-static int dvb_frontend_ioctl_legacy(struct inode *inode, struct file *file,
+static int dvb_frontend_ioctl_legacy(struct file *file,
unsigned int cmd, void *parg)
{
struct dvb_device *dvbdev = file->private_data;
@@ -1916,6 +1923,8 @@ static int dvb_frontend_open(struct inode *inode, struct file *file)
int ret;
dprintk ("%s\n", __func__);
+ if (fepriv->exit == DVB_FE_DEVICE_REMOVED)
+ return -ENODEV;
if (adapter->mfe_shared) {
mutex_lock (&adapter->mfe_lock);
@@ -2008,7 +2017,7 @@ static int dvb_frontend_release(struct inode *inode, struct file *file)
ret = dvb_generic_release (inode, file);
if (dvbdev->users == -1) {
- if (fepriv->exit == 1) {
+ if (fepriv->exit != DVB_FE_NO_EXIT) {
fops_put(file->f_op);
file->f_op = NULL;
wake_up(&dvbdev->wait_queue);
@@ -2022,7 +2031,7 @@ static int dvb_frontend_release(struct inode *inode, struct file *file)
static const struct file_operations dvb_frontend_fops = {
.owner = THIS_MODULE,
- .ioctl = dvb_generic_ioctl,
+ .unlocked_ioctl = dvb_generic_ioctl,
.poll = dvb_frontend_poll,
.open = dvb_frontend_open,
.release = dvb_frontend_release
diff --git a/drivers/media/dvb/dvb-core/dvb_net.c b/drivers/media/dvb/dvb-core/dvb_net.c
index 441c0642b30a..f6dac2bb0ac6 100644
--- a/drivers/media/dvb/dvb-core/dvb_net.c
+++ b/drivers/media/dvb/dvb-core/dvb_net.c
@@ -59,6 +59,7 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dvb/net.h>
+#include <linux/smp_lock.h>
#include <linux/uio.h>
#include <asm/uaccess.h>
#include <linux/crc32.h>
@@ -1109,14 +1110,14 @@ static int dvb_net_feed_stop(struct net_device *dev)
}
-static int dvb_set_mc_filter (struct net_device *dev, struct dev_mc_list *mc)
+static int dvb_set_mc_filter(struct net_device *dev, unsigned char *addr)
{
struct dvb_net_priv *priv = netdev_priv(dev);
if (priv->multi_num == DVB_NET_MULTICAST_MAX)
return -ENOMEM;
- memcpy(priv->multi_macs[priv->multi_num], mc->dmi_addr, 6);
+ memcpy(priv->multi_macs[priv->multi_num], addr, ETH_ALEN);
priv->multi_num++;
return 0;
@@ -1140,8 +1141,7 @@ static void wq_set_multicast_list (struct work_struct *work)
dprintk("%s: allmulti mode\n", dev->name);
priv->rx_mode = RX_MODE_ALL_MULTI;
} else if (!netdev_mc_empty(dev)) {
- int mci;
- struct dev_mc_list *mc;
+ struct netdev_hw_addr *ha;
dprintk("%s: set_mc_list, %d entries\n",
dev->name, netdev_mc_count(dev));
@@ -1149,11 +1149,8 @@ static void wq_set_multicast_list (struct work_struct *work)
priv->rx_mode = RX_MODE_MULTI;
priv->multi_num = 0;
- for (mci = 0, mc=dev->mc_list;
- mci < netdev_mc_count(dev);
- mc = mc->next, mci++) {
- dvb_set_mc_filter(dev, mc);
- }
+ netdev_for_each_mc_addr(ha, dev)
+ dvb_set_mc_filter(dev, ha->addr);
}
netif_addr_unlock_bh(dev);
@@ -1333,7 +1330,7 @@ static int dvb_net_remove_if(struct dvb_net *dvbnet, unsigned long num)
return 0;
}
-static int dvb_net_do_ioctl(struct inode *inode, struct file *file,
+static int dvb_net_do_ioctl(struct file *file,
unsigned int cmd, void *parg)
{
struct dvb_device *dvbdev = file->private_data;
@@ -1435,10 +1432,16 @@ static int dvb_net_do_ioctl(struct inode *inode, struct file *file,
return 0;
}
-static int dvb_net_ioctl(struct inode *inode, struct file *file,
+static long dvb_net_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
- return dvb_usercopy(inode, file, cmd, arg, dvb_net_do_ioctl);
+ int ret;
+
+ lock_kernel();
+ ret = dvb_usercopy(file, cmd, arg, dvb_net_do_ioctl);
+ unlock_kernel();
+
+ return ret;
}
static int dvb_net_close(struct inode *inode, struct file *file)
@@ -1459,7 +1462,7 @@ static int dvb_net_close(struct inode *inode, struct file *file)
static const struct file_operations dvb_net_fops = {
.owner = THIS_MODULE,
- .ioctl = dvb_net_ioctl,
+ .unlocked_ioctl = dvb_net_ioctl,
.open = dvb_generic_open,
.release = dvb_net_close,
};
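The dvb_net rework above drops the removed struct dev_mc_list walk in favour of the netdev_for_each_mc_addr() iterator over struct netdev_hw_addr, copying ETH_ALEN bytes from ha->addr instead of the old dmi_addr field. A hedged sketch of the same pattern in a hypothetical driver (example_program_mc_filter is illustrative, not from this patch):

static void example_set_rx_mode(struct net_device *dev)
{
	struct netdev_hw_addr *ha;

	netif_addr_lock_bh(dev);
	netdev_for_each_mc_addr(ha, dev) {
		/* ha->addr holds the ETH_ALEN-byte multicast MAC */
		example_program_mc_filter(dev, ha->addr);
	}
	netif_addr_unlock_bh(dev);
}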
diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
index 94159b90f733..b915c39d782f 100644
--- a/drivers/media/dvb/dvb-core/dvbdev.c
+++ b/drivers/media/dvb/dvb-core/dvbdev.c
@@ -154,10 +154,11 @@ int dvb_generic_release(struct inode *inode, struct file *file)
EXPORT_SYMBOL(dvb_generic_release);
-int dvb_generic_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg)
+long dvb_generic_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
{
struct dvb_device *dvbdev = file->private_data;
+ int ret;
if (!dvbdev)
return -ENODEV;
@@ -165,7 +166,11 @@ int dvb_generic_ioctl(struct inode *inode, struct file *file,
if (!dvbdev->kernel_ioctl)
return -EINVAL;
- return dvb_usercopy (inode, file, cmd, arg, dvbdev->kernel_ioctl);
+ lock_kernel();
+ ret = dvb_usercopy(file, cmd, arg, dvbdev->kernel_ioctl);
+ unlock_kernel();
+
+ return ret;
}
EXPORT_SYMBOL(dvb_generic_ioctl);
@@ -377,9 +382,9 @@ EXPORT_SYMBOL(dvb_unregister_adapter);
define this as video_usercopy(). this will introduce a dependency
to the v4l "videodev.o" module, which is unnecessary for some
cards (ie. the budget dvb-cards don't need the v4l module...) */
-int dvb_usercopy(struct inode *inode, struct file *file,
+int dvb_usercopy(struct file *file,
unsigned int cmd, unsigned long arg,
- int (*func)(struct inode *inode, struct file *file,
+ int (*func)(struct file *file,
unsigned int cmd, void *arg))
{
char sbuf[128];
@@ -416,7 +421,7 @@ int dvb_usercopy(struct inode *inode, struct file *file,
}
/* call driver */
- if ((err = func(inode, file, cmd, parg)) == -ENOIOCTLCMD)
+ if ((err = func(file, cmd, parg)) == -ENOIOCTLCMD)
err = -EINVAL;
if (err < 0)
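The dvbdev changes show the .ioctl to .unlocked_ioctl conversion applied to every DVB char device in this series: the handler loses the inode argument, returns long, and keeps the old big-kernel-lock serialization explicitly via lock_kernel()/unlock_kernel() (from <linux/smp_lock.h>, still present in this kernel) until each driver is audited for finer-grained locking. A minimal sketch of the pattern (example_do_ioctl stands in for a driver's kernel_ioctl callback):

static long example_unlocked_ioctl(struct file *file, unsigned int cmd,
				   unsigned long arg)
{
	int ret;

	lock_kernel();	/* preserve the old BKL semantics for now */
	ret = dvb_usercopy(file, cmd, arg, example_do_ioctl);
	unlock_kernel();

	return ret;
}

static const struct file_operations example_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= example_unlocked_ioctl,
	.open		= dvb_generic_open,
	.release	= dvb_generic_release,
};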
diff --git a/drivers/media/dvb/dvb-core/dvbdev.h b/drivers/media/dvb/dvb-core/dvbdev.h
index f7b499d4a3c0..fcc6ae98745e 100644
--- a/drivers/media/dvb/dvb-core/dvbdev.h
+++ b/drivers/media/dvb/dvb-core/dvbdev.h
@@ -116,8 +116,7 @@ struct dvb_device {
wait_queue_head_t wait_queue;
/* don't really need those !? -- FIXME: use video_usercopy */
- int (*kernel_ioctl)(struct inode *inode, struct file *file,
- unsigned int cmd, void *arg);
+ int (*kernel_ioctl)(struct file *file, unsigned int cmd, void *arg);
void *priv;
};
@@ -138,17 +137,15 @@ extern void dvb_unregister_device (struct dvb_device *dvbdev);
extern int dvb_generic_open (struct inode *inode, struct file *file);
extern int dvb_generic_release (struct inode *inode, struct file *file);
-extern int dvb_generic_ioctl (struct inode *inode, struct file *file,
+extern long dvb_generic_ioctl (struct file *file,
unsigned int cmd, unsigned long arg);
/* we don't mess with video_usercopy() any more,
we simply define our own dvb_usercopy(), which will hopefully become
generic_usercopy() someday... */
-extern int dvb_usercopy(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg,
- int (*func)(struct inode *inode, struct file *file,
- unsigned int cmd, void *arg));
+extern int dvb_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
+ int (*func)(struct file *file, unsigned int cmd, void *arg));
/** generic DVB attach function. */
#ifdef CONFIG_MEDIA_ATTACH
diff --git a/drivers/media/dvb/dvb-usb/a800.c b/drivers/media/dvb/dvb-usb/a800.c
index 6247239982e9..b6cbb1dfc5f1 100644
--- a/drivers/media/dvb/dvb-usb/a800.c
+++ b/drivers/media/dvb/dvb-usb/a800.c
@@ -37,7 +37,7 @@ static int a800_identify_state(struct usb_device *udev, struct dvb_usb_device_pr
return 0;
}
-static struct dvb_usb_rc_key a800_rc_keys[] = {
+static struct dvb_usb_rc_key ir_codes_a800_table[] = {
{ 0x0201, KEY_PROG1 }, /* SOURCE */
{ 0x0200, KEY_POWER }, /* POWER */
{ 0x0205, KEY_1 }, /* 1 */
@@ -147,8 +147,8 @@ static struct dvb_usb_device_properties a800_properties = {
.identify_state = a800_identify_state,
.rc_interval = DEFAULT_RC_INTERVAL,
- .rc_key_map = a800_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(a800_rc_keys),
+ .rc_key_map = ir_codes_a800_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_a800_table),
.rc_query = a800_rc_query,
.i2c_algo = &dibusb_i2c_algo,
diff --git a/drivers/media/dvb/dvb-usb/af9005-remote.c b/drivers/media/dvb/dvb-usb/af9005-remote.c
index f4379c650a19..b41fa873b04d 100644
--- a/drivers/media/dvb/dvb-usb/af9005-remote.c
+++ b/drivers/media/dvb/dvb-usb/af9005-remote.c
@@ -33,7 +33,7 @@ MODULE_PARM_DESC(debug,
#define deb_decode(args...) dprintk(dvb_usb_af9005_remote_debug,0x01,args)
-struct dvb_usb_rc_key af9005_rc_keys[] = {
+struct dvb_usb_rc_key ir_codes_af9005_table[] = {
{0x01b7, KEY_POWER},
{0x01a7, KEY_VOLUMEUP},
@@ -74,7 +74,7 @@ struct dvb_usb_rc_key af9005_rc_keys[] = {
{0x00d5, KEY_GOTO}, /* marked jump on the remote */
};
-int af9005_rc_keys_size = ARRAY_SIZE(af9005_rc_keys);
+int ir_codes_af9005_table_size = ARRAY_SIZE(ir_codes_af9005_table);
static int repeatable_keys[] = {
KEY_VOLUMEUP,
@@ -130,10 +130,10 @@ int af9005_rc_decode(struct dvb_usb_device *d, u8 * data, int len, u32 * event,
deb_decode("code != inverted code\n");
return 0;
}
- for (i = 0; i < af9005_rc_keys_size; i++) {
- if (rc5_custom(&af9005_rc_keys[i]) == cust
- && rc5_data(&af9005_rc_keys[i]) == dat) {
- *event = af9005_rc_keys[i].event;
+ for (i = 0; i < ir_codes_af9005_table_size; i++) {
+ if (rc5_custom(&ir_codes_af9005_table[i]) == cust
+ && rc5_data(&ir_codes_af9005_table[i]) == dat) {
+ *event = ir_codes_af9005_table[i].event;
*state = REMOTE_KEY_PRESSED;
deb_decode
("key pressed, event %x\n", *event);
@@ -146,8 +146,8 @@ int af9005_rc_decode(struct dvb_usb_device *d, u8 * data, int len, u32 * event,
return 0;
}
-EXPORT_SYMBOL(af9005_rc_keys);
-EXPORT_SYMBOL(af9005_rc_keys_size);
+EXPORT_SYMBOL(ir_codes_af9005_table);
+EXPORT_SYMBOL(ir_codes_af9005_table_size);
EXPORT_SYMBOL(af9005_rc_decode);
MODULE_AUTHOR("Luca Olivetti <luca@ventoso.org>");
diff --git a/drivers/media/dvb/dvb-usb/af9005.c b/drivers/media/dvb/dvb-usb/af9005.c
index ca5a0a4d2a47..cfd6107d5349 100644
--- a/drivers/media/dvb/dvb-usb/af9005.c
+++ b/drivers/media/dvb/dvb-usb/af9005.c
@@ -1109,8 +1109,8 @@ static int __init af9005_usb_module_init(void)
return result;
}
rc_decode = symbol_request(af9005_rc_decode);
- rc_keys = symbol_request(af9005_rc_keys);
- rc_keys_size = symbol_request(af9005_rc_keys_size);
+ rc_keys = symbol_request(ir_codes_af9005_table);
+ rc_keys_size = symbol_request(ir_codes_af9005_table_size);
if (rc_decode == NULL || rc_keys == NULL || rc_keys_size == NULL) {
err("af9005_rc_decode function not found, disabling remote");
af9005_properties.rc_query = NULL;
@@ -1128,9 +1128,9 @@ static void __exit af9005_usb_module_exit(void)
if (rc_decode != NULL)
symbol_put(af9005_rc_decode);
if (rc_keys != NULL)
- symbol_put(af9005_rc_keys);
+ symbol_put(ir_codes_af9005_table);
if (rc_keys_size != NULL)
- symbol_put(af9005_rc_keys_size);
+ symbol_put(ir_codes_af9005_table_size);
/* deregister this driver from the USB subsystem */
usb_deregister(&af9005_usb_driver);
}
diff --git a/drivers/media/dvb/dvb-usb/af9005.h b/drivers/media/dvb/dvb-usb/af9005.h
index 0bc48a012187..088e7083a39b 100644
--- a/drivers/media/dvb/dvb-usb/af9005.h
+++ b/drivers/media/dvb/dvb-usb/af9005.h
@@ -3490,7 +3490,7 @@ extern u8 regmask[8];
/* remote control decoder */
extern int af9005_rc_decode(struct dvb_usb_device *d, u8 * data, int len,
u32 * event, int *state);
-extern struct dvb_usb_rc_key af9005_rc_keys[];
-extern int af9005_rc_keys_size;
+extern struct dvb_usb_rc_key ir_codes_af9005_table[];
+extern int ir_codes_af9005_table_size;
#endif
diff --git a/drivers/media/dvb/dvb-usb/af9015.c b/drivers/media/dvb/dvb-usb/af9015.c
index 74d94e45324d..66c7c3ea7990 100644
--- a/drivers/media/dvb/dvb-usb/af9015.c
+++ b/drivers/media/dvb/dvb-usb/af9015.c
@@ -752,19 +752,19 @@ static const struct af9015_setup *af9015_setup_match(unsigned int id,
static const struct af9015_setup af9015_setup_modparam[] = {
{ AF9015_REMOTE_A_LINK_DTU_M,
- af9015_rc_keys_a_link, ARRAY_SIZE(af9015_rc_keys_a_link),
+ ir_codes_af9015_table_a_link, ARRAY_SIZE(ir_codes_af9015_table_a_link),
af9015_ir_table_a_link, ARRAY_SIZE(af9015_ir_table_a_link) },
{ AF9015_REMOTE_MSI_DIGIVOX_MINI_II_V3,
- af9015_rc_keys_msi, ARRAY_SIZE(af9015_rc_keys_msi),
+ ir_codes_af9015_table_msi, ARRAY_SIZE(ir_codes_af9015_table_msi),
af9015_ir_table_msi, ARRAY_SIZE(af9015_ir_table_msi) },
{ AF9015_REMOTE_MYGICTV_U718,
- af9015_rc_keys_mygictv, ARRAY_SIZE(af9015_rc_keys_mygictv),
+ ir_codes_af9015_table_mygictv, ARRAY_SIZE(ir_codes_af9015_table_mygictv),
af9015_ir_table_mygictv, ARRAY_SIZE(af9015_ir_table_mygictv) },
{ AF9015_REMOTE_DIGITTRADE_DVB_T,
- af9015_rc_keys_digittrade, ARRAY_SIZE(af9015_rc_keys_digittrade),
+ ir_codes_af9015_table_digittrade, ARRAY_SIZE(ir_codes_af9015_table_digittrade),
af9015_ir_table_digittrade, ARRAY_SIZE(af9015_ir_table_digittrade) },
{ AF9015_REMOTE_AVERMEDIA_KS,
- af9015_rc_keys_avermedia, ARRAY_SIZE(af9015_rc_keys_avermedia),
+ ir_codes_af9015_table_avermedia, ARRAY_SIZE(ir_codes_af9015_table_avermedia),
af9015_ir_table_avermedia_ks, ARRAY_SIZE(af9015_ir_table_avermedia_ks) },
{ }
};
@@ -772,32 +772,32 @@ static const struct af9015_setup af9015_setup_modparam[] = {
/* don't add new entries here anymore, use hashes instead */
static const struct af9015_setup af9015_setup_usbids[] = {
{ USB_VID_LEADTEK,
- af9015_rc_keys_leadtek, ARRAY_SIZE(af9015_rc_keys_leadtek),
+ ir_codes_af9015_table_leadtek, ARRAY_SIZE(ir_codes_af9015_table_leadtek),
af9015_ir_table_leadtek, ARRAY_SIZE(af9015_ir_table_leadtek) },
{ USB_VID_VISIONPLUS,
- af9015_rc_keys_twinhan, ARRAY_SIZE(af9015_rc_keys_twinhan),
+ ir_codes_af9015_table_twinhan, ARRAY_SIZE(ir_codes_af9015_table_twinhan),
af9015_ir_table_twinhan, ARRAY_SIZE(af9015_ir_table_twinhan) },
{ USB_VID_KWORLD_2, /* TODO: use correct rc keys */
- af9015_rc_keys_twinhan, ARRAY_SIZE(af9015_rc_keys_twinhan),
+ ir_codes_af9015_table_twinhan, ARRAY_SIZE(ir_codes_af9015_table_twinhan),
af9015_ir_table_kworld, ARRAY_SIZE(af9015_ir_table_kworld) },
{ USB_VID_AVERMEDIA,
- af9015_rc_keys_avermedia, ARRAY_SIZE(af9015_rc_keys_avermedia),
+ ir_codes_af9015_table_avermedia, ARRAY_SIZE(ir_codes_af9015_table_avermedia),
af9015_ir_table_avermedia, ARRAY_SIZE(af9015_ir_table_avermedia) },
{ USB_VID_MSI_2,
- af9015_rc_keys_msi_digivox_iii, ARRAY_SIZE(af9015_rc_keys_msi_digivox_iii),
+ ir_codes_af9015_table_msi_digivox_iii, ARRAY_SIZE(ir_codes_af9015_table_msi_digivox_iii),
af9015_ir_table_msi_digivox_iii, ARRAY_SIZE(af9015_ir_table_msi_digivox_iii) },
{ }
};
static const struct af9015_setup af9015_setup_hashes[] = {
{ 0xb8feb708,
- af9015_rc_keys_msi, ARRAY_SIZE(af9015_rc_keys_msi),
+ ir_codes_af9015_table_msi, ARRAY_SIZE(ir_codes_af9015_table_msi),
af9015_ir_table_msi, ARRAY_SIZE(af9015_ir_table_msi) },
{ 0xa3703d00,
- af9015_rc_keys_a_link, ARRAY_SIZE(af9015_rc_keys_a_link),
+ ir_codes_af9015_table_a_link, ARRAY_SIZE(ir_codes_af9015_table_a_link),
af9015_ir_table_a_link, ARRAY_SIZE(af9015_ir_table_a_link) },
{ 0x9b7dc64e,
- af9015_rc_keys_mygictv, ARRAY_SIZE(af9015_rc_keys_mygictv),
+ ir_codes_af9015_table_mygictv, ARRAY_SIZE(ir_codes_af9015_table_mygictv),
af9015_ir_table_mygictv, ARRAY_SIZE(af9015_ir_table_mygictv) },
{ }
};
@@ -836,8 +836,8 @@ static void af9015_set_remote_config(struct usb_device *udev,
} else if (udev->descriptor.idProduct ==
cpu_to_le16(USB_PID_TREKSTOR_DVBT)) {
table = &(const struct af9015_setup){ 0,
- af9015_rc_keys_trekstor,
- ARRAY_SIZE(af9015_rc_keys_trekstor),
+ ir_codes_af9015_table_trekstor,
+ ARRAY_SIZE(ir_codes_af9015_table_trekstor),
af9015_ir_table_trekstor,
ARRAY_SIZE(af9015_ir_table_trekstor)
};
@@ -1297,6 +1297,8 @@ static struct usb_device_id af9015_usb_table[] = {
{USB_DEVICE(USB_VID_KWORLD_2, USB_PID_SVEON_STV20)},
{USB_DEVICE(USB_VID_KWORLD_2, USB_PID_TINYTWIN_2)},
{USB_DEVICE(USB_VID_LEADTEK, USB_PID_WINFAST_DTV2000DS)},
+/* 30 */{USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_UB383_T)},
+ {USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_395U_4)},
{0},
};
MODULE_DEVICE_TABLE(usb, af9015_usb_table);
@@ -1500,7 +1502,8 @@ static struct dvb_usb_device_properties af9015_properties[] = {
"(VS-DVB-T 395U)",
.cold_ids = {&af9015_usb_table[16],
&af9015_usb_table[17],
- &af9015_usb_table[18], NULL},
+ &af9015_usb_table[18],
+ &af9015_usb_table[31], NULL},
.warm_ids = {NULL},
},
{
@@ -1569,7 +1572,7 @@ static struct dvb_usb_device_properties af9015_properties[] = {
.i2c_algo = &af9015_i2c_algo,
- .num_device_descs = 7, /* max 9 */
+ .num_device_descs = 8, /* max 9 */
.devices = {
{
.name = "AverMedia AVerTV Volar GPS 805 (A805)",
@@ -1608,6 +1611,12 @@ static struct dvb_usb_device_properties af9015_properties[] = {
.cold_ids = {&af9015_usb_table[29], NULL},
.warm_ids = {NULL},
},
+ {
+ .name = "KWorld USB DVB-T Stick Mobile " \
+ "(UB383-T)",
+ .cold_ids = {&af9015_usb_table[30], NULL},
+ .warm_ids = {NULL},
+ },
}
},
};
diff --git a/drivers/media/dvb/dvb-usb/af9015.h b/drivers/media/dvb/dvb-usb/af9015.h
index ef36b1831490..63b2a4907b7e 100644
--- a/drivers/media/dvb/dvb-usb/af9015.h
+++ b/drivers/media/dvb/dvb-usb/af9015.h
@@ -123,7 +123,7 @@ enum af9015_remote {
/* LeadTek - Y04G0051 */
/* Leadtek WinFast DTV Dongle Gold */
-static struct dvb_usb_rc_key af9015_rc_keys_leadtek[] = {
+static struct dvb_usb_rc_key ir_codes_af9015_table_leadtek[] = {
{ 0x001e, KEY_1 },
{ 0x001f, KEY_2 },
{ 0x0020, KEY_3 },
@@ -227,7 +227,7 @@ static u8 af9015_ir_table_leadtek[] = {
};
/* TwinHan AzureWave AD-TU700(704J) */
-static struct dvb_usb_rc_key af9015_rc_keys_twinhan[] = {
+static struct dvb_usb_rc_key ir_codes_af9015_table_twinhan[] = {
{ 0x053f, KEY_POWER },
{ 0x0019, KEY_FAVORITES }, /* Favorite List */
{ 0x0004, KEY_TEXT }, /* Teletext */
@@ -338,7 +338,7 @@ static u8 af9015_ir_table_twinhan[] = {
};
/* A-Link DTU(m) */
-static struct dvb_usb_rc_key af9015_rc_keys_a_link[] = {
+static struct dvb_usb_rc_key ir_codes_af9015_table_a_link[] = {
{ 0x001e, KEY_1 },
{ 0x001f, KEY_2 },
{ 0x0020, KEY_3 },
@@ -381,7 +381,7 @@ static u8 af9015_ir_table_a_link[] = {
};
/* MSI DIGIVOX mini II V3.0 */
-static struct dvb_usb_rc_key af9015_rc_keys_msi[] = {
+static struct dvb_usb_rc_key ir_codes_af9015_table_msi[] = {
{ 0x001e, KEY_1 },
{ 0x001f, KEY_2 },
{ 0x0020, KEY_3 },
@@ -424,7 +424,7 @@ static u8 af9015_ir_table_msi[] = {
};
/* MYGICTV U718 */
-static struct dvb_usb_rc_key af9015_rc_keys_mygictv[] = {
+static struct dvb_usb_rc_key ir_codes_af9015_table_mygictv[] = {
{ 0x003d, KEY_SWITCHVIDEOMODE },
/* TV / AV */
{ 0x0545, KEY_POWER },
@@ -550,7 +550,7 @@ static u8 af9015_ir_table_kworld[] = {
};
/* AverMedia Volar X */
-static struct dvb_usb_rc_key af9015_rc_keys_avermedia[] = {
+static struct dvb_usb_rc_key ir_codes_af9015_table_avermedia[] = {
{ 0x053d, KEY_PROG1 }, /* SOURCE */
{ 0x0512, KEY_POWER }, /* POWER */
{ 0x051e, KEY_1 }, /* 1 */
@@ -656,7 +656,7 @@ static u8 af9015_ir_table_avermedia_ks[] = {
};
/* Digittrade DVB-T USB Stick */
-static struct dvb_usb_rc_key af9015_rc_keys_digittrade[] = {
+static struct dvb_usb_rc_key ir_codes_af9015_table_digittrade[] = {
{ 0x010f, KEY_LAST }, /* RETURN */
{ 0x0517, KEY_TEXT }, /* TELETEXT */
{ 0x0108, KEY_EPG }, /* EPG */
@@ -719,7 +719,7 @@ static u8 af9015_ir_table_digittrade[] = {
};
/* TREKSTOR DVB-T USB Stick */
-static struct dvb_usb_rc_key af9015_rc_keys_trekstor[] = {
+static struct dvb_usb_rc_key ir_codes_af9015_table_trekstor[] = {
{ 0x0704, KEY_AGAIN }, /* Home */
{ 0x0705, KEY_MUTE }, /* Mute */
{ 0x0706, KEY_UP }, /* Up */
@@ -782,7 +782,7 @@ static u8 af9015_ir_table_trekstor[] = {
};
/* MSI DIGIVOX mini III */
-static struct dvb_usb_rc_key af9015_rc_keys_msi_digivox_iii[] = {
+static struct dvb_usb_rc_key ir_codes_af9015_table_msi_digivox_iii[] = {
{ 0x0713, KEY_POWER }, /* [red power button] */
{ 0x073b, KEY_VIDEO }, /* Source */
{ 0x073e, KEY_ZOOM }, /* Zoom */
diff --git a/drivers/media/dvb/dvb-usb/anysee.c b/drivers/media/dvb/dvb-usb/anysee.c
index bb69f3719f9a..faca1ad88a67 100644
--- a/drivers/media/dvb/dvb-usb/anysee.c
+++ b/drivers/media/dvb/dvb-usb/anysee.c
@@ -399,7 +399,7 @@ static int anysee_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
return 0;
}
-static struct dvb_usb_rc_key anysee_rc_keys[] = {
+static struct dvb_usb_rc_key ir_codes_anysee_table[] = {
{ 0x0100, KEY_0 },
{ 0x0101, KEY_1 },
{ 0x0102, KEY_2 },
@@ -518,8 +518,8 @@ static struct dvb_usb_device_properties anysee_properties = {
}
},
- .rc_key_map = anysee_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(anysee_rc_keys),
+ .rc_key_map = ir_codes_anysee_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_anysee_table),
.rc_query = anysee_rc_query,
.rc_interval = 200, /* windows driver uses 500ms */
diff --git a/drivers/media/dvb/dvb-usb/az6027.c b/drivers/media/dvb/dvb-usb/az6027.c
index d7290b2c0913..6681ac1c56e3 100644
--- a/drivers/media/dvb/dvb-usb/az6027.c
+++ b/drivers/media/dvb/dvb-usb/az6027.c
@@ -125,12 +125,12 @@ static const struct stb0899_s1_reg az6027_stb0899_s1_init_3[] = {
{ STB0899_RCOMPC , 0xc9 },
{ STB0899_AGC1CN , 0x01 },
{ STB0899_AGC1REF , 0x10 },
- { STB0899_RTC , 0x23 },
+ { STB0899_RTC , 0x23 },
{ STB0899_TMGCFG , 0x4e },
{ STB0899_AGC2REF , 0x34 },
{ STB0899_TLSR , 0x84 },
{ STB0899_CFD , 0xf7 },
- { STB0899_ACLC , 0x87 },
+ { STB0899_ACLC , 0x87 },
{ STB0899_BCLC , 0x94 },
{ STB0899_EQON , 0x41 },
{ STB0899_LDT , 0xf1 },
@@ -183,10 +183,10 @@ static const struct stb0899_s1_reg az6027_stb0899_s1_init_3[] = {
{ STB0899_ECNT3M , 0x0a },
{ STB0899_ECNT3L , 0xad },
{ STB0899_FECAUTO1 , 0x06 },
- { STB0899_FECM , 0x01 },
+ { STB0899_FECM , 0x01 },
{ STB0899_VTH12 , 0xb0 },
{ STB0899_VTH23 , 0x7a },
- { STB0899_VTH34 , 0x58 },
+ { STB0899_VTH34 , 0x58 },
{ STB0899_VTH56 , 0x38 },
{ STB0899_VTH67 , 0x34 },
{ STB0899_VTH78 , 0x24 },
@@ -195,7 +195,7 @@ static const struct stb0899_s1_reg az6027_stb0899_s1_init_3[] = {
{ STB0899_RSULC , 0xb1 }, /* DVB = 0xb1, DSS = 0xa1 */
{ STB0899_TSULC , 0x42 },
{ STB0899_RSLLC , 0x41 },
- { STB0899_TSLPL , 0x12 },
+ { STB0899_TSLPL , 0x12 },
{ STB0899_TSCFGH , 0x0c },
{ STB0899_TSCFGM , 0x00 },
{ STB0899_TSCFGL , 0x00 },
@@ -386,7 +386,7 @@ static int az6027_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
}
/* keys for the enclosed remote control */
-static struct dvb_usb_rc_key az6027_rc_keys[] = {
+static struct dvb_usb_rc_key ir_codes_az6027_table[] = {
{ 0x01, KEY_1 },
{ 0x02, KEY_2 },
};
@@ -417,11 +417,15 @@ static int az6027_ci_read_attribute_mem(struct dvb_ca_en50221 *ca,
u16 value;
u16 index;
int blen;
- u8 b[12];
+ u8 *b;
if (slot != 0)
return -EINVAL;
+ b = kmalloc(12, GFP_KERNEL);
+ if (!b)
+ return -ENOMEM;
+
mutex_lock(&state->ca_mutex);
req = 0xC1;
@@ -438,6 +442,7 @@ static int az6027_ci_read_attribute_mem(struct dvb_ca_en50221 *ca,
}
mutex_unlock(&state->ca_mutex);
+ kfree(b);
return ret;
}
@@ -485,11 +490,15 @@ static int az6027_ci_read_cam_control(struct dvb_ca_en50221 *ca,
u16 value;
u16 index;
int blen;
- u8 b[12];
+ u8 *b;
if (slot != 0)
return -EINVAL;
+ b = kmalloc(12, GFP_KERNEL);
+ if (!b)
+ return -ENOMEM;
+
mutex_lock(&state->ca_mutex);
req = 0xC3;
@@ -510,6 +519,7 @@ static int az6027_ci_read_cam_control(struct dvb_ca_en50221 *ca,
}
mutex_unlock(&state->ca_mutex);
+ kfree(b);
return ret;
}
@@ -556,7 +566,11 @@ static int CI_CamReady(struct dvb_ca_en50221 *ca, int slot)
u16 value;
u16 index;
int blen;
- u8 b[12];
+ u8 *b;
+
+ b = kmalloc(12, GFP_KERNEL);
+ if (!b)
+ return -ENOMEM;
req = 0xC8;
value = 0;
@@ -570,6 +584,7 @@ static int CI_CamReady(struct dvb_ca_en50221 *ca, int slot)
} else{
ret = b[0];
}
+ kfree(b);
return ret;
}
@@ -667,8 +682,11 @@ static int az6027_ci_poll_slot_status(struct dvb_ca_en50221 *ca, int slot, int o
u16 value;
u16 index;
int blen;
- u8 b[12];
+ u8 *b;
+ b = kmalloc(12, GFP_KERNEL);
+ if (!b)
+ return -ENOMEM;
mutex_lock(&state->ca_mutex);
req = 0xC5;
@@ -683,15 +701,13 @@ static int az6027_ci_poll_slot_status(struct dvb_ca_en50221 *ca, int slot, int o
} else
ret = 0;
- if (b[0] == 0) {
- ret = 0;
-
- } else if (b[0] == 1) {
+ if (!ret && b[0] == 1) {
ret = DVB_CA_EN50221_POLL_CAM_PRESENT |
DVB_CA_EN50221_POLL_CAM_READY;
}
mutex_unlock(&state->ca_mutex);
+ kfree(b);
return ret;
}
@@ -943,10 +959,16 @@ static int az6027_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int n
u16 value;
int length;
u8 req;
- u8 data[256];
+ u8 *data;
- if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
+ data = kmalloc(256, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ if (mutex_lock_interruptible(&d->i2c_mutex) < 0) {
+ kfree(data);
return -EAGAIN;
+ }
if (num > 2)
warn("more than 2 i2c messages at a time is not handled yet. TODO.");
@@ -976,17 +998,14 @@ static int az6027_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int n
i++;
} else {
- if (msg[i].addr == 0xd0) {
- /* demod 16bit addr */
- req = 0xBD;
- index = (((msg[i].buf[0] << 8) & 0xff00) | (msg[i].buf[1] & 0x00ff));
- value = msg[i].addr + (2 << 8);
- length = msg[i].len - 2;
- len = msg[i].len - 2;
- for (j = 0; j < len; j++)
- data[j] = msg[i].buf[j + 2];
-
- }
+ /* demod 16bit addr */
+ req = 0xBD;
+ index = (((msg[i].buf[0] << 8) & 0xff00) | (msg[i].buf[1] & 0x00ff));
+ value = msg[i].addr + (2 << 8);
+ length = msg[i].len - 2;
+ len = msg[i].len - 2;
+ for (j = 0; j < len; j++)
+ data[j] = msg[i].buf[j + 2];
az6027_usb_out_op(d, req, value, index, data, length);
}
}
@@ -1019,6 +1038,7 @@ static int az6027_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int n
}
}
mutex_unlock(&d->i2c_mutex);
+ kfree(data);
return i;
}
@@ -1039,8 +1059,14 @@ int az6027_identify_state(struct usb_device *udev,
struct dvb_usb_device_description **desc,
int *cold)
{
- u8 b[16];
- s16 ret = usb_control_msg(udev,
+ u8 *b;
+ s16 ret;
+
+ b = kmalloc(16, GFP_KERNEL);
+ if (!b)
+ return -ENOMEM;
+
+ ret = usb_control_msg(udev,
usb_rcvctrlpipe(udev, 0),
0xb7,
USB_TYPE_VENDOR | USB_DIR_IN,
@@ -1051,7 +1077,7 @@ int az6027_identify_state(struct usb_device *udev,
USB_CTRL_GET_TIMEOUT);
*cold = ret <= 0;
-
+ kfree(b);
deb_info("cold: %d\n", *cold);
return 0;
}
@@ -1059,8 +1085,10 @@ int az6027_identify_state(struct usb_device *udev,
static struct usb_device_id az6027_usb_table[] = {
{ USB_DEVICE(USB_VID_AZUREWAVE, USB_PID_AZUREWAVE_AZ6027) },
- { USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_DVBS2CI) },
- { USB_DEVICE(USB_VID_TECHNISAT, USB_PID_TECHNISAT_USB2_HDCI) },
+ { USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_DVBS2CI_V1) },
+ { USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_DVBS2CI_V2) },
+ { USB_DEVICE(USB_VID_TECHNISAT, USB_PID_TECHNISAT_USB2_HDCI_V1) },
+ { USB_DEVICE(USB_VID_TECHNISAT, USB_PID_TECHNISAT_USB2_HDCI_V2) },
{ },
};
@@ -1097,18 +1125,34 @@ static struct dvb_usb_device_properties az6027_properties = {
.power_ctrl = az6027_power_ctrl,
.read_mac_address = az6027_read_mac_addr,
*/
- .rc_key_map = az6027_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(az6027_rc_keys),
+ .rc_key_map = ir_codes_az6027_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_az6027_table),
.rc_interval = 400,
.rc_query = az6027_rc_query,
.i2c_algo = &az6027_i2c_algo,
- .num_device_descs = 1,
+ .num_device_descs = 5,
.devices = {
{
.name = "AZUREWAVE DVB-S/S2 USB2.0 (AZ6027)",
.cold_ids = { &az6027_usb_table[0], NULL },
.warm_ids = { NULL },
+ }, {
+ .name = "TERRATEC S7",
+ .cold_ids = { &az6027_usb_table[1], NULL },
+ .warm_ids = { NULL },
+ }, {
+ .name = "TERRATEC S7 MKII",
+ .cold_ids = { &az6027_usb_table[2], NULL },
+ .warm_ids = { NULL },
+ }, {
+ .name = "Technisat SkyStar USB 2 HD CI",
+ .cold_ids = { &az6027_usb_table[3], NULL },
+ .warm_ids = { NULL },
+ }, {
+ .name = "Technisat SkyStar USB 2 HD CI",
+ .cold_ids = { &az6027_usb_table[4], NULL },
+ .warm_ids = { NULL },
},
{ NULL },
}
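The az6027 CI and i2c changes above all replace on-stack buffers handed to usb_control_msg() with kmalloc()ed ones, because USB transfer buffers must be DMA-safe and must not live on the kernel stack; every return path then frees the buffer. A minimal sketch of that pattern (request code and length are illustrative only):

static int example_read_reply(struct usb_device *udev)
{
	u8 *b;
	int ret;

	b = kmalloc(12, GFP_KERNEL);	/* heap memory is DMA-safe */
	if (!b)
		return -ENOMEM;

	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 0xc8,
			      USB_TYPE_VENDOR | USB_DIR_IN,
			      0, 0, b, 12, USB_CTRL_GET_TIMEOUT);
	if (ret >= 0)
		ret = b[0];

	kfree(b);	/* freed on every path, success or error */
	return ret;
}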
diff --git a/drivers/media/dvb/dvb-usb/cinergyT2-core.c b/drivers/media/dvb/dvb-usb/cinergyT2-core.c
index e37ac4d48602..5a9c14bdc980 100644
--- a/drivers/media/dvb/dvb-usb/cinergyT2-core.c
+++ b/drivers/media/dvb/dvb-usb/cinergyT2-core.c
@@ -84,7 +84,7 @@ static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap)
return 0;
}
-static struct dvb_usb_rc_key cinergyt2_rc_keys[] = {
+static struct dvb_usb_rc_key ir_codes_cinergyt2_table[] = {
{ 0x0401, KEY_POWER },
{ 0x0402, KEY_1 },
{ 0x0403, KEY_2 },
@@ -218,8 +218,8 @@ static struct dvb_usb_device_properties cinergyt2_properties = {
.power_ctrl = cinergyt2_power_ctrl,
.rc_interval = 50,
- .rc_key_map = cinergyt2_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(cinergyt2_rc_keys),
+ .rc_key_map = ir_codes_cinergyt2_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_cinergyt2_table),
.rc_query = cinergyt2_rc_query,
.generic_bulk_ctrl_endpoint = 1,
diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
index 960376da7d59..0eb490889162 100644
--- a/drivers/media/dvb/dvb-usb/cxusb.c
+++ b/drivers/media/dvb/dvb-usb/cxusb.c
@@ -461,7 +461,7 @@ static int cxusb_d680_dmb_rc_query(struct dvb_usb_device *d, u32 *event,
return 0;
}
-static struct dvb_usb_rc_key dvico_mce_rc_keys[] = {
+static struct dvb_usb_rc_key ir_codes_dvico_mce_table[] = {
{ 0xfe02, KEY_TV },
{ 0xfe0e, KEY_MP3 },
{ 0xfe1a, KEY_DVD },
@@ -509,7 +509,7 @@ static struct dvb_usb_rc_key dvico_mce_rc_keys[] = {
{ 0xfe4e, KEY_POWER },
};
-static struct dvb_usb_rc_key dvico_portable_rc_keys[] = {
+static struct dvb_usb_rc_key ir_codes_dvico_portable_table[] = {
{ 0xfc02, KEY_SETUP }, /* Profile */
{ 0xfc43, KEY_POWER2 },
{ 0xfc06, KEY_EPG },
@@ -548,7 +548,7 @@ static struct dvb_usb_rc_key dvico_portable_rc_keys[] = {
{ 0xfc00, KEY_UNKNOWN }, /* HD */
};
-static struct dvb_usb_rc_key d680_dmb_rc_keys[] = {
+static struct dvb_usb_rc_key ir_codes_d680_dmb_table[] = {
{ 0x0038, KEY_UNKNOWN }, /* TV/AV */
{ 0x080c, KEY_ZOOM },
{ 0x0800, KEY_0 },
@@ -1025,8 +1025,9 @@ static int cxusb_dualdig4_rev2_frontend_attach(struct dvb_usb_adapter *adap)
cxusb_bluebird_gpio_pulse(adap->dev, 0x02, 1);
- dib7000p_i2c_enumeration(&adap->dev->i2c_adap, 1, 18,
- &cxusb_dualdig4_rev2_config);
+ if (dib7000p_i2c_enumeration(&adap->dev->i2c_adap, 1, 18,
+ &cxusb_dualdig4_rev2_config) < 0)
+ return -ENODEV;
adap->fe = dvb_attach(dib7000p_attach, &adap->dev->i2c_adap, 0x80,
&cxusb_dualdig4_rev2_config);
@@ -1449,8 +1450,8 @@ static struct dvb_usb_device_properties cxusb_bluebird_lgh064f_properties = {
.i2c_algo = &cxusb_i2c_algo,
.rc_interval = 100,
- .rc_key_map = dvico_portable_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dvico_portable_rc_keys),
+ .rc_key_map = ir_codes_dvico_portable_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dvico_portable_table),
.rc_query = cxusb_rc_query,
.generic_bulk_ctrl_endpoint = 0x01,
@@ -1500,8 +1501,8 @@ static struct dvb_usb_device_properties cxusb_bluebird_dee1601_properties = {
.i2c_algo = &cxusb_i2c_algo,
.rc_interval = 150,
- .rc_key_map = dvico_mce_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dvico_mce_rc_keys),
+ .rc_key_map = ir_codes_dvico_mce_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dvico_mce_table),
.rc_query = cxusb_rc_query,
.generic_bulk_ctrl_endpoint = 0x01,
@@ -1559,8 +1560,8 @@ static struct dvb_usb_device_properties cxusb_bluebird_lgz201_properties = {
.i2c_algo = &cxusb_i2c_algo,
.rc_interval = 100,
- .rc_key_map = dvico_portable_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dvico_portable_rc_keys),
+ .rc_key_map = ir_codes_dvico_portable_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dvico_portable_table),
.rc_query = cxusb_rc_query,
.generic_bulk_ctrl_endpoint = 0x01,
@@ -1609,8 +1610,8 @@ static struct dvb_usb_device_properties cxusb_bluebird_dtt7579_properties = {
.i2c_algo = &cxusb_i2c_algo,
.rc_interval = 100,
- .rc_key_map = dvico_portable_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dvico_portable_rc_keys),
+ .rc_key_map = ir_codes_dvico_portable_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dvico_portable_table),
.rc_query = cxusb_rc_query,
.generic_bulk_ctrl_endpoint = 0x01,
@@ -1658,8 +1659,8 @@ static struct dvb_usb_device_properties cxusb_bluebird_dualdig4_properties = {
.generic_bulk_ctrl_endpoint = 0x01,
.rc_interval = 100,
- .rc_key_map = dvico_mce_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dvico_mce_rc_keys),
+ .rc_key_map = ir_codes_dvico_mce_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dvico_mce_table),
.rc_query = cxusb_bluebird2_rc_query,
.num_device_descs = 1,
@@ -1706,8 +1707,8 @@ static struct dvb_usb_device_properties cxusb_bluebird_nano2_properties = {
.generic_bulk_ctrl_endpoint = 0x01,
.rc_interval = 100,
- .rc_key_map = dvico_portable_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dvico_portable_rc_keys),
+ .rc_key_map = ir_codes_dvico_portable_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dvico_portable_table),
.rc_query = cxusb_bluebird2_rc_query,
.num_device_descs = 1,
@@ -1756,8 +1757,8 @@ static struct dvb_usb_device_properties cxusb_bluebird_nano2_needsfirmware_prope
.generic_bulk_ctrl_endpoint = 0x01,
.rc_interval = 100,
- .rc_key_map = dvico_portable_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dvico_portable_rc_keys),
+ .rc_key_map = ir_codes_dvico_portable_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dvico_portable_table),
.rc_query = cxusb_rc_query,
.num_device_descs = 1,
@@ -1847,8 +1848,8 @@ struct dvb_usb_device_properties cxusb_bluebird_dualdig4_rev2_properties = {
.generic_bulk_ctrl_endpoint = 0x01,
.rc_interval = 100,
- .rc_key_map = dvico_mce_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dvico_mce_rc_keys),
+ .rc_key_map = ir_codes_dvico_mce_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dvico_mce_table),
.rc_query = cxusb_rc_query,
.num_device_descs = 1,
@@ -1895,8 +1896,8 @@ static struct dvb_usb_device_properties cxusb_d680_dmb_properties = {
.generic_bulk_ctrl_endpoint = 0x01,
.rc_interval = 100,
- .rc_key_map = d680_dmb_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(d680_dmb_rc_keys),
+ .rc_key_map = ir_codes_d680_dmb_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_d680_dmb_table),
.rc_query = cxusb_d680_dmb_rc_query,
.num_device_descs = 1,
@@ -1944,8 +1945,8 @@ static struct dvb_usb_device_properties cxusb_mygica_d689_properties = {
.generic_bulk_ctrl_endpoint = 0x01,
.rc_interval = 100,
- .rc_key_map = d680_dmb_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(d680_dmb_rc_keys),
+ .rc_key_map = ir_codes_d680_dmb_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_d680_dmb_table),
.rc_query = cxusb_d680_dmb_rc_query,
.num_device_descs = 1,
diff --git a/drivers/media/dvb/dvb-usb/dib0700_devices.c b/drivers/media/dvb/dvb-usb/dib0700_devices.c
index 34eab05afc6c..800800a9649e 100644
--- a/drivers/media/dvb/dvb-usb/dib0700_devices.c
+++ b/drivers/media/dvb/dvb-usb/dib0700_devices.c
@@ -562,7 +562,7 @@ static int dib0700_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
return 0;
}
-static struct dvb_usb_rc_key dib0700_rc_keys[] = {
+static struct dvb_usb_rc_key ir_codes_dib0700_table[] = {
/* Key codes for the tiny Pinnacle remote*/
{ 0x0700, KEY_MUTE },
{ 0x0701, KEY_MENU }, /* Pinnacle logo */
@@ -794,6 +794,43 @@ static struct dvb_usb_rc_key dib0700_rc_keys[] = {
{ 0x7a13, KEY_VOLUMEDOWN },
{ 0x7a40, KEY_POWER },
{ 0x7a41, KEY_MUTE },
+
+ /* Key codes for the Elgato EyeTV Diversity silver remote,
+ set dvb_usb_dib0700_ir_proto=0 */
+ { 0x4501, KEY_POWER },
+ { 0x4502, KEY_MUTE },
+ { 0x4503, KEY_1 },
+ { 0x4504, KEY_2 },
+ { 0x4505, KEY_3 },
+ { 0x4506, KEY_4 },
+ { 0x4507, KEY_5 },
+ { 0x4508, KEY_6 },
+ { 0x4509, KEY_7 },
+ { 0x450a, KEY_8 },
+ { 0x450b, KEY_9 },
+ { 0x450c, KEY_LAST },
+ { 0x450d, KEY_0 },
+ { 0x450e, KEY_ENTER },
+ { 0x450f, KEY_RED },
+ { 0x4510, KEY_CHANNELUP },
+ { 0x4511, KEY_GREEN },
+ { 0x4512, KEY_VOLUMEDOWN },
+ { 0x4513, KEY_OK },
+ { 0x4514, KEY_VOLUMEUP },
+ { 0x4515, KEY_YELLOW },
+ { 0x4516, KEY_CHANNELDOWN },
+ { 0x4517, KEY_BLUE },
+ { 0x4518, KEY_LEFT }, /* Skip backwards */
+ { 0x4519, KEY_PLAYPAUSE },
+ { 0x451a, KEY_RIGHT }, /* Skip forward */
+ { 0x451b, KEY_REWIND },
+ { 0x451c, KEY_L }, /* Live */
+ { 0x451d, KEY_FASTFORWARD },
+ { 0x451e, KEY_STOP }, /* 'Reveal' for Teletext */
+ { 0x451f, KEY_MENU }, /* KEY_TEXT for Teletext */
+ { 0x4540, KEY_RECORD }, /* Font 'Size' for Teletext */
+ { 0x4541, KEY_SCREEN }, /* Full screen toggle, 'Hold' for Teletext */
+ { 0x4542, KEY_SELECT }, /* Select video input, 'Select' for Teletext */
};
/* STK7700P: Hauppauge Nova-T Stick, AVerMedia Volar */
@@ -2049,6 +2086,7 @@ struct usb_device_id dib0700_usb_id_table[] = {
/* 65 */{ USB_DEVICE(USB_VID_PINNACLE, USB_PID_PINNACLE_PCTV73ESE) },
{ USB_DEVICE(USB_VID_PINNACLE, USB_PID_PINNACLE_PCTV282E) },
{ USB_DEVICE(USB_VID_DIBCOM, USB_PID_DIBCOM_STK8096GP) },
+ { USB_DEVICE(USB_VID_ELGATO, USB_PID_ELGATO_EYETV_DIVERSITY) },
{ 0 } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, dib0700_usb_id_table);
@@ -2131,8 +2169,8 @@ struct dvb_usb_device_properties dib0700_devices[] = {
},
.rc_interval = DEFAULT_RC_INTERVAL,
- .rc_key_map = dib0700_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dib0700_rc_keys),
+ .rc_key_map = ir_codes_dib0700_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dib0700_table),
.rc_query = dib0700_rc_query
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -2160,8 +2198,8 @@ struct dvb_usb_device_properties dib0700_devices[] = {
},
.rc_interval = DEFAULT_RC_INTERVAL,
- .rc_key_map = dib0700_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dib0700_rc_keys),
+ .rc_key_map = ir_codes_dib0700_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dib0700_table),
.rc_query = dib0700_rc_query
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -2214,8 +2252,8 @@ struct dvb_usb_device_properties dib0700_devices[] = {
},
.rc_interval = DEFAULT_RC_INTERVAL,
- .rc_key_map = dib0700_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dib0700_rc_keys),
+ .rc_key_map = ir_codes_dib0700_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dib0700_table),
.rc_query = dib0700_rc_query
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -2251,8 +2289,8 @@ struct dvb_usb_device_properties dib0700_devices[] = {
},
.rc_interval = DEFAULT_RC_INTERVAL,
- .rc_key_map = dib0700_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dib0700_rc_keys),
+ .rc_key_map = ir_codes_dib0700_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dib0700_table),
.rc_query = dib0700_rc_query
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -2321,8 +2359,8 @@ struct dvb_usb_device_properties dib0700_devices[] = {
},
.rc_interval = DEFAULT_RC_INTERVAL,
- .rc_key_map = dib0700_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dib0700_rc_keys),
+ .rc_key_map = ir_codes_dib0700_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dib0700_table),
.rc_query = dib0700_rc_query
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -2360,8 +2398,8 @@ struct dvb_usb_device_properties dib0700_devices[] = {
},
.rc_interval = DEFAULT_RC_INTERVAL,
- .rc_key_map = dib0700_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dib0700_rc_keys),
+ .rc_key_map = ir_codes_dib0700_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dib0700_table),
.rc_query = dib0700_rc_query
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -2393,7 +2431,7 @@ struct dvb_usb_device_properties dib0700_devices[] = {
}
},
- .num_device_descs = 6,
+ .num_device_descs = 7,
.devices = {
{ "DiBcom STK7070PD reference design",
{ &dib0700_usb_id_table[17], NULL },
@@ -2419,11 +2457,15 @@ struct dvb_usb_device_properties dib0700_devices[] = {
{ "Sony PlayTV",
{ &dib0700_usb_id_table[44], NULL },
{ NULL },
- }
+ },
+ { "Elgato EyeTV Diversity",
+ { &dib0700_usb_id_table[68], NULL },
+ { NULL },
+ },
},
.rc_interval = DEFAULT_RC_INTERVAL,
- .rc_key_map = dib0700_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dib0700_rc_keys),
+ .rc_key_map = ir_codes_dib0700_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dib0700_table),
.rc_query = dib0700_rc_query
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -2484,8 +2526,8 @@ struct dvb_usb_device_properties dib0700_devices[] = {
},
},
.rc_interval = DEFAULT_RC_INTERVAL,
- .rc_key_map = dib0700_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dib0700_rc_keys),
+ .rc_key_map = ir_codes_dib0700_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dib0700_table),
.rc_query = dib0700_rc_query
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
.num_adapters = 1,
@@ -2513,8 +2555,8 @@ struct dvb_usb_device_properties dib0700_devices[] = {
},
},
.rc_interval = DEFAULT_RC_INTERVAL,
- .rc_key_map = dib0700_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dib0700_rc_keys),
+ .rc_key_map = ir_codes_dib0700_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dib0700_table),
.rc_query = dib0700_rc_query
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
.num_adapters = 1,
@@ -2574,8 +2616,8 @@ struct dvb_usb_device_properties dib0700_devices[] = {
},
},
.rc_interval = DEFAULT_RC_INTERVAL,
- .rc_key_map = dib0700_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dib0700_rc_keys),
+ .rc_key_map = ir_codes_dib0700_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dib0700_table),
.rc_query = dib0700_rc_query
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
.num_adapters = 1,
@@ -2612,8 +2654,8 @@ struct dvb_usb_device_properties dib0700_devices[] = {
},
.rc_interval = DEFAULT_RC_INTERVAL,
- .rc_key_map = dib0700_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dib0700_rc_keys),
+ .rc_key_map = ir_codes_dib0700_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dib0700_table),
.rc_query = dib0700_rc_query
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
@@ -2656,8 +2698,8 @@ struct dvb_usb_device_properties dib0700_devices[] = {
},
.rc_interval = DEFAULT_RC_INTERVAL,
- .rc_key_map = dib0700_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dib0700_rc_keys),
+ .rc_key_map = ir_codes_dib0700_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dib0700_table),
.rc_query = dib0700_rc_query
}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
.num_adapters = 1,
@@ -2687,8 +2729,8 @@ struct dvb_usb_device_properties dib0700_devices[] = {
},
.rc_interval = DEFAULT_RC_INTERVAL,
- .rc_key_map = dib0700_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dib0700_rc_keys),
+ .rc_key_map = ir_codes_dib0700_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dib0700_table),
.rc_query = dib0700_rc_query
},
};
diff --git a/drivers/media/dvb/dvb-usb/dibusb-common.c b/drivers/media/dvb/dvb-usb/dibusb-common.c
index 9143b5631e88..bc08bc0b723c 100644
--- a/drivers/media/dvb/dvb-usb/dibusb-common.c
+++ b/drivers/media/dvb/dvb-usb/dibusb-common.c
@@ -327,7 +327,7 @@ EXPORT_SYMBOL(dibusb_dib3000mc_tuner_attach);
/*
* common remote control stuff
*/
-struct dvb_usb_rc_key dibusb_rc_keys[] = {
+struct dvb_usb_rc_key ir_codes_dibusb_table[] = {
/* Key codes for the little Artec T1/Twinhan/HAMA/ remote. */
{ 0x0016, KEY_POWER },
{ 0x0010, KEY_MUTE },
@@ -456,7 +456,7 @@ struct dvb_usb_rc_key dibusb_rc_keys[] = {
{ 0x804e, KEY_ENTER },
{ 0x804f, KEY_VOLUMEDOWN },
};
-EXPORT_SYMBOL(dibusb_rc_keys);
+EXPORT_SYMBOL(ir_codes_dibusb_table);
int dibusb_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
{
diff --git a/drivers/media/dvb/dvb-usb/dibusb-mb.c b/drivers/media/dvb/dvb-usb/dibusb-mb.c
index 5c0126dc1ff9..eb2e6f050fbe 100644
--- a/drivers/media/dvb/dvb-usb/dibusb-mb.c
+++ b/drivers/media/dvb/dvb-usb/dibusb-mb.c
@@ -212,7 +212,7 @@ static struct dvb_usb_device_properties dibusb1_1_properties = {
.power_ctrl = dibusb_power_ctrl,
.rc_interval = DEFAULT_RC_INTERVAL,
- .rc_key_map = dibusb_rc_keys,
+ .rc_key_map = ir_codes_dibusb_table,
.rc_key_map_size = 111, /* wow, that is ugly ... I want to load it to the driver dynamically */
.rc_query = dibusb_rc_query,
@@ -296,7 +296,7 @@ static struct dvb_usb_device_properties dibusb1_1_an2235_properties = {
.power_ctrl = dibusb_power_ctrl,
.rc_interval = DEFAULT_RC_INTERVAL,
- .rc_key_map = dibusb_rc_keys,
+ .rc_key_map = ir_codes_dibusb_table,
.rc_key_map_size = 111, /* wow, that is ugly ... I want to load it to the driver dynamically */
.rc_query = dibusb_rc_query,
@@ -360,7 +360,7 @@ static struct dvb_usb_device_properties dibusb2_0b_properties = {
.power_ctrl = dibusb2_0_power_ctrl,
.rc_interval = DEFAULT_RC_INTERVAL,
- .rc_key_map = dibusb_rc_keys,
+ .rc_key_map = ir_codes_dibusb_table,
.rc_key_map_size = 111, /* wow, that is ugly ... I want to load it to the driver dynamically */
.rc_query = dibusb_rc_query,
@@ -417,7 +417,7 @@ static struct dvb_usb_device_properties artec_t1_usb2_properties = {
.power_ctrl = dibusb2_0_power_ctrl,
.rc_interval = DEFAULT_RC_INTERVAL,
- .rc_key_map = dibusb_rc_keys,
+ .rc_key_map = ir_codes_dibusb_table,
.rc_key_map_size = 111, /* wow, that is ugly ... I want to load it to the driver dynamically */
.rc_query = dibusb_rc_query,
diff --git a/drivers/media/dvb/dvb-usb/dibusb-mc.c b/drivers/media/dvb/dvb-usb/dibusb-mc.c
index a05b9f875663..588308eb6638 100644
--- a/drivers/media/dvb/dvb-usb/dibusb-mc.c
+++ b/drivers/media/dvb/dvb-usb/dibusb-mc.c
@@ -82,7 +82,7 @@ static struct dvb_usb_device_properties dibusb_mc_properties = {
.power_ctrl = dibusb2_0_power_ctrl,
.rc_interval = DEFAULT_RC_INTERVAL,
- .rc_key_map = dibusb_rc_keys,
+ .rc_key_map = ir_codes_dibusb_table,
.rc_key_map_size = 111, /* FIXME */
.rc_query = dibusb_rc_query,
diff --git a/drivers/media/dvb/dvb-usb/dibusb.h b/drivers/media/dvb/dvb-usb/dibusb.h
index 8e847aa73ba1..3d50ac59088f 100644
--- a/drivers/media/dvb/dvb-usb/dibusb.h
+++ b/drivers/media/dvb/dvb-usb/dibusb.h
@@ -124,7 +124,7 @@ extern int dibusb2_0_power_ctrl(struct dvb_usb_device *, int);
#define DEFAULT_RC_INTERVAL 150
//#define DEFAULT_RC_INTERVAL 100000
-extern struct dvb_usb_rc_key dibusb_rc_keys[];
+extern struct dvb_usb_rc_key ir_codes_dibusb_table[];
extern int dibusb_rc_query(struct dvb_usb_device *, u32 *, int *);
extern int dibusb_read_eeprom_byte(struct dvb_usb_device *, u8, u8 *);
diff --git a/drivers/media/dvb/dvb-usb/digitv.c b/drivers/media/dvb/dvb-usb/digitv.c
index 955147d00756..e826077094fa 100644
--- a/drivers/media/dvb/dvb-usb/digitv.c
+++ b/drivers/media/dvb/dvb-usb/digitv.c
@@ -161,7 +161,7 @@ static int digitv_tuner_attach(struct dvb_usb_adapter *adap)
return 0;
}
-static struct dvb_usb_rc_key digitv_rc_keys[] = {
+static struct dvb_usb_rc_key ir_codes_digitv_table[] = {
{ 0x5f55, KEY_0 },
{ 0x6f55, KEY_1 },
{ 0x9f55, KEY_2 },
@@ -311,8 +311,8 @@ static struct dvb_usb_device_properties digitv_properties = {
.identify_state = digitv_identify_state,
.rc_interval = 1000,
- .rc_key_map = digitv_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(digitv_rc_keys),
+ .rc_key_map = ir_codes_digitv_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_digitv_table),
.rc_query = digitv_rc_query,
.i2c_algo = &digitv_i2c_algo,
diff --git a/drivers/media/dvb/dvb-usb/dtt200u.c b/drivers/media/dvb/dvb-usb/dtt200u.c
index a1b12b01cbe4..f57e59044d4d 100644
--- a/drivers/media/dvb/dvb-usb/dtt200u.c
+++ b/drivers/media/dvb/dvb-usb/dtt200u.c
@@ -57,7 +57,7 @@ static int dtt200u_pid_filter(struct dvb_usb_adapter *adap, int index, u16 pid,
/* remote control */
/* key list for the tiny remote control (Yakumo, don't know about the others) */
-static struct dvb_usb_rc_key dtt200u_rc_keys[] = {
+static struct dvb_usb_rc_key ir_codes_dtt200u_table[] = {
{ 0x8001, KEY_MUTE },
{ 0x8002, KEY_CHANNELDOWN },
{ 0x8003, KEY_VOLUMEDOWN },
@@ -162,8 +162,8 @@ static struct dvb_usb_device_properties dtt200u_properties = {
.power_ctrl = dtt200u_power_ctrl,
.rc_interval = 300,
- .rc_key_map = dtt200u_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dtt200u_rc_keys),
+ .rc_key_map = ir_codes_dtt200u_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dtt200u_table),
.rc_query = dtt200u_rc_query,
.generic_bulk_ctrl_endpoint = 0x01,
@@ -207,8 +207,8 @@ static struct dvb_usb_device_properties wt220u_properties = {
.power_ctrl = dtt200u_power_ctrl,
.rc_interval = 300,
- .rc_key_map = dtt200u_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dtt200u_rc_keys),
+ .rc_key_map = ir_codes_dtt200u_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dtt200u_table),
.rc_query = dtt200u_rc_query,
.generic_bulk_ctrl_endpoint = 0x01,
@@ -252,8 +252,8 @@ static struct dvb_usb_device_properties wt220u_fc_properties = {
.power_ctrl = dtt200u_power_ctrl,
.rc_interval = 300,
- .rc_key_map = dtt200u_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dtt200u_rc_keys),
+ .rc_key_map = ir_codes_dtt200u_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dtt200u_table),
.rc_query = dtt200u_rc_query,
.generic_bulk_ctrl_endpoint = 0x01,
@@ -297,8 +297,8 @@ static struct dvb_usb_device_properties wt220u_zl0353_properties = {
.power_ctrl = dtt200u_power_ctrl,
.rc_interval = 300,
- .rc_key_map = dtt200u_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dtt200u_rc_keys),
+ .rc_key_map = ir_codes_dtt200u_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dtt200u_table),
.rc_query = dtt200u_rc_query,
.generic_bulk_ctrl_endpoint = 0x01,
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb-ids.h b/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
index ae8b57acfe05..085c4e457e0e 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
+++ b/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
@@ -124,9 +124,11 @@
#define USB_PID_KWORLD_395U 0xe396
#define USB_PID_KWORLD_395U_2 0xe39b
#define USB_PID_KWORLD_395U_3 0xe395
+#define USB_PID_KWORLD_395U_4 0xe39a
#define USB_PID_KWORLD_MC810 0xc810
#define USB_PID_KWORLD_PC160_2T 0xc160
#define USB_PID_KWORLD_PC160_T 0xc161
+#define USB_PID_KWORLD_UB383_T 0xe383
#define USB_PID_KWORLD_VSTREAM_COLD 0x17de
#define USB_PID_KWORLD_VSTREAM_WARM 0x17df
#define USB_PID_TERRATEC_CINERGY_T_USB_XE 0x0055
@@ -288,6 +290,7 @@
#define USB_PID_MSI_DIGI_VOX_MINI_III 0x8807
#define USB_PID_SONY_PLAYTV 0x0003
#define USB_PID_MYGICA_D689 0xd811
+#define USB_PID_ELGATO_EYETV_DIVERSITY 0x0011
#define USB_PID_ELGATO_EYETV_DTT 0x0021
#define USB_PID_ELGATO_EYETV_DTT_Dlx 0x0020
#define USB_PID_DVB_T_USB_STICK_HIGH_SPEED_COLD 0x5000
@@ -296,6 +299,8 @@
#define USB_PID_TVWAY_PLUS 0x0002
#define USB_PID_SVEON_STV20 0xe39d
#define USB_PID_AZUREWAVE_AZ6027 0x3275
-#define USB_PID_TERRATEC_DVBS2CI 0x3275
-#define USB_PID_TECHNISAT_USB2_HDCI 0x0002
+#define USB_PID_TERRATEC_DVBS2CI_V1 0x10a4
+#define USB_PID_TERRATEC_DVBS2CI_V2 0x10ac
+#define USB_PID_TECHNISAT_USB2_HDCI_V1 0x0001
+#define USB_PID_TECHNISAT_USB2_HDCI_V2 0x0002
#endif
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb-urb.c b/drivers/media/dvb/dvb-usb/dvb-usb-urb.c
index 6fe71c6745eb..bb46ba6a3573 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb-urb.c
+++ b/drivers/media/dvb/dvb-usb/dvb-usb-urb.c
@@ -42,6 +42,8 @@ int dvb_usb_generic_rw(struct dvb_usb_device *d, u8 *wbuf, u16 wlen, u8 *rbuf,
msleep(delay_ms);
ret = usb_bulk_msg(d->udev,usb_rcvbulkpipe(d->udev,
+ d->props.generic_bulk_ctrl_endpoint_response ?
+ d->props.generic_bulk_ctrl_endpoint_response :
d->props.generic_bulk_ctrl_endpoint),rbuf,rlen,&actlen,
2000);
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb.h b/drivers/media/dvb/dvb-usb/dvb-usb.h
index 0143aef19ecd..4a9f676087bf 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb.h
+++ b/drivers/media/dvb/dvb-usb/dvb-usb.h
@@ -198,6 +198,12 @@ struct dvb_usb_adapter_properties {
* is non-zero, one can use dvb_usb_generic_rw and dvb_usb_generic_write-
* helper functions.
*
+ * @generic_bulk_ctrl_endpoint_response: some DVB USB devices use a separate
+ * endpoint for responses to control messages sent with bulk transfers via
+ * the generic_bulk_ctrl_endpoint. When this is non-zero, this will be used
+ * instead of the generic_bulk_ctrl_endpoint when reading usb responses in
+ * the dvb_usb_generic_rw helper function.
+ *
* @num_device_descs: number of struct dvb_usb_device_description in @devices
* @devices: array of struct dvb_usb_device_description compatibles with these
* properties.
@@ -239,6 +245,7 @@ struct dvb_usb_device_properties {
struct i2c_algorithm *i2c_algo;
int generic_bulk_ctrl_endpoint;
+ int generic_bulk_ctrl_endpoint_response;
int num_device_descs;
struct dvb_usb_device_description devices[12];
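The new generic_bulk_ctrl_endpoint_response field documented above lets a device answer generic control commands on a different bulk endpoint than the one commands are written to; dvb_usb_generic_rw() reads from it whenever it is non-zero. A hedged sketch of how a device's properties might set it (the endpoint numbers are made up for illustration):

static struct dvb_usb_device_properties example_properties = {
	/* ... */
	.generic_bulk_ctrl_endpoint          = 0x01,	/* host -> device commands */
	.generic_bulk_ctrl_endpoint_response = 0x81,	/* device -> host replies */
	/* ... */
};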
diff --git a/drivers/media/dvb/dvb-usb/dw2102.c b/drivers/media/dvb/dvb-usb/dw2102.c
index accc65509b07..e8fb85380672 100644
--- a/drivers/media/dvb/dvb-usb/dw2102.c
+++ b/drivers/media/dvb/dvb-usb/dw2102.c
@@ -73,7 +73,7 @@
"Please see linux/Documentation/dvb/ for more details " \
"on firmware-problems."
-struct dvb_usb_rc_keys_table {
+struct ir_codes_dvb_usb_table_table {
struct dvb_usb_rc_key *rc_keys;
int rc_keys_size;
};
@@ -948,7 +948,7 @@ static int dw3101_tuner_attach(struct dvb_usb_adapter *adap)
return 0;
}
-static struct dvb_usb_rc_key dw210x_rc_keys[] = {
+static struct dvb_usb_rc_key ir_codes_dw210x_table[] = {
{ 0xf80a, KEY_Q }, /*power*/
{ 0xf80c, KEY_M }, /*mute*/
{ 0xf811, KEY_1 },
@@ -982,7 +982,7 @@ static struct dvb_usb_rc_key dw210x_rc_keys[] = {
{ 0xf81b, KEY_B }, /*recall*/
};
-static struct dvb_usb_rc_key tevii_rc_keys[] = {
+static struct dvb_usb_rc_key ir_codes_tevii_table[] = {
{ 0xf80a, KEY_POWER },
{ 0xf80c, KEY_MUTE },
{ 0xf811, KEY_1 },
@@ -1032,7 +1032,7 @@ static struct dvb_usb_rc_key tevii_rc_keys[] = {
{ 0xf858, KEY_SWITCHVIDEOMODE },
};
-static struct dvb_usb_rc_key tbs_rc_keys[] = {
+static struct dvb_usb_rc_key ir_codes_tbs_table[] = {
{ 0xf884, KEY_POWER },
{ 0xf894, KEY_MUTE },
{ 0xf887, KEY_1 },
@@ -1067,10 +1067,10 @@ static struct dvb_usb_rc_key tbs_rc_keys[] = {
{ 0xf89b, KEY_MODE }
};
-static struct dvb_usb_rc_keys_table keys_tables[] = {
- { dw210x_rc_keys, ARRAY_SIZE(dw210x_rc_keys) },
- { tevii_rc_keys, ARRAY_SIZE(tevii_rc_keys) },
- { tbs_rc_keys, ARRAY_SIZE(tbs_rc_keys) },
+static struct ir_codes_dvb_usb_table_table keys_tables[] = {
+ { ir_codes_dw210x_table, ARRAY_SIZE(ir_codes_dw210x_table) },
+ { ir_codes_tevii_table, ARRAY_SIZE(ir_codes_tevii_table) },
+ { ir_codes_tbs_table, ARRAY_SIZE(ir_codes_tbs_table) },
};
static int dw2102_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
@@ -1185,14 +1185,14 @@ static int dw2102_load_firmware(struct usb_device *dev,
/* init registers */
switch (dev->descriptor.idProduct) {
case USB_PID_PROF_1100:
- s6x0_properties.rc_key_map = tbs_rc_keys;
+ s6x0_properties.rc_key_map = ir_codes_tbs_table;
s6x0_properties.rc_key_map_size =
- ARRAY_SIZE(tbs_rc_keys);
+ ARRAY_SIZE(ir_codes_tbs_table);
break;
case USB_PID_TEVII_S650:
- dw2104_properties.rc_key_map = tevii_rc_keys;
+ dw2104_properties.rc_key_map = ir_codes_tevii_table;
dw2104_properties.rc_key_map_size =
- ARRAY_SIZE(tevii_rc_keys);
+ ARRAY_SIZE(ir_codes_tevii_table);
case USB_PID_DW2104:
reset = 1;
dw210x_op_rw(dev, 0xc4, 0x0000, 0, &reset, 1,
@@ -1255,8 +1255,8 @@ static struct dvb_usb_device_properties dw2102_properties = {
.no_reconnect = 1,
.i2c_algo = &dw2102_serit_i2c_algo,
- .rc_key_map = dw210x_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dw210x_rc_keys),
+ .rc_key_map = ir_codes_dw210x_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dw210x_table),
.rc_interval = 150,
.rc_query = dw2102_rc_query,
@@ -1306,8 +1306,8 @@ static struct dvb_usb_device_properties dw2104_properties = {
.no_reconnect = 1,
.i2c_algo = &dw2104_i2c_algo,
- .rc_key_map = dw210x_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dw210x_rc_keys),
+ .rc_key_map = ir_codes_dw210x_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dw210x_table),
.rc_interval = 150,
.rc_query = dw2102_rc_query,
@@ -1353,8 +1353,8 @@ static struct dvb_usb_device_properties dw3101_properties = {
.no_reconnect = 1,
.i2c_algo = &dw3101_i2c_algo,
- .rc_key_map = dw210x_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(dw210x_rc_keys),
+ .rc_key_map = ir_codes_dw210x_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_dw210x_table),
.rc_interval = 150,
.rc_query = dw2102_rc_query,
@@ -1396,8 +1396,8 @@ static struct dvb_usb_device_properties s6x0_properties = {
.no_reconnect = 1,
.i2c_algo = &s6x0_i2c_algo,
- .rc_key_map = tevii_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(tevii_rc_keys),
+ .rc_key_map = ir_codes_tevii_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_tevii_table),
.rc_interval = 150,
.rc_query = dw2102_rc_query,
@@ -1459,8 +1459,8 @@ static int dw2102_probe(struct usb_interface *intf,
/* fill only different fields */
p7500->firmware = "dvb-usb-p7500.fw";
p7500->devices[0] = d7500;
- p7500->rc_key_map = tbs_rc_keys;
- p7500->rc_key_map_size = ARRAY_SIZE(tbs_rc_keys);
+ p7500->rc_key_map = ir_codes_tbs_table;
+ p7500->rc_key_map_size = ARRAY_SIZE(ir_codes_tbs_table);
p7500->adapter->frontend_attach = prof_7500_frontend_attach;
if (0 == dvb_usb_device_init(intf, &dw2102_properties,
diff --git a/drivers/media/dvb/dvb-usb/friio-fe.c b/drivers/media/dvb/dvb-usb/friio-fe.c
index d14bd227b502..93c21ddd0b77 100644
--- a/drivers/media/dvb/dvb-usb/friio-fe.c
+++ b/drivers/media/dvb/dvb-usb/friio-fe.c
@@ -300,7 +300,7 @@ static int jdvbt90502_set_frontend(struct dvb_frontend *fe,
struct dvb_frontend_parameters *p)
{
/**
- * NOTE: ignore all the paramters except frequency.
+ * NOTE: ignore all the parameters except frequency.
* others should be fixed to the proper value for ISDB-T,
* but don't check here.
*/
diff --git a/drivers/media/dvb/dvb-usb/gp8psk.c b/drivers/media/dvb/dvb-usb/gp8psk.c
index afb444db43ad..45106ac49674 100644
--- a/drivers/media/dvb/dvb-usb/gp8psk.c
+++ b/drivers/media/dvb/dvb-usb/gp8psk.c
@@ -105,6 +105,10 @@ static int gp8psk_load_bcm4500fw(struct dvb_usb_device *d)
ptr = fw->data;
buf = kmalloc(64, GFP_KERNEL | GFP_DMA);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto out_rel_fw;
+ }
while (ptr[0] != 0xff) {
u16 buflen = ptr[0] + 4;
diff --git a/drivers/media/dvb/dvb-usb/m920x.c b/drivers/media/dvb/dvb-usb/m920x.c
index 737ffa36ac9c..c211fef45fc3 100644
--- a/drivers/media/dvb/dvb-usb/m920x.c
+++ b/drivers/media/dvb/dvb-usb/m920x.c
@@ -589,7 +589,7 @@ static struct m920x_inits pinnacle310e_init[] = {
};
/* ir keymaps */
-static struct dvb_usb_rc_key megasky_rc_keys [] = {
+static struct dvb_usb_rc_key ir_codes_megasky_table [] = {
{ 0x0012, KEY_POWER },
{ 0x001e, KEY_CYCLEWINDOWS }, /* min/max */
{ 0x0002, KEY_CHANNELUP },
@@ -608,7 +608,7 @@ static struct dvb_usb_rc_key megasky_rc_keys [] = {
{ 0x000e, KEY_COFFEE }, /* "MTS" */
};
-static struct dvb_usb_rc_key tvwalkertwin_rc_keys [] = {
+static struct dvb_usb_rc_key ir_codes_tvwalkertwin_table [] = {
{ 0x0001, KEY_ZOOM }, /* Full Screen */
{ 0x0002, KEY_CAMERA }, /* snapshot */
{ 0x0003, KEY_MUTE },
@@ -628,7 +628,7 @@ static struct dvb_usb_rc_key tvwalkertwin_rc_keys [] = {
{ 0x001e, KEY_VOLUMEUP },
};
-static struct dvb_usb_rc_key pinnacle310e_rc_keys[] = {
+static struct dvb_usb_rc_key ir_codes_pinnacle310e_table[] = {
{ 0x16, KEY_POWER },
{ 0x17, KEY_FAVORITES },
{ 0x0f, KEY_TEXT },
@@ -785,8 +785,8 @@ static struct dvb_usb_device_properties megasky_properties = {
.download_firmware = m920x_firmware_download,
.rc_interval = 100,
- .rc_key_map = megasky_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(megasky_rc_keys),
+ .rc_key_map = ir_codes_megasky_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_megasky_table),
.rc_query = m920x_rc_query,
.size_of_priv = sizeof(struct m920x_state),
@@ -886,8 +886,8 @@ static struct dvb_usb_device_properties tvwalkertwin_properties = {
.download_firmware = m920x_firmware_download,
.rc_interval = 100,
- .rc_key_map = tvwalkertwin_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(tvwalkertwin_rc_keys),
+ .rc_key_map = ir_codes_tvwalkertwin_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_tvwalkertwin_table),
.rc_query = m920x_rc_query,
.size_of_priv = sizeof(struct m920x_state),
@@ -993,8 +993,8 @@ static struct dvb_usb_device_properties pinnacle_pctv310e_properties = {
.download_firmware = NULL,
.rc_interval = 100,
- .rc_key_map = pinnacle310e_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(pinnacle310e_rc_keys),
+ .rc_key_map = ir_codes_pinnacle310e_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_pinnacle310e_table),
.rc_query = m920x_rc_query,
.size_of_priv = sizeof(struct m920x_state),
diff --git a/drivers/media/dvb/dvb-usb/nova-t-usb2.c b/drivers/media/dvb/dvb-usb/nova-t-usb2.c
index b41d66ef8325..d195a587cc65 100644
--- a/drivers/media/dvb/dvb-usb/nova-t-usb2.c
+++ b/drivers/media/dvb/dvb-usb/nova-t-usb2.c
@@ -21,7 +21,7 @@ DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
#define deb_ee(args...) dprintk(debug,0x02,args)
/* Hauppauge NOVA-T USB2 keys */
-static struct dvb_usb_rc_key haupp_rc_keys [] = {
+static struct dvb_usb_rc_key ir_codes_haupp_table [] = {
{ 0x1e00, KEY_0 },
{ 0x1e01, KEY_1 },
{ 0x1e02, KEY_2 },
@@ -91,14 +91,14 @@ static int nova_t_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
deb_rc("raw key code 0x%02x, 0x%02x, 0x%02x to c: %02x d: %02x toggle: %d\n",key[1],key[2],key[3],custom,data,toggle);
- for (i = 0; i < ARRAY_SIZE(haupp_rc_keys); i++) {
- if (rc5_data(&haupp_rc_keys[i]) == data &&
- rc5_custom(&haupp_rc_keys[i]) == custom) {
+ for (i = 0; i < ARRAY_SIZE(ir_codes_haupp_table); i++) {
+ if (rc5_data(&ir_codes_haupp_table[i]) == data &&
+ rc5_custom(&ir_codes_haupp_table[i]) == custom) {
- deb_rc("c: %x, d: %x\n", rc5_data(&haupp_rc_keys[i]),
- rc5_custom(&haupp_rc_keys[i]));
+ deb_rc("c: %x, d: %x\n", rc5_data(&ir_codes_haupp_table[i]),
+ rc5_custom(&ir_codes_haupp_table[i]));
- *event = haupp_rc_keys[i].event;
+ *event = ir_codes_haupp_table[i].event;
*state = REMOTE_KEY_PRESSED;
if (st->old_toggle == toggle) {
if (st->last_repeat_count++ < 2)
@@ -196,8 +196,8 @@ static struct dvb_usb_device_properties nova_t_properties = {
.read_mac_address = nova_t_read_mac_address,
.rc_interval = 100,
- .rc_key_map = haupp_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(haupp_rc_keys),
+ .rc_key_map = ir_codes_haupp_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_haupp_table),
.rc_query = nova_t_rc_query,
.i2c_algo = &dibusb_i2c_algo,
diff --git a/drivers/media/dvb/dvb-usb/opera1.c b/drivers/media/dvb/dvb-usb/opera1.c
index 830557696ae6..dfb81ff1d9a7 100644
--- a/drivers/media/dvb/dvb-usb/opera1.c
+++ b/drivers/media/dvb/dvb-usb/opera1.c
@@ -35,7 +35,7 @@
struct opera1_state {
u32 last_key_pressed;
};
-struct opera_rc_keys {
+struct ir_codes_opera_table {
u32 keycode;
u32 event;
};
@@ -331,7 +331,7 @@ static int opera1_pid_filter_control(struct dvb_usb_adapter *adap, int onoff)
return 0;
}
-static struct dvb_usb_rc_key opera1_rc_keys[] = {
+static struct dvb_usb_rc_key ir_codes_opera1_table[] = {
{0x5fa0, KEY_1},
{0x51af, KEY_2},
{0x5da2, KEY_3},
@@ -404,12 +404,12 @@ static int opera1_rc_query(struct dvb_usb_device *dev, u32 * event, int *state)
send_key = (send_key & 0xffff) | 0x0100;
- for (i = 0; i < ARRAY_SIZE(opera1_rc_keys); i++) {
- if (rc5_scan(&opera1_rc_keys[i]) == (send_key & 0xffff)) {
+ for (i = 0; i < ARRAY_SIZE(ir_codes_opera1_table); i++) {
+ if (rc5_scan(&ir_codes_opera1_table[i]) == (send_key & 0xffff)) {
*state = REMOTE_KEY_PRESSED;
- *event = opera1_rc_keys[i].event;
+ *event = ir_codes_opera1_table[i].event;
opst->last_key_pressed =
- opera1_rc_keys[i].event;
+ ir_codes_opera1_table[i].event;
break;
}
opst->last_key_pressed = 0;
@@ -498,8 +498,8 @@ static struct dvb_usb_device_properties opera1_properties = {
.power_ctrl = opera1_power_ctrl,
.i2c_algo = &opera1_i2c_algo,
- .rc_key_map = opera1_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(opera1_rc_keys),
+ .rc_key_map = ir_codes_opera1_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_opera1_table),
.rc_interval = 200,
.rc_query = opera1_rc_query,
.read_mac_address = opera1_read_mac_address,
diff --git a/drivers/media/dvb/dvb-usb/usb-urb.c b/drivers/media/dvb/dvb-usb/usb-urb.c
index f9702e3756b6..86d68933b6b4 100644
--- a/drivers/media/dvb/dvb-usb/usb-urb.c
+++ b/drivers/media/dvb/dvb-usb/usb-urb.c
@@ -96,8 +96,9 @@ static int usb_free_stream_buffers(struct usb_data_stream *stream)
while (stream->buf_num) {
stream->buf_num--;
deb_mem("freeing buffer %d\n",stream->buf_num);
- usb_buffer_free(stream->udev, stream->buf_size,
- stream->buf_list[stream->buf_num], stream->dma_addr[stream->buf_num]);
+ usb_free_coherent(stream->udev, stream->buf_size,
+ stream->buf_list[stream->buf_num],
+ stream->dma_addr[stream->buf_num]);
}
}
@@ -116,7 +117,7 @@ static int usb_allocate_stream_buffers(struct usb_data_stream *stream, int num,
for (stream->buf_num = 0; stream->buf_num < num; stream->buf_num++) {
deb_mem("allocating buffer %d\n",stream->buf_num);
if (( stream->buf_list[stream->buf_num] =
- usb_buffer_alloc(stream->udev, size, GFP_ATOMIC,
+ usb_alloc_coherent(stream->udev, size, GFP_ATOMIC,
&stream->dma_addr[stream->buf_num]) ) == NULL) {
deb_mem("not enough memory for urb-buffer allocation.\n");
usb_free_stream_buffers(stream);
diff --git a/drivers/media/dvb/dvb-usb/vp702x.c b/drivers/media/dvb/dvb-usb/vp702x.c
index ef4e37d9c5ff..4d332451653b 100644
--- a/drivers/media/dvb/dvb-usb/vp702x.c
+++ b/drivers/media/dvb/dvb-usb/vp702x.c
@@ -174,7 +174,7 @@ static int vp702x_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
}
/* keys for the enclosed remote control */
-static struct dvb_usb_rc_key vp702x_rc_keys[] = {
+static struct dvb_usb_rc_key ir_codes_vp702x_table[] = {
{ 0x0001, KEY_1 },
{ 0x0002, KEY_2 },
};
@@ -197,10 +197,10 @@ static int vp702x_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
return 0;
}
- for (i = 0; i < ARRAY_SIZE(vp702x_rc_keys); i++)
- if (rc5_custom(&vp702x_rc_keys[i]) == key[1]) {
+ for (i = 0; i < ARRAY_SIZE(ir_codes_vp702x_table); i++)
+ if (rc5_custom(&ir_codes_vp702x_table[i]) == key[1]) {
*state = REMOTE_KEY_PRESSED;
- *event = vp702x_rc_keys[i].event;
+ *event = ir_codes_vp702x_table[i].event;
break;
}
return 0;
@@ -283,8 +283,8 @@ static struct dvb_usb_device_properties vp702x_properties = {
},
.read_mac_address = vp702x_read_mac_addr,
- .rc_key_map = vp702x_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(vp702x_rc_keys),
+ .rc_key_map = ir_codes_vp702x_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_vp702x_table),
.rc_interval = 400,
.rc_query = vp702x_rc_query,
diff --git a/drivers/media/dvb/dvb-usb/vp7045.c b/drivers/media/dvb/dvb-usb/vp7045.c
index a59faa27912a..036893fa4480 100644
--- a/drivers/media/dvb/dvb-usb/vp7045.c
+++ b/drivers/media/dvb/dvb-usb/vp7045.c
@@ -99,7 +99,7 @@ static int vp7045_power_ctrl(struct dvb_usb_device *d, int onoff)
/* The keymapping struct. Somehow this should be loaded to the driver, but
* currently it is hardcoded. */
-static struct dvb_usb_rc_key vp7045_rc_keys[] = {
+static struct dvb_usb_rc_key ir_codes_vp7045_table[] = {
{ 0x0016, KEY_POWER },
{ 0x0010, KEY_MUTE },
{ 0x0003, KEY_1 },
@@ -165,10 +165,10 @@ static int vp7045_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
return 0;
}
- for (i = 0; i < ARRAY_SIZE(vp7045_rc_keys); i++)
- if (rc5_data(&vp7045_rc_keys[i]) == key) {
+ for (i = 0; i < ARRAY_SIZE(ir_codes_vp7045_table); i++)
+ if (rc5_data(&ir_codes_vp7045_table[i]) == key) {
*state = REMOTE_KEY_PRESSED;
- *event = vp7045_rc_keys[i].event;
+ *event = ir_codes_vp7045_table[i].event;
break;
}
return 0;
@@ -260,8 +260,8 @@ static struct dvb_usb_device_properties vp7045_properties = {
.read_mac_address = vp7045_read_mac_addr,
.rc_interval = 400,
- .rc_key_map = vp7045_rc_keys,
- .rc_key_map_size = ARRAY_SIZE(vp7045_rc_keys),
+ .rc_key_map = ir_codes_vp7045_table,
+ .rc_key_map_size = ARRAY_SIZE(ir_codes_vp7045_table),
.rc_query = vp7045_rc_query,
.num_device_descs = 2,
diff --git a/drivers/media/dvb/firewire/firedtv-avc.c b/drivers/media/dvb/firewire/firedtv-avc.c
index 1b31bebc27d6..28294af752db 100644
--- a/drivers/media/dvb/firewire/firedtv-avc.c
+++ b/drivers/media/dvb/firewire/firedtv-avc.c
@@ -1096,7 +1096,7 @@ int avc_ca_pmt(struct firedtv *fdtv, char *msg, int length)
c->operand[15] = msg[1]; /* Program number */
c->operand[16] = msg[2];
- c->operand[17] = 0x01; /* Version number=0 + current/next=1 */
+ c->operand[17] = msg[3]; /* Version number and current/next */
c->operand[18] = 0x00; /* Section number=0 */
c->operand[19] = 0x00; /* Last section number=0 */
c->operand[20] = 0x1f; /* PCR_PID=1FFF */
diff --git a/drivers/media/dvb/firewire/firedtv-ci.c b/drivers/media/dvb/firewire/firedtv-ci.c
index 853e04b7cb36..d3c2cf60de76 100644
--- a/drivers/media/dvb/firewire/firedtv-ci.c
+++ b/drivers/media/dvb/firewire/firedtv-ci.c
@@ -175,8 +175,7 @@ static int fdtv_ca_send_msg(struct firedtv *fdtv, void *arg)
return err;
}
-static int fdtv_ca_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, void *arg)
+static int fdtv_ca_ioctl(struct file *file, unsigned int cmd, void *arg)
{
struct dvb_device *dvbdev = file->private_data;
struct firedtv *fdtv = dvbdev->priv;
@@ -217,7 +216,7 @@ static unsigned int fdtv_ca_io_poll(struct file *file, poll_table *wait)
static const struct file_operations fdtv_ca_fops = {
.owner = THIS_MODULE,
- .ioctl = dvb_generic_ioctl,
+ .unlocked_ioctl = dvb_generic_ioctl,
.open = dvb_generic_open,
.release = dvb_generic_release,
.poll = fdtv_ca_io_poll,
diff --git a/drivers/media/dvb/frontends/atbm8830_priv.h b/drivers/media/dvb/frontends/atbm8830_priv.h
index ce960f76092a..d460058d497e 100644
--- a/drivers/media/dvb/frontends/atbm8830_priv.h
+++ b/drivers/media/dvb/frontends/atbm8830_priv.h
@@ -18,7 +18,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-
+
#ifndef __ATBM8830_PRIV_H
#define __ATBM8830_PRIV_H
diff --git a/drivers/media/dvb/frontends/au8522_decoder.c b/drivers/media/dvb/frontends/au8522_decoder.c
index 24268ef2753d..68dba3a4b4da 100644
--- a/drivers/media/dvb/frontends/au8522_decoder.c
+++ b/drivers/media/dvb/frontends/au8522_decoder.c
@@ -664,6 +664,13 @@ static int au8522_reset(struct v4l2_subdev *sd, u32 val)
{
struct au8522_state *state = to_state(sd);
+ state->operational_mode = AU8522_ANALOG_MODE;
+
+ /* Clear out any state associated with the digital side of the
+ chip, so that when it gets powered back up it won't think
+ that it is already tuned */
+ state->current_frequency = 0;
+
au8522_writereg(state, 0xa4, 1 << 5);
return 0;
diff --git a/drivers/media/dvb/frontends/au8522_dig.c b/drivers/media/dvb/frontends/au8522_dig.c
index a1fed0fa8ed4..65f6a36dfb21 100644
--- a/drivers/media/dvb/frontends/au8522_dig.c
+++ b/drivers/media/dvb/frontends/au8522_dig.c
@@ -84,6 +84,14 @@ static int au8522_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
dprintk("%s(%d)\n", __func__, enable);
+ if (state->operational_mode == AU8522_ANALOG_MODE) {
+ /* We're being asked to manage the gate even though we're
+ not in digital mode. This can occur if we get switched
+ over to analog mode before the dvb_frontend kernel thread
+ has completely shut down */
+ return 0;
+ }
+
if (enable)
return au8522_writereg(state, 0x106, 1);
else
@@ -608,6 +616,13 @@ int au8522_init(struct dvb_frontend *fe)
struct au8522_state *state = fe->demodulator_priv;
dprintk("%s()\n", __func__);
+ state->operational_mode = AU8522_DIGITAL_MODE;
+
+ /* Clear out any state associated with the digital side of the
+ chip, so that when it gets powered back up it won't think
+ that it is already tuned */
+ state->current_frequency = 0;
+
au8522_writereg(state, 0xa4, 1 << 5);
au8522_i2c_gate_ctrl(fe, 1);
@@ -704,6 +719,15 @@ int au8522_sleep(struct dvb_frontend *fe)
struct au8522_state *state = fe->demodulator_priv;
dprintk("%s()\n", __func__);
+ /* Only power down if the digital side is currently using the chip */
+ if (state->operational_mode == AU8522_ANALOG_MODE) {
+ /* We're not in one of the expected power modes, which means
+ that the DVB thread is probably telling us to go to sleep
+ even though the analog frontend has already started using
+ the chip. So ignore the request */
+ return 0;
+ }
+
/* turn off led */
au8522_led_ctrl(state, 0);
@@ -932,6 +956,8 @@ struct dvb_frontend *au8522_attach(const struct au8522_config *config,
/* setup the state */
state->config = config;
state->i2c = i2c;
+ state->operational_mode = AU8522_DIGITAL_MODE;
+
/* create dvb_frontend */
memcpy(&state->frontend.ops, &au8522_ops,
sizeof(struct dvb_frontend_ops));
diff --git a/drivers/media/dvb/frontends/au8522_priv.h b/drivers/media/dvb/frontends/au8522_priv.h
index c74c4e72fe91..609cf04bc312 100644
--- a/drivers/media/dvb/frontends/au8522_priv.h
+++ b/drivers/media/dvb/frontends/au8522_priv.h
@@ -34,10 +34,15 @@
#include "au8522.h"
#include "tuner-i2c.h"
+#define AU8522_ANALOG_MODE 0
+#define AU8522_DIGITAL_MODE 1
+
struct au8522_state {
struct i2c_client *c;
struct i2c_adapter *i2c;
+ u8 operational_mode;
+
/* Used for sharing of the state between analog and digital mode */
struct tuner_i2c_props i2c_props;
struct list_head hybrid_tuner_instance_list;
diff --git a/drivers/media/dvb/frontends/dib3000mc.c b/drivers/media/dvb/frontends/dib3000mc.c
index 40a099810279..afad252abf41 100644
--- a/drivers/media/dvb/frontends/dib3000mc.c
+++ b/drivers/media/dvb/frontends/dib3000mc.c
@@ -814,42 +814,51 @@ EXPORT_SYMBOL(dib3000mc_set_config);
int dib3000mc_i2c_enumeration(struct i2c_adapter *i2c, int no_of_demods, u8 default_addr, struct dib3000mc_config cfg[])
{
- struct dib3000mc_state st = { .i2c_adap = i2c };
+ struct dib3000mc_state *dmcst;
int k;
u8 new_addr;
static u8 DIB3000MC_I2C_ADDRESS[] = {20,22,24,26};
+ dmcst = kzalloc(sizeof(struct dib3000mc_state), GFP_KERNEL);
+ if (dmcst == NULL)
+ return -ENODEV;
+
+ dmcst->i2c_adap = i2c;
+
for (k = no_of_demods-1; k >= 0; k--) {
- st.cfg = &cfg[k];
+ dmcst->cfg = &cfg[k];
/* designated i2c address */
new_addr = DIB3000MC_I2C_ADDRESS[k];
- st.i2c_addr = new_addr;
- if (dib3000mc_identify(&st) != 0) {
- st.i2c_addr = default_addr;
- if (dib3000mc_identify(&st) != 0) {
+ dmcst->i2c_addr = new_addr;
+ if (dib3000mc_identify(dmcst) != 0) {
+ dmcst->i2c_addr = default_addr;
+ if (dib3000mc_identify(dmcst) != 0) {
dprintk("-E- DiB3000P/MC #%d: not identified\n", k);
+ kfree(dmcst);
return -ENODEV;
}
}
- dib3000mc_set_output_mode(&st, OUTMODE_MPEG2_PAR_CONT_CLK);
+ dib3000mc_set_output_mode(dmcst, OUTMODE_MPEG2_PAR_CONT_CLK);
// set new i2c address and force divstr (Bit 1) to value 0 (Bit 0)
- dib3000mc_write_word(&st, 1024, (new_addr << 3) | 0x1);
- st.i2c_addr = new_addr;
+ dib3000mc_write_word(dmcst, 1024, (new_addr << 3) | 0x1);
+ dmcst->i2c_addr = new_addr;
}
for (k = 0; k < no_of_demods; k++) {
- st.cfg = &cfg[k];
- st.i2c_addr = DIB3000MC_I2C_ADDRESS[k];
+ dmcst->cfg = &cfg[k];
+ dmcst->i2c_addr = DIB3000MC_I2C_ADDRESS[k];
- dib3000mc_write_word(&st, 1024, st.i2c_addr << 3);
+ dib3000mc_write_word(dmcst, 1024, dmcst->i2c_addr << 3);
/* turn off data output */
- dib3000mc_set_output_mode(&st, OUTMODE_HIGH_Z);
+ dib3000mc_set_output_mode(dmcst, OUTMODE_HIGH_Z);
}
+
+ kfree(dmcst);
return 0;
}
EXPORT_SYMBOL(dib3000mc_i2c_enumeration);
diff --git a/drivers/media/dvb/frontends/dib7000p.c b/drivers/media/dvb/frontends/dib7000p.c
index 85468a45c344..2e28b973dfd3 100644
--- a/drivers/media/dvb/frontends/dib7000p.c
+++ b/drivers/media/dvb/frontends/dib7000p.c
@@ -1324,46 +1324,54 @@ EXPORT_SYMBOL(dib7000p_pid_filter);
int dib7000p_i2c_enumeration(struct i2c_adapter *i2c, int no_of_demods, u8 default_addr, struct dib7000p_config cfg[])
{
- struct dib7000p_state st = { .i2c_adap = i2c };
+ struct dib7000p_state *dpst;
int k = 0;
u8 new_addr = 0;
+ dpst = kzalloc(sizeof(struct dib7000p_state), GFP_KERNEL);
+ if (!dpst)
+ return -ENOMEM;
+
+ dpst->i2c_adap = i2c;
+
for (k = no_of_demods-1; k >= 0; k--) {
- st.cfg = cfg[k];
+ dpst->cfg = cfg[k];
/* designated i2c address */
new_addr = (0x40 + k) << 1;
- st.i2c_addr = new_addr;
- dib7000p_write_word(&st, 1287, 0x0003); /* sram lead in, rdy */
- if (dib7000p_identify(&st) != 0) {
- st.i2c_addr = default_addr;
- dib7000p_write_word(&st, 1287, 0x0003); /* sram lead in, rdy */
- if (dib7000p_identify(&st) != 0) {
+ dpst->i2c_addr = new_addr;
+ dib7000p_write_word(dpst, 1287, 0x0003); /* sram lead in, rdy */
+ if (dib7000p_identify(dpst) != 0) {
+ dpst->i2c_addr = default_addr;
+ dib7000p_write_word(dpst, 1287, 0x0003); /* sram lead in, rdy */
+ if (dib7000p_identify(dpst) != 0) {
dprintk("DiB7000P #%d: not identified\n", k);
+ kfree(dpst);
return -EIO;
}
}
/* start diversity to pull_down div_str - just for i2c-enumeration */
- dib7000p_set_output_mode(&st, OUTMODE_DIVERSITY);
+ dib7000p_set_output_mode(dpst, OUTMODE_DIVERSITY);
/* set new i2c address and force divstart */
- dib7000p_write_word(&st, 1285, (new_addr << 2) | 0x2);
+ dib7000p_write_word(dpst, 1285, (new_addr << 2) | 0x2);
dprintk("IC %d initialized (to i2c_address 0x%x)", k, new_addr);
}
for (k = 0; k < no_of_demods; k++) {
- st.cfg = cfg[k];
- st.i2c_addr = (0x40 + k) << 1;
+ dpst->cfg = cfg[k];
+ dpst->i2c_addr = (0x40 + k) << 1;
// unforce divstr
- dib7000p_write_word(&st, 1285, st.i2c_addr << 2);
+ dib7000p_write_word(dpst, 1285, dpst->i2c_addr << 2);
/* deactivate div - it was just for i2c-enumeration */
- dib7000p_set_output_mode(&st, OUTMODE_HIGH_Z);
+ dib7000p_set_output_mode(dpst, OUTMODE_HIGH_Z);
}
+ kfree(dpst);
return 0;
}
EXPORT_SYMBOL(dib7000p_i2c_enumeration);
diff --git a/drivers/media/dvb/frontends/dib8000.h b/drivers/media/dvb/frontends/dib8000.h
index b1ee20799639..e0a9ded11df4 100644
--- a/drivers/media/dvb/frontends/dib8000.h
+++ b/drivers/media/dvb/frontends/dib8000.h
@@ -109,6 +109,7 @@ static inline void dib8000_pwm_agc_reset(struct dvb_frontend *fe)
static inline s32 dib8000_get_adc_power(struct dvb_frontend *fe, u8 mode)
{
printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return 0;
}
#endif
diff --git a/drivers/media/dvb/frontends/ds3000.c b/drivers/media/dvb/frontends/ds3000.c
index cff3535566fe..78001e8bcdb7 100644
--- a/drivers/media/dvb/frontends/ds3000.c
+++ b/drivers/media/dvb/frontends/ds3000.c
@@ -719,7 +719,7 @@ static int ds3000_read_snr(struct dvb_frontend *fe, u16 *snr)
(ds3000_readreg(state, 0x8d) << 4);
dvbs2_signal_reading = ds3000_readreg(state, 0x8e);
tmp = dvbs2_signal_reading * dvbs2_signal_reading >> 1;
- if (dvbs2_signal_reading == 0) {
+ if (tmp == 0) {
*snr = 0x0000;
return 0;
}
diff --git a/drivers/media/dvb/frontends/stv0900_core.c b/drivers/media/dvb/frontends/stv0900_core.c
index 01f8f1f802fd..4f5e7d3a0e61 100644
--- a/drivers/media/dvb/frontends/stv0900_core.c
+++ b/drivers/media/dvb/frontends/stv0900_core.c
@@ -1583,7 +1583,7 @@ static enum dvbfe_search stv0900_search(struct dvb_frontend *fe,
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct stv0900_search_params p_search;
- struct stv0900_signal_info p_result;
+ struct stv0900_signal_info p_result = intp->result[demod];
enum fe_stv0900_error error = STV0900_NO_ERROR;
@@ -1842,6 +1842,19 @@ static void stv0900_release(struct dvb_frontend *fe)
kfree(state);
}
+static int stv0900_get_frontend(struct dvb_frontend *fe,
+ struct dvb_frontend_parameters *p)
+{
+ struct stv0900_state *state = fe->demodulator_priv;
+ struct stv0900_internal *intp = state->internal;
+ enum fe_stv0900_demod_num demod = state->demod;
+ struct stv0900_signal_info p_result = intp->result[demod];
+
+ p->frequency = p_result.locked ? p_result.frequency : 0;
+ p->u.qpsk.symbol_rate = p_result.locked ? p_result.symbol_rate : 0;
+ return 0;
+}
+
static struct dvb_frontend_ops stv0900_ops = {
.info = {
@@ -1862,6 +1875,7 @@ static struct dvb_frontend_ops stv0900_ops = {
},
.release = stv0900_release,
.init = stv0900_init,
+ .get_frontend = stv0900_get_frontend,
.get_frontend_algo = stv0900_frontend_algo,
.i2c_gate_ctrl = stv0900_i2c_gate_ctrl,
.diseqc_send_master_cmd = stv0900_send_master_cmd,
diff --git a/drivers/media/dvb/frontends/stv090x.c b/drivers/media/dvb/frontends/stv090x.c
index 96972804f4ad..425e7a43ae19 100644
--- a/drivers/media/dvb/frontends/stv090x.c
+++ b/drivers/media/dvb/frontends/stv090x.c
@@ -754,11 +754,19 @@ static int stv090x_write_reg(struct stv090x_state *state, unsigned int reg, u8 d
return stv090x_write_regs(state, reg, &data, 1);
}
-static int stv090x_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
+static int stv090x_i2c_gate_ctrl(struct stv090x_state *state, int enable)
{
- struct stv090x_state *state = fe->demodulator_priv;
u32 reg;
+ /*
+ * NOTE! A lock is used as an FSM to control the state in which
+ * access is serialized between two tuners on the same demod.
+ * This has nothing to do with a lock protecting a critical section,
+ * which may in some other cases be confused with protecting I/O
+ * access to the demodulator gate.
+ * In case of any error, the lock is released and the exit is
+ * handled within the relevant operations themselves.
+ */
if (enable)
mutex_lock(&state->internal->tuner_lock);
@@ -1778,7 +1786,7 @@ static u32 stv090x_srate_srch_coarse(struct stv090x_state *state)
freq -= cur_step * car_step;
/* Setup tuner */
- if (stv090x_i2c_gate_ctrl(fe, 1) < 0)
+ if (stv090x_i2c_gate_ctrl(state, 1) < 0)
goto err;
if (state->config->tuner_set_frequency) {
@@ -1791,12 +1799,12 @@ static u32 stv090x_srate_srch_coarse(struct stv090x_state *state)
goto err_gateoff;
}
- if (stv090x_i2c_gate_ctrl(fe, 0) < 0)
+ if (stv090x_i2c_gate_ctrl(state, 0) < 0)
goto err;
msleep(50);
- if (stv090x_i2c_gate_ctrl(fe, 1) < 0)
+ if (stv090x_i2c_gate_ctrl(state, 1) < 0)
goto err;
if (state->config->tuner_get_status) {
@@ -1809,7 +1817,7 @@ static u32 stv090x_srate_srch_coarse(struct stv090x_state *state)
else
dprintk(FE_DEBUG, 1, "Tuner unlocked");
- if (stv090x_i2c_gate_ctrl(fe, 0) < 0)
+ if (stv090x_i2c_gate_ctrl(state, 0) < 0)
goto err;
}
@@ -1822,7 +1830,7 @@ static u32 stv090x_srate_srch_coarse(struct stv090x_state *state)
return srate_coarse;
err_gateoff:
- stv090x_i2c_gate_ctrl(fe, 0);
+ stv090x_i2c_gate_ctrl(state, 0);
err:
dprintk(FE_ERROR, 1, "I/O error");
return -1;
@@ -2167,7 +2175,7 @@ static int stv090x_get_coldlock(struct stv090x_state *state, s32 timeout_dmd)
freq -= cur_step * car_step;
/* Setup tuner */
- if (stv090x_i2c_gate_ctrl(fe, 1) < 0)
+ if (stv090x_i2c_gate_ctrl(state, 1) < 0)
goto err;
if (state->config->tuner_set_frequency) {
@@ -2180,12 +2188,12 @@ static int stv090x_get_coldlock(struct stv090x_state *state, s32 timeout_dmd)
goto err_gateoff;
}
- if (stv090x_i2c_gate_ctrl(fe, 0) < 0)
+ if (stv090x_i2c_gate_ctrl(state, 0) < 0)
goto err;
msleep(50);
- if (stv090x_i2c_gate_ctrl(fe, 1) < 0)
+ if (stv090x_i2c_gate_ctrl(state, 1) < 0)
goto err;
if (state->config->tuner_get_status) {
@@ -2198,7 +2206,7 @@ static int stv090x_get_coldlock(struct stv090x_state *state, s32 timeout_dmd)
else
dprintk(FE_DEBUG, 1, "Tuner unlocked");
- if (stv090x_i2c_gate_ctrl(fe, 0) < 0)
+ if (stv090x_i2c_gate_ctrl(state, 0) < 0)
goto err;
STV090x_WRITE_DEMOD(state, DMDISTATE, 0x1c);
@@ -2222,7 +2230,7 @@ static int stv090x_get_coldlock(struct stv090x_state *state, s32 timeout_dmd)
return lock;
err_gateoff:
- stv090x_i2c_gate_ctrl(fe, 0);
+ stv090x_i2c_gate_ctrl(state, 0);
err:
dprintk(FE_ERROR, 1, "I/O error");
return -1;
@@ -2591,7 +2599,7 @@ static enum stv090x_signal_state stv090x_get_sig_params(struct stv090x_state *st
}
state->delsys = stv090x_get_std(state);
- if (stv090x_i2c_gate_ctrl(fe, 1) < 0)
+ if (stv090x_i2c_gate_ctrl(state, 1) < 0)
goto err;
if (state->config->tuner_get_frequency) {
@@ -2599,7 +2607,7 @@ static enum stv090x_signal_state stv090x_get_sig_params(struct stv090x_state *st
goto err_gateoff;
}
- if (stv090x_i2c_gate_ctrl(fe, 0) < 0)
+ if (stv090x_i2c_gate_ctrl(state, 0) < 0)
goto err;
offst_freq = stv090x_get_car_freq(state, state->internal->mclk) / 1000;
@@ -2619,7 +2627,7 @@ static enum stv090x_signal_state stv090x_get_sig_params(struct stv090x_state *st
if ((state->algo == STV090x_BLIND_SEARCH) || (state->srate < 10000000)) {
- if (stv090x_i2c_gate_ctrl(fe, 1) < 0)
+ if (stv090x_i2c_gate_ctrl(state, 1) < 0)
goto err;
if (state->config->tuner_get_frequency) {
@@ -2627,7 +2635,7 @@ static enum stv090x_signal_state stv090x_get_sig_params(struct stv090x_state *st
goto err_gateoff;
}
- if (stv090x_i2c_gate_ctrl(fe, 0) < 0)
+ if (stv090x_i2c_gate_ctrl(state, 0) < 0)
goto err;
if (abs(offst_freq) <= ((state->search_range / 2000) + 500))
@@ -2646,7 +2654,7 @@ static enum stv090x_signal_state stv090x_get_sig_params(struct stv090x_state *st
return STV090x_OUTOFRANGE;
err_gateoff:
- stv090x_i2c_gate_ctrl(fe, 0);
+ stv090x_i2c_gate_ctrl(state, 0);
err:
dprintk(FE_ERROR, 1, "I/O error");
return -1;
@@ -3000,7 +3008,7 @@ static int stv090x_optimize_track(struct stv090x_state *state)
if (state->algo != STV090x_WARM_SEARCH) {
- if (stv090x_i2c_gate_ctrl(fe, 1) < 0)
+ if (stv090x_i2c_gate_ctrl(state, 1) < 0)
goto err;
if (state->config->tuner_set_bandwidth) {
@@ -3008,7 +3016,7 @@ static int stv090x_optimize_track(struct stv090x_state *state)
goto err_gateoff;
}
- if (stv090x_i2c_gate_ctrl(fe, 0) < 0)
+ if (stv090x_i2c_gate_ctrl(state, 0) < 0)
goto err;
}
@@ -3059,7 +3067,7 @@ static int stv090x_optimize_track(struct stv090x_state *state)
return 0;
err_gateoff:
- stv090x_i2c_gate_ctrl(fe, 0);
+ stv090x_i2c_gate_ctrl(state, 0);
err:
dprintk(FE_ERROR, 1, "I/O error");
return -1;
@@ -3235,7 +3243,7 @@ static enum stv090x_signal_state stv090x_algo(struct stv090x_state *state)
}
/* Setup tuner */
- if (stv090x_i2c_gate_ctrl(fe, 1) < 0)
+ if (stv090x_i2c_gate_ctrl(state, 1) < 0)
goto err;
if (state->config->tuner_set_bbgain) {
@@ -3256,17 +3264,17 @@ static enum stv090x_signal_state stv090x_algo(struct stv090x_state *state)
goto err_gateoff;
}
- if (stv090x_i2c_gate_ctrl(fe, 0) < 0)
+ if (stv090x_i2c_gate_ctrl(state, 0) < 0)
goto err;
msleep(50);
if (state->config->tuner_get_status) {
- if (stv090x_i2c_gate_ctrl(fe, 1) < 0)
+ if (stv090x_i2c_gate_ctrl(state, 1) < 0)
goto err;
if (state->config->tuner_get_status(fe, &reg) < 0)
goto err_gateoff;
- if (stv090x_i2c_gate_ctrl(fe, 0) < 0)
+ if (stv090x_i2c_gate_ctrl(state, 0) < 0)
goto err;
if (reg)
@@ -3400,7 +3408,7 @@ static enum stv090x_signal_state stv090x_algo(struct stv090x_state *state)
return signal_state;
err_gateoff:
- stv090x_i2c_gate_ctrl(fe, 0);
+ stv090x_i2c_gate_ctrl(state, 0);
err:
dprintk(FE_ERROR, 1, "I/O error");
return -1;
@@ -3839,6 +3847,17 @@ static int stv090x_sleep(struct dvb_frontend *fe)
struct stv090x_state *state = fe->demodulator_priv;
u32 reg;
+ if (stv090x_i2c_gate_ctrl(state, 1) < 0)
+ goto err;
+
+ if (state->config->tuner_sleep) {
+ if (state->config->tuner_sleep(fe) < 0)
+ goto err_gateoff;
+ }
+
+ if (stv090x_i2c_gate_ctrl(state, 0) < 0)
+ goto err;
+
dprintk(FE_DEBUG, 1, "Set %s to sleep",
state->device == STV0900 ? "STV0900" : "STV0903");
@@ -3853,6 +3872,9 @@ static int stv090x_sleep(struct dvb_frontend *fe)
goto err;
return 0;
+
+err_gateoff:
+ stv090x_i2c_gate_ctrl(state, 0);
err:
dprintk(FE_ERROR, 1, "I/O error");
return -1;
@@ -4311,6 +4333,20 @@ static int stv090x_init(struct dvb_frontend *fe)
u32 reg;
if (state->internal->mclk == 0) {
+ /* call tuner init to configure the tuner's clock output
+ divider directly before setting up the master clock of
+ the stv090x. */
+ if (stv090x_i2c_gate_ctrl(state, 1) < 0)
+ goto err;
+
+ if (config->tuner_init) {
+ if (config->tuner_init(fe) < 0)
+ goto err_gateoff;
+ }
+
+ if (stv090x_i2c_gate_ctrl(state, 0) < 0)
+ goto err;
+
stv090x_set_mclk(state, 135000000, config->xtal); /* 135 Mhz */
msleep(5);
if (stv090x_write_reg(state, STV090x_SYNTCTRL,
@@ -4336,7 +4372,7 @@ static int stv090x_init(struct dvb_frontend *fe)
if (STV090x_WRITE_DEMOD(state, DEMOD, reg) < 0)
goto err;
- if (stv090x_i2c_gate_ctrl(fe, 1) < 0)
+ if (stv090x_i2c_gate_ctrl(state, 1) < 0)
goto err;
if (config->tuner_set_mode) {
@@ -4349,7 +4385,7 @@ static int stv090x_init(struct dvb_frontend *fe)
goto err_gateoff;
}
- if (stv090x_i2c_gate_ctrl(fe, 0) < 0)
+ if (stv090x_i2c_gate_ctrl(state, 0) < 0)
goto err;
if (stv090x_set_tspath(state) < 0)
@@ -4358,7 +4394,7 @@ static int stv090x_init(struct dvb_frontend *fe)
return 0;
err_gateoff:
- stv090x_i2c_gate_ctrl(fe, 0);
+ stv090x_i2c_gate_ctrl(state, 0);
err:
dprintk(FE_ERROR, 1, "I/O error");
return -1;
@@ -4503,8 +4539,6 @@ static struct dvb_frontend_ops stv090x_ops = {
.sleep = stv090x_sleep,
.get_frontend_algo = stv090x_frontend_algo,
- .i2c_gate_ctrl = stv090x_i2c_gate_ctrl,
-
.diseqc_send_master_cmd = stv090x_send_diseqc_msg,
.diseqc_send_burst = stv090x_send_diseqc_burst,
.diseqc_recv_slave_reply = stv090x_recv_slave_reply,
diff --git a/drivers/media/dvb/frontends/stv090x.h b/drivers/media/dvb/frontends/stv090x.h
index 30f01a6902ac..dd1b93ae4e9d 100644
--- a/drivers/media/dvb/frontends/stv090x.h
+++ b/drivers/media/dvb/frontends/stv090x.h
@@ -87,6 +87,7 @@ struct stv090x_config {
bool diseqc_envelope_mode;
int (*tuner_init) (struct dvb_frontend *fe);
+ int (*tuner_sleep) (struct dvb_frontend *fe);
int (*tuner_set_mode) (struct dvb_frontend *fe, enum tuner_mode mode);
int (*tuner_set_frequency) (struct dvb_frontend *fe, u32 frequency);
int (*tuner_get_frequency) (struct dvb_frontend *fe, u32 *frequency);
diff --git a/drivers/media/dvb/frontends/stv6110x.c b/drivers/media/dvb/frontends/stv6110x.c
index dea4245f077c..42591ce1aaad 100644
--- a/drivers/media/dvb/frontends/stv6110x.c
+++ b/drivers/media/dvb/frontends/stv6110x.c
@@ -338,14 +338,12 @@ static struct dvb_tuner_ops stv6110x_ops = {
.frequency_max = 2150000,
.frequency_step = 0,
},
-
- .init = stv6110x_init,
- .sleep = stv6110x_sleep,
.release = stv6110x_release
};
static struct stv6110x_devctl stv6110x_ctl = {
.tuner_init = stv6110x_init,
+ .tuner_sleep = stv6110x_sleep,
.tuner_set_mode = stv6110x_set_mode,
.tuner_set_frequency = stv6110x_set_frequency,
.tuner_get_frequency = stv6110x_get_frequency,
@@ -363,11 +361,10 @@ struct stv6110x_devctl *stv6110x_attach(struct dvb_frontend *fe,
{
struct stv6110x_state *stv6110x;
u8 default_regs[] = {0x07, 0x11, 0xdc, 0x85, 0x17, 0x01, 0xe6, 0x1e};
- int ret;
stv6110x = kzalloc(sizeof (struct stv6110x_state), GFP_KERNEL);
- if (stv6110x == NULL)
- goto error;
+ if (!stv6110x)
+ return NULL;
stv6110x->i2c = i2c;
stv6110x->config = config;
@@ -392,34 +389,11 @@ struct stv6110x_devctl *stv6110x_attach(struct dvb_frontend *fe,
break;
}
- if (fe->ops.i2c_gate_ctrl) {
- ret = fe->ops.i2c_gate_ctrl(fe, 1);
- if (ret < 0)
- goto error;
- }
-
- ret = stv6110x_write_regs(stv6110x, 0, stv6110x->regs,
- ARRAY_SIZE(stv6110x->regs));
- if (ret < 0) {
- dprintk(FE_ERROR, 1, "Initialization failed");
- goto error;
- }
-
- if (fe->ops.i2c_gate_ctrl) {
- ret = fe->ops.i2c_gate_ctrl(fe, 0);
- if (ret < 0)
- goto error;
- }
-
fe->tuner_priv = stv6110x;
fe->ops.tuner_ops = stv6110x_ops;
- printk("%s: Attaching STV6110x \n", __func__);
+ printk(KERN_INFO "%s: Attaching STV6110x\n", __func__);
return stv6110x->devctl;
-
-error:
- kfree(stv6110x);
- return NULL;
}
EXPORT_SYMBOL(stv6110x_attach);
diff --git a/drivers/media/dvb/frontends/stv6110x.h b/drivers/media/dvb/frontends/stv6110x.h
index 2429ae6d7847..47516753929a 100644
--- a/drivers/media/dvb/frontends/stv6110x.h
+++ b/drivers/media/dvb/frontends/stv6110x.h
@@ -40,6 +40,7 @@ enum tuner_status {
struct stv6110x_devctl {
int (*tuner_init) (struct dvb_frontend *fe);
+ int (*tuner_sleep) (struct dvb_frontend *fe);
int (*tuner_set_mode) (struct dvb_frontend *fe, enum tuner_mode mode);
int (*tuner_set_frequency) (struct dvb_frontend *fe, u32 frequency);
int (*tuner_get_frequency) (struct dvb_frontend *fe, u32 *frequency);
diff --git a/drivers/media/dvb/mantis/mantis_input.c b/drivers/media/dvb/mantis/mantis_input.c
index 4675a3b53c7d..3d4e4663220c 100644
--- a/drivers/media/dvb/mantis/mantis_input.c
+++ b/drivers/media/dvb/mantis/mantis_input.c
@@ -32,6 +32,8 @@
#include "mantis_reg.h"
#include "mantis_uart.h"
+#define MODULE_NAME "mantis_core"
+
static struct ir_scancode mantis_ir_table[] = {
{ 0x29, KEY_POWER },
{ 0x28, KEY_FAVORITES },
@@ -126,7 +128,7 @@ int mantis_input_init(struct mantis_pci *mantis)
rc->id.version = 1;
rc->dev = mantis->pdev->dev;
- err = ir_input_register(rc, &ir_mantis, NULL);
+ err = __ir_input_register(rc, &ir_mantis, NULL, MODULE_NAME);
if (err) {
dprintk(MANTIS_ERROR, 1, "IR device registration failed, ret = %d", err);
input_free_device(rc);
diff --git a/drivers/media/dvb/mantis/mantis_vp1041.c b/drivers/media/dvb/mantis/mantis_vp1041.c
index 515346dd31d0..d1aa2bc0c155 100644
--- a/drivers/media/dvb/mantis/mantis_vp1041.c
+++ b/drivers/media/dvb/mantis/mantis_vp1041.c
@@ -136,12 +136,12 @@ static const struct stb0899_s1_reg vp1041_stb0899_s1_init_3[] = {
{ STB0899_RCOMPC , 0xc9 },
{ STB0899_AGC1CN , 0x01 },
{ STB0899_AGC1REF , 0x10 },
- { STB0899_RTC , 0x23 },
+ { STB0899_RTC , 0x23 },
{ STB0899_TMGCFG , 0x4e },
{ STB0899_AGC2REF , 0x34 },
{ STB0899_TLSR , 0x84 },
{ STB0899_CFD , 0xf7 },
- { STB0899_ACLC , 0x87 },
+ { STB0899_ACLC , 0x87 },
{ STB0899_BCLC , 0x94 },
{ STB0899_EQON , 0x41 },
{ STB0899_LDT , 0xf1 },
@@ -194,10 +194,10 @@ static const struct stb0899_s1_reg vp1041_stb0899_s1_init_3[] = {
{ STB0899_ECNT3M , 0x0a },
{ STB0899_ECNT3L , 0xad },
{ STB0899_FECAUTO1 , 0x06 },
- { STB0899_FECM , 0x01 },
+ { STB0899_FECM , 0x01 },
{ STB0899_VTH12 , 0xb0 },
{ STB0899_VTH23 , 0x7a },
- { STB0899_VTH34 , 0x58 },
+ { STB0899_VTH34 , 0x58 },
{ STB0899_VTH56 , 0x38 },
{ STB0899_VTH67 , 0x34 },
{ STB0899_VTH78 , 0x24 },
@@ -206,7 +206,7 @@ static const struct stb0899_s1_reg vp1041_stb0899_s1_init_3[] = {
{ STB0899_RSULC , 0xb1 }, /* DVB = 0xb1, DSS = 0xa1 */
{ STB0899_TSULC , 0x42 },
{ STB0899_RSLLC , 0x41 },
- { STB0899_TSLPL , 0x12 },
+ { STB0899_TSLPL , 0x12 },
{ STB0899_TSCFGH , 0x0c },
{ STB0899_TSCFGM , 0x00 },
{ STB0899_TSCFGL , 0x00 },
diff --git a/drivers/media/dvb/ngene/Kconfig b/drivers/media/dvb/ngene/Kconfig
index 3ec8e6fcbb1d..cec242b7c00d 100644
--- a/drivers/media/dvb/ngene/Kconfig
+++ b/drivers/media/dvb/ngene/Kconfig
@@ -4,6 +4,8 @@ config DVB_NGENE
select DVB_LNBP21 if !DVB_FE_CUSTOMISE
select DVB_STV6110x if !DVB_FE_CUSTOMISE
select DVB_STV090x if !DVB_FE_CUSTOMISE
+ select DVB_LGDT330X if !DVB_FE_CUSTOMISE
+ select MEDIA_TUNER_MT2131 if !MEDIA_TUNER_CUSTOMISE
---help---
Support for Micronas PCI express cards with nGene bridge.
diff --git a/drivers/media/dvb/ngene/Makefile b/drivers/media/dvb/ngene/Makefile
index 40435cad4819..0608aabb14ee 100644
--- a/drivers/media/dvb/ngene/Makefile
+++ b/drivers/media/dvb/ngene/Makefile
@@ -2,10 +2,10 @@
# Makefile for the nGene device driver
#
-ngene-objs := ngene-core.o
+ngene-objs := ngene-core.o ngene-i2c.o ngene-cards.o ngene-dvb.o
obj-$(CONFIG_DVB_NGENE) += ngene.o
EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core/
EXTRA_CFLAGS += -Idrivers/media/dvb/frontends/
-
+EXTRA_CFLAGS += -Idrivers/media/common/tuners/
diff --git a/drivers/media/dvb/ngene/ngene-cards.c b/drivers/media/dvb/ngene/ngene-cards.c
new file mode 100644
index 000000000000..692c3e226e83
--- /dev/null
+++ b/drivers/media/dvb/ngene/ngene-cards.c
@@ -0,0 +1,328 @@
+/*
+ * ngene-cards.c: nGene PCIe bridge driver - card specific info
+ *
+ * Copyright (C) 2005-2007 Micronas
+ *
+ * Copyright (C) 2008-2009 Ralph Metzler <rjkm@metzlerbros.de>
+ * Modifications for new nGene firmware,
+ * support for EEPROM-copying,
+ * support for new dual DVB-S2 card prototype
+ *
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 only, as published by the Free Software Foundation.
+ *
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+
+#include "ngene.h"
+
+/* demods/tuners */
+#include "stv6110x.h"
+#include "stv090x.h"
+#include "lnbh24.h"
+#include "lgdt330x.h"
+#include "mt2131.h"
+
+
+/****************************************************************************/
+/* Demod/tuner attachment ***************************************************/
+/****************************************************************************/
+
+static int tuner_attach_stv6110(struct ngene_channel *chan)
+{
+ struct stv090x_config *feconf = (struct stv090x_config *)
+ chan->dev->card_info->fe_config[chan->number];
+ struct stv6110x_config *tunerconf = (struct stv6110x_config *)
+ chan->dev->card_info->tuner_config[chan->number];
+ struct stv6110x_devctl *ctl;
+
+ ctl = dvb_attach(stv6110x_attach, chan->fe, tunerconf,
+ &chan->i2c_adapter);
+ if (ctl == NULL) {
+ printk(KERN_ERR DEVICE_NAME ": No STV6110X found!\n");
+ return -ENODEV;
+ }
+
+ feconf->tuner_init = ctl->tuner_init;
+ feconf->tuner_set_mode = ctl->tuner_set_mode;
+ feconf->tuner_set_frequency = ctl->tuner_set_frequency;
+ feconf->tuner_get_frequency = ctl->tuner_get_frequency;
+ feconf->tuner_set_bandwidth = ctl->tuner_set_bandwidth;
+ feconf->tuner_get_bandwidth = ctl->tuner_get_bandwidth;
+ feconf->tuner_set_bbgain = ctl->tuner_set_bbgain;
+ feconf->tuner_get_bbgain = ctl->tuner_get_bbgain;
+ feconf->tuner_set_refclk = ctl->tuner_set_refclk;
+ feconf->tuner_get_status = ctl->tuner_get_status;
+
+ return 0;
+}
+
+
+static int demod_attach_stv0900(struct ngene_channel *chan)
+{
+ struct stv090x_config *feconf = (struct stv090x_config *)
+ chan->dev->card_info->fe_config[chan->number];
+
+ chan->fe = dvb_attach(stv090x_attach,
+ feconf,
+ &chan->i2c_adapter,
+ chan->number == 0 ? STV090x_DEMODULATOR_0 :
+ STV090x_DEMODULATOR_1);
+ if (chan->fe == NULL) {
+ printk(KERN_ERR DEVICE_NAME ": No STV0900 found!\n");
+ return -ENODEV;
+ }
+
+ if (!dvb_attach(lnbh24_attach, chan->fe, &chan->i2c_adapter, 0,
+ 0, chan->dev->card_info->lnb[chan->number])) {
+ printk(KERN_ERR DEVICE_NAME ": No LNBH24 found!\n");
+ dvb_frontend_detach(chan->fe);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static struct lgdt330x_config aver_m780 = {
+ .demod_address = 0xb2 >> 1,
+ .demod_chip = LGDT3303,
+ .serial_mpeg = 0x00, /* PARALLEL */
+ .clock_polarity_flip = 1,
+};
+
+static struct mt2131_config m780_tunerconfig = {
+ 0xc0 >> 1
+};
+
+/* A single function to attach the demod and tuner, rather than
+ * using two separate functions as the current design mandates.
+ */
+static int demod_attach_lg330x(struct ngene_channel *chan)
+{
+ chan->fe = dvb_attach(lgdt330x_attach, &aver_m780, &chan->i2c_adapter);
+ if (chan->fe == NULL) {
+ printk(KERN_ERR DEVICE_NAME ": No LGDT330x found!\n");
+ return -ENODEV;
+ }
+
+ dvb_attach(mt2131_attach, chan->fe, &chan->i2c_adapter,
+ &m780_tunerconfig, 0);
+
+ return (chan->fe) ? 0 : -ENODEV;
+}
+
+/****************************************************************************/
+/* Switch control (I2C gates, etc.) *****************************************/
+/****************************************************************************/
+
+
+static struct stv090x_config fe_cineS2 = {
+ .device = STV0900,
+ .demod_mode = STV090x_DUAL,
+ .clk_mode = STV090x_CLK_EXT,
+
+ .xtal = 27000000,
+ .address = 0x68,
+
+ .ts1_mode = STV090x_TSMODE_SERIAL_PUNCTURED,
+ .ts2_mode = STV090x_TSMODE_SERIAL_PUNCTURED,
+
+ .repeater_level = STV090x_RPTLEVEL_16,
+
+ .adc1_range = STV090x_ADC_1Vpp,
+ .adc2_range = STV090x_ADC_1Vpp,
+
+ .diseqc_envelope_mode = true,
+};
+
+static struct stv6110x_config tuner_cineS2_0 = {
+ .addr = 0x60,
+ .refclk = 27000000,
+ .clk_div = 1,
+};
+
+static struct stv6110x_config tuner_cineS2_1 = {
+ .addr = 0x63,
+ .refclk = 27000000,
+ .clk_div = 1,
+};
+
+static struct ngene_info ngene_info_cineS2 = {
+ .type = NGENE_SIDEWINDER,
+ .name = "Linux4Media cineS2 DVB-S2 Twin Tuner",
+ .io_type = {NGENE_IO_TSIN, NGENE_IO_TSIN},
+ .demod_attach = {demod_attach_stv0900, demod_attach_stv0900},
+ .tuner_attach = {tuner_attach_stv6110, tuner_attach_stv6110},
+ .fe_config = {&fe_cineS2, &fe_cineS2},
+ .tuner_config = {&tuner_cineS2_0, &tuner_cineS2_1},
+ .lnb = {0x0b, 0x08},
+ .tsf = {3, 3},
+ .fw_version = 15,
+};
+
+static struct ngene_info ngene_info_satixS2 = {
+ .type = NGENE_SIDEWINDER,
+ .name = "Mystique SaTiX-S2 Dual",
+ .io_type = {NGENE_IO_TSIN, NGENE_IO_TSIN},
+ .demod_attach = {demod_attach_stv0900, demod_attach_stv0900},
+ .tuner_attach = {tuner_attach_stv6110, tuner_attach_stv6110},
+ .fe_config = {&fe_cineS2, &fe_cineS2},
+ .tuner_config = {&tuner_cineS2_0, &tuner_cineS2_1},
+ .lnb = {0x0b, 0x08},
+ .tsf = {3, 3},
+ .fw_version = 15,
+};
+
+static struct ngene_info ngene_info_satixS2v2 = {
+ .type = NGENE_SIDEWINDER,
+ .name = "Mystique SaTiX-S2 Dual (v2)",
+ .io_type = {NGENE_IO_TSIN, NGENE_IO_TSIN},
+ .demod_attach = {demod_attach_stv0900, demod_attach_stv0900},
+ .tuner_attach = {tuner_attach_stv6110, tuner_attach_stv6110},
+ .fe_config = {&fe_cineS2, &fe_cineS2},
+ .tuner_config = {&tuner_cineS2_0, &tuner_cineS2_1},
+ .lnb = {0x0a, 0x08},
+ .tsf = {3, 3},
+ .fw_version = 15,
+};
+
+static struct ngene_info ngene_info_cineS2v5 = {
+ .type = NGENE_SIDEWINDER,
+ .name = "Linux4Media cineS2 DVB-S2 Twin Tuner (v5)",
+ .io_type = {NGENE_IO_TSIN, NGENE_IO_TSIN},
+ .demod_attach = {demod_attach_stv0900, demod_attach_stv0900},
+ .tuner_attach = {tuner_attach_stv6110, tuner_attach_stv6110},
+ .fe_config = {&fe_cineS2, &fe_cineS2},
+ .tuner_config = {&tuner_cineS2_0, &tuner_cineS2_1},
+ .lnb = {0x0a, 0x08},
+ .tsf = {3, 3},
+ .fw_version = 15,
+};
+
+static struct ngene_info ngene_info_m780 = {
+ .type = NGENE_APP,
+ .name = "Aver M780 ATSC/QAM-B",
+
+ /* Channel 0 is analog, which is currently unsupported */
+ .io_type = { NGENE_IO_NONE, NGENE_IO_TSIN },
+ .demod_attach = { NULL, demod_attach_lg330x },
+
+ /* Ensure these are NULL, else the framework will call them (as functions) */
+ .tuner_attach = { 0, 0, 0, 0 },
+ .fe_config = { NULL, &aver_m780 },
+ .avf = { 0 },
+
+ /* A custom electrical interface config for the demod to bridge */
+ .tsf = { 4, 4 },
+ .fw_version = 15,
+};
+
+/****************************************************************************/
+
+
+
+/****************************************************************************/
+/* PCI Subsystem ID *********************************************************/
+/****************************************************************************/
+
+#define NGENE_ID(_subvend, _subdev, _driverdata) { \
+ .vendor = NGENE_VID, .device = NGENE_PID, \
+ .subvendor = _subvend, .subdevice = _subdev, \
+ .driver_data = (unsigned long) &_driverdata }
+
+/****************************************************************************/
+
+static const struct pci_device_id ngene_id_tbl[] __devinitdata = {
+ NGENE_ID(0x18c3, 0xabc3, ngene_info_cineS2),
+ NGENE_ID(0x18c3, 0xabc4, ngene_info_cineS2),
+ NGENE_ID(0x18c3, 0xdb01, ngene_info_satixS2),
+ NGENE_ID(0x18c3, 0xdb02, ngene_info_satixS2v2),
+ NGENE_ID(0x18c3, 0xdd00, ngene_info_cineS2v5),
+ NGENE_ID(0x1461, 0x062e, ngene_info_m780),
+ {0}
+};
+MODULE_DEVICE_TABLE(pci, ngene_id_tbl);
+
+/****************************************************************************/
+/* Init/Exit ****************************************************************/
+/****************************************************************************/
+
+static pci_ers_result_t ngene_error_detected(struct pci_dev *dev,
+ enum pci_channel_state state)
+{
+ printk(KERN_ERR DEVICE_NAME ": PCI error\n");
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+ if (state == pci_channel_io_frozen)
+ return PCI_ERS_RESULT_NEED_RESET;
+ return PCI_ERS_RESULT_CAN_RECOVER;
+}
+
+static pci_ers_result_t ngene_link_reset(struct pci_dev *dev)
+{
+ printk(KERN_INFO DEVICE_NAME ": link reset\n");
+ return 0;
+}
+
+static pci_ers_result_t ngene_slot_reset(struct pci_dev *dev)
+{
+ printk(KERN_INFO DEVICE_NAME ": slot reset\n");
+ return 0;
+}
+
+static void ngene_resume(struct pci_dev *dev)
+{
+ printk(KERN_INFO DEVICE_NAME ": resume\n");
+}
+
+static struct pci_error_handlers ngene_errors = {
+ .error_detected = ngene_error_detected,
+ .link_reset = ngene_link_reset,
+ .slot_reset = ngene_slot_reset,
+ .resume = ngene_resume,
+};
+
+static struct pci_driver ngene_pci_driver = {
+ .name = "ngene",
+ .id_table = ngene_id_tbl,
+ .probe = ngene_probe,
+ .remove = __devexit_p(ngene_remove),
+ .err_handler = &ngene_errors,
+};
+
+static __init int module_init_ngene(void)
+{
+ printk(KERN_INFO
+ "nGene PCIE bridge driver, Copyright (C) 2005-2007 Micronas\n");
+ return pci_register_driver(&ngene_pci_driver);
+}
+
+static __exit void module_exit_ngene(void)
+{
+ pci_unregister_driver(&ngene_pci_driver);
+}
+
+module_init(module_init_ngene);
+module_exit(module_exit_ngene);
+
+MODULE_DESCRIPTION("nGene");
+MODULE_AUTHOR("Micronas, Ralph Metzler, Manfred Voelkel");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/ngene/ngene-core.c b/drivers/media/dvb/ngene/ngene-core.c
index 645e8b8a7137..c8b4dfa0ab5f 100644
--- a/drivers/media/dvb/ngene/ngene-core.c
+++ b/drivers/media/dvb/ngene/ngene-core.c
@@ -34,20 +34,14 @@
#include <linux/io.h>
#include <asm/div64.h>
#include <linux/pci.h>
-#include <linux/pci_ids.h>
#include <linux/smp_lock.h>
#include <linux/timer.h>
-#include <linux/version.h>
#include <linux/byteorder/generic.h>
#include <linux/firmware.h>
#include <linux/vmalloc.h>
#include "ngene.h"
-#include "stv6110x.h"
-#include "stv090x.h"
-#include "lnbh24.h"
-
static int one_adapter = 1;
module_param(one_adapter, int, 0444);
MODULE_PARM_DESC(one_adapter, "Use only one adapter.");
@@ -63,8 +57,6 @@ DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
#define dprintk if (debug) printk
-#define DEVICE_NAME "ngene"
-
#define ngwriteb(dat, adr) writeb((dat), (char *)(dev->iomem + (adr)))
#define ngwritel(dat, adr) writel((dat), (char *)(dev->iomem + (adr)))
#define ngwriteb(dat, adr) writeb((dat), (char *)(dev->iomem + (adr)))
@@ -352,7 +344,7 @@ static int ngene_command_mutex(struct ngene *dev, struct ngene_command *com)
return 0;
}
-static int ngene_command(struct ngene *dev, struct ngene_command *com)
+int ngene_command(struct ngene *dev, struct ngene_command *com)
{
int result;
@@ -363,55 +355,6 @@ static int ngene_command(struct ngene *dev, struct ngene_command *com)
}
-static int ngene_command_i2c_read(struct ngene *dev, u8 adr,
- u8 *out, u8 outlen, u8 *in, u8 inlen, int flag)
-{
- struct ngene_command com;
-
- com.cmd.hdr.Opcode = CMD_I2C_READ;
- com.cmd.hdr.Length = outlen + 3;
- com.cmd.I2CRead.Device = adr << 1;
- memcpy(com.cmd.I2CRead.Data, out, outlen);
- com.cmd.I2CRead.Data[outlen] = inlen;
- com.cmd.I2CRead.Data[outlen + 1] = 0;
- com.in_len = outlen + 3;
- com.out_len = inlen + 1;
-
- if (ngene_command(dev, &com) < 0)
- return -EIO;
-
- if ((com.cmd.raw8[0] >> 1) != adr)
- return -EIO;
-
- if (flag)
- memcpy(in, com.cmd.raw8, inlen + 1);
- else
- memcpy(in, com.cmd.raw8 + 1, inlen);
- return 0;
-}
-
-static int ngene_command_i2c_write(struct ngene *dev, u8 adr,
- u8 *out, u8 outlen)
-{
- struct ngene_command com;
-
-
- com.cmd.hdr.Opcode = CMD_I2C_WRITE;
- com.cmd.hdr.Length = outlen + 1;
- com.cmd.I2CRead.Device = adr << 1;
- memcpy(com.cmd.I2CRead.Data, out, outlen);
- com.in_len = outlen + 1;
- com.out_len = 1;
-
- if (ngene_command(dev, &com) < 0)
- return -EIO;
-
- if (com.cmd.raw8[0] == 1)
- return -EIO;
-
- return 0;
-}
-
static int ngene_command_load_firmware(struct ngene *dev,
u8 *ngene_fw, u32 size)
{
@@ -477,7 +420,7 @@ static int ngene_command_config_free_buf(struct ngene *dev, u8 *config)
return 0;
}
-static int ngene_command_gpio_set(struct ngene *dev, u8 select, u8 level)
+int ngene_command_gpio_set(struct ngene *dev, u8 select, u8 level)
{
struct ngene_command com;
@@ -514,11 +457,12 @@ static int ngene_command_gpio_set(struct ngene *dev, u8 select, u8 level)
/****************************************************************************/
-static u8 TSFeatureDecoderSetup[8 * 4] = {
+static u8 TSFeatureDecoderSetup[8 * 5] = {
0x42, 0x00, 0x00, 0x02, 0x02, 0xbc, 0x00, 0x00,
0x40, 0x06, 0x00, 0x02, 0x02, 0xbc, 0x00, 0x00, /* DRXH */
0x71, 0x07, 0x00, 0x02, 0x02, 0xbc, 0x00, 0x00, /* DRXHser */
0x72, 0x06, 0x00, 0x02, 0x02, 0xbc, 0x00, 0x00, /* S2ser */
+ 0x40, 0x07, 0x00, 0x02, 0x02, 0xbc, 0x00, 0x00, /* LGDT3303 */
};
/* Set NGENE I2S Config to 16 bit packed */
@@ -559,7 +503,7 @@ static u8 ITUFeatureDecoderSetup[8] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x78, 0x04, 0x00
};
-static void FillTSBuffer(void *Buffer, int Length, u32 Flags)
+void FillTSBuffer(void *Buffer, int Length, u32 Flags)
{
u32 *ptr = Buffer;
@@ -772,144 +716,7 @@ static int ngene_command_stream_control(struct ngene *dev, u8 stream,
return 0;
}
-
-/****************************************************************************/
-/* I2C **********************************************************************/
-/****************************************************************************/
-
-static void ngene_i2c_set_bus(struct ngene *dev, int bus)
-{
- if (!(dev->card_info->i2c_access & 2))
- return;
- if (dev->i2c_current_bus == bus)
- return;
-
- switch (bus) {
- case 0:
- ngene_command_gpio_set(dev, 3, 0);
- ngene_command_gpio_set(dev, 2, 1);
- break;
-
- case 1:
- ngene_command_gpio_set(dev, 2, 0);
- ngene_command_gpio_set(dev, 3, 1);
- break;
- }
- dev->i2c_current_bus = bus;
-}
-
-static int ngene_i2c_master_xfer(struct i2c_adapter *adapter,
- struct i2c_msg msg[], int num)
-{
- struct ngene_channel *chan =
- (struct ngene_channel *)i2c_get_adapdata(adapter);
- struct ngene *dev = chan->dev;
-
- down(&dev->i2c_switch_mutex);
- ngene_i2c_set_bus(dev, chan->number);
-
- if (num == 2 && msg[1].flags & I2C_M_RD && !(msg[0].flags & I2C_M_RD))
- if (!ngene_command_i2c_read(dev, msg[0].addr,
- msg[0].buf, msg[0].len,
- msg[1].buf, msg[1].len, 0))
- goto done;
-
- if (num == 1 && !(msg[0].flags & I2C_M_RD))
- if (!ngene_command_i2c_write(dev, msg[0].addr,
- msg[0].buf, msg[0].len))
- goto done;
- if (num == 1 && (msg[0].flags & I2C_M_RD))
- if (!ngene_command_i2c_read(dev, msg[0].addr, 0, 0,
- msg[0].buf, msg[0].len, 0))
- goto done;
-
- up(&dev->i2c_switch_mutex);
- return -EIO;
-
-done:
- up(&dev->i2c_switch_mutex);
- return num;
-}
-
-
-static u32 ngene_i2c_functionality(struct i2c_adapter *adap)
-{
- return I2C_FUNC_SMBUS_EMUL;
-}
-
-static struct i2c_algorithm ngene_i2c_algo = {
- .master_xfer = ngene_i2c_master_xfer,
- .functionality = ngene_i2c_functionality,
-};
-
-static int ngene_i2c_init(struct ngene *dev, int dev_nr)
-{
- struct i2c_adapter *adap = &(dev->channel[dev_nr].i2c_adapter);
-
- i2c_set_adapdata(adap, &(dev->channel[dev_nr]));
- adap->class = I2C_CLASS_TV_DIGITAL | I2C_CLASS_TV_ANALOG;
-
- strcpy(adap->name, "nGene");
-
- adap->algo = &ngene_i2c_algo;
- adap->algo_data = (void *)&(dev->channel[dev_nr]);
- adap->dev.parent = &dev->pci_dev->dev;
-
- return i2c_add_adapter(adap);
-}
-
-
-/****************************************************************************/
-/* DVB functions and API interface ******************************************/
-/****************************************************************************/
-
-static void swap_buffer(u32 *p, u32 len)
-{
- while (len) {
- *p = swab32(*p);
- p++;
- len -= 4;
- }
-}
-
-
-static void *tsin_exchange(void *priv, void *buf, u32 len, u32 clock, u32 flags)
-{
- struct ngene_channel *chan = priv;
-
-
-#ifdef COMMAND_TIMEOUT_WORKAROUND
- if (chan->users > 0)
-#endif
- dvb_dmx_swfilter(&chan->demux, buf, len);
- return 0;
-}
-
-u8 fill_ts[188] = { 0x47, 0x1f, 0xff, 0x10 };
-
-static void *tsout_exchange(void *priv, void *buf, u32 len,
- u32 clock, u32 flags)
-{
- struct ngene_channel *chan = priv;
- struct ngene *dev = chan->dev;
- u32 alen;
-
- alen = dvb_ringbuffer_avail(&dev->tsout_rbuf);
- alen -= alen % 188;
-
- if (alen < len)
- FillTSBuffer(buf + alen, len - alen, flags);
- else
- alen = len;
- dvb_ringbuffer_read(&dev->tsout_rbuf, buf, alen);
- if (flags & DF_SWAP32)
- swap_buffer((u32 *)buf, alen);
- wake_up_interruptible(&dev->tsout_rbuf.queue);
- return buf;
-}
-
-
-static void set_transfer(struct ngene_channel *chan, int state)
+void set_transfer(struct ngene_channel *chan, int state)
{
u8 control = 0, mode = 0, flags = 0;
struct ngene *dev = chan->dev;
@@ -970,85 +777,12 @@ static void set_transfer(struct ngene_channel *chan, int state)
state);
if (!state) {
spin_lock_irq(&chan->state_lock);
- chan->pBufferExchange = 0;
+ chan->pBufferExchange = NULL;
dvb_ringbuffer_flush(&dev->tsout_rbuf);
spin_unlock_irq(&chan->state_lock);
}
}
-static int ngene_start_feed(struct dvb_demux_feed *dvbdmxfeed)
-{
- struct dvb_demux *dvbdmx = dvbdmxfeed->demux;
- struct ngene_channel *chan = dvbdmx->priv;
-
- if (chan->users == 0) {
-#ifdef COMMAND_TIMEOUT_WORKAROUND
- if (!chan->running)
-#endif
- set_transfer(chan, 1);
- /* msleep(10); */
- }
-
- return ++chan->users;
-}
-
-static int ngene_stop_feed(struct dvb_demux_feed *dvbdmxfeed)
-{
- struct dvb_demux *dvbdmx = dvbdmxfeed->demux;
- struct ngene_channel *chan = dvbdmx->priv;
-
- if (--chan->users)
- return chan->users;
-
-#ifndef COMMAND_TIMEOUT_WORKAROUND
- set_transfer(chan, 0);
-#endif
-
- return 0;
-}
-
-
-
-static int my_dvb_dmx_ts_card_init(struct dvb_demux *dvbdemux, char *id,
- int (*start_feed)(struct dvb_demux_feed *),
- int (*stop_feed)(struct dvb_demux_feed *),
- void *priv)
-{
- dvbdemux->priv = priv;
-
- dvbdemux->filternum = 256;
- dvbdemux->feednum = 256;
- dvbdemux->start_feed = start_feed;
- dvbdemux->stop_feed = stop_feed;
- dvbdemux->write_to_decoder = 0;
- dvbdemux->dmx.capabilities = (DMX_TS_FILTERING |
- DMX_SECTION_FILTERING |
- DMX_MEMORY_BASED_FILTERING);
- return dvb_dmx_init(dvbdemux);
-}
-
-static int my_dvb_dmxdev_ts_card_init(struct dmxdev *dmxdev,
- struct dvb_demux *dvbdemux,
- struct dmx_frontend *hw_frontend,
- struct dmx_frontend *mem_frontend,
- struct dvb_adapter *dvb_adapter)
-{
- int ret;
-
- dmxdev->filternum = 256;
- dmxdev->demux = &dvbdemux->dmx;
- dmxdev->capabilities = 0;
- ret = dvb_dmxdev_init(dmxdev, dvb_adapter);
- if (ret < 0)
- return ret;
-
- hw_frontend->source = DMX_FRONTEND_0;
- dvbdemux->dmx.add_frontend(&dvbdemux->dmx, hw_frontend);
- mem_frontend->source = DMX_MEMORY_FE;
- dvbdemux->dmx.add_frontend(&dvbdemux->dmx, mem_frontend);
- return dvbdemux->dmx.connect_frontend(&dvbdemux->dmx, hw_frontend);
-}
-
/****************************************************************************/
/* nGene hardware init and release functions ********************************/
@@ -1094,8 +828,8 @@ static void free_idlebuffer(struct ngene *dev,
return;
free_ringbuffer(dev, rb);
for (j = 0; j < tb->NumBuffers; j++, Cur = Cur->Next) {
- Cur->Buffer2 = 0;
- Cur->scList2 = 0;
+ Cur->Buffer2 = NULL;
+ Cur->scList2 = NULL;
Cur->ngeneBuffer.Address_of_first_entry_2 = 0;
Cur->ngeneBuffer.Number_of_entries_2 = 0;
}
@@ -1141,7 +875,7 @@ static int create_ring_buffer(struct pci_dev *pci_dev,
u64 PARingBufferNext;
struct SBufferHeader *Cur, *Next;
- descr->Head = 0;
+ descr->Head = NULL;
descr->MemSize = 0;
descr->PAHead = 0;
descr->NumBuffers = 0;
@@ -1633,69 +1367,6 @@ fail:
-/****************************************************************************/
-/* Switch control (I2C gates, etc.) *****************************************/
-/****************************************************************************/
-
-
-/****************************************************************************/
-/* Demod/tuner attachment ***************************************************/
-/****************************************************************************/
-
-static int tuner_attach_stv6110(struct ngene_channel *chan)
-{
- struct stv090x_config *feconf = (struct stv090x_config *)
- chan->dev->card_info->fe_config[chan->number];
- struct stv6110x_config *tunerconf = (struct stv6110x_config *)
- chan->dev->card_info->tuner_config[chan->number];
- struct stv6110x_devctl *ctl;
-
- ctl = dvb_attach(stv6110x_attach, chan->fe, tunerconf,
- &chan->i2c_adapter);
- if (ctl == NULL) {
- printk(KERN_ERR DEVICE_NAME ": No STV6110X found!\n");
- return -ENODEV;
- }
-
- feconf->tuner_init = ctl->tuner_init;
- feconf->tuner_set_mode = ctl->tuner_set_mode;
- feconf->tuner_set_frequency = ctl->tuner_set_frequency;
- feconf->tuner_get_frequency = ctl->tuner_get_frequency;
- feconf->tuner_set_bandwidth = ctl->tuner_set_bandwidth;
- feconf->tuner_get_bandwidth = ctl->tuner_get_bandwidth;
- feconf->tuner_set_bbgain = ctl->tuner_set_bbgain;
- feconf->tuner_get_bbgain = ctl->tuner_get_bbgain;
- feconf->tuner_set_refclk = ctl->tuner_set_refclk;
- feconf->tuner_get_status = ctl->tuner_get_status;
-
- return 0;
-}
-
-
-static int demod_attach_stv0900(struct ngene_channel *chan)
-{
- struct stv090x_config *feconf = (struct stv090x_config *)
- chan->dev->card_info->fe_config[chan->number];
-
- chan->fe = dvb_attach(stv090x_attach,
- feconf,
- &chan->i2c_adapter,
- chan->number == 0 ? STV090x_DEMODULATOR_0 :
- STV090x_DEMODULATOR_1);
- if (chan->fe == NULL) {
- printk(KERN_ERR DEVICE_NAME ": No STV0900 found!\n");
- return -ENODEV;
- }
-
- if (!dvb_attach(lnbh24_attach, chan->fe, &chan->i2c_adapter, 0,
- 0, chan->dev->card_info->lnb[chan->number])) {
- printk(KERN_ERR DEVICE_NAME ": No LNBH24 found!\n");
- dvb_frontend_detach(chan->fe);
- return -ENODEV;
- }
-
- return 0;
-}
/****************************************************************************/
/****************************************************************************/
@@ -1719,7 +1390,7 @@ static void release_channel(struct ngene_channel *chan)
if (chan->fe) {
dvb_unregister_frontend(chan->fe);
dvb_frontend_detach(chan->fe);
- chan->fe = 0;
+ chan->fe = NULL;
}
dvbdemux->dmx.close(&dvbdemux->dmx);
dvbdemux->dmx.remove_frontend(&dvbdemux->dmx,
@@ -1751,7 +1422,7 @@ static int init_channel(struct ngene_channel *chan)
if (io & (NGENE_IO_TSIN | NGENE_IO_TSOUT)) {
if (nr >= STREAM_AUDIOIN1)
chan->DataFormatFlags = DF_SWAP32;
- if (nr == 0 || !one_adapter) {
+ if (nr == 0 || !one_adapter || dev->first_adapter == NULL) {
adapter = &dev->adapter[nr];
ret = dvb_register_adapter(adapter, "nGene",
THIS_MODULE,
@@ -1759,8 +1430,10 @@ static int init_channel(struct ngene_channel *chan)
adapter_nr);
if (ret < 0)
return ret;
+ if (dev->first_adapter == NULL)
+ dev->first_adapter = adapter;
} else {
- adapter = &dev->adapter[0];
+ adapter = dev->first_adapter;
}
ret = my_dvb_dmx_ts_card_init(dvbdemux, "SW demux",
@@ -1797,6 +1470,7 @@ static int init_channels(struct ngene *dev)
int i, j;
for (i = 0; i < MAX_STREAM; i++) {
+ dev->channel[i].number = i;
if (init_channel(&dev->channel[i]) < 0) {
for (j = i - 1; j >= 0; j--)
release_channel(&dev->channel[j]);
@@ -1810,7 +1484,7 @@ static int init_channels(struct ngene *dev)
/* device probe/remove calls ************************************************/
/****************************************************************************/
-static void __devexit ngene_remove(struct pci_dev *pdev)
+void __devexit ngene_remove(struct pci_dev *pdev)
{
struct ngene *dev = (struct ngene *)pci_get_drvdata(pdev);
int i;
@@ -1820,12 +1494,12 @@ static void __devexit ngene_remove(struct pci_dev *pdev)
release_channel(&dev->channel[i]);
ngene_stop(dev);
ngene_release_buffers(dev);
- pci_set_drvdata(pdev, 0);
+ pci_set_drvdata(pdev, NULL);
pci_disable_device(pdev);
}
-static int __devinit ngene_probe(struct pci_dev *pci_dev,
- const struct pci_device_id *id)
+int __devinit ngene_probe(struct pci_dev *pci_dev,
+ const struct pci_device_id *id)
{
struct ngene *dev;
int stat = 0;
@@ -1868,156 +1542,6 @@ fail1:
ngene_release_buffers(dev);
fail0:
pci_disable_device(pci_dev);
- pci_set_drvdata(pci_dev, 0);
+ pci_set_drvdata(pci_dev, NULL);
return stat;
}
-
-/****************************************************************************/
-/* Card configs *************************************************************/
-/****************************************************************************/
-
-static struct stv090x_config fe_cineS2 = {
- .device = STV0900,
- .demod_mode = STV090x_DUAL,
- .clk_mode = STV090x_CLK_EXT,
-
- .xtal = 27000000,
- .address = 0x68,
-
- .ts1_mode = STV090x_TSMODE_SERIAL_PUNCTURED,
- .ts2_mode = STV090x_TSMODE_SERIAL_PUNCTURED,
-
- .repeater_level = STV090x_RPTLEVEL_16,
-
- .adc1_range = STV090x_ADC_1Vpp,
- .adc2_range = STV090x_ADC_1Vpp,
-
- .diseqc_envelope_mode = true,
-};
-
-static struct stv6110x_config tuner_cineS2_0 = {
- .addr = 0x60,
- .refclk = 27000000,
- .clk_div = 1,
-};
-
-static struct stv6110x_config tuner_cineS2_1 = {
- .addr = 0x63,
- .refclk = 27000000,
- .clk_div = 1,
-};
-
-static struct ngene_info ngene_info_cineS2 = {
- .type = NGENE_SIDEWINDER,
- .name = "Linux4Media cineS2 DVB-S2 Twin Tuner",
- .io_type = {NGENE_IO_TSIN, NGENE_IO_TSIN},
- .demod_attach = {demod_attach_stv0900, demod_attach_stv0900},
- .tuner_attach = {tuner_attach_stv6110, tuner_attach_stv6110},
- .fe_config = {&fe_cineS2, &fe_cineS2},
- .tuner_config = {&tuner_cineS2_0, &tuner_cineS2_1},
- .lnb = {0x0b, 0x08},
- .tsf = {3, 3},
- .fw_version = 15,
-};
-
-static struct ngene_info ngene_info_satixs2 = {
- .type = NGENE_SIDEWINDER,
- .name = "Mystique SaTiX-S2 Dual",
- .io_type = {NGENE_IO_TSIN, NGENE_IO_TSIN},
- .demod_attach = {demod_attach_stv0900, demod_attach_stv0900},
- .tuner_attach = {tuner_attach_stv6110, tuner_attach_stv6110},
- .fe_config = {&fe_cineS2, &fe_cineS2},
- .tuner_config = {&tuner_cineS2_0, &tuner_cineS2_1},
- .lnb = {0x0b, 0x08},
- .tsf = {3, 3},
- .fw_version = 15,
-};
-
-/****************************************************************************/
-
-
-
-/****************************************************************************/
-/* PCI Subsystem ID *********************************************************/
-/****************************************************************************/
-
-#define NGENE_ID(_subvend, _subdev, _driverdata) { \
- .vendor = NGENE_VID, .device = NGENE_PID, \
- .subvendor = _subvend, .subdevice = _subdev, \
- .driver_data = (unsigned long) &_driverdata }
-
-/****************************************************************************/
-
-static const struct pci_device_id ngene_id_tbl[] __devinitdata = {
- NGENE_ID(0x18c3, 0xabc3, ngene_info_cineS2),
- NGENE_ID(0x18c3, 0xabc4, ngene_info_cineS2),
- NGENE_ID(0x18c3, 0xdb01, ngene_info_satixs2),
- {0}
-};
-MODULE_DEVICE_TABLE(pci, ngene_id_tbl);
-
-/****************************************************************************/
-/* Init/Exit ****************************************************************/
-/****************************************************************************/
-
-static pci_ers_result_t ngene_error_detected(struct pci_dev *dev,
- enum pci_channel_state state)
-{
- printk(KERN_ERR DEVICE_NAME ": PCI error\n");
- if (state == pci_channel_io_perm_failure)
- return PCI_ERS_RESULT_DISCONNECT;
- if (state == pci_channel_io_frozen)
- return PCI_ERS_RESULT_NEED_RESET;
- return PCI_ERS_RESULT_CAN_RECOVER;
-}
-
-static pci_ers_result_t ngene_link_reset(struct pci_dev *dev)
-{
- printk(KERN_INFO DEVICE_NAME ": link reset\n");
- return 0;
-}
-
-static pci_ers_result_t ngene_slot_reset(struct pci_dev *dev)
-{
- printk(KERN_INFO DEVICE_NAME ": slot reset\n");
- return 0;
-}
-
-static void ngene_resume(struct pci_dev *dev)
-{
- printk(KERN_INFO DEVICE_NAME ": resume\n");
-}
-
-static struct pci_error_handlers ngene_errors = {
- .error_detected = ngene_error_detected,
- .link_reset = ngene_link_reset,
- .slot_reset = ngene_slot_reset,
- .resume = ngene_resume,
-};
-
-static struct pci_driver ngene_pci_driver = {
- .name = "ngene",
- .id_table = ngene_id_tbl,
- .probe = ngene_probe,
- .remove = __devexit_p(ngene_remove),
- .err_handler = &ngene_errors,
-};
-
-static __init int module_init_ngene(void)
-{
- printk(KERN_INFO
- "nGene PCIE bridge driver, Copyright (C) 2005-2007 Micronas\n");
- return pci_register_driver(&ngene_pci_driver);
-}
-
-static __exit void module_exit_ngene(void)
-{
- pci_unregister_driver(&ngene_pci_driver);
-}
-
-module_init(module_init_ngene);
-module_exit(module_exit_ngene);
-
-MODULE_DESCRIPTION("nGene");
-MODULE_AUTHOR("Micronas, Ralph Metzler, Manfred Voelkel");
-MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/ngene/ngene-dvb.c b/drivers/media/dvb/ngene/ngene-dvb.c
new file mode 100644
index 000000000000..96013eb353cd
--- /dev/null
+++ b/drivers/media/dvb/ngene/ngene-dvb.c
@@ -0,0 +1,172 @@
+/*
+ * ngene-dvb.c: nGene PCIe bridge driver - DVB functions
+ *
+ * Copyright (C) 2005-2007 Micronas
+ *
+ * Copyright (C) 2008-2009 Ralph Metzler <rjkm@metzlerbros.de>
+ * Modifications for new nGene firmware,
+ * support for EEPROM-copying,
+ * support for new dual DVB-S2 card prototype
+ *
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 only, as published by the Free Software Foundation.
+ *
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/io.h>
+#include <asm/div64.h>
+#include <linux/pci.h>
+#include <linux/smp_lock.h>
+#include <linux/timer.h>
+#include <linux/version.h>
+#include <linux/byteorder/generic.h>
+#include <linux/firmware.h>
+#include <linux/vmalloc.h>
+
+#include "ngene.h"
+
+#define COMMAND_TIMEOUT_WORKAROUND
+
+
+/****************************************************************************/
+/* COMMAND API interface ****************************************************/
+/****************************************************************************/
+
+/****************************************************************************/
+/* DVB functions and API interface ******************************************/
+/****************************************************************************/
+
+static void swap_buffer(u32 *p, u32 len)
+{
+ while (len) {
+ *p = swab32(*p);
+ p++;
+ len -= 4;
+ }
+}
+
+void *tsin_exchange(void *priv, void *buf, u32 len, u32 clock, u32 flags)
+{
+ struct ngene_channel *chan = priv;
+
+
+#ifdef COMMAND_TIMEOUT_WORKAROUND
+ if (chan->users > 0)
+#endif
+ dvb_dmx_swfilter(&chan->demux, buf, len);
+ return NULL;
+}
+
+u8 fill_ts[188] = { 0x47, 0x1f, 0xff, 0x10 };
+
+void *tsout_exchange(void *priv, void *buf, u32 len, u32 clock, u32 flags)
+{
+ struct ngene_channel *chan = priv;
+ struct ngene *dev = chan->dev;
+ u32 alen;
+
+ alen = dvb_ringbuffer_avail(&dev->tsout_rbuf);
+ alen -= alen % 188;
+
+ if (alen < len)
+ FillTSBuffer(buf + alen, len - alen, flags);
+ else
+ alen = len;
+ dvb_ringbuffer_read(&dev->tsout_rbuf, buf, alen);
+ if (flags & DF_SWAP32)
+ swap_buffer((u32 *)buf, alen);
+ wake_up_interruptible(&dev->tsout_rbuf.queue);
+ return buf;
+}
+
+
+
+int ngene_start_feed(struct dvb_demux_feed *dvbdmxfeed)
+{
+ struct dvb_demux *dvbdmx = dvbdmxfeed->demux;
+ struct ngene_channel *chan = dvbdmx->priv;
+
+ if (chan->users == 0) {
+#ifdef COMMAND_TIMEOUT_WORKAROUND
+ if (!chan->running)
+#endif
+ set_transfer(chan, 1);
+ /* msleep(10); */
+ }
+
+ return ++chan->users;
+}
+
+int ngene_stop_feed(struct dvb_demux_feed *dvbdmxfeed)
+{
+ struct dvb_demux *dvbdmx = dvbdmxfeed->demux;
+ struct ngene_channel *chan = dvbdmx->priv;
+
+ if (--chan->users)
+ return chan->users;
+
+#ifndef COMMAND_TIMEOUT_WORKAROUND
+ set_transfer(chan, 0);
+#endif
+
+ return 0;
+}
+
+int my_dvb_dmx_ts_card_init(struct dvb_demux *dvbdemux, char *id,
+ int (*start_feed)(struct dvb_demux_feed *),
+ int (*stop_feed)(struct dvb_demux_feed *),
+ void *priv)
+{
+ dvbdemux->priv = priv;
+
+ dvbdemux->filternum = 256;
+ dvbdemux->feednum = 256;
+ dvbdemux->start_feed = start_feed;
+ dvbdemux->stop_feed = stop_feed;
+ dvbdemux->write_to_decoder = NULL;
+ dvbdemux->dmx.capabilities = (DMX_TS_FILTERING |
+ DMX_SECTION_FILTERING |
+ DMX_MEMORY_BASED_FILTERING);
+ return dvb_dmx_init(dvbdemux);
+}
+
+int my_dvb_dmxdev_ts_card_init(struct dmxdev *dmxdev,
+ struct dvb_demux *dvbdemux,
+ struct dmx_frontend *hw_frontend,
+ struct dmx_frontend *mem_frontend,
+ struct dvb_adapter *dvb_adapter)
+{
+ int ret;
+
+ dmxdev->filternum = 256;
+ dmxdev->demux = &dvbdemux->dmx;
+ dmxdev->capabilities = 0;
+ ret = dvb_dmxdev_init(dmxdev, dvb_adapter);
+ if (ret < 0)
+ return ret;
+
+ hw_frontend->source = DMX_FRONTEND_0;
+ dvbdemux->dmx.add_frontend(&dvbdemux->dmx, hw_frontend);
+ mem_frontend->source = DMX_MEMORY_FE;
+ dvbdemux->dmx.add_frontend(&dvbdemux->dmx, mem_frontend);
+ return dvbdemux->dmx.connect_frontend(&dvbdemux->dmx, hw_frontend);
+}
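[Editor's note, not part of the patch: the two helpers above are meant to be called back to back from the bridge driver's channel setup — my_dvb_dmx_ts_card_init() configures the software demux with the start/stop callbacks, and my_dvb_dmxdev_ts_card_init() registers the demux device node and connects the hardware frontend. A minimal sketch of that call order, assuming the struct ngene_channel members demux, dmxdev, hw_frontend and mem_frontend that init_channel() in ngene-core.c uses:

	static int example_setup_demux(struct ngene_channel *chan,
				       struct dvb_adapter *adapter)
	{
		int ret;

		/* software demux, fed by tsin_exchange() via dvb_dmx_swfilter() */
		ret = my_dvb_dmx_ts_card_init(&chan->demux, "SW demux",
					      ngene_start_feed, ngene_stop_feed,
					      chan);
		if (ret < 0)
			return ret;

		/* demux device node plus HW and memory frontends on the adapter */
		return my_dvb_dmxdev_ts_card_init(&chan->dmxdev, &chan->demux,
						  &chan->hw_frontend,
						  &chan->mem_frontend, adapter);
	}
]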
diff --git a/drivers/media/dvb/ngene/ngene-i2c.c b/drivers/media/dvb/ngene/ngene-i2c.c
new file mode 100644
index 000000000000..2ef54ca6badd
--- /dev/null
+++ b/drivers/media/dvb/ngene/ngene-i2c.c
@@ -0,0 +1,179 @@
+/*
+ * ngene-i2c.c: nGene PCIe bridge driver i2c functions
+ *
+ * Copyright (C) 2005-2007 Micronas
+ *
+ * Copyright (C) 2008-2009 Ralph Metzler <rjkm@metzlerbros.de>
+ * Modifications for new nGene firmware,
+ * support for EEPROM-copying,
+ * support for new dual DVB-S2 card prototype
+ *
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 only, as published by the Free Software Foundation.
+ *
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
+ */
+
+/* FIXME - some of these can probably be removed */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/io.h>
+#include <asm/div64.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/smp_lock.h>
+#include <linux/timer.h>
+#include <linux/version.h>
+#include <linux/byteorder/generic.h>
+#include <linux/firmware.h>
+#include <linux/vmalloc.h>
+
+#include "ngene.h"
+
+/* Firmware command for i2c operations */
+static int ngene_command_i2c_read(struct ngene *dev, u8 adr,
+ u8 *out, u8 outlen, u8 *in, u8 inlen, int flag)
+{
+ struct ngene_command com;
+
+ com.cmd.hdr.Opcode = CMD_I2C_READ;
+ com.cmd.hdr.Length = outlen + 3;
+ com.cmd.I2CRead.Device = adr << 1;
+ memcpy(com.cmd.I2CRead.Data, out, outlen);
+ com.cmd.I2CRead.Data[outlen] = inlen;
+ com.cmd.I2CRead.Data[outlen + 1] = 0;
+ com.in_len = outlen + 3;
+ com.out_len = inlen + 1;
+
+ if (ngene_command(dev, &com) < 0)
+ return -EIO;
+
+ if ((com.cmd.raw8[0] >> 1) != adr)
+ return -EIO;
+
+ if (flag)
+ memcpy(in, com.cmd.raw8, inlen + 1);
+ else
+ memcpy(in, com.cmd.raw8 + 1, inlen);
+ return 0;
+}
+
+static int ngene_command_i2c_write(struct ngene *dev, u8 adr,
+ u8 *out, u8 outlen)
+{
+ struct ngene_command com;
+
+
+ com.cmd.hdr.Opcode = CMD_I2C_WRITE;
+ com.cmd.hdr.Length = outlen + 1;
+ com.cmd.I2CRead.Device = adr << 1;
+ memcpy(com.cmd.I2CRead.Data, out, outlen);
+ com.in_len = outlen + 1;
+ com.out_len = 1;
+
+ if (ngene_command(dev, &com) < 0)
+ return -EIO;
+
+ if (com.cmd.raw8[0] == 1)
+ return -EIO;
+
+ return 0;
+}
+
+static void ngene_i2c_set_bus(struct ngene *dev, int bus)
+{
+ if (!(dev->card_info->i2c_access & 2))
+ return;
+ if (dev->i2c_current_bus == bus)
+ return;
+
+ switch (bus) {
+ case 0:
+ ngene_command_gpio_set(dev, 3, 0);
+ ngene_command_gpio_set(dev, 2, 1);
+ break;
+
+ case 1:
+ ngene_command_gpio_set(dev, 2, 0);
+ ngene_command_gpio_set(dev, 3, 1);
+ break;
+ }
+ dev->i2c_current_bus = bus;
+}
+
+static int ngene_i2c_master_xfer(struct i2c_adapter *adapter,
+ struct i2c_msg msg[], int num)
+{
+ struct ngene_channel *chan =
+ (struct ngene_channel *)i2c_get_adapdata(adapter);
+ struct ngene *dev = chan->dev;
+
+ down(&dev->i2c_switch_mutex);
+ ngene_i2c_set_bus(dev, chan->number);
+
+ if (num == 2 && msg[1].flags & I2C_M_RD && !(msg[0].flags & I2C_M_RD))
+ if (!ngene_command_i2c_read(dev, msg[0].addr,
+ msg[0].buf, msg[0].len,
+ msg[1].buf, msg[1].len, 0))
+ goto done;
+
+ if (num == 1 && !(msg[0].flags & I2C_M_RD))
+ if (!ngene_command_i2c_write(dev, msg[0].addr,
+ msg[0].buf, msg[0].len))
+ goto done;
+ if (num == 1 && (msg[0].flags & I2C_M_RD))
+ if (!ngene_command_i2c_read(dev, msg[0].addr, NULL, 0,
+ msg[0].buf, msg[0].len, 0))
+ goto done;
+
+ up(&dev->i2c_switch_mutex);
+ return -EIO;
+
+done:
+ up(&dev->i2c_switch_mutex);
+ return num;
+}
+
+
+static u32 ngene_i2c_functionality(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_SMBUS_EMUL;
+}
+
+static struct i2c_algorithm ngene_i2c_algo = {
+ .master_xfer = ngene_i2c_master_xfer,
+ .functionality = ngene_i2c_functionality,
+};
+
+int ngene_i2c_init(struct ngene *dev, int dev_nr)
+{
+ struct i2c_adapter *adap = &(dev->channel[dev_nr].i2c_adapter);
+
+ i2c_set_adapdata(adap, &(dev->channel[dev_nr]));
+ adap->class = I2C_CLASS_TV_DIGITAL | I2C_CLASS_TV_ANALOG;
+
+ strcpy(adap->name, "nGene");
+
+ adap->algo = &ngene_i2c_algo;
+ adap->algo_data = (void *)&(dev->channel[dev_nr]);
+ adap->dev.parent = &dev->pci_dev->dev;
+
+ return i2c_add_adapter(adap);
+}
+
diff --git a/drivers/media/dvb/ngene/ngene.h b/drivers/media/dvb/ngene/ngene.h
index a7eb29846310..676fcbb79026 100644
--- a/drivers/media/dvb/ngene/ngene.h
+++ b/drivers/media/dvb/ngene/ngene.h
@@ -39,6 +39,8 @@
#include "dvb_frontend.h"
#include "dvb_ringbuffer.h"
+#define DEVICE_NAME "ngene"
+
#define NGENE_VID 0x18c3
#define NGENE_PID 0x0720
@@ -752,6 +754,7 @@ struct ngene {
spinlock_t cmd_lock;
struct dvb_adapter adapter[MAX_STREAM];
+ struct dvb_adapter *first_adapter; /* "one_adapter" modprobe opt */
struct ngene_channel channel[MAX_STREAM];
struct ngene_info *card_info;
@@ -853,6 +856,33 @@ struct ngene_buffer {
#endif
+/* Provided by ngene-core.c */
+int __devinit ngene_probe(struct pci_dev *pci_dev,
+ const struct pci_device_id *id);
+void __devexit ngene_remove(struct pci_dev *pdev);
+int ngene_command(struct ngene *dev, struct ngene_command *com);
+int ngene_command_gpio_set(struct ngene *dev, u8 select, u8 level);
+void set_transfer(struct ngene_channel *chan, int state);
+void FillTSBuffer(void *Buffer, int Length, u32 Flags);
+
+/* Provided by ngene-i2c.c */
+int ngene_i2c_init(struct ngene *dev, int dev_nr);
+
+/* Provided by ngene-dvb.c */
+void *tsout_exchange(void *priv, void *buf, u32 len, u32 clock, u32 flags);
+void *tsin_exchange(void *priv, void *buf, u32 len, u32 clock, u32 flags);
+int ngene_start_feed(struct dvb_demux_feed *dvbdmxfeed);
+int ngene_stop_feed(struct dvb_demux_feed *dvbdmxfeed);
+int my_dvb_dmx_ts_card_init(struct dvb_demux *dvbdemux, char *id,
+ int (*start_feed)(struct dvb_demux_feed *),
+ int (*stop_feed)(struct dvb_demux_feed *),
+ void *priv);
+int my_dvb_dmxdev_ts_card_init(struct dmxdev *dmxdev,
+ struct dvb_demux *dvbdemux,
+ struct dmx_frontend *hw_frontend,
+ struct dmx_frontend *mem_frontend,
+ struct dvb_adapter *dvb_adapter);
+
#endif
/* LocalWords: Endif
diff --git a/drivers/media/dvb/pt1/pt1.c b/drivers/media/dvb/pt1/pt1.c
index 6aded234aa61..69ad94934ec2 100644
--- a/drivers/media/dvb/pt1/pt1.c
+++ b/drivers/media/dvb/pt1/pt1.c
@@ -1,5 +1,5 @@
/*
- * driver for Earthsoft PT1
+ * driver for Earthsoft PT1/PT2
*
* Copyright (C) 2009 HIRANO Takahito <hiranotaka@zng.info>
*
@@ -77,6 +77,10 @@ struct pt1 {
struct pt1_adapter *adaps[PT1_NR_ADAPS];
struct pt1_table *tables;
struct task_struct *kthread;
+
+ struct mutex lock;
+ int power;
+ int reset;
};
struct pt1_adapter {
@@ -95,6 +99,11 @@ struct pt1_adapter {
struct dvb_frontend *fe;
int (*orig_set_voltage)(struct dvb_frontend *fe,
fe_sec_voltage_t voltage);
+ int (*orig_sleep)(struct dvb_frontend *fe);
+ int (*orig_init)(struct dvb_frontend *fe);
+
+ fe_sec_voltage_t voltage;
+ int sleep;
};
#define pt1_printk(level, pt1, format, arg...) \
@@ -219,8 +228,10 @@ static int pt1_do_enable_ram(struct pt1 *pt1)
static int pt1_enable_ram(struct pt1 *pt1)
{
int i, ret;
+ int phase;
schedule_timeout_uninterruptible((HZ + 999) / 1000);
- for (i = 0; i < 10; i++) {
+ phase = pt1->pdev->device == 0x211a ? 128 : 166;
+ for (i = 0; i < phase; i++) {
ret = pt1_do_enable_ram(pt1);
if (ret < 0)
return ret;
@@ -485,33 +496,47 @@ static int pt1_stop_feed(struct dvb_demux_feed *feed)
}
static void
-pt1_set_power(struct pt1 *pt1, int power, int lnb, int reset)
+pt1_update_power(struct pt1 *pt1)
{
- pt1_write_reg(pt1, 1, power | lnb << 1 | !reset << 3);
+ int bits;
+ int i;
+ struct pt1_adapter *adap;
+ static const int sleep_bits[] = {
+ 1 << 4,
+ 1 << 6 | 1 << 7,
+ 1 << 5,
+ 1 << 6 | 1 << 8,
+ };
+
+ bits = pt1->power | !pt1->reset << 3;
+ mutex_lock(&pt1->lock);
+ for (i = 0; i < PT1_NR_ADAPS; i++) {
+ adap = pt1->adaps[i];
+ switch (adap->voltage) {
+ case SEC_VOLTAGE_13: /* actually 11V */
+ bits |= 1 << 1;
+ break;
+ case SEC_VOLTAGE_18: /* actually 15V */
+ bits |= 1 << 1 | 1 << 2;
+ break;
+ default:
+ break;
+ }
+
+ /* XXX: The bits should be changed depending on adap->sleep. */
+ bits |= sleep_bits[i];
+ }
+ pt1_write_reg(pt1, 1, bits);
+ mutex_unlock(&pt1->lock);
}
static int pt1_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage)
{
struct pt1_adapter *adap;
- int lnb;
adap = container_of(fe->dvb, struct pt1_adapter, adap);
-
- switch (voltage) {
- case SEC_VOLTAGE_13: /* actually 11V */
- lnb = 2;
- break;
- case SEC_VOLTAGE_18: /* actually 15V */
- lnb = 3;
- break;
- case SEC_VOLTAGE_OFF:
- lnb = 0;
- break;
- default:
- return -EINVAL;
- }
-
- pt1_set_power(adap->pt1, 1, lnb, 0);
+ adap->voltage = voltage;
+ pt1_update_power(adap->pt1);
if (adap->orig_set_voltage)
return adap->orig_set_voltage(fe, voltage);
@@ -519,9 +544,37 @@ static int pt1_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage)
return 0;
}
+static int pt1_sleep(struct dvb_frontend *fe)
+{
+ struct pt1_adapter *adap;
+
+ adap = container_of(fe->dvb, struct pt1_adapter, adap);
+ adap->sleep = 1;
+ pt1_update_power(adap->pt1);
+
+ if (adap->orig_sleep)
+ return adap->orig_sleep(fe);
+ else
+ return 0;
+}
+
+static int pt1_wakeup(struct dvb_frontend *fe)
+{
+ struct pt1_adapter *adap;
+
+ adap = container_of(fe->dvb, struct pt1_adapter, adap);
+ adap->sleep = 0;
+ pt1_update_power(adap->pt1);
+ schedule_timeout_uninterruptible((HZ + 999) / 1000);
+
+ if (adap->orig_init)
+ return adap->orig_init(fe);
+ else
+ return 0;
+}
+
static void pt1_free_adapter(struct pt1_adapter *adap)
{
- dvb_unregister_frontend(adap->fe);
dvb_net_release(&adap->net);
adap->demux.dmx.close(&adap->demux.dmx);
dvb_dmxdev_release(&adap->dmxdev);
@@ -534,7 +587,7 @@ static void pt1_free_adapter(struct pt1_adapter *adap)
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
static struct pt1_adapter *
-pt1_alloc_adapter(struct pt1 *pt1, struct dvb_frontend *fe)
+pt1_alloc_adapter(struct pt1 *pt1)
{
struct pt1_adapter *adap;
void *buf;
@@ -551,8 +604,8 @@ pt1_alloc_adapter(struct pt1 *pt1, struct dvb_frontend *fe)
adap->pt1 = pt1;
- adap->orig_set_voltage = fe->ops.set_voltage;
- fe->ops.set_voltage = pt1_set_voltage;
+ adap->voltage = SEC_VOLTAGE_OFF;
+ adap->sleep = 1;
buf = (u8 *)__get_free_page(GFP_KERNEL);
if (!buf) {
@@ -593,17 +646,8 @@ pt1_alloc_adapter(struct pt1 *pt1, struct dvb_frontend *fe)
dvb_net_init(dvb_adap, &adap->net, &demux->dmx);
- ret = dvb_register_frontend(dvb_adap, fe);
- if (ret < 0)
- goto err_net_release;
- adap->fe = fe;
-
return adap;
-err_net_release:
- dvb_net_release(&adap->net);
- adap->demux.dmx.close(&adap->demux.dmx);
- dvb_dmxdev_release(&adap->dmxdev);
err_dmx_release:
dvb_dmx_release(demux);
err_unregister_adapter:
@@ -623,6 +667,62 @@ static void pt1_cleanup_adapters(struct pt1 *pt1)
pt1_free_adapter(pt1->adaps[i]);
}
+static int pt1_init_adapters(struct pt1 *pt1)
+{
+ int i;
+ struct pt1_adapter *adap;
+ int ret;
+
+ for (i = 0; i < PT1_NR_ADAPS; i++) {
+ adap = pt1_alloc_adapter(pt1);
+ if (IS_ERR(adap)) {
+ ret = PTR_ERR(adap);
+ goto err;
+ }
+
+ adap->index = i;
+ pt1->adaps[i] = adap;
+ }
+ return 0;
+
+err:
+ while (i--)
+ pt1_free_adapter(pt1->adaps[i]);
+
+ return ret;
+}
+
+static void pt1_cleanup_frontend(struct pt1_adapter *adap)
+{
+ dvb_unregister_frontend(adap->fe);
+}
+
+static int pt1_init_frontend(struct pt1_adapter *adap, struct dvb_frontend *fe)
+{
+ int ret;
+
+ adap->orig_set_voltage = fe->ops.set_voltage;
+ adap->orig_sleep = fe->ops.sleep;
+ adap->orig_init = fe->ops.init;
+ fe->ops.set_voltage = pt1_set_voltage;
+ fe->ops.sleep = pt1_sleep;
+ fe->ops.init = pt1_wakeup;
+
+ ret = dvb_register_frontend(&adap->adap, fe);
+ if (ret < 0)
+ return ret;
+
+ adap->fe = fe;
+ return 0;
+}
+
+static void pt1_cleanup_frontends(struct pt1 *pt1)
+{
+ int i;
+ for (i = 0; i < PT1_NR_ADAPS; i++)
+ pt1_cleanup_frontend(pt1->adaps[i]);
+}
+
struct pt1_config {
struct va1j5jf8007s_config va1j5jf8007s_config;
struct va1j5jf8007t_config va1j5jf8007t_config;
@@ -630,29 +730,63 @@ struct pt1_config {
static const struct pt1_config pt1_configs[2] = {
{
- { .demod_address = 0x1b },
- { .demod_address = 0x1a },
+ {
+ .demod_address = 0x1b,
+ .frequency = VA1J5JF8007S_20MHZ,
+ },
+ {
+ .demod_address = 0x1a,
+ .frequency = VA1J5JF8007T_20MHZ,
+ },
}, {
- { .demod_address = 0x19 },
- { .demod_address = 0x18 },
+ {
+ .demod_address = 0x19,
+ .frequency = VA1J5JF8007S_20MHZ,
+ },
+ {
+ .demod_address = 0x18,
+ .frequency = VA1J5JF8007T_20MHZ,
+ },
},
};
-static int pt1_init_adapters(struct pt1 *pt1)
+static const struct pt1_config pt2_configs[2] = {
+ {
+ {
+ .demod_address = 0x1b,
+ .frequency = VA1J5JF8007S_25MHZ,
+ },
+ {
+ .demod_address = 0x1a,
+ .frequency = VA1J5JF8007T_25MHZ,
+ },
+ }, {
+ {
+ .demod_address = 0x19,
+ .frequency = VA1J5JF8007S_25MHZ,
+ },
+ {
+ .demod_address = 0x18,
+ .frequency = VA1J5JF8007T_25MHZ,
+ },
+ },
+};
+
+static int pt1_init_frontends(struct pt1 *pt1)
{
int i, j;
struct i2c_adapter *i2c_adap;
- const struct pt1_config *config;
+ const struct pt1_config *configs, *config;
struct dvb_frontend *fe[4];
- struct pt1_adapter *adap;
int ret;
i = 0;
j = 0;
i2c_adap = &pt1->i2c_adap;
+ configs = pt1->pdev->device == 0x211a ? pt1_configs : pt2_configs;
do {
- config = &pt1_configs[i / 2];
+ config = &configs[i / 2];
fe[i] = va1j5jf8007s_attach(&config->va1j5jf8007s_config,
i2c_adap);
@@ -681,11 +815,9 @@ static int pt1_init_adapters(struct pt1 *pt1)
} while (i < 4);
do {
- adap = pt1_alloc_adapter(pt1, fe[j]);
- if (IS_ERR(adap))
+ ret = pt1_init_frontend(pt1->adaps[j], fe[j]);
+ if (ret < 0)
goto err;
- adap->index = j;
- pt1->adaps[j] = adap;
} while (++j < 4);
return 0;
@@ -695,7 +827,7 @@ err:
fe[i]->ops.release(fe[i]);
while (j--)
- pt1_free_adapter(pt1->adaps[j]);
+ dvb_unregister_frontend(fe[j]);
return ret;
}
@@ -890,9 +1022,12 @@ static void __devexit pt1_remove(struct pci_dev *pdev)
kthread_stop(pt1->kthread);
pt1_cleanup_tables(pt1);
- pt1_cleanup_adapters(pt1);
+ pt1_cleanup_frontends(pt1);
pt1_disable_ram(pt1);
- pt1_set_power(pt1, 0, 0, 1);
+ pt1->power = 0;
+ pt1->reset = 1;
+ pt1_update_power(pt1);
+ pt1_cleanup_adapters(pt1);
i2c_del_adapter(&pt1->i2c_adap);
pci_set_drvdata(pdev, NULL);
kfree(pt1);
@@ -936,10 +1071,21 @@ pt1_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_pci_iounmap;
}
+ mutex_init(&pt1->lock);
pt1->pdev = pdev;
pt1->regs = regs;
pci_set_drvdata(pdev, pt1);
+ ret = pt1_init_adapters(pt1);
+ if (ret < 0)
+ goto err_kfree;
+
+ mutex_init(&pt1->lock);
+
+ pt1->power = 0;
+ pt1->reset = 1;
+ pt1_update_power(pt1);
+
i2c_adap = &pt1->i2c_adap;
i2c_adap->class = I2C_CLASS_TV_DIGITAL;
i2c_adap->algo = &pt1_i2c_algo;
@@ -948,9 +1094,7 @@ pt1_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
i2c_set_adapdata(i2c_adap, pt1);
ret = i2c_add_adapter(i2c_adap);
if (ret < 0)
- goto err_kfree;
-
- pt1_set_power(pt1, 0, 0, 1);
+ goto err_pt1_cleanup_adapters;
pt1_i2c_init(pt1);
pt1_i2c_wait(pt1);
@@ -979,19 +1123,21 @@ pt1_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pt1_init_streams(pt1);
- pt1_set_power(pt1, 1, 0, 1);
+ pt1->power = 1;
+ pt1_update_power(pt1);
schedule_timeout_uninterruptible((HZ + 49) / 50);
- pt1_set_power(pt1, 1, 0, 0);
+ pt1->reset = 0;
+ pt1_update_power(pt1);
schedule_timeout_uninterruptible((HZ + 999) / 1000);
- ret = pt1_init_adapters(pt1);
+ ret = pt1_init_frontends(pt1);
if (ret < 0)
goto err_pt1_disable_ram;
ret = pt1_init_tables(pt1);
if (ret < 0)
- goto err_pt1_cleanup_adapters;
+ goto err_pt1_cleanup_frontends;
kthread = kthread_run(pt1_thread, pt1, "pt1");
if (IS_ERR(kthread)) {
@@ -1004,11 +1150,15 @@ pt1_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err_pt1_cleanup_tables:
pt1_cleanup_tables(pt1);
-err_pt1_cleanup_adapters:
- pt1_cleanup_adapters(pt1);
+err_pt1_cleanup_frontends:
+ pt1_cleanup_frontends(pt1);
err_pt1_disable_ram:
pt1_disable_ram(pt1);
- pt1_set_power(pt1, 0, 0, 1);
+ pt1->power = 0;
+ pt1->reset = 1;
+ pt1_update_power(pt1);
+err_pt1_cleanup_adapters:
+ pt1_cleanup_adapters(pt1);
err_i2c_del_adapter:
i2c_del_adapter(i2c_adap);
err_kfree:
@@ -1027,6 +1177,7 @@ err:
static struct pci_device_id pt1_id_table[] = {
{ PCI_DEVICE(0x10ee, 0x211a) },
+ { PCI_DEVICE(0x10ee, 0x222a) },
{ },
};
MODULE_DEVICE_TABLE(pci, pt1_id_table);
@@ -1054,5 +1205,5 @@ module_init(pt1_init);
module_exit(pt1_cleanup);
MODULE_AUTHOR("Takahito HIRANO <hiranotaka@zng.info>");
-MODULE_DESCRIPTION("Earthsoft PT1 Driver");
+MODULE_DESCRIPTION("Earthsoft PT1/PT2 Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/pt1/va1j5jf8007s.c b/drivers/media/dvb/pt1/va1j5jf8007s.c
index fc6594996e79..451641c0c1d2 100644
--- a/drivers/media/dvb/pt1/va1j5jf8007s.c
+++ b/drivers/media/dvb/pt1/va1j5jf8007s.c
@@ -1,5 +1,5 @@
/*
- * ISDB-S driver for VA1J5JF8007
+ * ISDB-S driver for VA1J5JF8007/VA1J5JF8011
*
* Copyright (C) 2009 HIRANO Takahito <hiranotaka@zng.info>
*
@@ -580,7 +580,7 @@ static void va1j5jf8007s_release(struct dvb_frontend *fe)
static struct dvb_frontend_ops va1j5jf8007s_ops = {
.info = {
- .name = "VA1J5JF8007 ISDB-S",
+ .name = "VA1J5JF8007/VA1J5JF8011 ISDB-S",
.type = FE_QPSK,
.frequency_min = 950000,
.frequency_max = 2150000,
@@ -628,28 +628,50 @@ static int va1j5jf8007s_prepare_1(struct va1j5jf8007s_state *state)
return 0;
}
-static const u8 va1j5jf8007s_prepare_bufs[][2] = {
+static const u8 va1j5jf8007s_20mhz_prepare_bufs[][2] = {
{0x04, 0x02}, {0x0d, 0x55}, {0x11, 0x40}, {0x13, 0x80}, {0x17, 0x01},
{0x1c, 0x0a}, {0x1d, 0xaa}, {0x1e, 0x20}, {0x1f, 0x88}, {0x51, 0xb0},
{0x52, 0x89}, {0x53, 0xb3}, {0x5a, 0x2d}, {0x5b, 0xd3}, {0x85, 0x69},
{0x87, 0x04}, {0x8e, 0x02}, {0xa3, 0xf7}, {0xa5, 0xc0},
};
+static const u8 va1j5jf8007s_25mhz_prepare_bufs[][2] = {
+ {0x04, 0x02}, {0x11, 0x40}, {0x13, 0x80}, {0x17, 0x01}, {0x1c, 0x0a},
+ {0x1d, 0xaa}, {0x1e, 0x20}, {0x1f, 0x88}, {0x51, 0xb0}, {0x52, 0x89},
+ {0x53, 0xb3}, {0x5a, 0x2d}, {0x5b, 0xd3}, {0x85, 0x69}, {0x87, 0x04},
+ {0x8e, 0x26}, {0xa3, 0xf7}, {0xa5, 0xc0},
+};
+
static int va1j5jf8007s_prepare_2(struct va1j5jf8007s_state *state)
{
+ const u8 (*bufs)[2];
+ int size;
u8 addr;
u8 buf[2];
struct i2c_msg msg;
int i;
+ switch (state->config->frequency) {
+ case VA1J5JF8007S_20MHZ:
+ bufs = va1j5jf8007s_20mhz_prepare_bufs;
+ size = ARRAY_SIZE(va1j5jf8007s_20mhz_prepare_bufs);
+ break;
+ case VA1J5JF8007S_25MHZ:
+ bufs = va1j5jf8007s_25mhz_prepare_bufs;
+ size = ARRAY_SIZE(va1j5jf8007s_25mhz_prepare_bufs);
+ break;
+ default:
+ return -EINVAL;
+ }
+
addr = state->config->demod_address;
msg.addr = addr;
msg.flags = 0;
msg.len = 2;
msg.buf = buf;
- for (i = 0; i < ARRAY_SIZE(va1j5jf8007s_prepare_bufs); i++) {
- memcpy(buf, va1j5jf8007s_prepare_bufs[i], sizeof(buf));
+ for (i = 0; i < size; i++) {
+ memcpy(buf, bufs[i], sizeof(buf));
if (i2c_transfer(state->adap, &msg, 1) != 1)
return -EREMOTEIO;
}
diff --git a/drivers/media/dvb/pt1/va1j5jf8007s.h b/drivers/media/dvb/pt1/va1j5jf8007s.h
index aa228a816353..b7d6f05a0e02 100644
--- a/drivers/media/dvb/pt1/va1j5jf8007s.h
+++ b/drivers/media/dvb/pt1/va1j5jf8007s.h
@@ -1,5 +1,5 @@
/*
- * ISDB-S driver for VA1J5JF8007
+ * ISDB-S driver for VA1J5JF8007/VA1J5JF8011
*
* Copyright (C) 2009 HIRANO Takahito <hiranotaka@zng.info>
*
@@ -24,8 +24,14 @@
#ifndef VA1J5JF8007S_H
#define VA1J5JF8007S_H
+enum va1j5jf8007s_frequency {
+ VA1J5JF8007S_20MHZ,
+ VA1J5JF8007S_25MHZ,
+};
+
struct va1j5jf8007s_config {
u8 demod_address;
+ enum va1j5jf8007s_frequency frequency;
};
struct i2c_adapter;
diff --git a/drivers/media/dvb/pt1/va1j5jf8007t.c b/drivers/media/dvb/pt1/va1j5jf8007t.c
index 3db4f3e34e8f..0f085c3e571b 100644
--- a/drivers/media/dvb/pt1/va1j5jf8007t.c
+++ b/drivers/media/dvb/pt1/va1j5jf8007t.c
@@ -1,5 +1,5 @@
/*
- * ISDB-T driver for VA1J5JF8007
+ * ISDB-T driver for VA1J5JF8007/VA1J5JF8011
*
* Copyright (C) 2009 HIRANO Takahito <hiranotaka@zng.info>
*
@@ -429,7 +429,7 @@ static void va1j5jf8007t_release(struct dvb_frontend *fe)
static struct dvb_frontend_ops va1j5jf8007t_ops = {
.info = {
- .name = "VA1J5JF8007 ISDB-T",
+ .name = "VA1J5JF8007/VA1J5JF8011 ISDB-T",
.type = FE_OFDM,
.frequency_min = 90000000,
.frequency_max = 770000000,
@@ -448,29 +448,50 @@ static struct dvb_frontend_ops va1j5jf8007t_ops = {
.release = va1j5jf8007t_release,
};
-static const u8 va1j5jf8007t_prepare_bufs[][2] = {
+static const u8 va1j5jf8007t_20mhz_prepare_bufs[][2] = {
{0x03, 0x90}, {0x14, 0x8f}, {0x1c, 0x2a}, {0x1d, 0xa8}, {0x1e, 0xa2},
{0x22, 0x83}, {0x31, 0x0d}, {0x32, 0xe0}, {0x39, 0xd3}, {0x3a, 0x00},
{0x5c, 0x40}, {0x5f, 0x80}, {0x75, 0x02}, {0x76, 0x4e}, {0x77, 0x03},
{0xef, 0x01}
};
+static const u8 va1j5jf8007t_25mhz_prepare_bufs[][2] = {
+ {0x03, 0x90}, {0x1c, 0x2a}, {0x1d, 0xa8}, {0x1e, 0xa2}, {0x22, 0x83},
+ {0x3a, 0x00}, {0x5c, 0x40}, {0x5f, 0x80}, {0x75, 0x0a}, {0x76, 0x4c},
+ {0x77, 0x03}, {0xef, 0x01}
+};
+
int va1j5jf8007t_prepare(struct dvb_frontend *fe)
{
struct va1j5jf8007t_state *state;
+ const u8 (*bufs)[2];
+ int size;
u8 buf[2];
struct i2c_msg msg;
int i;
state = fe->demodulator_priv;
+ switch (state->config->frequency) {
+ case VA1J5JF8007T_20MHZ:
+ bufs = va1j5jf8007t_20mhz_prepare_bufs;
+ size = ARRAY_SIZE(va1j5jf8007t_20mhz_prepare_bufs);
+ break;
+ case VA1J5JF8007T_25MHZ:
+ bufs = va1j5jf8007t_25mhz_prepare_bufs;
+ size = ARRAY_SIZE(va1j5jf8007t_25mhz_prepare_bufs);
+ break;
+ default:
+ return -EINVAL;
+ }
+
msg.addr = state->config->demod_address;
msg.flags = 0;
msg.len = sizeof(buf);
msg.buf = buf;
- for (i = 0; i < ARRAY_SIZE(va1j5jf8007t_prepare_bufs); i++) {
- memcpy(buf, va1j5jf8007t_prepare_bufs[i], sizeof(buf));
+ for (i = 0; i < size; i++) {
+ memcpy(buf, bufs[i], sizeof(buf));
if (i2c_transfer(state->adap, &msg, 1) != 1)
return -EREMOTEIO;
}
diff --git a/drivers/media/dvb/pt1/va1j5jf8007t.h b/drivers/media/dvb/pt1/va1j5jf8007t.h
index ed49906f7769..2903be519ef5 100644
--- a/drivers/media/dvb/pt1/va1j5jf8007t.h
+++ b/drivers/media/dvb/pt1/va1j5jf8007t.h
@@ -1,5 +1,5 @@
/*
- * ISDB-T driver for VA1J5JF8007
+ * ISDB-T driver for VA1J5JF8007/VA1J5JF8011
*
* Copyright (C) 2009 HIRANO Takahito <hiranotaka@zng.info>
*
@@ -24,8 +24,14 @@
#ifndef VA1J5JF8007T_H
#define VA1J5JF8007T_H
+enum va1j5jf8007t_frequency {
+ VA1J5JF8007T_20MHZ,
+ VA1J5JF8007T_25MHZ,
+};
+
struct va1j5jf8007t_config {
u8 demod_address;
+ enum va1j5jf8007t_frequency frequency;
};
struct i2c_adapter;
diff --git a/drivers/media/dvb/ttpci/av7110.c b/drivers/media/dvb/ttpci/av7110.c
index 38915591c6e5..a6be529eec5c 100644
--- a/drivers/media/dvb/ttpci/av7110.c
+++ b/drivers/media/dvb/ttpci/av7110.c
@@ -708,7 +708,7 @@ static void gpioirq(unsigned long cookie)
#ifdef CONFIG_DVB_AV7110_OSD
-static int dvb_osd_ioctl(struct inode *inode, struct file *file,
+static int dvb_osd_ioctl(struct file *file,
unsigned int cmd, void *parg)
{
struct dvb_device *dvbdev = file->private_data;
@@ -727,7 +727,7 @@ static int dvb_osd_ioctl(struct inode *inode, struct file *file,
static const struct file_operations dvb_osd_fops = {
.owner = THIS_MODULE,
- .ioctl = dvb_generic_ioctl,
+ .unlocked_ioctl = dvb_generic_ioctl,
.open = dvb_generic_open,
.release = dvb_generic_release,
};
diff --git a/drivers/media/dvb/ttpci/av7110_av.c b/drivers/media/dvb/ttpci/av7110_av.c
index 53884814161c..13efba942dac 100644
--- a/drivers/media/dvb/ttpci/av7110_av.c
+++ b/drivers/media/dvb/ttpci/av7110_av.c
@@ -1089,7 +1089,7 @@ static int play_iframe(struct av7110 *av7110, char __user *buf, unsigned int len
}
-static int dvb_video_ioctl(struct inode *inode, struct file *file,
+static int dvb_video_ioctl(struct file *file,
unsigned int cmd, void *parg)
{
struct dvb_device *dvbdev = file->private_data;
@@ -1297,7 +1297,7 @@ static int dvb_video_ioctl(struct inode *inode, struct file *file,
return ret;
}
-static int dvb_audio_ioctl(struct inode *inode, struct file *file,
+static int dvb_audio_ioctl(struct file *file,
unsigned int cmd, void *parg)
{
struct dvb_device *dvbdev = file->private_data;
@@ -1517,7 +1517,7 @@ static int dvb_audio_release(struct inode *inode, struct file *file)
static const struct file_operations dvb_video_fops = {
.owner = THIS_MODULE,
.write = dvb_video_write,
- .ioctl = dvb_generic_ioctl,
+ .unlocked_ioctl = dvb_generic_ioctl,
.open = dvb_video_open,
.release = dvb_video_release,
.poll = dvb_video_poll,
@@ -1535,7 +1535,7 @@ static struct dvb_device dvbdev_video = {
static const struct file_operations dvb_audio_fops = {
.owner = THIS_MODULE,
.write = dvb_audio_write,
- .ioctl = dvb_generic_ioctl,
+ .unlocked_ioctl = dvb_generic_ioctl,
.open = dvb_audio_open,
.release = dvb_audio_release,
.poll = dvb_audio_poll,
diff --git a/drivers/media/dvb/ttpci/av7110_ca.c b/drivers/media/dvb/ttpci/av7110_ca.c
index ac7779c45c5b..4eba35a018e3 100644
--- a/drivers/media/dvb/ttpci/av7110_ca.c
+++ b/drivers/media/dvb/ttpci/av7110_ca.c
@@ -248,8 +248,7 @@ static unsigned int dvb_ca_poll (struct file *file, poll_table *wait)
return mask;
}
-static int dvb_ca_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, void *parg)
+static int dvb_ca_ioctl(struct file *file, unsigned int cmd, void *parg)
{
struct dvb_device *dvbdev = file->private_data;
struct av7110 *av7110 = dvbdev->priv;
@@ -350,7 +349,7 @@ static const struct file_operations dvb_ca_fops = {
.owner = THIS_MODULE,
.read = dvb_ca_read,
.write = dvb_ca_write,
- .ioctl = dvb_generic_ioctl,
+ .unlocked_ioctl = dvb_generic_ioctl,
.open = dvb_ca_open,
.release = dvb_generic_release,
.poll = dvb_ca_poll,
diff --git a/drivers/media/dvb/ttpci/budget-ci.c b/drivers/media/dvb/ttpci/budget-ci.c
index 49c2a817a06f..461714396331 100644
--- a/drivers/media/dvb/ttpci/budget-ci.c
+++ b/drivers/media/dvb/ttpci/budget-ci.c
@@ -35,7 +35,7 @@
#include <linux/interrupt.h>
#include <linux/input.h>
#include <linux/spinlock.h>
-#include <media/ir-common.h>
+#include <media/ir-core.h>
#include "budget.h"
@@ -54,6 +54,8 @@
#include "tda1002x.h"
#include "tda827x.h"
+#define MODULE_NAME "budget_ci"
+
/*
* Regarding DEBIADDR_IR:
* Some CI modules hang if random addresses are read.
@@ -80,12 +82,6 @@
#define SLOTSTATUS_READY 8
#define SLOTSTATUS_OCCUPIED (SLOTSTATUS_PRESENT|SLOTSTATUS_RESET|SLOTSTATUS_READY)
-/*
- * Milliseconds during which a key is regarded as pressed.
- * If an identical command arrives within this time, the timer will start over.
- */
-#define IR_KEYPRESS_TIMEOUT 250
-
/* RC5 device wildcard */
#define IR_DEVICE_ANY 255
@@ -102,12 +98,9 @@ DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
struct budget_ci_ir {
struct input_dev *dev;
struct tasklet_struct msp430_irq_tasklet;
- struct timer_list timer_keyup;
char name[72]; /* 40 + 32 for (struct saa7146_dev).name */
char phys[32];
- struct ir_input_state state;
int rc5_device;
- u32 last_raw;
u32 ir_key;
bool have_command;
};
@@ -122,18 +115,11 @@ struct budget_ci {
u8 tuner_pll_address; /* used for philips_tdm1316l configs */
};
-static void msp430_ir_keyup(unsigned long data)
-{
- struct budget_ci_ir *ir = (struct budget_ci_ir *) data;
- ir_input_nokey(ir->dev, &ir->state);
-}
-
static void msp430_ir_interrupt(unsigned long data)
{
struct budget_ci *budget_ci = (struct budget_ci *) data;
struct input_dev *dev = budget_ci->ir.dev;
u32 command = ttpci_budget_debiread(&budget_ci->budget, DEBINOSWAP, DEBIADDR_IR, 2, 1, 0) >> 8;
- u32 raw;
/*
* The msp430 chip can generate two different bytes, command and device
@@ -169,20 +155,12 @@ static void msp430_ir_interrupt(unsigned long data)
return;
budget_ci->ir.have_command = false;
+ /* FIXME: We should generate complete scancodes with device info */
if (budget_ci->ir.rc5_device != IR_DEVICE_ANY &&
budget_ci->ir.rc5_device != (command & 0x1f))
return;
- /* Is this a repeated key sequence? (same device, command, toggle) */
- raw = budget_ci->ir.ir_key | (command << 8);
- if (budget_ci->ir.last_raw != raw || !timer_pending(&budget_ci->ir.timer_keyup)) {
- ir_input_nokey(dev, &budget_ci->ir.state);
- ir_input_keydown(dev, &budget_ci->ir.state,
- budget_ci->ir.ir_key);
- budget_ci->ir.last_raw = raw;
- }
-
- mod_timer(&budget_ci->ir.timer_keyup, jiffies + msecs_to_jiffies(IR_KEYPRESS_TIMEOUT));
+ ir_keydown(dev, budget_ci->ir.ir_key, (command & 0x20) ? 1 : 0);
}
static int msp430_ir_init(struct budget_ci *budget_ci)
@@ -190,7 +168,7 @@ static int msp430_ir_init(struct budget_ci *budget_ci)
struct saa7146_dev *saa = budget_ci->budget.dev;
struct input_dev *input_dev = budget_ci->ir.dev;
int error;
- struct ir_scancode_table *ir_codes;
+ char *ir_codes = NULL;
budget_ci->ir.dev = input_dev = input_allocate_device();
@@ -230,7 +208,7 @@ static int msp430_ir_init(struct budget_ci *budget_ci)
case 0x1011:
case 0x1012:
/* The hauppauge keymap is a superset of these remotes */
- ir_codes = &ir_codes_hauppauge_new_table;
+ ir_codes = RC_MAP_HAUPPAUGE_NEW;
if (rc5_device < 0)
budget_ci->ir.rc5_device = 0x1f;
@@ -239,22 +217,15 @@ static int msp430_ir_init(struct budget_ci *budget_ci)
case 0x1017:
case 0x101a:
/* for the Technotrend 1500 bundled remote */
- ir_codes = &ir_codes_tt_1500_table;
+ ir_codes = RC_MAP_TT_1500;
break;
default:
/* unknown remote */
- ir_codes = &ir_codes_budget_ci_old_table;
+ ir_codes = RC_MAP_BUDGET_CI_OLD;
break;
}
- ir_input_init(input_dev, &budget_ci->ir.state, IR_TYPE_RC5);
-
- /* initialise the key-up timeout handler */
- init_timer(&budget_ci->ir.timer_keyup);
- budget_ci->ir.timer_keyup.function = msp430_ir_keyup;
- budget_ci->ir.timer_keyup.data = (unsigned long) &budget_ci->ir;
- budget_ci->ir.last_raw = 0xffff; /* An impossible value */
- error = ir_input_register(input_dev, ir_codes, NULL);
+ error = ir_input_register(input_dev, ir_codes, NULL, MODULE_NAME);
if (error) {
printk(KERN_ERR "budget_ci: could not init driver for IR device (code %d)\n", error);
return error;
@@ -282,9 +253,6 @@ static void msp430_ir_deinit(struct budget_ci *budget_ci)
saa7146_setgpio(saa, 3, SAA7146_GPIO_INPUT);
tasklet_kill(&budget_ci->ir.msp430_irq_tasklet);
- del_timer_sync(&dev->timer);
- ir_input_nokey(dev, &budget_ci->ir.state);
-
ir_input_unregister(dev);
}
diff --git a/drivers/media/dvb/ttpci/budget.c b/drivers/media/dvb/ttpci/budget.c
index 1500210c06cf..874a10a9d493 100644
--- a/drivers/media/dvb/ttpci/budget.c
+++ b/drivers/media/dvb/ttpci/budget.c
@@ -442,6 +442,7 @@ static struct stv090x_config tt1600_stv090x_config = {
.repeater_level = STV090x_RPTLEVEL_16,
.tuner_init = NULL,
+ .tuner_sleep = NULL,
.tuner_set_mode = NULL,
.tuner_set_frequency = NULL,
.tuner_get_frequency = NULL,
@@ -627,22 +628,36 @@ static void frontend_init(struct budget *budget)
&tt1600_stv6110x_config,
&budget->i2c_adap);
- tt1600_stv090x_config.tuner_init = ctl->tuner_init;
- tt1600_stv090x_config.tuner_set_mode = ctl->tuner_set_mode;
- tt1600_stv090x_config.tuner_set_frequency = ctl->tuner_set_frequency;
- tt1600_stv090x_config.tuner_get_frequency = ctl->tuner_get_frequency;
- tt1600_stv090x_config.tuner_set_bandwidth = ctl->tuner_set_bandwidth;
- tt1600_stv090x_config.tuner_get_bandwidth = ctl->tuner_get_bandwidth;
- tt1600_stv090x_config.tuner_set_bbgain = ctl->tuner_set_bbgain;
- tt1600_stv090x_config.tuner_get_bbgain = ctl->tuner_get_bbgain;
- tt1600_stv090x_config.tuner_set_refclk = ctl->tuner_set_refclk;
- tt1600_stv090x_config.tuner_get_status = ctl->tuner_get_status;
-
- dvb_attach(isl6423_attach,
- budget->dvb_frontend,
- &budget->i2c_adap,
- &tt1600_isl6423_config);
-
+ if (ctl) {
+ tt1600_stv090x_config.tuner_init = ctl->tuner_init;
+ tt1600_stv090x_config.tuner_sleep = ctl->tuner_sleep;
+ tt1600_stv090x_config.tuner_set_mode = ctl->tuner_set_mode;
+ tt1600_stv090x_config.tuner_set_frequency = ctl->tuner_set_frequency;
+ tt1600_stv090x_config.tuner_get_frequency = ctl->tuner_get_frequency;
+ tt1600_stv090x_config.tuner_set_bandwidth = ctl->tuner_set_bandwidth;
+ tt1600_stv090x_config.tuner_get_bandwidth = ctl->tuner_get_bandwidth;
+ tt1600_stv090x_config.tuner_set_bbgain = ctl->tuner_set_bbgain;
+ tt1600_stv090x_config.tuner_get_bbgain = ctl->tuner_get_bbgain;
+ tt1600_stv090x_config.tuner_set_refclk = ctl->tuner_set_refclk;
+ tt1600_stv090x_config.tuner_get_status = ctl->tuner_get_status;
+
+ /* call the init function once to initialize
+ tuner's clock output divider and demod's
+ master clock */
+ if (budget->dvb_frontend->ops.init)
+ budget->dvb_frontend->ops.init(budget->dvb_frontend);
+
+ if (dvb_attach(isl6423_attach,
+ budget->dvb_frontend,
+ &budget->i2c_adap,
+ &tt1600_isl6423_config) == NULL) {
+ printk(KERN_ERR "%s: No Intersil ISL6423 found!\n", __func__);
+ goto error_out;
+ }
+ } else {
+ printk(KERN_ERR "%s: No STV6110(A) Silicon Tuner found!\n", __func__);
+ goto error_out;
+ }
}
}
break;
diff --git a/drivers/media/dvb/ttusb-dec/ttusb_dec.c b/drivers/media/dvb/ttusb-dec/ttusb_dec.c
index 53baccbab17f..fe1b8037b247 100644
--- a/drivers/media/dvb/ttusb-dec/ttusb_dec.c
+++ b/drivers/media/dvb/ttusb-dec/ttusb_dec.c
@@ -1257,7 +1257,7 @@ static int ttusb_dec_init_usb(struct ttusb_dec *dec)
if(!dec->irq_urb) {
return -ENOMEM;
}
- dec->irq_buffer = usb_buffer_alloc(dec->udev,IRQ_PACKET_SIZE,
+ dec->irq_buffer = usb_alloc_coherent(dec->udev,IRQ_PACKET_SIZE,
GFP_ATOMIC, &dec->irq_dma_handle);
if(!dec->irq_buffer) {
usb_free_urb(dec->irq_urb);
@@ -1550,8 +1550,8 @@ static void ttusb_dec_exit_rc(struct ttusb_dec *dec)
usb_free_urb(dec->irq_urb);
- usb_buffer_free(dec->udev,IRQ_PACKET_SIZE,
- dec->irq_buffer, dec->irq_dma_handle);
+ usb_free_coherent(dec->udev,IRQ_PACKET_SIZE,
+ dec->irq_buffer, dec->irq_dma_handle);
if (dec->rc_input_dev) {
input_unregister_device(dec->rc_input_dev);
diff --git a/drivers/media/radio/radio-mr800.c b/drivers/media/radio/radio-mr800.c
index 02a9cefc9a00..353b82855949 100644
--- a/drivers/media/radio/radio-mr800.c
+++ b/drivers/media/radio/radio-mr800.c
@@ -144,7 +144,10 @@ struct amradio_device {
int initialized;
};
-#define vdev_to_amradio(r) container_of(r, struct amradio_device, videodev)
+static inline struct amradio_device *to_amradio_dev(struct v4l2_device *v4l2_dev)
+{
+ return container_of(v4l2_dev, struct amradio_device, v4l2_dev);
+}
/* USB Device ID List */
static struct usb_device_id usb_amradio_device_table[] = {
@@ -284,13 +287,12 @@ static int amradio_set_stereo(struct amradio_device *radio, char argument)
*/
static void usb_amradio_disconnect(struct usb_interface *intf)
{
- struct amradio_device *radio = usb_get_intfdata(intf);
+ struct amradio_device *radio = to_amradio_dev(usb_get_intfdata(intf));
mutex_lock(&radio->lock);
radio->usbdev = NULL;
mutex_unlock(&radio->lock);
- usb_set_intfdata(intf, NULL);
v4l2_device_disconnect(&radio->v4l2_dev);
video_unregister_device(&radio->videodev);
}
@@ -500,7 +502,7 @@ out:
/* open device - amradio_start() and amradio_setfreq() */
static int usb_amradio_open(struct file *file)
{
- struct amradio_device *radio = vdev_to_amradio(video_devdata(file));
+ struct amradio_device *radio = video_drvdata(file);
int retval = 0;
mutex_lock(&radio->lock);
@@ -566,7 +568,7 @@ unlock:
/* Suspend device - stop device. Need to be checked and fixed */
static int usb_amradio_suspend(struct usb_interface *intf, pm_message_t message)
{
- struct amradio_device *radio = usb_get_intfdata(intf);
+ struct amradio_device *radio = to_amradio_dev(usb_get_intfdata(intf));
mutex_lock(&radio->lock);
@@ -584,7 +586,7 @@ static int usb_amradio_suspend(struct usb_interface *intf, pm_message_t message)
/* Resume device - start device. Need to be checked and fixed */
static int usb_amradio_resume(struct usb_interface *intf)
{
- struct amradio_device *radio = usb_get_intfdata(intf);
+ struct amradio_device *radio = to_amradio_dev(usb_get_intfdata(intf));
mutex_lock(&radio->lock);
@@ -633,9 +635,7 @@ static const struct v4l2_ioctl_ops usb_amradio_ioctl_ops = {
static void usb_amradio_video_device_release(struct video_device *videodev)
{
- struct amradio_device *radio = vdev_to_amradio(videodev);
-
- v4l2_device_unregister(&radio->v4l2_dev);
+ struct amradio_device *radio = video_get_drvdata(videodev);
/* free rest memory */
kfree(radio->buffer);
@@ -693,7 +693,6 @@ static int usb_amradio_probe(struct usb_interface *intf,
goto err_vdev;
}
- usb_set_intfdata(intf, radio);
return 0;
err_vdev:
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index 9644cf760aaa..ad9e6f9c22e9 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -45,6 +45,10 @@ config VIDEO_TUNER
tristate
depends on MEDIA_TUNER
+config V4L2_MEM2MEM_DEV
+ tristate
+ depends on VIDEOBUF_GEN
+
#
# Multimedia Video device configuration
#
@@ -480,6 +484,12 @@ config VIDEO_ADV7343
To compile this driver as a module, choose M here: the
module will be called adv7343.
+config VIDEO_AK881X
+ tristate "AK8813/AK8814 video encoders"
+ depends on I2C
+ help
+ Video output driver for AKM AK8813 and AK8814 TV encoders
+
comment "Video improvement chips"
config VIDEO_UPD64031A
@@ -520,6 +530,13 @@ config DISPLAY_DAVINCI_DM646X_EVM
To compile this driver as a module, choose M here: the
module will be called vpif_display.
+config VIDEO_SH_VOU
+ tristate "SuperH VOU video output driver"
+ depends on VIDEO_DEV && ARCH_SHMOBILE
+ select VIDEOBUF_DMA_CONTIG
+ help
+ Support for the Video Output Unit (VOU) on SuperH SoCs.
+
config CAPTURE_DAVINCI_DM646X_EVM
tristate "DM646x EVM Video Capture"
depends on VIDEO_DEV && MACH_DAVINCI_DM6467_EVM
@@ -542,7 +559,8 @@ config VIDEO_DAVINCI_VPIF
config VIDEO_VIVI
tristate "Virtual Video Driver"
- depends on VIDEO_DEV && VIDEO_V4L2 && !SPARC32 && !SPARC64
+ depends on VIDEO_DEV && VIDEO_V4L2 && !SPARC32 && !SPARC64 && FONTS
+ select FONT_8x16
select VIDEOBUF_VMALLOC
default n
---help---
@@ -613,6 +631,8 @@ config VIDEO_ISIF
To compile this driver as a module, choose M here: the
module will be called vpfe.
+source "drivers/media/video/omap/Kconfig"
+
source "drivers/media/video/bt8xx/Kconfig"
config VIDEO_PMS
@@ -647,7 +667,7 @@ config VIDEO_CQCAM
config VIDEO_W9966
tristate "W9966CF Webcam (FlyCam Supra and others) Video For Linux"
- depends on PARPORT_1284 && PARPORT && VIDEO_V4L1
+ depends on PARPORT_1284 && PARPORT && VIDEO_V4L2
help
Video4linux driver for Winbond's w9966 based Webcams.
Currently tested with the LifeView FlyCam Supra.
@@ -740,7 +760,7 @@ source "drivers/media/video/zoran/Kconfig"
config VIDEO_MEYE
tristate "Sony Vaio Picturebook Motion Eye Video For Linux"
- depends on PCI && SONY_LAPTOP && VIDEO_V4L1
+ depends on PCI && SONY_LAPTOP && VIDEO_V4L2
---help---
This is the video4linux driver for the Motion Eye camera found
in the Vaio Picturebook laptops. Please read the material in
@@ -807,7 +827,7 @@ source "drivers/media/video/saa7164/Kconfig"
config VIDEO_M32R_AR
tristate "AR devices"
- depends on M32R && VIDEO_V4L1
+ depends on M32R && VIDEO_V4L2
---help---
This is a video4linux driver for the Renesas AR (Artificial Retina)
camera module.
@@ -1107,3 +1127,27 @@ config USB_S2255
endif # V4L_USB_DRIVERS
endif # VIDEO_CAPTURE_DRIVERS
+
+menuconfig V4L_MEM2MEM_DRIVERS
+ bool "Memory-to-memory multimedia devices"
+ depends on VIDEO_V4L2
+ default n
+ ---help---
+ Say Y here to enable selecting drivers for V4L devices that
+ use system memory for both source and destination buffers, as opposed
+ to capture and output drivers, which use memory buffers for just
+ one of those.
+
+if V4L_MEM2MEM_DRIVERS
+
+config VIDEO_MEM2MEM_TESTDEV
+ tristate "Virtual test device for mem2mem framework"
+ depends on VIDEO_DEV && VIDEO_V4L2
+ select VIDEOBUF_VMALLOC
+ select V4L2_MEM2MEM_DEV
+ default n
+ ---help---
+ This is a virtual test device for the memory-to-memory driver
+ framework.
+
+endif # V4L_MEM2MEM_DRIVERS
diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile
index c51c386559f2..cc93859d3164 100644
--- a/drivers/media/video/Makefile
+++ b/drivers/media/video/Makefile
@@ -10,7 +10,8 @@ stkwebcam-objs := stk-webcam.o stk-sensor.o
omap2cam-objs := omap24xxcam.o omap24xxcam-dma.o
-videodev-objs := v4l2-dev.o v4l2-ioctl.o v4l2-device.o
+videodev-objs := v4l2-dev.o v4l2-ioctl.o v4l2-device.o v4l2-fh.o \
+ v4l2-event.o
# V4L2 core modules
@@ -117,6 +118,8 @@ obj-$(CONFIG_VIDEOBUF_VMALLOC) += videobuf-vmalloc.o
obj-$(CONFIG_VIDEOBUF_DVB) += videobuf-dvb.o
obj-$(CONFIG_VIDEO_BTCX) += btcx-risc.o
+obj-$(CONFIG_V4L2_MEM2MEM_DEV) += v4l2-mem2mem.o
+
obj-$(CONFIG_VIDEO_M32R_AR_M64278) += arv.o
obj-$(CONFIG_VIDEO_CX2341X) += cx2341x.o
@@ -149,8 +152,11 @@ obj-$(CONFIG_VIDEO_IVTV) += ivtv/
obj-$(CONFIG_VIDEO_CX18) += cx18/
obj-$(CONFIG_VIDEO_VIVI) += vivi.o
+obj-$(CONFIG_VIDEO_MEM2MEM_TESTDEV) += mem2mem_testdev.o
obj-$(CONFIG_VIDEO_CX23885) += cx23885/
+obj-$(CONFIG_VIDEO_AK881X) += ak881x.o
+
obj-$(CONFIG_VIDEO_OMAP2) += omap2cam.o
obj-$(CONFIG_SOC_CAMERA) += soc_camera.o soc_mediabus.o
obj-$(CONFIG_SOC_CAMERA_PLATFORM) += soc_camera_platform.o
@@ -160,6 +166,10 @@ obj-$(CONFIG_VIDEO_MX3) += mx3_camera.o
obj-$(CONFIG_VIDEO_PXA27x) += pxa_camera.o
obj-$(CONFIG_VIDEO_SH_MOBILE_CEU) += sh_mobile_ceu_camera.o
+obj-$(CONFIG_ARCH_DAVINCI) += davinci/
+
+obj-$(CONFIG_VIDEO_SH_VOU) += sh_vou.o
+
obj-$(CONFIG_VIDEO_AU0828) += au0828/
obj-$(CONFIG_USB_VIDEO_CLASS) += uvc/
@@ -169,6 +179,8 @@ obj-$(CONFIG_VIDEO_IR_I2C) += ir-kbd-i2c.o
obj-$(CONFIG_ARCH_DAVINCI) += davinci/
+obj-$(CONFIG_ARCH_OMAP) += omap/
+
EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core
EXTRA_CFLAGS += -Idrivers/media/dvb/frontends
EXTRA_CFLAGS += -Idrivers/media/common/tuners
diff --git a/drivers/media/video/ak881x.c b/drivers/media/video/ak881x.c
new file mode 100644
index 000000000000..35390d4717b9
--- /dev/null
+++ b/drivers/media/video/ak881x.c
@@ -0,0 +1,368 @@
+/*
+ * Driver for AK8813 / AK8814 TV encoders from Asahi Kasei Microsystems Co., Ltd. (AKM)
+ *
+ * Copyright (C) 2010, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/videodev2.h>
+
+#include <media/ak881x.h>
+#include <media/v4l2-chip-ident.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-device.h>
+
+#define AK881X_INTERFACE_MODE 0
+#define AK881X_VIDEO_PROCESS1 1
+#define AK881X_VIDEO_PROCESS2 2
+#define AK881X_VIDEO_PROCESS3 3
+#define AK881X_DAC_MODE 5
+#define AK881X_STATUS 0x24
+#define AK881X_DEVICE_ID 0x25
+#define AK881X_DEVICE_REVISION 0x26
+
+struct ak881x {
+ struct v4l2_subdev subdev;
+ struct ak881x_pdata *pdata;
+ unsigned int lines;
+ int id; /* V4L2_IDENT_AK881X code from v4l2-chip-ident.h, matching DEVICE_ID */
+ char revision; /* DEVICE_REVISION content */
+};
+
+static int reg_read(struct i2c_client *client, const u8 reg)
+{
+ return i2c_smbus_read_byte_data(client, reg);
+}
+
+static int reg_write(struct i2c_client *client, const u8 reg,
+ const u8 data)
+{
+ return i2c_smbus_write_byte_data(client, reg, data);
+}
+
+static int reg_set(struct i2c_client *client, const u8 reg,
+ const u8 data, u8 mask)
+{
+ int ret = reg_read(client, reg);
+ if (ret < 0)
+ return ret;
+ return reg_write(client, reg, (ret & ~mask) | (data & mask));
+}
+
+static struct ak881x *to_ak881x(const struct i2c_client *client)
+{
+ return container_of(i2c_get_clientdata(client), struct ak881x, subdev);
+}
+
+static int ak881x_g_chip_ident(struct v4l2_subdev *sd,
+ struct v4l2_dbg_chip_ident *id)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ak881x *ak881x = to_ak881x(client);
+
+ if (id->match.type != V4L2_CHIP_MATCH_I2C_ADDR)
+ return -EINVAL;
+
+ if (id->match.addr != client->addr)
+ return -ENODEV;
+
+ id->ident = ak881x->id;
+ id->revision = ak881x->revision;
+
+ return 0;
+}
+
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+static int ak881x_g_register(struct v4l2_subdev *sd,
+ struct v4l2_dbg_register *reg)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0x26)
+ return -EINVAL;
+
+ if (reg->match.addr != client->addr)
+ return -ENODEV;
+
+ reg->val = reg_read(client, reg->reg);
+
+ if (reg->val > 0xffff)
+ return -EIO;
+
+ return 0;
+}
+
+static int ak881x_s_register(struct v4l2_subdev *sd,
+ struct v4l2_dbg_register *reg)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0x26)
+ return -EINVAL;
+
+ if (reg->match.addr != client->addr)
+ return -ENODEV;
+
+ if (reg_write(client, reg->reg, reg->val) < 0)
+ return -EIO;
+
+ return 0;
+}
+#endif
+
+static int ak881x_try_g_mbus_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ak881x *ak881x = to_ak881x(client);
+
+ v4l_bound_align_image(&mf->width, 0, 720, 2,
+ &mf->height, 0, ak881x->lines, 1, 0);
+ mf->field = V4L2_FIELD_INTERLACED;
+ mf->code = V4L2_MBUS_FMT_YUYV8_2X8_LE;
+ mf->colorspace = V4L2_COLORSPACE_SMPTE170M;
+
+ return 0;
+}
+
+static int ak881x_s_mbus_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
+{
+ if (mf->field != V4L2_FIELD_INTERLACED ||
+ mf->code != V4L2_MBUS_FMT_YUYV8_2X8_LE)
+ return -EINVAL;
+
+ return ak881x_try_g_mbus_fmt(sd, mf);
+}
+
+static int ak881x_enum_mbus_fmt(struct v4l2_subdev *sd, int index,
+ enum v4l2_mbus_pixelcode *code)
+{
+ if (index)
+ return -EINVAL;
+
+ *code = V4L2_MBUS_FMT_YUYV8_2X8_LE;
+ return 0;
+}
+
+static int ak881x_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ak881x *ak881x = to_ak881x(client);
+
+ a->bounds.left = 0;
+ a->bounds.top = 0;
+ a->bounds.width = 720;
+ a->bounds.height = ak881x->lines;
+ a->defrect = a->bounds;
+ a->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ a->pixelaspect.numerator = 1;
+ a->pixelaspect.denominator = 1;
+
+ return 0;
+}
+
+static int ak881x_s_std_output(struct v4l2_subdev *sd, v4l2_std_id std)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ak881x *ak881x = to_ak881x(client);
+ u8 vp1;
+
+ if (std == V4L2_STD_NTSC_443) {
+ vp1 = 3;
+ ak881x->lines = 480;
+ } else if (std == V4L2_STD_PAL_M) {
+ vp1 = 5;
+ ak881x->lines = 480;
+ } else if (std == V4L2_STD_PAL_60) {
+ vp1 = 7;
+ ak881x->lines = 480;
+ } else if (std && !(std & ~V4L2_STD_PAL)) {
+ vp1 = 0xf;
+ ak881x->lines = 576;
+ } else if (std && !(std & ~V4L2_STD_NTSC)) {
+ vp1 = 0;
+ ak881x->lines = 480;
+ } else {
+ /* No SECAM or PAL_N/Nc supported */
+ return -EINVAL;
+ }
+
+ reg_set(client, AK881X_VIDEO_PROCESS1, vp1, 0xf);
+
+ return 0;
+}
+
+static int ak881x_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ak881x *ak881x = to_ak881x(client);
+
+ if (enable) {
+ u8 dac;
+ /* For colour-bar testing set bit 6 of AK881X_VIDEO_PROCESS1 */
+ /* Default: composite output */
+ if (ak881x->pdata->flags & AK881X_COMPONENT)
+ dac = 3;
+ else
+ dac = 4;
+ /* Turn on the DAC(s) */
+ reg_write(client, AK881X_DAC_MODE, dac);
+ dev_dbg(&client->dev, "chip status 0x%x\n",
+ reg_read(client, AK881X_STATUS));
+ } else {
+ /* ...and clear bit 6 of AK881X_VIDEO_PROCESS1 here */
+ reg_write(client, AK881X_DAC_MODE, 0);
+ dev_dbg(&client->dev, "chip status 0x%x\n",
+ reg_read(client, AK881X_STATUS));
+ }
+
+ return 0;
+}
+
+static struct v4l2_subdev_core_ops ak881x_subdev_core_ops = {
+ .g_chip_ident = ak881x_g_chip_ident,
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ .g_register = ak881x_g_register,
+ .s_register = ak881x_s_register,
+#endif
+};
+
+static struct v4l2_subdev_video_ops ak881x_subdev_video_ops = {
+ .s_mbus_fmt = ak881x_s_mbus_fmt,
+ .g_mbus_fmt = ak881x_try_g_mbus_fmt,
+ .try_mbus_fmt = ak881x_try_g_mbus_fmt,
+ .cropcap = ak881x_cropcap,
+ .enum_mbus_fmt = ak881x_enum_mbus_fmt,
+ .s_std_output = ak881x_s_std_output,
+ .s_stream = ak881x_s_stream,
+};
+
+static struct v4l2_subdev_ops ak881x_subdev_ops = {
+ .core = &ak881x_subdev_core_ops,
+ .video = &ak881x_subdev_video_ops,
+};
+
+static int ak881x_probe(struct i2c_client *client,
+ const struct i2c_device_id *did)
+{
+ struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
+ struct ak881x *ak881x;
+ u8 ifmode, data;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
+ dev_warn(&adapter->dev,
+ "I2C-Adapter doesn't support I2C_FUNC_SMBUS_WORD\n");
+ return -EIO;
+ }
+
+ ak881x = kzalloc(sizeof(struct ak881x), GFP_KERNEL);
+ if (!ak881x)
+ return -ENOMEM;
+
+ v4l2_i2c_subdev_init(&ak881x->subdev, client, &ak881x_subdev_ops);
+
+ data = reg_read(client, AK881X_DEVICE_ID);
+
+ switch (data) {
+ case 0x13:
+ ak881x->id = V4L2_IDENT_AK8813;
+ break;
+ case 0x14:
+ ak881x->id = V4L2_IDENT_AK8814;
+ break;
+ default:
+ dev_err(&client->dev,
+ "No ak881x chip detected, register read %x\n", data);
+ kfree(ak881x);
+ return -ENODEV;
+ }
+
+ ak881x->revision = reg_read(client, AK881X_DEVICE_REVISION);
+ ak881x->pdata = client->dev.platform_data;
+
+ if (ak881x->pdata) {
+ if (ak881x->pdata->flags & AK881X_FIELD)
+ ifmode = 4;
+ else
+ ifmode = 0;
+
+ switch (ak881x->pdata->flags & AK881X_IF_MODE_MASK) {
+ case AK881X_IF_MODE_BT656:
+ ifmode |= 1;
+ break;
+ case AK881X_IF_MODE_MASTER:
+ ifmode |= 2;
+ break;
+ case AK881X_IF_MODE_SLAVE:
+ default:
+ break;
+ }
+
+ dev_dbg(&client->dev, "IF mode %x\n", ifmode);
+
+ /*
+ * "Line Blanking No." seems to be the same as the number of
+ * "black" lines on, e.g., SuperH VOU, whose default value of 20
+ * "incidentally" matches ak881x' default
+ */
+ reg_write(client, AK881X_INTERFACE_MODE, ifmode | (20 << 3));
+ }
+
+ /* Hardware default: NTSC-M */
+ ak881x->lines = 480;
+
+ dev_info(&client->dev, "Detected an ak881x chip ID %x, revision %x\n",
+ data, ak881x->revision);
+
+ return 0;
+}
+
+static int ak881x_remove(struct i2c_client *client)
+{
+ struct ak881x *ak881x = to_ak881x(client);
+
+ v4l2_device_unregister_subdev(&ak881x->subdev);
+ kfree(ak881x);
+
+ return 0;
+}
+
+static const struct i2c_device_id ak881x_id[] = {
+ { "ak8813", 0 },
+ { "ak8814", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, ak881x_id);
+
+static struct i2c_driver ak881x_i2c_driver = {
+ .driver = {
+ .name = "ak881x",
+ },
+ .probe = ak881x_probe,
+ .remove = ak881x_remove,
+ .id_table = ak881x_id,
+};
+
+static int __init ak881x_module_init(void)
+{
+ return i2c_add_driver(&ak881x_i2c_driver);
+}
+
+static void __exit ak881x_module_exit(void)
+{
+ i2c_del_driver(&ak881x_i2c_driver);
+}
+
+module_init(ak881x_module_init);
+module_exit(ak881x_module_exit);
+
+MODULE_DESCRIPTION("TV-output driver for ak8813/ak8814");
+MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/video/arv.c b/drivers/media/video/arv.c
index a356d6bd3131..31e7a123d19a 100644
--- a/drivers/media/video/arv.c
+++ b/drivers/media/video/arv.c
@@ -27,8 +27,10 @@
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/sched.h>
-#include <linux/videodev.h>
+#include <linux/version.h>
+#include <linux/videodev2.h>
#include <media/v4l2-common.h>
+#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <linux/mutex.h>
@@ -39,7 +41,7 @@
#include <asm/byteorder.h>
#if 0
-#define DEBUG(n, args...) printk(args)
+#define DEBUG(n, args...) printk(KERN_INFO args)
#define CHECK_LOST 1
#else
#define DEBUG(n, args...)
@@ -52,10 +54,10 @@
*/
#define USE_INT 0 /* Don't modify */
-#define VERSION "0.03"
+#define VERSION "0.04"
#define ar_inl(addr) inl((unsigned long)(addr))
-#define ar_outl(val, addr) outl((unsigned long)(val),(unsigned long)(addr))
+#define ar_outl(val, addr) outl((unsigned long)(val), (unsigned long)(addr))
extern struct cpuinfo_m32r boot_cpu_data;
@@ -79,7 +81,7 @@ extern struct cpuinfo_m32r boot_cpu_data;
/* bits & bytes per pixel */
#define AR_BITS_PER_PIXEL 16
-#define AR_BYTES_PER_PIXEL (AR_BITS_PER_PIXEL/8)
+#define AR_BYTES_PER_PIXEL (AR_BITS_PER_PIXEL / 8)
/* line buffer size */
#define AR_LINE_BYTES_VGA (AR_WIDTH_VGA * AR_BYTES_PER_PIXEL)
@@ -104,8 +106,9 @@ extern struct cpuinfo_m32r boot_cpu_data;
#define AR_MODE_INTERLACE 0
#define AR_MODE_NORMAL 1
-struct ar_device {
- struct video_device *vdev;
+struct ar {
+ struct v4l2_device v4l2_dev;
+ struct video_device vdev;
+ unsigned int start_capture; /* during capture in INT. mode. */
#if USE_INT
unsigned char *line_buff; /* DMA line buffer */
@@ -116,12 +119,13 @@ struct ar_device {
int width, height;
int frame_bytes, line_bytes;
wait_queue_head_t wait;
- unsigned long in_use;
struct mutex lock;
};
+static struct ar ardev;
+
static int video_nr = -1; /* video device number (first free) */
-static unsigned char yuv[MAX_AR_FRAME_BYTES];
+static unsigned char yuv[MAX_AR_FRAME_BYTES];
/* module parameters */
/* default frequency */
@@ -133,9 +137,7 @@ module_param(freq, int, 0);
module_param(vga, int, 0);
module_param(vga_interlace, int, 0);
-static int ar_initialize(struct video_device *dev);
-
-static inline void wait_for_vsync(void)
+static void wait_for_vsync(void)
{
while (ar_inl(ARVCR0) & ARVCR0_VDS) /* wait for VSYNC */
cpu_relax();
@@ -143,7 +145,7 @@ static inline void wait_for_vsync(void)
cpu_relax();
}
-static inline void wait_acknowledge(void)
+static void wait_acknowledge(void)
{
int i;
@@ -156,7 +158,7 @@ static inline void wait_acknowledge(void)
/*******************************************************************
* I2C functions
*******************************************************************/
-void iic(int n, unsigned long addr, unsigned long data1, unsigned long data2,
+static void iic(int n, unsigned long addr, unsigned long data1, unsigned long data2,
unsigned long data3)
{
int i;
@@ -200,7 +202,7 @@ void iic(int n, unsigned long addr, unsigned long data1, unsigned long data2,
}
-void init_iic(void)
+static void init_iic(void)
{
DEBUG(1, "init_iic:\n");
@@ -214,13 +216,12 @@ void init_iic(void)
/* I2C CLK */
/* 50MH-100k */
- if (freq == 75) {
+ if (freq == 75)
ar_outl(369, PLDI2CFREQ); /* BCLK = 75MHz */
- } else if (freq == 50) {
+ else if (freq == 50)
ar_outl(244, PLDI2CFREQ); /* BCLK = 50MHz */
- } else {
+ else
ar_outl(244, PLDI2CFREQ); /* default: BCLK = 50MHz */
- }
ar_outl(0x1, PLDI2CCR); /* I2CCR Enable */
}
@@ -245,7 +246,7 @@ static inline void clear_dma_status(void)
ar_outl(0x8000, M32R_DMAEDET_PORTL); /* clear status */
}
-static inline void wait_for_vertical_sync(int exp_line)
+static void wait_for_vertical_sync(struct ar *ar, int exp_line)
{
#if CHECK_LOST
int tmout = 10000; /* FIXME */
@@ -260,7 +261,7 @@ static inline void wait_for_vertical_sync(int exp_line)
break;
}
if (tmout < 0)
- printk("arv: lost %d -> %d\n", exp_line, l);
+ v4l2_err(&ar->v4l2_dev, "lost %d -> %d\n", exp_line, l);
#else
while (ar_inl(ARVHCOUNT) != exp_line)
cpu_relax();
@@ -269,15 +270,14 @@ static inline void wait_for_vertical_sync(int exp_line)
static ssize_t ar_read(struct file *file, char *buf, size_t count, loff_t *ppos)
{
- struct video_device *v = video_devdata(file);
- struct ar_device *ar = video_get_drvdata(v);
+ struct ar *ar = video_drvdata(file);
long ret = ar->frame_bytes; /* return read bytes */
unsigned long arvcr1 = 0;
unsigned long flags;
unsigned char *p;
int h, w;
unsigned char *py, *pu, *pv;
-#if ! USE_INT
+#if !USE_INT
int l;
#endif
@@ -305,7 +305,7 @@ static ssize_t ar_read(struct file *file, char *buf, size_t count, loff_t *ppos)
ar_outl(ar->line_bytes, M32R_DMA0RBCUT_PORTL); /* reload count (bytes) */
/*
- * Okey , kicks AR LSI to invoke an interrupt
+ * Okay, kick AR LSI to invoke an interrupt
*/
ar->start_capture = 0;
ar_outl(arvcr1 | ARVCR1_HIEN, ARVCR1);
@@ -313,7 +313,7 @@ static ssize_t ar_read(struct file *file, char *buf, size_t count, loff_t *ppos)
/* .... AR interrupts .... */
interruptible_sleep_on(&ar->wait);
if (signal_pending(current)) {
- printk("arv: interrupted while get frame data.\n");
+ printk(KERN_ERR "arv: interrupted while get frame data.\n");
ret = -EINTR;
goto out_up;
}
@@ -334,7 +334,7 @@ static ssize_t ar_read(struct file *file, char *buf, size_t count, loff_t *ppos)
cpu_relax();
if (ar->mode == AR_MODE_INTERLACE && ar->size == AR_SIZE_VGA) {
for (h = 0; h < ar->height; h++) {
- wait_for_vertical_sync(h);
+ wait_for_vertical_sync(ar, h);
if (h < (AR_HEIGHT_VGA/2))
l = h << 1;
else
@@ -349,7 +349,7 @@ static ssize_t ar_read(struct file *file, char *buf, size_t count, loff_t *ppos)
}
} else {
for (h = 0; h < ar->height; h++) {
- wait_for_vertical_sync(h);
+ wait_for_vertical_sync(ar, h);
ar_outl(virt_to_phys(ar->frame[h]), M32R_DMA0CDA_PORTL);
enable_dma();
while (!(ar_inl(M32R_DMAEDET_PORTL) & 0x8000))
@@ -386,7 +386,7 @@ static ssize_t ar_read(struct file *file, char *buf, size_t count, loff_t *ppos)
}
}
if (copy_to_user(buf, yuv, ar->frame_bytes)) {
- printk("arv: failed while copy_to_user yuv.\n");
+ v4l2_err(&ar->v4l2_dev, "failed while copy_to_user yuv.\n");
ret = -EFAULT;
goto out_up;
}
@@ -396,153 +396,127 @@ out_up:
return ret;
}
-static long ar_do_ioctl(struct file *file, unsigned int cmd, void *arg)
+static int ar_querycap(struct file *file, void *priv,
+ struct v4l2_capability *vcap)
{
- struct video_device *dev = video_devdata(file);
- struct ar_device *ar = video_get_drvdata(dev);
-
- DEBUG(1, "ar_ioctl()\n");
- switch(cmd) {
- case VIDIOCGCAP:
- {
- struct video_capability *b = arg;
- DEBUG(1, "VIDIOCGCAP:\n");
- strcpy(b->name, ar->vdev->name);
- b->type = VID_TYPE_CAPTURE;
- b->channels = 0;
- b->audios = 0;
- b->maxwidth = MAX_AR_WIDTH;
- b->maxheight = MAX_AR_HEIGHT;
- b->minwidth = MIN_AR_WIDTH;
- b->minheight = MIN_AR_HEIGHT;
- return 0;
- }
- case VIDIOCGCHAN:
- DEBUG(1, "VIDIOCGCHAN:\n");
- return 0;
- case VIDIOCSCHAN:
- DEBUG(1, "VIDIOCSCHAN:\n");
- return 0;
- case VIDIOCGTUNER:
- DEBUG(1, "VIDIOCGTUNER:\n");
- return 0;
- case VIDIOCSTUNER:
- DEBUG(1, "VIDIOCSTUNER:\n");
- return 0;
- case VIDIOCGPICT:
- DEBUG(1, "VIDIOCGPICT:\n");
- return 0;
- case VIDIOCSPICT:
- DEBUG(1, "VIDIOCSPICT:\n");
- return 0;
- case VIDIOCCAPTURE:
- DEBUG(1, "VIDIOCCAPTURE:\n");
+ struct ar *ar = video_drvdata(file);
+
+ strlcpy(vcap->driver, ar->vdev.name, sizeof(vcap->driver));
+ strlcpy(vcap->card, "Colour AR VGA", sizeof(vcap->card));
+ strlcpy(vcap->bus_info, "Platform", sizeof(vcap->bus_info));
+ vcap->version = KERNEL_VERSION(0, 0, 4);
+ vcap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE;
+ return 0;
+}
+
+static int ar_enum_input(struct file *file, void *fh, struct v4l2_input *vin)
+{
+ if (vin->index > 0)
return -EINVAL;
- case VIDIOCGWIN:
- {
- struct video_window *w = arg;
- DEBUG(1, "VIDIOCGWIN:\n");
- memset(w, 0, sizeof(*w));
- w->width = ar->width;
- w->height = ar->height;
- return 0;
+ strlcpy(vin->name, "Camera", sizeof(vin->name));
+ vin->type = V4L2_INPUT_TYPE_CAMERA;
+ vin->audioset = 0;
+ vin->tuner = 0;
+ vin->std = V4L2_STD_ALL;
+ vin->status = 0;
+ return 0;
+}
+
+static int ar_g_input(struct file *file, void *fh, unsigned int *inp)
+{
+ *inp = 0;
+ return 0;
+}
+
+static int ar_s_input(struct file *file, void *fh, unsigned int inp)
+{
+ return inp ? -EINVAL : 0;
+}
+
+static int ar_g_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fmt)
+{
+ struct ar *ar = video_drvdata(file);
+ struct v4l2_pix_format *pix = &fmt->fmt.pix;
+
+ pix->width = ar->width;
+ pix->height = ar->height;
+ pix->pixelformat = V4L2_PIX_FMT_YUV422P;
+ pix->field = (ar->mode == AR_MODE_NORMAL) ? V4L2_FIELD_NONE : V4L2_FIELD_INTERLACED;
+ pix->bytesperline = ar->width;
+ pix->sizeimage = 2 * ar->width * ar->height;
+ /* Just a guess */
+ pix->colorspace = V4L2_COLORSPACE_SMPTE170M;
+ return 0;
+}
+
+static int ar_try_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fmt)
+{
+ struct ar *ar = video_drvdata(file);
+ struct v4l2_pix_format *pix = &fmt->fmt.pix;
+
+ if (pix->height <= AR_HEIGHT_QVGA || pix->width <= AR_WIDTH_QVGA) {
+ pix->height = AR_HEIGHT_QVGA;
+ pix->width = AR_WIDTH_QVGA;
+ pix->field = V4L2_FIELD_INTERLACED;
+ } else {
+ pix->height = AR_HEIGHT_VGA;
+ pix->width = AR_WIDTH_VGA;
+ pix->field = vga_interlace ? V4L2_FIELD_INTERLACED : V4L2_FIELD_NONE;
}
- case VIDIOCSWIN:
- {
- struct video_window *w = arg;
- DEBUG(1, "VIDIOCSWIN:\n");
- if ((w->width != AR_WIDTH_VGA || w->height != AR_HEIGHT_VGA) &&
- (w->width != AR_WIDTH_QVGA || w->height != AR_HEIGHT_QVGA))
- return -EINVAL;
-
- mutex_lock(&ar->lock);
- ar->width = w->width;
- ar->height = w->height;
- if (ar->width == AR_WIDTH_VGA) {
- ar->size = AR_SIZE_VGA;
- ar->frame_bytes = AR_FRAME_BYTES_VGA;
- ar->line_bytes = AR_LINE_BYTES_VGA;
- if (vga_interlace)
- ar->mode = AR_MODE_INTERLACE;
- else
- ar->mode = AR_MODE_NORMAL;
- } else {
- ar->size = AR_SIZE_QVGA;
- ar->frame_bytes = AR_FRAME_BYTES_QVGA;
- ar->line_bytes = AR_LINE_BYTES_QVGA;
+ pix->pixelformat = V4L2_PIX_FMT_YUV422P;
+ pix->bytesperline = ar->width;
+ pix->sizeimage = 2 * ar->width * ar->height;
+ /* Just a guess */
+ pix->colorspace = V4L2_COLORSPACE_SMPTE170M;
+ return 0;
+}
+
+static int ar_s_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fmt)
+{
+ struct ar *ar = video_drvdata(file);
+ struct v4l2_pix_format *pix = &fmt->fmt.pix;
+ int ret = ar_try_fmt_vid_cap(file, fh, fmt);
+
+ if (ret)
+ return ret;
+ mutex_lock(&ar->lock);
+ ar->width = pix->width;
+ ar->height = pix->height;
+ if (ar->width == AR_WIDTH_VGA) {
+ ar->size = AR_SIZE_VGA;
+ ar->frame_bytes = AR_FRAME_BYTES_VGA;
+ ar->line_bytes = AR_LINE_BYTES_VGA;
+ if (vga_interlace)
ar->mode = AR_MODE_INTERLACE;
- }
- mutex_unlock(&ar->lock);
- return 0;
- }
- case VIDIOCGFBUF:
- DEBUG(1, "VIDIOCGFBUF:\n");
- return -EINVAL;
- case VIDIOCSFBUF:
- DEBUG(1, "VIDIOCSFBUF:\n");
- return -EINVAL;
- case VIDIOCKEY:
- DEBUG(1, "VIDIOCKEY:\n");
- return 0;
- case VIDIOCGFREQ:
- DEBUG(1, "VIDIOCGFREQ:\n");
- return -EINVAL;
- case VIDIOCSFREQ:
- DEBUG(1, "VIDIOCSFREQ:\n");
- return -EINVAL;
- case VIDIOCGAUDIO:
- DEBUG(1, "VIDIOCGAUDIO:\n");
- return -EINVAL;
- case VIDIOCSAUDIO:
- DEBUG(1, "VIDIOCSAUDIO:\n");
- return -EINVAL;
- case VIDIOCSYNC:
- DEBUG(1, "VIDIOCSYNC:\n");
- return -EINVAL;
- case VIDIOCMCAPTURE:
- DEBUG(1, "VIDIOCMCAPTURE:\n");
- return -EINVAL;
- case VIDIOCGMBUF:
- DEBUG(1, "VIDIOCGMBUF:\n");
- return -EINVAL;
- case VIDIOCGUNIT:
- DEBUG(1, "VIDIOCGUNIT:\n");
- return -EINVAL;
- case VIDIOCGCAPTURE:
- DEBUG(1, "VIDIOCGCAPTURE:\n");
- return -EINVAL;
- case VIDIOCSCAPTURE:
- DEBUG(1, "VIDIOCSCAPTURE:\n");
- return -EINVAL;
- case VIDIOCSPLAYMODE:
- DEBUG(1, "VIDIOCSPLAYMODE:\n");
- return -EINVAL;
- case VIDIOCSWRITEMODE:
- DEBUG(1, "VIDIOCSWRITEMODE:\n");
- return -EINVAL;
- case VIDIOCGPLAYINFO:
- DEBUG(1, "VIDIOCGPLAYINFO:\n");
- return -EINVAL;
- case VIDIOCSMICROCODE:
- DEBUG(1, "VIDIOCSMICROCODE:\n");
- return -EINVAL;
- case VIDIOCGVBIFMT:
- DEBUG(1, "VIDIOCGVBIFMT:\n");
- return -EINVAL;
- case VIDIOCSVBIFMT:
- DEBUG(1, "VIDIOCSVBIFMT:\n");
- return -EINVAL;
- default:
- DEBUG(1, "Unknown ioctl(0x%08x)\n", cmd);
- return -ENOIOCTLCMD;
+ else
+ ar->mode = AR_MODE_NORMAL;
+ } else {
+ ar->size = AR_SIZE_QVGA;
+ ar->frame_bytes = AR_FRAME_BYTES_QVGA;
+ ar->line_bytes = AR_LINE_BYTES_QVGA;
+ ar->mode = AR_MODE_INTERLACE;
}
+ /* Ok we figured out what to use from our wide choice */
+ mutex_unlock(&ar->lock);
return 0;
}
-static long ar_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
+static int ar_enum_fmt_vid_cap(struct file *file, void *fh, struct v4l2_fmtdesc *fmt)
{
- return video_usercopy(file, cmd, arg, ar_do_ioctl);
+ static struct v4l2_fmtdesc formats[] = {
+ { 0, 0, 0,
+ "YUV 4:2:2 Planar", V4L2_PIX_FMT_YUV422P,
+ { 0, 0, 0, 0 }
+ },
+ };
+ enum v4l2_buf_type type = fmt->type;
+
+ if (fmt->index > 0)
+ return -EINVAL;
+
+ *fmt = formats[fmt->index];
+ fmt->type = type;
+ return 0;
}
#if USE_INT
@@ -551,7 +525,7 @@ static long ar_ioctl(struct file *file, unsigned int cmd,
*/
static void ar_interrupt(int irq, void *dev)
{
- struct ar_device *ar = dev;
+ struct ar *ar = dev;
unsigned int line_count;
unsigned int line_number;
unsigned int arvcr1;
@@ -559,11 +533,11 @@ static void ar_interrupt(int irq, void *dev)
line_count = ar_inl(ARVHCOUNT); /* line number */
if (ar->mode == AR_MODE_INTERLACE && ar->size == AR_SIZE_VGA) {
/* operations for interlace mode */
- if ( line_count < (AR_HEIGHT_VGA/2) ) /* even line */
+ if (line_count < (AR_HEIGHT_VGA / 2)) /* even line */
line_number = (line_count << 1);
else /* odd line */
line_number =
- (((line_count - (AR_HEIGHT_VGA/2)) << 1) + 1);
+ (((line_count - (AR_HEIGHT_VGA / 2)) << 1) + 1);
} else {
line_number = line_count;
}
@@ -623,11 +597,10 @@ static void ar_interrupt(int irq, void *dev)
* 0 is returned in success.
*
*/
-static int ar_initialize(struct video_device *dev)
+static int ar_initialize(struct ar *ar)
{
- struct ar_device *ar = video_get_drvdata(dev);
unsigned long cr = 0;
- int i,found=0;
+ int i, found = 0;
DEBUG(1, "ar_initialize:\n");
@@ -666,129 +639,119 @@ static int ar_initialize(struct video_device *dev)
if (found == 0)
return -ENODEV;
- printk("arv: Initializing ");
-
- iic(2,0x78,0x11,0x01,0x00); /* start */
- iic(3,0x78,0x12,0x00,0x06);
- iic(3,0x78,0x12,0x12,0x30);
- iic(3,0x78,0x12,0x15,0x58);
- iic(3,0x78,0x12,0x17,0x30);
- printk(".");
- iic(3,0x78,0x12,0x1a,0x97);
- iic(3,0x78,0x12,0x1b,0xff);
- iic(3,0x78,0x12,0x1c,0xff);
- iic(3,0x78,0x12,0x26,0x10);
- iic(3,0x78,0x12,0x27,0x00);
- printk(".");
- iic(2,0x78,0x34,0x02,0x00);
- iic(2,0x78,0x7a,0x10,0x00);
- iic(2,0x78,0x80,0x39,0x00);
- iic(2,0x78,0x81,0xe6,0x00);
- iic(2,0x78,0x8d,0x00,0x00);
- printk(".");
- iic(2,0x78,0x8e,0x0c,0x00);
- iic(2,0x78,0x8f,0x00,0x00);
+ v4l2_info(&ar->v4l2_dev, "Initializing ");
+
+ iic(2, 0x78, 0x11, 0x01, 0x00); /* start */
+ iic(3, 0x78, 0x12, 0x00, 0x06);
+ iic(3, 0x78, 0x12, 0x12, 0x30);
+ iic(3, 0x78, 0x12, 0x15, 0x58);
+ iic(3, 0x78, 0x12, 0x17, 0x30);
+ printk(KERN_CONT ".");
+ iic(3, 0x78, 0x12, 0x1a, 0x97);
+ iic(3, 0x78, 0x12, 0x1b, 0xff);
+ iic(3, 0x78, 0x12, 0x1c, 0xff);
+ iic(3, 0x78, 0x12, 0x26, 0x10);
+ iic(3, 0x78, 0x12, 0x27, 0x00);
+ printk(KERN_CONT ".");
+ iic(2, 0x78, 0x34, 0x02, 0x00);
+ iic(2, 0x78, 0x7a, 0x10, 0x00);
+ iic(2, 0x78, 0x80, 0x39, 0x00);
+ iic(2, 0x78, 0x81, 0xe6, 0x00);
+ iic(2, 0x78, 0x8d, 0x00, 0x00);
+ printk(KERN_CONT ".");
+ iic(2, 0x78, 0x8e, 0x0c, 0x00);
+ iic(2, 0x78, 0x8f, 0x00, 0x00);
#if 0
- iic(2,0x78,0x90,0x00,0x00); /* AWB on=1 off=0 */
+ iic(2, 0x78, 0x90, 0x00, 0x00); /* AWB on=1 off=0 */
#endif
- iic(2,0x78,0x93,0x01,0x00);
- iic(2,0x78,0x94,0xcd,0x00);
- iic(2,0x78,0x95,0x00,0x00);
- printk(".");
- iic(2,0x78,0x96,0xa0,0x00);
- iic(2,0x78,0x97,0x00,0x00);
- iic(2,0x78,0x98,0x60,0x00);
- iic(2,0x78,0x99,0x01,0x00);
- iic(2,0x78,0x9a,0x19,0x00);
- printk(".");
- iic(2,0x78,0x9b,0x02,0x00);
- iic(2,0x78,0x9c,0xe8,0x00);
- iic(2,0x78,0x9d,0x02,0x00);
- iic(2,0x78,0x9e,0x2e,0x00);
- iic(2,0x78,0xb8,0x78,0x00);
- iic(2,0x78,0xba,0x05,0x00);
+ iic(2, 0x78, 0x93, 0x01, 0x00);
+ iic(2, 0x78, 0x94, 0xcd, 0x00);
+ iic(2, 0x78, 0x95, 0x00, 0x00);
+ printk(KERN_CONT ".");
+ iic(2, 0x78, 0x96, 0xa0, 0x00);
+ iic(2, 0x78, 0x97, 0x00, 0x00);
+ iic(2, 0x78, 0x98, 0x60, 0x00);
+ iic(2, 0x78, 0x99, 0x01, 0x00);
+ iic(2, 0x78, 0x9a, 0x19, 0x00);
+ printk(KERN_CONT ".");
+ iic(2, 0x78, 0x9b, 0x02, 0x00);
+ iic(2, 0x78, 0x9c, 0xe8, 0x00);
+ iic(2, 0x78, 0x9d, 0x02, 0x00);
+ iic(2, 0x78, 0x9e, 0x2e, 0x00);
+ iic(2, 0x78, 0xb8, 0x78, 0x00);
+ iic(2, 0x78, 0xba, 0x05, 0x00);
#if 0
- iic(2,0x78,0x83,0x8c,0x00); /* brightness */
+ iic(2, 0x78, 0x83, 0x8c, 0x00); /* brightness */
#endif
- printk(".");
+ printk(KERN_CONT ".");
/* color correction */
- iic(3,0x78,0x49,0x00,0x95); /* a */
- iic(3,0x78,0x49,0x01,0x96); /* b */
- iic(3,0x78,0x49,0x03,0x85); /* c */
- iic(3,0x78,0x49,0x04,0x97); /* d */
- iic(3,0x78,0x49,0x02,0x7e); /* e(Lo) */
- iic(3,0x78,0x49,0x05,0xa4); /* f(Lo) */
- iic(3,0x78,0x49,0x06,0x04); /* e(Hi) */
- iic(3,0x78,0x49,0x07,0x04); /* e(Hi) */
- iic(2,0x78,0x48,0x01,0x00); /* on=1 off=0 */
-
- printk(".");
- iic(2,0x78,0x11,0x00,0x00); /* end */
- printk(" done\n");
+ iic(3, 0x78, 0x49, 0x00, 0x95); /* a */
+ iic(3, 0x78, 0x49, 0x01, 0x96); /* b */
+ iic(3, 0x78, 0x49, 0x03, 0x85); /* c */
+ iic(3, 0x78, 0x49, 0x04, 0x97); /* d */
+ iic(3, 0x78, 0x49, 0x02, 0x7e); /* e(Lo) */
+ iic(3, 0x78, 0x49, 0x05, 0xa4); /* f(Lo) */
+ iic(3, 0x78, 0x49, 0x06, 0x04); /* e(Hi) */
+ iic(3, 0x78, 0x49, 0x07, 0x04); /* e(Hi) */
+ iic(2, 0x78, 0x48, 0x01, 0x00); /* on=1 off=0 */
+
+ printk(KERN_CONT ".");
+ iic(2, 0x78, 0x11, 0x00, 0x00); /* end */
+ printk(KERN_CONT " done\n");
return 0;
}
-void ar_release(struct video_device *vfd)
-{
- struct ar_device *ar = video_get_drvdata(vfd);
- mutex_lock(&ar->lock);
- video_device_release(vfd);
-}
-
/****************************************************************************
*
* Video4Linux Module functions
*
****************************************************************************/
-static struct ar_device ardev;
-
-static int ar_exclusive_open(struct file *file)
-{
- return test_and_set_bit(0, &ardev.in_use) ? -EBUSY : 0;
-}
-
-static int ar_exclusive_release(struct file *file)
-{
- clear_bit(0, &ardev.in_use);
- return 0;
-}
static const struct v4l2_file_operations ar_fops = {
.owner = THIS_MODULE,
- .open = ar_exclusive_open,
- .release = ar_exclusive_release,
.read = ar_read,
- .ioctl = ar_ioctl,
+ .ioctl = video_ioctl2,
};
-static struct video_device ar_template = {
- .name = "Colour AR VGA",
- .fops = &ar_fops,
- .release = ar_release,
+static const struct v4l2_ioctl_ops ar_ioctl_ops = {
+ .vidioc_querycap = ar_querycap,
+ .vidioc_g_input = ar_g_input,
+ .vidioc_s_input = ar_s_input,
+ .vidioc_enum_input = ar_enum_input,
+ .vidioc_enum_fmt_vid_cap = ar_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = ar_g_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = ar_s_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = ar_try_fmt_vid_cap,
};
#define ALIGN4(x) ((((int)(x)) & 0x3) == 0)
static int __init ar_init(void)
{
- struct ar_device *ar;
+ struct ar *ar;
+ struct v4l2_device *v4l2_dev;
int ret;
int i;
- DEBUG(1, "ar_init:\n");
- ret = -EIO;
- printk(KERN_INFO "arv: Colour AR VGA driver %s\n", VERSION);
-
ar = &ardev;
- memset(ar, 0, sizeof(struct ar_device));
+ v4l2_dev = &ar->v4l2_dev;
+ strlcpy(v4l2_dev->name, "arv", sizeof(v4l2_dev->name));
+ v4l2_info(v4l2_dev, "Colour AR VGA driver %s\n", VERSION);
+
+ ret = v4l2_device_register(NULL, v4l2_dev);
+ if (ret < 0) {
+ v4l2_err(v4l2_dev, "Could not register v4l2_device\n");
+ return ret;
+ }
+ ret = -EIO;
#if USE_INT
/* allocate a DMA buffer for 1 line. */
ar->line_buff = kmalloc(MAX_AR_LINE_BYTES, GFP_KERNEL | GFP_DMA);
- if (ar->line_buff == NULL || ! ALIGN4(ar->line_buff)) {
- printk("arv: buffer allocation failed for DMA.\n");
+ if (ar->line_buff == NULL || !ALIGN4(ar->line_buff)) {
+ v4l2_err(v4l2_dev, "buffer allocation failed for DMA.\n");
ret = -ENOMEM;
goto out_end;
}
@@ -796,20 +759,19 @@ static int __init ar_init(void)
/* allocate buffers for a frame */
for (i = 0; i < MAX_AR_HEIGHT; i++) {
ar->frame[i] = kmalloc(MAX_AR_LINE_BYTES, GFP_KERNEL);
- if (ar->frame[i] == NULL || ! ALIGN4(ar->frame[i])) {
- printk("arv: buffer allocation failed for frame.\n");
+ if (ar->frame[i] == NULL || !ALIGN4(ar->frame[i])) {
+ v4l2_err(v4l2_dev, "buffer allocation failed for frame.\n");
ret = -ENOMEM;
goto out_line_buff;
}
}
- ar->vdev = video_device_alloc();
- if (!ar->vdev) {
- printk(KERN_ERR "arv: video_device_alloc() failed\n");
- return -ENOMEM;
- }
- memcpy(ar->vdev, &ar_template, sizeof(ar_template));
- video_set_drvdata(ar->vdev, ar);
+ strlcpy(ar->vdev.name, "Colour AR VGA", sizeof(ar->vdev.name));
+ ar->vdev.v4l2_dev = v4l2_dev;
+ ar->vdev.fops = &ar_fops;
+ ar->vdev.ioctl_ops = &ar_ioctl_ops;
+ ar->vdev.release = video_device_release_empty;
+ video_set_drvdata(&ar->vdev, ar);
if (vga) {
ar->width = AR_WIDTH_VGA;
@@ -834,14 +796,14 @@ static int __init ar_init(void)
#if USE_INT
if (request_irq(M32R_IRQ_INT3, ar_interrupt, 0, "arv", ar)) {
- printk("arv: request_irq(%d) failed.\n", M32R_IRQ_INT3);
+ v4l2_err("request_irq(%d) failed.\n", M32R_IRQ_INT3);
ret = -EIO;
goto out_irq;
}
#endif
- if (ar_initialize(ar->vdev) != 0) {
- printk("arv: M64278 not found.\n");
+ if (ar_initialize(ar) != 0) {
+ v4l2_err(v4l2_dev, "M64278 not found.\n");
ret = -ENODEV;
goto out_dev;
}
@@ -852,15 +814,15 @@ static int __init ar_init(void)
* device is named "video[0-64]".
* video_register_device() initializes h/w using ar_initialize().
*/
- if (video_register_device(ar->vdev, VFL_TYPE_GRABBER, video_nr) != 0) {
+ if (video_register_device(&ar->vdev, VFL_TYPE_GRABBER, video_nr) != 0) {
/* return -1, -ENFILE(full) or others */
- printk("arv: register video (Colour AR) failed.\n");
+ v4l2_err(v4l2_dev, "register video (Colour AR) failed.\n");
ret = -ENODEV;
goto out_dev;
}
- printk("%s: Found M64278 VGA (IRQ %d, Freq %dMHz).\n",
- video_device_node_name(ar->vdev), M32R_IRQ_INT3, freq);
+ v4l2_info(v4l2_dev, "%s: Found M64278 VGA (IRQ %d, Freq %dMHz).\n",
+ video_device_node_name(&ar->vdev), M32R_IRQ_INT3, freq);
return 0;
@@ -879,6 +841,7 @@ out_line_buff:
out_end:
#endif
+ v4l2_device_unregister(&ar->v4l2_dev);
return ret;
}
@@ -886,7 +849,7 @@ out_end:
static int __init ar_init_module(void)
{
freq = (boot_cpu_data.bus_clock / 1000000);
- printk("arv: Bus clock %d\n", freq);
+ printk(KERN_INFO "arv: Bus clock %d\n", freq);
if (freq != 50 && freq != 75)
freq = DEFAULT_FREQ;
return ar_init();
@@ -894,11 +857,11 @@ static int __init ar_init_module(void)
static void __exit ar_cleanup_module(void)
{
- struct ar_device *ar;
+ struct ar *ar;
int i;
ar = &ardev;
- video_unregister_device(ar->vdev);
+ video_unregister_device(&ar->vdev);
#if USE_INT
free_irq(M32R_IRQ_INT3, ar);
#endif
@@ -907,6 +870,7 @@ static void __exit ar_cleanup_module(void)
#if USE_INT
kfree(ar->line_buff);
#endif
+ v4l2_device_unregister(&ar->v4l2_dev);
}
module_init(ar_init_module);
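The arv.c conversion above replaces the hand-rolled V4L1 ioctl switch with per-ioctl handlers in a v4l2_ioctl_ops table dispatched by video_ioctl2(), and embeds the video_device in a driver struct registered under a v4l2_device. A small userspace sketch of what the converted interface answers (illustrative only; "/dev/video0" is an assumption, the actual node depends on video_nr and registration order):

/* Queries the capability structure filled in by ar_querycap() above. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_capability cap;
	int fd = open("/dev/video0", O_RDONLY);

	if (fd < 0 || ioctl(fd, VIDIOC_QUERYCAP, &cap) < 0)
		return 1;
	printf("driver=%s card=%s caps=0x%08x\n",
	       (char *)cap.driver, (char *)cap.card, cap.capabilities);
	return 0;
}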
diff --git a/drivers/media/video/au0828/au0828-video.c b/drivers/media/video/au0828/au0828-video.c
index 8c140c01c5e6..52f25aabb6dc 100644
--- a/drivers/media/video/au0828/au0828-video.c
+++ b/drivers/media/video/au0828/au0828-video.c
@@ -177,7 +177,7 @@ void au0828_uninit_isoc(struct au0828_dev *dev)
usb_unlink_urb(urb);
if (dev->isoc_ctl.transfer_buffer[i]) {
- usb_buffer_free(dev->usbdev,
+ usb_free_coherent(dev->usbdev,
urb->transfer_buffer_length,
dev->isoc_ctl.transfer_buffer[i],
urb->transfer_dma);
@@ -247,7 +247,7 @@ int au0828_init_isoc(struct au0828_dev *dev, int max_packets,
}
dev->isoc_ctl.urb[i] = urb;
- dev->isoc_ctl.transfer_buffer[i] = usb_buffer_alloc(dev->usbdev,
+ dev->isoc_ctl.transfer_buffer[i] = usb_alloc_coherent(dev->usbdev,
sb_size, GFP_KERNEL, &urb->transfer_dma);
if (!dev->isoc_ctl.transfer_buffer[i]) {
printk("unable to allocate %i bytes for transfer"
@@ -1105,7 +1105,7 @@ static int vidioc_enum_input(struct file *file, void *priv,
tmp = input->index;
- if (tmp > AU0828_MAX_INPUT)
+ if (tmp >= AU0828_MAX_INPUT)
return -EINVAL;
if (AUVI_INPUT(tmp).type == 0)
return -EINVAL;
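Two unrelated fixes in the au0828 hunks above: usb_buffer_alloc()/usb_buffer_free() were renamed to usb_alloc_coherent()/usb_free_coherent() (same arguments, same DMA-coherent semantics), and the input enumeration now rejects an index equal to AU0828_MAX_INPUT instead of letting it through. A hedged sketch of the renamed allocation pairing (not part of the patch; the wrapper names are made up for illustration):

#include <linux/usb.h>

/* Allocate and release a DMA-coherent transfer buffer; the same size and
 * dma handle must be handed back at free time, exactly as in the isoc
 * code above.
 */
static void *example_alloc(struct usb_device *udev, size_t size,
			   dma_addr_t *dma)
{
	return usb_alloc_coherent(udev, size, GFP_KERNEL, dma);
}

static void example_free(struct usb_device *udev, size_t size,
			 void *buf, dma_addr_t dma)
{
	usb_free_coherent(udev, size, buf, dma);
}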
diff --git a/drivers/media/video/bt8xx/bttv-cards.c b/drivers/media/video/bt8xx/bttv-cards.c
index 716870ae85d5..7af56cde0c79 100644
--- a/drivers/media/video/bt8xx/bttv-cards.c
+++ b/drivers/media/video/bt8xx/bttv-cards.c
@@ -241,6 +241,10 @@ static struct CARD {
{ 0xa1550101, BTTV_BOARD_IVC200, "IVC-200G" },
{ 0xa1550102, BTTV_BOARD_IVC200, "IVC-200G" },
{ 0xa1550103, BTTV_BOARD_IVC200, "IVC-200G" },
+ { 0xa1550800, BTTV_BOARD_IVC200, "IVC-200" },
+ { 0xa1550801, BTTV_BOARD_IVC200, "IVC-200" },
+ { 0xa1550802, BTTV_BOARD_IVC200, "IVC-200" },
+ { 0xa1550803, BTTV_BOARD_IVC200, "IVC-200" },
{ 0xa182ff00, BTTV_BOARD_IVC120, "IVC-120G" },
{ 0xa182ff01, BTTV_BOARD_IVC120, "IVC-120G" },
{ 0xa182ff02, BTTV_BOARD_IVC120, "IVC-120G" },
diff --git a/drivers/media/video/bt8xx/bttv-driver.c b/drivers/media/video/bt8xx/bttv-driver.c
index f4860f03dfc3..38c7f78ad9cf 100644
--- a/drivers/media/video/bt8xx/bttv-driver.c
+++ b/drivers/media/video/bt8xx/bttv-driver.c
@@ -1525,7 +1525,7 @@ static int bttv_s_ctrl(struct file *file, void *f,
struct bttv_fh *fh = f;
struct bttv *btv = fh->btv;
- err = v4l2_prio_check(&btv->prio, &fh->prio);
+ err = v4l2_prio_check(&btv->prio, fh->prio);
if (0 != err)
return err;
@@ -1806,8 +1806,8 @@ buffer_setup(struct videobuf_queue *q, unsigned int *count, unsigned int *size)
*size = fh->fmt->depth*fh->width*fh->height >> 3;
if (0 == *count)
*count = gbuffers;
- while (*size * *count > gbuffers * gbufsize)
- (*count)--;
+ if (*size * *count > gbuffers * gbufsize)
+ *count = (gbuffers * gbufsize) / *size;
return 0;
}
@@ -1859,7 +1859,7 @@ static int bttv_s_std(struct file *file, void *priv, v4l2_std_id *id)
unsigned int i;
int err;
- err = v4l2_prio_check(&btv->prio, &fh->prio);
+ err = v4l2_prio_check(&btv->prio, fh->prio);
if (0 != err)
return err;
@@ -1941,7 +1941,7 @@ static int bttv_s_input(struct file *file, void *priv, unsigned int i)
int err;
- err = v4l2_prio_check(&btv->prio, &fh->prio);
+ err = v4l2_prio_check(&btv->prio, fh->prio);
if (0 != err)
return err;
@@ -1961,7 +1961,7 @@ static int bttv_s_tuner(struct file *file, void *priv,
struct bttv *btv = fh->btv;
int err;
- err = v4l2_prio_check(&btv->prio, &fh->prio);
+ err = v4l2_prio_check(&btv->prio, fh->prio);
if (0 != err)
return err;
@@ -1987,11 +1987,6 @@ static int bttv_g_frequency(struct file *file, void *priv,
{
struct bttv_fh *fh = priv;
struct bttv *btv = fh->btv;
- int err;
-
- err = v4l2_prio_check(&btv->prio, &fh->prio);
- if (0 != err)
- return err;
f->type = btv->radio_user ? V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
f->frequency = btv->freq;
@@ -2006,7 +2001,7 @@ static int bttv_s_frequency(struct file *file, void *priv,
struct bttv *btv = fh->btv;
int err;
- err = v4l2_prio_check(&btv->prio, &fh->prio);
+ err = v4l2_prio_check(&btv->prio, fh->prio);
if (0 != err)
return err;
@@ -3029,7 +3024,7 @@ static int bttv_s_crop(struct file *file, void *f, struct v4l2_crop *crop)
crop->type != V4L2_BUF_TYPE_VIDEO_OVERLAY)
return -EINVAL;
- retval = v4l2_prio_check(&btv->prio, &fh->prio);
+ retval = v4l2_prio_check(&btv->prio, fh->prio);
if (0 != retval)
return retval;
@@ -3241,7 +3236,7 @@ static int bttv_open(struct file *file)
*fh = btv->init;
fh->type = type;
fh->ov.setup_ok = 0;
- v4l2_prio_open(&btv->prio,&fh->prio);
+ v4l2_prio_open(&btv->prio, &fh->prio);
videobuf_queue_sg_init(&fh->cap, &bttv_video_qops,
&btv->c.pci->dev, &btv->s_lock,
@@ -3312,7 +3307,7 @@ static int bttv_release(struct file *file)
/* free stuff */
videobuf_mmap_free(&fh->cap);
videobuf_mmap_free(&fh->vbi);
- v4l2_prio_close(&btv->prio,&fh->prio);
+ v4l2_prio_close(&btv->prio, fh->prio);
file->private_data = NULL;
kfree(fh);
@@ -3449,7 +3444,7 @@ static int radio_release(struct file *file)
struct bttv *btv = fh->btv;
struct rds_command cmd;
- v4l2_prio_close(&btv->prio,&fh->prio);
+ v4l2_prio_close(&btv->prio, fh->prio);
file->private_data = NULL;
kfree(fh);
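Besides passing fh->prio to v4l2_prio_check() by value, the buffer_setup() hunk above replaces the one-at-a-time decrement with a single division whenever the requested buffers would exceed gbuffers * gbufsize. A hedged equivalence check (plain userspace C, the numbers are assumptions rather than values from a real card):

#include <assert.h>

/* Old behaviour: walk count down until it fits under the limit. */
static unsigned int clamp_loop(unsigned int size, unsigned int count,
			       unsigned int limit)
{
	while (size * count > limit)
		count--;
	return count;
}

/* New behaviour: compute the same bound directly. */
static unsigned int clamp_div(unsigned int size, unsigned int count,
			      unsigned int limit)
{
	if (size * count > limit)
		count = limit / size;
	return count;
}

int main(void)
{
	/* e.g. 8 buffers of 0x208000 bytes each, a 0x300000-byte frame */
	assert(clamp_loop(0x300000, 8, 8 * 0x208000) == 5);
	assert(clamp_div(0x300000, 8, 8 * 0x208000) == 5);
	return 0;
}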
diff --git a/drivers/media/video/bt8xx/bttv-input.c b/drivers/media/video/bt8xx/bttv-input.c
index aa153a986ade..f68717a4bdec 100644
--- a/drivers/media/video/bt8xx/bttv-input.c
+++ b/drivers/media/video/bt8xx/bttv-input.c
@@ -49,6 +49,8 @@ module_param(ir_rc5_key_timeout, int, 0644);
#define DEVNAME "bttv-input"
+#define MODULE_NAME "bttv"
+
/* ---------------------------------------------------------------------- */
static void ir_handle_key(struct bttv *btv)
@@ -246,7 +248,7 @@ static void bttv_ir_stop(struct bttv *btv)
int bttv_input_init(struct bttv *btv)
{
struct card_ir *ir;
- struct ir_scancode_table *ir_codes = NULL;
+ char *ir_codes = NULL;
struct input_dev *input_dev;
u64 ir_type = IR_TYPE_OTHER;
int err = -ENOMEM;
@@ -264,7 +266,7 @@ int bttv_input_init(struct bttv *btv)
case BTTV_BOARD_AVERMEDIA:
case BTTV_BOARD_AVPHONE98:
case BTTV_BOARD_AVERMEDIA98:
- ir_codes = &ir_codes_avermedia_table;
+ ir_codes = RC_MAP_AVERMEDIA;
ir->mask_keycode = 0xf88000;
ir->mask_keydown = 0x010000;
ir->polling = 50; // ms
@@ -272,14 +274,14 @@ int bttv_input_init(struct bttv *btv)
case BTTV_BOARD_AVDVBT_761:
case BTTV_BOARD_AVDVBT_771:
- ir_codes = &ir_codes_avermedia_dvbt_table;
+ ir_codes = RC_MAP_AVERMEDIA_DVBT;
ir->mask_keycode = 0x0f00c0;
ir->mask_keydown = 0x000020;
ir->polling = 50; // ms
break;
case BTTV_BOARD_PXELVWPLTVPAK:
- ir_codes = &ir_codes_pixelview_table;
+ ir_codes = RC_MAP_PIXELVIEW;
ir->mask_keycode = 0x003e00;
ir->mask_keyup = 0x010000;
ir->polling = 50; // ms
@@ -287,24 +289,24 @@ int bttv_input_init(struct bttv *btv)
case BTTV_BOARD_PV_M4900:
case BTTV_BOARD_PV_BT878P_9B:
case BTTV_BOARD_PV_BT878P_PLUS:
- ir_codes = &ir_codes_pixelview_table;
+ ir_codes = RC_MAP_PIXELVIEW;
ir->mask_keycode = 0x001f00;
ir->mask_keyup = 0x008000;
ir->polling = 50; // ms
break;
case BTTV_BOARD_WINFAST2000:
- ir_codes = &ir_codes_winfast_table;
+ ir_codes = RC_MAP_WINFAST;
ir->mask_keycode = 0x1f8;
break;
case BTTV_BOARD_MAGICTVIEW061:
case BTTV_BOARD_MAGICTVIEW063:
- ir_codes = &ir_codes_winfast_table;
+ ir_codes = RC_MAP_WINFAST;
ir->mask_keycode = 0x0008e000;
ir->mask_keydown = 0x00200000;
break;
case BTTV_BOARD_APAC_VIEWCOMP:
- ir_codes = &ir_codes_apac_viewcomp_table;
+ ir_codes = RC_MAP_APAC_VIEWCOMP;
ir->mask_keycode = 0x001f00;
ir->mask_keyup = 0x008000;
ir->polling = 50; // ms
@@ -312,30 +314,30 @@ int bttv_input_init(struct bttv *btv)
case BTTV_BOARD_ASKEY_CPH03X:
case BTTV_BOARD_CONCEPTRONIC_CTVFMI2:
case BTTV_BOARD_CONTVFMI:
- ir_codes = &ir_codes_pixelview_table;
+ ir_codes = RC_MAP_PIXELVIEW;
ir->mask_keycode = 0x001F00;
ir->mask_keyup = 0x006000;
ir->polling = 50; // ms
break;
case BTTV_BOARD_NEBULA_DIGITV:
- ir_codes = &ir_codes_nebula_table;
+ ir_codes = RC_MAP_NEBULA;
btv->custom_irq = bttv_rc5_irq;
ir->rc5_gpio = 1;
break;
case BTTV_BOARD_MACHTV_MAGICTV:
- ir_codes = &ir_codes_apac_viewcomp_table;
+ ir_codes = RC_MAP_APAC_VIEWCOMP;
ir->mask_keycode = 0x001F00;
ir->mask_keyup = 0x004000;
ir->polling = 50; /* ms */
break;
case BTTV_BOARD_KOZUMI_KTV_01C:
- ir_codes = &ir_codes_pctv_sedna_table;
+ ir_codes = RC_MAP_PCTV_SEDNA;
ir->mask_keycode = 0x001f00;
ir->mask_keyup = 0x006000;
ir->polling = 50; /* ms */
break;
case BTTV_BOARD_ENLTV_FM_2:
- ir_codes = &ir_codes_encore_enltv2_table;
+ ir_codes = RC_MAP_ENCORE_ENLTV2;
ir->mask_keycode = 0x00fd00;
ir->mask_keyup = 0x000080;
ir->polling = 1; /* ms */
@@ -390,7 +392,7 @@ int bttv_input_init(struct bttv *btv)
bttv_ir_start(btv, ir);
/* all done */
- err = ir_input_register(btv->remote->dev, ir_codes, NULL);
+ err = ir_input_register(btv->remote->dev, ir_codes, NULL, MODULE_NAME);
if (err)
goto err_out_stop;
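The bttv-input hunks above drop the compiled-in struct ir_scancode_table pointers in favour of RC_MAP_* keymap names, which ir_input_register() resolves by name, and add a MODULE_NAME argument identifying the registering driver. A minimal restatement of the resulting pattern (a fragment mirroring the calls above, not standalone code):

	char *ir_codes = RC_MAP_WINFAST;	/* keymap chosen per board */

	err = ir_input_register(btv->remote->dev, ir_codes, NULL, MODULE_NAME);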
diff --git a/drivers/media/video/bw-qcam.c b/drivers/media/video/bw-qcam.c
index 9e39bc5f7b00..3c9e754d73a0 100644
--- a/drivers/media/video/bw-qcam.c
+++ b/drivers/media/video/bw-qcam.c
@@ -80,8 +80,8 @@ OTHER DEALINGS IN THE SOFTWARE.
#include "bw-qcam.h"
-static unsigned int maxpoll=250; /* Maximum busy-loop count for qcam I/O */
-static unsigned int yieldlines=4; /* Yield after this many during capture */
+static unsigned int maxpoll = 250; /* Maximum busy-loop count for qcam I/O */
+static unsigned int yieldlines = 4; /* Yield after this many during capture */
static int video_nr = -1;
static unsigned int force_init; /* Whether to probe aggressively */
@@ -156,7 +156,7 @@ static int qc_calibrate(struct qcam_device *q)
mdelay(1);
schedule();
count++;
- } while (value == 0xff && count<2048);
+ } while (value == 0xff && count < 2048);
q->whitebal = value;
return value;
@@ -170,16 +170,15 @@ static struct qcam_device *qcam_init(struct parport *port)
struct qcam_device *q;
q = kmalloc(sizeof(struct qcam_device), GFP_KERNEL);
- if(q==NULL)
+ if (q == NULL)
return NULL;
q->pport = port;
q->pdev = parport_register_device(port, "bw-qcam", NULL, NULL,
- NULL, 0, NULL);
- if (q->pdev == NULL)
- {
+ NULL, 0, NULL);
+ if (q->pdev == NULL) {
printk(KERN_ERR "bw-qcam: couldn't register for %s.\n",
- port->name);
+ port->name);
kfree(q);
return NULL;
}
@@ -247,12 +246,10 @@ static int qc_readparam(struct qcam_device *q)
static int qc_waithand(struct qcam_device *q, int val)
{
int status;
- int runs=0;
+ int runs = 0;
- if (val)
- {
- while (!((status = read_lpstatus(q)) & 8))
- {
+ if (val) {
+ while (!((status = read_lpstatus(q)) & 8)) {
/* 1000 is enough spins on the I/O for all normal
cases, at that point we start to poll slowly
until the camera wakes up. However, we are
@@ -260,18 +257,13 @@ static int qc_waithand(struct qcam_device *q, int val)
setting it lower is much better for interactive
response. */
- if(runs++>maxpoll)
- {
+ if (runs++ > maxpoll)
msleep_interruptible(5);
- }
- if(runs>(maxpoll+1000)) /* 5 seconds */
+ if (runs > (maxpoll + 1000)) /* 5 seconds */
return -1;
}
- }
- else
- {
- while (((status = read_lpstatus(q)) & 8))
- {
+ } else {
+ while (((status = read_lpstatus(q)) & 8)) {
/* 1000 is enough spins on the I/O for all normal
cases, at that point we start to poll slowly
until the camera wakes up. However, we are
@@ -279,11 +271,9 @@ static int qc_waithand(struct qcam_device *q, int val)
setting it lower is much better for interactive
response. */
- if(runs++>maxpoll)
- {
+ if (runs++ > maxpoll)
msleep_interruptible(5);
- }
- if(runs++>(maxpoll+1000)) /* 5 seconds */
+ if (runs++ > (maxpoll + 1000)) /* 5 seconds */
return -1;
}
}
@@ -299,10 +289,9 @@ static int qc_waithand(struct qcam_device *q, int val)
static unsigned int qc_waithand2(struct qcam_device *q, int val)
{
unsigned int status;
- int runs=0;
+ int runs = 0;
- do
- {
+ do {
status = read_lpdata(q);
/* 1000 is enough spins on the I/O for all normal
cases, at that point we start to poll slowly
@@ -311,14 +300,11 @@ static unsigned int qc_waithand2(struct qcam_device *q, int val)
setting it lower is much better for interactive
response. */
- if(runs++>maxpoll)
- {
+ if (runs++ > maxpoll)
msleep_interruptible(5);
- }
- if(runs++>(maxpoll+1000)) /* 5 seconds */
+ if (runs++ > (maxpoll + 1000)) /* 5 seconds */
return 0;
- }
- while ((status & 1) != val);
+ } while ((status & 1) != val);
return status;
}
@@ -342,8 +328,7 @@ static int qc_detect(struct qcam_device *q)
lastreg = reg = read_lpstatus(q) & 0xf0;
- for (i = 0; i < 500; i++)
- {
+ for (i = 0; i < 500; i++) {
reg = read_lpstatus(q) & 0xf0;
if (reg != lastreg)
count++;
@@ -357,7 +342,7 @@ static int qc_detect(struct qcam_device *q)
won't be flashing these bits. Possibly unloading the module
in the middle of a grab? Or some timeout condition?
I've seen this parameter as low as 19 on my 450Mhz box - mpc */
- printk("Debugging: QCam detection counter <30-200 counts as detected>: %d\n", count);
+ printk(KERN_DEBUG "Debugging: QCam detection counter <30-200 counts as detected>: %d\n", count);
return 1;
#endif
@@ -367,7 +352,7 @@ static int qc_detect(struct qcam_device *q)
return 1; /* found */
} else {
printk(KERN_ERR "No Quickcam found on port %s\n",
- q->pport->name);
+ q->pport->name);
printk(KERN_DEBUG "Quickcam detection counter: %u\n", count);
return 0; /* not found */
}
@@ -381,26 +366,24 @@ static int qc_detect(struct qcam_device *q)
static void qc_reset(struct qcam_device *q)
{
- switch (q->port_mode & QC_FORCE_MASK)
- {
- case QC_FORCE_UNIDIR:
- q->port_mode = (q->port_mode & ~QC_MODE_MASK) | QC_UNIDIR;
- break;
+ switch (q->port_mode & QC_FORCE_MASK) {
+ case QC_FORCE_UNIDIR:
+ q->port_mode = (q->port_mode & ~QC_MODE_MASK) | QC_UNIDIR;
+ break;
- case QC_FORCE_BIDIR:
- q->port_mode = (q->port_mode & ~QC_MODE_MASK) | QC_BIDIR;
- break;
+ case QC_FORCE_BIDIR:
+ q->port_mode = (q->port_mode & ~QC_MODE_MASK) | QC_BIDIR;
+ break;
- case QC_ANY:
- write_lpcontrol(q, 0x20);
- write_lpdata(q, 0x75);
+ case QC_ANY:
+ write_lpcontrol(q, 0x20);
+ write_lpdata(q, 0x75);
- if (read_lpdata(q) != 0x75) {
- q->port_mode = (q->port_mode & ~QC_MODE_MASK) | QC_BIDIR;
- } else {
- q->port_mode = (q->port_mode & ~QC_MODE_MASK) | QC_UNIDIR;
- }
- break;
+ if (read_lpdata(q) != 0x75)
+ q->port_mode = (q->port_mode & ~QC_MODE_MASK) | QC_BIDIR;
+ else
+ q->port_mode = (q->port_mode & ~QC_MODE_MASK) | QC_UNIDIR;
+ break;
}
write_lpcontrol(q, 0xb);
@@ -423,36 +406,33 @@ static int qc_setscanmode(struct qcam_device *q)
{
int old_mode = q->mode;
- switch (q->transfer_scale)
- {
- case 1:
- q->mode = 0;
- break;
- case 2:
- q->mode = 4;
- break;
- case 4:
- q->mode = 8;
- break;
+ switch (q->transfer_scale) {
+ case 1:
+ q->mode = 0;
+ break;
+ case 2:
+ q->mode = 4;
+ break;
+ case 4:
+ q->mode = 8;
+ break;
}
- switch (q->bpp)
- {
- case 4:
- break;
- case 6:
- q->mode += 2;
- break;
+ switch (q->bpp) {
+ case 4:
+ break;
+ case 6:
+ q->mode += 2;
+ break;
}
- switch (q->port_mode & QC_MODE_MASK)
- {
- case QC_BIDIR:
- q->mode += 1;
- break;
- case QC_NOTSET:
- case QC_UNIDIR:
- break;
+ switch (q->port_mode & QC_MODE_MASK) {
+ case QC_BIDIR:
+ q->mode += 1;
+ break;
+ case QC_NOTSET:
+ case QC_UNIDIR:
+ break;
}
if (q->mode != old_mode)
@@ -493,7 +473,7 @@ static void qc_set(struct qcam_device *q)
} else {
val = q->width * q->bpp;
val2 = (((q->port_mode & QC_MODE_MASK) == QC_BIDIR) ? 24 : 8) *
- q->transfer_scale;
+ q->transfer_scale;
}
val = DIV_ROUND_UP(val, val2);
qc_command(q, 0x13);
@@ -521,85 +501,80 @@ static void qc_set(struct qcam_device *q)
static inline int qc_readbytes(struct qcam_device *q, char buffer[])
{
- int ret=1;
+ int ret = 1;
unsigned int hi, lo;
unsigned int hi2, lo2;
static int state;
- if (buffer == NULL)
- {
+ if (buffer == NULL) {
state = 0;
return 0;
}
- switch (q->port_mode & QC_MODE_MASK)
- {
- case QC_BIDIR: /* Bi-directional Port */
- write_lpcontrol(q, 0x26);
- lo = (qc_waithand2(q, 1) >> 1);
- hi = (read_lpstatus(q) >> 3) & 0x1f;
- write_lpcontrol(q, 0x2e);
- lo2 = (qc_waithand2(q, 0) >> 1);
- hi2 = (read_lpstatus(q) >> 3) & 0x1f;
- switch (q->bpp)
- {
- case 4:
- buffer[0] = lo & 0xf;
- buffer[1] = ((lo & 0x70) >> 4) | ((hi & 1) << 3);
- buffer[2] = (hi & 0x1e) >> 1;
- buffer[3] = lo2 & 0xf;
- buffer[4] = ((lo2 & 0x70) >> 4) | ((hi2 & 1) << 3);
- buffer[5] = (hi2 & 0x1e) >> 1;
- ret = 6;
- break;
- case 6:
- buffer[0] = lo & 0x3f;
- buffer[1] = ((lo & 0x40) >> 6) | (hi << 1);
- buffer[2] = lo2 & 0x3f;
- buffer[3] = ((lo2 & 0x40) >> 6) | (hi2 << 1);
- ret = 4;
- break;
- }
+ switch (q->port_mode & QC_MODE_MASK) {
+ case QC_BIDIR: /* Bi-directional Port */
+ write_lpcontrol(q, 0x26);
+ lo = (qc_waithand2(q, 1) >> 1);
+ hi = (read_lpstatus(q) >> 3) & 0x1f;
+ write_lpcontrol(q, 0x2e);
+ lo2 = (qc_waithand2(q, 0) >> 1);
+ hi2 = (read_lpstatus(q) >> 3) & 0x1f;
+ switch (q->bpp) {
+ case 4:
+ buffer[0] = lo & 0xf;
+ buffer[1] = ((lo & 0x70) >> 4) | ((hi & 1) << 3);
+ buffer[2] = (hi & 0x1e) >> 1;
+ buffer[3] = lo2 & 0xf;
+ buffer[4] = ((lo2 & 0x70) >> 4) | ((hi2 & 1) << 3);
+ buffer[5] = (hi2 & 0x1e) >> 1;
+ ret = 6;
+ break;
+ case 6:
+ buffer[0] = lo & 0x3f;
+ buffer[1] = ((lo & 0x40) >> 6) | (hi << 1);
+ buffer[2] = lo2 & 0x3f;
+ buffer[3] = ((lo2 & 0x40) >> 6) | (hi2 << 1);
+ ret = 4;
break;
+ }
+ break;
+
+ case QC_UNIDIR: /* Unidirectional Port */
+ write_lpcontrol(q, 6);
+ lo = (qc_waithand(q, 1) & 0xf0) >> 4;
+ write_lpcontrol(q, 0xe);
+ hi = (qc_waithand(q, 0) & 0xf0) >> 4;
- case QC_UNIDIR: /* Unidirectional Port */
- write_lpcontrol(q, 6);
- lo = (qc_waithand(q, 1) & 0xf0) >> 4;
- write_lpcontrol(q, 0xe);
- hi = (qc_waithand(q, 0) & 0xf0) >> 4;
-
- switch (q->bpp)
- {
- case 4:
- buffer[0] = lo;
- buffer[1] = hi;
- ret = 2;
- break;
- case 6:
- switch (state)
- {
- case 0:
- buffer[0] = (lo << 2) | ((hi & 0xc) >> 2);
- q->saved_bits = (hi & 3) << 4;
- state = 1;
- ret = 1;
- break;
- case 1:
- buffer[0] = lo | q->saved_bits;
- q->saved_bits = hi << 2;
- state = 2;
- ret = 1;
- break;
- case 2:
- buffer[0] = ((lo & 0xc) >> 2) | q->saved_bits;
- buffer[1] = ((lo & 3) << 4) | hi;
- state = 0;
- ret = 2;
- break;
- }
- break;
+ switch (q->bpp) {
+ case 4:
+ buffer[0] = lo;
+ buffer[1] = hi;
+ ret = 2;
+ break;
+ case 6:
+ switch (state) {
+ case 0:
+ buffer[0] = (lo << 2) | ((hi & 0xc) >> 2);
+ q->saved_bits = (hi & 3) << 4;
+ state = 1;
+ ret = 1;
+ break;
+ case 1:
+ buffer[0] = lo | q->saved_bits;
+ q->saved_bits = hi << 2;
+ state = 2;
+ ret = 1;
+ break;
+ case 2:
+ buffer[0] = ((lo & 0xc) >> 2) | q->saved_bits;
+ buffer[1] = ((lo & 3) << 4) | hi;
+ state = 0;
+ ret = 2;
+ break;
}
break;
+ }
+ break;
}
return ret;
}
@@ -615,7 +590,7 @@ static inline int qc_readbytes(struct qcam_device *q, char buffer[])
* n=2^(bit depth)-1. Ask me for more details if you don't understand
* this. */
-static long qc_capture(struct qcam_device * q, char __user *buf, unsigned long len)
+static long qc_capture(struct qcam_device *q, char __user *buf, unsigned long len)
{
int i, j, k, yield;
int bytes;
@@ -623,9 +598,9 @@ static long qc_capture(struct qcam_device * q, char __user *buf, unsigned long l
int divisor;
int pixels_per_line;
int pixels_read = 0;
- int got=0;
+ int got = 0;
char buffer[6];
- int shift=8-q->bpp;
+ int shift = 8 - q->bpp;
char invert;
if (q->mode == -1)
@@ -634,13 +609,12 @@ static long qc_capture(struct qcam_device * q, char __user *buf, unsigned long l
qc_command(q, 0x7);
qc_command(q, q->mode);
- if ((q->port_mode & QC_MODE_MASK) == QC_BIDIR)
- {
+ if ((q->port_mode & QC_MODE_MASK) == QC_BIDIR) {
write_lpcontrol(q, 0x2e); /* turn port around */
write_lpcontrol(q, 0x26);
- (void) qc_waithand(q, 1);
+ qc_waithand(q, 1);
write_lpcontrol(q, 0x2e);
- (void) qc_waithand(q, 0);
+ qc_waithand(q, 0);
}
/* strange -- should be 15:63 below, but 4bpp is odd */
@@ -650,33 +624,28 @@ static long qc_capture(struct qcam_device * q, char __user *buf, unsigned long l
pixels_per_line = q->width / q->transfer_scale;
transperline = q->width * q->bpp;
divisor = (((q->port_mode & QC_MODE_MASK) == QC_BIDIR) ? 24 : 8) *
- q->transfer_scale;
+ q->transfer_scale;
transperline = DIV_ROUND_UP(transperline, divisor);
- for (i = 0, yield = yieldlines; i < linestotrans; i++)
- {
- for (pixels_read = j = 0; j < transperline; j++)
- {
+ for (i = 0, yield = yieldlines; i < linestotrans; i++) {
+ for (pixels_read = j = 0; j < transperline; j++) {
bytes = qc_readbytes(q, buffer);
- for (k = 0; k < bytes && (pixels_read + k) < pixels_per_line; k++)
- {
+ for (k = 0; k < bytes && (pixels_read + k) < pixels_per_line; k++) {
int o;
- if (buffer[k] == 0 && invert == 16)
- {
+ if (buffer[k] == 0 && invert == 16) {
/* 4bpp is odd (again) -- inverter is 16, not 15, but output
must be 0-15 -- bls */
buffer[k] = 16;
}
- o=i*pixels_per_line + pixels_read + k;
- if(o<len)
- {
+ o = i * pixels_per_line + pixels_read + k;
+ if (o < len) {
got++;
- put_user((invert - buffer[k])<<shift, buf+o);
+ put_user((invert - buffer[k]) << shift, buf + o);
}
}
pixels_read += bytes;
}
- (void) qc_readbytes(q, NULL); /* reset state machine */
+ qc_readbytes(q, NULL); /* reset state machine */
/* Grabbing an entire frame from the quickcam is a lengthy
process. We don't (usually) want to busy-block the
@@ -690,14 +659,13 @@ static long qc_capture(struct qcam_device * q, char __user *buf, unsigned long l
}
}
- if ((q->port_mode & QC_MODE_MASK) == QC_BIDIR)
- {
+ if ((q->port_mode & QC_MODE_MASK) == QC_BIDIR) {
write_lpcontrol(q, 2);
write_lpcontrol(q, 6);
udelay(3);
write_lpcontrol(q, 0xe);
}
- if(got<len)
+ if (got < len)
return got;
return len;
}
@@ -709,11 +677,10 @@ static long qc_capture(struct qcam_device * q, char __user *buf, unsigned long l
static long qcam_do_ioctl(struct file *file, unsigned int cmd, void *arg)
{
struct video_device *dev = video_devdata(file);
- struct qcam_device *qcam=(struct qcam_device *)dev;
+ struct qcam_device *qcam = (struct qcam_device *)dev;
- switch(cmd)
- {
- case VIDIOCGCAP:
+ switch (cmd) {
+ case VIDIOCGCAP:
{
struct video_capability *b = arg;
strcpy(b->name, "Quickcam");
@@ -726,73 +693,73 @@ static long qcam_do_ioctl(struct file *file, unsigned int cmd, void *arg)
b->minheight = 60;
return 0;
}
- case VIDIOCGCHAN:
+ case VIDIOCGCHAN:
{
struct video_channel *v = arg;
- if(v->channel!=0)
+ if (v->channel != 0)
return -EINVAL;
- v->flags=0;
- v->tuners=0;
+ v->flags = 0;
+ v->tuners = 0;
/* Good question.. its composite or SVHS so.. */
v->type = VIDEO_TYPE_CAMERA;
strcpy(v->name, "Camera");
return 0;
}
- case VIDIOCSCHAN:
+ case VIDIOCSCHAN:
{
struct video_channel *v = arg;
- if(v->channel!=0)
+ if (v->channel != 0)
return -EINVAL;
return 0;
}
- case VIDIOCGTUNER:
+ case VIDIOCGTUNER:
{
struct video_tuner *v = arg;
- if(v->tuner)
+ if (v->tuner)
return -EINVAL;
strcpy(v->name, "Format");
- v->rangelow=0;
- v->rangehigh=0;
- v->flags= 0;
+ v->rangelow = 0;
+ v->rangehigh = 0;
+ v->flags = 0;
v->mode = VIDEO_MODE_AUTO;
return 0;
}
- case VIDIOCSTUNER:
+ case VIDIOCSTUNER:
{
struct video_tuner *v = arg;
- if(v->tuner)
+ if (v->tuner)
return -EINVAL;
- if(v->mode!=VIDEO_MODE_AUTO)
+ if (v->mode != VIDEO_MODE_AUTO)
return -EINVAL;
return 0;
}
- case VIDIOCGPICT:
+ case VIDIOCGPICT:
{
struct video_picture *p = arg;
- p->colour=0x8000;
- p->hue=0x8000;
- p->brightness=qcam->brightness<<8;
- p->contrast=qcam->contrast<<8;
- p->whiteness=qcam->whitebal<<8;
- p->depth=qcam->bpp;
- p->palette=VIDEO_PALETTE_GREY;
+ p->colour = 0x8000;
+ p->hue = 0x8000;
+ p->brightness = qcam->brightness << 8;
+ p->contrast = qcam->contrast << 8;
+ p->whiteness = qcam->whitebal << 8;
+ p->depth = qcam->bpp;
+ p->palette = VIDEO_PALETTE_GREY;
return 0;
}
- case VIDIOCSPICT:
+ case VIDIOCSPICT:
{
struct video_picture *p = arg;
- if(p->palette!=VIDEO_PALETTE_GREY)
+ if (p->palette != VIDEO_PALETTE_GREY)
return -EINVAL;
- if(p->depth!=4 && p->depth!=6)
+ if (p->depth != 4 && p->depth != 6)
return -EINVAL;
/*
* Now load the camera.
*/
- qcam->brightness = p->brightness>>8;
- qcam->contrast = p->contrast>>8;
- qcam->whitebal = p->whiteness>>8;
+ qcam->brightness = p->brightness >> 8;
+ qcam->contrast = p->contrast >> 8;
+ qcam->whitebal = p->whiteness >> 8;
qcam->bpp = p->depth;
mutex_lock(&qcam->lock);
@@ -802,28 +769,25 @@ static long qcam_do_ioctl(struct file *file, unsigned int cmd, void *arg)
return 0;
}
- case VIDIOCSWIN:
+ case VIDIOCSWIN:
{
struct video_window *vw = arg;
- if(vw->flags)
+ if (vw->flags)
return -EINVAL;
- if(vw->clipcount)
+ if (vw->clipcount)
return -EINVAL;
- if(vw->height<60||vw->height>240)
+ if (vw->height < 60 || vw->height > 240)
return -EINVAL;
- if(vw->width<80||vw->width>320)
+ if (vw->width < 80 || vw->width > 320)
return -EINVAL;
qcam->width = 320;
qcam->height = 240;
qcam->transfer_scale = 4;
- if(vw->width>=160 && vw->height>=120)
- {
+ if (vw->width >= 160 && vw->height >= 120)
qcam->transfer_scale = 2;
- }
- if(vw->width>=320 && vw->height>=240)
- {
+ if (vw->width >= 320 && vw->height >= 240) {
qcam->width = 320;
qcam->height = 240;
qcam->transfer_scale = 1;
@@ -839,41 +803,42 @@ static long qcam_do_ioctl(struct file *file, unsigned int cmd, void *arg)
/* Ok we figured out what to use from our wide choice */
return 0;
}
- case VIDIOCGWIN:
+ case VIDIOCGWIN:
{
struct video_window *vw = arg;
+
memset(vw, 0, sizeof(*vw));
- vw->width=qcam->width/qcam->transfer_scale;
- vw->height=qcam->height/qcam->transfer_scale;
+ vw->width = qcam->width / qcam->transfer_scale;
+ vw->height = qcam->height / qcam->transfer_scale;
return 0;
}
- case VIDIOCKEY:
- return 0;
- case VIDIOCCAPTURE:
- case VIDIOCGFBUF:
- case VIDIOCSFBUF:
- case VIDIOCGFREQ:
- case VIDIOCSFREQ:
- case VIDIOCGAUDIO:
- case VIDIOCSAUDIO:
- return -EINVAL;
- default:
- return -ENOIOCTLCMD;
+ case VIDIOCKEY:
+ return 0;
+ case VIDIOCCAPTURE:
+ case VIDIOCGFBUF:
+ case VIDIOCSFBUF:
+ case VIDIOCGFREQ:
+ case VIDIOCSFREQ:
+ case VIDIOCGAUDIO:
+ case VIDIOCSAUDIO:
+ return -EINVAL;
+ default:
+ return -ENOIOCTLCMD;
}
return 0;
}
static long qcam_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg)
+ unsigned int cmd, unsigned long arg)
{
return video_usercopy(file, cmd, arg, qcam_do_ioctl);
}
static ssize_t qcam_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
+ size_t count, loff_t *ppos)
{
struct video_device *v = video_devdata(file);
- struct qcam_device *qcam=(struct qcam_device *)v;
+ struct qcam_device *qcam = (struct qcam_device *)v;
int len;
parport_claim_or_block(qcam->pdev);
@@ -885,7 +850,7 @@ static ssize_t qcam_read(struct file *file, char __user *buf,
if (qcam->status & QC_PARAM_CHANGE)
qc_set(qcam);
- len=qc_capture(qcam, buf,count);
+ len = qc_capture(qcam, buf, count);
mutex_unlock(&qcam->lock);
@@ -917,8 +882,7 @@ static const struct v4l2_file_operations qcam_fops = {
.ioctl = qcam_ioctl,
.read = qcam_read,
};
-static struct video_device qcam_template=
-{
+static struct video_device qcam_template = {
.name = "Connectix Quickcam",
.fops = &qcam_fops,
.release = video_device_release_empty,
@@ -932,22 +896,20 @@ static int init_bwqcam(struct parport *port)
{
struct qcam_device *qcam;
- if (num_cams == MAX_CAMS)
- {
+ if (num_cams == MAX_CAMS) {
printk(KERN_ERR "Too many Quickcams (max %d)\n", MAX_CAMS);
return -ENOSPC;
}
- qcam=qcam_init(port);
- if(qcam==NULL)
+ qcam = qcam_init(port);
+ if (qcam == NULL)
return -ENODEV;
parport_claim_or_block(qcam->pdev);
qc_reset(qcam);
- if(qc_detect(qcam)==0)
- {
+ if (qc_detect(qcam) == 0) {
parport_release(qcam->pdev);
parport_unregister_device(qcam->pdev);
kfree(qcam);
@@ -1045,12 +1007,12 @@ static int __init init_bw_qcams(void)
#ifdef MODULE
/* Do some sanity checks on the module parameters. */
if (maxpoll > 5000) {
- printk("Connectix Quickcam max-poll was above 5000. Using 5000.\n");
+ printk(KERN_INFO "Connectix Quickcam max-poll was above 5000. Using 5000.\n");
maxpoll = 5000;
}
if (yieldlines < 1) {
- printk("Connectix Quickcam yieldlines was less than 1. Using 1.\n");
+ printk(KERN_INFO "Connectix Quickcam yieldlines was less than 1. Using 1.\n");
yieldlines = 1;
}
#endif
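Most of the bw-qcam churn above is brace and whitespace cleanup, but the capture path deserves a worked example of what put_user((invert - buffer[k]) << shift, buf + o) produces. This is a hedged reading, since the invert assignments sit in the part of qc_capture() not shown in the hunk (the "15:63" comment and the 0-as-16 fixup imply invert is 16 at 4 bpp and 63 at 6 bpp): at 6 bpp, shift = 8 - 6 = 2, so a raw sample of 0 is written as (63 - 0) << 2 = 252 and a raw 63 as 0; at 4 bpp, shift = 4, the fixup turns a raw 0 into 16, which maps to 0, while a raw 1 becomes (16 - 1) << 4 = 240. Either way the samples come out inverted and left-justified in an 8-bit greyscale byte.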
diff --git a/drivers/media/video/c-qcam.c b/drivers/media/video/c-qcam.c
index e2cbebab959b..8f1dd88b32a6 100644
--- a/drivers/media/video/c-qcam.c
+++ b/drivers/media/video/c-qcam.c
@@ -79,17 +79,17 @@ static inline void qcam_set_ack(struct qcam_device *qcam, unsigned int i)
{
/* note: the QC specs refer to the PCAck pin by voltage, not
software level. PC ports have builtin inverters. */
- parport_frob_control(qcam->pport, 8, i?8:0);
+ parport_frob_control(qcam->pport, 8, i ? 8 : 0);
}
static inline unsigned int qcam_ready1(struct qcam_device *qcam)
{
- return (parport_read_status(qcam->pport) & 0x8)?1:0;
+ return (parport_read_status(qcam->pport) & 0x8) ? 1 : 0;
}
static inline unsigned int qcam_ready2(struct qcam_device *qcam)
{
- return (parport_read_data(qcam->pport) & 0x1)?1:0;
+ return (parport_read_data(qcam->pport) & 0x1) ? 1 : 0;
}
static unsigned int qcam_await_ready1(struct qcam_device *qcam,
@@ -99,14 +99,13 @@ static unsigned int qcam_await_ready1(struct qcam_device *qcam,
unsigned int i;
for (oldjiffies = jiffies;
- time_before(jiffies, oldjiffies + msecs_to_jiffies(40)); )
+ time_before(jiffies, oldjiffies + msecs_to_jiffies(40));)
if (qcam_ready1(qcam) == value)
return 0;
/* If the camera didn't respond within 1/25 second, poll slowly
for a while. */
- for (i = 0; i < 50; i++)
- {
+ for (i = 0; i < 50; i++) {
if (qcam_ready1(qcam) == value)
return 0;
msleep_interruptible(100);
@@ -125,14 +124,13 @@ static unsigned int qcam_await_ready2(struct qcam_device *qcam, int value)
unsigned int i;
for (oldjiffies = jiffies;
- time_before(jiffies, oldjiffies + msecs_to_jiffies(40)); )
+ time_before(jiffies, oldjiffies + msecs_to_jiffies(40));)
if (qcam_ready2(qcam) == value)
return 0;
/* If the camera didn't respond within 1/25 second, poll slowly
for a while. */
- for (i = 0; i < 50; i++)
- {
+ for (i = 0; i < 50; i++) {
if (qcam_ready2(qcam) == value)
return 0;
msleep_interruptible(100);
@@ -149,22 +147,25 @@ static unsigned int qcam_await_ready2(struct qcam_device *qcam, int value)
static int qcam_read_data(struct qcam_device *qcam)
{
unsigned int idata;
+
qcam_set_ack(qcam, 0);
- if (qcam_await_ready1(qcam, 1)) return -1;
+ if (qcam_await_ready1(qcam, 1))
+ return -1;
idata = parport_read_status(qcam->pport) & 0xf0;
qcam_set_ack(qcam, 1);
- if (qcam_await_ready1(qcam, 0)) return -1;
- idata |= (parport_read_status(qcam->pport) >> 4);
+ if (qcam_await_ready1(qcam, 0))
+ return -1;
+ idata |= parport_read_status(qcam->pport) >> 4;
return idata;
}
static int qcam_write_data(struct qcam_device *qcam, unsigned int data)
{
unsigned int idata;
+
parport_write_data(qcam->pport, data);
idata = qcam_read_data(qcam);
- if (data != idata)
- {
+ if (data != idata) {
printk(KERN_WARNING "cqcam: sent %x but received %x\n", data,
idata);
return 1;
@@ -212,13 +213,12 @@ static int qc_detect(struct qcam_device *qcam)
/* look for a heartbeat */
ostat = stat = parport_read_status(qcam->pport);
- for (i=0; i<250; i++)
- {
+ for (i = 0; i < 250; i++) {
mdelay(1);
stat = parport_read_status(qcam->pport);
- if (ostat != stat)
- {
- if (++count >= 3) return 1;
+ if (ostat != stat) {
+ if (++count >= 3)
+ return 1;
ostat = stat;
}
}
@@ -232,13 +232,12 @@ static int qc_detect(struct qcam_device *qcam)
count = 0;
ostat = stat = parport_read_status(qcam->pport);
- for (i=0; i<250; i++)
- {
+ for (i = 0; i < 250; i++) {
mdelay(1);
stat = parport_read_status(qcam->pport);
- if (ostat != stat)
- {
- if (++count >= 3) return 1;
+ if (ostat != stat) {
+ if (++count >= 3)
+ return 1;
ostat = stat;
}
}
@@ -263,7 +262,7 @@ static void qc_setup(struct qcam_device *q)
{
qc_reset(q);
- /* Set the brightness. */
+ /* Set the brightness. */
qcam_set(q, 11, q->brightness);
/* Set the height and width. These refer to the actual
@@ -292,25 +291,25 @@ static unsigned int qcam_read_bytes(struct qcam_device *q, unsigned char *buf, u
unsigned int bytes = 0;
qcam_set_ack(q, 0);
- if (q->bidirectional)
- {
+ if (q->bidirectional) {
/* It's a bidirectional port */
- while (bytes < nbytes)
- {
+ while (bytes < nbytes) {
unsigned int lo1, hi1, lo2, hi2;
unsigned char r, g, b;
- if (qcam_await_ready2(q, 1)) return bytes;
+ if (qcam_await_ready2(q, 1))
+ return bytes;
lo1 = parport_read_data(q->pport) >> 1;
hi1 = ((parport_read_status(q->pport) >> 3) & 0x1f) ^ 0x10;
qcam_set_ack(q, 1);
- if (qcam_await_ready2(q, 0)) return bytes;
+ if (qcam_await_ready2(q, 0))
+ return bytes;
lo2 = parport_read_data(q->pport) >> 1;
hi2 = ((parport_read_status(q->pport) >> 3) & 0x1f) ^ 0x10;
qcam_set_ack(q, 0);
- r = (lo1 | ((hi1 & 1)<<7));
- g = ((hi1 & 0x1e)<<3) | ((hi2 & 0x1e)>>1);
- b = (lo2 | ((hi2 & 1)<<7));
+ r = lo1 | ((hi1 & 1) << 7);
+ g = ((hi1 & 0x1e) << 3) | ((hi2 & 0x1e) >> 1);
+ b = lo2 | ((hi2 & 1) << 7);
if (force_rgb) {
buf[bytes++] = r;
buf[bytes++] = g;
@@ -321,21 +320,20 @@ static unsigned int qcam_read_bytes(struct qcam_device *q, unsigned char *buf, u
buf[bytes++] = r;
}
}
- }
- else
- {
+ } else {
/* It's a unidirectional port */
int i = 0, n = bytes;
unsigned char rgb[3];
- while (bytes < nbytes)
- {
+ while (bytes < nbytes) {
unsigned int hi, lo;
- if (qcam_await_ready1(q, 1)) return bytes;
+ if (qcam_await_ready1(q, 1))
+ return bytes;
hi = (parport_read_status(q->pport) & 0xf0);
qcam_set_ack(q, 1);
- if (qcam_await_ready1(q, 0)) return bytes;
+ if (qcam_await_ready1(q, 0))
+ return bytes;
lo = (parport_read_status(q->pport) & 0xf0);
qcam_set_ack(q, 0);
/* flip some bits */
@@ -374,28 +372,26 @@ static long qc_capture(struct qcam_device *q, char __user *buf, unsigned long le
return -EFAULT;
/* Wait for camera to become ready */
- for (;;)
- {
+ for (;;) {
int i = qcam_get(q, 41);
+
if (i == -1) {
qc_setup(q);
return -EIO;
}
if ((i & 0x80) == 0)
break;
- else
- schedule();
+ schedule();
}
- if (qcam_set(q, 7, (q->mode | (is_bi_dir?1:0)) + 1))
+ if (qcam_set(q, 7, (q->mode | (is_bi_dir ? 1 : 0)) + 1))
return -EIO;
lines = q->height;
pixelsperline = q->width;
bitsperxfer = (is_bi_dir) ? 24 : 8;
- if (is_bi_dir)
- {
+ if (is_bi_dir) {
/* Turn the port around */
parport_data_reverse(q->pport);
mdelay(3);
@@ -413,16 +409,17 @@ static long qc_capture(struct qcam_device *q, char __user *buf, unsigned long le
wantlen = lines * pixelsperline * 24 / 8;
- while (wantlen)
- {
+ while (wantlen) {
size_t t, s;
- s = (wantlen > BUFSZ)?BUFSZ:wantlen;
+
+ s = (wantlen > BUFSZ) ? BUFSZ : wantlen;
t = qcam_read_bytes(q, tmpbuf, s);
- if (outptr < len)
- {
+ if (outptr < len) {
size_t sz = len - outptr;
- if (sz > t) sz = t;
- if (__copy_to_user(buf+outptr, tmpbuf, sz))
+
+ if (sz > t)
+ sz = t;
+ if (__copy_to_user(buf + outptr, tmpbuf, sz))
break;
outptr += sz;
}
@@ -434,33 +431,31 @@ static long qc_capture(struct qcam_device *q, char __user *buf, unsigned long le
len = outptr;
- if (wantlen)
- {
- printk("qcam: short read.\n");
+ if (wantlen) {
+ printk(KERN_ERR "qcam: short read.\n");
if (is_bi_dir)
parport_data_forward(q->pport);
qc_setup(q);
return len;
}
- if (is_bi_dir)
- {
+ if (is_bi_dir) {
int l;
+
do {
l = qcam_read_bytes(q, tmpbuf, 3);
cond_resched();
} while (l && (tmpbuf[0] == 0x7e || tmpbuf[1] == 0x7e || tmpbuf[2] == 0x7e));
if (force_rgb) {
if (tmpbuf[0] != 0xe || tmpbuf[1] != 0x0 || tmpbuf[2] != 0xf)
- printk("qcam: bad EOF\n");
+ printk(KERN_ERR "qcam: bad EOF\n");
} else {
if (tmpbuf[0] != 0xf || tmpbuf[1] != 0x0 || tmpbuf[2] != 0xe)
- printk("qcam: bad EOF\n");
+ printk(KERN_ERR "qcam: bad EOF\n");
}
qcam_set_ack(q, 0);
- if (qcam_await_ready1(q, 1))
- {
- printk("qcam: no ack after EOF\n");
+ if (qcam_await_ready1(q, 1)) {
+ printk(KERN_ERR "qcam: no ack after EOF\n");
parport_data_forward(q->pport);
qc_setup(q);
return len;
@@ -468,27 +463,25 @@ static long qc_capture(struct qcam_device *q, char __user *buf, unsigned long le
parport_data_forward(q->pport);
mdelay(3);
qcam_set_ack(q, 1);
- if (qcam_await_ready1(q, 0))
- {
- printk("qcam: no ack to port turnaround\n");
+ if (qcam_await_ready1(q, 0)) {
+ printk(KERN_ERR "qcam: no ack to port turnaround\n");
qc_setup(q);
return len;
}
- }
- else
- {
+ } else {
int l;
+
do {
l = qcam_read_bytes(q, tmpbuf, 1);
cond_resched();
} while (l && tmpbuf[0] == 0x7e);
- l = qcam_read_bytes(q, tmpbuf+1, 2);
+ l = qcam_read_bytes(q, tmpbuf + 1, 2);
if (force_rgb) {
if (tmpbuf[0] != 0xe || tmpbuf[1] != 0x0 || tmpbuf[2] != 0xf)
- printk("qcam: bad EOF\n");
+ printk(KERN_ERR "qcam: bad EOF\n");
} else {
if (tmpbuf[0] != 0xf || tmpbuf[1] != 0x0 || tmpbuf[2] != 0xe)
- printk("qcam: bad EOF\n");
+ printk(KERN_ERR "qcam: bad EOF\n");
}
}
@@ -503,164 +496,166 @@ static long qc_capture(struct qcam_device *q, char __user *buf, unsigned long le
static long qcam_do_ioctl(struct file *file, unsigned int cmd, void *arg)
{
struct video_device *dev = video_devdata(file);
- struct qcam_device *qcam=(struct qcam_device *)dev;
+ struct qcam_device *qcam = (struct qcam_device *)dev;
- switch(cmd)
+ switch (cmd) {
+ case VIDIOCGCAP:
{
- case VIDIOCGCAP:
- {
- struct video_capability *b = arg;
- strcpy(b->name, "Quickcam");
- b->type = VID_TYPE_CAPTURE|VID_TYPE_SCALES;
- b->channels = 1;
- b->audios = 0;
- b->maxwidth = 320;
- b->maxheight = 240;
- b->minwidth = 80;
- b->minheight = 60;
- return 0;
- }
- case VIDIOCGCHAN:
- {
- struct video_channel *v = arg;
- if(v->channel!=0)
- return -EINVAL;
- v->flags=0;
- v->tuners=0;
- /* Good question.. its composite or SVHS so.. */
- v->type = VIDEO_TYPE_CAMERA;
- strcpy(v->name, "Camera");
- return 0;
- }
- case VIDIOCSCHAN:
- {
- struct video_channel *v = arg;
- if(v->channel!=0)
- return -EINVAL;
- return 0;
- }
- case VIDIOCGTUNER:
- {
- struct video_tuner *v = arg;
- if(v->tuner)
- return -EINVAL;
- memset(v,0,sizeof(*v));
- strcpy(v->name, "Format");
- v->mode = VIDEO_MODE_AUTO;
- return 0;
- }
- case VIDIOCSTUNER:
- {
- struct video_tuner *v = arg;
- if(v->tuner)
- return -EINVAL;
- if(v->mode!=VIDEO_MODE_AUTO)
- return -EINVAL;
- return 0;
- }
- case VIDIOCGPICT:
- {
- struct video_picture *p = arg;
- p->colour=0x8000;
- p->hue=0x8000;
- p->brightness=qcam->brightness<<8;
- p->contrast=qcam->contrast<<8;
- p->whiteness=qcam->whitebal<<8;
- p->depth=24;
- p->palette=VIDEO_PALETTE_RGB24;
- return 0;
+ struct video_capability *b = arg;
+
+ strcpy(b->name, "Quickcam");
+ b->type = VID_TYPE_CAPTURE | VID_TYPE_SCALES;
+ b->channels = 1;
+ b->audios = 0;
+ b->maxwidth = 320;
+ b->maxheight = 240;
+ b->minwidth = 80;
+ b->minheight = 60;
+ return 0;
+ }
+ case VIDIOCGCHAN:
+ {
+ struct video_channel *v = arg;
+
+ if (v->channel != 0)
+ return -EINVAL;
+ v->flags = 0;
+ v->tuners = 0;
+ /* Good question.. its composite or SVHS so.. */
+ v->type = VIDEO_TYPE_CAMERA;
+ strcpy(v->name, "Camera");
+ return 0;
+ }
+ case VIDIOCSCHAN:
+ {
+ struct video_channel *v = arg;
+
+ if (v->channel != 0)
+ return -EINVAL;
+ return 0;
+ }
+ case VIDIOCGTUNER:
+ {
+ struct video_tuner *v = arg;
+
+ if (v->tuner)
+ return -EINVAL;
+ memset(v, 0, sizeof(*v));
+ strcpy(v->name, "Format");
+ v->mode = VIDEO_MODE_AUTO;
+ return 0;
+ }
+ case VIDIOCSTUNER:
+ {
+ struct video_tuner *v = arg;
+
+ if (v->tuner)
+ return -EINVAL;
+ if (v->mode != VIDEO_MODE_AUTO)
+ return -EINVAL;
+ return 0;
+ }
+ case VIDIOCGPICT:
+ {
+ struct video_picture *p = arg;
+
+ p->colour = 0x8000;
+ p->hue = 0x8000;
+ p->brightness = qcam->brightness << 8;
+ p->contrast = qcam->contrast << 8;
+ p->whiteness = qcam->whitebal << 8;
+ p->depth = 24;
+ p->palette = VIDEO_PALETTE_RGB24;
+ return 0;
+ }
+ case VIDIOCSPICT:
+ {
+ struct video_picture *p = arg;
+
+ /*
+ * Sanity check args
+ */
+ if (p->depth != 24 || p->palette != VIDEO_PALETTE_RGB24)
+ return -EINVAL;
+
+ /*
+ * Now load the camera.
+ */
+ qcam->brightness = p->brightness >> 8;
+ qcam->contrast = p->contrast >> 8;
+ qcam->whitebal = p->whiteness >> 8;
+
+ mutex_lock(&qcam->lock);
+ parport_claim_or_block(qcam->pdev);
+ qc_setup(qcam);
+ parport_release(qcam->pdev);
+ mutex_unlock(&qcam->lock);
+ return 0;
+ }
+ case VIDIOCSWIN:
+ {
+ struct video_window *vw = arg;
+
+ if (vw->flags)
+ return -EINVAL;
+ if (vw->clipcount)
+ return -EINVAL;
+ if (vw->height < 60 || vw->height > 240)
+ return -EINVAL;
+ if (vw->width < 80 || vw->width > 320)
+ return -EINVAL;
+
+ qcam->width = 80;
+ qcam->height = 60;
+ qcam->mode = QC_DECIMATION_4;
+
+ if (vw->width >= 160 && vw->height >= 120) {
+ qcam->width = 160;
+ qcam->height = 120;
+ qcam->mode = QC_DECIMATION_2;
}
- case VIDIOCSPICT:
- {
- struct video_picture *p = arg;
-
- /*
- * Sanity check args
- */
- if (p->depth != 24 || p->palette != VIDEO_PALETTE_RGB24)
- return -EINVAL;
-
- /*
- * Now load the camera.
- */
- qcam->brightness = p->brightness>>8;
- qcam->contrast = p->contrast>>8;
- qcam->whitebal = p->whiteness>>8;
-
- mutex_lock(&qcam->lock);
- parport_claim_or_block(qcam->pdev);
- qc_setup(qcam);
- parport_release(qcam->pdev);
- mutex_unlock(&qcam->lock);
- return 0;
+ if (vw->width >= 320 && vw->height >= 240) {
+ qcam->width = 320;
+ qcam->height = 240;
+ qcam->mode = QC_DECIMATION_1;
}
- case VIDIOCSWIN:
- {
- struct video_window *vw = arg;
-
- if(vw->flags)
- return -EINVAL;
- if(vw->clipcount)
- return -EINVAL;
- if(vw->height<60||vw->height>240)
- return -EINVAL;
- if(vw->width<80||vw->width>320)
- return -EINVAL;
-
- qcam->width = 80;
- qcam->height = 60;
- qcam->mode = QC_DECIMATION_4;
-
- if(vw->width>=160 && vw->height>=120)
- {
- qcam->width = 160;
- qcam->height = 120;
- qcam->mode = QC_DECIMATION_2;
- }
- if(vw->width>=320 && vw->height>=240)
- {
- qcam->width = 320;
- qcam->height = 240;
- qcam->mode = QC_DECIMATION_1;
- }
- qcam->mode |= QC_MILLIONS;
+ qcam->mode |= QC_MILLIONS;
#if 0
- if(vw->width>=640 && vw->height>=480)
- {
- qcam->width = 640;
- qcam->height = 480;
- qcam->mode = QC_BILLIONS | QC_DECIMATION_1;
- }
-#endif
- /* Ok we figured out what to use from our
- wide choice */
- mutex_lock(&qcam->lock);
- parport_claim_or_block(qcam->pdev);
- qc_setup(qcam);
- parport_release(qcam->pdev);
- mutex_unlock(&qcam->lock);
- return 0;
- }
- case VIDIOCGWIN:
- {
- struct video_window *vw = arg;
- memset(vw, 0, sizeof(*vw));
- vw->width=qcam->width;
- vw->height=qcam->height;
- return 0;
+ if (vw->width >= 640 && vw->height >= 480) {
+ qcam->width = 640;
+ qcam->height = 480;
+ qcam->mode = QC_BILLIONS | QC_DECIMATION_1;
}
- case VIDIOCKEY:
- return 0;
- case VIDIOCCAPTURE:
- case VIDIOCGFBUF:
- case VIDIOCSFBUF:
- case VIDIOCGFREQ:
- case VIDIOCSFREQ:
- case VIDIOCGAUDIO:
- case VIDIOCSAUDIO:
- return -EINVAL;
- default:
- return -ENOIOCTLCMD;
+#endif
+ /* Ok we figured out what to use from our
+ wide choice */
+ mutex_lock(&qcam->lock);
+ parport_claim_or_block(qcam->pdev);
+ qc_setup(qcam);
+ parport_release(qcam->pdev);
+ mutex_unlock(&qcam->lock);
+ return 0;
+ }
+ case VIDIOCGWIN:
+ {
+ struct video_window *vw = arg;
+ memset(vw, 0, sizeof(*vw));
+ vw->width = qcam->width;
+ vw->height = qcam->height;
+ return 0;
+ }
+ case VIDIOCKEY:
+ return 0;
+ case VIDIOCCAPTURE:
+ case VIDIOCGFBUF:
+ case VIDIOCSFBUF:
+ case VIDIOCGFREQ:
+ case VIDIOCSFREQ:
+ case VIDIOCGAUDIO:
+ case VIDIOCSAUDIO:
+ return -EINVAL;
+ default:
+ return -ENOIOCTLCMD;
}
return 0;
}
@@ -675,13 +670,13 @@ static ssize_t qcam_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
struct video_device *v = video_devdata(file);
- struct qcam_device *qcam=(struct qcam_device *)v;
+ struct qcam_device *qcam = (struct qcam_device *)v;
int len;
mutex_lock(&qcam->lock);
parport_claim_or_block(qcam->pdev);
/* Probably should have a semaphore against multiple users */
- len = qc_capture(qcam, buf,count);
+ len = qc_capture(qcam, buf, count);
parport_release(qcam->pdev);
mutex_unlock(&qcam->lock);
return len;
@@ -713,8 +708,7 @@ static const struct v4l2_file_operations qcam_fops = {
.read = qcam_read,
};
-static struct video_device qcam_template=
-{
+static struct video_device qcam_template = {
.name = "Colour QuickCam",
.fops = &qcam_fops,
.release = video_device_release_empty,
@@ -727,17 +721,16 @@ static struct qcam_device *qcam_init(struct parport *port)
struct qcam_device *q;
q = kmalloc(sizeof(struct qcam_device), GFP_KERNEL);
- if(q==NULL)
+ if (q == NULL)
return NULL;
q->pport = port;
q->pdev = parport_register_device(port, "c-qcam", NULL, NULL,
NULL, 0, NULL);
- q->bidirectional = (q->pport->modes & PARPORT_MODE_TRISTATE)?1:0;
+ q->bidirectional = (q->pport->modes & PARPORT_MODE_TRISTATE) ? 1 : 0;
- if (q->pdev == NULL)
- {
+ if (q->pdev == NULL) {
printk(KERN_ERR "c-qcam: couldn't register for %s.\n",
port->name);
kfree(q);
@@ -765,12 +758,11 @@ static int init_cqcam(struct parport *port)
{
struct qcam_device *qcam;
- if (parport[0] != -1)
- {
+ if (parport[0] != -1) {
/* The user gave specific instructions */
int i, found = 0;
- for (i = 0; i < MAX_CAMS && parport[i] != -1; i++)
- {
+
+ for (i = 0; i < MAX_CAMS && parport[i] != -1; i++) {
if (parport[0] == port->number)
found = 1;
}
@@ -782,15 +774,14 @@ static int init_cqcam(struct parport *port)
return -ENOSPC;
qcam = qcam_init(port);
- if (qcam==NULL)
+ if (qcam == NULL)
return -ENODEV;
parport_claim_or_block(qcam->pdev);
qc_reset(qcam);
- if (probe && qc_detect(qcam)==0)
- {
+ if (probe && qc_detect(qcam) == 0) {
parport_release(qcam->pdev);
parport_unregister_device(qcam->pdev);
kfree(qcam);
@@ -840,14 +831,14 @@ static struct parport_driver cqcam_driver = {
.detach = cq_detach,
};
-static int __init cqcam_init (void)
+static int __init cqcam_init(void)
{
printk(BANNER "\n");
return parport_register_driver(&cqcam_driver);
}
-static void __exit cqcam_cleanup (void)
+static void __exit cqcam_cleanup(void)
{
unsigned int i;
@@ -862,9 +853,9 @@ MODULE_DESCRIPTION(BANNER);
MODULE_LICENSE("GPL");
/* FIXME: parport=auto would never have worked, surely? --RR */
-MODULE_PARM_DESC(parport ,"parport=<auto|n[,n]...> for port detection method\n\
-probe=<0|1|2> for camera detection method\n\
-force_rgb=<0|1> for RGB data format (default BGR)");
+MODULE_PARM_DESC(parport, "parport=<auto|n[,n]...> for port detection method\n"
+ "probe=<0|1|2> for camera detection method\n"
+ "force_rgb=<0|1> for RGB data format (default BGR)");
module_param_array(parport, int, NULL, 0);
module_param(probe, int, 0);
module_param(force_rgb, bool, 0);
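
The MODULE_PARM_DESC hunk above replaces backslash line continuations inside the string with adjacent string literals. Both forms compile to a single string; the difference is that with a spliced literal any leading whitespace on the continuation line becomes part of the text, which is why the old lines had to start in column 0. A minimal standalone illustration (not from the driver):

	static const char spliced[] = "line one\n\
line two";				/* continuation line cannot be indented */

	static const char concatenated[] = "line one\n"
					   "line two";	/* adjacent literals concatenate; indent freely */

Both definitions yield the same string, "line one\nline two".
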
diff --git a/drivers/media/video/cpia2/cpia2_v4l.c b/drivers/media/video/cpia2/cpia2_v4l.c
index 6f91415eb7b4..5520789854da 100644
--- a/drivers/media/video/cpia2/cpia2_v4l.c
+++ b/drivers/media/video/cpia2/cpia2_v4l.c
@@ -324,7 +324,7 @@ static int cpia2_close(struct file *file)
{
if(fh->mmapped)
cam->mmapped = 0;
- v4l2_prio_close(&cam->prio,&fh->prio);
+ v4l2_prio_close(&cam->prio, fh->prio);
file->private_data = NULL;
kfree(fh);
}
@@ -1592,7 +1592,7 @@ static long cpia2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
case VIDIOC_S_FMT:
{
struct cpia2_fh *fh = file->private_data;
- retval = v4l2_prio_check(&cam->prio, &fh->prio);
+ retval = v4l2_prio_check(&cam->prio, fh->prio);
if(retval) {
mutex_unlock(&cam->busy_lock);
return retval;
diff --git a/drivers/media/video/cx18/cx18-av-core.c b/drivers/media/video/cx18/cx18-av-core.c
index 4392c76af5df..0e5006b14279 100644
--- a/drivers/media/video/cx18/cx18-av-core.c
+++ b/drivers/media/video/cx18/cx18-av-core.c
@@ -579,6 +579,7 @@ static int set_input(struct cx18 *cx, enum cx18_av_video_input vid_input,
u8 afe_mux_cfg;
u8 adc2_cfg;
+ u8 input_mode;
u32 afe_cfg;
int i;
@@ -589,6 +590,30 @@ static int set_input(struct cx18 *cx, enum cx18_av_video_input vid_input,
vid_input <= CX18_AV_COMPOSITE8) {
afe_mux_cfg = 0xf0 + (vid_input - CX18_AV_COMPOSITE1);
ch[0] = CVBS;
+ input_mode = 0x0;
+ } else if (vid_input >= CX18_AV_COMPONENT_LUMA1) {
+ int luma = vid_input & 0xf000;
+ int r_chroma = vid_input & 0xf0000;
+ int b_chroma = vid_input & 0xf00000;
+
+ if ((vid_input & ~0xfff000) ||
+ luma < CX18_AV_COMPONENT_LUMA1 ||
+ luma > CX18_AV_COMPONENT_LUMA8 ||
+ r_chroma < CX18_AV_COMPONENT_R_CHROMA4 ||
+ r_chroma > CX18_AV_COMPONENT_R_CHROMA6 ||
+ b_chroma < CX18_AV_COMPONENT_B_CHROMA7 ||
+ b_chroma > CX18_AV_COMPONENT_B_CHROMA8) {
+ CX18_ERR_DEV(sd, "0x%06x is not a valid video input!\n",
+ vid_input);
+ return -EINVAL;
+ }
+ afe_mux_cfg = (luma - CX18_AV_COMPONENT_LUMA1) >> 12;
+ ch[0] = Y;
+ afe_mux_cfg |= (r_chroma - CX18_AV_COMPONENT_R_CHROMA4) >> 12;
+ ch[1] = Pr;
+ afe_mux_cfg |= (b_chroma - CX18_AV_COMPONENT_B_CHROMA7) >> 14;
+ ch[2] = Pb;
+ input_mode = 0x6;
} else {
int luma = vid_input & 0xf0;
int chroma = vid_input & 0xf00;
@@ -598,7 +623,7 @@ static int set_input(struct cx18 *cx, enum cx18_av_video_input vid_input,
luma > CX18_AV_SVIDEO_LUMA8 ||
chroma < CX18_AV_SVIDEO_CHROMA4 ||
chroma > CX18_AV_SVIDEO_CHROMA8) {
- CX18_ERR_DEV(sd, "0x%04x is not a valid video input!\n",
+ CX18_ERR_DEV(sd, "0x%06x is not a valid video input!\n",
vid_input);
return -EINVAL;
}
@@ -613,8 +638,8 @@ static int set_input(struct cx18 *cx, enum cx18_av_video_input vid_input,
afe_mux_cfg |= (chroma - CX18_AV_SVIDEO_CHROMA4) >> 4;
ch[1] = C;
}
+ input_mode = 0x2;
}
- /* TODO: LeadTek WinFast DVR3100 H & WinFast PVR2100 can do Y/Pb/Pr */
switch (aud_input) {
case CX18_AV_AUDIO_SERIAL1:
@@ -650,8 +675,8 @@ static int set_input(struct cx18 *cx, enum cx18_av_video_input vid_input,
/* Set up analog front end multiplexers */
cx18_av_write_expect(cx, 0x103, afe_mux_cfg, afe_mux_cfg, 0xf7);
- /* Set INPUT_MODE to Composite (0) or S-Video (1) */
- cx18_av_and_or(cx, 0x401, ~0x6, ch[0] == CVBS ? 0 : 0x02);
+ /* Set INPUT_MODE to Composite, S-Video, or Component */
+ cx18_av_and_or(cx, 0x401, ~0x6, input_mode);
/* Set CH_SEL_ADC2 to 1 if input comes from CH3 */
adc2_cfg = cx18_av_read(cx, 0x102);
@@ -998,9 +1023,9 @@ static int cx18_av_queryctrl(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc)
static int cx18_av_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
{
- struct cx18 *cx = v4l2_get_subdevdata(sd);
-
- return cx18_av_vbi_g_fmt(cx, fmt);
+ if (fmt->type != V4L2_BUF_TYPE_SLICED_VBI_CAPTURE)
+ return -EINVAL;
+ return cx18_av_g_sliced_fmt(sd, &fmt->fmt.sliced);
}
static int cx18_av_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
@@ -1073,12 +1098,6 @@ static int cx18_av_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
cx18_av_write(cx, 0x41e, 0x8 | filter);
break;
- case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
- return cx18_av_vbi_s_fmt(cx, fmt);
-
- case V4L2_BUF_TYPE_VBI_CAPTURE:
- return cx18_av_vbi_s_fmt(cx, fmt);
-
default:
return -EINVAL;
}
@@ -1378,17 +1397,24 @@ static const struct v4l2_subdev_audio_ops cx18_av_audio_ops = {
static const struct v4l2_subdev_video_ops cx18_av_video_ops = {
.s_routing = cx18_av_s_video_routing,
- .decode_vbi_line = cx18_av_decode_vbi_line,
.s_stream = cx18_av_s_stream,
.g_fmt = cx18_av_g_fmt,
.s_fmt = cx18_av_s_fmt,
};
+static const struct v4l2_subdev_vbi_ops cx18_av_vbi_ops = {
+ .decode_vbi_line = cx18_av_decode_vbi_line,
+ .g_sliced_fmt = cx18_av_g_sliced_fmt,
+ .s_sliced_fmt = cx18_av_s_sliced_fmt,
+ .s_raw_fmt = cx18_av_s_raw_fmt,
+};
+
static const struct v4l2_subdev_ops cx18_av_ops = {
.core = &cx18_av_general_ops,
.tuner = &cx18_av_tuner_ops,
.audio = &cx18_av_audio_ops,
.video = &cx18_av_video_ops,
+ .vbi = &cx18_av_vbi_ops,
};
int cx18_av_probe(struct cx18 *cx)
diff --git a/drivers/media/video/cx18/cx18-av-core.h b/drivers/media/video/cx18/cx18-av-core.h
index cafb7e99b9a0..c106967bdcc3 100644
--- a/drivers/media/video/cx18/cx18-av-core.h
+++ b/drivers/media/video/cx18/cx18-av-core.h
@@ -61,6 +61,25 @@ enum cx18_av_video_input {
CX18_AV_SVIDEO2 = 0x620,
CX18_AV_SVIDEO3 = 0x730,
CX18_AV_SVIDEO4 = 0x840,
+
+ /* Component Video inputs consist of one luma input (In1-In8) ORed
+ with a red chroma (In4-In6) and blue chroma input (In7-In8) */
+ CX18_AV_COMPONENT_LUMA1 = 0x1000,
+ CX18_AV_COMPONENT_LUMA2 = 0x2000,
+ CX18_AV_COMPONENT_LUMA3 = 0x3000,
+ CX18_AV_COMPONENT_LUMA4 = 0x4000,
+ CX18_AV_COMPONENT_LUMA5 = 0x5000,
+ CX18_AV_COMPONENT_LUMA6 = 0x6000,
+ CX18_AV_COMPONENT_LUMA7 = 0x7000,
+ CX18_AV_COMPONENT_LUMA8 = 0x8000,
+ CX18_AV_COMPONENT_R_CHROMA4 = 0x40000,
+ CX18_AV_COMPONENT_R_CHROMA5 = 0x50000,
+ CX18_AV_COMPONENT_R_CHROMA6 = 0x60000,
+ CX18_AV_COMPONENT_B_CHROMA7 = 0x700000,
+ CX18_AV_COMPONENT_B_CHROMA8 = 0x800000,
+
+ /* Component Video aliases for common combinations */
+ CX18_AV_COMPONENT1 = 0x861000,
};
enum cx18_av_audio_input {
@@ -359,7 +378,8 @@ void cx18_av_audio_set_path(struct cx18 *cx);
/* cx18_av-vbi.c */
int cx18_av_decode_vbi_line(struct v4l2_subdev *sd,
struct v4l2_decode_vbi_line *vbi);
-int cx18_av_vbi_g_fmt(struct cx18 *cx, struct v4l2_format *fmt);
-int cx18_av_vbi_s_fmt(struct cx18 *cx, struct v4l2_format *fmt);
+int cx18_av_s_raw_fmt(struct v4l2_subdev *sd, struct v4l2_vbi_format *fmt);
+int cx18_av_g_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_format *fmt);
+int cx18_av_s_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_format *fmt);
#endif
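
A worked decomposition of the CX18_AV_COMPONENT1 alias may help; the numbers below follow directly from the enum values above and the component branch added to set_input() in cx18-av-core.c earlier in this patch:

	/*
	 * CX18_AV_COMPONENT1 = 0x861000
	 *   luma     = 0x861000 & 0x00f000 = 0x001000  (CX18_AV_COMPONENT_LUMA1,     ch[0] = Y)
	 *   r_chroma = 0x861000 & 0x0f0000 = 0x060000  (CX18_AV_COMPONENT_R_CHROMA6, ch[1] = Pr)
	 *   b_chroma = 0x861000 & 0xf00000 = 0x800000  (CX18_AV_COMPONENT_B_CHROMA8, ch[2] = Pb)
	 *
	 *   afe_mux_cfg  = (luma     - CX18_AV_COMPONENT_LUMA1)     >> 12  = 0x00
	 *   afe_mux_cfg |= (r_chroma - CX18_AV_COMPONENT_R_CHROMA4) >> 12  = 0x20
	 *   afe_mux_cfg |= (b_chroma - CX18_AV_COMPONENT_B_CHROMA7) >> 14  = 0x40
	 *   => afe_mux_cfg = 0x60, input_mode = 0x6 (component)
	 */
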
diff --git a/drivers/media/video/cx18/cx18-av-vbi.c b/drivers/media/video/cx18/cx18-av-vbi.c
index a51732bcca4b..baa36fbcd4d4 100644
--- a/drivers/media/video/cx18/cx18-av-vbi.c
+++ b/drivers/media/video/cx18/cx18-av-vbi.c
@@ -129,10 +129,10 @@ static int decode_vps(u8 *dst, u8 *p)
return err & 0xf0;
}
-int cx18_av_vbi_g_fmt(struct cx18 *cx, struct v4l2_format *fmt)
+int cx18_av_g_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_format *svbi)
{
+ struct cx18 *cx = v4l2_get_subdevdata(sd);
struct cx18_av_state *state = &cx->av_state;
- struct v4l2_sliced_vbi_format *svbi;
static const u16 lcr2vbi[] = {
0, V4L2_SLICED_TELETEXT_B, 0, /* 1 */
0, V4L2_SLICED_WSS_625, 0, /* 4 */
@@ -143,9 +143,6 @@ int cx18_av_vbi_g_fmt(struct cx18 *cx, struct v4l2_format *fmt)
int is_pal = !(state->std & V4L2_STD_525_60);
int i;
- if (fmt->type != V4L2_BUF_TYPE_SLICED_VBI_CAPTURE)
- return -EINVAL;
- svbi = &fmt->fmt.sliced;
memset(svbi, 0, sizeof(*svbi));
/* we're done if raw VBI is active */
if ((cx18_av_read(cx, 0x404) & 0x10) == 0)
@@ -173,30 +170,27 @@ int cx18_av_vbi_g_fmt(struct cx18 *cx, struct v4l2_format *fmt)
return 0;
}
-int cx18_av_vbi_s_fmt(struct cx18 *cx, struct v4l2_format *fmt)
+int cx18_av_s_raw_fmt(struct v4l2_subdev *sd, struct v4l2_vbi_format *fmt)
{
+ struct cx18 *cx = v4l2_get_subdevdata(sd);
struct cx18_av_state *state = &cx->av_state;
- struct v4l2_sliced_vbi_format *svbi;
- int is_pal = !(state->std & V4L2_STD_525_60);
- int i, x;
- u8 lcr[24];
- if (fmt->type != V4L2_BUF_TYPE_SLICED_VBI_CAPTURE &&
- fmt->type != V4L2_BUF_TYPE_VBI_CAPTURE)
- return -EINVAL;
- svbi = &fmt->fmt.sliced;
- if (fmt->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
- /* raw VBI */
- memset(svbi, 0, sizeof(*svbi));
+ /* Setup standard */
+ cx18_av_std_setup(cx);
- /* Setup standard */
- cx18_av_std_setup(cx);
+ /* VBI Offset */
+ cx18_av_write(cx, 0x47f, state->slicer_line_delay);
+ cx18_av_write(cx, 0x404, 0x2e);
+ return 0;
+}
- /* VBI Offset */
- cx18_av_write(cx, 0x47f, state->slicer_line_delay);
- cx18_av_write(cx, 0x404, 0x2e);
- return 0;
- }
+int cx18_av_s_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_format *svbi)
+{
+ struct cx18 *cx = v4l2_get_subdevdata(sd);
+ struct cx18_av_state *state = &cx->av_state;
+ int is_pal = !(state->std & V4L2_STD_525_60);
+ int i, x;
+ u8 lcr[24];
for (x = 0; x <= 23; x++)
lcr[x] = 0x00;
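
With the format handlers split this way, callers reach them through the new vbi ops group instead of the video g_fmt/s_fmt entry points; the cx18-ioctl.c and cx18-streams.c hunks later in this patch do exactly this. Sketch:

	v4l2_subdev_call(cx->sd_av, vbi, g_sliced_fmt, &fmt->fmt.sliced);	/* was: video, g_fmt */
	v4l2_subdev_call(cx->sd_av, vbi, s_sliced_fmt, &fmt->fmt.sliced);	/* sliced VBI capture */
	v4l2_subdev_call(cx->sd_av, vbi, s_raw_fmt, &fmt->fmt.vbi);		/* raw VBI capture */
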
diff --git a/drivers/media/video/cx18/cx18-cards.c b/drivers/media/video/cx18/cx18-cards.c
index f808fb6fc1c1..74e122b5fc49 100644
--- a/drivers/media/video/cx18/cx18-cards.c
+++ b/drivers/media/video/cx18/cx18-cards.c
@@ -370,6 +370,7 @@ static const struct cx18_card cx18_card_leadtek_pvr2100 = {
{ CX18_CARD_INPUT_SVIDEO1, 1,
CX18_AV_SVIDEO_LUMA3 | CX18_AV_SVIDEO_CHROMA4 },
{ CX18_CARD_INPUT_COMPOSITE1, 1, CX18_AV_COMPOSITE7 },
+ { CX18_CARD_INPUT_COMPONENT1, 1, CX18_AV_COMPONENT1 },
},
.audio_inputs = {
{ CX18_CARD_INPUT_AUD_TUNER, CX18_AV_AUDIO5, 0 },
@@ -422,6 +423,7 @@ static const struct cx18_card cx18_card_leadtek_dvr3100h = {
{ CX18_CARD_INPUT_SVIDEO1, 1,
CX18_AV_SVIDEO_LUMA3 | CX18_AV_SVIDEO_CHROMA4 },
{ CX18_CARD_INPUT_COMPOSITE1, 1, CX18_AV_COMPOSITE7 },
+ { CX18_CARD_INPUT_COMPONENT1, 1, CX18_AV_COMPONENT1 },
},
.audio_inputs = {
{ CX18_CARD_INPUT_AUD_TUNER, CX18_AV_AUDIO5, 0 },
@@ -480,7 +482,7 @@ int cx18_get_input(struct cx18 *cx, u16 index, struct v4l2_input *input)
"S-Video 2",
"Composite 1",
"Composite 2",
- "Composite 3"
+ "Component 1"
};
memset(input, 0, sizeof(*input));
diff --git a/drivers/media/video/cx18/cx18-cards.h b/drivers/media/video/cx18/cx18-cards.h
index af3d71607dc9..796e517300ac 100644
--- a/drivers/media/video/cx18/cx18-cards.h
+++ b/drivers/media/video/cx18/cx18-cards.h
@@ -43,7 +43,7 @@
#define CX18_CARD_INPUT_SVIDEO2 3
#define CX18_CARD_INPUT_COMPOSITE1 4
#define CX18_CARD_INPUT_COMPOSITE2 5
-#define CX18_CARD_INPUT_COMPOSITE3 6
+#define CX18_CARD_INPUT_COMPONENT1 6
/* audio inputs */
#define CX18_CARD_INPUT_AUD_TUNER 1
@@ -62,7 +62,7 @@
struct cx18_card_video_input {
u8 video_type; /* video input type */
u8 audio_index; /* index in cx18_card_audio_input array */
- u16 video_input; /* hardware video input */
+ u32 video_input; /* hardware video input */
};
struct cx18_card_audio_input {
diff --git a/drivers/media/video/cx18/cx18-controls.c b/drivers/media/video/cx18/cx18-controls.c
index 7fa589240ff2..4b4b46544d5a 100644
--- a/drivers/media/video/cx18/cx18-controls.c
+++ b/drivers/media/video/cx18/cx18-controls.c
@@ -263,7 +263,7 @@ int cx18_s_ext_ctrls(struct file *file, void *fh, struct v4l2_ext_controls *c)
int ret;
struct v4l2_control ctrl;
- ret = v4l2_prio_check(&cx->prio, &id->prio);
+ ret = v4l2_prio_check(&cx->prio, id->prio);
if (ret)
return ret;
diff --git a/drivers/media/video/cx18/cx18-fileops.c b/drivers/media/video/cx18/cx18-fileops.c
index 863ce7758239..e12a15020cda 100644
--- a/drivers/media/video/cx18/cx18-fileops.c
+++ b/drivers/media/video/cx18/cx18-fileops.c
@@ -700,7 +700,7 @@ int cx18_v4l2_close(struct file *filp)
CX18_DEBUG_IOCTL("close() of %s\n", s->name);
- v4l2_prio_close(&cx->prio, &id->prio);
+ v4l2_prio_close(&cx->prio, id->prio);
/* Easy case first: this stream was never claimed by us */
if (s->id != id->open_id) {
diff --git a/drivers/media/video/cx18/cx18-i2c.c b/drivers/media/video/cx18/cx18-i2c.c
index eecf29af916c..cfa1f289b0f5 100644
--- a/drivers/media/video/cx18/cx18-i2c.c
+++ b/drivers/media/video/cx18/cx18-i2c.c
@@ -109,7 +109,7 @@ static int cx18_i2c_new_ir(struct cx18 *cx, struct i2c_adapter *adap, u32 hw,
/* Our default information for ir-kbd-i2c.c to use */
switch (hw) {
case CX18_HW_Z8F0811_IR_RX_HAUP:
- init_data->ir_codes = &ir_codes_hauppauge_new_table;
+ init_data->ir_codes = RC_MAP_HAUPPAUGE_NEW;
init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP_XVR;
init_data->type = IR_TYPE_RC5;
init_data->name = cx->card_name;
diff --git a/drivers/media/video/cx18/cx18-ioctl.c b/drivers/media/video/cx18/cx18-ioctl.c
index b81dd0ea8eb9..2530fc54daaf 100644
--- a/drivers/media/video/cx18/cx18-ioctl.c
+++ b/drivers/media/video/cx18/cx18-ioctl.c
@@ -208,7 +208,7 @@ static int cx18_g_fmt_sliced_vbi_cap(struct file *file, void *fh,
* digitizer/slicer. Note, cx18_av_vbi() wipes the passed in
* fmt->fmt.sliced under valid calling conditions
*/
- if (v4l2_subdev_call(cx->sd_av, video, g_fmt, fmt))
+ if (v4l2_subdev_call(cx->sd_av, vbi, g_sliced_fmt, &fmt->fmt.sliced))
return -EINVAL;
/* Ensure V4L2 spec compliant output */
@@ -277,7 +277,7 @@ static int cx18_s_fmt_vid_cap(struct file *file, void *fh,
int ret;
int w, h;
- ret = v4l2_prio_check(&cx->prio, &id->prio);
+ ret = v4l2_prio_check(&cx->prio, id->prio);
if (ret)
return ret;
@@ -306,7 +306,7 @@ static int cx18_s_fmt_vbi_cap(struct file *file, void *fh,
struct cx18 *cx = id->cx;
int ret;
- ret = v4l2_prio_check(&cx->prio, &id->prio);
+ ret = v4l2_prio_check(&cx->prio, id->prio);
if (ret)
return ret;
@@ -322,7 +322,7 @@ static int cx18_s_fmt_vbi_cap(struct file *file, void *fh,
* Note cx18_av_vbi_wipes out alot of the passed in fmt under valid
* calling conditions
*/
- ret = v4l2_subdev_call(cx->sd_av, video, s_fmt, fmt);
+ ret = v4l2_subdev_call(cx->sd_av, vbi, s_raw_fmt, &fmt->fmt.vbi);
if (ret)
return ret;
@@ -341,7 +341,7 @@ static int cx18_s_fmt_sliced_vbi_cap(struct file *file, void *fh,
int ret;
struct v4l2_sliced_vbi_format *vbifmt = &fmt->fmt.sliced;
- ret = v4l2_prio_check(&cx->prio, &id->prio);
+ ret = v4l2_prio_check(&cx->prio, id->prio);
if (ret)
return ret;
@@ -359,7 +359,7 @@ static int cx18_s_fmt_sliced_vbi_cap(struct file *file, void *fh,
* Note, cx18_av_vbi() wipes some "impossible" service lines in the
* passed in fmt->fmt.sliced under valid calling conditions
*/
- ret = v4l2_subdev_call(cx->sd_av, video, s_fmt, fmt);
+ ret = v4l2_subdev_call(cx->sd_av, vbi, s_sliced_fmt, &fmt->fmt.sliced);
if (ret)
return ret;
/* Store our current v4l2 sliced VBI settings */
@@ -549,7 +549,7 @@ static int cx18_s_crop(struct file *file, void *fh, struct v4l2_crop *crop)
struct cx18 *cx = id->cx;
int ret;
- ret = v4l2_prio_check(&cx->prio, &id->prio);
+ ret = v4l2_prio_check(&cx->prio, id->prio);
if (ret)
return ret;
@@ -601,7 +601,7 @@ int cx18_s_input(struct file *file, void *fh, unsigned int inp)
struct cx18 *cx = id->cx;
int ret;
- ret = v4l2_prio_check(&cx->prio, &id->prio);
+ ret = v4l2_prio_check(&cx->prio, id->prio);
if (ret)
return ret;
@@ -647,7 +647,7 @@ int cx18_s_frequency(struct file *file, void *fh, struct v4l2_frequency *vf)
struct cx18 *cx = id->cx;
int ret;
- ret = v4l2_prio_check(&cx->prio, &id->prio);
+ ret = v4l2_prio_check(&cx->prio, id->prio);
if (ret)
return ret;
@@ -675,7 +675,7 @@ int cx18_s_std(struct file *file, void *fh, v4l2_std_id *std)
struct cx18 *cx = id->cx;
int ret;
- ret = v4l2_prio_check(&cx->prio, &id->prio);
+ ret = v4l2_prio_check(&cx->prio, id->prio);
if (ret)
return ret;
@@ -715,7 +715,7 @@ static int cx18_s_tuner(struct file *file, void *fh, struct v4l2_tuner *vt)
struct cx18 *cx = id->cx;
int ret;
- ret = v4l2_prio_check(&cx->prio, &id->prio);
+ ret = v4l2_prio_check(&cx->prio, id->prio);
if (ret)
return ret;
diff --git a/drivers/media/video/cx18/cx18-streams.c b/drivers/media/video/cx18/cx18-streams.c
index 054450f65a60..f5c91261b2db 100644
--- a/drivers/media/video/cx18/cx18-streams.c
+++ b/drivers/media/video/cx18/cx18-streams.c
@@ -374,7 +374,10 @@ static void cx18_vbi_setup(struct cx18_stream *s)
}
/* setup VBI registers */
- v4l2_subdev_call(cx->sd_av, video, s_fmt, &cx->vbi.in);
+ if (raw)
+ v4l2_subdev_call(cx->sd_av, vbi, s_raw_fmt, &cx->vbi.in.fmt.vbi);
+ else
+ v4l2_subdev_call(cx->sd_av, vbi, s_sliced_fmt, &cx->vbi.in.fmt.sliced);
/*
* Send the CX18_CPU_SET_RAW_VBI_PARAM API command to setup Encoder Raw
diff --git a/drivers/media/video/cx18/cx18-vbi.c b/drivers/media/video/cx18/cx18-vbi.c
index 574c1c6974f8..582227522cf0 100644
--- a/drivers/media/video/cx18/cx18-vbi.c
+++ b/drivers/media/video/cx18/cx18-vbi.c
@@ -174,7 +174,7 @@ static u32 compress_sliced_buf(struct cx18 *cx, u8 *buf, u32 size,
p[3] != sliced_vbi_eav_rp[1]))
continue;
vbi.p = p + 4;
- v4l2_subdev_call(cx->sd_av, video, decode_vbi_line, &vbi);
+ v4l2_subdev_call(cx->sd_av, vbi, decode_vbi_line, &vbi);
if (vbi.type) {
cx->vbi.sliced_data[line].id = vbi.type;
cx->vbi.sliced_data[line].field = vbi.is_second_field;
diff --git a/drivers/media/video/cx231xx/cx231xx-audio.c b/drivers/media/video/cx231xx/cx231xx-audio.c
index 7793d60966db..7cae95a2245e 100644
--- a/drivers/media/video/cx231xx/cx231xx-audio.c
+++ b/drivers/media/video/cx231xx/cx231xx-audio.c
@@ -495,7 +495,7 @@ static int cx231xx_audio_init(struct cx231xx *dev)
pcm->info_flags = 0;
pcm->private_data = dev;
strcpy(pcm->name, "Conexant cx231xx Capture");
- strcpy(card->driver, "Conexant cx231xx Audio");
+ strcpy(card->driver, "Cx231xx-Audio");
strcpy(card->shortname, "Cx231xx Audio");
strcpy(card->longname, "Conexant cx231xx Audio");
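
The shortened driver string matters because snd_card::driver is a small fixed-size array (16 bytes, by the usual ALSA definition, which is an assumption here, not stated in the patch) while shortname and longname have more room. The constraint in a sketch:

	strcpy(card->driver, "Cx231xx-Audio");			/* 13 chars + NUL: fits in driver[16] */
	strcpy(card->shortname, "Cx231xx Audio");		/* larger field */
	strcpy(card->longname, "Conexant cx231xx Audio");	/* 22 chars: fine here, too long for driver[] */
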
diff --git a/drivers/media/video/cx231xx/cx231xx-core.c b/drivers/media/video/cx231xx/cx231xx-core.c
index b24eee115e7e..912a4d740206 100644
--- a/drivers/media/video/cx231xx/cx231xx-core.c
+++ b/drivers/media/video/cx231xx/cx231xx-core.c
@@ -96,10 +96,9 @@ int cx231xx_register_extension(struct cx231xx_ops *ops)
mutex_lock(&cx231xx_devlist_mutex);
mutex_lock(&cx231xx_extension_devlist_lock);
list_add_tail(&ops->next, &cx231xx_extension_devlist);
- list_for_each_entry(dev, &cx231xx_devlist, devlist) {
- if (dev)
- ops->init(dev);
- }
+ list_for_each_entry(dev, &cx231xx_devlist, devlist)
+ ops->init(dev);
+
printk(KERN_INFO DRIVER_NAME ": %s initialized\n", ops->name);
mutex_unlock(&cx231xx_extension_devlist_lock);
mutex_unlock(&cx231xx_devlist_mutex);
@@ -112,10 +111,8 @@ void cx231xx_unregister_extension(struct cx231xx_ops *ops)
struct cx231xx *dev = NULL;
mutex_lock(&cx231xx_devlist_mutex);
- list_for_each_entry(dev, &cx231xx_devlist, devlist) {
- if (dev)
- ops->fini(dev);
- }
+ list_for_each_entry(dev, &cx231xx_devlist, devlist)
+ ops->fini(dev);
mutex_lock(&cx231xx_extension_devlist_lock);
printk(KERN_INFO DRIVER_NAME ": %s removed\n", ops->name);
@@ -679,11 +676,11 @@ void cx231xx_uninit_isoc(struct cx231xx *dev)
usb_unlink_urb(urb);
if (dev->video_mode.isoc_ctl.transfer_buffer[i]) {
- usb_buffer_free(dev->udev,
- urb->transfer_buffer_length,
- dev->video_mode.isoc_ctl.
- transfer_buffer[i],
- urb->transfer_dma);
+ usb_free_coherent(dev->udev,
+ urb->transfer_buffer_length,
+ dev->video_mode.isoc_ctl.
+ transfer_buffer[i],
+ urb->transfer_dma);
}
usb_free_urb(urb);
dev->video_mode.isoc_ctl.urb[i] = NULL;
@@ -770,8 +767,8 @@ int cx231xx_init_isoc(struct cx231xx *dev, int max_packets,
dev->video_mode.isoc_ctl.urb[i] = urb;
dev->video_mode.isoc_ctl.transfer_buffer[i] =
- usb_buffer_alloc(dev->udev, sb_size, GFP_KERNEL,
- &urb->transfer_dma);
+ usb_alloc_coherent(dev->udev, sb_size, GFP_KERNEL,
+ &urb->transfer_dma);
if (!dev->video_mode.isoc_ctl.transfer_buffer[i]) {
cx231xx_err("unable to allocate %i bytes for transfer"
" buffer %i%s\n",
diff --git a/drivers/media/video/cx231xx/cx231xx-input.c b/drivers/media/video/cx231xx/cx231xx-input.c
index b473cd8367f5..fd099153b746 100644
--- a/drivers/media/video/cx231xx/cx231xx-input.c
+++ b/drivers/media/video/cx231xx/cx231xx-input.c
@@ -35,6 +35,8 @@ static unsigned int ir_debug;
module_param(ir_debug, int, 0644);
MODULE_PARM_DESC(ir_debug, "enable debug messages [IR]");
+#define MODULE_NAME "cx231xx"
+
#define i2cdprintk(fmt, arg...) \
if (ir_debug) { \
printk(KERN_DEBUG "%s/ir: " fmt, ir->name , ## arg); \
@@ -59,7 +61,6 @@ struct cx231xx_ir_poll_result {
struct cx231xx_IR {
struct cx231xx *dev;
struct input_dev *input;
- struct ir_input_state ir;
char name[32];
char phys[32];
@@ -67,9 +68,7 @@ struct cx231xx_IR {
int polling;
struct work_struct work;
struct timer_list timer;
- unsigned int last_toggle:1;
unsigned int last_readcount;
- unsigned int repeat_interval;
int (*get_key) (struct cx231xx_IR *, struct cx231xx_ir_poll_result *);
};
@@ -81,7 +80,6 @@ struct cx231xx_IR {
static void cx231xx_ir_handle_key(struct cx231xx_IR *ir)
{
int result;
- int do_sendkey = 0;
struct cx231xx_ir_poll_result poll_result;
/* read the registers containing the IR status */
@@ -95,44 +93,23 @@ static void cx231xx_ir_handle_key(struct cx231xx_IR *ir)
poll_result.toggle_bit, poll_result.read_count,
ir->last_readcount, poll_result.rc_data[0]);
- if (ir->dev->chip_id == CHIP_ID_EM2874) {
+ if (poll_result.read_count > 0 &&
+ poll_result.read_count != ir->last_readcount)
+ ir_keydown(ir->input,
+ poll_result.rc_data[0],
+ poll_result.toggle_bit);
+
+ if (ir->dev->chip_id == CHIP_ID_EM2874)
/* The em2874 clears the readcount field every time the
register is read. The em2860/2880 datasheet says that it
is supposed to clear the readcount, but it doesn't. So with
the em2874, we are looking for a non-zero read count as
opposed to a readcount that is incrementing */
ir->last_readcount = 0;
- }
-
- if (poll_result.read_count == 0) {
- /* The button has not been pressed since the last read */
- } else if (ir->last_toggle != poll_result.toggle_bit) {
- /* A button has been pressed */
- dprintk("button has been pressed\n");
- ir->last_toggle = poll_result.toggle_bit;
- ir->repeat_interval = 0;
- do_sendkey = 1;
- } else if (poll_result.toggle_bit == ir->last_toggle &&
- poll_result.read_count > 0 &&
- poll_result.read_count != ir->last_readcount) {
- /* The button is still being held down */
- dprintk("button being held down\n");
-
- /* Debouncer for first keypress */
- if (ir->repeat_interval++ > 9) {
- /* Start repeating after 1 second */
- do_sendkey = 1;
- }
- }
+ else
+ ir->last_readcount = poll_result.read_count;
- if (do_sendkey) {
- dprintk("sending keypress\n");
- ir_input_keydown(ir->input, &ir->ir, poll_result.rc_data[0]);
- ir_input_nokey(ir->input, &ir->ir);
}
-
- ir->last_readcount = poll_result.read_count;
- return;
}
static void ir_timer(unsigned long data)
@@ -198,10 +175,6 @@ int cx231xx_ir_init(struct cx231xx *dev)
usb_make_path(dev->udev, ir->phys, sizeof(ir->phys));
strlcat(ir->phys, "/input0", sizeof(ir->phys));
- err = ir_input_init(input_dev, &ir->ir, IR_TYPE_OTHER);
- if (err < 0)
- goto err_out_free;
-
input_dev->name = ir->name;
input_dev->phys = ir->phys;
input_dev->id.bustype = BUS_USB;
@@ -217,7 +190,8 @@ int cx231xx_ir_init(struct cx231xx *dev)
cx231xx_ir_start(ir);
/* all done */
- err = ir_input_register(ir->input, dev->board.ir_codes, NULL);
+ err = __ir_input_register(ir->input, dev->board.ir_codes,
+ NULL, MODULE_NAME);
if (err)
goto err_out_stop;
diff --git a/drivers/media/video/cx231xx/cx231xx-video.c b/drivers/media/video/cx231xx/cx231xx-video.c
index 16a73eab6726..2782709b263f 100644
--- a/drivers/media/video/cx231xx/cx231xx-video.c
+++ b/drivers/media/video/cx231xx/cx231xx-video.c
@@ -1669,7 +1669,7 @@ static int vidioc_g_fmt_sliced_vbi_cap(struct file *file, void *priv,
f->fmt.sliced.service_set = 0;
- call_all(dev, video, g_fmt, f);
+ call_all(dev, vbi, g_sliced_fmt, &f->fmt.sliced);
if (f->fmt.sliced.service_set == 0)
rc = -EINVAL;
@@ -1690,7 +1690,7 @@ static int vidioc_try_set_sliced_vbi_cap(struct file *file, void *priv,
return rc;
mutex_lock(&dev->lock);
- call_all(dev, video, g_fmt, f);
+ call_all(dev, vbi, g_sliced_fmt, &f->fmt.sliced);
mutex_unlock(&dev->lock);
if (f->fmt.sliced.service_set == 0)
@@ -1902,9 +1902,12 @@ static int radio_queryctrl(struct file *file, void *priv,
if (c->id < V4L2_CID_BASE || c->id >= V4L2_CID_LASTP1)
return -EINVAL;
if (c->id == V4L2_CID_AUDIO_MUTE) {
- for (i = 0; i < CX231XX_CTLS; i++)
+ for (i = 0; i < CX231XX_CTLS; i++) {
if (cx231xx_ctls[i].v.id == c->id)
break;
+ }
+ if (i == CX231XX_CTLS)
+ return -EINVAL;
*c = cx231xx_ctls[i].v;
} else
*c = no_ctl;
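
The added braces and bounds check above close an off-by-one: when no control id matched, the old code fell through with i == CX231XX_CTLS and read one element past the end of cx231xx_ctls[]. The general shape of the fixed search:

	for (i = 0; i < CX231XX_CTLS; i++) {
		if (cx231xx_ctls[i].v.id == c->id)
			break;
	}
	if (i == CX231XX_CTLS)
		return -EINVAL;		/* no match: do not index the table */
	*c = cx231xx_ctls[i].v;
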
diff --git a/drivers/media/video/cx231xx/cx231xx.h b/drivers/media/video/cx231xx/cx231xx.h
index 17d4d1a800ce..38d417191a65 100644
--- a/drivers/media/video/cx231xx/cx231xx.h
+++ b/drivers/media/video/cx231xx/cx231xx.h
@@ -32,7 +32,7 @@
#include <media/videobuf-vmalloc.h>
#include <media/v4l2-device.h>
-#include <media/ir-kbd-i2c.h>
+#include <media/ir-core.h>
#if defined(CONFIG_VIDEO_CX231XX_DVB) || \
defined(CONFIG_VIDEO_CX231XX_DVB_MODULE)
#include <media/videobuf-dvb.h>
diff --git a/drivers/media/video/cx2341x.c b/drivers/media/video/cx2341x.c
index 4c8e95853fa3..022b480918cd 100644
--- a/drivers/media/video/cx2341x.c
+++ b/drivers/media/video/cx2341x.c
@@ -1000,20 +1000,6 @@ int cx2341x_update(void *priv, cx2341x_mbox_func func,
h, w);
if (err) return err;
}
-
- if (new->width != 720 || new->height != (new->is_50hz ? 576 : 480)) {
- /* Adjust temporal filter if necessary. The problem with the
- temporal filter is that it works well with full resolution
- capturing, but not when the capture window is scaled (the
- filter introduces a ghosting effect). So if the capture
- window is scaled, then force the filter to 0.
-
- For full resolution the filter really improves the video
- quality, especially if the original video quality is
- suboptimal. */
- temporal = 0;
- }
-
if (force || NEQ(stream_type)) {
err = cx2341x_api(priv, func, CX2341X_ENC_SET_STREAM_TYPE, 1,
mpeg_stream_type[new->stream_type]);
diff --git a/drivers/media/video/cx23885/cimax2.c b/drivers/media/video/cx23885/cimax2.c
index d4a9d2c5947c..c95e7bc14745 100644
--- a/drivers/media/video/cx23885/cimax2.c
+++ b/drivers/media/video/cx23885/cimax2.c
@@ -60,12 +60,18 @@ static unsigned int ci_dbg;
module_param(ci_dbg, int, 0644);
MODULE_PARM_DESC(ci_dbg, "Enable CI debugging");
+static unsigned int ci_irq_enable;
+module_param(ci_irq_enable, int, 0644);
+MODULE_PARM_DESC(ci_irq_enable, "Enable IRQ from CAM");
+
#define ci_dbg_print(args...) \
do { \
if (ci_dbg) \
printk(KERN_DEBUG args); \
} while (0)
+#define ci_irq_flags() (ci_irq_enable ? NETUP_IRQ_IRQAM : 0)
+
/* stores all private variables for communication with CI */
struct netup_ci_state {
struct dvb_ca_en50221 ca;
@@ -392,7 +398,7 @@ int netup_poll_ci_slot_status(struct dvb_ca_en50221 *en50221, int slot, int open
if (0 != slot)
return -EINVAL;
- netup_ci_set_irq(en50221, open ? (NETUP_IRQ_DETAM | NETUP_IRQ_IRQAM)
+ netup_ci_set_irq(en50221, open ? (NETUP_IRQ_DETAM | ci_irq_flags())
: NETUP_IRQ_DETAM);
return state->status;
@@ -429,7 +435,7 @@ int netup_ci_init(struct cx23885_tsport *port)
0x01, /* power on (use it like store place) */
0x00, /* RFU */
0x00, /* int status read only */
- NETUP_IRQ_IRQAM | NETUP_IRQ_DETAM, /* DETAM, IRQAM unmasked */
+ ci_irq_flags() | NETUP_IRQ_DETAM, /* DETAM, IRQAM unmasked */
0x05, /* EXTINT=active-high, INT=push-pull */
0x00, /* USCG1 */
0x04, /* ack active low */
@@ -470,7 +476,7 @@ int netup_ci_init(struct cx23885_tsport *port)
state->ca.poll_slot_status = netup_poll_ci_slot_status;
state->ca.data = state;
state->priv = port;
- state->current_irq_mode = NETUP_IRQ_IRQAM | NETUP_IRQ_DETAM;
+ state->current_irq_mode = ci_irq_flags() | NETUP_IRQ_DETAM;
ret = netup_write_i2c(state->i2c_adap, state->ci_i2c_addr,
0, &cimax_init[0], 34);
diff --git a/drivers/media/video/cx23885/cx23885-417.c b/drivers/media/video/cx23885/cx23885-417.c
index a8ddc227cf86..abd64e89f60f 100644
--- a/drivers/media/video/cx23885/cx23885-417.c
+++ b/drivers/media/video/cx23885/cx23885-417.c
@@ -1356,7 +1356,7 @@ static int vidioc_querycap(struct file *file, void *priv,
struct cx23885_dev *dev = fh->dev;
struct cx23885_tsport *tsport = &dev->ts1;
- strcpy(cap->driver, dev->name);
+ strlcpy(cap->driver, dev->name, sizeof(cap->driver));
strlcpy(cap->card, cx23885_boards[tsport->dev->board].name,
sizeof(cap->card));
sprintf(cap->bus_info, "PCI:%s", pci_name(dev->pci));
diff --git a/drivers/media/video/cx23885/cx23885-dvb.c b/drivers/media/video/cx23885/cx23885-dvb.c
index 939079d7bbb9..9e1460828b2f 100644
--- a/drivers/media/video/cx23885/cx23885-dvb.c
+++ b/drivers/media/video/cx23885/cx23885-dvb.c
@@ -1006,6 +1006,22 @@ static int dvb_register(struct cx23885_tsport *port)
netup_ci_init(port);
break;
}
+ case CX23885_BOARD_TEVII_S470: {
+ u8 eeprom[256]; /* 24C02 i2c eeprom */
+
+ if (port->nr != 1)
+ break;
+
+ /* Read entire EEPROM */
+ dev->i2c_bus[0].i2c_client.addr = 0xa0 >> 1;
+ tveeprom_read(&dev->i2c_bus[0].i2c_client, eeprom, sizeof(eeprom));
+ printk(KERN_INFO "TeVii S470 MAC= "
+ "%02X:%02X:%02X:%02X:%02X:%02X\n",
+ eeprom[0xa0], eeprom[0xa1], eeprom[0xa2],
+ eeprom[0xa3], eeprom[0xa4], eeprom[0xa5]);
+ memcpy(port->frontends.adapter.proposed_mac, eeprom + 0xa0, 6);
+ break;
+ }
}
return ret;
diff --git a/drivers/media/video/cx23885/cx23885-input.c b/drivers/media/video/cx23885/cx23885-input.c
index 8e9d990dbe93..8d306d8bb61c 100644
--- a/drivers/media/video/cx23885/cx23885-input.c
+++ b/drivers/media/video/cx23885/cx23885-input.c
@@ -51,6 +51,8 @@
#define RC5_EXTENDED_COMMAND_OFFSET 64
+#define MODULE_NAME "cx23885"
+
static inline unsigned int rc5_command(u32 rc5_baseband)
{
return RC5_INSTR(rc5_baseband) +
@@ -338,7 +340,7 @@ int cx23885_input_init(struct cx23885_dev *dev)
{
struct card_ir *ir;
struct input_dev *input_dev;
- struct ir_scancode_table *ir_codes = NULL;
+ char *ir_codes = NULL;
int ir_type, ir_addr, ir_start;
int ret;
@@ -353,7 +355,7 @@ int cx23885_input_init(struct cx23885_dev *dev)
case CX23885_BOARD_HAUPPAUGE_HVR1850:
case CX23885_BOARD_HAUPPAUGE_HVR1290:
/* Parameters for the grey Hauppauge remote for the HVR-1850 */
- ir_codes = &ir_codes_hauppauge_new_table;
+ ir_codes = RC_MAP_HAUPPAUGE_NEW;
ir_type = IR_TYPE_RC5;
ir_addr = 0x1e; /* RC-5 system bits emitted by the remote */
ir_start = RC5_START_BITS_NORMAL; /* A basic RC-5 remote */
@@ -398,7 +400,7 @@ int cx23885_input_init(struct cx23885_dev *dev)
dev->ir_input = ir;
cx23885_input_ir_start(dev);
- ret = ir_input_register(ir->dev, ir_codes, NULL);
+ ret = ir_input_register(ir->dev, ir_codes, NULL, MODULE_NAME);
if (ret)
goto err_out_stop;
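
This is the same conversion seen in the cx18 and cx88 IR hunks of this series: the keymap is now named by an RC_MAP_* string instead of pointed at as a scancode table, and registration carries the driver's module name. The resulting pattern, as used above:

	char *ir_codes = RC_MAP_HAUPPAUGE_NEW;		/* keymap referenced by name */
	ret = ir_input_register(ir->dev, ir_codes, NULL, MODULE_NAME);
	if (ret)
		goto err_out_stop;
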
diff --git a/drivers/media/video/cx23885/cx23885-video.c b/drivers/media/video/cx23885/cx23885-video.c
index 2d3ac8b83dc3..543b854f6a62 100644
--- a/drivers/media/video/cx23885/cx23885-video.c
+++ b/drivers/media/video/cx23885/cx23885-video.c
@@ -514,8 +514,8 @@ static int buffer_setup(struct videobuf_queue *q, unsigned int *count,
*size = fh->fmt->depth*fh->width*fh->height >> 3;
if (0 == *count)
*count = 32;
- while (*size * *count > vid_limit * 1024 * 1024)
- (*count)--;
+ if (*size * *count > vid_limit * 1024 * 1024)
+ *count = (vid_limit * 1024 * 1024) / *size;
return 0;
}
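
The clamp above becomes a single division instead of a decrement loop; both yield the largest count such that size * count stays within the vid_limit megabyte budget, but the division does it in one step. Worked numbers (illustrative only; vid_limit and the frame geometry are assumptions, not taken from this hunk):

	/* e.g. 720x576 at 16 bits/pixel and vid_limit = 16 MB */
	*size  = (16 * 720 * 576) >> 3;		/* 829440 bytes per buffer */
	*count = (16 * 1024 * 1024) / *size;	/* 16777216 / 829440 = 20 buffers */
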
diff --git a/drivers/media/video/cx25840/cx25840-core.c b/drivers/media/video/cx25840/cx25840-core.c
index f2461cd3de5a..8b6fb3544376 100644
--- a/drivers/media/video/cx25840/cx25840-core.c
+++ b/drivers/media/video/cx25840/cx25840-core.c
@@ -1018,7 +1018,7 @@ static int cx25840_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
{
switch (fmt->type) {
case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
- return cx25840_vbi_g_fmt(sd, fmt);
+ return cx25840_g_sliced_fmt(sd, &fmt->fmt.sliced);
default:
return -EINVAL;
}
@@ -1079,12 +1079,6 @@ static int cx25840_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
cx25840_write(client, 0x41e, 0x8 | filter);
break;
- case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
- return cx25840_vbi_s_fmt(sd, fmt);
-
- case V4L2_BUF_TYPE_VBI_CAPTURE:
- return cx25840_vbi_s_fmt(sd, fmt);
-
default:
return -EINVAL;
}
@@ -1635,15 +1629,22 @@ static const struct v4l2_subdev_video_ops cx25840_video_ops = {
.s_routing = cx25840_s_video_routing,
.g_fmt = cx25840_g_fmt,
.s_fmt = cx25840_s_fmt,
- .decode_vbi_line = cx25840_decode_vbi_line,
.s_stream = cx25840_s_stream,
};
+static const struct v4l2_subdev_vbi_ops cx25840_vbi_ops = {
+ .decode_vbi_line = cx25840_decode_vbi_line,
+ .s_raw_fmt = cx25840_s_raw_fmt,
+ .s_sliced_fmt = cx25840_s_sliced_fmt,
+ .g_sliced_fmt = cx25840_g_sliced_fmt,
+};
+
static const struct v4l2_subdev_ops cx25840_ops = {
.core = &cx25840_core_ops,
.tuner = &cx25840_tuner_ops,
.audio = &cx25840_audio_ops,
.video = &cx25840_video_ops,
+ .vbi = &cx25840_vbi_ops,
};
/* ----------------------------------------------------------------------- */
diff --git a/drivers/media/video/cx25840/cx25840-core.h b/drivers/media/video/cx25840/cx25840-core.h
index 55345444417f..04393b971567 100644
--- a/drivers/media/video/cx25840/cx25840-core.h
+++ b/drivers/media/video/cx25840/cx25840-core.h
@@ -99,8 +99,9 @@ int cx25840_audio_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl);
/* ----------------------------------------------------------------------- */
/* cx25850-vbi.c */
-int cx25840_vbi_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt);
-int cx25840_vbi_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt);
+int cx25840_s_raw_fmt(struct v4l2_subdev *sd, struct v4l2_vbi_format *fmt);
+int cx25840_s_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_format *fmt);
+int cx25840_g_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_format *fmt);
int cx25840_decode_vbi_line(struct v4l2_subdev *sd, struct v4l2_decode_vbi_line *vbi);
#endif
diff --git a/drivers/media/video/cx25840/cx25840-vbi.c b/drivers/media/video/cx25840/cx25840-vbi.c
index 35f6592f6c47..64a4004f8a97 100644
--- a/drivers/media/video/cx25840/cx25840-vbi.c
+++ b/drivers/media/video/cx25840/cx25840-vbi.c
@@ -82,11 +82,10 @@ static int decode_vps(u8 * dst, u8 * p)
return err & 0xf0;
}
-int cx25840_vbi_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
+int cx25840_g_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_format *svbi)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct cx25840_state *state = to_state(sd);
- struct v4l2_sliced_vbi_format *svbi;
static const u16 lcr2vbi[] = {
0, V4L2_SLICED_TELETEXT_B, 0, /* 1 */
0, V4L2_SLICED_WSS_625, 0, /* 4 */
@@ -97,9 +96,6 @@ int cx25840_vbi_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
int is_pal = !(state->std & V4L2_STD_525_60);
int i;
- if (fmt->type != V4L2_BUF_TYPE_SLICED_VBI_CAPTURE)
- return -EINVAL;
- svbi = &fmt->fmt.sliced;
memset(svbi, 0, sizeof(*svbi));
/* we're done if raw VBI is active */
if ((cx25840_read(client, 0x404) & 0x10) == 0)
@@ -127,32 +123,30 @@ int cx25840_vbi_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
return 0;
}
-int cx25840_vbi_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
+int cx25840_s_raw_fmt(struct v4l2_subdev *sd, struct v4l2_vbi_format *fmt)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct cx25840_state *state = to_state(sd);
- struct v4l2_sliced_vbi_format *svbi;
int is_pal = !(state->std & V4L2_STD_525_60);
int vbi_offset = is_pal ? 1 : 0;
- int i, x;
- u8 lcr[24];
- if (fmt->type != V4L2_BUF_TYPE_SLICED_VBI_CAPTURE &&
- fmt->type != V4L2_BUF_TYPE_VBI_CAPTURE)
- return -EINVAL;
- svbi = &fmt->fmt.sliced;
- if (fmt->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
- /* raw VBI */
- memset(svbi, 0, sizeof(*svbi));
+ /* Setup standard */
+ cx25840_std_setup(client);
- /* Setup standard */
- cx25840_std_setup(client);
+ /* VBI Offset */
+ cx25840_write(client, 0x47f, vbi_offset);
+ cx25840_write(client, 0x404, 0x2e);
+ return 0;
+}
- /* VBI Offset */
- cx25840_write(client, 0x47f, vbi_offset);
- cx25840_write(client, 0x404, 0x2e);
- return 0;
- }
+int cx25840_s_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_format *svbi)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct cx25840_state *state = to_state(sd);
+ int is_pal = !(state->std & V4L2_STD_525_60);
+ int vbi_offset = is_pal ? 1 : 0;
+ int i, x;
+ u8 lcr[24];
for (x = 0; x <= 23; x++)
lcr[x] = 0x00;
diff --git a/drivers/media/video/cx88/cx88-core.c b/drivers/media/video/cx88/cx88-core.c
index b35411160f04..8b21457111b1 100644
--- a/drivers/media/video/cx88/cx88-core.c
+++ b/drivers/media/video/cx88/cx88-core.c
@@ -847,7 +847,8 @@ static int set_tvaudio(struct cx88_core *core)
{
v4l2_std_id norm = core->tvnorm;
- if (CX88_VMUX_TELEVISION != INPUT(core->input).type)
+ if (CX88_VMUX_TELEVISION != INPUT(core->input).type &&
+ CX88_VMUX_CABLE != INPUT(core->input).type)
return 0;
if (V4L2_STD_PAL_BG & norm) {
diff --git a/drivers/media/video/cx88/cx88-dvb.c b/drivers/media/video/cx88/cx88-dvb.c
index 94ab862f0219..faa8e8163a4a 100644
--- a/drivers/media/video/cx88/cx88-dvb.c
+++ b/drivers/media/video/cx88/cx88-dvb.c
@@ -1231,7 +1231,7 @@ static int dvb_register(struct cx8802_dev *dev)
fe->ops.tuner_ops.set_config(fe, &ctl);
}
break;
- case CX88_BOARD_PINNACLE_HYBRID_PCTV:
+ case CX88_BOARD_PINNACLE_HYBRID_PCTV:
case CX88_BOARD_WINFAST_DTV1800H:
fe0->dvb.frontend = dvb_attach(zl10353_attach,
&cx88_pinnacle_hybrid_pctv,
diff --git a/drivers/media/video/cx88/cx88-input.c b/drivers/media/video/cx88/cx88-input.c
index 6b6abf062c21..e185289e446c 100644
--- a/drivers/media/video/cx88/cx88-input.c
+++ b/drivers/media/video/cx88/cx88-input.c
@@ -32,12 +32,18 @@
#include "cx88.h"
#include <media/ir-common.h>
+#define MODULE_NAME "cx88xx"
+
/* ---------------------------------------------------------------------- */
struct cx88_IR {
struct cx88_core *core;
struct input_dev *input;
struct ir_input_state ir;
+ struct ir_dev_props props;
+
+ int users;
+
char name[32];
char phys[32];
@@ -159,8 +165,16 @@ static enum hrtimer_restart cx88_ir_work(struct hrtimer *timer)
return HRTIMER_RESTART;
}
-void cx88_ir_start(struct cx88_core *core, struct cx88_IR *ir)
+static int __cx88_ir_start(void *priv)
{
+ struct cx88_core *core = priv;
+ struct cx88_IR *ir;
+
+ if (!core || !core->ir)
+ return -EINVAL;
+
+ ir = core->ir;
+
if (ir->polling) {
hrtimer_init(&ir->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
ir->timer.function = cx88_ir_work;
@@ -173,10 +187,18 @@ void cx88_ir_start(struct cx88_core *core, struct cx88_IR *ir)
cx_write(MO_DDS_IO, 0xa80a80); /* 4 kHz sample rate */
cx_write(MO_DDSCFG_IO, 0x5); /* enable */
}
+ return 0;
}
-void cx88_ir_stop(struct cx88_core *core, struct cx88_IR *ir)
+static void __cx88_ir_stop(void *priv)
{
+ struct cx88_core *core = priv;
+ struct cx88_IR *ir;
+
+ if (!core || !core->ir)
+ return;
+
+ ir = core->ir;
if (ir->sampling) {
cx_write(MO_DDSCFG_IO, 0x0);
core->pci_irqmask &= ~PCI_INT_IR_SMPINT;
@@ -186,15 +208,49 @@ void cx88_ir_stop(struct cx88_core *core, struct cx88_IR *ir)
hrtimer_cancel(&ir->timer);
}
+int cx88_ir_start(struct cx88_core *core)
+{
+ if (core->ir->users)
+ return __cx88_ir_start(core);
+
+ return 0;
+}
+
+void cx88_ir_stop(struct cx88_core *core)
+{
+ if (core->ir->users)
+ __cx88_ir_stop(core);
+}
+
+static int cx88_ir_open(void *priv)
+{
+ struct cx88_core *core = priv;
+
+ core->ir->users++;
+ return __cx88_ir_start(core);
+}
+
+static void cx88_ir_close(void *priv)
+{
+ struct cx88_core *core = priv;
+
+ core->ir->users--;
+ if (!core->ir->users)
+ __cx88_ir_stop(core);
+}
+
/* ---------------------------------------------------------------------- */
int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci)
{
struct cx88_IR *ir;
struct input_dev *input_dev;
- struct ir_scancode_table *ir_codes = NULL;
+ char *ir_codes = NULL;
u64 ir_type = IR_TYPE_OTHER;
int err = -ENOMEM;
+ u32 hardware_mask = 0; /* For devices with a hardware mask, when
+ * used with a full-code IR table
+ */
ir = kzalloc(sizeof(*ir), GFP_KERNEL);
input_dev = input_allocate_device();
@@ -208,15 +264,15 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci)
case CX88_BOARD_DNTV_LIVE_DVB_T:
case CX88_BOARD_KWORLD_DVB_T:
case CX88_BOARD_KWORLD_DVB_T_CX22702:
- ir_codes = &ir_codes_dntv_live_dvb_t_table;
+ ir_codes = RC_MAP_DNTV_LIVE_DVB_T;
ir->gpio_addr = MO_GP1_IO;
ir->mask_keycode = 0x1f;
ir->mask_keyup = 0x60;
ir->polling = 50; /* ms */
break;
case CX88_BOARD_TERRATEC_CINERGY_1400_DVB_T1:
- ir_codes = &ir_codes_cinergy_1400_table;
- ir_type = IR_TYPE_PD;
+ ir_codes = RC_MAP_CINERGY_1400;
+ ir_type = IR_TYPE_NEC;
ir->sampling = 0xeb04; /* address */
break;
case CX88_BOARD_HAUPPAUGE:
@@ -230,14 +286,14 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci)
case CX88_BOARD_PCHDTV_HD3000:
case CX88_BOARD_PCHDTV_HD5500:
case CX88_BOARD_HAUPPAUGE_IRONLY:
- ir_codes = &ir_codes_hauppauge_new_table;
+ ir_codes = RC_MAP_HAUPPAUGE_NEW;
ir_type = IR_TYPE_RC5;
ir->sampling = 1;
break;
case CX88_BOARD_WINFAST_DTV2000H:
case CX88_BOARD_WINFAST_DTV2000H_J:
case CX88_BOARD_WINFAST_DTV1800H:
- ir_codes = &ir_codes_winfast_table;
+ ir_codes = RC_MAP_WINFAST;
ir->gpio_addr = MO_GP0_IO;
ir->mask_keycode = 0x8f8;
ir->mask_keyup = 0x100;
@@ -246,14 +302,14 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci)
case CX88_BOARD_WINFAST2000XP_EXPERT:
case CX88_BOARD_WINFAST_DTV1000:
case CX88_BOARD_WINFAST_TV2000_XP_GLOBAL:
- ir_codes = &ir_codes_winfast_table;
+ ir_codes = RC_MAP_WINFAST;
ir->gpio_addr = MO_GP0_IO;
ir->mask_keycode = 0x8f8;
ir->mask_keyup = 0x100;
ir->polling = 1; /* ms */
break;
case CX88_BOARD_IODATA_GVBCTV7E:
- ir_codes = &ir_codes_iodata_bctv7e_table;
+ ir_codes = RC_MAP_IODATA_BCTV7E;
ir->gpio_addr = MO_GP0_IO;
ir->mask_keycode = 0xfd;
ir->mask_keydown = 0x02;
@@ -261,36 +317,43 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci)
break;
case CX88_BOARD_PROLINK_PLAYTVPVR:
case CX88_BOARD_PIXELVIEW_PLAYTV_ULTRA_PRO:
- ir_codes = &ir_codes_pixelview_table;
+ /*
+ * It seems that this hardware is paired with NEC extended
+ * address 0x866b. So, unfortunately, using it with other
+ * IRs that have a different address won't work. Still, there are
+ * other IRs from the same manufacturer that work, like the
+ * 002-T mini RC, provided with newer PV hardware
+ */
+ ir_codes = RC_MAP_PIXELVIEW_MK12;
ir->gpio_addr = MO_GP1_IO;
- ir->mask_keycode = 0x1f;
ir->mask_keyup = 0x80;
- ir->polling = 1; /* ms */
+ ir->polling = 10; /* ms */
+ hardware_mask = 0x3f; /* Hardware returns only 6 bits from command part */
break;
case CX88_BOARD_PROLINK_PV_8000GT:
case CX88_BOARD_PROLINK_PV_GLOBAL_XTREME:
- ir_codes = &ir_codes_pixelview_new_table;
+ ir_codes = RC_MAP_PIXELVIEW_NEW;
ir->gpio_addr = MO_GP1_IO;
ir->mask_keycode = 0x3f;
ir->mask_keyup = 0x80;
ir->polling = 1; /* ms */
break;
case CX88_BOARD_KWORLD_LTV883:
- ir_codes = &ir_codes_pixelview_table;
+ ir_codes = RC_MAP_PIXELVIEW;
ir->gpio_addr = MO_GP1_IO;
ir->mask_keycode = 0x1f;
ir->mask_keyup = 0x60;
ir->polling = 1; /* ms */
break;
case CX88_BOARD_ADSTECH_DVB_T_PCI:
- ir_codes = &ir_codes_adstech_dvb_t_pci_table;
+ ir_codes = RC_MAP_ADSTECH_DVB_T_PCI;
ir->gpio_addr = MO_GP1_IO;
ir->mask_keycode = 0xbf;
ir->mask_keyup = 0x40;
ir->polling = 50; /* ms */
break;
case CX88_BOARD_MSI_TVANYWHERE_MASTER:
- ir_codes = &ir_codes_msi_tvanywhere_table;
+ ir_codes = RC_MAP_MSI_TVANYWHERE;
ir->gpio_addr = MO_GP1_IO;
ir->mask_keycode = 0x1f;
ir->mask_keyup = 0x40;
@@ -298,7 +361,7 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci)
break;
case CX88_BOARD_AVERTV_303:
case CX88_BOARD_AVERTV_STUDIO_303:
- ir_codes = &ir_codes_avertv_303_table;
+ ir_codes = RC_MAP_AVERTV_303;
ir->gpio_addr = MO_GP2_IO;
ir->mask_keycode = 0xfb;
ir->mask_keydown = 0x02;
@@ -311,41 +374,41 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci)
case CX88_BOARD_PROF_7300:
case CX88_BOARD_PROF_7301:
case CX88_BOARD_PROF_6200:
- ir_codes = &ir_codes_tbs_nec_table;
- ir_type = IR_TYPE_PD;
+ ir_codes = RC_MAP_TBS_NEC;
+ ir_type = IR_TYPE_NEC;
ir->sampling = 0xff00; /* address */
break;
case CX88_BOARD_TEVII_S460:
case CX88_BOARD_TEVII_S420:
- ir_codes = &ir_codes_tevii_nec_table;
- ir_type = IR_TYPE_PD;
+ ir_codes = RC_MAP_TEVII_NEC;
+ ir_type = IR_TYPE_NEC;
ir->sampling = 0xff00; /* address */
break;
case CX88_BOARD_DNTV_LIVE_DVB_T_PRO:
- ir_codes = &ir_codes_dntv_live_dvbt_pro_table;
- ir_type = IR_TYPE_PD;
+ ir_codes = RC_MAP_DNTV_LIVE_DVBT_PRO;
+ ir_type = IR_TYPE_NEC;
ir->sampling = 0xff00; /* address */
break;
case CX88_BOARD_NORWOOD_MICRO:
- ir_codes = &ir_codes_norwood_table;
+ ir_codes = RC_MAP_NORWOOD;
ir->gpio_addr = MO_GP1_IO;
ir->mask_keycode = 0x0e;
ir->mask_keyup = 0x80;
ir->polling = 50; /* ms */
break;
case CX88_BOARD_NPGTECH_REALTV_TOP10FM:
- ir_codes = &ir_codes_npgtech_table;
+ ir_codes = RC_MAP_NPGTECH;
ir->gpio_addr = MO_GP0_IO;
ir->mask_keycode = 0xfa;
ir->polling = 50; /* ms */
break;
case CX88_BOARD_PINNACLE_PCTV_HD_800i:
- ir_codes = &ir_codes_pinnacle_pctv_hd_table;
+ ir_codes = RC_MAP_PINNACLE_PCTV_HD;
ir_type = IR_TYPE_RC5;
ir->sampling = 1;
break;
case CX88_BOARD_POWERCOLOR_REAL_ANGEL:
- ir_codes = &ir_codes_powercolor_real_angel_table;
+ ir_codes = RC_MAP_POWERCOLOR_REAL_ANGEL;
ir->gpio_addr = MO_GP2_IO;
ir->mask_keycode = 0x7e;
ir->polling = 100; /* ms */
@@ -357,6 +420,21 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci)
goto err_out_free;
}
+ /*
+ * Using mask_keycode used to be very convenient, for several
+ * reasons. Among others, the scancode tables used the scancode
+ * as the index element, so the fewer bits used, the smaller the
+ * stored table. After the input changes, it is better to use the
+ * full scancode, since that allows replacing the IR remote with
+ * another one. Unfortunately, there is still hardware, like the
+ * Pixelview Ultra Pro, where only part of the scancode is sent via
+ * GPIO. So, there's no way to get the full scancode. Due to that,
+ * hardware_mask was introduced here: it represents the hardware
+ * that has such limits.
+ */
+ if (hardware_mask && !ir->mask_keycode)
+ ir->mask_keycode = hardware_mask;
+
/* init input device */
snprintf(ir->name, sizeof(ir->name), "cx88 IR (%s)", core->board.name);
snprintf(ir->phys, sizeof(ir->phys), "pci-%s/ir0", pci_name(pci));
@@ -381,19 +459,20 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci)
ir->core = core;
core->ir = ir;
- cx88_ir_start(core, ir);
+ ir->props.priv = core;
+ ir->props.open = cx88_ir_open;
+ ir->props.close = cx88_ir_close;
+ ir->props.scanmask = hardware_mask;
/* all done */
- err = ir_input_register(ir->input, ir_codes, NULL);
+ err = ir_input_register(ir->input, ir_codes, &ir->props, MODULE_NAME);
if (err)
- goto err_out_stop;
+ goto err_out_free;
return 0;
- err_out_stop:
- cx88_ir_stop(core, ir);
- core->ir = NULL;
err_out_free:
+ core->ir = NULL;
kfree(ir);
return err;
}
@@ -406,7 +485,7 @@ int cx88_ir_fini(struct cx88_core *core)
if (NULL == ir)
return 0;
- cx88_ir_stop(core, ir);
+ cx88_ir_stop(core);
ir_input_unregister(ir->input);
kfree(ir);
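
The cx88-input.c hunks above convert the IR code from an unconditional start/stop at init/fini time to a reference-counted open/close pair exposed through ir_dev_props, so the sampling hardware only needs to run while at least one listener holds the input device open. A standalone sketch of that counting idea follows; it is illustrative userspace C, not driver code, the hw_start/hw_stop/ir_open/ir_close names are invented, and the real driver differs slightly in that its open path restarts the (idempotent) hardware setup unconditionally.

#include <stdio.h>

struct ir_state {
        int users;
        int running;
};

static void hw_start(struct ir_state *s) { s->running = 1; }
static void hw_stop(struct ir_state *s)  { s->running = 0; }

static void ir_open(struct ir_state *s)
{
        if (s->users++ == 0)
                hw_start(s);            /* first user: start hardware */
}

static void ir_close(struct ir_state *s)
{
        if (--s->users == 0)
                hw_stop(s);             /* last user: stop hardware */
}

int main(void)
{
        struct ir_state s = { 0, 0 };

        ir_open(&s);                    /* first user: hardware starts */
        ir_open(&s);                    /* second user: already running */
        ir_close(&s);                   /* one user left: keep running */
        printf("users=%d running=%d\n", s.users, s.running);
        ir_close(&s);                   /* last user: hardware stops */
        printf("users=%d running=%d\n", s.users, s.running);
        return 0;
}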
diff --git a/drivers/media/video/cx88/cx88-mpeg.c b/drivers/media/video/cx88/cx88-mpeg.c
index 6aba7af9160a..499f8d512ad6 100644
--- a/drivers/media/video/cx88/cx88-mpeg.c
+++ b/drivers/media/video/cx88/cx88-mpeg.c
@@ -599,13 +599,22 @@ struct cx8802_driver * cx8802_get_driver(struct cx8802_dev *dev, enum cx88_board
static int cx8802_request_acquire(struct cx8802_driver *drv)
{
struct cx88_core *core = drv->core;
+ unsigned int i;
/* Fail a request for hardware if the device is busy. */
if (core->active_type_id != CX88_BOARD_NONE &&
core->active_type_id != drv->type_id)
return -EBUSY;
- core->input = CX88_VMUX_DVB;
+ core->input = 0;
+ for (i = 0;
+ i < (sizeof(core->board.input) / sizeof(struct cx88_input));
+ i++) {
+ if (core->board.input[i].type == CX88_VMUX_DVB) {
+ core->input = i;
+ break;
+ }
+ }
if (drv->advise_acquire)
{
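
The cx88-mpeg.c change stops assuming that the DVB input sits at a fixed index and instead scans the board's input table for the first CX88_VMUX_DVB entry, falling back to input 0. A minimal userspace sketch of that scan follows; the struct layout and enum values are invented for illustration, and the ARRAY_SIZE() macro used here is equivalent to the open-coded sizeof division in the hunk.

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

enum vmux_type { VMUX_COMPOSITE, VMUX_TELEVISION, VMUX_DVB };

struct board_input { enum vmux_type type; };

int main(void)
{
        struct board_input inputs[4] = {
                { VMUX_TELEVISION }, { VMUX_COMPOSITE },
                { VMUX_DVB }, { VMUX_COMPOSITE },
        };
        unsigned int i, selected = 0;   /* default to input 0 */

        for (i = 0; i < ARRAY_SIZE(inputs); i++) {
                if (inputs[i].type == VMUX_DVB) {
                        selected = i;   /* first DVB-capable input wins */
                        break;
                }
        }
        printf("selected input %u\n", selected);
        return 0;
}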
diff --git a/drivers/media/video/cx88/cx88-video.c b/drivers/media/video/cx88/cx88-video.c
index 48c450f4a85a..0fab65c3ab39 100644
--- a/drivers/media/video/cx88/cx88-video.c
+++ b/drivers/media/video/cx88/cx88-video.c
@@ -426,12 +426,13 @@ int cx88_video_mux(struct cx88_core *core, unsigned int input)
if (core->board.audio_chip &&
core->board.audio_chip == V4L2_IDENT_WM8775) {
call_all(core, audio, s_routing,
- INPUT(input).audioroute, 0, 0);
+ INPUT(input).audioroute, 0, 0);
}
/* cx2388's C-ADC is connected to the tuner only.
When used with S-Video, that ADC is busy dealing with
chroma, so an external ADC must be used for baseband audio */
- if (INPUT(input).type != CX88_VMUX_TELEVISION ) {
+ if (INPUT(input).type != CX88_VMUX_TELEVISION &&
+ INPUT(input).type != CX88_VMUX_CABLE) {
/* "I2S ADC mode" */
core->tvaudio = WW_I2SADC;
cx88_set_tvaudio(core);
@@ -561,8 +562,8 @@ buffer_setup(struct videobuf_queue *q, unsigned int *count, unsigned int *size)
*size = fh->fmt->depth*fh->width*fh->height >> 3;
if (0 == *count)
*count = 32;
- while (*size * *count > vid_limit * 1024 * 1024)
- (*count)--;
+ if (*size * *count > vid_limit * 1024 * 1024)
+ *count = (vid_limit * 1024 * 1024) / *size;
return 0;
}
@@ -1537,9 +1538,12 @@ static int radio_queryctrl (struct file *file, void *priv,
c->id >= V4L2_CID_LASTP1)
return -EINVAL;
if (c->id == V4L2_CID_AUDIO_MUTE) {
- for (i = 0; i < CX8800_CTLS; i++)
+ for (i = 0; i < CX8800_CTLS; i++) {
if (cx8800_ctls[i].v.id == c->id)
break;
+ }
+ if (i == CX8800_CTLS)
+ return -EINVAL;
*c = cx8800_ctls[i].v;
} else
*c = no_ctl;
@@ -1977,7 +1981,7 @@ static void __devexit cx8800_finidev(struct pci_dev *pci_dev)
}
if (core->ir)
- cx88_ir_stop(core, core->ir);
+ cx88_ir_stop(core);
cx88_shutdown(core); /* FIXME */
pci_disable_device(pci_dev);
@@ -2015,7 +2019,7 @@ static int cx8800_suspend(struct pci_dev *pci_dev, pm_message_t state)
spin_unlock(&dev->slock);
if (core->ir)
- cx88_ir_stop(core, core->ir);
+ cx88_ir_stop(core);
/* FIXME -- shutdown device */
cx88_shutdown(core);
@@ -2056,7 +2060,7 @@ static int cx8800_resume(struct pci_dev *pci_dev)
/* FIXME: re-initialize hardware */
cx88_reset(core);
if (core->ir)
- cx88_ir_start(core, core->ir);
+ cx88_ir_start(core);
cx_set(MO_PCI_INTMSK, core->pci_irqmask);
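
The buffer_setup() change in cx88-video.c above replaces the one-at-a-time decrement loop with a single division, clamping the buffer count in one step to whatever fits under the vid_limit megabyte budget. A small sketch of the arithmetic follows; the values are made up for illustration.

#include <stdio.h>

int main(void)
{
        unsigned int vid_limit = 16;            /* MB, module parameter */
        unsigned int size = 720 * 576 * 2;      /* one YUYV PAL frame */
        unsigned int count = 32;                /* requested buffers */

        if (size * count > vid_limit * 1024 * 1024)
                count = (vid_limit * 1024 * 1024) / size;

        printf("clamped to %u buffers (%u bytes total)\n",
               count, count * size);
        return 0;
}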
diff --git a/drivers/media/video/cx88/cx88.h b/drivers/media/video/cx88/cx88.h
index 48b6c04fb497..bdb03d336536 100644
--- a/drivers/media/video/cx88/cx88.h
+++ b/drivers/media/video/cx88/cx88.h
@@ -41,7 +41,7 @@
#include <linux/version.h>
#include <linux/mutex.h>
-#define CX88_VERSION_CODE KERNEL_VERSION(0,0,7)
+#define CX88_VERSION_CODE KERNEL_VERSION(0, 0, 8)
#define UNSET (-1U)
@@ -290,7 +290,7 @@ struct cx88_subid {
#define RESOURCE_VIDEO 2
#define RESOURCE_VBI 4
-#define BUFFER_TIMEOUT msecs_to_jiffies(500) /* 0.5 seconds */
+#define BUFFER_TIMEOUT msecs_to_jiffies(2000)
/* buffer for one video frame */
struct cx88_buffer {
@@ -683,8 +683,8 @@ s32 cx88_dsp_detect_stereo_sap(struct cx88_core *core);
int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci);
int cx88_ir_fini(struct cx88_core *core);
void cx88_ir_irq(struct cx88_core *core);
-void cx88_ir_start(struct cx88_core *core, struct cx88_IR *ir);
-void cx88_ir_stop(struct cx88_core *core, struct cx88_IR *ir);
+int cx88_ir_start(struct cx88_core *core);
+void cx88_ir_stop(struct cx88_core *core);
/* ----------------------------------------------------------- */
/* cx88-mpeg.c */
diff --git a/drivers/media/video/davinci/dm644x_ccdc.c b/drivers/media/video/davinci/dm644x_ccdc.c
index b4cc96dc99ef..490aafb34e2f 100644
--- a/drivers/media/video/davinci/dm644x_ccdc.c
+++ b/drivers/media/video/davinci/dm644x_ccdc.c
@@ -101,6 +101,9 @@ static u32 ccdc_raw_bayer_pix_formats[] =
static u32 ccdc_raw_yuv_pix_formats[] =
{V4L2_PIX_FMT_UYVY, V4L2_PIX_FMT_YUYV};
+/* CCDC Save/Restore context */
+static u32 ccdc_ctx[CCDC_REG_END / sizeof(u32)];
+
/* register access routines */
static inline u32 regr(u32 offset)
{
@@ -400,7 +403,11 @@ void ccdc_config_ycbcr(void)
* configure the FID, VD, HD pin polarity,
* fld,hd pol positive, vd negative, 8-bit data
*/
- syn_mode |= CCDC_SYN_MODE_VD_POL_NEGATIVE | CCDC_SYN_MODE_8BITS;
+ syn_mode |= CCDC_SYN_MODE_VD_POL_NEGATIVE;
+ if (ccdc_cfg.if_type == VPFE_BT656_10BIT)
+ syn_mode |= CCDC_SYN_MODE_10BITS;
+ else
+ syn_mode |= CCDC_SYN_MODE_8BITS;
} else {
/* y/c external sync mode */
syn_mode |= (((params->fid_pol & CCDC_FID_POL_MASK) <<
@@ -419,8 +426,13 @@ void ccdc_config_ycbcr(void)
* configure the order of y cb cr in SDRAM, and disable latch
* internal register on vsync
*/
- regw((params->pix_order << CCDC_CCDCFG_Y8POS_SHIFT) |
- CCDC_LATCH_ON_VSYNC_DISABLE, CCDC_CCDCFG);
+ if (ccdc_cfg.if_type == VPFE_BT656_10BIT)
+ regw((params->pix_order << CCDC_CCDCFG_Y8POS_SHIFT) |
+ CCDC_LATCH_ON_VSYNC_DISABLE | CCDC_CCDCFG_BW656_10BIT,
+ CCDC_CCDCFG);
+ else
+ regw((params->pix_order << CCDC_CCDCFG_Y8POS_SHIFT) |
+ CCDC_LATCH_ON_VSYNC_DISABLE, CCDC_CCDCFG);
/*
* configure the horizontal line offset. This should be a
@@ -435,7 +447,6 @@ void ccdc_config_ycbcr(void)
ccdc_sbl_reset();
dev_dbg(ccdc_cfg.dev, "\nEnd of ccdc_config_ycbcr...\n");
- ccdc_readregs();
}
static void ccdc_config_black_clamp(struct ccdc_black_clamp *bclamp)
@@ -827,6 +838,7 @@ static int ccdc_set_hw_if_params(struct vpfe_hw_if_param *params)
case VPFE_BT656:
case VPFE_YCBCR_SYNC_16:
case VPFE_YCBCR_SYNC_8:
+ case VPFE_BT656_10BIT:
ccdc_cfg.ycbcr.vd_pol = params->vdpol;
ccdc_cfg.ycbcr.hd_pol = params->hdpol;
break;
@@ -837,6 +849,87 @@ static int ccdc_set_hw_if_params(struct vpfe_hw_if_param *params)
return 0;
}
+static void ccdc_save_context(void)
+{
+ ccdc_ctx[CCDC_PCR >> 2] = regr(CCDC_PCR);
+ ccdc_ctx[CCDC_SYN_MODE >> 2] = regr(CCDC_SYN_MODE);
+ ccdc_ctx[CCDC_HD_VD_WID >> 2] = regr(CCDC_HD_VD_WID);
+ ccdc_ctx[CCDC_PIX_LINES >> 2] = regr(CCDC_PIX_LINES);
+ ccdc_ctx[CCDC_HORZ_INFO >> 2] = regr(CCDC_HORZ_INFO);
+ ccdc_ctx[CCDC_VERT_START >> 2] = regr(CCDC_VERT_START);
+ ccdc_ctx[CCDC_VERT_LINES >> 2] = regr(CCDC_VERT_LINES);
+ ccdc_ctx[CCDC_CULLING >> 2] = regr(CCDC_CULLING);
+ ccdc_ctx[CCDC_HSIZE_OFF >> 2] = regr(CCDC_HSIZE_OFF);
+ ccdc_ctx[CCDC_SDOFST >> 2] = regr(CCDC_SDOFST);
+ ccdc_ctx[CCDC_SDR_ADDR >> 2] = regr(CCDC_SDR_ADDR);
+ ccdc_ctx[CCDC_CLAMP >> 2] = regr(CCDC_CLAMP);
+ ccdc_ctx[CCDC_DCSUB >> 2] = regr(CCDC_DCSUB);
+ ccdc_ctx[CCDC_COLPTN >> 2] = regr(CCDC_COLPTN);
+ ccdc_ctx[CCDC_BLKCMP >> 2] = regr(CCDC_BLKCMP);
+ ccdc_ctx[CCDC_FPC >> 2] = regr(CCDC_FPC);
+ ccdc_ctx[CCDC_FPC_ADDR >> 2] = regr(CCDC_FPC_ADDR);
+ ccdc_ctx[CCDC_VDINT >> 2] = regr(CCDC_VDINT);
+ ccdc_ctx[CCDC_ALAW >> 2] = regr(CCDC_ALAW);
+ ccdc_ctx[CCDC_REC656IF >> 2] = regr(CCDC_REC656IF);
+ ccdc_ctx[CCDC_CCDCFG >> 2] = regr(CCDC_CCDCFG);
+ ccdc_ctx[CCDC_FMTCFG >> 2] = regr(CCDC_FMTCFG);
+ ccdc_ctx[CCDC_FMT_HORZ >> 2] = regr(CCDC_FMT_HORZ);
+ ccdc_ctx[CCDC_FMT_VERT >> 2] = regr(CCDC_FMT_VERT);
+ ccdc_ctx[CCDC_FMT_ADDR0 >> 2] = regr(CCDC_FMT_ADDR0);
+ ccdc_ctx[CCDC_FMT_ADDR1 >> 2] = regr(CCDC_FMT_ADDR1);
+ ccdc_ctx[CCDC_FMT_ADDR2 >> 2] = regr(CCDC_FMT_ADDR2);
+ ccdc_ctx[CCDC_FMT_ADDR3 >> 2] = regr(CCDC_FMT_ADDR3);
+ ccdc_ctx[CCDC_FMT_ADDR4 >> 2] = regr(CCDC_FMT_ADDR4);
+ ccdc_ctx[CCDC_FMT_ADDR5 >> 2] = regr(CCDC_FMT_ADDR5);
+ ccdc_ctx[CCDC_FMT_ADDR6 >> 2] = regr(CCDC_FMT_ADDR6);
+ ccdc_ctx[CCDC_FMT_ADDR7 >> 2] = regr(CCDC_FMT_ADDR7);
+ ccdc_ctx[CCDC_PRGEVEN_0 >> 2] = regr(CCDC_PRGEVEN_0);
+ ccdc_ctx[CCDC_PRGEVEN_1 >> 2] = regr(CCDC_PRGEVEN_1);
+ ccdc_ctx[CCDC_PRGODD_0 >> 2] = regr(CCDC_PRGODD_0);
+ ccdc_ctx[CCDC_PRGODD_1 >> 2] = regr(CCDC_PRGODD_1);
+ ccdc_ctx[CCDC_VP_OUT >> 2] = regr(CCDC_VP_OUT);
+}
+
+static void ccdc_restore_context(void)
+{
+ regw(ccdc_ctx[CCDC_SYN_MODE >> 2], CCDC_SYN_MODE);
+ regw(ccdc_ctx[CCDC_HD_VD_WID >> 2], CCDC_HD_VD_WID);
+ regw(ccdc_ctx[CCDC_PIX_LINES >> 2], CCDC_PIX_LINES);
+ regw(ccdc_ctx[CCDC_HORZ_INFO >> 2], CCDC_HORZ_INFO);
+ regw(ccdc_ctx[CCDC_VERT_START >> 2], CCDC_VERT_START);
+ regw(ccdc_ctx[CCDC_VERT_LINES >> 2], CCDC_VERT_LINES);
+ regw(ccdc_ctx[CCDC_CULLING >> 2], CCDC_CULLING);
+ regw(ccdc_ctx[CCDC_HSIZE_OFF >> 2], CCDC_HSIZE_OFF);
+ regw(ccdc_ctx[CCDC_SDOFST >> 2], CCDC_SDOFST);
+ regw(ccdc_ctx[CCDC_SDR_ADDR >> 2], CCDC_SDR_ADDR);
+ regw(ccdc_ctx[CCDC_CLAMP >> 2], CCDC_CLAMP);
+ regw(ccdc_ctx[CCDC_DCSUB >> 2], CCDC_DCSUB);
+ regw(ccdc_ctx[CCDC_COLPTN >> 2], CCDC_COLPTN);
+ regw(ccdc_ctx[CCDC_BLKCMP >> 2], CCDC_BLKCMP);
+ regw(ccdc_ctx[CCDC_FPC >> 2], CCDC_FPC);
+ regw(ccdc_ctx[CCDC_FPC_ADDR >> 2], CCDC_FPC_ADDR);
+ regw(ccdc_ctx[CCDC_VDINT >> 2], CCDC_VDINT);
+ regw(ccdc_ctx[CCDC_ALAW >> 2], CCDC_ALAW);
+ regw(ccdc_ctx[CCDC_REC656IF >> 2], CCDC_REC656IF);
+ regw(ccdc_ctx[CCDC_CCDCFG >> 2], CCDC_CCDCFG);
+ regw(ccdc_ctx[CCDC_FMTCFG >> 2], CCDC_FMTCFG);
+ regw(ccdc_ctx[CCDC_FMT_HORZ >> 2], CCDC_FMT_HORZ);
+ regw(ccdc_ctx[CCDC_FMT_VERT >> 2], CCDC_FMT_VERT);
+ regw(ccdc_ctx[CCDC_FMT_ADDR0 >> 2], CCDC_FMT_ADDR0);
+ regw(ccdc_ctx[CCDC_FMT_ADDR1 >> 2], CCDC_FMT_ADDR1);
+ regw(ccdc_ctx[CCDC_FMT_ADDR2 >> 2], CCDC_FMT_ADDR2);
+ regw(ccdc_ctx[CCDC_FMT_ADDR3 >> 2], CCDC_FMT_ADDR3);
+ regw(ccdc_ctx[CCDC_FMT_ADDR4 >> 2], CCDC_FMT_ADDR4);
+ regw(ccdc_ctx[CCDC_FMT_ADDR5 >> 2], CCDC_FMT_ADDR5);
+ regw(ccdc_ctx[CCDC_FMT_ADDR6 >> 2], CCDC_FMT_ADDR6);
+ regw(ccdc_ctx[CCDC_FMT_ADDR7 >> 2], CCDC_FMT_ADDR7);
+ regw(ccdc_ctx[CCDC_PRGEVEN_0 >> 2], CCDC_PRGEVEN_0);
+ regw(ccdc_ctx[CCDC_PRGEVEN_1 >> 2], CCDC_PRGEVEN_1);
+ regw(ccdc_ctx[CCDC_PRGODD_0 >> 2], CCDC_PRGODD_0);
+ regw(ccdc_ctx[CCDC_PRGODD_1 >> 2], CCDC_PRGODD_1);
+ regw(ccdc_ctx[CCDC_VP_OUT >> 2], CCDC_VP_OUT);
+ regw(ccdc_ctx[CCDC_PCR >> 2], CCDC_PCR);
+}
static struct ccdc_hw_device ccdc_hw_dev = {
.name = "DM6446 CCDC",
.owner = THIS_MODULE,
@@ -945,10 +1038,40 @@ static int dm644x_ccdc_remove(struct platform_device *pdev)
return 0;
}
+static int dm644x_ccdc_suspend(struct device *dev)
+{
+ /* Save CCDC context */
+ ccdc_save_context();
+ /* Disable CCDC */
+ ccdc_enable(0);
+ /* Disable both master and slave clock */
+ clk_disable(ccdc_cfg.mclk);
+ clk_disable(ccdc_cfg.sclk);
+
+ return 0;
+}
+
+static int dm644x_ccdc_resume(struct device *dev)
+{
+ /* Enable both master and slave clock */
+ clk_enable(ccdc_cfg.mclk);
+ clk_enable(ccdc_cfg.sclk);
+ /* Restore CCDC context */
+ ccdc_restore_context();
+
+ return 0;
+}
+
+static const struct dev_pm_ops dm644x_ccdc_pm_ops = {
+ .suspend = dm644x_ccdc_suspend,
+ .resume = dm644x_ccdc_resume,
+};
+
static struct platform_driver dm644x_ccdc_driver = {
.driver = {
.name = "dm644x_ccdc",
.owner = THIS_MODULE,
+ .pm = &dm644x_ccdc_pm_ops,
},
.remove = __devexit_p(dm644x_ccdc_remove),
.probe = dm644x_ccdc_probe,
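
The dm644x_ccdc.c additions above implement suspend/resume by snapshotting every CCDC register into a context array before the clocks are gated and writing the values back afterwards; byte offsets are shifted right by two to index the u32 array, and CCDC_PCR (the peripheral enable) is restored last. The following userspace sketch models that indexing with two fake registers; regr()/regw() here are stand-ins for the driver's MMIO helpers, and the offsets are invented.

#include <stdio.h>
#include <stdint.h>

#define REG_PCR       0x04
#define REG_SYN_MODE  0x08
#define REG_END       0x0c

static uint32_t mmio[REG_END / 4];              /* fake device registers */
static uint32_t ctx[REG_END / sizeof(uint32_t)];/* save/restore context */

static uint32_t regr(uint32_t off)                { return mmio[off / 4]; }
static void     regw(uint32_t val, uint32_t off)  { mmio[off / 4] = val; }

static void save_context(void)
{
        ctx[REG_PCR >> 2]      = regr(REG_PCR);
        ctx[REG_SYN_MODE >> 2] = regr(REG_SYN_MODE);
}

static void restore_context(void)
{
        regw(ctx[REG_SYN_MODE >> 2], REG_SYN_MODE);
        regw(ctx[REG_PCR >> 2], REG_PCR);       /* enable bit last */
}

int main(void)
{
        regw(0x1, REG_PCR);
        regw(0x7c4, REG_SYN_MODE);
        save_context();
        regw(0, REG_PCR);                       /* "suspend" wipes state */
        regw(0, REG_SYN_MODE);
        restore_context();
        printf("PCR=%#x SYN_MODE=%#x\n",
               (unsigned)regr(REG_PCR), (unsigned)regr(REG_SYN_MODE));
        return 0;
}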
diff --git a/drivers/media/video/davinci/dm644x_ccdc_regs.h b/drivers/media/video/davinci/dm644x_ccdc_regs.h
index 6e5d05324466..90370e414e2c 100644
--- a/drivers/media/video/davinci/dm644x_ccdc_regs.h
+++ b/drivers/media/video/davinci/dm644x_ccdc_regs.h
@@ -59,7 +59,7 @@
#define CCDC_PRGODD_0 0x8c
#define CCDC_PRGODD_1 0x90
#define CCDC_VP_OUT 0x94
-
+#define CCDC_REG_END 0x98
/***************************************************************
* Define for various register bit mask and shifts for CCDC
@@ -135,11 +135,19 @@
#define CCDC_SYN_MODE_INPMOD_SHIFT 12
#define CCDC_SYN_MODE_INPMOD_MASK 3
#define CCDC_SYN_MODE_8BITS (7 << 8)
+#define CCDC_SYN_MODE_10BITS (6 << 8)
+#define CCDC_SYN_MODE_11BITS (5 << 8)
+#define CCDC_SYN_MODE_12BITS (4 << 8)
+#define CCDC_SYN_MODE_13BITS (3 << 8)
+#define CCDC_SYN_MODE_14BITS (2 << 8)
+#define CCDC_SYN_MODE_15BITS (1 << 8)
+#define CCDC_SYN_MODE_16BITS (0 << 8)
#define CCDC_SYN_FLDMODE_MASK 1
#define CCDC_SYN_FLDMODE_SHIFT 7
#define CCDC_REC656IF_BT656_EN 3
#define CCDC_SYN_MODE_VD_POL_NEGATIVE (1 << 2)
#define CCDC_CCDCFG_Y8POS_SHIFT 11
+#define CCDC_CCDCFG_BW656_10BIT (1 << 5)
#define CCDC_SDOFST_FIELD_INTERLEAVED 0x249
#define CCDC_NO_CULLING 0xffff00ff
#endif
diff --git a/drivers/media/video/davinci/isif_regs.h b/drivers/media/video/davinci/isif_regs.h
index f7b8893a2957..aa69a463c122 100644
--- a/drivers/media/video/davinci/isif_regs.h
+++ b/drivers/media/video/davinci/isif_regs.h
@@ -158,7 +158,7 @@
/* gain - offset masks */
#define GAIN_INTEGER_SHIFT 9
-#define OFFSET_MASK 0xFFF
+#define OFFSET_MASK 0xFFF
#define GAIN_SDRAM_EN_SHIFT 12
#define GAIN_IPIPE_EN_SHIFT 13
#define GAIN_H3A_EN_SHIFT 14
diff --git a/drivers/media/video/davinci/vpfe_capture.c b/drivers/media/video/davinci/vpfe_capture.c
index 398dbe71cb82..1c2588247289 100644
--- a/drivers/media/video/davinci/vpfe_capture.c
+++ b/drivers/media/video/davinci/vpfe_capture.c
@@ -475,6 +475,11 @@ static int vpfe_initialize_device(struct vpfe_device *vpfe_dev)
ret = ccdc_dev->hw_ops.open(vpfe_dev->pdev);
if (!ret)
vpfe_dev->initialized = 1;
+
+ /* Clear all VPFE/CCDC interrupts */
+ if (vpfe_dev->cfg->clr_intr)
+ vpfe_dev->cfg->clr_intr(-1);
+
unlock:
mutex_unlock(&ccdc_lock);
return ret;
@@ -534,6 +539,16 @@ static void vpfe_schedule_next_buffer(struct vpfe_device *vpfe_dev)
list_del(&vpfe_dev->next_frm->queue);
vpfe_dev->next_frm->state = VIDEOBUF_ACTIVE;
addr = videobuf_to_dma_contig(vpfe_dev->next_frm);
+
+ ccdc_dev->hw_ops.setfbaddr(addr);
+}
+
+static void vpfe_schedule_bottom_field(struct vpfe_device *vpfe_dev)
+{
+ unsigned long addr;
+
+ addr = videobuf_to_dma_contig(vpfe_dev->cur_frm);
+ addr += vpfe_dev->field_off;
ccdc_dev->hw_ops.setfbaddr(addr);
}
@@ -554,7 +569,6 @@ static irqreturn_t vpfe_isr(int irq, void *dev_id)
{
struct vpfe_device *vpfe_dev = dev_id;
enum v4l2_field field;
- unsigned long addr;
int fid;
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "\nStarting vpfe_isr...\n");
@@ -562,7 +576,7 @@ static irqreturn_t vpfe_isr(int irq, void *dev_id)
/* if streaming not started, don't do anything */
if (!vpfe_dev->started)
- return IRQ_HANDLED;
+ goto clear_intr;
/* only for 6446 this will be applicable */
if (NULL != ccdc_dev->hw_ops.reset)
@@ -574,7 +588,7 @@ static irqreturn_t vpfe_isr(int irq, void *dev_id)
"frame format is progressive...\n");
if (vpfe_dev->cur_frm != vpfe_dev->next_frm)
vpfe_process_buffer_complete(vpfe_dev);
- return IRQ_HANDLED;
+ goto clear_intr;
}
/* interlaced or TB capture check which field we are in hardware */
@@ -599,12 +613,9 @@ static irqreturn_t vpfe_isr(int irq, void *dev_id)
* the CCDC memory address
*/
if (field == V4L2_FIELD_SEQ_TB) {
- addr =
- videobuf_to_dma_contig(vpfe_dev->cur_frm);
- addr += vpfe_dev->field_off;
- ccdc_dev->hw_ops.setfbaddr(addr);
+ vpfe_schedule_bottom_field(vpfe_dev);
}
- return IRQ_HANDLED;
+ goto clear_intr;
}
/*
* if one field is just being captured configure
@@ -624,6 +635,10 @@ static irqreturn_t vpfe_isr(int irq, void *dev_id)
*/
vpfe_dev->field_id = fid;
}
+clear_intr:
+ if (vpfe_dev->cfg->clr_intr)
+ vpfe_dev->cfg->clr_intr(irq);
+
return IRQ_HANDLED;
}
@@ -635,8 +650,11 @@ static irqreturn_t vdint1_isr(int irq, void *dev_id)
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "\nInside vdint1_isr...\n");
/* if streaming not started, don't do anything */
- if (!vpfe_dev->started)
+ if (!vpfe_dev->started) {
+ if (vpfe_dev->cfg->clr_intr)
+ vpfe_dev->cfg->clr_intr(irq);
return IRQ_HANDLED;
+ }
spin_lock(&vpfe_dev->dma_queue_lock);
if ((vpfe_dev->fmt.fmt.pix.field == V4L2_FIELD_NONE) &&
@@ -644,6 +662,10 @@ static irqreturn_t vdint1_isr(int irq, void *dev_id)
vpfe_dev->cur_frm == vpfe_dev->next_frm)
vpfe_schedule_next_buffer(vpfe_dev);
spin_unlock(&vpfe_dev->dma_queue_lock);
+
+ if (vpfe_dev->cfg->clr_intr)
+ vpfe_dev->cfg->clr_intr(irq);
+
return IRQ_HANDLED;
}
@@ -714,7 +736,7 @@ static int vpfe_release(struct file *file)
/* Decrement device usrs counter */
vpfe_dev->usrs--;
/* Close the priority */
- v4l2_prio_close(&vpfe_dev->prio, &fh->prio);
+ v4l2_prio_close(&vpfe_dev->prio, fh->prio);
/* If this is the last file handle */
if (!vpfe_dev->usrs) {
vpfe_dev->initialized = 0;
@@ -1218,7 +1240,10 @@ static int vpfe_videobuf_setup(struct videobuf_queue *vq,
struct vpfe_device *vpfe_dev = fh->vpfe_dev;
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_buffer_setup\n");
- *size = config_params.device_bufsize;
+ *size = vpfe_dev->fmt.fmt.pix.sizeimage;
+ if (vpfe_dev->memory == V4L2_MEMORY_MMAP &&
+ vpfe_dev->fmt.fmt.pix.sizeimage > config_params.device_bufsize)
+ *size = config_params.device_bufsize;
if (*count < config_params.min_numbuffers)
*count = config_params.min_numbuffers;
@@ -1233,6 +1258,8 @@ static int vpfe_videobuf_prepare(struct videobuf_queue *vq,
{
struct vpfe_fh *fh = vq->priv_data;
struct vpfe_device *vpfe_dev = fh->vpfe_dev;
+ unsigned long addr;
+ int ret;
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_buffer_prepare\n");
@@ -1242,8 +1269,18 @@ static int vpfe_videobuf_prepare(struct videobuf_queue *vq,
vb->height = vpfe_dev->fmt.fmt.pix.height;
vb->size = vpfe_dev->fmt.fmt.pix.sizeimage;
vb->field = field;
+
+ ret = videobuf_iolock(vq, vb, NULL);
+ if (ret < 0)
+ return ret;
+
+ addr = videobuf_to_dma_contig(vb);
+ /* Make sure user addresses are aligned to 32 bytes */
+ if (!ALIGN(addr, 32))
+ return -EINVAL;
+
+ vb->state = VIDEOBUF_PREPARED;
}
- vb->state = VIDEOBUF_PREPARED;
return 0;
}
@@ -1311,13 +1348,6 @@ static int vpfe_reqbufs(struct file *file, void *priv,
return -EINVAL;
}
- if (V4L2_MEMORY_USERPTR == req_buf->memory) {
- /* we don't support user ptr IO */
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_reqbufs:"
- " USERPTR IO not supported\n");
- return -EINVAL;
- }
-
ret = mutex_lock_interruptible(&vpfe_dev->lock);
if (ret)
return ret;
@@ -1831,7 +1861,6 @@ static __init int vpfe_probe(struct platform_device *pdev)
goto probe_free_dev_mem;
}
- mutex_lock(&ccdc_lock);
/* Allocate memory for ccdc configuration */
ccdc_cfg = kmalloc(sizeof(struct ccdc_config), GFP_KERNEL);
if (NULL == ccdc_cfg) {
@@ -1840,6 +1869,8 @@ static __init int vpfe_probe(struct platform_device *pdev)
goto probe_free_lock;
}
+ mutex_lock(&ccdc_lock);
+
strncpy(ccdc_cfg->name, vpfe_cfg->ccdc, 32);
/* Get VINT0 irq resource */
res1 = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
@@ -2015,18 +2046,14 @@ static int __devexit vpfe_remove(struct platform_device *pdev)
return 0;
}
-static int
-vpfe_suspend(struct device *dev)
+static int vpfe_suspend(struct device *dev)
{
- /* add suspend code here later */
- return -1;
+ return 0;
}
-static int
-vpfe_resume(struct device *dev)
+static int vpfe_resume(struct device *dev)
{
- /* add resume code here later */
- return -1;
+ return 0;
}
static const struct dev_pm_ops vpfe_dev_pm_ops = {
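
The vpfe_capture.c interrupt-path changes funnel every exit from the ISRs through an optional cfg->clr_intr() callback, so platforms that need an explicit VPFE interrupt-clear write get one on every return path while others simply leave the hook NULL. A userspace sketch of that optional-callback pattern follows; the function and struct names are invented for illustration.

#include <stdio.h>

struct vpfe_cfg {
        void (*clr_intr)(int irq);      /* optional, may be NULL */
};

static void board_clr_intr(int irq)
{
        printf("clearing VPFE interrupt %d\n", irq);
}

static void fake_isr(struct vpfe_cfg *cfg, int irq)
{
        /* buffer handling would happen here */
        if (cfg->clr_intr)
                cfg->clr_intr(irq);     /* only when the board provides it */
}

int main(void)
{
        struct vpfe_cfg with_hook = { .clr_intr = board_clr_intr };
        struct vpfe_cfg without_hook = { .clr_intr = NULL };

        fake_isr(&with_hook, 5);        /* hook runs */
        fake_isr(&without_hook, 5);     /* silently skipped */
        return 0;
}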
diff --git a/drivers/media/video/davinci/vpif_capture.c b/drivers/media/video/davinci/vpif_capture.c
index 2e5a7fb2d0c9..a7f48b53d3fc 100644
--- a/drivers/media/video/davinci/vpif_capture.c
+++ b/drivers/media/video/davinci/vpif_capture.c
@@ -869,7 +869,7 @@ static int vpif_release(struct file *filep)
mutex_unlock(&common->lock);
/* Close the priority */
- v4l2_prio_close(&ch->prio, &fh->prio);
+ v4l2_prio_close(&ch->prio, fh->prio);
if (fh->initialized)
ch->initialized = 0;
@@ -1444,7 +1444,7 @@ static int vpif_s_std(struct file *file, void *priv, v4l2_std_id *std_id)
}
}
- ret = v4l2_prio_check(&ch->prio, &fh->prio);
+ ret = v4l2_prio_check(&ch->prio, fh->prio);
if (0 != ret)
return ret;
@@ -1554,7 +1554,7 @@ static int vpif_s_input(struct file *file, void *priv, unsigned int index)
}
}
- ret = v4l2_prio_check(&ch->prio, &fh->prio);
+ ret = v4l2_prio_check(&ch->prio, fh->prio);
if (0 != ret)
return ret;
@@ -1710,7 +1710,7 @@ static int vpif_s_fmt_vid_cap(struct file *file, void *priv,
}
}
- ret = v4l2_prio_check(&ch->prio, &fh->prio);
+ ret = v4l2_prio_check(&ch->prio, fh->prio);
if (0 != ret)
return ret;
diff --git a/drivers/media/video/davinci/vpif_display.c b/drivers/media/video/davinci/vpif_display.c
index 13c3a1b97760..da07607cbc55 100644
--- a/drivers/media/video/davinci/vpif_display.c
+++ b/drivers/media/video/davinci/vpif_display.c
@@ -384,7 +384,7 @@ static int vpif_get_std_info(struct channel_obj *ch)
int index;
std_info->stdid = vid_ch->stdid;
- if (!std_info)
+ if (!std_info->stdid)
return -1;
for (index = 0; index < ARRAY_SIZE(ch_params); index++) {
@@ -671,7 +671,7 @@ static int vpif_release(struct file *filep)
ch->initialized = 0;
/* Close the priority */
- v4l2_prio_close(&ch->prio, &fh->prio);
+ v4l2_prio_close(&ch->prio, fh->prio);
filep->private_data = NULL;
fh->initialized = 0;
kfree(fh);
@@ -753,7 +753,7 @@ static int vpif_s_fmt_vid_out(struct file *file, void *priv,
}
/* Check for the priority */
- ret = v4l2_prio_check(&ch->prio, &fh->prio);
+ ret = v4l2_prio_check(&ch->prio, fh->prio);
if (0 != ret)
return ret;
fh->initialized = 1;
diff --git a/drivers/media/video/em28xx/em28xx-audio.c b/drivers/media/video/em28xx/em28xx-audio.c
index bd783387b37d..e182abf476c9 100644
--- a/drivers/media/video/em28xx/em28xx-audio.c
+++ b/drivers/media/video/em28xx/em28xx-audio.c
@@ -491,7 +491,7 @@ static int em28xx_audio_init(struct em28xx *dev)
strcpy(pcm->name, "Empia 28xx Capture");
snd_card_set_dev(card, &dev->udev->dev);
- strcpy(card->driver, "Empia Em28xx Audio");
+ strcpy(card->driver, "Em28xx-Audio");
strcpy(card->shortname, "Em28xx Audio");
strcpy(card->longname, "Empia Em28xx Audio");
diff --git a/drivers/media/video/em28xx/em28xx-cards.c b/drivers/media/video/em28xx/em28xx-cards.c
index b0fb08337710..3a4fd8514511 100644
--- a/drivers/media/video/em28xx/em28xx-cards.c
+++ b/drivers/media/video/em28xx/em28xx-cards.c
@@ -602,7 +602,7 @@ struct em28xx_board em28xx_boards[] = {
.name = "Gadmei UTV330+",
.tuner_type = TUNER_TNF_5335MF,
.tda9887_conf = TDA9887_PRESENT,
- .ir_codes = &ir_codes_gadmei_rm008z_table,
+ .ir_codes = RC_MAP_GADMEI_RM008Z,
.decoder = EM28XX_SAA711X,
.xclk = EM28XX_XCLK_FREQUENCY_12MHZ,
.input = { {
@@ -681,6 +681,20 @@ struct em28xx_board em28xx_boards[] = {
.amux = EM28XX_AMUX_LINE_IN,
} },
},
+ [EM2860_BOARD_TVP5150_REFERENCE_DESIGN] = {
+ .name = "EM2860/TVP5150 Reference Design",
+ .tuner_type = TUNER_ABSENT, /* Capture only device */
+ .decoder = EM28XX_TVP5150,
+ .input = { {
+ .type = EM28XX_VMUX_COMPOSITE1,
+ .vmux = TVP5150_COMPOSITE1,
+ .amux = EM28XX_AMUX_LINE_IN,
+ }, {
+ .type = EM28XX_VMUX_SVIDEO,
+ .vmux = TVP5150_SVIDEO,
+ .amux = EM28XX_AMUX_LINE_IN,
+ } },
+ },
[EM2861_BOARD_PLEXTOR_PX_TV100U] = {
.name = "Plextor ConvertX PX-TV100U",
.tuner_type = TUNER_TNF_5335MF,
@@ -777,7 +791,7 @@ struct em28xx_board em28xx_boards[] = {
.mts_firmware = 1,
.has_dvb = 1,
.dvb_gpio = hauppauge_wintv_hvr_900_digital,
- .ir_codes = &ir_codes_hauppauge_new_table,
+ .ir_codes = RC_MAP_HAUPPAUGE_NEW,
.decoder = EM28XX_TVP5150,
.input = { {
.type = EM28XX_VMUX_TELEVISION,
@@ -802,7 +816,7 @@ struct em28xx_board em28xx_boards[] = {
.tuner_type = TUNER_XC2028,
.tuner_gpio = default_tuner_gpio,
.mts_firmware = 1,
- .ir_codes = &ir_codes_hauppauge_new_table,
+ .ir_codes = RC_MAP_HAUPPAUGE_NEW,
.decoder = EM28XX_TVP5150,
.input = { {
.type = EM28XX_VMUX_TELEVISION,
@@ -828,7 +842,7 @@ struct em28xx_board em28xx_boards[] = {
.mts_firmware = 1,
.has_dvb = 1,
.dvb_gpio = hauppauge_wintv_hvr_900_digital,
- .ir_codes = &ir_codes_hauppauge_new_table,
+ .ir_codes = RC_MAP_HAUPPAUGE_NEW,
.decoder = EM28XX_TVP5150,
.input = { {
.type = EM28XX_VMUX_TELEVISION,
@@ -854,7 +868,7 @@ struct em28xx_board em28xx_boards[] = {
.mts_firmware = 1,
.has_dvb = 1,
.dvb_gpio = hauppauge_wintv_hvr_900_digital,
- .ir_codes = &ir_codes_rc5_hauppauge_new_table,
+ .ir_codes = RC_MAP_RC5_HAUPPAUGE_NEW,
.decoder = EM28XX_TVP5150,
.input = { {
.type = EM28XX_VMUX_TELEVISION,
@@ -880,7 +894,7 @@ struct em28xx_board em28xx_boards[] = {
.mts_firmware = 1,
.has_dvb = 1,
.dvb_gpio = hauppauge_wintv_hvr_900_digital,
- .ir_codes = &ir_codes_pinnacle_pctv_hd_table,
+ .ir_codes = RC_MAP_PINNACLE_PCTV_HD,
.decoder = EM28XX_TVP5150,
.input = { {
.type = EM28XX_VMUX_TELEVISION,
@@ -906,7 +920,7 @@ struct em28xx_board em28xx_boards[] = {
.mts_firmware = 1,
.has_dvb = 1,
.dvb_gpio = hauppauge_wintv_hvr_900_digital,
- .ir_codes = &ir_codes_ati_tv_wonder_hd_600_table,
+ .ir_codes = RC_MAP_ATI_TV_WONDER_HD_600,
.decoder = EM28XX_TVP5150,
.input = { {
.type = EM28XX_VMUX_TELEVISION,
@@ -932,7 +946,7 @@ struct em28xx_board em28xx_boards[] = {
.decoder = EM28XX_TVP5150,
.has_dvb = 1,
.dvb_gpio = default_digital,
- .ir_codes = &ir_codes_terratec_cinergy_xs_table,
+ .ir_codes = RC_MAP_TERRATEC_CINERGY_XS,
.xclk = EM28XX_XCLK_FREQUENCY_12MHZ, /* NEC IR */
.input = { {
.type = EM28XX_VMUX_TELEVISION,
@@ -1282,7 +1296,7 @@ struct em28xx_board em28xx_boards[] = {
.decoder = EM28XX_SAA711X,
.has_dvb = 1,
.dvb_gpio = em2882_kworld_315u_digital,
- .ir_codes = &ir_codes_kworld_315u_table,
+ .ir_codes = RC_MAP_KWORLD_315U,
.xclk = EM28XX_XCLK_FREQUENCY_12MHZ,
.i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE,
/* Analog mode - still not ready */
@@ -1404,10 +1418,14 @@ struct em28xx_board em28xx_boards[] = {
},
[EM2882_BOARD_KWORLD_VS_DVBT] = {
.name = "Kworld VS-DVB-T 323UR",
- .valid = EM28XX_BOARD_NOT_VALIDATED,
.tuner_type = TUNER_XC2028,
.tuner_gpio = default_tuner_gpio,
.decoder = EM28XX_TVP5150,
+ .mts_firmware = 1,
+ .has_dvb = 1,
+ .dvb_gpio = kworld_330u_digital,
+ .xclk = EM28XX_XCLK_FREQUENCY_12MHZ, /* NEC IR */
+ .ir_codes = RC_MAP_KWORLD_315U,
.input = { {
.type = EM28XX_VMUX_TELEVISION,
.vmux = TVP5150_COMPOSITE0,
@@ -1430,7 +1448,7 @@ struct em28xx_board em28xx_boards[] = {
.decoder = EM28XX_TVP5150,
.has_dvb = 1,
.dvb_gpio = hauppauge_wintv_hvr_900_digital,
- .ir_codes = &ir_codes_terratec_cinergy_xs_table,
+ .ir_codes = RC_MAP_TERRATEC_CINERGY_XS,
.xclk = EM28XX_XCLK_FREQUENCY_12MHZ,
.input = { {
.type = EM28XX_VMUX_TELEVISION,
@@ -1523,7 +1541,7 @@ struct em28xx_board em28xx_boards[] = {
.mts_firmware = 1,
.decoder = EM28XX_TVP5150,
.tuner_gpio = default_tuner_gpio,
- .ir_codes = &ir_codes_kaiomy_table,
+ .ir_codes = RC_MAP_KAIOMY,
.input = { {
.type = EM28XX_VMUX_TELEVISION,
.vmux = TVP5150_COMPOSITE0,
@@ -1623,7 +1641,7 @@ struct em28xx_board em28xx_boards[] = {
.mts_firmware = 1,
.has_dvb = 1,
.dvb_gpio = evga_indtube_digital,
- .ir_codes = &ir_codes_evga_indtube_table,
+ .ir_codes = RC_MAP_EVGA_INDTUBE,
.input = { {
.type = EM28XX_VMUX_TELEVISION,
.vmux = TVP5150_COMPOSITE0,
@@ -1672,6 +1690,8 @@ struct usb_device_id em28xx_id_table[] = {
.driver_info = EM2820_BOARD_UNKNOWN },
{ USB_DEVICE(0xeb1a, 0x2862),
.driver_info = EM2820_BOARD_UNKNOWN },
+ { USB_DEVICE(0xeb1a, 0x2863),
+ .driver_info = EM2820_BOARD_UNKNOWN },
{ USB_DEVICE(0xeb1a, 0x2870),
.driver_info = EM2820_BOARD_UNKNOWN },
{ USB_DEVICE(0xeb1a, 0x2881),
@@ -1792,6 +1812,7 @@ static struct em28xx_hash_table em28xx_i2c_hash[] = {
{0xb06a32c3, EM2800_BOARD_TERRATEC_CINERGY_200, TUNER_LG_PAL_NEW_TAPC},
{0xf51200e3, EM2800_BOARD_VGEAR_POCKETTV, TUNER_LG_PAL_NEW_TAPC},
{0x1ba50080, EM2860_BOARD_SAA711X_REFERENCE_DESIGN, TUNER_ABSENT},
+ {0x77800080, EM2860_BOARD_TVP5150_REFERENCE_DESIGN, TUNER_ABSENT},
{0xc51200e3, EM2820_BOARD_GADMEI_TVR200, TUNER_LG_PAL_NEW_TAPC},
{0x4ba50080, EM2861_BOARD_GADMEI_UTV330PLUS, TUNER_TNF_5335MF},
};
@@ -2138,6 +2159,7 @@ static void em28xx_setup_xc3028(struct em28xx *dev, struct xc2028_ctrl *ctl)
break;
case EM2883_BOARD_KWORLD_HYBRID_330U:
case EM2882_BOARD_DIKOM_DK300:
+ case EM2882_BOARD_KWORLD_VS_DVBT:
ctl->demod = XC3028_FE_CHINA;
ctl->fname = XC2028_DEFAULT_FIRMWARE;
break;
@@ -2313,21 +2335,21 @@ void em28xx_register_i2c_ir(struct em28xx *dev)
switch (dev->model) {
case EM2800_BOARD_TERRATEC_CINERGY_200:
case EM2820_BOARD_TERRATEC_CINERGY_250:
- dev->init_data.ir_codes = &ir_codes_em_terratec_table;
+ dev->init_data.ir_codes = RC_MAP_EM_TERRATEC;
dev->init_data.get_key = em28xx_get_key_terratec;
dev->init_data.name = "i2c IR (EM28XX Terratec)";
break;
case EM2820_BOARD_PINNACLE_USB_2:
- dev->init_data.ir_codes = &ir_codes_pinnacle_grey_table;
+ dev->init_data.ir_codes = RC_MAP_PINNACLE_GREY;
dev->init_data.get_key = em28xx_get_key_pinnacle_usb_grey;
dev->init_data.name = "i2c IR (EM28XX Pinnacle PCTV)";
break;
case EM2820_BOARD_HAUPPAUGE_WINTV_USB_2:
- dev->init_data.ir_codes = &ir_codes_rc5_hauppauge_new_table;
+ dev->init_data.ir_codes = RC_MAP_RC5_HAUPPAUGE_NEW;
dev->init_data.get_key = em28xx_get_key_em_haup;
dev->init_data.name = "i2c IR (EM2840 Hauppauge)";
case EM2820_BOARD_LEADTEK_WINFAST_USBII_DELUXE:
- dev->init_data.ir_codes = &ir_codes_winfast_usbii_deluxe_table;;
+ dev->init_data.ir_codes = RC_MAP_WINFAST_USBII_DELUXE;
dev->init_data.get_key = em28xx_get_key_winfast_usbii_deluxe;
dev->init_data.name = "i2c IR (EM2820 Winfast TV USBII Deluxe)";
break;
diff --git a/drivers/media/video/em28xx/em28xx-core.c b/drivers/media/video/em28xx/em28xx-core.c
index a41cc5566778..331e1cac4272 100644
--- a/drivers/media/video/em28xx/em28xx-core.c
+++ b/drivers/media/video/em28xx/em28xx-core.c
@@ -782,11 +782,15 @@ int em28xx_resolution_set(struct em28xx *dev)
em28xx_accumulator_set(dev, 1, (width - 4) >> 2, 1, (height - 4) >> 2);
- /* If we don't set the start position to 4 in VBI mode, we end up
- with line 21 being YUYV encoded instead of being in 8-bit
- greyscale */
+ /* If we don't set the start position to 2 in VBI mode, we end up
+ with lines 20/21 being YUYV encoded instead of being in 8-bit
+ greyscale. The core of the issue is that line 21 (and line 23 for
+ PAL WSS) is inside the active video region, and as a result it
+ gets the pixel formatting associated with that area. So by cropping
+ it out, we end up with the same format as the rest of the VBI
+ region */
if (em28xx_vbi_supported(dev) == 1)
- em28xx_capture_area_set(dev, 0, 4, width >> 2, height >> 2);
+ em28xx_capture_area_set(dev, 0, 2, width >> 2, height >> 2);
else
em28xx_capture_area_set(dev, 0, 0, width >> 2, height >> 2);
@@ -966,7 +970,7 @@ void em28xx_uninit_isoc(struct em28xx *dev)
usb_unlink_urb(urb);
if (dev->isoc_ctl.transfer_buffer[i]) {
- usb_buffer_free(dev->udev,
+ usb_free_coherent(dev->udev,
urb->transfer_buffer_length,
dev->isoc_ctl.transfer_buffer[i],
urb->transfer_dma);
@@ -1041,7 +1045,7 @@ int em28xx_init_isoc(struct em28xx *dev, int max_packets,
}
dev->isoc_ctl.urb[i] = urb;
- dev->isoc_ctl.transfer_buffer[i] = usb_buffer_alloc(dev->udev,
+ dev->isoc_ctl.transfer_buffer[i] = usb_alloc_coherent(dev->udev,
sb_size, GFP_KERNEL, &urb->transfer_dma);
if (!dev->isoc_ctl.transfer_buffer[i]) {
em28xx_err("unable to allocate %i bytes for transfer"
@@ -1174,21 +1178,18 @@ void em28xx_add_into_devlist(struct em28xx *dev)
*/
static LIST_HEAD(em28xx_extension_devlist);
-static DEFINE_MUTEX(em28xx_extension_devlist_lock);
int em28xx_register_extension(struct em28xx_ops *ops)
{
struct em28xx *dev = NULL;
mutex_lock(&em28xx_devlist_mutex);
- mutex_lock(&em28xx_extension_devlist_lock);
list_add_tail(&ops->next, &em28xx_extension_devlist);
list_for_each_entry(dev, &em28xx_devlist, devlist) {
if (dev)
ops->init(dev);
}
printk(KERN_INFO "Em28xx: Initialized (%s) extension\n", ops->name);
- mutex_unlock(&em28xx_extension_devlist_lock);
mutex_unlock(&em28xx_devlist_mutex);
return 0;
}
@@ -1204,10 +1205,8 @@ void em28xx_unregister_extension(struct em28xx_ops *ops)
ops->fini(dev);
}
- mutex_lock(&em28xx_extension_devlist_lock);
printk(KERN_INFO "Em28xx: Removed (%s) extension\n", ops->name);
list_del(&ops->next);
- mutex_unlock(&em28xx_extension_devlist_lock);
mutex_unlock(&em28xx_devlist_mutex);
}
EXPORT_SYMBOL(em28xx_unregister_extension);
@@ -1216,26 +1215,26 @@ void em28xx_init_extension(struct em28xx *dev)
{
struct em28xx_ops *ops = NULL;
- mutex_lock(&em28xx_extension_devlist_lock);
+ mutex_lock(&em28xx_devlist_mutex);
if (!list_empty(&em28xx_extension_devlist)) {
list_for_each_entry(ops, &em28xx_extension_devlist, next) {
if (ops->init)
ops->init(dev);
}
}
- mutex_unlock(&em28xx_extension_devlist_lock);
+ mutex_unlock(&em28xx_devlist_mutex);
}
void em28xx_close_extension(struct em28xx *dev)
{
struct em28xx_ops *ops = NULL;
- mutex_lock(&em28xx_extension_devlist_lock);
+ mutex_lock(&em28xx_devlist_mutex);
if (!list_empty(&em28xx_extension_devlist)) {
list_for_each_entry(ops, &em28xx_extension_devlist, next) {
if (ops->fini)
ops->fini(dev);
}
}
- mutex_unlock(&em28xx_extension_devlist_lock);
+ mutex_unlock(&em28xx_devlist_mutex);
}
diff --git a/drivers/media/video/em28xx/em28xx-dvb.c b/drivers/media/video/em28xx/em28xx-dvb.c
index bcd3c371009b..cf1d8c3655fc 100644
--- a/drivers/media/video/em28xx/em28xx-dvb.c
+++ b/drivers/media/video/em28xx/em28xx-dvb.c
@@ -467,6 +467,7 @@ static int dvb_init(struct em28xx *dev)
}
dev->dvb = dvb;
+ mutex_lock(&dev->lock);
em28xx_set_mode(dev, EM28XX_DIGITAL_MODE);
/* init frontend */
switch (dev->model) {
@@ -506,6 +507,7 @@ static int dvb_init(struct em28xx *dev)
case EM2880_BOARD_TERRATEC_HYBRID_XS_FR:
case EM2881_BOARD_PINNACLE_HYBRID_PRO:
case EM2882_BOARD_DIKOM_DK300:
+ case EM2882_BOARD_KWORLD_VS_DVBT:
dvb->frontend = dvb_attach(zl10353_attach,
&em28xx_zl10353_xc3028_no_i2c_gate,
&dev->i2c_adap);
@@ -589,15 +591,16 @@ static int dvb_init(struct em28xx *dev)
if (result < 0)
goto out_free;
- em28xx_set_mode(dev, EM28XX_SUSPEND);
em28xx_info("Successfully loaded em28xx-dvb\n");
- return 0;
+ret:
+ em28xx_set_mode(dev, EM28XX_SUSPEND);
+ mutex_unlock(&dev->lock);
+ return result;
out_free:
- em28xx_set_mode(dev, EM28XX_SUSPEND);
kfree(dvb);
dev->dvb = NULL;
- return result;
+ goto ret;
}
static int dvb_fini(struct em28xx *dev)
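
The em28xx-dvb.c rework above takes dev->lock around the whole frontend probe and funnels both the success and the error path through one exit label, so the switch back to EM28XX_SUSPEND and the mutex unlock can no longer be skipped. A compact userspace model of that single-exit shape follows; the pthread mutex and the numeric error code only stand in for the driver's lock and -ENODEV.

#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int mode;                        /* 0 = suspended, 1 = digital */

static int fake_dvb_init(int fail)
{
        int result = 0;

        pthread_mutex_lock(&lock);
        mode = 1;                       /* digital mode while probing */
        if (fail) {
                result = -19;           /* stands in for -ENODEV */
                goto out_err;
        }
        /* frontend attach and registration would happen here */
ret:
        mode = 0;                       /* back to suspend in every path */
        pthread_mutex_unlock(&lock);    /* never missed */
        return result;
out_err:
        /* per-error cleanup (kfree(dvb) in the driver) goes here */
        goto ret;
}

int main(void)
{
        printf("ok path:  %d\n", fake_dvb_init(0));
        printf("err path: %d\n", fake_dvb_init(1));
        return 0;
}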
diff --git a/drivers/media/video/em28xx/em28xx-input.c b/drivers/media/video/em28xx/em28xx-input.c
index 20a0001e8885..5c3fd9411b1f 100644
--- a/drivers/media/video/em28xx/em28xx-input.c
+++ b/drivers/media/video/em28xx/em28xx-input.c
@@ -39,6 +39,8 @@ static unsigned int ir_debug;
module_param(ir_debug, int, 0644);
MODULE_PARM_DESC(ir_debug, "enable debug messages [IR]");
+#define MODULE_NAME "em28xx"
+
#define i2cdprintk(fmt, arg...) \
if (ir_debug) { \
printk(KERN_DEBUG "%s/ir: " fmt, ir->name , ## arg); \
@@ -360,14 +362,20 @@ static void em28xx_ir_work(struct work_struct *work)
schedule_delayed_work(&ir->work, msecs_to_jiffies(ir->polling));
}
-static void em28xx_ir_start(struct em28xx_IR *ir)
+static int em28xx_ir_start(void *priv)
{
+ struct em28xx_IR *ir = priv;
+
INIT_DELAYED_WORK(&ir->work, em28xx_ir_work);
schedule_delayed_work(&ir->work, 0);
+
+ return 0;
}
-static void em28xx_ir_stop(struct em28xx_IR *ir)
+static void em28xx_ir_stop(void *priv)
{
+ struct em28xx_IR *ir = priv;
+
cancel_delayed_work_sync(&ir->work);
}
@@ -380,7 +388,6 @@ int em28xx_ir_change_protocol(void *priv, u64 ir_type)
/* Adjust xclk based on the IR table for RC5/NEC tables */
- dev->board.ir_codes->ir_type = IR_TYPE_OTHER;
if (ir_type == IR_TYPE_RC5) {
dev->board.xclk |= EM28XX_XCLK_IR_RC5_MODE;
ir->full_code = 1;
@@ -388,11 +395,9 @@ int em28xx_ir_change_protocol(void *priv, u64 ir_type)
dev->board.xclk &= ~EM28XX_XCLK_IR_RC5_MODE;
ir_config = EM2874_IR_NEC;
ir->full_code = 1;
- } else
+ } else if (ir_type != IR_TYPE_UNKNOWN)
rc = -EINVAL;
- dev->board.ir_codes->ir_type = ir_type;
-
em28xx_write_reg_bits(dev, EM28XX_R0F_XCLK, dev->board.xclk,
EM28XX_XCLK_IR_RC5_MODE);
@@ -443,6 +448,13 @@ int em28xx_ir_init(struct em28xx *dev)
ir->props.allowed_protos = IR_TYPE_RC5 | IR_TYPE_NEC;
ir->props.priv = ir;
ir->props.change_protocol = em28xx_ir_change_protocol;
+ ir->props.open = em28xx_ir_start;
+ ir->props.close = em28xx_ir_stop;
+
+ /* By default, keep protocol field untouched */
+ err = em28xx_ir_change_protocol(ir, IR_TYPE_UNKNOWN);
+ if (err)
+ goto err_out_free;
/* This is how often we ask the chip for IR information */
ir->polling = 100; /* ms */
@@ -455,7 +467,6 @@ int em28xx_ir_init(struct em28xx *dev)
strlcat(ir->phys, "/input0", sizeof(ir->phys));
/* Set IR protocol */
- em28xx_ir_change_protocol(ir, dev->board.ir_codes->ir_type);
err = ir_input_init(input_dev, &ir->ir, IR_TYPE_OTHER);
if (err < 0)
goto err_out_free;
@@ -470,17 +481,15 @@ int em28xx_ir_init(struct em28xx *dev)
input_dev->dev.parent = &dev->udev->dev;
- em28xx_ir_start(ir);
/* all done */
err = ir_input_register(ir->input, dev->board.ir_codes,
- &ir->props);
+ &ir->props, MODULE_NAME);
if (err)
goto err_out_stop;
return 0;
err_out_stop:
- em28xx_ir_stop(ir);
dev->ir = NULL;
err_out_free:
kfree(ir);
diff --git a/drivers/media/video/em28xx/em28xx-video.c b/drivers/media/video/em28xx/em28xx-video.c
index 0fe20110bfd6..20090e34173a 100644
--- a/drivers/media/video/em28xx/em28xx-video.c
+++ b/drivers/media/video/em28xx/em28xx-video.c
@@ -203,12 +203,6 @@ static void em28xx_copy_video(struct em28xx *dev,
if (dma_q->pos + len > buf->vb.size)
len = buf->vb.size - dma_q->pos;
- if (p[0] != 0x88 && p[0] != 0x22) {
- em28xx_isocdbg("frame is not complete\n");
- len += 4;
- } else
- p += 4;
-
startread = p;
remain = len;
@@ -309,14 +303,6 @@ static void em28xx_copy_vbi(struct em28xx *dev,
if (dma_q->pos + len > buf->vb.size)
len = buf->vb.size - dma_q->pos;
- if ((p[0] == 0x33 && p[1] == 0x95) ||
- (p[0] == 0x88 && p[1] == 0x88)) {
- /* Header field, advance past it */
- p += 4;
- } else {
- len += 4;
- }
-
startread = p;
startwrite = outp + dma_q->pos;
@@ -507,8 +493,15 @@ static inline int em28xx_isoc_copy(struct em28xx *dev, struct urb *urb)
dma_q->pos = 0;
}
- if (buf != NULL)
+ if (buf != NULL) {
+ if (p[0] != 0x88 && p[0] != 0x22) {
+ em28xx_isocdbg("frame is not complete\n");
+ len += 4;
+ } else {
+ p += 4;
+ }
em28xx_copy_video(dev, dma_q, buf, p, outp, len);
+ }
}
return rc;
}
@@ -555,8 +548,7 @@ static inline int em28xx_isoc_copy_vbi(struct em28xx *dev, struct urb *urb)
continue;
}
- len = urb->iso_frame_desc[i].actual_length - 4;
-
+ len = urb->iso_frame_desc[i].actual_length;
if (urb->iso_frame_desc[i].actual_length <= 0) {
/* em28xx_isocdbg("packet %d is empty",i); - spammy */
continue;
@@ -577,6 +569,17 @@ static inline int em28xx_isoc_copy_vbi(struct em28xx *dev, struct urb *urb)
dev->vbi_read = 0;
em28xx_isocdbg("VBI START HEADER!!!\n");
dev->cur_field = p[2];
+ p += 4;
+ len -= 4;
+ } else if (p[0] == 0x88 && p[1] == 0x88 &&
+ p[2] == 0x88 && p[3] == 0x88) {
+ /* continuation */
+ p += 4;
+ len -= 4;
+ } else if (p[0] == 0x22 && p[1] == 0x5a) {
+ /* start video */
+ p += 4;
+ len -= 4;
}
vbi_size = dev->vbi_width * dev->vbi_height;
@@ -631,9 +634,6 @@ static inline int em28xx_isoc_copy_vbi(struct em28xx *dev, struct urb *urb)
if (dev->capture_type == 1) {
dev->capture_type = 2;
- em28xx_isocdbg("Video frame %d, length=%i, %s\n", p[2],
- len, (p[2] & 1) ? "odd" : "even");
-
if (dev->progressive || !(dev->cur_field & 1)) {
if (buf != NULL)
buffer_filled(dev, dma_q, buf);
@@ -652,8 +652,25 @@ static inline int em28xx_isoc_copy_vbi(struct em28xx *dev, struct urb *urb)
dma_q->pos = 0;
}
- if (buf != NULL && dev->capture_type == 2)
- em28xx_copy_video(dev, dma_q, buf, p, outp, len);
+
+ if (buf != NULL && dev->capture_type == 2) {
+ if (len > 4 && p[0] == 0x88 && p[1] == 0x88 &&
+ p[2] == 0x88 && p[3] == 0x88) {
+ p += 4;
+ len -= 4;
+ }
+ if (len > 4 && p[0] == 0x22 && p[1] == 0x5a) {
+ em28xx_isocdbg("Video frame %d, len=%i, %s\n",
+ p[2], len, (p[2] & 1) ?
+ "odd" : "even");
+ p += 4;
+ len -= 4;
+ }
+
+ if (len > 0)
+ em28xx_copy_video(dev, dma_q, buf, p, outp,
+ len);
+ }
}
return rc;
}
@@ -1483,6 +1500,7 @@ static int vidioc_g_tuner(struct file *file, void *priv,
return -EINVAL;
strcpy(t->name, "Tuner");
+ t->type = V4L2_TUNER_ANALOG_TV;
mutex_lock(&dev->lock);
v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, g_tuner, t);
@@ -1814,7 +1832,7 @@ static int vidioc_g_fmt_sliced_vbi_cap(struct file *file, void *priv,
mutex_lock(&dev->lock);
f->fmt.sliced.service_set = 0;
- v4l2_device_call_all(&dev->v4l2_dev, 0, video, g_fmt, f);
+ v4l2_device_call_all(&dev->v4l2_dev, 0, vbi, g_sliced_fmt, &f->fmt.sliced);
if (f->fmt.sliced.service_set == 0)
rc = -EINVAL;
@@ -1836,7 +1854,7 @@ static int vidioc_try_set_sliced_vbi_cap(struct file *file, void *priv,
return rc;
mutex_lock(&dev->lock);
- v4l2_device_call_all(&dev->v4l2_dev, 0, video, g_fmt, f);
+ v4l2_device_call_all(&dev->v4l2_dev, 0, vbi, g_sliced_fmt, &f->fmt.sliced);
mutex_unlock(&dev->lock);
if (f->fmt.sliced.service_set == 0)
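
The em28xx-video.c hunks move the 4-byte packet-header handling out of the copy helpers and into the isoc handlers, which now recognize the VBI start marker (0x33 0x95), the continuation marker (0x88 0x88 0x88 0x88) and the video start marker (0x22 0x5a) and strip them before copying payload. The sketch below shows that header check on a fake packet; the helper name skip_header() is invented, and only the marker values are taken from the hunks above.

#include <stdio.h>
#include <stdint.h>

static int skip_header(const uint8_t **p, int *len)
{
        const uint8_t *d = *p;

        if (*len < 4)
                return 0;
        if ((d[0] == 0x33 && d[1] == 0x95) ||              /* VBI start */
            (d[0] == 0x88 && d[1] == 0x88 &&
             d[2] == 0x88 && d[3] == 0x88) ||              /* continuation */
            (d[0] == 0x22 && d[1] == 0x5a)) {              /* video start */
                *p += 4;
                *len -= 4;
                return 1;
        }
        return 0;
}

int main(void)
{
        uint8_t pkt[] = { 0x22, 0x5a, 0x00, 0x01, 0xaa, 0xbb };
        const uint8_t *p = pkt;
        int len = sizeof(pkt);

        if (skip_header(&p, &len))
                printf("header skipped, %d payload bytes left\n", len);
        return 0;
}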
diff --git a/drivers/media/video/em28xx/em28xx.h b/drivers/media/video/em28xx/em28xx.h
index ba6fe5daff84..b252d1b1b2a7 100644
--- a/drivers/media/video/em28xx/em28xx.h
+++ b/drivers/media/video/em28xx/em28xx.h
@@ -68,6 +68,7 @@
#define EM2820_BOARD_HERCULES_SMART_TV_USB2 26
#define EM2820_BOARD_PINNACLE_USB_2_FM1216ME 27
#define EM2820_BOARD_LEADTEK_WINFAST_USBII_DELUXE 28
+#define EM2860_BOARD_TVP5150_REFERENCE_DESIGN 29
#define EM2820_BOARD_VIDEOLOGY_20K14XUSB 30
#define EM2821_BOARD_USBGEAR_VD204 31
#define EM2821_BOARD_SUPERCOMP_USB_2 32
@@ -140,10 +141,10 @@
#define EM28XX_NUM_BUFS 5
/* number of packets for each buffer
- windows requests only 40 packets .. so we better do the same
+ windows requests only 64 packets .. so we better do the same
this is what I found out for all alternate numbers there!
*/
-#define EM28XX_NUM_PACKETS 40
+#define EM28XX_NUM_PACKETS 64
#define EM28XX_INTERLACED_DEFAULT 1
@@ -411,7 +412,7 @@ struct em28xx_board {
struct em28xx_input input[MAX_EM28XX_INPUT];
struct em28xx_input radio;
- struct ir_scancode_table *ir_codes;
+ char *ir_codes;
};
struct em28xx_eeprom {
diff --git a/drivers/media/video/et61x251/et61x251_core.c b/drivers/media/video/et61x251/et61x251_core.c
index e6c23d509862..a5cfc76b40b7 100644
--- a/drivers/media/video/et61x251/et61x251_core.c
+++ b/drivers/media/video/et61x251/et61x251_core.c
@@ -1713,7 +1713,7 @@ et61x251_vidioc_s_ctrl(struct et61x251_device* cam, void __user * arg)
if (copy_from_user(&ctrl, arg, sizeof(ctrl)))
return -EFAULT;
- for (i = 0; i < ARRAY_SIZE(s->qctrl); i++)
+ for (i = 0; i < ARRAY_SIZE(s->qctrl); i++) {
if (ctrl.id == s->qctrl[i].id) {
if (s->qctrl[i].flags & V4L2_CTRL_FLAG_DISABLED)
return -EINVAL;
@@ -1723,7 +1723,9 @@ et61x251_vidioc_s_ctrl(struct et61x251_device* cam, void __user * arg)
ctrl.value -= ctrl.value % s->qctrl[i].step;
break;
}
-
+ }
+ if (i == ARRAY_SIZE(s->qctrl))
+ return -EINVAL;
if ((err = s->set_ctrl(cam, &ctrl)))
return err;
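
Both the cx88-video radio_queryctrl() fix and the et61x251 change above add the same guard: after a linear search, the loop index equals the array size when nothing matched, and that case must return -EINVAL instead of indexing past the table. A tiny sketch of the pattern follows, with an invented ids[] table.

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

int main(void)
{
        int ids[] = { 3, 7, 11 };
        int wanted = 5;
        size_t i;

        for (i = 0; i < ARRAY_SIZE(ids); i++) {
                if (ids[i] == wanted)
                        break;
        }
        if (i == ARRAY_SIZE(ids)) {     /* loop fell through: no match */
                printf("id %d not supported (-EINVAL)\n", wanted);
                return 1;
        }
        printf("found id %d at index %zu\n", wanted, i);
        return 0;
}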
diff --git a/drivers/media/video/font.h b/drivers/media/video/font.h
deleted file mode 100644
index 8b1fecc37599..000000000000
--- a/drivers/media/video/font.h
+++ /dev/null
@@ -1,407 +0,0 @@
-static unsigned char rom8x16_bits[] = {
-/* Character 0 (0x30):
- ht=16, width=8
- +--------+
- | |
- | |
- | ***** |
- |** ** |
- |** ** |
- |** *** |
- |** **** |
- |**** ** |
- |*** ** |
- |** ** |
- |** ** |
- | ***** |
- | |
- | |
- | |
- | |
- +--------+ */
-0x00,
-0x00,
-0x7c,
-0xc6,
-0xc6,
-0xce,
-0xde,
-0xf6,
-0xe6,
-0xc6,
-0xc6,
-0x7c,
-0x00,
-0x00,
-0x00,
-0x00,
-
-/* Character 1 (0x31):
- ht=16, width=8
- +--------+
- | |
- | |
- | ** |
- | **** |
- | ** |
- | ** |
- | ** |
- | ** |
- | ** |
- | ** |
- | ** |
- | ****** |
- | |
- | |
- | |
- | |
- +--------+ */
-0x00,
-0x00,
-0x18,
-0x78,
-0x18,
-0x18,
-0x18,
-0x18,
-0x18,
-0x18,
-0x18,
-0x7e,
-0x00,
-0x00,
-0x00,
-0x00,
-
-/* Character 2 (0x32):
- ht=16, width=8
- +--------+
- | |
- | |
- | ***** |
- |** ** |
- |** ** |
- | ** |
- | ** |
- | ** |
- | ** |
- | ** |
- |** ** |
- |******* |
- | |
- | |
- | |
- | |
- +--------+ */
-0x00,
-0x00,
-0x7c,
-0xc6,
-0xc6,
-0x06,
-0x0c,
-0x18,
-0x30,
-0x60,
-0xc6,
-0xfe,
-0x00,
-0x00,
-0x00,
-0x00,
-
-/* Character 3 (0x33):
- ht=16, width=8
- +--------+
- | |
- | |
- | ***** |
- |** ** |
- | ** |
- | ** |
- | **** |
- | ** |
- | ** |
- | ** |
- |** ** |
- | ***** |
- | |
- | |
- | |
- | |
- +--------+ */
-0x00,
-0x00,
-0x7c,
-0xc6,
-0x06,
-0x06,
-0x3c,
-0x06,
-0x06,
-0x06,
-0xc6,
-0x7c,
-0x00,
-0x00,
-0x00,
-0x00,
-
-/* Character 4 (0x34):
- ht=16, width=8
- +--------+
- | |
- | |
- | ** |
- | *** |
- | **** |
- | ** ** |
- |** ** |
- |** ** |
- |******* |
- | ** |
- | ** |
- | **** |
- | |
- | |
- | |
- | |
- +--------+ */
-0x00,
-0x00,
-0x0c,
-0x1c,
-0x3c,
-0x6c,
-0xcc,
-0xcc,
-0xfe,
-0x0c,
-0x0c,
-0x1e,
-0x00,
-0x00,
-0x00,
-0x00,
-
-/* Character 5 (0x35):
- ht=16, width=8
- +--------+
- | |
- | |
- |******* |
- |** |
- |** |
- |** |
- |****** |
- | ** |
- | ** |
- | ** |
- |** ** |
- | ***** |
- | |
- | |
- | |
- | |
- +--------+ */
-0x00,
-0x00,
-0xfe,
-0xc0,
-0xc0,
-0xc0,
-0xfc,
-0x06,
-0x06,
-0x06,
-0xc6,
-0x7c,
-0x00,
-0x00,
-0x00,
-0x00,
-
-/* Character 6 (0x36):
- ht=16, width=8
- +--------+
- | |
- | |
- | ***** |
- |** ** |
- |** |
- |** |
- |****** |
- |** ** |
- |** ** |
- |** ** |
- |** ** |
- | ***** |
- | |
- | |
- | |
- | |
- +--------+ */
-0x00,
-0x00,
-0x7c,
-0xc6,
-0xc0,
-0xc0,
-0xfc,
-0xc6,
-0xc6,
-0xc6,
-0xc6,
-0x7c,
-0x00,
-0x00,
-0x00,
-0x00,
-
-/* Character 7 (0x37):
- ht=16, width=8
- +--------+
- | |
- | |
- |******* |
- |** ** |
- | ** |
- | ** |
- | ** |
- | ** |
- | ** |
- | ** |
- | ** |
- | ** |
- | |
- | |
- | |
- | |
- +--------+ */
-0x00,
-0x00,
-0xfe,
-0xc6,
-0x06,
-0x0c,
-0x18,
-0x30,
-0x30,
-0x30,
-0x30,
-0x30,
-0x00,
-0x00,
-0x00,
-0x00,
-
-/* Character 8 (0x38):
- ht=16, width=8
- +--------+
- | |
- | |
- | ***** |
- |** ** |
- |** ** |
- |** ** |
- | ***** |
- |** ** |
- |** ** |
- |** ** |
- |** ** |
- | ***** |
- | |
- | |
- | |
- | |
- +--------+ */
-0x00,
-0x00,
-0x7c,
-0xc6,
-0xc6,
-0xc6,
-0x7c,
-0xc6,
-0xc6,
-0xc6,
-0xc6,
-0x7c,
-0x00,
-0x00,
-0x00,
-0x00,
-
-/* Character 9 (0x39):
- ht=16, width=8
- +--------+
- | |
- | |
- | ***** |
- |** ** |
- |** ** |
- |** ** |
- |** ** |
- | ****** |
- | ** |
- | ** |
- |** ** |
- | ***** |
- | |
- | |
- | |
- | |
- +--------+ */
-0x00,
-0x00,
-0x7c,
-0xc6,
-0xc6,
-0xc6,
-0xc6,
-0x7e,
-0x06,
-0x06,
-0xc6,
-0x7c,
-0x00,
-0x00,
-0x00,
-0x00,
-/* Character : (0x3a):
- ht=16, width=8
- +--------+
- | |
- | |
- | |
- | |
- | |
- | ** |
- | ** |
- | |
- | |
- | ** |
- | ** |
- | |
- | |
- | |
- | |
- | |
- +--------+ */
-0x00,
-0x00,
-0x00,
-0x00,
-0x00,
-0x0c,
-0x0c,
-0x00,
-0x00,
-0x0c,
-0x0c,
-0x00,
-0x00,
-0x00,
-0x00,
-0x00,
-};
diff --git a/drivers/media/video/gspca/Kconfig b/drivers/media/video/gspca/Kconfig
index e0060c1f0544..5d920e584de7 100644
--- a/drivers/media/video/gspca/Kconfig
+++ b/drivers/media/video/gspca/Kconfig
@@ -172,12 +172,6 @@ config USB_GSPCA_SN9C20X
To compile this driver as a module, choose M here: the
module will be called gspca_sn9c20x.
-config USB_GSPCA_SN9C20X_EVDEV
- bool "Enable evdev support"
- depends on USB_GSPCA_SN9C20X && INPUT
- ---help---
- Say Y here in order to enable evdev support for sn9c20x webcam button.
-
config USB_GSPCA_SONIXB
tristate "SONIX Bayer USB Camera Driver"
depends on VIDEO_V4L2 && USB_GSPCA
diff --git a/drivers/media/video/gspca/benq.c b/drivers/media/video/gspca/benq.c
index 43ac4af8d3ed..fce8d9492641 100644
--- a/drivers/media/video/gspca/benq.c
+++ b/drivers/media/video/gspca/benq.c
@@ -117,13 +117,13 @@ static int sd_start(struct gspca_dev *gspca_dev)
return -ENOMEM;
}
gspca_dev->urb[n] = urb;
- urb->transfer_buffer = usb_buffer_alloc(gspca_dev->dev,
+ urb->transfer_buffer = usb_alloc_coherent(gspca_dev->dev,
SD_PKT_SZ * SD_NPKT,
GFP_KERNEL,
&urb->transfer_dma);
if (urb->transfer_buffer == NULL) {
- err("usb_buffer_alloc failed");
+ err("usb_alloc_coherent failed");
return -ENOMEM;
}
urb->dev = gspca_dev->dev;
diff --git a/drivers/media/video/gspca/cpia1.c b/drivers/media/video/gspca/cpia1.c
index 82945ed5cbe5..58b696f455be 100644
--- a/drivers/media/video/gspca/cpia1.c
+++ b/drivers/media/video/gspca/cpia1.c
@@ -374,7 +374,7 @@ static int sd_getfreq(struct gspca_dev *gspca_dev, __s32 *val);
static int sd_setcomptarget(struct gspca_dev *gspca_dev, __s32 val);
static int sd_getcomptarget(struct gspca_dev *gspca_dev, __s32 *val);
-static struct ctrl sd_ctrls[] = {
+static const struct ctrl sd_ctrls[] = {
{
{
.id = V4L2_CID_BRIGHTNESS,
@@ -861,7 +861,7 @@ static int save_camera_state(struct gspca_dev *gspca_dev)
return do_command(gspca_dev, CPIA_COMMAND_GetExposure, 0, 0, 0, 0);
}
-int command_setformat(struct gspca_dev *gspca_dev)
+static int command_setformat(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
int ret;
@@ -878,7 +878,7 @@ int command_setformat(struct gspca_dev *gspca_dev)
sd->params.roi.rowStart, sd->params.roi.rowEnd);
}
-int command_setcolourparams(struct gspca_dev *gspca_dev)
+static int command_setcolourparams(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
return do_command(gspca_dev, CPIA_COMMAND_SetColourParams,
@@ -887,7 +887,7 @@ int command_setcolourparams(struct gspca_dev *gspca_dev)
sd->params.colourParams.saturation, 0);
}
-int command_setapcor(struct gspca_dev *gspca_dev)
+static int command_setapcor(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
return do_command(gspca_dev, CPIA_COMMAND_SetApcor,
@@ -897,7 +897,7 @@ int command_setapcor(struct gspca_dev *gspca_dev)
sd->params.apcor.gain8);
}
-int command_setvloffset(struct gspca_dev *gspca_dev)
+static int command_setvloffset(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
return do_command(gspca_dev, CPIA_COMMAND_SetVLOffset,
@@ -907,7 +907,7 @@ int command_setvloffset(struct gspca_dev *gspca_dev)
sd->params.vlOffset.gain8);
}
-int command_setexposure(struct gspca_dev *gspca_dev)
+static int command_setexposure(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
int ret;
@@ -943,7 +943,7 @@ int command_setexposure(struct gspca_dev *gspca_dev)
return ret;
}
-int command_setcolourbalance(struct gspca_dev *gspca_dev)
+static int command_setcolourbalance(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -973,7 +973,7 @@ int command_setcolourbalance(struct gspca_dev *gspca_dev)
return -EINVAL;
}
-int command_setcompressiontarget(struct gspca_dev *gspca_dev)
+static int command_setcompressiontarget(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -983,7 +983,7 @@ int command_setcompressiontarget(struct gspca_dev *gspca_dev)
sd->params.compressionTarget.targetQ, 0);
}
-int command_setyuvtresh(struct gspca_dev *gspca_dev)
+static int command_setyuvtresh(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -992,7 +992,7 @@ int command_setyuvtresh(struct gspca_dev *gspca_dev)
sd->params.yuvThreshold.uvThreshold, 0, 0);
}
-int command_setcompressionparams(struct gspca_dev *gspca_dev)
+static int command_setcompressionparams(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -1009,7 +1009,7 @@ int command_setcompressionparams(struct gspca_dev *gspca_dev)
sd->params.compressionParams.decimationThreshMod);
}
-int command_setcompression(struct gspca_dev *gspca_dev)
+static int command_setcompression(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -1018,7 +1018,7 @@ int command_setcompression(struct gspca_dev *gspca_dev)
sd->params.compression.decimation, 0, 0);
}
-int command_setsensorfps(struct gspca_dev *gspca_dev)
+static int command_setsensorfps(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -1027,7 +1027,7 @@ int command_setsensorfps(struct gspca_dev *gspca_dev)
sd->params.sensorFps.baserate, 0, 0);
}
-int command_setflickerctrl(struct gspca_dev *gspca_dev)
+static int command_setflickerctrl(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -1038,7 +1038,7 @@ int command_setflickerctrl(struct gspca_dev *gspca_dev)
0);
}
-int command_setecptiming(struct gspca_dev *gspca_dev)
+static int command_setecptiming(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -1046,12 +1046,12 @@ int command_setecptiming(struct gspca_dev *gspca_dev)
sd->params.ecpTiming, 0, 0, 0);
}
-int command_pause(struct gspca_dev *gspca_dev)
+static int command_pause(struct gspca_dev *gspca_dev)
{
return do_command(gspca_dev, CPIA_COMMAND_EndStreamCap, 0, 0, 0, 0);
}
-int command_resume(struct gspca_dev *gspca_dev)
+static int command_resume(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -1059,7 +1059,8 @@ int command_resume(struct gspca_dev *gspca_dev)
0, sd->params.streamStartLine, 0, 0);
}
-int command_setlights(struct gspca_dev *gspca_dev)
+#if 0 /* Currently unused */
+static int command_setlights(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
int ret, p1, p2;
@@ -1078,6 +1079,7 @@ int command_setlights(struct gspca_dev *gspca_dev)
return do_command(gspca_dev, CPIA_COMMAND_WriteMCPort, 2, 0,
p1 | p2 | 0xE0, 0);
}
+#endif
static int set_flicker(struct gspca_dev *gspca_dev, int on, int apply)
{
diff --git a/drivers/media/video/gspca/gspca.c b/drivers/media/video/gspca/gspca.c
index 222af479150b..678675bb3652 100644
--- a/drivers/media/video/gspca/gspca.c
+++ b/drivers/media/video/gspca/gspca.c
@@ -1,7 +1,7 @@
/*
* Main USB camera driver
*
- * Copyright (C) 2008-2009 Jean-Francois Moine (http://moinejf.free.fr)
+ * Copyright (C) 2008-2010 Jean-François Moine <http://moinejf.free.fr>
*
* Camera button input handling by Márton Németh
* Copyright (C) 2009-2010 Márton Németh <nm127@freemail.hu>
@@ -35,12 +35,12 @@
#include <linux/io.h>
#include <asm/page.h>
#include <linux/uaccess.h>
-#include <linux/jiffies.h>
+#include <linux/ktime.h>
#include <media/v4l2-ioctl.h>
#include "gspca.h"
-#ifdef CONFIG_INPUT
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
#include <linux/input.h>
#include <linux/usb/input.h>
#endif
@@ -51,7 +51,7 @@
#error "DEF_NURBS too big"
#endif
-MODULE_AUTHOR("Jean-Francois Moine <http://moinejf.free.fr>");
+MODULE_AUTHOR("Jean-François Moine <http://moinejf.free.fr>");
MODULE_DESCRIPTION("GSPCA USB Camera Driver");
MODULE_LICENSE("GPL");
@@ -115,7 +115,7 @@ static const struct vm_operations_struct gspca_vm_ops = {
/*
* Input and interrupt endpoint handling functions
*/
-#ifdef CONFIG_INPUT
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
static void int_irq(struct urb *urb)
{
struct gspca_dev *gspca_dev = (struct gspca_dev *) urb->context;
@@ -199,7 +199,7 @@ static int alloc_and_submit_int_urb(struct gspca_dev *gspca_dev,
void *buffer = NULL;
int ret = -EINVAL;
- buffer_len = ep->wMaxPacketSize;
+ buffer_len = le16_to_cpu(ep->wMaxPacketSize);
interval = ep->bInterval;
PDEBUG(D_PROBE, "found int in endpoint: 0x%x, "
"buffer_len=%u, interval=%u",
@@ -213,7 +213,7 @@ static int alloc_and_submit_int_urb(struct gspca_dev *gspca_dev,
goto error;
}
- buffer = usb_buffer_alloc(dev, ep->wMaxPacketSize,
+ buffer = usb_alloc_coherent(dev, buffer_len,
GFP_KERNEL, &urb->transfer_dma);
if (!buffer) {
ret = -ENOMEM;
@@ -232,10 +232,10 @@ static int alloc_and_submit_int_urb(struct gspca_dev *gspca_dev,
return ret;
error_submit:
- usb_buffer_free(dev,
- urb->transfer_buffer_length,
- urb->transfer_buffer,
- urb->transfer_dma);
+ usb_free_coherent(dev,
+ urb->transfer_buffer_length,
+ urb->transfer_buffer,
+ urb->transfer_dma);
error_buffer:
usb_free_urb(urb);
error:
@@ -272,17 +272,26 @@ static void gspca_input_destroy_urb(struct gspca_dev *gspca_dev)
if (urb) {
gspca_dev->int_urb = NULL;
usb_kill_urb(urb);
- usb_buffer_free(gspca_dev->dev,
- urb->transfer_buffer_length,
- urb->transfer_buffer,
- urb->transfer_dma);
+ usb_free_coherent(gspca_dev->dev,
+ urb->transfer_buffer_length,
+ urb->transfer_buffer,
+ urb->transfer_dma);
usb_free_urb(urb);
}
}
#else
-#define gspca_input_connect(gspca_dev) 0
-#define gspca_input_create_urb(gspca_dev)
-#define gspca_input_destroy_urb(gspca_dev)
+static inline void gspca_input_destroy_urb(struct gspca_dev *gspca_dev)
+{
+}
+
+static inline void gspca_input_create_urb(struct gspca_dev *gspca_dev)
+{
+}
+
+static inline int gspca_input_connect(struct gspca_dev *dev)
+{
+ return 0;
+}
#endif
/* get the current input frame buffer */
@@ -442,8 +451,7 @@ void gspca_frame_add(struct gspca_dev *gspca_dev,
* is not queued, discard the whole frame */
if (packet_type == FIRST_PACKET) {
frame->data_end = frame->data;
- jiffies_to_timeval(get_jiffies_64(),
- &frame->v4l2_buf.timestamp);
+ frame->v4l2_buf.timestamp = ktime_to_timeval(ktime_get());
frame->v4l2_buf.sequence = ++gspca_dev->sequence;
} else if (gspca_dev->last_packet_type == DISCARD_PACKET) {
if (packet_type == LAST_PACKET)
@@ -597,14 +605,45 @@ static void destroy_urbs(struct gspca_dev *gspca_dev)
gspca_dev->urb[i] = NULL;
usb_kill_urb(urb);
if (urb->transfer_buffer != NULL)
- usb_buffer_free(gspca_dev->dev,
- urb->transfer_buffer_length,
- urb->transfer_buffer,
- urb->transfer_dma);
+ usb_free_coherent(gspca_dev->dev,
+ urb->transfer_buffer_length,
+ urb->transfer_buffer,
+ urb->transfer_dma);
usb_free_urb(urb);
}
}
+static int gspca_set_alt0(struct gspca_dev *gspca_dev)
+{
+ int ret;
+
+ if (gspca_dev->alt == 0)
+ return 0;
+ ret = usb_set_interface(gspca_dev->dev, gspca_dev->iface, 0);
+ if (ret < 0)
+ PDEBUG(D_ERR|D_STREAM, "set alt 0 err %d", ret);
+ return ret;
+}
+
+/* Note: both the queue and the usb locks should be held when calling this */
+static void gspca_stream_off(struct gspca_dev *gspca_dev)
+{
+ gspca_dev->streaming = 0;
+ if (gspca_dev->present) {
+ if (gspca_dev->sd_desc->stopN)
+ gspca_dev->sd_desc->stopN(gspca_dev);
+ destroy_urbs(gspca_dev);
+ gspca_input_destroy_urb(gspca_dev);
+ gspca_set_alt0(gspca_dev);
+ gspca_input_create_urb(gspca_dev);
+ }
+
+ /* always call stop0 to free the subdriver's resources */
+ if (gspca_dev->sd_desc->stop0)
+ gspca_dev->sd_desc->stop0(gspca_dev);
+ PDEBUG(D_STREAM, "stream off OK");
+}
+
/*
* look for an input transfer endpoint in an alternate setting
*/
@@ -721,13 +760,13 @@ static int create_urbs(struct gspca_dev *gspca_dev,
return -ENOMEM;
}
gspca_dev->urb[n] = urb;
- urb->transfer_buffer = usb_buffer_alloc(gspca_dev->dev,
+ urb->transfer_buffer = usb_alloc_coherent(gspca_dev->dev,
bsize,
GFP_KERNEL,
&urb->transfer_dma);
if (urb->transfer_buffer == NULL) {
- err("usb_buffer_alloc failed");
+ err("usb_alloc_coherent failed");
return -ENOMEM;
}
urb->dev = gspca_dev->dev;
@@ -830,8 +869,7 @@ static int gspca_init_transfer(struct gspca_dev *gspca_dev)
}
if (ret >= 0)
break;
- gspca_dev->streaming = 0;
- destroy_urbs(gspca_dev);
+ gspca_stream_off(gspca_dev);
if (ret != -ENOSPC) {
PDEBUG(D_ERR|D_STREAM,
"usb_submit_urb alt %d err %d",
@@ -861,37 +899,6 @@ out:
return ret;
}
-static int gspca_set_alt0(struct gspca_dev *gspca_dev)
-{
- int ret;
-
- if (gspca_dev->alt == 0)
- return 0;
- ret = usb_set_interface(gspca_dev->dev, gspca_dev->iface, 0);
- if (ret < 0)
- PDEBUG(D_ERR|D_STREAM, "set alt 0 err %d", ret);
- return ret;
-}
-
-/* Note: both the queue and the usb locks should be held when calling this */
-static void gspca_stream_off(struct gspca_dev *gspca_dev)
-{
- gspca_dev->streaming = 0;
- if (gspca_dev->present) {
- if (gspca_dev->sd_desc->stopN)
- gspca_dev->sd_desc->stopN(gspca_dev);
- destroy_urbs(gspca_dev);
- gspca_input_destroy_urb(gspca_dev);
- gspca_set_alt0(gspca_dev);
- gspca_input_create_urb(gspca_dev);
- }
-
- /* always call stop0 to free the subdriver's resources */
- if (gspca_dev->sd_desc->stop0)
- gspca_dev->sd_desc->stop0(gspca_dev);
- PDEBUG(D_STREAM, "stream off OK");
-}
-
static void gspca_set_default_mode(struct gspca_dev *gspca_dev)
{
int i;
@@ -1495,7 +1502,7 @@ static int vidioc_reqbufs(struct file *file, void *priv,
struct v4l2_requestbuffers *rb)
{
struct gspca_dev *gspca_dev = priv;
- int i, ret = 0;
+ int i, ret = 0, streaming;
switch (rb->memory) {
case GSPCA_MEMORY_READ: /* (internal call) */
@@ -1530,7 +1537,8 @@ static int vidioc_reqbufs(struct file *file, void *priv,
}
/* stop streaming */
- if (gspca_dev->streaming) {
+ streaming = gspca_dev->streaming;
+ if (streaming) {
mutex_lock(&gspca_dev->usb_lock);
gspca_dev->usb_err = 0;
gspca_stream_off(gspca_dev);
@@ -1549,6 +1557,8 @@ static int vidioc_reqbufs(struct file *file, void *priv,
if (ret == 0) {
rb->count = gspca_dev->nframes;
gspca_dev->capt_file = file;
+ if (streaming)
+ ret = gspca_init_transfer(gspca_dev);
}
out:
mutex_unlock(&gspca_dev->queue_lock);
@@ -1582,6 +1592,12 @@ static int vidioc_streamon(struct file *file, void *priv,
if (mutex_lock_interruptible(&gspca_dev->queue_lock))
return -ERESTARTSYS;
+ /* check the capture file */
+ if (gspca_dev->capt_file != file) {
+ ret = -EBUSY;
+ goto out;
+ }
+
if (gspca_dev->nframes == 0
|| !(gspca_dev->frame[0].v4l2_buf.flags & V4L2_BUF_FLAG_QUEUED)) {
ret = -EINVAL;
@@ -1619,6 +1635,12 @@ static int vidioc_streamoff(struct file *file, void *priv,
if (mutex_lock_interruptible(&gspca_dev->queue_lock))
return -ERESTARTSYS;
+ /* check the capture file */
+ if (gspca_dev->capt_file != file) {
+ ret = -EBUSY;
+ goto out;
+ }
+
/* stop streaming */
if (mutex_lock_interruptible(&gspca_dev->usb_lock)) {
ret = -ERESTARTSYS;
@@ -2124,7 +2146,7 @@ static ssize_t dev_read(struct file *file, char __user *data,
}
/* get a frame */
- jiffies_to_timeval(get_jiffies_64(), &timestamp);
+ timestamp = ktime_to_timeval(ktime_get());
timestamp.tv_sec--;
n = 2;
for (;;) {
@@ -2315,7 +2337,7 @@ int gspca_dev_probe(struct usb_interface *intf,
return 0;
out:
-#ifdef CONFIG_INPUT
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
if (gspca_dev->input_dev)
input_unregister_device(gspca_dev->input_dev);
#endif
@@ -2334,7 +2356,7 @@ EXPORT_SYMBOL(gspca_dev_probe);
void gspca_disconnect(struct usb_interface *intf)
{
struct gspca_dev *gspca_dev = usb_get_intfdata(intf);
-#ifdef CONFIG_INPUT
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
struct input_dev *input_dev;
#endif
@@ -2348,7 +2370,7 @@ void gspca_disconnect(struct usb_interface *intf)
wake_up_interruptible(&gspca_dev->wq);
}
-#ifdef CONFIG_INPUT
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
gspca_input_destroy_urb(gspca_dev);
input_dev = gspca_dev->input_dev;
if (input_dev) {
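
The #ifdef changes above make the input handling build both when the input subsystem is built in (CONFIG_INPUT) and when it is a module (Kconfig defines CONFIG_INPUT_MODULE for the =m case), and the disabled-case fallbacks become empty static inline functions rather than #define stubs, so callers keep argument type checking either way. Condensed sketch of the resulting pattern, taken from the hunks above:

#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
/* real gspca_input_connect()/_create_urb()/_destroy_urb(), as above */
#else
static inline int gspca_input_connect(struct gspca_dev *dev)
{
	return 0;
}
static inline void gspca_input_create_urb(struct gspca_dev *gspca_dev)
{
}
static inline void gspca_input_destroy_urb(struct gspca_dev *gspca_dev)
{
}
#endif
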
diff --git a/drivers/media/video/gspca/gspca.h b/drivers/media/video/gspca/gspca.h
index 8bb242fb79de..8b963dfae861 100644
--- a/drivers/media/video/gspca/gspca.h
+++ b/drivers/media/video/gspca/gspca.h
@@ -130,7 +130,7 @@ struct sd_desc {
cam_reg_op get_register;
#endif
cam_ident_op get_chip_ident;
-#ifdef CONFIG_INPUT
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
cam_int_pkt_op int_pkt_scan;
/* other_input makes the gspca core create gspca_dev->input even when
int_pkt_scan is NULL, for cams with non interrupt driven buttons */
@@ -158,7 +158,7 @@ struct gspca_dev {
struct module *module; /* subdriver handling the device */
struct usb_device *dev;
struct file *capt_file; /* file doing video capture */
-#ifdef CONFIG_INPUT
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
struct input_dev *input_dev;
char phys[64]; /* physical device path */
#endif
@@ -171,7 +171,7 @@ struct gspca_dev {
#define USB_BUF_SZ 64
__u8 *usb_buf; /* buffer for USB exchanges */
struct urb *urb[MAX_NURBS];
-#ifdef CONFIG_INPUT
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
struct urb *int_urb;
#endif
diff --git a/drivers/media/video/gspca/ov534.c b/drivers/media/video/gspca/ov534.c
index 957e05e2d08f..dc1e4efe30fb 100644
--- a/drivers/media/video/gspca/ov534.c
+++ b/drivers/media/video/gspca/ov534.c
@@ -10,8 +10,8 @@
* https://jim.sh/svn/jim/devl/playstation/ps3/eye/test/
*
* PS3 Eye camera enhanced by Richard Kaswy http://kaswy.free.fr
- * PS3 Eye camera, brightness, contrast, hue, AWB control added
- * by Max Thrun <bear24rw@gmail.com>
+ * PS3 Eye camera - brightness, contrast, awb, agc, aec controls
+ * added by Max Thrun <bear24rw@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -60,15 +60,13 @@ struct sd {
u8 contrast;
u8 gain;
u8 exposure;
- u8 redblc;
- u8 blueblc;
- u8 hue;
- u8 autogain;
+ u8 agc;
u8 awb;
+ u8 aec;
s8 sharpness;
u8 hflip;
u8 vflip;
-
+ u8 freqfltr;
};
/* V4L2 controls supported by the driver */
@@ -76,197 +74,183 @@ static int sd_setgain(struct gspca_dev *gspca_dev, __s32 val);
static int sd_getgain(struct gspca_dev *gspca_dev, __s32 *val);
static int sd_setexposure(struct gspca_dev *gspca_dev, __s32 val);
static int sd_getexposure(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setredblc(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getredblc(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setblueblc(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getblueblc(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getautogain(struct gspca_dev *gspca_dev, __s32 *val);
+static int sd_setagc(struct gspca_dev *gspca_dev, __s32 val);
+static int sd_getagc(struct gspca_dev *gspca_dev, __s32 *val);
static int sd_setsharpness(struct gspca_dev *gspca_dev, __s32 val);
static int sd_getsharpness(struct gspca_dev *gspca_dev, __s32 *val);
static int sd_sethflip(struct gspca_dev *gspca_dev, __s32 val);
static int sd_gethflip(struct gspca_dev *gspca_dev, __s32 *val);
static int sd_setvflip(struct gspca_dev *gspca_dev, __s32 val);
static int sd_getvflip(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_sethue(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_gethue(struct gspca_dev *gspca_dev, __s32 *val);
static int sd_setawb(struct gspca_dev *gspca_dev, __s32 val);
static int sd_getawb(struct gspca_dev *gspca_dev, __s32 *val);
+static int sd_setaec(struct gspca_dev *gspca_dev, __s32 val);
+static int sd_getaec(struct gspca_dev *gspca_dev, __s32 *val);
static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val);
static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val);
static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val);
static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val);
+static int sd_setfreqfltr(struct gspca_dev *gspca_dev, __s32 val);
+static int sd_getfreqfltr(struct gspca_dev *gspca_dev, __s32 *val);
+static int sd_querymenu(struct gspca_dev *gspca_dev,
+ struct v4l2_querymenu *menu);
static const struct ctrl sd_ctrls[] = {
- { /* 0 */
- {
- .id = V4L2_CID_BRIGHTNESS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Brightness",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
-#define BRIGHTNESS_DEF 20
- .default_value = BRIGHTNESS_DEF,
+ { /* 0 */
+ {
+ .id = V4L2_CID_BRIGHTNESS,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Brightness",
+ .minimum = 0,
+ .maximum = 255,
+ .step = 1,
+#define BRIGHTNESS_DEF 0
+ .default_value = BRIGHTNESS_DEF,
+ },
+ .set = sd_setbrightness,
+ .get = sd_getbrightness,
},
- .set = sd_setbrightness,
- .get = sd_getbrightness,
- },
- { /* 1 */
- {
- .id = V4L2_CID_CONTRAST,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Contrast",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
-#define CONTRAST_DEF 37
- .default_value = CONTRAST_DEF,
+ { /* 1 */
+ {
+ .id = V4L2_CID_CONTRAST,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Contrast",
+ .minimum = 0,
+ .maximum = 255,
+ .step = 1,
+#define CONTRAST_DEF 32
+ .default_value = CONTRAST_DEF,
+ },
+ .set = sd_setcontrast,
+ .get = sd_getcontrast,
},
- .set = sd_setcontrast,
- .get = sd_getcontrast,
- },
- { /* 2 */
- {
- .id = V4L2_CID_GAIN,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Main Gain",
- .minimum = 0,
- .maximum = 63,
- .step = 1,
+ { /* 2 */
+ {
+ .id = V4L2_CID_GAIN,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Main Gain",
+ .minimum = 0,
+ .maximum = 63,
+ .step = 1,
#define GAIN_DEF 20
- .default_value = GAIN_DEF,
+ .default_value = GAIN_DEF,
+ },
+ .set = sd_setgain,
+ .get = sd_getgain,
},
- .set = sd_setgain,
- .get = sd_getgain,
- },
- { /* 3 */
- {
- .id = V4L2_CID_EXPOSURE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Exposure",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
+ { /* 3 */
+ {
+ .id = V4L2_CID_EXPOSURE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Exposure",
+ .minimum = 0,
+ .maximum = 255,
+ .step = 1,
#define EXPO_DEF 120
- .default_value = EXPO_DEF,
- },
- .set = sd_setexposure,
- .get = sd_getexposure,
- },
- { /* 4 */
- {
- .id = V4L2_CID_RED_BALANCE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Red Balance",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
-#define RED_BALANCE_DEF 128
- .default_value = RED_BALANCE_DEF,
+ .default_value = EXPO_DEF,
+ },
+ .set = sd_setexposure,
+ .get = sd_getexposure,
},
- .set = sd_setredblc,
- .get = sd_getredblc,
- },
- { /* 5 */
- {
- .id = V4L2_CID_BLUE_BALANCE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Blue Balance",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
-#define BLUE_BALANCE_DEF 128
- .default_value = BLUE_BALANCE_DEF,
+ { /* 4 */
+ {
+ .id = V4L2_CID_AUTOGAIN,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Auto Gain",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+#define AGC_DEF 1
+ .default_value = AGC_DEF,
+ },
+ .set = sd_setagc,
+ .get = sd_getagc,
},
- .set = sd_setblueblc,
- .get = sd_getblueblc,
- },
- { /* 6 */
- {
- .id = V4L2_CID_HUE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Hue",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
-#define HUE_DEF 143
- .default_value = HUE_DEF,
+#define AWB_IDX 5
+ { /* 5 */
+ {
+ .id = V4L2_CID_AUTO_WHITE_BALANCE,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Auto White Balance",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+#define AWB_DEF 1
+ .default_value = AWB_DEF,
+ },
+ .set = sd_setawb,
+ .get = sd_getawb,
},
- .set = sd_sethue,
- .get = sd_gethue,
- },
- { /* 7 */
- {
- .id = V4L2_CID_AUTOGAIN,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Autogain",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
-#define AUTOGAIN_DEF 0
- .default_value = AUTOGAIN_DEF,
+ { /* 6 */
+ {
+ .id = V4L2_CID_EXPOSURE_AUTO,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Auto Exposure",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+#define AEC_DEF 1
+ .default_value = AEC_DEF,
+ },
+ .set = sd_setaec,
+ .get = sd_getaec,
},
- .set = sd_setautogain,
- .get = sd_getautogain,
- },
-#define AWB_IDX 8
- { /* 8 */
- {
- .id = V4L2_CID_AUTO_WHITE_BALANCE,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Auto White Balance",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
-#define AWB_DEF 0
- .default_value = AWB_DEF,
- },
- .set = sd_setawb,
- .get = sd_getawb,
- },
- { /* 9 */
- {
- .id = V4L2_CID_SHARPNESS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Sharpness",
- .minimum = 0,
- .maximum = 63,
- .step = 1,
+ { /* 7 */
+ {
+ .id = V4L2_CID_SHARPNESS,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Sharpness",
+ .minimum = 0,
+ .maximum = 63,
+ .step = 1,
#define SHARPNESS_DEF 0
- .default_value = SHARPNESS_DEF,
+ .default_value = SHARPNESS_DEF,
+ },
+ .set = sd_setsharpness,
+ .get = sd_getsharpness,
},
- .set = sd_setsharpness,
- .get = sd_getsharpness,
- },
- { /* 10 */
- {
- .id = V4L2_CID_HFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "HFlip",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
+ { /* 8 */
+ {
+ .id = V4L2_CID_HFLIP,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "HFlip",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
#define HFLIP_DEF 0
- .default_value = HFLIP_DEF,
+ .default_value = HFLIP_DEF,
+ },
+ .set = sd_sethflip,
+ .get = sd_gethflip,
},
- .set = sd_sethflip,
- .get = sd_gethflip,
- },
- { /* 11 */
- {
- .id = V4L2_CID_VFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "VFlip",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
+ { /* 9 */
+ {
+ .id = V4L2_CID_VFLIP,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "VFlip",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
#define VFLIP_DEF 0
- .default_value = VFLIP_DEF,
+ .default_value = VFLIP_DEF,
+ },
+ .set = sd_setvflip,
+ .get = sd_getvflip,
+ },
+ { /* 10 */
+ {
+ .id = V4L2_CID_POWER_LINE_FREQUENCY,
+ .type = V4L2_CTRL_TYPE_MENU,
+ .name = "Light Frequency Filter",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+#define FREQFLTR_DEF 0
+ .default_value = FREQFLTR_DEF,
+ },
+ .set = sd_setfreqfltr,
+ .get = sd_getfreqfltr,
},
- .set = sd_setvflip,
- .get = sd_getvflip,
- },
};
static const struct v4l2_pix_format ov772x_mode[] = {
@@ -675,14 +659,14 @@ static void setbrightness(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
- sccb_reg_write(gspca_dev, 0x9B, sd->brightness);
+ sccb_reg_write(gspca_dev, 0x9b, sd->brightness);
}
static void setcontrast(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
- sccb_reg_write(gspca_dev, 0x9C, sd->contrast);
+ sccb_reg_write(gspca_dev, 0x9c, sd->contrast);
}
static void setgain(struct gspca_dev *gspca_dev)
@@ -690,6 +674,9 @@ static void setgain(struct gspca_dev *gspca_dev)
struct sd *sd = (struct sd *) gspca_dev;
u8 val;
+ if (sd->agc)
+ return;
+
val = sd->gain;
switch (val & 0x30) {
case 0x00:
@@ -717,55 +704,68 @@ static void setexposure(struct gspca_dev *gspca_dev)
struct sd *sd = (struct sd *) gspca_dev;
u8 val;
+ if (sd->aec)
+ return;
+
+ /* 'val' is one byte and represents half of the exposure value we are
+ * going to set into registers, a two bytes value:
+ *
+ * MSB: ((u16) val << 1) >> 8 == val >> 7
+ * LSB: ((u16) val << 1) & 0xff == val << 1
+ */
val = sd->exposure;
sccb_reg_write(gspca_dev, 0x08, val >> 7);
sccb_reg_write(gspca_dev, 0x10, val << 1);
}
-static void setredblc(struct gspca_dev *gspca_dev)
+static void setagc(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
- sccb_reg_write(gspca_dev, 0x43, sd->redblc);
-}
-
-static void setblueblc(struct gspca_dev *gspca_dev)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sccb_reg_write(gspca_dev, 0x42, sd->blueblc);
-}
-
-static void sethue(struct gspca_dev *gspca_dev)
-{
- struct sd *sd = (struct sd *) gspca_dev;
+ if (sd->agc) {
+ sccb_reg_write(gspca_dev, 0x13,
+ sccb_reg_read(gspca_dev, 0x13) | 0x04);
+ sccb_reg_write(gspca_dev, 0x64,
+ sccb_reg_read(gspca_dev, 0x64) | 0x03);
+ } else {
+ sccb_reg_write(gspca_dev, 0x13,
+ sccb_reg_read(gspca_dev, 0x13) & ~0x04);
+ sccb_reg_write(gspca_dev, 0x64,
+ sccb_reg_read(gspca_dev, 0x64) & ~0x03);
- sccb_reg_write(gspca_dev, 0x01, sd->hue);
+ setgain(gspca_dev);
+ }
}
-static void setautogain(struct gspca_dev *gspca_dev)
+static void setawb(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
- if (sd->autogain) {
- sccb_reg_write(gspca_dev, 0x13, 0xf7); /* AGC,AEC,AWB ON */
- sccb_reg_write(gspca_dev, 0x64,
- sccb_reg_read(gspca_dev, 0x64) | 0x03);
+ if (sd->awb) {
+ sccb_reg_write(gspca_dev, 0x13,
+ sccb_reg_read(gspca_dev, 0x13) | 0x02);
+ sccb_reg_write(gspca_dev, 0x63,
+ sccb_reg_read(gspca_dev, 0x63) | 0xc0);
} else {
- sccb_reg_write(gspca_dev, 0x13, 0xf0); /* AGC,AEC,AWB OFF */
- sccb_reg_write(gspca_dev, 0x64,
- sccb_reg_read(gspca_dev, 0x64) & 0xfc);
+ sccb_reg_write(gspca_dev, 0x13,
+ sccb_reg_read(gspca_dev, 0x13) & ~0x02);
+ sccb_reg_write(gspca_dev, 0x63,
+ sccb_reg_read(gspca_dev, 0x63) & ~0xc0);
}
}
-static void setawb(struct gspca_dev *gspca_dev)
+static void setaec(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
- if (sd->awb)
- sccb_reg_write(gspca_dev, 0x63, 0xe0); /* AWB on */
- else
- sccb_reg_write(gspca_dev, 0x63, 0xaa); /* AWB off */
+ if (sd->aec)
+ sccb_reg_write(gspca_dev, 0x13,
+ sccb_reg_read(gspca_dev, 0x13) | 0x01);
+ else {
+ sccb_reg_write(gspca_dev, 0x13,
+ sccb_reg_read(gspca_dev, 0x13) & ~0x01);
+ setexposure(gspca_dev);
+ }
}
static void setsharpness(struct gspca_dev *gspca_dev)
@@ -774,8 +774,8 @@ static void setsharpness(struct gspca_dev *gspca_dev)
u8 val;
val = sd->sharpness;
- sccb_reg_write(gspca_dev, 0x91, val); /* vga noise */
- sccb_reg_write(gspca_dev, 0x8e, val); /* qvga noise */
+ sccb_reg_write(gspca_dev, 0x91, val); /* Auto de-noise threshold */
+ sccb_reg_write(gspca_dev, 0x8e, val); /* De-noise threshold */
}
static void sethflip(struct gspca_dev *gspca_dev)
@@ -787,7 +787,7 @@ static void sethflip(struct gspca_dev *gspca_dev)
sccb_reg_read(gspca_dev, 0x0c) | 0x40);
else
sccb_reg_write(gspca_dev, 0x0c,
- sccb_reg_read(gspca_dev, 0x0c) & 0xbf);
+ sccb_reg_read(gspca_dev, 0x0c) & ~0x40);
}
static void setvflip(struct gspca_dev *gspca_dev)
@@ -799,9 +799,20 @@ static void setvflip(struct gspca_dev *gspca_dev)
sccb_reg_read(gspca_dev, 0x0c) | 0x80);
else
sccb_reg_write(gspca_dev, 0x0c,
- sccb_reg_read(gspca_dev, 0x0c) & 0x7f);
+ sccb_reg_read(gspca_dev, 0x0c) & ~0x80);
}
+static void setfreqfltr(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ if (sd->freqfltr == 0)
+ sccb_reg_write(gspca_dev, 0x2b, 0x00);
+ else
+ sccb_reg_write(gspca_dev, 0x2b, 0x9e);
+}
+
+
/* this function is called at probe time */
static int sd_config(struct gspca_dev *gspca_dev,
const struct usb_device_id *id)
@@ -825,26 +836,17 @@ static int sd_config(struct gspca_dev *gspca_dev,
sd->contrast = CONTRAST_DEF;
sd->gain = GAIN_DEF;
sd->exposure = EXPO_DEF;
- sd->redblc = RED_BALANCE_DEF;
- sd->blueblc = BLUE_BALANCE_DEF;
- sd->hue = HUE_DEF;
-#if AUTOGAIN_DEF != 0
- sd->autogain = AUTOGAIN_DEF;
+#if AGC_DEF != 0
+ sd->agc = AGC_DEF;
#else
gspca_dev->ctrl_inac |= (1 << AWB_IDX);
#endif
-#if AWB_DEF != 0
- sd->awb = AWB_DEF
-#endif
-#if SHARPNESS_DEF != 0
+ sd->awb = AWB_DEF;
+ sd->aec = AEC_DEF;
sd->sharpness = SHARPNESS_DEF;
-#endif
-#if HFLIP_DEF != 0
sd->hflip = HFLIP_DEF;
-#endif
-#if VFLIP_DEF != 0
sd->vflip = VFLIP_DEF;
-#endif
+ sd->freqfltr = FREQFLTR_DEF;
return 0;
}
@@ -904,18 +906,17 @@ static int sd_start(struct gspca_dev *gspca_dev)
}
set_frame_rate(gspca_dev);
- setautogain(gspca_dev);
+ setagc(gspca_dev);
setawb(gspca_dev);
+ setaec(gspca_dev);
setgain(gspca_dev);
- setredblc(gspca_dev);
- setblueblc(gspca_dev);
- sethue(gspca_dev);
setexposure(gspca_dev);
setbrightness(gspca_dev);
setcontrast(gspca_dev);
setsharpness(gspca_dev);
setvflip(gspca_dev);
sethflip(gspca_dev);
+ setfreqfltr(gspca_dev);
ov534_set_led(gspca_dev, 1);
ov534_reg_write(gspca_dev, 0xe0, 0x00);
@@ -1092,65 +1093,11 @@ static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val)
return 0;
}
-static int sd_setredblc(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->redblc = val;
- if (gspca_dev->streaming)
- setredblc(gspca_dev);
- return 0;
-}
-
-static int sd_getredblc(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->redblc;
- return 0;
-}
-
-static int sd_setblueblc(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->blueblc = val;
- if (gspca_dev->streaming)
- setblueblc(gspca_dev);
- return 0;
-}
-
-static int sd_getblueblc(struct gspca_dev *gspca_dev, __s32 *val)
+static int sd_setagc(struct gspca_dev *gspca_dev, __s32 val)
{
struct sd *sd = (struct sd *) gspca_dev;
- *val = sd->blueblc;
- return 0;
-}
-
-static int sd_sethue(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->hue = val;
- if (gspca_dev->streaming)
- sethue(gspca_dev);
- return 0;
-}
-
-static int sd_gethue(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->hue;
- return 0;
-}
-
-static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->autogain = val;
+ sd->agc = val;
if (gspca_dev->streaming) {
@@ -1160,16 +1107,16 @@ static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val)
gspca_dev->ctrl_inac &= ~(1 << AWB_IDX);
else
gspca_dev->ctrl_inac |= (1 << AWB_IDX);
- setautogain(gspca_dev);
+ setagc(gspca_dev);
}
return 0;
}
-static int sd_getautogain(struct gspca_dev *gspca_dev, __s32 *val)
+static int sd_getagc(struct gspca_dev *gspca_dev, __s32 *val)
{
struct sd *sd = (struct sd *) gspca_dev;
- *val = sd->autogain;
+ *val = sd->agc;
return 0;
}
@@ -1191,6 +1138,24 @@ static int sd_getawb(struct gspca_dev *gspca_dev, __s32 *val)
return 0;
}
+static int sd_setaec(struct gspca_dev *gspca_dev, __s32 val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ sd->aec = val;
+ if (gspca_dev->streaming)
+ setaec(gspca_dev);
+ return 0;
+}
+
+static int sd_getaec(struct gspca_dev *gspca_dev, __s32 *val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ *val = sd->aec;
+ return 0;
+}
+
static int sd_setsharpness(struct gspca_dev *gspca_dev, __s32 val)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -1245,6 +1210,43 @@ static int sd_getvflip(struct gspca_dev *gspca_dev, __s32 *val)
return 0;
}
+static int sd_setfreqfltr(struct gspca_dev *gspca_dev, __s32 val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ sd->freqfltr = val;
+ if (gspca_dev->streaming)
+ setfreqfltr(gspca_dev);
+ return 0;
+}
+
+static int sd_getfreqfltr(struct gspca_dev *gspca_dev, __s32 *val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ *val = sd->freqfltr;
+ return 0;
+}
+
+static int sd_querymenu(struct gspca_dev *gspca_dev,
+ struct v4l2_querymenu *menu)
+{
+ switch (menu->id) {
+ case V4L2_CID_POWER_LINE_FREQUENCY:
+ switch (menu->index) {
+ case 0: /* V4L2_CID_POWER_LINE_FREQUENCY_DISABLED */
+ strcpy((char *) menu->name, "Disabled");
+ return 0;
+ case 1: /* V4L2_CID_POWER_LINE_FREQUENCY_50HZ */
+ strcpy((char *) menu->name, "50 Hz");
+ return 0;
+ }
+ break;
+ }
+
+ return -EINVAL;
+}
+
/* get stream parameters (framerate) */
static int sd_get_streamparm(struct gspca_dev *gspca_dev,
struct v4l2_streamparm *parm)
@@ -1296,6 +1298,7 @@ static const struct sd_desc sd_desc = {
.start = sd_start,
.stopN = sd_stopN,
.pkt_scan = sd_pkt_scan,
+ .querymenu = sd_querymenu,
.get_streamparm = sd_get_streamparm,
.set_streamparm = sd_set_streamparm,
};
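
In the ov534.c rework above, the AGC, AWB and AEC controls each toggle a single bit of sensor register 0x13 (0x04, 0x02 and 0x01 respectively) with a read-modify-write, so switching one automatic function no longer rewrites the whole register and clobbers the others. The driver open-codes the pattern; a hypothetical helper makes it explicit:

/* Hypothetical helper, not in the patch: set or clear one auto-control bit
 * in OV534 sensor register 0x13 (0x04 = AGC, 0x02 = AWB, 0x01 = AEC). */
static void ov534_update_reg13(struct gspca_dev *gspca_dev, u8 bit, int on)
{
	u8 val = sccb_reg_read(gspca_dev, 0x13);

	if (on)
		val |= bit;
	else
		val &= ~bit;
	sccb_reg_write(gspca_dev, 0x13, val);
}
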
diff --git a/drivers/media/video/gspca/pac207.c b/drivers/media/video/gspca/pac207.c
index 0c87c3490b1e..a40f8893310d 100644
--- a/drivers/media/video/gspca/pac207.c
+++ b/drivers/media/video/gspca/pac207.c
@@ -99,7 +99,7 @@ static const struct ctrl sd_ctrls[] = {
{
.id = V4L2_CID_EXPOSURE,
.type = V4L2_CTRL_TYPE_INTEGER,
- .name = "exposure",
+ .name = "Exposure",
.minimum = PAC207_EXPOSURE_MIN,
.maximum = PAC207_EXPOSURE_MAX,
.step = 1,
@@ -130,7 +130,7 @@ static const struct ctrl sd_ctrls[] = {
{
.id = V4L2_CID_GAIN,
.type = V4L2_CTRL_TYPE_INTEGER,
- .name = "gain",
+ .name = "Gain",
.minimum = PAC207_GAIN_MIN,
.maximum = PAC207_GAIN_MAX,
.step = 1,
diff --git a/drivers/media/video/gspca/sn9c2028.c b/drivers/media/video/gspca/sn9c2028.c
index dda5fd4aa69e..71d9447a7986 100644
--- a/drivers/media/video/gspca/sn9c2028.c
+++ b/drivers/media/video/gspca/sn9c2028.c
@@ -39,7 +39,7 @@ struct init_command {
};
/* V4L2 controls supported by the driver */
-static struct ctrl sd_ctrls[] = {
+static const struct ctrl sd_ctrls[] = {
};
/* How to change the resolution of any of the VGA cams is unknown */
diff --git a/drivers/media/video/gspca/sn9c20x.c b/drivers/media/video/gspca/sn9c20x.c
index 3dee3e5844b6..644a7fd4701a 100644
--- a/drivers/media/video/gspca/sn9c20x.c
+++ b/drivers/media/video/gspca/sn9c20x.c
@@ -18,10 +18,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#ifdef CONFIG_USB_GSPCA_SN9C20X_EVDEV
-#include <linux/kthread.h>
-#include <linux/freezer.h>
-#include <linux/usb/input.h>
+#ifdef CONFIG_INPUT
#include <linux/input.h>
#include <linux/slab.h>
#endif
@@ -30,6 +27,7 @@
#include "jpeg.h"
#include <media/v4l2-chip-ident.h>
+#include <linux/dmi.h>
MODULE_AUTHOR("Brian Johnson <brijohn@gmail.com>, "
"microdia project <microdia@googlegroups.com>");
@@ -52,9 +50,15 @@ MODULE_LICENSE("GPL");
#define SENSOR_MT9V112 7
#define SENSOR_MT9M001 8
#define SENSOR_MT9M111 9
-#define SENSOR_HV7131R 10
+#define SENSOR_MT9M112 10
+#define SENSOR_HV7131R 11
#define SENSOR_MT9VPRB 20
+/* camera flags */
+#define HAS_NO_BUTTON 0x1
+#define LED_REVERSE 0x2 /* some cameras unset gpio to turn on leds */
+#define FLIP_DETECT 0x4
+
/* specific webcam descriptor */
struct sd {
struct gspca_dev gspca_dev;
@@ -88,11 +92,7 @@ struct sd {
u8 *jpeg_hdr;
u8 quality;
-#ifdef CONFIG_USB_GSPCA_SN9C20X_EVDEV
- struct input_dev *input_dev;
- u8 input_gpio;
- struct task_struct *input_task;
-#endif
+ u8 flags;
};
struct i2c_reg_u8 {
@@ -130,6 +130,39 @@ static int sd_getexposure(struct gspca_dev *gspca_dev, s32 *val);
static int sd_setautoexposure(struct gspca_dev *gspca_dev, s32 val);
static int sd_getautoexposure(struct gspca_dev *gspca_dev, s32 *val);
+static const struct dmi_system_id flip_dmi_table[] = {
+ {
+ .ident = "MSI MS-1034",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "MICRO-STAR INT'L CO.,LTD."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MS-1034"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "0341")
+ }
+ },
+ {
+ .ident = "MSI MS-1632",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "MSI"),
+ DMI_MATCH(DMI_BOARD_NAME, "MS-1632")
+ }
+ },
+ {
+ .ident = "MSI MS-1635X",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "MSI"),
+ DMI_MATCH(DMI_BOARD_NAME, "MS-1635X")
+ }
+ },
+ {
+ .ident = "ASUSTeK W7J",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer Inc."),
+ DMI_MATCH(DMI_BOARD_NAME, "W7J ")
+ }
+ },
+ {}
+};
+
static const struct ctrl sd_ctrls[] = {
{
#define BRIGHTNESS_IDX 0
@@ -713,6 +746,7 @@ static u16 i2c_ident[] = {
V4L2_IDENT_MT9V112,
V4L2_IDENT_MT9M001C12ST,
V4L2_IDENT_MT9M111,
+ V4L2_IDENT_MT9M112,
V4L2_IDENT_HV7131R,
};
@@ -735,7 +769,8 @@ static u16 bridge_init[][2] = {
{0x11be, 0xf0}, {0x11bf, 0x00}, {0x118c, 0x1f},
{0x118d, 0x1f}, {0x118e, 0x1f}, {0x118f, 0x1f},
{0x1180, 0x01}, {0x1181, 0x00}, {0x1182, 0x01},
- {0x1183, 0x00}, {0x1184, 0x50}, {0x1185, 0x80}
+ {0x1183, 0x00}, {0x1184, 0x50}, {0x1185, 0x80},
+ {0x1007, 0x00}
};
/* Gain = (bit[3:0] / 16 + 1) * (bit[4] + 1) * (bit[5] + 1) * (bit[6] + 1) */
@@ -914,40 +949,30 @@ static struct i2c_reg_u8 ov9650_init[] = {
};
static struct i2c_reg_u8 ov9655_init[] = {
- {0x12, 0x80}, {0x12, 0x01}, {0x0d, 0x00}, {0x0e, 0x61},
- {0x11, 0x80}, {0x13, 0xba}, {0x14, 0x2e}, {0x16, 0x24},
- {0x1e, 0x04}, {0x1e, 0x04}, {0x1e, 0x04}, {0x27, 0x08},
- {0x28, 0x08}, {0x29, 0x15}, {0x2c, 0x08}, {0x32, 0xbf},
- {0x34, 0x3d}, {0x35, 0x00}, {0x36, 0xf8}, {0x38, 0x12},
- {0x39, 0x57}, {0x3a, 0x00}, {0x3b, 0xcc}, {0x3c, 0x0c},
- {0x3d, 0x19}, {0x3e, 0x0c}, {0x3f, 0x01}, {0x41, 0x40},
- {0x42, 0x80}, {0x45, 0x46}, {0x46, 0x62}, {0x47, 0x2a},
- {0x48, 0x3c}, {0x4a, 0xf0}, {0x4b, 0xdc}, {0x4c, 0xdc},
- {0x4d, 0xdc}, {0x4e, 0xdc}, {0x69, 0x02}, {0x6c, 0x04},
- {0x6f, 0x9e}, {0x70, 0x05}, {0x71, 0x78}, {0x77, 0x02},
- {0x8a, 0x23}, {0x8c, 0x0d}, {0x90, 0x7e}, {0x91, 0x7c},
- {0x9f, 0x6e}, {0xa0, 0x6e}, {0xa5, 0x68}, {0xa6, 0x60},
- {0xa8, 0xc1}, {0xa9, 0xfa}, {0xaa, 0x92}, {0xab, 0x04},
- {0xac, 0x80}, {0xad, 0x80}, {0xae, 0x80}, {0xaf, 0x80},
- {0xb2, 0xf2}, {0xb3, 0x20}, {0xb5, 0x00}, {0xb6, 0xaf},
- {0xbb, 0xae}, {0xbc, 0x44}, {0xbd, 0x44}, {0xbe, 0x3b},
- {0xbf, 0x3a}, {0xc0, 0xe2}, {0xc1, 0xc8}, {0xc2, 0x01},
+ {0x12, 0x80}, {0x0e, 0x61}, {0x11, 0x80}, {0x13, 0xba},
+ {0x14, 0x2e}, {0x16, 0x24}, {0x1e, 0x04}, {0x27, 0x08},
+ {0x28, 0x08}, {0x29, 0x15}, {0x2c, 0x08}, {0x34, 0x3d},
+ {0x35, 0x00}, {0x38, 0x12}, {0x0f, 0x42}, {0x39, 0x57},
+ {0x3a, 0x00}, {0x3b, 0xcc}, {0x3c, 0x0c}, {0x3d, 0x19},
+ {0x3e, 0x0c}, {0x3f, 0x01}, {0x41, 0x40}, {0x42, 0x80},
+ {0x45, 0x46}, {0x46, 0x62}, {0x47, 0x2a}, {0x48, 0x3c},
+ {0x4a, 0xf0}, {0x4b, 0xdc}, {0x4c, 0xdc}, {0x4d, 0xdc},
+ {0x4e, 0xdc}, {0x6c, 0x04}, {0x6f, 0x9e}, {0x70, 0x05},
+ {0x71, 0x78}, {0x77, 0x02}, {0x8a, 0x23}, {0x90, 0x7e},
+ {0x91, 0x7c}, {0x9f, 0x6e}, {0xa0, 0x6e}, {0xa5, 0x68},
+ {0xa6, 0x60}, {0xa8, 0xc1}, {0xa9, 0xfa}, {0xaa, 0x92},
+ {0xab, 0x04}, {0xac, 0x80}, {0xad, 0x80}, {0xae, 0x80},
+ {0xaf, 0x80}, {0xb2, 0xf2}, {0xb3, 0x20}, {0xb5, 0x00},
+ {0xb6, 0xaf}, {0xbb, 0xae}, {0xbc, 0x44}, {0xbd, 0x44},
+ {0xbe, 0x3b}, {0xbf, 0x3a}, {0xc1, 0xc8}, {0xc2, 0x01},
{0xc4, 0x00}, {0xc6, 0x85}, {0xc7, 0x81}, {0xc9, 0xe0},
- {0xca, 0xe8}, {0xcc, 0xd8}, {0xcd, 0x93}, {0x12, 0x61},
+ {0xca, 0xe8}, {0xcc, 0xd8}, {0xcd, 0x93}, {0x2d, 0x00},
+ {0x2e, 0x00}, {0x01, 0x80}, {0x02, 0x80}, {0x12, 0x61},
{0x36, 0xfa}, {0x8c, 0x8d}, {0xc0, 0xaa}, {0x69, 0x0a},
- {0x03, 0x12}, {0x17, 0x14}, {0x18, 0x00}, {0x19, 0x01},
- {0x1a, 0x3d}, {0x32, 0xbf}, {0x11, 0x80}, {0x2a, 0x10},
- {0x2b, 0x0a}, {0x92, 0x00}, {0x93, 0x00}, {0x1e, 0x04},
- {0x1e, 0x04}, {0x10, 0x7c}, {0x04, 0x03}, {0xa1, 0x00},
- {0x2d, 0x00}, {0x2e, 0x00}, {0x00, 0x00}, {0x01, 0x80},
- {0x02, 0x80}, {0x12, 0x61}, {0x36, 0xfa}, {0x8c, 0x8d},
- {0xc0, 0xaa}, {0x69, 0x0a}, {0x03, 0x12}, {0x17, 0x14},
- {0x18, 0x00}, {0x19, 0x01}, {0x1a, 0x3d}, {0x32, 0xbf},
- {0x11, 0x80}, {0x2a, 0x10}, {0x2b, 0x0a}, {0x92, 0x00},
- {0x93, 0x00}, {0x04, 0x01}, {0x10, 0x1f}, {0xa1, 0x00},
- {0x00, 0x0a}, {0xa1, 0x00}, {0x10, 0x5d}, {0x04, 0x03},
- {0x00, 0x01}, {0xa1, 0x00}, {0x10, 0x7c}, {0x04, 0x03},
- {0x00, 0x03}, {0x00, 0x0a}, {0x00, 0x10}, {0x00, 0x13},
+ {0x03, 0x09}, {0x17, 0x16}, {0x18, 0x6e}, {0x19, 0x01},
+ {0x1a, 0x3e}, {0x32, 0x09}, {0x2a, 0x10}, {0x2b, 0x0a},
+ {0x92, 0x00}, {0x93, 0x00}, {0xa1, 0x00}, {0x10, 0x7c},
+ {0x04, 0x03}, {0x00, 0x13},
};
static struct i2c_reg_u16 mt9v112_init[] = {
@@ -971,29 +996,12 @@ static struct i2c_reg_u16 mt9v112_init[] = {
static struct i2c_reg_u16 mt9v111_init[] = {
{0x01, 0x0004}, {0x0d, 0x0001}, {0x0d, 0x0000},
- {0x01, 0x0001}, {0x02, 0x0016}, {0x03, 0x01e1},
- {0x04, 0x0281}, {0x05, 0x0004}, {0x07, 0x3002},
- {0x21, 0x0000}, {0x25, 0x4024}, {0x26, 0xff03},
- {0x27, 0xff10}, {0x2b, 0x7828}, {0x2c, 0xb43c},
- {0x2d, 0xf0a0}, {0x2e, 0x0c64}, {0x2f, 0x0064},
- {0x67, 0x4010}, {0x06, 0x301e}, {0x08, 0x0480},
- {0x01, 0x0004}, {0x02, 0x0016}, {0x03, 0x01e6},
- {0x04, 0x0286}, {0x05, 0x0004}, {0x06, 0x0000},
- {0x07, 0x3002}, {0x08, 0x0008}, {0x0c, 0x0000},
- {0x0d, 0x0000}, {0x0e, 0x0000}, {0x0f, 0x0000},
- {0x10, 0x0000}, {0x11, 0x0000}, {0x12, 0x00b0},
- {0x13, 0x007c}, {0x14, 0x0000}, {0x15, 0x0000},
- {0x16, 0x0000}, {0x17, 0x0000}, {0x18, 0x0000},
- {0x19, 0x0000}, {0x1a, 0x0000}, {0x1b, 0x0000},
- {0x1c, 0x0000}, {0x1d, 0x0000}, {0x30, 0x0000},
- {0x30, 0x0005}, {0x31, 0x0000}, {0x02, 0x0016},
- {0x03, 0x01e1}, {0x04, 0x0281}, {0x05, 0x0004},
- {0x06, 0x0000}, {0x07, 0x3002}, {0x06, 0x002d},
- {0x05, 0x0004}, {0x09, 0x0064}, {0x2b, 0x00a0},
- {0x2c, 0x00a0}, {0x2d, 0x00a0}, {0x2e, 0x00a0},
- {0x02, 0x0016}, {0x03, 0x01e1}, {0x04, 0x0281},
- {0x05, 0x0004}, {0x06, 0x002d}, {0x07, 0x3002},
- {0x0e, 0x0008}, {0x06, 0x002d}, {0x05, 0x0004},
+ {0x01, 0x0001}, {0x05, 0x0004}, {0x2d, 0xe0a0},
+ {0x2e, 0x0c64}, {0x2f, 0x0064}, {0x06, 0x600e},
+ {0x08, 0x0480}, {0x01, 0x0004}, {0x02, 0x0016},
+ {0x03, 0x01e7}, {0x04, 0x0287}, {0x05, 0x0004},
+ {0x06, 0x002d}, {0x07, 0x3002}, {0x08, 0x0008},
+ {0x0e, 0x0008}, {0x20, 0x0000}
};
static struct i2c_reg_u16 mt9v011_init[] = {
@@ -1043,6 +1051,13 @@ static struct i2c_reg_u16 mt9m111_init[] = {
{0xf0, 0x0000},
};
+static struct i2c_reg_u16 mt9m112_init[] = {
+ {0xf0, 0x0000}, {0x0d, 0x0021}, {0x0d, 0x0008},
+ {0xf0, 0x0001}, {0x3a, 0x4300}, {0x9b, 0x4300},
+ {0x06, 0x708e}, {0xf0, 0x0002}, {0x2e, 0x0a1e},
+ {0xf0, 0x0000},
+};
+
static struct i2c_reg_u8 hv7131r_init[] = {
{0x02, 0x08}, {0x02, 0x00}, {0x01, 0x08},
{0x02, 0x00}, {0x20, 0x00}, {0x21, 0xd0},
@@ -1240,8 +1255,8 @@ static int ov9655_init_sensor(struct gspca_dev *gspca_dev)
}
/* disable hflip and vflip */
gspca_dev->ctrl_dis = (1 << HFLIP_IDX) | (1 << VFLIP_IDX);
- sd->hstart = 0;
- sd->vstart = 7;
+ sd->hstart = 1;
+ sd->vstart = 2;
return 0;
}
@@ -1337,6 +1352,7 @@ static int mt9v_init_sensor(struct gspca_dev *gspca_dev)
return -ENODEV;
}
}
+ gspca_dev->ctrl_dis = (1 << EXPOSURE_IDX) | (1 << AUTOGAIN_IDX) | (1 << GAIN_IDX);
sd->hstart = 2;
sd->vstart = 2;
sd->sensor = SENSOR_MT9V111;
@@ -1369,6 +1385,23 @@ static int mt9v_init_sensor(struct gspca_dev *gspca_dev)
return -ENODEV;
}
+static int mt9m112_init_sensor(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ int i;
+ for (i = 0; i < ARRAY_SIZE(mt9m112_init); i++) {
+ if (i2c_w2(gspca_dev, mt9m112_init[i].reg,
+ mt9m112_init[i].val) < 0) {
+ err("MT9M112 sensor initialization failed");
+ return -ENODEV;
+ }
+ }
+ gspca_dev->ctrl_dis = (1 << EXPOSURE_IDX) | (1 << AUTOGAIN_IDX) | (1 << GAIN_IDX);
+ sd->hstart = 0;
+ sd->vstart = 2;
+ return 0;
+}
+
static int mt9m111_init_sensor(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -1421,87 +1454,6 @@ static int hv7131r_init_sensor(struct gspca_dev *gspca_dev)
return 0;
}
-#ifdef CONFIG_USB_GSPCA_SN9C20X_EVDEV
-static int input_kthread(void *data)
-{
- struct gspca_dev *gspca_dev = (struct gspca_dev *)data;
- struct sd *sd = (struct sd *) gspca_dev;
-
- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait);
- set_freezable();
- for (;;) {
- if (kthread_should_stop())
- break;
-
- if (reg_r(gspca_dev, 0x1005, 1) < 0)
- continue;
-
- input_report_key(sd->input_dev,
- KEY_CAMERA,
- gspca_dev->usb_buf[0] & sd->input_gpio);
- input_sync(sd->input_dev);
-
- wait_event_freezable_timeout(wait,
- kthread_should_stop(),
- msecs_to_jiffies(100));
- }
- return 0;
-}
-
-
-static int sn9c20x_input_init(struct gspca_dev *gspca_dev)
-{
- struct sd *sd = (struct sd *) gspca_dev;
- if (sd->input_gpio == 0)
- return 0;
-
- sd->input_dev = input_allocate_device();
- if (!sd->input_dev)
- return -ENOMEM;
-
- sd->input_dev->name = "SN9C20X Webcam";
-
- sd->input_dev->phys = kasprintf(GFP_KERNEL, "usb-%s-%s",
- gspca_dev->dev->bus->bus_name,
- gspca_dev->dev->devpath);
-
- if (!sd->input_dev->phys)
- return -ENOMEM;
-
- usb_to_input_id(gspca_dev->dev, &sd->input_dev->id);
- sd->input_dev->dev.parent = &gspca_dev->dev->dev;
-
- set_bit(EV_KEY, sd->input_dev->evbit);
- set_bit(KEY_CAMERA, sd->input_dev->keybit);
-
- if (input_register_device(sd->input_dev))
- return -EINVAL;
-
- sd->input_task = kthread_run(input_kthread, gspca_dev, "sn9c20x/%s-%s",
- gspca_dev->dev->bus->bus_name,
- gspca_dev->dev->devpath);
-
- if (IS_ERR(sd->input_task))
- return -EINVAL;
-
- return 0;
-}
-
-static void sn9c20x_input_cleanup(struct gspca_dev *gspca_dev)
-{
- struct sd *sd = (struct sd *) gspca_dev;
- if (sd->input_task != NULL && !IS_ERR(sd->input_task))
- kthread_stop(sd->input_task);
-
- if (sd->input_dev != NULL) {
- input_unregister_device(sd->input_dev);
- kfree(sd->input_dev->phys);
- input_free_device(sd->input_dev);
- sd->input_dev = NULL;
- }
-}
-#endif
-
static int set_cmatrix(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -1579,17 +1531,26 @@ static int set_redblue(struct gspca_dev *gspca_dev)
static int set_hvflip(struct gspca_dev *gspca_dev)
{
- u8 value, tslb;
+ u8 value, tslb, hflip, vflip;
u16 value2;
struct sd *sd = (struct sd *) gspca_dev;
+
+ if ((sd->flags & FLIP_DETECT) && dmi_check_system(flip_dmi_table)) {
+ hflip = !sd->hflip;
+ vflip = !sd->vflip;
+ } else {
+ hflip = sd->hflip;
+ vflip = sd->vflip;
+ }
+
switch (sd->sensor) {
case SENSOR_OV9650:
i2c_r1(gspca_dev, 0x1e, &value);
value &= ~0x30;
tslb = 0x01;
- if (sd->hflip)
+ if (hflip)
value |= 0x20;
- if (sd->vflip) {
+ if (vflip) {
value |= 0x10;
tslb = 0x49;
}
@@ -1600,28 +1561,29 @@ static int set_hvflip(struct gspca_dev *gspca_dev)
case SENSOR_MT9V011:
i2c_r2(gspca_dev, 0x20, &value2);
value2 &= ~0xc0a0;
- if (sd->hflip)
+ if (hflip)
value2 |= 0x8080;
- if (sd->vflip)
+ if (vflip)
value2 |= 0x4020;
i2c_w2(gspca_dev, 0x20, value2);
break;
+ case SENSOR_MT9M112:
case SENSOR_MT9M111:
case SENSOR_MT9V112:
i2c_r2(gspca_dev, 0x20, &value2);
value2 &= ~0x0003;
- if (sd->hflip)
+ if (hflip)
value2 |= 0x0002;
- if (sd->vflip)
+ if (vflip)
value2 |= 0x0001;
i2c_w2(gspca_dev, 0x20, value2);
break;
case SENSOR_HV7131R:
i2c_r1(gspca_dev, 0x01, &value);
value &= ~0x03;
- if (sd->vflip)
+ if (vflip)
value |= 0x01;
- if (sd->hflip)
+ if (hflip)
value |= 0x02;
i2c_w1(gspca_dev, 0x01, value);
break;
@@ -1645,7 +1607,6 @@ static int set_exposure(struct gspca_dev *gspca_dev)
break;
case SENSOR_MT9M001:
case SENSOR_MT9V112:
- case SENSOR_MT9V111:
case SENSOR_MT9V011:
exp[0] |= (3 << 4);
exp[2] = 0x09;
@@ -1655,9 +1616,9 @@ static int set_exposure(struct gspca_dev *gspca_dev)
case SENSOR_HV7131R:
exp[0] |= (4 << 4);
exp[2] = 0x25;
- exp[3] = ((sd->exposure * 0xffffff) / 0xffff) >> 16;
- exp[4] = ((sd->exposure * 0xffffff) / 0xffff) >> 8;
- exp[5] = ((sd->exposure * 0xffffff) / 0xffff) & 0xff;
+ exp[3] = (sd->exposure >> 5) & 0xff;
+ exp[4] = (sd->exposure << 3) & 0xff;
+ exp[5] = 0;
break;
default:
return 0;
@@ -1680,7 +1641,6 @@ static int set_gain(struct gspca_dev *gspca_dev)
gain[3] = ov_gain[sd->gain];
break;
case SENSOR_MT9V011:
- case SENSOR_MT9V111:
gain[0] |= (3 << 4);
gain[2] = 0x35;
gain[3] = micron1_gain[sd->gain] >> 8;
@@ -1931,7 +1891,7 @@ static int sd_dbg_g_register(struct gspca_dev *gspca_dev,
if (reg->match.addr != sd->i2c_addr)
return -EINVAL;
if (sd->sensor >= SENSOR_MT9V011 &&
- sd->sensor <= SENSOR_MT9M111) {
+ sd->sensor <= SENSOR_MT9M112) {
if (i2c_r2(gspca_dev, reg->reg, (u16 *)&reg->val) < 0)
return -EINVAL;
} else {
@@ -1960,7 +1920,7 @@ static int sd_dbg_s_register(struct gspca_dev *gspca_dev,
if (reg->match.addr != sd->i2c_addr)
return -EINVAL;
if (sd->sensor >= SENSOR_MT9V011 &&
- sd->sensor <= SENSOR_MT9M111) {
+ sd->sensor <= SENSOR_MT9M112) {
if (i2c_w2(gspca_dev, reg->reg, reg->val) < 0)
return -EINVAL;
} else {
@@ -2005,8 +1965,10 @@ static int sd_config(struct gspca_dev *gspca_dev,
sd->sensor = (id->driver_info >> 8) & 0xff;
sd->i2c_addr = id->driver_info & 0xff;
+ sd->flags = (id->driver_info >> 16) & 0xff;
switch (sd->sensor) {
+ case SENSOR_MT9M112:
case SENSOR_MT9M111:
case SENSOR_OV9650:
case SENSOR_SOI968:
@@ -2039,11 +2001,6 @@ static int sd_config(struct gspca_dev *gspca_dev,
sd->quality = 95;
-#ifdef CONFIG_USB_GSPCA_SN9C20X_EVDEV
- sd->input_gpio = (id->driver_info >> 16) & 0xff;
- if (sn9c20x_input_init(gspca_dev) < 0)
- return -ENODEV;
-#endif
return 0;
}
@@ -2063,6 +2020,11 @@ static int sd_init(struct gspca_dev *gspca_dev)
}
}
+ if (sd->flags & LED_REVERSE)
+ reg_w1(gspca_dev, 0x1006, 0x00);
+ else
+ reg_w1(gspca_dev, 0x1006, 0x20);
+
if (reg_w(gspca_dev, 0x10c0, i2c_init, 9) < 0) {
err("Device initialization failed");
return -ENODEV;
@@ -2103,6 +2065,11 @@ static int sd_init(struct gspca_dev *gspca_dev)
return -ENODEV;
info("MT9M111 sensor detected");
break;
+ case SENSOR_MT9M112:
+ if (mt9m112_init_sensor(gspca_dev) < 0)
+ return -ENODEV;
+ info("MT9M112 sensor detected");
+ break;
case SENSOR_MT9M001:
if (mt9m001_init_sensor(gspca_dev) < 0)
return -ENODEV;
@@ -2162,6 +2129,7 @@ static void configure_sensor_output(struct gspca_dev *gspca_dev, int mode)
i2c_w1(gspca_dev, 0x12, (value & 0x7) | 0x40);
}
break;
+ case SENSOR_MT9M112:
case SENSOR_MT9M111:
if (mode & MODE_SXGA) {
i2c_w2(gspca_dev, 0xf0, 0x0002);
@@ -2243,6 +2211,8 @@ static int sd_start(struct gspca_dev *gspca_dev)
set_exposure(gspca_dev);
set_hvflip(gspca_dev);
+ reg_w1(gspca_dev, 0x1007, 0x20);
+
reg_r(gspca_dev, 0x1061, 1);
reg_w1(gspca_dev, 0x1061, gspca_dev->usb_buf[0] | 0x02);
return 0;
@@ -2250,6 +2220,8 @@ static int sd_start(struct gspca_dev *gspca_dev)
static void sd_stopN(struct gspca_dev *gspca_dev)
{
+ reg_w1(gspca_dev, 0x1007, 0x00);
+
reg_r(gspca_dev, 0x1061, 1);
reg_w1(gspca_dev, 0x1061, gspca_dev->usb_buf[0] & ~0x02);
}
@@ -2343,6 +2315,24 @@ static void sd_dqcallback(struct gspca_dev *gspca_dev)
do_autoexposure(gspca_dev, avg_lum);
}
+#ifdef CONFIG_INPUT
+static int sd_int_pkt_scan(struct gspca_dev *gspca_dev,
+ u8 *data, /* interrupt packet */
+ int len) /* interrupt packet length */
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ int ret = -EINVAL;
+ if (!(sd->flags & HAS_NO_BUTTON) && len == 1) {
+ input_report_key(gspca_dev->input_dev, KEY_CAMERA, 1);
+ input_sync(gspca_dev->input_dev);
+ input_report_key(gspca_dev->input_dev, KEY_CAMERA, 0);
+ input_sync(gspca_dev->input_dev);
+ ret = 0;
+ }
+ return ret;
+}
+#endif
+
static void sd_pkt_scan(struct gspca_dev *gspca_dev,
u8 *data, /* isoc packet */
int len) /* iso packet length */
@@ -2409,6 +2399,9 @@ static const struct sd_desc sd_desc = {
.stopN = sd_stopN,
.stop0 = sd_stop0,
.pkt_scan = sd_pkt_scan,
+#ifdef CONFIG_INPUT
+ .int_pkt_scan = sd_int_pkt_scan,
+#endif
.dq_callback = sd_dqcallback,
#ifdef CONFIG_VIDEO_ADV_DEBUG
.set_register = sd_dbg_s_register,
@@ -2417,8 +2410,8 @@ static const struct sd_desc sd_desc = {
.get_chip_ident = sd_chip_ident,
};
-#define SN9C20X(sensor, i2c_addr, button_mask) \
- .driver_info = (button_mask << 16) \
+#define SN9C20X(sensor, i2c_addr, flags) \
+ .driver_info = ((flags & 0xff) << 16) \
| (SENSOR_ ## sensor << 8) \
| (i2c_addr)
@@ -2426,8 +2419,10 @@ static const __devinitdata struct usb_device_id device_table[] = {
{USB_DEVICE(0x0c45, 0x6240), SN9C20X(MT9M001, 0x5d, 0)},
{USB_DEVICE(0x0c45, 0x6242), SN9C20X(MT9M111, 0x5d, 0)},
{USB_DEVICE(0x0c45, 0x6248), SN9C20X(OV9655, 0x30, 0)},
- {USB_DEVICE(0x0c45, 0x624e), SN9C20X(SOI968, 0x30, 0x10)},
- {USB_DEVICE(0x0c45, 0x624f), SN9C20X(OV9650, 0x30, 0)},
+ {USB_DEVICE(0x0c45, 0x624c), SN9C20X(MT9M112, 0x5d, 0)},
+ {USB_DEVICE(0x0c45, 0x624e), SN9C20X(SOI968, 0x30, LED_REVERSE)},
+ {USB_DEVICE(0x0c45, 0x624f), SN9C20X(OV9650, 0x30,
+ (FLIP_DETECT | HAS_NO_BUTTON))},
{USB_DEVICE(0x0c45, 0x6251), SN9C20X(OV9650, 0x30, 0)},
{USB_DEVICE(0x0c45, 0x6253), SN9C20X(OV9650, 0x30, 0)},
{USB_DEVICE(0x0c45, 0x6260), SN9C20X(OV7670, 0x21, 0)},
@@ -2438,6 +2433,7 @@ static const __devinitdata struct usb_device_id device_table[] = {
{USB_DEVICE(0x0c45, 0x6280), SN9C20X(MT9M001, 0x5d, 0)},
{USB_DEVICE(0x0c45, 0x6282), SN9C20X(MT9M111, 0x5d, 0)},
{USB_DEVICE(0x0c45, 0x6288), SN9C20X(OV9655, 0x30, 0)},
+ {USB_DEVICE(0x0c45, 0x628c), SN9C20X(MT9M112, 0x5d, 0)},
{USB_DEVICE(0x0c45, 0x628e), SN9C20X(SOI968, 0x30, 0)},
{USB_DEVICE(0x0c45, 0x628f), SN9C20X(OV9650, 0x30, 0)},
{USB_DEVICE(0x0c45, 0x62a0), SN9C20X(OV7670, 0x21, 0)},
@@ -2448,6 +2444,8 @@ static const __devinitdata struct usb_device_id device_table[] = {
{USB_DEVICE(0x045e, 0x00f4), SN9C20X(OV9650, 0x30, 0)},
{USB_DEVICE(0x145f, 0x013d), SN9C20X(OV7660, 0x21, 0)},
{USB_DEVICE(0x0458, 0x7029), SN9C20X(HV7131R, 0x11, 0)},
+ {USB_DEVICE(0x0458, 0x704a), SN9C20X(MT9M112, 0x5d, 0)},
+ {USB_DEVICE(0x0458, 0x704c), SN9C20X(MT9M112, 0x5d, 0)},
{USB_DEVICE(0xa168, 0x0610), SN9C20X(HV7131R, 0x11, 0)},
{USB_DEVICE(0xa168, 0x0611), SN9C20X(HV7131R, 0x11, 0)},
{USB_DEVICE(0xa168, 0x0613), SN9C20X(HV7131R, 0x11, 0)},
@@ -2467,22 +2465,11 @@ static int sd_probe(struct usb_interface *intf,
THIS_MODULE);
}
-static void sd_disconnect(struct usb_interface *intf)
-{
-#ifdef CONFIG_USB_GSPCA_SN9C20X_EVDEV
- struct gspca_dev *gspca_dev = usb_get_intfdata(intf);
-
- sn9c20x_input_cleanup(gspca_dev);
-#endif
-
- gspca_disconnect(intf);
-}
-
static struct usb_driver sd_driver = {
.name = MODULE_NAME,
.id_table = device_table,
.probe = sd_probe,
- .disconnect = sd_disconnect,
+ .disconnect = gspca_disconnect,
#ifdef CONFIG_PM
.suspend = gspca_suspend,
.resume = gspca_resume,
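
The reworked SN9C20X() macro above packs three per-camera values into the 32-bit driver_info of each usb_device_id entry: flags in bits 16-23, the sensor type in bits 8-15 and the sensor's I2C address in bits 0-7. Worked example for the 0x0c45:0x624e entry (values shown symbolically; the layout is taken from the macro and from sd_config() above):

/* SN9C20X(SOI968, 0x30, LED_REVERSE)
 *	=> driver_info = (LED_REVERSE << 16) | (SENSOR_SOI968 << 8) | 0x30
 *
 * sd_config() then unpacks the same fields:
 */
sd->sensor   = (id->driver_info >> 8) & 0xff;	/* SENSOR_SOI968 */
sd->i2c_addr =  id->driver_info & 0xff;		/* 0x30 */
sd->flags    = (id->driver_info >> 16) & 0xff;	/* LED_REVERSE */
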
diff --git a/drivers/media/video/gspca/sonixj.c b/drivers/media/video/gspca/sonixj.c
index 1d61b92f6bfc..bb923efb75bd 100644
--- a/drivers/media/video/gspca/sonixj.c
+++ b/drivers/media/video/gspca/sonixj.c
@@ -1,7 +1,7 @@
/*
* Sonix sn9c102p sn9c105 sn9c120 (jpeg) subdriver
*
- * Copyright (C) 2009 Jean-Francois Moine <http://moinejf.free.fr>
+ * Copyright (C) 2009-2010 Jean-François Moine <http://moinejf.free.fr>
* Copyright (C) 2005 Michel Xhaard mxhaard@magic.fr
*
* This program is free software; you can redistribute it and/or modify
@@ -28,7 +28,7 @@
#define V4L2_CID_INFRARED (V4L2_CID_PRIVATE_BASE + 0)
-MODULE_AUTHOR("Michel Xhaard <mxhaard@users.sourceforge.net>");
+MODULE_AUTHOR("Jean-François Moine <http://moinejf.free.fr>");
MODULE_DESCRIPTION("GSPCA/SONIX JPEG USB Camera Driver");
MODULE_LICENSE("GPL");
@@ -67,20 +67,25 @@ struct sd {
#define BRIDGE_SN9C110 2
#define BRIDGE_SN9C120 3
u8 sensor; /* Type of image sensor chip */
-#define SENSOR_ADCM1700 0
-#define SENSOR_HV7131R 1
-#define SENSOR_MI0360 2
-#define SENSOR_MO4000 3
-#define SENSOR_MT9V111 4
-#define SENSOR_OM6802 5
-#define SENSOR_OV7630 6
-#define SENSOR_OV7648 7
-#define SENSOR_OV7660 8
-#define SENSOR_PO1030 9
-#define SENSOR_SP80708 10
+enum {
+ SENSOR_ADCM1700,
+ SENSOR_GC0307,
+ SENSOR_HV7131R,
+ SENSOR_MI0360,
+ SENSOR_MO4000,
+ SENSOR_MT9V111,
+ SENSOR_OM6802,
+ SENSOR_OV7630,
+ SENSOR_OV7648,
+ SENSOR_OV7660,
+ SENSOR_PO1030,
+ SENSOR_PO2030N,
+ SENSOR_SOI768,
+ SENSOR_SP80708,
+} sensors;
u8 i2c_addr;
- u8 *jpeg_hdr;
+ u8 jpeg_hdr[JPEG_HDR_SZ];
};
/* V4L2 controls supported by the driver */
@@ -281,29 +286,60 @@ static const struct ctrl sd_ctrls[] = {
};
/* table of the disabled controls */
-static __u32 ctrl_dis[] = {
- (1 << INFRARED_IDX) | (1 << VFLIP_IDX) | (1 << FREQ_IDX) |
- (1 << AUTOGAIN_IDX), /* SENSOR_ADCM1700 0 */
- (1 << INFRARED_IDX) | (1 << FREQ_IDX),
- /* SENSOR_HV7131R 1 */
- (1 << INFRARED_IDX) | (1 << VFLIP_IDX) | (1 << FREQ_IDX),
- /* SENSOR_MI0360 2 */
- (1 << INFRARED_IDX) | (1 << VFLIP_IDX) | (1 << FREQ_IDX),
- /* SENSOR_MO4000 3 */
- (1 << VFLIP_IDX) | (1 << FREQ_IDX),
- /* SENSOR_MT9V111 4 */
- (1 << INFRARED_IDX) | (1 << VFLIP_IDX) | (1 << FREQ_IDX),
- /* SENSOR_OM6802 5 */
- (1 << INFRARED_IDX),
- /* SENSOR_OV7630 6 */
- (1 << INFRARED_IDX),
- /* SENSOR_OV7648 7 */
- (1 << AUTOGAIN_IDX) | (1 << INFRARED_IDX) | (1 << VFLIP_IDX),
- /* SENSOR_OV7660 8 */
- (1 << AUTOGAIN_IDX) | (1 << INFRARED_IDX) | (1 << VFLIP_IDX) |
- (1 << FREQ_IDX), /* SENSOR_PO1030 9 */
- (1 << AUTOGAIN_IDX) | (1 << INFRARED_IDX) | (1 << VFLIP_IDX) |
- (1 << FREQ_IDX), /* SENSOR_SP80708 10 */
+static const __u32 ctrl_dis[] = {
+[SENSOR_ADCM1700] = (1 << AUTOGAIN_IDX) |
+ (1 << INFRARED_IDX) |
+ (1 << VFLIP_IDX) |
+ (1 << FREQ_IDX),
+
+[SENSOR_GC0307] = (1 << INFRARED_IDX) |
+ (1 << VFLIP_IDX) |
+ (1 << FREQ_IDX),
+
+[SENSOR_HV7131R] = (1 << INFRARED_IDX) |
+ (1 << FREQ_IDX),
+
+[SENSOR_MI0360] = (1 << INFRARED_IDX) |
+ (1 << VFLIP_IDX) |
+ (1 << FREQ_IDX),
+
+[SENSOR_MO4000] = (1 << INFRARED_IDX) |
+ (1 << VFLIP_IDX) |
+ (1 << FREQ_IDX),
+
+[SENSOR_MT9V111] = (1 << VFLIP_IDX) |
+ (1 << FREQ_IDX),
+
+[SENSOR_OM6802] = (1 << INFRARED_IDX) |
+ (1 << VFLIP_IDX) |
+ (1 << FREQ_IDX),
+
+[SENSOR_OV7630] = (1 << INFRARED_IDX),
+
+[SENSOR_OV7648] = (1 << INFRARED_IDX),
+
+[SENSOR_OV7660] = (1 << AUTOGAIN_IDX) |
+ (1 << INFRARED_IDX) |
+ (1 << VFLIP_IDX),
+
+[SENSOR_PO1030] = (1 << AUTOGAIN_IDX) |
+ (1 << INFRARED_IDX) |
+ (1 << VFLIP_IDX) |
+ (1 << FREQ_IDX),
+
+[SENSOR_PO2030N] = (1 << AUTOGAIN_IDX) |
+ (1 << INFRARED_IDX) |
+ (1 << VFLIP_IDX) |
+ (1 << FREQ_IDX),
+[SENSOR_SOI768] = (1 << AUTOGAIN_IDX) |
+ (1 << INFRARED_IDX) |
+ (1 << VFLIP_IDX) |
+ (1 << FREQ_IDX),
+
+[SENSOR_SP80708] = (1 << AUTOGAIN_IDX) |
+ (1 << INFRARED_IDX) |
+ (1 << VFLIP_IDX) |
+ (1 << FREQ_IDX),
};
static const struct v4l2_pix_format cif_mode[] = {
@@ -343,7 +379,17 @@ static const u8 sn_adcm1700[0x1c] = {
0x06, 0x00, 0x00, 0x00
};
-/*Data from sn9c102p+hv7131r */
+static const u8 sn_gc0307[0x1c] = {
+/* reg0 reg1 reg2 reg3 reg4 reg5 reg6 reg7 */
+ 0x00, 0x61, 0x62, 0x00, 0x1a, 0x00, 0x00, 0x00,
+/* reg8 reg9 rega regb regc regd rege regf */
+ 0x80, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+/* reg10 reg11 reg12 reg13 reg14 reg15 reg16 reg17 */
+ 0x03, 0x00, 0x03, 0x01, 0x08, 0x28, 0x1e, 0x02,
+/* reg18 reg19 reg1a reg1b */
+ 0x06, 0x00, 0x00, 0x00
+};
+
static const u8 sn_hv7131[0x1c] = {
/* reg0 reg1 reg2 reg3 reg4 reg5 reg6 reg7 */
0x00, 0x03, 0x64, 0x00, 0x1a, 0x20, 0x20, 0x20,
@@ -401,7 +447,7 @@ static const u8 sn_om6802[0x1c] = {
static const u8 sn_ov7630[0x1c] = {
/* reg0 reg1 reg2 reg3 reg4 reg5 reg6 reg7 */
- 0x00, 0x21, 0x40, 0x00, 0x1a, 0x20, 0x1f, 0x20,
+ 0x00, 0x21, 0x40, 0x00, 0x1a, 0x00, 0x00, 0x00,
/* reg8 reg9 rega regb regc regd rege regf */
0x81, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* reg10 reg11 reg12 reg13 reg14 reg15 reg16 reg17 */
@@ -443,6 +489,28 @@ static const u8 sn_po1030[0x1c] = {
0x07, 0x00, 0x00, 0x00
};
+static const u8 sn_po2030n[0x1c] = {
+/* reg0 reg1 reg2 reg3 reg4 reg5 reg6 reg7 */
+ 0x00, 0x63, 0x40, 0x00, 0x1a, 0x00, 0x00, 0x00,
+/* reg8 reg9 rega regb regc regd rege regf */
+ 0x81, 0x6e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+/* reg10 reg11 reg12 reg13 reg14 reg15 reg16 reg17 */
+ 0x03, 0x00, 0x00, 0x01, 0x14, 0x28, 0x1e, 0x00,
+/* reg18 reg19 reg1a reg1b */
+ 0x07, 0x00, 0x00, 0x00
+};
+
+static const u8 sn_soi768[0x1c] = {
+/* reg0 reg1 reg2 reg3 reg4 reg5 reg6 reg7 */
+ 0x00, 0x21, 0x40, 0x00, 0x1a, 0x00, 0x00, 0x00,
+/* reg8 reg9 rega regb regc regd rege regf */
+ 0x81, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+/* reg10 reg11 reg12 reg13 reg14 reg15 reg16 reg17 */
+ 0x03, 0x00, 0x00, 0x01, 0x08, 0x28, 0x1e, 0x00,
+/* reg18 reg19 reg1a reg1b */
+ 0x07, 0x00, 0x00, 0x00
+};
+
static const u8 sn_sp80708[0x1c] = {
/* reg0 reg1 reg2 reg3 reg4 reg5 reg6 reg7 */
0x00, 0x63, 0x60, 0x00, 0x1a, 0x20, 0x20, 0x20,
@@ -456,17 +524,20 @@ static const u8 sn_sp80708[0x1c] = {
/* sequence specific to the sensors - !! index = SENSOR_xxx */
static const u8 *sn_tb[] = {
- sn_adcm1700,
- sn_hv7131,
- sn_mi0360,
- sn_mo4000,
- sn_mt9v111,
- sn_om6802,
- sn_ov7630,
- sn_ov7648,
- sn_ov7660,
- sn_po1030,
- sn_sp80708
+[SENSOR_ADCM1700] = sn_adcm1700,
+[SENSOR_GC0307] = sn_gc0307,
+[SENSOR_HV7131R] = sn_hv7131,
+[SENSOR_MI0360] = sn_mi0360,
+[SENSOR_MO4000] = sn_mo4000,
+[SENSOR_MT9V111] = sn_mt9v111,
+[SENSOR_OM6802] = sn_om6802,
+[SENSOR_OV7630] = sn_ov7630,
+[SENSOR_OV7648] = sn_ov7648,
+[SENSOR_OV7660] = sn_ov7660,
+[SENSOR_PO1030] = sn_po1030,
+[SENSOR_PO2030N] = sn_po2030n,
+[SENSOR_SOI768] = sn_soi768,
+[SENSOR_SP80708] = sn_sp80708,
};
/* default gamma table */
@@ -484,8 +555,13 @@ static const u8 gamma_spec_1[17] = {
0x08, 0x3a, 0x52, 0x65, 0x75, 0x83, 0x91, 0x9d,
0xa9, 0xb4, 0xbe, 0xc8, 0xd2, 0xdb, 0xe4, 0xed, 0xf5
};
-/* gamma for sensor SP80708 */
+/* gamma for sensor GC0307 */
static const u8 gamma_spec_2[17] = {
+ 0x14, 0x37, 0x50, 0x6a, 0x7c, 0x8d, 0x9d, 0xab,
+ 0xb5, 0xbf, 0xc2, 0xcb, 0xd1, 0xd6, 0xdb, 0xe1, 0xeb
+};
+/* gamma for sensor SP80708 */
+static const u8 gamma_spec_3[17] = {
0x0a, 0x2d, 0x4e, 0x68, 0x7d, 0x8f, 0x9f, 0xab,
0xb7, 0xc2, 0xcc, 0xd3, 0xd8, 0xde, 0xe2, 0xe5, 0xe6
};
@@ -533,6 +609,58 @@ static const u8 adcm1700_sensor_param1[][8] = {
{0xb0, 0x51, 0x32, 0x00, 0xa2, 0x00, 0x00, 0x10},
{}
};
+static const u8 gc0307_sensor_init[][8] = {
+ {0xa0, 0x21, 0x43, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x44, 0xa2, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x01, 0x6a, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x02, 0x70, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x10, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x1d, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x11, 0x05, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x05, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x06, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x07, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x08, 0x02, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x09, 0x01, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x0a, 0xe8, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x0b, 0x02, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x0c, 0x80, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x0d, 0x22, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x0e, 0x02, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x0f, 0xb2, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x12, 0x70, 0x00, 0x00, 0x00, 0x10},
+ {0xdd, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*delay 10ms*/
+ {0xa0, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x15, 0xb8, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x16, 0x13, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x17, 0x52, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x18, 0x50, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x1e, 0x0d, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x1f, 0x32, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x61, 0x90, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x63, 0x70, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x65, 0x98, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x67, 0x90, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x03, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x04, 0x96, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x45, 0x27, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x47, 0x2c, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x43, 0x47, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x44, 0xd8, 0x00, 0x00, 0x00, 0x10},
+ {}
+};
+static const u8 gc0307_sensor_param1[][8] = {
+ {0xa0, 0x21, 0x68, 0x13, 0x00, 0x00, 0x00, 0x10},
+ {0xd0, 0x21, 0x61, 0x80, 0x00, 0x80, 0x00, 0x10},
+ {0xc0, 0x21, 0x65, 0x80, 0x00, 0x80, 0x00, 0x10},
+ {0xc0, 0x21, 0x63, 0xa0, 0x00, 0xa6, 0x00, 0x10},
+/*param3*/
+ {0xa0, 0x21, 0x01, 0x6e, 0x00, 0x00, 0x00, 0x10},
+ {0xa0, 0x21, 0x02, 0x88, 0x00, 0x00, 0x00, 0x10},
+ {}
+};
+
static const u8 hv7131r_sensor_init[][8] = {
{0xc1, 0x11, 0x01, 0x08, 0x01, 0x00, 0x00, 0x10},
{0xb1, 0x11, 0x34, 0x17, 0x7f, 0x00, 0x00, 0x10},
@@ -767,7 +895,9 @@ static const u8 ov7630_sensor_init[][8] = {
{0xc1, 0x21, 0x7b, 0x00, 0x4c, 0xf7, 0x00, 0x10},
{0xd1, 0x21, 0x17, 0x1b, 0xbd, 0x05, 0xf6, 0x10},
{0xa1, 0x21, 0x1b, 0x04, 0x00, 0x00, 0x00, 0x10},
-/* */
+ {}
+};
+static const u8 ov7630_sensor_param1[][8] = {
{0xa1, 0x21, 0x12, 0x48, 0x00, 0x00, 0x00, 0x10},
{0xa1, 0x21, 0x12, 0x48, 0x00, 0x00, 0x00, 0x10},
/*fixme: + 0x12, 0x04*/
@@ -984,6 +1114,113 @@ static const u8 po1030_sensor_param1[][8] = {
{}
};
+static const u8 po2030n_sensor_init[][8] = {
+ {0xa1, 0x6e, 0x1e, 0x1a, 0x00, 0x00, 0x00, 0x10},
+ {0xa1, 0x6e, 0x1f, 0x99, 0x00, 0x00, 0x00, 0x10},
+ {0xdd, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 10ms */
+ {0xa1, 0x6e, 0x1e, 0x0a, 0x00, 0x00, 0x00, 0x10},
+ {0xa1, 0x6e, 0x1f, 0x19, 0x00, 0x00, 0x00, 0x10},
+ {0xdd, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 10ms */
+ {0xa1, 0x6e, 0x20, 0x44, 0x00, 0x00, 0x00, 0x10},
+ {0xa1, 0x6e, 0x04, 0x03, 0x00, 0x00, 0x00, 0x10},
+ {0xa1, 0x6e, 0x05, 0x70, 0x00, 0x00, 0x00, 0x10},
+ {0xa1, 0x6e, 0x06, 0x02, 0x00, 0x00, 0x00, 0x10},
+ {0xa1, 0x6e, 0x07, 0x25, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x08, 0x00, 0xd0, 0x00, 0x08, 0x10},
+ {0xd1, 0x6e, 0x0c, 0x03, 0x50, 0x01, 0xe8, 0x10},
+ {0xd1, 0x6e, 0x1d, 0x20, 0x0a, 0x19, 0x44, 0x10},
+ {0xd1, 0x6e, 0x21, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x25, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x29, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x2d, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x31, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x35, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x39, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x3d, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x41, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x45, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x49, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x4d, 0x00, 0x00, 0x00, 0xed, 0x10},
+ {0xd1, 0x6e, 0x51, 0x17, 0x4a, 0x2f, 0xc0, 0x10},
+ {0xd1, 0x6e, 0x55, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x59, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x5d, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x61, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x65, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x69, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x6d, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x71, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x75, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x79, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x7d, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x81, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x85, 0x00, 0x00, 0x00, 0x08, 0x10},
+ {0xd1, 0x6e, 0x89, 0x01, 0xe8, 0x00, 0x01, 0x10},
+ {0xa1, 0x6e, 0x8d, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x21, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x25, 0x00, 0x00, 0x00, 0x01, 0x10},
+ {0xd1, 0x6e, 0x29, 0xe6, 0x00, 0xbd, 0x03, 0x10},
+ {0xd1, 0x6e, 0x2d, 0x41, 0x38, 0x68, 0x40, 0x10},
+ {0xd1, 0x6e, 0x31, 0x2b, 0x00, 0x36, 0x00, 0x10},
+ {0xd1, 0x6e, 0x35, 0x30, 0x30, 0x08, 0x00, 0x10},
+ {0xd1, 0x6e, 0x39, 0x00, 0x00, 0x33, 0x06, 0x10},
+ {0xb1, 0x6e, 0x3d, 0x06, 0x02, 0x00, 0x00, 0x10},
+ {}
+};
+static const u8 po2030n_sensor_param1[][8] = {
+ {0xa1, 0x6e, 0x1a, 0x01, 0x00, 0x00, 0x00, 0x10},
+ {0xdd, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 8ms */
+ {0xa1, 0x6e, 0x1b, 0xf4, 0x00, 0x00, 0x00, 0x10},
+ {0xa1, 0x6e, 0x15, 0x04, 0x00, 0x00, 0x00, 0x10},
+ {0xd1, 0x6e, 0x16, 0x50, 0x40, 0x49, 0x40, 0x10},
+/*param2*/
+ {0xa1, 0x6e, 0x1d, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xa1, 0x6e, 0x04, 0x03, 0x00, 0x00, 0x00, 0x10},
+ {0xa1, 0x6e, 0x05, 0x6f, 0x00, 0x00, 0x00, 0x10},
+ {0xa1, 0x6e, 0x06, 0x02, 0x00, 0x00, 0x00, 0x10},
+ {0xa1, 0x6e, 0x07, 0x25, 0x00, 0x00, 0x00, 0x10},
+ {0xa1, 0x6e, 0x15, 0x04, 0x00, 0x00, 0x00, 0x10},
+ {0xc1, 0x6e, 0x16, 0x52, 0x40, 0x48, 0x00, 0x10},
+/*after start*/
+ {0xa1, 0x6e, 0x15, 0x0f, 0x00, 0x00, 0x00, 0x10},
+ {0xdd, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 5ms */
+ {0xa1, 0x6e, 0x1a, 0x05, 0x00, 0x00, 0x00, 0x10},
+ {0xdd, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 5ms */
+ {0xa1, 0x6e, 0x1b, 0x53, 0x00, 0x00, 0x00, 0x10},
+ {}
+};
+
+static const u8 soi768_sensor_init[][8] = {
+ {0xa1, 0x21, 0x12, 0x80, 0x00, 0x00, 0x00, 0x10}, /* reset */
+ {0xdd, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 96ms */
+ {0xa1, 0x21, 0x12, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xa1, 0x21, 0x13, 0x80, 0x00, 0x00, 0x00, 0x10},
+ {0xa1, 0x21, 0x0f, 0x03, 0x00, 0x00, 0x00, 0x10},
+ {0xa1, 0x21, 0x19, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {}
+};
+static const u8 soi768_sensor_param1[][8] = {
+ {0xa1, 0x21, 0x10, 0x10, 0x00, 0x00, 0x00, 0x10},
+ {0xa1, 0x21, 0x2d, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xa1, 0x21, 0x2e, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xa1, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10},
+ {0xb1, 0x21, 0x01, 0x7f, 0x7f, 0x00, 0x00, 0x10},
+/* */
+/* {0xa1, 0x21, 0x2e, 0x00, 0x00, 0x00, 0x00, 0x10}, */
+/* {0xa1, 0x21, 0x2d, 0x25, 0x00, 0x00, 0x00, 0x10}, */
+ {0xa1, 0x21, 0x2b, 0x00, 0x00, 0x00, 0x00, 0x10},
+/* {0xb1, 0x21, 0x2d, 0x00, 0x00, 0x00, 0x00, 0x10}, */
+ {0xa1, 0x21, 0x02, 0x8d, 0x00, 0x00, 0x00, 0x10},
+/* the next sequence should be used for auto gain */
+ {0xa1, 0x21, 0x00, 0x07, 0x00, 0x00, 0x00, 0x10},
+ /* global gain ? : 07 - change with 0x15 at the end */
+ {0xa1, 0x21, 0x10, 0x3f, 0x00, 0x00, 0x00, 0x10}, /* ???? : 063f */
+ {0xa1, 0x21, 0x04, 0x06, 0x00, 0x00, 0x00, 0x10},
+ {0xb1, 0x21, 0x2d, 0x00, 0x02, 0x00, 0x00, 0x10},
+ /* exposure ? : 0200 - change with 0x1e at the end */
+ {}
+};
+
static const u8 sp80708_sensor_init[][8] = {
{0xa1, 0x18, 0x06, 0xf9, 0x00, 0x00, 0x00, 0x10},
{0xa1, 0x18, 0x09, 0x1f, 0x00, 0x00, 0x00, 0x10},
@@ -1069,18 +1306,21 @@ static const u8 sp80708_sensor_param1[][8] = {
{}
};
-static const u8 (*sensor_init[11])[8] = {
- adcm1700_sensor_init, /* ADCM1700 0 */
- hv7131r_sensor_init, /* HV7131R 1 */
- mi0360_sensor_init, /* MI0360 2 */
- mo4000_sensor_init, /* MO4000 3 */
- mt9v111_sensor_init, /* MT9V111 4 */
- om6802_sensor_init, /* OM6802 5 */
- ov7630_sensor_init, /* OV7630 6 */
- ov7648_sensor_init, /* OV7648 7 */
- ov7660_sensor_init, /* OV7660 8 */
- po1030_sensor_init, /* PO1030 9 */
- sp80708_sensor_init, /* SP80708 10 */
+static const u8 (*sensor_init[])[8] = {
+[SENSOR_ADCM1700] = adcm1700_sensor_init,
+[SENSOR_GC0307] = gc0307_sensor_init,
+[SENSOR_HV7131R] = hv7131r_sensor_init,
+[SENSOR_MI0360] = mi0360_sensor_init,
+[SENSOR_MO4000] = mo4000_sensor_init,
+[SENSOR_MT9V111] = mt9v111_sensor_init,
+[SENSOR_OM6802] = om6802_sensor_init,
+[SENSOR_OV7630] = ov7630_sensor_init,
+[SENSOR_OV7648] = ov7648_sensor_init,
+[SENSOR_OV7660] = ov7660_sensor_init,
+[SENSOR_PO1030] = po1030_sensor_init,
+[SENSOR_PO2030N] = po2030n_sensor_init,
+[SENSOR_SOI768] = soi768_sensor_init,
+[SENSOR_SP80708] = sp80708_sensor_init,
};
/* read <len> bytes to gspca_dev->usb_buf */
@@ -1146,10 +1386,11 @@ static void i2c_w1(struct gspca_dev *gspca_dev, u8 reg, u8 val)
{
struct sd *sd = (struct sd *) gspca_dev;
- PDEBUG(D_USBO, "i2c_w2 [%02x] = %02x", reg, val);
+ PDEBUG(D_USBO, "i2c_w1 [%02x] = %02x", reg, val);
switch (sd->sensor) {
case SENSOR_ADCM1700:
- case SENSOR_OM6802: /* i2c command = a0 (100 kHz) */
+ case SENSOR_OM6802:
+ case SENSOR_GC0307: /* i2c command = a0 (100 kHz) */
gspca_dev->usb_buf[0] = 0x80 | (2 << 4);
break;
default: /* i2c command = a1 (400 kHz) */
@@ -1177,6 +1418,8 @@ static void i2c_w1(struct gspca_dev *gspca_dev, u8 reg, u8 val)
static void i2c_w8(struct gspca_dev *gspca_dev,
const u8 *buffer)
{
+ PDEBUG(D_USBO, "i2c_w8 [%02x] = %02x ..",
+ buffer[2], buffer[3]);
memcpy(gspca_dev->usb_buf, buffer, 8);
usb_control_msg(gspca_dev->dev,
usb_sndctrlpipe(gspca_dev->dev, 0),
@@ -1196,7 +1439,8 @@ static void i2c_r(struct gspca_dev *gspca_dev, u8 reg, int len)
switch (sd->sensor) {
case SENSOR_ADCM1700:
- case SENSOR_OM6802: /* i2c command = 90 (100 kHz) */
+ case SENSOR_OM6802:
+ case SENSOR_GC0307: /* i2c command = 90 (100 kHz) */
mode[0] = 0x80 | 0x10;
break;
default: /* i2c command = 91 (400 kHz) */
@@ -1300,39 +1544,100 @@ static void mi0360_probe(struct gspca_dev *gspca_dev)
}
}
-static void ov7648_probe(struct gspca_dev *gspca_dev)
+static void ov7630_probe(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
+ u16 val;
/* check ov76xx */
reg_w1(gspca_dev, 0x17, 0x62);
reg_w1(gspca_dev, 0x01, 0x08);
sd->i2c_addr = 0x21;
i2c_r(gspca_dev, 0x0a, 2);
- if (gspca_dev->usb_buf[3] == 0x76) { /* ov76xx */
- PDEBUG(D_PROBE, "Sensor ov%02x%02x",
- gspca_dev->usb_buf[3], gspca_dev->usb_buf[4]);
+ val = (gspca_dev->usb_buf[3] << 8) | gspca_dev->usb_buf[4];
+ reg_w1(gspca_dev, 0x01, 0x29);
+ reg_w1(gspca_dev, 0x17, 0x42);
+ if (val == 0x7628) { /* soi768 */
+ sd->sensor = SENSOR_SOI768;
+/*fixme: only valid for 0c45:613e?*/
+ gspca_dev->cam.input_flags =
+ V4L2_IN_ST_VFLIP | V4L2_IN_ST_HFLIP;
+ PDEBUG(D_PROBE, "Sensor soi768");
return;
}
+ PDEBUG(D_PROBE, "Sensor ov%04x", val);
+}
- /* reset */
+static void ov7648_probe(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ u16 val;
+
+ /* check ov76xx */
+ reg_w1(gspca_dev, 0x17, 0x62);
+ reg_w1(gspca_dev, 0x01, 0x08);
+ sd->i2c_addr = 0x21;
+ i2c_r(gspca_dev, 0x0a, 2);
+ val = (gspca_dev->usb_buf[3] << 8) | gspca_dev->usb_buf[4];
reg_w1(gspca_dev, 0x01, 0x29);
reg_w1(gspca_dev, 0x17, 0x42);
+ if ((val & 0xff00) == 0x7600) { /* ov76xx */
+ PDEBUG(D_PROBE, "Sensor ov%04x", val);
+ return;
+ }
/* check po1030 */
reg_w1(gspca_dev, 0x17, 0x62);
reg_w1(gspca_dev, 0x01, 0x08);
sd->i2c_addr = 0x6e;
i2c_r(gspca_dev, 0x00, 2);
- if (gspca_dev->usb_buf[3] == 0x10 /* po1030 */
- && gspca_dev->usb_buf[4] == 0x30) {
+ val = (gspca_dev->usb_buf[3] << 8) | gspca_dev->usb_buf[4];
+ reg_w1(gspca_dev, 0x01, 0x29);
+ reg_w1(gspca_dev, 0x17, 0x42);
+ if (val == 0x1030) { /* po1030 */
PDEBUG(D_PROBE, "Sensor po1030");
sd->sensor = SENSOR_PO1030;
return;
}
- PDEBUG(D_PROBE, "Unknown sensor %02x%02x",
- gspca_dev->usb_buf[3], gspca_dev->usb_buf[4]);
+ PDEBUG(D_PROBE, "Unknown sensor %04x", val);
+}
+
+/* 0c45:6142 sensor may be po2030n, gc0305 or gc0307 */
+static void po2030n_probe(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ u16 val;
+
+ /* check gc0307 */
+ reg_w1(gspca_dev, 0x17, 0x62);
+ reg_w1(gspca_dev, 0x01, 0x08);
+ reg_w1(gspca_dev, 0x02, 0x22);
+ sd->i2c_addr = 0x21;
+ i2c_r(gspca_dev, 0x00, 1);
+ val = gspca_dev->usb_buf[4];
+ reg_w1(gspca_dev, 0x01, 0x29); /* reset */
+ reg_w1(gspca_dev, 0x17, 0x42);
+ if (val == 0x99) { /* gc0307 (?) */
+ PDEBUG(D_PROBE, "Sensor gc0307");
+ sd->sensor = SENSOR_GC0307;
+ return;
+ }
+
+ /* check po2030n */
+ reg_w1(gspca_dev, 0x17, 0x62);
+ reg_w1(gspca_dev, 0x01, 0x0a);
+ sd->i2c_addr = 0x6e;
+ i2c_r(gspca_dev, 0x00, 2);
+ val = (gspca_dev->usb_buf[3] << 8) | gspca_dev->usb_buf[4];
+ reg_w1(gspca_dev, 0x01, 0x29);
+ reg_w1(gspca_dev, 0x17, 0x42);
+ if (val == 0x2030) {
+ PDEBUG(D_PROBE, "Sensor po2030n");
+/* sd->sensor = SENSOR_PO2030N; */
+ } else {
+ PDEBUG(D_PROBE, "Unknown sensor ID %04x", val);
+ }
}
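/*
 * Illustrative sketch only (hypothetical helper, not part of the driver):
 * ov7630_probe(), ov7648_probe() and po2030n_probe() above all follow the
 * same pattern - read one or two ID bytes over i2c into usb_buf[3]/[4],
 * fold them into a 16-bit value and compare it against the expected
 * product IDs.  A table-driven form of that comparison could look like:
 */
struct sensor_id_match {
	u16 id;		/* (usb_buf[3] << 8) | usb_buf[4] */
	u8 sensor;	/* SENSOR_xxx to select on a match */
};

static int match_sensor_id(u16 val, const struct sensor_id_match *tb, int n)
{
	int i;

	for (i = 0; i < n; i++)
		if (tb[i].id == val)
			return tb[i].sensor;
	return -1;		/* unknown - keep the default sensor */
}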
static void bridge_init(struct gspca_dev *gspca_dev,
@@ -1355,8 +1660,11 @@ static void bridge_init(struct gspca_dev *gspca_dev,
reg_w(gspca_dev, 0x08, &sn9c1xx[8], 2);
reg_w(gspca_dev, 0x17, &sn9c1xx[0x17], 5);
switch (sd->sensor) {
+ case SENSOR_GC0307:
case SENSOR_OV7660:
case SENSOR_PO1030:
+ case SENSOR_PO2030N:
+ case SENSOR_SOI768:
case SENSOR_SP80708:
reg9a = reg9a_spec;
break;
@@ -1377,6 +1685,14 @@ static void bridge_init(struct gspca_dev *gspca_dev,
reg_w1(gspca_dev, 0x01, 0x42);
reg_w1(gspca_dev, 0x01, 0x42);
break;
+ case SENSOR_GC0307:
+ msleep(50);
+ reg_w1(gspca_dev, 0x01, 0x61);
+ reg_w1(gspca_dev, 0x17, 0x22);
+ reg_w1(gspca_dev, 0x01, 0x60);
+ reg_w1(gspca_dev, 0x01, 0x40);
+ msleep(50);
+ break;
case SENSOR_MT9V111:
reg_w1(gspca_dev, 0x01, 0x61);
reg_w1(gspca_dev, 0x17, 0x61);
@@ -1414,11 +1730,18 @@ static void bridge_init(struct gspca_dev *gspca_dev,
reg_w1(gspca_dev, 0x01, 0x42);
break;
case SENSOR_PO1030:
+ case SENSOR_SOI768:
reg_w1(gspca_dev, 0x01, 0x61);
reg_w1(gspca_dev, 0x17, 0x20);
reg_w1(gspca_dev, 0x01, 0x60);
reg_w1(gspca_dev, 0x01, 0x40);
break;
+ case SENSOR_PO2030N:
+ reg_w1(gspca_dev, 0x01, 0x63);
+ reg_w1(gspca_dev, 0x17, 0x20);
+ reg_w1(gspca_dev, 0x01, 0x62);
+ reg_w1(gspca_dev, 0x01, 0x42);
+ break;
case SENSOR_OV7660:
/* fall thru */
case SENSOR_SP80708:
@@ -1523,9 +1846,15 @@ static int sd_init(struct gspca_dev *gspca_dev)
case SENSOR_MI0360:
mi0360_probe(gspca_dev);
break;
+ case SENSOR_OV7630:
+ ov7630_probe(gspca_dev);
+ break;
case SENSOR_OV7648:
ov7648_probe(gspca_dev);
break;
+ case SENSOR_PO2030N:
+ po2030n_probe(gspca_dev);
+ break;
}
regGpio[1] = 0x70;
reg_w(gspca_dev, 0x01, regGpio, 2);
@@ -1558,6 +1887,18 @@ static u32 setexposure(struct gspca_dev *gspca_dev,
struct sd *sd = (struct sd *) gspca_dev;
switch (sd->sensor) {
+ case SENSOR_GC0307: {
+ int a, b;
+
+ /* expo = 0..255 -> a = 19..43 */
+ a = 19 + expo * 25 / 256;
+ i2c_w1(gspca_dev, 0x68, a);
+ a -= 12;
+ b = a * a * 4; /* heuristic */
+ i2c_w1(gspca_dev, 0x03, b >> 8);
+ i2c_w1(gspca_dev, 0x04, b);
+ break;
+ }
case SENSOR_HV7131R: {
u8 Expodoit[] =
{ 0xc1, 0x11, 0x25, 0x00, 0x00, 0x00, 0x00, 0x16 };
@@ -1668,6 +2009,7 @@ static void setbrightness(struct gspca_dev *gspca_dev)
expo = sd->brightness >> 4;
sd->exposure = setexposure(gspca_dev, expo);
break;
+ case SENSOR_GC0307:
case SENSOR_MT9V111:
expo = sd->brightness >> 8;
sd->exposure = setexposure(gspca_dev, expo);
@@ -1703,7 +2045,7 @@ static void setcolors(struct gspca_dev *gspca_dev)
struct sd *sd = (struct sd *) gspca_dev;
int i, v;
u8 reg8a[12]; /* U & V gains */
- static s16 uv[6] = { /* same as reg84 in signed decimal */
+ static const s16 uv[6] = { /* same as reg84 in signed decimal */
-24, -38, 64, /* UR UG UB */
62, -51, -9 /* VR VG VB */
};
@@ -1744,9 +2086,12 @@ static void setgamma(struct gspca_dev *gspca_dev)
case SENSOR_MT9V111:
gamma_base = gamma_spec_1;
break;
- case SENSOR_SP80708:
+ case SENSOR_GC0307:
gamma_base = gamma_spec_2;
break;
+ case SENSOR_SP80708:
+ gamma_base = gamma_spec_3;
+ break;
default:
gamma_base = gamma_def;
break;
@@ -1937,14 +2282,17 @@ static int sd_start(struct gspca_dev *gspca_dev)
static const u8 CA[] = { 0x28, 0xd8, 0x14, 0xec };
static const u8 CA_adcm1700[] =
{ 0x14, 0xec, 0x0a, 0xf6 };
+ static const u8 CA_po2030n[] =
+ { 0x1e, 0xe2, 0x14, 0xec };
static const u8 CE[] = { 0x32, 0xdd, 0x2d, 0xdd }; /* MI0360 */
+ static const u8 CE_gc0307[] =
+ { 0x32, 0xce, 0x2d, 0xd3 };
static const u8 CE_ov76xx[] =
{ 0x32, 0xdd, 0x32, 0xdd };
+ static const u8 CE_po2030n[] =
+ { 0x14, 0xe7, 0x1e, 0xdd };
/* create the JPEG header */
- sd->jpeg_hdr = kmalloc(JPEG_HDR_SZ, GFP_KERNEL);
- if (!sd->jpeg_hdr)
- return -ENOMEM;
jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width,
0x21); /* JPEG 422 */
jpeg_set_qual(sd->jpeg_hdr, sd->quality);
@@ -1996,6 +2344,9 @@ static int sd_start(struct gspca_dev *gspca_dev)
}
reg_w1(gspca_dev, 0x18, sn9c1xx[0x18]);
switch (sd->sensor) {
+ case SENSOR_GC0307:
+ reg17 = 0xa2;
+ break;
case SENSOR_MT9V111:
reg17 = 0xe0;
break;
@@ -2007,9 +2358,11 @@ static int sd_start(struct gspca_dev *gspca_dev)
reg17 = 0x20;
break;
case SENSOR_OV7660:
+ case SENSOR_SOI768:
reg17 = 0xa0;
break;
case SENSOR_PO1030:
+ case SENSOR_PO2030N:
reg17 = 0xa0;
break;
default:
@@ -2034,12 +2387,18 @@ static int sd_start(struct gspca_dev *gspca_dev)
case SENSOR_SP80708:
reg_w1(gspca_dev, 0x9a, 0x05);
break;
+ case SENSOR_GC0307:
case SENSOR_MT9V111:
reg_w1(gspca_dev, 0x9a, 0x07);
break;
+ case SENSOR_OV7630:
case SENSOR_OV7648:
reg_w1(gspca_dev, 0x9a, 0x0a);
break;
+ case SENSOR_PO2030N:
+ case SENSOR_SOI768:
+ reg_w1(gspca_dev, 0x9a, 0x06);
+ break;
default:
reg_w1(gspca_dev, 0x9a, 0x08);
break;
@@ -2064,6 +2423,11 @@ static int sd_start(struct gspca_dev *gspca_dev)
reg1 = 0x46;
reg17 = 0xe2;
break;
+ case SENSOR_GC0307:
+ init = gc0307_sensor_param1;
+ reg17 = 0xa2;
+ reg1 = 0x44;
+ break;
case SENSOR_MO4000:
if (mode) {
/* reg1 = 0x46; * 320 clk 48Mhz 60fp/s */
@@ -2087,6 +2451,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
reg17 = 0x64; /* 640 MCKSIZE */
break;
case SENSOR_OV7630:
+ init = ov7630_sensor_param1;
reg17 = 0xe2;
reg1 = 0x44;
break;
@@ -2113,6 +2478,16 @@ static int sd_start(struct gspca_dev *gspca_dev)
reg17 = 0xa2;
reg1 = 0x44;
break;
+ case SENSOR_PO2030N:
+ init = po2030n_sensor_param1;
+ reg1 = 0x46;
+ reg17 = 0xa2;
+ break;
+ case SENSOR_SOI768:
+ init = soi768_sensor_param1;
+ reg1 = 0x44;
+ reg17 = 0xa2;
+ break;
default:
/* case SENSOR_SP80708: */
init = sp80708_sensor_param1;
@@ -2132,17 +2507,33 @@ static int sd_start(struct gspca_dev *gspca_dev)
}
reg_w(gspca_dev, 0xc0, C0, 6);
- if (sd->sensor == SENSOR_ADCM1700)
+ switch (sd->sensor) {
+ case SENSOR_ADCM1700:
+ case SENSOR_GC0307:
+ case SENSOR_SOI768:
reg_w(gspca_dev, 0xca, CA_adcm1700, 4);
- else
+ break;
+ case SENSOR_PO2030N:
+ reg_w(gspca_dev, 0xca, CA_po2030n, 4);
+ break;
+ default:
reg_w(gspca_dev, 0xca, CA, 4);
+ break;
+ }
switch (sd->sensor) {
case SENSOR_ADCM1700:
case SENSOR_OV7630:
case SENSOR_OV7648:
case SENSOR_OV7660:
+ case SENSOR_SOI768:
reg_w(gspca_dev, 0xce, CE_ov76xx, 4);
break;
+ case SENSOR_GC0307:
+ reg_w(gspca_dev, 0xce, CE_gc0307, 4);
+ break;
+ case SENSOR_PO2030N:
+ reg_w(gspca_dev, 0xce, CE_po2030n, 4);
+ break;
default:
reg_w(gspca_dev, 0xce, CE, 4);
/* ?? {0x1e, 0xdd, 0x2d, 0xe7} */
@@ -2161,6 +2552,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
setvflip(sd);
setbrightness(gspca_dev);
setcontrast(gspca_dev);
+ setcolors(gspca_dev);
setautogain(gspca_dev);
setfreq(gspca_dev);
return 0;
@@ -2175,11 +2567,16 @@ static void sd_stopN(struct gspca_dev *gspca_dev)
{ 0xb1, 0x5d, 0x07, 0x00, 0x00, 0x00, 0x00, 0x10 };
static const u8 stopov7648[] =
{ 0xa1, 0x21, 0x76, 0x20, 0x00, 0x00, 0x00, 0x10 };
+ static const u8 stopsoi768[] =
+ { 0xa1, 0x21, 0x12, 0x80, 0x00, 0x00, 0x00, 0x10 };
u8 data;
const u8 *sn9c1xx;
data = 0x0b;
switch (sd->sensor) {
+ case SENSOR_GC0307:
+ data = 0x29;
+ break;
case SENSOR_HV7131R:
i2c_w8(gspca_dev, stophv7131);
data = 0x2b;
@@ -2196,6 +2593,10 @@ static void sd_stopN(struct gspca_dev *gspca_dev)
case SENSOR_PO1030:
data = 0x29;
break;
+ case SENSOR_SOI768:
+ i2c_w8(gspca_dev, stopsoi768);
+ data = 0x29;
+ break;
}
sn9c1xx = sn_tb[sd->sensor];
reg_w1(gspca_dev, 0x01, sn9c1xx[1]);
@@ -2206,13 +2607,6 @@ static void sd_stopN(struct gspca_dev *gspca_dev)
/* reg_w1(gspca_dev, 0xf1, 0x01); */
}
-static void sd_stop0(struct gspca_dev *gspca_dev)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- kfree(sd->jpeg_hdr);
-}
-
static void do_autogain(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -2233,6 +2627,14 @@ static void do_autogain(struct gspca_dev *gspca_dev)
if (delta < luma_mean - luma_delta ||
delta > luma_mean + luma_delta) {
switch (sd->sensor) {
+ case SENSOR_GC0307:
+ expotimes = sd->exposure;
+ expotimes += (luma_mean - delta) >> 6;
+ if (expotimes < 0)
+ expotimes = 0;
+ sd->exposure = setexposure(gspca_dev,
+ (unsigned int) expotimes);
+ break;
case SENSOR_HV7131R:
expotimes = sd->exposure >> 8;
expotimes += (luma_mean - delta) >> 4;
@@ -2584,7 +2986,6 @@ static const struct sd_desc sd_desc = {
.init = sd_init,
.start = sd_start,
.stopN = sd_stopN,
- .stop0 = sd_stop0,
.pkt_scan = sd_pkt_scan,
.dq_callback = do_autogain,
.get_jcomp = sd_get_jcomp,
@@ -2656,7 +3057,7 @@ static const __devinitdata struct usb_device_id device_table[] = {
#endif
{USB_DEVICE(0x0c45, 0x613c), BS(SN9C120, HV7131R)},
{USB_DEVICE(0x0c45, 0x613e), BS(SN9C120, OV7630)},
-/* {USB_DEVICE(0x0c45, 0x6142), BS(SN9C120, PO2030N)}, *sn9c120b*/
+ {USB_DEVICE(0x0c45, 0x6142), BS(SN9C120, PO2030N)}, /*sn9c120b*/
{USB_DEVICE(0x0c45, 0x6143), BS(SN9C120, SP80708)}, /*sn9c120b*/
{USB_DEVICE(0x0c45, 0x6148), BS(SN9C120, OM6802)}, /*sn9c120b*/
{USB_DEVICE(0x0c45, 0x614a), BS(SN9C120, ADCM1700)}, /*sn9c120b*/
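As a worked example of the new GC0307 exposure handling, the sketch below redoes the arithmetic from setexposure() in a standalone program (the function name and the main() scaffolding are illustrative only; the driver performs the same computation inline and writes the results with i2c_w1()):

#include <stdio.h>

/* GC0307 exposure mapping as in setexposure(): expo 0..255 is first
 * compressed to a = 19..43 (register 0x68), then b = (a - 12)^2 * 4 is
 * split over registers 0x03 (high byte) and 0x04 (low byte). */
static void gc0307_expo_regs(int expo, unsigned char regs[3])
{
	int a = 19 + expo * 25 / 256;		/* register 0x68 */
	int b = (a - 12) * (a - 12) * 4;	/* heuristic, as in the driver */

	regs[0] = a;
	regs[1] = b >> 8;			/* register 0x03 */
	regs[2] = b;				/* register 0x04 */
}

int main(void)
{
	unsigned char r[3];

	gc0307_expo_regs(255, r);		/* brightest setting */
	printf("reg68=%u reg03=%u reg04=%u\n", r[0], r[1], r[2]);
	return 0;
}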
diff --git a/drivers/media/video/gspca/spca561.c b/drivers/media/video/gspca/spca561.c
index b9c80e2103b9..7bb2355005dc 100644
--- a/drivers/media/video/gspca/spca561.c
+++ b/drivers/media/video/gspca/spca561.c
@@ -22,6 +22,7 @@
#define MODULE_NAME "spca561"
+#include <linux/input.h>
#include "gspca.h"
MODULE_AUTHOR("Michel Xhaard <mxhaard@users.sourceforge.net>");
@@ -249,9 +250,9 @@ static const __u16 Pb100_2map8300[][2] = {
};
static const __u16 spca561_161rev12A_data1[][2] = {
- {0x29, 0x8118}, /* white balance - was 21 */
- {0x08, 0x8114}, /* white balance - was 01 */
- {0x0e, 0x8112}, /* white balance - was 00 */
+ {0x29, 0x8118}, /* Control register (various enable bits) */
+ {0x08, 0x8114}, /* GPIO: Led off */
+ {0x0e, 0x8112}, /* 0x0e stream off 0x3e stream on */
{0x00, 0x8102}, /* white balance - new */
{0x92, 0x8804},
{0x04, 0x8802}, /* windows uses 08 */
@@ -263,15 +264,11 @@ static const __u16 spca561_161rev12A_data2[][2] = {
{0x07, 0x8601},
{0x07, 0x8602},
{0x04, 0x8501},
- {0x21, 0x8118},
{0x07, 0x8201}, /* windows uses 02 */
{0x08, 0x8200},
{0x01, 0x8200},
- {0x00, 0x8114},
- {0x01, 0x8114}, /* windows uses 00 */
-
{0x90, 0x8604},
{0x00, 0x8605},
{0xb0, 0x8603},
@@ -302,6 +299,9 @@ static const __u16 spca561_161rev12A_data2[][2] = {
{0xf0, 0x8505},
{0x32, 0x850a},
/* {0x99, 0x8700}, * - white balance - new (removed) */
+ /* HDG we used to do this in stop0, making the init state and the state
+ after a start / stop different, so do this here instead. */
+ {0x29, 0x8118},
{}
};
@@ -645,6 +645,9 @@ static int sd_start_12a(struct gspca_dev *gspca_dev)
setwhite(gspca_dev);
setgain(gspca_dev);
setexposure(gspca_dev);
+
+ /* Led ON (bit 3 -> 0) */
+ reg_w_val(gspca_dev->dev, 0x8114, 0x00);
return 0;
}
static int sd_start_72a(struct gspca_dev *gspca_dev)
@@ -691,26 +694,14 @@ static void sd_stopN(struct gspca_dev *gspca_dev)
if (sd->chip_revision == Rev012A) {
reg_w_val(gspca_dev->dev, 0x8112, 0x0e);
+ /* Led Off (bit 3 -> 1) */
+ reg_w_val(gspca_dev->dev, 0x8114, 0x08);
} else {
reg_w_val(gspca_dev->dev, 0x8112, 0x20);
/* reg_w_val(gspca_dev->dev, 0x8102, 0x00); ?? */
}
}
-/* called on streamoff with alt 0 and on disconnect */
-static void sd_stop0(struct gspca_dev *gspca_dev)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- if (!gspca_dev->present)
- return;
- if (sd->chip_revision == Rev012A) {
- reg_w_val(gspca_dev->dev, 0x8118, 0x29);
- reg_w_val(gspca_dev->dev, 0x8114, 0x08);
- }
-/* reg_w_val(gspca_dev->dev, 0x8114, 0); */
-}
-
static void do_autogain(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -788,6 +779,23 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
switch (*data++) { /* sequence number */
case 0: /* start of frame */
gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0);
+
+ /* This should never happen */
+ if (len < 2) {
+ PDEBUG(D_ERR, "Short SOF packet, ignoring");
+ gspca_dev->last_packet_type = DISCARD_PACKET;
+ return;
+ }
+
+#ifdef CONFIG_INPUT
+ if (data[0] & 0x20) {
+ input_report_key(gspca_dev->input_dev, KEY_CAMERA, 1);
+ input_sync(gspca_dev->input_dev);
+ input_report_key(gspca_dev->input_dev, KEY_CAMERA, 0);
+ input_sync(gspca_dev->input_dev);
+ }
+#endif
+
if (data[1] & 0x10) {
/* compressed bayer */
gspca_frame_add(gspca_dev, FIRST_PACKET, data, len);
@@ -1028,8 +1036,10 @@ static const struct sd_desc sd_desc_12a = {
.init = sd_init_12a,
.start = sd_start_12a,
.stopN = sd_stopN,
- .stop0 = sd_stop0,
.pkt_scan = sd_pkt_scan,
+#ifdef CONFIG_INPUT
+ .other_input = 1,
+#endif
};
static const struct sd_desc sd_desc_72a = {
.name = MODULE_NAME,
@@ -1039,9 +1049,11 @@ static const struct sd_desc sd_desc_72a = {
.init = sd_init_72a,
.start = sd_start_72a,
.stopN = sd_stopN,
- .stop0 = sd_stop0,
.pkt_scan = sd_pkt_scan,
.dq_callback = do_autogain,
+#ifdef CONFIG_INPUT
+ .other_input = 1,
+#endif
};
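/*
 * Illustrative sketch (hypothetical helper, not in the driver): the
 * snapshot button handling added to sd_pkt_scan() above reports a press
 * immediately followed by a release whenever the button bit (0x20) is
 * set in a start-of-frame packet.  Factored out it would amount to:
 */
#ifdef CONFIG_INPUT
static void report_snapshot_press(struct input_dev *dev)
{
	input_report_key(dev, KEY_CAMERA, 1);	/* press */
	input_sync(dev);
	input_report_key(dev, KEY_CAMERA, 0);	/* release */
	input_sync(dev);
}
#endif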
static const struct sd_desc *sd_desc[2] = {
&sd_desc_12a,
diff --git a/drivers/media/video/gspca/t613.c b/drivers/media/video/gspca/t613.c
index 668a7536af90..63014372adbc 100644
--- a/drivers/media/video/gspca/t613.c
+++ b/drivers/media/video/gspca/t613.c
@@ -44,7 +44,10 @@ struct sd {
u8 gamma;
u8 sharpness;
u8 freq;
- u8 whitebalance;
+ u8 red_balance; /* split balance */
+ u8 blue_balance;
+ u8 global_gain; /* aka gain */
+ u8 whitebalance; /* set default r/g/b and activate */
u8 mirror;
u8 effect;
@@ -70,8 +73,17 @@ static int sd_setsharpness(struct gspca_dev *gspca_dev, __s32 val);
static int sd_getsharpness(struct gspca_dev *gspca_dev, __s32 *val);
static int sd_setfreq(struct gspca_dev *gspca_dev, __s32 val);
static int sd_getfreq(struct gspca_dev *gspca_dev, __s32 *val);
+
static int sd_setwhitebalance(struct gspca_dev *gspca_dev, __s32 val);
static int sd_getwhitebalance(struct gspca_dev *gspca_dev, __s32 *val);
+static int sd_setblue_balance(struct gspca_dev *gspca_dev, __s32 val);
+static int sd_getblue_balance(struct gspca_dev *gspca_dev, __s32 *val);
+static int sd_setred_balance(struct gspca_dev *gspca_dev, __s32 val);
+static int sd_getred_balance(struct gspca_dev *gspca_dev, __s32 *val);
+static int sd_setglobal_gain(struct gspca_dev *gspca_dev, __s32 val);
+static int sd_getglobal_gain(struct gspca_dev *gspca_dev, __s32 *val);
+
static int sd_setflip(struct gspca_dev *gspca_dev, __s32 val);
static int sd_getflip(struct gspca_dev *gspca_dev, __s32 *val);
static int sd_seteffect(struct gspca_dev *gspca_dev, __s32 val);
@@ -79,6 +91,7 @@ static int sd_geteffect(struct gspca_dev *gspca_dev, __s32 *val);
static int sd_querymenu(struct gspca_dev *gspca_dev,
struct v4l2_querymenu *menu);
+
static const struct ctrl sd_ctrls[] = {
{
{
@@ -139,7 +152,7 @@ static const struct ctrl sd_ctrls[] = {
},
{
{
- .id = V4L2_CID_GAIN, /* here, i activate only the lowlight,
+ .id = V4L2_CID_BACKLIGHT_COMPENSATION, /* Activate lowlight,
* some apps don't bring up the
* backlight_compensation control */
.type = V4L2_CTRL_TYPE_INTEGER,
@@ -183,7 +196,7 @@ static const struct ctrl sd_ctrls[] = {
{
{
- .id = V4L2_CID_WHITE_BALANCE_TEMPERATURE,
+ .id = V4L2_CID_AUTO_WHITE_BALANCE,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "White Balance",
.minimum = 0,
@@ -223,6 +236,48 @@ static const struct ctrl sd_ctrls[] = {
.set = sd_seteffect,
.get = sd_geteffect
},
+ {
+ {
+ .id = V4L2_CID_BLUE_BALANCE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Blue Balance",
+ .minimum = 0x10,
+ .maximum = 0x40,
+ .step = 1,
+#define BLUE_BALANCE_DEF 0x20
+ .default_value = BLUE_BALANCE_DEF,
+ },
+ .set = sd_setblue_balance,
+ .get = sd_getblue_balance,
+ },
+ {
+ {
+ .id = V4L2_CID_RED_BALANCE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Red Balance",
+ .minimum = 0x10,
+ .maximum = 0x40,
+ .step = 1,
+#define RED_BALANCE_DEF 0x20
+ .default_value = RED_BALANCE_DEF,
+ },
+ .set = sd_setred_balance,
+ .get = sd_getred_balance,
+ },
+ {
+ {
+ .id = V4L2_CID_GAIN,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Gain",
+ .minimum = 0x10,
+ .maximum = 0x40,
+ .step = 1,
+#define global_gain_DEF 0x20
+ .default_value = global_gain_DEF,
+ },
+ .set = sd_setglobal_gain,
+ .get = sd_getglobal_gain,
+ },
};
static char *effects_control[] = {
@@ -523,6 +578,10 @@ static void reg_w_buf(struct gspca_dev *gspca_dev,
u8 *tmpbuf;
tmpbuf = kmalloc(len, GFP_KERNEL);
+ if (!tmpbuf) {
+ err("Out of memory");
+ return;
+ }
memcpy(tmpbuf, buffer, len);
usb_control_msg(gspca_dev->dev,
usb_sndctrlpipe(gspca_dev->dev, 0),
@@ -542,10 +601,15 @@ static void reg_w_ixbuf(struct gspca_dev *gspca_dev,
int i;
u8 *p, *tmpbuf;
- if (len * 2 <= USB_BUF_SZ)
+ if (len * 2 <= USB_BUF_SZ) {
p = tmpbuf = gspca_dev->usb_buf;
- else
+ } else {
p = tmpbuf = kmalloc(len * 2, GFP_KERNEL);
+ if (!tmpbuf) {
+ err("Out of memory");
+ return;
+ }
+ }
i = len;
while (--i >= 0) {
*p++ = reg++;
@@ -642,6 +706,10 @@ static int sd_config(struct gspca_dev *gspca_dev,
sd->whitebalance = WHITE_BALANCE_DEF;
sd->sharpness = SHARPNESS_DEF;
sd->effect = EFFECTS_DEF;
+ sd->red_balance = RED_BALANCE_DEF;
+ sd->blue_balance = BLUE_BALANCE_DEF;
+ sd->global_gain = global_gain_DEF;
+
return 0;
}
@@ -693,18 +761,40 @@ static void setgamma(struct gspca_dev *gspca_dev)
reg_w_ixbuf(gspca_dev, 0x90,
gamma_table[sd->gamma], sizeof gamma_table[0]);
}
+static void setglobalgain(struct gspca_dev *gspca_dev)
+{
-static void setwhitebalance(struct gspca_dev *gspca_dev)
+ struct sd *sd = (struct sd *) gspca_dev;
+ reg_w(gspca_dev, (sd->red_balance << 8) + 0x87);
+ reg_w(gspca_dev, (sd->blue_balance << 8) + 0x88);
+ reg_w(gspca_dev, (sd->global_gain << 8) + 0x89);
+}
+
+/* Generic fnc for r/b balance, exposure and whitebalance */
+static void setbalance(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
- u8 white_balance[8] =
- {0x87, 0x20, 0x88, 0x20, 0x89, 0x20, 0x80, 0x38};
+ /* on whitebalance, leave the default values */
+ if (sd->whitebalance) {
+ reg_w(gspca_dev, 0x3c80);
+ } else {
+ reg_w(gspca_dev, 0x3880);
+ /* should we wait here? */
+ /* update and reset 'global gain' with webcam parameters */
+ sd->red_balance = reg_r(gspca_dev, 0x0087);
+ sd->blue_balance = reg_r(gspca_dev, 0x0088);
+ sd->global_gain = reg_r(gspca_dev, 0x0089);
+ setglobalgain(gspca_dev);
+ }
+
+}
+
- if (sd->whitebalance)
- white_balance[7] = 0x3c;
- reg_w_buf(gspca_dev, white_balance, sizeof white_balance);
+static void setwhitebalance(struct gspca_dev *gspca_dev)
+{
+ setbalance(gspca_dev);
}
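/*
 * Register map used by setbalance()/setglobalgain() above, as far as the
 * surrounding code shows: index 0x87 = red gain, 0x88 = blue gain,
 * 0x89 = global gain, and index 0x80 takes 0x3c to enable or 0x38 to
 * disable the automatic white balance.  When auto white balance is
 * switched off, the current gains are read back (reg_r of 0x0087..0x0089)
 * so the manual controls start from the values the camera was using.
 */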
static void setsharpness(struct gspca_dev *gspca_dev)
@@ -1018,6 +1108,66 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
gspca_frame_add(gspca_dev, INTER_PACKET, data, len);
}
+
+static int sd_setblue_balance(struct gspca_dev *gspca_dev, __s32 val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ sd->blue_balance = val;
+ if (gspca_dev->streaming)
+ reg_w(gspca_dev, (val << 8) + 0x88);
+ return 0;
+}
+
+static int sd_getblue_balance(struct gspca_dev *gspca_dev, __s32 *val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ *val = sd->blue_balance;
+ return 0;
+}
+
+static int sd_setred_balance(struct gspca_dev *gspca_dev, __s32 val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ sd->red_balance = val;
+ if (gspca_dev->streaming)
+ reg_w(gspca_dev, (val << 8) + 0x87);
+
+ return 0;
+}
+
+static int sd_getred_balance(struct gspca_dev *gspca_dev, __s32 *val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ *val = sd->red_balance;
+ return 0;
+}
+
+static int sd_setglobal_gain(struct gspca_dev *gspca_dev, __s32 val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ sd->global_gain = val;
+ if (gspca_dev->streaming)
+ setglobalgain(gspca_dev);
+
+ return 0;
+}
+
+static int sd_getglobal_gain(struct gspca_dev *gspca_dev, __s32 *val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ *val = sd->global_gain;
+ return 0;
+}
+
static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val)
{
struct sd *sd = (struct sd *) gspca_dev;
diff --git a/drivers/media/video/gspca/vc032x.c b/drivers/media/video/gspca/vc032x.c
index 4989f9afb46e..732c3dfe46ff 100644
--- a/drivers/media/video/gspca/vc032x.c
+++ b/drivers/media/video/gspca/vc032x.c
@@ -1,9 +1,9 @@
/*
- * Z-star vc0321 library
- * Copyright (C) 2006 Koninski Artur takeshi87@o2.pl
- * Copyright (C) 2006 Michel Xhaard
+ * Z-star vc0321 library
*
- * V4L2 by Jean-Francois Moine <http://moinejf.free.fr>
+ * Copyright (C) 2009-2010 Jean-François Moine <http://moinejf.free.fr>
+ * Copyright (C) 2006 Koninski Artur takeshi87@o2.pl
+ * Copyright (C) 2006 Michel Xhaard
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -24,7 +24,7 @@
#include "gspca.h"
-MODULE_AUTHOR("Michel Xhaard <mxhaard@users.sourceforge.net>");
+MODULE_AUTHOR("Jean-François Moine <http://moinejf.free.fr>");
MODULE_DESCRIPTION("GSPCA/VC032X USB Camera Driver");
MODULE_LICENSE("GPL");
@@ -1971,268 +1971,489 @@ static const u8 ov7660_NoFliker[][4] = {
{}
};
-static const u8 ov7670_initVGA_JPG[][4] = {
+static const u8 ov7670_InitVGA[][4] = {
{0xb3, 0x01, 0x05, 0xcc},
- {0x00, 0x00, 0x30, 0xdd}, {0xb0, 0x03, 0x19, 0xcc},
+ {0x00, 0x00, 0x30, 0xdd},
+ {0xb0, 0x03, 0x19, 0xcc},
+ {0x00, 0x00, 0x10, 0xdd},
+ {0xb0, 0x04, 0x02, 0xcc},
{0x00, 0x00, 0x10, 0xdd},
- {0xb0, 0x04, 0x02, 0xcc}, {0x00, 0x00, 0x10, 0xdd},
- {0xb3, 0x00, 0x66, 0xcc}, {0xb3, 0x00, 0x67, 0xcc},
+ {0xb3, 0x00, 0x66, 0xcc},
+ {0xb3, 0x00, 0x67, 0xcc},
+ {0xb0, 0x16, 0x01, 0xcc},
{0xb3, 0x35, 0xa1, 0xcc}, /* i2c add: 21 */
{0xb3, 0x34, 0x01, 0xcc},
- {0xb3, 0x05, 0x01, 0xcc}, {0xb3, 0x06, 0x01, 0xcc},
- {0xb3, 0x08, 0x01, 0xcc}, {0xb3, 0x09, 0x0c, 0xcc},
- {0xb3, 0x02, 0x02, 0xcc}, {0xb3, 0x03, 0x1f, 0xcc},
- {0xb3, 0x14, 0x00, 0xcc}, {0xb3, 0x15, 0x00, 0xcc},
- {0xb3, 0x16, 0x02, 0xcc}, {0xb3, 0x17, 0x7f, 0xcc},
- {0xb3, 0x04, 0x05, 0xcc}, {0xb3, 0x20, 0x00, 0xcc},
- {0xb3, 0x21, 0x00, 0xcc}, {0xb3, 0x22, 0x01, 0xcc},
- {0xb3, 0x23, 0xe0, 0xcc}, {0xbc, 0x00, 0x41, 0xcc},
- {0xbc, 0x01, 0x01, 0xcc}, {0x00, 0x12, 0x80, 0xaa},
- {0x00, 0x00, 0x20, 0xdd}, {0x00, 0x12, 0x00, 0xaa},
- {0x00, 0x11, 0x40, 0xaa}, {0x00, 0x6b, 0x0a, 0xaa},
- {0x00, 0x3a, 0x04, 0xaa}, {0x00, 0x40, 0xc0, 0xaa},
- {0x00, 0x8c, 0x00, 0xaa}, {0x00, 0x7a, 0x29, 0xaa},
- {0x00, 0x7b, 0x0e, 0xaa}, {0x00, 0x7c, 0x1a, 0xaa},
- {0x00, 0x7d, 0x31, 0xaa}, {0x00, 0x7e, 0x53, 0xaa},
- {0x00, 0x7f, 0x60, 0xaa}, {0x00, 0x80, 0x6b, 0xaa},
- {0x00, 0x81, 0x73, 0xaa}, {0x00, 0x82, 0x7b, 0xaa},
- {0x00, 0x83, 0x82, 0xaa}, {0x00, 0x84, 0x89, 0xaa},
- {0x00, 0x85, 0x96, 0xaa}, {0x00, 0x86, 0xa1, 0xaa},
- {0x00, 0x87, 0xb7, 0xaa}, {0x00, 0x88, 0xcc, 0xaa},
- {0x00, 0x89, 0xe1, 0xaa}, {0x00, 0x13, 0xe0, 0xaa},
- {0x00, 0x00, 0x00, 0xaa}, {0x00, 0x10, 0x00, 0xaa},
- {0x00, 0x0d, 0x40, 0xaa}, {0x00, 0x14, 0x28, 0xaa},
- {0x00, 0xa5, 0x05, 0xaa}, {0x00, 0xab, 0x07, 0xaa},
- {0x00, 0x24, 0x95, 0xaa}, {0x00, 0x25, 0x33, 0xaa},
- {0x00, 0x26, 0xe3, 0xaa}, {0x00, 0x9f, 0x88, 0xaa},
- {0x00, 0xa0, 0x78, 0xaa}, {0x00, 0x55, 0x90, 0xaa},
- {0x00, 0xa1, 0x03, 0xaa}, {0x00, 0xa6, 0xe0, 0xaa},
- {0x00, 0xa7, 0xd8, 0xaa}, {0x00, 0xa8, 0xf0, 0xaa},
- {0x00, 0xa9, 0x90, 0xaa}, {0x00, 0xaa, 0x14, 0xaa},
- {0x00, 0x13, 0xe5, 0xaa}, {0x00, 0x0e, 0x61, 0xaa},
- {0x00, 0x0f, 0x4b, 0xaa}, {0x00, 0x16, 0x02, 0xaa},
+ {0xb3, 0x05, 0x01, 0xcc},
+ {0xb3, 0x06, 0x01, 0xcc},
+ {0xb3, 0x08, 0x01, 0xcc},
+ {0xb3, 0x09, 0x0c, 0xcc},
+ {0xb3, 0x02, 0x02, 0xcc},
+ {0xb3, 0x03, 0x1f, 0xcc},
+ {0xb3, 0x14, 0x00, 0xcc},
+ {0xb3, 0x15, 0x00, 0xcc},
+ {0xb3, 0x16, 0x02, 0xcc},
+ {0xb3, 0x17, 0x7f, 0xcc},
+ {0xb3, 0x04, 0x05, 0xcc},
+ {0xb3, 0x20, 0x00, 0xcc},
+ {0xb3, 0x21, 0x00, 0xcc},
+ {0xb3, 0x22, 0x01, 0xcc},
+ {0xb3, 0x23, 0xe0, 0xcc},
+ {0xbc, 0x00, 0x41, 0xcc},
+ {0xbc, 0x01, 0x01, 0xcc},
+ {0x00, 0x12, 0x80, 0xaa},
+ {0x00, 0x00, 0x20, 0xdd},
+ {0x00, 0x12, 0x00, 0xaa},
+ {0x00, 0x11, 0x40, 0xaa},
+ {0x00, 0x6b, 0x0a, 0xaa},
+ {0x00, 0x3a, 0x04, 0xaa},
+ {0x00, 0x40, 0xc0, 0xaa},
+ {0x00, 0x8c, 0x00, 0xaa},
+ {0x00, 0x7a, 0x29, 0xaa},
+ {0x00, 0x7b, 0x0e, 0xaa},
+ {0x00, 0x7c, 0x1a, 0xaa},
+ {0x00, 0x7d, 0x31, 0xaa},
+ {0x00, 0x7e, 0x53, 0xaa},
+ {0x00, 0x7f, 0x60, 0xaa},
+ {0x00, 0x80, 0x6b, 0xaa},
+ {0x00, 0x81, 0x73, 0xaa},
+ {0x00, 0x82, 0x7b, 0xaa},
+ {0x00, 0x83, 0x82, 0xaa},
+ {0x00, 0x84, 0x89, 0xaa},
+ {0x00, 0x85, 0x96, 0xaa},
+ {0x00, 0x86, 0xa1, 0xaa},
+ {0x00, 0x87, 0xb7, 0xaa},
+ {0x00, 0x88, 0xcc, 0xaa},
+ {0x00, 0x89, 0xe1, 0xaa},
+ {0x00, 0x13, 0xe0, 0xaa},
+ {0x00, 0x00, 0x00, 0xaa},
+ {0x00, 0x10, 0x00, 0xaa},
+ {0x00, 0x0d, 0x40, 0xaa},
+ {0x00, 0x14, 0x28, 0xaa},
+ {0x00, 0xa5, 0x05, 0xaa},
+ {0x00, 0xab, 0x07, 0xaa},
+ {0x00, 0x24, 0x95, 0xaa},
+ {0x00, 0x25, 0x33, 0xaa},
+ {0x00, 0x26, 0xe3, 0xaa},
+ {0x00, 0x9f, 0x88, 0xaa},
+ {0x00, 0xa0, 0x78, 0xaa},
+ {0x00, 0x55, 0x90, 0xaa},
+ {0x00, 0xa1, 0x03, 0xaa},
+ {0x00, 0xa6, 0xe0, 0xaa},
+ {0x00, 0xa7, 0xd8, 0xaa},
+ {0x00, 0xa8, 0xf0, 0xaa},
+ {0x00, 0xa9, 0x90, 0xaa},
+ {0x00, 0xaa, 0x14, 0xaa},
+ {0x00, 0x13, 0xe5, 0xaa},
+ {0x00, 0x0e, 0x61, 0xaa},
+ {0x00, 0x0f, 0x4b, 0xaa},
+ {0x00, 0x16, 0x02, 0xaa},
{0x00, 0x1e, 0x07, 0xaa}, /* MVFP */
{0x00, 0x21, 0x02, 0xaa},
- {0x00, 0x22, 0x91, 0xaa}, {0x00, 0x29, 0x07, 0xaa},
- {0x00, 0x33, 0x0b, 0xaa}, {0x00, 0x35, 0x0b, 0xaa},
- {0x00, 0x37, 0x1d, 0xaa}, {0x00, 0x38, 0x71, 0xaa},
- {0x00, 0x39, 0x2a, 0xaa}, {0x00, 0x3c, 0x78, 0xaa},
- {0x00, 0x4d, 0x40, 0xaa}, {0x00, 0x4e, 0x20, 0xaa},
- {0x00, 0x74, 0x19, 0xaa}, {0x00, 0x8d, 0x4f, 0xaa},
- {0x00, 0x8e, 0x00, 0xaa}, {0x00, 0x8f, 0x00, 0xaa},
- {0x00, 0x90, 0x00, 0xaa}, {0x00, 0x91, 0x00, 0xaa},
- {0x00, 0x96, 0x00, 0xaa}, {0x00, 0x9a, 0x80, 0xaa},
- {0x00, 0xb0, 0x84, 0xaa}, {0x00, 0xb1, 0x0c, 0xaa},
- {0x00, 0xb2, 0x0e, 0xaa}, {0x00, 0xb3, 0x82, 0xaa},
- {0x00, 0xb8, 0x0a, 0xaa}, {0x00, 0x43, 0x14, 0xaa},
- {0x00, 0x44, 0xf0, 0xaa}, {0x00, 0x45, 0x45, 0xaa},
- {0x00, 0x46, 0x63, 0xaa}, {0x00, 0x47, 0x2d, 0xaa},
- {0x00, 0x48, 0x46, 0xaa}, {0x00, 0x59, 0x88, 0xaa},
- {0x00, 0x5a, 0xa0, 0xaa}, {0x00, 0x5b, 0xc6, 0xaa},
- {0x00, 0x5c, 0x7d, 0xaa}, {0x00, 0x5d, 0x5f, 0xaa},
- {0x00, 0x5e, 0x19, 0xaa}, {0x00, 0x6c, 0x0a, 0xaa},
- {0x00, 0x6d, 0x55, 0xaa}, {0x00, 0x6e, 0x11, 0xaa},
- {0x00, 0x6f, 0x9e, 0xaa}, {0x00, 0x69, 0x00, 0xaa},
- {0x00, 0x6a, 0x40, 0xaa}, {0x00, 0x01, 0x40, 0xaa},
- {0x00, 0x02, 0x40, 0xaa}, {0x00, 0x13, 0xe7, 0xaa},
- {0x00, 0x5f, 0xf0, 0xaa}, {0x00, 0x60, 0xf0, 0xaa},
- {0x00, 0x61, 0xf0, 0xaa}, {0x00, 0x27, 0xa0, 0xaa},
- {0x00, 0x28, 0x80, 0xaa}, {0x00, 0x2c, 0x90, 0xaa},
- {0x00, 0x4f, 0x66, 0xaa}, {0x00, 0x50, 0x66, 0xaa},
- {0x00, 0x51, 0x00, 0xaa}, {0x00, 0x52, 0x22, 0xaa},
- {0x00, 0x53, 0x5e, 0xaa}, {0x00, 0x54, 0x80, 0xaa},
- {0x00, 0x58, 0x9e, 0xaa}, {0x00, 0x41, 0x08, 0xaa},
- {0x00, 0x3f, 0x00, 0xaa}, {0x00, 0x75, 0x85, 0xaa},
- {0x00, 0x76, 0xe1, 0xaa}, {0x00, 0x4c, 0x00, 0xaa},
- {0x00, 0x77, 0x0a, 0xaa}, {0x00, 0x3d, 0x88, 0xaa},
- {0x00, 0x4b, 0x09, 0xaa}, {0x00, 0xc9, 0x60, 0xaa},
- {0x00, 0x41, 0x38, 0xaa}, {0x00, 0x62, 0x30, 0xaa},
- {0x00, 0x63, 0x30, 0xaa}, {0x00, 0x64, 0x08, 0xaa},
- {0x00, 0x94, 0x07, 0xaa}, {0x00, 0x95, 0x0b, 0xaa},
- {0x00, 0x65, 0x00, 0xaa}, {0x00, 0x66, 0x05, 0xaa},
- {0x00, 0x56, 0x50, 0xaa}, {0x00, 0x34, 0x11, 0xaa},
- {0x00, 0xa4, 0x88, 0xaa}, {0x00, 0x96, 0x00, 0xaa},
- {0x00, 0x97, 0x30, 0xaa}, {0x00, 0x98, 0x20, 0xaa},
- {0x00, 0x99, 0x30, 0xaa}, {0x00, 0x9a, 0x84, 0xaa},
- {0x00, 0x9b, 0x29, 0xaa}, {0x00, 0x9c, 0x03, 0xaa},
- {0x00, 0x78, 0x04, 0xaa}, {0x00, 0x79, 0x01, 0xaa},
- {0x00, 0xc8, 0xf0, 0xaa}, {0x00, 0x79, 0x0f, 0xaa},
- {0x00, 0xc8, 0x00, 0xaa}, {0x00, 0x79, 0x10, 0xaa},
- {0x00, 0xc8, 0x7e, 0xaa}, {0x00, 0x79, 0x0a, 0xaa},
- {0x00, 0xc8, 0x80, 0xaa}, {0x00, 0x79, 0x0b, 0xaa},
- {0x00, 0xc8, 0x01, 0xaa}, {0x00, 0x79, 0x0c, 0xaa},
- {0x00, 0xc8, 0x0f, 0xaa}, {0x00, 0x79, 0x0d, 0xaa},
- {0x00, 0xc8, 0x20, 0xaa}, {0x00, 0x79, 0x09, 0xaa},
- {0x00, 0xc8, 0x80, 0xaa}, {0x00, 0x79, 0x02, 0xaa},
- {0x00, 0xc8, 0xc0, 0xaa}, {0x00, 0x79, 0x03, 0xaa},
- {0x00, 0xc8, 0x40, 0xaa}, {0x00, 0x79, 0x05, 0xaa},
- {0x00, 0xc8, 0x30, 0xaa}, {0x00, 0x79, 0x26, 0xaa},
- {0x00, 0x11, 0x40, 0xaa}, {0x00, 0x3a, 0x04, 0xaa},
- {0x00, 0x12, 0x00, 0xaa}, {0x00, 0x40, 0xc0, 0xaa},
- {0x00, 0x8c, 0x00, 0xaa}, {0x00, 0x17, 0x14, 0xaa},
- {0x00, 0x18, 0x02, 0xaa}, {0x00, 0x32, 0x92, 0xaa},
- {0x00, 0x19, 0x02, 0xaa}, {0x00, 0x1a, 0x7a, 0xaa},
- {0x00, 0x03, 0x0a, 0xaa}, {0x00, 0x0c, 0x00, 0xaa},
- {0x00, 0x3e, 0x00, 0xaa}, {0x00, 0x70, 0x3a, 0xaa},
- {0x00, 0x71, 0x35, 0xaa}, {0x00, 0x72, 0x11, 0xaa},
- {0x00, 0x73, 0xf0, 0xaa}, {0x00, 0xa2, 0x02, 0xaa},
- {0x00, 0xb1, 0x00, 0xaa}, {0x00, 0xb1, 0x0c, 0xaa},
+ {0x00, 0x22, 0x91, 0xaa},
+ {0x00, 0x29, 0x07, 0xaa},
+ {0x00, 0x33, 0x0b, 0xaa},
+ {0x00, 0x35, 0x0b, 0xaa},
+ {0x00, 0x37, 0x1d, 0xaa},
+ {0x00, 0x38, 0x71, 0xaa},
+ {0x00, 0x39, 0x2a, 0xaa},
+ {0x00, 0x3c, 0x78, 0xaa},
+ {0x00, 0x4d, 0x40, 0xaa},
+ {0x00, 0x4e, 0x20, 0xaa},
+ {0x00, 0x74, 0x19, 0xaa},
+ {0x00, 0x8d, 0x4f, 0xaa},
+ {0x00, 0x8e, 0x00, 0xaa},
+ {0x00, 0x8f, 0x00, 0xaa},
+ {0x00, 0x90, 0x00, 0xaa},
+ {0x00, 0x91, 0x00, 0xaa},
+ {0x00, 0x96, 0x00, 0xaa},
+ {0x00, 0x9a, 0x80, 0xaa},
+ {0x00, 0xb0, 0x84, 0xaa},
+ {0x00, 0xb1, 0x0c, 0xaa},
+ {0x00, 0xb2, 0x0e, 0xaa},
+ {0x00, 0xb3, 0x82, 0xaa},
+ {0x00, 0xb8, 0x0a, 0xaa},
+ {0x00, 0x43, 0x14, 0xaa},
+ {0x00, 0x44, 0xf0, 0xaa},
+ {0x00, 0x45, 0x45, 0xaa},
+ {0x00, 0x46, 0x63, 0xaa},
+ {0x00, 0x47, 0x2d, 0xaa},
+ {0x00, 0x48, 0x46, 0xaa},
+ {0x00, 0x59, 0x88, 0xaa},
+ {0x00, 0x5a, 0xa0, 0xaa},
+ {0x00, 0x5b, 0xc6, 0xaa},
+ {0x00, 0x5c, 0x7d, 0xaa},
+ {0x00, 0x5d, 0x5f, 0xaa},
+ {0x00, 0x5e, 0x19, 0xaa},
+ {0x00, 0x6c, 0x0a, 0xaa},
+ {0x00, 0x6d, 0x55, 0xaa},
+ {0x00, 0x6e, 0x11, 0xaa},
+ {0x00, 0x6f, 0x9e, 0xaa},
+ {0x00, 0x69, 0x00, 0xaa},
+ {0x00, 0x6a, 0x40, 0xaa},
+ {0x00, 0x01, 0x40, 0xaa},
+ {0x00, 0x02, 0x40, 0xaa},
+ {0x00, 0x13, 0xe7, 0xaa},
+ {0x00, 0x5f, 0xf0, 0xaa},
+ {0x00, 0x60, 0xf0, 0xaa},
+ {0x00, 0x61, 0xf0, 0xaa},
+ {0x00, 0x27, 0xa0, 0xaa},
+ {0x00, 0x28, 0x80, 0xaa},
+ {0x00, 0x2c, 0x90, 0xaa},
+ {0x00, 0x4f, 0x66, 0xaa},
+ {0x00, 0x50, 0x66, 0xaa},
+ {0x00, 0x51, 0x00, 0xaa},
+ {0x00, 0x52, 0x22, 0xaa},
+ {0x00, 0x53, 0x5e, 0xaa},
+ {0x00, 0x54, 0x80, 0xaa},
+ {0x00, 0x58, 0x9e, 0xaa},
+ {0x00, 0x41, 0x08, 0xaa},
+ {0x00, 0x3f, 0x00, 0xaa},
+ {0x00, 0x75, 0x85, 0xaa},
+ {0x00, 0x76, 0xe1, 0xaa},
+ {0x00, 0x4c, 0x00, 0xaa},
+ {0x00, 0x77, 0x0a, 0xaa},
+ {0x00, 0x3d, 0x88, 0xaa},
+ {0x00, 0x4b, 0x09, 0xaa},
+ {0x00, 0xc9, 0x60, 0xaa},
+ {0x00, 0x41, 0x38, 0xaa},
+ {0x00, 0x62, 0x30, 0xaa},
+ {0x00, 0x63, 0x30, 0xaa},
+ {0x00, 0x64, 0x08, 0xaa},
+ {0x00, 0x94, 0x07, 0xaa},
+ {0x00, 0x95, 0x0b, 0xaa},
+ {0x00, 0x65, 0x00, 0xaa},
+ {0x00, 0x66, 0x05, 0xaa},
+ {0x00, 0x56, 0x50, 0xaa},
+ {0x00, 0x34, 0x11, 0xaa},
+ {0x00, 0xa4, 0x88, 0xaa},
+ {0x00, 0x96, 0x00, 0xaa},
+ {0x00, 0x97, 0x30, 0xaa},
+ {0x00, 0x98, 0x20, 0xaa},
+ {0x00, 0x99, 0x30, 0xaa},
+ {0x00, 0x9a, 0x84, 0xaa},
+ {0x00, 0x9b, 0x29, 0xaa},
+ {0x00, 0x9c, 0x03, 0xaa},
+ {0x00, 0x78, 0x04, 0xaa},
+ {0x00, 0x79, 0x01, 0xaa},
+ {0x00, 0xc8, 0xf0, 0xaa},
+ {0x00, 0x79, 0x0f, 0xaa},
+ {0x00, 0xc8, 0x00, 0xaa},
+ {0x00, 0x79, 0x10, 0xaa},
+ {0x00, 0xc8, 0x7e, 0xaa},
+ {0x00, 0x79, 0x0a, 0xaa},
+ {0x00, 0xc8, 0x80, 0xaa},
+ {0x00, 0x79, 0x0b, 0xaa},
+ {0x00, 0xc8, 0x01, 0xaa},
+ {0x00, 0x79, 0x0c, 0xaa},
+ {0x00, 0xc8, 0x0f, 0xaa},
+ {0x00, 0x79, 0x0d, 0xaa},
+ {0x00, 0xc8, 0x20, 0xaa},
+ {0x00, 0x79, 0x09, 0xaa},
+ {0x00, 0xc8, 0x80, 0xaa},
+ {0x00, 0x79, 0x02, 0xaa},
+ {0x00, 0xc8, 0xc0, 0xaa},
+ {0x00, 0x79, 0x03, 0xaa},
+ {0x00, 0xc8, 0x40, 0xaa},
+ {0x00, 0x79, 0x05, 0xaa},
+ {0x00, 0xc8, 0x30, 0xaa},
+ {0x00, 0x79, 0x26, 0xaa},
+ {0x00, 0x11, 0x40, 0xaa},
+ {0x00, 0x3a, 0x04, 0xaa},
+ {0x00, 0x12, 0x00, 0xaa},
+ {0x00, 0x40, 0xc0, 0xaa},
+ {0x00, 0x8c, 0x00, 0xaa},
+ {0x00, 0x17, 0x14, 0xaa},
+ {0x00, 0x18, 0x02, 0xaa},
+ {0x00, 0x32, 0x92, 0xaa},
+ {0x00, 0x19, 0x02, 0xaa},
+ {0x00, 0x1a, 0x7a, 0xaa},
+ {0x00, 0x03, 0x0a, 0xaa},
+ {0x00, 0x0c, 0x00, 0xaa},
+ {0x00, 0x3e, 0x00, 0xaa},
+ {0x00, 0x70, 0x3a, 0xaa},
+ {0x00, 0x71, 0x35, 0xaa},
+ {0x00, 0x72, 0x11, 0xaa},
+ {0x00, 0x73, 0xf0, 0xaa},
+ {0x00, 0xa2, 0x02, 0xaa},
+ {0x00, 0xb1, 0x00, 0xaa},
+ {0x00, 0xb1, 0x0c, 0xaa},
{0x00, 0x1e, 0x37, 0xaa}, /* MVFP */
{0x00, 0xaa, 0x14, 0xaa},
- {0x00, 0x24, 0x80, 0xaa}, {0x00, 0x25, 0x74, 0xaa},
- {0x00, 0x26, 0xd3, 0xaa}, {0x00, 0x0d, 0x00, 0xaa},
- {0x00, 0x14, 0x18, 0xaa}, {0x00, 0x9d, 0x99, 0xaa},
- {0x00, 0x9e, 0x7f, 0xaa}, {0x00, 0x64, 0x08, 0xaa},
- {0x00, 0x94, 0x07, 0xaa}, {0x00, 0x95, 0x06, 0xaa},
- {0x00, 0x66, 0x05, 0xaa}, {0x00, 0x41, 0x08, 0xaa},
- {0x00, 0x3f, 0x00, 0xaa}, {0x00, 0x75, 0x07, 0xaa},
- {0x00, 0x76, 0xe1, 0xaa}, {0x00, 0x4c, 0x00, 0xaa},
- {0x00, 0x77, 0x00, 0xaa}, {0x00, 0x3d, 0xc2, 0xaa},
- {0x00, 0x4b, 0x09, 0xaa}, {0x00, 0xc9, 0x60, 0xaa},
- {0x00, 0x41, 0x38, 0xaa}, {0xb6, 0x00, 0x00, 0xcc},
- {0xb6, 0x03, 0x02, 0xcc}, {0xb6, 0x02, 0x80, 0xcc},
- {0xb6, 0x05, 0x01, 0xcc}, {0xb6, 0x04, 0xe0, 0xcc},
- {0xb6, 0x12, 0xf8, 0xcc}, {0xb6, 0x13, 0x13, 0xcc},
- {0xb6, 0x18, 0x02, 0xcc}, {0xb6, 0x17, 0x58, 0xcc},
- {0xb6, 0x16, 0x00, 0xcc}, {0xb6, 0x22, 0x12, 0xcc},
- {0xb6, 0x23, 0x0b, 0xcc}, {0xbf, 0xc0, 0x39, 0xcc},
- {0xbf, 0xc1, 0x04, 0xcc}, {0xbf, 0xcc, 0x00, 0xcc},
- {0xb3, 0x5c, 0x01, 0xcc}, {0xb3, 0x01, 0x45, 0xcc},
+ {0x00, 0x24, 0x80, 0xaa},
+ {0x00, 0x25, 0x74, 0xaa},
+ {0x00, 0x26, 0xd3, 0xaa},
+ {0x00, 0x0d, 0x00, 0xaa},
+ {0x00, 0x14, 0x18, 0xaa},
+ {0x00, 0x9d, 0x99, 0xaa},
+ {0x00, 0x9e, 0x7f, 0xaa},
+ {0x00, 0x64, 0x08, 0xaa},
+ {0x00, 0x94, 0x07, 0xaa},
+ {0x00, 0x95, 0x06, 0xaa},
+ {0x00, 0x66, 0x05, 0xaa},
+ {0x00, 0x41, 0x08, 0xaa},
+ {0x00, 0x3f, 0x00, 0xaa},
+ {0x00, 0x75, 0x07, 0xaa},
+ {0x00, 0x76, 0xe1, 0xaa},
+ {0x00, 0x4c, 0x00, 0xaa},
+ {0x00, 0x77, 0x00, 0xaa},
+ {0x00, 0x3d, 0xc2, 0xaa},
+ {0x00, 0x4b, 0x09, 0xaa},
+ {0x00, 0xc9, 0x60, 0xaa},
+ {0x00, 0x41, 0x38, 0xaa},
+ {0xbf, 0xc0, 0x26, 0xcc},
+ {0xbf, 0xc1, 0x02, 0xcc},
+ {0xbf, 0xcc, 0x04, 0xcc},
+ {0xb3, 0x5c, 0x01, 0xcc},
+ {0xb3, 0x01, 0x45, 0xcc},
{0x00, 0x77, 0x05, 0xaa},
{},
};
-static const u8 ov7670_initQVGA_JPG[][4] = {
- {0xb3, 0x01, 0x05, 0xcc}, {0x00, 0x00, 0x30, 0xdd},
- {0xb0, 0x03, 0x19, 0xcc}, {0x00, 0x00, 0x10, 0xdd},
- {0xb0, 0x04, 0x02, 0xcc}, {0x00, 0x00, 0x10, 0xdd},
- {0xb3, 0x00, 0x66, 0xcc}, {0xb3, 0x00, 0x67, 0xcc},
- {0xb3, 0x35, 0xa1, 0xcc}, {0xb3, 0x34, 0x01, 0xcc},
- {0xb3, 0x05, 0x01, 0xcc}, {0xb3, 0x06, 0x01, 0xcc},
- {0xb3, 0x08, 0x01, 0xcc}, {0xb3, 0x09, 0x0c, 0xcc},
- {0xb3, 0x02, 0x02, 0xcc}, {0xb3, 0x03, 0x1f, 0xcc},
- {0xb3, 0x14, 0x00, 0xcc}, {0xb3, 0x15, 0x00, 0xcc},
- {0xb3, 0x16, 0x02, 0xcc}, {0xb3, 0x17, 0x7f, 0xcc},
- {0xb3, 0x04, 0x05, 0xcc}, {0xb3, 0x20, 0x00, 0xcc},
- {0xb3, 0x21, 0x00, 0xcc}, {0xb3, 0x22, 0x01, 0xcc},
- {0xb3, 0x23, 0xe0, 0xcc}, {0xbc, 0x00, 0xd1, 0xcc},
- {0xbc, 0x01, 0x01, 0xcc}, {0x00, 0x12, 0x80, 0xaa},
- {0x00, 0x00, 0x20, 0xdd}, {0x00, 0x12, 0x00, 0xaa},
- {0x00, 0x11, 0x40, 0xaa}, {0x00, 0x6b, 0x0a, 0xaa},
- {0x00, 0x3a, 0x04, 0xaa}, {0x00, 0x40, 0xc0, 0xaa},
- {0x00, 0x8c, 0x00, 0xaa}, {0x00, 0x7a, 0x29, 0xaa},
- {0x00, 0x7b, 0x0e, 0xaa}, {0x00, 0x7c, 0x1a, 0xaa},
- {0x00, 0x7d, 0x31, 0xaa}, {0x00, 0x7e, 0x53, 0xaa},
- {0x00, 0x7f, 0x60, 0xaa}, {0x00, 0x80, 0x6b, 0xaa},
- {0x00, 0x81, 0x73, 0xaa}, {0x00, 0x82, 0x7b, 0xaa},
- {0x00, 0x83, 0x82, 0xaa}, {0x00, 0x84, 0x89, 0xaa},
- {0x00, 0x85, 0x96, 0xaa}, {0x00, 0x86, 0xa1, 0xaa},
- {0x00, 0x87, 0xb7, 0xaa}, {0x00, 0x88, 0xcc, 0xaa},
- {0x00, 0x89, 0xe1, 0xaa}, {0x00, 0x13, 0xe0, 0xaa},
- {0x00, 0x00, 0x00, 0xaa}, {0x00, 0x10, 0x00, 0xaa},
- {0x00, 0x0d, 0x40, 0xaa}, {0x00, 0x14, 0x28, 0xaa},
- {0x00, 0xa5, 0x05, 0xaa}, {0x00, 0xab, 0x07, 0xaa},
- {0x00, 0x24, 0x95, 0xaa}, {0x00, 0x25, 0x33, 0xaa},
- {0x00, 0x26, 0xe3, 0xaa}, {0x00, 0x9f, 0x88, 0xaa},
- {0x00, 0xa0, 0x78, 0xaa}, {0x00, 0x55, 0x90, 0xaa},
- {0x00, 0xa1, 0x03, 0xaa}, {0x00, 0xa6, 0xe0, 0xaa},
- {0x00, 0xa7, 0xd8, 0xaa}, {0x00, 0xa8, 0xf0, 0xaa},
- {0x00, 0xa9, 0x90, 0xaa}, {0x00, 0xaa, 0x14, 0xaa},
- {0x00, 0x13, 0xe5, 0xaa}, {0x00, 0x0e, 0x61, 0xaa},
- {0x00, 0x0f, 0x4b, 0xaa}, {0x00, 0x16, 0x02, 0xaa},
+static const u8 ov7670_InitQVGA[][4] = {
+ {0xb3, 0x01, 0x05, 0xcc},
+ {0x00, 0x00, 0x30, 0xdd},
+ {0xb0, 0x03, 0x19, 0xcc},
+ {0x00, 0x00, 0x10, 0xdd},
+ {0xb0, 0x04, 0x02, 0xcc},
+ {0x00, 0x00, 0x10, 0xdd},
+ {0xb3, 0x00, 0x66, 0xcc},
+ {0xb3, 0x00, 0x67, 0xcc},
+ {0xb0, 0x16, 0x01, 0xcc},
+ {0xb3, 0x35, 0xa1, 0xcc}, /* i2c add: 21 */
+ {0xb3, 0x34, 0x01, 0xcc},
+ {0xb3, 0x05, 0x01, 0xcc},
+ {0xb3, 0x06, 0x01, 0xcc},
+ {0xb3, 0x08, 0x01, 0xcc},
+ {0xb3, 0x09, 0x0c, 0xcc},
+ {0xb3, 0x02, 0x02, 0xcc},
+ {0xb3, 0x03, 0x1f, 0xcc},
+ {0xb3, 0x14, 0x00, 0xcc},
+ {0xb3, 0x15, 0x00, 0xcc},
+ {0xb3, 0x16, 0x02, 0xcc},
+ {0xb3, 0x17, 0x7f, 0xcc},
+ {0xb3, 0x04, 0x05, 0xcc},
+ {0xb3, 0x20, 0x00, 0xcc},
+ {0xb3, 0x21, 0x00, 0xcc},
+ {0xb3, 0x22, 0x01, 0xcc},
+ {0xb3, 0x23, 0xe0, 0xcc},
+ {0xbc, 0x00, 0xd1, 0xcc},
+ {0xbc, 0x01, 0x01, 0xcc},
+ {0x00, 0x12, 0x80, 0xaa},
+ {0x00, 0x00, 0x20, 0xdd},
+ {0x00, 0x12, 0x00, 0xaa},
+ {0x00, 0x11, 0x40, 0xaa},
+ {0x00, 0x6b, 0x0a, 0xaa},
+ {0x00, 0x3a, 0x04, 0xaa},
+ {0x00, 0x40, 0xc0, 0xaa},
+ {0x00, 0x8c, 0x00, 0xaa},
+ {0x00, 0x7a, 0x29, 0xaa},
+ {0x00, 0x7b, 0x0e, 0xaa},
+ {0x00, 0x7c, 0x1a, 0xaa},
+ {0x00, 0x7d, 0x31, 0xaa},
+ {0x00, 0x7e, 0x53, 0xaa},
+ {0x00, 0x7f, 0x60, 0xaa},
+ {0x00, 0x80, 0x6b, 0xaa},
+ {0x00, 0x81, 0x73, 0xaa},
+ {0x00, 0x82, 0x7b, 0xaa},
+ {0x00, 0x83, 0x82, 0xaa},
+ {0x00, 0x84, 0x89, 0xaa},
+ {0x00, 0x85, 0x96, 0xaa},
+ {0x00, 0x86, 0xa1, 0xaa},
+ {0x00, 0x87, 0xb7, 0xaa},
+ {0x00, 0x88, 0xcc, 0xaa},
+ {0x00, 0x89, 0xe1, 0xaa},
+ {0x00, 0x13, 0xe0, 0xaa},
+ {0x00, 0x00, 0x00, 0xaa},
+ {0x00, 0x10, 0x00, 0xaa},
+ {0x00, 0x0d, 0x40, 0xaa},
+ {0x00, 0x14, 0x28, 0xaa},
+ {0x00, 0xa5, 0x05, 0xaa},
+ {0x00, 0xab, 0x07, 0xaa},
+ {0x00, 0x24, 0x95, 0xaa},
+ {0x00, 0x25, 0x33, 0xaa},
+ {0x00, 0x26, 0xe3, 0xaa},
+ {0x00, 0x9f, 0x88, 0xaa},
+ {0x00, 0xa0, 0x78, 0xaa},
+ {0x00, 0x55, 0x90, 0xaa},
+ {0x00, 0xa1, 0x03, 0xaa},
+ {0x00, 0xa6, 0xe0, 0xaa},
+ {0x00, 0xa7, 0xd8, 0xaa},
+ {0x00, 0xa8, 0xf0, 0xaa},
+ {0x00, 0xa9, 0x90, 0xaa},
+ {0x00, 0xaa, 0x14, 0xaa},
+ {0x00, 0x13, 0xe5, 0xaa},
+ {0x00, 0x0e, 0x61, 0xaa},
+ {0x00, 0x0f, 0x4b, 0xaa},
+ {0x00, 0x16, 0x02, 0xaa},
{0x00, 0x1e, 0x07, 0xaa}, /* MVFP */
{0x00, 0x21, 0x02, 0xaa},
- {0x00, 0x22, 0x91, 0xaa}, {0x00, 0x29, 0x07, 0xaa},
- {0x00, 0x33, 0x0b, 0xaa}, {0x00, 0x35, 0x0b, 0xaa},
- {0x00, 0x37, 0x1d, 0xaa}, {0x00, 0x38, 0x71, 0xaa},
- {0x00, 0x39, 0x2a, 0xaa}, {0x00, 0x3c, 0x78, 0xaa},
- {0x00, 0x4d, 0x40, 0xaa}, {0x00, 0x4e, 0x20, 0xaa},
- {0x00, 0x74, 0x19, 0xaa}, {0x00, 0x8d, 0x4f, 0xaa},
- {0x00, 0x8e, 0x00, 0xaa}, {0x00, 0x8f, 0x00, 0xaa},
- {0x00, 0x90, 0x00, 0xaa}, {0x00, 0x91, 0x00, 0xaa},
- {0x00, 0x96, 0x00, 0xaa}, {0x00, 0x9a, 0x80, 0xaa},
- {0x00, 0xb0, 0x84, 0xaa}, {0x00, 0xb1, 0x0c, 0xaa},
- {0x00, 0xb2, 0x0e, 0xaa}, {0x00, 0xb3, 0x82, 0xaa},
- {0x00, 0xb8, 0x0a, 0xaa}, {0x00, 0x43, 0x14, 0xaa},
- {0x00, 0x44, 0xf0, 0xaa}, {0x00, 0x45, 0x45, 0xaa},
- {0x00, 0x46, 0x63, 0xaa}, {0x00, 0x47, 0x2d, 0xaa},
- {0x00, 0x48, 0x46, 0xaa}, {0x00, 0x59, 0x88, 0xaa},
- {0x00, 0x5a, 0xa0, 0xaa}, {0x00, 0x5b, 0xc6, 0xaa},
- {0x00, 0x5c, 0x7d, 0xaa}, {0x00, 0x5d, 0x5f, 0xaa},
- {0x00, 0x5e, 0x19, 0xaa}, {0x00, 0x6c, 0x0a, 0xaa},
- {0x00, 0x6d, 0x55, 0xaa}, {0x00, 0x6e, 0x11, 0xaa},
- {0x00, 0x6f, 0x9e, 0xaa}, {0x00, 0x69, 0x00, 0xaa},
- {0x00, 0x6a, 0x40, 0xaa}, {0x00, 0x01, 0x40, 0xaa},
- {0x00, 0x02, 0x40, 0xaa}, {0x00, 0x13, 0xe7, 0xaa},
- {0x00, 0x5f, 0xf0, 0xaa}, {0x00, 0x60, 0xf0, 0xaa},
- {0x00, 0x61, 0xf0, 0xaa}, {0x00, 0x27, 0xa0, 0xaa},
- {0x00, 0x28, 0x80, 0xaa}, {0x00, 0x2c, 0x90, 0xaa},
- {0x00, 0x4f, 0x66, 0xaa}, {0x00, 0x50, 0x66, 0xaa},
- {0x00, 0x51, 0x00, 0xaa}, {0x00, 0x52, 0x22, 0xaa},
- {0x00, 0x53, 0x5e, 0xaa}, {0x00, 0x54, 0x80, 0xaa},
- {0x00, 0x58, 0x9e, 0xaa}, {0x00, 0x41, 0x08, 0xaa},
- {0x00, 0x3f, 0x00, 0xaa}, {0x00, 0x75, 0x85, 0xaa},
- {0x00, 0x76, 0xe1, 0xaa}, {0x00, 0x4c, 0x00, 0xaa},
- {0x00, 0x77, 0x0a, 0xaa}, {0x00, 0x3d, 0x88, 0xaa},
- {0x00, 0x4b, 0x09, 0xaa}, {0x00, 0xc9, 0x60, 0xaa},
- {0x00, 0x41, 0x38, 0xaa}, {0x00, 0x62, 0x30, 0xaa},
- {0x00, 0x63, 0x30, 0xaa}, {0x00, 0x64, 0x08, 0xaa},
- {0x00, 0x94, 0x07, 0xaa}, {0x00, 0x95, 0x0b, 0xaa},
- {0x00, 0x65, 0x00, 0xaa}, {0x00, 0x66, 0x05, 0xaa},
- {0x00, 0x56, 0x50, 0xaa}, {0x00, 0x34, 0x11, 0xaa},
- {0x00, 0xa4, 0x88, 0xaa}, {0x00, 0x96, 0x00, 0xaa},
- {0x00, 0x97, 0x30, 0xaa}, {0x00, 0x98, 0x20, 0xaa},
- {0x00, 0x99, 0x30, 0xaa}, {0x00, 0x9a, 0x84, 0xaa},
- {0x00, 0x9b, 0x29, 0xaa}, {0x00, 0x9c, 0x03, 0xaa},
- {0x00, 0x78, 0x04, 0xaa}, {0x00, 0x79, 0x01, 0xaa},
- {0x00, 0xc8, 0xf0, 0xaa}, {0x00, 0x79, 0x0f, 0xaa},
- {0x00, 0xc8, 0x00, 0xaa}, {0x00, 0x79, 0x10, 0xaa},
- {0x00, 0xc8, 0x7e, 0xaa}, {0x00, 0x79, 0x0a, 0xaa},
- {0x00, 0xc8, 0x80, 0xaa}, {0x00, 0x79, 0x0b, 0xaa},
- {0x00, 0xc8, 0x01, 0xaa}, {0x00, 0x79, 0x0c, 0xaa},
- {0x00, 0xc8, 0x0f, 0xaa}, {0x00, 0x79, 0x0d, 0xaa},
- {0x00, 0xc8, 0x20, 0xaa}, {0x00, 0x79, 0x09, 0xaa},
- {0x00, 0xc8, 0x80, 0xaa}, {0x00, 0x79, 0x02, 0xaa},
- {0x00, 0xc8, 0xc0, 0xaa}, {0x00, 0x79, 0x03, 0xaa},
- {0x00, 0xc8, 0x40, 0xaa}, {0x00, 0x79, 0x05, 0xaa},
- {0x00, 0xc8, 0x30, 0xaa}, {0x00, 0x79, 0x26, 0xaa},
- {0x00, 0x11, 0x40, 0xaa}, {0x00, 0x3a, 0x04, 0xaa},
- {0x00, 0x12, 0x00, 0xaa}, {0x00, 0x40, 0xc0, 0xaa},
- {0x00, 0x8c, 0x00, 0xaa}, {0x00, 0x17, 0x14, 0xaa},
- {0x00, 0x18, 0x02, 0xaa}, {0x00, 0x32, 0x92, 0xaa},
- {0x00, 0x19, 0x02, 0xaa}, {0x00, 0x1a, 0x7a, 0xaa},
- {0x00, 0x03, 0x0a, 0xaa}, {0x00, 0x0c, 0x00, 0xaa},
- {0x00, 0x3e, 0x00, 0xaa}, {0x00, 0x70, 0x3a, 0xaa},
- {0x00, 0x71, 0x35, 0xaa}, {0x00, 0x72, 0x11, 0xaa},
- {0x00, 0x73, 0xf0, 0xaa}, {0x00, 0xa2, 0x02, 0xaa},
- {0x00, 0xb1, 0x00, 0xaa}, {0x00, 0xb1, 0x0c, 0xaa},
+ {0x00, 0x22, 0x91, 0xaa},
+ {0x00, 0x29, 0x07, 0xaa},
+ {0x00, 0x33, 0x0b, 0xaa},
+ {0x00, 0x35, 0x0b, 0xaa},
+ {0x00, 0x37, 0x1d, 0xaa},
+ {0x00, 0x38, 0x71, 0xaa},
+ {0x00, 0x39, 0x2a, 0xaa},
+ {0x00, 0x3c, 0x78, 0xaa},
+ {0x00, 0x4d, 0x40, 0xaa},
+ {0x00, 0x4e, 0x20, 0xaa},
+ {0x00, 0x74, 0x19, 0xaa},
+ {0x00, 0x8d, 0x4f, 0xaa},
+ {0x00, 0x8e, 0x00, 0xaa},
+ {0x00, 0x8f, 0x00, 0xaa},
+ {0x00, 0x90, 0x00, 0xaa},
+ {0x00, 0x91, 0x00, 0xaa},
+ {0x00, 0x96, 0x00, 0xaa},
+ {0x00, 0x9a, 0x80, 0xaa},
+ {0x00, 0xb0, 0x84, 0xaa},
+ {0x00, 0xb1, 0x0c, 0xaa},
+ {0x00, 0xb2, 0x0e, 0xaa},
+ {0x00, 0xb3, 0x82, 0xaa},
+ {0x00, 0xb8, 0x0a, 0xaa},
+ {0x00, 0x43, 0x14, 0xaa},
+ {0x00, 0x44, 0xf0, 0xaa},
+ {0x00, 0x45, 0x45, 0xaa},
+ {0x00, 0x46, 0x63, 0xaa},
+ {0x00, 0x47, 0x2d, 0xaa},
+ {0x00, 0x48, 0x46, 0xaa},
+ {0x00, 0x59, 0x88, 0xaa},
+ {0x00, 0x5a, 0xa0, 0xaa},
+ {0x00, 0x5b, 0xc6, 0xaa},
+ {0x00, 0x5c, 0x7d, 0xaa},
+ {0x00, 0x5d, 0x5f, 0xaa},
+ {0x00, 0x5e, 0x19, 0xaa},
+ {0x00, 0x6c, 0x0a, 0xaa},
+ {0x00, 0x6d, 0x55, 0xaa},
+ {0x00, 0x6e, 0x11, 0xaa},
+ {0x00, 0x6f, 0x9e, 0xaa},
+ {0x00, 0x69, 0x00, 0xaa},
+ {0x00, 0x6a, 0x40, 0xaa},
+ {0x00, 0x01, 0x40, 0xaa},
+ {0x00, 0x02, 0x40, 0xaa},
+ {0x00, 0x13, 0xe7, 0xaa},
+ {0x00, 0x5f, 0xf0, 0xaa},
+ {0x00, 0x60, 0xf0, 0xaa},
+ {0x00, 0x61, 0xf0, 0xaa},
+ {0x00, 0x27, 0xa0, 0xaa},
+ {0x00, 0x28, 0x80, 0xaa},
+ {0x00, 0x2c, 0x90, 0xaa},
+ {0x00, 0x4f, 0x66, 0xaa},
+ {0x00, 0x50, 0x66, 0xaa},
+ {0x00, 0x51, 0x00, 0xaa},
+ {0x00, 0x52, 0x22, 0xaa},
+ {0x00, 0x53, 0x5e, 0xaa},
+ {0x00, 0x54, 0x80, 0xaa},
+ {0x00, 0x58, 0x9e, 0xaa},
+ {0x00, 0x41, 0x08, 0xaa},
+ {0x00, 0x3f, 0x00, 0xaa},
+ {0x00, 0x75, 0x85, 0xaa},
+ {0x00, 0x76, 0xe1, 0xaa},
+ {0x00, 0x4c, 0x00, 0xaa},
+ {0x00, 0x77, 0x0a, 0xaa},
+ {0x00, 0x3d, 0x88, 0xaa},
+ {0x00, 0x4b, 0x09, 0xaa},
+ {0x00, 0xc9, 0x60, 0xaa},
+ {0x00, 0x41, 0x38, 0xaa},
+ {0x00, 0x62, 0x30, 0xaa},
+ {0x00, 0x63, 0x30, 0xaa},
+ {0x00, 0x64, 0x08, 0xaa},
+ {0x00, 0x94, 0x07, 0xaa},
+ {0x00, 0x95, 0x0b, 0xaa},
+ {0x00, 0x65, 0x00, 0xaa},
+ {0x00, 0x66, 0x05, 0xaa},
+ {0x00, 0x56, 0x50, 0xaa},
+ {0x00, 0x34, 0x11, 0xaa},
+ {0x00, 0xa4, 0x88, 0xaa},
+ {0x00, 0x96, 0x00, 0xaa},
+ {0x00, 0x97, 0x30, 0xaa},
+ {0x00, 0x98, 0x20, 0xaa},
+ {0x00, 0x99, 0x30, 0xaa},
+ {0x00, 0x9a, 0x84, 0xaa},
+ {0x00, 0x9b, 0x29, 0xaa},
+ {0x00, 0x9c, 0x03, 0xaa},
+ {0x00, 0x78, 0x04, 0xaa},
+ {0x00, 0x79, 0x01, 0xaa},
+ {0x00, 0xc8, 0xf0, 0xaa},
+ {0x00, 0x79, 0x0f, 0xaa},
+ {0x00, 0xc8, 0x00, 0xaa},
+ {0x00, 0x79, 0x10, 0xaa},
+ {0x00, 0xc8, 0x7e, 0xaa},
+ {0x00, 0x79, 0x0a, 0xaa},
+ {0x00, 0xc8, 0x80, 0xaa},
+ {0x00, 0x79, 0x0b, 0xaa},
+ {0x00, 0xc8, 0x01, 0xaa},
+ {0x00, 0x79, 0x0c, 0xaa},
+ {0x00, 0xc8, 0x0f, 0xaa},
+ {0x00, 0x79, 0x0d, 0xaa},
+ {0x00, 0xc8, 0x20, 0xaa},
+ {0x00, 0x79, 0x09, 0xaa},
+ {0x00, 0xc8, 0x80, 0xaa},
+ {0x00, 0x79, 0x02, 0xaa},
+ {0x00, 0xc8, 0xc0, 0xaa},
+ {0x00, 0x79, 0x03, 0xaa},
+ {0x00, 0xc8, 0x40, 0xaa},
+ {0x00, 0x79, 0x05, 0xaa},
+ {0x00, 0xc8, 0x30, 0xaa},
+ {0x00, 0x79, 0x26, 0xaa},
+ {0x00, 0x11, 0x40, 0xaa},
+ {0x00, 0x3a, 0x04, 0xaa},
+ {0x00, 0x12, 0x00, 0xaa},
+ {0x00, 0x40, 0xc0, 0xaa},
+ {0x00, 0x8c, 0x00, 0xaa},
+ {0x00, 0x17, 0x14, 0xaa},
+ {0x00, 0x18, 0x02, 0xaa},
+ {0x00, 0x32, 0x92, 0xaa},
+ {0x00, 0x19, 0x02, 0xaa},
+ {0x00, 0x1a, 0x7a, 0xaa},
+ {0x00, 0x03, 0x0a, 0xaa},
+ {0x00, 0x0c, 0x00, 0xaa},
+ {0x00, 0x3e, 0x00, 0xaa},
+ {0x00, 0x70, 0x3a, 0xaa},
+ {0x00, 0x71, 0x35, 0xaa},
+ {0x00, 0x72, 0x11, 0xaa},
+ {0x00, 0x73, 0xf0, 0xaa},
+ {0x00, 0xa2, 0x02, 0xaa},
+ {0x00, 0xb1, 0x00, 0xaa},
+ {0x00, 0xb1, 0x0c, 0xaa},
{0x00, 0x1e, 0x37, 0xaa}, /* MVFP */
{0x00, 0xaa, 0x14, 0xaa},
- {0x00, 0x24, 0x80, 0xaa}, {0x00, 0x25, 0x74, 0xaa},
- {0x00, 0x26, 0xd3, 0xaa}, {0x00, 0x0d, 0x00, 0xaa},
- {0x00, 0x14, 0x18, 0xaa}, {0x00, 0x9d, 0x99, 0xaa},
- {0x00, 0x9e, 0x7f, 0xaa}, {0x00, 0x64, 0x08, 0xaa},
- {0x00, 0x94, 0x07, 0xaa}, {0x00, 0x95, 0x06, 0xaa},
- {0x00, 0x66, 0x05, 0xaa}, {0x00, 0x41, 0x08, 0xaa},
- {0x00, 0x3f, 0x00, 0xaa}, {0x00, 0x75, 0x07, 0xaa},
- {0x00, 0x76, 0xe1, 0xaa}, {0x00, 0x4c, 0x00, 0xaa},
- {0x00, 0x77, 0x00, 0xaa}, {0x00, 0x3d, 0xc2, 0xaa},
- {0x00, 0x4b, 0x09, 0xaa}, {0x00, 0xc9, 0x60, 0xaa},
- {0x00, 0x41, 0x38, 0xaa}, {0xb6, 0x00, 0x00, 0xcc},
- {0xb6, 0x03, 0x01, 0xcc}, {0xb6, 0x02, 0x40, 0xcc},
- {0xb6, 0x05, 0x00, 0xcc}, {0xb6, 0x04, 0xf0, 0xcc},
- {0xb6, 0x12, 0xf8, 0xcc}, {0xb6, 0x13, 0x21, 0xcc},
- {0xb6, 0x18, 0x00, 0xcc}, {0xb6, 0x17, 0x96, 0xcc},
- {0xb6, 0x16, 0x00, 0xcc}, {0xb6, 0x22, 0x12, 0xcc},
- {0xb6, 0x23, 0x0b, 0xcc}, {0xbf, 0xc0, 0x39, 0xcc},
- {0xbf, 0xc1, 0x04, 0xcc}, {0xbf, 0xcc, 0x00, 0xcc},
- {0xbc, 0x02, 0x18, 0xcc}, {0xbc, 0x03, 0x50, 0xcc},
- {0xbc, 0x04, 0x18, 0xcc}, {0xbc, 0x05, 0x00, 0xcc},
- {0xbc, 0x06, 0x00, 0xcc}, {0xbc, 0x08, 0x30, 0xcc},
- {0xbc, 0x09, 0x40, 0xcc}, {0xbc, 0x0a, 0x10, 0xcc},
- {0xbc, 0x0b, 0x00, 0xcc}, {0xbc, 0x0c, 0x00, 0xcc},
- {0xb3, 0x5c, 0x01, 0xcc}, {0xb3, 0x01, 0x45, 0xcc},
- {0x00, 0x77, 0x05, 0xaa },
+ {0x00, 0x24, 0x80, 0xaa},
+ {0x00, 0x25, 0x74, 0xaa},
+ {0x00, 0x26, 0xd3, 0xaa},
+ {0x00, 0x0d, 0x00, 0xaa},
+ {0x00, 0x14, 0x18, 0xaa},
+ {0x00, 0x9d, 0x99, 0xaa},
+ {0x00, 0x9e, 0x7f, 0xaa},
+ {0x00, 0x64, 0x08, 0xaa},
+ {0x00, 0x94, 0x07, 0xaa},
+ {0x00, 0x95, 0x06, 0xaa},
+ {0x00, 0x66, 0x05, 0xaa},
+ {0x00, 0x41, 0x08, 0xaa},
+ {0x00, 0x3f, 0x00, 0xaa},
+ {0x00, 0x75, 0x07, 0xaa},
+ {0x00, 0x76, 0xe1, 0xaa},
+ {0x00, 0x4c, 0x00, 0xaa},
+ {0x00, 0x77, 0x00, 0xaa},
+ {0x00, 0x3d, 0xc2, 0xaa},
+ {0x00, 0x4b, 0x09, 0xaa},
+ {0x00, 0xc9, 0x60, 0xaa},
+ {0x00, 0x41, 0x38, 0xaa},
+ {0xbc, 0x02, 0x18, 0xcc},
+ {0xbc, 0x03, 0x50, 0xcc},
+ {0xbc, 0x04, 0x18, 0xcc},
+ {0xbc, 0x05, 0x00, 0xcc},
+ {0xbc, 0x06, 0x00, 0xcc},
+ {0xbc, 0x08, 0x30, 0xcc},
+ {0xbc, 0x09, 0x40, 0xcc},
+ {0xbc, 0x0a, 0x10, 0xcc},
+ {0xbc, 0x0b, 0x00, 0xcc},
+ {0xbc, 0x0c, 0x00, 0xcc},
+ {0xbf, 0xc0, 0x26, 0xcc},
+ {0xbf, 0xc1, 0x02, 0xcc},
+ {0xbf, 0xcc, 0x04, 0xcc},
+ {0xb3, 0x5c, 0x01, 0xcc},
+ {0xb3, 0x01, 0x45, 0xcc},
+ {0x00, 0x77, 0x05, 0xaa},
{},
};
@@ -3117,6 +3338,10 @@ static int sd_config(struct gspca_dev *gspca_dev,
cam->cam_mode = bi_mode;
cam->nmodes = ARRAY_SIZE(bi_mode);
break;
+ case SENSOR_OV7670:
+ cam->cam_mode = bi_mode;
+ cam->nmodes = ARRAY_SIZE(bi_mode) - 1;
+ break;
default:
cam->cam_mode = vc0323_mode;
cam->nmodes = ARRAY_SIZE(vc0323_mode) - 1;
@@ -3329,14 +3554,6 @@ static int sd_start(struct gspca_dev *gspca_dev)
else
init = ov7660_initVGA_data; /* 640x480 */
break;
- case SENSOR_OV7670:
- /*GammaT = ov7660_gamma; */
- /*MatrixT = ov7660_matrix; */
- if (mode)
- init = ov7670_initQVGA_JPG; /* 320x240 */
- else
- init = ov7670_initVGA_JPG; /* 640x480 */
- break;
case SENSOR_MI0360:
GammaT = mi1320_gamma;
MatrixT = mi0360_matrix;
@@ -3373,6 +3590,9 @@ static int sd_start(struct gspca_dev *gspca_dev)
MatrixT = mi1320_matrix;
init = mi1320_soc_init[mode];
break;
+ case SENSOR_OV7670:
+ init = mode == 1 ? ov7670_InitVGA : ov7670_InitQVGA;
+ break;
case SENSOR_PO3130NC:
GammaT = po3130_gamma;
MatrixT = po3130_matrix;
@@ -3426,7 +3646,13 @@ static int sd_start(struct gspca_dev *gspca_dev)
sethvflip(gspca_dev);
setlightfreq(gspca_dev);
}
- if (sd->sensor == SENSOR_POxxxx) {
+ switch (sd->sensor) {
+ case SENSOR_OV7670:
+ reg_w(gspca_dev->dev, 0x87, 0xffff, 0xffff);
+ reg_w(gspca_dev->dev, 0x88, 0xff00, 0xf0f1);
+ reg_w(gspca_dev->dev, 0xa0, 0x0000, 0xbfff);
+ break;
+ case SENSOR_POxxxx:
setcolors(gspca_dev);
setbrightness(gspca_dev);
setcontrast(gspca_dev);
@@ -3435,6 +3661,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
msleep(80);
reg_w(gspca_dev->dev, 0x89, 0xffff, 0xfdff);
usb_exchange(gspca_dev, poxxxx_init_end_2);
+ break;
}
return 0;
}
diff --git a/drivers/media/video/gspca/zc3xx.c b/drivers/media/video/gspca/zc3xx.c
index 7d7814c43f92..d02aa5c8472a 100644
--- a/drivers/media/video/gspca/zc3xx.c
+++ b/drivers/media/video/gspca/zc3xx.c
@@ -40,7 +40,6 @@ static int force_sensor = -1;
struct sd {
struct gspca_dev gspca_dev; /* !! must be the first item */
- u8 brightness;
u8 contrast;
u8 gamma;
u8 autogain;
@@ -80,8 +79,6 @@ struct sd {
};
/* V4L2 controls supported by the driver */
-static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val);
static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val);
static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val);
static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val);
@@ -94,21 +91,6 @@ static int sd_setsharpness(struct gspca_dev *gspca_dev, __s32 val);
static int sd_getsharpness(struct gspca_dev *gspca_dev, __s32 *val);
static const struct ctrl sd_ctrls[] = {
-#define BRIGHTNESS_IDX 0
- {
- {
- .id = V4L2_CID_BRIGHTNESS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Brightness",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
-#define BRIGHTNESS_DEF 128
- .default_value = BRIGHTNESS_DEF,
- },
- .set = sd_setbrightness,
- .get = sd_getbrightness,
- },
{
{
.id = V4L2_CID_CONTRAST,
@@ -150,7 +132,7 @@ static const struct ctrl sd_ctrls[] = {
.set = sd_setautogain,
.get = sd_getautogain,
},
-#define LIGHTFREQ_IDX 4
+#define LIGHTFREQ_IDX 3
{
{
.id = V4L2_CID_POWER_LINE_FREQUENCY,
@@ -6004,33 +5986,6 @@ static void setmatrix(struct gspca_dev *gspca_dev)
reg_w(gspca_dev->dev, matrix[i], 0x010a + i);
}
-static void setbrightness(struct gspca_dev *gspca_dev)
-{
- struct sd *sd = (struct sd *) gspca_dev;
- u8 brightness;
-
- switch (sd->sensor) {
- case SENSOR_GC0305:
- case SENSOR_OV7620:
- case SENSOR_PAS202B:
- case SENSOR_PO2030:
- return;
- }
-/*fixme: is it really write to 011d and 018d for all other sensors? */
- brightness = sd->brightness;
- reg_w(gspca_dev->dev, brightness, 0x011d);
- switch (sd->sensor) {
- case SENSOR_ADCM2700:
- case SENSOR_HV7131B:
- return;
- }
- if (brightness < 0x70)
- brightness += 0x10;
- else
- brightness = 0x80;
- reg_w(gspca_dev->dev, brightness, 0x018d);
-}
-
static void setsharpness(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -6059,8 +6014,8 @@ static void setcontrast(struct gspca_dev *gspca_dev)
int g, i, k, adj, gp;
u8 gr[16];
static const u8 delta_tb[16] = /* delta for contrast */
- {0x15, 0x0d, 0x0a, 0x09, 0x08, 0x08, 0x08, 0x08,
- 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08};
+ {0x2c, 0x1a, 0x12, 0x0c, 0x0a, 0x06, 0x06, 0x06,
+ 0x04, 0x06, 0x04, 0x04, 0x03, 0x03, 0x02, 0x02};
static const u8 gamma_tb[6][16] = {
{0x00, 0x00, 0x03, 0x0d, 0x1b, 0x2e, 0x45, 0x5f,
0x79, 0x93, 0xab, 0xc1, 0xd4, 0xe5, 0xf3, 0xff},
@@ -6082,11 +6037,11 @@ static void setcontrast(struct gspca_dev *gspca_dev)
adj = 0;
gp = 0;
for (i = 0; i < 16; i++) {
- g = Tgamma[i] - delta_tb[i] * k / 128 - adj / 2;
+ g = Tgamma[i] - delta_tb[i] * k / 256 - adj / 2;
if (g > 0xff)
g = 0xff;
- else if (g <= 0)
- g = 1;
+ else if (g < 0)
+ g = 0;
reg_w(dev, g, 0x0120 + i); /* gamma */
if (k > 0)
adj--;
@@ -6203,13 +6158,13 @@ static int setlightfreq(struct gspca_dev *gspca_dev)
pas106b_50HZ, pas106b_50HZ,
pas106b_60HZ, pas106b_60HZ},
/* SENSOR_PAS202B 13 */
- {pas202b_NoFlikerScale, pas202b_NoFliker,
- pas202b_50HZScale, pas202b_50HZ,
- pas202b_60HZScale, pas202b_60HZ},
+ {pas202b_NoFliker, pas202b_NoFlikerScale,
+ pas202b_50HZ, pas202b_50HZScale,
+ pas202b_60HZ, pas202b_60HZScale},
/* SENSOR_PB0330 14 */
- {pb0330_NoFlikerScale, pb0330_NoFliker,
- pb0330_50HZScale, pb0330_50HZ,
- pb0330_60HZScale, pb0330_60HZ},
+ {pb0330_NoFliker, pb0330_NoFlikerScale,
+ pb0330_50HZ, pb0330_50HZScale,
+ pb0330_60HZ, pb0330_60HZScale},
/* SENSOR_PO2030 15 */
{po2030_NoFliker, po2030_NoFliker,
po2030_50HZ, po2030_50HZ,
@@ -6789,7 +6744,6 @@ static int sd_config(struct gspca_dev *gspca_dev,
cam->nmodes = ARRAY_SIZE(broken_vga_mode);
break;
}
- sd->brightness = BRIGHTNESS_DEF;
sd->contrast = CONTRAST_DEF;
sd->gamma = gamma[sd->sensor];
sd->autogain = AUTOGAIN_DEF;
@@ -6797,12 +6751,6 @@ static int sd_config(struct gspca_dev *gspca_dev,
sd->quality = QUALITY_DEF;
switch (sd->sensor) {
- case SENSOR_GC0305:
- case SENSOR_OV7620:
- case SENSOR_PAS202B:
- case SENSOR_PO2030:
- gspca_dev->ctrl_dis = (1 << BRIGHTNESS_IDX);
- break;
case SENSOR_HV7131B:
case SENSOR_HV7131C:
case SENSOR_OV7630C:
@@ -6893,7 +6841,6 @@ static int sd_start(struct gspca_dev *gspca_dev)
}
setmatrix(gspca_dev);
- setbrightness(gspca_dev);
switch (sd->sensor) {
case SENSOR_ADCM2700:
case SENSOR_OV7620:
@@ -7015,24 +6962,6 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
gspca_frame_add(gspca_dev, INTER_PACKET, data, len);
}
-static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->brightness = val;
- if (gspca_dev->streaming)
- setbrightness(gspca_dev);
- return 0;
-}
-
-static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->brightness;
- return 0;
-}
-
static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val)
{
struct sd *sd = (struct sd *) gspca_dev;
diff --git a/drivers/media/video/hdpvr/hdpvr-core.c b/drivers/media/video/hdpvr/hdpvr-core.c
index 2fc9865fd486..830d47b05e1d 100644
--- a/drivers/media/video/hdpvr/hdpvr-core.c
+++ b/drivers/media/video/hdpvr/hdpvr-core.c
@@ -373,9 +373,6 @@ static int hdpvr_probe(struct usb_interface *interface,
}
#endif /* CONFIG_I2C */
- /* save our data pointer in this interface device */
- usb_set_intfdata(interface, dev);
-
/* let the user know what node this device is now attached to */
v4l2_info(&dev->v4l2_dev, "device now attached to %s\n",
video_device_node_name(dev->video_dev));
@@ -391,44 +388,24 @@ error:
static void hdpvr_disconnect(struct usb_interface *interface)
{
- struct hdpvr_device *dev;
-
- dev = usb_get_intfdata(interface);
- usb_set_intfdata(interface, NULL);
+ struct hdpvr_device *dev = to_hdpvr_dev(usb_get_intfdata(interface));
+ v4l2_info(&dev->v4l2_dev, "device %s disconnected\n",
+ video_device_node_name(dev->video_dev));
/* prevent more I/O from starting and stop any ongoing */
mutex_lock(&dev->io_mutex);
dev->status = STATUS_DISCONNECTED;
- v4l2_device_disconnect(&dev->v4l2_dev);
- video_unregister_device(dev->video_dev);
wake_up_interruptible(&dev->wait_data);
wake_up_interruptible(&dev->wait_buffer);
mutex_unlock(&dev->io_mutex);
+ v4l2_device_disconnect(&dev->v4l2_dev);
msleep(100);
flush_workqueue(dev->workqueue);
mutex_lock(&dev->io_mutex);
hdpvr_cancel_queue(dev);
- destroy_workqueue(dev->workqueue);
mutex_unlock(&dev->io_mutex);
-
- /* deregister I2C adapter */
-#ifdef CONFIG_I2C
- mutex_lock(&dev->i2c_mutex);
- if (dev->i2c_adapter)
- i2c_del_adapter(dev->i2c_adapter);
- kfree(dev->i2c_adapter);
- dev->i2c_adapter = NULL;
- mutex_unlock(&dev->i2c_mutex);
-#endif /* CONFIG_I2C */
-
+ video_unregister_device(dev->video_dev);
atomic_dec(&dev_nr);
-
- v4l2_info(&dev->v4l2_dev, "device %s disconnected\n",
- video_device_node_name(dev->video_dev));
-
- v4l2_device_unregister(&dev->v4l2_dev);
- kfree(dev->usbc_buf);
- kfree(dev);
}
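The hdpvr-core.c rework above defers the final teardown to the video_device release callback, so nothing is freed while a file handle may still be open. As a hedged sketch only (not part of this patch), the general pattern looks roughly like the following; my_device, my_release and my_disconnect are illustrative names, and it assumes vdev->release was pointed at my_release and video_set_drvdata() was called when the node was registered.

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-device.h>

struct my_device {
	struct v4l2_device v4l2_dev;
	struct video_device *vdev;
};

/* Runs only after the last open file handle on the node is gone;
 * assumes vdev->release was set to this function at registration. */
static void my_release(struct video_device *vdev)
{
	struct my_device *dev = video_get_drvdata(vdev);

	v4l2_device_unregister(&dev->v4l2_dev);
	kfree(dev);
}

static void my_disconnect(struct usb_interface *intf)
{
	/* assumes v4l2_device_register() stored &dev->v4l2_dev as drvdata */
	struct v4l2_device *v4l2_dev = usb_get_intfdata(intf);
	struct my_device *dev =
		container_of(v4l2_dev, struct my_device, v4l2_dev);

	/* Stop new I/O and remove the node; freeing waits for my_release() */
	v4l2_device_disconnect(&dev->v4l2_dev);
	video_unregister_device(dev->vdev);
}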
diff --git a/drivers/media/video/hdpvr/hdpvr-video.c b/drivers/media/video/hdpvr/hdpvr-video.c
index 196f82de48f0..7cfccfd1b870 100644
--- a/drivers/media/video/hdpvr/hdpvr-video.c
+++ b/drivers/media/video/hdpvr/hdpvr-video.c
@@ -92,8 +92,8 @@ static int hdpvr_free_queue(struct list_head *q)
buf = list_entry(p, struct hdpvr_buffer, buff_list);
urb = buf->urb;
- usb_buffer_free(urb->dev, urb->transfer_buffer_length,
- urb->transfer_buffer, urb->transfer_dma);
+ usb_free_coherent(urb->dev, urb->transfer_buffer_length,
+ urb->transfer_buffer, urb->transfer_dma);
usb_free_urb(urb);
tmp = p->next;
list_del(p);
@@ -143,8 +143,8 @@ int hdpvr_alloc_buffers(struct hdpvr_device *dev, uint count)
}
buf->urb = urb;
- mem = usb_buffer_alloc(dev->udev, dev->bulk_in_size, GFP_KERNEL,
- &urb->transfer_dma);
+ mem = usb_alloc_coherent(dev->udev, dev->bulk_in_size, GFP_KERNEL,
+ &urb->transfer_dma);
if (!mem) {
v4l2_err(&dev->v4l2_dev,
"cannot allocate usb transfer buffer\n");
@@ -1214,6 +1214,24 @@ static void hdpvr_device_release(struct video_device *vdev)
struct hdpvr_device *dev = video_get_drvdata(vdev);
hdpvr_delete(dev);
+ mutex_lock(&dev->io_mutex);
+ destroy_workqueue(dev->workqueue);
+ mutex_unlock(&dev->io_mutex);
+
+ v4l2_device_unregister(&dev->v4l2_dev);
+
+ /* deregister I2C adapter */
+#ifdef CONFIG_I2C
+ mutex_lock(&dev->i2c_mutex);
+ if (dev->i2c_adapter)
+ i2c_del_adapter(dev->i2c_adapter);
+ kfree(dev->i2c_adapter);
+ dev->i2c_adapter = NULL;
+ mutex_unlock(&dev->i2c_mutex);
+#endif /* CONFIG_I2C */
+
+ kfree(dev->usbc_buf);
+ kfree(dev);
}
static const struct video_device hdpvr_video_template = {
diff --git a/drivers/media/video/hdpvr/hdpvr.h b/drivers/media/video/hdpvr/hdpvr.h
index 49ae25d83d10..b0f046df3cd8 100644
--- a/drivers/media/video/hdpvr/hdpvr.h
+++ b/drivers/media/video/hdpvr/hdpvr.h
@@ -111,6 +111,11 @@ struct hdpvr_device {
u8 *usbc_buf;
};
+static inline struct hdpvr_device *to_hdpvr_dev(struct v4l2_device *v4l2_dev)
+{
+ return container_of(v4l2_dev, struct hdpvr_device, v4l2_dev);
+}
+
/* buffer one bulk urb of data */
struct hdpvr_buffer {
diff --git a/drivers/media/video/ir-kbd-i2c.c b/drivers/media/video/ir-kbd-i2c.c
index da18d698e7f2..29d439742653 100644
--- a/drivers/media/video/ir-kbd-i2c.c
+++ b/drivers/media/video/ir-kbd-i2c.c
@@ -61,9 +61,9 @@ module_param(hauppauge, int, 0644); /* Choose Hauppauge remote */
MODULE_PARM_DESC(hauppauge, "Specify Hauppauge remote: 0=black, 1=grey (defaults to 0)");
-#define DEVNAME "ir-kbd-i2c"
+#define MODULE_NAME "ir-kbd-i2c"
#define dprintk(level, fmt, arg...) if (debug >= level) \
- printk(KERN_DEBUG DEVNAME ": " fmt , ## arg)
+ printk(KERN_DEBUG MODULE_NAME ": " fmt , ## arg)
/* ----------------------------------------------------------------------- */
@@ -297,7 +297,7 @@ static void ir_work(struct work_struct *work)
static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
- struct ir_scancode_table *ir_codes = NULL;
+ char *ir_codes = NULL;
const char *name = NULL;
u64 ir_type = 0;
struct IR_i2c *ir;
@@ -322,13 +322,13 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
name = "Pixelview";
ir->get_key = get_key_pixelview;
ir_type = IR_TYPE_OTHER;
- ir_codes = &ir_codes_empty_table;
+ ir_codes = RC_MAP_EMPTY;
break;
case 0x4b:
name = "PV951";
ir->get_key = get_key_pv951;
ir_type = IR_TYPE_OTHER;
- ir_codes = &ir_codes_pv951_table;
+ ir_codes = RC_MAP_PV951;
break;
case 0x18:
case 0x1f:
@@ -337,22 +337,22 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
ir->get_key = get_key_haup;
ir_type = IR_TYPE_RC5;
if (hauppauge == 1) {
- ir_codes = &ir_codes_hauppauge_new_table;
+ ir_codes = RC_MAP_HAUPPAUGE_NEW;
} else {
- ir_codes = &ir_codes_rc5_tv_table;
+ ir_codes = RC_MAP_RC5_TV;
}
break;
case 0x30:
name = "KNC One";
ir->get_key = get_key_knc1;
ir_type = IR_TYPE_OTHER;
- ir_codes = &ir_codes_empty_table;
+ ir_codes = RC_MAP_EMPTY;
break;
case 0x6b:
name = "FusionHDTV";
ir->get_key = get_key_fusionhdtv;
ir_type = IR_TYPE_RC5;
- ir_codes = &ir_codes_fusionhdtv_mce_table;
+ ir_codes = RC_MAP_FUSIONHDTV_MCE;
break;
case 0x0b:
case 0x47:
@@ -365,9 +365,9 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
ir_type = IR_TYPE_RC5;
ir->get_key = get_key_haup_xvr;
if (hauppauge == 1) {
- ir_codes = &ir_codes_hauppauge_new_table;
+ ir_codes = RC_MAP_HAUPPAUGE_NEW;
} else {
- ir_codes = &ir_codes_rc5_tv_table;
+ ir_codes = RC_MAP_RC5_TV;
}
} else {
/* Handled by saa7134-input */
@@ -379,7 +379,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
name = "AVerMedia Cardbus remote";
ir->get_key = get_key_avermedia_cardbus;
ir_type = IR_TYPE_OTHER;
- ir_codes = &ir_codes_avermedia_cardbus_table;
+ ir_codes = RC_MAP_AVERMEDIA_CARDBUS;
break;
}
@@ -447,11 +447,11 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
input_dev->name = ir->name;
input_dev->phys = ir->phys;
- err = ir_input_register(ir->input, ir->ir_codes, NULL);
+ err = ir_input_register(ir->input, ir->ir_codes, NULL, MODULE_NAME);
if (err)
goto err_out_free;
- printk(DEVNAME ": %s detected at %s [%s]\n",
+ printk(MODULE_NAME ": %s detected at %s [%s]\n",
ir->input->name, ir->input->phys, adap->name);
/* start polling via eventd */
diff --git a/drivers/media/video/ivtv/ivtv-driver.c b/drivers/media/video/ivtv/ivtv-driver.c
index 9a250548be4d..1b79475ca134 100644
--- a/drivers/media/video/ivtv/ivtv-driver.c
+++ b/drivers/media/video/ivtv/ivtv-driver.c
@@ -1293,7 +1293,6 @@ int ivtv_init_on_first_open(struct ivtv *itv)
ivtv_call_hw(itv, IVTV_HW_SAA7127, video, s_stream, 1);
ivtv_init_mpeg_decoder(itv);
}
- ivtv_s_std(NULL, &fh, &itv->tuner_std);
/* On a cx23416 this seems to be able to enable DMA to the chip? */
if (!itv->has_cx23415)
@@ -1310,6 +1309,10 @@ int ivtv_init_on_first_open(struct ivtv *itv)
}
else
ivtv_clear_irq_mask(itv, IVTV_IRQ_MASK_INIT);
+
+ /* For cards with video out, this call needs interrupts enabled */
+ ivtv_s_std(NULL, &fh, &itv->tuner_std);
+
return 0;
}
diff --git a/drivers/media/video/ivtv/ivtv-driver.h b/drivers/media/video/ivtv/ivtv-driver.h
index 5028e31c564a..5b45fd2b2645 100644
--- a/drivers/media/video/ivtv/ivtv-driver.h
+++ b/drivers/media/video/ivtv/ivtv-driver.h
@@ -63,6 +63,7 @@
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-device.h>
+#include <media/v4l2-fh.h>
#include <media/tuner.h>
#include <media/cx2341x.h>
#include <media/ir-kbd-i2c.h>
@@ -116,6 +117,9 @@
#define IVTV_REG_VPU (0x9058)
#define IVTV_REG_APU (0xA064)
+/* Other registers */
+#define IVTV_REG_DEC_LINE_FIELD (0x28C0)
+
/* debugging */
extern int ivtv_debug;
@@ -372,6 +376,7 @@ struct ivtv_stream {
};
struct ivtv_open_id {
+ struct v4l2_fh fh;
u32 open_id; /* unique ID for this file descriptor */
int type; /* stream type */
int yuv_frames; /* 1: started OUT_UDMA_YUV output mode */
@@ -379,6 +384,11 @@ struct ivtv_open_id {
struct ivtv *itv;
};
+static inline struct ivtv_open_id *fh2id(struct v4l2_fh *fh)
+{
+ return container_of(fh, struct ivtv_open_id, fh);
+}
+
struct yuv_frame_info
{
u32 update;
diff --git a/drivers/media/video/ivtv/ivtv-fileops.c b/drivers/media/video/ivtv/ivtv-fileops.c
index babcabd73c08..abf410943cc9 100644
--- a/drivers/media/video/ivtv/ivtv-fileops.c
+++ b/drivers/media/video/ivtv/ivtv-fileops.c
@@ -32,6 +32,7 @@
#include "ivtv-yuv.h"
#include "ivtv-ioctl.h"
#include "ivtv-cards.h"
+#include <media/v4l2-event.h>
#include <media/saa7115.h>
/* This function tries to claim the stream for a specific file descriptor.
@@ -506,7 +507,7 @@ int ivtv_start_capture(struct ivtv_open_id *id)
ssize_t ivtv_v4l2_read(struct file * filp, char __user *buf, size_t count, loff_t * pos)
{
- struct ivtv_open_id *id = filp->private_data;
+ struct ivtv_open_id *id = fh2id(filp->private_data);
struct ivtv *itv = id->itv;
struct ivtv_stream *s = &itv->streams[id->type];
int rc;
@@ -541,7 +542,7 @@ int ivtv_start_decoding(struct ivtv_open_id *id, int speed)
ssize_t ivtv_v4l2_write(struct file *filp, const char __user *user_buf, size_t count, loff_t *pos)
{
- struct ivtv_open_id *id = filp->private_data;
+ struct ivtv_open_id *id = fh2id(filp->private_data);
struct ivtv *itv = id->itv;
struct ivtv_stream *s = &itv->streams[id->type];
struct yuv_playback_info *yi = &itv->yuv_info;
@@ -711,19 +712,31 @@ retry:
unsigned int ivtv_v4l2_dec_poll(struct file *filp, poll_table *wait)
{
- struct ivtv_open_id *id = filp->private_data;
+ struct ivtv_open_id *id = fh2id(filp->private_data);
struct ivtv *itv = id->itv;
struct ivtv_stream *s = &itv->streams[id->type];
int res = 0;
/* add stream's waitq to the poll list */
IVTV_DEBUG_HI_FILE("Decoder poll\n");
- poll_wait(filp, &s->waitq, wait);
- set_bit(IVTV_F_I_EV_VSYNC_ENABLED, &itv->i_flags);
- if (test_bit(IVTV_F_I_EV_VSYNC, &itv->i_flags) ||
- test_bit(IVTV_F_I_EV_DEC_STOPPED, &itv->i_flags))
- res = POLLPRI;
+ /* If there are subscribed events, then only use the new event
+ API instead of the old video.h based API. */
+ if (!list_empty(&id->fh.events->subscribed)) {
+ poll_wait(filp, &id->fh.events->wait, wait);
+ /* Turn off the old-style vsync events */
+ clear_bit(IVTV_F_I_EV_VSYNC_ENABLED, &itv->i_flags);
+ if (v4l2_event_pending(&id->fh))
+ res = POLLPRI;
+ } else {
+ /* This is the old-style API which is here only for backwards
+ compatibility. */
+ poll_wait(filp, &s->waitq, wait);
+ set_bit(IVTV_F_I_EV_VSYNC_ENABLED, &itv->i_flags);
+ if (test_bit(IVTV_F_I_EV_VSYNC, &itv->i_flags) ||
+ test_bit(IVTV_F_I_EV_DEC_STOPPED, &itv->i_flags))
+ res = POLLPRI;
+ }
/* Allow write if buffers are available for writing */
if (s->q_free.buffers)
@@ -733,7 +746,7 @@ unsigned int ivtv_v4l2_dec_poll(struct file *filp, poll_table *wait)
unsigned int ivtv_v4l2_enc_poll(struct file *filp, poll_table * wait)
{
- struct ivtv_open_id *id = filp->private_data;
+ struct ivtv_open_id *id = fh2id(filp->private_data);
struct ivtv *itv = id->itv;
struct ivtv_stream *s = &itv->streams[id->type];
int eof = test_bit(IVTV_F_S_STREAMOFF, &s->s_flags);
@@ -833,13 +846,16 @@ static void ivtv_stop_decoding(struct ivtv_open_id *id, int flags, u64 pts)
int ivtv_v4l2_close(struct file *filp)
{
- struct ivtv_open_id *id = filp->private_data;
+ struct v4l2_fh *fh = filp->private_data;
+ struct ivtv_open_id *id = fh2id(fh);
struct ivtv *itv = id->itv;
struct ivtv_stream *s = &itv->streams[id->type];
IVTV_DEBUG_FILE("close %s\n", s->name);
- v4l2_prio_close(&itv->prio, &id->prio);
+ v4l2_prio_close(&itv->prio, id->prio);
+ v4l2_fh_del(fh);
+ v4l2_fh_exit(fh);
/* Easy case first: this stream was never claimed by us */
if (s->id != id->open_id) {
@@ -895,6 +911,7 @@ static int ivtv_serialized_open(struct ivtv_stream *s, struct file *filp)
{
struct ivtv *itv = s->itv;
struct ivtv_open_id *item;
+ int res = 0;
IVTV_DEBUG_FILE("open %s\n", s->name);
@@ -915,17 +932,27 @@ static int ivtv_serialized_open(struct ivtv_stream *s, struct file *filp)
}
/* Allocate memory */
- item = kmalloc(sizeof(struct ivtv_open_id), GFP_KERNEL);
+ item = kzalloc(sizeof(struct ivtv_open_id), GFP_KERNEL);
if (NULL == item) {
IVTV_DEBUG_WARN("nomem on v4l2 open\n");
return -ENOMEM;
}
+ v4l2_fh_init(&item->fh, s->vdev);
+ if (s->type == IVTV_DEC_STREAM_TYPE_YUV ||
+ s->type == IVTV_DEC_STREAM_TYPE_MPG) {
+ res = v4l2_event_alloc(&item->fh, 60);
+ }
+ if (res < 0) {
+ v4l2_fh_exit(&item->fh);
+ kfree(item);
+ return res;
+ }
item->itv = itv;
item->type = s->type;
v4l2_prio_open(&itv->prio, &item->prio);
item->open_id = itv->open_id++;
- filp->private_data = item;
+ filp->private_data = &item->fh;
if (item->type == IVTV_ENC_STREAM_TYPE_RAD) {
/* Try to claim this stream */
@@ -940,6 +967,7 @@ static int ivtv_serialized_open(struct ivtv_stream *s, struct file *filp)
/* switching to radio while capture is
in progress is not polite */
ivtv_release_stream(s);
+ v4l2_fh_exit(&item->fh);
kfree(item);
return -EBUSY;
}
@@ -970,6 +998,7 @@ static int ivtv_serialized_open(struct ivtv_stream *s, struct file *filp)
1080 * ((itv->yuv_info.v4l2_src_h + 31) & ~31);
itv->yuv_info.stream_size = 0;
}
+ v4l2_fh_add(&item->fh);
return 0;
}
diff --git a/drivers/media/video/ivtv/ivtv-i2c.c b/drivers/media/video/ivtv/ivtv-i2c.c
index 2ee03c2a1b58..a5b92d109c6c 100644
--- a/drivers/media/video/ivtv/ivtv-i2c.c
+++ b/drivers/media/video/ivtv/ivtv-i2c.c
@@ -193,7 +193,7 @@ static int ivtv_i2c_new_ir(struct ivtv *itv, u32 hw, const char *type, u8 addr)
/* Our default information for ir-kbd-i2c.c to use */
switch (hw) {
case IVTV_HW_I2C_IR_RX_AVER:
- init_data->ir_codes = &ir_codes_avermedia_cardbus_table;
+ init_data->ir_codes = RC_MAP_AVERMEDIA_CARDBUS;
init_data->internal_get_key_func =
IR_KBD_GET_KEY_AVERMEDIA_CARDBUS;
init_data->type = IR_TYPE_OTHER;
@@ -202,14 +202,14 @@ static int ivtv_i2c_new_ir(struct ivtv *itv, u32 hw, const char *type, u8 addr)
case IVTV_HW_I2C_IR_RX_HAUP_EXT:
case IVTV_HW_I2C_IR_RX_HAUP_INT:
/* Default to old black remote */
- init_data->ir_codes = &ir_codes_rc5_tv_table;
+ init_data->ir_codes = RC_MAP_RC5_TV;
init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP;
init_data->type = IR_TYPE_RC5;
init_data->name = itv->card_name;
break;
case IVTV_HW_Z8F0811_IR_RX_HAUP:
/* Default to grey remote */
- init_data->ir_codes = &ir_codes_hauppauge_new_table;
+ init_data->ir_codes = RC_MAP_HAUPPAUGE_NEW;
init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP_XVR;
init_data->type = IR_TYPE_RC5;
init_data->name = itv->card_name;
diff --git a/drivers/media/video/ivtv/ivtv-ioctl.c b/drivers/media/video/ivtv/ivtv-ioctl.c
index 99f3c39a118b..fa9f0d958f96 100644
--- a/drivers/media/video/ivtv/ivtv-ioctl.c
+++ b/drivers/media/video/ivtv/ivtv-ioctl.c
@@ -35,6 +35,7 @@
#include <media/saa7127.h>
#include <media/tveeprom.h>
#include <media/v4l2-chip-ident.h>
+#include <media/v4l2-event.h>
#include <linux/dvb/audio.h>
#include <linux/i2c-id.h>
@@ -391,7 +392,7 @@ static int ivtv_g_fmt_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_fo
return 0;
}
- v4l2_subdev_call(itv->sd_video, video, g_fmt, fmt);
+ v4l2_subdev_call(itv->sd_video, vbi, g_sliced_fmt, vbifmt);
vbifmt->service_set = ivtv_get_service_set(vbifmt);
return 0;
}
@@ -597,7 +598,7 @@ static int ivtv_s_fmt_vbi_cap(struct file *file, void *fh, struct v4l2_format *f
return -EBUSY;
itv->vbi.sliced_in->service_set = 0;
itv->vbi.in.type = V4L2_BUF_TYPE_VBI_CAPTURE;
- v4l2_subdev_call(itv->sd_video, video, s_fmt, fmt);
+ v4l2_subdev_call(itv->sd_video, vbi, s_raw_fmt, &fmt->fmt.vbi);
return ivtv_g_fmt_vbi_cap(file, fh, fmt);
}
@@ -615,7 +616,7 @@ static int ivtv_s_fmt_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_fo
if (ivtv_raw_vbi(itv) && atomic_read(&itv->capturing) > 0)
return -EBUSY;
itv->vbi.in.type = V4L2_BUF_TYPE_SLICED_VBI_CAPTURE;
- v4l2_subdev_call(itv->sd_video, video, s_fmt, fmt);
+ v4l2_subdev_call(itv->sd_video, vbi, s_sliced_fmt, vbifmt);
memcpy(itv->vbi.sliced_in, vbifmt, sizeof(*itv->vbi.sliced_in));
return 0;
}
@@ -1087,8 +1088,10 @@ static int ivtv_g_std(struct file *file, void *fh, v4l2_std_id *std)
int ivtv_s_std(struct file *file, void *fh, v4l2_std_id *std)
{
+ DEFINE_WAIT(wait);
struct ivtv *itv = ((struct ivtv_open_id *)fh)->itv;
struct yuv_playback_info *yi = &itv->yuv_info;
+ int f;
if ((*std & V4L2_STD_ALL) == 0)
return -EINVAL;
@@ -1128,6 +1131,25 @@ int ivtv_s_std(struct file *file, void *fh, v4l2_std_id *std)
itv->is_out_60hz = itv->is_60hz;
itv->is_out_50hz = itv->is_50hz;
ivtv_call_all(itv, video, s_std_output, itv->std_out);
+
+ /*
+ * The next firmware call is time sensitive. Time it to
+ * avoid risk of a hard lock, by trying to ensure the call
+ * happens within the first 100 lines of the top field.
+ * Make 4 attempts to sync to the decoder before giving up.
+ */
+ for (f = 0; f < 4; f++) {
+ prepare_to_wait(&itv->vsync_waitq, &wait,
+ TASK_UNINTERRUPTIBLE);
+ if ((read_reg(IVTV_REG_DEC_LINE_FIELD) >> 16) < 100)
+ break;
+ schedule_timeout(msecs_to_jiffies(25));
+ }
+ finish_wait(&itv->vsync_waitq, &wait);
+
+ if (f == 4)
+ IVTV_WARN("Mode change failed to sync to decoder\n");
+
ivtv_vapi(itv, CX2341X_DEC_SET_STANDARD, 1, itv->is_out_50hz);
itv->main_rect.left = itv->main_rect.top = 0;
itv->main_rect.width = 720;
@@ -1431,6 +1453,18 @@ static int ivtv_overlay(struct file *file, void *fh, unsigned int on)
return 0;
}
+static int ivtv_subscribe_event(struct v4l2_fh *fh, struct v4l2_event_subscription *sub)
+{
+ switch (sub->type) {
+ case V4L2_EVENT_VSYNC:
+ case V4L2_EVENT_EOS:
+ break;
+ default:
+ return -EINVAL;
+ }
+ return v4l2_event_subscribe(fh, sub);
+}
+
static int ivtv_log_status(struct file *file, void *fh)
{
struct ivtv *itv = ((struct ivtv_open_id *)fh)->itv;
@@ -1539,10 +1573,11 @@ static int ivtv_log_status(struct file *file, void *fh)
static int ivtv_decoder_ioctls(struct file *filp, unsigned int cmd, void *arg)
{
- struct ivtv_open_id *id = (struct ivtv_open_id *)filp->private_data;
+ struct ivtv_open_id *id = fh2id(filp->private_data);
struct ivtv *itv = id->itv;
int nonblocking = filp->f_flags & O_NONBLOCK;
struct ivtv_stream *s = &itv->streams[id->type];
+ unsigned long iarg = (unsigned long)arg;
switch (cmd) {
case IVTV_IOC_DMA_FRAME: {
@@ -1724,6 +1759,33 @@ static int ivtv_decoder_ioctls(struct file *filp, unsigned int cmd, void *arg)
break;
}
+ case VIDEO_SELECT_SOURCE:
+ IVTV_DEBUG_IOCTL("VIDEO_SELECT_SOURCE\n");
+ if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT))
+ return -EINVAL;
+ return ivtv_passthrough_mode(itv, iarg == VIDEO_SOURCE_DEMUX);
+
+ case AUDIO_SET_MUTE:
+ IVTV_DEBUG_IOCTL("AUDIO_SET_MUTE\n");
+ itv->speed_mute_audio = iarg;
+ return 0;
+
+ case AUDIO_CHANNEL_SELECT:
+ IVTV_DEBUG_IOCTL("AUDIO_CHANNEL_SELECT\n");
+ if (iarg > AUDIO_STEREO_SWAPPED)
+ return -EINVAL;
+ itv->audio_stereo_mode = iarg;
+ ivtv_vapi(itv, CX2341X_DEC_SET_AUDIO_MODE, 2, itv->audio_bilingual_mode, itv->audio_stereo_mode);
+ return 0;
+
+ case AUDIO_BILINGUAL_CHANNEL_SELECT:
+ IVTV_DEBUG_IOCTL("AUDIO_BILINGUAL_CHANNEL_SELECT\n");
+ if (iarg > AUDIO_STEREO_SWAPPED)
+ return -EINVAL;
+ itv->audio_bilingual_mode = iarg;
+ ivtv_vapi(itv, CX2341X_DEC_SET_AUDIO_MODE, 2, itv->audio_bilingual_mode, itv->audio_stereo_mode);
+ return 0;
+
default:
return -EINVAL;
}
@@ -1755,6 +1817,10 @@ static long ivtv_default(struct file *file, void *fh, int cmd, void *arg)
case VIDEO_CONTINUE:
case VIDEO_COMMAND:
case VIDEO_TRY_COMMAND:
+ case VIDEO_SELECT_SOURCE:
+ case AUDIO_SET_MUTE:
+ case AUDIO_CHANNEL_SELECT:
+ case AUDIO_BILINGUAL_CHANNEL_SELECT:
return ivtv_decoder_ioctls(file, cmd, (void *)arg);
default:
@@ -1767,42 +1833,9 @@ static long ivtv_serialized_ioctl(struct ivtv *itv, struct file *filp,
unsigned int cmd, unsigned long arg)
{
struct video_device *vfd = video_devdata(filp);
- struct ivtv_open_id *id = (struct ivtv_open_id *)filp->private_data;
+ struct ivtv_open_id *id = fh2id(filp->private_data);
long ret;
- /* Filter dvb ioctls that cannot be handled by the v4l ioctl framework */
- switch (cmd) {
- case VIDEO_SELECT_SOURCE:
- IVTV_DEBUG_IOCTL("VIDEO_SELECT_SOURCE\n");
- if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT))
- return -EINVAL;
- return ivtv_passthrough_mode(itv, arg == VIDEO_SOURCE_DEMUX);
-
- case AUDIO_SET_MUTE:
- IVTV_DEBUG_IOCTL("AUDIO_SET_MUTE\n");
- itv->speed_mute_audio = arg;
- return 0;
-
- case AUDIO_CHANNEL_SELECT:
- IVTV_DEBUG_IOCTL("AUDIO_CHANNEL_SELECT\n");
- if (arg > AUDIO_STEREO_SWAPPED)
- return -EINVAL;
- itv->audio_stereo_mode = arg;
- ivtv_vapi(itv, CX2341X_DEC_SET_AUDIO_MODE, 2, itv->audio_bilingual_mode, itv->audio_stereo_mode);
- return 0;
-
- case AUDIO_BILINGUAL_CHANNEL_SELECT:
- IVTV_DEBUG_IOCTL("AUDIO_BILINGUAL_CHANNEL_SELECT\n");
- if (arg > AUDIO_STEREO_SWAPPED)
- return -EINVAL;
- itv->audio_bilingual_mode = arg;
- ivtv_vapi(itv, CX2341X_DEC_SET_AUDIO_MODE, 2, itv->audio_bilingual_mode, itv->audio_stereo_mode);
- return 0;
-
- default:
- break;
- }
-
/* check priority */
switch (cmd) {
case VIDIOC_S_CTRL:
@@ -1817,8 +1850,9 @@ static long ivtv_serialized_ioctl(struct ivtv *itv, struct file *filp,
case VIDIOC_S_AUDOUT:
case VIDIOC_S_EXT_CTRLS:
case VIDIOC_S_FBUF:
+ case VIDIOC_S_PRIORITY:
case VIDIOC_OVERLAY:
- ret = v4l2_prio_check(&itv->prio, &id->prio);
+ ret = v4l2_prio_check(&itv->prio, id->prio);
if (ret)
return ret;
}
@@ -1832,10 +1866,13 @@ static long ivtv_serialized_ioctl(struct ivtv *itv, struct file *filp,
long ivtv_v4l2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
- struct ivtv_open_id *id = (struct ivtv_open_id *)filp->private_data;
+ struct ivtv_open_id *id = fh2id(filp->private_data);
struct ivtv *itv = id->itv;
long res;
+ /* DQEVENT can block, so this should not run with the serialize lock */
+ if (cmd == VIDIOC_DQEVENT)
+ return ivtv_serialized_ioctl(itv, filp, cmd, arg);
mutex_lock(&itv->serialize_lock);
res = ivtv_serialized_ioctl(itv, filp, cmd, arg);
mutex_unlock(&itv->serialize_lock);
@@ -1906,6 +1943,8 @@ static const struct v4l2_ioctl_ops ivtv_ioctl_ops = {
.vidioc_g_ext_ctrls = ivtv_g_ext_ctrls,
.vidioc_s_ext_ctrls = ivtv_s_ext_ctrls,
.vidioc_try_ext_ctrls = ivtv_try_ext_ctrls,
+ .vidioc_subscribe_event = ivtv_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
void ivtv_set_funcs(struct video_device *vdev)
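The ivtv-ioctl.c hunks above wire the driver into the V4L2 event framework: VSYNC and EOS events can be subscribed, and VIDIOC_DQEVENT is dispatched outside the serialize lock because it may block. As an illustration only (not part of this patch), a userspace consumer of these events could look roughly like this; the device path is a placeholder.

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	/* Path is a placeholder; ivtv decoder node numbering varies. */
	int fd = open("/dev/video16", O_RDWR);
	struct v4l2_event_subscription sub;
	struct v4l2_event ev;
	struct pollfd pfd = { .fd = fd, .events = POLLPRI };

	if (fd < 0)
		return 1;

	memset(&sub, 0, sizeof(sub));
	sub.type = V4L2_EVENT_VSYNC;
	if (ioctl(fd, VIDIOC_SUBSCRIBE_EVENT, &sub) < 0)
		return 1;

	/* An exception condition (POLLPRI) signals a pending event. */
	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLPRI)) {
		if (ioctl(fd, VIDIOC_DQEVENT, &ev) == 0 &&
		    ev.type == V4L2_EVENT_VSYNC)
			printf("vsync on field %d\n", ev.u.vsync.field);
	}

	close(fd);
	return 0;
}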
diff --git a/drivers/media/video/ivtv/ivtv-irq.c b/drivers/media/video/ivtv/ivtv-irq.c
index 12d36ca91d53..fea1ec33b0df 100644
--- a/drivers/media/video/ivtv/ivtv-irq.c
+++ b/drivers/media/video/ivtv/ivtv-irq.c
@@ -25,6 +25,7 @@
#include "ivtv-mailbox.h"
#include "ivtv-vbi.h"
#include "ivtv-yuv.h"
+#include <media/v4l2-event.h>
#define DMA_MAGIC_COOKIE 0x000001fe
@@ -752,7 +753,7 @@ static void ivtv_irq_vsync(struct ivtv *itv)
* to determine the line being displayed and ensure we handle
* one vsync per frame.
*/
- unsigned int frame = read_reg(0x28c0) & 1;
+ unsigned int frame = read_reg(IVTV_REG_DEC_LINE_FIELD) & 1;
struct yuv_playback_info *yi = &itv->yuv_info;
int last_dma_frame = atomic_read(&yi->next_dma_frame);
struct yuv_frame_info *f = &yi->new_frame_info[last_dma_frame];
@@ -778,6 +779,14 @@ static void ivtv_irq_vsync(struct ivtv *itv)
}
}
if (frame != (itv->last_vsync_field & 1)) {
+ static const struct v4l2_event evtop = {
+ .type = V4L2_EVENT_VSYNC,
+ .u.vsync.field = V4L2_FIELD_TOP,
+ };
+ static const struct v4l2_event evbottom = {
+ .type = V4L2_EVENT_VSYNC,
+ .u.vsync.field = V4L2_FIELD_BOTTOM,
+ };
struct ivtv_stream *s = ivtv_get_output_stream(itv);
itv->last_vsync_field += 1;
@@ -791,10 +800,12 @@ static void ivtv_irq_vsync(struct ivtv *itv)
if (test_bit(IVTV_F_I_EV_VSYNC_ENABLED, &itv->i_flags)) {
set_bit(IVTV_F_I_EV_VSYNC, &itv->i_flags);
wake_up(&itv->event_waitq);
+ if (s)
+ wake_up(&s->waitq);
}
+ if (s && s->vdev)
+ v4l2_event_queue(s->vdev, frame ? &evtop : &evbottom);
wake_up(&itv->vsync_waitq);
- if (s)
- wake_up(&s->waitq);
/* Send VBI to saa7127 */
if (frame && (itv->output_mode == OUT_PASSTHROUGH ||
@@ -852,9 +863,11 @@ irqreturn_t ivtv_irq_handler(int irq, void *dev_id)
*/
if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) {
/* vsync is enabled, see if we're in a new field */
- if ((itv->last_vsync_field & 1) != (read_reg(0x28c0) & 1)) {
+ if ((itv->last_vsync_field & 1) !=
+ (read_reg(IVTV_REG_DEC_LINE_FIELD) & 1)) {
/* New field, looks like we missed it */
- IVTV_DEBUG_YUV("VSync interrupt missed %d\n",read_reg(0x28c0)>>16);
+ IVTV_DEBUG_YUV("VSync interrupt missed %d\n",
+ read_reg(IVTV_REG_DEC_LINE_FIELD) >> 16);
vsync_force = 1;
}
}
diff --git a/drivers/media/video/ivtv/ivtv-streams.c b/drivers/media/video/ivtv/ivtv-streams.c
index 1f9387f6ca24..de4288cc1889 100644
--- a/drivers/media/video/ivtv/ivtv-streams.c
+++ b/drivers/media/video/ivtv/ivtv-streams.c
@@ -42,6 +42,7 @@
#include "ivtv-yuv.h"
#include "ivtv-cards.h"
#include "ivtv-streams.h"
+#include <media/v4l2-event.h>
static const struct v4l2_file_operations ivtv_v4l2_enc_fops = {
.owner = THIS_MODULE,
@@ -343,7 +344,10 @@ static void ivtv_vbi_setup(struct ivtv *itv)
ivtv_vapi(itv, CX2341X_ENC_SET_VBI_LINE, 5, 0xffff , 0, 0, 0, 0);
/* setup VBI registers */
- v4l2_subdev_call(itv->sd_video, video, s_fmt, &itv->vbi.in);
+ if (raw)
+ v4l2_subdev_call(itv->sd_video, vbi, s_raw_fmt, &itv->vbi.in.fmt.vbi);
+ else
+ v4l2_subdev_call(itv->sd_video, vbi, s_sliced_fmt, &itv->vbi.in.fmt.sliced);
/* determine number of lines and total number of VBI bytes.
A raw line takes 1443 bytes: 2 * 720 + 4 byte frame header - 1
@@ -581,10 +585,9 @@ int ivtv_start_v4l2_encode_stream(struct ivtv_stream *s)
v4l2_subdev_call(itv->sd_audio, audio, s_stream, 1);
/* Avoid unpredictable PCI bus hang - disable video clocks */
v4l2_subdev_call(itv->sd_video, video, s_stream, 0);
- ivtv_msleep_timeout(150, 1);
+ ivtv_msleep_timeout(300, 1);
ivtv_vapi(itv, CX2341X_ENC_INITIALIZE_INPUT, 0);
v4l2_subdev_call(itv->sd_video, video, s_stream, 1);
- ivtv_msleep_timeout(150, 1);
}
/* begin_capture */
@@ -830,6 +833,10 @@ int ivtv_stop_v4l2_encode_stream(struct ivtv_stream *s, int gop_end)
ivtv_set_irq_mask(itv, IVTV_IRQ_ENC_VIM_RST);
}
+ /* Raw-passthrough is implied on start. Make sure it's stopped so
+ the encoder will re-initialize when next started */
+ ivtv_vapi(itv, CX2341X_ENC_STOP_CAPTURE, 3, 1, 2, 7);
+
wake_up(&s->waitq);
return 0;
@@ -837,6 +844,9 @@ int ivtv_stop_v4l2_encode_stream(struct ivtv_stream *s, int gop_end)
int ivtv_stop_v4l2_decode_stream(struct ivtv_stream *s, int flags, u64 pts)
{
+ static const struct v4l2_event ev = {
+ .type = V4L2_EVENT_EOS,
+ };
struct ivtv *itv = s->itv;
if (s->vdev == NULL)
@@ -888,6 +898,7 @@ int ivtv_stop_v4l2_decode_stream(struct ivtv_stream *s, int flags, u64 pts)
set_bit(IVTV_F_I_EV_DEC_STOPPED, &itv->i_flags);
wake_up(&itv->event_waitq);
+ v4l2_event_queue(s->vdev, &ev);
/* wake up wait queues */
wake_up(&s->waitq);
diff --git a/drivers/media/video/ivtv/ivtv-vbi.c b/drivers/media/video/ivtv/ivtv-vbi.c
index f420d31b937d..e1c347e5ebd8 100644
--- a/drivers/media/video/ivtv/ivtv-vbi.c
+++ b/drivers/media/video/ivtv/ivtv-vbi.c
@@ -38,7 +38,7 @@ static void ivtv_set_vps(struct ivtv *itv, int enabled)
data.data[9] = itv->vbi.vps_payload.data[2];
data.data[10] = itv->vbi.vps_payload.data[3];
data.data[11] = itv->vbi.vps_payload.data[4];
- ivtv_call_hw(itv, IVTV_HW_SAA7127, video, s_vbi_data, &data);
+ ivtv_call_hw(itv, IVTV_HW_SAA7127, vbi, s_vbi_data, &data);
}
static void ivtv_set_cc(struct ivtv *itv, int mode, const struct vbi_cc *cc)
@@ -52,12 +52,12 @@ static void ivtv_set_cc(struct ivtv *itv, int mode, const struct vbi_cc *cc)
data.line = (mode & 1) ? 21 : 0;
data.data[0] = cc->odd[0];
data.data[1] = cc->odd[1];
- ivtv_call_hw(itv, IVTV_HW_SAA7127, video, s_vbi_data, &data);
+ ivtv_call_hw(itv, IVTV_HW_SAA7127, vbi, s_vbi_data, &data);
data.field = 1;
data.line = (mode & 2) ? 21 : 0;
data.data[0] = cc->even[0];
data.data[1] = cc->even[1];
- ivtv_call_hw(itv, IVTV_HW_SAA7127, video, s_vbi_data, &data);
+ ivtv_call_hw(itv, IVTV_HW_SAA7127, vbi, s_vbi_data, &data);
}
static void ivtv_set_wss(struct ivtv *itv, int enabled, int mode)
@@ -80,7 +80,7 @@ static void ivtv_set_wss(struct ivtv *itv, int enabled, int mode)
data.line = enabled ? 23 : 0;
data.data[0] = mode & 0xff;
data.data[1] = (mode >> 8) & 0xff;
- ivtv_call_hw(itv, IVTV_HW_SAA7127, video, s_vbi_data, &data);
+ ivtv_call_hw(itv, IVTV_HW_SAA7127, vbi, s_vbi_data, &data);
}
static int odd_parity(u8 c)
@@ -134,7 +134,7 @@ void ivtv_write_vbi(struct ivtv *itv, const struct v4l2_sliced_vbi_data *sliced,
}
}
}
- if (found_cc && vi->cc_payload_idx < sizeof(vi->cc_payload)) {
+ if (found_cc && vi->cc_payload_idx < ARRAY_SIZE(vi->cc_payload)) {
vi->cc_payload[vi->cc_payload_idx++] = cc;
set_bit(IVTV_F_I_UPDATE_CC, &itv->i_flags);
}
@@ -316,7 +316,7 @@ static u32 compress_sliced_buf(struct ivtv *itv, u32 line, u8 *buf, u32 size, u8
continue;
}
vbi.p = p + 4;
- v4l2_subdev_call(itv->sd_video, video, decode_vbi_line, &vbi);
+ v4l2_subdev_call(itv->sd_video, vbi, decode_vbi_line, &vbi);
if (vbi.type && !(lines & (1 << vbi.line))) {
lines |= 1 << vbi.line;
itv->vbi.sliced_data[line].id = vbi.type;
@@ -440,7 +440,7 @@ void ivtv_vbi_work_handler(struct ivtv *itv)
data.id = V4L2_SLICED_WSS_625;
data.field = 0;
- if (v4l2_subdev_call(itv->sd_video, video, g_vbi_data, &data) == 0) {
+ if (v4l2_subdev_call(itv->sd_video, vbi, g_vbi_data, &data) == 0) {
ivtv_set_wss(itv, 1, data.data[0] & 0xf);
vi->wss_missing_cnt = 0;
} else if (vi->wss_missing_cnt == 4) {
@@ -454,13 +454,13 @@ void ivtv_vbi_work_handler(struct ivtv *itv)
data.id = V4L2_SLICED_CAPTION_525;
data.field = 0;
- if (v4l2_subdev_call(itv->sd_video, video, g_vbi_data, &data) == 0) {
+ if (v4l2_subdev_call(itv->sd_video, vbi, g_vbi_data, &data) == 0) {
mode |= 1;
cc.odd[0] = data.data[0];
cc.odd[1] = data.data[1];
}
data.field = 1;
- if (v4l2_subdev_call(itv->sd_video, video, g_vbi_data, &data) == 0) {
+ if (v4l2_subdev_call(itv->sd_video, vbi, g_vbi_data, &data) == 0) {
mode |= 2;
cc.even[0] = data.data[0];
cc.even[1] = data.data[1];
diff --git a/drivers/media/video/ivtv/ivtvfb.c b/drivers/media/video/ivtv/ivtvfb.c
index de2ff1c6ac34..49e1a283ed36 100644
--- a/drivers/media/video/ivtv/ivtvfb.c
+++ b/drivers/media/video/ivtv/ivtvfb.c
@@ -460,7 +460,7 @@ static int ivtvfb_ioctl(struct fb_info *info, unsigned int cmd, unsigned long ar
vblank.flags = FB_VBLANK_HAVE_COUNT |FB_VBLANK_HAVE_VCOUNT |
FB_VBLANK_HAVE_VSYNC;
- trace = read_reg(0x028c0) >> 16;
+ trace = read_reg(IVTV_REG_DEC_LINE_FIELD) >> 16;
if (itv->is_50hz && trace > 312)
trace -= 312;
else if (itv->is_60hz && trace > 262)
diff --git a/drivers/media/video/mem2mem_testdev.c b/drivers/media/video/mem2mem_testdev.c
new file mode 100644
index 000000000000..554eaf140128
--- /dev/null
+++ b/drivers/media/video/mem2mem_testdev.c
@@ -0,0 +1,1052 @@
+/*
+ * A virtual v4l2-mem2mem example device.
+ *
+ * This is a virtual device driver for testing the mem-to-mem videobuf framework.
+ * It simulates a device that uses memory buffers for both source and
+ * destination, processes the data and issues an "irq" (simulated by a timer).
+ * The device is capable of multi-instance, multi-buffer-per-transaction
+ * operation (via the mem2mem framework).
+ *
+ * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
+ * Pawel Osciak, <p.osciak@samsung.com>
+ * Marek Szyprowski, <m.szyprowski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version
+ */
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/version.h>
+#include <linux/timer.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include <linux/platform_device.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf-vmalloc.h>
+
+#define MEM2MEM_TEST_MODULE_NAME "mem2mem-testdev"
+
+MODULE_DESCRIPTION("Virtual device for mem2mem framework testing");
+MODULE_AUTHOR("Pawel Osciak, <p.osciak@samsung.com>");
+MODULE_LICENSE("GPL");
+
+
+#define MIN_W 32
+#define MIN_H 32
+#define MAX_W 640
+#define MAX_H 480
+#define DIM_ALIGN_MASK 0x08 /* 8-alignment for dimensions */
+
+/* Flags that indicate a format can be used for capture/output */
+#define MEM2MEM_CAPTURE (1 << 0)
+#define MEM2MEM_OUTPUT (1 << 1)
+
+#define MEM2MEM_NAME "m2m-testdev"
+
+/* Per queue */
+#define MEM2MEM_DEF_NUM_BUFS VIDEO_MAX_FRAME
+/* In bytes, per queue */
+#define MEM2MEM_VID_MEM_LIMIT (16 * 1024 * 1024)
+
+/* Default transaction time in msec */
+#define MEM2MEM_DEF_TRANSTIME 1000
+/* Default number of buffers per transaction */
+#define MEM2MEM_DEF_TRANSLEN 1
+#define MEM2MEM_COLOR_STEP (0xff >> 4)
+#define MEM2MEM_NUM_TILES 8
+
+#define dprintk(dev, fmt, arg...) \
+ v4l2_dbg(1, 1, &dev->v4l2_dev, "%s: " fmt, __func__, ## arg)
+
+
+void m2mtest_dev_release(struct device *dev)
+{}
+
+static struct platform_device m2mtest_pdev = {
+ .name = MEM2MEM_NAME,
+ .dev.release = m2mtest_dev_release,
+};
+
+struct m2mtest_fmt {
+ char *name;
+ u32 fourcc;
+ int depth;
+ /* Types the format can be used for */
+ u32 types;
+};
+
+static struct m2mtest_fmt formats[] = {
+ {
+ .name = "RGB565 (BE)",
+ .fourcc = V4L2_PIX_FMT_RGB565X, /* rrrrrggg gggbbbbb */
+ .depth = 16,
+ /* Both capture and output format */
+ .types = MEM2MEM_CAPTURE | MEM2MEM_OUTPUT,
+ },
+ {
+ .name = "4:2:2, packed, YUYV",
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .depth = 16,
+ /* Output-only format */
+ .types = MEM2MEM_OUTPUT,
+ },
+};
+
+/* Per-queue, driver-specific private data */
+struct m2mtest_q_data {
+ unsigned int width;
+ unsigned int height;
+ unsigned int sizeimage;
+ struct m2mtest_fmt *fmt;
+};
+
+enum {
+ V4L2_M2M_SRC = 0,
+ V4L2_M2M_DST = 1,
+};
+
+/* Source and destination queue data */
+static struct m2mtest_q_data q_data[2];
+
+static struct m2mtest_q_data *get_q_data(enum v4l2_buf_type type)
+{
+ switch (type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ return &q_data[V4L2_M2M_SRC];
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ return &q_data[V4L2_M2M_DST];
+ default:
+ BUG();
+ }
+ return NULL;
+}
+
+#define V4L2_CID_TRANS_TIME_MSEC V4L2_CID_PRIVATE_BASE
+#define V4L2_CID_TRANS_NUM_BUFS (V4L2_CID_PRIVATE_BASE + 1)
+
+static struct v4l2_queryctrl m2mtest_ctrls[] = {
+ {
+ .id = V4L2_CID_TRANS_TIME_MSEC,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Transaction time (msec)",
+ .minimum = 1,
+ .maximum = 10000,
+ .step = 100,
+ .default_value = 1000,
+ .flags = 0,
+ }, {
+ .id = V4L2_CID_TRANS_NUM_BUFS,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Buffers per transaction",
+ .minimum = 1,
+ .maximum = MEM2MEM_DEF_NUM_BUFS,
+ .step = 1,
+ .default_value = 1,
+ .flags = 0,
+ },
+};
+
+#define NUM_FORMATS ARRAY_SIZE(formats)
+
+static struct m2mtest_fmt *find_format(struct v4l2_format *f)
+{
+ struct m2mtest_fmt *fmt;
+ unsigned int k;
+
+ for (k = 0; k < NUM_FORMATS; k++) {
+ fmt = &formats[k];
+ if (fmt->fourcc == f->fmt.pix.pixelformat)
+ break;
+ }
+
+ if (k == NUM_FORMATS)
+ return NULL;
+
+ return &formats[k];
+}
+
+struct m2mtest_dev {
+ struct v4l2_device v4l2_dev;
+ struct video_device *vfd;
+
+ atomic_t num_inst;
+ struct mutex dev_mutex;
+ spinlock_t irqlock;
+
+ struct timer_list timer;
+
+ struct v4l2_m2m_dev *m2m_dev;
+};
+
+struct m2mtest_ctx {
+ struct m2mtest_dev *dev;
+
+ /* Processed buffers in this transaction */
+ u8 num_processed;
+
+ /* Transaction length (i.e. how many buffers per transaction) */
+ u32 translen;
+ /* Transaction time (i.e. simulated processing time) in milliseconds */
+ u32 transtime;
+
+ /* Abort requested by m2m */
+ int aborting;
+
+ struct v4l2_m2m_ctx *m2m_ctx;
+};
+
+struct m2mtest_buffer {
+ /* vb must be first! */
+ struct videobuf_buffer vb;
+};
+
+static struct v4l2_queryctrl *get_ctrl(int id)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(m2mtest_ctrls); ++i) {
+ if (id == m2mtest_ctrls[i].id)
+ return &m2mtest_ctrls[i];
+ }
+
+ return NULL;
+}
+
+static int device_process(struct m2mtest_ctx *ctx,
+ struct m2mtest_buffer *in_buf,
+ struct m2mtest_buffer *out_buf)
+{
+ struct m2mtest_dev *dev = ctx->dev;
+ u8 *p_in, *p_out;
+ int x, y, t, w;
+ int tile_w, bytes_left;
+ struct videobuf_queue *src_q;
+ struct videobuf_queue *dst_q;
+
+ src_q = v4l2_m2m_get_src_vq(ctx->m2m_ctx);
+ dst_q = v4l2_m2m_get_dst_vq(ctx->m2m_ctx);
+ p_in = videobuf_queue_to_vaddr(src_q, &in_buf->vb);
+ p_out = videobuf_queue_to_vaddr(dst_q, &out_buf->vb);
+ if (!p_in || !p_out) {
+ v4l2_err(&dev->v4l2_dev,
+ "Acquiring kernel pointers to buffers failed\n");
+ return -EFAULT;
+ }
+
+ if (in_buf->vb.size < out_buf->vb.size) {
+ v4l2_err(&dev->v4l2_dev, "Output buffer is too small\n");
+ return -EINVAL;
+ }
+
+ tile_w = (in_buf->vb.width * (q_data[V4L2_M2M_DST].fmt->depth >> 3))
+ / MEM2MEM_NUM_TILES;
+ bytes_left = in_buf->vb.bytesperline - tile_w * MEM2MEM_NUM_TILES;
+ w = 0;
+
+ for (y = 0; y < in_buf->vb.height; ++y) {
+ for (t = 0; t < MEM2MEM_NUM_TILES; ++t) {
+ if (w & 0x1) {
+ for (x = 0; x < tile_w; ++x)
+ *p_out++ = *p_in++ + MEM2MEM_COLOR_STEP;
+ } else {
+ for (x = 0; x < tile_w; ++x)
+ *p_out++ = *p_in++ - MEM2MEM_COLOR_STEP;
+ }
+ ++w;
+ }
+ p_in += bytes_left;
+ p_out += bytes_left;
+ }
+
+ return 0;
+}
+
+static void schedule_irq(struct m2mtest_dev *dev, int msec_timeout)
+{
+ dprintk(dev, "Scheduling a simulated irq\n");
+ mod_timer(&dev->timer, jiffies + msecs_to_jiffies(msec_timeout));
+}
+
+/*
+ * mem2mem callbacks
+ */
+
+/**
+ * job_ready() - check whether an instance is ready to be scheduled to run
+ */
+static int job_ready(void *priv)
+{
+ struct m2mtest_ctx *ctx = priv;
+
+ if (v4l2_m2m_num_src_bufs_ready(ctx->m2m_ctx) < ctx->translen
+ || v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx) < ctx->translen) {
+ dprintk(ctx->dev, "Not enough buffers available\n");
+ return 0;
+ }
+
+ return 1;
+}
+
+static void job_abort(void *priv)
+{
+ struct m2mtest_ctx *ctx = priv;
+
+ /* Will cancel the transaction in the next interrupt handler */
+ ctx->aborting = 1;
+}
+
+/* device_run() - prepares and starts the device
+ *
+ * This simulates all the immediate preparations required before starting
+ * a device. This will be called by the framework when it decides to schedule
+ * a particular instance.
+ */
+static void device_run(void *priv)
+{
+ struct m2mtest_ctx *ctx = priv;
+ struct m2mtest_dev *dev = ctx->dev;
+ struct m2mtest_buffer *src_buf, *dst_buf;
+
+ src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
+ dst_buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+
+ device_process(ctx, src_buf, dst_buf);
+
+ /* Run a timer, which simulates a hardware irq */
+ schedule_irq(dev, ctx->transtime);
+}
+
+
+static void device_isr(unsigned long priv)
+{
+ struct m2mtest_dev *m2mtest_dev = (struct m2mtest_dev *)priv;
+ struct m2mtest_ctx *curr_ctx;
+ struct m2mtest_buffer *src_buf, *dst_buf;
+ unsigned long flags;
+
+ curr_ctx = v4l2_m2m_get_curr_priv(m2mtest_dev->m2m_dev);
+
+ if (NULL == curr_ctx) {
+ printk(KERN_ERR
+ "Instance released before the end of transaction\n");
+ return;
+ }
+
+ src_buf = v4l2_m2m_src_buf_remove(curr_ctx->m2m_ctx);
+ dst_buf = v4l2_m2m_dst_buf_remove(curr_ctx->m2m_ctx);
+ curr_ctx->num_processed++;
+
+ if (curr_ctx->num_processed == curr_ctx->translen
+ || curr_ctx->aborting) {
+ dprintk(curr_ctx->dev, "Finishing transaction\n");
+ curr_ctx->num_processed = 0;
+ spin_lock_irqsave(&m2mtest_dev->irqlock, flags);
+ src_buf->vb.state = dst_buf->vb.state = VIDEOBUF_DONE;
+ wake_up(&src_buf->vb.done);
+ wake_up(&dst_buf->vb.done);
+ spin_unlock_irqrestore(&m2mtest_dev->irqlock, flags);
+ v4l2_m2m_job_finish(m2mtest_dev->m2m_dev, curr_ctx->m2m_ctx);
+ } else {
+ spin_lock_irqsave(&m2mtest_dev->irqlock, flags);
+ src_buf->vb.state = dst_buf->vb.state = VIDEOBUF_DONE;
+ wake_up(&src_buf->vb.done);
+ wake_up(&dst_buf->vb.done);
+ spin_unlock_irqrestore(&m2mtest_dev->irqlock, flags);
+ device_run(curr_ctx);
+ }
+}
+
+
+/*
+ * video ioctls
+ */
+static int vidioc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ strncpy(cap->driver, MEM2MEM_NAME, sizeof(cap->driver) - 1);
+ strncpy(cap->card, MEM2MEM_NAME, sizeof(cap->card) - 1);
+ cap->bus_info[0] = 0;
+ cap->version = KERNEL_VERSION(0, 1, 0);
+ cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT
+ | V4L2_CAP_STREAMING;
+
+ return 0;
+}
+
+static int enum_fmt(struct v4l2_fmtdesc *f, u32 type)
+{
+ int i, num;
+ struct m2mtest_fmt *fmt;
+
+ num = 0;
+
+ for (i = 0; i < NUM_FORMATS; ++i) {
+ if (formats[i].types & type) {
+ /* index-th format of type 'type' found? */
+ if (num == f->index)
+ break;
+ /* Correct type but haven't reached our index yet,
+ * just increment per-type index */
+ ++num;
+ }
+ }
+
+ if (i < NUM_FORMATS) {
+ /* Format found */
+ fmt = &formats[i];
+ strncpy(f->description, fmt->name, sizeof(f->description) - 1);
+ f->pixelformat = fmt->fourcc;
+ return 0;
+ }
+
+ /* Format not found */
+ return -EINVAL;
+}
+
+static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return enum_fmt(f, MEM2MEM_CAPTURE);
+}
+
+static int vidioc_enum_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return enum_fmt(f, MEM2MEM_OUTPUT);
+}
+
+static int vidioc_g_fmt(struct m2mtest_ctx *ctx, struct v4l2_format *f)
+{
+ struct videobuf_queue *vq;
+ struct m2mtest_q_data *q_data;
+
+ vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+ if (!vq)
+ return -EINVAL;
+
+ q_data = get_q_data(f->type);
+
+ f->fmt.pix.width = q_data->width;
+ f->fmt.pix.height = q_data->height;
+ f->fmt.pix.field = vq->field;
+ f->fmt.pix.pixelformat = q_data->fmt->fourcc;
+ f->fmt.pix.bytesperline = (q_data->width * q_data->fmt->depth) >> 3;
+ f->fmt.pix.sizeimage = q_data->sizeimage;
+
+ return 0;
+}
+
+static int vidioc_g_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ return vidioc_g_fmt(priv, f);
+}
+
+static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ return vidioc_g_fmt(priv, f);
+}
+
+static int vidioc_try_fmt(struct v4l2_format *f, struct m2mtest_fmt *fmt)
+{
+ enum v4l2_field field;
+
+ field = f->fmt.pix.field;
+
+ if (field == V4L2_FIELD_ANY)
+ field = V4L2_FIELD_NONE;
+ else if (V4L2_FIELD_NONE != field)
+ return -EINVAL;
+
+ /* V4L2 specification suggests the driver corrects the format struct
+ * if any of the dimensions is unsupported */
+ f->fmt.pix.field = field;
+
+ if (f->fmt.pix.height < MIN_H)
+ f->fmt.pix.height = MIN_H;
+ else if (f->fmt.pix.height > MAX_H)
+ f->fmt.pix.height = MAX_H;
+
+ if (f->fmt.pix.width < MIN_W)
+ f->fmt.pix.width = MIN_W;
+ else if (f->fmt.pix.width > MAX_W)
+ f->fmt.pix.width = MAX_W;
+
+ f->fmt.pix.width &= ~DIM_ALIGN_MASK;
+ f->fmt.pix.bytesperline = (f->fmt.pix.width * fmt->depth) >> 3;
+ f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline;
+
+ return 0;
+}
+
+static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct m2mtest_fmt *fmt;
+ struct m2mtest_ctx *ctx = priv;
+
+ fmt = find_format(f);
+ if (!fmt || !(fmt->types & MEM2MEM_CAPTURE)) {
+ v4l2_err(&ctx->dev->v4l2_dev,
+ "Fourcc format (0x%08x) invalid.\n",
+ f->fmt.pix.pixelformat);
+ return -EINVAL;
+ }
+
+ return vidioc_try_fmt(f, fmt);
+}
+
+static int vidioc_try_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct m2mtest_fmt *fmt;
+ struct m2mtest_ctx *ctx = priv;
+
+ fmt = find_format(f);
+ if (!fmt || !(fmt->types & MEM2MEM_OUTPUT)) {
+ v4l2_err(&ctx->dev->v4l2_dev,
+ "Fourcc format (0x%08x) invalid.\n",
+ f->fmt.pix.pixelformat);
+ return -EINVAL;
+ }
+
+ return vidioc_try_fmt(f, fmt);
+}
+
+static int vidioc_s_fmt(struct m2mtest_ctx *ctx, struct v4l2_format *f)
+{
+ struct m2mtest_q_data *q_data;
+ struct videobuf_queue *vq;
+ int ret = 0;
+
+ vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+ if (!vq)
+ return -EINVAL;
+
+ q_data = get_q_data(f->type);
+ if (!q_data)
+ return -EINVAL;
+
+ mutex_lock(&vq->vb_lock);
+
+ if (videobuf_queue_is_busy(vq)) {
+ v4l2_err(&ctx->dev->v4l2_dev, "%s queue busy\n", __func__);
+ ret = -EBUSY;
+ goto out;
+ }
+
+ q_data->fmt = find_format(f);
+ q_data->width = f->fmt.pix.width;
+ q_data->height = f->fmt.pix.height;
+ q_data->sizeimage = q_data->width * q_data->height
+ * q_data->fmt->depth >> 3;
+ vq->field = f->fmt.pix.field;
+
+ dprintk(ctx->dev,
+ "Setting format for type %d, wxh: %dx%d, fmt: %d\n",
+ f->type, q_data->width, q_data->height, q_data->fmt->fourcc);
+
+out:
+ mutex_unlock(&vq->vb_lock);
+ return ret;
+}
+
+static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ int ret;
+
+ ret = vidioc_try_fmt_vid_cap(file, priv, f);
+ if (ret)
+ return ret;
+
+ return vidioc_s_fmt(priv, f);
+}
+
+static int vidioc_s_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ int ret;
+
+ ret = vidioc_try_fmt_vid_out(file, priv, f);
+ if (ret)
+ return ret;
+
+ return vidioc_s_fmt(priv, f);
+}
+
+static int vidioc_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *reqbufs)
+{
+ struct m2mtest_ctx *ctx = priv;
+
+ return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
+}
+
+static int vidioc_querybuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct m2mtest_ctx *ctx = priv;
+
+ return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf);
+}
+
+static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
+{
+ struct m2mtest_ctx *ctx = priv;
+
+ return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
+}
+
+static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
+{
+ struct m2mtest_ctx *ctx = priv;
+
+ return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
+}
+
+static int vidioc_streamon(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct m2mtest_ctx *ctx = priv;
+
+ return v4l2_m2m_streamon(file, ctx->m2m_ctx, type);
+}
+
+static int vidioc_streamoff(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct m2mtest_ctx *ctx = priv;
+
+ return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type);
+}
+
+static int vidioc_queryctrl(struct file *file, void *priv,
+ struct v4l2_queryctrl *qc)
+{
+ struct v4l2_queryctrl *c;
+
+ c = get_ctrl(qc->id);
+ if (!c)
+ return -EINVAL;
+
+ *qc = *c;
+ return 0;
+}
+
+static int vidioc_g_ctrl(struct file *file, void *priv,
+ struct v4l2_control *ctrl)
+{
+ struct m2mtest_ctx *ctx = priv;
+
+ switch (ctrl->id) {
+ case V4L2_CID_TRANS_TIME_MSEC:
+ ctrl->value = ctx->transtime;
+ break;
+
+ case V4L2_CID_TRANS_NUM_BUFS:
+ ctrl->value = ctx->translen;
+ break;
+
+ default:
+ v4l2_err(&ctx->dev->v4l2_dev, "Invalid control\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int check_ctrl_val(struct m2mtest_ctx *ctx, struct v4l2_control *ctrl)
+{
+ struct v4l2_queryctrl *c;
+
+ c = get_ctrl(ctrl->id);
+ if (!c)
+ return -EINVAL;
+
+ if (ctrl->value < c->minimum || ctrl->value > c->maximum) {
+ v4l2_err(&ctx->dev->v4l2_dev, "Value out of range\n");
+ return -ERANGE;
+ }
+
+ return 0;
+}
+
+static int vidioc_s_ctrl(struct file *file, void *priv,
+ struct v4l2_control *ctrl)
+{
+ struct m2mtest_ctx *ctx = priv;
+ int ret = 0;
+
+ ret = check_ctrl_val(ctx, ctrl);
+ if (ret != 0)
+ return ret;
+
+ switch (ctrl->id) {
+ case V4L2_CID_TRANS_TIME_MSEC:
+ ctx->transtime = ctrl->value;
+ break;
+
+ case V4L2_CID_TRANS_NUM_BUFS:
+ ctx->translen = ctrl->value;
+ break;
+
+ default:
+ v4l2_err(&ctx->dev->v4l2_dev, "Invalid control\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+
+static const struct v4l2_ioctl_ops m2mtest_ioctl_ops = {
+ .vidioc_querycap = vidioc_querycap,
+
+ .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
+
+ .vidioc_enum_fmt_vid_out = vidioc_enum_fmt_vid_out,
+ .vidioc_g_fmt_vid_out = vidioc_g_fmt_vid_out,
+ .vidioc_try_fmt_vid_out = vidioc_try_fmt_vid_out,
+ .vidioc_s_fmt_vid_out = vidioc_s_fmt_vid_out,
+
+ .vidioc_reqbufs = vidioc_reqbufs,
+ .vidioc_querybuf = vidioc_querybuf,
+
+ .vidioc_qbuf = vidioc_qbuf,
+ .vidioc_dqbuf = vidioc_dqbuf,
+
+ .vidioc_streamon = vidioc_streamon,
+ .vidioc_streamoff = vidioc_streamoff,
+
+ .vidioc_queryctrl = vidioc_queryctrl,
+ .vidioc_g_ctrl = vidioc_g_ctrl,
+ .vidioc_s_ctrl = vidioc_s_ctrl,
+};
+
+
+/*
+ * Queue operations
+ */
+
+static void m2mtest_buf_release(struct videobuf_queue *vq,
+ struct videobuf_buffer *vb)
+{
+ struct m2mtest_ctx *ctx = vq->priv_data;
+
+ dprintk(ctx->dev, "type: %d, index: %d, state: %d\n",
+ vq->type, vb->i, vb->state);
+
+ videobuf_vmalloc_free(vb);
+ vb->state = VIDEOBUF_NEEDS_INIT;
+}
+
+static int m2mtest_buf_setup(struct videobuf_queue *vq, unsigned int *count,
+ unsigned int *size)
+{
+ struct m2mtest_ctx *ctx = vq->priv_data;
+ struct m2mtest_q_data *q_data;
+
+ q_data = get_q_data(vq->type);
+
+ *size = q_data->width * q_data->height * q_data->fmt->depth >> 3;
+ dprintk(ctx->dev, "size:%d, w/h %d/%d, depth: %d\n",
+ *size, q_data->width, q_data->height, q_data->fmt->depth);
+
+ if (0 == *count)
+ *count = MEM2MEM_DEF_NUM_BUFS;
+
+ while (*size * *count > MEM2MEM_VID_MEM_LIMIT)
+ (*count)--;
+
+ v4l2_info(&ctx->dev->v4l2_dev,
+ "%d buffers of size %d set up.\n", *count, *size);
+
+ return 0;
+}
+
+static int m2mtest_buf_prepare(struct videobuf_queue *vq,
+ struct videobuf_buffer *vb,
+ enum v4l2_field field)
+{
+ struct m2mtest_ctx *ctx = vq->priv_data;
+ struct m2mtest_q_data *q_data;
+ int ret;
+
+ dprintk(ctx->dev, "type: %d, index: %d, state: %d\n",
+ vq->type, vb->i, vb->state);
+
+ q_data = get_q_data(vq->type);
+
+ if (vb->baddr) {
+ /* User-provided buffer */
+ if (vb->bsize < q_data->sizeimage) {
+ /* Buffer too small to fit a frame */
+ v4l2_err(&ctx->dev->v4l2_dev,
+ "User-provided buffer too small\n");
+ return -EINVAL;
+ }
+ } else if (vb->state != VIDEOBUF_NEEDS_INIT
+ && vb->bsize < q_data->sizeimage) {
+ /* We provide the buffer, but it's already been initialized
+ * and is too small */
+ return -EINVAL;
+ }
+
+ vb->width = q_data->width;
+ vb->height = q_data->height;
+ vb->bytesperline = (q_data->width * q_data->fmt->depth) >> 3;
+ vb->size = q_data->sizeimage;
+ vb->field = field;
+
+ if (VIDEOBUF_NEEDS_INIT == vb->state) {
+ ret = videobuf_iolock(vq, vb, NULL);
+ if (ret) {
+ v4l2_err(&ctx->dev->v4l2_dev,
+ "Iolock failed\n");
+ goto fail;
+ }
+ }
+
+ vb->state = VIDEOBUF_PREPARED;
+
+ return 0;
+fail:
+ m2mtest_buf_release(vq, vb);
+ return ret;
+}
+
+static void m2mtest_buf_queue(struct videobuf_queue *vq,
+ struct videobuf_buffer *vb)
+{
+ struct m2mtest_ctx *ctx = vq->priv_data;
+
+ v4l2_m2m_buf_queue(ctx->m2m_ctx, vq, vb);
+}
+
+static struct videobuf_queue_ops m2mtest_qops = {
+ .buf_setup = m2mtest_buf_setup,
+ .buf_prepare = m2mtest_buf_prepare,
+ .buf_queue = m2mtest_buf_queue,
+ .buf_release = m2mtest_buf_release,
+};
+
+static void queue_init(void *priv, struct videobuf_queue *vq,
+ enum v4l2_buf_type type)
+{
+ struct m2mtest_ctx *ctx = priv;
+
+ videobuf_queue_vmalloc_init(vq, &m2mtest_qops, ctx->dev->v4l2_dev.dev,
+ &ctx->dev->irqlock, type, V4L2_FIELD_NONE,
+ sizeof(struct m2mtest_buffer), priv);
+}
+
+
+/*
+ * File operations
+ */
+static int m2mtest_open(struct file *file)
+{
+ struct m2mtest_dev *dev = video_drvdata(file);
+ struct m2mtest_ctx *ctx = NULL;
+
+ ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ file->private_data = ctx;
+ ctx->dev = dev;
+ ctx->translen = MEM2MEM_DEF_TRANSLEN;
+ ctx->transtime = MEM2MEM_DEF_TRANSTIME;
+ ctx->num_processed = 0;
+
+ ctx->m2m_ctx = v4l2_m2m_ctx_init(ctx, dev->m2m_dev, queue_init);
+ if (IS_ERR(ctx->m2m_ctx)) {
+ int ret = PTR_ERR(ctx->m2m_ctx);
+
+ kfree(ctx);
+ return ret;
+ }
+
+ atomic_inc(&dev->num_inst);
+
+ dprintk(dev, "Created instance %p, m2m_ctx: %p\n", ctx, ctx->m2m_ctx);
+
+ return 0;
+}
+
+static int m2mtest_release(struct file *file)
+{
+ struct m2mtest_dev *dev = video_drvdata(file);
+ struct m2mtest_ctx *ctx = file->private_data;
+
+ dprintk(dev, "Releasing instance %p\n", ctx);
+
+ v4l2_m2m_ctx_release(ctx->m2m_ctx);
+ kfree(ctx);
+
+ atomic_dec(&dev->num_inst);
+
+ return 0;
+}
+
+static unsigned int m2mtest_poll(struct file *file,
+ struct poll_table_struct *wait)
+{
+ struct m2mtest_ctx *ctx = (struct m2mtest_ctx *)file->private_data;
+
+ return v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
+}
+
+static int m2mtest_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct m2mtest_ctx *ctx = (struct m2mtest_ctx *)file->private_data;
+
+ return v4l2_m2m_mmap(file, ctx->m2m_ctx, vma);
+}
+
+static const struct v4l2_file_operations m2mtest_fops = {
+ .owner = THIS_MODULE,
+ .open = m2mtest_open,
+ .release = m2mtest_release,
+ .poll = m2mtest_poll,
+ .ioctl = video_ioctl2,
+ .mmap = m2mtest_mmap,
+};
+
+static struct video_device m2mtest_videodev = {
+ .name = MEM2MEM_NAME,
+ .fops = &m2mtest_fops,
+ .ioctl_ops = &m2mtest_ioctl_ops,
+ .minor = -1,
+ .release = video_device_release,
+};
+
+static struct v4l2_m2m_ops m2m_ops = {
+ .device_run = device_run,
+ .job_ready = job_ready,
+ .job_abort = job_abort,
+};
+
+static int m2mtest_probe(struct platform_device *pdev)
+{
+ struct m2mtest_dev *dev;
+ struct video_device *vfd;
+ int ret;
+
+ dev = kzalloc(sizeof *dev, GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ spin_lock_init(&dev->irqlock);
+
+ ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
+ if (ret)
+ goto free_dev;
+
+ atomic_set(&dev->num_inst, 0);
+ mutex_init(&dev->dev_mutex);
+
+ vfd = video_device_alloc();
+ if (!vfd) {
+ v4l2_err(&dev->v4l2_dev, "Failed to allocate video device\n");
+ ret = -ENOMEM;
+ goto unreg_dev;
+ }
+
+ *vfd = m2mtest_videodev;
+
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
+ goto rel_vdev;
+ }
+
+ video_set_drvdata(vfd, dev);
+ snprintf(vfd->name, sizeof(vfd->name), "%s", m2mtest_videodev.name);
+ dev->vfd = vfd;
+ v4l2_info(&dev->v4l2_dev, MEM2MEM_TEST_MODULE_NAME
+ "Device registered as /dev/video%d\n", vfd->num);
+
+ setup_timer(&dev->timer, device_isr, (long)dev);
+ platform_set_drvdata(pdev, dev);
+
+ dev->m2m_dev = v4l2_m2m_init(&m2m_ops);
+ if (IS_ERR(dev->m2m_dev)) {
+ v4l2_err(&dev->v4l2_dev, "Failed to init mem2mem device\n");
+ ret = PTR_ERR(dev->m2m_dev);
+ goto err_m2m;
+ }
+
+ return 0;
+
+err_m2m:
+ video_unregister_device(dev->vfd);
+rel_vdev:
+ video_device_release(vfd);
+unreg_dev:
+ v4l2_device_unregister(&dev->v4l2_dev);
+free_dev:
+ kfree(dev);
+
+ return ret;
+}
+
+static int m2mtest_remove(struct platform_device *pdev)
+{
+ struct m2mtest_dev *dev =
+ (struct m2mtest_dev *)platform_get_drvdata(pdev);
+
+ v4l2_info(&dev->v4l2_dev, "Removing " MEM2MEM_TEST_MODULE_NAME);
+ v4l2_m2m_release(dev->m2m_dev);
+ del_timer_sync(&dev->timer);
+ video_unregister_device(dev->vfd);
+ v4l2_device_unregister(&dev->v4l2_dev);
+ kfree(dev);
+
+ return 0;
+}
+
+static struct platform_driver m2mtest_pdrv = {
+ .probe = m2mtest_probe,
+ .remove = m2mtest_remove,
+ .driver = {
+ .name = MEM2MEM_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static void __exit m2mtest_exit(void)
+{
+ platform_driver_unregister(&m2mtest_pdrv);
+ platform_device_unregister(&m2mtest_pdev);
+}
+
+static int __init m2mtest_init(void)
+{
+ int ret;
+
+ ret = platform_device_register(&m2mtest_pdev);
+ if (ret)
+ return ret;
+
+ ret = platform_driver_register(&m2mtest_pdrv);
+ if (ret)
+ platform_device_unregister(&m2mtest_pdev);
+
+ return ret;
+}
+
+module_init(m2mtest_init);
+module_exit(m2mtest_exit);
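+
+/*
+ * Usage sketch (not part of the driver): once loaded, the test device is
+ * exposed as a V4L2 mem2mem node (/dev/videoN, the number depends on the
+ * system). A test application is expected to queue OUTPUT buffers holding
+ * source data, queue CAPTURE buffers for the result, and stream both queues;
+ * the transaction length and per-transaction processing time can be tuned
+ * through the V4L2_CID_TRANS_NUM_BUFS and V4L2_CID_TRANS_TIME_MSEC controls.
+ */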
+
diff --git a/drivers/media/video/meye.c b/drivers/media/video/meye.c
index 4404e5ef818f..2be23bccd3c8 100644
--- a/drivers/media/video/meye.c
+++ b/drivers/media/video/meye.c
@@ -30,9 +30,10 @@
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/init.h>
-#include <linux/videodev.h>
#include <linux/gfp.h>
+#include <linux/videodev2.h>
#include <media/v4l2-common.h>
+#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <asm/uaccess.h>
#include <asm/io.h>
@@ -1168,22 +1169,22 @@ static int vidioc_s_ctrl(struct file *file, void *fh, struct v4l2_control *c)
case V4L2_CID_BRIGHTNESS:
sony_pic_camera_command(
SONY_PIC_COMMAND_SETCAMERABRIGHTNESS, c->value);
- meye.picture.brightness = c->value << 10;
+ meye.brightness = c->value << 10;
break;
case V4L2_CID_HUE:
sony_pic_camera_command(
SONY_PIC_COMMAND_SETCAMERAHUE, c->value);
- meye.picture.hue = c->value << 10;
+ meye.hue = c->value << 10;
break;
case V4L2_CID_CONTRAST:
sony_pic_camera_command(
SONY_PIC_COMMAND_SETCAMERACONTRAST, c->value);
- meye.picture.contrast = c->value << 10;
+ meye.contrast = c->value << 10;
break;
case V4L2_CID_SATURATION:
sony_pic_camera_command(
SONY_PIC_COMMAND_SETCAMERACOLOR, c->value);
- meye.picture.colour = c->value << 10;
+ meye.colour = c->value << 10;
break;
case V4L2_CID_AGC:
sony_pic_camera_command(
@@ -1221,16 +1222,16 @@ static int vidioc_g_ctrl(struct file *file, void *fh, struct v4l2_control *c)
mutex_lock(&meye.lock);
switch (c->id) {
case V4L2_CID_BRIGHTNESS:
- c->value = meye.picture.brightness >> 10;
+ c->value = meye.brightness >> 10;
break;
case V4L2_CID_HUE:
- c->value = meye.picture.hue >> 10;
+ c->value = meye.hue >> 10;
break;
case V4L2_CID_CONTRAST:
- c->value = meye.picture.contrast >> 10;
+ c->value = meye.contrast >> 10;
break;
case V4L2_CID_SATURATION:
- c->value = meye.picture.colour >> 10;
+ c->value = meye.colour >> 10;
break;
case V4L2_CID_AGC:
c->value = meye.params.agc;
@@ -1729,6 +1730,7 @@ static int meye_resume(struct pci_dev *pdev)
static int __devinit meye_probe(struct pci_dev *pcidev,
const struct pci_device_id *ent)
{
+ struct v4l2_device *v4l2_dev = &meye.v4l2_dev;
int ret = -EBUSY;
unsigned long mchip_adr;
@@ -1737,70 +1739,75 @@ static int __devinit meye_probe(struct pci_dev *pcidev,
goto outnotdev;
}
+ ret = v4l2_device_register(&pcidev->dev, v4l2_dev);
+ if (ret < 0) {
+ v4l2_err(v4l2_dev, "Could not register v4l2_device\n");
+ return ret;
+ }
ret = -ENOMEM;
meye.mchip_dev = pcidev;
- meye.video_dev = video_device_alloc();
- if (!meye.video_dev) {
- printk(KERN_ERR "meye: video_device_alloc() failed!\n");
+ meye.vdev = video_device_alloc();
+ if (!meye.vdev) {
+ v4l2_err(v4l2_dev, "video_device_alloc() failed!\n");
goto outnotdev;
}
meye.grab_temp = vmalloc(MCHIP_NB_PAGES_MJPEG * PAGE_SIZE);
if (!meye.grab_temp) {
- printk(KERN_ERR "meye: grab buffer allocation failed\n");
+ v4l2_err(v4l2_dev, "grab buffer allocation failed\n");
goto outvmalloc;
}
spin_lock_init(&meye.grabq_lock);
if (kfifo_alloc(&meye.grabq, sizeof(int) * MEYE_MAX_BUFNBRS,
GFP_KERNEL)) {
- printk(KERN_ERR "meye: fifo allocation failed\n");
+ v4l2_err(v4l2_dev, "fifo allocation failed\n");
goto outkfifoalloc1;
}
spin_lock_init(&meye.doneq_lock);
if (kfifo_alloc(&meye.doneq, sizeof(int) * MEYE_MAX_BUFNBRS,
GFP_KERNEL)) {
- printk(KERN_ERR "meye: fifo allocation failed\n");
+ v4l2_err(v4l2_dev, "fifo allocation failed\n");
goto outkfifoalloc2;
}
- memcpy(meye.video_dev, &meye_template, sizeof(meye_template));
- meye.video_dev->parent = &meye.mchip_dev->dev;
+ memcpy(meye.vdev, &meye_template, sizeof(meye_template));
+ meye.vdev->v4l2_dev = &meye.v4l2_dev;
ret = -EIO;
if ((ret = sony_pic_camera_command(SONY_PIC_COMMAND_SETCAMERA, 1))) {
- printk(KERN_ERR "meye: unable to power on the camera\n");
- printk(KERN_ERR "meye: did you enable the camera in "
+ v4l2_err(v4l2_dev, "meye: unable to power on the camera\n");
+ v4l2_err(v4l2_dev, "meye: did you enable the camera in "
"sonypi using the module options ?\n");
goto outsonypienable;
}
if ((ret = pci_enable_device(meye.mchip_dev))) {
- printk(KERN_ERR "meye: pci_enable_device failed\n");
+ v4l2_err(v4l2_dev, "meye: pci_enable_device failed\n");
goto outenabledev;
}
mchip_adr = pci_resource_start(meye.mchip_dev,0);
if (!mchip_adr) {
- printk(KERN_ERR "meye: mchip has no device base address\n");
+ v4l2_err(v4l2_dev, "meye: mchip has no device base address\n");
goto outregions;
}
if (!request_mem_region(pci_resource_start(meye.mchip_dev, 0),
pci_resource_len(meye.mchip_dev, 0),
"meye")) {
- printk(KERN_ERR "meye: request_mem_region failed\n");
+ v4l2_err(v4l2_dev, "meye: request_mem_region failed\n");
goto outregions;
}
meye.mchip_mmregs = ioremap(mchip_adr, MCHIP_MM_REGS);
if (!meye.mchip_mmregs) {
- printk(KERN_ERR "meye: ioremap failed\n");
+ v4l2_err(v4l2_dev, "meye: ioremap failed\n");
goto outremap;
}
meye.mchip_irq = pcidev->irq;
if (request_irq(meye.mchip_irq, meye_irq,
IRQF_DISABLED | IRQF_SHARED, "meye", meye_irq)) {
- printk(KERN_ERR "meye: request_irq failed\n");
+ v4l2_err(v4l2_dev, "request_irq failed\n");
goto outreqirq;
}
@@ -1824,21 +1831,18 @@ static int __devinit meye_probe(struct pci_dev *pcidev,
msleep(1);
mchip_set(MCHIP_MM_INTA, MCHIP_MM_INTA_HIC_1_MASK);
- if (video_register_device(meye.video_dev, VFL_TYPE_GRABBER,
+ if (video_register_device(meye.vdev, VFL_TYPE_GRABBER,
video_nr) < 0) {
- printk(KERN_ERR "meye: video_register_device failed\n");
+ v4l2_err(v4l2_dev, "video_register_device failed\n");
goto outvideoreg;
}
mutex_init(&meye.lock);
init_waitqueue_head(&meye.proc_list);
- meye.picture.depth = 16;
- meye.picture.palette = VIDEO_PALETTE_YUV422;
- meye.picture.brightness = 32 << 10;
- meye.picture.hue = 32 << 10;
- meye.picture.colour = 32 << 10;
- meye.picture.contrast = 32 << 10;
- meye.picture.whiteness = 0;
+ meye.brightness = 32 << 10;
+ meye.hue = 32 << 10;
+ meye.colour = 32 << 10;
+ meye.contrast = 32 << 10;
meye.params.subsample = 0;
meye.params.quality = 8;
meye.params.sharpness = 32;
@@ -1854,9 +1858,9 @@ static int __devinit meye_probe(struct pci_dev *pcidev,
sony_pic_camera_command(SONY_PIC_COMMAND_SETCAMERAPICTURE, 0);
sony_pic_camera_command(SONY_PIC_COMMAND_SETCAMERAAGC, 48);
- printk(KERN_INFO "meye: Motion Eye Camera Driver v%s.\n",
+ v4l2_info(v4l2_dev, "Motion Eye Camera Driver v%s.\n",
MEYE_DRIVER_VERSION);
- printk(KERN_INFO "meye: mchip KL5A72002 rev. %d, base %lx, irq %d\n",
+ v4l2_info(v4l2_dev, "mchip KL5A72002 rev. %d, base %lx, irq %d\n",
meye.mchip_dev->revision, mchip_adr, meye.mchip_irq);
return 0;
@@ -1879,14 +1883,14 @@ outkfifoalloc2:
outkfifoalloc1:
vfree(meye.grab_temp);
outvmalloc:
- video_device_release(meye.video_dev);
+ video_device_release(meye.vdev);
outnotdev:
return ret;
}
static void __devexit meye_remove(struct pci_dev *pcidev)
{
- video_unregister_device(meye.video_dev);
+ video_unregister_device(meye.vdev);
mchip_hic_stop();
diff --git a/drivers/media/video/meye.h b/drivers/media/video/meye.h
index 1321ad5d6597..4bdeb03f1644 100644
--- a/drivers/media/video/meye.h
+++ b/drivers/media/video/meye.h
@@ -31,7 +31,7 @@
#define _MEYE_PRIV_H_
#define MEYE_DRIVER_MAJORVERSION 1
-#define MEYE_DRIVER_MINORVERSION 13
+#define MEYE_DRIVER_MINORVERSION 14
#define MEYE_DRIVER_VERSION __stringify(MEYE_DRIVER_MAJORVERSION) "." \
__stringify(MEYE_DRIVER_MINORVERSION)
@@ -289,6 +289,7 @@ struct meye_grab_buffer {
/* Motion Eye device structure */
struct meye {
+ struct v4l2_device v4l2_dev; /* Main v4l2_device struct */
struct pci_dev *mchip_dev; /* pci device */
u8 mchip_irq; /* irq */
u8 mchip_mode; /* actual mchip mode: HIC_MODE... */
@@ -308,8 +309,11 @@ struct meye {
struct kfifo doneq; /* queue for grabbed buffers */
spinlock_t doneq_lock; /* lock protecting the queue */
wait_queue_head_t proc_list; /* wait queue */
- struct video_device *video_dev; /* video device parameters */
- struct video_picture picture; /* video picture parameters */
+ struct video_device *vdev; /* video device parameters */
+ u16 brightness;
+ u16 hue;
+ u16 contrast;
+ u16 colour;
struct meye_params params; /* additional parameters */
unsigned long in_use; /* set to 1 if the device is in use */
#ifdef CONFIG_PM
diff --git a/drivers/media/video/mt9t031.c b/drivers/media/video/mt9t031.c
index a9061bff79b2..78b4e091d2d5 100644
--- a/drivers/media/video/mt9t031.c
+++ b/drivers/media/video/mt9t031.c
@@ -8,14 +8,16 @@
* published by the Free Software Foundation.
*/
-#include <linux/videodev2.h>
-#include <linux/slab.h>
+#include <linux/device.h>
#include <linux/i2c.h>
#include <linux/log2.h>
+#include <linux/pm.h>
+#include <linux/slab.h>
+#include <linux/videodev2.h>
-#include <media/v4l2-subdev.h>
-#include <media/v4l2-chip-ident.h>
#include <media/soc_camera.h>
+#include <media/v4l2-chip-ident.h>
+#include <media/v4l2-subdev.h>
/*
* mt9t031 i2c address 0x5d
@@ -681,12 +683,66 @@ static int mt9t031_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
}
/*
+ * Power Management:
+ * This function does nothing for now but must be present for pm to work
+ */
+static int mt9t031_runtime_suspend(struct device *dev)
+{
+ return 0;
+}
+
+/*
+ * Power Management:
+ * COLUMN_ADDRESS_MODE and ROW_ADDRESS_MODE are not rewritten if unchanged;
+ * they are, however, changed at reset if the platform hook is present,
+ * so we rewrite them with the values stored by the driver
+ */
+static int mt9t031_runtime_resume(struct device *dev)
+{
+ struct video_device *vdev = to_video_device(dev);
+ struct soc_camera_device *icd = container_of(vdev->parent,
+ struct soc_camera_device, dev);
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ struct i2c_client *client = sd->priv;
+ struct mt9t031 *mt9t031 = to_mt9t031(client);
+
+ int ret;
+ u16 xbin, ybin;
+
+ xbin = min(mt9t031->xskip, (u16)3);
+ ybin = min(mt9t031->yskip, (u16)3);
+
+ ret = reg_write(client, MT9T031_COLUMN_ADDRESS_MODE,
+ ((xbin - 1) << 4) | (mt9t031->xskip - 1));
+ if (ret < 0)
+ return ret;
+
+ ret = reg_write(client, MT9T031_ROW_ADDRESS_MODE,
+ ((ybin - 1) << 4) | (mt9t031->yskip - 1));
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static struct dev_pm_ops mt9t031_dev_pm_ops = {
+ .runtime_suspend = mt9t031_runtime_suspend,
+ .runtime_resume = mt9t031_runtime_resume,
+};
+
+static struct device_type mt9t031_dev_type = {
+ .name = "MT9T031",
+ .pm = &mt9t031_dev_pm_ops,
+};
+
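+/*
+ * The device_type above is attached to the soc-camera video device in
+ * mt9t031_video_probe() below, so the runtime PM callbacks operate on that
+ * device rather than on the I2C client itself.
+ */
+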
+/*
* Interface active, can use i2c. If it fails, it can indeed mean, that
* this wasn't our capture interface, so, we wait for the right one
*/
static int mt9t031_video_probe(struct i2c_client *client)
{
struct mt9t031 *mt9t031 = to_mt9t031(client);
+ struct video_device *vdev = soc_camera_i2c_to_vdev(client);
s32 data;
int ret;
@@ -712,6 +768,8 @@ static int mt9t031_video_probe(struct i2c_client *client)
ret = mt9t031_idle(client);
if (ret < 0)
dev_err(&client->dev, "Failed to initialise the camera\n");
+ else
+ vdev->dev.type = &mt9t031_dev_type;
/* mt9t031_idle() has reset the chip to default. */
mt9t031->exposure = 255;
diff --git a/drivers/media/video/mt9v022.c b/drivers/media/video/mt9v022.c
index 1a34d2993e94..e5bae4c9393b 100644
--- a/drivers/media/video/mt9v022.c
+++ b/drivers/media/video/mt9v022.c
@@ -325,7 +325,7 @@ static int mt9v022_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
if (ret < 0)
return ret;
- dev_dbg(&client->dev, "Frame %ux%u pixel\n", rect.width, rect.height);
+ dev_dbg(&client->dev, "Frame %dx%d pixel\n", rect.width, rect.height);
mt9v022->rect = rect;
diff --git a/drivers/media/video/mx1_camera.c b/drivers/media/video/mx1_camera.c
index 34a66019190e..5c17f9ec3d7c 100644
--- a/drivers/media/video/mx1_camera.c
+++ b/drivers/media/video/mx1_camera.c
@@ -139,8 +139,8 @@ static int mx1_videobuf_setup(struct videobuf_queue *vq, unsigned int *count,
if (!*count)
*count = 32;
- while (*size * *count > MAX_VIDEO_MEM * 1024 * 1024)
- (*count)--;
+ if (*size * *count > MAX_VIDEO_MEM * 1024 * 1024)
+ *count = (MAX_VIDEO_MEM * 1024 * 1024) / *size;
dev_dbg(icd->dev.parent, "count=%d, size=%d\n", *count, *size);
diff --git a/drivers/media/video/mx3_camera.c b/drivers/media/video/mx3_camera.c
index bd297f567dc7..d477e3058002 100644
--- a/drivers/media/video/mx3_camera.c
+++ b/drivers/media/video/mx3_camera.c
@@ -796,7 +796,7 @@ static int acquire_dma_channel(struct mx3_camera_dev *mx3_cam)
* FIXME: learn to use stride != width, then we can keep stride properly aligned
* and support arbitrary (even) widths.
*/
-static inline void stride_align(__s32 *width)
+static inline void stride_align(__u32 *width)
{
if (((*width + 7) & ~7) < 4096)
*width = (*width + 7) & ~7;
@@ -844,7 +844,7 @@ static int mx3_camera_set_crop(struct soc_camera_device *icd,
* So far only direct camera-to-memory is supported
*/
if (channel_change_requested(icd, rect)) {
- int ret = acquire_dma_channel(mx3_cam);
+ ret = acquire_dma_channel(mx3_cam);
if (ret < 0)
return ret;
}
diff --git a/drivers/media/video/omap/Kconfig b/drivers/media/video/omap/Kconfig
new file mode 100644
index 000000000000..97c53949ca89
--- /dev/null
+++ b/drivers/media/video/omap/Kconfig
@@ -0,0 +1,11 @@
+config VIDEO_OMAP2_VOUT
+ tristate "OMAP2/OMAP3 V4L2-Display driver"
+ depends on ARCH_OMAP24XX || ARCH_OMAP34XX
+ select VIDEOBUF_GEN
+ select VIDEOBUF_DMA_SG
+ select OMAP2_DSS
+ select OMAP2_VRAM
+ select OMAP2_VRFB
+ default n
+ ---help---
+ V4L2 Display driver support for OMAP2/3 based boards.
diff --git a/drivers/media/video/omap/Makefile b/drivers/media/video/omap/Makefile
new file mode 100644
index 000000000000..b8bab00ad010
--- /dev/null
+++ b/drivers/media/video/omap/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the omap video device drivers.
+#
+
+# OMAP2/3 Display driver
+omap-vout-mod-objs := omap_vout.o omap_voutlib.o
+obj-$(CONFIG_VIDEO_OMAP2_VOUT) += omap-vout-mod.o
diff --git a/drivers/media/video/omap/omap_vout.c b/drivers/media/video/omap/omap_vout.c
new file mode 100644
index 000000000000..4c0ab499228b
--- /dev/null
+++ b/drivers/media/video/omap/omap_vout.c
@@ -0,0 +1,2643 @@
+/*
+ * omap_vout.c
+ *
+ * Copyright (C) 2005-2010 Texas Instruments.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ * Leveraged code from the OMAP2 camera driver
+ * Video-for-Linux (Version 2) camera capture driver for
+ * the OMAP24xx camera controller.
+ *
+ * Author: Andy Lowe (source@mvista.com)
+ *
+ * Copyright (C) 2004 MontaVista Software, Inc.
+ * Copyright (C) 2010 Texas Instruments.
+ *
+ * History:
+ * 20-APR-2006 Khasim Modified VRFB based Rotation,
+ * The image data is always read from 0 degree
+ * view and written
+ * to the virtual space of desired rotation angle
+ * 4-DEC-2006 Jian Changed to support better memory management
+ *
+ * 17-Nov-2008 Hardik Changed driver to use video_ioctl2
+ *
+ * 23-Feb-2010 Vaibhav H Modified to use new DSS2 interface
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/irq.h>
+#include <linux/videodev2.h>
+
+#include <media/videobuf-dma-sg.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+
+#include <plat/dma.h>
+#include <plat/vram.h>
+#include <plat/vrfb.h>
+#include <plat/display.h>
+
+#include "omap_voutlib.h"
+#include "omap_voutdef.h"
+
+MODULE_AUTHOR("Texas Instruments");
+MODULE_DESCRIPTION("OMAP Video for Linux Video out driver");
+MODULE_LICENSE("GPL");
+
+
+/* Driver Configuration macros */
+#define VOUT_NAME "omap_vout"
+
+enum omap_vout_channels {
+ OMAP_VIDEO1,
+ OMAP_VIDEO2,
+};
+
+enum dma_channel_state {
+ DMA_CHAN_NOT_ALLOTED,
+ DMA_CHAN_ALLOTED,
+};
+
+#define QQVGA_WIDTH 160
+#define QQVGA_HEIGHT 120
+
+/* Max Resolution supported by the driver */
+#define VID_MAX_WIDTH 1280 /* Largest width */
+#define VID_MAX_HEIGHT 720 /* Largest height */
+
+/* Minimum requirement is 2x2 for DSS */
+#define VID_MIN_WIDTH 2
+#define VID_MIN_HEIGHT 2
+
+/* 2048 x 2048 is max res supported by OMAP display controller */
+#define MAX_PIXELS_PER_LINE 2048
+
+#define VRFB_TX_TIMEOUT 1000
+#define VRFB_NUM_BUFS 4
+
+/* Max buffer size to be allocated during init */
+#define OMAP_VOUT_MAX_BUF_SIZE (VID_MAX_WIDTH*VID_MAX_HEIGHT*4)
+
+static struct videobuf_queue_ops video_vbq_ops;
+/* Variables configurable through module params */
+static u32 video1_numbuffers = 3;
+static u32 video2_numbuffers = 3;
+static u32 video1_bufsize = OMAP_VOUT_MAX_BUF_SIZE;
+static u32 video2_bufsize = OMAP_VOUT_MAX_BUF_SIZE;
+static u32 vid1_static_vrfb_alloc;
+static u32 vid2_static_vrfb_alloc;
+static int debug;
+
+/* Module parameters */
+module_param(video1_numbuffers, uint, S_IRUGO);
+MODULE_PARM_DESC(video1_numbuffers,
+ "Number of buffers to be allocated at init time for Video1 device.");
+
+module_param(video2_numbuffers, uint, S_IRUGO);
+MODULE_PARM_DESC(video2_numbuffers,
+ "Number of buffers to be allocated at init time for Video2 device.");
+
+module_param(video1_bufsize, uint, S_IRUGO);
+MODULE_PARM_DESC(video1_bufsize,
+ "Size of the buffer to be allocated for video1 device");
+
+module_param(video2_bufsize, uint, S_IRUGO);
+MODULE_PARM_DESC(video2_bufsize,
+ "Size of the buffer to be allocated for video2 device");
+
+module_param(vid1_static_vrfb_alloc, bool, S_IRUGO);
+MODULE_PARM_DESC(vid1_static_vrfb_alloc,
+ "Static allocation of the VRFB buffer for video1 device");
+
+module_param(vid2_static_vrfb_alloc, bool, S_IRUGO);
+MODULE_PARM_DESC(vid2_static_vrfb_alloc,
+ "Static allocation of the VRFB buffer for video2 device");
+
+module_param(debug, bool, S_IRUGO);
+MODULE_PARM_DESC(debug, "Debug level (0-1)");
+
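+/*
+ * Example (hypothetical values): preallocating two 1 MB buffers per video
+ * device at module load time could look like
+ *   modprobe <module> video1_numbuffers=2 video1_bufsize=1048576 \
+ *           video2_numbuffers=2 video2_bufsize=1048576
+ * where <module> is the name this driver is built as (see the Makefile).
+ */
+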
+/* list of image formats supported by OMAP2 video pipelines */
+static const struct v4l2_fmtdesc omap_formats[] = {
+ {
+ /* Note: V4L2 defines RGB565 as:
+ *
+ * Byte 0 Byte 1
+ * g2 g1 g0 r4 r3 r2 r1 r0 b4 b3 b2 b1 b0 g5 g4 g3
+ *
+ * We interpret RGB565 as:
+ *
+ * Byte 0 Byte 1
+ * g2 g1 g0 b4 b3 b2 b1 b0 r4 r3 r2 r1 r0 g5 g4 g3
+ */
+ .description = "RGB565, le",
+ .pixelformat = V4L2_PIX_FMT_RGB565,
+ },
+ {
+ /* Note: V4L2 defines RGB32 as RGB-8-8-8-8; we use
+ * this for the RGB24 unpack mode and the last 8 bits are ignored
+ */
+ .description = "RGB32, le",
+ .pixelformat = V4L2_PIX_FMT_RGB32,
+ },
+ {
+ /* Note: V4L2 defines RGB24 as RGB-8-8-8; we use
+ * this for the RGB24 packed mode
+ */
+ .description = "RGB24, le",
+ .pixelformat = V4L2_PIX_FMT_RGB24,
+ },
+ {
+ .description = "YUYV (YUV 4:2:2), packed",
+ .pixelformat = V4L2_PIX_FMT_YUYV,
+ },
+ {
+ .description = "UYVY, packed",
+ .pixelformat = V4L2_PIX_FMT_UYVY,
+ },
+};
+
+#define NUM_OUTPUT_FORMATS (ARRAY_SIZE(omap_formats))
+
+/*
+ * Allocate buffers
+ */
+static unsigned long omap_vout_alloc_buffer(u32 buf_size, u32 *phys_addr)
+{
+ u32 order, size;
+ unsigned long virt_addr, addr;
+
+ size = PAGE_ALIGN(buf_size);
+ order = get_order(size);
+ virt_addr = __get_free_pages(GFP_KERNEL | GFP_DMA, order);
+ addr = virt_addr;
+
+ if (virt_addr) {
+ while (size > 0) {
+ SetPageReserved(virt_to_page(addr));
+ addr += PAGE_SIZE;
+ size -= PAGE_SIZE;
+ }
+ }
+ *phys_addr = (u32) virt_to_phys((void *) virt_addr);
+ return virt_addr;
+}
+
+/*
+ * Free buffers
+ */
+static void omap_vout_free_buffer(unsigned long virtaddr, u32 buf_size)
+{
+ u32 order, size;
+ unsigned long addr = virtaddr;
+
+ size = PAGE_ALIGN(buf_size);
+ order = get_order(size);
+
+ while (size > 0) {
+ ClearPageReserved(virt_to_page(addr));
+ addr += PAGE_SIZE;
+ size -= PAGE_SIZE;
+ }
+ free_pages((unsigned long) virtaddr, order);
+}
+
+/*
+ * Function for allocating video buffers
+ */
+static int omap_vout_allocate_vrfb_buffers(struct omap_vout_device *vout,
+ unsigned int *count, int startindex)
+{
+ int i, j;
+
+ for (i = 0; i < *count; i++) {
+ if (!vout->smsshado_virt_addr[i]) {
+ vout->smsshado_virt_addr[i] =
+ omap_vout_alloc_buffer(vout->smsshado_size,
+ &vout->smsshado_phy_addr[i]);
+ }
+ if (!vout->smsshado_virt_addr[i] && startindex != -1) {
+ if (V4L2_MEMORY_MMAP == vout->memory && i >= startindex)
+ break;
+ }
+ if (!vout->smsshado_virt_addr[i]) {
+ for (j = 0; j < i; j++) {
+ omap_vout_free_buffer(
+ vout->smsshado_virt_addr[j],
+ vout->smsshado_size);
+ vout->smsshado_virt_addr[j] = 0;
+ vout->smsshado_phy_addr[j] = 0;
+ }
+ *count = 0;
+ return -ENOMEM;
+ }
+ memset((void *) vout->smsshado_virt_addr[i], 0,
+ vout->smsshado_size);
+ }
+ return 0;
+}
+
+/*
+ * Try format
+ */
+static int omap_vout_try_format(struct v4l2_pix_format *pix)
+{
+ int ifmt, bpp = 0;
+
+ pix->height = clamp(pix->height, (u32)VID_MIN_HEIGHT,
+ (u32)VID_MAX_HEIGHT);
+ pix->width = clamp(pix->width, (u32)VID_MIN_WIDTH, (u32)VID_MAX_WIDTH);
+
+ for (ifmt = 0; ifmt < NUM_OUTPUT_FORMATS; ifmt++) {
+ if (pix->pixelformat == omap_formats[ifmt].pixelformat)
+ break;
+ }
+
+ if (ifmt == NUM_OUTPUT_FORMATS)
+ ifmt = 0;
+
+ pix->pixelformat = omap_formats[ifmt].pixelformat;
+ pix->field = V4L2_FIELD_ANY;
+ pix->priv = 0;
+
+ switch (pix->pixelformat) {
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_UYVY:
+ default:
+ pix->colorspace = V4L2_COLORSPACE_JPEG;
+ bpp = YUYV_BPP;
+ break;
+ case V4L2_PIX_FMT_RGB565:
+ case V4L2_PIX_FMT_RGB565X:
+ pix->colorspace = V4L2_COLORSPACE_SRGB;
+ bpp = RGB565_BPP;
+ break;
+ case V4L2_PIX_FMT_RGB24:
+ pix->colorspace = V4L2_COLORSPACE_SRGB;
+ bpp = RGB24_BPP;
+ break;
+ case V4L2_PIX_FMT_RGB32:
+ case V4L2_PIX_FMT_BGR32:
+ pix->colorspace = V4L2_COLORSPACE_SRGB;
+ bpp = RGB32_BPP;
+ break;
+ }
+ pix->bytesperline = pix->width * bpp;
+ pix->sizeimage = pix->bytesperline * pix->height;
+
+ return bpp;
+}
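+
+/*
+ * Worked example (hypothetical request, assuming RGB565_BPP is 2 bytes per
+ * pixel): a 640x480 V4L2_PIX_FMT_RGB565 try_fmt ends up with
+ * bytesperline = 640 * 2 = 1280 and sizeimage = 1280 * 480 = 614400 bytes.
+ */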
+
+/*
+ * omap_vout_uservirt_to_phys: convert a user space virtual address to a
+ * physical address.
+ */
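+/*
+ * Three cases are handled: kernel direct-mapped addresses, VM_IO areas that
+ * were mmap()ed from a driver (where vm_pgoff holds the physical page
+ * offset), and ordinary user pages pinned with get_user_pages().
+ */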
+static u32 omap_vout_uservirt_to_phys(u32 virtp)
+{
+ unsigned long physp = 0;
+ struct vm_area_struct *vma;
+ struct mm_struct *mm = current->mm;
+
+ vma = find_vma(mm, virtp);
+ /* For kernel direct-mapped memory, take the easy way */
+ if (virtp >= PAGE_OFFSET) {
+ physp = virt_to_phys((void *) virtp);
+ } else if (vma && (vma->vm_flags & VM_IO) && vma->vm_pgoff) {
+ /* This will catch kernel-allocated, mmapped-to-usermode
+ addresses */
+ physp = (vma->vm_pgoff << PAGE_SHIFT) + (virtp - vma->vm_start);
+ } else {
+ /* otherwise, use get_user_pages() for general userland pages */
+ int res, nr_pages = 1;
+ struct page *pages;
+ down_read(&current->mm->mmap_sem);
+
+ res = get_user_pages(current, current->mm, virtp, nr_pages, 1,
+ 0, &pages, NULL);
+ up_read(&current->mm->mmap_sem);
+
+ if (res == nr_pages) {
+ physp = __pa(page_address(&pages[0]) +
+ (virtp & ~PAGE_MASK));
+ } else {
+ printk(KERN_WARNING VOUT_NAME
+ "get_user_pages failed\n");
+ return 0;
+ }
+ }
+
+ return physp;
+}
+
+/*
+ * Wakes up the application once the DMA transfer to VRFB space is completed.
+ */
+static void omap_vout_vrfb_dma_tx_callback(int lch, u16 ch_status, void *data)
+{
+ struct vid_vrfb_dma *t = (struct vid_vrfb_dma *) data;
+
+ t->tx_status = 1;
+ wake_up_interruptible(&t->wait);
+}
+
+/*
+ * Release the VRFB context once the module exits
+ */
+static void omap_vout_release_vrfb(struct omap_vout_device *vout)
+{
+ int i;
+
+ for (i = 0; i < VRFB_NUM_BUFS; i++)
+ omap_vrfb_release_ctx(&vout->vrfb_context[i]);
+
+ if (vout->vrfb_dma_tx.req_status == DMA_CHAN_ALLOTED) {
+ vout->vrfb_dma_tx.req_status = DMA_CHAN_NOT_ALLOTED;
+ omap_free_dma(vout->vrfb_dma_tx.dma_ch);
+ }
+}
+
+/*
+ * Return true if rotation is 90 or 270
+ */
+static inline int rotate_90_or_270(const struct omap_vout_device *vout)
+{
+ return (vout->rotation == dss_rotation_90_degree ||
+ vout->rotation == dss_rotation_270_degree);
+}
+
+/*
+ * Return true if rotation is enabled
+ */
+static inline int rotation_enabled(const struct omap_vout_device *vout)
+{
+ return vout->rotation || vout->mirror;
+}
+
+/*
+ * Reverse the rotation degree if mirroring is enabled
+ */
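+/*
+ * With mirroring enabled, the rotation handed to the DSS is remapped:
+ * 0 <-> 180 and 90 <-> 270 degrees.
+ */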
+static inline int calc_rotation(const struct omap_vout_device *vout)
+{
+ if (!vout->mirror)
+ return vout->rotation;
+
+ switch (vout->rotation) {
+ case dss_rotation_90_degree:
+ return dss_rotation_270_degree;
+ case dss_rotation_270_degree:
+ return dss_rotation_90_degree;
+ case dss_rotation_180_degree:
+ return dss_rotation_0_degree;
+ default:
+ return dss_rotation_180_degree;
+ }
+}
+
+/*
+ * Free the V4L2 buffers
+ */
+static void omap_vout_free_buffers(struct omap_vout_device *vout)
+{
+ int i, numbuffers;
+
+ /* Determine the number and size of the buffers to be freed */
+ numbuffers = (vout->vid) ? video2_numbuffers : video1_numbuffers;
+ vout->buffer_size = (vout->vid) ? video2_bufsize : video1_bufsize;
+
+ for (i = 0; i < numbuffers; i++) {
+ omap_vout_free_buffer(vout->buf_virt_addr[i],
+ vout->buffer_size);
+ vout->buf_phy_addr[i] = 0;
+ vout->buf_virt_addr[i] = 0;
+ }
+}
+
+/*
+ * Free VRFB buffers
+ */
+static void omap_vout_free_vrfb_buffers(struct omap_vout_device *vout)
+{
+ int j;
+
+ for (j = 0; j < VRFB_NUM_BUFS; j++) {
+ omap_vout_free_buffer(vout->smsshado_virt_addr[j],
+ vout->smsshado_size);
+ vout->smsshado_virt_addr[j] = 0;
+ vout->smsshado_phy_addr[j] = 0;
+ }
+}
+
+/*
+ * Allocate the buffers for the VRFB space. Data is copied from V4L2
+ * buffers to the VRFB buffers using the DMA engine.
+ */
+static int omap_vout_vrfb_buffer_setup(struct omap_vout_device *vout,
+ unsigned int *count, unsigned int startindex)
+{
+ int i;
+ bool yuv_mode;
+
+ /* Allocate the VRFB buffers only if the buffers are not
+ * allocated during init time.
+ */
+ if ((rotation_enabled(vout)) && !vout->vrfb_static_allocation)
+ if (omap_vout_allocate_vrfb_buffers(vout, count, startindex))
+ return -ENOMEM;
+
+ if (vout->dss_mode == OMAP_DSS_COLOR_YUV2 ||
+ vout->dss_mode == OMAP_DSS_COLOR_UYVY)
+ yuv_mode = true;
+ else
+ yuv_mode = false;
+
+ for (i = 0; i < *count; i++)
+ omap_vrfb_setup(&vout->vrfb_context[i],
+ vout->smsshado_phy_addr[i], vout->pix.width,
+ vout->pix.height, vout->bpp, yuv_mode);
+
+ return 0;
+}
+
+/*
+ * Convert V4L2 rotation to DSS rotation
+ * V4L2 understands 0, 90, 180 and 270 degrees;
+ * convert these to 0, 1, 2 and 3 respectively for the DSS
+ */
+static int v4l2_rot_to_dss_rot(int v4l2_rotation,
+ enum dss_rotation *rotation, bool mirror)
+{
+ int ret = 0;
+
+ switch (v4l2_rotation) {
+ case 90:
+ *rotation = dss_rotation_90_degree;
+ break;
+ case 180:
+ *rotation = dss_rotation_180_degree;
+ break;
+ case 270:
+ *rotation = dss_rotation_270_degree;
+ break;
+ case 0:
+ *rotation = dss_rotation_0_degree;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+/*
+ * Calculate the buffer offsets from which the streaming should
+ * start. This offset calculation is mainly required because of
+ * the VRFB 32 pixels alignment with rotation.
+ */
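+/*
+ * The cropped_offset computed here is later added to the buffer (or VRFB)
+ * base address that gets programmed into the overlay; see omap_vout_isr().
+ */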
+static int omap_vout_calculate_offset(struct omap_vout_device *vout)
+{
+ struct omap_overlay *ovl;
+ enum dss_rotation rotation;
+ struct omapvideo_info *ovid;
+ bool mirroring = vout->mirror;
+ struct omap_dss_device *cur_display;
+ struct v4l2_rect *crop = &vout->crop;
+ struct v4l2_pix_format *pix = &vout->pix;
+ int *cropped_offset = &vout->cropped_offset;
+ int vr_ps = 1, ps = 2, temp_ps = 2;
+ int offset = 0, ctop = 0, cleft = 0, line_length = 0;
+
+ ovid = &vout->vid_info;
+ ovl = ovid->overlays[0];
+ /* get the display device attached to the overlay */
+ if (!ovl->manager || !ovl->manager->device)
+ return -1;
+
+ cur_display = ovl->manager->device;
+ rotation = calc_rotation(vout);
+
+ if (V4L2_PIX_FMT_YUYV == pix->pixelformat ||
+ V4L2_PIX_FMT_UYVY == pix->pixelformat) {
+ if (rotation_enabled(vout)) {
+ /*
+ * ps - Actual pixel size for YUYV/UYVY for
+ * VRFB/Mirroring is 4 bytes
+ * vr_ps - Virtual pixel size for YUYV/UYVY is
+ * 2 bytes
+ */
+ ps = 4;
+ vr_ps = 2;
+ } else {
+ ps = 2; /* otherwise the pixel size is 2 byte */
+ }
+ } else if (V4L2_PIX_FMT_RGB32 == pix->pixelformat) {
+ ps = 4;
+ } else if (V4L2_PIX_FMT_RGB24 == pix->pixelformat) {
+ ps = 3;
+ }
+ vout->ps = ps;
+ vout->vr_ps = vr_ps;
+
+ if (rotation_enabled(vout)) {
+ line_length = MAX_PIXELS_PER_LINE;
+ ctop = (pix->height - crop->height) - crop->top;
+ cleft = (pix->width - crop->width) - crop->left;
+ } else {
+ line_length = pix->width;
+ }
+ vout->line_length = line_length;
+ switch (rotation) {
+ case dss_rotation_90_degree:
+ offset = vout->vrfb_context[0].yoffset *
+ vout->vrfb_context[0].bytespp;
+ temp_ps = ps / vr_ps;
+ if (mirroring == 0) {
+ *cropped_offset = offset + line_length *
+ temp_ps * cleft + crop->top * temp_ps;
+ } else {
+ *cropped_offset = offset + line_length * temp_ps *
+ cleft + crop->top * temp_ps + (line_length *
+ ((crop->width / (vr_ps)) - 1) * ps);
+ }
+ break;
+ case dss_rotation_180_degree:
+ offset = ((MAX_PIXELS_PER_LINE * vout->vrfb_context[0].yoffset *
+ vout->vrfb_context[0].bytespp) +
+ (vout->vrfb_context[0].xoffset *
+ vout->vrfb_context[0].bytespp));
+ if (mirroring == 0) {
+ *cropped_offset = offset + (line_length * ps * ctop) +
+ (cleft / vr_ps) * ps;
+
+ } else {
+ *cropped_offset = offset + (line_length * ps * ctop) +
+ (cleft / vr_ps) * ps + (line_length *
+ (crop->height - 1) * ps);
+ }
+ break;
+ case dss_rotation_270_degree:
+ offset = MAX_PIXELS_PER_LINE * vout->vrfb_context[0].xoffset *
+ vout->vrfb_context[0].bytespp;
+ temp_ps = ps / vr_ps;
+ if (mirroring == 0) {
+ *cropped_offset = offset + line_length *
+ temp_ps * crop->left + ctop * ps;
+ } else {
+ *cropped_offset = offset + line_length *
+ temp_ps * crop->left + ctop * ps +
+ (line_length * ((crop->width / vr_ps) - 1) *
+ ps);
+ }
+ break;
+ case dss_rotation_0_degree:
+ if (mirroring == 0) {
+ *cropped_offset = (line_length * ps) *
+ crop->top + (crop->left / vr_ps) * ps;
+ } else {
+ *cropped_offset = (line_length * ps) *
+ crop->top + (crop->left / vr_ps) * ps +
+ (line_length * (crop->height - 1) * ps);
+ }
+ break;
+ default:
+ *cropped_offset = (line_length * ps * crop->top) /
+ vr_ps + (crop->left * ps) / vr_ps +
+ ((crop->width / vr_ps) - 1) * ps;
+ break;
+ }
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "%s Offset:%x\n",
+ __func__, *cropped_offset);
+ return 0;
+}
+
+/*
+ * Convert V4L2 pixel format to DSS pixel format
+ */
+static int video_mode_to_dss_mode(struct omap_vout_device *vout)
+{
+ struct omap_overlay *ovl;
+ struct omapvideo_info *ovid;
+ struct v4l2_pix_format *pix = &vout->pix;
+ enum omap_color_mode mode;
+
+ ovid = &vout->vid_info;
+ ovl = ovid->overlays[0];
+
+ switch (pix->pixelformat) {
+ case 0:
+ break;
+ case V4L2_PIX_FMT_YUYV:
+ mode = OMAP_DSS_COLOR_YUV2;
+ break;
+ case V4L2_PIX_FMT_UYVY:
+ mode = OMAP_DSS_COLOR_UYVY;
+ break;
+ case V4L2_PIX_FMT_RGB565:
+ mode = OMAP_DSS_COLOR_RGB16;
+ break;
+ case V4L2_PIX_FMT_RGB24:
+ mode = OMAP_DSS_COLOR_RGB24P;
+ break;
+ case V4L2_PIX_FMT_RGB32:
+ mode = (ovl->id == OMAP_DSS_VIDEO1) ?
+ OMAP_DSS_COLOR_RGB24U : OMAP_DSS_COLOR_ARGB32;
+ break;
+ case V4L2_PIX_FMT_BGR32:
+ mode = OMAP_DSS_COLOR_RGBX32;
+ break;
+ default:
+ mode = -EINVAL;
+ }
+ return mode;
+}
+
+/*
+ * Setup the overlay
+ */
+int omapvid_setup_overlay(struct omap_vout_device *vout,
+ struct omap_overlay *ovl, int posx, int posy, int outw,
+ int outh, u32 addr)
+{
+ int ret = 0;
+ struct omap_overlay_info info;
+ int cropheight, cropwidth, pixheight, pixwidth;
+
+ if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0 &&
+ (outw != vout->pix.width || outh != vout->pix.height)) {
+ ret = -EINVAL;
+ goto setup_ovl_err;
+ }
+
+ vout->dss_mode = video_mode_to_dss_mode(vout);
+ if (vout->dss_mode == -EINVAL) {
+ ret = -EINVAL;
+ goto setup_ovl_err;
+ }
+
+ /* Setup the input plane parameters according to
+ * rotation value selected.
+ */
+ if (rotate_90_or_270(vout)) {
+ cropheight = vout->crop.width;
+ cropwidth = vout->crop.height;
+ pixheight = vout->pix.width;
+ pixwidth = vout->pix.height;
+ } else {
+ cropheight = vout->crop.height;
+ cropwidth = vout->crop.width;
+ pixheight = vout->pix.height;
+ pixwidth = vout->pix.width;
+ }
+
+ ovl->get_overlay_info(ovl, &info);
+ info.paddr = addr;
+ info.vaddr = NULL;
+ info.width = cropwidth;
+ info.height = cropheight;
+ info.color_mode = vout->dss_mode;
+ info.mirror = vout->mirror;
+ info.pos_x = posx;
+ info.pos_y = posy;
+ info.out_width = outw;
+ info.out_height = outh;
+ info.global_alpha = vout->win.global_alpha;
+ if (!rotation_enabled(vout)) {
+ info.rotation = 0;
+ info.rotation_type = OMAP_DSS_ROT_DMA;
+ info.screen_width = pixwidth;
+ } else {
+ info.rotation = vout->rotation;
+ info.rotation_type = OMAP_DSS_ROT_VRFB;
+ info.screen_width = 2048;
+ }
+
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev,
+ "%s enable=%d addr=%x width=%d\n height=%d color_mode=%d\n"
+ "rotation=%d mirror=%d posx=%d posy=%d out_width = %d \n"
+ "out_height=%d rotation_type=%d screen_width=%d\n",
+ __func__, info.enabled, info.paddr, info.width, info.height,
+ info.color_mode, info.rotation, info.mirror, info.pos_x,
+ info.pos_y, info.out_width, info.out_height, info.rotation_type,
+ info.screen_width);
+
+ ret = ovl->set_overlay_info(ovl, &info);
+ if (ret)
+ goto setup_ovl_err;
+
+ return 0;
+
+setup_ovl_err:
+ v4l2_warn(&vout->vid_dev->v4l2_dev, "setup_overlay failed\n");
+ return ret;
+}
+
+/*
+ * Initialize the overlay structure
+ */
+int omapvid_init(struct omap_vout_device *vout, u32 addr)
+{
+ int ret = 0, i;
+ struct v4l2_window *win;
+ struct omap_overlay *ovl;
+ int posx, posy, outw, outh, temp;
+ struct omap_video_timings *timing;
+ struct omapvideo_info *ovid = &vout->vid_info;
+
+ win = &vout->win;
+ for (i = 0; i < ovid->num_overlays; i++) {
+ ovl = ovid->overlays[i];
+ if (!ovl->manager || !ovl->manager->device)
+ return -EINVAL;
+
+ timing = &ovl->manager->device->panel.timings;
+
+ outw = win->w.width;
+ outh = win->w.height;
+ switch (vout->rotation) {
+ case dss_rotation_90_degree:
+ /* Invert the height and width for 90
+ * and 270 degree rotation
+ */
+ temp = outw;
+ outw = outh;
+ outh = temp;
+ posy = (timing->y_res - win->w.width) - win->w.left;
+ posx = win->w.top;
+ break;
+
+ case dss_rotation_180_degree:
+ posx = (timing->x_res - win->w.width) - win->w.left;
+ posy = (timing->y_res - win->w.height) - win->w.top;
+ break;
+
+ case dss_rotation_270_degree:
+ temp = outw;
+ outw = outh;
+ outh = temp;
+ posy = win->w.left;
+ posx = (timing->x_res - win->w.height) - win->w.top;
+ break;
+
+ default:
+ posx = win->w.left;
+ posy = win->w.top;
+ break;
+ }
+
+ ret = omapvid_setup_overlay(vout, ovl, posx, posy,
+ outw, outh, addr);
+ if (ret)
+ goto omapvid_init_err;
+ }
+ return 0;
+
+omapvid_init_err:
+ v4l2_warn(&vout->vid_dev->v4l2_dev, "apply_changes failed\n");
+ return ret;
+}
+
+/*
+ * Apply the changes and set the GO bit of the DSS
+ */
+int omapvid_apply_changes(struct omap_vout_device *vout)
+{
+ int i;
+ struct omap_overlay *ovl;
+ struct omapvideo_info *ovid = &vout->vid_info;
+
+ for (i = 0; i < ovid->num_overlays; i++) {
+ ovl = ovid->overlays[i];
+ if (!ovl->manager || !ovl->manager->device)
+ return -EINVAL;
+ ovl->manager->apply(ovl->manager);
+ }
+
+ return 0;
+}
+
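+/*
+ * Display controller interrupt handler: on every VSYNC (or even/odd field
+ * sync for interlaced outputs) the frame currently on screen is marked
+ * VIDEOBUF_DONE and, if another buffer is queued, it is programmed into the
+ * overlay and the GO bit is set again.
+ */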
+void omap_vout_isr(void *arg, unsigned int irqstatus)
+{
+ int ret;
+ u32 addr, fid;
+ struct omap_overlay *ovl;
+ struct timeval timevalue;
+ struct omapvideo_info *ovid;
+ struct omap_dss_device *cur_display;
+ struct omap_vout_device *vout = (struct omap_vout_device *)arg;
+
+ if (!vout->streaming)
+ return;
+
+ ovid = &vout->vid_info;
+ ovl = ovid->overlays[0];
+ /* get the display device attached to the overlay */
+ if (!ovl->manager || !ovl->manager->device)
+ return;
+
+ cur_display = ovl->manager->device;
+
+ spin_lock(&vout->vbq_lock);
+ do_gettimeofday(&timevalue);
+ if (cur_display->type == OMAP_DISPLAY_TYPE_DPI) {
+ if (!(irqstatus & DISPC_IRQ_VSYNC))
+ goto vout_isr_err;
+
+ if (!vout->first_int && (vout->cur_frm != vout->next_frm)) {
+ vout->cur_frm->ts = timevalue;
+ vout->cur_frm->state = VIDEOBUF_DONE;
+ wake_up_interruptible(&vout->cur_frm->done);
+ vout->cur_frm = vout->next_frm;
+ }
+ vout->first_int = 0;
+ if (list_empty(&vout->dma_queue))
+ goto vout_isr_err;
+
+ vout->next_frm = list_entry(vout->dma_queue.next,
+ struct videobuf_buffer, queue);
+ list_del(&vout->next_frm->queue);
+
+ vout->next_frm->state = VIDEOBUF_ACTIVE;
+
+ addr = (unsigned long) vout->queued_buf_addr[vout->next_frm->i]
+ + vout->cropped_offset;
+
+ /* First save the configuration in the overlay structure */
+ ret = omapvid_init(vout, addr);
+ if (ret)
+ printk(KERN_ERR VOUT_NAME
+ "failed to set overlay info\n");
+ /* Enable the pipeline and set the Go bit */
+ ret = omapvid_apply_changes(vout);
+ if (ret)
+ printk(KERN_ERR VOUT_NAME "failed to change mode\n");
+ } else {
+
+ if (vout->first_int) {
+ vout->first_int = 0;
+ goto vout_isr_err;
+ }
+ if (irqstatus & DISPC_IRQ_EVSYNC_ODD)
+ fid = 1;
+ else if (irqstatus & DISPC_IRQ_EVSYNC_EVEN)
+ fid = 0;
+ else
+ goto vout_isr_err;
+
+ vout->field_id ^= 1;
+ if (fid != vout->field_id) {
+ if (0 == fid)
+ vout->field_id = fid;
+
+ goto vout_isr_err;
+ }
+ if (0 == fid) {
+ if (vout->cur_frm == vout->next_frm)
+ goto vout_isr_err;
+
+ vout->cur_frm->ts = timevalue;
+ vout->cur_frm->state = VIDEOBUF_DONE;
+ wake_up_interruptible(&vout->cur_frm->done);
+ vout->cur_frm = vout->next_frm;
+ } else if (1 == fid) {
+ if (list_empty(&vout->dma_queue) ||
+ (vout->cur_frm != vout->next_frm))
+ goto vout_isr_err;
+
+ vout->next_frm = list_entry(vout->dma_queue.next,
+ struct videobuf_buffer, queue);
+ list_del(&vout->next_frm->queue);
+
+ vout->next_frm->state = VIDEOBUF_ACTIVE;
+ addr = (unsigned long)
+ vout->queued_buf_addr[vout->next_frm->i] +
+ vout->cropped_offset;
+ /* First save the configuration in the overlay structure */
+ ret = omapvid_init(vout, addr);
+ if (ret)
+ printk(KERN_ERR VOUT_NAME
+ "failed to set overlay info\n");
+ /* Enable the pipeline and set the Go bit */
+ ret = omapvid_apply_changes(vout);
+ if (ret)
+ printk(KERN_ERR VOUT_NAME
+ "failed to change mode\n");
+ }
+
+ }
+
+vout_isr_err:
+ spin_unlock(&vout->vbq_lock);
+}
+
+
+/* Video buffer call backs */
+
+/*
+ * Buffer setup function, called by the videobuf layer when the REQBUF ioctl
+ * is issued. It is used to set up buffers and return the size and count of
+ * the buffers allocated. After this call, the videobuf layer will set up the
+ * buffer queue depending on that size and count.
+ */
+static int omap_vout_buffer_setup(struct videobuf_queue *q, unsigned int *count,
+ unsigned int *size)
+{
+ int startindex = 0, i, j;
+ u32 phy_addr = 0, virt_addr = 0;
+ struct omap_vout_device *vout = q->priv_data;
+
+ if (!vout)
+ return -EINVAL;
+
+ if (V4L2_BUF_TYPE_VIDEO_OUTPUT != q->type)
+ return -EINVAL;
+
+ startindex = (vout->vid == OMAP_VIDEO1) ?
+ video1_numbuffers : video2_numbuffers;
+ if (V4L2_MEMORY_MMAP == vout->memory && *count < startindex)
+ *count = startindex;
+
+ if ((rotation_enabled(vout)) && *count > VRFB_NUM_BUFS)
+ *count = VRFB_NUM_BUFS;
+
+ /* If rotation is enabled, allocate memory for VRFB space also */
+ if (rotation_enabled(vout))
+ if (omap_vout_vrfb_buffer_setup(vout, count, startindex))
+ return -ENOMEM;
+
+ if (V4L2_MEMORY_MMAP != vout->memory)
+ return 0;
+
+ /* Now allocate the V4L2 buffers */
+ *size = PAGE_ALIGN(vout->pix.width * vout->pix.height * vout->bpp);
+ startindex = (vout->vid == OMAP_VIDEO1) ?
+ video1_numbuffers : video2_numbuffers;
+
+ for (i = startindex; i < *count; i++) {
+ vout->buffer_size = *size;
+
+ virt_addr = omap_vout_alloc_buffer(vout->buffer_size,
+ &phy_addr);
+ if (!virt_addr) {
+ if (!rotation_enabled(vout))
+ break;
+ /* Free the VRFB buffers if no space for V4L2 buffers */
+ for (j = i; j < *count; j++) {
+ omap_vout_free_buffer(
+ vout->smsshado_virt_addr[j],
+ vout->smsshado_size);
+ vout->smsshado_virt_addr[j] = 0;
+ vout->smsshado_phy_addr[j] = 0;
+ }
+ }
+ vout->buf_virt_addr[i] = virt_addr;
+ vout->buf_phy_addr[i] = phy_addr;
+ }
+ *count = vout->buffer_allocated = i;
+
+ return 0;
+}
+
+/*
+ * Free the V4L2 buffers allocated beyond the default number of
+ * buffers, and free all the VRFB buffers
+ */
+static void omap_vout_free_allbuffers(struct omap_vout_device *vout)
+{
+ int num_buffers = 0, i;
+
+ num_buffers = (vout->vid == OMAP_VIDEO1) ?
+ video1_numbuffers : video2_numbuffers;
+
+ for (i = num_buffers; i < vout->buffer_allocated; i++) {
+ if (vout->buf_virt_addr[i])
+ omap_vout_free_buffer(vout->buf_virt_addr[i],
+ vout->buffer_size);
+
+ vout->buf_virt_addr[i] = 0;
+ vout->buf_phy_addr[i] = 0;
+ }
+ /* Free the VRFB buffers only if they were allocated
+ * during reqbufs; don't free them if they were allocated at init time
+ */
+ if (!vout->vrfb_static_allocation) {
+ for (i = 0; i < VRFB_NUM_BUFS; i++) {
+ if (vout->smsshado_virt_addr[i]) {
+ omap_vout_free_buffer(
+ vout->smsshado_virt_addr[i],
+ vout->smsshado_size);
+ vout->smsshado_virt_addr[i] = 0;
+ vout->smsshado_phy_addr[i] = 0;
+ }
+ }
+ }
+ vout->buffer_allocated = num_buffers;
+}
+
+/*
+ * This function is called when the VIDIOC_QBUF ioctl is issued.
+ * It prepares a buffer before it is handed out for display. The function
+ * converts the user space virtual address into a physical address if the
+ * userptr memory exchange mechanism is used. If rotation is enabled, it
+ * copies the entire buffer into VRFB memory space before giving it to the DSS.
+ */
+static int omap_vout_buffer_prepare(struct videobuf_queue *q,
+ struct videobuf_buffer *vb,
+ enum v4l2_field field)
+{
+ struct vid_vrfb_dma *tx;
+ enum dss_rotation rotation;
+ struct videobuf_dmabuf *dmabuf = NULL;
+ struct omap_vout_device *vout = q->priv_data;
+ u32 dest_frame_index = 0, src_element_index = 0;
+ u32 dest_element_index = 0, src_frame_index = 0;
+ u32 elem_count = 0, frame_count = 0, pixsize = 2;
+
+ if (VIDEOBUF_NEEDS_INIT == vb->state) {
+ vb->width = vout->pix.width;
+ vb->height = vout->pix.height;
+ vb->size = vb->width * vb->height * vout->bpp;
+ vb->field = field;
+ }
+ vb->state = VIDEOBUF_PREPARED;
+ /* if user pointer memory mechanism is used, get the physical
+ * address of the buffer
+ */
+ if (V4L2_MEMORY_USERPTR == vb->memory) {
+ if (0 == vb->baddr)
+ return -EINVAL;
+ /* Virtual address */
+ /* priv points to struct videobuf_pci_sg_memory, but we want
+ * a pointer to videobuf_dmabuf, which is a member of
+ * videobuf_pci_sg_memory */
+ dmabuf = videobuf_to_dma(q->bufs[vb->i]);
+ dmabuf->vmalloc = (void *) vb->baddr;
+
+ /* Physical address */
+ dmabuf->bus_addr =
+ (dma_addr_t) omap_vout_uservirt_to_phys(vb->baddr);
+ }
+
+ if (!rotation_enabled(vout)) {
+ dmabuf = videobuf_to_dma(q->bufs[vb->i]);
+ vout->queued_buf_addr[vb->i] = (u8 *) dmabuf->bus_addr;
+ return 0;
+ }
+ dmabuf = videobuf_to_dma(q->bufs[vb->i]);
+ /* If rotation is enabled, copy the input buffer into VRFB
+ * memory space using DMA. The buffer is copied into the VRFB
+ * view of the desired angle, and the DSS then reads the image
+ * from VRFB memory as the 0 degree view
+ */
+ pixsize = vout->bpp * vout->vrfb_bpp;
+ /*
+ * DMA transfer in double index mode
+ */
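+ /*
+ * dest_frame_index is chosen so that after each transferred image line
+ * the destination jumps to the start of the next VRFB line, whose
+ * stride is MAX_PIXELS_PER_LINE pixels; the source is read linearly.
+ * Completion is signalled by the DMA callback, which the code below
+ * waits for with a timeout.
+ */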
+
+ /* Frame index */
+ dest_frame_index = ((MAX_PIXELS_PER_LINE * pixsize) -
+ (vout->pix.width * vout->bpp)) + 1;
+
+ /* Source and destination parameters */
+ src_element_index = 0;
+ src_frame_index = 0;
+ dest_element_index = 1;
+ /* Number of elements per frame */
+ elem_count = vout->pix.width * vout->bpp;
+ frame_count = vout->pix.height;
+ tx = &vout->vrfb_dma_tx;
+ tx->tx_status = 0;
+ omap_set_dma_transfer_params(tx->dma_ch, OMAP_DMA_DATA_TYPE_S32,
+ (elem_count / 4), frame_count, OMAP_DMA_SYNC_ELEMENT,
+ tx->dev_id, 0x0);
+ /* src_port required only for OMAP1 */
+ omap_set_dma_src_params(tx->dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
+ dmabuf->bus_addr, src_element_index, src_frame_index);
+ /* Set DMA source burst mode for VRFB */
+ omap_set_dma_src_burst_mode(tx->dma_ch, OMAP_DMA_DATA_BURST_16);
+ rotation = calc_rotation(vout);
+
+ /* dest_port required only for OMAP1 */
+ omap_set_dma_dest_params(tx->dma_ch, 0, OMAP_DMA_AMODE_DOUBLE_IDX,
+ vout->vrfb_context[vb->i].paddr[0], dest_element_index,
+ dest_frame_index);
+	/* set DMA destination burst mode for VRFB */
+ omap_set_dma_dest_burst_mode(tx->dma_ch, OMAP_DMA_DATA_BURST_16);
+ omap_dma_set_global_params(DMA_DEFAULT_ARB_RATE, 0x20, 0);
+
+ omap_start_dma(tx->dma_ch);
+ interruptible_sleep_on_timeout(&tx->wait, VRFB_TX_TIMEOUT);
+
+ if (tx->tx_status == 0) {
+ omap_stop_dma(tx->dma_ch);
+ return -EINVAL;
+ }
+	/* Store the buffer's physical address into an array. Addresses
+	 * from this array will be used to configure the DSS */
+ vout->queued_buf_addr[vb->i] = (u8 *)
+ vout->vrfb_context[vb->i].paddr[rotation];
+ return 0;
+}
+
+/*
+ * The buffer queue function is called from the videobuf layer when the
+ * _QBUF ioctl is called. It is used to enqueue a buffer that is ready
+ * to be displayed.
+ */
+static void omap_vout_buffer_queue(struct videobuf_queue *q,
+ struct videobuf_buffer *vb)
+{
+ struct omap_vout_device *vout = q->priv_data;
+
+	/* The driver also maintains its own queue, so enqueue the buffer
+	 * in the driver queue as well */
+ list_add_tail(&vb->queue, &vout->dma_queue);
+
+ vb->state = VIDEOBUF_QUEUED;
+}
+
+/*
+ * The buffer release function is called from the videobuf layer to release
+ * buffers that have already been allocated
+ */
+static void omap_vout_buffer_release(struct videobuf_queue *q,
+ struct videobuf_buffer *vb)
+{
+ struct omap_vout_device *vout = q->priv_data;
+
+ vb->state = VIDEOBUF_NEEDS_INIT;
+
+ if (V4L2_MEMORY_MMAP != vout->memory)
+ return;
+}
+
+/*
+ * File operations
+ */
+static void omap_vout_vm_open(struct vm_area_struct *vma)
+{
+ struct omap_vout_device *vout = vma->vm_private_data;
+
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev,
+ "vm_open [vma=%08lx-%08lx]\n", vma->vm_start, vma->vm_end);
+ vout->mmap_count++;
+}
+
+static void omap_vout_vm_close(struct vm_area_struct *vma)
+{
+ struct omap_vout_device *vout = vma->vm_private_data;
+
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev,
+ "vm_close [vma=%08lx-%08lx]\n", vma->vm_start, vma->vm_end);
+ vout->mmap_count--;
+}
+
+static struct vm_operations_struct omap_vout_vm_ops = {
+ .open = omap_vout_vm_open,
+ .close = omap_vout_vm_close,
+};
+
+static int omap_vout_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ int i;
+ void *pos;
+ unsigned long start = vma->vm_start;
+ unsigned long size = (vma->vm_end - vma->vm_start);
+ struct videobuf_dmabuf *dmabuf = NULL;
+ struct omap_vout_device *vout = file->private_data;
+ struct videobuf_queue *q = &vout->vbq;
+
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev,
+ " %s pgoff=0x%lx, start=0x%lx, end=0x%lx\n", __func__,
+ vma->vm_pgoff, vma->vm_start, vma->vm_end);
+
+ /* look for the buffer to map */
+ for (i = 0; i < VIDEO_MAX_FRAME; i++) {
+ if (NULL == q->bufs[i])
+ continue;
+ if (V4L2_MEMORY_MMAP != q->bufs[i]->memory)
+ continue;
+ if (q->bufs[i]->boff == (vma->vm_pgoff << PAGE_SHIFT))
+ break;
+ }
+
+ if (VIDEO_MAX_FRAME == i) {
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev,
+ "offset invalid [offset=0x%lx]\n",
+ (vma->vm_pgoff << PAGE_SHIFT));
+ return -EINVAL;
+ }
+ q->bufs[i]->baddr = vma->vm_start;
+
+ vma->vm_flags |= VM_RESERVED;
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ vma->vm_ops = &omap_vout_vm_ops;
+ vma->vm_private_data = (void *) vout;
+ dmabuf = videobuf_to_dma(q->bufs[i]);
+ pos = dmabuf->vmalloc;
+ vma->vm_pgoff = virt_to_phys((void *)pos) >> PAGE_SHIFT;
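+	/* Remap the buffer into user space one page at a time, translating
+	 * each kernel virtual page of the buffer to its physical frame
+	 * number.
+	 */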
+ while (size > 0) {
+ unsigned long pfn;
+ pfn = virt_to_phys((void *) pos) >> PAGE_SHIFT;
+ if (remap_pfn_range(vma, start, pfn, PAGE_SIZE, PAGE_SHARED))
+ return -EAGAIN;
+ start += PAGE_SIZE;
+ pos += PAGE_SIZE;
+ size -= PAGE_SIZE;
+ }
+ vout->mmap_count++;
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Exiting %s\n", __func__);
+
+ return 0;
+}
+
+static int omap_vout_release(struct file *file)
+{
+ unsigned int ret, i;
+ struct videobuf_queue *q;
+ struct omapvideo_info *ovid;
+ struct omap_vout_device *vout = file->private_data;
+
+	if (!vout)
+		return 0;
+
+	v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
+	ovid = &vout->vid_info;
+
+ q = &vout->vbq;
+ /* Disable all the overlay managers connected with this interface */
+ for (i = 0; i < ovid->num_overlays; i++) {
+ struct omap_overlay *ovl = ovid->overlays[i];
+ if (ovl->manager && ovl->manager->device) {
+ struct omap_overlay_info info;
+ ovl->get_overlay_info(ovl, &info);
+ info.enabled = 0;
+ ovl->set_overlay_info(ovl, &info);
+ }
+ }
+ /* Turn off the pipeline */
+ ret = omapvid_apply_changes(vout);
+ if (ret)
+ v4l2_warn(&vout->vid_dev->v4l2_dev,
+ "Unable to apply changes\n");
+
+ /* Free all buffers */
+ omap_vout_free_allbuffers(vout);
+ videobuf_mmap_free(q);
+
+	/* Even if applying the changes fails, we should continue
+	   freeing the allocated memory */
+ if (vout->streaming) {
+ u32 mask = 0;
+
+ mask = DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_EVEN |
+ DISPC_IRQ_EVSYNC_ODD;
+ omap_dispc_unregister_isr(omap_vout_isr, vout, mask);
+ vout->streaming = 0;
+
+ videobuf_streamoff(q);
+ videobuf_queue_cancel(q);
+ }
+
+ if (vout->mmap_count != 0)
+ vout->mmap_count = 0;
+
+ vout->opened -= 1;
+ file->private_data = NULL;
+
+ if (vout->buffer_allocated)
+ videobuf_mmap_free(q);
+
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Exiting %s\n", __func__);
+ return ret;
+}
+
+static int omap_vout_open(struct file *file)
+{
+ struct videobuf_queue *q;
+ struct omap_vout_device *vout = NULL;
+
+ vout = video_drvdata(file);
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
+
+ if (vout == NULL)
+ return -ENODEV;
+
+ /* for now, we only support single open */
+ if (vout->opened)
+ return -EBUSY;
+
+ vout->opened += 1;
+
+ file->private_data = vout;
+ vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+
+ q = &vout->vbq;
+ video_vbq_ops.buf_setup = omap_vout_buffer_setup;
+ video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
+ video_vbq_ops.buf_release = omap_vout_buffer_release;
+ video_vbq_ops.buf_queue = omap_vout_buffer_queue;
+ spin_lock_init(&vout->vbq_lock);
+
+ videobuf_queue_sg_init(q, &video_vbq_ops, NULL, &vout->vbq_lock,
+ vout->type, V4L2_FIELD_NONE,
+ sizeof(struct videobuf_buffer), vout);
+
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Exiting %s\n", __func__);
+ return 0;
+}
+
+/*
+ * V4L2 ioctls
+ */
+static int vidioc_querycap(struct file *file, void *fh,
+ struct v4l2_capability *cap)
+{
+ struct omap_vout_device *vout = fh;
+
+ strlcpy(cap->driver, VOUT_NAME, sizeof(cap->driver));
+ strlcpy(cap->card, vout->vfd->name, sizeof(cap->card));
+ cap->bus_info[0] = '\0';
+ cap->capabilities = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_OUTPUT;
+
+ return 0;
+}
+
+static int vidioc_enum_fmt_vid_out(struct file *file, void *fh,
+ struct v4l2_fmtdesc *fmt)
+{
+ int index = fmt->index;
+ enum v4l2_buf_type type = fmt->type;
+
+ fmt->index = index;
+ fmt->type = type;
+ if (index >= NUM_OUTPUT_FORMATS)
+ return -EINVAL;
+
+ fmt->flags = omap_formats[index].flags;
+ strlcpy(fmt->description, omap_formats[index].description,
+ sizeof(fmt->description));
+ fmt->pixelformat = omap_formats[index].pixelformat;
+
+ return 0;
+}
+
+static int vidioc_g_fmt_vid_out(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct omap_vout_device *vout = fh;
+
+ f->fmt.pix = vout->pix;
+ return 0;
+
+}
+
+static int vidioc_try_fmt_vid_out(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct omap_overlay *ovl;
+ struct omapvideo_info *ovid;
+ struct omap_video_timings *timing;
+ struct omap_vout_device *vout = fh;
+
+ ovid = &vout->vid_info;
+ ovl = ovid->overlays[0];
+
+ if (!ovl->manager || !ovl->manager->device)
+ return -EINVAL;
+ /* get the display device attached to the overlay */
+ timing = &ovl->manager->device->panel.timings;
+
+ vout->fbuf.fmt.height = timing->y_res;
+ vout->fbuf.fmt.width = timing->x_res;
+
+ omap_vout_try_format(&f->fmt.pix);
+ return 0;
+}
+
+static int vidioc_s_fmt_vid_out(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ int ret, bpp;
+ struct omap_overlay *ovl;
+ struct omapvideo_info *ovid;
+ struct omap_video_timings *timing;
+ struct omap_vout_device *vout = fh;
+
+ if (vout->streaming)
+ return -EBUSY;
+
+ mutex_lock(&vout->lock);
+
+ ovid = &vout->vid_info;
+ ovl = ovid->overlays[0];
+
+ /* get the display device attached to the overlay */
+ if (!ovl->manager || !ovl->manager->device) {
+ ret = -EINVAL;
+ goto s_fmt_vid_out_exit;
+ }
+ timing = &ovl->manager->device->panel.timings;
+
+	/* We don't support RGB24-packed mode if VRFB rotation
+	 * is enabled */
+ if ((rotation_enabled(vout)) &&
+ f->fmt.pix.pixelformat == V4L2_PIX_FMT_RGB24) {
+ ret = -EINVAL;
+ goto s_fmt_vid_out_exit;
+ }
+
+ /* get the framebuffer parameters */
+
+ if (rotate_90_or_270(vout)) {
+ vout->fbuf.fmt.height = timing->x_res;
+ vout->fbuf.fmt.width = timing->y_res;
+ } else {
+ vout->fbuf.fmt.height = timing->y_res;
+ vout->fbuf.fmt.width = timing->x_res;
+ }
+
+	/* changing to a smaller size is OK */
+
+ bpp = omap_vout_try_format(&f->fmt.pix);
+ f->fmt.pix.sizeimage = f->fmt.pix.width * f->fmt.pix.height * bpp;
+
+ /* try & set the new output format */
+ vout->bpp = bpp;
+ vout->pix = f->fmt.pix;
+ vout->vrfb_bpp = 1;
+
+	/* For YUYV/UYVY the VRFB bpp is 2; for other formats it is 1 */
+ if (V4L2_PIX_FMT_YUYV == vout->pix.pixelformat ||
+ V4L2_PIX_FMT_UYVY == vout->pix.pixelformat)
+ vout->vrfb_bpp = 2;
+
+ /* set default crop and win */
+ omap_vout_new_format(&vout->pix, &vout->fbuf, &vout->crop, &vout->win);
+
+	/* Save the changes in the overlay structure */
+ ret = omapvid_init(vout, 0);
+ if (ret) {
+ v4l2_err(&vout->vid_dev->v4l2_dev, "failed to change mode\n");
+ goto s_fmt_vid_out_exit;
+ }
+
+ ret = 0;
+
+s_fmt_vid_out_exit:
+ mutex_unlock(&vout->lock);
+ return ret;
+}
+
+static int vidioc_try_fmt_vid_overlay(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ int ret = 0;
+ struct omap_vout_device *vout = fh;
+ struct v4l2_window *win = &f->fmt.win;
+
+ ret = omap_vout_try_window(&vout->fbuf, win);
+
+ if (!ret) {
+ if (vout->vid == OMAP_VIDEO1)
+ win->global_alpha = 255;
+ else
+ win->global_alpha = f->fmt.win.global_alpha;
+ }
+
+ return ret;
+}
+
+static int vidioc_s_fmt_vid_overlay(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ int ret = 0;
+ struct omap_overlay *ovl;
+ struct omapvideo_info *ovid;
+ struct omap_vout_device *vout = fh;
+ struct v4l2_window *win = &f->fmt.win;
+
+ mutex_lock(&vout->lock);
+ ovid = &vout->vid_info;
+ ovl = ovid->overlays[0];
+
+ ret = omap_vout_new_window(&vout->crop, &vout->win, &vout->fbuf, win);
+ if (!ret) {
+ /* Video1 plane does not support global alpha */
+ if (ovl->id == OMAP_DSS_VIDEO1)
+ vout->win.global_alpha = 255;
+ else
+ vout->win.global_alpha = f->fmt.win.global_alpha;
+
+ vout->win.chromakey = f->fmt.win.chromakey;
+ }
+ mutex_unlock(&vout->lock);
+ return ret;
+}
+
+static int vidioc_enum_fmt_vid_overlay(struct file *file, void *fh,
+ struct v4l2_fmtdesc *fmt)
+{
+ int index = fmt->index;
+ enum v4l2_buf_type type = fmt->type;
+
+ fmt->index = index;
+ fmt->type = type;
+ if (index >= NUM_OUTPUT_FORMATS)
+ return -EINVAL;
+
+ fmt->flags = omap_formats[index].flags;
+ strlcpy(fmt->description, omap_formats[index].description,
+ sizeof(fmt->description));
+ fmt->pixelformat = omap_formats[index].pixelformat;
+ return 0;
+}
+
+static int vidioc_g_fmt_vid_overlay(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ u32 key_value = 0;
+ struct omap_overlay *ovl;
+ struct omapvideo_info *ovid;
+ struct omap_vout_device *vout = fh;
+ struct omap_overlay_manager_info info;
+ struct v4l2_window *win = &f->fmt.win;
+
+ ovid = &vout->vid_info;
+ ovl = ovid->overlays[0];
+
+ win->w = vout->win.w;
+ win->field = vout->win.field;
+ win->global_alpha = vout->win.global_alpha;
+
+ if (ovl->manager && ovl->manager->get_manager_info) {
+ ovl->manager->get_manager_info(ovl->manager, &info);
+ key_value = info.trans_key;
+ }
+ win->chromakey = key_value;
+ return 0;
+}
+
+static int vidioc_cropcap(struct file *file, void *fh,
+ struct v4l2_cropcap *cropcap)
+{
+ struct omap_vout_device *vout = fh;
+ struct v4l2_pix_format *pix = &vout->pix;
+
+ if (cropcap->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+
+ /* Width and height are always even */
+ cropcap->bounds.width = pix->width & ~1;
+ cropcap->bounds.height = pix->height & ~1;
+
+ omap_vout_default_crop(&vout->pix, &vout->fbuf, &cropcap->defrect);
+ cropcap->pixelaspect.numerator = 1;
+ cropcap->pixelaspect.denominator = 1;
+ return 0;
+}
+
+static int vidioc_g_crop(struct file *file, void *fh, struct v4l2_crop *crop)
+{
+ struct omap_vout_device *vout = fh;
+
+ if (crop->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+ crop->c = vout->crop;
+ return 0;
+}
+
+static int vidioc_s_crop(struct file *file, void *fh, struct v4l2_crop *crop)
+{
+ int ret = -EINVAL;
+ struct omap_vout_device *vout = fh;
+ struct omapvideo_info *ovid;
+ struct omap_overlay *ovl;
+ struct omap_video_timings *timing;
+
+ if (vout->streaming)
+ return -EBUSY;
+
+ mutex_lock(&vout->lock);
+ ovid = &vout->vid_info;
+ ovl = ovid->overlays[0];
+
+ if (!ovl->manager || !ovl->manager->device) {
+ ret = -EINVAL;
+ goto s_crop_err;
+ }
+ /* get the display device attached to the overlay */
+ timing = &ovl->manager->device->panel.timings;
+
+ if (rotate_90_or_270(vout)) {
+ vout->fbuf.fmt.height = timing->x_res;
+ vout->fbuf.fmt.width = timing->y_res;
+ } else {
+ vout->fbuf.fmt.height = timing->y_res;
+ vout->fbuf.fmt.width = timing->x_res;
+ }
+
+ if (crop->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ ret = omap_vout_new_crop(&vout->pix, &vout->crop, &vout->win,
+ &vout->fbuf, &crop->c);
+
+s_crop_err:
+ mutex_unlock(&vout->lock);
+ return ret;
+}
+
+static int vidioc_queryctrl(struct file *file, void *fh,
+ struct v4l2_queryctrl *ctrl)
+{
+ int ret = 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_ROTATE:
+ ret = v4l2_ctrl_query_fill(ctrl, 0, 270, 90, 0);
+ break;
+ case V4L2_CID_BG_COLOR:
+ ret = v4l2_ctrl_query_fill(ctrl, 0, 0xFFFFFF, 1, 0);
+ break;
+ case V4L2_CID_VFLIP:
+ ret = v4l2_ctrl_query_fill(ctrl, 0, 1, 1, 0);
+ break;
+ default:
+ ctrl->name[0] = '\0';
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+static int vidioc_g_ctrl(struct file *file, void *fh, struct v4l2_control *ctrl)
+{
+ int ret = 0;
+ struct omap_vout_device *vout = fh;
+
+ switch (ctrl->id) {
+ case V4L2_CID_ROTATE:
+ ctrl->value = vout->control[0].value;
+ break;
+ case V4L2_CID_BG_COLOR:
+ {
+ struct omap_overlay_manager_info info;
+ struct omap_overlay *ovl;
+
+ ovl = vout->vid_info.overlays[0];
+ if (!ovl->manager || !ovl->manager->get_manager_info) {
+ ret = -EINVAL;
+ break;
+ }
+
+ ovl->manager->get_manager_info(ovl->manager, &info);
+ ctrl->value = info.default_color;
+ break;
+ }
+ case V4L2_CID_VFLIP:
+ ctrl->value = vout->control[2].value;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+static int vidioc_s_ctrl(struct file *file, void *fh, struct v4l2_control *a)
+{
+ int ret = 0;
+ struct omap_vout_device *vout = fh;
+
+ switch (a->id) {
+ case V4L2_CID_ROTATE:
+ {
+ int rotation = a->value;
+
+ mutex_lock(&vout->lock);
+
+ if (rotation && vout->pix.pixelformat == V4L2_PIX_FMT_RGB24) {
+ mutex_unlock(&vout->lock);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (v4l2_rot_to_dss_rot(rotation, &vout->rotation,
+ vout->mirror)) {
+ mutex_unlock(&vout->lock);
+ ret = -EINVAL;
+ break;
+ }
+
+ vout->control[0].value = rotation;
+ mutex_unlock(&vout->lock);
+ break;
+ }
+ case V4L2_CID_BG_COLOR:
+ {
+ struct omap_overlay *ovl;
+ unsigned int color = a->value;
+ struct omap_overlay_manager_info info;
+
+ ovl = vout->vid_info.overlays[0];
+
+ mutex_lock(&vout->lock);
+ if (!ovl->manager || !ovl->manager->get_manager_info) {
+ mutex_unlock(&vout->lock);
+ ret = -EINVAL;
+ break;
+ }
+
+ ovl->manager->get_manager_info(ovl->manager, &info);
+ info.default_color = color;
+ if (ovl->manager->set_manager_info(ovl->manager, &info)) {
+ mutex_unlock(&vout->lock);
+ ret = -EINVAL;
+ break;
+ }
+
+ vout->control[1].value = color;
+ mutex_unlock(&vout->lock);
+ break;
+ }
+ case V4L2_CID_VFLIP:
+ {
+ struct omap_overlay *ovl;
+ struct omapvideo_info *ovid;
+ unsigned int mirror = a->value;
+
+ ovid = &vout->vid_info;
+ ovl = ovid->overlays[0];
+
+ mutex_lock(&vout->lock);
+
+ if (mirror && vout->pix.pixelformat == V4L2_PIX_FMT_RGB24) {
+ mutex_unlock(&vout->lock);
+ ret = -EINVAL;
+ break;
+ }
+ vout->mirror = mirror;
+ vout->control[2].value = mirror;
+ mutex_unlock(&vout->lock);
+ break;
+ }
+ default:
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+static int vidioc_reqbufs(struct file *file, void *fh,
+ struct v4l2_requestbuffers *req)
+{
+ int ret = 0;
+ unsigned int i, num_buffers = 0;
+ struct omap_vout_device *vout = fh;
+ struct videobuf_queue *q = &vout->vbq;
+ struct videobuf_dmabuf *dmabuf = NULL;
+
+ if ((req->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) || (req->count < 0))
+ return -EINVAL;
+	/* if the memory type is not mmap or userptr,
+	   return an error */
+ if ((V4L2_MEMORY_MMAP != req->memory) &&
+ (V4L2_MEMORY_USERPTR != req->memory))
+ return -EINVAL;
+
+ mutex_lock(&vout->lock);
+ /* Cannot be requested when streaming is on */
+ if (vout->streaming) {
+ ret = -EBUSY;
+ goto reqbuf_err;
+ }
+
+ /* If buffers are already allocated free them */
+ if (q->bufs[0] && (V4L2_MEMORY_MMAP == q->bufs[0]->memory)) {
+ if (vout->mmap_count) {
+ ret = -EBUSY;
+ goto reqbuf_err;
+ }
+ num_buffers = (vout->vid == OMAP_VIDEO1) ?
+ video1_numbuffers : video2_numbuffers;
+ for (i = num_buffers; i < vout->buffer_allocated; i++) {
+ dmabuf = videobuf_to_dma(q->bufs[i]);
+ omap_vout_free_buffer((u32)dmabuf->vmalloc,
+ vout->buffer_size);
+ vout->buf_virt_addr[i] = 0;
+ vout->buf_phy_addr[i] = 0;
+ }
+ vout->buffer_allocated = num_buffers;
+ videobuf_mmap_free(q);
+ } else if (q->bufs[0] && (V4L2_MEMORY_USERPTR == q->bufs[0]->memory)) {
+ if (vout->buffer_allocated) {
+ videobuf_mmap_free(q);
+ for (i = 0; i < vout->buffer_allocated; i++) {
+ kfree(q->bufs[i]);
+ q->bufs[i] = NULL;
+ }
+ vout->buffer_allocated = 0;
+ }
+ }
+
+ /*store the memory type in data structure */
+ vout->memory = req->memory;
+
+ INIT_LIST_HEAD(&vout->dma_queue);
+
+ /* call videobuf_reqbufs api */
+ ret = videobuf_reqbufs(q, req);
+ if (ret < 0)
+ goto reqbuf_err;
+
+ vout->buffer_allocated = req->count;
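+	/* Point each videobuf dma descriptor at the buffers the driver
+	 * allocated itself (buf_virt_addr/buf_phy_addr).
+	 */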
+ for (i = 0; i < req->count; i++) {
+ dmabuf = videobuf_to_dma(q->bufs[i]);
+ dmabuf->vmalloc = (void *) vout->buf_virt_addr[i];
+ dmabuf->bus_addr = (dma_addr_t) vout->buf_phy_addr[i];
+ dmabuf->sglen = 1;
+ }
+reqbuf_err:
+ mutex_unlock(&vout->lock);
+ return ret;
+}
+
+static int vidioc_querybuf(struct file *file, void *fh,
+ struct v4l2_buffer *b)
+{
+ struct omap_vout_device *vout = fh;
+
+ return videobuf_querybuf(&vout->vbq, b);
+}
+
+static int vidioc_qbuf(struct file *file, void *fh,
+ struct v4l2_buffer *buffer)
+{
+ struct omap_vout_device *vout = fh;
+ struct videobuf_queue *q = &vout->vbq;
+
+ if ((V4L2_BUF_TYPE_VIDEO_OUTPUT != buffer->type) ||
+ (buffer->index >= vout->buffer_allocated) ||
+ (q->bufs[buffer->index]->memory != buffer->memory)) {
+ return -EINVAL;
+ }
+ if (V4L2_MEMORY_USERPTR == buffer->memory) {
+ if ((buffer->length < vout->pix.sizeimage) ||
+ (0 == buffer->m.userptr)) {
+ return -EINVAL;
+ }
+ }
+
+ if ((rotation_enabled(vout)) &&
+ vout->vrfb_dma_tx.req_status == DMA_CHAN_NOT_ALLOTED) {
+ v4l2_warn(&vout->vid_dev->v4l2_dev,
+ "DMA Channel not allocated for Rotation\n");
+ return -EINVAL;
+ }
+
+ return videobuf_qbuf(q, buffer);
+}
+
+static int vidioc_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b)
+{
+ struct omap_vout_device *vout = fh;
+ struct videobuf_queue *q = &vout->vbq;
+
+ if (!vout->streaming)
+ return -EINVAL;
+
+ if (file->f_flags & O_NONBLOCK)
+ /* Call videobuf_dqbuf for non blocking mode */
+ return videobuf_dqbuf(q, (struct v4l2_buffer *)b, 1);
+ else
+ /* Call videobuf_dqbuf for blocking mode */
+ return videobuf_dqbuf(q, (struct v4l2_buffer *)b, 0);
+}
+
+static int vidioc_streamon(struct file *file, void *fh, enum v4l2_buf_type i)
+{
+ int ret = 0, j;
+ u32 addr = 0, mask = 0;
+ struct omap_vout_device *vout = fh;
+ struct videobuf_queue *q = &vout->vbq;
+ struct omapvideo_info *ovid = &vout->vid_info;
+
+ mutex_lock(&vout->lock);
+
+ if (vout->streaming) {
+ ret = -EBUSY;
+ goto streamon_err;
+ }
+
+ ret = videobuf_streamon(q);
+ if (ret)
+ goto streamon_err;
+
+ if (list_empty(&vout->dma_queue)) {
+ ret = -EIO;
+ goto streamon_err1;
+ }
+
+ /* Get the next frame from the buffer queue */
+ vout->next_frm = vout->cur_frm = list_entry(vout->dma_queue.next,
+ struct videobuf_buffer, queue);
+ /* Remove buffer from the buffer queue */
+ list_del(&vout->cur_frm->queue);
+ /* Mark state of the current frame to active */
+ vout->cur_frm->state = VIDEOBUF_ACTIVE;
+ /* Initialize field_id and started member */
+ vout->field_id = 0;
+
+ /* set flag here. Next QBUF will start DMA */
+ vout->streaming = 1;
+
+ vout->first_int = 1;
+
+ if (omap_vout_calculate_offset(vout)) {
+ ret = -EINVAL;
+ goto streamon_err1;
+ }
+ addr = (unsigned long) vout->queued_buf_addr[vout->cur_frm->i]
+ + vout->cropped_offset;
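+	/* addr is the physical address handed to the DSS: the queued
+	 * buffer address plus the cropping offset computed above.
+	 */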
+
+ mask = DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_EVEN | DISPC_IRQ_EVSYNC_ODD;
+
+ omap_dispc_register_isr(omap_vout_isr, vout, mask);
+
+ for (j = 0; j < ovid->num_overlays; j++) {
+ struct omap_overlay *ovl = ovid->overlays[j];
+
+ if (ovl->manager && ovl->manager->device) {
+ struct omap_overlay_info info;
+ ovl->get_overlay_info(ovl, &info);
+ info.enabled = 1;
+ info.paddr = addr;
+ if (ovl->set_overlay_info(ovl, &info)) {
+ ret = -EINVAL;
+ goto streamon_err1;
+ }
+ }
+ }
+
+	/* First save the configuration in the overlay structure */
+ ret = omapvid_init(vout, addr);
+ if (ret)
+ v4l2_err(&vout->vid_dev->v4l2_dev,
+ "failed to set overlay info\n");
+ /* Enable the pipeline and set the Go bit */
+ ret = omapvid_apply_changes(vout);
+ if (ret)
+ v4l2_err(&vout->vid_dev->v4l2_dev, "failed to change mode\n");
+
+ ret = 0;
+
+streamon_err1:
+ if (ret)
+ ret = videobuf_streamoff(q);
+streamon_err:
+ mutex_unlock(&vout->lock);
+ return ret;
+}
+
+static int vidioc_streamoff(struct file *file, void *fh, enum v4l2_buf_type i)
+{
+ u32 mask = 0;
+ int ret = 0, j;
+ struct omap_vout_device *vout = fh;
+ struct omapvideo_info *ovid = &vout->vid_info;
+
+ if (!vout->streaming)
+ return -EINVAL;
+
+ vout->streaming = 0;
+ mask = DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_EVEN | DISPC_IRQ_EVSYNC_ODD;
+
+ omap_dispc_unregister_isr(omap_vout_isr, vout, mask);
+
+ for (j = 0; j < ovid->num_overlays; j++) {
+ struct omap_overlay *ovl = ovid->overlays[j];
+
+ if (ovl->manager && ovl->manager->device) {
+ struct omap_overlay_info info;
+
+ ovl->get_overlay_info(ovl, &info);
+ info.enabled = 0;
+ ret = ovl->set_overlay_info(ovl, &info);
+ if (ret)
+ v4l2_err(&vout->vid_dev->v4l2_dev,
+ "failed to update overlay info in streamoff\n");
+ }
+ }
+
+	/* Turn off the pipeline */
+ ret = omapvid_apply_changes(vout);
+ if (ret)
+ v4l2_err(&vout->vid_dev->v4l2_dev, "failed to change mode in"
+ " streamoff\n");
+
+ INIT_LIST_HEAD(&vout->dma_queue);
+ ret = videobuf_streamoff(&vout->vbq);
+
+ return ret;
+}
+
+static int vidioc_s_fbuf(struct file *file, void *fh,
+ struct v4l2_framebuffer *a)
+{
+ int enable = 0;
+ struct omap_overlay *ovl;
+ struct omapvideo_info *ovid;
+ struct omap_vout_device *vout = fh;
+ struct omap_overlay_manager_info info;
+ enum omap_dss_trans_key_type key_type = OMAP_DSS_COLOR_KEY_GFX_DST;
+
+ ovid = &vout->vid_info;
+ ovl = ovid->overlays[0];
+
+ /* OMAP DSS doesn't support Source and Destination color
+ key together */
+ if ((a->flags & V4L2_FBUF_FLAG_SRC_CHROMAKEY) &&
+ (a->flags & V4L2_FBUF_FLAG_CHROMAKEY))
+ return -EINVAL;
+ /* OMAP DSS Doesn't support the Destination color key
+ and alpha blending together */
+ if ((a->flags & V4L2_FBUF_FLAG_CHROMAKEY) &&
+ (a->flags & V4L2_FBUF_FLAG_LOCAL_ALPHA))
+ return -EINVAL;
+
+ if ((a->flags & V4L2_FBUF_FLAG_SRC_CHROMAKEY)) {
+ vout->fbuf.flags |= V4L2_FBUF_FLAG_SRC_CHROMAKEY;
+ key_type = OMAP_DSS_COLOR_KEY_VID_SRC;
+ } else
+ vout->fbuf.flags &= ~V4L2_FBUF_FLAG_SRC_CHROMAKEY;
+
+ if ((a->flags & V4L2_FBUF_FLAG_CHROMAKEY)) {
+ vout->fbuf.flags |= V4L2_FBUF_FLAG_CHROMAKEY;
+ key_type = OMAP_DSS_COLOR_KEY_GFX_DST;
+ } else
+ vout->fbuf.flags &= ~V4L2_FBUF_FLAG_CHROMAKEY;
+
+ if (a->flags & (V4L2_FBUF_FLAG_CHROMAKEY |
+ V4L2_FBUF_FLAG_SRC_CHROMAKEY))
+ enable = 1;
+ else
+ enable = 0;
+ if (ovl->manager && ovl->manager->get_manager_info &&
+ ovl->manager->set_manager_info) {
+
+ ovl->manager->get_manager_info(ovl->manager, &info);
+ info.trans_enabled = enable;
+ info.trans_key_type = key_type;
+ info.trans_key = vout->win.chromakey;
+
+ if (ovl->manager->set_manager_info(ovl->manager, &info))
+ return -EINVAL;
+ }
+ if (a->flags & V4L2_FBUF_FLAG_LOCAL_ALPHA) {
+ vout->fbuf.flags |= V4L2_FBUF_FLAG_LOCAL_ALPHA;
+ enable = 1;
+ } else {
+ vout->fbuf.flags &= ~V4L2_FBUF_FLAG_LOCAL_ALPHA;
+ enable = 0;
+ }
+ if (ovl->manager && ovl->manager->get_manager_info &&
+ ovl->manager->set_manager_info) {
+ ovl->manager->get_manager_info(ovl->manager, &info);
+ info.alpha_enabled = enable;
+ if (ovl->manager->set_manager_info(ovl->manager, &info))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int vidioc_g_fbuf(struct file *file, void *fh,
+ struct v4l2_framebuffer *a)
+{
+ struct omap_overlay *ovl;
+ struct omapvideo_info *ovid;
+ struct omap_vout_device *vout = fh;
+ struct omap_overlay_manager_info info;
+
+ ovid = &vout->vid_info;
+ ovl = ovid->overlays[0];
+
+ a->flags = 0x0;
+ a->capability = V4L2_FBUF_CAP_LOCAL_ALPHA | V4L2_FBUF_CAP_CHROMAKEY
+ | V4L2_FBUF_CAP_SRC_CHROMAKEY;
+
+ if (ovl->manager && ovl->manager->get_manager_info) {
+ ovl->manager->get_manager_info(ovl->manager, &info);
+ if (info.trans_key_type == OMAP_DSS_COLOR_KEY_VID_SRC)
+ a->flags |= V4L2_FBUF_FLAG_SRC_CHROMAKEY;
+ if (info.trans_key_type == OMAP_DSS_COLOR_KEY_GFX_DST)
+ a->flags |= V4L2_FBUF_FLAG_CHROMAKEY;
+ }
+ if (ovl->manager && ovl->manager->get_manager_info) {
+ ovl->manager->get_manager_info(ovl->manager, &info);
+ if (info.alpha_enabled)
+ a->flags |= V4L2_FBUF_FLAG_LOCAL_ALPHA;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops vout_ioctl_ops = {
+ .vidioc_querycap = vidioc_querycap,
+ .vidioc_enum_fmt_vid_out = vidioc_enum_fmt_vid_out,
+ .vidioc_g_fmt_vid_out = vidioc_g_fmt_vid_out,
+ .vidioc_try_fmt_vid_out = vidioc_try_fmt_vid_out,
+ .vidioc_s_fmt_vid_out = vidioc_s_fmt_vid_out,
+ .vidioc_queryctrl = vidioc_queryctrl,
+ .vidioc_g_ctrl = vidioc_g_ctrl,
+ .vidioc_s_fbuf = vidioc_s_fbuf,
+ .vidioc_g_fbuf = vidioc_g_fbuf,
+ .vidioc_s_ctrl = vidioc_s_ctrl,
+ .vidioc_try_fmt_vid_overlay = vidioc_try_fmt_vid_overlay,
+ .vidioc_s_fmt_vid_overlay = vidioc_s_fmt_vid_overlay,
+ .vidioc_enum_fmt_vid_overlay = vidioc_enum_fmt_vid_overlay,
+ .vidioc_g_fmt_vid_overlay = vidioc_g_fmt_vid_overlay,
+ .vidioc_cropcap = vidioc_cropcap,
+ .vidioc_g_crop = vidioc_g_crop,
+ .vidioc_s_crop = vidioc_s_crop,
+ .vidioc_reqbufs = vidioc_reqbufs,
+ .vidioc_querybuf = vidioc_querybuf,
+ .vidioc_qbuf = vidioc_qbuf,
+ .vidioc_dqbuf = vidioc_dqbuf,
+ .vidioc_streamon = vidioc_streamon,
+ .vidioc_streamoff = vidioc_streamoff,
+};
+
+static const struct v4l2_file_operations omap_vout_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = omap_vout_mmap,
+ .open = omap_vout_open,
+ .release = omap_vout_release,
+};
+
+/* Init functions used during driver initialization */
+/* Initial setup of video_data */
+static int __init omap_vout_setup_video_data(struct omap_vout_device *vout)
+{
+ struct video_device *vfd;
+ struct v4l2_pix_format *pix;
+ struct v4l2_control *control;
+ struct omap_dss_device *display =
+ vout->vid_info.overlays[0]->manager->device;
+
+ /* set the default pix */
+ pix = &vout->pix;
+
+	/* Set the default picture size to QQVGA */
+ pix->width = QQVGA_WIDTH;
+ pix->height = QQVGA_HEIGHT;
+
+ /* Default pixel format is RGB 5-6-5 */
+ pix->pixelformat = V4L2_PIX_FMT_RGB565;
+ pix->field = V4L2_FIELD_ANY;
+ pix->bytesperline = pix->width * 2;
+ pix->sizeimage = pix->bytesperline * pix->height;
+ pix->priv = 0;
+ pix->colorspace = V4L2_COLORSPACE_JPEG;
+
+ vout->bpp = RGB565_BPP;
+ vout->fbuf.fmt.width = display->panel.timings.x_res;
+ vout->fbuf.fmt.height = display->panel.timings.y_res;
+
+ /* Set the data structures for the overlay parameters*/
+ vout->win.global_alpha = 255;
+ vout->fbuf.flags = 0;
+ vout->fbuf.capability = V4L2_FBUF_CAP_LOCAL_ALPHA |
+ V4L2_FBUF_CAP_SRC_CHROMAKEY | V4L2_FBUF_CAP_CHROMAKEY;
+ vout->win.chromakey = 0;
+
+ omap_vout_new_format(pix, &vout->fbuf, &vout->crop, &vout->win);
+
+	/* Initialize the control variables for
+	   rotation, flipping and background color. */
+ control = vout->control;
+ control[0].id = V4L2_CID_ROTATE;
+ control[0].value = 0;
+ vout->rotation = 0;
+ vout->mirror = 0;
+ vout->control[2].id = V4L2_CID_HFLIP;
+ vout->control[2].value = 0;
+ vout->vrfb_bpp = 2;
+
+ control[1].id = V4L2_CID_BG_COLOR;
+ control[1].value = 0;
+
+ /* initialize the video_device struct */
+ vfd = vout->vfd = video_device_alloc();
+
+ if (!vfd) {
+ printk(KERN_ERR VOUT_NAME ": could not allocate"
+ " video device struct\n");
+ return -ENOMEM;
+ }
+ vfd->release = video_device_release;
+ vfd->ioctl_ops = &vout_ioctl_ops;
+
+ strlcpy(vfd->name, VOUT_NAME, sizeof(vfd->name));
+
+ /* need to register for a VID_HARDWARE_* ID in videodev.h */
+ vfd->fops = &omap_vout_fops;
+ vfd->v4l2_dev = &vout->vid_dev->v4l2_dev;
+ mutex_init(&vout->lock);
+
+ vfd->minor = -1;
+ return 0;
+
+}
+
+/* Setup video buffers */
+static int __init omap_vout_setup_video_bufs(struct platform_device *pdev,
+ int vid_num)
+{
+ u32 numbuffers;
+ int ret = 0, i, j;
+ int image_width, image_height;
+ struct video_device *vfd;
+ struct omap_vout_device *vout;
+ int static_vrfb_allocation = 0, vrfb_num_bufs = VRFB_NUM_BUFS;
+ struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev);
+ struct omap2video_device *vid_dev =
+ container_of(v4l2_dev, struct omap2video_device, v4l2_dev);
+
+ vout = vid_dev->vouts[vid_num];
+ vfd = vout->vfd;
+
+ numbuffers = (vid_num == 0) ? video1_numbuffers : video2_numbuffers;
+ vout->buffer_size = (vid_num == 0) ? video1_bufsize : video2_bufsize;
+ dev_info(&pdev->dev, "Buffer Size = %d\n", vout->buffer_size);
+
+ for (i = 0; i < numbuffers; i++) {
+ vout->buf_virt_addr[i] =
+ omap_vout_alloc_buffer(vout->buffer_size,
+ (u32 *) &vout->buf_phy_addr[i]);
+ if (!vout->buf_virt_addr[i]) {
+ numbuffers = i;
+ ret = -ENOMEM;
+ goto free_buffers;
+ }
+ }
+
+ for (i = 0; i < VRFB_NUM_BUFS; i++) {
+ if (omap_vrfb_request_ctx(&vout->vrfb_context[i])) {
+ dev_info(&pdev->dev, ": VRFB allocation failed\n");
+ for (j = 0; j < i; j++)
+ omap_vrfb_release_ctx(&vout->vrfb_context[j]);
+ ret = -ENOMEM;
+ goto free_buffers;
+ }
+ }
+ vout->cropped_offset = 0;
+
+ /* Calculate VRFB memory size */
+ /* allocate for worst case size */
+ image_width = VID_MAX_WIDTH / TILE_SIZE;
+ if (VID_MAX_WIDTH % TILE_SIZE)
+ image_width++;
+
+ image_width = image_width * TILE_SIZE;
+ image_height = VID_MAX_HEIGHT / TILE_SIZE;
+
+ if (VID_MAX_HEIGHT % TILE_SIZE)
+ image_height++;
+
+ image_height = image_height * TILE_SIZE;
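+	/* Worst case: width and height rounded up to full VRFB tiles at
+	 * 4 bytes per pixel (the 2 * 2 factor below).
+	 */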
+ vout->smsshado_size = PAGE_ALIGN(image_width * image_height * 2 * 2);
+
+ /*
+ * Request and Initialize DMA, for DMA based VRFB transfer
+ */
+ vout->vrfb_dma_tx.dev_id = OMAP_DMA_NO_DEVICE;
+ vout->vrfb_dma_tx.dma_ch = -1;
+ vout->vrfb_dma_tx.req_status = DMA_CHAN_ALLOTED;
+ ret = omap_request_dma(vout->vrfb_dma_tx.dev_id, "VRFB DMA TX",
+ omap_vout_vrfb_dma_tx_callback,
+ (void *) &vout->vrfb_dma_tx, &vout->vrfb_dma_tx.dma_ch);
+ if (ret < 0) {
+ vout->vrfb_dma_tx.req_status = DMA_CHAN_NOT_ALLOTED;
+ dev_info(&pdev->dev, ": failed to allocate DMA Channel for"
+ " video%d\n", vfd->minor);
+ }
+ init_waitqueue_head(&vout->vrfb_dma_tx.wait);
+
+ /* Allocate VRFB buffers if selected through bootargs */
+ static_vrfb_allocation = (vid_num == 0) ?
+ vid1_static_vrfb_alloc : vid2_static_vrfb_alloc;
+
+	/* Static allocation of the VRFB buffers is done through
+	   command-line arguments */
+ if (static_vrfb_allocation) {
+ if (omap_vout_allocate_vrfb_buffers(vout, &vrfb_num_bufs, -1)) {
+ ret = -ENOMEM;
+ goto release_vrfb_ctx;
+ }
+ vout->vrfb_static_allocation = 1;
+ }
+ return 0;
+
+release_vrfb_ctx:
+ for (j = 0; j < VRFB_NUM_BUFS; j++)
+ omap_vrfb_release_ctx(&vout->vrfb_context[j]);
+
+free_buffers:
+ for (i = 0; i < numbuffers; i++) {
+ omap_vout_free_buffer(vout->buf_virt_addr[i],
+ vout->buffer_size);
+ vout->buf_virt_addr[i] = 0;
+ vout->buf_phy_addr[i] = 0;
+ }
+ return ret;
+
+}
+
+/* Create video out devices */
+static int __init omap_vout_create_video_devices(struct platform_device *pdev)
+{
+ int ret = 0, k;
+ struct omap_vout_device *vout;
+ struct video_device *vfd = NULL;
+ struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev);
+ struct omap2video_device *vid_dev = container_of(v4l2_dev,
+ struct omap2video_device, v4l2_dev);
+
+ for (k = 0; k < pdev->num_resources; k++) {
+
+ vout = kmalloc(sizeof(struct omap_vout_device), GFP_KERNEL);
+ if (!vout) {
+ dev_err(&pdev->dev, ": could not allocate memory\n");
+ return -ENOMEM;
+ }
+ memset(vout, 0, sizeof(struct omap_vout_device));
+
+ vout->vid = k;
+ vid_dev->vouts[k] = vout;
+ vout->vid_dev = vid_dev;
+ /* Select video2 if only 1 overlay is controlled by V4L2 */
+ if (pdev->num_resources == 1)
+ vout->vid_info.overlays[0] = vid_dev->overlays[k + 2];
+ else
+ /* Else select video1 and video2 one by one. */
+ vout->vid_info.overlays[0] = vid_dev->overlays[k + 1];
+ vout->vid_info.num_overlays = 1;
+ vout->vid_info.id = k + 1;
+
+ /* Setup the default configuration for the video devices
+ */
+ if (omap_vout_setup_video_data(vout) != 0) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ /* Allocate default number of buffers for the video streaming
+ * and reserve the VRFB space for rotation
+ */
+ if (omap_vout_setup_video_bufs(pdev, k) != 0) {
+ ret = -ENOMEM;
+ goto error1;
+ }
+
+ /* Register the Video device with V4L2
+ */
+ vfd = vout->vfd;
+ if (video_register_device(vfd, VFL_TYPE_GRABBER, k + 1) < 0) {
+ dev_err(&pdev->dev, ": Could not register "
+ "Video for Linux device\n");
+ vfd->minor = -1;
+ ret = -ENODEV;
+ goto error2;
+ }
+ video_set_drvdata(vfd, vout);
+
+ /* Configure the overlay structure */
+ ret = omapvid_init(vid_dev->vouts[k], 0);
+ if (!ret)
+ goto success;
+
+error2:
+ omap_vout_release_vrfb(vout);
+ omap_vout_free_buffers(vout);
+error1:
+ video_device_release(vfd);
+error:
+ kfree(vout);
+ return ret;
+
+success:
+ dev_info(&pdev->dev, ": registered and initialized"
+ " video device %d\n", vfd->minor);
+ if (k == (pdev->num_resources - 1))
+ return 0;
+ }
+
+ return -ENODEV;
+}
+/* Driver functions */
+static void omap_vout_cleanup_device(struct omap_vout_device *vout)
+{
+ struct video_device *vfd;
+
+ if (!vout)
+ return;
+
+ vfd = vout->vfd;
+ if (vfd) {
+ if (!video_is_registered(vfd)) {
+ /*
+ * The device was never registered, so release the
+ * video_device struct directly.
+ */
+ video_device_release(vfd);
+ } else {
+ /*
+ * The unregister function will release the video_device
+ * struct as well as unregistering it.
+ */
+ video_unregister_device(vfd);
+ }
+ }
+
+ omap_vout_release_vrfb(vout);
+ omap_vout_free_buffers(vout);
+	/* Free the VRFB buffers if they were allocated
+	 * at init time
+	 */
+ if (vout->vrfb_static_allocation)
+ omap_vout_free_vrfb_buffers(vout);
+
+ kfree(vout);
+}
+
+static int omap_vout_remove(struct platform_device *pdev)
+{
+ int k;
+ struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev);
+ struct omap2video_device *vid_dev = container_of(v4l2_dev, struct
+ omap2video_device, v4l2_dev);
+
+ v4l2_device_unregister(v4l2_dev);
+ for (k = 0; k < pdev->num_resources; k++)
+ omap_vout_cleanup_device(vid_dev->vouts[k]);
+
+ for (k = 0; k < vid_dev->num_displays; k++) {
+ if (vid_dev->displays[k]->state != OMAP_DSS_DISPLAY_DISABLED)
+ vid_dev->displays[k]->disable(vid_dev->displays[k]);
+
+ omap_dss_put_device(vid_dev->displays[k]);
+ }
+ kfree(vid_dev);
+ return 0;
+}
+
+static int __init omap_vout_probe(struct platform_device *pdev)
+{
+ int ret = 0, i;
+ struct omap_overlay *ovl;
+ struct omap_dss_device *dssdev = NULL;
+ struct omap_dss_device *def_display;
+ struct omap2video_device *vid_dev = NULL;
+
+ if (pdev->num_resources == 0) {
+ dev_err(&pdev->dev, "probed for an unknown device\n");
+ return -ENODEV;
+ }
+
+ vid_dev = kzalloc(sizeof(struct omap2video_device), GFP_KERNEL);
+ if (vid_dev == NULL)
+ return -ENOMEM;
+
+ vid_dev->num_displays = 0;
+ for_each_dss_dev(dssdev) {
+ omap_dss_get_device(dssdev);
+ vid_dev->displays[vid_dev->num_displays++] = dssdev;
+ }
+
+ if (vid_dev->num_displays == 0) {
+ dev_err(&pdev->dev, "no displays\n");
+ ret = -EINVAL;
+ goto probe_err0;
+ }
+
+ vid_dev->num_overlays = omap_dss_get_num_overlays();
+ for (i = 0; i < vid_dev->num_overlays; i++)
+ vid_dev->overlays[i] = omap_dss_get_overlay(i);
+
+ vid_dev->num_managers = omap_dss_get_num_overlay_managers();
+ for (i = 0; i < vid_dev->num_managers; i++)
+ vid_dev->managers[i] = omap_dss_get_overlay_manager(i);
+
+	/* Get the video1 and video2 overlays and
+	 * set up the display attached to those overlays
+	 */
+ for (i = 1; i < vid_dev->num_overlays; i++) {
+ ovl = omap_dss_get_overlay(i);
+ if (ovl->manager && ovl->manager->device) {
+ def_display = ovl->manager->device;
+ } else {
+ dev_warn(&pdev->dev, "cannot find display\n");
+ def_display = NULL;
+ }
+ if (def_display) {
+ ret = def_display->enable(def_display);
+ if (ret) {
+				/* We do not treat this as an error,
+				 * as the display may already have been
+				 * enabled by the framebuffer driver
+				 */
+ dev_warn(&pdev->dev,
+ "'%s' Display already enabled\n",
+ def_display->name);
+ }
+ /* set the update mode */
+ if (def_display->caps &
+ OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) {
+#ifdef CONFIG_FB_OMAP2_FORCE_AUTO_UPDATE
+ if (def_display->enable_te)
+ def_display->enable_te(def_display, 1);
+ if (def_display->set_update_mode)
+ def_display->set_update_mode(def_display,
+ OMAP_DSS_UPDATE_AUTO);
+#else /* MANUAL_UPDATE */
+ if (def_display->enable_te)
+ def_display->enable_te(def_display, 0);
+ if (def_display->set_update_mode)
+ def_display->set_update_mode(def_display,
+ OMAP_DSS_UPDATE_MANUAL);
+#endif
+ } else {
+ if (def_display->set_update_mode)
+ def_display->set_update_mode(def_display,
+ OMAP_DSS_UPDATE_AUTO);
+ }
+ }
+ }
+
+ if (v4l2_device_register(&pdev->dev, &vid_dev->v4l2_dev) < 0) {
+ dev_err(&pdev->dev, "v4l2_device_register failed\n");
+ ret = -ENODEV;
+ goto probe_err1;
+ }
+
+ ret = omap_vout_create_video_devices(pdev);
+ if (ret)
+ goto probe_err2;
+
+ for (i = 0; i < vid_dev->num_displays; i++) {
+ struct omap_dss_device *display = vid_dev->displays[i];
+
+ if (display->update)
+ display->update(display, 0, 0,
+ display->panel.timings.x_res,
+ display->panel.timings.y_res);
+ }
+ return 0;
+
+probe_err2:
+ v4l2_device_unregister(&vid_dev->v4l2_dev);
+probe_err1:
+ for (i = 1; i < vid_dev->num_overlays; i++) {
+ def_display = NULL;
+ ovl = omap_dss_get_overlay(i);
+ if (ovl->manager && ovl->manager->device)
+ def_display = ovl->manager->device;
+
+ if (def_display)
+ def_display->disable(def_display);
+ }
+probe_err0:
+ kfree(vid_dev);
+ return ret;
+}
+
+static struct platform_driver omap_vout_driver = {
+ .driver = {
+ .name = VOUT_NAME,
+ },
+ .probe = omap_vout_probe,
+ .remove = omap_vout_remove,
+};
+
+static int __init omap_vout_init(void)
+{
+ if (platform_driver_register(&omap_vout_driver) != 0) {
+		printk(KERN_ERR VOUT_NAME ": Could not register video driver\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void omap_vout_cleanup(void)
+{
+ platform_driver_unregister(&omap_vout_driver);
+}
+
+late_initcall(omap_vout_init);
+module_exit(omap_vout_cleanup);
diff --git a/drivers/media/video/omap/omap_voutdef.h b/drivers/media/video/omap/omap_voutdef.h
new file mode 100644
index 000000000000..ea3a047f8bca
--- /dev/null
+++ b/drivers/media/video/omap/omap_voutdef.h
@@ -0,0 +1,147 @@
+/*
+ * omap_voutdef.h
+ *
+ * Copyright (C) 2010 Texas Instruments.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#ifndef OMAP_VOUTDEF_H
+#define OMAP_VOUTDEF_H
+
+#include <plat/display.h>
+
+#define YUYV_BPP 2
+#define RGB565_BPP 2
+#define RGB24_BPP 3
+#define RGB32_BPP 4
+#define TILE_SIZE 32
+#define YUYV_VRFB_BPP 2
+#define RGB_VRFB_BPP 1
+#define MAX_CID 3
+#define MAC_VRFB_CTXS 4
+#define MAX_VOUT_DEV 2
+#define MAX_OVLS 3
+#define MAX_DISPLAYS 3
+#define MAX_MANAGERS 3
+
+/* Enum for Rotation
+ * DSS understands rotation in 0, 1, 2, 3 context
+ * while V4L2 driver understands it as 0, 90, 180, 270
+ */
+enum dss_rotation {
+ dss_rotation_0_degree = 0,
+ dss_rotation_90_degree = 1,
+ dss_rotation_180_degree = 2,
+ dss_rotation_270_degree = 3,
+};
+/*
+ * This structure is used to store the DMA transfer parameters
+ * for VRFB hidden buffer
+ */
+struct vid_vrfb_dma {
+ int dev_id;
+ int dma_ch;
+ int req_status;
+ int tx_status;
+ wait_queue_head_t wait;
+};
+
+struct omapvideo_info {
+ int id;
+ int num_overlays;
+ struct omap_overlay *overlays[MAX_OVLS];
+};
+
+struct omap2video_device {
+ struct mutex mtx;
+
+ int state;
+
+ struct v4l2_device v4l2_dev;
+ struct omap_vout_device *vouts[MAX_VOUT_DEV];
+
+ int num_displays;
+ struct omap_dss_device *displays[MAX_DISPLAYS];
+ int num_overlays;
+ struct omap_overlay *overlays[MAX_OVLS];
+ int num_managers;
+ struct omap_overlay_manager *managers[MAX_MANAGERS];
+};
+
+/* per-device data structure */
+struct omap_vout_device {
+
+ struct omapvideo_info vid_info;
+ struct video_device *vfd;
+ struct omap2video_device *vid_dev;
+ int vid;
+ int opened;
+
+	/* we don't allow changing the image fmt/size once a buffer
+	 * has been allocated
+	 */
+ int buffer_allocated;
+ /* allow to reuse previously allocated buffer which is big enough */
+ int buffer_size;
+ /* keep buffer info across opens */
+ unsigned long buf_virt_addr[VIDEO_MAX_FRAME];
+ unsigned long buf_phy_addr[VIDEO_MAX_FRAME];
+ enum omap_color_mode dss_mode;
+
+	/* we don't allow requesting new buffers while old buffers
+	 * are still mmapped
+	 */
+ int mmap_count;
+
+ spinlock_t vbq_lock; /* spinlock for videobuf queues */
+ unsigned long field_count; /* field counter for videobuf_buffer */
+
+	/* true means streaming is in progress. */
+ bool streaming;
+
+ struct v4l2_pix_format pix;
+ struct v4l2_rect crop;
+ struct v4l2_window win;
+ struct v4l2_framebuffer fbuf;
+
+ /* Lock to protect the shared data structures in ioctl */
+ struct mutex lock;
+
+ /* V4L2 control structure for different control id */
+ struct v4l2_control control[MAX_CID];
+ enum dss_rotation rotation;
+ bool mirror;
+ int flicker_filter;
+ /* V4L2 control structure for different control id */
+
+ int bpp; /* bytes per pixel */
+ int vrfb_bpp; /* bytes per pixel with respect to VRFB */
+
+ struct vid_vrfb_dma vrfb_dma_tx;
+ unsigned int smsshado_phy_addr[MAC_VRFB_CTXS];
+ unsigned int smsshado_virt_addr[MAC_VRFB_CTXS];
+ struct vrfb vrfb_context[MAC_VRFB_CTXS];
+ bool vrfb_static_allocation;
+ unsigned int smsshado_size;
+ unsigned char pos;
+
+ int ps, vr_ps, line_length, first_int, field_id;
+ enum v4l2_memory memory;
+ struct videobuf_buffer *cur_frm, *next_frm;
+ struct list_head dma_queue;
+ u8 *queued_buf_addr[VIDEO_MAX_FRAME];
+ u32 cropped_offset;
+ s32 tv_field1_offset;
+ void *isr_handle;
+
+ /* Buffer queue variables */
+ struct omap_vout_device *vout;
+ enum v4l2_buf_type type;
+ struct videobuf_queue vbq;
+ int io_allowed;
+
+};
+#endif /* ifndef OMAP_VOUTDEF_H */
diff --git a/drivers/media/video/omap/omap_voutlib.c b/drivers/media/video/omap/omap_voutlib.c
new file mode 100644
index 000000000000..b941c761eef9
--- /dev/null
+++ b/drivers/media/video/omap/omap_voutlib.c
@@ -0,0 +1,293 @@
+/*
+ * omap_voutlib.c
+ *
+ * Copyright (C) 2005-2010 Texas Instruments.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ * Based on the OMAP2 camera driver
+ * Video-for-Linux (Version 2) camera capture driver for
+ * the OMAP24xx camera controller.
+ *
+ * Author: Andy Lowe (source@mvista.com)
+ *
+ * Copyright (C) 2004 MontaVista Software, Inc.
+ * Copyright (C) 2010 Texas Instruments.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/videodev2.h>
+
+#include <plat/cpu.h>
+
+MODULE_AUTHOR("Texas Instruments");
+MODULE_DESCRIPTION("OMAP Video library");
+MODULE_LICENSE("GPL");
+
+/* Return the default overlay cropping rectangle in crop given the image
+ * size in pix and the video display size in fbuf. The default
+ * cropping rectangle is the largest rectangle no larger than the capture size
+ * that will fit on the display. The default cropping rectangle is centered in
+ * the image. All dimensions and offsets are rounded down to even numbers.
+ */
+void omap_vout_default_crop(struct v4l2_pix_format *pix,
+ struct v4l2_framebuffer *fbuf, struct v4l2_rect *crop)
+{
+ crop->width = (pix->width < fbuf->fmt.width) ?
+ pix->width : fbuf->fmt.width;
+ crop->height = (pix->height < fbuf->fmt.height) ?
+ pix->height : fbuf->fmt.height;
+ crop->width &= ~1;
+ crop->height &= ~1;
+ crop->left = ((pix->width - crop->width) >> 1) & ~1;
+ crop->top = ((pix->height - crop->height) >> 1) & ~1;
+}
+EXPORT_SYMBOL_GPL(omap_vout_default_crop);
+
+/* Given a new render window in new_win, adjust the window to the
+ * nearest supported configuration. The adjusted window parameters are
+ * returned in new_win.
+ * Returns zero if successful, or -EINVAL if the requested window is
+ * impossible and cannot reasonably be adjusted.
+ */
+int omap_vout_try_window(struct v4l2_framebuffer *fbuf,
+ struct v4l2_window *new_win)
+{
+ struct v4l2_rect try_win;
+
+ /* make a working copy of the new_win rectangle */
+ try_win = new_win->w;
+
+ /* adjust the preview window so it fits on the display by clipping any
+ * offscreen areas
+ */
+ if (try_win.left < 0) {
+ try_win.width += try_win.left;
+ try_win.left = 0;
+ }
+ if (try_win.top < 0) {
+ try_win.height += try_win.top;
+ try_win.top = 0;
+ }
+ try_win.width = (try_win.width < fbuf->fmt.width) ?
+ try_win.width : fbuf->fmt.width;
+ try_win.height = (try_win.height < fbuf->fmt.height) ?
+ try_win.height : fbuf->fmt.height;
+ if (try_win.left + try_win.width > fbuf->fmt.width)
+ try_win.width = fbuf->fmt.width - try_win.left;
+ if (try_win.top + try_win.height > fbuf->fmt.height)
+ try_win.height = fbuf->fmt.height - try_win.top;
+ try_win.width &= ~1;
+ try_win.height &= ~1;
+
+ if (try_win.width <= 0 || try_win.height <= 0)
+ return -EINVAL;
+
+ /* We now have a valid preview window, so go with it */
+ new_win->w = try_win;
+ new_win->field = V4L2_FIELD_ANY;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(omap_vout_try_window);
+
+/* Given a new render window in new_win, adjust the window to the
+ * nearest supported configuration. The image cropping window in crop
+ * will also be adjusted if necessary. Preference is given to keeping
+ * the window as close to the requested configuration as possible. If
+ * successful, new_win, vout->win, and crop are updated.
+ * Returns zero if successful, or -EINVAL if the requested preview window is
+ * impossible and cannot reasonably be adjusted.
+ */
+int omap_vout_new_window(struct v4l2_rect *crop,
+ struct v4l2_window *win, struct v4l2_framebuffer *fbuf,
+ struct v4l2_window *new_win)
+{
+ int err;
+
+ err = omap_vout_try_window(fbuf, new_win);
+ if (err)
+ return err;
+
+ /* update our preview window */
+ win->w = new_win->w;
+ win->field = new_win->field;
+ win->chromakey = new_win->chromakey;
+
+ /* Adjust the cropping window to allow for resizing limitation */
+ if (cpu_is_omap24xx()) {
+ /* For 24xx limit is 8x to 1/2x scaling. */
+ if ((crop->height/win->w.height) >= 2)
+ crop->height = win->w.height * 2;
+
+ if ((crop->width/win->w.width) >= 2)
+ crop->width = win->w.width * 2;
+
+ if (crop->width > 768) {
+ /* The OMAP2420 vertical resizing line buffer is 768
+ * pixels wide. If the cropped image is wider than
+ * 768 pixels then it cannot be vertically resized.
+ */
+ if (crop->height != win->w.height)
+ crop->width = 768;
+ }
+ } else if (cpu_is_omap34xx()) {
+ /* For 34xx limit is 8x to 1/4x scaling. */
+ if ((crop->height/win->w.height) >= 4)
+ crop->height = win->w.height * 4;
+
+ if ((crop->width/win->w.width) >= 4)
+ crop->width = win->w.width * 4;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(omap_vout_new_window);
+
+/* Given a new cropping rectangle in new_crop, adjust the cropping rectangle to
+ * the nearest supported configuration. The image render window in win will
+ * also be adjusted if necessary. The preview window is adjusted such that the
+ * horizontal and vertical rescaling ratios stay constant. If the render
+ * window would fall outside the display boundaries, the cropping rectangle
+ * will also be adjusted to maintain the rescaling ratios. If successful, crop
+ * and win are updated.
+ * Returns zero if successful, or -EINVAL if the requested cropping rectangle is
+ * impossible and cannot reasonably be adjusted.
+ */
+int omap_vout_new_crop(struct v4l2_pix_format *pix,
+ struct v4l2_rect *crop, struct v4l2_window *win,
+ struct v4l2_framebuffer *fbuf, const struct v4l2_rect *new_crop)
+{
+ struct v4l2_rect try_crop;
+ unsigned long vresize, hresize;
+
+ /* make a working copy of the new_crop rectangle */
+ try_crop = *new_crop;
+
+ /* adjust the cropping rectangle so it fits in the image */
+ if (try_crop.left < 0) {
+ try_crop.width += try_crop.left;
+ try_crop.left = 0;
+ }
+ if (try_crop.top < 0) {
+ try_crop.height += try_crop.top;
+ try_crop.top = 0;
+ }
+ try_crop.width = (try_crop.width < pix->width) ?
+ try_crop.width : pix->width;
+ try_crop.height = (try_crop.height < pix->height) ?
+ try_crop.height : pix->height;
+ if (try_crop.left + try_crop.width > pix->width)
+ try_crop.width = pix->width - try_crop.left;
+ if (try_crop.top + try_crop.height > pix->height)
+ try_crop.height = pix->height - try_crop.top;
+
+ try_crop.width &= ~1;
+ try_crop.height &= ~1;
+
+ if (try_crop.width <= 0 || try_crop.height <= 0)
+ return -EINVAL;
+
+ if (cpu_is_omap24xx()) {
+ if (crop->height != win->w.height) {
+ /* If we're resizing vertically, we can't support a
+ * crop width wider than 768 pixels.
+ */
+ if (try_crop.width > 768)
+ try_crop.width = 768;
+ }
+ }
+ /* vertical resizing */
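+	/* Scaling ratios are kept as fixed-point values in 1/1024 units;
+	 * the 2048/4096 limits below correspond to 1/2x and 1/4x
+	 * downscaling on 24xx and 34xx respectively.
+	 */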
+ vresize = (1024 * crop->height) / win->w.height;
+ if (cpu_is_omap24xx() && (vresize > 2048))
+ vresize = 2048;
+ else if (cpu_is_omap34xx() && (vresize > 4096))
+ vresize = 4096;
+
+ win->w.height = ((1024 * try_crop.height) / vresize) & ~1;
+ if (win->w.height == 0)
+ win->w.height = 2;
+ if (win->w.height + win->w.top > fbuf->fmt.height) {
+ /* We made the preview window extend below the bottom of the
+ * display, so clip it to the display boundary and resize the
+ * cropping height to maintain the vertical resizing ratio.
+ */
+ win->w.height = (fbuf->fmt.height - win->w.top) & ~1;
+ if (try_crop.height == 0)
+ try_crop.height = 2;
+ }
+ /* horizontal resizing */
+ hresize = (1024 * crop->width) / win->w.width;
+ if (cpu_is_omap24xx() && (hresize > 2048))
+ hresize = 2048;
+ else if (cpu_is_omap34xx() && (hresize > 4096))
+ hresize = 4096;
+
+ win->w.width = ((1024 * try_crop.width) / hresize) & ~1;
+ if (win->w.width == 0)
+ win->w.width = 2;
+ if (win->w.width + win->w.left > fbuf->fmt.width) {
+ /* We made the preview window extend past the right side of the
+ * display, so clip it to the display boundary and resize the
+ * cropping width to maintain the horizontal resizing ratio.
+ */
+ win->w.width = (fbuf->fmt.width - win->w.left) & ~1;
+ if (try_crop.width == 0)
+ try_crop.width = 2;
+ }
+ if (cpu_is_omap24xx()) {
+ if ((try_crop.height/win->w.height) >= 2)
+ try_crop.height = win->w.height * 2;
+
+ if ((try_crop.width/win->w.width) >= 2)
+ try_crop.width = win->w.width * 2;
+
+ if (try_crop.width > 768) {
+ /* The OMAP2420 vertical resizing line buffer is
+ * 768 pixels wide. If the cropped image is wider
+ * than 768 pixels then it cannot be vertically resized.
+ */
+ if (try_crop.height != win->w.height)
+ try_crop.width = 768;
+ }
+ } else if (cpu_is_omap34xx()) {
+ if ((try_crop.height/win->w.height) >= 4)
+ try_crop.height = win->w.height * 4;
+
+ if ((try_crop.width/win->w.width) >= 4)
+ try_crop.width = win->w.width * 4;
+ }
+ /* update our cropping rectangle and we're done */
+ *crop = try_crop;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(omap_vout_new_crop);
+
+/* Given a new format in pix and fbuf, crop and win
+ * structures are initialized to default values. crop
+ * is initialized to the largest window size that will fit on the display. The
+ * crop window is centered in the image. win is initialized to
+ * the same size as crop and is centered on the display.
+ * All sizes and offsets are constrained to be even numbers.
+ */
+void omap_vout_new_format(struct v4l2_pix_format *pix,
+ struct v4l2_framebuffer *fbuf, struct v4l2_rect *crop,
+ struct v4l2_window *win)
+{
+ /* crop defines the preview source window in the image capture
+ * buffer
+ */
+ omap_vout_default_crop(pix, fbuf, crop);
+
+ /* win defines the preview target window on the display */
+ win->w.width = crop->width;
+ win->w.height = crop->height;
+ win->w.left = ((fbuf->fmt.width - win->w.width) >> 1) & ~1;
+ win->w.top = ((fbuf->fmt.height - win->w.height) >> 1) & ~1;
+}
+EXPORT_SYMBOL_GPL(omap_vout_new_format);
+
diff --git a/drivers/media/video/omap/omap_voutlib.h b/drivers/media/video/omap/omap_voutlib.h
new file mode 100644
index 000000000000..a60b16e8bfc3
--- /dev/null
+++ b/drivers/media/video/omap/omap_voutlib.h
@@ -0,0 +1,34 @@
+/*
+ * omap_voutlib.h
+ *
+ * Copyright (C) 2010 Texas Instruments.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ */
+
+#ifndef OMAP_VOUTLIB_H
+#define OMAP_VOUTLIB_H
+
+extern void omap_vout_default_crop(struct v4l2_pix_format *pix,
+ struct v4l2_framebuffer *fbuf, struct v4l2_rect *crop);
+
+extern int omap_vout_new_crop(struct v4l2_pix_format *pix,
+ struct v4l2_rect *crop, struct v4l2_window *win,
+ struct v4l2_framebuffer *fbuf,
+ const struct v4l2_rect *new_crop);
+
+extern int omap_vout_try_window(struct v4l2_framebuffer *fbuf,
+ struct v4l2_window *new_win);
+
+extern int omap_vout_new_window(struct v4l2_rect *crop,
+ struct v4l2_window *win, struct v4l2_framebuffer *fbuf,
+ struct v4l2_window *new_win);
+
+extern void omap_vout_new_format(struct v4l2_pix_format *pix,
+ struct v4l2_framebuffer *fbuf, struct v4l2_rect *crop,
+ struct v4l2_window *win);
+#endif /* #ifndef OMAP_VOUTLIB_H */
+
diff --git a/drivers/media/video/omap24xxcam.c b/drivers/media/video/omap24xxcam.c
index ce76d952e161..f85b2ed8a2d8 100644
--- a/drivers/media/video/omap24xxcam.c
+++ b/drivers/media/video/omap24xxcam.c
@@ -452,8 +452,8 @@ static int omap24xxcam_vbq_setup(struct videobuf_queue *vbq, unsigned int *cnt,
*size = fh->pix.sizeimage;
/* accessing fh->cam->capture_mem is ok, it's constant */
- while (*size * *cnt > fh->cam->capture_mem)
- (*cnt)--;
+ if (*size * *cnt > fh->cam->capture_mem)
+ *cnt = fh->cam->capture_mem / *size;
return 0;
}
diff --git a/drivers/media/video/ov511.c b/drivers/media/video/ov511.c
index e0bce8dc74bf..a10912097b7a 100644
--- a/drivers/media/video/ov511.c
+++ b/drivers/media/video/ov511.c
@@ -57,8 +57,8 @@
#define DRIVER_VERSION "v1.64 for Linux 2.5"
#define EMAIL "mark@alpha.dyndns.org"
#define DRIVER_AUTHOR "Mark McClelland <mark@alpha.dyndns.org> & Bret Wallach \
- & Orion Sky Lawlor <olawlor@acm.org> & Kevin Moore & Charl P. Botha \
- <cpbotha@ieee.org> & Claudio Matsuoka <claudio@conectiva.com>"
+& Orion Sky Lawlor <olawlor@acm.org> & Kevin Moore & Charl P. Botha \
+<cpbotha@ieee.org> & Claudio Matsuoka <claudio@conectiva.com>"
#define DRIVER_DESC "ov511 USB Camera Driver"
#define OV511_I2C_RETRIES 3
@@ -5916,11 +5916,6 @@ ov51x_disconnect(struct usb_interface *intf)
mutex_lock(&ov->lock);
usb_set_intfdata (intf, NULL);
- if (!ov) {
- mutex_unlock(&ov->lock);
- return;
- }
-
/* Free device number */
ov511_devused &= ~(1 << ov->nr);
@@ -5945,7 +5940,7 @@ ov51x_disconnect(struct usb_interface *intf)
ov->dev = NULL;
/* Free the memory */
- if (ov && !ov->user) {
+ if (!ov->user) {
mutex_lock(&ov->cbuf_lock);
kfree(ov->cbuf);
ov->cbuf = NULL;
diff --git a/drivers/media/video/ov7670.c b/drivers/media/video/ov7670.c
index aaa50f9b8e78..91c886ab15c6 100644
--- a/drivers/media/video/ov7670.c
+++ b/drivers/media/video/ov7670.c
@@ -198,6 +198,7 @@ struct ov7670_info {
struct ov7670_format_struct *fmt; /* Current format */
unsigned char sat; /* Saturation value */
int hue; /* Hue value */
+ u8 clkrc; /* Clock divider value */
};
static inline struct ov7670_info *to_state(struct v4l2_subdev *sd)
@@ -351,7 +352,7 @@ static struct regval_list ov7670_default_regs[] = {
static struct regval_list ov7670_fmt_yuv422[] = {
{ REG_COM7, 0x0 }, /* Selects YUV mode */
{ REG_RGB444, 0 }, /* No RGB444 please */
- { REG_COM1, 0 },
+ { REG_COM1, 0 }, /* CCIR601 */
{ REG_COM15, COM15_R00FF },
{ REG_COM9, 0x18 }, /* 4x gain ceiling; 0x8 is reserved bit */
{ 0x4f, 0x80 }, /* "matrix coefficient 1" */
@@ -367,7 +368,7 @@ static struct regval_list ov7670_fmt_yuv422[] = {
static struct regval_list ov7670_fmt_rgb565[] = {
{ REG_COM7, COM7_RGB }, /* Selects RGB mode */
{ REG_RGB444, 0 }, /* No RGB444 please */
- { REG_COM1, 0x0 },
+ { REG_COM1, 0x0 }, /* CCIR601 */
{ REG_COM15, COM15_RGB565 },
{ REG_COM9, 0x38 }, /* 16x gain ceiling; 0x8 is reserved bit */
{ 0x4f, 0xb3 }, /* "matrix coefficient 1" */
@@ -383,7 +384,7 @@ static struct regval_list ov7670_fmt_rgb565[] = {
static struct regval_list ov7670_fmt_rgb444[] = {
{ REG_COM7, COM7_RGB }, /* Selects RGB mode */
{ REG_RGB444, R444_ENABLE }, /* Enable xxxxrrrr ggggbbbb */
- { REG_COM1, 0x40 }, /* Magic reserved bit */
+ { REG_COM1, 0x0 }, /* CCIR601 */
{ REG_COM15, COM15_R01FE|COM15_RGB565 }, /* Data range needed? */
{ REG_COM9, 0x38 }, /* 16x gain ceiling; 0x8 is reserved bit */
{ 0x4f, 0xb3 }, /* "matrix coefficient 1" */
@@ -408,8 +409,13 @@ static struct regval_list ov7670_fmt_raw[] = {
/*
* Low-level register I/O.
+ *
+ * Note that there are two versions of these. On the XO 1, the
+ * i2c controller only does SMBUS, so that's what we use. The
+ * ov7670 is not really an SMBUS device, though, so the communication
+ * is not always entirely reliable.
*/
-
+#ifdef CONFIG_OLPC_XO_1
static int ov7670_read(struct v4l2_subdev *sd, unsigned char reg,
unsigned char *value)
{
@@ -432,9 +438,67 @@ static int ov7670_write(struct v4l2_subdev *sd, unsigned char reg,
int ret = i2c_smbus_write_byte_data(client, reg, value);
if (reg == REG_COM7 && (value & COM7_RESET))
- msleep(2); /* Wait for reset to run */
+ msleep(5); /* Wait for reset to run */
+ return ret;
+}
+
+#else /* ! CONFIG_OLPC_XO_1 */
+/*
+ * On most platforms, we'd rather do straight i2c I/O.
+ */
+static int ov7670_read(struct v4l2_subdev *sd, unsigned char reg,
+ unsigned char *value)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ u8 data = reg;
+ struct i2c_msg msg;
+ int ret;
+
+ /*
+ * Send out the register address...
+ */
+ msg.addr = client->addr;
+ msg.flags = 0;
+ msg.len = 1;
+ msg.buf = &data;
+ ret = i2c_transfer(client->adapter, &msg, 1);
+ if (ret < 0) {
+ printk(KERN_ERR "Error %d on register write\n", ret);
+ return ret;
+ }
+ /*
+ * ...then read back the result.
+ */
+ msg.flags = I2C_M_RD;
+ ret = i2c_transfer(client->adapter, &msg, 1);
+ if (ret >= 0) {
+ *value = data;
+ ret = 0;
+ }
+ return ret;
+}
+
+
+static int ov7670_write(struct v4l2_subdev *sd, unsigned char reg,
+ unsigned char value)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct i2c_msg msg;
+ unsigned char data[2] = { reg, value };
+ int ret;
+
+ msg.addr = client->addr;
+ msg.flags = 0;
+ msg.len = 2;
+ msg.buf = data;
+ ret = i2c_transfer(client->adapter, &msg, 1);
+ if (ret > 0)
+ ret = 0;
+ if (reg == REG_COM7 && (value & COM7_RESET))
+ msleep(5); /* Wait for reset to run */
return ret;
}
+#endif /* CONFIG_OLPC_XO_1 */
/*
@@ -744,22 +808,12 @@ static int ov7670_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
struct ov7670_format_struct *ovfmt;
struct ov7670_win_size *wsize;
struct ov7670_info *info = to_state(sd);
- unsigned char com7, clkrc = 0;
+ unsigned char com7;
ret = ov7670_try_fmt_internal(sd, fmt, &ovfmt, &wsize);
if (ret)
return ret;
/*
- * HACK: if we're running rgb565 we need to grab then rewrite
- * CLKRC. If we're *not*, however, then rewriting clkrc hoses
- * the colors.
- */
- if (fmt->fmt.pix.pixelformat == V4L2_PIX_FMT_RGB565) {
- ret = ov7670_read(sd, REG_CLKRC, &clkrc);
- if (ret)
- return ret;
- }
- /*
* COM7 is a pain in the ass, it doesn't like to be read then
* quickly written afterward. But we have everything we need
* to set it absolutely here, as long as the format-specific
@@ -779,8 +833,18 @@ static int ov7670_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
ret = ov7670_write_array(sd, wsize->regs);
info->fmt = ovfmt;
- if (fmt->fmt.pix.pixelformat == V4L2_PIX_FMT_RGB565 && ret == 0)
- ret = ov7670_write(sd, REG_CLKRC, clkrc);
+ /*
+ * If we're running RGB565, we must rewrite clkrc after setting
+ * the other parameters or the image looks poor. If we're *not*
+ * doing RGB565, we must not rewrite clkrc or the image looks
+ * *really* poor.
+ *
+ * (Update) Now that we retain clkrc state, we should be able
+ * to write it unconditionally, and that will make the frame
+ * rate persistent too.
+ */
+ if (ret == 0)
+ ret = ov7670_write(sd, REG_CLKRC, info->clkrc);
return ret;
}
@@ -791,20 +855,17 @@ static int ov7670_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
static int ov7670_g_parm(struct v4l2_subdev *sd, struct v4l2_streamparm *parms)
{
struct v4l2_captureparm *cp = &parms->parm.capture;
- unsigned char clkrc;
- int ret;
+ struct ov7670_info *info = to_state(sd);
if (parms->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
- ret = ov7670_read(sd, REG_CLKRC, &clkrc);
- if (ret < 0)
- return ret;
+
memset(cp, 0, sizeof(struct v4l2_captureparm));
cp->capability = V4L2_CAP_TIMEPERFRAME;
cp->timeperframe.numerator = 1;
cp->timeperframe.denominator = OV7670_FRAME_RATE;
- if ((clkrc & CLK_EXT) == 0 && (clkrc & CLK_SCALE) > 1)
- cp->timeperframe.denominator /= (clkrc & CLK_SCALE);
+ if ((info->clkrc & CLK_EXT) == 0 && (info->clkrc & CLK_SCALE) > 1)
+ cp->timeperframe.denominator /= (info->clkrc & CLK_SCALE);
return 0;
}
@@ -812,19 +873,14 @@ static int ov7670_s_parm(struct v4l2_subdev *sd, struct v4l2_streamparm *parms)
{
struct v4l2_captureparm *cp = &parms->parm.capture;
struct v4l2_fract *tpf = &cp->timeperframe;
- unsigned char clkrc;
- int ret, div;
+ struct ov7670_info *info = to_state(sd);
+ int div;
if (parms->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
if (cp->extendedmode != 0)
return -EINVAL;
- /*
- * CLKRC has a reserved bit, so let's preserve it.
- */
- ret = ov7670_read(sd, REG_CLKRC, &clkrc);
- if (ret < 0)
- return ret;
+
if (tpf->numerator == 0 || tpf->denominator == 0)
div = 1; /* Reset to full rate */
else
@@ -833,10 +889,10 @@ static int ov7670_s_parm(struct v4l2_subdev *sd, struct v4l2_streamparm *parms)
div = 1;
else if (div > CLK_SCALE)
div = CLK_SCALE;
- clkrc = (clkrc & 0x80) | div;
+ info->clkrc = (info->clkrc & 0x80) | div;
tpf->numerator = 1;
tpf->denominator = OV7670_FRAME_RATE/div;
- return ov7670_write(sd, REG_CLKRC, clkrc);
+ return ov7670_write(sd, REG_CLKRC, info->clkrc);
}
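
With clkrc now cached in ov7670_info, the frame-rate maths in g_parm/s_parm works entirely on that cached byte: the low bits act as a clock prescaler and the reserved top bit is preserved. A rough sketch of both directions of the conversion, assuming a 30 fps base rate and ignoring the external-clock (CLK_EXT) case; BASE_RATE and SCALE_MASK stand in for the driver's OV7670_FRAME_RATE and CLK_SCALE:

#define BASE_RATE	30	/* frames per second at divider 1 */
#define SCALE_MASK	0x3f	/* prescaler bits of the cached clkrc */

static unsigned int clkrc_to_fps(unsigned char clkrc)
{
	unsigned int div = clkrc & SCALE_MASK;

	return div > 1 ? BASE_RATE / div : BASE_RATE;
}

static unsigned char timeperframe_to_clkrc(unsigned char clkrc,
					    unsigned int num, unsigned int den)
{
	unsigned int div = (num == 0 || den == 0) ? 1 : (num * BASE_RATE) / den;

	if (div == 0)
		div = 1;
	else if (div > SCALE_MASK)
		div = SCALE_MASK;
	/* keep the reserved top bit, replace only the divider */
	return (clkrc & 0x80) | div;
}
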
@@ -1115,6 +1171,140 @@ static int ov7670_s_vflip(struct v4l2_subdev *sd, int value)
return ret;
}
+/*
+ * GAIN is split between REG_GAIN and REG_VREF[7:6]. If one believes
+ * the data sheet, the VREF parts should be the most significant, but
+ * experience shows otherwise. There seems to be little value in
+ * messing with the VREF bits, so we leave them alone.
+ */
+static int ov7670_g_gain(struct v4l2_subdev *sd, __s32 *value)
+{
+ int ret;
+ unsigned char gain;
+
+ ret = ov7670_read(sd, REG_GAIN, &gain);
+ *value = gain;
+ return ret;
+}
+
+static int ov7670_s_gain(struct v4l2_subdev *sd, int value)
+{
+ int ret;
+ unsigned char com8;
+
+ ret = ov7670_write(sd, REG_GAIN, value & 0xff);
+ /* Have to turn off AGC as well */
+ if (ret == 0) {
+ ret = ov7670_read(sd, REG_COM8, &com8);
+ ret = ov7670_write(sd, REG_COM8, com8 & ~COM8_AGC);
+ }
+ return ret;
+}
+
+/*
+ * Tweak autogain.
+ */
+static int ov7670_g_autogain(struct v4l2_subdev *sd, __s32 *value)
+{
+ int ret;
+ unsigned char com8;
+
+ ret = ov7670_read(sd, REG_COM8, &com8);
+ *value = (com8 & COM8_AGC) != 0;
+ return ret;
+}
+
+static int ov7670_s_autogain(struct v4l2_subdev *sd, int value)
+{
+ int ret;
+ unsigned char com8;
+
+ ret = ov7670_read(sd, REG_COM8, &com8);
+ if (ret == 0) {
+ if (value)
+ com8 |= COM8_AGC;
+ else
+ com8 &= ~COM8_AGC;
+ ret = ov7670_write(sd, REG_COM8, com8);
+ }
+ return ret;
+}
+
+/*
+ * Exposure is spread all over the place: top 6 bits in AECHH, middle
+ * 8 in AECH, and two stashed in COM1 just for the hell of it.
+ */
+static int ov7670_g_exp(struct v4l2_subdev *sd, __s32 *value)
+{
+ int ret;
+ unsigned char com1, aech, aechh;
+
+ ret = ov7670_read(sd, REG_COM1, &com1) +
+ ov7670_read(sd, REG_AECH, &aech) +
+ ov7670_read(sd, REG_AECHH, &aechh);
+ *value = ((aechh & 0x3f) << 10) | (aech << 2) | (com1 & 0x03);
+ return ret;
+}
+
+static int ov7670_s_exp(struct v4l2_subdev *sd, int value)
+{
+ int ret;
+ unsigned char com1, com8, aech, aechh;
+
+ ret = ov7670_read(sd, REG_COM1, &com1) +
+ ov7670_read(sd, REG_COM8, &com8);
+ ov7670_read(sd, REG_AECHH, &aechh);
+ if (ret)
+ return ret;
+
+ com1 = (com1 & 0xfc) | (value & 0x03);
+ aech = (value >> 2) & 0xff;
+ aechh = (aechh & 0xc0) | ((value >> 10) & 0x3f);
+ ret = ov7670_write(sd, REG_COM1, com1) +
+ ov7670_write(sd, REG_AECH, aech) +
+ ov7670_write(sd, REG_AECHH, aechh);
+ /* Have to turn off AEC as well */
+ if (ret == 0)
+ ret = ov7670_write(sd, REG_COM8, com8 & ~COM8_AEC);
+ return ret;
+}
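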
+
+/*
+ * Tweak autoexposure.
+ */
+static int ov7670_g_autoexp(struct v4l2_subdev *sd, __s32 *value)
+{
+ int ret;
+ unsigned char com8;
+ enum v4l2_exposure_auto_type *atype = (enum v4l2_exposure_auto_type *) value;
+
+ ret = ov7670_read(sd, REG_COM8, &com8);
+ if (com8 & COM8_AEC)
+ *atype = V4L2_EXPOSURE_AUTO;
+ else
+ *atype = V4L2_EXPOSURE_MANUAL;
+ return ret;
+}
+
+static int ov7670_s_autoexp(struct v4l2_subdev *sd,
+ enum v4l2_exposure_auto_type value)
+{
+ int ret;
+ unsigned char com8;
+
+ ret = ov7670_read(sd, REG_COM8, &com8);
+ if (ret == 0) {
+ if (value == V4L2_EXPOSURE_AUTO)
+ com8 |= COM8_AEC;
+ else
+ com8 &= ~COM8_AEC;
+ ret = ov7670_write(sd, REG_COM8, com8);
+ }
+ return ret;
+}
+
+
+
static int ov7670_queryctrl(struct v4l2_subdev *sd,
struct v4l2_queryctrl *qc)
{
@@ -1131,6 +1321,14 @@ static int ov7670_queryctrl(struct v4l2_subdev *sd,
return v4l2_ctrl_query_fill(qc, 0, 256, 1, 128);
case V4L2_CID_HUE:
return v4l2_ctrl_query_fill(qc, -180, 180, 5, 0);
+ case V4L2_CID_GAIN:
+ return v4l2_ctrl_query_fill(qc, 0, 255, 1, 128);
+ case V4L2_CID_AUTOGAIN:
+ return v4l2_ctrl_query_fill(qc, 0, 1, 1, 1);
+ case V4L2_CID_EXPOSURE:
+ return v4l2_ctrl_query_fill(qc, 0, 65535, 1, 500);
+ case V4L2_CID_EXPOSURE_AUTO:
+ return v4l2_ctrl_query_fill(qc, 0, 1, 1, 0);
}
return -EINVAL;
}
@@ -1150,6 +1348,14 @@ static int ov7670_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
return ov7670_g_vflip(sd, &ctrl->value);
case V4L2_CID_HFLIP:
return ov7670_g_hflip(sd, &ctrl->value);
+ case V4L2_CID_GAIN:
+ return ov7670_g_gain(sd, &ctrl->value);
+ case V4L2_CID_AUTOGAIN:
+ return ov7670_g_autogain(sd, &ctrl->value);
+ case V4L2_CID_EXPOSURE:
+ return ov7670_g_exp(sd, &ctrl->value);
+ case V4L2_CID_EXPOSURE_AUTO:
+ return ov7670_g_autoexp(sd, &ctrl->value);
}
return -EINVAL;
}
@@ -1169,6 +1375,15 @@ static int ov7670_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
return ov7670_s_vflip(sd, ctrl->value);
case V4L2_CID_HFLIP:
return ov7670_s_hflip(sd, ctrl->value);
+ case V4L2_CID_GAIN:
+ return ov7670_s_gain(sd, ctrl->value);
+ case V4L2_CID_AUTOGAIN:
+ return ov7670_s_autogain(sd, ctrl->value);
+ case V4L2_CID_EXPOSURE:
+ return ov7670_s_exp(sd, ctrl->value);
+ case V4L2_CID_EXPOSURE_AUTO:
+ return ov7670_s_autoexp(sd,
+ (enum v4l2_exposure_auto_type) ctrl->value);
}
return -EINVAL;
}
@@ -1268,6 +1483,7 @@ static int ov7670_probe(struct i2c_client *client,
info->fmt = &ov7670_formats[0];
info->sat = 128; /* Review this */
+ info->clkrc = 1; /* 30fps */
return 0;
}
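
The new exposure controls added above spread a 16-bit exposure value across three registers: bits [15:10] in AECHH[5:0], bits [9:2] in AECH, and bits [1:0] in COM1[1:0]. A worked sketch of just the packing and unpacking, with plain helper names for illustration:

static unsigned int exposure_pack(unsigned char aechh, unsigned char aech,
				  unsigned char com1)
{
	return ((aechh & 0x3f) << 10) | (aech << 2) | (com1 & 0x03);
}

static void exposure_unpack(unsigned int value, unsigned char *aechh,
			    unsigned char *aech, unsigned char *com1)
{
	*com1  = value & 0x03;		/* low two bits */
	*aech  = (value >> 2) & 0xff;	/* middle eight bits */
	*aechh = (value >> 10) & 0x3f;	/* top six bits */
}
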
diff --git a/drivers/media/video/ov9640.c b/drivers/media/video/ov9640.c
index 47bf60ceb7a2..36599a65f548 100644
--- a/drivers/media/video/ov9640.c
+++ b/drivers/media/video/ov9640.c
@@ -59,9 +59,9 @@ static const struct ov9640_reg ov9640_regs_dflt[] = {
* COM12 |= OV9640_COM12_YUV_AVG
*
* for RGB, alter the following registers:
- * COM7 |= OV9640_COM7_RGB
- * COM13 |= OV9640_COM13_RGB_AVG
- * COM15 |= proper RGB color encoding mode
+ * COM7 |= OV9640_COM7_RGB
+ * COM13 |= OV9640_COM13_RGB_AVG
+ * COM15 |= proper RGB color encoding mode
*/
static const struct ov9640_reg ov9640_regs_qqcif[] = {
{ OV9640_CLKRC, OV9640_CLKRC_DPLL_EN | OV9640_CLKRC_DIV(0x0f) },
diff --git a/drivers/media/video/pms.c b/drivers/media/video/pms.c
index 0598bbd3f368..7129b50757db 100644
--- a/drivers/media/video/pms.c
+++ b/drivers/media/video/pms.c
@@ -61,7 +61,6 @@ struct pms {
int depth;
int input;
s32 brightness, saturation, hue, contrast;
- unsigned long in_use;
struct mutex lock;
int i2c_count;
struct i2c_info i2cinfo[64];
@@ -931,25 +930,8 @@ static ssize_t pms_read(struct file *file, char __user *buf,
return len;
}
-static int pms_exclusive_open(struct file *file)
-{
- struct pms *dev = video_drvdata(file);
-
- return test_and_set_bit(0, &dev->in_use) ? -EBUSY : 0;
-}
-
-static int pms_exclusive_release(struct file *file)
-{
- struct pms *dev = video_drvdata(file);
-
- clear_bit(0, &dev->in_use);
- return 0;
-}
-
static const struct v4l2_file_operations pms_fops = {
.owner = THIS_MODULE,
- .open = pms_exclusive_open,
- .release = pms_exclusive_release,
.ioctl = video_ioctl2,
.read = pms_read,
};
diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw.c b/drivers/media/video/pvrusb2/pvrusb2-hdw.c
index 712b300f723f..301ef197d038 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-hdw.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw.c
@@ -2028,7 +2028,7 @@ static void pvr2_hdw_cx25840_vbi_hack(struct pvr2_hdw *hdw)
memset(&fmt, 0, sizeof(fmt));
fmt.type = V4L2_BUF_TYPE_SLICED_VBI_CAPTURE;
v4l2_device_call_all(&hdw->v4l2_dev, hdw->decoder_client_id,
- video, s_fmt, &fmt);
+ vbi, s_sliced_fmt, &fmt.fmt.sliced);
}
diff --git a/drivers/media/video/pvrusb2/pvrusb2-v4l2.c b/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
index bf1e0fe9f4d2..5ffa0d2b0b0d 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
@@ -49,7 +49,7 @@ struct pvr2_v4l2_dev {
struct pvr2_v4l2_fh {
struct pvr2_channel channel;
- struct pvr2_v4l2_dev *dev_info;
+ struct pvr2_v4l2_dev *pdi;
enum v4l2_priority prio;
struct pvr2_ioread *rhp;
struct file *file;
@@ -162,7 +162,7 @@ static long pvr2_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
{
struct pvr2_v4l2_fh *fh = file->private_data;
struct pvr2_v4l2 *vp = fh->vhead;
- struct pvr2_v4l2_dev *dev_info = fh->dev_info;
+ struct pvr2_v4l2_dev *pdi = fh->pdi;
struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
long ret = -EINVAL;
@@ -183,7 +183,7 @@ static long pvr2_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
case VIDIOC_S_INPUT:
case VIDIOC_S_TUNER:
case VIDIOC_S_FREQUENCY:
- ret = v4l2_prio_check(&vp->prio, &fh->prio);
+ ret = v4l2_prio_check(&vp->prio, fh->prio);
if (ret)
return ret;
}
@@ -564,14 +564,14 @@ static long pvr2_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
case VIDIOC_STREAMON:
{
- if (!fh->dev_info->stream) {
+ if (!fh->pdi->stream) {
/* No stream defined for this node. This means
that we're not currently allowed to stream from
this node. */
ret = -EPERM;
break;
}
- ret = pvr2_hdw_set_stream_type(hdw,dev_info->config);
+ ret = pvr2_hdw_set_stream_type(hdw,pdi->config);
if (ret < 0) return ret;
ret = pvr2_hdw_set_streaming(hdw,!0);
break;
@@ -579,7 +579,7 @@ static long pvr2_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
case VIDIOC_STREAMOFF:
{
- if (!fh->dev_info->stream) {
+ if (!fh->pdi->stream) {
/* No stream defined for this node. This means
that we're not currently allowed to stream from
this node. */
@@ -972,7 +972,7 @@ static int pvr2_v4l2_release(struct file *file)
fhp->rhp = NULL;
}
- v4l2_prio_close(&vp->prio, &fhp->prio);
+ v4l2_prio_close(&vp->prio, fhp->prio);
file->private_data = NULL;
if (fhp->vnext) {
@@ -1032,7 +1032,7 @@ static int pvr2_v4l2_open(struct file *file)
}
init_waitqueue_head(&fhp->wait_data);
- fhp->dev_info = dip;
+ fhp->pdi = dip;
pvr2_trace(PVR2_TRACE_STRUCT,"Creating pvr_v4l2_fh id=%p",fhp);
pvr2_channel_init(&fhp->channel,vp->channel.mc_head);
@@ -1093,7 +1093,7 @@ static int pvr2_v4l2_open(struct file *file)
fhp->file = file;
file->private_data = fhp;
- v4l2_prio_open(&vp->prio,&fhp->prio);
+ v4l2_prio_open(&vp->prio, &fhp->prio);
fhp->fw_mode_flag = pvr2_hdw_cpufw_get_enabled(hdw);
@@ -1113,7 +1113,7 @@ static int pvr2_v4l2_iosetup(struct pvr2_v4l2_fh *fh)
struct pvr2_hdw *hdw;
if (fh->rhp) return 0;
- if (!fh->dev_info->stream) {
+ if (!fh->pdi->stream) {
/* No stream defined for this node. This means that we're
not currently allowed to stream from this node. */
return -EPERM;
@@ -1122,21 +1122,21 @@ static int pvr2_v4l2_iosetup(struct pvr2_v4l2_fh *fh)
/* First read() attempt. Try to claim the stream and start
it... */
if ((ret = pvr2_channel_claim_stream(&fh->channel,
- fh->dev_info->stream)) != 0) {
+ fh->pdi->stream)) != 0) {
/* Someone else must already have it */
return ret;
}
- fh->rhp = pvr2_channel_create_mpeg_stream(fh->dev_info->stream);
+ fh->rhp = pvr2_channel_create_mpeg_stream(fh->pdi->stream);
if (!fh->rhp) {
pvr2_channel_claim_stream(&fh->channel,NULL);
return -ENOMEM;
}
hdw = fh->channel.mc_head->hdw;
- sp = fh->dev_info->stream->stream;
+ sp = fh->pdi->stream->stream;
pvr2_stream_set_callback(sp,(pvr2_stream_callback)pvr2_v4l2_notify,fh);
- pvr2_hdw_set_stream_type(hdw,fh->dev_info->config);
+ pvr2_hdw_set_stream_type(hdw,fh->pdi->config);
if ((ret = pvr2_hdw_set_streaming(hdw,!0)) < 0) return ret;
return pvr2_ioread_set_enabled(fh->rhp,!0);
}
diff --git a/drivers/media/video/pwc/Kconfig b/drivers/media/video/pwc/Kconfig
index 340f954aba34..11980db22d31 100644
--- a/drivers/media/video/pwc/Kconfig
+++ b/drivers/media/video/pwc/Kconfig
@@ -39,7 +39,7 @@ config USB_PWC_DEBUG
config USB_PWC_INPUT_EVDEV
bool "USB Philips Cameras input events device support"
default y
- depends on USB_PWC=INPUT || INPUT=y
+ depends on USB_PWC && (USB_PWC=INPUT || INPUT=y)
---help---
This option makes USB Philips cameras register the snapshot button as
an input device to report button events.
diff --git a/drivers/media/video/pxa_camera.c b/drivers/media/video/pxa_camera.c
index 04bf5c11308d..7fe70e718656 100644
--- a/drivers/media/video/pxa_camera.c
+++ b/drivers/media/video/pxa_camera.c
@@ -253,8 +253,8 @@ static int pxa_videobuf_setup(struct videobuf_queue *vq, unsigned int *count,
if (0 == *count)
*count = 32;
- while (*size * *count > vid_limit * 1024 * 1024)
- (*count)--;
+ if (*size * *count > vid_limit * 1024 * 1024)
+ *count = (vid_limit * 1024 * 1024) / *size;
return 0;
}
diff --git a/drivers/media/video/rj54n1cb0c.c b/drivers/media/video/rj54n1cb0c.c
index 9277194cd821..bbd9c11e2c5a 100644
--- a/drivers/media/video/rj54n1cb0c.c
+++ b/drivers/media/video/rj54n1cb0c.c
@@ -555,15 +555,15 @@ static int rj54n1_commit(struct i2c_client *client)
return ret;
}
-static int rj54n1_sensor_scale(struct v4l2_subdev *sd, u32 *in_w, u32 *in_h,
- u32 *out_w, u32 *out_h);
+static int rj54n1_sensor_scale(struct v4l2_subdev *sd, s32 *in_w, s32 *in_h,
+ s32 *out_w, s32 *out_h);
static int rj54n1_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
{
struct i2c_client *client = sd->priv;
struct rj54n1 *rj54n1 = to_rj54n1(client);
struct v4l2_rect *rect = &a->c;
- unsigned int dummy = 0, output_w, output_h,
+ int dummy = 0, output_w, output_h,
input_w = rect->width, input_h = rect->height;
int ret;
@@ -577,7 +577,7 @@ static int rj54n1_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
output_w = (input_w * 1024 + rj54n1->resize / 2) / rj54n1->resize;
output_h = (input_h * 1024 + rj54n1->resize / 2) / rj54n1->resize;
- dev_dbg(&client->dev, "Scaling for %ux%u : %u = %ux%u\n",
+ dev_dbg(&client->dev, "Scaling for %dx%d : %u = %dx%d\n",
input_w, input_h, rj54n1->resize, output_w, output_h);
ret = rj54n1_sensor_scale(sd, &input_w, &input_h, &output_w, &output_h);
@@ -638,8 +638,8 @@ static int rj54n1_g_fmt(struct v4l2_subdev *sd,
* the output one, updates the window sizes and returns an error or the resize
* coefficient on success. Note: we only use the "Fixed Scaling" on this camera.
*/
-static int rj54n1_sensor_scale(struct v4l2_subdev *sd, u32 *in_w, u32 *in_h,
- u32 *out_w, u32 *out_h)
+static int rj54n1_sensor_scale(struct v4l2_subdev *sd, s32 *in_w, s32 *in_h,
+ s32 *out_w, s32 *out_h)
{
struct i2c_client *client = sd->priv;
struct rj54n1 *rj54n1 = to_rj54n1(client);
@@ -749,7 +749,7 @@ static int rj54n1_sensor_scale(struct v4l2_subdev *sd, u32 *in_w, u32 *in_h,
* improve the image quality or stability for larger frames (see comment
* above), but I didn't check the framerate.
*/
- skip = min(resize / 1024, (unsigned)15);
+ skip = min(resize / 1024, 15U);
inc_sel = 1 << skip;
@@ -819,7 +819,7 @@ static int rj54n1_sensor_scale(struct v4l2_subdev *sd, u32 *in_w, u32 *in_h,
*out_w = output_w;
*out_h = output_h;
- dev_dbg(&client->dev, "Scaled for %ux%u : %u = %ux%u, skip %u\n",
+ dev_dbg(&client->dev, "Scaled for %dx%d : %u = %ux%u, skip %u\n",
*in_w, *in_h, resize, output_w, output_h, skip);
return resize;
@@ -1017,7 +1017,7 @@ static int rj54n1_s_fmt(struct v4l2_subdev *sd,
struct i2c_client *client = sd->priv;
struct rj54n1 *rj54n1 = to_rj54n1(client);
const struct rj54n1_datafmt *fmt;
- unsigned int output_w, output_h, max_w, max_h,
+ int output_w, output_h, max_w, max_h,
input_w = rj54n1->rect.width, input_h = rj54n1->rect.height;
int ret;
diff --git a/drivers/media/video/s2255drv.c b/drivers/media/video/s2255drv.c
index 3de914deb8ee..3c7a79f3812a 100644
--- a/drivers/media/video/s2255drv.c
+++ b/drivers/media/video/s2255drv.c
@@ -1,7 +1,7 @@
/*
* s2255drv.c - a driver for the Sensoray 2255 USB video capture device
*
- * Copyright (C) 2007-2008 by Sensoray Company Inc.
+ * Copyright (C) 2007-2010 by Sensoray Company Inc.
* Dean Anderson
*
* Some video buffer code based on vivi driver:
@@ -52,14 +52,19 @@
#include <linux/smp_lock.h>
#include <media/videobuf-vmalloc.h>
#include <media/v4l2-common.h>
+#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <linux/vmalloc.h>
#include <linux/usb.h>
+#define S2255_MAJOR_VERSION 1
+#define S2255_MINOR_VERSION 20
+#define S2255_RELEASE 0
+#define S2255_VERSION KERNEL_VERSION(S2255_MAJOR_VERSION, \
+ S2255_MINOR_VERSION, \
+ S2255_RELEASE)
#define FIRMWARE_FILE_NAME "f2255usb.bin"
-
-
/* default JPEG quality */
#define S2255_DEF_JPEG_QUAL 50
/* vendor request in */
@@ -76,14 +81,14 @@
#define S2255_LOAD_TIMEOUT (5000 + S2255_DSP_BOOTTIME)
#define S2255_DEF_BUFS 16
#define S2255_SETMODE_TIMEOUT 500
-#define MAX_CHANNELS 4
-#define S2255_MARKER_FRAME 0x2255DA4AL
-#define S2255_MARKER_RESPONSE 0x2255ACACL
-#define S2255_RESPONSE_SETMODE 0x01
-#define S2255_RESPONSE_FW 0x10
+#define S2255_VIDSTATUS_TIMEOUT 350
+#define S2255_MARKER_FRAME cpu_to_le32(0x2255DA4AL)
+#define S2255_MARKER_RESPONSE cpu_to_le32(0x2255ACACL)
+#define S2255_RESPONSE_SETMODE cpu_to_le32(0x01)
+#define S2255_RESPONSE_FW cpu_to_le32(0x10)
+#define S2255_RESPONSE_STATUS cpu_to_le32(0x20)
#define S2255_USB_XFER_SIZE (16 * 1024)
#define MAX_CHANNELS 4
-#define MAX_PIPE_BUFFERS 1
#define SYS_FRAMES 4
/* maximum size is PAL full size plus room for the marker header(s) */
#define SYS_FRAMES_MAXSIZE (720*288*2*2 + 4096)
@@ -118,9 +123,10 @@
#define COLOR_YUVPK 2 /* YUV packed */
#define COLOR_Y8 4 /* monochrome */
#define COLOR_JPG 5 /* JPEG */
-#define MASK_COLOR 0xff
-#define MASK_JPG_QUALITY 0xff00
+#define MASK_COLOR 0x000000ff
+#define MASK_JPG_QUALITY 0x0000ff00
+#define MASK_INPUT_TYPE 0x000f0000
/* frame decimation. Not implemented by V4L yet(experimental in V4L) */
#define FDEC_1 1 /* capture every frame. default */
#define FDEC_2 2 /* capture every 2nd frame */
@@ -139,12 +145,12 @@
#define DEF_HUE 0
/* usb config commands */
-#define IN_DATA_TOKEN 0x2255c0de
-#define CMD_2255 0xc2255000
-#define CMD_SET_MODE (CMD_2255 | 0x10)
-#define CMD_START (CMD_2255 | 0x20)
-#define CMD_STOP (CMD_2255 | 0x30)
-#define CMD_STATUS (CMD_2255 | 0x40)
+#define IN_DATA_TOKEN cpu_to_le32(0x2255c0de)
+#define CMD_2255 0xc2255000
+#define CMD_SET_MODE cpu_to_le32((CMD_2255 | 0x10))
+#define CMD_START cpu_to_le32((CMD_2255 | 0x20))
+#define CMD_STOP cpu_to_le32((CMD_2255 | 0x30))
+#define CMD_STATUS cpu_to_le32((CMD_2255 | 0x40))
struct s2255_mode {
u32 format; /* input video format (NTSC, PAL) */
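
The cpu_to_le32() wrapping of the marker and command constants above reflects that the device consumes 32-bit little-endian words, so the conversion is done once when the word is built and the configuration buffer can be typed __le32 *, letting sparse flag mixed-endianness use. A kernel-style sketch of filling the first three words of a command block; the names here are illustrative, not the driver's:

#include <linux/types.h>
#include <asm/byteorder.h>

static void fill_cmd_header(__le32 *buf, u32 channel, __le32 token, __le32 cmd)
{
	buf[0] = token;			/* constant, already little-endian */
	buf[1] = cpu_to_le32(channel);	/* runtime value converted here */
	buf[2] = cmd;			/* e.g. a set-mode or status command */
}
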
@@ -194,7 +200,6 @@ struct s2255_dmaqueue {
#define S2255_FW_SUCCESS 2
#define S2255_FW_FAILED 3
#define S2255_FW_DISCONNECTING 4
-
#define S2255_FW_MARKER cpu_to_le32(0x22552f2f)
/* 2255 read states */
#define S2255_READ_IDLE 0
@@ -223,8 +228,10 @@ struct s2255_pipeinfo {
struct s2255_fmt; /*forward declaration */
struct s2255_dev {
+ struct video_device vdev[MAX_CHANNELS];
+ struct v4l2_device v4l2_dev;
+ atomic_t channels; /* number of channels registered */
int frames;
- int users[MAX_CHANNELS];
struct mutex lock;
struct mutex open_lock;
int resources[MAX_CHANNELS];
@@ -233,11 +240,10 @@ struct s2255_dev {
u8 read_endpoint;
struct s2255_dmaqueue vidq[MAX_CHANNELS];
- struct video_device *vdev[MAX_CHANNELS];
struct timer_list timer;
struct s2255_fw *fw_data;
- struct s2255_pipeinfo pipes[MAX_PIPE_BUFFERS];
- struct s2255_bufferi buffer[MAX_CHANNELS];
+ struct s2255_pipeinfo pipe;
+ struct s2255_bufferi buffer[MAX_CHANNELS];
struct s2255_mode mode[MAX_CHANNELS];
/* jpeg compression */
struct v4l2_jpegcompression jc[MAX_CHANNELS];
@@ -261,11 +267,21 @@ struct s2255_dev {
int chn_configured[MAX_CHANNELS];
wait_queue_head_t wait_setmode[MAX_CHANNELS];
int setmode_ready[MAX_CHANNELS];
+ /* video status items */
+ int vidstatus[MAX_CHANNELS];
+ wait_queue_head_t wait_vidstatus[MAX_CHANNELS];
+ int vidstatus_ready[MAX_CHANNELS];
int chn_ready;
- struct kref kref;
spinlock_t slock;
+ /* dsp firmware version (f2255usb.bin) */
+ int dsp_fw_ver;
+ u16 pid; /* product id */
};
-#define to_s2255_dev(d) container_of(d, struct s2255_dev, kref)
+
+static inline struct s2255_dev *to_s2255_dev(struct v4l2_device *v4l2_dev)
+{
+ return container_of(v4l2_dev, struct s2255_dev, v4l2_dev);
+}
struct s2255_fmt {
char *name;
@@ -296,17 +312,43 @@ struct s2255_fh {
/* current cypress EEPROM firmware version */
#define S2255_CUR_USB_FWVER ((3 << 8) | 6)
-#define S2255_MAJOR_VERSION 1
-#define S2255_MINOR_VERSION 14
-#define S2255_RELEASE 0
-#define S2255_VERSION KERNEL_VERSION(S2255_MAJOR_VERSION, \
- S2255_MINOR_VERSION, \
- S2255_RELEASE)
-
-/* vendor ids */
-#define USB_S2255_VENDOR_ID 0x1943
-#define USB_S2255_PRODUCT_ID 0x2255
+/* current DSP FW version */
+#define S2255_CUR_DSP_FWVER 8
+/* Need DSP version 5+ for video status feature */
+#define S2255_MIN_DSP_STATUS 5
+#define S2255_MIN_DSP_COLORFILTER 8
#define S2255_NORMS (V4L2_STD_PAL | V4L2_STD_NTSC)
+
+/* private V4L2 controls */
+
+/*
+ * The following chart displays how COLORFILTER should be set
+ * =========================================================
+ * = fourcc = COLORFILTER =
+ * = ===============================
+ * = = 0 = 1 =
+ * =========================================================
+ * = V4L2_PIX_FMT_GREY(Y8) = monochrome from = monochrome=
+ * = = s-video or = composite =
+ * = = B/W camera = input =
+ * =========================================================
+ * = other = color, svideo = color, =
+ * = = = composite =
+ * =========================================================
+ *
+ * Notes:
+ * channels 0-3 on 2255 are composite
+ * channels 0-1 on 2257 are composite, 2-3 are s-video
+ * If COLORFILTER is 0 with a composite color camera connected,
+ * the output will appear monochrome but hatching
+ * will occur.
+ * COLORFILTER is different from "color killer" and "color effects"
+ * for reasons above.
+ */
+#define S2255_V4L2_YC_ON 1
+#define S2255_V4L2_YC_OFF 0
+#define V4L2_CID_PRIVATE_COLORFILTER (V4L2_CID_PRIVATE_BASE + 0)
+
/* frame prefix size (sent once every frame) */
#define PREFIX_SIZE 512
@@ -325,9 +367,8 @@ static void s2255_fillbuff(struct s2255_dev *dev, struct s2255_buffer *buf,
static int s2255_set_mode(struct s2255_dev *dev, unsigned long chn,
struct s2255_mode *mode);
static int s2255_board_shutdown(struct s2255_dev *dev);
-static void s2255_exit_v4l(struct s2255_dev *dev);
static void s2255_fwload_start(struct s2255_dev *dev, int reset);
-static void s2255_destroy(struct kref *kref);
+static void s2255_destroy(struct s2255_dev *dev);
static long s2255_vendor_req(struct s2255_dev *dev, unsigned char req,
u16 index, u16 value, void *buf,
s32 buf_len, int bOut);
@@ -347,7 +388,6 @@ static long s2255_vendor_req(struct s2255_dev *dev, unsigned char req,
static struct usb_driver s2255_driver;
-
/* Declare static vars that will be used as parameters */
static unsigned int vid_limit = 16; /* Video memory limit, in Mb */
@@ -362,58 +402,16 @@ module_param(video_nr, int, 0644);
MODULE_PARM_DESC(video_nr, "start video minor(-1 default autodetect)");
/* USB device table */
+#define USB_SENSORAY_VID 0x1943
static struct usb_device_id s2255_table[] = {
- {USB_DEVICE(USB_S2255_VENDOR_ID, USB_S2255_PRODUCT_ID)},
+ {USB_DEVICE(USB_SENSORAY_VID, 0x2255)},
+ {USB_DEVICE(USB_SENSORAY_VID, 0x2257)}, /*same family as 2255*/
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, s2255_table);
-
#define BUFFER_TIMEOUT msecs_to_jiffies(400)
-/* supported controls */
-static struct v4l2_queryctrl s2255_qctrl[] = {
- {
- .id = V4L2_CID_BRIGHTNESS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Brightness",
- .minimum = -127,
- .maximum = 128,
- .step = 1,
- .default_value = 0,
- .flags = 0,
- }, {
- .id = V4L2_CID_CONTRAST,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Contrast",
- .minimum = 0,
- .maximum = 255,
- .step = 0x1,
- .default_value = DEF_CONTRAST,
- .flags = 0,
- }, {
- .id = V4L2_CID_SATURATION,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Saturation",
- .minimum = 0,
- .maximum = 255,
- .step = 0x1,
- .default_value = DEF_SATURATION,
- .flags = 0,
- }, {
- .id = V4L2_CID_HUE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Hue",
- .minimum = 0,
- .maximum = 255,
- .step = 0x1,
- .default_value = DEF_HUE,
- .flags = 0,
- }
-};
-
-static int qctl_regs[ARRAY_SIZE(s2255_qctrl)];
-
/* image formats. */
static const struct s2255_fmt formats[] = {
{
@@ -505,7 +503,7 @@ static void s2255_reset_dsppower(struct s2255_dev *dev)
static void s2255_timer(unsigned long user_data)
{
struct s2255_fw *data = (struct s2255_fw *)user_data;
- dprintk(100, "s2255 timer\n");
+ dprintk(100, "%s\n", __func__);
if (usb_submit_urb(data->fw_urb, GFP_ATOMIC) < 0) {
printk(KERN_ERR "s2255: can't submit urb\n");
atomic_set(&data->fw_state, S2255_FW_FAILED);
@@ -527,7 +525,7 @@ static void s2255_fwchunk_complete(struct urb *urb)
struct s2255_fw *data = urb->context;
struct usb_device *udev = urb->dev;
int len;
- dprintk(100, "udev %p urb %p", udev, urb);
+ dprintk(100, "%s: udev %p urb %p", __func__, udev, urb);
if (urb->status) {
dev_err(&udev->dev, "URB failed with status %d\n", urb->status);
atomic_set(&data->fw_state, S2255_FW_FAILED);
@@ -573,8 +571,8 @@ static void s2255_fwchunk_complete(struct urb *urb)
data->fw_loaded += len;
} else {
atomic_set(&data->fw_state, S2255_FW_LOADED_DSPWAIT);
+ dprintk(100, "%s: firmware upload complete\n", __func__);
}
- dprintk(100, "2255 complete done\n");
return;
}
@@ -585,9 +583,7 @@ static int s2255_got_frame(struct s2255_dev *dev, int chn, int jpgsize)
struct s2255_buffer *buf;
unsigned long flags = 0;
int rc = 0;
- dprintk(2, "wakeup: %p channel: %d\n", &dma_q, chn);
spin_lock_irqsave(&dev->slock, flags);
-
if (list_empty(&dma_q->active)) {
dprintk(1, "No active queue to serve\n");
rc = -1;
@@ -595,23 +591,19 @@ static int s2255_got_frame(struct s2255_dev *dev, int chn, int jpgsize)
}
buf = list_entry(dma_q->active.next,
struct s2255_buffer, vb.queue);
-
list_del(&buf->vb.queue);
do_gettimeofday(&buf->vb.ts);
- dprintk(100, "[%p/%d] wakeup\n", buf, buf->vb.i);
s2255_fillbuff(dev, buf, dma_q->channel, jpgsize);
wake_up(&buf->vb.done);
- dprintk(2, "wakeup [buf/i] [%p/%d]\n", buf, buf->vb.i);
+ dprintk(2, "%s: [buf/i] [%p/%d]\n", __func__, buf, buf->vb.i);
unlock:
spin_unlock_irqrestore(&dev->slock, flags);
return 0;
}
-
static const struct s2255_fmt *format_by_fourcc(int fourcc)
{
unsigned int i;
-
for (i = 0; i < ARRAY_SIZE(formats); i++) {
if (-1 == formats[i].fourcc)
continue;
@@ -621,9 +613,6 @@ static const struct s2255_fmt *format_by_fourcc(int fourcc)
return NULL;
}
-
-
-
/* video buffer vmalloc implementation based partly on VIVI driver which is
* Copyright (c) 2006 by
* Mauro Carvalho Chehab <mchehab--a.t--infradead.org>
@@ -703,8 +692,8 @@ static int buffer_setup(struct videobuf_queue *vq, unsigned int *count,
if (0 == *count)
*count = S2255_DEF_BUFS;
- while (*size * (*count) > vid_limit * 1024 * 1024)
- (*count)--;
+ if (*size * *count > vid_limit * 1024 * 1024)
+ *count = (vid_limit * 1024 * 1024) / *size;
return 0;
}
@@ -727,10 +716,10 @@ static int buffer_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb,
if (fh->fmt == NULL)
return -EINVAL;
- if ((fh->width < norm_minw(fh->dev->vdev[fh->channel])) ||
- (fh->width > norm_maxw(fh->dev->vdev[fh->channel])) ||
- (fh->height < norm_minh(fh->dev->vdev[fh->channel])) ||
- (fh->height > norm_maxh(fh->dev->vdev[fh->channel]))) {
+ if ((fh->width < norm_minw(&fh->dev->vdev[fh->channel])) ||
+ (fh->width > norm_maxw(&fh->dev->vdev[fh->channel])) ||
+ (fh->height < norm_minh(&fh->dev->vdev[fh->channel])) ||
+ (fh->height > norm_maxh(&fh->dev->vdev[fh->channel]))) {
dprintk(4, "invalid buffer prepare\n");
return -EINVAL;
}
@@ -747,7 +736,6 @@ static int buffer_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb,
buf->vb.height = fh->height;
buf->vb.field = field;
-
if (VIDEOBUF_NEEDS_INIT == buf->vb.state) {
rc = videobuf_iolock(vq, &buf->vb, NULL);
if (rc < 0)
@@ -767,9 +755,7 @@ static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
struct s2255_fh *fh = vq->priv_data;
struct s2255_dev *dev = fh->dev;
struct s2255_dmaqueue *vidq = &dev->vidq[fh->channel];
-
dprintk(1, "%s\n", __func__);
-
buf->vb.state = VIDEOBUF_QUEUED;
list_add_tail(&buf->vb.queue, &vidq->active);
}
@@ -828,6 +814,27 @@ static void res_free(struct s2255_dev *dev, struct s2255_fh *fh)
dprintk(1, "res: put\n");
}
+static int vidioc_querymenu(struct file *file, void *priv,
+ struct v4l2_querymenu *qmenu)
+{
+ static const char *colorfilter[] = {
+ "Off",
+ "On",
+ NULL
+ };
+ if (qmenu->id == V4L2_CID_PRIVATE_COLORFILTER) {
+ int i;
+ const char **menu_items = colorfilter;
+ for (i = 0; i < qmenu->index && menu_items[i]; i++)
+ ; /* do nothing (from v4l2-common.c) */
+ if (menu_items[i] == NULL || menu_items[i][0] == '\0')
+ return -EINVAL;
+ strlcpy(qmenu->name, menu_items[qmenu->index],
+ sizeof(qmenu->name));
+ return 0;
+ }
+ return v4l2_ctrl_query_menu(qmenu, NULL, NULL);
+}
static int vidioc_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
@@ -883,7 +890,7 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
int is_ntsc;
is_ntsc =
- (dev->vdev[fh->channel]->current_norm & V4L2_STD_NTSC) ? 1 : 0;
+ (dev->vdev[fh->channel].current_norm & V4L2_STD_NTSC) ? 1 : 0;
fmt = format_by_fourcc(f->fmt.pix.pixelformat);
@@ -894,10 +901,8 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
if (field == V4L2_FIELD_ANY)
b_any_field = 1;
- dprintk(4, "try format %d \n", is_ntsc);
- /* supports 3 sizes. see s2255drv.h */
- dprintk(50, "width test %d, height %d\n",
- f->fmt.pix.width, f->fmt.pix.height);
+ dprintk(50, "%s NTSC: %d suggested width: %d, height: %d\n",
+ __func__, is_ntsc, f->fmt.pix.width, f->fmt.pix.height);
if (is_ntsc) {
/* NTSC */
if (f->fmt.pix.height >= NUM_LINES_1CIFS_NTSC * 2) {
@@ -952,29 +957,24 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
}
}
if (f->fmt.pix.width >= LINE_SZ_4CIFS_PAL) {
- dprintk(50, "pal 704\n");
f->fmt.pix.width = LINE_SZ_4CIFS_PAL;
field = V4L2_FIELD_SEQ_TB;
} else if (f->fmt.pix.width >= LINE_SZ_2CIFS_PAL) {
- dprintk(50, "pal 352A\n");
f->fmt.pix.width = LINE_SZ_2CIFS_PAL;
field = V4L2_FIELD_TOP;
} else if (f->fmt.pix.width >= LINE_SZ_1CIFS_PAL) {
- dprintk(50, "pal 352B\n");
f->fmt.pix.width = LINE_SZ_1CIFS_PAL;
field = V4L2_FIELD_TOP;
} else {
- dprintk(50, "pal 352C\n");
f->fmt.pix.width = LINE_SZ_1CIFS_PAL;
field = V4L2_FIELD_TOP;
}
}
-
- dprintk(50, "width %d height %d field %d \n", f->fmt.pix.width,
- f->fmt.pix.height, f->fmt.pix.field);
f->fmt.pix.field = field;
f->fmt.pix.bytesperline = (f->fmt.pix.width * fmt->depth) >> 3;
f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline;
+ dprintk(50, "%s: set width %d height %d field %d\n", __func__,
+ f->fmt.pix.width, f->fmt.pix.height, f->fmt.pix.field);
return 0;
}
@@ -1006,7 +1006,7 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
}
if (res_locked(fh->dev, fh)) {
- dprintk(1, "can't change format after started\n");
+ dprintk(1, "%s: channel busy\n", __func__);
ret = -EBUSY;
goto out_s_fmt;
}
@@ -1016,17 +1016,14 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
fh->height = f->fmt.pix.height;
fh->vb_vidq.field = f->fmt.pix.field;
fh->type = f->type;
- norm = norm_minw(fh->dev->vdev[fh->channel]);
- if (fh->width > norm_minw(fh->dev->vdev[fh->channel])) {
- if (fh->height > norm_minh(fh->dev->vdev[fh->channel])) {
+ norm = norm_minw(&fh->dev->vdev[fh->channel]);
+ if (fh->width > norm_minw(&fh->dev->vdev[fh->channel])) {
+ if (fh->height > norm_minh(&fh->dev->vdev[fh->channel])) {
if (fh->dev->cap_parm[fh->channel].capturemode &
- V4L2_MODE_HIGHQUALITY) {
+ V4L2_MODE_HIGHQUALITY)
fh->mode.scale = SCALE_4CIFSI;
- dprintk(2, "scale 4CIFSI\n");
- } else {
+ else
fh->mode.scale = SCALE_4CIFS;
- dprintk(2, "scale 4CIFS\n");
- }
} else
fh->mode.scale = SCALE_2CIFS;
@@ -1037,19 +1034,23 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
/* color mode */
switch (fh->fmt->fourcc) {
case V4L2_PIX_FMT_GREY:
- fh->mode.color = COLOR_Y8;
+ fh->mode.color &= ~MASK_COLOR;
+ fh->mode.color |= COLOR_Y8;
break;
case V4L2_PIX_FMT_JPEG:
- fh->mode.color = COLOR_JPG |
- (fh->dev->jc[fh->channel].quality << 8);
+ fh->mode.color &= ~MASK_COLOR;
+ fh->mode.color |= COLOR_JPG;
+ fh->mode.color |= (fh->dev->jc[fh->channel].quality << 8);
break;
case V4L2_PIX_FMT_YUV422P:
- fh->mode.color = COLOR_YUVPL;
+ fh->mode.color &= ~MASK_COLOR;
+ fh->mode.color |= COLOR_YUVPL;
break;
case V4L2_PIX_FMT_YUYV:
case V4L2_PIX_FMT_UYVY:
default:
- fh->mode.color = COLOR_YUVPK;
+ fh->mode.color &= ~MASK_COLOR;
+ fh->mode.color |= COLOR_YUVPK;
break;
}
ret = 0;
@@ -1178,19 +1179,13 @@ static u32 get_transfer_size(struct s2255_mode *mode)
return usbInSize;
}
-static void dump_verify_mode(struct s2255_dev *sdev, struct s2255_mode *mode)
+static void s2255_print_cfg(struct s2255_dev *sdev, struct s2255_mode *mode)
{
struct device *dev = &sdev->udev->dev;
dev_info(dev, "------------------------------------------------\n");
- dev_info(dev, "verify mode\n");
- dev_info(dev, "format: %d\n", mode->format);
- dev_info(dev, "scale: %d\n", mode->scale);
- dev_info(dev, "fdec: %d\n", mode->fdec);
- dev_info(dev, "color: %d\n", mode->color);
+ dev_info(dev, "format: %d\nscale %d\n", mode->format, mode->scale);
+ dev_info(dev, "fdec: %d\ncolor %d\n", mode->fdec, mode->color);
dev_info(dev, "bright: 0x%x\n", mode->bright);
- dev_info(dev, "restart: 0x%x\n", mode->restart);
- dev_info(dev, "usb_block: 0x%x\n", mode->usb_block);
- dev_info(dev, "single: 0x%x\n", mode->single);
dev_info(dev, "------------------------------------------------\n");
}
@@ -1206,44 +1201,38 @@ static int s2255_set_mode(struct s2255_dev *dev, unsigned long chn,
struct s2255_mode *mode)
{
int res;
- u32 *buffer;
+ __le32 *buffer;
unsigned long chn_rev;
-
mutex_lock(&dev->lock);
chn_rev = G_chnmap[chn];
- dprintk(3, "mode scale [%ld] %p %d\n", chn, mode, mode->scale);
- dprintk(3, "mode scale [%ld] %p %d\n", chn, &dev->mode[chn],
- dev->mode[chn].scale);
- dprintk(2, "mode contrast %x\n", mode->contrast);
-
+ dprintk(3, "%s channel %lu\n", __func__, chn);
/* if JPEG, set the quality */
- if ((mode->color & MASK_COLOR) == COLOR_JPG)
- mode->color = (dev->jc[chn].quality << 8) | COLOR_JPG;
-
+ if ((mode->color & MASK_COLOR) == COLOR_JPG) {
+ mode->color &= ~MASK_COLOR;
+ mode->color |= COLOR_JPG;
+ mode->color &= ~MASK_JPG_QUALITY;
+ mode->color |= (dev->jc[chn].quality << 8);
+ }
/* save the mode */
dev->mode[chn] = *mode;
dev->req_image_size[chn] = get_transfer_size(mode);
- dprintk(1, "transfer size %ld\n", dev->req_image_size[chn]);
-
+ dprintk(1, "%s: reqsize %ld\n", __func__, dev->req_image_size[chn]);
buffer = kzalloc(512, GFP_KERNEL);
if (buffer == NULL) {
dev_err(&dev->udev->dev, "out of mem\n");
mutex_unlock(&dev->lock);
return -ENOMEM;
}
-
/* set the mode */
buffer[0] = IN_DATA_TOKEN;
- buffer[1] = (u32) chn_rev;
+ buffer[1] = (__le32) cpu_to_le32(chn_rev);
buffer[2] = CMD_SET_MODE;
memcpy(&buffer[3], &dev->mode[chn], sizeof(struct s2255_mode));
dev->setmode_ready[chn] = 0;
res = s2255_write_config(dev->udev, (unsigned char *)buffer, 512);
if (debug)
- dump_verify_mode(dev, mode);
+ s2255_print_cfg(dev, mode);
kfree(buffer);
- dprintk(1, "set mode done chn %lu, %d\n", chn, res);
-
/* wait at least 3 frames before continuing */
if (mode->restart) {
wait_event_timeout(dev->wait_setmode[chn],
@@ -1254,10 +1243,46 @@ static int s2255_set_mode(struct s2255_dev *dev, unsigned long chn,
res = -EFAULT;
}
}
-
/* clear the restart flag */
dev->mode[chn].restart = 0;
mutex_unlock(&dev->lock);
+ dprintk(1, "%s chn %lu, result: %d\n", __func__, chn, res);
+ return res;
+}
+
+static int s2255_cmd_status(struct s2255_dev *dev, unsigned long chn,
+ u32 *pstatus)
+{
+ int res;
+ __le32 *buffer;
+ u32 chn_rev;
+ mutex_lock(&dev->lock);
+ chn_rev = G_chnmap[chn];
+ dprintk(4, "%s chan %lu\n", __func__, chn);
+ buffer = kzalloc(512, GFP_KERNEL);
+ if (buffer == NULL) {
+ dev_err(&dev->udev->dev, "out of mem\n");
+ mutex_unlock(&dev->lock);
+ return -ENOMEM;
+ }
+ /* form the get vid status command */
+ buffer[0] = IN_DATA_TOKEN;
+ buffer[1] = (__le32) cpu_to_le32(chn_rev);
+ buffer[2] = CMD_STATUS;
+ *pstatus = 0;
+ dev->vidstatus_ready[chn] = 0;
+ res = s2255_write_config(dev->udev, (unsigned char *)buffer, 512);
+ kfree(buffer);
+ wait_event_timeout(dev->wait_vidstatus[chn],
+ (dev->vidstatus_ready[chn] != 0),
+ msecs_to_jiffies(S2255_VIDSTATUS_TIMEOUT));
+ if (dev->vidstatus_ready[chn] != 1) {
+ printk(KERN_DEBUG "s2255: no vidstatus response\n");
+ res = -EFAULT;
+ }
+ *pstatus = dev->vidstatus[chn];
+ dprintk(4, "%s, vid status %d\n", __func__, *pstatus);
+ mutex_unlock(&dev->lock);
return res;
}
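
s2255_cmd_status() above follows the driver's usual request/response handshake: clear a per-channel ready flag, send the 512-byte command block, then sleep until the read-pipe handler sets the flag or the timeout expires. A stripped-down sketch of that pattern with made-up names (it assumes the wait queue was set up with init_waitqueue_head()):

#include <linux/types.h>
#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

struct chan_req {
	wait_queue_head_t wq;	/* woken by the completion handler */
	int ready;
	u32 result;
};

static int wait_for_response(struct chan_req *rq, unsigned long timeout_ms)
{
	rq->ready = 0;
	/* ...submit the command block to the device here... */
	wait_event_timeout(rq->wq, rq->ready != 0,
			   msecs_to_jiffies(timeout_ms));
	return rq->ready ? 0 : -ETIMEDOUT;
}
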
@@ -1291,7 +1316,7 @@ static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
new_mode = &fh->mode;
old_mode = &fh->dev->mode[chn];
- if (new_mode->color != old_mode->color)
+ if ((new_mode->color & MASK_COLOR) != (old_mode->color & MASK_COLOR))
new_mode->restart = 1;
else if (new_mode->scale != old_mode->scale)
new_mode->restart = 1;
@@ -1302,7 +1327,6 @@ static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
new_mode->restart = 0;
*old_mode = *new_mode;
dev->cur_fmt[chn] = fh->fmt;
- dprintk(1, "%s[%d]\n", __func__, chn);
dev->last_frame[chn] = -1;
dev->bad_payload[chn] = 0;
dev->cur_frame[chn] = 0;
@@ -1325,7 +1349,6 @@ static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
{
struct s2255_fh *fh = priv;
struct s2255_dev *dev = fh->dev;
-
dprintk(4, "%s\n, channel: %d", __func__, fh->channel);
if (fh->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
printk(KERN_ERR "invalid fh type0\n");
@@ -1347,27 +1370,32 @@ static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id *i)
struct s2255_mode *mode;
struct videobuf_queue *q = &fh->vb_vidq;
int ret = 0;
-
mutex_lock(&q->vb_lock);
if (videobuf_queue_is_busy(q)) {
dprintk(1, "queue busy\n");
ret = -EBUSY;
goto out_s_std;
}
-
if (res_locked(fh->dev, fh)) {
dprintk(1, "can't change standard after started\n");
ret = -EBUSY;
goto out_s_std;
}
mode = &fh->mode;
-
if (*i & V4L2_STD_NTSC) {
- dprintk(4, "vidioc_s_std NTSC\n");
- mode->format = FORMAT_NTSC;
+ dprintk(4, "%s NTSC\n", __func__);
+ /* if changing format, reset frame decimation/intervals */
+ if (mode->format != FORMAT_NTSC) {
+ mode->format = FORMAT_NTSC;
+ mode->fdec = FDEC_1;
+ }
} else if (*i & V4L2_STD_PAL) {
- dprintk(4, "vidioc_s_std PAL\n");
+ dprintk(4, "%s PAL\n", __func__);
mode->format = FORMAT_PAL;
+ if (mode->format != FORMAT_PAL) {
+ mode->format = FORMAT_PAL;
+ mode->fdec = FDEC_1;
+ }
} else {
ret = -EINVAL;
}
@@ -1386,12 +1414,32 @@ out_s_std:
static int vidioc_enum_input(struct file *file, void *priv,
struct v4l2_input *inp)
{
+ struct s2255_fh *fh = priv;
+ struct s2255_dev *dev = fh->dev;
+ u32 status = 0;
if (inp->index != 0)
return -EINVAL;
-
inp->type = V4L2_INPUT_TYPE_CAMERA;
inp->std = S2255_NORMS;
- strlcpy(inp->name, "Camera", sizeof(inp->name));
+ inp->status = 0;
+ if (dev->dsp_fw_ver >= S2255_MIN_DSP_STATUS) {
+ int rc;
+ rc = s2255_cmd_status(dev, fh->channel, &status);
+ dprintk(4, "s2255_cmd_status rc: %d status %x\n", rc, status);
+ if (rc == 0)
+ inp->status = (status & 0x01) ? 0
+ : V4L2_IN_ST_NO_SIGNAL;
+ }
+ switch (dev->pid) {
+ case 0x2255:
+ default:
+ strlcpy(inp->name, "Composite", sizeof(inp->name));
+ break;
+ case 0x2257:
+ strlcpy(inp->name, (fh->channel < 2) ? "Composite" : "S-Video",
+ sizeof(inp->name));
+ break;
+ }
return 0;
}
@@ -1411,74 +1459,113 @@ static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
static int vidioc_queryctrl(struct file *file, void *priv,
struct v4l2_queryctrl *qc)
{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(s2255_qctrl); i++)
- if (qc->id && qc->id == s2255_qctrl[i].id) {
- memcpy(qc, &(s2255_qctrl[i]), sizeof(*qc));
- return 0;
- }
-
- dprintk(4, "query_ctrl -EINVAL %d\n", qc->id);
- return -EINVAL;
+ struct s2255_fh *fh = priv;
+ struct s2255_dev *dev = fh->dev;
+ switch (qc->id) {
+ case V4L2_CID_BRIGHTNESS:
+ v4l2_ctrl_query_fill(qc, -127, 127, 1, DEF_BRIGHT);
+ break;
+ case V4L2_CID_CONTRAST:
+ v4l2_ctrl_query_fill(qc, 0, 255, 1, DEF_CONTRAST);
+ break;
+ case V4L2_CID_SATURATION:
+ v4l2_ctrl_query_fill(qc, 0, 255, 1, DEF_SATURATION);
+ break;
+ case V4L2_CID_HUE:
+ v4l2_ctrl_query_fill(qc, 0, 255, 1, DEF_HUE);
+ break;
+ case V4L2_CID_PRIVATE_COLORFILTER:
+ if (dev->dsp_fw_ver < S2255_MIN_DSP_COLORFILTER)
+ return -EINVAL;
+ if ((dev->pid == 0x2257) && (fh->channel > 1))
+ return -EINVAL;
+ strlcpy(qc->name, "Color Filter", sizeof(qc->name));
+ qc->type = V4L2_CTRL_TYPE_MENU;
+ qc->minimum = 0;
+ qc->maximum = 1;
+ qc->step = 1;
+ qc->default_value = 1;
+ qc->flags = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+ dprintk(4, "%s, id %d\n", __func__, qc->id);
+ return 0;
}
static int vidioc_g_ctrl(struct file *file, void *priv,
struct v4l2_control *ctrl)
{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(s2255_qctrl); i++)
- if (ctrl->id == s2255_qctrl[i].id) {
- ctrl->value = qctl_regs[i];
- return 0;
- }
- dprintk(4, "g_ctrl -EINVAL\n");
-
- return -EINVAL;
+ struct s2255_fh *fh = priv;
+ struct s2255_dev *dev = fh->dev;
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ ctrl->value = fh->mode.bright;
+ break;
+ case V4L2_CID_CONTRAST:
+ ctrl->value = fh->mode.contrast;
+ break;
+ case V4L2_CID_SATURATION:
+ ctrl->value = fh->mode.saturation;
+ break;
+ case V4L2_CID_HUE:
+ ctrl->value = fh->mode.hue;
+ break;
+ case V4L2_CID_PRIVATE_COLORFILTER:
+ if (dev->dsp_fw_ver < S2255_MIN_DSP_COLORFILTER)
+ return -EINVAL;
+ if ((dev->pid == 0x2257) && (fh->channel > 1))
+ return -EINVAL;
+ ctrl->value = !((fh->mode.color & MASK_INPUT_TYPE) >> 16);
+ break;
+ default:
+ return -EINVAL;
+ }
+ dprintk(4, "%s, id %d val %d\n", __func__, ctrl->id, ctrl->value);
+ return 0;
}
static int vidioc_s_ctrl(struct file *file, void *priv,
struct v4l2_control *ctrl)
{
- int i;
struct s2255_fh *fh = priv;
struct s2255_dev *dev = fh->dev;
struct s2255_mode *mode;
mode = &fh->mode;
- dprintk(4, "vidioc_s_ctrl\n");
- for (i = 0; i < ARRAY_SIZE(s2255_qctrl); i++) {
- if (ctrl->id == s2255_qctrl[i].id) {
- if (ctrl->value < s2255_qctrl[i].minimum ||
- ctrl->value > s2255_qctrl[i].maximum)
- return -ERANGE;
-
- qctl_regs[i] = ctrl->value;
- /* update the mode to the corresponding value */
- switch (ctrl->id) {
- case V4L2_CID_BRIGHTNESS:
- mode->bright = ctrl->value;
- break;
- case V4L2_CID_CONTRAST:
- mode->contrast = ctrl->value;
- break;
- case V4L2_CID_HUE:
- mode->hue = ctrl->value;
- break;
- case V4L2_CID_SATURATION:
- mode->saturation = ctrl->value;
- break;
- }
- mode->restart = 0;
- /* set mode here. Note: stream does not need restarted.
- some V4L programs restart stream unnecessarily
- after a s_crtl.
- */
- s2255_set_mode(dev, fh->channel, mode);
- return 0;
- }
+ dprintk(4, "%s\n", __func__);
+ /* update the mode to the corresponding value */
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ mode->bright = ctrl->value;
+ break;
+ case V4L2_CID_CONTRAST:
+ mode->contrast = ctrl->value;
+ break;
+ case V4L2_CID_HUE:
+ mode->hue = ctrl->value;
+ break;
+ case V4L2_CID_SATURATION:
+ mode->saturation = ctrl->value;
+ break;
+ case V4L2_CID_PRIVATE_COLORFILTER:
+ if (dev->dsp_fw_ver < S2255_MIN_DSP_COLORFILTER)
+ return -EINVAL;
+ if ((dev->pid == 0x2257) && (fh->channel > 1))
+ return -EINVAL;
+ mode->color &= ~MASK_INPUT_TYPE;
+ mode->color |= ((ctrl->value ? 0 : 1) << 16);
+ break;
+ default:
+ return -EINVAL;
}
- return -EINVAL;
+ mode->restart = 0;
+ /* set mode here. Note: the stream does not need to be restarted;
+ some V4L programs restart the stream unnecessarily
+ after an s_ctrl.
+ */
+ s2255_set_mode(dev, fh->channel, mode);
+ return 0;
}
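
The s_fmt and s_ctrl changes above stop rewriting mode->color wholesale and instead mask in individual fields; as the MASK_* defines earlier in this patch show, the low byte selects the pixel format, the next byte carries the JPEG quality, and bits 16-19 select the input/colour-filter type. A small sketch of composing that word, with illustrative names:

#include <linux/types.h>

#define FMT_MASK	0x000000ff	/* cf. MASK_COLOR */
#define QUAL_MASK	0x0000ff00	/* cf. MASK_JPG_QUALITY */
#define INPUT_MASK	0x000f0000	/* cf. MASK_INPUT_TYPE */

static u32 build_color_word(u32 fmt, u32 jpeg_quality, u32 input_type)
{
	return (fmt & FMT_MASK) |
	       ((jpeg_quality << 8) & QUAL_MASK) |
	       ((input_type << 16) & INPUT_MASK);
}
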
static int vidioc_g_jpegcomp(struct file *file, void *priv,
@@ -1487,7 +1574,7 @@ static int vidioc_g_jpegcomp(struct file *file, void *priv,
struct s2255_fh *fh = priv;
struct s2255_dev *dev = fh->dev;
*jc = dev->jc[fh->channel];
- dprintk(2, "getting jpegcompression, quality %d\n", jc->quality);
+ dprintk(2, "%s: quality %d\n", __func__, jc->quality);
return 0;
}
@@ -1499,7 +1586,7 @@ static int vidioc_s_jpegcomp(struct file *file, void *priv,
if (jc->quality < 0 || jc->quality > 100)
return -EINVAL;
dev->jc[fh->channel].quality = jc->quality;
- dprintk(2, "setting jpeg quality %d\n", jc->quality);
+ dprintk(2, "%s: quality %d\n", __func__, jc->quality);
return 0;
}
@@ -1508,10 +1595,34 @@ static int vidioc_g_parm(struct file *file, void *priv,
{
struct s2255_fh *fh = priv;
struct s2255_dev *dev = fh->dev;
+ __u32 def_num, def_dem;
if (sp->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
+ memset(sp, 0, sizeof(struct v4l2_streamparm));
+ sp->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
sp->parm.capture.capturemode = dev->cap_parm[fh->channel].capturemode;
- dprintk(2, "getting parm %d\n", sp->parm.capture.capturemode);
+ def_num = (fh->mode.format == FORMAT_NTSC) ? 1001 : 1000;
+ def_dem = (fh->mode.format == FORMAT_NTSC) ? 30000 : 25000;
+ sp->parm.capture.timeperframe.denominator = def_dem;
+ switch (fh->mode.fdec) {
+ default:
+ case FDEC_1:
+ sp->parm.capture.timeperframe.numerator = def_num;
+ break;
+ case FDEC_2:
+ sp->parm.capture.timeperframe.numerator = def_num * 2;
+ break;
+ case FDEC_3:
+ sp->parm.capture.timeperframe.numerator = def_num * 3;
+ break;
+ case FDEC_5:
+ sp->parm.capture.timeperframe.numerator = def_num * 5;
+ break;
+ }
+ dprintk(4, "%s capture mode, %d timeperframe %d/%d\n", __func__,
+ sp->parm.capture.capturemode,
+ sp->parm.capture.timeperframe.numerator,
+ sp->parm.capture.timeperframe.denominator);
return 0;
}
@@ -1520,15 +1631,79 @@ static int vidioc_s_parm(struct file *file, void *priv,
{
struct s2255_fh *fh = priv;
struct s2255_dev *dev = fh->dev;
-
+ int fdec = FDEC_1;
+ __u32 def_num, def_dem;
if (sp->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
+ /* high quality capture mode requires a stream restart */
+ if (dev->cap_parm[fh->channel].capturemode
+ != sp->parm.capture.capturemode && res_locked(fh->dev, fh))
+ return -EBUSY;
+ def_num = (fh->mode.format == FORMAT_NTSC) ? 1001 : 1000;
+ def_dem = (fh->mode.format == FORMAT_NTSC) ? 30000 : 25000;
+ if (def_dem != sp->parm.capture.timeperframe.denominator)
+ sp->parm.capture.timeperframe.numerator = def_num;
+ else if (sp->parm.capture.timeperframe.numerator <= def_num)
+ sp->parm.capture.timeperframe.numerator = def_num;
+ else if (sp->parm.capture.timeperframe.numerator <= (def_num * 2)) {
+ sp->parm.capture.timeperframe.numerator = def_num * 2;
+ fdec = FDEC_2;
+ } else if (sp->parm.capture.timeperframe.numerator <= (def_num * 3)) {
+ sp->parm.capture.timeperframe.numerator = def_num * 3;
+ fdec = FDEC_3;
+ } else {
+ sp->parm.capture.timeperframe.numerator = def_num * 5;
+ fdec = FDEC_5;
+ }
+ fh->mode.fdec = fdec;
+ sp->parm.capture.timeperframe.denominator = def_dem;
+ s2255_set_mode(dev, fh->channel, &fh->mode);
+ dprintk(4, "%s capture mode, %d timeperframe %d/%d, fdec %d\n",
+ __func__,
+ sp->parm.capture.capturemode,
+ sp->parm.capture.timeperframe.numerator,
+ sp->parm.capture.timeperframe.denominator, fdec);
+ return 0;
+}
- dev->cap_parm[fh->channel].capturemode = sp->parm.capture.capturemode;
- dprintk(2, "setting param capture mode %d\n",
- sp->parm.capture.capturemode);
+static int vidioc_enum_frameintervals(struct file *file, void *priv,
+ struct v4l2_frmivalenum *fe)
+{
+ int is_ntsc = 0;
+#define NUM_FRAME_ENUMS 4
+ int frm_dec[NUM_FRAME_ENUMS] = {1, 2, 3, 5};
+ if (fe->index >= NUM_FRAME_ENUMS)
+ return -EINVAL;
+ switch (fe->width) {
+ case 640:
+ if (fe->height != 240 && fe->height != 480)
+ return -EINVAL;
+ is_ntsc = 1;
+ break;
+ case 320:
+ if (fe->height != 240)
+ return -EINVAL;
+ is_ntsc = 1;
+ break;
+ case 704:
+ if (fe->height != 288 && fe->height != 576)
+ return -EINVAL;
+ break;
+ case 352:
+ if (fe->height != 288)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+ fe->type = V4L2_FRMIVAL_TYPE_DISCRETE;
+ fe->discrete.denominator = is_ntsc ? 30000 : 25000;
+ fe->discrete.numerator = (is_ntsc ? 1001 : 1000) * frm_dec[fe->index];
+ dprintk(4, "%s discrete %d/%d\n", __func__, fe->discrete.numerator,
+ fe->discrete.denominator);
return 0;
}
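
vidioc_enum_frameintervals() above exposes the four supported decimation factors (1, 2, 3, 5) as discrete multiples of the NTSC (1001/30000) or PAL (1000/25000) base interval. A short sketch of the same arithmetic, with local names:

static void frame_interval(int is_ntsc, unsigned int decimation,
			   unsigned int *numerator, unsigned int *denominator)
{
	*denominator = is_ntsc ? 30000 : 25000;
	*numerator = (is_ntsc ? 1001 : 1000) * decimation;
	/* e.g. NTSC, decimation 2: 2002/30000, roughly 15 frames per second */
}
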
+
static int s2255_open(struct file *file)
{
struct video_device *vdev = video_devdata(file);
@@ -1538,31 +1713,29 @@ static int s2255_open(struct file *file)
int i = 0;
int cur_channel = -1;
int state;
-
dprintk(1, "s2255: open called (dev=%s)\n",
video_device_node_name(vdev));
- lock_kernel();
-
for (i = 0; i < MAX_CHANNELS; i++) {
- if (dev->vdev[i] == vdev) {
+ if (&dev->vdev[i] == vdev) {
cur_channel = i;
break;
}
}
-
- if (atomic_read(&dev->fw_data->fw_state) == S2255_FW_DISCONNECTING) {
- unlock_kernel();
- printk(KERN_INFO "disconnecting\n");
+ if (i == MAX_CHANNELS)
return -ENODEV;
- }
- kref_get(&dev->kref);
- mutex_lock(&dev->open_lock);
-
- dev->users[cur_channel]++;
- dprintk(4, "s2255: open_handles %d\n", dev->users[cur_channel]);
- switch (atomic_read(&dev->fw_data->fw_state)) {
+ /*
+ * open lock necessary to prevent multiple instances
+ * of v4l-conf (or other programs) from simultaneously
+ * reloading firmware.
+ */
+ mutex_lock(&dev->open_lock);
+ state = atomic_read(&dev->fw_data->fw_state);
+ switch (state) {
+ case S2255_FW_DISCONNECTING:
+ mutex_unlock(&dev->open_lock);
+ return -ENODEV;
case S2255_FW_FAILED:
s2255_dev_err(&dev->udev->dev,
"firmware load failed. retrying.\n");
@@ -1573,6 +1746,8 @@ static int s2255_open(struct file *file)
(atomic_read(&dev->fw_data->fw_state)
== S2255_FW_DISCONNECTING)),
msecs_to_jiffies(S2255_LOAD_TIMEOUT));
+ /* state may have changed, re-read */
+ state = atomic_read(&dev->fw_data->fw_state);
break;
case S2255_FW_NOTLOADED:
case S2255_FW_LOADED_DSPWAIT:
@@ -1584,53 +1759,50 @@ static int s2255_open(struct file *file)
== S2255_FW_SUCCESS) ||
(atomic_read(&dev->fw_data->fw_state)
== S2255_FW_DISCONNECTING)),
- msecs_to_jiffies(S2255_LOAD_TIMEOUT));
+ msecs_to_jiffies(S2255_LOAD_TIMEOUT));
+ /* state may have changed, re-read */
+ state = atomic_read(&dev->fw_data->fw_state);
break;
case S2255_FW_SUCCESS:
default:
break;
}
- state = atomic_read(&dev->fw_data->fw_state);
- if (state != S2255_FW_SUCCESS) {
- int rc;
- switch (state) {
- case S2255_FW_FAILED:
- printk(KERN_INFO "2255 FW load failed. %d\n", state);
- rc = -ENODEV;
- break;
- case S2255_FW_DISCONNECTING:
- printk(KERN_INFO "%s: disconnecting\n", __func__);
- rc = -ENODEV;
- break;
- case S2255_FW_LOADED_DSPWAIT:
- case S2255_FW_NOTLOADED:
- printk(KERN_INFO "%s: firmware not loaded yet"
- "please try again later\n",
- __func__);
- rc = -EAGAIN;
- break;
- default:
- printk(KERN_INFO "%s: unknown state\n", __func__);
- rc = -EFAULT;
- break;
- }
- dev->users[cur_channel]--;
+ /* state may have changed in above switch statement */
+ switch (state) {
+ case S2255_FW_SUCCESS:
+ break;
+ case S2255_FW_FAILED:
+ printk(KERN_INFO "2255 firmware load failed.\n");
+ mutex_unlock(&dev->open_lock);
+ return -ENODEV;
+ case S2255_FW_DISCONNECTING:
+ printk(KERN_INFO "%s: disconnecting\n", __func__);
mutex_unlock(&dev->open_lock);
- kref_put(&dev->kref, s2255_destroy);
- unlock_kernel();
- return rc;
+ return -ENODEV;
+ case S2255_FW_LOADED_DSPWAIT:
+ case S2255_FW_NOTLOADED:
+		printk(KERN_INFO "%s: firmware not loaded yet, "
+ "please try again later\n",
+ __func__);
+ /*
+ * Timeout on firmware load means device unusable.
+ * Set firmware failure state.
+ * On next s2255_open the firmware will be reloaded.
+ */
+ atomic_set(&dev->fw_data->fw_state,
+ S2255_FW_FAILED);
+ mutex_unlock(&dev->open_lock);
+ return -EAGAIN;
+ default:
+ printk(KERN_INFO "%s: unknown state\n", __func__);
+ mutex_unlock(&dev->open_lock);
+ return -EFAULT;
}
-
+ mutex_unlock(&dev->open_lock);
/* allocate + initialize per filehandle data */
fh = kzalloc(sizeof(*fh), GFP_KERNEL);
- if (NULL == fh) {
- dev->users[cur_channel]--;
- mutex_unlock(&dev->open_lock);
- kref_put(&dev->kref, s2255_destroy);
- unlock_kernel();
+ if (NULL == fh)
return -ENOMEM;
- }
-
file->private_data = fh;
fh->dev = dev;
fh->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
@@ -1640,35 +1812,23 @@ static int s2255_open(struct file *file)
fh->width = LINE_SZ_4CIFS_NTSC;
fh->height = NUM_LINES_4CIFS_NTSC * 2;
fh->channel = cur_channel;
-
/* configure channel to default state */
if (!dev->chn_configured[cur_channel]) {
s2255_set_mode(dev, cur_channel, &fh->mode);
dev->chn_configured[cur_channel] = 1;
}
-
-
- /* Put all controls at a sane state */
- for (i = 0; i < ARRAY_SIZE(s2255_qctrl); i++)
- qctl_regs[i] = s2255_qctrl[i].default_value;
-
- dprintk(1, "s2255drv: open dev=%s type=%s users=%d\n",
- video_device_node_name(vdev), v4l2_type_names[type],
- dev->users[cur_channel]);
- dprintk(2, "s2255drv: open: fh=0x%08lx, dev=0x%08lx, vidq=0x%08lx\n",
+ dprintk(1, "%s: dev=%s type=%s\n", __func__,
+ video_device_node_name(vdev), v4l2_type_names[type]);
+ dprintk(2, "%s: fh=0x%08lx, dev=0x%08lx, vidq=0x%08lx\n", __func__,
(unsigned long)fh, (unsigned long)dev,
(unsigned long)&dev->vidq[cur_channel]);
- dprintk(4, "s2255drv: open: list_empty active=%d\n",
+ dprintk(4, "%s: list_empty active=%d\n", __func__,
list_empty(&dev->vidq[cur_channel].active));
-
videobuf_queue_vmalloc_init(&fh->vb_vidq, &s2255_video_qops,
NULL, &dev->slock,
fh->type,
V4L2_FIELD_INTERLACED,
sizeof(struct s2255_buffer), fh);
-
- mutex_unlock(&dev->open_lock);
- unlock_kernel();
return 0;
}
@@ -1679,39 +1839,19 @@ static unsigned int s2255_poll(struct file *file,
struct s2255_fh *fh = file->private_data;
int rc;
dprintk(100, "%s\n", __func__);
-
if (V4L2_BUF_TYPE_VIDEO_CAPTURE != fh->type)
return POLLERR;
-
rc = videobuf_poll_stream(file, &fh->vb_vidq, wait);
return rc;
}
-static void s2255_destroy(struct kref *kref)
+static void s2255_destroy(struct s2255_dev *dev)
{
- struct s2255_dev *dev = to_s2255_dev(kref);
- int i;
- if (!dev) {
- printk(KERN_ERR "s2255drv: kref problem\n");
- return;
- }
- atomic_set(&dev->fw_data->fw_state, S2255_FW_DISCONNECTING);
- wake_up(&dev->fw_data->wait_fw);
- for (i = 0; i < MAX_CHANNELS; i++) {
- dev->setmode_ready[i] = 1;
- wake_up(&dev->wait_setmode[i]);
- }
- mutex_lock(&dev->open_lock);
- /* reset the DSP so firmware can be reload next time */
- s2255_reset_dsppower(dev);
- s2255_exit_v4l(dev);
/* board shutdown stops the read pipe if it is running */
s2255_board_shutdown(dev);
/* make sure firmware still not trying to load */
del_timer(&dev->timer); /* only started in .probe and .open */
-
if (dev->fw_data->fw_urb) {
- dprintk(2, "kill fw_urb\n");
usb_kill_urb(dev->fw_data->fw_urb);
usb_free_urb(dev->fw_data->fw_urb);
dev->fw_data->fw_urb = NULL;
@@ -1720,24 +1860,22 @@ static void s2255_destroy(struct kref *kref)
release_firmware(dev->fw_data->fw);
kfree(dev->fw_data->pfw_data);
kfree(dev->fw_data);
+ /* reset the DSP so firmware can be reloaded next time */
+ s2255_reset_dsppower(dev);
+ mutex_destroy(&dev->open_lock);
+ mutex_destroy(&dev->lock);
usb_put_dev(dev->udev);
dprintk(1, "%s", __func__);
-
- mutex_unlock(&dev->open_lock);
kfree(dev);
}
-static int s2255_close(struct file *file)
+static int s2255_release(struct file *file)
{
struct s2255_fh *fh = file->private_data;
struct s2255_dev *dev = fh->dev;
struct video_device *vdev = video_devdata(file);
-
if (!dev)
return -ENODEV;
-
- mutex_lock(&dev->open_lock);
-
/* turn off stream */
if (res_check(fh)) {
if (dev->b_acquire[fh->channel])
@@ -1745,15 +1883,8 @@ static int s2255_close(struct file *file)
videobuf_streamoff(&fh->vb_vidq);
res_free(dev, fh);
}
-
videobuf_mmap_free(&fh->vb_vidq);
- dev->users[fh->channel]--;
-
- mutex_unlock(&dev->open_lock);
-
- kref_put(&dev->kref, s2255_destroy);
- dprintk(1, "s2255: close called (dev=%s, users=%d)\n",
- video_device_node_name(vdev), dev->users[fh->channel]);
+ dprintk(1, "%s (dev=%s)\n", __func__, video_device_node_name(vdev));
kfree(fh);
return 0;
}
@@ -1765,27 +1896,25 @@ static int s2255_mmap_v4l(struct file *file, struct vm_area_struct *vma)
if (!fh)
return -ENODEV;
- dprintk(4, "mmap called, vma=0x%08lx\n", (unsigned long)vma);
-
+ dprintk(4, "%s, vma=0x%08lx\n", __func__, (unsigned long)vma);
ret = videobuf_mmap_mapper(&fh->vb_vidq, vma);
-
- dprintk(4, "vma start=0x%08lx, size=%ld, ret=%d\n",
+ dprintk(4, "%s vma start=0x%08lx, size=%ld, ret=%d\n", __func__,
(unsigned long)vma->vm_start,
(unsigned long)vma->vm_end - (unsigned long)vma->vm_start, ret);
-
return ret;
}
static const struct v4l2_file_operations s2255_fops_v4l = {
.owner = THIS_MODULE,
.open = s2255_open,
- .release = s2255_close,
+ .release = s2255_release,
.poll = s2255_poll,
.ioctl = video_ioctl2, /* V4L2 ioctl handler */
.mmap = s2255_mmap_v4l,
};
static const struct v4l2_ioctl_ops s2255_ioctl_ops = {
+ .vidioc_querymenu = vidioc_querymenu,
.vidioc_querycap = vidioc_querycap,
.vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
.vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
@@ -1811,13 +1940,23 @@ static const struct v4l2_ioctl_ops s2255_ioctl_ops = {
.vidioc_g_jpegcomp = vidioc_g_jpegcomp,
.vidioc_s_parm = vidioc_s_parm,
.vidioc_g_parm = vidioc_g_parm,
+ .vidioc_enum_frameintervals = vidioc_enum_frameintervals,
};
+static void s2255_video_device_release(struct video_device *vdev)
+{
+ struct s2255_dev *dev = video_get_drvdata(vdev);
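+	/*
+	 * dev->channels counts the registered video nodes; free the
+	 * device state once the last node has been released.
+	 */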
+	dprintk(4, "%s, channels: %d\n", __func__, atomic_read(&dev->channels));
+ if (atomic_dec_and_test(&dev->channels))
+ s2255_destroy(dev);
+ return;
+}
+
static struct video_device template = {
.name = "s2255v",
.fops = &s2255_fops_v4l,
.ioctl_ops = &s2255_ioctl_ops,
- .release = video_device_release,
+ .release = s2255_video_device_release,
.tvnorms = S2255_NORMS,
.current_norm = V4L2_STD_NTSC_M,
};
@@ -1827,7 +1966,9 @@ static int s2255_probe_v4l(struct s2255_dev *dev)
int ret;
int i;
int cur_nr = video_nr;
-
+ ret = v4l2_device_register(&dev->interface->dev, &dev->v4l2_dev);
+ if (ret)
+ return ret;
/* initialize all video 4 linux */
/* register 4 video devices */
for (i = 0; i < MAX_CHANNELS; i++) {
@@ -1835,45 +1976,39 @@ static int s2255_probe_v4l(struct s2255_dev *dev)
dev->vidq[i].dev = dev;
dev->vidq[i].channel = i;
/* register 4 video devices */
- dev->vdev[i] = video_device_alloc();
- memcpy(dev->vdev[i], &template, sizeof(struct video_device));
- dev->vdev[i]->parent = &dev->interface->dev;
- video_set_drvdata(dev->vdev[i], dev);
+ memcpy(&dev->vdev[i], &template, sizeof(struct video_device));
+ dev->vdev[i].v4l2_dev = &dev->v4l2_dev;
+ video_set_drvdata(&dev->vdev[i], dev);
if (video_nr == -1)
- ret = video_register_device(dev->vdev[i],
+ ret = video_register_device(&dev->vdev[i],
VFL_TYPE_GRABBER,
video_nr);
else
- ret = video_register_device(dev->vdev[i],
+ ret = video_register_device(&dev->vdev[i],
VFL_TYPE_GRABBER,
cur_nr + i);
- video_set_drvdata(dev->vdev[i], dev);
-
- if (ret != 0) {
+ if (ret) {
dev_err(&dev->udev->dev,
"failed to register video device!\n");
- return ret;
+ break;
}
+ atomic_inc(&dev->channels);
+ v4l2_info(&dev->v4l2_dev, "V4L2 device registered as %s\n",
+ video_device_node_name(&dev->vdev[i]));
+
}
+
printk(KERN_INFO "Sensoray 2255 V4L driver Revision: %d.%d\n",
S2255_MAJOR_VERSION,
S2255_MINOR_VERSION);
- return ret;
-}
-
-static void s2255_exit_v4l(struct s2255_dev *dev)
-{
-
- int i;
- for (i = 0; i < MAX_CHANNELS; i++) {
- if (video_is_registered(dev->vdev[i])) {
- video_unregister_device(dev->vdev[i]);
- printk(KERN_INFO "s2255 unregistered\n");
- } else {
- video_device_release(dev->vdev[i]);
- printk(KERN_INFO "s2255 released\n");
- }
+	/* if no channels registered, return error and probe will fail */
+ if (atomic_read(&dev->channels) == 0) {
+ v4l2_device_unregister(&dev->v4l2_dev);
+ return ret;
}
+ if (atomic_read(&dev->channels) != MAX_CHANNELS)
+ printk(KERN_WARNING "s2255: Not all channels available.\n");
+ return 0;
}
/* this function moves the usb stream read pipe data
@@ -1907,14 +2042,14 @@ static int save_frame(struct s2255_dev *dev, struct s2255_pipeinfo *pipe_info)
if (frm->ulState == S2255_READ_IDLE) {
int jj;
unsigned int cc;
- s32 *pdword;
+			__le32 *pdword; /* data from dsp is little endian */
int payload;
/* search for marker codes */
pdata = (unsigned char *)pipe_info->transfer_buffer;
+ pdword = (__le32 *)pdata;
for (jj = 0; jj < (pipe_info->cur_transfer_size - 12); jj++) {
- switch (*(s32 *) pdata) {
+ switch (*pdword) {
case S2255_MARKER_FRAME:
- pdword = (s32 *)pdata;
dprintk(4, "found frame marker at offset:"
" %d [%x %x]\n", jj, pdata[0],
pdata[1]);
@@ -1938,7 +2073,6 @@ static int save_frame(struct s2255_dev *dev, struct s2255_pipeinfo *pipe_info)
dev->jpg_size[dev->cc] = pdword[4];
break;
case S2255_MARKER_RESPONSE:
- pdword = (s32 *)pdata;
pdata += DEF_USB_BLOCK;
jj += DEF_USB_BLOCK;
if (pdword[1] >= MAX_CHANNELS)
@@ -1955,7 +2089,6 @@ static int save_frame(struct s2255_dev *dev, struct s2255_pipeinfo *pipe_info)
dprintk(5, "setmode ready %d\n", cc);
break;
case S2255_RESPONSE_FW:
-
dev->chn_ready |= (1 << cc);
if ((dev->chn_ready & 0x0f) != 0x0f)
break;
@@ -1965,6 +2098,13 @@ static int save_frame(struct s2255_dev *dev, struct s2255_pipeinfo *pipe_info)
S2255_FW_SUCCESS);
wake_up(&dev->fw_data->wait_fw);
break;
+ case S2255_RESPONSE_STATUS:
+ dev->vidstatus[cc] = pdword[3];
+ dev->vidstatus_ready[cc] = 1;
+ wake_up(&dev->wait_vidstatus[cc]);
+ dprintk(5, "got vidstatus %x chan %d\n",
+ pdword[3], cc);
+ break;
default:
printk(KERN_INFO "s2255 unknown resp\n");
}
@@ -2165,28 +2305,22 @@ static int s2255_release_sys_buffers(struct s2255_dev *dev,
static int s2255_board_init(struct s2255_dev *dev)
{
- int j;
struct s2255_mode mode_def = DEF_MODEI_NTSC_CONT;
int fw_ver;
+ int j;
+ struct s2255_pipeinfo *pipe = &dev->pipe;
dprintk(4, "board init: %p", dev);
-
- for (j = 0; j < MAX_PIPE_BUFFERS; j++) {
- struct s2255_pipeinfo *pipe = &dev->pipes[j];
-
- memset(pipe, 0, sizeof(*pipe));
- pipe->dev = dev;
- pipe->cur_transfer_size = S2255_USB_XFER_SIZE;
- pipe->max_transfer_size = S2255_USB_XFER_SIZE;
-
- pipe->transfer_buffer = kzalloc(pipe->max_transfer_size,
- GFP_KERNEL);
- if (pipe->transfer_buffer == NULL) {
- dprintk(1, "out of memory!\n");
- return -ENOMEM;
- }
-
+ memset(pipe, 0, sizeof(*pipe));
+ pipe->dev = dev;
+ pipe->cur_transfer_size = S2255_USB_XFER_SIZE;
+ pipe->max_transfer_size = S2255_USB_XFER_SIZE;
+
+ pipe->transfer_buffer = kzalloc(pipe->max_transfer_size,
+ GFP_KERNEL);
+ if (pipe->transfer_buffer == NULL) {
+ dprintk(1, "out of memory!\n");
+ return -ENOMEM;
}
-
/* query the firmware */
fw_ver = s2255_get_fx2fw(dev);
@@ -2203,6 +2337,8 @@ static int s2255_board_init(struct s2255_dev *dev)
for (j = 0; j < MAX_CHANNELS; j++) {
dev->b_acquire[j] = 0;
dev->mode[j] = mode_def;
+ if (dev->pid == 0x2257 && j > 1)
+ dev->mode[j].color |= (1 << 16);
dev->jc[j].quality = S2255_DEF_JPEG_QUAL;
dev->cur_fmt[j] = &formats[0];
dev->mode[j].restart = 1;
@@ -2213,16 +2349,14 @@ static int s2255_board_init(struct s2255_dev *dev)
}
/* start read pipe */
s2255_start_readpipe(dev);
-
- dprintk(1, "S2255: board initialized\n");
+ dprintk(1, "%s: success\n", __func__);
return 0;
}
static int s2255_board_shutdown(struct s2255_dev *dev)
{
u32 i;
-
- dprintk(1, "S2255: board shutdown: %p", dev);
+ dprintk(1, "%s: dev: %p", __func__, dev);
for (i = 0; i < MAX_CHANNELS; i++) {
if (dev->b_acquire[i])
@@ -2233,12 +2367,8 @@ static int s2255_board_shutdown(struct s2255_dev *dev)
for (i = 0; i < MAX_CHANNELS; i++)
s2255_release_sys_buffers(dev, i);
-
- /* release transfer buffers */
- for (i = 0; i < MAX_PIPE_BUFFERS; i++) {
- struct s2255_pipeinfo *pipe = &dev->pipes[i];
- kfree(pipe->transfer_buffer);
- }
+ /* release transfer buffer */
+ kfree(dev->pipe.transfer_buffer);
return 0;
}
@@ -2248,9 +2378,8 @@ static void read_pipe_completion(struct urb *purb)
struct s2255_dev *dev;
int status;
int pipe;
-
pipe_info = purb->context;
- dprintk(100, "read pipe completion %p, status %d\n", purb,
+ dprintk(100, "%s: urb:%p, status %d\n", __func__, purb,
purb->status);
if (pipe_info == NULL) {
dev_err(&purb->dev->dev, "no context!\n");
@@ -2265,13 +2394,13 @@ static void read_pipe_completion(struct urb *purb)
status = purb->status;
/* if shutting down, do not resubmit, exit immediately */
if (status == -ESHUTDOWN) {
- dprintk(2, "read_pipe_completion: err shutdown\n");
+ dprintk(2, "%s: err shutdown\n", __func__);
pipe_info->err_count++;
return;
}
if (pipe_info->state == 0) {
- dprintk(2, "exiting USB pipe");
+ dprintk(2, "%s: exiting USB pipe", __func__);
return;
}
@@ -2279,7 +2408,7 @@ static void read_pipe_completion(struct urb *purb)
s2255_read_video_callback(dev, pipe_info);
else {
pipe_info->err_count++;
- dprintk(1, "s2255drv: failed URB %d\n", status);
+ dprintk(1, "%s: failed URB %d\n", __func__, status);
}
pipe = usb_rcvbulkpipe(dev->udev, dev->read_endpoint);
@@ -2295,7 +2424,7 @@ static void read_pipe_completion(struct urb *purb)
dev_err(&dev->udev->dev, "error submitting urb\n");
}
} else {
- dprintk(2, "read pipe complete state 0\n");
+		dprintk(2, "%s: complete state 0\n", __func__);
}
return;
}
@@ -2304,35 +2433,28 @@ static int s2255_start_readpipe(struct s2255_dev *dev)
{
int pipe;
int retval;
- int i;
- struct s2255_pipeinfo *pipe_info = dev->pipes;
+ struct s2255_pipeinfo *pipe_info = &dev->pipe;
pipe = usb_rcvbulkpipe(dev->udev, dev->read_endpoint);
- dprintk(2, "start pipe IN %d\n", dev->read_endpoint);
-
- for (i = 0; i < MAX_PIPE_BUFFERS; i++) {
- pipe_info->state = 1;
- pipe_info->err_count = 0;
- pipe_info->stream_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!pipe_info->stream_urb) {
- dev_err(&dev->udev->dev,
- "ReadStream: Unable to alloc URB\n");
- return -ENOMEM;
- }
- /* transfer buffer allocated in board_init */
- usb_fill_bulk_urb(pipe_info->stream_urb, dev->udev,
- pipe,
- pipe_info->transfer_buffer,
- pipe_info->cur_transfer_size,
- read_pipe_completion, pipe_info);
-
- dprintk(4, "submitting URB %p\n", pipe_info->stream_urb);
- retval = usb_submit_urb(pipe_info->stream_urb, GFP_KERNEL);
- if (retval) {
- printk(KERN_ERR "s2255: start read pipe failed\n");
- return retval;
- }
+ dprintk(2, "%s: IN %d\n", __func__, dev->read_endpoint);
+ pipe_info->state = 1;
+ pipe_info->err_count = 0;
+ pipe_info->stream_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!pipe_info->stream_urb) {
+ dev_err(&dev->udev->dev,
+ "ReadStream: Unable to alloc URB\n");
+ return -ENOMEM;
+ }
+ /* transfer buffer allocated in board_init */
+ usb_fill_bulk_urb(pipe_info->stream_urb, dev->udev,
+ pipe,
+ pipe_info->transfer_buffer,
+ pipe_info->cur_transfer_size,
+ read_pipe_completion, pipe_info);
+ retval = usb_submit_urb(pipe_info->stream_urb, GFP_KERNEL);
+ if (retval) {
+ printk(KERN_ERR "s2255: start read pipe failed\n");
+ return retval;
}
-
return 0;
}
@@ -2347,10 +2469,7 @@ static int s2255_start_acquire(struct s2255_dev *dev, unsigned long chn)
dprintk(2, "start acquire failed, bad channel %lu\n", chn);
return -1;
}
-
chn_rev = G_chnmap[chn];
- dprintk(1, "S2255: start acquire %lu \n", chn);
-
buffer = kzalloc(512, GFP_KERNEL);
if (buffer == NULL) {
dev_err(&dev->udev->dev, "out of mem\n");
@@ -2366,9 +2485,9 @@ static int s2255_start_acquire(struct s2255_dev *dev, unsigned long chn)
}
/* send the start command */
- *(u32 *) buffer = IN_DATA_TOKEN;
- *((u32 *) buffer + 1) = (u32) chn_rev;
- *((u32 *) buffer + 2) = (u32) CMD_START;
+ *(__le32 *) buffer = IN_DATA_TOKEN;
+ *((__le32 *) buffer + 1) = (__le32) cpu_to_le32(chn_rev);
+ *((__le32 *) buffer + 2) = CMD_START;
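+	/* 512-byte command block: dword 0 = IN_DATA_TOKEN, dword 1 = the
+	 * remapped channel, dword 2 = the command code */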
res = s2255_write_config(dev->udev, (unsigned char *)buffer, 512);
if (res != 0)
dev_err(&dev->udev->dev, "CMD_START error\n");
@@ -2383,65 +2502,41 @@ static int s2255_stop_acquire(struct s2255_dev *dev, unsigned long chn)
unsigned char *buffer;
int res;
unsigned long chn_rev;
-
if (chn >= MAX_CHANNELS) {
dprintk(2, "stop acquire failed, bad channel %lu\n", chn);
return -1;
}
chn_rev = G_chnmap[chn];
-
buffer = kzalloc(512, GFP_KERNEL);
if (buffer == NULL) {
dev_err(&dev->udev->dev, "out of mem\n");
return -ENOMEM;
}
-
/* send the stop command */
- dprintk(4, "stop acquire %lu\n", chn);
- *(u32 *) buffer = IN_DATA_TOKEN;
- *((u32 *) buffer + 1) = (u32) chn_rev;
- *((u32 *) buffer + 2) = CMD_STOP;
+ *(__le32 *) buffer = IN_DATA_TOKEN;
+ *((__le32 *) buffer + 1) = (__le32) cpu_to_le32(chn_rev);
+ *((__le32 *) buffer + 2) = CMD_STOP;
res = s2255_write_config(dev->udev, (unsigned char *)buffer, 512);
-
if (res != 0)
dev_err(&dev->udev->dev, "CMD_STOP error\n");
-
- dprintk(4, "stop acquire: releasing states \n");
-
kfree(buffer);
dev->b_acquire[chn] = 0;
-
+ dprintk(4, "%s: chn %lu, res %d\n", __func__, chn, res);
return res;
}
static void s2255_stop_readpipe(struct s2255_dev *dev)
{
- int j;
-
- if (dev == NULL) {
- s2255_dev_err(&dev->udev->dev, "invalid device\n");
- return;
- }
- dprintk(4, "stop read pipe\n");
- for (j = 0; j < MAX_PIPE_BUFFERS; j++) {
- struct s2255_pipeinfo *pipe_info = &dev->pipes[j];
- if (pipe_info) {
- if (pipe_info->state == 0)
- continue;
- pipe_info->state = 0;
- }
- }
+ struct s2255_pipeinfo *pipe = &dev->pipe;
- for (j = 0; j < MAX_PIPE_BUFFERS; j++) {
- struct s2255_pipeinfo *pipe_info = &dev->pipes[j];
- if (pipe_info->stream_urb) {
- /* cancel urb */
- usb_kill_urb(pipe_info->stream_urb);
- usb_free_urb(pipe_info->stream_urb);
- pipe_info->stream_urb = NULL;
- }
+ pipe->state = 0;
+ if (pipe->stream_urb) {
+ /* cancel urb */
+ usb_kill_urb(pipe->stream_urb);
+ usb_free_urb(pipe->stream_urb);
+ pipe->stream_urb = NULL;
}
- dprintk(2, "s2255 stop read pipe: %d\n", j);
+	dprintk(4, "%s\n", __func__);
return;
}
@@ -2473,32 +2568,28 @@ static int s2255_probe(struct usb_interface *interface,
int retval = -ENOMEM;
__le32 *pdata;
int fw_size;
-
- dprintk(2, "s2255: probe\n");
-
+ dprintk(2, "%s\n", __func__);
/* allocate memory for our device state and initialize it to zero */
dev = kzalloc(sizeof(struct s2255_dev), GFP_KERNEL);
if (dev == NULL) {
s2255_dev_err(&interface->dev, "out of memory\n");
- goto error;
+ return -ENOMEM;
}
-
+ atomic_set(&dev->channels, 0);
+ dev->pid = id->idProduct;
dev->fw_data = kzalloc(sizeof(struct s2255_fw), GFP_KERNEL);
if (!dev->fw_data)
- goto error;
-
+ goto errorFWDATA1;
mutex_init(&dev->lock);
mutex_init(&dev->open_lock);
-
/* grab usb_device and save it */
dev->udev = usb_get_dev(interface_to_usbdev(interface));
if (dev->udev == NULL) {
dev_err(&interface->dev, "null usb device\n");
retval = -ENODEV;
- goto error;
+ goto errorUDEV;
}
- kref_init(&dev->kref);
- dprintk(1, "dev: %p, kref: %p udev %p interface %p\n", dev, &dev->kref,
+ dprintk(1, "dev: %p, udev %p interface %p\n", dev,
dev->udev, interface);
dev->interface = interface;
/* set up the endpoint information */
@@ -2514,39 +2605,33 @@ static int s2255_probe(struct usb_interface *interface,
if (!dev->read_endpoint) {
dev_err(&interface->dev, "Could not find bulk-in endpoint\n");
- goto error;
+ goto errorEP;
}
-
- /* set intfdata */
- usb_set_intfdata(interface, dev);
-
- dprintk(100, "after intfdata %p\n", dev);
-
init_timer(&dev->timer);
dev->timer.function = s2255_timer;
dev->timer.data = (unsigned long)dev->fw_data;
-
init_waitqueue_head(&dev->fw_data->wait_fw);
- for (i = 0; i < MAX_CHANNELS; i++)
+ for (i = 0; i < MAX_CHANNELS; i++) {
init_waitqueue_head(&dev->wait_setmode[i]);
-
+ init_waitqueue_head(&dev->wait_vidstatus[i]);
+ }
dev->fw_data->fw_urb = usb_alloc_urb(0, GFP_KERNEL);
-
if (!dev->fw_data->fw_urb) {
dev_err(&interface->dev, "out of memory!\n");
- goto error;
+ goto errorFWURB;
}
+
dev->fw_data->pfw_data = kzalloc(CHUNK_SIZE, GFP_KERNEL);
if (!dev->fw_data->pfw_data) {
dev_err(&interface->dev, "out of memory!\n");
- goto error;
+ goto errorFWDATA2;
}
/* load the first chunk */
if (request_firmware(&dev->fw_data->fw,
FIRMWARE_FILE_NAME, &dev->udev->dev)) {
printk(KERN_ERR "sensoray 2255 failed to get firmware\n");
- goto error;
+ goto errorREQFW;
}
/* check the firmware is valid */
fw_size = dev->fw_data->fw->size;
@@ -2555,59 +2640,80 @@ static int s2255_probe(struct usb_interface *interface,
if (*pdata != S2255_FW_MARKER) {
printk(KERN_INFO "Firmware invalid.\n");
retval = -ENODEV;
- goto error;
+ goto errorFWMARKER;
} else {
/* make sure firmware is the latest */
__le32 *pRel;
pRel = (__le32 *) &dev->fw_data->fw->data[fw_size - 4];
printk(KERN_INFO "s2255 dsp fw version %x\n", *pRel);
+ dev->dsp_fw_ver = *pRel;
+ if (*pRel < S2255_CUR_DSP_FWVER)
+ printk(KERN_INFO "s2255: f2255usb.bin out of date.\n");
+ if (dev->pid == 0x2257 && *pRel < S2255_MIN_DSP_COLORFILTER)
+			printk(KERN_WARNING "s2255: 2257 requires firmware %d "
+ "or above.\n", S2255_MIN_DSP_COLORFILTER);
}
- /* loads v4l specific */
- s2255_probe_v4l(dev);
usb_reset_device(dev->udev);
/* load 2255 board specific */
retval = s2255_board_init(dev);
if (retval)
- goto error;
-
- dprintk(4, "before probe done %p\n", dev);
+ goto errorBOARDINIT;
spin_lock_init(&dev->slock);
-
s2255_fwload_start(dev, 0);
+ /* loads v4l specific */
+ retval = s2255_probe_v4l(dev);
+ if (retval)
+ goto errorBOARDINIT;
dev_info(&interface->dev, "Sensoray 2255 detected\n");
return 0;
-error:
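+/* error unwind: each label below releases resources in the reverse order of allocation */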
+errorBOARDINIT:
+ s2255_board_shutdown(dev);
+errorFWMARKER:
+ release_firmware(dev->fw_data->fw);
+errorREQFW:
+ kfree(dev->fw_data->pfw_data);
+errorFWDATA2:
+ usb_free_urb(dev->fw_data->fw_urb);
+errorFWURB:
+ del_timer(&dev->timer);
+errorEP:
+ usb_put_dev(dev->udev);
+errorUDEV:
+ kfree(dev->fw_data);
+ mutex_destroy(&dev->open_lock);
+ mutex_destroy(&dev->lock);
+errorFWDATA1:
+ kfree(dev);
+ printk(KERN_WARNING "Sensoray 2255 driver load failed: 0x%x\n", retval);
return retval;
}
/* disconnect routine. when board is removed physically or with rmmod */
static void s2255_disconnect(struct usb_interface *interface)
{
- struct s2255_dev *dev = NULL;
+ struct s2255_dev *dev = to_s2255_dev(usb_get_intfdata(interface));
int i;
- dprintk(1, "s2255: disconnect interface %p\n", interface);
- dev = usb_get_intfdata(interface);
-
- /*
- * wake up any of the timers to allow open_lock to be
- * acquired sooner
- */
+ int channels = atomic_read(&dev->channels);
+ v4l2_device_unregister(&dev->v4l2_dev);
+	/* see comments in the uvc_driver.c usb disconnect function */
+ atomic_inc(&dev->channels);
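+	/* the extra count keeps s2255_destroy() from running while the
+	 * video nodes are being unregistered */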
+ /* unregister each video device. */
+ for (i = 0; i < channels; i++) {
+ if (video_is_registered(&dev->vdev[i]))
+ video_unregister_device(&dev->vdev[i]);
+ }
+ /* wake up any of our timers */
atomic_set(&dev->fw_data->fw_state, S2255_FW_DISCONNECTING);
wake_up(&dev->fw_data->wait_fw);
for (i = 0; i < MAX_CHANNELS; i++) {
dev->setmode_ready[i] = 1;
wake_up(&dev->wait_setmode[i]);
+ dev->vidstatus_ready[i] = 1;
+ wake_up(&dev->wait_vidstatus[i]);
}
-
- mutex_lock(&dev->open_lock);
- usb_set_intfdata(interface, NULL);
- mutex_unlock(&dev->open_lock);
-
- if (dev) {
- kref_put(&dev->kref, s2255_destroy);
- dprintk(1, "s2255drv: disconnect\n");
- dev_info(&interface->dev, "s2255usb now disconnected\n");
- }
+ if (atomic_dec_and_test(&dev->channels))
+ s2255_destroy(dev);
+ dev_info(&interface->dev, "%s\n", __func__);
}
static struct usb_driver s2255_driver = {
@@ -2620,15 +2726,12 @@ static struct usb_driver s2255_driver = {
static int __init usb_s2255_init(void)
{
int result;
-
/* register this driver with the USB subsystem */
result = usb_register(&s2255_driver);
-
if (result)
pr_err(KBUILD_MODNAME
- ": usb_register failed. Error number %d\n", result);
-
- dprintk(2, "s2255_init: done\n");
+ ": usb_register failed. Error number %d\n", result);
+ dprintk(2, "%s\n", __func__);
return result;
}
diff --git a/drivers/media/video/saa7115.c b/drivers/media/video/saa7115.c
index c0a7f8a369f4..53b6fcde3800 100644
--- a/drivers/media/video/saa7115.c
+++ b/drivers/media/video/saa7115.c
@@ -74,6 +74,7 @@ struct saa711x_state {
int contrast;
int hue;
int sat;
+ int chroma_agc;
int width;
int height;
u32 ident;
@@ -592,7 +593,7 @@ static const unsigned char saa7115_init_misc[] = {
R_5D_DID, 0xbd,
R_5E_SDID, 0x35,
- R_02_INPUT_CNTL_1, 0x84, /* input tuner -> input 4, amplifier active */
+ R_02_INPUT_CNTL_1, 0xc4, /* input tuner -> input 4, amplifier active */
R_80_GLOBAL_CNTL_1, 0x20, /* enable task B */
R_88_POWER_SAVE_ADC_PORT_CNTL, 0xd0,
@@ -743,6 +744,7 @@ static int saa711x_s_clock_freq(struct v4l2_subdev *sd, u32 freq)
static int saa711x_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
{
struct saa711x_state *state = to_state(sd);
+ u8 val;
switch (ctrl->id) {
case V4L2_CID_BRIGHTNESS:
@@ -784,7 +786,21 @@ static int saa711x_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
state->hue = ctrl->value;
saa711x_write(sd, R_0D_CHROMA_HUE_CNTL, state->hue);
break;
-
+ case V4L2_CID_CHROMA_AGC:
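+		/* bit 7 of R_0F_CHROMA_GAIN_CNTL selects manual gain:
+		 * clear it to enable AGC, set it to force manual gain */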
+ val = saa711x_read(sd, R_0F_CHROMA_GAIN_CNTL);
+ state->chroma_agc = ctrl->value;
+ if (ctrl->value)
+ val &= 0x7f;
+ else
+ val |= 0x80;
+ saa711x_write(sd, R_0F_CHROMA_GAIN_CNTL, val);
+ break;
+ case V4L2_CID_CHROMA_GAIN:
+ /* Chroma gain cannot be set when AGC is enabled */
+ if (state->chroma_agc == 1)
+ return -EINVAL;
+ saa711x_write(sd, R_0F_CHROMA_GAIN_CNTL, ctrl->value | 0x80);
+ break;
default:
return -EINVAL;
}
@@ -809,6 +825,12 @@ static int saa711x_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
case V4L2_CID_HUE:
ctrl->value = state->hue;
break;
+ case V4L2_CID_CHROMA_AGC:
+ ctrl->value = state->chroma_agc;
+ break;
+ case V4L2_CID_CHROMA_GAIN:
+ ctrl->value = saa711x_read(sd, R_0F_CHROMA_GAIN_CNTL) & 0x7f;
+ break;
default:
return -EINVAL;
}
@@ -1069,7 +1091,7 @@ static void saa711x_set_lcr(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_forma
saa7115_cfg_vbi_off);
}
-static int saa711x_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
+static int saa711x_g_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_format *sliced)
{
static u16 lcr2vbi[] = {
0, V4L2_SLICED_TELETEXT_B, 0, /* 1 */
@@ -1078,11 +1100,8 @@ static int saa711x_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
V4L2_SLICED_VPS, 0, 0, 0, 0, /* 7 */
0, 0, 0, 0
};
- struct v4l2_sliced_vbi_format *sliced = &fmt->fmt.sliced;
int i;
- if (fmt->type != V4L2_BUF_TYPE_SLICED_VBI_CAPTURE)
- return -EINVAL;
memset(sliced, 0, sizeof(*sliced));
/* done if using raw VBI */
if (saa711x_read(sd, R_80_GLOBAL_CNTL_1) & 0x10)
@@ -1098,16 +1117,27 @@ static int saa711x_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
return 0;
}
+static int saa711x_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
+{
+ if (fmt->type != V4L2_BUF_TYPE_SLICED_VBI_CAPTURE)
+ return -EINVAL;
+ return saa711x_g_sliced_fmt(sd, &fmt->fmt.sliced);
+}
+
+static int saa711x_s_raw_fmt(struct v4l2_subdev *sd, struct v4l2_vbi_format *fmt)
+{
+ saa711x_set_lcr(sd, NULL);
+ return 0;
+}
+
+static int saa711x_s_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_format *fmt)
+{
+ saa711x_set_lcr(sd, fmt);
+ return 0;
+}
+
static int saa711x_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
{
- if (fmt->type == V4L2_BUF_TYPE_SLICED_VBI_CAPTURE) {
- saa711x_set_lcr(sd, &fmt->fmt.sliced);
- return 0;
- }
- if (fmt->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
- saa711x_set_lcr(sd, NULL);
- return 0;
- }
if (fmt->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
@@ -1209,6 +1239,10 @@ static int saa711x_queryctrl(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc)
return v4l2_ctrl_query_fill(qc, 0, 127, 1, 64);
case V4L2_CID_HUE:
return v4l2_ctrl_query_fill(qc, -128, 127, 1, 0);
+ case V4L2_CID_CHROMA_AGC:
+ return v4l2_ctrl_query_fill(qc, 0, 1, 1, 1);
+ case V4L2_CID_CHROMA_GAIN:
+ return v4l2_ctrl_query_fill(qc, 0, 127, 1, 48);
default:
return -EINVAL;
}
@@ -1524,18 +1558,25 @@ static const struct v4l2_subdev_video_ops saa711x_video_ops = {
.s_crystal_freq = saa711x_s_crystal_freq,
.g_fmt = saa711x_g_fmt,
.s_fmt = saa711x_s_fmt,
- .g_vbi_data = saa711x_g_vbi_data,
- .decode_vbi_line = saa711x_decode_vbi_line,
.s_stream = saa711x_s_stream,
.querystd = saa711x_querystd,
.g_input_status = saa711x_g_input_status,
};
+static const struct v4l2_subdev_vbi_ops saa711x_vbi_ops = {
+ .g_vbi_data = saa711x_g_vbi_data,
+ .decode_vbi_line = saa711x_decode_vbi_line,
+ .g_sliced_fmt = saa711x_g_sliced_fmt,
+ .s_sliced_fmt = saa711x_s_sliced_fmt,
+ .s_raw_fmt = saa711x_s_raw_fmt,
+};
+
static const struct v4l2_subdev_ops saa711x_ops = {
.core = &saa711x_core_ops,
.tuner = &saa711x_tuner_ops,
.audio = &saa711x_audio_ops,
.video = &saa711x_video_ops,
+ .vbi = &saa711x_vbi_ops,
};
/* ----------------------------------------------------------------------- */
@@ -1593,6 +1634,7 @@ static int saa711x_probe(struct i2c_client *client,
state->contrast = 64;
state->hue = 0;
state->sat = 64;
+ state->chroma_agc = 1;
switch (chip_id) {
case '1':
state->ident = V4L2_IDENT_SAA7111;
diff --git a/drivers/media/video/saa7127.c b/drivers/media/video/saa7127.c
index 250ef84cf5ca..87986ad62f86 100644
--- a/drivers/media/video/saa7127.c
+++ b/drivers/media/video/saa7127.c
@@ -625,29 +625,33 @@ static int saa7127_s_stream(struct v4l2_subdev *sd, int enable)
return saa7127_set_video_enable(sd, enable);
}
-static int saa7127_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
+static int saa7127_g_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_format *fmt)
{
struct saa7127_state *state = to_state(sd);
- if (fmt->type != V4L2_BUF_TYPE_SLICED_VBI_CAPTURE)
- return -EINVAL;
-
- memset(&fmt->fmt.sliced, 0, sizeof(fmt->fmt.sliced));
+ memset(fmt, 0, sizeof(*fmt));
if (state->vps_enable)
- fmt->fmt.sliced.service_lines[0][16] = V4L2_SLICED_VPS;
+ fmt->service_lines[0][16] = V4L2_SLICED_VPS;
if (state->wss_enable)
- fmt->fmt.sliced.service_lines[0][23] = V4L2_SLICED_WSS_625;
+ fmt->service_lines[0][23] = V4L2_SLICED_WSS_625;
if (state->cc_enable) {
- fmt->fmt.sliced.service_lines[0][21] = V4L2_SLICED_CAPTION_525;
- fmt->fmt.sliced.service_lines[1][21] = V4L2_SLICED_CAPTION_525;
+ fmt->service_lines[0][21] = V4L2_SLICED_CAPTION_525;
+ fmt->service_lines[1][21] = V4L2_SLICED_CAPTION_525;
}
- fmt->fmt.sliced.service_set =
+ fmt->service_set =
(state->vps_enable ? V4L2_SLICED_VPS : 0) |
(state->wss_enable ? V4L2_SLICED_WSS_625 : 0) |
(state->cc_enable ? V4L2_SLICED_CAPTION_525 : 0);
return 0;
}
+static int saa7127_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
+{
+ if (fmt->type != V4L2_BUF_TYPE_SLICED_VBI_CAPTURE)
+ return -EINVAL;
+ return saa7127_g_sliced_fmt(sd, &fmt->fmt.sliced);
+}
+
static int saa7127_s_vbi_data(struct v4l2_subdev *sd, const struct v4l2_sliced_vbi_data *data)
{
switch (data->id) {
@@ -727,16 +731,21 @@ static const struct v4l2_subdev_core_ops saa7127_core_ops = {
};
static const struct v4l2_subdev_video_ops saa7127_video_ops = {
- .s_vbi_data = saa7127_s_vbi_data,
.g_fmt = saa7127_g_fmt,
.s_std_output = saa7127_s_std_output,
.s_routing = saa7127_s_routing,
.s_stream = saa7127_s_stream,
};
+static const struct v4l2_subdev_vbi_ops saa7127_vbi_ops = {
+ .s_vbi_data = saa7127_s_vbi_data,
+ .g_sliced_fmt = saa7127_g_sliced_fmt,
+};
+
static const struct v4l2_subdev_ops saa7127_ops = {
.core = &saa7127_core_ops,
.video = &saa7127_video_ops,
+ .vbi = &saa7127_vbi_ops,
};
/* ----------------------------------------------------------------------- */
diff --git a/drivers/media/video/saa7134/saa7134-alsa.c b/drivers/media/video/saa7134/saa7134-alsa.c
index d48c450ed77c..d3bd82ad010a 100644
--- a/drivers/media/video/saa7134/saa7134-alsa.c
+++ b/drivers/media/video/saa7134/saa7134-alsa.c
@@ -1011,8 +1011,6 @@ static int snd_card_saa7134_new_mixer(snd_card_saa7134_t * chip)
unsigned int idx;
int err, addr;
- if (snd_BUG_ON(!chip))
- return -EINVAL;
strcpy(card->mixername, "SAA7134 Mixer");
for (idx = 0; idx < ARRAY_SIZE(snd_saa7134_volume_controls); idx++) {
diff --git a/drivers/media/video/saa7134/saa7134-cards.c b/drivers/media/video/saa7134/saa7134-cards.c
index 297833fb3b4a..72700d4e3941 100644
--- a/drivers/media/video/saa7134/saa7134-cards.c
+++ b/drivers/media/video/saa7134/saa7134-cards.c
@@ -5355,6 +5355,79 @@ struct saa7134_board saa7134_boards[] = {
.amux = LINE2,
},
},
+ [SAA7134_BOARD_HAWELL_HW_404M7] = {
+ /* Hawell HW-404M7 & Hawell HW-808M7 */
+ /* Bogoslovskiy Viktor <bogovic@bk.ru> */
+ .name = "Hawell HW-404M7",
+ .audio_clock = 0x00200000,
+ .tuner_type = UNSET,
+ .radio_type = UNSET,
+ .tuner_addr = ADDR_UNSET,
+ .radio_addr = ADDR_UNSET,
+ .gpiomask = 0x389c00,
+ .inputs = {{
+ .name = name_comp1,
+ .vmux = 3,
+ .amux = LINE1,
+ .gpio = 0x01fc00,
+ } },
+ },
+ [SAA7134_BOARD_BEHOLD_H7] = {
+ /* Beholder Intl. Ltd. Dmitry Belimov <d.belimov@gmail.com> */
+ .name = "Beholder BeholdTV H7",
+ .audio_clock = 0x00187de7,
+ .tuner_type = TUNER_XC5000,
+ .radio_type = UNSET,
+ .tuner_addr = ADDR_UNSET,
+ .radio_addr = ADDR_UNSET,
+ .mpeg = SAA7134_MPEG_DVB,
+ .ts_type = SAA7134_MPEG_TS_PARALLEL,
+ .inputs = { {
+ .name = name_tv,
+ .vmux = 2,
+ .amux = TV,
+ .tv = 1,
+ }, {
+ .name = name_comp1,
+ .vmux = 0,
+ .amux = LINE1,
+ }, {
+ .name = name_svideo,
+ .vmux = 9,
+ .amux = LINE1,
+ } },
+ .radio = {
+ .name = name_radio,
+ .amux = TV,
+ },
+ },
+ [SAA7134_BOARD_BEHOLD_A7] = {
+ /* Beholder Intl. Ltd. Dmitry Belimov <d.belimov@gmail.com> */
+ .name = "Beholder BeholdTV A7",
+ .audio_clock = 0x00187de7,
+ .tuner_type = TUNER_XC5000,
+ .radio_type = UNSET,
+ .tuner_addr = ADDR_UNSET,
+ .radio_addr = ADDR_UNSET,
+ .inputs = { {
+ .name = name_tv,
+ .vmux = 2,
+ .amux = TV,
+ .tv = 1,
+ }, {
+ .name = name_comp1,
+ .vmux = 0,
+ .amux = LINE1,
+ }, {
+ .name = name_svideo,
+ .vmux = 9,
+ .amux = LINE1,
+ } },
+ .radio = {
+ .name = name_radio,
+ .amux = TV,
+ },
+ },
};
@@ -6549,6 +6622,18 @@ struct pci_device_id saa7134_pci_tbl[] = {
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.driver_data = SAA7134_BOARD_UNKNOWN,
+ }, {
+ .vendor = PCI_VENDOR_ID_PHILIPS,
+ .device = PCI_DEVICE_ID_PHILIPS_SAA7133,
+ .subvendor = 0x5ace, /* Beholder Intl. Ltd. */
+ .subdevice = 0x7190,
+ .driver_data = SAA7134_BOARD_BEHOLD_H7,
+ }, {
+ .vendor = PCI_VENDOR_ID_PHILIPS,
+ .device = PCI_DEVICE_ID_PHILIPS_SAA7133,
+ .subvendor = 0x5ace, /* Beholder Intl. Ltd. */
+ .subdevice = 0x7090,
+ .driver_data = SAA7134_BOARD_BEHOLD_A7,
},{
/* --- end of list --- */
}
@@ -6602,6 +6687,8 @@ static int saa7134_xc5000_callback(struct saa7134_dev *dev,
{
switch (dev->board) {
case SAA7134_BOARD_BEHOLD_X7:
+ case SAA7134_BOARD_BEHOLD_H7:
+ case SAA7134_BOARD_BEHOLD_A7:
if (command == XC5000_TUNER_RESET) {
/* Down and UP pheripherial RESET pin for reset all chips */
saa_writeb(SAA7134_SPECIAL_MODE, 0x00);
@@ -6973,6 +7060,8 @@ int saa7134_board_init1(struct saa7134_dev *dev)
case SAA7134_BOARD_BEHOLD_M6_EXTRA:
case SAA7134_BOARD_BEHOLD_H6:
case SAA7134_BOARD_BEHOLD_X7:
+ case SAA7134_BOARD_BEHOLD_H7:
+ case SAA7134_BOARD_BEHOLD_A7:
dev->has_remote = SAA7134_REMOTE_I2C;
break;
case SAA7134_BOARD_AVERMEDIA_A169_B:
@@ -7215,6 +7304,11 @@ int saa7134_board_init2(struct saa7134_dev *dev)
printk(KERN_INFO "%s: P7131 analog only, using "
"entry of %s\n",
dev->name, saa7134_boards[dev->board].name);
+
+ /* IR init has already happened for other cards, so
+ * we have to catch up. */
+ dev->has_remote = SAA7134_REMOTE_GPIO;
+ saa7134_input_init1(dev);
}
break;
case SAA7134_BOARD_HAUPPAUGE_HVR1150:
@@ -7344,6 +7438,23 @@ int saa7134_board_init2(struct saa7134_dev *dev)
}
break;
}
+ case SAA7134_BOARD_BEHOLD_H6:
+ {
+ u8 data[] = { 0x09, 0x9f, 0x86, 0x11};
+ struct i2c_msg msg = {.addr = 0x61, .flags = 0, .buf = data,
+ .len = sizeof(data)};
+
+		/* After a hardware start the TUNER_PHILIPS_FMD1216MEX_MK3 */
+		/* tuner has the IF disabled and DVB-T enabled, so saa7134 */
+		/* does not detect the tda9887 IF when scanning the I2C bus */
+		/* and analog TV cannot be watched without a soft reboot. */
+		/* Work around this by switching the tuner to analog TV mode. */
+ if (i2c_transfer(&dev->i2c_adap, &msg, 1) != 1)
+ printk(KERN_WARNING
+ "%s: Unable to enable IF of the tuner.\n",
+ dev->name);
+ break;
+ }
} /* switch() */
/* initialize tuner */
diff --git a/drivers/media/video/saa7134/saa7134-core.c b/drivers/media/video/saa7134/saa7134-core.c
index a7ad7810fddc..90f231881297 100644
--- a/drivers/media/video/saa7134/saa7134-core.c
+++ b/drivers/media/video/saa7134/saa7134-core.c
@@ -471,7 +471,7 @@ static char *irqbits[] = {
"DONE_RA0", "DONE_RA1", "DONE_RA2", "DONE_RA3",
"AR", "PE", "PWR_ON", "RDCAP", "INTL", "FIDT", "MMC",
"TRIG_ERR", "CONF_ERR", "LOAD_ERR",
- "GPIO16?", "GPIO18", "GPIO22", "GPIO23"
+ "GPIO16", "GPIO18", "GPIO22", "GPIO23"
};
#define IRQBITS ARRAY_SIZE(irqbits)
@@ -601,12 +601,14 @@ static irqreturn_t saa7134_irq(int irq, void *dev_id)
/* disable gpio16 IRQ */
printk(KERN_WARNING "%s/irq: looping -- "
"clearing GPIO16 enable bit\n",dev->name);
- saa_clearl(SAA7134_IRQ2, SAA7134_IRQ2_INTE_GPIO16);
+ saa_clearl(SAA7134_IRQ2, SAA7134_IRQ2_INTE_GPIO16_P);
+ saa_clearl(SAA7134_IRQ2, SAA7134_IRQ2_INTE_GPIO16_N);
} else if (report & SAA7134_IRQ_REPORT_GPIO18) {
/* disable gpio18 IRQs */
printk(KERN_WARNING "%s/irq: looping -- "
"clearing GPIO18 enable bit\n",dev->name);
- saa_clearl(SAA7134_IRQ2, SAA7134_IRQ2_INTE_GPIO18);
+ saa_clearl(SAA7134_IRQ2, SAA7134_IRQ2_INTE_GPIO18_P);
+ saa_clearl(SAA7134_IRQ2, SAA7134_IRQ2_INTE_GPIO18_N);
} else {
/* disable all irqs */
printk(KERN_WARNING "%s/irq: looping -- "
@@ -698,11 +700,13 @@ static int saa7134_hw_enable2(struct saa7134_dev *dev)
if (dev->has_remote == SAA7134_REMOTE_GPIO && dev->remote) {
if (dev->remote->mask_keydown & 0x10000)
- irq2_mask |= SAA7134_IRQ2_INTE_GPIO16;
- else if (dev->remote->mask_keydown & 0x40000)
- irq2_mask |= SAA7134_IRQ2_INTE_GPIO18;
- else if (dev->remote->mask_keyup & 0x40000)
- irq2_mask |= SAA7134_IRQ2_INTE_GPIO18A;
+ irq2_mask |= SAA7134_IRQ2_INTE_GPIO16_N;
+ else { /* Allow enabling both IRQ edge triggers */
+ if (dev->remote->mask_keydown & 0x40000)
+ irq2_mask |= SAA7134_IRQ2_INTE_GPIO18_P;
+ if (dev->remote->mask_keyup & 0x40000)
+ irq2_mask |= SAA7134_IRQ2_INTE_GPIO18_N;
+ }
}
if (dev->has_remote == SAA7134_REMOTE_I2C) {
@@ -1227,7 +1231,7 @@ static int saa7134_resume(struct pci_dev *pci_dev)
if (card_has_mpeg(dev))
saa7134_ts_init_hw(dev);
if (dev->remote)
- saa7134_ir_start(dev, dev->remote);
+ saa7134_ir_start(dev);
saa7134_hw_enable1(dev);
msleep(100);
diff --git a/drivers/media/video/saa7134/saa7134-dvb.c b/drivers/media/video/saa7134/saa7134-dvb.c
index 4ab4a987c9b9..31e82be1b7e7 100644
--- a/drivers/media/video/saa7134/saa7134-dvb.c
+++ b/drivers/media/video/saa7134/saa7134-dvb.c
@@ -1532,6 +1532,15 @@ static int dvb_init(struct saa7134_dev *dev)
&dev->i2c_adap, &behold_x7_tunerconfig);
}
break;
+ case SAA7134_BOARD_BEHOLD_H7:
+ fe0->dvb.frontend = dvb_attach(zl10353_attach,
+ &behold_x7_config,
+ &dev->i2c_adap);
+ if (fe0->dvb.frontend) {
+ dvb_attach(xc5000_attach, fe0->dvb.frontend,
+ &dev->i2c_adap, &behold_x7_tunerconfig);
+ }
+ break;
case SAA7134_BOARD_AVERMEDIA_A700_PRO:
case SAA7134_BOARD_AVERMEDIA_A700_HYBRID:
/* Zarlink ZL10313 */
diff --git a/drivers/media/video/saa7134/saa7134-input.c b/drivers/media/video/saa7134/saa7134-input.c
index 58a0cdc8414a..e5565e2fd426 100644
--- a/drivers/media/video/saa7134/saa7134-input.c
+++ b/drivers/media/video/saa7134/saa7134-input.c
@@ -28,6 +28,8 @@
#include "saa7134-reg.h"
#include "saa7134.h"
+#define MODULE_NAME "saa7134"
+
static unsigned int disable_ir;
module_param(disable_ir, int, 0444);
MODULE_PARM_DESC(disable_ir,"disable infrared remote support");
@@ -66,6 +68,7 @@ MODULE_PARM_DESC(disable_other_ir, "disable full codes of "
/* Helper functions for RC5 and NEC decoding at GPIO16 or GPIO18 */
static int saa7134_rc5_irq(struct saa7134_dev *dev);
static int saa7134_nec_irq(struct saa7134_dev *dev);
+static int saa7134_raw_decode_irq(struct saa7134_dev *dev);
static void nec_task(unsigned long data);
static void saa7134_nec_timer(unsigned long data);
@@ -397,14 +400,23 @@ static int get_key_pinnacle_color(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
void saa7134_input_irq(struct saa7134_dev *dev)
{
- struct card_ir *ir = dev->remote;
+ struct card_ir *ir;
+
+ if (!dev || !dev->remote)
+ return;
+
+ ir = dev->remote;
+ if (!ir->running)
+ return;
if (ir->nec_gpio) {
saa7134_nec_irq(dev);
- } else if (!ir->polling && !ir->rc5_gpio) {
+ } else if (!ir->polling && !ir->rc5_gpio && !ir->raw_decode) {
build_key(dev);
} else if (ir->rc5_gpio) {
saa7134_rc5_irq(dev);
+ } else if (ir->raw_decode) {
+ saa7134_raw_decode_irq(dev);
}
}
@@ -417,8 +429,32 @@ static void saa7134_input_timer(unsigned long data)
mod_timer(&ir->timer, jiffies + msecs_to_jiffies(ir->polling));
}
-void saa7134_ir_start(struct saa7134_dev *dev, struct card_ir *ir)
+void ir_raw_decode_timer_end(unsigned long data)
+{
+ struct saa7134_dev *dev = (struct saa7134_dev *)data;
+ struct card_ir *ir = dev->remote;
+
+ ir_raw_event_handle(dev->remote->dev);
+
+ ir->active = 0;
+}
+
+static int __saa7134_ir_start(void *priv)
{
+ struct saa7134_dev *dev = priv;
+ struct card_ir *ir;
+
+ if (!dev)
+ return -EINVAL;
+
+ ir = dev->remote;
+ if (!ir)
+ return -EINVAL;
+
+ if (ir->running)
+ return 0;
+
+ ir->running = 1;
if (ir->polling) {
setup_timer(&ir->timer, saa7134_input_timer,
(unsigned long)dev);
@@ -441,26 +477,125 @@ void saa7134_ir_start(struct saa7134_dev *dev, struct card_ir *ir)
setup_timer(&ir->timer_keyup, saa7134_nec_timer,
(unsigned long)dev);
tasklet_init(&ir->tlet, nec_task, (unsigned long)dev);
+ } else if (ir->raw_decode) {
+ /* set timer_end for code completion */
+ init_timer(&ir->timer_end);
+ ir->timer_end.function = ir_raw_decode_timer_end;
+ ir->timer_end.data = (unsigned long)dev;
+ ir->active = 0;
}
+
+ return 0;
}
-void saa7134_ir_stop(struct saa7134_dev *dev)
+static void __saa7134_ir_stop(void *priv)
{
+ struct saa7134_dev *dev = priv;
+ struct card_ir *ir;
+
+ if (!dev)
+ return;
+
+ ir = dev->remote;
+ if (!ir)
+ return;
+
+ if (!ir->running)
+ return;
if (dev->remote->polling)
del_timer_sync(&dev->remote->timer);
+ else if (ir->rc5_gpio)
+ del_timer_sync(&ir->timer_end);
+ else if (ir->nec_gpio)
+ tasklet_kill(&ir->tlet);
+ else if (ir->raw_decode) {
+ del_timer_sync(&ir->timer_end);
+ ir->active = 0;
+ }
+
+ ir->running = 0;
+
+ return;
+}
+
+int saa7134_ir_start(struct saa7134_dev *dev)
+{
+ if (dev->remote->users)
+ return __saa7134_ir_start(dev);
+
+ return 0;
+}
+
+void saa7134_ir_stop(struct saa7134_dev *dev)
+{
+ if (dev->remote->users)
+ __saa7134_ir_stop(dev);
+}
+
+static int saa7134_ir_open(void *priv)
+{
+ struct saa7134_dev *dev = priv;
+
+ dev->remote->users++;
+ return __saa7134_ir_start(dev);
+}
+
+static void saa7134_ir_close(void *priv)
+{
+ struct saa7134_dev *dev = priv;
+
+ dev->remote->users--;
+ if (!dev->remote->users)
+ __saa7134_ir_stop(dev);
+}
+
+
+int saa7134_ir_change_protocol(void *priv, u64 ir_type)
+{
+ struct saa7134_dev *dev = priv;
+ struct card_ir *ir = dev->remote;
+ u32 nec_gpio, rc5_gpio;
+
+ if (ir_type == IR_TYPE_RC5) {
+ dprintk("Changing protocol to RC5\n");
+ nec_gpio = 0;
+ rc5_gpio = 1;
+ } else if (ir_type == IR_TYPE_NEC) {
+ dprintk("Changing protocol to NEC\n");
+ nec_gpio = 1;
+ rc5_gpio = 0;
+ } else {
+		dprintk("IR protocol type %u is not supported\n",
+ (unsigned)ir_type);
+ return -EINVAL;
+ }
+
+ if (ir->running) {
+ saa7134_ir_stop(dev);
+ ir->nec_gpio = nec_gpio;
+ ir->rc5_gpio = rc5_gpio;
+ saa7134_ir_start(dev);
+ } else {
+ ir->nec_gpio = nec_gpio;
+ ir->rc5_gpio = rc5_gpio;
+ }
+
+ return 0;
}
int saa7134_input_init1(struct saa7134_dev *dev)
{
struct card_ir *ir;
struct input_dev *input_dev;
- struct ir_scancode_table *ir_codes = NULL;
+ char *ir_codes = NULL;
u32 mask_keycode = 0;
u32 mask_keydown = 0;
u32 mask_keyup = 0;
int polling = 0;
int rc5_gpio = 0;
int nec_gpio = 0;
+ int raw_decode = 0;
+ int allow_protocol_change = 0;
u64 ir_type = IR_TYPE_OTHER;
int err;
@@ -476,27 +611,27 @@ int saa7134_input_init1(struct saa7134_dev *dev)
case SAA7134_BOARD_FLYTVPLATINUM_FM:
case SAA7134_BOARD_FLYTVPLATINUM_MINI2:
case SAA7134_BOARD_ROVERMEDIA_LINK_PRO_FM:
- ir_codes = &ir_codes_flyvideo_table;
+ ir_codes = RC_MAP_FLYVIDEO;
mask_keycode = 0xEC00000;
mask_keydown = 0x0040000;
break;
case SAA7134_BOARD_CINERGY400:
case SAA7134_BOARD_CINERGY600:
case SAA7134_BOARD_CINERGY600_MK3:
- ir_codes = &ir_codes_cinergy_table;
+ ir_codes = RC_MAP_CINERGY;
mask_keycode = 0x00003f;
mask_keyup = 0x040000;
break;
case SAA7134_BOARD_ECS_TVP3XP:
case SAA7134_BOARD_ECS_TVP3XP_4CB5:
- ir_codes = &ir_codes_eztv_table;
+ ir_codes = RC_MAP_EZTV;
mask_keycode = 0x00017c;
mask_keyup = 0x000002;
polling = 50; // ms
break;
case SAA7134_BOARD_KWORLD_XPERT:
case SAA7134_BOARD_AVACSSMARTTV:
- ir_codes = &ir_codes_pixelview_table;
+ ir_codes = RC_MAP_PIXELVIEW;
mask_keycode = 0x00001F;
mask_keyup = 0x000020;
polling = 50; // ms
@@ -513,7 +648,7 @@ int saa7134_input_init1(struct saa7134_dev *dev)
case SAA7134_BOARD_AVERMEDIA_GO_007_FM:
case SAA7134_BOARD_AVERMEDIA_M102:
case SAA7134_BOARD_AVERMEDIA_GO_007_FM_PLUS:
- ir_codes = &ir_codes_avermedia_table;
+ ir_codes = RC_MAP_AVERMEDIA;
mask_keycode = 0x0007C8;
mask_keydown = 0x000010;
polling = 50; // ms
@@ -522,14 +657,15 @@ int saa7134_input_init1(struct saa7134_dev *dev)
saa_setb(SAA7134_GPIO_GPSTATUS0, 0x4);
break;
case SAA7134_BOARD_AVERMEDIA_M135A:
- ir_codes = &ir_codes_avermedia_m135a_table;
- mask_keydown = 0x0040000;
- mask_keycode = 0x00013f;
- nec_gpio = 1;
+ ir_codes = RC_MAP_AVERMEDIA_M135A_RM_JX;
+ mask_keydown = 0x0040000; /* Enable GPIO18 line on both edges */
+ mask_keyup = 0x0040000;
+ mask_keycode = 0xffff;
+ raw_decode = 1;
break;
case SAA7134_BOARD_AVERMEDIA_777:
case SAA7134_BOARD_AVERMEDIA_A16AR:
- ir_codes = &ir_codes_avermedia_table;
+ ir_codes = RC_MAP_AVERMEDIA;
mask_keycode = 0x02F200;
mask_keydown = 0x000400;
polling = 50; // ms
@@ -538,7 +674,7 @@ int saa7134_input_init1(struct saa7134_dev *dev)
saa_setb(SAA7134_GPIO_GPSTATUS1, 0x1);
break;
case SAA7134_BOARD_AVERMEDIA_A16D:
- ir_codes = &ir_codes_avermedia_a16d_table;
+ ir_codes = RC_MAP_AVERMEDIA_A16D;
mask_keycode = 0x02F200;
mask_keydown = 0x000400;
polling = 50; /* ms */
@@ -547,14 +683,14 @@ int saa7134_input_init1(struct saa7134_dev *dev)
saa_setb(SAA7134_GPIO_GPSTATUS1, 0x1);
break;
case SAA7134_BOARD_KWORLD_TERMINATOR:
- ir_codes = &ir_codes_pixelview_table;
+ ir_codes = RC_MAP_PIXELVIEW;
mask_keycode = 0x00001f;
mask_keyup = 0x000060;
polling = 50; // ms
break;
case SAA7134_BOARD_MANLI_MTV001:
case SAA7134_BOARD_MANLI_MTV002:
- ir_codes = &ir_codes_manli_table;
+ ir_codes = RC_MAP_MANLI;
mask_keycode = 0x001f00;
mask_keyup = 0x004000;
polling = 50; /* ms */
@@ -574,25 +710,25 @@ int saa7134_input_init1(struct saa7134_dev *dev)
case SAA7134_BOARD_BEHOLD_507_9FM:
case SAA7134_BOARD_BEHOLD_507RDS_MK3:
case SAA7134_BOARD_BEHOLD_507RDS_MK5:
- ir_codes = &ir_codes_manli_table;
+ ir_codes = RC_MAP_MANLI;
mask_keycode = 0x003f00;
mask_keyup = 0x004000;
polling = 50; /* ms */
break;
case SAA7134_BOARD_BEHOLD_COLUMBUS_TVFM:
- ir_codes = &ir_codes_behold_columbus_table;
+ ir_codes = RC_MAP_BEHOLD_COLUMBUS;
mask_keycode = 0x003f00;
mask_keyup = 0x004000;
polling = 50; // ms
break;
case SAA7134_BOARD_SEDNA_PC_TV_CARDBUS:
- ir_codes = &ir_codes_pctv_sedna_table;
+ ir_codes = RC_MAP_PCTV_SEDNA;
mask_keycode = 0x001f00;
mask_keyup = 0x004000;
polling = 50; // ms
break;
case SAA7134_BOARD_GOTVIEW_7135:
- ir_codes = &ir_codes_gotview7135_table;
+ ir_codes = RC_MAP_GOTVIEW7135;
mask_keycode = 0x0003CC;
mask_keydown = 0x000010;
polling = 5; /* ms */
@@ -601,80 +737,80 @@ int saa7134_input_init1(struct saa7134_dev *dev)
case SAA7134_BOARD_VIDEOMATE_TV_PVR:
case SAA7134_BOARD_VIDEOMATE_GOLD_PLUS:
case SAA7134_BOARD_VIDEOMATE_TV_GOLD_PLUSII:
- ir_codes = &ir_codes_videomate_tv_pvr_table;
+ ir_codes = RC_MAP_VIDEOMATE_TV_PVR;
mask_keycode = 0x00003F;
mask_keyup = 0x400000;
polling = 50; // ms
break;
case SAA7134_BOARD_PROTEUS_2309:
- ir_codes = &ir_codes_proteus_2309_table;
+ ir_codes = RC_MAP_PROTEUS_2309;
mask_keycode = 0x00007F;
mask_keyup = 0x000080;
polling = 50; // ms
break;
case SAA7134_BOARD_VIDEOMATE_DVBT_300:
case SAA7134_BOARD_VIDEOMATE_DVBT_200:
- ir_codes = &ir_codes_videomate_tv_pvr_table;
+ ir_codes = RC_MAP_VIDEOMATE_TV_PVR;
mask_keycode = 0x003F00;
mask_keyup = 0x040000;
break;
case SAA7134_BOARD_FLYDVBS_LR300:
case SAA7134_BOARD_FLYDVBT_LR301:
case SAA7134_BOARD_FLYDVBTDUO:
- ir_codes = &ir_codes_flydvb_table;
+ ir_codes = RC_MAP_FLYDVB;
mask_keycode = 0x0001F00;
mask_keydown = 0x0040000;
break;
case SAA7134_BOARD_ASUSTeK_P7131_DUAL:
case SAA7134_BOARD_ASUSTeK_P7131_HYBRID_LNA:
case SAA7134_BOARD_ASUSTeK_P7131_ANALOG:
- ir_codes = &ir_codes_asus_pc39_table;
+ ir_codes = RC_MAP_ASUS_PC39;
mask_keydown = 0x0040000;
rc5_gpio = 1;
break;
case SAA7134_BOARD_ENCORE_ENLTV:
case SAA7134_BOARD_ENCORE_ENLTV_FM:
- ir_codes = &ir_codes_encore_enltv_table;
+ ir_codes = RC_MAP_ENCORE_ENLTV;
mask_keycode = 0x00007f;
mask_keyup = 0x040000;
polling = 50; // ms
break;
case SAA7134_BOARD_ENCORE_ENLTV_FM53:
- ir_codes = &ir_codes_encore_enltv_fm53_table;
+ ir_codes = RC_MAP_ENCORE_ENLTV_FM53;
mask_keydown = 0x0040000;
mask_keycode = 0x00007f;
nec_gpio = 1;
break;
case SAA7134_BOARD_10MOONSTVMASTER3:
- ir_codes = &ir_codes_encore_enltv_table;
+ ir_codes = RC_MAP_ENCORE_ENLTV;
mask_keycode = 0x5f80000;
mask_keyup = 0x8000000;
polling = 50; //ms
break;
case SAA7134_BOARD_GENIUS_TVGO_A11MCE:
- ir_codes = &ir_codes_genius_tvgo_a11mce_table;
+ ir_codes = RC_MAP_GENIUS_TVGO_A11MCE;
mask_keycode = 0xff;
mask_keydown = 0xf00000;
polling = 50; /* ms */
break;
case SAA7134_BOARD_REAL_ANGEL_220:
- ir_codes = &ir_codes_real_audio_220_32_keys_table;
+ ir_codes = RC_MAP_REAL_AUDIO_220_32_KEYS;
mask_keycode = 0x3f00;
mask_keyup = 0x4000;
polling = 50; /* ms */
break;
case SAA7134_BOARD_KWORLD_PLUS_TV_ANALOG:
- ir_codes = &ir_codes_kworld_plus_tv_analog_table;
+ ir_codes = RC_MAP_KWORLD_PLUS_TV_ANALOG;
mask_keycode = 0x7f;
polling = 40; /* ms */
break;
case SAA7134_BOARD_VIDEOMATE_S350:
- ir_codes = &ir_codes_videomate_s350_table;
+ ir_codes = RC_MAP_VIDEOMATE_S350;
mask_keycode = 0x003f00;
mask_keydown = 0x040000;
break;
case SAA7134_BOARD_LEADTEK_WINFAST_DTV1000S:
- ir_codes = &ir_codes_winfast_table;
+ ir_codes = RC_MAP_WINFAST;
mask_keycode = 0x5f00;
mask_keyup = 0x020000;
polling = 50; /* ms */
@@ -695,6 +831,9 @@ int saa7134_input_init1(struct saa7134_dev *dev)
}
ir->dev = input_dev;
+ dev->remote = ir;
+
+ ir->running = 0;
/* init hardware-specific stuff */
ir->mask_keycode = mask_keycode;
@@ -703,6 +842,7 @@ int saa7134_input_init1(struct saa7134_dev *dev)
ir->polling = polling;
ir->rc5_gpio = rc5_gpio;
ir->nec_gpio = nec_gpio;
+ ir->raw_decode = raw_decode;
/* init input device */
snprintf(ir->name, sizeof(ir->name), "saa7134 IR (%s)",
@@ -710,6 +850,19 @@ int saa7134_input_init1(struct saa7134_dev *dev)
snprintf(ir->phys, sizeof(ir->phys), "pci-%s/ir0",
pci_name(dev->pci));
+
+ ir->props.priv = dev;
+ ir->props.open = saa7134_ir_open;
+ ir->props.close = saa7134_ir_close;
+
+ if (raw_decode)
+ ir->props.driver_type = RC_DRIVER_IR_RAW;
+
+ if (!raw_decode && allow_protocol_change) {
+ ir->props.allowed_protos = IR_TYPE_RC5 | IR_TYPE_NEC;
+ ir->props.change_protocol = saa7134_ir_change_protocol;
+ }
+
err = ir_input_init(input_dev, &ir->ir, ir_type);
if (err < 0)
goto err_out_free;
@@ -727,12 +880,9 @@ int saa7134_input_init1(struct saa7134_dev *dev)
}
input_dev->dev.parent = &dev->pci->dev;
- dev->remote = ir;
- saa7134_ir_start(dev, ir);
-
- err = ir_input_register(ir->dev, ir_codes, NULL);
+ err = ir_input_register(ir->dev, ir_codes, &ir->props, MODULE_NAME);
if (err)
- goto err_out_stop;
+ goto err_out_free;
/* the remote isn't as bouncy as a keyboard */
ir->dev->rep[REP_DELAY] = repeat_delay;
@@ -740,10 +890,8 @@ int saa7134_input_init1(struct saa7134_dev *dev)
return 0;
- err_out_stop:
- saa7134_ir_stop(dev);
+err_out_free:
dev->remote = NULL;
- err_out_free:
kfree(ir);
return err;
}
@@ -787,24 +935,24 @@ void saa7134_probe_i2c_ir(struct saa7134_dev *dev)
dev->init_data.name = "Pinnacle PCTV";
if (pinnacle_remote == 0) {
dev->init_data.get_key = get_key_pinnacle_color;
- dev->init_data.ir_codes = &ir_codes_pinnacle_color_table;
+ dev->init_data.ir_codes = RC_MAP_PINNACLE_COLOR;
info.addr = 0x47;
} else {
dev->init_data.get_key = get_key_pinnacle_grey;
- dev->init_data.ir_codes = &ir_codes_pinnacle_grey_table;
+ dev->init_data.ir_codes = RC_MAP_PINNACLE_GREY;
info.addr = 0x47;
}
break;
case SAA7134_BOARD_UPMOST_PURPLE_TV:
dev->init_data.name = "Purple TV";
dev->init_data.get_key = get_key_purpletv;
- dev->init_data.ir_codes = &ir_codes_purpletv_table;
+ dev->init_data.ir_codes = RC_MAP_PURPLETV;
info.addr = 0x7a;
break;
case SAA7134_BOARD_MSI_TVATANYWHERE_PLUS:
dev->init_data.name = "MSI TV@nywhere Plus";
dev->init_data.get_key = get_key_msi_tvanywhere_plus;
- dev->init_data.ir_codes = &ir_codes_msi_tvanywhere_plus_table;
+ dev->init_data.ir_codes = RC_MAP_MSI_TVANYWHERE_PLUS;
info.addr = 0x30;
/* MSI TV@nywhere Plus controller doesn't seem to
respond to probes unless we read something from
@@ -818,7 +966,7 @@ void saa7134_probe_i2c_ir(struct saa7134_dev *dev)
case SAA7134_BOARD_HAUPPAUGE_HVR1110:
dev->init_data.name = "HVR 1110";
dev->init_data.get_key = get_key_hvr1110;
- dev->init_data.ir_codes = &ir_codes_hauppauge_new_table;
+ dev->init_data.ir_codes = RC_MAP_HAUPPAUGE_NEW;
info.addr = 0x71;
break;
case SAA7134_BOARD_BEHOLD_607FM_MK3:
@@ -834,9 +982,12 @@ void saa7134_probe_i2c_ir(struct saa7134_dev *dev)
case SAA7134_BOARD_BEHOLD_M6_EXTRA:
case SAA7134_BOARD_BEHOLD_H6:
case SAA7134_BOARD_BEHOLD_X7:
+ case SAA7134_BOARD_BEHOLD_H7:
+ case SAA7134_BOARD_BEHOLD_A7:
dev->init_data.name = "BeholdTV";
dev->init_data.get_key = get_key_beholdm6xx;
- dev->init_data.ir_codes = &ir_codes_behold_table;
+ dev->init_data.ir_codes = RC_MAP_BEHOLD;
+ dev->init_data.type = IR_TYPE_NEC;
info.addr = 0x2d;
break;
case SAA7134_BOARD_AVERMEDIA_CARDBUS_501:
@@ -846,7 +997,7 @@ void saa7134_probe_i2c_ir(struct saa7134_dev *dev)
case SAA7134_BOARD_FLYDVB_TRIO:
dev->init_data.name = "FlyDVB Trio";
dev->init_data.get_key = get_key_flydvb_trio;
- dev->init_data.ir_codes = &ir_codes_flydvb_table;
+ dev->init_data.ir_codes = RC_MAP_FLYDVB;
info.addr = 0x0b;
break;
default:
@@ -859,6 +1010,33 @@ void saa7134_probe_i2c_ir(struct saa7134_dev *dev)
i2c_new_device(&dev->i2c_adap, &info);
}
+static int saa7134_raw_decode_irq(struct saa7134_dev *dev)
+{
+ struct card_ir *ir = dev->remote;
+ unsigned long timeout;
+ int space;
+
+ /* Generate initial event */
+ saa_clearb(SAA7134_GPIO_GPMODE3, SAA7134_GPIO_GPRESCAN);
+ saa_setb(SAA7134_GPIO_GPMODE3, SAA7134_GPIO_GPRESCAN);
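+	/* pulse GPRESCAN so the GPSTATUS0 read below reflects the current
+	 * level of the IR line */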
+ space = saa_readl(SAA7134_GPIO_GPSTATUS0 >> 2) & ir->mask_keydown;
+ ir_raw_event_store_edge(dev->remote->dev, space ? IR_SPACE : IR_PULSE);
+
+
+ /*
+ * Wait 15 ms from the start of the first IR event before processing
+ * the event. This time is enough for NEC protocol. May need adjustments
+ * to work with other protocols.
+ */
+ if (!ir->active) {
+		timeout = jiffies + msecs_to_jiffies(15);
+ mod_timer(&ir->timer_end, timeout);
+ ir->active = 1;
+ }
+
+ return 1;
+}
+
static int saa7134_rc5_irq(struct saa7134_dev *dev)
{
struct card_ir *ir = dev->remote;
@@ -901,7 +1079,6 @@ static int saa7134_rc5_irq(struct saa7134_dev *dev)
return 1;
}
-
/* On NEC protocol, One has 2.25 ms, and zero has 1.125 ms
The first pulse (start) has 9 + 4.5 ms
*/
@@ -1011,14 +1188,14 @@ static void nec_task(unsigned long data)
/* Keep repeating the last key */
mod_timer(&ir->timer_keyup, jiffies + msecs_to_jiffies(150));
- saa_setl(SAA7134_IRQ2, SAA7134_IRQ2_INTE_GPIO18);
+ saa_setl(SAA7134_IRQ2, SAA7134_IRQ2_INTE_GPIO18_P);
}
static int saa7134_nec_irq(struct saa7134_dev *dev)
{
struct card_ir *ir = dev->remote;
- saa_clearl(SAA7134_IRQ2, SAA7134_IRQ2_INTE_GPIO18);
+ saa_clearl(SAA7134_IRQ2, SAA7134_IRQ2_INTE_GPIO18_P);
tasklet_schedule(&ir->tlet);
return 1;
diff --git a/drivers/media/video/saa7134/saa7134-reg.h b/drivers/media/video/saa7134/saa7134-reg.h
index cf89d96d7295..e7e0af101fa7 100644
--- a/drivers/media/video/saa7134/saa7134-reg.h
+++ b/drivers/media/video/saa7134/saa7134-reg.h
@@ -112,17 +112,17 @@
#define SAA7134_IRQ1_INTE_RA0_0 (1 << 0)
#define SAA7134_IRQ2 (0x2c8 >> 2)
-#define SAA7134_IRQ2_INTE_GPIO23A (1 << 17)
-#define SAA7134_IRQ2_INTE_GPIO23 (1 << 16)
-#define SAA7134_IRQ2_INTE_GPIO22A (1 << 15)
-#define SAA7134_IRQ2_INTE_GPIO22 (1 << 14)
-#define SAA7134_IRQ2_INTE_GPIO18A (1 << 13)
-#define SAA7134_IRQ2_INTE_GPIO18 (1 << 12)
-#define SAA7134_IRQ2_INTE_GPIO16 (1 << 11) /* not certain */
-#define SAA7134_IRQ2_INTE_SC2 (1 << 10)
-#define SAA7134_IRQ2_INTE_SC1 (1 << 9)
-#define SAA7134_IRQ2_INTE_SC0 (1 << 8)
-#define SAA7134_IRQ2_INTE_DEC5 (1 << 7)
+#define SAA7134_IRQ2_INTE_GPIO23_N (1 << 17) /* negative edge */
+#define SAA7134_IRQ2_INTE_GPIO23_P (1 << 16) /* positive edge */
+#define SAA7134_IRQ2_INTE_GPIO22_N (1 << 15) /* negative edge */
+#define SAA7134_IRQ2_INTE_GPIO22_P (1 << 14) /* positive edge */
+#define SAA7134_IRQ2_INTE_GPIO18_N (1 << 13) /* negative edge */
+#define SAA7134_IRQ2_INTE_GPIO18_P (1 << 12) /* positive edge */
+#define SAA7134_IRQ2_INTE_GPIO16_N (1 << 11) /* negative edge */
+#define SAA7134_IRQ2_INTE_GPIO16_P (1 << 10) /* positive edge */
+#define SAA7134_IRQ2_INTE_SC2 (1 << 9)
+#define SAA7134_IRQ2_INTE_SC1 (1 << 8)
+#define SAA7134_IRQ2_INTE_SC0 (1 << 7)
#define SAA7134_IRQ2_INTE_DEC4 (1 << 6)
#define SAA7134_IRQ2_INTE_DEC3 (1 << 5)
#define SAA7134_IRQ2_INTE_DEC2 (1 << 4)
@@ -135,7 +135,7 @@
#define SAA7134_IRQ_REPORT_GPIO23 (1 << 17)
#define SAA7134_IRQ_REPORT_GPIO22 (1 << 16)
#define SAA7134_IRQ_REPORT_GPIO18 (1 << 15)
-#define SAA7134_IRQ_REPORT_GPIO16 (1 << 14) /* not certain */
+#define SAA7134_IRQ_REPORT_GPIO16 (1 << 14)
#define SAA7134_IRQ_REPORT_LOAD_ERR (1 << 13)
#define SAA7134_IRQ_REPORT_CONF_ERR (1 << 12)
#define SAA7134_IRQ_REPORT_TRIG_ERR (1 << 11)
diff --git a/drivers/media/video/saa7134/saa7134-video.c b/drivers/media/video/saa7134/saa7134-video.c
index 31138d3e51bb..45f0ac8f3c0f 100644
--- a/drivers/media/video/saa7134/saa7134-video.c
+++ b/drivers/media/video/saa7134/saa7134-video.c
@@ -1180,7 +1180,7 @@ int saa7134_s_ctrl_internal(struct saa7134_dev *dev, struct saa7134_fh *fh, str
That needs to be fixed somehow, but for now this is
good enough. */
if (fh) {
- err = v4l2_prio_check(&dev->prio, &fh->prio);
+ err = v4l2_prio_check(&dev->prio, fh->prio);
if (0 != err)
return err;
}
@@ -1359,7 +1359,7 @@ static int video_open(struct file *file)
fh->fmt = format_by_fourcc(V4L2_PIX_FMT_BGR24);
fh->width = 720;
fh->height = 576;
- v4l2_prio_open(&dev->prio,&fh->prio);
+ v4l2_prio_open(&dev->prio, &fh->prio);
videobuf_queue_sg_init(&fh->cap, &video_qops,
&dev->pci->dev, &dev->slock,
@@ -1502,7 +1502,7 @@ static int video_release(struct file *file)
saa7134_pgtable_free(dev->pci,&fh->pt_cap);
saa7134_pgtable_free(dev->pci,&fh->pt_vbi);
- v4l2_prio_close(&dev->prio,&fh->prio);
+ v4l2_prio_close(&dev->prio, fh->prio);
file->private_data = NULL;
kfree(fh);
return 0;
@@ -1632,8 +1632,15 @@ static int saa7134_try_fmt_vid_cap(struct file *file, void *priv,
}
f->fmt.pix.field = field;
- v4l_bound_align_image(&f->fmt.pix.width, 48, maxw, 2,
- &f->fmt.pix.height, 32, maxh, 0, 0);
+ if (f->fmt.pix.width < 48)
+ f->fmt.pix.width = 48;
+ if (f->fmt.pix.height < 32)
+ f->fmt.pix.height = 32;
+ if (f->fmt.pix.width > maxw)
+ f->fmt.pix.width = maxw;
+ if (f->fmt.pix.height > maxh)
+ f->fmt.pix.height = maxh;
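+ /* round the width down to a multiple of 4 */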
+ f->fmt.pix.width &= ~0x03;
f->fmt.pix.bytesperline =
(f->fmt.pix.width * fmt->depth) >> 3;
f->fmt.pix.sizeimage =
@@ -1778,7 +1785,7 @@ static int saa7134_s_input(struct file *file, void *priv, unsigned int i)
struct saa7134_dev *dev = fh->dev;
int err;
- err = v4l2_prio_check(&dev->prio, &fh->prio);
+ err = v4l2_prio_check(&dev->prio, fh->prio);
if (0 != err)
return err;
@@ -1832,7 +1839,7 @@ int saa7134_s_std_internal(struct saa7134_dev *dev, struct saa7134_fh *fh, v4l2_
That needs to be fixed somehow, but for now this is
good enough. */
if (fh) {
- err = v4l2_prio_check(&dev->prio, &fh->prio);
+ err = v4l2_prio_check(&dev->prio, fh->prio);
if (0 != err)
return err;
} else if (res_locked(dev, RESOURCE_OVERLAY)) {
@@ -2016,7 +2023,7 @@ static int saa7134_s_tuner(struct file *file, void *priv,
struct saa7134_dev *dev = fh->dev;
int rx, mode, err;
- err = v4l2_prio_check(&dev->prio, &fh->prio);
+ err = v4l2_prio_check(&dev->prio, fh->prio);
if (0 != err)
return err;
@@ -2050,7 +2057,7 @@ static int saa7134_s_frequency(struct file *file, void *priv,
struct saa7134_dev *dev = fh->dev;
int err;
- err = v4l2_prio_check(&dev->prio, &fh->prio);
+ err = v4l2_prio_check(&dev->prio, fh->prio);
if (0 != err)
return err;
diff --git a/drivers/media/video/saa7134/saa7134.h b/drivers/media/video/saa7134/saa7134.h
index bf130967ed17..3962534267be 100644
--- a/drivers/media/video/saa7134/saa7134.h
+++ b/drivers/media/video/saa7134/saa7134.h
@@ -20,7 +20,7 @@
*/
#include <linux/version.h>
-#define SAA7134_VERSION_CODE KERNEL_VERSION(0,2,15)
+#define SAA7134_VERSION_CODE KERNEL_VERSION(0, 2, 16)
#include <linux/pci.h>
#include <linux/i2c.h>
@@ -300,6 +300,9 @@ struct saa7134_format {
#define SAA7134_BOARD_ASUS_EUROPA_HYBRID 174
#define SAA7134_BOARD_LEADTEK_WINFAST_DTV1000S 175
#define SAA7134_BOARD_BEHOLD_505RDS_MK3 176
+#define SAA7134_BOARD_HAWELL_HW_404M7 177
+#define SAA7134_BOARD_BEHOLD_H7 178
+#define SAA7134_BOARD_BEHOLD_A7 179
#define SAA7134_MAXBOARDS 32
#define SAA7134_INPUT_MAX 8
@@ -809,7 +812,7 @@ int saa7134_input_init1(struct saa7134_dev *dev);
void saa7134_input_fini(struct saa7134_dev *dev);
void saa7134_input_irq(struct saa7134_dev *dev);
void saa7134_probe_i2c_ir(struct saa7134_dev *dev);
-void saa7134_ir_start(struct saa7134_dev *dev, struct card_ir *ir);
+int saa7134_ir_start(struct saa7134_dev *dev);
void saa7134_ir_stop(struct saa7134_dev *dev);
diff --git a/drivers/media/video/sh_mobile_ceu_camera.c b/drivers/media/video/sh_mobile_ceu_camera.c
index 1ad980f8e770..4ac3b482fbb4 100644
--- a/drivers/media/video/sh_mobile_ceu_camera.c
+++ b/drivers/media/video/sh_mobile_ceu_camera.c
@@ -115,9 +115,20 @@ struct sh_mobile_ceu_dev {
};
struct sh_mobile_ceu_cam {
- struct v4l2_rect ceu_rect;
- unsigned int cam_width;
- unsigned int cam_height;
+ /* CEU crop offsets within the CEU-scaled camera output */
+ unsigned int ceu_left;
+ unsigned int ceu_top;
+ /* Client output, as seen by the CEU */
+ unsigned int width;
+ unsigned int height;
+ /*
+ * User window from S_CROP / G_CROP, produced by client cropping and
+ * scaling, CEU scaling and CEU cropping, mapped back onto the client
+ * input window
+ */
+ struct v4l2_rect subrect;
+ /* Camera cropping rectangle */
+ struct v4l2_rect rect;
const struct soc_mbus_pixelfmt *extra_fmt;
enum v4l2_mbus_pixelcode code;
};
@@ -213,8 +224,8 @@ static int sh_mobile_ceu_videobuf_setup(struct videobuf_queue *vq,
*count = 2;
if (pcdev->video_limit) {
- while (PAGE_ALIGN(*size) * *count > pcdev->video_limit)
- (*count)--;
+ if (PAGE_ALIGN(*size) * *count > pcdev->video_limit)
+ *count = pcdev->video_limit / PAGE_ALIGN(*size);
}
dev_dbg(icd->dev.parent, "count=%d, size=%d\n", *count, *size);
@@ -565,38 +576,36 @@ static u16 calc_scale(unsigned int src, unsigned int *dst)
}
/* rect is guaranteed to not exceed the scaled camera rectangle */
-static void sh_mobile_ceu_set_rect(struct soc_camera_device *icd,
- unsigned int out_width,
- unsigned int out_height)
+static void sh_mobile_ceu_set_rect(struct soc_camera_device *icd)
{
struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
struct sh_mobile_ceu_cam *cam = icd->host_priv;
- struct v4l2_rect *rect = &cam->ceu_rect;
struct sh_mobile_ceu_dev *pcdev = ici->priv;
unsigned int height, width, cdwdr_width, in_width, in_height;
unsigned int left_offset, top_offset;
u32 camor;
- dev_dbg(icd->dev.parent, "Crop %ux%u@%u:%u\n",
- rect->width, rect->height, rect->left, rect->top);
+ dev_geo(icd->dev.parent, "Crop %ux%u@%u:%u\n",
+ icd->user_width, icd->user_height, cam->ceu_left, cam->ceu_top);
- left_offset = rect->left;
- top_offset = rect->top;
+ left_offset = cam->ceu_left;
+ top_offset = cam->ceu_top;
+ /* CEU cropping (CFSZR) is applied _after_ the scaling filter (CFLCR) */
if (pcdev->image_mode) {
- in_width = rect->width;
+ in_width = cam->width;
if (!pcdev->is_16bit) {
in_width *= 2;
left_offset *= 2;
}
- width = out_width;
- cdwdr_width = out_width;
+ width = icd->user_width;
+ cdwdr_width = icd->user_width;
} else {
- int bytes_per_line = soc_mbus_bytes_per_line(out_width,
+ int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
icd->current_fmt->host_fmt);
unsigned int w_factor;
- width = out_width;
+ width = icd->user_width;
switch (icd->current_fmt->host_fmt->packing) {
case SOC_MBUS_PACKING_2X8_PADHI:
@@ -606,17 +615,17 @@ static void sh_mobile_ceu_set_rect(struct soc_camera_device *icd,
w_factor = 1;
}
- in_width = rect->width * w_factor;
+ in_width = cam->width * w_factor;
left_offset = left_offset * w_factor;
if (bytes_per_line < 0)
- cdwdr_width = out_width;
+ cdwdr_width = icd->user_width;
else
cdwdr_width = bytes_per_line;
}
- height = out_height;
- in_height = rect->height;
+ height = icd->user_height;
+ in_height = cam->height;
if (V4L2_FIELD_NONE != pcdev->field) {
height /= 2;
in_height /= 2;
@@ -775,9 +784,10 @@ static int sh_mobile_ceu_set_bus_param(struct soc_camera_device *icd,
}
ceu_write(pcdev, CAIFR, value);
- sh_mobile_ceu_set_rect(icd, icd->user_width, icd->user_height);
+ sh_mobile_ceu_set_rect(icd);
mdelay(1);
+ dev_geo(icd->dev.parent, "CFLCR 0x%x\n", pcdev->cflcr);
ceu_write(pcdev, CFLCR, pcdev->cflcr);
/*
@@ -866,6 +876,8 @@ static bool sh_mobile_ceu_packing_supported(const struct soc_mbus_pixelfmt *fmt)
fmt->packing == SOC_MBUS_PACKING_EXTEND16);
}
+static int client_g_rect(struct v4l2_subdev *sd, struct v4l2_rect *rect);
+
static int sh_mobile_ceu_get_formats(struct soc_camera_device *icd, int idx,
struct soc_camera_format_xlate *xlate)
{
@@ -894,10 +906,55 @@ static int sh_mobile_ceu_get_formats(struct soc_camera_device *icd, int idx,
return 0;
if (!icd->host_priv) {
+ struct v4l2_mbus_framefmt mf;
+ struct v4l2_rect rect;
+ struct device *dev = icd->dev.parent;
+ int shift = 0;
+
+ /* FIXME: subwindow is lost between close / open */
+
+ /* Cache current client geometry */
+ ret = client_g_rect(sd, &rect);
+ if (ret < 0)
+ return ret;
+
+ /* First time */
+ ret = v4l2_subdev_call(sd, video, g_mbus_fmt, &mf);
+ if (ret < 0)
+ return ret;
+
+ while ((mf.width > 2560 || mf.height > 1920) && shift < 4) {
+ /* Try 2560x1920, 1280x960, 640x480, 320x240 */
+ mf.width = 2560 >> shift;
+ mf.height = 1920 >> shift;
+ ret = v4l2_subdev_call(sd, video, s_mbus_fmt, &mf);
+ if (ret < 0)
+ return ret;
+ shift++;
+ }
+
+ if (shift == 4) {
+ dev_err(dev, "Failed to configure the client below %ux%u\n",
+ mf.width, mf.height);
+ return -EIO;
+ }
+
+ dev_geo(dev, "camera fmt %ux%u\n", mf.width, mf.height);
+
cam = kzalloc(sizeof(*cam), GFP_KERNEL);
if (!cam)
return -ENOMEM;
+ /* We are called with current camera crop, initialise subrect with it */
+ cam->rect = rect;
+ cam->subrect = rect;
+
+ cam->width = mf.width;
+ cam->height = mf.height;
+
icd->host_priv = cam;
} else {
cam = icd->host_priv;
@@ -979,16 +1036,12 @@ static unsigned int scale_down(unsigned int size, unsigned int scale)
return (size * 4096 + scale / 2) / scale;
}
-static unsigned int scale_up(unsigned int size, unsigned int scale)
-{
- return (size * scale + 2048) / 4096;
-}
-
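/* Scale factors are 4096-based fixed point: 4096 corresponds to a 1:1 ratio */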
static unsigned int calc_generic_scale(unsigned int input, unsigned int output)
{
return (input * 4096 + output / 2) / output;
}
+/* Get and store current client crop */
static int client_g_rect(struct v4l2_subdev *sd, struct v4l2_rect *rect)
{
struct v4l2_crop crop;
@@ -1007,25 +1060,51 @@ static int client_g_rect(struct v4l2_subdev *sd, struct v4l2_rect *rect)
cap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
ret = v4l2_subdev_call(sd, video, cropcap, &cap);
- if (ret < 0)
- return ret;
-
- *rect = cap.defrect;
+ if (!ret)
+ *rect = cap.defrect;
return ret;
}
+/* Client crop has changed, update our sub-rectangle to remain within the area */
+static void update_subrect(struct sh_mobile_ceu_cam *cam)
+{
+ struct v4l2_rect *rect = &cam->rect, *subrect = &cam->subrect;
+
+ if (rect->width < subrect->width)
+ subrect->width = rect->width;
+
+ if (rect->height < subrect->height)
+ subrect->height = rect->height;
+
+ if (rect->left > subrect->left)
+ subrect->left = rect->left;
+ else if (rect->left + rect->width >
+ subrect->left + subrect->width)
+ subrect->left = rect->left + rect->width -
+ subrect->width;
+
+ if (rect->top > subrect->top)
+ subrect->top = rect->top;
+ else if (rect->top + rect->height >
+ subrect->top + subrect->height)
+ subrect->top = rect->top + rect->height -
+ subrect->height;
+}
+
/*
* The iterative approach, common to both scaling and cropping, is:
* 1. try if the client can produce exactly what the user requested
* 2. if (1) failed, try to double the client image until we get one big enough
* 3. if (2) failed, try to request the maximum image
*/
-static int client_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *crop,
+static int client_s_crop(struct soc_camera_device *icd, struct v4l2_crop *crop,
struct v4l2_crop *cam_crop)
{
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
struct v4l2_rect *rect = &crop->c, *cam_rect = &cam_crop->c;
struct device *dev = sd->v4l2_dev->dev;
+ struct sh_mobile_ceu_cam *cam = icd->host_priv;
struct v4l2_cropcap cap;
int ret;
unsigned int width, height;
@@ -1041,13 +1120,14 @@ static int client_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *crop,
*/
if (!memcmp(rect, cam_rect, sizeof(*rect))) {
/* Even if camera S_CROP failed, the resulting camera rectangle matches the request */
- dev_dbg(dev, "Camera S_CROP successful for %ux%u@%u:%u\n",
+ dev_dbg(dev, "Camera S_CROP successful for %dx%d@%d:%d\n",
rect->width, rect->height, rect->left, rect->top);
+ cam->rect = *cam_rect;
return 0;
}
/* Try to fix cropping, that camera hasn't managed to set */
- dev_geo(dev, "Fix camera S_CROP for %ux%u@%u:%u to %ux%u@%u:%u\n",
+ dev_geo(dev, "Fix camera S_CROP for %dx%d@%d:%d to %dx%d@%d:%d\n",
cam_rect->width, cam_rect->height,
cam_rect->left, cam_rect->top,
rect->width, rect->height, rect->left, rect->top);
@@ -1057,6 +1137,7 @@ static int client_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *crop,
if (ret < 0)
return ret;
+ /* Put user requested rectangle within sensor bounds */
soc_camera_limit_side(&rect->left, &rect->width, cap.bounds.left, 2,
cap.bounds.width);
soc_camera_limit_side(&rect->top, &rect->height, cap.bounds.top, 4,
@@ -1069,6 +1150,10 @@ static int client_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *crop,
width = max(cam_rect->width, 2);
height = max(cam_rect->height, 2);
+ /*
+ * Loop as long as sensor is not covering the requested rectangle and
+ * is still within its bounds
+ */
while (!ret && (is_smaller(cam_rect, rect) ||
is_inside(cam_rect, rect)) &&
(cap.bounds.width > width || cap.bounds.height > height)) {
@@ -1086,6 +1171,7 @@ static int client_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *crop,
* target left, set it to the middle point between the current
* left and minimum left. But that would add too much
* complexity: we would have to iterate each border separately.
+ * Instead we just drop to the left and top bounds.
*/
if (cam_rect->left > rect->left)
cam_rect->left = cap.bounds.left;
@@ -1103,7 +1189,7 @@ static int client_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *crop,
v4l2_subdev_call(sd, video, s_crop, cam_crop);
ret = client_g_rect(sd, cam_rect);
- dev_geo(dev, "Camera S_CROP %d for %ux%u@%u:%u\n", ret,
+ dev_geo(dev, "Camera S_CROP %d for %dx%d@%d:%d\n", ret,
cam_rect->width, cam_rect->height,
cam_rect->left, cam_rect->top);
}
@@ -1117,82 +1203,24 @@ static int client_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *crop,
*cam_rect = cap.bounds;
v4l2_subdev_call(sd, video, s_crop, cam_crop);
ret = client_g_rect(sd, cam_rect);
- dev_geo(dev, "Camera S_CROP %d for max %ux%u@%u:%u\n", ret,
+ dev_geo(dev, "Camera S_CROP %d for max %dx%d@%d:%d\n", ret,
cam_rect->width, cam_rect->height,
cam_rect->left, cam_rect->top);
}
- return ret;
-}
-
-static int get_camera_scales(struct v4l2_subdev *sd, struct v4l2_rect *rect,
- unsigned int *scale_h, unsigned int *scale_v)
-{
- struct v4l2_mbus_framefmt mf;
- int ret;
-
- ret = v4l2_subdev_call(sd, video, g_mbus_fmt, &mf);
- if (ret < 0)
- return ret;
-
- *scale_h = calc_generic_scale(rect->width, mf.width);
- *scale_v = calc_generic_scale(rect->height, mf.height);
-
- return 0;
-}
-
-static int get_camera_subwin(struct soc_camera_device *icd,
- struct v4l2_rect *cam_subrect,
- unsigned int cam_hscale, unsigned int cam_vscale)
-{
- struct sh_mobile_ceu_cam *cam = icd->host_priv;
- struct v4l2_rect *ceu_rect = &cam->ceu_rect;
-
- if (!ceu_rect->width) {
- struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
- struct device *dev = icd->dev.parent;
- struct v4l2_mbus_framefmt mf;
- int ret;
- /* First time */
-
- ret = v4l2_subdev_call(sd, video, g_mbus_fmt, &mf);
- if (ret < 0)
- return ret;
-
- dev_geo(dev, "camera fmt %ux%u\n", mf.width, mf.height);
-
- if (mf.width > 2560) {
- ceu_rect->width = 2560;
- ceu_rect->left = (mf.width - 2560) / 2;
- } else {
- ceu_rect->width = mf.width;
- ceu_rect->left = 0;
- }
-
- if (mf.height > 1920) {
- ceu_rect->height = 1920;
- ceu_rect->top = (mf.height - 1920) / 2;
- } else {
- ceu_rect->height = mf.height;
- ceu_rect->top = 0;
- }
-
- dev_geo(dev, "initialised CEU rect %ux%u@%u:%u\n",
- ceu_rect->width, ceu_rect->height,
- ceu_rect->left, ceu_rect->top);
+ if (!ret) {
+ cam->rect = *cam_rect;
+ update_subrect(cam);
}
- cam_subrect->width = scale_up(ceu_rect->width, cam_hscale);
- cam_subrect->left = scale_up(ceu_rect->left, cam_hscale);
- cam_subrect->height = scale_up(ceu_rect->height, cam_vscale);
- cam_subrect->top = scale_up(ceu_rect->top, cam_vscale);
-
- return 0;
+ return ret;
}
+/* Iterative s_mbus_fmt, also updates cached client crop on success */
static int client_s_fmt(struct soc_camera_device *icd,
struct v4l2_mbus_framefmt *mf, bool ceu_can_scale)
{
+ struct sh_mobile_ceu_cam *cam = icd->host_priv;
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
struct device *dev = icd->dev.parent;
unsigned int width = mf->width, height = mf->height, tmp_w, tmp_h;
@@ -1200,6 +1228,15 @@ static int client_s_fmt(struct soc_camera_device *icd,
struct v4l2_cropcap cap;
int ret;
+ ret = v4l2_subdev_call(sd, video, s_mbus_fmt, mf);
+ if (ret < 0)
+ return ret;
+
+ dev_geo(dev, "camera scaled to %ux%u\n", mf->width, mf->height);
+
+ if ((width == mf->width && height == mf->height) || !ceu_can_scale)
+ goto update_cache;
+
cap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
ret = v4l2_subdev_call(sd, video, cropcap, &cap);
@@ -1209,15 +1246,6 @@ static int client_s_fmt(struct soc_camera_device *icd,
max_width = min(cap.bounds.width, 2560);
max_height = min(cap.bounds.height, 1920);
- ret = v4l2_subdev_call(sd, video, s_mbus_fmt, mf);
- if (ret < 0)
- return ret;
-
- dev_geo(dev, "camera scaled to %ux%u\n", mf->width, mf->height);
-
- if ((width == mf->width && height == mf->height) || !ceu_can_scale)
- return 0;
-
/* Camera set a format, but geometry is not precise, try to improve */
tmp_w = mf->width;
tmp_h = mf->height;
@@ -1239,26 +1267,37 @@ static int client_s_fmt(struct soc_camera_device *icd,
}
}
+update_cache:
+ /* Update cache */
+ ret = client_g_rect(sd, &cam->rect);
+ if (ret < 0)
+ return ret;
+
+ update_subrect(cam);
+
return 0;
}
/**
- * @rect - camera cropped rectangle
- * @sub_rect - CEU cropped rectangle, mapped back to camera input area
- * @ceu_rect - on output calculated CEU crop rectangle
+ * @width - on output: user width, mapped back to input
+ * @height - on output: user height, mapped back to input
+ * @mf - in/out: camera output window
*/
-static int client_scale(struct soc_camera_device *icd, struct v4l2_rect *rect,
- struct v4l2_rect *sub_rect, struct v4l2_rect *ceu_rect,
- struct v4l2_mbus_framefmt *mf, bool ceu_can_scale)
+static int client_scale(struct soc_camera_device *icd,
+ struct v4l2_mbus_framefmt *mf,
+ unsigned int *width, unsigned int *height,
+ bool ceu_can_scale)
{
- struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
struct sh_mobile_ceu_cam *cam = icd->host_priv;
struct device *dev = icd->dev.parent;
struct v4l2_mbus_framefmt mf_tmp = *mf;
unsigned int scale_h, scale_v;
int ret;
- /* 5. Apply iterative camera S_FMT for camera user window. */
+ /*
+ * 5. Apply iterative camera S_FMT for camera user window (also updates
+ * client crop cache and the imaginary sub-rectangle).
+ */
ret = client_s_fmt(icd, &mf_tmp, ceu_can_scale);
if (ret < 0)
return ret;
@@ -1270,60 +1309,22 @@ static int client_scale(struct soc_camera_device *icd, struct v4l2_rect *rect,
/* unneeded - it is already in "mf_tmp" */
- /* 7. Calculate new camera scales. */
- ret = get_camera_scales(sd, rect, &scale_h, &scale_v);
- if (ret < 0)
- return ret;
-
- dev_geo(dev, "7: camera scales %u:%u\n", scale_h, scale_v);
+ /* 7. Calculate new client scales. */
+ scale_h = calc_generic_scale(cam->rect.width, mf_tmp.width);
+ scale_v = calc_generic_scale(cam->rect.height, mf_tmp.height);
- cam->cam_width = mf_tmp.width;
- cam->cam_height = mf_tmp.height;
mf->width = mf_tmp.width;
mf->height = mf_tmp.height;
mf->colorspace = mf_tmp.colorspace;
/*
* 8. Calculate new CEU crop - apply camera scales to previously
- * calculated "effective" crop.
+ * updated "effective" crop.
*/
- ceu_rect->left = scale_down(sub_rect->left, scale_h);
- ceu_rect->width = scale_down(sub_rect->width, scale_h);
- ceu_rect->top = scale_down(sub_rect->top, scale_v);
- ceu_rect->height = scale_down(sub_rect->height, scale_v);
+ *width = scale_down(cam->subrect.width, scale_h);
+ *height = scale_down(cam->subrect.height, scale_v);
- dev_geo(dev, "8: new CEU rect %ux%u@%u:%u\n",
- ceu_rect->width, ceu_rect->height,
- ceu_rect->left, ceu_rect->top);
-
- return 0;
-}
-
-/* Get combined scales */
-static int get_scales(struct soc_camera_device *icd,
- unsigned int *scale_h, unsigned int *scale_v)
-{
- struct sh_mobile_ceu_cam *cam = icd->host_priv;
- struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
- struct v4l2_crop cam_crop;
- unsigned int width_in, height_in;
- int ret;
-
- cam_crop.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-
- ret = client_g_rect(sd, &cam_crop.c);
- if (ret < 0)
- return ret;
-
- ret = get_camera_scales(sd, &cam_crop.c, scale_h, scale_v);
- if (ret < 0)
- return ret;
-
- width_in = scale_up(cam->ceu_rect.width, *scale_h);
- height_in = scale_up(cam->ceu_rect.height, *scale_v);
-
- *scale_h = calc_generic_scale(width_in, icd->user_width);
- *scale_v = calc_generic_scale(height_in, icd->user_height);
+ dev_geo(dev, "8: new client sub-window %ux%u\n", *width, *height);
return 0;
}
@@ -1342,115 +1343,165 @@ static int sh_mobile_ceu_set_crop(struct soc_camera_device *icd,
struct sh_mobile_ceu_dev *pcdev = ici->priv;
struct v4l2_crop cam_crop;
struct sh_mobile_ceu_cam *cam = icd->host_priv;
- struct v4l2_rect *cam_rect = &cam_crop.c, *ceu_rect = &cam->ceu_rect;
+ struct v4l2_rect *cam_rect = &cam_crop.c;
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
struct device *dev = icd->dev.parent;
struct v4l2_mbus_framefmt mf;
- unsigned int scale_comb_h, scale_comb_v, scale_ceu_h, scale_ceu_v,
- out_width, out_height;
+ unsigned int scale_cam_h, scale_cam_v, scale_ceu_h, scale_ceu_v,
+ out_width, out_height, scale_h, scale_v;
+ int interm_width, interm_height;
u32 capsr, cflcr;
int ret;
- /* 1. Calculate current combined scales. */
- ret = get_scales(icd, &scale_comb_h, &scale_comb_v);
- if (ret < 0)
- return ret;
+ dev_geo(dev, "S_CROP(%ux%u@%u:%u)\n", rect->width, rect->height,
+ rect->left, rect->top);
- dev_geo(dev, "1: combined scales %u:%u\n", scale_comb_h, scale_comb_v);
+ /* During camera cropping its output window can change too, stop CEU */
+ capsr = capture_save_reset(pcdev);
+ dev_dbg(dev, "CAPSR 0x%x, CFLCR 0x%x\n", capsr, pcdev->cflcr);
- /* 2. Apply iterative camera S_CROP for new input window. */
- ret = client_s_crop(sd, a, &cam_crop);
+ /* 1. - 2. Apply iterative camera S_CROP for new input window. */
+ ret = client_s_crop(icd, a, &cam_crop);
if (ret < 0)
return ret;
- dev_geo(dev, "2: camera cropped to %ux%u@%u:%u\n",
+ dev_geo(dev, "1-2: camera cropped to %ux%u@%u:%u\n",
cam_rect->width, cam_rect->height,
cam_rect->left, cam_rect->top);
/* On success cam_crop contains current camera crop */
- /*
- * 3. If old combined scales applied to new crop produce an impossible
- * user window, adjust scales to produce nearest possible window.
- */
- out_width = scale_down(rect->width, scale_comb_h);
- out_height = scale_down(rect->height, scale_comb_v);
-
- if (out_width > 2560)
- out_width = 2560;
- else if (out_width < 2)
- out_width = 2;
-
- if (out_height > 1920)
- out_height = 1920;
- else if (out_height < 4)
- out_height = 4;
-
- dev_geo(dev, "3: Adjusted output %ux%u\n", out_width, out_height);
-
- /* 4. Use G_CROP to retrieve actual input window: already in cam_crop */
-
- /*
- * 5. Using actual input window and calculated combined scales calculate
- * camera target output window.
- */
- mf.width = scale_down(cam_rect->width, scale_comb_h);
- mf.height = scale_down(cam_rect->height, scale_comb_v);
-
- dev_geo(dev, "5: camera target %ux%u\n", mf.width, mf.height);
-
- /* 6. - 9. */
- mf.code = cam->code;
- mf.field = pcdev->field;
-
- capsr = capture_save_reset(pcdev);
- dev_dbg(dev, "CAPSR 0x%x, CFLCR 0x%x\n", capsr, pcdev->cflcr);
+ /* 3. Retrieve camera output window */
+ ret = v4l2_subdev_call(sd, video, g_mbus_fmt, &mf);
+ if (ret < 0)
+ return ret;
- /* Make relative to camera rectangle */
- rect->left -= cam_rect->left;
- rect->top -= cam_rect->top;
+ if (mf.width > 2560 || mf.height > 1920)
+ return -EINVAL;
- ret = client_scale(icd, cam_rect, rect, ceu_rect, &mf,
- pcdev->image_mode &&
- V4L2_FIELD_NONE == pcdev->field);
+ /* Cache camera output window */
+ cam->width = mf.width;
+ cam->height = mf.height;
- dev_geo(dev, "6-9: %d\n", ret);
+ /* 4. Calculate camera scales */
+ scale_cam_h = calc_generic_scale(cam_rect->width, mf.width);
+ scale_cam_v = calc_generic_scale(cam_rect->height, mf.height);
- /* 10. Use CEU cropping to crop to the new window. */
- sh_mobile_ceu_set_rect(icd, out_width, out_height);
+ /* Calculate intermediate window */
+ interm_width = scale_down(rect->width, scale_cam_h);
+ interm_height = scale_down(rect->height, scale_cam_v);
- dev_geo(dev, "10: CEU cropped to %ux%u@%u:%u\n",
- ceu_rect->width, ceu_rect->height,
- ceu_rect->left, ceu_rect->top);
+ if (pcdev->image_mode) {
+ out_width = min(interm_width, icd->user_width);
+ out_height = min(interm_height, icd->user_height);
+ } else {
+ out_width = interm_width;
+ out_height = interm_height;
+ }
/*
- * 11. Calculate CEU scales from camera scales from results of (10) and
- * user window from (3)
+ * 5. Calculate CEU scales between the intermediate window from (4) and
+ * the user window
*/
- scale_ceu_h = calc_scale(ceu_rect->width, &out_width);
- scale_ceu_v = calc_scale(ceu_rect->height, &out_height);
+ scale_ceu_h = calc_scale(interm_width, &out_width);
+ scale_ceu_v = calc_scale(interm_height, &out_height);
- dev_geo(dev, "11: CEU scales %u:%u\n", scale_ceu_h, scale_ceu_v);
+ /* Calculate camera scales */
+ scale_h = calc_generic_scale(cam_rect->width, out_width);
+ scale_v = calc_generic_scale(cam_rect->height, out_height);
- /* 12. Apply CEU scales. */
+ dev_geo(dev, "5: CEU scales %u:%u\n", scale_ceu_h, scale_ceu_v);
+
+ /* Apply CEU scales. */
cflcr = scale_ceu_h | (scale_ceu_v << 16);
if (cflcr != pcdev->cflcr) {
pcdev->cflcr = cflcr;
ceu_write(pcdev, CFLCR, cflcr);
}
+ icd->user_width = out_width;
+ icd->user_height = out_height;
+ cam->ceu_left = scale_down(rect->left - cam_rect->left, scale_h) & ~1;
+ cam->ceu_top = scale_down(rect->top - cam_rect->top, scale_v) & ~1;
+
+ /* 6. Use CEU cropping to crop to the new window. */
+ sh_mobile_ceu_set_rect(icd);
+
+ cam->subrect = *rect;
+
+ dev_geo(dev, "6: CEU cropped to %ux%u@%u:%u\n",
+ icd->user_width, icd->user_height,
+ cam->ceu_left, cam->ceu_top);
+
/* Restore capture */
if (pcdev->active)
capsr |= 1;
capture_restore(pcdev, capsr);
- icd->user_width = out_width;
- icd->user_height = out_height;
-
/* Even if only camera cropping succeeded */
return ret;
}
+static int sh_mobile_ceu_get_crop(struct soc_camera_device *icd,
+ struct v4l2_crop *a)
+{
+ struct sh_mobile_ceu_cam *cam = icd->host_priv;
+
+ a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ a->c = cam->subrect;
+
+ return 0;
+}
+
+/*
+ * Calculate real client output window by applying new scales to the current
+ * client crop. New scales are calculated from the requested output format and
+ * CEU crop, mapped backed onto the client input (subrect).
+ */
+static void calculate_client_output(struct soc_camera_device *icd,
+ struct v4l2_pix_format *pix, struct v4l2_mbus_framefmt *mf)
+{
+ struct sh_mobile_ceu_cam *cam = icd->host_priv;
+ struct device *dev = icd->dev.parent;
+ struct v4l2_rect *cam_subrect = &cam->subrect;
+ unsigned int scale_v, scale_h;
+
+ if (cam_subrect->width == cam->rect.width &&
+ cam_subrect->height == cam->rect.height) {
+ /* No sub-cropping */
+ mf->width = pix->width;
+ mf->height = pix->height;
+ return;
+ }
+
+ /* 1.-2. Current camera scales and subwin - cached. */
+
+ dev_geo(dev, "2: subwin %ux%u@%u:%u\n",
+ cam_subrect->width, cam_subrect->height,
+ cam_subrect->left, cam_subrect->top);
+
+ /*
+ * 3. Calculate new combined scales from input sub-window to requested
+ * user window.
+ */
+
+ /*
+ * TODO: CEU cannot scale images larger than VGA to smaller than SubQCIF
+ * (128x96) or larger than VGA
+ */
+ scale_h = calc_generic_scale(cam_subrect->width, pix->width);
+ scale_v = calc_generic_scale(cam_subrect->height, pix->height);
+
+ dev_geo(dev, "3: scales %u:%u\n", scale_h, scale_v);
+
+ /*
+ * 4. Calculate client output window by applying combined scales to real
+ * input window.
+ */
+ mf->width = scale_down(cam->rect.width, scale_h);
+ mf->height = scale_down(cam->rect.height, scale_v);
+}
+
/* Similar to set_crop multistage iterative algorithm */
static int sh_mobile_ceu_set_fmt(struct soc_camera_device *icd,
struct v4l2_format *f)
@@ -1460,18 +1511,17 @@ static int sh_mobile_ceu_set_fmt(struct soc_camera_device *icd,
struct sh_mobile_ceu_cam *cam = icd->host_priv;
struct v4l2_pix_format *pix = &f->fmt.pix;
struct v4l2_mbus_framefmt mf;
- struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
struct device *dev = icd->dev.parent;
__u32 pixfmt = pix->pixelformat;
const struct soc_camera_format_xlate *xlate;
- struct v4l2_crop cam_crop;
- struct v4l2_rect *cam_rect = &cam_crop.c, cam_subrect, ceu_rect;
- unsigned int scale_cam_h, scale_cam_v;
+ unsigned int ceu_sub_width, ceu_sub_height;
u16 scale_v, scale_h;
int ret;
bool image_mode;
enum v4l2_field field;
+ dev_geo(dev, "S_FMT(pix=0x%x, %ux%u)\n", pixfmt, pix->width, pix->height);
+
switch (pix->field) {
default:
pix->field = V4L2_FIELD_NONE;
@@ -1492,46 +1542,8 @@ static int sh_mobile_ceu_set_fmt(struct soc_camera_device *icd,
return -EINVAL;
}
- /* 1. Calculate current camera scales. */
- cam_crop.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-
- ret = client_g_rect(sd, cam_rect);
- if (ret < 0)
- return ret;
-
- ret = get_camera_scales(sd, cam_rect, &scale_cam_h, &scale_cam_v);
- if (ret < 0)
- return ret;
-
- dev_geo(dev, "1: camera scales %u:%u\n", scale_cam_h, scale_cam_v);
-
- /*
- * 2. Calculate "effective" input crop (sensor subwindow) - CEU crop
- * scaled back at current camera scales onto input window.
- */
- ret = get_camera_subwin(icd, &cam_subrect, scale_cam_h, scale_cam_v);
- if (ret < 0)
- return ret;
-
- dev_geo(dev, "2: subwin %ux%u@%u:%u\n",
- cam_subrect.width, cam_subrect.height,
- cam_subrect.left, cam_subrect.top);
-
- /*
- * 3. Calculate new combined scales from "effective" input window to
- * requested user window.
- */
- scale_h = calc_generic_scale(cam_subrect.width, pix->width);
- scale_v = calc_generic_scale(cam_subrect.height, pix->height);
-
- dev_geo(dev, "3: scales %u:%u\n", scale_h, scale_v);
-
- /*
- * 4. Calculate camera output window by applying combined scales to real
- * input window.
- */
- mf.width = scale_down(cam_rect->width, scale_h);
- mf.height = scale_down(cam_rect->height, scale_v);
+ /* 1.-4. Calculate client output geometry */
+ calculate_client_output(icd, &f->fmt.pix, &mf);
mf.field = pix->field;
mf.colorspace = pix->colorspace;
mf.code = xlate->code;
@@ -1547,17 +1559,17 @@ static int sh_mobile_ceu_set_fmt(struct soc_camera_device *icd,
image_mode = false;
}
- dev_geo(dev, "4: camera output %ux%u\n", mf.width, mf.height);
+ dev_geo(dev, "4: request camera output %ux%u\n", mf.width, mf.height);
/* 5. - 9. */
- ret = client_scale(icd, cam_rect, &cam_subrect, &ceu_rect, &mf,
+ ret = client_scale(icd, &mf, &ceu_sub_width, &ceu_sub_height,
image_mode && V4L2_FIELD_NONE == field);
- dev_geo(dev, "5-9: client scale %d\n", ret);
+ dev_geo(dev, "5-9: client scale return %d\n", ret);
/* Done with the camera. Now see if we can improve the result */
- dev_dbg(dev, "Camera %d fmt %ux%u, requested %ux%u\n",
+ dev_geo(dev, "Camera %d fmt %ux%u, requested %ux%u\n",
ret, mf.width, mf.height, pix->width, pix->height);
if (ret < 0)
return ret;
@@ -1565,40 +1577,44 @@ static int sh_mobile_ceu_set_fmt(struct soc_camera_device *icd,
if (mf.code != xlate->code)
return -EINVAL;
+ /* 9. Prepare CEU crop */
+ cam->width = mf.width;
+ cam->height = mf.height;
+
/* 10. Use CEU scaling to scale to the requested user window. */
/* We cannot scale up */
- if (pix->width > mf.width)
- pix->width = mf.width;
- if (pix->width > ceu_rect.width)
- pix->width = ceu_rect.width;
+ if (pix->width > ceu_sub_width)
+ ceu_sub_width = pix->width;
- if (pix->height > mf.height)
- pix->height = mf.height;
- if (pix->height > ceu_rect.height)
- pix->height = ceu_rect.height;
+ if (pix->height > ceu_sub_height)
+ ceu_sub_height = pix->height;
pix->colorspace = mf.colorspace;
if (image_mode) {
/* Scale pix->{width x height} down to width x height */
- scale_h = calc_scale(ceu_rect.width, &pix->width);
- scale_v = calc_scale(ceu_rect.height, &pix->height);
-
- pcdev->cflcr = scale_h | (scale_v << 16);
+ scale_h = calc_scale(ceu_sub_width, &pix->width);
+ scale_v = calc_scale(ceu_sub_height, &pix->height);
} else {
- pix->width = ceu_rect.width;
- pix->height = ceu_rect.height;
- scale_h = scale_v = 0;
- pcdev->cflcr = 0;
+ pix->width = ceu_sub_width;
+ pix->height = ceu_sub_height;
+ scale_h = 0;
+ scale_v = 0;
}
+ pcdev->cflcr = scale_h | (scale_v << 16);
+
+ /*
+ * We have calculated CFLCR, the actual configuration will be performed
+ * in sh_mobile_ceu_set_bus_param()
+ */
+
dev_geo(dev, "10: W: %u : 0x%x = %u, H: %u : 0x%x = %u\n",
- ceu_rect.width, scale_h, pix->width,
- ceu_rect.height, scale_v, pix->height);
+ ceu_sub_width, scale_h, pix->width,
+ ceu_sub_height, scale_v, pix->height);
cam->code = xlate->code;
- cam->ceu_rect = ceu_rect;
icd->current_fmt = xlate;
pcdev->field = field;
@@ -1820,6 +1836,7 @@ static struct soc_camera_host_ops sh_mobile_ceu_host_ops = {
.remove = sh_mobile_ceu_remove_device,
.get_formats = sh_mobile_ceu_get_formats,
.put_formats = sh_mobile_ceu_put_formats,
+ .get_crop = sh_mobile_ceu_get_crop,
.set_crop = sh_mobile_ceu_set_crop,
.set_fmt = sh_mobile_ceu_set_fmt,
.try_fmt = sh_mobile_ceu_try_fmt,
diff --git a/drivers/media/video/sh_vou.c b/drivers/media/video/sh_vou.c
new file mode 100644
index 000000000000..f5b892a2a8ee
--- /dev/null
+++ b/drivers/media/video/sh_vou.c
@@ -0,0 +1,1476 @@
+/*
+ * SuperH Video Output Unit (VOU) driver
+ *
+ * Copyright (C) 2010, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/version.h>
+#include <linux/videodev2.h>
+
+#include <media/sh_vou.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mediabus.h>
+#include <media/videobuf-dma-contig.h>
+
+/* Mirror addresses are not available for all registers */
+#define VOUER 0
+#define VOUCR 4
+#define VOUSTR 8
+#define VOUVCR 0xc
+#define VOUISR 0x10
+#define VOUBCR 0x14
+#define VOUDPR 0x18
+#define VOUDSR 0x1c
+#define VOUVPR 0x20
+#define VOUIR 0x24
+#define VOUSRR 0x28
+#define VOUMSR 0x2c
+#define VOUHIR 0x30
+#define VOUDFR 0x34
+#define VOUAD1R 0x38
+#define VOUAD2R 0x3c
+#define VOUAIR 0x40
+#define VOUSWR 0x44
+#define VOURCR 0x48
+#define VOURPR 0x50
+
+enum sh_vou_status {
+ SH_VOU_IDLE,
+ SH_VOU_INITIALISING,
+ SH_VOU_RUNNING,
+};
+
+#define VOU_MAX_IMAGE_WIDTH 720
+#define VOU_MAX_IMAGE_HEIGHT 480
+
+struct sh_vou_device {
+ struct v4l2_device v4l2_dev;
+ struct video_device *vdev;
+ atomic_t use_count;
+ struct sh_vou_pdata *pdata;
+ spinlock_t lock;
+ void __iomem *base;
+ /* State information */
+ struct v4l2_pix_format pix;
+ struct v4l2_rect rect;
+ struct list_head queue;
+ v4l2_std_id std;
+ int pix_idx;
+ struct videobuf_buffer *active;
+ enum sh_vou_status status;
+};
+
+struct sh_vou_file {
+ struct videobuf_queue vbq;
+};
+
+/* Register access routines for sides A, B and mirror addresses */
+static void sh_vou_reg_a_write(struct sh_vou_device *vou_dev, unsigned int reg,
+ u32 value)
+{
+ __raw_writel(value, vou_dev->base + reg);
+}
+
+static void sh_vou_reg_ab_write(struct sh_vou_device *vou_dev, unsigned int reg,
+ u32 value)
+{
+ __raw_writel(value, vou_dev->base + reg);
+ __raw_writel(value, vou_dev->base + reg + 0x1000);
+}
+
+static void sh_vou_reg_m_write(struct sh_vou_device *vou_dev, unsigned int reg,
+ u32 value)
+{
+ __raw_writel(value, vou_dev->base + reg + 0x2000);
+}
+
+static u32 sh_vou_reg_a_read(struct sh_vou_device *vou_dev, unsigned int reg)
+{
+ return __raw_readl(vou_dev->base + reg);
+}
+
+static void sh_vou_reg_a_set(struct sh_vou_device *vou_dev, unsigned int reg,
+ u32 value, u32 mask)
+{
+ u32 old = __raw_readl(vou_dev->base + reg);
+
+ value = (value & mask) | (old & ~mask);
+ __raw_writel(value, vou_dev->base + reg);
+}
+
+static void sh_vou_reg_b_set(struct sh_vou_device *vou_dev, unsigned int reg,
+ u32 value, u32 mask)
+{
+ sh_vou_reg_a_set(vou_dev, reg + 0x1000, value, mask);
+}
+
+static void sh_vou_reg_ab_set(struct sh_vou_device *vou_dev, unsigned int reg,
+ u32 value, u32 mask)
+{
+ sh_vou_reg_a_set(vou_dev, reg, value, mask);
+ sh_vou_reg_b_set(vou_dev, reg, value, mask);
+}
+
+struct sh_vou_fmt {
+ u32 pfmt;
+ char *desc;
+ unsigned char bpp;
+ unsigned char rgb;
+ unsigned char yf;
+ unsigned char pkf;
+};
+
+/* Further pixel formats can be added */
+static struct sh_vou_fmt vou_fmt[] = {
+ {
+ .pfmt = V4L2_PIX_FMT_NV12,
+ .bpp = 12,
+ .desc = "YVU420 planar",
+ .yf = 0,
+ .rgb = 0,
+ },
+ {
+ .pfmt = V4L2_PIX_FMT_NV16,
+ .bpp = 16,
+ .desc = "YVYU planar",
+ .yf = 1,
+ .rgb = 0,
+ },
+ {
+ .pfmt = V4L2_PIX_FMT_RGB24,
+ .bpp = 24,
+ .desc = "RGB24",
+ .pkf = 2,
+ .rgb = 1,
+ },
+ {
+ .pfmt = V4L2_PIX_FMT_RGB565,
+ .bpp = 16,
+ .desc = "RGB565",
+ .pkf = 3,
+ .rgb = 1,
+ },
+ {
+ .pfmt = V4L2_PIX_FMT_RGB565X,
+ .bpp = 16,
+ .desc = "RGB565 byteswapped",
+ .pkf = 3,
+ .rgb = 1,
+ },
+};
+
+static void sh_vou_schedule_next(struct sh_vou_device *vou_dev,
+ struct videobuf_buffer *vb)
+{
+ dma_addr_t addr1, addr2;
+
+ addr1 = videobuf_to_dma_contig(vb);
+ switch (vou_dev->pix.pixelformat) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV16:
+ addr2 = addr1 + vou_dev->pix.width * vou_dev->pix.height;
+ break;
+ default:
+ addr2 = 0;
+ }
+
+ sh_vou_reg_m_write(vou_dev, VOUAD1R, addr1);
+ sh_vou_reg_m_write(vou_dev, VOUAD2R, addr2);
+}
+
+static void sh_vou_stream_start(struct sh_vou_device *vou_dev,
+ struct videobuf_buffer *vb)
+{
+ unsigned int row_coeff;
+#ifdef __LITTLE_ENDIAN
+ u32 dataswap = 7;
+#else
+ u32 dataswap = 0;
+#endif
+
+ switch (vou_dev->pix.pixelformat) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV16:
+ row_coeff = 1;
+ break;
+ case V4L2_PIX_FMT_RGB565:
+ dataswap ^= 1;
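+ /* fall through: RGB565 shares the 2-byte row coefficient with RGB565X */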
+ case V4L2_PIX_FMT_RGB565X:
+ row_coeff = 2;
+ break;
+ case V4L2_PIX_FMT_RGB24:
+ row_coeff = 3;
+ break;
+ }
+
+ sh_vou_reg_a_write(vou_dev, VOUSWR, dataswap);
+ sh_vou_reg_ab_write(vou_dev, VOUAIR, vou_dev->pix.width * row_coeff);
+ sh_vou_schedule_next(vou_dev, vb);
+}
+
+static void free_buffer(struct videobuf_queue *vq, struct videobuf_buffer *vb)
+{
+ BUG_ON(in_interrupt());
+
+ /* Wait until this buffer is no longer in STATE_QUEUED or STATE_ACTIVE */
+ videobuf_waiton(vb, 0, 0);
+ videobuf_dma_contig_free(vq, vb);
+ vb->state = VIDEOBUF_NEEDS_INIT;
+}
+
+/* Locking: caller holds vq->vb_lock mutex */
+static int sh_vou_buf_setup(struct videobuf_queue *vq, unsigned int *count,
+ unsigned int *size)
+{
+ struct video_device *vdev = vq->priv_data;
+ struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+
+ *size = vou_fmt[vou_dev->pix_idx].bpp * vou_dev->pix.width *
+ vou_dev->pix.height / 8;
+
+ if (*count < 2)
+ *count = 2;
+
+ /* Taking into account maximum frame size, *count will stay >= 2 */
+ if (PAGE_ALIGN(*size) * *count > 4 * 1024 * 1024)
+ *count = 4 * 1024 * 1024 / PAGE_ALIGN(*size);
+
+ dev_dbg(vq->dev, "%s(): count=%d, size=%d\n", __func__, *count, *size);
+
+ return 0;
+}
+
+/* Locking: caller holds vq->vb_lock mutex */
+static int sh_vou_buf_prepare(struct videobuf_queue *vq,
+ struct videobuf_buffer *vb,
+ enum v4l2_field field)
+{
+ struct video_device *vdev = vq->priv_data;
+ struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+ struct v4l2_pix_format *pix = &vou_dev->pix;
+ int bytes_per_line = vou_fmt[vou_dev->pix_idx].bpp * pix->width / 8;
+ int ret;
+
+ dev_dbg(vq->dev, "%s()\n", __func__);
+
+ if (vb->width != pix->width ||
+ vb->height != pix->height ||
+ vb->field != pix->field) {
+ vb->width = pix->width;
+ vb->height = pix->height;
+ vb->field = field;
+ if (vb->state != VIDEOBUF_NEEDS_INIT)
+ free_buffer(vq, vb);
+ }
+
+ vb->size = vb->height * bytes_per_line;
+ if (vb->baddr && vb->bsize < vb->size) {
+ /* User buffer too small */
+ dev_warn(vq->dev, "User buffer too small: [%u] @ %lx\n",
+ vb->bsize, vb->baddr);
+ return -EINVAL;
+ }
+
+ if (vb->state == VIDEOBUF_NEEDS_INIT) {
+ ret = videobuf_iolock(vq, vb, NULL);
+ if (ret < 0) {
+ dev_warn(vq->dev, "IOLOCK buf-type %d: %d\n",
+ vb->memory, ret);
+ return ret;
+ }
+ vb->state = VIDEOBUF_PREPARED;
+ }
+
+ dev_dbg(vq->dev,
+ "%s(): fmt #%d, %u bytes per line, phys 0x%x, type %d, state %d\n",
+ __func__, vou_dev->pix_idx, bytes_per_line,
+ videobuf_to_dma_contig(vb), vb->memory, vb->state);
+
+ return 0;
+}
+
+/* Locking: caller holds vq->vb_lock mutex and vq->irqlock spinlock */
+static void sh_vou_buf_queue(struct videobuf_queue *vq,
+ struct videobuf_buffer *vb)
+{
+ struct video_device *vdev = vq->priv_data;
+ struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+
+ dev_dbg(vq->dev, "%s()\n", __func__);
+
+ vb->state = VIDEOBUF_QUEUED;
+ list_add_tail(&vb->queue, &vou_dev->queue);
+
+ if (vou_dev->status == SH_VOU_RUNNING) {
+ return;
+ } else if (!vou_dev->active) {
+ vou_dev->active = vb;
+ /* Start from side A: we use mirror addresses, so, set B */
+ sh_vou_reg_a_write(vou_dev, VOURPR, 1);
+ dev_dbg(vq->dev, "%s: first buffer status 0x%x\n", __func__,
+ sh_vou_reg_a_read(vou_dev, VOUSTR));
+ sh_vou_schedule_next(vou_dev, vb);
+ /* Only activate VOU after the second buffer */
+ } else if (vou_dev->active->queue.next == &vb->queue) {
+ /* Second buffer - initialise register side B */
+ sh_vou_reg_a_write(vou_dev, VOURPR, 0);
+ sh_vou_stream_start(vou_dev, vb);
+
+ /* Register side switching with frame VSYNC */
+ sh_vou_reg_a_write(vou_dev, VOURCR, 5);
+ dev_dbg(vq->dev, "%s: second buffer status 0x%x\n", __func__,
+ sh_vou_reg_a_read(vou_dev, VOUSTR));
+
+ /* Enable End-of-Frame (VSYNC) interrupts */
+ sh_vou_reg_a_write(vou_dev, VOUIR, 0x10004);
+ /* Two buffers on the queue - activate the hardware */
+
+ vou_dev->status = SH_VOU_RUNNING;
+ sh_vou_reg_a_write(vou_dev, VOUER, 0x107);
+ }
+}
+
+static void sh_vou_buf_release(struct videobuf_queue *vq,
+ struct videobuf_buffer *vb)
+{
+ struct video_device *vdev = vq->priv_data;
+ struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+ unsigned long flags;
+
+ dev_dbg(vq->dev, "%s()\n", __func__);
+
+ spin_lock_irqsave(&vou_dev->lock, flags);
+
+ if (vou_dev->active == vb) {
+ /* disable output */
+ sh_vou_reg_a_set(vou_dev, VOUER, 0, 1);
+ /* ...but the current frame will complete */
+ sh_vou_reg_a_set(vou_dev, VOUIR, 0, 0x30000);
+ vou_dev->active = NULL;
+ }
+
+ if ((vb->state == VIDEOBUF_ACTIVE || vb->state == VIDEOBUF_QUEUED)) {
+ vb->state = VIDEOBUF_ERROR;
+ list_del(&vb->queue);
+ }
+
+ spin_unlock_irqrestore(&vou_dev->lock, flags);
+
+ free_buffer(vq, vb);
+}
+
+static struct videobuf_queue_ops sh_vou_video_qops = {
+ .buf_setup = sh_vou_buf_setup,
+ .buf_prepare = sh_vou_buf_prepare,
+ .buf_queue = sh_vou_buf_queue,
+ .buf_release = sh_vou_buf_release,
+};
+
+/* Video IOCTLs */
+static int sh_vou_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct sh_vou_file *vou_file = priv;
+
+ dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
+
+ strlcpy(cap->card, "SuperH VOU", sizeof(cap->card));
+ cap->version = KERNEL_VERSION(0, 1, 0);
+ cap->capabilities = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
+ return 0;
+}
+
+/* Enumerate formats that the device can accept from the user */
+static int sh_vou_enum_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_fmtdesc *fmt)
+{
+ struct sh_vou_file *vou_file = priv;
+
+ if (fmt->index >= ARRAY_SIZE(vou_fmt))
+ return -EINVAL;
+
+ dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
+
+ fmt->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ strlcpy(fmt->description, vou_fmt[fmt->index].desc,
+ sizeof(fmt->description));
+ fmt->pixelformat = vou_fmt[fmt->index].pfmt;
+
+ return 0;
+}
+
+static int sh_vou_g_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
+
+ fmt->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ fmt->fmt.pix = vou_dev->pix;
+
+ return 0;
+}
+
+static const unsigned char vou_scale_h_num[] = {1, 9, 2, 9, 4};
+static const unsigned char vou_scale_h_den[] = {1, 8, 1, 4, 1};
+static const unsigned char vou_scale_h_fld[] = {0, 2, 1, 3};
+static const unsigned char vou_scale_v_num[] = {1, 2, 4};
+static const unsigned char vou_scale_v_den[] = {1, 1, 1};
+static const unsigned char vou_scale_v_fld[] = {0, 1};
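+
+/*
+ * The tables above list the supported scaling ratios: 1, 9/8, 2, 9/4 and 4
+ * horizontally, 1, 2 and 4 vertically. For example, a 720-pixel wide input
+ * scaled by 9/8 produces an 810-pixel wide output.
+ */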
+
+static void sh_vou_configure_geometry(struct sh_vou_device *vou_dev,
+ int pix_idx, int w_idx, int h_idx)
+{
+ struct sh_vou_fmt *fmt = vou_fmt + pix_idx;
+ unsigned int black_left, black_top, width_max, height_max,
+ frame_in_height, frame_out_height, frame_out_top;
+ struct v4l2_rect *rect = &vou_dev->rect;
+ struct v4l2_pix_format *pix = &vou_dev->pix;
+ u32 vouvcr = 0, dsr_h, dsr_v;
+
+ if (vou_dev->std & V4L2_STD_525_60) {
+ width_max = 858;
+ height_max = 262;
+ } else {
+ width_max = 864;
+ height_max = 312;
+ }
+
+ frame_in_height = pix->height / 2;
+ frame_out_height = rect->height / 2;
+ frame_out_top = rect->top / 2;
+
+ /*
+ * Cropping scheme: max useful image is 720x480, and the total video
+ * area is 858x525 (NTSC) or 864x625 (PAL). AK8813 / 8814 starts
+ * sampling data beginning with fixed 276th (NTSC) / 288th (PAL) clock,
+ * of which the first 33 / 25 clocks HSYNC must be held active. This
+ * has to be configured in CR[HW]. 1 pixel equals 2 clock periods.
+ * This gives CR[HW] = 16 / 12, VPR[HVP] = 138 / 144, which gives
+ * exactly 858 - 138 = 864 - 144 = 720! We call the out-of-display area,
+ * beyond DSR, specified on the left and top by the VPR register "black
+ * pixels" and out-of-image area (DPR) "background pixels." We fix VPR
+ * at 138 / 144 : 20, because that's the HSYNC timing that our first
+ * client requires, and that's exactly what leaves us 720 pixels for the
+ * image; we leave VPR[VVP] at default 20 for now, because the client
+ * doesn't seem to have any special requirements for it. Otherwise we
+ * could also set it to max - 240 = 22 / 72. Thus VPR depends only on
+ * the selected standard, and DPR and DSR are selected according to
+ * cropping. Q: how does the client detect the first valid line? Does
+ * HSYNC stay inactive during invalid (black) lines?
+ */
+ black_left = width_max - VOU_MAX_IMAGE_WIDTH;
+ black_top = 20;
+
+ dsr_h = rect->width + rect->left;
+ dsr_v = frame_out_height + frame_out_top;
+
+ dev_dbg(vou_dev->v4l2_dev.dev,
+ "image %ux%u, black %u:%u, offset %u:%u, display %ux%u\n",
+ pix->width, frame_in_height, black_left, black_top,
+ rect->left, frame_out_top, dsr_h, dsr_v);
+
+ /* VOUISR height - half of a frame height in frame mode */
+ sh_vou_reg_ab_write(vou_dev, VOUISR, (pix->width << 16) | frame_in_height);
+ sh_vou_reg_ab_write(vou_dev, VOUVPR, (black_left << 16) | black_top);
+ sh_vou_reg_ab_write(vou_dev, VOUDPR, (rect->left << 16) | frame_out_top);
+ sh_vou_reg_ab_write(vou_dev, VOUDSR, (dsr_h << 16) | dsr_v);
+
+ /*
+ * if necessary, we could set VOUHIR to
+ * max(black_left + dsr_h, width_max) here
+ */
+
+ if (w_idx)
+ vouvcr |= (1 << 15) | (vou_scale_h_fld[w_idx - 1] << 4);
+ if (h_idx)
+ vouvcr |= (1 << 14) | vou_scale_v_fld[h_idx - 1];
+
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s: scaling 0x%x\n", fmt->desc, vouvcr);
+
+ /* To produce a colour bar for testing set bit 23 of VOUVCR */
+ sh_vou_reg_ab_write(vou_dev, VOUVCR, vouvcr);
+ sh_vou_reg_ab_write(vou_dev, VOUDFR,
+ fmt->pkf | (fmt->yf << 8) | (fmt->rgb << 16));
+}
+
+struct sh_vou_geometry {
+ struct v4l2_rect output;
+ unsigned int in_width;
+ unsigned int in_height;
+ int scale_idx_h;
+ int scale_idx_v;
+};
+
+/*
+ * Find the input geometry that we can use, with VOU scaling, to produce an
+ * output closest to the requested rectangle
+ */
+static void vou_adjust_input(struct sh_vou_geometry *geo, v4l2_std_id std)
+{
+ /* The compiler cannot know that best and idx will indeed be set */
+ unsigned int best_err = UINT_MAX, best = 0, width_max, height_max;
+ int i, idx = 0;
+
+ if (std & V4L2_STD_525_60) {
+ width_max = 858;
+ height_max = 262;
+ } else {
+ width_max = 864;
+ height_max = 312;
+ }
+
+ /* Image width must be a multiple of 4 */
+ v4l_bound_align_image(&geo->in_width, 0, VOU_MAX_IMAGE_WIDTH, 2,
+ &geo->in_height, 0, VOU_MAX_IMAGE_HEIGHT, 1, 0);
+
+ /* Select scales to come as close as possible to the output image */
+ for (i = ARRAY_SIZE(vou_scale_h_num) - 1; i >= 0; i--) {
+ unsigned int err;
+ unsigned int found = geo->output.width * vou_scale_h_den[i] /
+ vou_scale_h_num[i];
+
+ if (found > VOU_MAX_IMAGE_WIDTH)
+ /* scales increase */
+ break;
+
+ err = abs(found - geo->in_width);
+ if (err < best_err) {
+ best_err = err;
+ idx = i;
+ best = found;
+ }
+ if (!err)
+ break;
+ }
+
+ geo->in_width = best;
+ geo->scale_idx_h = idx;
+
+ best_err = UINT_MAX;
+
+ /* This loop can be replaced with one division */
+ for (i = ARRAY_SIZE(vou_scale_v_num) - 1; i >= 0; i--) {
+ unsigned int err;
+ unsigned int found = geo->output.height * vou_scale_v_den[i] /
+ vou_scale_v_num[i];
+
+ if (found > VOU_MAX_IMAGE_HEIGHT)
+ /* scales increase */
+ break;
+
+ err = abs(found - geo->in_height);
+ if (err < best_err) {
+ best_err = err;
+ idx = i;
+ best = found;
+ }
+ if (!err)
+ break;
+ }
+
+ geo->in_height = best;
+ geo->scale_idx_v = idx;
+}
+
+/*
+ * Find the output geometry that we can produce, using VOU scaling, closest
+ * to the requested rectangle
+ */
+static void vou_adjust_output(struct sh_vou_geometry *geo, v4l2_std_id std)
+{
+ unsigned int best_err = UINT_MAX, best, width_max, height_max;
+ int i, idx;
+
+ if (std & V4L2_STD_525_60) {
+ width_max = 858;
+ height_max = 262 * 2;
+ } else {
+ width_max = 864;
+ height_max = 312 * 2;
+ }
+
+ /* Select scales to come as close as possible to the output image */
+ for (i = 0; i < ARRAY_SIZE(vou_scale_h_num); i++) {
+ unsigned int err;
+ unsigned int found = geo->in_width * vou_scale_h_num[i] /
+ vou_scale_h_den[i];
+
+ if (found > VOU_MAX_IMAGE_WIDTH)
+ /* scales increase */
+ break;
+
+ err = abs(found - geo->output.width);
+ if (err < best_err) {
+ best_err = err;
+ idx = i;
+ best = found;
+ }
+ if (!err)
+ break;
+ }
+
+ geo->output.width = best;
+ geo->scale_idx_h = idx;
+ if (geo->output.left + best > width_max)
+ geo->output.left = width_max - best;
+
+ pr_debug("%s(): W %u * %u/%u = %u\n", __func__, geo->in_width,
+ vou_scale_h_num[idx], vou_scale_h_den[idx], best);
+
+ best_err = UINT_MAX;
+
+ /* This loop can be replaced with one division */
+ for (i = 0; i < ARRAY_SIZE(vou_scale_v_num); i++) {
+ unsigned int err;
+ unsigned int found = geo->in_height * vou_scale_v_num[i] /
+ vou_scale_v_den[i];
+
+ if (found > VOU_MAX_IMAGE_HEIGHT)
+ /* scales increase */
+ break;
+
+ err = abs(found - geo->output.height);
+ if (err < best_err) {
+ best_err = err;
+ idx = i;
+ best = found;
+ }
+ if (!err)
+ break;
+ }
+
+ geo->output.height = best;
+ geo->scale_idx_v = idx;
+ if (geo->output.top + best > height_max)
+ geo->output.top = height_max - best;
+
+ pr_debug("%s(): H %u * %u/%u = %u\n", __func__, geo->in_height,
+ vou_scale_v_num[idx], vou_scale_v_den[idx], best);
+}
+
+static int sh_vou_s_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+ struct v4l2_pix_format *pix = &fmt->fmt.pix;
+ int pix_idx;
+ struct sh_vou_geometry geo;
+ struct v4l2_mbus_framefmt mbfmt = {
+ /* Revisit: is this the correct code? */
+ .code = V4L2_MBUS_FMT_YUYV8_2X8_LE,
+ .field = V4L2_FIELD_INTERLACED,
+ .colorspace = V4L2_COLORSPACE_SMPTE170M,
+ };
+ int ret;
+
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s(): %ux%u -> %ux%u\n", __func__,
+ vou_dev->rect.width, vou_dev->rect.height,
+ pix->width, pix->height);
+
+ if (pix->field == V4L2_FIELD_ANY)
+ pix->field = V4L2_FIELD_NONE;
+
+ if (fmt->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
+ pix->field != V4L2_FIELD_NONE)
+ return -EINVAL;
+
+ for (pix_idx = 0; pix_idx < ARRAY_SIZE(vou_fmt); pix_idx++)
+ if (vou_fmt[pix_idx].pfmt == pix->pixelformat)
+ break;
+
+ if (pix_idx == ARRAY_SIZE(vou_fmt))
+ return -EINVAL;
+
+ /* Image width must be a multiple of 4 */
+ v4l_bound_align_image(&pix->width, 0, VOU_MAX_IMAGE_WIDTH, 2,
+ &pix->height, 0, VOU_MAX_IMAGE_HEIGHT, 1, 0);
+
+ geo.in_width = pix->width;
+ geo.in_height = pix->height;
+ geo.output = vou_dev->rect;
+
+ vou_adjust_output(&geo, vou_dev->std);
+
+ mbfmt.width = geo.output.width;
+ mbfmt.height = geo.output.height;
+ ret = v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0, video,
+ s_mbus_fmt, &mbfmt);
+ /* Must be implemented, so, don't check for -ENOIOCTLCMD */
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s(): %ux%u -> %ux%u\n", __func__,
+ geo.output.width, geo.output.height, mbfmt.width, mbfmt.height);
+
+ /* Sanity checks */
+ if ((unsigned)mbfmt.width > VOU_MAX_IMAGE_WIDTH ||
+ (unsigned)mbfmt.height > VOU_MAX_IMAGE_HEIGHT ||
+ mbfmt.code != V4L2_MBUS_FMT_YUYV8_2X8_LE)
+ return -EIO;
+
+ if (mbfmt.width != geo.output.width ||
+ mbfmt.height != geo.output.height) {
+ geo.output.width = mbfmt.width;
+ geo.output.height = mbfmt.height;
+
+ vou_adjust_input(&geo, vou_dev->std);
+ }
+
+ /* We tried to preserve output rectangle, but it could have changed */
+ vou_dev->rect = geo.output;
+ pix->width = geo.in_width;
+ pix->height = geo.in_height;
+
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s(): %ux%u\n", __func__,
+ pix->width, pix->height);
+
+ vou_dev->pix_idx = pix_idx;
+
+ vou_dev->pix = *pix;
+
+ sh_vou_configure_geometry(vou_dev, pix_idx,
+ geo.scale_idx_h, geo.scale_idx_v);
+
+ return 0;
+}
+
+static int sh_vou_try_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct sh_vou_file *vou_file = priv;
+ struct v4l2_pix_format *pix = &fmt->fmt.pix;
+ int i;
+
+ dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
+
+ fmt->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ pix->field = V4L2_FIELD_NONE;
+
+ v4l_bound_align_image(&pix->width, 0, VOU_MAX_IMAGE_WIDTH, 1,
+ &pix->height, 0, VOU_MAX_IMAGE_HEIGHT, 1, 0);
+
+ for (i = 0; i < ARRAY_SIZE(vou_fmt); i++)
+ if (vou_fmt[i].pfmt == pix->pixelformat)
+ return 0;
+
+ pix->pixelformat = vou_fmt[0].pfmt;
+
+ return 0;
+}
+
+static int sh_vou_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *req)
+{
+ struct sh_vou_file *vou_file = priv;
+
+ dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
+
+ if (req->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+
+ return videobuf_reqbufs(&vou_file->vbq, req);
+}
+
+static int sh_vou_querybuf(struct file *file, void *priv,
+ struct v4l2_buffer *b)
+{
+ struct sh_vou_file *vou_file = priv;
+
+ dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
+
+ return videobuf_querybuf(&vou_file->vbq, b);
+}
+
+static int sh_vou_qbuf(struct file *file, void *priv, struct v4l2_buffer *b)
+{
+ struct sh_vou_file *vou_file = priv;
+
+ dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
+
+ return videobuf_qbuf(&vou_file->vbq, b);
+}
+
+static int sh_vou_dqbuf(struct file *file, void *priv, struct v4l2_buffer *b)
+{
+ struct sh_vou_file *vou_file = priv;
+
+ dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
+
+ return videobuf_dqbuf(&vou_file->vbq, b, file->f_flags & O_NONBLOCK);
+}
+
+static int sh_vou_streamon(struct file *file, void *priv,
+ enum v4l2_buf_type buftype)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+ struct sh_vou_file *vou_file = priv;
+ int ret;
+
+ dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
+
+ ret = v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0,
+ video, s_stream, 1);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ return ret;
+
+ /* This calls our .buf_queue() (== sh_vou_buf_queue) */
+ return videobuf_streamon(&vou_file->vbq);
+}
+
+static int sh_vou_streamoff(struct file *file, void *priv,
+ enum v4l2_buf_type buftype)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+ struct sh_vou_file *vou_file = priv;
+
+ dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
+
+ /*
+ * This calls buf_release from the host driver's videobuf_queue_ops for all
+ * remaining buffers. When the last buffer is freed, streaming is stopped.
+ */
+ videobuf_streamoff(&vou_file->vbq);
+ v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0, video, s_stream, 0);
+
+ return 0;
+}
+
+static u32 sh_vou_ntsc_mode(enum sh_vou_bus_fmt bus_fmt)
+{
+ switch (bus_fmt) {
+ default:
+ pr_warning("%s(): Invalid bus-format code %d, using default 8-bit\n",
+ __func__, bus_fmt);
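+ /* fall through - unknown codes are treated as 8-bit */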
+ case SH_VOU_BUS_8BIT:
+ return 1;
+ case SH_VOU_BUS_16BIT:
+ return 0;
+ case SH_VOU_BUS_BT656:
+ return 3;
+ }
+}
+
+static int sh_vou_s_std(struct file *file, void *priv, v4l2_std_id *std_id)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+ int ret;
+
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s(): 0x%llx\n", __func__, *std_id);
+
+ if (*std_id & ~vdev->tvnorms)
+ return -EINVAL;
+
+ ret = v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0, video,
+ s_std_output, *std_id);
+ /* Shall we continue if the subdev doesn't support .s_std_output()? */
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ return ret;
+
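+ /*
+ * VOUCR bits 31:29 select the output mode: the 525/60 value comes from
+ * sh_vou_ntsc_mode(), 5 is used for the 625/50 (PAL) case.
+ */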
+ if (*std_id & V4L2_STD_525_60)
+ sh_vou_reg_ab_set(vou_dev, VOUCR,
+ sh_vou_ntsc_mode(vou_dev->pdata->bus_fmt) << 29, 7 << 29);
+ else
+ sh_vou_reg_ab_set(vou_dev, VOUCR, 5 << 29, 7 << 29);
+
+ vou_dev->std = *std_id;
+
+ return 0;
+}
+
+static int sh_vou_g_std(struct file *file, void *priv, v4l2_std_id *std)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
+
+ *std = vou_dev->std;
+
+ return 0;
+}
+
+static int sh_vou_g_crop(struct file *file, void *fh, struct v4l2_crop *a)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
+
+ a->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ a->c = vou_dev->rect;
+
+ return 0;
+}
+
+/* Assume a dull encoder, do all the work ourselves. */
+static int sh_vou_s_crop(struct file *file, void *fh, struct v4l2_crop *a)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+ struct v4l2_rect *rect = &a->c;
+ struct v4l2_crop sd_crop = {.type = V4L2_BUF_TYPE_VIDEO_OUTPUT};
+ struct v4l2_pix_format *pix = &vou_dev->pix;
+ struct sh_vou_geometry geo;
+ struct v4l2_mbus_framefmt mbfmt = {
+ /* Revisit: is this the correct code? */
+ .code = V4L2_MBUS_FMT_YUYV8_2X8_LE,
+ .field = V4L2_FIELD_INTERLACED,
+ .colorspace = V4L2_COLORSPACE_SMPTE170M,
+ };
+ int ret;
+
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s(): %ux%u@%u:%u\n", __func__,
+ rect->width, rect->height, rect->left, rect->top);
+
+ if (a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+
+ v4l_bound_align_image(&rect->width, 0, VOU_MAX_IMAGE_WIDTH, 1,
+ &rect->height, 0, VOU_MAX_IMAGE_HEIGHT, 1, 0);
+
+ if (rect->width + rect->left > VOU_MAX_IMAGE_WIDTH)
+ rect->left = VOU_MAX_IMAGE_WIDTH - rect->width;
+
+ if (rect->height + rect->top > VOU_MAX_IMAGE_HEIGHT)
+ rect->top = VOU_MAX_IMAGE_HEIGHT - rect->height;
+
+ geo.output = *rect;
+ geo.in_width = pix->width;
+ geo.in_height = pix->height;
+
+ /* Configure the encoder one-to-one, position at 0, ignore errors */
+ sd_crop.c.width = geo.output.width;
+ sd_crop.c.height = geo.output.height;
+ /*
+ * We first issue an S_CROP, so that the subsequent S_FMT delivers the
+ * final encoder configuration.
+ */
+ v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0, video,
+ s_crop, &sd_crop);
+ mbfmt.width = geo.output.width;
+ mbfmt.height = geo.output.height;
+ ret = v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0, video,
+ s_mbus_fmt, &mbfmt);
+ /* Must be implemented, so don't check for -ENOIOCTLCMD */
+ if (ret < 0)
+ return ret;
+
+ /* Sanity checks */
+ if ((unsigned)mbfmt.width > VOU_MAX_IMAGE_WIDTH ||
+ (unsigned)mbfmt.height > VOU_MAX_IMAGE_HEIGHT ||
+ mbfmt.code != V4L2_MBUS_FMT_YUYV8_2X8_LE)
+ return -EIO;
+
+ geo.output.width = mbfmt.width;
+ geo.output.height = mbfmt.height;
+
+ /*
+ * No down-scaling. According to the API, the current call has precedence:
+ * http://v4l2spec.bytesex.org/spec/x1904.htm#AEN1954 paragraph two.
+ */
+ vou_adjust_input(&geo, vou_dev->std);
+
+ /* We tried to preserve output rectangle, but it could have changed */
+ vou_dev->rect = geo.output;
+ pix->width = geo.in_width;
+ pix->height = geo.in_height;
+
+ sh_vou_configure_geometry(vou_dev, vou_dev->pix_idx,
+ geo.scale_idx_h, geo.scale_idx_v);
+
+ return 0;
+}
+
+/*
+ * Total field: NTSC 858 x 2 * 262/263, PAL 864 x 2 * 312/313. The default
+ * rectangle matches the initial register values; its height takes the
+ * interlaced format into account. The actual image can only go up to
+ * 720 x 2 * 240, so VOUVPR can only meaningfully hold values <= 720 and
+ * <= 240 respectively, not <= 864 and <= 312.
+ */
+static int sh_vou_cropcap(struct file *file, void *priv,
+ struct v4l2_cropcap *a)
+{
+ struct sh_vou_file *vou_file = priv;
+
+ dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
+
+ a->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ a->bounds.left = 0;
+ a->bounds.top = 0;
+ a->bounds.width = VOU_MAX_IMAGE_WIDTH;
+ a->bounds.height = VOU_MAX_IMAGE_HEIGHT;
+ /* Default = max, set VOUDPR = 0, which is not the hardware default */
+ a->defrect.left = 0;
+ a->defrect.top = 0;
+ a->defrect.width = VOU_MAX_IMAGE_WIDTH;
+ a->defrect.height = VOU_MAX_IMAGE_HEIGHT;
+ a->pixelaspect.numerator = 1;
+ a->pixelaspect.denominator = 1;
+
+ return 0;
+}
+
+static irqreturn_t sh_vou_isr(int irq, void *dev_id)
+{
+ struct sh_vou_device *vou_dev = dev_id;
+ static unsigned long j;
+ struct videobuf_buffer *vb;
+ static int cnt;
+ static int side;
+ u32 irq_status = sh_vou_reg_a_read(vou_dev, VOUIR), masked;
+ u32 vou_status = sh_vou_reg_a_read(vou_dev, VOUSTR);
+
+ if (!(irq_status & 0x300)) {
+ if (printk_timed_ratelimit(&j, 500))
+ dev_warn(vou_dev->v4l2_dev.dev, "IRQ status 0x%x!\n",
+ irq_status);
+ return IRQ_NONE;
+ }
+
+ spin_lock(&vou_dev->lock);
+ if (!vou_dev->active || list_empty(&vou_dev->queue)) {
+ if (printk_timed_ratelimit(&j, 500))
+ dev_warn(vou_dev->v4l2_dev.dev,
+ "IRQ without active buffer: %x!\n", irq_status);
+ /* Just ack: buf_release will disable further interrupts */
+ sh_vou_reg_a_set(vou_dev, VOUIR, 0, 0x300);
+ spin_unlock(&vou_dev->lock);
+ return IRQ_HANDLED;
+ }
+
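+ /*
+ * Value written back to VOUIR below: keep the other pending bits
+ * (within 0x30304) and drop the 0x300 flags handled here.
+ */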
+ masked = ~(0x300 & irq_status) & irq_status & 0x30304;
+ dev_dbg(vou_dev->v4l2_dev.dev,
+ "IRQ status 0x%x -> 0x%x, VOU status 0x%x, cnt %d\n",
+ irq_status, masked, vou_status, cnt);
+
+ cnt++;
+ side = vou_status & 0x10000;
+
+ /* Clear only set interrupts */
+ sh_vou_reg_a_write(vou_dev, VOUIR, masked);
+
+ vb = vou_dev->active;
+ list_del(&vb->queue);
+
+ vb->state = VIDEOBUF_DONE;
+ do_gettimeofday(&vb->ts);
+ vb->field_count++;
+ wake_up(&vb->done);
+
+ if (list_empty(&vou_dev->queue)) {
+ /* Stop VOU */
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s: queue empty after %d\n",
+ __func__, cnt);
+ sh_vou_reg_a_set(vou_dev, VOUER, 0, 1);
+ vou_dev->active = NULL;
+ vou_dev->status = SH_VOU_INITIALISING;
+ /* Disable End-of-Frame (VSYNC) interrupts */
+ sh_vou_reg_a_set(vou_dev, VOUIR, 0, 0x30000);
+ spin_unlock(&vou_dev->lock);
+ return IRQ_HANDLED;
+ }
+
+ vou_dev->active = list_entry(vou_dev->queue.next,
+ struct videobuf_buffer, queue);
+
+ if (vou_dev->active->queue.next != &vou_dev->queue) {
+ struct videobuf_buffer *new = list_entry(vou_dev->active->queue.next,
+ struct videobuf_buffer, queue);
+ sh_vou_schedule_next(vou_dev, new);
+ }
+
+ spin_unlock(&vou_dev->lock);
+
+ return IRQ_HANDLED;
+}
+
+static int sh_vou_hw_init(struct sh_vou_device *vou_dev)
+{
+ struct sh_vou_pdata *pdata = vou_dev->pdata;
+ u32 voucr = sh_vou_ntsc_mode(pdata->bus_fmt) << 29;
+ int i = 100;
+
+ /* Disable all IRQs */
+ sh_vou_reg_a_write(vou_dev, VOUIR, 0);
+
+ /* Reset VOU interfaces - registers unaffected */
+ sh_vou_reg_a_write(vou_dev, VOUSRR, 0x101);
+ while (--i && (sh_vou_reg_a_read(vou_dev, VOUSRR) & 0x101))
+ udelay(1);
+
+ if (!i)
+ return -ETIMEDOUT;
+
+ dev_dbg(vou_dev->v4l2_dev.dev, "Reset took %dus\n", 100 - i);
+
+ if (pdata->flags & SH_VOU_PCLK_FALLING)
+ voucr |= 1 << 28;
+ if (pdata->flags & SH_VOU_HSYNC_LOW)
+ voucr |= 1 << 27;
+ if (pdata->flags & SH_VOU_VSYNC_LOW)
+ voucr |= 1 << 26;
+ sh_vou_reg_ab_set(vou_dev, VOUCR, voucr, 0xfc000000);
+
+ /* Manual register side switching at first */
+ sh_vou_reg_a_write(vou_dev, VOURCR, 4);
+ /* Default - fixed HSYNC length, can be made configurable if required */
+ sh_vou_reg_ab_write(vou_dev, VOUMSR, 0x800000);
+
+ return 0;
+}
+
+/* File operations */
+static int sh_vou_open(struct file *file)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+ struct sh_vou_file *vou_file = kzalloc(sizeof(struct sh_vou_file),
+ GFP_KERNEL);
+
+ if (!vou_file)
+ return -ENOMEM;
+
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
+
+ file->private_data = vou_file;
+
+ if (atomic_inc_return(&vou_dev->use_count) == 1) {
+ int ret;
+ /* First open */
+ vou_dev->status = SH_VOU_INITIALISING;
+ pm_runtime_get_sync(vdev->v4l2_dev->dev);
+ ret = sh_vou_hw_init(vou_dev);
+ if (ret < 0) {
+ atomic_dec(&vou_dev->use_count);
+ pm_runtime_put(vdev->v4l2_dev->dev);
+ vou_dev->status = SH_VOU_IDLE;
+ return ret;
+ }
+ }
+
+ videobuf_queue_dma_contig_init(&vou_file->vbq, &sh_vou_video_qops,
+ vou_dev->v4l2_dev.dev, &vou_dev->lock,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT,
+ V4L2_FIELD_NONE,
+ sizeof(struct videobuf_buffer), vdev);
+
+ return 0;
+}
+
+static int sh_vou_release(struct file *file)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+ struct sh_vou_file *vou_file = file->private_data;
+
+ dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
+
+ if (!atomic_dec_return(&vou_dev->use_count)) {
+ /* Last close */
+ vou_dev->status = SH_VOU_IDLE;
+ sh_vou_reg_a_set(vou_dev, VOUER, 0, 0x101);
+ pm_runtime_put(vdev->v4l2_dev->dev);
+ }
+
+ file->private_data = NULL;
+ kfree(vou_file);
+
+ return 0;
+}
+
+static int sh_vou_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct sh_vou_file *vou_file = file->private_data;
+
+ dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
+
+ return videobuf_mmap_mapper(&vou_file->vbq, vma);
+}
+
+static unsigned int sh_vou_poll(struct file *file, poll_table *wait)
+{
+ struct sh_vou_file *vou_file = file->private_data;
+
+ dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
+
+ return videobuf_poll_stream(file, &vou_file->vbq, wait);
+}
+
+static int sh_vou_g_chip_ident(struct file *file, void *fh,
+ struct v4l2_dbg_chip_ident *id)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+
+ return v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0, core, g_chip_ident, id);
+}
+
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+static int sh_vou_g_register(struct file *file, void *fh,
+ struct v4l2_dbg_register *reg)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+
+ return v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0, core, g_register, reg);
+}
+
+static int sh_vou_s_register(struct file *file, void *fh,
+ struct v4l2_dbg_register *reg)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
+
+ return v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0, core, s_register, reg);
+}
+#endif
+
+/* sh_vou display ioctl operations */
+static const struct v4l2_ioctl_ops sh_vou_ioctl_ops = {
+ .vidioc_querycap = sh_vou_querycap,
+ .vidioc_enum_fmt_vid_out = sh_vou_enum_fmt_vid_out,
+ .vidioc_g_fmt_vid_out = sh_vou_g_fmt_vid_out,
+ .vidioc_s_fmt_vid_out = sh_vou_s_fmt_vid_out,
+ .vidioc_try_fmt_vid_out = sh_vou_try_fmt_vid_out,
+ .vidioc_reqbufs = sh_vou_reqbufs,
+ .vidioc_querybuf = sh_vou_querybuf,
+ .vidioc_qbuf = sh_vou_qbuf,
+ .vidioc_dqbuf = sh_vou_dqbuf,
+ .vidioc_streamon = sh_vou_streamon,
+ .vidioc_streamoff = sh_vou_streamoff,
+ .vidioc_s_std = sh_vou_s_std,
+ .vidioc_g_std = sh_vou_g_std,
+ .vidioc_cropcap = sh_vou_cropcap,
+ .vidioc_g_crop = sh_vou_g_crop,
+ .vidioc_s_crop = sh_vou_s_crop,
+ .vidioc_g_chip_ident = sh_vou_g_chip_ident,
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ .vidioc_g_register = sh_vou_g_register,
+ .vidioc_s_register = sh_vou_s_register,
+#endif
+};
+
+static const struct v4l2_file_operations sh_vou_fops = {
+ .owner = THIS_MODULE,
+ .open = sh_vou_open,
+ .release = sh_vou_release,
+ .ioctl = video_ioctl2,
+ .mmap = sh_vou_mmap,
+ .poll = sh_vou_poll,
+};
+
+static const struct video_device sh_vou_video_template = {
+ .name = "sh_vou",
+ .fops = &sh_vou_fops,
+ .ioctl_ops = &sh_vou_ioctl_ops,
+ .tvnorms = V4L2_STD_525_60, /* PAL only supported in 8-bit non-bt656 mode */
+ .current_norm = V4L2_STD_NTSC_M,
+};
+
+static int __devinit sh_vou_probe(struct platform_device *pdev)
+{
+ struct sh_vou_pdata *vou_pdata = pdev->dev.platform_data;
+ struct v4l2_rect *rect;
+ struct v4l2_pix_format *pix;
+ struct i2c_adapter *i2c_adap;
+ struct video_device *vdev;
+ struct sh_vou_device *vou_dev;
+ struct resource *reg_res, *region;
+ struct v4l2_subdev *subdev;
+ int irq, ret;
+
+ reg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ irq = platform_get_irq(pdev, 0);
+
+ if (!vou_pdata || !reg_res || irq <= 0) {
+ dev_err(&pdev->dev, "Insufficient VOU platform information.\n");
+ return -ENODEV;
+ }
+
+ vou_dev = kzalloc(sizeof(*vou_dev), GFP_KERNEL);
+ if (!vou_dev)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&vou_dev->queue);
+ spin_lock_init(&vou_dev->lock);
+ atomic_set(&vou_dev->use_count, 0);
+ vou_dev->pdata = vou_pdata;
+ vou_dev->status = SH_VOU_IDLE;
+
+ rect = &vou_dev->rect;
+ pix = &vou_dev->pix;
+
+ /* Fill in defaults */
+ vou_dev->std = sh_vou_video_template.current_norm;
+ rect->left = 0;
+ rect->top = 0;
+ rect->width = VOU_MAX_IMAGE_WIDTH;
+ rect->height = VOU_MAX_IMAGE_HEIGHT;
+ pix->width = VOU_MAX_IMAGE_WIDTH;
+ pix->height = VOU_MAX_IMAGE_HEIGHT;
+ pix->pixelformat = V4L2_PIX_FMT_YVYU;
+ pix->field = V4L2_FIELD_NONE;
+ pix->bytesperline = VOU_MAX_IMAGE_WIDTH * 2;
+ pix->sizeimage = VOU_MAX_IMAGE_WIDTH * 2 * VOU_MAX_IMAGE_HEIGHT;
+ pix->colorspace = V4L2_COLORSPACE_SMPTE170M;
+
+ region = request_mem_region(reg_res->start, resource_size(reg_res),
+ pdev->name);
+ if (!region) {
+ dev_err(&pdev->dev, "VOU region already claimed\n");
+ ret = -EBUSY;
+ goto ereqmemreg;
+ }
+
+ vou_dev->base = ioremap(reg_res->start, resource_size(reg_res));
+ if (!vou_dev->base) {
+ ret = -ENOMEM;
+ goto emap;
+ }
+
+ ret = request_irq(irq, sh_vou_isr, 0, "vou", vou_dev);
+ if (ret < 0)
+ goto ereqirq;
+
+ ret = v4l2_device_register(&pdev->dev, &vou_dev->v4l2_dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Error registering v4l2 device\n");
+ goto ev4l2devreg;
+ }
+
+ /* Allocate memory for video device */
+ vdev = video_device_alloc();
+ if (vdev == NULL) {
+ ret = -ENOMEM;
+ goto evdevalloc;
+ }
+
+ *vdev = sh_vou_video_template;
+ if (vou_pdata->bus_fmt == SH_VOU_BUS_8BIT)
+ vdev->tvnorms |= V4L2_STD_PAL;
+ vdev->v4l2_dev = &vou_dev->v4l2_dev;
+ vdev->release = video_device_release;
+
+ vou_dev->vdev = vdev;
+ video_set_drvdata(vdev, vou_dev);
+
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_resume(&pdev->dev);
+
+ i2c_adap = i2c_get_adapter(vou_pdata->i2c_adap);
+ if (!i2c_adap) {
+ ret = -ENODEV;
+ goto ei2cgadap;
+ }
+
+ ret = sh_vou_hw_init(vou_dev);
+ if (ret < 0)
+ goto ereset;
+
+ subdev = v4l2_i2c_new_subdev_board(&vou_dev->v4l2_dev, i2c_adap,
+ vou_pdata->module_name, vou_pdata->board_info, NULL);
+ if (!subdev) {
+ ret = -ENOMEM;
+ goto ei2cnd;
+ }
+
+ ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+ if (ret < 0)
+ goto evregdev;
+
+ return 0;
+
+evregdev:
+ei2cnd:
+ereset:
+ i2c_put_adapter(i2c_adap);
+ei2cgadap:
+ video_device_release(vdev);
+ pm_runtime_disable(&pdev->dev);
+evdevalloc:
+ v4l2_device_unregister(&vou_dev->v4l2_dev);
+ev4l2devreg:
+ free_irq(irq, vou_dev);
+ereqirq:
+ iounmap(vou_dev->base);
+emap:
+ release_mem_region(reg_res->start, resource_size(reg_res));
+ereqmemreg:
+ kfree(vou_dev);
+ return ret;
+}
+
+static int __devexit sh_vou_remove(struct platform_device *pdev)
+{
+ int irq = platform_get_irq(pdev, 0);
+ struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev);
+ struct sh_vou_device *vou_dev = container_of(v4l2_dev,
+ struct sh_vou_device, v4l2_dev);
+ struct v4l2_subdev *sd = list_entry(v4l2_dev->subdevs.next,
+ struct v4l2_subdev, list);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct resource *reg_res;
+
+ if (irq > 0)
+ free_irq(irq, vou_dev);
+ pm_runtime_disable(&pdev->dev);
+ video_unregister_device(vou_dev->vdev);
+ i2c_put_adapter(client->adapter);
+ v4l2_device_unregister(&vou_dev->v4l2_dev);
+ iounmap(vou_dev->base);
+ reg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (reg_res)
+ release_mem_region(reg_res->start, resource_size(reg_res));
+ kfree(vou_dev);
+ return 0;
+}
+
+static struct platform_driver __refdata sh_vou = {
+ .remove = __devexit_p(sh_vou_remove),
+ .driver = {
+ .name = "sh-vou",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init sh_vou_init(void)
+{
+ return platform_driver_probe(&sh_vou, sh_vou_probe);
+}
+
+static void __exit sh_vou_exit(void)
+{
+ platform_driver_unregister(&sh_vou);
+}
+
+module_init(sh_vou_init);
+module_exit(sh_vou_exit);
+
+MODULE_DESCRIPTION("SuperH VOU driver");
+MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:sh-vou");
diff --git a/drivers/media/video/sn9c102/sn9c102_core.c b/drivers/media/video/sn9c102/sn9c102_core.c
index cbf8087b286f..28e19daadec9 100644
--- a/drivers/media/video/sn9c102/sn9c102_core.c
+++ b/drivers/media/video/sn9c102/sn9c102_core.c
@@ -2295,7 +2295,7 @@ sn9c102_vidioc_s_ctrl(struct sn9c102_device* cam, void __user * arg)
if (copy_from_user(&ctrl, arg, sizeof(ctrl)))
return -EFAULT;
- for (i = 0; i < ARRAY_SIZE(s->qctrl); i++)
+ for (i = 0; i < ARRAY_SIZE(s->qctrl); i++) {
if (ctrl.id == s->qctrl[i].id) {
if (s->qctrl[i].flags & V4L2_CTRL_FLAG_DISABLED)
return -EINVAL;
@@ -2305,7 +2305,9 @@ sn9c102_vidioc_s_ctrl(struct sn9c102_device* cam, void __user * arg)
ctrl.value -= ctrl.value % s->qctrl[i].step;
break;
}
-
+ }
+ if (i == ARRAY_SIZE(s->qctrl))
+ return -EINVAL;
if ((err = s->set_ctrl(cam, &ctrl)))
return err;
diff --git a/drivers/media/video/sn9c102/sn9c102_devtable.h b/drivers/media/video/sn9c102/sn9c102_devtable.h
index cc40d6ba9f22..522ba3f4c285 100644
--- a/drivers/media/video/sn9c102/sn9c102_devtable.h
+++ b/drivers/media/video/sn9c102/sn9c102_devtable.h
@@ -40,12 +40,12 @@ struct sn9c102_device;
static const struct usb_device_id sn9c102_id_table[] = {
/* SN9C101 and SN9C102 */
-#if !defined CONFIG_USB_GSPCA && !defined CONFIG_USB_GSPCA_MODULE
+#if !defined CONFIG_USB_GSPCA_SONIXB && !defined CONFIG_USB_GSPCA_SONIXB_MODULE
{ SN9C102_USB_DEVICE(0x0c45, 0x6001, BRIDGE_SN9C102), },
{ SN9C102_USB_DEVICE(0x0c45, 0x6005, BRIDGE_SN9C102), },
#endif
{ SN9C102_USB_DEVICE(0x0c45, 0x6007, BRIDGE_SN9C102), },
-#if !defined CONFIG_USB_GSPCA && !defined CONFIG_USB_GSPCA_MODULE
+#if !defined CONFIG_USB_GSPCA_SONIXB && !defined CONFIG_USB_GSPCA_SONIXB_MODULE
{ SN9C102_USB_DEVICE(0x0c45, 0x6009, BRIDGE_SN9C102), },
{ SN9C102_USB_DEVICE(0x0c45, 0x600d, BRIDGE_SN9C102), },
/* { SN9C102_USB_DEVICE(0x0c45, 0x6011, BRIDGE_SN9C102), }, OV6650 */
@@ -53,13 +53,13 @@ static const struct usb_device_id sn9c102_id_table[] = {
{ SN9C102_USB_DEVICE(0x0c45, 0x6019, BRIDGE_SN9C102), },
{ SN9C102_USB_DEVICE(0x0c45, 0x6024, BRIDGE_SN9C102), },
{ SN9C102_USB_DEVICE(0x0c45, 0x6025, BRIDGE_SN9C102), },
-#if !defined CONFIG_USB_GSPCA && !defined CONFIG_USB_GSPCA_MODULE
+#if !defined CONFIG_USB_GSPCA_SONIXB && !defined CONFIG_USB_GSPCA_SONIXB_MODULE
{ SN9C102_USB_DEVICE(0x0c45, 0x6028, BRIDGE_SN9C102), },
{ SN9C102_USB_DEVICE(0x0c45, 0x6029, BRIDGE_SN9C102), },
#endif
{ SN9C102_USB_DEVICE(0x0c45, 0x602a, BRIDGE_SN9C102), },
{ SN9C102_USB_DEVICE(0x0c45, 0x602b, BRIDGE_SN9C102), },
-#if !defined CONFIG_USB_GSPCA && !defined CONFIG_USB_GSPCA_MODULE
+#if !defined CONFIG_USB_GSPCA_SONIXB && !defined CONFIG_USB_GSPCA_SONIXB_MODULE
{ SN9C102_USB_DEVICE(0x0c45, 0x602c, BRIDGE_SN9C102), },
/* { SN9C102_USB_DEVICE(0x0c45, 0x602d, BRIDGE_SN9C102), }, HV7131R */
#endif
@@ -74,7 +74,7 @@ static const struct usb_device_id sn9c102_id_table[] = {
{ SN9C102_USB_DEVICE(0x0c45, 0x608b, BRIDGE_SN9C103), },
{ SN9C102_USB_DEVICE(0x0c45, 0x608c, BRIDGE_SN9C103), },
/* { SN9C102_USB_DEVICE(0x0c45, 0x608e, BRIDGE_SN9C103), }, CISVF10 */
-#if !defined CONFIG_USB_GSPCA && !defined CONFIG_USB_GSPCA_MODULE
+#if !defined CONFIG_USB_GSPCA_SONIXB && !defined CONFIG_USB_GSPCA_SONIXB_MODULE
{ SN9C102_USB_DEVICE(0x0c45, 0x608f, BRIDGE_SN9C103), },
#endif
{ SN9C102_USB_DEVICE(0x0c45, 0x60a0, BRIDGE_SN9C103), },
@@ -86,7 +86,7 @@ static const struct usb_device_id sn9c102_id_table[] = {
{ SN9C102_USB_DEVICE(0x0c45, 0x60ac, BRIDGE_SN9C103), },
{ SN9C102_USB_DEVICE(0x0c45, 0x60ae, BRIDGE_SN9C103), },
{ SN9C102_USB_DEVICE(0x0c45, 0x60af, BRIDGE_SN9C103), },
-#if !defined CONFIG_USB_GSPCA && !defined CONFIG_USB_GSPCA_MODULE
+#if !defined CONFIG_USB_GSPCA_SONIXB && !defined CONFIG_USB_GSPCA_SONIXB_MODULE
{ SN9C102_USB_DEVICE(0x0c45, 0x60b0, BRIDGE_SN9C103), },
#endif
{ SN9C102_USB_DEVICE(0x0c45, 0x60b2, BRIDGE_SN9C103), },
@@ -97,7 +97,7 @@ static const struct usb_device_id sn9c102_id_table[] = {
{ SN9C102_USB_DEVICE(0x0c45, 0x60bc, BRIDGE_SN9C103), },
{ SN9C102_USB_DEVICE(0x0c45, 0x60be, BRIDGE_SN9C103), },
/* SN9C105 */
-#if !defined CONFIG_USB_GSPCA && !defined CONFIG_USB_GSPCA_MODULE
+#if !defined CONFIG_USB_GSPCA_SONIXJ && !defined CONFIG_USB_GSPCA_SONIXJ_MODULE
{ SN9C102_USB_DEVICE(0x045e, 0x00f5, BRIDGE_SN9C105), },
{ SN9C102_USB_DEVICE(0x045e, 0x00f7, BRIDGE_SN9C105), },
{ SN9C102_USB_DEVICE(0x0471, 0x0327, BRIDGE_SN9C105), },
@@ -121,11 +121,11 @@ static const struct usb_device_id sn9c102_id_table[] = {
{ SN9C102_USB_DEVICE(0x0c45, 0x610f, BRIDGE_SN9C120), },
{ SN9C102_USB_DEVICE(0x0c45, 0x6130, BRIDGE_SN9C120), },
/* { SN9C102_USB_DEVICE(0x0c45, 0x6138, BRIDGE_SN9C120), }, MO8000 */
-#if !defined CONFIG_USB_GSPCA && !defined CONFIG_USB_GSPCA_MODULE
+#if !defined CONFIG_USB_GSPCA_SONIXJ && !defined CONFIG_USB_GSPCA_SONIXJ_MODULE
{ SN9C102_USB_DEVICE(0x0c45, 0x613a, BRIDGE_SN9C120), },
#endif
{ SN9C102_USB_DEVICE(0x0c45, 0x613b, BRIDGE_SN9C120), },
-#if !defined CONFIG_USB_GSPCA && !defined CONFIG_USB_GSPCA_MODULE
+#if !defined CONFIG_USB_GSPCA_SONIXJ && !defined CONFIG_USB_GSPCA_SONIXJ_MODULE
{ SN9C102_USB_DEVICE(0x0c45, 0x613c, BRIDGE_SN9C120), },
{ SN9C102_USB_DEVICE(0x0c45, 0x613e, BRIDGE_SN9C120), },
#endif
diff --git a/drivers/media/video/sn9c102/sn9c102_hv7131d.c b/drivers/media/video/sn9c102/sn9c102_hv7131d.c
index db2434948939..2dce5c908c8e 100644
--- a/drivers/media/video/sn9c102/sn9c102_hv7131d.c
+++ b/drivers/media/video/sn9c102/sn9c102_hv7131d.c
@@ -255,7 +255,7 @@ int sn9c102_probe_hv7131d(struct sn9c102_device* cam)
if (err || r0 < 0 || r1 < 0)
return -EIO;
- if (r0 != 0x00 || r1 != 0x04)
+ if ((r0 != 0x00 && r0 != 0x01) || r1 != 0x04)
return -ENODEV;
sn9c102_attach_sensor(cam, &hv7131d);
diff --git a/drivers/media/video/soc_camera.c b/drivers/media/video/soc_camera.c
index a24174ddec46..db1ca0e90d76 100644
--- a/drivers/media/video/soc_camera.c
+++ b/drivers/media/video/soc_camera.c
@@ -25,6 +25,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/pm_runtime.h>
#include <linux/vmalloc.h>
#include <media/soc_camera.h>
@@ -388,6 +389,11 @@ static int soc_camera_open(struct file *file)
goto eiciadd;
}
+ pm_runtime_enable(&icd->vdev->dev);
+ ret = pm_runtime_resume(&icd->vdev->dev);
+ if (ret < 0 && ret != -ENOSYS)
+ goto eresume;
+
/*
* Try to configure with default parameters. Notice: this is the
* very first open, so, we cannot race against other calls,
@@ -409,10 +415,12 @@ static int soc_camera_open(struct file *file)
return 0;
/*
- * First five errors are entered with the .video_lock held
+ * First four errors are entered with the .video_lock held
* and use_count == 1
*/
esfmt:
+ pm_runtime_disable(&icd->vdev->dev);
+eresume:
ici->ops->remove(icd);
eiciadd:
if (icl->power)
@@ -437,7 +445,11 @@ static int soc_camera_close(struct file *file)
if (!icd->use_count) {
struct soc_camera_link *icl = to_soc_camera_link(icd);
+ pm_runtime_suspend(&icd->vdev->dev);
+ pm_runtime_disable(&icd->vdev->dev);
+
ici->ops->remove(icd);
+
if (icl->power)
icl->power(icd->pdev, 0);
}
@@ -741,8 +753,7 @@ static int soc_camera_g_crop(struct file *file, void *fh,
/*
* According to the V4L2 API, drivers shall not update the struct v4l2_crop
* argument with the actual geometry, instead, the user shall use G_CROP to
- * retrieve it. However, we expect camera host and client drivers to update
- * the argument, which we then use internally, but do not return to the user.
+ * retrieve it.
*/
static int soc_camera_s_crop(struct file *file, void *fh,
struct v4l2_crop *a)
@@ -1319,6 +1330,7 @@ static int video_dev_create(struct soc_camera_device *icd)
*/
static int soc_camera_video_start(struct soc_camera_device *icd)
{
+ struct device_type *type = icd->vdev->dev.type;
int ret;
if (!icd->dev.parent)
@@ -1335,6 +1347,9 @@ static int soc_camera_video_start(struct soc_camera_device *icd)
return ret;
}
+ /* Restore device type, possibly set by the subdevice driver */
+ icd->vdev->dev.type = type;
+
return 0;
}
diff --git a/drivers/media/video/tlg2300/pd-dvb.c b/drivers/media/video/tlg2300/pd-dvb.c
index ebd9cb5bec74..edd78f8b1baa 100644
--- a/drivers/media/video/tlg2300/pd-dvb.c
+++ b/drivers/media/video/tlg2300/pd-dvb.c
@@ -97,15 +97,17 @@ open_out:
return ret;
}
+#ifdef CONFIG_PM
static void poseidon_fe_release(struct dvb_frontend *fe)
{
struct poseidon *pd = fe->demodulator_priv;
-#ifdef CONFIG_PM
pd->pm_suspend = NULL;
pd->pm_resume = NULL;
-#endif
}
+#else
+#define poseidon_fe_release NULL
+#endif
static s32 poseidon_fe_sleep(struct dvb_frontend *fe)
{
diff --git a/drivers/media/video/tlg2300/pd-main.c b/drivers/media/video/tlg2300/pd-main.c
index 2cf0ebf9f28b..256cc558ba13 100644
--- a/drivers/media/video/tlg2300/pd-main.c
+++ b/drivers/media/video/tlg2300/pd-main.c
@@ -24,7 +24,6 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
@@ -55,8 +54,8 @@ int debug_mode;
module_param(debug_mode, int, 0644);
MODULE_PARM_DESC(debug_mode, "0 = disable, 1 = enable, 2 = verbose");
-const char *firmware_name = "tlg2300_firmware.bin";
-struct usb_driver poseidon_driver;
+static const char *firmware_name = "tlg2300_firmware.bin";
+static struct usb_driver poseidon_driver;
static LIST_HEAD(pd_device_list);
/*
@@ -455,8 +454,8 @@ static int poseidon_probe(struct usb_interface *interface,
device_init_wakeup(&udev->dev, 1);
#ifdef CONFIG_PM
- pd->udev->autosuspend_disabled = 0;
pd->udev->autosuspend_delay = HZ * PM_SUSPEND_DELAY;
+ usb_enable_autosuspend(pd->udev);
if (in_hibernation(pd)) {
INIT_WORK(&pd->pm_work, hibernation_resume);
@@ -501,7 +500,7 @@ static void poseidon_disconnect(struct usb_interface *interface)
kref_put(&pd->kref, poseidon_delete);
}
-struct usb_driver poseidon_driver = {
+static struct usb_driver poseidon_driver = {
.name = "poseidon",
.probe = poseidon_probe,
.disconnect = poseidon_disconnect,
diff --git a/drivers/media/video/tlg2300/pd-radio.c b/drivers/media/video/tlg2300/pd-radio.c
index 755766b15157..fae84c2a0c39 100644
--- a/drivers/media/video/tlg2300/pd-radio.c
+++ b/drivers/media/video/tlg2300/pd-radio.c
@@ -161,7 +161,8 @@ static const struct v4l2_file_operations poseidon_fm_fops = {
.ioctl = video_ioctl2,
};
-int tlg_fm_vidioc_g_tuner(struct file *file, void *priv, struct v4l2_tuner *vt)
+static int tlg_fm_vidioc_g_tuner(struct file *file, void *priv,
+ struct v4l2_tuner *vt)
{
struct tuner_fm_sig_stat_s fm_stat = {};
int ret, status, count = 5;
@@ -203,7 +204,8 @@ int tlg_fm_vidioc_g_tuner(struct file *file, void *priv, struct v4l2_tuner *vt)
return 0;
}
-int fm_get_freq(struct file *file, void *priv, struct v4l2_frequency *argp)
+static int fm_get_freq(struct file *file, void *priv,
+ struct v4l2_frequency *argp)
{
struct poseidon *p = file->private_data;
@@ -246,7 +248,8 @@ error:
return ret;
}
-int fm_set_freq(struct file *file, void *priv, struct v4l2_frequency *argp)
+static int fm_set_freq(struct file *file, void *priv,
+ struct v4l2_frequency *argp)
{
struct poseidon *p = file->private_data;
@@ -258,13 +261,13 @@ int fm_set_freq(struct file *file, void *priv, struct v4l2_frequency *argp)
return set_frequency(p, argp->frequency);
}
-int tlg_fm_vidioc_g_ctrl(struct file *file, void *priv,
+static int tlg_fm_vidioc_g_ctrl(struct file *file, void *priv,
struct v4l2_control *arg)
{
return 0;
}
-int tlg_fm_vidioc_g_exts_ctrl(struct file *file, void *fh,
+static int tlg_fm_vidioc_g_exts_ctrl(struct file *file, void *fh,
struct v4l2_ext_controls *ctrls)
{
struct poseidon *p = file->private_data;
@@ -285,7 +288,7 @@ int tlg_fm_vidioc_g_exts_ctrl(struct file *file, void *fh,
return 0;
}
-int tlg_fm_vidioc_s_exts_ctrl(struct file *file, void *fh,
+static int tlg_fm_vidioc_s_exts_ctrl(struct file *file, void *fh,
struct v4l2_ext_controls *ctrls)
{
int i;
@@ -312,13 +315,13 @@ int tlg_fm_vidioc_s_exts_ctrl(struct file *file, void *fh,
return 0;
}
-int tlg_fm_vidioc_s_ctrl(struct file *file, void *priv,
+static int tlg_fm_vidioc_s_ctrl(struct file *file, void *priv,
struct v4l2_control *ctrl)
{
return 0;
}
-int tlg_fm_vidioc_queryctrl(struct file *file, void *priv,
+static int tlg_fm_vidioc_queryctrl(struct file *file, void *priv,
struct v4l2_queryctrl *ctrl)
{
if (!(ctrl->id & V4L2_CTRL_FLAG_NEXT_CTRL))
@@ -337,7 +340,7 @@ int tlg_fm_vidioc_queryctrl(struct file *file, void *priv,
return -EINVAL;
}
-int tlg_fm_vidioc_querymenu(struct file *file, void *fh,
+static int tlg_fm_vidioc_querymenu(struct file *file, void *fh,
struct v4l2_querymenu *qmenu)
{
return v4l2_ctrl_query_menu(qmenu, NULL, NULL);
diff --git a/drivers/media/video/tlg2300/pd-video.c b/drivers/media/video/tlg2300/pd-video.c
index cf8f18c007e6..d0cc012f7ae6 100644
--- a/drivers/media/video/tlg2300/pd-video.c
+++ b/drivers/media/video/tlg2300/pd-video.c
@@ -12,11 +12,13 @@
#include "pd-common.h"
#include "vendorcmds.h"
+#ifdef CONFIG_PM
static int pm_video_suspend(struct poseidon *pd);
static int pm_video_resume(struct poseidon *pd);
+#endif
static void iso_bubble_handler(struct work_struct *w);
-int usb_transfer_mode;
+static int usb_transfer_mode;
module_param(usb_transfer_mode, int, 0644);
MODULE_PARM_DESC(usb_transfer_mode, "0 = Bulk, 1 = Isochronous");
@@ -476,10 +478,10 @@ static int prepare_iso_urb(struct video_data *video)
goto out;
video->urb_array[i] = urb;
- mem = usb_buffer_alloc(udev,
- ISO_PKT_SIZE * PK_PER_URB,
- GFP_KERNEL,
- &urb->transfer_dma);
+ mem = usb_alloc_coherent(udev,
+ ISO_PKT_SIZE * PK_PER_URB,
+ GFP_KERNEL,
+ &urb->transfer_dma);
urb->complete = urb_complete_iso; /* handler */
urb->dev = udev;
@@ -519,8 +521,8 @@ int alloc_bulk_urbs_generic(struct urb **urb_array, int num,
if (urb == NULL)
return i;
- mem = usb_buffer_alloc(udev, buf_size, gfp_flags,
- &urb->transfer_dma);
+ mem = usb_alloc_coherent(udev, buf_size, gfp_flags,
+ &urb->transfer_dma);
if (mem == NULL)
return i;
@@ -540,7 +542,7 @@ void free_all_urb_generic(struct urb **urb_array, int num)
for (i = 0; i < num; i++) {
urb = urb_array[i];
if (urb) {
- usb_buffer_free(urb->dev,
+ usb_free_coherent(urb->dev,
urb->transfer_buffer_length,
urb->transfer_buffer,
urb->transfer_dma);
@@ -617,7 +619,7 @@ static int pd_buf_prepare(struct videobuf_queue *q, struct videobuf_buffer *vb,
return 0;
}
-int fire_all_urb(struct video_data *video)
+static int fire_all_urb(struct video_data *video)
{
int i, ret;
@@ -877,7 +879,7 @@ out:
return ret;
}
-int vidioc_s_std(struct file *file, void *fh, v4l2_std_id *norm)
+static int vidioc_s_std(struct file *file, void *fh, v4l2_std_id *norm)
{
struct front_face *front = fh;
logs(front);
@@ -1020,7 +1022,7 @@ static int vidioc_enumaudio(struct file *file, void *fh, struct v4l2_audio *a)
return 0;
}
-int vidioc_g_audio(struct file *file, void *fh, struct v4l2_audio *a)
+static int vidioc_g_audio(struct file *file, void *fh, struct v4l2_audio *a)
{
a->index = 0;
a->capability = V4L2_AUDCAP_STEREO;
@@ -1029,7 +1031,7 @@ int vidioc_g_audio(struct file *file, void *fh, struct v4l2_audio *a)
return 0;
}
-int vidioc_s_audio(struct file *file, void *fh, struct v4l2_audio *a)
+static int vidioc_s_audio(struct file *file, void *fh, struct v4l2_audio *a)
{
return (0 == a->index) ? 0 : -EINVAL;
}
@@ -1189,7 +1191,7 @@ static int vidioc_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b)
}
/* Just stop the URBs, do not free the URBs */
-int usb_transfer_stop(struct video_data *video)
+static int usb_transfer_stop(struct video_data *video)
{
if (video->is_streaming) {
int i;
@@ -1518,13 +1520,13 @@ static int pd_video_mmap(struct file *file, struct vm_area_struct *vma)
return videobuf_mmap_mapper(&front->q, vma);
}
-unsigned int pd_video_poll(struct file *file, poll_table *table)
+static unsigned int pd_video_poll(struct file *file, poll_table *table)
{
struct front_face *front = file->private_data;
return videobuf_poll_stream(file, &front->q, table);
}
-ssize_t pd_video_read(struct file *file, char __user *buffer,
+static ssize_t pd_video_read(struct file *file, char __user *buffer,
size_t count, loff_t *ppos)
{
struct front_face *front = file->private_data;
diff --git a/drivers/media/video/tvp514x.c b/drivers/media/video/tvp514x.c
index e4815a1806e3..e826114b7fb8 100644
--- a/drivers/media/video/tvp514x.c
+++ b/drivers/media/video/tvp514x.c
@@ -79,6 +79,8 @@ struct tvp514x_std_info {
};
static struct tvp514x_reg tvp514x_reg_list_default[0x40];
+
+static int tvp514x_s_stream(struct v4l2_subdev *sd, int enable);
/**
* struct tvp514x_decoder - TVP5146/47 decoder object
* @sd: Subdevice Slave handle
@@ -644,6 +646,17 @@ static int tvp514x_s_routing(struct v4l2_subdev *sd,
/* Index out of bound */
return -EINVAL;
+ /*
+ * For the sequence streamon -> streamoff followed by another s_input,
+ * it fails to lock the signal, since streamoff puts the TVP514x
+ * into the power-off state, which makes the subsequent s_input fail.
+ *
+ * So power up the TVP514x device here, since it is important to lock
+ * the signal at this stage.
+ */
+ if (!decoder->streaming)
+ tvp514x_s_stream(sd, 1);
+
input_sel = input;
output_sel = output;
diff --git a/drivers/media/video/tvp5150.c b/drivers/media/video/tvp5150.c
index 908ffb68e926..47f0582d50a5 100644
--- a/drivers/media/video/tvp5150.c
+++ b/drivers/media/video/tvp5150.c
@@ -891,29 +891,26 @@ static int tvp5150_s_routing(struct v4l2_subdev *sd,
return 0;
}
-static int tvp5150_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
+static int tvp5150_s_raw_fmt(struct v4l2_subdev *sd, struct v4l2_vbi_format *fmt)
+{
+ /* this is for capturing 36 raw vbi lines
+ if there's a way to cut off the beginning 2 vbi lines
+ with the tvp5150 then the vbi line count could be lowered
+ to 17 lines/field again, although I couldn't find a register
+ which could do that cropping */
+ if (fmt->sample_format == V4L2_PIX_FMT_GREY)
+ tvp5150_write(sd, TVP5150_LUMA_PROC_CTL_1, 0x70);
+ if (fmt->count[0] == 18 && fmt->count[1] == 18) {
+ tvp5150_write(sd, TVP5150_VERT_BLANKING_START, 0x00);
+ tvp5150_write(sd, TVP5150_VERT_BLANKING_STOP, 0x01);
+ }
+ return 0;
+}
+
+static int tvp5150_s_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_format *svbi)
{
- struct v4l2_sliced_vbi_format *svbi;
int i;
- /* raw vbi */
- if (fmt->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
- /* this is for capturing 36 raw vbi lines
- if there's a way to cut off the beginning 2 vbi lines
- with the tvp5150 then the vbi line count could be lowered
- to 17 lines/field again, although I couldn't find a register
- which could do that cropping */
- if (fmt->fmt.vbi.sample_format == V4L2_PIX_FMT_GREY)
- tvp5150_write(sd, TVP5150_LUMA_PROC_CTL_1, 0x70);
- if (fmt->fmt.vbi.count[0] == 18 && fmt->fmt.vbi.count[1] == 18) {
- tvp5150_write(sd, TVP5150_VERT_BLANKING_START, 0x00);
- tvp5150_write(sd, TVP5150_VERT_BLANKING_STOP, 0x01);
- }
- return 0;
- }
- if (fmt->type != V4L2_BUF_TYPE_SLICED_VBI_CAPTURE)
- return -EINVAL;
- svbi = &fmt->fmt.sliced;
if (svbi->service_set != 0) {
for (i = 0; i <= 23; i++) {
svbi->service_lines[1][i] = 0;
@@ -937,14 +934,21 @@ static int tvp5150_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
return 0;
}
-static int tvp5150_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
+static int tvp5150_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
{
- struct v4l2_sliced_vbi_format *svbi;
- int i, mask = 0;
+ switch (fmt->type) {
+ case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
+ return tvp5150_s_sliced_fmt(sd, &fmt->fmt.sliced);
- if (fmt->type != V4L2_BUF_TYPE_SLICED_VBI_CAPTURE)
+ default:
return -EINVAL;
- svbi = &fmt->fmt.sliced;
+ }
+}
+
+static int tvp5150_g_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_format *svbi)
+{
+ int i, mask = 0;
+
memset(svbi, 0, sizeof(*svbi));
for (i = 0; i <= 23; i++) {
@@ -956,6 +960,12 @@ static int tvp5150_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
return 0;
}
+static int tvp5150_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
+{
+ if (fmt->type != V4L2_BUF_TYPE_SLICED_VBI_CAPTURE)
+ return -EINVAL;
+ return tvp5150_g_sliced_fmt(sd, &fmt->fmt.sliced);
+}
static int tvp5150_g_chip_ident(struct v4l2_subdev *sd,
struct v4l2_dbg_chip_ident *chip)
@@ -1046,13 +1056,20 @@ static const struct v4l2_subdev_video_ops tvp5150_video_ops = {
.s_routing = tvp5150_s_routing,
.g_fmt = tvp5150_g_fmt,
.s_fmt = tvp5150_s_fmt,
+};
+
+static const struct v4l2_subdev_vbi_ops tvp5150_vbi_ops = {
.g_sliced_vbi_cap = tvp5150_g_sliced_vbi_cap,
+ .g_sliced_fmt = tvp5150_g_sliced_fmt,
+ .s_sliced_fmt = tvp5150_s_sliced_fmt,
+ .s_raw_fmt = tvp5150_s_raw_fmt,
};
static const struct v4l2_subdev_ops tvp5150_ops = {
.core = &tvp5150_core_ops,
.tuner = &tvp5150_tuner_ops,
.video = &tvp5150_video_ops,
+ .vbi = &tvp5150_vbi_ops,
};
diff --git a/drivers/media/video/tvp7002.c b/drivers/media/video/tvp7002.c
index 4a69bcc738f3..8085ac392446 100644
--- a/drivers/media/video/tvp7002.c
+++ b/drivers/media/video/tvp7002.c
@@ -458,7 +458,7 @@ static inline struct tvp7002 *to_tvp7002(struct v4l2_subdev *sd)
/*
* tvp7002_read - Read a value from a register in an TVP7002
* @sd: ptr to v4l2_subdev struct
- * @reg: TVP7002 register address
+ * @addr: TVP7002 register address
* @dst: pointer to 8-bit destination
*
* Returns value read if successful, or non-zero (-1) otherwise.
@@ -488,7 +488,7 @@ static int tvp7002_read(struct v4l2_subdev *sd, u8 addr, u8 *dst)
* @sd: pointer to standard V4L2 sub-device structure
* @reg: destination register
* @val: value to be read
- * @error: pointer to error value
+ * @err: pointer to error value
*
* Read a value in a register and save error value in pointer.
* Also update the register table if successful
@@ -535,7 +535,7 @@ static int tvp7002_write(struct v4l2_subdev *sd, u8 addr, u8 value)
* @sd: pointer to standard V4L2 sub-device structure
* @reg: destination register
* @val: value to be written
- * @error: pointer to error value
+ * @err: pointer to error value
*
* Write a value in a register and save error value in pointer.
* Also update the register table if successful
@@ -596,7 +596,7 @@ static int tvp7002_write_inittab(struct v4l2_subdev *sd,
/*
* tvp7002_s_dv_preset() - Set digital video preset
* @sd: ptr to v4l2_subdev struct
- * @std: ptr to v4l2_dv_preset struct
+ * @dv_preset: ptr to v4l2_dv_preset struct
*
* Set the digital video preset for a TVP7002 decoder device.
* Returns zero when successful or -EINVAL if register access fails.
@@ -676,7 +676,7 @@ static int tvp7002_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
/*
* tvp7002_queryctrl() - Query a control
* @sd: ptr to v4l2_subdev struct
- * @ctrl: ptr to v4l2_queryctrl struct
+ * @qc: ptr to v4l2_queryctrl struct
*
* Query a control of a TVP7002 decoder device.
* Returns zero when successful or -EINVAL if register read fails.
@@ -776,7 +776,7 @@ static int tvp7002_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *f)
/*
* tvp7002_query_dv_preset() - query DV preset
* @sd: pointer to standard V4L2 sub-device structure
- * @std_id: standard V4L2 v4l2_dv_preset
+ * @qpreset: standard V4L2 v4l2_dv_preset structure
*
* Returns the current DV preset by TVP7002. If no active input is
* detected, returns -EINVAL
@@ -785,7 +785,6 @@ static int tvp7002_query_dv_preset(struct v4l2_subdev *sd,
struct v4l2_dv_preset *qpreset)
{
const struct tvp7002_preset_definition *presets = tvp7002_presets;
- struct v4l2_dv_enum_preset e_preset;
struct tvp7002 *device;
u8 progressive;
u32 lpfr;
@@ -828,20 +827,18 @@ static int tvp7002_query_dv_preset(struct v4l2_subdev *sd,
}
if (index == NUM_PRESETS) {
- v4l2_err(sd, "querystd error, lpf = %x, cpl = %x\n",
+ v4l2_dbg(1, debug, sd, "detection failed: lpf = %x, cpl = %x\n",
lpfr, cpln);
- return -EINVAL;
+ /* Could not detect a signal, so return the 'invalid' preset */
+ qpreset->preset = V4L2_DV_INVALID;
+ return 0;
}
- if (v4l_fill_dv_preset_info(presets->preset, &e_preset))
- return -EINVAL;
-
/* Set values in found preset */
qpreset->preset = presets->preset;
/* Update lines per frame and clocks per line info */
- v4l2_dbg(1, debug, sd, "Current preset: %d %d",
- e_preset.width, e_preset.height);
+ v4l2_dbg(1, debug, sd, "detected preset: %d\n", presets->preset);
return 0;
}
@@ -849,7 +846,7 @@ static int tvp7002_query_dv_preset(struct v4l2_subdev *sd,
/*
* tvp7002_g_register() - Get the value of a register
* @sd: ptr to v4l2_subdev struct
- * @vreg: ptr to v4l2_dbg_register struct
+ * @reg: ptr to v4l2_dbg_register struct
*
* Get the value of a TVP7002 decoder device register.
* Returns zero when successful, -EINVAL if register read fails or
@@ -876,7 +873,7 @@ static int tvp7002_g_register(struct v4l2_subdev *sd,
/*
* tvp7002_s_register() - set a control
* @sd: ptr to v4l2_subdev struct
- * @ctrl: ptr to v4l2_control struct
+ * @reg: ptr to v4l2_dbg_register struct
*
* Get the value of a TVP7002 decoder device register.
* Returns zero when successful, -EINVAL if register read fails or
@@ -899,7 +896,7 @@ static int tvp7002_s_register(struct v4l2_subdev *sd,
/*
* tvp7002_enum_fmt() - Enum supported formats
* @sd: pointer to standard V4L2 sub-device structure
- * @enable: pointer to format struct
+ * @fmtdesc: pointer to format struct
*
* Enumerate supported formats.
*/
@@ -994,6 +991,23 @@ static int tvp7002_log_status(struct v4l2_subdev *sd)
return 0;
}
+/*
+ * tvp7002_enum_dv_presets() - Enum supported digital video formats
+ * @sd: pointer to standard V4L2 sub-device structure
+ * @preset: pointer to format struct
+ *
+ * Enumerate supported digital video formats.
+ */
+static int tvp7002_enum_dv_presets(struct v4l2_subdev *sd,
+ struct v4l2_dv_enum_preset *preset)
+{
+ /* Check requested format index is within range */
+ if (preset->index >= NUM_PRESETS)
+ return -EINVAL;
+
+ return v4l_fill_dv_preset_info(tvp7002_presets[preset->index].preset, preset);
+}
+
/* V4L2 core operation handlers */
static const struct v4l2_subdev_core_ops tvp7002_core_ops = {
.g_chip_ident = tvp7002_g_chip_ident,
@@ -1009,6 +1023,7 @@ static const struct v4l2_subdev_core_ops tvp7002_core_ops = {
/* Specific video subsystem operation handlers */
static const struct v4l2_subdev_video_ops tvp7002_video_ops = {
+ .enum_dv_presets = tvp7002_enum_dv_presets,
.s_dv_preset = tvp7002_s_dv_preset,
.query_dv_preset = tvp7002_query_dv_preset,
.s_stream = tvp7002_s_stream,
@@ -1042,8 +1057,8 @@ static struct tvp7002 tvp7002_dev = {
/*
* tvp7002_probe - Probe a TVP7002 device
- * @sd: ptr to v4l2_subdev struct
- * @ctrl: ptr to i2c_device_id struct
+ * @c: ptr to i2c_client struct
+ * @id: ptr to i2c_device_id struct
*
* Initialize the TVP7002 device
* Returns zero when successful, -EINVAL if register read fails or
diff --git a/drivers/media/video/usbvideo/quickcam_messenger.c b/drivers/media/video/usbvideo/quickcam_messenger.c
index fab48ec6c0ea..fbd665fa1979 100644
--- a/drivers/media/video/usbvideo/quickcam_messenger.c
+++ b/drivers/media/video/usbvideo/quickcam_messenger.c
@@ -693,12 +693,13 @@ static int qcm_start_data(struct uvd *uvd)
static void qcm_stop_data(struct uvd *uvd)
{
- struct qcm *cam = (struct qcm *) uvd->user_data;
+ struct qcm *cam;
int i, j;
int ret;
if ((uvd == NULL) || (!uvd->streaming) || (uvd->dev == NULL))
return;
+ cam = (struct qcm *) uvd->user_data;
ret = qcm_camera_off(uvd);
if (ret)
diff --git a/drivers/media/video/usbvision/usbvision-core.c b/drivers/media/video/usbvision/usbvision-core.c
index f7aae2293758..b9dd74fde212 100644
--- a/drivers/media/video/usbvision/usbvision-core.c
+++ b/drivers/media/video/usbvision/usbvision-core.c
@@ -2493,10 +2493,10 @@ int usbvision_init_isoc(struct usb_usbvision *usbvision)
}
usbvision->sbuf[bufIdx].urb = urb;
usbvision->sbuf[bufIdx].data =
- usb_buffer_alloc(usbvision->dev,
- sb_size,
- GFP_KERNEL,
- &urb->transfer_dma);
+ usb_alloc_coherent(usbvision->dev,
+ sb_size,
+ GFP_KERNEL,
+ &urb->transfer_dma);
urb->dev = dev;
urb->context = usbvision;
urb->pipe = usb_rcvisocpipe(dev, usbvision->video_endp);
@@ -2552,10 +2552,10 @@ void usbvision_stop_isoc(struct usb_usbvision *usbvision)
for (bufIdx = 0; bufIdx < USBVISION_NUMSBUF; bufIdx++) {
usb_kill_urb(usbvision->sbuf[bufIdx].urb);
if (usbvision->sbuf[bufIdx].data){
- usb_buffer_free(usbvision->dev,
- sb_size,
- usbvision->sbuf[bufIdx].data,
- usbvision->sbuf[bufIdx].urb->transfer_dma);
+ usb_free_coherent(usbvision->dev,
+ sb_size,
+ usbvision->sbuf[bufIdx].data,
+ usbvision->sbuf[bufIdx].urb->transfer_dma);
}
usb_free_urb(usbvision->sbuf[bufIdx].urb);
usbvision->sbuf[bufIdx].urb = NULL;
diff --git a/drivers/media/video/usbvision/usbvision-i2c.c b/drivers/media/video/usbvision/usbvision-i2c.c
index 083765238a6a..42ba28785750 100644
--- a/drivers/media/video/usbvision/usbvision-i2c.c
+++ b/drivers/media/video/usbvision/usbvision-i2c.c
@@ -244,6 +244,9 @@ int usbvision_i2c_register(struct usb_usbvision *usbvision)
switch (usbvision_device_data[usbvision->DevModel].Codec) {
case CODEC_SAA7113:
case CODEC_SAA7111:
+ /* Without this delay the detection of the saa711x is
+ hit-and-miss. */
+ mdelay(10);
v4l2_i2c_new_subdev(&usbvision->v4l2_dev,
&usbvision->i2c_adap, "saa7115",
"saa7115_auto", 0, saa711x_addrs);
diff --git a/drivers/media/video/usbvision/usbvision-video.c b/drivers/media/video/usbvision/usbvision-video.c
index 7c17ec63c5da..6248a639ba2d 100644
--- a/drivers/media/video/usbvision/usbvision-video.c
+++ b/drivers/media/video/usbvision/usbvision-video.c
@@ -137,8 +137,6 @@ static int PowerOnAtOpen = 1;
static int video_nr = -1;
/* Sequential Number of Radio Device */
static int radio_nr = -1;
-/* Sequential Number of VBI Device */
-static int vbi_nr = -1;
/* Grab parameters for the device driver */
@@ -148,14 +146,12 @@ module_param(video_debug, int, 0444);
module_param(PowerOnAtOpen, int, 0444);
module_param(video_nr, int, 0444);
module_param(radio_nr, int, 0444);
-module_param(vbi_nr, int, 0444);
MODULE_PARM_DESC(isocMode, " Set the default format for ISOC endpoint. Default: 0x60 (Compression On)");
MODULE_PARM_DESC(video_debug, " Set the default Debug Mode of the device driver. Default: 0 (Off)");
MODULE_PARM_DESC(PowerOnAtOpen, " Set the default device to power on when device is opened. Default: 1 (On)");
MODULE_PARM_DESC(video_nr, "Set video device number (/dev/videoX). Default: -1 (autodetect)");
MODULE_PARM_DESC(radio_nr, "Set radio device number (/dev/radioX). Default: -1 (autodetect)");
-MODULE_PARM_DESC(vbi_nr, "Set vbi device number (/dev/vbiX). Default: -1 (autodetect)");
// Misc stuff
@@ -1244,36 +1240,6 @@ static int usbvision_radio_close(struct file *file)
return errCode;
}
-/*
- * Here comes the stuff for vbi on usbvision based devices
- *
- */
-static int usbvision_vbi_open(struct file *file)
-{
- /* TODO */
- return -ENODEV;
-}
-
-static int usbvision_vbi_close(struct file *file)
-{
- /* TODO */
- return -ENODEV;
-}
-
-static long usbvision_do_vbi_ioctl(struct file *file,
- unsigned int cmd, void *arg)
-{
- /* TODO */
- return -ENOIOCTLCMD;
-}
-
-static long usbvision_vbi_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- return video_usercopy(file, cmd, arg, usbvision_do_vbi_ioctl);
-}
-
-
//
// Video registration stuff
//
@@ -1367,21 +1333,6 @@ static struct video_device usbvision_radio_template = {
.current_norm = V4L2_STD_PAL
};
-// vbi template
-static const struct v4l2_file_operations usbvision_vbi_fops = {
- .owner = THIS_MODULE,
- .open = usbvision_vbi_open,
- .release = usbvision_vbi_close,
- .ioctl = usbvision_vbi_ioctl,
-};
-
-static struct video_device usbvision_vbi_template=
-{
- .fops = &usbvision_vbi_fops,
- .release = video_device_release,
- .name = "usbvision-vbi",
-};
-
static struct video_device *usbvision_vdev_init(struct usb_usbvision *usbvision,
struct video_device *vdev_template,
@@ -1410,18 +1361,6 @@ static struct video_device *usbvision_vdev_init(struct usb_usbvision *usbvision,
// unregister video4linux devices
static void usbvision_unregister_video(struct usb_usbvision *usbvision)
{
- // vbi Device:
- if (usbvision->vbi) {
- PDEBUG(DBG_PROBE, "unregister %s [v4l2]",
- video_device_node_name(usbvision->vbi));
- if (video_is_registered(usbvision->vbi)) {
- video_unregister_device(usbvision->vbi);
- } else {
- video_device_release(usbvision->vbi);
- }
- usbvision->vbi = NULL;
- }
-
// Radio Device:
if (usbvision->rdev) {
PDEBUG(DBG_PROBE, "unregister %s [v4l2]",
@@ -1482,22 +1421,6 @@ static int __devinit usbvision_register_video(struct usb_usbvision *usbvision)
printk(KERN_INFO "USBVision[%d]: registered USBVision Radio device %s [v4l2]\n",
usbvision->nr, video_device_node_name(usbvision->rdev));
}
- // vbi Device:
- if (usbvision_device_data[usbvision->DevModel].vbi) {
- usbvision->vbi = usbvision_vdev_init(usbvision,
- &usbvision_vbi_template,
- "USBVision VBI");
- if (usbvision->vbi == NULL) {
- goto err_exit;
- }
- if (video_register_device(usbvision->vbi,
- VFL_TYPE_VBI,
- vbi_nr)<0) {
- goto err_exit;
- }
- printk(KERN_INFO "USBVision[%d]: registered USBVision VBI device %s [v4l2] (Not Working Yet!)\n",
- usbvision->nr, video_device_node_name(usbvision->vbi));
- }
// all done
return 0;
@@ -1726,8 +1649,6 @@ static int __devinit usbvision_probe(struct usb_interface *intf,
usbvision_configure_video(usbvision);
mutex_unlock(&usbvision->lock);
-
- usb_set_intfdata (intf, usbvision);
usbvision_create_sysfs(usbvision->vdev);
PDEBUG(DBG_PROBE, "success");
@@ -1745,7 +1666,7 @@ static int __devinit usbvision_probe(struct usb_interface *intf,
*/
static void __devexit usbvision_disconnect(struct usb_interface *intf)
{
- struct usb_usbvision *usbvision = usb_get_intfdata(intf);
+ struct usb_usbvision *usbvision = to_usbvision(usb_get_intfdata(intf));
PDEBUG(DBG_PROBE, "");
@@ -1754,7 +1675,6 @@ static void __devexit usbvision_disconnect(struct usb_interface *intf)
"%s: usb_get_intfdata() failed\n", __func__);
return;
}
- usb_set_intfdata (intf, NULL);
mutex_lock(&usbvision->lock);
diff --git a/drivers/media/video/usbvision/usbvision.h b/drivers/media/video/usbvision/usbvision.h
index f8d7458daf3e..d1b3cc0cd87f 100644
--- a/drivers/media/video/usbvision/usbvision.h
+++ b/drivers/media/video/usbvision/usbvision.h
@@ -360,7 +360,6 @@ struct usb_usbvision {
struct v4l2_device v4l2_dev;
struct video_device *vdev; /* Video Device */
struct video_device *rdev; /* Radio Device */
- struct video_device *vbi; /* VBI Device */
/* i2c Declaration Section*/
struct i2c_adapter i2c_adap;
@@ -463,6 +462,11 @@ struct usb_usbvision {
int ComprBlockTypes[4];
};
+static inline struct usb_usbvision *to_usbvision(struct v4l2_device *v4l2_dev)
+{
+ return container_of(v4l2_dev, struct usb_usbvision, v4l2_dev);
+}
+
#define call_all(usbvision, o, f, args...) \
v4l2_device_call_all(&usbvision->v4l2_dev, 0, o, f, ##args)
diff --git a/drivers/media/video/uvc/uvc_ctrl.c b/drivers/media/video/uvc/uvc_ctrl.c
index 6d3850b37161..aa0720af07a0 100644
--- a/drivers/media/video/uvc/uvc_ctrl.c
+++ b/drivers/media/video/uvc/uvc_ctrl.c
@@ -217,8 +217,7 @@ static struct uvc_control_info uvc_ctrls[] = {
.selector = UVC_CT_EXPOSURE_TIME_RELATIVE_CONTROL,
.index = 4,
.size = 1,
- .flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_CUR
- | UVC_CONTROL_RESTORE,
+ .flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_RESTORE,
},
{
.entity = UVC_GUID_UVC_CAMERA,
@@ -233,8 +232,9 @@ static struct uvc_control_info uvc_ctrls[] = {
.selector = UVC_CT_FOCUS_RELATIVE_CONTROL,
.index = 6,
.size = 2,
- .flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_RANGE
- | UVC_CONTROL_AUTO_UPDATE,
+ .flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_MIN
+ | UVC_CONTROL_GET_MAX | UVC_CONTROL_GET_RES
+ | UVC_CONTROL_GET_DEF | UVC_CONTROL_AUTO_UPDATE,
},
{
.entity = UVC_GUID_UVC_CAMERA,
@@ -249,8 +249,7 @@ static struct uvc_control_info uvc_ctrls[] = {
.selector = UVC_CT_IRIS_RELATIVE_CONTROL,
.index = 8,
.size = 1,
- .flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_CUR
- | UVC_CONTROL_AUTO_UPDATE,
+ .flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_AUTO_UPDATE,
},
{
.entity = UVC_GUID_UVC_CAMERA,
@@ -265,8 +264,9 @@ static struct uvc_control_info uvc_ctrls[] = {
.selector = UVC_CT_ZOOM_RELATIVE_CONTROL,
.index = 10,
.size = 3,
- .flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_RANGE
- | UVC_CONTROL_AUTO_UPDATE,
+ .flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_MIN
+ | UVC_CONTROL_GET_MAX | UVC_CONTROL_GET_RES
+ | UVC_CONTROL_GET_DEF | UVC_CONTROL_AUTO_UPDATE,
},
{
.entity = UVC_GUID_UVC_CAMERA,
@@ -281,8 +281,9 @@ static struct uvc_control_info uvc_ctrls[] = {
.selector = UVC_CT_PANTILT_RELATIVE_CONTROL,
.index = 12,
.size = 4,
- .flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_RANGE
- | UVC_CONTROL_AUTO_UPDATE,
+ .flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_MIN
+ | UVC_CONTROL_GET_MAX | UVC_CONTROL_GET_RES
+ | UVC_CONTROL_GET_DEF | UVC_CONTROL_AUTO_UPDATE,
},
{
.entity = UVC_GUID_UVC_CAMERA,
@@ -297,8 +298,9 @@ static struct uvc_control_info uvc_ctrls[] = {
.selector = UVC_CT_ROLL_RELATIVE_CONTROL,
.index = 14,
.size = 2,
- .flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_RANGE
- | UVC_CONTROL_AUTO_UPDATE,
+ .flags = UVC_CONTROL_SET_CUR | UVC_CONTROL_GET_MIN
+ | UVC_CONTROL_GET_MAX | UVC_CONTROL_GET_RES
+ | UVC_CONTROL_GET_DEF | UVC_CONTROL_AUTO_UPDATE,
},
{
.entity = UVC_GUID_UVC_CAMERA,
@@ -562,6 +564,26 @@ static struct uvc_control_mapping uvc_ctrl_mappings[] = {
.data_type = UVC_CTRL_DATA_TYPE_BOOLEAN,
},
{
+ .id = V4L2_CID_IRIS_ABSOLUTE,
+ .name = "Iris, Absolute",
+ .entity = UVC_GUID_UVC_CAMERA,
+ .selector = UVC_CT_IRIS_ABSOLUTE_CONTROL,
+ .size = 16,
+ .offset = 0,
+ .v4l2_type = V4L2_CTRL_TYPE_INTEGER,
+ .data_type = UVC_CTRL_DATA_TYPE_UNSIGNED,
+ },
+ {
+ .id = V4L2_CID_IRIS_RELATIVE,
+ .name = "Iris, Relative",
+ .entity = UVC_GUID_UVC_CAMERA,
+ .selector = UVC_CT_IRIS_RELATIVE_CONTROL,
+ .size = 8,
+ .offset = 0,
+ .v4l2_type = V4L2_CTRL_TYPE_INTEGER,
+ .data_type = UVC_CTRL_DATA_TYPE_SIGNED,
+ },
+ {
.id = V4L2_CID_ZOOM_ABSOLUTE,
.name = "Zoom, Absolute",
.entity = UVC_GUID_UVC_CAMERA,
@@ -822,6 +844,8 @@ int uvc_query_v4l2_ctrl(struct uvc_video_chain *chain,
strlcpy(v4l2_ctrl->name, mapping->name, sizeof v4l2_ctrl->name);
v4l2_ctrl->flags = 0;
+ if (!(ctrl->info->flags & UVC_CONTROL_GET_CUR))
+ v4l2_ctrl->flags |= V4L2_CTRL_FLAG_WRITE_ONLY;
if (!(ctrl->info->flags & UVC_CONTROL_SET_CUR))
v4l2_ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
@@ -1047,6 +1071,8 @@ int uvc_ctrl_set(struct uvc_video_chain *chain,
uvc_ctrl_data(ctrl, UVC_CTRL_DATA_MAX));
step = mapping->get(mapping, UVC_GET_RES,
uvc_ctrl_data(ctrl, UVC_CTRL_DATA_RES));
+ if (step == 0)
+ step = 1;
xctrl->value = min + (xctrl->value - min + step/2) / step * step;
xctrl->value = clamp(xctrl->value, min, max);
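
The step == 0 guard added above protects the rounding expression that follows it from a division by zero when a device reports a resolution of zero. A stand-alone sketch of that expression (not part of the patch; the values are made up): with min = 0, max = 255, step = 4 and a requested value of 10, the result is 12.

#include <linux/kernel.h>	/* for clamp() */

/* Sketch only: round a requested control value to the nearest multiple of
 * the control's resolution (step), then clamp it to [min, max]. */
static int uvc_round_example(int value, int min, int max, int step)
{
	if (step == 0)		/* mirrors the guard added above */
		step = 1;
	value = min + (value - min + step / 2) / step * step;
	return clamp(value, min, max);
}
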
diff --git a/drivers/media/video/uvc/uvc_driver.c b/drivers/media/video/uvc/uvc_driver.c
index 86ff8c12ea58..838b56f097cf 100644
--- a/drivers/media/video/uvc/uvc_driver.c
+++ b/drivers/media/video/uvc/uvc_driver.c
@@ -91,11 +91,16 @@ static struct uvc_format_desc uvc_fmts[] = {
.fcc = V4L2_PIX_FMT_UYVY,
},
{
- .name = "Greyscale",
+ .name = "Greyscale (8-bit)",
.guid = UVC_GUID_FORMAT_Y800,
.fcc = V4L2_PIX_FMT_GREY,
},
{
+ .name = "Greyscale (16-bit)",
+ .guid = UVC_GUID_FORMAT_Y16,
+ .fcc = V4L2_PIX_FMT_Y16,
+ },
+ {
.name = "RGB Bayer",
.guid = UVC_GUID_FORMAT_BY8,
.fcc = V4L2_PIX_FMT_SBGGR8,
@@ -2105,6 +2110,15 @@ static struct usb_device_id uvc_ids[] = {
.bInterfaceSubClass = 1,
.bInterfaceProtocol = 0,
.driver_info = UVC_QUIRK_STREAM_NO_FID },
+ /* Syntek (Packard Bell EasyNote MX52) */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x174f,
+ .idProduct = 0x8a12,
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 0,
+ .driver_info = UVC_QUIRK_STREAM_NO_FID },
/* Syntek (Asus F9SG) */
{ .match_flags = USB_DEVICE_ID_MATCH_DEVICE
| USB_DEVICE_ID_MATCH_INT_INFO,
@@ -2169,6 +2183,15 @@ static struct usb_device_id uvc_ids[] = {
.bInterfaceSubClass = 1,
.bInterfaceProtocol = 0,
.driver_info = UVC_QUIRK_PROBE_MINMAX },
+ /* Arkmicro unbranded */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x18ec,
+ .idProduct = 0x3290,
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 0,
+ .driver_info = UVC_QUIRK_PROBE_DEF },
/* Bodelin ProScopeHR */
{ .match_flags = USB_DEVICE_ID_MATCH_DEVICE
| USB_DEVICE_ID_MATCH_DEV_HI
diff --git a/drivers/media/video/uvc/uvc_queue.c b/drivers/media/video/uvc/uvc_queue.c
index 4a925a31b0e0..133c78d113ac 100644
--- a/drivers/media/video/uvc/uvc_queue.c
+++ b/drivers/media/video/uvc/uvc_queue.c
@@ -388,8 +388,12 @@ unsigned int uvc_queue_poll(struct uvc_video_queue *queue, struct file *file,
poll_wait(file, &buf->wait, wait);
if (buf->state == UVC_BUF_STATE_DONE ||
- buf->state == UVC_BUF_STATE_ERROR)
- mask |= POLLIN | POLLRDNORM;
+ buf->state == UVC_BUF_STATE_ERROR) {
+ if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ mask |= POLLIN | POLLRDNORM;
+ else
+ mask |= POLLOUT | POLLWRNORM;
+ }
done:
mutex_unlock(&queue->mutex);
diff --git a/drivers/media/video/uvc/uvc_video.c b/drivers/media/video/uvc/uvc_video.c
index 821a9969b7bf..53f3ef4635eb 100644
--- a/drivers/media/video/uvc/uvc_video.c
+++ b/drivers/media/video/uvc/uvc_video.c
@@ -739,7 +739,7 @@ static void uvc_free_urb_buffers(struct uvc_streaming *stream)
for (i = 0; i < UVC_URBS; ++i) {
if (stream->urb_buffer[i]) {
- usb_buffer_free(stream->dev->udev, stream->urb_size,
+ usb_free_coherent(stream->dev->udev, stream->urb_size,
stream->urb_buffer[i], stream->urb_dma[i]);
stream->urb_buffer[i] = NULL;
}
@@ -780,7 +780,7 @@ static int uvc_alloc_urb_buffers(struct uvc_streaming *stream,
for (; npackets > 1; npackets /= 2) {
for (i = 0; i < UVC_URBS; ++i) {
stream->urb_size = psize * npackets;
- stream->urb_buffer[i] = usb_buffer_alloc(
+ stream->urb_buffer[i] = usb_alloc_coherent(
stream->dev->udev, stream->urb_size,
gfp_flags | __GFP_NOWARN, &stream->urb_dma[i]);
if (!stream->urb_buffer[i]) {
diff --git a/drivers/media/video/uvc/uvcvideo.h b/drivers/media/video/uvc/uvcvideo.h
index 2bba059259e6..d1f88406a5e7 100644
--- a/drivers/media/video/uvc/uvcvideo.h
+++ b/drivers/media/video/uvc/uvcvideo.h
@@ -131,11 +131,13 @@ struct uvc_xu_control {
#define UVC_GUID_FORMAT_Y800 \
{ 'Y', '8', '0', '0', 0x00, 0x00, 0x10, 0x00, \
0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_Y16 \
+ { 'Y', '1', '6', ' ', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
#define UVC_GUID_FORMAT_BY8 \
{ 'B', 'Y', '8', ' ', 0x00, 0x00, 0x10, 0x00, \
0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
-
/* ------------------------------------------------------------------------
* Driver specific constants.
*/
diff --git a/drivers/media/video/v4l2-common.c b/drivers/media/video/v4l2-common.c
index 36b5cb86fb57..4e53b0b3339c 100644
--- a/drivers/media/video/v4l2-common.c
+++ b/drivers/media/video/v4l2-common.c
@@ -51,6 +51,9 @@
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/i2c.h>
+#if defined(CONFIG_SPI)
+#include <linux/spi/spi.h>
+#endif
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/pgtable.h>
@@ -85,10 +88,9 @@ MODULE_LICENSE("GPL");
val == V4L2_PRIORITY_INTERACTIVE || \
val == V4L2_PRIORITY_RECORD)
-int v4l2_prio_init(struct v4l2_prio_state *global)
+void v4l2_prio_init(struct v4l2_prio_state *global)
{
- memset(global,0,sizeof(*global));
- return 0;
+ memset(global, 0, sizeof(*global));
}
EXPORT_SYMBOL(v4l2_prio_init);
@@ -108,17 +110,16 @@ int v4l2_prio_change(struct v4l2_prio_state *global, enum v4l2_priority *local,
}
EXPORT_SYMBOL(v4l2_prio_change);
-int v4l2_prio_open(struct v4l2_prio_state *global, enum v4l2_priority *local)
+void v4l2_prio_open(struct v4l2_prio_state *global, enum v4l2_priority *local)
{
- return v4l2_prio_change(global,local,V4L2_PRIORITY_DEFAULT);
+ v4l2_prio_change(global, local, V4L2_PRIORITY_DEFAULT);
}
EXPORT_SYMBOL(v4l2_prio_open);
-int v4l2_prio_close(struct v4l2_prio_state *global, enum v4l2_priority *local)
+void v4l2_prio_close(struct v4l2_prio_state *global, enum v4l2_priority local)
{
- if (V4L2_PRIO_VALID(*local))
- atomic_dec(&global->prios[*local]);
- return 0;
+ if (V4L2_PRIO_VALID(local))
+ atomic_dec(&global->prios[local]);
}
EXPORT_SYMBOL(v4l2_prio_close);
@@ -134,11 +135,9 @@ enum v4l2_priority v4l2_prio_max(struct v4l2_prio_state *global)
}
EXPORT_SYMBOL(v4l2_prio_max);
-int v4l2_prio_check(struct v4l2_prio_state *global, enum v4l2_priority *local)
+int v4l2_prio_check(struct v4l2_prio_state *global, enum v4l2_priority local)
{
- if (*local < v4l2_prio_max(global))
- return -EBUSY;
- return 0;
+ return (local < v4l2_prio_max(global)) ? -EBUSY : 0;
}
EXPORT_SYMBOL(v4l2_prio_check);
@@ -340,6 +339,13 @@ const char **v4l2_ctrl_get_menu(u32 id)
"None",
"Black & White",
"Sepia",
+ "Negative",
+ "Emboss",
+ "Sketch",
+ "Sky blue",
+ "Grass green",
+ "Skin whiten",
+ "Vivid",
NULL
};
static const char *tune_preemphasis[] = {
@@ -429,10 +435,13 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_SHARPNESS: return "Sharpness";
case V4L2_CID_BACKLIGHT_COMPENSATION: return "Backlight Compensation";
case V4L2_CID_CHROMA_AGC: return "Chroma AGC";
+ case V4L2_CID_CHROMA_GAIN: return "Chroma Gain";
case V4L2_CID_COLOR_KILLER: return "Color Killer";
case V4L2_CID_COLORFX: return "Color Effects";
+ case V4L2_CID_AUTOBRIGHTNESS: return "Brightness, Automatic";
+ case V4L2_CID_BAND_STOP_FILTER: return "Band-Stop Filter";
case V4L2_CID_ROTATE: return "Rotate";
- case V4L2_CID_BG_COLOR: return "Background color";
+ case V4L2_CID_BG_COLOR: return "Background Color";
/* MPEG controls */
case V4L2_CID_MPEG_CLASS: return "MPEG Encoder Controls";
@@ -483,6 +492,8 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_FOCUS_ABSOLUTE: return "Focus, Absolute";
case V4L2_CID_FOCUS_RELATIVE: return "Focus, Relative";
case V4L2_CID_FOCUS_AUTO: return "Focus, Automatic";
+ case V4L2_CID_IRIS_ABSOLUTE: return "Iris, Absolute";
+ case V4L2_CID_IRIS_RELATIVE: return "Iris, Relative";
case V4L2_CID_ZOOM_ABSOLUTE: return "Zoom, Absolute";
case V4L2_CID_ZOOM_RELATIVE: return "Zoom, Relative";
case V4L2_CID_ZOOM_CONTINUOUS: return "Zoom, Continuous";
@@ -620,6 +631,7 @@ int v4l2_ctrl_query_fill(struct v4l2_queryctrl *qctrl, s32 min, s32 max, s32 ste
case V4L2_CID_BLUE_BALANCE:
case V4L2_CID_GAMMA:
case V4L2_CID_SHARPNESS:
+ case V4L2_CID_CHROMA_GAIN:
case V4L2_CID_RDS_TX_DEVIATION:
case V4L2_CID_AUDIO_LIMITER_RELEASE_TIME:
case V4L2_CID_AUDIO_LIMITER_DEVIATION:
@@ -636,6 +648,7 @@ int v4l2_ctrl_query_fill(struct v4l2_queryctrl *qctrl, s32 min, s32 max, s32 ste
case V4L2_CID_PAN_RELATIVE:
case V4L2_CID_TILT_RELATIVE:
case V4L2_CID_FOCUS_RELATIVE:
+ case V4L2_CID_IRIS_RELATIVE:
case V4L2_CID_ZOOM_RELATIVE:
qctrl->flags |= V4L2_CTRL_FLAG_WRITE_ONLY;
break;
@@ -951,6 +964,66 @@ EXPORT_SYMBOL_GPL(v4l2_i2c_tuner_addrs);
#endif /* defined(CONFIG_I2C) */
+#if defined(CONFIG_SPI)
+
+/* Load a spi sub-device. */
+
+void v4l2_spi_subdev_init(struct v4l2_subdev *sd, struct spi_device *spi,
+ const struct v4l2_subdev_ops *ops)
+{
+ v4l2_subdev_init(sd, ops);
+ sd->flags |= V4L2_SUBDEV_FL_IS_SPI;
+ /* the owner is the same as the spi_device's driver owner */
+ sd->owner = spi->dev.driver->owner;
+ /* spi_device and v4l2_subdev point to one another */
+ v4l2_set_subdevdata(sd, spi);
+ spi_set_drvdata(spi, sd);
+ /* initialize name */
+ strlcpy(sd->name, spi->dev.driver->name, sizeof(sd->name));
+}
+EXPORT_SYMBOL_GPL(v4l2_spi_subdev_init);
+
+struct v4l2_subdev *v4l2_spi_new_subdev(struct v4l2_device *v4l2_dev,
+ struct spi_master *master, struct spi_board_info *info)
+{
+ struct v4l2_subdev *sd = NULL;
+ struct spi_device *spi = NULL;
+
+ BUG_ON(!v4l2_dev);
+
+ if (info->modalias)
+ request_module(info->modalias);
+
+ spi = spi_new_device(master, info);
+
+ if (spi == NULL || spi->dev.driver == NULL)
+ goto error;
+
+ if (!try_module_get(spi->dev.driver->owner))
+ goto error;
+
+ sd = spi_get_drvdata(spi);
+
+ /* Register with the v4l2_device which increases the module's
+ use count as well. */
+ if (v4l2_device_register_subdev(v4l2_dev, sd))
+ sd = NULL;
+
+ /* Decrease the module use count to match the first try_module_get. */
+ module_put(spi->dev.driver->owner);
+
+error:
+ /* If we have an spi_device but no subdev, then something went wrong
+ and we must unregister the spi_device. */
+ if (spi && sd == NULL)
+ spi_unregister_device(spi);
+
+ return sd;
+}
+EXPORT_SYMBOL_GPL(v4l2_spi_new_subdev);
+
+#endif /* defined(CONFIG_SPI) */
+
/* Clamp x to be between min and max, aligned to a multiple of 2^align. min
* and max don't have to be aligned, but there must be at least one valid
* value. E.g., min=17,max=31,align=4 is not allowed as there are no multiples
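
The new v4l2_spi_subdev_init()/v4l2_spi_new_subdev() helpers mirror the existing I2C ones. A minimal sketch of how a bridge driver might load an SPI sub-device follows; the foo_* names, the modalias and the bus/chip-select numbers are hypothetical, and the helper declarations are assumed to be exposed through media/v4l2-common.h as in the rest of this series.

#include <linux/errno.h>
#include <linux/spi/spi.h>
#include <media/v4l2-device.h>
#include <media/v4l2-common.h>

/* Hypothetical board description for an SPI-connected sensor. */
static struct spi_board_info foo_sensor_info = {
	.modalias	= "foo-sensor",
	.max_speed_hz	= 5000000,
	.bus_num	= 0,
	.chip_select	= 0,
};

static int foo_register_sensor(struct v4l2_device *v4l2_dev,
			       struct spi_master *master)
{
	struct v4l2_subdev *sd;

	/* Loads the module, creates the spi_device and registers the subdev. */
	sd = v4l2_spi_new_subdev(v4l2_dev, master, &foo_sensor_info);
	return sd ? 0 : -ENODEV;
}
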
diff --git a/drivers/media/video/v4l2-compat-ioctl32.c b/drivers/media/video/v4l2-compat-ioctl32.c
index f77f84bfe714..9004a5fe7643 100644
--- a/drivers/media/video/v4l2-compat-ioctl32.c
+++ b/drivers/media/video/v4l2-compat-ioctl32.c
@@ -1086,6 +1086,9 @@ long v4l2_compat_ioctl32(struct file *file, unsigned int cmd, unsigned long arg)
case VIDIOC_QUERY_DV_PRESET:
case VIDIOC_S_DV_TIMINGS:
case VIDIOC_G_DV_TIMINGS:
+ case VIDIOC_DQEVENT:
+ case VIDIOC_SUBSCRIBE_EVENT:
+ case VIDIOC_UNSUBSCRIBE_EVENT:
ret = do_video_ioctl(file, cmd, arg);
break;
diff --git a/drivers/media/video/v4l2-dev.c b/drivers/media/video/v4l2-dev.c
index 709069916068..0ca7ec9ca902 100644
--- a/drivers/media/video/v4l2-dev.c
+++ b/drivers/media/video/v4l2-dev.c
@@ -421,6 +421,10 @@ static int __video_register_device(struct video_device *vdev, int type, int nr,
if (!vdev->release)
return -EINVAL;
+ /* v4l2_fh support */
+ spin_lock_init(&vdev->fh_lock);
+ INIT_LIST_HEAD(&vdev->fh_list);
+
/* Part 1: check device type */
switch (type) {
case VFL_TYPE_GRABBER:
@@ -596,9 +600,7 @@ void video_unregister_device(struct video_device *vdev)
if (!vdev || !video_is_registered(vdev))
return;
- mutex_lock(&videodev_lock);
clear_bit(V4L2_FL_REGISTERED, &vdev->flags);
- mutex_unlock(&videodev_lock);
device_unregister(&vdev->dev);
}
EXPORT_SYMBOL(video_unregister_device);
diff --git a/drivers/media/video/v4l2-device.c b/drivers/media/video/v4l2-device.c
index 0d06e7cbd5b3..5a7dc4afe92a 100644
--- a/drivers/media/video/v4l2-device.c
+++ b/drivers/media/video/v4l2-device.c
@@ -21,6 +21,9 @@
#include <linux/types.h>
#include <linux/ioctl.h>
#include <linux/i2c.h>
+#if defined(CONFIG_SPI)
+#include <linux/spi/spi.h>
+#endif
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
@@ -97,6 +100,14 @@ void v4l2_device_unregister(struct v4l2_device *v4l2_dev)
i2c_unregister_device(client);
}
#endif
+#if defined(CONFIG_SPI)
+ if (sd->flags & V4L2_SUBDEV_FL_IS_SPI) {
+ struct spi_device *spi = v4l2_get_subdevdata(sd);
+
+ if (spi)
+ spi_unregister_device(spi);
+ }
+#endif
}
}
EXPORT_SYMBOL_GPL(v4l2_device_unregister);
diff --git a/drivers/media/video/v4l2-event.c b/drivers/media/video/v4l2-event.c
new file mode 100644
index 000000000000..de74ce07b5e2
--- /dev/null
+++ b/drivers/media/video/v4l2-event.c
@@ -0,0 +1,292 @@
+/*
+ * v4l2-event.c
+ *
+ * V4L2 events.
+ *
+ * Copyright (C) 2009--2010 Nokia Corporation.
+ *
+ * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#include <media/v4l2-dev.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-event.h>
+
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+int v4l2_event_init(struct v4l2_fh *fh)
+{
+ fh->events = kzalloc(sizeof(*fh->events), GFP_KERNEL);
+ if (fh->events == NULL)
+ return -ENOMEM;
+
+ init_waitqueue_head(&fh->events->wait);
+
+ INIT_LIST_HEAD(&fh->events->free);
+ INIT_LIST_HEAD(&fh->events->available);
+ INIT_LIST_HEAD(&fh->events->subscribed);
+
+ fh->events->sequence = -1;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_event_init);
+
+int v4l2_event_alloc(struct v4l2_fh *fh, unsigned int n)
+{
+ struct v4l2_events *events = fh->events;
+ unsigned long flags;
+
+ if (!events) {
+ WARN_ON(1);
+ return -ENOMEM;
+ }
+
+ while (events->nallocated < n) {
+ struct v4l2_kevent *kev;
+
+ kev = kzalloc(sizeof(*kev), GFP_KERNEL);
+ if (kev == NULL)
+ return -ENOMEM;
+
+ spin_lock_irqsave(&fh->vdev->fh_lock, flags);
+ list_add_tail(&kev->list, &events->free);
+ events->nallocated++;
+ spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_event_alloc);
+
+#define list_kfree(list, type, member) \
+ while (!list_empty(list)) { \
+ type *hi; \
+ hi = list_first_entry(list, type, member); \
+ list_del(&hi->member); \
+ kfree(hi); \
+ }
+
+void v4l2_event_free(struct v4l2_fh *fh)
+{
+ struct v4l2_events *events = fh->events;
+
+ if (!events)
+ return;
+
+ list_kfree(&events->free, struct v4l2_kevent, list);
+ list_kfree(&events->available, struct v4l2_kevent, list);
+ list_kfree(&events->subscribed, struct v4l2_subscribed_event, list);
+
+ kfree(events);
+ fh->events = NULL;
+}
+EXPORT_SYMBOL_GPL(v4l2_event_free);
+
+static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event)
+{
+ struct v4l2_events *events = fh->events;
+ struct v4l2_kevent *kev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fh->vdev->fh_lock, flags);
+
+ if (list_empty(&events->available)) {
+ spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
+ return -ENOENT;
+ }
+
+ WARN_ON(events->navailable == 0);
+
+ kev = list_first_entry(&events->available, struct v4l2_kevent, list);
+ list_move(&kev->list, &events->free);
+ events->navailable--;
+
+ kev->event.pending = events->navailable;
+ *event = kev->event;
+
+ spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
+
+ return 0;
+}
+
+int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event,
+ int nonblocking)
+{
+ struct v4l2_events *events = fh->events;
+ int ret;
+
+ if (nonblocking)
+ return __v4l2_event_dequeue(fh, event);
+
+ do {
+ ret = wait_event_interruptible(events->wait,
+ events->navailable != 0);
+ if (ret < 0)
+ return ret;
+
+ ret = __v4l2_event_dequeue(fh, event);
+ } while (ret == -ENOENT);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(v4l2_event_dequeue);
+
+/* Caller must hold fh->vdev->fh_lock! */
+static struct v4l2_subscribed_event *v4l2_event_subscribed(
+ struct v4l2_fh *fh, u32 type)
+{
+ struct v4l2_events *events = fh->events;
+ struct v4l2_subscribed_event *sev;
+
+ assert_spin_locked(&fh->vdev->fh_lock);
+
+ list_for_each_entry(sev, &events->subscribed, list) {
+ if (sev->type == type)
+ return sev;
+ }
+
+ return NULL;
+}
+
+void v4l2_event_queue(struct video_device *vdev, const struct v4l2_event *ev)
+{
+ struct v4l2_fh *fh;
+ unsigned long flags;
+ struct timespec timestamp;
+
+ ktime_get_ts(&timestamp);
+
+ spin_lock_irqsave(&vdev->fh_lock, flags);
+
+ list_for_each_entry(fh, &vdev->fh_list, list) {
+ struct v4l2_events *events = fh->events;
+ struct v4l2_kevent *kev;
+
+ /* Are we subscribed? */
+ if (!v4l2_event_subscribed(fh, ev->type))
+ continue;
+
+ /* Increase event sequence number on fh. */
+ events->sequence++;
+
+ /* Do we have any free events? */
+ if (list_empty(&events->free))
+ continue;
+
+ /* Take one and fill it. */
+ kev = list_first_entry(&events->free, struct v4l2_kevent, list);
+ kev->event.type = ev->type;
+ kev->event.u = ev->u;
+ kev->event.timestamp = timestamp;
+ kev->event.sequence = events->sequence;
+ list_move_tail(&kev->list, &events->available);
+
+ events->navailable++;
+
+ wake_up_all(&events->wait);
+ }
+
+ spin_unlock_irqrestore(&vdev->fh_lock, flags);
+}
+EXPORT_SYMBOL_GPL(v4l2_event_queue);
+
+int v4l2_event_pending(struct v4l2_fh *fh)
+{
+ return fh->events->navailable;
+}
+EXPORT_SYMBOL_GPL(v4l2_event_pending);
+
+int v4l2_event_subscribe(struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ struct v4l2_events *events = fh->events;
+ struct v4l2_subscribed_event *sev;
+ unsigned long flags;
+
+ if (fh->events == NULL) {
+ WARN_ON(1);
+ return -ENOMEM;
+ }
+
+ sev = kmalloc(sizeof(*sev), GFP_KERNEL);
+ if (!sev)
+ return -ENOMEM;
+
+ spin_lock_irqsave(&fh->vdev->fh_lock, flags);
+
+ if (v4l2_event_subscribed(fh, sub->type) == NULL) {
+ INIT_LIST_HEAD(&sev->list);
+ sev->type = sub->type;
+
+ list_add(&sev->list, &events->subscribed);
+ sev = NULL;
+ }
+
+ spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
+
+ kfree(sev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_event_subscribe);
+
+static void v4l2_event_unsubscribe_all(struct v4l2_fh *fh)
+{
+ struct v4l2_events *events = fh->events;
+ struct v4l2_subscribed_event *sev;
+ unsigned long flags;
+
+ do {
+ sev = NULL;
+
+ spin_lock_irqsave(&fh->vdev->fh_lock, flags);
+ if (!list_empty(&events->subscribed)) {
+ sev = list_first_entry(&events->subscribed,
+ struct v4l2_subscribed_event, list);
+ list_del(&sev->list);
+ }
+ spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
+ kfree(sev);
+ } while (sev);
+}
+
+int v4l2_event_unsubscribe(struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ struct v4l2_subscribed_event *sev;
+ unsigned long flags;
+
+ if (sub->type == V4L2_EVENT_ALL) {
+ v4l2_event_unsubscribe_all(fh);
+ return 0;
+ }
+
+ spin_lock_irqsave(&fh->vdev->fh_lock, flags);
+
+ sev = v4l2_event_subscribed(fh, sub->type);
+ if (sev != NULL)
+ list_del(&sev->list);
+
+ spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
+
+ kfree(sev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe);
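
From the driver side the queueing half of this API is a single call: fill in a struct v4l2_event and hand it to v4l2_event_queue(), which copies it to every subscribed file handle and wakes up anyone blocked in VIDIOC_DQEVENT. A sketch, with a hypothetical driver-private event type:

#include <linux/string.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-event.h>

#define FOO_EVENT_EOS	1	/* made-up event type, for this sketch only */

/* Signal an "end of stream" event to all subscribed file handles. */
static void foo_signal_eos(struct video_device *vdev)
{
	struct v4l2_event ev;

	memset(&ev, 0, sizeof(ev));
	ev.type = FOO_EVENT_EOS;
	v4l2_event_queue(vdev, &ev);	/* wakes waiters in v4l2_event_dequeue() */
}
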
diff --git a/drivers/media/video/v4l2-fh.c b/drivers/media/video/v4l2-fh.c
new file mode 100644
index 000000000000..d78f184f40c5
--- /dev/null
+++ b/drivers/media/video/v4l2-fh.c
@@ -0,0 +1,79 @@
+/*
+ * v4l2-fh.c
+ *
+ * V4L2 file handles.
+ *
+ * Copyright (C) 2009--2010 Nokia Corporation.
+ *
+ * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#include <linux/bitops.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+
+int v4l2_fh_init(struct v4l2_fh *fh, struct video_device *vdev)
+{
+ fh->vdev = vdev;
+ INIT_LIST_HEAD(&fh->list);
+ set_bit(V4L2_FL_USES_V4L2_FH, &fh->vdev->flags);
+
+ /*
+ * fh->events only needs to be initialized if the driver
+ * supports the VIDIOC_SUBSCRIBE_EVENT ioctl.
+ */
+ if (vdev->ioctl_ops && vdev->ioctl_ops->vidioc_subscribe_event)
+ return v4l2_event_init(fh);
+
+ fh->events = NULL;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_fh_init);
+
+void v4l2_fh_add(struct v4l2_fh *fh)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&fh->vdev->fh_lock, flags);
+ list_add(&fh->list, &fh->vdev->fh_list);
+ spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
+}
+EXPORT_SYMBOL_GPL(v4l2_fh_add);
+
+void v4l2_fh_del(struct v4l2_fh *fh)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&fh->vdev->fh_lock, flags);
+ list_del_init(&fh->list);
+ spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
+}
+EXPORT_SYMBOL_GPL(v4l2_fh_del);
+
+void v4l2_fh_exit(struct v4l2_fh *fh)
+{
+ if (fh->vdev == NULL)
+ return;
+
+ fh->vdev = NULL;
+
+ v4l2_event_free(fh);
+}
+EXPORT_SYMBOL_GPL(v4l2_fh_exit);
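
Together with v4l2-event.c above, these helpers give drivers a per-open file handle. A sketch of the intended life cycle in a driver's open() and release() follows; struct foo_fh and the foo_* functions are hypothetical, and v4l2_event_alloc() only succeeds if the driver also implements vidioc_subscribe_event (otherwise v4l2_fh_init() leaves fh->events NULL, as in the code above).

#include <linux/fs.h>
#include <linux/slab.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>

struct foo_fh {
	struct v4l2_fh fh;		/* embedded per-open state */
};

static int foo_open(struct file *file)
{
	struct video_device *vdev = video_devdata(file);
	struct foo_fh *ffh;
	int ret;

	ffh = kzalloc(sizeof(*ffh), GFP_KERNEL);
	if (!ffh)
		return -ENOMEM;

	ret = v4l2_fh_init(&ffh->fh, vdev);
	if (ret)
		goto err_free;

	/* Pre-allocate room for a few pending events on this handle. */
	ret = v4l2_event_alloc(&ffh->fh, 8);
	if (ret)
		goto err_exit;

	v4l2_fh_add(&ffh->fh);
	file->private_data = ffh;
	return 0;

err_exit:
	v4l2_fh_exit(&ffh->fh);
err_free:
	kfree(ffh);
	return ret;
}

static int foo_release(struct file *file)
{
	struct foo_fh *ffh = file->private_data;

	v4l2_fh_del(&ffh->fh);
	v4l2_fh_exit(&ffh->fh);
	kfree(ffh);
	return 0;
}
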
diff --git a/drivers/media/video/v4l2-ioctl.c b/drivers/media/video/v4l2-ioctl.c
index 7d59c107f13b..0eeceae50329 100644
--- a/drivers/media/video/v4l2-ioctl.c
+++ b/drivers/media/video/v4l2-ioctl.c
@@ -26,6 +26,8 @@
#endif
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-event.h>
#include <media/v4l2-chip-ident.h>
#define dbgarg(cmd, fmt, arg...) \
@@ -291,6 +293,9 @@ static const char *v4l2_ioctls[] = {
[_IOC_NR(VIDIOC_QUERY_DV_PRESET)] = "VIDIOC_QUERY_DV_PRESET",
[_IOC_NR(VIDIOC_S_DV_TIMINGS)] = "VIDIOC_S_DV_TIMINGS",
[_IOC_NR(VIDIOC_G_DV_TIMINGS)] = "VIDIOC_G_DV_TIMINGS",
+ [_IOC_NR(VIDIOC_DQEVENT)] = "VIDIOC_DQEVENT",
+ [_IOC_NR(VIDIOC_SUBSCRIBE_EVENT)] = "VIDIOC_SUBSCRIBE_EVENT",
+ [_IOC_NR(VIDIOC_UNSUBSCRIBE_EVENT)] = "VIDIOC_UNSUBSCRIBE_EVENT",
};
#define V4L2_IOCTLS ARRAY_SIZE(v4l2_ioctls)
@@ -610,17 +615,33 @@ static long __video_do_ioctl(struct file *file,
void *fh = file->private_data;
long ret = -EINVAL;
+ if (ops == NULL) {
+ printk(KERN_WARNING "videodev: \"%s\" has no ioctl_ops.\n",
+ vfd->name);
+ return -EINVAL;
+ }
+
+#ifdef CONFIG_VIDEO_V4L1_COMPAT
+ /********************************************************
+ All other V4L1 calls are handled by v4l1_compat module.
+ Those calls will be translated into V4L2 calls, and
+ __video_do_ioctl will be called again, with one or more
+ V4L2 ioctls.
+ ********************************************************/
+ if (_IOC_TYPE(cmd) == 'v' && cmd != VIDIOCGMBUF &&
+ _IOC_NR(cmd) < BASE_VIDIOCPRIVATE) {
+ return v4l_compat_translate_ioctl(file, cmd, arg,
+ __video_do_ioctl);
+ }
+#endif
+
if ((vfd->debug & V4L2_DEBUG_IOCTL) &&
!(vfd->debug & V4L2_DEBUG_IOCTL_ARG)) {
v4l_print_ioctl(vfd->name, cmd);
printk(KERN_CONT "\n");
}
- if (ops == NULL) {
- printk(KERN_WARNING "videodev: \"%s\" has no ioctl_ops.\n",
- vfd->name);
- return -EINVAL;
- }
+ switch (cmd) {
#ifdef CONFIG_VIDEO_V4L1_COMPAT
/***********************************************************
@@ -630,31 +651,21 @@ static long __video_do_ioctl(struct file *file,
***********************************************************/
/* --- streaming capture ------------------------------------- */
- if (cmd == VIDIOCGMBUF) {
+ case VIDIOCGMBUF:
+ {
struct video_mbuf *p = arg;
if (!ops->vidiocgmbuf)
- return ret;
+ break;
ret = ops->vidiocgmbuf(file, fh, p);
if (!ret)
dbgarg(cmd, "size=%d, frames=%d, offsets=0x%08lx\n",
p->size, p->frames,
(unsigned long)p->offsets);
- return ret;
+ break;
}
-
- /********************************************************
- All other V4L1 calls are handled by v4l1_compat module.
- Those calls will be translated into V4L2 calls, and
- __video_do_ioctl will be called again, with one or more
- V4L2 ioctls.
- ********************************************************/
- if (_IOC_TYPE(cmd) == 'v' && _IOC_NR(cmd) < BASE_VIDIOCPRIVATE)
- return v4l_compat_translate_ioctl(file, cmd, arg,
- __video_do_ioctl);
#endif
- switch (cmd) {
/* --- capabilities ------------------------------------------ */
case VIDIOC_QUERYCAP:
{
@@ -1072,7 +1083,7 @@ static long __video_do_ioctl(struct file *file,
id &= ~curr_id;
}
if (i <= index)
- return -EINVAL;
+ break;
v4l2_video_std_construct(p, curr_id, descr);
@@ -1597,7 +1608,7 @@ static long __video_do_ioctl(struct file *file,
v4l2_std_id std = vfd->current_norm;
if (p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
- return -EINVAL;
+ break;
ret = 0;
if (ops->vidioc_g_std)
@@ -1942,7 +1953,55 @@ static long __video_do_ioctl(struct file *file,
}
break;
}
+ case VIDIOC_DQEVENT:
+ {
+ struct v4l2_event *ev = arg;
+
+ if (!ops->vidioc_subscribe_event)
+ break;
+ ret = v4l2_event_dequeue(fh, ev, file->f_flags & O_NONBLOCK);
+ if (ret < 0) {
+ dbgarg(cmd, "no pending events?");
+ break;
+ }
+ dbgarg(cmd,
+ "pending=%d, type=0x%8.8x, sequence=%d, "
+ "timestamp=%lu.%9.9lu ",
+ ev->pending, ev->type, ev->sequence,
+ ev->timestamp.tv_sec, ev->timestamp.tv_nsec);
+ break;
+ }
+ case VIDIOC_SUBSCRIBE_EVENT:
+ {
+ struct v4l2_event_subscription *sub = arg;
+
+ if (!ops->vidioc_subscribe_event)
+ break;
+
+ ret = ops->vidioc_subscribe_event(fh, sub);
+ if (ret < 0) {
+ dbgarg(cmd, "failed, ret=%ld", ret);
+ break;
+ }
+ dbgarg(cmd, "type=0x%8.8x", sub->type);
+ break;
+ }
+ case VIDIOC_UNSUBSCRIBE_EVENT:
+ {
+ struct v4l2_event_subscription *sub = arg;
+
+ if (!ops->vidioc_unsubscribe_event)
+ break;
+
+ ret = ops->vidioc_unsubscribe_event(fh, sub);
+ if (ret < 0) {
+ dbgarg(cmd, "failed, ret=%ld", ret);
+ break;
+ }
+ dbgarg(cmd, "type=0x%8.8x", sub->type);
+ break;
+ }
default:
{
if (!ops->vidioc_default)
@@ -2006,7 +2065,7 @@ long video_ioctl2(struct file *file,
{
char sbuf[128];
void *mbuf = NULL;
- void *parg = NULL;
+ void *parg = (void *)arg;
long err = -EINVAL;
int is_ext_ctrl;
size_t ctrls_size = 0;
diff --git a/drivers/media/video/v4l2-mem2mem.c b/drivers/media/video/v4l2-mem2mem.c
new file mode 100644
index 000000000000..f45f9405ea39
--- /dev/null
+++ b/drivers/media/video/v4l2-mem2mem.c
@@ -0,0 +1,633 @@
+/*
+ * Memory-to-memory device framework for Video for Linux 2 and videobuf.
+ *
+ * Helper functions for devices that use videobuf buffers for both their
+ * source and destination.
+ *
+ * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
+ * Pawel Osciak, <p.osciak@samsung.com>
+ * Marek Szyprowski, <m.szyprowski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include <media/videobuf-core.h>
+#include <media/v4l2-mem2mem.h>
+
+MODULE_DESCRIPTION("Mem to mem device framework for videobuf");
+MODULE_AUTHOR("Pawel Osciak, <p.osciak@samsung.com>");
+MODULE_LICENSE("GPL");
+
+static bool debug;
+module_param(debug, bool, 0644);
+
+#define dprintk(fmt, arg...) \
+ do { \
+ if (debug) \
+ printk(KERN_DEBUG "%s: " fmt, __func__, ## arg);\
+ } while (0)
+
+
+/* Instance is already queued on the job_queue */
+#define TRANS_QUEUED (1 << 0)
+/* Instance is currently running in hardware */
+#define TRANS_RUNNING (1 << 1)
+
+
+/* Offset base for buffers on the destination queue - used to distinguish
+ * between source and destination buffers when mmapping - they receive the same
+ * offsets but for different queues */
+#define DST_QUEUE_OFF_BASE (1 << 30)
+
+
+/**
+ * struct v4l2_m2m_dev - per-device context
+ * @curr_ctx: currently running instance
+ * @job_queue: instances queued to run
+ * @job_spinlock: protects job_queue
+ * @m2m_ops: driver callbacks
+ */
+struct v4l2_m2m_dev {
+ struct v4l2_m2m_ctx *curr_ctx;
+
+ struct list_head job_queue;
+ spinlock_t job_spinlock;
+
+ struct v4l2_m2m_ops *m2m_ops;
+};
+
+static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx,
+ enum v4l2_buf_type type)
+{
+ switch (type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ return &m2m_ctx->cap_q_ctx;
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ return &m2m_ctx->out_q_ctx;
+ default:
+ printk(KERN_ERR "Invalid buffer type\n");
+ return NULL;
+ }
+}
+
+/**
+ * v4l2_m2m_get_vq() - return videobuf_queue for the given type
+ */
+struct videobuf_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
+ enum v4l2_buf_type type)
+{
+ struct v4l2_m2m_queue_ctx *q_ctx;
+
+ q_ctx = get_queue_ctx(m2m_ctx, type);
+ if (!q_ctx)
+ return NULL;
+
+ return &q_ctx->q;
+}
+EXPORT_SYMBOL(v4l2_m2m_get_vq);
+
+/**
+ * v4l2_m2m_next_buf() - return next buffer from the list of ready buffers
+ */
+void *v4l2_m2m_next_buf(struct v4l2_m2m_ctx *m2m_ctx, enum v4l2_buf_type type)
+{
+ struct v4l2_m2m_queue_ctx *q_ctx;
+ struct videobuf_buffer *vb = NULL;
+ unsigned long flags;
+
+ q_ctx = get_queue_ctx(m2m_ctx, type);
+ if (!q_ctx)
+ return NULL;
+
+ spin_lock_irqsave(q_ctx->q.irqlock, flags);
+
+ if (list_empty(&q_ctx->rdy_queue))
+ goto end;
+
+ vb = list_entry(q_ctx->rdy_queue.next, struct videobuf_buffer, queue);
+ vb->state = VIDEOBUF_ACTIVE;
+
+end:
+ spin_unlock_irqrestore(q_ctx->q.irqlock, flags);
+ return vb;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf);
+
+/**
+ * v4l2_m2m_buf_remove() - take off a buffer from the list of ready buffers and
+ * return it
+ */
+void *v4l2_m2m_buf_remove(struct v4l2_m2m_ctx *m2m_ctx, enum v4l2_buf_type type)
+{
+ struct v4l2_m2m_queue_ctx *q_ctx;
+ struct videobuf_buffer *vb = NULL;
+ unsigned long flags;
+
+ q_ctx = get_queue_ctx(m2m_ctx, type);
+ if (!q_ctx)
+ return NULL;
+
+ spin_lock_irqsave(q_ctx->q.irqlock, flags);
+ if (!list_empty(&q_ctx->rdy_queue)) {
+ vb = list_entry(q_ctx->rdy_queue.next, struct videobuf_buffer,
+ queue);
+ list_del(&vb->queue);
+ q_ctx->num_rdy--;
+ }
+ spin_unlock_irqrestore(q_ctx->q.irqlock, flags);
+
+ return vb;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove);
+
+/*
+ * Scheduling handlers
+ */
+
+/**
+ * v4l2_m2m_get_curr_priv() - return driver private data for the currently
+ * running instance or NULL if no instance is running
+ */
+void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev)
+{
+ unsigned long flags;
+ void *ret = NULL;
+
+ spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
+ if (m2m_dev->curr_ctx)
+ ret = m2m_dev->curr_ctx->priv;
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(v4l2_m2m_get_curr_priv);
+
+/**
+ * v4l2_m2m_try_run() - select next job to perform and run it if possible
+ *
+ * Get next transaction (if present) from the waiting jobs list and run it.
+ */
+static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
+ if (NULL != m2m_dev->curr_ctx) {
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+ dprintk("Another instance is running, won't run now\n");
+ return;
+ }
+
+ if (list_empty(&m2m_dev->job_queue)) {
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+ dprintk("No job pending\n");
+ return;
+ }
+
+ m2m_dev->curr_ctx = list_entry(m2m_dev->job_queue.next,
+ struct v4l2_m2m_ctx, queue);
+ m2m_dev->curr_ctx->job_flags |= TRANS_RUNNING;
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+
+ m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv);
+}
+
+/**
+ * v4l2_m2m_try_schedule() - check whether an instance is ready to be added to
+ * the pending job queue and add it if so.
+ * @m2m_ctx: m2m context assigned to the instance to be checked
+ *
+ * There are three basic requirements an instance has to meet to be able to run:
+ * 1) at least one source buffer has to be queued,
+ * 2) at least one destination buffer has to be queued,
+ * 3) streaming has to be on.
+ *
+ * There may also be additional, custom requirements. In such a case the driver
+ * should supply a custom callback (job_ready in v4l2_m2m_ops) that should
+ * return 1 if the instance is ready.
+ * An example of the above could be an instance that requires more than one
+ * src/dst buffer per transaction.
+ */
+static void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
+{
+ struct v4l2_m2m_dev *m2m_dev;
+ unsigned long flags_job, flags;
+
+ m2m_dev = m2m_ctx->m2m_dev;
+ dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);
+
+ if (!m2m_ctx->out_q_ctx.q.streaming
+ || !m2m_ctx->cap_q_ctx.q.streaming) {
+ dprintk("Streaming needs to be on for both queues\n");
+ return;
+ }
+
+ spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
+ if (m2m_ctx->job_flags & TRANS_QUEUED) {
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
+ dprintk("On job queue already\n");
+ return;
+ }
+
+ spin_lock_irqsave(m2m_ctx->out_q_ctx.q.irqlock, flags);
+ if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)) {
+ spin_unlock_irqrestore(m2m_ctx->out_q_ctx.q.irqlock, flags);
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
+ dprintk("No input buffers available\n");
+ return;
+ }
+ if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)) {
+ spin_unlock_irqrestore(m2m_ctx->out_q_ctx.q.irqlock, flags);
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
+ dprintk("No output buffers available\n");
+ return;
+ }
+ spin_unlock_irqrestore(m2m_ctx->out_q_ctx.q.irqlock, flags);
+
+ if (m2m_dev->m2m_ops->job_ready
+ && (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
+ dprintk("Driver not ready\n");
+ return;
+ }
+
+ list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue);
+ m2m_ctx->job_flags |= TRANS_QUEUED;
+
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
+
+ v4l2_m2m_try_run(m2m_dev);
+}
+
+/**
+ * v4l2_m2m_job_finish() - inform the framework that a job has been finished
+ * and have it clean up
+ *
+ * Called by a driver to yield back the device after it has finished with it.
+ * Should be called as soon as possible after reaching a state which allows
+ * other instances to take control of the device.
+ *
+ * This function has to be called only after device_run() callback has been
+ * called on the driver. To prevent recursion, it should not be called directly
+ * from the device_run() callback though.
+ */
+void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
+ struct v4l2_m2m_ctx *m2m_ctx)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
+ if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) {
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+ dprintk("Called by an instance not currently running\n");
+ return;
+ }
+
+ list_del(&m2m_dev->curr_ctx->queue);
+ m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
+ m2m_dev->curr_ctx = NULL;
+
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+
+ /* This instance might have more buffers ready, but since we do not
+ * allow more than one job on the job_queue per instance, each has
+ * to be scheduled separately after the previous one finishes. */
+ v4l2_m2m_try_schedule(m2m_ctx);
+ v4l2_m2m_try_run(m2m_dev);
+}
+EXPORT_SYMBOL(v4l2_m2m_job_finish);
+
+/**
+ * v4l2_m2m_reqbufs() - multi-queue-aware REQBUFS multiplexer
+ */
+int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ struct v4l2_requestbuffers *reqbufs)
+{
+ struct videobuf_queue *vq;
+
+ vq = v4l2_m2m_get_vq(m2m_ctx, reqbufs->type);
+ return videobuf_reqbufs(vq, reqbufs);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs);
+
+/**
+ * v4l2_m2m_querybuf() - multi-queue-aware QUERYBUF multiplexer
+ *
+ * See v4l2_m2m_mmap() documentation for details.
+ */
+int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ struct v4l2_buffer *buf)
+{
+ struct videobuf_queue *vq;
+ int ret;
+
+ vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
+ ret = videobuf_querybuf(vq, buf);
+
+ if (buf->memory == V4L2_MEMORY_MMAP
+ && vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ buf->m.offset += DST_QUEUE_OFF_BASE;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf);
+
+/**
+ * v4l2_m2m_qbuf() - enqueue a source or destination buffer, depending on
+ * the type
+ */
+int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ struct v4l2_buffer *buf)
+{
+ struct videobuf_queue *vq;
+ int ret;
+
+ vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
+ ret = videobuf_qbuf(vq, buf);
+ if (!ret)
+ v4l2_m2m_try_schedule(m2m_ctx);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf);
+
+/**
+ * v4l2_m2m_dqbuf() - dequeue a source or destination buffer, depending on
+ * the type
+ */
+int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ struct v4l2_buffer *buf)
+{
+ struct videobuf_queue *vq;
+
+ vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
+ return videobuf_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);
+
+/**
+ * v4l2_m2m_streamon() - turn on streaming for a video queue
+ */
+int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ enum v4l2_buf_type type)
+{
+ struct videobuf_queue *vq;
+ int ret;
+
+ vq = v4l2_m2m_get_vq(m2m_ctx, type);
+ ret = videobuf_streamon(vq);
+ if (!ret)
+ v4l2_m2m_try_schedule(m2m_ctx);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_streamon);
+
+/**
+ * v4l2_m2m_streamoff() - turn off streaming for a video queue
+ */
+int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ enum v4l2_buf_type type)
+{
+ struct videobuf_queue *vq;
+
+ vq = v4l2_m2m_get_vq(m2m_ctx, type);
+ return videobuf_streamoff(vq);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff);
+
+/**
+ * v4l2_m2m_poll() - poll replacement, for destination buffers only
+ *
+ * Call from the driver's poll() function. Will poll both queues. If a buffer
+ * is available to dequeue (with dqbuf) from the source queue, this indicates
+ * that a non-blocking write can be performed; for the destination queue it
+ * indicates that a read can be performed.
+ */
+unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ struct poll_table_struct *wait)
+{
+ struct videobuf_queue *src_q, *dst_q;
+ struct videobuf_buffer *src_vb = NULL, *dst_vb = NULL;
+ unsigned int rc = 0;
+
+ src_q = v4l2_m2m_get_src_vq(m2m_ctx);
+ dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);
+
+ mutex_lock(&src_q->vb_lock);
+ mutex_lock(&dst_q->vb_lock);
+
+ if (src_q->streaming && !list_empty(&src_q->stream))
+ src_vb = list_first_entry(&src_q->stream,
+ struct videobuf_buffer, stream);
+ if (dst_q->streaming && !list_empty(&dst_q->stream))
+ dst_vb = list_first_entry(&dst_q->stream,
+ struct videobuf_buffer, stream);
+
+ if (!src_vb && !dst_vb) {
+ rc = POLLERR;
+ goto end;
+ }
+
+ if (src_vb) {
+ poll_wait(file, &src_vb->done, wait);
+ if (src_vb->state == VIDEOBUF_DONE
+ || src_vb->state == VIDEOBUF_ERROR)
+ rc |= POLLOUT | POLLWRNORM;
+ }
+ if (dst_vb) {
+ poll_wait(file, &dst_vb->done, wait);
+ if (dst_vb->state == VIDEOBUF_DONE
+ || dst_vb->state == VIDEOBUF_ERROR)
+ rc |= POLLIN | POLLRDNORM;
+ }
+
+end:
+ mutex_unlock(&dst_q->vb_lock);
+ mutex_unlock(&src_q->vb_lock);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_poll);
+
+/**
+ * v4l2_m2m_mmap() - source and destination queues-aware mmap multiplexer
+ *
+ * Call from driver's mmap() function. Will handle mmap() for both queues
+ * seamlessly for videobuffer, which will receive normal per-queue offsets and
+ * proper videobuf queue pointers. The differentiation is made outside videobuf
+ * by adding a predefined offset to buffers from one of the queues and
+ * subtracting it before passing it back to videobuf. Only drivers (and
+ * thus applications) receive modified offsets.
+ */
+int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ struct vm_area_struct *vma)
+{
+ unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+ struct videobuf_queue *vq;
+
+ if (offset < DST_QUEUE_OFF_BASE) {
+ vq = v4l2_m2m_get_src_vq(m2m_ctx);
+ } else {
+ vq = v4l2_m2m_get_dst_vq(m2m_ctx);
+ vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
+ }
+
+ return videobuf_mmap_mapper(vq, vma);
+}
+EXPORT_SYMBOL(v4l2_m2m_mmap);
+
+/**
+ * v4l2_m2m_init() - initialize per-driver m2m data
+ *
+ * Usually called from driver's probe() function.
+ */
+struct v4l2_m2m_dev *v4l2_m2m_init(struct v4l2_m2m_ops *m2m_ops)
+{
+ struct v4l2_m2m_dev *m2m_dev;
+
+ if (!m2m_ops)
+ return ERR_PTR(-EINVAL);
+
+ BUG_ON(!m2m_ops->device_run);
+ BUG_ON(!m2m_ops->job_abort);
+
+ m2m_dev = kzalloc(sizeof *m2m_dev, GFP_KERNEL);
+ if (!m2m_dev)
+ return ERR_PTR(-ENOMEM);
+
+ m2m_dev->curr_ctx = NULL;
+ m2m_dev->m2m_ops = m2m_ops;
+ INIT_LIST_HEAD(&m2m_dev->job_queue);
+ spin_lock_init(&m2m_dev->job_spinlock);
+
+ return m2m_dev;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_init);
+
+/**
+ * v4l2_m2m_release() - cleans up and frees a m2m_dev structure
+ *
+ * Usually called from driver's remove() function.
+ */
+void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev)
+{
+ kfree(m2m_dev);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_release);
+
+/**
+ * v4l2_m2m_ctx_init() - allocate and initialize a m2m context
+ * @priv - driver's instance private data
+ * @m2m_dev - a previously initialized m2m_dev struct
+ * @vq_init - a callback for queue type-specific initialization function to be
+ * used for initializing videobuf_queues
+ *
+ * Usually called from driver's open() function.
+ */
+struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(void *priv, struct v4l2_m2m_dev *m2m_dev,
+ void (*vq_init)(void *priv, struct videobuf_queue *,
+ enum v4l2_buf_type))
+{
+ struct v4l2_m2m_ctx *m2m_ctx;
+ struct v4l2_m2m_queue_ctx *out_q_ctx, *cap_q_ctx;
+
+ if (!vq_init)
+ return ERR_PTR(-EINVAL);
+
+ m2m_ctx = kzalloc(sizeof *m2m_ctx, GFP_KERNEL);
+ if (!m2m_ctx)
+ return ERR_PTR(-ENOMEM);
+
+ m2m_ctx->priv = priv;
+ m2m_ctx->m2m_dev = m2m_dev;
+
+ out_q_ctx = get_queue_ctx(m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ cap_q_ctx = get_queue_ctx(m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+
+ INIT_LIST_HEAD(&out_q_ctx->rdy_queue);
+ INIT_LIST_HEAD(&cap_q_ctx->rdy_queue);
+
+ INIT_LIST_HEAD(&m2m_ctx->queue);
+
+ vq_init(priv, &out_q_ctx->q, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ vq_init(priv, &cap_q_ctx->q, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ out_q_ctx->q.priv_data = cap_q_ctx->q.priv_data = priv;
+
+ return m2m_ctx;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init);
+
+/**
+ * v4l2_m2m_ctx_release() - release m2m context
+ *
+ * Usually called from driver's release() function.
+ */
+void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx)
+{
+ struct v4l2_m2m_dev *m2m_dev;
+ struct videobuf_buffer *vb;
+ unsigned long flags;
+
+ m2m_dev = m2m_ctx->m2m_dev;
+
+ spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
+ if (m2m_ctx->job_flags & TRANS_RUNNING) {
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+ m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
+ dprintk("m2m_ctx %p running, will wait to complete", m2m_ctx);
+ vb = v4l2_m2m_next_dst_buf(m2m_ctx);
+ BUG_ON(NULL == vb);
+ wait_event(vb->done, vb->state != VIDEOBUF_ACTIVE
+ && vb->state != VIDEOBUF_QUEUED);
+ } else if (m2m_ctx->job_flags & TRANS_QUEUED) {
+ list_del(&m2m_ctx->queue);
+ m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+ dprintk("m2m_ctx: %p had been on queue and was removed\n",
+ m2m_ctx);
+ } else {
+ /* Do nothing, was not on queue/running */
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+ }
+
+ videobuf_stop(&m2m_ctx->cap_q_ctx.q);
+ videobuf_stop(&m2m_ctx->out_q_ctx.q);
+
+ videobuf_mmap_free(&m2m_ctx->cap_q_ctx.q);
+ videobuf_mmap_free(&m2m_ctx->out_q_ctx.q);
+
+ kfree(m2m_ctx);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_release);
+
+/**
+ * v4l2_m2m_buf_queue() - add a buffer to the proper ready buffers list.
+ *
+ * Call from buf_queue(), videobuf_queue_ops callback.
+ *
+ * Locking: Caller holds q->irqlock (taken by videobuf before calling buf_queue
+ * callback in the driver).
+ */
+void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, struct videobuf_queue *vq,
+ struct videobuf_buffer *vb)
+{
+ struct v4l2_m2m_queue_ctx *q_ctx;
+
+ q_ctx = get_queue_ctx(m2m_ctx, vq->type);
+ if (!q_ctx)
+ return;
+
+ list_add_tail(&vb->queue, &q_ctx->rdy_queue);
+ q_ctx->num_rdy++;
+
+ vb->state = VIDEOBUF_QUEUED;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);
+
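
A driver builds on this framework by handing a struct v4l2_m2m_ops to v4l2_m2m_init() at probe time, creating one v4l2_m2m_ctx per open file with v4l2_m2m_ctx_init() (passing its videobuf queue-init callback), and reporting hardware completion with v4l2_m2m_job_finish(). A minimal sketch of the two mandatory callbacks and a completion handler is below; everything named foo_* is hypothetical.

#include <linux/err.h>
#include <media/videobuf-core.h>
#include <media/v4l2-mem2mem.h>

/* Hypothetical per-open context of a mem-to-mem driver. */
struct foo_ctx {
	struct v4l2_m2m_dev *m2m_dev;
	struct v4l2_m2m_ctx *m2m_ctx;
};

/* Called by the framework once both queues have buffers and are streaming. */
static void foo_device_run(void *priv)
{
	struct foo_ctx *ctx = priv;
	struct videobuf_buffer *src, *dst;

	src = v4l2_m2m_next_buf(ctx->m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
	dst = v4l2_m2m_next_buf(ctx->m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
	/* ... program the (imaginary) hardware to process src into dst ... */
}

static void foo_job_abort(void *priv)
{
	/* ... ask the hardware to stop; foo_job_done() still runs afterwards ... */
}

static struct v4l2_m2m_ops foo_m2m_ops = {
	.device_run	= foo_device_run,
	.job_abort	= foo_job_abort,
};

static struct v4l2_m2m_dev *foo_m2m_dev;

/* Typically called from the driver's probe(). */
static int foo_setup(void)
{
	foo_m2m_dev = v4l2_m2m_init(&foo_m2m_ops);
	return IS_ERR(foo_m2m_dev) ? PTR_ERR(foo_m2m_dev) : 0;
}

/* Completion path (e.g. the driver's interrupt handler): return the finished
 * buffers and let the framework schedule the next queued job. */
static void foo_job_done(struct foo_ctx *ctx)
{
	struct videobuf_buffer *src, *dst;

	src = v4l2_m2m_buf_remove(ctx->m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
	dst = v4l2_m2m_buf_remove(ctx->m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
	src->state = VIDEOBUF_DONE;
	dst->state = VIDEOBUF_DONE;
	wake_up(&src->done);
	wake_up(&dst->done);
	v4l2_m2m_job_finish(ctx->m2m_dev, ctx->m2m_ctx);
}
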
diff --git a/drivers/media/video/videobuf-core.c b/drivers/media/video/videobuf-core.c
index bb0a1c8de414..7d3378437ded 100644
--- a/drivers/media/video/videobuf-core.c
+++ b/drivers/media/video/videobuf-core.c
@@ -24,10 +24,15 @@
#include <media/videobuf-core.h>
#define MAGIC_BUFFER 0x20070728
-#define MAGIC_CHECK(is, should) do { \
- if (unlikely((is) != (should))) { \
- printk(KERN_ERR "magic mismatch: %x (expected %x)\n", is, should); \
- BUG(); } } while (0)
+#define MAGIC_CHECK(is, should) \
+ do { \
+ if (unlikely((is) != (should))) { \
+ printk(KERN_ERR \
+ "magic mismatch: %x (expected %x)\n", \
+ is, should); \
+ BUG(); \
+ } \
+ } while (0)
static int debug;
module_param(debug, int, 0644);
@@ -36,16 +41,18 @@ MODULE_DESCRIPTION("helper module to manage video4linux buffers");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>");
MODULE_LICENSE("GPL");
-#define dprintk(level, fmt, arg...) do { \
- if (debug >= level) \
- printk(KERN_DEBUG "vbuf: " fmt , ## arg); } while (0)
+#define dprintk(level, fmt, arg...) \
+ do { \
+ if (debug >= level) \
+ printk(KERN_DEBUG "vbuf: " fmt, ## arg); \
+ } while (0)
/* --------------------------------------------------------------------- */
#define CALL(q, f, arg...) \
((q->int_ops->f) ? q->int_ops->f(arg) : 0)
-void *videobuf_alloc(struct videobuf_queue *q)
+struct videobuf_buffer *videobuf_alloc(struct videobuf_queue *q)
{
struct videobuf_buffer *vb;
@@ -57,14 +64,14 @@ void *videobuf_alloc(struct videobuf_queue *q)
}
vb = q->int_ops->alloc(q->msize);
-
if (NULL != vb) {
init_waitqueue_head(&vb->done);
- vb->magic = MAGIC_BUFFER;
+ vb->magic = MAGIC_BUFFER;
}
return vb;
}
+EXPORT_SYMBOL_GPL(videobuf_alloc);
#define WAITON_CONDITION (vb->state != VIDEOBUF_ACTIVE &&\
vb->state != VIDEOBUF_QUEUED)
@@ -86,6 +93,7 @@ int videobuf_waiton(struct videobuf_buffer *vb, int non_blocking, int intr)
return 0;
}
+EXPORT_SYMBOL_GPL(videobuf_waiton);
int videobuf_iolock(struct videobuf_queue *q, struct videobuf_buffer *vb,
struct v4l2_framebuffer *fbuf)
@@ -95,16 +103,16 @@ int videobuf_iolock(struct videobuf_queue *q, struct videobuf_buffer *vb,
return CALL(q, iolock, q, vb, fbuf);
}
+EXPORT_SYMBOL_GPL(videobuf_iolock);
-void *videobuf_queue_to_vmalloc (struct videobuf_queue *q,
- struct videobuf_buffer *buf)
+void *videobuf_queue_to_vaddr(struct videobuf_queue *q,
+ struct videobuf_buffer *buf)
{
- if (q->int_ops->vmalloc)
- return q->int_ops->vmalloc(buf);
- else
- return NULL;
+ if (q->int_ops->vaddr)
+ return q->int_ops->vaddr(buf);
+ return NULL;
}
-EXPORT_SYMBOL_GPL(videobuf_queue_to_vmalloc);
+EXPORT_SYMBOL_GPL(videobuf_queue_to_vaddr);
/* --------------------------------------------------------------------- */
@@ -146,6 +154,7 @@ void videobuf_queue_core_init(struct videobuf_queue *q,
init_waitqueue_head(&q->wait);
INIT_LIST_HEAD(&q->stream);
}
+EXPORT_SYMBOL_GPL(videobuf_queue_core_init);
/* Locking: Only used in bttv; unsafe, find a way to remove it */
int videobuf_queue_is_busy(struct videobuf_queue *q)
@@ -184,6 +193,7 @@ int videobuf_queue_is_busy(struct videobuf_queue *q)
}
return 0;
}
+EXPORT_SYMBOL_GPL(videobuf_queue_is_busy);
/* Locking: Caller holds q->vb_lock */
void videobuf_queue_cancel(struct videobuf_queue *q)
@@ -216,6 +226,7 @@ void videobuf_queue_cancel(struct videobuf_queue *q)
}
INIT_LIST_HEAD(&q->stream);
}
+EXPORT_SYMBOL_GPL(videobuf_queue_cancel);
/* --------------------------------------------------------------------- */
@@ -237,6 +248,7 @@ enum v4l2_field videobuf_next_field(struct videobuf_queue *q)
}
return field;
}
+EXPORT_SYMBOL_GPL(videobuf_next_field);
/* Locking: Caller holds q->vb_lock */
static void videobuf_status(struct videobuf_queue *q, struct v4l2_buffer *b,
@@ -273,8 +285,10 @@ static void videobuf_status(struct videobuf_queue *q, struct v4l2_buffer *b,
case VIDEOBUF_ACTIVE:
b->flags |= V4L2_BUF_FLAG_QUEUED;
break;
- case VIDEOBUF_DONE:
case VIDEOBUF_ERROR:
+ b->flags |= V4L2_BUF_FLAG_ERROR;
+ /* fall through */
+ case VIDEOBUF_DONE:
b->flags |= V4L2_BUF_FLAG_DONE;
break;
case VIDEOBUF_NEEDS_INIT:
@@ -298,20 +312,15 @@ static void videobuf_status(struct videobuf_queue *q, struct v4l2_buffer *b,
static int __videobuf_mmap_free(struct videobuf_queue *q)
{
int i;
- int rc;
if (!q)
return 0;
MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
-
- rc = CALL(q, mmap_free, q);
-
- q->is_mmapped = 0;
-
- if (rc < 0)
- return rc;
+ for (i = 0; i < VIDEO_MAX_FRAME; i++)
+ if (q->bufs[i] && q->bufs[i]->map)
+ return -EBUSY;
for (i = 0; i < VIDEO_MAX_FRAME; i++) {
if (NULL == q->bufs[i])
@@ -321,7 +330,7 @@ static int __videobuf_mmap_free(struct videobuf_queue *q)
q->bufs[i] = NULL;
}
- return rc;
+ return 0;
}
int videobuf_mmap_free(struct videobuf_queue *q)
@@ -332,6 +341,7 @@ int videobuf_mmap_free(struct videobuf_queue *q)
mutex_unlock(&q->vb_lock);
return ret;
}
+EXPORT_SYMBOL_GPL(videobuf_mmap_free);
/* Locking: Caller holds q->vb_lock */
int __videobuf_mmap_setup(struct videobuf_queue *q,
@@ -351,7 +361,7 @@ int __videobuf_mmap_setup(struct videobuf_queue *q,
for (i = 0; i < bcount; i++) {
q->bufs[i] = videobuf_alloc(q);
- if (q->bufs[i] == NULL)
+ if (NULL == q->bufs[i])
break;
q->bufs[i]->i = i;
@@ -372,11 +382,11 @@ int __videobuf_mmap_setup(struct videobuf_queue *q,
if (!i)
return -ENOMEM;
- dprintk(1, "mmap setup: %d buffers, %d bytes each\n",
- i, bsize);
+ dprintk(1, "mmap setup: %d buffers, %d bytes each\n", i, bsize);
return i;
}
+EXPORT_SYMBOL_GPL(__videobuf_mmap_setup);
int videobuf_mmap_setup(struct videobuf_queue *q,
unsigned int bcount, unsigned int bsize,
@@ -388,6 +398,7 @@ int videobuf_mmap_setup(struct videobuf_queue *q,
mutex_unlock(&q->vb_lock);
return ret;
}
+EXPORT_SYMBOL_GPL(videobuf_mmap_setup);
int videobuf_reqbufs(struct videobuf_queue *q,
struct v4l2_requestbuffers *req)
@@ -432,7 +443,7 @@ int videobuf_reqbufs(struct videobuf_queue *q,
q->ops->buf_setup(q, &count, &size);
dprintk(1, "reqbufs: bufs=%d, size=0x%x [%u pages total]\n",
count, size,
- (unsigned int)((count*PAGE_ALIGN(size))>>PAGE_SHIFT) );
+ (unsigned int)((count * PAGE_ALIGN(size)) >> PAGE_SHIFT));
retval = __videobuf_mmap_setup(q, count, size, req->memory);
if (retval < 0) {
@@ -447,6 +458,7 @@ int videobuf_reqbufs(struct videobuf_queue *q,
mutex_unlock(&q->vb_lock);
return retval;
}
+EXPORT_SYMBOL_GPL(videobuf_reqbufs);
int videobuf_querybuf(struct videobuf_queue *q, struct v4l2_buffer *b)
{
@@ -473,9 +485,9 @@ done:
mutex_unlock(&q->vb_lock);
return ret;
}
+EXPORT_SYMBOL_GPL(videobuf_querybuf);
-int videobuf_qbuf(struct videobuf_queue *q,
- struct v4l2_buffer *b)
+int videobuf_qbuf(struct videobuf_queue *q, struct v4l2_buffer *b)
{
struct videobuf_buffer *buf;
enum v4l2_field field;
@@ -534,6 +546,13 @@ int videobuf_qbuf(struct videobuf_queue *q,
"but buffer addr is zero!\n");
goto done;
}
+ if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT
+ || q->type == V4L2_BUF_TYPE_VBI_OUTPUT
+ || q->type == V4L2_BUF_TYPE_SLICED_VBI_OUTPUT) {
+ buf->size = b->bytesused;
+ buf->field = b->field;
+ buf->ts = b->timestamp;
+ }
break;
case V4L2_MEMORY_USERPTR:
if (b->length < buf->bsize) {
@@ -567,11 +586,11 @@ int videobuf_qbuf(struct videobuf_queue *q,
q->ops->buf_queue(q, buf);
spin_unlock_irqrestore(q->irqlock, flags);
}
- dprintk(1, "qbuf: succeded\n");
+ dprintk(1, "qbuf: succeeded\n");
retval = 0;
wake_up_interruptible_sync(&q->wait);
- done:
+done:
mutex_unlock(&q->vb_lock);
if (b->memory == V4L2_MEMORY_MMAP)
@@ -579,7 +598,7 @@ int videobuf_qbuf(struct videobuf_queue *q,
return retval;
}
-
+EXPORT_SYMBOL_GPL(videobuf_qbuf);
/* Locking: Caller holds q->vb_lock */
static int stream_next_buffer_check_queue(struct videobuf_queue *q, int noblock)
@@ -624,7 +643,6 @@ done:
return retval;
}
-
/* Locking: Caller holds q->vb_lock */
static int stream_next_buffer(struct videobuf_queue *q,
struct videobuf_buffer **vb, int nonblocking)
@@ -647,13 +665,14 @@ done:
}
int videobuf_dqbuf(struct videobuf_queue *q,
- struct v4l2_buffer *b, int nonblocking)
+ struct v4l2_buffer *b, int nonblocking)
{
struct videobuf_buffer *buf = NULL;
int retval;
MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
+ memset(b, 0, sizeof(*b));
mutex_lock(&q->vb_lock);
retval = stream_next_buffer(q, &buf, nonblocking);
@@ -665,28 +684,25 @@ int videobuf_dqbuf(struct videobuf_queue *q,
switch (buf->state) {
case VIDEOBUF_ERROR:
dprintk(1, "dqbuf: state is error\n");
- retval = -EIO;
- CALL(q, sync, q, buf);
- buf->state = VIDEOBUF_IDLE;
break;
case VIDEOBUF_DONE:
dprintk(1, "dqbuf: state is done\n");
- CALL(q, sync, q, buf);
- buf->state = VIDEOBUF_IDLE;
break;
default:
dprintk(1, "dqbuf: state invalid\n");
retval = -EINVAL;
goto done;
}
- list_del(&buf->stream);
- memset(b, 0, sizeof(*b));
+ CALL(q, sync, q, buf);
videobuf_status(q, b, buf, q->type);
-
- done:
+ list_del(&buf->stream);
+ buf->state = VIDEOBUF_IDLE;
+ b->flags &= ~V4L2_BUF_FLAG_DONE;
+done:
mutex_unlock(&q->vb_lock);
return retval;
}
+EXPORT_SYMBOL_GPL(videobuf_dqbuf);
int videobuf_streamon(struct videobuf_queue *q)
{
@@ -709,10 +725,11 @@ int videobuf_streamon(struct videobuf_queue *q)
spin_unlock_irqrestore(q->irqlock, flags);
wake_up_interruptible_sync(&q->wait);
- done:
+done:
mutex_unlock(&q->vb_lock);
return retval;
}
+EXPORT_SYMBOL_GPL(videobuf_streamon);
/* Locking: Caller holds q->vb_lock */
static int __videobuf_streamoff(struct videobuf_queue *q)
@@ -735,6 +752,7 @@ int videobuf_streamoff(struct videobuf_queue *q)
return retval;
}
+EXPORT_SYMBOL_GPL(videobuf_streamoff);
/* Locking: Caller holds q->vb_lock */
static ssize_t videobuf_read_zerocopy(struct videobuf_queue *q,
@@ -774,7 +792,7 @@ static ssize_t videobuf_read_zerocopy(struct videobuf_queue *q,
retval = q->read_buf->size;
}
- done:
+done:
/* cleanup */
q->ops->buf_release(q, q->read_buf);
kfree(q->read_buf);
@@ -782,6 +800,49 @@ static ssize_t videobuf_read_zerocopy(struct videobuf_queue *q,
return retval;
}
+static int __videobuf_copy_to_user(struct videobuf_queue *q,
+ struct videobuf_buffer *buf,
+ char __user *data, size_t count,
+ int nonblocking)
+{
+ void *vaddr = CALL(q, vaddr, buf);
+
+ /* copy to userspace */
+ if (count > buf->size - q->read_off)
+ count = buf->size - q->read_off;
+
+ if (copy_to_user(data, vaddr + q->read_off, count))
+ return -EFAULT;
+
+ return count;
+}
+
+static int __videobuf_copy_stream(struct videobuf_queue *q,
+ struct videobuf_buffer *buf,
+ char __user *data, size_t count, size_t pos,
+ int vbihack, int nonblocking)
+{
+ unsigned int *fc = CALL(q, vaddr, buf);
+
+ if (vbihack) {
+ /* dirty, undocumented hack -- pass the frame counter
+ * within the last four bytes of each vbi data block.
+ * We need that one to maintain backward compatibility
+ * with all the vbi decoding software out there ... */
+ fc += (buf->size >> 2) - 1;
+ *fc = buf->field_count >> 1;
+ dprintk(1, "vbihack: %d\n", *fc);
+ }
+
+ /* copy stuff using the common method */
+ count = __videobuf_copy_to_user(q, buf, data, count, nonblocking);
+
+ if ((count == -EFAULT) && (pos == 0))
+ return -EFAULT;
+
+ return count;
+}
+
ssize_t videobuf_read_one(struct videobuf_queue *q,
char __user *data, size_t count, loff_t *ppos,
int nonblocking)
@@ -850,7 +911,7 @@ ssize_t videobuf_read_one(struct videobuf_queue *q,
}
/* Copy to userspace */
- retval = CALL(q, video_copy_to_user, q, data, count, nonblocking);
+ retval = __videobuf_copy_to_user(q, q->read_buf, data, count, nonblocking);
if (retval < 0)
goto done;
@@ -862,10 +923,11 @@ ssize_t videobuf_read_one(struct videobuf_queue *q,
q->read_buf = NULL;
}
- done:
+done:
mutex_unlock(&q->vb_lock);
return retval;
}
+EXPORT_SYMBOL_GPL(videobuf_read_one);
/* Locking: Caller holds q->vb_lock */
static int __videobuf_read_start(struct videobuf_queue *q)
@@ -917,7 +979,6 @@ static void __videobuf_read_stop(struct videobuf_queue *q)
q->bufs[i] = NULL;
}
q->read_buf = NULL;
-
}
int videobuf_read_start(struct videobuf_queue *q)
@@ -930,6 +991,7 @@ int videobuf_read_start(struct videobuf_queue *q)
return rc;
}
+EXPORT_SYMBOL_GPL(videobuf_read_start);
void videobuf_read_stop(struct videobuf_queue *q)
{
@@ -937,6 +999,7 @@ void videobuf_read_stop(struct videobuf_queue *q)
__videobuf_read_stop(q);
mutex_unlock(&q->vb_lock);
}
+EXPORT_SYMBOL_GPL(videobuf_read_stop);
void videobuf_stop(struct videobuf_queue *q)
{
@@ -950,7 +1013,7 @@ void videobuf_stop(struct videobuf_queue *q)
mutex_unlock(&q->vb_lock);
}
-
+EXPORT_SYMBOL_GPL(videobuf_stop);
ssize_t videobuf_read_stream(struct videobuf_queue *q,
char __user *data, size_t count, loff_t *ppos,
@@ -990,7 +1053,7 @@ ssize_t videobuf_read_stream(struct videobuf_queue *q,
}
if (q->read_buf->state == VIDEOBUF_DONE) {
- rc = CALL(q, copy_stream, q, data + retval, count,
+ rc = __videobuf_copy_stream(q, q->read_buf, data + retval, count,
retval, vbihack, nonblocking);
if (rc < 0) {
retval = rc;
@@ -1019,10 +1082,11 @@ ssize_t videobuf_read_stream(struct videobuf_queue *q,
break;
}
- done:
+done:
mutex_unlock(&q->vb_lock);
return retval;
}
+EXPORT_SYMBOL_GPL(videobuf_read_stream);
unsigned int videobuf_poll_stream(struct file *file,
struct videobuf_queue *q,
@@ -1056,27 +1120,51 @@ unsigned int videobuf_poll_stream(struct file *file,
if (0 == rc) {
poll_wait(file, &buf->done, wait);
if (buf->state == VIDEOBUF_DONE ||
- buf->state == VIDEOBUF_ERROR)
- rc = POLLIN|POLLRDNORM;
+ buf->state == VIDEOBUF_ERROR) {
+ switch (q->type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ case V4L2_BUF_TYPE_VBI_OUTPUT:
+ case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
+ rc = POLLOUT | POLLWRNORM;
+ break;
+ default:
+ rc = POLLIN | POLLRDNORM;
+ break;
+ }
+ }
}
mutex_unlock(&q->vb_lock);
return rc;
}
+EXPORT_SYMBOL_GPL(videobuf_poll_stream);
-int videobuf_mmap_mapper(struct videobuf_queue *q,
- struct vm_area_struct *vma)
+int videobuf_mmap_mapper(struct videobuf_queue *q, struct vm_area_struct *vma)
{
- int retval;
+ int rc = -EINVAL;
+ int i;
MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
+ if (!(vma->vm_flags & VM_WRITE) || !(vma->vm_flags & VM_SHARED)) {
+ dprintk(1, "mmap appl bug: PROT_WRITE and MAP_SHARED are required\n");
+ return -EINVAL;
+ }
+
mutex_lock(&q->vb_lock);
- retval = CALL(q, mmap_mapper, q, vma);
- q->is_mmapped = 1;
+ for (i = 0; i < VIDEO_MAX_FRAME; i++) {
+ struct videobuf_buffer *buf = q->bufs[i];
+
+ if (buf && buf->memory == V4L2_MEMORY_MMAP &&
+ buf->boff == (vma->vm_pgoff << PAGE_SHIFT)) {
+ rc = CALL(q, mmap_mapper, q, buf, vma);
+ break;
+ }
+ }
mutex_unlock(&q->vb_lock);
- return retval;
+ return rc;
}
+EXPORT_SYMBOL_GPL(videobuf_mmap_mapper);
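/*
 * Illustrative sketch, not part of the patch above: the application-side
 * mmap() call that the reworked videobuf_mmap_mapper() expects.  The offset
 * must be the m.offset value returned by VIDIOC_QUERYBUF, and PROT_WRITE
 * plus MAP_SHARED are now mandatory, otherwise the helper returns -EINVAL.
 * Error handling is trimmed; the helper name is made up for the example.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/videodev2.h>

int map_capture_buffer(int fd, unsigned int index, void **start, size_t *length)
{
	struct v4l2_buffer buf;

	memset(&buf, 0, sizeof(buf));
	buf.type   = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	buf.memory = V4L2_MEMORY_MMAP;
	buf.index  = index;
	if (ioctl(fd, VIDIOC_QUERYBUF, &buf) < 0)
		return -1;

	/* PROT_WRITE and MAP_SHARED are required by videobuf_mmap_mapper() */
	*start = mmap(NULL, buf.length, PROT_READ | PROT_WRITE,
		      MAP_SHARED, fd, buf.m.offset);
	if (*start == MAP_FAILED)
		return -1;
	*length = buf.length;
	return 0;
}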
#ifdef CONFIG_VIDEO_V4L1_COMPAT
int videobuf_cgmbuf(struct videobuf_queue *q,
@@ -1107,33 +1195,3 @@ int videobuf_cgmbuf(struct videobuf_queue *q,
EXPORT_SYMBOL_GPL(videobuf_cgmbuf);
#endif
-/* --------------------------------------------------------------------- */
-
-EXPORT_SYMBOL_GPL(videobuf_waiton);
-EXPORT_SYMBOL_GPL(videobuf_iolock);
-
-EXPORT_SYMBOL_GPL(videobuf_alloc);
-
-EXPORT_SYMBOL_GPL(videobuf_queue_core_init);
-EXPORT_SYMBOL_GPL(videobuf_queue_cancel);
-EXPORT_SYMBOL_GPL(videobuf_queue_is_busy);
-
-EXPORT_SYMBOL_GPL(videobuf_next_field);
-EXPORT_SYMBOL_GPL(videobuf_reqbufs);
-EXPORT_SYMBOL_GPL(videobuf_querybuf);
-EXPORT_SYMBOL_GPL(videobuf_qbuf);
-EXPORT_SYMBOL_GPL(videobuf_dqbuf);
-EXPORT_SYMBOL_GPL(videobuf_streamon);
-EXPORT_SYMBOL_GPL(videobuf_streamoff);
-
-EXPORT_SYMBOL_GPL(videobuf_read_start);
-EXPORT_SYMBOL_GPL(videobuf_read_stop);
-EXPORT_SYMBOL_GPL(videobuf_stop);
-EXPORT_SYMBOL_GPL(videobuf_read_stream);
-EXPORT_SYMBOL_GPL(videobuf_read_one);
-EXPORT_SYMBOL_GPL(videobuf_poll_stream);
-
-EXPORT_SYMBOL_GPL(__videobuf_mmap_setup);
-EXPORT_SYMBOL_GPL(videobuf_mmap_setup);
-EXPORT_SYMBOL_GPL(videobuf_mmap_free);
-EXPORT_SYMBOL_GPL(videobuf_mmap_mapper);
diff --git a/drivers/media/video/videobuf-dma-contig.c b/drivers/media/video/videobuf-dma-contig.c
index dce4f3aa4af1..74730c624cfc 100644
--- a/drivers/media/video/videobuf-dma-contig.c
+++ b/drivers/media/video/videobuf-dma-contig.c
@@ -55,14 +55,14 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
struct videobuf_queue *q = map->q;
int i;
- dev_dbg(map->q->dev, "vm_close %p [count=%u,vma=%08lx-%08lx]\n",
+ dev_dbg(q->dev, "vm_close %p [count=%u,vma=%08lx-%08lx]\n",
map, map->count, vma->vm_start, vma->vm_end);
map->count--;
if (0 == map->count) {
struct videobuf_dma_contig_memory *mem;
- dev_dbg(map->q->dev, "munmap %p q=%p\n", map, q);
+ dev_dbg(q->dev, "munmap %p q=%p\n", map, q);
mutex_lock(&q->vb_lock);
/* We need first to cancel streams, before unmapping */
@@ -89,7 +89,7 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
/* vfree is not atomic - can't be
called with IRQ's disabled
*/
- dev_dbg(map->q->dev, "buf[%d] freeing %p\n",
+ dev_dbg(q->dev, "buf[%d] freeing %p\n",
i, mem->vaddr);
dma_free_coherent(q->dev, mem->size,
@@ -190,7 +190,7 @@ static int videobuf_dma_contig_user_get(struct videobuf_dma_contig_memory *mem,
return ret;
}
-static void *__videobuf_alloc(size_t size)
+static struct videobuf_buffer *__videobuf_alloc(size_t size)
{
struct videobuf_dma_contig_memory *mem;
struct videobuf_buffer *vb;
@@ -204,7 +204,7 @@ static void *__videobuf_alloc(size_t size)
return vb;
}
-static void *__videobuf_to_vmalloc(struct videobuf_buffer *buf)
+static void *__videobuf_to_vaddr(struct videobuf_buffer *buf)
{
struct videobuf_dma_contig_memory *mem = buf->priv;
@@ -263,65 +263,34 @@ static int __videobuf_iolock(struct videobuf_queue *q,
return 0;
}
-static int __videobuf_mmap_free(struct videobuf_queue *q)
-{
- unsigned int i;
-
- dev_dbg(q->dev, "%s\n", __func__);
- for (i = 0; i < VIDEO_MAX_FRAME; i++) {
- if (q->bufs[i] && q->bufs[i]->map)
- return -EBUSY;
- }
-
- return 0;
-}
-
static int __videobuf_mmap_mapper(struct videobuf_queue *q,
+ struct videobuf_buffer *buf,
struct vm_area_struct *vma)
{
struct videobuf_dma_contig_memory *mem;
struct videobuf_mapping *map;
- unsigned int first;
int retval;
- unsigned long size, offset = vma->vm_pgoff << PAGE_SHIFT;
+ unsigned long size;
dev_dbg(q->dev, "%s\n", __func__);
- if (!(vma->vm_flags & VM_WRITE) || !(vma->vm_flags & VM_SHARED))
- return -EINVAL;
-
- /* look for first buffer to map */
- for (first = 0; first < VIDEO_MAX_FRAME; first++) {
- if (!q->bufs[first])
- continue;
-
- if (V4L2_MEMORY_MMAP != q->bufs[first]->memory)
- continue;
- if (q->bufs[first]->boff == offset)
- break;
- }
- if (VIDEO_MAX_FRAME == first) {
- dev_dbg(q->dev, "invalid user space offset [offset=0x%lx]\n",
- offset);
- return -EINVAL;
- }
/* create mapping + update buffer list */
map = kzalloc(sizeof(struct videobuf_mapping), GFP_KERNEL);
if (!map)
return -ENOMEM;
- q->bufs[first]->map = map;
+ buf->map = map;
map->start = vma->vm_start;
map->end = vma->vm_end;
map->q = q;
- q->bufs[first]->baddr = vma->vm_start;
+ buf->baddr = vma->vm_start;
- mem = q->bufs[first]->priv;
+ mem = buf->priv;
BUG_ON(!mem);
MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
- mem->size = PAGE_ALIGN(q->bufs[first]->bsize);
+ mem->size = PAGE_ALIGN(buf->bsize);
mem->vaddr = dma_alloc_coherent(q->dev, mem->size,
&mem->dma_handle, GFP_KERNEL);
if (!mem->vaddr) {
@@ -354,8 +323,8 @@ static int __videobuf_mmap_mapper(struct videobuf_queue *q,
dev_dbg(q->dev, "mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n",
map, q, vma->vm_start, vma->vm_end,
- (long int) q->bufs[first]->bsize,
- vma->vm_pgoff, first);
+ (long int)buf->bsize,
+ vma->vm_pgoff, buf->i);
videobuf_vm_open(vma);
@@ -366,69 +335,13 @@ error:
return -ENOMEM;
}
-static int __videobuf_copy_to_user(struct videobuf_queue *q,
- char __user *data, size_t count,
- int nonblocking)
-{
- struct videobuf_dma_contig_memory *mem = q->read_buf->priv;
- void *vaddr;
-
- BUG_ON(!mem);
- MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
- BUG_ON(!mem->vaddr);
-
- /* copy to userspace */
- if (count > q->read_buf->size - q->read_off)
- count = q->read_buf->size - q->read_off;
-
- vaddr = mem->vaddr;
-
- if (copy_to_user(data, vaddr + q->read_off, count))
- return -EFAULT;
-
- return count;
-}
-
-static int __videobuf_copy_stream(struct videobuf_queue *q,
- char __user *data, size_t count, size_t pos,
- int vbihack, int nonblocking)
-{
- unsigned int *fc;
- struct videobuf_dma_contig_memory *mem = q->read_buf->priv;
-
- BUG_ON(!mem);
- MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
-
- if (vbihack) {
- /* dirty, undocumented hack -- pass the frame counter
- * within the last four bytes of each vbi data block.
- * We need that one to maintain backward compatibility
- * to all vbi decoding software out there ... */
- fc = (unsigned int *)mem->vaddr;
- fc += (q->read_buf->size >> 2) - 1;
- *fc = q->read_buf->field_count >> 1;
- dev_dbg(q->dev, "vbihack: %d\n", *fc);
- }
-
- /* copy stuff using the common method */
- count = __videobuf_copy_to_user(q, data, count, nonblocking);
-
- if ((count == -EFAULT) && (pos == 0))
- return -EFAULT;
-
- return count;
-}
-
static struct videobuf_qtype_ops qops = {
.magic = MAGIC_QTYPE_OPS,
.alloc = __videobuf_alloc,
.iolock = __videobuf_iolock,
- .mmap_free = __videobuf_mmap_free,
.mmap_mapper = __videobuf_mmap_mapper,
- .video_copy_to_user = __videobuf_copy_to_user,
- .copy_stream = __videobuf_copy_stream,
- .vmalloc = __videobuf_to_vmalloc,
+ .vaddr = __videobuf_to_vaddr,
};
void videobuf_queue_dma_contig_init(struct videobuf_queue *q,
diff --git a/drivers/media/video/videobuf-dma-sg.c b/drivers/media/video/videobuf-dma-sg.c
index fcd045e7a1c1..8359e6badd36 100644
--- a/drivers/media/video/videobuf-dma-sg.c
+++ b/drivers/media/video/videobuf-dma-sg.c
@@ -37,8 +37,12 @@
#define MAGIC_DMABUF 0x19721112
#define MAGIC_SG_MEM 0x17890714
-#define MAGIC_CHECK(is,should) if (unlikely((is) != (should))) \
- { printk(KERN_ERR "magic mismatch: %x (expected %x)\n",is,should); BUG(); }
+#define MAGIC_CHECK(is, should) \
+ if (unlikely((is) != (should))) { \
+ printk(KERN_ERR "magic mismatch: %x (expected %x)\n", \
+ is, should); \
+ BUG(); \
+ }
static int debug;
module_param(debug, int, 0644);
@@ -47,13 +51,13 @@ MODULE_DESCRIPTION("helper module to manage video4linux dma sg buffers");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>");
MODULE_LICENSE("GPL");
-#define dprintk(level, fmt, arg...) if (debug >= level) \
- printk(KERN_DEBUG "vbuf-sg: " fmt , ## arg)
+#define dprintk(level, fmt, arg...) \
+ if (debug >= level) \
+ printk(KERN_DEBUG "vbuf-sg: " fmt , ## arg)
/* --------------------------------------------------------------------- */
-struct scatterlist*
-videobuf_vmalloc_to_sg(unsigned char *virt, int nr_pages)
+struct scatterlist *videobuf_vmalloc_to_sg(unsigned char *virt, int nr_pages)
{
struct scatterlist *sglist;
struct page *pg;
@@ -73,13 +77,14 @@ videobuf_vmalloc_to_sg(unsigned char *virt, int nr_pages)
}
return sglist;
- err:
+err:
vfree(sglist);
return NULL;
}
+EXPORT_SYMBOL_GPL(videobuf_vmalloc_to_sg);
-struct scatterlist*
-videobuf_pages_to_sg(struct page **pages, int nr_pages, int offset)
+struct scatterlist *videobuf_pages_to_sg(struct page **pages, int nr_pages,
+ int offset)
{
struct scatterlist *sglist;
int i;
@@ -104,20 +109,20 @@ videobuf_pages_to_sg(struct page **pages, int nr_pages, int offset)
}
return sglist;
- nopage:
- dprintk(2,"sgl: oops - no page\n");
+nopage:
+ dprintk(2, "sgl: oops - no page\n");
vfree(sglist);
return NULL;
- highmem:
- dprintk(2,"sgl: oops - highmem page\n");
+highmem:
+ dprintk(2, "sgl: oops - highmem page\n");
vfree(sglist);
return NULL;
}
/* --------------------------------------------------------------------- */
-struct videobuf_dmabuf *videobuf_to_dma (struct videobuf_buffer *buf)
+struct videobuf_dmabuf *videobuf_to_dma(struct videobuf_buffer *buf)
{
struct videobuf_dma_sg_memory *mem = buf->priv;
BUG_ON(!mem);
@@ -126,17 +131,19 @@ struct videobuf_dmabuf *videobuf_to_dma (struct videobuf_buffer *buf)
return &mem->dma;
}
+EXPORT_SYMBOL_GPL(videobuf_to_dma);
void videobuf_dma_init(struct videobuf_dmabuf *dma)
{
- memset(dma,0,sizeof(*dma));
+ memset(dma, 0, sizeof(*dma));
dma->magic = MAGIC_DMABUF;
}
+EXPORT_SYMBOL_GPL(videobuf_dma_init);
static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma,
int direction, unsigned long data, unsigned long size)
{
- unsigned long first,last;
+ unsigned long first, last;
int err, rw = 0;
dma->direction = direction;
@@ -155,21 +162,21 @@ static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma,
last = ((data+size-1) & PAGE_MASK) >> PAGE_SHIFT;
dma->offset = data & ~PAGE_MASK;
dma->nr_pages = last-first+1;
- dma->pages = kmalloc(dma->nr_pages * sizeof(struct page*),
- GFP_KERNEL);
+ dma->pages = kmalloc(dma->nr_pages * sizeof(struct page *), GFP_KERNEL);
if (NULL == dma->pages)
return -ENOMEM;
- dprintk(1,"init user [0x%lx+0x%lx => %d pages]\n",
- data,size,dma->nr_pages);
- err = get_user_pages(current,current->mm,
+ dprintk(1, "init user [0x%lx+0x%lx => %d pages]\n",
+ data, size, dma->nr_pages);
+
+ err = get_user_pages(current, current->mm,
data & PAGE_MASK, dma->nr_pages,
rw == READ, 1, /* force */
dma->pages, NULL);
if (err != dma->nr_pages) {
dma->nr_pages = (err >= 0) ? err : 0;
- dprintk(1,"get_user_pages: err=%d [%d]\n",err,dma->nr_pages);
+ dprintk(1, "get_user_pages: err=%d [%d]\n", err, dma->nr_pages);
return err < 0 ? err : -EINVAL;
}
return 0;
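/*
 * Illustrative sketch, not part of the patch above: the page-count
 * arithmetic used by videobuf_dma_init_user_locked(), redone as a
 * stand-alone program with an assumed 4 KiB page size and example values.
 */
#include <stdio.h>

#define EX_PAGE_SHIFT 12UL
#define EX_PAGE_SIZE  (1UL << EX_PAGE_SHIFT)
#define EX_PAGE_MASK  (~(EX_PAGE_SIZE - 1))

int main(void)
{
	unsigned long data = 0x10000800UL;	/* starts 2 KiB into a page */
	unsigned long size = 0x2000UL;		/* 8 KiB request */
	unsigned long first, last, offset, nr_pages;

	first    = data >> EX_PAGE_SHIFT;
	last     = ((data + size - 1) & EX_PAGE_MASK) >> EX_PAGE_SHIFT;
	offset   = data & ~EX_PAGE_MASK;
	nr_pages = last - first + 1;

	/* prints: offset=0x800 nr_pages=3 (8 KiB misaligned by 2 KiB spans 3 pages) */
	printf("offset=0x%lx nr_pages=%lu\n", offset, nr_pages);
	return 0;
}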
@@ -179,48 +186,58 @@ int videobuf_dma_init_user(struct videobuf_dmabuf *dma, int direction,
unsigned long data, unsigned long size)
{
int ret;
+
down_read(&current->mm->mmap_sem);
ret = videobuf_dma_init_user_locked(dma, direction, data, size);
up_read(&current->mm->mmap_sem);
return ret;
}
+EXPORT_SYMBOL_GPL(videobuf_dma_init_user);
int videobuf_dma_init_kernel(struct videobuf_dmabuf *dma, int direction,
int nr_pages)
{
- dprintk(1,"init kernel [%d pages]\n",nr_pages);
+ dprintk(1, "init kernel [%d pages]\n", nr_pages);
+
dma->direction = direction;
dma->vmalloc = vmalloc_32(nr_pages << PAGE_SHIFT);
if (NULL == dma->vmalloc) {
- dprintk(1,"vmalloc_32(%d pages) failed\n",nr_pages);
+ dprintk(1, "vmalloc_32(%d pages) failed\n", nr_pages);
return -ENOMEM;
}
- dprintk(1,"vmalloc is at addr 0x%08lx, size=%d\n",
+
+ dprintk(1, "vmalloc is at addr 0x%08lx, size=%d\n",
(unsigned long)dma->vmalloc,
nr_pages << PAGE_SHIFT);
- memset(dma->vmalloc,0,nr_pages << PAGE_SHIFT);
+
+ memset(dma->vmalloc, 0, nr_pages << PAGE_SHIFT);
dma->nr_pages = nr_pages;
+
return 0;
}
+EXPORT_SYMBOL_GPL(videobuf_dma_init_kernel);
int videobuf_dma_init_overlay(struct videobuf_dmabuf *dma, int direction,
dma_addr_t addr, int nr_pages)
{
- dprintk(1,"init overlay [%d pages @ bus 0x%lx]\n",
- nr_pages,(unsigned long)addr);
+ dprintk(1, "init overlay [%d pages @ bus 0x%lx]\n",
+ nr_pages, (unsigned long)addr);
dma->direction = direction;
+
if (0 == addr)
return -EINVAL;
dma->bus_addr = addr;
dma->nr_pages = nr_pages;
+
return 0;
}
+EXPORT_SYMBOL_GPL(videobuf_dma_init_overlay);
-int videobuf_dma_map(struct videobuf_queue* q, struct videobuf_dmabuf *dma)
+int videobuf_dma_map(struct videobuf_queue *q, struct videobuf_dmabuf *dma)
{
- MAGIC_CHECK(dma->magic,MAGIC_DMABUF);
+ MAGIC_CHECK(dma->magic, MAGIC_DMABUF);
BUG_ON(0 == dma->nr_pages);
if (dma->pages) {
@@ -228,20 +245,21 @@ int videobuf_dma_map(struct videobuf_queue* q, struct videobuf_dmabuf *dma)
dma->offset);
}
if (dma->vmalloc) {
- dma->sglist = videobuf_vmalloc_to_sg
- (dma->vmalloc,dma->nr_pages);
+ dma->sglist = videobuf_vmalloc_to_sg(dma->vmalloc,
+ dma->nr_pages);
}
if (dma->bus_addr) {
dma->sglist = vmalloc(sizeof(*dma->sglist));
if (NULL != dma->sglist) {
- dma->sglen = 1;
- sg_dma_address(&dma->sglist[0]) = dma->bus_addr & PAGE_MASK;
- dma->sglist[0].offset = dma->bus_addr & ~PAGE_MASK;
- sg_dma_len(&dma->sglist[0]) = dma->nr_pages * PAGE_SIZE;
+ dma->sglen = 1;
+ sg_dma_address(&dma->sglist[0]) = dma->bus_addr
+ & PAGE_MASK;
+ dma->sglist[0].offset = dma->bus_addr & ~PAGE_MASK;
+ sg_dma_len(&dma->sglist[0]) = dma->nr_pages * PAGE_SIZE;
}
}
if (NULL == dma->sglist) {
- dprintk(1,"scatterlist is NULL\n");
+ dprintk(1, "scatterlist is NULL\n");
return -ENOMEM;
}
if (!dma->bus_addr) {
@@ -249,47 +267,43 @@ int videobuf_dma_map(struct videobuf_queue* q, struct videobuf_dmabuf *dma)
dma->nr_pages, dma->direction);
if (0 == dma->sglen) {
printk(KERN_WARNING
- "%s: videobuf_map_sg failed\n",__func__);
+ "%s: videobuf_map_sg failed\n", __func__);
vfree(dma->sglist);
dma->sglist = NULL;
dma->sglen = 0;
return -ENOMEM;
}
}
- return 0;
-}
-
-int videobuf_dma_sync(struct videobuf_queue *q, struct videobuf_dmabuf *dma)
-{
- MAGIC_CHECK(dma->magic, MAGIC_DMABUF);
- BUG_ON(!dma->sglen);
- dma_sync_sg_for_cpu(q->dev, dma->sglist, dma->nr_pages, dma->direction);
return 0;
}
+EXPORT_SYMBOL_GPL(videobuf_dma_map);
-int videobuf_dma_unmap(struct videobuf_queue* q,struct videobuf_dmabuf *dma)
+int videobuf_dma_unmap(struct videobuf_queue *q, struct videobuf_dmabuf *dma)
{
MAGIC_CHECK(dma->magic, MAGIC_DMABUF);
+
if (!dma->sglen)
return 0;
- dma_unmap_sg(q->dev, dma->sglist, dma->nr_pages, dma->direction);
+ dma_unmap_sg(q->dev, dma->sglist, dma->sglen, dma->direction);
vfree(dma->sglist);
dma->sglist = NULL;
dma->sglen = 0;
+
return 0;
}
+EXPORT_SYMBOL_GPL(videobuf_dma_unmap);
int videobuf_dma_free(struct videobuf_dmabuf *dma)
{
- MAGIC_CHECK(dma->magic,MAGIC_DMABUF);
+ int i;
+ MAGIC_CHECK(dma->magic, MAGIC_DMABUF);
BUG_ON(dma->sglen);
if (dma->pages) {
- int i;
- for (i=0; i < dma->nr_pages; i++)
+ for (i = 0; i < dma->nr_pages; i++)
page_cache_release(dma->pages[i]);
kfree(dma->pages);
dma->pages = NULL;
@@ -298,12 +312,13 @@ int videobuf_dma_free(struct videobuf_dmabuf *dma)
vfree(dma->vmalloc);
dma->vmalloc = NULL;
- if (dma->bus_addr) {
+ if (dma->bus_addr)
dma->bus_addr = 0;
- }
dma->direction = DMA_NONE;
+
return 0;
}
+EXPORT_SYMBOL_GPL(videobuf_dma_free);
/* --------------------------------------------------------------------- */
@@ -315,6 +330,7 @@ int videobuf_sg_dma_map(struct device *dev, struct videobuf_dmabuf *dma)
return videobuf_dma_map(&q, dma);
}
+EXPORT_SYMBOL_GPL(videobuf_sg_dma_map);
int videobuf_sg_dma_unmap(struct device *dev, struct videobuf_dmabuf *dma)
{
@@ -324,49 +340,48 @@ int videobuf_sg_dma_unmap(struct device *dev, struct videobuf_dmabuf *dma)
return videobuf_dma_unmap(&q, dma);
}
+EXPORT_SYMBOL_GPL(videobuf_sg_dma_unmap);
/* --------------------------------------------------------------------- */
-static void
-videobuf_vm_open(struct vm_area_struct *vma)
+static void videobuf_vm_open(struct vm_area_struct *vma)
{
struct videobuf_mapping *map = vma->vm_private_data;
- dprintk(2,"vm_open %p [count=%d,vma=%08lx-%08lx]\n",map,
- map->count,vma->vm_start,vma->vm_end);
+ dprintk(2, "vm_open %p [count=%d,vma=%08lx-%08lx]\n", map,
+ map->count, vma->vm_start, vma->vm_end);
+
map->count++;
}
-static void
-videobuf_vm_close(struct vm_area_struct *vma)
+static void videobuf_vm_close(struct vm_area_struct *vma)
{
struct videobuf_mapping *map = vma->vm_private_data;
struct videobuf_queue *q = map->q;
struct videobuf_dma_sg_memory *mem;
int i;
- dprintk(2,"vm_close %p [count=%d,vma=%08lx-%08lx]\n",map,
- map->count,vma->vm_start,vma->vm_end);
+ dprintk(2, "vm_close %p [count=%d,vma=%08lx-%08lx]\n", map,
+ map->count, vma->vm_start, vma->vm_end);
map->count--;
if (0 == map->count) {
- dprintk(1,"munmap %p q=%p\n",map,q);
+ dprintk(1, "munmap %p q=%p\n", map, q);
mutex_lock(&q->vb_lock);
for (i = 0; i < VIDEO_MAX_FRAME; i++) {
if (NULL == q->bufs[i])
continue;
- mem=q->bufs[i]->priv;
-
+ mem = q->bufs[i]->priv;
if (!mem)
continue;
- MAGIC_CHECK(mem->magic,MAGIC_SG_MEM);
+ MAGIC_CHECK(mem->magic, MAGIC_SG_MEM);
if (q->bufs[i]->map != map)
continue;
q->bufs[i]->map = NULL;
q->bufs[i]->baddr = 0;
- q->ops->buf_release(q,q->bufs[i]);
+ q->ops->buf_release(q, q->bufs[i]);
}
mutex_unlock(&q->vb_lock);
kfree(map);
@@ -380,26 +395,27 @@ videobuf_vm_close(struct vm_area_struct *vma)
* now ...). Bounce buffers don't work very well for the data rates
* video capture has.
*/
-static int
-videobuf_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int videobuf_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct page *page;
- dprintk(3,"fault: fault @ %08lx [vma %08lx-%08lx]\n",
- (unsigned long)vmf->virtual_address,vma->vm_start,vma->vm_end);
+ dprintk(3, "fault: fault @ %08lx [vma %08lx-%08lx]\n",
+ (unsigned long)vmf->virtual_address,
+ vma->vm_start, vma->vm_end);
+
page = alloc_page(GFP_USER | __GFP_DMA32);
if (!page)
return VM_FAULT_OOM;
clear_user_highpage(page, (unsigned long)vmf->virtual_address);
vmf->page = page;
+
return 0;
}
-static const struct vm_operations_struct videobuf_vm_ops =
-{
- .open = videobuf_vm_open,
- .close = videobuf_vm_close,
- .fault = videobuf_vm_fault,
+static const struct vm_operations_struct videobuf_vm_ops = {
+ .open = videobuf_vm_open,
+ .close = videobuf_vm_close,
+ .fault = videobuf_vm_fault,
};
/* ---------------------------------------------------------------------
@@ -412,28 +428,28 @@ static const struct vm_operations_struct videobuf_vm_ops =
struct videobuf_dma_sg_memory
*/
-static void *__videobuf_alloc(size_t size)
+static struct videobuf_buffer *__videobuf_alloc(size_t size)
{
struct videobuf_dma_sg_memory *mem;
struct videobuf_buffer *vb;
- vb = kzalloc(size+sizeof(*mem),GFP_KERNEL);
+ vb = kzalloc(size + sizeof(*mem), GFP_KERNEL);
if (!vb)
return vb;
- mem = vb->priv = ((char *)vb)+size;
- mem->magic=MAGIC_SG_MEM;
+ mem = vb->priv = ((char *)vb) + size;
+ mem->magic = MAGIC_SG_MEM;
videobuf_dma_init(&mem->dma);
- dprintk(1,"%s: allocated at %p(%ld+%ld) & %p(%ld)\n",
- __func__,vb,(long)sizeof(*vb),(long)size-sizeof(*vb),
- mem,(long)sizeof(*mem));
+ dprintk(1, "%s: allocated at %p(%ld+%ld) & %p(%ld)\n",
+ __func__, vb, (long)sizeof(*vb), (long)size - sizeof(*vb),
+ mem, (long)sizeof(*mem));
return vb;
}
-static void *__videobuf_to_vmalloc (struct videobuf_buffer *buf)
+static void *__videobuf_to_vaddr(struct videobuf_buffer *buf)
{
struct videobuf_dma_sg_memory *mem = buf->priv;
BUG_ON(!mem);
@@ -443,11 +459,11 @@ static void *__videobuf_to_vmalloc (struct videobuf_buffer *buf)
return mem->dma.vmalloc;
}
-static int __videobuf_iolock (struct videobuf_queue* q,
- struct videobuf_buffer *vb,
- struct v4l2_framebuffer *fbuf)
+static int __videobuf_iolock(struct videobuf_queue *q,
+ struct videobuf_buffer *vb,
+ struct v4l2_framebuffer *fbuf)
{
- int err,pages;
+ int err, pages;
dma_addr_t bus;
struct videobuf_dma_sg_memory *mem = vb->priv;
BUG_ON(!mem);
@@ -460,16 +476,16 @@ static int __videobuf_iolock (struct videobuf_queue* q,
if (0 == vb->baddr) {
/* no userspace addr -- kernel bounce buffer */
pages = PAGE_ALIGN(vb->size) >> PAGE_SHIFT;
- err = videobuf_dma_init_kernel( &mem->dma,
- DMA_FROM_DEVICE,
- pages );
+ err = videobuf_dma_init_kernel(&mem->dma,
+ DMA_FROM_DEVICE,
+ pages);
if (0 != err)
return err;
} else if (vb->memory == V4L2_MEMORY_USERPTR) {
/* dma directly to userspace */
- err = videobuf_dma_init_user( &mem->dma,
- DMA_FROM_DEVICE,
- vb->baddr,vb->bsize );
+ err = videobuf_dma_init_user(&mem->dma,
+ DMA_FROM_DEVICE,
+ vb->baddr, vb->bsize);
if (0 != err)
return err;
} else {
@@ -515,43 +531,27 @@ static int __videobuf_sync(struct videobuf_queue *q,
struct videobuf_buffer *buf)
{
struct videobuf_dma_sg_memory *mem = buf->priv;
- BUG_ON(!mem);
- MAGIC_CHECK(mem->magic,MAGIC_SG_MEM);
+ BUG_ON(!mem || !mem->dma.sglen);
- return videobuf_dma_sync(q,&mem->dma);
-}
-
-static int __videobuf_mmap_free(struct videobuf_queue *q)
-{
- int i;
+ MAGIC_CHECK(mem->magic, MAGIC_SG_MEM);
+ MAGIC_CHECK(mem->dma.magic, MAGIC_DMABUF);
- for (i = 0; i < VIDEO_MAX_FRAME; i++) {
- if (q->bufs[i]) {
- if (q->bufs[i]->map)
- return -EBUSY;
- }
- }
+ dma_sync_sg_for_cpu(q->dev, mem->dma.sglist,
+ mem->dma.sglen, mem->dma.direction);
return 0;
}
static int __videobuf_mmap_mapper(struct videobuf_queue *q,
- struct vm_area_struct *vma)
+ struct videobuf_buffer *buf,
+ struct vm_area_struct *vma)
{
- struct videobuf_dma_sg_memory *mem;
+ struct videobuf_dma_sg_memory *mem = buf->priv;
struct videobuf_mapping *map;
- unsigned int first,last,size,i;
+ unsigned int first, last, size = 0, i;
int retval;
retval = -EINVAL;
- if (!(vma->vm_flags & VM_WRITE)) {
- dprintk(1,"mmap app bug: PROT_WRITE please\n");
- goto done;
- }
- if (!(vma->vm_flags & VM_SHARED)) {
- dprintk(1,"mmap app bug: MAP_SHARED please\n");
- goto done;
- }
/* This function maintains backwards compatibility with V4L1 and will
* map more than one buffer if the vma length is equal to the combined
@@ -561,48 +561,52 @@ static int __videobuf_mmap_mapper(struct videobuf_queue *q,
* TODO: Allow drivers to specify if they support this mode
*/
+ BUG_ON(!mem);
+ MAGIC_CHECK(mem->magic, MAGIC_SG_MEM);
+
/* look for first buffer to map */
for (first = 0; first < VIDEO_MAX_FRAME; first++) {
- if (NULL == q->bufs[first])
- continue;
- mem=q->bufs[first]->priv;
- BUG_ON(!mem);
- MAGIC_CHECK(mem->magic,MAGIC_SG_MEM);
-
- if (V4L2_MEMORY_MMAP != q->bufs[first]->memory)
- continue;
- if (q->bufs[first]->boff == (vma->vm_pgoff << PAGE_SHIFT))
+ if (buf == q->bufs[first]) {
+ size = PAGE_ALIGN(q->bufs[first]->bsize);
break;
+ }
}
- if (VIDEO_MAX_FRAME == first) {
- dprintk(1,"mmap app bug: offset invalid [offset=0x%lx]\n",
- (vma->vm_pgoff << PAGE_SHIFT));
+
+ /* paranoia, should never happen since buf is always valid. */
+ if (!size) {
+ dprintk(1, "mmap app bug: offset invalid [offset=0x%lx]\n",
+ (vma->vm_pgoff << PAGE_SHIFT));
goto done;
}
- /* look for last buffer to map */
- for (size = 0, last = first; last < VIDEO_MAX_FRAME; last++) {
- if (NULL == q->bufs[last])
- continue;
- if (V4L2_MEMORY_MMAP != q->bufs[last]->memory)
- continue;
- if (q->bufs[last]->map) {
- retval = -EBUSY;
+ last = first;
+#ifdef CONFIG_VIDEO_V4L1_COMPAT
+ if (size != (vma->vm_end - vma->vm_start)) {
+ /* look for last buffer to map */
+ for (last = first + 1; last < VIDEO_MAX_FRAME; last++) {
+ if (NULL == q->bufs[last])
+ continue;
+ if (V4L2_MEMORY_MMAP != q->bufs[last]->memory)
+ continue;
+ if (q->bufs[last]->map) {
+ retval = -EBUSY;
+ goto done;
+ }
+ size += PAGE_ALIGN(q->bufs[last]->bsize);
+ if (size == (vma->vm_end - vma->vm_start))
+ break;
+ }
+ if (VIDEO_MAX_FRAME == last) {
+ dprintk(1, "mmap app bug: size invalid [size=0x%lx]\n",
+ (vma->vm_end - vma->vm_start));
goto done;
}
- size += PAGE_ALIGN(q->bufs[last]->bsize);
- if (size == (vma->vm_end - vma->vm_start))
- break;
- }
- if (VIDEO_MAX_FRAME == last) {
- dprintk(1,"mmap app bug: size invalid [size=0x%lx]\n",
- (vma->vm_end - vma->vm_start));
- goto done;
}
+#endif
/* create mapping + update buffer list */
retval = -ENOMEM;
- map = kmalloc(sizeof(struct videobuf_mapping),GFP_KERNEL);
+ map = kmalloc(sizeof(struct videobuf_mapping), GFP_KERNEL);
if (NULL == map)
goto done;
@@ -623,72 +627,22 @@ static int __videobuf_mmap_mapper(struct videobuf_queue *q,
vma->vm_flags |= VM_DONTEXPAND | VM_RESERVED;
vma->vm_flags &= ~VM_IO; /* using shared anonymous pages */
vma->vm_private_data = map;
- dprintk(1,"mmap %p: q=%p %08lx-%08lx pgoff %08lx bufs %d-%d\n",
- map,q,vma->vm_start,vma->vm_end,vma->vm_pgoff,first,last);
+ dprintk(1, "mmap %p: q=%p %08lx-%08lx pgoff %08lx bufs %d-%d\n",
+ map, q, vma->vm_start, vma->vm_end, vma->vm_pgoff, first, last);
retval = 0;
- done:
+done:
return retval;
}
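/*
 * Illustrative sketch, not part of the patch above: the V4L1-compat size
 * accumulation that decides how many consecutive buffers one mmap() call
 * covers.  The buffer sizes and vma length are made-up example values and
 * 4 KiB page alignment is assumed; the real code also skips unallocated and
 * non-MMAP buffers and fails with -EBUSY if one is already mapped.
 */
#include <stdio.h>

#define EX_PAGE_ALIGN(x) (((x) + 4095UL) & ~4095UL)

int main(void)
{
	unsigned long bsize[4] = { 614400, 614400, 614400, 614400 }; /* 4 x 600 KiB */
	unsigned long vma_len = 4 * EX_PAGE_ALIGN(614400);	/* app mapped all four */
	unsigned long size = 0;
	unsigned int first = 0, last;

	for (last = first; last < 4; last++) {
		size += EX_PAGE_ALIGN(bsize[last]);
		if (size == vma_len)
			break;	/* buffers first..last fill the vma exactly */
	}
	/* prints: mapping buffers 0-3 (2457600 bytes) */
	printf("mapping buffers %u-%u (%lu bytes)\n", first, last, size);
	return 0;
}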
-static int __videobuf_copy_to_user ( struct videobuf_queue *q,
- char __user *data, size_t count,
- int nonblocking )
-{
- struct videobuf_dma_sg_memory *mem = q->read_buf->priv;
- BUG_ON(!mem);
- MAGIC_CHECK(mem->magic,MAGIC_SG_MEM);
-
- /* copy to userspace */
- if (count > q->read_buf->size - q->read_off)
- count = q->read_buf->size - q->read_off;
-
- if (copy_to_user(data, mem->dma.vmalloc+q->read_off, count))
- return -EFAULT;
-
- return count;
-}
-
-static int __videobuf_copy_stream ( struct videobuf_queue *q,
- char __user *data, size_t count, size_t pos,
- int vbihack, int nonblocking )
-{
- unsigned int *fc;
- struct videobuf_dma_sg_memory *mem = q->read_buf->priv;
- BUG_ON(!mem);
- MAGIC_CHECK(mem->magic,MAGIC_SG_MEM);
-
- if (vbihack) {
- /* dirty, undocumented hack -- pass the frame counter
- * within the last four bytes of each vbi data block.
- * We need that one to maintain backward compatibility
- * to all vbi decoding software out there ... */
- fc = (unsigned int*)mem->dma.vmalloc;
- fc += (q->read_buf->size>>2) -1;
- *fc = q->read_buf->field_count >> 1;
- dprintk(1,"vbihack: %d\n",*fc);
- }
-
- /* copy stuff using the common method */
- count = __videobuf_copy_to_user (q,data,count,nonblocking);
-
- if ( (count==-EFAULT) && (0 == pos) )
- return -EFAULT;
-
- return count;
-}
-
static struct videobuf_qtype_ops sg_ops = {
.magic = MAGIC_QTYPE_OPS,
.alloc = __videobuf_alloc,
.iolock = __videobuf_iolock,
.sync = __videobuf_sync,
- .mmap_free = __videobuf_mmap_free,
.mmap_mapper = __videobuf_mmap_mapper,
- .video_copy_to_user = __videobuf_copy_to_user,
- .copy_stream = __videobuf_copy_stream,
- .vmalloc = __videobuf_to_vmalloc,
+ .vaddr = __videobuf_to_vaddr,
};
void *videobuf_sg_alloc(size_t size)
@@ -702,8 +656,9 @@ void *videobuf_sg_alloc(size_t size)
return videobuf_alloc(&q);
}
+EXPORT_SYMBOL_GPL(videobuf_sg_alloc);
-void videobuf_queue_sg_init(struct videobuf_queue* q,
+void videobuf_queue_sg_init(struct videobuf_queue *q,
const struct videobuf_queue_ops *ops,
struct device *dev,
spinlock_t *irqlock,
@@ -715,29 +670,5 @@ void videobuf_queue_sg_init(struct videobuf_queue* q,
videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
priv, &sg_ops);
}
-
-/* --------------------------------------------------------------------- */
-
-EXPORT_SYMBOL_GPL(videobuf_vmalloc_to_sg);
-
-EXPORT_SYMBOL_GPL(videobuf_to_dma);
-EXPORT_SYMBOL_GPL(videobuf_dma_init);
-EXPORT_SYMBOL_GPL(videobuf_dma_init_user);
-EXPORT_SYMBOL_GPL(videobuf_dma_init_kernel);
-EXPORT_SYMBOL_GPL(videobuf_dma_init_overlay);
-EXPORT_SYMBOL_GPL(videobuf_dma_map);
-EXPORT_SYMBOL_GPL(videobuf_dma_sync);
-EXPORT_SYMBOL_GPL(videobuf_dma_unmap);
-EXPORT_SYMBOL_GPL(videobuf_dma_free);
-
-EXPORT_SYMBOL_GPL(videobuf_sg_dma_map);
-EXPORT_SYMBOL_GPL(videobuf_sg_dma_unmap);
-EXPORT_SYMBOL_GPL(videobuf_sg_alloc);
-
EXPORT_SYMBOL_GPL(videobuf_queue_sg_init);
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
diff --git a/drivers/media/video/videobuf-dvb.c b/drivers/media/video/videobuf-dvb.c
index 0afb62e63d99..3f76398968b8 100644
--- a/drivers/media/video/videobuf-dvb.c
+++ b/drivers/media/video/videobuf-dvb.c
@@ -67,7 +67,7 @@ static int videobuf_dvb_thread(void *data)
try_to_freeze();
/* feed buffer data to demux */
- outp = videobuf_queue_to_vmalloc (&dvb->dvbq, buf);
+ outp = videobuf_queue_to_vaddr(&dvb->dvbq, buf);
if (buf->state == VIDEOBUF_DONE)
dvb_dmx_swfilter(&dvb->demux, outp,
diff --git a/drivers/media/video/videobuf-vmalloc.c b/drivers/media/video/videobuf-vmalloc.c
index 136e09383c06..583728f4c221 100644
--- a/drivers/media/video/videobuf-vmalloc.c
+++ b/drivers/media/video/videobuf-vmalloc.c
@@ -30,8 +30,12 @@
#define MAGIC_DMABUF 0x17760309
#define MAGIC_VMAL_MEM 0x18221223
-#define MAGIC_CHECK(is,should) if (unlikely((is) != (should))) \
- { printk(KERN_ERR "magic mismatch: %x (expected %x)\n",is,should); BUG(); }
+#define MAGIC_CHECK(is, should) \
+ if (unlikely((is) != (should))) { \
+ printk(KERN_ERR "magic mismatch: %x (expected %x)\n", \
+ is, should); \
+ BUG(); \
+ }
static int debug;
module_param(debug, int, 0644);
@@ -40,19 +44,19 @@ MODULE_DESCRIPTION("helper module to manage video4linux vmalloc buffers");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>");
MODULE_LICENSE("GPL");
-#define dprintk(level, fmt, arg...) if (debug >= level) \
- printk(KERN_DEBUG "vbuf-vmalloc: " fmt , ## arg)
+#define dprintk(level, fmt, arg...) \
+ if (debug >= level) \
+ printk(KERN_DEBUG "vbuf-vmalloc: " fmt , ## arg)
/***************************************************************************/
-static void
-videobuf_vm_open(struct vm_area_struct *vma)
+static void videobuf_vm_open(struct vm_area_struct *vma)
{
struct videobuf_mapping *map = vma->vm_private_data;
- dprintk(2,"vm_open %p [count=%u,vma=%08lx-%08lx]\n",map,
- map->count,vma->vm_start,vma->vm_end);
+ dprintk(2, "vm_open %p [count=%u,vma=%08lx-%08lx]\n", map,
+ map->count, vma->vm_start, vma->vm_end);
map->count++;
}
@@ -63,7 +67,7 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
struct videobuf_queue *q = map->q;
int i;
- dprintk(2,"vm_close %p [count=%u,vma=%08lx-%08lx]\n", map,
+ dprintk(2, "vm_close %p [count=%u,vma=%08lx-%08lx]\n", map,
map->count, vma->vm_start, vma->vm_end);
map->count--;
@@ -116,8 +120,7 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
return;
}
-static const struct vm_operations_struct videobuf_vm_ops =
-{
+static const struct vm_operations_struct videobuf_vm_ops = {
.open = videobuf_vm_open,
.close = videobuf_vm_close,
};
@@ -132,28 +135,28 @@ static const struct vm_operations_struct videobuf_vm_ops =
struct videobuf_dma_sg_memory
*/
-static void *__videobuf_alloc(size_t size)
+static struct videobuf_buffer *__videobuf_alloc(size_t size)
{
struct videobuf_vmalloc_memory *mem;
struct videobuf_buffer *vb;
- vb = kzalloc(size+sizeof(*mem),GFP_KERNEL);
+ vb = kzalloc(size + sizeof(*mem), GFP_KERNEL);
if (!vb)
return vb;
- mem = vb->priv = ((char *)vb)+size;
- mem->magic=MAGIC_VMAL_MEM;
+ mem = vb->priv = ((char *)vb) + size;
+ mem->magic = MAGIC_VMAL_MEM;
- dprintk(1,"%s: allocated at %p(%ld+%ld) & %p(%ld)\n",
- __func__,vb,(long)sizeof(*vb),(long)size-sizeof(*vb),
- mem,(long)sizeof(*mem));
+ dprintk(1, "%s: allocated at %p(%ld+%ld) & %p(%ld)\n",
+ __func__, vb, (long)sizeof(*vb), (long)size - sizeof(*vb),
+ mem, (long)sizeof(*mem));
return vb;
}
-static int __videobuf_iolock (struct videobuf_queue* q,
- struct videobuf_buffer *vb,
- struct v4l2_framebuffer *fbuf)
+static int __videobuf_iolock(struct videobuf_queue *q,
+ struct videobuf_buffer *vb,
+ struct v4l2_framebuffer *fbuf)
{
struct videobuf_vmalloc_memory *mem = vb->priv;
int pages;
@@ -177,15 +180,13 @@ static int __videobuf_iolock (struct videobuf_queue* q,
dprintk(1, "%s memory method USERPTR\n", __func__);
-#if 1
if (vb->baddr) {
printk(KERN_ERR "USERPTR is currently not supported\n");
return -EINVAL;
}
-#endif
/* The only USERPTR currently supported is the one needed for
- read() method.
+ * read() method.
*/
mem->vmalloc = vmalloc_user(pages);
@@ -210,7 +211,7 @@ static int __videobuf_iolock (struct videobuf_queue* q,
/* Try to remap memory */
rc = remap_vmalloc_range(mem->vma, (void *)vb->baddr, 0);
if (rc < 0) {
- printk(KERN_ERR "mmap: remap failed with error %d. ", rc);
+ printk(KERN_ERR "mmap: remap failed with error %d", rc);
return -ENOMEM;
}
#endif
@@ -228,69 +229,29 @@ static int __videobuf_iolock (struct videobuf_queue* q,
return 0;
}
-static int __videobuf_sync(struct videobuf_queue *q,
- struct videobuf_buffer *buf)
-{
- return 0;
-}
-
-static int __videobuf_mmap_free(struct videobuf_queue *q)
-{
- unsigned int i;
-
- dprintk(1, "%s\n", __func__);
- for (i = 0; i < VIDEO_MAX_FRAME; i++) {
- if (q->bufs[i]) {
- if (q->bufs[i]->map)
- return -EBUSY;
- }
- }
-
- return 0;
-}
-
static int __videobuf_mmap_mapper(struct videobuf_queue *q,
- struct vm_area_struct *vma)
+ struct videobuf_buffer *buf,
+ struct vm_area_struct *vma)
{
struct videobuf_vmalloc_memory *mem;
struct videobuf_mapping *map;
- unsigned int first;
int retval, pages;
- unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
dprintk(1, "%s\n", __func__);
- if (!(vma->vm_flags & VM_WRITE) || !(vma->vm_flags & VM_SHARED))
- return -EINVAL;
-
- /* look for first buffer to map */
- for (first = 0; first < VIDEO_MAX_FRAME; first++) {
- if (NULL == q->bufs[first])
- continue;
-
- if (V4L2_MEMORY_MMAP != q->bufs[first]->memory)
- continue;
- if (q->bufs[first]->boff == offset)
- break;
- }
- if (VIDEO_MAX_FRAME == first) {
- dprintk(1,"mmap app bug: offset invalid [offset=0x%lx]\n",
- (vma->vm_pgoff << PAGE_SHIFT));
- return -EINVAL;
- }
/* create mapping + update buffer list */
map = kzalloc(sizeof(struct videobuf_mapping), GFP_KERNEL);
if (NULL == map)
return -ENOMEM;
- q->bufs[first]->map = map;
+ buf->map = map;
map->start = vma->vm_start;
map->end = vma->vm_end;
map->q = q;
- q->bufs[first]->baddr = vma->vm_start;
+ buf->baddr = vma->vm_start;
- mem = q->bufs[first]->priv;
+ mem = buf->priv;
BUG_ON(!mem);
MAGIC_CHECK(mem->magic, MAGIC_VMAL_MEM);
@@ -300,8 +261,7 @@ static int __videobuf_mmap_mapper(struct videobuf_queue *q,
printk(KERN_ERR "vmalloc (%d pages) failed\n", pages);
goto error;
}
- dprintk(1, "vmalloc is at addr %p (%d pages)\n",
- mem->vmalloc, pages);
+ dprintk(1, "vmalloc is at addr %p (%d pages)\n", mem->vmalloc, pages);
/* Try to remap memory */
retval = remap_vmalloc_range(vma, mem->vmalloc, 0);
@@ -315,10 +275,10 @@ static int __videobuf_mmap_mapper(struct videobuf_queue *q,
vma->vm_flags |= VM_DONTEXPAND | VM_RESERVED;
vma->vm_private_data = map;
- dprintk(1,"mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n",
+ dprintk(1, "mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n",
map, q, vma->vm_start, vma->vm_end,
- (long int) q->bufs[first]->bsize,
- vma->vm_pgoff, first);
+ (long int)buf->bsize,
+ vma->vm_pgoff, buf->i);
videobuf_vm_open(vma);
@@ -330,69 +290,16 @@ error:
return -ENOMEM;
}
-static int __videobuf_copy_to_user ( struct videobuf_queue *q,
- char __user *data, size_t count,
- int nonblocking )
-{
- struct videobuf_vmalloc_memory *mem=q->read_buf->priv;
- BUG_ON (!mem);
- MAGIC_CHECK(mem->magic,MAGIC_VMAL_MEM);
-
- BUG_ON (!mem->vmalloc);
-
- /* copy to userspace */
- if (count > q->read_buf->size - q->read_off)
- count = q->read_buf->size - q->read_off;
-
- if (copy_to_user(data, mem->vmalloc+q->read_off, count))
- return -EFAULT;
-
- return count;
-}
-
-static int __videobuf_copy_stream ( struct videobuf_queue *q,
- char __user *data, size_t count, size_t pos,
- int vbihack, int nonblocking )
-{
- unsigned int *fc;
- struct videobuf_vmalloc_memory *mem=q->read_buf->priv;
- BUG_ON (!mem);
- MAGIC_CHECK(mem->magic,MAGIC_VMAL_MEM);
-
- if (vbihack) {
- /* dirty, undocumented hack -- pass the frame counter
- * within the last four bytes of each vbi data block.
- * We need that one to maintain backward compatibility
- * to all vbi decoding software out there ... */
- fc = (unsigned int*)mem->vmalloc;
- fc += (q->read_buf->size>>2) -1;
- *fc = q->read_buf->field_count >> 1;
- dprintk(1,"vbihack: %d\n",*fc);
- }
-
- /* copy stuff using the common method */
- count = __videobuf_copy_to_user (q,data,count,nonblocking);
-
- if ( (count==-EFAULT) && (0 == pos) )
- return -EFAULT;
-
- return count;
-}
-
static struct videobuf_qtype_ops qops = {
.magic = MAGIC_QTYPE_OPS,
.alloc = __videobuf_alloc,
.iolock = __videobuf_iolock,
- .sync = __videobuf_sync,
- .mmap_free = __videobuf_mmap_free,
.mmap_mapper = __videobuf_mmap_mapper,
- .video_copy_to_user = __videobuf_copy_to_user,
- .copy_stream = __videobuf_copy_stream,
- .vmalloc = videobuf_to_vmalloc,
+ .vaddr = videobuf_to_vmalloc,
};
-void videobuf_queue_vmalloc_init(struct videobuf_queue* q,
+void videobuf_queue_vmalloc_init(struct videobuf_queue *q,
const struct videobuf_queue_ops *ops,
struct device *dev,
spinlock_t *irqlock,
@@ -404,20 +311,19 @@ void videobuf_queue_vmalloc_init(struct videobuf_queue* q,
videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
priv, &qops);
}
-
EXPORT_SYMBOL_GPL(videobuf_queue_vmalloc_init);
-void *videobuf_to_vmalloc (struct videobuf_buffer *buf)
+void *videobuf_to_vmalloc(struct videobuf_buffer *buf)
{
- struct videobuf_vmalloc_memory *mem=buf->priv;
- BUG_ON (!mem);
- MAGIC_CHECK(mem->magic,MAGIC_VMAL_MEM);
+ struct videobuf_vmalloc_memory *mem = buf->priv;
+ BUG_ON(!mem);
+ MAGIC_CHECK(mem->magic, MAGIC_VMAL_MEM);
return mem->vmalloc;
}
EXPORT_SYMBOL_GPL(videobuf_to_vmalloc);
-void videobuf_vmalloc_free (struct videobuf_buffer *buf)
+void videobuf_vmalloc_free(struct videobuf_buffer *buf)
{
struct videobuf_vmalloc_memory *mem = buf->priv;
@@ -442,8 +348,3 @@ void videobuf_vmalloc_free (struct videobuf_buffer *buf)
}
EXPORT_SYMBOL_GPL(videobuf_vmalloc_free);
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
diff --git a/drivers/media/video/vivi.c b/drivers/media/video/vivi.c
index cdbe70385c12..e17b6fee046b 100644
--- a/drivers/media/video/vivi.c
+++ b/drivers/media/video/vivi.c
@@ -13,29 +13,23 @@
* License, or (at your option) any later version
*/
#include <linux/module.h>
-#include <linux/delay.h>
#include <linux/errno.h>
-#include <linux/fs.h>
#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/mm.h>
-#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/sched.h>
-#include <linux/pci.h>
-#include <linux/random.h>
+#include <linux/slab.h>
+#include <linux/font.h>
#include <linux/version.h>
#include <linux/mutex.h>
#include <linux/videodev2.h>
-#include <linux/dma-mapping.h>
-#include <linux/interrupt.h>
#include <linux/kthread.h>
-#include <linux/highmem.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
#include <linux/freezer.h>
+#endif
#include <media/videobuf-vmalloc.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
-#include "font.h"
+#include <media/v4l2-common.h>
#define VIVI_MODULE_NAME "vivi"
@@ -44,8 +38,11 @@
#define WAKE_DENOMINATOR 1001
#define BUFFER_TIMEOUT msecs_to_jiffies(500) /* 0.5 seconds */
+#define MAX_WIDTH 1920
+#define MAX_HEIGHT 1200
+
#define VIVI_MAJOR_VERSION 0
-#define VIVI_MINOR_VERSION 6
+#define VIVI_MINOR_VERSION 7
#define VIVI_RELEASE 0
#define VIVI_VERSION \
KERNEL_VERSION(VIVI_MAJOR_VERSION, VIVI_MINOR_VERSION, VIVI_RELEASE)
@@ -70,56 +67,8 @@ static unsigned int vid_limit = 16;
module_param(vid_limit, uint, 0644);
MODULE_PARM_DESC(vid_limit, "capture memory limit in megabytes");
-
-/* supported controls */
-static struct v4l2_queryctrl vivi_qctrl[] = {
- {
- .id = V4L2_CID_AUDIO_VOLUME,
- .name = "Volume",
- .minimum = 0,
- .maximum = 65535,
- .step = 65535/100,
- .default_value = 65535,
- .flags = V4L2_CTRL_FLAG_SLIDER,
- .type = V4L2_CTRL_TYPE_INTEGER,
- }, {
- .id = V4L2_CID_BRIGHTNESS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Brightness",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
- .default_value = 127,
- .flags = V4L2_CTRL_FLAG_SLIDER,
- }, {
- .id = V4L2_CID_CONTRAST,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Contrast",
- .minimum = 0,
- .maximum = 255,
- .step = 0x1,
- .default_value = 0x10,
- .flags = V4L2_CTRL_FLAG_SLIDER,
- }, {
- .id = V4L2_CID_SATURATION,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Saturation",
- .minimum = 0,
- .maximum = 255,
- .step = 0x1,
- .default_value = 127,
- .flags = V4L2_CTRL_FLAG_SLIDER,
- }, {
- .id = V4L2_CID_HUE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Hue",
- .minimum = -128,
- .maximum = 127,
- .step = 0x1,
- .default_value = 0,
- .flags = V4L2_CTRL_FLAG_SLIDER,
- }
-};
+/* Global font descriptor */
+static const u8 *font8x16;
#define dprintk(dev, level, fmt, arg...) \
v4l2_dbg(level, debug, &dev->v4l2_dev, fmt, ## arg)
@@ -214,41 +163,38 @@ struct vivi_dev {
struct list_head vivi_devlist;
struct v4l2_device v4l2_dev;
+ /* controls */
+ int brightness;
+ int contrast;
+ int saturation;
+ int hue;
+ int volume;
+
spinlock_t slock;
struct mutex mutex;
- int users;
-
/* various device info */
struct video_device *vfd;
struct vivi_dmaqueue vidq;
/* Several counters */
- int h, m, s, ms;
+ unsigned ms;
unsigned long jiffies;
- char timestr[13];
int mv_count; /* Controls bars movement */
/* Input Number */
int input;
- /* Control 'registers' */
- int qctl_regs[ARRAY_SIZE(vivi_qctrl)];
-};
-
-struct vivi_fh {
- struct vivi_dev *dev;
-
/* video capture */
struct vivi_fmt *fmt;
unsigned int width, height;
struct videobuf_queue vb_vidq;
- enum v4l2_buf_type type;
- unsigned char bars[8][3];
- int input; /* Input Number on bars */
+ unsigned long generating;
+ u8 bars[9][3];
+ u8 line[MAX_WIDTH * 4];
};
/* ------------------------------------------------------------------
@@ -259,19 +205,20 @@ struct vivi_fh {
enum colors {
WHITE,
- AMBAR,
+ AMBER,
CYAN,
GREEN,
MAGENTA,
RED,
BLUE,
BLACK,
+ TEXT_BLACK,
};
- /* R G B */
+/* R G B */
#define COLOR_WHITE {204, 204, 204}
-#define COLOR_AMBAR {208, 208, 0}
-#define COLOR_CIAN { 0, 206, 206}
+#define COLOR_AMBER {208, 208, 0}
+#define COLOR_CYAN { 0, 206, 206}
#define COLOR_GREEN { 0, 239, 0}
#define COLOR_MAGENTA {239, 0, 239}
#define COLOR_RED {205, 0, 0}
@@ -279,56 +226,24 @@ enum colors {
#define COLOR_BLACK { 0, 0, 0}
struct bar_std {
- u8 bar[8][3];
+ u8 bar[9][3];
};
/* Maximum number of bars are 10 - otherwise, the input print code
should be modified */
static struct bar_std bars[] = {
{ /* Standard ITU-R color bar sequence */
- {
- COLOR_WHITE,
- COLOR_AMBAR,
- COLOR_CIAN,
- COLOR_GREEN,
- COLOR_MAGENTA,
- COLOR_RED,
- COLOR_BLUE,
- COLOR_BLACK,
- }
+ { COLOR_WHITE, COLOR_AMBER, COLOR_CYAN, COLOR_GREEN,
+ COLOR_MAGENTA, COLOR_RED, COLOR_BLUE, COLOR_BLACK, COLOR_BLACK }
}, {
- {
- COLOR_WHITE,
- COLOR_AMBAR,
- COLOR_BLACK,
- COLOR_WHITE,
- COLOR_AMBAR,
- COLOR_BLACK,
- COLOR_WHITE,
- COLOR_AMBAR,
- }
+ { COLOR_WHITE, COLOR_AMBER, COLOR_BLACK, COLOR_WHITE,
+ COLOR_AMBER, COLOR_BLACK, COLOR_WHITE, COLOR_AMBER, COLOR_BLACK }
}, {
- {
- COLOR_WHITE,
- COLOR_CIAN,
- COLOR_BLACK,
- COLOR_WHITE,
- COLOR_CIAN,
- COLOR_BLACK,
- COLOR_WHITE,
- COLOR_CIAN,
- }
+ { COLOR_WHITE, COLOR_CYAN, COLOR_BLACK, COLOR_WHITE,
+ COLOR_CYAN, COLOR_BLACK, COLOR_WHITE, COLOR_CYAN, COLOR_BLACK }
}, {
- {
- COLOR_WHITE,
- COLOR_GREEN,
- COLOR_BLACK,
- COLOR_WHITE,
- COLOR_GREEN,
- COLOR_BLACK,
- COLOR_WHITE,
- COLOR_GREEN,
- }
+ { COLOR_WHITE, COLOR_GREEN, COLOR_BLACK, COLOR_WHITE,
+ COLOR_GREEN, COLOR_BLACK, COLOR_WHITE, COLOR_GREEN, COLOR_BLACK }
},
};
@@ -344,21 +259,18 @@ static struct bar_std bars[] = {
(((-9714 * r - 19070 * g + 28784 * b + 32768) >> 16) + 128)
/* precalculate color bar values to speed up rendering */
-static void precalculate_bars(struct vivi_fh *fh)
+static void precalculate_bars(struct vivi_dev *dev)
{
- struct vivi_dev *dev = fh->dev;
- unsigned char r, g, b;
+ u8 r, g, b;
int k, is_yuv;
- fh->input = dev->input;
-
- for (k = 0; k < 8; k++) {
- r = bars[fh->input].bar[k][0];
- g = bars[fh->input].bar[k][1];
- b = bars[fh->input].bar[k][2];
+ for (k = 0; k < 9; k++) {
+ r = bars[dev->input].bar[k][0];
+ g = bars[dev->input].bar[k][1];
+ b = bars[dev->input].bar[k][2];
is_yuv = 0;
- switch (fh->fmt->fourcc) {
+ switch (dev->fmt->fourcc) {
case V4L2_PIX_FMT_YUYV:
case V4L2_PIX_FMT_UYVY:
is_yuv = 1;
@@ -378,16 +290,15 @@ static void precalculate_bars(struct vivi_fh *fh)
}
if (is_yuv) {
- fh->bars[k][0] = TO_Y(r, g, b); /* Luma */
- fh->bars[k][1] = TO_U(r, g, b); /* Cb */
- fh->bars[k][2] = TO_V(r, g, b); /* Cr */
+ dev->bars[k][0] = TO_Y(r, g, b); /* Luma */
+ dev->bars[k][1] = TO_U(r, g, b); /* Cb */
+ dev->bars[k][2] = TO_V(r, g, b); /* Cr */
} else {
- fh->bars[k][0] = r;
- fh->bars[k][1] = g;
- fh->bars[k][2] = b;
+ dev->bars[k][0] = r;
+ dev->bars[k][1] = g;
+ dev->bars[k][2] = b;
}
}
-
}
#define TSTAMP_MIN_Y 24
@@ -395,20 +306,20 @@ static void precalculate_bars(struct vivi_fh *fh)
#define TSTAMP_INPUT_X 10
#define TSTAMP_MIN_X (54 + TSTAMP_INPUT_X)
-static void gen_twopix(struct vivi_fh *fh, unsigned char *buf, int colorpos)
+static void gen_twopix(struct vivi_dev *dev, u8 *buf, int colorpos)
{
- unsigned char r_y, g_u, b_v;
- unsigned char *p;
+ u8 r_y, g_u, b_v;
int color;
+ u8 *p;
- r_y = fh->bars[colorpos][0]; /* R or precalculated Y */
- g_u = fh->bars[colorpos][1]; /* G or precalculated U */
- b_v = fh->bars[colorpos][2]; /* B or precalculated V */
+ r_y = dev->bars[colorpos][0]; /* R or precalculated Y */
+ g_u = dev->bars[colorpos][1]; /* G or precalculated U */
+ b_v = dev->bars[colorpos][2]; /* B or precalculated V */
for (color = 0; color < 4; color++) {
p = buf + color;
- switch (fh->fmt->fourcc) {
+ switch (dev->fmt->fourcc) {
case V4L2_PIX_FMT_YUYV:
switch (color) {
case 0:
@@ -489,123 +400,88 @@ static void gen_twopix(struct vivi_fh *fh, unsigned char *buf, int colorpos)
}
}
-static void gen_line(struct vivi_fh *fh, char *basep, int inipos, int wmax,
- int hmax, int line, int count, char *timestr)
+static void precalculate_line(struct vivi_dev *dev)
{
- int w, i, j;
- int pos = inipos;
- char *s;
- u8 chr;
-
- /* We will just duplicate the second pixel at the packet */
- wmax /= 2;
+ int w;
- /* Generate a standard color bar pattern */
- for (w = 0; w < wmax; w++) {
- int colorpos = ((w + count) * 8/(wmax + 1)) % 8;
+ for (w = 0; w < dev->width * 2; w += 2) {
+ int colorpos = (w / (dev->width / 8) % 8);
- gen_twopix(fh, basep + pos, colorpos);
- pos += 4; /* only 16 bpp supported for now */
+ gen_twopix(dev, dev->line + w * 2, colorpos);
}
+}
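/*
 * Illustrative sketch, not part of the patch above: how the colorpos
 * expression in precalculate_line() slices a line into eight bars.  A
 * 640-pixel frame width is assumed; the loop only prints where each bar
 * starts.  The pattern repeats across a second frame width so that
 * vivi_fillbuff() can start its copy at any offset (dev->mv_count % wmax).
 */
#include <stdio.h>

int main(void)
{
	int width = 640, w, prev = -1;

	for (w = 0; w < width * 2; w += 2) {
		int colorpos = w / (width / 8) % 8;

		if (colorpos != prev) {
			/* a new 80-pixel-wide bar begins here */
			printf("pixel %4d: bar %d\n", w, colorpos);
			prev = colorpos;
		}
	}
	return 0;
}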
- /* Prints input entry number */
-
- /* Checks if it is possible to input number */
- if (TSTAMP_MAX_Y >= hmax)
- goto end;
-
- if (TSTAMP_INPUT_X + strlen(timestr) >= wmax)
- goto end;
-
- if (line >= TSTAMP_MIN_Y && line <= TSTAMP_MAX_Y) {
- chr = rom8x16_bits[fh->input * 16 + line - TSTAMP_MIN_Y];
- pos = TSTAMP_INPUT_X;
- for (i = 0; i < 7; i++) {
- /* Draw white font on black background */
- if (chr & 1 << (7 - i))
- gen_twopix(fh, basep + pos, WHITE);
- else
- gen_twopix(fh, basep + pos, BLACK);
- pos += 2;
- }
- }
+static void gen_text(struct vivi_dev *dev, char *basep,
+ int y, int x, char *text)
+{
+ int line;
- /* Checks if it is possible to show timestamp */
- if (TSTAMP_MIN_X + strlen(timestr) >= wmax)
- goto end;
+ /* Checks if it is possible to show string */
+ if (y + 16 >= dev->height || x + strlen(text) * 8 >= dev->width)
+ return;
/* Print stream time */
- if (line >= TSTAMP_MIN_Y && line <= TSTAMP_MAX_Y) {
- j = TSTAMP_MIN_X;
- for (s = timestr; *s; s++) {
- chr = rom8x16_bits[(*s-0x30)*16+line-TSTAMP_MIN_Y];
- for (i = 0; i < 7; i++) {
- pos = inipos + j * 2;
+ for (line = y; line < y + 16; line++) {
+ int j = 0;
+ char *pos = basep + line * dev->width * 2 + x * 2;
+ char *s;
+
+ for (s = text; *s; s++) {
+ u8 chr = font8x16[*s * 16 + line - y];
+ int i;
+
+ for (i = 0; i < 7; i++, j++) {
/* Draw white font on black background */
- if (chr & 1 << (7 - i))
- gen_twopix(fh, basep + pos, WHITE);
+ if (chr & (1 << (7 - i)))
+ gen_twopix(dev, pos + j * 2, WHITE);
else
- gen_twopix(fh, basep + pos, BLACK);
- j++;
+ gen_twopix(dev, pos + j * 2, TEXT_BLACK);
}
}
}
-
-end:
- return;
}
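/*
 * Illustrative sketch, not part of the patch above: the bit test gen_text()
 * applies to each 8x16 font row.  The row byte below is a made-up value, not
 * taken from the real console font, and only the seven columns the driver
 * actually draws (i = 0..6) are shown.
 */
#include <stdio.h>

int main(void)
{
	unsigned char chr = 0x7c;	/* hypothetical glyph row: 01111100 */
	int i;

	for (i = 0; i < 7; i++)
		putchar(chr & (1 << (7 - i)) ? '#' : '.');
	putchar('\n');			/* prints ".#####." */
	return 0;
}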
-static void vivi_fillbuff(struct vivi_fh *fh, struct vivi_buffer *buf)
+static void vivi_fillbuff(struct vivi_dev *dev, struct vivi_buffer *buf)
{
- struct vivi_dev *dev = fh->dev;
- int h , pos = 0;
- int hmax = buf->vb.height;
- int wmax = buf->vb.width;
+ int hmax = buf->vb.height;
+ int wmax = buf->vb.width;
struct timeval ts;
- char *tmpbuf;
void *vbuf = videobuf_to_vmalloc(&buf->vb);
+ unsigned ms;
+ char str[100];
+ int h, line = 1;
if (!vbuf)
return;
- tmpbuf = kmalloc(wmax * 2, GFP_ATOMIC);
- if (!tmpbuf)
- return;
-
- for (h = 0; h < hmax; h++) {
- gen_line(fh, tmpbuf, 0, wmax, hmax, h, dev->mv_count,
- dev->timestr);
- memcpy(vbuf + pos, tmpbuf, wmax * 2);
- pos += wmax*2;
- }
-
- dev->mv_count++;
-
- kfree(tmpbuf);
+ for (h = 0; h < hmax; h++)
+ memcpy(vbuf + h * wmax * 2, dev->line + (dev->mv_count % wmax) * 2, wmax * 2);
/* Updates stream time */
- dev->ms += jiffies_to_msecs(jiffies-dev->jiffies);
+ dev->ms += jiffies_to_msecs(jiffies - dev->jiffies);
dev->jiffies = jiffies;
- if (dev->ms >= 1000) {
- dev->ms -= 1000;
- dev->s++;
- if (dev->s >= 60) {
- dev->s -= 60;
- dev->m++;
- if (dev->m > 60) {
- dev->m -= 60;
- dev->h++;
- if (dev->h > 24)
- dev->h -= 24;
- }
- }
- }
- sprintf(dev->timestr, "%02d:%02d:%02d:%03d",
- dev->h, dev->m, dev->s, dev->ms);
-
- dprintk(dev, 2, "vivifill at %s: Buffer 0x%08lx size= %d\n",
- dev->timestr, (unsigned long)tmpbuf, pos);
+ ms = dev->ms;
+ snprintf(str, sizeof(str), " %02d:%02d:%02d:%03d ",
+ (ms / (60 * 60 * 1000)) % 24,
+ (ms / (60 * 1000)) % 60,
+ (ms / 1000) % 60,
+ ms % 1000);
+ gen_text(dev, vbuf, line++ * 16, 16, str);
+ snprintf(str, sizeof(str), " %dx%d, input %d ",
+ dev->width, dev->height, dev->input);
+ gen_text(dev, vbuf, line++ * 16, 16, str);
+
+ snprintf(str, sizeof(str), " brightness %3d, contrast %3d, saturation %3d, hue %d ",
+ dev->brightness,
+ dev->contrast,
+ dev->saturation,
+ dev->hue);
+ gen_text(dev, vbuf, line++ * 16, 16, str);
+ snprintf(str, sizeof(str), " volume %3d ", dev->volume);
+ gen_text(dev, vbuf, line++ * 16, 16, str);
+
+ dev->mv_count += 2;
/* Advice that buffer was filled */
buf->vb.field_count++;
@@ -614,12 +490,10 @@ static void vivi_fillbuff(struct vivi_fh *fh, struct vivi_buffer *buf)
buf->vb.state = VIDEOBUF_DONE;
}
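/*
 * Illustrative sketch, not part of the patch above: the modular arithmetic
 * vivi_fillbuff() uses to turn the running millisecond counter into the
 * on-screen " HH:MM:SS:mmm " string.  The counter value is a made-up example.
 */
#include <stdio.h>

int main(void)
{
	unsigned ms = 3723456;	/* 1 h, 2 min, 3 s, 456 ms into the stream */
	char str[32];

	snprintf(str, sizeof(str), " %02d:%02d:%02d:%03d ",
		 (ms / (60 * 60 * 1000)) % 24,
		 (ms / (60 * 1000)) % 60,
		 (ms / 1000) % 60,
		 ms % 1000);
	printf("%s\n", str);	/* prints " 01:02:03:456 " */
	return 0;
}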
-static void vivi_thread_tick(struct vivi_fh *fh)
+static void vivi_thread_tick(struct vivi_dev *dev)
{
- struct vivi_buffer *buf;
- struct vivi_dev *dev = fh->dev;
struct vivi_dmaqueue *dma_q = &dev->vidq;
-
+ struct vivi_buffer *buf;
unsigned long flags = 0;
dprintk(dev, 1, "Thread tick\n");
@@ -642,22 +516,20 @@ static void vivi_thread_tick(struct vivi_fh *fh)
do_gettimeofday(&buf->vb.ts);
/* Fill buffer */
- vivi_fillbuff(fh, buf);
+ vivi_fillbuff(dev, buf);
dprintk(dev, 1, "filled buffer %p\n", buf);
wake_up(&buf->vb.done);
dprintk(dev, 2, "[%p/%d] wakeup\n", buf, buf->vb. i);
unlock:
spin_unlock_irqrestore(&dev->slock, flags);
- return;
}
#define frames_to_ms(frames) \
((frames * WAKE_NUMERATOR * 1000) / WAKE_DENOMINATOR)
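/*
 * Illustrative sketch, not part of the patch above: what frames_to_ms()
 * evaluates to for a single frame.  WAKE_NUMERATOR is not visible in this
 * section, so the value 30 is assumed here (a 30000/1001, i.e. ~29.97 fps,
 * style rate); WAKE_DENOMINATOR is the 1001 defined earlier.
 */
#include <stdio.h>

#define EX_WAKE_NUMERATOR	30	/* assumed */
#define EX_WAKE_DENOMINATOR	1001
#define ex_frames_to_ms(frames) \
	((frames * EX_WAKE_NUMERATOR * 1000) / EX_WAKE_DENOMINATOR)

int main(void)
{
	/* the per-tick sleep works out to 29 ms with integer division */
	printf("sleep per tick: %d ms\n", ex_frames_to_ms(1));
	return 0;
}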
-static void vivi_sleep(struct vivi_fh *fh)
+static void vivi_sleep(struct vivi_dev *dev)
{
- struct vivi_dev *dev = fh->dev;
struct vivi_dmaqueue *dma_q = &dev->vidq;
int timeout;
DECLARE_WAITQUEUE(wait, current);
@@ -672,7 +544,7 @@ static void vivi_sleep(struct vivi_fh *fh)
/* Calculate time to wake up */
timeout = msecs_to_jiffies(frames_to_ms(1));
- vivi_thread_tick(fh);
+ vivi_thread_tick(dev);
schedule_timeout_interruptible(timeout);
@@ -683,15 +555,14 @@ stop_task:
static int vivi_thread(void *data)
{
- struct vivi_fh *fh = data;
- struct vivi_dev *dev = fh->dev;
+ struct vivi_dev *dev = data;
dprintk(dev, 1, "thread started\n");
set_freezable();
for (;;) {
- vivi_sleep(fh);
+ vivi_sleep(dev);
if (kthread_should_stop())
break;
@@ -700,39 +571,61 @@ static int vivi_thread(void *data)
return 0;
}
-static int vivi_start_thread(struct vivi_fh *fh)
+static void vivi_start_generating(struct file *file)
{
- struct vivi_dev *dev = fh->dev;
+ struct vivi_dev *dev = video_drvdata(file);
struct vivi_dmaqueue *dma_q = &dev->vidq;
- dma_q->frame = 0;
- dma_q->ini_jiffies = jiffies;
-
dprintk(dev, 1, "%s\n", __func__);
- dma_q->kthread = kthread_run(vivi_thread, fh, "vivi");
+ if (test_and_set_bit(0, &dev->generating))
+ return;
+ file->private_data = dev;
+
+ /* Resets frame counters */
+ dev->ms = 0;
+ dev->mv_count = 0;
+ dev->jiffies = jiffies;
+
+ dma_q->frame = 0;
+ dma_q->ini_jiffies = jiffies;
+ dma_q->kthread = kthread_run(vivi_thread, dev, dev->v4l2_dev.name);
if (IS_ERR(dma_q->kthread)) {
v4l2_err(&dev->v4l2_dev, "kernel_thread() failed\n");
- return PTR_ERR(dma_q->kthread);
+ clear_bit(0, &dev->generating);
+ return;
}
/* Wakes thread */
wake_up_interruptible(&dma_q->wq);
dprintk(dev, 1, "returning from %s\n", __func__);
- return 0;
}
-static void vivi_stop_thread(struct vivi_dmaqueue *dma_q)
+static void vivi_stop_generating(struct file *file)
{
- struct vivi_dev *dev = container_of(dma_q, struct vivi_dev, vidq);
+ struct vivi_dev *dev = video_drvdata(file);
+ struct vivi_dmaqueue *dma_q = &dev->vidq;
dprintk(dev, 1, "%s\n", __func__);
+
+ if (!file->private_data)
+ return;
+ if (!test_and_clear_bit(0, &dev->generating))
+ return;
+
/* shutdown control thread */
if (dma_q->kthread) {
kthread_stop(dma_q->kthread);
dma_q->kthread = NULL;
}
+ videobuf_stop(&dev->vb_vidq);
+ videobuf_mmap_free(&dev->vb_vidq);
+}
+
+static int vivi_is_generating(struct vivi_dev *dev)
+{
+ return test_bit(0, &dev->generating);
}
/* ------------------------------------------------------------------
@@ -741,10 +634,9 @@ static void vivi_stop_thread(struct vivi_dmaqueue *dma_q)
static int
buffer_setup(struct videobuf_queue *vq, unsigned int *count, unsigned int *size)
{
- struct vivi_fh *fh = vq->priv_data;
- struct vivi_dev *dev = fh->dev;
+ struct vivi_dev *dev = vq->priv_data;
- *size = fh->width*fh->height*2;
+ *size = dev->width * dev->height * 2;
if (0 == *count)
*count = 32;
@@ -760,49 +652,43 @@ buffer_setup(struct videobuf_queue *vq, unsigned int *count, unsigned int *size)
static void free_buffer(struct videobuf_queue *vq, struct vivi_buffer *buf)
{
- struct vivi_fh *fh = vq->priv_data;
- struct vivi_dev *dev = fh->dev;
+ struct vivi_dev *dev = vq->priv_data;
dprintk(dev, 1, "%s, state: %i\n", __func__, buf->vb.state);
- if (in_interrupt())
- BUG();
-
videobuf_vmalloc_free(&buf->vb);
dprintk(dev, 1, "free_buffer: freed\n");
buf->vb.state = VIDEOBUF_NEEDS_INIT;
}
-#define norm_maxw() 1024
-#define norm_maxh() 768
static int
buffer_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb,
enum v4l2_field field)
{
- struct vivi_fh *fh = vq->priv_data;
- struct vivi_dev *dev = fh->dev;
+ struct vivi_dev *dev = vq->priv_data;
struct vivi_buffer *buf = container_of(vb, struct vivi_buffer, vb);
int rc;
dprintk(dev, 1, "%s, field=%d\n", __func__, field);
- BUG_ON(NULL == fh->fmt);
+ BUG_ON(NULL == dev->fmt);
- if (fh->width < 48 || fh->width > norm_maxw() ||
- fh->height < 32 || fh->height > norm_maxh())
+ if (dev->width < 48 || dev->width > MAX_WIDTH ||
+ dev->height < 32 || dev->height > MAX_HEIGHT)
return -EINVAL;
- buf->vb.size = fh->width*fh->height*2;
- if (0 != buf->vb.baddr && buf->vb.bsize < buf->vb.size)
+ buf->vb.size = dev->width * dev->height * 2;
+ if (0 != buf->vb.baddr && buf->vb.bsize < buf->vb.size)
return -EINVAL;
/* These properties only change when queue is idle, see s_fmt */
- buf->fmt = fh->fmt;
- buf->vb.width = fh->width;
- buf->vb.height = fh->height;
+ buf->fmt = dev->fmt;
+ buf->vb.width = dev->width;
+ buf->vb.height = dev->height;
buf->vb.field = field;
- precalculate_bars(fh);
+ precalculate_bars(dev);
+ precalculate_line(dev);
if (VIDEOBUF_NEEDS_INIT == buf->vb.state) {
rc = videobuf_iolock(vq, &buf->vb, NULL);
@@ -811,7 +697,6 @@ buffer_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb,
}
buf->vb.state = VIDEOBUF_PREPARED;
-
return 0;
fail:
@@ -822,9 +707,8 @@ fail:
static void
buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
{
- struct vivi_buffer *buf = container_of(vb, struct vivi_buffer, vb);
- struct vivi_fh *fh = vq->priv_data;
- struct vivi_dev *dev = fh->dev;
+ struct vivi_dev *dev = vq->priv_data;
+ struct vivi_buffer *buf = container_of(vb, struct vivi_buffer, vb);
struct vivi_dmaqueue *vidq = &dev->vidq;
dprintk(dev, 1, "%s\n", __func__);
@@ -836,9 +720,8 @@ buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
static void buffer_release(struct videobuf_queue *vq,
struct videobuf_buffer *vb)
{
- struct vivi_buffer *buf = container_of(vb, struct vivi_buffer, vb);
- struct vivi_fh *fh = vq->priv_data;
- struct vivi_dev *dev = (struct vivi_dev *)fh->dev;
+ struct vivi_dev *dev = vq->priv_data;
+ struct vivi_buffer *buf = container_of(vb, struct vivi_buffer, vb);
dprintk(dev, 1, "%s\n", __func__);
@@ -858,16 +741,14 @@ static struct videobuf_queue_ops vivi_video_qops = {
static int vidioc_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
- struct vivi_fh *fh = priv;
- struct vivi_dev *dev = fh->dev;
+ struct vivi_dev *dev = video_drvdata(file);
strcpy(cap->driver, "vivi");
strcpy(cap->card, "vivi");
strlcpy(cap->bus_info, dev->v4l2_dev.name, sizeof(cap->bus_info));
cap->version = VIVI_VERSION;
- cap->capabilities = V4L2_CAP_VIDEO_CAPTURE |
- V4L2_CAP_STREAMING |
- V4L2_CAP_READWRITE;
+ cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING | \
+ V4L2_CAP_READWRITE;
return 0;
}
@@ -889,28 +770,25 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct vivi_fh *fh = priv;
+ struct vivi_dev *dev = video_drvdata(file);
- f->fmt.pix.width = fh->width;
- f->fmt.pix.height = fh->height;
- f->fmt.pix.field = fh->vb_vidq.field;
- f->fmt.pix.pixelformat = fh->fmt->fourcc;
+ f->fmt.pix.width = dev->width;
+ f->fmt.pix.height = dev->height;
+ f->fmt.pix.field = dev->vb_vidq.field;
+ f->fmt.pix.pixelformat = dev->fmt->fourcc;
f->fmt.pix.bytesperline =
- (f->fmt.pix.width * fh->fmt->depth) >> 3;
+ (f->fmt.pix.width * dev->fmt->depth) >> 3;
f->fmt.pix.sizeimage =
f->fmt.pix.height * f->fmt.pix.bytesperline;
-
- return (0);
+ return 0;
}
static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct vivi_fh *fh = priv;
- struct vivi_dev *dev = fh->dev;
+ struct vivi_dev *dev = video_drvdata(file);
struct vivi_fmt *fmt;
enum v4l2_field field;
- unsigned int maxw, maxh;
fmt = get_format(f);
if (!fmt) {
@@ -928,113 +806,109 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
return -EINVAL;
}
- maxw = norm_maxw();
- maxh = norm_maxh();
-
f->fmt.pix.field = field;
- v4l_bound_align_image(&f->fmt.pix.width, 48, maxw, 2,
- &f->fmt.pix.height, 32, maxh, 0, 0);
+ v4l_bound_align_image(&f->fmt.pix.width, 48, MAX_WIDTH, 2,
+ &f->fmt.pix.height, 32, MAX_HEIGHT, 0, 0);
f->fmt.pix.bytesperline =
(f->fmt.pix.width * fmt->depth) >> 3;
f->fmt.pix.sizeimage =
f->fmt.pix.height * f->fmt.pix.bytesperline;
-
return 0;
}
-/*FIXME: This seems to be generic enough to be at videodev2 */
static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct vivi_fh *fh = priv;
- struct videobuf_queue *q = &fh->vb_vidq;
+ struct vivi_dev *dev = video_drvdata(file);
+ struct videobuf_queue *q = &dev->vb_vidq;
- int ret = vidioc_try_fmt_vid_cap(file, fh, f);
+ int ret = vidioc_try_fmt_vid_cap(file, priv, f);
if (ret < 0)
return ret;
mutex_lock(&q->vb_lock);
- if (videobuf_queue_is_busy(&fh->vb_vidq)) {
- dprintk(fh->dev, 1, "%s queue busy\n", __func__);
+ if (vivi_is_generating(dev)) {
+ dprintk(dev, 1, "%s device busy\n", __func__);
ret = -EBUSY;
goto out;
}
- fh->fmt = get_format(f);
- fh->width = f->fmt.pix.width;
- fh->height = f->fmt.pix.height;
- fh->vb_vidq.field = f->fmt.pix.field;
- fh->type = f->type;
-
+ dev->fmt = get_format(f);
+ dev->width = f->fmt.pix.width;
+ dev->height = f->fmt.pix.height;
+ dev->vb_vidq.field = f->fmt.pix.field;
ret = 0;
out:
mutex_unlock(&q->vb_lock);
-
return ret;
}
static int vidioc_reqbufs(struct file *file, void *priv,
struct v4l2_requestbuffers *p)
{
- struct vivi_fh *fh = priv;
+ struct vivi_dev *dev = video_drvdata(file);
- return (videobuf_reqbufs(&fh->vb_vidq, p));
+ return videobuf_reqbufs(&dev->vb_vidq, p);
}
static int vidioc_querybuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
- struct vivi_fh *fh = priv;
+ struct vivi_dev *dev = video_drvdata(file);
- return (videobuf_querybuf(&fh->vb_vidq, p));
+ return videobuf_querybuf(&dev->vb_vidq, p);
}
static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
- struct vivi_fh *fh = priv;
+ struct vivi_dev *dev = video_drvdata(file);
- return (videobuf_qbuf(&fh->vb_vidq, p));
+ return videobuf_qbuf(&dev->vb_vidq, p);
}
static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
- struct vivi_fh *fh = priv;
+ struct vivi_dev *dev = video_drvdata(file);
- return (videobuf_dqbuf(&fh->vb_vidq, p,
- file->f_flags & O_NONBLOCK));
+ return videobuf_dqbuf(&dev->vb_vidq, p,
+ file->f_flags & O_NONBLOCK);
}
#ifdef CONFIG_VIDEO_V4L1_COMPAT
static int vidiocgmbuf(struct file *file, void *priv, struct video_mbuf *mbuf)
{
- struct vivi_fh *fh = priv;
+ struct vivi_dev *dev = video_drvdata(file);
- return videobuf_cgmbuf(&fh->vb_vidq, mbuf, 8);
+ return videobuf_cgmbuf(&dev->vb_vidq, mbuf, 8);
}
#endif
static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
{
- struct vivi_fh *fh = priv;
+ struct vivi_dev *dev = video_drvdata(file);
+ int ret;
- if (fh->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
- return -EINVAL;
- if (i != fh->type)
+ if (i != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
+ ret = videobuf_streamon(&dev->vb_vidq);
+ if (ret)
+ return ret;
- return videobuf_streamon(&fh->vb_vidq);
+ vivi_start_generating(file);
+ return 0;
}
static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
{
- struct vivi_fh *fh = priv;
+ struct vivi_dev *dev = video_drvdata(file);
+ int ret;
- if (fh->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
- return -EINVAL;
- if (i != fh->type)
+ if (i != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
-
- return videobuf_streamoff(&fh->vb_vidq);
+ ret = videobuf_streamoff(&dev->vb_vidq);
+ if (!ret)
+ vivi_stop_generating(file);
+ return ret;
}
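
The reqbufs/querybuf/qbuf/dqbuf wrappers above are thin passthroughs to videobuf, with streamon/streamoff now also toggling frame generation. A rough sketch of the user-space sequence they serve (illustrative only; error cleanup trimmed, buffer count chosen arbitrarily, fd is an open /dev/videoN node):

#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/videodev2.h>

int stream_some_frames(int fd)
{
	enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	struct v4l2_requestbuffers req;
	struct v4l2_buffer buf;
	void *mem[4];
	unsigned int i, frames;

	memset(&req, 0, sizeof(req));
	req.count = 4;
	req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	req.memory = V4L2_MEMORY_MMAP;
	if (ioctl(fd, VIDIOC_REQBUFS, &req) < 0)
		return -1;
	if (req.count > 4)
		req.count = 4;		/* demo only maps four buffers */

	for (i = 0; i < req.count; i++) {
		memset(&buf, 0, sizeof(buf));
		buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		buf.memory = V4L2_MEMORY_MMAP;
		buf.index = i;
		if (ioctl(fd, VIDIOC_QUERYBUF, &buf) < 0)
			return -1;
		mem[i] = mmap(NULL, buf.length, PROT_READ | PROT_WRITE,
			      MAP_SHARED, fd, buf.m.offset);
		if (mem[i] == MAP_FAILED || ioctl(fd, VIDIOC_QBUF, &buf) < 0)
			return -1;
	}

	if (ioctl(fd, VIDIOC_STREAMON, &type) < 0)	/* starts generation */
		return -1;

	for (frames = 0; frames < 10; frames++) {
		memset(&buf, 0, sizeof(buf));
		buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		buf.memory = V4L2_MEMORY_MMAP;
		if (ioctl(fd, VIDIOC_DQBUF, &buf) < 0)	/* blocks for a frame */
			return -1;
		/* ...consume buf.bytesused bytes at mem[buf.index]... */
		if (ioctl(fd, VIDIOC_QBUF, &buf) < 0)
			return -1;
	}

	return ioctl(fd, VIDIOC_STREAMOFF, &type);	/* stops generation */
}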
static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id *i)
@@ -1052,80 +926,104 @@ static int vidioc_enum_input(struct file *file, void *priv,
inp->type = V4L2_INPUT_TYPE_CAMERA;
inp->std = V4L2_STD_525_60;
sprintf(inp->name, "Camera %u", inp->index);
-
- return (0);
+ return 0;
}
static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
{
- struct vivi_fh *fh = priv;
- struct vivi_dev *dev = fh->dev;
+ struct vivi_dev *dev = video_drvdata(file);
*i = dev->input;
-
- return (0);
+ return 0;
}
+
static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
{
- struct vivi_fh *fh = priv;
- struct vivi_dev *dev = fh->dev;
+ struct vivi_dev *dev = video_drvdata(file);
if (i >= NUM_INPUTS)
return -EINVAL;
dev->input = i;
- precalculate_bars(fh);
-
- return (0);
+ precalculate_bars(dev);
+ precalculate_line(dev);
+ return 0;
}
- /* --- controls ---------------------------------------------- */
+/* --- controls ---------------------------------------------- */
static int vidioc_queryctrl(struct file *file, void *priv,
struct v4l2_queryctrl *qc)
{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(vivi_qctrl); i++)
- if (qc->id && qc->id == vivi_qctrl[i].id) {
- memcpy(qc, &(vivi_qctrl[i]),
- sizeof(*qc));
- return (0);
- }
-
+ switch (qc->id) {
+ case V4L2_CID_AUDIO_VOLUME:
+ return v4l2_ctrl_query_fill(qc, 0, 255, 1, 200);
+ case V4L2_CID_BRIGHTNESS:
+ return v4l2_ctrl_query_fill(qc, 0, 255, 1, 127);
+ case V4L2_CID_CONTRAST:
+ return v4l2_ctrl_query_fill(qc, 0, 255, 1, 16);
+ case V4L2_CID_SATURATION:
+ return v4l2_ctrl_query_fill(qc, 0, 255, 1, 127);
+ case V4L2_CID_HUE:
+ return v4l2_ctrl_query_fill(qc, -128, 127, 1, 0);
+ }
return -EINVAL;
}
static int vidioc_g_ctrl(struct file *file, void *priv,
struct v4l2_control *ctrl)
{
- struct vivi_fh *fh = priv;
- struct vivi_dev *dev = fh->dev;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(vivi_qctrl); i++)
- if (ctrl->id == vivi_qctrl[i].id) {
- ctrl->value = dev->qctl_regs[i];
- return 0;
- }
+ struct vivi_dev *dev = video_drvdata(file);
+ switch (ctrl->id) {
+ case V4L2_CID_AUDIO_VOLUME:
+ ctrl->value = dev->volume;
+ return 0;
+ case V4L2_CID_BRIGHTNESS:
+ ctrl->value = dev->brightness;
+ return 0;
+ case V4L2_CID_CONTRAST:
+ ctrl->value = dev->contrast;
+ return 0;
+ case V4L2_CID_SATURATION:
+ ctrl->value = dev->saturation;
+ return 0;
+ case V4L2_CID_HUE:
+ ctrl->value = dev->hue;
+ return 0;
+ }
return -EINVAL;
}
+
static int vidioc_s_ctrl(struct file *file, void *priv,
struct v4l2_control *ctrl)
{
- struct vivi_fh *fh = priv;
- struct vivi_dev *dev = fh->dev;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(vivi_qctrl); i++)
- if (ctrl->id == vivi_qctrl[i].id) {
- if (ctrl->value < vivi_qctrl[i].minimum ||
- ctrl->value > vivi_qctrl[i].maximum) {
- return -ERANGE;
- }
- dev->qctl_regs[i] = ctrl->value;
- return 0;
- }
+ struct vivi_dev *dev = video_drvdata(file);
+ struct v4l2_queryctrl qc;
+ int err;
+
+ qc.id = ctrl->id;
+ err = vidioc_queryctrl(file, priv, &qc);
+ if (err < 0)
+ return err;
+ if (ctrl->value < qc.minimum || ctrl->value > qc.maximum)
+ return -ERANGE;
+ switch (ctrl->id) {
+ case V4L2_CID_AUDIO_VOLUME:
+ dev->volume = ctrl->value;
+ return 0;
+ case V4L2_CID_BRIGHTNESS:
+ dev->brightness = ctrl->value;
+ return 0;
+ case V4L2_CID_CONTRAST:
+ dev->contrast = ctrl->value;
+ return 0;
+ case V4L2_CID_SATURATION:
+ dev->saturation = ctrl->value;
+ return 0;
+ case V4L2_CID_HUE:
+ dev->hue = ctrl->value;
+ return 0;
+ }
return -EINVAL;
}
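
With the static vivi_qctrl table gone, all three control handlers key off the same switch and v4l2_ctrl_query_fill(), so s_ctrl can reuse queryctrl for range checking. A hypothetical user-space caller exercising that path might look like this (not part of the patch; fd is an already-open /dev/videoN):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int set_vivi_brightness(int fd, int value)
{
	struct v4l2_queryctrl qc;
	struct v4l2_control ctrl;

	memset(&qc, 0, sizeof(qc));
	qc.id = V4L2_CID_BRIGHTNESS;
	if (ioctl(fd, VIDIOC_QUERYCTRL, &qc) < 0)
		return -1;			/* unknown ids get -EINVAL */

	if (value < qc.minimum)			/* out-of-range would be -ERANGE */
		value = qc.minimum;
	if (value > qc.maximum)
		value = qc.maximum;

	memset(&ctrl, 0, sizeof(ctrl));
	ctrl.id = V4L2_CID_BRIGHTNESS;
	ctrl.value = value;
	return ioctl(fd, VIDIOC_S_CTRL, &ctrl);
}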
@@ -1133,134 +1031,58 @@ static int vidioc_s_ctrl(struct file *file, void *priv,
File operations for the device
------------------------------------------------------------------*/
-static int vivi_open(struct file *file)
-{
- struct vivi_dev *dev = video_drvdata(file);
- struct vivi_fh *fh = NULL;
- int retval = 0;
-
- mutex_lock(&dev->mutex);
- dev->users++;
-
- if (dev->users > 1) {
- dev->users--;
- mutex_unlock(&dev->mutex);
- return -EBUSY;
- }
-
- dprintk(dev, 1, "open %s type=%s users=%d\n",
- video_device_node_name(dev->vfd),
- v4l2_type_names[V4L2_BUF_TYPE_VIDEO_CAPTURE], dev->users);
-
- /* allocate + initialize per filehandle data */
- fh = kzalloc(sizeof(*fh), GFP_KERNEL);
- if (NULL == fh) {
- dev->users--;
- retval = -ENOMEM;
- }
- mutex_unlock(&dev->mutex);
-
- if (retval)
- return retval;
-
- file->private_data = fh;
- fh->dev = dev;
-
- fh->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- fh->fmt = &formats[0];
- fh->width = 640;
- fh->height = 480;
-
- /* Resets frame counters */
- dev->h = 0;
- dev->m = 0;
- dev->s = 0;
- dev->ms = 0;
- dev->mv_count = 0;
- dev->jiffies = jiffies;
- sprintf(dev->timestr, "%02d:%02d:%02d:%03d",
- dev->h, dev->m, dev->s, dev->ms);
-
- videobuf_queue_vmalloc_init(&fh->vb_vidq, &vivi_video_qops,
- NULL, &dev->slock, fh->type, V4L2_FIELD_INTERLACED,
- sizeof(struct vivi_buffer), fh);
-
- vivi_start_thread(fh);
-
- return 0;
-}
-
static ssize_t
vivi_read(struct file *file, char __user *data, size_t count, loff_t *ppos)
{
- struct vivi_fh *fh = file->private_data;
+ struct vivi_dev *dev = video_drvdata(file);
- if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
- return videobuf_read_stream(&fh->vb_vidq, data, count, ppos, 0,
+ vivi_start_generating(file);
+ return videobuf_read_stream(&dev->vb_vidq, data, count, ppos, 0,
file->f_flags & O_NONBLOCK);
- }
- return 0;
}
static unsigned int
vivi_poll(struct file *file, struct poll_table_struct *wait)
{
- struct vivi_fh *fh = file->private_data;
- struct vivi_dev *dev = fh->dev;
- struct videobuf_queue *q = &fh->vb_vidq;
+ struct vivi_dev *dev = video_drvdata(file);
+ struct videobuf_queue *q = &dev->vb_vidq;
dprintk(dev, 1, "%s\n", __func__);
- if (V4L2_BUF_TYPE_VIDEO_CAPTURE != fh->type)
- return POLLERR;
-
+ vivi_start_generating(file);
return videobuf_poll_stream(file, q, wait);
}
static int vivi_close(struct file *file)
{
- struct vivi_fh *fh = file->private_data;
- struct vivi_dev *dev = fh->dev;
- struct vivi_dmaqueue *vidq = &dev->vidq;
struct video_device *vdev = video_devdata(file);
+ struct vivi_dev *dev = video_drvdata(file);
- vivi_stop_thread(vidq);
- videobuf_stop(&fh->vb_vidq);
- videobuf_mmap_free(&fh->vb_vidq);
-
- kfree(fh);
-
- mutex_lock(&dev->mutex);
- dev->users--;
- mutex_unlock(&dev->mutex);
-
- dprintk(dev, 1, "close called (dev=%s, users=%d)\n",
- video_device_node_name(vdev), dev->users);
+ vivi_stop_generating(file);
+ dprintk(dev, 1, "close called (dev=%s)\n",
+ video_device_node_name(vdev));
return 0;
}
static int vivi_mmap(struct file *file, struct vm_area_struct *vma)
{
- struct vivi_fh *fh = file->private_data;
- struct vivi_dev *dev = fh->dev;
+ struct vivi_dev *dev = video_drvdata(file);
int ret;
dprintk(dev, 1, "mmap called, vma=0x%08lx\n", (unsigned long)vma);
- ret = videobuf_mmap_mapper(&fh->vb_vidq, vma);
+ ret = videobuf_mmap_mapper(&dev->vb_vidq, vma);
dprintk(dev, 1, "vma start=0x%08lx, size=%ld, ret=%d\n",
(unsigned long)vma->vm_start,
- (unsigned long)vma->vm_end-(unsigned long)vma->vm_start,
+ (unsigned long)vma->vm_end - (unsigned long)vma->vm_start,
ret);
-
return ret;
}
static const struct v4l2_file_operations vivi_fops = {
.owner = THIS_MODULE,
- .open = vivi_open,
.release = vivi_close,
.read = vivi_read,
.poll = vivi_poll,
@@ -1282,11 +1104,11 @@ static const struct v4l2_ioctl_ops vivi_ioctl_ops = {
.vidioc_enum_input = vidioc_enum_input,
.vidioc_g_input = vidioc_g_input,
.vidioc_s_input = vidioc_s_input,
+ .vidioc_streamon = vidioc_streamon,
+ .vidioc_streamoff = vidioc_streamoff,
.vidioc_queryctrl = vidioc_queryctrl,
.vidioc_g_ctrl = vidioc_g_ctrl,
.vidioc_s_ctrl = vidioc_s_ctrl,
- .vidioc_streamon = vidioc_streamon,
- .vidioc_streamoff = vidioc_streamoff,
#ifdef CONFIG_VIDEO_V4L1_COMPAT
.vidiocgmbuf = vidiocgmbuf,
#endif
@@ -1330,7 +1152,7 @@ static int __init vivi_create_instance(int inst)
{
struct vivi_dev *dev;
struct video_device *vfd;
- int ret, i;
+ int ret;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
@@ -1342,6 +1164,20 @@ static int __init vivi_create_instance(int inst)
if (ret)
goto free_dev;
+ dev->fmt = &formats[0];
+ dev->width = 640;
+ dev->height = 480;
+ dev->volume = 200;
+ dev->brightness = 127;
+ dev->contrast = 16;
+ dev->saturation = 127;
+ dev->hue = 0;
+
+ videobuf_queue_vmalloc_init(&dev->vb_vidq, &vivi_video_qops,
+ NULL, &dev->slock, V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ V4L2_FIELD_INTERLACED,
+ sizeof(struct vivi_buffer), dev);
+
/* init video dma queues */
INIT_LIST_HEAD(&dev->vidq.active);
init_waitqueue_head(&dev->vidq.wq);
@@ -1357,6 +1193,7 @@ static int __init vivi_create_instance(int inst)
*vfd = vivi_template;
vfd->debug = debug;
+ vfd->v4l2_dev = &dev->v4l2_dev;
ret = video_register_device(vfd, VFL_TYPE_GRABBER, video_nr);
if (ret < 0)
@@ -1364,10 +1201,6 @@ static int __init vivi_create_instance(int inst)
video_set_drvdata(vfd, dev);
- /* Set all controls to their default value. */
- for (i = 0; i < ARRAY_SIZE(vivi_qctrl); i++)
- dev->qctl_regs[i] = vivi_qctrl[i].default_value;
-
/* Now that everything is fine, let's add it to device list */
list_add_tail(&dev->vivi_devlist, &vivi_devlist);
@@ -1396,8 +1229,15 @@ free_dev:
*/
static int __init vivi_init(void)
{
+ const struct font_desc *font = find_font("VGA8x16");
int ret = 0, i;
+ if (font == NULL) {
+ printk(KERN_ERR "vivi: could not find font\n");
+ return -ENODEV;
+ }
+ font8x16 = font->data;
+
if (n_devs <= 0)
n_devs = 1;
@@ -1412,7 +1252,7 @@ static int __init vivi_init(void)
}
if (ret < 0) {
- printk(KERN_INFO "Error %d while loading vivi driver\n", ret);
+ printk(KERN_ERR "vivi: error %d while loading driver\n", ret);
return ret;
}
diff --git a/drivers/media/video/w9966.c b/drivers/media/video/w9966.c
index bf9bf650a317..635420d8d84a 100644
--- a/drivers/media/video/w9966.c
+++ b/drivers/media/video/w9966.c
@@ -1,7 +1,7 @@
/*
Winbond w9966cf Webcam parport driver.
- Version 0.32
+ Version 0.33
Copyright (C) 2001 Jakob Kemi <jakob.kemi@post.utfors.se>
@@ -57,10 +57,12 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
-#include <linux/videodev.h>
+#include <linux/version.h>
+#include <linux/videodev2.h>
#include <linux/slab.h>
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
+#include <media/v4l2-device.h>
#include <linux/parport.h>
/*#define DEBUG*/ /* Undef me for production */
@@ -76,12 +78,12 @@
*/
#define W9966_DRIVERNAME "W9966CF Webcam"
-#define W9966_MAXCAMS 4 // Maximum number of cameras
-#define W9966_RBUFFER 2048 // Read buffer (must be an even number)
-#define W9966_SRAMSIZE 131072 // 128kb
-#define W9966_SRAMID 0x02 // check w9966cf.pdf
+#define W9966_MAXCAMS 4 /* Maximum number of cameras */
+#define W9966_RBUFFER 2048 /* Read buffer (must be an even number) */
+#define W9966_SRAMSIZE 131072 /* 128kb */
+#define W9966_SRAMID 0x02 /* check w9966cf.pdf */
-// Empirically determined window limits
+/* Empirically determined window limits */
#define W9966_WND_MIN_X 16
#define W9966_WND_MIN_Y 14
#define W9966_WND_MAX_X 705
@@ -89,7 +91,7 @@
#define W9966_WND_MAX_W (W9966_WND_MAX_X - W9966_WND_MIN_X)
#define W9966_WND_MAX_H (W9966_WND_MAX_Y - W9966_WND_MIN_Y)
-// Keep track of our current state
+/* Keep track of our current state */
#define W9966_STATE_PDEV 0x01
#define W9966_STATE_CLAIMED 0x02
#define W9966_STATE_VDEV 0x04
@@ -101,12 +103,13 @@
#define W9966_I2C_W_DATA 0x02
#define W9966_I2C_W_CLOCK 0x01
-struct w9966_dev {
+struct w9966 {
+ struct v4l2_device v4l2_dev;
unsigned char dev_state;
unsigned char i2c_state;
unsigned short ppmode;
- struct parport* pport;
- struct pardevice* pdev;
+ struct parport *pport;
+ struct pardevice *pdev;
struct video_device vdev;
unsigned short width;
unsigned short height;
@@ -114,7 +117,7 @@ struct w9966_dev {
signed char contrast;
signed char color;
signed char hue;
- unsigned long in_use;
+ struct mutex lock;
};
/*
@@ -127,15 +130,15 @@ MODULE_LICENSE("GPL");
#ifdef MODULE
-static const char* pardev[] = {[0 ... W9966_MAXCAMS] = ""};
+static const char *pardev[] = {[0 ... W9966_MAXCAMS] = ""};
#else
-static const char* pardev[] = {[0 ... W9966_MAXCAMS] = "aggressive"};
+static const char *pardev[] = {[0 ... W9966_MAXCAMS] = "aggressive"};
#endif
module_param_array(pardev, charp, NULL, 0);
-MODULE_PARM_DESC(pardev, "pardev: where to search for\n\
-\teach camera. 'aggressive' means brute-force search.\n\
-\tEg: >pardev=parport3,aggressive,parport2,parport1< would assign\n\
-\tcam 1 to parport3 and search every parport for cam 2 etc...");
+MODULE_PARM_DESC(pardev, "pardev: where to search for\n"
+ "\teach camera. 'aggressive' means brute-force search.\n"
+ "\tEg: >pardev=parport3,aggressive,parport2,parport1< would assign\n"
+ "\tcam 1 to parport3 and search every parport for cam 2 etc...");
static int parmode;
module_param(parmode, int, 0);
@@ -144,117 +147,49 @@ MODULE_PARM_DESC(parmode, "parmode: transfer mode (0=auto, 1=ecp, 2=epp");
static int video_nr = -1;
module_param(video_nr, int, 0);
-/*
- * Private data
- */
-
-static struct w9966_dev w9966_cams[W9966_MAXCAMS];
-
-/*
- * Private function declares
- */
-
-static inline void w9966_setState(struct w9966_dev* cam, int mask, int val);
-static inline int w9966_getState(struct w9966_dev* cam, int mask, int val);
-static inline void w9966_pdev_claim(struct w9966_dev *vdev);
-static inline void w9966_pdev_release(struct w9966_dev *vdev);
-
-static int w9966_rReg(struct w9966_dev* cam, int reg);
-static int w9966_wReg(struct w9966_dev* cam, int reg, int data);
-#if 0
-static int w9966_rReg_i2c(struct w9966_dev* cam, int reg);
-#endif
-static int w9966_wReg_i2c(struct w9966_dev* cam, int reg, int data);
-static int w9966_findlen(int near, int size, int maxlen);
-static int w9966_calcscale(int size, int min, int max, int* beg, int* end, unsigned char* factor);
-static int w9966_setup(struct w9966_dev* cam, int x1, int y1, int x2, int y2, int w, int h);
-
-static int w9966_init(struct w9966_dev* cam, struct parport* port);
-static void w9966_term(struct w9966_dev* cam);
-
-static inline void w9966_i2c_setsda(struct w9966_dev* cam, int state);
-static inline int w9966_i2c_setscl(struct w9966_dev* cam, int state);
-static inline int w9966_i2c_getsda(struct w9966_dev* cam);
-static inline int w9966_i2c_getscl(struct w9966_dev* cam);
-static int w9966_i2c_wbyte(struct w9966_dev* cam, int data);
-#if 0
-static int w9966_i2c_rbyte(struct w9966_dev* cam);
-#endif
-
-static long w9966_v4l_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg);
-static ssize_t w9966_v4l_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos);
-
-static int w9966_exclusive_open(struct file *file)
-{
- struct w9966_dev *cam = video_drvdata(file);
-
- return test_and_set_bit(0, &cam->in_use) ? -EBUSY : 0;
-}
-
-static int w9966_exclusive_release(struct file *file)
-{
- struct w9966_dev *cam = video_drvdata(file);
-
- clear_bit(0, &cam->in_use);
- return 0;
-}
-
-static const struct v4l2_file_operations w9966_fops = {
- .owner = THIS_MODULE,
- .open = w9966_exclusive_open,
- .release = w9966_exclusive_release,
- .ioctl = w9966_v4l_ioctl,
- .read = w9966_v4l_read,
-};
-static struct video_device w9966_template = {
- .name = W9966_DRIVERNAME,
- .fops = &w9966_fops,
- .release = video_device_release_empty,
-};
+static struct w9966 w9966_cams[W9966_MAXCAMS];
/*
* Private function defines
*/
-// Set camera phase flags, so we know what to uninit when terminating
-static inline void w9966_setState(struct w9966_dev* cam, int mask, int val)
+/* Set camera phase flags, so we know what to uninit when terminating */
+static inline void w9966_set_state(struct w9966 *cam, int mask, int val)
{
cam->dev_state = (cam->dev_state & ~mask) ^ val;
}
-// Get camera phase flags
-static inline int w9966_getState(struct w9966_dev* cam, int mask, int val)
+/* Get camera phase flags */
+static inline int w9966_get_state(struct w9966 *cam, int mask, int val)
{
return ((cam->dev_state & mask) == val);
}
-// Claim parport for ourself
-static inline void w9966_pdev_claim(struct w9966_dev* cam)
+/* Claim parport for ourself */
+static void w9966_pdev_claim(struct w9966 *cam)
{
- if (w9966_getState(cam, W9966_STATE_CLAIMED, W9966_STATE_CLAIMED))
+ if (w9966_get_state(cam, W9966_STATE_CLAIMED, W9966_STATE_CLAIMED))
return;
parport_claim_or_block(cam->pdev);
- w9966_setState(cam, W9966_STATE_CLAIMED, W9966_STATE_CLAIMED);
+ w9966_set_state(cam, W9966_STATE_CLAIMED, W9966_STATE_CLAIMED);
}
-// Release parport for others to use
-static inline void w9966_pdev_release(struct w9966_dev* cam)
+/* Release parport for others to use */
+static void w9966_pdev_release(struct w9966 *cam)
{
- if (w9966_getState(cam, W9966_STATE_CLAIMED, 0))
+ if (w9966_get_state(cam, W9966_STATE_CLAIMED, 0))
return;
parport_release(cam->pdev);
- w9966_setState(cam, W9966_STATE_CLAIMED, 0);
+ w9966_set_state(cam, W9966_STATE_CLAIMED, 0);
}
-// Read register from W9966 interface-chip
-// Expects a claimed pdev
-// -1 on error, else register data (byte)
-static int w9966_rReg(struct w9966_dev* cam, int reg)
+/* Read register from W9966 interface-chip
+ Expects a claimed pdev
+ -1 on error, else register data (byte) */
+static int w9966_read_reg(struct w9966 *cam, int reg)
{
- // ECP, read, regtransfer, REG, REG, REG, REG, REG
+ /* ECP, read, regtransfer, REG, REG, REG, REG, REG */
const unsigned char addr = 0x80 | (reg & 0x1f);
unsigned char val;
@@ -270,12 +205,12 @@ static int w9966_rReg(struct w9966_dev* cam, int reg)
return val;
}
-// Write register to W9966 interface-chip
-// Expects a claimed pdev
-// -1 on error
-static int w9966_wReg(struct w9966_dev* cam, int reg, int data)
+/* Write register to W9966 interface-chip
+ Expects a claimed pdev
+ -1 on error */
+static int w9966_write_reg(struct w9966 *cam, int reg, int data)
{
- // ECP, write, regtransfer, REG, REG, REG, REG, REG
+ /* ECP, write, regtransfer, REG, REG, REG, REG, REG */
const unsigned char addr = 0xc0 | (reg & 0x1f);
const unsigned char val = data;
@@ -291,119 +226,186 @@ static int w9966_wReg(struct w9966_dev* cam, int reg, int data)
return 0;
}
-// Initialize camera device. Setup all internal flags, set a
-// default video mode, setup ccd-chip, register v4l device etc..
-// Also used for 'probing' of hardware.
-// -1 on error
-static int w9966_init(struct w9966_dev* cam, struct parport* port)
+/*
+ * Ugly and primitive i2c protocol functions
+ */
+
+/* Sets the data line on the i2c bus.
+ Expects a claimed pdev. */
+static void w9966_i2c_setsda(struct w9966 *cam, int state)
{
- if (cam->dev_state != 0)
- return -1;
+ if (state)
+ cam->i2c_state |= W9966_I2C_W_DATA;
+ else
+ cam->i2c_state &= ~W9966_I2C_W_DATA;
- cam->pport = port;
- cam->brightness = 128;
- cam->contrast = 64;
- cam->color = 64;
- cam->hue = 0;
+ w9966_write_reg(cam, 0x18, cam->i2c_state);
+ udelay(5);
+}
-// Select requested transfer mode
- switch(parmode)
- {
- default: // Auto-detect (priority: hw-ecp, hw-epp, sw-ecp)
- case 0:
- if (port->modes & PARPORT_MODE_ECP)
- cam->ppmode = IEEE1284_MODE_ECP;
- else if (port->modes & PARPORT_MODE_EPP)
- cam->ppmode = IEEE1284_MODE_EPP;
- else
- cam->ppmode = IEEE1284_MODE_ECP;
- break;
- case 1: // hw- or sw-ecp
- cam->ppmode = IEEE1284_MODE_ECP;
- break;
- case 2: // hw- or sw-epp
- cam->ppmode = IEEE1284_MODE_EPP;
- break;
+/* Get peripheral clock line
+ Expects a claimed pdev. */
+static int w9966_i2c_getscl(struct w9966 *cam)
+{
+ const unsigned char state = w9966_read_reg(cam, 0x18);
+ return ((state & W9966_I2C_R_CLOCK) > 0);
+}
+
+/* Sets the clock line on the i2c bus.
+ Expects a claimed pdev. -1 on error */
+static int w9966_i2c_setscl(struct w9966 *cam, int state)
+{
+ unsigned long timeout;
+
+ if (state)
+ cam->i2c_state |= W9966_I2C_W_CLOCK;
+ else
+ cam->i2c_state &= ~W9966_I2C_W_CLOCK;
+
+ w9966_write_reg(cam, 0x18, cam->i2c_state);
+ udelay(5);
+
+	/* When we go high, we also expect the peripheral to ack. */
+ if (state) {
+ timeout = jiffies + 100;
+ while (!w9966_i2c_getscl(cam)) {
+ if (time_after(jiffies, timeout))
+ return -1;
+ }
}
+ return 0;
+}
-// Tell the parport driver that we exists
- cam->pdev = parport_register_device(port, "w9966", NULL, NULL, NULL, 0, NULL);
- if (cam->pdev == NULL) {
- DPRINTF("parport_register_device() failed\n");
- return -1;
+#if 0
+/* Get peripheral data line
+ Expects a claimed pdev. */
+static int w9966_i2c_getsda(struct w9966 *cam)
+{
+ const unsigned char state = w9966_read_reg(cam, 0x18);
+ return ((state & W9966_I2C_R_DATA) > 0);
+}
+#endif
+
+/* Write a byte with ack to the i2c bus.
+ Expects a claimed pdev. -1 on error */
+static int w9966_i2c_wbyte(struct w9966 *cam, int data)
+{
+ int i;
+
+ for (i = 7; i >= 0; i--) {
+ w9966_i2c_setsda(cam, (data >> i) & 0x01);
+
+ if (w9966_i2c_setscl(cam, 1) == -1)
+ return -1;
+ w9966_i2c_setscl(cam, 0);
}
- w9966_setState(cam, W9966_STATE_PDEV, W9966_STATE_PDEV);
- w9966_pdev_claim(cam);
+ w9966_i2c_setsda(cam, 1);
-// Setup a default capture mode
- if (w9966_setup(cam, 0, 0, 1023, 1023, 200, 160) != 0) {
- DPRINTF("w9966_setup() failed.\n");
+ if (w9966_i2c_setscl(cam, 1) == -1)
return -1;
+ w9966_i2c_setscl(cam, 0);
+
+ return 0;
+}
+
+/* Read a data byte with ack from the i2c-bus
+ Expects a claimed pdev. -1 on error */
+#if 0
+static int w9966_i2c_rbyte(struct w9966 *cam)
+{
+ unsigned char data = 0x00;
+ int i;
+
+ w9966_i2c_setsda(cam, 1);
+
+ for (i = 0; i < 8; i++) {
+ if (w9966_i2c_setscl(cam, 1) == -1)
+ return -1;
+ data = data << 1;
+ if (w9966_i2c_getsda(cam))
+ data |= 0x01;
+
+ w9966_i2c_setscl(cam, 0);
}
+ return data;
+}
+#endif
- w9966_pdev_release(cam);
+/* Read a register from the i2c device.
+ Expects claimed pdev. -1 on error */
+#if 0
+static int w9966_read_reg_i2c(struct w9966 *cam, int reg)
+{
+ int data;
-// Fill in the video_device struct and register us to v4l
- memcpy(&cam->vdev, &w9966_template, sizeof(struct video_device));
- video_set_drvdata(&cam->vdev, cam);
+ w9966_i2c_setsda(cam, 0);
+ w9966_i2c_setscl(cam, 0);
- if (video_register_device(&cam->vdev, VFL_TYPE_GRABBER, video_nr) < 0)
+ if (w9966_i2c_wbyte(cam, W9966_I2C_W_ID) == -1 ||
+ w9966_i2c_wbyte(cam, reg) == -1)
+ return -1;
+
+ w9966_i2c_setsda(cam, 1);
+ if (w9966_i2c_setscl(cam, 1) == -1)
return -1;
+ w9966_i2c_setsda(cam, 0);
+ w9966_i2c_setscl(cam, 0);
- w9966_setState(cam, W9966_STATE_VDEV, W9966_STATE_VDEV);
+ if (w9966_i2c_wbyte(cam, W9966_I2C_R_ID) == -1)
+ return -1;
+ data = w9966_i2c_rbyte(cam);
+ if (data == -1)
+ return -1;
- // All ok
- printk(
- "w9966cf: Found and initialized a webcam on %s.\n",
- cam->pport->name
- );
- return 0;
-}
+ w9966_i2c_setsda(cam, 0);
+ if (w9966_i2c_setscl(cam, 1) == -1)
+ return -1;
+ w9966_i2c_setsda(cam, 1);
-// Terminate everything gracefully
-static void w9966_term(struct w9966_dev* cam)
+ return data;
+}
+#endif
+
+/* Write a register to the i2c device.
+ Expects claimed pdev. -1 on error */
+static int w9966_write_reg_i2c(struct w9966 *cam, int reg, int data)
{
-// Unregister from v4l
- if (w9966_getState(cam, W9966_STATE_VDEV, W9966_STATE_VDEV)) {
- video_unregister_device(&cam->vdev);
- w9966_setState(cam, W9966_STATE_VDEV, 0);
- }
+ w9966_i2c_setsda(cam, 0);
+ w9966_i2c_setscl(cam, 0);
-// Terminate from IEEE1284 mode and release pdev block
- if (w9966_getState(cam, W9966_STATE_PDEV, W9966_STATE_PDEV)) {
- w9966_pdev_claim(cam);
- parport_negotiate(cam->pport, IEEE1284_MODE_COMPAT);
- w9966_pdev_release(cam);
- }
+ if (w9966_i2c_wbyte(cam, W9966_I2C_W_ID) == -1 ||
+ w9966_i2c_wbyte(cam, reg) == -1 ||
+ w9966_i2c_wbyte(cam, data) == -1)
+ return -1;
-// Unregister from parport
- if (w9966_getState(cam, W9966_STATE_PDEV, W9966_STATE_PDEV)) {
- parport_unregister_device(cam->pdev);
- w9966_setState(cam, W9966_STATE_PDEV, 0);
- }
-}
+ w9966_i2c_setsda(cam, 0);
+ if (w9966_i2c_setscl(cam, 1) == -1)
+ return -1;
+
+ w9966_i2c_setsda(cam, 1);
+ return 0;
+}
-// Find a good length for capture window (used both for W and H)
-// A bit ugly but pretty functional. The capture length
-// have to match the downscale
+/* Find a good length for capture window (used both for W and H)
+ A bit ugly but pretty functional. The capture length
+   has to match the downscale */
static int w9966_findlen(int near, int size, int maxlen)
{
int bestlen = size;
int besterr = abs(near - bestlen);
int len;
- for(len = size+1;len < maxlen;len++)
- {
+ for (len = size + 1; len < maxlen; len++) {
int err;
- if ( ((64*size) %len) != 0)
+ if (((64 * size) % len) != 0)
continue;
err = abs(near - len);
- // Only continue as long as we keep getting better values
+ /* Only continue as long as we keep getting better values */
if (err > besterr)
break;
@@ -414,32 +416,32 @@ static int w9966_findlen(int near, int size, int maxlen)
return bestlen;
}
-// Modify capture window (if necessary)
-// and calculate downscaling
-// Return -1 on error
-static int w9966_calcscale(int size, int min, int max, int* beg, int* end, unsigned char* factor)
+/* Modify capture window (if necessary)
+ and calculate downscaling
+ Return -1 on error */
+static int w9966_calcscale(int size, int min, int max, int *beg, int *end, unsigned char *factor)
{
int maxlen = max - min;
int len = *end - *beg + 1;
int newlen = w9966_findlen(len, size, maxlen);
int err = newlen - len;
- // Check for bad format
+ /* Check for bad format */
if (newlen > maxlen || newlen < size)
return -1;
- // Set factor (6 bit fixed)
- *factor = (64*size) / newlen;
+ /* Set factor (6 bit fixed) */
+ *factor = (64 * size) / newlen;
if (*factor == 64)
- *factor = 0x00; // downscale is disabled
+ *factor = 0x00; /* downscale is disabled */
else
- *factor |= 0x80; // set downscale-enable bit
+ *factor |= 0x80; /* set downscale-enable bit */
- // Modify old beginning and end
+ /* Modify old beginning and end */
*beg -= err / 2;
*end += err - (err / 2);
- // Move window if outside borders
+ /* Move window if outside borders */
if (*beg < min) {
*end += min - *beg;
*beg += min - *beg;
@@ -452,10 +454,10 @@ static int w9966_calcscale(int size, int min, int max, int* beg, int* end, unsig
return 0;
}
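
Editorial aside, for readers following the scaling math above: the value written into the scaler register is 6-bit fixed point, with bit 0x80 flagging that downscaling is active:

	factor = (64 * size) / newlen
	size = 200, newlen = 400  ->  factor = 32, |= 0x80 -> 0xa0 (scale by 32/64 = 1/2)
	size == newlen            ->  factor = 64, rewritten to 0x00 (downscale disabled)

Because w9966_findlen() only returns lengths where (64 * size) % len == 0, the division is always exact.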
-// Setup the cameras capture window etc.
-// Expects a claimed pdev
-// return -1 on error
-static int w9966_setup(struct w9966_dev* cam, int x1, int y1, int x2, int y2, int w, int h)
+/* Set up the camera's capture window etc.
+ Expects a claimed pdev
+ return -1 on error */
+static int w9966_setup(struct w9966 *cam, int x1, int y1, int x2, int y2, int w, int h)
{
unsigned int i;
unsigned int enh_s, enh_e;
@@ -469,443 +471,314 @@ static int w9966_setup(struct w9966_dev* cam, int x1, int y1, int x2, int y2, in
};
- if (w*h*2 > W9966_SRAMSIZE)
- {
+ if (w * h * 2 > W9966_SRAMSIZE) {
DPRINTF("capture window exceeds SRAM size!.\n");
- w = 200; h = 160; // Pick default values
+ w = 200; h = 160; /* Pick default values */
}
w &= ~0x1;
- if (w < 2) w = 2;
- if (h < 1) h = 1;
- if (w > W9966_WND_MAX_W) w = W9966_WND_MAX_W;
- if (h > W9966_WND_MAX_H) h = W9966_WND_MAX_H;
+ if (w < 2)
+ w = 2;
+ if (h < 1)
+ h = 1;
+ if (w > W9966_WND_MAX_W)
+ w = W9966_WND_MAX_W;
+ if (h > W9966_WND_MAX_H)
+ h = W9966_WND_MAX_H;
cam->width = w;
cam->height = h;
enh_s = 0;
- enh_e = w*h*2;
-
-// Modify capture window if necessary and calculate downscaling
- if (
- w9966_calcscale(w, W9966_WND_MIN_X, W9966_WND_MAX_X, &x1, &x2, &scale_x) != 0 ||
- w9966_calcscale(h, W9966_WND_MIN_Y, W9966_WND_MAX_Y, &y1, &y2, &scale_y) != 0
- ) return -1;
-
- DPRINTF(
- "%dx%d, x: %d<->%d, y: %d<->%d, sx: %d/64, sy: %d/64.\n",
- w, h, x1, x2, y1, y2, scale_x&~0x80, scale_y&~0x80
- );
-
-// Setup registers
- regs[0x00] = 0x00; // Set normal operation
- regs[0x01] = 0x18; // Capture mode
- regs[0x02] = scale_y; // V-scaling
- regs[0x03] = scale_x; // H-scaling
-
- // Capture window
- regs[0x04] = (x1 & 0x0ff); // X-start (8 low bits)
- regs[0x05] = (x1 & 0x300)>>8; // X-start (2 high bits)
- regs[0x06] = (y1 & 0x0ff); // Y-start (8 low bits)
- regs[0x07] = (y1 & 0x300)>>8; // Y-start (2 high bits)
- regs[0x08] = (x2 & 0x0ff); // X-end (8 low bits)
- regs[0x09] = (x2 & 0x300)>>8; // X-end (2 high bits)
- regs[0x0a] = (y2 & 0x0ff); // Y-end (8 low bits)
-
- regs[0x0c] = W9966_SRAMID; // SRAM-banks (1x 128kb)
-
- // Enhancement layer
- regs[0x0d] = (enh_s& 0x000ff); // Enh. start (0-7)
- regs[0x0e] = (enh_s& 0x0ff00)>>8; // Enh. start (8-15)
- regs[0x0f] = (enh_s& 0x70000)>>16; // Enh. start (16-17/18??)
- regs[0x10] = (enh_e& 0x000ff); // Enh. end (0-7)
- regs[0x11] = (enh_e& 0x0ff00)>>8; // Enh. end (8-15)
- regs[0x12] = (enh_e& 0x70000)>>16; // Enh. end (16-17/18??)
-
- // Misc
- regs[0x13] = 0x40; // VEE control (raw 4:2:2)
- regs[0x17] = 0x00; // ???
- regs[0x18] = cam->i2c_state = 0x00; // Serial bus
- regs[0x19] = 0xff; // I/O port direction control
- regs[0x1a] = 0xff; // I/O port data register
- regs[0x1b] = 0x10; // ???
-
- // SAA7111 chip settings
+ enh_e = w * h * 2;
+
+ /* Modify capture window if necessary and calculate downscaling */
+ if (w9966_calcscale(w, W9966_WND_MIN_X, W9966_WND_MAX_X, &x1, &x2, &scale_x) != 0 ||
+ w9966_calcscale(h, W9966_WND_MIN_Y, W9966_WND_MAX_Y, &y1, &y2, &scale_y) != 0)
+ return -1;
+
+ DPRINTF("%dx%d, x: %d<->%d, y: %d<->%d, sx: %d/64, sy: %d/64.\n",
+ w, h, x1, x2, y1, y2, scale_x & ~0x80, scale_y & ~0x80);
+
+ /* Setup registers */
+ regs[0x00] = 0x00; /* Set normal operation */
+ regs[0x01] = 0x18; /* Capture mode */
+ regs[0x02] = scale_y; /* V-scaling */
+ regs[0x03] = scale_x; /* H-scaling */
+
+ /* Capture window */
+ regs[0x04] = (x1 & 0x0ff); /* X-start (8 low bits) */
+ regs[0x05] = (x1 & 0x300)>>8; /* X-start (2 high bits) */
+ regs[0x06] = (y1 & 0x0ff); /* Y-start (8 low bits) */
+ regs[0x07] = (y1 & 0x300)>>8; /* Y-start (2 high bits) */
+ regs[0x08] = (x2 & 0x0ff); /* X-end (8 low bits) */
+ regs[0x09] = (x2 & 0x300)>>8; /* X-end (2 high bits) */
+ regs[0x0a] = (y2 & 0x0ff); /* Y-end (8 low bits) */
+
+ regs[0x0c] = W9966_SRAMID; /* SRAM-banks (1x 128kb) */
+
+ /* Enhancement layer */
+ regs[0x0d] = (enh_s & 0x000ff); /* Enh. start (0-7) */
+ regs[0x0e] = (enh_s & 0x0ff00) >> 8; /* Enh. start (8-15) */
+ regs[0x0f] = (enh_s & 0x70000) >> 16; /* Enh. start (16-17/18??) */
+ regs[0x10] = (enh_e & 0x000ff); /* Enh. end (0-7) */
+ regs[0x11] = (enh_e & 0x0ff00) >> 8; /* Enh. end (8-15) */
+ regs[0x12] = (enh_e & 0x70000) >> 16; /* Enh. end (16-17/18??) */
+
+ /* Misc */
+ regs[0x13] = 0x40; /* VEE control (raw 4:2:2) */
+ regs[0x17] = 0x00; /* ??? */
+ regs[0x18] = cam->i2c_state = 0x00; /* Serial bus */
+ regs[0x19] = 0xff; /* I/O port direction control */
+ regs[0x1a] = 0xff; /* I/O port data register */
+ regs[0x1b] = 0x10; /* ??? */
+
+ /* SAA7111 chip settings */
saa7111_regs[0x0a] = cam->brightness;
saa7111_regs[0x0b] = cam->contrast;
saa7111_regs[0x0c] = cam->color;
saa7111_regs[0x0d] = cam->hue;
-// Reset (ECP-fifo & serial-bus)
- if (w9966_wReg(cam, 0x00, 0x03) == -1)
+ /* Reset (ECP-fifo & serial-bus) */
+ if (w9966_write_reg(cam, 0x00, 0x03) == -1)
return -1;
-// Write regs to w9966cf chip
+ /* Write regs to w9966cf chip */
for (i = 0; i < 0x1c; i++)
- if (w9966_wReg(cam, i, regs[i]) == -1)
+ if (w9966_write_reg(cam, i, regs[i]) == -1)
return -1;
-// Write regs to saa7111 chip
+ /* Write regs to saa7111 chip */
for (i = 0; i < 0x20; i++)
- if (w9966_wReg_i2c(cam, i, saa7111_regs[i]) == -1)
+ if (w9966_write_reg_i2c(cam, i, saa7111_regs[i]) == -1)
return -1;
return 0;
}
/*
- * Ugly and primitive i2c protocol functions
+ * Video4linux interfacing
*/
-// Sets the data line on the i2c bus.
-// Expects a claimed pdev.
-static inline void w9966_i2c_setsda(struct w9966_dev* cam, int state)
+static int cam_querycap(struct file *file, void *priv,
+ struct v4l2_capability *vcap)
{
- if (state)
- cam->i2c_state |= W9966_I2C_W_DATA;
- else
- cam->i2c_state &= ~W9966_I2C_W_DATA;
+ struct w9966 *cam = video_drvdata(file);
- w9966_wReg(cam, 0x18, cam->i2c_state);
- udelay(5);
+ strlcpy(vcap->driver, cam->v4l2_dev.name, sizeof(vcap->driver));
+ strlcpy(vcap->card, W9966_DRIVERNAME, sizeof(vcap->card));
+ strlcpy(vcap->bus_info, "parport", sizeof(vcap->bus_info));
+ vcap->version = KERNEL_VERSION(0, 33, 0);
+ vcap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE;
+ return 0;
}
-// Get peripheral clock line
-// Expects a claimed pdev.
-static inline int w9966_i2c_getscl(struct w9966_dev* cam)
+static int cam_enum_input(struct file *file, void *fh, struct v4l2_input *vin)
{
- const unsigned char state = w9966_rReg(cam, 0x18);
- return ((state & W9966_I2C_R_CLOCK) > 0);
+ if (vin->index > 0)
+ return -EINVAL;
+ strlcpy(vin->name, "Camera", sizeof(vin->name));
+ vin->type = V4L2_INPUT_TYPE_CAMERA;
+ vin->audioset = 0;
+ vin->tuner = 0;
+ vin->std = 0;
+ vin->status = 0;
+ return 0;
}
-// Sets the clock line on the i2c bus.
-// Expects a claimed pdev. -1 on error
-static inline int w9966_i2c_setscl(struct w9966_dev* cam, int state)
+static int cam_g_input(struct file *file, void *fh, unsigned int *inp)
{
- unsigned long timeout;
-
- if (state)
- cam->i2c_state |= W9966_I2C_W_CLOCK;
- else
- cam->i2c_state &= ~W9966_I2C_W_CLOCK;
-
- w9966_wReg(cam, 0x18, cam->i2c_state);
- udelay(5);
-
- // we go to high, we also expect the peripheral to ack.
- if (state) {
- timeout = jiffies + 100;
- while (!w9966_i2c_getscl(cam)) {
- if (time_after(jiffies, timeout))
- return -1;
- }
- }
+ *inp = 0;
return 0;
}
-// Get peripheral data line
-// Expects a claimed pdev.
-static inline int w9966_i2c_getsda(struct w9966_dev* cam)
+static int cam_s_input(struct file *file, void *fh, unsigned int inp)
{
- const unsigned char state = w9966_rReg(cam, 0x18);
- return ((state & W9966_I2C_R_DATA) > 0);
+ return (inp > 0) ? -EINVAL : 0;
}
-// Write a byte with ack to the i2c bus.
-// Expects a claimed pdev. -1 on error
-static int w9966_i2c_wbyte(struct w9966_dev* cam, int data)
+static int cam_queryctrl(struct file *file, void *priv,
+ struct v4l2_queryctrl *qc)
{
- int i;
- for (i = 7; i >= 0; i--)
- {
- w9966_i2c_setsda(cam, (data >> i) & 0x01);
-
- if (w9966_i2c_setscl(cam, 1) == -1)
- return -1;
- w9966_i2c_setscl(cam, 0);
+ switch (qc->id) {
+ case V4L2_CID_BRIGHTNESS:
+ return v4l2_ctrl_query_fill(qc, 0, 255, 1, 128);
+ case V4L2_CID_CONTRAST:
+ return v4l2_ctrl_query_fill(qc, -64, 64, 1, 64);
+ case V4L2_CID_SATURATION:
+ return v4l2_ctrl_query_fill(qc, -64, 64, 1, 64);
+ case V4L2_CID_HUE:
+ return v4l2_ctrl_query_fill(qc, -128, 127, 1, 0);
}
-
- w9966_i2c_setsda(cam, 1);
-
- if (w9966_i2c_setscl(cam, 1) == -1)
- return -1;
- w9966_i2c_setscl(cam, 0);
-
- return 0;
+ return -EINVAL;
}
-// Read a data byte with ack from the i2c-bus
-// Expects a claimed pdev. -1 on error
-#if 0
-static int w9966_i2c_rbyte(struct w9966_dev* cam)
+static int cam_g_ctrl(struct file *file, void *priv,
+ struct v4l2_control *ctrl)
{
- unsigned char data = 0x00;
- int i;
-
- w9966_i2c_setsda(cam, 1);
+ struct w9966 *cam = video_drvdata(file);
+ int ret = 0;
- for (i = 0; i < 8; i++)
- {
- if (w9966_i2c_setscl(cam, 1) == -1)
- return -1;
- data = data << 1;
- if (w9966_i2c_getsda(cam))
- data |= 0x01;
-
- w9966_i2c_setscl(cam, 0);
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ ctrl->value = cam->brightness;
+ break;
+ case V4L2_CID_CONTRAST:
+ ctrl->value = cam->contrast;
+ break;
+ case V4L2_CID_SATURATION:
+ ctrl->value = cam->color;
+ break;
+ case V4L2_CID_HUE:
+ ctrl->value = cam->hue;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
}
- return data;
+ return ret;
}
-#endif
-// Read a register from the i2c device.
-// Expects claimed pdev. -1 on error
-#if 0
-static int w9966_rReg_i2c(struct w9966_dev* cam, int reg)
+static int cam_s_ctrl(struct file *file, void *priv,
+ struct v4l2_control *ctrl)
{
- int data;
-
- w9966_i2c_setsda(cam, 0);
- w9966_i2c_setscl(cam, 0);
-
- if (
- w9966_i2c_wbyte(cam, W9966_I2C_W_ID) == -1 ||
- w9966_i2c_wbyte(cam, reg) == -1
- )
- return -1;
-
- w9966_i2c_setsda(cam, 1);
- if (w9966_i2c_setscl(cam, 1) == -1)
- return -1;
- w9966_i2c_setsda(cam, 0);
- w9966_i2c_setscl(cam, 0);
+ struct w9966 *cam = video_drvdata(file);
+ int ret = 0;
- if (
- w9966_i2c_wbyte(cam, W9966_I2C_R_ID) == -1 ||
- (data = w9966_i2c_rbyte(cam)) == -1
- )
- return -1;
+ mutex_lock(&cam->lock);
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ cam->brightness = ctrl->value;
+ break;
+ case V4L2_CID_CONTRAST:
+ cam->contrast = ctrl->value;
+ break;
+ case V4L2_CID_SATURATION:
+ cam->color = ctrl->value;
+ break;
+ case V4L2_CID_HUE:
+ cam->hue = ctrl->value;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
- w9966_i2c_setsda(cam, 0);
+ if (ret == 0) {
+ w9966_pdev_claim(cam);
- if (w9966_i2c_setscl(cam, 1) == -1)
- return -1;
- w9966_i2c_setsda(cam, 1);
+ if (w9966_write_reg_i2c(cam, 0x0a, cam->brightness) == -1 ||
+ w9966_write_reg_i2c(cam, 0x0b, cam->contrast) == -1 ||
+ w9966_write_reg_i2c(cam, 0x0c, cam->color) == -1 ||
+ w9966_write_reg_i2c(cam, 0x0d, cam->hue) == -1) {
+ ret = -EIO;
+ }
- return data;
+ w9966_pdev_release(cam);
+ }
+ mutex_unlock(&cam->lock);
+ return ret;
}
-#endif
-// Write a register to the i2c device.
-// Expects claimed pdev. -1 on error
-static int w9966_wReg_i2c(struct w9966_dev* cam, int reg, int data)
+static int cam_g_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fmt)
{
- w9966_i2c_setsda(cam, 0);
- w9966_i2c_setscl(cam, 0);
-
- if (
- w9966_i2c_wbyte(cam, W9966_I2C_W_ID) == -1 ||
- w9966_i2c_wbyte(cam, reg) == -1 ||
- w9966_i2c_wbyte(cam, data) == -1
- )
- return -1;
-
- w9966_i2c_setsda(cam, 0);
- if (w9966_i2c_setscl(cam, 1) == -1)
- return -1;
-
- w9966_i2c_setsda(cam, 1);
-
+ struct w9966 *cam = video_drvdata(file);
+ struct v4l2_pix_format *pix = &fmt->fmt.pix;
+
+ pix->width = cam->width;
+ pix->height = cam->height;
+ pix->pixelformat = V4L2_PIX_FMT_YUYV;
+ pix->field = V4L2_FIELD_NONE;
+ pix->bytesperline = 2 * cam->width;
+ pix->sizeimage = 2 * cam->width * cam->height;
+ /* Just a guess */
+ pix->colorspace = V4L2_COLORSPACE_SMPTE170M;
return 0;
}
-/*
- * Video4linux interfacing
- */
-
-static long w9966_v4l_do_ioctl(struct file *file, unsigned int cmd, void *arg)
-{
- struct w9966_dev *cam = video_drvdata(file);
-
- switch(cmd)
- {
- case VIDIOCGCAP:
- {
- static struct video_capability vcap = {
- .name = W9966_DRIVERNAME,
- .type = VID_TYPE_CAPTURE | VID_TYPE_SCALES,
- .channels = 1,
- .maxwidth = W9966_WND_MAX_W,
- .maxheight = W9966_WND_MAX_H,
- .minwidth = 2,
- .minheight = 1,
- };
- struct video_capability *cap = arg;
- *cap = vcap;
- return 0;
- }
- case VIDIOCGCHAN:
- {
- struct video_channel *vch = arg;
- if(vch->channel != 0) // We only support one channel (#0)
- return -EINVAL;
- memset(vch,0,sizeof(*vch));
- strcpy(vch->name, "CCD-input");
- vch->type = VIDEO_TYPE_CAMERA;
- return 0;
- }
- case VIDIOCSCHAN:
- {
- struct video_channel *vch = arg;
- if(vch->channel != 0)
- return -EINVAL;
- return 0;
- }
- case VIDIOCGTUNER:
- {
- struct video_tuner *vtune = arg;
- if(vtune->tuner != 0)
- return -EINVAL;
- strcpy(vtune->name, "no tuner");
- vtune->rangelow = 0;
- vtune->rangehigh = 0;
- vtune->flags = VIDEO_TUNER_NORM;
- vtune->mode = VIDEO_MODE_AUTO;
- vtune->signal = 0xffff;
- return 0;
- }
- case VIDIOCSTUNER:
- {
- struct video_tuner *vtune = arg;
- if (vtune->tuner != 0)
- return -EINVAL;
- if (vtune->mode != VIDEO_MODE_AUTO)
- return -EINVAL;
- return 0;
- }
- case VIDIOCGPICT:
- {
- struct video_picture vpic = {
- cam->brightness << 8, // brightness
- (cam->hue + 128) << 8, // hue
- cam->color << 9, // color
- cam->contrast << 9, // contrast
- 0x8000, // whiteness
- 16, VIDEO_PALETTE_YUV422// bpp, palette format
- };
- struct video_picture *pic = arg;
- *pic = vpic;
- return 0;
- }
- case VIDIOCSPICT:
- {
- struct video_picture *vpic = arg;
- if (vpic->depth != 16 || (vpic->palette != VIDEO_PALETTE_YUV422 && vpic->palette != VIDEO_PALETTE_YUYV))
- return -EINVAL;
-
- cam->brightness = vpic->brightness >> 8;
- cam->hue = (vpic->hue >> 8) - 128;
- cam->color = vpic->colour >> 9;
- cam->contrast = vpic->contrast >> 9;
+static int cam_try_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fmt)
+{
+ struct v4l2_pix_format *pix = &fmt->fmt.pix;
+
+ if (pix->width < 2)
+ pix->width = 2;
+ if (pix->height < 1)
+ pix->height = 1;
+ if (pix->width > W9966_WND_MAX_W)
+ pix->width = W9966_WND_MAX_W;
+ if (pix->height > W9966_WND_MAX_H)
+ pix->height = W9966_WND_MAX_H;
+ pix->pixelformat = V4L2_PIX_FMT_YUYV;
+ pix->field = V4L2_FIELD_NONE;
+ pix->bytesperline = 2 * pix->width;
+ pix->sizeimage = 2 * pix->width * pix->height;
+ /* Just a guess */
+ pix->colorspace = V4L2_COLORSPACE_SMPTE170M;
+ return 0;
+}
- w9966_pdev_claim(cam);
+static int cam_s_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fmt)
+{
+ struct w9966 *cam = video_drvdata(file);
+ struct v4l2_pix_format *pix = &fmt->fmt.pix;
+ int ret = cam_try_fmt_vid_cap(file, fh, fmt);
- if (
- w9966_wReg_i2c(cam, 0x0a, cam->brightness) == -1 ||
- w9966_wReg_i2c(cam, 0x0b, cam->contrast) == -1 ||
- w9966_wReg_i2c(cam, 0x0c, cam->color) == -1 ||
- w9966_wReg_i2c(cam, 0x0d, cam->hue) == -1
- ) {
- w9966_pdev_release(cam);
- return -EIO;
- }
+ if (ret)
+ return ret;
- w9966_pdev_release(cam);
- return 0;
- }
- case VIDIOCSWIN:
- {
- int ret;
- struct video_window *vwin = arg;
-
- if (vwin->flags != 0)
- return -EINVAL;
- if (vwin->clipcount != 0)
- return -EINVAL;
- if (vwin->width < 2 || vwin->width > W9966_WND_MAX_W)
- return -EINVAL;
- if (vwin->height < 1 || vwin->height > W9966_WND_MAX_H)
- return -EINVAL;
-
- // Update camera regs
- w9966_pdev_claim(cam);
- ret = w9966_setup(cam, 0, 0, 1023, 1023, vwin->width, vwin->height);
- w9966_pdev_release(cam);
+ mutex_lock(&cam->lock);
+ /* Update camera regs */
+ w9966_pdev_claim(cam);
+ ret = w9966_setup(cam, 0, 0, 1023, 1023, pix->width, pix->height);
+ w9966_pdev_release(cam);
+ mutex_unlock(&cam->lock);
+ return ret;
+}
- if (ret != 0) {
- DPRINTF("VIDIOCSWIN: w9966_setup() failed.\n");
- return -EIO;
- }
+static int cam_enum_fmt_vid_cap(struct file *file, void *fh, struct v4l2_fmtdesc *fmt)
+{
+ static struct v4l2_fmtdesc formats[] = {
+ { 0, 0, 0,
+ "YUV 4:2:2", V4L2_PIX_FMT_YUYV,
+ { 0, 0, 0, 0 }
+ },
+ };
+ enum v4l2_buf_type type = fmt->type;
- return 0;
- }
- case VIDIOCGWIN:
- {
- struct video_window *vwin = arg;
- memset(vwin, 0, sizeof(*vwin));
- vwin->width = cam->width;
- vwin->height = cam->height;
- return 0;
- }
- // Unimplemented
- case VIDIOCCAPTURE:
- case VIDIOCGFBUF:
- case VIDIOCSFBUF:
- case VIDIOCKEY:
- case VIDIOCGFREQ:
- case VIDIOCSFREQ:
- case VIDIOCGAUDIO:
- case VIDIOCSAUDIO:
+ if (fmt->index > 0)
return -EINVAL;
- default:
- return -ENOIOCTLCMD;
- }
- return 0;
-}
-static long w9966_v4l_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- return video_usercopy(file, cmd, arg, w9966_v4l_do_ioctl);
+ *fmt = formats[fmt->index];
+ fmt->type = type;
+ return 0;
}
-// Capture data
+/* Capture data */
static ssize_t w9966_v4l_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
+ size_t count, loff_t *ppos)
{
- struct w9966_dev *cam = video_drvdata(file);
- unsigned char addr = 0xa0; // ECP, read, CCD-transfer, 00000
+ struct w9966 *cam = video_drvdata(file);
+ unsigned char addr = 0xa0; /* ECP, read, CCD-transfer, 00000 */
unsigned char __user *dest = (unsigned char __user *)buf;
unsigned long dleft = count;
unsigned char *tbuf;
- // Why would anyone want more than this??
+ /* Why would anyone want more than this?? */
if (count > cam->width * cam->height * 2)
return -EINVAL;
+ mutex_lock(&cam->lock);
w9966_pdev_claim(cam);
- w9966_wReg(cam, 0x00, 0x02); // Reset ECP-FIFO buffer
- w9966_wReg(cam, 0x00, 0x00); // Return to normal operation
- w9966_wReg(cam, 0x01, 0x98); // Enable capture
-
- // write special capture-addr and negotiate into data transfer
- if (
- (parport_negotiate(cam->pport, cam->ppmode|IEEE1284_ADDR) != 0 )||
- (parport_write(cam->pport, &addr, 1) != 1 )||
- (parport_negotiate(cam->pport, cam->ppmode|IEEE1284_DATA) != 0 )
- ) {
+ w9966_write_reg(cam, 0x00, 0x02); /* Reset ECP-FIFO buffer */
+ w9966_write_reg(cam, 0x00, 0x00); /* Return to normal operation */
+ w9966_write_reg(cam, 0x01, 0x98); /* Enable capture */
+
+ /* write special capture-addr and negotiate into data transfer */
+ if ((parport_negotiate(cam->pport, cam->ppmode|IEEE1284_ADDR) != 0) ||
+ (parport_write(cam->pport, &addr, 1) != 1) ||
+ (parport_negotiate(cam->pport, cam->ppmode|IEEE1284_DATA) != 0)) {
w9966_pdev_release(cam);
+ mutex_unlock(&cam->lock);
return -EFAULT;
}
@@ -915,8 +788,7 @@ static ssize_t w9966_v4l_read(struct file *file, char __user *buf,
goto out;
}
- while(dleft > 0)
- {
+ while (dleft > 0) {
unsigned long tsize = (dleft > W9966_RBUFFER) ? W9966_RBUFFER : dleft;
if (parport_read(cam->pport, tbuf, tsize) < tsize) {
@@ -931,43 +803,167 @@ static ssize_t w9966_v4l_read(struct file *file, char __user *buf,
dleft -= tsize;
}
- w9966_wReg(cam, 0x01, 0x18); // Disable capture
+ w9966_write_reg(cam, 0x01, 0x18); /* Disable capture */
out:
kfree(tbuf);
w9966_pdev_release(cam);
+ mutex_unlock(&cam->lock);
return count;
}
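
For the read()-based capture path above, a minimal user-space grab might look like the following (hypothetical helper, not part of the patch; the driver rejects reads larger than one width * height * 2 byte YUYV frame):

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

ssize_t w9966_demo_grab(const char *node, unsigned char *buf, size_t len)
{
	struct v4l2_format fmt;
	ssize_t n;
	int fd = open(node, O_RDWR);

	if (fd < 0)
		return -1;

	memset(&fmt, 0, sizeof(fmt));
	fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	if (ioctl(fd, VIDIOC_G_FMT, &fmt) < 0) {
		close(fd);
		return -1;
	}
	if (len > fmt.fmt.pix.sizeimage)
		len = fmt.fmt.pix.sizeimage;	/* bigger reads return -EINVAL */

	n = read(fd, buf, len);			/* one YUYV frame (or part of it) */
	close(fd);
	return n;
}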
+static const struct v4l2_file_operations w9966_fops = {
+ .owner = THIS_MODULE,
+ .ioctl = video_ioctl2,
+ .read = w9966_v4l_read,
+};
+
+static const struct v4l2_ioctl_ops w9966_ioctl_ops = {
+ .vidioc_querycap = cam_querycap,
+ .vidioc_g_input = cam_g_input,
+ .vidioc_s_input = cam_s_input,
+ .vidioc_enum_input = cam_enum_input,
+ .vidioc_queryctrl = cam_queryctrl,
+ .vidioc_g_ctrl = cam_g_ctrl,
+ .vidioc_s_ctrl = cam_s_ctrl,
+ .vidioc_enum_fmt_vid_cap = cam_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = cam_g_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = cam_s_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = cam_try_fmt_vid_cap,
+};
+
-// Called once for every parport on init
+/* Initialize camera device. Setup all internal flags, set a
+   default video mode, setup ccd-chip, register v4l device etc.
+ Also used for 'probing' of hardware.
+ -1 on error */
+static int w9966_init(struct w9966 *cam, struct parport *port)
+{
+ struct v4l2_device *v4l2_dev = &cam->v4l2_dev;
+
+ if (cam->dev_state != 0)
+ return -1;
+
+ strlcpy(v4l2_dev->name, "w9966", sizeof(v4l2_dev->name));
+
+ if (v4l2_device_register(NULL, v4l2_dev) < 0) {
+ v4l2_err(v4l2_dev, "Could not register v4l2_device\n");
+ return -1;
+ }
+ cam->pport = port;
+ cam->brightness = 128;
+ cam->contrast = 64;
+ cam->color = 64;
+ cam->hue = 0;
+
+ /* Select requested transfer mode */
+ switch (parmode) {
+ default: /* Auto-detect (priority: hw-ecp, hw-epp, sw-ecp) */
+ case 0:
+ if (port->modes & PARPORT_MODE_ECP)
+ cam->ppmode = IEEE1284_MODE_ECP;
+ else if (port->modes & PARPORT_MODE_EPP)
+ cam->ppmode = IEEE1284_MODE_EPP;
+ else
+ cam->ppmode = IEEE1284_MODE_ECP;
+ break;
+ case 1: /* hw- or sw-ecp */
+ cam->ppmode = IEEE1284_MODE_ECP;
+ break;
+ case 2: /* hw- or sw-epp */
+ cam->ppmode = IEEE1284_MODE_EPP;
+ break;
+ }
+
+	/* Tell the parport driver that we exist */
+ cam->pdev = parport_register_device(port, "w9966", NULL, NULL, NULL, 0, NULL);
+ if (cam->pdev == NULL) {
+ DPRINTF("parport_register_device() failed\n");
+ return -1;
+ }
+ w9966_set_state(cam, W9966_STATE_PDEV, W9966_STATE_PDEV);
+
+ w9966_pdev_claim(cam);
+
+ /* Setup a default capture mode */
+ if (w9966_setup(cam, 0, 0, 1023, 1023, 200, 160) != 0) {
+ DPRINTF("w9966_setup() failed.\n");
+ return -1;
+ }
+
+ w9966_pdev_release(cam);
+
+ /* Fill in the video_device struct and register us to v4l */
+ strlcpy(cam->vdev.name, W9966_DRIVERNAME, sizeof(cam->vdev.name));
+ cam->vdev.v4l2_dev = v4l2_dev;
+ cam->vdev.fops = &w9966_fops;
+ cam->vdev.ioctl_ops = &w9966_ioctl_ops;
+ cam->vdev.release = video_device_release_empty;
+ video_set_drvdata(&cam->vdev, cam);
+
+ mutex_init(&cam->lock);
+
+ if (video_register_device(&cam->vdev, VFL_TYPE_GRABBER, video_nr) < 0)
+ return -1;
+
+ w9966_set_state(cam, W9966_STATE_VDEV, W9966_STATE_VDEV);
+
+ /* All ok */
+ v4l2_info(v4l2_dev, "Found and initialized a webcam on %s.\n",
+ cam->pport->name);
+ return 0;
+}
+
+
+/* Terminate everything gracefully */
+static void w9966_term(struct w9966 *cam)
+{
+ /* Unregister from v4l */
+ if (w9966_get_state(cam, W9966_STATE_VDEV, W9966_STATE_VDEV)) {
+ video_unregister_device(&cam->vdev);
+ w9966_set_state(cam, W9966_STATE_VDEV, 0);
+ }
+
+ /* Terminate from IEEE1284 mode and release pdev block */
+ if (w9966_get_state(cam, W9966_STATE_PDEV, W9966_STATE_PDEV)) {
+ w9966_pdev_claim(cam);
+ parport_negotiate(cam->pport, IEEE1284_MODE_COMPAT);
+ w9966_pdev_release(cam);
+ }
+
+ /* Unregister from parport */
+ if (w9966_get_state(cam, W9966_STATE_PDEV, W9966_STATE_PDEV)) {
+ parport_unregister_device(cam->pdev);
+ w9966_set_state(cam, W9966_STATE_PDEV, 0);
+ }
+}
+
+
+/* Called once for every parport on init */
static void w9966_attach(struct parport *port)
{
int i;
- for (i = 0; i < W9966_MAXCAMS; i++)
- {
- if (w9966_cams[i].dev_state != 0) // Cam is already assigned
+ for (i = 0; i < W9966_MAXCAMS; i++) {
+ if (w9966_cams[i].dev_state != 0) /* Cam is already assigned */
continue;
- if (
- strcmp(pardev[i], "aggressive") == 0 ||
- strcmp(pardev[i], port->name) == 0
- ) {
+ if (strcmp(pardev[i], "aggressive") == 0 || strcmp(pardev[i], port->name) == 0) {
if (w9966_init(&w9966_cams[i], port) != 0)
- w9966_term(&w9966_cams[i]);
- break; // return
+ w9966_term(&w9966_cams[i]);
+ break; /* return */
}
}
}
-// Called once for every parport on termination
+/* Called once for every parport on termination */
static void w9966_detach(struct parport *port)
{
int i;
+
for (i = 0; i < W9966_MAXCAMS; i++)
- if (w9966_cams[i].dev_state != 0 && w9966_cams[i].pport == port)
- w9966_term(&w9966_cams[i]);
+ if (w9966_cams[i].dev_state != 0 && w9966_cams[i].pport == port)
+ w9966_term(&w9966_cams[i]);
}
@@ -977,17 +973,18 @@ static struct parport_driver w9966_ppd = {
.detach = w9966_detach,
};
-// Module entry point
+/* Module entry point */
static int __init w9966_mod_init(void)
{
int i;
+
for (i = 0; i < W9966_MAXCAMS; i++)
w9966_cams[i].dev_state = 0;
return parport_register_driver(&w9966_ppd);
}
-// Module cleanup
+/* Module cleanup */
static void __exit w9966_mod_term(void)
{
parport_unregister_driver(&w9966_ppd);
diff --git a/drivers/media/video/zc0301/zc0301_core.c b/drivers/media/video/zc0301/zc0301_core.c
index e44e4b5f3e50..bb51cfb0c647 100644
--- a/drivers/media/video/zc0301/zc0301_core.c
+++ b/drivers/media/video/zc0301/zc0301_core.c
@@ -1153,7 +1153,7 @@ zc0301_vidioc_s_ctrl(struct zc0301_device* cam, void __user * arg)
if (copy_from_user(&ctrl, arg, sizeof(ctrl)))
return -EFAULT;
- for (i = 0; i < ARRAY_SIZE(s->qctrl); i++)
+ for (i = 0; i < ARRAY_SIZE(s->qctrl); i++) {
if (ctrl.id == s->qctrl[i].id) {
if (s->qctrl[i].flags & V4L2_CTRL_FLAG_DISABLED)
return -EINVAL;
@@ -1163,7 +1163,9 @@ zc0301_vidioc_s_ctrl(struct zc0301_device* cam, void __user * arg)
ctrl.value -= ctrl.value % s->qctrl[i].step;
break;
}
-
+ }
+ if (i == ARRAY_SIZE(s->qctrl))
+ return -EINVAL;
if ((err = s->set_ctrl(cam, &ctrl)))
return err;
diff --git a/drivers/media/video/zc0301/zc0301_sensor.h b/drivers/media/video/zc0301/zc0301_sensor.h
index 3a408de91b9c..0be783c203f7 100644
--- a/drivers/media/video/zc0301/zc0301_sensor.h
+++ b/drivers/media/video/zc0301/zc0301_sensor.h
@@ -58,7 +58,7 @@ zc0301_attach_sensor(struct zc0301_device* cam, struct zc0301_sensor* sensor);
.idProduct = (prod), \
.bInterfaceClass = (intclass)
-#if !defined CONFIG_USB_GSPCA && !defined CONFIG_USB_GSPCA_MODULE
+#if !defined CONFIG_USB_GSPCA_ZC3XX && !defined CONFIG_USB_GSPCA_ZC3XX_MODULE
#define ZC0301_ID_TABLE \
static const struct usb_device_id zc0301_id_table[] = { \
{ ZC0301_USB_DEVICE(0x046d, 0x08ae, 0xff), }, /* PAS202 */ \
diff --git a/drivers/media/video/zoran/zoran.h b/drivers/media/video/zoran/zoran.h
index cb1de7ea197a..8997add1248e 100644
--- a/drivers/media/video/zoran/zoran.h
+++ b/drivers/media/video/zoran/zoran.h
@@ -33,6 +33,10 @@
#include <media/v4l2-device.h>
+#define ZORAN_VIDMODE_PAL 0
+#define ZORAN_VIDMODE_NTSC 1
+#define ZORAN_VIDMODE_SECAM 2
+
struct zoran_requestbuffers {
unsigned long count; /* Number of buffers for MJPEG grabbing */
unsigned long size; /* Size PER BUFFER in bytes */
@@ -48,7 +52,7 @@ struct zoran_sync {
struct zoran_status {
int input; /* Input channel, has to be set prior to BUZIOC_G_STATUS */
int signal; /* Returned: 1 if valid video signal detected */
- int norm; /* Returned: VIDEO_MODE_PAL or VIDEO_MODE_NTSC */
+ int norm; /* Returned: ZORAN_VIDMODE_PAL or ZORAN_VIDMODE_NTSC */
int color; /* Returned: 1 if color signal detected */
};
@@ -62,7 +66,7 @@ struct zoran_params {
/* Main control parameters */
int input; /* Input channel: 0 = Composite, 1 = S-VHS */
- int norm; /* Norm: VIDEO_MODE_PAL or VIDEO_MODE_NTSC */
+ int norm; /* Norm: ZORAN_VIDMODE_PAL or ZORAN_VIDMODE_NTSC */
int decimation; /* decimation of captured video,
* enlargement of video played back.
* Valid values are 1, 2, 4 or 0.
@@ -131,13 +135,13 @@ struct zoran_params {
/*
Private IOCTL to set up for displaying MJPEG
*/
-#define BUZIOC_G_PARAMS _IOR ('v', BASE_VIDIOCPRIVATE+0, struct zoran_params)
-#define BUZIOC_S_PARAMS _IOWR('v', BASE_VIDIOCPRIVATE+1, struct zoran_params)
-#define BUZIOC_REQBUFS _IOWR('v', BASE_VIDIOCPRIVATE+2, struct zoran_requestbuffers)
-#define BUZIOC_QBUF_CAPT _IOW ('v', BASE_VIDIOCPRIVATE+3, int)
-#define BUZIOC_QBUF_PLAY _IOW ('v', BASE_VIDIOCPRIVATE+4, int)
-#define BUZIOC_SYNC _IOR ('v', BASE_VIDIOCPRIVATE+5, struct zoran_sync)
-#define BUZIOC_G_STATUS _IOWR('v', BASE_VIDIOCPRIVATE+6, struct zoran_status)
+#define BUZIOC_G_PARAMS _IOR ('v', BASE_VIDIOC_PRIVATE+0, struct zoran_params)
+#define BUZIOC_S_PARAMS _IOWR('v', BASE_VIDIOC_PRIVATE+1, struct zoran_params)
+#define BUZIOC_REQBUFS _IOWR('v', BASE_VIDIOC_PRIVATE+2, struct zoran_requestbuffers)
+#define BUZIOC_QBUF_CAPT _IOW ('v', BASE_VIDIOC_PRIVATE+3, int)
+#define BUZIOC_QBUF_PLAY _IOW ('v', BASE_VIDIOC_PRIVATE+4, int)
+#define BUZIOC_SYNC _IOR ('v', BASE_VIDIOC_PRIVATE+5, struct zoran_sync)
+#define BUZIOC_G_STATUS _IOWR('v', BASE_VIDIOC_PRIVATE+6, struct zoran_status)
#ifdef __KERNEL__
@@ -401,7 +405,7 @@ struct zoran {
spinlock_t spinlock; /* Spinlock */
/* Video for Linux parameters */
- int input; /* card's norm and input - norm=VIDEO_MODE_* */
+ int input; /* card's norm and input */
v4l2_std_id norm;
/* Current buffer params */
diff --git a/drivers/media/video/zoran/zoran_driver.c b/drivers/media/video/zoran/zoran_driver.c
index ec41303544e5..6f89d0a096ea 100644
--- a/drivers/media/video/zoran/zoran_driver.c
+++ b/drivers/media/video/zoran/zoran_driver.c
@@ -60,7 +60,7 @@
#include <linux/spinlock.h>
-#include <linux/videodev.h>
+#include <linux/videodev2.h>
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
#include "videocodec.h"
@@ -1549,11 +1549,11 @@ static long zoran_default(struct file *file, void *__fh, int cmd, void *arg)
mutex_lock(&zr->resource_lock);
if (zr->norm & V4L2_STD_NTSC)
- bparams->norm = VIDEO_MODE_NTSC;
- else if (zr->norm & V4L2_STD_PAL)
- bparams->norm = VIDEO_MODE_PAL;
+ bparams->norm = ZORAN_VIDMODE_NTSC;
+ else if (zr->norm & V4L2_STD_SECAM)
+ bparams->norm = ZORAN_VIDMODE_SECAM;
else
- bparams->norm = VIDEO_MODE_SECAM;
+ bparams->norm = ZORAN_VIDMODE_PAL;
bparams->input = zr->input;
@@ -1789,11 +1789,11 @@ gstat_unlock_and_return:
bstat->signal =
(status & V4L2_IN_ST_NO_SIGNAL) ? 0 : 1;
if (norm & V4L2_STD_NTSC)
- bstat->norm = VIDEO_MODE_NTSC;
+ bstat->norm = ZORAN_VIDMODE_NTSC;
else if (norm & V4L2_STD_SECAM)
- bstat->norm = VIDEO_MODE_SECAM;
+ bstat->norm = ZORAN_VIDMODE_SECAM;
else
- bstat->norm = VIDEO_MODE_PAL;
+ bstat->norm = ZORAN_VIDMODE_PAL;
bstat->color =
(status & V4L2_IN_ST_NO_COLOR) ? 0 : 1;
diff --git a/drivers/media/video/zr364xx.c b/drivers/media/video/zr364xx.c
index 3d4bac252902..a82b5bd18d26 100644
--- a/drivers/media/video/zr364xx.c
+++ b/drivers/media/video/zr364xx.c
@@ -376,8 +376,8 @@ static int buffer_setup(struct videobuf_queue *vq, unsigned int *count,
if (*count == 0)
*count = ZR364XX_DEF_BUFS;
- while (*size * (*count) > ZR364XX_DEF_BUFS * 1024 * 1024)
- (*count)--;
+ if (*size * *count > ZR364XX_DEF_BUFS * 1024 * 1024)
+ *count = (ZR364XX_DEF_BUFS * 1024 * 1024) / *size;
return 0;
}
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 5382b5a44aff..a6a57011ba6c 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -5064,7 +5064,7 @@ mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode)
if (!timeleft) {
printk(KERN_DEBUG "%s: Issuing Reset from %s!!\n",
ioc->name, __func__);
- mpt_HardResetHandler(ioc, CAN_SLEEP);
+ mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
mpt_free_msg_frame(ioc, mf);
}
goto out;
@@ -6456,10 +6456,15 @@ out:
issue_hard_reset = 0;
printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n",
ioc->name, __func__);
- mpt_HardResetHandler(ioc, CAN_SLEEP);
+ if (retry_count == 0) {
+ if (mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP) != 0)
+ retry_count++;
+ } else
+ mpt_HardResetHandler(ioc, CAN_SLEEP);
+
mpt_free_msg_frame(ioc, mf);
/* attempt one retry for a timed out command */
- if (!retry_count) {
+ if (retry_count < 2) {
printk(MYIOC_s_INFO_FMT
"Attempting Retry Config request"
" type 0x%x, page 0x%x,"
@@ -6904,6 +6909,172 @@ mpt_halt_firmware(MPT_ADAPTER *ioc)
}
EXPORT_SYMBOL(mpt_halt_firmware);
+/**
+ * mpt_SoftResetHandler - Issues a less expensive reset
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @sleepFlag: Indicates if sleep or schedule must be called.
+ *
+ * Returns 0 for SUCCESS or -1 if FAILED.
+ *
+ * Message Unit Reset - instructs the IOC to reset the Reply Post and
+ * Free FIFOs. All the Message Frames on the Reply Free FIFO are discarded.
+ * All posted buffers are freed, and event notification is turned off.
+ * The IOC doesn't reply to any outstanding request. This will transfer the
+ * IOC to the READY state.
+ **/
+int
+mpt_SoftResetHandler(MPT_ADAPTER *ioc, int sleepFlag)
+{
+ int rc;
+ int ii;
+ u8 cb_idx;
+ unsigned long flags;
+ u32 ioc_state;
+ unsigned long time_count;
+
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "SoftResetHandler Entered!\n",
+ ioc->name));
+
+ ioc_state = mpt_GetIocState(ioc, 0) & MPI_IOC_STATE_MASK;
+
+ if (mpt_fwfault_debug)
+ mpt_halt_firmware(ioc);
+
+ if (ioc_state == MPI_IOC_STATE_FAULT ||
+ ioc_state == MPI_IOC_STATE_RESET) {
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "skipping, either in FAULT or RESET state!\n", ioc->name));
+ return -1;
+ }
+
+ if (ioc->bus_type == FC) {
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "skipping, because the bus type is FC!\n", ioc->name));
+ return -1;
+ }
+
+ spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
+ if (ioc->ioc_reset_in_progress) {
+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+ return -1;
+ }
+ ioc->ioc_reset_in_progress = 1;
+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+
+ rc = -1;
+
+ for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
+ if (MptResetHandlers[cb_idx])
+ mpt_signal_reset(cb_idx, ioc, MPT_IOC_SETUP_RESET);
+ }
+
+ spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
+ if (ioc->taskmgmt_in_progress) {
+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+ return -1;
+ }
+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+ /* Disable reply interrupts (also blocks FreeQ) */
+ CHIPREG_WRITE32(&ioc->chip->IntMask, 0xFFFFFFFF);
+ ioc->active = 0;
+ time_count = jiffies;
+
+ rc = SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET, sleepFlag);
+
+ for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
+ if (MptResetHandlers[cb_idx])
+ mpt_signal_reset(cb_idx, ioc, MPT_IOC_PRE_RESET);
+ }
+
+ if (rc)
+ goto out;
+
+ ioc_state = mpt_GetIocState(ioc, 0) & MPI_IOC_STATE_MASK;
+ if (ioc_state != MPI_IOC_STATE_READY)
+ goto out;
+
+ for (ii = 0; ii < 5; ii++) {
+ /* Get IOC facts! Allow 5 retries */
+ rc = GetIocFacts(ioc, sleepFlag,
+ MPT_HOSTEVENT_IOC_RECOVER);
+ if (rc == 0)
+ break;
+ if (sleepFlag == CAN_SLEEP)
+ msleep(100);
+ else
+ mdelay(100);
+ }
+ if (ii == 5)
+ goto out;
+
+ rc = PrimeIocFifos(ioc);
+ if (rc != 0)
+ goto out;
+
+ rc = SendIocInit(ioc, sleepFlag);
+ if (rc != 0)
+ goto out;
+
+ rc = SendEventNotification(ioc, 1, sleepFlag);
+ if (rc != 0)
+ goto out;
+
+ if (ioc->hard_resets < -1)
+ ioc->hard_resets++;
+
+ /*
+ * At this point, we know soft reset succeeded.
+ */
+
+ ioc->active = 1;
+ CHIPREG_WRITE32(&ioc->chip->IntMask, MPI_HIM_DIM);
+
+ out:
+ spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
+ ioc->ioc_reset_in_progress = 0;
+ ioc->taskmgmt_quiesce_io = 0;
+ ioc->taskmgmt_in_progress = 0;
+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+
+ if (ioc->active) { /* otherwise, hard reset coming */
+ for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
+ if (MptResetHandlers[cb_idx])
+ mpt_signal_reset(cb_idx, ioc,
+ MPT_IOC_POST_RESET);
+ }
+ }
+
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "SoftResetHandler: completed (%d seconds): %s\n",
+ ioc->name, jiffies_to_msecs(jiffies - time_count)/1000,
+ ((rc == 0) ? "SUCCESS" : "FAILED")));
+
+ return rc;
+}
+
+/**
+ * mpt_Soft_Hard_ResetHandler - Try less expensive reset
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @sleepFlag: Indicates if sleep or schedule must be called.
+ *
+ * Returns 0 for SUCCESS or -1 if FAILED.
+ * Try a soft reset first; only if that fails, go for the expensive
+ * HardReset.
+ **/
+int
+mpt_Soft_Hard_ResetHandler(MPT_ADAPTER *ioc, int sleepFlag) {
+ int ret = -1;
+
+ ret = mpt_SoftResetHandler(ioc, sleepFlag);
+ if (ret == 0)
+ return ret;
+ ret = mpt_HardResetHandler(ioc, sleepFlag);
+ return ret;
+}
+EXPORT_SYMBOL(mpt_Soft_Hard_ResetHandler);
+
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
* Reset Handling
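A note on the pattern this file now exports: mpt_Soft_Hard_ResetHandler() is the escalation helper the remaining files in this series switch their callers to. A minimal, hypothetical sketch of a timeout path using it is shown below; the my_* helpers are illustrative placeholders, not functions from this patch.

	/* Hedged sketch only: illustrates the intended call pattern after this
	 * series.  my_issue_request()/my_wait_for_reply() are hypothetical
	 * stand-ins for a driver's own request/response plumbing. */
	static int my_send_with_reset_fallback(MPT_ADAPTER *ioc)
	{
		if (my_issue_request(ioc) == 0 && my_wait_for_reply(ioc) == 0)
			return 0;

		/*
		 * Prefer the cheap message-unit (soft) reset; the helper only
		 * falls back to the full hard reset when the soft reset fails.
		 */
		return mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
	}
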
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index 9718c8f2e959..b613eb3d4706 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -76,8 +76,8 @@
#define COPYRIGHT "Copyright (c) 1999-2008 " MODULEAUTHOR
#endif
-#define MPT_LINUX_VERSION_COMMON "3.04.14"
-#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.14"
+#define MPT_LINUX_VERSION_COMMON "3.04.15"
+#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.15"
#define WHAT_MAGIC_STRING "@" "(" "#" ")"
#define show_mptmod_ver(s,ver) \
@@ -940,6 +940,7 @@ extern int mpt_verify_adapter(int iocid, MPT_ADAPTER **iocpp);
extern u32 mpt_GetIocState(MPT_ADAPTER *ioc, int cooked);
extern void mpt_print_ioc_summary(MPT_ADAPTER *ioc, char *buf, int *size, int len, int showlan);
extern int mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag);
+extern int mpt_Soft_Hard_ResetHandler(MPT_ADAPTER *ioc, int sleepFlag);
extern int mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *cfg);
extern int mpt_alloc_fw_memory(MPT_ADAPTER *ioc, int size);
extern void mpt_free_fw_memory(MPT_ADAPTER *ioc);
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index caa8f568a41c..f06b29193b4e 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -128,7 +128,6 @@ static MptSge_t *kbuf_alloc_2_sgl(int bytes, u32 dir, int sge_offset, int *frags
struct buflist **blp, dma_addr_t *sglbuf_dma, MPT_ADAPTER *ioc);
static void kfree_sgl(MptSge_t *sgl, dma_addr_t sgl_dma,
struct buflist *buflist, MPT_ADAPTER *ioc);
-static int mptctl_bus_reset(MPT_ADAPTER *ioc, u8 function);
/*
* Reset Handler cleanup function
@@ -275,45 +274,6 @@ mptctl_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply)
return 1;
}
-/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/* mptctl_timeout_expired
- *
- * Expecting an interrupt, however timed out.
- *
- */
-static void
-mptctl_timeout_expired(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
-{
- unsigned long flags;
-
- dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": %s\n",
- ioc->name, __func__));
-
- if (mpt_fwfault_debug)
- mpt_halt_firmware(ioc);
-
- spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
- if (ioc->ioc_reset_in_progress) {
- spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
- CLEAR_MGMT_PENDING_STATUS(ioc->ioctl_cmds.status)
- mpt_free_msg_frame(ioc, mf);
- return;
- }
- spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
-
-
- if (!mptctl_bus_reset(ioc, mf->u.hdr.Function))
- return;
-
- /* Issue a reset for this device.
- * The IOC is not responding.
- */
- dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Calling HardReset! \n",
- ioc->name));
- CLEAR_MGMT_PENDING_STATUS(ioc->ioctl_cmds.status)
- mpt_HardResetHandler(ioc, CAN_SLEEP);
- mpt_free_msg_frame(ioc, mf);
-}
static int
mptctl_taskmgmt_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
@@ -343,12 +303,8 @@ mptctl_taskmgmt_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
return 0;
}
-/* mptctl_bus_reset
- *
- * Bus reset code.
- *
- */
-static int mptctl_bus_reset(MPT_ADAPTER *ioc, u8 function)
+static int
+mptctl_do_taskmgmt(MPT_ADAPTER *ioc, u8 tm_type, u8 bus_id, u8 target_id)
{
MPT_FRAME_HDR *mf;
SCSITaskMgmt_t *pScsiTm;
@@ -359,13 +315,6 @@ static int mptctl_bus_reset(MPT_ADAPTER *ioc, u8 function)
unsigned long time_count;
u16 iocstatus;
- /* bus reset is only good for SCSI IO, RAID PASSTHRU */
- if (!(function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
- function == MPI_FUNCTION_SCSI_IO_REQUEST)) {
- dtmprintk(ioc, printk(MYIOC_s_WARN_FMT
- "TaskMgmt, not SCSI_IO!!\n", ioc->name));
- return -EPERM;
- }
mutex_lock(&ioc->taskmgmt_cmds.mutex);
if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0) {
@@ -375,15 +324,14 @@ static int mptctl_bus_reset(MPT_ADAPTER *ioc, u8 function)
retval = 0;
- /* Send request
- */
mf = mpt_get_msg_frame(mptctl_taskmgmt_id, ioc);
if (mf == NULL) {
- dtmprintk(ioc, printk(MYIOC_s_WARN_FMT
- "TaskMgmt, no msg frames!!\n", ioc->name));
+ dtmprintk(ioc,
+ printk(MYIOC_s_WARN_FMT "TaskMgmt, no msg frames!!\n",
+ ioc->name));
mpt_clear_taskmgmt_in_progress_flag(ioc);
retval = -ENOMEM;
- goto mptctl_bus_reset_done;
+ goto tm_done;
}
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt request (mf=%p)\n",
@@ -392,10 +340,13 @@ static int mptctl_bus_reset(MPT_ADAPTER *ioc, u8 function)
pScsiTm = (SCSITaskMgmt_t *) mf;
memset(pScsiTm, 0, sizeof(SCSITaskMgmt_t));
pScsiTm->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
- pScsiTm->TaskType = MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS;
- pScsiTm->MsgFlags = MPI_SCSITASKMGMT_MSGFLAGS_LIPRESET_RESET_OPTION;
- pScsiTm->TargetID = 0;
- pScsiTm->Bus = 0;
+ pScsiTm->TaskType = tm_type;
+ if ((tm_type == MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS) &&
+ (ioc->bus_type == FC))
+ pScsiTm->MsgFlags =
+ MPI_SCSITASKMGMT_MSGFLAGS_LIPRESET_RESET_OPTION;
+ pScsiTm->TargetID = target_id;
+ pScsiTm->Bus = bus_id;
pScsiTm->ChainOffset = 0;
pScsiTm->Reserved = 0;
pScsiTm->Reserved1 = 0;
@@ -413,17 +364,16 @@ static int mptctl_bus_reset(MPT_ADAPTER *ioc, u8 function)
timeout = 30;
break;
case SPI:
- default:
- timeout = 2;
+ default:
+ timeout = 10;
break;
}
- dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
- "TaskMgmt type=%d timeout=%ld\n",
- ioc->name, MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS, timeout));
+ dtmprintk(ioc,
+ printk(MYIOC_s_DEBUG_FMT "TaskMgmt type=%d timeout=%ld\n",
+ ioc->name, tm_type, timeout));
INITIALIZE_MGMT_STATUS(ioc->taskmgmt_cmds.status)
- CLEAR_MGMT_STATUS(ioc->taskmgmt_cmds.status)
time_count = jiffies;
if ((ioc->facts.IOCCapabilities & MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q) &&
(ioc->facts.MsgVersion >= MPI_VERSION_01_05))
@@ -432,17 +382,20 @@ static int mptctl_bus_reset(MPT_ADAPTER *ioc, u8 function)
retval = mpt_send_handshake_request(mptctl_taskmgmt_id, ioc,
sizeof(SCSITaskMgmt_t), (u32 *)pScsiTm, CAN_SLEEP);
if (retval != 0) {
- dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
+ dfailprintk(ioc,
+ printk(MYIOC_s_ERR_FMT
"TaskMgmt send_handshake FAILED!"
" (ioc %p, mf %p, rc=%d) \n", ioc->name,
ioc, mf, retval));
+ mpt_free_msg_frame(ioc, mf);
mpt_clear_taskmgmt_in_progress_flag(ioc);
- goto mptctl_bus_reset_done;
+ goto tm_done;
}
}
/* Now wait for the command to complete */
ii = wait_for_completion_timeout(&ioc->taskmgmt_cmds.done, timeout*HZ);
+
if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"TaskMgmt failed\n", ioc->name));
@@ -452,14 +405,14 @@ static int mptctl_bus_reset(MPT_ADAPTER *ioc, u8 function)
retval = 0;
else
retval = -1; /* return failure */
- goto mptctl_bus_reset_done;
+ goto tm_done;
}
if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_RF_VALID)) {
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"TaskMgmt failed\n", ioc->name));
retval = -1; /* return failure */
- goto mptctl_bus_reset_done;
+ goto tm_done;
}
pScsiTmReply = (SCSITaskMgmtReply_t *) ioc->taskmgmt_cmds.reply;
@@ -467,7 +420,7 @@ static int mptctl_bus_reset(MPT_ADAPTER *ioc, u8 function)
"TaskMgmt fw_channel = %d, fw_id = %d, task_type=0x%02X, "
"iocstatus=0x%04X\n\tloginfo=0x%08X, response_code=0x%02X, "
"term_cmnds=%d\n", ioc->name, pScsiTmReply->Bus,
- pScsiTmReply->TargetID, MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
+ pScsiTmReply->TargetID, tm_type,
le16_to_cpu(pScsiTmReply->IOCStatus),
le32_to_cpu(pScsiTmReply->IOCLogInfo),
pScsiTmReply->ResponseCode,
@@ -485,13 +438,71 @@ static int mptctl_bus_reset(MPT_ADAPTER *ioc, u8 function)
retval = -1; /* return failure */
}
-
- mptctl_bus_reset_done:
+ tm_done:
mutex_unlock(&ioc->taskmgmt_cmds.mutex);
CLEAR_MGMT_STATUS(ioc->taskmgmt_cmds.status)
return retval;
}
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/* mptctl_timeout_expired
+ *
+ * Expecting an interrupt, however timed out.
+ *
+ */
+static void
+mptctl_timeout_expired(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
+{
+ unsigned long flags;
+ int ret_val = -1;
+ SCSIIORequest_t *scsi_req = (SCSIIORequest_t *) mf;
+ u8 function = mf->u.hdr.Function;
+
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": %s\n",
+ ioc->name, __func__));
+
+ if (mpt_fwfault_debug)
+ mpt_halt_firmware(ioc);
+
+ spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
+ if (ioc->ioc_reset_in_progress) {
+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+ CLEAR_MGMT_PENDING_STATUS(ioc->ioctl_cmds.status)
+ mpt_free_msg_frame(ioc, mf);
+ return;
+ }
+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+
+
+ CLEAR_MGMT_PENDING_STATUS(ioc->ioctl_cmds.status)
+
+ if (ioc->bus_type == SAS) {
+ if (function == MPI_FUNCTION_SCSI_IO_REQUEST)
+ ret_val = mptctl_do_taskmgmt(ioc,
+ MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET,
+ scsi_req->Bus, scsi_req->TargetID);
+ else if (function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)
+ ret_val = mptctl_do_taskmgmt(ioc,
+ MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
+ scsi_req->Bus, 0);
+ if (!ret_val)
+ return;
+ } else {
+ if ((function == MPI_FUNCTION_SCSI_IO_REQUEST) ||
+ (function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH))
+ ret_val = mptctl_do_taskmgmt(ioc,
+ MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
+ scsi_req->Bus, 0);
+ if (!ret_val)
+ return;
+ }
+
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Calling Reset! \n",
+ ioc->name));
+ mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
+ mpt_free_msg_frame(ioc, mf);
+}
+
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* mptctl_ioc_reset
@@ -1318,6 +1329,8 @@ mptctl_getiocinfo (unsigned long arg, unsigned int data_size)
if (ioc->sh) {
shost_for_each_device(sdev, ioc->sh) {
vdevice = sdev->hostdata;
+ if (vdevice == NULL || vdevice->vtarget == NULL)
+ continue;
if (vdevice->vtarget->tflags &
MPT_TARGET_FLAGS_RAID_COMPONENT)
continue;
@@ -1439,6 +1452,8 @@ mptctl_gettargetinfo (unsigned long arg)
if (!maxWordsLeft)
continue;
vdevice = sdev->hostdata;
+ if (vdevice == NULL || vdevice->vtarget == NULL)
+ continue;
if (vdevice->vtarget->tflags &
MPT_TARGET_FLAGS_RAID_COMPONENT)
continue;
@@ -1967,6 +1982,9 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
struct scsi_target *starget = scsi_target(sdev);
VirtTarget *vtarget = starget->hostdata;
+ if (vtarget == NULL)
+ continue;
+
if ((pScsiReq->TargetID == vtarget->id) &&
(pScsiReq->Bus == vtarget->channel) &&
(vtarget->tflags & MPT_TARGET_FLAGS_Q_YES))
@@ -2991,6 +3009,14 @@ static int __init mptctl_init(void)
}
mptctl_taskmgmt_id = mpt_register(mptctl_taskmgmt_reply, MPTCTL_DRIVER);
+ if (!mptctl_taskmgmt_id || mptctl_taskmgmt_id >= MPT_MAX_PROTOCOL_DRIVERS) {
+ printk(KERN_ERR MYNAM ": ERROR: Failed to register with Fusion MPT base driver\n");
+ mpt_deregister(mptctl_id);
+ misc_deregister(&mptctl_miscdev);
+ err = -EBUSY;
+ goto out_fail;
+ }
+
mpt_reset_register(mptctl_id, mptctl_ioc_reset);
mpt_event_register(mptctl_id, mptctl_event_process);
@@ -3010,12 +3036,15 @@ static void mptctl_exit(void)
printk(KERN_INFO MYNAM ": Deregistered /dev/%s @ (major,minor=%d,%d)\n",
mptctl_miscdev.name, MISC_MAJOR, mptctl_miscdev.minor);
+ /* De-register event handler from base module */
+ mpt_event_deregister(mptctl_id);
+
/* De-register reset handler from base module */
mpt_reset_deregister(mptctl_id);
/* De-register callback handler from base module */
+ mpt_deregister(mptctl_taskmgmt_id);
mpt_deregister(mptctl_id);
- mpt_reset_deregister(mptctl_taskmgmt_id);
mpt_device_driver_deregister(MPTCTL_DRIVER);
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index 33f7256055b1..b5f03ad81568 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -482,6 +482,7 @@ mptfc_register_dev(MPT_ADAPTER *ioc, int channel, FCDevicePage0_t *pg0)
if (vtarget) {
vtarget->id = pg0->CurrentTargetID;
vtarget->channel = pg0->CurrentBus;
+ vtarget->deleted = 0;
}
}
*((struct mptfc_rport_info **)rport->dd_data) = ri;
@@ -1092,6 +1093,8 @@ mptfc_setup_reset(struct work_struct *work)
container_of(work, MPT_ADAPTER, fc_setup_reset_work);
u64 pn;
struct mptfc_rport_info *ri;
+ struct scsi_target *starget;
+ VirtTarget *vtarget;
/* reset about to happen, delete (block) all rports */
list_for_each_entry(ri, &ioc->fc_rports, list) {
@@ -1099,6 +1102,12 @@ mptfc_setup_reset(struct work_struct *work)
ri->flags &= ~MPT_RPORT_INFO_FLAGS_REGISTERED;
fc_remote_port_delete(ri->rport); /* won't sleep */
ri->rport = NULL;
+ starget = ri->starget;
+ if (starget) {
+ vtarget = starget->hostdata;
+ if (vtarget)
+ vtarget->deleted = 1;
+ }
pn = (u64)ri->pg0.WWPN.High << 32 |
(u64)ri->pg0.WWPN.Low;
@@ -1119,6 +1128,8 @@ mptfc_rescan_devices(struct work_struct *work)
int ii;
u64 pn;
struct mptfc_rport_info *ri;
+ struct scsi_target *starget;
+ VirtTarget *vtarget;
/* start by tagging all ports as missing */
list_for_each_entry(ri, &ioc->fc_rports, list) {
@@ -1146,6 +1157,12 @@ mptfc_rescan_devices(struct work_struct *work)
MPT_RPORT_INFO_FLAGS_MISSING);
fc_remote_port_delete(ri->rport); /* won't sleep */
ri->rport = NULL;
+ starget = ri->starget;
+ if (starget) {
+ vtarget = starget->hostdata;
+ if (vtarget)
+ vtarget->deleted = 1;
+ }
pn = (u64)ri->pg0.WWPN.High << 32 |
(u64)ri->pg0.WWPN.Low;
@@ -1358,6 +1375,9 @@ mptfc_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
unsigned long flags;
int rc=1;
+ if (ioc->bus_type != FC)
+ return 0;
+
devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT "MPT event (=%02Xh) routed to SCSI host driver!\n",
ioc->name, event));
@@ -1396,7 +1416,7 @@ mptfc_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
unsigned long flags;
rc = mptscsih_ioc_reset(ioc,reset_phase);
- if (rc == 0)
+ if ((ioc->bus_type != FC) || (!rc))
return rc;
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 76687126b573..ac000e83db0e 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -1894,7 +1894,7 @@ static struct scsi_host_template mptsas_driver_template = {
.module = THIS_MODULE,
.proc_name = "mptsas",
.proc_info = mptscsih_proc_info,
- .name = "MPT SPI Host",
+ .name = "MPT SAS Host",
.info = mptscsih_info,
.queuecommand = mptsas_qcmd,
.target_alloc = mptsas_target_alloc,
@@ -2038,11 +2038,13 @@ static int mptsas_phy_reset(struct sas_phy *phy, int hard_reset)
timeleft = wait_for_completion_timeout(&ioc->sas_mgmt.done,
10 * HZ);
- if (!timeleft) {
- /* On timeout reset the board */
+ if (!(ioc->sas_mgmt.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
+ error = -ETIME;
mpt_free_msg_frame(ioc, mf);
- mpt_HardResetHandler(ioc, CAN_SLEEP);
- error = -ETIMEDOUT;
+ if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_DID_IOCRESET)
+ goto out_unlock;
+ if (!timeleft)
+ mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
goto out_unlock;
}
@@ -2223,11 +2225,14 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
mpt_put_msg_frame(mptsasMgmtCtx, ioc, mf);
timeleft = wait_for_completion_timeout(&ioc->sas_mgmt.done, 10 * HZ);
- if (!timeleft) {
- printk(MYIOC_s_ERR_FMT "%s: smp timeout!\n", ioc->name, __func__);
- /* On timeout reset the board */
- mpt_HardResetHandler(ioc, CAN_SLEEP);
- ret = -ETIMEDOUT;
+ if (!(ioc->sas_mgmt.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
+ ret = -ETIME;
+ mpt_free_msg_frame(ioc, mf);
+ mf = NULL;
+ if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_DID_IOCRESET)
+ goto unmap;
+ if (!timeleft)
+ mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
goto unmap;
}
mf = NULL;
@@ -2518,6 +2523,12 @@ mptsas_sas_device_pg0(MPT_ADAPTER *ioc, struct mptsas_devinfo *device_info,
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
error = mpt_config(ioc, &cfg);
+
+ if (error == MPI_IOCSTATUS_CONFIG_INVALID_PAGE) {
+ error = -ENODEV;
+ goto out_free_consistent;
+ }
+
if (error)
goto out_free_consistent;
@@ -2594,14 +2605,14 @@ mptsas_sas_expander_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info,
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
error = mpt_config(ioc, &cfg);
- if (error)
- goto out_free_consistent;
-
- if (!buffer->NumPhys) {
+ if (error == MPI_IOCSTATUS_CONFIG_INVALID_PAGE) {
error = -ENODEV;
goto out_free_consistent;
}
+ if (error)
+ goto out_free_consistent;
+
/* save config data */
port_info->num_phys = (buffer->NumPhys) ? buffer->NumPhys : 1;
port_info->phy_info = kcalloc(port_info->num_phys,
@@ -2677,7 +2688,7 @@ mptsas_sas_expander_pg1(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info,
if (error == MPI_IOCSTATUS_CONFIG_INVALID_PAGE) {
error = -ENODEV;
- goto out;
+ goto out_free_consistent;
}
if (error)
@@ -2833,7 +2844,7 @@ mptsas_exp_repmanufacture_info(MPT_ADAPTER *ioc,
if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_DID_IOCRESET)
goto out_free;
if (!timeleft)
- mpt_HardResetHandler(ioc, CAN_SLEEP);
+ mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
goto out_free;
}
@@ -4098,6 +4109,7 @@ mptsas_adding_inactive_raid_components(MPT_ADAPTER *ioc, u8 channel, u8 id)
cfg.pageAddr = (channel << 8) + id;
cfg.cfghdr.hdr = &hdr;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
+ cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT;
if (mpt_config(ioc, &cfg) != 0)
goto out;
@@ -4717,7 +4729,7 @@ mptsas_broadcast_primative_work(struct fw_event_work *fw_event)
if (issue_reset) {
printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n",
ioc->name, __func__);
- mpt_HardResetHandler(ioc, CAN_SLEEP);
+ mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
}
mptsas_free_fw_event(ioc, fw_event);
}
@@ -4779,6 +4791,9 @@ mptsas_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *reply)
struct fw_event_work *fw_event;
unsigned long delay;
+ if (ioc->bus_type != SAS)
+ return 0;
+
/* events turned off due to host reset or driver unloading */
if (ioc->fw_events_off)
return 0;
@@ -5073,6 +5088,12 @@ static void __devexit mptsas_remove(struct pci_dev *pdev)
struct mptsas_portinfo *p, *n;
int i;
+ if (!ioc->sh) {
+ printk(MYIOC_s_INFO_FMT "IOC is in Target mode\n", ioc->name);
+ mpt_detach(pdev);
+ return;
+ }
+
mptsas_shutdown(pdev);
mptsas_del_device_components(ioc);
diff --git a/drivers/message/fusion/mptsas.h b/drivers/message/fusion/mptsas.h
index 953c2bfcf6aa..7b249edbda78 100644
--- a/drivers/message/fusion/mptsas.h
+++ b/drivers/message/fusion/mptsas.h
@@ -110,7 +110,7 @@ struct fw_event_work {
MPT_ADAPTER *ioc;
u32 event;
u8 retries;
- u8 event_data[1];
+ u8 __attribute__((aligned(4))) event_data[1];
};
struct mptsas_discovery_event {
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 6796597dcee0..5c53624e0e87 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -1149,11 +1149,6 @@ mptscsih_remove(struct pci_dev *pdev)
MPT_SCSI_HOST *hd;
int sz1;
- if(!host) {
- mpt_detach(pdev);
- return;
- }
-
scsi_remove_host(host);
if((hd = shost_priv(host)) == NULL)
@@ -1711,7 +1706,7 @@ mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun,
if (issue_hard_reset) {
printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n",
ioc->name, __func__);
- retval = mpt_HardResetHandler(ioc, CAN_SLEEP);
+ retval = mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
mpt_free_msg_frame(ioc, mf);
}
@@ -1728,6 +1723,7 @@ mptscsih_get_tm_timeout(MPT_ADAPTER *ioc)
case FC:
return 40;
case SAS:
+ return 30;
case SPI:
default:
return 10;
@@ -1777,7 +1773,7 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
ioc->name, SCpnt));
SCpnt->result = DID_NO_CONNECT << 16;
SCpnt->scsi_done(SCpnt);
- retval = 0;
+ retval = SUCCESS;
goto out;
}
@@ -1792,6 +1788,17 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
goto out;
}
+ /* Task aborts are not supported for volumes.
+ */
+ if (vdevice->vtarget->raidVolume) {
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "task abort: raid volume (sc=%p)\n",
+ ioc->name, SCpnt));
+ SCpnt->result = DID_RESET << 16;
+ retval = FAILED;
+ goto out;
+ }
+
/* Find this command
*/
if ((scpnt_idx = SCPNT_TO_LOOKUP_IDX(ioc, SCpnt)) < 0) {
@@ -1991,7 +1998,7 @@ mptscsih_host_reset(struct scsi_cmnd *SCpnt)
/* If our attempts to reset the host failed, then return a failed
* status. The host will be taken off line by the SCSI mid-layer.
*/
- retval = mpt_HardResetHandler(ioc, CAN_SLEEP);
+ retval = mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
if (retval < 0)
status = FAILED;
else
@@ -2344,6 +2351,8 @@ mptscsih_slave_destroy(struct scsi_device *sdev)
starget = scsi_target(sdev);
vtarget = starget->hostdata;
vdevice = sdev->hostdata;
+ if (!vdevice)
+ return;
mptscsih_search_running_cmds(hd, vdevice);
vtarget->num_luns--;
@@ -2561,9 +2570,7 @@ mptscsih_getclear_scsi_lookup(MPT_ADAPTER *ioc, int i)
}
/**
- * mptscsih_set_scsi_lookup
- *
- * writes a scmd entry into the ScsiLookup[] array list
+ * mptscsih_set_scsi_lookup - write a scmd entry into the ScsiLookup[] array list
*
* @ioc: Pointer to MPT_ADAPTER structure
* @i: index into the array
@@ -2726,7 +2733,7 @@ mptscsih_scandv_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req,
/**
- * mptscsih_get_completion_code -
+ * mptscsih_get_completion_code - get completion code from MPT request
* @ioc: Pointer to MPT_ADAPTER structure
* @req: Pointer to original MPT request frame
* @reply: Pointer to MPT reply frame (NULL if TurboReply)
@@ -3040,7 +3047,7 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
if (!timeleft) {
printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n",
ioc->name, __func__);
- mpt_HardResetHandler(ioc, CAN_SLEEP);
+ mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
mpt_free_msg_frame(ioc, mf);
}
goto out;
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
index e44365193fdf..1abaa5d01ae3 100644
--- a/drivers/message/fusion/mptspi.c
+++ b/drivers/message/fusion/mptspi.c
@@ -210,6 +210,10 @@ mptspi_setTargetNegoParms(MPT_SCSI_HOST *hd, VirtTarget *target,
target->maxOffset = offset;
target->maxWidth = width;
+ spi_min_period(scsi_target(sdev)) = factor;
+ spi_max_offset(scsi_target(sdev)) = offset;
+ spi_max_width(scsi_target(sdev)) = width;
+
target->tflags |= MPT_TARGET_FLAGS_VALID_NEGO;
/* Disable unused features.
@@ -558,6 +562,7 @@ static int mptspi_read_spi_device_pg0(struct scsi_target *starget,
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
cfg.dir = 0;
cfg.pageAddr = starget->id;
+ cfg.timeout = 60;
if (mpt_config(ioc, &cfg)) {
starget_printk(KERN_ERR, starget, MYIOC_s_FMT "mpt_config failed\n", ioc->name);
@@ -1152,6 +1157,9 @@ mptspi_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
u8 event = le32_to_cpu(pEvReply->Event) & 0xFF;
struct _MPT_SCSI_HOST *hd = shost_priv(ioc->sh);
+ if (ioc->bus_type != SPI)
+ return 0;
+
if (hd && event == MPI_EVENT_INTEGRATED_RAID) {
int reason
= (le32_to_cpu(pEvReply->Data[0]) & 0x00FF0000) >> 16;
@@ -1283,6 +1291,8 @@ mptspi_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
int rc;
rc = mptscsih_ioc_reset(ioc, reset_phase);
+ if ((ioc->bus_type != SPI) || (!rc))
+ return rc;
/* only try to do a renegotiation if we're properly set up
* if we get an ioc fault on bringup, ioc->sh will be NULL */
diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
index 11073fa3d9f4..c4b117f5fb70 100644
--- a/drivers/message/i2o/i2o_config.c
+++ b/drivers/message/i2o/i2o_config.c
@@ -186,14 +186,9 @@ static int i2o_cfg_parms(unsigned long arg, unsigned int type)
if (!dev)
return -ENXIO;
- ops = kmalloc(kcmd.oplen, GFP_KERNEL);
- if (!ops)
- return -ENOMEM;
-
- if (copy_from_user(ops, kcmd.opbuf, kcmd.oplen)) {
- kfree(ops);
- return -EFAULT;
- }
+ ops = memdup_user(kcmd.opbuf, kcmd.oplen);
+ if (IS_ERR(ops))
+ return PTR_ERR(ops);
/*
* It's possible to have a _very_ large table
@@ -314,22 +309,22 @@ static int i2o_cfg_swul(unsigned long arg)
int ret = 0;
if (copy_from_user(&kxfer, pxfer, sizeof(struct i2o_sw_xfer)))
- goto return_fault;
+ return -EFAULT;
if (get_user(swlen, kxfer.swlen) < 0)
- goto return_fault;
+ return -EFAULT;
if (get_user(maxfrag, kxfer.maxfrag) < 0)
- goto return_fault;
+ return -EFAULT;
if (get_user(curfrag, kxfer.curfrag) < 0)
- goto return_fault;
+ return -EFAULT;
if (curfrag == maxfrag)
fragsize = swlen - (maxfrag - 1) * 8192;
if (!kxfer.buf)
- goto return_fault;
+ return -EFAULT;
c = i2o_find_iop(kxfer.iop);
if (!c)
@@ -373,12 +368,8 @@ static int i2o_cfg_swul(unsigned long arg)
i2o_dma_free(&c->pdev->dev, &buffer);
- return_ret:
return ret;
- return_fault:
- ret = -EFAULT;
- goto return_ret;
-};
+}
static int i2o_cfg_swdel(unsigned long arg)
{
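For readers unfamiliar with the conversion above: memdup_user() allocates a kernel buffer and copies the user data in a single step, returning an ERR_PTR on failure. A minimal sketch of the idiom follows; ubuf and len are generic placeholders, not this driver's fields.

	void *kbuf;

	kbuf = memdup_user(ubuf, len);	/* kmalloc() + copy_from_user() in one call */
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);	/* typically -ENOMEM or -EFAULT */
	/* ... use kbuf ... */
	kfree(kbuf);
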
diff --git a/drivers/mfd/88pm860x-core.c b/drivers/mfd/88pm860x-core.c
index 6a14d2b1ccf0..2c65a2c57294 100644
--- a/drivers/mfd/88pm860x-core.c
+++ b/drivers/mfd/88pm860x-core.c
@@ -173,33 +173,35 @@ static struct resource regulator_resources[] = {
PM8607_REG_RESOURCE(LDO9, LDO9),
PM8607_REG_RESOURCE(LDO10, LDO10),
PM8607_REG_RESOURCE(LDO12, LDO12),
+ PM8607_REG_RESOURCE(VIBRATOR_SET, VIBRATOR_SET),
PM8607_REG_RESOURCE(LDO14, LDO14),
};
-#define PM8607_REG_DEVS(_name, _id) \
+#define PM8607_REG_DEVS(_id) \
{ \
- .name = "88pm8607-" #_name, \
+ .name = "88pm860x-regulator", \
.num_resources = 1, \
.resources = &regulator_resources[PM8607_ID_##_id], \
.id = PM8607_ID_##_id, \
}
static struct mfd_cell regulator_devs[] = {
- PM8607_REG_DEVS(buck1, BUCK1),
- PM8607_REG_DEVS(buck2, BUCK2),
- PM8607_REG_DEVS(buck3, BUCK3),
- PM8607_REG_DEVS(ldo1, LDO1),
- PM8607_REG_DEVS(ldo2, LDO2),
- PM8607_REG_DEVS(ldo3, LDO3),
- PM8607_REG_DEVS(ldo4, LDO4),
- PM8607_REG_DEVS(ldo5, LDO5),
- PM8607_REG_DEVS(ldo6, LDO6),
- PM8607_REG_DEVS(ldo7, LDO7),
- PM8607_REG_DEVS(ldo8, LDO8),
- PM8607_REG_DEVS(ldo9, LDO9),
- PM8607_REG_DEVS(ldo10, LDO10),
- PM8607_REG_DEVS(ldo12, LDO12),
- PM8607_REG_DEVS(ldo14, LDO14),
+ PM8607_REG_DEVS(BUCK1),
+ PM8607_REG_DEVS(BUCK2),
+ PM8607_REG_DEVS(BUCK3),
+ PM8607_REG_DEVS(LDO1),
+ PM8607_REG_DEVS(LDO2),
+ PM8607_REG_DEVS(LDO3),
+ PM8607_REG_DEVS(LDO4),
+ PM8607_REG_DEVS(LDO5),
+ PM8607_REG_DEVS(LDO6),
+ PM8607_REG_DEVS(LDO7),
+ PM8607_REG_DEVS(LDO8),
+ PM8607_REG_DEVS(LDO9),
+ PM8607_REG_DEVS(LDO10),
+ PM8607_REG_DEVS(LDO12),
+ PM8607_REG_DEVS(LDO13),
+ PM8607_REG_DEVS(LDO14),
};
struct pm860x_irq_data {
@@ -564,7 +566,7 @@ out:
return ret;
}
-static void __devexit device_irq_exit(struct pm860x_chip *chip)
+static void device_irq_exit(struct pm860x_chip *chip)
{
if (chip->core_irq)
free_irq(chip->core_irq, chip);
@@ -701,7 +703,7 @@ out:
return;
}
-int pm860x_device_init(struct pm860x_chip *chip,
+int __devinit pm860x_device_init(struct pm860x_chip *chip,
struct pm860x_platform_data *pdata)
{
chip->core_irq = 0;
@@ -729,7 +731,7 @@ int pm860x_device_init(struct pm860x_chip *chip,
return 0;
}
-void pm860x_device_exit(struct pm860x_chip *chip)
+void __devexit pm860x_device_exit(struct pm860x_chip *chip)
{
device_irq_exit(chip);
mfd_remove_devices(chip->dev);
diff --git a/drivers/mfd/88pm860x-i2c.c b/drivers/mfd/88pm860x-i2c.c
index 4a6e7186334e..c933b64d1283 100644
--- a/drivers/mfd/88pm860x-i2c.c
+++ b/drivers/mfd/88pm860x-i2c.c
@@ -200,8 +200,8 @@ static int __devexit pm860x_remove(struct i2c_client *client)
pm860x_device_exit(chip);
i2c_unregister_device(chip->companion);
- i2c_set_clientdata(chip->companion, NULL);
i2c_set_clientdata(chip->client, NULL);
+ i2c_set_clientdata(client, NULL);
kfree(chip);
return 0;
}
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 2a5a0b78f84e..9da0e504bbe9 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -2,8 +2,14 @@
# Multifunction miscellaneous devices
#
-menu "Multifunction device drivers"
+menuconfig MFD_SUPPORT
+ bool "Multifunction device drivers"
depends on HAS_IOMEM
+ default y
+ help
+ Configure MFD device drivers.
+
+if MFD_SUPPORT
config MFD_CORE
tristate
@@ -49,10 +55,15 @@ config MFD_SH_MOBILE_SDHI
bool "Support for SuperH Mobile SDHI"
depends on SUPERH || ARCH_SHMOBILE
select MFD_CORE
+ select TMIO_MMC_DMA
---help---
This driver supports the SDHI hardware block found in many
SuperH Mobile SoCs.
+config MFD_DAVINCI_VOICECODEC
+ tristate
+ select MFD_CORE
+
config MFD_DM355EVM_MSP
bool "DaVinci DM355 EVM microcontroller"
depends on I2C && MACH_DAVINCI_DM355_EVM
@@ -111,6 +122,18 @@ config TPS65010
This driver can also be built as a module. If so, the module
will be called tps65010.
+config TPS6507X
+ tristate "TPS6507x Power Management / Touch Screen chips"
+ select MFD_CORE
+ depends on I2C
+ help
+ If you say yes here you get support for the TPS6507x series of
+ Power Management / Touch Screen chips. These include voltage
+ regulators, lithium ion/polymer battery charging, touch screen
+ and other features that are often used in portable devices.
+ This driver can also be built as a module. If so, the module
+ will be called tps6507x.
+
config MENELAUS
bool "Texas Instruments TWL92330/Menelaus PM chip"
depends on I2C=y && ARCH_OMAP2
@@ -154,10 +177,26 @@ config TWL4030_CODEC
select MFD_CORE
default n
+config MFD_TC35892
+ bool "Support Toshiba TC35892"
+ depends on I2C=y && GENERIC_HARDIRQS
+ select MFD_CORE
+ help
+ Support for the Toshiba TC35892 I/O Expander.
+
+ This driver provides common support for accessing the device;
+ additional drivers must be enabled in order to use the
+ functionality of the device.
+
config MFD_TMIO
bool
default n
+config TMIO_MMC_DMA
+ bool
+ select DMA_ENGINE
+ select DMADEVICES
+
config MFD_T7L66XB
bool "Support Toshiba T7L66XB"
depends on ARM && HAVE_CLK
@@ -297,9 +336,9 @@ config MFD_WM8350_I2C
selected to enable support for the functionality of the chip.
config MFD_WM8994
- tristate "Support Wolfson Microelectronics WM8994"
+ bool "Support Wolfson Microelectronics WM8994"
select MFD_CORE
- depends on I2C
+ depends on I2C=y && GENERIC_HARDIRQS
help
The WM8994 is a highly integrated hi-fi CODEC designed for
smartphone applications. As well as audio functionality it
@@ -341,9 +380,19 @@ config PCF50633_GPIO
Say yes here if you want to include support GPIO for pins on
the PCF50633 chip.
+config ABX500_CORE
+ bool "ST-Ericsson ABX500 Mixed Signal Circuit register functions"
+ default y if ARCH_U300
+ help
+ Say yes here if you have chips from the ABX500 Mixed Signal IC
+ family. This core driver exposes register access functions.
+ Functionality-specific drivers using these functions can
+ remain unchanged when the IC changes. Binding of the functions to
+ actual register access is done by the IC core driver.
+
config AB3100_CORE
bool "ST-Ericsson AB3100 Mixed Signal Circuit core functions"
- depends on I2C=y
+ depends on I2C=y && ABX500_CORE
default y if ARCH_U300
help
Select this to enable the AB3100 Mixed Signal IC core
@@ -371,15 +420,30 @@ config EZX_PCAP
This enables the PCAP ASIC present on EZX Phones. This is
needed for MMC, TouchScreen, Sound, USB, etc..
-config AB4500_CORE
- tristate "ST-Ericsson's AB4500 Mixed Signal Power management chip"
- depends on SPI
+config AB8500_CORE
+ bool "ST-Ericsson AB8500 Mixed Signal Power Management chip"
+ depends on SPI=y && GENERIC_HARDIRQS
+ select MFD_CORE
help
- Select this option to enable access to AB4500 power management
+ Select this option to enable access to AB8500 power management
chip. This connects to U8500 on the SSP/SPI bus and exports
read/write functions for the devices to get access to this chip.
This chip embeds various other multimedia functionalities as well.
+config AB3550_CORE
+ bool "ST-Ericsson AB3550 Mixed Signal Circuit core functions"
+ select MFD_CORE
+ depends on I2C=y && GENERIC_HARDIRQS && ABX500_CORE
+ help
+ Select this to enable the AB3550 Mixed Signal IC core
+ functionality. This connects to a AB3550 on the I2C bus
+ and expose a number of symbols needed for dependent devices
+ to read and write registers and subscribe to events from
+ this multi-functional IC. This is needed to use other features
+ of the AB3550 such as battery-backed RTC, charging control,
+ LEDs, vibrator, system power and temperature, power management
+ and ALSA sound.
+
config MFD_TIMBERDALE
tristate "Support for the Timberdale FPGA"
select MFD_CORE
@@ -399,7 +463,26 @@ config LPC_SCH
LPC bridge function of the Intel SCH provides support for
System Management Bus and General Purpose I/O.
-endmenu
+config MFD_RDC321X
+ tristate "Support for RDC-R321x southbridge"
+ select MFD_CORE
+ depends on PCI
+ help
+ Say yes here if you want to have support for the RDC R-321x SoC
+ southbridge which provides access to GPIOs and Watchdog using the
+ southbridge PCI device configuration space.
+
+config MFD_JANZ_CMODIO
+ tristate "Support for Janz CMOD-IO PCI MODULbus Carrier Board"
+ select MFD_CORE
+ depends on PCI
+ help
+ This is the core driver for the Janz CMOD-IO PCI MODULbus
+ carrier board. This device is a PCI to MODULbus bridge which may
+ host many different types of MODULbus daughterboards, including
+ CAN and GPIO controllers.
+
+endif # MFD_SUPPORT
menu "Multimedia Capabilities Port drivers"
depends on ARCH_SA1100
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 22715add99a7..fb503e77dc60 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -12,8 +12,10 @@ obj-$(CONFIG_HTC_EGPIO) += htc-egpio.o
obj-$(CONFIG_HTC_PASIC3) += htc-pasic3.o
obj-$(CONFIG_HTC_I2CPLD) += htc-i2cpld.o
+obj-$(CONFIG_MFD_DAVINCI_VOICECODEC) += davinci_voicecodec.o
obj-$(CONFIG_MFD_DM355EVM_MSP) += dm355evm_msp.o
+obj-$(CONFIG_MFD_TC35892) += tc35892.o
obj-$(CONFIG_MFD_T7L66XB) += t7l66xb.o tmio_core.o
obj-$(CONFIG_MFD_TC6387XB) += tc6387xb.o tmio_core.o
obj-$(CONFIG_MFD_TC6393XB) += tc6393xb.o tmio_core.o
@@ -25,9 +27,10 @@ wm8350-objs := wm8350-core.o wm8350-regmap.o wm8350-gpio.o
wm8350-objs += wm8350-irq.o
obj-$(CONFIG_MFD_WM8350) += wm8350.o
obj-$(CONFIG_MFD_WM8350_I2C) += wm8350-i2c.o
-obj-$(CONFIG_MFD_WM8994) += wm8994-core.o
+obj-$(CONFIG_MFD_WM8994) += wm8994-core.o wm8994-irq.o
obj-$(CONFIG_TPS65010) += tps65010.o
+obj-$(CONFIG_TPS6507X) += tps6507x.o
obj-$(CONFIG_MENELAUS) += menelaus.o
obj-$(CONFIG_TWL4030_CORE) += twl-core.o twl4030-irq.o twl6030-irq.o
@@ -54,12 +57,17 @@ obj-$(CONFIG_PMIC_DA903X) += da903x.o
max8925-objs := max8925-core.o max8925-i2c.o
obj-$(CONFIG_MFD_MAX8925) += max8925.o
-obj-$(CONFIG_MFD_PCF50633) += pcf50633-core.o
+pcf50633-objs := pcf50633-core.o pcf50633-irq.o
+obj-$(CONFIG_MFD_PCF50633) += pcf50633.o
obj-$(CONFIG_PCF50633_ADC) += pcf50633-adc.o
obj-$(CONFIG_PCF50633_GPIO) += pcf50633-gpio.o
+obj-$(CONFIG_ABX500_CORE) += abx500-core.o
obj-$(CONFIG_AB3100_CORE) += ab3100-core.o
obj-$(CONFIG_AB3100_OTP) += ab3100-otp.o
-obj-$(CONFIG_AB4500_CORE) += ab4500-core.o
+obj-$(CONFIG_AB3550_CORE) += ab3550-core.o
+obj-$(CONFIG_AB8500_CORE) += ab8500-core.o ab8500-spi.o
obj-$(CONFIG_MFD_TIMBERDALE) += timberdale.o
obj-$(CONFIG_PMIC_ADP5520) += adp5520.o
-obj-$(CONFIG_LPC_SCH) += lpc_sch.o
\ No newline at end of file
+obj-$(CONFIG_LPC_SCH) += lpc_sch.o
+obj-$(CONFIG_MFD_RDC321X) += rdc321x-southbridge.o
+obj-$(CONFIG_MFD_JANZ_CMODIO) += janz-cmodio.o
diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c
index e4ca5909e424..53ebfee548fa 100644
--- a/drivers/mfd/ab3100-core.c
+++ b/drivers/mfd/ab3100-core.c
@@ -19,7 +19,7 @@
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
-#include <linux/mfd/ab3100.h>
+#include <linux/mfd/abx500.h>
/* These are the only registers inside AB3100 used in this main file */
@@ -59,24 +59,15 @@
* The AB3100 is usually assigned address 0x48 (7-bit)
* The chip is defined in the platform i2c_board_data section.
*/
-
-u8 ab3100_get_chip_type(struct ab3100 *ab3100)
+static int ab3100_get_chip_id(struct device *dev)
{
- u8 chip = ABUNKNOWN;
-
- switch (ab3100->chip_id & 0xf0) {
- case 0xa0:
- chip = AB3000;
- break;
- case 0xc0:
- chip = AB3100;
- break;
- }
- return chip;
+ struct ab3100 *ab3100 = dev_get_drvdata(dev->parent);
+
+ return (int)ab3100->chip_id;
}
-EXPORT_SYMBOL(ab3100_get_chip_type);
-int ab3100_set_register_interruptible(struct ab3100 *ab3100, u8 reg, u8 regval)
+static int ab3100_set_register_interruptible(struct ab3100 *ab3100,
+ u8 reg, u8 regval)
{
u8 regandval[2] = {reg, regval};
int err;
@@ -108,8 +99,14 @@ int ab3100_set_register_interruptible(struct ab3100 *ab3100, u8 reg, u8 regval)
mutex_unlock(&ab3100->access_mutex);
return err;
}
-EXPORT_SYMBOL(ab3100_set_register_interruptible);
+static int set_register_interruptible(struct device *dev,
+ u8 bank, u8 reg, u8 value)
+{
+ struct ab3100 *ab3100 = dev_get_drvdata(dev->parent);
+
+ return ab3100_set_register_interruptible(ab3100, reg, value);
+}
/*
* The test registers exist at an I2C bus address up one
@@ -148,8 +145,8 @@ static int ab3100_set_test_register_interruptible(struct ab3100 *ab3100,
return err;
}
-
-int ab3100_get_register_interruptible(struct ab3100 *ab3100, u8 reg, u8 *regval)
+static int ab3100_get_register_interruptible(struct ab3100 *ab3100,
+ u8 reg, u8 *regval)
{
int err;
@@ -203,10 +200,16 @@ int ab3100_get_register_interruptible(struct ab3100 *ab3100, u8 reg, u8 *regval)
mutex_unlock(&ab3100->access_mutex);
return err;
}
-EXPORT_SYMBOL(ab3100_get_register_interruptible);
+static int get_register_interruptible(struct device *dev, u8 bank, u8 reg,
+ u8 *value)
+{
+ struct ab3100 *ab3100 = dev_get_drvdata(dev->parent);
+
+ return ab3100_get_register_interruptible(ab3100, reg, value);
+}
-int ab3100_get_register_page_interruptible(struct ab3100 *ab3100,
+static int ab3100_get_register_page_interruptible(struct ab3100 *ab3100,
u8 first_reg, u8 *regvals, u8 numregs)
{
int err;
@@ -260,10 +263,17 @@ int ab3100_get_register_page_interruptible(struct ab3100 *ab3100,
mutex_unlock(&ab3100->access_mutex);
return err;
}
-EXPORT_SYMBOL(ab3100_get_register_page_interruptible);
+static int get_register_page_interruptible(struct device *dev, u8 bank,
+ u8 first_reg, u8 *regvals, u8 numregs)
+{
+ struct ab3100 *ab3100 = dev_get_drvdata(dev->parent);
+
+ return ab3100_get_register_page_interruptible(ab3100,
+ first_reg, regvals, numregs);
+}
-int ab3100_mask_and_set_register_interruptible(struct ab3100 *ab3100,
+static int ab3100_mask_and_set_register_interruptible(struct ab3100 *ab3100,
u8 reg, u8 andmask, u8 ormask)
{
u8 regandval[2] = {reg, 0};
@@ -331,8 +341,15 @@ int ab3100_mask_and_set_register_interruptible(struct ab3100 *ab3100,
mutex_unlock(&ab3100->access_mutex);
return err;
}
-EXPORT_SYMBOL(ab3100_mask_and_set_register_interruptible);
+static int mask_and_set_register_interruptible(struct device *dev, u8 bank,
+ u8 reg, u8 bitmask, u8 bitvalues)
+{
+ struct ab3100 *ab3100 = dev_get_drvdata(dev->parent);
+
+ return ab3100_mask_and_set_register_interruptible(ab3100,
+ reg, bitmask, (bitmask & bitvalues));
+}
/*
* Register a simple callback for handling any AB3100 events.
@@ -357,15 +374,27 @@ int ab3100_event_unregister(struct ab3100 *ab3100,
EXPORT_SYMBOL(ab3100_event_unregister);
-int ab3100_event_registers_startup_state_get(struct ab3100 *ab3100,
- u32 *fatevent)
+static int ab3100_event_registers_startup_state_get(struct device *dev,
+ u8 *event)
{
+ struct ab3100 *ab3100 = dev_get_drvdata(dev->parent);
if (!ab3100->startup_events_read)
return -EAGAIN; /* Try again later */
- *fatevent = ab3100->startup_events;
+ memcpy(event, ab3100->startup_events, 3);
return 0;
}
-EXPORT_SYMBOL(ab3100_event_registers_startup_state_get);
+
+static struct abx500_ops ab3100_ops = {
+ .get_chip_id = ab3100_get_chip_id,
+ .set_register = set_register_interruptible,
+ .get_register = get_register_interruptible,
+ .get_register_page = get_register_page_interruptible,
+ .set_register_page = NULL,
+ .mask_and_set_register = mask_and_set_register_interruptible,
+ .event_registers_startup_state_get =
+ ab3100_event_registers_startup_state_get,
+ .startup_irq_enabled = NULL,
+};
/*
* This is a threaded interrupt handler so we can make some
@@ -390,7 +419,9 @@ static irqreturn_t ab3100_irq_handler(int irq, void *data)
event_regs[2];
if (!ab3100->startup_events_read) {
- ab3100->startup_events = fatevent;
+ ab3100->startup_events[0] = event_regs[0];
+ ab3100->startup_events[1] = event_regs[1];
+ ab3100->startup_events[2] = event_regs[2];
ab3100->startup_events_read = true;
}
/*
@@ -703,7 +734,8 @@ static int __init ab3100_setup(struct ab3100 *ab3100)
dev_warn(ab3100->dev,
"AB3100 P1E variant detected, "
"forcing chip to 32KHz\n");
- err = ab3100_set_test_register_interruptible(ab3100, 0x02, 0x08);
+ err = ab3100_set_test_register_interruptible(ab3100,
+ 0x02, 0x08);
}
exit_no_setup:
@@ -898,6 +930,10 @@ static int __init ab3100_probe(struct i2c_client *client,
if (err)
goto exit_no_irq;
+ err = abx500_register_ops(&client->dev, &ab3100_ops);
+ if (err)
+ goto exit_no_ops;
+
/* Set parent and a pointer back to the container in device data */
for (i = 0; i < ARRAY_SIZE(ab3100_platform_devs); i++) {
ab3100_platform_devs[i]->dev.parent =
@@ -915,11 +951,13 @@ static int __init ab3100_probe(struct i2c_client *client,
return 0;
+ exit_no_ops:
exit_no_irq:
exit_no_setup:
i2c_unregister_device(ab3100->testreg_client);
exit_no_testreg_client:
exit_no_detect:
+ i2c_set_clientdata(client, NULL);
kfree(ab3100);
return err;
}
@@ -941,6 +979,7 @@ static int __exit ab3100_remove(struct i2c_client *client)
* their notifiers so deactivate IRQ
*/
free_irq(client->irq, ab3100);
+ i2c_set_clientdata(client, NULL);
kfree(ab3100);
return 0;
}
diff --git a/drivers/mfd/ab3100-otp.c b/drivers/mfd/ab3100-otp.c
index 2d14655fdebd..63d2b727ddbb 100644
--- a/drivers/mfd/ab3100-otp.c
+++ b/drivers/mfd/ab3100-otp.c
@@ -12,7 +12,7 @@
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/platform_device.h>
-#include <linux/mfd/ab3100.h>
+#include <linux/mfd/abx500.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
@@ -30,7 +30,6 @@
/**
* struct ab3100_otp
* @dev containing device
- * @ab3100 a pointer to the parent ab3100 device struct
* @locked whether the OTP is locked, after locking, no more bits
* can be changed but before locking it is still possible
* to change bits from 1->0.
@@ -49,7 +48,6 @@
*/
struct ab3100_otp {
struct device *dev;
- struct ab3100 *ab3100;
bool locked;
u32 freq;
bool paf;
@@ -63,19 +61,19 @@ struct ab3100_otp {
static int __init ab3100_otp_read(struct ab3100_otp *otp)
{
- struct ab3100 *ab = otp->ab3100;
u8 otpval[8];
u8 otpp;
int err;
- err = ab3100_get_register_interruptible(ab, AB3100_OTPP, &otpp);
+ err = abx500_get_register_interruptible(otp->dev, 0,
+ AB3100_OTPP, &otpp);
if (err) {
dev_err(otp->dev, "unable to read OTPP register\n");
return err;
}
- err = ab3100_get_register_page_interruptible(ab, AB3100_OTP0,
- otpval, 8);
+ err = abx500_get_register_page_interruptible(otp->dev, 0,
+ AB3100_OTP0, otpval, 8);
if (err) {
dev_err(otp->dev, "unable to read OTP register page\n");
return err;
@@ -197,7 +195,6 @@ static int __init ab3100_otp_probe(struct platform_device *pdev)
otp->dev = &pdev->dev;
/* Replace platform data coming in with a local struct */
- otp->ab3100 = platform_get_drvdata(pdev);
platform_set_drvdata(pdev, otp);
err = ab3100_otp_read(otp);
diff --git a/drivers/mfd/ab3550-core.c b/drivers/mfd/ab3550-core.c
new file mode 100644
index 000000000000..1060f8e1c40a
--- /dev/null
+++ b/drivers/mfd/ab3550-core.c
@@ -0,0 +1,1401 @@
+/*
+ * Copyright (C) 2007-2010 ST-Ericsson
+ * License terms: GNU General Public License (GPL) version 2
+ * Low-level core for exclusive access to the AB3550 IC on the I2C bus
+ * and some basic chip-configuration.
+ * Author: Bengt Jonsson <bengt.g.jonsson@stericsson.com>
+ * Author: Mattias Nilsson <mattias.i.nilsson@stericsson.com>
+ * Author: Mattias Wallin <mattias.wallin@stericsson.com>
+ * Author: Rickard Andersson <rickard.andersson@stericsson.com>
+ */
+
+#include <linux/i2c.h>
+#include <linux/mutex.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/random.h>
+#include <linux/workqueue.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include <linux/mfd/abx500.h>
+#include <linux/list.h>
+#include <linux/bitops.h>
+#include <linux/spinlock.h>
+#include <linux/mfd/core.h>
+
+#define AB3550_NAME_STRING "ab3550"
+#define AB3550_ID_FORMAT_STRING "AB3550 %s"
+#define AB3550_NUM_BANKS 2
+#define AB3550_NUM_EVENT_REG 5
+
+/* These are the only registers inside AB3550 used in this main file */
+
+/* Chip ID register */
+#define AB3550_CID_REG 0x20
+
+/* Interrupt event registers */
+#define AB3550_EVENT_BANK 0
+#define AB3550_EVENT_REG 0x22
+
+/* Read/write operation values. */
+#define AB3550_PERM_RD (0x01)
+#define AB3550_PERM_WR (0x02)
+
+/* Read/write permissions. */
+#define AB3550_PERM_RO (AB3550_PERM_RD)
+#define AB3550_PERM_RW (AB3550_PERM_RD | AB3550_PERM_WR)
+
+/**
+ * struct ab3550
+ * @access_mutex: lock out concurrent accesses to the AB registers
+ * @i2c_client: I2C client for this chip
+ * @chip_name: name of this chip variant
+ * @chip_id: 8 bit chip ID for this chip variant
+ * @mask_work: a worker for writing to mask registers
+ * @event_lock: a lock to protect the event_mask
+ * @event_mask: a local copy of the mask event registers
+ * @startup_events: a copy of the first reading of the event registers
+ * @startup_events_read: whether the first events have been read
+ */
+struct ab3550 {
+ struct mutex access_mutex;
+ struct i2c_client *i2c_client[AB3550_NUM_BANKS];
+ char chip_name[32];
+ u8 chip_id;
+ struct work_struct mask_work;
+ spinlock_t event_lock;
+ u8 event_mask[AB3550_NUM_EVENT_REG];
+ u8 startup_events[AB3550_NUM_EVENT_REG];
+ bool startup_events_read;
+#ifdef CONFIG_DEBUG_FS
+ unsigned int debug_bank;
+ unsigned int debug_address;
+#endif
+};
+
+/**
+ * struct ab3550_reg_range
+ * @first: the first address of the range
+ * @last: the last address of the range
+ * @perm: access permissions for the range
+ */
+struct ab3550_reg_range {
+ u8 first;
+ u8 last;
+ u8 perm;
+};
+
+/**
+ * struct ab3550_reg_ranges
+ * @count: the number of ranges in the list
+ * @range: the list of register ranges
+ */
+struct ab3550_reg_ranges {
+ u8 count;
+ const struct ab3550_reg_range *range;
+};
+
+/*
+ * Permissible register ranges for reading and writing per device and bank.
+ *
+ * The ranges must be listed in increasing address order, and no overlaps are
+ * allowed. It is assumed that write permission implies read permission
+ * (i.e. only RO and RW permissions should be used). Ranges with write
+ * permission must not be split up.
+ */
+
+#define NO_RANGE {.count = 0, .range = NULL,}
+
+static struct
+ab3550_reg_ranges ab3550_reg_ranges[AB3550_NUM_DEVICES][AB3550_NUM_BANKS] = {
+ [AB3550_DEVID_DAC] = {
+ NO_RANGE,
+ {
+ .count = 2,
+ .range = (struct ab3550_reg_range[]) {
+ {
+ .first = 0xb0,
+ .last = 0xba,
+ .perm = AB3550_PERM_RW,
+ },
+ {
+ .first = 0xbc,
+ .last = 0xc3,
+ .perm = AB3550_PERM_RW,
+ },
+ },
+ },
+ },
+ [AB3550_DEVID_LEDS] = {
+ NO_RANGE,
+ {
+ .count = 2,
+ .range = (struct ab3550_reg_range[]) {
+ {
+ .first = 0x5a,
+ .last = 0x88,
+ .perm = AB3550_PERM_RW,
+ },
+ {
+ .first = 0x8a,
+ .last = 0xad,
+ .perm = AB3550_PERM_RW,
+ },
+ }
+ },
+ },
+ [AB3550_DEVID_POWER] = {
+ {
+ .count = 1,
+ .range = (struct ab3550_reg_range[]) {
+ {
+ .first = 0x21,
+ .last = 0x21,
+ .perm = AB3550_PERM_RO,
+ },
+ }
+ },
+ NO_RANGE,
+ },
+ [AB3550_DEVID_REGULATORS] = {
+ {
+ .count = 1,
+ .range = (struct ab3550_reg_range[]) {
+ {
+ .first = 0x69,
+ .last = 0xa3,
+ .perm = AB3550_PERM_RW,
+ },
+ }
+ },
+ {
+ .count = 1,
+ .range = (struct ab3550_reg_range[]) {
+ {
+ .first = 0x14,
+ .last = 0x16,
+ .perm = AB3550_PERM_RW,
+ },
+ }
+ },
+ },
+ [AB3550_DEVID_SIM] = {
+ {
+ .count = 1,
+ .range = (struct ab3550_reg_range[]) {
+ {
+ .first = 0x21,
+ .last = 0x21,
+ .perm = AB3550_PERM_RO,
+ },
+ }
+ },
+ {
+ .count = 1,
+ .range = (struct ab3550_reg_range[]) {
+ {
+ .first = 0x14,
+ .last = 0x17,
+ .perm = AB3550_PERM_RW,
+ },
+ }
+
+ },
+ },
+ [AB3550_DEVID_UART] = {
+ NO_RANGE,
+ NO_RANGE,
+ },
+ [AB3550_DEVID_RTC] = {
+ {
+ .count = 1,
+ .range = (struct ab3550_reg_range[]) {
+ {
+ .first = 0x00,
+ .last = 0x0c,
+ .perm = AB3550_PERM_RW,
+ },
+ }
+ },
+ NO_RANGE,
+ },
+ [AB3550_DEVID_CHARGER] = {
+ {
+ .count = 2,
+ .range = (struct ab3550_reg_range[]) {
+ {
+ .first = 0x10,
+ .last = 0x1a,
+ .perm = AB3550_PERM_RW,
+ },
+ {
+ .first = 0x21,
+ .last = 0x21,
+ .perm = AB3550_PERM_RO,
+ },
+ }
+ },
+ NO_RANGE,
+ },
+ [AB3550_DEVID_ADC] = {
+ NO_RANGE,
+ {
+ .count = 1,
+ .range = (struct ab3550_reg_range[]) {
+ {
+ .first = 0x20,
+ .last = 0x56,
+ .perm = AB3550_PERM_RW,
+ },
+
+ }
+ },
+ },
+ [AB3550_DEVID_FUELGAUGE] = {
+ {
+ .count = 1,
+ .range = (struct ab3550_reg_range[]) {
+ {
+ .first = 0x21,
+ .last = 0x21,
+ .perm = AB3550_PERM_RO,
+ },
+ }
+ },
+ {
+ .count = 1,
+ .range = (struct ab3550_reg_range[]) {
+ {
+ .first = 0x00,
+ .last = 0x0e,
+ .perm = AB3550_PERM_RW,
+ },
+ }
+ },
+ },
+ [AB3550_DEVID_VIBRATOR] = {
+ NO_RANGE,
+ {
+ .count = 1,
+ .range = (struct ab3550_reg_range[]) {
+ {
+ .first = 0x10,
+ .last = 0x13,
+ .perm = AB3550_PERM_RW,
+ },
+
+ }
+ },
+ },
+ [AB3550_DEVID_CODEC] = {
+ {
+ .count = 2,
+ .range = (struct ab3550_reg_range[]) {
+ {
+ .first = 0x31,
+ .last = 0x63,
+ .perm = AB3550_PERM_RW,
+ },
+ {
+ .first = 0x65,
+ .last = 0x68,
+ .perm = AB3550_PERM_RW,
+ },
+ }
+ },
+ NO_RANGE,
+ },
+};
+
+static struct mfd_cell ab3550_devs[AB3550_NUM_DEVICES] = {
+ [AB3550_DEVID_DAC] = {
+ .name = "ab3550-dac",
+ .id = AB3550_DEVID_DAC,
+ .num_resources = 0,
+ },
+ [AB3550_DEVID_LEDS] = {
+ .name = "ab3550-leds",
+ .id = AB3550_DEVID_LEDS,
+ },
+ [AB3550_DEVID_POWER] = {
+ .name = "ab3550-power",
+ .id = AB3550_DEVID_POWER,
+ },
+ [AB3550_DEVID_REGULATORS] = {
+ .name = "ab3550-regulators",
+ .id = AB3550_DEVID_REGULATORS,
+ },
+ [AB3550_DEVID_SIM] = {
+ .name = "ab3550-sim",
+ .id = AB3550_DEVID_SIM,
+ },
+ [AB3550_DEVID_UART] = {
+ .name = "ab3550-uart",
+ .id = AB3550_DEVID_UART,
+ },
+ [AB3550_DEVID_RTC] = {
+ .name = "ab3550-rtc",
+ .id = AB3550_DEVID_RTC,
+ },
+ [AB3550_DEVID_CHARGER] = {
+ .name = "ab3550-charger",
+ .id = AB3550_DEVID_CHARGER,
+ },
+ [AB3550_DEVID_ADC] = {
+ .name = "ab3550-adc",
+ .id = AB3550_DEVID_ADC,
+ .num_resources = 10,
+ .resources = (struct resource[]) {
+ {
+ .name = "TRIGGER-0",
+ .flags = IORESOURCE_IRQ,
+ .start = 16,
+ .end = 16,
+ },
+ {
+ .name = "TRIGGER-1",
+ .flags = IORESOURCE_IRQ,
+ .start = 17,
+ .end = 17,
+ },
+ {
+ .name = "TRIGGER-2",
+ .flags = IORESOURCE_IRQ,
+ .start = 18,
+ .end = 18,
+ },
+ {
+ .name = "TRIGGER-3",
+ .flags = IORESOURCE_IRQ,
+ .start = 19,
+ .end = 19,
+ },
+ {
+ .name = "TRIGGER-4",
+ .flags = IORESOURCE_IRQ,
+ .start = 20,
+ .end = 20,
+ },
+ {
+ .name = "TRIGGER-5",
+ .flags = IORESOURCE_IRQ,
+ .start = 21,
+ .end = 21,
+ },
+ {
+ .name = "TRIGGER-6",
+ .flags = IORESOURCE_IRQ,
+ .start = 22,
+ .end = 22,
+ },
+ {
+ .name = "TRIGGER-7",
+ .flags = IORESOURCE_IRQ,
+ .start = 23,
+ .end = 23,
+ },
+ {
+ .name = "TRIGGER-VBAT-TXON",
+ .flags = IORESOURCE_IRQ,
+ .start = 13,
+ .end = 13,
+ },
+ {
+ .name = "TRIGGER-VBAT",
+ .flags = IORESOURCE_IRQ,
+ .start = 12,
+ .end = 12,
+ },
+ },
+ },
+ [AB3550_DEVID_FUELGAUGE] = {
+ .name = "ab3550-fuelgauge",
+ .id = AB3550_DEVID_FUELGAUGE,
+ },
+ [AB3550_DEVID_VIBRATOR] = {
+ .name = "ab3550-vibrator",
+ .id = AB3550_DEVID_VIBRATOR,
+ },
+ [AB3550_DEVID_CODEC] = {
+ .name = "ab3550-codec",
+ .id = AB3550_DEVID_CODEC,
+ },
+};
+
+/*
+ * I2C transactions with error messages.
+ */
+static int ab3550_i2c_master_send(struct ab3550 *ab, u8 bank, u8 *data,
+ u8 count)
+{
+ int err;
+
+ err = i2c_master_send(ab->i2c_client[bank], data, count);
+ if (err < 0) {
+ dev_err(&ab->i2c_client[0]->dev, "send error: %d\n", err);
+ return err;
+ }
+ return 0;
+}
+
+static int ab3550_i2c_master_recv(struct ab3550 *ab, u8 bank, u8 *data,
+ u8 count)
+{
+ int err;
+
+ err = i2c_master_recv(ab->i2c_client[bank], data, count);
+ if (err < 0) {
+ dev_err(&ab->i2c_client[0]->dev, "receive error: %d\n", err);
+ return err;
+ }
+ return 0;
+}
+
+/*
+ * Functionality for getting/setting register values.
+ */
+static int get_register_interruptible(struct ab3550 *ab, u8 bank, u8 reg,
+ u8 *value)
+{
+ int err;
+
+ err = mutex_lock_interruptible(&ab->access_mutex);
+ if (err)
+ return err;
+
+ err = ab3550_i2c_master_send(ab, bank, &reg, 1);
+ if (!err)
+ err = ab3550_i2c_master_recv(ab, bank, value, 1);
+
+ mutex_unlock(&ab->access_mutex);
+ return err;
+}
+
+static int get_register_page_interruptible(struct ab3550 *ab, u8 bank,
+ u8 first_reg, u8 *regvals, u8 numregs)
+{
+ int err;
+
+ err = mutex_lock_interruptible(&ab->access_mutex);
+ if (err)
+ return err;
+
+ err = ab3550_i2c_master_send(ab, bank, &first_reg, 1);
+ if (!err)
+ err = ab3550_i2c_master_recv(ab, bank, regvals, numregs);
+
+ mutex_unlock(&ab->access_mutex);
+ return err;
+}
+
+static int mask_and_set_register_interruptible(struct ab3550 *ab, u8 bank,
+ u8 reg, u8 bitmask, u8 bitvalues)
+{
+ int err = 0;
+
+ if (likely(bitmask)) {
+ u8 reg_bits[2] = {reg, 0};
+
+ err = mutex_lock_interruptible(&ab->access_mutex);
+ if (err)
+ return err;
+
+ if (bitmask == 0xFF) /* No need to read in this case. */
+ reg_bits[1] = bitvalues;
+ else { /* Read and modify the register value. */
+ u8 bits;
+
+ err = ab3550_i2c_master_send(ab, bank, &reg, 1);
+ if (err)
+ goto unlock_and_return;
+ err = ab3550_i2c_master_recv(ab, bank, &bits, 1);
+ if (err)
+ goto unlock_and_return;
+ reg_bits[1] = ((~bitmask & bits) |
+ (bitmask & bitvalues));
+ }
+ /* Write the new value. */
+ err = ab3550_i2c_master_send(ab, bank, reg_bits, 2);
+unlock_and_return:
+ mutex_unlock(&ab->access_mutex);
+ }
+ return err;
+}
+
+/*
+ * Read/write permission checking functions.
+ */
+static bool page_write_allowed(const struct ab3550_reg_ranges *ranges,
+ u8 first_reg, u8 last_reg)
+{
+ u8 i;
+
+ if (last_reg < first_reg)
+ return false;
+
+ for (i = 0; i < ranges->count; i++) {
+ if (first_reg < ranges->range[i].first)
+ break;
+ if ((last_reg <= ranges->range[i].last) &&
+ (ranges->range[i].perm & AB3550_PERM_WR))
+ return true;
+ }
+ return false;
+}
+
+static bool reg_write_allowed(const struct ab3550_reg_ranges *ranges, u8 reg)
+{
+ return page_write_allowed(ranges, reg, reg);
+}
+
+static bool page_read_allowed(const struct ab3550_reg_ranges *ranges,
+ u8 first_reg, u8 last_reg)
+{
+ u8 i;
+
+ if (last_reg < first_reg)
+ return false;
+ /* Find the range (if it exists in the list) that includes first_reg. */
+ for (i = 0; i < ranges->count; i++) {
+ if (first_reg < ranges->range[i].first)
+ return false;
+ if (first_reg <= ranges->range[i].last)
+ break;
+ }
+ /* Make sure that the entire range up to and including last_reg is
+ * readable. This may span several of the ranges in the list.
+ */
+ while ((i < ranges->count) &&
+ (ranges->range[i].perm & AB3550_PERM_RD)) {
+ if (last_reg <= ranges->range[i].last)
+ return true;
+ if ((++i >= ranges->count) ||
+ (ranges->range[i].first !=
+ (ranges->range[i - 1].last + 1))) {
+ break;
+ }
+ }
+ return false;
+}
+
+static bool reg_read_allowed(const struct ab3550_reg_ranges *ranges, u8 reg)
+{
+ return page_read_allowed(ranges, reg, reg);
+}
+
+/*
+ * The exported register access functionality.
+ */
+int ab3550_get_chip_id(struct device *dev)
+{
+	struct ab3550 *ab = dev_get_drvdata(dev->parent);
+
+	return (int)ab->chip_id;
+}
+
+int ab3550_mask_and_set_register_interruptible(struct device *dev, u8 bank,
+ u8 reg, u8 bitmask, u8 bitvalues)
+{
+ struct ab3550 *ab;
+ struct platform_device *pdev = to_platform_device(dev);
+
+ if ((AB3550_NUM_BANKS <= bank) ||
+ !reg_write_allowed(&ab3550_reg_ranges[pdev->id][bank], reg))
+ return -EINVAL;
+
+ ab = dev_get_drvdata(dev->parent);
+ return mask_and_set_register_interruptible(ab, bank, reg,
+ bitmask, bitvalues);
+}
+
+int ab3550_set_register_interruptible(struct device *dev, u8 bank, u8 reg,
+ u8 value)
+{
+ return ab3550_mask_and_set_register_interruptible(dev, bank, reg, 0xFF,
+ value);
+}
+
+int ab3550_get_register_interruptible(struct device *dev, u8 bank, u8 reg,
+ u8 *value)
+{
+ struct ab3550 *ab;
+ struct platform_device *pdev = to_platform_device(dev);
+
+ if ((AB3550_NUM_BANKS <= bank) ||
+ !reg_read_allowed(&ab3550_reg_ranges[pdev->id][bank], reg))
+ return -EINVAL;
+
+ ab = dev_get_drvdata(dev->parent);
+ return get_register_interruptible(ab, bank, reg, value);
+}
+
+int ab3550_get_register_page_interruptible(struct device *dev, u8 bank,
+ u8 first_reg, u8 *regvals, u8 numregs)
+{
+ struct ab3550 *ab;
+ struct platform_device *pdev = to_platform_device(dev);
+
+ if ((AB3550_NUM_BANKS <= bank) ||
+ !page_read_allowed(&ab3550_reg_ranges[pdev->id][bank],
+ first_reg, (first_reg + numregs - 1)))
+ return -EINVAL;
+
+ ab = dev_get_drvdata(dev->parent);
+ return get_register_page_interruptible(ab, bank, first_reg, regvals,
+ numregs);
+}
+
+int ab3550_event_registers_startup_state_get(struct device *dev, u8 *event)
+{
+ struct ab3550 *ab;
+
+ ab = dev_get_drvdata(dev->parent);
+ if (!ab->startup_events_read)
+ return -EAGAIN; /* Try again later */
+
+ memcpy(event, ab->startup_events, AB3550_NUM_EVENT_REG);
+ return 0;
+}
+
+int ab3550_startup_irq_enabled(struct device *dev, unsigned int irq)
+{
+ struct ab3550 *ab;
+ struct ab3550_platform_data *plf_data;
+ bool val;
+
+ ab = get_irq_chip_data(irq);
+ plf_data = ab->i2c_client[0]->dev.platform_data;
+ irq -= plf_data->irq.base;
+ val = ((ab->startup_events[irq / 8] & BIT(irq % 8)) != 0);
+
+ return val;
+}
+
+static struct abx500_ops ab3550_ops = {
+ .get_chip_id = ab3550_get_chip_id,
+ .get_register = ab3550_get_register_interruptible,
+ .set_register = ab3550_set_register_interruptible,
+ .get_register_page = ab3550_get_register_page_interruptible,
+ .set_register_page = NULL,
+ .mask_and_set_register = ab3550_mask_and_set_register_interruptible,
+ .event_registers_startup_state_get =
+ ab3550_event_registers_startup_state_get,
+ .startup_irq_enabled = ab3550_startup_irq_enabled,
+};
+
+static irqreturn_t ab3550_irq_handler(int irq, void *data)
+{
+ struct ab3550 *ab = data;
+ int err;
+ unsigned int i;
+ u8 e[AB3550_NUM_EVENT_REG];
+ u8 *events;
+ unsigned long flags;
+
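+	/* The first readout after boot is saved as the startup events */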
+ events = (ab->startup_events_read ? e : ab->startup_events);
+
+ err = get_register_page_interruptible(ab, AB3550_EVENT_BANK,
+ AB3550_EVENT_REG, events, AB3550_NUM_EVENT_REG);
+ if (err)
+ goto err_event_rd;
+
+ if (!ab->startup_events_read) {
+ dev_info(&ab->i2c_client[0]->dev,
+ "startup events 0x%x,0x%x,0x%x,0x%x,0x%x\n",
+ ab->startup_events[0], ab->startup_events[1],
+ ab->startup_events[2], ab->startup_events[3],
+ ab->startup_events[4]);
+ ab->startup_events_read = true;
+ goto out;
+ }
+
+ /* The two highest bits in event[4] are not used. */
+ events[4] &= 0x3f;
+
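+	/* Filter out events that are currently masked */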
+ spin_lock_irqsave(&ab->event_lock, flags);
+ for (i = 0; i < AB3550_NUM_EVENT_REG; i++)
+ events[i] &= ~ab->event_mask[i];
+ spin_unlock_irqrestore(&ab->event_lock, flags);
+
+ for (i = 0; i < AB3550_NUM_EVENT_REG; i++) {
+ u8 bit;
+ u8 event_reg;
+
+ dev_dbg(&ab->i2c_client[0]->dev, "IRQ Event[%d]: 0x%2x\n",
+ i, events[i]);
+
+ event_reg = events[i];
+ for (bit = 0; event_reg; bit++, event_reg /= 2) {
+ if (event_reg % 2) {
+ unsigned int irq;
+ struct ab3550_platform_data *plf_data;
+
+ plf_data = ab->i2c_client[0]->dev.platform_data;
+ irq = plf_data->irq.base + (i * 8) + bit;
+ handle_nested_irq(irq);
+ }
+ }
+ }
+out:
+ return IRQ_HANDLED;
+
+err_event_rd:
+ dev_dbg(&ab->i2c_client[0]->dev, "error reading event registers\n");
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static struct ab3550_reg_ranges debug_ranges[AB3550_NUM_BANKS] = {
+ {
+ .count = 6,
+ .range = (struct ab3550_reg_range[]) {
+ {
+ .first = 0x00,
+ .last = 0x0e,
+ },
+ {
+ .first = 0x10,
+ .last = 0x1a,
+ },
+ {
+ .first = 0x1e,
+ .last = 0x4f,
+ },
+ {
+ .first = 0x51,
+ .last = 0x63,
+ },
+ {
+ .first = 0x65,
+ .last = 0xa3,
+ },
+ {
+ .first = 0xa5,
+ .last = 0xa8,
+ },
+ }
+ },
+ {
+ .count = 8,
+ .range = (struct ab3550_reg_range[]) {
+ {
+ .first = 0x00,
+ .last = 0x0e,
+ },
+ {
+ .first = 0x10,
+ .last = 0x17,
+ },
+ {
+ .first = 0x1a,
+ .last = 0x1c,
+ },
+ {
+ .first = 0x20,
+ .last = 0x56,
+ },
+ {
+ .first = 0x5a,
+ .last = 0x88,
+ },
+ {
+ .first = 0x8a,
+ .last = 0xad,
+ },
+ {
+ .first = 0xb0,
+ .last = 0xba,
+ },
+ {
+ .first = 0xbc,
+ .last = 0xc3,
+ },
+ }
+ },
+};
+
+static int ab3550_registers_print(struct seq_file *s, void *p)
+{
+ struct ab3550 *ab = s->private;
+ int bank;
+
+ seq_printf(s, AB3550_NAME_STRING " register values:\n");
+
+ for (bank = 0; bank < AB3550_NUM_BANKS; bank++) {
+ unsigned int i;
+
+ seq_printf(s, " bank %d:\n", bank);
+ for (i = 0; i < debug_ranges[bank].count; i++) {
+ u8 reg;
+
+ for (reg = debug_ranges[bank].range[i].first;
+ reg <= debug_ranges[bank].range[i].last;
+ reg++) {
+ u8 value;
+
+ get_register_interruptible(ab, bank, reg,
+ &value);
+ seq_printf(s, " [%d/0x%02X]: 0x%02X\n", bank,
+ reg, value);
+ }
+ }
+ }
+ return 0;
+}
+
+static int ab3550_registers_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ab3550_registers_print, inode->i_private);
+}
+
+static const struct file_operations ab3550_registers_fops = {
+ .open = ab3550_registers_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int ab3550_bank_print(struct seq_file *s, void *p)
+{
+ struct ab3550 *ab = s->private;
+
+ seq_printf(s, "%d\n", ab->debug_bank);
+ return 0;
+}
+
+static int ab3550_bank_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ab3550_bank_print, inode->i_private);
+}
+
+static ssize_t ab3550_bank_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ab3550 *ab = ((struct seq_file *)(file->private_data))->private;
+ char buf[32];
+ int buf_size;
+ unsigned long user_bank;
+ int err;
+
+	/* Get userspace string and ensure termination */
+ buf_size = min(count, (sizeof(buf) - 1));
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ buf[buf_size] = 0;
+
+ err = strict_strtoul(buf, 0, &user_bank);
+ if (err)
+ return -EINVAL;
+
+ if (user_bank >= AB3550_NUM_BANKS) {
+ dev_err(&ab->i2c_client[0]->dev,
+ "debugfs error input > number of banks\n");
+ return -EINVAL;
+ }
+
+ ab->debug_bank = user_bank;
+
+ return buf_size;
+}
+
+static int ab3550_address_print(struct seq_file *s, void *p)
+{
+ struct ab3550 *ab = s->private;
+
+ seq_printf(s, "0x%02X\n", ab->debug_address);
+ return 0;
+}
+
+static int ab3550_address_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ab3550_address_print, inode->i_private);
+}
+
+static ssize_t ab3550_address_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ab3550 *ab = ((struct seq_file *)(file->private_data))->private;
+ char buf[32];
+ int buf_size;
+ unsigned long user_address;
+ int err;
+
+	/* Get userspace string and ensure termination */
+ buf_size = min(count, (sizeof(buf) - 1));
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ buf[buf_size] = 0;
+
+ err = strict_strtoul(buf, 0, &user_address);
+ if (err)
+ return -EINVAL;
+ if (user_address > 0xff) {
+ dev_err(&ab->i2c_client[0]->dev,
+ "debugfs error input > 0xff\n");
+ return -EINVAL;
+ }
+ ab->debug_address = user_address;
+ return buf_size;
+}
+
+static int ab3550_val_print(struct seq_file *s, void *p)
+{
+ struct ab3550 *ab = s->private;
+ int err;
+ u8 regvalue;
+
+ err = get_register_interruptible(ab, (u8)ab->debug_bank,
+ (u8)ab->debug_address, &regvalue);
+ if (err)
+ return -EINVAL;
+ seq_printf(s, "0x%02X\n", regvalue);
+
+ return 0;
+}
+
+static int ab3550_val_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ab3550_val_print, inode->i_private);
+}
+
+static ssize_t ab3550_val_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ab3550 *ab = ((struct seq_file *)(file->private_data))->private;
+ char buf[32];
+ int buf_size;
+ unsigned long user_val;
+ int err;
+ u8 regvalue;
+
+	/* Get userspace string and ensure termination */
+ buf_size = min(count, (sizeof(buf)-1));
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ buf[buf_size] = 0;
+
+ err = strict_strtoul(buf, 0, &user_val);
+ if (err)
+ return -EINVAL;
+ if (user_val > 0xff) {
+ dev_err(&ab->i2c_client[0]->dev,
+ "debugfs error input > 0xff\n");
+ return -EINVAL;
+ }
+ err = mask_and_set_register_interruptible(
+ ab, (u8)ab->debug_bank,
+ (u8)ab->debug_address, 0xFF, (u8)user_val);
+ if (err)
+ return -EINVAL;
+
+	err = get_register_interruptible(ab, (u8)ab->debug_bank,
+ (u8)ab->debug_address, &regvalue);
+ if (err)
+ return -EINVAL;
+
+ return buf_size;
+}
+
+static const struct file_operations ab3550_bank_fops = {
+ .open = ab3550_bank_open,
+ .write = ab3550_bank_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static const struct file_operations ab3550_address_fops = {
+ .open = ab3550_address_open,
+ .write = ab3550_address_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static const struct file_operations ab3550_val_fops = {
+ .open = ab3550_val_open,
+ .write = ab3550_val_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static struct dentry *ab3550_dir;
+static struct dentry *ab3550_reg_file;
+static struct dentry *ab3550_bank_file;
+static struct dentry *ab3550_address_file;
+static struct dentry *ab3550_val_file;
+
+static inline void ab3550_setup_debugfs(struct ab3550 *ab)
+{
+ ab->debug_bank = 0;
+ ab->debug_address = 0x00;
+
+ ab3550_dir = debugfs_create_dir(AB3550_NAME_STRING, NULL);
+ if (!ab3550_dir)
+ goto exit_no_debugfs;
+
+ ab3550_reg_file = debugfs_create_file("all-registers",
+ S_IRUGO, ab3550_dir, ab, &ab3550_registers_fops);
+ if (!ab3550_reg_file)
+ goto exit_destroy_dir;
+
+ ab3550_bank_file = debugfs_create_file("register-bank",
+ (S_IRUGO | S_IWUGO), ab3550_dir, ab, &ab3550_bank_fops);
+ if (!ab3550_bank_file)
+ goto exit_destroy_reg;
+
+ ab3550_address_file = debugfs_create_file("register-address",
+ (S_IRUGO | S_IWUGO), ab3550_dir, ab, &ab3550_address_fops);
+ if (!ab3550_address_file)
+ goto exit_destroy_bank;
+
+ ab3550_val_file = debugfs_create_file("register-value",
+ (S_IRUGO | S_IWUGO), ab3550_dir, ab, &ab3550_val_fops);
+ if (!ab3550_val_file)
+ goto exit_destroy_address;
+
+ return;
+
+exit_destroy_address:
+ debugfs_remove(ab3550_address_file);
+exit_destroy_bank:
+ debugfs_remove(ab3550_bank_file);
+exit_destroy_reg:
+ debugfs_remove(ab3550_reg_file);
+exit_destroy_dir:
+ debugfs_remove(ab3550_dir);
+exit_no_debugfs:
+ dev_err(&ab->i2c_client[0]->dev, "failed to create debugfs entries.\n");
+ return;
+}
+
+static inline void ab3550_remove_debugfs(void)
+{
+ debugfs_remove(ab3550_val_file);
+ debugfs_remove(ab3550_address_file);
+ debugfs_remove(ab3550_bank_file);
+ debugfs_remove(ab3550_reg_file);
+ debugfs_remove(ab3550_dir);
+}
+
+#else /* !CONFIG_DEBUG_FS */
+static inline void ab3550_setup_debugfs(struct ab3550 *ab)
+{
+}
+static inline void ab3550_remove_debugfs(void)
+{
+}
+#endif
+
+/*
+ * Basic set-up, datastructure creation/destruction and I2C interface.
+ * This sets up a default config in the AB3550 chip so that it
+ * will work as expected.
+ */
+static int __init ab3550_setup(struct ab3550 *ab)
+{
+ int err = 0;
+ int i;
+ struct ab3550_platform_data *plf_data;
+ struct abx500_init_settings *settings;
+
+ plf_data = ab->i2c_client[0]->dev.platform_data;
+ settings = plf_data->init_settings;
+
+ for (i = 0; i < plf_data->init_settings_sz; i++) {
+ err = mask_and_set_register_interruptible(ab,
+ settings[i].bank,
+ settings[i].reg,
+ 0xFF, settings[i].setting);
+ if (err)
+ goto exit_no_setup;
+
+		/* If this is an event mask register, update the local copy */
+ if ((settings[i].bank == 0) &&
+ (AB3550_IMR1 <= settings[i].reg) &&
+ (settings[i].reg <= AB3550_IMR5)) {
+ ab->event_mask[settings[i].reg - AB3550_IMR1] =
+ settings[i].setting;
+ }
+ }
+exit_no_setup:
+ return err;
+}
+
+static void ab3550_mask_work(struct work_struct *work)
+{
+ struct ab3550 *ab = container_of(work, struct ab3550, mask_work);
+ int i;
+ unsigned long flags;
+ u8 mask[AB3550_NUM_EVENT_REG];
+
+ spin_lock_irqsave(&ab->event_lock, flags);
+ for (i = 0; i < AB3550_NUM_EVENT_REG; i++)
+ mask[i] = ab->event_mask[i];
+ spin_unlock_irqrestore(&ab->event_lock, flags);
+
+ for (i = 0; i < AB3550_NUM_EVENT_REG; i++) {
+ int err;
+
+ err = mask_and_set_register_interruptible(ab, 0,
+ (AB3550_IMR1 + i), ~0, mask[i]);
+ if (err)
+ dev_err(&ab->i2c_client[0]->dev,
+ "ab3550_mask_work failed 0x%x,0x%x\n",
+ (AB3550_IMR1 + i), mask[i]);
+ }
+}
+
+static void ab3550_mask(unsigned int irq)
+{
+ unsigned long flags;
+ struct ab3550 *ab;
+ struct ab3550_platform_data *plf_data;
+
+ ab = get_irq_chip_data(irq);
+ plf_data = ab->i2c_client[0]->dev.platform_data;
+ irq -= plf_data->irq.base;
+
+ spin_lock_irqsave(&ab->event_lock, flags);
+ ab->event_mask[irq / 8] |= BIT(irq % 8);
+ spin_unlock_irqrestore(&ab->event_lock, flags);
+
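+	/* Defer the actual I2C mask register update to the work queue */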
+ schedule_work(&ab->mask_work);
+}
+
+static void ab3550_unmask(unsigned int irq)
+{
+ unsigned long flags;
+ struct ab3550 *ab;
+ struct ab3550_platform_data *plf_data;
+
+ ab = get_irq_chip_data(irq);
+ plf_data = ab->i2c_client[0]->dev.platform_data;
+ irq -= plf_data->irq.base;
+
+ spin_lock_irqsave(&ab->event_lock, flags);
+ ab->event_mask[irq / 8] &= ~BIT(irq % 8);
+ spin_unlock_irqrestore(&ab->event_lock, flags);
+
+ schedule_work(&ab->mask_work);
+}
+
+static void noop(unsigned int irq)
+{
+}
+
+static struct irq_chip ab3550_irq_chip = {
+ .name = "ab3550-core", /* Keep the same name as the request */
+ .startup = NULL, /* defaults to enable */
+ .shutdown = NULL, /* defaults to disable */
+ .enable = NULL, /* defaults to unmask */
+	.disable	= ab3550_mask, /* chip.c provides no default to mask */
+ .ack = noop,
+ .mask = ab3550_mask,
+ .unmask = ab3550_unmask,
+ .end = NULL,
+};
+
+struct ab_family_id {
+ u8 id;
+ char *name;
+};
+
+static const struct ab_family_id ids[] __initdata = {
+ /* AB3550 */
+ {
+ .id = AB3550_P1A,
+ .name = "P1A"
+ },
+ /* Terminator */
+ {
+ .id = 0x00,
+ }
+};
+
+static int __init ab3550_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct ab3550 *ab;
+ struct ab3550_platform_data *ab3550_plf_data =
+ client->dev.platform_data;
+ int err;
+ int i;
+ int num_i2c_clients = 0;
+
+ ab = kzalloc(sizeof(struct ab3550), GFP_KERNEL);
+ if (!ab) {
+ dev_err(&client->dev,
+ "could not allocate " AB3550_NAME_STRING " device\n");
+ return -ENOMEM;
+ }
+
+ /* Initialize data structure */
+ mutex_init(&ab->access_mutex);
+ spin_lock_init(&ab->event_lock);
+ ab->i2c_client[0] = client;
+
+ i2c_set_clientdata(client, ab);
+
+ /* Read chip ID register */
+ err = get_register_interruptible(ab, 0, AB3550_CID_REG, &ab->chip_id);
+ if (err) {
+ dev_err(&client->dev, "could not communicate with the analog "
+ "baseband chip\n");
+ goto exit_no_detect;
+ }
+
+ for (i = 0; ids[i].id != 0x0; i++) {
+ if (ids[i].id == ab->chip_id) {
+ snprintf(&ab->chip_name[0], sizeof(ab->chip_name) - 1,
+ AB3550_ID_FORMAT_STRING, ids[i].name);
+ break;
+ }
+ }
+
+ if (ids[i].id == 0x0) {
+ dev_err(&client->dev, "unknown analog baseband chip id: 0x%x\n",
+ ab->chip_id);
+ dev_err(&client->dev, "driver not started!\n");
+ goto exit_no_detect;
+ }
+
+ dev_info(&client->dev, "detected AB chip: %s\n", &ab->chip_name[0]);
+
+ /* Attach other dummy I2C clients. */
+ while (++num_i2c_clients < AB3550_NUM_BANKS) {
+ ab->i2c_client[num_i2c_clients] =
+ i2c_new_dummy(client->adapter,
+ (client->addr + num_i2c_clients));
+ if (!ab->i2c_client[num_i2c_clients]) {
+ err = -ENOMEM;
+ goto exit_no_dummy_client;
+ }
+ strlcpy(ab->i2c_client[num_i2c_clients]->name, id->name,
+ sizeof(ab->i2c_client[num_i2c_clients]->name));
+ }
+
+ err = ab3550_setup(ab);
+ if (err)
+ goto exit_no_setup;
+
+ INIT_WORK(&ab->mask_work, ab3550_mask_work);
+
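+	/* Set up a nested interrupt for every AB3550 event line */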
+ for (i = 0; i < ab3550_plf_data->irq.count; i++) {
+ unsigned int irq;
+
+ irq = ab3550_plf_data->irq.base + i;
+ set_irq_chip_data(irq, ab);
+ set_irq_chip_and_handler(irq, &ab3550_irq_chip,
+ handle_simple_irq);
+ set_irq_nested_thread(irq, 1);
+#ifdef CONFIG_ARM
+ set_irq_flags(irq, IRQF_VALID);
+#else
+ set_irq_noprobe(irq);
+#endif
+ }
+
+ err = request_threaded_irq(client->irq, NULL, ab3550_irq_handler,
+ IRQF_ONESHOT, "ab3550-core", ab);
+	/* This truly unpredictable IRQ is of course sampled for entropy */
+ rand_initialize_irq(client->irq);
+
+ if (err)
+ goto exit_no_irq;
+
+ err = abx500_register_ops(&client->dev, &ab3550_ops);
+ if (err)
+ goto exit_no_ops;
+
+ /* Set up and register the platform devices. */
+ for (i = 0; i < AB3550_NUM_DEVICES; i++) {
+ ab3550_devs[i].platform_data = ab3550_plf_data->dev_data[i];
+ ab3550_devs[i].data_size = ab3550_plf_data->dev_data_sz[i];
+ }
+
+ err = mfd_add_devices(&client->dev, 0, ab3550_devs,
+ ARRAY_SIZE(ab3550_devs), NULL,
+ ab3550_plf_data->irq.base);
+
+ ab3550_setup_debugfs(ab);
+
+ return 0;
+
+exit_no_ops:
+exit_no_irq:
+exit_no_setup:
+exit_no_dummy_client:
+ /* Unregister the dummy i2c clients. */
+ while (--num_i2c_clients)
+ i2c_unregister_device(ab->i2c_client[num_i2c_clients]);
+exit_no_detect:
+ kfree(ab);
+ return err;
+}
+
+static int __exit ab3550_remove(struct i2c_client *client)
+{
+ struct ab3550 *ab = i2c_get_clientdata(client);
+ int num_i2c_clients = AB3550_NUM_BANKS;
+
+ mfd_remove_devices(&client->dev);
+ ab3550_remove_debugfs();
+
+ while (--num_i2c_clients)
+ i2c_unregister_device(ab->i2c_client[num_i2c_clients]);
+
+ /*
+ * At this point, all subscribers should have unregistered
+ * their notifiers so deactivate IRQ
+ */
+ free_irq(client->irq, ab);
+ i2c_set_clientdata(client, NULL);
+ kfree(ab);
+ return 0;
+}
+
+static const struct i2c_device_id ab3550_id[] = {
+ {AB3550_NAME_STRING, 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, ab3550_id);
+
+static struct i2c_driver ab3550_driver = {
+ .driver = {
+ .name = AB3550_NAME_STRING,
+ .owner = THIS_MODULE,
+ },
+ .id_table = ab3550_id,
+ .probe = ab3550_probe,
+ .remove = __exit_p(ab3550_remove),
+};
+
+static int __init ab3550_i2c_init(void)
+{
+ return i2c_add_driver(&ab3550_driver);
+}
+
+static void __exit ab3550_i2c_exit(void)
+{
+ i2c_del_driver(&ab3550_driver);
+}
+
+subsys_initcall(ab3550_i2c_init);
+module_exit(ab3550_i2c_exit);
+
+MODULE_AUTHOR("Mattias Wallin <mattias.wallin@stericsson.com>");
+MODULE_DESCRIPTION("AB3550 core driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/ab4500-core.c b/drivers/mfd/ab4500-core.c
deleted file mode 100644
index c275daa3ab1a..000000000000
--- a/drivers/mfd/ab4500-core.c
+++ /dev/null
@@ -1,209 +0,0 @@
-/*
- * Copyright (C) 2009 ST-Ericsson
- *
- * Author: Srinidhi KASAGAR <srinidhi.kasagar@stericsson.com>
- *
- * This program is free software; you can redistribute it
- * and/or modify it under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation.
- *
- * AB4500 is a companion power management chip used with U8500.
- * On this platform, this is interfaced with SSP0 controller
- * which is a ARM primecell pl022.
- *
- * At the moment the module just exports read/write features.
- * Interrupt management to be added - TODO.
- */
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/spi/spi.h>
-#include <linux/mfd/ab4500.h>
-
-/* just required if probe fails, we need to
- * unregister the device
- */
-static struct spi_driver ab4500_driver;
-
-/*
- * This funtion writes to any AB4500 registers using
- * SPI protocol & before it writes it packs the data
- * in the below 24 bit frame format
- *
- * *|------------------------------------|
- * *| 23|22...18|17.......10|9|8|7......0|
- * *| r/w bank adr data |
- * * ------------------------------------
- *
- * This function shouldn't be called from interrupt
- * context
- */
-int ab4500_write(struct ab4500 *ab4500, unsigned char block,
- unsigned long addr, unsigned char data)
-{
- struct spi_transfer xfer;
- struct spi_message msg;
- int err;
- unsigned long spi_data =
- block << 18 | addr << 10 | data;
-
- mutex_lock(&ab4500->lock);
- ab4500->tx_buf[0] = spi_data;
- ab4500->rx_buf[0] = 0;
-
- xfer.tx_buf = ab4500->tx_buf;
- xfer.rx_buf = NULL;
- xfer.len = sizeof(unsigned long);
-
- spi_message_init(&msg);
- spi_message_add_tail(&xfer, &msg);
-
- err = spi_sync(ab4500->spi, &msg);
- mutex_unlock(&ab4500->lock);
-
- return err;
-}
-EXPORT_SYMBOL(ab4500_write);
-
-int ab4500_read(struct ab4500 *ab4500, unsigned char block,
- unsigned long addr)
-{
- struct spi_transfer xfer;
- struct spi_message msg;
- unsigned long spi_data =
- 1 << 23 | block << 18 | addr << 10;
-
- mutex_lock(&ab4500->lock);
- ab4500->tx_buf[0] = spi_data;
- ab4500->rx_buf[0] = 0;
-
- xfer.tx_buf = ab4500->tx_buf;
- xfer.rx_buf = ab4500->rx_buf;
- xfer.len = sizeof(unsigned long);
-
- spi_message_init(&msg);
- spi_message_add_tail(&xfer, &msg);
-
- spi_sync(ab4500->spi, &msg);
- mutex_unlock(&ab4500->lock);
-
- return ab4500->rx_buf[0];
-}
-EXPORT_SYMBOL(ab4500_read);
-
-/* ref: ab3100 core */
-#define AB4500_DEVICE(devname, devid) \
-static struct platform_device ab4500_##devname##_device = { \
- .name = devid, \
- .id = -1, \
-}
-
-/* list of childern devices of ab4500 - all are
- * not populated here - TODO
- */
-AB4500_DEVICE(charger, "ab4500-charger");
-AB4500_DEVICE(audio, "ab4500-audio");
-AB4500_DEVICE(usb, "ab4500-usb");
-AB4500_DEVICE(tvout, "ab4500-tvout");
-AB4500_DEVICE(sim, "ab4500-sim");
-AB4500_DEVICE(gpadc, "ab4500-gpadc");
-AB4500_DEVICE(clkmgt, "ab4500-clkmgt");
-AB4500_DEVICE(misc, "ab4500-misc");
-
-static struct platform_device *ab4500_platform_devs[] = {
- &ab4500_charger_device,
- &ab4500_audio_device,
- &ab4500_usb_device,
- &ab4500_tvout_device,
- &ab4500_sim_device,
- &ab4500_gpadc_device,
- &ab4500_clkmgt_device,
- &ab4500_misc_device,
-};
-
-static int __init ab4500_probe(struct spi_device *spi)
-{
- struct ab4500 *ab4500;
- unsigned char revision;
- int err = 0;
- int i;
-
- ab4500 = kzalloc(sizeof *ab4500, GFP_KERNEL);
- if (!ab4500) {
- dev_err(&spi->dev, "could not allocate AB4500\n");
- err = -ENOMEM;
- goto not_detect;
- }
-
- ab4500->spi = spi;
- spi_set_drvdata(spi, ab4500);
-
- mutex_init(&ab4500->lock);
-
- /* read the revision register */
- revision = ab4500_read(ab4500, AB4500_MISC, AB4500_REV_REG);
-
- /* revision id 0x0 is for early drop, 0x10 is for cut1.0 */
- if (revision == 0x0 || revision == 0x10)
- dev_info(&spi->dev, "Detected chip: %s, revision = %x\n",
- ab4500_driver.driver.name, revision);
- else {
- dev_err(&spi->dev, "unknown chip: 0x%x\n", revision);
- goto not_detect;
- }
-
- for (i = 0; i < ARRAY_SIZE(ab4500_platform_devs); i++) {
- ab4500_platform_devs[i]->dev.parent =
- &spi->dev;
- platform_set_drvdata(ab4500_platform_devs[i], ab4500);
- }
-
- /* register the ab4500 platform devices */
- platform_add_devices(ab4500_platform_devs,
- ARRAY_SIZE(ab4500_platform_devs));
-
- return err;
-
- not_detect:
- spi_unregister_driver(&ab4500_driver);
- kfree(ab4500);
- return err;
-}
-
-static int __devexit ab4500_remove(struct spi_device *spi)
-{
- struct ab4500 *ab4500 =
- spi_get_drvdata(spi);
-
- kfree(ab4500);
-
- return 0;
-}
-
-static struct spi_driver ab4500_driver = {
- .driver = {
- .name = "ab4500",
- .owner = THIS_MODULE,
- },
- .probe = ab4500_probe,
- .remove = __devexit_p(ab4500_remove)
-};
-
-static int __devinit ab4500_init(void)
-{
- return spi_register_driver(&ab4500_driver);
-}
-
-static void __exit ab4500_exit(void)
-{
- spi_unregister_driver(&ab4500_driver);
-}
-
-subsys_initcall(ab4500_init);
-module_exit(ab4500_exit);
-
-MODULE_AUTHOR("Srinidhi KASAGAR <srinidhi.kasagar@stericsson.com");
-MODULE_DESCRIPTION("AB4500 core driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
new file mode 100644
index 000000000000..f3d26fa9c34d
--- /dev/null
+++ b/drivers/mfd/ab8500-core.c
@@ -0,0 +1,444 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License Terms: GNU General Public License v2
+ * Author: Srinidhi Kasagar <srinidhi.kasagar@stericsson.com>
+ * Author: Rabin Vincent <rabin.vincent@stericsson.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/ab8500.h>
+
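+/*
+ * The register addresses below encode the bank number in the upper byte
+ * and the register offset within that bank in the lower byte.
+ */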
+/*
+ * Interrupt register offsets
+ * Bank : 0x0E
+ */
+#define AB8500_IT_SOURCE1_REG 0x0E00
+#define AB8500_IT_SOURCE2_REG 0x0E01
+#define AB8500_IT_SOURCE3_REG 0x0E02
+#define AB8500_IT_SOURCE4_REG 0x0E03
+#define AB8500_IT_SOURCE5_REG 0x0E04
+#define AB8500_IT_SOURCE6_REG 0x0E05
+#define AB8500_IT_SOURCE7_REG 0x0E06
+#define AB8500_IT_SOURCE8_REG 0x0E07
+#define AB8500_IT_SOURCE19_REG 0x0E12
+#define AB8500_IT_SOURCE20_REG 0x0E13
+#define AB8500_IT_SOURCE21_REG 0x0E14
+#define AB8500_IT_SOURCE22_REG 0x0E15
+#define AB8500_IT_SOURCE23_REG 0x0E16
+#define AB8500_IT_SOURCE24_REG 0x0E17
+
+/*
+ * latch registers
+ */
+#define AB8500_IT_LATCH1_REG 0x0E20
+#define AB8500_IT_LATCH2_REG 0x0E21
+#define AB8500_IT_LATCH3_REG 0x0E22
+#define AB8500_IT_LATCH4_REG 0x0E23
+#define AB8500_IT_LATCH5_REG 0x0E24
+#define AB8500_IT_LATCH6_REG 0x0E25
+#define AB8500_IT_LATCH7_REG 0x0E26
+#define AB8500_IT_LATCH8_REG 0x0E27
+#define AB8500_IT_LATCH9_REG 0x0E28
+#define AB8500_IT_LATCH10_REG 0x0E29
+#define AB8500_IT_LATCH19_REG 0x0E32
+#define AB8500_IT_LATCH20_REG 0x0E33
+#define AB8500_IT_LATCH21_REG 0x0E34
+#define AB8500_IT_LATCH22_REG 0x0E35
+#define AB8500_IT_LATCH23_REG 0x0E36
+#define AB8500_IT_LATCH24_REG 0x0E37
+
+/*
+ * mask registers
+ */
+
+#define AB8500_IT_MASK1_REG 0x0E40
+#define AB8500_IT_MASK2_REG 0x0E41
+#define AB8500_IT_MASK3_REG 0x0E42
+#define AB8500_IT_MASK4_REG 0x0E43
+#define AB8500_IT_MASK5_REG 0x0E44
+#define AB8500_IT_MASK6_REG 0x0E45
+#define AB8500_IT_MASK7_REG 0x0E46
+#define AB8500_IT_MASK8_REG 0x0E47
+#define AB8500_IT_MASK9_REG 0x0E48
+#define AB8500_IT_MASK10_REG 0x0E49
+#define AB8500_IT_MASK11_REG 0x0E4A
+#define AB8500_IT_MASK12_REG 0x0E4B
+#define AB8500_IT_MASK13_REG 0x0E4C
+#define AB8500_IT_MASK14_REG 0x0E4D
+#define AB8500_IT_MASK15_REG 0x0E4E
+#define AB8500_IT_MASK16_REG 0x0E4F
+#define AB8500_IT_MASK17_REG 0x0E50
+#define AB8500_IT_MASK18_REG 0x0E51
+#define AB8500_IT_MASK19_REG 0x0E52
+#define AB8500_IT_MASK20_REG 0x0E53
+#define AB8500_IT_MASK21_REG 0x0E54
+#define AB8500_IT_MASK22_REG 0x0E55
+#define AB8500_IT_MASK23_REG 0x0E56
+#define AB8500_IT_MASK24_REG 0x0E57
+
+#define AB8500_REV_REG 0x1080
+
+/*
+ * Map interrupt numbers to the LATCH and MASK register offsets. Interrupt
+ * numbers are indexed into this array with (num / 8).
+ *
+ * This is one off from the register names, i.e. AB8500_IT_MASK1_REG is at
+ * offset 0.
+ */
+static const int ab8500_irq_regoffset[AB8500_NUM_IRQ_REGS] = {
+ 0, 1, 2, 3, 4, 6, 7, 8, 9, 18, 19, 20, 21,
+};
+
+static int __ab8500_write(struct ab8500 *ab8500, u16 addr, u8 data)
+{
+ int ret;
+
+ dev_vdbg(ab8500->dev, "wr: addr %#x <= %#x\n", addr, data);
+
+ ret = ab8500->write(ab8500, addr, data);
+ if (ret < 0)
+ dev_err(ab8500->dev, "failed to write reg %#x: %d\n",
+ addr, ret);
+
+ return ret;
+}
+
+/**
+ * ab8500_write() - write an AB8500 register
+ * @ab8500: device to write to
+ * @addr: address of the register
+ * @data: value to write
+ */
+int ab8500_write(struct ab8500 *ab8500, u16 addr, u8 data)
+{
+ int ret;
+
+ mutex_lock(&ab8500->lock);
+ ret = __ab8500_write(ab8500, addr, data);
+ mutex_unlock(&ab8500->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ab8500_write);
+
+static int __ab8500_read(struct ab8500 *ab8500, u16 addr)
+{
+ int ret;
+
+ ret = ab8500->read(ab8500, addr);
+ if (ret < 0)
+ dev_err(ab8500->dev, "failed to read reg %#x: %d\n",
+ addr, ret);
+
+ dev_vdbg(ab8500->dev, "rd: addr %#x => data %#x\n", addr, ret);
+
+ return ret;
+}
+
+/**
+ * ab8500_read() - read an AB8500 register
+ * @ab8500: device to read from
+ * @addr: address of the register
+ */
+int ab8500_read(struct ab8500 *ab8500, u16 addr)
+{
+ int ret;
+
+ mutex_lock(&ab8500->lock);
+ ret = __ab8500_read(ab8500, addr);
+ mutex_unlock(&ab8500->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ab8500_read);
+
+/**
+ * ab8500_set_bits() - set a bitfield in an AB8500 register
+ * @ab8500: device to update
+ * @addr: address of the register
+ * @mask: mask of the bitfield to modify
+ * @data: value to set to the bitfield
+ */
+int ab8500_set_bits(struct ab8500 *ab8500, u16 addr, u8 mask, u8 data)
+{
+ int ret;
+
+ mutex_lock(&ab8500->lock);
+
+ ret = __ab8500_read(ab8500, addr);
+ if (ret < 0)
+ goto out;
+
+ ret &= ~mask;
+ ret |= data;
+
+ ret = __ab8500_write(ab8500, addr, ret);
+
+out:
+ mutex_unlock(&ab8500->lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ab8500_set_bits);
+
+static void ab8500_irq_lock(unsigned int irq)
+{
+ struct ab8500 *ab8500 = get_irq_chip_data(irq);
+
+ mutex_lock(&ab8500->irq_lock);
+}
+
+static void ab8500_irq_sync_unlock(unsigned int irq)
+{
+ struct ab8500 *ab8500 = get_irq_chip_data(irq);
+ int i;
+
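+	/* Write out any mask bits that changed while the bus lock was held */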
+ for (i = 0; i < AB8500_NUM_IRQ_REGS; i++) {
+ u8 old = ab8500->oldmask[i];
+ u8 new = ab8500->mask[i];
+ int reg;
+
+ if (new == old)
+ continue;
+
+ ab8500->oldmask[i] = new;
+
+ reg = AB8500_IT_MASK1_REG + ab8500_irq_regoffset[i];
+ ab8500_write(ab8500, reg, new);
+ }
+
+ mutex_unlock(&ab8500->irq_lock);
+}
+
+static void ab8500_irq_mask(unsigned int irq)
+{
+ struct ab8500 *ab8500 = get_irq_chip_data(irq);
+ int offset = irq - ab8500->irq_base;
+ int index = offset / 8;
+ int mask = 1 << (offset % 8);
+
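+	/* The hardware update is deferred to ab8500_irq_sync_unlock() */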
+ ab8500->mask[index] |= mask;
+}
+
+static void ab8500_irq_unmask(unsigned int irq)
+{
+ struct ab8500 *ab8500 = get_irq_chip_data(irq);
+ int offset = irq - ab8500->irq_base;
+ int index = offset / 8;
+ int mask = 1 << (offset % 8);
+
+ ab8500->mask[index] &= ~mask;
+}
+
+static struct irq_chip ab8500_irq_chip = {
+ .name = "ab8500",
+ .bus_lock = ab8500_irq_lock,
+ .bus_sync_unlock = ab8500_irq_sync_unlock,
+ .mask = ab8500_irq_mask,
+ .unmask = ab8500_irq_unmask,
+};
+
+static irqreturn_t ab8500_irq(int irq, void *dev)
+{
+ struct ab8500 *ab8500 = dev;
+ int i;
+
+ dev_vdbg(ab8500->dev, "interrupt\n");
+
+ for (i = 0; i < AB8500_NUM_IRQ_REGS; i++) {
+ int regoffset = ab8500_irq_regoffset[i];
+ int status;
+
+ status = ab8500_read(ab8500, AB8500_IT_LATCH1_REG + regoffset);
+ if (status <= 0)
+ continue;
+
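+		/* Dispatch a nested IRQ for every latched event bit */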
+ do {
+ int bit = __ffs(status);
+ int line = i * 8 + bit;
+
+ handle_nested_irq(ab8500->irq_base + line);
+ status &= ~(1 << bit);
+ } while (status);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int ab8500_irq_init(struct ab8500 *ab8500)
+{
+ int base = ab8500->irq_base;
+ int irq;
+
+ for (irq = base; irq < base + AB8500_NR_IRQS; irq++) {
+ set_irq_chip_data(irq, ab8500);
+ set_irq_chip_and_handler(irq, &ab8500_irq_chip,
+ handle_simple_irq);
+ set_irq_nested_thread(irq, 1);
+#ifdef CONFIG_ARM
+ set_irq_flags(irq, IRQF_VALID);
+#else
+ set_irq_noprobe(irq);
+#endif
+ }
+
+ return 0;
+}
+
+static void ab8500_irq_remove(struct ab8500 *ab8500)
+{
+ int base = ab8500->irq_base;
+ int irq;
+
+ for (irq = base; irq < base + AB8500_NR_IRQS; irq++) {
+#ifdef CONFIG_ARM
+ set_irq_flags(irq, 0);
+#endif
+ set_irq_chip_and_handler(irq, NULL, NULL);
+ set_irq_chip_data(irq, NULL);
+ }
+}
+
+static struct resource ab8500_gpadc_resources[] = {
+ {
+ .name = "HW_CONV_END",
+ .start = AB8500_INT_GP_HW_ADC_CONV_END,
+ .end = AB8500_INT_GP_HW_ADC_CONV_END,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "SW_CONV_END",
+ .start = AB8500_INT_GP_SW_ADC_CONV_END,
+ .end = AB8500_INT_GP_SW_ADC_CONV_END,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct resource ab8500_rtc_resources[] = {
+ {
+ .name = "60S",
+ .start = AB8500_INT_RTC_60S,
+ .end = AB8500_INT_RTC_60S,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "ALARM",
+ .start = AB8500_INT_RTC_ALARM,
+ .end = AB8500_INT_RTC_ALARM,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct mfd_cell ab8500_devs[] = {
+ {
+ .name = "ab8500-gpadc",
+ .num_resources = ARRAY_SIZE(ab8500_gpadc_resources),
+ .resources = ab8500_gpadc_resources,
+ },
+ {
+ .name = "ab8500-rtc",
+ .num_resources = ARRAY_SIZE(ab8500_rtc_resources),
+ .resources = ab8500_rtc_resources,
+ },
+ { .name = "ab8500-charger", },
+ { .name = "ab8500-audio", },
+ { .name = "ab8500-usb", },
+ { .name = "ab8500-pwm", },
+};
+
+int __devinit ab8500_init(struct ab8500 *ab8500)
+{
+ struct ab8500_platform_data *plat = dev_get_platdata(ab8500->dev);
+ int ret;
+ int i;
+
+ if (plat)
+ ab8500->irq_base = plat->irq_base;
+
+ mutex_init(&ab8500->lock);
+ mutex_init(&ab8500->irq_lock);
+
+ ret = ab8500_read(ab8500, AB8500_REV_REG);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * 0x0 - Early Drop
+ * 0x10 - Cut 1.0
+ * 0x11 - Cut 1.1
+ */
+ if (ret == 0x0 || ret == 0x10 || ret == 0x11) {
+ ab8500->revision = ret;
+ dev_info(ab8500->dev, "detected chip, revision: %#x\n", ret);
+ } else {
+ dev_err(ab8500->dev, "unknown chip, revision: %#x\n", ret);
+ return -EINVAL;
+ }
+
+ if (plat && plat->init)
+ plat->init(ab8500);
+
+ /* Clear and mask all interrupts */
+ for (i = 0; i < 10; i++) {
+ ab8500_read(ab8500, AB8500_IT_LATCH1_REG + i);
+ ab8500_write(ab8500, AB8500_IT_MASK1_REG + i, 0xff);
+ }
+
+ for (i = 18; i < 24; i++) {
+ ab8500_read(ab8500, AB8500_IT_LATCH1_REG + i);
+ ab8500_write(ab8500, AB8500_IT_MASK1_REG + i, 0xff);
+ }
+
+ for (i = 0; i < AB8500_NUM_IRQ_REGS; i++)
+ ab8500->mask[i] = ab8500->oldmask[i] = 0xff;
+
+ if (ab8500->irq_base) {
+ ret = ab8500_irq_init(ab8500);
+ if (ret)
+ return ret;
+
+ ret = request_threaded_irq(ab8500->irq, NULL, ab8500_irq,
+ IRQF_ONESHOT, "ab8500", ab8500);
+ if (ret)
+ goto out_removeirq;
+ }
+
+ ret = mfd_add_devices(ab8500->dev, -1, ab8500_devs,
+ ARRAY_SIZE(ab8500_devs), NULL,
+ ab8500->irq_base);
+ if (ret)
+ goto out_freeirq;
+
+ return ret;
+
+out_freeirq:
+ if (ab8500->irq_base) {
+ free_irq(ab8500->irq, ab8500);
+out_removeirq:
+ ab8500_irq_remove(ab8500);
+ }
+ return ret;
+}
+
+int __devexit ab8500_exit(struct ab8500 *ab8500)
+{
+ mfd_remove_devices(ab8500->dev);
+ if (ab8500->irq_base) {
+ free_irq(ab8500->irq, ab8500);
+ ab8500_irq_remove(ab8500);
+ }
+
+ return 0;
+}
+
+MODULE_AUTHOR("Srinidhi Kasagar, Rabin Vincent");
+MODULE_DESCRIPTION("AB8500 MFD core");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/ab8500-spi.c b/drivers/mfd/ab8500-spi.c
new file mode 100644
index 000000000000..b81d4f768ef6
--- /dev/null
+++ b/drivers/mfd/ab8500-spi.c
@@ -0,0 +1,133 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License Terms: GNU General Public License v2
+ * Author: Srinidhi Kasagar <srinidhi.kasagar@stericsson.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/mfd/ab8500.h>
+
+/*
+ * This function writes to any AB8500 register using the
+ * SPI protocol. Before writing, it packs the data into the
+ * 24 bit frame format below:
+ *
+ * *|------------------------------------|
+ * *| 23|22...18|17.......10|9|8|7......0|
+ * *| r/w bank adr data |
+ * * ------------------------------------
+ *
+ * This function shouldn't be called from interrupt
+ * context
+ */
+static int ab8500_spi_write(struct ab8500 *ab8500, u16 addr, u8 data)
+{
+ struct spi_device *spi = container_of(ab8500->dev, struct spi_device,
+ dev);
+ unsigned long spi_data = addr << 10 | data;
+ struct spi_transfer xfer;
+ struct spi_message msg;
+
+ ab8500->tx_buf[0] = spi_data;
+ ab8500->rx_buf[0] = 0;
+
+ xfer.tx_buf = ab8500->tx_buf;
+ xfer.rx_buf = NULL;
+ xfer.len = sizeof(unsigned long);
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+
+ return spi_sync(spi, &msg);
+}
+
+static int ab8500_spi_read(struct ab8500 *ab8500, u16 addr)
+{
+ struct spi_device *spi = container_of(ab8500->dev, struct spi_device,
+ dev);
+ unsigned long spi_data = 1 << 23 | addr << 10;
+ struct spi_transfer xfer;
+ struct spi_message msg;
+ int ret;
+
+ ab8500->tx_buf[0] = spi_data;
+ ab8500->rx_buf[0] = 0;
+
+ xfer.tx_buf = ab8500->tx_buf;
+ xfer.rx_buf = ab8500->rx_buf;
+ xfer.len = sizeof(unsigned long);
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+
+ ret = spi_sync(spi, &msg);
+ if (!ret)
+ ret = ab8500->rx_buf[0];
+
+ return ret;
+}
+
+static int __devinit ab8500_spi_probe(struct spi_device *spi)
+{
+ struct ab8500 *ab8500;
+ int ret;
+
+ ab8500 = kzalloc(sizeof *ab8500, GFP_KERNEL);
+ if (!ab8500)
+ return -ENOMEM;
+
+ ab8500->dev = &spi->dev;
+ ab8500->irq = spi->irq;
+
+ ab8500->read = ab8500_spi_read;
+ ab8500->write = ab8500_spi_write;
+
+ spi_set_drvdata(spi, ab8500);
+
+ ret = ab8500_init(ab8500);
+ if (ret)
+ kfree(ab8500);
+
+ return ret;
+}
+
+static int __devexit ab8500_spi_remove(struct spi_device *spi)
+{
+ struct ab8500 *ab8500 = spi_get_drvdata(spi);
+
+ ab8500_exit(ab8500);
+ kfree(ab8500);
+
+ return 0;
+}
+
+static struct spi_driver ab8500_spi_driver = {
+ .driver = {
+ .name = "ab8500",
+ .owner = THIS_MODULE,
+ },
+ .probe = ab8500_spi_probe,
+ .remove = __devexit_p(ab8500_spi_remove)
+};
+
+static int __init ab8500_spi_init(void)
+{
+ return spi_register_driver(&ab8500_spi_driver);
+}
+subsys_initcall(ab8500_spi_init);
+
+static void __exit ab8500_spi_exit(void)
+{
+ spi_unregister_driver(&ab8500_spi_driver);
+}
+module_exit(ab8500_spi_exit);
+
+MODULE_AUTHOR("Srinidhi KASAGAR <srinidhi.kasagar@stericsson.com>");
+MODULE_DESCRIPTION("AB8500 SPI");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/abx500-core.c b/drivers/mfd/abx500-core.c
new file mode 100644
index 000000000000..3b3b97ec32a7
--- /dev/null
+++ b/drivers/mfd/abx500-core.c
@@ -0,0 +1,157 @@
+/*
+ * Copyright (C) 2007-2010 ST-Ericsson
+ * License terms: GNU General Public License (GPL) version 2
+ * Register access functions for the ABX500 Mixed Signal IC family.
+ * Author: Mattias Wallin <mattias.wallin@stericsson.com>
+ */
+
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/mfd/abx500.h>
+
+static LIST_HEAD(abx500_list);
+
+struct abx500_device_entry {
+ struct list_head list;
+ struct abx500_ops ops;
+ struct device *dev;
+};
+
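+/* Find the ops registered for a given core device, or set *ops to NULL */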
+static void lookup_ops(struct device *dev, struct abx500_ops **ops)
+{
+ struct abx500_device_entry *dev_entry;
+
+ *ops = NULL;
+ list_for_each_entry(dev_entry, &abx500_list, list) {
+ if (dev_entry->dev == dev) {
+ *ops = &dev_entry->ops;
+ return;
+ }
+ }
+}
+
+int abx500_register_ops(struct device *dev, struct abx500_ops *ops)
+{
+ struct abx500_device_entry *dev_entry;
+
+ dev_entry = kzalloc(sizeof(struct abx500_device_entry), GFP_KERNEL);
+	if (!dev_entry) {
+		dev_err(dev, "register_ops kzalloc failed\n");
+ return -ENOMEM;
+ }
+ dev_entry->dev = dev;
+ memcpy(&dev_entry->ops, ops, sizeof(struct abx500_ops));
+
+ list_add_tail(&dev_entry->list, &abx500_list);
+ return 0;
+}
+EXPORT_SYMBOL(abx500_register_ops);
+
+void abx500_remove_ops(struct device *dev)
+{
+ struct abx500_device_entry *dev_entry, *tmp;
+
+ list_for_each_entry_safe(dev_entry, tmp, &abx500_list, list)
+ {
+ if (dev_entry->dev == dev) {
+ list_del(&dev_entry->list);
+ kfree(dev_entry);
+ }
+ }
+}
+EXPORT_SYMBOL(abx500_remove_ops);
+
+int abx500_set_register_interruptible(struct device *dev, u8 bank, u8 reg,
+ u8 value)
+{
+ struct abx500_ops *ops;
+
+ lookup_ops(dev->parent, &ops);
+ if ((ops != NULL) && (ops->set_register != NULL))
+ return ops->set_register(dev, bank, reg, value);
+ else
+ return -ENOTSUPP;
+}
+EXPORT_SYMBOL(abx500_set_register_interruptible);
+
+int abx500_get_register_interruptible(struct device *dev, u8 bank, u8 reg,
+ u8 *value)
+{
+ struct abx500_ops *ops;
+
+ lookup_ops(dev->parent, &ops);
+ if ((ops != NULL) && (ops->get_register != NULL))
+ return ops->get_register(dev, bank, reg, value);
+ else
+ return -ENOTSUPP;
+}
+EXPORT_SYMBOL(abx500_get_register_interruptible);
+
+int abx500_get_register_page_interruptible(struct device *dev, u8 bank,
+ u8 first_reg, u8 *regvals, u8 numregs)
+{
+ struct abx500_ops *ops;
+
+ lookup_ops(dev->parent, &ops);
+ if ((ops != NULL) && (ops->get_register_page != NULL))
+ return ops->get_register_page(dev, bank,
+ first_reg, regvals, numregs);
+ else
+ return -ENOTSUPP;
+}
+EXPORT_SYMBOL(abx500_get_register_page_interruptible);
+
+int abx500_mask_and_set_register_interruptible(struct device *dev, u8 bank,
+ u8 reg, u8 bitmask, u8 bitvalues)
+{
+ struct abx500_ops *ops;
+
+ lookup_ops(dev->parent, &ops);
+ if ((ops != NULL) && (ops->mask_and_set_register != NULL))
+ return ops->mask_and_set_register(dev, bank,
+ reg, bitmask, bitvalues);
+ else
+ return -ENOTSUPP;
+}
+EXPORT_SYMBOL(abx500_mask_and_set_register_interruptible);
+
+int abx500_get_chip_id(struct device *dev)
+{
+ struct abx500_ops *ops;
+
+ lookup_ops(dev->parent, &ops);
+ if ((ops != NULL) && (ops->get_chip_id != NULL))
+ return ops->get_chip_id(dev);
+ else
+ return -ENOTSUPP;
+}
+EXPORT_SYMBOL(abx500_get_chip_id);
+
+int abx500_event_registers_startup_state_get(struct device *dev, u8 *event)
+{
+ struct abx500_ops *ops;
+
+ lookup_ops(dev->parent, &ops);
+ if ((ops != NULL) && (ops->event_registers_startup_state_get != NULL))
+ return ops->event_registers_startup_state_get(dev, event);
+ else
+ return -ENOTSUPP;
+}
+EXPORT_SYMBOL(abx500_event_registers_startup_state_get);
+
+int abx500_startup_irq_enabled(struct device *dev, unsigned int irq)
+{
+ struct abx500_ops *ops;
+
+ lookup_ops(dev->parent, &ops);
+ if ((ops != NULL) && (ops->startup_irq_enabled != NULL))
+ return ops->startup_irq_enabled(dev, irq);
+ else
+ return -ENOTSUPP;
+}
+EXPORT_SYMBOL(abx500_startup_irq_enabled);
+
+MODULE_AUTHOR("Mattias Wallin <mattias.wallin@stericsson.com>");
+MODULE_DESCRIPTION("ABX500 core driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/da903x.c b/drivers/mfd/da903x.c
index 67181b147ab3..3ad915d0589c 100644
--- a/drivers/mfd/da903x.c
+++ b/drivers/mfd/da903x.c
@@ -544,6 +544,7 @@ static int __devexit da903x_remove(struct i2c_client *client)
struct da903x_chip *chip = i2c_get_clientdata(client);
da903x_remove_subdevs(chip);
+ i2c_set_clientdata(client, NULL);
kfree(chip);
return 0;
}
diff --git a/drivers/mfd/davinci_voicecodec.c b/drivers/mfd/davinci_voicecodec.c
new file mode 100644
index 000000000000..3e75f02e4778
--- /dev/null
+++ b/drivers/mfd/davinci_voicecodec.c
@@ -0,0 +1,190 @@
+/*
+ * DaVinci Voice Codec Core Interface for TI platforms
+ *
+ * Copyright (C) 2010 Texas Instruments, Inc
+ *
+ * Author: Miguel Aguilar <miguel.aguilar@ridgerun.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+
+#include <sound/pcm.h>
+
+#include <linux/mfd/davinci_voicecodec.h>
+
+u32 davinci_vc_read(struct davinci_vc *davinci_vc, int reg)
+{
+ return __raw_readl(davinci_vc->base + reg);
+}
+
+void davinci_vc_write(struct davinci_vc *davinci_vc,
+ int reg, u32 val)
+{
+ __raw_writel(val, davinci_vc->base + reg);
+}
+
+static int __init davinci_vc_probe(struct platform_device *pdev)
+{
+ struct davinci_vc *davinci_vc;
+ struct resource *res, *mem;
+ struct mfd_cell *cell = NULL;
+ int ret;
+
+ davinci_vc = kzalloc(sizeof(struct davinci_vc), GFP_KERNEL);
+ if (!davinci_vc) {
+ dev_dbg(&pdev->dev,
+ "could not allocate memory for private data\n");
+ return -ENOMEM;
+ }
+
+ davinci_vc->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(davinci_vc->clk)) {
+ dev_dbg(&pdev->dev,
+ "could not get the clock for voice codec\n");
+ ret = -ENODEV;
+ goto fail1;
+ }
+ clk_enable(davinci_vc->clk);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "no mem resource\n");
+ ret = -ENODEV;
+ goto fail2;
+ }
+
+ davinci_vc->pbase = res->start;
+ davinci_vc->base_size = resource_size(res);
+
+ mem = request_mem_region(davinci_vc->pbase, davinci_vc->base_size,
+ pdev->name);
+ if (!mem) {
+ dev_err(&pdev->dev, "VCIF region already claimed\n");
+ ret = -EBUSY;
+ goto fail2;
+ }
+
+ davinci_vc->base = ioremap(davinci_vc->pbase, davinci_vc->base_size);
+ if (!davinci_vc->base) {
+ dev_err(&pdev->dev, "can't ioremap mem resource.\n");
+ ret = -ENOMEM;
+ goto fail3;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "no DMA resource\n");
+		ret = -ENXIO;
+		goto fail4;
+ }
+
+ davinci_vc->davinci_vcif.dma_tx_channel = res->start;
+ davinci_vc->davinci_vcif.dma_tx_addr =
+ (dma_addr_t)(io_v2p(davinci_vc->base) + DAVINCI_VC_WFIFO);
+
+ res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
+ if (!res) {
+ dev_err(&pdev->dev, "no DMA resource\n");
+		ret = -ENXIO;
+		goto fail4;
+ }
+
+ davinci_vc->davinci_vcif.dma_rx_channel = res->start;
+ davinci_vc->davinci_vcif.dma_rx_addr =
+ (dma_addr_t)(io_v2p(davinci_vc->base) + DAVINCI_VC_RFIFO);
+
+ davinci_vc->dev = &pdev->dev;
+ davinci_vc->pdev = pdev;
+
+ /* Voice codec interface client */
+ cell = &davinci_vc->cells[DAVINCI_VC_VCIF_CELL];
+ cell->name = "davinci_vcif";
+ cell->driver_data = davinci_vc;
+
+ /* Voice codec CQ93VC client */
+ cell = &davinci_vc->cells[DAVINCI_VC_CQ93VC_CELL];
+ cell->name = "cq93vc";
+ cell->driver_data = davinci_vc;
+
+ ret = mfd_add_devices(&pdev->dev, pdev->id, davinci_vc->cells,
+ DAVINCI_VC_CELLS, NULL, 0);
+ if (ret != 0) {
+ dev_err(&pdev->dev, "fail to register client devices\n");
+ goto fail4;
+ }
+
+ return 0;
+
+fail4:
+ iounmap(davinci_vc->base);
+fail3:
+ release_mem_region(davinci_vc->pbase, davinci_vc->base_size);
+fail2:
+ clk_disable(davinci_vc->clk);
+ clk_put(davinci_vc->clk);
+ davinci_vc->clk = NULL;
+fail1:
+ kfree(davinci_vc);
+
+ return ret;
+}
+
+static int __devexit davinci_vc_remove(struct platform_device *pdev)
+{
+ struct davinci_vc *davinci_vc = platform_get_drvdata(pdev);
+
+ mfd_remove_devices(&pdev->dev);
+
+ iounmap(davinci_vc->base);
+ release_mem_region(davinci_vc->pbase, davinci_vc->base_size);
+
+ clk_disable(davinci_vc->clk);
+ clk_put(davinci_vc->clk);
+ davinci_vc->clk = NULL;
+
+ kfree(davinci_vc);
+
+ return 0;
+}
+
+static struct platform_driver davinci_vc_driver = {
+ .driver = {
+ .name = "davinci_voicecodec",
+ .owner = THIS_MODULE,
+ },
+ .remove = __devexit_p(davinci_vc_remove),
+};
+
+static int __init davinci_vc_init(void)
+{
+ return platform_driver_probe(&davinci_vc_driver, davinci_vc_probe);
+}
+module_init(davinci_vc_init);
+
+static void __exit davinci_vc_exit(void)
+{
+ platform_driver_unregister(&davinci_vc_driver);
+}
+module_exit(davinci_vc_exit);
+
+MODULE_AUTHOR("Miguel Aguilar");
+MODULE_DESCRIPTION("Texas Instruments DaVinci Voice Codec Core Interface");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
new file mode 100644
index 000000000000..9ed630799acc
--- /dev/null
+++ b/drivers/mfd/janz-cmodio.c
@@ -0,0 +1,304 @@
+/*
+ * Janz CMOD-IO MODULbus Carrier Board PCI Driver
+ *
+ * Copyright (c) 2010 Ira W. Snyder <iws@ovro.caltech.edu>
+ *
+ * Lots of inspiration and code was copied from drivers/mfd/sm501.c
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/core.h>
+
+#include <linux/mfd/janz.h>
+
+#define DRV_NAME "janz-cmodio"
+
+/* Size of each MODULbus module in PCI BAR4 */
+#define CMODIO_MODULBUS_SIZE 0x200
+
+/* Maximum number of MODULbus modules on a CMOD-IO carrier board */
+#define CMODIO_MAX_MODULES 4
+
+/* Module Parameters */
+static unsigned int num_modules = CMODIO_MAX_MODULES;
+static unsigned char *modules[CMODIO_MAX_MODULES] = {
+ "empty", "empty", "empty", "empty",
+};
+
+module_param_array(modules, charp, &num_modules, S_IRUGO);
+MODULE_PARM_DESC(modules, "MODULbus modules attached to the carrier board");
+
+/* Unique Device Id */
+static unsigned int cmodio_id;
+
+struct cmodio_device {
+ /* Parent PCI device */
+ struct pci_dev *pdev;
+
+ /* PLX control registers */
+ struct janz_cmodio_onboard_regs __iomem *ctrl;
+
+ /* hex switch position */
+ u8 hex;
+
+ /* mfd-core API */
+ struct mfd_cell cells[CMODIO_MAX_MODULES];
+ struct resource resources[3 * CMODIO_MAX_MODULES];
+ struct janz_platform_data pdata[CMODIO_MAX_MODULES];
+};
+
+/*
+ * Subdevices using the mfd-core API
+ */
+
+static int __devinit cmodio_setup_subdevice(struct cmodio_device *priv,
+ char *name, unsigned int devno,
+ unsigned int modno)
+{
+ struct janz_platform_data *pdata;
+ struct mfd_cell *cell;
+ struct resource *res;
+ struct pci_dev *pci;
+
+ pci = priv->pdev;
+ cell = &priv->cells[devno];
+ res = &priv->resources[devno * 3];
+ pdata = &priv->pdata[devno];
+
+ cell->name = name;
+ cell->resources = res;
+ cell->num_resources = 3;
+
+ /* Setup the subdevice ID -- must be unique */
+ cell->id = cmodio_id++;
+
+ /* Add platform data */
+ pdata->modno = modno;
+ cell->platform_data = pdata;
+ cell->data_size = sizeof(*pdata);
+
+ /* MODULbus registers -- PCI BAR3 is big-endian MODULbus access */
+ res->flags = IORESOURCE_MEM;
+ res->parent = &pci->resource[3];
+ res->start = pci->resource[3].start + (CMODIO_MODULBUS_SIZE * modno);
+ res->end = res->start + CMODIO_MODULBUS_SIZE - 1;
+ res++;
+
+ /* PLX Control Registers -- PCI BAR4 is interrupt and other registers */
+ res->flags = IORESOURCE_MEM;
+ res->parent = &pci->resource[4];
+ res->start = pci->resource[4].start;
+ res->end = pci->resource[4].end;
+ res++;
+
+ /*
+ * IRQ
+ *
+ * The start and end fields are used as an offset to the irq_base
+ * parameter passed into the mfd_add_devices() function call. All
+ * devices share the same IRQ.
+ */
+ res->flags = IORESOURCE_IRQ;
+ res->parent = NULL;
+ res->start = 0;
+ res->end = 0;
+ res++;
+
+ return 0;
+}
+
+/* Probe each submodule using kernel parameters */
+static int __devinit cmodio_probe_submodules(struct cmodio_device *priv)
+{
+ struct pci_dev *pdev = priv->pdev;
+ unsigned int num_probed = 0;
+ char *name;
+ int i;
+
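+ /* Walk the "modules" parameter and create one MFD cell per populated slot */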
+ for (i = 0; i < num_modules; i++) {
+ name = modules[i];
+ if (!strcmp(name, "") || !strcmp(name, "empty"))
+ continue;
+
+ dev_dbg(&priv->pdev->dev, "MODULbus %d: name %s\n", i, name);
+ cmodio_setup_subdevice(priv, name, num_probed, i);
+ num_probed++;
+ }
+
+ /* print an error message if no modules were probed */
+ if (num_probed == 0) {
+ dev_err(&priv->pdev->dev, "no MODULbus modules specified, "
+ "please set the ``modules'' kernel "
+ "parameter according to your "
+ "hardware configuration\n");
+ return -ENODEV;
+ }
+
+ return mfd_add_devices(&pdev->dev, 0, priv->cells,
+ num_probed, NULL, pdev->irq);
+}
+
+/*
+ * SYSFS Attributes
+ */
+
+static ssize_t mbus_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct cmodio_device *priv = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%x\n", priv->hex);
+}
+
+static DEVICE_ATTR(modulbus_number, S_IRUGO, mbus_show, NULL);
+
+static struct attribute *cmodio_sysfs_attrs[] = {
+ &dev_attr_modulbus_number.attr,
+ NULL,
+};
+
+static const struct attribute_group cmodio_sysfs_attr_group = {
+ .attrs = cmodio_sysfs_attrs,
+};
+
+/*
+ * PCI Driver
+ */
+
+static int __devinit cmodio_pci_probe(struct pci_dev *dev,
+ const struct pci_device_id *id)
+{
+ struct cmodio_device *priv;
+ int ret;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ dev_err(&dev->dev, "unable to allocate private data\n");
+ ret = -ENOMEM;
+ goto out_return;
+ }
+
+ pci_set_drvdata(dev, priv);
+ priv->pdev = dev;
+
+ /* Hardware Initialization */
+ ret = pci_enable_device(dev);
+ if (ret) {
+ dev_err(&dev->dev, "unable to enable device\n");
+ goto out_free_priv;
+ }
+
+ pci_set_master(dev);
+ ret = pci_request_regions(dev, DRV_NAME);
+ if (ret) {
+ dev_err(&dev->dev, "unable to request regions\n");
+ goto out_pci_disable_device;
+ }
+
+ /* Onboard configuration registers */
+ priv->ctrl = pci_ioremap_bar(dev, 4);
+ if (!priv->ctrl) {
+ dev_err(&dev->dev, "unable to remap onboard regs\n");
+ ret = -ENOMEM;
+ goto out_pci_release_regions;
+ }
+
+ /* Read the hex switch on the carrier board */
+ priv->hex = ioread8(&priv->ctrl->int_enable);
+
+ /* Add the MODULbus number (hex switch value) to the device's sysfs */
+ ret = sysfs_create_group(&dev->dev.kobj, &cmodio_sysfs_attr_group);
+ if (ret) {
+ dev_err(&dev->dev, "unable to create sysfs attributes\n");
+ goto out_unmap_ctrl;
+ }
+
+ /*
+ * Disable all interrupt lines, each submodule will enable its
+ * own interrupt line if needed
+ */
+ iowrite8(0xf, &priv->ctrl->int_disable);
+
+ /* Register drivers for all submodules */
+ ret = cmodio_probe_submodules(priv);
+ if (ret) {
+ dev_err(&dev->dev, "unable to probe submodules\n");
+ goto out_sysfs_remove_group;
+ }
+
+ return 0;
+
+out_sysfs_remove_group:
+ sysfs_remove_group(&dev->dev.kobj, &cmodio_sysfs_attr_group);
+out_unmap_ctrl:
+ iounmap(priv->ctrl);
+out_pci_release_regions:
+ pci_release_regions(dev);
+out_pci_disable_device:
+ pci_disable_device(dev);
+out_free_priv:
+ kfree(priv);
+out_return:
+ return ret;
+}
+
+static void __devexit cmodio_pci_remove(struct pci_dev *dev)
+{
+ struct cmodio_device *priv = pci_get_drvdata(dev);
+
+ mfd_remove_devices(&dev->dev);
+ sysfs_remove_group(&dev->dev.kobj, &cmodio_sysfs_attr_group);
+ iounmap(priv->ctrl);
+ pci_release_regions(dev);
+ pci_disable_device(dev);
+ kfree(priv);
+}
+
+#define PCI_VENDOR_ID_JANZ 0x13c3
+
+/* The list of devices that this module will support */
+static DEFINE_PCI_DEVICE_TABLE(cmodio_pci_ids) = {
+ { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030, PCI_VENDOR_ID_JANZ, 0x0101 },
+ { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, PCI_VENDOR_ID_JANZ, 0x0100 },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, cmodio_pci_ids);
+
+static struct pci_driver cmodio_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = cmodio_pci_ids,
+ .probe = cmodio_pci_probe,
+ .remove = __devexit_p(cmodio_pci_remove),
+};
+
+/*
+ * Module Init / Exit
+ */
+
+static int __init cmodio_init(void)
+{
+ return pci_register_driver(&cmodio_pci_driver);
+}
+
+static void __exit cmodio_exit(void)
+{
+ pci_unregister_driver(&cmodio_pci_driver);
+}
+
+MODULE_AUTHOR("Ira W. Snyder <iws@ovro.caltech.edu>");
+MODULE_DESCRIPTION("Janz CMOD-IO PCI MODULbus Carrier Board Driver");
+MODULE_LICENSE("GPL");
+
+module_init(cmodio_init);
+module_exit(cmodio_exit);
diff --git a/drivers/mfd/max8925-core.c b/drivers/mfd/max8925-core.c
index 85d63c04749b..f621bcea3d02 100644
--- a/drivers/mfd/max8925-core.c
+++ b/drivers/mfd/max8925-core.c
@@ -508,7 +508,7 @@ static int max8925_irq_init(struct max8925_chip *chip, int irq,
max8925_reg_read(chip->i2c, MAX8925_ON_OFF_IRQ2);
max8925_reg_read(chip->rtc, MAX8925_RTC_IRQ);
max8925_reg_read(chip->adc, MAX8925_TSC_IRQ);
- /* mask all interrupts */
+ /* mask all interrupts except for TSC */
max8925_reg_write(chip->rtc, MAX8925_ALARM0_CNTL, 0);
max8925_reg_write(chip->rtc, MAX8925_ALARM1_CNTL, 0);
max8925_reg_write(chip->i2c, MAX8925_CHG_IRQ1_MASK, 0xff);
@@ -516,7 +516,6 @@ static int max8925_irq_init(struct max8925_chip *chip, int irq,
max8925_reg_write(chip->i2c, MAX8925_ON_OFF_IRQ1_MASK, 0xff);
max8925_reg_write(chip->i2c, MAX8925_ON_OFF_IRQ2_MASK, 0xff);
max8925_reg_write(chip->rtc, MAX8925_RTC_IRQ_MASK, 0xff);
- max8925_reg_write(chip->adc, MAX8925_TSC_IRQ_MASK, 0xff);
mutex_init(&chip->irq_lock);
chip->core_irq = irq;
@@ -547,7 +546,11 @@ static int max8925_irq_init(struct max8925_chip *chip, int irq,
dev_err(chip->dev, "Failed to request core IRQ: %d\n", ret);
chip->core_irq = 0;
}
+
tsc_irq:
+ /* mask TSC interrupt */
+ max8925_reg_write(chip->adc, MAX8925_TSC_IRQ_MASK, 0x0f);
+
if (!pdata->tsc_irq) {
dev_warn(chip->dev, "No interrupt support on TSC IRQ\n");
return 0;
diff --git a/drivers/mfd/max8925-i2c.c b/drivers/mfd/max8925-i2c.c
index d9fd8785da4d..e73f3f5252a8 100644
--- a/drivers/mfd/max8925-i2c.c
+++ b/drivers/mfd/max8925-i2c.c
@@ -173,8 +173,6 @@ static int __devexit max8925_remove(struct i2c_client *client)
max8925_device_exit(chip);
i2c_unregister_device(chip->adc);
i2c_unregister_device(chip->rtc);
- i2c_set_clientdata(chip->adc, NULL);
- i2c_set_clientdata(chip->rtc, NULL);
i2c_set_clientdata(chip->i2c, NULL);
kfree(chip);
return 0;
diff --git a/drivers/mfd/mc13783-core.c b/drivers/mfd/mc13783-core.c
index 1f68ecadddc2..fecf38a4f025 100644
--- a/drivers/mfd/mc13783-core.c
+++ b/drivers/mfd/mc13783-core.c
@@ -679,6 +679,10 @@ err_revision:
if (pdata->flags & MC13783_USE_TOUCHSCREEN)
mc13783_add_subdevice(mc13783, "mc13783-ts");
+ if (pdata->flags & MC13783_USE_LED)
+ mc13783_add_subdevice_pdata(mc13783, "mc13783-led",
+ pdata->leds, sizeof(*pdata->leds));
+
return 0;
}
diff --git a/drivers/mfd/menelaus.c b/drivers/mfd/menelaus.c
index a94b131a18ef..721948be12c7 100644
--- a/drivers/mfd/menelaus.c
+++ b/drivers/mfd/menelaus.c
@@ -1228,6 +1228,7 @@ fail2:
free_irq(client->irq, menelaus);
flush_scheduled_work();
fail1:
+ i2c_set_clientdata(client, NULL);
kfree(menelaus);
return err;
}
@@ -1237,8 +1238,8 @@ static int __exit menelaus_remove(struct i2c_client *client)
struct menelaus_chip *menelaus = i2c_get_clientdata(client);
free_irq(client->irq, menelaus);
- kfree(menelaus);
i2c_set_clientdata(client, NULL);
+ kfree(menelaus);
the_menelaus = NULL;
return 0;
}
diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
index 8ffbb7a85a7e..7dd76bceaae8 100644
--- a/drivers/mfd/mfd-core.c
+++ b/drivers/mfd/mfd-core.c
@@ -48,7 +48,7 @@ static int mfd_add_device(struct device *parent, int id,
res[r].flags = cell->resources[r].flags;
/* Find out base to use */
- if (cell->resources[r].flags & IORESOURCE_MEM) {
+ if ((cell->resources[r].flags & IORESOURCE_MEM) && mem_base) {
res[r].parent = mem_base;
res[r].start = mem_base->start +
cell->resources[r].start;
diff --git a/drivers/mfd/pcf50633-adc.c b/drivers/mfd/pcf50633-adc.c
index fe8f922f6654..aed0d2a9b032 100644
--- a/drivers/mfd/pcf50633-adc.c
+++ b/drivers/mfd/pcf50633-adc.c
@@ -30,13 +30,13 @@
struct pcf50633_adc_request {
int mux;
int avg;
- int result;
void (*callback)(struct pcf50633 *, void *, int);
void *callback_param;
+};
- /* Used in case of sync requests */
+struct pcf50633_adc_sync_request {
+ int result;
struct completion completion;
-
};
#define PCF50633_MAX_ADC_FIFO_DEPTH 8
@@ -109,10 +109,10 @@ adc_enqueue_request(struct pcf50633 *pcf, struct pcf50633_adc_request *req)
return 0;
}
-static void
-pcf50633_adc_sync_read_callback(struct pcf50633 *pcf, void *param, int result)
+static void pcf50633_adc_sync_read_callback(struct pcf50633 *pcf, void *param,
+ int result)
{
- struct pcf50633_adc_request *req = param;
+ struct pcf50633_adc_sync_request *req = param;
req->result = result;
complete(&req->completion);
@@ -120,28 +120,19 @@ pcf50633_adc_sync_read_callback(struct pcf50633 *pcf, void *param, int result)
int pcf50633_adc_sync_read(struct pcf50633 *pcf, int mux, int avg)
{
- struct pcf50633_adc_request *req;
- int err;
+ struct pcf50633_adc_sync_request req;
+ int ret;
- /* req is freed when the result is ready, in interrupt handler */
- req = kzalloc(sizeof(*req), GFP_KERNEL);
- if (!req)
- return -ENOMEM;
-
- req->mux = mux;
- req->avg = avg;
- req->callback = pcf50633_adc_sync_read_callback;
- req->callback_param = req;
+ init_completion(&req.completion);
- init_completion(&req->completion);
- err = adc_enqueue_request(pcf, req);
- if (err)
- return err;
+ ret = pcf50633_adc_async_read(pcf, mux, avg,
+ pcf50633_adc_sync_read_callback, &req);
+ if (ret)
+ return ret;
- wait_for_completion(&req->completion);
+ wait_for_completion(&req.completion);
- /* FIXME by this time req might be already freed */
- return req->result;
+ return req.result;
}
EXPORT_SYMBOL_GPL(pcf50633_adc_sync_read);
diff --git a/drivers/mfd/pcf50633-core.c b/drivers/mfd/pcf50633-core.c
index 63a614d696c1..704736e6e9b9 100644
--- a/drivers/mfd/pcf50633-core.c
+++ b/drivers/mfd/pcf50633-core.c
@@ -21,16 +21,16 @@
#include <linux/workqueue.h>
#include <linux/platform_device.h>
#include <linux/i2c.h>
-#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/mfd/pcf50633/core.h>
-/* Two MBCS registers used during cold start */
-#define PCF50633_REG_MBCS1 0x4b
-#define PCF50633_REG_MBCS2 0x4c
-#define PCF50633_MBCS1_USBPRES 0x01
-#define PCF50633_MBCS1_ADAPTPRES 0x01
+int pcf50633_irq_init(struct pcf50633 *pcf, int irq);
+void pcf50633_irq_free(struct pcf50633 *pcf);
+#ifdef CONFIG_PM
+int pcf50633_irq_suspend(struct pcf50633 *pcf);
+int pcf50633_irq_resume(struct pcf50633 *pcf);
+#endif
static int __pcf50633_read(struct pcf50633 *pcf, u8 reg, int num, u8 *data)
{
@@ -215,244 +215,6 @@ static struct attribute_group pcf_attr_group = {
.attrs = pcf_sysfs_entries,
};
-int pcf50633_register_irq(struct pcf50633 *pcf, int irq,
- void (*handler) (int, void *), void *data)
-{
- if (irq < 0 || irq > PCF50633_NUM_IRQ || !handler)
- return -EINVAL;
-
- if (WARN_ON(pcf->irq_handler[irq].handler))
- return -EBUSY;
-
- mutex_lock(&pcf->lock);
- pcf->irq_handler[irq].handler = handler;
- pcf->irq_handler[irq].data = data;
- mutex_unlock(&pcf->lock);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(pcf50633_register_irq);
-
-int pcf50633_free_irq(struct pcf50633 *pcf, int irq)
-{
- if (irq < 0 || irq > PCF50633_NUM_IRQ)
- return -EINVAL;
-
- mutex_lock(&pcf->lock);
- pcf->irq_handler[irq].handler = NULL;
- mutex_unlock(&pcf->lock);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(pcf50633_free_irq);
-
-static int __pcf50633_irq_mask_set(struct pcf50633 *pcf, int irq, u8 mask)
-{
- u8 reg, bits, tmp;
- int ret = 0, idx;
-
- idx = irq >> 3;
- reg = PCF50633_REG_INT1M + idx;
- bits = 1 << (irq & 0x07);
-
- mutex_lock(&pcf->lock);
-
- if (mask) {
- ret = __pcf50633_read(pcf, reg, 1, &tmp);
- if (ret < 0)
- goto out;
-
- tmp |= bits;
-
- ret = __pcf50633_write(pcf, reg, 1, &tmp);
- if (ret < 0)
- goto out;
-
- pcf->mask_regs[idx] &= ~bits;
- pcf->mask_regs[idx] |= bits;
- } else {
- ret = __pcf50633_read(pcf, reg, 1, &tmp);
- if (ret < 0)
- goto out;
-
- tmp &= ~bits;
-
- ret = __pcf50633_write(pcf, reg, 1, &tmp);
- if (ret < 0)
- goto out;
-
- pcf->mask_regs[idx] &= ~bits;
- }
-out:
- mutex_unlock(&pcf->lock);
-
- return ret;
-}
-
-int pcf50633_irq_mask(struct pcf50633 *pcf, int irq)
-{
- dev_dbg(pcf->dev, "Masking IRQ %d\n", irq);
-
- return __pcf50633_irq_mask_set(pcf, irq, 1);
-}
-EXPORT_SYMBOL_GPL(pcf50633_irq_mask);
-
-int pcf50633_irq_unmask(struct pcf50633 *pcf, int irq)
-{
- dev_dbg(pcf->dev, "Unmasking IRQ %d\n", irq);
-
- return __pcf50633_irq_mask_set(pcf, irq, 0);
-}
-EXPORT_SYMBOL_GPL(pcf50633_irq_unmask);
-
-int pcf50633_irq_mask_get(struct pcf50633 *pcf, int irq)
-{
- u8 reg, bits;
-
- reg = irq >> 3;
- bits = 1 << (irq & 0x07);
-
- return pcf->mask_regs[reg] & bits;
-}
-EXPORT_SYMBOL_GPL(pcf50633_irq_mask_get);
-
-static void pcf50633_irq_call_handler(struct pcf50633 *pcf, int irq)
-{
- if (pcf->irq_handler[irq].handler)
- pcf->irq_handler[irq].handler(irq, pcf->irq_handler[irq].data);
-}
-
-/* Maximum amount of time ONKEY is held before emergency action is taken */
-#define PCF50633_ONKEY1S_TIMEOUT 8
-
-static void pcf50633_irq_worker(struct work_struct *work)
-{
- struct pcf50633 *pcf;
- int ret, i, j;
- u8 pcf_int[5], chgstat;
-
- pcf = container_of(work, struct pcf50633, irq_work);
-
- /* Read the 5 INT regs in one transaction */
- ret = pcf50633_read_block(pcf, PCF50633_REG_INT1,
- ARRAY_SIZE(pcf_int), pcf_int);
- if (ret != ARRAY_SIZE(pcf_int)) {
- dev_err(pcf->dev, "Error reading INT registers\n");
-
- /*
- * If this doesn't ACK the interrupt to the chip, we'll be
- * called once again as we're level triggered.
- */
- goto out;
- }
-
- /* defeat 8s death from lowsys on A5 */
- pcf50633_reg_write(pcf, PCF50633_REG_OOCSHDWN, 0x04);
-
- /* We immediately read the usb and adapter status. We thus make sure
- * only of USBINS/USBREM IRQ handlers are called */
- if (pcf_int[0] & (PCF50633_INT1_USBINS | PCF50633_INT1_USBREM)) {
- chgstat = pcf50633_reg_read(pcf, PCF50633_REG_MBCS2);
- if (chgstat & (0x3 << 4))
- pcf_int[0] &= ~(1 << PCF50633_INT1_USBREM);
- else
- pcf_int[0] &= ~(1 << PCF50633_INT1_USBINS);
- }
-
- /* Make sure only one of ADPINS or ADPREM is set */
- if (pcf_int[0] & (PCF50633_INT1_ADPINS | PCF50633_INT1_ADPREM)) {
- chgstat = pcf50633_reg_read(pcf, PCF50633_REG_MBCS2);
- if (chgstat & (0x3 << 4))
- pcf_int[0] &= ~(1 << PCF50633_INT1_ADPREM);
- else
- pcf_int[0] &= ~(1 << PCF50633_INT1_ADPINS);
- }
-
- dev_dbg(pcf->dev, "INT1=0x%02x INT2=0x%02x INT3=0x%02x "
- "INT4=0x%02x INT5=0x%02x\n", pcf_int[0],
- pcf_int[1], pcf_int[2], pcf_int[3], pcf_int[4]);
-
- /* Some revisions of the chip don't have a 8s standby mode on
- * ONKEY1S press. We try to manually do it in such cases. */
- if ((pcf_int[0] & PCF50633_INT1_SECOND) && pcf->onkey1s_held) {
- dev_info(pcf->dev, "ONKEY1S held for %d secs\n",
- pcf->onkey1s_held);
- if (pcf->onkey1s_held++ == PCF50633_ONKEY1S_TIMEOUT)
- if (pcf->pdata->force_shutdown)
- pcf->pdata->force_shutdown(pcf);
- }
-
- if (pcf_int[2] & PCF50633_INT3_ONKEY1S) {
- dev_info(pcf->dev, "ONKEY1S held\n");
- pcf->onkey1s_held = 1 ;
-
- /* Unmask IRQ_SECOND */
- pcf50633_reg_clear_bits(pcf, PCF50633_REG_INT1M,
- PCF50633_INT1_SECOND);
-
- /* Unmask IRQ_ONKEYR */
- pcf50633_reg_clear_bits(pcf, PCF50633_REG_INT2M,
- PCF50633_INT2_ONKEYR);
- }
-
- if ((pcf_int[1] & PCF50633_INT2_ONKEYR) && pcf->onkey1s_held) {
- pcf->onkey1s_held = 0;
-
- /* Mask SECOND and ONKEYR interrupts */
- if (pcf->mask_regs[0] & PCF50633_INT1_SECOND)
- pcf50633_reg_set_bit_mask(pcf,
- PCF50633_REG_INT1M,
- PCF50633_INT1_SECOND,
- PCF50633_INT1_SECOND);
-
- if (pcf->mask_regs[1] & PCF50633_INT2_ONKEYR)
- pcf50633_reg_set_bit_mask(pcf,
- PCF50633_REG_INT2M,
- PCF50633_INT2_ONKEYR,
- PCF50633_INT2_ONKEYR);
- }
-
- /* Have we just resumed ? */
- if (pcf->is_suspended) {
- pcf->is_suspended = 0;
-
- /* Set the resume reason filtering out non resumers */
- for (i = 0; i < ARRAY_SIZE(pcf_int); i++)
- pcf->resume_reason[i] = pcf_int[i] &
- pcf->pdata->resumers[i];
-
- /* Make sure we don't pass on any ONKEY events to
- * userspace now */
- pcf_int[1] &= ~(PCF50633_INT2_ONKEYR | PCF50633_INT2_ONKEYF);
- }
-
- for (i = 0; i < ARRAY_SIZE(pcf_int); i++) {
- /* Unset masked interrupts */
- pcf_int[i] &= ~pcf->mask_regs[i];
-
- for (j = 0; j < 8 ; j++)
- if (pcf_int[i] & (1 << j))
- pcf50633_irq_call_handler(pcf, (i * 8) + j);
- }
-
-out:
- put_device(pcf->dev);
- enable_irq(pcf->irq);
-}
-
-static irqreturn_t pcf50633_irq(int irq, void *data)
-{
- struct pcf50633 *pcf = data;
-
- dev_dbg(pcf->dev, "pcf50633_irq\n");
-
- get_device(pcf->dev);
- disable_irq_nosync(pcf->irq);
- queue_work(pcf->work_queue, &pcf->irq_work);
-
- return IRQ_HANDLED;
-}
-
static void
pcf50633_client_dev_register(struct pcf50633 *pcf, const char *name,
struct platform_device **pdev)
@@ -479,70 +241,17 @@ pcf50633_client_dev_register(struct pcf50633 *pcf, const char *name,
static int pcf50633_suspend(struct i2c_client *client, pm_message_t state)
{
struct pcf50633 *pcf;
- int ret = 0, i;
- u8 res[5];
-
pcf = i2c_get_clientdata(client);
- /* Make sure our interrupt handlers are not called
- * henceforth */
- disable_irq(pcf->irq);
-
- /* Make sure that any running IRQ worker has quit */
- cancel_work_sync(&pcf->irq_work);
-
- /* Save the masks */
- ret = pcf50633_read_block(pcf, PCF50633_REG_INT1M,
- ARRAY_SIZE(pcf->suspend_irq_masks),
- pcf->suspend_irq_masks);
- if (ret < 0) {
- dev_err(pcf->dev, "error saving irq masks\n");
- goto out;
- }
-
- /* Write wakeup irq masks */
- for (i = 0; i < ARRAY_SIZE(res); i++)
- res[i] = ~pcf->pdata->resumers[i];
-
- ret = pcf50633_write_block(pcf, PCF50633_REG_INT1M,
- ARRAY_SIZE(res), &res[0]);
- if (ret < 0) {
- dev_err(pcf->dev, "error writing wakeup irq masks\n");
- goto out;
- }
-
- pcf->is_suspended = 1;
-
-out:
- return ret;
+ return pcf50633_irq_suspend(pcf);
}
static int pcf50633_resume(struct i2c_client *client)
{
struct pcf50633 *pcf;
- int ret;
-
pcf = i2c_get_clientdata(client);
- /* Write the saved mask registers */
- ret = pcf50633_write_block(pcf, PCF50633_REG_INT1M,
- ARRAY_SIZE(pcf->suspend_irq_masks),
- pcf->suspend_irq_masks);
- if (ret < 0)
- dev_err(pcf->dev, "Error restoring saved suspend masks\n");
-
- /* Restore regulators' state */
-
-
- get_device(pcf->dev);
-
- /*
- * Clear any pending interrupts and set resume reason if any.
- * This will leave with enable_irq()
- */
- pcf50633_irq_worker(&pcf->irq_work);
-
- return 0;
+ return pcf50633_irq_resume(pcf);
}
#else
#define pcf50633_suspend NULL
@@ -573,43 +282,19 @@ static int __devinit pcf50633_probe(struct i2c_client *client,
i2c_set_clientdata(client, pcf);
pcf->dev = &client->dev;
pcf->i2c_client = client;
- pcf->irq = client->irq;
- pcf->work_queue = create_singlethread_workqueue("pcf50633");
-
- if (!pcf->work_queue) {
- dev_err(&client->dev, "Failed to alloc workqueue\n");
- ret = -ENOMEM;
- goto err_free;
- }
-
- INIT_WORK(&pcf->irq_work, pcf50633_irq_worker);
version = pcf50633_reg_read(pcf, 0);
variant = pcf50633_reg_read(pcf, 1);
if (version < 0 || variant < 0) {
dev_err(pcf->dev, "Unable to probe pcf50633\n");
ret = -ENODEV;
- goto err_destroy_workqueue;
+ goto err_free;
}
dev_info(pcf->dev, "Probed device version %d variant %d\n",
version, variant);
- /* Enable all interrupts except RTC SECOND */
- pcf->mask_regs[0] = 0x80;
- pcf50633_reg_write(pcf, PCF50633_REG_INT1M, pcf->mask_regs[0]);
- pcf50633_reg_write(pcf, PCF50633_REG_INT2M, 0x00);
- pcf50633_reg_write(pcf, PCF50633_REG_INT3M, 0x00);
- pcf50633_reg_write(pcf, PCF50633_REG_INT4M, 0x00);
- pcf50633_reg_write(pcf, PCF50633_REG_INT5M, 0x00);
-
- ret = request_irq(client->irq, pcf50633_irq,
- IRQF_TRIGGER_LOW, "pcf50633", pcf);
-
- if (ret) {
- dev_err(pcf->dev, "Failed to request IRQ %d\n", ret);
- goto err_destroy_workqueue;
- }
+ pcf50633_irq_init(pcf, client->irq);
/* Create sub devices */
pcf50633_client_dev_register(pcf, "pcf50633-input",
@@ -620,6 +305,9 @@ static int __devinit pcf50633_probe(struct i2c_client *client,
&pcf->mbc_pdev);
pcf50633_client_dev_register(pcf, "pcf50633-adc",
&pcf->adc_pdev);
+ pcf50633_client_dev_register(pcf, "pcf50633-backlight",
+ &pcf->bl_pdev);
+
for (i = 0; i < PCF50633_NUM_REGULATORS; i++) {
struct platform_device *pdev;
@@ -638,10 +326,6 @@ static int __devinit pcf50633_probe(struct i2c_client *client,
platform_device_add(pdev);
}
- if (enable_irq_wake(client->irq) < 0)
- dev_err(pcf->dev, "IRQ %u cannot be enabled as wake-up source"
- "in this hardware revision", client->irq);
-
ret = sysfs_create_group(&client->dev.kobj, &pcf_attr_group);
if (ret)
dev_err(pcf->dev, "error creating sysfs entries\n");
@@ -651,8 +335,6 @@ static int __devinit pcf50633_probe(struct i2c_client *client,
return 0;
-err_destroy_workqueue:
- destroy_workqueue(pcf->work_queue);
err_free:
i2c_set_clientdata(client, NULL);
kfree(pcf);
@@ -665,8 +347,7 @@ static int __devexit pcf50633_remove(struct i2c_client *client)
struct pcf50633 *pcf = i2c_get_clientdata(client);
int i;
- free_irq(pcf->irq, pcf);
- destroy_workqueue(pcf->work_queue);
+ pcf50633_irq_free(pcf);
platform_device_unregister(pcf->input_pdev);
platform_device_unregister(pcf->rtc_pdev);
@@ -676,6 +357,7 @@ static int __devexit pcf50633_remove(struct i2c_client *client)
for (i = 0; i < PCF50633_NUM_REGULATORS; i++)
platform_device_unregister(pcf->regulator_pdev[i]);
+ i2c_set_clientdata(client, NULL);
kfree(pcf);
return 0;
diff --git a/drivers/mfd/pcf50633-irq.c b/drivers/mfd/pcf50633-irq.c
new file mode 100644
index 000000000000..1b0192f1efff
--- /dev/null
+++ b/drivers/mfd/pcf50633-irq.c
@@ -0,0 +1,318 @@
+/* NXP PCF50633 Power Management Unit (PMU) driver
+ *
+ * (C) 2006-2008 by Openmoko, Inc.
+ * Author: Harald Welte <laforge@openmoko.org>
+ * Balaji Rao <balajirrao@openmoko.org>
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+
+#include <linux/mfd/pcf50633/core.h>
+
+/* Two MBCS registers used during cold start */
+#define PCF50633_REG_MBCS1 0x4b
+#define PCF50633_REG_MBCS2 0x4c
+#define PCF50633_MBCS1_USBPRES 0x01
+#define PCF50633_MBCS1_ADAPTPRES 0x01
+
+int pcf50633_register_irq(struct pcf50633 *pcf, int irq,
+ void (*handler) (int, void *), void *data)
+{
+ if (irq < 0 || irq >= PCF50633_NUM_IRQ || !handler)
+ return -EINVAL;
+
+ if (WARN_ON(pcf->irq_handler[irq].handler))
+ return -EBUSY;
+
+ mutex_lock(&pcf->lock);
+ pcf->irq_handler[irq].handler = handler;
+ pcf->irq_handler[irq].data = data;
+ mutex_unlock(&pcf->lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pcf50633_register_irq);
+
+int pcf50633_free_irq(struct pcf50633 *pcf, int irq)
+{
+ if (irq < 0 || irq >= PCF50633_NUM_IRQ)
+ return -EINVAL;
+
+ mutex_lock(&pcf->lock);
+ pcf->irq_handler[irq].handler = NULL;
+ mutex_unlock(&pcf->lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pcf50633_free_irq);
+
+static int __pcf50633_irq_mask_set(struct pcf50633 *pcf, int irq, u8 mask)
+{
+ u8 reg, bit;
+ int ret = 0, idx;
+
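+ /* Each of the INT1M..INT5M mask registers covers eight interrupt lines */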
+ idx = irq >> 3;
+ reg = PCF50633_REG_INT1M + idx;
+ bit = 1 << (irq & 0x07);
+
+ ret = pcf50633_reg_set_bit_mask(pcf, reg, bit, mask ? bit : 0);
+
+ mutex_lock(&pcf->lock);
+
+ if (mask)
+ pcf->mask_regs[idx] |= bit;
+ else
+ pcf->mask_regs[idx] &= ~bit;
+
+ mutex_unlock(&pcf->lock);
+
+ return ret;
+}
+
+int pcf50633_irq_mask(struct pcf50633 *pcf, int irq)
+{
+ dev_dbg(pcf->dev, "Masking IRQ %d\n", irq);
+
+ return __pcf50633_irq_mask_set(pcf, irq, 1);
+}
+EXPORT_SYMBOL_GPL(pcf50633_irq_mask);
+
+int pcf50633_irq_unmask(struct pcf50633 *pcf, int irq)
+{
+ dev_dbg(pcf->dev, "Unmasking IRQ %d\n", irq);
+
+ return __pcf50633_irq_mask_set(pcf, irq, 0);
+}
+EXPORT_SYMBOL_GPL(pcf50633_irq_unmask);
+
+int pcf50633_irq_mask_get(struct pcf50633 *pcf, int irq)
+{
+ u8 reg, bits;
+
+ reg = irq >> 3;
+ bits = 1 << (irq & 0x07);
+
+ return pcf->mask_regs[reg] & bits;
+}
+EXPORT_SYMBOL_GPL(pcf50633_irq_mask_get);
+
+static void pcf50633_irq_call_handler(struct pcf50633 *pcf, int irq)
+{
+ if (pcf->irq_handler[irq].handler)
+ pcf->irq_handler[irq].handler(irq, pcf->irq_handler[irq].data);
+}
+
+/* Maximum amount of time ONKEY is held before emergency action is taken */
+#define PCF50633_ONKEY1S_TIMEOUT 8
+
+static irqreturn_t pcf50633_irq(int irq, void *data)
+{
+ struct pcf50633 *pcf = data;
+ int ret, i, j;
+ u8 pcf_int[5], chgstat;
+
+ /* Read the 5 INT regs in one transaction */
+ ret = pcf50633_read_block(pcf, PCF50633_REG_INT1,
+ ARRAY_SIZE(pcf_int), pcf_int);
+ if (ret != ARRAY_SIZE(pcf_int)) {
+ dev_err(pcf->dev, "Error reading INT registers\n");
+
+ /*
+ * If this doesn't ACK the interrupt to the chip, we'll be
+ * called once again as we're level triggered.
+ */
+ goto out;
+ }
+
+ /* defeat 8s death from lowsys on A5 */
+ pcf50633_reg_write(pcf, PCF50633_REG_OOCSHDWN, 0x04);
+
+ /* We immediately read the USB and adapter status, to make sure
+ * only one of the USBINS/USBREM IRQ handlers is called */
+ if (pcf_int[0] & (PCF50633_INT1_USBINS | PCF50633_INT1_USBREM)) {
+ chgstat = pcf50633_reg_read(pcf, PCF50633_REG_MBCS2);
+ if (chgstat & (0x3 << 4))
+ pcf_int[0] &= ~PCF50633_INT1_USBREM;
+ else
+ pcf_int[0] &= ~PCF50633_INT1_USBINS;
+ }
+
+ /* Make sure only one of ADPINS or ADPREM is set */
+ if (pcf_int[0] & (PCF50633_INT1_ADPINS | PCF50633_INT1_ADPREM)) {
+ chgstat = pcf50633_reg_read(pcf, PCF50633_REG_MBCS2);
+ if (chgstat & (0x3 << 4))
+ pcf_int[0] &= ~PCF50633_INT1_ADPREM;
+ else
+ pcf_int[0] &= ~PCF50633_INT1_ADPINS;
+ }
+
+ dev_dbg(pcf->dev, "INT1=0x%02x INT2=0x%02x INT3=0x%02x "
+ "INT4=0x%02x INT5=0x%02x\n", pcf_int[0],
+ pcf_int[1], pcf_int[2], pcf_int[3], pcf_int[4]);
+
+ /* Some revisions of the chip don't have an 8s standby mode on
+ * ONKEY1S press, so we try to do it manually in such cases. */
+ if ((pcf_int[0] & PCF50633_INT1_SECOND) && pcf->onkey1s_held) {
+ dev_info(pcf->dev, "ONKEY1S held for %d secs\n",
+ pcf->onkey1s_held);
+ if (pcf->onkey1s_held++ == PCF50633_ONKEY1S_TIMEOUT)
+ if (pcf->pdata->force_shutdown)
+ pcf->pdata->force_shutdown(pcf);
+ }
+
+ if (pcf_int[2] & PCF50633_INT3_ONKEY1S) {
+ dev_info(pcf->dev, "ONKEY1S held\n");
+ pcf->onkey1s_held = 1;
+
+ /* Unmask IRQ_SECOND */
+ pcf50633_reg_clear_bits(pcf, PCF50633_REG_INT1M,
+ PCF50633_INT1_SECOND);
+
+ /* Unmask IRQ_ONKEYR */
+ pcf50633_reg_clear_bits(pcf, PCF50633_REG_INT2M,
+ PCF50633_INT2_ONKEYR);
+ }
+
+ if ((pcf_int[1] & PCF50633_INT2_ONKEYR) && pcf->onkey1s_held) {
+ pcf->onkey1s_held = 0;
+
+ /* Mask SECOND and ONKEYR interrupts */
+ if (pcf->mask_regs[0] & PCF50633_INT1_SECOND)
+ pcf50633_reg_set_bit_mask(pcf,
+ PCF50633_REG_INT1M,
+ PCF50633_INT1_SECOND,
+ PCF50633_INT1_SECOND);
+
+ if (pcf->mask_regs[1] & PCF50633_INT2_ONKEYR)
+ pcf50633_reg_set_bit_mask(pcf,
+ PCF50633_REG_INT2M,
+ PCF50633_INT2_ONKEYR,
+ PCF50633_INT2_ONKEYR);
+ }
+
+ /* Have we just resumed ? */
+ if (pcf->is_suspended) {
+ pcf->is_suspended = 0;
+
+ /* Set the resume reason filtering out non resumers */
+ for (i = 0; i < ARRAY_SIZE(pcf_int); i++)
+ pcf->resume_reason[i] = pcf_int[i] &
+ pcf->pdata->resumers[i];
+
+ /* Make sure we don't pass on any ONKEY events to
+ * userspace now */
+ pcf_int[1] &= ~(PCF50633_INT2_ONKEYR | PCF50633_INT2_ONKEYF);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(pcf_int); i++) {
+ /* Unset masked interrupts */
+ pcf_int[i] &= ~pcf->mask_regs[i];
+
+ for (j = 0; j < 8 ; j++)
+ if (pcf_int[i] & (1 << j))
+ pcf50633_irq_call_handler(pcf, (i * 8) + j);
+ }
+
+out:
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_PM
+
+int pcf50633_irq_suspend(struct pcf50633 *pcf)
+{
+ int ret;
+ int i;
+ u8 res[5];
+
+ /* Make sure our interrupt handlers are not called
+ * henceforth */
+ disable_irq(pcf->irq);
+
+ /* Save the masks */
+ ret = pcf50633_read_block(pcf, PCF50633_REG_INT1M,
+ ARRAY_SIZE(pcf->suspend_irq_masks),
+ pcf->suspend_irq_masks);
+ if (ret < 0) {
+ dev_err(pcf->dev, "error saving irq masks\n");
+ goto out;
+ }
+
+ /* Write wakeup irq masks */
+ for (i = 0; i < ARRAY_SIZE(res); i++)
+ res[i] = ~pcf->pdata->resumers[i];
+
+ ret = pcf50633_write_block(pcf, PCF50633_REG_INT1M,
+ ARRAY_SIZE(res), &res[0]);
+ if (ret < 0) {
+ dev_err(pcf->dev, "error writing wakeup irq masks\n");
+ goto out;
+ }
+
+ pcf->is_suspended = 1;
+
+out:
+ return ret;
+}
+
+int pcf50633_irq_resume(struct pcf50633 *pcf)
+{
+ int ret;
+
+ /* Write the saved mask registers */
+ ret = pcf50633_write_block(pcf, PCF50633_REG_INT1M,
+ ARRAY_SIZE(pcf->suspend_irq_masks),
+ pcf->suspend_irq_masks);
+ if (ret < 0)
+ dev_err(pcf->dev, "Error restoring saved suspend masks\n");
+
+ enable_irq(pcf->irq);
+
+ return ret;
+}
+
+#endif
+
+int pcf50633_irq_init(struct pcf50633 *pcf, int irq)
+{
+ int ret;
+
+ pcf->irq = irq;
+
+ /* Enable all interrupts except RTC SECOND */
+ pcf->mask_regs[0] = 0x80;
+ pcf50633_reg_write(pcf, PCF50633_REG_INT1M, pcf->mask_regs[0]);
+ pcf50633_reg_write(pcf, PCF50633_REG_INT2M, 0x00);
+ pcf50633_reg_write(pcf, PCF50633_REG_INT3M, 0x00);
+ pcf50633_reg_write(pcf, PCF50633_REG_INT4M, 0x00);
+ pcf50633_reg_write(pcf, PCF50633_REG_INT5M, 0x00);
+
+ ret = request_threaded_irq(irq, NULL, pcf50633_irq,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ "pcf50633", pcf);
+
+ if (ret)
+ dev_err(pcf->dev, "Failed to request IRQ %d: %d\n", irq, ret);
+
+ if (enable_irq_wake(irq) < 0)
+ dev_err(pcf->dev, "IRQ %u cannot be enabled as wake-up source "
+ "in this hardware revision\n", irq);
+
+ return ret;
+}
+
+void pcf50633_irq_free(struct pcf50633 *pcf)
+{
+ free_irq(pcf->irq, pcf);
+}
diff --git a/drivers/mfd/rdc321x-southbridge.c b/drivers/mfd/rdc321x-southbridge.c
new file mode 100644
index 000000000000..50922975bda3
--- /dev/null
+++ b/drivers/mfd/rdc321x-southbridge.c
@@ -0,0 +1,123 @@
+/*
+ * RDC321x MFD southbridge driver
+ *
+ * Copyright (C) 2007-2010 Florian Fainelli <florian@openwrt.org>
+ * Copyright (C) 2010 Bernhard Loos <bernhardloos@googlemail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/pci.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/rdc321x.h>
+
+static struct rdc321x_wdt_pdata rdc321x_wdt_pdata;
+
+static struct resource rdc321x_wdt_resource[] = {
+ {
+ .name = "wdt-reg",
+ .start = RDC321X_WDT_CTRL,
+ .end = RDC321X_WDT_CTRL + 0x3,
+ .flags = IORESOURCE_IO,
+ }
+};
+
+static struct rdc321x_gpio_pdata rdc321x_gpio_pdata = {
+ .max_gpios = RDC321X_MAX_GPIO,
+};
+
+static struct resource rdc321x_gpio_resources[] = {
+ {
+ .name = "gpio-reg1",
+ .start = RDC321X_GPIO_CTRL_REG1,
+ .end = RDC321X_GPIO_CTRL_REG1 + 0x7,
+ .flags = IORESOURCE_IO,
+ }, {
+ .name = "gpio-reg2",
+ .start = RDC321X_GPIO_CTRL_REG2,
+ .end = RDC321X_GPIO_CTRL_REG2 + 0x7,
+ .flags = IORESOURCE_IO,
+ }
+};
+
+static struct mfd_cell rdc321x_sb_cells[] = {
+ {
+ .name = "rdc321x-wdt",
+ .resources = rdc321x_wdt_resource,
+ .num_resources = ARRAY_SIZE(rdc321x_wdt_resource),
+ .driver_data = &rdc321x_wdt_pdata,
+ }, {
+ .name = "rdc321x-gpio",
+ .resources = rdc321x_gpio_resources,
+ .num_resources = ARRAY_SIZE(rdc321x_gpio_resources),
+ .driver_data = &rdc321x_gpio_pdata,
+ },
+};
+
+static int __devinit rdc321x_sb_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int err;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable device\n");
+ return err;
+ }
+
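+ /* The GPIO and watchdog cells reach the southbridge through this PCI device */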
+ rdc321x_gpio_pdata.sb_pdev = pdev;
+ rdc321x_wdt_pdata.sb_pdev = pdev;
+
+ return mfd_add_devices(&pdev->dev, -1,
+ rdc321x_sb_cells, ARRAY_SIZE(rdc321x_sb_cells), NULL, 0);
+}
+
+static void __devexit rdc321x_sb_remove(struct pci_dev *pdev)
+{
+ mfd_remove_devices(&pdev->dev);
+}
+
+static DEFINE_PCI_DEVICE_TABLE(rdc321x_sb_table) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_RDC, PCI_DEVICE_ID_RDC_R6030) },
+ {}
+};
+
+static struct pci_driver rdc321x_sb_driver = {
+ .name = "RDC321x Southbridge",
+ .id_table = rdc321x_sb_table,
+ .probe = rdc321x_sb_probe,
+ .remove = __devexit_p(rdc321x_sb_remove),
+};
+
+static int __init rdc321x_sb_init(void)
+{
+ return pci_register_driver(&rdc321x_sb_driver);
+}
+
+static void __exit rdc321x_sb_exit(void)
+{
+ pci_unregister_driver(&rdc321x_sb_driver);
+}
+
+module_init(rdc321x_sb_init);
+module_exit(rdc321x_sb_exit);
+
+MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("RDC R-321x MFD southbridge driver");
diff --git a/drivers/mfd/sh_mobile_sdhi.c b/drivers/mfd/sh_mobile_sdhi.c
index 497f91b6138e..cd164595f08a 100644
--- a/drivers/mfd/sh_mobile_sdhi.c
+++ b/drivers/mfd/sh_mobile_sdhi.c
@@ -26,11 +26,15 @@
#include <linux/mfd/core.h>
#include <linux/mfd/tmio.h>
#include <linux/mfd/sh_mobile_sdhi.h>
+#include <linux/sh_dma.h>
struct sh_mobile_sdhi {
struct clk *clk;
struct tmio_mmc_data mmc_data;
struct mfd_cell cell_mmc;
+ struct sh_dmae_slave param_tx;
+ struct sh_dmae_slave param_rx;
+ struct tmio_mmc_dma dma_priv;
};
static struct resource sh_mobile_sdhi_resources[] = {
@@ -64,6 +68,8 @@ static void sh_mobile_sdhi_set_pwr(struct platform_device *tmio, int state)
static int __init sh_mobile_sdhi_probe(struct platform_device *pdev)
{
struct sh_mobile_sdhi *priv;
+ struct tmio_mmc_data *mmc_data;
+ struct sh_mobile_sdhi_info *p = pdev->dev.platform_data;
struct resource *mem;
char clk_name[8];
int ret, irq;
@@ -85,6 +91,8 @@ static int __init sh_mobile_sdhi_probe(struct platform_device *pdev)
return -ENOMEM;
}
+ mmc_data = &priv->mmc_data;
+
snprintf(clk_name, sizeof(clk_name), "sdhi%d", pdev->id);
priv->clk = clk_get(&pdev->dev, clk_name);
if (IS_ERR(priv->clk)) {
@@ -96,12 +104,24 @@ static int __init sh_mobile_sdhi_probe(struct platform_device *pdev)
clk_enable(priv->clk);
- priv->mmc_data.hclk = clk_get_rate(priv->clk);
- priv->mmc_data.set_pwr = sh_mobile_sdhi_set_pwr;
- priv->mmc_data.capabilities = MMC_CAP_MMC_HIGHSPEED;
+ mmc_data->hclk = clk_get_rate(priv->clk);
+ mmc_data->set_pwr = sh_mobile_sdhi_set_pwr;
+ mmc_data->capabilities = MMC_CAP_MMC_HIGHSPEED;
+ if (p) {
+ mmc_data->flags = p->tmio_flags;
+ mmc_data->ocr_mask = p->tmio_ocr_mask;
+ }
+
+ if (p && p->dma_slave_tx >= 0 && p->dma_slave_rx >= 0) {
+ priv->param_tx.slave_id = p->dma_slave_tx;
+ priv->param_rx.slave_id = p->dma_slave_rx;
+ priv->dma_priv.chan_priv_tx = &priv->param_tx;
+ priv->dma_priv.chan_priv_rx = &priv->param_rx;
+ mmc_data->dma = &priv->dma_priv;
+ }
memcpy(&priv->cell_mmc, &sh_mobile_sdhi_cell, sizeof(priv->cell_mmc));
- priv->cell_mmc.driver_data = &priv->mmc_data;
+ priv->cell_mmc.driver_data = mmc_data;
priv->cell_mmc.platform_data = &priv->cell_mmc;
priv->cell_mmc.data_size = sizeof(priv->cell_mmc);
diff --git a/drivers/mfd/t7l66xb.c b/drivers/mfd/t7l66xb.c
index da6383a934ac..5041d33adf0b 100644
--- a/drivers/mfd/t7l66xb.c
+++ b/drivers/mfd/t7l66xb.c
@@ -318,6 +318,9 @@ static int t7l66xb_probe(struct platform_device *dev)
struct resource *iomem, *rscr;
int ret;
+ if (pdata == NULL)
+ return -EINVAL;
+
iomem = platform_get_resource(dev, IORESOURCE_MEM, 0);
if (!iomem)
return -EINVAL;
diff --git a/drivers/mfd/tc35892.c b/drivers/mfd/tc35892.c
new file mode 100644
index 000000000000..715f095dd7a6
--- /dev/null
+++ b/drivers/mfd/tc35892.c
@@ -0,0 +1,347 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License Terms: GNU General Public License, version 2
+ * Author: Hanumath Prasad <hanumath.prasad@stericsson.com> for ST-Ericsson
+ * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/tc35892.h>
+
+/**
+ * tc35892_reg_read() - read a single TC35892 register
+ * @tc35892: Device to read from
+ * @reg: Register to read
+ */
+int tc35892_reg_read(struct tc35892 *tc35892, u8 reg)
+{
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(tc35892->i2c, reg);
+ if (ret < 0)
+ dev_err(tc35892->dev, "failed to read reg %#x: %d\n",
+ reg, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tc35892_reg_read);
+
+/**
+ * tc35892_reg_write() - write a single TC35892 register
+ * @tc35892: Device to write to
+ * @reg: Register to write
+ * @data: Value to write
+ */
+int tc35892_reg_write(struct tc35892 *tc35892, u8 reg, u8 data)
+{
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(tc35892->i2c, reg, data);
+ if (ret < 0)
+ dev_err(tc35892->dev, "failed to write reg %#x: %d\n",
+ reg, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tc35892_reg_write);
+
+/**
+ * tc35892_block_read() - read multiple TC35892 registers
+ * @tc35892: Device to read from
+ * @reg: First register
+ * @length: Number of registers
+ * @values: Buffer to write to
+ */
+int tc35892_block_read(struct tc35892 *tc35892, u8 reg, u8 length, u8 *values)
+{
+ int ret;
+
+ ret = i2c_smbus_read_i2c_block_data(tc35892->i2c, reg, length, values);
+ if (ret < 0)
+ dev_err(tc35892->dev, "failed to read regs %#x: %d\n",
+ reg, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tc35892_block_read);
+
+/**
+ * tc35892_block_write() - write multiple TC35892 registers
+ * @tc35892: Device to write to
+ * @reg: First register
+ * @length: Number of registers
+ * @values: Values to write
+ */
+int tc35892_block_write(struct tc35892 *tc35892, u8 reg, u8 length,
+ const u8 *values)
+{
+ int ret;
+
+ ret = i2c_smbus_write_i2c_block_data(tc35892->i2c, reg, length,
+ values);
+ if (ret < 0)
+ dev_err(tc35892->dev, "failed to write regs %#x: %d\n",
+ reg, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tc35892_block_write);
+
+/**
+ * tc35892_set_bits() - set the value of a bitfield in a TC35892 register
+ * @tc35892: Device to write to
+ * @reg: Register to write
+ * @mask: Mask of bits to set
+ * @val: Value to set
+ */
+int tc35892_set_bits(struct tc35892 *tc35892, u8 reg, u8 mask, u8 val)
+{
+ int ret;
+
+ mutex_lock(&tc35892->lock);
+
+ ret = tc35892_reg_read(tc35892, reg);
+ if (ret < 0)
+ goto out;
+
+ ret &= ~mask;
+ ret |= val;
+
+ ret = tc35892_reg_write(tc35892, reg, ret);
+
+out:
+ mutex_unlock(&tc35892->lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tc35892_set_bits);
+
+static struct resource gpio_resources[] = {
+ {
+ .start = TC35892_INT_GPIIRQ,
+ .end = TC35892_INT_GPIIRQ,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct mfd_cell tc35892_devs[] = {
+ {
+ .name = "tc35892-gpio",
+ .num_resources = ARRAY_SIZE(gpio_resources),
+ .resources = &gpio_resources[0],
+ },
+};
+
+static irqreturn_t tc35892_irq(int irq, void *data)
+{
+ struct tc35892 *tc35892 = data;
+ int status;
+
+ status = tc35892_reg_read(tc35892, TC35892_IRQST);
+ if (status < 0)
+ return IRQ_NONE;
+
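+ /* Dispatch one nested IRQ per bit set in the status register */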
+ while (status) {
+ int bit = __ffs(status);
+
+ handle_nested_irq(tc35892->irq_base + bit);
+ status &= ~(1 << bit);
+ }
+
+ /*
+ * A dummy read or write (to any register) appears to be necessary to
+ * have the last interrupt clear (for example, GPIO IC write) take
+ * effect.
+ */
+ tc35892_reg_read(tc35892, TC35892_IRQST);
+
+ return IRQ_HANDLED;
+}
+
+static void tc35892_irq_dummy(unsigned int irq)
+{
+ /* No mask/unmask at this level */
+}
+
+static struct irq_chip tc35892_irq_chip = {
+ .name = "tc35892",
+ .mask = tc35892_irq_dummy,
+ .unmask = tc35892_irq_dummy,
+};
+
+static int tc35892_irq_init(struct tc35892 *tc35892)
+{
+ int base = tc35892->irq_base;
+ int irq;
+
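+ /* Set up a nested interrupt for each internal TC35892 interrupt source */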
+ for (irq = base; irq < base + TC35892_NR_INTERNAL_IRQS; irq++) {
+ set_irq_chip_data(irq, tc35892);
+ set_irq_chip_and_handler(irq, &tc35892_irq_chip,
+ handle_edge_irq);
+ set_irq_nested_thread(irq, 1);
+#ifdef CONFIG_ARM
+ set_irq_flags(irq, IRQF_VALID);
+#else
+ set_irq_noprobe(irq);
+#endif
+ }
+
+ return 0;
+}
+
+static void tc35892_irq_remove(struct tc35892 *tc35892)
+{
+ int base = tc35892->irq_base;
+ int irq;
+
+ for (irq = base; irq < base + TC35892_NR_INTERNAL_IRQS; irq++) {
+#ifdef CONFIG_ARM
+ set_irq_flags(irq, 0);
+#endif
+ set_irq_chip_and_handler(irq, NULL, NULL);
+ set_irq_chip_data(irq, NULL);
+ }
+}
+
+static int tc35892_chip_init(struct tc35892 *tc35892)
+{
+ int manf, ver, ret;
+
+ manf = tc35892_reg_read(tc35892, TC35892_MANFCODE);
+ if (manf < 0)
+ return manf;
+
+ ver = tc35892_reg_read(tc35892, TC35892_VERSION);
+ if (ver < 0)
+ return ver;
+
+ if (manf != TC35892_MANFCODE_MAGIC) {
+ dev_err(tc35892->dev, "unknown manufacturer: %#x\n", manf);
+ return -EINVAL;
+ }
+
+ dev_info(tc35892->dev, "manufacturer: %#x, version: %#x\n", manf, ver);
+
+ /* Put everything except the IRQ module into reset */
+ ret = tc35892_reg_write(tc35892, TC35892_RSTCTRL,
+ TC35892_RSTCTRL_TIMRST
+ | TC35892_RSTCTRL_ROTRST
+ | TC35892_RSTCTRL_KBDRST
+ | TC35892_RSTCTRL_GPIRST);
+ if (ret < 0)
+ return ret;
+
+ /* Clear the reset interrupt. */
+ return tc35892_reg_write(tc35892, TC35892_RSTINTCLR, 0x1);
+}
+
+static int __devinit tc35892_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct tc35892_platform_data *pdata = i2c->dev.platform_data;
+ struct tc35892 *tc35892;
+ int ret;
+
+ if (!i2c_check_functionality(i2c->adapter, I2C_FUNC_SMBUS_BYTE_DATA
+ | I2C_FUNC_SMBUS_I2C_BLOCK))
+ return -EIO;
+
+ tc35892 = kzalloc(sizeof(struct tc35892), GFP_KERNEL);
+ if (!tc35892)
+ return -ENOMEM;
+
+ mutex_init(&tc35892->lock);
+
+ tc35892->dev = &i2c->dev;
+ tc35892->i2c = i2c;
+ tc35892->pdata = pdata;
+ tc35892->irq_base = pdata->irq_base;
+ tc35892->num_gpio = id->driver_data;
+
+ i2c_set_clientdata(i2c, tc35892);
+
+ ret = tc35892_chip_init(tc35892);
+ if (ret)
+ goto out_free;
+
+ ret = tc35892_irq_init(tc35892);
+ if (ret)
+ goto out_free;
+
+ ret = request_threaded_irq(tc35892->i2c->irq, NULL, tc35892_irq,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ "tc35892", tc35892);
+ if (ret) {
+ dev_err(tc35892->dev, "failed to request IRQ: %d\n", ret);
+ goto out_removeirq;
+ }
+
+ ret = mfd_add_devices(tc35892->dev, -1, tc35892_devs,
+ ARRAY_SIZE(tc35892_devs), NULL,
+ tc35892->irq_base);
+ if (ret) {
+ dev_err(tc35892->dev, "failed to add children\n");
+ goto out_freeirq;
+ }
+
+ return 0;
+
+out_freeirq:
+ free_irq(tc35892->i2c->irq, tc35892);
+out_removeirq:
+ tc35892_irq_remove(tc35892);
+out_free:
+ i2c_set_clientdata(i2c, NULL);
+ kfree(tc35892);
+ return ret;
+}
+
+static int __devexit tc35892_remove(struct i2c_client *client)
+{
+ struct tc35892 *tc35892 = i2c_get_clientdata(client);
+
+ mfd_remove_devices(tc35892->dev);
+
+ free_irq(tc35892->i2c->irq, tc35892);
+ tc35892_irq_remove(tc35892);
+
+ i2c_set_clientdata(client, NULL);
+ kfree(tc35892);
+
+ return 0;
+}
+
+static const struct i2c_device_id tc35892_id[] = {
+ { "tc35892", 24 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tc35892_id);
+
+static struct i2c_driver tc35892_driver = {
+ .driver.name = "tc35892",
+ .driver.owner = THIS_MODULE,
+ .probe = tc35892_probe,
+ .remove = __devexit_p(tc35892_remove),
+ .id_table = tc35892_id,
+};
+
+static int __init tc35892_init(void)
+{
+ return i2c_add_driver(&tc35892_driver);
+}
+subsys_initcall(tc35892_init);
+
+static void __exit tc35892_exit(void)
+{
+ i2c_del_driver(&tc35892_driver);
+}
+module_exit(tc35892_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TC35892 MFD core driver");
+MODULE_AUTHOR("Hanumath Prasad, Rabin Vincent");
diff --git a/drivers/mfd/timberdale.c b/drivers/mfd/timberdale.c
index 7f478ec4184b..ac5995026c88 100644
--- a/drivers/mfd/timberdale.c
+++ b/drivers/mfd/timberdale.c
@@ -31,6 +31,7 @@
#include <linux/i2c.h>
#include <linux/i2c-ocores.h>
+#include <linux/i2c-xiic.h>
#include <linux/i2c/tsc2007.h>
#include <linux/spi/spi.h>
@@ -40,6 +41,8 @@
#include <media/timb_radio.h>
+#include <linux/timb_dma.h>
+
#include "timberdale.h"
#define DRIVER_NAME "timberdale"
@@ -69,6 +72,12 @@ static struct i2c_board_info timberdale_i2c_board_info[] = {
},
};
+static __devinitdata struct xiic_i2c_platform_data
+timberdale_xiic_platform_data = {
+ .devices = timberdale_i2c_board_info,
+ .num_devices = ARRAY_SIZE(timberdale_i2c_board_info)
+};
+
static __devinitdata struct ocores_i2c_platform_data
timberdale_ocores_platform_data = {
.regstep = 4,
@@ -77,7 +86,20 @@ timberdale_ocores_platform_data = {
.num_devices = ARRAY_SIZE(timberdale_i2c_board_info)
};
-const static __devinitconst struct resource timberdale_ocores_resources[] = {
+static const __devinitconst struct resource timberdale_xiic_resources[] = {
+ {
+ .start = XIICOFFSET,
+ .end = XIICEND,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = IRQ_TIMBERDALE_I2C,
+ .end = IRQ_TIMBERDALE_I2C,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static const __devinitconst struct resource timberdale_ocores_resources[] = {
{
.start = OCORESOFFSET,
.end = OCORESEND,
@@ -126,7 +148,7 @@ static __devinitdata struct xspi_platform_data timberdale_xspi_platform_data = {
*/
};
-const static __devinitconst struct resource timberdale_spi_resources[] = {
+static const __devinitconst struct resource timberdale_spi_resources[] = {
{
.start = SPIOFFSET,
.end = SPIEND,
@@ -139,7 +161,7 @@ const static __devinitconst struct resource timberdale_spi_resources[] = {
},
};
-const static __devinitconst struct resource timberdale_eth_resources[] = {
+static const __devinitconst struct resource timberdale_eth_resources[] = {
{
.start = ETHOFFSET,
.end = ETHEND,
@@ -159,7 +181,7 @@ static __devinitdata struct timbgpio_platform_data
.irq_base = 200,
};
-const static __devinitconst struct resource timberdale_gpio_resources[] = {
+static const __devinitconst struct resource timberdale_gpio_resources[] = {
{
.start = GPIOOFFSET,
.end = GPIOEND,
@@ -172,7 +194,7 @@ const static __devinitconst struct resource timberdale_gpio_resources[] = {
},
};
-const static __devinitconst struct resource timberdale_mlogicore_resources[] = {
+static const __devinitconst struct resource timberdale_mlogicore_resources[] = {
{
.start = MLCOREOFFSET,
.end = MLCOREEND,
@@ -190,7 +212,7 @@ const static __devinitconst struct resource timberdale_mlogicore_resources[] = {
},
};
-const static __devinitconst struct resource timberdale_uart_resources[] = {
+static const __devinitconst struct resource timberdale_uart_resources[] = {
{
.start = UARTOFFSET,
.end = UARTEND,
@@ -203,7 +225,7 @@ const static __devinitconst struct resource timberdale_uart_resources[] = {
},
};
-const static __devinitconst struct resource timberdale_uartlite_resources[] = {
+static const __devinitconst struct resource timberdale_uartlite_resources[] = {
{
.start = UARTLITEOFFSET,
.end = UARTLITEEND,
@@ -216,7 +238,7 @@ const static __devinitconst struct resource timberdale_uartlite_resources[] = {
},
};
-const static __devinitconst struct resource timberdale_radio_resources[] = {
+static const __devinitconst struct resource timberdale_radio_resources[] = {
{
.start = RDSOFFSET,
.end = RDSEND,
@@ -250,7 +272,66 @@ static __devinitdata struct timb_radio_platform_data
}
};
-const static __devinitconst struct resource timberdale_dma_resources[] = {
+static __devinitdata struct timb_dma_platform_data timb_dma_platform_data = {
+ .nr_channels = 10,
+ .channels = {
+ {
+ /* UART RX */
+ .rx = true,
+ .descriptors = 2,
+ .descriptor_elements = 1
+ },
+ {
+ /* UART TX */
+ .rx = false,
+ .descriptors = 2,
+ .descriptor_elements = 1
+ },
+ {
+ /* MLB RX */
+ .rx = true,
+ .descriptors = 2,
+ .descriptor_elements = 1
+ },
+ {
+ /* MLB TX */
+ .rx = false,
+ .descriptors = 2,
+ .descriptor_elements = 1
+ },
+ {
+ /* Video RX */
+ .rx = true,
+ .bytes_per_line = 1440,
+ .descriptors = 2,
+ .descriptor_elements = 16
+ },
+ {
+ /* Video framedrop */
+ },
+ {
+ /* SDHCI RX */
+ .rx = true,
+ },
+ {
+ /* SDHCI TX */
+ },
+ {
+ /* ETH RX */
+ .rx = true,
+ .descriptors = 2,
+ .descriptor_elements = 1
+ },
+ {
+ /* ETH TX */
+ .rx = false,
+ .descriptors = 2,
+ .descriptor_elements = 1
+ },
+ }
+};
+
+static const __devinitconst struct resource timberdale_dma_resources[] = {
{
.start = DMAOFFSET,
.end = DMAEND,
@@ -265,11 +346,25 @@ const static __devinitconst struct resource timberdale_dma_resources[] = {
static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg0[] = {
{
+ .name = "timb-dma",
+ .num_resources = ARRAY_SIZE(timberdale_dma_resources),
+ .resources = timberdale_dma_resources,
+ .platform_data = &timb_dma_platform_data,
+ .data_size = sizeof(timb_dma_platform_data),
+ },
+ {
.name = "timb-uart",
.num_resources = ARRAY_SIZE(timberdale_uart_resources),
.resources = timberdale_uart_resources,
},
{
+ .name = "xiic-i2c",
+ .num_resources = ARRAY_SIZE(timberdale_xiic_resources),
+ .resources = timberdale_xiic_resources,
+ .platform_data = &timberdale_xiic_platform_data,
+ .data_size = sizeof(timberdale_xiic_platform_data),
+ },
+ {
.name = "timb-gpio",
.num_resources = ARRAY_SIZE(timberdale_gpio_resources),
.resources = timberdale_gpio_resources,
@@ -295,14 +390,16 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg0[] = {
.num_resources = ARRAY_SIZE(timberdale_eth_resources),
.resources = timberdale_eth_resources,
},
+};
+
+static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg1[] = {
{
.name = "timb-dma",
.num_resources = ARRAY_SIZE(timberdale_dma_resources),
.resources = timberdale_dma_resources,
+ .platform_data = &timb_dma_platform_data,
+ .data_size = sizeof(timb_dma_platform_data),
},
-};
-
-static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg1[] = {
{
.name = "timb-uart",
.num_resources = ARRAY_SIZE(timberdale_uart_resources),
@@ -314,6 +411,13 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg1[] = {
.resources = timberdale_uartlite_resources,
},
{
+ .name = "xiic-i2c",
+ .num_resources = ARRAY_SIZE(timberdale_xiic_resources),
+ .resources = timberdale_xiic_resources,
+ .platform_data = &timberdale_xiic_platform_data,
+ .data_size = sizeof(timberdale_xiic_platform_data),
+ },
+ {
.name = "timb-gpio",
.num_resources = ARRAY_SIZE(timberdale_gpio_resources),
.resources = timberdale_gpio_resources,
@@ -344,20 +448,29 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg1[] = {
.num_resources = ARRAY_SIZE(timberdale_eth_resources),
.resources = timberdale_eth_resources,
},
+};
+
+static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg2[] = {
{
.name = "timb-dma",
.num_resources = ARRAY_SIZE(timberdale_dma_resources),
.resources = timberdale_dma_resources,
+ .platform_data = &timb_dma_platform_data,
+ .data_size = sizeof(timb_dma_platform_data),
},
-};
-
-static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg2[] = {
{
.name = "timb-uart",
.num_resources = ARRAY_SIZE(timberdale_uart_resources),
.resources = timberdale_uart_resources,
},
{
+ .name = "xiic-i2c",
+ .num_resources = ARRAY_SIZE(timberdale_xiic_resources),
+ .resources = timberdale_xiic_resources,
+ .platform_data = &timberdale_xiic_platform_data,
+ .data_size = sizeof(timberdale_xiic_platform_data),
+ },
+ {
.name = "timb-gpio",
.num_resources = ARRAY_SIZE(timberdale_gpio_resources),
.resources = timberdale_gpio_resources,
@@ -378,14 +491,16 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg2[] = {
.platform_data = &timberdale_xspi_platform_data,
.data_size = sizeof(timberdale_xspi_platform_data),
},
+};
+
+static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg3[] = {
{
.name = "timb-dma",
.num_resources = ARRAY_SIZE(timberdale_dma_resources),
.resources = timberdale_dma_resources,
+ .platform_data = &timb_dma_platform_data,
+ .data_size = sizeof(timb_dma_platform_data),
},
-};
-
-static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg3[] = {
{
.name = "timb-uart",
.num_resources = ARRAY_SIZE(timberdale_uart_resources),
@@ -424,11 +539,6 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg3[] = {
.num_resources = ARRAY_SIZE(timberdale_eth_resources),
.resources = timberdale_eth_resources,
},
- {
- .name = "timb-dma",
- .num_resources = ARRAY_SIZE(timberdale_dma_resources),
- .resources = timberdale_dma_resources,
- },
};
static const __devinitconst struct resource timberdale_sdhc_resources[] = {
diff --git a/drivers/mfd/timberdale.h b/drivers/mfd/timberdale.h
index 8d27ffabc25d..c11bf6ebfe00 100644
--- a/drivers/mfd/timberdale.h
+++ b/drivers/mfd/timberdale.h
@@ -23,7 +23,7 @@
#ifndef MFD_TIMBERDALE_H
#define MFD_TIMBERDALE_H
-#define DRV_VERSION "0.1"
+#define DRV_VERSION "0.2"
/* This driver only supports versions >= 3.8 and < 4.0 */
#define TIMB_SUPPORTED_MAJOR 3
@@ -66,7 +66,7 @@
#define CHIPCTLOFFSET 0x800
#define CHIPCTLEND 0x8ff
-#define CHIPCTLSIZE (CHIPCTLEND - CHIPCTLOFFSET)
+#define CHIPCTLSIZE (CHIPCTLEND - CHIPCTLOFFSET + 1)
#define INTCOFFSET 0xc00
#define INTCEND 0xfff
@@ -127,4 +127,16 @@
#define GPIO_PIN_BT_RST 15
#define GPIO_NR_PINS 16
+/* DMA Channels */
+#define DMA_UART_RX 0
+#define DMA_UART_TX 1
+#define DMA_MLB_RX 2
+#define DMA_MLB_TX 3
+#define DMA_VIDEO_RX 4
+#define DMA_VIDEO_DROP 5
+#define DMA_SDHCI_RX 6
+#define DMA_SDHCI_TX 7
+#define DMA_ETH_RX 8
+#define DMA_ETH_TX 9
+
#endif
diff --git a/drivers/mfd/tps65010.c b/drivers/mfd/tps65010.c
index e5955306c2fa..9b22a77f70f5 100644
--- a/drivers/mfd/tps65010.c
+++ b/drivers/mfd/tps65010.c
@@ -530,8 +530,8 @@ static int __exit tps65010_remove(struct i2c_client *client)
cancel_delayed_work(&tps->work);
flush_scheduled_work();
debugfs_remove(tps->file);
- kfree(tps);
i2c_set_clientdata(client, NULL);
+ kfree(tps);
the_tps = NULL;
return 0;
}
diff --git a/drivers/mfd/tps6507x.c b/drivers/mfd/tps6507x.c
new file mode 100644
index 000000000000..d859dffed39f
--- /dev/null
+++ b/drivers/mfd/tps6507x.c
@@ -0,0 +1,159 @@
+/*
+ * tps6507x.c -- TPS6507x chip family multi-function driver
+ *
+ * Copyright (c) 2010 RidgeRun (todd.fischer@ridgerun.com)
+ *
+ * Author: Todd Fischer
+ * todd.fischer@ridgerun.com
+ *
+ * Credits:
+ *
+ * Using code from wm831x-*.c, wm8400-core, Wolfson Microelectronics PLC.
+ *
+ * For licencing details see kernel-base/COPYING
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/tps6507x.h>
+
+static struct mfd_cell tps6507x_devs[] = {
+ {
+ .name = "tps6507x-pmic",
+ },
+ {
+ .name = "tps6507x-ts",
+ },
+};
+
+
+static int tps6507x_i2c_read_device(struct tps6507x_dev *tps6507x, char reg,
+ int bytes, void *dest)
+{
+ struct i2c_client *i2c = tps6507x->i2c_client;
+ struct i2c_msg xfer[2];
+ int ret;
+
+ /* Write register */
+ xfer[0].addr = i2c->addr;
+ xfer[0].flags = 0;
+ xfer[0].len = 1;
+ xfer[0].buf = &reg;
+
+ /* Read data */
+ xfer[1].addr = i2c->addr;
+ xfer[1].flags = I2C_M_RD;
+ xfer[1].len = bytes;
+ xfer[1].buf = dest;
+
+ ret = i2c_transfer(i2c->adapter, xfer, 2);
+ if (ret == 2)
+ ret = 0;
+ else if (ret >= 0)
+ ret = -EIO;
+
+ return ret;
+}
+
+static int tps6507x_i2c_write_device(struct tps6507x_dev *tps6507x, char reg,
+ int bytes, void *src)
+{
+ struct i2c_client *i2c = tps6507x->i2c_client;
+ /* we add 1 byte for device register */
+ u8 msg[TPS6507X_MAX_REGISTER + 1];
+ int ret;
+
+ if (bytes > TPS6507X_MAX_REGISTER)
+ return -EINVAL;
+
+ msg[0] = reg;
+ memcpy(&msg[1], src, bytes);
+
+ ret = i2c_master_send(i2c, msg, bytes + 1);
+ if (ret < 0)
+ return ret;
+ if (ret != bytes + 1)
+ return -EIO;
+ return 0;
+}
+
+static int tps6507x_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct tps6507x_dev *tps6507x;
+ int ret = 0;
+
+ tps6507x = kzalloc(sizeof(struct tps6507x_dev), GFP_KERNEL);
+ if (tps6507x == NULL)
+ return -ENOMEM;
+
+ i2c_set_clientdata(i2c, tps6507x);
+ tps6507x->dev = &i2c->dev;
+ tps6507x->i2c_client = i2c;
+ tps6507x->read_dev = tps6507x_i2c_read_device;
+ tps6507x->write_dev = tps6507x_i2c_write_device;
+
+ ret = mfd_add_devices(tps6507x->dev, -1,
+ tps6507x_devs, ARRAY_SIZE(tps6507x_devs),
+ NULL, 0);
+
+ if (ret < 0)
+ goto err;
+
+ return ret;
+
+err:
+ mfd_remove_devices(tps6507x->dev);
+ kfree(tps6507x);
+ return ret;
+}
+
+static int tps6507x_i2c_remove(struct i2c_client *i2c)
+{
+ struct tps6507x_dev *tps6507x = i2c_get_clientdata(i2c);
+
+ mfd_remove_devices(tps6507x->dev);
+ kfree(tps6507x);
+
+ return 0;
+}
+
+static const struct i2c_device_id tps6507x_i2c_id[] = {
+ { "tps6507x", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tps6507x_i2c_id);
+
+
+static struct i2c_driver tps6507x_i2c_driver = {
+ .driver = {
+ .name = "tps6507x",
+ .owner = THIS_MODULE,
+ },
+ .probe = tps6507x_i2c_probe,
+ .remove = tps6507x_i2c_remove,
+ .id_table = tps6507x_i2c_id,
+};
+
+static int __init tps6507x_i2c_init(void)
+{
+ return i2c_add_driver(&tps6507x_i2c_driver);
+}
+/* init early so consumer devices can complete system boot */
+subsys_initcall(tps6507x_i2c_init);
+
+static void __exit tps6507x_i2c_exit(void)
+{
+ i2c_del_driver(&tps6507x_i2c_driver);
+}
+module_exit(tps6507x_i2c_exit);
+
+MODULE_DESCRIPTION("TPS6507x chip family multi-function driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
index 562cd4935e17..720e099e506d 100644
--- a/drivers/mfd/twl-core.c
+++ b/drivers/mfd/twl-core.c
@@ -109,7 +109,7 @@
#endif
#if defined(CONFIG_TWL4030_CODEC) || defined(CONFIG_TWL4030_CODEC_MODULE) ||\
- defined(CONFIG_SND_SOC_TWL6030) || defined(CONFIG_SND_SOC_TWL6030_MODULE)
+ defined(CONFIG_SND_SOC_TWL6040) || defined(CONFIG_SND_SOC_TWL6040_MODULE)
#define twl_has_codec() true
#else
#define twl_has_codec() false
@@ -708,7 +708,7 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features)
/* Phoenix*/
if (twl_has_codec() && pdata->codec && twl_class_is_6030()) {
sub_chip_id = twl_map[TWL_MODULE_AUDIO_VOICE].sid;
- child = add_child(sub_chip_id, "twl6030_codec",
+ child = add_child(sub_chip_id, "twl6040_codec",
pdata->codec, sizeof(*pdata->codec),
false, 0, 0);
if (IS_ERR(child))
diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
index 202bdd59632d..097f24d8bceb 100644
--- a/drivers/mfd/twl4030-irq.c
+++ b/drivers/mfd/twl4030-irq.c
@@ -232,10 +232,11 @@ static const struct sih sih_modules_twl5031[8] = {
},
[6] = {
/*
- * ACI doesn't use the same SIH organization.
- * For example, it supports only one interrupt line
+ * ECI/DBI doesn't use the same SIH organization.
+ * For example, it supports only one interrupt output line.
+ * That is, the interrupts are seen on both INT1 and INT2 lines.
*/
- .name = "aci",
+ .name = "eci_dbi",
.module = TWL5031_MODULE_ACCESSORY,
.bits = 9,
.bytes_ixr = 2,
@@ -247,8 +248,8 @@ static const struct sih sih_modules_twl5031[8] = {
},
[7] = {
- /* Accessory */
- .name = "acc",
+ /* Audio accessory */
+ .name = "audio",
.module = TWL5031_MODULE_ACCESSORY,
.control_offset = TWL5031_ACCSIHCTRL,
.bits = 2,
diff --git a/drivers/mfd/wm831x-core.c b/drivers/mfd/wm831x-core.c
index a3d5728b6449..1a968f34d679 100644
--- a/drivers/mfd/wm831x-core.c
+++ b/drivers/mfd/wm831x-core.c
@@ -322,7 +322,11 @@ EXPORT_SYMBOL_GPL(wm831x_set_bits);
*/
int wm831x_auxadc_read(struct wm831x *wm831x, enum wm831x_auxadc input)
{
- int ret, src;
+ int ret, src, irq_masked, timeout;
+
+ /* If the AUXADC interrupt is masked we will have to poll for completion */
+ irq_masked = wm831x_reg_read(wm831x, WM831X_INTERRUPT_STATUS_1_MASK);
+ irq_masked &= WM831X_AUXADC_DATA_EINT;
mutex_lock(&wm831x->auxadc_lock);
@@ -342,6 +346,9 @@ int wm831x_auxadc_read(struct wm831x *wm831x, enum wm831x_auxadc input)
goto out;
}
+ /* Clear any notification from a very late arriving interrupt */
+ try_wait_for_completion(&wm831x->auxadc_done);
+
ret = wm831x_set_bits(wm831x, WM831X_AUXADC_CONTROL,
WM831X_AUX_CVT_ENA, WM831X_AUX_CVT_ENA);
if (ret < 0) {
@@ -349,19 +356,46 @@ int wm831x_auxadc_read(struct wm831x *wm831x, enum wm831x_auxadc input)
goto disable;
}
- /* Ignore the result to allow us to soldier on without IRQ hookup */
- wait_for_completion_timeout(&wm831x->auxadc_done, msecs_to_jiffies(5));
-
- ret = wm831x_reg_read(wm831x, WM831X_AUXADC_CONTROL);
- if (ret < 0) {
- dev_err(wm831x->dev, "AUXADC status read failed: %d\n", ret);
- goto disable;
- }
-
- if (ret & WM831X_AUX_CVT_ENA) {
- dev_err(wm831x->dev, "Timed out reading AUXADC\n");
- ret = -EBUSY;
- goto disable;
+ if (irq_masked) {
+ /* If we're not using interrupts then poll the
+ * interrupt status register */
+ timeout = 5;
+ while (timeout) {
+ msleep(1);
+
+ ret = wm831x_reg_read(wm831x,
+ WM831X_INTERRUPT_STATUS_1);
+ if (ret < 0) {
+ dev_err(wm831x->dev,
+ "ISR 1 read failed: %d\n", ret);
+ goto disable;
+ }
+
+ /* Did it complete? */
+ if (ret & WM831X_AUXADC_DATA_EINT) {
+ wm831x_reg_write(wm831x,
+ WM831X_INTERRUPT_STATUS_1,
+ WM831X_AUXADC_DATA_EINT);
+ break;
+ }
+
+ timeout--;
+ if (!timeout) {
+ dev_err(wm831x->dev,
+ "AUXADC conversion timeout\n");
+ ret = -EBUSY;
+ goto disable;
+ }
+ }
+ } else {
+ /* If we are using interrupts then wait for the
+ * interrupt to complete. Use an extremely long
+ * timeout to handle situations with heavy load where
+ * the notification of the interrupt may be delayed by
+ * threaded IRQ handling. */
+ if (!wait_for_completion_timeout(&wm831x->auxadc_done,
+ msecs_to_jiffies(500))) {
+ dev_err(wm831x->dev, "Timed out waiting for AUXADC\n");
+ ret = -EBUSY;
+ goto disable;
+ }
}
ret = wm831x_reg_read(wm831x, WM831X_AUXADC_DATA);
@@ -1460,6 +1494,7 @@ static int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
case WM8310:
parent = WM8310;
wm831x->num_gpio = 16;
+ wm831x->charger_irq_wake = 1;
if (rev > 0) {
wm831x->has_gpio_ena = 1;
wm831x->has_cs_sts = 1;
@@ -1471,6 +1506,7 @@ static int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
case WM8311:
parent = WM8311;
wm831x->num_gpio = 16;
+ wm831x->charger_irq_wake = 1;
if (rev > 0) {
wm831x->has_gpio_ena = 1;
wm831x->has_cs_sts = 1;
@@ -1482,6 +1518,7 @@ static int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
case WM8312:
parent = WM8312;
wm831x->num_gpio = 16;
+ wm831x->charger_irq_wake = 1;
if (rev > 0) {
wm831x->has_gpio_ena = 1;
wm831x->has_cs_sts = 1;
@@ -1620,6 +1657,42 @@ static void wm831x_device_exit(struct wm831x *wm831x)
kfree(wm831x);
}
+static int wm831x_device_suspend(struct wm831x *wm831x)
+{
+ int reg, mask;
+
+ /* If the charger IRQs are a wake source then acknowledge them
+ * even if they're not actively being used (eg, no power driver
+ * or no IRQ line wired up), otherwise suspend won't last very
+ * long.
+ */
+ if (wm831x->charger_irq_wake) {
+ reg = wm831x_reg_read(wm831x, WM831X_INTERRUPT_STATUS_2_MASK);
+
+ mask = WM831X_CHG_BATT_HOT_EINT |
+ WM831X_CHG_BATT_COLD_EINT |
+ WM831X_CHG_BATT_FAIL_EINT |
+ WM831X_CHG_OV_EINT | WM831X_CHG_END_EINT |
+ WM831X_CHG_TO_EINT | WM831X_CHG_MODE_EINT |
+ WM831X_CHG_START_EINT;
+
+ /* If any of the interrupts are masked read the statuses */
+ if (reg & mask)
+ reg = wm831x_reg_read(wm831x,
+ WM831X_INTERRUPT_STATUS_2);
+
+ if (reg & mask) {
+ dev_info(wm831x->dev,
+ "Acknowledging masked charger IRQs: %x\n",
+ reg & mask);
+ wm831x_reg_write(wm831x, WM831X_INTERRUPT_STATUS_2,
+ reg & mask);
+ }
+ }
+
+ return 0;
+}
+
static int wm831x_i2c_read_device(struct wm831x *wm831x, unsigned short reg,
int bytes, void *dest)
{
@@ -1694,6 +1767,13 @@ static int wm831x_i2c_remove(struct i2c_client *i2c)
return 0;
}
+static int wm831x_i2c_suspend(struct i2c_client *i2c, pm_message_t mesg)
+{
+ struct wm831x *wm831x = i2c_get_clientdata(i2c);
+
+ return wm831x_device_suspend(wm831x);
+}
+
static const struct i2c_device_id wm831x_i2c_id[] = {
{ "wm8310", WM8310 },
{ "wm8311", WM8311 },
@@ -1711,6 +1791,7 @@ static struct i2c_driver wm831x_i2c_driver = {
},
.probe = wm831x_i2c_probe,
.remove = wm831x_i2c_remove,
+ .suspend = wm831x_i2c_suspend,
.id_table = wm831x_i2c_id,
};
diff --git a/drivers/mfd/wm831x-irq.c b/drivers/mfd/wm831x-irq.c
index 301327697117..7dabe4dbd373 100644
--- a/drivers/mfd/wm831x-irq.c
+++ b/drivers/mfd/wm831x-irq.c
@@ -21,6 +21,7 @@
#include <linux/mfd/wm831x/core.h>
#include <linux/mfd/wm831x/pdata.h>
+#include <linux/mfd/wm831x/gpio.h>
#include <linux/mfd/wm831x/irq.h>
#include <linux/delay.h>
@@ -38,8 +39,6 @@ struct wm831x_irq_data {
int primary;
int reg;
int mask;
- irq_handler_t handler;
- void *handler_data;
};
static struct wm831x_irq_data wm831x_irqs[] = {
@@ -388,12 +387,41 @@ static void wm831x_irq_mask(unsigned int irq)
wm831x->irq_masks_cur[irq_data->reg - 1] |= irq_data->mask;
}
+static int wm831x_irq_set_type(unsigned int irq, unsigned int type)
+{
+ struct wm831x *wm831x = get_irq_chip_data(irq);
+ int val;
+
+ irq = irq - wm831x->irq_base;
+
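+ /* Only the GPIO IRQs have configurable trigger types */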
+ if (irq < WM831X_IRQ_GPIO_1 || irq > WM831X_IRQ_GPIO_11)
+ return -EINVAL;
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_BOTH:
+ val = WM831X_GPN_INT_MODE;
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ val = WM831X_GPN_POL;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ val = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return wm831x_set_bits(wm831x, WM831X_GPIO1_CONTROL + irq,
+ WM831X_GPN_INT_MODE | WM831X_GPN_POL, val);
+}
+
static struct irq_chip wm831x_irq_chip = {
.name = "wm831x",
.bus_lock = wm831x_irq_lock,
.bus_sync_unlock = wm831x_irq_sync_unlock,
.mask = wm831x_irq_mask,
.unmask = wm831x_irq_unmask,
+ .set_type = wm831x_irq_set_type,
};
/* The processing of the primary interrupt occurs in a thread so that
@@ -462,6 +490,14 @@ int wm831x_irq_init(struct wm831x *wm831x, int irq)
mutex_init(&wm831x->irq_lock);
+ /* Mask the individual interrupt sources */
+ for (i = 0; i < ARRAY_SIZE(wm831x->irq_masks_cur); i++) {
+ wm831x->irq_masks_cur[i] = 0xffff;
+ wm831x->irq_masks_cache[i] = 0xffff;
+ wm831x_reg_write(wm831x, WM831X_INTERRUPT_STATUS_1_MASK + i,
+ 0xffff);
+ }
+
if (!irq) {
dev_warn(wm831x->dev,
"No interrupt specified - functionality limited\n");
@@ -477,14 +513,6 @@ int wm831x_irq_init(struct wm831x *wm831x, int irq)
wm831x->irq = irq;
wm831x->irq_base = pdata->irq_base;
- /* Mask the individual interrupt sources */
- for (i = 0; i < ARRAY_SIZE(wm831x->irq_masks_cur); i++) {
- wm831x->irq_masks_cur[i] = 0xffff;
- wm831x->irq_masks_cache[i] = 0xffff;
- wm831x_reg_write(wm831x, WM831X_INTERRUPT_STATUS_1_MASK + i,
- 0xffff);
- }
-
/* Register them with genirq */
for (cur_irq = wm831x->irq_base;
cur_irq < ARRAY_SIZE(wm831x_irqs) + wm831x->irq_base;
diff --git a/drivers/mfd/wm8350-core.c b/drivers/mfd/wm8350-core.c
index e400a3bed063..b5807484b4c9 100644
--- a/drivers/mfd/wm8350-core.c
+++ b/drivers/mfd/wm8350-core.c
@@ -363,6 +363,10 @@ int wm8350_read_auxadc(struct wm8350 *wm8350, int channel, int scale, int vref)
reg |= 1 << channel | WM8350_AUXADC_POLL;
wm8350_reg_write(wm8350, WM8350_DIGITISER_CONTROL_1, reg);
+ /* If a late IRQ left the completion signalled then consume
+ * the completion. */
+ try_wait_for_completion(&wm8350->auxadc_done);
+
/* We ignore the result of the completion and just check for a
* conversion result, allowing us to soldier on if the IRQ
* infrastructure is not set up for the chip. */
diff --git a/drivers/mfd/wm8350-i2c.c b/drivers/mfd/wm8350-i2c.c
index 65830f57c093..7795af4b1fe1 100644
--- a/drivers/mfd/wm8350-i2c.c
+++ b/drivers/mfd/wm8350-i2c.c
@@ -64,10 +64,8 @@ static int wm8350_i2c_probe(struct i2c_client *i2c,
int ret = 0;
wm8350 = kzalloc(sizeof(struct wm8350), GFP_KERNEL);
- if (wm8350 == NULL) {
- kfree(i2c);
+ if (wm8350 == NULL)
return -ENOMEM;
- }
i2c_set_clientdata(i2c, wm8350);
wm8350->dev = &i2c->dev;
@@ -82,6 +80,7 @@ static int wm8350_i2c_probe(struct i2c_client *i2c,
return ret;
err:
+ i2c_set_clientdata(i2c, NULL);
kfree(wm8350);
return ret;
}
@@ -91,6 +90,7 @@ static int wm8350_i2c_remove(struct i2c_client *i2c)
struct wm8350 *wm8350 = i2c_get_clientdata(i2c);
wm8350_device_exit(wm8350);
+ i2c_set_clientdata(i2c, NULL);
kfree(wm8350);
return 0;
diff --git a/drivers/mfd/wm8400-core.c b/drivers/mfd/wm8400-core.c
index 865ce013a821..e08aafa663dc 100644
--- a/drivers/mfd/wm8400-core.c
+++ b/drivers/mfd/wm8400-core.c
@@ -118,7 +118,7 @@ static int wm8400_read(struct wm8400 *wm8400, u8 reg, int num_regs, u16 *dest)
{
int i, ret = 0;
- BUG_ON(reg + num_regs - 1 > ARRAY_SIZE(wm8400->reg_cache));
+ BUG_ON(reg + num_regs > ARRAY_SIZE(wm8400->reg_cache));
/* If there are any volatile reads then read back the entire block */
for (i = reg; i < reg + num_regs; i++)
@@ -144,7 +144,7 @@ static int wm8400_write(struct wm8400 *wm8400, u8 reg, int num_regs,
{
int ret, i;
- BUG_ON(reg + num_regs - 1 > ARRAY_SIZE(wm8400->reg_cache));
+ BUG_ON(reg + num_regs > ARRAY_SIZE(wm8400->reg_cache));
for (i = 0; i < num_regs; i++) {
BUG_ON(!reg_data[reg + i].writable);
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
index cc524df10aa1..ec71c9368906 100644
--- a/drivers/mfd/wm8994-core.c
+++ b/drivers/mfd/wm8994-core.c
@@ -173,9 +173,34 @@ static struct mfd_cell wm8994_regulator_devs[] = {
{ .name = "wm8994-ldo", .id = 2 },
};
+static struct resource wm8994_codec_resources[] = {
+ {
+ .start = WM8994_IRQ_TEMP_SHUT,
+ .end = WM8994_IRQ_TEMP_WARN,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct resource wm8994_gpio_resources[] = {
+ {
+ .start = WM8994_IRQ_GPIO(1),
+ .end = WM8994_IRQ_GPIO(11),
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
static struct mfd_cell wm8994_devs[] = {
- { .name = "wm8994-codec" },
- { .name = "wm8994-gpio" },
+ {
+ .name = "wm8994-codec",
+ .num_resources = ARRAY_SIZE(wm8994_codec_resources),
+ .resources = wm8994_codec_resources,
+ },
+
+ {
+ .name = "wm8994-gpio",
+ .num_resources = ARRAY_SIZE(wm8994_gpio_resources),
+ .resources = wm8994_gpio_resources,
+ },
};
/*
@@ -236,6 +261,11 @@ static int wm8994_device_resume(struct device *dev)
return ret;
}
+ ret = wm8994_write(wm8994, WM8994_INTERRUPT_STATUS_1_MASK,
+ WM8994_NUM_IRQ_REGS * 2, &wm8994->irq_masks_cur);
+ if (ret < 0)
+ dev_err(dev, "Failed to restore interrupt masks: %d\n", ret);
+
ret = wm8994_write(wm8994, WM8994_LDO_1, WM8994_NUM_LDO_REGS * 2,
&wm8994->ldo_regs);
if (ret < 0)
@@ -348,6 +378,7 @@ static int wm8994_device_init(struct wm8994 *wm8994, unsigned long id, int irq)
if (pdata) {
+ wm8994->irq_base = pdata->irq_base;
wm8994->gpio_base = pdata->gpio_base;
/* GPIO configuration is only applied if it's non-zero */
@@ -375,16 +406,20 @@ static int wm8994_device_init(struct wm8994 *wm8994, unsigned long id, int irq)
WM8994_LDO1_DISCH, 0);
}
+ wm8994_irq_init(wm8994);
+
ret = mfd_add_devices(wm8994->dev, -1,
wm8994_devs, ARRAY_SIZE(wm8994_devs),
NULL, 0);
if (ret != 0) {
dev_err(wm8994->dev, "Failed to add children: %d\n", ret);
- goto err_enable;
+ goto err_irq;
}
return 0;
+err_irq:
+ wm8994_irq_exit(wm8994);
err_enable:
regulator_bulk_disable(ARRAY_SIZE(wm8994_main_supplies),
wm8994->supplies);
@@ -401,6 +436,7 @@ err:
static void wm8994_device_exit(struct wm8994 *wm8994)
{
mfd_remove_devices(wm8994->dev);
+ wm8994_irq_exit(wm8994);
regulator_bulk_disable(ARRAY_SIZE(wm8994_main_supplies),
wm8994->supplies);
regulator_bulk_free(ARRAY_SIZE(wm8994_main_supplies), wm8994->supplies);
@@ -469,6 +505,7 @@ static int wm8994_i2c_probe(struct i2c_client *i2c,
wm8994->control_data = i2c;
wm8994->read_dev = wm8994_i2c_read_device;
wm8994->write_dev = wm8994_i2c_write_device;
+ wm8994->irq = i2c->irq;
return wm8994_device_init(wm8994, id->driver_data, i2c->irq);
}
diff --git a/drivers/mfd/wm8994-irq.c b/drivers/mfd/wm8994-irq.c
new file mode 100644
index 000000000000..8400eb1ee5db
--- /dev/null
+++ b/drivers/mfd/wm8994-irq.c
@@ -0,0 +1,310 @@
+/*
+ * wm8994-irq.c -- Interrupt controller support for Wolfson WM8994
+ *
+ * Copyright 2010 Wolfson Microelectronics PLC.
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/irq.h>
+#include <linux/mfd/core.h>
+#include <linux/interrupt.h>
+
+#include <linux/mfd/wm8994/core.h>
+#include <linux/mfd/wm8994/registers.h>
+
+#include <linux/delay.h>
+
+struct wm8994_irq_data {
+ int reg;
+ int mask;
+};
+
+static struct wm8994_irq_data wm8994_irqs[] = {
+ [WM8994_IRQ_TEMP_SHUT] = {
+ .reg = 2,
+ .mask = WM8994_TEMP_SHUT_EINT,
+ },
+ [WM8994_IRQ_MIC1_DET] = {
+ .reg = 2,
+ .mask = WM8994_MIC1_DET_EINT,
+ },
+ [WM8994_IRQ_MIC1_SHRT] = {
+ .reg = 2,
+ .mask = WM8994_MIC1_SHRT_EINT,
+ },
+ [WM8994_IRQ_MIC2_DET] = {
+ .reg = 2,
+ .mask = WM8994_MIC2_DET_EINT,
+ },
+ [WM8994_IRQ_MIC2_SHRT] = {
+ .reg = 2,
+ .mask = WM8994_MIC2_SHRT_EINT,
+ },
+ [WM8994_IRQ_FLL1_LOCK] = {
+ .reg = 2,
+ .mask = WM8994_FLL1_LOCK_EINT,
+ },
+ [WM8994_IRQ_FLL2_LOCK] = {
+ .reg = 2,
+ .mask = WM8994_FLL2_LOCK_EINT,
+ },
+ [WM8994_IRQ_SRC1_LOCK] = {
+ .reg = 2,
+ .mask = WM8994_SRC1_LOCK_EINT,
+ },
+ [WM8994_IRQ_SRC2_LOCK] = {
+ .reg = 2,
+ .mask = WM8994_SRC2_LOCK_EINT,
+ },
+ [WM8994_IRQ_AIF1DRC1_SIG_DET] = {
+ .reg = 2,
+ .mask = WM8994_AIF1DRC1_SIG_DET_EINT,
+ },
+ [WM8994_IRQ_AIF1DRC2_SIG_DET] = {
+ .reg = 2,
+ .mask = WM8994_AIF1DRC2_SIG_DET_EINT,
+ },
+ [WM8994_IRQ_AIF2DRC_SIG_DET] = {
+ .reg = 2,
+ .mask = WM8994_AIF2DRC_SIG_DET_EINT,
+ },
+ [WM8994_IRQ_FIFOS_ERR] = {
+ .reg = 2,
+ .mask = WM8994_FIFOS_ERR_EINT,
+ },
+ [WM8994_IRQ_WSEQ_DONE] = {
+ .reg = 2,
+ .mask = WM8994_WSEQ_DONE_EINT,
+ },
+ [WM8994_IRQ_DCS_DONE] = {
+ .reg = 2,
+ .mask = WM8994_DCS_DONE_EINT,
+ },
+ [WM8994_IRQ_TEMP_WARN] = {
+ .reg = 2,
+ .mask = WM8994_TEMP_WARN_EINT,
+ },
+ [WM8994_IRQ_GPIO(1)] = {
+ .reg = 1,
+ .mask = WM8994_GP1_EINT,
+ },
+ [WM8994_IRQ_GPIO(2)] = {
+ .reg = 1,
+ .mask = WM8994_GP2_EINT,
+ },
+ [WM8994_IRQ_GPIO(3)] = {
+ .reg = 1,
+ .mask = WM8994_GP3_EINT,
+ },
+ [WM8994_IRQ_GPIO(4)] = {
+ .reg = 1,
+ .mask = WM8994_GP4_EINT,
+ },
+ [WM8994_IRQ_GPIO(5)] = {
+ .reg = 1,
+ .mask = WM8994_GP5_EINT,
+ },
+ [WM8994_IRQ_GPIO(6)] = {
+ .reg = 1,
+ .mask = WM8994_GP6_EINT,
+ },
+ [WM8994_IRQ_GPIO(7)] = {
+ .reg = 1,
+ .mask = WM8994_GP7_EINT,
+ },
+ [WM8994_IRQ_GPIO(8)] = {
+ .reg = 1,
+ .mask = WM8994_GP8_EINT,
+ },
+ [WM8994_IRQ_GPIO(9)] = {
+ .reg = 1,
+ .mask = WM8994_GP9_EINT,
+ },
+ [WM8994_IRQ_GPIO(10)] = {
+ .reg = 1,
+ .mask = WM8994_GP10_EINT,
+ },
+ [WM8994_IRQ_GPIO(11)] = {
+ .reg = 1,
+ .mask = WM8994_GP11_EINT,
+ },
+};
+
+static inline int irq_data_to_status_reg(struct wm8994_irq_data *irq_data)
+{
+ return WM8994_INTERRUPT_STATUS_1 - 1 + irq_data->reg;
+}
+
+static inline int irq_data_to_mask_reg(struct wm8994_irq_data *irq_data)
+{
+ return WM8994_INTERRUPT_STATUS_1_MASK - 1 + irq_data->reg;
+}
+
+static inline struct wm8994_irq_data *irq_to_wm8994_irq(struct wm8994 *wm8994,
+ int irq)
+{
+ return &wm8994_irqs[irq - wm8994->irq_base];
+}
+
+static void wm8994_irq_lock(unsigned int irq)
+{
+ struct wm8994 *wm8994 = get_irq_chip_data(irq);
+
+ mutex_lock(&wm8994->irq_lock);
+}
+
+static void wm8994_irq_sync_unlock(unsigned int irq)
+{
+ struct wm8994 *wm8994 = get_irq_chip_data(irq);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(wm8994->irq_masks_cur); i++) {
+ /* If there's been a change in the mask write it back
+ * to the hardware. */
+ if (wm8994->irq_masks_cur[i] != wm8994->irq_masks_cache[i]) {
+ wm8994->irq_masks_cache[i] = wm8994->irq_masks_cur[i];
+ wm8994_reg_write(wm8994,
+ WM8994_INTERRUPT_STATUS_1_MASK + i,
+ wm8994->irq_masks_cur[i]);
+ }
+ }
+
+ mutex_unlock(&wm8994->irq_lock);
+}
+
+static void wm8994_irq_unmask(unsigned int irq)
+{
+ struct wm8994 *wm8994 = get_irq_chip_data(irq);
+ struct wm8994_irq_data *irq_data = irq_to_wm8994_irq(wm8994, irq);
+
+ wm8994->irq_masks_cur[irq_data->reg - 1] &= ~irq_data->mask;
+}
+
+static void wm8994_irq_mask(unsigned int irq)
+{
+ struct wm8994 *wm8994 = get_irq_chip_data(irq);
+ struct wm8994_irq_data *irq_data = irq_to_wm8994_irq(wm8994, irq);
+
+ wm8994->irq_masks_cur[irq_data->reg - 1] |= irq_data->mask;
+}
+
+static struct irq_chip wm8994_irq_chip = {
+ .name = "wm8994",
+ .bus_lock = wm8994_irq_lock,
+ .bus_sync_unlock = wm8994_irq_sync_unlock,
+ .mask = wm8994_irq_mask,
+ .unmask = wm8994_irq_unmask,
+};
+
+/* The processing of the primary interrupt occurs in a thread so that
+ * we can interact with the device over I2C or SPI. */
+static irqreturn_t wm8994_irq_thread(int irq, void *data)
+{
+ struct wm8994 *wm8994 = data;
+ unsigned int i;
+ u16 status[WM8994_NUM_IRQ_REGS];
+ int ret;
+
+ ret = wm8994_bulk_read(wm8994, WM8994_INTERRUPT_STATUS_1,
+ WM8994_NUM_IRQ_REGS, status);
+ if (ret < 0) {
+ dev_err(wm8994->dev, "Failed to read interrupt status: %d\n",
+ ret);
+ return IRQ_NONE;
+ }
+
+ /* Apply masking */
+ for (i = 0; i < WM8994_NUM_IRQ_REGS; i++)
+ status[i] &= ~wm8994->irq_masks_cur[i];
+
+ /* Report */
+ for (i = 0; i < ARRAY_SIZE(wm8994_irqs); i++) {
+ if (status[wm8994_irqs[i].reg - 1] & wm8994_irqs[i].mask)
+ handle_nested_irq(wm8994->irq_base + i);
+ }
+
+ /* Ack any unmasked IRQs */
+ for (i = 0; i < ARRAY_SIZE(status); i++) {
+ if (status[i])
+ wm8994_reg_write(wm8994, WM8994_INTERRUPT_STATUS_1 + i,
+ status[i]);
+ }
+
+ return IRQ_HANDLED;
+}
+
+int wm8994_irq_init(struct wm8994 *wm8994)
+{
+ int i, cur_irq, ret;
+
+ mutex_init(&wm8994->irq_lock);
+
+ /* Mask the individual interrupt sources */
+ for (i = 0; i < ARRAY_SIZE(wm8994->irq_masks_cur); i++) {
+ wm8994->irq_masks_cur[i] = 0xffff;
+ wm8994->irq_masks_cache[i] = 0xffff;
+ wm8994_reg_write(wm8994, WM8994_INTERRUPT_STATUS_1_MASK + i,
+ 0xffff);
+ }
+
+ if (!wm8994->irq) {
+ dev_warn(wm8994->dev,
+ "No interrupt specified, no interrupts\n");
+ wm8994->irq_base = 0;
+ return 0;
+ }
+
+ if (!wm8994->irq_base) {
+ dev_err(wm8994->dev,
+ "No interrupt base specified, no interrupts\n");
+ return 0;
+ }
+
+ /* Register them with genirq */
+ for (cur_irq = wm8994->irq_base;
+ cur_irq < ARRAY_SIZE(wm8994_irqs) + wm8994->irq_base;
+ cur_irq++) {
+ set_irq_chip_data(cur_irq, wm8994);
+ set_irq_chip_and_handler(cur_irq, &wm8994_irq_chip,
+ handle_edge_irq);
+ set_irq_nested_thread(cur_irq, 1);
+
+ /* ARM needs us to explicitly flag the IRQ as valid
+ * and will set them noprobe when we do so. */
+#ifdef CONFIG_ARM
+ set_irq_flags(cur_irq, IRQF_VALID);
+#else
+ set_irq_noprobe(cur_irq);
+#endif
+ }
+
+ ret = request_threaded_irq(wm8994->irq, NULL, wm8994_irq_thread,
+ IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ "wm8994", wm8994);
+ if (ret != 0) {
+ dev_err(wm8994->dev, "Failed to request IRQ %d: %d\n",
+ wm8994->irq, ret);
+ return ret;
+ }
+
+ /* Enable top level interrupt if it was masked */
+ wm8994_reg_write(wm8994, WM8994_INTERRUPT_CONTROL, 0);
+
+ return 0;
+}
+
+void wm8994_irq_exit(struct wm8994 *wm8994)
+{
+ if (wm8994->irq)
+ free_irq(wm8994->irq, wm8994);
+}
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 0d0d625fece2..26386a92f5aa 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -14,11 +14,17 @@ menuconfig MISC_DEVICES
if MISC_DEVICES
config AD525X_DPOT
- tristate "Analog Devices AD525x Digital Potentiometers"
- depends on I2C && SYSFS
+ tristate "Analog Devices Digital Potentiometers"
+ depends on (I2C || SPI) && SYSFS
help
If you say yes here, you get support for the Analog Devices
- AD5258, AD5259, AD5251, AD5252, AD5253, AD5254 and AD5255
+ AD5258, AD5259, AD5251, AD5252, AD5253, AD5254, AD5255
+ AD5160, AD5161, AD5162, AD5165, AD5200, AD5201, AD5203,
+ AD5204, AD5206, AD5207, AD5231, AD5232, AD5233, AD5235,
+ AD5260, AD5262, AD5263, AD5290, AD5291, AD5292, AD5293,
+ AD7376, AD8400, AD8402, AD8403, ADN2850, AD5241, AD5242,
+ AD5243, AD5245, AD5246, AD5247, AD5248, AD5280, AD5282,
+ ADN2860, AD5273, AD5171, AD5170, AD5172, AD5173
digital potentiometer chips.
See Documentation/misc-devices/ad525x_dpot.txt for the
@@ -27,6 +33,26 @@ config AD525X_DPOT
This driver can also be built as a module. If so, the module
will be called ad525x_dpot.
+config AD525X_DPOT_I2C
+ tristate "support I2C bus connection"
+ depends on AD525X_DPOT && I2C
+ help
+ Say Y here if you have digital potentiometers hooked to an I2C bus.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad525x_dpot-i2c.
+
+config AD525X_DPOT_SPI
+ tristate "support SPI bus connection"
+ depends on AD525X_DPOT && SPI_MASTER
+ help
+ Say Y here if you have digital potentiometers hooked to an SPI bus.
+
+ If unsure, say N (but it's safe to say "Y").
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad525x_dpot-spi.
+
config ATMEL_PWM
tristate "Atmel AT32/AT91 PWM support"
depends on AVR32 || ARCH_AT91SAM9263 || ARCH_AT91SAM9RL || ARCH_AT91CAP9
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 7b6f7eefdf8d..6ed06a19474a 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -3,8 +3,9 @@
#
obj-$(CONFIG_IBM_ASM) += ibmasm/
-obj-$(CONFIG_HDPU_FEATURES) += hdpuftrs/
obj-$(CONFIG_AD525X_DPOT) += ad525x_dpot.o
+obj-$(CONFIG_AD525X_DPOT_I2C) += ad525x_dpot-i2c.o
+obj-$(CONFIG_AD525X_DPOT_SPI) += ad525x_dpot-spi.o
obj-$(CONFIG_ATMEL_PWM) += atmel_pwm.o
obj-$(CONFIG_ATMEL_SSC) += atmel-ssc.o
obj-$(CONFIG_ATMEL_TCLIB) += atmel_tclib.o
diff --git a/drivers/misc/ad525x_dpot-i2c.c b/drivers/misc/ad525x_dpot-i2c.c
new file mode 100644
index 000000000000..374352af7979
--- /dev/null
+++ b/drivers/misc/ad525x_dpot-i2c.c
@@ -0,0 +1,134 @@
+/*
+ * Driver for the Analog Devices digital potentiometers (I2C bus)
+ *
+ * Copyright (C) 2010 Michael Hennerich, Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+
+#include "ad525x_dpot.h"
+
+/* ------------------------------------------------------------------------- */
+/* I2C bus functions */
+static int write_d8(void *client, u8 val)
+{
+ return i2c_smbus_write_byte(client, val);
+}
+
+static int write_r8d8(void *client, u8 reg, u8 val)
+{
+ return i2c_smbus_write_byte_data(client, reg, val);
+}
+
+static int write_r8d16(void *client, u8 reg, u16 val)
+{
+ return i2c_smbus_write_word_data(client, reg, val);
+}
+
+static int read_d8(void *client)
+{
+ return i2c_smbus_read_byte(client);
+}
+
+static int read_r8d8(void *client, u8 reg)
+{
+ return i2c_smbus_read_byte_data(client, reg);
+}
+
+static int read_r8d16(void *client, u8 reg)
+{
+ return i2c_smbus_read_word_data(client, reg);
+}
+
+static const struct ad_dpot_bus_ops bops = {
+ .read_d8 = read_d8,
+ .read_r8d8 = read_r8d8,
+ .read_r8d16 = read_r8d16,
+ .write_d8 = write_d8,
+ .write_r8d8 = write_r8d8,
+ .write_r8d16 = write_r8d16,
+};
+
+static int __devinit ad_dpot_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct ad_dpot_bus_data bdata = {
+ .client = client,
+ .bops = &bops,
+ };
+
+ struct ad_dpot_id dpot_id = {
+ .name = (char *) &id->name,
+ .devid = id->driver_data,
+ };
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_WORD_DATA)) {
+ dev_err(&client->dev, "SMBUS Word Data not Supported\n");
+ return -EIO;
+ }
+
+ return ad_dpot_probe(&client->dev, &bdata, &dpot_id);
+}
+
+static int __devexit ad_dpot_i2c_remove(struct i2c_client *client)
+{
+ return ad_dpot_remove(&client->dev);
+}
+
+static const struct i2c_device_id ad_dpot_id[] = {
+ {"ad5258", AD5258_ID},
+ {"ad5259", AD5259_ID},
+ {"ad5251", AD5251_ID},
+ {"ad5252", AD5252_ID},
+ {"ad5253", AD5253_ID},
+ {"ad5254", AD5254_ID},
+ {"ad5255", AD5255_ID},
+ {"ad5241", AD5241_ID},
+ {"ad5242", AD5242_ID},
+ {"ad5243", AD5243_ID},
+ {"ad5245", AD5245_ID},
+ {"ad5246", AD5246_ID},
+ {"ad5247", AD5247_ID},
+ {"ad5248", AD5248_ID},
+ {"ad5280", AD5280_ID},
+ {"ad5282", AD5282_ID},
+ {"adn2860", ADN2860_ID},
+ {"ad5273", AD5273_ID},
+ {"ad5171", AD5171_ID},
+ {"ad5170", AD5170_ID},
+ {"ad5172", AD5172_ID},
+ {"ad5173", AD5173_ID},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, ad_dpot_id);
+
+static struct i2c_driver ad_dpot_i2c_driver = {
+ .driver = {
+ .name = "ad_dpot",
+ .owner = THIS_MODULE,
+ },
+ .probe = ad_dpot_i2c_probe,
+ .remove = __devexit_p(ad_dpot_i2c_remove),
+ .id_table = ad_dpot_id,
+};
+
+static int __init ad_dpot_i2c_init(void)
+{
+ return i2c_add_driver(&ad_dpot_i2c_driver);
+}
+module_init(ad_dpot_i2c_init);
+
+static void __exit ad_dpot_i2c_exit(void)
+{
+ i2c_del_driver(&ad_dpot_i2c_driver);
+}
+module_exit(ad_dpot_i2c_exit);
+
+MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_DESCRIPTION("digital potentiometer I2C bus driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("i2c:ad_dpot");
diff --git a/drivers/misc/ad525x_dpot-spi.c b/drivers/misc/ad525x_dpot-spi.c
new file mode 100644
index 000000000000..b8c6df9c8437
--- /dev/null
+++ b/drivers/misc/ad525x_dpot-spi.c
@@ -0,0 +1,172 @@
+/*
+ * Driver for the Analog Devices digital potentiometers (SPI bus)
+ *
+ * Copyright (C) 2010 Michael Hennerich, Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/spi/spi.h>
+#include <linux/module.h>
+
+#include "ad525x_dpot.h"
+
+static const struct ad_dpot_id ad_dpot_spi_devlist[] = {
+ {.name = "ad5160", .devid = AD5160_ID},
+ {.name = "ad5161", .devid = AD5161_ID},
+ {.name = "ad5162", .devid = AD5162_ID},
+ {.name = "ad5165", .devid = AD5165_ID},
+ {.name = "ad5200", .devid = AD5200_ID},
+ {.name = "ad5201", .devid = AD5201_ID},
+ {.name = "ad5203", .devid = AD5203_ID},
+ {.name = "ad5204", .devid = AD5204_ID},
+ {.name = "ad5206", .devid = AD5206_ID},
+ {.name = "ad5207", .devid = AD5207_ID},
+ {.name = "ad5231", .devid = AD5231_ID},
+ {.name = "ad5232", .devid = AD5232_ID},
+ {.name = "ad5233", .devid = AD5233_ID},
+ {.name = "ad5235", .devid = AD5235_ID},
+ {.name = "ad5260", .devid = AD5260_ID},
+ {.name = "ad5262", .devid = AD5262_ID},
+ {.name = "ad5263", .devid = AD5263_ID},
+ {.name = "ad5290", .devid = AD5290_ID},
+ {.name = "ad5291", .devid = AD5291_ID},
+ {.name = "ad5292", .devid = AD5292_ID},
+ {.name = "ad5293", .devid = AD5293_ID},
+ {.name = "ad7376", .devid = AD7376_ID},
+ {.name = "ad8400", .devid = AD8400_ID},
+ {.name = "ad8402", .devid = AD8402_ID},
+ {.name = "ad8403", .devid = AD8403_ID},
+ {.name = "adn2850", .devid = ADN2850_ID},
+ {}
+};
+
+/* ------------------------------------------------------------------------- */
+
+/* SPI bus functions */
+static int write8(void *client, u8 val)
+{
+ u8 data = val;
+ return spi_write(client, &data, 1);
+}
+
+static int write16(void *client, u8 reg, u8 val)
+{
+ u8 data[2] = {reg, val};
+ return spi_write(client, data, 2);
+}
+
+static int write24(void *client, u8 reg, u16 val)
+{
+ u8 data[3] = {reg, val >> 8, val};
+ return spi_write(client, data, 3);
+}
+
+static int read8(void *client)
+{
+ int ret;
+ u8 data;
+ ret = spi_read(client, &data, 1);
+ if (ret < 0)
+ return ret;
+
+ return data;
+}
+
+static int read16(void *client, u8 reg)
+{
+ int ret;
+ u8 buf_rx[2];
+
+ write16(client, reg, 0);
+ ret = spi_read(client, buf_rx, 2);
+ if (ret < 0)
+ return ret;
+
+ return (buf_rx[0] << 8) | buf_rx[1];
+}
+
+static int read24(void *client, u8 reg)
+{
+ int ret;
+ u8 buf_rx[3];
+
+ write24(client, reg, 0);
+ ret = spi_read(client, buf_rx, 3);
+ if (ret < 0)
+ return ret;
+
+ return (buf_rx[1] << 8) | buf_rx[2];
+}
+
+static const struct ad_dpot_bus_ops bops = {
+ .read_d8 = read8,
+ .read_r8d8 = read16,
+ .read_r8d16 = read24,
+ .write_d8 = write8,
+ .write_r8d8 = write16,
+ .write_r8d16 = write24,
+};
+
+static const struct ad_dpot_id *dpot_match_id(const struct ad_dpot_id *id,
+ char *name)
+{
+ while (id->name && id->name[0]) {
+ if (strcmp(name, id->name) == 0)
+ return id;
+ id++;
+ }
+ return NULL;
+}
+
+static int __devinit ad_dpot_spi_probe(struct spi_device *spi)
+{
+ char *name = spi->dev.platform_data;
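+ /* platform_data carries the part name string used to look up the device ID */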
+ const struct ad_dpot_id *dpot_id;
+
+ struct ad_dpot_bus_data bdata = {
+ .client = spi,
+ .bops = &bops,
+ };
+
+ dpot_id = dpot_match_id(ad_dpot_spi_devlist, name);
+
+ if (dpot_id == NULL) {
+ dev_err(&spi->dev, "%s not in supported device list\n", name);
+ return -ENODEV;
+ }
+
+ return ad_dpot_probe(&spi->dev, &bdata, dpot_id);
+}
+
+static int __devexit ad_dpot_spi_remove(struct spi_device *spi)
+{
+ return ad_dpot_remove(&spi->dev);
+}
+
+static struct spi_driver ad_dpot_spi_driver = {
+ .driver = {
+ .name = "ad_dpot",
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ },
+ .probe = ad_dpot_spi_probe,
+ .remove = __devexit_p(ad_dpot_spi_remove),
+};
+
+static int __init ad_dpot_spi_init(void)
+{
+ return spi_register_driver(&ad_dpot_spi_driver);
+}
+module_init(ad_dpot_spi_init);
+
+static void __exit ad_dpot_spi_exit(void)
+{
+ spi_unregister_driver(&ad_dpot_spi_driver);
+}
+module_exit(ad_dpot_spi_exit);
+
+MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_DESCRIPTION("digital potentiometer SPI bus driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("spi:ad_dpot");
diff --git a/drivers/misc/ad525x_dpot.c b/drivers/misc/ad525x_dpot.c
index 30a59f2bacd2..5e6fa8449e8b 100644
--- a/drivers/misc/ad525x_dpot.c
+++ b/drivers/misc/ad525x_dpot.c
@@ -1,6 +1,6 @@
/*
- * ad525x_dpot: Driver for the Analog Devices AD525x digital potentiometers
- * Copyright (c) 2009 Analog Devices, Inc.
+ * ad525x_dpot: Driver for the Analog Devices digital potentiometers
+ * Copyright (c) 2009-2010 Analog Devices, Inc.
* Author: Michael Hennerich <hennerich@blackfin.uclinux.org>
*
* DEVID #Wipers #Positions Resistor Options (kOhm)
@@ -11,6 +11,47 @@
* AD5255 3 512 25, 250
* AD5253 4 64 1, 10, 50, 100
* AD5254 4 256 1, 10, 50, 100
+ * AD5160 1 256 5, 10, 50, 100
+ * AD5161 1 256 5, 10, 50, 100
+ * AD5162 2 256 2.5, 10, 50, 100
+ * AD5165 1 256 100
+ * AD5200 1 256 10, 50
+ * AD5201 1 33 10, 50
+ * AD5203 4 64 10, 100
+ * AD5204 4 256 10, 50, 100
+ * AD5206 6 256 10, 50, 100
+ * AD5207 2 256 10, 50, 100
+ * AD5231 1 1024 10, 50, 100
+ * AD5232 2 256 10, 50, 100
+ * AD5233 4 64 10, 50, 100
+ * AD5235 2 1024 25, 250
+ * AD5260 1 256 20, 50, 200
+ * AD5262 2 256 20, 50, 200
+ * AD5263 4 256 20, 50, 200
+ * AD5290 1 256 10, 50, 100
+ * AD5291 1 256 20
+ * AD5292 1 1024 20
+ * AD5293 1 1024 20
+ * AD7376 1 128 10, 50, 100, 1M
+ * AD8400 1 256 1, 10, 50, 100
+ * AD8402 2 256 1, 10, 50, 100
+ * AD8403 4 256 1, 10, 50, 100
+ * ADN2850 3 512 25, 250
+ * AD5241 1 256 10, 100, 1M
+ * AD5246 1 128 5, 10, 50, 100
+ * AD5247 1 128 5, 10, 50, 100
+ * AD5245 1 256 5, 10, 50, 100
+ * AD5243 2 256 2.5, 10, 50, 100
+ * AD5248 2 256 2.5, 10, 50, 100
+ * AD5242 2 256 20, 50, 200
+ * AD5280 1 256 20, 50, 200
+ * AD5282 2 256 20, 50, 200
+ * ADN2860 3 512 25, 250
+ * AD5273 1 64 1, 10, 50, 100 (OTP)
+ * AD5171 1 64 5, 10, 50, 100 (OTP)
+ * AD5170 1 256 2.5, 10, 50, 100 (OTP)
+ * AD5172 2 256 2.5, 10, 50, 100 (OTP)
+ * AD5173 2 256 2.5, 10, 50, 100 (OTP)
*
* See Documentation/misc-devices/ad525x_dpot.txt for more info.
*
@@ -28,77 +69,283 @@
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/i2c.h>
#include <linux/delay.h>
+#include <linux/slab.h>
-#define DRIVER_NAME "ad525x_dpot"
-#define DRIVER_VERSION "0.1"
-
-enum dpot_devid {
- AD5258_ID,
- AD5259_ID,
- AD5251_ID,
- AD5252_ID,
- AD5253_ID,
- AD5254_ID,
- AD5255_ID,
-};
+#define DRIVER_VERSION "0.2"
-#define AD5258_MAX_POSITION 64
-#define AD5259_MAX_POSITION 256
-#define AD5251_MAX_POSITION 64
-#define AD5252_MAX_POSITION 256
-#define AD5253_MAX_POSITION 64
-#define AD5254_MAX_POSITION 256
-#define AD5255_MAX_POSITION 512
-
-#define AD525X_RDAC0 0
-#define AD525X_RDAC1 1
-#define AD525X_RDAC2 2
-#define AD525X_RDAC3 3
-
-#define AD525X_REG_TOL 0x18
-#define AD525X_TOL_RDAC0 (AD525X_REG_TOL | AD525X_RDAC0)
-#define AD525X_TOL_RDAC1 (AD525X_REG_TOL | AD525X_RDAC1)
-#define AD525X_TOL_RDAC2 (AD525X_REG_TOL | AD525X_RDAC2)
-#define AD525X_TOL_RDAC3 (AD525X_REG_TOL | AD525X_RDAC3)
-
-/* RDAC-to-EEPROM Interface Commands */
-#define AD525X_I2C_RDAC (0x00 << 5)
-#define AD525X_I2C_EEPROM (0x01 << 5)
-#define AD525X_I2C_CMD (0x80)
-
-#define AD525X_DEC_ALL_6DB (AD525X_I2C_CMD | (0x4 << 3))
-#define AD525X_INC_ALL_6DB (AD525X_I2C_CMD | (0x9 << 3))
-#define AD525X_DEC_ALL (AD525X_I2C_CMD | (0x6 << 3))
-#define AD525X_INC_ALL (AD525X_I2C_CMD | (0xB << 3))
-
-static s32 ad525x_read(struct i2c_client *client, u8 reg);
-static s32 ad525x_write(struct i2c_client *client, u8 reg, u8 value);
+#include "ad525x_dpot.h"
/*
* Client data (each client gets its own)
*/
struct dpot_data {
+ struct ad_dpot_bus_data bdata;
struct mutex update_lock;
unsigned rdac_mask;
unsigned max_pos;
- unsigned devid;
+ unsigned long devid;
+ unsigned uid;
+ unsigned feat;
+ unsigned wipers;
+ u16 rdac_cache[MAX_RDACS];
+ DECLARE_BITMAP(otp_en_mask, MAX_RDACS);
};
+static inline int dpot_read_d8(struct dpot_data *dpot)
+{
+ return dpot->bdata.bops->read_d8(dpot->bdata.client);
+}
+
+static inline int dpot_read_r8d8(struct dpot_data *dpot, u8 reg)
+{
+ return dpot->bdata.bops->read_r8d8(dpot->bdata.client, reg);
+}
+
+static inline int dpot_read_r8d16(struct dpot_data *dpot, u8 reg)
+{
+ return dpot->bdata.bops->read_r8d16(dpot->bdata.client, reg);
+}
+
+static inline int dpot_write_d8(struct dpot_data *dpot, u8 val)
+{
+ return dpot->bdata.bops->write_d8(dpot->bdata.client, val);
+}
+
+static inline int dpot_write_r8d8(struct dpot_data *dpot, u8 reg, u16 val)
+{
+ return dpot->bdata.bops->write_r8d8(dpot->bdata.client, reg, val);
+}
+
+static inline int dpot_write_r8d16(struct dpot_data *dpot, u8 reg, u16 val)
+{
+ return dpot->bdata.bops->write_r8d16(dpot->bdata.client, reg, val);
+}
+
+static s32 dpot_read_spi(struct dpot_data *dpot, u8 reg)
+{
+ unsigned ctrl = 0;
+
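+ /* Neither EEPROM nor command bits set, so this is an RDAC register read */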
+ if (!(reg & (DPOT_ADDR_EEPROM | DPOT_ADDR_CMD))) {
+
+ if (dpot->feat & F_RDACS_WONLY)
+ return dpot->rdac_cache[reg & DPOT_RDAC_MASK];
+
+ if (dpot->uid == DPOT_UID(AD5291_ID) ||
+ dpot->uid == DPOT_UID(AD5292_ID) ||
+ dpot->uid == DPOT_UID(AD5293_ID))
+ return dpot_read_r8d8(dpot,
+ DPOT_AD5291_READ_RDAC << 2);
+
+ ctrl = DPOT_SPI_READ_RDAC;
+ } else if (reg & DPOT_ADDR_EEPROM) {
+ ctrl = DPOT_SPI_READ_EEPROM;
+ }
+
+ if (dpot->feat & F_SPI_16BIT)
+ return dpot_read_r8d8(dpot, ctrl);
+ else if (dpot->feat & F_SPI_24BIT)
+ return dpot_read_r8d16(dpot, ctrl);
+
+ return -EFAULT;
+}
+
+static s32 dpot_read_i2c(struct dpot_data *dpot, u8 reg)
+{
+ unsigned ctrl = 0;
+ switch (dpot->uid) {
+ case DPOT_UID(AD5246_ID):
+ case DPOT_UID(AD5247_ID):
+ return dpot_read_d8(dpot);
+ case DPOT_UID(AD5245_ID):
+ case DPOT_UID(AD5241_ID):
+ case DPOT_UID(AD5242_ID):
+ case DPOT_UID(AD5243_ID):
+ case DPOT_UID(AD5248_ID):
+ case DPOT_UID(AD5280_ID):
+ case DPOT_UID(AD5282_ID):
+ ctrl = ((reg & DPOT_RDAC_MASK) == DPOT_RDAC0) ?
+ 0 : DPOT_AD5291_RDAC_AB;
+ return dpot_read_r8d8(dpot, ctrl);
+ case DPOT_UID(AD5170_ID):
+ case DPOT_UID(AD5171_ID):
+ case DPOT_UID(AD5273_ID):
+ return dpot_read_d8(dpot);
+ case DPOT_UID(AD5172_ID):
+ case DPOT_UID(AD5173_ID):
+ ctrl = ((reg & DPOT_RDAC_MASK) == DPOT_RDAC0) ?
+ 0 : DPOT_AD5272_3_A0;
+ return dpot_read_r8d8(dpot, ctrl);
+ default:
+ if ((reg & DPOT_REG_TOL) || (dpot->max_pos > 256))
+ return dpot_read_r8d16(dpot, (reg & 0xF8) |
+ ((reg & 0x7) << 1));
+ else
+ return dpot_read_r8d8(dpot, reg);
+ }
+}
+
+static s32 dpot_read(struct dpot_data *dpot, u8 reg)
+{
+ if (dpot->feat & F_SPI)
+ return dpot_read_spi(dpot, reg);
+ else
+ return dpot_read_i2c(dpot, reg);
+}
+
+static s32 dpot_write_spi(struct dpot_data *dpot, u8 reg, u16 value)
+{
+ unsigned val = 0;
+
+ if (!(reg & (DPOT_ADDR_EEPROM | DPOT_ADDR_CMD))) {
+ if (dpot->feat & F_RDACS_WONLY)
+ dpot->rdac_cache[reg & DPOT_RDAC_MASK] = value;
+
+ if (dpot->feat & F_AD_APPDATA) {
+ if (dpot->feat & F_SPI_8BIT) {
+ val = ((reg & DPOT_RDAC_MASK) <<
+ DPOT_MAX_POS(dpot->devid)) |
+ value;
+ return dpot_write_d8(dpot, val);
+ } else if (dpot->feat & F_SPI_16BIT) {
+ val = ((reg & DPOT_RDAC_MASK) <<
+ DPOT_MAX_POS(dpot->devid)) |
+ value;
+ return dpot_write_r8d8(dpot, val >> 8,
+ val & 0xFF);
+ } else
+ BUG();
+ } else {
+ if (dpot->uid == DPOT_UID(AD5291_ID) ||
+ dpot->uid == DPOT_UID(AD5292_ID) ||
+ dpot->uid == DPOT_UID(AD5293_ID))
+ return dpot_write_r8d8(dpot,
+ (DPOT_AD5291_RDAC << 2) |
+ (value >> 8), value & 0xFF);
+
+ val = DPOT_SPI_RDAC | (reg & DPOT_RDAC_MASK);
+ }
+ } else if (reg & DPOT_ADDR_EEPROM) {
+ val = DPOT_SPI_EEPROM | (reg & DPOT_RDAC_MASK);
+ } else if (reg & DPOT_ADDR_CMD) {
+ switch (reg) {
+ case DPOT_DEC_ALL_6DB:
+ val = DPOT_SPI_DEC_ALL_6DB;
+ break;
+ case DPOT_INC_ALL_6DB:
+ val = DPOT_SPI_INC_ALL_6DB;
+ break;
+ case DPOT_DEC_ALL:
+ val = DPOT_SPI_DEC_ALL;
+ break;
+ case DPOT_INC_ALL:
+ val = DPOT_SPI_INC_ALL;
+ break;
+ }
+ } else
+ BUG();
+
+ if (dpot->feat & F_SPI_16BIT)
+ return dpot_write_r8d8(dpot, val, value);
+ else if (dpot->feat & F_SPI_24BIT)
+ return dpot_write_r8d16(dpot, val, value);
+
+ return -EFAULT;
+}
+
+static s32 dpot_write_i2c(struct dpot_data *dpot, u8 reg, u16 value)
+{
+ /* Only write the instruction byte for certain commands */
+ unsigned tmp = 0, ctrl = 0;
+
+ switch (dpot->uid) {
+ case DPOT_UID(AD5246_ID):
+ case DPOT_UID(AD5247_ID):
+ return dpot_write_d8(dpot, value);
+ break;
+
+ case DPOT_UID(AD5245_ID):
+ case DPOT_UID(AD5241_ID):
+ case DPOT_UID(AD5242_ID):
+ case DPOT_UID(AD5243_ID):
+ case DPOT_UID(AD5248_ID):
+ case DPOT_UID(AD5280_ID):
+ case DPOT_UID(AD5282_ID):
+ ctrl = ((reg & DPOT_RDAC_MASK) == DPOT_RDAC0) ?
+ 0 : DPOT_AD5291_RDAC_AB;
+ return dpot_write_r8d8(dpot, ctrl, value);
+ break;
+ case DPOT_UID(AD5171_ID):
+ case DPOT_UID(AD5273_ID):
+ if (reg & DPOT_ADDR_OTP) {
+ tmp = dpot_read_d8(dpot);
+ if (tmp >> 6) /* Ready to Program? */
+ return -EFAULT;
+ ctrl = DPOT_AD5273_FUSE;
+ }
+ return dpot_write_r8d8(dpot, ctrl, value);
+ break;
+ case DPOT_UID(AD5172_ID):
+ case DPOT_UID(AD5173_ID):
+ ctrl = ((reg & DPOT_RDAC_MASK) == DPOT_RDAC0) ?
+ 0 : DPOT_AD5272_3_A0;
+ if (reg & DPOT_ADDR_OTP) {
+ tmp = dpot_read_r8d16(dpot, ctrl);
+ if (tmp >> 14) /* Ready to Program? */
+ return -EFAULT;
+ ctrl |= DPOT_AD5270_2_3_FUSE;
+ }
+ return dpot_write_r8d8(dpot, ctrl, value);
+ break;
+ case DPOT_UID(AD5170_ID):
+ if (reg & DPOT_ADDR_OTP) {
+ tmp = dpot_read_r8d16(dpot, tmp);
+ if (tmp >> 14) /* Ready to Program? */
+ return -EFAULT;
+ ctrl = DPOT_AD5270_2_3_FUSE;
+ }
+ return dpot_write_r8d8(dpot, ctrl, value);
+ break;
+ default:
+ if (reg & DPOT_ADDR_CMD)
+ return dpot_write_d8(dpot, reg);
+
+ if (dpot->max_pos > 256)
+ return dpot_write_r8d16(dpot, (reg & 0xF8) |
+ ((reg & 0x7) << 1), value);
+ else
+ /* All other registers require instruction + data bytes */
+ return dpot_write_r8d8(dpot, reg, value);
+ }
+}
+
+
+static s32 dpot_write(struct dpot_data *dpot, u8 reg, u16 value)
+{
+ if (dpot->feat & F_SPI)
+ return dpot_write_spi(dpot, reg, value);
+ else
+ return dpot_write_i2c(dpot, reg, value);
+}
+
/* sysfs functions */
static ssize_t sysfs_show_reg(struct device *dev,
- struct device_attribute *attr, char *buf, u32 reg)
+ struct device_attribute *attr,
+ char *buf, u32 reg)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct dpot_data *data = i2c_get_clientdata(client);
+ struct dpot_data *data = dev_get_drvdata(dev);
s32 value;
+ if (reg & DPOT_ADDR_OTP_EN)
+ return sprintf(buf, "%s\n",
+ test_bit(DPOT_RDAC_MASK & reg, data->otp_en_mask) ?
+ "enabled" : "disabled");
+
mutex_lock(&data->update_lock);
- value = ad525x_read(client, reg);
+ value = dpot_read(data, reg);
mutex_unlock(&data->update_lock);
if (value < 0)
@@ -111,7 +358,7 @@ static ssize_t sysfs_show_reg(struct device *dev,
* datasheet (Rev. A) for more details.
*/
- if (reg & AD525X_REG_TOL)
+ if (reg & DPOT_REG_TOL)
return sprintf(buf, "0x%04x\n", value & 0xFFFF);
else
return sprintf(buf, "%u\n", value & data->rdac_mask);
@@ -121,11 +368,23 @@ static ssize_t sysfs_set_reg(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count, u32 reg)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct dpot_data *data = i2c_get_clientdata(client);
+ struct dpot_data *data = dev_get_drvdata(dev);
unsigned long value;
int err;
+ if (reg & DPOT_ADDR_OTP_EN) {
+ if (!strncmp(buf, "enabled", sizeof("enabled") - 1))
+ set_bit(DPOT_RDAC_MASK & reg, data->otp_en_mask);
+ else
+ clear_bit(DPOT_RDAC_MASK & reg, data->otp_en_mask);
+
+ return count;
+ }
+
+ if ((reg & DPOT_ADDR_OTP) &&
+ !test_bit(DPOT_RDAC_MASK & reg, data->otp_en_mask))
+ return -EPERM;
+
err = strict_strtoul(buf, 10, &value);
if (err)
return err;
@@ -134,9 +393,11 @@ static ssize_t sysfs_set_reg(struct device *dev,
value = data->rdac_mask;
mutex_lock(&data->update_lock);
- ad525x_write(client, reg, value);
- if (reg & AD525X_I2C_EEPROM)
+ dpot_write(data, reg, value);
+ if (reg & DPOT_ADDR_EEPROM)
msleep(26); /* Sleep while the EEPROM updates */
+ else if (reg & DPOT_ADDR_OTP)
+ msleep(400); /* Sleep while the OTP updates */
mutex_unlock(&data->update_lock);
return count;
@@ -146,11 +407,10 @@ static ssize_t sysfs_do_cmd(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count, u32 reg)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct dpot_data *data = i2c_get_clientdata(client);
+ struct dpot_data *data = dev_get_drvdata(dev);
mutex_lock(&data->update_lock);
- ad525x_write(client, reg, 0);
+ dpot_write(data, reg, 0);
mutex_unlock(&data->update_lock);
return count;
@@ -158,244 +418,131 @@ static ssize_t sysfs_do_cmd(struct device *dev,
/* ------------------------------------------------------------------------- */
-static ssize_t show_rdac0(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return sysfs_show_reg(dev, attr, buf, AD525X_I2C_RDAC | AD525X_RDAC0);
-}
-
-static ssize_t set_rdac0(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- return sysfs_set_reg(dev, attr, buf, count,
- AD525X_I2C_RDAC | AD525X_RDAC0);
-}
-
-static DEVICE_ATTR(rdac0, S_IWUSR | S_IRUGO, show_rdac0, set_rdac0);
-
-static ssize_t show_eeprom0(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return sysfs_show_reg(dev, attr, buf, AD525X_I2C_EEPROM | AD525X_RDAC0);
-}
-
-static ssize_t set_eeprom0(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- return sysfs_set_reg(dev, attr, buf, count,
- AD525X_I2C_EEPROM | AD525X_RDAC0);
-}
-
-static DEVICE_ATTR(eeprom0, S_IWUSR | S_IRUGO, show_eeprom0, set_eeprom0);
-
-static ssize_t show_tolerance0(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return sysfs_show_reg(dev, attr, buf,
- AD525X_I2C_EEPROM | AD525X_TOL_RDAC0);
-}
-
-static DEVICE_ATTR(tolerance0, S_IRUGO, show_tolerance0, NULL);
-
-/* ------------------------------------------------------------------------- */
-
-static ssize_t show_rdac1(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return sysfs_show_reg(dev, attr, buf, AD525X_I2C_RDAC | AD525X_RDAC1);
-}
-
-static ssize_t set_rdac1(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- return sysfs_set_reg(dev, attr, buf, count,
- AD525X_I2C_RDAC | AD525X_RDAC1);
-}
-
-static DEVICE_ATTR(rdac1, S_IWUSR | S_IRUGO, show_rdac1, set_rdac1);
-
-static ssize_t show_eeprom1(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return sysfs_show_reg(dev, attr, buf, AD525X_I2C_EEPROM | AD525X_RDAC1);
-}
-
-static ssize_t set_eeprom1(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- return sysfs_set_reg(dev, attr, buf, count,
- AD525X_I2C_EEPROM | AD525X_RDAC1);
-}
-
-static DEVICE_ATTR(eeprom1, S_IWUSR | S_IRUGO, show_eeprom1, set_eeprom1);
-
-static ssize_t show_tolerance1(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return sysfs_show_reg(dev, attr, buf,
- AD525X_I2C_EEPROM | AD525X_TOL_RDAC1);
-}
-
-static DEVICE_ATTR(tolerance1, S_IRUGO, show_tolerance1, NULL);
-
-/* ------------------------------------------------------------------------- */
-
-static ssize_t show_rdac2(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return sysfs_show_reg(dev, attr, buf, AD525X_I2C_RDAC | AD525X_RDAC2);
-}
-
-static ssize_t set_rdac2(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- return sysfs_set_reg(dev, attr, buf, count,
- AD525X_I2C_RDAC | AD525X_RDAC2);
-}
-
-static DEVICE_ATTR(rdac2, S_IWUSR | S_IRUGO, show_rdac2, set_rdac2);
-
-static ssize_t show_eeprom2(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return sysfs_show_reg(dev, attr, buf, AD525X_I2C_EEPROM | AD525X_RDAC2);
-}
-
-static ssize_t set_eeprom2(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- return sysfs_set_reg(dev, attr, buf, count,
- AD525X_I2C_EEPROM | AD525X_RDAC2);
-}
-
-static DEVICE_ATTR(eeprom2, S_IWUSR | S_IRUGO, show_eeprom2, set_eeprom2);
-
-static ssize_t show_tolerance2(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return sysfs_show_reg(dev, attr, buf,
- AD525X_I2C_EEPROM | AD525X_TOL_RDAC2);
-}
-
-static DEVICE_ATTR(tolerance2, S_IRUGO, show_tolerance2, NULL);
-
-/* ------------------------------------------------------------------------- */
-
-static ssize_t show_rdac3(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return sysfs_show_reg(dev, attr, buf, AD525X_I2C_RDAC | AD525X_RDAC3);
-}
-
-static ssize_t set_rdac3(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- return sysfs_set_reg(dev, attr, buf, count,
- AD525X_I2C_RDAC | AD525X_RDAC3);
-}
-
-static DEVICE_ATTR(rdac3, S_IWUSR | S_IRUGO, show_rdac3, set_rdac3);
-
-static ssize_t show_eeprom3(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return sysfs_show_reg(dev, attr, buf, AD525X_I2C_EEPROM | AD525X_RDAC3);
-}
-
-static ssize_t set_eeprom3(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- return sysfs_set_reg(dev, attr, buf, count,
- AD525X_I2C_EEPROM | AD525X_RDAC3);
-}
+#define DPOT_DEVICE_SHOW(_name, _reg) static ssize_t \
+show_##_name(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ return sysfs_show_reg(dev, attr, buf, _reg); \
+}
+
+#define DPOT_DEVICE_SET(_name, _reg) static ssize_t \
+set_##_name(struct device *dev, \
+ struct device_attribute *attr, \
+ const char *buf, size_t count) \
+{ \
+ return sysfs_set_reg(dev, attr, buf, count, _reg); \
+}
+
+#define DPOT_DEVICE_SHOW_SET(name, reg) \
+DPOT_DEVICE_SHOW(name, reg) \
+DPOT_DEVICE_SET(name, reg) \
+static DEVICE_ATTR(name, S_IWUSR | S_IRUGO, show_##name, set_##name);
+
+#define DPOT_DEVICE_SHOW_ONLY(name, reg) \
+DPOT_DEVICE_SHOW(name, reg) \
+static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL);
+
+DPOT_DEVICE_SHOW_SET(rdac0, DPOT_ADDR_RDAC | DPOT_RDAC0);
+DPOT_DEVICE_SHOW_SET(eeprom0, DPOT_ADDR_EEPROM | DPOT_RDAC0);
+DPOT_DEVICE_SHOW_ONLY(tolerance0, DPOT_ADDR_EEPROM | DPOT_TOL_RDAC0);
+DPOT_DEVICE_SHOW_SET(otp0, DPOT_ADDR_OTP | DPOT_RDAC0);
+DPOT_DEVICE_SHOW_SET(otp0en, DPOT_ADDR_OTP_EN | DPOT_RDAC0);
+
+DPOT_DEVICE_SHOW_SET(rdac1, DPOT_ADDR_RDAC | DPOT_RDAC1);
+DPOT_DEVICE_SHOW_SET(eeprom1, DPOT_ADDR_EEPROM | DPOT_RDAC1);
+DPOT_DEVICE_SHOW_ONLY(tolerance1, DPOT_ADDR_EEPROM | DPOT_TOL_RDAC1);
+DPOT_DEVICE_SHOW_SET(otp1, DPOT_ADDR_OTP | DPOT_RDAC1);
+DPOT_DEVICE_SHOW_SET(otp1en, DPOT_ADDR_OTP_EN | DPOT_RDAC1);
+
+DPOT_DEVICE_SHOW_SET(rdac2, DPOT_ADDR_RDAC | DPOT_RDAC2);
+DPOT_DEVICE_SHOW_SET(eeprom2, DPOT_ADDR_EEPROM | DPOT_RDAC2);
+DPOT_DEVICE_SHOW_ONLY(tolerance2, DPOT_ADDR_EEPROM | DPOT_TOL_RDAC2);
+DPOT_DEVICE_SHOW_SET(otp2, DPOT_ADDR_OTP | DPOT_RDAC2);
+DPOT_DEVICE_SHOW_SET(otp2en, DPOT_ADDR_OTP_EN | DPOT_RDAC2);
+
+DPOT_DEVICE_SHOW_SET(rdac3, DPOT_ADDR_RDAC | DPOT_RDAC3);
+DPOT_DEVICE_SHOW_SET(eeprom3, DPOT_ADDR_EEPROM | DPOT_RDAC3);
+DPOT_DEVICE_SHOW_ONLY(tolerance3, DPOT_ADDR_EEPROM | DPOT_TOL_RDAC3);
+DPOT_DEVICE_SHOW_SET(otp3, DPOT_ADDR_OTP | DPOT_RDAC3);
+DPOT_DEVICE_SHOW_SET(otp3en, DPOT_ADDR_OTP_EN | DPOT_RDAC3);
+
+DPOT_DEVICE_SHOW_SET(rdac4, DPOT_ADDR_RDAC | DPOT_RDAC4);
+DPOT_DEVICE_SHOW_SET(eeprom4, DPOT_ADDR_EEPROM | DPOT_RDAC4);
+DPOT_DEVICE_SHOW_ONLY(tolerance4, DPOT_ADDR_EEPROM | DPOT_TOL_RDAC4);
+DPOT_DEVICE_SHOW_SET(otp4, DPOT_ADDR_OTP | DPOT_RDAC4);
+DPOT_DEVICE_SHOW_SET(otp4en, DPOT_ADDR_OTP_EN | DPOT_RDAC4);
+
+DPOT_DEVICE_SHOW_SET(rdac5, DPOT_ADDR_RDAC | DPOT_RDAC5);
+DPOT_DEVICE_SHOW_SET(eeprom5, DPOT_ADDR_EEPROM | DPOT_RDAC5);
+DPOT_DEVICE_SHOW_ONLY(tolerance5, DPOT_ADDR_EEPROM | DPOT_TOL_RDAC5);
+DPOT_DEVICE_SHOW_SET(otp5, DPOT_ADDR_OTP | DPOT_RDAC5);
+DPOT_DEVICE_SHOW_SET(otp5en, DPOT_ADDR_OTP_EN | DPOT_RDAC5);
+
+static const struct attribute *dpot_attrib_wipers[] = {
+ &dev_attr_rdac0.attr,
+ &dev_attr_rdac1.attr,
+ &dev_attr_rdac2.attr,
+ &dev_attr_rdac3.attr,
+ &dev_attr_rdac4.attr,
+ &dev_attr_rdac5.attr,
+ NULL
+};
-static DEVICE_ATTR(eeprom3, S_IWUSR | S_IRUGO, show_eeprom3, set_eeprom3);
+static const struct attribute *dpot_attrib_eeprom[] = {
+ &dev_attr_eeprom0.attr,
+ &dev_attr_eeprom1.attr,
+ &dev_attr_eeprom2.attr,
+ &dev_attr_eeprom3.attr,
+ &dev_attr_eeprom4.attr,
+ &dev_attr_eeprom5.attr,
+ NULL
+};
-static ssize_t show_tolerance3(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return sysfs_show_reg(dev, attr, buf,
- AD525X_I2C_EEPROM | AD525X_TOL_RDAC3);
-}
+static const struct attribute *dpot_attrib_otp[] = {
+ &dev_attr_otp0.attr,
+ &dev_attr_otp1.attr,
+ &dev_attr_otp2.attr,
+ &dev_attr_otp3.attr,
+ &dev_attr_otp4.attr,
+ &dev_attr_otp5.attr,
+ NULL
+};
-static DEVICE_ATTR(tolerance3, S_IRUGO, show_tolerance3, NULL);
-
-static struct attribute *ad525x_attributes_wipers[4][4] = {
- {
- &dev_attr_rdac0.attr,
- &dev_attr_eeprom0.attr,
- &dev_attr_tolerance0.attr,
- NULL
- }, {
- &dev_attr_rdac1.attr,
- &dev_attr_eeprom1.attr,
- &dev_attr_tolerance1.attr,
- NULL
- }, {
- &dev_attr_rdac2.attr,
- &dev_attr_eeprom2.attr,
- &dev_attr_tolerance2.attr,
- NULL
- }, {
- &dev_attr_rdac3.attr,
- &dev_attr_eeprom3.attr,
- &dev_attr_tolerance3.attr,
- NULL
- }
+static const struct attribute *dpot_attrib_otp_en[] = {
+ &dev_attr_otp0en.attr,
+ &dev_attr_otp1en.attr,
+ &dev_attr_otp2en.attr,
+ &dev_attr_otp3en.attr,
+ &dev_attr_otp4en.attr,
+ &dev_attr_otp5en.attr,
+ NULL
};
-static const struct attribute_group ad525x_group_wipers[] = {
- {.attrs = ad525x_attributes_wipers[AD525X_RDAC0]},
- {.attrs = ad525x_attributes_wipers[AD525X_RDAC1]},
- {.attrs = ad525x_attributes_wipers[AD525X_RDAC2]},
- {.attrs = ad525x_attributes_wipers[AD525X_RDAC3]},
+static const struct attribute *dpot_attrib_tolerance[] = {
+ &dev_attr_tolerance0.attr,
+ &dev_attr_tolerance1.attr,
+ &dev_attr_tolerance2.attr,
+ &dev_attr_tolerance3.attr,
+ &dev_attr_tolerance4.attr,
+ &dev_attr_tolerance5.attr,
+ NULL
};
/* ------------------------------------------------------------------------- */
-static ssize_t set_inc_all(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- return sysfs_do_cmd(dev, attr, buf, count, AD525X_INC_ALL);
-}
+#define DPOT_DEVICE_DO_CMD(_name, _cmd) static ssize_t \
+set_##_name(struct device *dev, \
+ struct device_attribute *attr, \
+ const char *buf, size_t count) \
+{ \
+ return sysfs_do_cmd(dev, attr, buf, count, _cmd); \
+} \
+static DEVICE_ATTR(_name, S_IWUSR | S_IRUGO, NULL, set_##_name);
-static DEVICE_ATTR(inc_all, S_IWUSR, NULL, set_inc_all);
-
-static ssize_t set_dec_all(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- return sysfs_do_cmd(dev, attr, buf, count, AD525X_DEC_ALL);
-}
-
-static DEVICE_ATTR(dec_all, S_IWUSR, NULL, set_dec_all);
-
-static ssize_t set_inc_all_6db(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- return sysfs_do_cmd(dev, attr, buf, count, AD525X_INC_ALL_6DB);
-}
-
-static DEVICE_ATTR(inc_all_6db, S_IWUSR, NULL, set_inc_all_6db);
-
-static ssize_t set_dec_all_6db(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- return sysfs_do_cmd(dev, attr, buf, count, AD525X_DEC_ALL_6DB);
-}
-
-static DEVICE_ATTR(dec_all_6db, S_IWUSR, NULL, set_dec_all_6db);
+DPOT_DEVICE_DO_CMD(inc_all, DPOT_INC_ALL);
+DPOT_DEVICE_DO_CMD(dec_all, DPOT_DEC_ALL);
+DPOT_DEVICE_DO_CMD(inc_all_6db, DPOT_INC_ALL_6DB);
+DPOT_DEVICE_DO_CMD(dec_all_6db, DPOT_DEC_ALL_6DB);
static struct attribute *ad525x_attributes_commands[] = {
&dev_attr_inc_all.attr,
@@ -409,74 +556,56 @@ static const struct attribute_group ad525x_group_commands = {
.attrs = ad525x_attributes_commands,
};
-/* ------------------------------------------------------------------------- */
-
-/* i2c device functions */
+__devinit int ad_dpot_add_files(struct device *dev,
+ unsigned features, unsigned rdac)
+{
+ int err = sysfs_create_file(&dev->kobj,
+ dpot_attrib_wipers[rdac]);
+ if (features & F_CMD_EEP)
+ err |= sysfs_create_file(&dev->kobj,
+ dpot_attrib_eeprom[rdac]);
+ if (features & F_CMD_TOL)
+ err |= sysfs_create_file(&dev->kobj,
+ dpot_attrib_tolerance[rdac]);
+ if (features & F_CMD_OTP) {
+ err |= sysfs_create_file(&dev->kobj,
+ dpot_attrib_otp_en[rdac]);
+ err |= sysfs_create_file(&dev->kobj,
+ dpot_attrib_otp[rdac]);
+ }
-/**
- * ad525x_read - return the value contained in the specified register
- * on the AD5258 device.
- * @client: value returned from i2c_new_device()
- * @reg: the register to read
- *
- * If the tolerance register is specified, 2 bytes are returned.
- * Otherwise, 1 byte is returned. A negative value indicates an error
- * occurred while reading the register.
- */
-static s32 ad525x_read(struct i2c_client *client, u8 reg)
-{
- struct dpot_data *data = i2c_get_clientdata(client);
+ if (err)
+ dev_err(dev, "failed to register sysfs hooks for RDAC%d\n",
+ rdac);
- if ((reg & AD525X_REG_TOL) || (data->max_pos > 256))
- return i2c_smbus_read_word_data(client, (reg & 0xF8) |
- ((reg & 0x7) << 1));
- else
- return i2c_smbus_read_byte_data(client, reg);
+ return err;
}
-/**
- * ad525x_write - store the given value in the specified register on
- * the AD5258 device.
- * @client: value returned from i2c_new_device()
- * @reg: the register to write
- * @value: the byte to store in the register
- *
- * For certain instructions that do not require a data byte, "NULL"
- * should be specified for the "value" parameter. These instructions
- * include NOP, RESTORE_FROM_EEPROM, and STORE_TO_EEPROM.
- *
- * A negative return value indicates an error occurred while reading
- * the register.
- */
-static s32 ad525x_write(struct i2c_client *client, u8 reg, u8 value)
-{
- struct dpot_data *data = i2c_get_clientdata(client);
-
- /* Only write the instruction byte for certain commands */
- if (reg & AD525X_I2C_CMD)
- return i2c_smbus_write_byte(client, reg);
-
- if (data->max_pos > 256)
- return i2c_smbus_write_word_data(client, (reg & 0xF8) |
- ((reg & 0x7) << 1), value);
- else
- /* All other registers require instruction + data bytes */
- return i2c_smbus_write_byte_data(client, reg, value);
+inline void ad_dpot_remove_files(struct device *dev,
+ unsigned features, unsigned rdac)
+{
+ sysfs_remove_file(&dev->kobj,
+ dpot_attrib_wipers[rdac]);
+ if (features & F_CMD_EEP)
+ sysfs_remove_file(&dev->kobj,
+ dpot_attrib_eeprom[rdac]);
+ if (features & F_CMD_TOL)
+ sysfs_remove_file(&dev->kobj,
+ dpot_attrib_tolerance[rdac]);
+ if (features & F_CMD_OTP) {
+ sysfs_remove_file(&dev->kobj,
+ dpot_attrib_otp_en[rdac]);
+ sysfs_remove_file(&dev->kobj,
+ dpot_attrib_otp[rdac]);
+ }
}
-static int ad525x_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+__devinit int ad_dpot_probe(struct device *dev,
+ struct ad_dpot_bus_data *bdata, const struct ad_dpot_id *id)
{
- struct device *dev = &client->dev;
- struct dpot_data *data;
- int err = 0;
- dev_dbg(dev, "%s\n", __func__);
-
- if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE)) {
- dev_err(dev, "missing I2C functionality for this driver\n");
- goto exit;
- }
+ struct dpot_data *data;
+ int i, err = 0;
data = kzalloc(sizeof(struct dpot_data), GFP_KERNEL);
if (!data) {
@@ -484,183 +613,74 @@ static int ad525x_probe(struct i2c_client *client,
goto exit;
}
- i2c_set_clientdata(client, data);
+ dev_set_drvdata(dev, data);
mutex_init(&data->update_lock);
- switch (id->driver_data) {
- case AD5258_ID:
- data->max_pos = AD5258_MAX_POSITION;
- err = sysfs_create_group(&dev->kobj,
- &ad525x_group_wipers[AD525X_RDAC0]);
- break;
- case AD5259_ID:
- data->max_pos = AD5259_MAX_POSITION;
- err = sysfs_create_group(&dev->kobj,
- &ad525x_group_wipers[AD525X_RDAC0]);
- break;
- case AD5251_ID:
- data->max_pos = AD5251_MAX_POSITION;
- err = sysfs_create_group(&dev->kobj,
- &ad525x_group_wipers[AD525X_RDAC1]);
- err |= sysfs_create_group(&dev->kobj,
- &ad525x_group_wipers[AD525X_RDAC3]);
- err |= sysfs_create_group(&dev->kobj, &ad525x_group_commands);
- break;
- case AD5252_ID:
- data->max_pos = AD5252_MAX_POSITION;
- err = sysfs_create_group(&dev->kobj,
- &ad525x_group_wipers[AD525X_RDAC1]);
- err |= sysfs_create_group(&dev->kobj,
- &ad525x_group_wipers[AD525X_RDAC3]);
- err |= sysfs_create_group(&dev->kobj, &ad525x_group_commands);
- break;
- case AD5253_ID:
- data->max_pos = AD5253_MAX_POSITION;
- err = sysfs_create_group(&dev->kobj,
- &ad525x_group_wipers[AD525X_RDAC0]);
- err |= sysfs_create_group(&dev->kobj,
- &ad525x_group_wipers[AD525X_RDAC1]);
- err |= sysfs_create_group(&dev->kobj,
- &ad525x_group_wipers[AD525X_RDAC2]);
- err |= sysfs_create_group(&dev->kobj,
- &ad525x_group_wipers[AD525X_RDAC3]);
- err |= sysfs_create_group(&dev->kobj, &ad525x_group_commands);
- break;
- case AD5254_ID:
- data->max_pos = AD5254_MAX_POSITION;
- err = sysfs_create_group(&dev->kobj,
- &ad525x_group_wipers[AD525X_RDAC0]);
- err |= sysfs_create_group(&dev->kobj,
- &ad525x_group_wipers[AD525X_RDAC1]);
- err |= sysfs_create_group(&dev->kobj,
- &ad525x_group_wipers[AD525X_RDAC2]);
- err |= sysfs_create_group(&dev->kobj,
- &ad525x_group_wipers[AD525X_RDAC3]);
- err |= sysfs_create_group(&dev->kobj, &ad525x_group_commands);
- break;
- case AD5255_ID:
- data->max_pos = AD5255_MAX_POSITION;
- err = sysfs_create_group(&dev->kobj,
- &ad525x_group_wipers[AD525X_RDAC0]);
- err |= sysfs_create_group(&dev->kobj,
- &ad525x_group_wipers[AD525X_RDAC1]);
- err |= sysfs_create_group(&dev->kobj,
- &ad525x_group_wipers[AD525X_RDAC2]);
- err |= sysfs_create_group(&dev->kobj, &ad525x_group_commands);
- break;
- default:
- err = -ENODEV;
- goto exit_free;
- }
+ data->bdata = *bdata;
+ data->devid = id->devid;
+
+ data->max_pos = 1 << DPOT_MAX_POS(data->devid);
+ data->rdac_mask = data->max_pos - 1;
+ data->feat = DPOT_FEAT(data->devid);
+ data->uid = DPOT_UID(data->devid);
+ data->wipers = DPOT_WIPERS(data->devid);
+
+ for (i = DPOT_RDAC0; i < MAX_RDACS; i++)
+ if (data->wipers & (1 << i)) {
+ err = ad_dpot_add_files(dev, data->feat, i);
+ if (err)
+ goto exit_remove_files;
+ /* power-up midscale */
+ if (data->feat & F_RDACS_WONLY)
+ data->rdac_cache[i] = data->max_pos / 2;
+ }
+
+ if (data->feat & F_CMD_INC)
+ err = sysfs_create_group(&dev->kobj, &ad525x_group_commands);
if (err) {
dev_err(dev, "failed to register sysfs hooks\n");
goto exit_free;
}
- data->devid = id->driver_data;
- data->rdac_mask = data->max_pos - 1;
-
dev_info(dev, "%s %d-Position Digital Potentiometer registered\n",
id->name, data->max_pos);
return 0;
+exit_remove_files:
+ for (i = DPOT_RDAC0; i < MAX_RDACS; i++)
+ if (data->wipers & (1 << i))
+ ad_dpot_remove_files(dev, data->feat, i);
+
exit_free:
kfree(data);
- i2c_set_clientdata(client, NULL);
+ dev_set_drvdata(dev, NULL);
exit:
- dev_err(dev, "failed to create client\n");
+ dev_err(dev, "failed to create client for %s ID 0x%lX\n",
+ id->name, id->devid);
return err;
}
+EXPORT_SYMBOL(ad_dpot_probe);
-static int __devexit ad525x_remove(struct i2c_client *client)
+__devexit int ad_dpot_remove(struct device *dev)
{
- struct dpot_data *data = i2c_get_clientdata(client);
- struct device *dev = &client->dev;
-
- switch (data->devid) {
- case AD5258_ID:
- case AD5259_ID:
- sysfs_remove_group(&dev->kobj,
- &ad525x_group_wipers[AD525X_RDAC0]);
- break;
- case AD5251_ID:
- case AD5252_ID:
- sysfs_remove_group(&dev->kobj,
- &ad525x_group_wipers[AD525X_RDAC1]);
- sysfs_remove_group(&dev->kobj,
- &ad525x_group_wipers[AD525X_RDAC3]);
- sysfs_remove_group(&dev->kobj, &ad525x_group_commands);
- break;
- case AD5253_ID:
- case AD5254_ID:
- sysfs_remove_group(&dev->kobj,
- &ad525x_group_wipers[AD525X_RDAC0]);
- sysfs_remove_group(&dev->kobj,
- &ad525x_group_wipers[AD525X_RDAC1]);
- sysfs_remove_group(&dev->kobj,
- &ad525x_group_wipers[AD525X_RDAC2]);
- sysfs_remove_group(&dev->kobj,
- &ad525x_group_wipers[AD525X_RDAC3]);
- sysfs_remove_group(&dev->kobj, &ad525x_group_commands);
- break;
- case AD5255_ID:
- sysfs_remove_group(&dev->kobj,
- &ad525x_group_wipers[AD525X_RDAC0]);
- sysfs_remove_group(&dev->kobj,
- &ad525x_group_wipers[AD525X_RDAC1]);
- sysfs_remove_group(&dev->kobj,
- &ad525x_group_wipers[AD525X_RDAC2]);
- sysfs_remove_group(&dev->kobj, &ad525x_group_commands);
- break;
- }
+ struct dpot_data *data = dev_get_drvdata(dev);
+ int i;
+
+ for (i = DPOT_RDAC0; i < MAX_RDACS; i++)
+ if (data->wipers & (1 << i))
+ ad_dpot_remove_files(dev, data->feat, i);
- i2c_set_clientdata(client, NULL);
kfree(data);
return 0;
}
+EXPORT_SYMBOL(ad_dpot_remove);
-static const struct i2c_device_id ad525x_idtable[] = {
- {"ad5258", AD5258_ID},
- {"ad5259", AD5259_ID},
- {"ad5251", AD5251_ID},
- {"ad5252", AD5252_ID},
- {"ad5253", AD5253_ID},
- {"ad5254", AD5254_ID},
- {"ad5255", AD5255_ID},
- {}
-};
-
-MODULE_DEVICE_TABLE(i2c, ad525x_idtable);
-
-static struct i2c_driver ad525x_driver = {
- .driver = {
- .owner = THIS_MODULE,
- .name = DRIVER_NAME,
- },
- .id_table = ad525x_idtable,
- .probe = ad525x_probe,
- .remove = __devexit_p(ad525x_remove),
-};
-
-static int __init ad525x_init(void)
-{
- return i2c_add_driver(&ad525x_driver);
-}
-
-module_init(ad525x_init);
-
-static void __exit ad525x_exit(void)
-{
- i2c_del_driver(&ad525x_driver);
-}
-
-module_exit(ad525x_exit);
MODULE_AUTHOR("Chris Verges <chrisv@cyberswitching.com>, "
- "Michael Hennerich <hennerich@blackfin.uclinux.org>, ");
-MODULE_DESCRIPTION("AD5258/9 digital potentiometer driver");
+ "Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_DESCRIPTION("Digital potentiometer driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRIVER_VERSION);
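The DPOT_DEVICE_SHOW/SET/SHOW_SET macros above generate the per-RDAC sysfs handlers that the old driver spelled out by hand. A rough sketch of what a single invocation expands to after preprocessing, assuming the sysfs_show_reg()/sysfs_set_reg() helpers defined earlier in this file; this is only an illustration, not something the patch adds:

/* DPOT_DEVICE_SHOW_SET(rdac0, DPOT_ADDR_RDAC | DPOT_RDAC0) expands to roughly: */
static ssize_t show_rdac0(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	return sysfs_show_reg(dev, attr, buf, DPOT_ADDR_RDAC | DPOT_RDAC0);
}

static ssize_t set_rdac0(struct device *dev,
			 struct device_attribute *attr,
			 const char *buf, size_t count)
{
	return sysfs_set_reg(dev, attr, buf, count, DPOT_ADDR_RDAC | DPOT_RDAC0);
}

static DEVICE_ATTR(rdac0, S_IWUSR | S_IRUGO, show_rdac0, set_rdac0);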
diff --git a/drivers/misc/ad525x_dpot.h b/drivers/misc/ad525x_dpot.h
new file mode 100644
index 000000000000..78b89fd2e2fd
--- /dev/null
+++ b/drivers/misc/ad525x_dpot.h
@@ -0,0 +1,202 @@
+/*
+ * Driver for the Analog Devices digital potentiometers
+ *
+ * Copyright (C) 2010 Michael Hennerich, Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef _AD_DPOT_H_
+#define _AD_DPOT_H_
+
+#include <linux/types.h>
+
+#define DPOT_CONF(features, wipers, max_pos, uid) \
+ (((features) << 18) | (((wipers) & 0xFF) << 10) | \
+ ((max_pos & 0xF) << 6) | (uid & 0x3F))
+
+#define DPOT_UID(conf) (conf & 0x3F)
+#define DPOT_MAX_POS(conf) ((conf >> 6) & 0xF)
+#define DPOT_WIPERS(conf) ((conf >> 10) & 0xFF)
+#define DPOT_FEAT(conf) (conf >> 18)
+
+#define BRDAC0 (1 << 0)
+#define BRDAC1 (1 << 1)
+#define BRDAC2 (1 << 2)
+#define BRDAC3 (1 << 3)
+#define BRDAC4 (1 << 4)
+#define BRDAC5 (1 << 5)
+#define MAX_RDACS 6
+
+#define F_CMD_INC (1 << 0) /* Features INC/DEC ALL, 6dB */
+#define F_CMD_EEP (1 << 1) /* Features EEPROM */
+#define F_CMD_OTP (1 << 2) /* Features OTP */
+#define F_CMD_TOL (1 << 3) /* RDACS feature Tolerance REG */
+#define F_RDACS_RW (1 << 4) /* RDACS are Read/Write */
+#define F_RDACS_WONLY (1 << 5) /* RDACS are Write only */
+#define F_AD_APPDATA (1 << 6) /* RDAC Address append to data */
+#define F_SPI_8BIT (1 << 7) /* All SPI XFERS are 8-bit */
+#define F_SPI_16BIT (1 << 8) /* All SPI XFERS are 16-bit */
+#define F_SPI_24BIT (1 << 9) /* All SPI XFERS are 24-bit */
+
+#define F_RDACS_RW_TOL (F_RDACS_RW | F_CMD_EEP | F_CMD_TOL)
+#define F_RDACS_RW_EEP (F_RDACS_RW | F_CMD_EEP)
+#define F_SPI (F_SPI_8BIT | F_SPI_16BIT | F_SPI_24BIT)
+
+enum dpot_devid {
+ AD5258_ID = DPOT_CONF(F_RDACS_RW_TOL, BRDAC0, 6, 0), /* I2C */
+ AD5259_ID = DPOT_CONF(F_RDACS_RW_TOL, BRDAC0, 8, 1),
+ AD5251_ID = DPOT_CONF(F_RDACS_RW_TOL | F_CMD_INC,
+ BRDAC0 | BRDAC3, 6, 2),
+ AD5252_ID = DPOT_CONF(F_RDACS_RW_TOL | F_CMD_INC,
+ BRDAC0 | BRDAC3, 8, 3),
+ AD5253_ID = DPOT_CONF(F_RDACS_RW_TOL | F_CMD_INC,
+ BRDAC0 | BRDAC1 | BRDAC2 | BRDAC3, 6, 4),
+ AD5254_ID = DPOT_CONF(F_RDACS_RW_TOL | F_CMD_INC,
+ BRDAC0 | BRDAC1 | BRDAC2 | BRDAC3, 8, 5),
+ AD5255_ID = DPOT_CONF(F_RDACS_RW_TOL | F_CMD_INC,
+ BRDAC0 | BRDAC1 | BRDAC2, 9, 6),
+ AD5160_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT,
+ BRDAC0, 8, 7), /* SPI */
+ AD5161_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT,
+ BRDAC0, 8, 8),
+ AD5162_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT,
+ BRDAC0 | BRDAC1, 8, 9),
+ AD5165_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT,
+ BRDAC0, 8, 10),
+ AD5200_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT,
+ BRDAC0, 8, 11),
+ AD5201_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT,
+ BRDAC0, 5, 12),
+ AD5203_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT,
+ BRDAC0 | BRDAC1 | BRDAC2 | BRDAC3, 6, 13),
+ AD5204_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT,
+ BRDAC0 | BRDAC1 | BRDAC2 | BRDAC3, 8, 14),
+ AD5206_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT,
+ BRDAC0 | BRDAC1 | BRDAC2 | BRDAC3 | BRDAC4 | BRDAC5,
+ 8, 15),
+ AD5207_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT,
+ BRDAC0 | BRDAC1, 8, 16),
+ AD5231_ID = DPOT_CONF(F_RDACS_RW_EEP | F_CMD_INC | F_SPI_24BIT,
+ BRDAC0, 10, 17),
+ AD5232_ID = DPOT_CONF(F_RDACS_RW_EEP | F_CMD_INC | F_SPI_16BIT,
+ BRDAC0 | BRDAC1, 8, 18),
+ AD5233_ID = DPOT_CONF(F_RDACS_RW_EEP | F_CMD_INC | F_SPI_16BIT,
+ BRDAC0 | BRDAC1 | BRDAC2 | BRDAC3, 6, 19),
+ AD5235_ID = DPOT_CONF(F_RDACS_RW_EEP | F_CMD_INC | F_SPI_24BIT,
+ BRDAC0 | BRDAC1, 10, 20),
+ AD5260_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT,
+ BRDAC0, 8, 21),
+ AD5262_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT,
+ BRDAC0 | BRDAC1, 8, 22),
+ AD5263_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT,
+ BRDAC0 | BRDAC1 | BRDAC2 | BRDAC3, 8, 23),
+ AD5290_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT,
+ BRDAC0, 8, 24),
+ AD5291_ID = DPOT_CONF(F_RDACS_RW | F_SPI_16BIT, BRDAC0, 8, 25),
+ AD5292_ID = DPOT_CONF(F_RDACS_RW | F_SPI_16BIT, BRDAC0, 10, 26),
+ AD5293_ID = DPOT_CONF(F_RDACS_RW | F_SPI_16BIT, BRDAC0, 10, 27),
+ AD7376_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT,
+ BRDAC0, 7, 28),
+ AD8400_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT,
+ BRDAC0, 8, 29),
+ AD8402_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT,
+ BRDAC0 | BRDAC1, 8, 30),
+ AD8403_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT,
+ BRDAC0 | BRDAC1 | BRDAC2, 8, 31),
+ ADN2850_ID = DPOT_CONF(F_RDACS_RW_EEP | F_CMD_INC | F_SPI_24BIT,
+ BRDAC0 | BRDAC1, 10, 32),
+ AD5241_ID = DPOT_CONF(F_RDACS_RW, BRDAC0, 8, 33),
+ AD5242_ID = DPOT_CONF(F_RDACS_RW, BRDAC0 | BRDAC1, 8, 34),
+ AD5243_ID = DPOT_CONF(F_RDACS_RW, BRDAC0 | BRDAC1, 8, 35),
+ AD5245_ID = DPOT_CONF(F_RDACS_RW, BRDAC0, 8, 36),
+ AD5246_ID = DPOT_CONF(F_RDACS_RW, BRDAC0, 7, 37),
+ AD5247_ID = DPOT_CONF(F_RDACS_RW, BRDAC0, 7, 38),
+ AD5248_ID = DPOT_CONF(F_RDACS_RW, BRDAC0 | BRDAC1, 8, 39),
+ AD5280_ID = DPOT_CONF(F_RDACS_RW, BRDAC0, 8, 40),
+ AD5282_ID = DPOT_CONF(F_RDACS_RW, BRDAC0 | BRDAC1, 8, 41),
+ ADN2860_ID = DPOT_CONF(F_RDACS_RW_TOL | F_CMD_INC,
+ BRDAC0 | BRDAC1 | BRDAC2, 9, 42),
+ AD5273_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP, BRDAC0, 6, 43),
+ AD5171_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP, BRDAC0, 6, 44),
+ AD5170_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP, BRDAC0, 8, 45),
+ AD5172_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP, BRDAC0 | BRDAC1, 8, 46),
+ AD5173_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP, BRDAC0 | BRDAC1, 8, 47),
+};
+
+#define DPOT_RDAC0 0
+#define DPOT_RDAC1 1
+#define DPOT_RDAC2 2
+#define DPOT_RDAC3 3
+#define DPOT_RDAC4 4
+#define DPOT_RDAC5 5
+
+#define DPOT_RDAC_MASK 0x1F
+
+#define DPOT_REG_TOL 0x18
+#define DPOT_TOL_RDAC0 (DPOT_REG_TOL | DPOT_RDAC0)
+#define DPOT_TOL_RDAC1 (DPOT_REG_TOL | DPOT_RDAC1)
+#define DPOT_TOL_RDAC2 (DPOT_REG_TOL | DPOT_RDAC2)
+#define DPOT_TOL_RDAC3 (DPOT_REG_TOL | DPOT_RDAC3)
+#define DPOT_TOL_RDAC4 (DPOT_REG_TOL | DPOT_RDAC4)
+#define DPOT_TOL_RDAC5 (DPOT_REG_TOL | DPOT_RDAC5)
+
+/* RDAC-to-EEPROM Interface Commands */
+#define DPOT_ADDR_RDAC (0x0 << 5)
+#define DPOT_ADDR_EEPROM (0x1 << 5)
+#define DPOT_ADDR_OTP (0x1 << 6)
+#define DPOT_ADDR_CMD (0x1 << 7)
+#define DPOT_ADDR_OTP_EN (0x1 << 9)
+
+#define DPOT_DEC_ALL_6DB (DPOT_ADDR_CMD | (0x4 << 3))
+#define DPOT_INC_ALL_6DB (DPOT_ADDR_CMD | (0x9 << 3))
+#define DPOT_DEC_ALL (DPOT_ADDR_CMD | (0x6 << 3))
+#define DPOT_INC_ALL (DPOT_ADDR_CMD | (0xB << 3))
+
+#define DPOT_SPI_RDAC 0xB0
+#define DPOT_SPI_EEPROM 0x30
+#define DPOT_SPI_READ_RDAC 0xA0
+#define DPOT_SPI_READ_EEPROM 0x90
+#define DPOT_SPI_DEC_ALL_6DB 0x50
+#define DPOT_SPI_INC_ALL_6DB 0xD0
+#define DPOT_SPI_DEC_ALL 0x70
+#define DPOT_SPI_INC_ALL 0xF0
+
+/* AD5291/2/3 use special commands */
+#define DPOT_AD5291_RDAC 0x01
+#define DPOT_AD5291_READ_RDAC 0x02
+
+/* AD524x use special commands */
+#define DPOT_AD5291_RDAC_AB 0x80
+
+#define DPOT_AD5273_FUSE 0x80
+#define DPOT_AD5270_2_3_FUSE 0x20
+#define DPOT_AD5270_2_3_OW 0x08
+#define DPOT_AD5272_3_A0 0x08
+#define DPOT_AD5270_2FUSE 0x80
+
+struct dpot_data;
+
+struct ad_dpot_bus_ops {
+ int (*read_d8) (void *client);
+ int (*read_r8d8) (void *client, u8 reg);
+ int (*read_r8d16) (void *client, u8 reg);
+ int (*write_d8) (void *client, u8 val);
+ int (*write_r8d8) (void *client, u8 reg, u8 val);
+ int (*write_r8d16) (void *client, u8 reg, u16 val);
+};
+
+struct ad_dpot_bus_data {
+ void *client;
+ const struct ad_dpot_bus_ops *bops;
+};
+
+struct ad_dpot_id {
+ char *name;
+ unsigned long devid;
+};
+
+int ad_dpot_probe(struct device *dev, struct ad_dpot_bus_data *bdata, const struct ad_dpot_id *id);
+int ad_dpot_remove(struct device *dev);
+
+#endif
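DPOT_CONF() packs a device's feature mask, wiper mask, resolution and unique id into one word; ad_dpot_probe() in the .c file unpacks it with the accessor macros above. A minimal sketch of the decode for one table entry, with the resulting values noted in comments; the function name is illustrative only:

static inline void ad_dpot_decode_sketch(void)
{
	unsigned long devid = AD5252_ID;

	unsigned max_pos   = 1 << DPOT_MAX_POS(devid);	/* 1 << 8 = 256 positions */
	unsigned rdac_mask = max_pos - 1;		/* 0xFF */
	unsigned feat      = DPOT_FEAT(devid);		/* F_RDACS_RW_TOL | F_CMD_INC */
	unsigned wipers    = DPOT_WIPERS(devid);	/* BRDAC0 | BRDAC3 */
	unsigned uid       = DPOT_UID(devid);		/* 3 */

	(void)rdac_mask; (void)feat; (void)wipers; (void)uid;
}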
diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
index ed090e77c9cd..19fc7c1cb428 100644
--- a/drivers/misc/c2port/core.c
+++ b/drivers/misc/c2port/core.c
@@ -707,7 +707,7 @@ static ssize_t __c2port_read_flash_data(struct c2port_device *dev,
return nread;
}
-static ssize_t c2port_read_flash_data(struct kobject *kobj,
+static ssize_t c2port_read_flash_data(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr,
char *buffer, loff_t offset, size_t count)
{
@@ -824,7 +824,7 @@ static ssize_t __c2port_write_flash_data(struct c2port_device *dev,
return nwrite;
}
-static ssize_t c2port_write_flash_data(struct kobject *kobj,
+static ssize_t c2port_write_flash_data(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr,
char *buffer, loff_t offset, size_t count)
{
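This is the first of several drivers in this patch (ds1682, at24, at25, eeprom and max6875 follow) whose sysfs binary attribute callbacks gain a leading struct file * argument. A sketch of the read prototype these drivers now implement; the function name and empty body are placeholders only:

static ssize_t example_bin_read(struct file *filp, struct kobject *kobj,
				struct bin_attribute *attr,
				char *buf, loff_t off, size_t count)
{
	/* filp is the new first argument; the remaining parameters and
	 * semantics are unchanged. */
	return 0;
}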
diff --git a/drivers/misc/ds1682.c b/drivers/misc/ds1682.c
index 9197cfc55015..a513f0aa6432 100644
--- a/drivers/misc/ds1682.c
+++ b/drivers/misc/ds1682.c
@@ -140,7 +140,8 @@ static const struct attribute_group ds1682_group = {
/*
* User data attribute
*/
-static ssize_t ds1682_eeprom_read(struct kobject *kobj, struct bin_attribute *attr,
+static ssize_t ds1682_eeprom_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct i2c_client *client = kobj_to_i2c_client(kobj);
@@ -163,7 +164,8 @@ static ssize_t ds1682_eeprom_read(struct kobject *kobj, struct bin_attribute *at
return count;
}
-static ssize_t ds1682_eeprom_write(struct kobject *kobj, struct bin_attribute *attr,
+static ssize_t ds1682_eeprom_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct i2c_client *client = kobj_to_i2c_client(kobj);
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index db7d0f21b65d..f7ca3a42b490 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -54,7 +54,7 @@
struct at24_data {
struct at24_platform_data chip;
struct memory_accessor macc;
- bool use_smbus;
+ int use_smbus;
/*
* Lock protects against activities from other Linux tasks,
@@ -184,11 +184,19 @@ static ssize_t at24_eeprom_read(struct at24_data *at24, char *buf,
if (count > io_limit)
count = io_limit;
- if (at24->use_smbus) {
+ switch (at24->use_smbus) {
+ case I2C_SMBUS_I2C_BLOCK_DATA:
/* Smaller eeproms can work given some SMBus extension calls */
if (count > I2C_SMBUS_BLOCK_MAX)
count = I2C_SMBUS_BLOCK_MAX;
- } else {
+ break;
+ case I2C_SMBUS_WORD_DATA:
+ count = 2;
+ break;
+ case I2C_SMBUS_BYTE_DATA:
+ count = 1;
+ break;
+ default:
/*
* When we have a better choice than SMBus calls, use a
* combined I2C message. Write address; then read up to
@@ -219,10 +227,27 @@ static ssize_t at24_eeprom_read(struct at24_data *at24, char *buf,
timeout = jiffies + msecs_to_jiffies(write_timeout);
do {
read_time = jiffies;
- if (at24->use_smbus) {
+ switch (at24->use_smbus) {
+ case I2C_SMBUS_I2C_BLOCK_DATA:
status = i2c_smbus_read_i2c_block_data(client, offset,
count, buf);
- } else {
+ break;
+ case I2C_SMBUS_WORD_DATA:
+ status = i2c_smbus_read_word_data(client, offset);
+ if (status >= 0) {
+ buf[0] = status & 0xff;
+ buf[1] = status >> 8;
+ status = count;
+ }
+ break;
+ case I2C_SMBUS_BYTE_DATA:
+ status = i2c_smbus_read_byte_data(client, offset);
+ if (status >= 0) {
+ buf[0] = status;
+ status = count;
+ }
+ break;
+ default:
status = i2c_transfer(client->adapter, msg, 2);
if (status == 2)
status = count;
@@ -274,7 +299,8 @@ static ssize_t at24_read(struct at24_data *at24,
return retval;
}
-static ssize_t at24_bin_read(struct kobject *kobj, struct bin_attribute *attr,
+static ssize_t at24_bin_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct at24_data *at24;
@@ -395,7 +421,8 @@ static ssize_t at24_write(struct at24_data *at24, const char *buf, loff_t off,
return retval;
}
-static ssize_t at24_bin_write(struct kobject *kobj, struct bin_attribute *attr,
+static ssize_t at24_bin_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct at24_data *at24;
@@ -434,7 +461,7 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
struct at24_platform_data chip;
bool writable;
- bool use_smbus = false;
+ int use_smbus = 0;
struct at24_data *at24;
int err;
unsigned i, num_addresses;
@@ -475,12 +502,19 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
err = -EPFNOSUPPORT;
goto err_out;
}
- if (!i2c_check_functionality(client->adapter,
+ if (i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_READ_I2C_BLOCK)) {
+ use_smbus = I2C_SMBUS_I2C_BLOCK_DATA;
+ } else if (i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_READ_WORD_DATA)) {
+ use_smbus = I2C_SMBUS_WORD_DATA;
+ } else if (i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_READ_BYTE_DATA)) {
+ use_smbus = I2C_SMBUS_BYTE_DATA;
+ } else {
err = -EPFNOSUPPORT;
goto err_out;
}
- use_smbus = true;
}
if (chip.flags & AT24_FLAG_TAKE8ADDR)
@@ -566,11 +600,16 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
dev_info(&client->dev, "%zu byte %s EEPROM %s\n",
at24->bin.size, client->name,
writable ? "(writable)" : "(read-only)");
+ if (use_smbus == I2C_SMBUS_WORD_DATA ||
+ use_smbus == I2C_SMBUS_BYTE_DATA) {
+ dev_notice(&client->dev, "Falling back to %s reads, "
+ "performance will suffer\n", use_smbus ==
+ I2C_SMBUS_WORD_DATA ? "word" : "byte");
+ }
dev_dbg(&client->dev,
- "page_size %d, num_addresses %d, write_max %d%s\n",
+ "page_size %d, num_addresses %d, write_max %d, use_smbus %d\n",
chip.page_size, num_addresses,
- at24->write_max,
- use_smbus ? ", use_smbus" : "");
+ at24->write_max, use_smbus);
/* export data to kernel code */
if (chip.setup)
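The probe hunk above replaces the boolean use_smbus with a three-level fallback ladder, preferring I2C block reads and degrading to word and then byte reads. A condensed sketch of that selection, using the same functionality flags as the patch; the helper name is hypothetical:

static int at24_pick_smbus_mode(struct i2c_adapter *adapter)
{
	if (i2c_check_functionality(adapter, I2C_FUNC_SMBUS_READ_I2C_BLOCK))
		return I2C_SMBUS_I2C_BLOCK_DATA;	/* up to 32 bytes per read */
	if (i2c_check_functionality(adapter, I2C_FUNC_SMBUS_READ_WORD_DATA))
		return I2C_SMBUS_WORD_DATA;		/* two bytes per read */
	if (i2c_check_functionality(adapter, I2C_FUNC_SMBUS_READ_BYTE_DATA))
		return I2C_SMBUS_BYTE_DATA;		/* one byte per read */
	return 0;					/* no usable SMBus read mode */
}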
diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c
index d194212a41f6..c627e4174ccd 100644
--- a/drivers/misc/eeprom/at25.c
+++ b/drivers/misc/eeprom/at25.c
@@ -126,7 +126,8 @@ at25_ee_read(
}
static ssize_t
-at25_bin_read(struct kobject *kobj, struct bin_attribute *bin_attr,
+at25_bin_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct device *dev;
@@ -253,7 +254,8 @@ at25_ee_write(struct at25_data *at25, const char *buf, loff_t off,
}
static ssize_t
-at25_bin_write(struct kobject *kobj, struct bin_attribute *bin_attr,
+at25_bin_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct device *dev;
diff --git a/drivers/misc/eeprom/eeprom.c b/drivers/misc/eeprom/eeprom.c
index f939ebc2507c..45060ddc4e59 100644
--- a/drivers/misc/eeprom/eeprom.c
+++ b/drivers/misc/eeprom/eeprom.c
@@ -1,24 +1,20 @@
/*
- Copyright (C) 1998, 1999 Frodo Looijaard <frodol@dds.nl> and
- Philip Edelbrock <phil@netroedge.com>
- Copyright (C) 2003 Greg Kroah-Hartman <greg@kroah.com>
- Copyright (C) 2003 IBM Corp.
- Copyright (C) 2004 Jean Delvare <khali@linux-fr.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-*/
+ * Copyright (C) 1998, 1999 Frodo Looijaard <frodol@dds.nl> and
+ * Philip Edelbrock <phil@netroedge.com>
+ * Copyright (C) 2003 Greg Kroah-Hartman <greg@kroah.com>
+ * Copyright (C) 2003 IBM Corp.
+ * Copyright (C) 2004 Jean Delvare <khali@linux-fr.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
#include <linux/kernel.h>
#include <linux/init.h>
@@ -85,7 +81,8 @@ exit:
mutex_unlock(&data->update_lock);
}
-static ssize_t eeprom_read(struct kobject *kobj, struct bin_attribute *bin_attr,
+static ssize_t eeprom_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct i2c_client *client = to_i2c_client(container_of(kobj, struct device, kobj));
diff --git a/drivers/misc/eeprom/eeprom_93cx6.c b/drivers/misc/eeprom/eeprom_93cx6.c
index 15b1780025c8..7b33de95c4bf 100644
--- a/drivers/misc/eeprom/eeprom_93cx6.c
+++ b/drivers/misc/eeprom/eeprom_93cx6.c
@@ -1,27 +1,20 @@
/*
- Copyright (C) 2004 - 2006 rt2x00 SourceForge Project
- <http://rt2x00.serialmonkey.com>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-
-/*
- Module: eeprom_93cx6
- Abstract: EEPROM reader routines for 93cx6 chipsets.
- Supported chipsets: 93c46 & 93c66.
+ * Copyright (C) 2004 - 2006 rt2x00 SourceForge Project
+ * <http://rt2x00.serialmonkey.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Module: eeprom_93cx6
+ * Abstract: EEPROM reader routines for 93cx6 chipsets.
+ * Supported chipsets: 93c46 & 93c66.
*/
#include <linux/kernel.h>
diff --git a/drivers/misc/eeprom/max6875.c b/drivers/misc/eeprom/max6875.c
index 5a6b2bce8ad5..5653a3ce0517 100644
--- a/drivers/misc/eeprom/max6875.c
+++ b/drivers/misc/eeprom/max6875.c
@@ -1,30 +1,30 @@
/*
- max6875.c - driver for MAX6874/MAX6875
-
- Copyright (C) 2005 Ben Gardner <bgardner@wabtec.com>
-
- Based on eeprom.c
-
- The MAX6875 has a bank of registers and two banks of EEPROM.
- Address ranges are defined as follows:
- * 0x0000 - 0x0046 = configuration registers
- * 0x8000 - 0x8046 = configuration EEPROM
- * 0x8100 - 0x82FF = user EEPROM
-
- This driver makes the user EEPROM available for read.
-
- The registers & config EEPROM should be accessed via i2c-dev.
-
- The MAX6875 ignores the lowest address bit, so each chip responds to
- two addresses - 0x50/0x51 and 0x52/0x53.
-
- Note that the MAX6875 uses i2c_smbus_write_byte_data() to set the read
- address, so this driver is destructive if loaded for the wrong EEPROM chip.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; version 2 of the License.
-*/
+ * max6875.c - driver for MAX6874/MAX6875
+ *
+ * Copyright (C) 2005 Ben Gardner <bgardner@wabtec.com>
+ *
+ * Based on eeprom.c
+ *
+ * The MAX6875 has a bank of registers and two banks of EEPROM.
+ * Address ranges are defined as follows:
+ * * 0x0000 - 0x0046 = configuration registers
+ * * 0x8000 - 0x8046 = configuration EEPROM
+ * * 0x8100 - 0x82FF = user EEPROM
+ *
+ * This driver makes the user EEPROM available for read.
+ *
+ * The registers & config EEPROM should be accessed via i2c-dev.
+ *
+ * The MAX6875 ignores the lowest address bit, so each chip responds to
+ * two addresses - 0x50/0x51 and 0x52/0x53.
+ *
+ * Note that the MAX6875 uses i2c_smbus_write_byte_data() to set the read
+ * address, so this driver is destructive if loaded for the wrong EEPROM chip.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
#include <linux/kernel.h>
#include <linux/init.h>
@@ -107,7 +107,7 @@ exit_up:
mutex_unlock(&data->update_lock);
}
-static ssize_t max6875_read(struct kobject *kobj,
+static ssize_t max6875_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
diff --git a/drivers/misc/hdpuftrs/Makefile b/drivers/misc/hdpuftrs/Makefile
deleted file mode 100644
index ac74ae679230..000000000000
--- a/drivers/misc/hdpuftrs/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-obj-$(CONFIG_HDPU_FEATURES) := hdpu_cpustate.o hdpu_nexus.o
diff --git a/drivers/misc/hdpuftrs/hdpu_cpustate.c b/drivers/misc/hdpuftrs/hdpu_cpustate.c
deleted file mode 100644
index 176fe4e09d3f..000000000000
--- a/drivers/misc/hdpuftrs/hdpu_cpustate.c
+++ /dev/null
@@ -1,256 +0,0 @@
-/*
- * Sky CPU State Driver
- *
- * Copyright (C) 2002 Brian Waite
- *
- * This driver allows use of the CPU state bits
- * It exports the /dev/sky_cpustate and also
- * /proc/sky_cpustate pseudo-file for status information.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/spinlock.h>
-#include <linux/smp_lock.h>
-#include <linux/miscdevice.h>
-#include <linux/proc_fs.h>
-#include <linux/hdpu_features.h>
-#include <linux/platform_device.h>
-#include <asm/uaccess.h>
-#include <linux/seq_file.h>
-#include <asm/io.h>
-
-#define SKY_CPUSTATE_VERSION "1.1"
-
-static int hdpu_cpustate_probe(struct platform_device *pdev);
-static int hdpu_cpustate_remove(struct platform_device *pdev);
-
-static unsigned char cpustate_get_state(void);
-static int cpustate_proc_open(struct inode *inode, struct file *file);
-static int cpustate_proc_read(struct seq_file *seq, void *offset);
-
-static struct cpustate_t cpustate;
-
-static const struct file_operations proc_cpustate = {
- .open = cpustate_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static int cpustate_proc_open(struct inode *inode, struct file *file)
-{
- return single_open(file, cpustate_proc_read, NULL);
-}
-
-static int cpustate_proc_read(struct seq_file *seq, void *offset)
-{
- seq_printf(seq, "CPU State: %04x\n", cpustate_get_state());
- return 0;
-}
-
-static int cpustate_get_ref(int excl)
-{
-
- int retval = -EBUSY;
-
- spin_lock(&cpustate.lock);
-
- if (cpustate.excl)
- goto out_busy;
-
- if (excl) {
- if (cpustate.open_count)
- goto out_busy;
- cpustate.excl = 1;
- }
-
- cpustate.open_count++;
- retval = 0;
-
- out_busy:
- spin_unlock(&cpustate.lock);
- return retval;
-}
-
-static int cpustate_free_ref(void)
-{
-
- spin_lock(&cpustate.lock);
-
- cpustate.excl = 0;
- cpustate.open_count--;
-
- spin_unlock(&cpustate.lock);
- return 0;
-}
-
-static unsigned char cpustate_get_state(void)
-{
-
- return cpustate.cached_val;
-}
-
-static void cpustate_set_state(unsigned char new_state)
-{
- unsigned int state = (new_state << 21);
-
-#ifdef DEBUG_CPUSTATE
- printk("CPUSTATE -> 0x%x\n", new_state);
-#endif
- spin_lock(&cpustate.lock);
- cpustate.cached_val = new_state;
- writel((0xff << 21), cpustate.clr_addr);
- writel(state, cpustate.set_addr);
- spin_unlock(&cpustate.lock);
-}
-
-/*
- * Now all the various file operations that we export.
- */
-
-static ssize_t cpustate_read(struct file *file, char *buf,
- size_t count, loff_t * ppos)
-{
- unsigned char data;
-
- if (count < 0)
- return -EFAULT;
- if (count == 0)
- return 0;
-
- data = cpustate_get_state();
- if (copy_to_user(buf, &data, sizeof(unsigned char)))
- return -EFAULT;
- return sizeof(unsigned char);
-}
-
-static ssize_t cpustate_write(struct file *file, const char *buf,
- size_t count, loff_t * ppos)
-{
- unsigned char data;
-
- if (count < 0)
- return -EFAULT;
-
- if (count == 0)
- return 0;
-
- if (copy_from_user((unsigned char *)&data, buf, sizeof(unsigned char)))
- return -EFAULT;
-
- cpustate_set_state(data);
- return sizeof(unsigned char);
-}
-
-static int cpustate_open(struct inode *inode, struct file *file)
-{
- int ret;
-
- lock_kernel();
- ret = cpustate_get_ref((file->f_flags & O_EXCL));
- unlock_kernel();
-
- return ret;
-}
-
-static int cpustate_release(struct inode *inode, struct file *file)
-{
- return cpustate_free_ref();
-}
-
-static struct platform_driver hdpu_cpustate_driver = {
- .probe = hdpu_cpustate_probe,
- .remove = hdpu_cpustate_remove,
- .driver = {
- .name = HDPU_CPUSTATE_NAME,
- .owner = THIS_MODULE,
- },
-};
-
-/*
- * The various file operations we support.
- */
-static const struct file_operations cpustate_fops = {
- .owner = THIS_MODULE,
- .open = cpustate_open,
- .release = cpustate_release,
- .read = cpustate_read,
- .write = cpustate_write,
- .llseek = no_llseek,
-};
-
-static struct miscdevice cpustate_dev = {
- .minor = MISC_DYNAMIC_MINOR,
- .name = "sky_cpustate",
- .fops = &cpustate_fops,
-};
-
-static int hdpu_cpustate_probe(struct platform_device *pdev)
-{
- struct resource *res;
- struct proc_dir_entry *proc_de;
- int ret;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- printk(KERN_ERR "sky_cpustate: "
- "Invalid memory resource.\n");
- return -EINVAL;
- }
- cpustate.set_addr = (unsigned long *)res->start;
- cpustate.clr_addr = (unsigned long *)res->end - 1;
-
- ret = misc_register(&cpustate_dev);
- if (ret) {
- printk(KERN_WARNING "sky_cpustate: "
- "Unable to register misc device.\n");
- cpustate.set_addr = NULL;
- cpustate.clr_addr = NULL;
- return ret;
- }
-
- proc_de = proc_create("sky_cpustate", 0666, NULL, &proc_cpustate);
- if (!proc_de) {
- printk(KERN_WARNING "sky_cpustate: "
- "Unable to create proc entry\n");
- }
-
- printk(KERN_INFO "Sky CPU State Driver v" SKY_CPUSTATE_VERSION "\n");
- return 0;
-}
-
-static int hdpu_cpustate_remove(struct platform_device *pdev)
-{
- cpustate.set_addr = NULL;
- cpustate.clr_addr = NULL;
-
- remove_proc_entry("sky_cpustate", NULL);
- misc_deregister(&cpustate_dev);
-
- return 0;
-}
-
-static int __init cpustate_init(void)
-{
- return platform_driver_register(&hdpu_cpustate_driver);
-}
-
-static void __exit cpustate_exit(void)
-{
- platform_driver_unregister(&hdpu_cpustate_driver);
-}
-
-module_init(cpustate_init);
-module_exit(cpustate_exit);
-
-MODULE_AUTHOR("Brian Waite");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:" HDPU_CPUSTATE_NAME);
diff --git a/drivers/misc/hdpuftrs/hdpu_nexus.c b/drivers/misc/hdpuftrs/hdpu_nexus.c
deleted file mode 100644
index ce39fa54949b..000000000000
--- a/drivers/misc/hdpuftrs/hdpu_nexus.c
+++ /dev/null
@@ -1,149 +0,0 @@
-/*
- * Sky Nexus Register Driver
- *
- * Copyright (C) 2002 Brian Waite
- *
- * This driver allows reading the Nexus register
- * It exports the /proc/sky_chassis_id and also
- * /proc/sky_slot_id pseudo-file for status information.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/proc_fs.h>
-#include <linux/hdpu_features.h>
-#include <linux/platform_device.h>
-#include <linux/seq_file.h>
-#include <asm/io.h>
-
-static int hdpu_nexus_probe(struct platform_device *pdev);
-static int hdpu_nexus_remove(struct platform_device *pdev);
-static int hdpu_slot_id_open(struct inode *inode, struct file *file);
-static int hdpu_slot_id_read(struct seq_file *seq, void *offset);
-static int hdpu_chassis_id_open(struct inode *inode, struct file *file);
-static int hdpu_chassis_id_read(struct seq_file *seq, void *offset);
-
-static struct proc_dir_entry *hdpu_slot_id;
-static struct proc_dir_entry *hdpu_chassis_id;
-static int slot_id = -1;
-static int chassis_id = -1;
-
-static const struct file_operations proc_slot_id = {
- .open = hdpu_slot_id_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static const struct file_operations proc_chassis_id = {
- .open = hdpu_chassis_id_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static struct platform_driver hdpu_nexus_driver = {
- .probe = hdpu_nexus_probe,
- .remove = hdpu_nexus_remove,
- .driver = {
- .name = HDPU_NEXUS_NAME,
- .owner = THIS_MODULE,
- },
-};
-
-static int hdpu_slot_id_open(struct inode *inode, struct file *file)
-{
- return single_open(file, hdpu_slot_id_read, NULL);
-}
-
-static int hdpu_slot_id_read(struct seq_file *seq, void *offset)
-{
- seq_printf(seq, "%d\n", slot_id);
- return 0;
-}
-
-static int hdpu_chassis_id_open(struct inode *inode, struct file *file)
-{
- return single_open(file, hdpu_chassis_id_read, NULL);
-}
-
-static int hdpu_chassis_id_read(struct seq_file *seq, void *offset)
-{
- seq_printf(seq, "%d\n", chassis_id);
- return 0;
-}
-
-static int hdpu_nexus_probe(struct platform_device *pdev)
-{
- struct resource *res;
- int *nexus_id_addr;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- printk(KERN_ERR "sky_nexus: "
- "Invalid memory resource.\n");
- return -EINVAL;
- }
- nexus_id_addr = ioremap(res->start,
- (unsigned long)(res->end - res->start));
- if (nexus_id_addr) {
- slot_id = (*nexus_id_addr >> 8) & 0x1f;
- chassis_id = *nexus_id_addr & 0xff;
- iounmap(nexus_id_addr);
- } else {
- printk(KERN_ERR "sky_nexus: Could not map slot id\n");
- }
-
- hdpu_slot_id = proc_create("sky_slot_id", 0666, NULL, &proc_slot_id);
- if (!hdpu_slot_id) {
- printk(KERN_WARNING "sky_nexus: "
- "Unable to create proc dir entry: sky_slot_id\n");
- }
-
- hdpu_chassis_id = proc_create("sky_chassis_id", 0666, NULL,
- &proc_chassis_id);
- if (!hdpu_chassis_id)
- printk(KERN_WARNING "sky_nexus: "
- "Unable to create proc dir entry: sky_chassis_id\n");
-
- return 0;
-}
-
-static int hdpu_nexus_remove(struct platform_device *pdev)
-{
- slot_id = -1;
- chassis_id = -1;
-
- remove_proc_entry("sky_slot_id", NULL);
- remove_proc_entry("sky_chassis_id", NULL);
-
- hdpu_slot_id = 0;
- hdpu_chassis_id = 0;
-
- return 0;
-}
-
-static int __init nexus_init(void)
-{
- return platform_driver_register(&hdpu_nexus_driver);
-}
-
-static void __exit nexus_exit(void)
-{
- platform_driver_unregister(&hdpu_nexus_driver);
-}
-
-module_init(nexus_init);
-module_exit(nexus_exit);
-
-MODULE_AUTHOR("Brian Waite");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:" HDPU_NEXUS_NAME);
diff --git a/drivers/misc/lkdtm.c b/drivers/misc/lkdtm.c
index 31a991161f0a..5bfb2a2041b8 100644
--- a/drivers/misc/lkdtm.c
+++ b/drivers/misc/lkdtm.c
@@ -75,6 +75,9 @@ enum ctype {
UNALIGNED_LOAD_STORE_WRITE,
OVERWRITE_ALLOCATION,
WRITE_AFTER_FREE,
+ SOFTLOCKUP,
+ HARDLOCKUP,
+ HUNG_TASK,
};
static char* cp_name[] = {
@@ -99,6 +102,9 @@ static char* cp_type[] = {
"UNALIGNED_LOAD_STORE_WRITE",
"OVERWRITE_ALLOCATION",
"WRITE_AFTER_FREE",
+ "SOFTLOCKUP",
+ "HARDLOCKUP",
+ "HUNG_TASK",
};
static struct jprobe lkdtm;
@@ -320,6 +326,20 @@ static void lkdtm_do_action(enum ctype which)
memset(data, 0x78, len);
break;
}
+ case SOFTLOCKUP:
+ preempt_disable();
+ for (;;)
+ cpu_relax();
+ break;
+ case HARDLOCKUP:
+ local_irq_disable();
+ for (;;)
+ cpu_relax();
+ break;
+ case HUNG_TASK:
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule();
+ break;
case NONE:
default:
break;
diff --git a/drivers/misc/vmware_balloon.c b/drivers/misc/vmware_balloon.c
index e7161c4e3798..db9cd0240c6f 100644
--- a/drivers/misc/vmware_balloon.c
+++ b/drivers/misc/vmware_balloon.c
@@ -41,7 +41,7 @@
#include <linux/workqueue.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
-#include <asm/vmware.h>
+#include <asm/hypervisor.h>
MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver");
@@ -767,7 +767,7 @@ static int __init vmballoon_init(void)
* Check if we are running on VMware's hypervisor and bail out
* if we are not.
*/
- if (!vmware_platform())
+ if (x86_hyper != &x86_hyper_vmware)
return -ENODEV;
vmballoon_wq = create_freezeable_workqueue("vmmemctl");
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 3168ebd616b2..569e94da844c 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -1252,9 +1252,8 @@ EXPORT_SYMBOL(mmc_card_can_sleep);
/**
* mmc_suspend_host - suspend a host
* @host: mmc host
- * @state: suspend mode (PM_SUSPEND_xxx)
*/
-int mmc_suspend_host(struct mmc_host *host, pm_message_t state)
+int mmc_suspend_host(struct mmc_host *host)
{
int err = 0;
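mmc_suspend_host() drops its pm_message_t argument here; the host-driver hunks later in this patch (at91_mci, au1xmmc, bfin_sdh, cb710 and others) are updated to the one-argument call. A sketch of the resulting call site in a platform driver's suspend hook, with hypothetical driver names:

static int example_host_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	int ret = 0;

	if (mmc)
		ret = mmc_suspend_host(mmc);	/* state is no longer passed down */

	return ret;
}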
diff --git a/drivers/mmc/core/sd_ops.c b/drivers/mmc/core/sd_ops.c
index 0d96080d44b0..63772e7e7608 100644
--- a/drivers/mmc/core/sd_ops.c
+++ b/drivers/mmc/core/sd_ops.c
@@ -79,8 +79,6 @@ int mmc_wait_for_app_cmd(struct mmc_host *host, struct mmc_card *card,
* we cannot use the retries field in mmc_command.
*/
for (i = 0;i <= retries;i++) {
- memset(&mrq, 0, sizeof(struct mmc_request));
-
err = mmc_app_cmd(host, card);
if (err) {
/* no point in retrying; no APP commands allowed */
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 2dd4cfe7ca17..b9dee28ee7d0 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -296,6 +296,12 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
card->type = MMC_TYPE_SDIO;
/*
+ * Call the optional HC's init_card function to handle quirks.
+ */
+ if (host->ops->init_card)
+ host->ops->init_card(host, card);
+
+ /*
* For native busses: set card RCA and quit open drain mode.
*/
if (!powered_resume && !mmc_host_is_spi(host)) {
diff --git a/drivers/mmc/core/sdio_io.c b/drivers/mmc/core/sdio_io.c
index ff27c8c71355..0f687cdeb064 100644
--- a/drivers/mmc/core/sdio_io.c
+++ b/drivers/mmc/core/sdio_io.c
@@ -406,6 +406,36 @@ void sdio_writeb(struct sdio_func *func, u8 b, unsigned int addr, int *err_ret)
EXPORT_SYMBOL_GPL(sdio_writeb);
/**
+ * sdio_writeb_readb - write and read a byte from SDIO function
+ * @func: SDIO function to access
+ * @write_byte: byte to write
+ * @addr: address to write to
+ * @err_ret: optional status value from transfer
+ *
+ * Performs a RAW (Read after Write) operation as defined by the SDIO spec:
+ * a single byte is written to the address space of a given SDIO function and
+ * the response is read back from the same address, both in a single request.
+ * If there is a problem with the operation, 0xff is returned and
+ * @err_ret will contain the error code.
+ */
+u8 sdio_writeb_readb(struct sdio_func *func, u8 write_byte,
+ unsigned int addr, int *err_ret)
+{
+ int ret;
+ u8 val;
+
+ ret = mmc_io_rw_direct(func->card, 1, func->num, addr,
+ write_byte, &val);
+ if (err_ret)
+ *err_ret = ret;
+ if (ret)
+ val = 0xff;
+
+ return val;
+}
+EXPORT_SYMBOL_GPL(sdio_writeb_readb);
+
+/**
* sdio_memcpy_fromio - read a chunk of memory from a SDIO function
* @func: SDIO function to access
* @dst: buffer to store the data
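sdio_writeb_readb() above gives SDIO function drivers a single-request read-after-write primitive. A minimal usage sketch wrapped in the usual sdio_claim_host()/sdio_release_host() pair; the wrapper function and its names are illustrative only:

static int example_raw_byte(struct sdio_func *func, u8 out, unsigned int addr)
{
	int err;
	u8 in;

	sdio_claim_host(func);
	in = sdio_writeb_readb(func, out, addr, &err);
	sdio_release_host(func);

	if (err)
		return err;	/* on error the byte read back is 0xff anyway */

	return in;
}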
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 2e13b94769fd..e171e77f6129 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -136,6 +136,18 @@ config MMC_SDHCI_S3C
If unsure, say N.
+config MMC_SDHCI_SPEAR
+ tristate "SDHCI support on ST SPEAr platform"
+ depends on MMC_SDHCI && PLAT_SPEAR
+ help
+ This selects the Secure Digital Host Controller Interface (SDHCI)
+ often referred to as the HSMMC block in some of the ST SPEAr range
+ of SoCs.
+
+ If you have a controller with this interface, say Y or M here.
+
+ If unsure, say N.
+
config MMC_SDHCI_S3C_DMA
bool "DMA support on S3C SDHCI"
depends on MMC_SDHCI_S3C && EXPERIMENTAL
@@ -412,3 +424,11 @@ config SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND
depends on SDH_BFIN
help
If you say yes here SD-Cards may work on the EZkit.
+
+config MMC_SH_MMCIF
+ tristate "SuperH Internal MMCIF support"
+ depends on MMC_BLOCK && (SUPERH || ARCH_SHMOBILE)
+ help
+ This selects the MMC Host Interface controller (MMCIF).
+
+ This driver supports MMCIF in sh7724/sh7757/sh7372.
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index f4803977dfce..e30c2ee48894 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_MMC_SDHCI) += sdhci.o
obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o
obj-$(CONFIG_MMC_SDHCI_PLTFM) += sdhci-pltfm.o
obj-$(CONFIG_MMC_SDHCI_S3C) += sdhci-s3c.o
+obj-$(CONFIG_MMC_SDHCI_SPEAR) += sdhci-spear.o
obj-$(CONFIG_MMC_WBSD) += wbsd.o
obj-$(CONFIG_MMC_AU1X) += au1xmmc.o
obj-$(CONFIG_MMC_OMAP) += omap.o
@@ -34,6 +35,7 @@ obj-$(CONFIG_MMC_TMIO) += tmio_mmc.o
obj-$(CONFIG_MMC_CB710) += cb710-mmc.o
obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o
obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o
+obj-$(CONFIG_MMC_SH_MMCIF) += sh_mmcif.o
obj-$(CONFIG_MMC_SDHCI_OF) += sdhci-of.o
sdhci-of-y := sdhci-of-core.o
diff --git a/drivers/mmc/host/at91_mci.c b/drivers/mmc/host/at91_mci.c
index a6dd7da37357..5f3a599ead07 100644
--- a/drivers/mmc/host/at91_mci.c
+++ b/drivers/mmc/host/at91_mci.c
@@ -314,8 +314,8 @@ static void at91_mci_post_dma_read(struct at91mci_host *host)
dmabuf = (unsigned *)tmpv;
}
+ flush_kernel_dcache_page(sg_page(sg));
kunmap_atomic(sgbuffer, KM_BIO_SRC_IRQ);
- dmac_flush_range((void *)sgbuffer, ((void *)sgbuffer) + amount);
data->bytes_xfered += amount;
if (size == 0)
break;
@@ -1157,7 +1157,7 @@ static int at91_mci_suspend(struct platform_device *pdev, pm_message_t state)
enable_irq_wake(host->board->det_pin);
if (mmc)
- ret = mmc_suspend_host(mmc, state);
+ ret = mmc_suspend_host(mmc);
return ret;
}
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 88be37d9e9a5..95ef864ad8f9 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -173,6 +173,7 @@ struct atmel_mci {
* @mmc: The mmc_host representing this slot.
* @host: The MMC controller this slot is using.
* @sdc_reg: Value of SDCR to be written before using this slot.
+ * @sdio_irq: SDIO irq mask for this slot.
* @mrq: mmc_request currently being processed or waiting to be
* processed, or NULL when the slot is idle.
* @queue_node: List node for placing this node in the @queue list of
@@ -191,6 +192,7 @@ struct atmel_mci_slot {
struct atmel_mci *host;
u32 sdc_reg;
+ u32 sdio_irq;
struct mmc_request *mrq;
struct list_head queue_node;
@@ -266,7 +268,7 @@ static int atmci_req_show(struct seq_file *s, void *v)
"CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
cmd->opcode, cmd->arg, cmd->flags,
cmd->resp[0], cmd->resp[1], cmd->resp[2],
- cmd->resp[2], cmd->error);
+ cmd->resp[3], cmd->error);
if (data)
seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
data->bytes_xfered, data->blocks,
@@ -276,7 +278,7 @@ static int atmci_req_show(struct seq_file *s, void *v)
"CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
stop->opcode, stop->arg, stop->flags,
stop->resp[0], stop->resp[1], stop->resp[2],
- stop->resp[2], stop->error);
+ stop->resp[3], stop->error);
}
spin_unlock_bh(&slot->host->lock);
@@ -569,9 +571,10 @@ static void atmci_dma_cleanup(struct atmel_mci *host)
{
struct mmc_data *data = host->data;
- dma_unmap_sg(&host->pdev->dev, data->sg, data->sg_len,
- ((data->flags & MMC_DATA_WRITE)
- ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
+ if (data)
+ dma_unmap_sg(&host->pdev->dev, data->sg, data->sg_len,
+ ((data->flags & MMC_DATA_WRITE)
+ ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
}
static void atmci_stop_dma(struct atmel_mci *host)
@@ -579,7 +582,7 @@ static void atmci_stop_dma(struct atmel_mci *host)
struct dma_chan *chan = host->data_chan;
if (chan) {
- chan->device->device_terminate_all(chan);
+ chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
atmci_dma_cleanup(host);
} else {
/* Data transfer was stopped by the interrupt handler */
@@ -791,7 +794,7 @@ static void atmci_start_request(struct atmel_mci *host,
mci_writel(host, SDCR, slot->sdc_reg);
iflags = mci_readl(host, IMR);
- if (iflags)
+ if (iflags & ~(MCI_SDIOIRQA | MCI_SDIOIRQB))
dev_warn(&slot->mmc->class_dev, "WARNING: IMR=0x%08x\n",
iflags);
@@ -951,10 +954,21 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
if (mci_has_rwproof())
host->mode_reg |= (MCI_MR_WRPROOF | MCI_MR_RDPROOF);
- if (list_empty(&host->queue))
+ if (atmci_is_mci2()) {
+ /* setup High Speed mode in relation with card capacity */
+ if (ios->timing == MMC_TIMING_SD_HS)
+ host->cfg_reg |= MCI_CFG_HSMODE;
+ else
+ host->cfg_reg &= ~MCI_CFG_HSMODE;
+ }
+
+ if (list_empty(&host->queue)) {
mci_writel(host, MR, host->mode_reg);
- else
+ if (atmci_is_mci2())
+ mci_writel(host, CFG, host->cfg_reg);
+ } else {
host->need_clock_update = true;
+ }
spin_unlock_bh(&host->lock);
} else {
@@ -1029,11 +1043,23 @@ static int atmci_get_cd(struct mmc_host *mmc)
return present;
}
+static void atmci_enable_sdio_irq(struct mmc_host *mmc, int enable)
+{
+ struct atmel_mci_slot *slot = mmc_priv(mmc);
+ struct atmel_mci *host = slot->host;
+
+ if (enable)
+ mci_writel(host, IER, slot->sdio_irq);
+ else
+ mci_writel(host, IDR, slot->sdio_irq);
+}
+
static const struct mmc_host_ops atmci_ops = {
.request = atmci_request,
.set_ios = atmci_set_ios,
.get_ro = atmci_get_ro,
.get_cd = atmci_get_cd,
+ .enable_sdio_irq = atmci_enable_sdio_irq,
};
/* Called with host->lock held */
@@ -1051,8 +1077,11 @@ static void atmci_request_end(struct atmel_mci *host, struct mmc_request *mrq)
* necessary if set_ios() is called when a different slot is
* busy transfering data.
*/
- if (host->need_clock_update)
+ if (host->need_clock_update) {
mci_writel(host, MR, host->mode_reg);
+ if (atmci_is_mci2())
+ mci_writel(host, CFG, host->cfg_reg);
+ }
host->cur_slot->mrq = NULL;
host->mrq = NULL;
@@ -1099,8 +1128,8 @@ static void atmci_command_complete(struct atmel_mci *host,
"command error: status=0x%08x\n", status);
if (cmd->data) {
- host->data = NULL;
atmci_stop_dma(host);
+ host->data = NULL;
mci_writel(host, IDR, MCI_NOTBUSY
| MCI_TXRDY | MCI_RXRDY
| ATMCI_DATA_ERROR_FLAGS);
@@ -1293,6 +1322,7 @@ static void atmci_tasklet_func(unsigned long priv)
} else {
data->bytes_xfered = data->blocks * data->blksz;
data->error = 0;
+ mci_writel(host, IDR, ATMCI_DATA_ERROR_FLAGS);
}
if (!data->stop) {
@@ -1481,6 +1511,19 @@ static void atmci_cmd_interrupt(struct atmel_mci *host, u32 status)
tasklet_schedule(&host->tasklet);
}
+static void atmci_sdio_interrupt(struct atmel_mci *host, u32 status)
+{
+ int i;
+
+ for (i = 0; i < ATMEL_MCI_MAX_NR_SLOTS; i++) {
+ struct atmel_mci_slot *slot = host->slot[i];
+ if (slot && (status & slot->sdio_irq)) {
+ mmc_signal_sdio_irq(slot->mmc);
+ }
+ }
+}
+
+
static irqreturn_t atmci_interrupt(int irq, void *dev_id)
{
struct atmel_mci *host = dev_id;
@@ -1520,6 +1563,10 @@ static irqreturn_t atmci_interrupt(int irq, void *dev_id)
if (pending & MCI_CMDRDY)
atmci_cmd_interrupt(host, status);
+
+ if (pending & (MCI_SDIOIRQA | MCI_SDIOIRQB))
+ atmci_sdio_interrupt(host, status);
+
} while (pass_count++ < 5);
return pass_count ? IRQ_HANDLED : IRQ_NONE;
@@ -1542,7 +1589,7 @@ static irqreturn_t atmci_detect_interrupt(int irq, void *dev_id)
static int __init atmci_init_slot(struct atmel_mci *host,
struct mci_slot_pdata *slot_data, unsigned int id,
- u32 sdc_reg)
+ u32 sdc_reg, u32 sdio_irq)
{
struct mmc_host *mmc;
struct atmel_mci_slot *slot;
@@ -1558,11 +1605,16 @@ static int __init atmci_init_slot(struct atmel_mci *host,
slot->wp_pin = slot_data->wp_pin;
slot->detect_is_active_high = slot_data->detect_is_active_high;
slot->sdc_reg = sdc_reg;
+ slot->sdio_irq = sdio_irq;
mmc->ops = &atmci_ops;
mmc->f_min = DIV_ROUND_UP(host->bus_hz, 512);
mmc->f_max = host->bus_hz / 2;
mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
+ if (sdio_irq)
+ mmc->caps |= MMC_CAP_SDIO_IRQ;
+ if (atmci_is_mci2())
+ mmc->caps |= MMC_CAP_SD_HIGHSPEED;
if (slot_data->bus_width >= 4)
mmc->caps |= MMC_CAP_4_BIT_DATA;
@@ -1751,13 +1803,13 @@ static int __init atmci_probe(struct platform_device *pdev)
ret = -ENODEV;
if (pdata->slot[0].bus_width) {
ret = atmci_init_slot(host, &pdata->slot[0],
- MCI_SDCSEL_SLOT_A, 0);
+ 0, MCI_SDCSEL_SLOT_A, MCI_SDIOIRQA);
if (!ret)
nr_slots++;
}
if (pdata->slot[1].bus_width) {
ret = atmci_init_slot(host, &pdata->slot[1],
- MCI_SDCSEL_SLOT_B, 1);
+ 1, MCI_SDCSEL_SLOT_B, MCI_SDIOIRQB);
if (!ret)
nr_slots++;
}
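
The atmel-mci hunks above wire SDIO card interrupts into the MMC core: each slot records its MCI_SDIOIRQA/MCI_SDIOIRQB bit, atmci_enable_sdio_irq() masks or unmasks it, and the shared interrupt handler forwards it with mmc_signal_sdio_irq(). A minimal sketch of that host-driver pattern follows; the struct, register macros, and mask field are illustrative placeholders, only the mmc core calls are real API:

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mmc/host.h>

#define EXAMPLE_IER	0x44	/* hypothetical interrupt-enable register */
#define EXAMPLE_IDR	0x48	/* hypothetical interrupt-disable register */
#define EXAMPLE_SR	0x50	/* hypothetical status register */

struct example_host {		/* hypothetical per-host state */
	void __iomem	*base;
	u32		sdio_irq;	/* per-slot card-interrupt bit */
	struct mmc_host	*mmc;
};

static void example_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct example_host *host = mmc_priv(mmc);

	/* unmask or mask the card-interrupt source for this slot */
	if (enable)
		writel(host->sdio_irq, host->base + EXAMPLE_IER);
	else
		writel(host->sdio_irq, host->base + EXAMPLE_IDR);
}

static irqreturn_t example_irq(int irq, void *dev_id)
{
	struct example_host *host = dev_id;
	u32 status = readl(host->base + EXAMPLE_SR);

	if (status & host->sdio_irq)
		mmc_signal_sdio_irq(host->mmc);	/* core services the card and re-arms */

	return IRQ_HANDLED;
}

The slot must also advertise MMC_CAP_SDIO_IRQ in mmc->caps, which is exactly what the new sdio_irq argument to atmci_init_slot() arranges above.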
diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c
index f5834449400e..c8da5d30a861 100644
--- a/drivers/mmc/host/au1xmmc.c
+++ b/drivers/mmc/host/au1xmmc.c
@@ -1142,7 +1142,7 @@ static int au1xmmc_suspend(struct platform_device *pdev, pm_message_t state)
struct au1xmmc_host *host = platform_get_drvdata(pdev);
int ret;
- ret = mmc_suspend_host(host->mmc, state);
+ ret = mmc_suspend_host(host->mmc);
if (ret)
return ret;
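
This is the first of many identical one-line conversions in this series: the core's mmc_suspend_host() now takes only the mmc_host, so the pm_message_t argument is no longer forwarded. A typical platform suspend hook after the change looks like the following sketch, which mirrors the hunks in this patch (the driver name is a placeholder):

#include <linux/platform_device.h>
#include <linux/mmc/host.h>

static int example_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	int ret = 0;

	if (mmc)
		ret = mmc_suspend_host(mmc);	/* 'state' is no longer passed down */

	return ret;
}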
diff --git a/drivers/mmc/host/bfin_sdh.c b/drivers/mmc/host/bfin_sdh.c
index 6919e844072c..4b0e677d7295 100644
--- a/drivers/mmc/host/bfin_sdh.c
+++ b/drivers/mmc/host/bfin_sdh.c
@@ -576,7 +576,7 @@ static int sdh_suspend(struct platform_device *dev, pm_message_t state)
int ret = 0;
if (mmc)
- ret = mmc_suspend_host(mmc, state);
+ ret = mmc_suspend_host(mmc);
bfin_write_SDH_PWR_CTL(bfin_read_SDH_PWR_CTL() & ~PWR_ON);
peripheral_free_list(drv_data->pin_req);
diff --git a/drivers/mmc/host/cb710-mmc.c b/drivers/mmc/host/cb710-mmc.c
index 92a324f7417c..ca3bdc831900 100644
--- a/drivers/mmc/host/cb710-mmc.c
+++ b/drivers/mmc/host/cb710-mmc.c
@@ -675,7 +675,7 @@ static int cb710_mmc_suspend(struct platform_device *pdev, pm_message_t state)
struct mmc_host *mmc = cb710_slot_to_mmc(slot);
int err;
- err = mmc_suspend_host(mmc, state);
+ err = mmc_suspend_host(mmc);
if (err)
return err;
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index 3bd0ba294e9d..33d9f1b00862 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -137,15 +137,15 @@
/*
* One scatterlist dma "segment" is at most MAX_CCNT rw_threshold units,
- * and we handle up to NR_SG segments. MMC_BLOCK_BOUNCE kicks in only
+ * and we handle up to MAX_NR_SG segments. MMC_BLOCK_BOUNCE kicks in only
* for drivers with max_hw_segs == 1, making the segments bigger (64KB)
- * than the page or two that's otherwise typical. NR_SG == 16 gives at
- * least the same throughput boost, using EDMA transfer linkage instead
- * of spending CPU time copying pages.
+ * than the page or two that's otherwise typical. nr_sg (passed from
+ * platform data) == 16 gives at least the same throughput boost, using
+ * EDMA transfer linkage instead of spending CPU time copying pages.
*/
#define MAX_CCNT ((1 << 16) - 1)
-#define NR_SG 16
+#define MAX_NR_SG 16
static unsigned rw_threshold = 32;
module_param(rw_threshold, uint, S_IRUGO);
@@ -171,6 +171,7 @@ struct mmc_davinci_host {
#define DAVINCI_MMC_DATADIR_READ 1
#define DAVINCI_MMC_DATADIR_WRITE 2
unsigned char data_dir;
+ unsigned char suspended;
/* buffer is used during PIO of one scatterlist segment, and
* is updated along with buffer_bytes_left. bytes_left applies
@@ -192,7 +193,7 @@ struct mmc_davinci_host {
struct edmacc_param tx_template;
struct edmacc_param rx_template;
unsigned n_link;
- u32 links[NR_SG - 1];
+ u32 links[MAX_NR_SG - 1];
/* For PIO we walk scatterlists one segment at a time. */
unsigned int sg_len;
@@ -202,6 +203,8 @@ struct mmc_davinci_host {
u8 version;
/* for ns in one cycle calculation */
unsigned ns_in_one_cycle;
+ /* Number of sg segments */
+ u8 nr_sg;
#ifdef CONFIG_CPU_FREQ
struct notifier_block freq_transition;
#endif
@@ -568,6 +571,7 @@ davinci_release_dma_channels(struct mmc_davinci_host *host)
static int __init davinci_acquire_dma_channels(struct mmc_davinci_host *host)
{
+ u32 link_size;
int r, i;
/* Acquire master DMA write channel */
@@ -593,7 +597,8 @@ static int __init davinci_acquire_dma_channels(struct mmc_davinci_host *host)
/* Allocate parameter RAM slots, which will later be bound to a
* channel as needed to handle a scatterlist.
*/
- for (i = 0; i < ARRAY_SIZE(host->links); i++) {
+ link_size = min_t(unsigned, host->nr_sg, ARRAY_SIZE(host->links));
+ for (i = 0; i < link_size; i++) {
r = edma_alloc_slot(EDMA_CTLR(host->txdma), EDMA_SLOT_ANY);
if (r < 0) {
dev_dbg(mmc_dev(host->mmc), "dma PaRAM alloc --> %d\n",
@@ -905,19 +910,26 @@ static void mmc_davinci_cmd_done(struct mmc_davinci_host *host,
}
}
-static void
-davinci_abort_data(struct mmc_davinci_host *host, struct mmc_data *data)
+static inline void mmc_davinci_reset_ctrl(struct mmc_davinci_host *host,
+ int val)
{
u32 temp;
- /* reset command and data state machines */
temp = readl(host->base + DAVINCI_MMCCTL);
- writel(temp | MMCCTL_CMDRST | MMCCTL_DATRST,
- host->base + DAVINCI_MMCCTL);
+ if (val) /* reset */
+ temp |= MMCCTL_CMDRST | MMCCTL_DATRST;
+ else /* enable */
+ temp &= ~(MMCCTL_CMDRST | MMCCTL_DATRST);
- temp &= ~(MMCCTL_CMDRST | MMCCTL_DATRST);
- udelay(10);
writel(temp, host->base + DAVINCI_MMCCTL);
+ udelay(10);
+}
+
+static void
+davinci_abort_data(struct mmc_davinci_host *host, struct mmc_data *data)
+{
+ mmc_davinci_reset_ctrl(host, 1);
+ mmc_davinci_reset_ctrl(host, 0);
}
static irqreturn_t mmc_davinci_irq(int irq, void *dev_id)
@@ -1121,15 +1133,8 @@ static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host)
#endif
static void __init init_mmcsd_host(struct mmc_davinci_host *host)
{
- /* DAT line portion is diabled and in reset state */
- writel(readl(host->base + DAVINCI_MMCCTL) | MMCCTL_DATRST,
- host->base + DAVINCI_MMCCTL);
-
- /* CMD line portion is diabled and in reset state */
- writel(readl(host->base + DAVINCI_MMCCTL) | MMCCTL_CMDRST,
- host->base + DAVINCI_MMCCTL);
- udelay(10);
+ mmc_davinci_reset_ctrl(host, 1);
writel(0, host->base + DAVINCI_MMCCLK);
writel(MMCCLK_CLKEN, host->base + DAVINCI_MMCCLK);
@@ -1137,12 +1142,7 @@ static void __init init_mmcsd_host(struct mmc_davinci_host *host)
writel(0x1FFF, host->base + DAVINCI_MMCTOR);
writel(0xFFFF, host->base + DAVINCI_MMCTOD);
- writel(readl(host->base + DAVINCI_MMCCTL) & ~MMCCTL_DATRST,
- host->base + DAVINCI_MMCCTL);
- writel(readl(host->base + DAVINCI_MMCCTL) & ~MMCCTL_CMDRST,
- host->base + DAVINCI_MMCCTL);
-
- udelay(10);
+ mmc_davinci_reset_ctrl(host, 0);
}
static int __init davinci_mmcsd_probe(struct platform_device *pdev)
@@ -1202,6 +1202,12 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev)
init_mmcsd_host(host);
+ if (pdata->nr_sg)
+ host->nr_sg = pdata->nr_sg - 1;
+
+ if (host->nr_sg > MAX_NR_SG || !host->nr_sg)
+ host->nr_sg = MAX_NR_SG;
+
host->use_dma = use_dma;
host->irq = irq;
@@ -1327,32 +1333,65 @@ static int __exit davinci_mmcsd_remove(struct platform_device *pdev)
}
#ifdef CONFIG_PM
-static int davinci_mmcsd_suspend(struct platform_device *pdev, pm_message_t msg)
+static int davinci_mmcsd_suspend(struct device *dev)
{
+ struct platform_device *pdev = to_platform_device(dev);
struct mmc_davinci_host *host = platform_get_drvdata(pdev);
+ int ret;
- return mmc_suspend_host(host->mmc, msg);
+ mmc_host_enable(host->mmc);
+ ret = mmc_suspend_host(host->mmc);
+ if (!ret) {
+ writel(0, host->base + DAVINCI_MMCIM);
+ mmc_davinci_reset_ctrl(host, 1);
+ mmc_host_disable(host->mmc);
+ clk_disable(host->clk);
+ host->suspended = 1;
+ } else {
+ host->suspended = 0;
+ mmc_host_disable(host->mmc);
+ }
+
+ return ret;
}
-static int davinci_mmcsd_resume(struct platform_device *pdev)
+static int davinci_mmcsd_resume(struct device *dev)
{
+ struct platform_device *pdev = to_platform_device(dev);
struct mmc_davinci_host *host = platform_get_drvdata(pdev);
+ int ret;
+
+ if (!host->suspended)
+ return 0;
- return mmc_resume_host(host->mmc);
+ clk_enable(host->clk);
+ mmc_host_enable(host->mmc);
+
+ mmc_davinci_reset_ctrl(host, 0);
+ ret = mmc_resume_host(host->mmc);
+ if (!ret)
+ host->suspended = 0;
+
+ return ret;
}
+
+static const struct dev_pm_ops davinci_mmcsd_pm = {
+ .suspend = davinci_mmcsd_suspend,
+ .resume = davinci_mmcsd_resume,
+};
+
+#define davinci_mmcsd_pm_ops (&davinci_mmcsd_pm)
#else
-#define davinci_mmcsd_suspend NULL
-#define davinci_mmcsd_resume NULL
+#define davinci_mmcsd_pm_ops NULL
#endif
static struct platform_driver davinci_mmcsd_driver = {
.driver = {
.name = "davinci_mmc",
.owner = THIS_MODULE,
+ .pm = davinci_mmcsd_pm_ops,
},
.remove = __exit_p(davinci_mmcsd_remove),
- .suspend = davinci_mmcsd_suspend,
- .resume = davinci_mmcsd_resume,
};
static int __init davinci_mmcsd_init(void)
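
The davinci_mmc conversion above is a standard move from the legacy platform_driver .suspend/.resume callbacks to dev_pm_ops registered through driver.pm. A trimmed sketch of the same shape, with the davinci-specific clock and reset handling elided (all names below are placeholders):

#include <linux/platform_device.h>
#include <linux/pm.h>

struct example_host {
	int suspended;
};

static int example_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct example_host *host = platform_get_drvdata(pdev);

	/* quiesce the controller, then remember we did so for resume */
	host->suspended = 1;
	return 0;
}

static int example_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct example_host *host = platform_get_drvdata(pdev);

	if (!host->suspended)
		return 0;
	/* re-enable clocks and bring the controller out of reset here */
	host->suspended = 0;
	return 0;
}

static const struct dev_pm_ops example_pm_ops = {
	.suspend = example_suspend,
	.resume  = example_resume,
};

static struct platform_driver example_driver = {
	.driver = {
		.name = "example-mmc",
		.pm   = &example_pm_ops,	/* replaces .suspend/.resume on the platform_driver */
	},
};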
diff --git a/drivers/mmc/host/imxmmc.c b/drivers/mmc/host/imxmmc.c
index bf98d7cc928a..9a68ff4353a2 100644
--- a/drivers/mmc/host/imxmmc.c
+++ b/drivers/mmc/host/imxmmc.c
@@ -1115,7 +1115,7 @@ static int imxmci_suspend(struct platform_device *dev, pm_message_t state)
int ret = 0;
if (mmc)
- ret = mmc_suspend_host(mmc, state);
+ ret = mmc_suspend_host(mmc);
return ret;
}
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 84c103a7ee13..4917af96bae1 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -55,14 +55,16 @@ static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
host->cclk = host->mclk / (2 * (clk + 1));
}
if (host->hw_designer == AMBA_VENDOR_ST)
- clk |= MCI_FCEN; /* Bug fix in ST IP block */
+ clk |= MCI_ST_FCEN; /* Bug fix in ST IP block */
clk |= MCI_CLK_ENABLE;
/* This hasn't proven to be worthwhile */
/* clk |= MCI_CLK_PWRSAVE; */
}
if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
- clk |= MCI_WIDE_BUS;
+ clk |= MCI_4BIT_BUS;
+ if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
+ clk |= MCI_ST_8BIT_BUS;
writel(clk, host->base + MMCICLOCK);
}
@@ -629,7 +631,18 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
mmc->ops = &mmci_ops;
mmc->f_min = (host->mclk + 511) / 512;
- mmc->f_max = min(host->mclk, fmax);
+ /*
+ * If the platform data supplies a maximum operating
+ * frequency, this takes precedence. Else, we fall back
+ * to using the module parameter, which has a (low)
+ * default value in case it is not specified. Either
+ * value must not exceed the clock rate into the block,
+ * of course.
+ */
+ if (plat->f_max)
+ mmc->f_max = min(host->mclk, plat->f_max);
+ else
+ mmc->f_max = min(host->mclk, fmax);
dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max);
#ifdef CONFIG_REGULATOR
@@ -811,7 +824,7 @@ static int mmci_suspend(struct amba_device *dev, pm_message_t state)
if (mmc) {
struct mmci_host *host = mmc_priv(mmc);
- ret = mmc_suspend_host(mmc, state);
+ ret = mmc_suspend_host(mmc);
if (ret == 0)
writel(0, host->base + MMCIMASK0);
}
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index 1ceb9a90f59b..d77062e5e3af 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -25,9 +25,11 @@
#define MCI_CLK_ENABLE (1 << 8)
#define MCI_CLK_PWRSAVE (1 << 9)
#define MCI_CLK_BYPASS (1 << 10)
-#define MCI_WIDE_BUS (1 << 11)
+#define MCI_4BIT_BUS (1 << 11)
+/* 8-bit wide buses supported in ST Micro versions */
+#define MCI_ST_8BIT_BUS (1 << 12)
/* HW flow control on the ST Micro version */
-#define MCI_FCEN (1 << 13)
+#define MCI_ST_FCEN (1 << 13)
#define MMCIARGUMENT 0x008
#define MMCICOMMAND 0x00c
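
The mmci.h renames make the ST-Micro-only clock-register bits explicit (MCI_ST_FCEN, MCI_ST_8BIT_BUS), and mmci_set_clkreg() above now ORs in one bus-width bit per ios->bus_width. In sketch form the composition reads as follows; example_compose_clkreg() is a hypothetical helper, everything it touches comes from the driver above, and the divider computation is assumed done by the caller:

static u32 example_compose_clkreg(struct mmci_host *host, u32 clk)
{
	if (host->hw_designer == AMBA_VENDOR_ST)
		clk |= MCI_ST_FCEN;		/* HW flow control, ST variants only */
	clk |= MCI_CLK_ENABLE;

	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
		clk |= MCI_4BIT_BUS;
	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
		clk |= MCI_ST_8BIT_BUS;		/* 8-bit mode exists only on ST variants */

	return clk;
}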
diff --git a/drivers/mmc/host/msm_sdcc.c b/drivers/mmc/host/msm_sdcc.c
index 04ae884383f6..24e09454e522 100644
--- a/drivers/mmc/host/msm_sdcc.c
+++ b/drivers/mmc/host/msm_sdcc.c
@@ -3,6 +3,7 @@
*
* Copyright (C) 2007 Google Inc,
* Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
+ * Copyright (C) 2009, Code Aurora Forum. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -26,6 +27,7 @@
#include <linux/log2.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
+#include <linux/mmc/sdio.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/platform_device.h>
@@ -47,6 +49,8 @@
#define DRIVER_NAME "msm-sdcc"
+#define BUSCLK_PWRSAVE 1
+#define BUSCLK_TIMEOUT (HZ)
static unsigned int msmsdcc_fmin = 144000;
static unsigned int msmsdcc_fmax = 50000000;
static unsigned int msmsdcc_4bit = 1;
@@ -57,6 +61,67 @@ static unsigned int msmsdcc_sdioirq;
#define PIO_SPINMAX 30
#define CMD_SPINMAX 20
+
+static inline void
+msmsdcc_disable_clocks(struct msmsdcc_host *host, int deferr)
+{
+ WARN_ON(!host->clks_on);
+
+ BUG_ON(host->curr.mrq);
+
+ if (deferr) {
+ mod_timer(&host->busclk_timer, jiffies + BUSCLK_TIMEOUT);
+ } else {
+ del_timer_sync(&host->busclk_timer);
+ /* Need to check clks_on again in case the busclk
+ * timer fired
+ */
+ if (host->clks_on) {
+ clk_disable(host->clk);
+ clk_disable(host->pclk);
+ host->clks_on = 0;
+ }
+ }
+}
+
+static inline int
+msmsdcc_enable_clocks(struct msmsdcc_host *host)
+{
+ int rc;
+
+ del_timer_sync(&host->busclk_timer);
+
+ if (!host->clks_on) {
+ rc = clk_enable(host->pclk);
+ if (rc)
+ return rc;
+ rc = clk_enable(host->clk);
+ if (rc) {
+ clk_disable(host->pclk);
+ return rc;
+ }
+ udelay(1 + ((3 * USEC_PER_SEC) /
+ (host->clk_rate ? host->clk_rate : msmsdcc_fmin)));
+ host->clks_on = 1;
+ }
+ return 0;
+}
+
+static inline unsigned int
+msmsdcc_readl(struct msmsdcc_host *host, unsigned int reg)
+{
+ return readl(host->base + reg);
+}
+
+static inline void
+msmsdcc_writel(struct msmsdcc_host *host, u32 data, unsigned int reg)
+{
+ writel(data, host->base + reg);
+ /* 3 clk delay required! */
+ udelay(1 + ((3 * USEC_PER_SEC) /
+ (host->clk_rate ? host->clk_rate : msmsdcc_fmin)));
+}
+
static void
msmsdcc_start_command(struct msmsdcc_host *host, struct mmc_command *cmd,
u32 c);
@@ -64,8 +129,6 @@ msmsdcc_start_command(struct msmsdcc_host *host, struct mmc_command *cmd,
static void
msmsdcc_request_end(struct msmsdcc_host *host, struct mmc_request *mrq)
{
- writel(0, host->base + MMCICOMMAND);
-
BUG_ON(host->curr.data);
host->curr.mrq = NULL;
@@ -76,6 +139,9 @@ msmsdcc_request_end(struct msmsdcc_host *host, struct mmc_request *mrq)
if (mrq->cmd->error == -ETIMEDOUT)
mdelay(5);
+#if BUSCLK_PWRSAVE
+ msmsdcc_disable_clocks(host, 1);
+#endif
/*
* Need to drop the host lock here; mmc_request_done may call
* back into the driver...
@@ -88,7 +154,6 @@ msmsdcc_request_end(struct msmsdcc_host *host, struct mmc_request *mrq)
static void
msmsdcc_stop_data(struct msmsdcc_host *host)
{
- writel(0, host->base + MMCIDATACTRL);
host->curr.data = NULL;
host->curr.got_dataend = host->curr.got_datablkend = 0;
}
@@ -109,6 +174,31 @@ uint32_t msmsdcc_fifo_addr(struct msmsdcc_host *host)
return 0;
}
+static inline void
+msmsdcc_start_command_exec(struct msmsdcc_host *host, u32 arg, u32 c) {
+ msmsdcc_writel(host, arg, MMCIARGUMENT);
+ msmsdcc_writel(host, c, MMCICOMMAND);
+}
+
+static void
+msmsdcc_dma_exec_func(struct msm_dmov_cmd *cmd)
+{
+ struct msmsdcc_host *host = (struct msmsdcc_host *)cmd->data;
+
+ msmsdcc_writel(host, host->cmd_timeout, MMCIDATATIMER);
+ msmsdcc_writel(host, (unsigned int)host->curr.xfer_size,
+ MMCIDATALENGTH);
+ msmsdcc_writel(host, host->cmd_pio_irqmask, MMCIMASK1);
+ msmsdcc_writel(host, host->cmd_datactrl, MMCIDATACTRL);
+
+ if (host->cmd_cmd) {
+ msmsdcc_start_command_exec(host,
+ (u32) host->cmd_cmd->arg,
+ (u32) host->cmd_c);
+ }
+ host->dma.active = 1;
+}
+
static void
msmsdcc_dma_complete_func(struct msm_dmov_cmd *cmd,
unsigned int result,
@@ -121,8 +211,11 @@ msmsdcc_dma_complete_func(struct msm_dmov_cmd *cmd,
struct mmc_request *mrq;
spin_lock_irqsave(&host->lock, flags);
+ host->dma.active = 0;
+
mrq = host->curr.mrq;
BUG_ON(!mrq);
+ WARN_ON(!mrq->data);
if (!(result & DMOV_RSLT_VALID)) {
pr_err("msmsdcc: Invalid DataMover result\n");
@@ -146,7 +239,6 @@ msmsdcc_dma_complete_func(struct msm_dmov_cmd *cmd,
if (!mrq->data->error)
mrq->data->error = -EIO;
}
- host->dma.busy = 0;
dma_unmap_sg(mmc_dev(host->mmc), host->dma.sg, host->dma.num_ents,
host->dma.dir);
@@ -159,6 +251,7 @@ msmsdcc_dma_complete_func(struct msm_dmov_cmd *cmd,
}
host->dma.sg = NULL;
+ host->dma.busy = 0;
if ((host->curr.got_dataend && host->curr.got_datablkend)
|| mrq->data->error) {
@@ -172,12 +265,14 @@ msmsdcc_dma_complete_func(struct msm_dmov_cmd *cmd,
if (!mrq->data->error)
host->curr.data_xfered = host->curr.xfer_size;
if (!mrq->data->stop || mrq->cmd->error) {
- writel(0, host->base + MMCICOMMAND);
host->curr.mrq = NULL;
host->curr.cmd = NULL;
mrq->data->bytes_xfered = host->curr.data_xfered;
spin_unlock_irqrestore(&host->lock, flags);
+#if BUSCLK_PWRSAVE
+ msmsdcc_disable_clocks(host, 1);
+#endif
mmc_request_done(host->mmc, mrq);
return;
} else
@@ -218,6 +313,8 @@ static int msmsdcc_config_dma(struct msmsdcc_host *host, struct mmc_data *data)
host->dma.sg = data->sg;
host->dma.num_ents = data->sg_len;
+ BUG_ON(host->dma.num_ents > NR_SG); /* Prevent memory corruption */
+
nc = host->dma.nc;
switch (host->pdev_id) {
@@ -246,22 +343,15 @@ static int msmsdcc_config_dma(struct msmsdcc_host *host, struct mmc_data *data)
host->curr.user_pages = 0;
- n = dma_map_sg(mmc_dev(host->mmc), host->dma.sg,
- host->dma.num_ents, host->dma.dir);
-
- if (n != host->dma.num_ents) {
- pr_err("%s: Unable to map in all sg elements\n",
- mmc_hostname(host->mmc));
- host->dma.sg = NULL;
- host->dma.num_ents = 0;
- return -ENOMEM;
- }
-
box = &nc->cmd[0];
for (i = 0; i < host->dma.num_ents; i++) {
box->cmd = CMD_MODE_BOX;
- if (i == (host->dma.num_ents - 1))
+ /* Initialize sg dma address */
+ sg->dma_address = page_to_dma(mmc_dev(host->mmc), sg_page(sg))
+ + sg->offset;
+
+ if (i == (host->dma.num_ents - 1))
box->cmd |= CMD_LC;
rows = (sg_dma_len(sg) % MCI_FIFOSIZE) ?
(sg_dma_len(sg) / MCI_FIFOSIZE) + 1 :
@@ -300,15 +390,70 @@ static int msmsdcc_config_dma(struct msmsdcc_host *host, struct mmc_data *data)
DMOV_CMD_ADDR(host->dma.cmdptr_busaddr);
host->dma.hdr.complete_func = msmsdcc_dma_complete_func;
+ n = dma_map_sg(mmc_dev(host->mmc), host->dma.sg,
+ host->dma.num_ents, host->dma.dir);
+/* dsb inside dma_map_sg will write nc out to mem as well */
+
+ if (n != host->dma.num_ents) {
+ printk(KERN_ERR "%s: Unable to map in all sg elements\n",
+ mmc_hostname(host->mmc));
+ host->dma.sg = NULL;
+ host->dma.num_ents = 0;
+ return -ENOMEM;
+ }
+
return 0;
}
+static int
+snoop_cccr_abort(struct mmc_command *cmd)
+{
+ if ((cmd->opcode == 52) &&
+ (cmd->arg & 0x80000000) &&
+ (((cmd->arg >> 9) & 0x1ffff) == SDIO_CCCR_ABORT))
+ return 1;
+ return 0;
+}
+
+static void
+msmsdcc_start_command_deferred(struct msmsdcc_host *host,
+ struct mmc_command *cmd, u32 *c)
+{
+ *c |= (cmd->opcode | MCI_CPSM_ENABLE);
+
+ if (cmd->flags & MMC_RSP_PRESENT) {
+ if (cmd->flags & MMC_RSP_136)
+ *c |= MCI_CPSM_LONGRSP;
+ *c |= MCI_CPSM_RESPONSE;
+ }
+
+ if (/*interrupt*/0)
+ *c |= MCI_CPSM_INTERRUPT;
+
+ if ((((cmd->opcode == 17) || (cmd->opcode == 18)) ||
+ ((cmd->opcode == 24) || (cmd->opcode == 25))) ||
+ (cmd->opcode == 53))
+ *c |= MCI_CSPM_DATCMD;
+
+ if (cmd == cmd->mrq->stop)
+ *c |= MCI_CSPM_MCIABORT;
+
+ if (snoop_cccr_abort(cmd))
+ *c |= MCI_CSPM_MCIABORT;
+
+ if (host->curr.cmd != NULL) {
+ printk(KERN_ERR "%s: Overlapping command requests\n",
+ mmc_hostname(host->mmc));
+ }
+ host->curr.cmd = cmd;
+}
+
static void
-msmsdcc_start_data(struct msmsdcc_host *host, struct mmc_data *data)
+msmsdcc_start_data(struct msmsdcc_host *host, struct mmc_data *data,
+ struct mmc_command *cmd, u32 c)
{
unsigned int datactrl, timeout;
unsigned long long clks;
- void __iomem *base = host->base;
unsigned int pio_irqmask = 0;
host->curr.data = data;
@@ -320,13 +465,6 @@ msmsdcc_start_data(struct msmsdcc_host *host, struct mmc_data *data)
memset(&host->pio, 0, sizeof(host->pio));
- clks = (unsigned long long)data->timeout_ns * host->clk_rate;
- do_div(clks, NSEC_PER_SEC);
- timeout = data->timeout_clks + (unsigned int)clks;
- writel(timeout, base + MMCIDATATIMER);
-
- writel(host->curr.xfer_size, base + MMCIDATALENGTH);
-
datactrl = MCI_DPSM_ENABLE | (data->blksz << 4);
if (!msmsdcc_config_dma(host, data))
@@ -347,47 +485,51 @@ msmsdcc_start_data(struct msmsdcc_host *host, struct mmc_data *data)
if (data->flags & MMC_DATA_READ)
datactrl |= MCI_DPSM_DIRECTION;
- writel(pio_irqmask, base + MMCIMASK1);
- writel(datactrl, base + MMCIDATACTRL);
+ clks = (unsigned long long)data->timeout_ns * host->clk_rate;
+ do_div(clks, NSEC_PER_SEC);
+ timeout = data->timeout_clks + (unsigned int)clks * 2;
if (datactrl & MCI_DPSM_DMAENABLE) {
+ /* Save parameters for the exec function */
+ host->cmd_timeout = timeout;
+ host->cmd_pio_irqmask = pio_irqmask;
+ host->cmd_datactrl = datactrl;
+ host->cmd_cmd = cmd;
+
+ host->dma.hdr.execute_func = msmsdcc_dma_exec_func;
+ host->dma.hdr.data = (void *)host;
host->dma.busy = 1;
+
+ if (cmd) {
+ msmsdcc_start_command_deferred(host, cmd, &c);
+ host->cmd_c = c;
+ }
msm_dmov_enqueue_cmd(host->dma.channel, &host->dma.hdr);
+ } else {
+ msmsdcc_writel(host, timeout, MMCIDATATIMER);
+
+ msmsdcc_writel(host, host->curr.xfer_size, MMCIDATALENGTH);
+
+ msmsdcc_writel(host, pio_irqmask, MMCIMASK1);
+ msmsdcc_writel(host, datactrl, MMCIDATACTRL);
+
+ if (cmd) {
+ /* Daisy-chain the command if requested */
+ msmsdcc_start_command(host, cmd, c);
+ }
}
}
static void
msmsdcc_start_command(struct msmsdcc_host *host, struct mmc_command *cmd, u32 c)
{
- void __iomem *base = host->base;
-
- if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) {
- writel(0, base + MMCICOMMAND);
- udelay(2 + ((5 * 1000000) / host->clk_rate));
- }
-
- c |= cmd->opcode | MCI_CPSM_ENABLE;
-
- if (cmd->flags & MMC_RSP_PRESENT) {
- if (cmd->flags & MMC_RSP_136)
- c |= MCI_CPSM_LONGRSP;
- c |= MCI_CPSM_RESPONSE;
- }
-
- if (cmd->opcode == 17 || cmd->opcode == 18 ||
- cmd->opcode == 24 || cmd->opcode == 25 ||
- cmd->opcode == 53)
- c |= MCI_CSPM_DATCMD;
-
if (cmd == cmd->mrq->stop)
c |= MCI_CSPM_MCIABORT;
- host->curr.cmd = cmd;
-
host->stats.cmds++;
- writel(cmd->arg, base + MMCIARGUMENT);
- writel(c, base + MMCICOMMAND);
+ msmsdcc_start_command_deferred(host, cmd, &c);
+ msmsdcc_start_command_exec(host, cmd->arg, c);
}
static void
@@ -421,13 +563,11 @@ msmsdcc_data_err(struct msmsdcc_host *host, struct mmc_data *data,
static int
msmsdcc_pio_read(struct msmsdcc_host *host, char *buffer, unsigned int remain)
{
- void __iomem *base = host->base;
uint32_t *ptr = (uint32_t *) buffer;
int count = 0;
- while (readl(base + MMCISTATUS) & MCI_RXDATAAVLBL) {
-
- *ptr = readl(base + MMCIFIFO + (count % MCI_FIFOSIZE));
+ while (msmsdcc_readl(host, MMCISTATUS) & MCI_RXDATAAVLBL) {
+ *ptr = msmsdcc_readl(host, MMCIFIFO + (count % MCI_FIFOSIZE));
ptr++;
count += sizeof(uint32_t);
@@ -459,7 +599,7 @@ msmsdcc_pio_write(struct msmsdcc_host *host, char *buffer,
if (remain == 0)
break;
- status = readl(base + MMCISTATUS);
+ status = msmsdcc_readl(host, MMCISTATUS);
} while (status & MCI_TXFIFOHALFEMPTY);
return ptr - buffer;
@@ -469,7 +609,7 @@ static int
msmsdcc_spin_on_status(struct msmsdcc_host *host, uint32_t mask, int maxspin)
{
while (maxspin) {
- if ((readl(host->base + MMCISTATUS) & mask))
+ if ((msmsdcc_readl(host, MMCISTATUS) & mask))
return 0;
udelay(1);
--maxspin;
@@ -477,14 +617,13 @@ msmsdcc_spin_on_status(struct msmsdcc_host *host, uint32_t mask, int maxspin)
return -ETIMEDOUT;
}
-static int
+static irqreturn_t
msmsdcc_pio_irq(int irq, void *dev_id)
{
struct msmsdcc_host *host = dev_id;
- void __iomem *base = host->base;
uint32_t status;
- status = readl(base + MMCISTATUS);
+ status = msmsdcc_readl(host, MMCISTATUS);
do {
unsigned long flags;
@@ -539,14 +678,14 @@ msmsdcc_pio_irq(int irq, void *dev_id)
host->pio.sg_off = 0;
}
- status = readl(base + MMCISTATUS);
+ status = msmsdcc_readl(host, MMCISTATUS);
} while (1);
if (status & MCI_RXACTIVE && host->curr.xfer_remain < MCI_FIFOSIZE)
- writel(MCI_RXDATAAVLBLMASK, base + MMCIMASK1);
+ msmsdcc_writel(host, MCI_RXDATAAVLBLMASK, MMCIMASK1);
if (!host->curr.xfer_remain)
- writel(0, base + MMCIMASK1);
+ msmsdcc_writel(host, 0, MMCIMASK1);
return IRQ_HANDLED;
}
@@ -554,15 +693,13 @@ msmsdcc_pio_irq(int irq, void *dev_id)
static void msmsdcc_do_cmdirq(struct msmsdcc_host *host, uint32_t status)
{
struct mmc_command *cmd = host->curr.cmd;
- void __iomem *base = host->base;
host->curr.cmd = NULL;
- cmd->resp[0] = readl(base + MMCIRESPONSE0);
- cmd->resp[1] = readl(base + MMCIRESPONSE1);
- cmd->resp[2] = readl(base + MMCIRESPONSE2);
- cmd->resp[3] = readl(base + MMCIRESPONSE3);
+ cmd->resp[0] = msmsdcc_readl(host, MMCIRESPONSE0);
+ cmd->resp[1] = msmsdcc_readl(host, MMCIRESPONSE1);
+ cmd->resp[2] = msmsdcc_readl(host, MMCIRESPONSE2);
+ cmd->resp[3] = msmsdcc_readl(host, MMCIRESPONSE3);
- del_timer(&host->command_timer);
if (status & MCI_CMDTIMEOUT) {
cmd->error = -ETIMEDOUT;
} else if (status & MCI_CMDCRCFAIL &&
@@ -580,8 +717,10 @@ static void msmsdcc_do_cmdirq(struct msmsdcc_host *host, uint32_t status)
msmsdcc_request_end(host, cmd->mrq);
} else /* host->data == NULL */
msmsdcc_request_end(host, cmd->mrq);
- } else if (!(cmd->data->flags & MMC_DATA_READ))
- msmsdcc_start_data(host, cmd->data);
+ } else if (cmd->data)
+ if (!(cmd->data->flags & MMC_DATA_READ))
+ msmsdcc_start_data(host, cmd->data,
+ NULL, 0);
}
static void
@@ -590,6 +729,11 @@ msmsdcc_handle_irq_data(struct msmsdcc_host *host, u32 status,
{
struct mmc_data *data = host->curr.data;
+ if (status & (MCI_CMDSENT | MCI_CMDRESPEND | MCI_CMDCRCFAIL |
+ MCI_CMDTIMEOUT) && host->curr.cmd) {
+ msmsdcc_do_cmdirq(host, status);
+ }
+
if (!data)
return;
@@ -602,7 +746,8 @@ msmsdcc_handle_irq_data(struct msmsdcc_host *host, u32 status,
msm_dmov_stop_cmd(host->dma.channel,
&host->dma.hdr, 0);
else {
- msmsdcc_stop_data(host);
+ if (host->curr.data)
+ msmsdcc_stop_data(host);
if (!data->stop)
msmsdcc_request_end(host, data->mrq);
else
@@ -657,17 +802,18 @@ msmsdcc_irq(int irq, void *dev_id)
spin_lock(&host->lock);
do {
- status = readl(base + MMCISTATUS);
+ status = msmsdcc_readl(host, MMCISTATUS);
+ status &= (msmsdcc_readl(host, MMCIMASK0) |
+ MCI_DATABLOCKENDMASK);
+ msmsdcc_writel(host, status, MMCICLEAR);
- status &= (readl(base + MMCIMASK0) | MCI_DATABLOCKENDMASK);
- writel(status, base + MMCICLEAR);
+ if (status & MCI_SDIOINTR)
+ status &= ~MCI_SDIOINTR;
- msmsdcc_handle_irq_data(host, status, base);
+ if (!status)
+ break;
- if (status & (MCI_CMDSENT | MCI_CMDRESPEND | MCI_CMDCRCFAIL |
- MCI_CMDTIMEOUT) && host->curr.cmd) {
- msmsdcc_do_cmdirq(host, status);
- }
+ msmsdcc_handle_irq_data(host, status, base);
if (status & MCI_SDIOINTOPER) {
cardint = 1;
@@ -714,24 +860,27 @@ msmsdcc_request(struct mmc_host *mmc, struct mmc_request *mrq)
return;
}
+ msmsdcc_enable_clocks(host);
+
host->curr.mrq = mrq;
if (mrq->data && mrq->data->flags & MMC_DATA_READ)
- msmsdcc_start_data(host, mrq->data);
-
- msmsdcc_start_command(host, mrq->cmd, 0);
+ /* Queue/read data, daisy-chain command when data starts */
+ msmsdcc_start_data(host, mrq->data, mrq->cmd, 0);
+ else
+ msmsdcc_start_command(host, mrq->cmd, 0);
if (host->cmdpoll && !msmsdcc_spin_on_status(host,
MCI_CMDRESPEND|MCI_CMDCRCFAIL|MCI_CMDTIMEOUT,
CMD_SPINMAX)) {
- uint32_t status = readl(host->base + MMCISTATUS);
+ uint32_t status = msmsdcc_readl(host, MMCISTATUS);
msmsdcc_do_cmdirq(host, status);
- writel(MCI_CMDRESPEND | MCI_CMDCRCFAIL | MCI_CMDTIMEOUT,
- host->base + MMCICLEAR);
+ msmsdcc_writel(host,
+ MCI_CMDRESPEND | MCI_CMDCRCFAIL | MCI_CMDTIMEOUT,
+ MMCICLEAR);
host->stats.cmdpoll_hits++;
} else {
host->stats.cmdpoll_misses++;
- mod_timer(&host->command_timer, jiffies + HZ);
}
spin_unlock_irqrestore(&host->lock, flags);
}
@@ -742,14 +891,13 @@ msmsdcc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
struct msmsdcc_host *host = mmc_priv(mmc);
u32 clk = 0, pwr = 0;
int rc;
+ unsigned long flags;
- if (ios->clock) {
+ spin_lock_irqsave(&host->lock, flags);
- if (!host->clks_on) {
- clk_enable(host->pclk);
- clk_enable(host->clk);
- host->clks_on = 1;
- }
+ msmsdcc_enable_clocks(host);
+
+ if (ios->clock) {
if (ios->clock != host->clk_rate) {
rc = clk_set_rate(host->clk, ios->clock);
if (rc < 0)
@@ -787,18 +935,16 @@ msmsdcc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
pwr |= MCI_OD;
- writel(clk, host->base + MMCICLOCK);
+ msmsdcc_writel(host, clk, MMCICLOCK);
if (host->pwr != pwr) {
host->pwr = pwr;
- writel(pwr, host->base + MMCIPOWER);
- }
-
- if (!(clk & MCI_CLK_ENABLE) && host->clks_on) {
- clk_disable(host->clk);
- clk_disable(host->pclk);
- host->clks_on = 0;
+ msmsdcc_writel(host, pwr, MMCIPOWER);
}
+#if BUSCLK_PWRSAVE
+ msmsdcc_disable_clocks(host, 1);
+#endif
+ spin_unlock_irqrestore(&host->lock, flags);
}
static void msmsdcc_enable_sdio_irq(struct mmc_host *mmc, int enable)
@@ -809,13 +955,13 @@ static void msmsdcc_enable_sdio_irq(struct mmc_host *mmc, int enable)
spin_lock_irqsave(&host->lock, flags);
if (msmsdcc_sdioirq == 1) {
- status = readl(host->base + MMCIMASK0);
+ status = msmsdcc_readl(host, MMCIMASK0);
if (enable)
status |= MCI_SDIOINTOPERMASK;
else
status &= ~MCI_SDIOINTOPERMASK;
host->saved_irq0mask = status;
- writel(status, host->base + MMCIMASK0);
+ msmsdcc_writel(host, status, MMCIMASK0);
}
spin_unlock_irqrestore(&host->lock, flags);
}
@@ -875,42 +1021,13 @@ msmsdcc_status_notify_cb(int card_present, void *dev_id)
msmsdcc_check_status((unsigned long) host);
}
-/*
- * called when a command expires.
- * Dump some debugging, and then error
- * out the transaction.
- */
static void
-msmsdcc_command_expired(unsigned long _data)
+msmsdcc_busclk_expired(unsigned long _data)
{
struct msmsdcc_host *host = (struct msmsdcc_host *) _data;
- struct mmc_request *mrq;
- unsigned long flags;
-
- spin_lock_irqsave(&host->lock, flags);
- mrq = host->curr.mrq;
-
- if (!mrq) {
- pr_info("%s: Command expiry misfire\n",
- mmc_hostname(host->mmc));
- spin_unlock_irqrestore(&host->lock, flags);
- return;
- }
-
- pr_err("%s: Command timeout (%p %p %p %p)\n",
- mmc_hostname(host->mmc), mrq, mrq->cmd,
- mrq->data, host->dma.sg);
- mrq->cmd->error = -ETIMEDOUT;
- msmsdcc_stop_data(host);
-
- writel(0, host->base + MMCICOMMAND);
-
- host->curr.mrq = NULL;
- host->curr.cmd = NULL;
-
- spin_unlock_irqrestore(&host->lock, flags);
- mmc_request_done(host->mmc, mrq);
+ if (host->clks_on)
+ msmsdcc_disable_clocks(host, 0);
}
static int
@@ -1012,6 +1129,7 @@ msmsdcc_probe(struct platform_device *pdev)
host->pdev_id = pdev->id;
host->plat = plat;
host->mmc = mmc;
+ host->curr.cmd = NULL;
host->cmdpoll = 1;
@@ -1027,36 +1145,35 @@ msmsdcc_probe(struct platform_device *pdev)
host->dmares = dmares;
spin_lock_init(&host->lock);
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+ if (plat->embedded_sdio)
+ mmc_set_embedded_sdio_data(mmc,
+ &plat->embedded_sdio->cis,
+ &plat->embedded_sdio->cccr,
+ plat->embedded_sdio->funcs,
+ plat->embedded_sdio->num_funcs);
+#endif
+
/*
* Setup DMA
*/
msmsdcc_init_dma(host);
- /*
- * Setup main peripheral bus clock
- */
+ /* Get our clocks */
host->pclk = clk_get(&pdev->dev, "sdc_pclk");
if (IS_ERR(host->pclk)) {
ret = PTR_ERR(host->pclk);
goto host_free;
}
- ret = clk_enable(host->pclk);
- if (ret)
- goto pclk_put;
-
- host->pclk_rate = clk_get_rate(host->pclk);
-
- /*
- * Setup SDC MMC clock
- */
host->clk = clk_get(&pdev->dev, "sdc_clk");
if (IS_ERR(host->clk)) {
ret = PTR_ERR(host->clk);
- goto pclk_disable;
+ goto pclk_put;
}
- ret = clk_enable(host->clk);
+ /* Enable clocks */
+ ret = msmsdcc_enable_clocks(host);
if (ret)
goto clk_put;
@@ -1066,10 +1183,9 @@ msmsdcc_probe(struct platform_device *pdev)
goto clk_disable;
}
+ host->pclk_rate = clk_get_rate(host->pclk);
host->clk_rate = clk_get_rate(host->clk);
- host->clks_on = 1;
-
/*
* Setup MMC host structure
*/
@@ -1092,10 +1208,10 @@ msmsdcc_probe(struct platform_device *pdev)
mmc->max_req_size = 33554432; /* MCI_DATA_LENGTH is 25 bits */
mmc->max_seg_size = mmc->max_req_size;
- writel(0, host->base + MMCIMASK0);
- writel(0x5e007ff, host->base + MMCICLEAR); /* Add: 1 << 25 */
+ msmsdcc_writel(host, 0, MMCIMASK0);
+ msmsdcc_writel(host, 0x5e007ff, MMCICLEAR);
- writel(MCI_IRQENABLE, host->base + MMCIMASK0);
+ msmsdcc_writel(host, MCI_IRQENABLE, MMCIMASK0);
host->saved_irq0mask = MCI_IRQENABLE;
/*
@@ -1137,13 +1253,9 @@ msmsdcc_probe(struct platform_device *pdev)
host->eject = !host->oldstat;
}
- /*
- * Setup a command timer. We currently need this due to
- * some 'strange' timeout / error handling situations.
- */
- init_timer(&host->command_timer);
- host->command_timer.data = (unsigned long) host;
- host->command_timer.function = msmsdcc_command_expired;
+ init_timer(&host->busclk_timer);
+ host->busclk_timer.data = (unsigned long) host;
+ host->busclk_timer.function = msmsdcc_busclk_expired;
ret = request_irq(cmd_irqres->start, msmsdcc_irq, IRQF_SHARED,
DRIVER_NAME " (cmd)", host);
@@ -1181,6 +1293,9 @@ msmsdcc_probe(struct platform_device *pdev)
if (host->timer.function)
pr_info("%s: Polling status mode enabled\n", mmc_hostname(mmc));
+#if BUSCLK_PWRSAVE
+ msmsdcc_disable_clocks(host, 1);
+#endif
return 0;
cmd_irq_free:
free_irq(cmd_irqres->start, host);
@@ -1188,11 +1303,9 @@ msmsdcc_probe(struct platform_device *pdev)
if (host->stat_irq)
free_irq(host->stat_irq, host);
clk_disable:
- clk_disable(host->clk);
+ msmsdcc_disable_clocks(host, 0);
clk_put:
clk_put(host->clk);
- pclk_disable:
- clk_disable(host->pclk);
pclk_put:
clk_put(host->pclk);
host_free:
@@ -1214,16 +1327,11 @@ msmsdcc_suspend(struct platform_device *dev, pm_message_t state)
disable_irq(host->stat_irq);
if (mmc->card && mmc->card->type != MMC_TYPE_SDIO)
- rc = mmc_suspend_host(mmc, state);
- if (!rc) {
- writel(0, host->base + MMCIMASK0);
-
- if (host->clks_on) {
- clk_disable(host->clk);
- clk_disable(host->pclk);
- host->clks_on = 0;
- }
- }
+ rc = mmc_suspend_host(mmc);
+ if (!rc)
+ msmsdcc_writel(host, 0, MMCIMASK0);
+ if (host->clks_on)
+ msmsdcc_disable_clocks(host, 0);
}
return rc;
}
@@ -1232,27 +1340,21 @@ static int
msmsdcc_resume(struct platform_device *dev)
{
struct mmc_host *mmc = mmc_get_drvdata(dev);
- unsigned long flags;
if (mmc) {
struct msmsdcc_host *host = mmc_priv(mmc);
- spin_lock_irqsave(&host->lock, flags);
+ msmsdcc_enable_clocks(host);
- if (!host->clks_on) {
- clk_enable(host->pclk);
- clk_enable(host->clk);
- host->clks_on = 1;
- }
-
- writel(host->saved_irq0mask, host->base + MMCIMASK0);
-
- spin_unlock_irqrestore(&host->lock, flags);
+ msmsdcc_writel(host, host->saved_irq0mask, MMCIMASK0);
if (mmc->card && mmc->card->type != MMC_TYPE_SDIO)
mmc_resume_host(mmc);
if (host->stat_irq)
enable_irq(host->stat_irq);
+#if BUSCLK_PWRSAVE
+ msmsdcc_disable_clocks(host, 1);
+#endif
}
return 0;
}
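
A central idea in the msm_sdcc rework above is lazy bus-clock gating: request paths call msmsdcc_enable_clocks(), while completion paths call msmsdcc_disable_clocks(host, 1), which only arms busclk_timer; the clocks are actually switched off when the timer expires with nothing new queued in the meantime. Reduced to its core, the idiom looks roughly like this (field names mirror the patch, the struct is a placeholder, and the 3-clock write delay and error handling are omitted):

#include <linux/clk.h>
#include <linux/timer.h>

struct example_host {
	struct timer_list	busclk_timer;
	struct clk		*clk, *pclk;
	int			clks_on;
};

static void example_disable_clocks(struct example_host *host, int defer)
{
	if (defer) {
		/* postpone: the timer callback performs the real disable */
		mod_timer(&host->busclk_timer, jiffies + HZ);
		return;
	}

	del_timer_sync(&host->busclk_timer);
	if (host->clks_on) {
		clk_disable(host->clk);
		clk_disable(host->pclk);
		host->clks_on = 0;
	}
}

static void example_busclk_expired(unsigned long data)
{
	struct example_host *host = (struct example_host *)data;

	if (host->clks_on)
		example_disable_clocks(host, 0);
}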
diff --git a/drivers/mmc/host/msm_sdcc.h b/drivers/mmc/host/msm_sdcc.h
index 8c8448469811..da0039c9285e 100644
--- a/drivers/mmc/host/msm_sdcc.h
+++ b/drivers/mmc/host/msm_sdcc.h
@@ -171,6 +171,7 @@ struct msmsdcc_dma_data {
int channel;
struct msmsdcc_host *host;
int busy; /* Set if DM is busy */
+ int active;
};
struct msmsdcc_pio_data {
@@ -213,7 +214,7 @@ struct msmsdcc_host {
struct clk *clk; /* main MMC bus clock */
struct clk *pclk; /* SDCC peripheral bus clock */
unsigned int clks_on; /* set if clocks are enabled */
- struct timer_list command_timer;
+ struct timer_list busclk_timer;
unsigned int eject; /* eject state */
@@ -233,6 +234,18 @@ struct msmsdcc_host {
struct msmsdcc_pio_data pio;
int cmdpoll;
struct msmsdcc_stats stats;
+
+#ifdef CONFIG_MMC_MSM7X00A_RESUME_IN_WQ
+ struct work_struct resume_task;
+#endif
+
+ /* Command parameters */
+ unsigned int cmd_timeout;
+ unsigned int cmd_pio_irqmask;
+ unsigned int cmd_datactrl;
+ struct mmc_command *cmd_cmd;
+ u32 cmd_c;
+
};
#endif
diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c
index 34e23489811a..366eefa77c5a 100644
--- a/drivers/mmc/host/mvsdio.c
+++ b/drivers/mmc/host/mvsdio.c
@@ -865,7 +865,7 @@ static int mvsd_suspend(struct platform_device *dev, pm_message_t state)
int ret = 0;
if (mmc)
- ret = mmc_suspend_host(mmc, state);
+ ret = mmc_suspend_host(mmc);
return ret;
}
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index 2df90412abb5..d9d4a72e0ec7 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -119,6 +119,7 @@ struct mxcmci_host {
int detect_irq;
int dma;
int do_dma;
+ int use_sdio;
unsigned int power_mode;
struct imxmmc_platform_data *pdata;
@@ -138,6 +139,7 @@ struct mxcmci_host {
int clock;
struct work_struct datawork;
+ spinlock_t lock;
};
static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios);
@@ -151,6 +153,8 @@ static void mxcmci_softreset(struct mxcmci_host *host)
{
int i;
+ dev_dbg(mmc_dev(host->mmc), "mxcmci_softreset\n");
+
/* reset sequence */
writew(STR_STP_CLK_RESET, host->base + MMC_REG_STR_STP_CLK);
writew(STR_STP_CLK_RESET | STR_STP_CLK_START_CLK,
@@ -224,6 +228,9 @@ static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
static int mxcmci_start_cmd(struct mxcmci_host *host, struct mmc_command *cmd,
unsigned int cmdat)
{
+ u32 int_cntr;
+ unsigned long flags;
+
WARN_ON(host->cmd != NULL);
host->cmd = cmd;
@@ -247,12 +254,16 @@ static int mxcmci_start_cmd(struct mxcmci_host *host, struct mmc_command *cmd,
return -EINVAL;
}
+ int_cntr = INT_END_CMD_RES_EN;
+
if (mxcmci_use_dma(host))
- writel(INT_READ_OP_EN | INT_WRITE_OP_DONE_EN |
- INT_END_CMD_RES_EN,
- host->base + MMC_REG_INT_CNTR);
- else
- writel(INT_END_CMD_RES_EN, host->base + MMC_REG_INT_CNTR);
+ int_cntr |= INT_READ_OP_EN | INT_WRITE_OP_DONE_EN;
+
+ spin_lock_irqsave(&host->lock, flags);
+ if (host->use_sdio)
+ int_cntr |= INT_SDIO_IRQ_EN;
+ writel(int_cntr, host->base + MMC_REG_INT_CNTR);
+ spin_unlock_irqrestore(&host->lock, flags);
writew(cmd->opcode, host->base + MMC_REG_CMD);
writel(cmd->arg, host->base + MMC_REG_ARG);
@@ -264,7 +275,14 @@ static int mxcmci_start_cmd(struct mxcmci_host *host, struct mmc_command *cmd,
static void mxcmci_finish_request(struct mxcmci_host *host,
struct mmc_request *req)
{
- writel(0, host->base + MMC_REG_INT_CNTR);
+ u32 int_cntr = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+ if (host->use_sdio)
+ int_cntr |= INT_SDIO_IRQ_EN;
+ writel(int_cntr, host->base + MMC_REG_INT_CNTR);
+ spin_unlock_irqrestore(&host->lock, flags);
host->req = NULL;
host->cmd = NULL;
@@ -290,16 +308,25 @@ static int mxcmci_finish_data(struct mxcmci_host *host, unsigned int stat)
dev_dbg(mmc_dev(host->mmc), "request failed. status: 0x%08x\n",
stat);
if (stat & STATUS_CRC_READ_ERR) {
+ dev_err(mmc_dev(host->mmc), "%s: -EILSEQ\n", __func__);
data->error = -EILSEQ;
} else if (stat & STATUS_CRC_WRITE_ERR) {
u32 err_code = (stat >> 9) & 0x3;
- if (err_code == 2) /* No CRC response */
+ if (err_code == 2) { /* No CRC response */
+ dev_err(mmc_dev(host->mmc),
+ "%s: No CRC -ETIMEDOUT\n", __func__);
data->error = -ETIMEDOUT;
- else
+ } else {
+ dev_err(mmc_dev(host->mmc),
+ "%s: -EILSEQ\n", __func__);
data->error = -EILSEQ;
+ }
} else if (stat & STATUS_TIME_OUT_READ) {
+ dev_err(mmc_dev(host->mmc),
+ "%s: read -ETIMEDOUT\n", __func__);
data->error = -ETIMEDOUT;
} else {
+ dev_err(mmc_dev(host->mmc), "%s: -EIO\n", __func__);
data->error = -EIO;
}
} else {
@@ -433,8 +460,6 @@ static int mxcmci_transfer_data(struct mxcmci_host *host)
struct scatterlist *sg;
int stat, i;
- host->datasize = 0;
-
host->data = data;
host->datasize = 0;
@@ -464,6 +489,9 @@ static void mxcmci_datawork(struct work_struct *work)
struct mxcmci_host *host = container_of(work, struct mxcmci_host,
datawork);
int datastat = mxcmci_transfer_data(host);
+
+ writel(STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE,
+ host->base + MMC_REG_STATUS);
mxcmci_finish_data(host, datastat);
if (host->req->stop) {
@@ -523,15 +551,35 @@ static void mxcmci_cmd_done(struct mxcmci_host *host, unsigned int stat)
static irqreturn_t mxcmci_irq(int irq, void *devid)
{
struct mxcmci_host *host = devid;
+ unsigned long flags;
+ bool sdio_irq;
u32 stat;
stat = readl(host->base + MMC_REG_STATUS);
- writel(stat, host->base + MMC_REG_STATUS);
+ writel(stat & ~(STATUS_SDIO_INT_ACTIVE | STATUS_DATA_TRANS_DONE |
+ STATUS_WRITE_OP_DONE), host->base + MMC_REG_STATUS);
dev_dbg(mmc_dev(host->mmc), "%s: 0x%08x\n", __func__, stat);
+ spin_lock_irqsave(&host->lock, flags);
+ sdio_irq = (stat & STATUS_SDIO_INT_ACTIVE) && host->use_sdio;
+ spin_unlock_irqrestore(&host->lock, flags);
+
+#ifdef HAS_DMA
+ if (mxcmci_use_dma(host) &&
+ (stat & (STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE)))
+ writel(STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE,
+ host->base + MMC_REG_STATUS);
+#endif
+
+ if (sdio_irq) {
+ writel(STATUS_SDIO_INT_ACTIVE, host->base + MMC_REG_STATUS);
+ mmc_signal_sdio_irq(host->mmc);
+ }
+
if (stat & STATUS_END_CMD_RESP)
mxcmci_cmd_done(host, stat);
+
#ifdef HAS_DMA
if (mxcmci_use_dma(host) &&
(stat & (STATUS_DATA_TRANS_DONE | STATUS_WRITE_OP_DONE)))
@@ -668,11 +716,46 @@ static int mxcmci_get_ro(struct mmc_host *mmc)
return -ENOSYS;
}
+static void mxcmci_enable_sdio_irq(struct mmc_host *mmc, int enable)
+{
+ struct mxcmci_host *host = mmc_priv(mmc);
+ unsigned long flags;
+ u32 int_cntr;
+
+ spin_lock_irqsave(&host->lock, flags);
+ host->use_sdio = enable;
+ int_cntr = readl(host->base + MMC_REG_INT_CNTR);
+
+ if (enable)
+ int_cntr |= INT_SDIO_IRQ_EN;
+ else
+ int_cntr &= ~INT_SDIO_IRQ_EN;
+
+ writel(int_cntr, host->base + MMC_REG_INT_CNTR);
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
+static void mxcmci_init_card(struct mmc_host *host, struct mmc_card *card)
+{
+ /*
+ * MX3 SoCs have a silicon bug which corrupts CRC calculation of
+ * multi-block transfers when the connected SDIO peripheral doesn't
+ * drive the BUSY line as required by the specs.
+ * One way to prevent this is to only allow 1-bit transfers.
+ */
+
+ if (cpu_is_mx3() && card->type == MMC_TYPE_SDIO)
+ host->caps &= ~MMC_CAP_4_BIT_DATA;
+ else
+ host->caps |= MMC_CAP_4_BIT_DATA;
+}
static const struct mmc_host_ops mxcmci_ops = {
- .request = mxcmci_request,
- .set_ios = mxcmci_set_ios,
- .get_ro = mxcmci_get_ro,
+ .request = mxcmci_request,
+ .set_ios = mxcmci_set_ios,
+ .get_ro = mxcmci_get_ro,
+ .enable_sdio_irq = mxcmci_enable_sdio_irq,
+ .init_card = mxcmci_init_card,
};
static int mxcmci_probe(struct platform_device *pdev)
@@ -700,7 +783,7 @@ static int mxcmci_probe(struct platform_device *pdev)
}
mmc->ops = &mxcmci_ops;
- mmc->caps = MMC_CAP_4_BIT_DATA;
+ mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
/* MMC core transfer sizes tunable parameters */
mmc->max_hw_segs = 64;
@@ -719,6 +802,7 @@ static int mxcmci_probe(struct platform_device *pdev)
host->mmc = mmc;
host->pdata = pdev->dev.platform_data;
+ spin_lock_init(&host->lock);
if (host->pdata && host->pdata->ocr_avail)
mmc->ocr_avail = host->pdata->ocr_avail;
@@ -848,7 +932,7 @@ static int mxcmci_suspend(struct platform_device *dev, pm_message_t state)
int ret = 0;
if (mmc)
- ret = mmc_suspend_host(mmc, state);
+ ret = mmc_suspend_host(mmc);
return ret;
}
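
The mxcmmc changes advertise MMC_CAP_SDIO_IRQ and use the .init_card host op to apply the MX3 quirk described in the comment above: SDIO cards are kept on a 1-bit bus so the multi-block CRC erratum cannot trigger. Since .init_card is invoked while the core is still initializing the card, the capability tweak is in place before the bus width is decided. A per-card quirk hook of this kind generally looks like the following sketch (cpu_is_mx3() is the platform-specific test used by this patch):

#include <linux/mmc/host.h>
#include <linux/mmc/card.h>

static void example_init_card(struct mmc_host *host, struct mmc_card *card)
{
	/*
	 * Adjust host capabilities per card: on MX3, force SDIO cards to
	 * 1-bit mode to avoid the multi-block CRC erratum; other card
	 * types may keep using 4-bit transfers.
	 */
	if (cpu_is_mx3() && card->type == MMC_TYPE_SDIO)
		host->caps &= ~MMC_CAP_4_BIT_DATA;
	else
		host->caps |= MMC_CAP_4_BIT_DATA;
}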
diff --git a/drivers/mmc/host/of_mmc_spi.c b/drivers/mmc/host/of_mmc_spi.c
index bb6cc54b558e..1247e5de9faa 100644
--- a/drivers/mmc/host/of_mmc_spi.c
+++ b/drivers/mmc/host/of_mmc_spi.c
@@ -64,7 +64,7 @@ static int of_mmc_spi_get_ro(struct device *dev)
struct mmc_spi_platform_data *mmc_spi_get_pdata(struct spi_device *spi)
{
struct device *dev = &spi->dev;
- struct device_node *np = dev_archdata_get_node(&dev->archdata);
+ struct device_node *np = dev->of_node;
struct of_mmc_spi *oms;
const u32 *voltage_ranges;
int num_ranges;
@@ -135,7 +135,7 @@ EXPORT_SYMBOL(mmc_spi_get_pdata);
void mmc_spi_put_pdata(struct spi_device *spi)
{
struct device *dev = &spi->dev;
- struct device_node *np = dev_archdata_get_node(&dev->archdata);
+ struct device_node *np = dev->of_node;
struct of_mmc_spi *oms = to_of_mmc_spi(dev);
int i;
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index 84d280406341..2b281680e320 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -39,30 +39,30 @@
#include <plat/fpga.h>
#define OMAP_MMC_REG_CMD 0x00
-#define OMAP_MMC_REG_ARGL 0x04
-#define OMAP_MMC_REG_ARGH 0x08
-#define OMAP_MMC_REG_CON 0x0c
-#define OMAP_MMC_REG_STAT 0x10
-#define OMAP_MMC_REG_IE 0x14
-#define OMAP_MMC_REG_CTO 0x18
-#define OMAP_MMC_REG_DTO 0x1c
-#define OMAP_MMC_REG_DATA 0x20
-#define OMAP_MMC_REG_BLEN 0x24
-#define OMAP_MMC_REG_NBLK 0x28
-#define OMAP_MMC_REG_BUF 0x2c
-#define OMAP_MMC_REG_SDIO 0x34
-#define OMAP_MMC_REG_REV 0x3c
-#define OMAP_MMC_REG_RSP0 0x40
-#define OMAP_MMC_REG_RSP1 0x44
-#define OMAP_MMC_REG_RSP2 0x48
-#define OMAP_MMC_REG_RSP3 0x4c
-#define OMAP_MMC_REG_RSP4 0x50
-#define OMAP_MMC_REG_RSP5 0x54
-#define OMAP_MMC_REG_RSP6 0x58
-#define OMAP_MMC_REG_RSP7 0x5c
-#define OMAP_MMC_REG_IOSR 0x60
-#define OMAP_MMC_REG_SYSC 0x64
-#define OMAP_MMC_REG_SYSS 0x68
+#define OMAP_MMC_REG_ARGL 0x01
+#define OMAP_MMC_REG_ARGH 0x02
+#define OMAP_MMC_REG_CON 0x03
+#define OMAP_MMC_REG_STAT 0x04
+#define OMAP_MMC_REG_IE 0x05
+#define OMAP_MMC_REG_CTO 0x06
+#define OMAP_MMC_REG_DTO 0x07
+#define OMAP_MMC_REG_DATA 0x08
+#define OMAP_MMC_REG_BLEN 0x09
+#define OMAP_MMC_REG_NBLK 0x0a
+#define OMAP_MMC_REG_BUF 0x0b
+#define OMAP_MMC_REG_SDIO 0x0d
+#define OMAP_MMC_REG_REV 0x0f
+#define OMAP_MMC_REG_RSP0 0x10
+#define OMAP_MMC_REG_RSP1 0x11
+#define OMAP_MMC_REG_RSP2 0x12
+#define OMAP_MMC_REG_RSP3 0x13
+#define OMAP_MMC_REG_RSP4 0x14
+#define OMAP_MMC_REG_RSP5 0x15
+#define OMAP_MMC_REG_RSP6 0x16
+#define OMAP_MMC_REG_RSP7 0x17
+#define OMAP_MMC_REG_IOSR 0x18
+#define OMAP_MMC_REG_SYSC 0x19
+#define OMAP_MMC_REG_SYSS 0x1a
#define OMAP_MMC_STAT_CARD_ERR (1 << 14)
#define OMAP_MMC_STAT_CARD_IRQ (1 << 13)
@@ -78,8 +78,9 @@
#define OMAP_MMC_STAT_CARD_BUSY (1 << 2)
#define OMAP_MMC_STAT_END_OF_CMD (1 << 0)
-#define OMAP_MMC_READ(host, reg) __raw_readw((host)->virt_base + OMAP_MMC_REG_##reg)
-#define OMAP_MMC_WRITE(host, reg, val) __raw_writew((val), (host)->virt_base + OMAP_MMC_REG_##reg)
+#define OMAP_MMC_REG(host, reg) (OMAP_MMC_REG_##reg << (host)->reg_shift)
+#define OMAP_MMC_READ(host, reg) __raw_readw((host)->virt_base + OMAP_MMC_REG(host, reg))
+#define OMAP_MMC_WRITE(host, reg, val) __raw_writew((val), (host)->virt_base + OMAP_MMC_REG(host, reg))
/*
* Command types
@@ -133,6 +134,7 @@ struct mmc_omap_host {
int irq;
unsigned char bus_mode;
unsigned char hw_bus_mode;
+ unsigned int reg_shift;
struct work_struct cmd_abort_work;
unsigned abort:1;
@@ -680,9 +682,9 @@ mmc_omap_xfer_data(struct mmc_omap_host *host, int write)
host->data->bytes_xfered += n;
if (write) {
- __raw_writesw(host->virt_base + OMAP_MMC_REG_DATA, host->buffer, n);
+ __raw_writesw(host->virt_base + OMAP_MMC_REG(host, DATA), host->buffer, n);
} else {
- __raw_readsw(host->virt_base + OMAP_MMC_REG_DATA, host->buffer, n);
+ __raw_readsw(host->virt_base + OMAP_MMC_REG(host, DATA), host->buffer, n);
}
}
@@ -900,7 +902,7 @@ mmc_omap_prepare_dma(struct mmc_omap_host *host, struct mmc_data *data)
int dst_port = 0;
int sync_dev = 0;
- data_addr = host->phys_base + OMAP_MMC_REG_DATA;
+ data_addr = host->phys_base + OMAP_MMC_REG(host, DATA);
frame = data->blksz;
count = sg_dma_len(sg);
@@ -1493,6 +1495,8 @@ static int __init mmc_omap_probe(struct platform_device *pdev)
}
}
+ host->reg_shift = (cpu_is_omap7xx() ? 1 : 2);
+
return 0;
err_plat_cleanup:
@@ -1557,7 +1561,7 @@ static int mmc_omap_suspend(struct platform_device *pdev, pm_message_t mesg)
struct mmc_omap_slot *slot;
slot = host->slots[i];
- ret = mmc_suspend_host(slot->mmc, mesg);
+ ret = mmc_suspend_host(slot->mmc);
if (ret < 0) {
while (--i >= 0) {
slot = host->slots[i];
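
The omap.c conversion turns the register map into word indices and shifts them by host->reg_shift: 1 on OMAP7xx, where the registers sit at 16-bit strides, and 2 on the other OMAPs this driver serves. As a quick worked example using the values from the table above, OMAP_MMC_REG_DATA changes from byte offset 0x20 to index 0x08; with reg_shift == 2 the macro yields 0x08 << 2 = 0x20, reproducing the old layout, while with reg_shift == 1 on OMAP7xx it yields 0x10, matching the narrower register spacing there.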
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index e9caf694c59e..b032828c6126 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -157,12 +157,10 @@ struct omap_hsmmc_host {
*/
struct regulator *vcc;
struct regulator *vcc_aux;
- struct semaphore sem;
struct work_struct mmc_carddetect_work;
void __iomem *base;
resource_size_t mapbase;
spinlock_t irq_lock; /* Prevent races with irq handler */
- unsigned long flags;
unsigned int id;
unsigned int dma_len;
unsigned int dma_sg_idx;
@@ -183,6 +181,7 @@ struct omap_hsmmc_host {
int protect_card;
int reqs_blocked;
int use_reg;
+ int req_in_progress;
struct omap_mmc_platform_data *pdata;
};
@@ -524,6 +523,27 @@ static void omap_hsmmc_stop_clock(struct omap_hsmmc_host *host)
dev_dbg(mmc_dev(host->mmc), "MMC Clock is not stoped\n");
}
+static void omap_hsmmc_enable_irq(struct omap_hsmmc_host *host)
+{
+ unsigned int irq_mask;
+
+ if (host->use_dma)
+ irq_mask = INT_EN_MASK & ~(BRR_ENABLE | BWR_ENABLE);
+ else
+ irq_mask = INT_EN_MASK;
+
+ OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
+ OMAP_HSMMC_WRITE(host->base, ISE, irq_mask);
+ OMAP_HSMMC_WRITE(host->base, IE, irq_mask);
+}
+
+static void omap_hsmmc_disable_irq(struct omap_hsmmc_host *host)
+{
+ OMAP_HSMMC_WRITE(host->base, ISE, 0);
+ OMAP_HSMMC_WRITE(host->base, IE, 0);
+ OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
+}
+
#ifdef CONFIG_PM
/*
@@ -592,9 +612,7 @@ static int omap_hsmmc_context_restore(struct omap_hsmmc_host *host)
&& time_before(jiffies, timeout))
;
- OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
- OMAP_HSMMC_WRITE(host->base, ISE, INT_EN_MASK);
- OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK);
+ omap_hsmmc_disable_irq(host);
/* Do not initialize card-specific things if the power is off */
if (host->power_mode == MMC_POWER_OFF)
@@ -697,6 +715,8 @@ static void send_init_stream(struct omap_hsmmc_host *host)
return;
disable_irq(host->irq);
+
+ OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK);
OMAP_HSMMC_WRITE(host->base, CON,
OMAP_HSMMC_READ(host->base, CON) | INIT_STREAM);
OMAP_HSMMC_WRITE(host->base, CMD, INIT_STREAM_CMD);
@@ -762,17 +782,7 @@ omap_hsmmc_start_command(struct omap_hsmmc_host *host, struct mmc_command *cmd,
mmc_hostname(host->mmc), cmd->opcode, cmd->arg);
host->cmd = cmd;
- /*
- * Clear status bits and enable interrupts
- */
- OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
- OMAP_HSMMC_WRITE(host->base, ISE, INT_EN_MASK);
-
- if (host->use_dma)
- OMAP_HSMMC_WRITE(host->base, IE,
- INT_EN_MASK & ~(BRR_ENABLE | BWR_ENABLE));
- else
- OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK);
+ omap_hsmmc_enable_irq(host);
host->response_busy = 0;
if (cmd->flags & MMC_RSP_PRESENT) {
@@ -806,13 +816,7 @@ omap_hsmmc_start_command(struct omap_hsmmc_host *host, struct mmc_command *cmd,
if (host->use_dma)
cmdreg |= DMA_EN;
- /*
- * In an interrupt context (i.e. STOP command), the spinlock is unlocked
- * by the interrupt handler, otherwise (i.e. for a new request) it is
- * unlocked here.
- */
- if (!in_interrupt())
- spin_unlock_irqrestore(&host->irq_lock, host->flags);
+ host->req_in_progress = 1;
OMAP_HSMMC_WRITE(host->base, ARG, cmd->arg);
OMAP_HSMMC_WRITE(host->base, CMD, cmdreg);
@@ -827,6 +831,23 @@ omap_hsmmc_get_dma_dir(struct omap_hsmmc_host *host, struct mmc_data *data)
return DMA_FROM_DEVICE;
}
+static void omap_hsmmc_request_done(struct omap_hsmmc_host *host, struct mmc_request *mrq)
+{
+ int dma_ch;
+
+ spin_lock(&host->irq_lock);
+ host->req_in_progress = 0;
+ dma_ch = host->dma_ch;
+ spin_unlock(&host->irq_lock);
+
+ omap_hsmmc_disable_irq(host);
+ /* Do not complete the request if DMA is still in progress */
+ if (mrq->data && host->use_dma && dma_ch != -1)
+ return;
+ host->mrq = NULL;
+ mmc_request_done(host->mmc, mrq);
+}
+
/*
* Notify the transfer complete to MMC core
*/
@@ -843,25 +864,19 @@ omap_hsmmc_xfer_done(struct omap_hsmmc_host *host, struct mmc_data *data)
return;
}
- host->mrq = NULL;
- mmc_request_done(host->mmc, mrq);
+ omap_hsmmc_request_done(host, mrq);
return;
}
host->data = NULL;
- if (host->use_dma && host->dma_ch != -1)
- dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_len,
- omap_hsmmc_get_dma_dir(host, data));
-
if (!data->error)
data->bytes_xfered += data->blocks * (data->blksz);
else
data->bytes_xfered = 0;
if (!data->stop) {
- host->mrq = NULL;
- mmc_request_done(host->mmc, data->mrq);
+ omap_hsmmc_request_done(host, data->mrq);
return;
}
omap_hsmmc_start_command(host, data->stop, NULL);
@@ -887,10 +902,8 @@ omap_hsmmc_cmd_done(struct omap_hsmmc_host *host, struct mmc_command *cmd)
cmd->resp[0] = OMAP_HSMMC_READ(host->base, RSP10);
}
}
- if ((host->data == NULL && !host->response_busy) || cmd->error) {
- host->mrq = NULL;
- mmc_request_done(host->mmc, cmd->mrq);
- }
+ if ((host->data == NULL && !host->response_busy) || cmd->error)
+ omap_hsmmc_request_done(host, cmd->mrq);
}
/*
@@ -898,14 +911,19 @@ omap_hsmmc_cmd_done(struct omap_hsmmc_host *host, struct mmc_command *cmd)
*/
static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)
{
+ int dma_ch;
+
host->data->error = errno;
- if (host->use_dma && host->dma_ch != -1) {
+ spin_lock(&host->irq_lock);
+ dma_ch = host->dma_ch;
+ host->dma_ch = -1;
+ spin_unlock(&host->irq_lock);
+
+ if (host->use_dma && dma_ch != -1) {
dma_unmap_sg(mmc_dev(host->mmc), host->data->sg, host->dma_len,
omap_hsmmc_get_dma_dir(host, host->data));
- omap_free_dma(host->dma_ch);
- host->dma_ch = -1;
- up(&host->sem);
+ omap_free_dma(dma_ch);
}
host->data = NULL;
}
@@ -967,28 +985,21 @@ static inline void omap_hsmmc_reset_controller_fsm(struct omap_hsmmc_host *host,
__func__);
}
-/*
- * MMC controller IRQ handler
- */
-static irqreturn_t omap_hsmmc_irq(int irq, void *dev_id)
+static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status)
{
- struct omap_hsmmc_host *host = dev_id;
struct mmc_data *data;
- int end_cmd = 0, end_trans = 0, status;
-
- spin_lock(&host->irq_lock);
-
- if (host->mrq == NULL) {
- OMAP_HSMMC_WRITE(host->base, STAT,
- OMAP_HSMMC_READ(host->base, STAT));
- /* Flush posted write */
- OMAP_HSMMC_READ(host->base, STAT);
- spin_unlock(&host->irq_lock);
- return IRQ_HANDLED;
+ int end_cmd = 0, end_trans = 0;
+
+ if (!host->req_in_progress) {
+ do {
+ OMAP_HSMMC_WRITE(host->base, STAT, status);
+ /* Flush posted write */
+ status = OMAP_HSMMC_READ(host->base, STAT);
+ } while (status & INT_EN_MASK);
+ return;
}
data = host->data;
- status = OMAP_HSMMC_READ(host->base, STAT);
dev_dbg(mmc_dev(host->mmc), "IRQ Status is %x\n", status);
if (status & ERR) {
@@ -1041,15 +1052,27 @@ static irqreturn_t omap_hsmmc_irq(int irq, void *dev_id)
}
OMAP_HSMMC_WRITE(host->base, STAT, status);
- /* Flush posted write */
- OMAP_HSMMC_READ(host->base, STAT);
if (end_cmd || ((status & CC) && host->cmd))
omap_hsmmc_cmd_done(host, host->cmd);
if ((end_trans || (status & TC)) && host->mrq)
omap_hsmmc_xfer_done(host, data);
+}
- spin_unlock(&host->irq_lock);
+/*
+ * MMC controller IRQ handler
+ */
+static irqreturn_t omap_hsmmc_irq(int irq, void *dev_id)
+{
+ struct omap_hsmmc_host *host = dev_id;
+ int status;
+
+ status = OMAP_HSMMC_READ(host->base, STAT);
+ do {
+ omap_hsmmc_do_irq(host, status);
+ /* Flush posted write */
+ status = OMAP_HSMMC_READ(host->base, STAT);
+ } while (status & INT_EN_MASK);
return IRQ_HANDLED;
}
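
The restructuring above replaces the semaphore and the lock-everything IRQ handler with a req_in_progress flag: both the controller interrupt path and the DMA completion callback funnel into a request-done helper, and whichever side finishes last completes the request. Stripped of the omap_hsmmc specifics, the hand-off looks roughly like this sketch (the struct is a placeholder; field names follow the patch):

#include <linux/spinlock.h>
#include <linux/mmc/host.h>
#include <linux/mmc/core.h>

struct example_host {
	spinlock_t		irq_lock;
	int			req_in_progress;
	int			dma_ch;		/* -1 when no DMA channel is held */
	int			use_dma;
	struct mmc_request	*mrq;
	struct mmc_host		*mmc;
};

static void example_request_done(struct example_host *host, struct mmc_request *mrq)
{
	int dma_ch;

	spin_lock(&host->irq_lock);
	host->req_in_progress = 0;
	dma_ch = host->dma_ch;
	spin_unlock(&host->irq_lock);

	/* if DMA is still active, its callback completes the request later */
	if (mrq->data && host->use_dma && dma_ch != -1)
		return;

	host->mrq = NULL;
	mmc_request_done(host->mmc, mrq);
}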
@@ -1244,31 +1267,47 @@ static void omap_hsmmc_config_dma_params(struct omap_hsmmc_host *host,
/*
* DMA call back function
*/
-static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *data)
+static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data)
{
- struct omap_hsmmc_host *host = data;
+ struct omap_hsmmc_host *host = cb_data;
+ struct mmc_data *data = host->mrq->data;
+ int dma_ch, req_in_progress;
if (ch_status & OMAP2_DMA_MISALIGNED_ERR_IRQ)
dev_dbg(mmc_dev(host->mmc), "MISALIGNED_ADRS_ERR\n");
- if (host->dma_ch < 0)
+ spin_lock(&host->irq_lock);
+ if (host->dma_ch < 0) {
+ spin_unlock(&host->irq_lock);
return;
+ }
host->dma_sg_idx++;
if (host->dma_sg_idx < host->dma_len) {
/* Fire up the next transfer. */
- omap_hsmmc_config_dma_params(host, host->data,
- host->data->sg + host->dma_sg_idx);
+ omap_hsmmc_config_dma_params(host, data,
+ data->sg + host->dma_sg_idx);
+ spin_unlock(&host->irq_lock);
return;
}
- omap_free_dma(host->dma_ch);
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_len,
+ omap_hsmmc_get_dma_dir(host, data));
+
+ req_in_progress = host->req_in_progress;
+ dma_ch = host->dma_ch;
host->dma_ch = -1;
- /*
- * DMA Callback: run in interrupt context.
- * mutex_unlock will throw a kernel warning if used.
- */
- up(&host->sem);
+ spin_unlock(&host->irq_lock);
+
+ omap_free_dma(dma_ch);
+
+ /* If DMA has finished after TC, complete the request */
+ if (!req_in_progress) {
+ struct mmc_request *mrq = host->mrq;
+
+ host->mrq = NULL;
+ mmc_request_done(host->mmc, mrq);
+ }
}
/*
@@ -1277,7 +1316,7 @@ static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *data)
static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host,
struct mmc_request *req)
{
- int dma_ch = 0, ret = 0, err = 1, i;
+ int dma_ch = 0, ret = 0, i;
struct mmc_data *data = req->data;
/* Sanity check: all the SG entries must be aligned by block size. */
@@ -1294,23 +1333,7 @@ static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host,
*/
return -EINVAL;
- /*
- * If for some reason the DMA transfer is still active,
- * we wait for timeout period and free the dma
- */
- if (host->dma_ch != -1) {
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(100);
- if (down_trylock(&host->sem)) {
- omap_free_dma(host->dma_ch);
- host->dma_ch = -1;
- up(&host->sem);
- return err;
- }
- } else {
- if (down_trylock(&host->sem))
- return err;
- }
+ BUG_ON(host->dma_ch != -1);
ret = omap_request_dma(omap_hsmmc_get_dma_sync_dev(host, data),
"MMC/SD", omap_hsmmc_dma_cb, host, &dma_ch);
@@ -1410,37 +1433,27 @@ static void omap_hsmmc_request(struct mmc_host *mmc, struct mmc_request *req)
struct omap_hsmmc_host *host = mmc_priv(mmc);
int err;
- /*
- * Prevent races with the interrupt handler because of unexpected
- * interrupts, but not if we are already in interrupt context i.e.
- * retries.
- */
- if (!in_interrupt()) {
- spin_lock_irqsave(&host->irq_lock, host->flags);
- /*
- * Protect the card from I/O if there is a possibility
- * it can be removed.
- */
- if (host->protect_card) {
- if (host->reqs_blocked < 3) {
- /*
- * Ensure the controller is left in a consistent
- * state by resetting the command and data state
- * machines.
- */
- omap_hsmmc_reset_controller_fsm(host, SRD);
- omap_hsmmc_reset_controller_fsm(host, SRC);
- host->reqs_blocked += 1;
- }
- req->cmd->error = -EBADF;
- if (req->data)
- req->data->error = -EBADF;
- spin_unlock_irqrestore(&host->irq_lock, host->flags);
- mmc_request_done(mmc, req);
- return;
- } else if (host->reqs_blocked)
- host->reqs_blocked = 0;
- }
+ BUG_ON(host->req_in_progress);
+ BUG_ON(host->dma_ch != -1);
+ if (host->protect_card) {
+ if (host->reqs_blocked < 3) {
+ /*
+ * Ensure the controller is left in a consistent
+ * state by resetting the command and data state
+ * machines.
+ */
+ omap_hsmmc_reset_controller_fsm(host, SRD);
+ omap_hsmmc_reset_controller_fsm(host, SRC);
+ host->reqs_blocked += 1;
+ }
+ req->cmd->error = -EBADF;
+ if (req->data)
+ req->data->error = -EBADF;
+ req->cmd->retries = 0;
+ mmc_request_done(mmc, req);
+ return;
+ } else if (host->reqs_blocked)
+ host->reqs_blocked = 0;
WARN_ON(host->mrq != NULL);
host->mrq = req;
err = omap_hsmmc_prepare_data(host, req);
@@ -1449,8 +1462,6 @@ static void omap_hsmmc_request(struct mmc_host *mmc, struct mmc_request *req)
if (req->data)
req->data->error = err;
host->mrq = NULL;
- if (!in_interrupt())
- spin_unlock_irqrestore(&host->irq_lock, host->flags);
mmc_request_done(mmc, req);
return;
}
@@ -2019,7 +2030,6 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)
mmc->f_min = 400000;
mmc->f_max = 52000000;
- sema_init(&host->sem, 1);
spin_lock_init(&host->irq_lock);
host->iclk = clk_get(&pdev->dev, "ick");
@@ -2162,8 +2172,7 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)
}
}
- OMAP_HSMMC_WRITE(host->base, ISE, INT_EN_MASK);
- OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK);
+ omap_hsmmc_disable_irq(host);
mmc_host_lazy_disable(host->mmc);
@@ -2258,10 +2267,12 @@ static int omap_hsmmc_remove(struct platform_device *pdev)
}
#ifdef CONFIG_PM
-static int omap_hsmmc_suspend(struct platform_device *pdev, pm_message_t state)
+static int omap_hsmmc_suspend(struct device *dev)
{
int ret = 0;
+ struct platform_device *pdev = to_platform_device(dev);
struct omap_hsmmc_host *host = platform_get_drvdata(pdev);
+ pm_message_t state = PMSG_SUSPEND; /* unused by MMC core */
if (host && host->suspended)
return 0;
@@ -2281,12 +2292,9 @@ static int omap_hsmmc_suspend(struct platform_device *pdev, pm_message_t state)
}
cancel_work_sync(&host->mmc_carddetect_work);
mmc_host_enable(host->mmc);
- ret = mmc_suspend_host(host->mmc, state);
+ ret = mmc_suspend_host(host->mmc);
if (ret == 0) {
- OMAP_HSMMC_WRITE(host->base, ISE, 0);
- OMAP_HSMMC_WRITE(host->base, IE, 0);
-
-
+ omap_hsmmc_disable_irq(host);
OMAP_HSMMC_WRITE(host->base, HCTL,
OMAP_HSMMC_READ(host->base, HCTL) & ~SDBP);
mmc_host_disable(host->mmc);
@@ -2310,9 +2318,10 @@ static int omap_hsmmc_suspend(struct platform_device *pdev, pm_message_t state)
}
/* Routine to resume the MMC device */
-static int omap_hsmmc_resume(struct platform_device *pdev)
+static int omap_hsmmc_resume(struct device *dev)
{
int ret = 0;
+ struct platform_device *pdev = to_platform_device(dev);
struct omap_hsmmc_host *host = platform_get_drvdata(pdev);
if (host && !host->suspended)
@@ -2363,13 +2372,17 @@ clk_en_err:
#define omap_hsmmc_resume NULL
#endif
-static struct platform_driver omap_hsmmc_driver = {
- .remove = omap_hsmmc_remove,
+static struct dev_pm_ops omap_hsmmc_dev_pm_ops = {
.suspend = omap_hsmmc_suspend,
.resume = omap_hsmmc_resume,
+};
+
+static struct platform_driver omap_hsmmc_driver = {
+ .remove = omap_hsmmc_remove,
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
+ .pm = &omap_hsmmc_dev_pm_ops,
},
};
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index 0ed48959b590..0a4e43f37140 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -544,7 +544,7 @@ static irqreturn_t pxamci_detect_irq(int irq, void *devid)
{
struct pxamci_host *host = mmc_priv(devid);
- mmc_detect_change(devid, host->pdata->detect_delay);
+ mmc_detect_change(devid, msecs_to_jiffies(host->pdata->detect_delay_ms));
return IRQ_HANDLED;
}
@@ -813,7 +813,7 @@ static int pxamci_suspend(struct device *dev)
int ret = 0;
if (mmc)
- ret = mmc_suspend_host(mmc, PMSG_SUSPEND);
+ ret = mmc_suspend_host(mmc);
return ret;
}
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index 2fdf7689ae6c..2e16e0a90a5e 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -1881,9 +1881,8 @@ MODULE_DEVICE_TABLE(platform, s3cmci_driver_ids);
static int s3cmci_suspend(struct device *dev)
{
struct mmc_host *mmc = platform_get_drvdata(to_platform_device(dev));
- struct pm_message event = { PM_EVENT_SUSPEND };
- return mmc_suspend_host(mmc, event);
+ return mmc_suspend_host(mmc);
}
static int s3cmci_resume(struct device *dev)
diff --git a/drivers/mmc/host/sdhci-of-core.c b/drivers/mmc/host/sdhci-of-core.c
index 55e33135edb4..a2e9820cd42f 100644
--- a/drivers/mmc/host/sdhci-of-core.c
+++ b/drivers/mmc/host/sdhci-of-core.c
@@ -89,7 +89,7 @@ static int sdhci_of_suspend(struct of_device *ofdev, pm_message_t state)
{
struct sdhci_host *host = dev_get_drvdata(&ofdev->dev);
- return mmc_suspend_host(host->mmc, state);
+ return mmc_suspend_host(host->mmc);
}
static int sdhci_of_resume(struct of_device *ofdev)
@@ -118,7 +118,7 @@ static bool __devinit sdhci_of_wp_inverted(struct device_node *np)
static int __devinit sdhci_of_probe(struct of_device *ofdev,
const struct of_device_id *match)
{
- struct device_node *np = ofdev->node;
+ struct device_node *np = ofdev->dev.of_node;
struct sdhci_of_data *sdhci_of_data = match->data;
struct sdhci_host *host;
struct sdhci_of_host *of_host;
@@ -205,8 +205,11 @@ static const struct of_device_id sdhci_of_match[] = {
MODULE_DEVICE_TABLE(of, sdhci_of_match);
static struct of_platform_driver sdhci_of_driver = {
- .driver.name = "sdhci-of",
- .match_table = sdhci_of_match,
+ .driver = {
+ .name = "sdhci-of",
+ .owner = THIS_MODULE,
+ .of_match_table = sdhci_of_match,
+ },
.probe = sdhci_of_probe,
.remove = __devexit_p(sdhci_of_remove),
.suspend = sdhci_of_suspend,
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index d5b11a17e648..c8623de13af3 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -129,12 +129,12 @@ struct sdhci_of_data sdhci_esdhc = {
SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET |
SDHCI_QUIRK_NO_CARD_NO_RESET,
.ops = {
- .readl = sdhci_be32bs_readl,
- .readw = esdhc_readw,
- .readb = sdhci_be32bs_readb,
- .writel = sdhci_be32bs_writel,
- .writew = esdhc_writew,
- .writeb = esdhc_writeb,
+ .read_l = sdhci_be32bs_readl,
+ .read_w = esdhc_readw,
+ .read_b = sdhci_be32bs_readb,
+ .write_l = sdhci_be32bs_writel,
+ .write_w = esdhc_writew,
+ .write_b = esdhc_writeb,
.set_clock = esdhc_set_clock,
.enable_dma = esdhc_enable_dma,
.get_max_clock = esdhc_get_max_clock,
diff --git a/drivers/mmc/host/sdhci-of-hlwd.c b/drivers/mmc/host/sdhci-of-hlwd.c
index 35117f3ed757..68ddb7546ae2 100644
--- a/drivers/mmc/host/sdhci-of-hlwd.c
+++ b/drivers/mmc/host/sdhci-of-hlwd.c
@@ -55,11 +55,11 @@ struct sdhci_of_data sdhci_hlwd = {
.quirks = SDHCI_QUIRK_32BIT_DMA_ADDR |
SDHCI_QUIRK_32BIT_DMA_SIZE,
.ops = {
- .readl = sdhci_be32bs_readl,
- .readw = sdhci_be32bs_readw,
- .readb = sdhci_be32bs_readb,
- .writel = sdhci_hlwd_writel,
- .writew = sdhci_hlwd_writew,
- .writeb = sdhci_hlwd_writeb,
+ .read_l = sdhci_be32bs_readl,
+ .read_w = sdhci_be32bs_readw,
+ .read_b = sdhci_be32bs_readb,
+ .write_l = sdhci_hlwd_writel,
+ .write_w = sdhci_hlwd_writew,
+ .write_b = sdhci_hlwd_writeb,
},
};
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index 6701af629c30..65483fdea45b 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -628,7 +628,7 @@ static struct sdhci_pci_slot * __devinit sdhci_pci_probe_slot(
host = sdhci_alloc_host(&pdev->dev, sizeof(struct sdhci_pci_slot));
if (IS_ERR(host)) {
dev_err(&pdev->dev, "cannot allocate host\n");
- return ERR_PTR(PTR_ERR(host));
+ return ERR_CAST(host);
}
slot = sdhci_priv(host);
diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c
index 297f40ae6ad5..b6ee0d719698 100644
--- a/drivers/mmc/host/sdhci-pltfm.c
+++ b/drivers/mmc/host/sdhci-pltfm.c
@@ -29,6 +29,7 @@
#include <linux/mmc/host.h>
#include <linux/io.h>
+#include <linux/sdhci-pltfm.h>
#include "sdhci.h"
@@ -49,19 +50,18 @@ static struct sdhci_ops sdhci_pltfm_ops = {
static int __devinit sdhci_pltfm_probe(struct platform_device *pdev)
{
+ struct sdhci_pltfm_data *pdata = pdev->dev.platform_data;
struct sdhci_host *host;
struct resource *iomem;
int ret;
- BUG_ON(pdev == NULL);
-
iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!iomem) {
ret = -ENOMEM;
goto err;
}
- if (resource_size(iomem) != 0x100)
+ if (resource_size(iomem) < 0x100)
dev_err(&pdev->dev, "Invalid iomem size. You may "
"experience problems.\n");
@@ -76,7 +76,12 @@ static int __devinit sdhci_pltfm_probe(struct platform_device *pdev)
}
host->hw_name = "platform";
- host->ops = &sdhci_pltfm_ops;
+ if (pdata && pdata->ops)
+ host->ops = pdata->ops;
+ else
+ host->ops = &sdhci_pltfm_ops;
+ if (pdata)
+ host->quirks = pdata->quirks;
host->irq = platform_get_irq(pdev, 0);
if (!request_mem_region(iomem->start, resource_size(iomem),
@@ -93,6 +98,12 @@ static int __devinit sdhci_pltfm_probe(struct platform_device *pdev)
goto err_remap;
}
+ if (pdata && pdata->init) {
+ ret = pdata->init(host);
+ if (ret)
+ goto err_plat_init;
+ }
+
ret = sdhci_add_host(host);
if (ret)
goto err_add_host;
@@ -102,6 +113,9 @@ static int __devinit sdhci_pltfm_probe(struct platform_device *pdev)
return 0;
err_add_host:
+ if (pdata && pdata->exit)
+ pdata->exit(host);
+err_plat_init:
iounmap(host->ioaddr);
err_remap:
release_mem_region(iomem->start, resource_size(iomem));
@@ -114,6 +128,7 @@ err:
static int __devexit sdhci_pltfm_remove(struct platform_device *pdev)
{
+ struct sdhci_pltfm_data *pdata = pdev->dev.platform_data;
struct sdhci_host *host = platform_get_drvdata(pdev);
struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
int dead;
@@ -125,6 +140,8 @@ static int __devexit sdhci_pltfm_remove(struct platform_device *pdev)
dead = 1;
sdhci_remove_host(host, dead);
+ if (pdata && pdata->exit)
+ pdata->exit(host);
iounmap(host->ioaddr);
release_mem_region(iomem->start, resource_size(iomem));
sdhci_free_host(host);
@@ -165,4 +182,3 @@ MODULE_DESCRIPTION("Secure Digital Host Controller Interface platform driver");
MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:sdhci");
-
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 2136794c0cfa..af217924a76e 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -317,12 +317,7 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
host->irq = irq;
/* Setup quirks for the controller */
-
- /* Currently with ADMA enabled we are getting some length
- * interrupts that are not being dealt with, do disable
- * ADMA until this is sorted out. */
- host->quirks |= SDHCI_QUIRK_BROKEN_ADMA;
- host->quirks |= SDHCI_QUIRK_32BIT_ADMA_SIZE;
+ host->quirks |= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC;
#ifndef CONFIG_MMC_SDHCI_S3C_DMA
@@ -330,9 +325,6 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
* support as well. */
host->quirks |= SDHCI_QUIRK_BROKEN_DMA;
- /* PIO currently has problems with multi-block IO */
- host->quirks |= SDHCI_QUIRK_NO_MULTIBLOCK;
-
#endif /* CONFIG_MMC_SDHCI_S3C_DMA */
/* It seems we do not get an DATA transfer complete on non-busy
diff --git a/drivers/mmc/host/sdhci-spear.c b/drivers/mmc/host/sdhci-spear.c
new file mode 100644
index 000000000000..d70c54c7b70a
--- /dev/null
+++ b/drivers/mmc/host/sdhci-spear.c
@@ -0,0 +1,298 @@
+/*
+ * drivers/mmc/host/sdhci-spear.c
+ *
+ * Support for SDHCI platform devices on the SPEAr SoC family
+ *
+ * Copyright (C) 2010 ST Microelectronics
+ * Viresh Kumar <viresh.kumar@st.com>
+ *
+ * Inspired by sdhci-pltfm.c
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/highmem.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/sdhci-spear.h>
+#include <linux/io.h>
+#include "sdhci.h"
+
+struct spear_sdhci {
+ struct clk *clk;
+ struct sdhci_plat_data *data;
+};
+
+/* sdhci ops */
+static struct sdhci_ops sdhci_pltfm_ops = {
+ /* Nothing to do for now. */
+};
+
+/* gpio card detection interrupt handler */
+static irqreturn_t sdhci_gpio_irq(int irq, void *dev_id)
+{
+ struct platform_device *pdev = dev_id;
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+ struct spear_sdhci *sdhci = dev_get_platdata(&pdev->dev);
+ unsigned long gpio_irq_type;
+ int val;
+
+ val = gpio_get_value(sdhci->data->card_int_gpio);
+
+ /* val == 1 -> card removed, val == 0 -> card inserted */
+ /* if card removed - set irq for low level, else vice versa */
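+ /* flipping the trigger level on each interrupt lets a single
+ * level-triggered GPIO line report both insertion and removal */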
+ gpio_irq_type = val ? IRQF_TRIGGER_LOW : IRQF_TRIGGER_HIGH;
+ set_irq_type(irq, gpio_irq_type);
+
+ if (sdhci->data->card_power_gpio >= 0) {
+ if (!sdhci->data->power_always_enb) {
+ /* if card inserted, give power, otherwise remove it */
+ val = sdhci->data->power_active_high ? !val : val;
+ gpio_set_value(sdhci->data->card_power_gpio, val);
+ }
+ }
+
+ /* inform sdhci driver about card insertion/removal */
+ tasklet_schedule(&host->card_tasklet);
+
+ return IRQ_HANDLED;
+}
+
+static int __devinit sdhci_probe(struct platform_device *pdev)
+{
+ struct sdhci_host *host;
+ struct resource *iomem;
+ struct spear_sdhci *sdhci;
+ int ret;
+
+ BUG_ON(pdev == NULL);
+
+ iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!iomem) {
+ ret = -ENOMEM;
+ dev_dbg(&pdev->dev, "memory resource not defined\n");
+ goto err;
+ }
+
+ if (!request_mem_region(iomem->start, resource_size(iomem),
+ "spear-sdhci")) {
+ ret = -EBUSY;
+ dev_dbg(&pdev->dev, "cannot request region\n");
+ goto err;
+ }
+
+ sdhci = kzalloc(sizeof(*sdhci), GFP_KERNEL);
+ if (!sdhci) {
+ ret = -ENOMEM;
+ dev_dbg(&pdev->dev, "cannot allocate memory for sdhci\n");
+ goto err_kzalloc;
+ }
+
+ /* clk enable */
+ sdhci->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(sdhci->clk)) {
+ ret = PTR_ERR(sdhci->clk);
+ dev_dbg(&pdev->dev, "Error getting clock\n");
+ goto err_clk_get;
+ }
+
+ ret = clk_enable(sdhci->clk);
+ if (ret) {
+ dev_dbg(&pdev->dev, "Error enabling clock\n");
+ goto err_clk_enb;
+ }
+
+ /* overwrite platform_data */
+ sdhci->data = dev_get_platdata(&pdev->dev);
+ pdev->dev.platform_data = sdhci;
+
+ if (pdev->dev.parent)
+ host = sdhci_alloc_host(pdev->dev.parent, 0);
+ else
+ host = sdhci_alloc_host(&pdev->dev, 0);
+
+ if (IS_ERR(host)) {
+ ret = PTR_ERR(host);
+ dev_dbg(&pdev->dev, "error allocating host\n");
+ goto err_alloc_host;
+ }
+
+ host->hw_name = "sdhci";
+ host->ops = &sdhci_pltfm_ops;
+ host->irq = platform_get_irq(pdev, 0);
+ host->quirks = SDHCI_QUIRK_BROKEN_ADMA;
+
+ host->ioaddr = ioremap(iomem->start, resource_size(iomem));
+ if (!host->ioaddr) {
+ ret = -ENOMEM;
+ dev_dbg(&pdev->dev, "failed to remap registers\n");
+ goto err_ioremap;
+ }
+
+ ret = sdhci_add_host(host);
+ if (ret) {
+ dev_dbg(&pdev->dev, "error adding host\n");
+ goto err_add_host;
+ }
+
+ platform_set_drvdata(pdev, host);
+
+ /*
+ * Using GPIOs for sdhci power control and card detect interrupts is
+ * optional. If sdhci->data is NULL, the native sdhci lines are used;
+ * otherwise the GPIO lines are used.
+ * If a GPIO controls the power, power is disabled after card removal
+ * and re-enabled when the card insertion interrupt occurs.
+ */
+ if (!sdhci->data)
+ return 0;
+
+ if (sdhci->data->card_power_gpio >= 0) {
+ int val = 0;
+
+ ret = gpio_request(sdhci->data->card_power_gpio, "sdhci");
+ if (ret < 0) {
+ dev_dbg(&pdev->dev, "gpio request fail: %d\n",
+ sdhci->data->card_power_gpio);
+ goto err_pgpio_request;
+ }
+
+ if (sdhci->data->power_always_enb)
+ val = sdhci->data->power_active_high;
+ else
+ val = !sdhci->data->power_active_high;
+
+ ret = gpio_direction_output(sdhci->data->card_power_gpio, val);
+ if (ret) {
+ dev_dbg(&pdev->dev, "gpio set direction fail: %d\n",
+ sdhci->data->card_power_gpio);
+ goto err_pgpio_direction;
+ }
+
+ gpio_set_value(sdhci->data->card_power_gpio, 1);
+ }
+
+ if (sdhci->data->card_int_gpio >= 0) {
+ ret = gpio_request(sdhci->data->card_int_gpio, "sdhci");
+ if (ret < 0) {
+ dev_dbg(&pdev->dev, "gpio request fail: %d\n",
+ sdhci->data->card_int_gpio);
+ goto err_igpio_request;
+ }
+
+ ret = gpio_direction_input(sdhci->data->card_int_gpio);
+ if (ret) {
+ dev_dbg(&pdev->dev, "gpio set direction fail: %d\n",
+ sdhci->data->card_int_gpio);
+ goto err_igpio_direction;
+ }
+ ret = request_irq(gpio_to_irq(sdhci->data->card_int_gpio),
+ sdhci_gpio_irq, IRQF_TRIGGER_LOW,
+ mmc_hostname(host->mmc), pdev);
+ if (ret) {
+ dev_dbg(&pdev->dev, "gpio request irq fail: %d\n",
+ sdhci->data->card_int_gpio);
+ goto err_igpio_request_irq;
+ }
+
+ }
+
+ return 0;
+
+err_igpio_request_irq:
+err_igpio_direction:
+ if (sdhci->data->card_int_gpio >= 0)
+ gpio_free(sdhci->data->card_int_gpio);
+err_igpio_request:
+err_pgpio_direction:
+ if (sdhci->data->card_power_gpio >= 0)
+ gpio_free(sdhci->data->card_power_gpio);
+err_pgpio_request:
+ platform_set_drvdata(pdev, NULL);
+ sdhci_remove_host(host, 1);
+err_add_host:
+ iounmap(host->ioaddr);
+err_ioremap:
+ sdhci_free_host(host);
+err_alloc_host:
+ clk_disable(sdhci->clk);
+err_clk_enb:
+ clk_put(sdhci->clk);
+err_clk_get:
+ kfree(sdhci);
+err_kzalloc:
+ release_mem_region(iomem->start, resource_size(iomem));
+err:
+ dev_err(&pdev->dev, "spear-sdhci probe failed: %d\n", ret);
+ return ret;
+}
+
+static int __devexit sdhci_remove(struct platform_device *pdev)
+{
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+ struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ struct spear_sdhci *sdhci = dev_get_platdata(&pdev->dev);
+ int dead;
+ u32 scratch;
+
+ if (sdhci->data) {
+ if (sdhci->data->card_int_gpio >= 0) {
+ free_irq(gpio_to_irq(sdhci->data->card_int_gpio), pdev);
+ gpio_free(sdhci->data->card_int_gpio);
+ }
+
+ if (sdhci->data->card_power_gpio >= 0)
+ gpio_free(sdhci->data->card_power_gpio);
+ }
+
+ platform_set_drvdata(pdev, NULL);
+ dead = 0;
+ scratch = readl(host->ioaddr + SDHCI_INT_STATUS);
+ if (scratch == (u32)-1)
+ dead = 1;
+
+ sdhci_remove_host(host, dead);
+ iounmap(host->ioaddr);
+ sdhci_free_host(host);
+ clk_disable(sdhci->clk);
+ clk_put(sdhci->clk);
+ kfree(sdhci);
+ if (iomem)
+ release_mem_region(iomem->start, resource_size(iomem));
+
+ return 0;
+}
+
+static struct platform_driver sdhci_driver = {
+ .driver = {
+ .name = "sdhci",
+ .owner = THIS_MODULE,
+ },
+ .probe = sdhci_probe,
+ .remove = __devexit_p(sdhci_remove),
+};
+
+static int __init sdhci_init(void)
+{
+ return platform_driver_register(&sdhci_driver);
+}
+module_init(sdhci_init);
+
+static void __exit sdhci_exit(void)
+{
+ platform_driver_unregister(&sdhci_driver);
+}
+module_exit(sdhci_exit);
+
+MODULE_DESCRIPTION("SPEAr Secure Digital Host Controller Interface driver");
+MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 9d4fdfa685e5..c6d1bd8d4ac4 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -496,12 +496,22 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
WARN_ON((desc - host->adma_desc) > (128 * 2 + 1) * 4);
}
- /*
- * Add a terminating entry.
- */
+ if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
+ /*
+ * Mark the last descriptor as the terminating descriptor
+ */
+ if (desc != host->adma_desc) {
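+ /* ADMA2 descriptors are 8 bytes each: step back to the last
+ * descriptor written and set its End attribute bit instead of
+ * appending a NOP descriptor. */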
+ desc -= 8;
+ desc[0] |= 0x2; /* end */
+ }
+ } else {
+ /*
+ * Add a terminating entry.
+ */
- /* nop, end, valid */
- sdhci_set_adma_desc(desc, 0, 0, 0x3);
+ /* nop, end, valid */
+ sdhci_set_adma_desc(desc, 0, 0, 0x3);
+ }
/*
* Resync align buffer as we might have changed it.
@@ -1587,7 +1597,7 @@ int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state)
sdhci_disable_card_detection(host);
- ret = mmc_suspend_host(host->mmc, state);
+ ret = mmc_suspend_host(host->mmc);
if (ret)
return ret;
@@ -1744,7 +1754,8 @@ int sdhci_add_host(struct sdhci_host *host)
host->max_clk =
(caps & SDHCI_CLOCK_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT;
host->max_clk *= 1000000;
- if (host->max_clk == 0) {
+ if (host->max_clk == 0 || host->quirks &
+ SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
if (!host->ops->get_max_clock) {
printk(KERN_ERR
"%s: Hardware doesn't specify base clock "
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 842f46f94284..c8468134adc9 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -127,7 +127,7 @@
#define SDHCI_INT_DATA_MASK (SDHCI_INT_DATA_END | SDHCI_INT_DMA_END | \
SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL | \
SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_DATA_CRC | \
- SDHCI_INT_DATA_END_BIT | SDHCI_ADMA_ERROR)
+ SDHCI_INT_DATA_END_BIT | SDHCI_INT_ADMA_ERROR)
#define SDHCI_INT_ALL_MASK ((unsigned int)-1)
#define SDHCI_ACMD12_ERR 0x3C
@@ -236,6 +236,10 @@ struct sdhci_host {
#define SDHCI_QUIRK_DELAY_AFTER_POWER (1<<23)
/* Controller uses SDCLK instead of TMCLK for data timeouts */
#define SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK (1<<24)
+/* Controller reports wrong base clock capability */
+#define SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN (1<<25)
+/* Controller cannot support End Attribute in NOP ADMA descriptor */
+#define SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC (1<<26)
int irq; /* Device IRQ */
void __iomem * ioaddr; /* Mapped address */
@@ -294,12 +298,12 @@ struct sdhci_host {
struct sdhci_ops {
#ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS
- u32 (*readl)(struct sdhci_host *host, int reg);
- u16 (*readw)(struct sdhci_host *host, int reg);
- u8 (*readb)(struct sdhci_host *host, int reg);
- void (*writel)(struct sdhci_host *host, u32 val, int reg);
- void (*writew)(struct sdhci_host *host, u16 val, int reg);
- void (*writeb)(struct sdhci_host *host, u8 val, int reg);
+ u32 (*read_l)(struct sdhci_host *host, int reg);
+ u16 (*read_w)(struct sdhci_host *host, int reg);
+ u8 (*read_b)(struct sdhci_host *host, int reg);
+ void (*write_l)(struct sdhci_host *host, u32 val, int reg);
+ void (*write_w)(struct sdhci_host *host, u16 val, int reg);
+ void (*write_b)(struct sdhci_host *host, u8 val, int reg);
#endif
void (*set_clock)(struct sdhci_host *host, unsigned int clock);
@@ -314,48 +318,48 @@ struct sdhci_ops {
static inline void sdhci_writel(struct sdhci_host *host, u32 val, int reg)
{
- if (unlikely(host->ops->writel))
- host->ops->writel(host, val, reg);
+ if (unlikely(host->ops->write_l))
+ host->ops->write_l(host, val, reg);
else
writel(val, host->ioaddr + reg);
}
static inline void sdhci_writew(struct sdhci_host *host, u16 val, int reg)
{
- if (unlikely(host->ops->writew))
- host->ops->writew(host, val, reg);
+ if (unlikely(host->ops->write_w))
+ host->ops->write_w(host, val, reg);
else
writew(val, host->ioaddr + reg);
}
static inline void sdhci_writeb(struct sdhci_host *host, u8 val, int reg)
{
- if (unlikely(host->ops->writeb))
- host->ops->writeb(host, val, reg);
+ if (unlikely(host->ops->write_b))
+ host->ops->write_b(host, val, reg);
else
writeb(val, host->ioaddr + reg);
}
static inline u32 sdhci_readl(struct sdhci_host *host, int reg)
{
- if (unlikely(host->ops->readl))
- return host->ops->readl(host, reg);
+ if (unlikely(host->ops->read_l))
+ return host->ops->read_l(host, reg);
else
return readl(host->ioaddr + reg);
}
static inline u16 sdhci_readw(struct sdhci_host *host, int reg)
{
- if (unlikely(host->ops->readw))
- return host->ops->readw(host, reg);
+ if (unlikely(host->ops->read_w))
+ return host->ops->read_w(host, reg);
else
return readw(host->ioaddr + reg);
}
static inline u8 sdhci_readb(struct sdhci_host *host, int reg)
{
- if (unlikely(host->ops->readb))
- return host->ops->readb(host, reg);
+ if (unlikely(host->ops->read_b))
+ return host->ops->read_b(host, reg);
else
return readb(host->ioaddr + reg);
}
diff --git a/drivers/mmc/host/sdricoh_cs.c b/drivers/mmc/host/sdricoh_cs.c
index cb41e9c3ac07..e7507af3856e 100644
--- a/drivers/mmc/host/sdricoh_cs.c
+++ b/drivers/mmc/host/sdricoh_cs.c
@@ -519,7 +519,7 @@ static int sdricoh_pcmcia_suspend(struct pcmcia_device *link)
{
struct mmc_host *mmc = link->priv;
dev_dbg(&link->dev, "suspend\n");
- mmc_suspend_host(mmc, PMSG_SUSPEND);
+ mmc_suspend_host(mmc);
return 0;
}
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
new file mode 100644
index 000000000000..eb97830c0344
--- /dev/null
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -0,0 +1,965 @@
+/*
+ * MMCIF eMMC driver.
+ *
+ * Copyright (C) 2010 Renesas Solutions Corp.
+ * Yusuke Goda <yusuke.goda.sx@renesas.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ *
+ *
+ * TODO
+ * 1. DMA
+ * 2. Power management
+ * 3. Handle MMC errors better
+ *
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/core.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/sdio.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/mmc/sh_mmcif.h>
+
+#define DRIVER_NAME "sh_mmcif"
+#define DRIVER_VERSION "2010-04-28"
+
+#define MMCIF_CE_CMD_SET 0x00000000
+#define MMCIF_CE_ARG 0x00000008
+#define MMCIF_CE_ARG_CMD12 0x0000000C
+#define MMCIF_CE_CMD_CTRL 0x00000010
+#define MMCIF_CE_BLOCK_SET 0x00000014
+#define MMCIF_CE_CLK_CTRL 0x00000018
+#define MMCIF_CE_BUF_ACC 0x0000001C
+#define MMCIF_CE_RESP3 0x00000020
+#define MMCIF_CE_RESP2 0x00000024
+#define MMCIF_CE_RESP1 0x00000028
+#define MMCIF_CE_RESP0 0x0000002C
+#define MMCIF_CE_RESP_CMD12 0x00000030
+#define MMCIF_CE_DATA 0x00000034
+#define MMCIF_CE_INT 0x00000040
+#define MMCIF_CE_INT_MASK 0x00000044
+#define MMCIF_CE_HOST_STS1 0x00000048
+#define MMCIF_CE_HOST_STS2 0x0000004C
+#define MMCIF_CE_VERSION 0x0000007C
+
+/* CE_CMD_SET */
+#define CMD_MASK 0x3f000000
+#define CMD_SET_RTYP_NO ((0 << 23) | (0 << 22))
+#define CMD_SET_RTYP_6B ((0 << 23) | (1 << 22)) /* R1/R1b/R3/R4/R5 */
+#define CMD_SET_RTYP_17B ((1 << 23) | (0 << 22)) /* R2 */
+#define CMD_SET_RBSY (1 << 21) /* R1b */
+#define CMD_SET_CCSEN (1 << 20)
+#define CMD_SET_WDAT (1 << 19) /* 1: on data, 0: no data */
+#define CMD_SET_DWEN (1 << 18) /* 1: write, 0: read */
+#define CMD_SET_CMLTE (1 << 17) /* 1: multi block trans, 0: single */
+#define CMD_SET_CMD12EN (1 << 16) /* 1: CMD12 auto issue */
+#define CMD_SET_RIDXC_INDEX ((0 << 15) | (0 << 14)) /* index check */
+#define CMD_SET_RIDXC_BITS ((0 << 15) | (1 << 14)) /* check bits check */
+#define CMD_SET_RIDXC_NO ((1 << 15) | (0 << 14)) /* no check */
+#define CMD_SET_CRC7C ((0 << 13) | (0 << 12)) /* CRC7 check*/
+#define CMD_SET_CRC7C_BITS ((0 << 13) | (1 << 12)) /* check bits check*/
+#define CMD_SET_CRC7C_INTERNAL ((1 << 13) | (0 << 12)) /* internal CRC7 check*/
+#define CMD_SET_CRC16C (1 << 10) /* 0: CRC16 check*/
+#define CMD_SET_CRCSTE (1 << 8) /* 1: not receive CRC status */
+#define CMD_SET_TBIT (1 << 7) /* 1: transmission bit "Low" */
+#define CMD_SET_OPDM (1 << 6) /* 1: open/drain */
+#define CMD_SET_CCSH (1 << 5)
+#define CMD_SET_DATW_1 ((0 << 1) | (0 << 0)) /* 1bit */
+#define CMD_SET_DATW_4 ((0 << 1) | (1 << 0)) /* 4bit */
+#define CMD_SET_DATW_8 ((1 << 1) | (0 << 0)) /* 8bit */
+
+/* CE_CMD_CTRL */
+#define CMD_CTRL_BREAK (1 << 0)
+
+/* CE_BLOCK_SET */
+#define BLOCK_SIZE_MASK 0x0000ffff
+
+/* CE_CLK_CTRL */
+#define CLK_ENABLE (1 << 24) /* 1: output mmc clock */
+#define CLK_CLEAR ((1 << 19) | (1 << 18) | (1 << 17) | (1 << 16))
+#define CLK_SUP_PCLK ((1 << 19) | (1 << 18) | (1 << 17) | (1 << 16))
+#define SRSPTO_256 ((1 << 13) | (0 << 12)) /* resp timeout */
+#define SRBSYTO_29 ((1 << 11) | (1 << 10) | \
+ (1 << 9) | (1 << 8)) /* resp busy timeout */
+#define SRWDTO_29 ((1 << 7) | (1 << 6) | \
+ (1 << 5) | (1 << 4)) /* read/write timeout */
+#define SCCSTO_29 ((1 << 3) | (1 << 2) | \
+ (1 << 1) | (1 << 0)) /* ccs timeout */
+
+/* CE_BUF_ACC */
+#define BUF_ACC_DMAWEN (1 << 25)
+#define BUF_ACC_DMAREN (1 << 24)
+#define BUF_ACC_BUSW_32 (0 << 17)
+#define BUF_ACC_BUSW_16 (1 << 17)
+#define BUF_ACC_ATYP (1 << 16)
+
+/* CE_INT */
+#define INT_CCSDE (1 << 29)
+#define INT_CMD12DRE (1 << 26)
+#define INT_CMD12RBE (1 << 25)
+#define INT_CMD12CRE (1 << 24)
+#define INT_DTRANE (1 << 23)
+#define INT_BUFRE (1 << 22)
+#define INT_BUFWEN (1 << 21)
+#define INT_BUFREN (1 << 20)
+#define INT_CCSRCV (1 << 19)
+#define INT_RBSYE (1 << 17)
+#define INT_CRSPE (1 << 16)
+#define INT_CMDVIO (1 << 15)
+#define INT_BUFVIO (1 << 14)
+#define INT_WDATERR (1 << 11)
+#define INT_RDATERR (1 << 10)
+#define INT_RIDXERR (1 << 9)
+#define INT_RSPERR (1 << 8)
+#define INT_CCSTO (1 << 5)
+#define INT_CRCSTO (1 << 4)
+#define INT_WDATTO (1 << 3)
+#define INT_RDATTO (1 << 2)
+#define INT_RBSYTO (1 << 1)
+#define INT_RSPTO (1 << 0)
+#define INT_ERR_STS (INT_CMDVIO | INT_BUFVIO | INT_WDATERR | \
+ INT_RDATERR | INT_RIDXERR | INT_RSPERR | \
+ INT_CCSTO | INT_CRCSTO | INT_WDATTO | \
+ INT_RDATTO | INT_RBSYTO | INT_RSPTO)
+
+/* CE_INT_MASK */
+#define MASK_ALL 0x00000000
+#define MASK_MCCSDE (1 << 29)
+#define MASK_MCMD12DRE (1 << 26)
+#define MASK_MCMD12RBE (1 << 25)
+#define MASK_MCMD12CRE (1 << 24)
+#define MASK_MDTRANE (1 << 23)
+#define MASK_MBUFRE (1 << 22)
+#define MASK_MBUFWEN (1 << 21)
+#define MASK_MBUFREN (1 << 20)
+#define MASK_MCCSRCV (1 << 19)
+#define MASK_MRBSYE (1 << 17)
+#define MASK_MCRSPE (1 << 16)
+#define MASK_MCMDVIO (1 << 15)
+#define MASK_MBUFVIO (1 << 14)
+#define MASK_MWDATERR (1 << 11)
+#define MASK_MRDATERR (1 << 10)
+#define MASK_MRIDXERR (1 << 9)
+#define MASK_MRSPERR (1 << 8)
+#define MASK_MCCSTO (1 << 5)
+#define MASK_MCRCSTO (1 << 4)
+#define MASK_MWDATTO (1 << 3)
+#define MASK_MRDATTO (1 << 2)
+#define MASK_MRBSYTO (1 << 1)
+#define MASK_MRSPTO (1 << 0)
+
+/* CE_HOST_STS1 */
+#define STS1_CMDSEQ (1 << 31)
+
+/* CE_HOST_STS2 */
+#define STS2_CRCSTE (1 << 31)
+#define STS2_CRC16E (1 << 30)
+#define STS2_AC12CRCE (1 << 29)
+#define STS2_RSPCRC7E (1 << 28)
+#define STS2_CRCSTEBE (1 << 27)
+#define STS2_RDATEBE (1 << 26)
+#define STS2_AC12REBE (1 << 25)
+#define STS2_RSPEBE (1 << 24)
+#define STS2_AC12IDXE (1 << 23)
+#define STS2_RSPIDXE (1 << 22)
+#define STS2_CCSTO (1 << 15)
+#define STS2_RDATTO (1 << 14)
+#define STS2_DATBSYTO (1 << 13)
+#define STS2_CRCSTTO (1 << 12)
+#define STS2_AC12BSYTO (1 << 11)
+#define STS2_RSPBSYTO (1 << 10)
+#define STS2_AC12RSPTO (1 << 9)
+#define STS2_RSPTO (1 << 8)
+#define STS2_CRC_ERR (STS2_CRCSTE | STS2_CRC16E | \
+ STS2_AC12CRCE | STS2_RSPCRC7E | STS2_CRCSTEBE)
+#define STS2_TIMEOUT_ERR (STS2_CCSTO | STS2_RDATTO | \
+ STS2_DATBSYTO | STS2_CRCSTTO | \
+ STS2_AC12BSYTO | STS2_RSPBSYTO | \
+ STS2_AC12RSPTO | STS2_RSPTO)
+
+/* CE_VERSION */
+#define SOFT_RST_ON (1 << 31)
+#define SOFT_RST_OFF (0 << 31)
+
+#define CLKDEV_EMMC_DATA 52000000 /* 52MHz */
+#define CLKDEV_MMC_DATA 20000000 /* 20MHz */
+#define CLKDEV_INIT 400000 /* 400 KHz */
+
+struct sh_mmcif_host {
+ struct mmc_host *mmc;
+ struct mmc_data *data;
+ struct mmc_command *cmd;
+ struct platform_device *pd;
+ struct clk *hclk;
+ unsigned int clk;
+ int bus_width;
+ u16 wait_int;
+ u16 sd_error;
+ long timeout;
+ void __iomem *addr;
+ wait_queue_head_t intr_wait;
+};
+
+static inline u32 sh_mmcif_readl(struct sh_mmcif_host *host, unsigned int reg)
+{
+ return readl(host->addr + reg);
+}
+
+static inline void sh_mmcif_writel(struct sh_mmcif_host *host,
+ unsigned int reg, u32 val)
+{
+ writel(val, host->addr + reg);
+}
+
+static inline void sh_mmcif_bitset(struct sh_mmcif_host *host,
+ unsigned int reg, u32 val)
+{
+ writel(val | sh_mmcif_readl(host, reg), host->addr + reg);
+}
+
+static inline void sh_mmcif_bitclr(struct sh_mmcif_host *host,
+ unsigned int reg, u32 val)
+{
+ writel(~val & sh_mmcif_readl(host, reg), host->addr + reg);
+}
+
+
+static void sh_mmcif_clock_control(struct sh_mmcif_host *host, unsigned int clk)
+{
+ struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;
+
+ sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
+ sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR);
+
+ if (!clk)
+ return;
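+ /*
+ * When the requested rate equals the peripheral clock and the platform
+ * allows it, the clock is passed through undivided; otherwise the
+ * parent/target ratio is rounded down to a power of two and its log2 is
+ * written into the divider field (bits 19:16) of CE_CLK_CTRL.
+ */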
+ if (p->sup_pclk && clk == host->clk)
+ sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_SUP_PCLK);
+ else
+ sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR &
+ (ilog2(__rounddown_pow_of_two(host->clk / clk)) << 16));
+
+ sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
+}
+
+static void sh_mmcif_sync_reset(struct sh_mmcif_host *host)
+{
+ u32 tmp;
+
+ tmp = 0x010f0000 & sh_mmcif_readl(host, MMCIF_CE_CLK_CTRL);
+
+ sh_mmcif_writel(host, MMCIF_CE_VERSION, SOFT_RST_ON);
+ sh_mmcif_writel(host, MMCIF_CE_VERSION, SOFT_RST_OFF);
+ sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, tmp |
+ SRSPTO_256 | SRBSYTO_29 | SRWDTO_29 | SCCSTO_29);
+ /* byte swap on */
+ sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_ATYP);
+}
+
+static int sh_mmcif_error_manage(struct sh_mmcif_host *host)
+{
+ u32 state1, state2;
+ int ret, timeout = 10000000;
+
+ host->sd_error = 0;
+ host->wait_int = 0;
+
+ state1 = sh_mmcif_readl(host, MMCIF_CE_HOST_STS1);
+ state2 = sh_mmcif_readl(host, MMCIF_CE_HOST_STS2);
+ pr_debug("%s: ERR HOST_STS1 = %08x\n", \
+ DRIVER_NAME, sh_mmcif_readl(host, MMCIF_CE_HOST_STS1));
+ pr_debug("%s: ERR HOST_STS2 = %08x\n", \
+ DRIVER_NAME, sh_mmcif_readl(host, MMCIF_CE_HOST_STS2));
+
+ if (state1 & STS1_CMDSEQ) {
+ sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, CMD_CTRL_BREAK);
+ sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, ~CMD_CTRL_BREAK);
+ while (1) {
+ timeout--;
+ if (timeout < 0) {
+ pr_err(DRIVER_NAME": Forceed end of " \
+ "command sequence timeout err\n");
+ return -EIO;
+ }
+ if (!(sh_mmcif_readl(host, MMCIF_CE_HOST_STS1)
+ & STS1_CMDSEQ))
+ break;
+ mdelay(1);
+ }
+ sh_mmcif_sync_reset(host);
+ pr_debug(DRIVER_NAME": Forced end of command sequence\n");
+ return -EIO;
+ }
+
+ if (state2 & STS2_CRC_ERR) {
+ pr_debug(DRIVER_NAME": Happened CRC error\n");
+ ret = -EIO;
+ } else if (state2 & STS2_TIMEOUT_ERR) {
+ pr_debug(DRIVER_NAME": Happened Timeout error\n");
+ ret = -ETIMEDOUT;
+ } else {
+ pr_debug(DRIVER_NAME": Happened End/Index error\n");
+ ret = -EIO;
+ }
+ return ret;
+}
+
+static int sh_mmcif_single_read(struct sh_mmcif_host *host,
+ struct mmc_request *mrq)
+{
+ struct mmc_data *data = mrq->data;
+ long time;
+ u32 blocksize, i, *p = sg_virt(data->sg);
+
+ host->wait_int = 0;
+
+ /* buf read enable */
+ sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
+ time = wait_event_interruptible_timeout(host->intr_wait,
+ host->wait_int == 1 ||
+ host->sd_error == 1, host->timeout);
+ if (host->wait_int != 1 && (time == 0 || host->sd_error != 0))
+ return sh_mmcif_error_manage(host);
+
+ host->wait_int = 0;
+ blocksize = (BLOCK_SIZE_MASK &
+ sh_mmcif_readl(host, MMCIF_CE_BLOCK_SET)) + 3;
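+ /* the "+ 3" above rounds a block size that is not a multiple of 4 up
+ * to the next 32-bit word for the word-sized PIO loop below */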
+ for (i = 0; i < blocksize / 4; i++)
+ *p++ = sh_mmcif_readl(host, MMCIF_CE_DATA);
+
+ /* buffer read end */
+ sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
+ time = wait_event_interruptible_timeout(host->intr_wait,
+ host->wait_int == 1 ||
+ host->sd_error == 1, host->timeout);
+ if (host->wait_int != 1 && (time == 0 || host->sd_error != 0))
+ return sh_mmcif_error_manage(host);
+
+ host->wait_int = 0;
+ return 0;
+}
+
+static int sh_mmcif_multi_read(struct sh_mmcif_host *host,
+ struct mmc_request *mrq)
+{
+ struct mmc_data *data = mrq->data;
+ long time;
+ u32 blocksize, i, j, sec, *p;
+
+ blocksize = BLOCK_SIZE_MASK & sh_mmcif_readl(host, MMCIF_CE_BLOCK_SET);
+ for (j = 0; j < data->sg_len; j++) {
+ p = sg_virt(data->sg);
+ host->wait_int = 0;
+ for (sec = 0; sec < data->sg->length / blocksize; sec++) {
+ sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
+ /* buf read enable */
+ time = wait_event_interruptible_timeout(host->intr_wait,
+ host->wait_int == 1 ||
+ host->sd_error == 1, host->timeout);
+
+ if (host->wait_int != 1 &&
+ (time == 0 || host->sd_error != 0))
+ return sh_mmcif_error_manage(host);
+
+ host->wait_int = 0;
+ for (i = 0; i < blocksize / 4; i++)
+ *p++ = sh_mmcif_readl(host, MMCIF_CE_DATA);
+ }
+ if (j < data->sg_len - 1)
+ data->sg++;
+ }
+ return 0;
+}
+
+static int sh_mmcif_single_write(struct sh_mmcif_host *host,
+ struct mmc_request *mrq)
+{
+ struct mmc_data *data = mrq->data;
+ long time;
+ u32 blocksize, i, *p = sg_virt(data->sg);
+
+ host->wait_int = 0;
+ sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
+
+ /* buf write enable */
+ time = wait_event_interruptible_timeout(host->intr_wait,
+ host->wait_int == 1 ||
+ host->sd_error == 1, host->timeout);
+ if (host->wait_int != 1 && (time == 0 || host->sd_error != 0))
+ return sh_mmcif_error_manage(host);
+
+ host->wait_int = 0;
+ blocksize = (BLOCK_SIZE_MASK &
+ sh_mmcif_readl(host, MMCIF_CE_BLOCK_SET)) + 3;
+ for (i = 0; i < blocksize / 4; i++)
+ sh_mmcif_writel(host, MMCIF_CE_DATA, *p++);
+
+ /* buffer write end */
+ sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);
+
+ time = wait_event_interruptible_timeout(host->intr_wait,
+ host->wait_int == 1 ||
+ host->sd_error == 1, host->timeout);
+ if (host->wait_int != 1 && (time == 0 || host->sd_error != 0))
+ return sh_mmcif_error_manage(host);
+
+ host->wait_int = 0;
+ return 0;
+}
+
+static int sh_mmcif_multi_write(struct sh_mmcif_host *host,
+ struct mmc_request *mrq)
+{
+ struct mmc_data *data = mrq->data;
+ long time;
+ u32 i, sec, j, blocksize, *p;
+
+ blocksize = BLOCK_SIZE_MASK & sh_mmcif_readl(host, MMCIF_CE_BLOCK_SET);
+
+ for (j = 0; j < data->sg_len; j++) {
+ p = sg_virt(data->sg);
+ host->wait_int = 0;
+ for (sec = 0; sec < data->sg->length / blocksize; sec++) {
+ sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
+ /* buf write enable*/
+ time = wait_event_interruptible_timeout(host->intr_wait,
+ host->wait_int == 1 ||
+ host->sd_error == 1, host->timeout);
+
+ if (host->wait_int != 1 &&
+ (time == 0 || host->sd_error != 0))
+ return sh_mmcif_error_manage(host);
+
+ host->wait_int = 0;
+ for (i = 0; i < blocksize / 4; i++)
+ sh_mmcif_writel(host, MMCIF_CE_DATA, *p++);
+ }
+ if (j < data->sg_len - 1)
+ data->sg++;
+ }
+ return 0;
+}
+
+static void sh_mmcif_get_response(struct sh_mmcif_host *host,
+ struct mmc_command *cmd)
+{
+ if (cmd->flags & MMC_RSP_136) {
+ cmd->resp[0] = sh_mmcif_readl(host, MMCIF_CE_RESP3);
+ cmd->resp[1] = sh_mmcif_readl(host, MMCIF_CE_RESP2);
+ cmd->resp[2] = sh_mmcif_readl(host, MMCIF_CE_RESP1);
+ cmd->resp[3] = sh_mmcif_readl(host, MMCIF_CE_RESP0);
+ } else
+ cmd->resp[0] = sh_mmcif_readl(host, MMCIF_CE_RESP0);
+}
+
+static void sh_mmcif_get_cmd12response(struct sh_mmcif_host *host,
+ struct mmc_command *cmd)
+{
+ cmd->resp[0] = sh_mmcif_readl(host, MMCIF_CE_RESP_CMD12);
+}
+
+static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
+ struct mmc_request *mrq, struct mmc_command *cmd, u32 opc)
+{
+ u32 tmp = 0;
+
+ /* Response Type check */
+ switch (mmc_resp_type(cmd)) {
+ case MMC_RSP_NONE:
+ tmp |= CMD_SET_RTYP_NO;
+ break;
+ case MMC_RSP_R1:
+ case MMC_RSP_R1B:
+ case MMC_RSP_R3:
+ tmp |= CMD_SET_RTYP_6B;
+ break;
+ case MMC_RSP_R2:
+ tmp |= CMD_SET_RTYP_17B;
+ break;
+ default:
+ pr_err(DRIVER_NAME": Not support type response.\n");
+ break;
+ }
+ switch (opc) {
+ /* RBSY */
+ case MMC_SWITCH:
+ case MMC_STOP_TRANSMISSION:
+ case MMC_SET_WRITE_PROT:
+ case MMC_CLR_WRITE_PROT:
+ case MMC_ERASE:
+ case MMC_GEN_CMD:
+ tmp |= CMD_SET_RBSY;
+ break;
+ }
+ /* WDAT / DATW */
+ if (host->data) {
+ tmp |= CMD_SET_WDAT;
+ switch (host->bus_width) {
+ case MMC_BUS_WIDTH_1:
+ tmp |= CMD_SET_DATW_1;
+ break;
+ case MMC_BUS_WIDTH_4:
+ tmp |= CMD_SET_DATW_4;
+ break;
+ case MMC_BUS_WIDTH_8:
+ tmp |= CMD_SET_DATW_8;
+ break;
+ default:
+ pr_err(DRIVER_NAME": Not support bus width.\n");
+ break;
+ }
+ }
+ /* DWEN */
+ if (opc == MMC_WRITE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK)
+ tmp |= CMD_SET_DWEN;
+ /* CMLTE/CMD12EN */
+ if (opc == MMC_READ_MULTIPLE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK) {
+ tmp |= CMD_SET_CMLTE | CMD_SET_CMD12EN;
+ sh_mmcif_bitset(host, MMCIF_CE_BLOCK_SET,
+ mrq->data->blocks << 16);
+ }
+ /* RIDXC[1:0] check bits */
+ if (opc == MMC_SEND_OP_COND || opc == MMC_ALL_SEND_CID ||
+ opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
+ tmp |= CMD_SET_RIDXC_BITS;
+ /* RCRC7C[1:0] check bits */
+ if (opc == MMC_SEND_OP_COND)
+ tmp |= CMD_SET_CRC7C_BITS;
+ /* RCRC7C[1:0] internal CRC7 */
+ if (opc == MMC_ALL_SEND_CID ||
+ opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
+ tmp |= CMD_SET_CRC7C_INTERNAL;
+
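+ /* the 6-bit command index goes into bits [29:24] of CE_CMD_SET; the
+ * flag bits assembled above occupy the remaining control bits */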
+ return (opc << 24) | tmp;
+}
+
+static u32 sh_mmcif_data_trans(struct sh_mmcif_host *host,
+ struct mmc_request *mrq, u32 opc)
+{
+ u32 ret;
+
+ switch (opc) {
+ case MMC_READ_MULTIPLE_BLOCK:
+ ret = sh_mmcif_multi_read(host, mrq);
+ break;
+ case MMC_WRITE_MULTIPLE_BLOCK:
+ ret = sh_mmcif_multi_write(host, mrq);
+ break;
+ case MMC_WRITE_BLOCK:
+ ret = sh_mmcif_single_write(host, mrq);
+ break;
+ case MMC_READ_SINGLE_BLOCK:
+ case MMC_SEND_EXT_CSD:
+ ret = sh_mmcif_single_read(host, mrq);
+ break;
+ default:
+ pr_err(DRIVER_NAME": NOT SUPPORT CMD = d'%08d\n", opc);
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
+ struct mmc_request *mrq, struct mmc_command *cmd)
+{
+ long time;
+ int ret = 0, mask = 0;
+ u32 opc = cmd->opcode;
+
+ host->cmd = cmd;
+
+ switch (opc) {
+ /* response busy check */
+ case MMC_SWITCH:
+ case MMC_STOP_TRANSMISSION:
+ case MMC_SET_WRITE_PROT:
+ case MMC_CLR_WRITE_PROT:
+ case MMC_ERASE:
+ case MMC_GEN_CMD:
+ mask = MASK_MRBSYE;
+ break;
+ default:
+ mask = MASK_MCRSPE;
+ break;
+ }
+ mask |= MASK_MCMDVIO | MASK_MBUFVIO | MASK_MWDATERR |
+ MASK_MRDATERR | MASK_MRIDXERR | MASK_MRSPERR |
+ MASK_MCCSTO | MASK_MCRCSTO | MASK_MWDATTO |
+ MASK_MRDATTO | MASK_MRBSYTO | MASK_MRSPTO;
+
+ if (host->data) {
+ sh_mmcif_writel(host, MMCIF_CE_BLOCK_SET, 0);
+ sh_mmcif_writel(host, MMCIF_CE_BLOCK_SET, mrq->data->blksz);
+ }
+ opc = sh_mmcif_set_cmd(host, mrq, cmd, opc);
+
+ sh_mmcif_writel(host, MMCIF_CE_INT, 0xD80430C0);
+ sh_mmcif_writel(host, MMCIF_CE_INT_MASK, mask);
+ /* set arg */
+ sh_mmcif_writel(host, MMCIF_CE_ARG, cmd->arg);
+ host->wait_int = 0;
+ /* set cmd */
+ sh_mmcif_writel(host, MMCIF_CE_CMD_SET, opc);
+
+ time = wait_event_interruptible_timeout(host->intr_wait,
+ host->wait_int == 1 || host->sd_error == 1, host->timeout);
+ if (host->wait_int != 1 && time == 0) {
+ cmd->error = sh_mmcif_error_manage(host);
+ return;
+ }
+ if (host->sd_error) {
+ switch (cmd->opcode) {
+ case MMC_ALL_SEND_CID:
+ case MMC_SELECT_CARD:
+ case MMC_APP_CMD:
+ cmd->error = -ETIMEDOUT;
+ break;
+ default:
+ pr_debug("%s: Cmd(d'%d) err\n",
+ DRIVER_NAME, cmd->opcode);
+ cmd->error = sh_mmcif_error_manage(host);
+ break;
+ }
+ host->sd_error = 0;
+ host->wait_int = 0;
+ return;
+ }
+ if (!(cmd->flags & MMC_RSP_PRESENT)) {
+ cmd->error = ret;
+ host->wait_int = 0;
+ return;
+ }
+ if (host->wait_int == 1) {
+ sh_mmcif_get_response(host, cmd);
+ host->wait_int = 0;
+ }
+ if (host->data) {
+ ret = sh_mmcif_data_trans(host, mrq, cmd->opcode);
+ if (ret < 0)
+ mrq->data->bytes_xfered = 0;
+ else
+ mrq->data->bytes_xfered =
+ mrq->data->blocks * mrq->data->blksz;
+ }
+ cmd->error = ret;
+}
+
+static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host,
+ struct mmc_request *mrq, struct mmc_command *cmd)
+{
+ long time;
+
+ if (mrq->cmd->opcode == MMC_READ_MULTIPLE_BLOCK)
+ sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE);
+ else if (mrq->cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK)
+ sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE);
+ else {
+ pr_err(DRIVER_NAME": not support stop cmd\n");
+ cmd->error = sh_mmcif_error_manage(host);
+ return;
+ }
+
+ time = wait_event_interruptible_timeout(host->intr_wait,
+ host->wait_int == 1 ||
+ host->sd_error == 1, host->timeout);
+ if (host->wait_int != 1 && (time == 0 || host->sd_error != 0)) {
+ cmd->error = sh_mmcif_error_manage(host);
+ return;
+ }
+ sh_mmcif_get_cmd12response(host, cmd);
+ host->wait_int = 0;
+ cmd->error = 0;
+}
+
+static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct sh_mmcif_host *host = mmc_priv(mmc);
+
+ switch (mrq->cmd->opcode) {
+ /* MMCIF does not support SD/SDIO commands */
+ case SD_IO_SEND_OP_COND:
+ case MMC_APP_CMD:
+ mrq->cmd->error = -ETIMEDOUT;
+ mmc_request_done(mmc, mrq);
+ return;
+ case MMC_SEND_EXT_CSD: /* = SD_SEND_IF_COND (8) */
+ if (!mrq->data) {
+ /* send_if_cond cmd (not supported) */
+ mrq->cmd->error = -ETIMEDOUT;
+ mmc_request_done(mmc, mrq);
+ return;
+ }
+ break;
+ default:
+ break;
+ }
+ host->data = mrq->data;
+ sh_mmcif_start_cmd(host, mrq, mrq->cmd);
+ host->data = NULL;
+
+ if (mrq->cmd->error != 0) {
+ mmc_request_done(mmc, mrq);
+ return;
+ }
+ if (mrq->stop)
+ sh_mmcif_stop_cmd(host, mrq, mrq->stop);
+ mmc_request_done(mmc, mrq);
+}
+
+static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct sh_mmcif_host *host = mmc_priv(mmc);
+ struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;
+
+ if (ios->power_mode == MMC_POWER_OFF) {
+ /* clock stop */
+ sh_mmcif_clock_control(host, 0);
+ if (p->down_pwr)
+ p->down_pwr(host->pd);
+ return;
+ } else if (ios->power_mode == MMC_POWER_UP) {
+ if (p->set_pwr)
+ p->set_pwr(host->pd, ios->power_mode);
+ }
+
+ if (ios->clock)
+ sh_mmcif_clock_control(host, ios->clock);
+
+ host->bus_width = ios->bus_width;
+}
+
+static struct mmc_host_ops sh_mmcif_ops = {
+ .request = sh_mmcif_request,
+ .set_ios = sh_mmcif_set_ios,
+};
+
+static void sh_mmcif_detect(struct mmc_host *mmc)
+{
+ mmc_detect_change(mmc, 0);
+}
+
+static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
+{
+ struct sh_mmcif_host *host = dev_id;
+ u32 state = 0;
+ int err = 0;
+
+ state = sh_mmcif_readl(host, MMCIF_CE_INT);
+
+ if (state & INT_RBSYE) {
+ sh_mmcif_writel(host, MMCIF_CE_INT, ~(INT_RBSYE | INT_CRSPE));
+ sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MRBSYE);
+ } else if (state & INT_CRSPE) {
+ sh_mmcif_writel(host, MMCIF_CE_INT, ~INT_CRSPE);
+ sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCRSPE);
+ } else if (state & INT_BUFREN) {
+ sh_mmcif_writel(host, MMCIF_CE_INT, ~INT_BUFREN);
+ sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
+ } else if (state & INT_BUFWEN) {
+ sh_mmcif_writel(host, MMCIF_CE_INT, ~INT_BUFWEN);
+ sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
+ } else if (state & INT_CMD12DRE) {
+ sh_mmcif_writel(host, MMCIF_CE_INT,
+ ~(INT_CMD12DRE | INT_CMD12RBE |
+ INT_CMD12CRE | INT_BUFRE));
+ sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE);
+ } else if (state & INT_BUFRE) {
+ sh_mmcif_writel(host, MMCIF_CE_INT, ~INT_BUFRE);
+ sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
+ } else if (state & INT_DTRANE) {
+ sh_mmcif_writel(host, MMCIF_CE_INT, ~INT_DTRANE);
+ sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);
+ } else if (state & INT_CMD12RBE) {
+ sh_mmcif_writel(host, MMCIF_CE_INT,
+ ~(INT_CMD12RBE | INT_CMD12CRE));
+ sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE);
+ } else if (state & INT_ERR_STS) {
+ /* err interrupts */
+ sh_mmcif_writel(host, MMCIF_CE_INT, ~state);
+ sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state);
+ err = 1;
+ } else {
+ pr_debug("%s: Not support int\n", DRIVER_NAME);
+ sh_mmcif_writel(host, MMCIF_CE_INT, ~state);
+ sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state);
+ err = 1;
+ }
+ if (err) {
+ host->sd_error = 1;
+ pr_debug("%s: int err state = %08x\n", DRIVER_NAME, state);
+ }
+ host->wait_int = 1;
+ wake_up(&host->intr_wait);
+
+ return IRQ_HANDLED;
+}
+
+static int __devinit sh_mmcif_probe(struct platform_device *pdev)
+{
+ int ret = 0, irq[2];
+ struct mmc_host *mmc;
+ struct sh_mmcif_host *host = NULL;
+ struct sh_mmcif_plat_data *pd = NULL;
+ struct resource *res;
+ void __iomem *reg;
+ char clk_name[8];
+
+ irq[0] = platform_get_irq(pdev, 0);
+ irq[1] = platform_get_irq(pdev, 1);
+ if (irq[0] < 0 || irq[1] < 0) {
+ pr_err(DRIVER_NAME": Get irq error\n");
+ return -ENXIO;
+ }
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "platform_get_resource error.\n");
+ return -ENXIO;
+ }
+ reg = ioremap(res->start, resource_size(res));
+ if (!reg) {
+ dev_err(&pdev->dev, "ioremap error.\n");
+ return -ENOMEM;
+ }
+ pd = (struct sh_mmcif_plat_data *)(pdev->dev.platform_data);
+ if (!pd) {
+ dev_err(&pdev->dev, "sh_mmcif plat data error.\n");
+ ret = -ENXIO;
+ goto clean_up;
+ }
+ mmc = mmc_alloc_host(sizeof(struct sh_mmcif_host), &pdev->dev);
+ if (!mmc) {
+ ret = -ENOMEM;
+ goto clean_up;
+ }
+ host = mmc_priv(mmc);
+ host->mmc = mmc;
+ host->addr = reg;
+ host->timeout = 1000;
+
+ snprintf(clk_name, sizeof(clk_name), "mmc%d", pdev->id);
+ host->hclk = clk_get(&pdev->dev, clk_name);
+ if (IS_ERR(host->hclk)) {
+ dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name);
+ ret = PTR_ERR(host->hclk);
+ goto clean_up1;
+ }
+ clk_enable(host->hclk);
+ host->clk = clk_get_rate(host->hclk);
+ host->pd = pdev;
+
+ init_waitqueue_head(&host->intr_wait);
+
+ mmc->ops = &sh_mmcif_ops;
+ mmc->f_max = host->clk;
+ /* close to 400KHz */
+ if (mmc->f_max < 51200000)
+ mmc->f_min = mmc->f_max / 128;
+ else if (mmc->f_max < 102400000)
+ mmc->f_min = mmc->f_max / 256;
+ else
+ mmc->f_min = mmc->f_max / 512;
+ if (pd->ocr)
+ mmc->ocr_avail = pd->ocr;
+ mmc->caps = MMC_CAP_MMC_HIGHSPEED;
+ if (pd->caps)
+ mmc->caps |= pd->caps;
+ mmc->max_phys_segs = 128;
+ mmc->max_hw_segs = 128;
+ mmc->max_blk_size = 512;
+ mmc->max_blk_count = 65535;
+ mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
+ mmc->max_seg_size = mmc->max_req_size;
+
+ sh_mmcif_sync_reset(host);
+ platform_set_drvdata(pdev, host);
+ mmc_add_host(mmc);
+
+ ret = request_irq(irq[0], sh_mmcif_intr, 0, "sh_mmc:error", host);
+ if (ret) {
+ pr_err(DRIVER_NAME": request_irq error (sh_mmc:error)\n");
+ goto clean_up2;
+ }
+ ret = request_irq(irq[1], sh_mmcif_intr, 0, "sh_mmc:int", host);
+ if (ret) {
+ free_irq(irq[0], host);
+ pr_err(DRIVER_NAME": request_irq error (sh_mmc:int)\n");
+ goto clean_up2;
+ }
+
+ sh_mmcif_writel(host, MMCIF_CE_INT_MASK, MASK_ALL);
+ sh_mmcif_detect(host->mmc);
+
+ pr_info("%s: driver version %s\n", DRIVER_NAME, DRIVER_VERSION);
+ pr_debug("%s: chip ver H'%04x\n", DRIVER_NAME,
+ sh_mmcif_readl(host, MMCIF_CE_VERSION) & 0x0000ffff);
+ return ret;
+
+clean_up2:
+ clk_disable(host->hclk);
+clean_up1:
+ mmc_free_host(mmc);
+clean_up:
+ if (reg)
+ iounmap(reg);
+ return ret;
+}
+
+static int __devexit sh_mmcif_remove(struct platform_device *pdev)
+{
+ struct sh_mmcif_host *host = platform_get_drvdata(pdev);
+ int irq[2];
+
+ sh_mmcif_writel(host, MMCIF_CE_INT_MASK, MASK_ALL);
+
+ irq[0] = platform_get_irq(pdev, 0);
+ irq[1] = platform_get_irq(pdev, 1);
+
+ if (host->addr)
+ iounmap(host->addr);
+
+ platform_set_drvdata(pdev, NULL);
+ mmc_remove_host(host->mmc);
+
+ free_irq(irq[0], host);
+ free_irq(irq[1], host);
+
+ clk_disable(host->hclk);
+ mmc_free_host(host->mmc);
+
+ return 0;
+}
+
+static struct platform_driver sh_mmcif_driver = {
+ .probe = sh_mmcif_probe,
+ .remove = sh_mmcif_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+};
+
+static int __init sh_mmcif_init(void)
+{
+ return platform_driver_register(&sh_mmcif_driver);
+}
+
+static void __exit sh_mmcif_exit(void)
+{
+ platform_driver_unregister(&sh_mmcif_driver);
+}
+
+module_init(sh_mmcif_init);
+module_exit(sh_mmcif_exit);
+
+
+MODULE_DESCRIPTION("SuperH on-chip MMC/eMMC interface driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS(DRIVER_NAME);
+MODULE_AUTHOR("Yusuke Goda <yusuke.goda.sx@renesas.com>");
diff --git a/drivers/mmc/host/tifm_sd.c b/drivers/mmc/host/tifm_sd.c
index 82554ddec6b3..cec99958b652 100644
--- a/drivers/mmc/host/tifm_sd.c
+++ b/drivers/mmc/host/tifm_sd.c
@@ -1032,7 +1032,7 @@ static void tifm_sd_remove(struct tifm_dev *sock)
static int tifm_sd_suspend(struct tifm_dev *sock, pm_message_t state)
{
- return mmc_suspend_host(tifm_get_drvdata(sock), state);
+ return mmc_suspend_host(tifm_get_drvdata(sock));
}
static int tifm_sd_resume(struct tifm_dev *sock)
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index b2b577f6afd4..ee7d0a5a51c4 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -29,6 +29,7 @@
#include <linux/irq.h>
#include <linux/device.h>
#include <linux/delay.h>
+#include <linux/dmaengine.h>
#include <linux/mmc/host.h>
#include <linux/mfd/core.h>
#include <linux/mfd/tmio.h>
@@ -131,8 +132,8 @@ tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd)
host->cmd = cmd;
-/* FIXME - this seems to be ok comented out but the spec suggest this bit should
- * be set when issuing app commands.
+/* FIXME - this seems to be OK commented out, but the spec suggests this bit
+ * should be set when issuing app commands.
* if(cmd->flags & MMC_FLAG_ACMD)
* c |= APP_CMD;
*/
@@ -155,12 +156,12 @@ tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd)
return 0;
}
-/* This chip always returns (at least?) as much data as you ask for.
+/*
+ * This chip always returns (at least?) as much data as you ask for.
* I'm unsure what happens if you ask for less than a block. This should be
* looked into to ensure that a funny length read doesn't hose the controller.
- *
*/
-static inline void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
+static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
{
struct mmc_data *data = host->data;
unsigned short *buf;
@@ -180,7 +181,7 @@ static inline void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
count = data->blksz;
pr_debug("count: %08x offset: %08x flags %08x\n",
- count, host->sg_off, data->flags);
+ count, host->sg_off, data->flags);
/* Transfer the data */
if (data->flags & MMC_DATA_READ)
@@ -198,7 +199,7 @@ static inline void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
return;
}
-static inline void tmio_mmc_data_irq(struct tmio_mmc_host *host)
+static void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
{
struct mmc_data *data = host->data;
struct mmc_command *stop;
@@ -206,7 +207,7 @@ static inline void tmio_mmc_data_irq(struct tmio_mmc_host *host)
host->data = NULL;
if (!data) {
- pr_debug("Spurious data end IRQ\n");
+ dev_warn(&host->pdev->dev, "Spurious data end IRQ\n");
return;
}
stop = data->stop;
@@ -219,7 +220,8 @@ static inline void tmio_mmc_data_irq(struct tmio_mmc_host *host)
pr_debug("Completed data request\n");
- /*FIXME - other drivers allow an optional stop command of any given type
+ /*
+ * FIXME: other drivers allow an optional stop command of any given type
* which we don't do, as the chip can auto-generate them.
* Perhaps we can be smarter about when to use auto CMD12 and
* only issue the auto request when we know this is the desired
@@ -227,10 +229,17 @@ static inline void tmio_mmc_data_irq(struct tmio_mmc_host *host)
* upper layers expect. For now, we do what works.
*/
- if (data->flags & MMC_DATA_READ)
- disable_mmc_irqs(host, TMIO_MASK_READOP);
- else
- disable_mmc_irqs(host, TMIO_MASK_WRITEOP);
+ if (data->flags & MMC_DATA_READ) {
+ if (!host->chan_rx)
+ disable_mmc_irqs(host, TMIO_MASK_READOP);
+ dev_dbg(&host->pdev->dev, "Complete Rx request %p\n",
+ host->mrq);
+ } else {
+ if (!host->chan_tx)
+ disable_mmc_irqs(host, TMIO_MASK_WRITEOP);
+ dev_dbg(&host->pdev->dev, "Complete Tx request %p\n",
+ host->mrq);
+ }
if (stop) {
if (stop->opcode == 12 && !stop->arg)
@@ -242,7 +251,35 @@ static inline void tmio_mmc_data_irq(struct tmio_mmc_host *host)
tmio_mmc_finish_request(host);
}
-static inline void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
+static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
+{
+ struct mmc_data *data = host->data;
+
+ if (!data)
+ return;
+
+ if (host->chan_tx && (data->flags & MMC_DATA_WRITE)) {
+ /*
+ * Has all data been written out yet? Testing on SuperH showed
+ * that in most cases the first interrupt already arrives with the
+ * BUSY status bit clear, but on some operations, like mount or
+ * at the beginning of a write / sync / umount, there is one
+ * DATAEND interrupt with the BUSY bit set; in these cases,
+ * waiting for one more interrupt fixes the problem.
+ */
+ if (!(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_CMD_BUSY)) {
+ disable_mmc_irqs(host, TMIO_STAT_DATAEND);
+ tasklet_schedule(&host->dma_complete);
+ }
+ } else if (host->chan_rx && (data->flags & MMC_DATA_READ)) {
+ disable_mmc_irqs(host, TMIO_STAT_DATAEND);
+ tasklet_schedule(&host->dma_complete);
+ } else {
+ tmio_mmc_do_data_irq(host);
+ }
+}
+
+static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
unsigned int stat)
{
struct mmc_command *cmd = host->cmd;
@@ -282,10 +319,16 @@ static inline void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
 * If there's no data or we encountered an error, finish now.
*/
if (host->data && !cmd->error) {
- if (host->data->flags & MMC_DATA_READ)
- enable_mmc_irqs(host, TMIO_MASK_READOP);
- else
- enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
+ if (host->data->flags & MMC_DATA_READ) {
+ if (!host->chan_rx)
+ enable_mmc_irqs(host, TMIO_MASK_READOP);
+ } else {
+ struct dma_chan *chan = host->chan_tx;
+ if (!chan)
+ enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
+ else
+ tasklet_schedule(&host->dma_issue);
+ }
} else {
tmio_mmc_finish_request(host);
}
@@ -293,7 +336,6 @@ static inline void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
return;
}
-
static irqreturn_t tmio_mmc_irq(int irq, void *devid)
{
struct tmio_mmc_host *host = devid;
@@ -311,7 +353,7 @@ static irqreturn_t tmio_mmc_irq(int irq, void *devid)
if (!ireg) {
disable_mmc_irqs(host, status & ~irq_mask);
- pr_debug("tmio_mmc: Spurious irq, disabling! "
+ pr_warning("tmio_mmc: Spurious irq, disabling! "
"0x%08x 0x%08x 0x%08x\n", status, irq_mask, ireg);
pr_debug_status(status);
@@ -363,16 +405,265 @@ out:
return IRQ_HANDLED;
}
+#ifdef CONFIG_TMIO_MMC_DMA
+static void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
+{
+#if defined(CONFIG_SUPERH) || defined(CONFIG_ARCH_SHMOBILE)
+ /* Switch DMA mode on or off - SuperH specific? */
+ sd_ctrl_write16(host, 0xd8, enable ? 2 : 0);
+#endif
+}
+
+static void tmio_dma_complete(void *arg)
+{
+ struct tmio_mmc_host *host = arg;
+
+ dev_dbg(&host->pdev->dev, "Command completed\n");
+
+ if (!host->data)
+ dev_warn(&host->pdev->dev, "NULL data in DMA completion!\n");
+ else
+ enable_mmc_irqs(host, TMIO_STAT_DATAEND);
+}
+
+static int tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
+{
+ struct scatterlist *sg = host->sg_ptr;
+ struct dma_async_tx_descriptor *desc = NULL;
+ struct dma_chan *chan = host->chan_rx;
+ int ret;
+
+ ret = dma_map_sg(&host->pdev->dev, sg, host->sg_len, DMA_FROM_DEVICE);
+ if (ret > 0) {
+ host->dma_sglen = ret;
+ desc = chan->device->device_prep_slave_sg(chan, sg, ret,
+ DMA_FROM_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ }
+
+ if (desc) {
+ host->desc = desc;
+ desc->callback = tmio_dma_complete;
+ desc->callback_param = host;
+ host->cookie = desc->tx_submit(desc);
+ if (host->cookie < 0) {
+ host->desc = NULL;
+ ret = host->cookie;
+ } else {
+ chan->device->device_issue_pending(chan);
+ }
+ }
+ dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
+ __func__, host->sg_len, ret, host->cookie, host->mrq);
+
+ if (!host->desc) {
+ /* DMA failed, fall back to PIO */
+ if (ret >= 0)
+ ret = -EIO;
+ host->chan_rx = NULL;
+ dma_release_channel(chan);
+ /* Free the Tx channel too */
+ chan = host->chan_tx;
+ if (chan) {
+ host->chan_tx = NULL;
+ dma_release_channel(chan);
+ }
+ dev_warn(&host->pdev->dev,
+ "DMA failed: %d, falling back to PIO\n", ret);
+ tmio_mmc_enable_dma(host, false);
+ reset(host);
+ /* Fail this request, let above layers recover */
+ host->mrq->cmd->error = ret;
+ tmio_mmc_finish_request(host);
+ }
+
+ dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
+ desc, host->cookie, host->sg_len);
+
+ return ret > 0 ? 0 : ret;
+}
+
+static int tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
+{
+ struct scatterlist *sg = host->sg_ptr;
+ struct dma_async_tx_descriptor *desc = NULL;
+ struct dma_chan *chan = host->chan_tx;
+ int ret;
+
+ ret = dma_map_sg(&host->pdev->dev, sg, host->sg_len, DMA_TO_DEVICE);
+ if (ret > 0) {
+ host->dma_sglen = ret;
+ desc = chan->device->device_prep_slave_sg(chan, sg, ret,
+ DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ }
+
+ if (desc) {
+ host->desc = desc;
+ desc->callback = tmio_dma_complete;
+ desc->callback_param = host;
+ host->cookie = desc->tx_submit(desc);
+ if (host->cookie < 0) {
+ host->desc = NULL;
+ ret = host->cookie;
+ }
+ }
+ dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
+ __func__, host->sg_len, ret, host->cookie, host->mrq);
+
+ if (!host->desc) {
+ /* DMA failed, fall back to PIO */
+ if (ret >= 0)
+ ret = -EIO;
+ host->chan_tx = NULL;
+ dma_release_channel(chan);
+ /* Free the Rx channel too */
+ chan = host->chan_rx;
+ if (chan) {
+ host->chan_rx = NULL;
+ dma_release_channel(chan);
+ }
+ dev_warn(&host->pdev->dev,
+ "DMA failed: %d, falling back to PIO\n", ret);
+ tmio_mmc_enable_dma(host, false);
+ reset(host);
+ /* Fail this request, let above layers recover */
+ host->mrq->cmd->error = ret;
+ tmio_mmc_finish_request(host);
+ }
+
+ dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__,
+ desc, host->cookie);
+
+ return ret > 0 ? 0 : ret;
+}
+
+static int tmio_mmc_start_dma(struct tmio_mmc_host *host,
+ struct mmc_data *data)
+{
+ if (data->flags & MMC_DATA_READ) {
+ if (host->chan_rx)
+ return tmio_mmc_start_dma_rx(host);
+ } else {
+ if (host->chan_tx)
+ return tmio_mmc_start_dma_tx(host);
+ }
+
+ return 0;
+}
+
+static void tmio_issue_tasklet_fn(unsigned long priv)
+{
+ struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv;
+ struct dma_chan *chan = host->chan_tx;
+
+ chan->device->device_issue_pending(chan);
+}
+
+static void tmio_tasklet_fn(unsigned long arg)
+{
+ struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;
+
+ if (host->data->flags & MMC_DATA_READ)
+ dma_unmap_sg(&host->pdev->dev, host->sg_ptr, host->dma_sglen,
+ DMA_FROM_DEVICE);
+ else
+ dma_unmap_sg(&host->pdev->dev, host->sg_ptr, host->dma_sglen,
+ DMA_TO_DEVICE);
+
+ tmio_mmc_do_data_irq(host);
+}
+
+/* It might be necessary to make the filter MFD-specific */
+static bool tmio_mmc_filter(struct dma_chan *chan, void *arg)
+{
+ dev_dbg(chan->device->dev, "%s: slave data %p\n", __func__, arg);
+ chan->private = arg;
+ return true;
+}
+
+static void tmio_mmc_request_dma(struct tmio_mmc_host *host,
+ struct tmio_mmc_data *pdata)
+{
+ host->cookie = -EINVAL;
+ host->desc = NULL;
+
+ /* We can either use DMA for both Tx and Rx or not use it at all */
+ if (pdata->dma) {
+ dma_cap_mask_t mask;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ host->chan_tx = dma_request_channel(mask, tmio_mmc_filter,
+ pdata->dma->chan_priv_tx);
+ dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__,
+ host->chan_tx);
+
+ if (!host->chan_tx)
+ return;
+
+ host->chan_rx = dma_request_channel(mask, tmio_mmc_filter,
+ pdata->dma->chan_priv_rx);
+ dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__,
+ host->chan_rx);
+
+ if (!host->chan_rx) {
+ dma_release_channel(host->chan_tx);
+ host->chan_tx = NULL;
+ return;
+ }
+
+ tasklet_init(&host->dma_complete, tmio_tasklet_fn, (unsigned long)host);
+ tasklet_init(&host->dma_issue, tmio_issue_tasklet_fn, (unsigned long)host);
+
+ tmio_mmc_enable_dma(host, true);
+ }
+}
+
+static void tmio_mmc_release_dma(struct tmio_mmc_host *host)
+{
+ if (host->chan_tx) {
+ struct dma_chan *chan = host->chan_tx;
+ host->chan_tx = NULL;
+ dma_release_channel(chan);
+ }
+ if (host->chan_rx) {
+ struct dma_chan *chan = host->chan_rx;
+ host->chan_rx = NULL;
+ dma_release_channel(chan);
+ }
+
+ host->cookie = -EINVAL;
+ host->desc = NULL;
+}
+#else
+static int tmio_mmc_start_dma(struct tmio_mmc_host *host,
+ struct mmc_data *data)
+{
+ return 0;
+}
+
+static void tmio_mmc_request_dma(struct tmio_mmc_host *host,
+ struct tmio_mmc_data *pdata)
+{
+ host->chan_tx = NULL;
+ host->chan_rx = NULL;
+}
+
+static void tmio_mmc_release_dma(struct tmio_mmc_host *host)
+{
+}
+#endif
+
static int tmio_mmc_start_data(struct tmio_mmc_host *host,
struct mmc_data *data)
{
pr_debug("setup data transfer: blocksize %08x nr_blocks %d\n",
- data->blksz, data->blocks);
+ data->blksz, data->blocks);
/* Hardware cannot perform 1 and 2 byte requests in 4 bit mode */
if (data->blksz < 4 && host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
- printk(KERN_ERR "%s: %d byte block unsupported in 4 bit mode\n",
- mmc_hostname(host->mmc), data->blksz);
+ pr_err("%s: %d byte block unsupported in 4 bit mode\n",
+ mmc_hostname(host->mmc), data->blksz);
return -EINVAL;
}
@@ -383,7 +674,7 @@ static int tmio_mmc_start_data(struct tmio_mmc_host *host,
sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);
- return 0;
+ return tmio_mmc_start_dma(host, data);
}
/* Process requests from the MMC layer */
@@ -404,7 +695,6 @@ static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
}
ret = tmio_mmc_start_command(host, mrq->cmd);
-
if (!ret)
return;
@@ -458,11 +748,14 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
static int tmio_mmc_get_ro(struct mmc_host *mmc)
{
struct tmio_mmc_host *host = mmc_priv(mmc);
+ struct mfd_cell *cell = host->pdev->dev.platform_data;
+ struct tmio_mmc_data *pdata = cell->driver_data;
- return (sd_ctrl_read16(host, CTL_STATUS) & TMIO_STAT_WRPROTECT) ? 0 : 1;
+ return ((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
+ (sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT)) ? 0 : 1;
}
-static struct mmc_host_ops tmio_mmc_ops = {
+static const struct mmc_host_ops tmio_mmc_ops = {
.request = tmio_mmc_request,
.set_ios = tmio_mmc_set_ios,
.get_ro = tmio_mmc_get_ro,
@@ -475,7 +768,7 @@ static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state)
struct mmc_host *mmc = platform_get_drvdata(dev);
int ret;
- ret = mmc_suspend_host(mmc, state);
+ ret = mmc_suspend_host(mmc);
/* Tell MFD core it can disable us now.*/
if (!ret && cell->disable)
@@ -515,6 +808,7 @@ static int __devinit tmio_mmc_probe(struct platform_device *dev)
struct tmio_mmc_host *host;
struct mmc_host *mmc;
int ret = -EINVAL;
+ u32 irq_mask = TMIO_MASK_CMD;
if (dev->num_resources != 2)
goto out;
@@ -553,7 +847,10 @@ static int __devinit tmio_mmc_probe(struct platform_device *dev)
mmc->caps |= pdata->capabilities;
mmc->f_max = pdata->hclk;
mmc->f_min = mmc->f_max / 512;
- mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
+ if (pdata->ocr_mask)
+ mmc->ocr_avail = pdata->ocr_mask;
+ else
+ mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
/* Tell the MFD core we are ready to be enabled */
if (cell->enable) {
@@ -578,13 +875,20 @@ static int __devinit tmio_mmc_probe(struct platform_device *dev)
if (ret)
goto cell_disable;
+ /* See if we also get DMA */
+ tmio_mmc_request_dma(host, pdata);
+
mmc_add_host(mmc);
- printk(KERN_INFO "%s at 0x%08lx irq %d\n", mmc_hostname(host->mmc),
- (unsigned long)host->ctl, host->irq);
+ pr_info("%s at 0x%08lx irq %d\n", mmc_hostname(host->mmc),
+ (unsigned long)host->ctl, host->irq);
/* Unmask the IRQs we want to know about */
- enable_mmc_irqs(host, TMIO_MASK_IRQ);
+ if (!host->chan_rx)
+ irq_mask |= TMIO_MASK_READOP;
+ if (!host->chan_tx)
+ irq_mask |= TMIO_MASK_WRITEOP;
+ enable_mmc_irqs(host, irq_mask);
return 0;
@@ -609,6 +913,7 @@ static int __devexit tmio_mmc_remove(struct platform_device *dev)
if (mmc) {
struct tmio_mmc_host *host = mmc_priv(mmc);
mmc_remove_host(mmc);
+ tmio_mmc_release_dma(host);
free_irq(host->irq, host);
if (cell->disable)
cell->disable(dev);
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index dafecfbcd91a..64f7d5dfc106 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -10,6 +10,8 @@
*/
#include <linux/highmem.h>
+#include <linux/interrupt.h>
+#include <linux/dmaengine.h>
#define CTL_SD_CMD 0x00
#define CTL_ARG_REG 0x04
@@ -106,6 +108,17 @@ struct tmio_mmc_host {
unsigned int sg_off;
struct platform_device *pdev;
+
+ /* DMA support */
+ struct dma_chan *chan_rx;
+ struct dma_chan *chan_tx;
+ struct tasklet_struct dma_complete;
+ struct tasklet_struct dma_issue;
+#ifdef CONFIG_TMIO_MMC_DMA
+ struct dma_async_tx_descriptor *desc;
+ unsigned int dma_sglen;
+ dma_cookie_t cookie;
+#endif
};
#include <linux/io.h>
diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c
index 632858a94376..19f2d72dbca5 100644
--- a/drivers/mmc/host/via-sdmmc.c
+++ b/drivers/mmc/host/via-sdmmc.c
@@ -1280,7 +1280,7 @@ static int via_sd_suspend(struct pci_dev *pcidev, pm_message_t state)
via_save_pcictrlreg(host);
via_save_sdcreg(host);
- ret = mmc_suspend_host(host->mmc, state);
+ ret = mmc_suspend_host(host->mmc);
pci_save_state(pcidev);
pci_enable_wake(pcidev, pci_choose_state(pcidev, state), 0);
diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c
index 69efe01eece8..0012f5d13d28 100644
--- a/drivers/mmc/host/wbsd.c
+++ b/drivers/mmc/host/wbsd.c
@@ -1819,7 +1819,7 @@ static int wbsd_suspend(struct wbsd_host *host, pm_message_t state)
{
BUG_ON(host == NULL);
- return mmc_suspend_host(host->mmc, state);
+ return mmc_suspend_host(host->mmc);
}
static int wbsd_resume(struct wbsd_host *host)
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index ecf90f5c97c2..f8210bf2d241 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -304,6 +304,19 @@ config SSFDC
This enables read only access to SmartMedia formatted NAND
flash. You can mount it with FAT file system.
+
+config SM_FTL
+ tristate "SmartMedia/xD new translation layer"
+ depends on EXPERIMENTAL && BLOCK
+ select MTD_BLKDEVS
+ select MTD_NAND_ECC
+ help
+ This enables new and very EXPERIMENTAL support for the SmartMedia/xD
+ FTL (Flash Translation Layer).
+ Write support isn't yet well tested; therefore this code IS likely to
+ eat your card, so please don't use it together with valuable data.
+ Use the read-only driver (CONFIG_SSFDC) instead.
+
config MTD_OOPS
tristate "Log panic/oops to an MTD buffer"
depends on MTD
diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile
index 4521b1ecce45..760abc533395 100644
--- a/drivers/mtd/Makefile
+++ b/drivers/mtd/Makefile
@@ -24,6 +24,7 @@ obj-$(CONFIG_NFTL) += nftl.o
obj-$(CONFIG_INFTL) += inftl.o
obj-$(CONFIG_RFD_FTL) += rfd_ftl.o
obj-$(CONFIG_SSFDC) += ssfdc.o
+obj-$(CONFIG_SM_FTL) += sm_ftl.o
obj-$(CONFIG_MTD_OOPS) += mtdoops.o
nftl-objs := nftlcore.o nftlmount.o
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index 5fbf29e1e64f..62f3ea9de848 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -615,10 +615,8 @@ static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
return mtd;
setup_err:
- if(mtd) {
- kfree(mtd->eraseregions);
- kfree(mtd);
- }
+ kfree(mtd->eraseregions);
+ kfree(mtd);
kfree(cfi->cmdset_priv);
return NULL;
}
@@ -727,8 +725,7 @@ static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
/* those should be reset too since
they create memory references. */
init_waitqueue_head(&chip->wq);
- spin_lock_init(&chip->_spinlock);
- chip->mutex = &chip->_spinlock;
+ mutex_init(&chip->mutex);
chip++;
}
}
@@ -774,9 +771,9 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
break;
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
cfi_udelay(1);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
/* Someone else might have been playing with it. */
return -EAGAIN;
}
@@ -823,9 +820,9 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
return -EIO;
}
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
cfi_udelay(1);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
So we can just loop here. */
}
@@ -852,10 +849,10 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
sleep:
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
return -EAGAIN;
}
}
@@ -901,20 +898,20 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
* it'll happily send us to sleep. In any case, when
* get_chip returns success we're clear to go ahead.
*/
- ret = spin_trylock(contender->mutex);
+ ret = mutex_trylock(&contender->mutex);
spin_unlock(&shared->lock);
if (!ret)
goto retry;
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
ret = chip_ready(map, contender, contender->start, mode);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
if (ret == -EAGAIN) {
- spin_unlock(contender->mutex);
+ mutex_unlock(&contender->mutex);
goto retry;
}
if (ret) {
- spin_unlock(contender->mutex);
+ mutex_unlock(&contender->mutex);
return ret;
}
spin_lock(&shared->lock);
@@ -923,10 +920,10 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
* in FL_SYNCING state. Put contender and retry. */
if (chip->state == FL_SYNCING) {
put_chip(map, contender, contender->start);
- spin_unlock(contender->mutex);
+ mutex_unlock(&contender->mutex);
goto retry;
}
- spin_unlock(contender->mutex);
+ mutex_unlock(&contender->mutex);
}
/* Check if we already have suspended erase
@@ -936,10 +933,10 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
spin_unlock(&shared->lock);
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
goto retry;
}
@@ -969,12 +966,12 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
if (shared->writing && shared->writing != chip) {
/* give back ownership to who we loaned it from */
struct flchip *loaner = shared->writing;
- spin_lock(loaner->mutex);
+ mutex_lock(&loaner->mutex);
spin_unlock(&shared->lock);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
put_chip(map, loaner, loaner->start);
- spin_lock(chip->mutex);
- spin_unlock(loaner->mutex);
+ mutex_lock(&chip->mutex);
+ mutex_unlock(&loaner->mutex);
wake_up(&chip->wq);
return;
}
@@ -1144,7 +1141,7 @@ static int __xipram xip_wait_for_operation(
(void) map_read(map, adr);
xip_iprefetch();
local_irq_enable();
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
xip_iprefetch();
cond_resched();
@@ -1154,15 +1151,15 @@ static int __xipram xip_wait_for_operation(
* a suspended erase state. If so let's wait
* until it's done.
*/
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
while (chip->state != newstate) {
DECLARE_WAITQUEUE(wait, current);
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
}
/* Disallow XIP again */
local_irq_disable();
@@ -1218,10 +1215,10 @@ static int inval_cache_and_wait_for_operation(
int chip_state = chip->state;
unsigned int timeo, sleep_time, reset_timeo;
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
if (inval_len)
INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
timeo = chip_op_time_max;
if (!timeo)
@@ -1241,7 +1238,7 @@ static int inval_cache_and_wait_for_operation(
}
/* OK Still waiting. Drop the lock, wait a while and retry. */
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
if (sleep_time >= 1000000/HZ) {
/*
* Half of the normal delay still remaining
@@ -1256,17 +1253,17 @@ static int inval_cache_and_wait_for_operation(
cond_resched();
timeo--;
}
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
while (chip->state != chip_state) {
/* Someone's suspended the operation: sleep */
DECLARE_WAITQUEUE(wait, current);
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
}
if (chip->erase_suspended && chip_state == FL_ERASING) {
 /* Erase suspend occurred while sleeping: reset timeout */
@@ -1302,7 +1299,7 @@ static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t a
/* Ensure cmd read/writes are aligned. */
cmd_addr = adr & ~(map_bankwidth(map)-1);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, cmd_addr, FL_POINT);
@@ -1313,7 +1310,7 @@ static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t a
chip->state = FL_POINT;
chip->ref_point_counter++;
}
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1398,7 +1395,7 @@ static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
else
thislen = len;
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
if (chip->state == FL_POINT) {
chip->ref_point_counter--;
if(chip->ref_point_counter == 0)
@@ -1407,7 +1404,7 @@ static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
printk(KERN_ERR "%s: Warning: unpoint called on non pointed region\n", map->name); /* Should this give an error? */
put_chip(map, chip, chip->start);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
len -= thislen;
ofs = 0;
@@ -1426,10 +1423,10 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
/* Ensure cmd read/writes are aligned. */
cmd_addr = adr & ~(map_bankwidth(map)-1);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, cmd_addr, FL_READY);
if (ret) {
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1443,7 +1440,7 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
put_chip(map, chip, cmd_addr);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return 0;
}
@@ -1506,10 +1503,10 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
return -EINVAL;
}
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, adr, mode);
if (ret) {
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1555,7 +1552,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
xip_enable(map, chip, adr);
out: put_chip(map, chip, adr);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1664,10 +1661,10 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
/* Let's determine this according to the interleave only once */
write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0xe8) : CMD(0xe9);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, cmd_adr, FL_WRITING);
if (ret) {
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1798,7 +1795,7 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
xip_enable(map, chip, cmd_adr);
out: put_chip(map, chip, cmd_adr);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1877,10 +1874,10 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
adr += chip->start;
retry:
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, adr, FL_ERASING);
if (ret) {
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1936,7 +1933,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
} else if (chipstatus & 0x20 && retries--) {
printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
put_chip(map, chip, adr);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
goto retry;
} else {
printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
@@ -1948,7 +1945,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
xip_enable(map, chip, adr);
out: put_chip(map, chip, adr);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1981,7 +1978,7 @@ static void cfi_intelext_sync (struct mtd_info *mtd)
for (i=0; !ret && i<cfi->numchips; i++) {
chip = &cfi->chips[i];
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, chip->start, FL_SYNCING);
if (!ret) {
@@ -1992,7 +1989,7 @@ static void cfi_intelext_sync (struct mtd_info *mtd)
* with the chip now anyway.
*/
}
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
}
/* Unlock the chips again */
@@ -2000,14 +1997,14 @@ static void cfi_intelext_sync (struct mtd_info *mtd)
for (i--; i >=0; i--) {
chip = &cfi->chips[i];
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
if (chip->state == FL_SYNCING) {
chip->state = chip->oldstate;
chip->oldstate = FL_READY;
wake_up(&chip->wq);
}
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
}
}
@@ -2053,10 +2050,10 @@ static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip
adr += chip->start;
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, adr, FL_LOCKING);
if (ret) {
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -2090,7 +2087,7 @@ static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip
xip_enable(map, chip, adr);
out: put_chip(map, chip, adr);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -2155,10 +2152,10 @@ do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
struct cfi_private *cfi = map->fldrv_priv;
int ret;
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
if (ret) {
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -2177,7 +2174,7 @@ do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
put_chip(map, chip, chip->start);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return 0;
}
@@ -2452,7 +2449,7 @@ static int cfi_intelext_suspend(struct mtd_info *mtd)
for (i=0; !ret && i<cfi->numchips; i++) {
chip = &cfi->chips[i];
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
switch (chip->state) {
case FL_READY:
@@ -2484,7 +2481,7 @@ static int cfi_intelext_suspend(struct mtd_info *mtd)
case FL_PM_SUSPENDED:
break;
}
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
}
/* Unlock the chips again */
@@ -2493,7 +2490,7 @@ static int cfi_intelext_suspend(struct mtd_info *mtd)
for (i--; i >=0; i--) {
chip = &cfi->chips[i];
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
if (chip->state == FL_PM_SUSPENDED) {
/* No need to force it into a known state here,
@@ -2503,7 +2500,7 @@ static int cfi_intelext_suspend(struct mtd_info *mtd)
chip->oldstate = FL_READY;
wake_up(&chip->wq);
}
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
}
}
@@ -2544,7 +2541,7 @@ static void cfi_intelext_resume(struct mtd_info *mtd)
chip = &cfi->chips[i];
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
/* Go to known state. Chip may have been power cycled */
if (chip->state == FL_PM_SUSPENDED) {
@@ -2553,7 +2550,7 @@ static void cfi_intelext_resume(struct mtd_info *mtd)
wake_up(&chip->wq);
}
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
}
if ((mtd->flags & MTD_POWERUP_LOCK)
@@ -2573,14 +2570,14 @@ static int cfi_intelext_reset(struct mtd_info *mtd)
/* force the completion of any ongoing operation
and switch to array mode so any bootloader in
flash is accessible for soft reboot. */
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
if (!ret) {
map_write(map, CMD(0xff), chip->start);
chip->state = FL_SHUTDOWN;
put_chip(map, chip, chip->start);
}
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
}
return 0;
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index f3600e8d5382..d81079ef91a5 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -32,6 +32,7 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include <linux/reboot.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
@@ -43,10 +44,6 @@
#define MAX_WORD_RETRIES 3
-#define MANUFACTURER_AMD 0x0001
-#define MANUFACTURER_ATMEL 0x001F
-#define MANUFACTURER_MACRONIX 0x00C2
-#define MANUFACTURER_SST 0x00BF
#define SST49LF004B 0x0060
#define SST49LF040B 0x0050
#define SST49LF008A 0x005a
@@ -60,6 +57,7 @@ static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_amdstd_sync (struct mtd_info *);
static int cfi_amdstd_suspend (struct mtd_info *);
static void cfi_amdstd_resume (struct mtd_info *);
+static int cfi_amdstd_reboot(struct notifier_block *, unsigned long, void *);
static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static void cfi_amdstd_destroy(struct mtd_info *);
@@ -168,7 +166,7 @@ static void fixup_amd_bootblock(struct mtd_info *mtd, void* param)
* This reduces the risk of false detection due to
* the 8-bit device ID.
*/
- (cfi->mfr == MANUFACTURER_MACRONIX)) {
+ (cfi->mfr == CFI_MFR_MACRONIX)) {
DEBUG(MTD_DEBUG_LEVEL1,
"%s: Macronix MX29LV400C with bottom boot block"
" detected\n", map->name);
@@ -260,6 +258,42 @@ static void fixup_use_atmel_lock(struct mtd_info *mtd, void *param)
mtd->flags |= MTD_POWERUP_LOCK;
}
+static void fixup_old_sst_eraseregion(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+
+ /*
+ * These flashes report two separate eraseblock regions based on the
+ * sector_erase-size and block_erase-size, although they both operate on the
+ * same memory. This is not allowed according to CFI, so we just pick the
+ * sector_erase-size.
+ */
+ cfi->cfiq->NumEraseRegions = 1;
+}
+
+static void fixup_sst39vf(struct mtd_info *mtd, void *param)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+
+ fixup_old_sst_eraseregion(mtd);
+
+ cfi->addr_unlock1 = 0x5555;
+ cfi->addr_unlock2 = 0x2AAA;
+}
+
+static void fixup_sst39vf_rev_b(struct mtd_info *mtd, void *param)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+
+ fixup_old_sst_eraseregion(mtd);
+
+ cfi->addr_unlock1 = 0x555;
+ cfi->addr_unlock2 = 0x2AA;
+}
+
static void fixup_s29gl064n_sectors(struct mtd_info *mtd, void *param)
{
struct map_info *map = mtd->priv;
@@ -282,11 +316,24 @@ static void fixup_s29gl032n_sectors(struct mtd_info *mtd, void *param)
}
}
+/* Used to fix CFI tables of chips without Extended Query Tables */
+static struct cfi_fixup cfi_nopri_fixup_table[] = {
+ { CFI_MFR_SST, 0x234A, fixup_sst39vf, NULL, }, // SST39VF1602
+ { CFI_MFR_SST, 0x234B, fixup_sst39vf, NULL, }, // SST39VF1601
+ { CFI_MFR_SST, 0x235A, fixup_sst39vf, NULL, }, // SST39VF3202
+ { CFI_MFR_SST, 0x235B, fixup_sst39vf, NULL, }, // SST39VF3201
+ { CFI_MFR_SST, 0x235C, fixup_sst39vf_rev_b, NULL, }, // SST39VF3202B
+ { CFI_MFR_SST, 0x235D, fixup_sst39vf_rev_b, NULL, }, // SST39VF3201B
+ { CFI_MFR_SST, 0x236C, fixup_sst39vf_rev_b, NULL, }, // SST39VF6402B
+ { CFI_MFR_SST, 0x236D, fixup_sst39vf_rev_b, NULL, }, // SST39VF6401B
+ { 0, 0, NULL, NULL }
+};
+
static struct cfi_fixup cfi_fixup_table[] = {
{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
#ifdef AMD_BOOTLOC_BUG
{ CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock, NULL },
- { MANUFACTURER_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock, NULL },
+ { CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock, NULL },
#endif
{ CFI_MFR_AMD, 0x0050, fixup_use_secsi, NULL, },
{ CFI_MFR_AMD, 0x0053, fixup_use_secsi, NULL, },
@@ -304,9 +351,9 @@ static struct cfi_fixup cfi_fixup_table[] = {
{ 0, 0, NULL, NULL }
};
static struct cfi_fixup jedec_fixup_table[] = {
- { MANUFACTURER_SST, SST49LF004B, fixup_use_fwh_lock, NULL, },
- { MANUFACTURER_SST, SST49LF040B, fixup_use_fwh_lock, NULL, },
- { MANUFACTURER_SST, SST49LF008A, fixup_use_fwh_lock, NULL, },
+ { CFI_MFR_SST, SST49LF004B, fixup_use_fwh_lock, NULL, },
+ { CFI_MFR_SST, SST49LF040B, fixup_use_fwh_lock, NULL, },
+ { CFI_MFR_SST, SST49LF008A, fixup_use_fwh_lock, NULL, },
{ 0, 0, NULL, NULL }
};
@@ -355,67 +402,72 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
mtd->name = map->name;
mtd->writesize = 1;
+ mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot;
+
if (cfi->cfi_mode==CFI_MODE_CFI){
unsigned char bootloc;
- /*
- * It's a real CFI chip, not one for which the probe
- * routine faked a CFI structure. So we read the feature
- * table from it.
- */
__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
struct cfi_pri_amdstd *extp;
extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
- if (!extp) {
- kfree(mtd);
- return NULL;
- }
-
- cfi_fixup_major_minor(cfi, extp);
-
- if (extp->MajorVersion != '1' ||
- (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
- printk(KERN_ERR " Unknown Amd/Fujitsu Extended Query "
- "version %c.%c.\n", extp->MajorVersion,
- extp->MinorVersion);
- kfree(extp);
- kfree(mtd);
- return NULL;
- }
+ if (extp) {
+ /*
+ * It's a real CFI chip, not one for which the probe
+ * routine faked a CFI structure.
+ */
+ cfi_fixup_major_minor(cfi, extp);
+
+ if (extp->MajorVersion != '1' ||
+ (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
+ printk(KERN_ERR " Unknown Amd/Fujitsu Extended Query "
+ "version %c.%c.\n", extp->MajorVersion,
+ extp->MinorVersion);
+ kfree(extp);
+ kfree(mtd);
+ return NULL;
+ }
- /* Install our own private info structure */
- cfi->cmdset_priv = extp;
+ /* Install our own private info structure */
+ cfi->cmdset_priv = extp;
- /* Apply cfi device specific fixups */
- cfi_fixup(mtd, cfi_fixup_table);
+ /* Apply cfi device specific fixups */
+ cfi_fixup(mtd, cfi_fixup_table);
#ifdef DEBUG_CFI_FEATURES
- /* Tell the user about it in lots of lovely detail */
- cfi_tell_features(extp);
+ /* Tell the user about it in lots of lovely detail */
+ cfi_tell_features(extp);
#endif
- bootloc = extp->TopBottom;
- if ((bootloc != 2) && (bootloc != 3)) {
- printk(KERN_WARNING "%s: CFI does not contain boot "
- "bank location. Assuming top.\n", map->name);
- bootloc = 2;
- }
+ bootloc = extp->TopBottom;
+ if ((bootloc < 2) || (bootloc > 5)) {
+ printk(KERN_WARNING "%s: CFI contains unrecognised boot "
+ "bank location (%d). Assuming bottom.\n",
+ map->name, bootloc);
+ bootloc = 2;
+ }
- if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
- printk(KERN_WARNING "%s: Swapping erase regions for broken CFI table.\n", map->name);
+ if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
+ printk(KERN_WARNING "%s: Swapping erase regions for top-boot CFI table.\n", map->name);
- for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
- int j = (cfi->cfiq->NumEraseRegions-1)-i;
- __u32 swap;
+ for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
+ int j = (cfi->cfiq->NumEraseRegions-1)-i;
+ __u32 swap;
- swap = cfi->cfiq->EraseRegionInfo[i];
- cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j];
- cfi->cfiq->EraseRegionInfo[j] = swap;
+ swap = cfi->cfiq->EraseRegionInfo[i];
+ cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j];
+ cfi->cfiq->EraseRegionInfo[j] = swap;
+ }
}
+ /* Set the default CFI lock/unlock addresses */
+ cfi->addr_unlock1 = 0x555;
+ cfi->addr_unlock2 = 0x2aa;
+ }
+ cfi_fixup(mtd, cfi_nopri_fixup_table);
+
+ if (!cfi->addr_unlock1 || !cfi->addr_unlock2) {
+ kfree(mtd);
+ return NULL;
}
- /* Set the default CFI lock/unlock addresses */
- cfi->addr_unlock1 = 0x555;
- cfi->addr_unlock2 = 0x2aa;
} /* CFI mode */
else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
@@ -437,7 +489,11 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
return cfi_amdstd_setup(mtd);
}
+struct mtd_info *cfi_cmdset_0006(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
+struct mtd_info *cfi_cmdset_0701(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0002);
+EXPORT_SYMBOL_GPL(cfi_cmdset_0006);
+EXPORT_SYMBOL_GPL(cfi_cmdset_0701);
static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
{
@@ -491,13 +547,12 @@ static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
#endif
__module_get(THIS_MODULE);
+ register_reboot_notifier(&mtd->reboot_notifier);
return mtd;
setup_err:
- if(mtd) {
- kfree(mtd->eraseregions);
- kfree(mtd);
- }
+ kfree(mtd->eraseregions);
+ kfree(mtd);
kfree(cfi->cmdset_priv);
kfree(cfi->cfiq);
return NULL;
@@ -571,9 +626,9 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
return -EIO;
}
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
cfi_udelay(1);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
/* Someone else might have been playing with it. */
goto retry;
}
@@ -617,9 +672,9 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
return -EIO;
}
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
cfi_udelay(1);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
So we can just loop here. */
}
@@ -634,6 +689,10 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
chip->state = FL_READY;
return 0;
+ case FL_SHUTDOWN:
+ /* The machine is rebooting */
+ return -EIO;
+
case FL_POINT:
/* Only if there's no operation suspended... */
if (mode == FL_READY && chip->oldstate == FL_READY)
@@ -643,10 +702,10 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
sleep:
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
goto resettime;
}
}
@@ -778,7 +837,7 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
(void) map_read(map, adr);
xip_iprefetch();
local_irq_enable();
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
xip_iprefetch();
cond_resched();
@@ -788,15 +847,15 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
* a suspended erase state. If so let's wait
* until it's done.
*/
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
while (chip->state != FL_XIP_WHILE_ERASING) {
DECLARE_WAITQUEUE(wait, current);
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
}
/* Disallow XIP again */
local_irq_disable();
@@ -858,17 +917,17 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
#define UDELAY(map, chip, adr, usec) \
do { \
- spin_unlock(chip->mutex); \
+ mutex_unlock(&chip->mutex); \
cfi_udelay(usec); \
- spin_lock(chip->mutex); \
+ mutex_lock(&chip->mutex); \
} while (0)
#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \
do { \
- spin_unlock(chip->mutex); \
+ mutex_unlock(&chip->mutex); \
INVALIDATE_CACHED_RANGE(map, adr, len); \
cfi_udelay(usec); \
- spin_lock(chip->mutex); \
+ mutex_lock(&chip->mutex); \
} while (0)
#endif
@@ -884,10 +943,10 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
/* Ensure cmd read/writes are aligned. */
cmd_addr = adr & ~(map_bankwidth(map)-1);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, cmd_addr, FL_READY);
if (ret) {
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -900,7 +959,7 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
put_chip(map, chip, cmd_addr);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return 0;
}
@@ -954,7 +1013,7 @@ static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chi
struct cfi_private *cfi = map->fldrv_priv;
retry:
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
if (chip->state != FL_READY){
#if 0
@@ -963,7 +1022,7 @@ static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chi
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
@@ -992,7 +1051,7 @@ static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chi
cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
wake_up(&chip->wq);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return 0;
}
@@ -1061,10 +1120,10 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
adr += chip->start;
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, adr, FL_WRITING);
if (ret) {
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1107,11 +1166,11 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
timeo = jiffies + (HZ / 2); /* FIXME */
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
continue;
}
@@ -1143,7 +1202,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
op_done:
chip->state = FL_READY;
put_chip(map, chip, adr);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1175,7 +1234,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
map_word tmp_buf;
retry:
- spin_lock(cfi->chips[chipnum].mutex);
+ mutex_lock(&cfi->chips[chipnum].mutex);
if (cfi->chips[chipnum].state != FL_READY) {
#if 0
@@ -1184,7 +1243,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&cfi->chips[chipnum].wq, &wait);
- spin_unlock(cfi->chips[chipnum].mutex);
+ mutex_unlock(&cfi->chips[chipnum].mutex);
schedule();
remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
@@ -1198,7 +1257,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
/* Load 'tmp_buf' with old contents of flash */
tmp_buf = map_read(map, bus_ofs+chipstart);
- spin_unlock(cfi->chips[chipnum].mutex);
+ mutex_unlock(&cfi->chips[chipnum].mutex);
/* Number of bytes to copy from buffer */
n = min_t(int, len, map_bankwidth(map)-i);
@@ -1253,7 +1312,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
map_word tmp_buf;
retry1:
- spin_lock(cfi->chips[chipnum].mutex);
+ mutex_lock(&cfi->chips[chipnum].mutex);
if (cfi->chips[chipnum].state != FL_READY) {
#if 0
@@ -1262,7 +1321,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&cfi->chips[chipnum].wq, &wait);
- spin_unlock(cfi->chips[chipnum].mutex);
+ mutex_unlock(&cfi->chips[chipnum].mutex);
schedule();
remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
@@ -1275,7 +1334,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
tmp_buf = map_read(map, ofs + chipstart);
- spin_unlock(cfi->chips[chipnum].mutex);
+ mutex_unlock(&cfi->chips[chipnum].mutex);
tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);
@@ -1310,10 +1369,10 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
adr += chip->start;
cmd_adr = adr;
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, adr, FL_WRITING);
if (ret) {
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1368,11 +1427,11 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
timeo = jiffies + (HZ / 2); /* FIXME */
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
continue;
}
@@ -1400,7 +1459,7 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
op_done:
chip->state = FL_READY;
put_chip(map, chip, adr);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1500,10 +1559,10 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
adr = cfi->addr_unlock1;
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, adr, FL_WRITING);
if (ret) {
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1536,10 +1595,10 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
/* Someone's suspended the erase. Sleep */
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
continue;
}
if (chip->erase_suspended) {
@@ -1573,7 +1632,7 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
chip->state = FL_READY;
xip_enable(map, chip, adr);
put_chip(map, chip, adr);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1588,10 +1647,10 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
adr += chip->start;
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, adr, FL_ERASING);
if (ret) {
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1624,10 +1683,10 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
/* Someone's suspended the erase. Sleep */
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
continue;
}
if (chip->erase_suspended) {
@@ -1663,7 +1722,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
chip->state = FL_READY;
put_chip(map, chip, adr);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1715,7 +1774,7 @@ static int do_atmel_lock(struct map_info *map, struct flchip *chip,
struct cfi_private *cfi = map->fldrv_priv;
int ret;
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
if (ret)
goto out_unlock;
@@ -1741,7 +1800,7 @@ static int do_atmel_lock(struct map_info *map, struct flchip *chip,
ret = 0;
out_unlock:
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1751,7 +1810,7 @@ static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
struct cfi_private *cfi = map->fldrv_priv;
int ret;
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING);
if (ret)
goto out_unlock;
@@ -1769,7 +1828,7 @@ static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
ret = 0;
out_unlock:
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1797,7 +1856,7 @@ static void cfi_amdstd_sync (struct mtd_info *mtd)
chip = &cfi->chips[i];
retry:
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
switch(chip->state) {
case FL_READY:
@@ -1811,7 +1870,7 @@ static void cfi_amdstd_sync (struct mtd_info *mtd)
* with the chip now anyway.
*/
case FL_SYNCING:
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
break;
default:
@@ -1819,7 +1878,7 @@ static void cfi_amdstd_sync (struct mtd_info *mtd)
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
@@ -1834,13 +1893,13 @@ static void cfi_amdstd_sync (struct mtd_info *mtd)
for (i--; i >=0; i--) {
chip = &cfi->chips[i];
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
if (chip->state == FL_SYNCING) {
chip->state = chip->oldstate;
wake_up(&chip->wq);
}
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
}
}
@@ -1856,7 +1915,7 @@ static int cfi_amdstd_suspend(struct mtd_info *mtd)
for (i=0; !ret && i<cfi->numchips; i++) {
chip = &cfi->chips[i];
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
switch(chip->state) {
case FL_READY:
@@ -1876,7 +1935,7 @@ static int cfi_amdstd_suspend(struct mtd_info *mtd)
ret = -EAGAIN;
break;
}
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
}
/* Unlock the chips again */
@@ -1885,13 +1944,13 @@ static int cfi_amdstd_suspend(struct mtd_info *mtd)
for (i--; i >=0; i--) {
chip = &cfi->chips[i];
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
if (chip->state == FL_PM_SUSPENDED) {
chip->state = chip->oldstate;
wake_up(&chip->wq);
}
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
}
}
@@ -1910,7 +1969,7 @@ static void cfi_amdstd_resume(struct mtd_info *mtd)
chip = &cfi->chips[i];
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
if (chip->state == FL_PM_SUSPENDED) {
chip->state = FL_READY;
@@ -1920,15 +1979,62 @@ static void cfi_amdstd_resume(struct mtd_info *mtd)
else
printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
}
}
+
+/*
+ * Ensure that the flash device is put back into read array mode before
+ * unloading the driver or rebooting. On some systems, rebooting while
+ * the flash is in query/program/erase mode will prevent the CPU from
+ * fetching the bootloader code, requiring a hard reset or power cycle.
+ */
+static int cfi_amdstd_reset(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ int i, ret;
+ struct flchip *chip;
+
+ for (i = 0; i < cfi->numchips; i++) {
+
+ chip = &cfi->chips[i];
+
+ mutex_lock(&chip->mutex);
+
+ ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
+ if (!ret) {
+ map_write(map, CMD(0xF0), chip->start);
+ chip->state = FL_SHUTDOWN;
+ put_chip(map, chip, chip->start);
+ }
+
+ mutex_unlock(&chip->mutex);
+ }
+
+ return 0;
+}
+
+
+static int cfi_amdstd_reboot(struct notifier_block *nb, unsigned long val,
+ void *v)
+{
+ struct mtd_info *mtd;
+
+ mtd = container_of(nb, struct mtd_info, reboot_notifier);
+ cfi_amdstd_reset(mtd);
+ return NOTIFY_DONE;
+}
+
+
static void cfi_amdstd_destroy(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
+ cfi_amdstd_reset(mtd);
+ unregister_reboot_notifier(&mtd->reboot_notifier);
kfree(cfi->cmdset_priv);
kfree(cfi->cfiq);
kfree(cfi);
@@ -1938,3 +2044,5 @@ static void cfi_amdstd_destroy(struct mtd_info *mtd)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");
+MODULE_ALIAS("cfi_cmdset_0006");
+MODULE_ALIAS("cfi_cmdset_0701");
diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
index 0667a671525d..e54e8c169d76 100644
--- a/drivers/mtd/chips/cfi_cmdset_0020.c
+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
@@ -265,7 +265,7 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
timeo = jiffies + HZ;
retry:
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
/* Check that the chip's ready to talk to us.
* If it's in FL_ERASING state, suspend it and make it talk now.
@@ -296,15 +296,15 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
/* make sure we're in 'read status' mode */
map_write(map, CMD(0x70), cmd_addr);
chip->state = FL_ERASING;
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
printk(KERN_ERR "Chip not ready after erase "
"suspended: status = 0x%lx\n", status.x[0]);
return -EIO;
}
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
cfi_udelay(1);
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
}
suspended = 1;
@@ -335,13 +335,13 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
/* Urgh. Chip not yet ready to talk to us. */
if (time_after(jiffies, timeo)) {
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
printk(KERN_ERR "waiting for chip to be ready timed out in read. WSM status = %lx\n", status.x[0]);
return -EIO;
}
/* Latency issues. Drop the lock, wait a while and retry */
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
cfi_udelay(1);
goto retry;
@@ -351,7 +351,7 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
someone changes the status */
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
timeo = jiffies + HZ;
@@ -376,7 +376,7 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
}
wake_up(&chip->wq);
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
return 0;
}
@@ -445,7 +445,7 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
#ifdef DEBUG_CFI_FEATURES
printk("%s: chip->state[%d]\n", __func__, chip->state);
#endif
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
/* Check that the chip's ready to talk to us.
* Later, we can actually think about interrupting it
@@ -470,14 +470,14 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
break;
/* Urgh. Chip not yet ready to talk to us. */
if (time_after(jiffies, timeo)) {
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
printk(KERN_ERR "waiting for chip to be ready timed out in buffer write Xstatus = %lx, status = %lx\n",
status.x[0], map_read(map, cmd_adr).x[0]);
return -EIO;
}
/* Latency issues. Drop the lock, wait a while and retry */
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
cfi_udelay(1);
goto retry;
@@ -486,7 +486,7 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
someone changes the status */
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
timeo = jiffies + HZ;
@@ -503,16 +503,16 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
if (map_word_andequal(map, status, status_OK, status_OK))
break;
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
cfi_udelay(1);
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
if (++z > 100) {
/* Argh. Not ready for write to buffer */
DISABLE_VPP(map);
map_write(map, CMD(0x70), cmd_adr);
chip->state = FL_STATUS;
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
printk(KERN_ERR "Chip not ready for buffer write. Xstatus = %lx\n", status.x[0]);
return -EIO;
}
@@ -532,9 +532,9 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
map_write(map, CMD(0xd0), cmd_adr);
chip->state = FL_WRITING;
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
cfi_udelay(chip->buffer_write_time);
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
timeo = jiffies + (HZ/2);
z = 0;
@@ -543,11 +543,11 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
/* Someone's suspended the write. Sleep */
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
timeo = jiffies + (HZ / 2); /* FIXME */
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
continue;
}
@@ -563,16 +563,16 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
map_write(map, CMD(0x70), adr);
chip->state = FL_STATUS;
DISABLE_VPP(map);
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n");
return -EIO;
}
/* Latency issues. Drop the lock, wait a while and retry */
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
cfi_udelay(1);
z++;
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
}
if (!z) {
chip->buffer_write_time--;
@@ -596,11 +596,11 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
/* put back into read status register mode */
map_write(map, CMD(0x70), adr);
wake_up(&chip->wq);
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
return map_word_bitsset(map, status, CMD(0x02)) ? -EROFS : -EIO;
}
wake_up(&chip->wq);
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
return 0;
}
@@ -749,7 +749,7 @@ static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, u
timeo = jiffies + HZ;
retry:
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
/* Check that the chip's ready to talk to us. */
switch (chip->state) {
@@ -766,13 +766,13 @@ retry:
/* Urgh. Chip not yet ready to talk to us. */
if (time_after(jiffies, timeo)) {
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
printk(KERN_ERR "waiting for chip to be ready timed out in erase\n");
return -EIO;
}
/* Latency issues. Drop the lock, wait a while and retry */
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
cfi_udelay(1);
goto retry;
@@ -781,7 +781,7 @@ retry:
someone changes the status */
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
timeo = jiffies + HZ;
@@ -797,9 +797,9 @@ retry:
map_write(map, CMD(0xD0), adr);
chip->state = FL_ERASING;
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
msleep(1000);
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
/* FIXME. Use a timer to check this, and return immediately. */
/* Once the state machine's known to be working I'll do that */
@@ -810,11 +810,11 @@ retry:
/* Someone's suspended the erase. Sleep */
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
timeo = jiffies + (HZ*20); /* FIXME */
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
continue;
}
@@ -828,14 +828,14 @@ retry:
chip->state = FL_STATUS;
printk(KERN_ERR "waiting for erase to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
DISABLE_VPP(map);
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
return -EIO;
}
/* Latency issues. Drop the lock, wait a while and retry */
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
cfi_udelay(1);
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
}
DISABLE_VPP(map);
@@ -878,7 +878,7 @@ retry:
printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x. Retrying...\n", adr, chipstatus);
timeo = jiffies + HZ;
chip->state = FL_STATUS;
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
goto retry;
}
printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x\n", adr, chipstatus);
@@ -887,7 +887,7 @@ retry:
}
wake_up(&chip->wq);
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -995,7 +995,7 @@ static void cfi_staa_sync (struct mtd_info *mtd)
chip = &cfi->chips[i];
retry:
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
switch(chip->state) {
case FL_READY:
@@ -1009,7 +1009,7 @@ static void cfi_staa_sync (struct mtd_info *mtd)
* with the chip now anyway.
*/
case FL_SYNCING:
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
break;
default:
@@ -1017,7 +1017,7 @@ static void cfi_staa_sync (struct mtd_info *mtd)
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
@@ -1030,13 +1030,13 @@ static void cfi_staa_sync (struct mtd_info *mtd)
for (i--; i >=0; i--) {
chip = &cfi->chips[i];
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
if (chip->state == FL_SYNCING) {
chip->state = chip->oldstate;
wake_up(&chip->wq);
}
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
}
}
@@ -1054,7 +1054,7 @@ static inline int do_lock_oneblock(struct map_info *map, struct flchip *chip, un
timeo = jiffies + HZ;
retry:
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
/* Check that the chip's ready to talk to us. */
switch (chip->state) {
@@ -1071,13 +1071,13 @@ retry:
/* Urgh. Chip not yet ready to talk to us. */
if (time_after(jiffies, timeo)) {
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
printk(KERN_ERR "waiting for chip to be ready timed out in lock\n");
return -EIO;
}
/* Latency issues. Drop the lock, wait a while and retry */
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
cfi_udelay(1);
goto retry;
@@ -1086,7 +1086,7 @@ retry:
someone changes the status */
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
timeo = jiffies + HZ;
@@ -1098,9 +1098,9 @@ retry:
map_write(map, CMD(0x01), adr);
chip->state = FL_LOCKING;
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
msleep(1000);
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
/* FIXME. Use a timer to check this, and return immediately. */
/* Once the state machine's known to be working I'll do that */
@@ -1118,21 +1118,21 @@ retry:
chip->state = FL_STATUS;
printk(KERN_ERR "waiting for lock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
DISABLE_VPP(map);
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
return -EIO;
}
/* Latency issues. Drop the lock, wait a while and retry */
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
cfi_udelay(1);
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
}
/* Done and happy. */
chip->state = FL_STATUS;
DISABLE_VPP(map);
wake_up(&chip->wq);
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
return 0;
}
static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
@@ -1203,7 +1203,7 @@ static inline int do_unlock_oneblock(struct map_info *map, struct flchip *chip,
timeo = jiffies + HZ;
retry:
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
/* Check that the chip's ready to talk to us. */
switch (chip->state) {
@@ -1220,13 +1220,13 @@ retry:
/* Urgh. Chip not yet ready to talk to us. */
if (time_after(jiffies, timeo)) {
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
printk(KERN_ERR "waiting for chip to be ready timed out in unlock\n");
return -EIO;
}
/* Latency issues. Drop the lock, wait a while and retry */
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
cfi_udelay(1);
goto retry;
@@ -1235,7 +1235,7 @@ retry:
someone changes the status */
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
timeo = jiffies + HZ;
@@ -1247,9 +1247,9 @@ retry:
map_write(map, CMD(0xD0), adr);
chip->state = FL_UNLOCKING;
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
msleep(1000);
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
/* FIXME. Use a timer to check this, and return immediately. */
/* Once the state machine's known to be working I'll do that */
@@ -1267,21 +1267,21 @@ retry:
chip->state = FL_STATUS;
printk(KERN_ERR "waiting for unlock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
DISABLE_VPP(map);
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
return -EIO;
}
/* Latency issues. Drop the lock, wait a while and retry */
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
cfi_udelay(1);
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
}
/* Done and happy. */
chip->state = FL_STATUS;
DISABLE_VPP(map);
wake_up(&chip->wq);
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
return 0;
}
static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
@@ -1334,7 +1334,7 @@ static int cfi_staa_suspend(struct mtd_info *mtd)
for (i=0; !ret && i<cfi->numchips; i++) {
chip = &cfi->chips[i];
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
switch(chip->state) {
case FL_READY:
@@ -1354,7 +1354,7 @@ static int cfi_staa_suspend(struct mtd_info *mtd)
ret = -EAGAIN;
break;
}
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
}
/* Unlock the chips again */
@@ -1363,7 +1363,7 @@ static int cfi_staa_suspend(struct mtd_info *mtd)
for (i--; i >=0; i--) {
chip = &cfi->chips[i];
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
if (chip->state == FL_PM_SUSPENDED) {
/* No need to force it into a known state here,
@@ -1372,7 +1372,7 @@ static int cfi_staa_suspend(struct mtd_info *mtd)
chip->state = chip->oldstate;
wake_up(&chip->wq);
}
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
}
}
@@ -1390,7 +1390,7 @@ static void cfi_staa_resume(struct mtd_info *mtd)
chip = &cfi->chips[i];
- spin_lock_bh(chip->mutex);
+ mutex_lock(&chip->mutex);
/* Go to known state. Chip may have been power cycled */
if (chip->state == FL_PM_SUSPENDED) {
@@ -1399,7 +1399,7 @@ static void cfi_staa_resume(struct mtd_info *mtd)
wake_up(&chip->wq);
}
- spin_unlock_bh(chip->mutex);
+ mutex_unlock(&chip->mutex);
}
}
diff --git a/drivers/mtd/chips/cfi_probe.c b/drivers/mtd/chips/cfi_probe.c
index e63e6749429a..b2acd32f4fbf 100644
--- a/drivers/mtd/chips/cfi_probe.c
+++ b/drivers/mtd/chips/cfi_probe.c
@@ -158,6 +158,7 @@ static int __xipram cfi_chip_setup(struct map_info *map,
__u32 base = 0;
int num_erase_regions = cfi_read_query(map, base + (0x10 + 28)*ofs_factor);
int i;
+ int addr_unlock1 = 0x555, addr_unlock2 = 0x2AA;
xip_enable(base, map, cfi);
#ifdef DEBUG_CFI
@@ -181,29 +182,6 @@ static int __xipram cfi_chip_setup(struct map_info *map,
for (i=0; i<(sizeof(struct cfi_ident) + num_erase_regions * 4); i++)
((unsigned char *)cfi->cfiq)[i] = cfi_read_query(map,base + (0x10 + i)*ofs_factor);
- /* Note we put the device back into Read Mode BEFORE going into Auto
- * Select Mode, as some devices support nesting of modes, others
- * don't. This way should always work.
- * On cmdset 0001 the writes of 0xaa and 0x55 are not needed, and
- * so should be treated as nops or illegal (and so put the device
- * back into Read Mode, which is a nop in this case).
- */
- cfi_send_gen_cmd(0xf0, 0, base, map, cfi, cfi->device_type, NULL);
- cfi_send_gen_cmd(0xaa, 0x555, base, map, cfi, cfi->device_type, NULL);
- cfi_send_gen_cmd(0x55, 0x2aa, base, map, cfi, cfi->device_type, NULL);
- cfi_send_gen_cmd(0x90, 0x555, base, map, cfi, cfi->device_type, NULL);
- cfi->mfr = cfi_read_query16(map, base);
- cfi->id = cfi_read_query16(map, base + ofs_factor);
-
- /* Get AMD/Spansion extended JEDEC ID */
- if (cfi->mfr == CFI_MFR_AMD && (cfi->id & 0xff) == 0x7e)
- cfi->id = cfi_read_query(map, base + 0xe * ofs_factor) << 8 |
- cfi_read_query(map, base + 0xf * ofs_factor);
-
- /* Put it back into Read Mode */
- cfi_qry_mode_off(base, map, cfi);
- xip_allowed(base, map);
-
/* Do any necessary byteswapping */
cfi->cfiq->P_ID = le16_to_cpu(cfi->cfiq->P_ID);
@@ -228,6 +206,35 @@ static int __xipram cfi_chip_setup(struct map_info *map,
#endif
}
+ if (cfi->cfiq->P_ID == P_ID_SST_OLD) {
+ addr_unlock1 = 0x5555;
+ addr_unlock2 = 0x2AAA;
+ }
+
+ /*
+ * Note we put the device back into Read Mode BEFORE going into Auto
+ * Select Mode, as some devices support nesting of modes, others
+ * don't. This way should always work.
+ * On cmdset 0001 the writes of 0xaa and 0x55 are not needed, and
+ * so should be treated as nops or illegal (and so put the device
+ * back into Read Mode, which is a nop in this case).
+ */
+ cfi_send_gen_cmd(0xf0, 0, base, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0xaa, addr_unlock1, base, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, addr_unlock2, base, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x90, addr_unlock1, base, map, cfi, cfi->device_type, NULL);
+ cfi->mfr = cfi_read_query16(map, base);
+ cfi->id = cfi_read_query16(map, base + ofs_factor);
+
+ /* Get AMD/Spansion extended JEDEC ID */
+ if (cfi->mfr == CFI_MFR_AMD && (cfi->id & 0xff) == 0x7e)
+ cfi->id = cfi_read_query(map, base + 0xe * ofs_factor) << 8 |
+ cfi_read_query(map, base + 0xf * ofs_factor);
+
+ /* Put it back into Read Mode */
+ cfi_qry_mode_off(base, map, cfi);
+ xip_allowed(base, map);
+
printk(KERN_INFO "%s: Found %d x%d devices at 0x%x in %d-bit bank\n",
map->name, cfi->interleave, cfi->device_type*8, base,
map->bankwidth*8);
@@ -269,6 +276,9 @@ static char *vendorname(__u16 vendor)
case P_ID_SST_PAGE:
return "SST Page Write";
+ case P_ID_SST_OLD:
+ return "SST 39VF160x/39VF320x";
+
case P_ID_INTEL_PERFORMANCE:
return "Intel Performance Code";
diff --git a/drivers/mtd/chips/cfi_util.c b/drivers/mtd/chips/cfi_util.c
index ca584d0380b4..d7c2c672757e 100644
--- a/drivers/mtd/chips/cfi_util.c
+++ b/drivers/mtd/chips/cfi_util.c
@@ -104,10 +104,11 @@ __xipram cfi_read_pri(struct map_info *map, __u16 adr, __u16 size, const char* n
int i;
struct cfi_extquery *extp = NULL;
- printk(" %s Extended Query Table at 0x%4.4X\n", name, adr);
if (!adr)
goto out;
+ printk(KERN_INFO "%s Extended Query Table at 0x%4.4X\n", name, adr);
+
extp = kmalloc(size, GFP_KERNEL);
if (!extp) {
printk(KERN_ERR "Failed to allocate memory\n");
diff --git a/drivers/mtd/chips/fwh_lock.h b/drivers/mtd/chips/fwh_lock.h
index 57e0e4e921f9..d18064977192 100644
--- a/drivers/mtd/chips/fwh_lock.h
+++ b/drivers/mtd/chips/fwh_lock.h
@@ -58,10 +58,10 @@ static int fwh_xxlock_oneblock(struct map_info *map, struct flchip *chip,
* to flash memory - that means that we don't have to check status
* and timeout.
*/
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, adr, FL_LOCKING);
if (ret) {
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -72,7 +72,7 @@ static int fwh_xxlock_oneblock(struct map_info *map, struct flchip *chip,
/* Done and happy. */
chip->state = chip->oldstate;
put_chip(map, chip, adr);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return 0;
}
diff --git a/drivers/mtd/chips/gen_probe.c b/drivers/mtd/chips/gen_probe.c
index e2dc96441e05..3b9a2843c5f8 100644
--- a/drivers/mtd/chips/gen_probe.c
+++ b/drivers/mtd/chips/gen_probe.c
@@ -155,8 +155,7 @@ static struct cfi_private *genprobe_ident_chips(struct map_info *map, struct chi
pchip->start = (i << cfi.chipshift);
pchip->state = FL_READY;
init_waitqueue_head(&pchip->wq);
- spin_lock_init(&pchip->_spinlock);
- pchip->mutex = &pchip->_spinlock;
+ mutex_init(&pchip->mutex);
}
}
@@ -242,17 +241,19 @@ static struct mtd_info *check_cmd_set(struct map_info *map, int primary)
/* We need these for the !CONFIG_MODULES case,
because symbol_get() doesn't work there */
#ifdef CONFIG_MTD_CFI_INTELEXT
- case 0x0001:
- case 0x0003:
- case 0x0200:
+ case P_ID_INTEL_EXT:
+ case P_ID_INTEL_STD:
+ case P_ID_INTEL_PERFORMANCE:
return cfi_cmdset_0001(map, primary);
#endif
#ifdef CONFIG_MTD_CFI_AMDSTD
- case 0x0002:
+ case P_ID_AMD_STD:
+ case P_ID_SST_OLD:
+ case P_ID_WINBOND:
return cfi_cmdset_0002(map, primary);
#endif
#ifdef CONFIG_MTD_CFI_STAA
- case 0x0020:
+ case P_ID_ST_ADV:
return cfi_cmdset_0020(map, primary);
#endif
default:
diff --git a/drivers/mtd/chips/jedec_probe.c b/drivers/mtd/chips/jedec_probe.c
index 8db1148dfa47..d72a5fb2d041 100644
--- a/drivers/mtd/chips/jedec_probe.c
+++ b/drivers/mtd/chips/jedec_probe.c
@@ -22,24 +22,6 @@
#include <linux/mtd/cfi.h>
#include <linux/mtd/gen_probe.h>
-/* Manufacturers */
-#define MANUFACTURER_AMD 0x0001
-#define MANUFACTURER_ATMEL 0x001f
-#define MANUFACTURER_EON 0x001c
-#define MANUFACTURER_FUJITSU 0x0004
-#define MANUFACTURER_HYUNDAI 0x00AD
-#define MANUFACTURER_INTEL 0x0089
-#define MANUFACTURER_MACRONIX 0x00C2
-#define MANUFACTURER_NEC 0x0010
-#define MANUFACTURER_PMC 0x009D
-#define MANUFACTURER_SHARP 0x00b0
-#define MANUFACTURER_SST 0x00BF
-#define MANUFACTURER_ST 0x0020
-#define MANUFACTURER_TOSHIBA 0x0098
-#define MANUFACTURER_WINBOND 0x00da
-#define CONTINUATION_CODE 0x007f
-
-
/* AMD */
#define AM29DL800BB 0x22CB
#define AM29DL800BT 0x224A
@@ -166,6 +148,8 @@
#define SST39LF160 0x2782
#define SST39VF1601 0x234b
#define SST39VF3201 0x235b
+#define SST39WF1601 0x274b
+#define SST39WF1602 0x274a
#define SST39LF512 0x00D4
#define SST39LF010 0x00D5
#define SST39LF020 0x00D6
@@ -309,7 +293,7 @@ struct amd_flash_info {
*/
static const struct amd_flash_info jedec_table[] = {
{
- .mfr_id = MANUFACTURER_AMD,
+ .mfr_id = CFI_MFR_AMD,
.dev_id = AM29F032B,
.name = "AMD AM29F032B",
.uaddr = MTD_UADDR_0x0555_0x02AA,
@@ -321,7 +305,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,64)
}
}, {
- .mfr_id = MANUFACTURER_AMD,
+ .mfr_id = CFI_MFR_AMD,
.dev_id = AM29LV160DT,
.name = "AMD AM29LV160DT",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -336,7 +320,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x04000,1)
}
}, {
- .mfr_id = MANUFACTURER_AMD,
+ .mfr_id = CFI_MFR_AMD,
.dev_id = AM29LV160DB,
.name = "AMD AM29LV160DB",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -351,7 +335,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,31)
}
}, {
- .mfr_id = MANUFACTURER_AMD,
+ .mfr_id = CFI_MFR_AMD,
.dev_id = AM29LV400BB,
.name = "AMD AM29LV400BB",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -366,7 +350,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,7)
}
}, {
- .mfr_id = MANUFACTURER_AMD,
+ .mfr_id = CFI_MFR_AMD,
.dev_id = AM29LV400BT,
.name = "AMD AM29LV400BT",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -381,7 +365,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x04000,1)
}
}, {
- .mfr_id = MANUFACTURER_AMD,
+ .mfr_id = CFI_MFR_AMD,
.dev_id = AM29LV800BB,
.name = "AMD AM29LV800BB",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -397,7 +381,7 @@ static const struct amd_flash_info jedec_table[] = {
}
}, {
/* add DL */
- .mfr_id = MANUFACTURER_AMD,
+ .mfr_id = CFI_MFR_AMD,
.dev_id = AM29DL800BB,
.name = "AMD AM29DL800BB",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -414,7 +398,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,14)
}
}, {
- .mfr_id = MANUFACTURER_AMD,
+ .mfr_id = CFI_MFR_AMD,
.dev_id = AM29DL800BT,
.name = "AMD AM29DL800BT",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -431,7 +415,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x04000,1)
}
}, {
- .mfr_id = MANUFACTURER_AMD,
+ .mfr_id = CFI_MFR_AMD,
.dev_id = AM29F800BB,
.name = "AMD AM29F800BB",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -446,7 +430,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,15),
}
}, {
- .mfr_id = MANUFACTURER_AMD,
+ .mfr_id = CFI_MFR_AMD,
.dev_id = AM29LV800BT,
.name = "AMD AM29LV800BT",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -461,7 +445,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x04000,1)
}
}, {
- .mfr_id = MANUFACTURER_AMD,
+ .mfr_id = CFI_MFR_AMD,
.dev_id = AM29F800BT,
.name = "AMD AM29F800BT",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -476,7 +460,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x04000,1)
}
}, {
- .mfr_id = MANUFACTURER_AMD,
+ .mfr_id = CFI_MFR_AMD,
.dev_id = AM29F017D,
.name = "AMD AM29F017D",
.devtypes = CFI_DEVICETYPE_X8,
@@ -488,7 +472,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,32),
}
}, {
- .mfr_id = MANUFACTURER_AMD,
+ .mfr_id = CFI_MFR_AMD,
.dev_id = AM29F016D,
.name = "AMD AM29F016D",
.devtypes = CFI_DEVICETYPE_X8,
@@ -500,7 +484,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,32),
}
}, {
- .mfr_id = MANUFACTURER_AMD,
+ .mfr_id = CFI_MFR_AMD,
.dev_id = AM29F080,
.name = "AMD AM29F080",
.devtypes = CFI_DEVICETYPE_X8,
@@ -512,7 +496,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,16),
}
}, {
- .mfr_id = MANUFACTURER_AMD,
+ .mfr_id = CFI_MFR_AMD,
.dev_id = AM29F040,
.name = "AMD AM29F040",
.devtypes = CFI_DEVICETYPE_X8,
@@ -524,7 +508,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,8),
}
}, {
- .mfr_id = MANUFACTURER_AMD,
+ .mfr_id = CFI_MFR_AMD,
.dev_id = AM29LV040B,
.name = "AMD AM29LV040B",
.devtypes = CFI_DEVICETYPE_X8,
@@ -536,7 +520,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,8),
}
}, {
- .mfr_id = MANUFACTURER_AMD,
+ .mfr_id = CFI_MFR_AMD,
.dev_id = AM29F002T,
.name = "AMD AM29F002T",
.devtypes = CFI_DEVICETYPE_X8,
@@ -551,7 +535,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x04000,1),
}
}, {
- .mfr_id = MANUFACTURER_AMD,
+ .mfr_id = CFI_MFR_AMD,
.dev_id = AM29SL800DT,
.name = "AMD AM29SL800DT",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -566,7 +550,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x04000,1),
}
}, {
- .mfr_id = MANUFACTURER_AMD,
+ .mfr_id = CFI_MFR_AMD,
.dev_id = AM29SL800DB,
.name = "AMD AM29SL800DB",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -581,7 +565,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,15),
}
}, {
- .mfr_id = MANUFACTURER_ATMEL,
+ .mfr_id = CFI_MFR_ATMEL,
.dev_id = AT49BV512,
.name = "Atmel AT49BV512",
.devtypes = CFI_DEVICETYPE_X8,
@@ -593,7 +577,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,1)
}
}, {
- .mfr_id = MANUFACTURER_ATMEL,
+ .mfr_id = CFI_MFR_ATMEL,
.dev_id = AT29LV512,
.name = "Atmel AT29LV512",
.devtypes = CFI_DEVICETYPE_X8,
@@ -606,7 +590,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x80,256)
}
}, {
- .mfr_id = MANUFACTURER_ATMEL,
+ .mfr_id = CFI_MFR_ATMEL,
.dev_id = AT49BV16X,
.name = "Atmel AT49BV16X",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -619,7 +603,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,31)
}
}, {
- .mfr_id = MANUFACTURER_ATMEL,
+ .mfr_id = CFI_MFR_ATMEL,
.dev_id = AT49BV16XT,
.name = "Atmel AT49BV16XT",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -632,7 +616,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x02000,8)
}
}, {
- .mfr_id = MANUFACTURER_ATMEL,
+ .mfr_id = CFI_MFR_ATMEL,
.dev_id = AT49BV32X,
.name = "Atmel AT49BV32X",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -645,7 +629,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,63)
}
}, {
- .mfr_id = MANUFACTURER_ATMEL,
+ .mfr_id = CFI_MFR_ATMEL,
.dev_id = AT49BV32XT,
.name = "Atmel AT49BV32XT",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -658,7 +642,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x02000,8)
}
}, {
- .mfr_id = MANUFACTURER_EON,
+ .mfr_id = CFI_MFR_EON,
.dev_id = EN29SL800BT,
.name = "Eon EN29SL800BT",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -673,7 +657,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x04000,1),
}
}, {
- .mfr_id = MANUFACTURER_EON,
+ .mfr_id = CFI_MFR_EON,
.dev_id = EN29SL800BB,
.name = "Eon EN29SL800BB",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -688,7 +672,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,15),
}
}, {
- .mfr_id = MANUFACTURER_FUJITSU,
+ .mfr_id = CFI_MFR_FUJITSU,
.dev_id = MBM29F040C,
.name = "Fujitsu MBM29F040C",
.devtypes = CFI_DEVICETYPE_X8,
@@ -700,7 +684,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,8)
}
}, {
- .mfr_id = MANUFACTURER_FUJITSU,
+ .mfr_id = CFI_MFR_FUJITSU,
.dev_id = MBM29F800BA,
.name = "Fujitsu MBM29F800BA",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -715,7 +699,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,15),
}
}, {
- .mfr_id = MANUFACTURER_FUJITSU,
+ .mfr_id = CFI_MFR_FUJITSU,
.dev_id = MBM29LV650UE,
.name = "Fujitsu MBM29LV650UE",
.devtypes = CFI_DEVICETYPE_X8,
@@ -727,7 +711,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,128)
}
}, {
- .mfr_id = MANUFACTURER_FUJITSU,
+ .mfr_id = CFI_MFR_FUJITSU,
.dev_id = MBM29LV320TE,
.name = "Fujitsu MBM29LV320TE",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -740,7 +724,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x02000,8)
}
}, {
- .mfr_id = MANUFACTURER_FUJITSU,
+ .mfr_id = CFI_MFR_FUJITSU,
.dev_id = MBM29LV320BE,
.name = "Fujitsu MBM29LV320BE",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -753,7 +737,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,63)
}
}, {
- .mfr_id = MANUFACTURER_FUJITSU,
+ .mfr_id = CFI_MFR_FUJITSU,
.dev_id = MBM29LV160TE,
.name = "Fujitsu MBM29LV160TE",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -768,7 +752,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x04000,1)
}
}, {
- .mfr_id = MANUFACTURER_FUJITSU,
+ .mfr_id = CFI_MFR_FUJITSU,
.dev_id = MBM29LV160BE,
.name = "Fujitsu MBM29LV160BE",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -783,7 +767,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,31)
}
}, {
- .mfr_id = MANUFACTURER_FUJITSU,
+ .mfr_id = CFI_MFR_FUJITSU,
.dev_id = MBM29LV800BA,
.name = "Fujitsu MBM29LV800BA",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -798,7 +782,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,15)
}
}, {
- .mfr_id = MANUFACTURER_FUJITSU,
+ .mfr_id = CFI_MFR_FUJITSU,
.dev_id = MBM29LV800TA,
.name = "Fujitsu MBM29LV800TA",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -813,7 +797,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x04000,1)
}
}, {
- .mfr_id = MANUFACTURER_FUJITSU,
+ .mfr_id = CFI_MFR_FUJITSU,
.dev_id = MBM29LV400BC,
.name = "Fujitsu MBM29LV400BC",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -828,7 +812,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,7)
}
}, {
- .mfr_id = MANUFACTURER_FUJITSU,
+ .mfr_id = CFI_MFR_FUJITSU,
.dev_id = MBM29LV400TC,
.name = "Fujitsu MBM29LV400TC",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -843,7 +827,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x04000,1)
}
}, {
- .mfr_id = MANUFACTURER_HYUNDAI,
+ .mfr_id = CFI_MFR_HYUNDAI,
.dev_id = HY29F002T,
.name = "Hyundai HY29F002T",
.devtypes = CFI_DEVICETYPE_X8,
@@ -858,7 +842,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x04000,1),
}
}, {
- .mfr_id = MANUFACTURER_INTEL,
+ .mfr_id = CFI_MFR_INTEL,
.dev_id = I28F004B3B,
.name = "Intel 28F004B3B",
.devtypes = CFI_DEVICETYPE_X8,
@@ -871,7 +855,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000, 7),
}
}, {
- .mfr_id = MANUFACTURER_INTEL,
+ .mfr_id = CFI_MFR_INTEL,
.dev_id = I28F004B3T,
.name = "Intel 28F004B3T",
.devtypes = CFI_DEVICETYPE_X8,
@@ -884,7 +868,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x02000, 8),
}
}, {
- .mfr_id = MANUFACTURER_INTEL,
+ .mfr_id = CFI_MFR_INTEL,
.dev_id = I28F400B3B,
.name = "Intel 28F400B3B",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -897,7 +881,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000, 7),
}
}, {
- .mfr_id = MANUFACTURER_INTEL,
+ .mfr_id = CFI_MFR_INTEL,
.dev_id = I28F400B3T,
.name = "Intel 28F400B3T",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -910,7 +894,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x02000, 8),
}
}, {
- .mfr_id = MANUFACTURER_INTEL,
+ .mfr_id = CFI_MFR_INTEL,
.dev_id = I28F008B3B,
.name = "Intel 28F008B3B",
.devtypes = CFI_DEVICETYPE_X8,
@@ -923,7 +907,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000, 15),
}
}, {
- .mfr_id = MANUFACTURER_INTEL,
+ .mfr_id = CFI_MFR_INTEL,
.dev_id = I28F008B3T,
.name = "Intel 28F008B3T",
.devtypes = CFI_DEVICETYPE_X8,
@@ -936,7 +920,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x02000, 8),
}
}, {
- .mfr_id = MANUFACTURER_INTEL,
+ .mfr_id = CFI_MFR_INTEL,
.dev_id = I28F008S5,
.name = "Intel 28F008S5",
.devtypes = CFI_DEVICETYPE_X8,
@@ -948,7 +932,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,16),
}
}, {
- .mfr_id = MANUFACTURER_INTEL,
+ .mfr_id = CFI_MFR_INTEL,
.dev_id = I28F016S5,
.name = "Intel 28F016S5",
.devtypes = CFI_DEVICETYPE_X8,
@@ -960,7 +944,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,32),
}
}, {
- .mfr_id = MANUFACTURER_INTEL,
+ .mfr_id = CFI_MFR_INTEL,
.dev_id = I28F008SA,
.name = "Intel 28F008SA",
.devtypes = CFI_DEVICETYPE_X8,
@@ -972,7 +956,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000, 16),
}
}, {
- .mfr_id = MANUFACTURER_INTEL,
+ .mfr_id = CFI_MFR_INTEL,
.dev_id = I28F800B3B,
.name = "Intel 28F800B3B",
.devtypes = CFI_DEVICETYPE_X16,
@@ -985,7 +969,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000, 15),
}
}, {
- .mfr_id = MANUFACTURER_INTEL,
+ .mfr_id = CFI_MFR_INTEL,
.dev_id = I28F800B3T,
.name = "Intel 28F800B3T",
.devtypes = CFI_DEVICETYPE_X16,
@@ -998,7 +982,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x02000, 8),
}
}, {
- .mfr_id = MANUFACTURER_INTEL,
+ .mfr_id = CFI_MFR_INTEL,
.dev_id = I28F016B3B,
.name = "Intel 28F016B3B",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1011,7 +995,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000, 31),
}
}, {
- .mfr_id = MANUFACTURER_INTEL,
+ .mfr_id = CFI_MFR_INTEL,
.dev_id = I28F016S3,
.name = "Intel I28F016S3",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1023,7 +1007,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000, 32),
}
}, {
- .mfr_id = MANUFACTURER_INTEL,
+ .mfr_id = CFI_MFR_INTEL,
.dev_id = I28F016B3T,
.name = "Intel 28F016B3T",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1036,7 +1020,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x02000, 8),
}
}, {
- .mfr_id = MANUFACTURER_INTEL,
+ .mfr_id = CFI_MFR_INTEL,
.dev_id = I28F160B3B,
.name = "Intel 28F160B3B",
.devtypes = CFI_DEVICETYPE_X16,
@@ -1049,7 +1033,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000, 31),
}
}, {
- .mfr_id = MANUFACTURER_INTEL,
+ .mfr_id = CFI_MFR_INTEL,
.dev_id = I28F160B3T,
.name = "Intel 28F160B3T",
.devtypes = CFI_DEVICETYPE_X16,
@@ -1062,7 +1046,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x02000, 8),
}
}, {
- .mfr_id = MANUFACTURER_INTEL,
+ .mfr_id = CFI_MFR_INTEL,
.dev_id = I28F320B3B,
.name = "Intel 28F320B3B",
.devtypes = CFI_DEVICETYPE_X16,
@@ -1075,7 +1059,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000, 63),
}
}, {
- .mfr_id = MANUFACTURER_INTEL,
+ .mfr_id = CFI_MFR_INTEL,
.dev_id = I28F320B3T,
.name = "Intel 28F320B3T",
.devtypes = CFI_DEVICETYPE_X16,
@@ -1088,7 +1072,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x02000, 8),
}
}, {
- .mfr_id = MANUFACTURER_INTEL,
+ .mfr_id = CFI_MFR_INTEL,
.dev_id = I28F640B3B,
.name = "Intel 28F640B3B",
.devtypes = CFI_DEVICETYPE_X16,
@@ -1101,7 +1085,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000, 127),
}
}, {
- .mfr_id = MANUFACTURER_INTEL,
+ .mfr_id = CFI_MFR_INTEL,
.dev_id = I28F640B3T,
.name = "Intel 28F640B3T",
.devtypes = CFI_DEVICETYPE_X16,
@@ -1114,7 +1098,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x02000, 8),
}
}, {
- .mfr_id = MANUFACTURER_INTEL,
+ .mfr_id = CFI_MFR_INTEL,
.dev_id = I28F640C3B,
.name = "Intel 28F640C3B",
.devtypes = CFI_DEVICETYPE_X16,
@@ -1127,7 +1111,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000, 127),
}
}, {
- .mfr_id = MANUFACTURER_INTEL,
+ .mfr_id = CFI_MFR_INTEL,
.dev_id = I82802AB,
.name = "Intel 82802AB",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1139,7 +1123,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,8),
}
}, {
- .mfr_id = MANUFACTURER_INTEL,
+ .mfr_id = CFI_MFR_INTEL,
.dev_id = I82802AC,
.name = "Intel 82802AC",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1151,7 +1135,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,16),
}
}, {
- .mfr_id = MANUFACTURER_MACRONIX,
+ .mfr_id = CFI_MFR_MACRONIX,
.dev_id = MX29LV040C,
.name = "Macronix MX29LV040C",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1163,7 +1147,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,8),
}
}, {
- .mfr_id = MANUFACTURER_MACRONIX,
+ .mfr_id = CFI_MFR_MACRONIX,
.dev_id = MX29LV160T,
.name = "MXIC MX29LV160T",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -1178,7 +1162,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x04000,1)
}
}, {
- .mfr_id = MANUFACTURER_NEC,
+ .mfr_id = CFI_MFR_NEC,
.dev_id = UPD29F064115,
.name = "NEC uPD29F064115",
.devtypes = CFI_DEVICETYPE_X16,
@@ -1192,7 +1176,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x2000,8),
}
}, {
- .mfr_id = MANUFACTURER_MACRONIX,
+ .mfr_id = CFI_MFR_MACRONIX,
.dev_id = MX29LV160B,
.name = "MXIC MX29LV160B",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -1207,7 +1191,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,31)
}
}, {
- .mfr_id = MANUFACTURER_MACRONIX,
+ .mfr_id = CFI_MFR_MACRONIX,
.dev_id = MX29F040,
.name = "Macronix MX29F040",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1219,7 +1203,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,8),
}
}, {
- .mfr_id = MANUFACTURER_MACRONIX,
+ .mfr_id = CFI_MFR_MACRONIX,
.dev_id = MX29F016,
.name = "Macronix MX29F016",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1231,7 +1215,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,32),
}
}, {
- .mfr_id = MANUFACTURER_MACRONIX,
+ .mfr_id = CFI_MFR_MACRONIX,
.dev_id = MX29F004T,
.name = "Macronix MX29F004T",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1246,7 +1230,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x04000,1),
}
}, {
- .mfr_id = MANUFACTURER_MACRONIX,
+ .mfr_id = CFI_MFR_MACRONIX,
.dev_id = MX29F004B,
.name = "Macronix MX29F004B",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1261,7 +1245,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,7),
}
}, {
- .mfr_id = MANUFACTURER_MACRONIX,
+ .mfr_id = CFI_MFR_MACRONIX,
.dev_id = MX29F002T,
.name = "Macronix MX29F002T",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1276,7 +1260,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x04000,1),
}
}, {
- .mfr_id = MANUFACTURER_PMC,
+ .mfr_id = CFI_MFR_PMC,
.dev_id = PM49FL002,
.name = "PMC Pm49FL002",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1288,7 +1272,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO( 0x01000, 64 )
}
}, {
- .mfr_id = MANUFACTURER_PMC,
+ .mfr_id = CFI_MFR_PMC,
.dev_id = PM49FL004,
.name = "PMC Pm49FL004",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1300,7 +1284,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO( 0x01000, 128 )
}
}, {
- .mfr_id = MANUFACTURER_PMC,
+ .mfr_id = CFI_MFR_PMC,
.dev_id = PM49FL008,
.name = "PMC Pm49FL008",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1312,7 +1296,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO( 0x01000, 256 )
}
}, {
- .mfr_id = MANUFACTURER_SHARP,
+ .mfr_id = CFI_MFR_SHARP,
.dev_id = LH28F640BF,
.name = "LH28F640BF",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1324,7 +1308,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x40000,16),
}
}, {
- .mfr_id = MANUFACTURER_SST,
+ .mfr_id = CFI_MFR_SST,
.dev_id = SST39LF512,
.name = "SST 39LF512",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1336,7 +1320,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x01000,16),
}
}, {
- .mfr_id = MANUFACTURER_SST,
+ .mfr_id = CFI_MFR_SST,
.dev_id = SST39LF010,
.name = "SST 39LF010",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1348,8 +1332,8 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x01000,32),
}
}, {
- .mfr_id = MANUFACTURER_SST,
- .dev_id = SST29EE020,
+ .mfr_id = CFI_MFR_SST,
+ .dev_id = SST29EE020,
.name = "SST 29EE020",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x5555_0x2AAA,
@@ -1359,9 +1343,9 @@ static const struct amd_flash_info jedec_table[] = {
.regions = {ERASEINFO(0x01000,64),
}
}, {
- .mfr_id = MANUFACTURER_SST,
+ .mfr_id = CFI_MFR_SST,
.dev_id = SST29LE020,
- .name = "SST 29LE020",
+ .name = "SST 29LE020",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x5555_0x2AAA,
.dev_size = SIZE_256KiB,
@@ -1370,7 +1354,7 @@ static const struct amd_flash_info jedec_table[] = {
.regions = {ERASEINFO(0x01000,64),
}
}, {
- .mfr_id = MANUFACTURER_SST,
+ .mfr_id = CFI_MFR_SST,
.dev_id = SST39LF020,
.name = "SST 39LF020",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1382,7 +1366,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x01000,64),
}
}, {
- .mfr_id = MANUFACTURER_SST,
+ .mfr_id = CFI_MFR_SST,
.dev_id = SST39LF040,
.name = "SST 39LF040",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1394,7 +1378,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x01000,128),
}
}, {
- .mfr_id = MANUFACTURER_SST,
+ .mfr_id = CFI_MFR_SST,
.dev_id = SST39SF010A,
.name = "SST 39SF010A",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1406,7 +1390,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x01000,32),
}
}, {
- .mfr_id = MANUFACTURER_SST,
+ .mfr_id = CFI_MFR_SST,
.dev_id = SST39SF020A,
.name = "SST 39SF020A",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1418,7 +1402,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x01000,64),
}
}, {
- .mfr_id = MANUFACTURER_SST,
+ .mfr_id = CFI_MFR_SST,
.dev_id = SST39SF040,
.name = "SST 39SF040",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1430,7 +1414,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x01000,128),
}
}, {
- .mfr_id = MANUFACTURER_SST,
+ .mfr_id = CFI_MFR_SST,
.dev_id = SST49LF040B,
.name = "SST 49LF040B",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1443,7 +1427,7 @@ static const struct amd_flash_info jedec_table[] = {
}
}, {
- .mfr_id = MANUFACTURER_SST,
+ .mfr_id = CFI_MFR_SST,
.dev_id = SST49LF004B,
.name = "SST 49LF004B",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1455,7 +1439,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x01000,128),
}
}, {
- .mfr_id = MANUFACTURER_SST,
+ .mfr_id = CFI_MFR_SST,
.dev_id = SST49LF008A,
.name = "SST 49LF008A",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1467,7 +1451,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x01000,256),
}
}, {
- .mfr_id = MANUFACTURER_SST,
+ .mfr_id = CFI_MFR_SST,
.dev_id = SST49LF030A,
.name = "SST 49LF030A",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1479,7 +1463,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x01000,96),
}
}, {
- .mfr_id = MANUFACTURER_SST,
+ .mfr_id = CFI_MFR_SST,
.dev_id = SST49LF040A,
.name = "SST 49LF040A",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1491,7 +1475,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x01000,128),
}
}, {
- .mfr_id = MANUFACTURER_SST,
+ .mfr_id = CFI_MFR_SST,
.dev_id = SST49LF080A,
.name = "SST 49LF080A",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1503,7 +1487,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x01000,256),
}
}, {
- .mfr_id = MANUFACTURER_SST, /* should be CFI */
+ .mfr_id = CFI_MFR_SST, /* should be CFI */
.dev_id = SST39LF160,
.name = "SST 39LF160",
.devtypes = CFI_DEVICETYPE_X16,
@@ -1516,7 +1500,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x1000,256)
}
}, {
- .mfr_id = MANUFACTURER_SST, /* should be CFI */
+ .mfr_id = CFI_MFR_SST, /* should be CFI */
.dev_id = SST39VF1601,
.name = "SST 39VF1601",
.devtypes = CFI_DEVICETYPE_X16,
@@ -1529,7 +1513,35 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x1000,256)
}
}, {
- .mfr_id = MANUFACTURER_SST, /* should be CFI */
+ /* CFI is broken: reports AMD_STD, but needs custom uaddr */
+ .mfr_id = CFI_MFR_SST,
+ .dev_id = SST39WF1601,
+ .name = "SST 39WF1601",
+ .devtypes = CFI_DEVICETYPE_X16,
+ .uaddr = MTD_UADDR_0xAAAA_0x5555,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 2,
+ .regions = {
+ ERASEINFO(0x1000,256),
+ ERASEINFO(0x1000,256)
+ }
+ }, {
+ /* CFI is broken: reports AMD_STD, but needs custom uaddr */
+ .mfr_id = CFI_MFR_SST,
+ .dev_id = SST39WF1602,
+ .name = "SST 39WF1602",
+ .devtypes = CFI_DEVICETYPE_X16,
+ .uaddr = MTD_UADDR_0xAAAA_0x5555,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 2,
+ .regions = {
+ ERASEINFO(0x1000,256),
+ ERASEINFO(0x1000,256)
+ }
+ }, {
+ .mfr_id = CFI_MFR_SST, /* should be CFI */
.dev_id = SST39VF3201,
.name = "SST 39VF3201",
.devtypes = CFI_DEVICETYPE_X16,
@@ -1544,7 +1556,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x1000,256)
}
}, {
- .mfr_id = MANUFACTURER_SST,
+ .mfr_id = CFI_MFR_SST,
.dev_id = SST36VF3203,
.name = "SST 36VF3203",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -1556,7 +1568,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,64),
}
}, {
- .mfr_id = MANUFACTURER_ST,
+ .mfr_id = CFI_MFR_ST,
.dev_id = M29F800AB,
.name = "ST M29F800AB",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -1571,7 +1583,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,15),
}
}, {
- .mfr_id = MANUFACTURER_ST, /* FIXME - CFI device? */
+ .mfr_id = CFI_MFR_ST, /* FIXME - CFI device? */
.dev_id = M29W800DT,
.name = "ST M29W800DT",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -1586,7 +1598,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x04000,1)
}
}, {
- .mfr_id = MANUFACTURER_ST, /* FIXME - CFI device? */
+ .mfr_id = CFI_MFR_ST, /* FIXME - CFI device? */
.dev_id = M29W800DB,
.name = "ST M29W800DB",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -1601,7 +1613,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,15)
}
}, {
- .mfr_id = MANUFACTURER_ST,
+ .mfr_id = CFI_MFR_ST,
.dev_id = M29W400DT,
.name = "ST M29W400DT",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -1616,7 +1628,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,1)
}
}, {
- .mfr_id = MANUFACTURER_ST,
+ .mfr_id = CFI_MFR_ST,
.dev_id = M29W400DB,
.name = "ST M29W400DB",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -1631,7 +1643,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,7)
}
}, {
- .mfr_id = MANUFACTURER_ST, /* FIXME - CFI device? */
+ .mfr_id = CFI_MFR_ST, /* FIXME - CFI device? */
.dev_id = M29W160DT,
.name = "ST M29W160DT",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -1646,7 +1658,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x04000,1)
}
}, {
- .mfr_id = MANUFACTURER_ST, /* FIXME - CFI device? */
+ .mfr_id = CFI_MFR_ST, /* FIXME - CFI device? */
.dev_id = M29W160DB,
.name = "ST M29W160DB",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -1661,7 +1673,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,31)
}
}, {
- .mfr_id = MANUFACTURER_ST,
+ .mfr_id = CFI_MFR_ST,
.dev_id = M29W040B,
.name = "ST M29W040B",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1673,7 +1685,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,8),
}
}, {
- .mfr_id = MANUFACTURER_ST,
+ .mfr_id = CFI_MFR_ST,
.dev_id = M50FW040,
.name = "ST M50FW040",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1685,7 +1697,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,8),
}
}, {
- .mfr_id = MANUFACTURER_ST,
+ .mfr_id = CFI_MFR_ST,
.dev_id = M50FW080,
.name = "ST M50FW080",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1697,7 +1709,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,16),
}
}, {
- .mfr_id = MANUFACTURER_ST,
+ .mfr_id = CFI_MFR_ST,
.dev_id = M50FW016,
.name = "ST M50FW016",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1709,7 +1721,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,32),
}
}, {
- .mfr_id = MANUFACTURER_ST,
+ .mfr_id = CFI_MFR_ST,
.dev_id = M50LPW080,
.name = "ST M50LPW080",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1721,7 +1733,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,16),
},
}, {
- .mfr_id = MANUFACTURER_ST,
+ .mfr_id = CFI_MFR_ST,
.dev_id = M50FLW080A,
.name = "ST M50FLW080A",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1736,7 +1748,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x1000,16),
}
}, {
- .mfr_id = MANUFACTURER_ST,
+ .mfr_id = CFI_MFR_ST,
.dev_id = M50FLW080B,
.name = "ST M50FLW080B",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1751,7 +1763,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x1000,16),
}
}, {
- .mfr_id = 0xff00 | MANUFACTURER_ST,
+ .mfr_id = 0xff00 | CFI_MFR_ST,
.dev_id = 0xff00 | PSD4256G6V,
.name = "ST PSD4256G6V",
.devtypes = CFI_DEVICETYPE_X16,
@@ -1763,7 +1775,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,16),
}
}, {
- .mfr_id = MANUFACTURER_TOSHIBA,
+ .mfr_id = CFI_MFR_TOSHIBA,
.dev_id = TC58FVT160,
.name = "Toshiba TC58FVT160",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -1778,7 +1790,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x04000,1)
}
}, {
- .mfr_id = MANUFACTURER_TOSHIBA,
+ .mfr_id = CFI_MFR_TOSHIBA,
.dev_id = TC58FVB160,
.name = "Toshiba TC58FVB160",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -1793,7 +1805,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,31)
}
}, {
- .mfr_id = MANUFACTURER_TOSHIBA,
+ .mfr_id = CFI_MFR_TOSHIBA,
.dev_id = TC58FVB321,
.name = "Toshiba TC58FVB321",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -1806,7 +1818,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,63)
}
}, {
- .mfr_id = MANUFACTURER_TOSHIBA,
+ .mfr_id = CFI_MFR_TOSHIBA,
.dev_id = TC58FVT321,
.name = "Toshiba TC58FVT321",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -1819,7 +1831,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x02000,8)
}
}, {
- .mfr_id = MANUFACTURER_TOSHIBA,
+ .mfr_id = CFI_MFR_TOSHIBA,
.dev_id = TC58FVB641,
.name = "Toshiba TC58FVB641",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -1832,7 +1844,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,127)
}
}, {
- .mfr_id = MANUFACTURER_TOSHIBA,
+ .mfr_id = CFI_MFR_TOSHIBA,
.dev_id = TC58FVT641,
.name = "Toshiba TC58FVT641",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
@@ -1845,7 +1857,7 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x02000,8)
}
}, {
- .mfr_id = MANUFACTURER_WINBOND,
+ .mfr_id = CFI_MFR_WINBOND,
.dev_id = W49V002A,
.name = "Winbond W49V002A",
.devtypes = CFI_DEVICETYPE_X8,
@@ -1878,7 +1890,7 @@ static inline u32 jedec_read_mfr(struct map_info *map, uint32_t base,
mask = (1 << (cfi->device_type * 8)) - 1;
result = map_read(map, base + ofs);
bank++;
- } while ((result.x[0] & mask) == CONTINUATION_CODE);
+ } while ((result.x[0] & mask) == CFI_MFR_CONTINUATION);
return result.x[0] & mask;
}
@@ -1969,7 +1981,7 @@ static int cfi_jedec_setup(struct cfi_private *p_cfi, int index)
p_cfi->addr_unlock1 = unlock_addrs[uaddr].addr1 / p_cfi->device_type;
p_cfi->addr_unlock2 = unlock_addrs[uaddr].addr2 / p_cfi->device_type;
- return 1; /* ok */
+ return 1; /* ok */
}
diff --git a/drivers/mtd/devices/Makefile b/drivers/mtd/devices/Makefile
index ab5c9b92ac82..f3226b1d38fc 100644
--- a/drivers/mtd/devices/Makefile
+++ b/drivers/mtd/devices/Makefile
@@ -1,5 +1,5 @@
#
-# linux/drivers/devices/Makefile
+# linux/drivers/mtd/devices/Makefile
#
obj-$(CONFIG_MTD_DOC2000) += doc2000.o
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index ce6424008ed9..93651865ddbe 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -276,12 +276,10 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size)
/* Setup the MTD structure */
/* make the name contain the block device in */
- name = kmalloc(sizeof("block2mtd: ") + strlen(devname) + 1,
- GFP_KERNEL);
+ name = kasprintf(GFP_KERNEL, "block2mtd: %s", devname);
if (!name)
goto devinit_err;
- sprintf(name, "block2mtd: %s", devname);
dev->mtd.name = name;
dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK;
diff --git a/drivers/mtd/devices/pmc551.c b/drivers/mtd/devices/pmc551.c
index d2fd550f7e09..fc8ea0a57ac2 100644
--- a/drivers/mtd/devices/pmc551.c
+++ b/drivers/mtd/devices/pmc551.c
@@ -668,7 +668,7 @@ static int __init init_pmc551(void)
{
struct pci_dev *PCI_Device = NULL;
struct mypriv *priv;
- int count, found = 0;
+ int found = 0;
struct mtd_info *mtd;
u32 length = 0;
@@ -695,7 +695,7 @@ static int __init init_pmc551(void)
/*
* PCU-bus chipset probe.
*/
- for (count = 0; count < MAX_MTD_DEVICES; count++) {
+ for (;;) {
if ((PCI_Device = pci_get_device(PCI_VENDOR_ID_V3_SEMI,
PCI_DEVICE_ID_V3_SEMI_V370PDC,
diff --git a/drivers/mtd/devices/sst25l.c b/drivers/mtd/devices/sst25l.c
index fe17054ee2fe..ab5d8cd02a15 100644
--- a/drivers/mtd/devices/sst25l.c
+++ b/drivers/mtd/devices/sst25l.c
@@ -73,15 +73,25 @@ static struct flash_info __initdata sst25l_flash_info[] = {
static int sst25l_status(struct sst25l_flash *flash, int *status)
{
- unsigned char command, response;
+ struct spi_message m;
+ struct spi_transfer t;
+ unsigned char cmd_resp[2];
int err;
- command = SST25L_CMD_RDSR;
- err = spi_write_then_read(flash->spi, &command, 1, &response, 1);
+ spi_message_init(&m);
+ memset(&t, 0, sizeof(struct spi_transfer));
+
+ cmd_resp[0] = SST25L_CMD_RDSR;
+ cmd_resp[1] = 0xff;
+ t.tx_buf = cmd_resp;
+ t.rx_buf = cmd_resp;
+ t.len = sizeof(cmd_resp);
+ spi_message_add_tail(&t, &m);
+ err = spi_sync(flash->spi, &m);
if (err < 0)
return err;
- *status = response;
+ *status = cmd_resp[1];
return 0;
}
@@ -328,33 +338,32 @@ out:
static struct flash_info *__init sst25l_match_device(struct spi_device *spi)
{
struct flash_info *flash_info = NULL;
- unsigned char command[4], response;
+ struct spi_message m;
+ struct spi_transfer t;
+ unsigned char cmd_resp[6];
int i, err;
uint16_t id;
- command[0] = SST25L_CMD_READ_ID;
- command[1] = 0;
- command[2] = 0;
- command[3] = 0;
- err = spi_write_then_read(spi, command, sizeof(command), &response, 1);
+ spi_message_init(&m);
+ memset(&t, 0, sizeof(struct spi_transfer));
+
+ cmd_resp[0] = SST25L_CMD_READ_ID;
+ cmd_resp[1] = 0;
+ cmd_resp[2] = 0;
+ cmd_resp[3] = 0;
+ cmd_resp[4] = 0xff;
+ cmd_resp[5] = 0xff;
+ t.tx_buf = cmd_resp;
+ t.rx_buf = cmd_resp;
+ t.len = sizeof(cmd_resp);
+ spi_message_add_tail(&t, &m);
+ err = spi_sync(spi, &m);
if (err < 0) {
- dev_err(&spi->dev, "error reading device id msb\n");
+ dev_err(&spi->dev, "error reading device id\n");
return NULL;
}
- id = response << 8;
-
- command[0] = SST25L_CMD_READ_ID;
- command[1] = 0;
- command[2] = 0;
- command[3] = 1;
- err = spi_write_then_read(spi, command, sizeof(command), &response, 1);
- if (err < 0) {
- dev_err(&spi->dev, "error reading device id lsb\n");
- return NULL;
- }
-
- id |= response;
+ id = (cmd_resp[4] << 8) | cmd_resp[5];
for (i = 0; i < ARRAY_SIZE(sst25l_flash_info); i++)
if (sst25l_flash_info[i].device_id == id)
@@ -411,17 +420,6 @@ static int __init sst25l_probe(struct spi_device *spi)
flash->mtd.erasesize, flash->mtd.erasesize / 1024,
flash->mtd.numeraseregions);
- if (flash->mtd.numeraseregions)
- for (i = 0; i < flash->mtd.numeraseregions; i++)
- DEBUG(MTD_DEBUG_LEVEL2,
- "mtd.eraseregions[%d] = { .offset = 0x%llx, "
- ".erasesize = 0x%.8x (%uKiB), "
- ".numblocks = %d }\n",
- i, (long long)flash->mtd.eraseregions[i].offset,
- flash->mtd.eraseregions[i].erasesize,
- flash->mtd.eraseregions[i].erasesize / 1024,
- flash->mtd.eraseregions[i].numblocks);
-
if (mtd_has_partitions()) {
struct mtd_partition *parts = NULL;
int nr_parts = 0;
diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c
index e56d6b42f020..62da9eb7032b 100644
--- a/drivers/mtd/ftl.c
+++ b/drivers/mtd/ftl.c
@@ -1082,7 +1082,6 @@ static void ftl_remove_dev(struct mtd_blktrans_dev *dev)
{
del_mtd_blktrans_dev(dev);
ftl_freepart((partition_t *)dev);
- kfree(dev);
}
static struct mtd_blktrans_ops ftl_tr = {
diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
index 8aca5523a337..015a7fe1b6ee 100644
--- a/drivers/mtd/inftlcore.c
+++ b/drivers/mtd/inftlcore.c
@@ -139,7 +139,6 @@ static void inftl_remove_dev(struct mtd_blktrans_dev *dev)
kfree(inftl->PUtable);
kfree(inftl->VUtable);
- kfree(inftl);
}
/*
diff --git a/drivers/mtd/inftlmount.c b/drivers/mtd/inftlmount.c
index 32e82aef3e53..8f988d7d3c5c 100644
--- a/drivers/mtd/inftlmount.c
+++ b/drivers/mtd/inftlmount.c
@@ -100,9 +100,10 @@ static int find_boot_record(struct INFTLrecord *inftl)
}
/* To be safer with BIOS, also use erase mark as discriminant */
- if ((ret = inftl_read_oob(mtd, block * inftl->EraseSize +
- SECTORSIZE + 8, 8, &retlen,
- (char *)&h1) < 0)) {
+ ret = inftl_read_oob(mtd,
+ block * inftl->EraseSize + SECTORSIZE + 8,
+ 8, &retlen,(char *)&h1);
+ if (ret < 0) {
printk(KERN_WARNING "INFTL: ANAND header found at "
"0x%x in mtd%d, but OOB data read failed "
"(err %d)\n", block * inftl->EraseSize,
diff --git a/drivers/mtd/lpddr/lpddr_cmds.c b/drivers/mtd/lpddr/lpddr_cmds.c
index a73ee12aad81..fece5be58715 100644
--- a/drivers/mtd/lpddr/lpddr_cmds.c
+++ b/drivers/mtd/lpddr/lpddr_cmds.c
@@ -107,8 +107,7 @@ struct mtd_info *lpddr_cmdset(struct map_info *map)
/* those should be reset too since
they create memory references. */
init_waitqueue_head(&chip->wq);
- spin_lock_init(&chip->_spinlock);
- chip->mutex = &chip->_spinlock;
+ mutex_init(&chip->mutex);
chip++;
}
}
@@ -144,7 +143,7 @@ static int wait_for_ready(struct map_info *map, struct flchip *chip,
}
/* OK Still waiting. Drop the lock, wait a while and retry. */
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
if (sleep_time >= 1000000/HZ) {
/*
* Half of the normal delay still remaining
@@ -159,17 +158,17 @@ static int wait_for_ready(struct map_info *map, struct flchip *chip,
cond_resched();
timeo--;
}
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
while (chip->state != chip_state) {
/* Someone's suspended the operation: sleep */
DECLARE_WAITQUEUE(wait, current);
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
}
if (chip->erase_suspended || chip->write_suspended) {
/* Suspend has occurred while sleeping: reset timeout */
@@ -230,20 +229,20 @@ static int get_chip(struct map_info *map, struct flchip *chip, int mode)
* it'll happily send us to sleep. In any case, when
* get_chip returns success we're clear to go ahead.
*/
- ret = spin_trylock(contender->mutex);
+ ret = mutex_trylock(&contender->mutex);
spin_unlock(&shared->lock);
if (!ret)
goto retry;
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
ret = chip_ready(map, contender, mode);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
if (ret == -EAGAIN) {
- spin_unlock(contender->mutex);
+ mutex_unlock(&contender->mutex);
goto retry;
}
if (ret) {
- spin_unlock(contender->mutex);
+ mutex_unlock(&contender->mutex);
return ret;
}
spin_lock(&shared->lock);
@@ -252,10 +251,10 @@ static int get_chip(struct map_info *map, struct flchip *chip, int mode)
* state. Put contender and retry. */
if (chip->state == FL_SYNCING) {
put_chip(map, contender);
- spin_unlock(contender->mutex);
+ mutex_unlock(&contender->mutex);
goto retry;
}
- spin_unlock(contender->mutex);
+ mutex_unlock(&contender->mutex);
}
/* Check if we have suspended erase on this chip.
@@ -265,10 +264,10 @@ static int get_chip(struct map_info *map, struct flchip *chip, int mode)
spin_unlock(&shared->lock);
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
goto retry;
}
@@ -337,10 +336,10 @@ static int chip_ready(struct map_info *map, struct flchip *chip, int mode)
sleep:
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
return -EAGAIN;
}
}
@@ -356,12 +355,12 @@ static void put_chip(struct map_info *map, struct flchip *chip)
if (shared->writing && shared->writing != chip) {
/* give back the ownership */
struct flchip *loaner = shared->writing;
- spin_lock(loaner->mutex);
+ mutex_lock(&loaner->mutex);
spin_unlock(&shared->lock);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
put_chip(map, loaner);
- spin_lock(chip->mutex);
- spin_unlock(loaner->mutex);
+ mutex_lock(&chip->mutex);
+ mutex_unlock(&loaner->mutex);
wake_up(&chip->wq);
return;
}
@@ -414,10 +413,10 @@ int do_write_buffer(struct map_info *map, struct flchip *chip,
wbufsize = 1 << lpddr->qinfo->BufSizeShift;
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, FL_WRITING);
if (ret) {
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
/* Figure out the number of words to write */
@@ -478,7 +477,7 @@ int do_write_buffer(struct map_info *map, struct flchip *chip,
}
out: put_chip(map, chip);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -490,10 +489,10 @@ int do_erase_oneblock(struct mtd_info *mtd, loff_t adr)
struct flchip *chip = &lpddr->chips[chipnum];
int ret;
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, FL_ERASING);
if (ret) {
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
send_pfow_command(map, LPDDR_BLOCK_ERASE, adr, 0, NULL);
@@ -505,7 +504,7 @@ int do_erase_oneblock(struct mtd_info *mtd, loff_t adr)
goto out;
}
out: put_chip(map, chip);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -518,10 +517,10 @@ static int lpddr_read(struct mtd_info *mtd, loff_t adr, size_t len,
struct flchip *chip = &lpddr->chips[chipnum];
int ret = 0;
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, FL_READY);
if (ret) {
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -529,7 +528,7 @@ static int lpddr_read(struct mtd_info *mtd, loff_t adr, size_t len,
*retlen = len;
put_chip(map, chip);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -569,9 +568,9 @@ static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len,
else
thislen = len;
/* get the chip */
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, FL_POINT);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
if (ret)
break;
@@ -611,7 +610,7 @@ static void lpddr_unpoint (struct mtd_info *mtd, loff_t adr, size_t len)
else
thislen = len;
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
if (chip->state == FL_POINT) {
chip->ref_point_counter--;
if (chip->ref_point_counter == 0)
@@ -621,7 +620,7 @@ static void lpddr_unpoint (struct mtd_info *mtd, loff_t adr, size_t len)
"pointed region\n", map->name);
put_chip(map, chip);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
len -= thislen;
ofs = 0;
@@ -727,10 +726,10 @@ int do_xxlock(struct mtd_info *mtd, loff_t adr, uint32_t len, int thunk)
int chipnum = adr >> lpddr->chipshift;
struct flchip *chip = &lpddr->chips[chipnum];
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, FL_LOCKING);
if (ret) {
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -750,7 +749,7 @@ int do_xxlock(struct mtd_info *mtd, loff_t adr, uint32_t len, int thunk)
goto out;
}
out: put_chip(map, chip);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -771,10 +770,10 @@ int word_program(struct map_info *map, loff_t adr, uint32_t curval)
int chipnum = adr >> lpddr->chipshift;
struct flchip *chip = &lpddr->chips[chipnum];
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, FL_WRITING);
if (ret) {
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -788,7 +787,7 @@ int word_program(struct map_info *map, loff_t adr, uint32_t curval)
}
out: put_chip(map, chip);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
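/*
 * Note on the lpddr conversion above: chip->mutex is now a struct mutex
 * embedded in struct flchip, so the driver may sleep while holding it.
 * A minimal, hypothetical sketch (not part of the patch) of the wait
 * pattern used throughout: drop the lock, sleep on chip->wq, re-take
 * the lock and re-check the state.
 */
static void example_wait_for_chip(struct flchip *chip)
{
	DECLARE_WAITQUEUE(wait, current);

	mutex_lock(&chip->mutex);
	while (chip->state != FL_READY) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);	/* never sleep with the lock held */
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		mutex_lock(&chip->mutex);	/* re-take and re-check chip->state */
	}
	mutex_unlock(&chip->mutex);
}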
diff --git a/drivers/mtd/lpddr/qinfo_probe.c b/drivers/mtd/lpddr/qinfo_probe.c
index 79bf40f48b75..dbfe17baf046 100644
--- a/drivers/mtd/lpddr/qinfo_probe.c
+++ b/drivers/mtd/lpddr/qinfo_probe.c
@@ -134,13 +134,12 @@ out:
static int lpddr_chip_setup(struct map_info *map, struct lpddr_private *lpddr)
{
- lpddr->qinfo = kmalloc(sizeof(struct qinfo_chip), GFP_KERNEL);
+ lpddr->qinfo = kzalloc(sizeof(struct qinfo_chip), GFP_KERNEL);
if (!lpddr->qinfo) {
printk(KERN_WARNING "%s: no memory for LPDDR qinfo structure\n",
map->name);
return 0;
}
- memset(lpddr->qinfo, 0, sizeof(struct qinfo_chip));
/* Get the ManuID */
lpddr->ManufactId = CMDVAL(map_read(map, map->pfow_base + PFOW_MANUFACTURER_ID));
@@ -185,13 +184,11 @@ static struct lpddr_private *lpddr_probe_chip(struct map_info *map)
lpddr.numchips = 1;
numvirtchips = lpddr.numchips * lpddr.qinfo->HWPartsNum;
- retlpddr = kmalloc(sizeof(struct lpddr_private) +
+ retlpddr = kzalloc(sizeof(struct lpddr_private) +
numvirtchips * sizeof(struct flchip), GFP_KERNEL);
if (!retlpddr)
return NULL;
- memset(retlpddr, 0, sizeof(struct lpddr_private) +
- numvirtchips * sizeof(struct flchip));
memcpy(retlpddr, &lpddr, sizeof(struct lpddr_private));
retlpddr->numchips = numvirtchips;
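/*
 * The qinfo_probe changes above are a mechanical kmalloc()+memset(0) ->
 * kzalloc() conversion; the allocator returns zeroed memory.  Hedged
 * fragment illustrating the equivalence (hypothetical caller):
 */
struct qinfo_chip *q = kzalloc(sizeof(*q), GFP_KERNEL);	/* zeroed on return */
if (!q)
	return NULL;	/* no separate memset() needed */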
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index aa2807d0ce72..f22bc9f05ddb 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -435,7 +435,7 @@ config MTD_PCI
config MTD_PCMCIA
tristate "PCMCIA MTD driver"
- depends on PCMCIA && MTD_COMPLEX_MAPPINGS && BROKEN
+ depends on PCMCIA && MTD_COMPLEX_MAPPINGS
help
Map driver for accessing PCMCIA linear flash memory cards. These
cards are usually around 4-16MiB in size. This does not include
diff --git a/drivers/mtd/maps/bfin-async-flash.c b/drivers/mtd/maps/bfin-async-flash.c
index c0fd99b0c525..85dd18193cf2 100644
--- a/drivers/mtd/maps/bfin-async-flash.c
+++ b/drivers/mtd/maps/bfin-async-flash.c
@@ -70,7 +70,7 @@ static void switch_back(struct async_state *state)
local_irq_restore(state->irq_flags);
}
-static map_word bfin_read(struct map_info *map, unsigned long ofs)
+static map_word bfin_flash_read(struct map_info *map, unsigned long ofs)
{
struct async_state *state = (struct async_state *)map->map_priv_1;
uint16_t word;
@@ -86,7 +86,7 @@ static map_word bfin_read(struct map_info *map, unsigned long ofs)
return test;
}
-static void bfin_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
+static void bfin_flash_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
{
struct async_state *state = (struct async_state *)map->map_priv_1;
@@ -97,7 +97,7 @@ static void bfin_copy_from(struct map_info *map, void *to, unsigned long from, s
switch_back(state);
}
-static void bfin_write(struct map_info *map, map_word d1, unsigned long ofs)
+static void bfin_flash_write(struct map_info *map, map_word d1, unsigned long ofs)
{
struct async_state *state = (struct async_state *)map->map_priv_1;
uint16_t d;
@@ -112,7 +112,7 @@ static void bfin_write(struct map_info *map, map_word d1, unsigned long ofs)
switch_back(state);
}
-static void bfin_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
+static void bfin_flash_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
{
struct async_state *state = (struct async_state *)map->map_priv_1;
@@ -141,10 +141,10 @@ static int __devinit bfin_flash_probe(struct platform_device *pdev)
return -ENOMEM;
state->map.name = DRIVER_NAME;
- state->map.read = bfin_read;
- state->map.copy_from = bfin_copy_from;
- state->map.write = bfin_write;
- state->map.copy_to = bfin_copy_to;
+ state->map.read = bfin_flash_read;
+ state->map.copy_from = bfin_flash_copy_from;
+ state->map.write = bfin_flash_write;
+ state->map.copy_to = bfin_flash_copy_to;
state->map.bankwidth = pdata->width;
state->map.size = memory->end - memory->start + 1;
state->map.virt = (void __iomem *)memory->start;
diff --git a/drivers/mtd/maps/ceiva.c b/drivers/mtd/maps/ceiva.c
index d41f34766e53..c09f4f57093e 100644
--- a/drivers/mtd/maps/ceiva.c
+++ b/drivers/mtd/maps/ceiva.c
@@ -253,7 +253,7 @@ static void __exit clps_destroy_mtd(struct clps_info *clps, struct mtd_info *mtd
static int __init clps_setup_flash(void)
{
- int nr;
+ int nr = 0;
#ifdef CONFIG_ARCH_CEIVA
if (machine_is_ceiva()) {
diff --git a/drivers/mtd/maps/ixp2000.c b/drivers/mtd/maps/ixp2000.c
index 1bdf0ee6d0b6..9639d83a9d6c 100644
--- a/drivers/mtd/maps/ixp2000.c
+++ b/drivers/mtd/maps/ixp2000.c
@@ -165,12 +165,11 @@ static int ixp2000_flash_probe(struct platform_device *dev)
return -EIO;
}
- info = kmalloc(sizeof(struct ixp2000_flash_info), GFP_KERNEL);
+ info = kzalloc(sizeof(struct ixp2000_flash_info), GFP_KERNEL);
if(!info) {
err = -ENOMEM;
goto Error;
}
- memset(info, 0, sizeof(struct ixp2000_flash_info));
platform_set_drvdata(dev, info);
diff --git a/drivers/mtd/maps/ixp4xx.c b/drivers/mtd/maps/ixp4xx.c
index 7b0515297411..e0a5e0426ead 100644
--- a/drivers/mtd/maps/ixp4xx.c
+++ b/drivers/mtd/maps/ixp4xx.c
@@ -107,8 +107,8 @@ static void ixp4xx_copy_from(struct map_info *map, void *to,
return;
if (from & 1) {
- *dest++ = BYTE1(flash_read16(src));
- src++;
+ *dest++ = BYTE1(flash_read16(src-1));
+ src++;
--len;
}
@@ -196,12 +196,11 @@ static int ixp4xx_flash_probe(struct platform_device *dev)
return err;
}
- info = kmalloc(sizeof(struct ixp4xx_flash_info), GFP_KERNEL);
+ info = kzalloc(sizeof(struct ixp4xx_flash_info), GFP_KERNEL);
if(!info) {
err = -ENOMEM;
goto Error;
}
- memset(info, 0, sizeof(struct ixp4xx_flash_info));
platform_set_drvdata(dev, info);
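/*
 * On the ixp4xx fix above: the flash is read 16 bits at a time, so when
 * the source offset is odd the wanted byte is the upper byte of the
 * halfword that starts one byte earlier - hence flash_read16(src - 1).
 * Hedged sketch, assuming BYTE1() extracts bits 15:8 of the halfword:
 */
if (from & 1) {				/* leading unaligned byte */
	*dest++ = BYTE1(flash_read16(src - 1));
	src++;
	--len;
}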
diff --git a/drivers/mtd/maps/pcmciamtd.c b/drivers/mtd/maps/pcmciamtd.c
index 689d6a79ffc0..e699e6ac23df 100644
--- a/drivers/mtd/maps/pcmciamtd.c
+++ b/drivers/mtd/maps/pcmciamtd.c
@@ -40,10 +40,7 @@ MODULE_PARM_DESC(debug, "Set Debug Level 0=quiet, 5=noisy");
static const int debug = 0;
#endif
-#define err(format, arg...) printk(KERN_ERR "pcmciamtd: " format "\n" , ## arg)
#define info(format, arg...) printk(KERN_INFO "pcmciamtd: " format "\n" , ## arg)
-#define warn(format, arg...) printk(KERN_WARNING "pcmciamtd: " format "\n" , ## arg)
-
#define DRIVER_DESC "PCMCIA Flash memory card driver"
@@ -52,7 +49,6 @@ static const int debug = 0;
struct pcmciamtd_dev {
struct pcmcia_device *p_dev;
- dev_node_t node; /* device node */
caddr_t win_base; /* ioremapped address of PCMCIA window */
unsigned int win_size; /* size of window */
unsigned int offset; /* offset into card the window currently points at */
@@ -100,7 +96,9 @@ module_param(mem_type, int, 0);
MODULE_PARM_DESC(mem_type, "Set Memory type (0=Flash, 1=RAM, 2=ROM, default=0)");
-/* read/write{8,16} copy_{from,to} routines with window remapping to access whole card */
+/* read/write{8,16} copy_{from,to} routines with window remapping
+ * to access the whole card
+ */
static caddr_t remap_window(struct map_info *map, unsigned long to)
{
struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1;
@@ -137,7 +135,7 @@ static map_word pcmcia_read8_remap(struct map_info *map, unsigned long ofs)
return d;
d.x[0] = readb(addr);
- DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%02x", ofs, addr, d.x[0]);
+ DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%02lx", ofs, addr, d.x[0]);
return d;
}
@@ -152,7 +150,7 @@ static map_word pcmcia_read16_remap(struct map_info *map, unsigned long ofs)
return d;
d.x[0] = readw(addr);
- DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%04x", ofs, addr, d.x[0]);
+ DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%04lx", ofs, addr, d.x[0]);
return d;
}
@@ -162,7 +160,7 @@ static void pcmcia_copy_from_remap(struct map_info *map, void *to, unsigned long
struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1;
unsigned long win_size = dev->win_size;
- DEBUG(3, "to = %p from = %lu len = %u", to, from, len);
+ DEBUG(3, "to = %p from = %lu len = %zd", to, from, len);
while(len) {
int toread = win_size - (from & (win_size-1));
caddr_t addr;
@@ -190,7 +188,7 @@ static void pcmcia_write8_remap(struct map_info *map, map_word d, unsigned long
if(!addr)
return;
- DEBUG(3, "adr = 0x%08lx (%p) data = 0x%02x", adr, addr, d.x[0]);
+ DEBUG(3, "adr = 0x%08lx (%p) data = 0x%02lx", adr, addr, d.x[0]);
writeb(d.x[0], addr);
}
@@ -201,7 +199,7 @@ static void pcmcia_write16_remap(struct map_info *map, map_word d, unsigned long
if(!addr)
return;
- DEBUG(3, "adr = 0x%08lx (%p) data = 0x%04x", adr, addr, d.x[0]);
+ DEBUG(3, "adr = 0x%08lx (%p) data = 0x%04lx", adr, addr, d.x[0]);
writew(d.x[0], addr);
}
@@ -211,7 +209,7 @@ static void pcmcia_copy_to_remap(struct map_info *map, unsigned long to, const v
struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1;
unsigned long win_size = dev->win_size;
- DEBUG(3, "to = %lu from = %p len = %u", to, from, len);
+ DEBUG(3, "to = %lu from = %p len = %zd", to, from, len);
while(len) {
int towrite = win_size - (to & (win_size-1));
caddr_t addr;
@@ -245,7 +243,8 @@ static map_word pcmcia_read8(struct map_info *map, unsigned long ofs)
return d;
d.x[0] = readb(win_base + ofs);
- DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%02x", ofs, win_base + ofs, d.x[0]);
+ DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%02lx",
+ ofs, win_base + ofs, d.x[0]);
return d;
}
@@ -259,7 +258,8 @@ static map_word pcmcia_read16(struct map_info *map, unsigned long ofs)
return d;
d.x[0] = readw(win_base + ofs);
- DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%04x", ofs, win_base + ofs, d.x[0]);
+ DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%04lx",
+ ofs, win_base + ofs, d.x[0]);
return d;
}
@@ -271,32 +271,34 @@ static void pcmcia_copy_from(struct map_info *map, void *to, unsigned long from,
if(DEV_REMOVED(map))
return;
- DEBUG(3, "to = %p from = %lu len = %u", to, from, len);
+ DEBUG(3, "to = %p from = %lu len = %zd", to, from, len);
memcpy_fromio(to, win_base + from, len);
}
-static void pcmcia_write8(struct map_info *map, u8 d, unsigned long adr)
+static void pcmcia_write8(struct map_info *map, map_word d, unsigned long adr)
{
caddr_t win_base = (caddr_t)map->map_priv_2;
if(DEV_REMOVED(map))
return;
- DEBUG(3, "adr = 0x%08lx (%p) data = 0x%02x", adr, win_base + adr, d);
- writeb(d, win_base + adr);
+ DEBUG(3, "adr = 0x%08lx (%p) data = 0x%02lx",
+ adr, win_base + adr, d.x[0]);
+ writeb(d.x[0], win_base + adr);
}
-static void pcmcia_write16(struct map_info *map, u16 d, unsigned long adr)
+static void pcmcia_write16(struct map_info *map, map_word d, unsigned long adr)
{
caddr_t win_base = (caddr_t)map->map_priv_2;
if(DEV_REMOVED(map))
return;
- DEBUG(3, "adr = 0x%08lx (%p) data = 0x%04x", adr, win_base + adr, d);
- writew(d, win_base + adr);
+ DEBUG(3, "adr = 0x%08lx (%p) data = 0x%04lx",
+ adr, win_base + adr, d.x[0]);
+ writew(d.x[0], win_base + adr);
}
@@ -307,7 +309,7 @@ static void pcmcia_copy_to(struct map_info *map, unsigned long to, const void *f
if(DEV_REMOVED(map))
return;
- DEBUG(3, "to = %lu from = %p len = %u", to, from, len);
+ DEBUG(3, "to = %lu from = %p len = %zd", to, from, len);
memcpy_toio(win_base + to, from, len);
}
@@ -376,7 +378,8 @@ static int pcmciamtd_cistpl_jedec(struct pcmcia_device *p_dev,
if (!pcmcia_parse_tuple(tuple, &parse)) {
cistpl_jedec_t *t = &parse.jedec;
for (i = 0; i < t->nid; i++)
- DEBUG(2, "JEDEC: 0x%02x 0x%02x", t->id[i].mfr, t->id[i].info);
+ DEBUG(2, "JEDEC: 0x%02x 0x%02x",
+ t->id[i].mfr, t->id[i].info);
}
return -ENOSPC;
}
@@ -432,7 +435,7 @@ static int pcmciamtd_cistpl_geo(struct pcmcia_device *p_dev,
}
-static void card_settings(struct pcmciamtd_dev *dev, struct pcmcia_device *link, int *new_name)
+static void card_settings(struct pcmciamtd_dev *dev, struct pcmcia_device *p_dev, int *new_name)
{
int i;
@@ -477,7 +480,8 @@ static void card_settings(struct pcmciamtd_dev *dev, struct pcmcia_device *link,
}
DEBUG(1, "Device: Size: %lu Width:%d Name: %s",
- dev->pcmcia_map.size, dev->pcmcia_map.bankwidth << 3, dev->mtd_name);
+ dev->pcmcia_map.size,
+ dev->pcmcia_map.bankwidth << 3, dev->mtd_name);
}
@@ -490,7 +494,6 @@ static int pcmciamtd_config(struct pcmcia_device *link)
{
struct pcmciamtd_dev *dev = link->priv;
struct mtd_info *mtd = NULL;
- cs_status_t status;
win_req_t req;
int ret;
int i;
@@ -514,9 +517,11 @@ static int pcmciamtd_config(struct pcmcia_device *link)
if(setvpp == 1)
dev->pcmcia_map.set_vpp = pcmciamtd_set_vpp;
- /* Request a memory window for PCMCIA. Some architeures can map windows upto the maximum
- that PCMCIA can support (64MiB) - this is ideal and we aim for a window the size of the
- whole card - otherwise we try smaller windows until we succeed */
+ /* Request a memory window for PCMCIA. Some architectures can map windows
+ * up to the maximum that PCMCIA can support (64MiB) - this is ideal and
+ * we aim for a window the size of the whole card - otherwise we try
+ * smaller windows until we succeed
+ */
req.Attributes = WIN_MEMORY_TYPE_CM | WIN_ENABLE;
req.Attributes |= (dev->pcmcia_map.bankwidth == 1) ? WIN_DATA_WIDTH_8 : WIN_DATA_WIDTH_16;
@@ -544,7 +549,7 @@ static int pcmciamtd_config(struct pcmcia_device *link)
DEBUG(2, "dev->win_size = %d", dev->win_size);
if(!dev->win_size) {
- err("Cant allocate memory window");
+ dev_err(&dev->p_dev->dev, "Cannot allocate memory window\n");
pcmciamtd_release(link);
return -ENODEV;
}
@@ -554,7 +559,8 @@ static int pcmciamtd_config(struct pcmcia_device *link)
DEBUG(2, "window handle = 0x%8.8lx", (unsigned long)link->win);
dev->win_base = ioremap(req.Base, req.Size);
if(!dev->win_base) {
- err("ioremap(%lu, %u) failed", req.Base, req.Size);
+ dev_err(&dev->p_dev->dev, "ioremap(%lu, %u) failed\n",
+ req.Base, req.Size);
pcmciamtd_release(link);
return -ENODEV;
}
@@ -565,7 +571,7 @@ static int pcmciamtd_config(struct pcmcia_device *link)
dev->pcmcia_map.map_priv_1 = (unsigned long)dev;
dev->pcmcia_map.map_priv_2 = (unsigned long)link->win;
- dev->vpp = (vpp) ? vpp : link->socket.socket.Vpp;
+ dev->vpp = (vpp) ? vpp : link->socket->socket.Vpp;
link->conf.Attributes = 0;
if(setvpp == 2) {
link->conf.Vpp = dev->vpp;
@@ -601,7 +607,7 @@ static int pcmciamtd_config(struct pcmcia_device *link)
}
if(!mtd) {
- DEBUG(1, "Cant find an MTD");
+ DEBUG(1, "Can not find an MTD");
pcmciamtd_release(link);
return -ENODEV;
}
@@ -612,8 +618,9 @@ static int pcmciamtd_config(struct pcmcia_device *link)
if(new_name) {
int size = 0;
char unit = ' ';
- /* Since we are using a default name, make it better by adding in the
- size */
+ /* Since we are using a default name, make it better by adding
+ * in the size
+ */
if(mtd->size < 1048576) { /* <1MiB in size, show size in KiB */
size = mtd->size >> 10;
unit = 'K';
@@ -643,17 +650,15 @@ static int pcmciamtd_config(struct pcmcia_device *link)
if(add_mtd_device(mtd)) {
map_destroy(mtd);
dev->mtd_info = NULL;
- err("Couldnt register MTD device");
+ dev_err(&dev->p_dev->dev,
+ "Could not register the MTD device\n");
pcmciamtd_release(link);
return -ENODEV;
}
- snprintf(dev->node.dev_name, sizeof(dev->node.dev_name), "mtd%d", mtd->index);
- info("mtd%d: %s", mtd->index, mtd->name);
- link->dev_node = &dev->node;
+ dev_info(&dev->p_dev->dev, "mtd%d: %s\n", mtd->index, mtd->name);
return 0;
- failed:
- err("CS Error, exiting");
+ dev_err(&dev->p_dev->dev, "CS Error, exiting\n");
pcmciamtd_release(link);
return -ENODEV;
}
@@ -692,8 +697,9 @@ static void pcmciamtd_detach(struct pcmcia_device *link)
if(dev->mtd_info) {
del_mtd_device(dev->mtd_info);
+ dev_info(&dev->p_dev->dev, "mtd%d: Removing\n",
+ dev->mtd_info->index);
map_destroy(dev->mtd_info);
- info("mtd%d: Removed", dev->mtd_info->index);
}
pcmciamtd_release(link);
@@ -737,8 +743,11 @@ static struct pcmcia_device_id pcmciamtd_ids[] = {
PCMCIA_DEVICE_PROD_ID12("intel", "VALUE SERIES 100 ", 0x40ade711, 0xdf8506d8),
PCMCIA_DEVICE_PROD_ID12("KINGMAX TECHNOLOGY INC.", "SRAM 256K Bytes", 0x54d0c69c, 0xad12c29c),
PCMCIA_DEVICE_PROD_ID12("Maxtor", "MAXFL MobileMax Flash Memory Card", 0xb68968c8, 0x2dfb47b0),
+ PCMCIA_DEVICE_PROD_ID123("M-Systems", "M-SYS Flash Memory Card", "(c) M-Systems", 0x7ed2ad87, 0x675dc3fb, 0x7aef3965),
+ PCMCIA_DEVICE_PROD_ID12("PRETEC", " 2MB SRAM CARD", 0xebf91155, 0x805360ca),
PCMCIA_DEVICE_PROD_ID12("SEIKO EPSON", "WWB101EN20", 0xf9876baf, 0xad0b207b),
PCMCIA_DEVICE_PROD_ID12("SEIKO EPSON", "WWB513EN20", 0xf9876baf, 0xe8d884ad),
+ PCMCIA_DEVICE_PROD_ID12("SMART Modular Technologies", " 4MB FLASH Card", 0x96fd8277, 0x737a5b05),
PCMCIA_DEVICE_PROD_ID12("Starfish, Inc.", "REX-3000", 0x05ddca47, 0xe7d67bca),
PCMCIA_DEVICE_PROD_ID12("Starfish, Inc.", "REX-4100", 0x05ddca47, 0x7bc32944),
/* the following was commented out in pcmcia-cs-3.2.7 */
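/*
 * The pcmciamtd debug-format changes above follow from the types in
 * <linux/mtd/map.h>: map_word.x[] holds unsigned long values and the
 * copy helpers take an ssize_t length, so %lx / %zd are the matching
 * printk specifiers.  Hedged fragment:
 */
map_word d;
ssize_t len = 16;

d.x[0] = readw(win_base + ofs);
DEBUG(3, "data = 0x%04lx len = %zd", d.x[0], len);	/* not %x / %u */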
diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c
index d9603f7f9652..426461a5f0d4 100644
--- a/drivers/mtd/maps/physmap.c
+++ b/drivers/mtd/maps/physmap.c
@@ -264,8 +264,11 @@ static int __init physmap_init(void)
err = platform_driver_register(&physmap_flash_driver);
#ifdef CONFIG_MTD_PHYSMAP_COMPAT
- if (err == 0)
- platform_device_register(&physmap_flash);
+ if (err == 0) {
+ err = platform_device_register(&physmap_flash);
+ if (err)
+ platform_driver_unregister(&physmap_flash_driver);
+ }
#endif
return err;
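/*
 * The physmap_init() change above pairs the compat platform-device
 * registration with driver unregistration on failure, so nothing is
 * left half-registered.  Hedged sketch of the pattern:
 */
err = platform_driver_register(&physmap_flash_driver);
if (err == 0) {
	err = platform_device_register(&physmap_flash);
	if (err)
		platform_driver_unregister(&physmap_flash_driver);	/* roll back */
}
return err;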
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index 101ee6ead05c..ba124baa646d 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -143,7 +143,7 @@ static int of_flash_remove(struct of_device *dev)
static struct mtd_info * __devinit obsolete_probe(struct of_device *dev,
struct map_info *map)
{
- struct device_node *dp = dev->node;
+ struct device_node *dp = dev->dev.of_node;
const char *of_probe;
struct mtd_info *mtd;
static const char *rom_probe_types[]
@@ -173,14 +173,55 @@ static struct mtd_info * __devinit obsolete_probe(struct of_device *dev,
}
}
+#ifdef CONFIG_MTD_PARTITIONS
+/* When partitions are set we look for a linux,part-probe property which
+ specifies the list of partition probers to use. If none is given then the
+ default is used. These take precedence over other device tree
+ information. */
+static const char *part_probe_types_def[] = { "cmdlinepart", "RedBoot", NULL };
+static const char ** __devinit of_get_probes(struct device_node *dp)
+{
+ const char *cp;
+ int cplen;
+ unsigned int l;
+ unsigned int count;
+ const char **res;
+
+ cp = of_get_property(dp, "linux,part-probe", &cplen);
+ if (cp == NULL)
+ return part_probe_types_def;
+
+ count = 0;
+ for (l = 0; l != cplen; l++)
+ if (cp[l] == 0)
+ count++;
+
+ res = kzalloc((count + 1)*sizeof(*res), GFP_KERNEL);
+ count = 0;
+ while (cplen > 0) {
+ res[count] = cp;
+ l = strlen(cp) + 1;
+ cp += l;
+ cplen -= l;
+ count++;
+ }
+ return res;
+}
+
+static void __devinit of_free_probes(const char **probes)
+{
+ if (probes != part_probe_types_def)
+ kfree(probes);
+}
+#endif
+
static int __devinit of_flash_probe(struct of_device *dev,
const struct of_device_id *match)
{
#ifdef CONFIG_MTD_PARTITIONS
- static const char *part_probe_types[]
- = { "cmdlinepart", "RedBoot", NULL };
+ const char **part_probe_types;
#endif
- struct device_node *dp = dev->node;
+ struct device_node *dp = dev->dev.of_node;
struct resource res;
struct of_flash *info;
const char *probe_type = match->data;
@@ -204,7 +245,7 @@ static int __devinit of_flash_probe(struct of_device *dev,
p = of_get_property(dp, "reg", &count);
if (count % reg_tuple_size != 0) {
dev_err(&dev->dev, "Malformed reg property on %s\n",
- dev->node->full_name);
+ dev->dev.of_node->full_name);
err = -EINVAL;
goto err_flash_remove;
}
@@ -218,7 +259,7 @@ static int __devinit of_flash_probe(struct of_device *dev,
dev_set_drvdata(&dev->dev, info);
- mtd_list = kzalloc(sizeof(struct mtd_info) * count, GFP_KERNEL);
+ mtd_list = kzalloc(sizeof(*mtd_list) * count, GFP_KERNEL);
if (!mtd_list)
goto err_flash_remove;
@@ -307,12 +348,14 @@ static int __devinit of_flash_probe(struct of_device *dev,
goto err_out;
#ifdef CONFIG_MTD_PARTITIONS
- /* First look for RedBoot table or partitions on the command
- * line, these take precedence over device tree information */
+ part_probe_types = of_get_probes(dp);
err = parse_mtd_partitions(info->cmtd, part_probe_types,
&info->parts, 0);
- if (err < 0)
+ if (err < 0) {
+ of_free_probes(part_probe_types);
return err;
+ }
+ of_free_probes(part_probe_types);
#ifdef CONFIG_MTD_OF_PARTS
if (err == 0) {
@@ -375,8 +418,11 @@ static struct of_device_id of_flash_match[] = {
MODULE_DEVICE_TABLE(of, of_flash_match);
static struct of_platform_driver of_flash_driver = {
- .name = "of-flash",
- .match_table = of_flash_match,
+ .driver = {
+ .name = "of-flash",
+ .owner = THIS_MODULE,
+ .of_match_table = of_flash_match,
+ },
.probe = of_flash_probe,
.remove = of_flash_remove,
};
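/*
 * of_get_probes() above turns a NUL-separated "linux,part-probe"
 * stringlist (e.g. linux,part-probe = "cmdlinepart", "RedBoot";) into a
 * NULL-terminated array of probe names, falling back to the built-in
 * default list when the property is absent.  Hedged sketch of the
 * counting step for such a property value (hypothetical helper name):
 */
static unsigned int example_count_strings(const char *cp, int cplen)
{
	unsigned int l, count = 0;

	for (l = 0; l != cplen; l++)
		if (cp[l] == 0)		/* each string ends with a NUL */
			count++;
	return count;
}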
diff --git a/drivers/mtd/maps/pismo.c b/drivers/mtd/maps/pismo.c
index 60c068db452d..eb476b7f8d11 100644
--- a/drivers/mtd/maps/pismo.c
+++ b/drivers/mtd/maps/pismo.c
@@ -234,6 +234,7 @@ static int __devexit pismo_remove(struct i2c_client *client)
/* FIXME: set_vpp needs saner arguments */
pismo_setvpp_remove_fix(pismo);
+ i2c_set_clientdata(client, NULL);
kfree(pismo);
return 0;
@@ -272,7 +273,7 @@ static int __devinit pismo_probe(struct i2c_client *client,
ret = pismo_eeprom_read(client, &eeprom, 0, sizeof(eeprom));
if (ret < 0) {
dev_err(&client->dev, "error reading EEPROM: %d\n", ret);
- return ret;
+ goto exit_free;
}
dev_info(&client->dev, "%.15s board found\n", eeprom.board);
@@ -283,6 +284,11 @@ static int __devinit pismo_probe(struct i2c_client *client,
pdata->cs_addrs[i]);
return 0;
+
+ exit_free:
+ i2c_set_clientdata(client, NULL);
+ kfree(pismo);
+ return ret;
}
static const struct i2c_device_id pismo_id[] = {
diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c
index 91dc6331053f..dd90880048cf 100644
--- a/drivers/mtd/maps/pxa2xx-flash.c
+++ b/drivers/mtd/maps/pxa2xx-flash.c
@@ -63,11 +63,10 @@ static int __init pxa2xx_flash_probe(struct platform_device *pdev)
if (!res)
return -ENODEV;
- info = kmalloc(sizeof(struct pxa2xx_flash_info), GFP_KERNEL);
+ info = kzalloc(sizeof(struct pxa2xx_flash_info), GFP_KERNEL);
if (!info)
return -ENOMEM;
- memset(info, 0, sizeof(struct pxa2xx_flash_info));
info->map.name = (char *) flash->name;
info->map.bankwidth = flash->width;
info->map.phys = res->start;
diff --git a/drivers/mtd/maps/sun_uflash.c b/drivers/mtd/maps/sun_uflash.c
index fadc4c45b455..0391c2527bd7 100644
--- a/drivers/mtd/maps/sun_uflash.c
+++ b/drivers/mtd/maps/sun_uflash.c
@@ -110,7 +110,7 @@ int uflash_devinit(struct of_device *op, struct device_node *dp)
static int __devinit uflash_probe(struct of_device *op, const struct of_device_id *match)
{
- struct device_node *dp = op->node;
+ struct device_node *dp = op->dev.of_node;
/* Flashprom must have the "user" property in order to
* be used by this driver.
@@ -149,8 +149,11 @@ static const struct of_device_id uflash_match[] = {
MODULE_DEVICE_TABLE(of, uflash_match);
static struct of_platform_driver uflash_driver = {
- .name = DRIVER_NAME,
- .match_table = uflash_match,
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = uflash_match,
+ },
.probe = uflash_probe,
.remove = __devexit_p(uflash_remove),
};
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index c82e09bbc5fd..03e19c1965cc 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -14,7 +14,6 @@
#include <linux/mtd/mtd.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
-#include <linux/freezer.h>
#include <linux/spinlock.h>
#include <linux/hdreg.h>
#include <linux/init.h>
@@ -25,12 +24,42 @@
#include "mtdcore.h"
static LIST_HEAD(blktrans_majors);
+static DEFINE_MUTEX(blktrans_ref_mutex);
+
+void blktrans_dev_release(struct kref *kref)
+{
+ struct mtd_blktrans_dev *dev =
+ container_of(kref, struct mtd_blktrans_dev, ref);
+
+ dev->disk->private_data = NULL;
+ blk_cleanup_queue(dev->rq);
+ put_disk(dev->disk);
+ list_del(&dev->list);
+ kfree(dev);
+}
+
+static struct mtd_blktrans_dev *blktrans_dev_get(struct gendisk *disk)
+{
+ struct mtd_blktrans_dev *dev;
+
+ mutex_lock(&blktrans_ref_mutex);
+ dev = disk->private_data;
+
+ if (!dev)
+ goto unlock;
+ kref_get(&dev->ref);
+unlock:
+ mutex_unlock(&blktrans_ref_mutex);
+ return dev;
+}
+
+void blktrans_dev_put(struct mtd_blktrans_dev *dev)
+{
+ mutex_lock(&blktrans_ref_mutex);
+ kref_put(&dev->ref, blktrans_dev_release);
+ mutex_unlock(&blktrans_ref_mutex);
+}
-struct mtd_blkcore_priv {
- struct task_struct *thread;
- struct request_queue *rq;
- spinlock_t queue_lock;
-};
static int do_blktrans_request(struct mtd_blktrans_ops *tr,
struct mtd_blktrans_dev *dev,
@@ -61,7 +90,6 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
return -EIO;
rq_flush_dcache_pages(req);
return 0;
-
case WRITE:
if (!tr->writesect)
return -EIO;
@@ -71,7 +99,6 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
if (tr->writesect(dev, block, buf))
return -EIO;
return 0;
-
default:
printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req));
return -EIO;
@@ -80,14 +107,13 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
static int mtd_blktrans_thread(void *arg)
{
- struct mtd_blktrans_ops *tr = arg;
- struct request_queue *rq = tr->blkcore_priv->rq;
+ struct mtd_blktrans_dev *dev = arg;
+ struct request_queue *rq = dev->rq;
struct request *req = NULL;
spin_lock_irq(rq->queue_lock);
while (!kthread_should_stop()) {
- struct mtd_blktrans_dev *dev;
int res;
if (!req && !(req = blk_fetch_request(rq))) {
@@ -98,13 +124,10 @@ static int mtd_blktrans_thread(void *arg)
continue;
}
- dev = req->rq_disk->private_data;
- tr = dev->tr;
-
spin_unlock_irq(rq->queue_lock);
mutex_lock(&dev->lock);
- res = do_blktrans_request(tr, dev, req);
+ res = do_blktrans_request(dev->tr, dev, req);
mutex_unlock(&dev->lock);
spin_lock_irq(rq->queue_lock);
@@ -123,81 +146,112 @@ static int mtd_blktrans_thread(void *arg)
static void mtd_blktrans_request(struct request_queue *rq)
{
- struct mtd_blktrans_ops *tr = rq->queuedata;
- wake_up_process(tr->blkcore_priv->thread);
-}
+ struct mtd_blktrans_dev *dev;
+ struct request *req = NULL;
+
+ dev = rq->queuedata;
+ if (!dev)
+ while ((req = blk_fetch_request(rq)) != NULL)
+ __blk_end_request_all(req, -ENODEV);
+ else
+ wake_up_process(dev->thread);
+}
static int blktrans_open(struct block_device *bdev, fmode_t mode)
{
- struct mtd_blktrans_dev *dev = bdev->bd_disk->private_data;
- struct mtd_blktrans_ops *tr = dev->tr;
- int ret = -ENODEV;
-
- if (!get_mtd_device(NULL, dev->mtd->index))
- goto out;
-
- if (!try_module_get(tr->owner))
- goto out_tr;
-
- /* FIXME: Locking. A hot pluggable device can go away
- (del_mtd_device can be called for it) without its module
- being unloaded. */
- dev->mtd->usecount++;
-
- ret = 0;
- if (tr->open && (ret = tr->open(dev))) {
- dev->mtd->usecount--;
- put_mtd_device(dev->mtd);
- out_tr:
- module_put(tr->owner);
+ struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
+ int ret;
+
+ if (!dev)
+ return -ERESTARTSYS;
+
+ mutex_lock(&dev->lock);
+
+ if (!dev->mtd) {
+ ret = -ENXIO;
+ goto unlock;
}
- out:
+
+ ret = !dev->open++ && dev->tr->open ? dev->tr->open(dev) : 0;
+
+ /* Take another reference on the device so it won't go away until
+ the last release */
+ if (!ret)
+ kref_get(&dev->ref);
+unlock:
+ mutex_unlock(&dev->lock);
+ blktrans_dev_put(dev);
return ret;
}
static int blktrans_release(struct gendisk *disk, fmode_t mode)
{
- struct mtd_blktrans_dev *dev = disk->private_data;
- struct mtd_blktrans_ops *tr = dev->tr;
- int ret = 0;
+ struct mtd_blktrans_dev *dev = blktrans_dev_get(disk);
+ int ret = -ENXIO;
- if (tr->release)
- ret = tr->release(dev);
+ if (!dev)
+ return ret;
- if (!ret) {
- dev->mtd->usecount--;
- put_mtd_device(dev->mtd);
- module_put(tr->owner);
- }
+ mutex_lock(&dev->lock);
+
+ /* Release one reference; we are sure it's not the last one here */
+ kref_put(&dev->ref, blktrans_dev_release);
+ if (!dev->mtd)
+ goto unlock;
+
+ ret = !--dev->open && dev->tr->release ? dev->tr->release(dev) : 0;
+unlock:
+ mutex_unlock(&dev->lock);
+ blktrans_dev_put(dev);
return ret;
}
static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
- struct mtd_blktrans_dev *dev = bdev->bd_disk->private_data;
+ struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
+ int ret = -ENXIO;
+
+ if (!dev)
+ return ret;
+
+ mutex_lock(&dev->lock);
+
+ if (!dev->mtd)
+ goto unlock;
- if (dev->tr->getgeo)
- return dev->tr->getgeo(dev, geo);
- return -ENOTTY;
+ ret = dev->tr->getgeo ? dev->tr->getgeo(dev, geo) : 0;
+unlock:
+ mutex_unlock(&dev->lock);
+ blktrans_dev_put(dev);
+ return ret;
}
static int blktrans_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
- struct mtd_blktrans_dev *dev = bdev->bd_disk->private_data;
- struct mtd_blktrans_ops *tr = dev->tr;
+ struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
+ int ret = -ENXIO;
+
+ if (!dev)
+ return ret;
+
+ mutex_lock(&dev->lock);
+
+ if (!dev->mtd)
+ goto unlock;
switch (cmd) {
case BLKFLSBUF:
- if (tr->flush)
- return tr->flush(dev);
- /* The core code did the work, we had nothing to do. */
- return 0;
+ ret = dev->tr->flush ? dev->tr->flush(dev) : 0;
default:
- return -ENOTTY;
+ ret = -ENOTTY;
}
+unlock:
+ mutex_unlock(&dev->lock);
+ blktrans_dev_put(dev);
+ return ret;
}
static const struct block_device_operations mtd_blktrans_ops = {
@@ -214,12 +268,14 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
struct mtd_blktrans_dev *d;
int last_devnum = -1;
struct gendisk *gd;
+ int ret;
if (mutex_trylock(&mtd_table_mutex)) {
mutex_unlock(&mtd_table_mutex);
BUG();
}
+ mutex_lock(&blktrans_ref_mutex);
list_for_each_entry(d, &tr->devs, list) {
if (new->devnum == -1) {
/* Use first free number */
@@ -231,6 +287,7 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
}
} else if (d->devnum == new->devnum) {
/* Required number taken */
+ mutex_unlock(&blktrans_ref_mutex);
return -EBUSY;
} else if (d->devnum > new->devnum) {
/* Required number was free */
@@ -239,24 +296,38 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
}
last_devnum = d->devnum;
}
+
+ ret = -EBUSY;
if (new->devnum == -1)
new->devnum = last_devnum+1;
- if ((new->devnum << tr->part_bits) > 256) {
- return -EBUSY;
+ /* Check that the device and any partitions will get valid
+ * minor numbers and that the disk naming code below can cope
+ * with this number. */
+ if (new->devnum > (MINORMASK >> tr->part_bits) ||
+ (tr->part_bits && new->devnum >= 27 * 26)) {
+ mutex_unlock(&blktrans_ref_mutex);
+ goto error1;
}
list_add_tail(&new->list, &tr->devs);
added:
+ mutex_unlock(&blktrans_ref_mutex);
+
mutex_init(&new->lock);
+ kref_init(&new->ref);
if (!tr->writesect)
new->readonly = 1;
+ /* Create gendisk */
+ ret = -ENOMEM;
gd = alloc_disk(1 << tr->part_bits);
- if (!gd) {
- list_del(&new->list);
- return -ENOMEM;
- }
+
+ if (!gd)
+ goto error2;
+
+ new->disk = gd;
+ gd->private_data = new;
gd->major = tr->major;
gd->first_minor = (new->devnum) << tr->part_bits;
gd->fops = &mtd_blktrans_ops;
@@ -274,13 +345,35 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
snprintf(gd->disk_name, sizeof(gd->disk_name),
"%s%d", tr->name, new->devnum);
- /* 2.5 has capacity in units of 512 bytes while still
- having BLOCK_SIZE_BITS set to 10. Just to keep us amused. */
set_capacity(gd, (new->size * tr->blksize) >> 9);
- gd->private_data = new;
- new->blkcore_priv = gd;
- gd->queue = tr->blkcore_priv->rq;
+ /* Create the request queue */
+ spin_lock_init(&new->queue_lock);
+ new->rq = blk_init_queue(mtd_blktrans_request, &new->queue_lock);
+
+ if (!new->rq)
+ goto error3;
+
+ new->rq->queuedata = new;
+ blk_queue_logical_block_size(new->rq, tr->blksize);
+
+ if (tr->discard)
+ queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
+ new->rq);
+
+ gd->queue = new->rq;
+
+ __get_mtd_device(new->mtd);
+ __module_get(tr->owner);
+
+ /* Create processing thread */
+ /* TODO: workqueue ? */
+ new->thread = kthread_run(mtd_blktrans_thread, new,
+ "%s%d", tr->name, new->mtd->index);
+ if (IS_ERR(new->thread)) {
+ ret = PTR_ERR(new->thread);
+ goto error4;
+ }
gd->driverfs_dev = &new->mtd->dev;
if (new->readonly)
@@ -288,21 +381,65 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
add_disk(gd);
+ if (new->disk_attributes) {
+ ret = sysfs_create_group(&disk_to_dev(gd)->kobj,
+ new->disk_attributes);
+ WARN_ON(ret);
+ }
return 0;
+error4:
+ module_put(tr->owner);
+ __put_mtd_device(new->mtd);
+ blk_cleanup_queue(new->rq);
+error3:
+ put_disk(new->disk);
+error2:
+ list_del(&new->list);
+error1:
+ kfree(new);
+ return ret;
}
int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
{
+ unsigned long flags;
+
if (mutex_trylock(&mtd_table_mutex)) {
mutex_unlock(&mtd_table_mutex);
BUG();
}
- list_del(&old->list);
+ /* Stop new requests from arriving */
+ del_gendisk(old->disk);
+
+ if (old->disk_attributes)
+ sysfs_remove_group(&disk_to_dev(old->disk)->kobj,
+ old->disk_attributes);
+
+ /* Stop the thread */
+ kthread_stop(old->thread);
+
+ /* Kill current requests */
+ spin_lock_irqsave(&old->queue_lock, flags);
+ old->rq->queuedata = NULL;
+ blk_start_queue(old->rq);
+ spin_unlock_irqrestore(&old->queue_lock, flags);
+
+ /* Ask the trans driver to release the mtd device */
+ mutex_lock(&old->lock);
+ if (old->open && old->tr->release) {
+ old->tr->release(old);
+ old->open = 0;
+ }
+
+ __put_mtd_device(old->mtd);
+ module_put(old->tr->owner);
- del_gendisk(old->blkcore_priv);
- put_disk(old->blkcore_priv);
+ /* At that point, we don't touch the mtd anymore */
+ old->mtd = NULL;
+ mutex_unlock(&old->lock);
+ blktrans_dev_put(old);
return 0;
}
@@ -335,7 +472,8 @@ static struct mtd_notifier blktrans_notifier = {
int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
- int ret, i;
+ struct mtd_info *mtd;
+ int ret;
/* Register the notifier if/when the first device type is
registered, to prevent the link/init ordering from fucking
@@ -343,9 +481,6 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
if (!blktrans_notifier.list.next)
register_mtd_user(&blktrans_notifier);
- tr->blkcore_priv = kzalloc(sizeof(*tr->blkcore_priv), GFP_KERNEL);
- if (!tr->blkcore_priv)
- return -ENOMEM;
mutex_lock(&mtd_table_mutex);
@@ -353,49 +488,20 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
if (ret) {
printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n",
tr->name, tr->major, ret);
- kfree(tr->blkcore_priv);
mutex_unlock(&mtd_table_mutex);
return ret;
}
- spin_lock_init(&tr->blkcore_priv->queue_lock);
-
- tr->blkcore_priv->rq = blk_init_queue(mtd_blktrans_request, &tr->blkcore_priv->queue_lock);
- if (!tr->blkcore_priv->rq) {
- unregister_blkdev(tr->major, tr->name);
- kfree(tr->blkcore_priv);
- mutex_unlock(&mtd_table_mutex);
- return -ENOMEM;
- }
-
- tr->blkcore_priv->rq->queuedata = tr;
- blk_queue_logical_block_size(tr->blkcore_priv->rq, tr->blksize);
- if (tr->discard)
- queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
- tr->blkcore_priv->rq);
tr->blkshift = ffs(tr->blksize) - 1;
- tr->blkcore_priv->thread = kthread_run(mtd_blktrans_thread, tr,
- "%sd", tr->name);
- if (IS_ERR(tr->blkcore_priv->thread)) {
- ret = PTR_ERR(tr->blkcore_priv->thread);
- blk_cleanup_queue(tr->blkcore_priv->rq);
- unregister_blkdev(tr->major, tr->name);
- kfree(tr->blkcore_priv);
- mutex_unlock(&mtd_table_mutex);
- return ret;
- }
-
INIT_LIST_HEAD(&tr->devs);
list_add(&tr->list, &blktrans_majors);
- for (i=0; i<MAX_MTD_DEVICES; i++) {
- if (mtd_table[i] && mtd_table[i]->type != MTD_ABSENT)
- tr->add_mtd(tr, mtd_table[i]);
- }
+ mtd_for_each_device(mtd)
+ if (mtd->type != MTD_ABSENT)
+ tr->add_mtd(tr, mtd);
mutex_unlock(&mtd_table_mutex);
-
return 0;
}
@@ -405,22 +511,15 @@ int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
mutex_lock(&mtd_table_mutex);
- /* Clean up the kernel thread */
- kthread_stop(tr->blkcore_priv->thread);
-
/* Remove it from the list of active majors */
list_del(&tr->list);
list_for_each_entry_safe(dev, next, &tr->devs, list)
tr->remove_dev(dev);
- blk_cleanup_queue(tr->blkcore_priv->rq);
unregister_blkdev(tr->major, tr->name);
-
mutex_unlock(&mtd_table_mutex);
- kfree(tr->blkcore_priv);
-
BUG_ON(!list_empty(&tr->devs));
return 0;
}
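/*
 * With the mtd_blkdevs rework above, every block-layer entry point pins
 * the device with a kref and checks dev->mtd under dev->lock, since the
 * MTD may be torn down while the block device node is still open.
 * Hedged usage sketch (not part of the patch):
 */
struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
int ret = -ENXIO;

if (!dev)
	return ret;
mutex_lock(&dev->lock);
if (dev->mtd)			/* NULL once del_mtd_blktrans_dev() ran */
	ret = 0;		/* ... perform the real work here ... */
mutex_unlock(&dev->lock);
blktrans_dev_put(dev);
return ret;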
diff --git a/drivers/mtd/mtdblock.c b/drivers/mtd/mtdblock.c
index 9f41b1a853c1..e6edbec609fd 100644
--- a/drivers/mtd/mtdblock.c
+++ b/drivers/mtd/mtdblock.c
@@ -19,15 +19,15 @@
#include <linux/mutex.h>
-static struct mtdblk_dev {
- struct mtd_info *mtd;
+struct mtdblk_dev {
+ struct mtd_blktrans_dev mbd;
int count;
struct mutex cache_mutex;
unsigned char *cache_data;
unsigned long cache_offset;
unsigned int cache_size;
enum { STATE_EMPTY, STATE_CLEAN, STATE_DIRTY } cache_state;
-} *mtdblks[MAX_MTD_DEVICES];
+};
static struct mutex mtdblks_lock;
@@ -98,7 +98,7 @@ static int erase_write (struct mtd_info *mtd, unsigned long pos,
static int write_cached_data (struct mtdblk_dev *mtdblk)
{
- struct mtd_info *mtd = mtdblk->mtd;
+ struct mtd_info *mtd = mtdblk->mbd.mtd;
int ret;
if (mtdblk->cache_state != STATE_DIRTY)
@@ -128,7 +128,7 @@ static int write_cached_data (struct mtdblk_dev *mtdblk)
static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos,
int len, const char *buf)
{
- struct mtd_info *mtd = mtdblk->mtd;
+ struct mtd_info *mtd = mtdblk->mbd.mtd;
unsigned int sect_size = mtdblk->cache_size;
size_t retlen;
int ret;
@@ -198,7 +198,7 @@ static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos,
static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos,
int len, char *buf)
{
- struct mtd_info *mtd = mtdblk->mtd;
+ struct mtd_info *mtd = mtdblk->mbd.mtd;
unsigned int sect_size = mtdblk->cache_size;
size_t retlen;
int ret;
@@ -244,16 +244,16 @@ static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos,
static int mtdblock_readsect(struct mtd_blktrans_dev *dev,
unsigned long block, char *buf)
{
- struct mtdblk_dev *mtdblk = mtdblks[dev->devnum];
+ struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd);
return do_cached_read(mtdblk, block<<9, 512, buf);
}
static int mtdblock_writesect(struct mtd_blktrans_dev *dev,
unsigned long block, char *buf)
{
- struct mtdblk_dev *mtdblk = mtdblks[dev->devnum];
+ struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd);
if (unlikely(!mtdblk->cache_data && mtdblk->cache_size)) {
- mtdblk->cache_data = vmalloc(mtdblk->mtd->erasesize);
+ mtdblk->cache_data = vmalloc(mtdblk->mbd.mtd->erasesize);
if (!mtdblk->cache_data)
return -EINTR;
/* -EINTR is not really correct, but it is the best match
@@ -266,37 +266,26 @@ static int mtdblock_writesect(struct mtd_blktrans_dev *dev,
static int mtdblock_open(struct mtd_blktrans_dev *mbd)
{
- struct mtdblk_dev *mtdblk;
- struct mtd_info *mtd = mbd->mtd;
- int dev = mbd->devnum;
+ struct mtdblk_dev *mtdblk = container_of(mbd, struct mtdblk_dev, mbd);
DEBUG(MTD_DEBUG_LEVEL1,"mtdblock_open\n");
mutex_lock(&mtdblks_lock);
- if (mtdblks[dev]) {
- mtdblks[dev]->count++;
+ if (mtdblk->count) {
+ mtdblk->count++;
mutex_unlock(&mtdblks_lock);
return 0;
}
/* OK, it's not open. Create cache info for it */
- mtdblk = kzalloc(sizeof(struct mtdblk_dev), GFP_KERNEL);
- if (!mtdblk) {
- mutex_unlock(&mtdblks_lock);
- return -ENOMEM;
- }
-
mtdblk->count = 1;
- mtdblk->mtd = mtd;
-
mutex_init(&mtdblk->cache_mutex);
mtdblk->cache_state = STATE_EMPTY;
- if ( !(mtdblk->mtd->flags & MTD_NO_ERASE) && mtdblk->mtd->erasesize) {
- mtdblk->cache_size = mtdblk->mtd->erasesize;
+ if (!(mbd->mtd->flags & MTD_NO_ERASE) && mbd->mtd->erasesize) {
+ mtdblk->cache_size = mbd->mtd->erasesize;
mtdblk->cache_data = NULL;
}
- mtdblks[dev] = mtdblk;
mutex_unlock(&mtdblks_lock);
DEBUG(MTD_DEBUG_LEVEL1, "ok\n");
@@ -306,8 +295,7 @@ static int mtdblock_open(struct mtd_blktrans_dev *mbd)
static int mtdblock_release(struct mtd_blktrans_dev *mbd)
{
- int dev = mbd->devnum;
- struct mtdblk_dev *mtdblk = mtdblks[dev];
+ struct mtdblk_dev *mtdblk = container_of(mbd, struct mtdblk_dev, mbd);
DEBUG(MTD_DEBUG_LEVEL1, "mtdblock_release\n");
@@ -318,12 +306,10 @@ static int mtdblock_release(struct mtd_blktrans_dev *mbd)
mutex_unlock(&mtdblk->cache_mutex);
if (!--mtdblk->count) {
- /* It was the last usage. Free the device */
- mtdblks[dev] = NULL;
- if (mtdblk->mtd->sync)
- mtdblk->mtd->sync(mtdblk->mtd);
+ /* It was the last usage. Free the cache */
+ if (mbd->mtd->sync)
+ mbd->mtd->sync(mbd->mtd);
vfree(mtdblk->cache_data);
- kfree(mtdblk);
}
mutex_unlock(&mtdblks_lock);
@@ -335,40 +321,40 @@ static int mtdblock_release(struct mtd_blktrans_dev *mbd)
static int mtdblock_flush(struct mtd_blktrans_dev *dev)
{
- struct mtdblk_dev *mtdblk = mtdblks[dev->devnum];
+ struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd);
mutex_lock(&mtdblk->cache_mutex);
write_cached_data(mtdblk);
mutex_unlock(&mtdblk->cache_mutex);
- if (mtdblk->mtd->sync)
- mtdblk->mtd->sync(mtdblk->mtd);
+ if (dev->mtd->sync)
+ dev->mtd->sync(dev->mtd);
return 0;
}
static void mtdblock_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
{
- struct mtd_blktrans_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ struct mtdblk_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
return;
- dev->mtd = mtd;
- dev->devnum = mtd->index;
+ dev->mbd.mtd = mtd;
+ dev->mbd.devnum = mtd->index;
- dev->size = mtd->size >> 9;
- dev->tr = tr;
+ dev->mbd.size = mtd->size >> 9;
+ dev->mbd.tr = tr;
if (!(mtd->flags & MTD_WRITEABLE))
- dev->readonly = 1;
+ dev->mbd.readonly = 1;
- add_mtd_blktrans_dev(dev);
+ if (add_mtd_blktrans_dev(&dev->mbd))
+ kfree(dev);
}
static void mtdblock_remove_dev(struct mtd_blktrans_dev *dev)
{
del_mtd_blktrans_dev(dev);
- kfree(dev);
}
static struct mtd_blktrans_ops mtdblock_tr = {
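/*
 * mtdblock above embeds the generic struct mtd_blktrans_dev as member
 * 'mbd' of its private struct mtdblk_dev and maps back with
 * container_of(), instead of indexing a global mtdblks[] array.  Hedged
 * sketch of the idiom (hypothetical helper name):
 */
static inline struct mtdblk_dev *to_mtdblk(struct mtd_blktrans_dev *mbd)
{
	return container_of(mbd, struct mtdblk_dev, mbd);	/* no lookup table */
}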
diff --git a/drivers/mtd/mtdblock_ro.c b/drivers/mtd/mtdblock_ro.c
index 852165f8b1c3..d0d3f79f9d03 100644
--- a/drivers/mtd/mtdblock_ro.c
+++ b/drivers/mtd/mtdblock_ro.c
@@ -43,13 +43,13 @@ static void mtdblock_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
dev->tr = tr;
dev->readonly = 1;
- add_mtd_blktrans_dev(dev);
+ if (add_mtd_blktrans_dev(dev))
+ kfree(dev);
}
static void mtdblock_remove_dev(struct mtd_blktrans_dev *dev)
{
del_mtd_blktrans_dev(dev);
- kfree(dev);
}
static struct mtd_blktrans_ops mtdblock_tr = {
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 5b081cb84351..000d65ea55a4 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -15,12 +15,15 @@
#include <linux/smp_lock.h>
#include <linux/backing-dev.h>
#include <linux/compat.h>
+#include <linux/mount.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>
#include <asm/uaccess.h>
+#define MTD_INODE_FS_MAGIC 0x11307854
+static struct vfsmount *mtd_inode_mnt __read_mostly;
/*
* Data structure to hold the pointer to the mtd device as well
@@ -28,6 +31,7 @@
*/
struct mtd_file_info {
struct mtd_info *mtd;
+ struct inode *ino;
enum mtd_file_modes mode;
};
@@ -64,12 +68,10 @@ static int mtd_open(struct inode *inode, struct file *file)
int ret = 0;
struct mtd_info *mtd;
struct mtd_file_info *mfi;
+ struct inode *mtd_ino;
DEBUG(MTD_DEBUG_LEVEL0, "MTD_open\n");
- if (devnum >= MAX_MTD_DEVICES)
- return -ENODEV;
-
/* You can't open the RO devices RW */
if ((file->f_mode & FMODE_WRITE) && (minor & 1))
return -EACCES;
@@ -88,11 +90,23 @@ static int mtd_open(struct inode *inode, struct file *file)
goto out;
}
- if (mtd->backing_dev_info)
- file->f_mapping->backing_dev_info = mtd->backing_dev_info;
+ mtd_ino = iget_locked(mtd_inode_mnt->mnt_sb, devnum);
+ if (!mtd_ino) {
+ put_mtd_device(mtd);
+ ret = -ENOMEM;
+ goto out;
+ }
+ if (mtd_ino->i_state & I_NEW) {
+ mtd_ino->i_private = mtd;
+ mtd_ino->i_mode = S_IFCHR;
+ mtd_ino->i_data.backing_dev_info = mtd->backing_dev_info;
+ unlock_new_inode(mtd_ino);
+ }
+ file->f_mapping = mtd_ino->i_mapping;
/* You can't open it RW if it's not a writeable device */
if ((file->f_mode & FMODE_WRITE) && !(mtd->flags & MTD_WRITEABLE)) {
+ iput(mtd_ino);
put_mtd_device(mtd);
ret = -EACCES;
goto out;
@@ -100,10 +114,12 @@ static int mtd_open(struct inode *inode, struct file *file)
mfi = kzalloc(sizeof(*mfi), GFP_KERNEL);
if (!mfi) {
+ iput(mtd_ino);
put_mtd_device(mtd);
ret = -ENOMEM;
goto out;
}
+ mfi->ino = mtd_ino;
mfi->mtd = mtd;
file->private_data = mfi;
@@ -125,6 +141,8 @@ static int mtd_close(struct inode *inode, struct file *file)
if ((file->f_mode & FMODE_WRITE) && mtd->sync)
mtd->sync(mtd);
+ iput(mfi->ino);
+
put_mtd_device(mtd);
file->private_data = NULL;
kfree(mfi);
@@ -373,7 +391,7 @@ static int mtd_do_writeoob(struct file *file, struct mtd_info *mtd,
if (!mtd->write_oob)
ret = -EOPNOTSUPP;
else
- ret = access_ok(VERIFY_READ, ptr, length) ? 0 : EFAULT;
+ ret = access_ok(VERIFY_READ, ptr, length) ? 0 : -EFAULT;
if (ret)
return ret;
@@ -450,8 +468,7 @@ static int mtd_do_readoob(struct mtd_info *mtd, uint64_t start,
return ret;
}
-static int mtd_ioctl(struct inode *inode, struct file *file,
- u_int cmd, u_long arg)
+static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
{
struct mtd_file_info *mfi = file->private_data;
struct mtd_info *mtd = mfi->mtd;
@@ -482,7 +499,7 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
{
uint32_t ur_idx;
struct mtd_erase_region_info *kr;
- struct region_info_user *ur = (struct region_info_user *) argp;
+ struct region_info_user __user *ur = argp;
if (get_user(ur_idx, &(ur->regionindex)))
return -EFAULT;
@@ -822,6 +839,17 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
return ret;
} /* memory_ioctl */
+static long mtd_unlocked_ioctl(struct file *file, u_int cmd, u_long arg)
+{
+ int ret;
+
+ lock_kernel();
+ ret = mtd_ioctl(file, cmd, arg);
+ unlock_kernel();
+
+ return ret;
+}
+
#ifdef CONFIG_COMPAT
struct mtd_oob_buf32 {
@@ -836,7 +864,6 @@ struct mtd_oob_buf32 {
static long mtd_compat_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
- struct inode *inode = file->f_path.dentry->d_inode;
struct mtd_file_info *mfi = file->private_data;
struct mtd_info *mtd = mfi->mtd;
void __user *argp = compat_ptr(arg);
@@ -874,7 +901,7 @@ static long mtd_compat_ioctl(struct file *file, unsigned int cmd,
break;
}
default:
- ret = mtd_ioctl(inode, file, cmd, (unsigned long)argp);
+ ret = mtd_ioctl(file, cmd, (unsigned long)argp);
}
unlock_kernel();
@@ -942,7 +969,7 @@ static const struct file_operations mtd_fops = {
.llseek = mtd_lseek,
.read = mtd_read,
.write = mtd_write,
- .ioctl = mtd_ioctl,
+ .unlocked_ioctl = mtd_unlocked_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = mtd_compat_ioctl,
#endif
@@ -954,22 +981,81 @@ static const struct file_operations mtd_fops = {
#endif
};
+static int mtd_inodefs_get_sb(struct file_system_type *fs_type, int flags,
+ const char *dev_name, void *data,
+ struct vfsmount *mnt)
+{
+ return get_sb_pseudo(fs_type, "mtd_inode:", NULL, MTD_INODE_FS_MAGIC,
+ mnt);
+}
+
+static struct file_system_type mtd_inodefs_type = {
+ .name = "mtd_inodefs",
+ .get_sb = mtd_inodefs_get_sb,
+ .kill_sb = kill_anon_super,
+};
+
+static void mtdchar_notify_add(struct mtd_info *mtd)
+{
+}
+
+static void mtdchar_notify_remove(struct mtd_info *mtd)
+{
+ struct inode *mtd_ino = ilookup(mtd_inode_mnt->mnt_sb, mtd->index);
+
+ if (mtd_ino) {
+ /* Destroy the inode if it exists */
+ mtd_ino->i_nlink = 0;
+ iput(mtd_ino);
+ }
+}
+
+static struct mtd_notifier mtdchar_notifier = {
+ .add = mtdchar_notify_add,
+ .remove = mtdchar_notify_remove,
+};
+
static int __init init_mtdchar(void)
{
- int status;
+ int ret;
- status = register_chrdev(MTD_CHAR_MAJOR, "mtd", &mtd_fops);
- if (status < 0) {
- printk(KERN_NOTICE "Can't allocate major number %d for Memory Technology Devices.\n",
- MTD_CHAR_MAJOR);
+ ret = __register_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS,
+ "mtd", &mtd_fops);
+ if (ret < 0) {
+ pr_notice("Can't allocate major number %d for "
+ "Memory Technology Devices.\n", MTD_CHAR_MAJOR);
+ return ret;
}
- return status;
+ ret = register_filesystem(&mtd_inodefs_type);
+ if (ret) {
+ pr_notice("Can't register mtd_inodefs filesystem: %d\n", ret);
+ goto err_unregister_chdev;
+ }
+
+ mtd_inode_mnt = kern_mount(&mtd_inodefs_type);
+ if (IS_ERR(mtd_inode_mnt)) {
+ ret = PTR_ERR(mtd_inode_mnt);
+ pr_notice("Error mounting mtd_inodefs filesystem: %d\n", ret);
+ goto err_unregister_filesystem;
+ }
+ register_mtd_user(&mtdchar_notifier);
+
+ return ret;
+
+err_unregister_filesystem:
+ unregister_filesystem(&mtd_inodefs_type);
+err_unregister_chdev:
+ __unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
+ return ret;
}
static void __exit cleanup_mtdchar(void)
{
- unregister_chrdev(MTD_CHAR_MAJOR, "mtd");
+ unregister_mtd_user(&mtdchar_notifier);
+ mntput(mtd_inode_mnt);
+ unregister_filesystem(&mtd_inodefs_type);
+ __unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
}
module_init(init_mtdchar);
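/*
 * mtdchar above mounts an internal "mtd_inodefs" pseudo filesystem so
 * that each MTD character device gets its own inode and address_space
 * carrying the correct backing_dev_info, instead of patching
 * file->f_mapping->backing_dev_info on open.  Hedged sketch of the
 * lookup-or-create step performed per open:
 */
struct inode *ino = iget_locked(mtd_inode_mnt->mnt_sb, devnum);

if (!ino)
	return -ENOMEM;
if (ino->i_state & I_NEW) {
	ino->i_private = mtd;
	ino->i_mode = S_IFCHR;
	ino->i_data.backing_dev_info = mtd->backing_dev_info;
	unlock_new_inode(ino);		/* publish the initialised inode */
}
file->f_mapping = ino->i_mapping;	/* reads/writes now use this mapping */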
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
index db6de74082ad..7e075621bbf4 100644
--- a/drivers/mtd/mtdconcat.c
+++ b/drivers/mtd/mtdconcat.c
@@ -183,10 +183,9 @@ concat_writev(struct mtd_info *mtd, const struct kvec *vecs,
}
/* make a copy of vecs */
- vecs_copy = kmalloc(sizeof(struct kvec) * count, GFP_KERNEL);
+ vecs_copy = kmemdup(vecs, sizeof(struct kvec) * count, GFP_KERNEL);
if (!vecs_copy)
return -ENOMEM;
- memcpy(vecs_copy, vecs, sizeof(struct kvec) * count);
entry_low = 0;
for (i = 0; i < concat->num_subdev; i++) {
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index b177e750efc3..a1b8b70d2d0a 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -19,7 +19,9 @@
#include <linux/init.h>
#include <linux/mtd/compatmac.h>
#include <linux/proc_fs.h>
+#include <linux/idr.h>
#include <linux/backing-dev.h>
+#include <linux/gfp.h>
#include <linux/mtd/mtd.h>
@@ -63,13 +65,18 @@ static struct class mtd_class = {
.resume = mtd_cls_resume,
};
+static DEFINE_IDR(mtd_idr);
+
/* These are exported solely for the purpose of mtd_blkdevs.c. You
should not use them for _anything_ else */
DEFINE_MUTEX(mtd_table_mutex);
-struct mtd_info *mtd_table[MAX_MTD_DEVICES];
-
EXPORT_SYMBOL_GPL(mtd_table_mutex);
-EXPORT_SYMBOL_GPL(mtd_table);
+
+struct mtd_info *__mtd_next_device(int i)
+{
+ return idr_get_next(&mtd_idr, &i);
+}
+EXPORT_SYMBOL_GPL(__mtd_next_device);
static LIST_HEAD(mtd_notifiers);
@@ -265,13 +272,13 @@ static struct device_type mtd_devtype = {
* Add a device to the list of MTD devices present in the system, and
* notify each currently active MTD 'user' of its arrival. Returns
* zero on success or 1 on failure, which currently will only happen
- * if the number of present devices exceeds MAX_MTD_DEVICES (i.e. 16)
- * or there's a sysfs error.
+ * if there is insufficient memory or a sysfs error.
*/
int add_mtd_device(struct mtd_info *mtd)
{
- int i;
+ struct mtd_notifier *not;
+ int i, error;
if (!mtd->backing_dev_info) {
switch (mtd->type) {
@@ -290,70 +297,73 @@ int add_mtd_device(struct mtd_info *mtd)
BUG_ON(mtd->writesize == 0);
mutex_lock(&mtd_table_mutex);
- for (i=0; i < MAX_MTD_DEVICES; i++)
- if (!mtd_table[i]) {
- struct mtd_notifier *not;
-
- mtd_table[i] = mtd;
- mtd->index = i;
- mtd->usecount = 0;
-
- if (is_power_of_2(mtd->erasesize))
- mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
- else
- mtd->erasesize_shift = 0;
-
- if (is_power_of_2(mtd->writesize))
- mtd->writesize_shift = ffs(mtd->writesize) - 1;
- else
- mtd->writesize_shift = 0;
-
- mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
- mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;
-
- /* Some chips always power up locked. Unlock them now */
- if ((mtd->flags & MTD_WRITEABLE)
- && (mtd->flags & MTD_POWERUP_LOCK) && mtd->unlock) {
- if (mtd->unlock(mtd, 0, mtd->size))
- printk(KERN_WARNING
- "%s: unlock failed, "
- "writes may not work\n",
- mtd->name);
- }
+ do {
+ if (!idr_pre_get(&mtd_idr, GFP_KERNEL))
+ goto fail_locked;
+ error = idr_get_new(&mtd_idr, mtd, &i);
+ } while (error == -EAGAIN);
- /* Caller should have set dev.parent to match the
- * physical device.
- */
- mtd->dev.type = &mtd_devtype;
- mtd->dev.class = &mtd_class;
- mtd->dev.devt = MTD_DEVT(i);
- dev_set_name(&mtd->dev, "mtd%d", i);
- dev_set_drvdata(&mtd->dev, mtd);
- if (device_register(&mtd->dev) != 0) {
- mtd_table[i] = NULL;
- break;
- }
+ if (error)
+ goto fail_locked;
- if (MTD_DEVT(i))
- device_create(&mtd_class, mtd->dev.parent,
- MTD_DEVT(i) + 1,
- NULL, "mtd%dro", i);
-
- DEBUG(0, "mtd: Giving out device %d to %s\n",i, mtd->name);
- /* No need to get a refcount on the module containing
- the notifier, since we hold the mtd_table_mutex */
- list_for_each_entry(not, &mtd_notifiers, list)
- not->add(mtd);
-
- mutex_unlock(&mtd_table_mutex);
- /* We _know_ we aren't being removed, because
- our caller is still holding us here. So none
- of this try_ nonsense, and no bitching about it
- either. :) */
- __module_get(THIS_MODULE);
- return 0;
- }
+ mtd->index = i;
+ mtd->usecount = 0;
+
+ if (is_power_of_2(mtd->erasesize))
+ mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
+ else
+ mtd->erasesize_shift = 0;
+
+ if (is_power_of_2(mtd->writesize))
+ mtd->writesize_shift = ffs(mtd->writesize) - 1;
+ else
+ mtd->writesize_shift = 0;
+
+ mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
+ mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;
+
+ /* Some chips always power up locked. Unlock them now */
+ if ((mtd->flags & MTD_WRITEABLE)
+ && (mtd->flags & MTD_POWERUP_LOCK) && mtd->unlock) {
+ if (mtd->unlock(mtd, 0, mtd->size))
+ printk(KERN_WARNING
+ "%s: unlock failed, writes may not work\n",
+ mtd->name);
+ }
+
+ /* Caller should have set dev.parent to match the
+ * physical device.
+ */
+ mtd->dev.type = &mtd_devtype;
+ mtd->dev.class = &mtd_class;
+ mtd->dev.devt = MTD_DEVT(i);
+ dev_set_name(&mtd->dev, "mtd%d", i);
+ dev_set_drvdata(&mtd->dev, mtd);
+ if (device_register(&mtd->dev) != 0)
+ goto fail_added;
+
+ if (MTD_DEVT(i))
+ device_create(&mtd_class, mtd->dev.parent,
+ MTD_DEVT(i) + 1,
+ NULL, "mtd%dro", i);
+
+ DEBUG(0, "mtd: Giving out device %d to %s\n", i, mtd->name);
+ /* No need to get a refcount on the module containing
+ the notifier, since we hold the mtd_table_mutex */
+ list_for_each_entry(not, &mtd_notifiers, list)
+ not->add(mtd);
+
+ mutex_unlock(&mtd_table_mutex);
+ /* We _know_ we aren't being removed, because
+ our caller is still holding us here. So none
+ of this try_ nonsense, and no bitching about it
+ either. :) */
+ __module_get(THIS_MODULE);
+ return 0;
+fail_added:
+ idr_remove(&mtd_idr, i);
+fail_locked:
mutex_unlock(&mtd_table_mutex);
return 1;
}
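
For reference, the allocation loop above follows the legacy two-step IDR API: idr_pre_get() preallocates memory with the given GFP flags, and idr_get_new() then assigns the lowest free ID, which can still fail with -EAGAIN and require another preallocation. A minimal sketch of that pattern (illustrative only; example_idr_alloc() is a made-up name and is not part of this patch):

#include <linux/idr.h>

/* Illustrative sketch of the legacy idr_pre_get()/idr_get_new() pattern. */
static int example_idr_alloc(struct idr *idr, void *ptr)
{
	int id, err;

	do {
		/* Preallocate; returns 0 if no memory could be reserved. */
		if (!idr_pre_get(idr, GFP_KERNEL))
			return -ENOMEM;
		/* May still return -EAGAIN, in which case preallocate again. */
		err = idr_get_new(idr, ptr, &id);
	} while (err == -EAGAIN);

	return err ? err : id;
}
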
@@ -371,31 +381,34 @@ int add_mtd_device(struct mtd_info *mtd)
int del_mtd_device (struct mtd_info *mtd)
{
int ret;
+ struct mtd_notifier *not;
mutex_lock(&mtd_table_mutex);
- if (mtd_table[mtd->index] != mtd) {
+ if (idr_find(&mtd_idr, mtd->index) != mtd) {
ret = -ENODEV;
- } else if (mtd->usecount) {
+ goto out_error;
+ }
+
+ /* No need to get a refcount on the module containing
+ the notifier, since we hold the mtd_table_mutex */
+ list_for_each_entry(not, &mtd_notifiers, list)
+ not->remove(mtd);
+
+ if (mtd->usecount) {
printk(KERN_NOTICE "Removing MTD device #%d (%s) with use count %d\n",
mtd->index, mtd->name, mtd->usecount);
ret = -EBUSY;
} else {
- struct mtd_notifier *not;
-
device_unregister(&mtd->dev);
- /* No need to get a refcount on the module containing
- the notifier, since we hold the mtd_table_mutex */
- list_for_each_entry(not, &mtd_notifiers, list)
- not->remove(mtd);
-
- mtd_table[mtd->index] = NULL;
+ idr_remove(&mtd_idr, mtd->index);
module_put(THIS_MODULE);
ret = 0;
}
+out_error:
mutex_unlock(&mtd_table_mutex);
return ret;
}
@@ -411,7 +424,7 @@ int del_mtd_device (struct mtd_info *mtd)
void register_mtd_user (struct mtd_notifier *new)
{
- int i;
+ struct mtd_info *mtd;
mutex_lock(&mtd_table_mutex);
@@ -419,9 +432,8 @@ void register_mtd_user (struct mtd_notifier *new)
__module_get(THIS_MODULE);
- for (i=0; i< MAX_MTD_DEVICES; i++)
- if (mtd_table[i])
- new->add(mtd_table[i]);
+ mtd_for_each_device(mtd)
+ new->add(mtd);
mutex_unlock(&mtd_table_mutex);
}
@@ -438,15 +450,14 @@ void register_mtd_user (struct mtd_notifier *new)
int unregister_mtd_user (struct mtd_notifier *old)
{
- int i;
+ struct mtd_info *mtd;
mutex_lock(&mtd_table_mutex);
module_put(THIS_MODULE);
- for (i=0; i< MAX_MTD_DEVICES; i++)
- if (mtd_table[i])
- old->remove(mtd_table[i]);
+ mtd_for_each_device(mtd)
+ old->remove(mtd);
list_del(&old->list);
mutex_unlock(&mtd_table_mutex);
@@ -468,42 +479,56 @@ int unregister_mtd_user (struct mtd_notifier *old)
struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
{
- struct mtd_info *ret = NULL;
- int i, err = -ENODEV;
+ struct mtd_info *ret = NULL, *other;
+ int err = -ENODEV;
mutex_lock(&mtd_table_mutex);
if (num == -1) {
- for (i=0; i< MAX_MTD_DEVICES; i++)
- if (mtd_table[i] == mtd)
- ret = mtd_table[i];
- } else if (num >= 0 && num < MAX_MTD_DEVICES) {
- ret = mtd_table[num];
+ mtd_for_each_device(other) {
+ if (other == mtd) {
+ ret = mtd;
+ break;
+ }
+ }
+ } else if (num >= 0) {
+ ret = idr_find(&mtd_idr, num);
if (mtd && mtd != ret)
ret = NULL;
}
- if (!ret)
- goto out_unlock;
-
- if (!try_module_get(ret->owner))
- goto out_unlock;
-
- if (ret->get_device) {
- err = ret->get_device(ret);
- if (err)
- goto out_put;
+ if (!ret) {
+ ret = ERR_PTR(err);
+ goto out;
}
- ret->usecount++;
+ err = __get_mtd_device(ret);
+ if (err)
+ ret = ERR_PTR(err);
+out:
mutex_unlock(&mtd_table_mutex);
return ret;
+}
-out_put:
- module_put(ret->owner);
-out_unlock:
- mutex_unlock(&mtd_table_mutex);
- return ERR_PTR(err);
+
+int __get_mtd_device(struct mtd_info *mtd)
+{
+ int err;
+
+ if (!try_module_get(mtd->owner))
+ return -ENODEV;
+
+ if (mtd->get_device) {
+
+ err = mtd->get_device(mtd);
+
+ if (err) {
+ module_put(mtd->owner);
+ return err;
+ }
+ }
+ mtd->usecount++;
+ return 0;
}
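
Usage note (illustrative, not part of the patch): the new __get_mtd_device()/__put_mtd_device() helpers are for callers that already hold mtd_table_mutex; other callers keep using get_mtd_device()/put_mtd_device(), which take the mutex themselves, and get_mtd_device() reports failure with an ERR_PTR value rather than NULL. A minimal caller sketch under those assumptions:

/* Illustrative sketch only -- not part of this patch. */
struct mtd_info *mtd = get_mtd_device(NULL, 0);	/* look up mtd0 */

if (IS_ERR(mtd))
	return PTR_ERR(mtd);
/* ... use the device ... */
put_mtd_device(mtd);
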
/**
@@ -517,14 +542,14 @@ out_unlock:
struct mtd_info *get_mtd_device_nm(const char *name)
{
- int i, err = -ENODEV;
- struct mtd_info *mtd = NULL;
+ int err = -ENODEV;
+ struct mtd_info *mtd = NULL, *other;
mutex_lock(&mtd_table_mutex);
- for (i = 0; i < MAX_MTD_DEVICES; i++) {
- if (mtd_table[i] && !strcmp(name, mtd_table[i]->name)) {
- mtd = mtd_table[i];
+ mtd_for_each_device(other) {
+ if (!strcmp(name, other->name)) {
+ mtd = other;
break;
}
}
@@ -554,14 +579,19 @@ out_unlock:
void put_mtd_device(struct mtd_info *mtd)
{
- int c;
-
mutex_lock(&mtd_table_mutex);
- c = --mtd->usecount;
+ __put_mtd_device(mtd);
+ mutex_unlock(&mtd_table_mutex);
+
+}
+
+void __put_mtd_device(struct mtd_info *mtd)
+{
+ --mtd->usecount;
+ BUG_ON(mtd->usecount < 0);
+
if (mtd->put_device)
mtd->put_device(mtd);
- mutex_unlock(&mtd_table_mutex);
- BUG_ON(c < 0);
module_put(mtd->owner);
}
@@ -599,7 +629,9 @@ EXPORT_SYMBOL_GPL(add_mtd_device);
EXPORT_SYMBOL_GPL(del_mtd_device);
EXPORT_SYMBOL_GPL(get_mtd_device);
EXPORT_SYMBOL_GPL(get_mtd_device_nm);
+EXPORT_SYMBOL_GPL(__get_mtd_device);
EXPORT_SYMBOL_GPL(put_mtd_device);
+EXPORT_SYMBOL_GPL(__put_mtd_device);
EXPORT_SYMBOL_GPL(register_mtd_user);
EXPORT_SYMBOL_GPL(unregister_mtd_user);
EXPORT_SYMBOL_GPL(default_mtd_writev);
@@ -611,14 +643,9 @@ EXPORT_SYMBOL_GPL(default_mtd_writev);
static struct proc_dir_entry *proc_mtd;
-static inline int mtd_proc_info (char *buf, int i)
+static inline int mtd_proc_info(char *buf, struct mtd_info *this)
{
- struct mtd_info *this = mtd_table[i];
-
- if (!this)
- return 0;
-
- return sprintf(buf, "mtd%d: %8.8llx %8.8x \"%s\"\n", i,
+ return sprintf(buf, "mtd%d: %8.8llx %8.8x \"%s\"\n", this->index,
(unsigned long long)this->size,
this->erasesize, this->name);
}
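
Together with the header printed by mtd_read_proc() below, this format string produces the familiar /proc/mtd listing; with illustrative values a device line looks like:

mtd0: 00800000 00020000 "example-partition"
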
@@ -626,15 +653,15 @@ static inline int mtd_proc_info (char *buf, int i)
static int mtd_read_proc (char *page, char **start, off_t off, int count,
int *eof, void *data_unused)
{
- int len, l, i;
+ struct mtd_info *mtd;
+ int len, l;
off_t begin = 0;
mutex_lock(&mtd_table_mutex);
len = sprintf(page, "dev: size erasesize name\n");
- for (i=0; i< MAX_MTD_DEVICES; i++) {
-
- l = mtd_proc_info(page + len, i);
+ mtd_for_each_device(mtd) {
+ l = mtd_proc_info(page + len, mtd);
len += l;
if (len+begin > off+count)
goto done;
diff --git a/drivers/mtd/mtdcore.h b/drivers/mtd/mtdcore.h
index a33251f4b872..6a64fdebc898 100644
--- a/drivers/mtd/mtdcore.h
+++ b/drivers/mtd/mtdcore.h
@@ -8,4 +8,9 @@
should not use them for _anything_ else */
extern struct mutex mtd_table_mutex;
-extern struct mtd_info *mtd_table[MAX_MTD_DEVICES];
+extern struct mtd_info *__mtd_next_device(int i);
+
+#define mtd_for_each_device(mtd) \
+ for ((mtd) = __mtd_next_device(0); \
+ (mtd) != NULL; \
+ (mtd) = __mtd_next_device(mtd->index + 1))
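
The mtd_for_each_device() helper replaces open-coded walks over the old fixed-size mtd_table[] array and visits every index currently present in the IDR. A short sketch of how a caller uses it under mtd_table_mutex (illustrative only, mirroring register_mtd_user() above):

/* Illustrative sketch only -- not part of this patch. */
struct mtd_info *mtd;

mutex_lock(&mtd_table_mutex);
mtd_for_each_device(mtd)
	printk(KERN_INFO "mtd%d: %s\n", mtd->index, mtd->name);
mutex_unlock(&mtd_table_mutex);
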
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index 92e12df0917f..328313c3dccb 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
@@ -429,11 +429,6 @@ static int __init mtdoops_init(void)
mtd_index = simple_strtoul(mtddev, &endp, 0);
if (*endp == '\0')
cxt->mtd_index = mtd_index;
- if (cxt->mtd_index > MAX_MTD_DEVICES) {
- printk(KERN_ERR "mtdoops: invalid mtd device number (%u) given\n",
- mtd_index);
- return -EINVAL;
- }
cxt->oops_buf = vmalloc(record_size);
if (!cxt->oops_buf) {
diff --git a/drivers/mtd/mtdsuper.c b/drivers/mtd/mtdsuper.c
index 7c003191fca4..bd9a443ccf69 100644
--- a/drivers/mtd/mtdsuper.c
+++ b/drivers/mtd/mtdsuper.c
@@ -152,18 +152,12 @@ int get_sb_mtd(struct file_system_type *fs_type, int flags,
DEBUG(1, "MTDSB: mtd:%%s, name \"%s\"\n",
dev_name + 4);
- for (mtdnr = 0; mtdnr < MAX_MTD_DEVICES; mtdnr++) {
- mtd = get_mtd_device(NULL, mtdnr);
- if (!IS_ERR(mtd)) {
- if (!strcmp(mtd->name, dev_name + 4))
- return get_sb_mtd_aux(
- fs_type, flags,
- dev_name, data, mtd,
- fill_super, mnt);
-
- put_mtd_device(mtd);
- }
- }
+ mtd = get_mtd_device_nm(dev_name + 4);
+ if (!IS_ERR(mtd))
+ return get_sb_mtd_aux(
+ fs_type, flags,
+ dev_name, data, mtd,
+ fill_super, mnt);
printk(KERN_NOTICE "MTD:"
" MTD device with name \"%s\" not found.\n",
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 42e5ea49e975..98a04b3c9526 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -2,11 +2,23 @@ menuconfig MTD_NAND
tristate "NAND Device Support"
depends on MTD
select MTD_NAND_IDS
+ select MTD_NAND_ECC
help
This enables support for accessing all type of NAND flash
devices. For further information see
<http://www.linux-mtd.infradead.org/doc/nand.html>.
+config MTD_NAND_ECC
+ tristate
+
+config MTD_NAND_ECC_SMC
+ bool "NAND ECC Smart Media byte order"
+ depends on MTD_NAND_ECC
+ default n
+ help
+ Software ECC according to the Smart Media Specification.
+ The original Linux implementation had byte 0 and 1 swapped.
+
if MTD_NAND
config MTD_NAND_VERIFY_WRITE
@@ -18,12 +30,9 @@ config MTD_NAND_VERIFY_WRITE
device thinks the write was successful, a bit could have been
flipped accidentally due to device wear or something else.
-config MTD_NAND_ECC_SMC
- bool "NAND ECC Smart Media byte order"
+config MTD_SM_COMMON
+ tristate
default n
- help
- Software ECC according to the Smart Media Specification.
- The original Linux implementation had byte 0 and 1 swapped.
config MTD_NAND_MUSEUM_IDS
bool "Enable chip ids for obsolete ancient NAND devices"
@@ -41,6 +50,23 @@ config MTD_NAND_AUTCPU12
This enables the driver for the autronix autcpu12 board to
access the SmartMediaCard.
+config MTD_NAND_DENALI
+ depends on PCI
+ tristate "Support Denali NAND controller on Intel Moorestown"
+ help
+ Enable the driver for NAND flash on Intel Moorestown, using the
+ Denali NAND controller core.
+
+config MTD_NAND_DENALI_SCRATCH_REG_ADDR
+ hex "Denali NAND size scratch register address"
+ default "0xFF108018"
+ help
+ Some platforms place the NAND chip size in a scratch register
+ because (some versions of) the driver aren't able to automatically
+ determine the size of certain chips. Set the address of the
+ scratch register here to enable this feature. On Intel Moorestown
+ boards, the scratch register is at 0xFF108018.
+
config MTD_NAND_EDB7312
tristate "Support for Cirrus Logic EBD7312 evaluation board"
depends on ARCH_EDB7312
@@ -95,15 +121,21 @@ config MTD_NAND_OMAP_PREFETCH_DMA
or in DMA interrupt mode.
Say y for DMA mode or MPU mode will be used
-config MTD_NAND_TS7250
- tristate "NAND Flash device on TS-7250 board"
- depends on MACH_TS72XX
- help
- Support for NAND flash on Technologic Systems TS-7250 platform.
-
config MTD_NAND_IDS
tristate
+config MTD_NAND_RICOH
+ tristate "Ricoh xD card reader"
+ default n
+ depends on PCI
+ select MTD_SM_COMMON
+ help
+ Enable support for the Ricoh R5C852 xD card reader.
+ You also need to enable either the
+ 'NAND SSFDC (SmartMedia) read only translation layer' or the new,
+ experimental, read-write
+ 'SmartMedia/xD new translation layer'.
+
config MTD_NAND_AU1550
tristate "Au1550/1200 NAND support"
depends on SOC_AU1200 || SOC_AU1550
@@ -358,8 +390,6 @@ config MTD_NAND_ATMEL_ECC_NONE
If unsure, say N
- endchoice
-
endchoice
config MTD_NAND_PXA3xx
@@ -442,6 +472,13 @@ config MTD_NAND_FSL_UPM
Enables support for NAND Flash chips wired onto Freescale PowerPC
processor localbus with User-Programmable Machine support.
+config MTD_NAND_MPC5121_NFC
+ tristate "MPC5121 built-in NAND Flash Controller support"
+ depends on PPC_MPC512x
+ help
+ This enables the driver for the NAND flash controller on the
+ MPC5121 SoC.
+
config MTD_NAND_MXC
tristate "MXC NAND support"
depends on ARCH_MX2 || ARCH_MX25 || ARCH_MX3
@@ -481,11 +518,11 @@ config MTD_NAND_SOCRATES
help
Enables support for NAND Flash chips wired onto Socrates board.
-config MTD_NAND_W90P910
- tristate "Support for NAND on w90p910 evaluation board."
+config MTD_NAND_NUC900
+ tristate "Support for NAND on Nuvoton NUC9xx/w90p910 evaluation boards."
depends on ARCH_W90X900 && MTD_PARTITIONS
help
This enables the driver for the NAND Flash on evaluation board based
- on w90p910.
+ on w90p910 / NUC9xx.
endif # MTD_NAND
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index 1407bd144015..e8ab884ba47b 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -2,13 +2,16 @@
# linux/drivers/nand/Makefile
#
-obj-$(CONFIG_MTD_NAND) += nand.o nand_ecc.o
+obj-$(CONFIG_MTD_NAND) += nand.o
+obj-$(CONFIG_MTD_NAND_ECC) += nand_ecc.o
obj-$(CONFIG_MTD_NAND_IDS) += nand_ids.o
+obj-$(CONFIG_MTD_SM_COMMON) += sm_common.o
obj-$(CONFIG_MTD_NAND_CAFE) += cafe_nand.o
obj-$(CONFIG_MTD_NAND_SPIA) += spia.o
obj-$(CONFIG_MTD_NAND_AMS_DELTA) += ams-delta.o
obj-$(CONFIG_MTD_NAND_AUTCPU12) += autcpu12.o
+obj-$(CONFIG_MTD_NAND_DENALI) += denali.o
obj-$(CONFIG_MTD_NAND_EDB7312) += edb7312.o
obj-$(CONFIG_MTD_NAND_AU1550) += au1550nd.o
obj-$(CONFIG_MTD_NAND_BF5XX) += bf5xx_nand.o
@@ -19,7 +22,6 @@ obj-$(CONFIG_MTD_NAND_DISKONCHIP) += diskonchip.o
obj-$(CONFIG_MTD_NAND_H1900) += h1910.o
obj-$(CONFIG_MTD_NAND_RTC_FROM4) += rtc_from4.o
obj-$(CONFIG_MTD_NAND_SHARPSL) += sharpsl.o
-obj-$(CONFIG_MTD_NAND_TS7250) += ts7250.o
obj-$(CONFIG_MTD_NAND_NANDSIM) += nandsim.o
obj-$(CONFIG_MTD_NAND_CS553X) += cs553x_nand.o
obj-$(CONFIG_MTD_NAND_NDFC) += ndfc.o
@@ -39,8 +41,10 @@ obj-$(CONFIG_MTD_NAND_SH_FLCTL) += sh_flctl.o
obj-$(CONFIG_MTD_NAND_MXC) += mxc_nand.o
obj-$(CONFIG_MTD_NAND_SOCRATES) += socrates_nand.o
obj-$(CONFIG_MTD_NAND_TXX9NDFMC) += txx9ndfmc.o
-obj-$(CONFIG_MTD_NAND_W90P910) += w90p910_nand.o
+obj-$(CONFIG_MTD_NAND_NUC900) += nuc900_nand.o
obj-$(CONFIG_MTD_NAND_NOMADIK) += nomadik_nand.o
obj-$(CONFIG_MTD_NAND_BCM_UMI) += bcm_umi_nand.o nand_bcm_umi.o
+obj-$(CONFIG_MTD_NAND_MPC5121_NFC) += mpc5121_nfc.o
+obj-$(CONFIG_MTD_NAND_RICOH) += r852.o
nand-objs := nand_base.o nand_bbt.o
diff --git a/drivers/mtd/nand/alauda.c b/drivers/mtd/nand/alauda.c
index 2d6773281fd9..8691e0482ed2 100644
--- a/drivers/mtd/nand/alauda.c
+++ b/drivers/mtd/nand/alauda.c
@@ -49,7 +49,7 @@
#define TIMEOUT HZ
-static struct usb_device_id alauda_table [] = {
+static const struct usb_device_id alauda_table[] = {
{ USB_DEVICE(0x0584, 0x0008) }, /* Fujifilm DPC-R1 */
{ USB_DEVICE(0x07b4, 0x010a) }, /* Olympus MAUSB-10 */
{ }
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index 524e6c9e0672..04d30887ca7f 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -474,7 +474,7 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
}
/* first scan to find the device and get the page size */
- if (nand_scan_ident(mtd, 1)) {
+ if (nand_scan_ident(mtd, 1, NULL)) {
res = -ENXIO;
goto err_scan_ident;
}
diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c
index 43d46e424040..3ffe05db4923 100644
--- a/drivers/mtd/nand/au1550nd.c
+++ b/drivers/mtd/nand/au1550nd.c
@@ -451,7 +451,7 @@ static int __init au1xxx_nand_init(void)
u32 nand_phys;
/* Allocate memory for MTD device structure and private data */
- au1550_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
+ au1550_mtd = kzalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
if (!au1550_mtd) {
printk("Unable to allocate NAND MTD dev structure.\n");
return -ENOMEM;
@@ -460,10 +460,6 @@ static int __init au1xxx_nand_init(void)
/* Get pointer to private data */
this = (struct nand_chip *)(&au1550_mtd[1]);
- /* Initialize structures */
- memset(au1550_mtd, 0, sizeof(struct mtd_info));
- memset(this, 0, sizeof(struct nand_chip));
-
/* Link the private data with the MTD structure */
au1550_mtd->priv = this;
au1550_mtd->owner = THIS_MODULE;
@@ -544,7 +540,7 @@ static int __init au1xxx_nand_init(void)
}
nand_phys = (mem_staddr << 4) & 0xFFFC0000;
- p_nand = (void __iomem *)ioremap(nand_phys, 0x1000);
+ p_nand = ioremap(nand_phys, 0x1000);
/* make controller and MTD agree */
if (NAND_CS == 0)
@@ -589,7 +585,7 @@ static int __init au1xxx_nand_init(void)
return 0;
outio:
- iounmap((void *)p_nand);
+ iounmap(p_nand);
outmem:
kfree(au1550_mtd);
@@ -610,7 +606,7 @@ static void __exit au1550_cleanup(void)
kfree(au1550_mtd);
/* Unmap */
- iounmap((void *)p_nand);
+ iounmap(p_nand);
}
module_exit(au1550_cleanup);
diff --git a/drivers/mtd/nand/bcm_umi_nand.c b/drivers/mtd/nand/bcm_umi_nand.c
index c997f98eeb3d..dfe262c726fb 100644
--- a/drivers/mtd/nand/bcm_umi_nand.c
+++ b/drivers/mtd/nand/bcm_umi_nand.c
@@ -13,7 +13,6 @@
*****************************************************************************/
/* ---- Include Files ---------------------------------------------------- */
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
@@ -447,7 +446,7 @@ static int __devinit bcm_umi_nand_probe(struct platform_device *pdev)
* layout we'll be using.
*/
- err = nand_scan_ident(board_mtd, 1);
+ err = nand_scan_ident(board_mtd, 1, NULL);
if (err) {
printk(KERN_ERR "nand_scan failed: %d\n", err);
iounmap(bcm_umi_io_base);
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c
index 8506e7e606fd..2974995e194d 100644
--- a/drivers/mtd/nand/bf5xx_nand.c
+++ b/drivers/mtd/nand/bf5xx_nand.c
@@ -68,6 +68,27 @@
#define DRV_AUTHOR "Bryan Wu <bryan.wu@analog.com>"
#define DRV_DESC "BF5xx on-chip NAND FLash Controller Driver"
+/* NFC_STAT Masks */
+#define NBUSY 0x01 /* Not Busy */
+#define WB_FULL 0x02 /* Write Buffer Full */
+#define PG_WR_STAT 0x04 /* Page Write Pending */
+#define PG_RD_STAT 0x08 /* Page Read Pending */
+#define WB_EMPTY 0x10 /* Write Buffer Empty */
+
+/* NFC_IRQSTAT Masks */
+#define NBUSYIRQ 0x01 /* Not Busy IRQ */
+#define WB_OVF 0x02 /* Write Buffer Overflow */
+#define WB_EDGE 0x04 /* Write Buffer Edge Detect */
+#define RD_RDY 0x08 /* Read Data Ready */
+#define WR_DONE 0x10 /* Page Write Done */
+
+/* NFC_RST Masks */
+#define ECC_RST 0x01 /* ECC (and NFC counters) Reset */
+
+/* NFC_PGCTL Masks */
+#define PG_RD_START 0x01 /* Page Read Start */
+#define PG_WR_START 0x02 /* Page Write Start */
+
#ifdef CONFIG_MTD_NAND_BF5XX_HWECC
static int hardware_ecc = 1;
#else
@@ -487,7 +508,7 @@ static void bf5xx_nand_dma_rw(struct mtd_info *mtd,
* transferred to generate the correct ECC register
* values.
*/
- bfin_write_NFC_RST(0x1);
+ bfin_write_NFC_RST(ECC_RST);
SSYNC();
disable_dma(CH_NFC);
@@ -497,7 +518,7 @@ static void bf5xx_nand_dma_rw(struct mtd_info *mtd,
set_dma_config(CH_NFC, 0x0);
set_dma_start_addr(CH_NFC, (unsigned long) buf);
-/* The DMAs have different size on BF52x and BF54x */
+ /* The DMAs have different size on BF52x and BF54x */
#ifdef CONFIG_BF52x
set_dma_x_count(CH_NFC, (page_size >> 1));
set_dma_x_modify(CH_NFC, 2);
@@ -517,9 +538,9 @@ static void bf5xx_nand_dma_rw(struct mtd_info *mtd,
/* Start PAGE read/write operation */
if (is_read)
- bfin_write_NFC_PGCTL(0x1);
+ bfin_write_NFC_PGCTL(PG_RD_START);
else
- bfin_write_NFC_PGCTL(0x2);
+ bfin_write_NFC_PGCTL(PG_WR_START);
wait_for_completion(&info->dma_completion);
}
diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c
index e5a9f9ccea60..db1dfc5a1b11 100644
--- a/drivers/mtd/nand/cafe_nand.c
+++ b/drivers/mtd/nand/cafe_nand.c
@@ -762,7 +762,7 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
cafe_readl(cafe, GLOBAL_CTRL), cafe_readl(cafe, GLOBAL_IRQ_MASK));
/* Scan to find existence of the device */
- if (nand_scan_ident(mtd, 2)) {
+ if (nand_scan_ident(mtd, 2, NULL)) {
err = -ENXIO;
goto out_irq;
}
@@ -849,7 +849,7 @@ static void __devexit cafe_nand_remove(struct pci_dev *pdev)
kfree(mtd);
}
-static struct pci_device_id cafe_nand_tbl[] = {
+static const struct pci_device_id cafe_nand_tbl[] = {
{ PCI_VENDOR_ID_MARVELL, PCI_DEVICE_ID_MARVELL_88ALP01_NAND,
PCI_ANY_ID, PCI_ANY_ID },
{ }
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index 76e2dc8e62f7..9c9d893affeb 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -567,8 +567,8 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
goto err_nomem;
}
- vaddr = ioremap(res1->start, res1->end - res1->start);
- base = ioremap(res2->start, res2->end - res2->start);
+ vaddr = ioremap(res1->start, resource_size(res1));
+ base = ioremap(res2->start, resource_size(res2));
if (!vaddr || !base) {
dev_err(&pdev->dev, "ioremap failed\n");
ret = -EINVAL;
@@ -691,7 +691,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
spin_unlock_irq(&davinci_nand_lock);
/* Scan to find existence of the device(s) */
- ret = nand_scan_ident(&info->mtd, pdata->mask_chipsel ? 2 : 1);
+ ret = nand_scan_ident(&info->mtd, pdata->mask_chipsel ? 2 : 1, NULL);
if (ret < 0) {
dev_dbg(&pdev->dev, "no NAND chip(s) found\n");
goto err_scan;
diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
new file mode 100644
index 000000000000..ca03428b59cc
--- /dev/null
+++ b/drivers/mtd/nand/denali.c
@@ -0,0 +1,2134 @@
+/*
+ * NAND Flash Controller Device Driver
+ * Copyright © 2009-2010, Intel Corporation and its suppliers.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/mtd/mtd.h>
+#include <linux/module.h>
+
+#include "denali.h"
+
+MODULE_LICENSE("GPL");
+
+/* We define a module parameter that allows the user to override
+ * the hardware and decide what timing mode should be used.
+ */
+#define NAND_DEFAULT_TIMINGS -1
+
+static int onfi_timing_mode = NAND_DEFAULT_TIMINGS;
+module_param(onfi_timing_mode, int, S_IRUGO);
+MODULE_PARM_DESC(onfi_timing_mode, "Overrides default ONFI setting;"
+		" -1 indicates use of default timings");
+
+#define DENALI_NAND_NAME "denali-nand"
+
+/* We define a macro here that combines all interrupts this driver uses into
+ * a single constant value, for convenience. */
+#define DENALI_IRQ_ALL (INTR_STATUS0__DMA_CMD_COMP | \
+ INTR_STATUS0__ECC_TRANSACTION_DONE | \
+ INTR_STATUS0__ECC_ERR | \
+ INTR_STATUS0__PROGRAM_FAIL | \
+ INTR_STATUS0__LOAD_COMP | \
+ INTR_STATUS0__PROGRAM_COMP | \
+ INTR_STATUS0__TIME_OUT | \
+ INTR_STATUS0__ERASE_FAIL | \
+ INTR_STATUS0__RST_COMP | \
+ INTR_STATUS0__ERASE_COMP)
+
+/* indicates whether the internal value for the flash bank is
+   valid */
+#define CHIP_SELECT_INVALID -1
+
+#define SUPPORT_8BITECC 1
+
+/* This macro divides two integers and rounds fractional values up
+ * to the nearest integer value. */
+#define CEIL_DIV(X, Y) (((X)%(Y)) ? ((X)/(Y)+1) : ((X)/(Y)))
+
+/* this macro allows us to convert from an MTD structure to our own
+ * device context (denali) structure.
+ */
+#define mtd_to_denali(m) container_of(m, struct denali_nand_info, mtd)
+
+/* These constants are defined by the driver to enable common driver
+ configuration options. */
+#define SPARE_ACCESS 0x41
+#define MAIN_ACCESS 0x42
+#define MAIN_SPARE_ACCESS 0x43
+
+#define DENALI_READ 0
+#define DENALI_WRITE 0x100
+
+/* types of device accesses. We can issue commands and get status */
+#define COMMAND_CYCLE 0
+#define ADDR_CYCLE 1
+#define STATUS_CYCLE 2
+
+/* this is a helper macro that allows us to
+ * format the bank into the proper bits for the controller */
+#define BANK(x) ((x) << 24)
+
+/* List of platforms this NAND controller has been integrated into */
+static const struct pci_device_id denali_pci_ids[] = {
+ { PCI_VDEVICE(INTEL, 0x0701), INTEL_CE4100 },
+ { PCI_VDEVICE(INTEL, 0x0809), INTEL_MRST },
+ { /* end: all zeroes */ }
+};
+
+
+/* these are static lookup tables that give us easy access to
+ registers in the NAND controller.
+ */
+static const uint32_t intr_status_addresses[4] = {INTR_STATUS0,
+ INTR_STATUS1,
+ INTR_STATUS2,
+ INTR_STATUS3};
+
+static const uint32_t device_reset_banks[4] = {DEVICE_RESET__BANK0,
+ DEVICE_RESET__BANK1,
+ DEVICE_RESET__BANK2,
+ DEVICE_RESET__BANK3};
+
+static const uint32_t operation_timeout[4] = {INTR_STATUS0__TIME_OUT,
+ INTR_STATUS1__TIME_OUT,
+ INTR_STATUS2__TIME_OUT,
+ INTR_STATUS3__TIME_OUT};
+
+static const uint32_t reset_complete[4] = {INTR_STATUS0__RST_COMP,
+ INTR_STATUS1__RST_COMP,
+ INTR_STATUS2__RST_COMP,
+ INTR_STATUS3__RST_COMP};
+
+/* specifies the debug level of the driver */
+static int nand_debug_level = 0;
+
+/* forward declarations */
+static void clear_interrupts(struct denali_nand_info *denali);
+static uint32_t wait_for_irq(struct denali_nand_info *denali, uint32_t irq_mask);
+static void denali_irq_enable(struct denali_nand_info *denali, uint32_t int_mask);
+static uint32_t read_interrupt_status(struct denali_nand_info *denali);
+
+#define DEBUG_DENALI 0
+
+/* This is a wrapper for writing to the denali registers.
+ * this allows us to create debug information so we can
+ * observe how the driver is programming the device.
+ * it uses standard linux convention for (val, addr) */
+static void denali_write32(uint32_t value, void *addr)
+{
+ iowrite32(value, addr);
+
+#if DEBUG_DENALI
+ printk(KERN_ERR "wrote: 0x%x -> 0x%x\n", value, (uint32_t)((uint32_t)addr & 0x1fff));
+#endif
+}
+
+/* Certain operations for the denali NAND controller use an indexed mode to read/write
+ data. The operation is performed by writing the address value of the command to
+ the device memory followed by the data. This function abstracts this common
+ operation.
+*/
+static void index_addr(struct denali_nand_info *denali, uint32_t address, uint32_t data)
+{
+ denali_write32(address, denali->flash_mem);
+ denali_write32(data, denali->flash_mem + 0x10);
+}
+
+/* Perform an indexed read of the device */
+static void index_addr_read_data(struct denali_nand_info *denali,
+ uint32_t address, uint32_t *pdata)
+{
+ denali_write32(address, denali->flash_mem);
+ *pdata = ioread32(denali->flash_mem + 0x10);
+}
+
+/* We need to buffer some data for some of the NAND core routines.
+ * These helpers manage that buffer. */
+static void reset_buf(struct denali_nand_info *denali)
+{
+ denali->buf.head = denali->buf.tail = 0;
+}
+
+static void write_byte_to_buf(struct denali_nand_info *denali, uint8_t byte)
+{
+ BUG_ON(denali->buf.tail >= sizeof(denali->buf.buf));
+ denali->buf.buf[denali->buf.tail++] = byte;
+}
+
+/* reads the status of the device */
+static void read_status(struct denali_nand_info *denali)
+{
+ uint32_t cmd = 0x0;
+
+ /* initialize the data buffer to store status */
+ reset_buf(denali);
+
+ /* initiate a device status read */
+ cmd = MODE_11 | BANK(denali->flash_bank);
+ index_addr(denali, cmd | COMMAND_CYCLE, 0x70);
+ denali_write32(cmd | STATUS_CYCLE, denali->flash_mem);
+
+ /* update buffer with status value */
+ write_byte_to_buf(denali, ioread32(denali->flash_mem + 0x10));
+
+#if DEBUG_DENALI
+ printk("device reporting status value of 0x%2x\n", denali->buf.buf[0]);
+#endif
+}
+
+/* resets a specific device connected to the core */
+static void reset_bank(struct denali_nand_info *denali)
+{
+ uint32_t irq_status = 0;
+ uint32_t irq_mask = reset_complete[denali->flash_bank] |
+ operation_timeout[denali->flash_bank];
+ int bank = 0;
+
+ clear_interrupts(denali);
+
+ bank = device_reset_banks[denali->flash_bank];
+ denali_write32(bank, denali->flash_reg + DEVICE_RESET);
+
+ irq_status = wait_for_irq(denali, irq_mask);
+
+ if (irq_status & operation_timeout[denali->flash_bank])
+ {
+ printk(KERN_ERR "reset bank failed.\n");
+ }
+}
+
+/* Reset the flash controller */
+static uint16_t NAND_Flash_Reset(struct denali_nand_info *denali)
+{
+ uint32_t i;
+
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ for (i = 0 ; i < LLD_MAX_FLASH_BANKS; i++)
+ denali_write32(reset_complete[i] | operation_timeout[i],
+ denali->flash_reg + intr_status_addresses[i]);
+
+ for (i = 0 ; i < LLD_MAX_FLASH_BANKS; i++) {
+ denali_write32(device_reset_banks[i], denali->flash_reg + DEVICE_RESET);
+ while (!(ioread32(denali->flash_reg + intr_status_addresses[i]) &
+ (reset_complete[i] | operation_timeout[i])))
+ ;
+ if (ioread32(denali->flash_reg + intr_status_addresses[i]) &
+ operation_timeout[i])
+ nand_dbg_print(NAND_DBG_WARN,
+ "NAND Reset operation timed out on bank %d\n", i);
+ }
+
+ for (i = 0; i < LLD_MAX_FLASH_BANKS; i++)
+ denali_write32(reset_complete[i] | operation_timeout[i],
+ denali->flash_reg + intr_status_addresses[i]);
+
+ return PASS;
+}
+
+/* This routine calculates the ONFI timing values for a given mode and
+ * programs the clocking register accordingly. The mode is determined by
+ * the get_onfi_nand_para routine.
+ */
+static void NAND_ONFi_Timing_Mode(struct denali_nand_info *denali, uint16_t mode)
+{
+ uint16_t Trea[6] = {40, 30, 25, 20, 20, 16};
+ uint16_t Trp[6] = {50, 25, 17, 15, 12, 10};
+ uint16_t Treh[6] = {30, 15, 15, 10, 10, 7};
+ uint16_t Trc[6] = {100, 50, 35, 30, 25, 20};
+ uint16_t Trhoh[6] = {0, 15, 15, 15, 15, 15};
+ uint16_t Trloh[6] = {0, 0, 0, 0, 5, 5};
+ uint16_t Tcea[6] = {100, 45, 30, 25, 25, 25};
+ uint16_t Tadl[6] = {200, 100, 100, 100, 70, 70};
+ uint16_t Trhw[6] = {200, 100, 100, 100, 100, 100};
+ uint16_t Trhz[6] = {200, 100, 100, 100, 100, 100};
+ uint16_t Twhr[6] = {120, 80, 80, 60, 60, 60};
+ uint16_t Tcs[6] = {70, 35, 25, 25, 20, 15};
+
+ uint16_t TclsRising = 1;
+ uint16_t data_invalid_rhoh, data_invalid_rloh, data_invalid;
+ uint16_t dv_window = 0;
+ uint16_t en_lo, en_hi;
+ uint16_t acc_clks;
+ uint16_t addr_2_data, re_2_we, re_2_re, we_2_re, cs_cnt;
+
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ en_lo = CEIL_DIV(Trp[mode], CLK_X);
+ en_hi = CEIL_DIV(Treh[mode], CLK_X);
+#if ONFI_BLOOM_TIME
+ if ((en_hi * CLK_X) < (Treh[mode] + 2))
+ en_hi++;
+#endif
+
+ if ((en_lo + en_hi) * CLK_X < Trc[mode])
+ en_lo += CEIL_DIV((Trc[mode] - (en_lo + en_hi) * CLK_X), CLK_X);
+
+ if ((en_lo + en_hi) < CLK_MULTI)
+ en_lo += CLK_MULTI - en_lo - en_hi;
+
+ while (dv_window < 8) {
+ data_invalid_rhoh = en_lo * CLK_X + Trhoh[mode];
+
+ data_invalid_rloh = (en_lo + en_hi) * CLK_X + Trloh[mode];
+
+ data_invalid =
+ data_invalid_rhoh <
+ data_invalid_rloh ? data_invalid_rhoh : data_invalid_rloh;
+
+ dv_window = data_invalid - Trea[mode];
+
+ if (dv_window < 8)
+ en_lo++;
+ }
+
+ acc_clks = CEIL_DIV(Trea[mode], CLK_X);
+
+ while (((acc_clks * CLK_X) - Trea[mode]) < 3)
+ acc_clks++;
+
+ if ((data_invalid - acc_clks * CLK_X) < 2)
+ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d: Warning!\n",
+ __FILE__, __LINE__);
+
+ addr_2_data = CEIL_DIV(Tadl[mode], CLK_X);
+ re_2_we = CEIL_DIV(Trhw[mode], CLK_X);
+ re_2_re = CEIL_DIV(Trhz[mode], CLK_X);
+ we_2_re = CEIL_DIV(Twhr[mode], CLK_X);
+ cs_cnt = CEIL_DIV((Tcs[mode] - Trp[mode]), CLK_X);
+ if (!TclsRising)
+ cs_cnt = CEIL_DIV(Tcs[mode], CLK_X);
+ if (cs_cnt == 0)
+ cs_cnt = 1;
+
+ if (Tcea[mode]) {
+ while (((cs_cnt * CLK_X) + Trea[mode]) < Tcea[mode])
+ cs_cnt++;
+ }
+
+#if MODE5_WORKAROUND
+ if (mode == 5)
+ acc_clks = 5;
+#endif
+
+ /* Sighting 3462430: Temporary hack for MT29F128G08CJABAWP:B */
+ if ((ioread32(denali->flash_reg + MANUFACTURER_ID) == 0) &&
+ (ioread32(denali->flash_reg + DEVICE_ID) == 0x88))
+ acc_clks = 6;
+
+ denali_write32(acc_clks, denali->flash_reg + ACC_CLKS);
+ denali_write32(re_2_we, denali->flash_reg + RE_2_WE);
+ denali_write32(re_2_re, denali->flash_reg + RE_2_RE);
+ denali_write32(we_2_re, denali->flash_reg + WE_2_RE);
+ denali_write32(addr_2_data, denali->flash_reg + ADDR_2_DATA);
+ denali_write32(en_lo, denali->flash_reg + RDWR_EN_LO_CNT);
+ denali_write32(en_hi, denali->flash_reg + RDWR_EN_HI_CNT);
+ denali_write32(cs_cnt, denali->flash_reg + CS_SETUP_CNT);
+}
+
+/* configures the initial ECC settings for the controller */
+static void set_ecc_config(struct denali_nand_info *denali)
+{
+#if SUPPORT_8BITECC
+ if ((ioread32(denali->flash_reg + DEVICE_MAIN_AREA_SIZE) < 4096) ||
+ (ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE) <= 128))
+ denali_write32(8, denali->flash_reg + ECC_CORRECTION);
+#endif
+
+ if ((ioread32(denali->flash_reg + ECC_CORRECTION) & ECC_CORRECTION__VALUE)
+ == 1) {
+ denali->dev_info.wECCBytesPerSector = 4;
+ denali->dev_info.wECCBytesPerSector *= denali->dev_info.wDevicesConnected;
+ denali->dev_info.wNumPageSpareFlag =
+ denali->dev_info.wPageSpareSize -
+ denali->dev_info.wPageDataSize /
+ (ECC_SECTOR_SIZE * denali->dev_info.wDevicesConnected) *
+ denali->dev_info.wECCBytesPerSector
+ - denali->dev_info.wSpareSkipBytes;
+ } else {
+ denali->dev_info.wECCBytesPerSector =
+ (ioread32(denali->flash_reg + ECC_CORRECTION) &
+ ECC_CORRECTION__VALUE) * 13 / 8;
+ if ((denali->dev_info.wECCBytesPerSector) % 2 == 0)
+ denali->dev_info.wECCBytesPerSector += 2;
+ else
+ denali->dev_info.wECCBytesPerSector += 1;
+
+ denali->dev_info.wECCBytesPerSector *= denali->dev_info.wDevicesConnected;
+ denali->dev_info.wNumPageSpareFlag = denali->dev_info.wPageSpareSize -
+ denali->dev_info.wPageDataSize /
+ (ECC_SECTOR_SIZE * denali->dev_info.wDevicesConnected) *
+ denali->dev_info.wECCBytesPerSector
+ - denali->dev_info.wSpareSkipBytes;
+ }
+}
+
+/* queries the NAND device to see what ONFI modes it supports. */
+static uint16_t get_onfi_nand_para(struct denali_nand_info *denali)
+{
+ int i;
+ uint16_t blks_lun_l, blks_lun_h, n_of_luns;
+ uint32_t blockperlun, id;
+
+ denali_write32(DEVICE_RESET__BANK0, denali->flash_reg + DEVICE_RESET);
+
+ while (!((ioread32(denali->flash_reg + INTR_STATUS0) &
+ INTR_STATUS0__RST_COMP) |
+ (ioread32(denali->flash_reg + INTR_STATUS0) &
+ INTR_STATUS0__TIME_OUT)))
+ ;
+
+ if (ioread32(denali->flash_reg + INTR_STATUS0) & INTR_STATUS0__RST_COMP) {
+ denali_write32(DEVICE_RESET__BANK1, denali->flash_reg + DEVICE_RESET);
+ while (!((ioread32(denali->flash_reg + INTR_STATUS1) &
+ INTR_STATUS1__RST_COMP) |
+ (ioread32(denali->flash_reg + INTR_STATUS1) &
+ INTR_STATUS1__TIME_OUT)))
+ ;
+
+ if (ioread32(denali->flash_reg + INTR_STATUS1) &
+ INTR_STATUS1__RST_COMP) {
+ denali_write32(DEVICE_RESET__BANK2,
+ denali->flash_reg + DEVICE_RESET);
+ while (!((ioread32(denali->flash_reg + INTR_STATUS2) &
+ INTR_STATUS2__RST_COMP) |
+ (ioread32(denali->flash_reg + INTR_STATUS2) &
+ INTR_STATUS2__TIME_OUT)))
+ ;
+
+ if (ioread32(denali->flash_reg + INTR_STATUS2) &
+ INTR_STATUS2__RST_COMP) {
+ denali_write32(DEVICE_RESET__BANK3,
+ denali->flash_reg + DEVICE_RESET);
+ while (!((ioread32(denali->flash_reg + INTR_STATUS3) &
+ INTR_STATUS3__RST_COMP) |
+ (ioread32(denali->flash_reg + INTR_STATUS3) &
+ INTR_STATUS3__TIME_OUT)))
+ ;
+ } else {
+ printk(KERN_ERR "Getting a time out for bank 2!\n");
+ }
+ } else {
+ printk(KERN_ERR "Getting a time out for bank 1!\n");
+ }
+ }
+
+ denali_write32(INTR_STATUS0__TIME_OUT, denali->flash_reg + INTR_STATUS0);
+ denali_write32(INTR_STATUS1__TIME_OUT, denali->flash_reg + INTR_STATUS1);
+ denali_write32(INTR_STATUS2__TIME_OUT, denali->flash_reg + INTR_STATUS2);
+ denali_write32(INTR_STATUS3__TIME_OUT, denali->flash_reg + INTR_STATUS3);
+
+ denali->dev_info.wONFIDevFeatures =
+ ioread32(denali->flash_reg + ONFI_DEVICE_FEATURES);
+ denali->dev_info.wONFIOptCommands =
+ ioread32(denali->flash_reg + ONFI_OPTIONAL_COMMANDS);
+ denali->dev_info.wONFITimingMode =
+ ioread32(denali->flash_reg + ONFI_TIMING_MODE);
+ denali->dev_info.wONFIPgmCacheTimingMode =
+ ioread32(denali->flash_reg + ONFI_PGM_CACHE_TIMING_MODE);
+
+ n_of_luns = ioread32(denali->flash_reg + ONFI_DEVICE_NO_OF_LUNS) &
+ ONFI_DEVICE_NO_OF_LUNS__NO_OF_LUNS;
+ blks_lun_l = ioread32(denali->flash_reg + ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L);
+ blks_lun_h = ioread32(denali->flash_reg + ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U);
+
+ blockperlun = (blks_lun_h << 16) | blks_lun_l;
+
+ denali->dev_info.wTotalBlocks = n_of_luns * blockperlun;
+
+ if (!(ioread32(denali->flash_reg + ONFI_TIMING_MODE) &
+ ONFI_TIMING_MODE__VALUE))
+ return FAIL;
+
+ for (i = 5; i > 0; i--) {
+ if (ioread32(denali->flash_reg + ONFI_TIMING_MODE) & (0x01 << i))
+ break;
+ }
+
+ NAND_ONFi_Timing_Mode(denali, i);
+
+ index_addr(denali, MODE_11 | 0, 0x90);
+ index_addr(denali, MODE_11 | 1, 0);
+
+ for (i = 0; i < 3; i++)
+ index_addr_read_data(denali, MODE_11 | 2, &id);
+
+ nand_dbg_print(NAND_DBG_DEBUG, "3rd ID: 0x%x\n", id);
+
+ denali->dev_info.MLCDevice = id & 0x0C;
+
+ /* By now, all the ONFI devices we know support the page cache */
+ /* rw feature. So here we enable the pipeline_rw_ahead feature */
+ /* iowrite32(1, denali->flash_reg + CACHE_WRITE_ENABLE); */
+ /* iowrite32(1, denali->flash_reg + CACHE_READ_ENABLE); */
+
+ return PASS;
+}
+
+static void get_samsung_nand_para(struct denali_nand_info *denali)
+{
+ uint8_t no_of_planes;
+ uint32_t blk_size;
+ uint64_t plane_size, capacity;
+ uint32_t id_bytes[5];
+ int i;
+
+ index_addr(denali, (uint32_t)(MODE_11 | 0), 0x90);
+ index_addr(denali, (uint32_t)(MODE_11 | 1), 0);
+ for (i = 0; i < 5; i++)
+ index_addr_read_data(denali, (uint32_t)(MODE_11 | 2), &id_bytes[i]);
+
+ nand_dbg_print(NAND_DBG_DEBUG,
+ "ID bytes: 0x%x, 0x%x, 0x%x, 0x%x, 0x%x\n",
+ id_bytes[0], id_bytes[1], id_bytes[2],
+ id_bytes[3], id_bytes[4]);
+
+ if ((id_bytes[1] & 0xff) == 0xd3) { /* Samsung K9WAG08U1A */
+ /* Set timing register values according to datasheet */
+ denali_write32(5, denali->flash_reg + ACC_CLKS);
+ denali_write32(20, denali->flash_reg + RE_2_WE);
+ denali_write32(12, denali->flash_reg + WE_2_RE);
+ denali_write32(14, denali->flash_reg + ADDR_2_DATA);
+ denali_write32(3, denali->flash_reg + RDWR_EN_LO_CNT);
+ denali_write32(2, denali->flash_reg + RDWR_EN_HI_CNT);
+ denali_write32(2, denali->flash_reg + CS_SETUP_CNT);
+ }
+
+ no_of_planes = 1 << ((id_bytes[4] & 0x0c) >> 2);
+ plane_size = (uint64_t)64 << ((id_bytes[4] & 0x70) >> 4);
+ blk_size = 64 << ((ioread32(denali->flash_reg + DEVICE_PARAM_1) & 0x30) >> 4);
+ capacity = (uint64_t)128 * plane_size * no_of_planes;
+
+ do_div(capacity, blk_size);
+ denali->dev_info.wTotalBlocks = capacity;
+}
+
+static void get_toshiba_nand_para(struct denali_nand_info *denali)
+{
+ void __iomem *scratch_reg;
+ uint32_t tmp;
+
+ /* Workaround to fix a controller bug which reports a wrong */
+ /* spare area size for some kind of Toshiba NAND device */
+ if ((ioread32(denali->flash_reg + DEVICE_MAIN_AREA_SIZE) == 4096) &&
+ (ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE) == 64)) {
+ denali_write32(216, denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
+ tmp = ioread32(denali->flash_reg + DEVICES_CONNECTED) *
+ ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
+ denali_write32(tmp, denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE);
+#if SUPPORT_15BITECC
+ denali_write32(15, denali->flash_reg + ECC_CORRECTION);
+#elif SUPPORT_8BITECC
+ denali_write32(8, denali->flash_reg + ECC_CORRECTION);
+#endif
+ }
+
+ /* Toshiba NAND cannot report its block count, so the user must */
+ /* provide the correct block number in a scratch register before */
+ /* the Linux NAND driver is loaded. If no valid value is found in */
+ /* the scratch register, the default block count is used. */
+ scratch_reg = ioremap_nocache(SCRATCH_REG_ADDR, SCRATCH_REG_SIZE);
+ if (!scratch_reg) {
+ printk(KERN_ERR "Spectra: ioremap failed in %s, Line %d",
+ __FILE__, __LINE__);
+ denali->dev_info.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
+ } else {
+ nand_dbg_print(NAND_DBG_WARN,
+ "Spectra: ioremap reg address: 0x%p\n", scratch_reg);
+ denali->dev_info.wTotalBlocks = 1 << ioread8(scratch_reg);
+ if (denali->dev_info.wTotalBlocks < 512)
+ denali->dev_info.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
+ iounmap(scratch_reg);
+ }
+}
+
+static void get_hynix_nand_para(struct denali_nand_info *denali)
+{
+ void __iomem *scratch_reg;
+ uint32_t main_size, spare_size;
+
+ switch (denali->dev_info.wDeviceID) {
+ case 0xD5: /* Hynix H27UAG8T2A, H27UBG8U5A or H27UCG8VFA */
+ case 0xD7: /* Hynix H27UDG8VEM, H27UCG8UDM or H27UCG8V5A */
+ denali_write32(128, denali->flash_reg + PAGES_PER_BLOCK);
+ denali_write32(4096, denali->flash_reg + DEVICE_MAIN_AREA_SIZE);
+ denali_write32(224, denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
+ main_size = 4096 * ioread32(denali->flash_reg + DEVICES_CONNECTED);
+ spare_size = 224 * ioread32(denali->flash_reg + DEVICES_CONNECTED);
+ denali_write32(main_size, denali->flash_reg + LOGICAL_PAGE_DATA_SIZE);
+ denali_write32(spare_size, denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE);
+ denali_write32(0, denali->flash_reg + DEVICE_WIDTH);
+#if SUPPORT_15BITECC
+ denali_write32(15, denali->flash_reg + ECC_CORRECTION);
+#elif SUPPORT_8BITECC
+ denali_write32(8, denali->flash_reg + ECC_CORRECTION);
+#endif
+ denali->dev_info.MLCDevice = 1;
+ break;
+ default:
+ nand_dbg_print(NAND_DBG_WARN,
+ "Spectra: Unknown Hynix NAND (Device ID: 0x%x)."
+ "Will use default parameter values instead.\n",
+ denali->dev_info.wDeviceID);
+ }
+
+ scratch_reg = ioremap_nocache(SCRATCH_REG_ADDR, SCRATCH_REG_SIZE);
+ if (!scratch_reg) {
+ printk(KERN_ERR "Spectra: ioremap failed in %s, Line %d",
+ __FILE__, __LINE__);
+ denali->dev_info.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
+ } else {
+ nand_dbg_print(NAND_DBG_WARN,
+ "Spectra: ioremap reg address: 0x%p\n", scratch_reg);
+ denali->dev_info.wTotalBlocks = 1 << ioread8(scratch_reg);
+ if (denali->dev_info.wTotalBlocks < 512)
+ denali->dev_info.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
+ iounmap(scratch_reg);
+ }
+}
+
+/* determines how many NAND chips are connected to the controller. Note for
+ Intel CE4100 devices we don't support more than one device.
+ */
+static void find_valid_banks(struct denali_nand_info *denali)
+{
+ uint32_t id[LLD_MAX_FLASH_BANKS];
+ int i;
+
+ denali->total_used_banks = 1;
+ for (i = 0; i < LLD_MAX_FLASH_BANKS; i++) {
+ index_addr(denali, (uint32_t)(MODE_11 | (i << 24) | 0), 0x90);
+ index_addr(denali, (uint32_t)(MODE_11 | (i << 24) | 1), 0);
+ index_addr_read_data(denali, (uint32_t)(MODE_11 | (i << 24) | 2), &id[i]);
+
+ nand_dbg_print(NAND_DBG_DEBUG,
+ "Return 1st ID for bank[%d]: %x\n", i, id[i]);
+
+ if (i == 0) {
+ if (!(id[i] & 0x0ff))
+ break; /* WTF? */
+ } else {
+ if ((id[i] & 0x0ff) == (id[0] & 0x0ff))
+ denali->total_used_banks++;
+ else
+ break;
+ }
+ }
+
+ if (denali->platform == INTEL_CE4100)
+ {
+ /* Platform limitations of the CE4100 device limit
+ * users to a single chip solution for NAND.
+ * Multichip support is not enabled.
+ */
+ if (denali->total_used_banks != 1)
+ {
+ printk(KERN_ERR "Sorry, Intel CE4100 only supports "
+ "a single NAND device.\n");
+ BUG();
+ }
+ }
+ nand_dbg_print(NAND_DBG_DEBUG,
+ "denali->total_used_banks: %d\n", denali->total_used_banks);
+}
+
+static void detect_partition_feature(struct denali_nand_info *denali)
+{
+ if (ioread32(denali->flash_reg + FEATURES) & FEATURES__PARTITION) {
+ if ((ioread32(denali->flash_reg + PERM_SRC_ID_1) &
+ PERM_SRC_ID_1__SRCID) == SPECTRA_PARTITION_ID) {
+ denali->dev_info.wSpectraStartBlock =
+ ((ioread32(denali->flash_reg + MIN_MAX_BANK_1) &
+ MIN_MAX_BANK_1__MIN_VALUE) *
+ denali->dev_info.wTotalBlocks)
+ +
+ (ioread32(denali->flash_reg + MIN_BLK_ADDR_1) &
+ MIN_BLK_ADDR_1__VALUE);
+
+ denali->dev_info.wSpectraEndBlock =
+ (((ioread32(denali->flash_reg + MIN_MAX_BANK_1) &
+ MIN_MAX_BANK_1__MAX_VALUE) >> 2) *
+ denali->dev_info.wTotalBlocks)
+ +
+ (ioread32(denali->flash_reg + MAX_BLK_ADDR_1) &
+ MAX_BLK_ADDR_1__VALUE);
+
+ denali->dev_info.wTotalBlocks *= denali->total_used_banks;
+
+ if (denali->dev_info.wSpectraEndBlock >=
+ denali->dev_info.wTotalBlocks) {
+ denali->dev_info.wSpectraEndBlock =
+ denali->dev_info.wTotalBlocks - 1;
+ }
+
+ denali->dev_info.wDataBlockNum =
+ denali->dev_info.wSpectraEndBlock -
+ denali->dev_info.wSpectraStartBlock + 1;
+ } else {
+ denali->dev_info.wTotalBlocks *= denali->total_used_banks;
+ denali->dev_info.wSpectraStartBlock = SPECTRA_START_BLOCK;
+ denali->dev_info.wSpectraEndBlock =
+ denali->dev_info.wTotalBlocks - 1;
+ denali->dev_info.wDataBlockNum =
+ denali->dev_info.wSpectraEndBlock -
+ denali->dev_info.wSpectraStartBlock + 1;
+ }
+ } else {
+ denali->dev_info.wTotalBlocks *= denali->total_used_banks;
+ denali->dev_info.wSpectraStartBlock = SPECTRA_START_BLOCK;
+ denali->dev_info.wSpectraEndBlock = denali->dev_info.wTotalBlocks - 1;
+ denali->dev_info.wDataBlockNum =
+ denali->dev_info.wSpectraEndBlock -
+ denali->dev_info.wSpectraStartBlock + 1;
+ }
+}
+
+static void dump_device_info(struct denali_nand_info *denali)
+{
+ nand_dbg_print(NAND_DBG_DEBUG, "denali->dev_info:\n");
+ nand_dbg_print(NAND_DBG_DEBUG, "DeviceMaker: 0x%x\n",
+ denali->dev_info.wDeviceMaker);
+ nand_dbg_print(NAND_DBG_DEBUG, "DeviceID: 0x%x\n",
+ denali->dev_info.wDeviceID);
+ nand_dbg_print(NAND_DBG_DEBUG, "DeviceType: 0x%x\n",
+ denali->dev_info.wDeviceType);
+ nand_dbg_print(NAND_DBG_DEBUG, "SpectraStartBlock: %d\n",
+ denali->dev_info.wSpectraStartBlock);
+ nand_dbg_print(NAND_DBG_DEBUG, "SpectraEndBlock: %d\n",
+ denali->dev_info.wSpectraEndBlock);
+ nand_dbg_print(NAND_DBG_DEBUG, "TotalBlocks: %d\n",
+ denali->dev_info.wTotalBlocks);
+ nand_dbg_print(NAND_DBG_DEBUG, "PagesPerBlock: %d\n",
+ denali->dev_info.wPagesPerBlock);
+ nand_dbg_print(NAND_DBG_DEBUG, "PageSize: %d\n",
+ denali->dev_info.wPageSize);
+ nand_dbg_print(NAND_DBG_DEBUG, "PageDataSize: %d\n",
+ denali->dev_info.wPageDataSize);
+ nand_dbg_print(NAND_DBG_DEBUG, "PageSpareSize: %d\n",
+ denali->dev_info.wPageSpareSize);
+ nand_dbg_print(NAND_DBG_DEBUG, "NumPageSpareFlag: %d\n",
+ denali->dev_info.wNumPageSpareFlag);
+ nand_dbg_print(NAND_DBG_DEBUG, "ECCBytesPerSector: %d\n",
+ denali->dev_info.wECCBytesPerSector);
+ nand_dbg_print(NAND_DBG_DEBUG, "BlockSize: %d\n",
+ denali->dev_info.wBlockSize);
+ nand_dbg_print(NAND_DBG_DEBUG, "BlockDataSize: %d\n",
+ denali->dev_info.wBlockDataSize);
+ nand_dbg_print(NAND_DBG_DEBUG, "DataBlockNum: %d\n",
+ denali->dev_info.wDataBlockNum);
+ nand_dbg_print(NAND_DBG_DEBUG, "PlaneNum: %d\n",
+ denali->dev_info.bPlaneNum);
+ nand_dbg_print(NAND_DBG_DEBUG, "DeviceMainAreaSize: %d\n",
+ denali->dev_info.wDeviceMainAreaSize);
+ nand_dbg_print(NAND_DBG_DEBUG, "DeviceSpareAreaSize: %d\n",
+ denali->dev_info.wDeviceSpareAreaSize);
+ nand_dbg_print(NAND_DBG_DEBUG, "DevicesConnected: %d\n",
+ denali->dev_info.wDevicesConnected);
+ nand_dbg_print(NAND_DBG_DEBUG, "DeviceWidth: %d\n",
+ denali->dev_info.wDeviceWidth);
+ nand_dbg_print(NAND_DBG_DEBUG, "HWRevision: 0x%x\n",
+ denali->dev_info.wHWRevision);
+ nand_dbg_print(NAND_DBG_DEBUG, "HWFeatures: 0x%x\n",
+ denali->dev_info.wHWFeatures);
+ nand_dbg_print(NAND_DBG_DEBUG, "ONFIDevFeatures: 0x%x\n",
+ denali->dev_info.wONFIDevFeatures);
+ nand_dbg_print(NAND_DBG_DEBUG, "ONFIOptCommands: 0x%x\n",
+ denali->dev_info.wONFIOptCommands);
+ nand_dbg_print(NAND_DBG_DEBUG, "ONFITimingMode: 0x%x\n",
+ denali->dev_info.wONFITimingMode);
+ nand_dbg_print(NAND_DBG_DEBUG, "ONFIPgmCacheTimingMode: 0x%x\n",
+ denali->dev_info.wONFIPgmCacheTimingMode);
+ nand_dbg_print(NAND_DBG_DEBUG, "MLCDevice: %s\n",
+ denali->dev_info.MLCDevice ? "Yes" : "No");
+ nand_dbg_print(NAND_DBG_DEBUG, "SpareSkipBytes: %d\n",
+ denali->dev_info.wSpareSkipBytes);
+ nand_dbg_print(NAND_DBG_DEBUG, "BitsInPageNumber: %d\n",
+ denali->dev_info.nBitsInPageNumber);
+ nand_dbg_print(NAND_DBG_DEBUG, "BitsInPageDataSize: %d\n",
+ denali->dev_info.nBitsInPageDataSize);
+ nand_dbg_print(NAND_DBG_DEBUG, "BitsInBlockDataSize: %d\n",
+ denali->dev_info.nBitsInBlockDataSize);
+}
+
+static uint16_t NAND_Read_Device_ID(struct denali_nand_info *denali)
+{
+ uint16_t status = PASS;
+ uint8_t no_of_planes;
+
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ denali->dev_info.wDeviceMaker = ioread32(denali->flash_reg + MANUFACTURER_ID);
+ denali->dev_info.wDeviceID = ioread32(denali->flash_reg + DEVICE_ID);
+ denali->dev_info.bDeviceParam0 = ioread32(denali->flash_reg + DEVICE_PARAM_0);
+ denali->dev_info.bDeviceParam1 = ioread32(denali->flash_reg + DEVICE_PARAM_1);
+ denali->dev_info.bDeviceParam2 = ioread32(denali->flash_reg + DEVICE_PARAM_2);
+
+ denali->dev_info.MLCDevice = ioread32(denali->flash_reg + DEVICE_PARAM_0) & 0x0c;
+
+ if (ioread32(denali->flash_reg + ONFI_DEVICE_NO_OF_LUNS) &
+ ONFI_DEVICE_NO_OF_LUNS__ONFI_DEVICE) { /* ONFI 1.0 NAND */
+ if (FAIL == get_onfi_nand_para(denali))
+ return FAIL;
+ } else if (denali->dev_info.wDeviceMaker == 0xEC) { /* Samsung NAND */
+ get_samsung_nand_para(denali);
+ } else if (denali->dev_info.wDeviceMaker == 0x98) { /* Toshiba NAND */
+ get_toshiba_nand_para(denali);
+ } else if (denali->dev_info.wDeviceMaker == 0xAD) { /* Hynix NAND */
+ get_hynix_nand_para(denali);
+ } else {
+ denali->dev_info.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
+ }
+
+ nand_dbg_print(NAND_DBG_DEBUG, "Dump timing register values:"
+ "acc_clks: %d, re_2_we: %d, we_2_re: %d,"
+ "addr_2_data: %d, rdwr_en_lo_cnt: %d, "
+ "rdwr_en_hi_cnt: %d, cs_setup_cnt: %d\n",
+ ioread32(denali->flash_reg + ACC_CLKS),
+ ioread32(denali->flash_reg + RE_2_WE),
+ ioread32(denali->flash_reg + WE_2_RE),
+ ioread32(denali->flash_reg + ADDR_2_DATA),
+ ioread32(denali->flash_reg + RDWR_EN_LO_CNT),
+ ioread32(denali->flash_reg + RDWR_EN_HI_CNT),
+ ioread32(denali->flash_reg + CS_SETUP_CNT));
+
+ denali->dev_info.wHWRevision = ioread32(denali->flash_reg + REVISION);
+ denali->dev_info.wHWFeatures = ioread32(denali->flash_reg + FEATURES);
+
+ denali->dev_info.wDeviceMainAreaSize =
+ ioread32(denali->flash_reg + DEVICE_MAIN_AREA_SIZE);
+ denali->dev_info.wDeviceSpareAreaSize =
+ ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
+
+ denali->dev_info.wPageDataSize =
+ ioread32(denali->flash_reg + LOGICAL_PAGE_DATA_SIZE);
+
+ /* Note: When using the Micron 4K NAND device, the controller will report
+  * the Page Spare Size as 216 bytes, but Micron's spec says it is 218 bytes.
+  * If it is forced to 218 bytes, the controller cannot work correctly,
+  * so just let it be. But keep in mind that this bug may cause other
+  * problems in the future. - Yunpeng 2008-10-10
+ */
+ denali->dev_info.wPageSpareSize =
+ ioread32(denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE);
+
+ denali->dev_info.wPagesPerBlock = ioread32(denali->flash_reg + PAGES_PER_BLOCK);
+
+ denali->dev_info.wPageSize =
+ denali->dev_info.wPageDataSize + denali->dev_info.wPageSpareSize;
+ denali->dev_info.wBlockSize =
+ denali->dev_info.wPageSize * denali->dev_info.wPagesPerBlock;
+ denali->dev_info.wBlockDataSize =
+ denali->dev_info.wPagesPerBlock * denali->dev_info.wPageDataSize;
+
+ denali->dev_info.wDeviceWidth = ioread32(denali->flash_reg + DEVICE_WIDTH);
+ denali->dev_info.wDeviceType =
+ ((ioread32(denali->flash_reg + DEVICE_WIDTH) > 0) ? 16 : 8);
+
+ denali->dev_info.wDevicesConnected = ioread32(denali->flash_reg + DEVICES_CONNECTED);
+
+ denali->dev_info.wSpareSkipBytes =
+ ioread32(denali->flash_reg + SPARE_AREA_SKIP_BYTES) *
+ denali->dev_info.wDevicesConnected;
+
+ denali->dev_info.nBitsInPageNumber =
+ ilog2(denali->dev_info.wPagesPerBlock);
+ denali->dev_info.nBitsInPageDataSize =
+ ilog2(denali->dev_info.wPageDataSize);
+ denali->dev_info.nBitsInBlockDataSize =
+ ilog2(denali->dev_info.wBlockDataSize);
+
+ set_ecc_config(denali);
+
+ no_of_planes = ioread32(denali->flash_reg + NUMBER_OF_PLANES) &
+ NUMBER_OF_PLANES__VALUE;
+
+ switch (no_of_planes) {
+ case 0:
+ case 1:
+ case 3:
+ case 7:
+ denali->dev_info.bPlaneNum = no_of_planes + 1;
+ break;
+ default:
+ status = FAIL;
+ break;
+ }
+
+ find_valid_banks(denali);
+
+ detect_partition_feature(denali);
+
+ dump_device_info(denali);
+
+ /* If the user specified to override the default timings
+ * with a specific ONFI mode, we apply those changes here.
+ */
+ if (onfi_timing_mode != NAND_DEFAULT_TIMINGS)
+ {
+ NAND_ONFi_Timing_Mode(denali, onfi_timing_mode);
+ }
+
+ return status;
+}
+
+static void NAND_LLD_Enable_Disable_Interrupts(struct denali_nand_info *denali,
+ uint16_t INT_ENABLE)
+{
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ if (INT_ENABLE)
+ denali_write32(1, denali->flash_reg + GLOBAL_INT_ENABLE);
+ else
+ denali_write32(0, denali->flash_reg + GLOBAL_INT_ENABLE);
+}
+
+/* validation function to verify that the controlling software is making
+ a valid request
+ */
+static inline bool is_flash_bank_valid(int flash_bank)
+{
+ return (flash_bank >= 0 && flash_bank < 4);
+}
+
+static void denali_irq_init(struct denali_nand_info *denali)
+{
+ uint32_t int_mask = 0;
+
+ /* Disable global interrupts */
+ NAND_LLD_Enable_Disable_Interrupts(denali, false);
+
+ int_mask = DENALI_IRQ_ALL;
+
+ /* Clear all status bits */
+ denali_write32(0xFFFF, denali->flash_reg + INTR_STATUS0);
+ denali_write32(0xFFFF, denali->flash_reg + INTR_STATUS1);
+ denali_write32(0xFFFF, denali->flash_reg + INTR_STATUS2);
+ denali_write32(0xFFFF, denali->flash_reg + INTR_STATUS3);
+
+ denali_irq_enable(denali, int_mask);
+}
+
+static void denali_irq_cleanup(int irqnum, struct denali_nand_info *denali)
+{
+ NAND_LLD_Enable_Disable_Interrupts(denali, false);
+ free_irq(irqnum, denali);
+}
+
+static void denali_irq_enable(struct denali_nand_info *denali, uint32_t int_mask)
+{
+ denali_write32(int_mask, denali->flash_reg + INTR_EN0);
+ denali_write32(int_mask, denali->flash_reg + INTR_EN1);
+ denali_write32(int_mask, denali->flash_reg + INTR_EN2);
+ denali_write32(int_mask, denali->flash_reg + INTR_EN3);
+}
+
+/* This function only returns when an interrupt that this driver cares about
+ * occurs. This is to reduce the overhead of servicing interrupts
+ */
+static inline uint32_t denali_irq_detected(struct denali_nand_info *denali)
+{
+ return (read_interrupt_status(denali) & DENALI_IRQ_ALL);
+}
+
+/* Interrupts are cleared by writing a 1 to the appropriate status bit */
+static inline void clear_interrupt(struct denali_nand_info *denali, uint32_t irq_mask)
+{
+ uint32_t intr_status_reg = 0;
+
+ intr_status_reg = intr_status_addresses[denali->flash_bank];
+
+ denali_write32(irq_mask, denali->flash_reg + intr_status_reg);
+}
+
+static void clear_interrupts(struct denali_nand_info *denali)
+{
+ uint32_t status = 0x0;
+ spin_lock_irq(&denali->irq_lock);
+
+ status = read_interrupt_status(denali);
+
+#if DEBUG_DENALI
+ denali->irq_debug_array[denali->idx++] = 0x30000000 | status;
+ denali->idx %= 32;
+#endif
+
+ denali->irq_status = 0x0;
+ spin_unlock_irq(&denali->irq_lock);
+}
+
+static uint32_t read_interrupt_status(struct denali_nand_info *denali)
+{
+ uint32_t intr_status_reg = 0;
+
+ intr_status_reg = intr_status_addresses[denali->flash_bank];
+
+ return ioread32(denali->flash_reg + intr_status_reg);
+}
+
+#if DEBUG_DENALI
+static void print_irq_log(struct denali_nand_info *denali)
+{
+ int i = 0;
+
+ printk("ISR debug log index = %X\n", denali->idx);
+ for (i = 0; i < 32; i++)
+ {
+ printk("%08X: %08X\n", i, denali->irq_debug_array[i]);
+ }
+}
+#endif
+
+/* This is the interrupt service routine. It handles all interrupts
+ * sent to this device. Note that on CE4100, this is a shared
+ * interrupt.
+ */
+static irqreturn_t denali_isr(int irq, void *dev_id)
+{
+ struct denali_nand_info *denali = dev_id;
+ uint32_t irq_status = 0x0;
+ irqreturn_t result = IRQ_NONE;
+
+ spin_lock(&denali->irq_lock);
+
+ /* check to see if a valid NAND chip has
+ * been selected.
+ */
+ if (is_flash_bank_valid(denali->flash_bank))
+ {
+ /* check to see if controller generated
+ * the interrupt, since this is a shared interrupt */
+ if ((irq_status = denali_irq_detected(denali)) != 0)
+ {
+#if DEBUG_DENALI
+ denali->irq_debug_array[denali->idx++] = 0x10000000 | irq_status;
+ denali->idx %= 32;
+
+ printk("IRQ status = 0x%04x\n", irq_status);
+#endif
+ /* handle interrupt */
+ /* first acknowledge it */
+ clear_interrupt(denali, irq_status);
+ /* store the status in the device context for someone
+ to read */
+ denali->irq_status |= irq_status;
+ /* notify anyone who cares that it happened */
+ complete(&denali->complete);
+ /* tell the OS that we've handled this */
+ result = IRQ_HANDLED;
+ }
+ }
+ spin_unlock(&denali->irq_lock);
+ return result;
+}
+#define BANK(x) ((x) << 24)
+
+static uint32_t wait_for_irq(struct denali_nand_info *denali, uint32_t irq_mask)
+{
+ unsigned long comp_res = 0;
+ uint32_t intr_status = 0;
+ bool retry = false;
+ unsigned long timeout = msecs_to_jiffies(1000);
+
+ do
+ {
+#if DEBUG_DENALI
+ printk("waiting for 0x%x\n", irq_mask);
+#endif
+ comp_res = wait_for_completion_timeout(&denali->complete, timeout);
+ spin_lock_irq(&denali->irq_lock);
+ intr_status = denali->irq_status;
+
+#if DEBUG_DENALI
+ denali->irq_debug_array[denali->idx++] = 0x20000000 | (irq_mask << 16) | intr_status;
+ denali->idx %= 32;
+#endif
+
+ if (intr_status & irq_mask)
+ {
+ denali->irq_status &= ~irq_mask;
+ spin_unlock_irq(&denali->irq_lock);
+#if DEBUG_DENALI
+ if (retry) printk("status on retry = 0x%x\n", intr_status);
+#endif
+ /* our interrupt was detected */
+ break;
+ }
+ else
+ {
+ /* these are not the interrupts you are looking for -
+ need to wait again */
+ spin_unlock_irq(&denali->irq_lock);
+#if DEBUG_DENALI
+ print_irq_log(denali);
+ printk("received irq nobody cared: irq_status = 0x%x,"
+ " irq_mask = 0x%x, timeout = %ld\n", intr_status, irq_mask, comp_res);
+#endif
+ retry = true;
+ }
+ } while (comp_res != 0);
+
+ if (comp_res == 0)
+ {
+ /* timeout */
+ printk(KERN_ERR "timeout occurred, status = 0x%x, mask = 0x%x\n",
+ intr_status, irq_mask);
+
+ intr_status = 0;
+ }
+ return intr_status;
+}
+
+/* This helper function sets up the registers for ECC and for whether or not
+ the spare area will be transferred. */
+static void setup_ecc_for_xfer(struct denali_nand_info *denali, bool ecc_en,
+ bool transfer_spare)
+{
+ int ecc_en_flag = 0, transfer_spare_flag = 0;
+
+ /* set ECC, transfer spare bits if needed */
+ ecc_en_flag = ecc_en ? ECC_ENABLE__FLAG : 0;
+ transfer_spare_flag = transfer_spare ? TRANSFER_SPARE_REG__FLAG : 0;
+
+ /* Enable spare area/ECC per user's request. */
+ denali_write32(ecc_en_flag, denali->flash_reg + ECC_ENABLE);
+ denali_write32(transfer_spare_flag, denali->flash_reg + TRANSFER_SPARE_REG);
+}
+
+/* sends a pipeline command operation to the controller. See the Denali NAND
+ controller's user guide for more information (section 4.2.3.6).
+ */
+static int denali_send_pipeline_cmd(struct denali_nand_info *denali, bool ecc_en,
+ bool transfer_spare, int access_type,
+ int op)
+{
+ int status = PASS;
+ uint32_t addr = 0x0, cmd = 0x0, page_count = 1, irq_status = 0,
+ irq_mask = 0;
+
+ if (op == DENALI_READ) irq_mask = INTR_STATUS0__LOAD_COMP;
+ else if (op == DENALI_WRITE) irq_mask = 0;
+ else BUG();
+
+ setup_ecc_for_xfer(denali, ecc_en, transfer_spare);
+
+#if DEBUG_DENALI
+ spin_lock_irq(&denali->irq_lock);
+ denali->irq_debug_array[denali->idx++] = 0x40000000 | ioread32(denali->flash_reg + ECC_ENABLE) | (access_type << 4);
+ denali->idx %= 32;
+ spin_unlock_irq(&denali->irq_lock);
+#endif
+
+
+ /* clear interrupts */
+ clear_interrupts(denali);
+
+ addr = BANK(denali->flash_bank) | denali->page;
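+
+ /* As used below: MODE_01 writes select the target page for a direct
+ * data transfer through the flash_mem window, while MODE_10 writes go
+ * through index_addr() to set the access type or issue a command. */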
+
+ if (op == DENALI_WRITE && access_type != SPARE_ACCESS)
+ {
+ cmd = MODE_01 | addr;
+ denali_write32(cmd, denali->flash_mem);
+ }
+ else if (op == DENALI_WRITE && access_type == SPARE_ACCESS)
+ {
+ /* write to the spare area */
+ cmd = MODE_10 | addr;
+ index_addr(denali, (uint32_t)cmd, access_type);
+
+ cmd = MODE_01 | addr;
+ denali_write32(cmd, denali->flash_mem);
+ }
+ else if (op == DENALI_READ)
+ {
+ /* setup page read request for access type */
+ cmd = MODE_10 | addr;
+ index_addr(denali, (uint32_t)cmd, access_type);
+
+ /* page 33 of the NAND controller spec indicates we should not
+ use the pipeline commands in Spare area only mode. So we
+ don't.
+ */
+ if (access_type == SPARE_ACCESS)
+ {
+ cmd = MODE_01 | addr;
+ denali_write32(cmd, denali->flash_mem);
+ }
+ else
+ {
+ index_addr(denali, (uint32_t)cmd, 0x2000 | op | page_count);
+
+ /* wait for command to be accepted
+ * can always use status0 bit as the mask is identical for each
+ * bank. */
+ irq_status = wait_for_irq(denali, irq_mask);
+
+ if (irq_status == 0)
+ {
+ printk(KERN_ERR "cmd, page, addr on timeout "
+ "(0x%x, 0x%x, 0x%x)\n", cmd, denali->page, addr);
+ status = FAIL;
+ }
+ else
+ {
+ cmd = MODE_01 | addr;
+ denali_write32(cmd, denali->flash_mem);
+ }
+ }
+ }
+ return status;
+}
+
+/* helper function that simply writes a buffer to the flash */
+static int write_data_to_flash_mem(struct denali_nand_info *denali, const uint8_t *buf,
+ int len)
+{
+ uint32_t i = 0, *buf32;
+
+ /* verify that the len is a multiple of 4. see comment in
+ * read_data_from_flash_mem() */
+ BUG_ON((len % 4) != 0);
+
+ /* write the data to the flash memory */
+ buf32 = (uint32_t *)buf;
+ for (i = 0; i < len / 4; i++)
+ {
+ denali_write32(*buf32++, denali->flash_mem + 0x10);
+ }
+ return i*4; /* intent is to return the number of bytes written */
+}
+
+/* helper function that simply reads a buffer from the flash */
+static int read_data_from_flash_mem(struct denali_nand_info *denali, uint8_t *buf,
+ int len)
+{
+ uint32_t i = 0, *buf32;
+
+ /* we assume that len will be a multiple of 4, if not
+ * it would be nice to know about it ASAP rather than
+ * have random failures...
+ *
+ * This assumption is based on the fact that this
+ * function is designed to be used to read flash pages,
+ * which are typically multiples of 4...
+ */
+
+ BUG_ON((len % 4) != 0);
+
+ /* transfer the data from the flash */
+ buf32 = (uint32_t *)buf;
+ for (i = 0; i < len / 4; i++)
+ {
+ *buf32++ = ioread32(denali->flash_mem + 0x10);
+ }
+ return i*4; /* intent is to return the number of bytes read */
+}
+
+/* writes OOB data to the device */
+static int write_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
+{
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+ uint32_t irq_status = 0;
+ uint32_t irq_mask = INTR_STATUS0__PROGRAM_COMP |
+ INTR_STATUS0__PROGRAM_FAIL;
+ int status = 0;
+
+ denali->page = page;
+
+ if (denali_send_pipeline_cmd(denali, false, false, SPARE_ACCESS,
+ DENALI_WRITE) == PASS)
+ {
+ write_data_to_flash_mem(denali, buf, mtd->oobsize);
+
+#if DEBUG_DENALI
+ spin_lock_irq(&denali->irq_lock);
+ denali->irq_debug_array[denali->idx++] = 0x80000000 | mtd->oobsize;
+ denali->idx %= 32;
+ spin_unlock_irq(&denali->irq_lock);
+#endif
+
+
+ /* wait for operation to complete */
+ irq_status = wait_for_irq(denali, irq_mask);
+
+ if (irq_status == 0)
+ {
+ printk(KERN_ERR "OOB write failed\n");
+ status = -EIO;
+ }
+ }
+ else
+ {
+ printk(KERN_ERR "unable to send pipeline command\n");
+ status = -EIO;
+ }
+ return status;
+}
+
+/* reads OOB data from the device */
+static void read_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
+{
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+ uint32_t irq_mask = INTR_STATUS0__LOAD_COMP, irq_status = 0, addr = 0x0, cmd = 0x0;
+
+ denali->page = page;
+
+#if DEBUG_DENALI
+ printk("read_oob %d\n", page);
+#endif
+ if (denali_send_pipeline_cmd(denali, false, true, SPARE_ACCESS,
+ DENALI_READ) == PASS)
+ {
+ read_data_from_flash_mem(denali, buf, mtd->oobsize);
+
+ /* wait for command to be accepted
+ * can always use status0 bit as the mask is identical for each
+ * bank. */
+ irq_status = wait_for_irq(denali, irq_mask);
+
+ if (irq_status == 0)
+ {
+ printk(KERN_ERR "page on OOB timeout %d\n", denali->page);
+ }
+
+ /* We set the device back to MAIN_ACCESS here as I observed
+ * instability with the controller if you do a block erase
+ * and the last transaction was a SPARE_ACCESS. Block erase
+ * is reliable (according to the MTD test infrastructure)
+ * if you are in MAIN_ACCESS.
+ */
+ addr = BANK(denali->flash_bank) | denali->page;
+ cmd = MODE_10 | addr;
+ index_addr(denali, (uint32_t)cmd, MAIN_ACCESS);
+
+#if DEBUG_DENALI
+ spin_lock_irq(&denali->irq_lock);
+ denali->irq_debug_array[denali->idx++] = 0x60000000 | mtd->oobsize;
+ denali->idx %= 32;
+ spin_unlock_irq(&denali->irq_lock);
+#endif
+ }
+}
+
+/* this function examines buffers to see if they contain data that
+ * indicate that the buffer is part of an erased region of flash.
+ */
+bool is_erased(uint8_t *buf, int len)
+{
+ int i = 0;
+ for (i = 0; i < len; i++)
+ {
+ if (buf[i] != 0xFF)
+ {
+ return false;
+ }
+ }
+ return true;
+}
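+
+/* Helpers for decoding the ECC error reporting registers: ECC_ERROR_ADDRESS
+ * packs the failing sector number in bits 15:12 and the byte offset within
+ * that sector in bits 11:0; ERR_CORRECTION_INFO carries the correction byte
+ * mask, the device number and a "last error" flag (see denali.h below). */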
+#define ECC_SECTOR_SIZE 512
+
+#define ECC_SECTOR(x) (((x) & ECC_ERROR_ADDRESS__SECTOR_NR) >> 12)
+#define ECC_BYTE(x) (((x) & ECC_ERROR_ADDRESS__OFFSET))
+#define ECC_CORRECTION_VALUE(x) ((x) & ERR_CORRECTION_INFO__BYTEMASK)
+#define ECC_ERROR_CORRECTABLE(x) (!((x) & ERR_CORRECTION_INFO__ERROR_TYPE))
+#define ECC_ERR_DEVICE(x) (((x) & ERR_CORRECTION_INFO__DEVICE_NR) >> 8)
+#define ECC_LAST_ERR(x) ((x) & ERR_CORRECTION_INFO__LAST_ERR_INFO)
+
+static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf,
+ uint8_t *oobbuf, uint32_t irq_status)
+{
+ bool check_erased_page = false;
+
+ if (irq_status & INTR_STATUS0__ECC_ERR)
+ {
+ /* read the reported ECC errors and correct them where possible */
+ uint32_t err_address = 0, err_correction_info = 0;
+ uint32_t err_byte = 0, err_sector = 0, err_device = 0;
+ uint32_t err_correction_value = 0;
+
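+ /* Walk the controller's error report: each ECC_ERROR_ADDRESS /
+ * ERR_CORRECTION_INFO pair describes one error, and the
+ * LAST_ERR_INFO bit marks the final entry. */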
+ do
+ {
+ err_address = ioread32(denali->flash_reg +
+ ECC_ERROR_ADDRESS);
+ err_sector = ECC_SECTOR(err_address);
+ err_byte = ECC_BYTE(err_address);
+
+
+ err_correction_info = ioread32(denali->flash_reg +
+ ERR_CORRECTION_INFO);
+ err_correction_value =
+ ECC_CORRECTION_VALUE(err_correction_info);
+ err_device = ECC_ERR_DEVICE(err_correction_info);
+
+ if (ECC_ERROR_CORRECTABLE(err_correction_info))
+ {
+ /* offset in our buffer is computed as:
+ sector number * sector size + offset in
+ sector
+ */
+ int offset = err_sector * ECC_SECTOR_SIZE +
+ err_byte;
+ if (offset < denali->mtd.writesize)
+ {
+ /* correct the ECC error */
+ buf[offset] ^= err_correction_value;
+ denali->mtd.ecc_stats.corrected++;
+ }
+ else
+ {
+ /* bummer, couldn't correct the error */
+ printk(KERN_ERR "ECC offset invalid\n");
+ denali->mtd.ecc_stats.failed++;
+ }
+ }
+ else
+ {
+ /* if the error is not correctable, need to
+ * look at the page to see if it is an erased page.
+ * if so, then it's not a real ECC error */
+ check_erased_page = true;
+ }
+
+#if DEBUG_DENALI
+ printk("Detected ECC error in page %d: err_addr = 0x%08x,"
+ " info to fix is 0x%08x\n", denali->page, err_address,
+ err_correction_info);
+#endif
+ } while (!ECC_LAST_ERR(err_correction_info));
+ }
+ return check_erased_page;
+}
+
+/* programs the controller to enable or disable DMA transfers */
+static void denali_enable_dma(struct denali_nand_info *denali, bool en)
+{
+ uint32_t reg_val = 0x0;
+
+ if (en) reg_val = DMA_ENABLE__FLAG;
+
+ denali_write32(reg_val, denali->flash_reg + DMA_ENABLE);
+ ioread32(denali->flash_reg + DMA_ENABLE);
+}
+
+/* sets up the HW to perform the data DMA */
+static void denali_setup_dma(struct denali_nand_info *denali, int op)
+{
+ uint32_t mode = 0x0;
+ const int page_count = 1;
+ dma_addr_t addr = denali->buf.dma_buf;
+
+ mode = MODE_10 | BANK(denali->flash_bank);
+
+ /* DMA is a four step process */
+
+ /* 1. setup transfer type and # of pages */
+ index_addr(denali, mode | denali->page, 0x2000 | op | page_count);
+
+ /* 2. high 16 bits of the DMA address go into bits 23:8 of the command */
+ index_addr(denali, mode | ((uint16_t)(addr >> 16) << 8), 0x2200);
+
+ /* 3. low 16 bits of the DMA address go into bits 23:8 of the command */
+ index_addr(denali, mode | ((uint16_t)addr << 8), 0x2300);
+
+ /* 4. interrupt when complete, burst len = 64 bytes*/
+ index_addr(denali, mode | 0x14000, 0x2400);
+}
+
+/* writes a page. user specifies type, and this function handles the
+ configuration details. */
+static void write_page(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *buf, bool raw_xfer)
+{
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+ struct pci_dev *pci_dev = denali->dev;
+
+ dma_addr_t addr = denali->buf.dma_buf;
+ size_t size = denali->mtd.writesize + denali->mtd.oobsize;
+
+ uint32_t irq_status = 0;
+ uint32_t irq_mask = INTR_STATUS0__DMA_CMD_COMP |
+ INTR_STATUS0__PROGRAM_FAIL;
+
+ /* if it is a raw xfer, we want to disable ecc, and send
+ * the spare area.
+ * !raw_xfer - enable ecc
+ * raw_xfer - transfer spare
+ */
+ setup_ecc_for_xfer(denali, !raw_xfer, raw_xfer);
+
+ /* copy buffer into DMA buffer */
+ memcpy(denali->buf.buf, buf, mtd->writesize);
+
+ if (raw_xfer)
+ {
+ /* transfer the data to the spare area */
+ memcpy(denali->buf.buf + mtd->writesize,
+ chip->oob_poi,
+ mtd->oobsize);
+ }
+
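+ /* hand the streaming DMA buffer to the device before starting the
+ * transfer; ownership is reclaimed with ..._for_cpu() once the
+ * completion interrupt has been handled. */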
+ pci_dma_sync_single_for_device(pci_dev, addr, size, PCI_DMA_TODEVICE);
+
+ clear_interrupts(denali);
+ denali_enable_dma(denali, true);
+
+ denali_setup_dma(denali, DENALI_WRITE);
+
+ /* wait for operation to complete */
+ irq_status = wait_for_irq(denali, irq_mask);
+
+ if (irq_status == 0)
+ {
+ printk(KERN_ERR "timeout on write_page (type = %d)\n", raw_xfer);
+ denali->status =
+ (irq_status & INTR_STATUS0__PROGRAM_FAIL) ? NAND_STATUS_FAIL :
+ PASS;
+ }
+
+ denali_enable_dma(denali, false);
+ pci_dma_sync_single_for_cpu(pci_dev, addr, size, PCI_DMA_TODEVICE);
+}
+
+/* NAND core entry points */
+
+/* this is the callback that the NAND core calls to write a page. Since
+ writing a page with ECC or without is similar, all the work is done
+ by write_page above. */
+static void denali_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *buf)
+{
+ /* for regular page writes, we let HW handle all the ECC
+ * data written to the device. */
+ write_page(mtd, chip, buf, false);
+}
+
+/* This is the callback that the NAND core calls to write a page without ECC.
+ raw access is similar to ECC page writes, so all the work is done in the
+ write_page() function above.
+ */
+static void denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *buf)
+{
+ /* for raw page writes, we want to disable ECC and simply write
+ whatever data is in the buffer. */
+ write_page(mtd, chip, buf, true);
+}
+
+static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
+{
+ return write_oob_data(mtd, chip->oob_poi, page);
+}
+
+static int denali_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
+ int page, int sndcmd)
+{
+ read_oob_data(mtd, chip->oob_poi, page);
+
+ return 0; /* notify NAND core to send command to
+ * NAND device. */
+}
+
+static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int page)
+{
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+ struct pci_dev *pci_dev = denali->dev;
+
+ dma_addr_t addr = denali->buf.dma_buf;
+ size_t size = denali->mtd.writesize + denali->mtd.oobsize;
+
+ uint32_t irq_status = 0;
+ uint32_t irq_mask = INTR_STATUS0__ECC_TRANSACTION_DONE |
+ INTR_STATUS0__ECC_ERR;
+ bool check_erased_page = false;
+
+ setup_ecc_for_xfer(denali, true, false);
+
+ denali_enable_dma(denali, true);
+ pci_dma_sync_single_for_device(pci_dev, addr, size, PCI_DMA_FROMDEVICE);
+
+ clear_interrupts(denali);
+ denali_setup_dma(denali, DENALI_READ);
+
+ /* wait for operation to complete */
+ irq_status = wait_for_irq(denali, irq_mask);
+
+ pci_dma_sync_single_for_cpu(pci_dev, addr, size, PCI_DMA_FROMDEVICE);
+
+ memcpy(buf, denali->buf.buf, mtd->writesize);
+
+ check_erased_page = handle_ecc(denali, buf, chip->oob_poi, irq_status);
+ denali_enable_dma(denali, false);
+
+ if (check_erased_page)
+ {
+ read_oob_data(&denali->mtd, chip->oob_poi, denali->page);
+
+ /* check ECC failures that may have occurred on erased pages */
+ if (!is_erased(buf, denali->mtd.writesize))
+ {
+ denali->mtd.ecc_stats.failed++;
+ }
+ if (!is_erased(chip->oob_poi, denali->mtd.oobsize))
+ {
+ denali->mtd.ecc_stats.failed++;
+ }
+ }
+ return 0;
+}
+
+static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int page)
+{
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+ struct pci_dev *pci_dev = denali->dev;
+
+ dma_addr_t addr = denali->buf.dma_buf;
+ size_t size = denali->mtd.writesize + denali->mtd.oobsize;
+
+ uint32_t irq_status = 0;
+ uint32_t irq_mask = INTR_STATUS0__DMA_CMD_COMP;
+
+ setup_ecc_for_xfer(denali, false, true);
+ denali_enable_dma(denali, true);
+
+ pci_dma_sync_single_for_device(pci_dev, addr, size, PCI_DMA_FROMDEVICE);
+
+ clear_interrupts(denali);
+ denali_setup_dma(denali, DENALI_READ);
+
+ /* wait for operation to complete */
+ irq_status = wait_for_irq(denali, irq_mask);
+
+ pci_dma_sync_single_for_cpu(pci_dev, addr, size, PCI_DMA_FROMDEVICE);
+
+ denali_enable_dma(denali, false);
+
+ memcpy(buf, denali->buf.buf, mtd->writesize);
+ memcpy(chip->oob_poi, denali->buf.buf + mtd->writesize, mtd->oobsize);
+
+ return 0;
+}
+
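+/* The NAND core fetches ID and status bytes one at a time through
+ * read_byte(); denali_cmdfunc() fills denali->buf and this simply
+ * drains it. */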
+static uint8_t denali_read_byte(struct mtd_info *mtd)
+{
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+ uint8_t result = 0xff;
+
+ if (denali->buf.head < denali->buf.tail)
+ {
+ result = denali->buf.buf[denali->buf.head++];
+ }
+
+#if DEBUG_DENALI
+ printk("read byte -> 0x%02x\n", result);
+#endif
+ return result;
+}
+
+static void denali_select_chip(struct mtd_info *mtd, int chip)
+{
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+#if DEBUG_DENALI
+ printk("denali select chip %d\n", chip);
+#endif
+ spin_lock_irq(&denali->irq_lock);
+ denali->flash_bank = chip;
+ spin_unlock_irq(&denali->irq_lock);
+}
+
+static int denali_waitfunc(struct mtd_info *mtd, struct nand_chip *chip)
+{
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+ int status = denali->status;
+ denali->status = 0;
+
+#if DEBUG_DENALI
+ printk("waitfunc %d\n", status);
+#endif
+ return status;
+}
+
+static void denali_erase(struct mtd_info *mtd, int page)
+{
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+
+ uint32_t cmd = 0x0, irq_status = 0;
+
+#if DEBUG_DENALI
+ printk("erase page: %d\n", page);
+#endif
+ /* clear interrupts */
+ clear_interrupts(denali);
+
+ /* setup page read request for access type */
+ cmd = MODE_10 | BANK(denali->flash_bank) | page;
+ index_addr(denali, (uint32_t)cmd, 0x1);
+
+ /* wait for erase to complete or failure to occur */
+ irq_status = wait_for_irq(denali, INTR_STATUS0__ERASE_COMP |
+ INTR_STATUS0__ERASE_FAIL);
+
+ denali->status = (irq_status & INTR_STATUS0__ERASE_FAIL) ? NAND_STATUS_FAIL :
+ PASS;
+}
+
+static void denali_cmdfunc(struct mtd_info *mtd, unsigned int cmd, int col,
+ int page)
+{
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+
+#if DEBUG_DENALI
+ printk("cmdfunc: 0x%x %d %d\n", cmd, col, page);
+#endif
+ switch (cmd)
+ {
+ case NAND_CMD_PAGEPROG:
+ break;
+ case NAND_CMD_STATUS:
+ read_status(denali);
+ break;
+ case NAND_CMD_READID:
+ reset_buf(denali);
+ if (denali->flash_bank < denali->total_used_banks)
+ {
+ /* write manufacturer information into nand
+ buffer for NAND subsystem to fetch.
+ */
+ write_byte_to_buf(denali, denali->dev_info.wDeviceMaker);
+ write_byte_to_buf(denali, denali->dev_info.wDeviceID);
+ write_byte_to_buf(denali, denali->dev_info.bDeviceParam0);
+ write_byte_to_buf(denali, denali->dev_info.bDeviceParam1);
+ write_byte_to_buf(denali, denali->dev_info.bDeviceParam2);
+ }
+ else
+ {
+ int i;
+ for (i = 0; i < 5; i++)
+ write_byte_to_buf(denali, 0xff);
+ }
+ break;
+ case NAND_CMD_READ0:
+ case NAND_CMD_SEQIN:
+ denali->page = page;
+ break;
+ case NAND_CMD_RESET:
+ reset_bank(denali);
+ break;
+ case NAND_CMD_READOOB:
+ /* TODO: Read OOB data */
+ break;
+ default:
+ printk(KERN_ERR ": unsupported command received 0x%x\n", cmd);
+ break;
+ }
+}
+
+/* stubs for ECC functions not used by the NAND core */
+static int denali_ecc_calculate(struct mtd_info *mtd, const uint8_t *data,
+ uint8_t *ecc_code)
+{
+ printk(KERN_ERR "denali_ecc_calculate called unexpectedly\n");
+ BUG();
+ return -EIO;
+}
+
+static int denali_ecc_correct(struct mtd_info *mtd, uint8_t *data,
+ uint8_t *read_ecc, uint8_t *calc_ecc)
+{
+ printk(KERN_ERR "denali_ecc_correct called unexpectedly\n");
+ BUG();
+ return -EIO;
+}
+
+static void denali_ecc_hwctl(struct mtd_info *mtd, int mode)
+{
+ printk(KERN_ERR "denali_ecc_hwctl called unexpectedly\n");
+ BUG();
+}
+/* end NAND core entry points */
+
+/* Initialization code to bring the device up to a known good state */
+static void denali_hw_init(struct denali_nand_info *denali)
+{
+ denali_irq_init(denali);
+ NAND_Flash_Reset(denali);
+ denali_write32(0x0F, denali->flash_reg + RB_PIN_ENABLED);
+ denali_write32(CHIP_EN_DONT_CARE__FLAG, denali->flash_reg + CHIP_ENABLE_DONT_CARE);
+
+ denali_write32(0x0, denali->flash_reg + SPARE_AREA_SKIP_BYTES);
+ denali_write32(0xffff, denali->flash_reg + SPARE_AREA_MARKER);
+
+ /* these registers need sane default values at init time */
+ denali_write32(0, denali->flash_reg + TWO_ROW_ADDR_CYCLES);
+ denali_write32(1, denali->flash_reg + ECC_ENABLE);
+}
+
+/* ECC layout for SLC devices. Denali spec indicates SLC fixed at 4 bytes */
+#define ECC_BYTES_SLC (4 * (2048 / ECC_SECTOR_SIZE))
+static struct nand_ecclayout nand_oob_slc = {
+ .eccbytes = 4,
+ .eccpos = { 0, 1, 2, 3 }, /* not used */
+ .oobfree = {{
+ .offset = ECC_BYTES_SLC,
+ .length = 64 - ECC_BYTES_SLC
+ }}
+};
+
+#define ECC_BYTES_MLC (14 * (2048 / ECC_SECTOR_SIZE))
+static struct nand_ecclayout nand_oob_mlc_14bit = {
+ .eccbytes = 14,
+ .eccpos = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13 }, /* not used */
+ .oobfree = {{
+ .offset = ECC_BYTES_MLC,
+ .length = 64 - ECC_BYTES_MLC
+ }}
+};
+
+static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' };
+static uint8_t mirror_pattern[] = {'1', 't', 'b', 'B' };
+
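+/* On-flash bad block table descriptors: the main and mirror tables live in
+ * the last blocks of each chip and are located by the "Bbt0" / "1tbB"
+ * patterns stored at offset 8 of the OOB area. */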
+static struct nand_bbt_descr bbt_main_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+ .offs = 8,
+ .len = 4,
+ .veroffs = 12,
+ .maxblocks = 4,
+ .pattern = bbt_pattern,
+};
+
+static struct nand_bbt_descr bbt_mirror_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+ .offs = 8,
+ .len = 4,
+ .veroffs = 12,
+ .maxblocks = 4,
+ .pattern = mirror_pattern,
+};
+
+/* initialize driver data structures */
+void denali_drv_init(struct denali_nand_info *denali)
+{
+ denali->idx = 0;
+
+ /* setup interrupt handler */
+ /* the completion object will be used to notify
+ * the callee that the interrupt is done */
+ init_completion(&denali->complete);
+
+ /* the spinlock will be used to synchronize the ISR
+ * with any code that might access the shared
+ * data (interrupt status) */
+ spin_lock_init(&denali->irq_lock);
+
+ /* indicate that MTD has not selected a valid bank yet */
+ denali->flash_bank = CHIP_SELECT_INVALID;
+
+ /* initialize our irq_status variable to indicate no interrupts */
+ denali->irq_status = 0;
+}
+
+/* driver entry point */
+static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ int ret = -ENODEV;
+ resource_size_t csr_base, mem_base;
+ unsigned long csr_len, mem_len;
+ struct denali_nand_info *denali;
+
+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ denali = kzalloc(sizeof(*denali), GFP_KERNEL);
+ if (!denali)
+ return -ENOMEM;
+
+ ret = pci_enable_device(dev);
+ if (ret) {
+ printk(KERN_ERR "Spectra: pci_enable_device failed.\n");
+ goto failed_enable;
+ }
+
+ if (id->driver_data == INTEL_CE4100) {
+ /* Due to a silicon limitation, we can only support
+ * ONFI timing mode 1 and below.
+ */
+ if (onfi_timing_mode < -1 || onfi_timing_mode > 1)
+ {
+ printk("Intel CE4100 only supports ONFI timing mode 1 "
+ "or below\n");
+ ret = -EINVAL;
+ goto failed_enable;
+ }
+ denali->platform = INTEL_CE4100;
+ mem_base = pci_resource_start(dev, 0);
+ mem_len = pci_resource_len(dev, 1);
+ csr_base = pci_resource_start(dev, 1);
+ csr_len = pci_resource_len(dev, 1);
+ } else {
+ denali->platform = INTEL_MRST;
+ csr_base = pci_resource_start(dev, 0);
+ csr_len = pci_resource_len(dev, 0);
+ mem_base = pci_resource_start(dev, 1);
+ mem_len = pci_resource_len(dev, 1);
+ if (!mem_len) {
+ mem_base = csr_base + csr_len;
+ mem_len = csr_len;
+ nand_dbg_print(NAND_DBG_WARN,
+ "Spectra: No second BAR for PCI device; assuming %08Lx\n",
+ (uint64_t)mem_base);
+ }
+ }
+
+ /* Is 32-bit DMA supported? */
+ ret = pci_set_dma_mask(dev, DMA_BIT_MASK(32));
+
+ if (ret)
+ {
+ printk(KERN_ERR "Spectra: no usable DMA configuration\n");
+ goto failed_enable;
+ }
+ denali->buf.dma_buf = pci_map_single(dev, denali->buf.buf, DENALI_BUF_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+
+ if (pci_dma_mapping_error(dev, denali->buf.dma_buf))
+ {
+ printk(KERN_ERR "Spectra: failed to map DMA buffer\n");
+ goto failed_enable;
+ }
+
+ pci_set_master(dev);
+ denali->dev = dev;
+
+ ret = pci_request_regions(dev, DENALI_NAND_NAME);
+ if (ret) {
+ printk(KERN_ERR "Spectra: Unable to request memory regions\n");
+ goto failed_req_csr;
+ }
+
+ denali->flash_reg = ioremap_nocache(csr_base, csr_len);
+ if (!denali->flash_reg) {
+ printk(KERN_ERR "Spectra: Unable to remap memory region\n");
+ ret = -ENOMEM;
+ goto failed_remap_csr;
+ }
+ nand_dbg_print(NAND_DBG_DEBUG, "Spectra: CSR 0x%08Lx -> 0x%p (0x%lx)\n",
+ (uint64_t)csr_base, denali->flash_reg, csr_len);
+
+ denali->flash_mem = ioremap_nocache(mem_base, mem_len);
+ if (!denali->flash_mem) {
+ printk(KERN_ERR "Spectra: ioremap_nocache failed!");
+ iounmap(denali->flash_reg);
+ ret = -ENOMEM;
+ goto failed_remap_csr;
+ }
+
+ nand_dbg_print(NAND_DBG_WARN,
+ "Spectra: Remapped flash base address: "
+ "0x%p, len: %ld\n",
+ denali->flash_mem, csr_len);
+
+ denali_hw_init(denali);
+ denali_drv_init(denali);
+
+ nand_dbg_print(NAND_DBG_DEBUG, "Spectra: IRQ %d\n", dev->irq);
+ if (request_irq(dev->irq, denali_isr, IRQF_SHARED,
+ DENALI_NAND_NAME, denali)) {
+ printk(KERN_ERR "Spectra: Unable to allocate IRQ\n");
+ ret = -ENODEV;
+ goto failed_request_irq;
+ }
+
+ /* now that our ISR is registered, we can enable interrupts */
+ NAND_LLD_Enable_Disable_Interrupts(denali, true);
+
+ pci_set_drvdata(dev, denali);
+
+ NAND_Read_Device_ID(denali);
+
+ /* MTD supported page sizes vary by kernel. We validate our
+ kernel supports the device here.
+ */
+ if (denali->dev_info.wPageSize > NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE)
+ {
+ ret = -ENODEV;
+ printk(KERN_ERR "Spectra: device size not supported by this "
+ "version of MTD.");
+ goto failed_nand;
+ }
+
+ nand_dbg_print(NAND_DBG_DEBUG, "Dump timing register values:"
+ "acc_clks: %d, re_2_we: %d, we_2_re: %d,"
+ "addr_2_data: %d, rdwr_en_lo_cnt: %d, "
+ "rdwr_en_hi_cnt: %d, cs_setup_cnt: %d\n",
+ ioread32(denali->flash_reg + ACC_CLKS),
+ ioread32(denali->flash_reg + RE_2_WE),
+ ioread32(denali->flash_reg + WE_2_RE),
+ ioread32(denali->flash_reg + ADDR_2_DATA),
+ ioread32(denali->flash_reg + RDWR_EN_LO_CNT),
+ ioread32(denali->flash_reg + RDWR_EN_HI_CNT),
+ ioread32(denali->flash_reg + CS_SETUP_CNT));
+
+ denali->mtd.name = "Denali NAND";
+ denali->mtd.owner = THIS_MODULE;
+ denali->mtd.priv = &denali->nand;
+
+ /* register the driver with the NAND core subsystem */
+ denali->nand.select_chip = denali_select_chip;
+ denali->nand.cmdfunc = denali_cmdfunc;
+ denali->nand.read_byte = denali_read_byte;
+ denali->nand.waitfunc = denali_waitfunc;
+
+ /* scan for NAND devices attached to the controller
+ * this is the first stage in a two step process to register
+ * with the nand subsystem */
+ if (nand_scan_ident(&denali->mtd, LLD_MAX_FLASH_BANKS, NULL))
+ {
+ ret = -ENXIO;
+ goto failed_nand;
+ }
+
+ /* second stage of the NAND scan
+ * this stage requires information regarding ECC and
+ * bad block management. */
+
+ /* Bad block management */
+ denali->nand.bbt_td = &bbt_main_descr;
+ denali->nand.bbt_md = &bbt_mirror_descr;
+
+ /* skip the scan for now until we have OOB read and write support */
+ denali->nand.options |= NAND_USE_FLASH_BBT | NAND_SKIP_BBTSCAN;
+ denali->nand.ecc.mode = NAND_ECC_HW_SYNDROME;
+
+ if (denali->dev_info.MLCDevice)
+ {
+ denali->nand.ecc.layout = &nand_oob_mlc_14bit;
+ denali->nand.ecc.bytes = ECC_BYTES_MLC;
+ }
+ else /* SLC */
+ {
+ denali->nand.ecc.layout = &nand_oob_slc;
+ denali->nand.ecc.bytes = ECC_BYTES_SLC;
+ }
+
+ /* These functions are required by the NAND core framework, otherwise,
+ the NAND core will assert. However, we don't need them, so we'll stub
+ them out. */
+ denali->nand.ecc.calculate = denali_ecc_calculate;
+ denali->nand.ecc.correct = denali_ecc_correct;
+ denali->nand.ecc.hwctl = denali_ecc_hwctl;
+
+ /* override the default read operations */
+ denali->nand.ecc.size = denali->mtd.writesize;
+ denali->nand.ecc.read_page = denali_read_page;
+ denali->nand.ecc.read_page_raw = denali_read_page_raw;
+ denali->nand.ecc.write_page = denali_write_page;
+ denali->nand.ecc.write_page_raw = denali_write_page_raw;
+ denali->nand.ecc.read_oob = denali_read_oob;
+ denali->nand.ecc.write_oob = denali_write_oob;
+ denali->nand.erase_cmd = denali_erase;
+
+ if (nand_scan_tail(&denali->mtd))
+ {
+ ret = -ENXIO;
+ goto failed_nand;
+ }
+
+ ret = add_mtd_device(&denali->mtd);
+ if (ret) {
+ printk(KERN_ERR "Spectra: Failed to register MTD device: %d\n", ret);
+ goto failed_nand;
+ }
+ return 0;
+
+ failed_nand:
+ denali_irq_cleanup(dev->irq, denali);
+ failed_request_irq:
+ iounmap(denali->flash_reg);
+ iounmap(denali->flash_mem);
+ failed_remap_csr:
+ pci_release_regions(dev);
+ failed_req_csr:
+ pci_unmap_single(dev, denali->buf.dma_buf, DENALI_BUF_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+ failed_enable:
+ kfree(denali);
+ return ret;
+}
+
+/* driver exit point */
+static void denali_pci_remove(struct pci_dev *dev)
+{
+ struct denali_nand_info *denali = pci_get_drvdata(dev);
+
+ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
+ __FILE__, __LINE__, __func__);
+
+ nand_release(&denali->mtd);
+ del_mtd_device(&denali->mtd);
+
+ denali_irq_cleanup(dev->irq, denali);
+
+ iounmap(denali->flash_reg);
+ iounmap(denali->flash_mem);
+ pci_release_regions(dev);
+ pci_disable_device(dev);
+ pci_unmap_single(dev, denali->buf.dma_buf, DENALI_BUF_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+ pci_set_drvdata(dev, NULL);
+ kfree(denali);
+}
+
+MODULE_DEVICE_TABLE(pci, denali_pci_ids);
+
+static struct pci_driver denali_pci_driver = {
+ .name = DENALI_NAND_NAME,
+ .id_table = denali_pci_ids,
+ .probe = denali_pci_probe,
+ .remove = denali_pci_remove,
+};
+
+static int __init denali_init(void)
+{
+ printk(KERN_INFO "Spectra MTD driver built on %s @ %s\n", __DATE__, __TIME__);
+ return pci_register_driver(&denali_pci_driver);
+}
+
+/* module unload: unregister the PCI driver */
+static void __exit denali_exit(void)
+{
+ pci_unregister_driver(&denali_pci_driver);
+}
+
+module_init(denali_init);
+module_exit(denali_exit);
diff --git a/drivers/mtd/nand/denali.h b/drivers/mtd/nand/denali.h
new file mode 100644
index 000000000000..422a29ab2f60
--- /dev/null
+++ b/drivers/mtd/nand/denali.h
@@ -0,0 +1,816 @@
+/*
+ * NAND Flash Controller Device Driver
+ * Copyright (c) 2009 - 2010, Intel Corporation and its suppliers.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#include <linux/mtd/nand.h>
+
+#define DEVICE_RESET 0x0
+#define DEVICE_RESET__BANK0 0x0001
+#define DEVICE_RESET__BANK1 0x0002
+#define DEVICE_RESET__BANK2 0x0004
+#define DEVICE_RESET__BANK3 0x0008
+
+#define TRANSFER_SPARE_REG 0x10
+#define TRANSFER_SPARE_REG__FLAG 0x0001
+
+#define LOAD_WAIT_CNT 0x20
+#define LOAD_WAIT_CNT__VALUE 0xffff
+
+#define PROGRAM_WAIT_CNT 0x30
+#define PROGRAM_WAIT_CNT__VALUE 0xffff
+
+#define ERASE_WAIT_CNT 0x40
+#define ERASE_WAIT_CNT__VALUE 0xffff
+
+#define INT_MON_CYCCNT 0x50
+#define INT_MON_CYCCNT__VALUE 0xffff
+
+#define RB_PIN_ENABLED 0x60
+#define RB_PIN_ENABLED__BANK0 0x0001
+#define RB_PIN_ENABLED__BANK1 0x0002
+#define RB_PIN_ENABLED__BANK2 0x0004
+#define RB_PIN_ENABLED__BANK3 0x0008
+
+#define MULTIPLANE_OPERATION 0x70
+#define MULTIPLANE_OPERATION__FLAG 0x0001
+
+#define MULTIPLANE_READ_ENABLE 0x80
+#define MULTIPLANE_READ_ENABLE__FLAG 0x0001
+
+#define COPYBACK_DISABLE 0x90
+#define COPYBACK_DISABLE__FLAG 0x0001
+
+#define CACHE_WRITE_ENABLE 0xa0
+#define CACHE_WRITE_ENABLE__FLAG 0x0001
+
+#define CACHE_READ_ENABLE 0xb0
+#define CACHE_READ_ENABLE__FLAG 0x0001
+
+#define PREFETCH_MODE 0xc0
+#define PREFETCH_MODE__PREFETCH_EN 0x0001
+#define PREFETCH_MODE__PREFETCH_BURST_LENGTH 0xfff0
+
+#define CHIP_ENABLE_DONT_CARE 0xd0
+#define CHIP_EN_DONT_CARE__FLAG 0x01
+
+#define ECC_ENABLE 0xe0
+#define ECC_ENABLE__FLAG 0x0001
+
+#define GLOBAL_INT_ENABLE 0xf0
+#define GLOBAL_INT_EN_FLAG 0x01
+
+#define WE_2_RE 0x100
+#define WE_2_RE__VALUE 0x003f
+
+#define ADDR_2_DATA 0x110
+#define ADDR_2_DATA__VALUE 0x003f
+
+#define RE_2_WE 0x120
+#define RE_2_WE__VALUE 0x003f
+
+#define ACC_CLKS 0x130
+#define ACC_CLKS__VALUE 0x000f
+
+#define NUMBER_OF_PLANES 0x140
+#define NUMBER_OF_PLANES__VALUE 0x0007
+
+#define PAGES_PER_BLOCK 0x150
+#define PAGES_PER_BLOCK__VALUE 0xffff
+
+#define DEVICE_WIDTH 0x160
+#define DEVICE_WIDTH__VALUE 0x0003
+
+#define DEVICE_MAIN_AREA_SIZE 0x170
+#define DEVICE_MAIN_AREA_SIZE__VALUE 0xffff
+
+#define DEVICE_SPARE_AREA_SIZE 0x180
+#define DEVICE_SPARE_AREA_SIZE__VALUE 0xffff
+
+#define TWO_ROW_ADDR_CYCLES 0x190
+#define TWO_ROW_ADDR_CYCLES__FLAG 0x0001
+
+#define MULTIPLANE_ADDR_RESTRICT 0x1a0
+#define MULTIPLANE_ADDR_RESTRICT__FLAG 0x0001
+
+#define ECC_CORRECTION 0x1b0
+#define ECC_CORRECTION__VALUE 0x001f
+
+#define READ_MODE 0x1c0
+#define READ_MODE__VALUE 0x000f
+
+#define WRITE_MODE 0x1d0
+#define WRITE_MODE__VALUE 0x000f
+
+#define COPYBACK_MODE 0x1e0
+#define COPYBACK_MODE__VALUE 0x000f
+
+#define RDWR_EN_LO_CNT 0x1f0
+#define RDWR_EN_LO_CNT__VALUE 0x001f
+
+#define RDWR_EN_HI_CNT 0x200
+#define RDWR_EN_HI_CNT__VALUE 0x001f
+
+#define MAX_RD_DELAY 0x210
+#define MAX_RD_DELAY__VALUE 0x000f
+
+#define CS_SETUP_CNT 0x220
+#define CS_SETUP_CNT__VALUE 0x001f
+
+#define SPARE_AREA_SKIP_BYTES 0x230
+#define SPARE_AREA_SKIP_BYTES__VALUE 0x003f
+
+#define SPARE_AREA_MARKER 0x240
+#define SPARE_AREA_MARKER__VALUE 0xffff
+
+#define DEVICES_CONNECTED 0x250
+#define DEVICES_CONNECTED__VALUE 0x0007
+
+#define DIE_MASK 0x260
+#define DIE_MASK__VALUE 0x00ff
+
+#define FIRST_BLOCK_OF_NEXT_PLANE 0x270
+#define FIRST_BLOCK_OF_NEXT_PLANE__VALUE 0xffff
+
+#define WRITE_PROTECT 0x280
+#define WRITE_PROTECT__FLAG 0x0001
+
+#define RE_2_RE 0x290
+#define RE_2_RE__VALUE 0x003f
+
+#define MANUFACTURER_ID 0x300
+#define MANUFACTURER_ID__VALUE 0x00ff
+
+#define DEVICE_ID 0x310
+#define DEVICE_ID__VALUE 0x00ff
+
+#define DEVICE_PARAM_0 0x320
+#define DEVICE_PARAM_0__VALUE 0x00ff
+
+#define DEVICE_PARAM_1 0x330
+#define DEVICE_PARAM_1__VALUE 0x00ff
+
+#define DEVICE_PARAM_2 0x340
+#define DEVICE_PARAM_2__VALUE 0x00ff
+
+#define LOGICAL_PAGE_DATA_SIZE 0x350
+#define LOGICAL_PAGE_DATA_SIZE__VALUE 0xffff
+
+#define LOGICAL_PAGE_SPARE_SIZE 0x360
+#define LOGICAL_PAGE_SPARE_SIZE__VALUE 0xffff
+
+#define REVISION 0x370
+#define REVISION__VALUE 0xffff
+
+#define ONFI_DEVICE_FEATURES 0x380
+#define ONFI_DEVICE_FEATURES__VALUE 0x003f
+
+#define ONFI_OPTIONAL_COMMANDS 0x390
+#define ONFI_OPTIONAL_COMMANDS__VALUE 0x003f
+
+#define ONFI_TIMING_MODE 0x3a0
+#define ONFI_TIMING_MODE__VALUE 0x003f
+
+#define ONFI_PGM_CACHE_TIMING_MODE 0x3b0
+#define ONFI_PGM_CACHE_TIMING_MODE__VALUE 0x003f
+
+#define ONFI_DEVICE_NO_OF_LUNS 0x3c0
+#define ONFI_DEVICE_NO_OF_LUNS__NO_OF_LUNS 0x00ff
+#define ONFI_DEVICE_NO_OF_LUNS__ONFI_DEVICE 0x0100
+
+#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L 0x3d0
+#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L__VALUE 0xffff
+
+#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U 0x3e0
+#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U__VALUE 0xffff
+
+#define FEATURES 0x3f0
+#define FEATURES__N_BANKS 0x0003
+#define FEATURES__ECC_MAX_ERR 0x003c
+#define FEATURES__DMA 0x0040
+#define FEATURES__CMD_DMA 0x0080
+#define FEATURES__PARTITION 0x0100
+#define FEATURES__XDMA_SIDEBAND 0x0200
+#define FEATURES__GPREG 0x0400
+#define FEATURES__INDEX_ADDR 0x0800
+
+#define TRANSFER_MODE 0x400
+#define TRANSFER_MODE__VALUE 0x0003
+
+#define INTR_STATUS0 0x410
+#define INTR_STATUS0__ECC_TRANSACTION_DONE 0x0001
+#define INTR_STATUS0__ECC_ERR 0x0002
+#define INTR_STATUS0__DMA_CMD_COMP 0x0004
+#define INTR_STATUS0__TIME_OUT 0x0008
+#define INTR_STATUS0__PROGRAM_FAIL 0x0010
+#define INTR_STATUS0__ERASE_FAIL 0x0020
+#define INTR_STATUS0__LOAD_COMP 0x0040
+#define INTR_STATUS0__PROGRAM_COMP 0x0080
+#define INTR_STATUS0__ERASE_COMP 0x0100
+#define INTR_STATUS0__PIPE_CPYBCK_CMD_COMP 0x0200
+#define INTR_STATUS0__LOCKED_BLK 0x0400
+#define INTR_STATUS0__UNSUP_CMD 0x0800
+#define INTR_STATUS0__INT_ACT 0x1000
+#define INTR_STATUS0__RST_COMP 0x2000
+#define INTR_STATUS0__PIPE_CMD_ERR 0x4000
+#define INTR_STATUS0__PAGE_XFER_INC 0x8000
+
+#define INTR_EN0 0x420
+#define INTR_EN0__ECC_TRANSACTION_DONE 0x0001
+#define INTR_EN0__ECC_ERR 0x0002
+#define INTR_EN0__DMA_CMD_COMP 0x0004
+#define INTR_EN0__TIME_OUT 0x0008
+#define INTR_EN0__PROGRAM_FAIL 0x0010
+#define INTR_EN0__ERASE_FAIL 0x0020
+#define INTR_EN0__LOAD_COMP 0x0040
+#define INTR_EN0__PROGRAM_COMP 0x0080
+#define INTR_EN0__ERASE_COMP 0x0100
+#define INTR_EN0__PIPE_CPYBCK_CMD_COMP 0x0200
+#define INTR_EN0__LOCKED_BLK 0x0400
+#define INTR_EN0__UNSUP_CMD 0x0800
+#define INTR_EN0__INT_ACT 0x1000
+#define INTR_EN0__RST_COMP 0x2000
+#define INTR_EN0__PIPE_CMD_ERR 0x4000
+#define INTR_EN0__PAGE_XFER_INC 0x8000
+
+#define PAGE_CNT0 0x430
+#define PAGE_CNT0__VALUE 0x00ff
+
+#define ERR_PAGE_ADDR0 0x440
+#define ERR_PAGE_ADDR0__VALUE 0xffff
+
+#define ERR_BLOCK_ADDR0 0x450
+#define ERR_BLOCK_ADDR0__VALUE 0xffff
+
+#define INTR_STATUS1 0x460
+#define INTR_STATUS1__ECC_TRANSACTION_DONE 0x0001
+#define INTR_STATUS1__ECC_ERR 0x0002
+#define INTR_STATUS1__DMA_CMD_COMP 0x0004
+#define INTR_STATUS1__TIME_OUT 0x0008
+#define INTR_STATUS1__PROGRAM_FAIL 0x0010
+#define INTR_STATUS1__ERASE_FAIL 0x0020
+#define INTR_STATUS1__LOAD_COMP 0x0040
+#define INTR_STATUS1__PROGRAM_COMP 0x0080
+#define INTR_STATUS1__ERASE_COMP 0x0100
+#define INTR_STATUS1__PIPE_CPYBCK_CMD_COMP 0x0200
+#define INTR_STATUS1__LOCKED_BLK 0x0400
+#define INTR_STATUS1__UNSUP_CMD 0x0800
+#define INTR_STATUS1__INT_ACT 0x1000
+#define INTR_STATUS1__RST_COMP 0x2000
+#define INTR_STATUS1__PIPE_CMD_ERR 0x4000
+#define INTR_STATUS1__PAGE_XFER_INC 0x8000
+
+#define INTR_EN1 0x470
+#define INTR_EN1__ECC_TRANSACTION_DONE 0x0001
+#define INTR_EN1__ECC_ERR 0x0002
+#define INTR_EN1__DMA_CMD_COMP 0x0004
+#define INTR_EN1__TIME_OUT 0x0008
+#define INTR_EN1__PROGRAM_FAIL 0x0010
+#define INTR_EN1__ERASE_FAIL 0x0020
+#define INTR_EN1__LOAD_COMP 0x0040
+#define INTR_EN1__PROGRAM_COMP 0x0080
+#define INTR_EN1__ERASE_COMP 0x0100
+#define INTR_EN1__PIPE_CPYBCK_CMD_COMP 0x0200
+#define INTR_EN1__LOCKED_BLK 0x0400
+#define INTR_EN1__UNSUP_CMD 0x0800
+#define INTR_EN1__INT_ACT 0x1000
+#define INTR_EN1__RST_COMP 0x2000
+#define INTR_EN1__PIPE_CMD_ERR 0x4000
+#define INTR_EN1__PAGE_XFER_INC 0x8000
+
+#define PAGE_CNT1 0x480
+#define PAGE_CNT1__VALUE 0x00ff
+
+#define ERR_PAGE_ADDR1 0x490
+#define ERR_PAGE_ADDR1__VALUE 0xffff
+
+#define ERR_BLOCK_ADDR1 0x4a0
+#define ERR_BLOCK_ADDR1__VALUE 0xffff
+
+#define INTR_STATUS2 0x4b0
+#define INTR_STATUS2__ECC_TRANSACTION_DONE 0x0001
+#define INTR_STATUS2__ECC_ERR 0x0002
+#define INTR_STATUS2__DMA_CMD_COMP 0x0004
+#define INTR_STATUS2__TIME_OUT 0x0008
+#define INTR_STATUS2__PROGRAM_FAIL 0x0010
+#define INTR_STATUS2__ERASE_FAIL 0x0020
+#define INTR_STATUS2__LOAD_COMP 0x0040
+#define INTR_STATUS2__PROGRAM_COMP 0x0080
+#define INTR_STATUS2__ERASE_COMP 0x0100
+#define INTR_STATUS2__PIPE_CPYBCK_CMD_COMP 0x0200
+#define INTR_STATUS2__LOCKED_BLK 0x0400
+#define INTR_STATUS2__UNSUP_CMD 0x0800
+#define INTR_STATUS2__INT_ACT 0x1000
+#define INTR_STATUS2__RST_COMP 0x2000
+#define INTR_STATUS2__PIPE_CMD_ERR 0x4000
+#define INTR_STATUS2__PAGE_XFER_INC 0x8000
+
+#define INTR_EN2 0x4c0
+#define INTR_EN2__ECC_TRANSACTION_DONE 0x0001
+#define INTR_EN2__ECC_ERR 0x0002
+#define INTR_EN2__DMA_CMD_COMP 0x0004
+#define INTR_EN2__TIME_OUT 0x0008
+#define INTR_EN2__PROGRAM_FAIL 0x0010
+#define INTR_EN2__ERASE_FAIL 0x0020
+#define INTR_EN2__LOAD_COMP 0x0040
+#define INTR_EN2__PROGRAM_COMP 0x0080
+#define INTR_EN2__ERASE_COMP 0x0100
+#define INTR_EN2__PIPE_CPYBCK_CMD_COMP 0x0200
+#define INTR_EN2__LOCKED_BLK 0x0400
+#define INTR_EN2__UNSUP_CMD 0x0800
+#define INTR_EN2__INT_ACT 0x1000
+#define INTR_EN2__RST_COMP 0x2000
+#define INTR_EN2__PIPE_CMD_ERR 0x4000
+#define INTR_EN2__PAGE_XFER_INC 0x8000
+
+#define PAGE_CNT2 0x4d0
+#define PAGE_CNT2__VALUE 0x00ff
+
+#define ERR_PAGE_ADDR2 0x4e0
+#define ERR_PAGE_ADDR2__VALUE 0xffff
+
+#define ERR_BLOCK_ADDR2 0x4f0
+#define ERR_BLOCK_ADDR2__VALUE 0xffff
+
+#define INTR_STATUS3 0x500
+#define INTR_STATUS3__ECC_TRANSACTION_DONE 0x0001
+#define INTR_STATUS3__ECC_ERR 0x0002
+#define INTR_STATUS3__DMA_CMD_COMP 0x0004
+#define INTR_STATUS3__TIME_OUT 0x0008
+#define INTR_STATUS3__PROGRAM_FAIL 0x0010
+#define INTR_STATUS3__ERASE_FAIL 0x0020
+#define INTR_STATUS3__LOAD_COMP 0x0040
+#define INTR_STATUS3__PROGRAM_COMP 0x0080
+#define INTR_STATUS3__ERASE_COMP 0x0100
+#define INTR_STATUS3__PIPE_CPYBCK_CMD_COMP 0x0200
+#define INTR_STATUS3__LOCKED_BLK 0x0400
+#define INTR_STATUS3__UNSUP_CMD 0x0800
+#define INTR_STATUS3__INT_ACT 0x1000
+#define INTR_STATUS3__RST_COMP 0x2000
+#define INTR_STATUS3__PIPE_CMD_ERR 0x4000
+#define INTR_STATUS3__PAGE_XFER_INC 0x8000
+
+#define INTR_EN3 0x510
+#define INTR_EN3__ECC_TRANSACTION_DONE 0x0001
+#define INTR_EN3__ECC_ERR 0x0002
+#define INTR_EN3__DMA_CMD_COMP 0x0004
+#define INTR_EN3__TIME_OUT 0x0008
+#define INTR_EN3__PROGRAM_FAIL 0x0010
+#define INTR_EN3__ERASE_FAIL 0x0020
+#define INTR_EN3__LOAD_COMP 0x0040
+#define INTR_EN3__PROGRAM_COMP 0x0080
+#define INTR_EN3__ERASE_COMP 0x0100
+#define INTR_EN3__PIPE_CPYBCK_CMD_COMP 0x0200
+#define INTR_EN3__LOCKED_BLK 0x0400
+#define INTR_EN3__UNSUP_CMD 0x0800
+#define INTR_EN3__INT_ACT 0x1000
+#define INTR_EN3__RST_COMP 0x2000
+#define INTR_EN3__PIPE_CMD_ERR 0x4000
+#define INTR_EN3__PAGE_XFER_INC 0x8000
+
+#define PAGE_CNT3 0x520
+#define PAGE_CNT3__VALUE 0x00ff
+
+#define ERR_PAGE_ADDR3 0x530
+#define ERR_PAGE_ADDR3__VALUE 0xffff
+
+#define ERR_BLOCK_ADDR3 0x540
+#define ERR_BLOCK_ADDR3__VALUE 0xffff
+
+#define DATA_INTR 0x550
+#define DATA_INTR__WRITE_SPACE_AV 0x0001
+#define DATA_INTR__READ_DATA_AV 0x0002
+
+#define DATA_INTR_EN 0x560
+#define DATA_INTR_EN__WRITE_SPACE_AV 0x0001
+#define DATA_INTR_EN__READ_DATA_AV 0x0002
+
+#define GPREG_0 0x570
+#define GPREG_0__VALUE 0xffff
+
+#define GPREG_1 0x580
+#define GPREG_1__VALUE 0xffff
+
+#define GPREG_2 0x590
+#define GPREG_2__VALUE 0xffff
+
+#define GPREG_3 0x5a0
+#define GPREG_3__VALUE 0xffff
+
+#define ECC_THRESHOLD 0x600
+#define ECC_THRESHOLD__VALUE 0x03ff
+
+#define ECC_ERROR_BLOCK_ADDRESS 0x610
+#define ECC_ERROR_BLOCK_ADDRESS__VALUE 0xffff
+
+#define ECC_ERROR_PAGE_ADDRESS 0x620
+#define ECC_ERROR_PAGE_ADDRESS__VALUE 0x0fff
+#define ECC_ERROR_PAGE_ADDRESS__BANK 0xf000
+
+#define ECC_ERROR_ADDRESS 0x630
+#define ECC_ERROR_ADDRESS__OFFSET 0x0fff
+#define ECC_ERROR_ADDRESS__SECTOR_NR 0xf000
+
+#define ERR_CORRECTION_INFO 0x640
+#define ERR_CORRECTION_INFO__BYTEMASK 0x00ff
+#define ERR_CORRECTION_INFO__DEVICE_NR 0x0f00
+#define ERR_CORRECTION_INFO__ERROR_TYPE 0x4000
+#define ERR_CORRECTION_INFO__LAST_ERR_INFO 0x8000
+
+#define DMA_ENABLE 0x700
+#define DMA_ENABLE__FLAG 0x0001
+
+#define IGNORE_ECC_DONE 0x710
+#define IGNORE_ECC_DONE__FLAG 0x0001
+
+#define DMA_INTR 0x720
+#define DMA_INTR__TARGET_ERROR 0x0001
+#define DMA_INTR__DESC_COMP_CHANNEL0 0x0002
+#define DMA_INTR__DESC_COMP_CHANNEL1 0x0004
+#define DMA_INTR__DESC_COMP_CHANNEL2 0x0008
+#define DMA_INTR__DESC_COMP_CHANNEL3 0x0010
+#define DMA_INTR__MEMCOPY_DESC_COMP 0x0020
+
+#define DMA_INTR_EN 0x730
+#define DMA_INTR_EN__TARGET_ERROR 0x0001
+#define DMA_INTR_EN__DESC_COMP_CHANNEL0 0x0002
+#define DMA_INTR_EN__DESC_COMP_CHANNEL1 0x0004
+#define DMA_INTR_EN__DESC_COMP_CHANNEL2 0x0008
+#define DMA_INTR_EN__DESC_COMP_CHANNEL3 0x0010
+#define DMA_INTR_EN__MEMCOPY_DESC_COMP 0x0020
+
+#define TARGET_ERR_ADDR_LO 0x740
+#define TARGET_ERR_ADDR_LO__VALUE 0xffff
+
+#define TARGET_ERR_ADDR_HI 0x750
+#define TARGET_ERR_ADDR_HI__VALUE 0xffff
+
+#define CHNL_ACTIVE 0x760
+#define CHNL_ACTIVE__CHANNEL0 0x0001
+#define CHNL_ACTIVE__CHANNEL1 0x0002
+#define CHNL_ACTIVE__CHANNEL2 0x0004
+#define CHNL_ACTIVE__CHANNEL3 0x0008
+
+#define ACTIVE_SRC_ID 0x800
+#define ACTIVE_SRC_ID__VALUE 0x00ff
+
+#define PTN_INTR 0x810
+#define PTN_INTR__CONFIG_ERROR 0x0001
+#define PTN_INTR__ACCESS_ERROR_BANK0 0x0002
+#define PTN_INTR__ACCESS_ERROR_BANK1 0x0004
+#define PTN_INTR__ACCESS_ERROR_BANK2 0x0008
+#define PTN_INTR__ACCESS_ERROR_BANK3 0x0010
+#define PTN_INTR__REG_ACCESS_ERROR 0x0020
+
+#define PTN_INTR_EN 0x820
+#define PTN_INTR_EN__CONFIG_ERROR 0x0001
+#define PTN_INTR_EN__ACCESS_ERROR_BANK0 0x0002
+#define PTN_INTR_EN__ACCESS_ERROR_BANK1 0x0004
+#define PTN_INTR_EN__ACCESS_ERROR_BANK2 0x0008
+#define PTN_INTR_EN__ACCESS_ERROR_BANK3 0x0010
+#define PTN_INTR_EN__REG_ACCESS_ERROR 0x0020
+
+#define PERM_SRC_ID_0 0x830
+#define PERM_SRC_ID_0__SRCID 0x00ff
+#define PERM_SRC_ID_0__DIRECT_ACCESS_ACTIVE 0x0800
+#define PERM_SRC_ID_0__WRITE_ACTIVE 0x2000
+#define PERM_SRC_ID_0__READ_ACTIVE 0x4000
+#define PERM_SRC_ID_0__PARTITION_VALID 0x8000
+
+#define MIN_BLK_ADDR_0 0x840
+#define MIN_BLK_ADDR_0__VALUE 0xffff
+
+#define MAX_BLK_ADDR_0 0x850
+#define MAX_BLK_ADDR_0__VALUE 0xffff
+
+#define MIN_MAX_BANK_0 0x860
+#define MIN_MAX_BANK_0__MIN_VALUE 0x0003
+#define MIN_MAX_BANK_0__MAX_VALUE 0x000c
+
+#define PERM_SRC_ID_1 0x870
+#define PERM_SRC_ID_1__SRCID 0x00ff
+#define PERM_SRC_ID_1__DIRECT_ACCESS_ACTIVE 0x0800
+#define PERM_SRC_ID_1__WRITE_ACTIVE 0x2000
+#define PERM_SRC_ID_1__READ_ACTIVE 0x4000
+#define PERM_SRC_ID_1__PARTITION_VALID 0x8000
+
+#define MIN_BLK_ADDR_1 0x880
+#define MIN_BLK_ADDR_1__VALUE 0xffff
+
+#define MAX_BLK_ADDR_1 0x890
+#define MAX_BLK_ADDR_1__VALUE 0xffff
+
+#define MIN_MAX_BANK_1 0x8a0
+#define MIN_MAX_BANK_1__MIN_VALUE 0x0003
+#define MIN_MAX_BANK_1__MAX_VALUE 0x000c
+
+#define PERM_SRC_ID_2 0x8b0
+#define PERM_SRC_ID_2__SRCID 0x00ff
+#define PERM_SRC_ID_2__DIRECT_ACCESS_ACTIVE 0x0800
+#define PERM_SRC_ID_2__WRITE_ACTIVE 0x2000
+#define PERM_SRC_ID_2__READ_ACTIVE 0x4000
+#define PERM_SRC_ID_2__PARTITION_VALID 0x8000
+
+#define MIN_BLK_ADDR_2 0x8c0
+#define MIN_BLK_ADDR_2__VALUE 0xffff
+
+#define MAX_BLK_ADDR_2 0x8d0
+#define MAX_BLK_ADDR_2__VALUE 0xffff
+
+#define MIN_MAX_BANK_2 0x8e0
+#define MIN_MAX_BANK_2__MIN_VALUE 0x0003
+#define MIN_MAX_BANK_2__MAX_VALUE 0x000c
+
+#define PERM_SRC_ID_3 0x8f0
+#define PERM_SRC_ID_3__SRCID 0x00ff
+#define PERM_SRC_ID_3__DIRECT_ACCESS_ACTIVE 0x0800
+#define PERM_SRC_ID_3__WRITE_ACTIVE 0x2000
+#define PERM_SRC_ID_3__READ_ACTIVE 0x4000
+#define PERM_SRC_ID_3__PARTITION_VALID 0x8000
+
+#define MIN_BLK_ADDR_3 0x900
+#define MIN_BLK_ADDR_3__VALUE 0xffff
+
+#define MAX_BLK_ADDR_3 0x910
+#define MAX_BLK_ADDR_3__VALUE 0xffff
+
+#define MIN_MAX_BANK_3 0x920
+#define MIN_MAX_BANK_3__MIN_VALUE 0x0003
+#define MIN_MAX_BANK_3__MAX_VALUE 0x000c
+
+#define PERM_SRC_ID_4 0x930
+#define PERM_SRC_ID_4__SRCID 0x00ff
+#define PERM_SRC_ID_4__DIRECT_ACCESS_ACTIVE 0x0800
+#define PERM_SRC_ID_4__WRITE_ACTIVE 0x2000
+#define PERM_SRC_ID_4__READ_ACTIVE 0x4000
+#define PERM_SRC_ID_4__PARTITION_VALID 0x8000
+
+#define MIN_BLK_ADDR_4 0x940
+#define MIN_BLK_ADDR_4__VALUE 0xffff
+
+#define MAX_BLK_ADDR_4 0x950
+#define MAX_BLK_ADDR_4__VALUE 0xffff
+
+#define MIN_MAX_BANK_4 0x960
+#define MIN_MAX_BANK_4__MIN_VALUE 0x0003
+#define MIN_MAX_BANK_4__MAX_VALUE 0x000c
+
+#define PERM_SRC_ID_5 0x970
+#define PERM_SRC_ID_5__SRCID 0x00ff
+#define PERM_SRC_ID_5__DIRECT_ACCESS_ACTIVE 0x0800
+#define PERM_SRC_ID_5__WRITE_ACTIVE 0x2000
+#define PERM_SRC_ID_5__READ_ACTIVE 0x4000
+#define PERM_SRC_ID_5__PARTITION_VALID 0x8000
+
+#define MIN_BLK_ADDR_5 0x980
+#define MIN_BLK_ADDR_5__VALUE 0xffff
+
+#define MAX_BLK_ADDR_5 0x990
+#define MAX_BLK_ADDR_5__VALUE 0xffff
+
+#define MIN_MAX_BANK_5 0x9a0
+#define MIN_MAX_BANK_5__MIN_VALUE 0x0003
+#define MIN_MAX_BANK_5__MAX_VALUE 0x000c
+
+#define PERM_SRC_ID_6 0x9b0
+#define PERM_SRC_ID_6__SRCID 0x00ff
+#define PERM_SRC_ID_6__DIRECT_ACCESS_ACTIVE 0x0800
+#define PERM_SRC_ID_6__WRITE_ACTIVE 0x2000
+#define PERM_SRC_ID_6__READ_ACTIVE 0x4000
+#define PERM_SRC_ID_6__PARTITION_VALID 0x8000
+
+#define MIN_BLK_ADDR_6 0x9c0
+#define MIN_BLK_ADDR_6__VALUE 0xffff
+
+#define MAX_BLK_ADDR_6 0x9d0
+#define MAX_BLK_ADDR_6__VALUE 0xffff
+
+#define MIN_MAX_BANK_6 0x9e0
+#define MIN_MAX_BANK_6__MIN_VALUE 0x0003
+#define MIN_MAX_BANK_6__MAX_VALUE 0x000c
+
+#define PERM_SRC_ID_7 0x9f0
+#define PERM_SRC_ID_7__SRCID 0x00ff
+#define PERM_SRC_ID_7__DIRECT_ACCESS_ACTIVE 0x0800
+#define PERM_SRC_ID_7__WRITE_ACTIVE 0x2000
+#define PERM_SRC_ID_7__READ_ACTIVE 0x4000
+#define PERM_SRC_ID_7__PARTITION_VALID 0x8000
+
+#define MIN_BLK_ADDR_7 0xa00
+#define MIN_BLK_ADDR_7__VALUE 0xffff
+
+#define MAX_BLK_ADDR_7 0xa10
+#define MAX_BLK_ADDR_7__VALUE 0xffff
+
+#define MIN_MAX_BANK_7 0xa20
+#define MIN_MAX_BANK_7__MIN_VALUE 0x0003
+#define MIN_MAX_BANK_7__MAX_VALUE 0x000c
+
+/* flash.h */
+struct device_info_tag {
+ uint16_t wDeviceMaker;
+ uint16_t wDeviceID;
+ uint8_t bDeviceParam0;
+ uint8_t bDeviceParam1;
+ uint8_t bDeviceParam2;
+ uint32_t wDeviceType;
+ uint32_t wSpectraStartBlock;
+ uint32_t wSpectraEndBlock;
+ uint32_t wTotalBlocks;
+ uint16_t wPagesPerBlock;
+ uint16_t wPageSize;
+ uint16_t wPageDataSize;
+ uint16_t wPageSpareSize;
+ uint16_t wNumPageSpareFlag;
+ uint16_t wECCBytesPerSector;
+ uint32_t wBlockSize;
+ uint32_t wBlockDataSize;
+ uint32_t wDataBlockNum;
+ uint8_t bPlaneNum;
+ uint16_t wDeviceMainAreaSize;
+ uint16_t wDeviceSpareAreaSize;
+ uint16_t wDevicesConnected;
+ uint16_t wDeviceWidth;
+ uint16_t wHWRevision;
+ uint16_t wHWFeatures;
+
+ uint16_t wONFIDevFeatures;
+ uint16_t wONFIOptCommands;
+ uint16_t wONFITimingMode;
+ uint16_t wONFIPgmCacheTimingMode;
+
+ uint16_t MLCDevice;
+ uint16_t wSpareSkipBytes;
+
+ uint8_t nBitsInPageNumber;
+ uint8_t nBitsInPageDataSize;
+ uint8_t nBitsInBlockDataSize;
+};
+
+/* ffsdefs.h */
+#define CLEAR 0 /*use this to clear a field instead of "fail"*/
+#define SET 1 /*use this to set a field instead of "pass"*/
+#define FAIL 1 /*failed flag*/
+#define PASS 0 /*success flag*/
+#define ERR -1 /*error flag*/
+
+/* lld.h */
+#define GOOD_BLOCK 0
+#define DEFECTIVE_BLOCK 1
+#define READ_ERROR 2
+
+#define CLK_X 5
+#define CLK_MULTI 4
+
+/* ffsport.h */
+#define VERBOSE 1
+
+#define NAND_DBG_WARN 1
+#define NAND_DBG_DEBUG 2
+#define NAND_DBG_TRACE 3
+
+#ifdef VERBOSE
+#define nand_dbg_print(level, args...) \
+ do { \
+ if (level <= nand_debug_level) \
+ printk(KERN_ALERT args); \
+ } while (0)
+#else
+#define nand_dbg_print(level, args...)
+#endif
+
+
+/* spectraswconfig.h */
+#define CMD_DMA 0
+
+#define SPECTRA_PARTITION_ID 0
+/**** Block Table and Reserved Block Parameters *****/
+#define SPECTRA_START_BLOCK 3
+#define NUM_FREE_BLOCKS_GATE 30
+
+/* KBV - Updated to LNW scratch register address */
+#define SCRATCH_REG_ADDR CONFIG_MTD_NAND_DENALI_SCRATCH_REG_ADDR
+#define SCRATCH_REG_SIZE 64
+
+#define GLOB_HWCTL_DEFAULT_BLKS 2048
+
+#define SUPPORT_15BITECC 1
+#define SUPPORT_8BITECC 1
+
+#define CUSTOM_CONF_PARAMS 0
+
+#define ONFI_BLOOM_TIME 1
+#define MODE5_WORKAROUND 0
+
+/* lld_nand.h */
+/*
+ * NAND Flash Controller Device Driver
+ * Copyright (c) 2009, Intel Corporation and its suppliers.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#ifndef _LLD_NAND_
+#define _LLD_NAND_
+
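+/* Access mode selector placed in bits 27:26 of the addresses written to the
+ * controller's memory window (combined with BANK() and the page number in
+ * denali.c). */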
+#define MODE_00 0x00000000
+#define MODE_01 0x04000000
+#define MODE_10 0x08000000
+#define MODE_11 0x0C000000
+
+
+#define DATA_TRANSFER_MODE 0
+#define PROTECTION_PER_BLOCK 1
+#define LOAD_WAIT_COUNT 2
+#define PROGRAM_WAIT_COUNT 3
+#define ERASE_WAIT_COUNT 4
+#define INT_MONITOR_CYCLE_COUNT 5
+#define READ_BUSY_PIN_ENABLED 6
+#define MULTIPLANE_OPERATION_SUPPORT 7
+#define PRE_FETCH_MODE 8
+#define CE_DONT_CARE_SUPPORT 9
+#define COPYBACK_SUPPORT 10
+#define CACHE_WRITE_SUPPORT 11
+#define CACHE_READ_SUPPORT 12
+#define NUM_PAGES_IN_BLOCK 13
+#define ECC_ENABLE_SELECT 14
+#define WRITE_ENABLE_2_READ_ENABLE 15
+#define ADDRESS_2_DATA 16
+#define READ_ENABLE_2_WRITE_ENABLE 17
+#define TWO_ROW_ADDRESS_CYCLES 18
+#define MULTIPLANE_ADDRESS_RESTRICT 19
+#define ACC_CLOCKS 20
+#define READ_WRITE_ENABLE_LOW_COUNT 21
+#define READ_WRITE_ENABLE_HIGH_COUNT 22
+
+#define ECC_SECTOR_SIZE 512
+#define LLD_MAX_FLASH_BANKS 4
+
+#define DENALI_BUF_SIZE (NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE)
+
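+/* Staging buffer used both for page DMA transfers and for handing ID /
+ * status bytes to the NAND core one at a time; head and tail index the
+ * bytes consumed via denali_read_byte(). */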
+struct nand_buf
+{
+ int head;
+ int tail;
+ uint8_t buf[DENALI_BUF_SIZE];
+ dma_addr_t dma_buf;
+};
+
+#define INTEL_CE4100 1
+#define INTEL_MRST 2
+
+struct denali_nand_info {
+ struct mtd_info mtd;
+ struct nand_chip nand;
+ struct device_info_tag dev_info;
+ int flash_bank; /* currently selected chip */
+ int status;
+ int platform;
+ struct nand_buf buf;
+ struct pci_dev *dev;
+ int total_used_banks;
+ uint32_t block; /* stored for future use */
+ uint16_t page;
+ void __iomem *flash_reg; /* Mapped io reg base address */
+ void __iomem *flash_mem; /* Mapped io reg base address */
+
+ /* elements used by ISR */
+ struct completion complete;
+ spinlock_t irq_lock;
+ uint32_t irq_status;
+ int irq_debug_array[32];
+ int idx;
+};
+
+static uint16_t NAND_Flash_Reset(struct denali_nand_info *denali);
+static uint16_t NAND_Read_Device_ID(struct denali_nand_info *denali);
+static void NAND_LLD_Enable_Disable_Interrupts(struct denali_nand_info *denali, uint16_t INT_ENABLE);
+
+#endif /*_LLD_NAND_*/
+
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index ae30fb6eed97..5084cc517944 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -874,7 +874,7 @@ static int __devinit fsl_elbc_chip_probe(struct fsl_elbc_ctrl *ctrl,
priv->ctrl = ctrl;
priv->dev = ctrl->dev;
- priv->vbase = ioremap(res.start, res.end - res.start + 1);
+ priv->vbase = ioremap(res.start, resource_size(&res));
if (!priv->vbase) {
dev_err(ctrl->dev, "failed to map chip region\n");
ret = -ENOMEM;
@@ -891,7 +891,7 @@ static int __devinit fsl_elbc_chip_probe(struct fsl_elbc_ctrl *ctrl,
if (ret)
goto err;
- ret = nand_scan_ident(&priv->mtd, 1);
+ ret = nand_scan_ident(&priv->mtd, 1, NULL);
if (ret)
goto err;
@@ -1030,14 +1030,14 @@ static int __devinit fsl_elbc_ctrl_probe(struct of_device *ofdev,
init_waitqueue_head(&ctrl->controller.wq);
init_waitqueue_head(&ctrl->irq_wait);
- ctrl->regs = of_iomap(ofdev->node, 0);
+ ctrl->regs = of_iomap(ofdev->dev.of_node, 0);
if (!ctrl->regs) {
dev_err(&ofdev->dev, "failed to get memory region\n");
ret = -ENODEV;
goto err;
}
- ctrl->irq = of_irq_to_resource(ofdev->node, 0, NULL);
+ ctrl->irq = of_irq_to_resource(ofdev->dev.of_node, 0, NULL);
if (ctrl->irq == NO_IRQ) {
dev_err(&ofdev->dev, "failed to get irq resource\n");
ret = -ENODEV;
@@ -1058,7 +1058,7 @@ static int __devinit fsl_elbc_ctrl_probe(struct of_device *ofdev,
goto err;
}
- for_each_child_of_node(ofdev->node, child)
+ for_each_child_of_node(ofdev->dev.of_node, child)
if (of_device_is_compatible(child, "fsl,elbc-fcm-nand"))
fsl_elbc_chip_probe(ctrl, child);
@@ -1078,9 +1078,10 @@ static const struct of_device_id fsl_elbc_match[] = {
static struct of_platform_driver fsl_elbc_ctrl_driver = {
.driver = {
- .name = "fsl-elbc",
+ .name = "fsl-elbc",
+ .owner = THIS_MODULE,
+ .of_match_table = fsl_elbc_match,
},
- .match_table = fsl_elbc_match,
.probe = fsl_elbc_ctrl_probe,
.remove = fsl_elbc_ctrl_remove,
};
diff --git a/drivers/mtd/nand/fsl_upm.c b/drivers/mtd/nand/fsl_upm.c
index 4b96296af321..00aea6f7d1f1 100644
--- a/drivers/mtd/nand/fsl_upm.c
+++ b/drivers/mtd/nand/fsl_upm.c
@@ -49,7 +49,10 @@ struct fsl_upm_nand {
uint32_t wait_flags;
};
-#define to_fsl_upm_nand(mtd) container_of(mtd, struct fsl_upm_nand, mtd)
+static inline struct fsl_upm_nand *to_fsl_upm_nand(struct mtd_info *mtdinfo)
+{
+ return container_of(mtdinfo, struct fsl_upm_nand, mtd);
+}
static int fun_chip_ready(struct mtd_info *mtd)
{
@@ -303,7 +306,7 @@ static int __devinit fun_probe(struct of_device *ofdev,
FSL_UPM_WAIT_WRITE_BYTE;
fun->io_base = devm_ioremap_nocache(&ofdev->dev, io_res.start,
- io_res.end - io_res.start + 1);
+ resource_size(&io_res));
if (!fun->io_base) {
ret = -ENOMEM;
goto err2;
@@ -350,15 +353,18 @@ static int __devexit fun_remove(struct of_device *ofdev)
return 0;
}
-static struct of_device_id of_fun_match[] = {
+static const struct of_device_id of_fun_match[] = {
{ .compatible = "fsl,upm-nand" },
{},
};
MODULE_DEVICE_TABLE(of, of_fun_match);
static struct of_platform_driver of_fun_driver = {
- .name = "fsl,upm-nand",
- .match_table = of_fun_match,
+ .driver = {
+ .name = "fsl,upm-nand",
+ .owner = THIS_MODULE,
+ .of_match_table = of_fun_match,
+ },
.probe = fun_probe,
.remove = __devexit_p(fun_remove),
};
diff --git a/drivers/mtd/nand/gpio.c b/drivers/mtd/nand/gpio.c
index 8f902e75aa85..0cde618bcc1e 100644
--- a/drivers/mtd/nand/gpio.c
+++ b/drivers/mtd/nand/gpio.c
@@ -181,11 +181,11 @@ static int __devexit gpio_nand_remove(struct platform_device *dev)
res = platform_get_resource(dev, IORESOURCE_MEM, 1);
iounmap(gpiomtd->io_sync);
if (res)
- release_mem_region(res->start, res->end - res->start + 1);
+ release_mem_region(res->start, resource_size(res));
res = platform_get_resource(dev, IORESOURCE_MEM, 0);
iounmap(gpiomtd->nand_chip.IO_ADDR_R);
- release_mem_region(res->start, res->end - res->start + 1);
+ release_mem_region(res->start, resource_size(res));
if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
gpio_set_value(gpiomtd->plat.gpio_nwp, 0);
@@ -208,14 +208,14 @@ static void __iomem *request_and_remap(struct resource *res, size_t size,
{
void __iomem *ptr;
- if (!request_mem_region(res->start, res->end - res->start + 1, name)) {
+ if (!request_mem_region(res->start, resource_size(res), name)) {
*err = -EBUSY;
return NULL;
}
ptr = ioremap(res->start, size);
if (!ptr) {
- release_mem_region(res->start, res->end - res->start + 1);
+ release_mem_region(res->start, resource_size(res));
*err = -ENOMEM;
}
return ptr;
@@ -338,10 +338,10 @@ err_nwp:
err_nce:
iounmap(gpiomtd->io_sync);
if (res1)
- release_mem_region(res1->start, res1->end - res1->start + 1);
+ release_mem_region(res1->start, resource_size(res1));
err_sync:
iounmap(gpiomtd->nand_chip.IO_ADDR_R);
- release_mem_region(res0->start, res0->end - res0->start + 1);
+ release_mem_region(res0->start, resource_size(res0));
err_map:
kfree(gpiomtd);
return ret;
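
The request_mem_region()/ioremap() changes above all substitute resource_size(res) for the open-coded length res->end - res->start + 1; both expressions yield the same size. A minimal sketch of the idiom with error handling, assuming a hypothetical helper name (example_map_resource is not part of this patch):

	#include <linux/io.h>
	#include <linux/ioport.h>

	static void __iomem *example_map_resource(struct resource *res, const char *name)
	{
		void __iomem *ptr;

		/* resource_size(res) equals res->end - res->start + 1 */
		if (!request_mem_region(res->start, resource_size(res), name))
			return NULL;

		ptr = ioremap(res->start, resource_size(res));
		if (!ptr)
			release_mem_region(res->start, resource_size(res));

		return ptr;
	}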
diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c
new file mode 100644
index 000000000000..3d0867d829cb
--- /dev/null
+++ b/drivers/mtd/nand/mpc5121_nfc.c
@@ -0,0 +1,917 @@
+/*
+ * Copyright 2004-2008 Freescale Semiconductor, Inc.
+ * Copyright 2009 Semihalf.
+ *
+ * Approved as OSADL project by a majority of OSADL members and funded
+ * by OSADL membership fees in 2009; for details see www.osadl.org.
+ *
+ * Based on original driver from Freescale Semiconductor
+ * written by John Rigby <jrigby@freescale.com> on basis
+ * of drivers/mtd/nand/mxc_nand.c. Reworked and extended
+ * by Piotr Ziecik <kosmo@semihalf.com>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
+ * MA 02110-1301, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/gfp.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+
+#include <asm/mpc5121.h>
+
+/* Addresses for NFC MAIN RAM BUFFER areas */
+#define NFC_MAIN_AREA(n) ((n) * 0x200)
+
+/* Addresses for NFC SPARE BUFFER areas */
+#define NFC_SPARE_BUFFERS 8
+#define NFC_SPARE_LEN 0x40
+#define NFC_SPARE_AREA(n) (0x1000 + ((n) * NFC_SPARE_LEN))
+
+/* MPC5121 NFC registers */
+#define NFC_BUF_ADDR 0x1E04
+#define NFC_FLASH_ADDR 0x1E06
+#define NFC_FLASH_CMD 0x1E08
+#define NFC_CONFIG 0x1E0A
+#define NFC_ECC_STATUS1 0x1E0C
+#define NFC_ECC_STATUS2 0x1E0E
+#define NFC_SPAS 0x1E10
+#define NFC_WRPROT 0x1E12
+#define NFC_NF_WRPRST 0x1E18
+#define NFC_CONFIG1 0x1E1A
+#define NFC_CONFIG2 0x1E1C
+#define NFC_UNLOCKSTART_BLK0 0x1E20
+#define NFC_UNLOCKEND_BLK0 0x1E22
+#define NFC_UNLOCKSTART_BLK1 0x1E24
+#define NFC_UNLOCKEND_BLK1 0x1E26
+#define NFC_UNLOCKSTART_BLK2 0x1E28
+#define NFC_UNLOCKEND_BLK2 0x1E2A
+#define NFC_UNLOCKSTART_BLK3 0x1E2C
+#define NFC_UNLOCKEND_BLK3 0x1E2E
+
+/* Bit Definitions: NFC_BUF_ADDR */
+#define NFC_RBA_MASK (7 << 0)
+#define NFC_ACTIVE_CS_SHIFT 5
+#define NFC_ACTIVE_CS_MASK (3 << NFC_ACTIVE_CS_SHIFT)
+
+/* Bit Definitions: NFC_CONFIG */
+#define NFC_BLS_UNLOCKED (1 << 1)
+
+/* Bit Definitions: NFC_CONFIG1 */
+#define NFC_ECC_4BIT (1 << 0)
+#define NFC_FULL_PAGE_DMA (1 << 1)
+#define NFC_SPARE_ONLY (1 << 2)
+#define NFC_ECC_ENABLE (1 << 3)
+#define NFC_INT_MASK (1 << 4)
+#define NFC_BIG_ENDIAN (1 << 5)
+#define NFC_RESET (1 << 6)
+#define NFC_CE (1 << 7)
+#define NFC_ONE_CYCLE (1 << 8)
+#define NFC_PPB_32 (0 << 9)
+#define NFC_PPB_64 (1 << 9)
+#define NFC_PPB_128 (2 << 9)
+#define NFC_PPB_256 (3 << 9)
+#define NFC_PPB_MASK (3 << 9)
+#define NFC_FULL_PAGE_INT (1 << 11)
+
+/* Bit Definitions: NFC_CONFIG2 */
+#define NFC_COMMAND (1 << 0)
+#define NFC_ADDRESS (1 << 1)
+#define NFC_INPUT (1 << 2)
+#define NFC_OUTPUT (1 << 3)
+#define NFC_ID (1 << 4)
+#define NFC_STATUS (1 << 5)
+#define NFC_CMD_FAIL (1 << 15)
+#define NFC_INT (1 << 15)
+
+/* Bit Definitions: NFC_WRPROT */
+#define NFC_WPC_LOCK_TIGHT (1 << 0)
+#define NFC_WPC_LOCK (1 << 1)
+#define NFC_WPC_UNLOCK (1 << 2)
+
+#define DRV_NAME "mpc5121_nfc"
+
+/* Timeouts */
+#define NFC_RESET_TIMEOUT 1000 /* 1 ms */
+#define NFC_TIMEOUT (HZ / 10) /* 1/10 s */
+
+struct mpc5121_nfc_prv {
+ struct mtd_info mtd;
+ struct nand_chip chip;
+ int irq;
+ void __iomem *regs;
+ struct clk *clk;
+ wait_queue_head_t irq_waitq;
+ uint column;
+ int spareonly;
+ void __iomem *csreg;
+ struct device *dev;
+};
+
+static void mpc5121_nfc_done(struct mtd_info *mtd);
+
+#ifdef CONFIG_MTD_PARTITIONS
+static const char *mpc5121_nfc_pprobes[] = { "cmdlinepart", NULL };
+#endif
+
+/* Read NFC register */
+static inline u16 nfc_read(struct mtd_info *mtd, uint reg)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct mpc5121_nfc_prv *prv = chip->priv;
+
+ return in_be16(prv->regs + reg);
+}
+
+/* Write NFC register */
+static inline void nfc_write(struct mtd_info *mtd, uint reg, u16 val)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct mpc5121_nfc_prv *prv = chip->priv;
+
+ out_be16(prv->regs + reg, val);
+}
+
+/* Set bits in NFC register */
+static inline void nfc_set(struct mtd_info *mtd, uint reg, u16 bits)
+{
+ nfc_write(mtd, reg, nfc_read(mtd, reg) | bits);
+}
+
+/* Clear bits in NFC register */
+static inline void nfc_clear(struct mtd_info *mtd, uint reg, u16 bits)
+{
+ nfc_write(mtd, reg, nfc_read(mtd, reg) & ~bits);
+}
+
+/* Invoke address cycle */
+static inline void mpc5121_nfc_send_addr(struct mtd_info *mtd, u16 addr)
+{
+ nfc_write(mtd, NFC_FLASH_ADDR, addr);
+ nfc_write(mtd, NFC_CONFIG2, NFC_ADDRESS);
+ mpc5121_nfc_done(mtd);
+}
+
+/* Invoke command cycle */
+static inline void mpc5121_nfc_send_cmd(struct mtd_info *mtd, u16 cmd)
+{
+ nfc_write(mtd, NFC_FLASH_CMD, cmd);
+ nfc_write(mtd, NFC_CONFIG2, NFC_COMMAND);
+ mpc5121_nfc_done(mtd);
+}
+
+/* Send data from NFC buffers to NAND flash */
+static inline void mpc5121_nfc_send_prog_page(struct mtd_info *mtd)
+{
+ nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK);
+ nfc_write(mtd, NFC_CONFIG2, NFC_INPUT);
+ mpc5121_nfc_done(mtd);
+}
+
+/* Receive data from NAND flash */
+static inline void mpc5121_nfc_send_read_page(struct mtd_info *mtd)
+{
+ nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK);
+ nfc_write(mtd, NFC_CONFIG2, NFC_OUTPUT);
+ mpc5121_nfc_done(mtd);
+}
+
+/* Receive ID from NAND flash */
+static inline void mpc5121_nfc_send_read_id(struct mtd_info *mtd)
+{
+ nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK);
+ nfc_write(mtd, NFC_CONFIG2, NFC_ID);
+ mpc5121_nfc_done(mtd);
+}
+
+/* Receive status from NAND flash */
+static inline void mpc5121_nfc_send_read_status(struct mtd_info *mtd)
+{
+ nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK);
+ nfc_write(mtd, NFC_CONFIG2, NFC_STATUS);
+ mpc5121_nfc_done(mtd);
+}
+
+/* NFC interrupt handler */
+static irqreturn_t mpc5121_nfc_irq(int irq, void *data)
+{
+ struct mtd_info *mtd = data;
+ struct nand_chip *chip = mtd->priv;
+ struct mpc5121_nfc_prv *prv = chip->priv;
+
+ nfc_set(mtd, NFC_CONFIG1, NFC_INT_MASK);
+ wake_up(&prv->irq_waitq);
+
+ return IRQ_HANDLED;
+}
+
+/* Wait for operation complete */
+static void mpc5121_nfc_done(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct mpc5121_nfc_prv *prv = chip->priv;
+ int rv;
+
+ if ((nfc_read(mtd, NFC_CONFIG2) & NFC_INT) == 0) {
+ nfc_clear(mtd, NFC_CONFIG1, NFC_INT_MASK);
+ rv = wait_event_timeout(prv->irq_waitq,
+ (nfc_read(mtd, NFC_CONFIG2) & NFC_INT), NFC_TIMEOUT);
+
+ if (!rv)
+ dev_warn(prv->dev,
+ "Timeout while waiting for interrupt.\n");
+ }
+
+ nfc_clear(mtd, NFC_CONFIG2, NFC_INT);
+}
+
+/* Do address cycle(s) */
+static void mpc5121_nfc_addr_cycle(struct mtd_info *mtd, int column, int page)
+{
+ struct nand_chip *chip = mtd->priv;
+ u32 pagemask = chip->pagemask;
+
+ if (column != -1) {
+ mpc5121_nfc_send_addr(mtd, column);
+ if (mtd->writesize > 512)
+ mpc5121_nfc_send_addr(mtd, column >> 8);
+ }
+
+ if (page != -1) {
+ do {
+ mpc5121_nfc_send_addr(mtd, page & 0xFF);
+ page >>= 8;
+ pagemask >>= 8;
+ } while (pagemask);
+ }
+}
+
+/* Control chip select signals */
+static void mpc5121_nfc_select_chip(struct mtd_info *mtd, int chip)
+{
+ if (chip < 0) {
+ nfc_clear(mtd, NFC_CONFIG1, NFC_CE);
+ return;
+ }
+
+ nfc_clear(mtd, NFC_BUF_ADDR, NFC_ACTIVE_CS_MASK);
+ nfc_set(mtd, NFC_BUF_ADDR, (chip << NFC_ACTIVE_CS_SHIFT) &
+ NFC_ACTIVE_CS_MASK);
+ nfc_set(mtd, NFC_CONFIG1, NFC_CE);
+}
+
+/* Init external chip select logic on ADS5121 board */
+static int ads5121_chipselect_init(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct mpc5121_nfc_prv *prv = chip->priv;
+ struct device_node *dn;
+
+ dn = of_find_compatible_node(NULL, NULL, "fsl,mpc5121ads-cpld");
+ if (dn) {
+ prv->csreg = of_iomap(dn, 0);
+ of_node_put(dn);
+ if (!prv->csreg)
+ return -ENOMEM;
+
+ /* CPLD Register 9 controls NAND /CE Lines */
+ prv->csreg += 9;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+/* Control chips select signal on ADS5121 board */
+static void ads5121_select_chip(struct mtd_info *mtd, int chip)
+{
+ struct nand_chip *nand = mtd->priv;
+ struct mpc5121_nfc_prv *prv = nand->priv;
+ u8 v;
+
+ v = in_8(prv->csreg);
+ v |= 0x0F;
+
+ if (chip >= 0) {
+ mpc5121_nfc_select_chip(mtd, 0);
+ v &= ~(1 << chip);
+ } else
+ mpc5121_nfc_select_chip(mtd, -1);
+
+ out_8(prv->csreg, v);
+}
+
+/* Read NAND Ready/Busy signal */
+static int mpc5121_nfc_dev_ready(struct mtd_info *mtd)
+{
+ /*
+ * NFC handles ready/busy signal internally. Therefore, this function
+ * always reports the device as ready.
+ */
+ return 1;
+}
+
+/* Write command to NAND flash */
+static void mpc5121_nfc_command(struct mtd_info *mtd, unsigned command,
+ int column, int page)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct mpc5121_nfc_prv *prv = chip->priv;
+
+ prv->column = (column >= 0) ? column : 0;
+ prv->spareonly = 0;
+
+ switch (command) {
+ case NAND_CMD_PAGEPROG:
+ mpc5121_nfc_send_prog_page(mtd);
+ break;
+ /*
+ * NFC does not support sub-page reads and writes,
+ * so emulate them using full page transfers.
+ */
+ case NAND_CMD_READ0:
+ column = 0;
+ break;
+
+ case NAND_CMD_READ1:
+ prv->column += 256;
+ command = NAND_CMD_READ0;
+ column = 0;
+ break;
+
+ case NAND_CMD_READOOB:
+ prv->spareonly = 1;
+ command = NAND_CMD_READ0;
+ column = 0;
+ break;
+
+ case NAND_CMD_SEQIN:
+ mpc5121_nfc_command(mtd, NAND_CMD_READ0, column, page);
+ column = 0;
+ break;
+
+ case NAND_CMD_ERASE1:
+ case NAND_CMD_ERASE2:
+ case NAND_CMD_READID:
+ case NAND_CMD_STATUS:
+ break;
+
+ default:
+ return;
+ }
+
+ mpc5121_nfc_send_cmd(mtd, command);
+ mpc5121_nfc_addr_cycle(mtd, column, page);
+
+ switch (command) {
+ case NAND_CMD_READ0:
+ if (mtd->writesize > 512)
+ mpc5121_nfc_send_cmd(mtd, NAND_CMD_READSTART);
+ mpc5121_nfc_send_read_page(mtd);
+ break;
+
+ case NAND_CMD_READID:
+ mpc5121_nfc_send_read_id(mtd);
+ break;
+
+ case NAND_CMD_STATUS:
+ mpc5121_nfc_send_read_status(mtd);
+ if (chip->options & NAND_BUSWIDTH_16)
+ prv->column = 1;
+ else
+ prv->column = 0;
+ break;
+ }
+}
+
+/* Copy data from/to NFC spare buffers. */
+static void mpc5121_nfc_copy_spare(struct mtd_info *mtd, uint offset,
+ u8 *buffer, uint size, int wr)
+{
+ struct nand_chip *nand = mtd->priv;
+ struct mpc5121_nfc_prv *prv = nand->priv;
+ uint o, s, sbsize, blksize;
+
+ /*
+ * NAND spare area is available through NFC spare buffers.
+ * The NFC divides spare area into (page_size / 512) chunks.
+ * Each chunk is placed into separate spare memory area, using
+ * first (spare_size / num_of_chunks) bytes of the buffer.
+ *
+ * For NAND devices whose spare area is not evenly divisible by
+ * the number of chunks, the number of used bytes in each spare
+ * buffer is rounded down to the nearest even number of bytes,
+ * and all remaining bytes are added to the last used spare area.
+ *
+ * For more information read section 26.6.10 of MPC5121e
+ * Microcontroller Reference Manual, Rev. 3.
+ */
+
+ /* Calculate number of valid bytes in each spare buffer */
+ sbsize = (mtd->oobsize / (mtd->writesize / 512)) & ~1;
+
+ while (size) {
+ /* Calculate spare buffer number */
+ s = offset / sbsize;
+ if (s > NFC_SPARE_BUFFERS - 1)
+ s = NFC_SPARE_BUFFERS - 1;
+
+ /*
+ * Calculate offset to requested data block in selected spare
+ * buffer and its size.
+ */
+ o = offset - (s * sbsize);
+ blksize = min(sbsize - o, size);
+
+ if (wr)
+ memcpy_toio(prv->regs + NFC_SPARE_AREA(s) + o,
+ buffer, blksize);
+ else
+ memcpy_fromio(buffer,
+ prv->regs + NFC_SPARE_AREA(s) + o, blksize);
+
+ buffer += blksize;
+ offset += blksize;
+ size -= blksize;
+ };
+}
+
+/* Copy data from/to NFC main and spare buffers */
+static void mpc5121_nfc_buf_copy(struct mtd_info *mtd, u_char *buf, int len,
+ int wr)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct mpc5121_nfc_prv *prv = chip->priv;
+ uint c = prv->column;
+ uint l;
+
+ /* Handle spare area access */
+ if (prv->spareonly || c >= mtd->writesize) {
+ /* Calculate offset from beginning of spare area */
+ if (c >= mtd->writesize)
+ c -= mtd->writesize;
+
+ prv->column += len;
+ mpc5121_nfc_copy_spare(mtd, c, buf, len, wr);
+ return;
+ }
+
+ /*
+ * Handle main area access - limit copy length to prevent
+ * crossing main/spare boundary.
+ */
+ l = min((uint)len, mtd->writesize - c);
+ prv->column += l;
+
+ if (wr)
+ memcpy_toio(prv->regs + NFC_MAIN_AREA(0) + c, buf, l);
+ else
+ memcpy_fromio(buf, prv->regs + NFC_MAIN_AREA(0) + c, l);
+
+ /* Handle crossing main/spare boundary */
+ if (l != len) {
+ buf += l;
+ len -= l;
+ mpc5121_nfc_buf_copy(mtd, buf, len, wr);
+ }
+}
+
+/* Read data from NFC buffers */
+static void mpc5121_nfc_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+{
+ mpc5121_nfc_buf_copy(mtd, buf, len, 0);
+}
+
+/* Write data to NFC buffers */
+static void mpc5121_nfc_write_buf(struct mtd_info *mtd,
+ const u_char *buf, int len)
+{
+ mpc5121_nfc_buf_copy(mtd, (u_char *)buf, len, 1);
+}
+
+/* Compare buffer with NAND flash */
+static int mpc5121_nfc_verify_buf(struct mtd_info *mtd,
+ const u_char *buf, int len)
+{
+ u_char tmp[256];
+ uint bsize;
+
+ while (len) {
+ bsize = min(len, 256);
+ mpc5121_nfc_read_buf(mtd, tmp, bsize);
+
+ if (memcmp(buf, tmp, bsize))
+ return 1;
+
+ buf += bsize;
+ len -= bsize;
+ }
+
+ return 0;
+}
+
+/* Read byte from NFC buffers */
+static u8 mpc5121_nfc_read_byte(struct mtd_info *mtd)
+{
+ u8 tmp;
+
+ mpc5121_nfc_read_buf(mtd, &tmp, sizeof(tmp));
+
+ return tmp;
+}
+
+/* Read word from NFC buffers */
+static u16 mpc5121_nfc_read_word(struct mtd_info *mtd)
+{
+ u16 tmp;
+
+ mpc5121_nfc_read_buf(mtd, (u_char *)&tmp, sizeof(tmp));
+
+ return tmp;
+}
+
+/*
+ * Read NFC configuration from Reset Config Word
+ *
+ * NFC is configured during reset on the basis of information stored
+ * in Reset Config Word. There is no other way to set NAND block
+ * size, spare size and bus width.
+ */
+static int mpc5121_nfc_read_hw_config(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct mpc5121_nfc_prv *prv = chip->priv;
+ struct mpc512x_reset_module *rm;
+ struct device_node *rmnode;
+ uint rcw_pagesize = 0;
+ uint rcw_sparesize = 0;
+ uint rcw_width;
+ uint rcwh;
+ uint romloc, ps;
+
+ rmnode = of_find_compatible_node(NULL, NULL, "fsl,mpc5121-reset");
+ if (!rmnode) {
+ dev_err(prv->dev, "Missing 'fsl,mpc5121-reset' "
+ "node in device tree!\n");
+ return -ENODEV;
+ }
+
+ rm = of_iomap(rmnode, 0);
+ if (!rm) {
+ dev_err(prv->dev, "Error mapping reset module node!\n");
+ return -EBUSY;
+ }
+
+ rcwh = in_be32(&rm->rcwhr);
+
+ /* Bit 6: NFC bus width */
+ rcw_width = ((rcwh >> 6) & 0x1) ? 2 : 1;
+
+ /* Bit 7: NFC Page/Spare size */
+ ps = (rcwh >> 7) & 0x1;
+
+ /* Bits [22:21]: ROM Location */
+ romloc = (rcwh >> 21) & 0x3;
+
+ /* Decode RCW bits */
+ switch ((ps << 2) | romloc) {
+ case 0x00:
+ case 0x01:
+ rcw_pagesize = 512;
+ rcw_sparesize = 16;
+ break;
+ case 0x02:
+ case 0x03:
+ rcw_pagesize = 4096;
+ rcw_sparesize = 128;
+ break;
+ case 0x04:
+ case 0x05:
+ rcw_pagesize = 2048;
+ rcw_sparesize = 64;
+ break;
+ case 0x06:
+ case 0x07:
+ rcw_pagesize = 4096;
+ rcw_sparesize = 218;
+ break;
+ }
+
+ mtd->writesize = rcw_pagesize;
+ mtd->oobsize = rcw_sparesize;
+ if (rcw_width == 2)
+ chip->options |= NAND_BUSWIDTH_16;
+
+ dev_notice(prv->dev, "Configured for "
+ "%u-bit NAND, page size %u "
+ "with %u spare.\n",
+ rcw_width * 8, rcw_pagesize,
+ rcw_sparesize);
+ iounmap(rm);
+ of_node_put(rmnode);
+ return 0;
+}
+
+/* Free driver resources */
+static void mpc5121_nfc_free(struct device *dev, struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct mpc5121_nfc_prv *prv = chip->priv;
+
+ if (prv->clk) {
+ clk_disable(prv->clk);
+ clk_put(prv->clk);
+ }
+
+ if (prv->csreg)
+ iounmap(prv->csreg);
+}
+
+static int __devinit mpc5121_nfc_probe(struct of_device *op,
+ const struct of_device_id *match)
+{
+ struct device_node *rootnode, *dn = op->node;
+ struct device *dev = &op->dev;
+ struct mpc5121_nfc_prv *prv;
+ struct resource res;
+ struct mtd_info *mtd;
+#ifdef CONFIG_MTD_PARTITIONS
+ struct mtd_partition *parts;
+#endif
+ struct nand_chip *chip;
+ unsigned long regs_paddr, regs_size;
+ const uint *chips_no;
+ int resettime = 0;
+ int retval = 0;
+ int rev, len;
+
+ /*
+ * Check SoC revision. This driver supports only NFC
+ * in MPC5121 revision 2 and MPC5123 revision 3.
+ */
+ rev = (mfspr(SPRN_SVR) >> 4) & 0xF;
+ if ((rev != 2) && (rev != 3)) {
+ dev_err(dev, "SoC revision %u is not supported!\n", rev);
+ return -ENXIO;
+ }
+
+ prv = devm_kzalloc(dev, sizeof(*prv), GFP_KERNEL);
+ if (!prv) {
+ dev_err(dev, "Memory exhausted!\n");
+ return -ENOMEM;
+ }
+
+ mtd = &prv->mtd;
+ chip = &prv->chip;
+
+ mtd->priv = chip;
+ chip->priv = prv;
+ prv->dev = dev;
+
+ /* Read NFC configuration from Reset Config Word */
+ retval = mpc5121_nfc_read_hw_config(mtd);
+ if (retval) {
+ dev_err(dev, "Unable to read NFC config!\n");
+ return retval;
+ }
+
+ prv->irq = irq_of_parse_and_map(dn, 0);
+ if (prv->irq == NO_IRQ) {
+ dev_err(dev, "Error mapping IRQ!\n");
+ return -EINVAL;
+ }
+
+ retval = of_address_to_resource(dn, 0, &res);
+ if (retval) {
+ dev_err(dev, "Error parsing memory region!\n");
+ return retval;
+ }
+
+ chips_no = of_get_property(dn, "chips", &len);
+ if (!chips_no || len != sizeof(*chips_no)) {
+ dev_err(dev, "Invalid/missing 'chips' property!\n");
+ return -EINVAL;
+ }
+
+ regs_paddr = res.start;
+ regs_size = res.end - res.start + 1;
+
+ if (!devm_request_mem_region(dev, regs_paddr, regs_size, DRV_NAME)) {
+ dev_err(dev, "Error requesting memory region!\n");
+ return -EBUSY;
+ }
+
+ prv->regs = devm_ioremap(dev, regs_paddr, regs_size);
+ if (!prv->regs) {
+ dev_err(dev, "Error mapping memory region!\n");
+ return -ENOMEM;
+ }
+
+ mtd->name = "MPC5121 NAND";
+ chip->dev_ready = mpc5121_nfc_dev_ready;
+ chip->cmdfunc = mpc5121_nfc_command;
+ chip->read_byte = mpc5121_nfc_read_byte;
+ chip->read_word = mpc5121_nfc_read_word;
+ chip->read_buf = mpc5121_nfc_read_buf;
+ chip->write_buf = mpc5121_nfc_write_buf;
+ chip->verify_buf = mpc5121_nfc_verify_buf;
+ chip->select_chip = mpc5121_nfc_select_chip;
+ chip->options = NAND_NO_AUTOINCR | NAND_USE_FLASH_BBT;
+ chip->ecc.mode = NAND_ECC_SOFT;
+
+ /* Support external chip-select logic on ADS5121 board */
+ rootnode = of_find_node_by_path("/");
+ if (of_device_is_compatible(rootnode, "fsl,mpc5121ads")) {
+ retval = ads5121_chipselect_init(mtd);
+ if (retval) {
+ dev_err(dev, "Chipselect init error!\n");
+ of_node_put(rootnode);
+ return retval;
+ }
+
+ chip->select_chip = ads5121_select_chip;
+ }
+ of_node_put(rootnode);
+
+ /* Enable NFC clock */
+ prv->clk = clk_get(dev, "nfc_clk");
+ if (!prv->clk) {
+ dev_err(dev, "Unable to acquire NFC clock!\n");
+ retval = -ENODEV;
+ goto error;
+ }
+
+ clk_enable(prv->clk);
+
+ /* Reset NAND Flash controller */
+ nfc_set(mtd, NFC_CONFIG1, NFC_RESET);
+ while (nfc_read(mtd, NFC_CONFIG1) & NFC_RESET) {
+ if (resettime++ >= NFC_RESET_TIMEOUT) {
+ dev_err(dev, "Timeout while resetting NFC!\n");
+ retval = -EINVAL;
+ goto error;
+ }
+
+ udelay(1);
+ }
+
+ /* Enable write to NFC memory */
+ nfc_write(mtd, NFC_CONFIG, NFC_BLS_UNLOCKED);
+
+ /* Enable write to all NAND pages */
+ nfc_write(mtd, NFC_UNLOCKSTART_BLK0, 0x0000);
+ nfc_write(mtd, NFC_UNLOCKEND_BLK0, 0xFFFF);
+ nfc_write(mtd, NFC_WRPROT, NFC_WPC_UNLOCK);
+
+ /*
+ * Setup NFC:
+ * - Big Endian transfers,
+ * - Interrupt after full page read/write.
+ */
+ nfc_write(mtd, NFC_CONFIG1, NFC_BIG_ENDIAN | NFC_INT_MASK |
+ NFC_FULL_PAGE_INT);
+
+ /* Set spare area size */
+ nfc_write(mtd, NFC_SPAS, mtd->oobsize >> 1);
+
+ init_waitqueue_head(&prv->irq_waitq);
+ retval = devm_request_irq(dev, prv->irq, &mpc5121_nfc_irq, 0, DRV_NAME,
+ mtd);
+ if (retval) {
+ dev_err(dev, "Error requesting IRQ!\n");
+ goto error;
+ }
+
+ /* Detect NAND chips */
+ if (nand_scan(mtd, *chips_no)) {
+ dev_err(dev, "NAND Flash not found !\n");
+ devm_free_irq(dev, prv->irq, mtd);
+ retval = -ENXIO;
+ goto error;
+ }
+
+ /* Set erase block size */
+ switch (mtd->erasesize / mtd->writesize) {
+ case 32:
+ nfc_set(mtd, NFC_CONFIG1, NFC_PPB_32);
+ break;
+
+ case 64:
+ nfc_set(mtd, NFC_CONFIG1, NFC_PPB_64);
+ break;
+
+ case 128:
+ nfc_set(mtd, NFC_CONFIG1, NFC_PPB_128);
+ break;
+
+ case 256:
+ nfc_set(mtd, NFC_CONFIG1, NFC_PPB_256);
+ break;
+
+ default:
+ dev_err(dev, "Unsupported NAND flash!\n");
+ devm_free_irq(dev, prv->irq, mtd);
+ retval = -ENXIO;
+ goto error;
+ }
+
+ dev_set_drvdata(dev, mtd);
+
+ /* Register device in MTD */
+#ifdef CONFIG_MTD_PARTITIONS
+ retval = parse_mtd_partitions(mtd, mpc5121_nfc_pprobes, &parts, 0);
+#ifdef CONFIG_MTD_OF_PARTS
+ if (retval == 0)
+ retval = of_mtd_parse_partitions(dev, dn, &parts);
+#endif
+ if (retval < 0) {
+ dev_err(dev, "Error parsing MTD partitions!\n");
+ devm_free_irq(dev, prv->irq, mtd);
+ retval = -EINVAL;
+ goto error;
+ }
+
+ if (retval > 0)
+ retval = add_mtd_partitions(mtd, parts, retval);
+ else
+#endif
+ retval = add_mtd_device(mtd);
+
+ if (retval) {
+ dev_err(dev, "Error adding MTD device!\n");
+ devm_free_irq(dev, prv->irq, mtd);
+ goto error;
+ }
+
+ return 0;
+error:
+ mpc5121_nfc_free(dev, mtd);
+ return retval;
+}
+
+static int __devexit mpc5121_nfc_remove(struct of_device *op)
+{
+ struct device *dev = &op->dev;
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+ struct nand_chip *chip = mtd->priv;
+ struct mpc5121_nfc_prv *prv = chip->priv;
+
+ nand_release(mtd);
+ devm_free_irq(dev, prv->irq, mtd);
+ mpc5121_nfc_free(dev, mtd);
+
+ return 0;
+}
+
+static struct of_device_id mpc5121_nfc_match[] __devinitdata = {
+ { .compatible = "fsl,mpc5121-nfc", },
+ {},
+};
+
+static struct of_platform_driver mpc5121_nfc_driver = {
+ .match_table = mpc5121_nfc_match,
+ .probe = mpc5121_nfc_probe,
+ .remove = __devexit_p(mpc5121_nfc_remove),
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init mpc5121_nfc_init(void)
+{
+ return of_register_platform_driver(&mpc5121_nfc_driver);
+}
+
+module_init(mpc5121_nfc_init);
+
+static void __exit mpc5121_nfc_cleanup(void)
+{
+ of_unregister_platform_driver(&mpc5121_nfc_driver);
+}
+
+module_exit(mpc5121_nfc_cleanup);
+
+MODULE_AUTHOR("Freescale Semiconductor, Inc.");
+MODULE_DESCRIPTION("MPC5121 NAND MTD driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index b2900d8406d3..82e94389824e 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -38,7 +38,7 @@
#define DRIVER_NAME "mxc_nand"
#define nfc_is_v21() (cpu_is_mx25() || cpu_is_mx35())
-#define nfc_is_v1() (cpu_is_mx31() || cpu_is_mx27())
+#define nfc_is_v1() (cpu_is_mx31() || cpu_is_mx27() || cpu_is_mx21())
/* Addresses for NFC registers */
#define NFC_BUF_SIZE 0xE00
@@ -168,11 +168,7 @@ static irqreturn_t mxc_nfc_irq(int irq, void *dev_id)
{
struct mxc_nand_host *host = dev_id;
- uint16_t tmp;
-
- tmp = readw(host->regs + NFC_CONFIG1);
- tmp |= NFC_INT_MSK; /* Disable interrupt */
- writew(tmp, host->regs + NFC_CONFIG1);
+ disable_irq_nosync(irq);
wake_up(&host->irq_waitq);
@@ -184,15 +180,13 @@ static irqreturn_t mxc_nfc_irq(int irq, void *dev_id)
*/
static void wait_op_done(struct mxc_nand_host *host, int useirq)
{
- uint32_t tmp;
- int max_retries = 2000;
+ uint16_t tmp;
+ int max_retries = 8000;
if (useirq) {
if ((readw(host->regs + NFC_CONFIG2) & NFC_INT) == 0) {
- tmp = readw(host->regs + NFC_CONFIG1);
- tmp &= ~NFC_INT_MSK; /* Enable interrupt */
- writew(tmp, host->regs + NFC_CONFIG1);
+ enable_irq(host->irq);
wait_event(host->irq_waitq,
readw(host->regs + NFC_CONFIG2) & NFC_INT);
@@ -226,8 +220,23 @@ static void send_cmd(struct mxc_nand_host *host, uint16_t cmd, int useirq)
writew(cmd, host->regs + NFC_FLASH_CMD);
writew(NFC_CMD, host->regs + NFC_CONFIG2);
- /* Wait for operation to complete */
- wait_op_done(host, useirq);
+ if (cpu_is_mx21() && (cmd == NAND_CMD_RESET)) {
+ int max_retries = 100;
+ /* Reset completion is indicated by NFC_CONFIG2 */
+ /* being set to 0 */
+ while (max_retries-- > 0) {
+ if (readw(host->regs + NFC_CONFIG2) == 0) {
+ break;
+ }
+ udelay(1);
+ }
+ if (max_retries < 0)
+ DEBUG(MTD_DEBUG_LEVEL0, "%s: RESET failed\n",
+ __func__);
+ } else {
+ /* Wait for operation to complete */
+ wait_op_done(host, useirq);
+ }
}
/* This function sends an address (or partial address) to the
@@ -542,6 +551,41 @@ static void mxc_do_addr_cycle(struct mtd_info *mtd, int column, int page_addr)
}
}
+static void preset(struct mtd_info *mtd)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct mxc_nand_host *host = nand_chip->priv;
+ uint16_t tmp;
+
+ /* enable interrupt, disable spare enable */
+ tmp = readw(host->regs + NFC_CONFIG1);
+ tmp &= ~NFC_INT_MSK;
+ tmp &= ~NFC_SP_EN;
+ if (nand_chip->ecc.mode == NAND_ECC_HW) {
+ tmp |= NFC_ECC_EN;
+ } else {
+ tmp &= ~NFC_ECC_EN;
+ }
+ writew(tmp, host->regs + NFC_CONFIG1);
+ /* preset operation */
+
+ /* Unlock the internal RAM Buffer */
+ writew(0x2, host->regs + NFC_CONFIG);
+
+ /* Blocks to be unlocked */
+ if (nfc_is_v21()) {
+ writew(0x0, host->regs + NFC_V21_UNLOCKSTART_BLKADDR);
+ writew(0xffff, host->regs + NFC_V21_UNLOCKEND_BLKADDR);
+ } else if (nfc_is_v1()) {
+ writew(0x0, host->regs + NFC_V1_UNLOCKSTART_BLKADDR);
+ writew(0x4000, host->regs + NFC_V1_UNLOCKEND_BLKADDR);
+ } else
+ BUG();
+
+ /* Unlock Block Command for given address range */
+ writew(0x4, host->regs + NFC_WRPROT);
+}
+
/* Used by the upper layer to write command to NAND Flash for
* different operations to be carried out on NAND Flash */
static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
@@ -559,6 +603,10 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
/* Command pre-processing step */
switch (command) {
+ case NAND_CMD_RESET:
+ send_cmd(host, command, false);
+ preset(mtd);
+ break;
case NAND_CMD_STATUS:
host->buf_start = 0;
@@ -679,7 +727,6 @@ static int __init mxcnd_probe(struct platform_device *pdev)
struct mxc_nand_platform_data *pdata = pdev->dev.platform_data;
struct mxc_nand_host *host;
struct resource *res;
- uint16_t tmp;
int err = 0, nr_parts = 0;
struct nand_ecclayout *oob_smallpage, *oob_largepage;
@@ -743,51 +790,17 @@ static int __init mxcnd_probe(struct platform_device *pdev)
host->spare_len = 64;
oob_smallpage = &nandv2_hw_eccoob_smallpage;
oob_largepage = &nandv2_hw_eccoob_largepage;
+ this->ecc.bytes = 9;
} else if (nfc_is_v1()) {
host->regs = host->base;
host->spare0 = host->base + 0x800;
host->spare_len = 16;
oob_smallpage = &nandv1_hw_eccoob_smallpage;
oob_largepage = &nandv1_hw_eccoob_largepage;
- } else
- BUG();
-
- /* disable interrupt and spare enable */
- tmp = readw(host->regs + NFC_CONFIG1);
- tmp |= NFC_INT_MSK;
- tmp &= ~NFC_SP_EN;
- writew(tmp, host->regs + NFC_CONFIG1);
-
- init_waitqueue_head(&host->irq_waitq);
-
- host->irq = platform_get_irq(pdev, 0);
-
- err = request_irq(host->irq, mxc_nfc_irq, 0, DRIVER_NAME, host);
- if (err)
- goto eirq;
-
- /* Reset NAND */
- this->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
-
- /* preset operation */
- /* Unlock the internal RAM Buffer */
- writew(0x2, host->regs + NFC_CONFIG);
-
- /* Blocks to be unlocked */
- if (nfc_is_v21()) {
- writew(0x0, host->regs + NFC_V21_UNLOCKSTART_BLKADDR);
- writew(0xffff, host->regs + NFC_V21_UNLOCKEND_BLKADDR);
- this->ecc.bytes = 9;
- } else if (nfc_is_v1()) {
- writew(0x0, host->regs + NFC_V1_UNLOCKSTART_BLKADDR);
- writew(0x4000, host->regs + NFC_V1_UNLOCKEND_BLKADDR);
this->ecc.bytes = 3;
} else
BUG();
- /* Unlock Block Command for given address range */
- writew(0x4, host->regs + NFC_WRPROT);
-
this->ecc.size = 512;
this->ecc.layout = oob_smallpage;
@@ -796,14 +809,8 @@ static int __init mxcnd_probe(struct platform_device *pdev)
this->ecc.hwctl = mxc_nand_enable_hwecc;
this->ecc.correct = mxc_nand_correct_data;
this->ecc.mode = NAND_ECC_HW;
- tmp = readw(host->regs + NFC_CONFIG1);
- tmp |= NFC_ECC_EN;
- writew(tmp, host->regs + NFC_CONFIG1);
} else {
this->ecc.mode = NAND_ECC_SOFT;
- tmp = readw(host->regs + NFC_CONFIG1);
- tmp &= ~NFC_ECC_EN;
- writew(tmp, host->regs + NFC_CONFIG1);
}
/* NAND bus width determines access functions used by upper layer */
@@ -817,8 +824,16 @@ static int __init mxcnd_probe(struct platform_device *pdev)
this->options |= NAND_USE_FLASH_BBT;
}
+ init_waitqueue_head(&host->irq_waitq);
+
+ host->irq = platform_get_irq(pdev, 0);
+
+ err = request_irq(host->irq, mxc_nfc_irq, IRQF_DISABLED, DRIVER_NAME, host);
+ if (err)
+ goto eirq;
+
/* first scan to find the device and get the page size */
- if (nand_scan_ident(mtd, 1)) {
+ if (nand_scan_ident(mtd, 1, NULL)) {
err = -ENXIO;
goto escan;
}
@@ -886,11 +901,14 @@ static int mxcnd_suspend(struct platform_device *pdev, pm_message_t state)
int ret = 0;
DEBUG(MTD_DEBUG_LEVEL0, "MXC_ND : NAND suspend\n");
- if (mtd) {
- ret = mtd->suspend(mtd);
- /* Disable the NFC clock */
- clk_disable(host->clk);
- }
+
+ ret = mtd->suspend(mtd);
+
+ /*
+ * nand_suspend locks the device for exclusive access, so
+ * the clock must already be off.
+ */
+ BUG_ON(!ret && host->clk_act);
return ret;
}
@@ -904,11 +922,7 @@ static int mxcnd_resume(struct platform_device *pdev)
DEBUG(MTD_DEBUG_LEVEL0, "MXC_ND : NAND resume\n");
- if (mtd) {
- /* Enable the NFC clock */
- clk_enable(host->clk);
- mtd->resume(mtd);
- }
+ mtd->resume(mtd);
return ret;
}
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 8f2958fe2148..4a7b86423ee9 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -108,6 +108,35 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
*/
DEFINE_LED_TRIGGER(nand_led_trigger);
+static int check_offs_len(struct mtd_info *mtd,
+ loff_t ofs, uint64_t len)
+{
+ struct nand_chip *chip = mtd->priv;
+ int ret = 0;
+
+ /* Start address must align on block boundary */
+ if (ofs & ((1 << chip->phys_erase_shift) - 1)) {
+ DEBUG(MTD_DEBUG_LEVEL0, "%s: Unaligned address\n", __func__);
+ ret = -EINVAL;
+ }
+
+ /* Length must align on block boundary */
+ if (len & ((1 << chip->phys_erase_shift) - 1)) {
+ DEBUG(MTD_DEBUG_LEVEL0, "%s: Length not block aligned\n",
+ __func__);
+ ret = -EINVAL;
+ }
+
+ /* Do not allow past end of device */
+ if (ofs + len > mtd->size) {
+ DEBUG(MTD_DEBUG_LEVEL0, "%s: Past end of device\n",
+ __func__);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
/**
* nand_release_device - [GENERIC] release chip
* @mtd: MTD device structure
@@ -318,6 +347,9 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
struct nand_chip *chip = mtd->priv;
u16 bad;
+ if (chip->options & NAND_BB_LAST_PAGE)
+ ofs += mtd->erasesize - mtd->writesize;
+
page = (int)(ofs >> chip->page_shift) & chip->pagemask;
if (getchip) {
@@ -335,14 +367,18 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
bad = cpu_to_le16(chip->read_word(mtd));
if (chip->badblockpos & 0x1)
bad >>= 8;
- if ((bad & 0xFF) != 0xff)
- res = 1;
+ else
+ bad &= 0xFF;
} else {
chip->cmdfunc(mtd, NAND_CMD_READOOB, chip->badblockpos, page);
- if (chip->read_byte(mtd) != 0xff)
- res = 1;
+ bad = chip->read_byte(mtd);
}
+ if (likely(chip->badblockbits == 8))
+ res = bad != 0xFF;
+ else
+ res = hweight8(bad) < chip->badblockbits;
+
if (getchip)
nand_release_device(mtd);
@@ -363,6 +399,9 @@ static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
uint8_t buf[2] = { 0, 0 };
int block, ret;
+ if (chip->options & NAND_BB_LAST_PAGE)
+ ofs += mtd->erasesize - mtd->writesize;
+
/* Get block number */
block = (int)(ofs >> chip->bbt_erase_shift);
if (chip->bbt)
@@ -401,6 +440,11 @@ static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
static int nand_check_wp(struct mtd_info *mtd)
{
struct nand_chip *chip = mtd->priv;
+
+ /* broken xD cards report WP despite being writable */
+ if (chip->options & NAND_BROKEN_XD)
+ return 0;
+
/* Check the WP bit */
chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
return (chip->read_byte(mtd) & NAND_STATUS_WP) ? 0 : 1;
@@ -744,9 +788,6 @@ nand_get_device(struct nand_chip *chip, struct mtd_info *mtd, int new_state)
chip->state = FL_PM_SUSPENDED;
spin_unlock(lock);
return 0;
- } else {
- spin_unlock(lock);
- return -EAGAIN;
}
}
set_current_state(TASK_UNINTERRUPTIBLE);
@@ -835,6 +876,168 @@ static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
}
/**
+ * __nand_unlock - [REPLACEABLE] unlocks specified locked blocks
+ *
+ * @param mtd - mtd info
+ * @param ofs - offset to start unlock from
+ * @param len - length to unlock
+ * @invert - when = 0, unlock the range of blocks within the lower and
+ * upper boundary address
+ * when = 1, unlock the range of blocks outside the boundaries
+ * of the lower and upper boundary address
+ *
+ * @return - unlock status
+ */
+static int __nand_unlock(struct mtd_info *mtd, loff_t ofs,
+ uint64_t len, int invert)
+{
+ int ret = 0;
+ int status, page;
+ struct nand_chip *chip = mtd->priv;
+
+ /* Submit address of first page to unlock */
+ page = ofs >> chip->page_shift;
+ chip->cmdfunc(mtd, NAND_CMD_UNLOCK1, -1, page & chip->pagemask);
+
+ /* Submit address of last page to unlock */
+ page = (ofs + len) >> chip->page_shift;
+ chip->cmdfunc(mtd, NAND_CMD_UNLOCK2, -1,
+ (page | invert) & chip->pagemask);
+
+ /* Call wait ready function */
+ status = chip->waitfunc(mtd, chip);
+ udelay(1000);
+ /* See if device thinks it succeeded */
+ if (status & 0x01) {
+ DEBUG(MTD_DEBUG_LEVEL0, "%s: Error status = 0x%08x\n",
+ __func__, status);
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+/**
+ * nand_unlock - [REPLACEABLE] unlocks specified locked blocks
+ *
+ * @param mtd - mtd info
+ * @param ofs - offset to start unlock from
+ * @param len - length to unlock
+ *
+ * @return - unlock status
+ */
+int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ int ret = 0;
+ int chipnr;
+ struct nand_chip *chip = mtd->priv;
+
+ DEBUG(MTD_DEBUG_LEVEL3, "%s: start = 0x%012llx, len = %llu\n",
+ __func__, (unsigned long long)ofs, len);
+
+ if (check_offs_len(mtd, ofs, len))
+ ret = -EINVAL;
+
+ /* Align to last block address if the requested range reaches the end of the device */
+ if (ofs + len == mtd->size)
+ len -= mtd->erasesize;
+
+ nand_get_device(chip, mtd, FL_UNLOCKING);
+
+ /* Shift to get chip number */
+ chipnr = ofs >> chip->chip_shift;
+
+ chip->select_chip(mtd, chipnr);
+
+ /* Check, if it is write protected */
+ if (nand_check_wp(mtd)) {
+ DEBUG(MTD_DEBUG_LEVEL0, "%s: Device is write protected!!!\n",
+ __func__);
+ ret = -EIO;
+ goto out;
+ }
+
+ ret = __nand_unlock(mtd, ofs, len, 0);
+
+out:
+ /* de-select the NAND device */
+ chip->select_chip(mtd, -1);
+
+ nand_release_device(mtd);
+
+ return ret;
+}
+
+/**
+ * nand_lock - [REPLACEABLE] locks all blocks present in the device
+ *
+ * @param mtd - mtd info
+ * @param ofs - offset to start lock from
+ * @param len - length to lock
+ *
+ * @return - lock status
+ *
+ * This feature is not supported in many NAND parts. 'Micron' NAND parts
+ * do have this feature, but it allows only locking all blocks, not a
+ * specified range of blocks.
+ *
+ * Implementing 'lock' feature by making use of 'unlock', for now.
+ */
+int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ int ret = 0;
+ int chipnr, status, page;
+ struct nand_chip *chip = mtd->priv;
+
+ DEBUG(MTD_DEBUG_LEVEL3, "%s: start = 0x%012llx, len = %llu\n",
+ __func__, (unsigned long long)ofs, len);
+
+ if (check_offs_len(mtd, ofs, len))
+ ret = -EINVAL;
+
+ nand_get_device(chip, mtd, FL_LOCKING);
+
+ /* Shift to get chip number */
+ chipnr = ofs >> chip->chip_shift;
+
+ chip->select_chip(mtd, chipnr);
+
+ /* Check, if it is write protected */
+ if (nand_check_wp(mtd)) {
+ DEBUG(MTD_DEBUG_LEVEL0, "%s: Device is write protected!!!\n",
+ __func__);
+ status = MTD_ERASE_FAILED;
+ ret = -EIO;
+ goto out;
+ }
+
+ /* Submit address of first page to lock */
+ page = ofs >> chip->page_shift;
+ chip->cmdfunc(mtd, NAND_CMD_LOCK, -1, page & chip->pagemask);
+
+ /* Call wait ready function */
+ status = chip->waitfunc(mtd, chip);
+ udelay(1000);
+ /* See if device thinks it succeeded */
+ if (status & 0x01) {
+ DEBUG(MTD_DEBUG_LEVEL0, "%s: Error status = 0x%08x\n",
+ __func__, status);
+ ret = -EIO;
+ goto out;
+ }
+
+ ret = __nand_unlock(mtd, ofs, len, 0x1);
+
+out:
+ /* de-select the NAND device */
+ chip->select_chip(mtd, -1);
+
+ nand_release_device(mtd);
+
+ return ret;
+}
+
+/**
* nand_read_page_raw - [Intern] read raw page data without ecc
* @mtd: mtd info structure
* @chip: nand chip info structure
@@ -1232,6 +1435,9 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
int ret = 0;
uint32_t readlen = ops->len;
uint32_t oobreadlen = ops->ooblen;
+ uint32_t max_oobsize = ops->mode == MTD_OOB_AUTO ?
+ mtd->oobavail : mtd->oobsize;
+
uint8_t *bufpoi, *oob, *buf;
stats = mtd->ecc_stats;
@@ -1282,18 +1488,14 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
buf += bytes;
if (unlikely(oob)) {
- /* Raw mode does data:oob:data:oob */
- if (ops->mode != MTD_OOB_RAW) {
- int toread = min(oobreadlen,
- chip->ecc.layout->oobavail);
- if (toread) {
- oob = nand_transfer_oob(chip,
- oob, ops, toread);
- oobreadlen -= toread;
- }
- } else
- buf = nand_transfer_oob(chip,
- buf, ops, mtd->oobsize);
+
+ int toread = min(oobreadlen, max_oobsize);
+
+ if (toread) {
+ oob = nand_transfer_oob(chip,
+ oob, ops, toread);
+ oobreadlen -= toread;
+ }
}
if (!(chip->options & NAND_NO_READRDY)) {
@@ -1880,11 +2082,9 @@ static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
* @oob: oob data buffer
* @ops: oob ops structure
*/
-static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob,
- struct mtd_oob_ops *ops)
+static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob, size_t len,
+ struct mtd_oob_ops *ops)
{
- size_t len = ops->ooblen;
-
switch(ops->mode) {
case MTD_OOB_PLACE:
@@ -1939,6 +2139,11 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
int chipnr, realpage, page, blockmask, column;
struct nand_chip *chip = mtd->priv;
uint32_t writelen = ops->len;
+
+ uint32_t oobwritelen = ops->ooblen;
+ uint32_t oobmaxlen = ops->mode == MTD_OOB_AUTO ?
+ mtd->oobavail : mtd->oobsize;
+
uint8_t *oob = ops->oobbuf;
uint8_t *buf = ops->datbuf;
int ret, subpage;
@@ -1980,6 +2185,10 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
if (likely(!oob))
memset(chip->oob_poi, 0xff, mtd->oobsize);
+ /* Don't allow multipage oob writes with offset */
+ if (ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen))
+ return -EINVAL;
+
while(1) {
int bytes = mtd->writesize;
int cached = writelen > bytes && page != blockmask;
@@ -1995,8 +2204,11 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
wbuf = chip->buffers->databuf;
}
- if (unlikely(oob))
- oob = nand_fill_oob(chip, oob, ops);
+ if (unlikely(oob)) {
+ size_t len = min(oobwritelen, oobmaxlen);
+ oob = nand_fill_oob(chip, oob, len, ops);
+ oobwritelen -= len;
+ }
ret = chip->write_page(mtd, chip, wbuf, page, cached,
(ops->mode == MTD_OOB_RAW));
@@ -2170,7 +2382,7 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
chip->pagebuf = -1;
memset(chip->oob_poi, 0xff, mtd->oobsize);
- nand_fill_oob(chip, ops->oobbuf, ops);
+ nand_fill_oob(chip, ops->oobbuf, ops->ooblen, ops);
status = chip->ecc.write_oob(mtd, chip, page & chip->pagemask);
memset(chip->oob_poi, 0xff, mtd->oobsize);
@@ -2293,25 +2505,8 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
__func__, (unsigned long long)instr->addr,
(unsigned long long)instr->len);
- /* Start address must align on block boundary */
- if (instr->addr & ((1 << chip->phys_erase_shift) - 1)) {
- DEBUG(MTD_DEBUG_LEVEL0, "%s: Unaligned address\n", __func__);
+ if (check_offs_len(mtd, instr->addr, instr->len))
return -EINVAL;
- }
-
- /* Length must align on block boundary */
- if (instr->len & ((1 << chip->phys_erase_shift) - 1)) {
- DEBUG(MTD_DEBUG_LEVEL0, "%s: Length not block aligned\n",
- __func__);
- return -EINVAL;
- }
-
- /* Do not allow erase past end of device */
- if ((instr->len + instr->addr) > mtd->size) {
- DEBUG(MTD_DEBUG_LEVEL0, "%s: Erase past end of device\n",
- __func__);
- return -EINVAL;
- }
instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
@@ -2582,11 +2777,11 @@ static void nand_set_defaults(struct nand_chip *chip, int busw)
*/
static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
struct nand_chip *chip,
- int busw, int *maf_id)
+ int busw, int *maf_id,
+ struct nand_flash_dev *type)
{
- struct nand_flash_dev *type = NULL;
int i, dev_id, maf_idx;
- int tmp_id, tmp_manf;
+ u8 id_data[8];
/* Select the device */
chip->select_chip(mtd, 0);
@@ -2612,27 +2807,26 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
- /* Read manufacturer and device IDs */
+ /* Read entire ID string */
- tmp_manf = chip->read_byte(mtd);
- tmp_id = chip->read_byte(mtd);
+ for (i = 0; i < 8; i++)
+ id_data[i] = chip->read_byte(mtd);
- if (tmp_manf != *maf_id || tmp_id != dev_id) {
+ if (id_data[0] != *maf_id || id_data[1] != dev_id) {
printk(KERN_INFO "%s: second ID read did not match "
"%02x,%02x against %02x,%02x\n", __func__,
- *maf_id, dev_id, tmp_manf, tmp_id);
+ *maf_id, dev_id, id_data[0], id_data[1]);
return ERR_PTR(-ENODEV);
}
- /* Lookup the flash id */
- for (i = 0; nand_flash_ids[i].name != NULL; i++) {
- if (dev_id == nand_flash_ids[i].id) {
- type = &nand_flash_ids[i];
- break;
- }
- }
-
if (!type)
+ type = nand_flash_ids;
+
+ for (; type->name != NULL; type++)
+ if (dev_id == type->id)
+ break;
+
+ if (!type->name)
return ERR_PTR(-ENODEV);
if (!mtd->name)
@@ -2644,21 +2838,45 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
if (!type->pagesize) {
int extid;
/* The 3rd id byte holds MLC / multichip data */
- chip->cellinfo = chip->read_byte(mtd);
+ chip->cellinfo = id_data[2];
/* The 4th id byte is the important one */
- extid = chip->read_byte(mtd);
- /* Calc pagesize */
- mtd->writesize = 1024 << (extid & 0x3);
- extid >>= 2;
- /* Calc oobsize */
- mtd->oobsize = (8 << (extid & 0x01)) * (mtd->writesize >> 9);
- extid >>= 2;
- /* Calc blocksize. Blocksize is multiples of 64KiB */
- mtd->erasesize = (64 * 1024) << (extid & 0x03);
- extid >>= 2;
- /* Get buswidth information */
- busw = (extid & 0x01) ? NAND_BUSWIDTH_16 : 0;
+ extid = id_data[3];
+ /*
+ * Field definitions are in the following datasheets:
+ * Old style (4,5 byte ID): Samsung K9GAG08U0M (p.32)
+ * New style (6 byte ID): Samsung K9GAG08U0D (p.40)
+ *
+ * Check for wraparound + Samsung ID + nonzero 6th byte
+ * to decide what to do.
+ */
+ if (id_data[0] == id_data[6] && id_data[1] == id_data[7] &&
+ id_data[0] == NAND_MFR_SAMSUNG &&
+ id_data[5] != 0x00) {
+ /* Calc pagesize */
+ mtd->writesize = 2048 << (extid & 0x03);
+ extid >>= 2;
+ /* Calc oobsize */
+ mtd->oobsize = (extid & 0x03) == 0x01 ? 128 : 218;
+ extid >>= 2;
+ /* Calc blocksize */
+ mtd->erasesize = (128 * 1024) <<
+ (((extid >> 1) & 0x04) | (extid & 0x03));
+ busw = 0;
+ } else {
+ /* Calc pagesize */
+ mtd->writesize = 1024 << (extid & 0x03);
+ extid >>= 2;
+ /* Calc oobsize */
+ mtd->oobsize = (8 << (extid & 0x01)) *
+ (mtd->writesize >> 9);
+ extid >>= 2;
+ /* Calc blocksize. Blocksize is multiples of 64KiB */
+ mtd->erasesize = (64 * 1024) << (extid & 0x03);
+ extid >>= 2;
+ /* Get buswidth information */
+ busw = (extid & 0x01) ? NAND_BUSWIDTH_16 : 0;
+ }
} else {
/*
* Old devices have chip data hardcoded in the device id table
@@ -2704,6 +2922,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
/* Set the bad block position */
chip->badblockpos = mtd->writesize > 512 ?
NAND_LARGE_BADBLOCK_POS : NAND_SMALL_BADBLOCK_POS;
+ chip->badblockbits = 8;
/* Get chip options, preserve non chip based options */
chip->options &= ~NAND_CHIPOPTIONS_MSK;
@@ -2720,6 +2939,15 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
if (*maf_id != NAND_MFR_SAMSUNG && !type->pagesize)
chip->options &= ~NAND_SAMSUNG_LP_OPTIONS;
+ /*
+ * Bad block marker is stored in the last page of each block
+ * on Samsung and Hynix MLC devices
+ */
+ if ((chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
+ (*maf_id == NAND_MFR_SAMSUNG ||
+ *maf_id == NAND_MFR_HYNIX))
+ chip->options |= NAND_BB_LAST_PAGE;
+
/* Check for AND chips with 4 page planes */
if (chip->options & NAND_4PAGE_ARRAY)
chip->erase_cmd = multi_erase_cmd;
@@ -2741,13 +2969,15 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
* nand_scan_ident - [NAND Interface] Scan for the NAND device
* @mtd: MTD device structure
* @maxchips: Number of chips to scan for
+ * @table: Alternative NAND ID table
*
* This is the first phase of the normal nand_scan() function. It
* reads the flash ID and sets up MTD fields accordingly.
*
* The mtd->owner field must be set to the module of the caller.
*/
-int nand_scan_ident(struct mtd_info *mtd, int maxchips)
+int nand_scan_ident(struct mtd_info *mtd, int maxchips,
+ struct nand_flash_dev *table)
{
int i, busw, nand_maf_id;
struct nand_chip *chip = mtd->priv;
@@ -2759,7 +2989,7 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips)
nand_set_defaults(chip, busw);
/* Read the flash type */
- type = nand_get_flash_type(mtd, chip, busw, &nand_maf_id);
+ type = nand_get_flash_type(mtd, chip, busw, &nand_maf_id, table);
if (IS_ERR(type)) {
if (!(chip->options & NAND_SCAN_SILENT_NODEV))
@@ -2989,7 +3219,8 @@ int nand_scan_tail(struct mtd_info *mtd)
/* Fill in remaining MTD driver data */
mtd->type = MTD_NANDFLASH;
- mtd->flags = MTD_CAP_NANDFLASH;
+ mtd->flags = (chip->options & NAND_ROM) ? MTD_CAP_ROM :
+ MTD_CAP_NANDFLASH;
mtd->erase = nand_erase;
mtd->point = NULL;
mtd->unpoint = NULL;
@@ -3050,7 +3281,7 @@ int nand_scan(struct mtd_info *mtd, int maxchips)
BUG();
}
- ret = nand_scan_ident(mtd, maxchips);
+ ret = nand_scan_ident(mtd, maxchips, NULL);
if (!ret)
ret = nand_scan_tail(mtd);
return ret;
@@ -3077,6 +3308,8 @@ void nand_release(struct mtd_info *mtd)
kfree(chip->buffers);
}
+EXPORT_SYMBOL_GPL(nand_lock);
+EXPORT_SYMBOL_GPL(nand_unlock);
EXPORT_SYMBOL_GPL(nand_scan);
EXPORT_SYMBOL_GPL(nand_scan_ident);
EXPORT_SYMBOL_GPL(nand_scan_tail);
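
nand_scan_ident() gains a third parameter: an alternative nand_flash_dev table that is searched instead of the built-in nand_flash_ids[] list; existing callers simply pass NULL, as the driver updates above show. A minimal sketch of a caller supplying its own table (board_nand_ids and its example entry are hypothetical, not from this patch, and the options field is left at 0 for simplicity):

	static struct nand_flash_dev board_nand_ids[] = {
		/* name, device id, pagesize, chipsize (MiB), erasesize, options */
		{ "Board NAND 128MiB 3,3V 8-bit", 0xD1, 0, 128, 0, 0 },
		{ NULL, }
	};

	static int board_nand_scan(struct mtd_info *mtd)
	{
		int ret;

		ret = nand_scan_ident(mtd, 1, board_nand_ids);
		if (ret)
			return ret;

		return nand_scan_tail(mtd);
	}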
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
index 55c23e5cd210..ad97c0ce73b2 100644
--- a/drivers/mtd/nand/nand_bbt.c
+++ b/drivers/mtd/nand/nand_bbt.c
@@ -237,15 +237,33 @@ static int scan_read_raw(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
size_t len)
{
struct mtd_oob_ops ops;
+ int res;
ops.mode = MTD_OOB_RAW;
ops.ooboffs = 0;
ops.ooblen = mtd->oobsize;
- ops.oobbuf = buf;
- ops.datbuf = buf;
- ops.len = len;
- return mtd->read_oob(mtd, offs, &ops);
+
+ while (len > 0) {
+ if (len <= mtd->writesize) {
+ ops.oobbuf = buf + len;
+ ops.datbuf = buf;
+ ops.len = len;
+ return mtd->read_oob(mtd, offs, &ops);
+ } else {
+ ops.oobbuf = buf + mtd->writesize;
+ ops.datbuf = buf;
+ ops.len = mtd->writesize;
+ res = mtd->read_oob(mtd, offs, &ops);
+
+ if (res)
+ return res;
+ }
+
+ buf += mtd->oobsize + mtd->writesize;
+ len -= mtd->writesize;
+ }
+ return 0;
}
/*
@@ -414,6 +432,9 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
from = (loff_t)startblock << (this->bbt_erase_shift - 1);
}
+ if (this->options & NAND_BB_LAST_PAGE)
+ from += mtd->erasesize - (mtd->writesize * len);
+
for (i = startblock; i < numblocks;) {
int ret;
diff --git a/drivers/mtd/nand/nand_bcm_umi.h b/drivers/mtd/nand/nand_bcm_umi.h
index 7cec2cd97854..198b304d6f72 100644
--- a/drivers/mtd/nand/nand_bcm_umi.h
+++ b/drivers/mtd/nand/nand_bcm_umi.h
@@ -167,18 +167,27 @@ static inline void nand_bcm_umi_bch_read_oobEcc(uint32_t pageSize,
int numToRead = 16; /* There are 16 bytes per sector in the OOB */
/* ECC is already paused when this function is called */
+ if (pageSize != NAND_DATA_ACCESS_SIZE) {
+ /* skip BI */
+#if defined(__KERNEL__) && !defined(STANDALONE)
+ *oobp++ = REG_NAND_DATA8;
+#else
+ REG_NAND_DATA8;
+#endif
+ numToRead--;
+ }
- if (pageSize == NAND_DATA_ACCESS_SIZE) {
- while (numToRead > numEccBytes) {
- /* skip free oob region */
+ while (numToRead > numEccBytes) {
+ /* skip free oob region */
#if defined(__KERNEL__) && !defined(STANDALONE)
- *oobp++ = REG_NAND_DATA8;
+ *oobp++ = REG_NAND_DATA8;
#else
- REG_NAND_DATA8;
+ REG_NAND_DATA8;
#endif
- numToRead--;
- }
+ numToRead--;
+ }
+ if (pageSize == NAND_DATA_ACCESS_SIZE) {
/* read ECC bytes before BI */
nand_bcm_umi_bch_resume_read_ecc_calc();
@@ -190,6 +199,7 @@ static inline void nand_bcm_umi_bch_read_oobEcc(uint32_t pageSize,
#else
eccCalc[eccPos++] = REG_NAND_DATA8;
#endif
+ numToRead--;
}
nand_bcm_umi_bch_pause_read_ecc_calc();
@@ -204,49 +214,18 @@ static inline void nand_bcm_umi_bch_read_oobEcc(uint32_t pageSize,
numToRead--;
}
- /* read ECC bytes */
- nand_bcm_umi_bch_resume_read_ecc_calc();
- while (numToRead) {
-#if defined(__KERNEL__) && !defined(STANDALONE)
- *oobp = REG_NAND_DATA8;
- eccCalc[eccPos++] = *oobp;
- oobp++;
-#else
- eccCalc[eccPos++] = REG_NAND_DATA8;
-#endif
- numToRead--;
- }
- } else {
- /* skip BI */
+ }
+ /* read ECC bytes */
+ nand_bcm_umi_bch_resume_read_ecc_calc();
+ while (numToRead) {
#if defined(__KERNEL__) && !defined(STANDALONE)
- *oobp++ = REG_NAND_DATA8;
+ *oobp = REG_NAND_DATA8;
+ eccCalc[eccPos++] = *oobp;
+ oobp++;
#else
- REG_NAND_DATA8;
+ eccCalc[eccPos++] = REG_NAND_DATA8;
#endif
numToRead--;
-
- while (numToRead > numEccBytes) {
- /* skip free oob region */
-#if defined(__KERNEL__) && !defined(STANDALONE)
- *oobp++ = REG_NAND_DATA8;
-#else
- REG_NAND_DATA8;
-#endif
- numToRead--;
- }
-
- /* read ECC bytes */
- nand_bcm_umi_bch_resume_read_ecc_calc();
- while (numToRead) {
-#if defined(__KERNEL__) && !defined(STANDALONE)
- *oobp = REG_NAND_DATA8;
- eccCalc[eccPos++] = *oobp;
- oobp++;
-#else
- eccCalc[eccPos++] = REG_NAND_DATA8;
-#endif
- numToRead--;
- }
}
}
diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c
index 69ee2c90eb0b..89907ed99009 100644
--- a/drivers/mtd/nand/nand_ids.c
+++ b/drivers/mtd/nand/nand_ids.c
@@ -82,6 +82,7 @@ struct nand_flash_dev nand_flash_ids[] = {
/* 1 Gigabit */
{"NAND 128MiB 1,8V 8-bit", 0xA1, 0, 128, 0, LP_OPTIONS},
{"NAND 128MiB 3,3V 8-bit", 0xF1, 0, 128, 0, LP_OPTIONS},
+ {"NAND 128MiB 3,3V 8-bit", 0xD1, 0, 128, 0, LP_OPTIONS},
{"NAND 128MiB 1,8V 16-bit", 0xB1, 0, 128, 0, LP_OPTIONS16},
{"NAND 128MiB 3,3V 16-bit", 0xC1, 0, 128, 0, LP_OPTIONS16},
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index 7281000fef2d..261337efe0ee 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -80,6 +80,9 @@
#ifndef CONFIG_NANDSIM_DBG
#define CONFIG_NANDSIM_DBG 0
#endif
+#ifndef CONFIG_NANDSIM_MAX_PARTS
+#define CONFIG_NANDSIM_MAX_PARTS 32
+#endif
static uint first_id_byte = CONFIG_NANDSIM_FIRST_ID_BYTE;
static uint second_id_byte = CONFIG_NANDSIM_SECOND_ID_BYTE;
@@ -94,7 +97,7 @@ static uint bus_width = CONFIG_NANDSIM_BUS_WIDTH;
static uint do_delays = CONFIG_NANDSIM_DO_DELAYS;
static uint log = CONFIG_NANDSIM_LOG;
static uint dbg = CONFIG_NANDSIM_DBG;
-static unsigned long parts[MAX_MTD_DEVICES];
+static unsigned long parts[CONFIG_NANDSIM_MAX_PARTS];
static unsigned int parts_num;
static char *badblocks = NULL;
static char *weakblocks = NULL;
@@ -135,8 +138,8 @@ MODULE_PARM_DESC(fourth_id_byte, "The fourth byte returned by NAND Flash 'read I
MODULE_PARM_DESC(access_delay, "Initial page access delay (microseconds)");
MODULE_PARM_DESC(programm_delay, "Page program delay (microseconds)");
MODULE_PARM_DESC(erase_delay, "Sector erase delay (milliseconds)");
-MODULE_PARM_DESC(output_cycle, "Word output (from flash) time (nanodeconds)");
-MODULE_PARM_DESC(input_cycle, "Word input (to flash) time (nanodeconds)");
+MODULE_PARM_DESC(output_cycle, "Word output (from flash) time (nanoseconds)");
+MODULE_PARM_DESC(input_cycle, "Word input (to flash) time (nanoseconds)");
MODULE_PARM_DESC(bus_width, "Chip's bus width (8- or 16-bit)");
MODULE_PARM_DESC(do_delays, "Simulate NAND delays using busy-waits if not zero");
MODULE_PARM_DESC(log, "Perform logging if not zero");
@@ -288,7 +291,7 @@ union ns_mem {
* The structure which describes all the internal simulator data.
*/
struct nandsim {
- struct mtd_partition partitions[MAX_MTD_DEVICES];
+ struct mtd_partition partitions[CONFIG_NANDSIM_MAX_PARTS];
unsigned int nbparts;
uint busw; /* flash chip bus width (8 or 16) */
@@ -312,7 +315,7 @@ struct nandsim {
union ns_mem buf;
/* NAND flash "geometry" */
- struct nandsin_geometry {
+ struct {
uint64_t totsz; /* total flash size, bytes */
uint32_t secsz; /* flash sector (erase block) size, bytes */
uint pgsz; /* NAND flash page size, bytes */
@@ -331,7 +334,7 @@ struct nandsim {
} geom;
/* NAND flash internal registers */
- struct nandsim_regs {
+ struct {
unsigned command; /* the command register */
u_char status; /* the status register */
uint row; /* the page number */
@@ -342,7 +345,7 @@ struct nandsim {
} regs;
/* NAND flash lines state */
- struct ns_lines_status {
+ struct {
int ce; /* chip Enable */
int cle; /* command Latch Enable */
int ale; /* address Latch Enable */
diff --git a/drivers/mtd/nand/ndfc.c b/drivers/mtd/nand/ndfc.c
index b983cae8c298..98fd2bdf8be1 100644
--- a/drivers/mtd/nand/ndfc.c
+++ b/drivers/mtd/nand/ndfc.c
@@ -239,14 +239,14 @@ static int __devinit ndfc_probe(struct of_device *ofdev,
dev_set_drvdata(&ofdev->dev, ndfc);
/* Read the reg property to get the chip select */
- reg = of_get_property(ofdev->node, "reg", &len);
+ reg = of_get_property(ofdev->dev.of_node, "reg", &len);
if (reg == NULL || len != 12) {
dev_err(&ofdev->dev, "unable read reg property (%d)\n", len);
return -ENOENT;
}
ndfc->chip_select = reg[0];
- ndfc->ndfcbase = of_iomap(ofdev->node, 0);
+ ndfc->ndfcbase = of_iomap(ofdev->dev.of_node, 0);
if (!ndfc->ndfcbase) {
dev_err(&ofdev->dev, "failed to get memory\n");
return -EIO;
@@ -255,20 +255,20 @@ static int __devinit ndfc_probe(struct of_device *ofdev,
ccr = NDFC_CCR_BS(ndfc->chip_select);
/* It is ok if ccr does not exist - just default to 0 */
- reg = of_get_property(ofdev->node, "ccr", NULL);
+ reg = of_get_property(ofdev->dev.of_node, "ccr", NULL);
if (reg)
ccr |= *reg;
out_be32(ndfc->ndfcbase + NDFC_CCR, ccr);
/* Set the bank settings if given */
- reg = of_get_property(ofdev->node, "bank-settings", NULL);
+ reg = of_get_property(ofdev->dev.of_node, "bank-settings", NULL);
if (reg) {
int offset = NDFC_BCFG0 + (ndfc->chip_select << 2);
out_be32(ndfc->ndfcbase + offset, *reg);
}
- err = ndfc_chip_init(ndfc, ofdev->node);
+ err = ndfc_chip_init(ndfc, ofdev->dev.of_node);
if (err) {
iounmap(ndfc->ndfcbase);
return err;
@@ -294,9 +294,10 @@ MODULE_DEVICE_TABLE(of, ndfc_match);
static struct of_platform_driver ndfc_driver = {
.driver = {
- .name = "ndfc",
+ .name = "ndfc",
+ .owner = THIS_MODULE,
+ .of_match_table = ndfc_match,
},
- .match_table = ndfc_match,
.probe = ndfc_probe,
.remove = __devexit_p(ndfc_remove),
};
diff --git a/drivers/mtd/nand/nomadik_nand.c b/drivers/mtd/nand/nomadik_nand.c
index 1f6f741af5da..8c0b69375224 100644
--- a/drivers/mtd/nand/nomadik_nand.c
+++ b/drivers/mtd/nand/nomadik_nand.c
@@ -105,21 +105,21 @@ static int nomadik_nand_probe(struct platform_device *pdev)
ret = -EIO;
goto err_unmap;
}
- host->addr_va = ioremap(res->start, res->end - res->start + 1);
+ host->addr_va = ioremap(res->start, resource_size(res));
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_data");
if (!res) {
ret = -EIO;
goto err_unmap;
}
- host->data_va = ioremap(res->start, res->end - res->start + 1);
+ host->data_va = ioremap(res->start, resource_size(res));
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_cmd");
if (!res) {
ret = -EIO;
goto err_unmap;
}
- host->cmd_va = ioremap(res->start, res->end - res->start + 1);
+ host->cmd_va = ioremap(res->start, resource_size(res));
if (!host->addr_va || !host->data_va || !host->cmd_va) {
ret = -ENOMEM;
diff --git a/drivers/mtd/nand/w90p910_nand.c b/drivers/mtd/nand/nuc900_nand.c
index 7680e731348a..6eddf7361ed7 100644
--- a/drivers/mtd/nand/w90p910_nand.c
+++ b/drivers/mtd/nand/nuc900_nand.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2009 Nuvoton technology corporation.
+ * Copyright © 2009 Nuvoton technology corporation.
*
* Wan ZongShun <mcuos.com@gmail.com>
*
@@ -55,7 +55,7 @@
#define write_addr_reg(dev, val) \
__raw_writel((val), (dev)->reg + REG_SMADDR)
-struct w90p910_nand {
+struct nuc900_nand {
struct mtd_info mtd;
struct nand_chip chip;
void __iomem *reg;
@@ -76,49 +76,49 @@ static const struct mtd_partition partitions[] = {
}
};
-static unsigned char w90p910_nand_read_byte(struct mtd_info *mtd)
+static unsigned char nuc900_nand_read_byte(struct mtd_info *mtd)
{
unsigned char ret;
- struct w90p910_nand *nand;
+ struct nuc900_nand *nand;
- nand = container_of(mtd, struct w90p910_nand, mtd);
+ nand = container_of(mtd, struct nuc900_nand, mtd);
ret = (unsigned char)read_data_reg(nand);
return ret;
}
-static void w90p910_nand_read_buf(struct mtd_info *mtd,
- unsigned char *buf, int len)
+static void nuc900_nand_read_buf(struct mtd_info *mtd,
+ unsigned char *buf, int len)
{
int i;
- struct w90p910_nand *nand;
+ struct nuc900_nand *nand;
- nand = container_of(mtd, struct w90p910_nand, mtd);
+ nand = container_of(mtd, struct nuc900_nand, mtd);
for (i = 0; i < len; i++)
buf[i] = (unsigned char)read_data_reg(nand);
}
-static void w90p910_nand_write_buf(struct mtd_info *mtd,
- const unsigned char *buf, int len)
+static void nuc900_nand_write_buf(struct mtd_info *mtd,
+ const unsigned char *buf, int len)
{
int i;
- struct w90p910_nand *nand;
+ struct nuc900_nand *nand;
- nand = container_of(mtd, struct w90p910_nand, mtd);
+ nand = container_of(mtd, struct nuc900_nand, mtd);
for (i = 0; i < len; i++)
write_data_reg(nand, buf[i]);
}
-static int w90p910_verify_buf(struct mtd_info *mtd,
- const unsigned char *buf, int len)
+static int nuc900_verify_buf(struct mtd_info *mtd,
+ const unsigned char *buf, int len)
{
int i;
- struct w90p910_nand *nand;
+ struct nuc900_nand *nand;
- nand = container_of(mtd, struct w90p910_nand, mtd);
+ nand = container_of(mtd, struct nuc900_nand, mtd);
for (i = 0; i < len; i++) {
if (buf[i] != (unsigned char)read_data_reg(nand))
@@ -128,7 +128,7 @@ static int w90p910_verify_buf(struct mtd_info *mtd,
return 0;
}
-static int w90p910_check_rb(struct w90p910_nand *nand)
+static int nuc900_check_rb(struct nuc900_nand *nand)
{
unsigned int val;
spin_lock(&nand->lock);
@@ -139,24 +139,24 @@ static int w90p910_check_rb(struct w90p910_nand *nand)
return val;
}
-static int w90p910_nand_devready(struct mtd_info *mtd)
+static int nuc900_nand_devready(struct mtd_info *mtd)
{
- struct w90p910_nand *nand;
+ struct nuc900_nand *nand;
int ready;
- nand = container_of(mtd, struct w90p910_nand, mtd);
+ nand = container_of(mtd, struct nuc900_nand, mtd);
- ready = (w90p910_check_rb(nand)) ? 1 : 0;
+ ready = (nuc900_check_rb(nand)) ? 1 : 0;
return ready;
}
-static void w90p910_nand_command_lp(struct mtd_info *mtd,
- unsigned int command, int column, int page_addr)
+static void nuc900_nand_command_lp(struct mtd_info *mtd, unsigned int command,
+ int column, int page_addr)
{
register struct nand_chip *chip = mtd->priv;
- struct w90p910_nand *nand;
+ struct nuc900_nand *nand;
- nand = container_of(mtd, struct w90p910_nand, mtd);
+ nand = container_of(mtd, struct nuc900_nand, mtd);
if (command == NAND_CMD_READOOB) {
column += mtd->writesize;
@@ -212,7 +212,7 @@ static void w90p910_nand_command_lp(struct mtd_info *mtd,
write_cmd_reg(nand, NAND_CMD_STATUS);
write_cmd_reg(nand, command);
- while (!w90p910_check_rb(nand))
+ while (!nuc900_check_rb(nand))
;
return;
@@ -241,7 +241,7 @@ static void w90p910_nand_command_lp(struct mtd_info *mtd,
}
-static void w90p910_nand_enable(struct w90p910_nand *nand)
+static void nuc900_nand_enable(struct nuc900_nand *nand)
{
unsigned int val;
spin_lock(&nand->lock);
@@ -262,37 +262,37 @@ static void w90p910_nand_enable(struct w90p910_nand *nand)
spin_unlock(&nand->lock);
}
-static int __devinit w90p910_nand_probe(struct platform_device *pdev)
+static int __devinit nuc900_nand_probe(struct platform_device *pdev)
{
- struct w90p910_nand *w90p910_nand;
+ struct nuc900_nand *nuc900_nand;
struct nand_chip *chip;
int retval;
struct resource *res;
retval = 0;
- w90p910_nand = kzalloc(sizeof(struct w90p910_nand), GFP_KERNEL);
- if (!w90p910_nand)
+ nuc900_nand = kzalloc(sizeof(struct nuc900_nand), GFP_KERNEL);
+ if (!nuc900_nand)
return -ENOMEM;
- chip = &(w90p910_nand->chip);
+ chip = &(nuc900_nand->chip);
- w90p910_nand->mtd.priv = chip;
- w90p910_nand->mtd.owner = THIS_MODULE;
- spin_lock_init(&w90p910_nand->lock);
+ nuc900_nand->mtd.priv = chip;
+ nuc900_nand->mtd.owner = THIS_MODULE;
+ spin_lock_init(&nuc900_nand->lock);
- w90p910_nand->clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR(w90p910_nand->clk)) {
+ nuc900_nand->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(nuc900_nand->clk)) {
retval = -ENOENT;
goto fail1;
}
- clk_enable(w90p910_nand->clk);
-
- chip->cmdfunc = w90p910_nand_command_lp;
- chip->dev_ready = w90p910_nand_devready;
- chip->read_byte = w90p910_nand_read_byte;
- chip->write_buf = w90p910_nand_write_buf;
- chip->read_buf = w90p910_nand_read_buf;
- chip->verify_buf = w90p910_verify_buf;
+ clk_enable(nuc900_nand->clk);
+
+ chip->cmdfunc = nuc900_nand_command_lp;
+ chip->dev_ready = nuc900_nand_devready;
+ chip->read_byte = nuc900_nand_read_byte;
+ chip->write_buf = nuc900_nand_write_buf;
+ chip->read_buf = nuc900_nand_read_buf;
+ chip->verify_buf = nuc900_verify_buf;
chip->chip_delay = 50;
chip->options = 0;
chip->ecc.mode = NAND_ECC_SOFT;
@@ -308,75 +308,75 @@ static int __devinit w90p910_nand_probe(struct platform_device *pdev)
goto fail1;
}
- w90p910_nand->reg = ioremap(res->start, resource_size(res));
- if (!w90p910_nand->reg) {
+ nuc900_nand->reg = ioremap(res->start, resource_size(res));
+ if (!nuc900_nand->reg) {
retval = -ENOMEM;
goto fail2;
}
- w90p910_nand_enable(w90p910_nand);
+ nuc900_nand_enable(nuc900_nand);
- if (nand_scan(&(w90p910_nand->mtd), 1)) {
+ if (nand_scan(&(nuc900_nand->mtd), 1)) {
retval = -ENXIO;
goto fail3;
}
- add_mtd_partitions(&(w90p910_nand->mtd), partitions,
+ add_mtd_partitions(&(nuc900_nand->mtd), partitions,
ARRAY_SIZE(partitions));
- platform_set_drvdata(pdev, w90p910_nand);
+ platform_set_drvdata(pdev, nuc900_nand);
return retval;
-fail3: iounmap(w90p910_nand->reg);
+fail3: iounmap(nuc900_nand->reg);
fail2: release_mem_region(res->start, resource_size(res));
-fail1: kfree(w90p910_nand);
+fail1: kfree(nuc900_nand);
return retval;
}
-static int __devexit w90p910_nand_remove(struct platform_device *pdev)
+static int __devexit nuc900_nand_remove(struct platform_device *pdev)
{
- struct w90p910_nand *w90p910_nand = platform_get_drvdata(pdev);
+ struct nuc900_nand *nuc900_nand = platform_get_drvdata(pdev);
struct resource *res;
- iounmap(w90p910_nand->reg);
+ iounmap(nuc900_nand->reg);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(res->start, resource_size(res));
- clk_disable(w90p910_nand->clk);
- clk_put(w90p910_nand->clk);
+ clk_disable(nuc900_nand->clk);
+ clk_put(nuc900_nand->clk);
- kfree(w90p910_nand);
+ kfree(nuc900_nand);
platform_set_drvdata(pdev, NULL);
return 0;
}
-static struct platform_driver w90p910_nand_driver = {
- .probe = w90p910_nand_probe,
- .remove = __devexit_p(w90p910_nand_remove),
+static struct platform_driver nuc900_nand_driver = {
+ .probe = nuc900_nand_probe,
+ .remove = __devexit_p(nuc900_nand_remove),
.driver = {
- .name = "w90p910-fmi",
+ .name = "nuc900-fmi",
.owner = THIS_MODULE,
},
};
-static int __init w90p910_nand_init(void)
+static int __init nuc900_nand_init(void)
{
- return platform_driver_register(&w90p910_nand_driver);
+ return platform_driver_register(&nuc900_nand_driver);
}
-static void __exit w90p910_nand_exit(void)
+static void __exit nuc900_nand_exit(void)
{
- platform_driver_unregister(&w90p910_nand_driver);
+ platform_driver_unregister(&nuc900_nand_driver);
}
-module_init(w90p910_nand_init);
-module_exit(w90p910_nand_exit);
+module_init(nuc900_nand_init);
+module_exit(nuc900_nand_exit);
MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>");
-MODULE_DESCRIPTION("w90p910 nand driver!");
+MODULE_DESCRIPTION("w90p910/NUC9xx nand driver!");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:w90p910-fmi");
+MODULE_ALIAS("platform:nuc900-fmi");
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index 7545568fce47..ee87325c7712 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -292,11 +292,14 @@ static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len)
u32 *p = (u32 *)buf;
/* take care of subpage reads */
- for (; len % 4 != 0; ) {
- *buf++ = __raw_readb(info->nand.IO_ADDR_R);
- len--;
+ if (len % 4) {
+ if (info->nand.options & NAND_BUSWIDTH_16)
+ omap_read_buf16(mtd, buf, len % 4);
+ else
+ omap_read_buf8(mtd, buf, len % 4);
+ p = (u32 *) (buf + len % 4);
+ len -= len % 4;
}
- p = (u32 *) buf;
/* configure and start prefetch transfer */
ret = gpmc_prefetch_enable(info->gpmc_cs, 0x0, len, 0x0);
@@ -502,7 +505,7 @@ static void omap_write_buf_dma_pref(struct mtd_info *mtd,
omap_write_buf_pref(mtd, buf, len);
else
/* start transfer in DMA mode */
- omap_nand_dma_transfer(mtd, buf, len, 0x1);
+ omap_nand_dma_transfer(mtd, (u_char *) buf, len, 0x1);
}
/**
@@ -1028,7 +1031,8 @@ out_free_info:
static int omap_nand_remove(struct platform_device *pdev)
{
struct mtd_info *mtd = platform_get_drvdata(pdev);
- struct omap_nand_info *info = mtd->priv;
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
platform_set_drvdata(pdev, NULL);
if (use_dma)
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
index d60fc5719fef..da6e75343052 100644
--- a/drivers/mtd/nand/orion_nand.c
+++ b/drivers/mtd/nand/orion_nand.c
@@ -80,6 +80,7 @@ static int __init orion_nand_probe(struct platform_device *pdev)
struct mtd_info *mtd;
struct nand_chip *nc;
struct orion_nand_data *board;
+ struct resource *res;
void __iomem *io_base;
int ret = 0;
#ifdef CONFIG_MTD_PARTITIONS
@@ -95,8 +96,13 @@ static int __init orion_nand_probe(struct platform_device *pdev)
}
mtd = (struct mtd_info *)(nc + 1);
- io_base = ioremap(pdev->resource[0].start,
- pdev->resource[0].end - pdev->resource[0].start + 1);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ ret = -ENODEV;
+ goto no_res;
+ }
+
+ io_base = ioremap(res->start, resource_size(res));
if (!io_base) {
printk(KERN_ERR "orion_nand: ioremap failed\n");
ret = -EIO;
@@ -120,6 +126,9 @@ static int __init orion_nand_probe(struct platform_device *pdev)
if (board->width == 16)
nc->options |= NAND_BUSWIDTH_16;
+ if (board->dev_ready)
+ nc->dev_ready = board->dev_ready;
+
platform_set_drvdata(pdev, mtd);
if (nand_scan(mtd, 1)) {
diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c
index a8b9376cf324..f02af24d033a 100644
--- a/drivers/mtd/nand/pasemi_nand.c
+++ b/drivers/mtd/nand/pasemi_nand.c
@@ -93,7 +93,7 @@ static int __devinit pasemi_nand_probe(struct of_device *ofdev,
const struct of_device_id *match)
{
struct pci_dev *pdev;
- struct device_node *np = ofdev->node;
+ struct device_node *np = ofdev->dev.of_node;
struct resource res;
struct nand_chip *chip;
int err = 0;
@@ -209,7 +209,7 @@ static int __devexit pasemi_nand_remove(struct of_device *ofdev)
return 0;
}
-static struct of_device_id pasemi_nand_match[] =
+static const struct of_device_id pasemi_nand_match[] =
{
{
.compatible = "pasemi,localbus-nand",
@@ -221,8 +221,11 @@ MODULE_DEVICE_TABLE(of, pasemi_nand_match);
static struct of_platform_driver pasemi_nand_driver =
{
- .name = (char*)driver_name,
- .match_table = pasemi_nand_match,
+ .driver = {
+ .name = (char*)driver_name,
+ .owner = THIS_MODULE,
+ .of_match_table = pasemi_nand_match,
+ },
.probe = pasemi_nand_probe,
.remove = pasemi_nand_remove,
};
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 5d55152162cf..e02fa4f0e3c9 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -1320,6 +1320,17 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)
goto fail_free_irq;
}
+ if (mtd_has_cmdlinepart()) {
+ static const char *probes[] = { "cmdlinepart", NULL };
+ struct mtd_partition *parts;
+ int nr_parts;
+
+ nr_parts = parse_mtd_partitions(mtd, probes, &parts, 0);
+
+ if (nr_parts)
+ return add_mtd_partitions(mtd, parts, nr_parts);
+ }
+
return add_mtd_partitions(mtd, pdata->parts, pdata->nr_parts);
fail_free_irq:
diff --git a/drivers/mtd/nand/r852.c b/drivers/mtd/nand/r852.c
new file mode 100644
index 000000000000..78a423295474
--- /dev/null
+++ b/drivers/mtd/nand/r852.c
@@ -0,0 +1,1140 @@
+/*
+ * Copyright © 2009 - Maxim Levitsky
+ * driver for Ricoh xD readers
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/jiffies.h>
+#include <linux/workqueue.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <asm/byteorder.h>
+#include <linux/sched.h>
+#include "sm_common.h"
+#include "r852.h"
+
+
+static int r852_enable_dma = 1;
+module_param(r852_enable_dma, bool, S_IRUGO);
+MODULE_PARM_DESC(r852_enable_dma, "Enable usage of the DMA (default)");
+
+static int debug;
+module_param(debug, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "Debug level (0-2)");
+
+/* read register */
+static inline uint8_t r852_read_reg(struct r852_device *dev, int address)
+{
+ uint8_t reg = readb(dev->mmio + address);
+ return reg;
+}
+
+/* write register */
+static inline void r852_write_reg(struct r852_device *dev,
+ int address, uint8_t value)
+{
+ writeb(value, dev->mmio + address);
+ mmiowb();
+}
+
+
+/* read dword sized register */
+static inline uint32_t r852_read_reg_dword(struct r852_device *dev, int address)
+{
+ uint32_t reg = le32_to_cpu(readl(dev->mmio + address));
+ return reg;
+}
+
+/* write dword sized register */
+static inline void r852_write_reg_dword(struct r852_device *dev,
+ int address, uint32_t value)
+{
+ writel(cpu_to_le32(value), dev->mmio + address);
+ mmiowb();
+}
+
+/* returns pointer to our private structure */
+static inline struct r852_device *r852_get_dev(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = (struct nand_chip *)mtd->priv;
+ return (struct r852_device *)chip->priv;
+}
+
+
+/* check if controller supports dma */
+static void r852_dma_test(struct r852_device *dev)
+{
+ dev->dma_usable = (r852_read_reg(dev, R852_DMA_CAP) &
+ (R852_DMA1 | R852_DMA2)) == (R852_DMA1 | R852_DMA2);
+
+ if (!dev->dma_usable)
+ message("Non dma capable device detected, dma disabled");
+
+ if (!r852_enable_dma) {
+ message("disabling dma on user request");
+ dev->dma_usable = 0;
+ }
+}
+
+/*
+ * Enable dma. Enables either the first or the second stage of the DMA.
+ * Expects dev->dma_dir and dev->dma_state to be set
+ */
+static void r852_dma_enable(struct r852_device *dev)
+{
+ uint8_t dma_reg, dma_irq_reg;
+
+ /* Set up dma settings */
+ dma_reg = r852_read_reg_dword(dev, R852_DMA_SETTINGS);
+ dma_reg &= ~(R852_DMA_READ | R852_DMA_INTERNAL | R852_DMA_MEMORY);
+
+ if (dev->dma_dir)
+ dma_reg |= R852_DMA_READ;
+
+ if (dev->dma_state == DMA_INTERNAL) {
+ dma_reg |= R852_DMA_INTERNAL;
+ /* Precaution to make sure HW doesn't write */
+ /* to random kernel memory */
+ r852_write_reg_dword(dev, R852_DMA_ADDR,
+ cpu_to_le32(dev->phys_bounce_buffer));
+ } else {
+ dma_reg |= R852_DMA_MEMORY;
+ r852_write_reg_dword(dev, R852_DMA_ADDR,
+ cpu_to_le32(dev->phys_dma_addr));
+ }
+
+ /* Precaution: make sure write reached the device */
+ r852_read_reg_dword(dev, R852_DMA_ADDR);
+
+ r852_write_reg_dword(dev, R852_DMA_SETTINGS, dma_reg);
+
+ /* Set dma irq */
+ dma_irq_reg = r852_read_reg_dword(dev, R852_DMA_IRQ_ENABLE);
+ r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE,
+ dma_irq_reg |
+ R852_DMA_IRQ_INTERNAL |
+ R852_DMA_IRQ_ERROR |
+ R852_DMA_IRQ_MEMORY);
+}
+
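+/*
+ * Every transfer runs in two DMA stages: for a read, stage one moves data
+ * from the card into the controller's internal buffer (DMA_INTERNAL) and
+ * stage two moves it from the internal buffer into host memory (DMA_MEMORY);
+ * writes run the same two stages in the opposite order.  r852_do_dma()
+ * starts stage one, the interrupt handler advances dev->dma_stage and
+ * re-arms the engine for stage two, and r852_dma_done() finishes the
+ * operation.
+ */
+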
+/*
+ * Disable dma, called from the interrupt handler, which specifies
+ * success of the operation via 'error' argument
+ */
+static void r852_dma_done(struct r852_device *dev, int error)
+{
+ WARN_ON(dev->dma_stage == 0);
+
+ r852_write_reg_dword(dev, R852_DMA_IRQ_STA,
+ r852_read_reg_dword(dev, R852_DMA_IRQ_STA));
+
+ r852_write_reg_dword(dev, R852_DMA_SETTINGS, 0);
+ r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE, 0);
+
+ /* Precaution to make sure HW doesn't write to random kernel memory */
+ r852_write_reg_dword(dev, R852_DMA_ADDR,
+ cpu_to_le32(dev->phys_bounce_buffer));
+ r852_read_reg_dword(dev, R852_DMA_ADDR);
+
+ dev->dma_error = error;
+ dev->dma_stage = 0;
+
+ if (dev->phys_dma_addr && dev->phys_dma_addr != dev->phys_bounce_buffer)
+ pci_unmap_single(dev->pci_dev, dev->phys_dma_addr, R852_DMA_LEN,
+ dev->dma_dir ? PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
+ complete(&dev->dma_done);
+}
+
+/*
+ * Wait till dma is done, which includes both phases of it
+ */
+static int r852_dma_wait(struct r852_device *dev)
+{
+ long timeout = wait_for_completion_timeout(&dev->dma_done,
+ msecs_to_jiffies(1000));
+ if (!timeout) {
+ dbg("timeout waiting for DMA interrupt");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/*
+ * Read/write one page (512 bytes) using dma
+ */
+static void r852_do_dma(struct r852_device *dev, uint8_t *buf, int do_read)
+{
+ int bounce = 0;
+ unsigned long flags;
+ int error;
+
+ dev->dma_error = 0;
+
+ /* Set dma direction */
+ dev->dma_dir = do_read;
+ dev->dma_stage = 1;
+
+ dbg_verbose("doing dma %s ", do_read ? "read" : "write");
+
+	/* Set initial dma state: for reads, first fill the on-board buffer
+	from the device; for writes, first fill the buffer from memory */
+ dev->dma_state = do_read ? DMA_INTERNAL : DMA_MEMORY;
+
+ /* if incoming buffer is not page aligned, we should do bounce */
+ if ((unsigned long)buf & (R852_DMA_LEN-1))
+ bounce = 1;
+
+ if (!bounce) {
+ dev->phys_dma_addr = pci_map_single(dev->pci_dev, (void *)buf,
+ R852_DMA_LEN,
+ (do_read ? PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE));
+
+ if (pci_dma_mapping_error(dev->pci_dev, dev->phys_dma_addr))
+ bounce = 1;
+ }
+
+ if (bounce) {
+ dbg_verbose("dma: using bounce buffer");
+ dev->phys_dma_addr = dev->phys_bounce_buffer;
+ if (!do_read)
+ memcpy(dev->bounce_buffer, buf, R852_DMA_LEN);
+ }
+
+ /* Enable DMA */
+ spin_lock_irqsave(&dev->irqlock, flags);
+ r852_dma_enable(dev);
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+
+ /* Wait till complete */
+ error = r852_dma_wait(dev);
+
+ if (error) {
+ r852_dma_done(dev, error);
+ return;
+ }
+
+ if (do_read && bounce)
+ memcpy((void *)buf, dev->bounce_buffer, R852_DMA_LEN);
+}
+
+/*
+ * Program data lines of the nand chip to send data to it
+ */
+void r852_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+ struct r852_device *dev = r852_get_dev(mtd);
+ uint32_t reg;
+
+ /* Don't allow any access to hardware if we suspect card removal */
+ if (dev->card_unstable)
+ return;
+
+ /* Special case for whole sector read */
+ if (len == R852_DMA_LEN && dev->dma_usable) {
+ r852_do_dma(dev, (uint8_t *)buf, 0);
+ return;
+ }
+
+	/* write DWORD chunks - faster */
+	while (len >= 4) {
+ reg = buf[0] | buf[1] << 8 | buf[2] << 16 | buf[3] << 24;
+ r852_write_reg_dword(dev, R852_DATALINE, reg);
+ buf += 4;
+ len -= 4;
+
+ }
+
+ /* write rest */
+ while (len)
+ r852_write_reg(dev, R852_DATALINE, *buf++);
+}
+
+/*
+ * Read data lines of the nand chip to retrieve data
+ */
+void r852_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ struct r852_device *dev = r852_get_dev(mtd);
+ uint32_t reg;
+
+ if (dev->card_unstable) {
+		/* since we can't signal an error here, at least return a
+		predictable buffer */
+ memset(buf, 0, len);
+ return;
+ }
+
+ /* special case for whole sector read */
+ if (len == R852_DMA_LEN && dev->dma_usable) {
+ r852_do_dma(dev, buf, 1);
+ return;
+ }
+
+ /* read in dword sized chunks */
+ while (len >= 4) {
+
+ reg = r852_read_reg_dword(dev, R852_DATALINE);
+ *buf++ = reg & 0xFF;
+ *buf++ = (reg >> 8) & 0xFF;
+ *buf++ = (reg >> 16) & 0xFF;
+ *buf++ = (reg >> 24) & 0xFF;
+ len -= 4;
+ }
+
+	/* read the rest byte by byte */
+ while (len--)
+ *buf++ = r852_read_reg(dev, R852_DATALINE);
+}
+
+/*
+ * Read one byte from nand chip
+ */
+static uint8_t r852_read_byte(struct mtd_info *mtd)
+{
+ struct r852_device *dev = r852_get_dev(mtd);
+
+ /* Same problem as in r852_read_buf.... */
+ if (dev->card_unstable)
+ return 0;
+
+ return r852_read_reg(dev, R852_DATALINE);
+}
+
+
+/*
+ * Readback the buffer to verify it
+ */
+int r852_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+ struct r852_device *dev = r852_get_dev(mtd);
+
+ /* We can't be sure about anything here... */
+ if (dev->card_unstable)
+ return -1;
+
+ /* This will never happen, unless you wired up a nand chip
+ with > 512 bytes page size to the reader */
+ if (len > SM_SECTOR_SIZE)
+ return 0;
+
+ r852_read_buf(mtd, dev->tmp_buffer, len);
+ return memcmp(buf, dev->tmp_buffer, len);
+}
+
+/*
+ * Control several chip lines & send commands
+ */
+void r852_cmdctl(struct mtd_info *mtd, int dat, unsigned int ctrl)
+{
+ struct r852_device *dev = r852_get_dev(mtd);
+
+ if (dev->card_unstable)
+ return;
+
+ if (ctrl & NAND_CTRL_CHANGE) {
+
+ dev->ctlreg &= ~(R852_CTL_DATA | R852_CTL_COMMAND |
+ R852_CTL_ON | R852_CTL_CARDENABLE);
+
+ if (ctrl & NAND_ALE)
+ dev->ctlreg |= R852_CTL_DATA;
+
+ if (ctrl & NAND_CLE)
+ dev->ctlreg |= R852_CTL_COMMAND;
+
+ if (ctrl & NAND_NCE)
+ dev->ctlreg |= (R852_CTL_CARDENABLE | R852_CTL_ON);
+ else
+ dev->ctlreg &= ~R852_CTL_WRITE;
+
+	/* when a write is started, enable write access */
+ if (dat == NAND_CMD_ERASE1)
+ dev->ctlreg |= R852_CTL_WRITE;
+
+ r852_write_reg(dev, R852_CTL, dev->ctlreg);
+ }
+
+ /* HACK: NAND_CMD_SEQIN is called without NAND_CTRL_CHANGE, but we need
+ to set write mode */
+ if (dat == NAND_CMD_SEQIN && (dev->ctlreg & R852_CTL_COMMAND)) {
+ dev->ctlreg |= R852_CTL_WRITE;
+ r852_write_reg(dev, R852_CTL, dev->ctlreg);
+ }
+
+ if (dat != NAND_CMD_NONE)
+ r852_write_reg(dev, R852_DATALINE, dat);
+}
+
+/*
+ * Wait till card is ready.
+ * based on nand_wait, but returns errors on DMA error
+ */
+int r852_wait(struct mtd_info *mtd, struct nand_chip *chip)
+{
+ struct r852_device *dev = (struct r852_device *)chip->priv;
+
+ unsigned long timeout;
+ int status;
+
+ timeout = jiffies + (chip->state == FL_ERASING ?
+ msecs_to_jiffies(400) : msecs_to_jiffies(20));
+
+ while (time_before(jiffies, timeout))
+ if (chip->dev_ready(mtd))
+ break;
+
+ chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
+ status = (int)chip->read_byte(mtd);
+
+	/* Unfortunately, there is no way to send detailed error status... */
+ if (dev->dma_error) {
+ status |= NAND_STATUS_FAIL;
+ dev->dma_error = 0;
+ }
+ return status;
+}
+
+/*
+ * Check if card is ready
+ */
+
+int r852_ready(struct mtd_info *mtd)
+{
+ struct r852_device *dev = r852_get_dev(mtd);
+ return !(r852_read_reg(dev, R852_CARD_STA) & R852_CARD_STA_BUSY);
+}
+
+
+/*
+ * Set ECC engine mode
+*/
+
+void r852_ecc_hwctl(struct mtd_info *mtd, int mode)
+{
+ struct r852_device *dev = r852_get_dev(mtd);
+
+ if (dev->card_unstable)
+ return;
+
+ switch (mode) {
+ case NAND_ECC_READ:
+ case NAND_ECC_WRITE:
+ /* enable ecc generation/check*/
+ dev->ctlreg |= R852_CTL_ECC_ENABLE;
+
+ /* flush ecc buffer */
+ r852_write_reg(dev, R852_CTL,
+ dev->ctlreg | R852_CTL_ECC_ACCESS);
+
+ r852_read_reg_dword(dev, R852_DATALINE);
+ r852_write_reg(dev, R852_CTL, dev->ctlreg);
+ return;
+
+ case NAND_ECC_READSYN:
+ /* disable ecc generation */
+ dev->ctlreg &= ~R852_CTL_ECC_ENABLE;
+ r852_write_reg(dev, R852_CTL, dev->ctlreg);
+ }
+}
+
+/*
+ * Calculate ECC, only used for writes
+ */
+
+int r852_ecc_calculate(struct mtd_info *mtd, const uint8_t *dat,
+ uint8_t *ecc_code)
+{
+ struct r852_device *dev = r852_get_dev(mtd);
+ struct sm_oob *oob = (struct sm_oob *)ecc_code;
+ uint32_t ecc1, ecc2;
+
+ if (dev->card_unstable)
+ return 0;
+
+ dev->ctlreg &= ~R852_CTL_ECC_ENABLE;
+ r852_write_reg(dev, R852_CTL, dev->ctlreg | R852_CTL_ECC_ACCESS);
+
+ ecc1 = r852_read_reg_dword(dev, R852_DATALINE);
+ ecc2 = r852_read_reg_dword(dev, R852_DATALINE);
+
+ oob->ecc1[0] = (ecc1) & 0xFF;
+ oob->ecc1[1] = (ecc1 >> 8) & 0xFF;
+ oob->ecc1[2] = (ecc1 >> 16) & 0xFF;
+
+ oob->ecc2[0] = (ecc2) & 0xFF;
+ oob->ecc2[1] = (ecc2 >> 8) & 0xFF;
+ oob->ecc2[2] = (ecc2 >> 16) & 0xFF;
+
+ r852_write_reg(dev, R852_CTL, dev->ctlreg);
+ return 0;
+}
+
+/*
+ * Correct the data using ECC, hw did almost everything for us
+ */
+
+int r852_ecc_correct(struct mtd_info *mtd, uint8_t *dat,
+ uint8_t *read_ecc, uint8_t *calc_ecc)
+{
+	uint32_t ecc_reg;
+ uint8_t ecc_status, err_byte;
+ int i, error = 0;
+
+ struct r852_device *dev = r852_get_dev(mtd);
+
+ if (dev->card_unstable)
+ return 0;
+
+ r852_write_reg(dev, R852_CTL, dev->ctlreg | R852_CTL_ECC_ACCESS);
+ ecc_reg = r852_read_reg_dword(dev, R852_DATALINE);
+ r852_write_reg(dev, R852_CTL, dev->ctlreg);
+
+ for (i = 0 ; i <= 1 ; i++) {
+
+ ecc_status = (ecc_reg >> 8) & 0xFF;
+
+ /* ecc uncorrectable error */
+ if (ecc_status & R852_ECC_FAIL) {
+ dbg("ecc: unrecoverable error, in half %d", i);
+ error = -1;
+ goto exit;
+ }
+
+ /* correctable error */
+ if (ecc_status & R852_ECC_CORRECTABLE) {
+
+ err_byte = ecc_reg & 0xFF;
+ dbg("ecc: recoverable error, "
+ "in half %d, byte %d, bit %d", i,
+ err_byte, ecc_status & R852_ECC_ERR_BIT_MSK);
+
+ dat[err_byte] ^=
+ 1 << (ecc_status & R852_ECC_ERR_BIT_MSK);
+ error++;
+ }
+
+ dat += 256;
+ ecc_reg >>= 16;
+ }
+exit:
+ return error;
+}
+
+/*
+ * This is a copy of nand_read_oob_std
+ * nand_read_oob_syndrome assumes we can send column address - we can't
+ */
+static int r852_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
+ int page, int sndcmd)
+{
+ if (sndcmd) {
+ chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
+ sndcmd = 0;
+ }
+ chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+ return sndcmd;
+}
+
+/*
+ * Start the nand engine
+ */
+
+void r852_engine_enable(struct r852_device *dev)
+{
+ if (r852_read_reg_dword(dev, R852_HW) & R852_HW_UNKNOWN) {
+ r852_write_reg(dev, R852_CTL, R852_CTL_RESET | R852_CTL_ON);
+ r852_write_reg_dword(dev, R852_HW, R852_HW_ENABLED);
+ } else {
+ r852_write_reg_dword(dev, R852_HW, R852_HW_ENABLED);
+ r852_write_reg(dev, R852_CTL, R852_CTL_RESET | R852_CTL_ON);
+ }
+ msleep(300);
+ r852_write_reg(dev, R852_CTL, 0);
+}
+
+
+/*
+ * Stop the nand engine
+ */
+
+void r852_engine_disable(struct r852_device *dev)
+{
+ r852_write_reg_dword(dev, R852_HW, 0);
+ r852_write_reg(dev, R852_CTL, R852_CTL_RESET);
+}
+
+/*
+ * Test if card is present
+ */
+
+void r852_card_update_present(struct r852_device *dev)
+{
+ unsigned long flags;
+ uint8_t reg;
+
+ spin_lock_irqsave(&dev->irqlock, flags);
+ reg = r852_read_reg(dev, R852_CARD_STA);
+ dev->card_detected = !!(reg & R852_CARD_STA_PRESENT);
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+}
+
+/*
+ * Update card detection IRQ state according to current card state
+ * which is read in r852_card_update_present
+ */
+void r852_update_card_detect(struct r852_device *dev)
+{
+ int card_detect_reg = r852_read_reg(dev, R852_CARD_IRQ_ENABLE);
+ dev->card_unstable = 0;
+
+ card_detect_reg &= ~(R852_CARD_IRQ_REMOVE | R852_CARD_IRQ_INSERT);
+ card_detect_reg |= R852_CARD_IRQ_GENABLE;
+
+ card_detect_reg |= dev->card_detected ?
+ R852_CARD_IRQ_REMOVE : R852_CARD_IRQ_INSERT;
+
+ r852_write_reg(dev, R852_CARD_IRQ_ENABLE, card_detect_reg);
+}
+
+ssize_t r852_media_type_show(struct device *sys_dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mtd_info *mtd = container_of(sys_dev, struct mtd_info, dev);
+ struct r852_device *dev = r852_get_dev(mtd);
+ char *data = dev->sm ? "smartmedia" : "xd";
+
+ strcpy(buf, data);
+ return strlen(data);
+}
+
+DEVICE_ATTR(media_type, S_IRUGO, r852_media_type_show, NULL);
+
+
+/* Detect properties of card in slot */
+void r852_update_media_status(struct r852_device *dev)
+{
+ uint8_t reg;
+ unsigned long flags;
+ int readonly;
+
+ spin_lock_irqsave(&dev->irqlock, flags);
+ if (!dev->card_detected) {
+ message("card removed");
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+ return ;
+ }
+
+ readonly = r852_read_reg(dev, R852_CARD_STA) & R852_CARD_STA_RO;
+ reg = r852_read_reg(dev, R852_DMA_CAP);
+ dev->sm = (reg & (R852_DMA1 | R852_DMA2)) && (reg & R852_SMBIT);
+
+ message("detected %s %s card in slot",
+ dev->sm ? "SmartMedia" : "xD",
+ readonly ? "readonly" : "writeable");
+
+ dev->readonly = readonly;
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+}
+
+/*
+ * Register the nand device
+ * Called when the card is detected
+ */
+int r852_register_nand_device(struct r852_device *dev)
+{
+ dev->mtd = kzalloc(sizeof(struct mtd_info), GFP_KERNEL);
+
+ if (!dev->mtd)
+ goto error1;
+
+ WARN_ON(dev->card_registred);
+
+ dev->mtd->owner = THIS_MODULE;
+ dev->mtd->priv = dev->chip;
+ dev->mtd->dev.parent = &dev->pci_dev->dev;
+
+ if (dev->readonly)
+ dev->chip->options |= NAND_ROM;
+
+ r852_engine_enable(dev);
+
+ if (sm_register_device(dev->mtd, dev->sm))
+ goto error2;
+
+ if (device_create_file(&dev->mtd->dev, &dev_attr_media_type))
+ message("can't create media type sysfs attribute");
+
+ dev->card_registred = 1;
+ return 0;
+error2:
+ kfree(dev->mtd);
+error1:
+ /* Force card redetect */
+ dev->card_detected = 0;
+ return -1;
+}
+
+/*
+ * Unregister the card
+ */
+
+void r852_unregister_nand_device(struct r852_device *dev)
+{
+ if (!dev->card_registred)
+ return;
+
+ device_remove_file(&dev->mtd->dev, &dev_attr_media_type);
+ nand_release(dev->mtd);
+ r852_engine_disable(dev);
+ dev->card_registred = 0;
+ kfree(dev->mtd);
+ dev->mtd = NULL;
+}
+
+/* Card state updater */
+void r852_card_detect_work(struct work_struct *work)
+{
+ struct r852_device *dev =
+ container_of(work, struct r852_device, card_detect_work.work);
+
+ r852_card_update_present(dev);
+ dev->card_unstable = 0;
+
+ /* False alarm */
+ if (dev->card_detected == dev->card_registred)
+ goto exit;
+
+ /* Read media properties */
+ r852_update_media_status(dev);
+
+ /* Register the card */
+ if (dev->card_detected)
+ r852_register_nand_device(dev);
+ else
+ r852_unregister_nand_device(dev);
+exit:
+ /* Update detection logic */
+ r852_update_card_detect(dev);
+}
+
+/* Ack + disable IRQ generation */
+static void r852_disable_irqs(struct r852_device *dev)
+{
+ uint8_t reg;
+ reg = r852_read_reg(dev, R852_CARD_IRQ_ENABLE);
+ r852_write_reg(dev, R852_CARD_IRQ_ENABLE, reg & ~R852_CARD_IRQ_MASK);
+
+ reg = r852_read_reg_dword(dev, R852_DMA_IRQ_ENABLE);
+ r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE,
+ reg & ~R852_DMA_IRQ_MASK);
+
+ r852_write_reg(dev, R852_CARD_IRQ_STA, R852_CARD_IRQ_MASK);
+ r852_write_reg_dword(dev, R852_DMA_IRQ_STA, R852_DMA_IRQ_MASK);
+}
+
+/* Interrupt handler */
+static irqreturn_t r852_irq(int irq, void *data)
+{
+ struct r852_device *dev = (struct r852_device *)data;
+
+ uint8_t card_status, dma_status;
+ unsigned long flags;
+ irqreturn_t ret = IRQ_NONE;
+
+ spin_lock_irqsave(&dev->irqlock, flags);
+
+	/* We can receive a shared interrupt while the pci device is suspended;
+		in that case reads will return 0xFFFFFFFF.... */
+ if (dev->insuspend)
+ goto out;
+
+ /* handle card detection interrupts first */
+ card_status = r852_read_reg(dev, R852_CARD_IRQ_STA);
+ r852_write_reg(dev, R852_CARD_IRQ_STA, card_status);
+
+ if (card_status & (R852_CARD_IRQ_INSERT|R852_CARD_IRQ_REMOVE)) {
+
+ ret = IRQ_HANDLED;
+ dev->card_detected = !!(card_status & R852_CARD_IRQ_INSERT);
+
+		/* we shouldn't receive any interrupts while we wait for the
+		card to settle */
+ WARN_ON(dev->card_unstable);
+
+ /* disable irqs while card is unstable */
+		/* this will time out DMA if active, but better than garbage */
+ r852_disable_irqs(dev);
+
+ if (dev->card_unstable)
+ goto out;
+
+		/* let the card state settle a bit, and then do the work */
+ dev->card_unstable = 1;
+ queue_delayed_work(dev->card_workqueue,
+ &dev->card_detect_work, msecs_to_jiffies(100));
+ goto out;
+ }
+
+
+ /* Handle dma interrupts */
+ dma_status = r852_read_reg_dword(dev, R852_DMA_IRQ_STA);
+ r852_write_reg_dword(dev, R852_DMA_IRQ_STA, dma_status);
+
+ if (dma_status & R852_DMA_IRQ_MASK) {
+
+ ret = IRQ_HANDLED;
+
+ if (dma_status & R852_DMA_IRQ_ERROR) {
+			dbg("received dma error IRQ");
+ r852_dma_done(dev, -EIO);
+ goto out;
+ }
+
+		/* received DMA interrupt out of nowhere? */
+ WARN_ON_ONCE(dev->dma_stage == 0);
+
+ if (dev->dma_stage == 0)
+ goto out;
+
+ /* done device access */
+ if (dev->dma_state == DMA_INTERNAL &&
+ (dma_status & R852_DMA_IRQ_INTERNAL)) {
+
+ dev->dma_state = DMA_MEMORY;
+ dev->dma_stage++;
+ }
+
+ /* done memory DMA */
+ if (dev->dma_state == DMA_MEMORY &&
+ (dma_status & R852_DMA_IRQ_MEMORY)) {
+ dev->dma_state = DMA_INTERNAL;
+ dev->dma_stage++;
+ }
+
+ /* Enable 2nd half of dma dance */
+ if (dev->dma_stage == 2)
+ r852_dma_enable(dev);
+
+ /* Operation done */
+ if (dev->dma_stage == 3)
+ r852_dma_done(dev, 0);
+ goto out;
+ }
+
+ /* Handle unknown interrupts */
+ if (dma_status)
+ dbg("bad dma IRQ status = %x", dma_status);
+
+ if (card_status & ~R852_CARD_STA_CD)
+ dbg("strange card status = %x", card_status);
+
+out:
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+ return ret;
+}
+
+int r852_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
+{
+ int error;
+ struct nand_chip *chip;
+ struct r852_device *dev;
+
+ /* pci initialization */
+ error = pci_enable_device(pci_dev);
+
+ if (error)
+ goto error1;
+
+ pci_set_master(pci_dev);
+
+ error = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32));
+ if (error)
+ goto error2;
+
+ error = pci_request_regions(pci_dev, DRV_NAME);
+
+ if (error)
+ goto error3;
+
+ error = -ENOMEM;
+
+ /* init nand chip, but register it only on card insert */
+ chip = kzalloc(sizeof(struct nand_chip), GFP_KERNEL);
+
+ if (!chip)
+ goto error4;
+
+ /* commands */
+ chip->cmd_ctrl = r852_cmdctl;
+ chip->waitfunc = r852_wait;
+ chip->dev_ready = r852_ready;
+
+ /* I/O */
+ chip->read_byte = r852_read_byte;
+ chip->read_buf = r852_read_buf;
+ chip->write_buf = r852_write_buf;
+ chip->verify_buf = r852_verify_buf;
+
+ /* ecc */
+ chip->ecc.mode = NAND_ECC_HW_SYNDROME;
+ chip->ecc.size = R852_DMA_LEN;
+ chip->ecc.bytes = SM_OOB_SIZE;
+ chip->ecc.hwctl = r852_ecc_hwctl;
+ chip->ecc.calculate = r852_ecc_calculate;
+ chip->ecc.correct = r852_ecc_correct;
+
+ /* TODO: hack */
+ chip->ecc.read_oob = r852_read_oob;
+
+ /* init our device structure */
+ dev = kzalloc(sizeof(struct r852_device), GFP_KERNEL);
+
+ if (!dev)
+ goto error5;
+
+ chip->priv = dev;
+ dev->chip = chip;
+ dev->pci_dev = pci_dev;
+ pci_set_drvdata(pci_dev, dev);
+
+ dev->bounce_buffer = pci_alloc_consistent(pci_dev, R852_DMA_LEN,
+ &dev->phys_bounce_buffer);
+
+ if (!dev->bounce_buffer)
+ goto error6;
+
+
+ error = -ENODEV;
+ dev->mmio = pci_ioremap_bar(pci_dev, 0);
+
+ if (!dev->mmio)
+ goto error7;
+
+ error = -ENOMEM;
+ dev->tmp_buffer = kzalloc(SM_SECTOR_SIZE, GFP_KERNEL);
+
+ if (!dev->tmp_buffer)
+ goto error8;
+
+ init_completion(&dev->dma_done);
+
+ dev->card_workqueue = create_freezeable_workqueue(DRV_NAME);
+
+ if (!dev->card_workqueue)
+ goto error9;
+
+ INIT_DELAYED_WORK(&dev->card_detect_work, r852_card_detect_work);
+
+	/* shut down everything - precaution */
+ r852_engine_disable(dev);
+ r852_disable_irqs(dev);
+
+ r852_dma_test(dev);
+
+	/* register irq handler */
+ error = -ENODEV;
+ if (request_irq(pci_dev->irq, &r852_irq, IRQF_SHARED,
+ DRV_NAME, dev))
+ goto error10;
+
+ dev->irq = pci_dev->irq;
+ spin_lock_init(&dev->irqlock);
+
+ /* kick initial present test */
+ dev->card_detected = 0;
+ r852_card_update_present(dev);
+ queue_delayed_work(dev->card_workqueue,
+ &dev->card_detect_work, 0);
+
+
+	printk(KERN_NOTICE DRV_NAME ": driver loaded successfully\n");
+ return 0;
+
+error10:
+ destroy_workqueue(dev->card_workqueue);
+error9:
+ kfree(dev->tmp_buffer);
+error8:
+ pci_iounmap(pci_dev, dev->mmio);
+error7:
+ pci_free_consistent(pci_dev, R852_DMA_LEN,
+ dev->bounce_buffer, dev->phys_bounce_buffer);
+error6:
+ kfree(dev);
+error5:
+ kfree(chip);
+error4:
+ pci_release_regions(pci_dev);
+error3:
+error2:
+ pci_disable_device(pci_dev);
+error1:
+ return error;
+}
+
+void r852_remove(struct pci_dev *pci_dev)
+{
+ struct r852_device *dev = pci_get_drvdata(pci_dev);
+
+ /* Stop detect workqueue -
+ we are going to unregister the device anyway*/
+ cancel_delayed_work_sync(&dev->card_detect_work);
+ destroy_workqueue(dev->card_workqueue);
+
+ /* Unregister the device, this might make more IO */
+ r852_unregister_nand_device(dev);
+
+ /* Stop interrupts */
+ r852_disable_irqs(dev);
+ synchronize_irq(dev->irq);
+ free_irq(dev->irq, dev);
+
+ /* Cleanup */
+ kfree(dev->tmp_buffer);
+ pci_iounmap(pci_dev, dev->mmio);
+ pci_free_consistent(pci_dev, R852_DMA_LEN,
+ dev->bounce_buffer, dev->phys_bounce_buffer);
+
+ kfree(dev->chip);
+ kfree(dev);
+
+ /* Shutdown the PCI device */
+ pci_release_regions(pci_dev);
+ pci_disable_device(pci_dev);
+}
+
+void r852_shutdown(struct pci_dev *pci_dev)
+{
+ struct r852_device *dev = pci_get_drvdata(pci_dev);
+
+ cancel_delayed_work_sync(&dev->card_detect_work);
+ r852_disable_irqs(dev);
+ synchronize_irq(dev->irq);
+ pci_disable_device(pci_dev);
+}
+
+#ifdef CONFIG_PM
+int r852_suspend(struct device *device)
+{
+ struct r852_device *dev = pci_get_drvdata(to_pci_dev(device));
+ unsigned long flags;
+
+ if (dev->ctlreg & R852_CTL_CARDENABLE)
+ return -EBUSY;
+
+ /* First make sure the detect work is gone */
+ cancel_delayed_work_sync(&dev->card_detect_work);
+
+ /* Turn off the interrupts and stop the device */
+ r852_disable_irqs(dev);
+ r852_engine_disable(dev);
+
+ spin_lock_irqsave(&dev->irqlock, flags);
+ dev->insuspend = 1;
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+
+	/* At this point, even if the interrupt handler is running, it will quit */
+	/* So wait for this to happen explicitly */
+ synchronize_irq(dev->irq);
+
+	/* If the card was pulled out just during the suspend, which is very
+		unlikely, we will remove it on resume; it's too late now
+		anyway... */
+ dev->card_unstable = 0;
+
+ pci_save_state(to_pci_dev(device));
+ return pci_prepare_to_sleep(to_pci_dev(device));
+}
+
+int r852_resume(struct device *device)
+{
+ struct r852_device *dev = pci_get_drvdata(to_pci_dev(device));
+ unsigned long flags;
+
+ /* Turn on the hardware */
+ pci_back_from_sleep(to_pci_dev(device));
+ pci_restore_state(to_pci_dev(device));
+
+ r852_disable_irqs(dev);
+ r852_card_update_present(dev);
+ r852_engine_disable(dev);
+
+
+	/* Now it's safe for the IRQ handler to run */
+ spin_lock_irqsave(&dev->irqlock, flags);
+ dev->insuspend = 0;
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+
+
+ /* If card status changed, just do the work */
+ if (dev->card_detected != dev->card_registred) {
+ dbg("card was %s during low power state",
+ dev->card_detected ? "added" : "removed");
+
+ queue_delayed_work(dev->card_workqueue,
+ &dev->card_detect_work, 1000);
+ return 0;
+ }
+
+ /* Otherwise, initialize the card */
+ if (dev->card_registred) {
+ r852_engine_enable(dev);
+ dev->chip->select_chip(dev->mtd, 0);
+ dev->chip->cmdfunc(dev->mtd, NAND_CMD_RESET, -1, -1);
+ dev->chip->select_chip(dev->mtd, -1);
+ }
+
+ /* Program card detection IRQ */
+ r852_update_card_detect(dev);
+ return 0;
+}
+#else
+#define r852_suspend NULL
+#define r852_resume NULL
+#endif
+
+static const struct pci_device_id r852_pci_id_tbl[] = {
+
+ { PCI_VDEVICE(RICOH, 0x0852), },
+ { },
+};
+
+MODULE_DEVICE_TABLE(pci, r852_pci_id_tbl);
+
+SIMPLE_DEV_PM_OPS(r852_pm_ops, r852_suspend, r852_resume);
+
+
+static struct pci_driver r852_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = r852_pci_id_tbl,
+ .probe = r852_probe,
+ .remove = r852_remove,
+ .shutdown = r852_shutdown,
+ .driver.pm = &r852_pm_ops,
+};
+
+static __init int r852_module_init(void)
+{
+ return pci_register_driver(&r852_pci_driver);
+}
+
+static void __exit r852_module_exit(void)
+{
+ pci_unregister_driver(&r852_pci_driver);
+}
+
+module_init(r852_module_init);
+module_exit(r852_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>");
+MODULE_DESCRIPTION("Ricoh 85xx xD/smartmedia card reader driver");
diff --git a/drivers/mtd/nand/r852.h b/drivers/mtd/nand/r852.h
new file mode 100644
index 000000000000..8096cc280c73
--- /dev/null
+++ b/drivers/mtd/nand/r852.h
@@ -0,0 +1,163 @@
+/*
+ * Copyright © 2009 - Maxim Levitsky
+ * driver for Ricoh xD readers
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/pci.h>
+#include <linux/completion.h>
+#include <linux/workqueue.h>
+#include <linux/mtd/nand.h>
+#include <linux/spinlock.h>
+
+
+/* nand interface + ecc
+ byte write/read does one cycle on the nand data lines.
+ dword write/read does 4 cycles.
+ if R852_CTL_ECC_ACCESS is set in R852_CTL, then a dword read returns the
+ results of ecc correction, provided a DMA read was done before.
+ If a write was done, two dword reads return the generated ecc checksums.
+*/
+#define R852_DATALINE 0x00
+
+/* control register */
+#define R852_CTL 0x04
+#define R852_CTL_COMMAND 0x01 /* send command (#CLE)*/
+#define R852_CTL_DATA 0x02 /* read/write data (#ALE)*/
+#define R852_CTL_ON		0x04 /* only seems to control the hd led, */
+ /* but has to be set on start...*/
+#define R852_CTL_RESET 0x08 /* unknown, set only on start once*/
+#define R852_CTL_CARDENABLE 0x10 /* probably (#CE) - always set*/
+#define R852_CTL_ECC_ENABLE 0x20 /* enable ecc engine */
+#define R852_CTL_ECC_ACCESS 0x40 /* read/write ecc via reg #0*/
+#define R852_CTL_WRITE 0x80 /* set when performing writes (#WP) */
+
+/* card detection status */
+#define R852_CARD_STA 0x05
+
+#define R852_CARD_STA_CD 0x01 /* state of #CD line, same as 0x04 */
+#define R852_CARD_STA_RO 0x02 /* card is readonly */
+#define R852_CARD_STA_PRESENT 0x04 /* card is present (#CD) */
+#define R852_CARD_STA_ABSENT 0x08 /* card is absent */
+#define R852_CARD_STA_BUSY 0x80 /* card is busy - (#R/B) */
+
+/* card detection irq status & enable*/
+#define R852_CARD_IRQ_STA 0x06 /* IRQ status */
+#define R852_CARD_IRQ_ENABLE 0x07 /* IRQ enable */
+
+#define R852_CARD_IRQ_CD 0x01 /* fire when #CD lights, same as 0x04*/
+#define R852_CARD_IRQ_REMOVE 0x04 /* detect card removal */
+#define R852_CARD_IRQ_INSERT 0x08 /* detect card insert */
+#define R852_CARD_IRQ_UNK1 0x10 /* unknown */
+#define R852_CARD_IRQ_GENABLE 0x80 /* general enable */
+#define R852_CARD_IRQ_MASK 0x1D
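+					/* CD | REMOVE | INSERT | UNK1 */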
+
+
+
+/* hardware enable */
+#define R852_HW 0x08
+#define R852_HW_ENABLED 0x01 /* hw enabled */
+#define R852_HW_UNKNOWN 0x80
+
+
+/* dma capabilities */
+#define R852_DMA_CAP 0x09
+#define R852_SMBIT 0x20 /* if set with bit #6 or bit #7, then */
+ /* hw is smartmedia */
+#define R852_DMA1 0x40 /* if set w/bit #7, dma is supported */
+#define R852_DMA2 0x80 /* if set w/bit #6, dma is supported */
+
+
+/* physical DMA address - 32 bit value*/
+#define R852_DMA_ADDR 0x0C
+
+
+/* dma settings */
+#define R852_DMA_SETTINGS 0x10
+#define R852_DMA_MEMORY 0x01 /* (memory <-> internal hw buffer) */
+#define R852_DMA_READ 0x02 /* 0 = write, 1 = read */
+#define R852_DMA_INTERNAL 0x04 /* (internal hw buffer <-> card) */
+
+/* dma IRQ status */
+#define R852_DMA_IRQ_STA 0x14
+
+/* dma IRQ enable */
+#define R852_DMA_IRQ_ENABLE 0x18
+
+#define R852_DMA_IRQ_MEMORY 0x01 /* (memory <-> internal hw buffer) */
+#define R852_DMA_IRQ_ERROR 0x02 /* error did happen */
+#define R852_DMA_IRQ_INTERNAL 0x04 /* (internal hw buffer <-> card) */
+#define R852_DMA_IRQ_MASK 0x07 /* mask of all IRQ bits */
+
+
+/* ECC syndrome format - a read from reg #0 returns two copies of these, one
+ for each half of the page.
+ the first byte is the error byte location, and the second the bit location
+ plus flags */
+#define R852_ECC_ERR_BIT_MSK 0x07 /* error bit location */
+#define R852_ECC_CORRECT 0x10 /* no errors - (guessed) */
+#define R852_ECC_CORRECTABLE 0x20 /* correctable error exist */
+#define R852_ECC_FAIL 0x40 /* non correctable error detected */
+
+#define R852_DMA_LEN 512
+
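+/* values for dev->dma_state - which half of the two-stage transfer is active */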
+#define DMA_INTERNAL 0
+#define DMA_MEMORY 1
+
+struct r852_device {
+ void __iomem *mmio; /* mmio */
+ struct mtd_info *mtd; /* mtd backpointer */
+ struct nand_chip *chip; /* nand chip backpointer */
+ struct pci_dev *pci_dev; /* pci backpointer */
+
+ /* dma area */
+ dma_addr_t phys_dma_addr; /* bus address of buffer*/
+ struct completion dma_done; /* data transfer done */
+
+ dma_addr_t phys_bounce_buffer; /* bus address of bounce buffer */
+ uint8_t *bounce_buffer; /* virtual address of bounce buffer */
+
+ int dma_dir; /* 1 = read, 0 = write */
+ int dma_stage; /* 0 - idle, 1 - first step,
+ 2 - second step */
+
+ int dma_state; /* 0 = internal, 1 = memory */
+ int dma_error; /* dma errors */
+ int dma_usable; /* is it possible to use dma */
+
+ /* card status area */
+ struct delayed_work card_detect_work;
+ struct workqueue_struct *card_workqueue;
+ int card_registred; /* card registered with mtd */
+ int card_detected; /* card detected in slot */
+	int card_unstable;		/* whether the card is inserted
+					   is not known yet */
+ int readonly; /* card is readonly */
+ int sm; /* Is card smartmedia */
+
+ /* interrupt handling */
+ spinlock_t irqlock; /* IRQ protecting lock */
+ int irq; /* irq num */
+ int insuspend; /* device is suspended */
+
+ /* misc */
+ void *tmp_buffer; /* temporary buffer */
+ uint8_t ctlreg; /* cached contents of control reg */
+};
+
+#define DRV_NAME "r852"
+
+
+#define dbg(format, ...) \
+ if (debug) \
+ printk(KERN_DEBUG DRV_NAME ": " format "\n", ## __VA_ARGS__)
+
+#define dbg_verbose(format, ...) \
+ if (debug > 1) \
+ printk(KERN_DEBUG DRV_NAME ": " format "\n", ## __VA_ARGS__)
+
+
+#define message(format, ...) \
+ printk(KERN_INFO DRV_NAME ": " format "\n", ## __VA_ARGS__)
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index fa6e9c7fe511..239aadfd01b0 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -929,14 +929,13 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
pr_debug("s3c2410_nand_probe(%p)\n", pdev);
- info = kmalloc(sizeof(*info), GFP_KERNEL);
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
if (info == NULL) {
dev_err(&pdev->dev, "no memory for flash info\n");
err = -ENOMEM;
goto exit_error;
}
- memset(info, 0, sizeof(*info));
platform_set_drvdata(pdev, info);
spin_lock_init(&info->controller.lock);
@@ -957,7 +956,7 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
/* currently we assume we have the one resource */
res = pdev->resource;
- size = res->end - res->start + 1;
+ size = resource_size(res);
info->area = request_mem_region(res->start, size, pdev->name);
@@ -994,15 +993,13 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
/* allocate our information */
size = nr_sets * sizeof(*info->mtds);
- info->mtds = kmalloc(size, GFP_KERNEL);
+ info->mtds = kzalloc(size, GFP_KERNEL);
if (info->mtds == NULL) {
dev_err(&pdev->dev, "failed to allocate mtd storage\n");
err = -ENOMEM;
goto exit_error;
}
- memset(info->mtds, 0, size);
-
/* initialise all possible chips */
nmtd = info->mtds;
@@ -1013,7 +1010,8 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
s3c2410_nand_init_chip(info, nmtd, sets);
nmtd->scan_res = nand_scan_ident(&nmtd->mtd,
- (sets) ? sets->nr_chips : 1);
+ (sets) ? sets->nr_chips : 1,
+ NULL);
if (nmtd->scan_res == 0) {
s3c2410_nand_update_chip(info, nmtd);
diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c
index 34752fce0793..546c2f0eb2e8 100644
--- a/drivers/mtd/nand/sh_flctl.c
+++ b/drivers/mtd/nand/sh_flctl.c
@@ -855,7 +855,7 @@ static int __devinit flctl_probe(struct platform_device *pdev)
nand->read_word = flctl_read_word;
}
- ret = nand_scan_ident(flctl_mtd, 1);
+ ret = nand_scan_ident(flctl_mtd, 1, NULL);
if (ret)
goto err;
diff --git a/drivers/mtd/nand/sm_common.c b/drivers/mtd/nand/sm_common.c
new file mode 100644
index 000000000000..ac80fb362e63
--- /dev/null
+++ b/drivers/mtd/nand/sm_common.c
@@ -0,0 +1,148 @@
+/*
+ * Copyright © 2009 - Maxim Levitsky
+ * Common routines & support for xD format
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/mtd/nand.h>
+#include "sm_common.h"
+
+static struct nand_ecclayout nand_oob_sm = {
+ .eccbytes = 6,
+ .eccpos = {8, 9, 10, 13, 14, 15},
+ .oobfree = {
+ {.offset = 0 , .length = 4}, /* reserved */
+ {.offset = 6 , .length = 2}, /* LBA1 */
+ {.offset = 11, .length = 2} /* LBA2 */
+ }
+};
+
+/* NOTE: This layout is not compatible with SmartMedia, */
+/* because the 256 byte devices have a page dependent oob layout */
+/* However it does preserve the bad block markers */
+/* If you use smftl, it will bypass this and work correctly */
+/* If you do not, then you break SmartMedia compliance anyway */
+
+static struct nand_ecclayout nand_oob_sm_small = {
+ .eccbytes = 3,
+ .eccpos = {0, 1, 2},
+ .oobfree = {
+ {.offset = 3 , .length = 2}, /* reserved */
+ {.offset = 6 , .length = 2}, /* LBA1 */
+ }
+};
+
+
+static int sm_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+ struct mtd_oob_ops ops;
+ struct sm_oob oob;
+ int ret, error = 0;
+
+ memset(&oob, -1, SM_OOB_SIZE);
+ oob.block_status = 0x0F;
+
+ /* As long as this function is called on erase block boundaries
+ it will work correctly for 256 byte nand */
+ ops.mode = MTD_OOB_PLACE;
+ ops.ooboffs = 0;
+ ops.ooblen = mtd->oobsize;
+ ops.oobbuf = (void *)&oob;
+ ops.datbuf = NULL;
+
+
+ ret = mtd->write_oob(mtd, ofs, &ops);
+ if (ret < 0 || ops.oobretlen != SM_OOB_SIZE) {
+ printk(KERN_NOTICE
+ "sm_common: can't mark sector at %i as bad\n",
+ (int)ofs);
+ error = -EIO;
+ } else
+ mtd->ecc_stats.badblocks++;
+
+ return error;
+}
+
+
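+/* { name, device id, page size, chip size (MiB), erase size, options } */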
+static struct nand_flash_dev nand_smartmedia_flash_ids[] = {
+ {"SmartMedia 1MiB 5V", 0x6e, 256, 1, 0x1000, 0},
+ {"SmartMedia 1MiB 3,3V", 0xe8, 256, 1, 0x1000, 0},
+ {"SmartMedia 1MiB 3,3V", 0xec, 256, 1, 0x1000, 0},
+ {"SmartMedia 2MiB 3,3V", 0xea, 256, 2, 0x1000, 0},
+ {"SmartMedia 2MiB 5V", 0x64, 256, 2, 0x1000, 0},
+ {"SmartMedia 2MiB 3,3V ROM", 0x5d, 512, 2, 0x2000, NAND_ROM},
+ {"SmartMedia 4MiB 3,3V", 0xe3, 512, 4, 0x2000, 0},
+ {"SmartMedia 4MiB 3,3/5V", 0xe5, 512, 4, 0x2000, 0},
+ {"SmartMedia 4MiB 5V", 0x6b, 512, 4, 0x2000, 0},
+ {"SmartMedia 4MiB 3,3V ROM", 0xd5, 512, 4, 0x2000, NAND_ROM},
+ {"SmartMedia 8MiB 3,3V", 0xe6, 512, 8, 0x2000, 0},
+ {"SmartMedia 8MiB 3,3V ROM", 0xd6, 512, 8, 0x2000, NAND_ROM},
+ {"SmartMedia 16MiB 3,3V", 0x73, 512, 16, 0x4000, 0},
+ {"SmartMedia 16MiB 3,3V ROM", 0x57, 512, 16, 0x4000, NAND_ROM},
+ {"SmartMedia 32MiB 3,3V", 0x75, 512, 32, 0x4000, 0},
+ {"SmartMedia 32MiB 3,3V ROM", 0x58, 512, 32, 0x4000, NAND_ROM},
+ {"SmartMedia 64MiB 3,3V", 0x76, 512, 64, 0x4000, 0},
+ {"SmartMedia 64MiB 3,3V ROM", 0xd9, 512, 64, 0x4000, NAND_ROM},
+ {"SmartMedia 128MiB 3,3V", 0x79, 512, 128, 0x4000, 0},
+ {"SmartMedia 128MiB 3,3V ROM", 0xda, 512, 128, 0x4000, NAND_ROM},
+ {"SmartMedia 256MiB 3,3V", 0x71, 512, 256, 0x4000 },
+ {"SmartMedia 256MiB 3,3V ROM", 0x5b, 512, 256, 0x4000, NAND_ROM},
+ {NULL,}
+};
+
+#define XD_TYPEM (NAND_NO_AUTOINCR | NAND_BROKEN_XD)
+static struct nand_flash_dev nand_xd_flash_ids[] = {
+
+ {"xD 16MiB 3,3V", 0x73, 512, 16, 0x4000, 0},
+ {"xD 32MiB 3,3V", 0x75, 512, 32, 0x4000, 0},
+ {"xD 64MiB 3,3V", 0x76, 512, 64, 0x4000, 0},
+ {"xD 128MiB 3,3V", 0x79, 512, 128, 0x4000, 0},
+ {"xD 256MiB 3,3V", 0x71, 512, 256, 0x4000, XD_TYPEM},
+ {"xD 512MiB 3,3V", 0xdc, 512, 512, 0x4000, XD_TYPEM},
+ {"xD 1GiB 3,3V", 0xd3, 512, 1024, 0x4000, XD_TYPEM},
+ {"xD 2GiB 3,3V", 0xd5, 512, 2048, 0x4000, XD_TYPEM},
+ {NULL,}
+};
+
+int sm_register_device(struct mtd_info *mtd, int smartmedia)
+{
+ struct nand_chip *chip = (struct nand_chip *)mtd->priv;
+ int ret;
+
+ chip->options |= NAND_SKIP_BBTSCAN;
+
+ /* Scan for card properties */
+ ret = nand_scan_ident(mtd, 1, smartmedia ?
+ nand_smartmedia_flash_ids : nand_xd_flash_ids);
+
+ if (ret)
+ return ret;
+
+	/* Bad block marker position */
+ chip->badblockpos = 0x05;
+ chip->badblockbits = 7;
+ chip->block_markbad = sm_block_markbad;
+
+ /* ECC layout */
+ if (mtd->writesize == SM_SECTOR_SIZE)
+ chip->ecc.layout = &nand_oob_sm;
+ else if (mtd->writesize == SM_SMALL_PAGE)
+ chip->ecc.layout = &nand_oob_sm_small;
+ else
+ return -ENODEV;
+
+ ret = nand_scan_tail(mtd);
+
+ if (ret)
+ return ret;
+
+ return add_mtd_device(mtd);
+}
+EXPORT_SYMBOL_GPL(sm_register_device);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>");
+MODULE_DESCRIPTION("Common SmartMedia/xD functions");
diff --git a/drivers/mtd/nand/sm_common.h b/drivers/mtd/nand/sm_common.h
new file mode 100644
index 000000000000..00f4a83359b2
--- /dev/null
+++ b/drivers/mtd/nand/sm_common.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright © 2009 - Maxim Levitsky
+ * Common routines & support for SmartMedia/xD format
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/bitops.h>
+#include <linux/mtd/mtd.h>
+
+/* Full oob structure as written on the flash */
+struct sm_oob {
+ uint32_t reserved;
+ uint8_t data_status;
+ uint8_t block_status;
+ uint8_t lba_copy1[2];
+ uint8_t ecc2[3];
+ uint8_t lba_copy2[2];
+ uint8_t ecc1[3];
+} __attribute__((packed));
+
+
+/* one sector is always 512 bytes, but it can consist of two nand pages */
+#define SM_SECTOR_SIZE 512
+
+/* oob area is also 16 bytes, but might be from two pages */
+#define SM_OOB_SIZE 16
+
+/* This is the maximum zone size, and all devices that have more than one zone
+ have this size */
+#define SM_MAX_ZONE_SIZE 1024
+
+/* support for small page nand */
+#define SM_SMALL_PAGE 256
+#define SM_SMALL_OOB_SIZE 8
+
+
+extern int sm_register_device(struct mtd_info *mtd, int smartmedia);
+
+
+static inline int sm_sector_valid(struct sm_oob *oob)
+{
+ return hweight16(oob->data_status) >= 5;
+}
+
+static inline int sm_block_valid(struct sm_oob *oob)
+{
+ return hweight16(oob->block_status) >= 7;
+}
+
+static inline int sm_block_erased(struct sm_oob *oob)
+{
+ static const uint32_t erased_pattern[4] = {
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };
+
+ /* First test for erased block */
+ if (!memcmp(oob, erased_pattern, sizeof(*oob)))
+ return 1;
+ return 0;
+}
diff --git a/drivers/mtd/nand/socrates_nand.c b/drivers/mtd/nand/socrates_nand.c
index a4519a7bd683..884852dc7eb4 100644
--- a/drivers/mtd/nand/socrates_nand.c
+++ b/drivers/mtd/nand/socrates_nand.c
@@ -220,7 +220,7 @@ static int __devinit socrates_nand_probe(struct of_device *ofdev,
dev_set_drvdata(&ofdev->dev, host);
/* first scan to find the device and get the page size */
- if (nand_scan_ident(mtd, 1)) {
+ if (nand_scan_ident(mtd, 1, NULL)) {
res = -ENXIO;
goto out;
}
@@ -290,7 +290,7 @@ static int __devexit socrates_nand_remove(struct of_device *ofdev)
return 0;
}
-static struct of_device_id socrates_nand_match[] =
+static const struct of_device_id socrates_nand_match[] =
{
{
.compatible = "abb,socrates-nand",
@@ -301,8 +301,11 @@ static struct of_device_id socrates_nand_match[] =
MODULE_DEVICE_TABLE(of, socrates_nand_match);
static struct of_platform_driver socrates_nand_driver = {
- .name = "socrates_nand",
- .match_table = socrates_nand_match,
+ .driver = {
+ .name = "socrates_nand",
+ .owner = THIS_MODULE,
+ .of_match_table = socrates_nand_match,
+ },
.probe = socrates_nand_probe,
.remove = __devexit_p(socrates_nand_remove),
};
diff --git a/drivers/mtd/nand/tmio_nand.c b/drivers/mtd/nand/tmio_nand.c
index fa28f01ae009..3041d1f7ae3f 100644
--- a/drivers/mtd/nand/tmio_nand.c
+++ b/drivers/mtd/nand/tmio_nand.c
@@ -319,7 +319,7 @@ static int tmio_nand_correct_data(struct mtd_info *mtd, unsigned char *buf,
static int tmio_hw_init(struct platform_device *dev, struct tmio_nand *tmio)
{
- struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
+ struct mfd_cell *cell = dev_get_platdata(&dev->dev);
int ret;
if (cell->enable) {
@@ -363,7 +363,7 @@ static int tmio_hw_init(struct platform_device *dev, struct tmio_nand *tmio)
static void tmio_hw_stop(struct platform_device *dev, struct tmio_nand *tmio)
{
- struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
+ struct mfd_cell *cell = dev_get_platdata(&dev->dev);
tmio_iowrite8(FCR_MODE_POWER_OFF, tmio->fcr + FCR_MODE);
if (cell->disable)
@@ -372,7 +372,7 @@ static void tmio_hw_stop(struct platform_device *dev, struct tmio_nand *tmio)
static int tmio_probe(struct platform_device *dev)
{
- struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
+ struct mfd_cell *cell = dev_get_platdata(&dev->dev);
struct tmio_nand_data *data = cell->driver_data;
struct resource *fcr = platform_get_resource(dev,
IORESOURCE_MEM, 0);
@@ -405,14 +405,14 @@ static int tmio_probe(struct platform_device *dev)
mtd->priv = nand_chip;
mtd->name = "tmio-nand";
- tmio->ccr = ioremap(ccr->start, ccr->end - ccr->start + 1);
+ tmio->ccr = ioremap(ccr->start, resource_size(ccr));
if (!tmio->ccr) {
retval = -EIO;
goto err_iomap_ccr;
}
tmio->fcr_base = fcr->start & 0xfffff;
- tmio->fcr = ioremap(fcr->start, fcr->end - fcr->start + 1);
+ tmio->fcr = ioremap(fcr->start, resource_size(fcr));
if (!tmio->fcr) {
retval = -EIO;
goto err_iomap_fcr;
@@ -516,7 +516,7 @@ static int tmio_remove(struct platform_device *dev)
#ifdef CONFIG_PM
static int tmio_suspend(struct platform_device *dev, pm_message_t state)
{
- struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
+ struct mfd_cell *cell = dev_get_platdata(&dev->dev);
if (cell->suspend)
cell->suspend(dev);
@@ -527,7 +527,7 @@ static int tmio_suspend(struct platform_device *dev, pm_message_t state)
static int tmio_resume(struct platform_device *dev)
{
- struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
+ struct mfd_cell *cell = dev_get_platdata(&dev->dev);
/* FIXME - is this required or merely another attack of the broken
* SHARP platform? Looks suspicious.
diff --git a/drivers/mtd/nand/ts7250.c b/drivers/mtd/nand/ts7250.c
deleted file mode 100644
index 0f5562aeedc1..000000000000
--- a/drivers/mtd/nand/ts7250.c
+++ /dev/null
@@ -1,207 +0,0 @@
-/*
- * drivers/mtd/nand/ts7250.c
- *
- * Copyright (C) 2004 Technologic Systems (support@embeddedARM.com)
- *
- * Derived from drivers/mtd/nand/edb7312.c
- * Copyright (C) 2004 Marius Gröger (mag@sysgo.de)
- *
- * Derived from drivers/mtd/nand/autcpu12.c
- * Copyright (c) 2001 Thomas Gleixner (gleixner@autronix.de)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Overview:
- * This is a device driver for the NAND flash device found on the
- * TS-7250 board which utilizes a Samsung 32 Mbyte part.
- */
-
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/nand.h>
-#include <linux/mtd/partitions.h>
-#include <linux/io.h>
-
-#include <mach/hardware.h>
-#include <mach/ts72xx.h>
-
-#include <asm/sizes.h>
-#include <asm/mach-types.h>
-
-/*
- * MTD structure for TS7250 board
- */
-static struct mtd_info *ts7250_mtd = NULL;
-
-#ifdef CONFIG_MTD_PARTITIONS
-static const char *part_probes[] = { "cmdlinepart", NULL };
-
-#define NUM_PARTITIONS 3
-
-/*
- * Define static partitions for flash device
- */
-static struct mtd_partition partition_info32[] = {
- {
- .name = "TS-BOOTROM",
- .offset = 0x00000000,
- .size = 0x00004000,
- }, {
- .name = "Linux",
- .offset = 0x00004000,
- .size = 0x01d00000,
- }, {
- .name = "RedBoot",
- .offset = 0x01d04000,
- .size = 0x002fc000,
- },
-};
-
-/*
- * Define static partitions for flash device
- */
-static struct mtd_partition partition_info128[] = {
- {
- .name = "TS-BOOTROM",
- .offset = 0x00000000,
- .size = 0x00004000,
- }, {
- .name = "Linux",
- .offset = 0x00004000,
- .size = 0x07d00000,
- }, {
- .name = "RedBoot",
- .offset = 0x07d04000,
- .size = 0x002fc000,
- },
-};
-#endif
-
-
-/*
- * hardware specific access to control-lines
- *
- * ctrl:
- * NAND_NCE: bit 0 -> bit 2
- * NAND_CLE: bit 1 -> bit 1
- * NAND_ALE: bit 2 -> bit 0
- */
-static void ts7250_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
-{
- struct nand_chip *chip = mtd->priv;
-
- if (ctrl & NAND_CTRL_CHANGE) {
- unsigned long addr = TS72XX_NAND_CONTROL_VIRT_BASE;
- unsigned char bits;
-
- bits = (ctrl & NAND_NCE) << 2;
- bits |= ctrl & NAND_CLE;
- bits |= (ctrl & NAND_ALE) >> 2;
-
- __raw_writeb((__raw_readb(addr) & ~0x7) | bits, addr);
- }
-
- if (cmd != NAND_CMD_NONE)
- writeb(cmd, chip->IO_ADDR_W);
-}
-
-/*
- * read device ready pin
- */
-static int ts7250_device_ready(struct mtd_info *mtd)
-{
- return __raw_readb(TS72XX_NAND_BUSY_VIRT_BASE) & 0x20;
-}
-
-/*
- * Main initialization routine
- */
-static int __init ts7250_init(void)
-{
- struct nand_chip *this;
- const char *part_type = 0;
- int mtd_parts_nb = 0;
- struct mtd_partition *mtd_parts = 0;
-
- if (!machine_is_ts72xx() || board_is_ts7200())
- return -ENXIO;
-
- /* Allocate memory for MTD device structure and private data */
- ts7250_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
- if (!ts7250_mtd) {
- printk("Unable to allocate TS7250 NAND MTD device structure.\n");
- return -ENOMEM;
- }
-
- /* Get pointer to private data */
- this = (struct nand_chip *)(&ts7250_mtd[1]);
-
- /* Initialize structures */
- memset(ts7250_mtd, 0, sizeof(struct mtd_info));
- memset(this, 0, sizeof(struct nand_chip));
-
- /* Link the private data with the MTD structure */
- ts7250_mtd->priv = this;
- ts7250_mtd->owner = THIS_MODULE;
-
- /* insert callbacks */
- this->IO_ADDR_R = (void *)TS72XX_NAND_DATA_VIRT_BASE;
- this->IO_ADDR_W = (void *)TS72XX_NAND_DATA_VIRT_BASE;
- this->cmd_ctrl = ts7250_hwcontrol;
- this->dev_ready = ts7250_device_ready;
- this->chip_delay = 15;
- this->ecc.mode = NAND_ECC_SOFT;
-
- printk("Searching for NAND flash...\n");
- /* Scan to find existence of the device */
- if (nand_scan(ts7250_mtd, 1)) {
- kfree(ts7250_mtd);
- return -ENXIO;
- }
-#ifdef CONFIG_MTD_PARTITIONS
- ts7250_mtd->name = "ts7250-nand";
- mtd_parts_nb = parse_mtd_partitions(ts7250_mtd, part_probes, &mtd_parts, 0);
- if (mtd_parts_nb > 0)
- part_type = "command line";
- else
- mtd_parts_nb = 0;
-#endif
- if (mtd_parts_nb == 0) {
- mtd_parts = partition_info32;
- if (ts7250_mtd->size >= (128 * 0x100000))
- mtd_parts = partition_info128;
- mtd_parts_nb = NUM_PARTITIONS;
- part_type = "static";
- }
-
- /* Register the partitions */
- printk(KERN_NOTICE "Using %s partition definition\n", part_type);
- add_mtd_partitions(ts7250_mtd, mtd_parts, mtd_parts_nb);
-
- /* Return happy */
- return 0;
-}
-
-module_init(ts7250_init);
-
-/*
- * Clean up routine
- */
-static void __exit ts7250_cleanup(void)
-{
- /* Unregister the device */
- del_mtd_device(ts7250_mtd);
-
- /* Free the MTD device structure */
- kfree(ts7250_mtd);
-}
-
-module_exit(ts7250_cleanup);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Jesse Off <joff@embeddedARM.com>");
-MODULE_DESCRIPTION("MTD map driver for Technologic Systems TS-7250 board");
diff --git a/drivers/mtd/nand/txx9ndfmc.c b/drivers/mtd/nand/txx9ndfmc.c
index 863513c3b69a..054a41c0ef4a 100644
--- a/drivers/mtd/nand/txx9ndfmc.c
+++ b/drivers/mtd/nand/txx9ndfmc.c
@@ -274,7 +274,7 @@ static int txx9ndfmc_nand_scan(struct mtd_info *mtd)
struct nand_chip *chip = mtd->priv;
int ret;
- ret = nand_scan_ident(mtd, 1);
+ ret = nand_scan_ident(mtd, 1, NULL);
if (!ret) {
if (mtd->writesize >= 512) {
chip->ecc.size = mtd->writesize;
diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c
index 1002e1882996..a4578bf903aa 100644
--- a/drivers/mtd/nftlcore.c
+++ b/drivers/mtd/nftlcore.c
@@ -126,7 +126,6 @@ static void nftl_remove_dev(struct mtd_blktrans_dev *dev)
del_mtd_blktrans_dev(dev);
kfree(nftl->ReplUnitTable);
kfree(nftl->EUNtable);
- kfree(nftl);
}
/*
diff --git a/drivers/mtd/onenand/Kconfig b/drivers/mtd/onenand/Kconfig
index 3a9f15784600..9a49d68ba5f9 100644
--- a/drivers/mtd/onenand/Kconfig
+++ b/drivers/mtd/onenand/Kconfig
@@ -30,6 +30,13 @@ config MTD_ONENAND_OMAP2
Support for a OneNAND flash device connected to an OMAP2/OMAP3 CPU
via the GPMC memory controller.
+config MTD_ONENAND_SAMSUNG
+ tristate "OneNAND on Samsung SOC controller support"
+ depends on MTD_ONENAND && (ARCH_S3C64XX || ARCH_S5PC100 || ARCH_S5PV210)
+ help
+ Support for a OneNAND flash device connected to a Samsung SOC
+ S3C64XX/S5PC1XX controller.
+
config MTD_ONENAND_OTP
bool "OneNAND OTP Support"
select HAVE_MTD_OTP
diff --git a/drivers/mtd/onenand/Makefile b/drivers/mtd/onenand/Makefile
index 64b6cc61a520..2b7884c7577e 100644
--- a/drivers/mtd/onenand/Makefile
+++ b/drivers/mtd/onenand/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_MTD_ONENAND) += onenand.o
# Board specific.
obj-$(CONFIG_MTD_ONENAND_GENERIC) += generic.o
obj-$(CONFIG_MTD_ONENAND_OMAP2) += omap2.o
+obj-$(CONFIG_MTD_ONENAND_SAMSUNG) += samsung.o
# Simulator
obj-$(CONFIG_MTD_ONENAND_SIM) += onenand_sim.o
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index fd406348fdfd..9f322f1a7f22 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -309,7 +309,7 @@ static int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
goto out_copy;
/* panic_write() may be in an interrupt context */
- if (in_interrupt())
+ if (in_interrupt() || oops_in_progress)
goto out_copy;
if (buf >= high_memory) {
@@ -386,7 +386,7 @@ static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
goto out_copy;
/* panic_write() may be in an interrupt context */
- if (in_interrupt())
+ if (in_interrupt() || oops_in_progress)
goto out_copy;
if (buf >= high_memory) {
@@ -403,7 +403,7 @@ static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
dma_src = dma_map_single(&c->pdev->dev, buf, count, DMA_TO_DEVICE);
dma_dst = c->phys_base + bram_offset;
- if (dma_mapping_error(&c->pdev->dev, dma_dst)) {
+ if (dma_mapping_error(&c->pdev->dev, dma_src)) {
dev_err(&c->pdev->dev,
"Couldn't DMA map a %d byte buffer\n",
count);
@@ -426,7 +426,7 @@ static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
if (*done)
break;
- dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_TO_DEVICE);
+ dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE);
if (!*done) {
dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
@@ -521,7 +521,7 @@ static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
dma_src = dma_map_single(&c->pdev->dev, (void *) buffer, count,
DMA_TO_DEVICE);
dma_dst = c->phys_base + bram_offset;
- if (dma_mapping_error(&c->pdev->dev, dma_dst)) {
+ if (dma_mapping_error(&c->pdev->dev, dma_src)) {
dev_err(&c->pdev->dev,
"Couldn't DMA map a %d byte buffer\n",
count);
@@ -539,7 +539,7 @@ static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
omap_start_dma(c->dma_channel);
wait_for_completion(&c->dma_done);
- dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_TO_DEVICE);
+ dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE);
return 0;
}
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index 32f0ed33afe0..26caf2590dae 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -397,7 +397,8 @@ static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t le
value = onenand_bufferram_address(this, block);
this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2);
- if (ONENAND_IS_MLC(this) || ONENAND_IS_2PLANE(this))
+ if (ONENAND_IS_MLC(this) || ONENAND_IS_2PLANE(this) ||
+ ONENAND_IS_4KB_PAGE(this))
/* It is always BufferRAM0 */
ONENAND_SET_BUFFERRAM0(this);
else
@@ -426,7 +427,7 @@ static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t le
case FLEXONENAND_CMD_RECOVER_LSB:
case ONENAND_CMD_READ:
case ONENAND_CMD_READOOB:
- if (ONENAND_IS_MLC(this))
+ if (ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this))
/* It is always BufferRAM0 */
dataram = ONENAND_SET_BUFFERRAM0(this);
else
@@ -466,11 +467,11 @@ static inline int onenand_read_ecc(struct onenand_chip *this)
{
int ecc, i, result = 0;
- if (!FLEXONENAND(this))
+ if (!FLEXONENAND(this) && !ONENAND_IS_4KB_PAGE(this))
return this->read_word(this->base + ONENAND_REG_ECC_STATUS);
for (i = 0; i < 4; i++) {
- ecc = this->read_word(this->base + ONENAND_REG_ECC_STATUS + i);
+ ecc = this->read_word(this->base + ONENAND_REG_ECC_STATUS + i*2);
if (likely(!ecc))
continue;
if (ecc & FLEXONENAND_UNCORRECTABLE_ERROR)
@@ -1425,7 +1426,7 @@ static int onenand_read(struct mtd_info *mtd, loff_t from, size_t len,
int ret;
onenand_get_device(mtd, FL_READING);
- ret = ONENAND_IS_MLC(this) ?
+ ret = ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this) ?
onenand_mlc_read_ops_nolock(mtd, from, &ops) :
onenand_read_ops_nolock(mtd, from, &ops);
onenand_release_device(mtd);
@@ -1460,7 +1461,7 @@ static int onenand_read_oob(struct mtd_info *mtd, loff_t from,
onenand_get_device(mtd, FL_READING);
if (ops->datbuf)
- ret = ONENAND_IS_MLC(this) ?
+ ret = ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this) ?
onenand_mlc_read_ops_nolock(mtd, from, ops) :
onenand_read_ops_nolock(mtd, from, ops);
else
@@ -1634,7 +1635,6 @@ static int onenand_verify_oob(struct mtd_info *mtd, const u_char *buf, loff_t to
static int onenand_verify(struct mtd_info *mtd, const u_char *buf, loff_t addr, size_t len)
{
struct onenand_chip *this = mtd->priv;
- void __iomem *dataram;
int ret = 0;
int thislen, column;
@@ -1654,10 +1654,9 @@ static int onenand_verify(struct mtd_info *mtd, const u_char *buf, loff_t addr,
onenand_update_bufferram(mtd, addr, 1);
- dataram = this->base + ONENAND_DATARAM;
- dataram += onenand_bufferram_offset(mtd, ONENAND_DATARAM);
+ this->read_bufferram(mtd, ONENAND_DATARAM, this->verify_buf, 0, mtd->writesize);
- if (memcmp(buf, dataram + column, thislen))
+ if (memcmp(buf, this->verify_buf, thislen))
return -EBADMSG;
len -= thislen;
@@ -1926,7 +1925,7 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
* 2 PLANE, MLC, and Flex-OneNAND do not support
* write-while-program feature.
*/
- if (!ONENAND_IS_2PLANE(this) && !first) {
+ if (!ONENAND_IS_2PLANE(this) && !ONENAND_IS_4KB_PAGE(this) && !first) {
ONENAND_SET_PREV_BUFFERRAM(this);
ret = this->wait(mtd, FL_WRITING);
@@ -1957,7 +1956,7 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
/*
* 2 PLANE, MLC, and Flex-OneNAND wait here
*/
- if (ONENAND_IS_2PLANE(this)) {
+ if (ONENAND_IS_2PLANE(this) || ONENAND_IS_4KB_PAGE(this)) {
ret = this->wait(mtd, FL_WRITING);
/* In partial page write we don't update bufferram */
@@ -2084,7 +2083,7 @@ static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to,
memcpy(oobbuf + column, buf, thislen);
this->write_bufferram(mtd, ONENAND_SPARERAM, oobbuf, 0, mtd->oobsize);
- if (ONENAND_IS_MLC(this)) {
+ if (ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this)) {
/* Set main area of DataRAM to 0xff*/
memset(this->page_buf, 0xff, mtd->writesize);
this->write_bufferram(mtd, ONENAND_DATARAM,
@@ -3027,7 +3026,7 @@ static int do_otp_read(struct mtd_info *mtd, loff_t from, size_t len,
this->command(mtd, ONENAND_CMD_OTP_ACCESS, 0, 0);
this->wait(mtd, FL_OTPING);
- ret = ONENAND_IS_MLC(this) ?
+ ret = ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this) ?
onenand_mlc_read_ops_nolock(mtd, from, &ops) :
onenand_read_ops_nolock(mtd, from, &ops);
@@ -3372,7 +3371,10 @@ static void onenand_check_features(struct mtd_info *mtd)
/* Lock scheme */
switch (density) {
case ONENAND_DEVICE_DENSITY_4Gb:
- this->options |= ONENAND_HAS_2PLANE;
+ if (ONENAND_IS_DDP(this))
+ this->options |= ONENAND_HAS_2PLANE;
+ else
+ this->options |= ONENAND_HAS_4KB_PAGE;
case ONENAND_DEVICE_DENSITY_2Gb:
/* 2Gb DDP does not have 2 plane */
@@ -3393,7 +3395,7 @@ static void onenand_check_features(struct mtd_info *mtd)
break;
}
- if (ONENAND_IS_MLC(this))
+ if (ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this))
this->options &= ~ONENAND_HAS_2PLANE;
if (FLEXONENAND(this)) {
@@ -3407,6 +3409,8 @@ static void onenand_check_features(struct mtd_info *mtd)
printk(KERN_DEBUG "Chip support all block unlock\n");
if (this->options & ONENAND_HAS_2PLANE)
printk(KERN_DEBUG "Chip has 2 plane\n");
+ if (this->options & ONENAND_HAS_4KB_PAGE)
+ printk(KERN_DEBUG "Chip has 4KiB pagesize\n");
}
/**
@@ -3759,6 +3763,12 @@ static int onenand_probe(struct mtd_info *mtd)
/* Restore system configuration 1 */
this->write_word(syscfg, this->base + ONENAND_REG_SYS_CFG1);
+ /* Workaround */
+ if (syscfg & ONENAND_SYS_CFG1_SYNC_WRITE) {
+ bram_maf_id = this->read_word(this->base + ONENAND_REG_MANUFACTURER_ID);
+ bram_dev_id = this->read_word(this->base + ONENAND_REG_DEVICE_ID);
+ }
+
/* Check manufacturer ID */
if (onenand_check_maf(bram_maf_id))
return -ENXIO;
@@ -3778,6 +3788,9 @@ static int onenand_probe(struct mtd_info *mtd)
this->device_id = dev_id;
this->version_id = ver_id;
+ /* Check OneNAND features */
+ onenand_check_features(mtd);
+
density = onenand_get_density(dev_id);
if (FLEXONENAND(this)) {
this->dies = ONENAND_IS_DDP(this) ? 2 : 1;
@@ -3799,7 +3812,7 @@ static int onenand_probe(struct mtd_info *mtd)
/* The data buffer size is equal to page size */
mtd->writesize = this->read_word(this->base + ONENAND_REG_DATA_BUFFER_SIZE);
/* We use the full BufferRAM */
- if (ONENAND_IS_MLC(this))
+ if (ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this))
mtd->writesize <<= 1;
mtd->oobsize = mtd->writesize >> 5;
@@ -3829,9 +3842,6 @@ static int onenand_probe(struct mtd_info *mtd)
else
mtd->size = this->chipsize;
- /* Check OneNAND features */
- onenand_check_features(mtd);
-
/*
* We emulate the 4KiB page and 256KiB erase block size
* But oobsize is still 64 bytes.
@@ -3926,6 +3936,13 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
__func__);
return -ENOMEM;
}
+#ifdef CONFIG_MTD_ONENAND_VERIFY_WRITE
+ this->verify_buf = kzalloc(mtd->writesize, GFP_KERNEL);
+ if (!this->verify_buf) {
+ kfree(this->page_buf);
+ return -ENOMEM;
+ }
+#endif
this->options |= ONENAND_PAGEBUF_ALLOC;
}
if (!this->oob_buf) {
@@ -4053,8 +4070,12 @@ void onenand_release(struct mtd_info *mtd)
kfree(this->bbm);
}
/* Buffers allocated by onenand_scan */
- if (this->options & ONENAND_PAGEBUF_ALLOC)
+ if (this->options & ONENAND_PAGEBUF_ALLOC) {
kfree(this->page_buf);
+#ifdef CONFIG_MTD_ONENAND_VERIFY_WRITE
+ kfree(this->verify_buf);
+#endif
+ }
if (this->options & ONENAND_OOBBUF_ALLOC)
kfree(this->oob_buf);
kfree(mtd->eraseregions);
diff --git a/drivers/mtd/onenand/samsung.c b/drivers/mtd/onenand/samsung.c
new file mode 100644
index 000000000000..2750317cb58f
--- /dev/null
+++ b/drivers/mtd/onenand/samsung.c
@@ -0,0 +1,1071 @@
+/*
+ * Samsung S3C64XX/S5PC1XX OneNAND driver
+ *
+ * Copyright © 2008-2010 Samsung Electronics
+ * Kyungmin Park <kyungmin.park@samsung.com>
+ * Marek Szyprowski <m.szyprowski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Implementation:
+ * S3C64XX and S5PC100: emulate the pseudo BufferRAM
+ * S5PC110: use DMA
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/onenand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/mach/flash.h>
+#include <plat/regs-onenand.h>
+
+#include <linux/io.h>
+
+enum soc_type {
+ TYPE_S3C6400,
+ TYPE_S3C6410,
+ TYPE_S5PC100,
+ TYPE_S5PC110,
+};
+
+#define ONENAND_ERASE_STATUS 0x00
+#define ONENAND_MULTI_ERASE_SET 0x01
+#define ONENAND_ERASE_START 0x03
+#define ONENAND_UNLOCK_START 0x08
+#define ONENAND_UNLOCK_END 0x09
+#define ONENAND_LOCK_START 0x0A
+#define ONENAND_LOCK_END 0x0B
+#define ONENAND_LOCK_TIGHT_START 0x0C
+#define ONENAND_LOCK_TIGHT_END 0x0D
+#define ONENAND_UNLOCK_ALL 0x0E
+#define ONENAND_OTP_ACCESS 0x12
+#define ONENAND_SPARE_ACCESS_ONLY 0x13
+#define ONENAND_MAIN_ACCESS_ONLY 0x14
+#define ONENAND_ERASE_VERIFY 0x15
+#define ONENAND_MAIN_SPARE_ACCESS 0x16
+#define ONENAND_PIPELINE_READ 0x4000
+
+#define MAP_00 (0x0)
+#define MAP_01 (0x1)
+#define MAP_10 (0x2)
+#define MAP_11 (0x3)
+
+#define S3C64XX_CMD_MAP_SHIFT 24
+#define S5PC1XX_CMD_MAP_SHIFT 26
+
+#define S3C6400_FBA_SHIFT 10
+#define S3C6400_FPA_SHIFT 4
+#define S3C6400_FSA_SHIFT 2
+
+#define S3C6410_FBA_SHIFT 12
+#define S3C6410_FPA_SHIFT 6
+#define S3C6410_FSA_SHIFT 4
+
+#define S5PC100_FBA_SHIFT 13
+#define S5PC100_FPA_SHIFT 7
+#define S5PC100_FSA_SHIFT 5
+
+/* S5PC110 specific definitions */
+#define S5PC110_DMA_SRC_ADDR 0x400
+#define S5PC110_DMA_SRC_CFG 0x404
+#define S5PC110_DMA_DST_ADDR 0x408
+#define S5PC110_DMA_DST_CFG 0x40C
+#define S5PC110_DMA_TRANS_SIZE 0x414
+#define S5PC110_DMA_TRANS_CMD 0x418
+#define S5PC110_DMA_TRANS_STATUS 0x41C
+#define S5PC110_DMA_TRANS_DIR 0x420
+
+#define S5PC110_DMA_CFG_SINGLE (0x0 << 16)
+#define S5PC110_DMA_CFG_4BURST (0x2 << 16)
+#define S5PC110_DMA_CFG_8BURST (0x3 << 16)
+#define S5PC110_DMA_CFG_16BURST (0x4 << 16)
+
+#define S5PC110_DMA_CFG_INC (0x0 << 8)
+#define S5PC110_DMA_CFG_CNT (0x1 << 8)
+
+#define S5PC110_DMA_CFG_8BIT (0x0 << 0)
+#define S5PC110_DMA_CFG_16BIT (0x1 << 0)
+#define S5PC110_DMA_CFG_32BIT (0x2 << 0)
+
+#define S5PC110_DMA_SRC_CFG_READ (S5PC110_DMA_CFG_16BURST | \
+ S5PC110_DMA_CFG_INC | \
+ S5PC110_DMA_CFG_16BIT)
+#define S5PC110_DMA_DST_CFG_READ (S5PC110_DMA_CFG_16BURST | \
+ S5PC110_DMA_CFG_INC | \
+ S5PC110_DMA_CFG_32BIT)
+#define S5PC110_DMA_SRC_CFG_WRITE (S5PC110_DMA_CFG_16BURST | \
+ S5PC110_DMA_CFG_INC | \
+ S5PC110_DMA_CFG_32BIT)
+#define S5PC110_DMA_DST_CFG_WRITE (S5PC110_DMA_CFG_16BURST | \
+ S5PC110_DMA_CFG_INC | \
+ S5PC110_DMA_CFG_16BIT)
+
+#define S5PC110_DMA_TRANS_CMD_TDC (0x1 << 18)
+#define S5PC110_DMA_TRANS_CMD_TEC (0x1 << 16)
+#define S5PC110_DMA_TRANS_CMD_TR (0x1 << 0)
+
+#define S5PC110_DMA_TRANS_STATUS_TD (0x1 << 18)
+#define S5PC110_DMA_TRANS_STATUS_TB (0x1 << 17)
+#define S5PC110_DMA_TRANS_STATUS_TE (0x1 << 16)
+
+#define S5PC110_DMA_DIR_READ 0x0
+#define S5PC110_DMA_DIR_WRITE 0x1
+
+struct s3c_onenand {
+ struct mtd_info *mtd;
+ struct platform_device *pdev;
+ enum soc_type type;
+ void __iomem *base;
+ struct resource *base_res;
+ void __iomem *ahb_addr;
+ struct resource *ahb_res;
+ int bootram_command;
+ void __iomem *page_buf;
+ void __iomem *oob_buf;
+ unsigned int (*mem_addr)(int fba, int fpa, int fsa);
+ unsigned int (*cmd_map)(unsigned int type, unsigned int val);
+ void __iomem *dma_addr;
+ struct resource *dma_res;
+ unsigned long phys_base;
+#ifdef CONFIG_MTD_PARTITIONS
+ struct mtd_partition *parts;
+#endif
+};
+
+#define CMD_MAP_00(dev, addr) (dev->cmd_map(MAP_00, ((addr) << 1)))
+#define CMD_MAP_01(dev, mem_addr) (dev->cmd_map(MAP_01, (mem_addr)))
+#define CMD_MAP_10(dev, mem_addr) (dev->cmd_map(MAP_10, (mem_addr)))
+#define CMD_MAP_11(dev, addr) (dev->cmd_map(MAP_11, ((addr) << 2)))
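+
+/*
+ * As used later in this file: MAP_01 accesses stream main/spare data for
+ * the addressed page, MAP_10 issues commands (erase, lock/unlock), and
+ * MAP_11 backs the emulated OneNAND register accesses in
+ * s3c_onenand_readw()/s3c_onenand_writew(). MAP_00 is defined but unused here.
+ */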
+
+static struct s3c_onenand *onenand;
+
+#ifdef CONFIG_MTD_PARTITIONS
+static const char *part_probes[] = { "cmdlinepart", NULL, };
+#endif
+
+static inline int s3c_read_reg(int offset)
+{
+ return readl(onenand->base + offset);
+}
+
+static inline void s3c_write_reg(int value, int offset)
+{
+ writel(value, onenand->base + offset);
+}
+
+static inline int s3c_read_cmd(unsigned int cmd)
+{
+ return readl(onenand->ahb_addr + cmd);
+}
+
+static inline void s3c_write_cmd(int value, unsigned int cmd)
+{
+ writel(value, onenand->ahb_addr + cmd);
+}
+
+#ifdef SAMSUNG_DEBUG
+static void s3c_dump_reg(void)
+{
+ int i;
+
+ for (i = 0; i < 0x400; i += 0x40) {
+ printk(KERN_INFO "0x%08X: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ (unsigned int) onenand->base + i,
+ s3c_read_reg(i), s3c_read_reg(i + 0x10),
+ s3c_read_reg(i + 0x20), s3c_read_reg(i + 0x30));
+ }
+}
+#endif
+
+static unsigned int s3c64xx_cmd_map(unsigned type, unsigned val)
+{
+ return (type << S3C64XX_CMD_MAP_SHIFT) | val;
+}
+
+static unsigned int s5pc1xx_cmd_map(unsigned type, unsigned val)
+{
+ return (type << S5PC1XX_CMD_MAP_SHIFT) | val;
+}
+
+static unsigned int s3c6400_mem_addr(int fba, int fpa, int fsa)
+{
+ return (fba << S3C6400_FBA_SHIFT) | (fpa << S3C6400_FPA_SHIFT) |
+ (fsa << S3C6400_FSA_SHIFT);
+}
+
+static unsigned int s3c6410_mem_addr(int fba, int fpa, int fsa)
+{
+ return (fba << S3C6410_FBA_SHIFT) | (fpa << S3C6410_FPA_SHIFT) |
+ (fsa << S3C6410_FSA_SHIFT);
+}
+
+static unsigned int s5pc100_mem_addr(int fba, int fpa, int fsa)
+{
+ return (fba << S5PC100_FBA_SHIFT) | (fpa << S5PC100_FPA_SHIFT) |
+ (fsa << S5PC100_FSA_SHIFT);
+}
+
+static void s3c_onenand_reset(void)
+{
+ unsigned long timeout = 0x10000;
+ int stat;
+
+ s3c_write_reg(ONENAND_MEM_RESET_COLD, MEM_RESET_OFFSET);
+ while (timeout--) {
+ stat = s3c_read_reg(INT_ERR_STAT_OFFSET);
+ if (stat & RST_CMP)
+ break;
+ }
+ stat = s3c_read_reg(INT_ERR_STAT_OFFSET);
+ s3c_write_reg(stat, INT_ERR_ACK_OFFSET);
+
+ /* Clear interrupt */
+ s3c_write_reg(0x0, INT_ERR_ACK_OFFSET);
+ /* Clear the ECC status */
+ s3c_write_reg(0x0, ECC_ERR_STAT_OFFSET);
+}
+
+static unsigned short s3c_onenand_readw(void __iomem *addr)
+{
+ struct onenand_chip *this = onenand->mtd->priv;
+ struct device *dev = &onenand->pdev->dev;
+ int reg = addr - this->base;
+ int word_addr = reg >> 1;
+ int value;
+
+ /* These register accesses are used at probe time */
+ switch (reg) {
+ case ONENAND_REG_MANUFACTURER_ID:
+ return s3c_read_reg(MANUFACT_ID_OFFSET);
+ case ONENAND_REG_DEVICE_ID:
+ return s3c_read_reg(DEVICE_ID_OFFSET);
+ case ONENAND_REG_VERSION_ID:
+ return s3c_read_reg(FLASH_VER_ID_OFFSET);
+ case ONENAND_REG_DATA_BUFFER_SIZE:
+ return s3c_read_reg(DATA_BUF_SIZE_OFFSET);
+ case ONENAND_REG_TECHNOLOGY:
+ return s3c_read_reg(TECH_OFFSET);
+ case ONENAND_REG_SYS_CFG1:
+ return s3c_read_reg(MEM_CFG_OFFSET);
+
+ /* Used at unlock all status */
+ case ONENAND_REG_CTRL_STATUS:
+ return 0;
+
+ case ONENAND_REG_WP_STATUS:
+ return ONENAND_WP_US;
+
+ default:
+ break;
+ }
+
+ /* BootRAM access control */
+ if ((unsigned int) addr < ONENAND_DATARAM && onenand->bootram_command) {
+ if (word_addr == 0)
+ return s3c_read_reg(MANUFACT_ID_OFFSET);
+ if (word_addr == 1)
+ return s3c_read_reg(DEVICE_ID_OFFSET);
+ if (word_addr == 2)
+ return s3c_read_reg(FLASH_VER_ID_OFFSET);
+ }
+
+ value = s3c_read_cmd(CMD_MAP_11(onenand, word_addr)) & 0xffff;
+ dev_info(dev, "%s: Illegal access at reg 0x%x, value 0x%x\n", __func__,
+ word_addr, value);
+ return value;
+}
+
+static void s3c_onenand_writew(unsigned short value, void __iomem *addr)
+{
+ struct onenand_chip *this = onenand->mtd->priv;
+ struct device *dev = &onenand->pdev->dev;
+ unsigned int reg = addr - this->base;
+ unsigned int word_addr = reg >> 1;
+
+ /* These register accesses are used at probe time */
+ switch (reg) {
+ case ONENAND_REG_SYS_CFG1:
+ s3c_write_reg(value, MEM_CFG_OFFSET);
+ return;
+
+ case ONENAND_REG_START_ADDRESS1:
+ case ONENAND_REG_START_ADDRESS2:
+ return;
+
+ /* Lock/lock-tight/unlock/unlock_all */
+ case ONENAND_REG_START_BLOCK_ADDRESS:
+ return;
+
+ default:
+ break;
+ }
+
+ /* BootRAM access control */
+ if ((unsigned int)addr < ONENAND_DATARAM) {
+ if (value == ONENAND_CMD_READID) {
+ onenand->bootram_command = 1;
+ return;
+ }
+ if (value == ONENAND_CMD_RESET) {
+ s3c_write_reg(ONENAND_MEM_RESET_COLD, MEM_RESET_OFFSET);
+ onenand->bootram_command = 0;
+ return;
+ }
+ }
+
+ dev_info(dev, "%s: Illegal access at reg 0x%x, value 0x%x\n", __func__,
+ word_addr, value);
+
+ s3c_write_cmd(value, CMD_MAP_11(onenand, word_addr));
+}
+
+static int s3c_onenand_wait(struct mtd_info *mtd, int state)
+{
+ struct device *dev = &onenand->pdev->dev;
+ unsigned int flags = INT_ACT;
+ unsigned int stat, ecc;
+ unsigned long timeout;
+
+ switch (state) {
+ case FL_READING:
+ flags |= BLK_RW_CMP | LOAD_CMP;
+ break;
+ case FL_WRITING:
+ flags |= BLK_RW_CMP | PGM_CMP;
+ break;
+ case FL_ERASING:
+ flags |= BLK_RW_CMP | ERS_CMP;
+ break;
+ case FL_LOCKING:
+ flags |= BLK_RW_CMP;
+ break;
+ default:
+ break;
+ }
+
+ /* The 20 msec is enough */
+ timeout = jiffies + msecs_to_jiffies(20);
+ while (time_before(jiffies, timeout)) {
+ stat = s3c_read_reg(INT_ERR_STAT_OFFSET);
+ if (stat & flags)
+ break;
+
+ if (state != FL_READING)
+ cond_resched();
+ }
+ /* To get correct interrupt status in timeout case */
+ stat = s3c_read_reg(INT_ERR_STAT_OFFSET);
+ s3c_write_reg(stat, INT_ERR_ACK_OFFSET);
+
+ /*
+ * The spec says to check the controller status first.
+ * However, to get correct information in the power-off recovery
+ * (POR) test case, the ECC status should be read first.
+ */
+ if (stat & LOAD_CMP) {
+ ecc = s3c_read_reg(ECC_ERR_STAT_OFFSET);
+ if (ecc & ONENAND_ECC_4BIT_UNCORRECTABLE) {
+ dev_info(dev, "%s: ECC error = 0x%04x\n", __func__,
+ ecc);
+ mtd->ecc_stats.failed++;
+ return -EBADMSG;
+ }
+ }
+
+ if (stat & (LOCKED_BLK | ERS_FAIL | PGM_FAIL | LD_FAIL_ECC_ERR)) {
+ dev_info(dev, "%s: controller error = 0x%04x\n", __func__,
+ stat);
+ if (stat & LOCKED_BLK)
+ dev_info(dev, "%s: it's locked error = 0x%04x\n",
+ __func__, stat);
+
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int s3c_onenand_command(struct mtd_info *mtd, int cmd, loff_t addr,
+ size_t len)
+{
+ struct onenand_chip *this = mtd->priv;
+ unsigned int *m, *s;
+ int fba, fpa, fsa = 0;
+ unsigned int mem_addr, cmd_map_01, cmd_map_10;
+ int i, mcount, scount;
+ int index;
+
+ fba = (int) (addr >> this->erase_shift);
+ fpa = (int) (addr >> this->page_shift);
+ fpa &= this->page_mask;
+
+ mem_addr = onenand->mem_addr(fba, fpa, fsa);
+ cmd_map_01 = CMD_MAP_01(onenand, mem_addr);
+ cmd_map_10 = CMD_MAP_10(onenand, mem_addr);
+
+ switch (cmd) {
+ case ONENAND_CMD_READ:
+ case ONENAND_CMD_READOOB:
+ case ONENAND_CMD_BUFFERRAM:
+ ONENAND_SET_NEXT_BUFFERRAM(this);
+ default:
+ break;
+ }
+
+ index = ONENAND_CURRENT_BUFFERRAM(this);
+
+ /*
+ * Emulate Two BufferRAMs and access with 4 bytes pointer
+ */
+ m = (unsigned int *) onenand->page_buf;
+ s = (unsigned int *) onenand->oob_buf;
+
+ if (index) {
+ m += (this->writesize >> 2);
+ s += (mtd->oobsize >> 2);
+ }
+
+ mcount = mtd->writesize >> 2;
+ scount = mtd->oobsize >> 2;
+
+ switch (cmd) {
+ case ONENAND_CMD_READ:
+ /* Main */
+ for (i = 0; i < mcount; i++)
+ *m++ = s3c_read_cmd(cmd_map_01);
+ return 0;
+
+ case ONENAND_CMD_READOOB:
+ s3c_write_reg(TSRF, TRANS_SPARE_OFFSET);
+ /* Main */
+ for (i = 0; i < mcount; i++)
+ *m++ = s3c_read_cmd(cmd_map_01);
+
+ /* Spare */
+ for (i = 0; i < scount; i++)
+ *s++ = s3c_read_cmd(cmd_map_01);
+
+ s3c_write_reg(0, TRANS_SPARE_OFFSET);
+ return 0;
+
+ case ONENAND_CMD_PROG:
+ /* Main */
+ for (i = 0; i < mcount; i++)
+ s3c_write_cmd(*m++, cmd_map_01);
+ return 0;
+
+ case ONENAND_CMD_PROGOOB:
+ s3c_write_reg(TSRF, TRANS_SPARE_OFFSET);
+
+ /* Main - dummy write */
+ for (i = 0; i < mcount; i++)
+ s3c_write_cmd(0xffffffff, cmd_map_01);
+
+ /* Spare */
+ for (i = 0; i < scount; i++)
+ s3c_write_cmd(*s++, cmd_map_01);
+
+ s3c_write_reg(0, TRANS_SPARE_OFFSET);
+ return 0;
+
+ case ONENAND_CMD_UNLOCK_ALL:
+ s3c_write_cmd(ONENAND_UNLOCK_ALL, cmd_map_10);
+ return 0;
+
+ case ONENAND_CMD_ERASE:
+ s3c_write_cmd(ONENAND_ERASE_START, cmd_map_10);
+ return 0;
+
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static unsigned char *s3c_get_bufferram(struct mtd_info *mtd, int area)
+{
+ struct onenand_chip *this = mtd->priv;
+ int index = ONENAND_CURRENT_BUFFERRAM(this);
+ unsigned char *p;
+
+ if (area == ONENAND_DATARAM) {
+ p = (unsigned char *) onenand->page_buf;
+ if (index == 1)
+ p += this->writesize;
+ } else {
+ p = (unsigned char *) onenand->oob_buf;
+ if (index == 1)
+ p += mtd->oobsize;
+ }
+
+ return p;
+}
+
+static int onenand_read_bufferram(struct mtd_info *mtd, int area,
+ unsigned char *buffer, int offset,
+ size_t count)
+{
+ unsigned char *p;
+
+ p = s3c_get_bufferram(mtd, area);
+ memcpy(buffer, p + offset, count);
+ return 0;
+}
+
+static int onenand_write_bufferram(struct mtd_info *mtd, int area,
+ const unsigned char *buffer, int offset,
+ size_t count)
+{
+ unsigned char *p;
+
+ p = s3c_get_bufferram(mtd, area);
+ memcpy(p + offset, buffer, count);
+ return 0;
+}
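+
+/*
+ * Note: the two "BufferRAMs" here are just the two halves of the
+ * driver-allocated page_buf/oob_buf (allocated in s3c_onenand_probe());
+ * the read/write hooks above memcpy to/from the half selected by
+ * ONENAND_CURRENT_BUFFERRAM(), which is how the pseudo BufferRAM mentioned
+ * in the header comment is emulated.
+ */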
+
+static int s5pc110_dma_ops(void *dst, void *src, size_t count, int direction)
+{
+ void __iomem *base = onenand->dma_addr;
+ int status;
+
+ writel(src, base + S5PC110_DMA_SRC_ADDR);
+ writel(dst, base + S5PC110_DMA_DST_ADDR);
+
+ if (direction == S5PC110_DMA_DIR_READ) {
+ writel(S5PC110_DMA_SRC_CFG_READ, base + S5PC110_DMA_SRC_CFG);
+ writel(S5PC110_DMA_DST_CFG_READ, base + S5PC110_DMA_DST_CFG);
+ } else {
+ writel(S5PC110_DMA_SRC_CFG_WRITE, base + S5PC110_DMA_SRC_CFG);
+ writel(S5PC110_DMA_DST_CFG_WRITE, base + S5PC110_DMA_DST_CFG);
+ }
+
+ writel(count, base + S5PC110_DMA_TRANS_SIZE);
+ writel(direction, base + S5PC110_DMA_TRANS_DIR);
+
+ writel(S5PC110_DMA_TRANS_CMD_TR, base + S5PC110_DMA_TRANS_CMD);
+
+ do {
+ status = readl(base + S5PC110_DMA_TRANS_STATUS);
+ } while (!(status & S5PC110_DMA_TRANS_STATUS_TD));
+
+ if (status & S5PC110_DMA_TRANS_STATUS_TE) {
+ writel(S5PC110_DMA_TRANS_CMD_TEC, base + S5PC110_DMA_TRANS_CMD);
+ writel(S5PC110_DMA_TRANS_CMD_TDC, base + S5PC110_DMA_TRANS_CMD);
+ return -EIO;
+ }
+
+ writel(S5PC110_DMA_TRANS_CMD_TDC, base + S5PC110_DMA_TRANS_CMD);
+
+ return 0;
+}
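+
+/*
+ * The handshake above: program the source/destination addresses and the
+ * burst/width configuration for the chosen direction, set the transfer size
+ * and direction, start the transfer with TR, busy-wait for the "transfer
+ * done" status bit, then acknowledge it with TDC (or TEC plus TDC if the
+ * error bit was set).
+ */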
+
+static int s5pc110_read_bufferram(struct mtd_info *mtd, int area,
+ unsigned char *buffer, int offset, size_t count)
+{
+ struct onenand_chip *this = mtd->priv;
+ void __iomem *bufferram;
+ void __iomem *p;
+ void *buf = (void *) buffer;
+ dma_addr_t dma_src, dma_dst;
+ int err;
+
+ p = bufferram = this->base + area;
+ if (ONENAND_CURRENT_BUFFERRAM(this)) {
+ if (area == ONENAND_DATARAM)
+ p += this->writesize;
+ else
+ p += mtd->oobsize;
+ }
+
+ if (offset & 3 || (size_t) buf & 3 ||
+ !onenand->dma_addr || count != mtd->writesize)
+ goto normal;
+
+ /* Handle vmalloc address */
+ if (buf >= high_memory) {
+ struct page *page;
+
+ if (((size_t) buf & PAGE_MASK) !=
+ ((size_t) (buf + count - 1) & PAGE_MASK))
+ goto normal;
+ page = vmalloc_to_page(buf);
+ if (!page)
+ goto normal;
+ buf = page_address(page) + ((size_t) buf & ~PAGE_MASK);
+ }
+
+ /* DMA routine */
+ dma_src = onenand->phys_base + (p - this->base);
+ dma_dst = dma_map_single(&onenand->pdev->dev,
+ buf, count, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&onenand->pdev->dev, dma_dst)) {
+ dev_err(&onenand->pdev->dev,
+ "Couldn't map a %d byte buffer for DMA\n", count);
+ goto normal;
+ }
+ err = s5pc110_dma_ops((void *) dma_dst, (void *) dma_src,
+ count, S5PC110_DMA_DIR_READ);
+ dma_unmap_single(&onenand->pdev->dev, dma_dst, count, DMA_FROM_DEVICE);
+
+ if (!err)
+ return 0;
+
+normal:
+ if (count != mtd->writesize) {
+ /* Copy the bufferram to memory to prevent unaligned access */
+ memcpy(this->page_buf, bufferram, mtd->writesize);
+ p = this->page_buf + offset;
+ }
+
+ memcpy(buffer, p, count);
+
+ return 0;
+}
+
+static int s3c_onenand_bbt_wait(struct mtd_info *mtd, int state)
+{
+ unsigned int flags = INT_ACT | LOAD_CMP;
+ unsigned int stat;
+ unsigned long timeout;
+
+ /* The 20 msec is enough */
+ timeout = jiffies + msecs_to_jiffies(20);
+ while (time_before(jiffies, timeout)) {
+ stat = s3c_read_reg(INT_ERR_STAT_OFFSET);
+ if (stat & flags)
+ break;
+ }
+ /* To get correct interrupt status in timeout case */
+ stat = s3c_read_reg(INT_ERR_STAT_OFFSET);
+ s3c_write_reg(stat, INT_ERR_ACK_OFFSET);
+
+ if (stat & LD_FAIL_ECC_ERR) {
+ s3c_onenand_reset();
+ return ONENAND_BBT_READ_ERROR;
+ }
+
+ if (stat & LOAD_CMP) {
+ int ecc = s3c_read_reg(ECC_ERR_STAT_OFFSET);
+ if (ecc & ONENAND_ECC_4BIT_UNCORRECTABLE) {
+ s3c_onenand_reset();
+ return ONENAND_BBT_READ_ERROR;
+ }
+ }
+
+ return 0;
+}
+
+static void s3c_onenand_check_lock_status(struct mtd_info *mtd)
+{
+ struct onenand_chip *this = mtd->priv;
+ struct device *dev = &onenand->pdev->dev;
+ unsigned int block, end;
+ int tmp;
+
+ end = this->chipsize >> this->erase_shift;
+
+ for (block = 0; block < end; block++) {
+ unsigned int mem_addr = onenand->mem_addr(block, 0, 0);
+ tmp = s3c_read_cmd(CMD_MAP_01(onenand, mem_addr));
+
+ if (s3c_read_reg(INT_ERR_STAT_OFFSET) & LOCKED_BLK) {
+ dev_err(dev, "block %d is write-protected!\n", block);
+ s3c_write_reg(LOCKED_BLK, INT_ERR_ACK_OFFSET);
+ }
+ }
+}
+
+static void s3c_onenand_do_lock_cmd(struct mtd_info *mtd, loff_t ofs,
+ size_t len, int cmd)
+{
+ struct onenand_chip *this = mtd->priv;
+ int start, end, start_mem_addr, end_mem_addr;
+
+ start = ofs >> this->erase_shift;
+ start_mem_addr = onenand->mem_addr(start, 0, 0);
+ end = start + (len >> this->erase_shift) - 1;
+ end_mem_addr = onenand->mem_addr(end, 0, 0);
+
+ if (cmd == ONENAND_CMD_LOCK) {
+ s3c_write_cmd(ONENAND_LOCK_START, CMD_MAP_10(onenand,
+ start_mem_addr));
+ s3c_write_cmd(ONENAND_LOCK_END, CMD_MAP_10(onenand,
+ end_mem_addr));
+ } else {
+ s3c_write_cmd(ONENAND_UNLOCK_START, CMD_MAP_10(onenand,
+ start_mem_addr));
+ s3c_write_cmd(ONENAND_UNLOCK_END, CMD_MAP_10(onenand,
+ end_mem_addr));
+ }
+
+ this->wait(mtd, FL_LOCKING);
+}
+
+static void s3c_unlock_all(struct mtd_info *mtd)
+{
+ struct onenand_chip *this = mtd->priv;
+ loff_t ofs = 0;
+ size_t len = this->chipsize;
+
+ if (this->options & ONENAND_HAS_UNLOCK_ALL) {
+ /* Write unlock command */
+ this->command(mtd, ONENAND_CMD_UNLOCK_ALL, 0, 0);
+
+ /* No need to check return value */
+ this->wait(mtd, FL_LOCKING);
+
+ /* Workaround for all block unlock in DDP */
+ if (!ONENAND_IS_DDP(this)) {
+ s3c_onenand_check_lock_status(mtd);
+ return;
+ }
+
+ /* All blocks on another chip */
+ ofs = this->chipsize >> 1;
+ len = this->chipsize >> 1;
+ }
+
+ s3c_onenand_do_lock_cmd(mtd, ofs, len, ONENAND_CMD_UNLOCK);
+
+ s3c_onenand_check_lock_status(mtd);
+}
+
+static void s3c_onenand_setup(struct mtd_info *mtd)
+{
+ struct onenand_chip *this = mtd->priv;
+
+ onenand->mtd = mtd;
+
+ if (onenand->type == TYPE_S3C6400) {
+ onenand->mem_addr = s3c6400_mem_addr;
+ onenand->cmd_map = s3c64xx_cmd_map;
+ } else if (onenand->type == TYPE_S3C6410) {
+ onenand->mem_addr = s3c6410_mem_addr;
+ onenand->cmd_map = s3c64xx_cmd_map;
+ } else if (onenand->type == TYPE_S5PC100) {
+ onenand->mem_addr = s5pc100_mem_addr;
+ onenand->cmd_map = s5pc1xx_cmd_map;
+ } else if (onenand->type == TYPE_S5PC110) {
+ /* Use generic onenand functions */
+ onenand->cmd_map = s5pc1xx_cmd_map;
+ this->read_bufferram = s5pc110_read_bufferram;
+ return;
+ } else {
+ BUG();
+ }
+
+ this->read_word = s3c_onenand_readw;
+ this->write_word = s3c_onenand_writew;
+
+ this->wait = s3c_onenand_wait;
+ this->bbt_wait = s3c_onenand_bbt_wait;
+ this->unlock_all = s3c_unlock_all;
+ this->command = s3c_onenand_command;
+
+ this->read_bufferram = onenand_read_bufferram;
+ this->write_bufferram = onenand_write_bufferram;
+}
+
+static int s3c_onenand_probe(struct platform_device *pdev)
+{
+ struct onenand_platform_data *pdata;
+ struct onenand_chip *this;
+ struct mtd_info *mtd;
+ struct resource *r;
+ int size, err;
+ unsigned long onenand_ctrl_cfg = 0;
+
+ pdata = pdev->dev.platform_data;
+ /* No need to check pdata; the platform data is optional */
+
+ size = sizeof(struct mtd_info) + sizeof(struct onenand_chip);
+ mtd = kzalloc(size, GFP_KERNEL);
+ if (!mtd) {
+ dev_err(&pdev->dev, "failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ onenand = kzalloc(sizeof(struct s3c_onenand), GFP_KERNEL);
+ if (!onenand) {
+ err = -ENOMEM;
+ goto onenand_fail;
+ }
+
+ this = (struct onenand_chip *) &mtd[1];
+ mtd->priv = this;
+ mtd->dev.parent = &pdev->dev;
+ mtd->owner = THIS_MODULE;
+ onenand->pdev = pdev;
+ onenand->type = platform_get_device_id(pdev)->driver_data;
+
+ s3c_onenand_setup(mtd);
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ dev_err(&pdev->dev, "no memory resource defined\n");
+ err = -ENOENT;
+ goto resource_failed;
+ }
+
+ onenand->base_res = request_mem_region(r->start, resource_size(r),
+ pdev->name);
+ if (!onenand->base_res) {
+ dev_err(&pdev->dev, "failed to request memory resource\n");
+ err = -EBUSY;
+ goto resource_failed;
+ }
+
+ onenand->base = ioremap(r->start, resource_size(r));
+ if (!onenand->base) {
+ dev_err(&pdev->dev, "failed to map memory resource\n");
+ err = -EFAULT;
+ goto ioremap_failed;
+ }
+ /* Set onenand_chip also */
+ this->base = onenand->base;
+
+ /* Use runtime badblock check */
+ this->options |= ONENAND_SKIP_UNLOCK_CHECK;
+
+ if (onenand->type != TYPE_S5PC110) {
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!r) {
+ dev_err(&pdev->dev, "no buffer memory resource defined\n");
+ err = -ENOENT;
+ goto ahb_resource_failed;
+ }
+
+ onenand->ahb_res = request_mem_region(r->start, resource_size(r),
+ pdev->name);
+ if (!onenand->ahb_res) {
+ dev_err(&pdev->dev, "failed to request buffer memory resource\n");
+ err = -EBUSY;
+ goto ahb_resource_failed;
+ }
+
+ onenand->ahb_addr = ioremap(r->start, resource_size(r));
+ if (!onenand->ahb_addr) {
+ dev_err(&pdev->dev, "failed to map buffer memory resource\n");
+ err = -EINVAL;
+ goto ahb_ioremap_failed;
+ }
+
+ /* Allocate 4KiB BufferRAM */
+ onenand->page_buf = kzalloc(SZ_4K, GFP_KERNEL);
+ if (!onenand->page_buf) {
+ err = -ENOMEM;
+ goto page_buf_fail;
+ }
+
+ /* Allocate 128 bytes of SpareRAM */
+ onenand->oob_buf = kzalloc(128, GFP_KERNEL);
+ if (!onenand->oob_buf) {
+ err = -ENOMEM;
+ goto oob_buf_fail;
+ }
+
+ /* S3C doesn't handle subpage write */
+ mtd->subpage_sft = 0;
+ this->subpagesize = mtd->writesize;
+
+ } else { /* S5PC110 */
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!r) {
+ dev_err(&pdev->dev, "no dma memory resource defined\n");
+ err = -ENOENT;
+ goto dma_resource_failed;
+ }
+
+ onenand->dma_res = request_mem_region(r->start, resource_size(r),
+ pdev->name);
+ if (!onenand->dma_res) {
+ dev_err(&pdev->dev, "failed to request dma memory resource\n");
+ err = -EBUSY;
+ goto dma_resource_failed;
+ }
+
+ onenand->dma_addr = ioremap(r->start, resource_size(r));
+ if (!onenand->dma_addr) {
+ dev_err(&pdev->dev, "failed to map dma memory resource\n");
+ err = -EINVAL;
+ goto dma_ioremap_failed;
+ }
+
+ onenand->phys_base = onenand->base_res->start;
+
+ onenand_ctrl_cfg = readl(onenand->dma_addr + 0x100);
+ if ((onenand_ctrl_cfg & ONENAND_SYS_CFG1_SYNC_WRITE) &&
+ onenand->dma_addr)
+ writel(onenand_ctrl_cfg & ~ONENAND_SYS_CFG1_SYNC_WRITE,
+ onenand->dma_addr + 0x100);
+ else
+ onenand_ctrl_cfg = 0;
+ }
+
+ if (onenand_scan(mtd, 1)) {
+ err = -EFAULT;
+ goto scan_failed;
+ }
+
+ if (onenand->type == TYPE_S5PC110) {
+ if (onenand_ctrl_cfg && onenand->dma_addr)
+ writel(onenand_ctrl_cfg, onenand->dma_addr + 0x100);
+ } else {
+ /* S3C doesn't handle subpage write */
+ mtd->subpage_sft = 0;
+ this->subpagesize = mtd->writesize;
+ }
+
+ if (s3c_read_reg(MEM_CFG_OFFSET) & ONENAND_SYS_CFG1_SYNC_READ)
+ dev_info(&onenand->pdev->dev, "OneNAND Sync. Burst Read enabled\n");
+
+#ifdef CONFIG_MTD_PARTITIONS
+ err = parse_mtd_partitions(mtd, part_probes, &onenand->parts, 0);
+ if (err > 0)
+ add_mtd_partitions(mtd, onenand->parts, err);
+ else if (err <= 0 && pdata && pdata->parts)
+ add_mtd_partitions(mtd, pdata->parts, pdata->nr_parts);
+ else
+#endif
+ err = add_mtd_device(mtd);
+
+ platform_set_drvdata(pdev, mtd);
+
+ return 0;
+
+scan_failed:
+ if (onenand->dma_addr)
+ iounmap(onenand->dma_addr);
+dma_ioremap_failed:
+ if (onenand->dma_res)
+ release_mem_region(onenand->dma_res->start,
+ resource_size(onenand->dma_res));
+ kfree(onenand->oob_buf);
+oob_buf_fail:
+ kfree(onenand->page_buf);
+page_buf_fail:
+ if (onenand->ahb_addr)
+ iounmap(onenand->ahb_addr);
+ahb_ioremap_failed:
+ if (onenand->ahb_res)
+ release_mem_region(onenand->ahb_res->start,
+ resource_size(onenand->ahb_res));
+dma_resource_failed:
+ahb_resource_failed:
+ iounmap(onenand->base);
+ioremap_failed:
+ if (onenand->base_res)
+ release_mem_region(onenand->base_res->start,
+ resource_size(onenand->base_res));
+resource_failed:
+ kfree(onenand);
+onenand_fail:
+ kfree(mtd);
+ return err;
+}
+
+static int __devexit s3c_onenand_remove(struct platform_device *pdev)
+{
+ struct mtd_info *mtd = platform_get_drvdata(pdev);
+
+ onenand_release(mtd);
+ if (onenand->ahb_addr)
+ iounmap(onenand->ahb_addr);
+ if (onenand->ahb_res)
+ release_mem_region(onenand->ahb_res->start,
+ resource_size(onenand->ahb_res));
+ if (onenand->dma_addr)
+ iounmap(onenand->dma_addr);
+ if (onenand->dma_res)
+ release_mem_region(onenand->dma_res->start,
+ resource_size(onenand->dma_res));
+
+ iounmap(onenand->base);
+ release_mem_region(onenand->base_res->start,
+ resource_size(onenand->base_res));
+
+ platform_set_drvdata(pdev, NULL);
+ kfree(onenand->oob_buf);
+ kfree(onenand->page_buf);
+ kfree(onenand);
+ kfree(mtd);
+ return 0;
+}
+
+static int s3c_pm_ops_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct mtd_info *mtd = platform_get_drvdata(pdev);
+ struct onenand_chip *this = mtd->priv;
+
+ this->wait(mtd, FL_PM_SUSPENDED);
+ return mtd->suspend(mtd);
+}
+
+static int s3c_pm_ops_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct mtd_info *mtd = platform_get_drvdata(pdev);
+ struct onenand_chip *this = mtd->priv;
+
+ mtd->resume(mtd);
+ this->unlock_all(mtd);
+ return 0;
+}
+
+static const struct dev_pm_ops s3c_pm_ops = {
+ .suspend = s3c_pm_ops_suspend,
+ .resume = s3c_pm_ops_resume,
+};
+
+static struct platform_device_id s3c_onenand_driver_ids[] = {
+ {
+ .name = "s3c6400-onenand",
+ .driver_data = TYPE_S3C6400,
+ }, {
+ .name = "s3c6410-onenand",
+ .driver_data = TYPE_S3C6410,
+ }, {
+ .name = "s5pc100-onenand",
+ .driver_data = TYPE_S5PC100,
+ }, {
+ .name = "s5pc110-onenand",
+ .driver_data = TYPE_S5PC110,
+ }, { },
+};
+MODULE_DEVICE_TABLE(platform, s3c_onenand_driver_ids);
+
+static struct platform_driver s3c_onenand_driver = {
+ .driver = {
+ .name = "samsung-onenand",
+ .pm = &s3c_pm_ops,
+ },
+ .id_table = s3c_onenand_driver_ids,
+ .probe = s3c_onenand_probe,
+ .remove = __devexit_p(s3c_onenand_remove),
+};
+
+static int __init s3c_onenand_init(void)
+{
+ return platform_driver_register(&s3c_onenand_driver);
+}
+
+static void __exit s3c_onenand_exit(void)
+{
+ platform_driver_unregister(&s3c_onenand_driver);
+}
+
+module_init(s3c_onenand_init);
+module_exit(s3c_onenand_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Kyungmin Park <kyungmin.park@samsung.com>");
+MODULE_DESCRIPTION("Samsung OneNAND controller support");
diff --git a/drivers/mtd/rfd_ftl.c b/drivers/mtd/rfd_ftl.c
index d2aa9c46530f..63b83c0d9a13 100644
--- a/drivers/mtd/rfd_ftl.c
+++ b/drivers/mtd/rfd_ftl.c
@@ -817,7 +817,6 @@ static void rfd_ftl_remove_dev(struct mtd_blktrans_dev *dev)
vfree(part->sector_map);
kfree(part->header_cache);
kfree(part->blocks);
- kfree(part);
}
static struct mtd_blktrans_ops rfd_ftl_tr = {
diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
new file mode 100644
index 000000000000..67822cf6c025
--- /dev/null
+++ b/drivers/mtd/sm_ftl.c
@@ -0,0 +1,1284 @@
+/*
+ * Copyright © 2009 - Maxim Levitsky
+ * SmartMedia/xD translation layer
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/random.h>
+#include <linux/hdreg.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
+#include <linux/sysfs.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/mtd/nand_ecc.h>
+#include "nand/sm_common.h"
+#include "sm_ftl.h"
+
+
+
+struct workqueue_struct *cache_flush_workqueue;
+
+static int cache_timeout = 1000;
+module_param(cache_timeout, int, S_IRUGO);
+MODULE_PARM_DESC(cache_timeout,
+ "Timeout (in ms) for cache flush (1000 ms default");
+
+static int debug;
+module_param(debug, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "Debug level (0-2)");
+
+
+/* ------------------- sysfs attributes ----------------------------------- */
+struct sm_sysfs_attribute {
+ struct device_attribute dev_attr;
+ char *data;
+ int len;
+};
+
+ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct sm_sysfs_attribute *sm_attr =
+ container_of(attr, struct sm_sysfs_attribute, dev_attr);
+
+ strncpy(buf, sm_attr->data, sm_attr->len);
+ return sm_attr->len;
+}
+
+
+#define NUM_ATTRIBUTES 1
+#define SM_CIS_VENDOR_OFFSET 0x59
+struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
+{
+ struct attribute_group *attr_group;
+ struct attribute **attributes;
+ struct sm_sysfs_attribute *vendor_attribute;
+
+ int vendor_len = strnlen(ftl->cis_buffer + SM_CIS_VENDOR_OFFSET,
+ SM_SMALL_PAGE - SM_CIS_VENDOR_OFFSET);
+
+ char *vendor = kmalloc(vendor_len + 1, GFP_KERNEL);
+ memcpy(vendor, ftl->cis_buffer + SM_CIS_VENDOR_OFFSET, vendor_len);
+ vendor[vendor_len] = 0;
+
+ /* Initialize sysfs attributes */
+ vendor_attribute =
+ kzalloc(sizeof(struct sm_sysfs_attribute), GFP_KERNEL);
+
+ sysfs_attr_init(&vendor_attribute->dev_attr.attr);
+
+ vendor_attribute->data = vendor;
+ vendor_attribute->len = vendor_len;
+ vendor_attribute->dev_attr.attr.name = "vendor";
+ vendor_attribute->dev_attr.attr.mode = S_IRUGO;
+ vendor_attribute->dev_attr.show = sm_attr_show;
+
+
+ /* Create array of pointers to the attributes */
+ attributes = kzalloc(sizeof(struct attribute *) * (NUM_ATTRIBUTES + 1),
+ GFP_KERNEL);
+ attributes[0] = &vendor_attribute->dev_attr.attr;
+
+ /* Finally create the attribute group */
+ attr_group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL);
+ attr_group->attrs = attributes;
+ return attr_group;
+}
+
+void sm_delete_sysfs_attributes(struct sm_ftl *ftl)
+{
+ struct attribute **attributes = ftl->disk_attributes->attrs;
+ int i;
+
+ for (i = 0; attributes[i] ; i++) {
+
+ struct device_attribute *dev_attr = container_of(attributes[i],
+ struct device_attribute, attr);
+
+ struct sm_sysfs_attribute *sm_attr =
+ container_of(dev_attr,
+ struct sm_sysfs_attribute, dev_attr);
+
+ kfree(sm_attr->data);
+ kfree(sm_attr);
+ }
+
+ kfree(ftl->disk_attributes->attrs);
+ kfree(ftl->disk_attributes);
+}
+
+
+/* ----------------------- oob helpers -------------------------------------- */
+
+static int sm_get_lba(uint8_t *lba)
+{
+ /* check fixed bits */
+ if ((lba[0] & 0xF8) != 0x10)
+ return -2;
+
+ /* check parity - endianness doesn't matter */
+ if (hweight16(*(uint16_t *)lba) & 1)
+ return -2;
+
+ return (lba[1] >> 1) | ((lba[0] & 0x07) << 7);
+}
+
+
+/*
+ * Read the LBA associated with a block
+ * returns -1, if block is erased
+ * returns -2 if error happens
+ */
+static int sm_read_lba(struct sm_oob *oob)
+{
+ static const uint32_t erased_pattern[4] = {
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };
+
+ uint16_t lba_test;
+ int lba;
+
+ /* First test for erased block */
+ if (!memcmp(oob, erased_pattern, SM_OOB_SIZE))
+ return -1;
+
+ /* Now check if the two copies of the LBA differ too much */
+ lba_test = *(uint16_t *)oob->lba_copy1 ^ *(uint16_t*)oob->lba_copy2;
+ if (lba_test && !is_power_of_2(lba_test))
+ return -2;
+
+ /* And read it */
+ lba = sm_get_lba(oob->lba_copy1);
+
+ if (lba == -2)
+ lba = sm_get_lba(oob->lba_copy2);
+
+ return lba;
+}
+
+static void sm_write_lba(struct sm_oob *oob, uint16_t lba)
+{
+ uint8_t tmp[2];
+
+ WARN_ON(lba >= 1000);
+
+ tmp[0] = 0x10 | ((lba >> 7) & 0x07);
+ tmp[1] = (lba << 1) & 0xFF;
+
+ if (hweight16(*(uint16_t *)tmp) & 0x01)
+ tmp[1] |= 1;
+
+ oob->lba_copy1[0] = oob->lba_copy2[0] = tmp[0];
+ oob->lba_copy1[1] = oob->lba_copy2[1] = tmp[1];
+}
+
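+/*
+ * Worked example (for illustration): sm_write_lba(oob, 5) gives
+ * tmp[0] = 0x10, tmp[1] = 0x0A; the combined popcount is 3 (odd), so the
+ * parity bit makes tmp[1] = 0x0B. sm_get_lba() then sees the fixed 0x10
+ * prefix and even overall parity, and recovers
+ * (0x0B >> 1) | ((0x10 & 0x07) << 7) = 5.
+ */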
+
+/* Make offset from parts */
+static loff_t sm_mkoffset(struct sm_ftl *ftl, int zone, int block, int boffset)
+{
+ WARN_ON(boffset & (SM_SECTOR_SIZE - 1));
+ WARN_ON(zone < 0 || zone >= ftl->zone_count);
+ WARN_ON(block >= ftl->zone_size);
+ WARN_ON(boffset >= ftl->block_size);
+
+ if (block == -1)
+ return -1;
+
+ return (zone * SM_MAX_ZONE_SIZE + block) * ftl->block_size + boffset;
+}
+
+/* Breaks offset into parts */
+static void sm_break_offset(struct sm_ftl *ftl, loff_t offset,
+ int *zone, int *block, int *boffset)
+{
+ *boffset = do_div(offset, ftl->block_size);
+ *block = do_div(offset, ftl->max_lba);
+ *zone = offset >= ftl->zone_count ? -1 : offset;
+}
+
+/* ---------------------- low level IO ------------------------------------- */
+
+static int sm_correct_sector(uint8_t *buffer, struct sm_oob *oob)
+{
+ uint8_t ecc[3];
+
+ __nand_calculate_ecc(buffer, SM_SMALL_PAGE, ecc);
+ if (__nand_correct_data(buffer, ecc, oob->ecc1, SM_SMALL_PAGE) < 0)
+ return -EIO;
+
+ buffer += SM_SMALL_PAGE;
+
+ __nand_calculate_ecc(buffer, SM_SMALL_PAGE, ecc);
+ if (__nand_correct_data(buffer, ecc, oob->ecc2, SM_SMALL_PAGE) < 0)
+ return -EIO;
+ return 0;
+}
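+
+/*
+ * Each 512-byte sector is corrected as two independent 256-byte
+ * (SM_SMALL_PAGE) halves with the standard software Hamming ECC from
+ * <linux/mtd/nand_ecc.h>: ecc1 covers the first half and ecc2 the second,
+ * matching the layout of struct sm_oob.
+ */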
+
+/* Reads a sector + oob*/
+static int sm_read_sector(struct sm_ftl *ftl,
+ int zone, int block, int boffset,
+ uint8_t *buffer, struct sm_oob *oob)
+{
+ struct mtd_info *mtd = ftl->trans->mtd;
+ struct mtd_oob_ops ops;
+ struct sm_oob tmp_oob;
+ int ret = -EIO;
+ int try = 0;
+
+ /* The FTL map can contain -1 (unmapped) entries; these read back as all 0xFF */
+ if (block == -1) {
+ memset(buffer, 0xFF, SM_SECTOR_SIZE);
+ return 0;
+ }
+
+ /* User might not need the oob, but we do for data verification */
+ if (!oob)
+ oob = &tmp_oob;
+
+ ops.mode = ftl->smallpagenand ? MTD_OOB_RAW : MTD_OOB_PLACE;
+ ops.ooboffs = 0;
+ ops.ooblen = SM_OOB_SIZE;
+ ops.oobbuf = (void *)oob;
+ ops.len = SM_SECTOR_SIZE;
+ ops.datbuf = buffer;
+
+again:
+ if (try++) {
+ /* Avoid infinite recursion on CIS reads, sm_recheck_media
+ won't help anyway */
+ if (zone == 0 && block == ftl->cis_block && boffset ==
+ ftl->cis_boffset)
+ return ret;
+
+ /* Test if media is stable */
+ if (try == 3 || sm_recheck_media(ftl))
+ return ret;
+ }
+
+ /* Unfortunately, an oob read will _always_ succeed,
+ even after card removal */
+ ret = mtd->read_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);
+
+ /* Test for unknown errors */
+ if (ret != 0 && ret != -EUCLEAN && ret != -EBADMSG) {
+ dbg("read of block %d at zone %d, failed due to error (%d)",
+ block, zone, ret);
+ goto again;
+ }
+
+ /* Do a basic test on the oob, to guard against returned garbage */
+ if (oob->reserved != 0xFFFFFFFF && !is_power_of_2(~oob->reserved))
+ goto again;
+
+ /* This should never happen, unless there is a bug in the mtd driver */
+ WARN_ON(ops.oobretlen != SM_OOB_SIZE);
+ WARN_ON(buffer && ops.retlen != SM_SECTOR_SIZE);
+
+ if (!buffer)
+ return 0;
+
+ /* Test if sector marked as bad */
+ if (!sm_sector_valid(oob)) {
+ dbg("read of block %d at zone %d, failed because it is marked"
+ " as bad" , block, zone);
+ goto again;
+ }
+
+ /* Test ECC*/
+ if (ret == -EBADMSG ||
+ (ftl->smallpagenand && sm_correct_sector(buffer, oob))) {
+
+ dbg("read of block %d at zone %d, failed due to ECC error",
+ block, zone);
+ goto again;
+ }
+
+ return 0;
+}
+
+/* Writes a sector to media */
+static int sm_write_sector(struct sm_ftl *ftl,
+ int zone, int block, int boffset,
+ uint8_t *buffer, struct sm_oob *oob)
+{
+ struct mtd_oob_ops ops;
+ struct mtd_info *mtd = ftl->trans->mtd;
+ int ret;
+
+ BUG_ON(ftl->readonly);
+
+ if (zone == 0 && (block == ftl->cis_block || block == 0)) {
+ dbg("attempted to write the CIS!");
+ return -EIO;
+ }
+
+ if (ftl->unstable)
+ return -EIO;
+
+ ops.mode = ftl->smallpagenand ? MTD_OOB_RAW : MTD_OOB_PLACE;
+ ops.len = SM_SECTOR_SIZE;
+ ops.datbuf = buffer;
+ ops.ooboffs = 0;
+ ops.ooblen = SM_OOB_SIZE;
+ ops.oobbuf = (void *)oob;
+
+ ret = mtd->write_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);
+
+ /* Now we assume that hardware will catch write bitflip errors */
+ /* If you are paranoid, use CONFIG_MTD_NAND_VERIFY_WRITE */
+
+ if (ret) {
+ dbg("write to block %d at zone %d, failed with error %d",
+ block, zone, ret);
+
+ sm_recheck_media(ftl);
+ return ret;
+ }
+
+ /* This should never happen, unless there is a bug in the driver */
+ WARN_ON(ops.oobretlen != SM_OOB_SIZE);
+ WARN_ON(buffer && ops.retlen != SM_SECTOR_SIZE);
+
+ return 0;
+}
+
+/* ------------------------ block IO ------------------------------------- */
+
+/* Write a block using data and lba, and invalid sector bitmap */
+static int sm_write_block(struct sm_ftl *ftl, uint8_t *buf,
+ int zone, int block, int lba,
+ unsigned long invalid_bitmap)
+{
+ struct sm_oob oob;
+ int boffset;
+ int retry = 0;
+
+ /* Initialize the oob with requested values */
+ memset(&oob, 0xFF, SM_OOB_SIZE);
+ sm_write_lba(&oob, lba);
+restart:
+ if (ftl->unstable)
+ return -EIO;
+
+ for (boffset = 0; boffset < ftl->block_size;
+ boffset += SM_SECTOR_SIZE) {
+
+ oob.data_status = 0xFF;
+
+ if (test_bit(boffset / SM_SECTOR_SIZE, &invalid_bitmap)) {
+
+ sm_printk("sector %d of block at LBA %d of zone %d"
+ " coudn't be read, marking it as invalid",
+ boffset / SM_SECTOR_SIZE, lba, zone);
+
+ oob.data_status = 0;
+ }
+
+ if (ftl->smallpagenand) {
+ __nand_calculate_ecc(buf + boffset,
+ SM_SMALL_PAGE, oob.ecc1);
+
+ __nand_calculate_ecc(buf + boffset + SM_SMALL_PAGE,
+ SM_SMALL_PAGE, oob.ecc2);
+ }
+ if (!sm_write_sector(ftl, zone, block, boffset,
+ buf + boffset, &oob))
+ continue;
+
+ if (!retry) {
+
+			/* If the write fails, try to erase the block */
+			/* This is safe, because we never write to blocks
+				that contain valuable data.
+				This is intended to repair blocks that are marked
+				as erased, but aren't fully erased */
+
+ if (sm_erase_block(ftl, zone, block, 0))
+ return -EIO;
+
+ retry = 1;
+ goto restart;
+ } else {
+ sm_mark_block_bad(ftl, zone, block);
+ return -EIO;
+ }
+ }
+ return 0;
+}
+
+
+/* Mark a whole block as bad. */
+static void sm_mark_block_bad(struct sm_ftl *ftl, int zone, int block)
+{
+ struct sm_oob oob;
+ int boffset;
+
+ memset(&oob, 0xFF, SM_OOB_SIZE);
+ oob.block_status = 0xF0;
+
+ if (ftl->unstable)
+ return;
+
+ if (sm_recheck_media(ftl))
+ return;
+
+ sm_printk("marking block %d of zone %d as bad", block, zone);
+
+ /* We aren't checking the return value, because we don't care */
+ /* This also fails on fake xD cards, but I guess these won't expose
+		any bad blocks till they fail completely */
+ for (boffset = 0; boffset < ftl->block_size; boffset += SM_SECTOR_SIZE)
+ sm_write_sector(ftl, zone, block, boffset, NULL, &oob);
+}
+
+/*
+ * Erase a block within a zone
+ * If erase succeeds, it updates free block fifo, otherwise marks block as bad
+ */
+static int sm_erase_block(struct sm_ftl *ftl, int zone_num, uint16_t block,
+ int put_free)
+{
+ struct ftl_zone *zone = &ftl->zones[zone_num];
+ struct mtd_info *mtd = ftl->trans->mtd;
+ struct erase_info erase;
+
+ erase.mtd = mtd;
+ erase.callback = sm_erase_callback;
+ erase.addr = sm_mkoffset(ftl, zone_num, block, 0);
+ erase.len = ftl->block_size;
+ erase.priv = (u_long)ftl;
+
+ if (ftl->unstable)
+ return -EIO;
+
+ BUG_ON(ftl->readonly);
+
+ if (zone_num == 0 && (block == ftl->cis_block || block == 0)) {
+ sm_printk("attempted to erase the CIS!");
+ return -EIO;
+ }
+
+ if (mtd->erase(mtd, &erase)) {
+ sm_printk("erase of block %d in zone %d failed",
+ block, zone_num);
+ goto error;
+ }
+
+ if (erase.state == MTD_ERASE_PENDING)
+ wait_for_completion(&ftl->erase_completion);
+
+ if (erase.state != MTD_ERASE_DONE) {
+ sm_printk("erase of block %d in zone %d failed after wait",
+ block, zone_num);
+ goto error;
+ }
+
+ if (put_free)
+ kfifo_in(&zone->free_sectors,
+ (const unsigned char *)&block, sizeof(block));
+
+ return 0;
+error:
+ sm_mark_block_bad(ftl, zone_num, block);
+ return -EIO;
+}
+
+static void sm_erase_callback(struct erase_info *self)
+{
+ struct sm_ftl *ftl = (struct sm_ftl *)self->priv;
+ complete(&ftl->erase_completion);
+}
+
+/* Thoroughly test that a block is valid. */
+static int sm_check_block(struct sm_ftl *ftl, int zone, int block)
+{
+ int boffset;
+ struct sm_oob oob;
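+	/* lbas[] tracks the different LBA values found while walking the
+		block; -3 is a sentinel that can never match a value returned
+		by sm_read_lba() */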
+ int lbas[] = { -3, 0, 0, 0 };
+ int i = 0;
+ int test_lba;
+
+
+	/* First just check that the block doesn't look fishy */
+	/* Only blocks that are valid, or are sliced in two parts, are
+		accepted */
+ for (boffset = 0; boffset < ftl->block_size;
+ boffset += SM_SECTOR_SIZE) {
+
+		/* This shouldn't happen anyway */
+ if (sm_read_sector(ftl, zone, block, boffset, NULL, &oob))
+ return -2;
+
+ test_lba = sm_read_lba(&oob);
+
+ if (lbas[i] != test_lba)
+ lbas[++i] = test_lba;
+
+ /* If we found three different LBAs, something is fishy */
+ if (i == 3)
+ return -EIO;
+ }
+
+	/* If the block is sliced (usually partially erased), erase it */
+ if (i == 2) {
+ sm_erase_block(ftl, zone, block, 1);
+ return 1;
+ }
+
+ return 0;
+}
+
+/* ----------------- media scanning --------------------------------- */
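+/* Fake CHS geometry reported for each supported card size:
+	size (MiB), cylinders, heads, sectors */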
+static const struct chs_entry chs_table[] = {
+ { 1, 125, 4, 4 },
+ { 2, 125, 4, 8 },
+ { 4, 250, 4, 8 },
+ { 8, 250, 4, 16 },
+ { 16, 500, 4, 16 },
+ { 32, 500, 8, 16 },
+ { 64, 500, 8, 32 },
+ { 128, 500, 16, 32 },
+ { 256, 1000, 16, 32 },
+ { 512, 1015, 32, 63 },
+ { 1024, 985, 33, 63 },
+ { 2048, 985, 33, 63 },
+ { 0 },
+};
+
+
+static const uint8_t cis_signature[] = {
+ 0x01, 0x03, 0xD9, 0x01, 0xFF, 0x18, 0x02, 0xDF, 0x01, 0x20
+};
+/* Find out media parameters.
+ * This ideally should be based on the nand id, but for now the device size is enough */
+int sm_get_media_info(struct sm_ftl *ftl, struct mtd_info *mtd)
+{
+ int i;
+ int size_in_megs = mtd->size / (1024 * 1024);
+
+ ftl->readonly = mtd->type == MTD_ROM;
+
+ /* Manual settings for very old devices */
+ ftl->zone_count = 1;
+ ftl->smallpagenand = 0;
+
+ switch (size_in_megs) {
+ case 1:
+		/* 1 MiB flash/rom SmartMedia card (256 byte pages) */
+ ftl->zone_size = 256;
+ ftl->max_lba = 250;
+ ftl->block_size = 8 * SM_SECTOR_SIZE;
+ ftl->smallpagenand = 1;
+
+ break;
+ case 2:
+		/* 2 MiB flash SmartMedia (256 byte pages) */
+ if (mtd->writesize == SM_SMALL_PAGE) {
+ ftl->zone_size = 512;
+ ftl->max_lba = 500;
+ ftl->block_size = 8 * SM_SECTOR_SIZE;
+ ftl->smallpagenand = 1;
+ /* 2 MiB rom SmartMedia */
+ } else {
+
+ if (!ftl->readonly)
+ return -ENODEV;
+
+ ftl->zone_size = 256;
+ ftl->max_lba = 250;
+ ftl->block_size = 16 * SM_SECTOR_SIZE;
+ }
+ break;
+ case 4:
+ /* 4 MiB flash/rom SmartMedia device */
+ ftl->zone_size = 512;
+ ftl->max_lba = 500;
+ ftl->block_size = 16 * SM_SECTOR_SIZE;
+ break;
+ case 8:
+ /* 8 MiB flash/rom SmartMedia device */
+ ftl->zone_size = 1024;
+ ftl->max_lba = 1000;
+ ftl->block_size = 16 * SM_SECTOR_SIZE;
+ }
+
+	/* Minimum xD size is 16 MiB. Also, all xD cards have standard zone
+		sizes. SmartMedia cards exist up to 128 MiB and have the same layout */
+ if (size_in_megs >= 16) {
+ ftl->zone_count = size_in_megs / 16;
+ ftl->zone_size = 1024;
+ ftl->max_lba = 1000;
+ ftl->block_size = 32 * SM_SECTOR_SIZE;
+ }
+
+	/* Test for proper write, erase and oob sizes */
+ if (mtd->erasesize > ftl->block_size)
+ return -ENODEV;
+
+ if (mtd->writesize > SM_SECTOR_SIZE)
+ return -ENODEV;
+
+ if (ftl->smallpagenand && mtd->oobsize < SM_SMALL_OOB_SIZE)
+ return -ENODEV;
+
+ if (!ftl->smallpagenand && mtd->oobsize < SM_OOB_SIZE)
+ return -ENODEV;
+
+ /* We use these functions for IO */
+ if (!mtd->read_oob || !mtd->write_oob)
+ return -ENODEV;
+
+ /* Find geometry information */
+ for (i = 0 ; i < ARRAY_SIZE(chs_table) ; i++) {
+ if (chs_table[i].size == size_in_megs) {
+ ftl->cylinders = chs_table[i].cyl;
+ ftl->heads = chs_table[i].head;
+ ftl->sectors = chs_table[i].sec;
+ return 0;
+ }
+ }
+
+ sm_printk("media has unknown size : %dMiB", size_in_megs);
+ ftl->cylinders = 985;
+ ftl->heads = 33;
+ ftl->sectors = 63;
+ return 0;
+}
+
+/* Validate the CIS */
+static int sm_read_cis(struct sm_ftl *ftl)
+{
+ struct sm_oob oob;
+
+ if (sm_read_sector(ftl,
+ 0, ftl->cis_block, ftl->cis_boffset, ftl->cis_buffer, &oob))
+ return -EIO;
+
+ if (!sm_sector_valid(&oob) || !sm_block_valid(&oob))
+ return -EIO;
+
+ if (!memcmp(ftl->cis_buffer + ftl->cis_page_offset,
+ cis_signature, sizeof(cis_signature))) {
+ return 0;
+ }
+
+ return -EIO;
+}
+
+/* Scan the media for the CIS */
+static int sm_find_cis(struct sm_ftl *ftl)
+{
+ struct sm_oob oob;
+ int block, boffset;
+ int block_found = 0;
+ int cis_found = 0;
+
+ /* Search for first valid block */
+ for (block = 0 ; block < ftl->zone_size - ftl->max_lba ; block++) {
+
+ if (sm_read_sector(ftl, 0, block, 0, NULL, &oob))
+ continue;
+
+ if (!sm_block_valid(&oob))
+ continue;
+ block_found = 1;
+ break;
+ }
+
+ if (!block_found)
+ return -EIO;
+
+ /* Search for first valid sector in this block */
+ for (boffset = 0 ; boffset < ftl->block_size;
+ boffset += SM_SECTOR_SIZE) {
+
+ if (sm_read_sector(ftl, 0, block, boffset, NULL, &oob))
+ continue;
+
+ if (!sm_sector_valid(&oob))
+ continue;
+ break;
+ }
+
+ if (boffset == ftl->block_size)
+ return -EIO;
+
+ ftl->cis_block = block;
+ ftl->cis_boffset = boffset;
+ ftl->cis_page_offset = 0;
+
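+	/* The CIS signature may be at page offset 0 or at SM_SMALL_PAGE
+		within the sector, so try both locations */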
+ cis_found = !sm_read_cis(ftl);
+
+ if (!cis_found) {
+ ftl->cis_page_offset = SM_SMALL_PAGE;
+ cis_found = !sm_read_cis(ftl);
+ }
+
+ if (cis_found) {
+ dbg("CIS block found at offset %x",
+ block * ftl->block_size +
+ boffset + ftl->cis_page_offset);
+ return 0;
+ }
+ return -EIO;
+}
+
+/* Basic test to determine if the underlying mtd device is functional */
+static int sm_recheck_media(struct sm_ftl *ftl)
+{
+ if (sm_read_cis(ftl)) {
+
+ if (!ftl->unstable) {
+ sm_printk("media unstable, not allowing writes");
+ ftl->unstable = 1;
+ }
+ return -EIO;
+ }
+ return 0;
+}
+
+/* Initialize a FTL zone */
+static int sm_init_zone(struct sm_ftl *ftl, int zone_num)
+{
+ struct ftl_zone *zone = &ftl->zones[zone_num];
+ struct sm_oob oob;
+ uint16_t block;
+ int lba;
+ int i = 0;
+ int len;
+
+ dbg("initializing zone %d", zone_num);
+
+ /* Allocate memory for FTL table */
+ zone->lba_to_phys_table = kmalloc(ftl->max_lba * 2, GFP_KERNEL);
+
+ if (!zone->lba_to_phys_table)
+ return -ENOMEM;
+ memset(zone->lba_to_phys_table, -1, ftl->max_lba * 2);
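+	/* -1 (0xFFFF) means the LBA is not mapped to any physical block yet */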
+
+
+ /* Allocate memory for free sectors FIFO */
+ if (kfifo_alloc(&zone->free_sectors, ftl->zone_size * 2, GFP_KERNEL)) {
+ kfree(zone->lba_to_phys_table);
+ return -ENOMEM;
+ }
+
+ /* Now scan the zone */
+ for (block = 0 ; block < ftl->zone_size ; block++) {
+
+		/* Skip blocks up to and including the CIS */
+ if (zone_num == 0 && block <= ftl->cis_block)
+ continue;
+
+ /* Read the oob of first sector */
+ if (sm_read_sector(ftl, zone_num, block, 0, NULL, &oob))
+ return -EIO;
+
+ /* Test to see if block is erased. It is enough to test
+ first sector, because erase happens in one shot */
+ if (sm_block_erased(&oob)) {
+ kfifo_in(&zone->free_sectors,
+ (unsigned char *)&block, 2);
+ continue;
+ }
+
+ /* If block is marked as bad, skip it */
+		/* This assumes we can trust the first sector */
+		/* However, the way the block valid status is defined ensures a
+			very low probability of failure here */
+ if (!sm_block_valid(&oob)) {
+ dbg("PH %04d <-> <marked bad>", block);
+ continue;
+ }
+
+
+ lba = sm_read_lba(&oob);
+
+ /* Invalid LBA means that block is damaged. */
+ /* We can try to erase it, or mark it as bad, but
+			let's leave that to a recovery application */
+ if (lba == -2 || lba >= ftl->max_lba) {
+ dbg("PH %04d <-> LBA %04d(bad)", block, lba);
+ continue;
+ }
+
+
+ /* If there is no collision,
+ just put the sector in the FTL table */
+ if (zone->lba_to_phys_table[lba] < 0) {
+ dbg_verbose("PH %04d <-> LBA %04d", block, lba);
+ zone->lba_to_phys_table[lba] = block;
+ continue;
+ }
+
+ sm_printk("collision"
+ " of LBA %d between blocks %d and %d in zone %d",
+ lba, zone->lba_to_phys_table[lba], block, zone_num);
+
+ /* Test that this block is valid*/
+ if (sm_check_block(ftl, zone_num, block))
+ continue;
+
+ /* Test now the old block */
+ if (sm_check_block(ftl, zone_num,
+ zone->lba_to_phys_table[lba])) {
+ zone->lba_to_phys_table[lba] = block;
+ continue;
+ }
+
+		/* If both blocks are valid and share the same LBA, it means that
+			they hold different versions of the same data. It is not
+			known which is more recent, thus just erase one of them
+ */
+ sm_printk("both blocks are valid, erasing the later");
+ sm_erase_block(ftl, zone_num, block, 1);
+ }
+
+ dbg("zone initialized");
+ zone->initialized = 1;
+
+	/* No free sectors means that the zone is heavily damaged; writes won't
+		work, but it can still be (partially) read */
+ if (!kfifo_len(&zone->free_sectors)) {
+ sm_printk("no free blocks in zone %d", zone_num);
+ return 0;
+ }
+
+ /* Randomize first block we write to */
+ get_random_bytes(&i, 2);
+ i %= (kfifo_len(&zone->free_sectors) / 2);
+
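+	/* Rotate the free-blocks fifo by a random amount, so that writes
+		don't always start from the same physical block (crude wear
+		leveling across power cycles) */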
+ while (i--) {
+ len = kfifo_out(&zone->free_sectors,
+ (unsigned char *)&block, 2);
+ WARN_ON(len != 2);
+ kfifo_in(&zone->free_sectors, (const unsigned char *)&block, 2);
+ }
+ return 0;
+}
+
+/* Get and automatically initialize an FTL mapping for one zone */
+struct ftl_zone *sm_get_zone(struct sm_ftl *ftl, int zone_num)
+{
+ struct ftl_zone *zone;
+ int error;
+
+ BUG_ON(zone_num >= ftl->zone_count);
+ zone = &ftl->zones[zone_num];
+
+ if (!zone->initialized) {
+ error = sm_init_zone(ftl, zone_num);
+
+ if (error)
+ return ERR_PTR(error);
+ }
+ return zone;
+}
+
+
+/* ----------------- cache handling ------------------------------------------*/
+
+/* Initialize the one block cache */
+void sm_cache_init(struct sm_ftl *ftl)
+{
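+	/* All sectors of the cache start out invalid (not yet written) */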
+ ftl->cache_data_invalid_bitmap = 0xFFFFFFFF;
+ ftl->cache_clean = 1;
+ ftl->cache_zone = -1;
+ ftl->cache_block = -1;
+ /*memset(ftl->cache_data, 0xAA, ftl->block_size);*/
+}
+
+/* Put sector in one block cache */
+void sm_cache_put(struct sm_ftl *ftl, char *buffer, int boffset)
+{
+ memcpy(ftl->cache_data + boffset, buffer, SM_SECTOR_SIZE);
+ clear_bit(boffset / SM_SECTOR_SIZE, &ftl->cache_data_invalid_bitmap);
+ ftl->cache_clean = 0;
+}
+
+/* Read a sector from the cache */
+int sm_cache_get(struct sm_ftl *ftl, char *buffer, int boffset)
+{
+ if (test_bit(boffset / SM_SECTOR_SIZE,
+ &ftl->cache_data_invalid_bitmap))
+ return -1;
+
+ memcpy(buffer, ftl->cache_data + boffset, SM_SECTOR_SIZE);
+ return 0;
+}
+
+/* Write the cache to hardware */
+int sm_cache_flush(struct sm_ftl *ftl)
+{
+ struct ftl_zone *zone;
+
+ int sector_num;
+ uint16_t write_sector;
+ int zone_num = ftl->cache_zone;
+ int block_num;
+
+ if (ftl->cache_clean)
+ return 0;
+
+ if (ftl->unstable)
+ return -EIO;
+
+ BUG_ON(zone_num < 0);
+ zone = &ftl->zones[zone_num];
+ block_num = zone->lba_to_phys_table[ftl->cache_block];
+
+
+ /* Try to read all unread areas of the cache block*/
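+	/* Sectors that still can't be read stay set in the bitmap and will
+		have their data status marked invalid in the new block */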
+ for_each_set_bit(sector_num, &ftl->cache_data_invalid_bitmap,
+ ftl->block_size / SM_SECTOR_SIZE) {
+
+ if (!sm_read_sector(ftl,
+ zone_num, block_num, sector_num * SM_SECTOR_SIZE,
+ ftl->cache_data + sector_num * SM_SECTOR_SIZE, NULL))
+ clear_bit(sector_num,
+ &ftl->cache_data_invalid_bitmap);
+ }
+restart:
+
+ if (ftl->unstable)
+ return -EIO;
+
+ /* If there are no spare blocks, */
+ /* we could still continue by erasing/writing the current block,
+		but for such worn out media it isn't worth the trouble,
+		and the danger */
+ if (kfifo_out(&zone->free_sectors,
+ (unsigned char *)&write_sector, 2) != 2) {
+ dbg("no free sectors for write!");
+ return -EIO;
+ }
+
+
+ if (sm_write_block(ftl, ftl->cache_data, zone_num, write_sector,
+ ftl->cache_block, ftl->cache_data_invalid_bitmap))
+ goto restart;
+
+ /* Update the FTL table */
+ zone->lba_to_phys_table[ftl->cache_block] = write_sector;
+
+	/* Write successful, so erase and free the old block */
+ if (block_num > 0)
+ sm_erase_block(ftl, zone_num, block_num, 1);
+
+ sm_cache_init(ftl);
+ return 0;
+}
+
+
+/* flush timer, runs a second after last write */
+static void sm_cache_flush_timer(unsigned long data)
+{
+ struct sm_ftl *ftl = (struct sm_ftl *)data;
+ queue_work(cache_flush_workqueue, &ftl->flush_work);
+}
+
+/* cache flush work, kicked by timer */
+static void sm_cache_flush_work(struct work_struct *work)
+{
+ struct sm_ftl *ftl = container_of(work, struct sm_ftl, flush_work);
+ mutex_lock(&ftl->mutex);
+ sm_cache_flush(ftl);
+ mutex_unlock(&ftl->mutex);
+ return;
+}
+
+/* ---------------- outside interface -------------------------------------- */
+
+/* outside interface: read a sector */
+static int sm_read(struct mtd_blktrans_dev *dev,
+ unsigned long sect_no, char *buf)
+{
+ struct sm_ftl *ftl = dev->priv;
+ struct ftl_zone *zone;
+ int error = 0, in_cache = 0;
+ int zone_num, block, boffset;
+
+ sm_break_offset(ftl, sect_no << 9, &zone_num, &block, &boffset);
+ mutex_lock(&ftl->mutex);
+
+
+ zone = sm_get_zone(ftl, zone_num);
+ if (IS_ERR(zone)) {
+ error = PTR_ERR(zone);
+ goto unlock;
+ }
+
+ /* Have to look at cache first */
+ if (ftl->cache_zone == zone_num && ftl->cache_block == block) {
+ in_cache = 1;
+ if (!sm_cache_get(ftl, buf, boffset))
+ goto unlock;
+ }
+
+	/* Translate the block and return if it doesn't exist in the table */
+ block = zone->lba_to_phys_table[block];
+
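+	/* Unmapped LBAs read back as erased flash (all 0xFF) */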
+ if (block == -1) {
+ memset(buf, 0xFF, SM_SECTOR_SIZE);
+ goto unlock;
+ }
+
+ if (sm_read_sector(ftl, zone_num, block, boffset, buf, NULL)) {
+ error = -EIO;
+ goto unlock;
+ }
+
+ if (in_cache)
+ sm_cache_put(ftl, buf, boffset);
+unlock:
+ mutex_unlock(&ftl->mutex);
+ return error;
+}
+
+/* outside interface: write a sector */
+static int sm_write(struct mtd_blktrans_dev *dev,
+ unsigned long sec_no, char *buf)
+{
+ struct sm_ftl *ftl = dev->priv;
+ struct ftl_zone *zone;
+	int error = 0, zone_num, block, boffset;
+
+ BUG_ON(ftl->readonly);
+ sm_break_offset(ftl, sec_no << 9, &zone_num, &block, &boffset);
+
+	/* No need to keep the cache flush timer running now */
+ del_timer(&ftl->timer);
+ mutex_lock(&ftl->mutex);
+
+ zone = sm_get_zone(ftl, zone_num);
+ if (IS_ERR(zone)) {
+ error = PTR_ERR(zone);
+ goto unlock;
+ }
+
+	/* If the block being written is not the cached one, flush the cache first */
+ if (ftl->cache_block != block || ftl->cache_zone != zone_num) {
+
+ error = sm_cache_flush(ftl);
+ if (error)
+ goto unlock;
+
+ ftl->cache_block = block;
+ ftl->cache_zone = zone_num;
+ }
+
+ sm_cache_put(ftl, buf, boffset);
+unlock:
+ mod_timer(&ftl->timer, jiffies + msecs_to_jiffies(cache_timeout));
+ mutex_unlock(&ftl->mutex);
+ return error;
+}
+
+/* outside interface: flush everything */
+static int sm_flush(struct mtd_blktrans_dev *dev)
+{
+ struct sm_ftl *ftl = dev->priv;
+ int retval;
+
+ mutex_lock(&ftl->mutex);
+ retval = sm_cache_flush(ftl);
+ mutex_unlock(&ftl->mutex);
+ return retval;
+}
+
+/* outside interface: device is released */
+static int sm_release(struct mtd_blktrans_dev *dev)
+{
+ struct sm_ftl *ftl = dev->priv;
+
+ mutex_lock(&ftl->mutex);
+ del_timer_sync(&ftl->timer);
+ cancel_work_sync(&ftl->flush_work);
+ sm_cache_flush(ftl);
+ mutex_unlock(&ftl->mutex);
+ return 0;
+}
+
+/* outside interface: get geometry */
+static int sm_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo)
+{
+ struct sm_ftl *ftl = dev->priv;
+ geo->heads = ftl->heads;
+ geo->sectors = ftl->sectors;
+ geo->cylinders = ftl->cylinders;
+ return 0;
+}
+
+/* external interface: main initialization function */
+static void sm_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
+{
+ struct mtd_blktrans_dev *trans;
+ struct sm_ftl *ftl;
+
+ /* Allocate & initialize our private structure */
+ ftl = kzalloc(sizeof(struct sm_ftl), GFP_KERNEL);
+ if (!ftl)
+ goto error1;
+
+
+ mutex_init(&ftl->mutex);
+ setup_timer(&ftl->timer, sm_cache_flush_timer, (unsigned long)ftl);
+ INIT_WORK(&ftl->flush_work, sm_cache_flush_work);
+ init_completion(&ftl->erase_completion);
+
+ /* Read media information */
+ if (sm_get_media_info(ftl, mtd)) {
+ dbg("found unsupported mtd device, aborting");
+ goto error2;
+ }
+
+
+ /* Allocate temporary CIS buffer for read retry support */
+ ftl->cis_buffer = kzalloc(SM_SECTOR_SIZE, GFP_KERNEL);
+ if (!ftl->cis_buffer)
+ goto error2;
+
+ /* Allocate zone array, it will be initialized on demand */
+ ftl->zones = kzalloc(sizeof(struct ftl_zone) * ftl->zone_count,
+ GFP_KERNEL);
+ if (!ftl->zones)
+ goto error3;
+
+ /* Allocate the cache*/
+ ftl->cache_data = kzalloc(ftl->block_size, GFP_KERNEL);
+
+ if (!ftl->cache_data)
+ goto error4;
+
+ sm_cache_init(ftl);
+
+
+ /* Allocate upper layer structure and initialize it */
+ trans = kzalloc(sizeof(struct mtd_blktrans_dev), GFP_KERNEL);
+ if (!trans)
+ goto error5;
+
+ ftl->trans = trans;
+ trans->priv = ftl;
+
+ trans->tr = tr;
+ trans->mtd = mtd;
+ trans->devnum = -1;
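+	/* Exported size in 512 byte sectors: only max_lba blocks per zone
+		are user visible, the rest are kept as spares */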
+ trans->size = (ftl->block_size * ftl->max_lba * ftl->zone_count) >> 9;
+ trans->readonly = ftl->readonly;
+
+ if (sm_find_cis(ftl)) {
+ dbg("CIS not found on mtd device, aborting");
+ goto error6;
+ }
+
+ ftl->disk_attributes = sm_create_sysfs_attributes(ftl);
+ trans->disk_attributes = ftl->disk_attributes;
+
+ sm_printk("Found %d MiB xD/SmartMedia FTL on mtd%d",
+ (int)(mtd->size / (1024 * 1024)), mtd->index);
+
+ dbg("FTL layout:");
+ dbg("%d zone(s), each consists of %d blocks (+%d spares)",
+ ftl->zone_count, ftl->max_lba,
+ ftl->zone_size - ftl->max_lba);
+ dbg("each block consists of %d bytes",
+ ftl->block_size);
+
+
+ /* Register device*/
+ if (add_mtd_blktrans_dev(trans)) {
+ dbg("error in mtdblktrans layer");
+ goto error6;
+ }
+ return;
+error6:
+ kfree(trans);
+error5:
+ kfree(ftl->cache_data);
+error4:
+ kfree(ftl->zones);
+error3:
+ kfree(ftl->cis_buffer);
+error2:
+ kfree(ftl);
+error1:
+ return;
+}
+
+/* main interface: device {surprise,} removal */
+static void sm_remove_dev(struct mtd_blktrans_dev *dev)
+{
+ struct sm_ftl *ftl = dev->priv;
+ int i;
+
+ del_mtd_blktrans_dev(dev);
+ ftl->trans = NULL;
+
+ for (i = 0 ; i < ftl->zone_count; i++) {
+
+ if (!ftl->zones[i].initialized)
+ continue;
+
+ kfree(ftl->zones[i].lba_to_phys_table);
+ kfifo_free(&ftl->zones[i].free_sectors);
+ }
+
+ sm_delete_sysfs_attributes(ftl);
+ kfree(ftl->cis_buffer);
+ kfree(ftl->zones);
+ kfree(ftl->cache_data);
+ kfree(ftl);
+}
+
+static struct mtd_blktrans_ops sm_ftl_ops = {
+ .name = "smblk",
+ .major = -1,
+ .part_bits = SM_FTL_PARTN_BITS,
+ .blksize = SM_SECTOR_SIZE,
+ .getgeo = sm_getgeo,
+
+ .add_mtd = sm_add_mtd,
+ .remove_dev = sm_remove_dev,
+
+ .readsect = sm_read,
+ .writesect = sm_write,
+
+ .flush = sm_flush,
+ .release = sm_release,
+
+ .owner = THIS_MODULE,
+};
+
+static __init int sm_module_init(void)
+{
+ int error = 0;
+ cache_flush_workqueue = create_freezeable_workqueue("smflush");
+
+	/* create_freezeable_workqueue() returns NULL on failure, not ERR_PTR */
+	if (!cache_flush_workqueue)
+		return -ENOMEM;
+
+ error = register_mtd_blktrans(&sm_ftl_ops);
+ if (error)
+ destroy_workqueue(cache_flush_workqueue);
+ return error;
+
+}
+
+static void __exit sm_module_exit(void)
+{
+ destroy_workqueue(cache_flush_workqueue);
+ deregister_mtd_blktrans(&sm_ftl_ops);
+}
+
+module_init(sm_module_init);
+module_exit(sm_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>");
+MODULE_DESCRIPTION("Smartmedia/xD mtd translation layer");
diff --git a/drivers/mtd/sm_ftl.h b/drivers/mtd/sm_ftl.h
new file mode 100644
index 000000000000..e30e48e7f63d
--- /dev/null
+++ b/drivers/mtd/sm_ftl.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright © 2009 - Maxim Levitsky
+ * SmartMedia/xD translation layer
+ *
+ * Based loosely on ssfdc.c, which is
+ * © 2005 Eptar srl
+ * Author: Claudio Lanconelli <lanconelli.claudio@eptar.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/mtd/blktrans.h>
+#include <linux/kfifo.h>
+#include <linux/sched.h>
+#include <linux/completion.h>
+#include <linux/mtd/mtd.h>
+
+
+
+struct ftl_zone {
+ int initialized;
+ int16_t *lba_to_phys_table; /* LBA to physical table */
+ struct kfifo free_sectors; /* queue of free sectors */
+};
+
+struct sm_ftl {
+ struct mtd_blktrans_dev *trans;
+
+ struct mutex mutex; /* protects the structure */
+ struct ftl_zone *zones; /* FTL tables for each zone */
+
+ /* Media information */
+ int block_size; /* block size in bytes */
+ int zone_size; /* zone size in blocks */
+ int zone_count; /* number of zones */
+ int max_lba; /* maximum lba in a zone */
+ int smallpagenand; /* 256 bytes/page nand */
+ int readonly; /* is FS readonly */
+ int unstable;
+ int cis_block; /* CIS block location */
+ int cis_boffset; /* CIS offset in the block */
+ int cis_page_offset; /* CIS offset in the page */
+ void *cis_buffer; /* tmp buffer for cis reads */
+
+ /* Cache */
+ int cache_block; /* block number of cached block */
+ int cache_zone; /* zone of cached block */
+ unsigned char *cache_data; /* cached block data */
+	unsigned long		cache_data_invalid_bitmap;
+ int cache_clean;
+ struct work_struct flush_work;
+ struct timer_list timer;
+
+ /* Async erase stuff */
+ struct completion erase_completion;
+
+ /* Geometry stuff */
+ int heads;
+ int sectors;
+ int cylinders;
+
+ struct attribute_group *disk_attributes;
+};
+
+struct chs_entry {
+ unsigned long size;
+ unsigned short cyl;
+ unsigned char head;
+ unsigned char sec;
+};
+
+
+#define SM_FTL_PARTN_BITS 3
+
+#define sm_printk(format, ...) \
+ printk(KERN_WARNING "sm_ftl" ": " format "\n", ## __VA_ARGS__)
+
+#define dbg(format, ...) \
+ if (debug) \
+ printk(KERN_DEBUG "sm_ftl" ": " format "\n", ## __VA_ARGS__)
+
+#define dbg_verbose(format, ...) \
+ if (debug > 1) \
+ printk(KERN_DEBUG "sm_ftl" ": " format "\n", ## __VA_ARGS__)
+
+
+static void sm_erase_callback(struct erase_info *self);
+static int sm_erase_block(struct sm_ftl *ftl, int zone_num, uint16_t block,
+ int put_free);
+static void sm_mark_block_bad(struct sm_ftl *ftl, int zone_num, int block);
+
+static int sm_recheck_media(struct sm_ftl *ftl);
diff --git a/drivers/mtd/ssfdc.c b/drivers/mtd/ssfdc.c
index 3f67e00d98e0..81c4ecdc11f5 100644
--- a/drivers/mtd/ssfdc.c
+++ b/drivers/mtd/ssfdc.c
@@ -375,7 +375,6 @@ static void ssfdcr_remove_dev(struct mtd_blktrans_dev *dev)
del_mtd_blktrans_dev(dev);
kfree(ssfdc->logic_block_map);
- kfree(ssfdc);
}
static int ssfdcr_readsect(struct mtd_blktrans_dev *dev,
diff --git a/drivers/mtd/tests/mtd_pagetest.c b/drivers/mtd/tests/mtd_pagetest.c
index 921a85df9196..6bc1b8276c62 100644
--- a/drivers/mtd/tests/mtd_pagetest.c
+++ b/drivers/mtd/tests/mtd_pagetest.c
@@ -480,12 +480,11 @@ static int scan_for_bad_eraseblocks(void)
{
int i, bad = 0;
- bbt = kmalloc(ebcnt, GFP_KERNEL);
+ bbt = kzalloc(ebcnt, GFP_KERNEL);
if (!bbt) {
printk(PRINT_PREF "error: cannot allocate memory\n");
return -ENOMEM;
}
- memset(bbt, 0 , ebcnt);
printk(PRINT_PREF "scanning for bad eraseblocks\n");
for (i = 0; i < ebcnt; ++i) {
diff --git a/drivers/mtd/tests/mtd_readtest.c b/drivers/mtd/tests/mtd_readtest.c
index 7107fccbc7de..afe71aa15c4b 100644
--- a/drivers/mtd/tests/mtd_readtest.c
+++ b/drivers/mtd/tests/mtd_readtest.c
@@ -141,12 +141,11 @@ static int scan_for_bad_eraseblocks(void)
{
int i, bad = 0;
- bbt = kmalloc(ebcnt, GFP_KERNEL);
+ bbt = kzalloc(ebcnt, GFP_KERNEL);
if (!bbt) {
printk(PRINT_PREF "error: cannot allocate memory\n");
return -ENOMEM;
}
- memset(bbt, 0 , ebcnt);
/* NOR flash does not implement block_isbad */
if (mtd->block_isbad == NULL)
diff --git a/drivers/mtd/tests/mtd_speedtest.c b/drivers/mtd/tests/mtd_speedtest.c
index 56ca62bb96bf..161feeb7b8b9 100644
--- a/drivers/mtd/tests/mtd_speedtest.c
+++ b/drivers/mtd/tests/mtd_speedtest.c
@@ -295,12 +295,11 @@ static int scan_for_bad_eraseblocks(void)
{
int i, bad = 0;
- bbt = kmalloc(ebcnt, GFP_KERNEL);
+ bbt = kzalloc(ebcnt, GFP_KERNEL);
if (!bbt) {
printk(PRINT_PREF "error: cannot allocate memory\n");
return -ENOMEM;
}
- memset(bbt, 0 , ebcnt);
/* NOR flash does not implement block_isbad */
if (mtd->block_isbad == NULL)
diff --git a/drivers/mtd/tests/mtd_stresstest.c b/drivers/mtd/tests/mtd_stresstest.c
index 3854afec56d0..531625fc9259 100644
--- a/drivers/mtd/tests/mtd_stresstest.c
+++ b/drivers/mtd/tests/mtd_stresstest.c
@@ -221,12 +221,11 @@ static int scan_for_bad_eraseblocks(void)
{
int i, bad = 0;
- bbt = kmalloc(ebcnt, GFP_KERNEL);
+ bbt = kzalloc(ebcnt, GFP_KERNEL);
if (!bbt) {
printk(PRINT_PREF "error: cannot allocate memory\n");
return -ENOMEM;
}
- memset(bbt, 0 , ebcnt);
/* NOR flash does not implement block_isbad */
if (mtd->block_isbad == NULL)
diff --git a/drivers/mtd/tests/mtd_subpagetest.c b/drivers/mtd/tests/mtd_subpagetest.c
index 700237a3d120..11204e8aab5f 100644
--- a/drivers/mtd/tests/mtd_subpagetest.c
+++ b/drivers/mtd/tests/mtd_subpagetest.c
@@ -354,12 +354,11 @@ static int scan_for_bad_eraseblocks(void)
{
int i, bad = 0;
- bbt = kmalloc(ebcnt, GFP_KERNEL);
+ bbt = kzalloc(ebcnt, GFP_KERNEL);
if (!bbt) {
printk(PRINT_PREF "error: cannot allocate memory\n");
return -ENOMEM;
}
- memset(bbt, 0 , ebcnt);
printk(PRINT_PREF "scanning for bad eraseblocks\n");
for (i = 0; i < ebcnt; ++i) {
diff --git a/drivers/mtd/ubi/Kconfig b/drivers/mtd/ubi/Kconfig
index 0a8c7ea764ae..f702a163d8df 100644
--- a/drivers/mtd/ubi/Kconfig
+++ b/drivers/mtd/ubi/Kconfig
@@ -27,7 +27,7 @@ config MTD_UBI_WL_THRESHOLD
The default value should be OK for SLC NAND flashes, NOR flashes and
other flashes which have eraseblock life-cycle 100000 or more.
However, in case of MLC NAND flashes which typically have eraseblock
- life-cycle less then 10000, the threshold should be lessened (e.g.,
+ life-cycle less than 10000, the threshold should be lessened (e.g.,
to 128 or 256, although it does not have to be power of 2).
config MTD_UBI_BEB_RESERVE
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 55c726dde942..13b05cb33b08 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -42,7 +42,6 @@
#include <linux/miscdevice.h>
#include <linux/log2.h>
#include <linux/kthread.h>
-#include <linux/reboot.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include "ubi.h"
@@ -50,6 +49,12 @@
/* Maximum length of the 'mtd=' parameter */
#define MTD_PARAM_LEN_MAX 64
+#ifdef CONFIG_MTD_UBI_MODULE
+#define ubi_is_module() 1
+#else
+#define ubi_is_module() 0
+#endif
+
/**
* struct mtd_dev_param - MTD device parameter description data structure.
* @name: MTD character device node path, MTD device name, or MTD device number
@@ -832,34 +837,6 @@ static int autoresize(struct ubi_device *ubi, int vol_id)
}
/**
- * ubi_reboot_notifier - halt UBI transactions immediately prior to a reboot.
- * @n: reboot notifier object
- * @state: SYS_RESTART, SYS_HALT, or SYS_POWER_OFF
- * @cmd: pointer to command string for RESTART2
- *
- * This function stops the UBI background thread so that the flash device
- * remains quiescent when Linux restarts the system. Any queued work will be
- * discarded, but this function will block until do_work() finishes if an
- * operation is already in progress.
- *
- * This function solves a real-life problem observed on NOR flashes when an
- * PEB erase operation starts, then the system is rebooted before the erase is
- * finishes, and the boot loader gets confused and dies. So we prefer to finish
- * the ongoing operation before rebooting.
- */
-static int ubi_reboot_notifier(struct notifier_block *n, unsigned long state,
- void *cmd)
-{
- struct ubi_device *ubi;
-
- ubi = container_of(n, struct ubi_device, reboot_notifier);
- if (ubi->bgt_thread)
- kthread_stop(ubi->bgt_thread);
- ubi_sync(ubi->ubi_num);
- return NOTIFY_DONE;
-}
-
-/**
* ubi_attach_mtd_dev - attach an MTD device.
* @mtd: MTD device description object
* @ubi_num: number to assign to the new UBI device
@@ -1016,11 +993,6 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
wake_up_process(ubi->bgt_thread);
spin_unlock(&ubi->wl_lock);
- /* Flash device priority is 0 - UBI needs to shut down first */
- ubi->reboot_notifier.priority = 1;
- ubi->reboot_notifier.notifier_call = ubi_reboot_notifier;
- register_reboot_notifier(&ubi->reboot_notifier);
-
ubi_devices[ubi_num] = ubi;
ubi_notify_all(ubi, UBI_VOLUME_ADDED, NULL);
return ubi_num;
@@ -1091,7 +1063,6 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
* Before freeing anything, we have to stop the background thread to
* prevent it from doing anything on this device while we are freeing.
*/
- unregister_reboot_notifier(&ubi->reboot_notifier);
if (ubi->bgt_thread)
kthread_stop(ubi->bgt_thread);
@@ -1241,9 +1212,24 @@ static int __init ubi_init(void)
p->vid_hdr_offs);
mutex_unlock(&ubi_devices_mutex);
if (err < 0) {
- put_mtd_device(mtd);
ubi_err("cannot attach mtd%d", mtd->index);
- goto out_detach;
+ put_mtd_device(mtd);
+
+ /*
+ * Originally UBI stopped initializing on any error.
+ * However, later on it was found out that this
+ * behavior is not very good when UBI is compiled into
+ * the kernel and the MTD devices to attach are passed
+ * through the command line. Indeed, UBI failure
+			 * stopped the whole boot sequence.
+ *
+ * To fix this, we changed the behavior for the
+ * non-module case, but preserved the old behavior for
+ * the module case, just for compatibility. This is a
+ * little inconsistent, though.
+ */
+ if (ubi_is_module())
+ goto out_detach;
}
}
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index 72ebb3f06b86..4dfa6b90c21c 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -189,8 +189,7 @@ static loff_t vol_cdev_llseek(struct file *file, loff_t offset, int origin)
return new_offset;
}
-static int vol_cdev_fsync(struct file *file, struct dentry *dentry,
- int datasync)
+static int vol_cdev_fsync(struct file *file, int datasync)
{
struct ubi_volume_desc *desc = file->private_data;
struct ubi_device *ubi = desc->vol->ubi;
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index 533b1a4b9af1..4b979e34b159 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -64,9 +64,9 @@
* device, e.g., make @ubi->min_io_size = 512 in the example above?
*
* A: because when writing a sub-page, MTD still writes a full 2K page but the
- * bytes which are no relevant to the sub-page are 0xFF. So, basically, writing
- * 4x512 sub-pages is 4 times slower then writing one 2KiB NAND page. Thus, we
- * prefer to use sub-pages only for EV and VID headers.
+ * bytes which are not relevant to the sub-page are 0xFF. So, basically,
+ * writing 4x512 sub-pages is 4 times slower than writing one 2KiB NAND page.
+ * Thus, we prefer to use sub-pages only for EC and VID headers.
*
* As it was noted above, the VID header may start at a non-aligned offset.
* For example, in case of a 2KiB page NAND flash with a 512 bytes sub-page,
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c
index 17f287decc36..69fa4ef03c53 100644
--- a/drivers/mtd/ubi/kapi.c
+++ b/drivers/mtd/ubi/kapi.c
@@ -488,7 +488,7 @@ EXPORT_SYMBOL_GPL(ubi_leb_write);
*
* This function changes the contents of a logical eraseblock atomically. @buf
* has to contain new logical eraseblock data, and @len - the length of the
- * data, which has to be aligned. The length may be shorter then the logical
+ * data, which has to be aligned. The length may be shorter than the logical
* eraseblock size, ant the logical eraseblock may be appended to more times
* later on. This function guarantees that in case of an unclean reboot the old
* contents is preserved. Returns zero in case of success and a negative error
@@ -571,7 +571,7 @@ EXPORT_SYMBOL_GPL(ubi_leb_erase);
*
* This function un-maps logical eraseblock @lnum and schedules the
* corresponding physical eraseblock for erasure, so that it will eventually be
- * physically erased in background. This operation is much faster then the
+ * physically erased in background. This operation is much faster than the
* erase operation.
*
* Unlike erase, the un-map operation does not guarantee that the logical
@@ -590,7 +590,7 @@ EXPORT_SYMBOL_GPL(ubi_leb_erase);
*
* The main and obvious use-case of this function is when the contents of a
* logical eraseblock has to be re-written. Then it is much more efficient to
- * first un-map it, then write new data, rather then first erase it, then write
+ * first un-map it, then write new data, rather than first erase it, then write
* new data. Note, once new data has been written to the logical eraseblock,
* UBI guarantees that the old contents has gone forever. In other words, if an
* unclean reboot happens after the logical eraseblock has been un-mapped and
diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c
index dc5f688699da..aed19f33b8f3 100644
--- a/drivers/mtd/ubi/scan.c
+++ b/drivers/mtd/ubi/scan.c
@@ -231,7 +231,7 @@ static struct ubi_scan_volume *add_volume(struct ubi_scan_info *si, int vol_id,
* case of success this function returns a positive value, in case of failure, a
* negative error code is returned. The success return codes use the following
* bits:
- * o bit 0 is cleared: the first PEB (described by @seb) is newer then the
+ * o bit 0 is cleared: the first PEB (described by @seb) is newer than the
* second PEB (described by @pnum and @vid_hdr);
* o bit 0 is set: the second PEB is newer;
* o bit 1 is cleared: no bit-flips were detected in the newer LEB;
@@ -452,7 +452,7 @@ int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si,
if (cmp_res & 1) {
/*
- * This logical eraseblock is newer then the one
+ * This logical eraseblock is newer than the one
* found earlier.
*/
err = validate_vid_hdr(vid_hdr, sv, pnum);
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 5176d4886518..a637f0283add 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -350,7 +350,6 @@ struct ubi_wl_entry;
* @bgt_thread: background thread description object
* @thread_enabled: if the background thread is enabled
* @bgt_name: background thread name
- * @reboot_notifier: notifier to terminate background thread before rebooting
*
* @flash_size: underlying MTD device size (in bytes)
* @peb_count: count of physical eraseblocks on the MTD device
@@ -436,7 +435,6 @@ struct ubi_device {
struct task_struct *bgt_thread;
int thread_enabled;
char bgt_name[sizeof(UBI_BGT_NAME_PATTERN)+2];
- struct notifier_block reboot_notifier;
/* I/O sub-system's stuff */
long long flash_size;
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index cd90ff3b76b1..14c10bed94ee 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -414,7 +414,7 @@ static struct ubi_vtbl_record *process_lvol(struct ubi_device *ubi,
* 0 contains more recent information.
*
* So the plan is to first check LEB 0. Then
- * a. if LEB 0 is OK, it must be containing the most resent data; then
+ * a. if LEB 0 is OK, it must be containing the most recent data; then
* we compare it with LEB 1, and if they are different, we copy LEB
* 0 to LEB 1;
* b. if LEB 0 is corrupted, but LEB 1 has to be OK, and we copy LEB 1
@@ -848,7 +848,7 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si)
goto out_free;
/*
- * Get sure that the scanning information is consistent to the
+ * Make sure that the scanning information is consistent to the
* information stored in the volume table.
*/
err = check_scanning_info(ubi, si);
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index f64ddabd4ac8..ee7b1d8fbb92 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -350,7 +350,7 @@ static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e)
* @max: highest possible erase counter
*
* This function looks for a wear leveling entry with erase counter closest to
- * @max and less then @max.
+ * @max and less than @max.
*/
static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int max)
{
diff --git a/drivers/net/3c501.c b/drivers/net/3c501.c
index 3ea42ff17657..1776ab61b05f 100644
--- a/drivers/net/3c501.c
+++ b/drivers/net/3c501.c
@@ -480,7 +480,6 @@ static netdev_tx_t el_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* fire ... Trigger xmit. */
outb(AX_XMIT, AX_CMD);
lp->loading = 0;
- dev->trans_start = jiffies;
if (el_debug > 2)
pr_debug(" queued xmit.\n");
dev_kfree_skb(skb);
@@ -727,7 +726,6 @@ static void el_receive(struct net_device *dev)
dev->stats.rx_packets++;
dev->stats.rx_bytes += pkt_len;
}
- return;
}
/**
diff --git a/drivers/net/3c503.c b/drivers/net/3c503.c
index 66e0323c1839..baac246561b9 100644
--- a/drivers/net/3c503.c
+++ b/drivers/net/3c503.c
@@ -380,6 +380,12 @@ out:
return retval;
}
+static irqreturn_t el2_probe_interrupt(int irq, void *seen)
+{
+ *(bool *)seen = true;
+ return IRQ_HANDLED;
+}
+
static int
el2_open(struct net_device *dev)
{
@@ -391,23 +397,35 @@ el2_open(struct net_device *dev)
outb(EGACFR_NORM, E33G_GACFR); /* Enable RAM and interrupts. */
do {
- retval = request_irq(*irqp, NULL, 0, "bogus", dev);
- if (retval >= 0) {
+ bool seen;
+
+ retval = request_irq(*irqp, el2_probe_interrupt, 0,
+ dev->name, &seen);
+ if (retval == -EBUSY)
+ continue;
+ if (retval < 0)
+ goto err_disable;
+
/* Twinkle the interrupt, and check if it's seen. */
- unsigned long cookie = probe_irq_on();
+ seen = false;
+ smp_wmb();
outb_p(0x04 << ((*irqp == 9) ? 2 : *irqp), E33G_IDCFR);
outb_p(0x00, E33G_IDCFR);
- if (*irqp == probe_irq_off(cookie) && /* It's a good IRQ line! */
- ((retval = request_irq(dev->irq = *irqp,
- eip_interrupt, 0,
- dev->name, dev)) == 0))
- break;
- } else {
- if (retval != -EBUSY)
- return retval;
- }
+ msleep(1);
+ free_irq(*irqp, el2_probe_interrupt);
+ if (!seen)
+ continue;
+
+ retval = request_irq(dev->irq = *irqp, eip_interrupt, 0,
+ dev->name, dev);
+ if (retval == -EBUSY)
+ continue;
+ if (retval < 0)
+ goto err_disable;
} while (*++irqp);
+
if (*irqp == 0) {
+ err_disable:
outb(EGACFR_IRQOFF, E33G_GACFR); /* disable interrupts. */
return -EAGAIN;
}
@@ -555,7 +573,6 @@ el2_block_output(struct net_device *dev, int count,
}
blocked:;
outb_p(ei_status.interface_num==0 ? ECNTRL_THIN : ECNTRL_AUI, E33G_CNTRL);
- return;
}
/* Read the 4 byte, page aligned 8390 specific header. */
@@ -671,7 +688,6 @@ el2_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring
}
blocked:;
outb_p(ei_status.interface_num == 0 ? ECNTRL_THIN : ECNTRL_AUI, E33G_CNTRL);
- return;
}
diff --git a/drivers/net/3c505.c b/drivers/net/3c505.c
index 29b8d1d63bde..88d766ee0e1b 100644
--- a/drivers/net/3c505.c
+++ b/drivers/net/3c505.c
@@ -1055,7 +1055,7 @@ static void elp_timeout(struct net_device *dev)
(stat & ACRF) ? "interrupt" : "command");
if (elp_debug >= 1)
pr_debug("%s: status %#02x\n", dev->name, stat);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
dev->stats.tx_dropped++;
netif_wake_queue(dev);
}
@@ -1093,11 +1093,6 @@ static netdev_tx_t elp_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (elp_debug >= 3)
pr_debug("%s: packet of length %d sent\n", dev->name, (int) skb->len);
- /*
- * start the transmit timeout
- */
- dev->trans_start = jiffies;
-
prime_rx(dev);
spin_unlock_irqrestore(&adapter->lock, flags);
netif_start_queue(dev);
@@ -1216,7 +1211,7 @@ static int elp_close(struct net_device *dev)
static void elp_set_mc_list(struct net_device *dev)
{
elp_device *adapter = netdev_priv(dev);
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
int i;
unsigned long flags;
@@ -1231,8 +1226,9 @@ static void elp_set_mc_list(struct net_device *dev)
adapter->tx_pcb.command = CMD_LOAD_MULTICAST_LIST;
adapter->tx_pcb.length = 6 * netdev_mc_count(dev);
i = 0;
- netdev_for_each_mc_addr(dmi, dev)
- memcpy(adapter->tx_pcb.data.multicast[i++], dmi->dmi_addr, 6);
+ netdev_for_each_mc_addr(ha, dev)
+ memcpy(adapter->tx_pcb.data.multicast[i++],
+ ha->addr, 6);
adapter->got[CMD_LOAD_MULTICAST_LIST] = 0;
if (!send_pcb(dev, &adapter->tx_pcb))
pr_err("%s: couldn't send set_multicast command\n", dev->name);
diff --git a/drivers/net/3c507.c b/drivers/net/3c507.c
index b32b7a1710b7..ea9b7a098c9b 100644
--- a/drivers/net/3c507.c
+++ b/drivers/net/3c507.c
@@ -449,7 +449,6 @@ static int __init el16_probe1(struct net_device *dev, int ioaddr)
pr_debug("%s", version);
lp = netdev_priv(dev);
- memset(lp, 0, sizeof(*lp));
spin_lock_init(&lp->lock);
lp->base = ioremap(dev->mem_start, RX_BUF_END);
if (!lp->base) {
@@ -505,7 +504,7 @@ static void el16_tx_timeout (struct net_device *dev)
outb (0, ioaddr + SIGNAL_CA); /* Issue channel-attn. */
lp->last_restart = dev->stats.tx_packets;
}
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue (dev);
}
@@ -529,7 +528,6 @@ static netdev_tx_t el16_send_packet (struct sk_buff *skb,
hardware_send_packet (dev, buf, skb->len, length - skb->len);
- dev->trans_start = jiffies;
/* Enable the 82586 interrupt input. */
outb (0x84, ioaddr + MISC_CTRL);
@@ -553,8 +551,7 @@ static irqreturn_t el16_interrupt(int irq, void *dev_id)
void __iomem *shmem;
if (dev == NULL) {
- pr_err("%s: net_interrupt(): irq %d for unknown device.\n",
- dev->name, irq);
+ pr_err("net_interrupt(): irq %d for unknown device.\n", irq);
return IRQ_NONE;
}
@@ -766,7 +763,6 @@ static void init_82586_mem(struct net_device *dev)
if (net_debug > 4)
pr_debug("%s: Initialized 82586, status %04x.\n", dev->name,
readw(shmem+iSCB_STATUS));
- return;
}
static void hardware_send_packet(struct net_device *dev, void *buf, short length, short pad)
diff --git a/drivers/net/3c509.c b/drivers/net/3c509.c
index ab9bb3c52002..91abb965fb44 100644
--- a/drivers/net/3c509.c
+++ b/drivers/net/3c509.c
@@ -807,7 +807,7 @@ el3_tx_timeout (struct net_device *dev)
dev->name, inb(ioaddr + TX_STATUS), inw(ioaddr + EL3_STATUS),
inw(ioaddr + TX_FREE));
dev->stats.tx_errors++;
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
/* Issue TX_RESET and TX_START commands. */
outw(TxReset, ioaddr + EL3_CMD);
outw(TxEnable, ioaddr + EL3_CMD);
@@ -868,7 +868,6 @@ el3_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* ... and the packet rounded to a doubleword. */
outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
- dev->trans_start = jiffies;
if (inw(ioaddr + TX_FREE) > 1536)
netif_start_queue(dev);
else
@@ -1038,7 +1037,6 @@ static void update_stats(struct net_device *dev)
/* Back to window 1, and turn statistics back on. */
EL3WINDOW(1);
outw(StatsEnable, ioaddr + EL3_CMD);
- return;
}
static int
diff --git a/drivers/net/3c515.c b/drivers/net/3c515.c
index 2e17837be546..3bba835f1a21 100644
--- a/drivers/net/3c515.c
+++ b/drivers/net/3c515.c
@@ -958,7 +958,6 @@ static void corkscrew_timer(unsigned long data)
dev->name, media_tbl[dev->if_port].name);
#endif /* AUTOMEDIA */
- return;
}
static void corkscrew_timeout(struct net_device *dev)
@@ -992,7 +991,7 @@ static void corkscrew_timeout(struct net_device *dev)
if (!(inw(ioaddr + EL3_STATUS) & CmdInProgress))
break;
outw(TxEnable, ioaddr + EL3_CMD);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
dev->stats.tx_errors++;
dev->stats.tx_dropped++;
netif_wake_queue(dev);
@@ -1055,7 +1054,6 @@ static netdev_tx_t corkscrew_start_xmit(struct sk_buff *skb,
prev_entry->status &= ~0x80000000;
netif_wake_queue(dev);
}
- dev->trans_start = jiffies;
return NETDEV_TX_OK;
}
/* Put out the doubleword header... */
@@ -1091,7 +1089,6 @@ static netdev_tx_t corkscrew_start_xmit(struct sk_buff *skb,
outw(SetTxThreshold + (1536 >> 2), ioaddr + EL3_CMD);
#endif /* bus master */
- dev->trans_start = jiffies;
/* Clear the Tx status stack. */
{
@@ -1518,7 +1515,6 @@ static void update_stats(int ioaddr, struct net_device *dev)
/* We change back to window 7 (not 1) with the Vortex. */
EL3WINDOW(7);
- return;
}
/* This new version of set_rx_mode() supports v1.4 kernels.
diff --git a/drivers/net/3c523.c b/drivers/net/3c523.c
index 1719079cc498..a7b0e5e43a52 100644
--- a/drivers/net/3c523.c
+++ b/drivers/net/3c523.c
@@ -503,7 +503,6 @@ static int __init do_elmc_probe(struct net_device *dev)
break;
}
- memset(pr, 0, sizeof(struct priv));
pr->slot = slot;
pr_info("%s: 3Com 3c523 Rev 0x%x at %#lx\n", dev->name, (int) revision,
@@ -624,7 +623,7 @@ static int init586(struct net_device *dev)
volatile struct iasetup_cmd_struct *ias_cmd;
volatile struct tdr_cmd_struct *tdr_cmd;
volatile struct mcsetup_cmd_struct *mc_cmd;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
int num_addrs = netdev_mc_count(dev);
ptr = (void *) ((char *) p->scb + sizeof(struct scb_struct));
@@ -787,8 +786,9 @@ static int init586(struct net_device *dev)
mc_cmd->cmd_link = 0xffff;
mc_cmd->mc_cnt = num_addrs * 6;
i = 0;
- netdev_for_each_mc_addr(dmi, dev)
- memcpy((char *) mc_cmd->mc_list[i++], dmi->dmi_addr, 6);
+ netdev_for_each_mc_addr(ha, dev)
+ memcpy((char *) mc_cmd->mc_list[i++],
+ ha->addr, 6);
p->scb->cbl_offset = make16(mc_cmd);
p->scb->cmd = CUC_START;
elmc_id_attn586();
@@ -1152,7 +1152,6 @@ static netdev_tx_t elmc_send_packet(struct sk_buff *skb, struct net_device *dev)
p->scb->cmd = CUC_START;
p->xmit_cmds[0]->cmd_status = 0;
elmc_attn586();
- dev->trans_start = jiffies;
if (!i) {
dev_kfree_skb(skb);
}
@@ -1176,7 +1175,6 @@ static netdev_tx_t elmc_send_packet(struct sk_buff *skb, struct net_device *dev)
p->xmit_cmds[0]->cmd_status = p->nop_cmds[next_nop]->cmd_status = 0;
p->nop_cmds[p->nop_point]->cmd_link = make16((p->xmit_cmds[0]));
- dev->trans_start = jiffies;
p->nop_point = next_nop;
dev_kfree_skb(skb);
#endif
@@ -1190,7 +1188,6 @@ static netdev_tx_t elmc_send_packet(struct sk_buff *skb, struct net_device *dev)
= make16((p->nop_cmds[next_nop]));
p->nop_cmds[next_nop]->cmd_status = 0;
p->nop_cmds[p->xmit_count]->cmd_link = make16((p->xmit_cmds[p->xmit_count]));
- dev->trans_start = jiffies;
p->xmit_count = next_nop;
if (p->xmit_count != p->xmit_last)
netif_wake_queue(dev);
diff --git a/drivers/net/3c527.c b/drivers/net/3c527.c
index 5c07b147ec99..38395dfa4963 100644
--- a/drivers/net/3c527.c
+++ b/drivers/net/3c527.c
@@ -1533,7 +1533,7 @@ static void do_mc32_set_multicast_list(struct net_device *dev, int retry)
{
unsigned char block[62];
unsigned char *bp;
- struct dev_mc_list *dmc;
+ struct netdev_hw_addr *ha;
if(retry==0)
lp->mc_list_valid = 0;
@@ -1543,8 +1543,8 @@ static void do_mc32_set_multicast_list(struct net_device *dev, int retry)
block[0]=netdev_mc_count(dev);
bp=block+2;
- netdev_for_each_mc_addr(dmc, dev) {
- memcpy(bp, dmc->dmi_addr, 6);
+ netdev_for_each_mc_addr(ha, dev) {
+ memcpy(bp, ha->addr, 6);
bp+=6;
}
if(mc32_command_nowait(dev, 2, block,
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 5f92fdbe66e2..d75803e6e527 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -1855,7 +1855,6 @@ leave_media_alone:
mod_timer(&vp->timer, RUN_AT(next_tick));
if (vp->deferred)
iowrite16(FakeIntr, ioaddr + EL3_CMD);
- return;
}
static void vortex_tx_timeout(struct net_device *dev)
@@ -1917,7 +1916,7 @@ static void vortex_tx_timeout(struct net_device *dev)
/* Issue Tx Enable */
iowrite16(TxEnable, ioaddr + EL3_CMD);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
/* Switch to register set 7 for normal use. */
EL3WINDOW(7);
@@ -2063,7 +2062,6 @@ vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
}
- dev->trans_start = jiffies;
/* Clear the Tx status stack. */
{
@@ -2129,8 +2127,8 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
int i;
vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data,
- skb->len-skb->data_len, PCI_DMA_TODEVICE));
- vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb->len-skb->data_len);
+ skb_headlen(skb), PCI_DMA_TODEVICE));
+ vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb_headlen(skb));
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -2174,7 +2172,6 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
iowrite16(DownUnstall, ioaddr + EL3_CMD);
spin_unlock_irqrestore(&vp->lock, flags);
- dev->trans_start = jiffies;
return NETDEV_TX_OK;
}
@@ -2800,7 +2797,6 @@ static void update_stats(void __iomem *ioaddr, struct net_device *dev)
}
EL3WINDOW(old_window >> 13);
- return;
}
static int vortex_nway_reset(struct net_device *dev)
@@ -3122,7 +3118,6 @@ static void mdio_write(struct net_device *dev, int phy_id, int location, int val
iowrite16(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
mdio_delay();
}
- return;
}
/* ACPI: Advanced Configuration and Power Interface. */
diff --git a/drivers/net/7990.c b/drivers/net/7990.c
index 500e135723bd..903bcb3ef5bd 100644
--- a/drivers/net/7990.c
+++ b/drivers/net/7990.c
@@ -262,7 +262,7 @@ static int lance_reset (struct net_device *dev)
load_csrs (lp);
lance_init_ring (dev);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
status = init_restart_lance (lp);
#ifdef DEBUG_DRIVER
printk ("Lance restart=%d\n", status);
@@ -526,7 +526,7 @@ void lance_tx_timeout(struct net_device *dev)
{
printk("lance_tx_timeout\n");
lance_reset(dev);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue (dev);
}
EXPORT_SYMBOL_GPL(lance_tx_timeout);
@@ -574,7 +574,6 @@ int lance_start_xmit (struct sk_buff *skb, struct net_device *dev)
outs++;
/* Kick the lance: transmit now */
WRITERDP(lp, LE_C0_INEA | LE_C0_TDMD);
- dev->trans_start = jiffies;
dev_kfree_skb (skb);
spin_lock_irqsave (&lp->devlock, flags);
@@ -594,7 +593,7 @@ static void lance_load_multicast (struct net_device *dev)
struct lance_private *lp = netdev_priv(dev);
volatile struct lance_init_block *ib = lp->init_block;
volatile u16 *mcast_table = (u16 *)&ib->filter;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
char *addrs;
u32 crc;
@@ -609,8 +608,8 @@ static void lance_load_multicast (struct net_device *dev)
ib->filter [1] = 0;
/* Add addresses */
- netdev_for_each_mc_addr(dmi, dev) {
- addrs = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ addrs = ha->addr;
/* multicast address? */
if (!(*addrs & 1))
@@ -620,7 +619,6 @@ static void lance_load_multicast (struct net_device *dev)
crc = crc >> 26;
mcast_table [crc >> 4] |= 1 << (crc & 0xf);
}
- return;
}
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index a09e6ce3eaa0..9c149750e2bf 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -882,7 +882,6 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
spin_unlock_irqrestore(&cp->lock, intr_flags);
cpw8(TxPoll, NormalTxPoll);
- dev->trans_start = jiffies;
return NETDEV_TX_OK;
}
@@ -910,11 +909,11 @@ static void __cp_set_rx_mode (struct net_device *dev)
rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0xffffffff;
} else {
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
rx_mode = AcceptBroadcast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0;
- netdev_for_each_mc_addr(mclist, dev) {
- int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
+ netdev_for_each_mc_addr(ha, dev) {
+ int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
rx_mode |= AcceptMulticast;
@@ -1225,8 +1224,6 @@ static void cp_tx_timeout(struct net_device *dev)
netif_wake_queue(dev);
spin_unlock_irqrestore(&cp->lock, flags);
-
- return;
}
#ifdef BROKEN
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index f0d23de32967..4ba72933f0da 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -1716,8 +1716,6 @@ static netdev_tx_t rtl8139_start_xmit (struct sk_buff *skb,
RTL_W32_F (TxStatus0 + (entry * sizeof (u32)),
tp->tx_flag | max(len, (unsigned int)ETH_ZLEN));
- dev->trans_start = jiffies;
-
tp->cur_tx++;
if ((tp->cur_tx - NUM_TX_DESC) == tp->dirty_tx)
@@ -2503,11 +2501,11 @@ static void __set_rx_mode (struct net_device *dev)
rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0xffffffff;
} else {
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
rx_mode = AcceptBroadcast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0;
- netdev_for_each_mc_addr(mclist, dev) {
- int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
+ netdev_for_each_mc_addr(ha, dev) {
+ int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
rx_mode |= AcceptMulticast;
diff --git a/drivers/net/82596.c b/drivers/net/82596.c
index 56e68db48861..dd8dc15556cb 100644
--- a/drivers/net/82596.c
+++ b/drivers/net/82596.c
@@ -1050,7 +1050,7 @@ static void i596_tx_timeout (struct net_device *dev)
lp->last_restart = dev->stats.tx_packets;
}
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue (dev);
}
@@ -1060,7 +1060,6 @@ static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct tx_cmd *tx_cmd;
struct i596_tbd *tbd;
short length = skb->len;
- dev->trans_start = jiffies;
DEB(DEB_STARTTX,printk(KERN_DEBUG "%s: i596_start_xmit(%x,%p) called\n",
dev->name, skb->len, skb->data));
@@ -1542,7 +1541,7 @@ static void set_multicast_list(struct net_device *dev)
}
if (!netdev_mc_empty(dev)) {
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
unsigned char *cp;
struct mc_cmd *cmd;
@@ -1552,10 +1551,10 @@ static void set_multicast_list(struct net_device *dev)
cmd->cmd.command = CmdMulticastList;
cmd->mc_cnt = cnt * ETH_ALEN;
cp = cmd->mc_addrs;
- netdev_for_each_mc_addr(dmi, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
if (!cnt--)
break;
- memcpy(cp, dmi->dmi_addr, ETH_ALEN);
+ memcpy(cp, ha->addr, ETH_ALEN);
if (i596_debug > 1)
DEB(DEB_MULTI,printk(KERN_INFO "%s: Adding address %pM\n",
dev->name, cp));
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 7b832c727f87..2decc597bda7 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -483,7 +483,7 @@ config XTENSA_XT2000_SONIC
This is the driver for the onboard card of the Xtensa XT2000 board.
config MIPS_AU1X00_ENET
- bool "MIPS AU1000 Ethernet support"
+ tristate "MIPS AU1000 Ethernet support"
depends on SOC_AU1X00
select PHYLIB
select CRC32
@@ -887,6 +887,13 @@ config BFIN_MAC_RMII
help
Use Reduced PHY MII Interface
+config BFIN_MAC_USE_HWSTAMP
+ bool "Use IEEE 1588 hwstamp"
+ depends on BFIN_MAC && BF518
+ default y
+ help
+ To support the IEEE 1588 Precision Time Protocol (PTP), select y here
+
config SMC9194
tristate "SMC 9194 support"
depends on NET_VENDOR_SMC && (ISA || MAC && BROKEN)
@@ -1453,20 +1460,6 @@ config FORCEDETH
To compile this driver as a module, choose M here. The module
will be called forcedeth.
-config FORCEDETH_NAPI
- bool "Use Rx Polling (NAPI) (EXPERIMENTAL)"
- depends on FORCEDETH && EXPERIMENTAL
- help
- NAPI is a new driver API designed to reduce CPU and interrupt load
- when the driver is receiving lots of packets from the card. It is
- still somewhat experimental and thus not yet enabled by default.
-
- If your estimated Rx load is 10kpps or more, or if the card will be
- deployed on potentially unfriendly networks (e.g. in a firewall),
- then say Y here.
-
- If in doubt, say N.
-
config CS89x0
tristate "CS89x0 support"
depends on NET_ETHERNET && (ISA || EISA || MACH_IXDP2351 \
@@ -1916,6 +1909,7 @@ config FEC
bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
depends on M523x || M527x || M5272 || M528x || M520x || M532x || \
MACH_MX27 || ARCH_MX35 || ARCH_MX25 || ARCH_MX5
+ select PHYLIB
help
Say Y here if you want to use the built-in 10/100 Fast ethernet
controller on some Motorola ColdFire and Freescale i.MX processors.
@@ -2434,8 +2428,8 @@ config MV643XX_ETH
config XILINX_LL_TEMAC
tristate "Xilinx LL TEMAC (LocalLink Tri-mode Ethernet MAC) driver"
+ depends on PPC || MICROBLAZE
select PHYLIB
- depends on PPC_DCR_NATIVE
help
This driver supports the Xilinx 10/100/1000 LocalLink TEMAC
core used in Xilinx Spartan and Virtex FPGAs
@@ -2618,11 +2612,11 @@ config EHEA
will be called ehea.
config ENIC
- tristate "Cisco 10G Ethernet NIC support"
+ tristate "Cisco VIC Ethernet NIC Support"
depends on PCI && INET
select INET_LRO
help
- This enables the support for the Cisco 10G Ethernet card.
+ This enables the support for the Cisco VIC Ethernet card.
config IXGBE
tristate "Intel(R) 10GbE PCI Express adapters support"
@@ -2862,6 +2856,8 @@ source "drivers/ieee802154/Kconfig"
source "drivers/s390/net/Kconfig"
+source "drivers/net/caif/Kconfig"
+
config XEN_NETDEV_FRONTEND
tristate "Xen network device frontend driver"
depends on XEN
@@ -3180,17 +3176,12 @@ config PPPOATM
config PPPOL2TP
tristate "PPP over L2TP (EXPERIMENTAL)"
- depends on EXPERIMENTAL && PPP && INET
+ depends on EXPERIMENTAL && L2TP && PPP
help
Support for PPP-over-L2TP socket family. L2TP is a protocol
used by ISPs and enterprises to tunnel PPP traffic over UDP
tunnels. L2TP is replacing PPTP for VPN uses.
- This kernel component handles only L2TP data packets: a
- userland daemon handles L2TP the control protocol (tunnel
- and session setup). One such daemon is OpenL2TP
- (http://openl2tp.sourceforge.net/).
-
config SLIP
tristate "SLIP (serial line) support"
---help---
@@ -3277,15 +3268,14 @@ config NET_FC
"SCSI generic support".
config NETCONSOLE
- tristate "Network console logging support (EXPERIMENTAL)"
- depends on EXPERIMENTAL
+ tristate "Network console logging support"
---help---
If you want to log kernel messages over the network, enable this.
See <file:Documentation/networking/netconsole.txt> for details.
config NETCONSOLE_DYNAMIC
- bool "Dynamic reconfiguration of logging targets (EXPERIMENTAL)"
- depends on NETCONSOLE && SYSFS && EXPERIMENTAL
+ bool "Dynamic reconfiguration of logging targets"
+ depends on NETCONSOLE && SYSFS
select CONFIGFS_FS
help
This option enables the ability to dynamically reconfigure target
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 12b280afdd51..0a0512ae77da 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -161,7 +161,7 @@ obj-$(CONFIG_PPP_DEFLATE) += ppp_deflate.o
obj-$(CONFIG_PPP_BSDCOMP) += bsd_comp.o
obj-$(CONFIG_PPP_MPPE) += ppp_mppe.o
obj-$(CONFIG_PPPOE) += pppox.o pppoe.o
-obj-$(CONFIG_PPPOL2TP) += pppox.o pppol2tp.o
+obj-$(CONFIG_PPPOL2TP) += pppox.o
obj-$(CONFIG_SLIP) += slip.o
obj-$(CONFIG_SLHC) += slhc.o
@@ -292,5 +292,6 @@ obj-$(CONFIG_VIRTIO_NET) += virtio_net.o
obj-$(CONFIG_SFC) += sfc/
obj-$(CONFIG_WIMAX) += wimax/
+obj-$(CONFIG_CAIF) += caif/
obj-$(CONFIG_OCTEON_MGMT_ETHERNET) += octeon/
diff --git a/drivers/net/a2065.c b/drivers/net/a2065.c
index ed5e9742be2c..f142cc21e453 100644
--- a/drivers/net/a2065.c
+++ b/drivers/net/a2065.c
@@ -525,7 +525,7 @@ static inline int lance_reset (struct net_device *dev)
load_csrs (lp);
lance_init_ring (dev);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_start_queue(dev);
status = init_restart_lance (lp);
@@ -588,7 +588,6 @@ static netdev_tx_t lance_start_xmit (struct sk_buff *skb,
/* Kick the lance: transmit now */
ll->rdp = LE_C0_INEA | LE_C0_TDMD;
- dev->trans_start = jiffies;
dev_kfree_skb (skb);
local_irq_restore(flags);
@@ -602,7 +601,7 @@ static void lance_load_multicast (struct net_device *dev)
struct lance_private *lp = netdev_priv(dev);
volatile struct lance_init_block *ib = lp->init_block;
volatile u16 *mcast_table = (u16 *)&ib->filter;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
char *addrs;
u32 crc;
@@ -617,8 +616,8 @@ static void lance_load_multicast (struct net_device *dev)
ib->filter [1] = 0;
/* Add addresses */
- netdev_for_each_mc_addr(dmi, dev) {
- addrs = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ addrs = ha->addr;
/* multicast address? */
if (!(*addrs & 1))
@@ -628,7 +627,6 @@ static void lance_load_multicast (struct net_device *dev)
crc = crc >> 26;
mcast_table [crc >> 4] |= 1 << (crc & 0xf);
}
- return;
}
static void lance_set_multicast (struct net_device *dev)
@@ -674,6 +672,7 @@ static struct zorro_device_id a2065_zorro_tbl[] __devinitdata = {
{ ZORRO_PROD_AMERISTAR_A2065 },
{ 0 }
};
+MODULE_DEVICE_TABLE(zorro, a2065_zorro_tbl);
static struct zorro_driver a2065_driver = {
.name = "a2065",
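The MODULE_DEVICE_TABLE(zorro, ...) line added here (and to ariadne below) exports the driver's ID table into the module's alias information, so userspace (udev/modprobe) can autoload the module when a matching Zorro board is detected instead of requiring it to be loaded by hand. Roughly, reusing the ID already listed in the hunk above and an illustrative table name:

#include <linux/module.h>
#include <linux/zorro.h>

static struct zorro_device_id my_zorro_tbl[] __devinitdata = {
	{ ZORRO_PROD_AMERISTAR_A2065 },
	{ 0 }
};
MODULE_DEVICE_TABLE(zorro, my_zorro_tbl);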
diff --git a/drivers/net/ac3200.c b/drivers/net/ac3200.c
index eac73382c087..b9115a776fdd 100644
--- a/drivers/net/ac3200.c
+++ b/drivers/net/ac3200.c
@@ -307,8 +307,6 @@ static void ac_reset_8390(struct net_device *dev)
ei_status.txing = 0;
outb(AC_ENABLE, ioaddr + AC_RESET_PORT);
if (ei_debug > 1) printk("reset done\n");
-
- return;
}
/* Grab the 8390 specific header. Similar to the block_input routine, but
diff --git a/drivers/net/acenic.c b/drivers/net/acenic.c
index 97a3dfd94dfa..b9a591604e5b 100644
--- a/drivers/net/acenic.c
+++ b/drivers/net/acenic.c
@@ -661,7 +661,7 @@ static void __devexit acenic_remove_one(struct pci_dev *pdev)
dma_addr_t mapping;
ringp = &ap->skb->rx_std_skbuff[i];
- mapping = pci_unmap_addr(ringp, mapping);
+ mapping = dma_unmap_addr(ringp, mapping);
pci_unmap_page(ap->pdev, mapping,
ACE_STD_BUFSIZE,
PCI_DMA_FROMDEVICE);
@@ -681,7 +681,7 @@ static void __devexit acenic_remove_one(struct pci_dev *pdev)
dma_addr_t mapping;
ringp = &ap->skb->rx_mini_skbuff[i];
- mapping = pci_unmap_addr(ringp,mapping);
+ mapping = dma_unmap_addr(ringp,mapping);
pci_unmap_page(ap->pdev, mapping,
ACE_MINI_BUFSIZE,
PCI_DMA_FROMDEVICE);
@@ -700,7 +700,7 @@ static void __devexit acenic_remove_one(struct pci_dev *pdev)
dma_addr_t mapping;
ringp = &ap->skb->rx_jumbo_skbuff[i];
- mapping = pci_unmap_addr(ringp, mapping);
+ mapping = dma_unmap_addr(ringp, mapping);
pci_unmap_page(ap->pdev, mapping,
ACE_JUMBO_BUFSIZE,
PCI_DMA_FROMDEVICE);
@@ -1683,7 +1683,7 @@ static void ace_load_std_rx_ring(struct ace_private *ap, int nr_bufs)
ACE_STD_BUFSIZE,
PCI_DMA_FROMDEVICE);
ap->skb->rx_std_skbuff[idx].skb = skb;
- pci_unmap_addr_set(&ap->skb->rx_std_skbuff[idx],
+ dma_unmap_addr_set(&ap->skb->rx_std_skbuff[idx],
mapping, mapping);
rd = &ap->rx_std_ring[idx];
@@ -1744,7 +1744,7 @@ static void ace_load_mini_rx_ring(struct ace_private *ap, int nr_bufs)
ACE_MINI_BUFSIZE,
PCI_DMA_FROMDEVICE);
ap->skb->rx_mini_skbuff[idx].skb = skb;
- pci_unmap_addr_set(&ap->skb->rx_mini_skbuff[idx],
+ dma_unmap_addr_set(&ap->skb->rx_mini_skbuff[idx],
mapping, mapping);
rd = &ap->rx_mini_ring[idx];
@@ -1800,7 +1800,7 @@ static void ace_load_jumbo_rx_ring(struct ace_private *ap, int nr_bufs)
ACE_JUMBO_BUFSIZE,
PCI_DMA_FROMDEVICE);
ap->skb->rx_jumbo_skbuff[idx].skb = skb;
- pci_unmap_addr_set(&ap->skb->rx_jumbo_skbuff[idx],
+ dma_unmap_addr_set(&ap->skb->rx_jumbo_skbuff[idx],
mapping, mapping);
rd = &ap->rx_jumbo_ring[idx];
@@ -2013,7 +2013,7 @@ static void ace_rx_int(struct net_device *dev, u32 rxretprd, u32 rxretcsm)
skb = rip->skb;
rip->skb = NULL;
pci_unmap_page(ap->pdev,
- pci_unmap_addr(rip, mapping),
+ dma_unmap_addr(rip, mapping),
mapsize,
PCI_DMA_FROMDEVICE);
skb_put(skb, retdesc->size);
@@ -2078,18 +2078,16 @@ static inline void ace_tx_int(struct net_device *dev,
do {
struct sk_buff *skb;
- dma_addr_t mapping;
struct tx_ring_info *info;
info = ap->skb->tx_skbuff + idx;
skb = info->skb;
- mapping = pci_unmap_addr(info, mapping);
- if (mapping) {
- pci_unmap_page(ap->pdev, mapping,
- pci_unmap_len(info, maplen),
+ if (dma_unmap_len(info, maplen)) {
+ pci_unmap_page(ap->pdev, dma_unmap_addr(info, mapping),
+ dma_unmap_len(info, maplen),
PCI_DMA_TODEVICE);
- pci_unmap_addr_set(info, mapping, 0);
+ dma_unmap_len_set(info, maplen, 0);
}
if (skb) {
@@ -2377,14 +2375,12 @@ static int ace_close(struct net_device *dev)
for (i = 0; i < ACE_TX_RING_ENTRIES(ap); i++) {
struct sk_buff *skb;
- dma_addr_t mapping;
struct tx_ring_info *info;
info = ap->skb->tx_skbuff + i;
skb = info->skb;
- mapping = pci_unmap_addr(info, mapping);
- if (mapping) {
+ if (dma_unmap_len(info, maplen)) {
if (ACE_IS_TIGON_I(ap)) {
/* NB: TIGON_1 is special, tx_ring is in io space */
struct tx_desc __iomem *tx;
@@ -2395,10 +2391,10 @@ static int ace_close(struct net_device *dev)
} else
memset(ap->tx_ring + i, 0,
sizeof(struct tx_desc));
- pci_unmap_page(ap->pdev, mapping,
- pci_unmap_len(info, maplen),
+ pci_unmap_page(ap->pdev, dma_unmap_addr(info, mapping),
+ dma_unmap_len(info, maplen),
PCI_DMA_TODEVICE);
- pci_unmap_addr_set(info, mapping, 0);
+ dma_unmap_len_set(info, maplen, 0);
}
if (skb) {
dev_kfree_skb(skb);
@@ -2433,8 +2429,8 @@ ace_map_tx_skb(struct ace_private *ap, struct sk_buff *skb,
info = ap->skb->tx_skbuff + idx;
info->skb = tail;
- pci_unmap_addr_set(info, mapping, mapping);
- pci_unmap_len_set(info, maplen, skb->len);
+ dma_unmap_addr_set(info, mapping, mapping);
+ dma_unmap_len_set(info, maplen, skb->len);
return mapping;
}
@@ -2553,8 +2549,8 @@ restart:
} else {
info->skb = NULL;
}
- pci_unmap_addr_set(info, mapping, mapping);
- pci_unmap_len_set(info, maplen, frag->size);
+ dma_unmap_addr_set(info, mapping, mapping);
+ dma_unmap_len_set(info, maplen, frag->size);
ace_load_tx_bd(ap, desc, mapping, flagsize, vlan_tag);
}
}
@@ -2923,8 +2919,6 @@ static void __devinit ace_clear(struct ace_regs __iomem *regs, u32 dest, int siz
dest += tsize;
size -= tsize;
}
-
- return;
}
diff --git a/drivers/net/acenic.h b/drivers/net/acenic.h
index 17079b927ffa..0681da7e8753 100644
--- a/drivers/net/acenic.h
+++ b/drivers/net/acenic.h
@@ -589,7 +589,7 @@ struct ace_info {
struct ring_info {
struct sk_buff *skb;
- DECLARE_PCI_UNMAP_ADDR(mapping)
+ DEFINE_DMA_UNMAP_ADDR(mapping);
};
@@ -600,8 +600,8 @@ struct ring_info {
*/
struct tx_ring_info {
struct sk_buff *skb;
- DECLARE_PCI_UNMAP_ADDR(mapping)
- DECLARE_PCI_UNMAP_LEN(maplen)
+ DEFINE_DMA_UNMAP_ADDR(mapping);
+ DEFINE_DMA_UNMAP_LEN(maplen);
};
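The acenic hunks above swap the PCI-specific unmap-state macros (DECLARE_PCI_UNMAP_ADDR(), pci_unmap_addr(), pci_unmap_len(), ...) for the bus-agnostic DMA equivalents, and switch the "was this entry mapped?" test from the stored address to the stored length. A condensed sketch of the new bookkeeping, with the ring structure modelled on acenic's tx_ring_info; the helpers around it are illustrative:

#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

struct my_ring_info {
	struct sk_buff *skb;
	DEFINE_DMA_UNMAP_ADDR(mapping);
	DEFINE_DMA_UNMAP_LEN(maplen);
};

static void my_save_mapping(struct my_ring_info *info,
			    dma_addr_t mapping, unsigned int len)
{
	dma_unmap_addr_set(info, mapping, mapping);
	dma_unmap_len_set(info, maplen, len);
}

static void my_unmap(struct pci_dev *pdev, struct my_ring_info *info)
{
	if (dma_unmap_len(info, maplen)) {
		pci_unmap_page(pdev, dma_unmap_addr(info, mapping),
			       dma_unmap_len(info, maplen),
			       PCI_DMA_TODEVICE);
		dma_unmap_len_set(info, maplen, 0);
	}
}

When CONFIG_NEED_DMA_MAP_STATE is not set, the DEFINE_DMA_UNMAP_* fields expand to nothing and the accessors collapse to constants, so the bookkeeping costs nothing on platforms that never need it.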
diff --git a/drivers/net/amd8111e.c b/drivers/net/amd8111e.c
index 8d58f0a8f42f..585c25f4b60c 100644
--- a/drivers/net/amd8111e.c
+++ b/drivers/net/amd8111e.c
@@ -1339,8 +1339,6 @@ static netdev_tx_t amd8111e_start_xmit(struct sk_buff *skb,
writel( VAL1 | TDMD0, lp->mmio + CMD0);
writel( VAL2 | RDMD0,lp->mmio + CMD0);
- dev->trans_start = jiffies;
-
if(amd8111e_tx_queue_avail(lp) < 0){
netif_stop_queue(dev);
}
@@ -1376,7 +1374,7 @@ list to the device.
*/
static void amd8111e_set_multicast_list(struct net_device *dev)
{
- struct dev_mc_list *mc_ptr;
+ struct netdev_hw_addr *ha;
struct amd8111e_priv *lp = netdev_priv(dev);
u32 mc_filter[2] ;
int bit_num;
@@ -1407,8 +1405,8 @@ static void amd8111e_set_multicast_list(struct net_device *dev)
/* load all the multicast addresses in the logic filter */
lp->options |= OPTION_MULTICAST_ENABLE;
mc_filter[1] = mc_filter[0] = 0;
- netdev_for_each_mc_addr(mc_ptr, dev) {
- bit_num = (ether_crc_le(ETH_ALEN, mc_ptr->dmi_addr) >> 26) & 0x3f;
+ netdev_for_each_mc_addr(ha, dev) {
+ bit_num = (ether_crc_le(ETH_ALEN, ha->addr) >> 26) & 0x3f;
mc_filter[bit_num >> 5] |= 1 << (bit_num & 31);
}
amd8111e_writeq(*(u64*)mc_filter,lp->mmio+ LADRF);
diff --git a/drivers/net/apne.c b/drivers/net/apne.c
index 1437f5d12121..2fe60f168108 100644
--- a/drivers/net/apne.c
+++ b/drivers/net/apne.c
@@ -521,7 +521,6 @@ apne_block_output(struct net_device *dev, int count,
outb(ENISR_RDC, nic_base + NE_EN0_ISR); /* Ack intr. */
ei_status.dmaing &= ~0x01;
- return;
}
static irqreturn_t apne_interrupt(int irq, void *dev_id)
diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c
index 6f8d6206b5c4..748c9f526e71 100644
--- a/drivers/net/appletalk/cops.c
+++ b/drivers/net/appletalk/cops.c
@@ -593,8 +593,6 @@ static void cops_load (struct net_device *dev)
tangent_wait_reset(ioaddr);
inb(ioaddr); /* Clear initial ready signal. */
}
-
- return;
}
/*
@@ -701,8 +699,6 @@ static void cops_poll(unsigned long ltdev)
/* poll 20 times per second */
cops_timer.expires = jiffies + HZ/20;
add_timer(&cops_timer);
-
- return;
}
/*
@@ -866,7 +862,7 @@ static void cops_timeout(struct net_device *dev)
}
printk(KERN_WARNING "%s: Transmit timed out.\n", dev->name);
cops_jumpstart(dev); /* Restart the card. */
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
}
@@ -919,9 +915,8 @@ static netdev_tx_t cops_send_packet(struct sk_buff *skb,
/* Done sending packet, update counters and cleanup. */
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
- dev->trans_start = jiffies;
dev_kfree_skb (skb);
- return NETDEV_TX_OK;
+ return NETDEV_TX_OK;
}
/*
diff --git a/drivers/net/appletalk/ltpc.c b/drivers/net/appletalk/ltpc.c
index 6af65b656f31..adc07551739e 100644
--- a/drivers/net/appletalk/ltpc.c
+++ b/drivers/net/appletalk/ltpc.c
@@ -641,7 +641,6 @@ done:
inb_p(base+7);
inb_p(base+7);
}
- return;
}
diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c
index d8f029303754..a746ba272f04 100644
--- a/drivers/net/arcnet/arcnet.c
+++ b/drivers/net/arcnet/arcnet.c
@@ -654,7 +654,6 @@ netdev_tx_t arcnet_send_packet(struct sk_buff *skb,
}
}
retval = NETDEV_TX_OK;
- dev->trans_start = jiffies;
lp->next_tx = txbuf;
} else {
retval = NETDEV_TX_BUSY;
diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c
index 2c712af6c265..48a1dbf01e60 100644
--- a/drivers/net/arcnet/com20020-pci.c
+++ b/drivers/net/arcnet/com20020-pci.c
@@ -164,8 +164,8 @@ static DEFINE_PCI_DEVICE_TABLE(com20020pci_id_table) = {
{ 0x1571, 0xa204, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
{ 0x1571, 0xa205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
{ 0x1571, 0xa206, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
- { 0x10B5, 0x9030, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
- { 0x10B5, 0x9050, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
+ { 0x10B5, 0x9030, 0x10B5, 0x2978, 0, 0, ARC_CAN_10MBIT },
+ { 0x10B5, 0x9050, 0x10B5, 0x2273, 0, 0, ARC_CAN_10MBIT },
{ 0x14BA, 0x6000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
{ 0x10B5, 0x2200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
{0,}
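The com20020-pci table change narrows two entries from full wildcards to specific subsystem IDs. The PLX 0x9030/0x9050 parts are general-purpose PCI bridge/target chips found on many unrelated boards, so matching on vendor/device alone risks binding the ARCnet driver to hardware it cannot drive; the subsystem vendor/device pair identifies the actual card. For reference, the entry layout is as follows (values copied from the hunk above; ARC_CAN_10MBIT comes from the ARCnet driver headers, the table name is illustrative):

#include <linux/pci.h>

/* vendor, device, subvendor, subdevice, class, class_mask, driver_data */
static DEFINE_PCI_DEVICE_TABLE(my_id_table) = {
	{ 0x10B5, 0x9030, 0x10B5, 0x2978, 0, 0, ARC_CAN_10MBIT },
	{ 0 }
};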
diff --git a/drivers/net/ariadne.c b/drivers/net/ariadne.c
index fa1a2354f5f9..39214e512452 100644
--- a/drivers/net/ariadne.c
+++ b/drivers/net/ariadne.c
@@ -145,6 +145,7 @@ static struct zorro_device_id ariadne_zorro_tbl[] __devinitdata = {
{ ZORRO_PROD_VILLAGE_TRONIC_ARIADNE },
{ 0 }
};
+MODULE_DEVICE_TABLE(zorro, ariadne_zorro_tbl);
static struct zorro_driver ariadne_driver = {
.name = "ariadne",
@@ -676,8 +677,6 @@ static netdev_tx_t ariadne_start_xmit(struct sk_buff *skb,
lance->RAP = CSR0; /* PCnet-ISA Controller Status */
lance->RDP = INEA|TDMD;
- dev->trans_start = jiffies;
-
if (lowb(priv->tx_ring[(entry+1) % TX_RING_SIZE]->TMD1) != 0) {
netif_stop_queue(dev);
priv->tx_full = 1;
diff --git a/drivers/net/arm/am79c961a.c b/drivers/net/arm/am79c961a.c
index f1f58c5e27bf..8c496fb1ac9e 100644
--- a/drivers/net/arm/am79c961a.c
+++ b/drivers/net/arm/am79c961a.c
@@ -383,12 +383,12 @@ static void am79c961_setmulticastlist (struct net_device *dev)
} else if (dev->flags & IFF_ALLMULTI) {
memset(multi_hash, 0xff, sizeof(multi_hash));
} else {
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
memset(multi_hash, 0x00, sizeof(multi_hash));
- netdev_for_each_mc_addr(dmi, dev)
- am79c961_mc_hash(dmi->dmi_addr, multi_hash);
+ netdev_for_each_mc_addr(ha, dev)
+ am79c961_mc_hash(ha->addr, multi_hash);
}
spin_lock_irqsave(&priv->chip_lock, flags);
@@ -469,7 +469,6 @@ am79c961_sendpacket(struct sk_buff *skb, struct net_device *dev)
spin_lock_irqsave(&priv->chip_lock, flags);
write_rreg (dev->base_addr, CSR0, CSR0_TDMD|CSR0_IENA);
- dev->trans_start = jiffies;
spin_unlock_irqrestore(&priv->chip_lock, flags);
/*
diff --git a/drivers/net/arm/at91_ether.c b/drivers/net/arm/at91_ether.c
index aed5b5479b50..e07b314ed8fd 100644
--- a/drivers/net/arm/at91_ether.c
+++ b/drivers/net/arm/at91_ether.c
@@ -557,14 +557,14 @@ static int hash_get_index(__u8 *addr)
*/
static void at91ether_sethashtable(struct net_device *dev)
{
- struct dev_mc_list *curr;
+ struct netdev_hw_addr *ha;
unsigned long mc_filter[2];
unsigned int bitnr;
mc_filter[0] = mc_filter[1] = 0;
- netdev_for_each_mc_addr(curr, dev) {
- bitnr = hash_get_index(curr->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev) {
+ bitnr = hash_get_index(ha->addr);
mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
}
@@ -824,7 +824,6 @@ static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Set length of the packet in the Transmit Control register */
at91_emac_write(AT91_EMAC_TCR, skb->len);
- dev->trans_start = jiffies;
} else {
printk(KERN_ERR "at91_ether.c: at91ether_start_xmit() called, but device is busy!\n");
return NETDEV_TX_BUSY; /* if we return anything but zero, dev.c:1055 calls kfree_skb(skb)
diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c
index cd17d09f385c..4a5ec9470aa1 100644
--- a/drivers/net/arm/ep93xx_eth.c
+++ b/drivers/net/arm/ep93xx_eth.c
@@ -374,8 +374,6 @@ static int ep93xx_xmit(struct sk_buff *skb, struct net_device *dev)
skb->len, DMA_TO_DEVICE);
dev_kfree_skb(skb);
- dev->trans_start = jiffies;
-
spin_lock_irq(&ep->tx_pending_lock);
ep->tx_pending++;
if (ep->tx_pending == TX_QUEUE_ENTRIES)
diff --git a/drivers/net/arm/ether1.c b/drivers/net/arm/ether1.c
index e47c0d962857..b17ab5153f51 100644
--- a/drivers/net/arm/ether1.c
+++ b/drivers/net/arm/ether1.c
@@ -736,7 +736,6 @@ ether1_sendpacket (struct sk_buff *skb, struct net_device *dev)
local_irq_restore(flags);
/* handle transmit */
- dev->trans_start = jiffies;
/* check to see if we have room for a full sized ether frame */
tmp = priv(dev)->tx_head;
diff --git a/drivers/net/arm/ether3.c b/drivers/net/arm/ether3.c
index d9de9bce2395..1361b7367c28 100644
--- a/drivers/net/arm/ether3.c
+++ b/drivers/net/arm/ether3.c
@@ -529,7 +529,6 @@ ether3_sendpacket(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_BUSY; /* unable to queue */
}
- dev->trans_start = jiffies;
ptr = 0x600 * priv(dev)->tx_head;
priv(dev)->tx_head = next_ptr;
next_ptr *= 0x600;
diff --git a/drivers/net/arm/ixp4xx_eth.c b/drivers/net/arm/ixp4xx_eth.c
index 6be8b098b8b4..24df0325090c 100644
--- a/drivers/net/arm/ixp4xx_eth.c
+++ b/drivers/net/arm/ixp4xx_eth.c
@@ -708,7 +708,6 @@ static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
/* NPE firmware pads short frames with zeros internally */
wmb();
queue_put_desc(TX_QUEUE(port->id), tx_desc_phys(port, n), desc);
- dev->trans_start = jiffies;
if (qmgr_stat_below_low_watermark(txreadyq)) { /* empty */
#if DEBUG_TX
@@ -736,7 +735,7 @@ static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
static void eth_set_mcast_list(struct net_device *dev)
{
struct port *port = netdev_priv(dev);
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
u8 diffs[ETH_ALEN], *addr;
int i;
@@ -749,11 +748,11 @@ static void eth_set_mcast_list(struct net_device *dev)
memset(diffs, 0, ETH_ALEN);
addr = NULL;
- netdev_for_each_mc_addr(mclist, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
if (!addr)
- addr = mclist->dmi_addr; /* first MAC address */
+ addr = ha->addr; /* first MAC address */
for (i = 0; i < ETH_ALEN; i++)
- diffs[i] |= addr[i] ^ mclist->dmi_addr[i];
+ diffs[i] |= addr[i] ^ ha->addr[i];
}
for (i = 0; i < ETH_ALEN; i++) {
diff --git a/drivers/net/arm/ks8695net.c b/drivers/net/arm/ks8695net.c
index 84f8a8f73802..54c6d849cf25 100644
--- a/drivers/net/arm/ks8695net.c
+++ b/drivers/net/arm/ks8695net.c
@@ -332,16 +332,16 @@ ks8695_init_partial_multicast(struct ks8695_priv *ksp,
{
u32 low, high;
int i;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
i = 0;
- netdev_for_each_mc_addr(dmi, ndev) {
+ netdev_for_each_mc_addr(ha, ndev) {
/* Ran out of space in chip? */
BUG_ON(i == KS8695_NR_ADDRESSES);
- low = (dmi->dmi_addr[2] << 24) | (dmi->dmi_addr[3] << 16) |
- (dmi->dmi_addr[4] << 8) | (dmi->dmi_addr[5]);
- high = (dmi->dmi_addr[0] << 8) | (dmi->dmi_addr[1]);
+ low = (ha->addr[2] << 24) | (ha->addr[3] << 16) |
+ (ha->addr[4] << 8) | (ha->addr[5]);
+ high = (ha->addr[0] << 8) | (ha->addr[1]);
ks8695_writereg(ksp, KS8695_AAL_(i), low);
ks8695_writereg(ksp, KS8695_AAH_(i), AAH_E | high);
@@ -1302,8 +1302,6 @@ ks8695_start_xmit(struct sk_buff *skb, struct net_device *ndev)
if (++ksp->tx_ring_used == MAX_TX_DESC)
netif_stop_queue(ndev);
- ndev->trans_start = jiffies;
-
/* Kick the TX DMA in case it decided to go IDLE */
ks8695_writereg(ksp, KS8695_DTSC, 0);
@@ -1472,7 +1470,6 @@ ks8695_probe(struct platform_device *pdev)
/* Configure our private structure a little */
ksp = netdev_priv(ndev);
- memset(ksp, 0, sizeof(struct ks8695_priv));
ksp->dev = &pdev->dev;
ksp->ndev = ndev;
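The memset() dropped from ks8695_probe() was redundant: alloc_etherdev() hands back a net_device whose private area is already zero-filled, so netdev_priv() points at cleared memory from the start. A small sketch with an illustrative private struct:

#include <linux/netdevice.h>
#include <linux/etherdevice.h>

struct my_priv {
	int configured;		/* illustrative field */
};

static struct net_device *my_alloc(void)
{
	struct net_device *ndev = alloc_etherdev(sizeof(struct my_priv));

	if (!ndev)
		return NULL;
	/* netdev_priv(ndev) is already zeroed; no memset() required. */
	return ndev;
}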
diff --git a/drivers/net/arm/w90p910_ether.c b/drivers/net/arm/w90p910_ether.c
index f7c9ca1dfb17..2e852463382b 100644
--- a/drivers/net/arm/w90p910_ether.c
+++ b/drivers/net/arm/w90p910_ether.c
@@ -483,7 +483,7 @@ static void w90p910_reset_mac(struct net_device *dev)
w90p910_init_desc(dev);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
ether->cur_tx = 0x0;
ether->finish_tx = 0x0;
ether->cur_rx = 0x0;
@@ -497,7 +497,7 @@ static void w90p910_reset_mac(struct net_device *dev)
w90p910_trigger_tx(dev);
w90p910_trigger_rx(dev);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
if (netif_queue_stopped(dev))
netif_wake_queue(dev);
@@ -634,8 +634,6 @@ static int w90p910_send_frame(struct net_device *dev,
txbd = &ether->tdesc->desclist[ether->cur_tx];
- dev->trans_start = jiffies;
-
if (txbd->mode & TX_OWEN_DMA)
netif_stop_queue(dev);
@@ -744,7 +742,6 @@ static void netdev_rx(struct net_device *dev)
return;
}
- skb->dev = dev;
skb_reserve(skb, 2);
skb_put(skb, length);
skb_copy_to_linear_data(skb, data, length);
diff --git a/drivers/net/at1700.c b/drivers/net/at1700.c
index 10a20fb9ae65..93185f5f09ac 100644
--- a/drivers/net/at1700.c
+++ b/drivers/net/at1700.c
@@ -583,7 +583,7 @@ static void net_tx_timeout (struct net_device *dev)
outb (0x00, ioaddr + TX_START);
outb (0x03, ioaddr + COL16CNTL);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
lp->tx_started = 0;
lp->tx_queue_ready = 1;
@@ -636,7 +636,6 @@ static netdev_tx_t net_send_packet (struct sk_buff *skb,
outb (0x80 | lp->tx_queue, ioaddr + TX_START);
lp->tx_queue = 0;
lp->tx_queue_len = 0;
- dev->trans_start = jiffies;
lp->tx_started = 1;
netif_start_queue (dev);
} else if (lp->tx_queue_len < 4096 - 1502)
@@ -796,7 +795,6 @@ net_rx(struct net_device *dev)
printk("%s: Exint Rx packet with mode %02x after %d ticks.\n",
dev->name, inb(ioaddr + RX_MODE), i);
}
- return;
}
/* The inverse routine to net_open(). */
@@ -847,12 +845,12 @@ set_rx_mode(struct net_device *dev)
memset(mc_filter, 0x00, sizeof(mc_filter));
outb(1, ioaddr + RX_MODE); /* Ignore almost all multicasts. */
} else {
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
memset(mc_filter, 0, sizeof(mc_filter));
- netdev_for_each_mc_addr(mclist, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
unsigned int bit =
- ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 26;
+ ether_crc_le(ETH_ALEN, ha->addr) >> 26;
mc_filter[bit >> 3] |= (1 << bit);
}
outb(0x02, ioaddr + RX_MODE); /* Use normal mode. */
@@ -870,7 +868,6 @@ set_rx_mode(struct net_device *dev)
outw(saved_bank, ioaddr + CONFIG_0);
}
spin_unlock_irqrestore (&lp->lock, flags);
- return;
}
#ifdef MODULE
diff --git a/drivers/net/atarilance.c b/drivers/net/atarilance.c
index a8686bfec7a1..b57d7dee389a 100644
--- a/drivers/net/atarilance.c
+++ b/drivers/net/atarilance.c
@@ -767,8 +767,8 @@ static void lance_tx_timeout (struct net_device *dev)
/* lance_restart, essentially */
lance_init_ring(dev);
REGA( CSR0 ) = CSR0_INEA | CSR0_INIT | CSR0_STRT;
- dev->trans_start = jiffies;
- netif_wake_queue (dev);
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_wake_queue(dev);
}
/* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */
@@ -836,7 +836,6 @@ static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev )
/* Trigger an immediate send poll. */
DREG = CSR0_INEA | CSR0_TDMD;
- dev->trans_start = jiffies;
if ((MEM->tx_head[(entry+1) & TX_RING_MOD_MASK].flag & TMD1_OWN) ==
TMD1_OWN_HOST)
diff --git a/drivers/net/atl1c/atl1c_ethtool.c b/drivers/net/atl1c/atl1c_ethtool.c
index 32339243d61f..7c521508313c 100644
--- a/drivers/net/atl1c/atl1c_ethtool.c
+++ b/drivers/net/atl1c/atl1c_ethtool.c
@@ -263,8 +263,6 @@ static void atl1c_get_wol(struct net_device *netdev,
wol->wolopts |= WAKE_MAGIC;
if (adapter->wol & AT_WUFC_LNKC)
wol->wolopts |= WAKE_PHY;
-
- return;
}
static int atl1c_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c
index 50dc531a02d8..1c3c046d5f34 100644
--- a/drivers/net/atl1c/atl1c_main.c
+++ b/drivers/net/atl1c/atl1c_main.c
@@ -317,8 +317,6 @@ static void atl1c_common_task(struct work_struct *work)
if (adapter->work_event & ATL1C_WORK_EVENT_LINK_CHANGE)
atl1c_check_link_status(adapter);
-
- return;
}
@@ -354,7 +352,7 @@ static void atl1c_set_multi(struct net_device *netdev)
{
struct atl1c_adapter *adapter = netdev_priv(netdev);
struct atl1c_hw *hw = &adapter->hw;
- struct dev_mc_list *mc_ptr;
+ struct netdev_hw_addr *ha;
u32 mac_ctrl_data;
u32 hash_value;
@@ -377,8 +375,8 @@ static void atl1c_set_multi(struct net_device *netdev)
AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0);
/* comoute mc addresses' hash value ,and put it into hash table */
- netdev_for_each_mc_addr(mc_ptr, netdev) {
- hash_value = atl1c_hash_mc_addr(hw, mc_ptr->dmi_addr);
+ netdev_for_each_mc_addr(ha, netdev) {
+ hash_value = atl1c_hash_mc_addr(hw, ha->addr);
atl1c_hash_set(hw, hash_value);
}
}
@@ -1817,7 +1815,6 @@ rrs_checked:
atl1c_clean_rfd(rfd_ring, rrs, rfd_num);
skb_put(skb, length - ETH_FCS_LEN);
skb->protocol = eth_type_trans(skb, netdev);
- skb->dev = netdev;
atl1c_rx_checksum(adapter, skb, rrs);
if (unlikely(adapter->vlgrp) && rrs->word3 & RRS_VLAN_INS) {
u16 vlan;
diff --git a/drivers/net/atl1e/atl1e_ethtool.c b/drivers/net/atl1e/atl1e_ethtool.c
index ffd696ee7c8e..6943a6c3b948 100644
--- a/drivers/net/atl1e/atl1e_ethtool.c
+++ b/drivers/net/atl1e/atl1e_ethtool.c
@@ -338,8 +338,6 @@ static void atl1e_get_wol(struct net_device *netdev,
wol->wolopts |= WAKE_MAGIC;
if (adapter->wol & AT_WUFC_LNKC)
wol->wolopts |= WAKE_PHY;
-
- return;
}
static int atl1e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
diff --git a/drivers/net/atl1e/atl1e_main.c b/drivers/net/atl1e/atl1e_main.c
index 73302ae468aa..1acea5774e89 100644
--- a/drivers/net/atl1e/atl1e_main.c
+++ b/drivers/net/atl1e/atl1e_main.c
@@ -284,7 +284,7 @@ static void atl1e_set_multi(struct net_device *netdev)
{
struct atl1e_adapter *adapter = netdev_priv(netdev);
struct atl1e_hw *hw = &adapter->hw;
- struct dev_mc_list *mc_ptr;
+ struct netdev_hw_addr *ha;
u32 mac_ctrl_data = 0;
u32 hash_value;
@@ -307,8 +307,8 @@ static void atl1e_set_multi(struct net_device *netdev)
AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0);
/* comoute mc addresses' hash value ,and put it into hash table */
- netdev_for_each_mc_addr(mc_ptr, netdev) {
- hash_value = atl1e_hash_mc_addr(hw, mc_ptr->dmi_addr);
+ netdev_for_each_mc_addr(ha, netdev) {
+ hash_value = atl1e_hash_mc_addr(hw, ha->addr);
atl1e_hash_set(hw, hash_value);
}
}
@@ -707,8 +707,6 @@ static void atl1e_init_ring_resources(struct atl1e_adapter *adapter)
adapter->ring_vir_addr = NULL;
adapter->rx_ring.desc = NULL;
rwlock_init(&adapter->tx_ring.tx_lock);
-
- return;
}
/*
@@ -905,8 +903,6 @@ static inline void atl1e_configure_des_ring(const struct atl1e_adapter *adapter)
AT_WRITE_REG(hw, REG_HOST_RXFPAGE_SIZE, rx_ring->page_size);
/* Load all of base address above */
AT_WRITE_REG(hw, REG_LOAD_PTR, 1);
-
- return;
}
static inline void atl1e_configure_tx(struct atl1e_adapter *adapter)
@@ -950,7 +946,6 @@ static inline void atl1e_configure_tx(struct atl1e_adapter *adapter)
(((u16)hw->tpd_burst & TXQ_CTRL_NUM_TPD_BURST_MASK)
<< TXQ_CTRL_NUM_TPD_BURST_SHIFT)
| TXQ_CTRL_ENH_MODE | TXQ_CTRL_EN);
- return;
}
static inline void atl1e_configure_rx(struct atl1e_adapter *adapter)
@@ -1004,7 +999,6 @@ static inline void atl1e_configure_rx(struct atl1e_adapter *adapter)
RXQ_CTRL_CUT_THRU_EN | RXQ_CTRL_EN;
AT_WRITE_REG(hw, REG_RXQ_CTRL, rxq_ctrl_data);
- return;
}
static inline void atl1e_configure_dma(struct atl1e_adapter *adapter)
@@ -1024,7 +1018,6 @@ static inline void atl1e_configure_dma(struct atl1e_adapter *adapter)
<< DMA_CTRL_DMAW_DLY_CNT_SHIFT;
AT_WRITE_REG(hw, REG_DMA_CTRL, dma_ctrl_data);
- return;
}
static void atl1e_setup_mac_ctrl(struct atl1e_adapter *adapter)
@@ -1428,7 +1421,6 @@ static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
"Memory squeeze, deferring packet\n");
goto skip_pkt;
}
- skb->dev = netdev;
memcpy(skb->data, (u8 *)(prrs + 1), packet_size);
skb_put(skb, packet_size);
skb->protocol = eth_type_trans(skb, netdev);
@@ -1680,7 +1672,7 @@ static void atl1e_tx_map(struct atl1e_adapter *adapter,
{
struct atl1e_tpd_desc *use_tpd = NULL;
struct atl1e_tx_buffer *tx_buffer = NULL;
- u16 buf_len = skb->len - skb->data_len;
+ u16 buf_len = skb_headlen(skb);
u16 map_len = 0;
u16 mapped_len = 0;
u16 hdr_len = 0;
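Two small cleanups recur in the atl1c/atl1e hunks above (and again in atl1/atl2 below): the receive path no longer sets skb->dev by hand, because eth_type_trans() already assigns it, and the open-coded skb->len - skb->data_len becomes skb_headlen(), which names the same quantity, the length of the linear (non-paged) part of the skb. A trivial illustration:

#include <linux/skbuff.h>

/* skb_headlen(skb) == skb->len - skb->data_len: bytes in the linear area. */
static unsigned int my_linear_len(const struct sk_buff *skb)
{
	return skb_headlen(skb);
}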
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index 0ebd8208f606..63b9ba0cc67e 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -1830,8 +1830,6 @@ static void atl1_rx_checksum(struct atl1_adapter *adapter,
adapter->hw_csum_good++;
return;
}
-
- return;
}
/*
@@ -2347,7 +2345,7 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb,
{
struct atl1_adapter *adapter = netdev_priv(netdev);
struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
- int len = skb->len;
+ int len;
int tso;
int count = 1;
int ret_val;
@@ -2359,7 +2357,7 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb,
unsigned int f;
unsigned int proto_hdr_len;
- len -= skb->data_len;
+ len = skb_headlen(skb);
if (unlikely(skb->len <= 0)) {
dev_kfree_skb_any(skb);
@@ -3390,7 +3388,6 @@ static void atl1_get_wol(struct net_device *netdev,
wol->wolopts = 0;
if (adapter->wol & ATLX_WUFC_MAG)
wol->wolopts |= WAKE_MAGIC;
- return;
}
static int atl1_set_wol(struct net_device *netdev,
diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
index 54662f24f9bb..8da87383fb39 100644
--- a/drivers/net/atlx/atl2.c
+++ b/drivers/net/atlx/atl2.c
@@ -136,7 +136,7 @@ static void atl2_set_multi(struct net_device *netdev)
{
struct atl2_adapter *adapter = netdev_priv(netdev);
struct atl2_hw *hw = &adapter->hw;
- struct dev_mc_list *mc_ptr;
+ struct netdev_hw_addr *ha;
u32 rctl;
u32 hash_value;
@@ -158,8 +158,8 @@ static void atl2_set_multi(struct net_device *netdev)
ATL2_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0);
/* comoute mc addresses' hash value ,and put it into hash table */
- netdev_for_each_mc_addr(mc_ptr, netdev) {
- hash_value = atl2_hash_mc_addr(hw, mc_ptr->dmi_addr);
+ netdev_for_each_mc_addr(ha, netdev) {
+ hash_value = atl2_hash_mc_addr(hw, ha->addr);
atl2_hash_set(hw, hash_value);
}
}
@@ -422,7 +422,6 @@ static void atl2_intr_rx(struct atl2_adapter *adapter)
netdev->stats.rx_dropped++;
break;
}
- skb->dev = netdev;
memcpy(skb->data, rxd->packet, rx_size);
skb_put(skb, rx_size);
skb->protocol = eth_type_trans(skb, netdev);
@@ -893,7 +892,6 @@ static netdev_tx_t atl2_xmit_frame(struct sk_buff *skb,
(adapter->txd_write_ptr >> 2));
mmiowb();
- netdev->trans_start = jiffies;
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/atlx/atlx.c b/drivers/net/atlx/atlx.c
index 72f3306352e2..f979ea2d6d3c 100644
--- a/drivers/net/atlx/atlx.c
+++ b/drivers/net/atlx/atlx.c
@@ -123,7 +123,7 @@ static void atlx_set_multi(struct net_device *netdev)
{
struct atlx_adapter *adapter = netdev_priv(netdev);
struct atlx_hw *hw = &adapter->hw;
- struct dev_mc_list *mc_ptr;
+ struct netdev_hw_addr *ha;
u32 rctl;
u32 hash_value;
@@ -144,8 +144,8 @@ static void atlx_set_multi(struct net_device *netdev)
iowrite32(0, (hw->hw_addr + REG_RX_HASH_TABLE) + (1 << 2));
/* compute mc addresses' hash value ,and put it into hash table */
- netdev_for_each_mc_addr(mc_ptr, netdev) {
- hash_value = atlx_hash_mc_addr(hw, mc_ptr->dmi_addr);
+ netdev_for_each_mc_addr(ha, netdev) {
+ hash_value = atlx_hash_mc_addr(hw, ha->addr);
atlx_hash_set(hw, hash_value);
}
}
diff --git a/drivers/net/atp.c b/drivers/net/atp.c
index 55039d44dc47..bd2f9d331dac 100644
--- a/drivers/net/atp.c
+++ b/drivers/net/atp.c
@@ -547,7 +547,7 @@ static void tx_timeout(struct net_device *dev)
dev->stats.tx_errors++;
/* Try to restart the adapter. */
hardware_init(dev);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
dev->stats.tx_errors++;
}
@@ -586,7 +586,6 @@ static netdev_tx_t atp_send_packet(struct sk_buff *skb,
write_reg(ioaddr, IMR, ISR_RxOK | ISR_TxErr | ISR_TxOK);
write_reg_high(ioaddr, IMR, ISRh_RxErr);
- dev->trans_start = jiffies;
dev_kfree_skb (skb);
return NETDEV_TX_OK;
}
@@ -803,7 +802,6 @@ static void net_rx(struct net_device *dev)
done:
write_reg(ioaddr, CMR1, CMR1_NextPkt);
lp->last_rx_time = jiffies;
- return;
}
static void read_block(long ioaddr, int length, unsigned char *p, int data_mode)
@@ -882,11 +880,11 @@ static void set_rx_mode_8012(struct net_device *dev)
memset(mc_filter, 0xff, sizeof(mc_filter));
new_mode = CMR2h_Normal;
} else {
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
memset(mc_filter, 0, sizeof(mc_filter));
- netdev_for_each_mc_addr(mclist, dev) {
- int filterbit = ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x3f;
+ netdev_for_each_mc_addr(ha, dev) {
+ int filterbit = ether_crc_le(ETH_ALEN, ha->addr) & 0x3f;
mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
}
new_mode = CMR2h_Normal;
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index 4da191b87b0d..ece6128bef14 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -75,14 +75,19 @@ static int au1000_debug = 5;
static int au1000_debug = 3;
#endif
+#define AU1000_DEF_MSG_ENABLE (NETIF_MSG_DRV | \
+ NETIF_MSG_PROBE | \
+ NETIF_MSG_LINK)
+
#define DRV_NAME "au1000_eth"
-#define DRV_VERSION "1.6"
+#define DRV_VERSION "1.7"
#define DRV_AUTHOR "Pete Popov <ppopov@embeddedalley.com>"
#define DRV_DESC "Au1xxx on-chip Ethernet driver"
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION(DRV_DESC);
MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
/*
* Theory of operation
@@ -148,7 +153,7 @@ struct au1000_private *au_macs[NUM_ETH_INTERFACES];
* specific irq-map
*/
-static void enable_mac(struct net_device *dev, int force_reset)
+static void au1000_enable_mac(struct net_device *dev, int force_reset)
{
unsigned long flags;
struct au1000_private *aup = netdev_priv(dev);
@@ -182,8 +187,7 @@ static int au1000_mdio_read(struct net_device *dev, int phy_addr, int reg)
while (*mii_control_reg & MAC_MII_BUSY) {
mdelay(1);
if (--timedout == 0) {
- printk(KERN_ERR "%s: read_MII busy timeout!!\n",
- dev->name);
+ netdev_err(dev, "read_MII busy timeout!!\n");
return -1;
}
}
@@ -197,8 +201,7 @@ static int au1000_mdio_read(struct net_device *dev, int phy_addr, int reg)
while (*mii_control_reg & MAC_MII_BUSY) {
mdelay(1);
if (--timedout == 0) {
- printk(KERN_ERR "%s: mdio_read busy timeout!!\n",
- dev->name);
+ netdev_err(dev, "mdio_read busy timeout!!\n");
return -1;
}
}
@@ -217,8 +220,7 @@ static void au1000_mdio_write(struct net_device *dev, int phy_addr,
while (*mii_control_reg & MAC_MII_BUSY) {
mdelay(1);
if (--timedout == 0) {
- printk(KERN_ERR "%s: mdio_write busy timeout!!\n",
- dev->name);
+ netdev_err(dev, "mdio_write busy timeout!!\n");
return;
}
}
@@ -236,7 +238,7 @@ static int au1000_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum)
* _NOT_ hold (e.g. when PHY is accessed through other MAC's MII bus) */
struct net_device *const dev = bus->priv;
- enable_mac(dev, 0); /* make sure the MAC associated with this
+ au1000_enable_mac(dev, 0); /* make sure the MAC associated with this
* mii_bus is enabled */
return au1000_mdio_read(dev, phy_addr, regnum);
}
@@ -246,7 +248,7 @@ static int au1000_mdiobus_write(struct mii_bus *bus, int phy_addr, int regnum,
{
struct net_device *const dev = bus->priv;
- enable_mac(dev, 0); /* make sure the MAC associated with this
+ au1000_enable_mac(dev, 0); /* make sure the MAC associated with this
* mii_bus is enabled */
au1000_mdio_write(dev, phy_addr, regnum, value);
return 0;
@@ -256,28 +258,26 @@ static int au1000_mdiobus_reset(struct mii_bus *bus)
{
struct net_device *const dev = bus->priv;
- enable_mac(dev, 0); /* make sure the MAC associated with this
+ au1000_enable_mac(dev, 0); /* make sure the MAC associated with this
* mii_bus is enabled */
return 0;
}
-static void hard_stop(struct net_device *dev)
+static void au1000_hard_stop(struct net_device *dev)
{
struct au1000_private *aup = netdev_priv(dev);
- if (au1000_debug > 4)
- printk(KERN_INFO "%s: hard stop\n", dev->name);
+ netif_dbg(aup, drv, dev, "hard stop\n");
aup->mac->control &= ~(MAC_RX_ENABLE | MAC_TX_ENABLE);
au_sync_delay(10);
}
-static void enable_rx_tx(struct net_device *dev)
+static void au1000_enable_rx_tx(struct net_device *dev)
{
struct au1000_private *aup = netdev_priv(dev);
- if (au1000_debug > 4)
- printk(KERN_INFO "%s: enable_rx_tx\n", dev->name);
+ netif_dbg(aup, hw, dev, "enable_rx_tx\n");
aup->mac->control |= (MAC_RX_ENABLE | MAC_TX_ENABLE);
au_sync_delay(10);
@@ -297,16 +297,15 @@ au1000_adjust_link(struct net_device *dev)
spin_lock_irqsave(&aup->lock, flags);
if (phydev->link && (aup->old_speed != phydev->speed)) {
- // speed changed
+ /* speed changed */
- switch(phydev->speed) {
+ switch (phydev->speed) {
case SPEED_10:
case SPEED_100:
break;
default:
- printk(KERN_WARNING
- "%s: Speed (%d) is not 10/100 ???\n",
- dev->name, phydev->speed);
+ netdev_warn(dev, "Speed (%d) is not 10/100 ???\n",
+ phydev->speed);
break;
}
@@ -316,10 +315,10 @@ au1000_adjust_link(struct net_device *dev)
}
if (phydev->link && (aup->old_duplex != phydev->duplex)) {
- // duplex mode changed
+ /* duplex mode changed */
/* switching duplex mode requires to disable rx and tx! */
- hard_stop(dev);
+ au1000_hard_stop(dev);
if (DUPLEX_FULL == phydev->duplex)
aup->mac->control = ((aup->mac->control
@@ -331,14 +330,14 @@ au1000_adjust_link(struct net_device *dev)
| MAC_DISABLE_RX_OWN);
au_sync_delay(1);
- enable_rx_tx(dev);
+ au1000_enable_rx_tx(dev);
aup->old_duplex = phydev->duplex;
status_change = 1;
}
- if(phydev->link != aup->old_link) {
- // link state changed
+ if (phydev->link != aup->old_link) {
+ /* link state changed */
if (!phydev->link) {
/* link went down */
@@ -354,15 +353,15 @@ au1000_adjust_link(struct net_device *dev)
if (status_change) {
if (phydev->link)
- printk(KERN_INFO "%s: link up (%d/%s)\n",
- dev->name, phydev->speed,
+ netdev_info(dev, "link up (%d/%s)\n",
+ phydev->speed,
DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
else
- printk(KERN_INFO "%s: link down\n", dev->name);
+ netdev_info(dev, "link down\n");
}
}
-static int mii_probe (struct net_device *dev)
+static int au1000_mii_probe (struct net_device *dev)
{
struct au1000_private *const aup = netdev_priv(dev);
struct phy_device *phydev = NULL;
@@ -373,8 +372,7 @@ static int mii_probe (struct net_device *dev)
if (aup->phy_addr)
phydev = aup->mii_bus->phy_map[aup->phy_addr];
else
- printk (KERN_INFO DRV_NAME ":%s: using PHY-less setup\n",
- dev->name);
+ netdev_info(dev, "using PHY-less setup\n");
return 0;
} else {
int phy_addr;
@@ -391,7 +389,7 @@ static int mii_probe (struct net_device *dev)
/* try harder to find a PHY */
if (!phydev && (aup->mac_id == 1)) {
/* no PHY found, maybe we have a dual PHY? */
- printk (KERN_INFO DRV_NAME ": no PHY found on MAC1, "
+ dev_info(&dev->dev, ": no PHY found on MAC1, "
"let's see if it's attached to MAC0...\n");
/* find the first (lowest address) non-attached PHY on
@@ -417,7 +415,7 @@ static int mii_probe (struct net_device *dev)
}
if (!phydev) {
- printk (KERN_ERR DRV_NAME ":%s: no PHY found\n", dev->name);
+ netdev_err(dev, "no PHY found\n");
return -1;
}
@@ -428,7 +426,7 @@ static int mii_probe (struct net_device *dev)
0, PHY_INTERFACE_MODE_MII);
if (IS_ERR(phydev)) {
- printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
+ netdev_err(dev, "Could not attach to PHY\n");
return PTR_ERR(phydev);
}
@@ -449,8 +447,8 @@ static int mii_probe (struct net_device *dev)
aup->old_duplex = -1;
aup->phy_dev = phydev;
- printk(KERN_INFO "%s: attached PHY driver [%s] "
- "(mii_bus:phy_addr=%s, irq=%d)\n", dev->name,
+ netdev_info(dev, "attached PHY driver [%s] "
+ "(mii_bus:phy_addr=%s, irq=%d)\n",
phydev->drv->name, dev_name(&phydev->dev), phydev->irq);
return 0;
@@ -462,7 +460,7 @@ static int mii_probe (struct net_device *dev)
* has the virtual and dma address of a buffer suitable for
* both, receive and transmit operations.
*/
-static db_dest_t *GetFreeDB(struct au1000_private *aup)
+static db_dest_t *au1000_GetFreeDB(struct au1000_private *aup)
{
db_dest_t *pDB;
pDB = aup->pDBfree;
@@ -473,7 +471,7 @@ static db_dest_t *GetFreeDB(struct au1000_private *aup)
return pDB;
}
-void ReleaseDB(struct au1000_private *aup, db_dest_t *pDB)
+void au1000_ReleaseDB(struct au1000_private *aup, db_dest_t *pDB)
{
db_dest_t *pDBfree = aup->pDBfree;
if (pDBfree)
@@ -481,12 +479,12 @@ void ReleaseDB(struct au1000_private *aup, db_dest_t *pDB)
aup->pDBfree = pDB;
}
-static void reset_mac_unlocked(struct net_device *dev)
+static void au1000_reset_mac_unlocked(struct net_device *dev)
{
struct au1000_private *const aup = netdev_priv(dev);
int i;
- hard_stop(dev);
+ au1000_hard_stop(dev);
*aup->enable = MAC_EN_CLOCK_ENABLE;
au_sync_delay(2);
@@ -507,18 +505,17 @@ static void reset_mac_unlocked(struct net_device *dev)
}
-static void reset_mac(struct net_device *dev)
+static void au1000_reset_mac(struct net_device *dev)
{
struct au1000_private *const aup = netdev_priv(dev);
unsigned long flags;
- if (au1000_debug > 4)
- printk(KERN_INFO "%s: reset mac, aup %x\n",
- dev->name, (unsigned)aup);
+ netif_dbg(aup, hw, dev, "reset mac, aup %x\n",
+ (unsigned)aup);
spin_lock_irqsave(&aup->lock, flags);
- reset_mac_unlocked (dev);
+ au1000_reset_mac_unlocked (dev);
spin_unlock_irqrestore(&aup->lock, flags);
}
@@ -529,7 +526,7 @@ static void reset_mac(struct net_device *dev)
* these are not descriptors sitting in memory.
*/
static void
-setup_hw_rings(struct au1000_private *aup, u32 rx_base, u32 tx_base)
+au1000_setup_hw_rings(struct au1000_private *aup, u32 rx_base, u32 tx_base)
{
int i;
@@ -582,11 +579,25 @@ au1000_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
info->regdump_len = 0;
}
+static void au1000_set_msglevel(struct net_device *dev, u32 value)
+{
+ struct au1000_private *aup = netdev_priv(dev);
+ aup->msg_enable = value;
+}
+
+static u32 au1000_get_msglevel(struct net_device *dev)
+{
+ struct au1000_private *aup = netdev_priv(dev);
+ return aup->msg_enable;
+}
+
static const struct ethtool_ops au1000_ethtool_ops = {
.get_settings = au1000_get_settings,
.set_settings = au1000_set_settings,
.get_drvinfo = au1000_get_drvinfo,
.get_link = ethtool_op_get_link,
+ .get_msglevel = au1000_get_msglevel,
+ .set_msglevel = au1000_set_msglevel,
};
@@ -606,11 +617,10 @@ static int au1000_init(struct net_device *dev)
int i;
u32 control;
- if (au1000_debug > 4)
- printk("%s: au1000_init\n", dev->name);
+ netif_dbg(aup, hw, dev, "au1000_init\n");
/* bring the device out of reset */
- enable_mac(dev, 1);
+ au1000_enable_mac(dev, 1);
spin_lock_irqsave(&aup->lock, flags);
@@ -649,7 +659,7 @@ static int au1000_init(struct net_device *dev)
return 0;
}
-static inline void update_rx_stats(struct net_device *dev, u32 status)
+static inline void au1000_update_rx_stats(struct net_device *dev, u32 status)
{
struct net_device_stats *ps = &dev->stats;
@@ -667,8 +677,7 @@ static inline void update_rx_stats(struct net_device *dev, u32 status)
ps->rx_crc_errors++;
if (status & RX_COLL)
ps->collisions++;
- }
- else
+ } else
ps->rx_bytes += status & RX_FRAME_LEN_MASK;
}
@@ -685,15 +694,14 @@ static int au1000_rx(struct net_device *dev)
db_dest_t *pDB;
u32 frmlen;
- if (au1000_debug > 5)
- printk("%s: au1000_rx head %d\n", dev->name, aup->rx_head);
+ netif_dbg(aup, rx_status, dev, "au1000_rx head %d\n", aup->rx_head);
prxd = aup->rx_dma_ring[aup->rx_head];
buff_stat = prxd->buff_stat;
while (buff_stat & RX_T_DONE) {
status = prxd->status;
pDB = aup->rx_db_inuse[aup->rx_head];
- update_rx_stats(dev, status);
+ au1000_update_rx_stats(dev, status);
if (!(status & RX_ERROR)) {
/* good frame */
@@ -701,9 +709,7 @@ static int au1000_rx(struct net_device *dev)
frmlen -= 4; /* Remove FCS */
skb = dev_alloc_skb(frmlen + 2);
if (skb == NULL) {
- printk(KERN_ERR
- "%s: Memory squeeze, dropping packet.\n",
- dev->name);
+ netdev_err(dev, "Memory squeeze, dropping packet.\n");
dev->stats.rx_dropped++;
continue;
}
@@ -713,8 +719,7 @@ static int au1000_rx(struct net_device *dev)
skb_put(skb, frmlen);
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb); /* pass the packet to upper layers */
- }
- else {
+ } else {
if (au1000_debug > 4) {
if (status & RX_MISSED_FRAME)
printk("rx miss\n");
@@ -747,7 +752,7 @@ static int au1000_rx(struct net_device *dev)
return 0;
}
-static void update_tx_stats(struct net_device *dev, u32 status)
+static void au1000_update_tx_stats(struct net_device *dev, u32 status)
{
struct au1000_private *aup = netdev_priv(dev);
struct net_device_stats *ps = &dev->stats;
@@ -760,8 +765,7 @@ static void update_tx_stats(struct net_device *dev, u32 status)
ps->tx_errors++;
ps->tx_aborted_errors++;
}
- }
- else {
+ } else {
ps->tx_errors++;
ps->tx_aborted_errors++;
if (status & (TX_NO_CARRIER | TX_LOSS_CARRIER))
@@ -783,7 +787,7 @@ static void au1000_tx_ack(struct net_device *dev)
ptxd = aup->tx_dma_ring[aup->tx_tail];
while (ptxd->buff_stat & TX_T_DONE) {
- update_tx_stats(dev, ptxd->status);
+ au1000_update_tx_stats(dev, ptxd->status);
ptxd->buff_stat &= ~TX_T_DONE;
ptxd->len = 0;
au_sync();
@@ -817,18 +821,18 @@ static int au1000_open(struct net_device *dev)
int retval;
struct au1000_private *aup = netdev_priv(dev);
- if (au1000_debug > 4)
- printk("%s: open: dev=%p\n", dev->name, dev);
+ netif_dbg(aup, drv, dev, "open: dev=%p\n", dev);
- if ((retval = request_irq(dev->irq, au1000_interrupt, 0,
- dev->name, dev))) {
- printk(KERN_ERR "%s: unable to get IRQ %d\n",
- dev->name, dev->irq);
+ retval = request_irq(dev->irq, au1000_interrupt, 0,
+ dev->name, dev);
+ if (retval) {
+ netdev_err(dev, "unable to get IRQ %d\n", dev->irq);
return retval;
}
- if ((retval = au1000_init(dev))) {
- printk(KERN_ERR "%s: error in au1000_init\n", dev->name);
+ retval = au1000_init(dev);
+ if (retval) {
+ netdev_err(dev, "error in au1000_init\n");
free_irq(dev->irq, dev);
return retval;
}
@@ -841,8 +845,7 @@ static int au1000_open(struct net_device *dev)
netif_start_queue(dev);
- if (au1000_debug > 4)
- printk("%s: open: Initialization done.\n", dev->name);
+ netif_dbg(aup, drv, dev, "open: Initialization done.\n");
return 0;
}
@@ -852,15 +855,14 @@ static int au1000_close(struct net_device *dev)
unsigned long flags;
struct au1000_private *const aup = netdev_priv(dev);
- if (au1000_debug > 4)
- printk("%s: close: dev=%p\n", dev->name, dev);
+ netif_dbg(aup, drv, dev, "close: dev=%p\n", dev);
if (aup->phy_dev)
phy_stop(aup->phy_dev);
spin_lock_irqsave(&aup->lock, flags);
- reset_mac_unlocked (dev);
+ au1000_reset_mac_unlocked (dev);
/* stop the device */
netif_stop_queue(dev);
@@ -884,9 +886,8 @@ static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev)
db_dest_t *pDB;
int i;
- if (au1000_debug > 5)
- printk("%s: tx: aup %x len=%d, data=%p, head %d\n",
- dev->name, (unsigned)aup, skb->len,
+ netif_dbg(aup, tx_queued, dev, "tx: aup %x len=%d, data=%p, head %d\n",
+ (unsigned)aup, skb->len,
skb->data, aup->tx_head);
ptxd = aup->tx_dma_ring[aup->tx_head];
@@ -896,9 +897,8 @@ static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev)
netif_stop_queue(dev);
aup->tx_full = 1;
return NETDEV_TX_BUSY;
- }
- else if (buff_stat & TX_T_DONE) {
- update_tx_stats(dev, ptxd->status);
+ } else if (buff_stat & TX_T_DONE) {
+ au1000_update_tx_stats(dev, ptxd->status);
ptxd->len = 0;
}
@@ -910,12 +910,11 @@ static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev)
pDB = aup->tx_db_inuse[aup->tx_head];
skb_copy_from_linear_data(skb, (void *)pDB->vaddr, skb->len);
if (skb->len < ETH_ZLEN) {
- for (i=skb->len; i<ETH_ZLEN; i++) {
+ for (i = skb->len; i < ETH_ZLEN; i++) {
((char *)pDB->vaddr)[i] = 0;
}
ptxd->len = ETH_ZLEN;
- }
- else
+ } else
ptxd->len = skb->len;
ps->tx_packets++;
@@ -925,7 +924,6 @@ static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev)
au_sync();
dev_kfree_skb(skb);
aup->tx_head = (aup->tx_head + 1) & (NUM_TX_DMA - 1);
- dev->trans_start = jiffies;
return NETDEV_TX_OK;
}
@@ -935,10 +933,10 @@ static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev)
*/
static void au1000_tx_timeout(struct net_device *dev)
{
- printk(KERN_ERR "%s: au1000_tx_timeout: dev=%p\n", dev->name, dev);
- reset_mac(dev);
+ netdev_err(dev, "au1000_tx_timeout: dev=%p\n", dev);
+ au1000_reset_mac(dev);
au1000_init(dev);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
}
@@ -946,8 +944,7 @@ static void au1000_multicast_list(struct net_device *dev)
{
struct au1000_private *aup = netdev_priv(dev);
- if (au1000_debug > 4)
- printk("%s: au1000_multicast_list: flags=%x\n", dev->name, dev->flags);
+ netif_dbg(aup, drv, dev, "au1000_multicast_list: flags=%x\n", dev->flags);
if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
aup->mac->control |= MAC_PROMISCUOUS;
@@ -955,14 +952,14 @@ static void au1000_multicast_list(struct net_device *dev)
netdev_mc_count(dev) > MULTICAST_FILTER_LIMIT) {
aup->mac->control |= MAC_PASS_ALL_MULTI;
aup->mac->control &= ~MAC_PROMISCUOUS;
- printk(KERN_INFO "%s: Pass all multicast\n", dev->name);
+ netdev_info(dev, "Pass all multicast\n");
} else {
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
u32 mc_filter[2]; /* Multicast hash filter */
mc_filter[1] = mc_filter[0] = 0;
- netdev_for_each_mc_addr(mclist, dev)
- set_bit(ether_crc(ETH_ALEN, mclist->dmi_addr)>>26,
+ netdev_for_each_mc_addr(ha, dev)
+ set_bit(ether_crc(ETH_ALEN, ha->addr)>>26,
(long *)mc_filter);
aup->mac->multi_hash_high = mc_filter[1];
aup->mac->multi_hash_low = mc_filter[0];
@@ -975,9 +972,11 @@ static int au1000_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct au1000_private *aup = netdev_priv(dev);
- if (!netif_running(dev)) return -EINVAL;
+ if (!netif_running(dev))
+ return -EINVAL;
- if (!aup->phy_dev) return -EINVAL; // PHY not controllable
+ if (!aup->phy_dev)
+ return -EINVAL; /* PHY not controllable */
return phy_mii_ioctl(aup->phy_dev, if_mii(rq), cmd);
}
@@ -996,7 +995,7 @@ static const struct net_device_ops au1000_netdev_ops = {
static int __devinit au1000_probe(struct platform_device *pdev)
{
- static unsigned version_printed = 0;
+ static unsigned version_printed;
struct au1000_private *aup = NULL;
struct au1000_eth_platform_data *pd;
struct net_device *dev = NULL;
@@ -1007,40 +1006,40 @@ static int __devinit au1000_probe(struct platform_device *pdev)
base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!base) {
- printk(KERN_ERR DRV_NAME ": failed to retrieve base register\n");
+ dev_err(&pdev->dev, "failed to retrieve base register\n");
err = -ENODEV;
goto out;
}
macen = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (!macen) {
- printk(KERN_ERR DRV_NAME ": failed to retrieve MAC Enable register\n");
+ dev_err(&pdev->dev, "failed to retrieve MAC Enable register\n");
err = -ENODEV;
goto out;
}
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- printk(KERN_ERR DRV_NAME ": failed to retrieve IRQ\n");
+ dev_err(&pdev->dev, "failed to retrieve IRQ\n");
err = -ENODEV;
goto out;
}
if (!request_mem_region(base->start, resource_size(base), pdev->name)) {
- printk(KERN_ERR DRV_NAME ": failed to request memory region for base registers\n");
+ dev_err(&pdev->dev, "failed to request memory region for base registers\n");
err = -ENXIO;
goto out;
}
if (!request_mem_region(macen->start, resource_size(macen), pdev->name)) {
- printk(KERN_ERR DRV_NAME ": failed to request memory region for MAC enable register\n");
+ dev_err(&pdev->dev, "failed to request memory region for MAC enable register\n");
err = -ENXIO;
goto err_request;
}
dev = alloc_etherdev(sizeof(struct au1000_private));
if (!dev) {
- printk(KERN_ERR "%s: alloc_etherdev failed\n", DRV_NAME);
+ dev_err(&pdev->dev, "alloc_etherdev failed\n");
err = -ENOMEM;
goto err_alloc;
}
@@ -1050,6 +1049,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
aup = netdev_priv(dev);
spin_lock_init(&aup->lock);
+ aup->msg_enable = (au1000_debug < 4 ? AU1000_DEF_MSG_ENABLE : au1000_debug);
/* Allocate the data buffers */
/* Snooping works fine with eth on all au1xxx */
@@ -1057,7 +1057,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
(NUM_TX_BUFFS + NUM_RX_BUFFS),
&aup->dma_addr, 0);
if (!aup->vaddr) {
- printk(KERN_ERR DRV_NAME ": failed to allocate data buffers\n");
+ dev_err(&pdev->dev, "failed to allocate data buffers\n");
err = -ENOMEM;
goto err_vaddr;
}
@@ -1065,7 +1065,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
/* aup->mac is the base address of the MAC's registers */
aup->mac = (volatile mac_reg_t *)ioremap_nocache(base->start, resource_size(base));
if (!aup->mac) {
- printk(KERN_ERR DRV_NAME ": failed to ioremap MAC registers\n");
+ dev_err(&pdev->dev, "failed to ioremap MAC registers\n");
err = -ENXIO;
goto err_remap1;
}
@@ -1073,7 +1073,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
/* Setup some variables for quick register address access */
aup->enable = (volatile u32 *)ioremap_nocache(macen->start, resource_size(macen));
if (!aup->enable) {
- printk(KERN_ERR DRV_NAME ": failed to ioremap MAC enable register\n");
+ dev_err(&pdev->dev, "failed to ioremap MAC enable register\n");
err = -ENXIO;
goto err_remap2;
}
@@ -1083,14 +1083,13 @@ static int __devinit au1000_probe(struct platform_device *pdev)
if (prom_get_ethernet_addr(ethaddr) == 0)
memcpy(au1000_mac_addr, ethaddr, sizeof(au1000_mac_addr));
else {
- printk(KERN_INFO "%s: No MAC address found\n",
- dev->name);
+ netdev_info(dev, "No MAC address found\n");
/* Use the hard coded MAC addresses */
}
- setup_hw_rings(aup, MAC0_RX_DMA_ADDR, MAC0_TX_DMA_ADDR);
+ au1000_setup_hw_rings(aup, MAC0_RX_DMA_ADDR, MAC0_TX_DMA_ADDR);
} else if (pdev->id == 1)
- setup_hw_rings(aup, MAC1_RX_DMA_ADDR, MAC1_TX_DMA_ADDR);
+ au1000_setup_hw_rings(aup, MAC1_RX_DMA_ADDR, MAC1_TX_DMA_ADDR);
/*
* Assign to the Ethernet ports two consecutive MAC addresses
@@ -1104,7 +1103,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
pd = pdev->dev.platform_data;
if (!pd) {
- printk(KERN_INFO DRV_NAME ": no platform_data passed, PHY search on MAC0\n");
+ dev_info(&pdev->dev, "no platform_data passed, PHY search on MAC0\n");
aup->phy1_search_mac0 = 1;
} else {
aup->phy_static_config = pd->phy_static_config;
@@ -1116,7 +1115,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
}
if (aup->phy_busid && aup->phy_busid > 0) {
- printk(KERN_ERR DRV_NAME ": MAC0-associated PHY attached 2nd MACs MII"
+ dev_err(&pdev->dev, "MAC0-associated PHY attached 2nd MACs MII"
"bus not supported yet\n");
err = -ENODEV;
goto err_mdiobus_alloc;
@@ -1124,7 +1123,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
aup->mii_bus = mdiobus_alloc();
if (aup->mii_bus == NULL) {
- printk(KERN_ERR DRV_NAME ": failed to allocate mdiobus structure\n");
+ dev_err(&pdev->dev, "failed to allocate mdiobus structure\n");
err = -ENOMEM;
goto err_mdiobus_alloc;
}
@@ -1139,7 +1138,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
if (aup->mii_bus->irq == NULL)
goto err_out;
- for(i = 0; i < PHY_MAX_ADDR; ++i)
+ for (i = 0; i < PHY_MAX_ADDR; ++i)
aup->mii_bus->irq[i] = PHY_POLL;
/* if known, set corresponding PHY IRQs */
if (aup->phy_static_config)
@@ -1148,11 +1147,11 @@ static int __devinit au1000_probe(struct platform_device *pdev)
err = mdiobus_register(aup->mii_bus);
if (err) {
- printk(KERN_ERR DRV_NAME " failed to register MDIO bus\n");
+ dev_err(&pdev->dev, "failed to register MDIO bus\n");
goto err_mdiobus_reg;
}
- if (mii_probe(dev) != 0)
+ if (au1000_mii_probe(dev) != 0)
goto err_out;
pDBfree = NULL;
@@ -1168,7 +1167,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
aup->pDBfree = pDBfree;
for (i = 0; i < NUM_RX_DMA; i++) {
- pDB = GetFreeDB(aup);
+ pDB = au1000_GetFreeDB(aup);
if (!pDB) {
goto err_out;
}
@@ -1176,7 +1175,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
aup->rx_db_inuse[i] = pDB;
}
for (i = 0; i < NUM_TX_DMA; i++) {
- pDB = GetFreeDB(aup);
+ pDB = au1000_GetFreeDB(aup);
if (!pDB) {
goto err_out;
}
@@ -1195,17 +1194,16 @@ static int __devinit au1000_probe(struct platform_device *pdev)
* The boot code uses the ethernet controller, so reset it to start
* fresh. au1000_init() expects that the device is in reset state.
*/
- reset_mac(dev);
+ au1000_reset_mac(dev);
err = register_netdev(dev);
if (err) {
- printk(KERN_ERR DRV_NAME "%s: Cannot register net device, aborting.\n",
- dev->name);
+ netdev_err(dev, "Cannot register net device, aborting.\n");
goto err_out;
}
- printk("%s: Au1xx0 Ethernet found at 0x%lx, irq %d\n",
- dev->name, (unsigned long)base->start, irq);
+ netdev_info(dev, "Au1xx0 Ethernet found at 0x%lx, irq %d\n",
+ (unsigned long)base->start, irq);
if (version_printed++ == 0)
printk("%s version %s %s\n", DRV_NAME, DRV_VERSION, DRV_AUTHOR);
@@ -1217,15 +1215,15 @@ err_out:
/* here we should have a valid dev plus aup-> register addresses
* so we can reset the mac properly.*/
- reset_mac(dev);
+ au1000_reset_mac(dev);
for (i = 0; i < NUM_RX_DMA; i++) {
if (aup->rx_db_inuse[i])
- ReleaseDB(aup, aup->rx_db_inuse[i]);
+ au1000_ReleaseDB(aup, aup->rx_db_inuse[i]);
}
for (i = 0; i < NUM_TX_DMA; i++) {
if (aup->tx_db_inuse[i])
- ReleaseDB(aup, aup->tx_db_inuse[i]);
+ au1000_ReleaseDB(aup, aup->tx_db_inuse[i]);
}
err_mdiobus_reg:
mdiobus_free(aup->mii_bus);
@@ -1261,11 +1259,11 @@ static int __devexit au1000_remove(struct platform_device *pdev)
for (i = 0; i < NUM_RX_DMA; i++)
if (aup->rx_db_inuse[i])
- ReleaseDB(aup, aup->rx_db_inuse[i]);
+ au1000_ReleaseDB(aup, aup->rx_db_inuse[i]);
for (i = 0; i < NUM_TX_DMA; i++)
if (aup->tx_db_inuse[i])
- ReleaseDB(aup, aup->tx_db_inuse[i]);
+ au1000_ReleaseDB(aup, aup->tx_db_inuse[i]);
dma_free_noncoherent(NULL, MAX_BUF_SIZE *
(NUM_TX_BUFFS + NUM_RX_BUFFS),
diff --git a/drivers/net/au1000_eth.h b/drivers/net/au1000_eth.h
index f9d29a29b8fd..d06ec008fbf1 100644
--- a/drivers/net/au1000_eth.h
+++ b/drivers/net/au1000_eth.h
@@ -35,7 +35,7 @@
#define NUM_TX_BUFFS 4
#define MAX_BUF_SIZE 2048
-#define ETH_TX_TIMEOUT HZ/4
+#define ETH_TX_TIMEOUT (HZ/4)
#define MAC_MIN_PKT_SIZE 64
#define MULTICAST_FILTER_LIMIT 64
@@ -125,4 +125,6 @@ struct au1000_private {
dma_addr_t dma_addr; /* dma address of rx/tx buffers */
spinlock_t lock; /* Serialise access to device */
+
+ u32 msg_enable;
};
diff --git a/drivers/net/ax88796.c b/drivers/net/ax88796.c
index b718dc60afc4..55c9958043c4 100644
--- a/drivers/net/ax88796.c
+++ b/drivers/net/ax88796.c
@@ -303,7 +303,6 @@ static void ax_block_output(struct net_device *dev, int count,
ei_outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
ei_status.dmaing &= ~0x01;
- return;
}
/* definitions for accessing MII/EEPROM interface */
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 69d9f3d368ae..293f9c16e786 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -1014,8 +1014,6 @@ static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (TX_BUFFS_AVAIL(bp) < 1)
netif_stop_queue(dev);
- dev->trans_start = jiffies;
-
out_unlock:
spin_unlock_irqrestore(&bp->lock, flags);
@@ -1681,15 +1679,15 @@ static struct net_device_stats *b44_get_stats(struct net_device *dev)
static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
{
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
int i, num_ents;
num_ents = min_t(int, netdev_mc_count(dev), B44_MCAST_TABLE_SIZE);
i = 0;
- netdev_for_each_mc_addr(mclist, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
if (i == num_ents)
break;
- __b44_cam_write(bp, mclist->dmi_addr, i++ + 1);
+ __b44_cam_write(bp, ha->addr, i++ + 1);
}
return i+1;
}
diff --git a/drivers/net/bcm63xx_enet.c b/drivers/net/bcm63xx_enet.c
index 17460aba3bae..faf5add894d7 100644
--- a/drivers/net/bcm63xx_enet.c
+++ b/drivers/net/bcm63xx_enet.c
@@ -341,11 +341,9 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
}
skb_put(skb, len);
- skb->dev = dev;
skb->protocol = eth_type_trans(skb, dev);
priv->stats.rx_packets++;
priv->stats.rx_bytes += len;
- dev->last_rx = jiffies;
netif_receive_skb(skb);
} while (--budget > 0);
@@ -567,7 +565,6 @@ static int bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
priv->stats.tx_bytes += skb->len;
priv->stats.tx_packets++;
- dev->trans_start = jiffies;
ret = NETDEV_TX_OK;
out_unlock:
@@ -605,7 +602,7 @@ static int bcm_enet_set_mac_address(struct net_device *dev, void *p)
static void bcm_enet_set_multicast_list(struct net_device *dev)
{
struct bcm_enet_priv *priv;
- struct dev_mc_list *mc_list;
+ struct netdev_hw_addr *ha;
u32 val;
int i;
@@ -633,14 +630,14 @@ static void bcm_enet_set_multicast_list(struct net_device *dev)
}
i = 0;
- netdev_for_each_mc_addr(mc_list, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
u8 *dmi_addr;
u32 tmp;
if (i == 3)
break;
/* update perfect match registers */
- dmi_addr = mc_list->dmi_addr;
+ dmi_addr = ha->addr;
tmp = (dmi_addr[2] << 24) | (dmi_addr[3] << 16) |
(dmi_addr[4] << 8) | dmi_addr[5];
enet_writel(priv, tmp, ENET_PML_REG(i + 1));
@@ -960,7 +957,9 @@ static int bcm_enet_open(struct net_device *dev)
/* all set, enable mac and interrupts, start dma engine and
* kick rx dma channel */
wmb();
- enet_writel(priv, ENET_CTL_ENABLE_MASK, ENET_CTL_REG);
+ val = enet_readl(priv, ENET_CTL_REG);
+ val |= ENET_CTL_ENABLE_MASK;
+ enet_writel(priv, val, ENET_CTL_REG);
enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
ENETDMA_CHANCFG_REG(priv->rx_chan));
@@ -1647,7 +1646,6 @@ static int __devinit bcm_enet_probe(struct platform_device *pdev)
if (!dev)
return -ENOMEM;
priv = netdev_priv(dev);
- memset(priv, 0, sizeof(*priv));
ret = compute_hw_mtu(priv, dev->mtu);
if (ret)
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index 56387b191c96..b46be490cd2a 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -84,6 +84,8 @@ static inline char *nic_name(struct pci_dev *pdev)
#define FW_VER_LEN 32
+#define BE_MAX_VF 32
+
struct be_dma_mem {
void *va;
dma_addr_t dma;
@@ -207,7 +209,7 @@ struct be_tx_obj {
/* Struct to remember the pages posted for rx frags */
struct be_rx_page_info {
struct page *page;
- dma_addr_t bus;
+ DEFINE_DMA_UNMAP_ADDR(bus);
u16 page_offset;
bool last_page_user;
};
@@ -281,8 +283,17 @@ struct be_adapter {
u8 port_type;
u8 transceiver;
u8 generation; /* BladeEngine ASIC generation */
+ u32 flash_status;
+ struct completion flash_compl;
+
+ bool sriov_enabled;
+ u32 vf_if_handle[BE_MAX_VF];
+ u32 vf_pmac_id[BE_MAX_VF];
+ u8 base_eq_id;
};
+#define be_physfn(adapter) (!adapter->pdev->is_virtfn)
+
/* BladeEngine Generation numbers */
#define BE_GEN2 2
#define BE_GEN3 3
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index d0ef4ac987cd..9d11dbf5e4da 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -59,6 +59,13 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
CQE_STATUS_COMPL_MASK;
+
+ if ((compl->tag0 == OPCODE_COMMON_WRITE_FLASHROM) &&
+ (compl->tag1 == CMD_SUBSYSTEM_COMMON)) {
+ adapter->flash_status = compl_status;
+ complete(&adapter->flash_compl);
+ }
+
if (compl_status == MCC_STATUS_SUCCESS) {
if (compl->tag0 == OPCODE_ETH_GET_STATISTICS) {
struct be_cmd_resp_get_stats *resp =
@@ -287,7 +294,7 @@ int be_cmd_POST(struct be_adapter *adapter)
} else {
return 0;
}
- } while (timeout < 20);
+ } while (timeout < 40);
dev_err(&adapter->pdev->dev, "POST timeout; stage=0x%x\n", stage);
return -1;
@@ -843,7 +850,8 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
* Uses mbox
*/
int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
- u8 *mac, bool pmac_invalid, u32 *if_handle, u32 *pmac_id)
+ u8 *mac, bool pmac_invalid, u32 *if_handle, u32 *pmac_id,
+ u32 domain)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_if_create *req;
@@ -860,6 +868,7 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req));
+ req->hdr.domain = domain;
req->capability_flags = cpu_to_le32(cap_flags);
req->enable_flags = cpu_to_le32(en_flags);
req->pmac_invalid = pmac_invalid;
@@ -1111,6 +1120,10 @@ int be_cmd_promiscuous_config(struct be_adapter *adapter, u8 port_num, bool en)
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
OPCODE_ETH_PROMISCUOUS, sizeof(*req));
+ /* In FW versions X.102.149/X.101.487 and later,
+ * the port setting associated only with the
+ * issuing pci function will take effect
+ */
if (port_num)
req->port1_promiscuous = en;
else
@@ -1157,13 +1170,13 @@ int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
req->interface_id = if_id;
if (netdev) {
int i;
- struct dev_mc_list *mc;
+ struct netdev_hw_addr *ha;
req->num_mac = cpu_to_le16(netdev_mc_count(netdev));
i = 0;
- netdev_for_each_mc_addr(mc, netdev)
- memcpy(req->mac[i].byte, mc->dmi_addr, ETH_ALEN);
+ netdev_for_each_mc_addr(ha, netdev)
+ memcpy(req->mac[i].byte, ha->addr, ETH_ALEN);
} else {
req->promiscuous = 1;
}
@@ -1411,6 +1424,7 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
int status;
spin_lock_bh(&adapter->mcc_lock);
+ adapter->flash_status = 0;
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1422,6 +1436,7 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
be_wrb_hdr_prepare(wrb, cmd->size, false, 1,
OPCODE_COMMON_WRITE_FLASHROM);
+ wrb->tag1 = CMD_SUBSYSTEM_COMMON;
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_WRITE_FLASHROM, cmd->size);
@@ -1433,10 +1448,16 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
req->params.op_code = cpu_to_le32(flash_opcode);
req->params.data_buf_size = cpu_to_le32(buf_size);
- status = be_mcc_notify_wait(adapter);
+ be_mcc_notify(adapter);
+ spin_unlock_bh(&adapter->mcc_lock);
+
+ if (!wait_for_completion_timeout(&adapter->flash_compl,
+ msecs_to_jiffies(12000)))
+ status = -1;
+ else
+ status = adapter->flash_status;
err:
- spin_unlock_bh(&adapter->mcc_lock);
return status;
}
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index cce61f9a3714..763dc199e337 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -878,7 +878,7 @@ extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id);
extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags,
u32 en_flags, u8 *mac, bool pmac_invalid,
- u32 *if_handle, u32 *pmac_id);
+ u32 *if_handle, u32 *pmac_id, u32 domain);
extern int be_cmd_if_destroy(struct be_adapter *adapter, u32 if_handle);
extern int be_cmd_eq_create(struct be_adapter *adapter,
struct be_queue_info *eq, int eq_delay);
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
index 51e1065e7897..200e98515909 100644
--- a/drivers/net/benet/be_ethtool.c
+++ b/drivers/net/benet/be_ethtool.c
@@ -276,8 +276,6 @@ be_get_ethtool_stats(struct net_device *netdev,
data[i] = (et_stats[i].size == sizeof(u64)) ?
*(u64 *)p: *(u32 *)p;
}
-
- return;
}
static void
@@ -466,7 +464,6 @@ be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
else
wol->wolopts = 0;
memset(&wol->sopass, 0, sizeof(wol->sopass));
- return;
}
static int
@@ -496,7 +493,7 @@ be_test_ddr_dma(struct be_adapter *adapter)
ddrdma_cmd.va = pci_alloc_consistent(adapter->pdev, ddrdma_cmd.size,
&ddrdma_cmd.dma);
if (!ddrdma_cmd.va) {
- dev_err(&adapter->pdev->dev, "Memory allocation failure \n");
+ dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
return -ENOMEM;
}
diff --git a/drivers/net/benet/be_hw.h b/drivers/net/benet/be_hw.h
index 2d4a4b827637..063026de4957 100644
--- a/drivers/net/benet/be_hw.h
+++ b/drivers/net/benet/be_hw.h
@@ -99,6 +99,9 @@
/* Number of entries posted */
#define DB_MCCQ_NUM_POSTED_SHIFT (16) /* bits 16 - 29 */
+/********** SRIOV VF PCICFG OFFSET ********/
+#define SRIOV_VF_PCICFG_OFFSET (4096)
+
/* Flashrom related descriptors */
#define IMAGE_TYPE_FIRMWARE 160
#define IMAGE_TYPE_BOOTCODE 224
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index ec6ace802256..54b14272f333 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -26,8 +26,11 @@ MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");
static unsigned int rx_frag_size = 2048;
+static unsigned int num_vfs;
module_param(rx_frag_size, uint, S_IRUGO);
+module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
+MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
@@ -138,12 +141,19 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
+ /* MAC addr configuration will be done in hardware for VFs
+ * by their corresponding PFs. Just copy to netdev addr here
+ */
+ if (!be_physfn(adapter))
+ goto netdev_addr;
+
status = be_cmd_pmac_del(adapter, adapter->if_handle, adapter->pmac_id);
if (status)
return status;
status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
adapter->if_handle, &adapter->pmac_id);
+netdev_addr:
if (!status)
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
@@ -386,26 +396,48 @@ static void wrb_fill_hdr(struct be_eth_hdr_wrb *hdr, struct sk_buff *skb,
AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
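+/* Rebuild the 64-bit DMA address from the wrb's hi/lo words and unmap the
+ * fragment, if one was mapped.
+ */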
+static void unmap_tx_frag(struct pci_dev *pdev, struct be_eth_wrb *wrb,
+ bool unmap_single)
+{
+ dma_addr_t dma;
+
+ be_dws_le_to_cpu(wrb, sizeof(*wrb));
+
+ dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
+ if (wrb->frag_len) {
+ if (unmap_single)
+ pci_unmap_single(pdev, dma, wrb->frag_len,
+ PCI_DMA_TODEVICE);
+ else
+ pci_unmap_page(pdev, dma, wrb->frag_len,
+ PCI_DMA_TODEVICE);
+ }
+}
static int make_tx_wrbs(struct be_adapter *adapter,
struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
- u64 busaddr;
- u32 i, copied = 0;
+ dma_addr_t busaddr;
+ int i, copied = 0;
struct pci_dev *pdev = adapter->pdev;
struct sk_buff *first_skb = skb;
struct be_queue_info *txq = &adapter->tx_obj.q;
struct be_eth_wrb *wrb;
struct be_eth_hdr_wrb *hdr;
+ bool map_single = false;
+ u16 map_head;
hdr = queue_head_node(txq);
- atomic_add(wrb_cnt, &txq->used);
queue_head_inc(txq);
+ map_head = txq->head;
if (skb->len > skb->data_len) {
- int len = skb->len - skb->data_len;
+ int len = skb_headlen(skb);
busaddr = pci_map_single(pdev, skb->data, len,
PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(pdev, busaddr))
+ goto dma_err;
+ map_single = true;
wrb = queue_head_node(txq);
wrb_fill(wrb, busaddr, len);
be_dws_cpu_to_le(wrb, sizeof(*wrb));
@@ -419,6 +451,8 @@ static int make_tx_wrbs(struct be_adapter *adapter,
busaddr = pci_map_page(pdev, frag->page,
frag->page_offset,
frag->size, PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(pdev, busaddr))
+ goto dma_err;
wrb = queue_head_node(txq);
wrb_fill(wrb, busaddr, frag->size);
be_dws_cpu_to_le(wrb, sizeof(*wrb));
@@ -438,6 +472,16 @@ static int make_tx_wrbs(struct be_adapter *adapter,
be_dws_cpu_to_le(hdr, sizeof(*hdr));
return copied;
+dma_err:
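+	/* mapping failed: rewind the queue head and unmap everything mapped so far */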
+ txq->head = map_head;
+ while (copied) {
+ wrb = queue_head_node(txq);
+ unmap_tx_frag(pdev, wrb, map_single);
+ map_single = false;
+ copied -= wrb->frag_len;
+ queue_head_inc(txq);
+ }
+ return 0;
}
static netdev_tx_t be_xmit(struct sk_buff *skb,
@@ -462,6 +506,7 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
* *BEFORE* ringing the tx doorbell, so that we serialze the
* tx compls of the current transmit which'll wake up the queue
*/
+ atomic_add(wrb_cnt, &txq->used);
if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
txq->len) {
netif_stop_queue(netdev);
@@ -541,6 +586,9 @@ static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
struct be_adapter *adapter = netdev_priv(netdev);
+ if (!be_physfn(adapter))
+ return;
+
adapter->vlan_tag[vid] = 1;
adapter->vlans_added++;
if (adapter->vlans_added <= (adapter->max_vlans + 1))
@@ -551,6 +599,9 @@ static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
struct be_adapter *adapter = netdev_priv(netdev);
+ if (!be_physfn(adapter))
+ return;
+
adapter->vlan_tag[vid] = 0;
vlan_group_set_device(adapter->vlan_grp, vid, NULL);
adapter->vlans_added--;
@@ -588,6 +639,28 @@ done:
return;
}
+static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ int status;
+
+ if (!adapter->sriov_enabled)
+ return -EPERM;
+
+ if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
+ return -EINVAL;
+
+ status = be_cmd_pmac_del(adapter, adapter->vf_if_handle[vf],
+ adapter->vf_pmac_id[vf]);
+
+ status = be_cmd_pmac_add(adapter, mac, adapter->vf_if_handle[vf],
+ &adapter->vf_pmac_id[vf]);
+ if (status)
+ dev_err(&adapter->pdev->dev, "Setting MAC %pM on VF %d failed\n",
+ mac, vf);
+ return status;
+}
+
static void be_rx_rate_update(struct be_adapter *adapter)
{
struct be_drvr_stats *stats = drvr_stats(adapter);
@@ -647,7 +720,7 @@ get_rx_page_info(struct be_adapter *adapter, u16 frag_idx)
BUG_ON(!rx_page_info->page);
if (rx_page_info->last_page_user) {
- pci_unmap_page(adapter->pdev, pci_unmap_addr(rx_page_info, bus),
+ pci_unmap_page(adapter->pdev, dma_unmap_addr(rx_page_info, bus),
adapter->big_page_size, PCI_DMA_FROMDEVICE);
rx_page_info->last_page_user = false;
}
@@ -757,7 +830,6 @@ static void skb_fill_rx_data(struct be_adapter *adapter,
done:
be_rx_stats_update(adapter, pktsize, num_rcvd);
- return;
}
/* Process the RX completion indicated by rxcp when GRO is disabled */
@@ -791,7 +863,6 @@ static void be_rx_compl_process(struct be_adapter *adapter,
skb->truesize = skb->len + sizeof(struct sk_buff);
skb->protocol = eth_type_trans(skb, adapter->netdev);
- skb->dev = adapter->netdev;
vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
@@ -812,8 +883,6 @@ static void be_rx_compl_process(struct be_adapter *adapter,
} else {
netif_receive_skb(skb);
}
-
- return;
}
/* Process the RX completion indicated by rxcp when GRO is enabled */
@@ -893,7 +962,6 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
}
be_rx_stats_update(adapter, pkt_size, num_rcvd);
- return;
}
static struct be_eth_rx_compl *be_rx_compl_get(struct be_adapter *adapter)
@@ -959,7 +1027,7 @@ static void be_post_rx_frags(struct be_adapter *adapter)
}
page_offset = page_info->page_offset;
page_info->page = pagep;
- pci_unmap_addr_set(page_info, bus, page_dmaaddr);
+ dma_unmap_addr_set(page_info, bus, page_dmaaddr);
frag_dmaaddr = page_dmaaddr + page_info->page_offset;
rxd = queue_head_node(rxq);
@@ -987,8 +1055,6 @@ static void be_post_rx_frags(struct be_adapter *adapter)
/* Let be_worker replenish when memory is available */
adapter->rx_post_starved = true;
}
-
- return;
}
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
@@ -1012,35 +1078,26 @@ static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
struct be_eth_wrb *wrb;
struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
struct sk_buff *sent_skb;
- u64 busaddr;
- u16 cur_index, num_wrbs = 0;
+ u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
+ bool unmap_skb_hdr = true;
- cur_index = txq->tail;
- sent_skb = sent_skbs[cur_index];
+ sent_skb = sent_skbs[txq->tail];
BUG_ON(!sent_skb);
- sent_skbs[cur_index] = NULL;
- wrb = queue_tail_node(txq);
- be_dws_le_to_cpu(wrb, sizeof(*wrb));
- busaddr = ((u64)wrb->frag_pa_hi << 32) | (u64)wrb->frag_pa_lo;
- if (busaddr != 0) {
- pci_unmap_single(adapter->pdev, busaddr,
- wrb->frag_len, PCI_DMA_TODEVICE);
- }
- num_wrbs++;
+ sent_skbs[txq->tail] = NULL;
+
+ /* skip header wrb */
queue_tail_inc(txq);
- while (cur_index != last_index) {
+ do {
cur_index = txq->tail;
wrb = queue_tail_node(txq);
- be_dws_le_to_cpu(wrb, sizeof(*wrb));
- busaddr = ((u64)wrb->frag_pa_hi << 32) | (u64)wrb->frag_pa_lo;
- if (busaddr != 0) {
- pci_unmap_page(adapter->pdev, busaddr,
- wrb->frag_len, PCI_DMA_TODEVICE);
- }
+ unmap_tx_frag(adapter->pdev, wrb, (unmap_skb_hdr &&
+ skb_headlen(sent_skb)));
+ unmap_skb_hdr = false;
+
num_wrbs++;
queue_tail_inc(txq);
- }
+ } while (cur_index != last_index);
atomic_sub(num_wrbs, &txq->used);
@@ -1255,6 +1312,8 @@ static int be_tx_queues_create(struct be_adapter *adapter)
/* Ask BE to create Tx Event queue */
if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
goto tx_eq_free;
+ adapter->base_eq_id = adapter->tx_eq.q.id;
+
/* Alloc TX eth compl queue */
cq = &adapter->tx_obj.cq;
if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
@@ -1382,7 +1441,7 @@ rx_eq_free:
/* There are 8 evt ids per func. Returns the evt id's bit number */
static inline int be_evt_bit_get(struct be_adapter *adapter, u32 eq_id)
{
- return eq_id % 8;
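+ /* EQ ids for this function are allocated consecutively, starting at
+ * base_eq_id (the TX EQ id), so the offset gives the event bit number.
+ */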
+ return eq_id - adapter->base_eq_id;
}
static irqreturn_t be_intx(int irq, void *dev)
@@ -1557,7 +1616,27 @@ static void be_msix_enable(struct be_adapter *adapter)
BE_NUM_MSIX_VECTORS);
if (status == 0)
adapter->msix_enabled = true;
- return;
+}
+
+static void be_sriov_enable(struct be_adapter *adapter)
+{
+#ifdef CONFIG_PCI_IOV
+ int status;
+ if (be_physfn(adapter) && num_vfs) {
+ status = pci_enable_sriov(adapter->pdev, num_vfs);
+ adapter->sriov_enabled = status ? false : true;
+ }
+#endif
+}
+
+static void be_sriov_disable(struct be_adapter *adapter)
+{
+#ifdef CONFIG_PCI_IOV
+ if (adapter->sriov_enabled) {
+ pci_disable_sriov(adapter->pdev);
+ adapter->sriov_enabled = false;
+ }
+#endif
}
static inline int be_msix_vec_get(struct be_adapter *adapter, u32 eq_id)
@@ -1617,6 +1696,9 @@ static int be_irq_register(struct be_adapter *adapter)
status = be_msix_register(adapter);
if (status == 0)
goto done;
+ /* INTx is not supported for VF */
+ if (!be_physfn(adapter))
+ return status;
}
/* INTx */
@@ -1651,7 +1733,6 @@ static void be_irq_unregister(struct be_adapter *adapter)
be_free_irq(adapter, &adapter->rx_eq);
done:
adapter->isr_registered = false;
- return;
}
static int be_open(struct net_device *netdev)
@@ -1690,14 +1771,17 @@ static int be_open(struct net_device *netdev)
goto ret_sts;
be_link_status_update(adapter, link_up);
- status = be_vid_config(adapter);
+ if (be_physfn(adapter))
+ status = be_vid_config(adapter);
if (status)
goto ret_sts;
- status = be_cmd_set_flow_control(adapter,
- adapter->tx_fc, adapter->rx_fc);
- if (status)
- goto ret_sts;
+ if (be_physfn(adapter)) {
+ status = be_cmd_set_flow_control(adapter,
+ adapter->tx_fc, adapter->rx_fc);
+ if (status)
+ goto ret_sts;
+ }
schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
ret_sts:
@@ -1723,7 +1807,7 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
if (status) {
dev_err(&adapter->pdev->dev,
- "Could not enable Wake-on-lan \n");
+ "Could not enable Wake-on-lan\n");
pci_free_consistent(adapter->pdev, cmd.size, cmd.va,
cmd.dma);
return status;
@@ -1745,22 +1829,48 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
static int be_setup(struct be_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
- u32 cap_flags, en_flags;
+ u32 cap_flags, en_flags, vf = 0;
int status;
+ u8 mac[ETH_ALEN];
+
+ cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST;
- cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
- BE_IF_FLAGS_MCAST_PROMISCUOUS |
- BE_IF_FLAGS_PROMISCUOUS |
- BE_IF_FLAGS_PASS_L3L4_ERRORS;
- en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
- BE_IF_FLAGS_PASS_L3L4_ERRORS;
+ if (be_physfn(adapter)) {
+ cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
+ BE_IF_FLAGS_PROMISCUOUS |
+ BE_IF_FLAGS_PASS_L3L4_ERRORS;
+ en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
+ }
status = be_cmd_if_create(adapter, cap_flags, en_flags,
netdev->dev_addr, false/* pmac_invalid */,
- &adapter->if_handle, &adapter->pmac_id);
+ &adapter->if_handle, &adapter->pmac_id, 0);
if (status != 0)
goto do_none;
+ if (be_physfn(adapter)) {
+ while (vf < num_vfs) {
+ cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED
+ | BE_IF_FLAGS_BROADCAST;
+ status = be_cmd_if_create(adapter, cap_flags, en_flags,
+ mac, true, &adapter->vf_if_handle[vf],
+ NULL, vf+1);
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "Interface Create failed for VF %d\n", vf);
+ goto if_destroy;
+ }
+ vf++;
+ }
+ } else if (!be_physfn(adapter)) {
+ status = be_cmd_mac_addr_query(adapter, mac,
+ MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
+ if (!status) {
+ memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
+ memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
+ }
+ }
+
status = be_tx_queues_create(adapter);
if (status != 0)
goto if_destroy;
@@ -1782,6 +1892,9 @@ rx_qs_destroy:
tx_qs_destroy:
be_tx_queues_destroy(adapter);
if_destroy:
+ for (vf = 0; vf < num_vfs; vf++)
+ if (adapter->vf_if_handle[vf])
+ be_cmd_if_destroy(adapter, adapter->vf_if_handle[vf]);
be_cmd_if_destroy(adapter, adapter->if_handle);
do_none:
return status;
@@ -2061,6 +2174,7 @@ static struct net_device_ops be_netdev_ops = {
.ndo_vlan_rx_register = be_vlan_register,
.ndo_vlan_rx_add_vid = be_vlan_add_vid,
.ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
+ .ndo_set_vf_mac = be_set_vf_mac
};
static void be_netdev_init(struct net_device *netdev)
@@ -2102,37 +2216,48 @@ static void be_unmap_pci_bars(struct be_adapter *adapter)
iounmap(adapter->csr);
if (adapter->db)
iounmap(adapter->db);
- if (adapter->pcicfg)
+ if (adapter->pcicfg && be_physfn(adapter))
iounmap(adapter->pcicfg);
}
static int be_map_pci_bars(struct be_adapter *adapter)
{
u8 __iomem *addr;
- int pcicfg_reg;
+ int pcicfg_reg, db_reg;
- addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
- pci_resource_len(adapter->pdev, 2));
- if (addr == NULL)
- return -ENOMEM;
- adapter->csr = addr;
-
- addr = ioremap_nocache(pci_resource_start(adapter->pdev, 4),
- 128 * 1024);
- if (addr == NULL)
- goto pci_map_err;
- adapter->db = addr;
+ if (be_physfn(adapter)) {
+ addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
+ pci_resource_len(adapter->pdev, 2));
+ if (addr == NULL)
+ return -ENOMEM;
+ adapter->csr = addr;
+ }
- if (adapter->generation == BE_GEN2)
+ if (adapter->generation == BE_GEN2) {
pcicfg_reg = 1;
- else
+ db_reg = 4;
+ } else {
pcicfg_reg = 0;
-
- addr = ioremap_nocache(pci_resource_start(adapter->pdev, pcicfg_reg),
- pci_resource_len(adapter->pdev, pcicfg_reg));
+ if (be_physfn(adapter))
+ db_reg = 4;
+ else
+ db_reg = 0;
+ }
+ addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
+ pci_resource_len(adapter->pdev, db_reg));
if (addr == NULL)
goto pci_map_err;
- adapter->pcicfg = addr;
+ adapter->db = addr;
+
+ if (be_physfn(adapter)) {
+ addr = ioremap_nocache(
+ pci_resource_start(adapter->pdev, pcicfg_reg),
+ pci_resource_len(adapter->pdev, pcicfg_reg));
+ if (addr == NULL)
+ goto pci_map_err;
+ adapter->pcicfg = addr;
+ } else
+ adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
return 0;
pci_map_err:
@@ -2194,6 +2319,7 @@ static int be_ctrl_init(struct be_adapter *adapter)
spin_lock_init(&adapter->mcc_lock);
spin_lock_init(&adapter->mcc_cq_lock);
+ init_completion(&adapter->flash_compl);
pci_save_state(adapter->pdev);
return 0;
@@ -2246,6 +2372,8 @@ static void __devexit be_remove(struct pci_dev *pdev)
be_ctrl_cleanup(adapter);
+ be_sriov_disable(adapter);
+
be_msix_disable(adapter);
pci_set_drvdata(pdev, NULL);
@@ -2270,16 +2398,20 @@ static int be_get_config(struct be_adapter *adapter)
return status;
memset(mac, 0, ETH_ALEN);
- status = be_cmd_mac_addr_query(adapter, mac,
+
+ if (be_physfn(adapter)) {
+ status = be_cmd_mac_addr_query(adapter, mac,
MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
- if (status)
- return status;
- if (!is_valid_ether_addr(mac))
- return -EADDRNOTAVAIL;
+ if (status)
+ return status;
+
+ if (!is_valid_ether_addr(mac))
+ return -EADDRNOTAVAIL;
- memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
- memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
+ memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
+ memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
+ }
if (adapter->cap & 0x400)
adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
@@ -2296,6 +2428,7 @@ static int __devinit be_probe(struct pci_dev *pdev,
struct be_adapter *adapter;
struct net_device *netdev;
+
status = pci_enable_device(pdev);
if (status)
goto do_none;
@@ -2344,23 +2477,29 @@ static int __devinit be_probe(struct pci_dev *pdev,
}
}
+ be_sriov_enable(adapter);
+
status = be_ctrl_init(adapter);
if (status)
goto free_netdev;
/* sync up with fw's ready state */
- status = be_cmd_POST(adapter);
- if (status)
- goto ctrl_clean;
+ if (be_physfn(adapter)) {
+ status = be_cmd_POST(adapter);
+ if (status)
+ goto ctrl_clean;
+ }
/* tell fw we're ready to fire cmds */
status = be_cmd_fw_init(adapter);
if (status)
goto ctrl_clean;
- status = be_cmd_reset_function(adapter);
- if (status)
- goto ctrl_clean;
+ if (be_physfn(adapter)) {
+ status = be_cmd_reset_function(adapter);
+ if (status)
+ goto ctrl_clean;
+ }
status = be_stats_init(adapter);
if (status)
@@ -2391,6 +2530,7 @@ ctrl_clean:
be_ctrl_cleanup(adapter);
free_netdev:
be_msix_disable(adapter);
+ be_sriov_disable(adapter);
free_netdev(adapter->netdev);
pci_set_drvdata(pdev, NULL);
rel_reg:
@@ -2474,8 +2614,6 @@ static void be_shutdown(struct pci_dev *pdev)
be_setup_wol(adapter, true);
pci_disable_device(pdev);
-
- return;
}
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
@@ -2557,7 +2695,6 @@ static void be_eeh_resume(struct pci_dev *pdev)
return;
err:
dev_err(&adapter->pdev->dev, "EEH resume failed\n");
- return;
}
static struct pci_error_handlers be_eeh_handlers = {
@@ -2587,6 +2724,13 @@ static int __init be_init_module(void)
rx_frag_size = 2048;
}
+ if (num_vfs > 32) {
+ printk(KERN_WARNING DRV_NAME
+ " : Module param num_vfs must not be greater than 32."
+ "Using 32\n");
+ num_vfs = 32;
+ }
+
return pci_register_driver(&be_driver);
}
module_init(be_init_module);
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index 587f93cf03f6..368f33313fb6 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -33,6 +33,7 @@
#include <asm/dma.h>
#include <linux/dma-mapping.h>
+#include <asm/div64.h>
#include <asm/dpmc.h>
#include <asm/blackfin.h>
#include <asm/cacheflush.h>
@@ -80,9 +81,6 @@ static u16 pin_req[] = P_RMII0;
static u16 pin_req[] = P_MII0;
#endif
-static void bfin_mac_disable(void);
-static void bfin_mac_enable(void);
-
static void desc_list_free(void)
{
struct net_dma_desc_rx *r;
@@ -202,6 +200,11 @@ static int desc_list_init(void)
goto init_error;
}
skb_reserve(new_skb, NET_IP_ALIGN);
+ /* Invalidate the data cache over the skb->data range when the cache is
+ * write-back. This prevents stale cache lines from overwriting the new
+ * data written by DMA.
+ */
+ blackfin_dcache_invalidate_range((unsigned long)new_skb->head,
+ (unsigned long)new_skb->end);
r->skb = new_skb;
/*
@@ -254,7 +257,7 @@ init_error:
* MII operations
*/
/* Wait until the previous MDC/MDIO transaction has completed */
-static void bfin_mdio_poll(void)
+static int bfin_mdio_poll(void)
{
int timeout_cnt = MAX_TIMEOUT_CNT;
@@ -264,22 +267,30 @@ static void bfin_mdio_poll(void)
if (timeout_cnt-- < 0) {
printk(KERN_ERR DRV_NAME
": wait MDC/MDIO transaction to complete timeout\n");
- break;
+ return -ETIMEDOUT;
}
}
+
+ return 0;
}
/* Read an off-chip register in a PHY through the MDC/MDIO port */
static int bfin_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum)
{
- bfin_mdio_poll();
+ int ret;
+
+ ret = bfin_mdio_poll();
+ if (ret)
+ return ret;
/* read mode */
bfin_write_EMAC_STAADD(SET_PHYAD((u16) phy_addr) |
SET_REGAD((u16) regnum) |
STABUSY);
- bfin_mdio_poll();
+ ret = bfin_mdio_poll();
+ if (ret)
+ return ret;
return (int) bfin_read_EMAC_STADAT();
}
@@ -288,7 +299,11 @@ static int bfin_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum)
static int bfin_mdiobus_write(struct mii_bus *bus, int phy_addr, int regnum,
u16 value)
{
- bfin_mdio_poll();
+ int ret;
+
+ ret = bfin_mdio_poll();
+ if (ret)
+ return ret;
bfin_write_EMAC_STADAT((u32) value);
@@ -298,9 +313,7 @@ static int bfin_mdiobus_write(struct mii_bus *bus, int phy_addr, int regnum,
STAOP |
STABUSY);
- bfin_mdio_poll();
-
- return 0;
+ return bfin_mdio_poll();
}
static int bfin_mdiobus_reset(struct mii_bus *bus)
@@ -458,6 +471,14 @@ static int mii_probe(struct net_device *dev)
* Ethtool support
*/
+/*
+ * interrupt routine for magic packet wakeup
+ */
+static irqreturn_t bfin_mac_wake_interrupt(int irq, void *dev_id)
+{
+ return IRQ_HANDLED;
+}
+
static int
bfin_mac_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
{
@@ -492,11 +513,57 @@ static void bfin_mac_ethtool_getdrvinfo(struct net_device *dev,
strcpy(info->bus_info, dev_name(&dev->dev));
}
+static void bfin_mac_ethtool_getwol(struct net_device *dev,
+ struct ethtool_wolinfo *wolinfo)
+{
+ struct bfin_mac_local *lp = netdev_priv(dev);
+
+ wolinfo->supported = WAKE_MAGIC;
+ wolinfo->wolopts = lp->wol;
+}
+
+static int bfin_mac_ethtool_setwol(struct net_device *dev,
+ struct ethtool_wolinfo *wolinfo)
+{
+ struct bfin_mac_local *lp = netdev_priv(dev);
+ int rc;
+
+ if (wolinfo->wolopts & (WAKE_MAGICSECURE |
+ WAKE_UCAST |
+ WAKE_MCAST |
+ WAKE_BCAST |
+ WAKE_ARP))
+ return -EOPNOTSUPP;
+
+ lp->wol = wolinfo->wolopts;
+
+ if (lp->wol && !lp->irq_wake_requested) {
+ /* register wake irq handler */
+ rc = request_irq(IRQ_MAC_WAKEDET, bfin_mac_wake_interrupt,
+ IRQF_DISABLED, "EMAC_WAKE", dev);
+ if (rc)
+ return rc;
+ lp->irq_wake_requested = true;
+ }
+
+ if (!lp->wol && lp->irq_wake_requested) {
+ free_irq(IRQ_MAC_WAKEDET, dev);
+ lp->irq_wake_requested = false;
+ }
+
+ /* Make sure the PHY driver doesn't suspend */
+ device_init_wakeup(&dev->dev, lp->wol);
+
+ return 0;
+}
+
static const struct ethtool_ops bfin_mac_ethtool_ops = {
.get_settings = bfin_mac_ethtool_getsettings,
.set_settings = bfin_mac_ethtool_setsettings,
.get_link = ethtool_op_get_link,
.get_drvinfo = bfin_mac_ethtool_getdrvinfo,
+ .get_wol = bfin_mac_ethtool_getwol,
+ .set_wol = bfin_mac_ethtool_setwol,
};
/**************************************************************************/
@@ -509,10 +576,11 @@ void setup_system_regs(struct net_device *dev)
* Configure checksum support and rcve frame word alignment
*/
sysctl = bfin_read_EMAC_SYSCTL();
+ sysctl |= RXDWA;
#if defined(BFIN_MAC_CSUM_OFFLOAD)
- sysctl |= RXDWA | RXCKS;
+ sysctl |= RXCKS;
#else
- sysctl |= RXDWA;
+ sysctl &= ~RXCKS;
#endif
bfin_write_EMAC_SYSCTL(sysctl);
@@ -551,6 +619,309 @@ static int bfin_mac_set_mac_address(struct net_device *dev, void *p)
return 0;
}
+#ifdef CONFIG_BFIN_MAC_USE_HWSTAMP
+#define bfin_mac_hwtstamp_is_none(cfg) ((cfg) == HWTSTAMP_FILTER_NONE)
+
+static int bfin_mac_hwtstamp_ioctl(struct net_device *netdev,
+ struct ifreq *ifr, int cmd)
+{
+ struct hwtstamp_config config;
+ struct bfin_mac_local *lp = netdev_priv(netdev);
+ u16 ptpctl;
+ u32 ptpfv1, ptpfv2, ptpfv3, ptpfoff;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ pr_debug("%s config flag:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
+ __func__, config.flags, config.tx_type, config.rx_filter);
+
+ /* reserved for future extensions */
+ if (config.flags)
+ return -EINVAL;
+
+ if ((config.tx_type != HWTSTAMP_TX_OFF) &&
+ (config.tx_type != HWTSTAMP_TX_ON))
+ return -ERANGE;
+
+ ptpctl = bfin_read_EMAC_PTP_CTL();
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ /*
+ * Don't allow any timestamping
+ */
+ ptpfv3 = 0xFFFFFFFF;
+ bfin_write_EMAC_PTP_FV3(ptpfv3);
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ /*
+ * Clear the five comparison mask bits (bits[12:8]) in EMAC_PTP_CTL)
+ * to enable all the field matches.
+ */
+ ptpctl &= ~0x1F00;
+ bfin_write_EMAC_PTP_CTL(ptpctl);
+ /*
+ * Keep the default values of the EMAC_PTP_FOFF register.
+ */
+ ptpfoff = 0x4A24170C;
+ bfin_write_EMAC_PTP_FOFF(ptpfoff);
+ /*
+ * Keep the default values of the EMAC_PTP_FV1 and EMAC_PTP_FV2
+ * registers.
+ */
+ ptpfv1 = 0x11040800;
+ bfin_write_EMAC_PTP_FV1(ptpfv1);
+ ptpfv2 = 0x0140013F;
+ bfin_write_EMAC_PTP_FV2(ptpfv2);
+ /*
+ * The default value (0xFFFC) allows the timestamping of both
+ * received Sync messages and Delay_Req messages.
+ */
+ ptpfv3 = 0xFFFFFFFC;
+ bfin_write_EMAC_PTP_FV3(ptpfv3);
+
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ /* Clear all five comparison mask bits (bits[12:8]) in the
+ * EMAC_PTP_CTL register to enable all the field matches.
+ */
+ ptpctl &= ~0x1F00;
+ bfin_write_EMAC_PTP_CTL(ptpctl);
+ /*
+ * Keep the default values of the EMAC_PTP_FOFF register, except set
+ * the PTPCOF field to 0x2A.
+ */
+ ptpfoff = 0x2A24170C;
+ bfin_write_EMAC_PTP_FOFF(ptpfoff);
+ /*
+ * Keep the default values of the EMAC_PTP_FV1 and EMAC_PTP_FV2
+ * registers.
+ */
+ ptpfv1 = 0x11040800;
+ bfin_write_EMAC_PTP_FV1(ptpfv1);
+ ptpfv2 = 0x0140013F;
+ bfin_write_EMAC_PTP_FV2(ptpfv2);
+ /*
+ * To allow the timestamping of Pdelay_Req and Pdelay_Resp, set
+ * the value to 0xFFF0.
+ */
+ ptpfv3 = 0xFFFFFFF0;
+ bfin_write_EMAC_PTP_FV3(ptpfv3);
+
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
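+ /* An async WRITE_FLASHROM completion: record its status and wake up the
+ * flash-write waiter.
+ */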
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ /*
+ * Clear bits 8 and 12 of the EMAC_PTP_CTL register to enable only the
+ * EFTM and PTPCM field comparison.
+ */
+ ptpctl &= ~0x1100;
+ bfin_write_EMAC_PTP_CTL(ptpctl);
+ /*
+ * Keep the default values of all the fields of the EMAC_PTP_FOFF
+ * register, except set the PTPCOF field to 0x0E.
+ */
+ ptpfoff = 0x0E24170C;
+ bfin_write_EMAC_PTP_FOFF(ptpfoff);
+ /*
+ * Program bits [15:0] of the EMAC_PTP_FV1 register to 0x88F7, which
+ * corresponds to PTP messages on the MAC layer.
+ */
+ ptpfv1 = 0x110488F7;
+ bfin_write_EMAC_PTP_FV1(ptpfv1);
+ ptpfv2 = 0x0140013F;
+ bfin_write_EMAC_PTP_FV2(ptpfv2);
+ /*
+ * To allow the timestamping of Pdelay_Req and Pdelay_Resp
+ * messages, set the value to 0xFFF0.
+ */
+ ptpfv3 = 0xFFFFFFF0;
+ bfin_write_EMAC_PTP_FV3(ptpfv3);
+
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ if (config.tx_type == HWTSTAMP_TX_OFF &&
+ bfin_mac_hwtstamp_is_none(config.rx_filter)) {
+ ptpctl &= ~PTP_EN;
+ bfin_write_EMAC_PTP_CTL(ptpctl);
+
+ SSYNC();
+ } else {
+ ptpctl |= PTP_EN;
+ bfin_write_EMAC_PTP_CTL(ptpctl);
+
+ /*
+ * clear any existing timestamp
+ */
+ bfin_read_EMAC_PTP_RXSNAPLO();
+ bfin_read_EMAC_PTP_RXSNAPHI();
+
+ bfin_read_EMAC_PTP_TXSNAPLO();
+ bfin_read_EMAC_PTP_TXSNAPHI();
+
+ /*
+ * Set registers so that rollover occurs soon to test this.
+ */
+ bfin_write_EMAC_PTP_TIMELO(0x00000000);
+ bfin_write_EMAC_PTP_TIMEHI(0xFF800000);
+
+ SSYNC();
+
+ lp->compare.last_update = 0;
+ timecounter_init(&lp->clock,
+ &lp->cycles,
+ ktime_to_ns(ktime_get_real()));
+ timecompare_update(&lp->compare, 0);
+ }
+
+ lp->stamp_cfg = config;
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
+static void bfin_dump_hwtamp(char *s, ktime_t *hw, ktime_t *ts, struct timecompare *cmp)
+{
+ ktime_t sys = ktime_get_real();
+
+ pr_debug("%s %s hardware:%d,%d transform system:%d,%d system:%d,%d, cmp:%lld, %lld\n",
+ __func__, s, hw->tv.sec, hw->tv.nsec, ts->tv.sec, ts->tv.nsec, sys.tv.sec,
+ sys.tv.nsec, cmp->offset, cmp->skew);
+}
+
+static void bfin_tx_hwtstamp(struct net_device *netdev, struct sk_buff *skb)
+{
+ struct bfin_mac_local *lp = netdev_priv(netdev);
+ union skb_shared_tx *shtx = skb_tx(skb);
+
+ if (shtx->hardware) {
+ int timeout_cnt = MAX_TIMEOUT_CNT;
+
+ /* When doing time stamping, keep the connection to the socket
+ * a while longer
+ */
+ shtx->in_progress = 1;
+
+ /*
+ * The timestamping is done at the EMAC module's MII/RMII interface
+ * when the module sees the Start of Frame of an event message packet. This
+ * interface is the closest possible place to the physical Ethernet transmission
+ * medium, providing the best timing accuracy.
+ */
+ while ((!(bfin_read_EMAC_PTP_ISTAT() & TXTL)) && (--timeout_cnt))
+ udelay(1);
+ if (timeout_cnt == 0)
+ printk(KERN_ERR DRV_NAME
+ ": fails to timestamp the TX packet\n");
+ else {
+ struct skb_shared_hwtstamps shhwtstamps;
+ u64 ns;
+ u64 regval;
+
+ regval = bfin_read_EMAC_PTP_TXSNAPLO();
+ regval |= (u64)bfin_read_EMAC_PTP_TXSNAPHI() << 32;
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+ ns = timecounter_cyc2time(&lp->clock,
+ regval);
+ timecompare_update(&lp->compare, ns);
+ shhwtstamps.hwtstamp = ns_to_ktime(ns);
+ shhwtstamps.syststamp =
+ timecompare_transform(&lp->compare, ns);
+ skb_tstamp_tx(skb, &shhwtstamps);
+
+ bfin_dump_hwtamp("TX", &shhwtstamps.hwtstamp, &shhwtstamps.syststamp, &lp->compare);
+ }
+ }
+}
+
+static void bfin_rx_hwtstamp(struct net_device *netdev, struct sk_buff *skb)
+{
+ struct bfin_mac_local *lp = netdev_priv(netdev);
+ u32 valid;
+ u64 regval, ns;
+ struct skb_shared_hwtstamps *shhwtstamps;
+
+ if (bfin_mac_hwtstamp_is_none(lp->stamp_cfg.rx_filter))
+ return;
+
+ valid = bfin_read_EMAC_PTP_ISTAT() & RXEL;
+ if (!valid)
+ return;
+
+ shhwtstamps = skb_hwtstamps(skb);
+
+ regval = bfin_read_EMAC_PTP_RXSNAPLO();
+ regval |= (u64)bfin_read_EMAC_PTP_RXSNAPHI() << 32;
+ ns = timecounter_cyc2time(&lp->clock, regval);
+ timecompare_update(&lp->compare, ns);
+ memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+ shhwtstamps->hwtstamp = ns_to_ktime(ns);
+ shhwtstamps->syststamp = timecompare_transform(&lp->compare, ns);
+
+ bfin_dump_hwtamp("RX", &shhwtstamps->hwtstamp, &shhwtstamps->syststamp, &lp->compare);
+}
+
+/*
+ * bfin_read_clock - read raw cycle counter (to be used by time counter)
+ */
+static cycle_t bfin_read_clock(const struct cyclecounter *tc)
+{
+ u64 stamp;
+
+ stamp = bfin_read_EMAC_PTP_TIMELO();
+ stamp |= (u64)bfin_read_EMAC_PTP_TIMEHI() << 32ULL;
+
+ return stamp;
+}
+
+#define PTP_CLK 25000000
+
+static void bfin_mac_hwtstamp_init(struct net_device *netdev)
+{
+ struct bfin_mac_local *lp = netdev_priv(netdev);
+ u64 append;
+
+ /* Initialize hardware timer */
+ append = PTP_CLK * (1ULL << 32);
+ do_div(append, get_sclk());
+ bfin_write_EMAC_PTP_ADDEND((u32)append);
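+ /* The ADDEND just programmed is (PTP_CLK / SCLK) scaled by 2^32; e.g. with
+ * a hypothetical 125 MHz SCLK: 25e6 * 2^32 / 125e6 = 2^32 / 5 ~= 0x33333333.
+ */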
+
+ memset(&lp->cycles, 0, sizeof(lp->cycles));
+ lp->cycles.read = bfin_read_clock;
+ lp->cycles.mask = CLOCKSOURCE_MASK(64);
+ lp->cycles.mult = 1000000000 / PTP_CLK;
+ lp->cycles.shift = 0;
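+ /* 10^9 / 25 MHz = 40, i.e. one PTP counter tick is 40 ns (ns = ticks * mult >> shift) */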
+
+ /* Synchronize our NIC clock against system wall clock */
+ memset(&lp->compare, 0, sizeof(lp->compare));
+ lp->compare.source = &lp->clock;
+ lp->compare.target = ktime_get_real;
+ lp->compare.num_samples = 10;
+
+ /* Initialize hwstamp config */
+ lp->stamp_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
+ lp->stamp_cfg.tx_type = HWTSTAMP_TX_OFF;
+}
+
+#else
+# define bfin_mac_hwtstamp_is_none(cfg) 0
+# define bfin_mac_hwtstamp_init(dev)
+# define bfin_mac_hwtstamp_ioctl(dev, ifr, cmd) (-EOPNOTSUPP)
+# define bfin_rx_hwtstamp(dev, skb)
+# define bfin_tx_hwtstamp(dev, skb)
+#endif
+
static void adjust_tx_list(void)
{
int timeout_cnt = MAX_TIMEOUT_CNT;
@@ -608,18 +979,32 @@ static int bfin_mac_hard_start_xmit(struct sk_buff *skb,
{
u16 *data;
u32 data_align = (unsigned long)(skb->data) & 0x3;
+ union skb_shared_tx *shtx = skb_tx(skb);
+
current_tx_ptr->skb = skb;
if (data_align == 0x2) {
/* move skb->data to current_tx_ptr payload */
data = (u16 *)(skb->data) - 1;
- *data = (u16)(skb->len);
+ *data = (u16)(skb->len);
+ /*
+ * When transmitting an Ethernet packet, the PTP_TSYNC module requires
+ * a DMA_Length_Word field associated with the packet. The lower 12 bits
+ * of this field are the length of the packet payload in bytes and the higher
+ * 4 bits are the timestamping enable field.
+ */
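+ /* e.g. (illustration only): a 60-byte frame with hardware timestamping
+ * enabled yields a DMA_Length_Word of 0x1000 | 60 = 0x103c.
+ */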
+ if (shtx->hardware)
+ *data |= 0x1000;
+
current_tx_ptr->desc_a.start_addr = (u32)data;
/* this is important! */
blackfin_dcache_flush_range((u32)data,
(u32)((u8 *)data + skb->len + 4));
} else {
*((u16 *)(current_tx_ptr->packet)) = (u16)(skb->len);
+ /* enable timestamping for the sent packet */
+ if (shtx->hardware)
+ *((u16 *)(current_tx_ptr->packet)) |= 0x1000;
memcpy((u8 *)(current_tx_ptr->packet + 2), skb->data,
skb->len);
current_tx_ptr->desc_a.start_addr =
@@ -653,20 +1038,42 @@ static int bfin_mac_hard_start_xmit(struct sk_buff *skb,
out:
adjust_tx_list();
+
+ bfin_tx_hwtstamp(dev, skb);
+
current_tx_ptr = current_tx_ptr->next;
- dev->trans_start = jiffies;
dev->stats.tx_packets++;
dev->stats.tx_bytes += (skb->len);
return NETDEV_TX_OK;
}
+#define IP_HEADER_OFF 0
+#define RX_ERROR_MASK (RX_LONG | RX_ALIGN | RX_CRC | RX_LEN | \
+ RX_FRAG | RX_ADDR | RX_DMAO | RX_PHY | RX_LATE | RX_RANGE)
+
static void bfin_mac_rx(struct net_device *dev)
{
struct sk_buff *skb, *new_skb;
unsigned short len;
+ struct bfin_mac_local *lp __maybe_unused = netdev_priv(dev);
+#if defined(BFIN_MAC_CSUM_OFFLOAD)
+ unsigned int i;
+ unsigned char fcs[ETH_FCS_LEN + 1];
+#endif
+
+ /* check if the frame status word reports an error condition,
+ * in which case we simply drop the packet
+ */
+ if (current_rx_ptr->status.status_word & RX_ERROR_MASK) {
+ printk(KERN_NOTICE DRV_NAME
+ ": rx: receive error - packet dropped\n");
+ dev->stats.rx_dropped++;
+ goto out;
+ }
/* allocate a new skb for next time receive */
skb = current_rx_ptr->skb;
+
new_skb = dev_alloc_skb(PKT_BUF_SZ + NET_IP_ALIGN);
if (!new_skb) {
printk(KERN_NOTICE DRV_NAME
@@ -676,34 +1083,59 @@ static void bfin_mac_rx(struct net_device *dev)
}
/* reserve 2 bytes for RXDWA padding */
skb_reserve(new_skb, NET_IP_ALIGN);
- current_rx_ptr->skb = new_skb;
- current_rx_ptr->desc_a.start_addr = (unsigned long)new_skb->data - 2;
-
/* Invalidate the data cache over the skb->data range when the cache is
* write-back. This prevents stale cache lines from overwriting the new data from DMA.
*/
blackfin_dcache_invalidate_range((unsigned long)new_skb->head,
(unsigned long)new_skb->end);
+ current_rx_ptr->skb = new_skb;
+ current_rx_ptr->desc_a.start_addr = (unsigned long)new_skb->data - 2;
+
len = (unsigned short)((current_rx_ptr->status.status_word) & RX_FRLEN);
+ /* Deduce Ethernet FCS length from Ethernet payload length */
+ len -= ETH_FCS_LEN;
skb_put(skb, len);
- blackfin_dcache_invalidate_range((unsigned long)skb->head,
- (unsigned long)skb->tail);
skb->protocol = eth_type_trans(skb, dev);
+
+ bfin_rx_hwtstamp(dev, skb);
+
#if defined(BFIN_MAC_CSUM_OFFLOAD)
- skb->csum = current_rx_ptr->status.ip_payload_csum;
- skb->ip_summed = CHECKSUM_COMPLETE;
+ /* Checksum offloading only works for IPv4 packets with the standard IP header
+ * length of 20 bytes, because the blackfin MAC checksum calculation is
+ * based on that assumption. We must NOT use the calculated checksum if our
+ * IP version or header break that assumption.
+ */
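+ /* 0x45 = IP version 4 in the upper nibble, IHL of 5 32-bit words
+ * (a 20-byte header) in the lower nibble.
+ */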
+ if (skb->data[IP_HEADER_OFF] == 0x45) {
+ skb->csum = current_rx_ptr->status.ip_payload_csum;
+ /*
+ * Deduce Ethernet FCS from hardware generated IP payload checksum.
+ * IP checksum is based on 16-bit one's complement algorithm.
+ * Removing a value from the checksum is equivalent to adding its inversion.
+ * If the IP payload length is odd, the inverted FCS must also
+ * start at an odd offset, with the first byte left as zero.
+ */
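+ /* In ones'-complement arithmetic x + ~x == 0xFFFF (negative zero), so
+ * adding the inverted FCS bytes cancels the FCS that the hardware folded
+ * into the checksum.
+ */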
+ if (skb->len % 2) {
+ fcs[0] = 0;
+ for (i = 0; i < ETH_FCS_LEN; i++)
+ fcs[i + 1] = ~skb->data[skb->len + i];
+ skb->csum = csum_partial(fcs, ETH_FCS_LEN + 1, skb->csum);
+ } else {
+ for (i = 0; i < ETH_FCS_LEN; i++)
+ fcs[i] = ~skb->data[skb->len + i];
+ skb->csum = csum_partial(fcs, ETH_FCS_LEN, skb->csum);
+ }
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ }
#endif
netif_rx(skb);
dev->stats.rx_packets++;
dev->stats.rx_bytes += len;
+out:
current_rx_ptr->status.status_word = 0x00000000;
current_rx_ptr = current_rx_ptr->next;
-
-out:
- return;
}
/* interrupt routine to handle rx and error signal */
@@ -755,8 +1187,9 @@ static void bfin_mac_disable(void)
/*
* Enable Interrupts, Receive, and Transmit
*/
-static void bfin_mac_enable(void)
+static int bfin_mac_enable(void)
{
+ int ret;
u32 opmode;
pr_debug("%s: %s\n", DRV_NAME, __func__);
@@ -766,7 +1199,9 @@ static void bfin_mac_enable(void)
bfin_write_DMA1_CONFIG(rx_list_head->desc_a.config);
/* Wait MII done */
- bfin_mdio_poll();
+ ret = bfin_mdio_poll();
+ if (ret)
+ return ret;
/* We enable only RX here */
/* ASTP : Enable Automatic Pad Stripping
@@ -790,6 +1225,8 @@ static void bfin_mac_enable(void)
#endif
/* Turn on the EMAC rx */
bfin_write_EMAC_OPMODE(opmode);
+
+ return 0;
}
/* Our watchdog timed out. Called by the networking layer */
@@ -805,21 +1242,21 @@ static void bfin_mac_timeout(struct net_device *dev)
bfin_mac_enable();
/* We can accept TX packets again */
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
}
static void bfin_mac_multicast_hash(struct net_device *dev)
{
u32 emac_hashhi, emac_hashlo;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
char *addrs;
u32 crc;
emac_hashhi = emac_hashlo = 0;
- netdev_for_each_mc_addr(dmi, dev) {
- addrs = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ addrs = ha->addr;
/* skip non-multicast addresses */
if (!(*addrs & 1))
@@ -836,8 +1273,6 @@ static void bfin_mac_multicast_hash(struct net_device *dev)
bfin_write_EMAC_HASHHI(emac_hashhi);
bfin_write_EMAC_HASHLO(emac_hashlo);
-
- return;
}
/*
@@ -853,7 +1288,7 @@ static void bfin_mac_set_multicast_list(struct net_device *dev)
if (dev->flags & IFF_PROMISC) {
printk(KERN_INFO "%s: set to promisc mode\n", dev->name);
sysctl = bfin_read_EMAC_OPMODE();
- sysctl |= RAF;
+ sysctl |= PR;
bfin_write_EMAC_OPMODE(sysctl);
} else if (dev->flags & IFF_ALLMULTI) {
/* accept all multicast */
@@ -874,6 +1309,16 @@ static void bfin_mac_set_multicast_list(struct net_device *dev)
}
}
+static int bfin_mac_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return bfin_mac_hwtstamp_ioctl(netdev, ifr, cmd);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
/*
* this puts the device in an inactive state
*/
@@ -894,7 +1339,7 @@ static void bfin_mac_shutdown(struct net_device *dev)
static int bfin_mac_open(struct net_device *dev)
{
struct bfin_mac_local *lp = netdev_priv(dev);
- int retval;
+ int ret;
pr_debug("%s: %s\n", dev->name, __func__);
/*
@@ -908,18 +1353,21 @@ static int bfin_mac_open(struct net_device *dev)
}
/* initial rx and tx list */
- retval = desc_list_init();
-
- if (retval)
- return retval;
+ ret = desc_list_init();
+ if (ret)
+ return ret;
phy_start(lp->phydev);
phy_write(lp->phydev, MII_BMCR, BMCR_RESET);
setup_system_regs(dev);
setup_mac_addr(dev->dev_addr);
+
bfin_mac_disable();
- bfin_mac_enable();
+ ret = bfin_mac_enable();
+ if (ret)
+ return ret;
pr_debug("hardware init finished\n");
+
netif_start_queue(dev);
netif_carrier_on(dev);
@@ -958,6 +1406,7 @@ static const struct net_device_ops bfin_mac_netdev_ops = {
.ndo_set_mac_address = bfin_mac_set_mac_address,
.ndo_tx_timeout = bfin_mac_timeout,
.ndo_set_multicast_list = bfin_mac_set_multicast_list,
+ .ndo_do_ioctl = bfin_mac_ioctl,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -1017,6 +1466,11 @@ static int __devinit bfin_mac_probe(struct platform_device *pdev)
}
pd = pdev->dev.platform_data;
lp->mii_bus = platform_get_drvdata(pd);
+ if (!lp->mii_bus) {
+ dev_err(&pdev->dev, "Cannot get mii_bus!\n");
+ rc = -ENODEV;
+ goto out_err_mii_bus_probe;
+ }
lp->mii_bus->priv = ndev;
rc = mii_probe(ndev);
@@ -1049,6 +1503,8 @@ static int __devinit bfin_mac_probe(struct platform_device *pdev)
goto out_err_reg_ndev;
}
+ bfin_mac_hwtstamp_init(ndev);
+
/* now, print out the card info, in a short format.. */
dev_info(&pdev->dev, "%s, Version %s\n", DRV_DESC, DRV_VERSION);
@@ -1060,6 +1516,7 @@ out_err_request_irq:
out_err_mii_probe:
mdiobus_unregister(lp->mii_bus);
mdiobus_free(lp->mii_bus);
+out_err_mii_bus_probe:
peripheral_free_list(pin_req);
out_err_probe_mac:
platform_set_drvdata(pdev, NULL);
@@ -1092,9 +1549,16 @@ static int __devexit bfin_mac_remove(struct platform_device *pdev)
static int bfin_mac_suspend(struct platform_device *pdev, pm_message_t mesg)
{
struct net_device *net_dev = platform_get_drvdata(pdev);
+ struct bfin_mac_local *lp = netdev_priv(net_dev);
- if (netif_running(net_dev))
- bfin_mac_close(net_dev);
+ if (lp->wol) {
+ bfin_write_EMAC_OPMODE((bfin_read_EMAC_OPMODE() & ~TE) | RE);
+ bfin_write_EMAC_WKUP_CTL(MPKE);
+ enable_irq_wake(IRQ_MAC_WAKEDET);
+ } else {
+ if (netif_running(net_dev))
+ bfin_mac_close(net_dev);
+ }
return 0;
}
@@ -1102,9 +1566,16 @@ static int bfin_mac_suspend(struct platform_device *pdev, pm_message_t mesg)
static int bfin_mac_resume(struct platform_device *pdev)
{
struct net_device *net_dev = platform_get_drvdata(pdev);
+ struct bfin_mac_local *lp = netdev_priv(net_dev);
- if (netif_running(net_dev))
- bfin_mac_open(net_dev);
+ if (lp->wol) {
+ bfin_write_EMAC_OPMODE(bfin_read_EMAC_OPMODE() | TE);
+ bfin_write_EMAC_WKUP_CTL(0);
+ disable_irq_wake(IRQ_MAC_WAKEDET);
+ } else {
+ if (netif_running(net_dev))
+ bfin_mac_open(net_dev);
+ }
return 0;
}
@@ -1155,6 +1626,7 @@ static int __devinit bfin_mii_bus_probe(struct platform_device *pdev)
return 0;
out_err_mdiobus_register:
+ kfree(miibus->irq);
mdiobus_free(miibus);
out_err_alloc:
peripheral_free_list(pin_req);
@@ -1167,6 +1639,7 @@ static int __devexit bfin_mii_bus_remove(struct platform_device *pdev)
struct mii_bus *miibus = platform_get_drvdata(pdev);
platform_set_drvdata(pdev, NULL);
mdiobus_unregister(miibus);
+ kfree(miibus->irq);
mdiobus_free(miibus);
peripheral_free_list(pin_req);
return 0;
diff --git a/drivers/net/bfin_mac.h b/drivers/net/bfin_mac.h
index 052b5dce3e3c..1ae7b82ceeee 100644
--- a/drivers/net/bfin_mac.h
+++ b/drivers/net/bfin_mac.h
@@ -7,6 +7,12 @@
*
* Licensed under the GPL-2 or later.
*/
+#ifndef _BFIN_MAC_H_
+#define _BFIN_MAC_H_
+
+#include <linux/net_tstamp.h>
+#include <linux/clocksource.h>
+#include <linux/timecompare.h>
#define BFIN_MAC_CSUM_OFFLOAD
@@ -60,6 +66,9 @@ struct bfin_mac_local {
unsigned char Mac[6]; /* MAC address of the board */
spinlock_t lock;
+ int wol; /* Wake On Lan */
+ int irq_wake_requested;
+
/* MII and PHY stuffs */
int old_link; /* used by bf537_adjust_link */
int old_speed;
@@ -67,6 +76,15 @@ struct bfin_mac_local {
struct phy_device *phydev;
struct mii_bus *mii_bus;
+
+#if defined(CONFIG_BFIN_MAC_USE_HWSTAMP)
+ struct cyclecounter cycles;
+ struct timecounter clock;
+ struct timecompare compare;
+ struct hwtstamp_config stamp_cfg;
+#endif
};
extern void bfin_get_ether_addr(char *addr);
+
+#endif
diff --git a/drivers/net/bmac.c b/drivers/net/bmac.c
index 598b007f1991..39250b2ca886 100644
--- a/drivers/net/bmac.c
+++ b/drivers/net/bmac.c
@@ -167,7 +167,6 @@ static inline void
dbdma_st32(volatile __u32 __iomem *a, unsigned long x)
{
__asm__ volatile( "stwbrx %0,0,%1" : : "r" (x), "r" (a) : "memory");
- return;
}
static inline unsigned long
@@ -382,8 +381,6 @@ bmac_init_registers(struct net_device *dev)
bmwrite(dev, RXCFG, RxCRCNoStrip | RxHashFilterEnable | RxRejectOwnPackets);
bmwrite(dev, INTDISABLE, EnableNormal);
-
- return;
}
#if 0
@@ -972,7 +969,7 @@ bmac_remove_multi(struct net_device *dev,
*/
static void bmac_set_multicast(struct net_device *dev)
{
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
struct bmac_data *bp = netdev_priv(dev);
int num_addrs = netdev_mc_count(dev);
unsigned short rx_cfg;
@@ -1001,8 +998,8 @@ static void bmac_set_multicast(struct net_device *dev)
rx_cfg = bmac_rx_on(dev, 0, 0);
XXDEBUG(("bmac: multi disabled, rx_cfg=%#08x\n", rx_cfg));
} else {
- netdev_for_each_mc_addr(dmi, dev)
- bmac_addhash(bp, dmi->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev)
+ bmac_addhash(bp, ha->addr);
bmac_update_hash_table_mask(dev, bp);
rx_cfg = bmac_rx_on(dev, 1, 0);
XXDEBUG(("bmac: multi enabled, rx_cfg=%#08x\n", rx_cfg));
@@ -1016,7 +1013,7 @@ static void bmac_set_multicast(struct net_device *dev)
static void bmac_set_multicast(struct net_device *dev)
{
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
char *addrs;
int i;
unsigned short rx_cfg;
@@ -1040,8 +1037,8 @@ static void bmac_set_multicast(struct net_device *dev)
for(i = 0; i < 4; i++) hash_table[i] = 0;
- netdev_for_each_mc_addr(dmi, dev) {
- addrs = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ addrs = ha->addr;
if(!(*addrs & 1))
continue;
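
The bmac changes above follow the tree-wide dev_mc_list removal: multicast addresses are now walked as netdev_hw_addr entries. A minimal sketch (example_add_hash() is a placeholder for the driver's real hash helper):

#include <linux/netdevice.h>

static void example_add_hash(struct net_device *dev, const unsigned char *addr)
{
	/* compute and set the hardware hash bucket for @addr */
}

static void example_set_multicast(struct net_device *dev)
{
	struct netdev_hw_addr *ha;

	netdev_for_each_mc_addr(ha, dev)
		example_add_hash(dev, ha->addr);	/* ha->addr replaces dmi->dmi_addr */
}
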
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index ac90a3828f69..188e356c30a3 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -58,11 +58,11 @@
#include "bnx2_fw.h"
#define DRV_MODULE_NAME "bnx2"
-#define DRV_MODULE_VERSION "2.0.9"
-#define DRV_MODULE_RELDATE "April 27, 2010"
+#define DRV_MODULE_VERSION "2.0.15"
+#define DRV_MODULE_RELDATE "May 4, 2010"
#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-5.0.0.j6.fw"
#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-5.0.0.j3.fw"
-#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-5.0.0.j9.fw"
+#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-5.0.0.j15.fw"
#define FW_RV2P_FILE_09_Ax "bnx2/bnx2-rv2p-09ax-5.0.0.j10.fw"
#define FW_RV2P_FILE_09 "bnx2/bnx2-rv2p-09-5.0.0.j10.fw"
@@ -656,19 +656,11 @@ bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
if (stop_cnic)
bnx2_cnic_stop(bp);
if (netif_running(bp->dev)) {
- int i;
-
bnx2_napi_disable(bp);
netif_tx_disable(bp->dev);
- /* prevent tx timeout */
- for (i = 0; i < bp->dev->num_tx_queues; i++) {
- struct netdev_queue *txq;
-
- txq = netdev_get_tx_queue(bp->dev, i);
- txq->trans_start = jiffies;
- }
}
bnx2_disable_int_sync(bp);
+ netif_carrier_off(bp->dev); /* prevent tx timeout */
}
static void
@@ -677,6 +669,10 @@ bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
if (atomic_dec_and_test(&bp->intr_sem)) {
if (netif_running(bp->dev)) {
netif_tx_wake_all_queues(bp->dev);
+ spin_lock_bh(&bp->phy_lock);
+ if (bp->link_up)
+ netif_carrier_on(bp->dev);
+ spin_unlock_bh(&bp->phy_lock);
bnx2_napi_enable(bp);
bnx2_enable_int(bp);
if (start_cnic)
@@ -2672,7 +2668,7 @@ bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
}
rx_pg->page = page;
- pci_unmap_addr_set(rx_pg, mapping, mapping);
+ dma_unmap_addr_set(rx_pg, mapping, mapping);
rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
return 0;
@@ -2687,7 +2683,7 @@ bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
if (!page)
return;
- pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
+ pci_unmap_page(bp->pdev, dma_unmap_addr(rx_pg, mapping), PAGE_SIZE,
PCI_DMA_FROMDEVICE);
__free_page(page);
@@ -2719,7 +2715,8 @@ bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
}
rx_buf->skb = skb;
- pci_unmap_addr_set(rx_buf, mapping, mapping);
+ rx_buf->desc = (struct l2_fhdr *) skb->data;
+ dma_unmap_addr_set(rx_buf, mapping, mapping);
rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
@@ -2818,7 +2815,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
}
}
- pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
+ pci_unmap_single(bp->pdev, dma_unmap_addr(tx_buf, mapping),
skb_headlen(skb), PCI_DMA_TODEVICE);
tx_buf->skb = NULL;
@@ -2828,7 +2825,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
sw_cons = NEXT_TX_BD(sw_cons);
pci_unmap_page(bp->pdev,
- pci_unmap_addr(
+ dma_unmap_addr(
&txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
mapping),
skb_shinfo(skb)->frags[i].size,
@@ -2910,8 +2907,8 @@ bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
if (prod != cons) {
prod_rx_pg->page = cons_rx_pg->page;
cons_rx_pg->page = NULL;
- pci_unmap_addr_set(prod_rx_pg, mapping,
- pci_unmap_addr(cons_rx_pg, mapping));
+ dma_unmap_addr_set(prod_rx_pg, mapping,
+ dma_unmap_addr(cons_rx_pg, mapping));
prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
@@ -2935,18 +2932,19 @@ bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
prod_rx_buf = &rxr->rx_buf_ring[prod];
pci_dma_sync_single_for_device(bp->pdev,
- pci_unmap_addr(cons_rx_buf, mapping),
+ dma_unmap_addr(cons_rx_buf, mapping),
BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
rxr->rx_prod_bseq += bp->rx_buf_use_size;
prod_rx_buf->skb = skb;
+ prod_rx_buf->desc = (struct l2_fhdr *) skb->data;
if (cons == prod)
return;
- pci_unmap_addr_set(prod_rx_buf, mapping,
- pci_unmap_addr(cons_rx_buf, mapping));
+ dma_unmap_addr_set(prod_rx_buf, mapping,
+ dma_unmap_addr(cons_rx_buf, mapping));
cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
@@ -3019,7 +3017,7 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
/* Don't unmap yet. If we're unable to allocate a new
* page, we need to recycle the page and the DMA addr.
*/
- mapping_old = pci_unmap_addr(rx_pg, mapping);
+ mapping_old = dma_unmap_addr(rx_pg, mapping);
if (i == pages - 1)
frag_len -= 4;
@@ -3074,6 +3072,7 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
struct l2_fhdr *rx_hdr;
int rx_pkt = 0, pg_ring_used = 0;
+ struct pci_dev *pdev = bp->pdev;
hw_cons = bnx2_get_hw_rx_cons(bnapi);
sw_cons = rxr->rx_cons;
@@ -3086,7 +3085,7 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
while (sw_cons != hw_cons) {
unsigned int len, hdr_len;
u32 status;
- struct sw_bd *rx_buf;
+ struct sw_bd *rx_buf, *next_rx_buf;
struct sk_buff *skb;
dma_addr_t dma_addr;
u16 vtag = 0;
@@ -3097,16 +3096,23 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
skb = rx_buf->skb;
+ prefetchw(skb);
+ if (!get_dma_ops(&pdev->dev)->sync_single_for_cpu) {
+ next_rx_buf =
+ &rxr->rx_buf_ring[
+ RX_RING_IDX(NEXT_RX_BD(sw_cons))];
+ prefetch(next_rx_buf->desc);
+ }
rx_buf->skb = NULL;
- dma_addr = pci_unmap_addr(rx_buf, mapping);
+ dma_addr = dma_unmap_addr(rx_buf, mapping);
pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
PCI_DMA_FROMDEVICE);
- rx_hdr = (struct l2_fhdr *) skb->data;
+ rx_hdr = rx_buf->desc;
len = rx_hdr->l2_fhdr_pkt_len;
status = rx_hdr->l2_fhdr_status;
@@ -3207,10 +3213,10 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
#ifdef BCM_VLAN
if (hw_vlan)
- vlan_hwaccel_receive_skb(skb, bp->vlgrp, vtag);
+ vlan_gro_receive(&bnapi->napi, bp->vlgrp, vtag, skb);
else
#endif
- netif_receive_skb(skb);
+ napi_gro_receive(&bnapi->napi, skb);
rx_pkt++;
@@ -3548,7 +3554,6 @@ bnx2_set_rx_mode(struct net_device *dev)
}
else {
/* Accept one or more multicast(s). */
- struct dev_mc_list *mclist;
u32 mc_filter[NUM_MC_HASH_REGISTERS];
u32 regidx;
u32 bit;
@@ -3556,8 +3561,8 @@ bnx2_set_rx_mode(struct net_device *dev)
memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
- netdev_for_each_mc_addr(mclist, dev) {
- crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev) {
+ crc = ether_crc_le(ETH_ALEN, ha->addr);
bit = crc & 0xff;
regidx = (bit & 0xe0) >> 5;
bit &= 0x1f;
@@ -5318,7 +5323,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
}
pci_unmap_single(bp->pdev,
- pci_unmap_addr(tx_buf, mapping),
+ dma_unmap_addr(tx_buf, mapping),
skb_headlen(skb),
PCI_DMA_TODEVICE);
@@ -5329,7 +5334,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
for (k = 0; k < last; k++, j++) {
tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
pci_unmap_page(bp->pdev,
- pci_unmap_addr(tx_buf, mapping),
+ dma_unmap_addr(tx_buf, mapping),
skb_shinfo(skb)->frags[k].size,
PCI_DMA_TODEVICE);
}
@@ -5359,7 +5364,7 @@ bnx2_free_rx_skbs(struct bnx2 *bp)
continue;
pci_unmap_single(bp->pdev,
- pci_unmap_addr(rx_buf, mapping),
+ dma_unmap_addr(rx_buf, mapping),
bp->rx_buf_use_size,
PCI_DMA_FROMDEVICE);
@@ -5765,11 +5770,11 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
rx_buf = &rxr->rx_buf_ring[rx_start_idx];
rx_skb = rx_buf->skb;
- rx_hdr = (struct l2_fhdr *) rx_skb->data;
+ rx_hdr = rx_buf->desc;
skb_reserve(rx_skb, BNX2_RX_OFFSET);
pci_dma_sync_single_for_cpu(bp->pdev,
- pci_unmap_addr(rx_buf, mapping),
+ dma_unmap_addr(rx_buf, mapping),
bp->rx_buf_size, PCI_DMA_FROMDEVICE);
if (rx_hdr->l2_fhdr_status &
@@ -6292,14 +6297,23 @@ static void
bnx2_dump_state(struct bnx2 *bp)
{
struct net_device *dev = bp->dev;
+ u32 mcp_p0, mcp_p1;
netdev_err(dev, "DEBUG: intr_sem[%x]\n", atomic_read(&bp->intr_sem));
- netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] RPM_MGMT_PKT_CTRL[%08x]\n",
+ netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
REG_RD(bp, BNX2_EMAC_TX_STATUS),
+ REG_RD(bp, BNX2_EMAC_RX_STATUS));
+ netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n",
REG_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
+ if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+ mcp_p0 = BNX2_MCP_STATE_P0;
+ mcp_p1 = BNX2_MCP_STATE_P1;
+ } else {
+ mcp_p0 = BNX2_MCP_STATE_P0_5708;
+ mcp_p1 = BNX2_MCP_STATE_P1_5708;
+ }
netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
- bnx2_reg_rd_ind(bp, BNX2_MCP_STATE_P0),
- bnx2_reg_rd_ind(bp, BNX2_MCP_STATE_P1));
+ bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
REG_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
if (bp->flags & BNX2_FLAG_USING_MSIX)
@@ -6429,7 +6443,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_buf = &txr->tx_buf_ring[ring_prod];
tx_buf->skb = skb;
- pci_unmap_addr_set(tx_buf, mapping, mapping);
+ dma_unmap_addr_set(tx_buf, mapping, mapping);
txbd = &txr->tx_desc_ring[ring_prod];
@@ -6454,7 +6468,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
len, PCI_DMA_TODEVICE);
if (pci_dma_mapping_error(bp->pdev, mapping))
goto dma_error;
- pci_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
+ dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
mapping);
txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
@@ -6491,7 +6505,7 @@ dma_error:
ring_prod = TX_RING_IDX(prod);
tx_buf = &txr->tx_buf_ring[ring_prod];
tx_buf->skb = NULL;
- pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
+ pci_unmap_single(bp->pdev, dma_unmap_addr(tx_buf, mapping),
skb_headlen(skb), PCI_DMA_TODEVICE);
/* unmap remaining mapped pages */
@@ -6499,7 +6513,7 @@ dma_error:
prod = NEXT_TX_BD(prod);
ring_prod = TX_RING_IDX(prod);
tx_buf = &txr->tx_buf_ring[ring_prod];
- pci_unmap_page(bp->pdev, pci_unmap_addr(tx_buf, mapping),
+ pci_unmap_page(bp->pdev, dma_unmap_addr(tx_buf, mapping),
skb_shinfo(skb)->frags[i].size,
PCI_DMA_TODEVICE);
}
@@ -8297,7 +8311,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
memcpy(dev->dev_addr, bp->mac_addr, 6);
memcpy(dev->perm_addr, bp->mac_addr, 6);
- dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
+ dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_GRO;
vlan_features_add(dev, NETIF_F_IP_CSUM | NETIF_F_SG);
if (CHIP_NUM(bp) == CHIP_NUM_5709) {
dev->features |= NETIF_F_IPV6_CSUM;
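
The bnx2 RX path above now advertises NETIF_F_GRO and hands completed frames to the GRO layer instead of calling netif_receive_skb() directly. A minimal sketch of that delivery step (the example_* name is a placeholder):

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

static void example_rx_deliver(struct napi_struct *napi, struct sk_buff *skb,
			       struct net_device *dev)
{
	skb->protocol = eth_type_trans(skb, dev);
	napi_gro_receive(napi, skb);	/* GRO aggregates before the stack sees it */
}
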
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index cd4b0e4637ab..ddaa3fc99876 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -6347,6 +6347,8 @@ struct l2_fhdr {
#define BNX2_MCP_SCRATCH 0x00160000
#define BNX2_MCP_STATE_P1 0x0016f9c8
#define BNX2_MCP_STATE_P0 0x0016fdc8
+#define BNX2_MCP_STATE_P1_5708 0x001699c8
+#define BNX2_MCP_STATE_P0_5708 0x00169dc8
#define BNX2_SHM_HDR_SIGNATURE BNX2_MCP_SCRATCH
#define BNX2_SHM_HDR_SIGNATURE_SIG_MASK 0xffff0000
@@ -6551,17 +6553,18 @@ struct l2_fhdr {
struct sw_bd {
struct sk_buff *skb;
- DECLARE_PCI_UNMAP_ADDR(mapping)
+ struct l2_fhdr *desc;
+ DEFINE_DMA_UNMAP_ADDR(mapping);
};
struct sw_pg {
struct page *page;
- DECLARE_PCI_UNMAP_ADDR(mapping)
+ DEFINE_DMA_UNMAP_ADDR(mapping);
};
struct sw_tx_bd {
struct sk_buff *skb;
- DECLARE_PCI_UNMAP_ADDR(mapping)
+ DEFINE_DMA_UNMAP_ADDR(mapping);
unsigned short is_gso;
unsigned short nr_frags;
};
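
The DECLARE_PCI_UNMAP_ADDR to DEFINE_DMA_UNMAP_ADDR conversion above switches to the generic DMA unmap-state helpers. A minimal sketch of how they are typically used (example_* names are placeholders):

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

struct example_rx_buf {
	struct sk_buff *skb;
	DEFINE_DMA_UNMAP_ADDR(mapping);	/* compiles to nothing if unmap state is unneeded */
};

static void example_save_mapping(struct example_rx_buf *buf, dma_addr_t dma)
{
	dma_unmap_addr_set(buf, mapping, dma);
}

static void example_unmap(struct device *dev, struct example_rx_buf *buf,
			  size_t len)
{
	dma_unmap_single(dev, dma_unmap_addr(buf, mapping), len,
			 DMA_FROM_DEVICE);
}
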
diff --git a/drivers/net/bnx2x.h b/drivers/net/bnx2x.h
index 3c48a7a68308..8bd23687c530 100644
--- a/drivers/net/bnx2x.h
+++ b/drivers/net/bnx2x.h
@@ -24,16 +24,25 @@
#define BCM_VLAN 1
#endif
+#define BNX2X_MULTI_QUEUE
+
+#define BNX2X_NEW_NAPI
+
+
+
#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
#define BCM_CNIC 1
#include "cnic_if.h"
#endif
-#define BNX2X_MULTI_QUEUE
-
-#define BNX2X_NEW_NAPI
-
+#ifdef BCM_CNIC
+#define BNX2X_MIN_MSIX_VEC_CNT 3
+#define BNX2X_MSIX_VEC_FP_START 2
+#else
+#define BNX2X_MIN_MSIX_VEC_CNT 2
+#define BNX2X_MSIX_VEC_FP_START 1
+#endif
#include <linux/mdio.h>
#include "bnx2x_reg.h"
@@ -83,7 +92,12 @@ do { \
__func__, __LINE__, \
bp->dev ? (bp->dev->name) : "?", \
##__args); \
-} while (0)
+ } while (0)
+
+#define BNX2X_ERROR(__fmt, __args...) do { \
+ pr_err("[%s:%d]" __fmt, __func__, __LINE__, ##__args); \
+ } while (0)
+
/* before we have a dev->name use dev_info() */
#define BNX2X_DEV_INFO(__fmt, __args...) \
@@ -155,15 +169,21 @@ do { \
#define SHMEM2_RD(bp, field) REG_RD(bp, SHMEM2_ADDR(bp, field))
#define SHMEM2_WR(bp, field, val) REG_WR(bp, SHMEM2_ADDR(bp, field), val)
+#define MF_CFG_RD(bp, field) SHMEM_RD(bp, mf_cfg.field)
+#define MF_CFG_WR(bp, field, val) SHMEM_WR(bp, mf_cfg.field, val)
+
#define EMAC_RD(bp, reg) REG_RD(bp, emac_base + reg)
#define EMAC_WR(bp, reg, val) REG_WR(bp, emac_base + reg, val)
+#define AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR \
+ AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR
+
/* fast path */
struct sw_rx_bd {
struct sk_buff *skb;
- DECLARE_PCI_UNMAP_ADDR(mapping)
+ DEFINE_DMA_UNMAP_ADDR(mapping);
};
struct sw_tx_bd {
@@ -176,7 +196,7 @@ struct sw_tx_bd {
struct sw_rx_page {
struct page *page;
- DECLARE_PCI_UNMAP_ADDR(mapping)
+ DEFINE_DMA_UNMAP_ADDR(mapping);
};
union db_prod {
@@ -261,7 +281,7 @@ struct bnx2x_eth_q_stats {
u32 hw_csum_err;
};
-#define BNX2X_NUM_Q_STATS 11
+#define BNX2X_NUM_Q_STATS 13
#define Q_STATS_OFFSET32(stat_name) \
(offsetof(struct bnx2x_eth_q_stats, stat_name) / 4)
@@ -767,7 +787,7 @@ struct bnx2x_eth_stats {
u32 nig_timer_max;
};
-#define BNX2X_NUM_STATS 41
+#define BNX2X_NUM_STATS 43
#define STATS_OFFSET32(stat_name) \
(offsetof(struct bnx2x_eth_stats, stat_name) / 4)
@@ -818,6 +838,12 @@ struct attn_route {
u32 sig[4];
};
+typedef enum {
+ BNX2X_RECOVERY_DONE,
+ BNX2X_RECOVERY_INIT,
+ BNX2X_RECOVERY_WAIT,
+} bnx2x_recovery_state_t;
+
struct bnx2x {
/* Fields used in the tx and intr/napi performance paths
* are grouped together in the beginning of the structure
@@ -835,6 +861,9 @@ struct bnx2x {
struct pci_dev *pdev;
atomic_t intr_sem;
+
+ bnx2x_recovery_state_t recovery_state;
+ int is_leader;
#ifdef BCM_CNIC
struct msix_entry msix_table[MAX_CONTEXT+2];
#else
@@ -842,7 +871,6 @@ struct bnx2x {
#endif
#define INT_MODE_INTx 1
#define INT_MODE_MSI 2
-#define INT_MODE_MSIX 3
int tx_ring_size;
@@ -924,8 +952,7 @@ struct bnx2x {
int mrrs;
struct delayed_work sp_task;
- struct work_struct reset_task;
-
+ struct delayed_work reset_task;
struct timer_list timer;
int current_interval;
@@ -961,6 +988,8 @@ struct bnx2x {
u16 rx_quick_cons_trip;
u16 rx_ticks_int;
u16 rx_ticks;
+/* Maximal coalescing timeout in us */
+#define BNX2X_MAX_COALESCE_TOUT (0xf0*12)
u32 lin_cnt;
@@ -1075,6 +1104,7 @@ struct bnx2x {
#define INIT_CSEM_INT_TABLE_DATA(bp) (bp->csem_int_table_data)
#define INIT_CSEM_PRAM_DATA(bp) (bp->csem_pram_data)
+ char fw_ver[32];
const struct firmware *firmware;
};
@@ -1125,6 +1155,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define LOAD_DIAG 2
#define UNLOAD_NORMAL 0
#define UNLOAD_CLOSE 1
+#define UNLOAD_RECOVERY 2
/* DMAE command defines */
@@ -1152,7 +1183,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define DMAE_CMD_E1HVN_SHIFT DMAE_COMMAND_E1HVN_SHIFT
#define DMAE_LEN32_RD_MAX 0x80
-#define DMAE_LEN32_WR_MAX 0x400
+#define DMAE_LEN32_WR_MAX(bp) (CHIP_IS_E1(bp) ? 0x400 : 0x2000)
#define DMAE_COMP_VAL 0xe0d0d0ae
@@ -1294,8 +1325,12 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR | \
AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR)
+#define HW_PRTY_ASSERT_SET_3 (AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY)
-#define MULTI_FLAGS(bp) \
+#define RSS_FLAGS(bp) \
(TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY | \
TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY | \
TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY | \
@@ -1333,6 +1368,9 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define PXP2_REG_PXP2_INT_STS PXP2_REG_PXP2_INT_STS_0
#endif
+#define BNX2X_VPD_LEN 128
+#define VENDOR_ID_LEN 4
+
/* MISC_REG_RESET_REG - this is here for the hsi to work don't touch */
#endif /* bnx2x.h */
diff --git a/drivers/net/bnx2x_hsi.h b/drivers/net/bnx2x_hsi.h
index 760069345b11..fd1f29e0317d 100644
--- a/drivers/net/bnx2x_hsi.h
+++ b/drivers/net/bnx2x_hsi.h
@@ -683,7 +683,7 @@ struct drv_func_mb {
#define DRV_MSG_CODE_GET_MANUF_KEY 0x82000000
#define DRV_MSG_CODE_LOAD_L2B_PRAM 0x90000000
/*
- * The optic module verification commands requris bootcode
+ * The optic module verification commands require bootcode
* v5.0.6 or later
*/
#define DRV_MSG_CODE_VRFY_OPT_MDL 0xa0000000
diff --git a/drivers/net/bnx2x_link.c b/drivers/net/bnx2x_link.c
index 32e79c359e89..ff70be898765 100644
--- a/drivers/net/bnx2x_link.c
+++ b/drivers/net/bnx2x_link.c
@@ -1594,7 +1594,7 @@ static u8 bnx2x_ext_phy_resolve_fc(struct link_params *params,
MDIO_AN_REG_ADV_PAUSE_MASK) >> 8;
pause_result |= (lp_pause &
MDIO_AN_REG_ADV_PAUSE_MASK) >> 10;
- DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x \n",
+ DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x\n",
pause_result);
bnx2x_pause_resolve(vars, pause_result);
if (vars->flow_ctrl == BNX2X_FLOW_CTRL_NONE &&
@@ -1616,7 +1616,7 @@ static u8 bnx2x_ext_phy_resolve_fc(struct link_params *params,
MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 7;
bnx2x_pause_resolve(vars, pause_result);
- DP(NETIF_MSG_LINK, "Ext PHY CL37 pause result 0x%x \n",
+ DP(NETIF_MSG_LINK, "Ext PHY CL37 pause result 0x%x\n",
pause_result);
}
}
@@ -1974,7 +1974,7 @@ static u8 bnx2x_link_settings_status(struct link_params *params,
}
}
- DP(NETIF_MSG_LINK, "gp_status 0x%x phy_link_up %x line_speed %x \n",
+ DP(NETIF_MSG_LINK, "gp_status 0x%x phy_link_up %x line_speed %x\n",
gp_status, vars->phy_link_up, vars->line_speed);
DP(NETIF_MSG_LINK, "duplex %x flow_ctrl 0x%x"
" autoneg 0x%x\n",
@@ -3852,7 +3852,7 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
SPEED_AUTO_NEG) &&
((params->speed_cap_mask &
PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))) {
- DP(NETIF_MSG_LINK, "Setting 1G clause37 \n");
+ DP(NETIF_MSG_LINK, "Setting 1G clause37\n");
bnx2x_cl45_write(bp, params->port, ext_phy_type,
ext_phy_addr, MDIO_AN_DEVAD,
MDIO_AN_REG_ADV, 0x20);
@@ -4234,14 +4234,14 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
ext_phy_addr,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_10G_CTRL2, &tmp1);
- DP(NETIF_MSG_LINK, "1.7 = 0x%x \n", tmp1);
+ DP(NETIF_MSG_LINK, "1.7 = 0x%x\n", tmp1);
} else if ((params->req_line_speed ==
SPEED_AUTO_NEG) &&
((params->speed_cap_mask &
PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))) {
- DP(NETIF_MSG_LINK, "Setting 1G clause37 \n");
+ DP(NETIF_MSG_LINK, "Setting 1G clause37\n");
bnx2x_cl45_write(bp, params->port, ext_phy_type,
ext_phy_addr, MDIO_AN_DEVAD,
MDIO_PMA_REG_8727_MISC_CTRL, 0);
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index 6c042a72d6cc..57ff5b3bcce6 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -57,8 +57,8 @@
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"
-#define DRV_MODULE_VERSION "1.52.1-7"
-#define DRV_MODULE_RELDATE "2010/02/28"
+#define DRV_MODULE_VERSION "1.52.53-1"
+#define DRV_MODULE_RELDATE "2010/18/04"
#define BNX2X_BC_VER 0x040200
#include <linux/firmware.h>
@@ -102,7 +102,8 @@ MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
static int int_mode;
module_param(int_mode, int, 0);
-MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");
+MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
+ "(1 INT#x; 2 MSI)");
static int dropless_fc;
module_param(dropless_fc, int, 0);
@@ -352,13 +353,14 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
u32 addr, u32 len)
{
+ int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
int offset = 0;
- while (len > DMAE_LEN32_WR_MAX) {
+ while (len > dmae_wr_max) {
bnx2x_write_dmae(bp, phys_addr + offset,
- addr + offset, DMAE_LEN32_WR_MAX);
- offset += DMAE_LEN32_WR_MAX * 4;
- len -= DMAE_LEN32_WR_MAX;
+ addr + offset, dmae_wr_max);
+ offset += dmae_wr_max * 4;
+ len -= dmae_wr_max;
}
bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
@@ -508,26 +510,31 @@ static int bnx2x_mc_assert(struct bnx2x *bp)
static void bnx2x_fw_dump(struct bnx2x *bp)
{
+ u32 addr;
u32 mark, offset;
__be32 data[9];
int word;
- mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
- mark = ((mark + 0x3) & ~0x3);
+ if (BP_NOMCP(bp)) {
+ BNX2X_ERR("NO MCP - can not dump\n");
+ return;
+ }
+
+ addr = bp->common.shmem_base - 0x0800 + 4;
+ mark = REG_RD(bp, addr);
+ mark = MCP_REG_MCPR_SCRATCH + ((mark + 0x3) & ~0x3) - 0x08000000;
pr_err("begin fw dump (mark 0x%x)\n", mark);
pr_err("");
- for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
+ for (offset = mark; offset <= bp->common.shmem_base; offset += 0x8*4) {
for (word = 0; word < 8; word++)
- data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
- offset + 4*word));
+ data[word] = htonl(REG_RD(bp, offset + 4*word));
data[8] = 0x0;
pr_cont("%s", (char *)data);
}
- for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
+ for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
for (word = 0; word < 8; word++)
- data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
- offset + 4*word));
+ data[word] = htonl(REG_RD(bp, offset + 4*word));
data[8] = 0x0;
pr_cont("%s", (char *)data);
}
@@ -546,9 +553,9 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
/* Indices */
/* Common */
- BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
- " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
- " spq_prod_idx(%u)\n",
+ BNX2X_ERR("def_c_idx(0x%x) def_u_idx(0x%x) def_x_idx(0x%x)"
+ " def_t_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)"
+ " spq_prod_idx(0x%x)\n",
bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
@@ -556,14 +563,14 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
for_each_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
- BNX2X_ERR("fp%d: rx_bd_prod(%x) rx_bd_cons(%x)"
- " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
- " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
+ BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x)"
+ " *rx_bd_cons_sb(0x%x) rx_comp_prod(0x%x)"
+ " rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
i, fp->rx_bd_prod, fp->rx_bd_cons,
le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
- BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
- " fp_u_idx(%x) *sb_u_idx(%x)\n",
+ BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x)"
+ " fp_u_idx(0x%x) *sb_u_idx(0x%x)\n",
fp->rx_sge_prod, fp->last_max_sge,
le16_to_cpu(fp->fp_u_idx),
fp->status_blk->u_status_block.status_block_index);
@@ -573,12 +580,13 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
for_each_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
- BNX2X_ERR("fp%d: tx_pkt_prod(%x) tx_pkt_cons(%x)"
- " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
+ BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)"
+ " tx_bd_prod(0x%x) tx_bd_cons(0x%x)"
+ " *tx_cons_sb(0x%x)\n",
i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
- BNX2X_ERR(" fp_c_idx(%x) *sb_c_idx(%x)"
- " tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
+ BNX2X_ERR(" fp_c_idx(0x%x) *sb_c_idx(0x%x)"
+ " tx_db_prod(0x%x)\n", le16_to_cpu(fp->fp_c_idx),
fp->status_blk->c_status_block.status_block_index,
fp->tx_db.data.prod);
}
@@ -764,6 +772,40 @@ static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
* General service functions
*/
+/* Return true if succeeded to acquire the lock */
+static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
+{
+ u32 lock_status;
+ u32 resource_bit = (1 << resource);
+ int func = BP_FUNC(bp);
+ u32 hw_lock_control_reg;
+
+ DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);
+
+ /* Validating that the resource is within range */
+ if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
+ DP(NETIF_MSG_HW,
+ "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
+ resource, HW_LOCK_MAX_RESOURCE_VALUE);
+ return -EINVAL;
+ }
+
+ if (func <= 5)
+ hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
+ else
+ hw_lock_control_reg =
+ (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
+
+ /* Try to acquire the lock */
+ REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
+ lock_status = REG_RD(bp, hw_lock_control_reg);
+ if (lock_status & resource_bit)
+ return true;
+
+ DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
+ return false;
+}
+
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
u8 storm, u16 index, u8 op, u8 update)
{
@@ -842,7 +884,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
/* unmap first bd */
DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
- pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
+ dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);
nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
@@ -872,8 +914,8 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
- pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
- BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
+ dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
+ BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
if (--nbd)
bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
}
@@ -1023,7 +1065,8 @@ static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
default:
BNX2X_ERR("unexpected MC reply (%d) "
- "fp->state is %x\n", command, fp->state);
+ "fp[%d] state is %x\n",
+ command, fp->index, fp->state);
break;
}
mb(); /* force bnx2x_wait_ramrod() to see the change */
@@ -1086,7 +1129,7 @@ static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
if (!page)
return;
- pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
+ dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
__free_pages(page, PAGES_PER_SGE_SHIFT);
@@ -1115,15 +1158,15 @@ static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
if (unlikely(page == NULL))
return -ENOMEM;
- mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
- PCI_DMA_FROMDEVICE);
+ mapping = dma_map_page(&bp->pdev->dev, page, 0,
+ SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
__free_pages(page, PAGES_PER_SGE_SHIFT);
return -ENOMEM;
}
sw_buf->page = page;
- pci_unmap_addr_set(sw_buf, mapping, mapping);
+ dma_unmap_addr_set(sw_buf, mapping, mapping);
sge->addr_hi = cpu_to_le32(U64_HI(mapping));
sge->addr_lo = cpu_to_le32(U64_LO(mapping));
@@ -1143,15 +1186,15 @@ static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
if (unlikely(skb == NULL))
return -ENOMEM;
- mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
- PCI_DMA_FROMDEVICE);
+ mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size,
+ DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
dev_kfree_skb(skb);
return -ENOMEM;
}
rx_buf->skb = skb;
- pci_unmap_addr_set(rx_buf, mapping, mapping);
+ dma_unmap_addr_set(rx_buf, mapping, mapping);
rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
@@ -1173,13 +1216,13 @@ static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
- pci_dma_sync_single_for_device(bp->pdev,
- pci_unmap_addr(cons_rx_buf, mapping),
- RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(&bp->pdev->dev,
+ dma_unmap_addr(cons_rx_buf, mapping),
+ RX_COPY_THRESH, DMA_FROM_DEVICE);
prod_rx_buf->skb = cons_rx_buf->skb;
- pci_unmap_addr_set(prod_rx_buf, mapping,
- pci_unmap_addr(cons_rx_buf, mapping));
+ dma_unmap_addr_set(prod_rx_buf, mapping,
+ dma_unmap_addr(cons_rx_buf, mapping));
*prod_bd = *cons_bd;
}
@@ -1283,9 +1326,9 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
/* move empty skb from pool to prod and map it */
prod_rx_buf->skb = fp->tpa_pool[queue].skb;
- mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
- bp->rx_buf_size, PCI_DMA_FROMDEVICE);
- pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
+ mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
+ bp->rx_buf_size, DMA_FROM_DEVICE);
+ dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
/* move partial skb from cons to pool (don't unmap yet) */
fp->tpa_pool[queue] = *cons_rx_buf;
@@ -1302,7 +1345,7 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
#ifdef BNX2X_STOP_ON_ERROR
fp->tpa_queue_used |= (1 << queue);
-#ifdef __powerpc64__
+#ifdef _ASM_GENERIC_INT_L64_H
DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
@@ -1331,8 +1374,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
max(frag_size, (u32)len_on_bd));
#ifdef BNX2X_STOP_ON_ERROR
- if (pages >
- min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
+ if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
pages, cqe_idx);
BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
@@ -1361,8 +1403,9 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
}
/* Unmap the page as we r going to pass it to the stack */
- pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
- SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
+ dma_unmap_page(&bp->pdev->dev,
+ dma_unmap_addr(&old_rx_pg, mapping),
+ SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
/* Add one frag and update the appropriate fields in the skb */
skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
@@ -1389,8 +1432,8 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
/* Unmap skb in the pool anyway, as we are going to change
pool entry status to BNX2X_TPA_STOP even if new skb allocation
fails. */
- pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
- bp->rx_buf_size, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
+ bp->rx_buf_size, DMA_FROM_DEVICE);
if (likely(new_skb)) {
/* fix ip xsum and give it to the stack */
@@ -1441,12 +1484,12 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
#ifdef BCM_VLAN
if ((bp->vlgrp != NULL) && is_vlan_cqe &&
(!is_not_hwaccel_vlan_cqe))
- vlan_hwaccel_receive_skb(skb, bp->vlgrp,
- le16_to_cpu(cqe->fast_path_cqe.
- vlan_tag));
+ vlan_gro_receive(&fp->napi, bp->vlgrp,
+ le16_to_cpu(cqe->fast_path_cqe.
+ vlan_tag), skb);
else
#endif
- netif_receive_skb(skb);
+ napi_gro_receive(&fp->napi, skb);
} else {
DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
" - dropping packet!\n");
@@ -1539,7 +1582,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
struct sw_rx_bd *rx_buf = NULL;
struct sk_buff *skb;
union eth_rx_cqe *cqe;
- u8 cqe_fp_flags;
+ u8 cqe_fp_flags, cqe_fp_status_flags;
u16 len, pad;
comp_ring_cons = RCQ_BD(sw_comp_cons);
@@ -1555,6 +1598,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
cqe = &fp->rx_comp_ring[comp_ring_cons];
cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
+ cqe_fp_status_flags = cqe->fast_path_cqe.status_flags;
DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
" queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
@@ -1573,7 +1617,6 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
rx_buf = &fp->rx_buf_ring[bd_cons];
skb = rx_buf->skb;
prefetch(skb);
- prefetch((u8 *)skb + 256);
len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
pad = cqe->fast_path_cqe.placement_offset;
@@ -1620,11 +1663,10 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
}
}
- pci_dma_sync_single_for_device(bp->pdev,
- pci_unmap_addr(rx_buf, mapping),
- pad + RX_COPY_THRESH,
- PCI_DMA_FROMDEVICE);
- prefetch(skb);
+ dma_sync_single_for_device(&bp->pdev->dev,
+ dma_unmap_addr(rx_buf, mapping),
+ pad + RX_COPY_THRESH,
+ DMA_FROM_DEVICE);
prefetch(((char *)(skb)) + 128);
/* is this an error packet? */
@@ -1665,10 +1707,10 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
} else
if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
- pci_unmap_single(bp->pdev,
- pci_unmap_addr(rx_buf, mapping),
+ dma_unmap_single(&bp->pdev->dev,
+ dma_unmap_addr(rx_buf, mapping),
bp->rx_buf_size,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
skb_reserve(skb, pad);
skb_put(skb, len);
@@ -1684,6 +1726,12 @@ reuse_rx:
skb->protocol = eth_type_trans(skb, bp->dev);
+ if ((bp->dev->features & NETIF_F_RXHASH) &&
+ (cqe_fp_status_flags &
+ ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
+ skb->rxhash = le32_to_cpu(
+ cqe->fast_path_cqe.rss_hash_result);
+
skb->ip_summed = CHECKSUM_NONE;
if (bp->rx_csum) {
if (likely(BNX2X_RX_CSUM_OK(cqe)))
@@ -1699,11 +1747,11 @@ reuse_rx:
if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
PARSING_FLAGS_VLAN))
- vlan_hwaccel_receive_skb(skb, bp->vlgrp,
- le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
+ vlan_gro_receive(&fp->napi, bp->vlgrp,
+ le16_to_cpu(cqe->fast_path_cqe.vlan_tag), skb);
else
#endif
- netif_receive_skb(skb);
+ napi_gro_receive(&fp->napi, skb);
next_rx:
@@ -1831,8 +1879,8 @@ static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
return IRQ_HANDLED;
}
- if (status)
- DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
+ if (unlikely(status))
+ DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
status);
return IRQ_HANDLED;
@@ -1900,6 +1948,8 @@ static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
int func = BP_FUNC(bp);
u32 hw_lock_control_reg;
+ DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
+
/* Validating that the resource is within range */
if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
DP(NETIF_MSG_HW,
@@ -2254,11 +2304,14 @@ static void bnx2x__link_reset(struct bnx2x *bp)
static u8 bnx2x_link_test(struct bnx2x *bp)
{
- u8 rc;
+ u8 rc = 0;
- bnx2x_acquire_phy_lock(bp);
- rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
- bnx2x_release_phy_lock(bp);
+ if (!BP_NOMCP(bp)) {
+ bnx2x_acquire_phy_lock(bp);
+ rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
+ bnx2x_release_phy_lock(bp);
+ } else
+ BNX2X_ERR("Bootcode is missing - can not test link\n");
return rc;
}
@@ -2387,10 +2440,10 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
than zero */
m_fair_vn.vn_credit_delta =
- max((u32)(vn_min_rate * (T_FAIR_COEF /
- (8 * bp->vn_weight_sum))),
- (u32)(bp->cmng.fair_vars.fair_threshold * 2));
- DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
+ max_t(u32, (vn_min_rate * (T_FAIR_COEF /
+ (8 * bp->vn_weight_sum))),
+ (bp->cmng.fair_vars.fair_threshold * 2));
+ DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
m_fair_vn.vn_credit_delta);
}
@@ -2410,6 +2463,7 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
+ u32 prev_link_status = bp->link_vars.link_status;
/* Make sure that we are synced with the current statistics */
bnx2x_stats_handle(bp, STATS_EVENT_STOP);
@@ -2442,8 +2496,9 @@ static void bnx2x_link_attn(struct bnx2x *bp)
bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
}
- /* indicate link status */
- bnx2x_link_report(bp);
+ /* indicate link status only if link status actually changed */
+ if (prev_link_status != bp->link_vars.link_status)
+ bnx2x_link_report(bp);
if (IS_E1HMF(bp)) {
int port = BP_PORT(bp);
@@ -2560,7 +2615,6 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
return rc;
}
-static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
static void bnx2x_set_rx_mode(struct net_device *dev);
@@ -2696,12 +2750,6 @@ static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
{
struct eth_spe *spe;
- DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
- "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
- (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
- (void *)bp->spq_prod_bd - (void *)bp->spq), command,
- HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
-
#ifdef BNX2X_STOP_ON_ERROR
if (unlikely(bp->panic))
return -EIO;
@@ -2720,8 +2768,8 @@ static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
/* CID needs port number to be encoded int it */
spe->hdr.conn_and_cmd_data =
- cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
- HW_CID(bp, cid)));
+ cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
+ HW_CID(bp, cid));
spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
if (common)
spe->hdr.type |=
@@ -2732,6 +2780,13 @@ static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
bp->spq_left--;
+ DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
+ "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
+ bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
+ (u32)(U64_LO(bp->spq_mapping) +
+ (void *)bp->spq_prod_bd - (void *)bp->spq), command,
+ HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
+
bnx2x_sp_prod_update(bp);
spin_unlock_bh(&bp->spq_lock);
return 0;
@@ -2740,12 +2795,11 @@ static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
/* acquire split MCP access lock register */
static int bnx2x_acquire_alr(struct bnx2x *bp)
{
- u32 i, j, val;
+ u32 j, val;
int rc = 0;
might_sleep();
- i = 100;
- for (j = 0; j < i*10; j++) {
+ for (j = 0; j < 1000; j++) {
val = (1UL << 31);
REG_WR(bp, GRCBASE_MCP + 0x9c, val);
val = REG_RD(bp, GRCBASE_MCP + 0x9c);
@@ -2765,9 +2819,7 @@ static int bnx2x_acquire_alr(struct bnx2x *bp)
/* release split MCP access lock register */
static void bnx2x_release_alr(struct bnx2x *bp)
{
- u32 val = 0;
-
- REG_WR(bp, GRCBASE_MCP + 0x9c, val);
+ REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
}
static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
@@ -2823,7 +2875,7 @@ static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
aeu_mask, asserted);
- aeu_mask &= ~(asserted & 0xff);
+ aeu_mask &= ~(asserted & 0x3ff);
DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
REG_WR(bp, aeu_addr, aeu_mask);
@@ -2910,8 +2962,9 @@ static inline void bnx2x_fan_failure(struct bnx2x *bp)
bp->link_params.ext_phy_config);
/* log the failure */
- netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shutdown the card to prevent permanent damage.\n"
- "Please contact Dell Support for assistance.\n");
+ netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
+ " the driver to shutdown the card to prevent permanent"
+ " damage. Please contact OEM Support for assistance\n");
}
static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
@@ -3104,10 +3157,311 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
}
}
-static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
+static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
+static int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
+
+
+#define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1
+#define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */
+#define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
+#define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
+#define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
+#define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
+/*
+ * should be run under rtnl lock
+ */
+static inline void bnx2x_set_reset_done(struct bnx2x *bp)
+{
+ u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
+ val &= ~(1 << RESET_DONE_FLAG_SHIFT);
+ REG_WR(bp, BNX2X_MISC_GEN_REG, val);
+ barrier();
+ mmiowb();
+}
+
+/*
+ * should be run under rtnl lock
+ */
+static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
+{
+ u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
+ val |= (1 << 16);
+ REG_WR(bp, BNX2X_MISC_GEN_REG, val);
+ barrier();
+ mmiowb();
+}
+
+/*
+ * should be run under rtnl lock
+ */
+static inline bool bnx2x_reset_is_done(struct bnx2x *bp)
+{
+ u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
+ DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
+ return (val & RESET_DONE_FLAG_MASK) ? false : true;
+}
+
+/*
+ * should be run under rtnl lock
+ */
+static inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
+{
+ u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
+
+ DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
+
+ val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
+ REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
+ barrier();
+ mmiowb();
+}
+
+/*
+ * should be run under rtnl lock
+ */
+static inline u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
+{
+ u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
+
+ DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
+
+ val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
+ REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
+ barrier();
+ mmiowb();
+
+ return val1;
+}
+
+/*
+ * should be run under rtnl lock
+ */
+static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
+{
+ return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
+}
+
+static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
+{
+ u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
+ REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
+}
+
+static inline void _print_next_block(int idx, const char *blk)
+{
+ if (idx)
+ pr_cont(", ");
+ pr_cont("%s", blk);
+}
+
+static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
+{
+ int i = 0;
+ u32 cur_bit = 0;
+ for (i = 0; sig; i++) {
+ cur_bit = ((u32)0x1 << i);
+ if (sig & cur_bit) {
+ switch (cur_bit) {
+ case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
+ _print_next_block(par_num++, "BRB");
+ break;
+ case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
+ _print_next_block(par_num++, "PARSER");
+ break;
+ case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
+ _print_next_block(par_num++, "TSDM");
+ break;
+ case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
+ _print_next_block(par_num++, "SEARCHER");
+ break;
+ case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
+ _print_next_block(par_num++, "TSEMI");
+ break;
+ }
+
+ /* Clear the bit */
+ sig &= ~cur_bit;
+ }
+ }
+
+ return par_num;
+}
+
+static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
+{
+ int i = 0;
+ u32 cur_bit = 0;
+ for (i = 0; sig; i++) {
+ cur_bit = ((u32)0x1 << i);
+ if (sig & cur_bit) {
+ switch (cur_bit) {
+ case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
+ _print_next_block(par_num++, "PBCLIENT");
+ break;
+ case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
+ _print_next_block(par_num++, "QM");
+ break;
+ case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
+ _print_next_block(par_num++, "XSDM");
+ break;
+ case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
+ _print_next_block(par_num++, "XSEMI");
+ break;
+ case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
+ _print_next_block(par_num++, "DOORBELLQ");
+ break;
+ case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
+ _print_next_block(par_num++, "VAUX PCI CORE");
+ break;
+ case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
+ _print_next_block(par_num++, "DEBUG");
+ break;
+ case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
+ _print_next_block(par_num++, "USDM");
+ break;
+ case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
+ _print_next_block(par_num++, "USEMI");
+ break;
+ case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
+ _print_next_block(par_num++, "UPB");
+ break;
+ case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
+ _print_next_block(par_num++, "CSDM");
+ break;
+ }
+
+ /* Clear the bit */
+ sig &= ~cur_bit;
+ }
+ }
+
+ return par_num;
+}
+
+static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
+{
+ int i = 0;
+ u32 cur_bit = 0;
+ for (i = 0; sig; i++) {
+ cur_bit = ((u32)0x1 << i);
+ if (sig & cur_bit) {
+ switch (cur_bit) {
+ case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
+ _print_next_block(par_num++, "CSEMI");
+ break;
+ case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
+ _print_next_block(par_num++, "PXP");
+ break;
+ case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
+ _print_next_block(par_num++,
+ "PXPPCICLOCKCLIENT");
+ break;
+ case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
+ _print_next_block(par_num++, "CFC");
+ break;
+ case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
+ _print_next_block(par_num++, "CDU");
+ break;
+ case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
+ _print_next_block(par_num++, "IGU");
+ break;
+ case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
+ _print_next_block(par_num++, "MISC");
+ break;
+ }
+
+ /* Clear the bit */
+ sig &= ~cur_bit;
+ }
+ }
+
+ return par_num;
+}
+
+static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
+{
+ int i = 0;
+ u32 cur_bit = 0;
+ for (i = 0; sig; i++) {
+ cur_bit = ((u32)0x1 << i);
+ if (sig & cur_bit) {
+ switch (cur_bit) {
+ case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
+ _print_next_block(par_num++, "MCP ROM");
+ break;
+ case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
+ _print_next_block(par_num++, "MCP UMP RX");
+ break;
+ case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
+ _print_next_block(par_num++, "MCP UMP TX");
+ break;
+ case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
+ _print_next_block(par_num++, "MCP SCPAD");
+ break;
+ }
+
+ /* Clear the bit */
+ sig &= ~cur_bit;
+ }
+ }
+
+ return par_num;
+}
+
+static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
+ u32 sig2, u32 sig3)
+{
+ if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
+ (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
+ int par_num = 0;
+ DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
+ "[0]:0x%08x [1]:0x%08x "
+ "[2]:0x%08x [3]:0x%08x\n",
+ sig0 & HW_PRTY_ASSERT_SET_0,
+ sig1 & HW_PRTY_ASSERT_SET_1,
+ sig2 & HW_PRTY_ASSERT_SET_2,
+ sig3 & HW_PRTY_ASSERT_SET_3);
+ printk(KERN_ERR"%s: Parity errors detected in blocks: ",
+ bp->dev->name);
+ par_num = bnx2x_print_blocks_with_parity0(
+ sig0 & HW_PRTY_ASSERT_SET_0, par_num);
+ par_num = bnx2x_print_blocks_with_parity1(
+ sig1 & HW_PRTY_ASSERT_SET_1, par_num);
+ par_num = bnx2x_print_blocks_with_parity2(
+ sig2 & HW_PRTY_ASSERT_SET_2, par_num);
+ par_num = bnx2x_print_blocks_with_parity3(
+ sig3 & HW_PRTY_ASSERT_SET_3, par_num);
+ printk("\n");
+ return true;
+ } else
+ return false;
+}
+
+static bool bnx2x_chk_parity_attn(struct bnx2x *bp)
{
struct attn_route attn;
- struct attn_route group_mask;
+ int port = BP_PORT(bp);
+
+ attn.sig[0] = REG_RD(bp,
+ MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
+ port*4);
+ attn.sig[1] = REG_RD(bp,
+ MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
+ port*4);
+ attn.sig[2] = REG_RD(bp,
+ MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
+ port*4);
+ attn.sig[3] = REG_RD(bp,
+ MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
+ port*4);
+
+ return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
+ attn.sig[3]);
+}
+
+static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
+{
+ struct attn_route attn, *group_mask;
int port = BP_PORT(bp);
int index;
u32 reg_addr;
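
The load-counter helpers added in the hunk above pack two things into one shared register: the low 16 bits count how many functions currently have the driver loaded, and the bits above them flag a reset in progress. A minimal sketch of the arithmetic on a plain u32 (illustrative only, no register access):

#include <linux/types.h>

#define EX_LOAD_COUNTER_BITS	16
#define EX_LOAD_COUNTER_MASK	(((u32)1 << EX_LOAD_COUNTER_BITS) - 1)

static u32 example_inc_load_cnt(u32 reg)
{
	u32 cnt = ((reg & EX_LOAD_COUNTER_MASK) + 1) & EX_LOAD_COUNTER_MASK;

	return (reg & ~EX_LOAD_COUNTER_MASK) | cnt;	/* flag bits untouched */
}

static bool example_reset_is_done(u32 reg)
{
	return !(reg & ~EX_LOAD_COUNTER_MASK);	/* no reset flag set */
}
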
@@ -3118,6 +3472,19 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
try to handle this event */
bnx2x_acquire_alr(bp);
+ if (bnx2x_chk_parity_attn(bp)) {
+ bp->recovery_state = BNX2X_RECOVERY_INIT;
+ bnx2x_set_reset_in_progress(bp);
+ schedule_delayed_work(&bp->reset_task, 0);
+ /* Disable HW interrupts */
+ bnx2x_int_disable(bp);
+ bnx2x_release_alr(bp);
+ /* In case of parity errors don't handle attentions so that
+ * other function would "see" parity errors.
+ */
+ return;
+ }
+
attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
@@ -3127,28 +3494,20 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
if (deasserted & (1 << index)) {
- group_mask = bp->attn_group[index];
+ group_mask = &bp->attn_group[index];
DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
- index, group_mask.sig[0], group_mask.sig[1],
- group_mask.sig[2], group_mask.sig[3]);
+ index, group_mask->sig[0], group_mask->sig[1],
+ group_mask->sig[2], group_mask->sig[3]);
bnx2x_attn_int_deasserted3(bp,
- attn.sig[3] & group_mask.sig[3]);
+ attn.sig[3] & group_mask->sig[3]);
bnx2x_attn_int_deasserted1(bp,
- attn.sig[1] & group_mask.sig[1]);
+ attn.sig[1] & group_mask->sig[1]);
bnx2x_attn_int_deasserted2(bp,
- attn.sig[2] & group_mask.sig[2]);
+ attn.sig[2] & group_mask->sig[2]);
bnx2x_attn_int_deasserted0(bp,
- attn.sig[0] & group_mask.sig[0]);
-
- if ((attn.sig[0] & group_mask.sig[0] &
- HW_PRTY_ASSERT_SET_0) ||
- (attn.sig[1] & group_mask.sig[1] &
- HW_PRTY_ASSERT_SET_1) ||
- (attn.sig[2] & group_mask.sig[2] &
- HW_PRTY_ASSERT_SET_2))
- BNX2X_ERR("FATAL HW block parity attention\n");
+ attn.sig[0] & group_mask->sig[0]);
}
}
@@ -3172,7 +3531,7 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
aeu_mask, deasserted);
- aeu_mask |= (deasserted & 0xff);
+ aeu_mask |= (deasserted & 0x3ff);
DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
REG_WR(bp, reg_addr, aeu_mask);
@@ -3216,7 +3575,6 @@ static void bnx2x_sp_task(struct work_struct *work)
struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
u16 status;
-
/* Return here if interrupt is disabled */
if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
@@ -3227,11 +3585,23 @@ static void bnx2x_sp_task(struct work_struct *work)
/* if (status == 0) */
/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
- DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
+ DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
/* HW attentions */
- if (status & 0x1)
+ if (status & 0x1) {
bnx2x_attn_int(bp);
+ status &= ~0x1;
+ }
+
+ /* CStorm events: STAT_QUERY */
+ if (status & 0x2) {
+ DP(BNX2X_MSG_SP, "CStorm events: STAT_QUERY\n");
+ status &= ~0x2;
+ }
+
+ if (unlikely(status))
+ DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
+ status);
bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
IGU_INT_NOP, 1);
@@ -3243,7 +3613,6 @@ static void bnx2x_sp_task(struct work_struct *work)
IGU_INT_NOP, 1);
bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
IGU_INT_ENABLE, 1);
-
}
static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
@@ -3947,7 +4316,6 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp)
u32 lo;
u32 hi;
} diff;
- u32 nig_timer_max;
if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
bnx2x_bmac_stats_update(bp);
@@ -3978,10 +4346,14 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp)
pstats->host_port_stats_start = ++pstats->host_port_stats_end;
- nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
- if (nig_timer_max != estats->nig_timer_max) {
- estats->nig_timer_max = nig_timer_max;
- BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
+ if (!BP_NOMCP(bp)) {
+ u32 nig_timer_max =
+ SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
+ if (nig_timer_max != estats->nig_timer_max) {
+ estats->nig_timer_max = nig_timer_max;
+ BNX2X_ERR("NIG timer max (%u)\n",
+ estats->nig_timer_max);
+ }
}
return 0;
@@ -4025,21 +4397,21 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
bp->stats_counter) {
DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
- " xstorm counter (%d) != stats_counter (%d)\n",
+ " xstorm counter (0x%x) != stats_counter (0x%x)\n",
i, xclient->stats_counter, bp->stats_counter);
return -1;
}
if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
bp->stats_counter) {
DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
- " tstorm counter (%d) != stats_counter (%d)\n",
+ " tstorm counter (0x%x) != stats_counter (0x%x)\n",
i, tclient->stats_counter, bp->stats_counter);
return -2;
}
if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
bp->stats_counter) {
DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
- " ustorm counter (%d) != stats_counter (%d)\n",
+ " ustorm counter (0x%x) != stats_counter (0x%x)\n",
i, uclient->stats_counter, bp->stats_counter);
return -4;
}
@@ -4059,6 +4431,21 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
qstats->total_bytes_received_lo,
le32_to_cpu(tclient->rcv_unicast_bytes.lo));
+ SUB_64(qstats->total_bytes_received_hi,
+ le32_to_cpu(uclient->bcast_no_buff_bytes.hi),
+ qstats->total_bytes_received_lo,
+ le32_to_cpu(uclient->bcast_no_buff_bytes.lo));
+
+ SUB_64(qstats->total_bytes_received_hi,
+ le32_to_cpu(uclient->mcast_no_buff_bytes.hi),
+ qstats->total_bytes_received_lo,
+ le32_to_cpu(uclient->mcast_no_buff_bytes.lo));
+
+ SUB_64(qstats->total_bytes_received_hi,
+ le32_to_cpu(uclient->ucast_no_buff_bytes.hi),
+ qstats->total_bytes_received_lo,
+ le32_to_cpu(uclient->ucast_no_buff_bytes.lo));
+
qstats->valid_bytes_received_hi =
qstats->total_bytes_received_hi;
qstats->valid_bytes_received_lo =
@@ -4307,47 +4694,43 @@ static void bnx2x_stats_update(struct bnx2x *bp)
bnx2x_drv_stats_update(bp);
if (netif_msg_timer(bp)) {
- struct bnx2x_fastpath *fp0_rx = bp->fp;
- struct bnx2x_fastpath *fp0_tx = bp->fp;
- struct tstorm_per_client_stats *old_tclient =
- &bp->fp->old_tclient;
- struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
struct bnx2x_eth_stats *estats = &bp->eth_stats;
- struct net_device_stats *nstats = &bp->dev->stats;
int i;
- netdev_printk(KERN_DEBUG, bp->dev, "\n");
- printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
- " tx pkt (%lx)\n",
- bnx2x_tx_avail(fp0_tx),
- le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
- printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
- " rx pkt (%lx)\n",
- (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
- fp0_rx->rx_comp_cons),
- le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
- printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
- "brb truncate %u\n",
- (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
- qstats->driver_xoff,
+ printk(KERN_DEBUG "%s: brb drops %u brb truncate %u\n",
+ bp->dev->name,
estats->brb_drop_lo, estats->brb_truncate_lo);
- printk(KERN_DEBUG "tstats: checksum_discard %u "
- "packets_too_big_discard %lu no_buff_discard %lu "
- "mac_discard %u mac_filter_discard %u "
- "xxovrflow_discard %u brb_truncate_discard %u "
- "ttl0_discard %u\n",
- le32_to_cpu(old_tclient->checksum_discard),
- bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
- bnx2x_hilo(&qstats->no_buff_discard_hi),
- estats->mac_discard, estats->mac_filter_discard,
- estats->xxoverflow_discard, estats->brb_truncate_discard,
- le32_to_cpu(old_tclient->ttl0_discard));
for_each_queue(bp, i) {
- printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
- bnx2x_fp(bp, i, tx_pkt),
- bnx2x_fp(bp, i, rx_pkt),
- bnx2x_fp(bp, i, rx_calls));
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+ struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
+
+ printk(KERN_DEBUG "%s: rx usage(%4u) *rx_cons_sb(%u)"
+ " rx pkt(%lu) rx calls(%lu %lu)\n",
+ fp->name, (le16_to_cpu(*fp->rx_cons_sb) -
+ fp->rx_comp_cons),
+ le16_to_cpu(*fp->rx_cons_sb),
+ bnx2x_hilo(&qstats->
+ total_unicast_packets_received_hi),
+ fp->rx_calls, fp->rx_pkt);
+ }
+
+ for_each_queue(bp, i) {
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+ struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
+ struct netdev_queue *txq =
+ netdev_get_tx_queue(bp->dev, i);
+
+ printk(KERN_DEBUG "%s: tx avail(%4u) *tx_cons_sb(%u)"
+ " tx pkt(%lu) tx calls (%lu)"
+ " %s (Xoff events %u)\n",
+ fp->name, bnx2x_tx_avail(fp),
+ le16_to_cpu(*fp->tx_cons_sb),
+ bnx2x_hilo(&qstats->
+ total_unicast_packets_transmitted_hi),
+ fp->tx_pkt,
+ (netif_tx_queue_stopped(txq) ? "Xoff" : "Xon"),
+ qstats->driver_xoff);
}
}
@@ -4468,6 +4851,9 @@ static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
enum bnx2x_stats_state state = bp->stats_state;
+ if (unlikely(bp->panic))
+ return;
+
bnx2x_stats_stm[state][event].action(bp);
bp->stats_state = bnx2x_stats_stm[state][event].next_state;
@@ -4940,9 +5326,9 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
}
if (fp->tpa_state[i] == BNX2X_TPA_START)
- pci_unmap_single(bp->pdev,
- pci_unmap_addr(rx_buf, mapping),
- bp->rx_buf_size, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&bp->pdev->dev,
+ dma_unmap_addr(rx_buf, mapping),
+ bp->rx_buf_size, DMA_FROM_DEVICE);
dev_kfree_skb(skb);
rx_buf->skb = NULL;
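Hunks like the one above convert the driver from the legacy pci_* DMA wrappers to the generic DMA API: pci_map_single()/pci_unmap_single() become dma_map_single()/dma_unmap_single() on &pdev->dev, PCI_DMA_FROMDEVICE/PCI_DMA_TODEVICE become DMA_FROM_DEVICE/DMA_TO_DEVICE, and pci_alloc_consistent() becomes dma_alloc_coherent() with an explicit GFP_KERNEL. A hedged sketch of the mapping side of that pattern, with a hypothetical buffer and length:

#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Sketch only: 'buf' and 'len' are hypothetical, error handling trimmed. */
static int map_rx_buffer(struct pci_dev *pdev, void *buf, size_t len,
			 dma_addr_t *mapping)
{
	/* old style, removed by this patch:
	 *   *mapping = pci_map_single(pdev, buf, len, PCI_DMA_FROMDEVICE);
	 */
	*mapping = dma_map_single(&pdev->dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(&pdev->dev, *mapping))
		return -ENOMEM;
	return 0;
}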
@@ -4978,7 +5364,7 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp)
fp->disable_tpa = 1;
break;
}
- pci_unmap_addr_set((struct sw_rx_bd *)
+ dma_unmap_addr_set((struct sw_rx_bd *)
&bp->fp->tpa_pool[i],
mapping, 0);
fp->tpa_state[i] = BNX2X_TPA_STOP;
@@ -5072,8 +5458,8 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp)
fp->rx_bd_prod = ring_prod;
/* must not have more available CQEs than BDs */
- fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
- cqe_ring_prod);
+ fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
+ cqe_ring_prod);
fp->rx_pkt = fp->rx_calls = 0;
/* Warning!
@@ -5179,8 +5565,8 @@ static void bnx2x_init_context(struct bnx2x *bp)
context->ustorm_st_context.common.flags |=
USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
context->ustorm_st_context.common.sge_buff_size =
- (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
- (u32)0xffff);
+ (u16)min_t(u32, SGE_PAGE_SIZE*PAGES_PER_SGE,
+ 0xffff);
context->ustorm_st_context.common.sge_page_base_hi =
U64_HI(fp->rx_sge_mapping);
context->ustorm_st_context.common.sge_page_base_lo =
@@ -5369,10 +5755,10 @@ static void bnx2x_init_internal_func(struct bnx2x *bp)
u32 offset;
u16 max_agg_size;
- if (is_multi(bp)) {
- tstorm_config.config_flags = MULTI_FLAGS(bp);
+ tstorm_config.config_flags = RSS_FLAGS(bp);
+
+ if (is_multi(bp))
tstorm_config.rss_result_mask = MULTI_MASK;
- }
/* Enable TPA if needed */
if (bp->flags & TPA_ENABLE_FLAG)
@@ -5477,10 +5863,8 @@ static void bnx2x_init_internal_func(struct bnx2x *bp)
}
/* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
- max_agg_size =
- min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
- SGE_PAGE_SIZE * PAGES_PER_SGE),
- (u32)0xffff);
+ max_agg_size = min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) *
+ SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
for_each_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
@@ -5566,7 +5950,7 @@ static void bnx2x_init_internal_func(struct bnx2x *bp)
}
- /* Store it to internal memory */
+ /* Store cmng structures to internal memory */
if (bp->port.pmf)
for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
REG_WR(bp, BAR_XSTRORM_INTMEM +
@@ -5658,8 +6042,8 @@ static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
static int bnx2x_gunzip_init(struct bnx2x *bp)
{
- bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
- &bp->gunzip_mapping);
+ bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
+ &bp->gunzip_mapping, GFP_KERNEL);
if (bp->gunzip_buf == NULL)
goto gunzip_nomem1;
@@ -5679,12 +6063,13 @@ gunzip_nomem3:
bp->strm = NULL;
gunzip_nomem2:
- pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
- bp->gunzip_mapping);
+ dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
+ bp->gunzip_mapping);
bp->gunzip_buf = NULL;
gunzip_nomem1:
- netdev_err(bp->dev, "Cannot allocate firmware buffer for un-compression\n");
+ netdev_err(bp->dev, "Cannot allocate firmware buffer for"
+ " un-compression\n");
return -ENOMEM;
}
@@ -5696,8 +6081,8 @@ static void bnx2x_gunzip_end(struct bnx2x *bp)
bp->strm = NULL;
if (bp->gunzip_buf) {
- pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
- bp->gunzip_mapping);
+ dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
+ bp->gunzip_mapping);
bp->gunzip_buf = NULL;
}
}
@@ -5735,8 +6120,9 @@ static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
if (bp->gunzip_outlen & 0x3)
- netdev_err(bp->dev, "Firmware decompression error: gunzip_outlen (%d) not aligned\n",
- bp->gunzip_outlen);
+ netdev_err(bp->dev, "Firmware decompression error:"
+ " gunzip_outlen (%d) not aligned\n",
+ bp->gunzip_outlen);
bp->gunzip_outlen >>= 2;
zlib_inflateEnd(bp->strm);
@@ -5962,6 +6348,50 @@ static void enable_blocks_attention(struct bnx2x *bp)
REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
}
+static const struct {
+ u32 addr;
+ u32 mask;
+} bnx2x_parity_mask[] = {
+ {PXP_REG_PXP_PRTY_MASK, 0xffffffff},
+ {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
+ {PXP2_REG_PXP2_PRTY_MASK_1, 0xffffffff},
+ {HC_REG_HC_PRTY_MASK, 0xffffffff},
+ {MISC_REG_MISC_PRTY_MASK, 0xffffffff},
+ {QM_REG_QM_PRTY_MASK, 0x0},
+ {DORQ_REG_DORQ_PRTY_MASK, 0x0},
+ {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
+ {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
+ {SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */
+ {CDU_REG_CDU_PRTY_MASK, 0x0},
+ {CFC_REG_CFC_PRTY_MASK, 0x0},
+ {DBG_REG_DBG_PRTY_MASK, 0x0},
+ {DMAE_REG_DMAE_PRTY_MASK, 0x0},
+ {BRB1_REG_BRB1_PRTY_MASK, 0x0},
+ {PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
+ {TSDM_REG_TSDM_PRTY_MASK, 0x18},/* bit 3,4 */
+ {CSDM_REG_CSDM_PRTY_MASK, 0x8}, /* bit 3 */
+ {USDM_REG_USDM_PRTY_MASK, 0x38},/* bit 3,4,5 */
+ {XSDM_REG_XSDM_PRTY_MASK, 0x8}, /* bit 3 */
+ {TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
+ {TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
+ {USEM_REG_USEM_PRTY_MASK_0, 0x0},
+ {USEM_REG_USEM_PRTY_MASK_1, 0x0},
+ {CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
+ {CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
+ {XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
+ {XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
+};
+
+static void enable_blocks_parity(struct bnx2x *bp)
+{
+ int i, mask_arr_len =
+ sizeof(bnx2x_parity_mask)/(sizeof(bnx2x_parity_mask[0]));
+
+ for (i = 0; i < mask_arr_len; i++)
+ REG_WR(bp, bnx2x_parity_mask[i].addr,
+ bnx2x_parity_mask[i].mask);
+}
+
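A side note on the loop above: enable_blocks_parity() derives the table length with an explicit sizeof division; the kernel's ARRAY_SIZE() macro expresses the same computation more idiomatically. An equivalent sketch (same behaviour, illustration only, not the patch's code):

#include <linux/kernel.h>	/* ARRAY_SIZE() */

static void enable_blocks_parity_alt(struct bnx2x *bp)
{
	int i;

	/* Write each block's parity mask, exactly as above */
	for (i = 0; i < ARRAY_SIZE(bnx2x_parity_mask); i++)
		REG_WR(bp, bnx2x_parity_mask[i].addr,
		       bnx2x_parity_mask[i].mask);
}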
static void bnx2x_reset_common(struct bnx2x *bp)
{
@@ -5992,10 +6422,14 @@ static void bnx2x_init_pxp(struct bnx2x *bp)
static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
{
+ int is_required;
u32 val;
- u8 port;
- u8 is_required = 0;
+ int port;
+
+ if (BP_NOMCP(bp))
+ return;
+ is_required = 0;
val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
SHARED_HW_CFG_FAN_FAILURE_MASK;
@@ -6034,7 +6468,7 @@ static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
/* set to active low mode */
val = REG_RD(bp, MISC_REG_SPIO_INT);
val |= ((1 << MISC_REGISTERS_SPIO_5) <<
- MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
+ MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
REG_WR(bp, MISC_REG_SPIO_INT, val);
/* enable interrupt to signal the IGU */
@@ -6200,10 +6634,8 @@ static int bnx2x_init_common(struct bnx2x *bp)
bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
REG_WR(bp, SRC_REG_SOFT_RST, 1);
- for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
- REG_WR(bp, i, 0xc0cac01a);
- /* TODO: replace with something meaningful */
- }
+ for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
+ REG_WR(bp, i, random32());
bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
#ifdef BCM_CNIC
REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
@@ -6221,7 +6653,8 @@ static int bnx2x_init_common(struct bnx2x *bp)
if (sizeof(union cdu_context) != 1024)
/* we currently assume that a context is 1024 bytes */
- pr_alert("please adjust the size of cdu_context(%ld)\n",
+ dev_alert(&bp->pdev->dev, "please adjust the size "
+ "of cdu_context(%ld)\n",
(long)sizeof(union cdu_context));
bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
@@ -6305,6 +6738,8 @@ static int bnx2x_init_common(struct bnx2x *bp)
REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
enable_blocks_attention(bp);
+ if (CHIP_PARITY_SUPPORTED(bp))
+ enable_blocks_parity(bp);
if (!BP_NOMCP(bp)) {
bnx2x_acquire_phy_lock(bp);
@@ -6323,7 +6758,7 @@ static int bnx2x_init_port(struct bnx2x *bp)
u32 low, high;
u32 val;
- DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
+ DP(BNX2X_MSG_MCP, "starting port init port %d\n", port);
REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
@@ -6342,6 +6777,7 @@ static int bnx2x_init_port(struct bnx2x *bp)
REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
#endif
+
bnx2x_init_block(bp, DQ_BLOCK, init_stage);
bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
@@ -6534,7 +6970,7 @@ static int bnx2x_init_func(struct bnx2x *bp)
u32 addr, val;
int i;
- DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
+ DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);
/* set MSI reconfigure capability */
addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
@@ -6692,7 +7128,7 @@ static void bnx2x_free_mem(struct bnx2x *bp)
#define BNX2X_PCI_FREE(x, y, size) \
do { \
if (x) { \
- pci_free_consistent(bp->pdev, size, x, y); \
+ dma_free_coherent(&bp->pdev->dev, size, x, y); \
x = NULL; \
y = 0; \
} \
@@ -6773,7 +7209,7 @@ static int bnx2x_alloc_mem(struct bnx2x *bp)
#define BNX2X_PCI_ALLOC(x, y, size) \
do { \
- x = pci_alloc_consistent(bp->pdev, size, y); \
+ x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
if (x == NULL) \
goto alloc_mem_err; \
memset(x, 0, size); \
@@ -6906,9 +7342,9 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
if (skb == NULL)
continue;
- pci_unmap_single(bp->pdev,
- pci_unmap_addr(rx_buf, mapping),
- bp->rx_buf_size, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&bp->pdev->dev,
+ dma_unmap_addr(rx_buf, mapping),
+ bp->rx_buf_size, DMA_FROM_DEVICE);
rx_buf->skb = NULL;
dev_kfree_skb(skb);
@@ -6987,7 +7423,31 @@ static int bnx2x_enable_msix(struct bnx2x *bp)
rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
BNX2X_NUM_QUEUES(bp) + offset);
- if (rc) {
+
+ /*
+ * reconfigure number of tx/rx queues according to available
+ * MSI-X vectors
+ */
+ if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
+ /* vectors available for FP */
+ int fp_vec = rc - BNX2X_MSIX_VEC_FP_START;
+
+ DP(NETIF_MSG_IFUP,
+ "Trying to use less MSI-X vectors: %d\n", rc);
+
+ rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
+
+ if (rc) {
+ DP(NETIF_MSG_IFUP,
+ "MSI-X is not attainable rc %d\n", rc);
+ return rc;
+ }
+
+ bp->num_queues = min(bp->num_queues, fp_vec);
+
+ DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
+ bp->num_queues);
+ } else if (rc) {
DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
return rc;
}
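With the pci_enable_msix() semantics of this era, a positive return value is the number of vectors the platform can actually provide; the hunk above uses that to retry with a reduced request and then shrinks num_queues to fit. A hedged, generic sketch of the same retry pattern (the vector table and minimum count are hypothetical parameters):

#include <linux/pci.h>
#include <linux/errno.h>

/* Ask for 'want' vectors; if only a smaller number is available (positive
 * return), retry with that count as long as it is at least 'min'.
 * Returns the vector count granted or a negative errno. Sketch only. */
static int enable_msix_with_fallback(struct pci_dev *pdev,
				     struct msix_entry *table,
				     int want, int min)
{
	int rc = pci_enable_msix(pdev, table, want);

	if (rc == 0)
		return want;			/* full request granted */
	if (rc < 0 || rc < min)
		return rc < 0 ? rc : -ENOSPC;	/* hard error or too few */

	/* Only 'rc' vectors are available - retry with the smaller count */
	if (pci_enable_msix(pdev, table, rc))
		return -ENOSPC;
	return rc;
}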
@@ -7028,10 +7488,11 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
}
i = BNX2X_NUM_QUEUES(bp);
- netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
- bp->msix_table[0].vector,
- 0, bp->msix_table[offset].vector,
- i - 1, bp->msix_table[offset + i - 1].vector);
+ netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
+ " ... fp[%d] %d\n",
+ bp->msix_table[0].vector,
+ 0, bp->msix_table[offset].vector,
+ i - 1, bp->msix_table[offset + i - 1].vector);
return 0;
}
@@ -7409,8 +7870,6 @@ static int bnx2x_set_num_queues(struct bnx2x *bp)
bp->num_queues = 1;
DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
break;
-
- case INT_MODE_MSIX:
default:
/* Set number of queues according to bp->multi_mode value */
bnx2x_set_num_queues_msix(bp);
@@ -7656,6 +8115,7 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
if (bp->state == BNX2X_STATE_OPEN)
bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
#endif
+ bnx2x_inc_load_cnt(bp);
return 0;
@@ -7843,33 +8303,12 @@ static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
}
}
-/* must be called with rtnl_lock */
-static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
+static void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
{
int port = BP_PORT(bp);
u32 reset_code = 0;
int i, cnt, rc;
-#ifdef BCM_CNIC
- bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
-#endif
- bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
-
- /* Set "drop all" */
- bp->rx_mode = BNX2X_RX_MODE_NONE;
- bnx2x_set_storm_rx_mode(bp);
-
- /* Disable HW interrupts, NAPI and Tx */
- bnx2x_netif_stop(bp, 1);
-
- del_timer_sync(&bp->timer);
- SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
- (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
- bnx2x_stats_handle(bp, STATS_EVENT_STOP);
-
- /* Release IRQs */
- bnx2x_free_irq(bp, false);
-
/* Wait until tx fastpath tasks complete */
for_each_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
@@ -8010,6 +8449,70 @@ unload_error:
if (!BP_NOMCP(bp))
bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
+}
+
+static inline void bnx2x_disable_close_the_gate(struct bnx2x *bp)
+{
+ u32 val;
+
+ DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
+
+ if (CHIP_IS_E1(bp)) {
+ int port = BP_PORT(bp);
+ u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
+ MISC_REG_AEU_MASK_ATTN_FUNC_0;
+
+ val = REG_RD(bp, addr);
+ val &= ~(0x300);
+ REG_WR(bp, addr, val);
+ } else if (CHIP_IS_E1H(bp)) {
+ val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
+ val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
+ MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
+ REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
+ }
+}
+
+/* must be called with rtnl_lock */
+static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
+{
+ int i;
+
+ if (bp->state == BNX2X_STATE_CLOSED) {
+ /* Interface has been removed - nothing to recover */
+ bp->recovery_state = BNX2X_RECOVERY_DONE;
+ bp->is_leader = 0;
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
+ smp_wmb();
+
+ return -EINVAL;
+ }
+
+#ifdef BCM_CNIC
+ bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
+#endif
+ bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
+
+ /* Set "drop all" */
+ bp->rx_mode = BNX2X_RX_MODE_NONE;
+ bnx2x_set_storm_rx_mode(bp);
+
+ /* Disable HW interrupts, NAPI and Tx */
+ bnx2x_netif_stop(bp, 1);
+ netif_carrier_off(bp->dev);
+
+ del_timer_sync(&bp->timer);
+ SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
+ (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
+ bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+
+ /* Release IRQs */
+ bnx2x_free_irq(bp, false);
+
+ /* Cleanup the chip if needed */
+ if (unload_mode != UNLOAD_RECOVERY)
+ bnx2x_chip_cleanup(bp, unload_mode);
+
bp->port.pmf = 0;
/* Free SKBs, SGEs, TPA pool and driver internals */
@@ -8022,19 +8525,448 @@ unload_error:
bp->state = BNX2X_STATE_CLOSED;
- netif_carrier_off(bp->dev);
+ /* The last driver must disable the "close the gates" functionality
+ * if there is no parity attention or "process kill" pending.
+ */
+ if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
+ bnx2x_reset_is_done(bp))
+ bnx2x_disable_close_the_gate(bp);
+
+ /* Reset the MCP mailbox sequence if there is an ongoing recovery */
+ if (unload_mode == UNLOAD_RECOVERY)
+ bp->fw_seq = 0;
+
+ return 0;
+}
+
+/* Close gates #2, #3 and #4: */
+static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
+{
+ u32 val, addr;
+
+ /* Gates #2 and #4a are closed/opened for "not E1" only */
+ if (!CHIP_IS_E1(bp)) {
+ /* #4 */
+ val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
+ REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
+ close ? (val | 0x1) : (val & (~(u32)1)));
+ /* #2 */
+ val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
+ REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
+ close ? (val | 0x1) : (val & (~(u32)1)));
+ }
+
+ /* #3 */
+ addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
+ val = REG_RD(bp, addr);
+ REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
+
+ DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
+ close ? "closing" : "opening");
+ mmiowb();
+}
+
+#define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
+
+static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
+{
+ /* Do some magic... */
+ u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
+ *magic_val = val & SHARED_MF_CLP_MAGIC;
+ MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
+}
+
+/* Restore the value of the `magic' bit.
+ *
+ * @param bp Driver handle.
+ * @param magic_val Old value of the `magic' bit.
+ */
+static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
+{
+ /* Restore the `magic' bit value... */
+ /* u32 val = SHMEM_RD(bp, mf_cfg.shared_mf_config.clp_mb);
+ SHMEM_WR(bp, mf_cfg.shared_mf_config.clp_mb,
+ (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); */
+ u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
+ MF_CFG_WR(bp, shared_mf_config.clp_mb,
+ (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
+}
+
+/* Prepares for MCP reset: takes care of CLP configurations.
+ *
+ * @param bp
+ * @param magic_val Old value of 'magic' bit.
+ */
+static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
+{
+ u32 shmem;
+ u32 validity_offset;
+
+ DP(NETIF_MSG_HW, "Starting\n");
+
+ /* Set `magic' bit in order to save MF config */
+ if (!CHIP_IS_E1(bp))
+ bnx2x_clp_reset_prep(bp, magic_val);
+
+ /* Get shmem offset */
+ shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
+ validity_offset = offsetof(struct shmem_region, validity_map[0]);
+
+ /* Clear validity map flags */
+ if (shmem > 0)
+ REG_WR(bp, shmem + validity_offset, 0);
+}
+
+#define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
+#define MCP_ONE_TIMEOUT 100 /* 100 ms */
+
+/* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
+ * depending on the HW type.
+ *
+ * @param bp
+ */
+static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
+{
+ /* special handling for emulation and FPGA,
+ wait 10 times longer */
+ if (CHIP_REV_IS_SLOW(bp))
+ msleep(MCP_ONE_TIMEOUT*10);
+ else
+ msleep(MCP_ONE_TIMEOUT);
+}
+
+static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
+{
+ u32 shmem, cnt, validity_offset, val;
+ int rc = 0;
+
+ msleep(100);
+
+ /* Get shmem offset */
+ shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
+ if (shmem == 0) {
+ BNX2X_ERR("Shmem 0 return failure\n");
+ rc = -ENOTTY;
+ goto exit_lbl;
+ }
+
+ validity_offset = offsetof(struct shmem_region, validity_map[0]);
+
+ /* Wait for MCP to come up */
+ for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
+ /* TBD: it's best to check the validity map of the last port;
+ * currently this checks port 0.
+ */
+ val = REG_RD(bp, shmem + validity_offset);
+ DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
+ shmem + validity_offset, val);
+
+ /* check that shared memory is valid. */
+ if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
+ == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
+ break;
+
+ bnx2x_mcp_wait_one(bp);
+ }
+
+ DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
+
+ /* Check that shared memory is valid. This indicates that MCP is up. */
+ if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
+ (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
+ BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
+ rc = -ENOTTY;
+ goto exit_lbl;
+ }
+
+exit_lbl:
+ /* Restore the `magic' bit value */
+ if (!CHIP_IS_E1(bp))
+ bnx2x_clp_reset_done(bp, magic_val);
+
+ return rc;
+}
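bnx2x_reset_mcp_comp() above is a poll-until-ready loop: it re-reads the shared-memory validity map every MCP_ONE_TIMEOUT ms until both SHR_MEM_VALIDITY_DEV_INFO and SHR_MEM_VALIDITY_MB are set, or MCP_TIMEOUT expires. The same shape reduced to a hedged generic helper (the read_reg callback and ctx argument are hypothetical):

#include <linux/types.h>
#include <linux/delay.h>
#include <linux/errno.h>

/* Poll a register through 'read_reg' until every bit in 'wanted' is set,
 * sleeping 'step_ms' between reads, for at most 'total_ms' overall. */
static int poll_bits_set(u32 (*read_reg)(void *ctx), void *ctx, u32 wanted,
			 unsigned int step_ms, unsigned int total_ms)
{
	unsigned int waited;

	for (waited = 0; waited < total_ms; waited += step_ms) {
		if ((read_reg(ctx) & wanted) == wanted)
			return 0;
		msleep(step_ms);
	}
	return -ETIMEDOUT;
}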
+
+static void bnx2x_pxp_prep(struct bnx2x *bp)
+{
+ if (!CHIP_IS_E1(bp)) {
+ REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
+ REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
+ REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
+ mmiowb();
+ }
+}
+
+/*
+ * Reset the whole chip except for:
+ * - PCIE core
+ * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
+ * one reset bit)
+ * - IGU
+ * - MISC (including AEU)
+ * - GRC
+ * - RBCN, RBCP
+ */
+static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
+{
+ u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
+
+ not_reset_mask1 =
+ MISC_REGISTERS_RESET_REG_1_RST_HC |
+ MISC_REGISTERS_RESET_REG_1_RST_PXPV |
+ MISC_REGISTERS_RESET_REG_1_RST_PXP;
+
+ not_reset_mask2 =
+ MISC_REGISTERS_RESET_REG_2_RST_MDIO |
+ MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
+ MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
+ MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
+ MISC_REGISTERS_RESET_REG_2_RST_RBCN |
+ MISC_REGISTERS_RESET_REG_2_RST_GRC |
+ MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
+ MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
+
+ reset_mask1 = 0xffffffff;
+
+ if (CHIP_IS_E1(bp))
+ reset_mask2 = 0xffff;
+ else
+ reset_mask2 = 0x1ffff;
+
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
+ reset_mask1 & (~not_reset_mask1));
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+ reset_mask2 & (~not_reset_mask2));
+
+ barrier();
+ mmiowb();
+
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
+ mmiowb();
+}
+
+static int bnx2x_process_kill(struct bnx2x *bp)
+{
+ int cnt = 1000;
+ u32 val = 0;
+ u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
+
+
+ /* Empty the Tetris buffer, wait for 1s */
+ do {
+ sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
+ blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
+ port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
+ port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
+ pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
+ if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
+ ((port_is_idle_0 & 0x1) == 0x1) &&
+ ((port_is_idle_1 & 0x1) == 0x1) &&
+ (pgl_exp_rom2 == 0xffffffff))
+ break;
+ msleep(1);
+ } while (cnt-- > 0);
+
+ if (cnt <= 0) {
+ DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
+ " are still"
+ " outstanding read requests after 1s!\n");
+ DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
+ " port_is_idle_0=0x%08x,"
+ " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
+ sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
+ pgl_exp_rom2);
+ return -EAGAIN;
+ }
+
+ barrier();
+
+ /* Close gates #2, #3 and #4 */
+ bnx2x_set_234_gates(bp, true);
+
+ /* TBD: Indicate that "process kill" is in progress to MCP */
+
+ /* Clear "unprepared" bit */
+ REG_WR(bp, MISC_REG_UNPREPARED, 0);
+ barrier();
+
+ /* Make sure all is written to the chip before the reset */
+ mmiowb();
+
+ /* Wait for 1ms to empty GLUE and PCI-E core queues,
+ * PSWHST, GRC and PSWRD Tetris buffer.
+ */
+ msleep(1);
+
+ /* Prepare for chip reset: */
+ /* MCP */
+ bnx2x_reset_mcp_prep(bp, &val);
+
+ /* PXP */
+ bnx2x_pxp_prep(bp);
+ barrier();
+
+ /* reset the chip */
+ bnx2x_process_kill_chip_reset(bp);
+ barrier();
+
+ /* Recover after reset: */
+ /* MCP */
+ if (bnx2x_reset_mcp_comp(bp, val))
+ return -EAGAIN;
+
+ /* PXP */
+ bnx2x_pxp_prep(bp);
+
+ /* Open the gates #2, #3 and #4 */
+ bnx2x_set_234_gates(bp, false);
+
+ /* TBD: IGU/AEU preparation - bring the AEU/IGU back to a
+ * reset state, re-enable attentions. */
return 0;
}
+static int bnx2x_leader_reset(struct bnx2x *bp)
+{
+ int rc = 0;
+ /* Try to recover after the failure */
+ if (bnx2x_process_kill(bp)) {
+ printk(KERN_ERR "%s: Something bad had happen! Aii!\n",
+ bp->dev->name);
+ rc = -EAGAIN;
+ goto exit_leader_reset;
+ }
+
+ /* Clear "reset is in progress" bit and update the driver state */
+ bnx2x_set_reset_done(bp);
+ bp->recovery_state = BNX2X_RECOVERY_DONE;
+
+exit_leader_reset:
+ bp->is_leader = 0;
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
+ smp_wmb();
+ return rc;
+}
+
+static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
+
+/* Assumption: runs under rtnl lock. This together with the fact
+ * that it's called only from bnx2x_reset_task() ensure that it
+ * will never be called when netif_running(bp->dev) is false.
+ */
+static void bnx2x_parity_recover(struct bnx2x *bp)
+{
+ DP(NETIF_MSG_HW, "Handling parity\n");
+ while (1) {
+ switch (bp->recovery_state) {
+ case BNX2X_RECOVERY_INIT:
+ DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
+ /* Try to get a LEADER_LOCK HW lock */
+ if (bnx2x_trylock_hw_lock(bp,
+ HW_LOCK_RESOURCE_RESERVED_08))
+ bp->is_leader = 1;
+
+ /* Stop the driver */
+ /* If interface has been removed - break */
+ if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
+ return;
+
+ bp->recovery_state = BNX2X_RECOVERY_WAIT;
+ /* Ensure "is_leader" and "recovery_state"
+ * update values are seen on other CPUs
+ */
+ smp_wmb();
+ break;
+
+ case BNX2X_RECOVERY_WAIT:
+ DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
+ if (bp->is_leader) {
+ u32 load_counter = bnx2x_get_load_cnt(bp);
+ if (load_counter) {
+ /* Wait until all other functions
+ * go down.
+ */
+ schedule_delayed_work(&bp->reset_task,
+ HZ/10);
+ return;
+ } else {
+ /* If all other functions went down -
+ * try to bring the chip back to
+ * normal. In any case it's an exit
+ * point for a leader.
+ */
+ if (bnx2x_leader_reset(bp) ||
+ bnx2x_nic_load(bp, LOAD_NORMAL)) {
+ printk(KERN_ERR"%s: Recovery "
+ "has failed. Power cycle is "
+ "needed.\n", bp->dev->name);
+ /* Disconnect this device */
+ netif_device_detach(bp->dev);
+ /* Block ifup for all functions
+ * of this ASIC until
+ * "process kill" or power
+ * cycle.
+ */
+ bnx2x_set_reset_in_progress(bp);
+ /* Shut down the power */
+ bnx2x_set_power_state(bp,
+ PCI_D3hot);
+ return;
+ }
+
+ return;
+ }
+ } else { /* non-leader */
+ if (!bnx2x_reset_is_done(bp)) {
+ /* Try to get a LEADER_LOCK HW lock as
+ * long as a former leader may have
+ * been unloaded by the user or
+ * released leadership for another
+ * reason.
+ */
+ if (bnx2x_trylock_hw_lock(bp,
+ HW_LOCK_RESOURCE_RESERVED_08)) {
+ /* I'm a leader now! Restart a
+ * switch case.
+ */
+ bp->is_leader = 1;
+ break;
+ }
+
+ schedule_delayed_work(&bp->reset_task,
+ HZ/10);
+ return;
+
+ } else { /* A leader has completed
+ * the "process kill". It's an exit
+ * point for a non-leader.
+ */
+ bnx2x_nic_load(bp, LOAD_NORMAL);
+ bp->recovery_state =
+ BNX2X_RECOVERY_DONE;
+ smp_wmb();
+ return;
+ }
+ }
+ default:
+ return;
+ }
+ }
+}
+
+/* bnx2x_nic_unload() flushes the bnx2x_wq, thus the reset task is
+ * scheduled on the generic workqueue in order to prevent a deadlock.
+ */
static void bnx2x_reset_task(struct work_struct *work)
{
- struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
+ struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
#ifdef BNX2X_STOP_ON_ERROR
BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
" so reset not done to allow debug dump,\n"
- " you will need to reboot when done\n");
+ KERN_ERR " you will need to reboot when done\n");
return;
#endif
@@ -8043,8 +8975,12 @@ static void bnx2x_reset_task(struct work_struct *work)
if (!netif_running(bp->dev))
goto reset_task_exit;
- bnx2x_nic_unload(bp, UNLOAD_NORMAL);
- bnx2x_nic_load(bp, LOAD_NORMAL);
+ if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
+ bnx2x_parity_recover(bp);
+ else {
+ bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+ bnx2x_nic_load(bp, LOAD_NORMAL);
+ }
reset_task_exit:
rtnl_unlock();
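The reset task changes from a plain work_struct to a delayed_work in this patch so that the recovery flow can re-arm itself with schedule_delayed_work() (as bnx2x_parity_recover() does above) and be flushed with cancel_delayed_work_sync() on removal further down; the container_of() in the handler therefore goes through the embedded .work member. A minimal hedged sketch of that pattern with hypothetical names:

#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct my_dev {				/* hypothetical driver context */
	struct delayed_work reset_task;
	bool more_recovery_needed;	/* hypothetical re-arm condition */
};

static void my_reset_task(struct work_struct *work)
{
	/* The handler still receives a work_struct; with delayed_work the
	 * container_of() has to go through the embedded .work member. */
	struct my_dev *md = container_of(work, struct my_dev,
					 reset_task.work);

	/* ... do one recovery step ... */
	if (md->more_recovery_needed)
		schedule_delayed_work(&md->reset_task, HZ / 10);
}

static void my_dev_setup(struct my_dev *md)
{
	INIT_DELAYED_WORK(&md->reset_task, my_reset_task);
	schedule_delayed_work(&md->reset_task, 0);	/* run as soon as possible */
}

static void my_dev_teardown(struct my_dev *md)
{
	cancel_delayed_work_sync(&md->reset_task);	/* as in bnx2x_remove_one() */
}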
@@ -8264,7 +9200,7 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
- BNX2X_ERR("BAD MCP validity signature\n");
+ BNX2X_ERROR("BAD MCP validity signature\n");
bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
@@ -8288,8 +9224,8 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
if (val < BNX2X_BC_VER) {
/* for now only warn
* later we might need to enforce this */
- BNX2X_ERR("This driver needs bc_ver %X but found %X,"
- " please upgrade BC\n", BNX2X_BC_VER, val);
+ BNX2X_ERROR("This driver needs bc_ver %X but found %X, "
+ "please upgrade BC\n", BNX2X_BC_VER, val);
}
bp->link_params.feature_config_flags |=
(val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
@@ -8310,7 +9246,8 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
- pr_info("part number %X-%X-%X-%X\n", val, val2, val3, val4);
+ dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
+ val, val2, val3, val4);
}
static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
@@ -8588,11 +9525,11 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
bp->port.advertising = (ADVERTISED_10baseT_Full |
ADVERTISED_TP);
} else {
- BNX2X_ERR("NVRAM config error. "
- "Invalid link_config 0x%x"
- " speed_cap_mask 0x%x\n",
- bp->port.link_config,
- bp->link_params.speed_cap_mask);
+ BNX2X_ERROR("NVRAM config error. "
+ "Invalid link_config 0x%x"
+ " speed_cap_mask 0x%x\n",
+ bp->port.link_config,
+ bp->link_params.speed_cap_mask);
return;
}
break;
@@ -8604,11 +9541,11 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
bp->port.advertising = (ADVERTISED_10baseT_Half |
ADVERTISED_TP);
} else {
- BNX2X_ERR("NVRAM config error. "
- "Invalid link_config 0x%x"
- " speed_cap_mask 0x%x\n",
- bp->port.link_config,
- bp->link_params.speed_cap_mask);
+ BNX2X_ERROR("NVRAM config error. "
+ "Invalid link_config 0x%x"
+ " speed_cap_mask 0x%x\n",
+ bp->port.link_config,
+ bp->link_params.speed_cap_mask);
return;
}
break;
@@ -8619,11 +9556,11 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
bp->port.advertising = (ADVERTISED_100baseT_Full |
ADVERTISED_TP);
} else {
- BNX2X_ERR("NVRAM config error. "
- "Invalid link_config 0x%x"
- " speed_cap_mask 0x%x\n",
- bp->port.link_config,
- bp->link_params.speed_cap_mask);
+ BNX2X_ERROR("NVRAM config error. "
+ "Invalid link_config 0x%x"
+ " speed_cap_mask 0x%x\n",
+ bp->port.link_config,
+ bp->link_params.speed_cap_mask);
return;
}
break;
@@ -8635,11 +9572,11 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
bp->port.advertising = (ADVERTISED_100baseT_Half |
ADVERTISED_TP);
} else {
- BNX2X_ERR("NVRAM config error. "
- "Invalid link_config 0x%x"
- " speed_cap_mask 0x%x\n",
- bp->port.link_config,
- bp->link_params.speed_cap_mask);
+ BNX2X_ERROR("NVRAM config error. "
+ "Invalid link_config 0x%x"
+ " speed_cap_mask 0x%x\n",
+ bp->port.link_config,
+ bp->link_params.speed_cap_mask);
return;
}
break;
@@ -8650,11 +9587,11 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
bp->port.advertising = (ADVERTISED_1000baseT_Full |
ADVERTISED_TP);
} else {
- BNX2X_ERR("NVRAM config error. "
- "Invalid link_config 0x%x"
- " speed_cap_mask 0x%x\n",
- bp->port.link_config,
- bp->link_params.speed_cap_mask);
+ BNX2X_ERROR("NVRAM config error. "
+ "Invalid link_config 0x%x"
+ " speed_cap_mask 0x%x\n",
+ bp->port.link_config,
+ bp->link_params.speed_cap_mask);
return;
}
break;
@@ -8665,11 +9602,11 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
bp->port.advertising = (ADVERTISED_2500baseX_Full |
ADVERTISED_TP);
} else {
- BNX2X_ERR("NVRAM config error. "
- "Invalid link_config 0x%x"
- " speed_cap_mask 0x%x\n",
- bp->port.link_config,
- bp->link_params.speed_cap_mask);
+ BNX2X_ERROR("NVRAM config error. "
+ "Invalid link_config 0x%x"
+ " speed_cap_mask 0x%x\n",
+ bp->port.link_config,
+ bp->link_params.speed_cap_mask);
return;
}
break;
@@ -8682,19 +9619,19 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
bp->port.advertising = (ADVERTISED_10000baseT_Full |
ADVERTISED_FIBRE);
} else {
- BNX2X_ERR("NVRAM config error. "
- "Invalid link_config 0x%x"
- " speed_cap_mask 0x%x\n",
- bp->port.link_config,
- bp->link_params.speed_cap_mask);
+ BNX2X_ERROR("NVRAM config error. "
+ "Invalid link_config 0x%x"
+ " speed_cap_mask 0x%x\n",
+ bp->port.link_config,
+ bp->link_params.speed_cap_mask);
return;
}
break;
default:
- BNX2X_ERR("NVRAM config error. "
- "BAD link speed link_config 0x%x\n",
- bp->port.link_config);
+ BNX2X_ERROR("NVRAM config error. "
+ "BAD link speed link_config 0x%x\n",
+ bp->port.link_config);
bp->link_params.req_line_speed = SPEED_AUTO_NEG;
bp->port.advertising = bp->port.supported;
break;
@@ -8823,7 +9760,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
bp->e1hov = 0;
bp->e1hmf = 0;
- if (CHIP_IS_E1H(bp)) {
+ if (CHIP_IS_E1H(bp) && !BP_NOMCP(bp)) {
bp->mf_config =
SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
@@ -8844,14 +9781,14 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
"(0x%04x)\n",
func, bp->e1hov, bp->e1hov);
} else {
- BNX2X_ERR("!!! No valid E1HOV for func %d,"
- " aborting\n", func);
+ BNX2X_ERROR("No valid E1HOV for func %d,"
+ " aborting\n", func);
rc = -EPERM;
}
} else {
if (BP_E1HVN(bp)) {
- BNX2X_ERR("!!! VN %d in single function mode,"
- " aborting\n", BP_E1HVN(bp));
+ BNX2X_ERROR("VN %d in single function mode,"
+ " aborting\n", BP_E1HVN(bp));
rc = -EPERM;
}
}
@@ -8887,7 +9824,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
if (BP_NOMCP(bp)) {
/* only supposed to happen on emulation/FPGA */
- BNX2X_ERR("warning random MAC workaround active\n");
+ BNX2X_ERROR("warning: random MAC workaround active\n");
random_ether_addr(bp->dev->dev_addr);
memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
}
@@ -8895,6 +9832,70 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
return rc;
}
+static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
+{
+ int cnt, i, block_end, rodi;
+ char vpd_data[BNX2X_VPD_LEN+1];
+ char str_id_reg[VENDOR_ID_LEN+1];
+ char str_id_cap[VENDOR_ID_LEN+1];
+ u8 len;
+
+ cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
+ memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
+
+ if (cnt < BNX2X_VPD_LEN)
+ goto out_not_found;
+
+ i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
+ PCI_VPD_LRDT_RO_DATA);
+ if (i < 0)
+ goto out_not_found;
+
+
+ block_end = i + PCI_VPD_LRDT_TAG_SIZE +
+ pci_vpd_lrdt_size(&vpd_data[i]);
+
+ i += PCI_VPD_LRDT_TAG_SIZE;
+
+ if (block_end > BNX2X_VPD_LEN)
+ goto out_not_found;
+
+ rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
+ PCI_VPD_RO_KEYWORD_MFR_ID);
+ if (rodi < 0)
+ goto out_not_found;
+
+ len = pci_vpd_info_field_size(&vpd_data[rodi]);
+
+ if (len != VENDOR_ID_LEN)
+ goto out_not_found;
+
+ rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
+
+ /* vendor specific info */
+ snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
+ snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
+ if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
+ !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
+
+ rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
+ PCI_VPD_RO_KEYWORD_VENDOR0);
+ if (rodi >= 0) {
+ len = pci_vpd_info_field_size(&vpd_data[rodi]);
+
+ rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
+
+ if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
+ memcpy(bp->fw_ver, &vpd_data[rodi], len);
+ bp->fw_ver[len] = ' ';
+ }
+ }
+ return;
+ }
+out_not_found:
+ return;
+}
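bnx2x_read_fwinfo() above walks the PCI VPD area: it locates the read-only (LRDT RO) tag, bounds-checks the block, and then looks up keywords (the manufacturer ID, then a vendor-specific field) with the pci_vpd_* helpers used in the hunk. A condensed, hedged sketch of looking up a single read-only keyword; the 256-byte buffer size, function name and error codes are assumptions:

#include <linux/pci.h>
#include <linux/string.h>
#include <linux/errno.h>

/* Copy the value of read-only VPD keyword 'kw' into 'out' (NUL-terminated).
 * Returns bytes copied or a negative errno. Sketch only. */
static int vpd_read_ro_keyword(struct pci_dev *pdev, const char *kw,
			       char *out, int out_len)
{
	char vpd[256];
	int n, ro, end, fld, len;

	n = pci_read_vpd(pdev, 0, sizeof(vpd), vpd);
	if (n < (int)sizeof(vpd))
		return -EIO;

	/* Find the Large Resource Data Type tag holding the read-only area */
	ro = pci_vpd_find_tag(vpd, 0, sizeof(vpd), PCI_VPD_LRDT_RO_DATA);
	if (ro < 0)
		return ro;
	end = ro + PCI_VPD_LRDT_TAG_SIZE + pci_vpd_lrdt_size(&vpd[ro]);
	ro += PCI_VPD_LRDT_TAG_SIZE;
	if (end > (int)sizeof(vpd))
		return -EINVAL;

	/* Locate the keyword inside the read-only area and copy its value */
	fld = pci_vpd_find_info_keyword(vpd, ro, end, kw);
	if (fld < 0)
		return fld;
	len = pci_vpd_info_field_size(&vpd[fld]);
	fld += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (len + 1 > out_len || fld + len > end)
		return -EINVAL;

	memcpy(out, &vpd[fld], len);
	out[len] = '\0';
	return len;
}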
+
static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
int func = BP_FUNC(bp);
@@ -8912,29 +9913,34 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
#endif
INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
- INIT_WORK(&bp->reset_task, bnx2x_reset_task);
+ INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
rc = bnx2x_get_hwinfo(bp);
+ bnx2x_read_fwinfo(bp);
/* need to reset chip if undi was active */
if (!BP_NOMCP(bp))
bnx2x_undi_unload(bp);
if (CHIP_REV_IS_FPGA(bp))
- pr_err("FPGA detected\n");
+ dev_err(&bp->pdev->dev, "FPGA detected\n");
if (BP_NOMCP(bp) && (func == 0))
- pr_err("MCP disabled, must load devices in order!\n");
+ dev_err(&bp->pdev->dev, "MCP disabled, "
+ "must load devices in order!\n");
/* Set multi queue mode */
if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
- pr_err("Multi disabled since int_mode requested is not MSI-X\n");
+ dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
+ "requested is not MSI-X\n");
multi_mode = ETH_RSS_MODE_DISABLED;
}
bp->multi_mode = multi_mode;
+ bp->dev->features |= NETIF_F_GRO;
+
/* Set TPA flags */
if (disable_tpa) {
bp->flags &= ~TPA_ENABLE_FLAG;
@@ -9304,11 +10310,13 @@ static void bnx2x_get_drvinfo(struct net_device *dev,
bnx2x_release_phy_lock(bp);
}
- snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
+ strncpy(info->fw_version, bp->fw_ver, 32);
+ snprintf(info->fw_version + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
+ "bc %d.%d.%d%s%s",
(bp->common.bc_ver & 0xff0000) >> 16,
(bp->common.bc_ver & 0xff00) >> 8,
(bp->common.bc_ver & 0xff),
- ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
+ ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
strcpy(info->bus_info, pci_name(bp->pdev));
info->n_stats = BNX2X_NUM_STATS;
info->testinfo_len = BNX2X_NUM_TESTS;
@@ -9842,19 +10850,18 @@ static int bnx2x_get_coalesce(struct net_device *dev,
return 0;
}
-#define BNX2X_MAX_COALES_TOUT (0xf0*12) /* Maximal coalescing timeout in us */
static int bnx2x_set_coalesce(struct net_device *dev,
struct ethtool_coalesce *coal)
{
struct bnx2x *bp = netdev_priv(dev);
- bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
- if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT)
- bp->rx_ticks = BNX2X_MAX_COALES_TOUT;
+ bp->rx_ticks = (u16)coal->rx_coalesce_usecs;
+ if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT)
+ bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT;
- bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
- if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT)
- bp->tx_ticks = BNX2X_MAX_COALES_TOUT;
+ bp->tx_ticks = (u16)coal->tx_coalesce_usecs;
+ if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT)
+ bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT;
if (netif_running(dev))
bnx2x_update_coalesce(bp);
@@ -9885,6 +10892,11 @@ static int bnx2x_set_ringparam(struct net_device *dev,
struct bnx2x *bp = netdev_priv(dev);
int rc = 0;
+ if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
+ printk(KERN_ERR "Handling parity error recovery. Try again later\n");
+ return -EAGAIN;
+ }
+
if ((ering->rx_pending > MAX_RX_AVAIL) ||
(ering->tx_pending > MAX_TX_AVAIL) ||
(ering->tx_pending <= MAX_SKB_FRAGS + 4))
@@ -9970,6 +10982,11 @@ static int bnx2x_set_flags(struct net_device *dev, u32 data)
int changed = 0;
int rc = 0;
+ if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
+ printk(KERN_ERR "Handling parity error recovery. Try again later\n");
+ return -EAGAIN;
+ }
+
/* TPA requires Rx CSUM offloading */
if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
if (!disable_tpa) {
@@ -9986,6 +11003,11 @@ static int bnx2x_set_flags(struct net_device *dev, u32 data)
changed = 1;
}
+ if (data & ETH_FLAG_RXHASH)
+ dev->features |= NETIF_F_RXHASH;
+ else
+ dev->features &= ~NETIF_F_RXHASH;
+
if (changed && netif_running(dev)) {
bnx2x_nic_unload(bp, UNLOAD_NORMAL);
rc = bnx2x_nic_load(bp, LOAD_NORMAL);
@@ -10006,6 +11028,11 @@ static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
struct bnx2x *bp = netdev_priv(dev);
int rc = 0;
+ if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
+ printk(KERN_ERR "Handling parity error recovery. Try again later\n");
+ return -EAGAIN;
+ }
+
bp->rx_csum = data;
/* Disable TPA, when Rx CSUM is disabled. Otherwise all
@@ -10050,9 +11077,9 @@ static int bnx2x_test_registers(struct bnx2x *bp)
u32 wr_val = 0;
int port = BP_PORT(bp);
static const struct {
- u32 offset0;
- u32 offset1;
- u32 mask;
+ u32 offset0;
+ u32 offset1;
+ u32 mask;
} reg_tbl[] = {
/* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
{ DORQ_REG_DB_ADDR0, 4, 0xffffffff },
@@ -10119,15 +11146,19 @@ static int bnx2x_test_registers(struct bnx2x *bp)
save_val = REG_RD(bp, offset);
- REG_WR(bp, offset, wr_val);
+ REG_WR(bp, offset, (wr_val & mask));
val = REG_RD(bp, offset);
/* Restore the original register's value */
REG_WR(bp, offset, save_val);
- /* verify that value is as expected value */
- if ((val & mask) != (wr_val & mask))
+ /* verify value is as expected */
+ if ((val & mask) != (wr_val & mask)) {
+ DP(NETIF_MSG_PROBE,
+ "offset 0x%x: val 0x%x != 0x%x mask 0x%x\n",
+ offset, val, wr_val, mask);
goto test_reg_exit;
+ }
}
}
@@ -10267,8 +11298,8 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
bd_prod = TX_BD(fp_tx->tx_bd_prod);
tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
- mapping = pci_map_single(bp->pdev, skb->data,
- skb_headlen(skb), PCI_DMA_TODEVICE);
+ mapping = dma_map_single(&bp->pdev->dev, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
@@ -10344,6 +11375,9 @@ static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
{
int rc = 0, res;
+ if (BP_NOMCP(bp))
+ return rc;
+
if (!netif_running(bp->dev))
return BNX2X_LOOPBACK_FAILED;
@@ -10391,6 +11425,9 @@ static int bnx2x_test_nvram(struct bnx2x *bp)
int i, rc;
u32 magic, crc;
+ if (BP_NOMCP(bp))
+ return 0;
+
rc = bnx2x_nvram_read(bp, 0, data, 4);
if (rc) {
DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
@@ -10468,6 +11505,12 @@ static void bnx2x_self_test(struct net_device *dev,
{
struct bnx2x *bp = netdev_priv(dev);
+ if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
+ printk(KERN_ERR "Handling parity error recovery. Try again later\n");
+ etest->flags |= ETH_TEST_FL_FAILED;
+ return;
+ }
+
memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
if (!netif_running(dev))
@@ -10556,7 +11599,11 @@ static const struct {
/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
- 8, "[%d]: tx_packets" }
+ 8, "[%d]: tx_ucast_packets" },
+ { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
+ 8, "[%d]: tx_mcast_packets" },
+ { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
+ 8, "[%d]: tx_bcast_packets" }
};
static const struct {
@@ -10618,16 +11665,20 @@ static const struct {
{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
8, STATS_FLAGS_PORT, "tx_error_bytes" },
{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
- 8, STATS_FLAGS_BOTH, "tx_packets" },
+ 8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
+ { STATS_OFFSET32(total_multicast_packets_transmitted_hi),
+ 8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
+ { STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
+ 8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
8, STATS_FLAGS_PORT, "tx_mac_errors" },
{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
8, STATS_FLAGS_PORT, "tx_carrier_errors" },
- { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
+/* 30 */{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
8, STATS_FLAGS_PORT, "tx_single_collisions" },
{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
8, STATS_FLAGS_PORT, "tx_multi_collisions" },
-/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
+ { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
8, STATS_FLAGS_PORT, "tx_deferred" },
{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
8, STATS_FLAGS_PORT, "tx_excess_collisions" },
@@ -10643,11 +11694,11 @@ static const struct {
8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
- { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
+/* 40 */{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
-/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
+ { STATS_OFFSET32(etherstatspktsover1522octets_hi),
8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
{ STATS_OFFSET32(pause_frames_sent_hi),
8, STATS_FLAGS_PORT, "tx_pause_frames" }
@@ -10664,7 +11715,7 @@ static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
struct bnx2x *bp = netdev_priv(dev);
int i, num_stats;
- switch(stringset) {
+ switch (stringset) {
case ETH_SS_STATS:
if (is_multi(bp)) {
num_stats = BNX2X_NUM_Q_STATS * bp->num_queues;
@@ -10893,6 +11944,14 @@ static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
break;
case PCI_D3hot:
+ /* If there are other clients above, don't
+ shut down the power */
+ if (atomic_read(&bp->pdev->enable_cnt) != 1)
+ return 0;
+ /* Don't shut down the power for emulation and FPGA */
+ if (CHIP_REV_IS_SLOW(bp))
+ return 0;
+
pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
pmcsr |= 3;
@@ -11182,6 +12241,8 @@ static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
int i;
u8 hlen = 0;
__le16 pkt_size = 0;
+ struct ethhdr *eth;
+ u8 mac_type = UNICAST_ADDRESS;
#ifdef BNX2X_STOP_ON_ERROR
if (unlikely(bp->panic))
@@ -11205,6 +12266,16 @@ static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
+ eth = (struct ethhdr *)skb->data;
+
+ /* set flag according to packet type (UNICAST_ADDRESS is default)*/
+ if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
+ if (is_broadcast_ether_addr(eth->h_dest))
+ mac_type = BROADCAST_ADDRESS;
+ else
+ mac_type = MULTICAST_ADDRESS;
+ }
+
#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
/* First, check if we need to linearize the skb (due to FW
restrictions). No need to check fragmentation if page size > 8K
@@ -11238,8 +12309,8 @@ static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
- tx_start_bd->general_data = (UNICAST_ADDRESS <<
- ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
+ tx_start_bd->general_data = (mac_type <<
+ ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
/* header nbd */
tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
@@ -11314,8 +12385,8 @@ static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
}
- mapping = pci_map_single(bp->pdev, skb->data,
- skb_headlen(skb), PCI_DMA_TODEVICE);
+ mapping = dma_map_single(&bp->pdev->dev, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
@@ -11372,8 +12443,9 @@ static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (total_pkt_bd == NULL)
total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
- mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
- frag->size, PCI_DMA_TODEVICE);
+ mapping = dma_map_page(&bp->pdev->dev, frag->page,
+ frag->page_offset,
+ frag->size, DMA_TO_DEVICE);
tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
@@ -11452,6 +12524,40 @@ static int bnx2x_open(struct net_device *dev)
bnx2x_set_power_state(bp, PCI_D0);
+ if (!bnx2x_reset_is_done(bp)) {
+ do {
+ /* Reset the MCP mailbox sequence if there is an
+ * ongoing recovery
+ */
+ bp->fw_seq = 0;
+
+ /* If this is the first function to load and "reset done"
+ * is still not cleared, it may mean that a previous
+ * recovery never completed. We don't check the attention
+ * state here because it may have already been cleared by a
+ * "common" reset, but we shall proceed with "process kill"
+ * anyway.
+ */
+ if ((bnx2x_get_load_cnt(bp) == 0) &&
+ bnx2x_trylock_hw_lock(bp,
+ HW_LOCK_RESOURCE_RESERVED_08) &&
+ (!bnx2x_leader_reset(bp))) {
+ DP(NETIF_MSG_HW, "Recovered in open\n");
+ break;
+ }
+
+ bnx2x_set_power_state(bp, PCI_D3hot);
+
+ printk(KERN_ERR"%s: Recovery flow hasn't been properly"
+ " completed yet. Try again later. If u still see this"
+ " message after a few retries then power cycle is"
+ " required.\n", bp->dev->name);
+
+ return -EAGAIN;
+ } while (0);
+ }
+
+ bp->recovery_state = BNX2X_RECOVERY_DONE;
+
return bnx2x_nic_load(bp, LOAD_OPEN);
}
@@ -11462,9 +12568,7 @@ static int bnx2x_close(struct net_device *dev)
/* Unload the driver, release IRQs */
bnx2x_nic_unload(bp, UNLOAD_CLOSE);
- if (atomic_read(&bp->pdev->enable_cnt) == 1)
- if (!CHIP_REV_IS_SLOW(bp))
- bnx2x_set_power_state(bp, PCI_D3hot);
+ bnx2x_set_power_state(bp, PCI_D3hot);
return 0;
}
@@ -11494,21 +12598,21 @@ static void bnx2x_set_rx_mode(struct net_device *dev)
else { /* some multicasts */
if (CHIP_IS_E1(bp)) {
int i, old, offset;
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
struct mac_configuration_cmd *config =
bnx2x_sp(bp, mcast_config);
i = 0;
- netdev_for_each_mc_addr(mclist, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
config->config_table[i].
cam_entry.msb_mac_addr =
- swab16(*(u16 *)&mclist->dmi_addr[0]);
+ swab16(*(u16 *)&ha->addr[0]);
config->config_table[i].
cam_entry.middle_mac_addr =
- swab16(*(u16 *)&mclist->dmi_addr[2]);
+ swab16(*(u16 *)&ha->addr[2]);
config->config_table[i].
cam_entry.lsb_mac_addr =
- swab16(*(u16 *)&mclist->dmi_addr[4]);
+ swab16(*(u16 *)&ha->addr[4]);
config->config_table[i].cam_entry.flags =
cpu_to_le16(port);
config->config_table[i].
@@ -11562,18 +12666,18 @@ static void bnx2x_set_rx_mode(struct net_device *dev)
0);
} else { /* E1H */
/* Accept one or more multicasts */
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
u32 mc_filter[MC_HASH_SIZE];
u32 crc, bit, regidx;
int i;
memset(mc_filter, 0, 4 * MC_HASH_SIZE);
- netdev_for_each_mc_addr(mclist, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
- mclist->dmi_addr);
+ ha->addr);
- crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
+ crc = crc32c_le(0, ha->addr, ETH_ALEN);
bit = (crc >> 24) & 0xff;
regidx = bit >> 5;
bit &= 0x1f;
@@ -11690,6 +12794,11 @@ static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
struct bnx2x *bp = netdev_priv(dev);
int rc = 0;
+ if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
+ printk(KERN_ERR "Handling parity error recovery. Try again later\n");
+ return -EAGAIN;
+ }
+
if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
return -EINVAL;
@@ -11717,7 +12826,7 @@ static void bnx2x_tx_timeout(struct net_device *dev)
bnx2x_panic();
#endif
/* This allows the netif to be shutdown gracefully before resetting */
- schedule_work(&bp->reset_task);
+ schedule_delayed_work(&bp->reset_task, 0);
}
#ifdef BCM_VLAN
@@ -11789,18 +12898,21 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
rc = pci_enable_device(pdev);
if (rc) {
- pr_err("Cannot enable PCI device, aborting\n");
+ dev_err(&bp->pdev->dev,
+ "Cannot enable PCI device, aborting\n");
goto err_out;
}
if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
- pr_err("Cannot find PCI device base address, aborting\n");
+ dev_err(&bp->pdev->dev,
+ "Cannot find PCI device base address, aborting\n");
rc = -ENODEV;
goto err_out_disable;
}
if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
- pr_err("Cannot find second PCI device base address, aborting\n");
+ dev_err(&bp->pdev->dev, "Cannot find second PCI device"
+ " base address, aborting\n");
rc = -ENODEV;
goto err_out_disable;
}
@@ -11808,7 +12920,8 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
if (atomic_read(&pdev->enable_cnt) == 1) {
rc = pci_request_regions(pdev, DRV_MODULE_NAME);
if (rc) {
- pr_err("Cannot obtain PCI resources, aborting\n");
+ dev_err(&bp->pdev->dev,
+ "Cannot obtain PCI resources, aborting\n");
goto err_out_disable;
}
@@ -11818,28 +12931,32 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
if (bp->pm_cap == 0) {
- pr_err("Cannot find power management capability, aborting\n");
+ dev_err(&bp->pdev->dev,
+ "Cannot find power management capability, aborting\n");
rc = -EIO;
goto err_out_release;
}
bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
if (bp->pcie_cap == 0) {
- pr_err("Cannot find PCI Express capability, aborting\n");
+ dev_err(&bp->pdev->dev,
+ "Cannot find PCI Express capability, aborting\n");
rc = -EIO;
goto err_out_release;
}
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
bp->flags |= USING_DAC_FLAG;
- if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
- pr_err("pci_set_consistent_dma_mask failed, aborting\n");
+ if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
+ dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
+ " failed, aborting\n");
rc = -EIO;
goto err_out_release;
}
- } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
- pr_err("System does not support DMA, aborting\n");
+ } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
+ dev_err(&bp->pdev->dev,
+ "System does not support DMA, aborting\n");
rc = -EIO;
goto err_out_release;
}
@@ -11852,7 +12969,8 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
bp->regview = pci_ioremap_bar(pdev, 0);
if (!bp->regview) {
- pr_err("Cannot map register space, aborting\n");
+ dev_err(&bp->pdev->dev,
+ "Cannot map register space, aborting\n");
rc = -ENOMEM;
goto err_out_release;
}
@@ -11861,7 +12979,8 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
min_t(u64, BNX2X_DB_SIZE,
pci_resource_len(pdev, 2)));
if (!bp->doorbells) {
- pr_err("Cannot map doorbell space, aborting\n");
+ dev_err(&bp->pdev->dev,
+ "Cannot map doorbell space, aborting\n");
rc = -ENOMEM;
goto err_out_unmap;
}
@@ -11876,6 +12995,9 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
+ /* Reset the load counter */
+ bnx2x_clear_load_cnt(bp);
+
dev->watchdog_timeo = TX_TIMEOUT;
dev->netdev_ops = &bnx2x_netdev_ops;
@@ -11963,7 +13085,8 @@ static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
offset = be32_to_cpu(sections[i].offset);
len = be32_to_cpu(sections[i].len);
if (offset + len > firmware->size) {
- pr_err("Section %d length is out of bounds\n", i);
+ dev_err(&bp->pdev->dev,
+ "Section %d length is out of bounds\n", i);
return -EINVAL;
}
}
@@ -11975,7 +13098,8 @@ static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
if (be16_to_cpu(ops_offsets[i]) > num_ops) {
- pr_err("Section offset %d is out of bounds\n", i);
+ dev_err(&bp->pdev->dev,
+ "Section offset %d is out of bounds\n", i);
return -EINVAL;
}
}
@@ -11987,7 +13111,8 @@ static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
(fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
(fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
(fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
- pr_err("Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
+ dev_err(&bp->pdev->dev,
+ "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
fw_ver[0], fw_ver[1], fw_ver[2],
fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
BCM_5710_FW_MINOR_VERSION,
@@ -12022,8 +13147,8 @@ static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
for (i = 0, j = 0; i < n/8; i++, j += 2) {
tmp = be32_to_cpu(source[j]);
target[i].op = (tmp >> 24) & 0xff;
- target[i].offset = tmp & 0xffffff;
- target[i].raw_data = be32_to_cpu(source[j+1]);
+ target[i].offset = tmp & 0xffffff;
+ target[i].raw_data = be32_to_cpu(source[j + 1]);
}
}
@@ -12057,20 +13182,24 @@ static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
if (CHIP_IS_E1(bp))
fw_file_name = FW_FILE_NAME_E1;
- else
+ else if (CHIP_IS_E1H(bp))
fw_file_name = FW_FILE_NAME_E1H;
+ else {
+ dev_err(dev, "Unsupported chip revision\n");
+ return -EINVAL;
+ }
- pr_info("Loading %s\n", fw_file_name);
+ dev_info(dev, "Loading %s\n", fw_file_name);
rc = request_firmware(&bp->firmware, fw_file_name, dev);
if (rc) {
- pr_err("Can't load firmware file %s\n", fw_file_name);
+ dev_err(dev, "Can't load firmware file %s\n", fw_file_name);
goto request_firmware_exit;
}
rc = bnx2x_check_firmware(bp);
if (rc) {
- pr_err("Corrupt firmware file %s\n", fw_file_name);
+ dev_err(dev, "Corrupt firmware file %s\n", fw_file_name);
goto request_firmware_exit;
}
@@ -12129,7 +13258,7 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
/* dev zeroed in init_etherdev */
dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
if (!dev) {
- pr_err("Cannot allocate net device\n");
+ dev_err(&pdev->dev, "Cannot allocate net device\n");
return -ENOMEM;
}
@@ -12151,7 +13280,7 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
/* Set init arrays */
rc = bnx2x_init_firmware(bp, &pdev->dev);
if (rc) {
- pr_err("Error loading firmware\n");
+ dev_err(&pdev->dev, "Error loading firmware\n");
goto init_one_exit;
}
@@ -12162,11 +13291,12 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
}
bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
- netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
- board_info[ent->driver_data].name,
- (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
- pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
- dev->base_addr, bp->pdev->irq, dev->dev_addr);
+ netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
+ " IRQ %d, ", board_info[ent->driver_data].name,
+ (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
+ pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
+ dev->base_addr, bp->pdev->irq);
+ pr_cont("node addr %pM\n", dev->dev_addr);
return 0;
@@ -12194,13 +13324,16 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
struct bnx2x *bp;
if (!dev) {
- pr_err("BAD net device from bnx2x_init_one\n");
+ dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
return;
}
bp = netdev_priv(dev);
unregister_netdev(dev);
+ /* Make sure RESET task is not scheduled before continuing */
+ cancel_delayed_work_sync(&bp->reset_task);
+
kfree(bp->init_ops_offsets);
kfree(bp->init_ops);
kfree(bp->init_data);
@@ -12227,7 +13360,7 @@ static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
struct bnx2x *bp;
if (!dev) {
- pr_err("BAD net device from bnx2x_init_one\n");
+ dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
return -ENODEV;
}
bp = netdev_priv(dev);
@@ -12259,11 +13392,16 @@ static int bnx2x_resume(struct pci_dev *pdev)
int rc;
if (!dev) {
- pr_err("BAD net device from bnx2x_init_one\n");
+ dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
return -ENODEV;
}
bp = netdev_priv(dev);
+ if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
+ printk(KERN_ERR "Handling parity error recovery. Try again later\n");
+ return -EAGAIN;
+ }
+
rtnl_lock();
pci_restore_state(pdev);
@@ -12292,6 +13430,7 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
bp->rx_mode = BNX2X_RX_MODE_NONE;
bnx2x_netif_stop(bp, 0);
+ netif_carrier_off(bp->dev);
del_timer_sync(&bp->timer);
bp->stats_state = STATS_STATE_DISABLED;
@@ -12318,8 +13457,6 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
bp->state = BNX2X_STATE_CLOSED;
- netif_carrier_off(bp->dev);
-
return 0;
}
@@ -12430,6 +13567,11 @@ static void bnx2x_io_resume(struct pci_dev *pdev)
struct net_device *dev = pci_get_drvdata(pdev);
struct bnx2x *bp = netdev_priv(dev);
+ if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
+ printk(KERN_ERR "Handling parity error recovery. Try again later\n");
+ return;
+ }
+
rtnl_lock();
bnx2x_eeh_recover(bp);
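The bnx2x_main.c hunks above swap bare pr_err()/pr_info() calls for dev_err()/dev_info() keyed to the PCI device, and netdev_info() keyed to the net_device, so every log line names the adapter it came from. A minimal sketch of that logging pattern, using a hypothetical helper rather than the driver's own code:

#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/pci.h>

/* Hypothetical helper: prefer device-aware logging over bare pr_*(). */
static void example_log_probe(struct pci_dev *pdev, struct net_device *ndev)
{
	/* typically prints something like "bnx2x 0000:01:00.0: firmware loaded" */
	dev_info(&pdev->dev, "firmware loaded\n");

	/* typically prints something like "eth0: link is up" */
	netdev_info(ndev, "link is up\n");
}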
diff --git a/drivers/net/bnx2x_reg.h b/drivers/net/bnx2x_reg.h
index 944964e78c81..a1f3bf0cd630 100644
--- a/drivers/net/bnx2x_reg.h
+++ b/drivers/net/bnx2x_reg.h
@@ -766,6 +766,8 @@
#define MCP_REG_MCPR_NVM_SW_ARB 0x86420
#define MCP_REG_MCPR_NVM_WRITE 0x86408
#define MCP_REG_MCPR_SCRATCH 0xa0000
+#define MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK (0x1<<1)
+#define MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK (0x1<<0)
/* [R 32] read first 32 bit after inversion of function 0. mapped as
follows: [0] NIG attention for function0; [1] NIG attention for
function1; [2] GPIO1 mcp; [3] GPIO2 mcp; [4] GPIO3 mcp; [5] GPIO4 mcp;
@@ -1249,6 +1251,8 @@
#define MISC_REG_E1HMF_MODE 0xa5f8
/* [RW 32] Debug only: spare RW register reset by core reset */
#define MISC_REG_GENERIC_CR_0 0xa460
+/* [RW 32] Debug only: spare RW register reset by por reset */
+#define MISC_REG_GENERIC_POR_1 0xa474
/* [RW 32] GPIO. [31-28] FLOAT port 0; [27-24] FLOAT port 0; When any of
these bits is written as a '1'; the corresponding SPIO bit will turn off
it's drivers and become an input. This is the reset state of all GPIO
@@ -1438,7 +1442,7 @@
(~misc_registers_sw_timer_cfg_4.sw_timer_cfg_4[1] ) is set */
#define MISC_REG_SW_TIMER_RELOAD_VAL_4 0xa2fc
/* [RW 32] the value of the counter for sw timers1-8. there are 8 addresses
- in this register. addres 0 - timer 1; address - timer 2…address 7 -
+ in this register. address 0 - timer 1; address 1 - timer 2, ... address 7 -
timer 8 */
#define MISC_REG_SW_TIMER_VAL 0xa5c0
/* [RW 1] Set by the MCP to remember if one or more of the drivers is/are
@@ -2407,10 +2411,16 @@
/* [R 8] debug only: A bit mask for all PSWHST arbiter clients. '1' means
this client is waiting for the arbiter. */
#define PXP_REG_HST_CLIENTS_WAITING_TO_ARB 0x103008
+/* [RW 1] When 1; doorbells are discarded and not passed to doorbell queue
+ block. Should be used to close the gates. */
+#define PXP_REG_HST_DISCARD_DOORBELLS 0x1030a4
/* [R 1] debug only: '1' means this PSWHST is discarding doorbells. This bit
should update according to 'hst_discard_doorbells' register when the state
machine is idle */
#define PXP_REG_HST_DISCARD_DOORBELLS_STATUS 0x1030a0
+/* [RW 1] When 1; new internal writes arriving to the block are discarded.
+ Should be used to close the gates. */
+#define PXP_REG_HST_DISCARD_INTERNAL_WRITES 0x1030a8
/* [R 6] debug only: A bit mask for all PSWHST internal write clients. '1'
means this PSWHST is discarding inputs from this client. Each bit should
update according to 'hst_discard_internal_writes' register when the state
@@ -4422,11 +4432,21 @@
#define MISC_REGISTERS_GPIO_PORT_SHIFT 4
#define MISC_REGISTERS_GPIO_SET_POS 8
#define MISC_REGISTERS_RESET_REG_1_CLEAR 0x588
+#define MISC_REGISTERS_RESET_REG_1_RST_HC (0x1<<29)
#define MISC_REGISTERS_RESET_REG_1_RST_NIG (0x1<<7)
+#define MISC_REGISTERS_RESET_REG_1_RST_PXP (0x1<<26)
+#define MISC_REGISTERS_RESET_REG_1_RST_PXPV (0x1<<27)
#define MISC_REGISTERS_RESET_REG_1_SET 0x584
#define MISC_REGISTERS_RESET_REG_2_CLEAR 0x598
#define MISC_REGISTERS_RESET_REG_2_RST_BMAC0 (0x1<<0)
#define MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE (0x1<<14)
+#define MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE (0x1<<15)
+#define MISC_REGISTERS_RESET_REG_2_RST_GRC (0x1<<4)
+#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B (0x1<<6)
+#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE (0x1<<5)
+#define MISC_REGISTERS_RESET_REG_2_RST_MDIO (0x1<<13)
+#define MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE (0x1<<11)
+#define MISC_REGISTERS_RESET_REG_2_RST_RBCN (0x1<<9)
#define MISC_REGISTERS_RESET_REG_2_SET 0x594
#define MISC_REGISTERS_RESET_REG_3_CLEAR 0x5a8
#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_IDDQ (0x1<<1)
@@ -4454,6 +4474,7 @@
#define HW_LOCK_RESOURCE_GPIO 1
#define HW_LOCK_RESOURCE_MDIO 0
#define HW_LOCK_RESOURCE_PORT0_ATT_MASK 3
+#define HW_LOCK_RESOURCE_RESERVED_08 8
#define HW_LOCK_RESOURCE_SPIO 2
#define HW_LOCK_RESOURCE_UNDI 5
#define PRS_FLAG_OVERETH_IPV4 1
@@ -4474,6 +4495,10 @@
#define AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 (1<<5)
#define AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 (1<<9)
#define AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR (1<<12)
+#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY (1<<28)
+#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY (1<<31)
+#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY (1<<29)
+#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY (1<<30)
#define AEU_INPUTS_ATTN_BITS_MISC_HW_INTERRUPT (1<<15)
#define AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR (1<<14)
#define AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR (1<<20)
diff --git a/drivers/net/bonding/bond_ipv6.c b/drivers/net/bonding/bond_ipv6.c
index 6dd64cf3cb76..969ffed86b9f 100644
--- a/drivers/net/bonding/bond_ipv6.c
+++ b/drivers/net/bonding/bond_ipv6.c
@@ -37,7 +37,6 @@
static void bond_glean_dev_ipv6(struct net_device *dev, struct in6_addr *addr)
{
struct inet6_dev *idev;
- struct inet6_ifaddr *ifa;
if (!dev)
return;
@@ -47,10 +46,12 @@ static void bond_glean_dev_ipv6(struct net_device *dev, struct in6_addr *addr)
return;
read_lock_bh(&idev->lock);
- ifa = idev->addr_list;
- if (ifa)
+ if (!list_empty(&idev->addr_list)) {
+ struct inet6_ifaddr *ifa
+ = list_first_entry(&idev->addr_list,
+ struct inet6_ifaddr, if_list);
ipv6_addr_copy(addr, &ifa->addr);
- else
+ } else
ipv6_addr_set(addr, 0, 0, 0, 0);
read_unlock_bh(&idev->lock);
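The bond_ipv6.c hunk tracks the inet6_dev address list moving from a hand-rolled ifa->next chain to a struct list_head, so the first address is now taken with list_first_entry() after a list_empty() check. A minimal sketch of that list.h idiom on a hypothetical structure, not the bonding code itself:

#include <linux/list.h>

struct example_item {
	int value;
	struct list_head node;
};

/* Hypothetical helper: return the first item's value, or a default. */
static int example_first_value(struct list_head *head, int def)
{
	struct example_item *it;

	if (list_empty(head))
		return def;

	it = list_first_entry(head, struct example_item, node);
	return it->value;
}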
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 0075514bf32f..5e12462a9d5e 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -59,6 +59,7 @@
#include <linux/uaccess.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
+#include <linux/netpoll.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/etherdevice.h>
@@ -430,7 +431,18 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
}
skb->priority = 1;
- dev_queue_xmit(skb);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ if (unlikely(bond->dev->priv_flags & IFF_IN_NETPOLL)) {
+ struct netpoll *np = bond->dev->npinfo->netpoll;
+ slave_dev->npinfo = bond->dev->npinfo;
+ np->real_dev = np->dev = skb->dev;
+ slave_dev->priv_flags |= IFF_IN_NETPOLL;
+ netpoll_send_skb(np, skb);
+ slave_dev->priv_flags &= ~IFF_IN_NETPOLL;
+ np->dev = bond->dev;
+ } else
+#endif
+ dev_queue_xmit(skb);
return 0;
}
@@ -762,32 +774,6 @@ static int bond_check_dev_link(struct bonding *bond,
/*----------------------------- Multicast list ------------------------------*/
/*
- * Returns 0 if dmi1 and dmi2 are the same, non-0 otherwise
- */
-static inline int bond_is_dmi_same(const struct dev_mc_list *dmi1,
- const struct dev_mc_list *dmi2)
-{
- return memcmp(dmi1->dmi_addr, dmi2->dmi_addr, dmi1->dmi_addrlen) == 0 &&
- dmi1->dmi_addrlen == dmi2->dmi_addrlen;
-}
-
-/*
- * returns dmi entry if found, NULL otherwise
- */
-static struct dev_mc_list *bond_mc_list_find_dmi(struct dev_mc_list *dmi,
- struct dev_mc_list *mc_list)
-{
- struct dev_mc_list *idmi;
-
- for (idmi = mc_list; idmi; idmi = idmi->next) {
- if (bond_is_dmi_same(dmi, idmi))
- return idmi;
- }
-
- return NULL;
-}
-
-/*
* Push the promiscuity flag down to appropriate slaves
*/
static int bond_set_promiscuity(struct bonding *bond, int inc)
@@ -839,18 +825,18 @@ static int bond_set_allmulti(struct bonding *bond, int inc)
* Add a Multicast address to slaves
* according to mode
*/
-static void bond_mc_add(struct bonding *bond, void *addr, int alen)
+static void bond_mc_add(struct bonding *bond, void *addr)
{
if (USES_PRIMARY(bond->params.mode)) {
/* write lock already acquired */
if (bond->curr_active_slave)
- dev_mc_add(bond->curr_active_slave->dev, addr, alen, 0);
+ dev_mc_add(bond->curr_active_slave->dev, addr);
} else {
struct slave *slave;
int i;
bond_for_each_slave(bond, slave, i)
- dev_mc_add(slave->dev, addr, alen, 0);
+ dev_mc_add(slave->dev, addr);
}
}
@@ -858,18 +844,17 @@ static void bond_mc_add(struct bonding *bond, void *addr, int alen)
* Remove a multicast address from slave
* according to mode
*/
-static void bond_mc_delete(struct bonding *bond, void *addr, int alen)
+static void bond_mc_del(struct bonding *bond, void *addr)
{
if (USES_PRIMARY(bond->params.mode)) {
/* write lock already acquired */
if (bond->curr_active_slave)
- dev_mc_delete(bond->curr_active_slave->dev, addr,
- alen, 0);
+ dev_mc_del(bond->curr_active_slave->dev, addr);
} else {
struct slave *slave;
int i;
bond_for_each_slave(bond, slave, i) {
- dev_mc_delete(slave->dev, addr, alen, 0);
+ dev_mc_del(slave->dev, addr);
}
}
}
@@ -896,66 +881,22 @@ static void bond_resend_igmp_join_requests(struct bonding *bond)
}
/*
- * Totally destroys the mc_list in bond
- */
-static void bond_mc_list_destroy(struct bonding *bond)
-{
- struct dev_mc_list *dmi;
-
- dmi = bond->mc_list;
- while (dmi) {
- bond->mc_list = dmi->next;
- kfree(dmi);
- dmi = bond->mc_list;
- }
-
- bond->mc_list = NULL;
-}
-
-/*
- * Copy all the Multicast addresses from src to the bonding device dst
- */
-static int bond_mc_list_copy(struct dev_mc_list *mc_list, struct bonding *bond,
- gfp_t gfp_flag)
-{
- struct dev_mc_list *dmi, *new_dmi;
-
- for (dmi = mc_list; dmi; dmi = dmi->next) {
- new_dmi = kmalloc(sizeof(struct dev_mc_list), gfp_flag);
-
- if (!new_dmi) {
- /* FIXME: Potential memory leak !!! */
- return -ENOMEM;
- }
-
- new_dmi->next = bond->mc_list;
- bond->mc_list = new_dmi;
- new_dmi->dmi_addrlen = dmi->dmi_addrlen;
- memcpy(new_dmi->dmi_addr, dmi->dmi_addr, dmi->dmi_addrlen);
- new_dmi->dmi_users = dmi->dmi_users;
- new_dmi->dmi_gusers = dmi->dmi_gusers;
- }
-
- return 0;
-}
-
-/*
* flush all members of flush->mc_list from device dev->mc_list
*/
static void bond_mc_list_flush(struct net_device *bond_dev,
struct net_device *slave_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
- for (dmi = bond_dev->mc_list; dmi; dmi = dmi->next)
- dev_mc_delete(slave_dev, dmi->dmi_addr, dmi->dmi_addrlen, 0);
+ netdev_for_each_mc_addr(ha, bond_dev)
+ dev_mc_del(slave_dev, ha->addr);
if (bond->params.mode == BOND_MODE_8023AD) {
/* del lacpdu mc addr from mc list */
u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
- dev_mc_delete(slave_dev, lacpdu_multicast, ETH_ALEN, 0);
+ dev_mc_del(slave_dev, lacpdu_multicast);
}
}
@@ -969,7 +910,7 @@ static void bond_mc_list_flush(struct net_device *bond_dev,
static void bond_mc_swap(struct bonding *bond, struct slave *new_active,
struct slave *old_active)
{
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
if (!USES_PRIMARY(bond->params.mode))
/* nothing to do - mc list is already up-to-date on
@@ -984,9 +925,8 @@ static void bond_mc_swap(struct bonding *bond, struct slave *new_active,
if (bond->dev->flags & IFF_ALLMULTI)
dev_set_allmulti(old_active->dev, -1);
- for (dmi = bond->dev->mc_list; dmi; dmi = dmi->next)
- dev_mc_delete(old_active->dev, dmi->dmi_addr,
- dmi->dmi_addrlen, 0);
+ netdev_for_each_mc_addr(ha, bond->dev)
+ dev_mc_del(old_active->dev, ha->addr);
}
if (new_active) {
@@ -997,9 +937,8 @@ static void bond_mc_swap(struct bonding *bond, struct slave *new_active,
if (bond->dev->flags & IFF_ALLMULTI)
dev_set_allmulti(new_active->dev, 1);
- for (dmi = bond->dev->mc_list; dmi; dmi = dmi->next)
- dev_mc_add(new_active->dev, dmi->dmi_addr,
- dmi->dmi_addrlen, 0);
+ netdev_for_each_mc_addr(ha, bond->dev)
+ dev_mc_add(new_active->dev, ha->addr);
bond_resend_igmp_join_requests(bond);
}
}
@@ -1329,6 +1268,61 @@ static void bond_detach_slave(struct bonding *bond, struct slave *slave)
bond->slave_cnt--;
}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * You must hold read lock on bond->lock before calling this.
+ */
+static bool slaves_support_netpoll(struct net_device *bond_dev)
+{
+ struct bonding *bond = netdev_priv(bond_dev);
+ struct slave *slave;
+ int i = 0;
+ bool ret = true;
+
+ bond_for_each_slave(bond, slave, i) {
+ if ((slave->dev->priv_flags & IFF_DISABLE_NETPOLL) ||
+ !slave->dev->netdev_ops->ndo_poll_controller)
+ ret = false;
+ }
+ return i != 0 && ret;
+}
+
+static void bond_poll_controller(struct net_device *bond_dev)
+{
+ struct net_device *dev = bond_dev->npinfo->netpoll->real_dev;
+ if (dev != bond_dev)
+ netpoll_poll_dev(dev);
+}
+
+static void bond_netpoll_cleanup(struct net_device *bond_dev)
+{
+ struct bonding *bond = netdev_priv(bond_dev);
+ struct slave *slave;
+ const struct net_device_ops *ops;
+ int i;
+
+ read_lock(&bond->lock);
+ bond_dev->npinfo = NULL;
+ bond_for_each_slave(bond, slave, i) {
+ if (slave->dev) {
+ ops = slave->dev->netdev_ops;
+ if (ops->ndo_netpoll_cleanup)
+ ops->ndo_netpoll_cleanup(slave->dev);
+ else
+ slave->dev->npinfo = NULL;
+ }
+ }
+ read_unlock(&bond->lock);
+}
+
+#else
+
+static void bond_netpoll_cleanup(struct net_device *bond_dev)
+{
+}
+
+#endif
+
/*---------------------------------- IOCTL ----------------------------------*/
static int bond_sethwaddr(struct net_device *bond_dev,
@@ -1411,7 +1405,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
struct bonding *bond = netdev_priv(bond_dev);
const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
struct slave *new_slave = NULL;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
struct sockaddr addr;
int link_reporting;
int old_features = bond_dev->features;
@@ -1485,14 +1479,27 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
bond_dev->name,
bond_dev->type, slave_dev->type);
- netdev_bonding_change(bond_dev, NETDEV_BONDING_OLDTYPE);
+ res = netdev_bonding_change(bond_dev,
+ NETDEV_PRE_TYPE_CHANGE);
+ res = notifier_to_errno(res);
+ if (res) {
+ pr_err("%s: refused to change device type\n",
+ bond_dev->name);
+ res = -EBUSY;
+ goto err_undo_flags;
+ }
+
+ /* Flush unicast and multicast addresses */
+ dev_uc_flush(bond_dev);
+ dev_mc_flush(bond_dev);
if (slave_dev->type != ARPHRD_ETHER)
bond_setup_by_slave(bond_dev, slave_dev);
else
ether_setup(bond_dev);
- netdev_bonding_change(bond_dev, NETDEV_BONDING_NEWTYPE);
+ netdev_bonding_change(bond_dev,
+ NETDEV_POST_TYPE_CHANGE);
}
} else if (bond_dev->type != slave_dev->type) {
pr_err("%s ether type (%d) is different from other slaves (%d), can not enslave it.\n",
@@ -1593,9 +1600,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
netif_addr_lock_bh(bond_dev);
/* upload master's mc_list to new slave */
- for (dmi = bond_dev->mc_list; dmi; dmi = dmi->next)
- dev_mc_add(slave_dev, dmi->dmi_addr,
- dmi->dmi_addrlen, 0);
+ netdev_for_each_mc_addr(ha, bond_dev)
+ dev_mc_add(slave_dev, ha->addr);
netif_addr_unlock_bh(bond_dev);
}
@@ -1603,7 +1609,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
/* add lacpdu mc addr to mc list */
u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
- dev_mc_add(slave_dev, lacpdu_multicast, ETH_ALEN, 0);
+ dev_mc_add(slave_dev, lacpdu_multicast);
}
bond_add_vlans_on_slave(bond, slave_dev);
@@ -1735,6 +1741,18 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
bond_set_carrier(bond);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ if (slaves_support_netpoll(bond_dev)) {
+ bond_dev->priv_flags &= ~IFF_DISABLE_NETPOLL;
+ if (bond_dev->npinfo)
+ slave_dev->npinfo = bond_dev->npinfo;
+ } else if (!(bond_dev->priv_flags & IFF_DISABLE_NETPOLL)) {
+ bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
+ pr_info("New slave device %s does not support netpoll\n",
+ slave_dev->name);
+ pr_info("Disabling netpoll support for %s\n", bond_dev->name);
+ }
+#endif
read_unlock(&bond->lock);
res = bond_create_slave_symlinks(bond_dev, slave_dev);
@@ -1801,6 +1819,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
return -EINVAL;
}
+ netdev_bonding_change(bond_dev, NETDEV_BONDING_DESLAVE);
write_lock_bh(&bond->lock);
slave = bond_get_slave_by_dev(bond, slave_dev);
@@ -1929,6 +1948,17 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
netdev_set_master(slave_dev, NULL);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ read_lock_bh(&bond->lock);
+ if (slaves_support_netpoll(bond_dev))
+ bond_dev->priv_flags &= ~IFF_DISABLE_NETPOLL;
+ read_unlock_bh(&bond->lock);
+ if (slave_dev->netdev_ops->ndo_netpoll_cleanup)
+ slave_dev->netdev_ops->ndo_netpoll_cleanup(slave_dev);
+ else
+ slave_dev->npinfo = NULL;
+#endif
+
/* close slave before restoring its mac address */
dev_close(slave_dev);
@@ -3905,10 +3935,24 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
return res;
}
+static bool bond_addr_in_mc_list(unsigned char *addr,
+ struct netdev_hw_addr_list *list,
+ int addrlen)
+{
+ struct netdev_hw_addr *ha;
+
+ netdev_hw_addr_list_for_each(ha, list)
+ if (!memcmp(ha->addr, addr, addrlen))
+ return true;
+
+ return false;
+}
+
static void bond_set_multicast_list(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
+ bool found;
/*
* Do promisc before checking multicast_mode
@@ -3943,20 +3987,25 @@ static void bond_set_multicast_list(struct net_device *bond_dev)
bond->flags = bond_dev->flags;
/* looking for addresses to add to slaves' mc list */
- for (dmi = bond_dev->mc_list; dmi; dmi = dmi->next) {
- if (!bond_mc_list_find_dmi(dmi, bond->mc_list))
- bond_mc_add(bond, dmi->dmi_addr, dmi->dmi_addrlen);
+ netdev_for_each_mc_addr(ha, bond_dev) {
+ found = bond_addr_in_mc_list(ha->addr, &bond->mc_list,
+ bond_dev->addr_len);
+ if (!found)
+ bond_mc_add(bond, ha->addr);
}
/* looking for addresses to delete from slaves' list */
- for (dmi = bond->mc_list; dmi; dmi = dmi->next) {
- if (!bond_mc_list_find_dmi(dmi, bond_dev->mc_list))
- bond_mc_delete(bond, dmi->dmi_addr, dmi->dmi_addrlen);
+ netdev_hw_addr_list_for_each(ha, &bond->mc_list) {
+ found = bond_addr_in_mc_list(ha->addr, &bond_dev->mc,
+ bond_dev->addr_len);
+ if (!found)
+ bond_mc_del(bond, ha->addr);
}
/* save master's multicast list */
- bond_mc_list_destroy(bond);
- bond_mc_list_copy(bond_dev->mc_list, bond, GFP_ATOMIC);
+ __hw_addr_flush(&bond->mc_list);
+ __hw_addr_add_multiple(&bond->mc_list, &bond_dev->mc,
+ bond_dev->addr_len, NETDEV_HW_ADDR_T_MULTICAST);
read_unlock(&bond->lock);
}
@@ -4448,6 +4497,10 @@ static const struct net_device_ops bond_netdev_ops = {
.ndo_vlan_rx_register = bond_vlan_rx_register,
.ndo_vlan_rx_add_vid = bond_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = bond_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_netpoll_cleanup = bond_netpoll_cleanup,
+ .ndo_poll_controller = bond_poll_controller,
+#endif
};
static void bond_destructor(struct net_device *bond_dev)
@@ -4541,6 +4594,8 @@ static void bond_uninit(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
+ bond_netpoll_cleanup(bond_dev);
+
/* Release the bonded slaves */
bond_release_all(bond_dev);
@@ -4550,9 +4605,7 @@ static void bond_uninit(struct net_device *bond_dev)
bond_remove_proc_entry(bond);
- netif_addr_lock_bh(bond_dev);
- bond_mc_list_destroy(bond);
- netif_addr_unlock_bh(bond_dev);
+ __hw_addr_flush(&bond->mc_list);
}
/*------------------------- Module initialization ---------------------------*/
@@ -4683,13 +4736,13 @@ static int bond_check_params(struct bond_params *params)
}
if (num_grat_arp < 0 || num_grat_arp > 255) {
- pr_warning("Warning: num_grat_arp (%d) not in range 0-255 so it was reset to 1 \n",
+ pr_warning("Warning: num_grat_arp (%d) not in range 0-255 so it was reset to 1\n",
num_grat_arp);
num_grat_arp = 1;
}
if (num_unsol_na < 0 || num_unsol_na > 255) {
- pr_warning("Warning: num_unsol_na (%d) not in range 0-255 so it was reset to 1 \n",
+ pr_warning("Warning: num_unsol_na (%d) not in range 0-255 so it was reset to 1\n",
num_unsol_na);
num_unsol_na = 1;
}
@@ -4924,6 +4977,8 @@ static int bond_init(struct net_device *bond_dev)
list_add_tail(&bond->bond_list, &bn->dev_list);
bond_prepare_sysfs_group(bond);
+
+ __hw_addr_init(&bond->mc_list);
return 0;
}
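Most of the bond_main.c churn above is the conversion from the old struct dev_mc_list chain to the netdev_hw_addr_list API: entries are added and removed with dev_mc_add()/dev_mc_del(), and a device's multicast list is walked with netdev_for_each_mc_addr(). A minimal sketch of pushing one device's multicast addresses onto another, assuming the caller holds the appropriate address-list locking as the bonding code does:

#include <linux/netdevice.h>

/*
 * Hypothetical helper: mirror every multicast address registered on
 * 'from' onto 'to', much as the bonding driver does when it propagates
 * the master's list down to a slave.
 */
static void example_sync_mc(struct net_device *from, struct net_device *to)
{
	struct netdev_hw_addr *ha;

	netdev_for_each_mc_addr(ha, from)
		dev_mc_add(to, ha->addr);
}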
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 257a7a4dfce9..2aa336720591 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -202,7 +202,7 @@ struct bonding {
char proc_file_name[IFNAMSIZ];
#endif /* CONFIG_PROC_FS */
struct list_head bond_list;
- struct dev_mc_list *mc_list;
+ struct netdev_hw_addr_list mc_list;
int (*xmit_hash_policy)(struct sk_buff *, int);
__be32 master_ip;
u16 flags;
diff --git a/drivers/net/caif/Kconfig b/drivers/net/caif/Kconfig
new file mode 100644
index 000000000000..0b28e0107697
--- /dev/null
+++ b/drivers/net/caif/Kconfig
@@ -0,0 +1,17 @@
+#
+# CAIF physical drivers
+#
+
+if CAIF
+
+comment "CAIF transport drivers"
+
+config CAIF_TTY
+ tristate "CAIF TTY transport driver"
+ default n
+ ---help---
+ The CAIF TTY transport driver is a Line Discipline (ldisc)
+ identified as N_CAIF. When this ldisc is opened from user space
+ it will redirect the TTY's traffic into the CAIF stack.
+
+endif # CAIF
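The help text above describes CAIF_TTY as a line discipline that user space attaches to a serial port, after which the TTY's traffic is fed into the CAIF stack and a cf<tty> network device appears (see ldisc_open() in caif_serial.c below). A minimal user-space sketch of attaching such an ldisc; the ldisc number and device path are assumptions, not part of this patch:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	int ldisc = 20;				/* assumed value of N_CAIF */
	int fd = open("/dev/ttyS0", O_RDWR | O_NOCTTY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Switch the tty's line discipline; the ldisc's open() hook then
	 * registers the CAIF network device for this port. */
	if (ioctl(fd, TIOCSETD, &ldisc) < 0) {
		perror("TIOCSETD");
		close(fd);
		return 1;
	}
	pause();				/* keep the ldisc attached */
	return 0;
}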
diff --git a/drivers/net/caif/Makefile b/drivers/net/caif/Makefile
new file mode 100644
index 000000000000..52b6d1f826f8
--- /dev/null
+++ b/drivers/net/caif/Makefile
@@ -0,0 +1,12 @@
+ifeq ($(CONFIG_CAIF_DEBUG),1)
+CAIF_DBG_FLAGS := -DDEBUG
+endif
+
+KBUILD_EXTRA_SYMBOLS=net/caif/Module.symvers
+
+ccflags-y := $(CAIF_FLAGS) $(CAIF_DBG_FLAGS)
+clean-dirs:= .tmp_versions
+clean-files:= Module.symvers modules.order *.cmd *~ \
+
+# Serial interface
+obj-$(CONFIG_CAIF_TTY) += caif_serial.o
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
new file mode 100644
index 000000000000..09257ca8f563
--- /dev/null
+++ b/drivers/net/caif/caif_serial.c
@@ -0,0 +1,449 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Sjur Brendeland / sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/init.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/tty.h>
+#include <linux/file.h>
+#include <linux/if_arp.h>
+#include <net/caif/caif_device.h>
+#include <net/caif/cfcnfg.h>
+#include <linux/err.h>
+#include <linux/debugfs.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Sjur Brendeland<sjur.brandeland@stericsson.com>");
+MODULE_DESCRIPTION("CAIF serial device TTY line discipline");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_LDISC(N_CAIF);
+
+#define SEND_QUEUE_LOW 10
+#define SEND_QUEUE_HIGH 100
+#define CAIF_SENDING 1 /* Bit 1 = 0x02*/
+#define CAIF_FLOW_OFF_SENT 4 /* Bit 4 = 0x10 */
+#define MAX_WRITE_CHUNK 4096
+#define ON 1
+#define OFF 0
+#define CAIF_MAX_MTU 4096
+
+/*This list is protected by the rtnl lock. */
+static LIST_HEAD(ser_list);
+
+static int ser_loop;
+module_param(ser_loop, bool, S_IRUGO);
+MODULE_PARM_DESC(ser_loop, "Run in simulated loopback mode.");
+
+static int ser_use_stx = 1;
+module_param(ser_use_stx, bool, S_IRUGO);
+MODULE_PARM_DESC(ser_use_stx, "STX enabled or not.");
+
+static int ser_use_fcs = 1;
+
+module_param(ser_use_fcs, bool, S_IRUGO);
+MODULE_PARM_DESC(ser_use_fcs, "FCS enabled or not.");
+
+static int ser_write_chunk = MAX_WRITE_CHUNK;
+module_param(ser_write_chunk, int, S_IRUGO);
+
+MODULE_PARM_DESC(ser_write_chunk, "Maximum size of data written to UART.");
+
+static struct dentry *debugfsdir;
+
+static int caif_net_open(struct net_device *dev);
+static int caif_net_close(struct net_device *dev);
+
+struct ser_device {
+ struct caif_dev_common common;
+ struct list_head node;
+ struct net_device *dev;
+ struct sk_buff_head head;
+ struct tty_struct *tty;
+ bool tx_started;
+ unsigned long state;
+ char *tty_name;
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debugfs_tty_dir;
+ struct debugfs_blob_wrapper tx_blob;
+ struct debugfs_blob_wrapper rx_blob;
+ u8 rx_data[128];
+ u8 tx_data[128];
+ u8 tty_status;
+
+#endif
+};
+
+static void caifdev_setup(struct net_device *dev);
+static void ldisc_tx_wakeup(struct tty_struct *tty);
+#ifdef CONFIG_DEBUG_FS
+static inline void update_tty_status(struct ser_device *ser)
+{
+ ser->tty_status =
+ ser->tty->stopped << 5 |
+ ser->tty->hw_stopped << 4 |
+ ser->tty->flow_stopped << 3 |
+ ser->tty->packet << 2 |
+ ser->tty->low_latency << 1 |
+ ser->tty->warned;
+}
+static inline void debugfs_init(struct ser_device *ser, struct tty_struct *tty)
+{
+ ser->debugfs_tty_dir =
+ debugfs_create_dir(tty->name, debugfsdir);
+ if (!IS_ERR(ser->debugfs_tty_dir)) {
+ debugfs_create_blob("last_tx_msg", S_IRUSR,
+ ser->debugfs_tty_dir,
+ &ser->tx_blob);
+
+ debugfs_create_blob("last_rx_msg", S_IRUSR,
+ ser->debugfs_tty_dir,
+ &ser->rx_blob);
+
+ debugfs_create_x32("ser_state", S_IRUSR,
+ ser->debugfs_tty_dir,
+ (u32 *)&ser->state);
+
+ debugfs_create_x8("tty_status", S_IRUSR,
+ ser->debugfs_tty_dir,
+ &ser->tty_status);
+
+ }
+ ser->tx_blob.data = ser->tx_data;
+ ser->tx_blob.size = 0;
+ ser->rx_blob.data = ser->rx_data;
+ ser->rx_blob.size = 0;
+}
+
+static inline void debugfs_deinit(struct ser_device *ser)
+{
+ debugfs_remove_recursive(ser->debugfs_tty_dir);
+}
+
+static inline void debugfs_rx(struct ser_device *ser, const u8 *data, int size)
+{
+ if (size > sizeof(ser->rx_data))
+ size = sizeof(ser->rx_data);
+ memcpy(ser->rx_data, data, size);
+ ser->rx_blob.data = ser->rx_data;
+ ser->rx_blob.size = size;
+}
+
+static inline void debugfs_tx(struct ser_device *ser, const u8 *data, int size)
+{
+ if (size > sizeof(ser->tx_data))
+ size = sizeof(ser->tx_data);
+ memcpy(ser->tx_data, data, size);
+ ser->tx_blob.data = ser->tx_data;
+ ser->tx_blob.size = size;
+}
+#else
+static inline void debugfs_init(struct ser_device *ser, struct tty_struct *tty)
+{
+}
+
+static inline void debugfs_deinit(struct ser_device *ser)
+{
+}
+
+static inline void update_tty_status(struct ser_device *ser)
+{
+}
+
+static inline void debugfs_rx(struct ser_device *ser, const u8 *data, int size)
+{
+}
+
+static inline void debugfs_tx(struct ser_device *ser, const u8 *data, int size)
+{
+}
+
+#endif
+
+static void ldisc_receive(struct tty_struct *tty, const u8 *data,
+ char *flags, int count)
+{
+ struct sk_buff *skb = NULL;
+ struct ser_device *ser;
+ int ret;
+ u8 *p;
+ ser = tty->disc_data;
+
+ /*
+ * NOTE: flags may contain information about break or overrun.
+ * This is not yet handled.
+ */
+
+
+ /*
+ * Workaround for garbage at start of transmission,
+ * only enable if STX handling is not enabled.
+ */
+ if (!ser->common.use_stx && !ser->tx_started) {
+ dev_info(&ser->dev->dev,
+ "Bytes received before initial transmission -"
+ "bytes discarded.\n");
+ return;
+ }
+
+ BUG_ON(ser->dev == NULL);
+
+ /* Get a suitable caif packet and copy in data. */
+ skb = netdev_alloc_skb(ser->dev, count+1);
+ if (skb == NULL)
+ return;
+ p = skb_put(skb, count);
+ memcpy(p, data, count);
+
+ skb->protocol = htons(ETH_P_CAIF);
+ skb_reset_mac_header(skb);
+ skb->dev = ser->dev;
+ debugfs_rx(ser, data, count);
+ /* Push received packet up the stack. */
+ ret = netif_rx_ni(skb);
+ if (!ret) {
+ ser->dev->stats.rx_packets++;
+ ser->dev->stats.rx_bytes += count;
+ } else
+ ++ser->dev->stats.rx_dropped;
+ update_tty_status(ser);
+}
+
+static int handle_tx(struct ser_device *ser)
+{
+ struct tty_struct *tty;
+ struct sk_buff *skb;
+ int tty_wr, len, room;
+ tty = ser->tty;
+ ser->tx_started = true;
+
+ /* Enter critical section */
+ if (test_and_set_bit(CAIF_SENDING, &ser->state))
+ return 0;
+
+ /* skb_peek is safe because handle_tx is called after skb_queue_tail */
+ while ((skb = skb_peek(&ser->head)) != NULL) {
+
+ /* Make sure you don't write too much */
+ len = skb->len;
+ room = tty_write_room(tty);
+ if (!room)
+ break;
+ if (room > ser_write_chunk)
+ room = ser_write_chunk;
+ if (len > room)
+ len = room;
+
+ /* Write to tty or loopback */
+ if (!ser_loop) {
+ tty_wr = tty->ops->write(tty, skb->data, len);
+ update_tty_status(ser);
+ } else {
+ tty_wr = len;
+ ldisc_receive(tty, skb->data, NULL, len);
+ }
+ ser->dev->stats.tx_packets++;
+ ser->dev->stats.tx_bytes += tty_wr;
+
+ /* Error on TTY ?! */
+ if (tty_wr < 0)
+ goto error;
+ /* Reduce buffer written, and discard if empty */
+ skb_pull(skb, tty_wr);
+ if (skb->len == 0) {
+ struct sk_buff *tmp = skb_dequeue(&ser->head);
+ BUG_ON(tmp != skb);
+ if (in_interrupt())
+ dev_kfree_skb_irq(skb);
+ else
+ kfree_skb(skb);
+ }
+ }
+ /* Send flow off if queue is empty */
+ if (ser->head.qlen <= SEND_QUEUE_LOW &&
+ test_and_clear_bit(CAIF_FLOW_OFF_SENT, &ser->state) &&
+ ser->common.flowctrl != NULL)
+ ser->common.flowctrl(ser->dev, ON);
+ clear_bit(CAIF_SENDING, &ser->state);
+ return 0;
+error:
+ clear_bit(CAIF_SENDING, &ser->state);
+ return tty_wr;
+}
+
+static int caif_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct ser_device *ser;
+ BUG_ON(dev == NULL);
+ ser = netdev_priv(dev);
+
+ /* Send flow off once, on high water mark */
+ if (ser->head.qlen > SEND_QUEUE_HIGH &&
+ !test_and_set_bit(CAIF_FLOW_OFF_SENT, &ser->state) &&
+ ser->common.flowctrl != NULL)
+
+ ser->common.flowctrl(ser->dev, OFF);
+
+ skb_queue_tail(&ser->head, skb);
+ return handle_tx(ser);
+}
+
+
+static void ldisc_tx_wakeup(struct tty_struct *tty)
+{
+ struct ser_device *ser;
+ ser = tty->disc_data;
+ BUG_ON(ser == NULL);
+ BUG_ON(ser->tty != tty);
+ handle_tx(ser);
+}
+
+
+static int ldisc_open(struct tty_struct *tty)
+{
+ struct ser_device *ser;
+ struct net_device *dev;
+ char name[64];
+ int result;
+
+ /* No write no play */
+ if (tty->ops->write == NULL)
+ return -EOPNOTSUPP;
+ if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_TTY_CONFIG))
+ return -EPERM;
+
+ sprintf(name, "cf%s", tty->name);
+ dev = alloc_netdev(sizeof(*ser), name, caifdev_setup);
+ ser = netdev_priv(dev);
+ ser->tty = tty_kref_get(tty);
+ ser->dev = dev;
+ debugfs_init(ser, tty);
+ tty->receive_room = N_TTY_BUF_SIZE;
+ tty->disc_data = ser;
+ set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+ rtnl_lock();
+ result = register_netdevice(dev);
+ if (result) {
+ rtnl_unlock();
+ free_netdev(dev);
+ return -ENODEV;
+ }
+
+ list_add(&ser->node, &ser_list);
+ rtnl_unlock();
+ netif_stop_queue(dev);
+ update_tty_status(ser);
+ return 0;
+}
+
+static void ldisc_close(struct tty_struct *tty)
+{
+ struct ser_device *ser = tty->disc_data;
+ /* Remove may be called inside or outside of rtnl_lock */
+ int islocked = rtnl_is_locked();
+ if (!islocked)
+ rtnl_lock();
+ /* device is freed automagically by net-sysfs */
+ dev_close(ser->dev);
+ unregister_netdevice(ser->dev);
+ list_del(&ser->node);
+ debugfs_deinit(ser);
+ tty_kref_put(ser->tty);
+ if (!islocked)
+ rtnl_unlock();
+}
+
+/* The line discipline structure. */
+static struct tty_ldisc_ops caif_ldisc = {
+ .owner = THIS_MODULE,
+ .magic = TTY_LDISC_MAGIC,
+ .name = "n_caif",
+ .open = ldisc_open,
+ .close = ldisc_close,
+ .receive_buf = ldisc_receive,
+ .write_wakeup = ldisc_tx_wakeup
+};
+
+static int register_ldisc(void)
+{
+ int result;
+ result = tty_register_ldisc(N_CAIF, &caif_ldisc);
+ if (result < 0) {
+ pr_err("cannot register CAIF ldisc=%d err=%d\n", N_CAIF,
+ result);
+ return result;
+ }
+ return result;
+}
+static const struct net_device_ops netdev_ops = {
+ .ndo_open = caif_net_open,
+ .ndo_stop = caif_net_close,
+ .ndo_start_xmit = caif_xmit
+};
+
+static void caifdev_setup(struct net_device *dev)
+{
+ struct ser_device *serdev = netdev_priv(dev);
+ dev->features = 0;
+ dev->netdev_ops = &netdev_ops;
+ dev->type = ARPHRD_CAIF;
+ dev->flags = IFF_POINTOPOINT | IFF_NOARP;
+ dev->mtu = CAIF_MAX_MTU;
+ dev->hard_header_len = CAIF_NEEDED_HEADROOM;
+ dev->tx_queue_len = 0;
+ dev->destructor = free_netdev;
+ skb_queue_head_init(&serdev->head);
+ serdev->common.link_select = CAIF_LINK_LOW_LATENCY;
+ serdev->common.use_frag = true;
+ serdev->common.use_stx = ser_use_stx;
+ serdev->common.use_fcs = ser_use_fcs;
+ serdev->dev = dev;
+}
+
+
+static int caif_net_open(struct net_device *dev)
+{
+ struct ser_device *ser;
+ ser = netdev_priv(dev);
+ netif_wake_queue(dev);
+ return 0;
+}
+
+static int caif_net_close(struct net_device *dev)
+{
+ netif_stop_queue(dev);
+ return 0;
+}
+
+static int __init caif_ser_init(void)
+{
+ int ret;
+ ret = register_ldisc();
+ debugfsdir = debugfs_create_dir("caif_serial", NULL);
+ return ret;
+}
+
+static void __exit caif_ser_exit(void)
+{
+ struct ser_device *ser = NULL;
+ struct list_head *node;
+ struct list_head *_tmp;
+ list_for_each_safe(node, _tmp, &ser_list) {
+ ser = list_entry(node, struct ser_device, node);
+ dev_close(ser->dev);
+ unregister_netdevice(ser->dev);
+ list_del(node);
+ }
+ tty_unregister_ldisc(N_CAIF);
+ debugfs_remove_recursive(debugfsdir);
+}
+
+module_init(caif_ser_init);
+module_exit(caif_ser_exit);
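handle_tx() in the new caif_serial.c guards against re-entry with a single state bit: test_and_set_bit(CAIF_SENDING, ...) acts as a try-lock and clear_bit() releases it on every exit path, so concurrent callers simply return instead of blocking. A stripped-down sketch of that pattern with a hypothetical worker, not the driver's own function:

#include <linux/bitops.h>

#define EXAMPLE_BUSY	0		/* bit number within 'state' */

static unsigned long example_state;

/* Hypothetical worker: run the body only if no other context is in it. */
static int example_try_work(void)
{
	if (test_and_set_bit(EXAMPLE_BUSY, &example_state))
		return 0;		/* another context is already sending */

	/* ... drain the queue ... */

	clear_bit(EXAMPLE_BUSY, &example_state);
	return 1;
}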
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 05b751719bd5..2c5227c02fa0 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -63,6 +63,16 @@ config CAN_BFIN
To compile this driver as a module, choose M here: the
module will be called bfin_can.
+config CAN_JANZ_ICAN3
+ tristate "Janz VMOD-ICAN3 Intelligent CAN controller"
+ depends on CAN_DEV && MFD_JANZ_CMODIO
+ ---help---
+ Driver for Janz VMOD-ICAN3 Intelligent CAN controller module, which
+ connects to a MODULbus carrier board.
+
+ This driver can also be built as a module. If so, the module will be
+ called janz-ican3.ko.
+
source "drivers/net/can/mscan/Kconfig"
source "drivers/net/can/sja1000/Kconfig"
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 7a702f28d01c..9047cd066fea 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -15,5 +15,6 @@ obj-$(CONFIG_CAN_AT91) += at91_can.o
obj-$(CONFIG_CAN_TI_HECC) += ti_hecc.o
obj-$(CONFIG_CAN_MCP251X) += mcp251x.o
obj-$(CONFIG_CAN_BFIN) += bfin_can.o
+obj-$(CONFIG_CAN_JANZ_ICAN3) += janz-ican3.o
ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index a2f29a38798a..2d8bd86bc5e2 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -35,7 +35,6 @@
#include <linux/string.h>
#include <linux/types.h>
-#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
@@ -376,7 +375,6 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
at91_write(priv, AT91_MCR(mb), reg_mcr);
stats->tx_bytes += cf->can_dlc;
- dev->trans_start = jiffies;
/* _NOTE_: subtract AT91_MB_TX_FIRST offset from mb! */
can_put_echo_skb(skb, dev, mb - AT91_MB_TX_FIRST);
@@ -662,7 +660,6 @@ static int at91_poll_err(struct net_device *dev, int quota, u32 reg_sr)
at91_poll_err_frame(dev, cf, reg_sr);
netif_receive_skb(skb);
- dev->last_rx = jiffies;
dev->stats.rx_packets++;
dev->stats.rx_bytes += cf->can_dlc;
@@ -899,7 +896,6 @@ static void at91_irq_err(struct net_device *dev)
at91_irq_err_state(dev, cf, new_state);
netif_rx(skb);
- dev->last_rx = jiffies;
dev->stats.rx_packets++;
dev->stats.rx_bytes += cf->can_dlc;
diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c
index 03489864376d..b6e890d28366 100644
--- a/drivers/net/can/bfin_can.c
+++ b/drivers/net/can/bfin_can.c
@@ -18,7 +18,6 @@
#include <linux/skbuff.h>
#include <linux/platform_device.h>
-#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
@@ -270,8 +269,6 @@ static int bfin_can_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* fill data length code */
bfin_write16(&reg->chl[TRANSMIT_CHL].dlc, dlc);
- dev->trans_start = jiffies;
-
can_put_echo_skb(skb, dev, 0);
/* set transmit request */
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
new file mode 100644
index 000000000000..6e533dcc36c0
--- /dev/null
+++ b/drivers/net/can/janz-ican3.c
@@ -0,0 +1,1830 @@
+/*
+ * Janz MODULbus VMOD-ICAN3 CAN Interface Driver
+ *
+ * Copyright (c) 2010 Ira W. Snyder <iws@ovro.caltech.edu>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+
+#include <linux/netdevice.h>
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+
+#include <linux/mfd/janz.h>
+
+/* the DPM has 64k of memory, organized into 256x 256 byte pages */
+#define DPM_NUM_PAGES 256
+#define DPM_PAGE_SIZE 256
+#define DPM_PAGE_ADDR(p) ((p) * DPM_PAGE_SIZE)
+
+/* JANZ ICAN3 "old-style" host interface queue page numbers */
+#define QUEUE_OLD_CONTROL 0
+#define QUEUE_OLD_RB0 1
+#define QUEUE_OLD_RB1 2
+#define QUEUE_OLD_WB0 3
+#define QUEUE_OLD_WB1 4
+
+/* Janz ICAN3 "old-style" host interface control registers */
+#define MSYNC_PEER 0x00 /* ICAN only */
+#define MSYNC_LOCL 0x01 /* host only */
+#define TARGET_RUNNING 0x02
+
+#define MSYNC_RB0 0x01
+#define MSYNC_RB1 0x02
+#define MSYNC_RBLW 0x04
+#define MSYNC_RB_MASK (MSYNC_RB0 | MSYNC_RB1)
+
+#define MSYNC_WB0 0x10
+#define MSYNC_WB1 0x20
+#define MSYNC_WBLW 0x40
+#define MSYNC_WB_MASK (MSYNC_WB0 | MSYNC_WB1)
+
+/* Janz ICAN3 "new-style" host interface queue page numbers */
+#define QUEUE_TOHOST 5
+#define QUEUE_FROMHOST_MID 6
+#define QUEUE_FROMHOST_HIGH 7
+#define QUEUE_FROMHOST_LOW 8
+
+/* The first free page in the DPM is #9 */
+#define DPM_FREE_START 9
+
+/* Janz ICAN3 "new-style" and "fast" host interface descriptor flags */
+#define DESC_VALID 0x80
+#define DESC_WRAP 0x40
+#define DESC_INTERRUPT 0x20
+#define DESC_IVALID 0x10
+#define DESC_LEN(len) (len)
+
+/* Janz ICAN3 Firmware Messages */
+#define MSG_CONNECTI 0x02
+#define MSG_DISCONNECT 0x03
+#define MSG_IDVERS 0x04
+#define MSG_MSGLOST 0x05
+#define MSG_NEWHOSTIF 0x08
+#define MSG_INQUIRY 0x0a
+#define MSG_SETAFILMASK 0x10
+#define MSG_INITFDPMQUEUE 0x11
+#define MSG_HWCONF 0x12
+#define MSG_FMSGLOST 0x15
+#define MSG_CEVTIND 0x37
+#define MSG_CBTRREQ 0x41
+#define MSG_COFFREQ 0x42
+#define MSG_CONREQ 0x43
+#define MSG_CCONFREQ 0x47
+
+/*
+ * Janz ICAN3 CAN Inquiry Message Types
+ *
+ * NOTE: there appears to be a firmware bug here. You must send
+ * NOTE: INQUIRY_STATUS and expect to receive an INQUIRY_EXTENDED
+ * NOTE: response. The controller never responds to a message with
+ * NOTE: the INQUIRY_EXTENDED subspec :(
+ */
+#define INQUIRY_STATUS 0x00
+#define INQUIRY_TERMINATION 0x01
+#define INQUIRY_EXTENDED 0x04
+
+/* Janz ICAN3 CAN Set Acceptance Filter Mask Message Types */
+#define SETAFILMASK_REJECT 0x00
+#define SETAFILMASK_FASTIF 0x02
+
+/* Janz ICAN3 CAN Hardware Configuration Message Types */
+#define HWCONF_TERMINATE_ON 0x01
+#define HWCONF_TERMINATE_OFF 0x00
+
+/* Janz ICAN3 CAN Event Indication Message Types */
+#define CEVTIND_EI 0x01
+#define CEVTIND_DOI 0x02
+#define CEVTIND_LOST 0x04
+#define CEVTIND_FULL 0x08
+#define CEVTIND_BEI 0x10
+
+#define CEVTIND_CHIP_SJA1000 0x02
+
+#define ICAN3_BUSERR_QUOTA_MAX 255
+
+/* Janz ICAN3 CAN Frame Conversion */
+#define ICAN3_ECHO 0x10
+#define ICAN3_EFF_RTR 0x40
+#define ICAN3_SFF_RTR 0x10
+#define ICAN3_EFF 0x80
+
+#define ICAN3_CAN_TYPE_MASK 0x0f
+#define ICAN3_CAN_TYPE_SFF 0x00
+#define ICAN3_CAN_TYPE_EFF 0x01
+
+#define ICAN3_CAN_DLC_MASK 0x0f
+
+/*
+ * SJA1000 Status and Error Register Definitions
+ *
+ * Copied from drivers/net/can/sja1000/sja1000.h
+ */
+
+/* status register content */
+#define SR_BS 0x80
+#define SR_ES 0x40
+#define SR_TS 0x20
+#define SR_RS 0x10
+#define SR_TCS 0x08
+#define SR_TBS 0x04
+#define SR_DOS 0x02
+#define SR_RBS 0x01
+
+#define SR_CRIT (SR_BS|SR_ES)
+
+/* ECC register */
+#define ECC_SEG 0x1F
+#define ECC_DIR 0x20
+#define ECC_ERR 6
+#define ECC_BIT 0x00
+#define ECC_FORM 0x40
+#define ECC_STUFF 0x80
+#define ECC_MASK 0xc0
+
+/* Number of buffers for use in the "new-style" host interface */
+#define ICAN3_NEW_BUFFERS 16
+
+/* Number of buffers for use in the "fast" host interface */
+#define ICAN3_TX_BUFFERS 512
+#define ICAN3_RX_BUFFERS 1024
+
+/* SJA1000 Clock Input */
+#define ICAN3_CAN_CLOCK 8000000
+
+/* Driver Name */
+#define DRV_NAME "janz-ican3"
+
+/* DPM Control Registers -- starts at offset 0x100 in the MODULbus registers */
+struct ican3_dpm_control {
+ /* window address register */
+ u8 window_address;
+ u8 unused1;
+
+ /*
+ * Read access: clear interrupt from microcontroller
+ * Write access: send interrupt to microcontroller
+ */
+ u8 interrupt;
+ u8 unused2;
+
+ /* write-only: reset all hardware on the module */
+ u8 hwreset;
+ u8 unused3;
+
+ /* write-only: generate an interrupt to the TPU */
+ u8 tpuinterrupt;
+};
+
+struct ican3_dev {
+
+ /* must be the first member */
+ struct can_priv can;
+
+ /* CAN network device */
+ struct net_device *ndev;
+ struct napi_struct napi;
+
+ /* Device for printing */
+ struct device *dev;
+
+ /* module number */
+ unsigned int num;
+
+ /* base address of registers and IRQ */
+ struct janz_cmodio_onboard_regs __iomem *ctrl;
+ struct ican3_dpm_control __iomem *dpmctrl;
+ void __iomem *dpm;
+ int irq;
+
+ /* CAN bus termination status */
+ struct completion termination_comp;
+ bool termination_enabled;
+
+ /* CAN bus error status registers */
+ struct completion buserror_comp;
+ struct can_berr_counter bec;
+
+ /* old and new style host interface */
+ unsigned int iftype;
+
+ /*
+ * Any function which changes the current DPM page must hold this
+ * lock while it is performing data accesses. This ensures that the
+ * function will not be preempted and end up reading data from a
+ * different DPM page than it expects.
+ */
+ spinlock_t lock;
+
+ /* new host interface */
+ unsigned int rx_int;
+ unsigned int rx_num;
+ unsigned int tx_num;
+
+ /* fast host interface */
+ unsigned int fastrx_start;
+ unsigned int fastrx_int;
+ unsigned int fastrx_num;
+ unsigned int fasttx_start;
+ unsigned int fasttx_num;
+
+ /* first free DPM page */
+ unsigned int free_page;
+};
+
+struct ican3_msg {
+ u8 control;
+ u8 spec;
+ __le16 len;
+ u8 data[252];
+};
+
+struct ican3_new_desc {
+ u8 control;
+ u8 pointer;
+};
+
+struct ican3_fast_desc {
+ u8 control;
+ u8 command;
+ u8 data[14];
+};
+
+/* write to the window basic address register */
+static inline void ican3_set_page(struct ican3_dev *mod, unsigned int page)
+{
+ BUG_ON(page >= DPM_NUM_PAGES);
+ iowrite8(page, &mod->dpmctrl->window_address);
+}
+
+/*
+ * ICAN3 "old-style" host interface
+ */
+
+/*
+ * Receive a message from the ICAN3 "old-style" firmware interface
+ *
+ * LOCKING: must hold mod->lock
+ *
+ * returns 0 on success, -ENOMEM when no message exists
+ */
+static int ican3_old_recv_msg(struct ican3_dev *mod, struct ican3_msg *msg)
+{
+ unsigned int mbox, mbox_page;
+ u8 locl, peer, xord;
+
+ /* get the MSYNC registers */
+ ican3_set_page(mod, QUEUE_OLD_CONTROL);
+ peer = ioread8(mod->dpm + MSYNC_PEER);
+ locl = ioread8(mod->dpm + MSYNC_LOCL);
+ xord = locl ^ peer;
+
+ if ((xord & MSYNC_RB_MASK) == 0x00) {
+ dev_dbg(mod->dev, "no mbox for reading\n");
+ return -ENOMEM;
+ }
+
+ /* find the first free mbox to read */
+ if ((xord & MSYNC_RB_MASK) == MSYNC_RB_MASK)
+ mbox = (xord & MSYNC_RBLW) ? MSYNC_RB0 : MSYNC_RB1;
+ else
+ mbox = (xord & MSYNC_RB0) ? MSYNC_RB0 : MSYNC_RB1;
+
+ /* copy the message */
+ mbox_page = (mbox == MSYNC_RB0) ? QUEUE_OLD_RB0 : QUEUE_OLD_RB1;
+ ican3_set_page(mod, mbox_page);
+ memcpy_fromio(msg, mod->dpm, sizeof(*msg));
+
+ /*
+ * notify the firmware that the read buffer is available
+ * for it to fill again
+ */
+ locl ^= mbox;
+
+ ican3_set_page(mod, QUEUE_OLD_CONTROL);
+ iowrite8(locl, mod->dpm + MSYNC_LOCL);
+ return 0;
+}
+
+/*
+ * Send a message through the "old-style" firmware interface
+ *
+ * LOCKING: must hold mod->lock
+ *
+ * returns 0 on success, -ENOMEM when no free space exists
+ */
+static int ican3_old_send_msg(struct ican3_dev *mod, struct ican3_msg *msg)
+{
+ unsigned int mbox, mbox_page;
+ u8 locl, peer, xord;
+
+ /* get the MSYNC registers */
+ ican3_set_page(mod, QUEUE_OLD_CONTROL);
+ peer = ioread8(mod->dpm + MSYNC_PEER);
+ locl = ioread8(mod->dpm + MSYNC_LOCL);
+ xord = locl ^ peer;
+
+ if ((xord & MSYNC_WB_MASK) == MSYNC_WB_MASK) {
+ dev_err(mod->dev, "no mbox for writing\n");
+ return -ENOMEM;
+ }
+
+ /* calculate a free mbox to use */
+ mbox = (xord & MSYNC_WB0) ? MSYNC_WB1 : MSYNC_WB0;
+
+ /* copy the message to the DPM */
+ mbox_page = (mbox == MSYNC_WB0) ? QUEUE_OLD_WB0 : QUEUE_OLD_WB1;
+ ican3_set_page(mod, mbox_page);
+ memcpy_toio(mod->dpm, msg, sizeof(*msg));
+
+ locl ^= mbox;
+ if (mbox == MSYNC_WB1)
+ locl |= MSYNC_WBLW;
+
+ ican3_set_page(mod, QUEUE_OLD_CONTROL);
+ iowrite8(locl, mod->dpm + MSYNC_LOCL);
+ return 0;
+}
+
+/*
+ * ICAN3 "new-style" Host Interface Setup
+ */
+
+static void __devinit ican3_init_new_host_interface(struct ican3_dev *mod)
+{
+ struct ican3_new_desc desc;
+ unsigned long flags;
+ void __iomem *dst;
+ int i;
+
+ spin_lock_irqsave(&mod->lock, flags);
+
+ /* setup the internal datastructures for RX */
+ mod->rx_num = 0;
+ mod->rx_int = 0;
+
+ /* tohost queue descriptors are in page 5 */
+ ican3_set_page(mod, QUEUE_TOHOST);
+ dst = mod->dpm;
+
+ /* initialize the tohost (rx) queue descriptors: pages 9-24 */
+ for (i = 0; i < ICAN3_NEW_BUFFERS; i++) {
+ desc.control = DESC_INTERRUPT | DESC_LEN(1); /* I L=1 */
+ desc.pointer = mod->free_page;
+
+ /* set wrap flag on last buffer */
+ if (i == ICAN3_NEW_BUFFERS - 1)
+ desc.control |= DESC_WRAP;
+
+ memcpy_toio(dst, &desc, sizeof(desc));
+ dst += sizeof(desc);
+ mod->free_page++;
+ }
+
+ /* fromhost (tx) mid queue descriptors are in page 6 */
+ ican3_set_page(mod, QUEUE_FROMHOST_MID);
+ dst = mod->dpm;
+
+ /* setup the internal datastructures for TX */
+ mod->tx_num = 0;
+
+ /* initialize the fromhost mid queue descriptors: pages 25-40 */
+ for (i = 0; i < ICAN3_NEW_BUFFERS; i++) {
+ desc.control = DESC_VALID | DESC_LEN(1); /* V L=1 */
+ desc.pointer = mod->free_page;
+
+ /* set wrap flag on last buffer */
+ if (i == ICAN3_NEW_BUFFERS - 1)
+ desc.control |= DESC_WRAP;
+
+ memcpy_toio(dst, &desc, sizeof(desc));
+ dst += sizeof(desc);
+ mod->free_page++;
+ }
+
+ /* fromhost hi queue descriptors are in page 7 */
+ ican3_set_page(mod, QUEUE_FROMHOST_HIGH);
+ dst = mod->dpm;
+
+ /* initialize only a single buffer in the fromhost hi queue (unused) */
+ desc.control = DESC_VALID | DESC_WRAP | DESC_LEN(1); /* VW L=1 */
+ desc.pointer = mod->free_page;
+ memcpy_toio(dst, &desc, sizeof(desc));
+ mod->free_page++;
+
+ /* fromhost low queue descriptors are in page 8 */
+ ican3_set_page(mod, QUEUE_FROMHOST_LOW);
+ dst = mod->dpm;
+
+ /* initialize only a single buffer in the fromhost low queue (unused) */
+ desc.control = DESC_VALID | DESC_WRAP | DESC_LEN(1); /* VW L=1 */
+ desc.pointer = mod->free_page;
+ memcpy_toio(dst, &desc, sizeof(desc));
+ mod->free_page++;
+
+ spin_unlock_irqrestore(&mod->lock, flags);
+}
+
+/*
+ * ICAN3 Fast Host Interface Setup
+ */
+
+static void __devinit ican3_init_fast_host_interface(struct ican3_dev *mod)
+{
+ struct ican3_fast_desc desc;
+ unsigned long flags;
+ unsigned int addr;
+ void __iomem *dst;
+ int i;
+
+ spin_lock_irqsave(&mod->lock, flags);
+
+ /* save the start recv page */
+ mod->fastrx_start = mod->free_page;
+ mod->fastrx_num = 0;
+ mod->fastrx_int = 0;
+
+ /* build a single fast tohost queue descriptor */
+ memset(&desc, 0, sizeof(desc));
+ desc.control = 0x00;
+ desc.command = 1;
+
+ /* build the tohost queue descriptor ring in memory */
+ addr = 0;
+ for (i = 0; i < ICAN3_RX_BUFFERS; i++) {
+
+ /* set the wrap bit on the last buffer */
+ if (i == ICAN3_RX_BUFFERS - 1)
+ desc.control |= DESC_WRAP;
+
+ /* switch to the correct page */
+ ican3_set_page(mod, mod->free_page);
+
+ /* copy the descriptor to the DPM */
+ dst = mod->dpm + addr;
+ memcpy_toio(dst, &desc, sizeof(desc));
+ addr += sizeof(desc);
+
+ /* move to the next page if necessary */
+ if (addr >= DPM_PAGE_SIZE) {
+ addr = 0;
+ mod->free_page++;
+ }
+ }
+
+ /* make sure we page-align the next queue */
+ if (addr != 0)
+ mod->free_page++;
+
+ /* save the start xmit page */
+ mod->fasttx_start = mod->free_page;
+ mod->fasttx_num = 0;
+
+ /* build a single fast fromhost queue descriptor */
+ memset(&desc, 0, sizeof(desc));
+ desc.control = DESC_VALID;
+ desc.command = 1;
+
+ /* build the fromhost queue descriptor ring in memory */
+ addr = 0;
+ for (i = 0; i < ICAN3_TX_BUFFERS; i++) {
+
+ /* set the wrap bit on the last buffer */
+ if (i == ICAN3_TX_BUFFERS - 1)
+ desc.control |= DESC_WRAP;
+
+ /* switch to the correct page */
+ ican3_set_page(mod, mod->free_page);
+
+ /* copy the descriptor to the DPM */
+ dst = mod->dpm + addr;
+ memcpy_toio(dst, &desc, sizeof(desc));
+ addr += sizeof(desc);
+
+ /* move to the next page if necessary */
+ if (addr >= DPM_PAGE_SIZE) {
+ addr = 0;
+ mod->free_page++;
+ }
+ }
+
+ spin_unlock_irqrestore(&mod->lock, flags);
+}
+
+/*
+ * ICAN3 "new-style" Host Interface Message Helpers
+ */
+
+/*
+ * LOCKING: must hold mod->lock
+ */
+static int ican3_new_send_msg(struct ican3_dev *mod, struct ican3_msg *msg)
+{
+ struct ican3_new_desc desc;
+ void __iomem *desc_addr = mod->dpm + (mod->tx_num * sizeof(desc));
+
+ /* switch to the fromhost mid queue, and read the buffer descriptor */
+ ican3_set_page(mod, QUEUE_FROMHOST_MID);
+ memcpy_fromio(&desc, desc_addr, sizeof(desc));
+
+ if (!(desc.control & DESC_VALID)) {
+ dev_dbg(mod->dev, "%s: no free buffers\n", __func__);
+ return -ENOMEM;
+ }
+
+ /* switch to the data page, copy the data */
+ ican3_set_page(mod, desc.pointer);
+ memcpy_toio(mod->dpm, msg, sizeof(*msg));
+
+ /* switch back to the descriptor, set the valid bit, write it back */
+ ican3_set_page(mod, QUEUE_FROMHOST_MID);
+ desc.control ^= DESC_VALID;
+ memcpy_toio(desc_addr, &desc, sizeof(desc));
+
+ /* update the tx number */
+ mod->tx_num = (desc.control & DESC_WRAP) ? 0 : (mod->tx_num + 1);
+ return 0;
+}
+
+/*
+ * LOCKING: must hold mod->lock
+ */
+static int ican3_new_recv_msg(struct ican3_dev *mod, struct ican3_msg *msg)
+{
+ struct ican3_new_desc desc;
+ void __iomem *desc_addr = mod->dpm + (mod->rx_num * sizeof(desc));
+
+ /* switch to the tohost queue, and read the buffer descriptor */
+ ican3_set_page(mod, QUEUE_TOHOST);
+ memcpy_fromio(&desc, desc_addr, sizeof(desc));
+
+ if (!(desc.control & DESC_VALID)) {
+ dev_dbg(mod->dev, "%s: no buffers to recv\n", __func__);
+ return -ENOMEM;
+ }
+
+ /* switch to the data page, copy the data */
+ ican3_set_page(mod, desc.pointer);
+ memcpy_fromio(msg, mod->dpm, sizeof(*msg));
+
+ /* switch back to the descriptor, toggle the valid bit, write it back */
+ ican3_set_page(mod, QUEUE_TOHOST);
+ desc.control ^= DESC_VALID;
+ memcpy_toio(desc_addr, &desc, sizeof(desc));
+
+ /* update the rx number */
+ mod->rx_num = (desc.control & DESC_WRAP) ? 0 : (mod->rx_num + 1);
+ return 0;
+}
+
+/*
+ * Message Send / Recv Helpers
+ */
+
+static int ican3_send_msg(struct ican3_dev *mod, struct ican3_msg *msg)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&mod->lock, flags);
+
+ if (mod->iftype == 0)
+ ret = ican3_old_send_msg(mod, msg);
+ else
+ ret = ican3_new_send_msg(mod, msg);
+
+ spin_unlock_irqrestore(&mod->lock, flags);
+ return ret;
+}
+
+static int ican3_recv_msg(struct ican3_dev *mod, struct ican3_msg *msg)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&mod->lock, flags);
+
+ if (mod->iftype == 0)
+ ret = ican3_old_recv_msg(mod, msg);
+ else
+ ret = ican3_new_recv_msg(mod, msg);
+
+ spin_unlock_irqrestore(&mod->lock, flags);
+ return ret;
+}
+
+/*
+ * Quick Pre-constructed Messages
+ */
+
+static int __devinit ican3_msg_connect(struct ican3_dev *mod)
+{
+ struct ican3_msg msg;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.spec = MSG_CONNECTI;
+ msg.len = cpu_to_le16(0);
+
+ return ican3_send_msg(mod, &msg);
+}
+
+static int __devexit ican3_msg_disconnect(struct ican3_dev *mod)
+{
+ struct ican3_msg msg;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.spec = MSG_DISCONNECT;
+ msg.len = cpu_to_le16(0);
+
+ return ican3_send_msg(mod, &msg);
+}
+
+static int __devinit ican3_msg_newhostif(struct ican3_dev *mod)
+{
+ struct ican3_msg msg;
+ int ret;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.spec = MSG_NEWHOSTIF;
+ msg.len = cpu_to_le16(0);
+
+ /* If we're not using the old interface, switching seems bogus */
+ WARN_ON(mod->iftype != 0);
+
+ ret = ican3_send_msg(mod, &msg);
+ if (ret)
+ return ret;
+
+ /* mark the module as using the new host interface */
+ mod->iftype = 1;
+ return 0;
+}
+
+static int __devinit ican3_msg_fasthostif(struct ican3_dev *mod)
+{
+ struct ican3_msg msg;
+ unsigned int addr;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.spec = MSG_INITFDPMQUEUE;
+ msg.len = cpu_to_le16(8);
+
+ /* write the tohost queue start address */
+ addr = DPM_PAGE_ADDR(mod->fastrx_start);
+ msg.data[0] = addr & 0xff;
+ msg.data[1] = (addr >> 8) & 0xff;
+ msg.data[2] = (addr >> 16) & 0xff;
+ msg.data[3] = (addr >> 24) & 0xff;
+
+ /* write the fromhost queue start address */
+ addr = DPM_PAGE_ADDR(mod->fasttx_start);
+ msg.data[4] = addr & 0xff;
+ msg.data[5] = (addr >> 8) & 0xff;
+ msg.data[6] = (addr >> 16) & 0xff;
+ msg.data[7] = (addr >> 24) & 0xff;
+
+ /* If we're not using the new interface yet, we cannot do this */
+ WARN_ON(mod->iftype != 1);
+
+ return ican3_send_msg(mod, &msg);
+}
+
+/*
+ * Setup the CAN filter to either accept or reject all
+ * messages from the CAN bus.
+ */
+static int __devinit ican3_set_id_filter(struct ican3_dev *mod, bool accept)
+{
+ struct ican3_msg msg;
+ int ret;
+
+ /* Standard Frame Format */
+ memset(&msg, 0, sizeof(msg));
+ msg.spec = MSG_SETAFILMASK;
+ msg.len = cpu_to_le16(5);
+ msg.data[0] = 0x00; /* IDLo LSB */
+ msg.data[1] = 0x00; /* IDLo MSB */
+ msg.data[2] = 0xff; /* IDHi LSB */
+ msg.data[3] = 0x07; /* IDHi MSB */
+
+ /* accept all frames for fast host if, or reject all frames */
+ msg.data[4] = accept ? SETAFILMASK_FASTIF : SETAFILMASK_REJECT;
+
+ ret = ican3_send_msg(mod, &msg);
+ if (ret)
+ return ret;
+
+ /* Extended Frame Format */
+ memset(&msg, 0, sizeof(msg));
+ msg.spec = MSG_SETAFILMASK;
+ msg.len = cpu_to_le16(13);
+ msg.data[0] = 0; /* MUX = 0 */
+ msg.data[1] = 0x00; /* IDLo LSB */
+ msg.data[2] = 0x00;
+ msg.data[3] = 0x00;
+ msg.data[4] = 0x20; /* IDLo MSB */
+ msg.data[5] = 0xff; /* IDHi LSB */
+ msg.data[6] = 0xff;
+ msg.data[7] = 0xff;
+ msg.data[8] = 0x3f; /* IDHi MSB */
+
+ /* accept all frames for fast host if, or reject all frames */
+ msg.data[9] = accept ? SETAFILMASK_FASTIF : SETAFILMASK_REJECT;
+
+ return ican3_send_msg(mod, &msg);
+}
+
+/*
+ * Bring the CAN bus online or offline
+ */
+static int ican3_set_bus_state(struct ican3_dev *mod, bool on)
+{
+ struct ican3_msg msg;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.spec = on ? MSG_CONREQ : MSG_COFFREQ;
+ msg.len = cpu_to_le16(0);
+
+ return ican3_send_msg(mod, &msg);
+}
+
+static int ican3_set_termination(struct ican3_dev *mod, bool on)
+{
+ struct ican3_msg msg;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.spec = MSG_HWCONF;
+ msg.len = cpu_to_le16(2);
+ msg.data[0] = 0x00;
+ msg.data[1] = on ? HWCONF_TERMINATE_ON : HWCONF_TERMINATE_OFF;
+
+ return ican3_send_msg(mod, &msg);
+}
+
+static int ican3_send_inquiry(struct ican3_dev *mod, u8 subspec)
+{
+ struct ican3_msg msg;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.spec = MSG_INQUIRY;
+ msg.len = cpu_to_le16(2);
+ msg.data[0] = subspec;
+ msg.data[1] = 0x00;
+
+ return ican3_send_msg(mod, &msg);
+}
+
+static int ican3_set_buserror(struct ican3_dev *mod, u8 quota)
+{
+ struct ican3_msg msg;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.spec = MSG_CCONFREQ;
+ msg.len = cpu_to_le16(2);
+ msg.data[0] = 0x00;
+ msg.data[1] = quota;
+
+ return ican3_send_msg(mod, &msg);
+}
+
+/*
+ * ICAN3 to Linux CAN Frame Conversion
+ */
+
+static void ican3_to_can_frame(struct ican3_dev *mod,
+ struct ican3_fast_desc *desc,
+ struct can_frame *cf)
+{
+ if ((desc->command & ICAN3_CAN_TYPE_MASK) == ICAN3_CAN_TYPE_SFF) {
+ if (desc->data[1] & ICAN3_SFF_RTR)
+ cf->can_id |= CAN_RTR_FLAG;
+
+ cf->can_id |= desc->data[0] << 3;
+ cf->can_id |= (desc->data[1] & 0xe0) >> 5;
+ cf->can_dlc = desc->data[1] & ICAN3_CAN_DLC_MASK;
+ memcpy(cf->data, &desc->data[2], sizeof(cf->data));
+ } else {
+ cf->can_dlc = desc->data[0] & ICAN3_CAN_DLC_MASK;
+ if (desc->data[0] & ICAN3_EFF_RTR)
+ cf->can_id |= CAN_RTR_FLAG;
+
+ if (desc->data[0] & ICAN3_EFF) {
+ cf->can_id |= CAN_EFF_FLAG;
+ cf->can_id |= desc->data[2] << 21; /* 28-21 */
+ cf->can_id |= desc->data[3] << 13; /* 20-13 */
+ cf->can_id |= desc->data[4] << 5; /* 12-5 */
+ cf->can_id |= (desc->data[5] & 0xf8) >> 3;
+ } else {
+ cf->can_id |= desc->data[2] << 3; /* 10-3 */
+ cf->can_id |= desc->data[3] >> 5; /* 2-0 */
+ }
+
+ memcpy(cf->data, &desc->data[6], sizeof(cf->data));
+ }
+}
+
+static void can_frame_to_ican3(struct ican3_dev *mod,
+ struct can_frame *cf,
+ struct ican3_fast_desc *desc)
+{
+ /* clear out any stale data in the descriptor */
+ memset(desc->data, 0, sizeof(desc->data));
+
+ /* we always use the extended format, with the ECHO flag set */
+ desc->command = ICAN3_CAN_TYPE_EFF;
+ desc->data[0] |= cf->can_dlc;
+ desc->data[1] |= ICAN3_ECHO;
+
+ if (cf->can_id & CAN_RTR_FLAG)
+ desc->data[0] |= ICAN3_EFF_RTR;
+
+ /* pack the id into the correct places */
+ if (cf->can_id & CAN_EFF_FLAG) {
+ desc->data[0] |= ICAN3_EFF;
+ desc->data[2] = (cf->can_id & 0x1fe00000) >> 21; /* 28-21 */
+ desc->data[3] = (cf->can_id & 0x001fe000) >> 13; /* 20-13 */
+ desc->data[4] = (cf->can_id & 0x00001fe0) >> 5; /* 12-5 */
+ desc->data[5] = (cf->can_id & 0x0000001f) << 3; /* 4-0 */
+ } else {
+ desc->data[2] = (cf->can_id & 0x7F8) >> 3; /* bits 10-3 */
+ desc->data[3] = (cf->can_id & 0x007) << 5; /* bits 2-0 */
+ }
+
+ /* copy the data bits into the descriptor */
+ memcpy(&desc->data[6], cf->data, sizeof(cf->data));
+}
+
+/*
+ * Interrupt Handling
+ */
+
+/*
+ * Handle an ID + Version message response from the firmware. We never generate
+ * this message in production code, but it is very useful to be able to display
+ * it when debugging.
+ */
+static void ican3_handle_idvers(struct ican3_dev *mod, struct ican3_msg *msg)
+{
+ dev_dbg(mod->dev, "IDVERS response: %s\n", msg->data);
+}
+
+static void ican3_handle_msglost(struct ican3_dev *mod, struct ican3_msg *msg)
+{
+ struct net_device *dev = mod->ndev;
+ struct net_device_stats *stats = &dev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+
+ /*
+ * Report that communication messages with the microcontroller firmware
+ * are being lost. These are never CAN frames, so we do not generate an
+ * error frame for userspace
+ */
+ if (msg->spec == MSG_MSGLOST) {
+ dev_err(mod->dev, "lost %d control messages\n", msg->data[0]);
+ return;
+ }
+
+ /*
+ * Oops, this indicates that we have lost messages in the fast queue,
+ * which are exclusively CAN messages. Our driver isn't reading CAN
+ * frames fast enough.
+ *
+ * We'll pretend that the SJA1000 told us that it ran out of buffer
+ * space, because there is no better message for this.
+ */
+ skb = alloc_can_err_skb(dev, &cf);
+ if (skb) {
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+ stats->rx_errors++;
+ stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
+ }
+}
+
+/*
+ * Handle CAN Event Indication Messages from the firmware
+ *
+ * The ICAN3 firmware provides the values of some SJA1000 registers when it
+ * generates this message. The code below is largely copied from the
+ * drivers/net/can/sja1000/sja1000.c file, and adapted as necessary
+ */
+static int ican3_handle_cevtind(struct ican3_dev *mod, struct ican3_msg *msg)
+{
+ struct net_device *dev = mod->ndev;
+ struct net_device_stats *stats = &dev->stats;
+ enum can_state state = mod->can.state;
+ u8 status, isrc, rxerr, txerr;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+
+ /* we can only handle the SJA1000 part */
+ if (msg->data[1] != CEVTIND_CHIP_SJA1000) {
+ dev_err(mod->dev, "unable to handle errors on non-SJA1000\n");
+ return -ENODEV;
+ }
+
+ /* check the message length for sanity */
+ if (le16_to_cpu(msg->len) < 6) {
+ dev_err(mod->dev, "error message too short\n");
+ return -EINVAL;
+ }
+
+ skb = alloc_can_err_skb(dev, &cf);
+ if (skb == NULL)
+ return -ENOMEM;
+
+ isrc = msg->data[0];
+ status = msg->data[3];
+ rxerr = msg->data[4];
+ txerr = msg->data[5];
+
+ /* data overrun interrupt */
+ if (isrc == CEVTIND_DOI || isrc == CEVTIND_LOST) {
+ dev_dbg(mod->dev, "data overrun interrupt\n");
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+ stats->rx_over_errors++;
+ stats->rx_errors++;
+ }
+
+ /* error warning + passive interrupt */
+ if (isrc == CEVTIND_EI) {
+ dev_dbg(mod->dev, "error warning + passive interrupt\n");
+ if (status & SR_BS) {
+ state = CAN_STATE_BUS_OFF;
+ cf->can_id |= CAN_ERR_BUSOFF;
+ can_bus_off(dev);
+ } else if (status & SR_ES) {
+ if (rxerr >= 128 || txerr >= 128)
+ state = CAN_STATE_ERROR_PASSIVE;
+ else
+ state = CAN_STATE_ERROR_WARNING;
+ } else {
+ state = CAN_STATE_ERROR_ACTIVE;
+ }
+ }
+
+ /* bus error interrupt */
+ if (isrc == CEVTIND_BEI) {
+ u8 ecc = msg->data[2];
+
+ dev_dbg(mod->dev, "bus error interrupt\n");
+ mod->can.can_stats.bus_error++;
+ stats->rx_errors++;
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+
+ switch (ecc & ECC_MASK) {
+ case ECC_BIT:
+ cf->data[2] |= CAN_ERR_PROT_BIT;
+ break;
+ case ECC_FORM:
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+ break;
+ case ECC_STUFF:
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
+ break;
+ default:
+ cf->data[2] |= CAN_ERR_PROT_UNSPEC;
+ cf->data[3] = ecc & ECC_SEG;
+ break;
+ }
+
+ if ((ecc & ECC_DIR) == 0)
+ cf->data[2] |= CAN_ERR_PROT_TX;
+
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
+ }
+
+ if (state != mod->can.state && (state == CAN_STATE_ERROR_WARNING ||
+ state == CAN_STATE_ERROR_PASSIVE)) {
+ cf->can_id |= CAN_ERR_CRTL;
+ if (state == CAN_STATE_ERROR_WARNING) {
+ mod->can.can_stats.error_warning++;
+ cf->data[1] = (txerr > rxerr) ?
+ CAN_ERR_CRTL_TX_WARNING :
+ CAN_ERR_CRTL_RX_WARNING;
+ } else {
+ mod->can.can_stats.error_passive++;
+ cf->data[1] = (txerr > rxerr) ?
+ CAN_ERR_CRTL_TX_PASSIVE :
+ CAN_ERR_CRTL_RX_PASSIVE;
+ }
+
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
+ }
+
+ mod->can.state = state;
+ stats->rx_errors++;
+ stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
+ return 0;
+}
+
+static void ican3_handle_inquiry(struct ican3_dev *mod, struct ican3_msg *msg)
+{
+ switch (msg->data[0]) {
+ case INQUIRY_STATUS:
+ case INQUIRY_EXTENDED:
+ mod->bec.rxerr = msg->data[5];
+ mod->bec.txerr = msg->data[6];
+ complete(&mod->buserror_comp);
+ break;
+ case INQUIRY_TERMINATION:
+ mod->termination_enabled = msg->data[6] & HWCONF_TERMINATE_ON;
+ complete(&mod->termination_comp);
+ break;
+ default:
+ dev_err(mod->dev, "recieved an unknown inquiry response\n");
+ break;
+ }
+}
+
+static void ican3_handle_unknown_message(struct ican3_dev *mod,
+ struct ican3_msg *msg)
+{
+ dev_warn(mod->dev, "recieved unknown message: spec 0x%.2x length %d\n",
+ msg->spec, le16_to_cpu(msg->len));
+}
+
+/*
+ * Handle a control message from the firmware
+ */
+static void ican3_handle_message(struct ican3_dev *mod, struct ican3_msg *msg)
+{
+ dev_dbg(mod->dev, "%s: modno %d spec 0x%.2x len %d bytes\n", __func__,
+ mod->num, msg->spec, le16_to_cpu(msg->len));
+
+ switch (msg->spec) {
+ case MSG_IDVERS:
+ ican3_handle_idvers(mod, msg);
+ break;
+ case MSG_MSGLOST:
+ case MSG_FMSGLOST:
+ ican3_handle_msglost(mod, msg);
+ break;
+ case MSG_CEVTIND:
+ ican3_handle_cevtind(mod, msg);
+ break;
+ case MSG_INQUIRY:
+ ican3_handle_inquiry(mod, msg);
+ break;
+ default:
+ ican3_handle_unknown_message(mod, msg);
+ break;
+ }
+}
+
+/*
+ * Check that there is room in the TX ring to transmit another skb
+ *
+ * LOCKING: must hold mod->lock
+ */
+static bool ican3_txok(struct ican3_dev *mod)
+{
+ struct ican3_fast_desc __iomem *desc;
+ u8 control;
+
+ /* copy the control bits of the descriptor */
+ ican3_set_page(mod, mod->fasttx_start + (mod->fasttx_num / 16));
+ desc = mod->dpm + ((mod->fasttx_num % 16) * sizeof(*desc));
+ control = ioread8(&desc->control);
+
+ /* if the control bits are not valid, then we have no more space */
+ if (!(control & DESC_VALID))
+ return false;
+
+ return true;
+}
+
+/*
+ * Receive one CAN frame from the hardware
+ *
+ * This is the core of the NAPI poll handler: it is called repeatedly by
+ * ican3_napi() to pull CAN frames out of the fast receive queue.
+ *
+ * CONTEXT: called from the NAPI poller
+ */
+static int ican3_recv_skb(struct ican3_dev *mod)
+{
+ struct net_device *ndev = mod->ndev;
+ struct net_device_stats *stats = &ndev->stats;
+ struct ican3_fast_desc desc;
+ void __iomem *desc_addr;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mod->lock, flags);
+
+ /* copy the whole descriptor */
+ ican3_set_page(mod, mod->fastrx_start + (mod->fastrx_num / 16));
+ desc_addr = mod->dpm + ((mod->fastrx_num % 16) * sizeof(desc));
+ memcpy_fromio(&desc, desc_addr, sizeof(desc));
+
+ spin_unlock_irqrestore(&mod->lock, flags);
+
+ /* check that we actually have a CAN frame */
+ if (!(desc.control & DESC_VALID))
+ return -ENOBUFS;
+
+ /* allocate an skb */
+ skb = alloc_can_skb(ndev, &cf);
+ if (unlikely(skb == NULL)) {
+ stats->rx_dropped++;
+ goto err_noalloc;
+ }
+
+ /* convert the ICAN3 frame into Linux CAN format */
+ ican3_to_can_frame(mod, &desc, cf);
+
+ /* receive the skb, update statistics */
+ netif_receive_skb(skb);
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+
+err_noalloc:
+ /* toggle the valid bit and return the descriptor to the ring */
+ desc.control ^= DESC_VALID;
+
+ spin_lock_irqsave(&mod->lock, flags);
+
+ ican3_set_page(mod, mod->fastrx_start + (mod->fastrx_num / 16));
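+ /* write back only the first byte (the control bits); the data area of the descriptor is left untouched */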
+ memcpy_toio(desc_addr, &desc, 1);
+
+ /* update the next buffer pointer */
+ mod->fastrx_num = (desc.control & DESC_WRAP) ? 0
+ : (mod->fastrx_num + 1);
+
+ /* there are still more buffers to process */
+ spin_unlock_irqrestore(&mod->lock, flags);
+ return 0;
+}
+
+static int ican3_napi(struct napi_struct *napi, int budget)
+{
+ struct ican3_dev *mod = container_of(napi, struct ican3_dev, napi);
+ struct ican3_msg msg;
+ unsigned long flags;
+ int received = 0;
+ int ret;
+
+ /* process all communication messages */
+ while (true) {
+ ret = ican3_recv_msg(mod, &msg);
+ if (ret)
+ break;
+
+ ican3_handle_message(mod, &msg);
+ }
+
+ /* process all CAN frames from the fast interface */
+ while (received < budget) {
+ ret = ican3_recv_skb(mod);
+ if (ret)
+ break;
+
+ received++;
+ }
+
+ /* We have processed all packets that the adapter had; since that
+ * was less than our budget, stop polling */
+ if (received < budget)
+ napi_complete(napi);
+
+ spin_lock_irqsave(&mod->lock, flags);
+
+ /* Wake up the transmit queue if necessary */
+ if (netif_queue_stopped(mod->ndev) && ican3_txok(mod))
+ netif_wake_queue(mod->ndev);
+
+ spin_unlock_irqrestore(&mod->lock, flags);
+
+ /* re-enable interrupt generation */
+ iowrite8(1 << mod->num, &mod->ctrl->int_enable);
+ return received;
+}
+
+static irqreturn_t ican3_irq(int irq, void *dev_id)
+{
+ struct ican3_dev *mod = dev_id;
+ u8 stat;
+
+ /*
+ * The interrupt status register on this device reports interrupts
+ * as zeroes instead of using ones like most other devices
+ */
+ stat = ioread8(&mod->ctrl->int_disable) & (1 << mod->num);
+ if (stat == (1 << mod->num))
+ return IRQ_NONE;
+
+ /* clear the MODULbus interrupt from the microcontroller */
+ ioread8(&mod->dpmctrl->interrupt);
+
+ /* disable interrupt generation, schedule the NAPI poller */
+ iowrite8(1 << mod->num, &mod->ctrl->int_disable);
+ napi_schedule(&mod->napi);
+ return IRQ_HANDLED;
+}
+
+/*
+ * Firmware reset, startup, and shutdown
+ */
+
+/*
+ * Reset an ICAN module to its power-on state
+ *
+ * CONTEXT: no network device registered
+ * LOCKING: work function disabled
+ */
+static int ican3_reset_module(struct ican3_dev *mod)
+{
+ u8 val = 1 << mod->num;
+ unsigned long start;
+ u8 runold, runnew;
+
+ /* disable interrupts so no more work is scheduled */
+ iowrite8(1 << mod->num, &mod->ctrl->int_disable);
+
+ /* flush any pending work */
+ flush_scheduled_work();
+
+ /* the first unallocated page in the DPM is #9 */
+ mod->free_page = DPM_FREE_START;
+
+ ican3_set_page(mod, QUEUE_OLD_CONTROL);
+ runold = ioread8(mod->dpm + TARGET_RUNNING);
+
+ /* reset the module */
+ iowrite8(val, &mod->ctrl->reset_assert);
+ iowrite8(val, &mod->ctrl->reset_deassert);
+
+ /* wait until the module has finished resetting and is running */
+ start = jiffies;
+ do {
+ ican3_set_page(mod, QUEUE_OLD_CONTROL);
+ runnew = ioread8(mod->dpm + TARGET_RUNNING);
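+ /* the reset has completed once the running flag reads back as the complement of its pre-reset value */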
+ if (runnew == (runold ^ 0xff))
+ return 0;
+
+ msleep(10);
+ } while (time_before(jiffies, start + HZ / 4));
+
+ dev_err(mod->dev, "failed to reset CAN module\n");
+ return -ETIMEDOUT;
+}
+
+static void __devexit ican3_shutdown_module(struct ican3_dev *mod)
+{
+ ican3_msg_disconnect(mod);
+ ican3_reset_module(mod);
+}
+
+/*
+ * Startup an ICAN module, bringing it into fast mode
+ */
+static int __devinit ican3_startup_module(struct ican3_dev *mod)
+{
+ int ret;
+
+ ret = ican3_reset_module(mod);
+ if (ret) {
+ dev_err(mod->dev, "unable to reset module\n");
+ return ret;
+ }
+
+ /* re-enable interrupts so we can send messages */
+ iowrite8(1 << mod->num, &mod->ctrl->int_enable);
+
+ ret = ican3_msg_connect(mod);
+ if (ret) {
+ dev_err(mod->dev, "unable to connect to module\n");
+ return ret;
+ }
+
+ ican3_init_new_host_interface(mod);
+ ret = ican3_msg_newhostif(mod);
+ if (ret) {
+ dev_err(mod->dev, "unable to switch to new-style interface\n");
+ return ret;
+ }
+
+ /* default to "termination on" */
+ ret = ican3_set_termination(mod, true);
+ if (ret) {
+ dev_err(mod->dev, "unable to enable termination\n");
+ return ret;
+ }
+
+ /* default to "bus errors enabled" */
+ ret = ican3_set_buserror(mod, ICAN3_BUSERR_QUOTA_MAX);
+ if (ret) {
+ dev_err(mod->dev, "unable to set bus-error\n");
+ return ret;
+ }
+
+ ican3_init_fast_host_interface(mod);
+ ret = ican3_msg_fasthostif(mod);
+ if (ret) {
+ dev_err(mod->dev, "unable to switch to fast host interface\n");
+ return ret;
+ }
+
+ ret = ican3_set_id_filter(mod, true);
+ if (ret) {
+ dev_err(mod->dev, "unable to set acceptance filter\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * CAN Network Device
+ */
+
+static int ican3_open(struct net_device *ndev)
+{
+ struct ican3_dev *mod = netdev_priv(ndev);
+ u8 quota;
+ int ret;
+
+ /* open the CAN layer */
+ ret = open_candev(ndev);
+ if (ret) {
+ dev_err(mod->dev, "unable to start CAN layer\n");
+ return ret;
+ }
+
+ /* set the bus error generation state appropriately */
+ if (mod->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
+ quota = ICAN3_BUSERR_QUOTA_MAX;
+ else
+ quota = 0;
+
+ ret = ican3_set_buserror(mod, quota);
+ if (ret) {
+ dev_err(mod->dev, "unable to set bus-error\n");
+ close_candev(ndev);
+ return ret;
+ }
+
+ /* bring the bus online */
+ ret = ican3_set_bus_state(mod, true);
+ if (ret) {
+ dev_err(mod->dev, "unable to set bus-on\n");
+ close_candev(ndev);
+ return ret;
+ }
+
+ /* start up the network device */
+ mod->can.state = CAN_STATE_ERROR_ACTIVE;
+ netif_start_queue(ndev);
+
+ return 0;
+}
+
+static int ican3_stop(struct net_device *ndev)
+{
+ struct ican3_dev *mod = netdev_priv(ndev);
+ int ret;
+
+ /* stop the network device xmit routine */
+ netif_stop_queue(ndev);
+ mod->can.state = CAN_STATE_STOPPED;
+
+ /* bring the bus offline, stop receiving packets */
+ ret = ican3_set_bus_state(mod, false);
+ if (ret) {
+ dev_err(mod->dev, "unable to set bus-off\n");
+ return ret;
+ }
+
+ /* close the CAN layer */
+ close_candev(ndev);
+ return 0;
+}
+
+static int ican3_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct ican3_dev *mod = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
+ struct can_frame *cf = (struct can_frame *)skb->data;
+ struct ican3_fast_desc desc;
+ void __iomem *desc_addr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mod->lock, flags);
+
+ /* check that we can actually transmit */
+ if (!ican3_txok(mod)) {
+ dev_err(mod->dev, "no free descriptors, stopping queue\n");
+ netif_stop_queue(ndev);
+ spin_unlock_irqrestore(&mod->lock, flags);
+ return NETDEV_TX_BUSY;
+ }
+
+ /* copy the control bits of the descriptor */
+ ican3_set_page(mod, mod->fasttx_start + (mod->fasttx_num / 16));
+ desc_addr = mod->dpm + ((mod->fasttx_num % 16) * sizeof(desc));
+ memset(&desc, 0, sizeof(desc));
+ memcpy_fromio(&desc, desc_addr, 1);
+
+ /* convert the Linux CAN frame into ICAN3 format */
+ can_frame_to_ican3(mod, cf, &desc);
+
+ /*
+ * the programming manual says that you must set the IVALID bit, then
+ * interrupt, then set the valid bit. Quite weird, but it seems to be
+ * required for this to work
+ */
+ desc.control |= DESC_IVALID;
+ memcpy_toio(desc_addr, &desc, sizeof(desc));
+
+ /* generate a MODULbus interrupt to the microcontroller */
+ iowrite8(0x01, &mod->dpmctrl->interrupt);
+
+ desc.control ^= DESC_VALID;
+ memcpy_toio(desc_addr, &desc, sizeof(desc));
+
+ /* update the next buffer pointer */
+ mod->fasttx_num = (desc.control & DESC_WRAP) ? 0
+ : (mod->fasttx_num + 1);
+
+ /* update statistics */
+ stats->tx_packets++;
+ stats->tx_bytes += cf->can_dlc;
+ kfree_skb(skb);
+
+ /*
+ * This hardware doesn't have TX-done notifications, so we'll try and
+ * emulate it the best we can using ECHO skbs. Get the next TX
+ * descriptor, and see if we have room to send. If not, stop the queue.
+ * It will be woken when the ECHO skb for the current packet is recv'd.
+ */
+
+ /* copy the control bits of the descriptor */
+ if (!ican3_txok(mod))
+ netif_stop_queue(ndev);
+
+ spin_unlock_irqrestore(&mod->lock, flags);
+ return NETDEV_TX_OK;
+}
+
+static const struct net_device_ops ican3_netdev_ops = {
+ .ndo_open = ican3_open,
+ .ndo_stop = ican3_stop,
+ .ndo_start_xmit = ican3_xmit,
+};
+
+/*
+ * Low-level CAN Device
+ */
+
+/* This structure was stolen from drivers/net/can/sja1000/sja1000.c */
+static struct can_bittiming_const ican3_bittiming_const = {
+ .name = DRV_NAME,
+ .tseg1_min = 1,
+ .tseg1_max = 16,
+ .tseg2_min = 1,
+ .tseg2_max = 8,
+ .sjw_max = 4,
+ .brp_min = 1,
+ .brp_max = 64,
+ .brp_inc = 1,
+};
+
+/*
+ * This routine was stolen from drivers/net/can/sja1000/sja1000.c
+ *
+ * The bittiming register command for the ICAN3 just sets the bit timing
+ * registers on the SJA1000 chip directly
+ */
+static int ican3_set_bittiming(struct net_device *ndev)
+{
+ struct ican3_dev *mod = netdev_priv(ndev);
+ struct can_bittiming *bt = &mod->can.bittiming;
+ struct ican3_msg msg;
+ u8 btr0, btr1;
+
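+ /* SJA1000 bit timing layout: BTR0 holds BRP-1 (bits 5-0) and SJW-1 (bits 7-6); BTR1 holds TSEG1-1 (bits 3-0), TSEG2-1 (bits 6-4) and the triple-sampling flag (bit 7) */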
+ btr0 = ((bt->brp - 1) & 0x3f) | (((bt->sjw - 1) & 0x3) << 6);
+ btr1 = ((bt->prop_seg + bt->phase_seg1 - 1) & 0xf) |
+ (((bt->phase_seg2 - 1) & 0x7) << 4);
+ if (mod->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
+ btr1 |= 0x80;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.spec = MSG_CBTRREQ;
+ msg.len = cpu_to_le16(4);
+ msg.data[0] = 0x00;
+ msg.data[1] = 0x00;
+ msg.data[2] = btr0;
+ msg.data[3] = btr1;
+
+ return ican3_send_msg(mod, &msg);
+}
+
+static int ican3_set_mode(struct net_device *ndev, enum can_mode mode)
+{
+ struct ican3_dev *mod = netdev_priv(ndev);
+ int ret;
+
+ if (mode != CAN_MODE_START)
+ return -ENOTSUPP;
+
+ /* bring the bus online */
+ ret = ican3_set_bus_state(mod, true);
+ if (ret) {
+ dev_err(mod->dev, "unable to set bus-on\n");
+ return ret;
+ }
+
+ /* start up the network device */
+ mod->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ if (netif_queue_stopped(ndev))
+ netif_wake_queue(ndev);
+
+ return 0;
+}
+
+static int ican3_get_berr_counter(const struct net_device *ndev,
+ struct can_berr_counter *bec)
+{
+ struct ican3_dev *mod = netdev_priv(ndev);
+ int ret;
+
+ ret = ican3_send_inquiry(mod, INQUIRY_STATUS);
+ if (ret)
+ return ret;
+
+ ret = wait_for_completion_timeout(&mod->buserror_comp, HZ);
+ if (ret <= 0) {
+ dev_info(mod->dev, "%s timed out\n", __func__);
+ return -ETIMEDOUT;
+ }
+
+ bec->rxerr = mod->bec.rxerr;
+ bec->txerr = mod->bec.txerr;
+ return 0;
+}
+
+/*
+ * Sysfs Attributes
+ */
+
+static ssize_t ican3_sysfs_show_term(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ican3_dev *mod = netdev_priv(to_net_dev(dev));
+ int ret;
+
+ ret = ican3_send_inquiry(mod, INQUIRY_TERMINATION);
+ if (ret)
+ return ret;
+
+ ret = wait_for_completion_timeout(&mod->termination_comp, HZ);
+ if (ret <= 0) {
+ dev_info(mod->dev, "%s timed out\n", __func__);
+ return -ETIMEDOUT;
+ }
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", mod->termination_enabled);
+}
+
+static ssize_t ican3_sysfs_set_term(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ican3_dev *mod = netdev_priv(to_net_dev(dev));
+ unsigned long enable;
+ int ret;
+
+ if (strict_strtoul(buf, 0, &enable))
+ return -EINVAL;
+
+ ret = ican3_set_termination(mod, enable);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static DEVICE_ATTR(termination, S_IWUGO | S_IRUGO, ican3_sysfs_show_term,
+ ican3_sysfs_set_term);
+
+static struct attribute *ican3_sysfs_attrs[] = {
+ &dev_attr_termination.attr,
+ NULL,
+};
+
+static struct attribute_group ican3_sysfs_attr_group = {
+ .attrs = ican3_sysfs_attrs,
+};
+
+/*
+ * Platform Device Subsystem
+ */
+
+static int __devinit ican3_probe(struct platform_device *pdev)
+{
+ struct janz_platform_data *pdata;
+ struct net_device *ndev;
+ struct ican3_dev *mod;
+ struct resource *res;
+ struct device *dev;
+ int ret;
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata)
+ return -ENXIO;
+
+ dev_dbg(&pdev->dev, "probe: module number %d\n", pdata->modno);
+
+ /* save the struct device for printing */
+ dev = &pdev->dev;
+
+ /* allocate the CAN device and private data */
+ ndev = alloc_candev(sizeof(*mod), 0);
+ if (!ndev) {
+ dev_err(dev, "unable to allocate CANdev\n");
+ ret = -ENOMEM;
+ goto out_return;
+ }
+
+ platform_set_drvdata(pdev, ndev);
+ mod = netdev_priv(ndev);
+ mod->ndev = ndev;
+ mod->dev = &pdev->dev;
+ mod->num = pdata->modno;
+ netif_napi_add(ndev, &mod->napi, ican3_napi, ICAN3_RX_BUFFERS);
+ spin_lock_init(&mod->lock);
+ init_completion(&mod->termination_comp);
+ init_completion(&mod->buserror_comp);
+
+ /* setup device-specific sysfs attributes */
+ ndev->sysfs_groups[0] = &ican3_sysfs_attr_group;
+
+ /* the first unallocated page in the DPM is 9 */
+ mod->free_page = DPM_FREE_START;
+
+ ndev->netdev_ops = &ican3_netdev_ops;
+ ndev->flags |= IFF_ECHO;
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ mod->can.clock.freq = ICAN3_CAN_CLOCK;
+ mod->can.bittiming_const = &ican3_bittiming_const;
+ mod->can.do_set_bittiming = ican3_set_bittiming;
+ mod->can.do_set_mode = ican3_set_mode;
+ mod->can.do_get_berr_counter = ican3_get_berr_counter;
+ mod->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES
+ | CAN_CTRLMODE_BERR_REPORTING;
+
+ /* find our IRQ number */
+ mod->irq = platform_get_irq(pdev, 0);
+ if (mod->irq < 0) {
+ dev_err(dev, "IRQ line not found\n");
+ ret = -ENODEV;
+ goto out_free_ndev;
+ }
+
+ ndev->irq = mod->irq;
+
+ /* get access to the MODULbus registers for this module */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "MODULbus registers not found\n");
+ ret = -ENODEV;
+ goto out_free_ndev;
+ }
+
+ mod->dpm = ioremap(res->start, resource_size(res));
+ if (!mod->dpm) {
+ dev_err(dev, "MODULbus registers not ioremap\n");
+ ret = -ENOMEM;
+ goto out_free_ndev;
+ }
+
+ mod->dpmctrl = mod->dpm + DPM_PAGE_SIZE;
+
+ /* get access to the control registers for this module */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res) {
+ dev_err(dev, "CONTROL registers not found\n");
+ ret = -ENODEV;
+ goto out_iounmap_dpm;
+ }
+
+ mod->ctrl = ioremap(res->start, resource_size(res));
+ if (!mod->ctrl) {
+ dev_err(dev, "CONTROL registers not ioremap\n");
+ ret = -ENOMEM;
+ goto out_iounmap_dpm;
+ }
+
+ /* disable our IRQ, then hookup the IRQ handler */
+ iowrite8(1 << mod->num, &mod->ctrl->int_disable);
+ ret = request_irq(mod->irq, ican3_irq, IRQF_SHARED, DRV_NAME, mod);
+ if (ret) {
+ dev_err(dev, "unable to request IRQ\n");
+ goto out_iounmap_ctrl;
+ }
+
+ /* reset and initialize the CAN controller into fast mode */
+ napi_enable(&mod->napi);
+ ret = ican3_startup_module(mod);
+ if (ret) {
+ dev_err(dev, "%s: unable to start CANdev\n", __func__);
+ goto out_free_irq;
+ }
+
+ /* register with the Linux CAN layer */
+ ret = register_candev(ndev);
+ if (ret) {
+ dev_err(dev, "%s: unable to register CANdev\n", __func__);
+ goto out_free_irq;
+ }
+
+ dev_info(dev, "module %d: registered CAN device\n", pdata->modno);
+ return 0;
+
+out_free_irq:
+ napi_disable(&mod->napi);
+ iowrite8(1 << mod->num, &mod->ctrl->int_disable);
+ free_irq(mod->irq, mod);
+out_iounmap_ctrl:
+ iounmap(mod->ctrl);
+out_iounmap_dpm:
+ iounmap(mod->dpm);
+out_free_ndev:
+ free_candev(ndev);
+out_return:
+ return ret;
+}
+
+static int __devexit ican3_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct ican3_dev *mod = netdev_priv(ndev);
+
+ /* unregister the netdevice, stop interrupts */
+ unregister_netdev(ndev);
+ napi_disable(&mod->napi);
+ iowrite8(1 << mod->num, &mod->ctrl->int_disable);
+ free_irq(mod->irq, mod);
+
+ /* put the module into reset */
+ ican3_shutdown_module(mod);
+
+ /* unmap all registers */
+ iounmap(mod->ctrl);
+ iounmap(mod->dpm);
+
+ free_candev(ndev);
+
+ return 0;
+}
+
+static struct platform_driver ican3_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = ican3_probe,
+ .remove = __devexit_p(ican3_remove),
+};
+
+static int __init ican3_init(void)
+{
+ return platform_driver_register(&ican3_driver);
+}
+
+static void __exit ican3_exit(void)
+{
+ platform_driver_unregister(&ican3_driver);
+}
+
+MODULE_AUTHOR("Ira W. Snyder <iws@ovro.caltech.edu>");
+MODULE_DESCRIPTION("Janz MODULbus VMOD-ICAN3 Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:janz-ican3");
+
+module_init(ican3_init);
+module_exit(ican3_exit);
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
index b39b108318b4..b11a0cb5ed81 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/mcp251x.c
@@ -58,7 +58,6 @@
*
*/
-#include <linux/can.h>
#include <linux/can/core.h>
#include <linux/can/dev.h>
#include <linux/can/platform/mcp251x.h>
@@ -476,7 +475,6 @@ static netdev_tx_t mcp251x_hard_start_xmit(struct sk_buff *skb,
netif_stop_queue(net);
priv->tx_skb = skb;
- net->trans_start = jiffies;
queue_work(priv->wq, &priv->tx_work);
return NETDEV_TX_OK;
@@ -923,12 +921,16 @@ static int __devinit mcp251x_can_probe(struct spi_device *spi)
struct net_device *net;
struct mcp251x_priv *priv;
struct mcp251x_platform_data *pdata = spi->dev.platform_data;
+ int model = spi_get_device_id(spi)->driver_data;
int ret = -ENODEV;
if (!pdata)
/* Platform data is required for osc freq */
goto error_out;
+ if (model)
+ pdata->model = model;
+
/* Allocate can/net device */
net = alloc_candev(sizeof(struct mcp251x_priv), TX_ECHO_SKB_MAX);
if (!net) {
@@ -1118,6 +1120,15 @@ static int mcp251x_can_resume(struct spi_device *spi)
#define mcp251x_can_resume NULL
#endif
+static struct spi_device_id mcp251x_id_table[] = {
+ { "mcp251x", 0 /* Use pdata.model */ },
+ { "mcp2510", CAN_MCP251X_MCP2510 },
+ { "mcp2515", CAN_MCP251X_MCP2515 },
+ { },
+};
+
+MODULE_DEVICE_TABLE(spi, mcp251x_id_table);
+
static struct spi_driver mcp251x_can_driver = {
.driver = {
.name = DEVICE_NAME,
@@ -1125,6 +1136,7 @@ static struct spi_driver mcp251x_can_driver = {
.owner = THIS_MODULE,
},
+ .id_table = mcp251x_id_table,
.probe = mcp251x_can_probe,
.remove = __devexit_p(mcp251x_can_remove),
.suspend = mcp251x_can_suspend,
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index 03e7c48465a2..8af8442c694a 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -25,7 +25,6 @@
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>
-#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/of_platform.h>
#include <sysdev/fsl_soc.h>
@@ -393,15 +392,17 @@ static struct of_device_id __devinitdata mpc5xxx_can_table[] = {
};
static struct of_platform_driver mpc5xxx_can_driver = {
- .owner = THIS_MODULE,
- .name = "mpc5xxx_can",
+ .driver = {
+ .name = "mpc5xxx_can",
+ .owner = THIS_MODULE,
+ .of_match_table = mpc5xxx_can_table,
+ },
.probe = mpc5xxx_can_probe,
.remove = __devexit_p(mpc5xxx_can_remove),
#ifdef CONFIG_PM
.suspend = mpc5xxx_can_suspend,
.resume = mpc5xxx_can_resume,
#endif
- .match_table = mpc5xxx_can_table,
};
static int __init mpc5xxx_can_init(void)
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
index 6b7dd578d417..64c378cd0c34 100644
--- a/drivers/net/can/mscan/mscan.c
+++ b/drivers/net/can/mscan/mscan.c
@@ -28,7 +28,6 @@
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/list.h>
-#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
#include <linux/io.h>
diff --git a/drivers/net/can/sja1000/Kconfig b/drivers/net/can/sja1000/Kconfig
index 9e277d64a318..ae3505afd682 100644
--- a/drivers/net/can/sja1000/Kconfig
+++ b/drivers/net/can/sja1000/Kconfig
@@ -53,7 +53,9 @@ config CAN_PLX_PCI
Driver supports now:
- Adlink PCI-7841/cPCI-7841 card (http://www.adlinktech.com/)
- Adlink PCI-7841/cPCI-7841 SE card
+ - esd CAN-PCI/CPCI/PCI104/200 (http://www.esd.eu/)
+ - esd CAN-PCI/PMC/266
+ - esd CAN-PCIe/2000
- Marathon CAN-bus-PCI card (http://www.marathon.ru/)
- TEWS TECHNOLOGIES TPMC810 card (http://www.tews.com/)
-
endif
diff --git a/drivers/net/can/sja1000/ems_pci.c b/drivers/net/can/sja1000/ems_pci.c
index 5f53da0bc40c..36f4f9780c30 100644
--- a/drivers/net/can/sja1000/ems_pci.c
+++ b/drivers/net/can/sja1000/ems_pci.c
@@ -24,7 +24,6 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pci.h>
-#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/io.h>
diff --git a/drivers/net/can/sja1000/kvaser_pci.c b/drivers/net/can/sja1000/kvaser_pci.c
index 441e776a7f59..ed004cebd31f 100644
--- a/drivers/net/can/sja1000/kvaser_pci.c
+++ b/drivers/net/can/sja1000/kvaser_pci.c
@@ -36,7 +36,6 @@
#include <linux/netdevice.h>
#include <linux/delay.h>
#include <linux/pci.h>
-#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/io.h>
diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c
index 4aff4070db96..437b5c716a24 100644
--- a/drivers/net/can/sja1000/plx_pci.c
+++ b/drivers/net/can/sja1000/plx_pci.c
@@ -27,7 +27,6 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pci.h>
-#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/io.h>
@@ -41,7 +40,10 @@ MODULE_DESCRIPTION("Socket-CAN driver for PLX90xx PCI-bridge cards with "
MODULE_SUPPORTED_DEVICE("Adlink PCI-7841/cPCI-7841, "
"Adlink PCI-7841/cPCI-7841 SE, "
"Marathon CAN-bus-PCI, "
- "TEWS TECHNOLOGIES TPMC810");
+ "TEWS TECHNOLOGIES TPMC810, "
+ "esd CAN-PCI/CPCI/PCI104/200, "
+ "esd CAN-PCI/PMC/266, "
+ "esd CAN-PCIe/2000")
MODULE_LICENSE("GPL v2");
#define PLX_PCI_MAX_CHAN 2
@@ -50,11 +52,14 @@ struct plx_pci_card {
int channels; /* detected channels count */
struct net_device *net_dev[PLX_PCI_MAX_CHAN];
void __iomem *conf_addr;
+
+ /* Pointer to device-dependent reset function */
+ void (*reset_func)(struct pci_dev *pdev);
};
#define PLX_PCI_CAN_CLOCK (16000000 / 2)
-/* PLX90xx registers */
+/* PLX9030/9050/9052 registers */
#define PLX_INTCSR 0x4c /* Interrupt Control/Status */
#define PLX_CNTRL 0x50 /* User I/O, Direct Slave Response,
* Serial EEPROM, and Initialization
@@ -66,6 +71,14 @@ struct plx_pci_card {
#define PLX_PCI_INT_EN (1 << 6) /* PCI Interrupt Enable */
#define PLX_PCI_RESET (1 << 30) /* PCI Adapter Software Reset */
+/* PLX9056 registers */
+#define PLX9056_INTCSR 0x68 /* Interrupt Control/Status */
+#define PLX9056_CNTRL 0x6c /* Control / Software Reset */
+
+#define PLX9056_LINTI (1 << 11)
+#define PLX9056_PCI_INT_EN (1 << 8)
+#define PLX9056_PCI_RCR (1 << 29) /* Read Configuration Registers */
+
/*
* The board configuration is probably following:
* RX1 is connected to ground.
@@ -101,6 +114,13 @@ struct plx_pci_card {
#define ADLINK_PCI_VENDOR_ID 0x144A
#define ADLINK_PCI_DEVICE_ID 0x7841
+#define ESD_PCI_SUB_SYS_ID_PCI200 0x0004
+#define ESD_PCI_SUB_SYS_ID_PCI266 0x0009
+#define ESD_PCI_SUB_SYS_ID_PMC266 0x000e
+#define ESD_PCI_SUB_SYS_ID_CPCI200 0x010b
+#define ESD_PCI_SUB_SYS_ID_PCIE2000 0x0200
+#define ESD_PCI_SUB_SYS_ID_PCI104200 0x0501
+
#define MARATHON_PCI_DEVICE_ID 0x2715
#define TEWS_PCI_VENDOR_ID 0x1498
@@ -108,6 +128,7 @@ struct plx_pci_card {
static void plx_pci_reset_common(struct pci_dev *pdev);
static void plx_pci_reset_marathon(struct pci_dev *pdev);
+static void plx9056_pci_reset_common(struct pci_dev *pdev);
struct plx_pci_channel_map {
u32 bar;
@@ -148,6 +169,30 @@ static struct plx_pci_card_info plx_pci_card_info_adlink_se __devinitdata = {
/* based on PLX9052 */
};
+static struct plx_pci_card_info plx_pci_card_info_esd200 __devinitdata = {
+ "esd CAN-PCI/CPCI/PCI104/200", 2,
+ PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
+ {0, 0x00, 0x00}, { {2, 0x00, 0x80}, {2, 0x100, 0x80} },
+ &plx_pci_reset_common
+ /* based on PLX9030/9050 */
+};
+
+static struct plx_pci_card_info plx_pci_card_info_esd266 __devinitdata = {
+ "esd CAN-PCI/PMC/266", 2,
+ PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
+ {0, 0x00, 0x00}, { {2, 0x00, 0x80}, {2, 0x100, 0x80} },
+ &plx9056_pci_reset_common
+ /* based on PLX9056 */
+};
+
+static struct plx_pci_card_info plx_pci_card_info_esd2000 __devinitdata = {
+ "esd CAN-PCIe/2000", 2,
+ PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
+ {0, 0x00, 0x00}, { {2, 0x00, 0x80}, {2, 0x100, 0x80} },
+ &plx9056_pci_reset_common
+ /* based on PEX8311 */
+};
+
static struct plx_pci_card_info plx_pci_card_info_marathon __devinitdata = {
"Marathon CAN-bus-PCI", 2,
PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
@@ -180,6 +225,48 @@ static DEFINE_PCI_DEVICE_TABLE(plx_pci_tbl) = {
(kernel_ulong_t)&plx_pci_card_info_adlink_se
},
{
+ /* esd CAN-PCI/200 */
+ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050,
+ PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_PCI200,
+ 0, 0,
+ (kernel_ulong_t)&plx_pci_card_info_esd200
+ },
+ {
+ /* esd CAN-CPCI/200 */
+ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030,
+ PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_CPCI200,
+ 0, 0,
+ (kernel_ulong_t)&plx_pci_card_info_esd200
+ },
+ {
+ /* esd CAN-PCI104/200 */
+ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030,
+ PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_PCI104200,
+ 0, 0,
+ (kernel_ulong_t)&plx_pci_card_info_esd200
+ },
+ {
+ /* esd CAN-PCI/266 */
+ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9056,
+ PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_PCI266,
+ 0, 0,
+ (kernel_ulong_t)&plx_pci_card_info_esd266
+ },
+ {
+ /* esd CAN-PMC/266 */
+ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9056,
+ PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_PMC266,
+ 0, 0,
+ (kernel_ulong_t)&plx_pci_card_info_esd266
+ },
+ {
+ /* esd CAN-PCIE/2000 */
+ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9056,
+ PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_PCIE2000,
+ 0, 0,
+ (kernel_ulong_t)&plx_pci_card_info_esd2000
+ },
+ {
/* Marathon CAN-bus-PCI card */
PCI_VENDOR_ID_PLX, MARATHON_PCI_DEVICE_ID,
PCI_ANY_ID, PCI_ANY_ID,
@@ -242,7 +329,7 @@ static inline int plx_pci_check_sja1000(const struct sja1000_priv *priv)
}
/*
- * PLX90xx software reset
+ * PLX9030/50/52 software reset
* Also LRESET# asserts and brings to reset device on the Local Bus (if wired).
* For most cards it's enough for reset the SJA1000 chips.
*/
@@ -259,6 +346,38 @@ static void plx_pci_reset_common(struct pci_dev *pdev)
iowrite32(cntrl, card->conf_addr + PLX_CNTRL);
};
+/*
+ * PLX9056 software reset
+ * Assert LRESET# and reset device(s) on the Local Bus (if wired).
+ */
+static void plx9056_pci_reset_common(struct pci_dev *pdev)
+{
+ struct plx_pci_card *card = pci_get_drvdata(pdev);
+ u32 cntrl;
+
+ /* issue a local bus reset */
+ cntrl = ioread32(card->conf_addr + PLX9056_CNTRL);
+ cntrl |= PLX_PCI_RESET;
+ iowrite32(cntrl, card->conf_addr + PLX9056_CNTRL);
+ udelay(100);
+ cntrl ^= PLX_PCI_RESET;
+ iowrite32(cntrl, card->conf_addr + PLX9056_CNTRL);
+
+ /* reload local configuration from EEPROM */
+ cntrl |= PLX9056_PCI_RCR;
+ iowrite32(cntrl, card->conf_addr + PLX9056_CNTRL);
+
+ /*
+ * There is no safe way to poll for the end
+ * of the reconfiguration process. Waiting for 10ms
+ * is safe.
+ */
+ mdelay(10);
+
+ cntrl ^= PLX9056_PCI_RCR;
+ iowrite32(cntrl, card->conf_addr + PLX9056_CNTRL);
+};
+
/* Special reset function for Marathon card */
static void plx_pci_reset_marathon(struct pci_dev *pdev)
{
@@ -302,13 +421,16 @@ static void plx_pci_del_card(struct pci_dev *pdev)
free_sja1000dev(dev);
}
- plx_pci_reset_common(pdev);
+ card->reset_func(pdev);
/*
- * Disable interrupts from PCI-card (PLX90xx) and disable Local_1,
- * Local_2 interrupts
+ * Disable interrupts from PCI-card and disable local
+ * interrupts
*/
- iowrite32(0x0, card->conf_addr + PLX_INTCSR);
+ if (pdev->device != PCI_DEVICE_ID_PLX_9056)
+ iowrite32(0x0, card->conf_addr + PLX_INTCSR);
+ else
+ iowrite32(0x0, card->conf_addr + PLX9056_INTCSR);
if (card->conf_addr)
pci_iounmap(pdev, card->conf_addr);
@@ -367,6 +489,7 @@ static int __devinit plx_pci_add_card(struct pci_dev *pdev,
card->conf_addr = addr + ci->conf_map.offset;
ci->reset_func(pdev);
+ card->reset_func = ci->reset_func;
/* Detect available channels */
for (i = 0; i < ci->channel_count; i++) {
@@ -438,10 +561,17 @@ static int __devinit plx_pci_add_card(struct pci_dev *pdev,
* Enable interrupts from PCI-card (PLX90xx) and enable Local_1,
* Local_2 interrupts from the SJA1000 chips
*/
- val = ioread32(card->conf_addr + PLX_INTCSR);
- val |= PLX_LINT1_EN | PLX_LINT2_EN | PLX_PCI_INT_EN;
- iowrite32(val, card->conf_addr + PLX_INTCSR);
-
+ if (pdev->device != PCI_DEVICE_ID_PLX_9056) {
+ val = ioread32(card->conf_addr + PLX_INTCSR);
+ if (pdev->subsystem_vendor == PCI_VENDOR_ID_ESDGMBH)
+ val |= PLX_LINT1_EN | PLX_PCI_INT_EN;
+ else
+ val |= PLX_LINT1_EN | PLX_LINT2_EN | PLX_PCI_INT_EN;
+ iowrite32(val, card->conf_addr + PLX_INTCSR);
+ } else {
+ iowrite32(PLX9056_LINTI | PLX9056_PCI_INT_EN,
+ card->conf_addr + PLX9056_INTCSR);
+ }
return 0;
failure_cleanup:
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index 145b1a731a53..0a8de01d52f7 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -60,7 +60,6 @@
#include <linux/skbuff.h>
#include <linux/delay.h>
-#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
@@ -84,6 +83,20 @@ static struct can_bittiming_const sja1000_bittiming_const = {
.brp_inc = 1,
};
+static void sja1000_write_cmdreg(struct sja1000_priv *priv, u8 val)
+{
+ unsigned long flags;
+
+ /*
+ * The command register needs some locking and time to settle
+ * the write_reg() operation - especially on SMP systems.
+ */
+ spin_lock_irqsave(&priv->cmdreg_lock, flags);
+ priv->write_reg(priv, REG_CMR, val);
+ priv->read_reg(priv, REG_SR);
+ spin_unlock_irqrestore(&priv->cmdreg_lock, flags);
+}
+
static int sja1000_probe_chip(struct net_device *dev)
{
struct sja1000_priv *priv = netdev_priv(dev);
@@ -293,11 +306,9 @@ static netdev_tx_t sja1000_start_xmit(struct sk_buff *skb,
for (i = 0; i < dlc; i++)
priv->write_reg(priv, dreg++, cf->data[i]);
- dev->trans_start = jiffies;
-
can_put_echo_skb(skb, dev, 0);
- priv->write_reg(priv, REG_CMR, CMD_TR);
+ sja1000_write_cmdreg(priv, CMD_TR);
return NETDEV_TX_OK;
}
@@ -346,7 +357,7 @@ static void sja1000_rx(struct net_device *dev)
cf->can_id = id;
/* release receive buffer */
- priv->write_reg(priv, REG_CMR, CMD_RRB);
+ sja1000_write_cmdreg(priv, CMD_RRB);
netif_rx(skb);
@@ -374,7 +385,7 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
stats->rx_over_errors++;
stats->rx_errors++;
- priv->write_reg(priv, REG_CMR, CMD_CDO); /* clear bit */
+ sja1000_write_cmdreg(priv, CMD_CDO); /* clear bit */
}
if (isrc & IRQ_EI) {
@@ -588,6 +599,8 @@ struct net_device *alloc_sja1000dev(int sizeof_priv)
priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES |
CAN_CTRLMODE_BERR_REPORTING;
+ spin_lock_init(&priv->cmdreg_lock);
+
if (sizeof_priv)
priv->priv = (void *)priv + sizeof(struct sja1000_priv);
diff --git a/drivers/net/can/sja1000/sja1000.h b/drivers/net/can/sja1000/sja1000.h
index 97a622b9302f..de8e778f6832 100644
--- a/drivers/net/can/sja1000/sja1000.h
+++ b/drivers/net/can/sja1000/sja1000.h
@@ -167,6 +167,7 @@ struct sja1000_priv {
void __iomem *reg_base; /* ioremap'ed address to registers */
unsigned long irq_flags; /* for request_irq() */
+ spinlock_t cmdreg_lock; /* lock for concurrent cmd register writes */
u16 flags; /* custom mode flags */
u8 ocr; /* output control register */
diff --git a/drivers/net/can/sja1000/sja1000_isa.c b/drivers/net/can/sja1000/sja1000_isa.c
index a6a51f155962..496223e9e2fc 100644
--- a/drivers/net/can/sja1000/sja1000_isa.c
+++ b/drivers/net/can/sja1000/sja1000_isa.c
@@ -23,7 +23,6 @@
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/io.h>
-#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/platform/sja1000.h>
diff --git a/drivers/net/can/sja1000/sja1000_of_platform.c b/drivers/net/can/sja1000/sja1000_of_platform.c
index 9dd076a626a5..ac1a83d7c204 100644
--- a/drivers/net/can/sja1000/sja1000_of_platform.c
+++ b/drivers/net/can/sja1000/sja1000_of_platform.c
@@ -38,7 +38,6 @@
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/delay.h>
-#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/of_platform.h>
@@ -72,7 +71,7 @@ static int __devexit sja1000_ofp_remove(struct of_device *ofdev)
{
struct net_device *dev = dev_get_drvdata(&ofdev->dev);
struct sja1000_priv *priv = netdev_priv(dev);
- struct device_node *np = ofdev->node;
+ struct device_node *np = ofdev->dev.of_node;
struct resource res;
dev_set_drvdata(&ofdev->dev, NULL);
@@ -91,7 +90,7 @@ static int __devexit sja1000_ofp_remove(struct of_device *ofdev)
static int __devinit sja1000_ofp_probe(struct of_device *ofdev,
const struct of_device_id *id)
{
- struct device_node *np = ofdev->node;
+ struct device_node *np = ofdev->dev.of_node;
struct net_device *dev;
struct sja1000_priv *priv;
struct resource res;
@@ -216,11 +215,13 @@ static struct of_device_id __devinitdata sja1000_ofp_table[] = {
MODULE_DEVICE_TABLE(of, sja1000_ofp_table);
static struct of_platform_driver sja1000_ofp_driver = {
- .owner = THIS_MODULE,
- .name = DRV_NAME,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DRV_NAME,
+ .of_match_table = sja1000_ofp_table,
+ },
.probe = sja1000_ofp_probe,
.remove = __devexit_p(sja1000_ofp_remove),
- .match_table = sja1000_ofp_table,
};
static int __init sja1000_ofp_init(void)
diff --git a/drivers/net/can/sja1000/sja1000_platform.c b/drivers/net/can/sja1000/sja1000_platform.c
index 628374c2a05f..d9fadc489b32 100644
--- a/drivers/net/can/sja1000/sja1000_platform.c
+++ b/drivers/net/can/sja1000/sja1000_platform.c
@@ -24,7 +24,6 @@
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/irq.h>
-#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/platform/sja1000.h>
#include <linux/io.h>
@@ -37,16 +36,36 @@ MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION("Socket-CAN driver for SJA1000 on the platform bus");
MODULE_LICENSE("GPL v2");
-static u8 sp_read_reg(const struct sja1000_priv *priv, int reg)
+static u8 sp_read_reg8(const struct sja1000_priv *priv, int reg)
{
return ioread8(priv->reg_base + reg);
}
-static void sp_write_reg(const struct sja1000_priv *priv, int reg, u8 val)
+static void sp_write_reg8(const struct sja1000_priv *priv, int reg, u8 val)
{
iowrite8(val, priv->reg_base + reg);
}
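+/* with 16-bit (32-bit) bus attachment the SJA1000 registers are spaced 2 (4) bytes apart */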
+static u8 sp_read_reg16(const struct sja1000_priv *priv, int reg)
+{
+ return ioread8(priv->reg_base + reg * 2);
+}
+
+static void sp_write_reg16(const struct sja1000_priv *priv, int reg, u8 val)
+{
+ iowrite8(val, priv->reg_base + reg * 2);
+}
+
+static u8 sp_read_reg32(const struct sja1000_priv *priv, int reg)
+{
+ return ioread8(priv->reg_base + reg * 4);
+}
+
+static void sp_write_reg32(const struct sja1000_priv *priv, int reg, u8 val)
+{
+ iowrite8(val, priv->reg_base + reg * 4);
+}
+
static int sp_probe(struct platform_device *pdev)
{
int err;
@@ -90,14 +109,29 @@ static int sp_probe(struct platform_device *pdev)
priv = netdev_priv(dev);
dev->irq = res_irq->start;
- priv->irq_flags = res_irq->flags & IRQF_TRIGGER_MASK;
+ priv->irq_flags = res_irq->flags & (IRQF_TRIGGER_MASK | IRQF_SHARED);
priv->reg_base = addr;
- priv->read_reg = sp_read_reg;
- priv->write_reg = sp_write_reg;
- priv->can.clock.freq = pdata->clock;
+ /* The CAN clock frequency is half the oscillator clock frequency */
+ priv->can.clock.freq = pdata->osc_freq / 2;
priv->ocr = pdata->ocr;
priv->cdr = pdata->cdr;
+ switch (res_mem->flags & IORESOURCE_MEM_TYPE_MASK) {
+ case IORESOURCE_MEM_32BIT:
+ priv->read_reg = sp_read_reg32;
+ priv->write_reg = sp_write_reg32;
+ break;
+ case IORESOURCE_MEM_16BIT:
+ priv->read_reg = sp_read_reg16;
+ priv->write_reg = sp_write_reg16;
+ break;
+ case IORESOURCE_MEM_8BIT:
+ default:
+ priv->read_reg = sp_read_reg8;
+ priv->write_reg = sp_write_reg8;
+ break;
+ }
+
dev_set_drvdata(&pdev->dev, dev);
SET_NETDEV_DEV(dev, &pdev->dev);
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index 0c3d2ba0d178..4d07f1ee7168 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -47,7 +47,6 @@
#include <linux/platform_device.h>
#include <linux/clk.h>
-#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
#include <linux/can/platform/ti_hecc.h>
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index d800b598ae3d..1fc0871d2ef7 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -300,8 +300,6 @@ static void ems_usb_read_interrupt_callback(struct urb *urb)
else if (err)
dev_err(netdev->dev.parent,
"failed resubmitting intr urb: %d\n", err);
-
- return;
}
static void ems_usb_rx_can_msg(struct ems_usb *dev, struct ems_cpc_msg *msg)
@@ -497,8 +495,6 @@ resubmit_urb:
else if (retval)
dev_err(netdev->dev.parent,
"failed resubmitting read bulk urb: %d\n", retval);
-
- return;
}
/*
@@ -516,8 +512,8 @@ static void ems_usb_write_bulk_callback(struct urb *urb)
netdev = dev->netdev;
/* free up our allocated buffer */
- usb_buffer_free(urb->dev, urb->transfer_buffer_length,
- urb->transfer_buffer, urb->transfer_dma);
+ usb_free_coherent(urb->dev, urb->transfer_buffer_length,
+ urb->transfer_buffer, urb->transfer_dma);
atomic_dec(&dev->active_tx_urbs);
@@ -614,8 +610,8 @@ static int ems_usb_start(struct ems_usb *dev)
return -ENOMEM;
}
- buf = usb_buffer_alloc(dev->udev, RX_BUFFER_SIZE, GFP_KERNEL,
- &urb->transfer_dma);
+ buf = usb_alloc_coherent(dev->udev, RX_BUFFER_SIZE, GFP_KERNEL,
+ &urb->transfer_dma);
if (!buf) {
dev_err(netdev->dev.parent,
"No memory left for USB buffer\n");
@@ -635,8 +631,8 @@ static int ems_usb_start(struct ems_usb *dev)
netif_device_detach(dev->netdev);
usb_unanchor_urb(urb);
- usb_buffer_free(dev->udev, RX_BUFFER_SIZE, buf,
- urb->transfer_dma);
+ usb_free_coherent(dev->udev, RX_BUFFER_SIZE, buf,
+ urb->transfer_dma);
break;
}
@@ -777,7 +773,7 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
goto nomem;
}
- buf = usb_buffer_alloc(dev->udev, size, GFP_ATOMIC, &urb->transfer_dma);
+ buf = usb_alloc_coherent(dev->udev, size, GFP_ATOMIC, &urb->transfer_dma);
if (!buf) {
dev_err(netdev->dev.parent, "No memory left for USB buffer\n");
usb_free_urb(urb);
@@ -820,7 +816,7 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
*/
if (!context) {
usb_unanchor_urb(urb);
- usb_buffer_free(dev->udev, size, buf, urb->transfer_dma);
+ usb_free_coherent(dev->udev, size, buf, urb->transfer_dma);
dev_warn(netdev->dev.parent, "couldn't find free context\n");
@@ -845,7 +841,7 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
can_free_echo_skb(netdev, context->echo_index);
usb_unanchor_urb(urb);
- usb_buffer_free(dev->udev, size, buf, urb->transfer_dma);
+ usb_free_coherent(dev->udev, size, buf, urb->transfer_dma);
dev_kfree_skb(skb);
atomic_dec(&dev->active_tx_urbs);
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index 9bd155e4111c..04a03f7003a0 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -2889,7 +2889,6 @@ static netdev_tx_t cas_start_xmit(struct sk_buff *skb, struct net_device *dev)
*/
if (cas_xmit_tx_ringN(cp, ring++ & N_TX_RINGS_MASK, skb))
return NETDEV_TX_BUSY;
- dev->trans_start = jiffies;
return NETDEV_TX_OK;
}
@@ -2957,20 +2956,20 @@ static void cas_process_mc_list(struct cas *cp)
{
u16 hash_table[16];
u32 crc;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
int i = 1;
memset(hash_table, 0, sizeof(hash_table));
- netdev_for_each_mc_addr(dmi, cp->dev) {
+ netdev_for_each_mc_addr(ha, cp->dev) {
if (i <= CAS_MC_EXACT_MATCH_SIZE) {
/* use the alternate mac address registers for the
* first 15 multicast addresses
*/
- writel((dmi->dmi_addr[4] << 8) | dmi->dmi_addr[5],
+ writel((ha->addr[4] << 8) | ha->addr[5],
cp->regs + REG_MAC_ADDRN(i*3 + 0));
- writel((dmi->dmi_addr[2] << 8) | dmi->dmi_addr[3],
+ writel((ha->addr[2] << 8) | ha->addr[3],
cp->regs + REG_MAC_ADDRN(i*3 + 1));
- writel((dmi->dmi_addr[0] << 8) | dmi->dmi_addr[1],
+ writel((ha->addr[0] << 8) | ha->addr[1],
cp->regs + REG_MAC_ADDRN(i*3 + 2));
i++;
}
@@ -2978,7 +2977,7 @@ static void cas_process_mc_list(struct cas *cp)
/* use hw hash table for the next series of
* multicast addresses
*/
- crc = ether_crc_le(ETH_ALEN, dmi->dmi_addr);
+ crc = ether_crc_le(ETH_ALEN, ha->addr);
crc >>= 24;
hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
}
@@ -4825,7 +4824,7 @@ static int cas_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
break;
default:
break;
- };
+ }
mutex_unlock(&cp->pm_mutex);
return rc;
diff --git a/drivers/net/chelsio/pm3393.c b/drivers/net/chelsio/pm3393.c
index 9e631b9d3948..7dbb16d36fff 100644
--- a/drivers/net/chelsio/pm3393.c
+++ b/drivers/net/chelsio/pm3393.c
@@ -377,12 +377,13 @@ static int pm3393_set_rx_mode(struct cmac *cmac, struct t1_rx_mode *rm)
rx_mode |= SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN;
} else if (t1_rx_mode_mc_cnt(rm)) {
/* Accept one or more multicast(s). */
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
int bit;
u16 mc_filter[4] = { 0, };
- netdev_for_each_mc_addr(dmi, t1_get_netdev(rm)) {
- bit = (ether_crc(ETH_ALEN, dmi->dmi_addr) >> 23) & 0x3f; /* bit[23:28] */
+ netdev_for_each_mc_addr(ha, t1_get_netdev(rm)) {
+ /* bit[23:28] */
+ bit = (ether_crc(ETH_ALEN, ha->addr) >> 23) & 0x3f;
mc_filter[bit >> 4] |= 1 << (bit & 0xf);
}
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_LOW, mc_filter[0]);
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index df3a1410696e..f01cfdb995de 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -162,14 +162,14 @@ struct respQ_e {
*/
struct cmdQ_ce {
struct sk_buff *skb;
- DECLARE_PCI_UNMAP_ADDR(dma_addr);
- DECLARE_PCI_UNMAP_LEN(dma_len);
+ DEFINE_DMA_UNMAP_ADDR(dma_addr);
+ DEFINE_DMA_UNMAP_LEN(dma_len);
};
struct freelQ_ce {
struct sk_buff *skb;
- DECLARE_PCI_UNMAP_ADDR(dma_addr);
- DECLARE_PCI_UNMAP_LEN(dma_len);
+ DEFINE_DMA_UNMAP_ADDR(dma_addr);
+ DEFINE_DMA_UNMAP_LEN(dma_len);
};
/*
@@ -460,7 +460,7 @@ static struct sk_buff *sched_skb(struct sge *sge, struct sk_buff *skb,
again:
for (i = 0; i < MAX_NPORTS; i++) {
- s->port = ++s->port & (MAX_NPORTS - 1);
+ s->port = (s->port + 1) & (MAX_NPORTS - 1);
skbq = &s->p[s->port].skbq;
skb = skb_peek(skbq);
@@ -518,8 +518,8 @@ static void free_freelQ_buffers(struct pci_dev *pdev, struct freelQ *q)
while (q->credits--) {
struct freelQ_ce *ce = &q->centries[cidx];
- pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
- pci_unmap_len(ce, dma_len),
+ pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
+ dma_unmap_len(ce, dma_len),
PCI_DMA_FROMDEVICE);
dev_kfree_skb(ce->skb);
ce->skb = NULL;
@@ -633,9 +633,9 @@ static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n)
q->in_use -= n;
ce = &q->centries[cidx];
while (n--) {
- if (likely(pci_unmap_len(ce, dma_len))) {
- pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
- pci_unmap_len(ce, dma_len),
+ if (likely(dma_unmap_len(ce, dma_len))) {
+ pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
+ dma_unmap_len(ce, dma_len),
PCI_DMA_TODEVICE);
if (q->sop)
q->sop = 0;
@@ -851,8 +851,8 @@ static void refill_free_list(struct sge *sge, struct freelQ *q)
skb_reserve(skb, sge->rx_pkt_pad);
ce->skb = skb;
- pci_unmap_addr_set(ce, dma_addr, mapping);
- pci_unmap_len_set(ce, dma_len, dma_len);
+ dma_unmap_addr_set(ce, dma_addr, mapping);
+ dma_unmap_len_set(ce, dma_len, dma_len);
e->addr_lo = (u32)mapping;
e->addr_hi = (u64)mapping >> 32;
e->len_gen = V_CMD_LEN(dma_len) | V_CMD_GEN1(q->genbit);
@@ -1059,13 +1059,13 @@ static inline struct sk_buff *get_packet(struct pci_dev *pdev,
skb_reserve(skb, 2); /* align IP header */
skb_put(skb, len);
pci_dma_sync_single_for_cpu(pdev,
- pci_unmap_addr(ce, dma_addr),
- pci_unmap_len(ce, dma_len),
+ dma_unmap_addr(ce, dma_addr),
+ dma_unmap_len(ce, dma_len),
PCI_DMA_FROMDEVICE);
skb_copy_from_linear_data(ce->skb, skb->data, len);
pci_dma_sync_single_for_device(pdev,
- pci_unmap_addr(ce, dma_addr),
- pci_unmap_len(ce, dma_len),
+ dma_unmap_addr(ce, dma_addr),
+ dma_unmap_len(ce, dma_len),
PCI_DMA_FROMDEVICE);
recycle_fl_buf(fl, fl->cidx);
return skb;
@@ -1077,8 +1077,8 @@ use_orig_buf:
return NULL;
}
- pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
- pci_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
+ pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
+ dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
skb = ce->skb;
prefetch(skb->data);
@@ -1100,8 +1100,8 @@ static void unexpected_offload(struct adapter *adapter, struct freelQ *fl)
struct freelQ_ce *ce = &fl->centries[fl->cidx];
struct sk_buff *skb = ce->skb;
- pci_dma_sync_single_for_cpu(adapter->pdev, pci_unmap_addr(ce, dma_addr),
- pci_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
+ pci_dma_sync_single_for_cpu(adapter->pdev, dma_unmap_addr(ce, dma_addr),
+ dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
pr_err("%s: unexpected offload packet, cmd %u\n",
adapter->name, *skb->data);
recycle_fl_buf(fl, fl->cidx);
@@ -1123,7 +1123,7 @@ static inline unsigned int compute_large_page_tx_descs(struct sk_buff *skb)
if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) {
unsigned int nfrags = skb_shinfo(skb)->nr_frags;
- unsigned int i, len = skb->len - skb->data_len;
+ unsigned int i, len = skb_headlen(skb);
while (len > SGE_TX_DESC_MAX_PLEN) {
count++;
len -= SGE_TX_DESC_MAX_PLEN;
@@ -1182,7 +1182,7 @@ static inline unsigned int write_large_page_tx_descs(unsigned int pidx,
write_tx_desc(e1, *desc_mapping, SGE_TX_DESC_MAX_PLEN,
*gen, nfrags == 0 && *desc_len == 0);
ce1->skb = NULL;
- pci_unmap_len_set(ce1, dma_len, 0);
+ dma_unmap_len_set(ce1, dma_len, 0);
*desc_mapping += SGE_TX_DESC_MAX_PLEN;
if (*desc_len) {
ce1++;
@@ -1219,10 +1219,10 @@ static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
ce = &q->centries[pidx];
mapping = pci_map_single(adapter->pdev, skb->data,
- skb->len - skb->data_len, PCI_DMA_TODEVICE);
+ skb_headlen(skb), PCI_DMA_TODEVICE);
desc_mapping = mapping;
- desc_len = skb->len - skb->data_len;
+ desc_len = skb_headlen(skb);
flags = F_CMD_DATAVALID | F_CMD_SOP |
V_CMD_EOP(nfrags == 0 && desc_len <= SGE_TX_DESC_MAX_PLEN) |
@@ -1233,7 +1233,7 @@ static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
e->addr_hi = (u64)desc_mapping >> 32;
e->len_gen = V_CMD_LEN(first_desc_len) | V_CMD_GEN1(gen);
ce->skb = NULL;
- pci_unmap_len_set(ce, dma_len, 0);
+ dma_unmap_len_set(ce, dma_len, 0);
if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN &&
desc_len > SGE_TX_DESC_MAX_PLEN) {
@@ -1257,8 +1257,8 @@ static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
}
ce->skb = NULL;
- pci_unmap_addr_set(ce, dma_addr, mapping);
- pci_unmap_len_set(ce, dma_len, skb->len - skb->data_len);
+ dma_unmap_addr_set(ce, dma_addr, mapping);
+ dma_unmap_len_set(ce, dma_len, skb_headlen(skb));
for (i = 0; nfrags--; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -1284,8 +1284,8 @@ static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
write_tx_desc(e1, desc_mapping, desc_len, gen,
nfrags == 0);
ce->skb = NULL;
- pci_unmap_addr_set(ce, dma_addr, mapping);
- pci_unmap_len_set(ce, dma_len, frag->size);
+ dma_unmap_addr_set(ce, dma_addr, mapping);
+ dma_unmap_len_set(ce, dma_len, frag->size);
}
ce->skb = skb;
wmb();
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index 4b451a7c03e9..fe925663d39a 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -1143,12 +1143,12 @@ static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
spin_lock_bh(&cp->cnic_ulp_lock);
if (num_wqes > cnic_kwq_avail(cp) &&
- !(cp->cnic_local_flags & CNIC_LCL_FL_KWQ_INIT)) {
+ !test_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags)) {
spin_unlock_bh(&cp->cnic_ulp_lock);
return -EAGAIN;
}
- cp->cnic_local_flags &= ~CNIC_LCL_FL_KWQ_INIT;
+ clear_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);
prod = cp->kwq_prod_idx;
sw_prod = prod & MAX_KWQ_IDX;
@@ -2092,7 +2092,6 @@ end:
i += j;
j = 1;
}
- return;
}
static u16 cnic_bnx2_next_idx(u16 idx)
@@ -2146,17 +2145,56 @@ static int cnic_get_kcqes(struct cnic_dev *dev, u16 hw_prod, u16 *sw_prod)
return last_cnt;
}
+static int cnic_l2_completion(struct cnic_local *cp)
+{
+ u16 hw_cons, sw_cons;
+ union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *)
+ (cp->l2_ring + (2 * BCM_PAGE_SIZE));
+ u32 cmd;
+ int comp = 0;
+
+ if (!test_bit(CNIC_F_BNX2X_CLASS, &cp->dev->flags))
+ return 0;
+
+ hw_cons = *cp->rx_cons_ptr;
+ if ((hw_cons & BNX2X_MAX_RCQ_DESC_CNT) == BNX2X_MAX_RCQ_DESC_CNT)
+ hw_cons++;
+
+ sw_cons = cp->rx_cons;
+ while (sw_cons != hw_cons) {
+ u8 cqe_fp_flags;
+
+ cqe = &cqe_ring[sw_cons & BNX2X_MAX_RCQ_DESC_CNT];
+ cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
+ if (cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE) {
+ cmd = le32_to_cpu(cqe->ramrod_cqe.conn_and_cmd_data);
+ cmd >>= COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT;
+ if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP ||
+ cmd == RAMROD_CMD_ID_ETH_HALT)
+ comp++;
+ }
+ sw_cons = BNX2X_NEXT_RCQE(sw_cons);
+ }
+ return comp;
+}
+
static void cnic_chk_pkt_rings(struct cnic_local *cp)
{
u16 rx_cons = *cp->rx_cons_ptr;
u16 tx_cons = *cp->tx_cons_ptr;
+ int comp = 0;
if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) {
+ if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
+ comp = cnic_l2_completion(cp);
+
cp->tx_cons = tx_cons;
cp->rx_cons = rx_cons;
uio_event_notify(cp->cnic_uinfo);
}
+ if (comp)
+ clear_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
}
static int cnic_service_bnx2(void *data, void *status_blk)
@@ -2325,7 +2363,6 @@ done:
status_idx, IGU_INT_ENABLE, 1);
cp->kcq_prod_idx = sw_prod;
- return;
}
static int cnic_service_bnx2x(void *data, void *status_blk)
@@ -3330,13 +3367,9 @@ static int cnic_cm_shutdown(struct cnic_dev *dev)
static void cnic_init_context(struct cnic_dev *dev, u32 cid)
{
- struct cnic_local *cp = dev->cnic_priv;
u32 cid_addr;
int i;
- if (CHIP_NUM(cp) == CHIP_NUM_5709)
- return;
-
cid_addr = GET_CID_ADDR(cid);
for (i = 0; i < CTX_SIZE; i += 4)
@@ -3493,14 +3526,11 @@ static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
sb_id = cp->status_blk_num;
tx_cid = 20;
- cnic_init_context(dev, tx_cid);
- cnic_init_context(dev, tx_cid + 1);
cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2;
if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
struct status_block_msix *sblk = cp->status_blk.bnx2;
tx_cid = TX_TSS_CID + sb_id - 1;
- cnic_init_context(dev, tx_cid);
CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) |
(TX_TSS_CID << 7));
cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index;
@@ -3519,6 +3549,9 @@ static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
} else {
+ cnic_init_context(dev, tx_cid);
+ cnic_init_context(dev, tx_cid + 1);
+
offset0 = BNX2_L2CTX_TYPE;
offset1 = BNX2_L2CTX_CMD_TYPE;
offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
@@ -3692,7 +3725,7 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
cp->max_kwq_idx = MAX_KWQ_IDX;
cp->kwq_prod_idx = 0;
cp->kwq_con_idx = 0;
- cp->cnic_local_flags |= CNIC_LCL_FL_KWQ_INIT;
+ set_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);
if (CHIP_NUM(cp) == CHIP_NUM_5706 || CHIP_NUM(cp) == CHIP_NUM_5708)
cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15;
@@ -4170,6 +4203,8 @@ static void cnic_init_rings(struct cnic_dev *dev)
for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++)
CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]);
+ set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
+
cnic_init_bnx2x_tx_ring(dev);
cnic_init_bnx2x_rx_ring(dev);
@@ -4177,6 +4212,15 @@ static void cnic_init_rings(struct cnic_dev *dev)
l5_data.phy_address.hi = 0;
cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP,
BNX2X_ISCSI_L2_CID, ETH_CONNECTION_TYPE, &l5_data);
+ i = 0;
+ while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
+ ++i < 10)
+ msleep(1);
+
+ if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
+ netdev_err(dev->netdev,
+ "iSCSI CLIENT_SETUP did not complete\n");
+ cnic_kwq_completion(dev, 1);
cnic_ring_ctl(dev, BNX2X_ISCSI_L2_CID, cli, 1);
}
}
@@ -4189,14 +4233,25 @@ static void cnic_shutdown_rings(struct cnic_dev *dev)
struct cnic_local *cp = dev->cnic_priv;
u32 cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp));
union l5cm_specific_data l5_data;
+ int i;
cnic_ring_ctl(dev, BNX2X_ISCSI_L2_CID, cli, 0);
+ set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
+
l5_data.phy_address.lo = cli;
l5_data.phy_address.hi = 0;
cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_HALT,
BNX2X_ISCSI_L2_CID, ETH_CONNECTION_TYPE, &l5_data);
- msleep(10);
+ i = 0;
+ while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
+ ++i < 10)
+ msleep(1);
+
+ if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
+ netdev_err(dev->netdev,
+ "iSCSI CLIENT_HALT did not complete\n");
+ cnic_kwq_completion(dev, 1);
memset(&l5_data, 0, sizeof(l5_data));
cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CFC_DEL,
@@ -4317,7 +4372,15 @@ static void cnic_stop_hw(struct cnic_dev *dev)
{
if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
struct cnic_local *cp = dev->cnic_priv;
+ int i = 0;
+ /* Need to wait for the ring shutdown event to complete
+ * before clearing the CNIC_UP flag.
+ */
+ while (cp->uio_dev != -1 && i < 15) {
+ msleep(100);
+ i++;
+ }
clear_bit(CNIC_F_CNIC_UP, &dev->flags);
rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], NULL);
synchronize_rcu();
@@ -4628,7 +4691,6 @@ static void __exit cnic_exit(void)
{
unregister_netdevice_notifier(&cnic_netdev_notifier);
cnic_release();
- return;
}
module_init(cnic_init);
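
The cnic.c changes above move cnic_local_flags from open-coded mask arithmetic to the atomic test_bit/set_bit/clear_bit helpers (the field becomes an unsigned long, as the bitops API requires) and replace the fixed msleep(10) after the HALT ramrod with a bounded poll on a completion flag. A hedged sketch of both pieces, with illustrative names:

	#include <linux/bitops.h>
	#include <linux/delay.h>
	#include <linux/types.h>

	#define EX_FL_L2_WAIT	0	/* illustrative bit number */

	struct ex_ctx {
		unsigned long flags;	/* bitops operate on unsigned long */
	};

	/* Returns true if the completion path cleared the flag within ~10 ms. */
	static bool ex_wait_l2_done(struct ex_ctx *ctx)
	{
		int i;

		set_bit(EX_FL_L2_WAIT, &ctx->flags);
		/* ... submit the ramrod whose completion clears the bit ... */

		for (i = 0; i < 10 && test_bit(EX_FL_L2_WAIT, &ctx->flags); i++)
			msleep(1);

		return !test_bit(EX_FL_L2_WAIT, &ctx->flags);
	}
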
diff --git a/drivers/net/cnic.h b/drivers/net/cnic.h
index a0d853dff983..08b1235d987d 100644
--- a/drivers/net/cnic.h
+++ b/drivers/net/cnic.h
@@ -179,9 +179,9 @@ struct cnic_local {
#define ULP_F_CALL_PENDING 2
struct cnic_ulp_ops *ulp_ops[MAX_CNIC_ULP_TYPE];
- /* protected by ulp_lock */
- u32 cnic_local_flags;
-#define CNIC_LCL_FL_KWQ_INIT 0x00000001
+ unsigned long cnic_local_flags;
+#define CNIC_LCL_FL_KWQ_INIT 0x0
+#define CNIC_LCL_FL_L2_WAIT 0x1
struct cnic_dev *dev;
@@ -349,6 +349,10 @@ struct bnx2x_bd_chain_next {
#define BNX2X_RCQ_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_rx_cqe))
#define BNX2X_MAX_RCQ_DESC_CNT (BNX2X_RCQ_DESC_CNT - 1)
+#define BNX2X_NEXT_RCQE(x) (((x) & BNX2X_MAX_RCQ_DESC_CNT) == \
+ (BNX2X_MAX_RCQ_DESC_CNT - 1)) ? \
+ ((x) + 2) : ((x) + 1)
+
#define BNX2X_DEF_SB_ID 16
#define BNX2X_ISCSI_RX_SB_INDEX_NUM \
diff --git a/drivers/net/cnic_if.h b/drivers/net/cnic_if.h
index 110c62072e6f..0c55177db046 100644
--- a/drivers/net/cnic_if.h
+++ b/drivers/net/cnic_if.h
@@ -12,8 +12,8 @@
#ifndef CNIC_IF_H
#define CNIC_IF_H
-#define CNIC_MODULE_VERSION "2.1.1"
-#define CNIC_MODULE_RELDATE "Feb 22, 2010"
+#define CNIC_MODULE_VERSION "2.1.2"
+#define CNIC_MODULE_RELDATE "May 26, 2010"
#define CNIC_ULP_RDMA 0
#define CNIC_ULP_ISCSI 1
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c
index 60777fd90b33..3c58db595285 100644
--- a/drivers/net/cpmac.c
+++ b/drivers/net/cpmac.c
@@ -328,7 +328,7 @@ static int cpmac_config(struct net_device *dev, struct ifmap *map)
static void cpmac_set_multicast_list(struct net_device *dev)
{
- struct dev_mc_list *iter;
+ struct netdev_hw_addr *ha;
u8 tmp;
u32 mbp, bit, hash[2] = { 0, };
struct cpmac_priv *priv = netdev_priv(dev);
@@ -348,19 +348,19 @@ static void cpmac_set_multicast_list(struct net_device *dev)
* cpmac uses some strange mac address hashing
* (not crc32)
*/
- netdev_for_each_mc_addr(iter, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
bit = 0;
- tmp = iter->dmi_addr[0];
+ tmp = ha->addr[0];
bit ^= (tmp >> 2) ^ (tmp << 4);
- tmp = iter->dmi_addr[1];
+ tmp = ha->addr[1];
bit ^= (tmp >> 4) ^ (tmp << 2);
- tmp = iter->dmi_addr[2];
+ tmp = ha->addr[2];
bit ^= (tmp >> 6) ^ tmp;
- tmp = iter->dmi_addr[3];
+ tmp = ha->addr[3];
bit ^= (tmp >> 2) ^ (tmp << 4);
- tmp = iter->dmi_addr[4];
+ tmp = ha->addr[4];
bit ^= (tmp >> 4) ^ (tmp << 2);
- tmp = iter->dmi_addr[5];
+ tmp = ha->addr[5];
bit ^= (tmp >> 6) ^ tmp;
bit &= 0x3f;
hash[bit / 32] |= 1 << (bit % 32);
@@ -579,7 +579,6 @@ static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
spin_lock(&priv->lock);
- dev->trans_start = jiffies;
spin_unlock(&priv->lock);
desc->dataflags = CPMAC_SOP | CPMAC_EOP | CPMAC_OWN;
desc->skb = skb;
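
cpmac (and most of the drivers below) switch the multicast walk from struct dev_mc_list, whose address lived in dmi_addr, to struct netdev_hw_addr, whose address lives in addr; netdev_for_each_mc_addr() now yields the latter. A small sketch of the common CRC-hash filter built this way; the function and the 64-bit filter layout are illustrative, not any one driver's:

	#include <linux/netdevice.h>
	#include <linux/etherdevice.h>
	#include <linux/crc32.h>

	/* Each multicast address is CRC'd and its low 6 bits pick a bit
	 * in a 64-bit hash filter. */
	static u64 ex_build_mc_hash(struct net_device *dev)
	{
		struct netdev_hw_addr *ha;
		u64 hash = 0;

		netdev_for_each_mc_addr(ha, dev) {
			u32 crc = ether_crc_le(ETH_ALEN, ha->addr);

			hash |= 1ULL << (crc & 0x3f);
		}
		return hash;
	}
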
diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c
index 61a33914e96f..7e00027b9f8e 100644
--- a/drivers/net/cris/eth_v10.c
+++ b/drivers/net/cris/eth_v10.c
@@ -1108,7 +1108,7 @@ e100_send_packet(struct sk_buff *skb, struct net_device *dev)
myNextTxDesc->skb = skb;
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
e100_hardware_send_packet(np, buf, skb->len);
@@ -1595,16 +1595,16 @@ set_multicast_list(struct net_device *dev)
} else {
/* MC mode, receive normal and MC packets */
char hash_ix;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
char *baddr;
lo_bits = 0x00000000ul;
hi_bits = 0x00000000ul;
- netdev_for_each_mc_addr(dmi, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
/* Calculate the hash index for the GA registers */
hash_ix = 0;
- baddr = dmi->dmi_addr;
+ baddr = ha->addr;
hash_ix ^= (*baddr) & 0x3f;
hash_ix ^= ((*baddr) >> 6) & 0x03;
++baddr;
diff --git a/drivers/net/cs89x0.c b/drivers/net/cs89x0.c
index 4c38491b8efb..2ccb9f12805b 100644
--- a/drivers/net/cs89x0.c
+++ b/drivers/net/cs89x0.c
@@ -902,7 +902,6 @@ get_dma_channel(struct net_device *dev)
return;
}
}
- return;
}
static void
@@ -1554,7 +1553,6 @@ static netdev_tx_t net_send_packet(struct sk_buff *skb,struct net_device *dev)
writewords(dev->base_addr, TX_FRAME_PORT,skb->data,(skb->len+1) >>1);
spin_unlock_irqrestore(&lp->lock, flags);
lp->stats.tx_bytes += skb->len;
- dev->trans_start = jiffies;
dev_kfree_skb (skb);
/*
@@ -1673,7 +1671,6 @@ count_rx_errors(int status, struct net_local *lp)
/* per str 172 */
lp->stats.rx_crc_errors++;
if (status & RX_DRIBBLE) lp->stats.rx_frame_errors++;
- return;
}
/* We have a good packet(s), get it/them out of the buffers. */
diff --git a/drivers/net/cxgb3/l2t.c b/drivers/net/cxgb3/l2t.c
index 2f3ee721c3e1..f452c4003253 100644
--- a/drivers/net/cxgb3/l2t.c
+++ b/drivers/net/cxgb3/l2t.c
@@ -207,7 +207,6 @@ again:
*/
neigh_event_send(e->neigh, NULL);
}
- return;
}
EXPORT_SYMBOL(t3_l2t_send_event);
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 07d7e7fab3f5..5962b911b5bd 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -118,7 +118,7 @@ struct rx_sw_desc { /* SW state per Rx descriptor */
struct sk_buff *skb;
struct fl_pg_chunk pg_chunk;
};
- DECLARE_PCI_UNMAP_ADDR(dma_addr);
+ DEFINE_DMA_UNMAP_ADDR(dma_addr);
};
struct rsp_desc { /* response queue descriptor */
@@ -208,7 +208,7 @@ static inline int need_skb_unmap(void)
* unmapping by checking if DECLARE_PCI_UNMAP_ADDR defines anything.
*/
struct dummy {
- DECLARE_PCI_UNMAP_ADDR(addr);
+ DEFINE_DMA_UNMAP_ADDR(addr);
};
return sizeof(struct dummy) != 0;
@@ -363,7 +363,7 @@ static void clear_rx_desc(struct pci_dev *pdev, const struct sge_fl *q,
put_page(d->pg_chunk.page);
d->pg_chunk.page = NULL;
} else {
- pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr),
+ pci_unmap_single(pdev, dma_unmap_addr(d, dma_addr),
q->buf_size, PCI_DMA_FROMDEVICE);
kfree_skb(d->skb);
d->skb = NULL;
@@ -419,7 +419,7 @@ static inline int add_one_rx_buf(void *va, unsigned int len,
if (unlikely(pci_dma_mapping_error(pdev, mapping)))
return -ENOMEM;
- pci_unmap_addr_set(sd, dma_addr, mapping);
+ dma_unmap_addr_set(sd, dma_addr, mapping);
d->addr_lo = cpu_to_be32(mapping);
d->addr_hi = cpu_to_be32((u64) mapping >> 32);
@@ -515,7 +515,7 @@ nomem: q->alloc_failed++;
break;
}
mapping = sd->pg_chunk.mapping + sd->pg_chunk.offset;
- pci_unmap_addr_set(sd, dma_addr, mapping);
+ dma_unmap_addr_set(sd, dma_addr, mapping);
add_one_rx_chunk(mapping, d, q->gen);
pci_dma_sync_single_for_device(adap->pdev, mapping,
@@ -791,11 +791,11 @@ static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
if (likely(skb != NULL)) {
__skb_put(skb, len);
pci_dma_sync_single_for_cpu(adap->pdev,
- pci_unmap_addr(sd, dma_addr), len,
+ dma_unmap_addr(sd, dma_addr), len,
PCI_DMA_FROMDEVICE);
memcpy(skb->data, sd->skb->data, len);
pci_dma_sync_single_for_device(adap->pdev,
- pci_unmap_addr(sd, dma_addr), len,
+ dma_unmap_addr(sd, dma_addr), len,
PCI_DMA_FROMDEVICE);
} else if (!drop_thres)
goto use_orig_buf;
@@ -810,7 +810,7 @@ recycle:
goto recycle;
use_orig_buf:
- pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
+ pci_unmap_single(adap->pdev, dma_unmap_addr(sd, dma_addr),
fl->buf_size, PCI_DMA_FROMDEVICE);
skb = sd->skb;
skb_put(skb, len);
@@ -843,7 +843,7 @@ static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl,
struct sk_buff *newskb, *skb;
struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
- dma_addr_t dma_addr = pci_unmap_addr(sd, dma_addr);
+ dma_addr_t dma_addr = dma_unmap_addr(sd, dma_addr);
newskb = skb = q->pg_skb;
if (!skb && (len <= SGE_RX_COPY_THRES)) {
@@ -2097,7 +2097,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
fl->credits--;
pci_dma_sync_single_for_cpu(adap->pdev,
- pci_unmap_addr(sd, dma_addr),
+ dma_unmap_addr(sd, dma_addr),
fl->buf_size - SGE_PG_RSVD,
PCI_DMA_FROMDEVICE);
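
A related detail in the cxgb3 hunks above: need_skb_unmap() decides at compile time whether unmap bookkeeping exists at all by measuring a struct that contains only a DEFINE_DMA_UNMAP_ADDR() field; on platforms where that macro expands to nothing, the struct has size zero and the unmap path can be skipped. A hedged restatement of the probe, outside the driver:

	#include <linux/dma-mapping.h>
	#include <linux/types.h>

	/* True only when the platform actually stores DMA unmap state. */
	static inline bool ex_need_unmap_state(void)
	{
		struct ex_probe {
			DEFINE_DMA_UNMAP_ADDR(addr);
		};

		return sizeof(struct ex_probe) != 0;
	}
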
diff --git a/drivers/net/cxgb3/xgmac.c b/drivers/net/cxgb3/xgmac.c
index c142a2132e9f..3af19a550372 100644
--- a/drivers/net/cxgb3/xgmac.c
+++ b/drivers/net/cxgb3/xgmac.c
@@ -311,16 +311,16 @@ int t3_mac_set_rx_mode(struct cmac *mac, struct net_device *dev)
if (dev->flags & IFF_ALLMULTI)
hash_lo = hash_hi = 0xffffffff;
else {
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
int exact_addr_idx = mac->nucast;
hash_lo = hash_hi = 0;
- netdev_for_each_mc_addr(dmi, dev)
+ netdev_for_each_mc_addr(ha, dev)
if (exact_addr_idx < EXACT_ADDR_FILTERS)
set_addr_filter(mac, exact_addr_idx++,
- dmi->dmi_addr);
+ ha->addr);
else {
- int hash = hash_hw_addr(dmi->dmi_addr);
+ int hash = hash_hw_addr(ha->addr);
if (hash < 32)
hash_lo |= (1 << hash);
diff --git a/drivers/net/cxgb4/cxgb4.h b/drivers/net/cxgb4/cxgb4.h
index 3d8ff4889b56..dd1770e075e6 100644
--- a/drivers/net/cxgb4/cxgb4.h
+++ b/drivers/net/cxgb4/cxgb4.h
@@ -53,7 +53,7 @@
enum {
MAX_NPORTS = 4, /* max # of ports */
- SERNUM_LEN = 16, /* Serial # length */
+ SERNUM_LEN = 24, /* Serial # length */
EC_LEN = 16, /* E/C length */
ID_LEN = 16, /* ID length */
};
@@ -477,7 +477,6 @@ struct adapter {
struct pci_dev *pdev;
struct device *pdev_dev;
unsigned long registered_device_map;
- unsigned long open_device_map;
unsigned long flags;
const char *name;
@@ -651,14 +650,11 @@ int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
struct link_config *lc);
int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port);
int t4_seeprom_wp(struct adapter *adapter, bool enable);
-int t4_read_flash(struct adapter *adapter, unsigned int addr,
- unsigned int nwords, u32 *data, int byte_oriented);
int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size);
int t4_check_fw_version(struct adapter *adapter);
int t4_prep_adapter(struct adapter *adapter);
int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
void t4_fatal_err(struct adapter *adapter);
-void t4_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on);
int t4_set_trace_filter(struct adapter *adapter, const struct trace_params *tp,
int filter_index, int enable);
void t4_get_trace_filter(struct adapter *adapter, struct trace_params *tp,
@@ -709,7 +705,8 @@ int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int vf, unsigned int viid);
int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
- int mtu, int promisc, int all_multi, int bcast, bool sleep_ok);
+ int mtu, int promisc, int all_multi, int bcast, int vlanex,
+ bool sleep_ok);
int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
unsigned int viid, bool free, unsigned int naddr,
const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok);
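
t4_set_rxmode() grows a vlanex argument alongside promisc, all_multi and bcast; all of them are tri-state, where 0 disables, 1 enables and a negative value means "leave unchanged" (the implementation substitutes the field's all-ones mask in that case, as the t4_hw.c hunk below shows). A tiny sketch of the convention, with an invented helper name:

	/* Map a tri-state request onto a firmware field: <0 means "no change",
	 * which this API encodes as the field's all-ones mask value. */
	static inline unsigned int ex_fw_tristate(int req, unsigned int mask)
	{
		return req < 0 ? mask : (unsigned int)req;
	}
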
diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c
index a7e30a23d322..58045b00cf40 100644
--- a/drivers/net/cxgb4/cxgb4_main.c
+++ b/drivers/net/cxgb4/cxgb4_main.c
@@ -240,9 +240,9 @@ static int set_addr_filters(const struct net_device *dev, bool sleep)
u16 filt_idx[7];
const u8 *addr[7];
int ret, naddr = 0;
- const struct dev_addr_list *d;
const struct netdev_hw_addr *ha;
int uc_cnt = netdev_uc_count(dev);
+ int mc_cnt = netdev_mc_count(dev);
const struct port_info *pi = netdev_priv(dev);
/* first do the secondary unicast addresses */
@@ -260,9 +260,9 @@ static int set_addr_filters(const struct net_device *dev, bool sleep)
}
/* next set up the multicast addresses */
- netdev_for_each_mc_addr(d, dev) {
- addr[naddr++] = d->dmi_addr;
- if (naddr >= ARRAY_SIZE(addr) || d->next == NULL) {
+ netdev_for_each_mc_addr(ha, dev) {
+ addr[naddr++] = ha->addr;
+ if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
ret = t4_alloc_mac_filt(pi->adapter, 0, pi->viid, free,
naddr, addr, filt_idx, &mhash, sleep);
if (ret < 0)
@@ -290,7 +290,7 @@ static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
if (ret == 0)
ret = t4_set_rxmode(pi->adapter, 0, pi->viid, mtu,
(dev->flags & IFF_PROMISC) ? 1 : 0,
- (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1,
+ (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
sleep_ok);
return ret;
}
@@ -311,11 +311,11 @@ static int link_start(struct net_device *dev)
* that step explicitly.
*/
ret = t4_set_rxmode(pi->adapter, 0, pi->viid, dev->mtu, -1, -1, -1,
- true);
+ pi->vlan_grp != NULL, true);
if (ret == 0) {
ret = t4_change_mac(pi->adapter, 0, pi->viid,
pi->xact_addr_filt, dev->dev_addr, true,
- false);
+ true);
if (ret >= 0) {
pi->xact_addr_filt = ret;
ret = 0;
@@ -859,6 +859,8 @@ static char stats_strings[][ETH_GSTRING_LEN] = {
"RxCsumGood ",
"VLANextractions ",
"VLANinsertions ",
+ "GROpackets ",
+ "GROmerged ",
};
static int get_sset_count(struct net_device *dev, int sset)
@@ -922,6 +924,8 @@ struct queue_port_stats {
u64 rx_csum;
u64 vlan_ex;
u64 vlan_ins;
+ u64 gro_pkts;
+ u64 gro_merged;
};
static void collect_sge_port_stats(const struct adapter *adap,
@@ -938,6 +942,8 @@ static void collect_sge_port_stats(const struct adapter *adap,
s->rx_csum += rx->stats.rx_cso;
s->vlan_ex += rx->stats.vlan_ex;
s->vlan_ins += tx->vlan_ins;
+ s->gro_pkts += rx->stats.lro_pkts;
+ s->gro_merged += rx->stats.lro_merged;
}
}
@@ -1711,6 +1717,18 @@ static int set_tso(struct net_device *dev, u32 value)
return 0;
}
+static int set_flags(struct net_device *dev, u32 flags)
+{
+ if (flags & ~ETH_FLAG_RXHASH)
+ return -EOPNOTSUPP;
+
+ if (flags & ETH_FLAG_RXHASH)
+ dev->features |= NETIF_F_RXHASH;
+ else
+ dev->features &= ~NETIF_F_RXHASH;
+ return 0;
+}
+
static struct ethtool_ops cxgb_ethtool_ops = {
.get_settings = get_settings,
.set_settings = set_settings,
@@ -1741,6 +1759,7 @@ static struct ethtool_ops cxgb_ethtool_ops = {
.get_wol = get_wol,
.set_wol = set_wol,
.set_tso = set_tso,
+ .set_flags = set_flags,
.flash_device = set_flash,
};
@@ -2308,6 +2327,9 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
register_netevent_notifier(&cxgb4_netevent_nb);
netevent_registered = true;
}
+
+ if (adap->flags & FULL_INIT_DONE)
+ ulds[uld].state_change(handle, CXGB4_STATE_UP);
}
static void attach_ulds(struct adapter *adap)
@@ -2414,23 +2436,17 @@ EXPORT_SYMBOL(cxgb4_unregister_uld);
*/
static int cxgb_up(struct adapter *adap)
{
- int err = 0;
+ int err;
- if (!(adap->flags & FULL_INIT_DONE)) {
- err = setup_sge_queues(adap);
- if (err)
- goto out;
- err = setup_rss(adap);
- if (err) {
- t4_free_sge_resources(adap);
- goto out;
- }
- if (adap->flags & USING_MSIX)
- name_msix_vecs(adap);
- adap->flags |= FULL_INIT_DONE;
- }
+ err = setup_sge_queues(adap);
+ if (err)
+ goto out;
+ err = setup_rss(adap);
+ if (err)
+ goto freeq;
if (adap->flags & USING_MSIX) {
+ name_msix_vecs(adap);
err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
adap->msix_info[0].desc, adap);
if (err)
@@ -2451,11 +2467,14 @@ static int cxgb_up(struct adapter *adap)
enable_rx(adap);
t4_sge_start(adap);
t4_intr_enable(adap);
+ adap->flags |= FULL_INIT_DONE;
notify_ulds(adap, CXGB4_STATE_UP);
out:
return err;
irq_err:
dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
+ freeq:
+ t4_free_sge_resources(adap);
goto out;
}
@@ -2471,6 +2490,9 @@ static void cxgb_down(struct adapter *adapter)
} else
free_irq(adapter->pdev->irq, adapter);
quiesce_rx(adapter);
+ t4_sge_stop(adapter);
+ t4_free_sge_resources(adapter);
+ adapter->flags &= ~FULL_INIT_DONE;
}
/*
@@ -2482,11 +2504,13 @@ static int cxgb_open(struct net_device *dev)
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
- if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
- return err;
+ if (!(adapter->flags & FULL_INIT_DONE)) {
+ err = cxgb_up(adapter);
+ if (err < 0)
+ return err;
+ }
dev->real_num_tx_queues = pi->nqsets;
- set_bit(pi->tx_chan, &adapter->open_device_map);
link_start(dev);
netif_tx_start_all_queues(dev);
return 0;
@@ -2494,19 +2518,12 @@ static int cxgb_open(struct net_device *dev)
static int cxgb_close(struct net_device *dev)
{
- int ret;
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
netif_tx_stop_all_queues(dev);
netif_carrier_off(dev);
- ret = t4_enable_vi(adapter, 0, pi->viid, false, false);
-
- clear_bit(pi->tx_chan, &adapter->open_device_map);
-
- if (!adapter->open_device_map)
- cxgb_down(adapter);
- return 0;
+ return t4_enable_vi(adapter, 0, pi->viid, false, false);
}
static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
@@ -2601,7 +2618,7 @@ static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
if (new_mtu < 81 || new_mtu > MAX_MTU) /* accommodate SACK */
return -EINVAL;
- ret = t4_set_rxmode(pi->adapter, 0, pi->viid, new_mtu, -1, -1, -1,
+ ret = t4_set_rxmode(pi->adapter, 0, pi->viid, new_mtu, -1, -1, -1, -1,
true);
if (!ret)
dev->mtu = new_mtu;
@@ -2632,7 +2649,8 @@ static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
struct port_info *pi = netdev_priv(dev);
pi->vlan_grp = grp;
- t4_set_vlan_accel(pi->adapter, 1 << pi->tx_chan, grp != NULL);
+ t4_set_rxmode(pi->adapter, 0, pi->viid, -1, -1, -1, -1, grp != NULL,
+ true);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -3066,6 +3084,12 @@ static void __devinit print_port_info(struct adapter *adap)
int i;
char buf[80];
+ const char *spd = "";
+
+ if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
+ spd = " 2.5 GT/s";
+ else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
+ spd = " 5 GT/s";
for_each_port(adap, i) {
struct net_device *dev = adap->port[i];
@@ -3085,10 +3109,10 @@ static void __devinit print_port_info(struct adapter *adap)
--bufp;
sprintf(bufp, "BASE-%s", base[pi->port_type]);
- netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s\n",
+ netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
adap->params.vpd.id, adap->params.rev,
buf, is_offload(adap) ? "R" : "",
- adap->params.pci.width,
+ adap->params.pci.width, spd,
(adap->flags & USING_MSIX) ? " MSI-X" :
(adap->flags & USING_MSI) ? " MSI" : "");
if (adap->name == dev->name)
@@ -3203,7 +3227,7 @@ static int __devinit init_one(struct pci_dev *pdev,
netdev->features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6;
netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
- netdev->features |= NETIF_F_GRO | highdma;
+ netdev->features |= NETIF_F_GRO | NETIF_F_RXHASH | highdma;
netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
netdev->vlan_features = netdev->features & VLAN_FEAT;
@@ -3334,8 +3358,8 @@ static void __devexit remove_one(struct pci_dev *pdev)
if (adapter->debugfs_root)
debugfs_remove_recursive(adapter->debugfs_root);
- t4_sge_stop(adapter);
- t4_free_sge_resources(adapter);
+ if (adapter->flags & FULL_INIT_DONE)
+ cxgb_down(adapter);
t4_free_mem(adapter->l2t);
t4_free_mem(adapter->tids.tid_tab);
disable_msi(adapter);
diff --git a/drivers/net/cxgb4/sge.c b/drivers/net/cxgb4/sge.c
index 14adc58e71c3..d1f8f225e45a 100644
--- a/drivers/net/cxgb4/sge.c
+++ b/drivers/net/cxgb4/sge.c
@@ -1471,7 +1471,7 @@ EXPORT_SYMBOL(cxgb4_pktgl_to_skb);
* Releases the pages of a packet gather list. We do not own the last
* page on the list and do not free it.
*/
-void t4_pktgl_free(const struct pkt_gl *gl)
+static void t4_pktgl_free(const struct pkt_gl *gl)
{
int n;
const skb_frag_t *p;
@@ -1524,6 +1524,8 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
skb->truesize += skb->data_len;
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb_record_rx_queue(skb, rxq->rspq.idx);
+ if (rxq->rspq.netdev->features & NETIF_F_RXHASH)
+ skb->rxhash = (__force u32)pkt->rsshdr.hash_val;
if (unlikely(pkt->vlan_ex)) {
struct port_info *pi = netdev_priv(rxq->rspq.netdev);
@@ -1565,7 +1567,7 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
if (unlikely(*(u8 *)rsp == CPL_TRACE_PKT))
return handle_trace_pkt(q->adap, si);
- pkt = (void *)&rsp[1];
+ pkt = (const struct cpl_rx_pkt *)rsp;
csum_ok = pkt->csum_calc && !pkt->err_vec;
if ((pkt->l2info & htonl(RXF_TCP)) &&
(q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) {
@@ -1583,6 +1585,9 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
__skb_pull(skb, RX_PKT_PAD); /* remove ethernet header padding */
skb->protocol = eth_type_trans(skb, q->netdev);
skb_record_rx_queue(skb, q->idx);
+ if (skb->dev->features & NETIF_F_RXHASH)
+ skb->rxhash = (__force u32)pkt->rsshdr.hash_val;
+
pi = netdev_priv(skb->dev);
rxq->stats.pkts++;
@@ -2047,7 +2052,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
adap->sge.ingr_map[iq->cntxt_id] = iq;
if (fl) {
- fl->cntxt_id = htons(c.fl0id);
+ fl->cntxt_id = ntohs(c.fl0id);
fl->avail = fl->pend_cred = 0;
fl->pidx = fl->cidx = 0;
fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0;
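
The cxgb4 sge.c hunks copy the hardware RSS hash from the completion into skb->rxhash, gated on NETIF_F_RXHASH so that the ethtool set_flags toggle added in cxgb4_main.c has effect. A minimal sketch of the receive-side half; the helper name is made up:

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	/* Hand the hardware-computed RSS hash to the stack only while the
	 * user has left receive hashing enabled on the device. */
	static void ex_record_rxhash(struct sk_buff *skb, struct net_device *dev,
				     u32 hw_hash)
	{
		if (dev->features & NETIF_F_RXHASH)
			skb->rxhash = hw_hash;
	}
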
diff --git a/drivers/net/cxgb4/t4_hw.c b/drivers/net/cxgb4/t4_hw.c
index a814a3afe123..da272a98fdbc 100644
--- a/drivers/net/cxgb4/t4_hw.c
+++ b/drivers/net/cxgb4/t4_hw.c
@@ -53,8 +53,8 @@
* at the time it indicated completion is stored there. Returns 0 if the
* operation completes and -EAGAIN otherwise.
*/
-int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
- int polarity, int attempts, int delay, u32 *valp)
+static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
+ int polarity, int attempts, int delay, u32 *valp)
{
while (1) {
u32 val = t4_read_reg(adapter, reg);
@@ -109,9 +109,9 @@ void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
* Reads registers that are accessed indirectly through an address/data
* register pair.
*/
-void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
- unsigned int data_reg, u32 *vals, unsigned int nregs,
- unsigned int start_idx)
+static void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
+ unsigned int data_reg, u32 *vals,
+ unsigned int nregs, unsigned int start_idx)
{
while (nregs--) {
t4_write_reg(adap, addr_reg, start_idx);
@@ -120,6 +120,7 @@ void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
}
}
+#if 0
/**
* t4_write_indirect - write indirectly addressed registers
* @adap: the adapter
@@ -132,15 +133,16 @@ void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
* Writes a sequential block of registers that are accessed indirectly
* through an address/data register pair.
*/
-void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
- unsigned int data_reg, const u32 *vals,
- unsigned int nregs, unsigned int start_idx)
+static void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
+ unsigned int data_reg, const u32 *vals,
+ unsigned int nregs, unsigned int start_idx)
{
while (nregs--) {
t4_write_reg(adap, addr_reg, start_idx++);
t4_write_reg(adap, data_reg, *vals++);
}
}
+#endif
/*
* Get the reply to a mailbox command and store it in @rpl in big-endian order.
@@ -345,33 +347,21 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
return 0;
}
-#define VPD_ENTRY(name, len) \
- u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]
-
/*
* Partial EEPROM Vital Product Data structure. Includes only the ID and
- * VPD-R sections.
+ * VPD-R header.
*/
-struct t4_vpd {
+struct t4_vpd_hdr {
u8 id_tag;
u8 id_len[2];
u8 id_data[ID_LEN];
u8 vpdr_tag;
u8 vpdr_len[2];
- VPD_ENTRY(pn, 16); /* part number */
- VPD_ENTRY(ec, EC_LEN); /* EC level */
- VPD_ENTRY(sn, SERNUM_LEN); /* serial number */
- VPD_ENTRY(na, 12); /* MAC address base */
- VPD_ENTRY(port_type, 8); /* port types */
- VPD_ENTRY(gpio, 14); /* GPIO usage */
- VPD_ENTRY(cclk, 6); /* core clock */
- VPD_ENTRY(port_addr, 8); /* port MDIO addresses */
- VPD_ENTRY(rv, 1); /* csum */
- u32 pad; /* for multiple-of-4 sizing and alignment */
};
#define EEPROM_STAT_ADDR 0x7bfc
#define VPD_BASE 0
+#define VPD_LEN 512
/**
* t4_seeprom_wp - enable/disable EEPROM write protection
@@ -396,16 +386,36 @@ int t4_seeprom_wp(struct adapter *adapter, bool enable)
*/
static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
- int ret;
- struct t4_vpd vpd;
- u8 *q = (u8 *)&vpd, csum;
+ int i, ret;
+ int ec, sn, v2;
+ u8 vpd[VPD_LEN], csum;
+ unsigned int vpdr_len;
+ const struct t4_vpd_hdr *v;
- ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), &vpd);
+ ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), vpd);
if (ret < 0)
return ret;
- for (csum = 0; q <= vpd.rv_data; q++)
- csum += *q;
+ v = (const struct t4_vpd_hdr *)vpd;
+ vpdr_len = pci_vpd_lrdt_size(&v->vpdr_tag);
+ if (vpdr_len + sizeof(struct t4_vpd_hdr) > VPD_LEN) {
+ dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len);
+ return -EINVAL;
+ }
+
+#define FIND_VPD_KW(var, name) do { \
+ var = pci_vpd_find_info_keyword(&v->id_tag, sizeof(struct t4_vpd_hdr), \
+ vpdr_len, name); \
+ if (var < 0) { \
+ dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \
+ return -EINVAL; \
+ } \
+ var += PCI_VPD_INFO_FLD_HDR_SIZE; \
+} while (0)
+
+ FIND_VPD_KW(i, "RV");
+ for (csum = 0; i >= 0; i--)
+ csum += vpd[i];
if (csum) {
dev_err(adapter->pdev_dev,
@@ -413,12 +423,18 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
return -EINVAL;
}
- p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
- memcpy(p->id, vpd.id_data, sizeof(vpd.id_data));
+ FIND_VPD_KW(ec, "EC");
+ FIND_VPD_KW(sn, "SN");
+ FIND_VPD_KW(v2, "V2");
+#undef FIND_VPD_KW
+
+ p->cclk = simple_strtoul(vpd + v2, NULL, 10);
+ memcpy(p->id, v->id_data, ID_LEN);
strim(p->id);
- memcpy(p->ec, vpd.ec_data, sizeof(vpd.ec_data));
+ memcpy(p->ec, vpd + ec, EC_LEN);
strim(p->ec);
- memcpy(p->sn, vpd.sn_data, sizeof(vpd.sn_data));
+ i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
+ memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
strim(p->sn);
return 0;
}
@@ -537,8 +553,8 @@ static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
* (i.e., big-endian), otherwise as 32-bit words in the platform's
* natural endianess.
*/
-int t4_read_flash(struct adapter *adapter, unsigned int addr,
- unsigned int nwords, u32 *data, int byte_oriented)
+static int t4_read_flash(struct adapter *adapter, unsigned int addr,
+ unsigned int nwords, u32 *data, int byte_oriented)
{
int ret;
@@ -870,22 +886,6 @@ int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
-/**
- * t4_set_vlan_accel - configure HW VLAN extraction
- * @adap: the adapter
- * @ports: bitmap of adapter ports to operate on
- * @on: enable (1) or disable (0) HW VLAN extraction
- *
- * Enables or disables HW extraction of VLAN tags for the ports specified
- * by @ports. @ports is a bitmap with the ith bit designating the port
- * associated with the ith adapter channel.
- */
-void t4_set_vlan_accel(struct adapter *adap, unsigned int ports, int on)
-{
- ports <<= VLANEXTENABLE_SHIFT;
- t4_set_reg_field(adap, TP_OUT_CONFIG, ports, on ? ports : 0);
-}
-
struct intr_info {
unsigned int mask; /* bits to check in interrupt status */
const char *msg; /* message to print or NULL */
@@ -2608,12 +2608,14 @@ int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
* @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
* @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
* @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
+ * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
* @sleep_ok: if true we may sleep while awaiting command completion
*
* Sets Rx properties of a virtual interface.
*/
int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
- int mtu, int promisc, int all_multi, int bcast, bool sleep_ok)
+ int mtu, int promisc, int all_multi, int bcast, int vlanex,
+ bool sleep_ok)
{
struct fw_vi_rxmode_cmd c;
@@ -2626,15 +2628,18 @@ int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_MASK;
if (bcast < 0)
bcast = FW_VI_RXMODE_CMD_BROADCASTEN_MASK;
+ if (vlanex < 0)
+ vlanex = FW_VI_RXMODE_CMD_VLANEXEN_MASK;
memset(&c, 0, sizeof(c));
c.op_to_viid = htonl(FW_CMD_OP(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST |
FW_CMD_WRITE | FW_VI_RXMODE_CMD_VIID(viid));
c.retval_len16 = htonl(FW_LEN16(c));
- c.mtu_to_broadcasten = htonl(FW_VI_RXMODE_CMD_MTU(mtu) |
- FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
- FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
- FW_VI_RXMODE_CMD_BROADCASTEN(bcast));
+ c.mtu_to_vlanexen = htonl(FW_VI_RXMODE_CMD_MTU(mtu) |
+ FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
+ FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
+ FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
+ FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
}
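
get_vpd_params() stops hard-coding the VPD layout and instead locates keywords with the generic PCI VPD helpers (pci_vpd_lrdt_size, pci_vpd_find_info_keyword, pci_vpd_info_field_size). A hedged sketch of reading one keyword this way; the buffer size, search offsets and error handling are simplified relative to the driver:

	#include <linux/pci.h>
	#include <linux/string.h>

	/* Read the serial-number ("SN") keyword out of a device's VPD image. */
	static int ex_read_vpd_sn(struct pci_dev *pdev, char *sn, int sn_len)
	{
		u8 vpd[512];
		int ret, kw, len;

		ret = pci_read_vpd(pdev, 0, sizeof(vpd), vpd);
		if (ret < 0)
			return ret;

		/* Simplified: scan the whole image, not just the VPD-R area. */
		kw = pci_vpd_find_info_keyword(vpd, 0, sizeof(vpd), "SN");
		if (kw < 0)
			return -ENODEV;

		len = pci_vpd_info_field_size(&vpd[kw]);
		kw += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (len > sn_len - 1)
			len = sn_len - 1;
		memcpy(sn, &vpd[kw], len);
		sn[len] = '\0';
		return 0;
	}
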
diff --git a/drivers/net/cxgb4/t4_msg.h b/drivers/net/cxgb4/t4_msg.h
index fdb117443144..7a981b81afaf 100644
--- a/drivers/net/cxgb4/t4_msg.h
+++ b/drivers/net/cxgb4/t4_msg.h
@@ -503,6 +503,7 @@ struct cpl_rx_data_ack {
};
struct cpl_rx_pkt {
+ struct rss_header rsshdr;
u8 opcode;
#if defined(__LITTLE_ENDIAN_BITFIELD)
u8 iff:4;
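
Embedding struct rss_header at the front of struct cpl_rx_pkt is what allows the sge.c change above from pkt = (void *)&rsp[1] to a cast of rsp itself: the CPL now starts at the beginning of the response, and the hash consumed for skb->rxhash sits in that leading header. Illustrative layout only:

	#include <linux/types.h>

	struct ex_rss_hdr {			/* stands in for struct rss_header */
		__be64 word;
	};

	struct ex_rx_pkt {
		struct ex_rss_hdr rsshdr;	/* newly embedded leading header */
		u8 opcode;
		/* ... remaining CPL fields ... */
	};

	static const struct ex_rx_pkt *ex_parse(const __be64 *rsp)
	{
		/* before: (void *)&rsp[1];  after: */
		return (const struct ex_rx_pkt *)rsp;
	}
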
diff --git a/drivers/net/cxgb4/t4fw_api.h b/drivers/net/cxgb4/t4fw_api.h
index 3393d05a388a..63991d68950e 100644
--- a/drivers/net/cxgb4/t4fw_api.h
+++ b/drivers/net/cxgb4/t4fw_api.h
@@ -876,7 +876,7 @@ struct fw_vi_mac_cmd {
struct fw_vi_rxmode_cmd {
__be32 op_to_viid;
__be32 retval_len16;
- __be32 mtu_to_broadcasten;
+ __be32 mtu_to_vlanexen;
__be32 r4_lo;
};
@@ -888,6 +888,8 @@ struct fw_vi_rxmode_cmd {
#define FW_VI_RXMODE_CMD_ALLMULTIEN(x) ((x) << 12)
#define FW_VI_RXMODE_CMD_BROADCASTEN_MASK 0x3
#define FW_VI_RXMODE_CMD_BROADCASTEN(x) ((x) << 10)
+#define FW_VI_RXMODE_CMD_VLANEXEN_MASK 0x3
+#define FW_VI_RXMODE_CMD_VLANEXEN(x) ((x) << 8)
struct fw_vi_enable_cmd {
__be32 op_to_viid;
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
index 2b8edd2efbf6..08e82b1a0b33 100644
--- a/drivers/net/davinci_emac.c
+++ b/drivers/net/davinci_emac.c
@@ -952,13 +952,14 @@ static void emac_dev_mcast_set(struct net_device *ndev)
emac_add_mcast(priv, EMAC_ALL_MULTI_SET, NULL);
}
if (!netdev_mc_empty(ndev)) {
- struct dev_mc_list *mc_ptr;
+ struct netdev_hw_addr *ha;
+
mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST);
emac_add_mcast(priv, EMAC_ALL_MULTI_CLR, NULL);
/* program multicast address list into EMAC hardware */
- netdev_for_each_mc_addr(mc_ptr, ndev) {
+ netdev_for_each_mc_addr(ha, ndev) {
emac_add_mcast(priv, EMAC_MULTICAST_ADD,
- (u8 *) mc_ptr->dmi_addr);
+ (u8 *) ha->addr);
}
} else {
mbp_enable = (mbp_enable & ~EMAC_MBP_RXMCAST);
@@ -1467,7 +1468,6 @@ static int emac_dev_xmit(struct sk_buff *skb, struct net_device *ndev)
tx_buf.length = skb->len;
tx_buf.buf_token = (void *)skb;
tx_buf.data_ptr = skb->data;
- ndev->trans_start = jiffies;
ret_code = emac_send(priv, &tx_packet, EMAC_DEF_TX_CH);
if (unlikely(ret_code != 0)) {
if (ret_code == EMAC_ERR_TX_OUT_OF_BD) {
diff --git a/drivers/net/de600.c b/drivers/net/de600.c
index 6b13f4fd2e96..23a65398d011 100644
--- a/drivers/net/de600.c
+++ b/drivers/net/de600.c
@@ -166,8 +166,8 @@ static int de600_start_xmit(struct sk_buff *skb, struct net_device *dev)
int i;
if (free_tx_pages <= 0) { /* Do timeouts, to avoid hangs. */
- tickssofar = jiffies - dev->trans_start;
- if (tickssofar < 5)
+ tickssofar = jiffies - dev_trans_start(dev);
+ if (tickssofar < HZ/20)
return NETDEV_TX_BUSY;
/* else */
printk(KERN_WARNING "%s: transmit timed out (%d), %s?\n", dev->name, tickssofar, "network cable problem");
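
de600 illustrates two related cleanups that recur below: drivers stop writing dev->trans_start themselves (the core tracks transmit time per queue, so xmit paths simply drop the assignment), and where a driver still wants the last-transmit time it reads it back with dev_trans_start(); timeouts are also expressed in HZ-based units (HZ/20 is 50 ms) rather than a raw tick count. A small sketch, with an invented helper:

	#include <linux/netdevice.h>
	#include <linux/jiffies.h>

	/* True once more than 50 ms have passed since the last transmit. */
	static bool ex_tx_stalled(struct net_device *dev)
	{
		return time_after(jiffies, dev_trans_start(dev) + HZ / 20);
	}
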
diff --git a/drivers/net/de620.c b/drivers/net/de620.c
index a0a6830b5e6d..f3650fd096f4 100644
--- a/drivers/net/de620.c
+++ b/drivers/net/de620.c
@@ -535,7 +535,6 @@ static int de620_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
de620_write_block(dev, buffer, skb->len, len-skb->len);
- dev->trans_start = jiffies;
if(!(using_txbuf == (TXBF0 | TXBF1)))
netif_wake_queue(dev);
diff --git a/drivers/net/declance.c b/drivers/net/declance.c
index 8cf3cc6f20e2..1d973db27c32 100644
--- a/drivers/net/declance.c
+++ b/drivers/net/declance.c
@@ -874,7 +874,7 @@ static inline int lance_reset(struct net_device *dev)
lance_init_ring(dev);
load_csrs(lp);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
status = init_restart_lance(lp);
return status;
}
@@ -930,7 +930,6 @@ static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
spin_unlock_irqrestore(&lp->lock, flags);
- dev->trans_start = jiffies;
dev_kfree_skb(skb);
return NETDEV_TX_OK;
@@ -940,7 +939,7 @@ static void lance_load_multicast(struct net_device *dev)
{
struct lance_private *lp = netdev_priv(dev);
volatile u16 *ib = (volatile u16 *)dev->mem_start;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
char *addrs;
u32 crc;
@@ -959,8 +958,8 @@ static void lance_load_multicast(struct net_device *dev)
*lib_ptr(ib, filter[3], lp->type) = 0;
/* Add addresses */
- netdev_for_each_mc_addr(dmi, dev) {
- addrs = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ addrs = ha->addr;
/* multicast address? */
if (!(*addrs & 1))
@@ -970,7 +969,6 @@ static void lance_load_multicast(struct net_device *dev)
crc = crc >> 26;
*lib_ptr(ib, filter[crc >> 4], lp->type) |= 1 << (crc & 0xf);
}
- return;
}
static void lance_set_multicast(struct net_device *dev)
diff --git a/drivers/net/defxx.c b/drivers/net/defxx.c
index ed53a8d45f4e..e5667c55844e 100644
--- a/drivers/net/defxx.c
+++ b/drivers/net/defxx.c
@@ -2195,7 +2195,7 @@ static void dfx_ctl_set_multicast_list(struct net_device *dev)
{
DFX_board_t *bp = netdev_priv(dev);
int i; /* used as index in for loop */
- struct dev_mc_list *dmi; /* ptr to multicast addr entry */
+ struct netdev_hw_addr *ha;
/* Enable LLC frame promiscuous mode, if necessary */
@@ -2241,9 +2241,9 @@ static void dfx_ctl_set_multicast_list(struct net_device *dev)
/* Copy addresses to multicast address table, then update adapter CAM */
i = 0;
- netdev_for_each_mc_addr(dmi, dev)
+ netdev_for_each_mc_addr(ha, dev)
memcpy(&bp->mc_table[i++ * FDDI_K_ALEN],
- dmi->dmi_addr, FDDI_K_ALEN);
+ ha->addr, FDDI_K_ALEN);
if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS)
{
diff --git a/drivers/net/depca.c b/drivers/net/depca.c
index 744c1928dfca..bf66e9b3b19e 100644
--- a/drivers/net/depca.c
+++ b/drivers/net/depca.c
@@ -921,7 +921,7 @@ static void depca_tx_timeout(struct net_device *dev)
STOP_DEPCA;
depca_init_ring(dev);
LoadCSRs(dev);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
InitRestartDepca(dev);
}
@@ -954,7 +954,6 @@ static netdev_tx_t depca_start_xmit(struct sk_buff *skb,
outw(CSR0, DEPCA_ADDR);
outw(INEA | TDMD, DEPCA_DATA);
- dev->trans_start = jiffies;
dev_kfree_skb(skb);
}
if (TX_BUFFS_AVAIL)
@@ -1204,8 +1203,6 @@ static void LoadCSRs(struct net_device *dev)
outw(ACON, DEPCA_DATA);
outw(CSR0, DEPCA_ADDR); /* Point back to CSR0 */
-
- return;
}
static int InitRestartDepca(struct net_device *dev)
@@ -1272,7 +1269,7 @@ static void set_multicast_list(struct net_device *dev)
static void SetMulticastFilter(struct net_device *dev)
{
struct depca_private *lp = netdev_priv(dev);
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
char *addrs;
int i, j, bit, byte;
u16 hashcode;
@@ -1287,8 +1284,8 @@ static void SetMulticastFilter(struct net_device *dev)
lp->init_block.mcast_table[i] = 0;
}
/* Add multicast addresses */
- netdev_for_each_mc_addr(dmi, dev) {
- addrs = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ addrs = ha->addr;
if ((*addrs & 0x01) == 1) { /* multicast address? */
crc = ether_crc(ETH_ALEN, addrs);
hashcode = (crc & 1); /* hashcode is 6 LSb of CRC ... */
@@ -1303,8 +1300,6 @@ static void SetMulticastFilter(struct net_device *dev)
}
}
}
-
- return;
}
static int __init depca_common_init (u_long ioaddr, struct net_device **devp)
@@ -1909,8 +1904,6 @@ static void depca_dbg_open(struct net_device *dev)
outw(CSR3, DEPCA_ADDR);
printk("CSR3: 0x%4.4x\n", inw(DEPCA_DATA));
}
-
- return;
}
/*
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c
index b05bad829827..a2f238d20caa 100644
--- a/drivers/net/dl2k.c
+++ b/drivers/net/dl2k.c
@@ -596,8 +596,6 @@ alloc_list (struct net_device *dev)
/* Set RFDListPtr */
writel (np->rx_ring_dma, dev->base_addr + RFDListPtr0);
writel (0, dev->base_addr + RFDListPtr1);
-
- return;
}
static netdev_tx_t
@@ -1132,14 +1130,14 @@ set_multicast (struct net_device *dev)
/* Receive broadcast and multicast frames */
rx_mode = ReceiveBroadcast | ReceiveMulticast | ReceiveUnicast;
} else if (!netdev_mc_empty(dev)) {
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
/* Receive broadcast frames and multicast frames filtering
by Hashtable */
rx_mode =
ReceiveBroadcast | ReceiveMulticastHash | ReceiveUnicast;
- netdev_for_each_mc_addr(mclist, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
int bit, index = 0;
- int crc = ether_crc_le (ETH_ALEN, mclist->dmi_addr);
+ int crc = ether_crc_le(ETH_ALEN, ha->addr);
/* The inverted high significant 6 bits of CRC are
used as an index to hashtable */
for (bit = 0; bit < 6; bit++)
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index 7f9960f718e3..abcc838e18af 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -476,17 +476,13 @@ static uint32_t dm9000_get_rx_csum(struct net_device *dev)
return dm->rx_csum;
}
-static int dm9000_set_rx_csum(struct net_device *dev, uint32_t data)
+static int dm9000_set_rx_csum_unlocked(struct net_device *dev, uint32_t data)
{
board_info_t *dm = to_dm9000_board(dev);
- unsigned long flags;
if (dm->can_csum) {
dm->rx_csum = data;
-
- spin_lock_irqsave(&dm->lock, flags);
iow(dm, DM9000_RCSR, dm->rx_csum ? RCSR_CSUM : 0);
- spin_unlock_irqrestore(&dm->lock, flags);
return 0;
}
@@ -494,6 +490,19 @@ static int dm9000_set_rx_csum(struct net_device *dev, uint32_t data)
return -EOPNOTSUPP;
}
+static int dm9000_set_rx_csum(struct net_device *dev, uint32_t data)
+{
+ board_info_t *dm = to_dm9000_board(dev);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&dm->lock, flags);
+ ret = dm9000_set_rx_csum_unlocked(dev, data);
+ spin_unlock_irqrestore(&dm->lock, flags);
+
+ return ret;
+}
+
static int dm9000_set_tx_csum(struct net_device *dev, uint32_t data)
{
board_info_t *dm = to_dm9000_board(dev);
@@ -722,20 +731,17 @@ static unsigned char dm9000_type_to_char(enum dm9000_type type)
* Set DM9000 multicast address
*/
static void
-dm9000_hash_table(struct net_device *dev)
+dm9000_hash_table_unlocked(struct net_device *dev)
{
board_info_t *db = netdev_priv(dev);
- struct dev_mc_list *mcptr;
+ struct netdev_hw_addr *ha;
int i, oft;
u32 hash_val;
u16 hash_table[4];
u8 rcr = RCR_DIS_LONG | RCR_DIS_CRC | RCR_RXEN;
- unsigned long flags;
dm9000_dbg(db, 1, "entering %s\n", __func__);
- spin_lock_irqsave(&db->lock, flags);
-
for (i = 0, oft = DM9000_PAR; i < 6; i++, oft++)
iow(db, oft, dev->dev_addr[i]);
@@ -753,8 +759,8 @@ dm9000_hash_table(struct net_device *dev)
rcr |= RCR_ALL;
/* the multicast address in Hash Table : 64 bits */
- netdev_for_each_mc_addr(mcptr, dev) {
- hash_val = ether_crc_le(6, mcptr->dmi_addr) & 0x3f;
+ netdev_for_each_mc_addr(ha, dev) {
+ hash_val = ether_crc_le(6, ha->addr) & 0x3f;
hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
}
@@ -765,11 +771,21 @@ dm9000_hash_table(struct net_device *dev)
}
iow(db, DM9000_RCR, rcr);
+}
+
+static void
+dm9000_hash_table(struct net_device *dev)
+{
+ board_info_t *db = netdev_priv(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&db->lock, flags);
+ dm9000_hash_table_unlocked(dev);
spin_unlock_irqrestore(&db->lock, flags);
}
/*
- * Initilize dm9000 board
+ * Initialize dm9000 board
*/
static void
dm9000_init_dm9000(struct net_device *dev)
@@ -784,7 +800,7 @@ dm9000_init_dm9000(struct net_device *dev)
db->io_mode = ior(db, DM9000_ISR) >> 6; /* ISR bit7:6 keeps I/O mode */
/* Checksum mode */
- dm9000_set_rx_csum(dev, db->rx_csum);
+ dm9000_set_rx_csum_unlocked(dev, db->rx_csum);
/* GPIO0 on pre-activate PHY */
iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */
@@ -811,7 +827,7 @@ dm9000_init_dm9000(struct net_device *dev)
iow(db, DM9000_ISR, ISR_CLR_STATUS); /* Clear interrupt status */
/* Set address filter table */
- dm9000_hash_table(dev);
+ dm9000_hash_table_unlocked(dev);
imr = IMR_PAR | IMR_PTM | IMR_PRM;
if (db->type != TYPE_DM9000E)
@@ -825,7 +841,7 @@ dm9000_init_dm9000(struct net_device *dev)
/* Init Driver variable */
db->tx_pkt_cnt = 0;
db->queue_pkt_len = 0;
- dev->trans_start = 0;
+ dev->trans_start = jiffies;
}
/* Our watchdog timed out. Called by the networking layer */
@@ -843,7 +859,7 @@ static void dm9000_timeout(struct net_device *dev)
dm9000_reset(db);
dm9000_init_dm9000(dev);
/* We can accept TX packets again */
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
/* Restore previous register address */
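
dm9000 splits set_rx_csum and hash_table into *_unlocked workers plus thin locking wrappers, so dm9000_init_dm9000() (which already runs under db->lock) can call the workers directly without taking the spinlock twice. The shape of that split, with illustrative names:

	#include <linux/spinlock.h>
	#include <linux/types.h>

	struct ex_board {
		spinlock_t lock;
		u32 rx_csum;
	};

	/* Caller must already hold bd->lock. */
	static void ex_set_rx_csum_unlocked(struct ex_board *bd, u32 on)
	{
		bd->rx_csum = on;
		/* ... program the controller's checksum register here ... */
	}

	/* Public entry point: take the lock, then reuse the unlocked worker. */
	static void ex_set_rx_csum(struct ex_board *bd, u32 on)
	{
		unsigned long flags;

		spin_lock_irqsave(&bd->lock, flags);
		ex_set_rx_csum_unlocked(bd, on);
		spin_unlock_irqrestore(&bd->lock, flags);
	}
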
diff --git a/drivers/net/dnet.c b/drivers/net/dnet.c
index 234685213f1a..8b0f50bbf3e5 100644
--- a/drivers/net/dnet.c
+++ b/drivers/net/dnet.c
@@ -594,8 +594,6 @@ static netdev_tx_t dnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
spin_unlock_irqrestore(&bp->lock, flags);
- dev->trans_start = jiffies;
-
return NETDEV_TX_OK;
}
@@ -918,7 +916,7 @@ static int __devinit dnet_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "Dave DNET at 0x%p (0x%08x) irq %d %pM\n",
bp->regs, mem_base, dev->irq, dev->dev_addr);
- dev_info(&pdev->dev, "has %smdio, %sirq, %sgigabit, %sdma \n",
+ dev_info(&pdev->dev, "has %smdio, %sirq, %sgigabit, %sdma\n",
(bp->capabilities & DNET_HAS_MDIO) ? "" : "no ",
(bp->capabilities & DNET_HAS_IRQ) ? "" : "no ",
(bp->capabilities & DNET_HAS_GIGABIT) ? "" : "no ",
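
The e100 diff that follows drops the driver-private PFX/DPRINTK machinery in favour of the shared helpers: netdev_err()/netdev_info() always prefix the interface name, netif_err()/netif_printk() additionally honour the driver's msg_enable bitmap, and pr_fmt() covers the remaining bare printks. A hedged sketch of the pattern; the priv layout here is assumed, not e100's:

	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

	#include <linux/netdevice.h>

	struct ex_priv {
		struct net_device *netdev;
		u32 msg_enable;		/* netif_*() checks NETIF_MSG_* bits here */
	};

	static void ex_report_link(struct ex_priv *priv, bool up, int speed)
	{
		if (up)
			netdev_info(priv->netdev, "NIC Link is Up %d Mbps\n", speed);
		else
			netdev_info(priv->netdev, "NIC Link is Down\n");

		netif_printk(priv, link, KERN_DEBUG, priv->netdev,
			     "msg_enable=0x%x\n", priv->msg_enable);
	}
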
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 791080303db1..b194bad29ace 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -147,6 +147,8 @@
* - add clean lowlevel I/O emulation for cards with MII-lacking PHYs
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
@@ -175,7 +177,6 @@
#define DRV_VERSION "3.5.24-k2"DRV_EXT
#define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver"
#define DRV_COPYRIGHT "Copyright(c) 1999-2006 Intel Corporation"
-#define PFX DRV_NAME ": "
#define E100_WATCHDOG_PERIOD (2 * HZ)
#define E100_NAPI_WEIGHT 16
@@ -201,10 +202,6 @@ module_param(use_io, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums");
MODULE_PARM_DESC(use_io, "Force use of i/o access mode");
-#define DPRINTK(nlevel, klevel, fmt, args...) \
- (void)((NETIF_MSG_##nlevel & nic->msg_enable) && \
- printk(KERN_##klevel PFX "%s: %s: " fmt, nic->netdev->name, \
- __func__ , ## args))
#define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\
PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \
@@ -690,12 +687,13 @@ static int e100_self_test(struct nic *nic)
/* Check results of self-test */
if (nic->mem->selftest.result != 0) {
- DPRINTK(HW, ERR, "Self-test failed: result=0x%08X\n",
- nic->mem->selftest.result);
+ netif_err(nic, hw, nic->netdev,
+ "Self-test failed: result=0x%08X\n",
+ nic->mem->selftest.result);
return -ETIMEDOUT;
}
if (nic->mem->selftest.signature == 0) {
- DPRINTK(HW, ERR, "Self-test failed: timed out\n");
+ netif_err(nic, hw, nic->netdev, "Self-test failed: timed out\n");
return -ETIMEDOUT;
}
@@ -798,7 +796,7 @@ static int e100_eeprom_load(struct nic *nic)
/* The checksum, stored in the last word, is calculated such that
* the sum of words should be 0xBABA */
if (cpu_to_le16(0xBABA - checksum) != nic->eeprom[nic->eeprom_wc - 1]) {
- DPRINTK(PROBE, ERR, "EEPROM corrupted\n");
+ netif_err(nic, probe, nic->netdev, "EEPROM corrupted\n");
if (!eeprom_bad_csum_allow)
return -EAGAIN;
}
@@ -954,8 +952,7 @@ static u16 mdio_ctrl_hw(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data)
udelay(20);
}
if (unlikely(!i)) {
- printk("e100.mdio_ctrl(%s) won't go Ready\n",
- nic->netdev->name );
+ netdev_err(nic->netdev, "e100.mdio_ctrl won't go Ready\n");
spin_unlock_irqrestore(&nic->mdio_lock, flags);
return 0; /* No way to indicate timeout error */
}
@@ -967,9 +964,10 @@ static u16 mdio_ctrl_hw(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data)
break;
}
spin_unlock_irqrestore(&nic->mdio_lock, flags);
- DPRINTK(HW, DEBUG,
- "%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n",
- dir == mdi_read ? "READ" : "WRITE", addr, reg, data, data_out);
+ netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
+ "%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n",
+ dir == mdi_read ? "READ" : "WRITE",
+ addr, reg, data, data_out);
return (u16)data_out;
}
@@ -1029,17 +1027,19 @@ static u16 mdio_ctrl_phy_mii_emulated(struct nic *nic,
return ADVERTISE_10HALF |
ADVERTISE_10FULL;
default:
- DPRINTK(HW, DEBUG,
- "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
- dir == mdi_read ? "READ" : "WRITE", addr, reg, data);
+ netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
+ "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
+ dir == mdi_read ? "READ" : "WRITE",
+ addr, reg, data);
return 0xFFFF;
}
} else {
switch (reg) {
default:
- DPRINTK(HW, DEBUG,
- "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
- dir == mdi_read ? "READ" : "WRITE", addr, reg, data);
+ netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
+ "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
+ dir == mdi_read ? "READ" : "WRITE",
+ addr, reg, data);
return 0xFFFF;
}
}
@@ -1156,12 +1156,15 @@ static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
}
}
- DPRINTK(HW, DEBUG, "[00-07]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
- c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]);
- DPRINTK(HW, DEBUG, "[08-15]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
- c[8], c[9], c[10], c[11], c[12], c[13], c[14], c[15]);
- DPRINTK(HW, DEBUG, "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
- c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
+ netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
+ "[00-07]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
+ c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]);
+ netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
+ "[08-15]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
+ c[8], c[9], c[10], c[11], c[12], c[13], c[14], c[15]);
+ netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
+ "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
+ c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
}
/*************************************************************************
@@ -1254,16 +1257,18 @@ static const struct firmware *e100_request_firmware(struct nic *nic)
err = request_firmware(&fw, fw_name, &nic->pdev->dev);
if (err) {
- DPRINTK(PROBE, ERR, "Failed to load firmware \"%s\": %d\n",
- fw_name, err);
+ netif_err(nic, probe, nic->netdev,
+ "Failed to load firmware \"%s\": %d\n",
+ fw_name, err);
return ERR_PTR(err);
}
/* Firmware should be precisely UCODE_SIZE (words) plus three bytes
indicating the offsets for BUNDLESMALL, BUNDLEMAX, INTDELAY */
if (fw->size != UCODE_SIZE * 4 + 3) {
- DPRINTK(PROBE, ERR, "Firmware \"%s\" has wrong size %zu\n",
- fw_name, fw->size);
+ netif_err(nic, probe, nic->netdev,
+ "Firmware \"%s\" has wrong size %zu\n",
+ fw_name, fw->size);
release_firmware(fw);
return ERR_PTR(-EINVAL);
}
@@ -1275,9 +1280,9 @@ static const struct firmware *e100_request_firmware(struct nic *nic)
if (timer >= UCODE_SIZE || bundle >= UCODE_SIZE ||
min_size >= UCODE_SIZE) {
- DPRINTK(PROBE, ERR,
- "\"%s\" has bogus offset values (0x%x,0x%x,0x%x)\n",
- fw_name, timer, bundle, min_size);
+ netif_err(nic, probe, nic->netdev,
+ "\"%s\" has bogus offset values (0x%x,0x%x,0x%x)\n",
+ fw_name, timer, bundle, min_size);
release_firmware(fw);
return ERR_PTR(-EINVAL);
}
@@ -1329,7 +1334,8 @@ static inline int e100_load_ucode_wait(struct nic *nic)
return PTR_ERR(fw);
if ((err = e100_exec_cb(nic, (void *)fw, e100_setup_ucode)))
- DPRINTK(PROBE,ERR, "ucode cmd failed with error %d\n", err);
+ netif_err(nic, probe, nic->netdev,
+ "ucode cmd failed with error %d\n", err);
/* must restart cuc */
nic->cuc_cmd = cuc_start;
@@ -1349,7 +1355,7 @@ static inline int e100_load_ucode_wait(struct nic *nic)
/* if the command failed, or is not OK, notify and return */
if (!counter || !(cb->status & cpu_to_le16(cb_ok))) {
- DPRINTK(PROBE,ERR, "ucode load failed\n");
+ netif_err(nic, probe, nic->netdev, "ucode load failed\n");
err = -EPERM;
}
@@ -1387,8 +1393,8 @@ static int e100_phy_check_without_mii(struct nic *nic)
* media is sensed automatically based on how the link partner
* is configured. This is, in essence, manual configuration.
*/
- DPRINTK(PROBE, INFO,
- "found MII-less i82503 or 80c24 or other PHY\n");
+ netif_info(nic, probe, nic->netdev,
+ "found MII-less i82503 or 80c24 or other PHY\n");
nic->mdio_ctrl = mdio_ctrl_phy_mii_emulated;
nic->mii.phy_id = 0; /* is this ok for an MII-less PHY? */
@@ -1435,18 +1441,20 @@ static int e100_phy_init(struct nic *nic)
return 0; /* simply return and hope for the best */
else {
/* for unknown cases log a fatal error */
- DPRINTK(HW, ERR,
- "Failed to locate any known PHY, aborting.\n");
+ netif_err(nic, hw, nic->netdev,
+ "Failed to locate any known PHY, aborting\n");
return -EAGAIN;
}
} else
- DPRINTK(HW, DEBUG, "phy_addr = %d\n", nic->mii.phy_id);
+ netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
+ "phy_addr = %d\n", nic->mii.phy_id);
/* Get phy ID */
id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1);
id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2);
nic->phy = (u32)id_hi << 16 | (u32)id_lo;
- DPRINTK(HW, DEBUG, "phy ID = 0x%08X\n", nic->phy);
+ netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
+ "phy ID = 0x%08X\n", nic->phy);
/* Select the phy and isolate the rest */
for (addr = 0; addr < 32; addr++) {
@@ -1508,7 +1516,7 @@ static int e100_hw_init(struct nic *nic)
e100_hw_reset(nic);
- DPRINTK(HW, ERR, "e100_hw_init\n");
+ netif_err(nic, hw, nic->netdev, "e100_hw_init\n");
if (!in_interrupt() && (err = e100_self_test(nic)))
return err;
@@ -1538,16 +1546,16 @@ static int e100_hw_init(struct nic *nic)
static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
struct net_device *netdev = nic->netdev;
- struct dev_mc_list *list;
+ struct netdev_hw_addr *ha;
u16 i, count = min(netdev_mc_count(netdev), E100_MAX_MULTICAST_ADDRS);
cb->command = cpu_to_le16(cb_multi);
cb->u.multi.count = cpu_to_le16(count * ETH_ALEN);
i = 0;
- netdev_for_each_mc_addr(list, netdev) {
+ netdev_for_each_mc_addr(ha, netdev) {
if (i == count)
break;
- memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &list->dmi_addr,
+ memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &ha->addr,
ETH_ALEN);
}
}
@@ -1556,8 +1564,9 @@ static void e100_set_multicast_list(struct net_device *netdev)
{
struct nic *nic = netdev_priv(netdev);
- DPRINTK(HW, DEBUG, "mc_count=%d, flags=0x%04X\n",
- netdev_mc_count(netdev), netdev->flags);
+ netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
+ "mc_count=%d, flags=0x%04X\n",
+ netdev_mc_count(netdev), netdev->flags);
if (netdev->flags & IFF_PROMISC)
nic->flags |= promiscuous;
@@ -1630,7 +1639,8 @@ static void e100_update_stats(struct nic *nic)
if (e100_exec_cmd(nic, cuc_dump_reset, 0))
- DPRINTK(TX_ERR, DEBUG, "exec cuc_dump_reset failed\n");
+ netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
+ "exec cuc_dump_reset failed\n");
}
static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex)
@@ -1660,20 +1670,19 @@ static void e100_watchdog(unsigned long data)
struct nic *nic = (struct nic *)data;
struct ethtool_cmd cmd;
- DPRINTK(TIMER, DEBUG, "right now = %ld\n", jiffies);
+ netif_printk(nic, timer, KERN_DEBUG, nic->netdev,
+ "right now = %ld\n", jiffies);
/* mii library handles link maintenance tasks */
mii_ethtool_gset(&nic->mii, &cmd);
if (mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) {
- printk(KERN_INFO "e100: %s NIC Link is Up %s Mbps %s Duplex\n",
- nic->netdev->name,
- cmd.speed == SPEED_100 ? "100" : "10",
- cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
+ netdev_info(nic->netdev, "NIC Link is Up %u Mbps %s Duplex\n",
+ cmd.speed == SPEED_100 ? 100 : 10,
+ cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
} else if (!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) {
- printk(KERN_INFO "e100: %s NIC Link is Down\n",
- nic->netdev->name);
+ netdev_info(nic->netdev, "NIC Link is Down\n");
}
mii_check_link(&nic->mii);
@@ -1733,7 +1742,8 @@ static netdev_tx_t e100_xmit_frame(struct sk_buff *skb,
Issue a NOP command followed by a 1us delay before
issuing the Tx command. */
if (e100_exec_cmd(nic, cuc_nop, 0))
- DPRINTK(TX_ERR, DEBUG, "exec cuc_nop failed\n");
+ netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
+ "exec cuc_nop failed\n");
udelay(1);
}
@@ -1742,17 +1752,18 @@ static netdev_tx_t e100_xmit_frame(struct sk_buff *skb,
switch (err) {
case -ENOSPC:
/* We queued the skb, but now we're out of space. */
- DPRINTK(TX_ERR, DEBUG, "No space for CB\n");
+ netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
+ "No space for CB\n");
netif_stop_queue(netdev);
break;
case -ENOMEM:
/* This is a hard error - log it. */
- DPRINTK(TX_ERR, DEBUG, "Out of Tx resources, returning skb\n");
+ netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
+ "Out of Tx resources, returning skb\n");
netif_stop_queue(netdev);
return NETDEV_TX_BUSY;
}
- netdev->trans_start = jiffies;
return NETDEV_TX_OK;
}
@@ -1768,9 +1779,10 @@ static int e100_tx_clean(struct nic *nic)
for (cb = nic->cb_to_clean;
cb->status & cpu_to_le16(cb_complete);
cb = nic->cb_to_clean = cb->next) {
- DPRINTK(TX_DONE, DEBUG, "cb[%d]->status = 0x%04X\n",
- (int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)),
- cb->status);
+ netif_printk(nic, tx_done, KERN_DEBUG, nic->netdev,
+ "cb[%d]->status = 0x%04X\n",
+ (int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)),
+ cb->status);
if (likely(cb->skb != NULL)) {
dev->stats.tx_packets++;
@@ -1913,7 +1925,8 @@ static int e100_rx_indicate(struct nic *nic, struct rx *rx,
sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
rfd_status = le16_to_cpu(rfd->status);
- DPRINTK(RX_STATUS, DEBUG, "status=0x%04X\n", rfd_status);
+ netif_printk(nic, rx_status, KERN_DEBUG, nic->netdev,
+ "status=0x%04X\n", rfd_status);
/* If data isn't ready, nothing to indicate */
if (unlikely(!(rfd_status & cb_complete))) {
@@ -2124,7 +2137,8 @@ static irqreturn_t e100_intr(int irq, void *dev_id)
struct nic *nic = netdev_priv(netdev);
u8 stat_ack = ioread8(&nic->csr->scb.stat_ack);
- DPRINTK(INTR, DEBUG, "stat_ack = 0x%02X\n", stat_ack);
+ netif_printk(nic, intr, KERN_DEBUG, nic->netdev,
+ "stat_ack = 0x%02X\n", stat_ack);
if (stat_ack == stat_ack_not_ours || /* Not our interrupt */
stat_ack == stat_ack_not_present) /* Hardware is ejected */
@@ -2264,8 +2278,8 @@ static void e100_tx_timeout_task(struct work_struct *work)
struct nic *nic = container_of(work, struct nic, tx_timeout_task);
struct net_device *netdev = nic->netdev;
- DPRINTK(TX_ERR, DEBUG, "scb.status=0x%02X\n",
- ioread8(&nic->csr->scb.status));
+ netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
+ "scb.status=0x%02X\n", ioread8(&nic->csr->scb.status));
rtnl_lock();
if (netif_running(netdev)) {
@@ -2532,8 +2546,8 @@ static int e100_set_ringparam(struct net_device *netdev,
rfds->count = min(rfds->count, rfds->max);
cbs->count = max(ring->tx_pending, cbs->min);
cbs->count = min(cbs->count, cbs->max);
- DPRINTK(DRV, INFO, "Ring Param settings: rx: %d, tx %d\n",
- rfds->count, cbs->count);
+ netif_info(nic, drv, nic->netdev, "Ring Param settings: rx: %d, tx %d\n",
+ rfds->count, cbs->count);
if (netif_running(netdev))
e100_up(nic);
@@ -2710,7 +2724,7 @@ static int e100_open(struct net_device *netdev)
netif_carrier_off(netdev);
if ((err = e100_up(nic)))
- DPRINTK(IFUP, ERR, "Cannot open interface, aborting.\n");
+ netif_err(nic, ifup, nic->netdev, "Cannot open interface, aborting\n");
return err;
}
@@ -2744,7 +2758,7 @@ static int __devinit e100_probe(struct pci_dev *pdev,
if (!(netdev = alloc_etherdev(sizeof(struct nic)))) {
if (((1 << debug) - 1) & NETIF_MSG_PROBE)
- printk(KERN_ERR PFX "Etherdev alloc failed, abort.\n");
+ pr_err("Etherdev alloc failed, aborting\n");
return -ENOMEM;
}
@@ -2762,35 +2776,34 @@ static int __devinit e100_probe(struct pci_dev *pdev,
pci_set_drvdata(pdev, netdev);
if ((err = pci_enable_device(pdev))) {
- DPRINTK(PROBE, ERR, "Cannot enable PCI device, aborting.\n");
+ netif_err(nic, probe, nic->netdev, "Cannot enable PCI device, aborting\n");
goto err_out_free_dev;
}
if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
- DPRINTK(PROBE, ERR, "Cannot find proper PCI device "
- "base address, aborting.\n");
+ netif_err(nic, probe, nic->netdev, "Cannot find proper PCI device base address, aborting\n");
err = -ENODEV;
goto err_out_disable_pdev;
}
if ((err = pci_request_regions(pdev, DRV_NAME))) {
- DPRINTK(PROBE, ERR, "Cannot obtain PCI resources, aborting.\n");
+ netif_err(nic, probe, nic->netdev, "Cannot obtain PCI resources, aborting\n");
goto err_out_disable_pdev;
}
if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
- DPRINTK(PROBE, ERR, "No usable DMA configuration, aborting.\n");
+ netif_err(nic, probe, nic->netdev, "No usable DMA configuration, aborting\n");
goto err_out_free_res;
}
SET_NETDEV_DEV(netdev, &pdev->dev);
if (use_io)
- DPRINTK(PROBE, INFO, "using i/o access mode\n");
+ netif_info(nic, probe, nic->netdev, "using i/o access mode\n");
nic->csr = pci_iomap(pdev, (use_io ? 1 : 0), sizeof(struct csr));
if (!nic->csr) {
- DPRINTK(PROBE, ERR, "Cannot map device registers, aborting.\n");
+ netif_err(nic, probe, nic->netdev, "Cannot map device registers, aborting\n");
err = -ENOMEM;
goto err_out_free_res;
}
@@ -2824,7 +2837,7 @@ static int __devinit e100_probe(struct pci_dev *pdev,
INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task);
if ((err = e100_alloc(nic))) {
- DPRINTK(PROBE, ERR, "Cannot alloc driver memory, aborting.\n");
+ netif_err(nic, probe, nic->netdev, "Cannot alloc driver memory, aborting\n");
goto err_out_iounmap;
}
@@ -2837,13 +2850,11 @@ static int __devinit e100_probe(struct pci_dev *pdev,
memcpy(netdev->perm_addr, nic->eeprom, ETH_ALEN);
if (!is_valid_ether_addr(netdev->perm_addr)) {
if (!eeprom_bad_csum_allow) {
- DPRINTK(PROBE, ERR, "Invalid MAC address from "
- "EEPROM, aborting.\n");
+ netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, aborting\n");
err = -EAGAIN;
goto err_out_free;
} else {
- DPRINTK(PROBE, ERR, "Invalid MAC address from EEPROM, "
- "you MUST configure one.\n");
+ netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, you MUST configure one.\n");
}
}
@@ -2859,7 +2870,7 @@ static int __devinit e100_probe(struct pci_dev *pdev,
strcpy(netdev->name, "eth%d");
if ((err = register_netdev(netdev))) {
- DPRINTK(PROBE, ERR, "Cannot register net device, aborting.\n");
+ netif_err(nic, probe, nic->netdev, "Cannot register net device, aborting\n");
goto err_out_free;
}
nic->cbs_pool = pci_pool_create(netdev->name,
@@ -2867,9 +2878,10 @@ static int __devinit e100_probe(struct pci_dev *pdev,
nic->params.cbs.max * sizeof(struct cb),
sizeof(u32),
0);
- DPRINTK(PROBE, INFO, "addr 0x%llx, irq %d, MAC addr %pM\n",
- (unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0),
- pdev->irq, netdev->dev_addr);
+ netif_info(nic, probe, nic->netdev,
+ "addr 0x%llx, irq %d, MAC addr %pM\n",
+ (unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0),
+ pdev->irq, netdev->dev_addr);
return 0;
@@ -3027,7 +3039,7 @@ static pci_ers_result_t e100_io_slot_reset(struct pci_dev *pdev)
struct nic *nic = netdev_priv(netdev);
if (pci_enable_device(pdev)) {
- printk(KERN_ERR "e100: Cannot re-enable PCI device after reset.\n");
+ pr_err("Cannot re-enable PCI device after reset\n");
return PCI_ERS_RESULT_DISCONNECT;
}
pci_set_master(pdev);
@@ -3086,8 +3098,8 @@ static struct pci_driver e100_driver = {
static int __init e100_init_module(void)
{
if (((1 << debug) - 1) & NETIF_MSG_DRV) {
- printk(KERN_INFO PFX "%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
- printk(KERN_INFO PFX "%s\n", DRV_COPYRIGHT);
+ pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
+ pr_info("%s\n", DRV_COPYRIGHT);
}
return pci_register_driver(&e100_driver);
}
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index 2f29c2131851..40b62b406b08 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -81,23 +81,6 @@ struct e1000_adapter;
#include "e1000_hw.h"
-#ifdef DBG
-#define E1000_DBG(args...) printk(KERN_DEBUG "e1000: " args)
-#else
-#define E1000_DBG(args...)
-#endif
-
-#define E1000_ERR(args...) printk(KERN_ERR "e1000: " args)
-
-#define PFX "e1000: "
-
-#define DPRINTK(nlevel, klevel, fmt, args...) \
-do { \
- if (NETIF_MSG_##nlevel & adapter->msg_enable) \
- printk(KERN_##klevel PFX "%s: %s: " fmt, \
- adapter->netdev->name, __func__, ##args); \
-} while (0)
-
#define E1000_MAX_INTR 10
/* TX/RX descriptor defines */
@@ -335,6 +318,25 @@ enum e1000_state_t {
__E1000_DOWN
};
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+extern struct net_device *e1000_get_hw_dev(struct e1000_hw *hw);
+#define e_dbg(format, arg...) \
+ netdev_dbg(e1000_get_hw_dev(hw), format, ## arg)
+#define e_err(format, arg...) \
+ netdev_err(adapter->netdev, format, ## arg)
+#define e_info(format, arg...) \
+ netdev_info(adapter->netdev, format, ## arg)
+#define e_warn(format, arg...) \
+ netdev_warn(adapter->netdev, format, ## arg)
+#define e_notice(format, arg...) \
+ netdev_notice(adapter->netdev, format, ## arg)
+#define e_dev_info(format, arg...) \
+ dev_info(&adapter->pdev->dev, format, ## arg)
+#define e_dev_warn(format, arg...) \
+ dev_warn(&adapter->pdev->dev, format, ## arg)
+
extern char e1000_driver_name[];
extern const char e1000_driver_version[];
@@ -352,5 +354,6 @@ extern bool e1000_has_link(struct e1000_adapter *adapter);
extern void e1000_power_up_phy(struct e1000_adapter *);
extern void e1000_set_ethtool_ops(struct net_device *netdev);
extern void e1000_check_options(struct e1000_adapter *adapter);
+extern char *e1000_get_hw_dev_name(struct e1000_hw *hw);
#endif /* _E1000_H_ */
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index c67e93117271..d5ff029aa7b2 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -346,7 +346,7 @@ static int e1000_set_tso(struct net_device *netdev, u32 data)
netdev->features &= ~NETIF_F_TSO6;
- DPRINTK(PROBE, INFO, "TSO is %s\n", data ? "Enabled" : "Disabled");
+ e_info("TSO is %s\n", data ? "Enabled" : "Disabled");
adapter->tso_force = true;
return 0;
}
@@ -714,9 +714,9 @@ static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data, int reg,
writel(write & test[i], address);
read = readl(address);
if (read != (write & test[i] & mask)) {
- DPRINTK(DRV, ERR, "pattern test reg %04X failed: "
- "got 0x%08X expected 0x%08X\n",
- reg, read, (write & test[i] & mask));
+ e_info("pattern test reg %04X failed: "
+ "got 0x%08X expected 0x%08X\n",
+ reg, read, (write & test[i] & mask));
*data = reg;
return true;
}
@@ -734,9 +734,9 @@ static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data, int reg,
writel(write & mask, address);
read = readl(address);
if ((read & mask) != (write & mask)) {
- DPRINTK(DRV, ERR, "set/check reg %04X test failed: "
- "got 0x%08X expected 0x%08X\n",
- reg, (read & mask), (write & mask));
+ e_err("set/check reg %04X test failed: "
+ "got 0x%08X expected 0x%08X\n",
+ reg, (read & mask), (write & mask));
*data = reg;
return true;
}
@@ -779,8 +779,8 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
ew32(STATUS, toggle);
after = er32(STATUS) & toggle;
if (value != after) {
- DPRINTK(DRV, ERR, "failed STATUS register test got: "
- "0x%08X expected: 0x%08X\n", after, value);
+ e_err("failed STATUS register test got: "
+ "0x%08X expected: 0x%08X\n", after, value);
*data = 1;
return 1;
}
@@ -894,8 +894,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
*data = 1;
return -1;
}
- DPRINTK(HW, INFO, "testing %s interrupt\n",
- (shared_int ? "shared" : "unshared"));
+ e_info("testing %s interrupt\n", (shared_int ? "shared" : "unshared"));
/* Disable all the interrupts */
ew32(IMC, 0xFFFFFFFF);
@@ -980,9 +979,10 @@ static void e1000_free_desc_rings(struct e1000_adapter *adapter)
if (txdr->desc && txdr->buffer_info) {
for (i = 0; i < txdr->count; i++) {
if (txdr->buffer_info[i].dma)
- pci_unmap_single(pdev, txdr->buffer_info[i].dma,
+ dma_unmap_single(&pdev->dev,
+ txdr->buffer_info[i].dma,
txdr->buffer_info[i].length,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
if (txdr->buffer_info[i].skb)
dev_kfree_skb(txdr->buffer_info[i].skb);
}
@@ -991,20 +991,23 @@ static void e1000_free_desc_rings(struct e1000_adapter *adapter)
if (rxdr->desc && rxdr->buffer_info) {
for (i = 0; i < rxdr->count; i++) {
if (rxdr->buffer_info[i].dma)
- pci_unmap_single(pdev, rxdr->buffer_info[i].dma,
+ dma_unmap_single(&pdev->dev,
+ rxdr->buffer_info[i].dma,
rxdr->buffer_info[i].length,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
if (rxdr->buffer_info[i].skb)
dev_kfree_skb(rxdr->buffer_info[i].skb);
}
}
if (txdr->desc) {
- pci_free_consistent(pdev, txdr->size, txdr->desc, txdr->dma);
+ dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
+ txdr->dma);
txdr->desc = NULL;
}
if (rxdr->desc) {
- pci_free_consistent(pdev, rxdr->size, rxdr->desc, rxdr->dma);
+ dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
+ rxdr->dma);
rxdr->desc = NULL;
}
@@ -1012,8 +1015,6 @@ static void e1000_free_desc_rings(struct e1000_adapter *adapter)
txdr->buffer_info = NULL;
kfree(rxdr->buffer_info);
rxdr->buffer_info = NULL;
-
- return;
}
static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
@@ -1039,7 +1040,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
txdr->size = ALIGN(txdr->size, 4096);
- txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
+ txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
+ GFP_KERNEL);
if (!txdr->desc) {
ret_val = 2;
goto err_nomem;
@@ -1070,8 +1072,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
txdr->buffer_info[i].skb = skb;
txdr->buffer_info[i].length = skb->len;
txdr->buffer_info[i].dma =
- pci_map_single(pdev, skb->data, skb->len,
- PCI_DMA_TODEVICE);
+ dma_map_single(&pdev->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
tx_desc->buffer_addr = cpu_to_le64(txdr->buffer_info[i].dma);
tx_desc->lower.data = cpu_to_le32(skb->len);
tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP |
@@ -1093,7 +1095,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
}
rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc);
- rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
+ rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
+ GFP_KERNEL);
if (!rxdr->desc) {
ret_val = 5;
goto err_nomem;
@@ -1126,8 +1129,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
rxdr->buffer_info[i].skb = skb;
rxdr->buffer_info[i].length = E1000_RXBUFFER_2048;
rxdr->buffer_info[i].dma =
- pci_map_single(pdev, skb->data, E1000_RXBUFFER_2048,
- PCI_DMA_FROMDEVICE);
+ dma_map_single(&pdev->dev, skb->data,
+ E1000_RXBUFFER_2048, DMA_FROM_DEVICE);
rx_desc->buffer_addr = cpu_to_le64(rxdr->buffer_info[i].dma);
memset(skb->data, 0x00, skb->len);
}
@@ -1444,10 +1447,10 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
for (i = 0; i < 64; i++) { /* send the packets */
e1000_create_lbtest_frame(txdr->buffer_info[i].skb,
1024);
- pci_dma_sync_single_for_device(pdev,
- txdr->buffer_info[k].dma,
- txdr->buffer_info[k].length,
- PCI_DMA_TODEVICE);
+ dma_sync_single_for_device(&pdev->dev,
+ txdr->buffer_info[k].dma,
+ txdr->buffer_info[k].length,
+ DMA_TO_DEVICE);
if (unlikely(++k == txdr->count)) k = 0;
}
ew32(TDT, k);
@@ -1455,10 +1458,10 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
time = jiffies; /* set the start time for the receive */
good_cnt = 0;
do { /* receive the sent packets */
- pci_dma_sync_single_for_cpu(pdev,
- rxdr->buffer_info[l].dma,
- rxdr->buffer_info[l].length,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&pdev->dev,
+ rxdr->buffer_info[l].dma,
+ rxdr->buffer_info[l].length,
+ DMA_FROM_DEVICE);
ret_val = e1000_check_lbtest_frame(
rxdr->buffer_info[l].skb,
@@ -1558,7 +1561,7 @@ static void e1000_diag_test(struct net_device *netdev,
u8 forced_speed_duplex = hw->forced_speed_duplex;
u8 autoneg = hw->autoneg;
- DPRINTK(HW, INFO, "offline testing starting\n");
+ e_info("offline testing starting\n");
/* Link test performed before hardware reset so autoneg doesn't
* interfere with test result */
@@ -1598,7 +1601,7 @@ static void e1000_diag_test(struct net_device *netdev,
if (if_running)
dev_open(netdev);
} else {
- DPRINTK(HW, INFO, "online testing starting\n");
+ e_info("online testing starting\n");
/* Online tests */
if (e1000_link_test(adapter, &data[4]))
eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -1691,7 +1694,7 @@ static void e1000_get_wol(struct net_device *netdev,
wol->supported &= ~WAKE_UCAST;
if (adapter->wol & E1000_WUFC_EX)
- DPRINTK(DRV, ERR, "Interface does not support "
+ e_err("Interface does not support "
"directed (unicast) frame wake-up packets\n");
break;
default:
@@ -1706,8 +1709,6 @@ static void e1000_get_wol(struct net_device *netdev,
wol->wolopts |= WAKE_BCAST;
if (adapter->wol & E1000_WUFC_MAG)
wol->wolopts |= WAKE_MAGIC;
-
- return;
}
static int e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
@@ -1725,8 +1726,8 @@ static int e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
switch (hw->device_id) {
case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
if (wol->wolopts & WAKE_UCAST) {
- DPRINTK(DRV, ERR, "Interface does not support "
- "directed (unicast) frame wake-up packets\n");
+ e_err("Interface does not support "
+ "directed (unicast) frame wake-up packets\n");
return -EOPNOTSUPP;
}
break;
@@ -1803,7 +1804,7 @@ static int e1000_get_coalesce(struct net_device *netdev,
if (adapter->hw.mac_type < e1000_82545)
return -EOPNOTSUPP;
- if (adapter->itr_setting <= 3)
+ if (adapter->itr_setting <= 4)
ec->rx_coalesce_usecs = adapter->itr_setting;
else
ec->rx_coalesce_usecs = 1000000 / adapter->itr_setting;
@@ -1821,12 +1822,14 @@ static int e1000_set_coalesce(struct net_device *netdev,
return -EOPNOTSUPP;
if ((ec->rx_coalesce_usecs > E1000_MAX_ITR_USECS) ||
- ((ec->rx_coalesce_usecs > 3) &&
+ ((ec->rx_coalesce_usecs > 4) &&
(ec->rx_coalesce_usecs < E1000_MIN_ITR_USECS)) ||
(ec->rx_coalesce_usecs == 2))
return -EINVAL;
- if (ec->rx_coalesce_usecs <= 3) {
+ if (ec->rx_coalesce_usecs == 4) {
+ adapter->itr = adapter->itr_setting = 4;
+ } else if (ec->rx_coalesce_usecs <= 3) {
adapter->itr = 20000;
adapter->itr_setting = ec->rx_coalesce_usecs;
} else {
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index 8d7d87f12827..c7e242b69a18 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -30,7 +30,7 @@
* Shared functions for accessing and configuring the MAC
*/
-#include "e1000_hw.h"
+#include "e1000.h"
static s32 e1000_check_downshift(struct e1000_hw *hw);
static s32 e1000_check_polarity(struct e1000_hw *hw,
@@ -114,7 +114,7 @@ static DEFINE_SPINLOCK(e1000_eeprom_lock);
*/
static s32 e1000_set_phy_type(struct e1000_hw *hw)
{
- DEBUGFUNC("e1000_set_phy_type");
+ e_dbg("e1000_set_phy_type");
if (hw->mac_type == e1000_undefined)
return -E1000_ERR_PHY_TYPE;
@@ -152,7 +152,7 @@ static void e1000_phy_init_script(struct e1000_hw *hw)
u32 ret_val;
u16 phy_saved_data;
- DEBUGFUNC("e1000_phy_init_script");
+ e_dbg("e1000_phy_init_script");
if (hw->phy_init_script) {
msleep(20);
@@ -245,7 +245,7 @@ static void e1000_phy_init_script(struct e1000_hw *hw)
*/
s32 e1000_set_mac_type(struct e1000_hw *hw)
{
- DEBUGFUNC("e1000_set_mac_type");
+ e_dbg("e1000_set_mac_type");
switch (hw->device_id) {
case E1000_DEV_ID_82542:
@@ -354,7 +354,7 @@ void e1000_set_media_type(struct e1000_hw *hw)
{
u32 status;
- DEBUGFUNC("e1000_set_media_type");
+ e_dbg("e1000_set_media_type");
if (hw->mac_type != e1000_82543) {
/* tbi_compatibility is only valid on 82543 */
@@ -401,16 +401,16 @@ s32 e1000_reset_hw(struct e1000_hw *hw)
u32 led_ctrl;
s32 ret_val;
- DEBUGFUNC("e1000_reset_hw");
+ e_dbg("e1000_reset_hw");
/* For 82542 (rev 2.0), disable MWI before issuing a device reset */
if (hw->mac_type == e1000_82542_rev2_0) {
- DEBUGOUT("Disabling MWI on 82542 rev 2.0\n");
+ e_dbg("Disabling MWI on 82542 rev 2.0\n");
e1000_pci_clear_mwi(hw);
}
/* Clear interrupt mask to stop board from generating interrupts */
- DEBUGOUT("Masking off all interrupts\n");
+ e_dbg("Masking off all interrupts\n");
ew32(IMC, 0xffffffff);
/* Disable the Transmit and Receive units. Then delay to allow
@@ -442,7 +442,7 @@ s32 e1000_reset_hw(struct e1000_hw *hw)
* the current PCI configuration. The global reset bit is self-
* clearing, and should clear within a microsecond.
*/
- DEBUGOUT("Issuing a global reset to MAC\n");
+ e_dbg("Issuing a global reset to MAC\n");
switch (hw->mac_type) {
case e1000_82544:
@@ -516,7 +516,7 @@ s32 e1000_reset_hw(struct e1000_hw *hw)
}
/* Clear interrupt mask to stop board from generating interrupts */
- DEBUGOUT("Masking off all interrupts\n");
+ e_dbg("Masking off all interrupts\n");
ew32(IMC, 0xffffffff);
/* Clear any pending interrupt events. */
@@ -549,12 +549,12 @@ s32 e1000_init_hw(struct e1000_hw *hw)
u32 mta_size;
u32 ctrl_ext;
- DEBUGFUNC("e1000_init_hw");
+ e_dbg("e1000_init_hw");
/* Initialize Identification LED */
ret_val = e1000_id_led_init(hw);
if (ret_val) {
- DEBUGOUT("Error Initializing Identification LED\n");
+ e_dbg("Error Initializing Identification LED\n");
return ret_val;
}
@@ -562,14 +562,14 @@ s32 e1000_init_hw(struct e1000_hw *hw)
e1000_set_media_type(hw);
/* Disabling VLAN filtering. */
- DEBUGOUT("Initializing the IEEE VLAN\n");
+ e_dbg("Initializing the IEEE VLAN\n");
if (hw->mac_type < e1000_82545_rev_3)
ew32(VET, 0);
e1000_clear_vfta(hw);
/* For 82542 (rev 2.0), disable MWI and put the receiver into reset */
if (hw->mac_type == e1000_82542_rev2_0) {
- DEBUGOUT("Disabling MWI on 82542 rev 2.0\n");
+ e_dbg("Disabling MWI on 82542 rev 2.0\n");
e1000_pci_clear_mwi(hw);
ew32(RCTL, E1000_RCTL_RST);
E1000_WRITE_FLUSH();
@@ -591,7 +591,7 @@ s32 e1000_init_hw(struct e1000_hw *hw)
}
/* Zero out the Multicast HASH table */
- DEBUGOUT("Zeroing the MTA\n");
+ e_dbg("Zeroing the MTA\n");
mta_size = E1000_MC_TBL_SIZE;
for (i = 0; i < mta_size; i++) {
E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
@@ -662,7 +662,7 @@ static s32 e1000_adjust_serdes_amplitude(struct e1000_hw *hw)
u16 eeprom_data;
s32 ret_val;
- DEBUGFUNC("e1000_adjust_serdes_amplitude");
+ e_dbg("e1000_adjust_serdes_amplitude");
if (hw->media_type != e1000_media_type_internal_serdes)
return E1000_SUCCESS;
@@ -709,7 +709,7 @@ s32 e1000_setup_link(struct e1000_hw *hw)
s32 ret_val;
u16 eeprom_data;
- DEBUGFUNC("e1000_setup_link");
+ e_dbg("e1000_setup_link");
/* Read and store word 0x0F of the EEPROM. This word contains bits
* that determine the hardware's default PAUSE (flow control) mode,
@@ -723,7 +723,7 @@ s32 e1000_setup_link(struct e1000_hw *hw)
ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG,
1, &eeprom_data);
if (ret_val) {
- DEBUGOUT("EEPROM Read Error\n");
+ e_dbg("EEPROM Read Error\n");
return -E1000_ERR_EEPROM;
}
if ((eeprom_data & EEPROM_WORD0F_PAUSE_MASK) == 0)
@@ -747,7 +747,7 @@ s32 e1000_setup_link(struct e1000_hw *hw)
hw->original_fc = hw->fc;
- DEBUGOUT1("After fix-ups FlowControl is now = %x\n", hw->fc);
+ e_dbg("After fix-ups FlowControl is now = %x\n", hw->fc);
/* Take the 4 bits from EEPROM word 0x0F that determine the initial
* polarity value for the SW controlled pins, and setup the
@@ -760,7 +760,7 @@ s32 e1000_setup_link(struct e1000_hw *hw)
ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG,
1, &eeprom_data);
if (ret_val) {
- DEBUGOUT("EEPROM Read Error\n");
+ e_dbg("EEPROM Read Error\n");
return -E1000_ERR_EEPROM;
}
ctrl_ext = ((eeprom_data & EEPROM_WORD0F_SWPDIO_EXT) <<
@@ -777,8 +777,7 @@ s32 e1000_setup_link(struct e1000_hw *hw)
* control is disabled, because it does not hurt anything to
* initialize these registers.
*/
- DEBUGOUT
- ("Initializing the Flow Control address, type and timer regs\n");
+ e_dbg("Initializing the Flow Control address, type and timer regs\n");
ew32(FCT, FLOW_CONTROL_TYPE);
ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH);
@@ -827,7 +826,7 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
u32 signal = 0;
s32 ret_val;
- DEBUGFUNC("e1000_setup_fiber_serdes_link");
+ e_dbg("e1000_setup_fiber_serdes_link");
/* On adapters with a MAC newer than 82544, SWDP 1 will be
* set when the optics detect a signal. On older adapters, it will be
@@ -893,7 +892,7 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
break;
default:
- DEBUGOUT("Flow control param set incorrectly\n");
+ e_dbg("Flow control param set incorrectly\n");
return -E1000_ERR_CONFIG;
break;
}
@@ -904,7 +903,7 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
* link-up status bit will be set and the flow control enable bits (RFCE
* and TFCE) will be set according to their negotiated value.
*/
- DEBUGOUT("Auto-negotiation enabled\n");
+ e_dbg("Auto-negotiation enabled\n");
ew32(TXCW, txcw);
ew32(CTRL, ctrl);
@@ -921,7 +920,7 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
*/
if (hw->media_type == e1000_media_type_internal_serdes ||
(er32(CTRL) & E1000_CTRL_SWDPIN1) == signal) {
- DEBUGOUT("Looking for Link\n");
+ e_dbg("Looking for Link\n");
for (i = 0; i < (LINK_UP_TIMEOUT / 10); i++) {
msleep(10);
status = er32(STATUS);
@@ -929,7 +928,7 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
break;
}
if (i == (LINK_UP_TIMEOUT / 10)) {
- DEBUGOUT("Never got a valid link from auto-neg!!!\n");
+ e_dbg("Never got a valid link from auto-neg!!!\n");
hw->autoneg_failed = 1;
/* AutoNeg failed to achieve a link, so we'll call
* e1000_check_for_link. This routine will force the link up if
@@ -938,16 +937,16 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
*/
ret_val = e1000_check_for_link(hw);
if (ret_val) {
- DEBUGOUT("Error while checking for link\n");
+ e_dbg("Error while checking for link\n");
return ret_val;
}
hw->autoneg_failed = 0;
} else {
hw->autoneg_failed = 0;
- DEBUGOUT("Valid Link Found\n");
+ e_dbg("Valid Link Found\n");
}
} else {
- DEBUGOUT("No Signal Detected\n");
+ e_dbg("No Signal Detected\n");
}
return E1000_SUCCESS;
}
@@ -964,7 +963,7 @@ static s32 e1000_copper_link_preconfig(struct e1000_hw *hw)
s32 ret_val;
u16 phy_data;
- DEBUGFUNC("e1000_copper_link_preconfig");
+ e_dbg("e1000_copper_link_preconfig");
ctrl = er32(CTRL);
/* With 82543, we need to force speed and duplex on the MAC equal to what
@@ -987,10 +986,10 @@ static s32 e1000_copper_link_preconfig(struct e1000_hw *hw)
/* Make sure we have a valid PHY */
ret_val = e1000_detect_gig_phy(hw);
if (ret_val) {
- DEBUGOUT("Error, did not detect valid phy.\n");
+ e_dbg("Error, did not detect valid phy.\n");
return ret_val;
}
- DEBUGOUT1("Phy ID = %x \n", hw->phy_id);
+ e_dbg("Phy ID = %x\n", hw->phy_id);
/* Set PHY to class A mode (if necessary) */
ret_val = e1000_set_phy_mode(hw);
@@ -1025,14 +1024,14 @@ static s32 e1000_copper_link_igp_setup(struct e1000_hw *hw)
s32 ret_val;
u16 phy_data;
- DEBUGFUNC("e1000_copper_link_igp_setup");
+ e_dbg("e1000_copper_link_igp_setup");
if (hw->phy_reset_disable)
return E1000_SUCCESS;
ret_val = e1000_phy_reset(hw);
if (ret_val) {
- DEBUGOUT("Error Resetting the PHY\n");
+ e_dbg("Error Resetting the PHY\n");
return ret_val;
}
@@ -1049,7 +1048,7 @@ static s32 e1000_copper_link_igp_setup(struct e1000_hw *hw)
/* disable lplu d3 during driver init */
ret_val = e1000_set_d3_lplu_state(hw, false);
if (ret_val) {
- DEBUGOUT("Error Disabling LPLU D3\n");
+ e_dbg("Error Disabling LPLU D3\n");
return ret_val;
}
}
@@ -1166,7 +1165,7 @@ static s32 e1000_copper_link_mgp_setup(struct e1000_hw *hw)
s32 ret_val;
u16 phy_data;
- DEBUGFUNC("e1000_copper_link_mgp_setup");
+ e_dbg("e1000_copper_link_mgp_setup");
if (hw->phy_reset_disable)
return E1000_SUCCESS;
@@ -1255,7 +1254,7 @@ static s32 e1000_copper_link_mgp_setup(struct e1000_hw *hw)
/* SW Reset the PHY so all changes take effect */
ret_val = e1000_phy_reset(hw);
if (ret_val) {
- DEBUGOUT("Error Resetting the PHY\n");
+ e_dbg("Error Resetting the PHY\n");
return ret_val;
}
@@ -1274,7 +1273,7 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
s32 ret_val;
u16 phy_data;
- DEBUGFUNC("e1000_copper_link_autoneg");
+ e_dbg("e1000_copper_link_autoneg");
/* Perform some bounds checking on the hw->autoneg_advertised
* parameter. If this variable is zero, then set it to the default.
@@ -1287,13 +1286,13 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
if (hw->autoneg_advertised == 0)
hw->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT;
- DEBUGOUT("Reconfiguring auto-neg advertisement params\n");
+ e_dbg("Reconfiguring auto-neg advertisement params\n");
ret_val = e1000_phy_setup_autoneg(hw);
if (ret_val) {
- DEBUGOUT("Error Setting up Auto-Negotiation\n");
+ e_dbg("Error Setting up Auto-Negotiation\n");
return ret_val;
}
- DEBUGOUT("Restarting Auto-Neg\n");
+ e_dbg("Restarting Auto-Neg\n");
/* Restart auto-negotiation by setting the Auto Neg Enable bit and
* the Auto Neg Restart bit in the PHY control register.
@@ -1313,7 +1312,7 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
if (hw->wait_autoneg_complete) {
ret_val = e1000_wait_autoneg(hw);
if (ret_val) {
- DEBUGOUT
+ e_dbg
("Error while waiting for autoneg to complete\n");
return ret_val;
}
@@ -1340,20 +1339,20 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
static s32 e1000_copper_link_postconfig(struct e1000_hw *hw)
{
s32 ret_val;
- DEBUGFUNC("e1000_copper_link_postconfig");
+ e_dbg("e1000_copper_link_postconfig");
if (hw->mac_type >= e1000_82544) {
e1000_config_collision_dist(hw);
} else {
ret_val = e1000_config_mac_to_phy(hw);
if (ret_val) {
- DEBUGOUT("Error configuring MAC to PHY settings\n");
+ e_dbg("Error configuring MAC to PHY settings\n");
return ret_val;
}
}
ret_val = e1000_config_fc_after_link_up(hw);
if (ret_val) {
- DEBUGOUT("Error Configuring Flow Control\n");
+ e_dbg("Error Configuring Flow Control\n");
return ret_val;
}
@@ -1361,7 +1360,7 @@ static s32 e1000_copper_link_postconfig(struct e1000_hw *hw)
if (hw->phy_type == e1000_phy_igp) {
ret_val = e1000_config_dsp_after_link_change(hw, true);
if (ret_val) {
- DEBUGOUT("Error Configuring DSP after link up\n");
+ e_dbg("Error Configuring DSP after link up\n");
return ret_val;
}
}
@@ -1381,7 +1380,7 @@ static s32 e1000_setup_copper_link(struct e1000_hw *hw)
u16 i;
u16 phy_data;
- DEBUGFUNC("e1000_setup_copper_link");
+ e_dbg("e1000_setup_copper_link");
/* Check if it is a valid PHY and set PHY mode if necessary. */
ret_val = e1000_copper_link_preconfig(hw);
@@ -1407,10 +1406,10 @@ static s32 e1000_setup_copper_link(struct e1000_hw *hw)
} else {
/* PHY will be set to 10H, 10F, 100H,or 100F
* depending on value from forced_speed_duplex. */
- DEBUGOUT("Forcing speed and duplex\n");
+ e_dbg("Forcing speed and duplex\n");
ret_val = e1000_phy_force_speed_duplex(hw);
if (ret_val) {
- DEBUGOUT("Error Forcing Speed and Duplex\n");
+ e_dbg("Error Forcing Speed and Duplex\n");
return ret_val;
}
}
@@ -1432,13 +1431,13 @@ static s32 e1000_setup_copper_link(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- DEBUGOUT("Valid link established!!!\n");
+ e_dbg("Valid link established!!!\n");
return E1000_SUCCESS;
}
udelay(10);
}
- DEBUGOUT("Unable to establish link!!!\n");
+ e_dbg("Unable to establish link!!!\n");
return E1000_SUCCESS;
}
@@ -1454,7 +1453,7 @@ s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
u16 mii_autoneg_adv_reg;
u16 mii_1000t_ctrl_reg;
- DEBUGFUNC("e1000_phy_setup_autoneg");
+ e_dbg("e1000_phy_setup_autoneg");
/* Read the MII Auto-Neg Advertisement Register (Address 4). */
ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg);
@@ -1481,41 +1480,41 @@ s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
mii_autoneg_adv_reg &= ~REG4_SPEED_MASK;
mii_1000t_ctrl_reg &= ~REG9_SPEED_MASK;
- DEBUGOUT1("autoneg_advertised %x\n", hw->autoneg_advertised);
+ e_dbg("autoneg_advertised %x\n", hw->autoneg_advertised);
/* Do we want to advertise 10 Mb Half Duplex? */
if (hw->autoneg_advertised & ADVERTISE_10_HALF) {
- DEBUGOUT("Advertise 10mb Half duplex\n");
+ e_dbg("Advertise 10mb Half duplex\n");
mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS;
}
/* Do we want to advertise 10 Mb Full Duplex? */
if (hw->autoneg_advertised & ADVERTISE_10_FULL) {
- DEBUGOUT("Advertise 10mb Full duplex\n");
+ e_dbg("Advertise 10mb Full duplex\n");
mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS;
}
/* Do we want to advertise 100 Mb Half Duplex? */
if (hw->autoneg_advertised & ADVERTISE_100_HALF) {
- DEBUGOUT("Advertise 100mb Half duplex\n");
+ e_dbg("Advertise 100mb Half duplex\n");
mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS;
}
/* Do we want to advertise 100 Mb Full Duplex? */
if (hw->autoneg_advertised & ADVERTISE_100_FULL) {
- DEBUGOUT("Advertise 100mb Full duplex\n");
+ e_dbg("Advertise 100mb Full duplex\n");
mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS;
}
/* We do not allow the Phy to advertise 1000 Mb Half Duplex */
if (hw->autoneg_advertised & ADVERTISE_1000_HALF) {
- DEBUGOUT
+ e_dbg
("Advertise 1000mb Half duplex requested, request denied!\n");
}
/* Do we want to advertise 1000 Mb Full Duplex? */
if (hw->autoneg_advertised & ADVERTISE_1000_FULL) {
- DEBUGOUT("Advertise 1000mb Full duplex\n");
+ e_dbg("Advertise 1000mb Full duplex\n");
mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
}
@@ -1568,7 +1567,7 @@ s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
break;
default:
- DEBUGOUT("Flow control param set incorrectly\n");
+ e_dbg("Flow control param set incorrectly\n");
return -E1000_ERR_CONFIG;
}
@@ -1576,7 +1575,7 @@ s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- DEBUGOUT1("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
+ e_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg);
if (ret_val)
@@ -1600,12 +1599,12 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
u16 phy_data;
u16 i;
- DEBUGFUNC("e1000_phy_force_speed_duplex");
+ e_dbg("e1000_phy_force_speed_duplex");
/* Turn off Flow control if we are forcing speed and duplex. */
hw->fc = E1000_FC_NONE;
- DEBUGOUT1("hw->fc = %d\n", hw->fc);
+ e_dbg("hw->fc = %d\n", hw->fc);
/* Read the Device Control Register. */
ctrl = er32(CTRL);
@@ -1634,14 +1633,14 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
*/
ctrl |= E1000_CTRL_FD;
mii_ctrl_reg |= MII_CR_FULL_DUPLEX;
- DEBUGOUT("Full Duplex\n");
+ e_dbg("Full Duplex\n");
} else {
/* We want to force half duplex so we CLEAR the full duplex bits in
* the Device and MII Control Registers.
*/
ctrl &= ~E1000_CTRL_FD;
mii_ctrl_reg &= ~MII_CR_FULL_DUPLEX;
- DEBUGOUT("Half Duplex\n");
+ e_dbg("Half Duplex\n");
}
/* Are we forcing 100Mbps??? */
@@ -1651,13 +1650,13 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
ctrl |= E1000_CTRL_SPD_100;
mii_ctrl_reg |= MII_CR_SPEED_100;
mii_ctrl_reg &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10);
- DEBUGOUT("Forcing 100mb ");
+ e_dbg("Forcing 100mb ");
} else {
/* Set the 10Mb bit and turn off the 1000Mb and 100Mb bits. */
ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
mii_ctrl_reg |= MII_CR_SPEED_10;
mii_ctrl_reg &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100);
- DEBUGOUT("Forcing 10mb ");
+ e_dbg("Forcing 10mb ");
}
e1000_config_collision_dist(hw);
@@ -1680,7 +1679,7 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- DEBUGOUT1("M88E1000 PSCR: %x \n", phy_data);
+ e_dbg("M88E1000 PSCR: %x\n", phy_data);
/* Need to reset the PHY or these changes will be ignored */
mii_ctrl_reg |= MII_CR_RESET;
@@ -1720,7 +1719,7 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
*/
if (hw->wait_autoneg_complete) {
/* We will wait for autoneg to complete. */
- DEBUGOUT("Waiting for forced speed/duplex link.\n");
+ e_dbg("Waiting for forced speed/duplex link.\n");
mii_status_reg = 0;
/* We will wait for autoneg to complete or 4.5 seconds to expire. */
@@ -1746,7 +1745,7 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
/* We didn't get link. Reset the DSP and wait again for link. */
ret_val = e1000_phy_reset_dsp(hw);
if (ret_val) {
- DEBUGOUT("Error Resetting PHY DSP\n");
+ e_dbg("Error Resetting PHY DSP\n");
return ret_val;
}
}
@@ -1826,7 +1825,7 @@ void e1000_config_collision_dist(struct e1000_hw *hw)
{
u32 tctl, coll_dist;
- DEBUGFUNC("e1000_config_collision_dist");
+ e_dbg("e1000_config_collision_dist");
if (hw->mac_type < e1000_82543)
coll_dist = E1000_COLLISION_DISTANCE_82542;
@@ -1857,7 +1856,7 @@ static s32 e1000_config_mac_to_phy(struct e1000_hw *hw)
s32 ret_val;
u16 phy_data;
- DEBUGFUNC("e1000_config_mac_to_phy");
+ e_dbg("e1000_config_mac_to_phy");
/* 82544 or newer MAC, Auto Speed Detection takes care of
* MAC speed/duplex configuration.*/
@@ -1913,7 +1912,7 @@ s32 e1000_force_mac_fc(struct e1000_hw *hw)
{
u32 ctrl;
- DEBUGFUNC("e1000_force_mac_fc");
+ e_dbg("e1000_force_mac_fc");
/* Get the current configuration of the Device Control Register */
ctrl = er32(CTRL);
@@ -1952,7 +1951,7 @@ s32 e1000_force_mac_fc(struct e1000_hw *hw)
ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
break;
default:
- DEBUGOUT("Flow control param set incorrectly\n");
+ e_dbg("Flow control param set incorrectly\n");
return -E1000_ERR_CONFIG;
}
@@ -1984,7 +1983,7 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
u16 speed;
u16 duplex;
- DEBUGFUNC("e1000_config_fc_after_link_up");
+ e_dbg("e1000_config_fc_after_link_up");
/* Check for the case where we have fiber media and auto-neg failed
* so we had to force link. In this case, we need to force the
@@ -1997,7 +1996,7 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
&& (!hw->autoneg))) {
ret_val = e1000_force_mac_fc(hw);
if (ret_val) {
- DEBUGOUT("Error forcing flow control settings\n");
+ e_dbg("Error forcing flow control settings\n");
return ret_val;
}
}
@@ -2079,10 +2078,10 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
*/
if (hw->original_fc == E1000_FC_FULL) {
hw->fc = E1000_FC_FULL;
- DEBUGOUT("Flow Control = FULL.\n");
+ e_dbg("Flow Control = FULL.\n");
} else {
hw->fc = E1000_FC_RX_PAUSE;
- DEBUGOUT
+ e_dbg
("Flow Control = RX PAUSE frames only.\n");
}
}
@@ -2100,7 +2099,7 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
(mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR))
{
hw->fc = E1000_FC_TX_PAUSE;
- DEBUGOUT
+ e_dbg
("Flow Control = TX PAUSE frames only.\n");
}
/* For transmitting PAUSE frames ONLY.
@@ -2117,7 +2116,7 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
(mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR))
{
hw->fc = E1000_FC_RX_PAUSE;
- DEBUGOUT
+ e_dbg
("Flow Control = RX PAUSE frames only.\n");
}
/* Per the IEEE spec, at this point flow control should be
@@ -2144,10 +2143,10 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
hw->original_fc == E1000_FC_TX_PAUSE) ||
hw->fc_strict_ieee) {
hw->fc = E1000_FC_NONE;
- DEBUGOUT("Flow Control = NONE.\n");
+ e_dbg("Flow Control = NONE.\n");
} else {
hw->fc = E1000_FC_RX_PAUSE;
- DEBUGOUT
+ e_dbg
("Flow Control = RX PAUSE frames only.\n");
}
@@ -2158,7 +2157,7 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
ret_val =
e1000_get_speed_and_duplex(hw, &speed, &duplex);
if (ret_val) {
- DEBUGOUT
+ e_dbg
("Error getting link speed and duplex\n");
return ret_val;
}
@@ -2171,12 +2170,12 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
*/
ret_val = e1000_force_mac_fc(hw);
if (ret_val) {
- DEBUGOUT
+ e_dbg
("Error forcing flow control settings\n");
return ret_val;
}
} else {
- DEBUGOUT
+ e_dbg
("Copper PHY and Auto Neg has not completed.\n");
}
}
@@ -2197,7 +2196,7 @@ static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
u32 status;
s32 ret_val = E1000_SUCCESS;
- DEBUGFUNC("e1000_check_for_serdes_link_generic");
+ e_dbg("e1000_check_for_serdes_link_generic");
ctrl = er32(CTRL);
status = er32(STATUS);
@@ -2216,7 +2215,7 @@ static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
hw->autoneg_failed = 1;
goto out;
}
- DEBUGOUT("NOT RXing /C/, disable AutoNeg and force link.\n");
+ e_dbg("NOT RXing /C/, disable AutoNeg and force link.\n");
/* Disable auto-negotiation in the TXCW register */
ew32(TXCW, (hw->txcw & ~E1000_TXCW_ANE));
@@ -2229,7 +2228,7 @@ static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
/* Configure Flow Control after forcing link up. */
ret_val = e1000_config_fc_after_link_up(hw);
if (ret_val) {
- DEBUGOUT("Error configuring flow control\n");
+ e_dbg("Error configuring flow control\n");
goto out;
}
} else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
@@ -2239,7 +2238,7 @@ static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
* and disable forced link in the Device Control register
* in an attempt to auto-negotiate with our link partner.
*/
- DEBUGOUT("RXing /C/, enable AutoNeg and stop forcing link.\n");
+ e_dbg("RXing /C/, enable AutoNeg and stop forcing link.\n");
ew32(TXCW, hw->txcw);
ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
@@ -2256,11 +2255,11 @@ static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
if (rxcw & E1000_RXCW_SYNCH) {
if (!(rxcw & E1000_RXCW_IV)) {
hw->serdes_has_link = true;
- DEBUGOUT("SERDES: Link up - forced.\n");
+ e_dbg("SERDES: Link up - forced.\n");
}
} else {
hw->serdes_has_link = false;
- DEBUGOUT("SERDES: Link down - force failed.\n");
+ e_dbg("SERDES: Link down - force failed.\n");
}
}
@@ -2273,20 +2272,20 @@ static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
if (rxcw & E1000_RXCW_SYNCH) {
if (!(rxcw & E1000_RXCW_IV)) {
hw->serdes_has_link = true;
- DEBUGOUT("SERDES: Link up - autoneg "
+ e_dbg("SERDES: Link up - autoneg "
"completed successfully.\n");
} else {
hw->serdes_has_link = false;
- DEBUGOUT("SERDES: Link down - invalid"
+ e_dbg("SERDES: Link down - invalid"
"codewords detected in autoneg.\n");
}
} else {
hw->serdes_has_link = false;
- DEBUGOUT("SERDES: Link down - no sync.\n");
+ e_dbg("SERDES: Link down - no sync.\n");
}
} else {
hw->serdes_has_link = false;
- DEBUGOUT("SERDES: Link down - autoneg failed\n");
+ e_dbg("SERDES: Link down - autoneg failed\n");
}
}
@@ -2312,7 +2311,7 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
s32 ret_val;
u16 phy_data;
- DEBUGFUNC("e1000_check_for_link");
+ e_dbg("e1000_check_for_link");
ctrl = er32(CTRL);
status = er32(STATUS);
@@ -2407,7 +2406,7 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
else {
ret_val = e1000_config_mac_to_phy(hw);
if (ret_val) {
- DEBUGOUT
+ e_dbg
("Error configuring MAC to PHY settings\n");
return ret_val;
}
@@ -2419,7 +2418,7 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
*/
ret_val = e1000_config_fc_after_link_up(hw);
if (ret_val) {
- DEBUGOUT("Error configuring flow control\n");
+ e_dbg("Error configuring flow control\n");
return ret_val;
}
@@ -2435,7 +2434,7 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
ret_val =
e1000_get_speed_and_duplex(hw, &speed, &duplex);
if (ret_val) {
- DEBUGOUT
+ e_dbg
("Error getting link speed and duplex\n");
return ret_val;
}
@@ -2487,30 +2486,30 @@ s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex)
s32 ret_val;
u16 phy_data;
- DEBUGFUNC("e1000_get_speed_and_duplex");
+ e_dbg("e1000_get_speed_and_duplex");
if (hw->mac_type >= e1000_82543) {
status = er32(STATUS);
if (status & E1000_STATUS_SPEED_1000) {
*speed = SPEED_1000;
- DEBUGOUT("1000 Mbs, ");
+ e_dbg("1000 Mbs, ");
} else if (status & E1000_STATUS_SPEED_100) {
*speed = SPEED_100;
- DEBUGOUT("100 Mbs, ");
+ e_dbg("100 Mbs, ");
} else {
*speed = SPEED_10;
- DEBUGOUT("10 Mbs, ");
+ e_dbg("10 Mbs, ");
}
if (status & E1000_STATUS_FD) {
*duplex = FULL_DUPLEX;
- DEBUGOUT("Full Duplex\n");
+ e_dbg("Full Duplex\n");
} else {
*duplex = HALF_DUPLEX;
- DEBUGOUT(" Half Duplex\n");
+ e_dbg(" Half Duplex\n");
}
} else {
- DEBUGOUT("1000 Mbs, Full Duplex\n");
+ e_dbg("1000 Mbs, Full Duplex\n");
*speed = SPEED_1000;
*duplex = FULL_DUPLEX;
}
@@ -2554,8 +2553,8 @@ static s32 e1000_wait_autoneg(struct e1000_hw *hw)
u16 i;
u16 phy_data;
- DEBUGFUNC("e1000_wait_autoneg");
- DEBUGOUT("Waiting for Auto-Neg to complete.\n");
+ e_dbg("e1000_wait_autoneg");
+ e_dbg("Waiting for Auto-Neg to complete.\n");
/* We will wait for autoneg to complete or 4.5 seconds to expire. */
for (i = PHY_AUTO_NEG_TIME; i > 0; i--) {
@@ -2718,7 +2717,7 @@ s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 *phy_data)
{
u32 ret_val;
- DEBUGFUNC("e1000_read_phy_reg");
+ e_dbg("e1000_read_phy_reg");
if ((hw->phy_type == e1000_phy_igp) &&
(reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
@@ -2741,10 +2740,10 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
u32 mdic = 0;
const u32 phy_addr = 1;
- DEBUGFUNC("e1000_read_phy_reg_ex");
+ e_dbg("e1000_read_phy_reg_ex");
if (reg_addr > MAX_PHY_REG_ADDRESS) {
- DEBUGOUT1("PHY Address %d is out of range\n", reg_addr);
+ e_dbg("PHY Address %d is out of range\n", reg_addr);
return -E1000_ERR_PARAM;
}
@@ -2767,11 +2766,11 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
break;
}
if (!(mdic & E1000_MDIC_READY)) {
- DEBUGOUT("MDI Read did not complete\n");
+ e_dbg("MDI Read did not complete\n");
return -E1000_ERR_PHY;
}
if (mdic & E1000_MDIC_ERROR) {
- DEBUGOUT("MDI Error\n");
+ e_dbg("MDI Error\n");
return -E1000_ERR_PHY;
}
*phy_data = (u16) mdic;
@@ -2820,7 +2819,7 @@ s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 phy_data)
{
u32 ret_val;
- DEBUGFUNC("e1000_write_phy_reg");
+ e_dbg("e1000_write_phy_reg");
if ((hw->phy_type == e1000_phy_igp) &&
(reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
@@ -2843,10 +2842,10 @@ static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
u32 mdic = 0;
const u32 phy_addr = 1;
- DEBUGFUNC("e1000_write_phy_reg_ex");
+ e_dbg("e1000_write_phy_reg_ex");
if (reg_addr > MAX_PHY_REG_ADDRESS) {
- DEBUGOUT1("PHY Address %d is out of range\n", reg_addr);
+ e_dbg("PHY Address %d is out of range\n", reg_addr);
return -E1000_ERR_PARAM;
}
@@ -2870,7 +2869,7 @@ static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
break;
}
if (!(mdic & E1000_MDIC_READY)) {
- DEBUGOUT("MDI Write did not complete\n");
+ e_dbg("MDI Write did not complete\n");
return -E1000_ERR_PHY;
}
} else {
@@ -2910,9 +2909,9 @@ s32 e1000_phy_hw_reset(struct e1000_hw *hw)
u32 led_ctrl;
s32 ret_val;
- DEBUGFUNC("e1000_phy_hw_reset");
+ e_dbg("e1000_phy_hw_reset");
- DEBUGOUT("Resetting Phy...\n");
+ e_dbg("Resetting Phy...\n");
if (hw->mac_type > e1000_82543) {
/* Read the device control register and assert the E1000_CTRL_PHY_RST
@@ -2973,7 +2972,7 @@ s32 e1000_phy_reset(struct e1000_hw *hw)
s32 ret_val;
u16 phy_data;
- DEBUGFUNC("e1000_phy_reset");
+ e_dbg("e1000_phy_reset");
switch (hw->phy_type) {
case e1000_phy_igp:
@@ -3013,7 +3012,7 @@ static s32 e1000_detect_gig_phy(struct e1000_hw *hw)
u16 phy_id_high, phy_id_low;
bool match = false;
- DEBUGFUNC("e1000_detect_gig_phy");
+ e_dbg("e1000_detect_gig_phy");
if (hw->phy_id != 0)
return E1000_SUCCESS;
@@ -3057,16 +3056,16 @@ static s32 e1000_detect_gig_phy(struct e1000_hw *hw)
match = true;
break;
default:
- DEBUGOUT1("Invalid MAC type %d\n", hw->mac_type);
+ e_dbg("Invalid MAC type %d\n", hw->mac_type);
return -E1000_ERR_CONFIG;
}
phy_init_status = e1000_set_phy_type(hw);
if ((match) && (phy_init_status == E1000_SUCCESS)) {
- DEBUGOUT1("PHY ID 0x%X detected\n", hw->phy_id);
+ e_dbg("PHY ID 0x%X detected\n", hw->phy_id);
return E1000_SUCCESS;
}
- DEBUGOUT1("Invalid PHY ID 0x%X\n", hw->phy_id);
+ e_dbg("Invalid PHY ID 0x%X\n", hw->phy_id);
return -E1000_ERR_PHY;
}
@@ -3079,7 +3078,7 @@ static s32 e1000_detect_gig_phy(struct e1000_hw *hw)
static s32 e1000_phy_reset_dsp(struct e1000_hw *hw)
{
s32 ret_val;
- DEBUGFUNC("e1000_phy_reset_dsp");
+ e_dbg("e1000_phy_reset_dsp");
do {
ret_val = e1000_write_phy_reg(hw, 29, 0x001d);
@@ -3111,7 +3110,7 @@ static s32 e1000_phy_igp_get_info(struct e1000_hw *hw,
u16 phy_data, min_length, max_length, average;
e1000_rev_polarity polarity;
- DEBUGFUNC("e1000_phy_igp_get_info");
+ e_dbg("e1000_phy_igp_get_info");
/* The downshift status is checked only once, after link is established,
* and it stored in the hw->speed_downgraded parameter. */
@@ -3189,7 +3188,7 @@ static s32 e1000_phy_m88_get_info(struct e1000_hw *hw,
u16 phy_data;
e1000_rev_polarity polarity;
- DEBUGFUNC("e1000_phy_m88_get_info");
+ e_dbg("e1000_phy_m88_get_info");
/* The downshift status is checked only once, after link is established,
* and it stored in the hw->speed_downgraded parameter. */
@@ -3261,7 +3260,7 @@ s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info)
s32 ret_val;
u16 phy_data;
- DEBUGFUNC("e1000_phy_get_info");
+ e_dbg("e1000_phy_get_info");
phy_info->cable_length = e1000_cable_length_undefined;
phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_undefined;
@@ -3273,7 +3272,7 @@ s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info)
phy_info->remote_rx = e1000_1000t_rx_status_undefined;
if (hw->media_type != e1000_media_type_copper) {
- DEBUGOUT("PHY info is only valid for copper media\n");
+ e_dbg("PHY info is only valid for copper media\n");
return -E1000_ERR_CONFIG;
}
@@ -3286,7 +3285,7 @@ s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info)
return ret_val;
if ((phy_data & MII_SR_LINK_STATUS) != MII_SR_LINK_STATUS) {
- DEBUGOUT("PHY info is only valid if link is up\n");
+ e_dbg("PHY info is only valid if link is up\n");
return -E1000_ERR_CONFIG;
}
@@ -3298,10 +3297,10 @@ s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info)
s32 e1000_validate_mdi_setting(struct e1000_hw *hw)
{
- DEBUGFUNC("e1000_validate_mdi_settings");
+ e_dbg("e1000_validate_mdi_settings");
if (!hw->autoneg && (hw->mdix == 0 || hw->mdix == 3)) {
- DEBUGOUT("Invalid MDI setting detected\n");
+ e_dbg("Invalid MDI setting detected\n");
hw->mdix = 1;
return -E1000_ERR_CONFIG;
}
@@ -3322,7 +3321,7 @@ s32 e1000_init_eeprom_params(struct e1000_hw *hw)
s32 ret_val = E1000_SUCCESS;
u16 eeprom_size;
- DEBUGFUNC("e1000_init_eeprom_params");
+ e_dbg("e1000_init_eeprom_params");
switch (hw->mac_type) {
case e1000_82542_rev2_0:
@@ -3539,7 +3538,7 @@ static s32 e1000_acquire_eeprom(struct e1000_hw *hw)
struct e1000_eeprom_info *eeprom = &hw->eeprom;
u32 eecd, i = 0;
- DEBUGFUNC("e1000_acquire_eeprom");
+ e_dbg("e1000_acquire_eeprom");
eecd = er32(EECD);
@@ -3557,7 +3556,7 @@ static s32 e1000_acquire_eeprom(struct e1000_hw *hw)
if (!(eecd & E1000_EECD_GNT)) {
eecd &= ~E1000_EECD_REQ;
ew32(EECD, eecd);
- DEBUGOUT("Could not acquire EEPROM grant\n");
+ e_dbg("Could not acquire EEPROM grant\n");
return -E1000_ERR_EEPROM;
}
}
@@ -3639,7 +3638,7 @@ static void e1000_release_eeprom(struct e1000_hw *hw)
{
u32 eecd;
- DEBUGFUNC("e1000_release_eeprom");
+ e_dbg("e1000_release_eeprom");
eecd = er32(EECD);
@@ -3687,7 +3686,7 @@ static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw)
u16 retry_count = 0;
u8 spi_stat_reg;
- DEBUGFUNC("e1000_spi_eeprom_ready");
+ e_dbg("e1000_spi_eeprom_ready");
/* Read "Status Register" repeatedly until the LSB is cleared. The
* EEPROM will signal that the command has been completed by clearing
@@ -3712,7 +3711,7 @@ static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw)
* only 0-5mSec on 5V devices)
*/
if (retry_count >= EEPROM_MAX_RETRY_SPI) {
- DEBUGOUT("SPI EEPROM Status error\n");
+ e_dbg("SPI EEPROM Status error\n");
return -E1000_ERR_EEPROM;
}
@@ -3741,7 +3740,7 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
struct e1000_eeprom_info *eeprom = &hw->eeprom;
u32 i = 0;
- DEBUGFUNC("e1000_read_eeprom");
+ e_dbg("e1000_read_eeprom");
/* If eeprom is not yet detected, do so now */
if (eeprom->word_size == 0)
@@ -3752,9 +3751,8 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
*/
if ((offset >= eeprom->word_size)
|| (words > eeprom->word_size - offset) || (words == 0)) {
- DEBUGOUT2
- ("\"words\" parameter out of bounds. Words = %d, size = %d\n",
- offset, eeprom->word_size);
+ e_dbg("\"words\" parameter out of bounds. Words = %d,"
+ "size = %d\n", offset, eeprom->word_size);
return -E1000_ERR_EEPROM;
}
@@ -3832,11 +3830,11 @@ s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw)
u16 checksum = 0;
u16 i, eeprom_data;
- DEBUGFUNC("e1000_validate_eeprom_checksum");
+ e_dbg("e1000_validate_eeprom_checksum");
for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) {
if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) {
- DEBUGOUT("EEPROM Read Error\n");
+ e_dbg("EEPROM Read Error\n");
return -E1000_ERR_EEPROM;
}
checksum += eeprom_data;
@@ -3845,7 +3843,7 @@ s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw)
if (checksum == (u16) EEPROM_SUM)
return E1000_SUCCESS;
else {
- DEBUGOUT("EEPROM Checksum Invalid\n");
+ e_dbg("EEPROM Checksum Invalid\n");
return -E1000_ERR_EEPROM;
}
}
@@ -3862,18 +3860,18 @@ s32 e1000_update_eeprom_checksum(struct e1000_hw *hw)
u16 checksum = 0;
u16 i, eeprom_data;
- DEBUGFUNC("e1000_update_eeprom_checksum");
+ e_dbg("e1000_update_eeprom_checksum");
for (i = 0; i < EEPROM_CHECKSUM_REG; i++) {
if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) {
- DEBUGOUT("EEPROM Read Error\n");
+ e_dbg("EEPROM Read Error\n");
return -E1000_ERR_EEPROM;
}
checksum += eeprom_data;
}
checksum = (u16) EEPROM_SUM - checksum;
if (e1000_write_eeprom(hw, EEPROM_CHECKSUM_REG, 1, &checksum) < 0) {
- DEBUGOUT("EEPROM Write Error\n");
+ e_dbg("EEPROM Write Error\n");
return -E1000_ERR_EEPROM;
}
return E1000_SUCCESS;
@@ -3904,7 +3902,7 @@ static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
struct e1000_eeprom_info *eeprom = &hw->eeprom;
s32 status = 0;
- DEBUGFUNC("e1000_write_eeprom");
+ e_dbg("e1000_write_eeprom");
/* If eeprom is not yet detected, do so now */
if (eeprom->word_size == 0)
@@ -3915,7 +3913,7 @@ static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
*/
if ((offset >= eeprom->word_size)
|| (words > eeprom->word_size - offset) || (words == 0)) {
- DEBUGOUT("\"words\" parameter out of bounds\n");
+ e_dbg("\"words\" parameter out of bounds\n");
return -E1000_ERR_EEPROM;
}
@@ -3949,7 +3947,7 @@ static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, u16 words,
struct e1000_eeprom_info *eeprom = &hw->eeprom;
u16 widx = 0;
- DEBUGFUNC("e1000_write_eeprom_spi");
+ e_dbg("e1000_write_eeprom_spi");
while (widx < words) {
u8 write_opcode = EEPROM_WRITE_OPCODE_SPI;
@@ -4013,7 +4011,7 @@ static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset,
u16 words_written = 0;
u16 i = 0;
- DEBUGFUNC("e1000_write_eeprom_microwire");
+ e_dbg("e1000_write_eeprom_microwire");
/* Send the write enable command to the EEPROM (3-bit opcode plus
* 6/8-bit dummy address beginning with 11). It's less work to include
@@ -4056,7 +4054,7 @@ static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset,
udelay(50);
}
if (i == 200) {
- DEBUGOUT("EEPROM Write did not complete\n");
+ e_dbg("EEPROM Write did not complete\n");
return -E1000_ERR_EEPROM;
}
@@ -4092,12 +4090,12 @@ s32 e1000_read_mac_addr(struct e1000_hw *hw)
u16 offset;
u16 eeprom_data, i;
- DEBUGFUNC("e1000_read_mac_addr");
+ e_dbg("e1000_read_mac_addr");
for (i = 0; i < NODE_ADDRESS_SIZE; i += 2) {
offset = i >> 1;
if (e1000_read_eeprom(hw, offset, 1, &eeprom_data) < 0) {
- DEBUGOUT("EEPROM Read Error\n");
+ e_dbg("EEPROM Read Error\n");
return -E1000_ERR_EEPROM;
}
hw->perm_mac_addr[i] = (u8) (eeprom_data & 0x00FF);
@@ -4132,17 +4130,17 @@ static void e1000_init_rx_addrs(struct e1000_hw *hw)
u32 i;
u32 rar_num;
- DEBUGFUNC("e1000_init_rx_addrs");
+ e_dbg("e1000_init_rx_addrs");
/* Setup the receive address. */
- DEBUGOUT("Programming MAC Address into RAR[0]\n");
+ e_dbg("Programming MAC Address into RAR[0]\n");
e1000_rar_set(hw, hw->mac_addr, 0);
rar_num = E1000_RAR_ENTRIES;
/* Zero out the other 15 receive addresses. */
- DEBUGOUT("Clearing RAR[1-15]\n");
+ e_dbg("Clearing RAR[1-15]\n");
for (i = 1; i < rar_num; i++) {
E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
E1000_WRITE_FLUSH();
@@ -4290,7 +4288,7 @@ static s32 e1000_id_led_init(struct e1000_hw *hw)
u16 eeprom_data, i, temp;
const u16 led_mask = 0x0F;
- DEBUGFUNC("e1000_id_led_init");
+ e_dbg("e1000_id_led_init");
if (hw->mac_type < e1000_82540) {
/* Nothing to do */
@@ -4303,7 +4301,7 @@ static s32 e1000_id_led_init(struct e1000_hw *hw)
hw->ledctl_mode2 = hw->ledctl_default;
if (e1000_read_eeprom(hw, EEPROM_ID_LED_SETTINGS, 1, &eeprom_data) < 0) {
- DEBUGOUT("EEPROM Read Error\n");
+ e_dbg("EEPROM Read Error\n");
return -E1000_ERR_EEPROM;
}
@@ -4363,7 +4361,7 @@ s32 e1000_setup_led(struct e1000_hw *hw)
u32 ledctl;
s32 ret_val = E1000_SUCCESS;
- DEBUGFUNC("e1000_setup_led");
+ e_dbg("e1000_setup_led");
switch (hw->mac_type) {
case e1000_82542_rev2_0:
@@ -4415,7 +4413,7 @@ s32 e1000_cleanup_led(struct e1000_hw *hw)
{
s32 ret_val = E1000_SUCCESS;
- DEBUGFUNC("e1000_cleanup_led");
+ e_dbg("e1000_cleanup_led");
switch (hw->mac_type) {
case e1000_82542_rev2_0:
@@ -4451,7 +4449,7 @@ s32 e1000_led_on(struct e1000_hw *hw)
{
u32 ctrl = er32(CTRL);
- DEBUGFUNC("e1000_led_on");
+ e_dbg("e1000_led_on");
switch (hw->mac_type) {
case e1000_82542_rev2_0:
@@ -4497,7 +4495,7 @@ s32 e1000_led_off(struct e1000_hw *hw)
{
u32 ctrl = er32(CTRL);
- DEBUGFUNC("e1000_led_off");
+ e_dbg("e1000_led_off");
switch (hw->mac_type) {
case e1000_82542_rev2_0:
@@ -4626,7 +4624,7 @@ static void e1000_clear_hw_cntrs(struct e1000_hw *hw)
*/
void e1000_reset_adaptive(struct e1000_hw *hw)
{
- DEBUGFUNC("e1000_reset_adaptive");
+ e_dbg("e1000_reset_adaptive");
if (hw->adaptive_ifs) {
if (!hw->ifs_params_forced) {
@@ -4639,7 +4637,7 @@ void e1000_reset_adaptive(struct e1000_hw *hw)
hw->in_ifs_mode = false;
ew32(AIT, 0);
} else {
- DEBUGOUT("Not in Adaptive IFS mode!\n");
+ e_dbg("Not in Adaptive IFS mode!\n");
}
}
@@ -4654,7 +4652,7 @@ void e1000_reset_adaptive(struct e1000_hw *hw)
*/
void e1000_update_adaptive(struct e1000_hw *hw)
{
- DEBUGFUNC("e1000_update_adaptive");
+ e_dbg("e1000_update_adaptive");
if (hw->adaptive_ifs) {
if ((hw->collision_delta *hw->ifs_ratio) > hw->tx_packet_delta) {
@@ -4679,7 +4677,7 @@ void e1000_update_adaptive(struct e1000_hw *hw)
}
}
} else {
- DEBUGOUT("Not in Adaptive IFS mode!\n");
+ e_dbg("Not in Adaptive IFS mode!\n");
}
}
@@ -4851,7 +4849,7 @@ static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length,
u16 i, phy_data;
u16 cable_length;
- DEBUGFUNC("e1000_get_cable_length");
+ e_dbg("e1000_get_cable_length");
*min_length = *max_length = 0;
@@ -4968,7 +4966,7 @@ static s32 e1000_check_polarity(struct e1000_hw *hw,
s32 ret_val;
u16 phy_data;
- DEBUGFUNC("e1000_check_polarity");
+ e_dbg("e1000_check_polarity");
if (hw->phy_type == e1000_phy_m88) {
/* return the Polarity bit in the Status register. */
@@ -5034,7 +5032,7 @@ static s32 e1000_check_downshift(struct e1000_hw *hw)
s32 ret_val;
u16 phy_data;
- DEBUGFUNC("e1000_check_downshift");
+ e_dbg("e1000_check_downshift");
if (hw->phy_type == e1000_phy_igp) {
ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_LINK_HEALTH,
@@ -5081,7 +5079,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
};
u16 min_length, max_length;
- DEBUGFUNC("e1000_config_dsp_after_link_change");
+ e_dbg("e1000_config_dsp_after_link_change");
if (hw->phy_type != e1000_phy_igp)
return E1000_SUCCESS;
@@ -5089,7 +5087,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
if (link_up) {
ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex);
if (ret_val) {
- DEBUGOUT("Error getting link speed and duplex\n");
+ e_dbg("Error getting link speed and duplex\n");
return ret_val;
}
@@ -5289,7 +5287,7 @@ static s32 e1000_set_phy_mode(struct e1000_hw *hw)
s32 ret_val;
u16 eeprom_data;
- DEBUGFUNC("e1000_set_phy_mode");
+ e_dbg("e1000_set_phy_mode");
if ((hw->mac_type == e1000_82545_rev_3) &&
(hw->media_type == e1000_media_type_copper)) {
@@ -5337,7 +5335,7 @@ static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active)
{
s32 ret_val;
u16 phy_data;
- DEBUGFUNC("e1000_set_d3_lplu_state");
+ e_dbg("e1000_set_d3_lplu_state");
if (hw->phy_type != e1000_phy_igp)
return E1000_SUCCESS;
@@ -5440,7 +5438,7 @@ static s32 e1000_set_vco_speed(struct e1000_hw *hw)
u16 default_page = 0;
u16 phy_data;
- DEBUGFUNC("e1000_set_vco_speed");
+ e_dbg("e1000_set_vco_speed");
switch (hw->mac_type) {
case e1000_82545_rev_3:
@@ -5613,7 +5611,7 @@ static s32 e1000_polarity_reversal_workaround(struct e1000_hw *hw)
*/
static s32 e1000_get_auto_rd_done(struct e1000_hw *hw)
{
- DEBUGFUNC("e1000_get_auto_rd_done");
+ e_dbg("e1000_get_auto_rd_done");
msleep(5);
return E1000_SUCCESS;
}
@@ -5628,7 +5626,7 @@ static s32 e1000_get_auto_rd_done(struct e1000_hw *hw)
*/
static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw)
{
- DEBUGFUNC("e1000_get_phy_cfg_done");
+ e_dbg("e1000_get_phy_cfg_done");
mdelay(10);
return E1000_SUCCESS;
}
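Every DEBUGOUT/DEBUGFUNC call in e1000_hw.c above now goes through e_dbg(). This hunk does not show where e_dbg() is defined for the legacy e1000 driver (that lives in drivers/net/e1000/e1000.h), but given the e1000_get_hw_dev() accessor added further down in e1000_main.c, a plausible minimal sketch of such a macro would be:

	#include <linux/netdevice.h>

	struct e1000_hw;				/* opaque to the shared code */
	struct net_device *e1000_get_hw_dev(struct e1000_hw *hw);

	/* Sketch only: resolve the owning net_device from the local 'hw'
	 * pointer, then defer to netdev_dbg(), which prefixes the message
	 * with the driver and interface name. */
	#define e_dbg(format, arg...) \
		netdev_dbg(e1000_get_hw_dev(hw), format, ## arg)

Like the old DBG-gated DEBUGOUT, netdev_dbg() output is normally compiled out; it appears only when the driver is built with DEBUG or switched on through dynamic debug.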
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h
index 9acfddb0dafb..ecd9f6c6bcd5 100644
--- a/drivers/net/e1000/e1000_hw.h
+++ b/drivers/net/e1000/e1000_hw.h
@@ -35,6 +35,7 @@
#include "e1000_osdep.h"
+
/* Forward declarations of structures used by the shared code */
struct e1000_hw;
struct e1000_hw_stats;
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index b15ece26ed84..ebdea0891665 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -31,7 +31,7 @@
char e1000_driver_name[] = "e1000";
static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
-#define DRV_VERSION "7.3.21-k5-NAPI"
+#define DRV_VERSION "7.3.21-k6-NAPI"
const char e1000_driver_version[] = DRV_VERSION;
static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
@@ -214,6 +214,17 @@ module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
/**
+ * e1000_get_hw_dev - return device
+ * used by the hardware layer to print debugging information
+ *
+ **/
+struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
+{
+ struct e1000_adapter *adapter = hw->back;
+ return adapter->netdev;
+}
+
+/**
* e1000_init_module - Driver Registration Routine
*
* e1000_init_module is the first routine called when the driver is
@@ -223,18 +234,17 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
static int __init e1000_init_module(void)
{
int ret;
- printk(KERN_INFO "%s - version %s\n",
- e1000_driver_string, e1000_driver_version);
+ pr_info("%s - version %s\n", e1000_driver_string, e1000_driver_version);
- printk(KERN_INFO "%s\n", e1000_copyright);
+ pr_info("%s\n", e1000_copyright);
ret = pci_register_driver(&e1000_driver);
if (copybreak != COPYBREAK_DEFAULT) {
if (copybreak == 0)
- printk(KERN_INFO "e1000: copybreak disabled\n");
+ pr_info("copybreak disabled\n");
else
- printk(KERN_INFO "e1000: copybreak enabled for "
- "packets <= %u bytes\n", copybreak);
+ pr_info("copybreak enabled for "
+ "packets <= %u bytes\n", copybreak);
}
return ret;
}
@@ -265,8 +275,7 @@ static int e1000_request_irq(struct e1000_adapter *adapter)
err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
netdev);
if (err) {
- DPRINTK(PROBE, ERR,
- "Unable to allocate interrupt Error: %d\n", err);
+ e_err("Unable to allocate interrupt Error: %d\n", err);
}
return err;
@@ -648,7 +657,7 @@ void e1000_reset(struct e1000_adapter *adapter)
ew32(WUC, 0);
if (e1000_init_hw(hw))
- DPRINTK(PROBE, ERR, "Hardware Error\n");
+ e_err("Hardware Error\n");
e1000_update_mng_vlan(adapter);
/* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
@@ -689,8 +698,7 @@ static void e1000_dump_eeprom(struct e1000_adapter *adapter)
data = kmalloc(eeprom.len, GFP_KERNEL);
if (!data) {
- printk(KERN_ERR "Unable to allocate memory to dump EEPROM"
- " data\n");
+ pr_err("Unable to allocate memory to dump EEPROM data\n");
return;
}
@@ -702,30 +710,25 @@ static void e1000_dump_eeprom(struct e1000_adapter *adapter)
csum_new += data[i] + (data[i + 1] << 8);
csum_new = EEPROM_SUM - csum_new;
- printk(KERN_ERR "/*********************/\n");
- printk(KERN_ERR "Current EEPROM Checksum : 0x%04x\n", csum_old);
- printk(KERN_ERR "Calculated : 0x%04x\n", csum_new);
+ pr_err("/*********************/\n");
+ pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old);
+ pr_err("Calculated : 0x%04x\n", csum_new);
- printk(KERN_ERR "Offset Values\n");
- printk(KERN_ERR "======== ======\n");
+ pr_err("Offset Values\n");
+ pr_err("======== ======\n");
print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);
- printk(KERN_ERR "Include this output when contacting your support "
- "provider.\n");
- printk(KERN_ERR "This is not a software error! Something bad "
- "happened to your hardware or\n");
- printk(KERN_ERR "EEPROM image. Ignoring this "
- "problem could result in further problems,\n");
- printk(KERN_ERR "possibly loss of data, corruption or system hangs!\n");
- printk(KERN_ERR "The MAC Address will be reset to 00:00:00:00:00:00, "
- "which is invalid\n");
- printk(KERN_ERR "and requires you to set the proper MAC "
- "address manually before continuing\n");
- printk(KERN_ERR "to enable this network device.\n");
- printk(KERN_ERR "Please inspect the EEPROM dump and report the issue "
- "to your hardware vendor\n");
- printk(KERN_ERR "or Intel Customer Support.\n");
- printk(KERN_ERR "/*********************/\n");
+ pr_err("Include this output when contacting your support provider.\n");
+ pr_err("This is not a software error! Something bad happened to\n");
+ pr_err("your hardware or EEPROM image. Ignoring this problem could\n");
+ pr_err("result in further problems, possibly loss of data,\n");
+ pr_err("corruption or system hangs!\n");
+ pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n");
+ pr_err("which is invalid and requires you to set the proper MAC\n");
+ pr_err("address manually before continuing to enable this network\n");
+ pr_err("device. Please inspect the EEPROM dump and report the\n");
+ pr_err("issue to your hardware vendor or Intel Customer Support.\n");
+ pr_err("/*********************/\n");
kfree(data);
}
@@ -823,16 +826,16 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
if (err)
return err;
- if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
- !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
+ !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
pci_using_dac = 1;
} else {
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_coherent_mask(&pdev->dev,
+ DMA_BIT_MASK(32));
if (err) {
- E1000_ERR("No usable DMA configuration, "
- "aborting\n");
+ pr_err("No usable DMA config, aborting\n");
goto err_dma;
}
}
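This hunk is the first instance of a conversion that repeats through the rest of the patch: the pci_* DMA compatibility wrappers give way to the generic DMA API, with the struct pci_dev handle replaced by &pdev->dev and the PCI_DMA_* direction constants by their DMA_* equivalents. A small before/after sketch, using an illustrative helper name rather than anything from the driver:

	#include <linux/pci.h>
	#include <linux/dma-mapping.h>

	/* Illustrative only: map one receive buffer the way the converted
	 * code does it. */
	static int example_map_rx(struct pci_dev *pdev, void *buf, size_t len,
				  dma_addr_t *dma)
	{
		/* old: *dma = pci_map_single(pdev, buf, len, PCI_DMA_FROMDEVICE); */
		*dma = dma_map_single(&pdev->dev, buf, len, DMA_FROM_DEVICE);

		/* old: pci_dma_mapping_error(pdev, *dma) */
		if (dma_mapping_error(&pdev->dev, *dma))
			return -EIO;

		return 0;
	}

The coherent allocations change along the same lines (pci_alloc_consistent() becomes dma_alloc_coherent()); the converted call sites pass GFP_KERNEL explicitly, whereas the old wrapper implied GFP_ATOMIC, a safe relaxation here since the ring setup paths run in process context.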
@@ -922,7 +925,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
/* initialize eeprom parameters */
if (e1000_init_eeprom_params(hw)) {
- E1000_ERR("EEPROM initialization failed\n");
+ e_err("EEPROM initialization failed\n");
goto err_eeprom;
}
@@ -933,7 +936,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
/* make sure the EEPROM is good */
if (e1000_validate_eeprom_checksum(hw) < 0) {
- DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n");
+ e_err("The EEPROM Checksum Is Not Valid\n");
e1000_dump_eeprom(adapter);
/*
* set MAC address to all zeroes to invalidate and temporary
@@ -947,14 +950,14 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
} else {
/* copy the MAC address out of the EEPROM */
if (e1000_read_mac_addr(hw))
- DPRINTK(PROBE, ERR, "EEPROM Read Error\n");
+ e_err("EEPROM Read Error\n");
}
/* don't block initialization here due to bad MAC address */
memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
memcpy(netdev->perm_addr, hw->mac_addr, netdev->addr_len);
if (!is_valid_ether_addr(netdev->perm_addr))
- DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
+ e_err("Invalid MAC Address\n");
e1000_get_bus_info(hw);
@@ -1035,8 +1038,16 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
adapter->wol = adapter->eeprom_wol;
device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
+ /* reset the hardware with the new settings */
+ e1000_reset(adapter);
+
+ strcpy(netdev->name, "eth%d");
+ err = register_netdev(netdev);
+ if (err)
+ goto err_register;
+
/* print bus type/speed/width info */
- DPRINTK(PROBE, INFO, "(PCI%s:%s:%s) ",
+ e_info("(PCI%s:%s:%s) ",
((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
((hw->bus_speed == e1000_bus_speed_133) ? "133MHz" :
(hw->bus_speed == e1000_bus_speed_120) ? "120MHz" :
@@ -1044,20 +1055,12 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
(hw->bus_speed == e1000_bus_speed_66) ? "66MHz" : "33MHz"),
((hw->bus_width == e1000_bus_width_64) ? "64-bit" : "32-bit"));
- printk("%pM\n", netdev->dev_addr);
-
- /* reset the hardware with the new settings */
- e1000_reset(adapter);
-
- strcpy(netdev->name, "eth%d");
- err = register_netdev(netdev);
- if (err)
- goto err_register;
+ e_info("%pM\n", netdev->dev_addr);
/* carrier off reporting is important to ethtool even BEFORE open */
netif_carrier_off(netdev);
- DPRINTK(PROBE, INFO, "Intel(R) PRO/1000 Network Connection\n");
+ e_info("Intel(R) PRO/1000 Network Connection\n");
cards_found++;
return 0;
@@ -1157,7 +1160,7 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
/* identify the MAC */
if (e1000_set_mac_type(hw)) {
- DPRINTK(PROBE, ERR, "Unknown MAC Type\n");
+ e_err("Unknown MAC Type\n");
return -EIO;
}
@@ -1190,7 +1193,7 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
adapter->num_rx_queues = 1;
if (e1000_alloc_queues(adapter)) {
- DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
+ e_err("Unable to allocate memory for queues\n");
return -ENOMEM;
}
@@ -1384,8 +1387,7 @@ static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
size = sizeof(struct e1000_buffer) * txdr->count;
txdr->buffer_info = vmalloc(size);
if (!txdr->buffer_info) {
- DPRINTK(PROBE, ERR,
- "Unable to allocate memory for the transmit descriptor ring\n");
+ e_err("Unable to allocate memory for the Tx descriptor ring\n");
return -ENOMEM;
}
memset(txdr->buffer_info, 0, size);
@@ -1395,12 +1397,12 @@ static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
txdr->size = ALIGN(txdr->size, 4096);
- txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
+ txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
+ GFP_KERNEL);
if (!txdr->desc) {
setup_tx_desc_die:
vfree(txdr->buffer_info);
- DPRINTK(PROBE, ERR,
- "Unable to allocate memory for the transmit descriptor ring\n");
+ e_err("Unable to allocate memory for the Tx descriptor ring\n");
return -ENOMEM;
}
@@ -1408,29 +1410,32 @@ setup_tx_desc_die:
if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
void *olddesc = txdr->desc;
dma_addr_t olddma = txdr->dma;
- DPRINTK(TX_ERR, ERR, "txdr align check failed: %u bytes "
- "at %p\n", txdr->size, txdr->desc);
+ e_err("txdr align check failed: %u bytes at %p\n",
+ txdr->size, txdr->desc);
/* Try again, without freeing the previous */
- txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
+ txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size,
+ &txdr->dma, GFP_KERNEL);
/* Failed allocation, critical failure */
if (!txdr->desc) {
- pci_free_consistent(pdev, txdr->size, olddesc, olddma);
+ dma_free_coherent(&pdev->dev, txdr->size, olddesc,
+ olddma);
goto setup_tx_desc_die;
}
if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
/* give up */
- pci_free_consistent(pdev, txdr->size, txdr->desc,
- txdr->dma);
- pci_free_consistent(pdev, txdr->size, olddesc, olddma);
- DPRINTK(PROBE, ERR,
- "Unable to allocate aligned memory "
- "for the transmit descriptor ring\n");
+ dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
+ txdr->dma);
+ dma_free_coherent(&pdev->dev, txdr->size, olddesc,
+ olddma);
+ e_err("Unable to allocate aligned memory "
+ "for the transmit descriptor ring\n");
vfree(txdr->buffer_info);
return -ENOMEM;
} else {
/* Free old allocation, new allocation was successful */
- pci_free_consistent(pdev, txdr->size, olddesc, olddma);
+ dma_free_coherent(&pdev->dev, txdr->size, olddesc,
+ olddma);
}
}
memset(txdr->desc, 0, txdr->size);
@@ -1456,8 +1461,7 @@ int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
for (i = 0; i < adapter->num_tx_queues; i++) {
err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
if (err) {
- DPRINTK(PROBE, ERR,
- "Allocation for Tx Queue %u failed\n", i);
+ e_err("Allocation for Tx Queue %u failed\n", i);
for (i-- ; i >= 0; i--)
e1000_free_tx_resources(adapter,
&adapter->tx_ring[i]);
@@ -1577,8 +1581,7 @@ static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
size = sizeof(struct e1000_buffer) * rxdr->count;
rxdr->buffer_info = vmalloc(size);
if (!rxdr->buffer_info) {
- DPRINTK(PROBE, ERR,
- "Unable to allocate memory for the receive descriptor ring\n");
+ e_err("Unable to allocate memory for the Rx descriptor ring\n");
return -ENOMEM;
}
memset(rxdr->buffer_info, 0, size);
@@ -1590,11 +1593,11 @@ static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
rxdr->size = rxdr->count * desc_len;
rxdr->size = ALIGN(rxdr->size, 4096);
- rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
+ rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
+ GFP_KERNEL);
if (!rxdr->desc) {
- DPRINTK(PROBE, ERR,
- "Unable to allocate memory for the receive descriptor ring\n");
+ e_err("Unable to allocate memory for the Rx descriptor ring\n");
setup_rx_desc_die:
vfree(rxdr->buffer_info);
return -ENOMEM;
@@ -1604,31 +1607,33 @@ setup_rx_desc_die:
if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
void *olddesc = rxdr->desc;
dma_addr_t olddma = rxdr->dma;
- DPRINTK(RX_ERR, ERR, "rxdr align check failed: %u bytes "
- "at %p\n", rxdr->size, rxdr->desc);
+ e_err("rxdr align check failed: %u bytes at %p\n",
+ rxdr->size, rxdr->desc);
/* Try again, without freeing the previous */
- rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
+ rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
+ &rxdr->dma, GFP_KERNEL);
/* Failed allocation, critical failure */
if (!rxdr->desc) {
- pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
- DPRINTK(PROBE, ERR,
- "Unable to allocate memory "
- "for the receive descriptor ring\n");
+ dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
+ olddma);
+ e_err("Unable to allocate memory for the Rx descriptor "
+ "ring\n");
goto setup_rx_desc_die;
}
if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
/* give up */
- pci_free_consistent(pdev, rxdr->size, rxdr->desc,
- rxdr->dma);
- pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
- DPRINTK(PROBE, ERR,
- "Unable to allocate aligned memory "
- "for the receive descriptor ring\n");
+ dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
+ rxdr->dma);
+ dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
+ olddma);
+ e_err("Unable to allocate aligned memory for the Rx "
+ "descriptor ring\n");
goto setup_rx_desc_die;
} else {
/* Free old allocation, new allocation was successful */
- pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
+ dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
+ olddma);
}
}
memset(rxdr->desc, 0, rxdr->size);
@@ -1655,8 +1660,7 @@ int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
for (i = 0; i < adapter->num_rx_queues; i++) {
err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
if (err) {
- DPRINTK(PROBE, ERR,
- "Allocation for Rx Queue %u failed\n", i);
+ e_err("Allocation for Rx Queue %u failed\n", i);
for (i-- ; i >= 0; i--)
e1000_free_rx_resources(adapter,
&adapter->rx_ring[i]);
@@ -1804,7 +1808,8 @@ static void e1000_free_tx_resources(struct e1000_adapter *adapter,
vfree(tx_ring->buffer_info);
tx_ring->buffer_info = NULL;
- pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
+ dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
+ tx_ring->dma);
tx_ring->desc = NULL;
}
@@ -1829,12 +1834,12 @@ static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
{
if (buffer_info->dma) {
if (buffer_info->mapped_as_page)
- pci_unmap_page(adapter->pdev, buffer_info->dma,
- buffer_info->length, PCI_DMA_TODEVICE);
+ dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
+ buffer_info->length, DMA_TO_DEVICE);
else
- pci_unmap_single(adapter->pdev, buffer_info->dma,
+ dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
buffer_info->length,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
buffer_info->dma = 0;
}
if (buffer_info->skb) {
@@ -1912,7 +1917,8 @@ static void e1000_free_rx_resources(struct e1000_adapter *adapter,
vfree(rx_ring->buffer_info);
rx_ring->buffer_info = NULL;
- pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
+ dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
+ rx_ring->dma);
rx_ring->desc = NULL;
}
@@ -1952,14 +1958,14 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
buffer_info = &rx_ring->buffer_info[i];
if (buffer_info->dma &&
adapter->clean_rx == e1000_clean_rx_irq) {
- pci_unmap_single(pdev, buffer_info->dma,
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
buffer_info->length,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
} else if (buffer_info->dma &&
adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
- pci_unmap_page(pdev, buffer_info->dma,
- buffer_info->length,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_page(&pdev->dev, buffer_info->dma,
+ buffer_info->length,
+ DMA_FROM_DEVICE);
}
buffer_info->dma = 0;
@@ -2098,7 +2104,6 @@ static void e1000_set_rx_mode(struct net_device *netdev)
struct e1000_hw *hw = &adapter->hw;
struct netdev_hw_addr *ha;
bool use_uc = false;
- struct dev_addr_list *mc_ptr;
u32 rctl;
u32 hash_value;
int i, rar_entries = E1000_RAR_ENTRIES;
@@ -2106,7 +2111,7 @@ static void e1000_set_rx_mode(struct net_device *netdev)
u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);
if (!mcarray) {
- DPRINTK(PROBE, ERR, "memory allocation failed\n");
+ e_err("memory allocation failed\n");
return;
}
@@ -2156,19 +2161,17 @@ static void e1000_set_rx_mode(struct net_device *netdev)
e1000_rar_set(hw, ha->addr, i++);
}
- WARN_ON(i == rar_entries);
-
- netdev_for_each_mc_addr(mc_ptr, netdev) {
+ netdev_for_each_mc_addr(ha, netdev) {
if (i == rar_entries) {
/* load any remaining addresses into the hash table */
u32 hash_reg, hash_bit, mta;
- hash_value = e1000_hash_mc_addr(hw, mc_ptr->da_addr);
+ hash_value = e1000_hash_mc_addr(hw, ha->addr);
hash_reg = (hash_value >> 5) & 0x7F;
hash_bit = hash_value & 0x1F;
mta = (1 << hash_bit);
mcarray[hash_reg] |= mta;
} else {
- e1000_rar_set(hw, mc_ptr->da_addr, i++);
+ e1000_rar_set(hw, ha->addr, i++);
}
}
@@ -2302,16 +2305,16 @@ static void e1000_watchdog(unsigned long data)
&adapter->link_duplex);
ctrl = er32(CTRL);
- printk(KERN_INFO "e1000: %s NIC Link is Up %d Mbps %s, "
- "Flow Control: %s\n",
- netdev->name,
- adapter->link_speed,
- adapter->link_duplex == FULL_DUPLEX ?
- "Full Duplex" : "Half Duplex",
- ((ctrl & E1000_CTRL_TFCE) && (ctrl &
- E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
- E1000_CTRL_RFCE) ? "RX" : ((ctrl &
- E1000_CTRL_TFCE) ? "TX" : "None" )));
+ pr_info("%s NIC Link is Up %d Mbps %s, "
+ "Flow Control: %s\n",
+ netdev->name,
+ adapter->link_speed,
+ adapter->link_duplex == FULL_DUPLEX ?
+ "Full Duplex" : "Half Duplex",
+ ((ctrl & E1000_CTRL_TFCE) && (ctrl &
+ E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
+ E1000_CTRL_RFCE) ? "RX" : ((ctrl &
+ E1000_CTRL_TFCE) ? "TX" : "None")));
/* adjust timeout factor according to speed/duplex */
adapter->tx_timeout_factor = 1;
@@ -2341,8 +2344,8 @@ static void e1000_watchdog(unsigned long data)
if (netif_carrier_ok(netdev)) {
adapter->link_speed = 0;
adapter->link_duplex = 0;
- printk(KERN_INFO "e1000: %s NIC Link is Down\n",
- netdev->name);
+ pr_info("%s NIC Link is Down\n",
+ netdev->name);
netif_carrier_off(netdev);
if (!test_bit(__E1000_DOWN, &adapter->flags))
@@ -2381,6 +2384,22 @@ link_up:
}
}
+ /* Simple mode for Interrupt Throttle Rate (ITR) */
+ if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) {
+ /*
+ * Symmetric Tx/Rx gets a reduced ITR=2000;
+ * Total asymmetrical Tx or Rx gets ITR=8000;
+ * everyone else is between 2000-8000.
+ */
+ u32 goc = (adapter->gotcl + adapter->gorcl) / 10000;
+ u32 dif = (adapter->gotcl > adapter->gorcl ?
+ adapter->gotcl - adapter->gorcl :
+ adapter->gorcl - adapter->gotcl) / 10000;
+ u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
+
+ ew32(ITR, 1000000000 / (itr * 256));
+ }
+
/* Cause software interrupt to ensure rx ring is cleaned */
ew32(ICS, E1000_ICS_RXDMT0);
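To make the new simplified mode concrete, the same arithmetic can be pulled out into a standalone sketch (the byte counts below are made-up samples; in the driver, gotcl/gorcl are roughly the octets transmitted/received since the previous watchdog run):

	#include <stdio.h>
	#include <stdint.h>

	/* Same formula as the hunk above: symmetric traffic lands at
	 * ~2000 ints/s, completely one-sided traffic at ~8000 ints/s,
	 * mixed loads in between. */
	static uint32_t simple_itr(uint32_t gotcl, uint32_t gorcl)
	{
		uint32_t goc = (gotcl + gorcl) / 10000;
		uint32_t dif = (gotcl > gorcl ? gotcl - gorcl :
				gorcl - gotcl) / 10000;

		return goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
	}

	int main(void)
	{
		printf("%u\n", simple_itr(40000000, 40000000)); /* 40 MB each way -> 2000 */
		printf("%u\n", simple_itr(80000000, 0));        /* 80 MB Tx only  -> 8000 */
		/* The ew32(ITR, ...) write converts interrupts/s into the ITR
		 * register's 256 ns units: */
		printf("%u\n", 1000000000u / (2000u * 256u));   /* ~1953 */
		return 0;
	}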
@@ -2525,8 +2544,6 @@ set_itr_now:
adapter->itr = new_itr;
ew32(ITR, 1000000000 / (new_itr * 256));
}
-
- return;
}
#define E1000_TX_FLAGS_CSUM 0x00000001
@@ -2632,8 +2649,7 @@ static bool e1000_tx_csum(struct e1000_adapter *adapter,
break;
default:
if (unlikely(net_ratelimit()))
- DPRINTK(DRV, WARNING,
- "checksum_partial proto=%x!\n", skb->protocol);
+ e_warn("checksum_partial proto=%x!\n", skb->protocol);
break;
}
@@ -2715,9 +2731,10 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
/* set time_stamp *before* dma to help avoid a possible race */
buffer_info->time_stamp = jiffies;
buffer_info->mapped_as_page = false;
- buffer_info->dma = pci_map_single(pdev, skb->data + offset,
- size, PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(pdev, buffer_info->dma))
+ buffer_info->dma = dma_map_single(&pdev->dev,
+ skb->data + offset,
+ size, DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma))
goto dma_error;
buffer_info->next_to_watch = i;
@@ -2761,10 +2778,10 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
buffer_info->length = size;
buffer_info->time_stamp = jiffies;
buffer_info->mapped_as_page = true;
- buffer_info->dma = pci_map_page(pdev, frag->page,
+ buffer_info->dma = dma_map_page(&pdev->dev, frag->page,
offset, size,
- PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(pdev, buffer_info->dma))
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma))
goto dma_error;
buffer_info->next_to_watch = i;
@@ -2930,7 +2947,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
unsigned int tx_flags = 0;
- unsigned int len = skb->len - skb->data_len;
+ unsigned int len = skb_headlen(skb);
unsigned int nr_frags;
unsigned int mss;
int count = 0;
@@ -2976,12 +2993,11 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
/* fall through */
pull_size = min((unsigned int)4, skb->data_len);
if (!__pskb_pull_tail(skb, pull_size)) {
- DPRINTK(DRV, ERR,
- "__pskb_pull_tail failed.\n");
+ e_err("__pskb_pull_tail failed.\n");
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
- len = skb->len - skb->data_len;
+ len = skb_headlen(skb);
break;
default:
/* do nothing */
@@ -3125,7 +3141,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
(max_frame > MAX_JUMBO_FRAME_SIZE)) {
- DPRINTK(PROBE, ERR, "Invalid MTU setting\n");
+ e_err("Invalid MTU setting\n");
return -EINVAL;
}
@@ -3133,7 +3149,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
switch (hw->mac_type) {
case e1000_undefined ... e1000_82542_rev2_1:
if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
- DPRINTK(PROBE, ERR, "Jumbo Frames not supported.\n");
+ e_err("Jumbo Frames not supported.\n");
return -EINVAL;
}
break;
@@ -3171,8 +3187,8 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
(max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
- printk(KERN_INFO "e1000: %s changing MTU from %d to %d\n",
- netdev->name, netdev->mtu, new_mtu);
+ pr_info("%s changing MTU from %d to %d\n",
+ netdev->name, netdev->mtu, new_mtu);
netdev->mtu = new_mtu;
if (netif_running(netdev))
@@ -3485,17 +3501,17 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
!(er32(STATUS) & E1000_STATUS_TXOFF)) {
/* detected Tx unit hang */
- DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
- " Tx Queue <%lu>\n"
- " TDH <%x>\n"
- " TDT <%x>\n"
- " next_to_use <%x>\n"
- " next_to_clean <%x>\n"
- "buffer_info[next_to_clean]\n"
- " time_stamp <%lx>\n"
- " next_to_watch <%x>\n"
- " jiffies <%lx>\n"
- " next_to_watch.status <%x>\n",
+ e_err("Detected Tx Unit Hang\n"
+ " Tx Queue <%lu>\n"
+ " TDH <%x>\n"
+ " TDT <%x>\n"
+ " next_to_use <%x>\n"
+ " next_to_clean <%x>\n"
+ "buffer_info[next_to_clean]\n"
+ " time_stamp <%lx>\n"
+ " next_to_watch <%x>\n"
+ " jiffies <%lx>\n"
+ " next_to_watch.status <%x>\n",
(unsigned long)((tx_ring - adapter->tx_ring) /
sizeof(struct e1000_tx_ring)),
readl(hw->hw_addr + tx_ring->tdh),
@@ -3635,8 +3651,8 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
cleaned = true;
cleaned_count++;
- pci_unmap_page(pdev, buffer_info->dma, buffer_info->length,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_page(&pdev->dev, buffer_info->dma,
+ buffer_info->length, DMA_FROM_DEVICE);
buffer_info->dma = 0;
length = le16_to_cpu(rx_desc->length);
@@ -3734,7 +3750,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
/* eth type trans needs skb->data to point to something */
if (!pskb_may_pull(skb, ETH_HLEN)) {
- DPRINTK(DRV, ERR, "pskb_may_pull failed.\n");
+ e_err("pskb_may_pull failed.\n");
dev_kfree_skb(skb);
goto next_desc;
}
@@ -3769,6 +3785,31 @@ next_desc:
return cleaned;
}
+/*
+ * this should improve performance for small packets with large amounts
+ * of reassembly being done in the stack
+ */
+static void e1000_check_copybreak(struct net_device *netdev,
+ struct e1000_buffer *buffer_info,
+ u32 length, struct sk_buff **skb)
+{
+ struct sk_buff *new_skb;
+
+ if (length > copybreak)
+ return;
+
+ new_skb = netdev_alloc_skb_ip_align(netdev, length);
+ if (!new_skb)
+ return;
+
+ skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN,
+ (*skb)->data - NET_IP_ALIGN,
+ length + NET_IP_ALIGN);
+ /* save the skb in buffer_info as good */
+ buffer_info->skb = *skb;
+ *skb = new_skb;
+}
+
/**
* e1000_clean_rx_irq - Send received data up the network stack; legacy
* @adapter: board private structure
@@ -3818,8 +3859,8 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
cleaned = true;
cleaned_count++;
- pci_unmap_single(pdev, buffer_info->dma, buffer_info->length,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
+ buffer_info->length, DMA_FROM_DEVICE);
buffer_info->dma = 0;
length = le16_to_cpu(rx_desc->length);
@@ -3834,8 +3875,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
if (adapter->discarding) {
/* All receives must fit into a single buffer */
- E1000_DBG("%s: Receive packet consumed multiple"
- " buffers\n", netdev->name);
+ e_info("Receive packet consumed multiple buffers\n");
/* recycle */
buffer_info->skb = skb;
if (status & E1000_RXD_STAT_EOP)
@@ -3868,26 +3908,8 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
total_rx_bytes += length;
total_rx_packets++;
- /* code added for copybreak, this should improve
- * performance for small packets with large amounts
- * of reassembly being done in the stack */
- if (length < copybreak) {
- struct sk_buff *new_skb =
- netdev_alloc_skb_ip_align(netdev, length);
- if (new_skb) {
- skb_copy_to_linear_data_offset(new_skb,
- -NET_IP_ALIGN,
- (skb->data -
- NET_IP_ALIGN),
- (length +
- NET_IP_ALIGN));
- /* save the skb in buffer_info as good */
- buffer_info->skb = skb;
- skb = new_skb;
- }
- /* else just continue with the old one */
- }
- /* end copybreak code */
+ e1000_check_copybreak(netdev, buffer_info, length, &skb);
+
skb_put(skb, length);
/* Receive Checksum Offload */
@@ -3965,8 +3987,8 @@ e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
/* Fix for errata 23, can't cross 64kB boundary */
if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
struct sk_buff *oldskb = skb;
- DPRINTK(PROBE, ERR, "skb align check failed: %u bytes "
- "at %p\n", bufsz, skb->data);
+ e_err("skb align check failed: %u bytes at %p\n",
+ bufsz, skb->data);
/* Try again, without freeing the previous */
skb = netdev_alloc_skb_ip_align(netdev, bufsz);
/* Failed allocation, critical failure */
@@ -3999,11 +4021,11 @@ check_page:
}
if (!buffer_info->dma) {
- buffer_info->dma = pci_map_page(pdev,
+ buffer_info->dma = dma_map_page(&pdev->dev,
buffer_info->page, 0,
- buffer_info->length,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
+ buffer_info->length,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
put_page(buffer_info->page);
dev_kfree_skb(skb);
buffer_info->page = NULL;
@@ -4074,8 +4096,8 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
/* Fix for errata 23, can't cross 64kB boundary */
if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
struct sk_buff *oldskb = skb;
- DPRINTK(RX_ERR, ERR, "skb align check failed: %u bytes "
- "at %p\n", bufsz, skb->data);
+ e_err("skb align check failed: %u bytes at %p\n",
+ bufsz, skb->data);
/* Try again, without freeing the previous */
skb = netdev_alloc_skb_ip_align(netdev, bufsz);
/* Failed allocation, critical failure */
@@ -4099,11 +4121,11 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
buffer_info->skb = skb;
buffer_info->length = adapter->rx_buffer_len;
map_skb:
- buffer_info->dma = pci_map_single(pdev,
+ buffer_info->dma = dma_map_single(&pdev->dev,
skb->data,
buffer_info->length,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
dev_kfree_skb(skb);
buffer_info->skb = NULL;
buffer_info->dma = 0;
@@ -4120,16 +4142,15 @@ map_skb:
if (!e1000_check_64k_bound(adapter,
(void *)(unsigned long)buffer_info->dma,
adapter->rx_buffer_len)) {
- DPRINTK(RX_ERR, ERR,
- "dma align check failed: %u bytes at %p\n",
- adapter->rx_buffer_len,
- (void *)(unsigned long)buffer_info->dma);
+ e_err("dma align check failed: %u bytes at %p\n",
+ adapter->rx_buffer_len,
+ (void *)(unsigned long)buffer_info->dma);
dev_kfree_skb(skb);
buffer_info->skb = NULL;
- pci_unmap_single(pdev, buffer_info->dma,
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
adapter->rx_buffer_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
buffer_info->dma = 0;
adapter->alloc_rx_buff_failed++;
@@ -4335,7 +4356,7 @@ void e1000_pci_set_mwi(struct e1000_hw *hw)
int ret_val = pci_set_mwi(adapter->pdev);
if (ret_val)
- DPRINTK(PROBE, ERR, "Error in setting MWI\n");
+ e_err("Error in setting MWI\n");
}
void e1000_pci_clear_mwi(struct e1000_hw *hw)
@@ -4466,7 +4487,7 @@ int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
/* Fiber NICs only allow 1000 Mbps Full duplex */
if ((hw->media_type == e1000_media_type_fiber) &&
spddplx != (SPEED_1000 + DUPLEX_FULL)) {
- DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
+ e_err("Unsupported Speed/Duplex configuration\n");
return -EINVAL;
}
@@ -4489,7 +4510,7 @@ int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
break;
case SPEED_1000 + DUPLEX_HALF: /* not supported */
default:
- DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
+ e_err("Unsupported Speed/Duplex configuration\n");
return -EINVAL;
}
return 0;
@@ -4612,7 +4633,7 @@ static int e1000_resume(struct pci_dev *pdev)
else
err = pci_enable_device_mem(pdev);
if (err) {
- printk(KERN_ERR "e1000: Cannot enable PCI device from suspend\n");
+ pr_err("Cannot enable PCI device from suspend\n");
return err;
}
pci_set_master(pdev);
@@ -4715,7 +4736,7 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
else
err = pci_enable_device_mem(pdev);
if (err) {
- printk(KERN_ERR "e1000: Cannot re-enable PCI device after reset.\n");
+ pr_err("Cannot re-enable PCI device after reset.\n");
return PCI_ERS_RESULT_DISCONNECT;
}
pci_set_master(pdev);
@@ -4746,7 +4767,7 @@ static void e1000_io_resume(struct pci_dev *pdev)
if (netif_running(netdev)) {
if (e1000_up(adapter)) {
- printk("e1000: can't bring device back up after reset\n");
+ pr_info("can't bring device back up after reset\n");
return;
}
}
diff --git a/drivers/net/e1000/e1000_osdep.h b/drivers/net/e1000/e1000_osdep.h
index d9298522f5ae..edd1c75aa895 100644
--- a/drivers/net/e1000/e1000_osdep.h
+++ b/drivers/net/e1000/e1000_osdep.h
@@ -41,20 +41,6 @@
#include <linux/interrupt.h>
#include <linux/sched.h>
-#ifdef DBG
-#define DEBUGOUT(S) printk(KERN_DEBUG S "\n")
-#define DEBUGOUT1(S, A...) printk(KERN_DEBUG S "\n", A)
-#else
-#define DEBUGOUT(S)
-#define DEBUGOUT1(S, A...)
-#endif
-
-#define DEBUGFUNC(F) DEBUGOUT(F "\n")
-#define DEBUGOUT2 DEBUGOUT1
-#define DEBUGOUT3 DEBUGOUT2
-#define DEBUGOUT7 DEBUGOUT3
-
-
#define er32(reg) \
(readl(hw->hw_addr + ((hw->mac_type >= e1000_82543) \
? E1000_##reg : E1000_82542_##reg)))
diff --git a/drivers/net/e1000/e1000_param.c b/drivers/net/e1000/e1000_param.c
index 38d2741ccae9..10d8d98bb797 100644
--- a/drivers/net/e1000/e1000_param.c
+++ b/drivers/net/e1000/e1000_param.c
@@ -188,14 +188,6 @@ E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
*/
E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down");
-/* Enable Kumeran Lock Loss workaround
- *
- * Valid Range: 0, 1
- *
- * Default Value: 1 (enabled)
- */
-E1000_PARAM(KumeranLockLoss, "Enable Kumeran lock loss workaround");
-
struct e1000_option {
enum { enable_option, range_option, list_option } type;
const char *name;
@@ -226,17 +218,16 @@ static int __devinit e1000_validate_option(unsigned int *value,
case enable_option:
switch (*value) {
case OPTION_ENABLED:
- DPRINTK(PROBE, INFO, "%s Enabled\n", opt->name);
+ e_dev_info("%s Enabled\n", opt->name);
return 0;
case OPTION_DISABLED:
- DPRINTK(PROBE, INFO, "%s Disabled\n", opt->name);
+ e_dev_info("%s Disabled\n", opt->name);
return 0;
}
break;
case range_option:
if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
- DPRINTK(PROBE, INFO,
- "%s set to %i\n", opt->name, *value);
+ e_dev_info("%s set to %i\n", opt->name, *value);
return 0;
}
break;
@@ -248,7 +239,7 @@ static int __devinit e1000_validate_option(unsigned int *value,
ent = &opt->arg.l.p[i];
if (*value == ent->i) {
if (ent->str[0] != '\0')
- DPRINTK(PROBE, INFO, "%s\n", ent->str);
+ e_dev_info("%s\n", ent->str);
return 0;
}
}
@@ -258,7 +249,7 @@ static int __devinit e1000_validate_option(unsigned int *value,
BUG();
}
- DPRINTK(PROBE, INFO, "Invalid %s value specified (%i) %s\n",
+ e_dev_info("Invalid %s value specified (%i) %s\n",
opt->name, *value, opt->err);
*value = opt->def;
return -1;
@@ -283,9 +274,8 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter)
int bd = adapter->bd_number;
if (bd >= E1000_MAX_NIC) {
- DPRINTK(PROBE, NOTICE,
- "Warning: no configuration for board #%i\n", bd);
- DPRINTK(PROBE, NOTICE, "Using defaults for all values\n");
+ e_dev_warn("Warning: no configuration for board #%i "
+ "using defaults for all values\n", bd);
}
{ /* Transmit Descriptor Count */
@@ -472,27 +462,31 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter)
adapter->itr = InterruptThrottleRate[bd];
switch (adapter->itr) {
case 0:
- DPRINTK(PROBE, INFO, "%s turned off\n",
- opt.name);
+ e_dev_info("%s turned off\n", opt.name);
break;
case 1:
- DPRINTK(PROBE, INFO, "%s set to dynamic mode\n",
- opt.name);
+ e_dev_info("%s set to dynamic mode\n",
+ opt.name);
adapter->itr_setting = adapter->itr;
adapter->itr = 20000;
break;
case 3:
- DPRINTK(PROBE, INFO,
- "%s set to dynamic conservative mode\n",
- opt.name);
+ e_dev_info("%s set to dynamic conservative "
+ "mode\n", opt.name);
adapter->itr_setting = adapter->itr;
adapter->itr = 20000;
break;
+ case 4:
+ e_dev_info("%s set to simplified "
+ "(2000-8000) ints mode\n", opt.name);
+ adapter->itr_setting = adapter->itr;
+ break;
default:
e1000_validate_option(&adapter->itr, &opt,
adapter);
- /* save the setting, because the dynamic bits change itr */
- /* clear the lower two bits because they are
+ /* save the setting, because the dynamic bits
+ * change itr.
+ * clear the lower two bits because they are
* used as control */
adapter->itr_setting = adapter->itr & ~3;
break;
@@ -543,19 +537,18 @@ static void __devinit e1000_check_fiber_options(struct e1000_adapter *adapter)
{
int bd = adapter->bd_number;
if (num_Speed > bd) {
- DPRINTK(PROBE, INFO, "Speed not valid for fiber adapters, "
- "parameter ignored\n");
+ e_dev_info("Speed not valid for fiber adapters, parameter "
+ "ignored\n");
}
if (num_Duplex > bd) {
- DPRINTK(PROBE, INFO, "Duplex not valid for fiber adapters, "
- "parameter ignored\n");
+ e_dev_info("Duplex not valid for fiber adapters, parameter "
+ "ignored\n");
}
if ((num_AutoNeg > bd) && (AutoNeg[bd] != 0x20)) {
- DPRINTK(PROBE, INFO, "AutoNeg other than 1000/Full is "
- "not valid for fiber adapters, "
- "parameter ignored\n");
+ e_dev_info("AutoNeg other than 1000/Full is not valid for fiber"
+ "adapters, parameter ignored\n");
}
}
@@ -619,9 +612,8 @@ static void __devinit e1000_check_copper_options(struct e1000_adapter *adapter)
}
if ((num_AutoNeg > bd) && (speed != 0 || dplx != 0)) {
- DPRINTK(PROBE, INFO,
- "AutoNeg specified along with Speed or Duplex, "
- "parameter ignored\n");
+ e_dev_info("AutoNeg specified along with Speed or Duplex, "
+ "parameter ignored\n");
adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
} else { /* Autoneg */
static const struct e1000_opt_list an_list[] =
@@ -680,79 +672,72 @@ static void __devinit e1000_check_copper_options(struct e1000_adapter *adapter)
case 0:
adapter->hw.autoneg = adapter->fc_autoneg = 1;
if ((num_Speed > bd) && (speed != 0 || dplx != 0))
- DPRINTK(PROBE, INFO,
- "Speed and duplex autonegotiation enabled\n");
+ e_dev_info("Speed and duplex autonegotiation "
+ "enabled\n");
break;
case HALF_DUPLEX:
- DPRINTK(PROBE, INFO, "Half Duplex specified without Speed\n");
- DPRINTK(PROBE, INFO, "Using Autonegotiation at "
- "Half Duplex only\n");
+ e_dev_info("Half Duplex specified without Speed\n");
+ e_dev_info("Using Autonegotiation at Half Duplex only\n");
adapter->hw.autoneg = adapter->fc_autoneg = 1;
adapter->hw.autoneg_advertised = ADVERTISE_10_HALF |
ADVERTISE_100_HALF;
break;
case FULL_DUPLEX:
- DPRINTK(PROBE, INFO, "Full Duplex specified without Speed\n");
- DPRINTK(PROBE, INFO, "Using Autonegotiation at "
- "Full Duplex only\n");
+ e_dev_info("Full Duplex specified without Speed\n");
+ e_dev_info("Using Autonegotiation at Full Duplex only\n");
adapter->hw.autoneg = adapter->fc_autoneg = 1;
adapter->hw.autoneg_advertised = ADVERTISE_10_FULL |
ADVERTISE_100_FULL |
ADVERTISE_1000_FULL;
break;
case SPEED_10:
- DPRINTK(PROBE, INFO, "10 Mbps Speed specified "
- "without Duplex\n");
- DPRINTK(PROBE, INFO, "Using Autonegotiation at 10 Mbps only\n");
+ e_dev_info("10 Mbps Speed specified without Duplex\n");
+ e_dev_info("Using Autonegotiation at 10 Mbps only\n");
adapter->hw.autoneg = adapter->fc_autoneg = 1;
adapter->hw.autoneg_advertised = ADVERTISE_10_HALF |
ADVERTISE_10_FULL;
break;
case SPEED_10 + HALF_DUPLEX:
- DPRINTK(PROBE, INFO, "Forcing to 10 Mbps Half Duplex\n");
+ e_dev_info("Forcing to 10 Mbps Half Duplex\n");
adapter->hw.autoneg = adapter->fc_autoneg = 0;
adapter->hw.forced_speed_duplex = e1000_10_half;
adapter->hw.autoneg_advertised = 0;
break;
case SPEED_10 + FULL_DUPLEX:
- DPRINTK(PROBE, INFO, "Forcing to 10 Mbps Full Duplex\n");
+ e_dev_info("Forcing to 10 Mbps Full Duplex\n");
adapter->hw.autoneg = adapter->fc_autoneg = 0;
adapter->hw.forced_speed_duplex = e1000_10_full;
adapter->hw.autoneg_advertised = 0;
break;
case SPEED_100:
- DPRINTK(PROBE, INFO, "100 Mbps Speed specified "
- "without Duplex\n");
- DPRINTK(PROBE, INFO, "Using Autonegotiation at "
- "100 Mbps only\n");
+ e_dev_info("100 Mbps Speed specified without Duplex\n");
+ e_dev_info("Using Autonegotiation at 100 Mbps only\n");
adapter->hw.autoneg = adapter->fc_autoneg = 1;
adapter->hw.autoneg_advertised = ADVERTISE_100_HALF |
ADVERTISE_100_FULL;
break;
case SPEED_100 + HALF_DUPLEX:
- DPRINTK(PROBE, INFO, "Forcing to 100 Mbps Half Duplex\n");
+ e_dev_info("Forcing to 100 Mbps Half Duplex\n");
adapter->hw.autoneg = adapter->fc_autoneg = 0;
adapter->hw.forced_speed_duplex = e1000_100_half;
adapter->hw.autoneg_advertised = 0;
break;
case SPEED_100 + FULL_DUPLEX:
- DPRINTK(PROBE, INFO, "Forcing to 100 Mbps Full Duplex\n");
+ e_dev_info("Forcing to 100 Mbps Full Duplex\n");
adapter->hw.autoneg = adapter->fc_autoneg = 0;
adapter->hw.forced_speed_duplex = e1000_100_full;
adapter->hw.autoneg_advertised = 0;
break;
case SPEED_1000:
- DPRINTK(PROBE, INFO, "1000 Mbps Speed specified without "
- "Duplex\n");
+ e_dev_info("1000 Mbps Speed specified without Duplex\n");
goto full_duplex_only;
case SPEED_1000 + HALF_DUPLEX:
- DPRINTK(PROBE, INFO,
- "Half Duplex is not supported at 1000 Mbps\n");
+ e_dev_info("Half Duplex is not supported at 1000 Mbps\n");
/* fall through */
case SPEED_1000 + FULL_DUPLEX:
full_duplex_only:
- DPRINTK(PROBE, INFO,
- "Using Autonegotiation at 1000 Mbps Full Duplex only\n");
+ e_dev_info("Using Autonegotiation at 1000 Mbps Full Duplex "
+ "only\n");
adapter->hw.autoneg = adapter->fc_autoneg = 1;
adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
break;
@@ -762,9 +747,8 @@ full_duplex_only:
/* Speed, AutoNeg and MDI/MDI-X must all play nice */
if (e1000_validate_mdi_setting(&(adapter->hw)) < 0) {
- DPRINTK(PROBE, INFO,
- "Speed, AutoNeg and MDI-X specifications are "
- "incompatible. Setting MDI-X to a compatible value.\n");
+ e_dev_info("Speed, AutoNeg and MDI-X specs are incompatible. "
+ "Setting MDI-X to a compatible value.\n");
}
}
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index 90155552ea09..f654db9121de 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -234,9 +234,6 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
mac->mta_reg_count = 128;
/* Set rar entry count */
mac->rar_entry_count = E1000_RAR_ENTRIES;
- /* Set if manageability features are enabled. */
- mac->arc_subsystem_valid = (er32(FWSM) & E1000_FWSM_MODE_MASK)
- ? true : false;
/* Adaptive IFS supported */
mac->adaptive_ifs = true;
@@ -271,6 +268,16 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
func->set_lan_id = e1000_set_lan_id_single_port;
func->check_mng_mode = e1000e_check_mng_mode_generic;
func->led_on = e1000e_led_on_generic;
+
+ /* FWSM register */
+ mac->has_fwsm = true;
+ /*
+ * ARC supported; valid only if manageability features are
+ * enabled.
+ */
+ mac->arc_subsystem_valid =
+ (er32(FWSM) & E1000_FWSM_MODE_MASK)
+ ? true : false;
break;
case e1000_82574:
case e1000_82583:
@@ -281,6 +288,9 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
default:
func->check_mng_mode = e1000e_check_mng_mode_generic;
func->led_on = e1000e_led_on_generic;
+
+ /* FWSM register */
+ mac->has_fwsm = true;
break;
}
@@ -323,7 +333,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
}
/*
- * Initialze device specific counter of SMBI acquisition
+ * Initialize device specific counter of SMBI acquisition
* timeouts.
*/
hw->dev_spec.e82571.smb_counter = 0;
@@ -993,9 +1003,10 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw)
/* ...for both queues. */
switch (mac->type) {
case e1000_82573:
+ e1000e_enable_tx_pkt_filtering(hw);
+ /* fall through */
case e1000_82574:
case e1000_82583:
- e1000e_enable_tx_pkt_filtering(hw);
reg_data = er32(GCR);
reg_data |= E1000_GCR_L1_ACT_WITHOUT_L0S_RX;
ew32(GCR, reg_data);
@@ -1137,8 +1148,6 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
default:
break;
}
-
- return;
}
/**
@@ -1642,8 +1651,6 @@ static void e1000_power_down_phy_copper_82571(struct e1000_hw *hw)
/* If the management interface is not enabled, then power down */
if (!(mac->ops.check_mng_mode(hw) || phy->ops.check_reset_block(hw)))
e1000_power_down_phy_copper(hw);
-
- return;
}
/**
@@ -1845,7 +1852,7 @@ struct e1000_info e1000_82574_info = {
| FLAG_HAS_SMART_POWER_DOWN
| FLAG_HAS_AMT
| FLAG_HAS_CTRLEXT_ON_LOAD,
- .pba = 20,
+ .pba = 36,
.max_hw_frame_size = DEFAULT_JUMBO,
.get_variants = e1000_get_variants_82571,
.mac_ops = &e82571_mac_ops,
@@ -1862,7 +1869,7 @@ struct e1000_info e1000_82583_info = {
| FLAG_HAS_SMART_POWER_DOWN
| FLAG_HAS_AMT
| FLAG_HAS_CTRLEXT_ON_LOAD,
- .pba = 20,
+ .pba = 36,
.max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN,
.get_variants = e1000_get_variants_82571,
.mac_ops = &e82571_mac_ops,
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h
index e301e26d6897..4dc02c71ffd6 100644
--- a/drivers/net/e1000e/defines.h
+++ b/drivers/net/e1000e/defines.h
@@ -138,6 +138,11 @@
/* Enable MNG packets to host memory */
#define E1000_MANC_EN_MNG2HOST 0x00200000
+#define E1000_MANC2H_PORT_623 0x00000020 /* Port 0x26f */
+#define E1000_MANC2H_PORT_664 0x00000040 /* Port 0x298 */
+#define E1000_MDEF_PORT_623 0x00000800 /* Port 0x26f */
+#define E1000_MDEF_PORT_664 0x00000400 /* Port 0x298 */
+
/* Receive Control */
#define E1000_RCTL_EN 0x00000002 /* enable */
#define E1000_RCTL_SBP 0x00000004 /* store bad packet */
@@ -214,6 +219,8 @@
#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */
#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */
#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */
+#define E1000_CTRL_LANPHYPC_OVERRIDE 0x00010000 /* SW control of LANPHYPC */
+#define E1000_CTRL_LANPHYPC_VALUE 0x00020000 /* SW value of LANPHYPC */
#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */
@@ -622,6 +629,8 @@
#define NVM_ALT_MAC_ADDR_PTR 0x0037
#define NVM_CHECKSUM_REG 0x003F
+#define E1000_NVM_INIT_CTRL2_MNGM 0x6000 /* Manageability Operation Mode mask */
+
#define E1000_NVM_CFG_DONE_PORT_0 0x40000 /* MNG config cycle done */
#define E1000_NVM_CFG_DONE_PORT_1 0x80000 /* ...for second port */
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index ee32b9b27a9f..c0b3db40bd73 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -43,25 +43,16 @@
struct e1000_info;
-#define e_printk(level, adapter, format, arg...) \
- printk(level "%s: %s: " format, pci_name(adapter->pdev), \
- adapter->netdev->name, ## arg)
-
-#ifdef DEBUG
#define e_dbg(format, arg...) \
- e_printk(KERN_DEBUG , hw->adapter, format, ## arg)
-#else
-#define e_dbg(format, arg...) do { (void)(hw); } while (0)
-#endif
-
+ netdev_dbg(hw->adapter->netdev, format, ## arg)
#define e_err(format, arg...) \
- e_printk(KERN_ERR, adapter, format, ## arg)
+ netdev_err(adapter->netdev, format, ## arg)
#define e_info(format, arg...) \
- e_printk(KERN_INFO, adapter, format, ## arg)
+ netdev_info(adapter->netdev, format, ## arg)
#define e_warn(format, arg...) \
- e_printk(KERN_WARNING, adapter, format, ## arg)
+ netdev_warn(adapter->netdev, format, ## arg)
#define e_notice(format, arg...) \
- e_printk(KERN_NOTICE, adapter, format, ## arg)
+ netdev_notice(adapter->netdev, format, ## arg)
/* Interrupt modes, as used by the IntMode parameter */
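Since the e1000e message macros are now thin wrappers around the kernel's netdev_* helpers, every message is automatically prefixed with the device and interface name, and e_dbg() inherits the usual pr_debug-style gating (compiled out unless DEBUG is defined, or toggled at runtime with dynamic debug). As a minimal illustration of what a call site amounts to after this change, assuming the local adapter pointer the driver already has in scope:

	/* what the driver writes */
	e_info("online testing starting\n");

	/* what the macro above expands it to */
	netdev_info(adapter->netdev, "online testing starting\n");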
@@ -159,6 +150,9 @@ struct e1000_info;
#define HV_M_STATUS_SPEED_1000 0x0200
#define HV_M_STATUS_LINK_UP 0x0040
+/* Time to wait before putting the device into D3 if there's no link (in ms). */
+#define LINK_TIMEOUT 100
+
enum e1000_boards {
board_82571,
board_82572,
@@ -195,6 +189,8 @@ struct e1000_buffer {
unsigned long time_stamp;
u16 length;
u16 next_to_watch;
+ unsigned int segs;
+ unsigned int bytecount;
u16 mapped_as_page;
};
/* Rx */
@@ -370,6 +366,8 @@ struct e1000_adapter {
struct work_struct update_phy_task;
struct work_struct led_blink_task;
struct work_struct print_hang_task;
+
+ bool idle_check;
};
struct e1000_info {
diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
index 27d21589a69a..38d79a669059 100644
--- a/drivers/net/e1000e/es2lan.c
+++ b/drivers/net/e1000e/es2lan.c
@@ -221,9 +221,12 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
mac->mta_reg_count = 128;
/* Set rar entry count */
mac->rar_entry_count = E1000_RAR_ENTRIES;
- /* Set if manageability features are enabled. */
- mac->arc_subsystem_valid = (er32(FWSM) & E1000_FWSM_MODE_MASK)
- ? true : false;
+ /* FWSM register */
+ mac->has_fwsm = true;
+ /* ARC supported; valid only if manageability features are enabled. */
+ mac->arc_subsystem_valid =
+ (er32(FWSM) & E1000_FWSM_MODE_MASK)
+ ? true : false;
/* Adaptive IFS not supported */
mac->adaptive_ifs = false;
@@ -1380,8 +1383,6 @@ static void e1000_power_down_phy_copper_80003es2lan(struct e1000_hw *hw)
if (!(hw->mac.ops.check_mng_mode(hw) ||
hw->phy.ops.check_reset_block(hw)))
e1000_power_down_phy_copper(hw);
-
- return;
}
/**
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index 983493f2330c..2c521218102b 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -412,7 +412,6 @@ static int e1000_set_tso(struct net_device *netdev, u32 data)
netdev->features &= ~NETIF_F_TSO6;
}
- e_info("TSO is %s\n", data ? "Enabled" : "Disabled");
adapter->flags |= FLAG_TSO_FORCE;
return 0;
}
@@ -1069,10 +1068,10 @@ static void e1000_free_desc_rings(struct e1000_adapter *adapter)
if (tx_ring->desc && tx_ring->buffer_info) {
for (i = 0; i < tx_ring->count; i++) {
if (tx_ring->buffer_info[i].dma)
- pci_unmap_single(pdev,
+ dma_unmap_single(&pdev->dev,
tx_ring->buffer_info[i].dma,
tx_ring->buffer_info[i].length,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
if (tx_ring->buffer_info[i].skb)
dev_kfree_skb(tx_ring->buffer_info[i].skb);
}
@@ -1081,9 +1080,9 @@ static void e1000_free_desc_rings(struct e1000_adapter *adapter)
if (rx_ring->desc && rx_ring->buffer_info) {
for (i = 0; i < rx_ring->count; i++) {
if (rx_ring->buffer_info[i].dma)
- pci_unmap_single(pdev,
+ dma_unmap_single(&pdev->dev,
rx_ring->buffer_info[i].dma,
- 2048, PCI_DMA_FROMDEVICE);
+ 2048, DMA_FROM_DEVICE);
if (rx_ring->buffer_info[i].skb)
dev_kfree_skb(rx_ring->buffer_info[i].skb);
}
@@ -1163,9 +1162,10 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
tx_ring->buffer_info[i].skb = skb;
tx_ring->buffer_info[i].length = skb->len;
tx_ring->buffer_info[i].dma =
- pci_map_single(pdev, skb->data, skb->len,
- PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(pdev, tx_ring->buffer_info[i].dma)) {
+ dma_map_single(&pdev->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev,
+ tx_ring->buffer_info[i].dma)) {
ret_val = 4;
goto err_nomem;
}
@@ -1226,9 +1226,10 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
skb_reserve(skb, NET_IP_ALIGN);
rx_ring->buffer_info[i].skb = skb;
rx_ring->buffer_info[i].dma =
- pci_map_single(pdev, skb->data, 2048,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(pdev, rx_ring->buffer_info[i].dma)) {
+ dma_map_single(&pdev->dev, skb->data, 2048,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev,
+ rx_ring->buffer_info[i].dma)) {
ret_val = 8;
goto err_nomem;
}
@@ -1556,10 +1557,10 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
for (i = 0; i < 64; i++) { /* send the packets */
e1000_create_lbtest_frame(tx_ring->buffer_info[k].skb,
1024);
- pci_dma_sync_single_for_device(pdev,
+ dma_sync_single_for_device(&pdev->dev,
tx_ring->buffer_info[k].dma,
tx_ring->buffer_info[k].length,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
k++;
if (k == tx_ring->count)
k = 0;
@@ -1569,9 +1570,9 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
time = jiffies; /* set the start time for the receive */
good_cnt = 0;
do { /* receive the sent packets */
- pci_dma_sync_single_for_cpu(pdev,
+ dma_sync_single_for_cpu(&pdev->dev,
rx_ring->buffer_info[l].dma, 2048,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
ret_val = e1000_check_lbtest_frame(
rx_ring->buffer_info[l].skb, 1024);
@@ -1736,6 +1737,12 @@ static void e1000_diag_test(struct net_device *netdev,
if (if_running)
dev_open(netdev);
} else {
+ if (!if_running && (adapter->flags & FLAG_HAS_AMT)) {
+ clear_bit(__E1000_TESTING, &adapter->state);
+ dev_open(netdev);
+ set_bit(__E1000_TESTING, &adapter->state);
+ }
+
e_info("online testing starting\n");
/* Online tests */
if (e1000_link_test(adapter, &data[4]))
@@ -1747,6 +1754,9 @@ static void e1000_diag_test(struct net_device *netdev,
data[2] = 0;
data[3] = 0;
+ if (!if_running && (adapter->flags & FLAG_HAS_AMT))
+ dev_close(netdev);
+
clear_bit(__E1000_TESTING, &adapter->state);
}
msleep_interruptible(4 * 1000);
@@ -1889,7 +1899,7 @@ static int e1000_get_coalesce(struct net_device *netdev,
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- if (adapter->itr_setting <= 3)
+ if (adapter->itr_setting <= 4)
ec->rx_coalesce_usecs = adapter->itr_setting;
else
ec->rx_coalesce_usecs = 1000000 / adapter->itr_setting;
@@ -1904,12 +1914,14 @@ static int e1000_set_coalesce(struct net_device *netdev,
struct e1000_hw *hw = &adapter->hw;
if ((ec->rx_coalesce_usecs > E1000_MAX_ITR_USECS) ||
- ((ec->rx_coalesce_usecs > 3) &&
+ ((ec->rx_coalesce_usecs > 4) &&
(ec->rx_coalesce_usecs < E1000_MIN_ITR_USECS)) ||
(ec->rx_coalesce_usecs == 2))
return -EINVAL;
- if (ec->rx_coalesce_usecs <= 3) {
+ if (ec->rx_coalesce_usecs == 4) {
+ adapter->itr = adapter->itr_setting = 4;
+ } else if (ec->rx_coalesce_usecs <= 3) {
adapter->itr = 20000;
adapter->itr_setting = ec->rx_coalesce_usecs;
} else {
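
With this change, an rx-usecs value of 4 selects the new simplified adaptive ITR mode (implemented in the watchdog task later in this patch), 1 and 3 keep the existing dynamic modes, and values in the legal microsecond range program a fixed interrupt rate. As a worked example of the reporting path above: a fixed itr_setting of 20000 interrupts/s is reported back as 1000000 / 20000 = 50 rx-usecs, while an itr_setting of 4 is returned verbatim. From userspace the new mode can be requested with, e.g., ethtool -C ethX rx-usecs 4.
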
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index 8bdcd5f24eff..5d1220d188d4 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -208,6 +208,8 @@ enum e1e_registers {
E1000_KMRNCTRLSTA = 0x00034, /* MAC-PHY interface - RW */
E1000_MANC2H = 0x05860, /* Management Control To Host - RW */
+ E1000_MDEF_BASE = 0x05890, /* Management Decision Filters */
+#define E1000_MDEF(_n) (E1000_MDEF_BASE + ((_n) * 4))
E1000_SW_FW_SYNC = 0x05B5C, /* Software-Firmware Synchronization - RW */
E1000_GCR = 0x05B00, /* PCI-Ex Control */
E1000_GCR2 = 0x05B64, /* PCI-Ex Control #2 */
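
The E1000_MDEF() helper added above indexes the eight Management Decision Filter registers in 4-byte steps from the 0x05890 base and is later read through the driver's er32() accessor. A small illustration (addresses derived from the macro itself, not quoted from a datasheet):

	/* E1000_MDEF(0) == 0x05890, E1000_MDEF(1) == 0x05894, ...,
	 * E1000_MDEF(7) == 0x058AC -- the eight filters scanned in
	 * e1000_init_manageability_pt() further down in this patch.
	 */
	mdef = er32(MDEF(i));	/* expands to __er32(hw, E1000_MDEF(i)) */
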
@@ -380,6 +382,7 @@ enum e1e_registers {
#define E1000_DEV_ID_ICH10_R_BM_V 0x10CE
#define E1000_DEV_ID_ICH10_D_BM_LM 0x10DE
#define E1000_DEV_ID_ICH10_D_BM_LF 0x10DF
+#define E1000_DEV_ID_ICH10_D_BM_V 0x1525
#define E1000_DEV_ID_PCH_M_HV_LM 0x10EA
#define E1000_DEV_ID_PCH_M_HV_LC 0x10EB
#define E1000_DEV_ID_PCH_D_HV_DM 0x10EF
@@ -828,6 +831,7 @@ struct e1000_mac_info {
u8 forced_speed_duplex;
bool adaptive_ifs;
+ bool has_fwsm;
bool arc_subsystem_valid;
bool autoneg;
bool autoneg_failed;
@@ -898,6 +902,7 @@ struct e1000_fc_info {
u32 high_water; /* Flow control high-water mark */
u32 low_water; /* Flow control low-water mark */
u16 pause_time; /* Flow control pause timer */
+ u16 refresh_time; /* Flow control refresh timer */
bool send_xon; /* Flow control send XON */
bool strict_ieee; /* Strict IEEE mode */
enum e1000_fc_mode current_mode; /* FC mode in effect */
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index 8b5e157e9c87..b2507d93de99 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -83,6 +83,8 @@
#define E1000_ICH_FWSM_RSPCIPHY 0x00000040 /* Reset PHY on PCI Reset */
+/* FW established a valid mode */
+#define E1000_ICH_FWSM_FW_VALID 0x00008000
#define E1000_ICH_MNG_IAMT_MODE 0x2
@@ -259,6 +261,7 @@ static inline void __ew32flash(struct e1000_hw *hw, unsigned long reg, u32 val)
static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
{
struct e1000_phy_info *phy = &hw->phy;
+ u32 ctrl;
s32 ret_val = 0;
phy->addr = 1;
@@ -274,6 +277,33 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+ if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
+ /*
+ * The MAC-PHY interconnect may still be in SMBus mode
+ * after Sx->S0. Toggle the LANPHYPC Value bit to force
+ * the interconnect to PCIe mode, but only if there is no
+ * firmware present; otherwise the firmware will have done it.
+ */
+ ctrl = er32(CTRL);
+ ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE;
+ ctrl &= ~E1000_CTRL_LANPHYPC_VALUE;
+ ew32(CTRL, ctrl);
+ udelay(10);
+ ctrl &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
+ ew32(CTRL, ctrl);
+ msleep(50);
+ }
+
+ /*
+ * Reset the PHY before any access to it. Doing so ensures that
+ * the PHY is in a known good state before we read/write PHY registers.
+ * The generic reset is sufficient here, because we haven't determined
+ * the PHY type yet.
+ */
+ ret_val = e1000e_phy_hw_reset_generic(hw);
+ if (ret_val)
+ goto out;
+
phy->id = e1000_phy_unknown;
ret_val = e1000e_get_phy_id(hw);
if (ret_val)
@@ -300,6 +330,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
phy->ops.get_cable_length = e1000_get_cable_length_82577;
phy->ops.get_info = e1000_get_phy_info_82577;
phy->ops.commit = e1000e_phy_sw_reset;
+ break;
case e1000_phy_82578:
phy->ops.check_polarity = e1000_check_polarity_m88;
phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_m88;
@@ -472,8 +503,10 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter)
mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
if (mac->type == e1000_ich8lan)
mac->rar_entry_count--;
- /* Set if manageability features are enabled. */
- mac->arc_subsystem_valid = true;
+ /* FWSM register */
+ mac->has_fwsm = true;
+ /* ARC subsystem not supported */
+ mac->arc_subsystem_valid = false;
/* Adaptive IFS supported */
mac->adaptive_ifs = true;
@@ -657,8 +690,6 @@ static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
static void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
{
mutex_unlock(&nvm_mutex);
-
- return;
}
static DEFINE_MUTEX(swflag_mutex);
@@ -737,8 +768,6 @@ static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
ew32(EXTCNF_CTRL, extcnf_ctrl);
mutex_unlock(&swflag_mutex);
-
- return;
}
/**
@@ -785,11 +814,16 @@ static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
**/
static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
{
+ struct e1000_adapter *adapter = hw->adapter;
struct e1000_phy_info *phy = &hw->phy;
u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
- s32 ret_val;
+ s32 ret_val = 0;
u16 word_addr, reg_data, reg_addr, phy_page = 0;
+ if (!(hw->mac.type == e1000_ich8lan && phy->type == e1000_phy_igp_3) &&
+ !(hw->mac.type == e1000_pchlan))
+ return ret_val;
+
ret_val = hw->phy.ops.acquire(hw);
if (ret_val)
return ret_val;
@@ -801,97 +835,87 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
* Therefore, after each PHY reset, we will load the
* configuration data out of the NVM manually.
*/
- if ((hw->mac.type == e1000_ich8lan && phy->type == e1000_phy_igp_3) ||
- (hw->mac.type == e1000_pchlan)) {
- struct e1000_adapter *adapter = hw->adapter;
-
- /* Check if SW needs to configure the PHY */
- if ((adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_M_AMT) ||
- (adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_M) ||
- (hw->mac.type == e1000_pchlan))
- sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
- else
- sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
+ if ((adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_M_AMT) ||
+ (adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_M) ||
+ (hw->mac.type == e1000_pchlan))
+ sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
+ else
+ sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
- data = er32(FEXTNVM);
- if (!(data & sw_cfg_mask))
- goto out;
+ data = er32(FEXTNVM);
+ if (!(data & sw_cfg_mask))
+ goto out;
- /* Wait for basic configuration completes before proceeding */
- e1000_lan_init_done_ich8lan(hw);
+ /*
+ * Make sure HW does not configure LCD from PHY
+ * extended configuration before SW configuration
+ */
+ data = er32(EXTCNF_CTRL);
+ if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)
+ goto out;
+
+ cnf_size = er32(EXTCNF_SIZE);
+ cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
+ cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
+ if (!cnf_size)
+ goto out;
+
+ cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
+ cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
+ if (!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) &&
+ (hw->mac.type == e1000_pchlan)) {
/*
- * Make sure HW does not configure LCD from PHY
- * extended configuration before SW configuration
+ * HW configures the SMBus address and LEDs when the
+ * OEM and LCD Write Enable bits are set in the NVM.
+ * When both NVM bits are cleared, SW will configure
+ * them instead.
*/
- data = er32(EXTCNF_CTRL);
- if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)
+ data = er32(STRAP);
+ data &= E1000_STRAP_SMBUS_ADDRESS_MASK;
+ reg_data = data >> E1000_STRAP_SMBUS_ADDRESS_SHIFT;
+ reg_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
+ ret_val = e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR,
+ reg_data);
+ if (ret_val)
goto out;
- cnf_size = er32(EXTCNF_SIZE);
- cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
- cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
- if (!cnf_size)
+ data = er32(LEDCTL);
+ ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
+ (u16)data);
+ if (ret_val)
goto out;
+ }
- cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
- cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
-
- if (!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) &&
- (hw->mac.type == e1000_pchlan)) {
- /*
- * HW configures the SMBus address and LEDs when the
- * OEM and LCD Write Enable bits are set in the NVM.
- * When both NVM bits are cleared, SW will configure
- * them instead.
- */
- data = er32(STRAP);
- data &= E1000_STRAP_SMBUS_ADDRESS_MASK;
- reg_data = data >> E1000_STRAP_SMBUS_ADDRESS_SHIFT;
- reg_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
- ret_val = e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR,
- reg_data);
- if (ret_val)
- goto out;
-
- data = er32(LEDCTL);
- ret_val = e1000_write_phy_reg_hv_locked(hw,
- HV_LED_CONFIG,
- (u16)data);
- if (ret_val)
- goto out;
- }
- /* Configure LCD from extended configuration region. */
+ /* Configure LCD from extended configuration region. */
- /* cnf_base_addr is in DWORD */
- word_addr = (u16)(cnf_base_addr << 1);
+ /* cnf_base_addr is in DWORD */
+ word_addr = (u16)(cnf_base_addr << 1);
- for (i = 0; i < cnf_size; i++) {
- ret_val = e1000_read_nvm(hw, (word_addr + i * 2), 1,
- &reg_data);
- if (ret_val)
- goto out;
+ for (i = 0; i < cnf_size; i++) {
+ ret_val = e1000_read_nvm(hw, (word_addr + i * 2), 1,
+ &reg_data);
+ if (ret_val)
+ goto out;
- ret_val = e1000_read_nvm(hw, (word_addr + i * 2 + 1),
- 1, &reg_addr);
- if (ret_val)
- goto out;
+ ret_val = e1000_read_nvm(hw, (word_addr + i * 2 + 1),
+ 1, &reg_addr);
+ if (ret_val)
+ goto out;
- /* Save off the PHY page for future writes. */
- if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
- phy_page = reg_data;
- continue;
- }
+ /* Save off the PHY page for future writes. */
+ if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
+ phy_page = reg_data;
+ continue;
+ }
- reg_addr &= PHY_REG_MASK;
- reg_addr |= phy_page;
+ reg_addr &= PHY_REG_MASK;
+ reg_addr |= phy_page;
- ret_val = phy->ops.write_reg_locked(hw,
- (u32)reg_addr,
- reg_data);
- if (ret_val)
- goto out;
- }
+ ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
+ reg_data);
+ if (ret_val)
+ goto out;
}
out:
@@ -1229,30 +1253,26 @@ static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
}
/**
- * e1000_phy_hw_reset_ich8lan - Performs a PHY reset
+ * e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
* @hw: pointer to the HW structure
- *
- * Resets the PHY
- * This is a function pointer entry point called by drivers
- * or other shared routines.
**/
-static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
+static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
{
s32 ret_val = 0;
u16 reg;
- ret_val = e1000e_phy_hw_reset_generic(hw);
- if (ret_val)
- return ret_val;
-
- /* Allow time for h/w to get to a quiescent state after reset */
- mdelay(10);
+ if (e1000_check_reset_block(hw))
+ goto out;
/* Perform any necessary post-reset workarounds */
- if (hw->mac.type == e1000_pchlan) {
+ switch (hw->mac.type) {
+ case e1000_pchlan:
ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
if (ret_val)
- return ret_val;
+ goto out;
+ break;
+ default:
+ break;
}
/* Dummy read to clear the phy wakeup bit after lcd reset */
@@ -1265,11 +1285,32 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
goto out;
/* Configure the LCD with the OEM bits in NVM */
- if (hw->mac.type == e1000_pchlan)
- ret_val = e1000_oem_bits_config_ich8lan(hw, true);
+ ret_val = e1000_oem_bits_config_ich8lan(hw, true);
out:
- return 0;
+ return ret_val;
+}
+
+/**
+ * e1000_phy_hw_reset_ich8lan - Performs a PHY reset
+ * @hw: pointer to the HW structure
+ *
+ * Resets the PHY
+ * This is a function pointer entry point called by drivers
+ * or other shared routines.
+ **/
+static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
+{
+ s32 ret_val = 0;
+
+ ret_val = e1000e_phy_hw_reset_generic(hw);
+ if (ret_val)
+ goto out;
+
+ ret_val = e1000_post_phy_reset_ich8lan(hw);
+
+out:
+ return ret_val;
}
/**
@@ -1622,7 +1663,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
/* Check if the flash descriptor is valid */
if (hsfsts.hsf_status.fldesvalid == 0) {
e_dbg("Flash descriptor invalid. "
- "SW Sequencing must be used.");
+ "SW Sequencing must be used.\n");
return -E1000_ERR_NVM;
}
@@ -1671,7 +1712,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
hsfsts.hsf_status.flcdone = 1;
ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
} else {
- e_dbg("Flash controller busy, cannot get access");
+ e_dbg("Flash controller busy, cannot get access\n");
}
}
@@ -1822,7 +1863,7 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
continue;
} else if (hsfsts.hsf_status.flcdone == 0) {
e_dbg("Timeout error - flash cycle "
- "did not complete.");
+ "did not complete.\n");
break;
}
}
@@ -1908,18 +1949,14 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
new_bank_offset = nvm->flash_bank_size;
old_bank_offset = 0;
ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
- if (ret_val) {
- nvm->ops.release(hw);
- goto out;
- }
+ if (ret_val)
+ goto release;
} else {
old_bank_offset = nvm->flash_bank_size;
new_bank_offset = 0;
ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
- if (ret_val) {
- nvm->ops.release(hw);
- goto out;
- }
+ if (ret_val)
+ goto release;
}
for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
@@ -1975,8 +2012,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
if (ret_val) {
/* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */
e_dbg("Flash commit failed.\n");
- nvm->ops.release(hw);
- goto out;
+ goto release;
}
/*
@@ -1987,18 +2023,15 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
*/
act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
- if (ret_val) {
- nvm->ops.release(hw);
- goto out;
- }
+ if (ret_val)
+ goto release;
+
data &= 0xBFFF;
ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
act_offset * 2 + 1,
(u8)(data >> 8));
- if (ret_val) {
- nvm->ops.release(hw);
- goto out;
- }
+ if (ret_val)
+ goto release;
/*
* And invalidate the previously valid segment by setting
@@ -2008,10 +2041,8 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
*/
act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
- if (ret_val) {
- nvm->ops.release(hw);
- goto out;
- }
+ if (ret_val)
+ goto release;
/* Great! Everything worked, we can now clear the cached entries. */
for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
@@ -2019,14 +2050,17 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
dev_spec->shadow_ram[i].value = 0xFFFF;
}
+release:
nvm->ops.release(hw);
/*
* Reload the EEPROM, or else modifications will not appear
* until after the next adapter reset.
*/
- e1000e_reload_nvm(hw);
- msleep(10);
+ if (!ret_val) {
+ e1000e_reload_nvm(hw);
+ msleep(10);
+ }
out:
if (ret_val)
@@ -2487,9 +2521,8 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
* on the last TLP read/write transaction when MAC is reset.
*/
ret_val = e1000e_disable_pcie_master(hw);
- if (ret_val) {
+ if (ret_val)
e_dbg("PCI-E Master disable polling has failed.\n");
- }
e_dbg("Masking off all interrupts\n");
ew32(IMC, 0xffffffff);
@@ -2528,14 +2561,8 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
ctrl = er32(CTRL);
if (!e1000_check_reset_block(hw)) {
- /* Clear PHY Reset Asserted bit */
- if (hw->mac.type >= e1000_pchlan) {
- u32 status = er32(STATUS);
- ew32(STATUS, status & ~E1000_STATUS_PHYRA);
- }
-
/*
- * PHY HW reset requires MAC CORE reset at the same
+ * Full-chip reset requires MAC and PHY reset at the same
* time to make sure the interface between MAC and the
* external PHY is reset.
*/
@@ -2549,39 +2576,16 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
if (!ret_val)
e1000_release_swflag_ich8lan(hw);
- /* Perform any necessary post-reset workarounds */
- if (hw->mac.type == e1000_pchlan)
- ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
-
- if (ctrl & E1000_CTRL_PHY_RST)
+ if (ctrl & E1000_CTRL_PHY_RST) {
ret_val = hw->phy.ops.get_cfg_done(hw);
+ if (ret_val)
+ goto out;
- if (hw->mac.type >= e1000_ich10lan) {
- e1000_lan_init_done_ich8lan(hw);
- } else {
- ret_val = e1000e_get_auto_rd_done(hw);
- if (ret_val) {
- /*
- * When auto config read does not complete, do not
- * return with an error. This can happen in situations
- * where there is no eeprom and prevents getting link.
- */
- e_dbg("Auto Read Done did not complete\n");
- }
- }
- /* Dummy read to clear the phy wakeup bit after lcd reset */
- if (hw->mac.type == e1000_pchlan)
- e1e_rphy(hw, BM_WUC, &reg);
-
- ret_val = e1000_sw_lcd_config_ich8lan(hw);
- if (ret_val)
- goto out;
-
- if (hw->mac.type == e1000_pchlan) {
- ret_val = e1000_oem_bits_config_ich8lan(hw, true);
+ ret_val = e1000_post_phy_reset_ich8lan(hw);
if (ret_val)
goto out;
}
+
/*
* For PCH, this write will make sure that any noise
* will be detected as a CRC error and be dropped rather than show up
@@ -2748,8 +2752,6 @@ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
reg = er32(RFCTL);
reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
ew32(RFCTL, reg);
-
- return;
}
/**
@@ -2799,6 +2801,8 @@ static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
ew32(FCTTV, hw->fc.pause_time);
if ((hw->phy.type == e1000_phy_82578) ||
(hw->phy.type == e1000_phy_82577)) {
+ ew32(FCRTV_PCH, hw->fc.refresh_time);
+
ret_val = hw->phy.ops.write_reg(hw,
PHY_REG(BM_PORT_CTRL_PAGE, 27),
hw->fc.pause_time);
@@ -3127,8 +3131,6 @@ void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw)
default:
break;
}
-
- return;
}
/**
@@ -3265,33 +3267,50 @@ static s32 e1000_led_off_pchlan(struct e1000_hw *hw)
}
/**
- * e1000_get_cfg_done_ich8lan - Read config done bit
+ * e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
* @hw: pointer to the HW structure
*
- * Read the management control register for the config done bit for
- * completion status. NOTE: silicon which is EEPROM-less will fail trying
- * to read the config done bit, so an error is *ONLY* logged and returns
- * 0. If we were to return with error, EEPROM-less silicon
- * would not be able to be reset or change link.
+ * Read appropriate register for the config done bit for completion status
+ * and configure the PHY through s/w for EEPROM-less parts.
+ *
+ * NOTE: some silicon which is EEPROM-less will fail trying to read the
+ * config done bit, so only an error is logged and execution continues. If
+ * we were to return an error, EEPROM-less silicon would not be able to be
+ * reset or change link.
**/
static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
{
+ s32 ret_val = 0;
u32 bank = 0;
+ u32 status;
- if (hw->mac.type >= e1000_pchlan) {
- u32 status = er32(STATUS);
+ e1000e_get_cfg_done(hw);
- if (status & E1000_STATUS_PHYRA)
- ew32(STATUS, status & ~E1000_STATUS_PHYRA);
- else
- e_dbg("PHY Reset Asserted not set - needs delay\n");
+ /* Wait for indication from h/w that it has completed basic config */
+ if (hw->mac.type >= e1000_ich10lan) {
+ e1000_lan_init_done_ich8lan(hw);
+ } else {
+ ret_val = e1000e_get_auto_rd_done(hw);
+ if (ret_val) {
+ /*
+ * When auto config read does not complete, do not
+ * return with an error. This can happen in situations
+ * where there is no eeprom and prevents getting link.
+ */
+ e_dbg("Auto Read Done did not complete\n");
+ ret_val = 0;
+ }
}
- e1000e_get_cfg_done(hw);
+ /* Clear PHY Reset Asserted bit */
+ status = er32(STATUS);
+ if (status & E1000_STATUS_PHYRA)
+ ew32(STATUS, status & ~E1000_STATUS_PHYRA);
+ else
+ e_dbg("PHY Reset Asserted not set - needs delay\n");
/* If EEPROM is not marked present, init the IGP 3 PHY manually */
- if ((hw->mac.type != e1000_ich10lan) &&
- (hw->mac.type != e1000_pchlan)) {
+ if (hw->mac.type <= e1000_ich9lan) {
if (((er32(EECD) & E1000_EECD_PRES) == 0) &&
(hw->phy.type == e1000_phy_igp_3)) {
e1000e_phy_init_script_igp3(hw);
@@ -3300,11 +3319,11 @@ static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
/* Maybe we should do a basic PHY config */
e_dbg("EEPROM not present\n");
- return -E1000_ERR_CONFIG;
+ ret_val = -E1000_ERR_CONFIG;
}
}
- return 0;
+ return ret_val;
}
/**
@@ -3320,8 +3339,6 @@ static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
if (!(hw->mac.ops.check_mng_mode(hw) ||
hw->phy.ops.check_reset_block(hw)))
e1000_power_down_phy_copper(hw);
-
- return;
}
/**
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index a8b2c0de27c4..a968e3a416ac 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -1262,24 +1262,21 @@ s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *dup
u32 status;
status = er32(STATUS);
- if (status & E1000_STATUS_SPEED_1000) {
+ if (status & E1000_STATUS_SPEED_1000)
*speed = SPEED_1000;
- e_dbg("1000 Mbs, ");
- } else if (status & E1000_STATUS_SPEED_100) {
+ else if (status & E1000_STATUS_SPEED_100)
*speed = SPEED_100;
- e_dbg("100 Mbs, ");
- } else {
+ else
*speed = SPEED_10;
- e_dbg("10 Mbs, ");
- }
- if (status & E1000_STATUS_FD) {
+ if (status & E1000_STATUS_FD)
*duplex = FULL_DUPLEX;
- e_dbg("Full Duplex\n");
- } else {
+ else
*duplex = HALF_DUPLEX;
- e_dbg("Half Duplex\n");
- }
+
+ e_dbg("%u Mbps, %s Duplex\n",
+ *speed == SPEED_1000 ? 1000 : *speed == SPEED_100 ? 100 : 10,
+ *duplex == FULL_DUPLEX ? "Full" : "Half");
return 0;
}
@@ -2275,6 +2272,11 @@ static s32 e1000_mng_enable_host_if(struct e1000_hw *hw)
u32 hicr;
u8 i;
+ if (!(hw->mac.arc_subsystem_valid)) {
+ e_dbg("ARC subsystem not valid.\n");
+ return -E1000_ERR_HOST_INTERFACE_COMMAND;
+ }
+
/* Check that the host interface is enabled. */
hicr = er32(HICR);
if ((hicr & E1000_HICR_EN) == 0) {
@@ -2518,10 +2520,11 @@ s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length)
}
/**
- * e1000e_enable_mng_pass_thru - Enable processing of ARP's
+ * e1000e_enable_mng_pass_thru - Check if management passthrough is needed
* @hw: pointer to the HW structure
*
- * Verifies the hardware needs to allow ARPs to be processed by the host.
+ * Verifies that the hardware needs to leave the interface enabled so that
+ * frames can be directed to and from the management interface.
**/
bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw)
{
@@ -2531,11 +2534,10 @@ bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw)
manc = er32(MANC);
- if (!(manc & E1000_MANC_RCV_TCO_EN) ||
- !(manc & E1000_MANC_EN_MAC_ADDR_FILTER))
- return ret_val;
+ if (!(manc & E1000_MANC_RCV_TCO_EN))
+ goto out;
- if (hw->mac.arc_subsystem_valid) {
+ if (hw->mac.has_fwsm) {
fwsm = er32(FWSM);
factps = er32(FACTPS);
@@ -2543,16 +2545,28 @@ bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw)
((fwsm & E1000_FWSM_MODE_MASK) ==
(e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) {
ret_val = true;
- return ret_val;
+ goto out;
}
- } else {
- if ((manc & E1000_MANC_SMBUS_EN) &&
- !(manc & E1000_MANC_ASF_EN)) {
+ } else if ((hw->mac.type == e1000_82574) ||
+ (hw->mac.type == e1000_82583)) {
+ u16 data;
+
+ factps = er32(FACTPS);
+ e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &data);
+
+ if (!(factps & E1000_FACTPS_MNGCG) &&
+ ((data & E1000_NVM_INIT_CTRL2_MNGM) ==
+ (e1000_mng_mode_pt << 13))) {
ret_val = true;
- return ret_val;
+ goto out;
}
+ } else if ((manc & E1000_MANC_SMBUS_EN) &&
+ !(manc & E1000_MANC_ASF_EN)) {
+ ret_val = true;
+ goto out;
}
+out:
return ret_val;
}
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index dbf81788bb40..24507f3b8b17 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -26,6 +26,8 @@
*******************************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
@@ -45,11 +47,12 @@
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/pm_qos_params.h>
+#include <linux/pm_runtime.h>
#include <linux/aer.h>
#include "e1000.h"
-#define DRV_VERSION "1.0.2-k2"
+#define DRV_VERSION "1.0.2-k4"
char e1000e_driver_name[] = "e1000e";
const char e1000e_driver_version[] = DRV_VERSION;
@@ -66,6 +69,361 @@ static const struct e1000_info *e1000_info_tbl[] = {
[board_pchlan] = &e1000_pch_info,
};
+struct e1000_reg_info {
+ u32 ofs;
+ char *name;
+};
+
+#define E1000_RDFH 0x02410 /* Rx Data FIFO Head - RW */
+#define E1000_RDFT 0x02418 /* Rx Data FIFO Tail - RW */
+#define E1000_RDFHS 0x02420 /* Rx Data FIFO Head Saved - RW */
+#define E1000_RDFTS 0x02428 /* Rx Data FIFO Tail Saved - RW */
+#define E1000_RDFPC 0x02430 /* Rx Data FIFO Packet Count - RW */
+
+#define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */
+#define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */
+#define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */
+#define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */
+#define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */
+
+static const struct e1000_reg_info e1000_reg_info_tbl[] = {
+
+ /* General Registers */
+ {E1000_CTRL, "CTRL"},
+ {E1000_STATUS, "STATUS"},
+ {E1000_CTRL_EXT, "CTRL_EXT"},
+
+ /* Interrupt Registers */
+ {E1000_ICR, "ICR"},
+
+ /* RX Registers */
+ {E1000_RCTL, "RCTL"},
+ {E1000_RDLEN, "RDLEN"},
+ {E1000_RDH, "RDH"},
+ {E1000_RDT, "RDT"},
+ {E1000_RDTR, "RDTR"},
+ {E1000_RXDCTL(0), "RXDCTL"},
+ {E1000_ERT, "ERT"},
+ {E1000_RDBAL, "RDBAL"},
+ {E1000_RDBAH, "RDBAH"},
+ {E1000_RDFH, "RDFH"},
+ {E1000_RDFT, "RDFT"},
+ {E1000_RDFHS, "RDFHS"},
+ {E1000_RDFTS, "RDFTS"},
+ {E1000_RDFPC, "RDFPC"},
+
+ /* TX Registers */
+ {E1000_TCTL, "TCTL"},
+ {E1000_TDBAL, "TDBAL"},
+ {E1000_TDBAH, "TDBAH"},
+ {E1000_TDLEN, "TDLEN"},
+ {E1000_TDH, "TDH"},
+ {E1000_TDT, "TDT"},
+ {E1000_TIDV, "TIDV"},
+ {E1000_TXDCTL(0), "TXDCTL"},
+ {E1000_TADV, "TADV"},
+ {E1000_TARC(0), "TARC"},
+ {E1000_TDFH, "TDFH"},
+ {E1000_TDFT, "TDFT"},
+ {E1000_TDFHS, "TDFHS"},
+ {E1000_TDFTS, "TDFTS"},
+ {E1000_TDFPC, "TDFPC"},
+
+ /* List Terminator */
+ {}
+};
+
+/*
+ * e1000_regdump - register printout routine
+ */
+static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
+{
+ int n = 0;
+ char rname[16];
+ u32 regs[8];
+
+ switch (reginfo->ofs) {
+ case E1000_RXDCTL(0):
+ for (n = 0; n < 2; n++)
+ regs[n] = __er32(hw, E1000_RXDCTL(n));
+ break;
+ case E1000_TXDCTL(0):
+ for (n = 0; n < 2; n++)
+ regs[n] = __er32(hw, E1000_TXDCTL(n));
+ break;
+ case E1000_TARC(0):
+ for (n = 0; n < 2; n++)
+ regs[n] = __er32(hw, E1000_TARC(n));
+ break;
+ default:
+ printk(KERN_INFO "%-15s %08x\n",
+ reginfo->name, __er32(hw, reginfo->ofs));
+ return;
+ }
+
+ snprintf(rname, 16, "%s%s", reginfo->name, "[0-1]");
+ printk(KERN_INFO "%-15s ", rname);
+ for (n = 0; n < 2; n++)
+ printk(KERN_CONT "%08x ", regs[n]);
+ printk(KERN_CONT "\n");
+}
+
+
+/*
+ * e1000e_dump - Print registers, tx-ring and rx-ring
+ */
+static void e1000e_dump(struct e1000_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct e1000_hw *hw = &adapter->hw;
+ struct e1000_reg_info *reginfo;
+ struct e1000_ring *tx_ring = adapter->tx_ring;
+ struct e1000_tx_desc *tx_desc;
+ struct my_u0 { u64 a; u64 b; } *u0;
+ struct e1000_buffer *buffer_info;
+ struct e1000_ring *rx_ring = adapter->rx_ring;
+ union e1000_rx_desc_packet_split *rx_desc_ps;
+ struct e1000_rx_desc *rx_desc;
+ struct my_u1 { u64 a; u64 b; u64 c; u64 d; } *u1;
+ u32 staterr;
+ int i = 0;
+
+ if (!netif_msg_hw(adapter))
+ return;
+
+ /* Print netdevice Info */
+ if (netdev) {
+ dev_info(&adapter->pdev->dev, "Net device Info\n");
+ printk(KERN_INFO "Device Name state "
+ "trans_start last_rx\n");
+ printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
+ netdev->name,
+ netdev->state,
+ netdev->trans_start,
+ netdev->last_rx);
+ }
+
+ /* Print Registers */
+ dev_info(&adapter->pdev->dev, "Register Dump\n");
+ printk(KERN_INFO " Register Name Value\n");
+ for (reginfo = (struct e1000_reg_info *)e1000_reg_info_tbl;
+ reginfo->name; reginfo++) {
+ e1000_regdump(hw, reginfo);
+ }
+
+ /* Print TX Ring Summary */
+ if (!netdev || !netif_running(netdev))
+ goto exit;
+
+ dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
+ printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma ]"
+ " leng ntw timestamp\n");
+ buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
+ printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
+ 0, tx_ring->next_to_use, tx_ring->next_to_clean,
+ (u64)buffer_info->dma,
+ buffer_info->length,
+ buffer_info->next_to_watch,
+ (u64)buffer_info->time_stamp);
+
+ /* Print TX Rings */
+ if (!netif_msg_tx_done(adapter))
+ goto rx_ring_summary;
+
+ dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
+
+ /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
+ *
+ * Legacy Transmit Descriptor
+ * +--------------------------------------------------------------+
+ * 0 | Buffer Address [63:0] (Reserved on Write Back) |
+ * +--------------------------------------------------------------+
+ * 8 | Special | CSS | Status | CMD | CSO | Length |
+ * +--------------------------------------------------------------+
+ * 63 48 47 36 35 32 31 24 23 16 15 0
+ *
+ * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
+ * 63 48 47 40 39 32 31 16 15 8 7 0
+ * +----------------------------------------------------------------+
+ * 0 | TUCSE | TUCS0 | TUCSS | IPCSE | IPCS0 | IPCSS |
+ * +----------------------------------------------------------------+
+ * 8 | MSS | HDRLEN | RSV | STA | TUCMD | DTYP | PAYLEN |
+ * +----------------------------------------------------------------+
+ * 63 48 47 40 39 36 35 32 31 24 23 20 19 0
+ *
+ * Extended Data Descriptor (DTYP=0x1)
+ * +----------------------------------------------------------------+
+ * 0 | Buffer Address [63:0] |
+ * +----------------------------------------------------------------+
+ * 8 | VLAN tag | POPTS | Rsvd | Status | Command | DTYP | DTALEN |
+ * +----------------------------------------------------------------+
+ * 63 48 47 40 39 36 35 32 31 24 23 20 19 0
+ */
+ printk(KERN_INFO "Tl[desc] [address 63:0 ] [SpeCssSCmCsLen]"
+ " [bi->dma ] leng ntw timestamp bi->skb "
+ "<-- Legacy format\n");
+ printk(KERN_INFO "Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen]"
+ " [bi->dma ] leng ntw timestamp bi->skb "
+ "<-- Ext Context format\n");
+ printk(KERN_INFO "Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen]"
+ " [bi->dma ] leng ntw timestamp bi->skb "
+ "<-- Ext Data format\n");
+ for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
+ tx_desc = E1000_TX_DESC(*tx_ring, i);
+ buffer_info = &tx_ring->buffer_info[i];
+ u0 = (struct my_u0 *)tx_desc;
+ printk(KERN_INFO "T%c[0x%03X] %016llX %016llX %016llX "
+ "%04X %3X %016llX %p",
+ (!(le64_to_cpu(u0->b) & (1<<29)) ? 'l' :
+ ((le64_to_cpu(u0->b) & (1<<20)) ? 'd' : 'c')), i,
+ le64_to_cpu(u0->a), le64_to_cpu(u0->b),
+ (u64)buffer_info->dma, buffer_info->length,
+ buffer_info->next_to_watch, (u64)buffer_info->time_stamp,
+ buffer_info->skb);
+ if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
+ printk(KERN_CONT " NTC/U\n");
+ else if (i == tx_ring->next_to_use)
+ printk(KERN_CONT " NTU\n");
+ else if (i == tx_ring->next_to_clean)
+ printk(KERN_CONT " NTC\n");
+ else
+ printk(KERN_CONT "\n");
+
+ if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
+ 16, 1, phys_to_virt(buffer_info->dma),
+ buffer_info->length, true);
+ }
+
+ /* Print RX Rings Summary */
+rx_ring_summary:
+ dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
+ printk(KERN_INFO "Queue [NTU] [NTC]\n");
+ printk(KERN_INFO " %5d %5X %5X\n", 0,
+ rx_ring->next_to_use, rx_ring->next_to_clean);
+
+ /* Print RX Rings */
+ if (!netif_msg_rx_status(adapter))
+ goto exit;
+
+ dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
+ switch (adapter->rx_ps_pages) {
+ case 1:
+ case 2:
+ case 3:
+ /* [Extended] Packet Split Receive Descriptor Format
+ *
+ * +-----------------------------------------------------+
+ * 0 | Buffer Address 0 [63:0] |
+ * +-----------------------------------------------------+
+ * 8 | Buffer Address 1 [63:0] |
+ * +-----------------------------------------------------+
+ * 16 | Buffer Address 2 [63:0] |
+ * +-----------------------------------------------------+
+ * 24 | Buffer Address 3 [63:0] |
+ * +-----------------------------------------------------+
+ */
+ printk(KERN_INFO "R [desc] [buffer 0 63:0 ] "
+ "[buffer 1 63:0 ] "
+ "[buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma ] "
+ "[bi->skb] <-- Ext Pkt Split format\n");
+ /* [Extended] Receive Descriptor (Write-Back) Format
+ *
+ * 63 48 47 32 31 13 12 8 7 4 3 0
+ * +------------------------------------------------------+
+ * 0 | Packet | IP | Rsvd | MRQ | Rsvd | MRQ RSS |
+ * | Checksum | Ident | | Queue | | Type |
+ * +------------------------------------------------------+
+ * 8 | VLAN Tag | Length | Extended Error | Extended Status |
+ * +------------------------------------------------------+
+ * 63 48 47 32 31 20 19 0
+ */
+ printk(KERN_INFO "RWB[desc] [ck ipid mrqhsh] "
+ "[vl l0 ee es] "
+ "[ l3 l2 l1 hs] [reserved ] ---------------- "
+ "[bi->skb] <-- Ext Rx Write-Back format\n");
+ for (i = 0; i < rx_ring->count; i++) {
+ buffer_info = &rx_ring->buffer_info[i];
+ rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i);
+ u1 = (struct my_u1 *)rx_desc_ps;
+ staterr =
+ le32_to_cpu(rx_desc_ps->wb.middle.status_error);
+ if (staterr & E1000_RXD_STAT_DD) {
+ /* Descriptor Done */
+ printk(KERN_INFO "RWB[0x%03X] %016llX "
+ "%016llX %016llX %016llX "
+ "---------------- %p", i,
+ le64_to_cpu(u1->a),
+ le64_to_cpu(u1->b),
+ le64_to_cpu(u1->c),
+ le64_to_cpu(u1->d),
+ buffer_info->skb);
+ } else {
+ printk(KERN_INFO "R [0x%03X] %016llX "
+ "%016llX %016llX %016llX %016llX %p", i,
+ le64_to_cpu(u1->a),
+ le64_to_cpu(u1->b),
+ le64_to_cpu(u1->c),
+ le64_to_cpu(u1->d),
+ (u64)buffer_info->dma,
+ buffer_info->skb);
+
+ if (netif_msg_pktdata(adapter))
+ print_hex_dump(KERN_INFO, "",
+ DUMP_PREFIX_ADDRESS, 16, 1,
+ phys_to_virt(buffer_info->dma),
+ adapter->rx_ps_bsize0, true);
+ }
+
+ if (i == rx_ring->next_to_use)
+ printk(KERN_CONT " NTU\n");
+ else if (i == rx_ring->next_to_clean)
+ printk(KERN_CONT " NTC\n");
+ else
+ printk(KERN_CONT "\n");
+ }
+ break;
+ default:
+ case 0:
+ /* Legacy Receive Descriptor Format
+ *
+ * +-----------------------------------------------------+
+ * | Buffer Address [63:0] |
+ * +-----------------------------------------------------+
+ * | VLAN Tag | Errors | Status 0 | Packet csum | Length |
+ * +-----------------------------------------------------+
+ * 63 48 47 40 39 32 31 16 15 0
+ */
+ printk(KERN_INFO "Rl[desc] [address 63:0 ] "
+ "[vl er S cks ln] [bi->dma ] [bi->skb] "
+ "<-- Legacy format\n");
+ for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
+ rx_desc = E1000_RX_DESC(*rx_ring, i);
+ buffer_info = &rx_ring->buffer_info[i];
+ u0 = (struct my_u0 *)rx_desc;
+ printk(KERN_INFO "Rl[0x%03X] %016llX %016llX "
+ "%016llX %p",
+ i, le64_to_cpu(u0->a), le64_to_cpu(u0->b),
+ (u64)buffer_info->dma, buffer_info->skb);
+ if (i == rx_ring->next_to_use)
+ printk(KERN_CONT " NTU\n");
+ else if (i == rx_ring->next_to_clean)
+ printk(KERN_CONT " NTC\n");
+ else
+ printk(KERN_CONT "\n");
+
+ if (netif_msg_pktdata(adapter))
+ print_hex_dump(KERN_INFO, "",
+ DUMP_PREFIX_ADDRESS,
+ 16, 1, phys_to_virt(buffer_info->dma),
+ adapter->rx_buffer_len, true);
+ }
+ }
+
+exit:
+ return;
+}
+
/**
* e1000_desc_unused - calculate if we have unused descriptors
**/
@@ -178,10 +536,10 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
buffer_info->skb = skb;
map_skb:
- buffer_info->dma = pci_map_single(pdev, skb->data,
+ buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
adapter->rx_buffer_len,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
dev_err(&pdev->dev, "RX DMA map failed\n");
adapter->rx_dma_failed++;
break;
@@ -190,26 +548,23 @@ map_skb:
rx_desc = E1000_RX_DESC(*rx_ring, i);
rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
+ if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
+ /*
+ * Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+ writel(i, adapter->hw.hw_addr + rx_ring->tail);
+ }
i++;
if (i == rx_ring->count)
i = 0;
buffer_info = &rx_ring->buffer_info[i];
}
- if (rx_ring->next_to_use != i) {
- rx_ring->next_to_use = i;
- if (i-- == 0)
- i = (rx_ring->count - 1);
-
- /*
- * Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch. (Only
- * applicable for weak-ordered memory model archs,
- * such as IA-64).
- */
- wmb();
- writel(i, adapter->hw.hw_addr + rx_ring->tail);
- }
+ rx_ring->next_to_use = i;
}
/**
@@ -247,11 +602,12 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
adapter->alloc_rx_buff_failed++;
goto no_buffers;
}
- ps_page->dma = pci_map_page(pdev,
- ps_page->page,
- 0, PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(pdev, ps_page->dma)) {
+ ps_page->dma = dma_map_page(&pdev->dev,
+ ps_page->page,
+ 0, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev,
+ ps_page->dma)) {
dev_err(&adapter->pdev->dev,
"RX DMA page map failed\n");
adapter->rx_dma_failed++;
@@ -276,10 +632,10 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
}
buffer_info->skb = skb;
- buffer_info->dma = pci_map_single(pdev, skb->data,
+ buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
adapter->rx_ps_bsize0,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
dev_err(&pdev->dev, "RX DMA map failed\n");
adapter->rx_dma_failed++;
/* cleanup skb */
@@ -290,6 +646,17 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);
+ if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
+ /*
+ * Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+ writel(i<<1, adapter->hw.hw_addr + rx_ring->tail);
+ }
+
i++;
if (i == rx_ring->count)
i = 0;
@@ -297,26 +664,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
}
no_buffers:
- if (rx_ring->next_to_use != i) {
- rx_ring->next_to_use = i;
-
- if (!(i--))
- i = (rx_ring->count - 1);
-
- /*
- * Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch. (Only
- * applicable for weak-ordered memory model archs,
- * such as IA-64).
- */
- wmb();
- /*
- * Hardware increments by 16 bytes, but packet split
- * descriptors are 32 bytes...so we increment tail
- * twice as much.
- */
- writel(i<<1, adapter->hw.hw_addr + rx_ring->tail);
- }
+ rx_ring->next_to_use = i;
}
/**
@@ -366,10 +714,10 @@ check_page:
}
if (!buffer_info->dma)
- buffer_info->dma = pci_map_page(pdev,
+ buffer_info->dma = dma_map_page(&pdev->dev,
buffer_info->page, 0,
PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
rx_desc = E1000_RX_DESC(*rx_ring, i);
rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
@@ -443,10 +791,10 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
cleaned = 1;
cleaned_count++;
- pci_unmap_single(pdev,
+ dma_unmap_single(&pdev->dev,
buffer_info->dma,
adapter->rx_buffer_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
buffer_info->dma = 0;
length = le16_to_cpu(rx_desc->length);
@@ -547,12 +895,11 @@ static void e1000_put_txbuf(struct e1000_adapter *adapter,
{
if (buffer_info->dma) {
if (buffer_info->mapped_as_page)
- pci_unmap_page(adapter->pdev, buffer_info->dma,
- buffer_info->length, PCI_DMA_TODEVICE);
+ dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
+ buffer_info->length, DMA_TO_DEVICE);
else
- pci_unmap_single(adapter->pdev, buffer_info->dma,
- buffer_info->length,
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
+ buffer_info->length, DMA_TO_DEVICE);
buffer_info->dma = 0;
}
if (buffer_info->skb) {
@@ -643,14 +990,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
cleaned = (i == eop);
if (cleaned) {
- struct sk_buff *skb = buffer_info->skb;
- unsigned int segs, bytecount;
- segs = skb_shinfo(skb)->gso_segs ?: 1;
- /* multiply data chunks by size of headers */
- bytecount = ((segs - 1) * skb_headlen(skb)) +
- skb->len;
- total_tx_packets += segs;
- total_tx_bytes += bytecount;
+ total_tx_packets += buffer_info->segs;
+ total_tx_bytes += buffer_info->bytecount;
}
e1000_put_txbuf(adapter, buffer_info);
@@ -753,9 +1094,9 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
cleaned = 1;
cleaned_count++;
- pci_unmap_single(pdev, buffer_info->dma,
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
adapter->rx_ps_bsize0,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
buffer_info->dma = 0;
/* see !EOP comment in other rx routine */
@@ -811,13 +1152,13 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
* kmap_atomic, so we can't hold the mapping
* very long
*/
- pci_dma_sync_single_for_cpu(pdev, ps_page->dma,
- PAGE_SIZE, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&pdev->dev, ps_page->dma,
+ PAGE_SIZE, DMA_FROM_DEVICE);
vaddr = kmap_atomic(ps_page->page, KM_SKB_DATA_SOFTIRQ);
memcpy(skb_tail_pointer(skb), vaddr, l1);
kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
- pci_dma_sync_single_for_device(pdev, ps_page->dma,
- PAGE_SIZE, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(&pdev->dev, ps_page->dma,
+ PAGE_SIZE, DMA_FROM_DEVICE);
/* remove the CRC */
if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
@@ -834,8 +1175,8 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
break;
ps_page = &buffer_info->ps_pages[j];
- pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
+ DMA_FROM_DEVICE);
ps_page->dma = 0;
skb_fill_page_desc(skb, j, ps_page->page, 0, length);
ps_page->page = NULL;
@@ -953,8 +1294,8 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
cleaned = true;
cleaned_count++;
- pci_unmap_page(pdev, buffer_info->dma, PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_page(&pdev->dev, buffer_info->dma, PAGE_SIZE,
+ DMA_FROM_DEVICE);
buffer_info->dma = 0;
length = le16_to_cpu(rx_desc->length);
@@ -1090,17 +1431,17 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
buffer_info = &rx_ring->buffer_info[i];
if (buffer_info->dma) {
if (adapter->clean_rx == e1000_clean_rx_irq)
- pci_unmap_single(pdev, buffer_info->dma,
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
adapter->rx_buffer_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq)
- pci_unmap_page(pdev, buffer_info->dma,
+ dma_unmap_page(&pdev->dev, buffer_info->dma,
PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
- pci_unmap_single(pdev, buffer_info->dma,
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
adapter->rx_ps_bsize0,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
buffer_info->dma = 0;
}
@@ -1118,8 +1459,8 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
ps_page = &buffer_info->ps_pages[j];
if (!ps_page->page)
break;
- pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
+ DMA_FROM_DEVICE);
ps_page->dma = 0;
put_page(ps_page->page);
ps_page->page = NULL;
@@ -1426,8 +1767,6 @@ void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter)
pci_disable_msi(adapter->pdev);
adapter->flags &= ~FLAG_MSI_ENABLED;
}
-
- return;
}
/**
@@ -1479,8 +1818,6 @@ void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
/* Don't do anything; this is the system default */
break;
}
-
- return;
}
/**
@@ -2185,10 +2522,10 @@ static void e1000_restore_vlan(struct e1000_adapter *adapter)
}
}
-static void e1000_init_manageability(struct e1000_adapter *adapter)
+static void e1000_init_manageability_pt(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
- u32 manc, manc2h;
+ u32 manc, manc2h, mdef, i, j;
if (!(adapter->flags & FLAG_MNG_PT_ENABLED))
return;
@@ -2202,10 +2539,49 @@ static void e1000_init_manageability(struct e1000_adapter *adapter)
*/
manc |= E1000_MANC_EN_MNG2HOST;
manc2h = er32(MANC2H);
-#define E1000_MNG2HOST_PORT_623 (1 << 5)
-#define E1000_MNG2HOST_PORT_664 (1 << 6)
- manc2h |= E1000_MNG2HOST_PORT_623;
- manc2h |= E1000_MNG2HOST_PORT_664;
+
+ switch (hw->mac.type) {
+ default:
+ manc2h |= (E1000_MANC2H_PORT_623 | E1000_MANC2H_PORT_664);
+ break;
+ case e1000_82574:
+ case e1000_82583:
+ /*
+ * Check if IPMI pass-through decision filter already exists;
+ * if so, enable it.
+ */
+ for (i = 0, j = 0; i < 8; i++) {
+ mdef = er32(MDEF(i));
+
+ /* Ignore filters with anything other than IPMI ports */
+ if (mdef & ~(E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
+ continue;
+
+ /* Enable this decision filter in MANC2H */
+ if (mdef)
+ manc2h |= (1 << i);
+
+ j |= mdef;
+ }
+
+ if (j == (E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
+ break;
+
+ /* Create new decision filter in an empty filter */
+ for (i = 0, j = 0; i < 8; i++)
+ if (er32(MDEF(i)) == 0) {
+ ew32(MDEF(i), (E1000_MDEF_PORT_623 |
+ E1000_MDEF_PORT_664));
+ manc2h |= (1 << i);
+ j++;
+ break;
+ }
+
+ if (!j)
+ e_warn("Unable to create IPMI pass-through filter\n");
+ break;
+ }
+
ew32(MANC2H, manc2h);
ew32(MANC, manc);
}
@@ -2524,12 +2900,12 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
* excessive C-state transition latencies result in
* dropped transactions.
*/
- pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY,
- adapter->netdev->name, 55);
+ pm_qos_update_request(
+ adapter->netdev->pm_qos_req, 55);
} else {
- pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY,
- adapter->netdev->name,
- PM_QOS_DEFAULT_VALUE);
+ pm_qos_update_request(
+ adapter->netdev->pm_qos_req,
+ PM_QOS_DEFAULT_VALUE);
}
}
@@ -2565,7 +2941,7 @@ static void e1000_set_multi(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- struct dev_mc_list *mc_ptr;
+ struct netdev_hw_addr *ha;
u8 *mta_list;
u32 rctl;
int i;
@@ -2597,9 +2973,8 @@ static void e1000_set_multi(struct net_device *netdev)
/* prepare a packed array of only addresses. */
i = 0;
- netdev_for_each_mc_addr(mc_ptr, netdev)
- memcpy(mta_list + (i++ * ETH_ALEN),
- mc_ptr->dmi_addr, ETH_ALEN);
+ netdev_for_each_mc_addr(ha, netdev)
+ memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
e1000_update_mc_addr_list(hw, mta_list, i);
kfree(mta_list);
@@ -2621,7 +2996,7 @@ static void e1000_configure(struct e1000_adapter *adapter)
e1000_set_multi(adapter->netdev);
e1000_restore_vlan(adapter);
- e1000_init_manageability(adapter);
+ e1000_init_manageability_pt(adapter);
e1000_configure_tx(adapter);
e1000_setup_rctl(adapter);
@@ -2755,6 +3130,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
fc->high_water = 0x5000;
fc->low_water = 0x3000;
}
+ fc->refresh_time = 0x1000;
} else {
if ((adapter->flags & FLAG_HAS_ERT) &&
(adapter->netdev->mtu > ETH_DATA_LEN))
@@ -2792,10 +3168,6 @@ void e1000e_reset(struct e1000_adapter *adapter)
if (mac->ops.init_hw(hw))
e_err("Hardware Error\n");
- /* additional part of the flow-control workaround above */
- if (hw->mac.type == e1000_pchlan)
- ew32(FCRTV_PCH, 0x1000);
-
e1000_update_mng_vlan(adapter);
/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
@@ -2824,8 +3196,8 @@ int e1000e_up(struct e1000_adapter *adapter)
/* DMA latency requirement to workaround early-receive/jumbo issue */
if (adapter->flags & FLAG_HAS_ERT)
- pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY,
- adapter->netdev->name,
+ adapter->netdev->pm_qos_req =
+ pm_qos_add_request(PM_QOS_CPU_DMA_LATENCY,
PM_QOS_DEFAULT_VALUE);
/* hardware has been reset, we need to reload some things */
@@ -2841,7 +3213,11 @@ int e1000e_up(struct e1000_adapter *adapter)
netif_wake_queue(adapter->netdev);
/* fire a link change interrupt to start the watchdog */
- ew32(ICS, E1000_ICS_LSC);
+ if (adapter->msix_entries)
+ ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER);
+ else
+ ew32(ICS, E1000_ICS_LSC);
+
return 0;
}
@@ -2887,9 +3263,11 @@ void e1000e_down(struct e1000_adapter *adapter)
e1000_clean_tx_ring(adapter);
e1000_clean_rx_ring(adapter);
- if (adapter->flags & FLAG_HAS_ERT)
- pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY,
- adapter->netdev->name);
+ if (adapter->flags & FLAG_HAS_ERT) {
+ pm_qos_remove_request(
+ adapter->netdev->pm_qos_req);
+ adapter->netdev->pm_qos_req = NULL;
+ }
/*
* TODO: for power management, we could drop the link and
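
The pm_qos changes in the hunks above replace the old name-keyed requirement API with request handles stored on the net_device. A condensed sketch of the lifecycle exactly as this patch uses it (the pm_qos_req field name and the 55 us value are taken from the diff; error handling omitted):

	#include <linux/pm_qos_params.h>

	/* e1000e_up(): allocate a CPU-DMA-latency request */
	netdev->pm_qos_req = pm_qos_add_request(PM_QOS_CPU_DMA_LATENCY,
						PM_QOS_DEFAULT_VALUE);

	/* e1000_configure_rx(): tighten the latency bound to 55 us while
	 * early-receive is active, relax it again otherwise */
	pm_qos_update_request(netdev->pm_qos_req, 55);
	pm_qos_update_request(netdev->pm_qos_req, PM_QOS_DEFAULT_VALUE);

	/* e1000e_down(): drop the constraint and clear the handle */
	pm_qos_remove_request(netdev->pm_qos_req);
	netdev->pm_qos_req = NULL;
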
@@ -3083,12 +3461,15 @@ static int e1000_open(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
+ struct pci_dev *pdev = adapter->pdev;
int err;
/* disallow open during test */
if (test_bit(__E1000_TESTING, &adapter->state))
return -EBUSY;
+ pm_runtime_get_sync(&pdev->dev);
+
netif_carrier_off(netdev);
/* allocate transmit descriptors */
@@ -3101,6 +3482,15 @@ static int e1000_open(struct net_device *netdev)
if (err)
goto err_setup_rx;
+ /*
+ * If AMT is enabled, let the firmware know that the network
+ * interface is now open and reset the part to a known state.
+ */
+ if (adapter->flags & FLAG_HAS_AMT) {
+ e1000_get_hw_control(adapter);
+ e1000e_reset(adapter);
+ }
+
e1000e_power_up_phy(adapter);
adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
@@ -3109,13 +3499,6 @@ static int e1000_open(struct net_device *netdev)
e1000_update_mng_vlan(adapter);
/*
- * If AMT is enabled, let the firmware know that the network
- * interface is now open
- */
- if (adapter->flags & FLAG_HAS_AMT)
- e1000_get_hw_control(adapter);
-
- /*
* before we allocate an interrupt, we must be ready to handle it.
* Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
* as soon as we call pci_request_irq, so we have to setup our
@@ -3149,8 +3532,14 @@ static int e1000_open(struct net_device *netdev)
netif_start_queue(netdev);
+ adapter->idle_check = true;
+ pm_runtime_put(&pdev->dev);
+
/* fire a link status change interrupt to start the watchdog */
- ew32(ICS, E1000_ICS_LSC);
+ if (adapter->msix_entries)
+ ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER);
+ else
+ ew32(ICS, E1000_ICS_LSC);
return 0;
@@ -3162,6 +3551,7 @@ err_setup_rx:
e1000e_free_tx_resources(adapter);
err_setup_tx:
e1000e_reset(adapter);
+ pm_runtime_put_sync(&pdev->dev);
return err;
}
@@ -3180,11 +3570,17 @@ err_setup_tx:
static int e1000_close(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct pci_dev *pdev = adapter->pdev;
WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
- e1000e_down(adapter);
+
+ pm_runtime_get_sync(&pdev->dev);
+
+ if (!test_bit(__E1000_DOWN, &adapter->state)) {
+ e1000e_down(adapter);
+ e1000_free_irq(adapter);
+ }
e1000_power_down_phy(adapter);
- e1000_free_irq(adapter);
e1000e_free_tx_resources(adapter);
e1000e_free_rx_resources(adapter);
@@ -3206,6 +3602,8 @@ static int e1000_close(struct net_device *netdev)
if (adapter->flags & FLAG_HAS_AMT)
e1000_release_hw_control(adapter);
+ pm_runtime_put_sync(&pdev->dev);
+
return 0;
}
/**
@@ -3550,6 +3948,9 @@ static void e1000_watchdog_task(struct work_struct *work)
link = e1000e_has_link(adapter);
if ((netif_carrier_ok(netdev)) && link) {
+ /* Cancel scheduled suspend requests. */
+ pm_runtime_resume(netdev->dev.parent);
+
e1000e_enable_receives(adapter);
goto link_up;
}
@@ -3561,6 +3962,10 @@ static void e1000_watchdog_task(struct work_struct *work)
if (link) {
if (!netif_carrier_ok(netdev)) {
bool txb2b = 1;
+
+ /* Cancel scheduled suspend requests. */
+ pm_runtime_resume(netdev->dev.parent);
+
/* update snapshot of PHY registers on LSC */
e1000_phy_read_status(adapter);
mac->ops.get_link_up_info(&adapter->hw,
@@ -3670,6 +4075,9 @@ static void e1000_watchdog_task(struct work_struct *work)
if (adapter->flags & FLAG_RX_NEEDS_RESTART)
schedule_work(&adapter->reset_task);
+ else
+ pm_schedule_suspend(netdev->dev.parent,
+ LINK_TIMEOUT);
}
}
@@ -3705,6 +4113,22 @@ link_up:
}
}
+ /* Simple mode for Interrupt Throttle Rate (ITR) */
+ if (adapter->itr_setting == 4) {
+ /*
+ * Symmetric Tx/Rx gets a reduced ITR=2000;
+ * Total asymmetrical Tx or Rx gets ITR=8000;
+ * everyone else is between 2000-8000.
+ */
+ u32 goc = (adapter->gotc + adapter->gorc) / 10000;
+ u32 dif = (adapter->gotc > adapter->gorc ?
+ adapter->gotc - adapter->gorc :
+ adapter->gorc - adapter->gotc) / 10000;
+ u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
+
+ ew32(ITR, 1000000000 / (itr * 256));
+ }
+
/* Cause software interrupt to ensure Rx ring is cleaned */
if (adapter->msix_entries)
ew32(ICS, adapter->rx_ring->ims_val);
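
The ew32(ITR, ...) write at the end of the new simple-mode block converts the computed interrupts-per-second target into the register's interval units. Assuming the 256 ns granularity implied by that expression (and used by the driver's other ITR writes), the arithmetic for a hypothetical target works out as:

	/* itr = 4000 interrupts/s:
	 *   interval  = 1 s / 4000        = 250,000 ns
	 *   ITR value = 250,000 ns / 256  ~= 976
	 * which matches 1000000000 / (4000 * 256) == 976.
	 */
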
@@ -3879,7 +4303,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
struct e1000_buffer *buffer_info;
unsigned int len = skb_headlen(skb);
unsigned int offset = 0, size, count = 0, i;
- unsigned int f;
+ unsigned int f, bytecount, segs;
i = tx_ring->next_to_use;
@@ -3890,10 +4314,11 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
buffer_info->length = size;
buffer_info->time_stamp = jiffies;
buffer_info->next_to_watch = i;
- buffer_info->dma = pci_map_single(pdev, skb->data + offset,
- size, PCI_DMA_TODEVICE);
+ buffer_info->dma = dma_map_single(&pdev->dev,
+ skb->data + offset,
+ size, DMA_TO_DEVICE);
buffer_info->mapped_as_page = false;
- if (pci_dma_mapping_error(pdev, buffer_info->dma))
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma))
goto dma_error;
len -= size;
@@ -3925,11 +4350,11 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
buffer_info->length = size;
buffer_info->time_stamp = jiffies;
buffer_info->next_to_watch = i;
- buffer_info->dma = pci_map_page(pdev, frag->page,
+ buffer_info->dma = dma_map_page(&pdev->dev, frag->page,
offset, size,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
buffer_info->mapped_as_page = true;
- if (pci_dma_mapping_error(pdev, buffer_info->dma))
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma))
goto dma_error;
len -= size;
@@ -3938,7 +4363,13 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
}
}
+ segs = skb_shinfo(skb)->gso_segs ?: 1;
+ /* multiply data chunks by size of headers */
+ bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
+
tx_ring->buffer_info[i].skb = skb;
+ tx_ring->buffer_info[i].segs = segs;
+ tx_ring->buffer_info[i].bytecount = bytecount;
tx_ring->buffer_info[first].next_to_watch = i;
return count;
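
This moves the GSO byte/segment accounting from Tx completion (removed in an earlier hunk of this file) to map time, caching it per buffer. A quick worked example of the bytecount formula with hypothetical sizes:

	/* TSO skb with skb->len = 3000, skb_headlen(skb) = 54, gso_segs = 3:
	 *   bytecount = (3 - 1) * 54 + 3000 = 3108
	 * i.e. the payload plus one replicated header per extra segment,
	 * matching what is actually transmitted on the wire.
	 */
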
@@ -4105,7 +4536,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
unsigned int max_per_txd = E1000_MAX_PER_TXD;
unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
unsigned int tx_flags = 0;
- unsigned int len = skb->len - skb->data_len;
+ unsigned int len = skb_headlen(skb);
unsigned int nr_frags;
unsigned int mss;
int count = 0;
@@ -4155,7 +4586,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
- len = skb->len - skb->data_len;
+ len = skb_headlen(skb);
}
}
@@ -4241,6 +4672,8 @@ static void e1000_reset_task(struct work_struct *work)
struct e1000_adapter *adapter;
adapter = container_of(work, struct e1000_adapter, reset_task);
+ e1000e_dump(adapter);
+ e_err("Reset adapter\n");
e1000e_reinit_locked(adapter);
}
@@ -4475,13 +4908,15 @@ out:
return retval;
}
-static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
+static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
+ bool runtime)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
u32 ctrl, ctrl_ext, rctl, status;
- u32 wufc = adapter->wol;
+ /* Runtime suspend should only enable wakeup for link changes */
+ u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
int retval = 0;
netif_device_detach(netdev);
@@ -4651,20 +5086,13 @@ void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
__e1000e_disable_aspm(pdev, state);
}
-#ifdef CONFIG_PM
-static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
+#ifdef CONFIG_PM_OPS
+static bool e1000e_pm_ready(struct e1000_adapter *adapter)
{
- int retval;
- bool wake;
-
- retval = __e1000_shutdown(pdev, &wake);
- if (!retval)
- e1000_complete_shutdown(pdev, true, wake);
-
- return retval;
+ return !!adapter->tx_ring->buffer_info;
}
-static int e1000_resume(struct pci_dev *pdev)
+static int __e1000_resume(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -4677,18 +5105,6 @@ static int e1000_resume(struct pci_dev *pdev)
if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
e1000e_disable_aspm(pdev, PCIE_LINK_STATE_L1);
- err = pci_enable_device_mem(pdev);
- if (err) {
- dev_err(&pdev->dev,
- "Cannot enable PCI device from suspend\n");
- return err;
- }
-
- pci_set_master(pdev);
-
- pci_enable_wake(pdev, PCI_D3hot, 0);
- pci_enable_wake(pdev, PCI_D3cold, 0);
-
e1000e_set_interrupt_capability(adapter);
if (netif_running(netdev)) {
err = e1000_request_irq(adapter);
@@ -4729,7 +5145,7 @@ static int e1000_resume(struct pci_dev *pdev)
e1000e_reset(adapter);
- e1000_init_manageability(adapter);
+ e1000_init_manageability_pt(adapter);
if (netif_running(netdev))
e1000e_up(adapter);
@@ -4746,13 +5162,88 @@ static int e1000_resume(struct pci_dev *pdev)
return 0;
}
-#endif
+
+#ifdef CONFIG_PM_SLEEP
+static int e1000_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int retval;
+ bool wake;
+
+ retval = __e1000_shutdown(pdev, &wake, false);
+ if (!retval)
+ e1000_complete_shutdown(pdev, true, wake);
+
+ return retval;
+}
+
+static int e1000_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ if (e1000e_pm_ready(adapter))
+ adapter->idle_check = true;
+
+ return __e1000_resume(pdev);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+#ifdef CONFIG_PM_RUNTIME
+static int e1000_runtime_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ if (e1000e_pm_ready(adapter)) {
+ bool wake;
+
+ __e1000_shutdown(pdev, &wake, true);
+ }
+
+ return 0;
+}
+
+static int e1000_idle(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ if (!e1000e_pm_ready(adapter))
+ return 0;
+
+ if (adapter->idle_check) {
+ adapter->idle_check = false;
+ if (!e1000e_has_link(adapter))
+ pm_schedule_suspend(dev, MSEC_PER_SEC);
+ }
+
+ return -EBUSY;
+}
+
+static int e1000_runtime_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ if (!e1000e_pm_ready(adapter))
+ return 0;
+
+ adapter->idle_check = !dev->power.runtime_auto;
+ return __e1000_resume(pdev);
+}
+#endif /* CONFIG_PM_RUNTIME */
+#endif /* CONFIG_PM_OPS */
static void e1000_shutdown(struct pci_dev *pdev)
{
bool wake = false;
- __e1000_shutdown(pdev, &wake);
+ __e1000_shutdown(pdev, &wake, false);
if (system_state == SYSTEM_POWER_OFF)
e1000_complete_shutdown(pdev, false, wake);
@@ -4826,8 +5317,8 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
result = PCI_ERS_RESULT_DISCONNECT;
} else {
pci_set_master(pdev);
+ pdev->state_saved = true;
pci_restore_state(pdev);
- pci_save_state(pdev);
pci_enable_wake(pdev, PCI_D3hot, 0);
pci_enable_wake(pdev, PCI_D3cold, 0);
@@ -4855,7 +5346,7 @@ static void e1000_io_resume(struct pci_dev *pdev)
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
- e1000_init_manageability(adapter);
+ e1000_init_manageability_pt(adapter);
if (netif_running(netdev)) {
if (e1000e_up(adapter)) {
@@ -4968,16 +5459,16 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
return err;
pci_using_dac = 0;
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
if (!err) {
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
if (!err)
pci_using_dac = 1;
} else {
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- err = pci_set_consistent_dma_mask(pdev,
- DMA_BIT_MASK(32));
+ err = dma_set_coherent_mask(&pdev->dev,
+ DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev, "No usable DMA "
"configuration, aborting\n");
@@ -5008,6 +5499,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
SET_NETDEV_DEV(netdev, &pdev->dev);
+ netdev->irq = pdev->irq;
+
pci_set_drvdata(pdev, netdev);
adapter = netdev_priv(netdev);
hw = &adapter->hw;
@@ -5228,6 +5721,12 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
e1000_print_device_info(adapter);
+ if (pci_dev_run_wake(pdev)) {
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ }
+ pm_schedule_suspend(&pdev->dev, MSEC_PER_SEC);
+
return 0;
err_register:
@@ -5270,12 +5769,16 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
+ bool down = test_bit(__E1000_DOWN, &adapter->state);
+
+ pm_runtime_get_sync(&pdev->dev);
/*
* flush_scheduled work may reschedule our watchdog task, so
* explicitly disable watchdog tasks from being rescheduled
*/
- set_bit(__E1000_DOWN, &adapter->state);
+ if (!down)
+ set_bit(__E1000_DOWN, &adapter->state);
del_timer_sync(&adapter->watchdog_timer);
del_timer_sync(&adapter->phy_info_timer);
@@ -5289,8 +5792,17 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
if (!(netdev->flags & IFF_UP))
e1000_power_down_phy(adapter);
+ /* Don't lie to e1000_close() down the road. */
+ if (!down)
+ clear_bit(__E1000_DOWN, &adapter->state);
unregister_netdev(netdev);
+ if (pci_dev_run_wake(pdev)) {
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ }
+ pm_runtime_put_noidle(&pdev->dev);
+
/*
* Release control of h/w to f/w. If f/w is AMT enabled, this
* would have already happened in close and is redundant.
@@ -5380,6 +5892,7 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LM), board_ich10lan },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LF), board_ich10lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_V), board_ich10lan },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LM), board_pchlan },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LC), board_pchlan },
@@ -5390,16 +5903,22 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
};
MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
+#ifdef CONFIG_PM_OPS
+static const struct dev_pm_ops e1000_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume)
+ SET_RUNTIME_PM_OPS(e1000_runtime_suspend,
+ e1000_runtime_resume, e1000_idle)
+};
+#endif
+
/* PCI Device API Driver */
static struct pci_driver e1000_driver = {
.name = e1000e_driver_name,
.id_table = e1000_pci_tbl,
.probe = e1000_probe,
.remove = __devexit_p(e1000_remove),
-#ifdef CONFIG_PM
- /* Power Management Hooks */
- .suspend = e1000_suspend,
- .resume = e1000_resume,
+#ifdef CONFIG_PM_OPS
+ .driver.pm = &e1000_pm_ops,
#endif
.shutdown = e1000_shutdown,
.err_handler = &e1000_err_handler
@@ -5414,10 +5933,9 @@ static struct pci_driver e1000_driver = {
static int __init e1000_init_module(void)
{
int ret;
- printk(KERN_INFO "%s: Intel(R) PRO/1000 Network Driver - %s\n",
- e1000e_driver_name, e1000e_driver_version);
- printk(KERN_INFO "%s: Copyright (c) 1999 - 2009 Intel Corporation.\n",
- e1000e_driver_name);
+ pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
+ e1000e_driver_version);
+ pr_info("Copyright (c) 1999 - 2009 Intel Corporation.\n");
ret = pci_register_driver(&e1000_driver);
return ret;
diff --git a/drivers/net/e1000e/param.c b/drivers/net/e1000e/param.c
index 2e399778cae5..a150e48a117f 100644
--- a/drivers/net/e1000e/param.c
+++ b/drivers/net/e1000e/param.c
@@ -248,7 +248,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
}
{ /* Transmit Interrupt Delay */
- const struct e1000_option opt = {
+ static const struct e1000_option opt = {
.type = range_option,
.name = "Transmit Interrupt Delay",
.err = "using default of "
@@ -267,7 +267,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
}
}
{ /* Transmit Absolute Interrupt Delay */
- const struct e1000_option opt = {
+ static const struct e1000_option opt = {
.type = range_option,
.name = "Transmit Absolute Interrupt Delay",
.err = "using default of "
@@ -286,7 +286,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
}
}
{ /* Receive Interrupt Delay */
- struct e1000_option opt = {
+ static struct e1000_option opt = {
.type = range_option,
.name = "Receive Interrupt Delay",
.err = "using default of "
@@ -305,7 +305,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
}
}
{ /* Receive Absolute Interrupt Delay */
- const struct e1000_option opt = {
+ static const struct e1000_option opt = {
.type = range_option,
.name = "Receive Absolute Interrupt Delay",
.err = "using default of "
@@ -324,7 +324,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
}
}
{ /* Interrupt Throttling Rate */
- const struct e1000_option opt = {
+ static const struct e1000_option opt = {
.type = range_option,
.name = "Interrupt Throttling Rate (ints/sec)",
.err = "using default of "
@@ -351,6 +351,11 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
adapter->itr_setting = adapter->itr;
adapter->itr = 20000;
break;
+ case 4:
+ e_info("%s set to simplified (2000-8000 ints) "
+ "mode\n", opt.name);
+ adapter->itr_setting = 4;
+ break;
default:
/*
* Save the setting, because the dynamic bits
@@ -381,7 +386,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
}
}
{ /* Interrupt Mode */
- struct e1000_option opt = {
+ static struct e1000_option opt = {
.type = range_option,
.name = "Interrupt Mode",
.err = "defaulting to 2 (MSI-X)",
@@ -399,7 +404,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
}
}
{ /* Smart Power Down */
- const struct e1000_option opt = {
+ static const struct e1000_option opt = {
.type = enable_option,
.name = "PHY Smart Power Down",
.err = "defaulting to Disabled",
@@ -415,7 +420,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
}
}
{ /* CRC Stripping */
- const struct e1000_option opt = {
+ static const struct e1000_option opt = {
.type = enable_option,
.name = "CRC Stripping",
.err = "defaulting to enabled",
@@ -432,7 +437,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
}
}
{ /* Kumeran Lock Loss Workaround */
- const struct e1000_option opt = {
+ static const struct e1000_option opt = {
.type = enable_option,
.name = "Kumeran Lock Loss Workaround",
.err = "defaulting to Enabled",
@@ -452,7 +457,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
}
}
{ /* Write-protect NVM */
- const struct e1000_option opt = {
+ static const struct e1000_option opt = {
.type = enable_option,
.name = "Write-protect NVM",
.err = "defaulting to Enabled",
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c
index 7f3ceb9dad6a..b4ac82d51b20 100644
--- a/drivers/net/e1000e/phy.c
+++ b/drivers/net/e1000e/phy.c
@@ -3116,9 +3116,7 @@ s32 e1000_check_polarity_82577(struct e1000_hw *hw)
* e1000_phy_force_speed_duplex_82577 - Force speed/duplex for I82577 PHY
* @hw: pointer to the HW structure
*
- * Calls the PHY setup function to force speed and duplex. Clears the
- * auto-crossover to force MDI manually. Waits for link and returns
- * successful if link up is successful, else -E1000_ERR_PHY (-2).
+ * Calls the PHY setup function to force speed and duplex.
**/
s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw)
{
@@ -3137,23 +3135,6 @@ s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw)
if (ret_val)
goto out;
- /*
- * Clear Auto-Crossover to force MDI manually. 82577 requires MDI
- * forced whenever speed and duplex are forced.
- */
- ret_val = phy->ops.read_reg(hw, I82577_PHY_CTRL_2, &phy_data);
- if (ret_val)
- goto out;
-
- phy_data &= ~I82577_PHY_CTRL2_AUTO_MDIX;
- phy_data &= ~I82577_PHY_CTRL2_FORCE_MDI_MDIX;
-
- ret_val = phy->ops.write_reg(hw, I82577_PHY_CTRL_2, phy_data);
- if (ret_val)
- goto out;
-
- e_dbg("I82577_PHY_CTRL_2: %X\n", phy_data);
-
udelay(1);
if (phy->autoneg_wait_to_complete) {
diff --git a/drivers/net/e2100.c b/drivers/net/e2100.c
index ca93c9a9d372..06e72fbef862 100644
--- a/drivers/net/e2100.c
+++ b/drivers/net/e2100.c
@@ -328,7 +328,6 @@ e21_reset_8390(struct net_device *dev)
/* Set up the ASIC registers, just in case something changed them. */
if (ei_debug > 1) printk("reset done\n");
- return;
}
/* Grab the 8390 specific header. We put the 2k window so the header page
diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c
index 27c7bdbfa003..8d97f168f018 100644
--- a/drivers/net/eepro.c
+++ b/drivers/net/eepro.c
@@ -645,7 +645,7 @@ static void __init printEEPROMInfo(struct net_device *dev)
if (GetBit(Word,ee_PortTPE)) printk(KERN_DEBUG "TPE ");
if (GetBit(Word,ee_PortBNC)) printk(KERN_DEBUG "BNC ");
if (GetBit(Word,ee_PortAUI)) printk(KERN_DEBUG "AUI ");
- printk(KERN_DEBUG "port(s) \n");
+ printk(KERN_DEBUG "port(s)\n");
Word = lp->word[6];
printk(KERN_DEBUG "Word6:\n");
@@ -765,7 +765,7 @@ static int __init eepro_probe1(struct net_device *dev, int autoprobe)
/* Grab the region so we can find another board if autoIRQ fails. */
if (!request_region(ioaddr, EEPRO_IO_EXTENT, DRV_NAME)) {
if (!autoprobe)
- printk(KERN_WARNING "EEPRO: io-port 0x%04x in use \n",
+ printk(KERN_WARNING "EEPRO: io-port 0x%04x in use\n",
ioaddr);
return -EBUSY;
}
@@ -1161,8 +1161,7 @@ static netdev_tx_t eepro_send_packet(struct sk_buff *skb,
/* we won't wake queue here because we're out of space */
dev->stats.tx_dropped++;
else {
- dev->stats.tx_bytes+=skb->len;
- dev->trans_start = jiffies;
+ dev->stats.tx_bytes+=skb->len;
netif_wake_queue(dev);
}
@@ -1286,7 +1285,7 @@ set_multicast_list(struct net_device *dev)
struct eepro_local *lp = netdev_priv(dev);
short ioaddr = dev->base_addr;
unsigned short mode;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
int mc_count = netdev_mc_count(dev);
if (dev->flags&(IFF_ALLMULTI|IFF_PROMISC) || mc_count > 63)
@@ -1331,8 +1330,8 @@ set_multicast_list(struct net_device *dev)
outw(0, ioaddr + IO_PORT);
outw(6 * (mc_count + 1), ioaddr + IO_PORT);
- netdev_for_each_mc_addr(dmi, dev) {
- eaddrs = (unsigned short *) dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ eaddrs = (unsigned short *) ha->addr;
outw(*eaddrs++, ioaddr + IO_PORT);
outw(*eaddrs++, ioaddr + IO_PORT);
outw(*eaddrs++, ioaddr + IO_PORT);
diff --git a/drivers/net/eexpress.c b/drivers/net/eexpress.c
index 1a7322b80ea7..12c37d264108 100644
--- a/drivers/net/eexpress.c
+++ b/drivers/net/eexpress.c
@@ -543,7 +543,7 @@ static void unstick_cu(struct net_device *dev)
if (lp->started)
{
- if (time_after(jiffies, dev->trans_start + 50))
+ if (time_after(jiffies, dev_trans_start(dev) + HZ/2))
{
if (lp->tx_link==lp->last_tx_restart)
{
@@ -1018,7 +1018,7 @@ static void eexp_hw_tx_pio(struct net_device *dev, unsigned short *buf,
outw(lp->tx_head+0x16, ioaddr + DATAPORT);
outw(0, ioaddr + DATAPORT);
- outsw(ioaddr + DATAPORT, buf, (len+1)>>1);
+ outsw(ioaddr + DATAPORT, buf, (len+1)>>1);
outw(lp->tx_tail+0xc, ioaddr + WRITE_PTR);
outw(lp->tx_head, ioaddr + DATAPORT);
@@ -1570,12 +1570,11 @@ static void eexp_hw_init586(struct net_device *dev)
#if NET_DEBUG > 6
printk("%s: leaving eexp_hw_init586()\n", dev->name);
#endif
- return;
}
static void eexp_setup_filter(struct net_device *dev)
{
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
unsigned short ioaddr = dev->base_addr;
int count = netdev_mc_count(dev);
int i;
@@ -1588,8 +1587,8 @@ static void eexp_setup_filter(struct net_device *dev)
outw(CONF_NR_MULTICAST & ~31, ioaddr+SM_PTR);
outw(6*count, ioaddr+SHADOW(CONF_NR_MULTICAST));
i = 0;
- netdev_for_each_mc_addr(dmi, dev) {
- unsigned short *data = (unsigned short *) dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ unsigned short *data = (unsigned short *) ha->addr;
if (i == count)
break;
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index fa311a950996..0630980a2722 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -40,7 +40,7 @@
#include <asm/io.h>
#define DRV_NAME "ehea"
-#define DRV_VERSION "EHEA_0102"
+#define DRV_VERSION "EHEA_0103"
/* eHEA capability flags */
#define DLPAR_PORT_ADD_REM 1
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 809ccc9ff09c..f547894ff48f 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -122,8 +122,11 @@ static struct of_device_id ehea_device_table[] = {
MODULE_DEVICE_TABLE(of, ehea_device_table);
static struct of_platform_driver ehea_driver = {
- .name = "ehea",
- .match_table = ehea_device_table,
+ .driver = {
+ .name = "ehea",
+ .owner = THIS_MODULE,
+ .of_match_table = ehea_device_table,
+ },
.probe = ehea_probe_adapter,
.remove = ehea_remove,
};
@@ -791,11 +794,17 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
cqe_counter++;
rmb();
if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
- ehea_error("Send Completion Error: Resetting port");
+ ehea_error("Bad send completion status=0x%04X",
+ cqe->status);
+
if (netif_msg_tx_err(pr->port))
ehea_dump(cqe, sizeof(*cqe), "Send CQE");
- ehea_schedule_port_reset(pr->port);
- break;
+
+ if (cqe->status & EHEA_CQE_STAT_RESET_MASK) {
+ ehea_error("Resetting port");
+ ehea_schedule_port_reset(pr->port);
+ break;
+ }
}
if (netif_msg_tx_done(pr->port))
@@ -814,7 +823,7 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
quota--;
cqe = ehea_poll_cq(send_cq);
- };
+ }
ehea_update_feca(send_cq, cqe_counter);
atomic_add(swqe_av, &pr->swqe_avail);
@@ -901,6 +910,8 @@ static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
struct ehea_eqe *eqe;
struct ehea_qp *qp;
u32 qp_token;
+ u64 resource_type, aer, aerr;
+ int reset_port = 0;
eqe = ehea_poll_eq(port->qp_eq);
@@ -910,11 +921,24 @@ static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
eqe->entry, qp_token);
qp = port->port_res[qp_token].qp;
- ehea_error_data(port->adapter, qp->fw_handle);
+
+ resource_type = ehea_error_data(port->adapter, qp->fw_handle,
+ &aer, &aerr);
+
+ if (resource_type == EHEA_AER_RESTYPE_QP) {
+ if ((aer & EHEA_AER_RESET_MASK) ||
+ (aerr & EHEA_AERR_RESET_MASK))
+ reset_port = 1;
+ } else
+ reset_port = 1; /* Reset in case of CQ or EQ error */
+
eqe = ehea_poll_eq(port->qp_eq);
}
- ehea_schedule_port_reset(port);
+ if (reset_port) {
+ ehea_error("Resetting port");
+ ehea_schedule_port_reset(port);
+ }
return IRQ_HANDLED;
}
@@ -1618,7 +1642,7 @@ static void write_swqe2_TSO(struct sk_buff *skb,
{
struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
- int skb_data_size = skb->len - skb->data_len;
+ int skb_data_size = skb_headlen(skb);
int headersize;
/* Packet is TCP with TSO enabled */
@@ -1629,7 +1653,7 @@ static void write_swqe2_TSO(struct sk_buff *skb,
*/
headersize = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
- skb_data_size = skb->len - skb->data_len;
+ skb_data_size = skb_headlen(skb);
if (skb_data_size >= headersize) {
/* copy immediate data */
@@ -1651,7 +1675,7 @@ static void write_swqe2_TSO(struct sk_buff *skb,
static void write_swqe2_nonTSO(struct sk_buff *skb,
struct ehea_swqe *swqe, u32 lkey)
{
- int skb_data_size = skb->len - skb->data_len;
+ int skb_data_size = skb_headlen(skb);
u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
@@ -1860,7 +1884,6 @@ static void ehea_promiscuous(struct net_device *dev, int enable)
port->promisc = enable;
out:
free_page((unsigned long)cb7);
- return;
}
static u64 ehea_multicast_reg_helper(struct ehea_port *port, u64 mc_mac_addr,
@@ -1967,7 +1990,7 @@ static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr)
static void ehea_set_multicast_list(struct net_device *dev)
{
struct ehea_port *port = netdev_priv(dev);
- struct dev_mc_list *k_mcl_entry;
+ struct netdev_hw_addr *ha;
int ret;
if (dev->flags & IFF_PROMISC) {
@@ -1998,13 +2021,12 @@ static void ehea_set_multicast_list(struct net_device *dev)
goto out;
}
- netdev_for_each_mc_addr(k_mcl_entry, dev)
- ehea_add_multicast_entry(port, k_mcl_entry->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev)
+ ehea_add_multicast_entry(port, ha->addr);
}
out:
ehea_update_bcmc_registrations();
- return;
}
static int ehea_change_mtu(struct net_device *dev, int new_mtu)
@@ -2108,8 +2130,8 @@ static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
} else {
/* first copy data from the skb->data buffer ... */
skb_copy_from_linear_data(skb, imm_data,
- skb->len - skb->data_len);
- imm_data += skb->len - skb->data_len;
+ skb_headlen(skb));
+ imm_data += skb_headlen(skb);
/* ... then copy data from the fragments */
for (i = 0; i < nfrags; i++) {
@@ -2220,7 +2242,7 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
spin_unlock_irqrestore(&pr->netif_queue, flags);
}
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
spin_unlock(&pr->xmit_lock);
return NETDEV_TX_OK;
@@ -2317,7 +2339,6 @@ static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
ehea_error("modify_ehea_port failed");
out:
free_page((unsigned long)cb1);
- return;
}
int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
@@ -2860,7 +2881,6 @@ static void ehea_reset_port(struct work_struct *work)
netif_wake_queue(dev);
out:
mutex_unlock(&port->port_lock);
- return;
}
static void ehea_rereg_mrs(struct work_struct *work)
@@ -2868,7 +2888,6 @@ static void ehea_rereg_mrs(struct work_struct *work)
int ret, i;
struct ehea_adapter *adapter;
- mutex_lock(&dlpar_mem_lock);
ehea_info("LPAR memory changed - re-initializing driver");
list_for_each_entry(adapter, &adapter_list, list)
@@ -2938,7 +2957,6 @@ static void ehea_rereg_mrs(struct work_struct *work)
}
ehea_info("re-initializing driver complete");
out:
- mutex_unlock(&dlpar_mem_lock);
return;
}
@@ -3035,7 +3053,7 @@ static DEVICE_ATTR(log_port_id, S_IRUSR | S_IRGRP | S_IROTH, ehea_show_port_id,
static void __devinit logical_port_release(struct device *dev)
{
struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
- of_node_put(port->ofdev.node);
+ of_node_put(port->ofdev.dev.of_node);
}
static struct device *ehea_register_port(struct ehea_port *port,
@@ -3043,7 +3061,7 @@ static struct device *ehea_register_port(struct ehea_port *port,
{
int ret;
- port->ofdev.node = of_node_get(dn);
+ port->ofdev.dev.of_node = of_node_get(dn);
port->ofdev.dev.parent = &port->adapter->ofdev->dev;
port->ofdev.dev.bus = &ibmebus_bus_type;
@@ -3210,7 +3228,7 @@ static int ehea_setup_ports(struct ehea_adapter *adapter)
const u32 *dn_log_port_id;
int i = 0;
- lhea_dn = adapter->ofdev->node;
+ lhea_dn = adapter->ofdev->dev.of_node;
while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {
dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
@@ -3238,7 +3256,7 @@ static int ehea_setup_ports(struct ehea_adapter *adapter)
ehea_remove_adapter_mr(adapter);
i++;
- };
+ }
return 0;
}
@@ -3249,7 +3267,7 @@ static struct device_node *ehea_get_eth_dn(struct ehea_adapter *adapter,
struct device_node *eth_dn = NULL;
const u32 *dn_log_port_id;
- lhea_dn = adapter->ofdev->node;
+ lhea_dn = adapter->ofdev->dev.of_node;
while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {
dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
@@ -3257,7 +3275,7 @@ static struct device_node *ehea_get_eth_dn(struct ehea_adapter *adapter,
if (dn_log_port_id)
if (*dn_log_port_id == logical_port_id)
return eth_dn;
- };
+ }
return NULL;
}
@@ -3379,7 +3397,7 @@ static int __devinit ehea_probe_adapter(struct of_device *dev,
const u64 *adapter_handle;
int ret;
- if (!dev || !dev->node) {
+ if (!dev || !dev->dev.of_node) {
ehea_error("Invalid ibmebus device probed");
return -EINVAL;
}
@@ -3395,14 +3413,14 @@ static int __devinit ehea_probe_adapter(struct of_device *dev,
adapter->ofdev = dev;
- adapter_handle = of_get_property(dev->node, "ibm,hea-handle",
+ adapter_handle = of_get_property(dev->dev.of_node, "ibm,hea-handle",
NULL);
if (adapter_handle)
adapter->handle = *adapter_handle;
if (!adapter->handle) {
dev_err(&dev->dev, "failed getting handle for adapter"
- " '%s'\n", dev->node->full_name);
+ " '%s'\n", dev->dev.of_node->full_name);
ret = -ENODEV;
goto out_free_ad;
}
@@ -3521,7 +3539,14 @@ void ehea_crash_handler(void)
static int ehea_mem_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
+ int ret = NOTIFY_BAD;
struct memory_notify *arg = data;
+
+ if (!mutex_trylock(&dlpar_mem_lock)) {
+ ehea_info("ehea_mem_notifier must not be called parallelized");
+ goto out;
+ }
+
switch (action) {
case MEM_CANCEL_OFFLINE:
ehea_info("memory offlining canceled");
@@ -3530,14 +3555,14 @@ static int ehea_mem_notifier(struct notifier_block *nb,
ehea_info("memory is going online");
set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
- return NOTIFY_BAD;
+ goto out_unlock;
ehea_rereg_mrs(NULL);
break;
case MEM_GOING_OFFLINE:
ehea_info("memory is going offline");
set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
- return NOTIFY_BAD;
+ goto out_unlock;
ehea_rereg_mrs(NULL);
break;
default:
@@ -3545,8 +3570,12 @@ static int ehea_mem_notifier(struct notifier_block *nb,
}
ehea_update_firmware_handles();
+ ret = NOTIFY_OK;
- return NOTIFY_OK;
+out_unlock:
+ mutex_unlock(&dlpar_mem_lock);
+out:
+ return ret;
}
static struct notifier_block ehea_mem_nb = {
diff --git a/drivers/net/ehea/ehea_qmr.c b/drivers/net/ehea/ehea_qmr.c
index a1b4c7e56367..89128b6373e3 100644
--- a/drivers/net/ehea/ehea_qmr.c
+++ b/drivers/net/ehea/ehea_qmr.c
@@ -229,14 +229,14 @@ u64 ehea_destroy_cq_res(struct ehea_cq *cq, u64 force)
int ehea_destroy_cq(struct ehea_cq *cq)
{
- u64 hret;
+ u64 hret, aer, aerr;
if (!cq)
return 0;
hcp_epas_dtor(&cq->epas);
hret = ehea_destroy_cq_res(cq, NORMAL_FREE);
if (hret == H_R_STATE) {
- ehea_error_data(cq->adapter, cq->fw_handle);
+ ehea_error_data(cq->adapter, cq->fw_handle, &aer, &aerr);
hret = ehea_destroy_cq_res(cq, FORCE_FREE);
}
@@ -357,7 +357,7 @@ u64 ehea_destroy_eq_res(struct ehea_eq *eq, u64 force)
int ehea_destroy_eq(struct ehea_eq *eq)
{
- u64 hret;
+ u64 hret, aer, aerr;
if (!eq)
return 0;
@@ -365,7 +365,7 @@ int ehea_destroy_eq(struct ehea_eq *eq)
hret = ehea_destroy_eq_res(eq, NORMAL_FREE);
if (hret == H_R_STATE) {
- ehea_error_data(eq->adapter, eq->fw_handle);
+ ehea_error_data(eq->adapter, eq->fw_handle, &aer, &aerr);
hret = ehea_destroy_eq_res(eq, FORCE_FREE);
}
@@ -540,7 +540,7 @@ u64 ehea_destroy_qp_res(struct ehea_qp *qp, u64 force)
int ehea_destroy_qp(struct ehea_qp *qp)
{
- u64 hret;
+ u64 hret, aer, aerr;
if (!qp)
return 0;
@@ -548,7 +548,7 @@ int ehea_destroy_qp(struct ehea_qp *qp)
hret = ehea_destroy_qp_res(qp, NORMAL_FREE);
if (hret == H_R_STATE) {
- ehea_error_data(qp->adapter, qp->fw_handle);
+ ehea_error_data(qp->adapter, qp->fw_handle, &aer, &aerr);
hret = ehea_destroy_qp_res(qp, FORCE_FREE);
}
@@ -986,42 +986,45 @@ void print_error_data(u64 *data)
if (length > EHEA_PAGESIZE)
length = EHEA_PAGESIZE;
- if (type == 0x8) /* Queue Pair */
+ if (type == EHEA_AER_RESTYPE_QP)
ehea_error("QP (resource=%llX) state: AER=0x%llX, AERR=0x%llX, "
"port=%llX", resource, data[6], data[12], data[22]);
-
- if (type == 0x4) /* Completion Queue */
+ else if (type == EHEA_AER_RESTYPE_CQ)
ehea_error("CQ (resource=%llX) state: AER=0x%llX", resource,
data[6]);
-
- if (type == 0x3) /* Event Queue */
+ else if (type == EHEA_AER_RESTYPE_EQ)
ehea_error("EQ (resource=%llX) state: AER=0x%llX", resource,
data[6]);
ehea_dump(data, length, "error data");
}
-void ehea_error_data(struct ehea_adapter *adapter, u64 res_handle)
+u64 ehea_error_data(struct ehea_adapter *adapter, u64 res_handle,
+ u64 *aer, u64 *aerr)
{
unsigned long ret;
u64 *rblock;
+ u64 type = 0;
rblock = (void *)get_zeroed_page(GFP_KERNEL);
if (!rblock) {
ehea_error("Cannot allocate rblock memory.");
- return;
+ goto out;
}
- ret = ehea_h_error_data(adapter->handle,
- res_handle,
- rblock);
+ ret = ehea_h_error_data(adapter->handle, res_handle, rblock);
- if (ret == H_R_STATE)
- ehea_error("No error data is available: %llX.", res_handle);
- else if (ret == H_SUCCESS)
+ if (ret == H_SUCCESS) {
+ type = EHEA_BMASK_GET(ERROR_DATA_TYPE, rblock[2]);
+ *aer = rblock[6];
+ *aerr = rblock[12];
print_error_data(rblock);
- else
+ } else if (ret == H_R_STATE) {
+ ehea_error("No error data available: %llX.", res_handle);
+ } else
ehea_error("Error data could not be fetched: %llX", res_handle);
free_page((unsigned long)rblock);
+out:
+ return type;
}
diff --git a/drivers/net/ehea/ehea_qmr.h b/drivers/net/ehea/ehea_qmr.h
index 0817c1e74a19..882c50c9c34f 100644
--- a/drivers/net/ehea/ehea_qmr.h
+++ b/drivers/net/ehea/ehea_qmr.h
@@ -154,6 +154,9 @@ struct ehea_rwqe {
#define EHEA_CQE_STAT_ERR_IP 0x2000
#define EHEA_CQE_STAT_ERR_CRC 0x1000
+/* Defines which bad send CQE statuses lead to a port reset */
+#define EHEA_CQE_STAT_RESET_MASK 0x0002
+
struct ehea_cqe {
u64 wr_id; /* work request ID from WQE */
u8 type;
@@ -187,6 +190,14 @@ struct ehea_cqe {
#define EHEA_EQE_SM_MECH_NUMBER EHEA_BMASK_IBM(48, 55)
#define EHEA_EQE_SM_PORT_NUMBER EHEA_BMASK_IBM(56, 63)
+#define EHEA_AER_RESTYPE_QP 0x8
+#define EHEA_AER_RESTYPE_CQ 0x4
+#define EHEA_AER_RESTYPE_EQ 0x3
+
+/* Defines which affiliated errors lead to a port reset */
+#define EHEA_AER_RESET_MASK 0xFFFFFFFFFEFFFFFFULL
+#define EHEA_AERR_RESET_MASK 0xFFFFFFFFFFFFFFFFULL
+
struct ehea_eqe {
u64 entry;
};
@@ -379,7 +390,8 @@ int ehea_gen_smr(struct ehea_adapter *adapter, struct ehea_mr *old_mr,
int ehea_rem_mr(struct ehea_mr *mr);
-void ehea_error_data(struct ehea_adapter *adapter, u64 res_handle);
+u64 ehea_error_data(struct ehea_adapter *adapter, u64 res_handle,
+ u64 *aer, u64 *aerr);
int ehea_add_sect_bmap(unsigned long pfn, unsigned long nr_pages);
int ehea_rem_sect_bmap(unsigned long pfn, unsigned long nr_pages);
diff --git a/drivers/net/enc28j60.c b/drivers/net/enc28j60.c
index ff27f728fd9d..112c5aa9af7f 100644
--- a/drivers/net/enc28j60.c
+++ b/drivers/net/enc28j60.c
@@ -1293,8 +1293,6 @@ static netdev_tx_t enc28j60_send_packet(struct sk_buff *skb,
*/
netif_stop_queue(dev);
- /* save the timestamp */
- priv->netdev->trans_start = jiffies;
/* Remember the skb for deferred processing */
priv->tx_skb = skb;
schedule_work(&priv->tx_work);
diff --git a/drivers/net/enic/Makefile b/drivers/net/enic/Makefile
index 391c3bce5b79..e7b6c31880ba 100644
--- a/drivers/net/enic/Makefile
+++ b/drivers/net/enic/Makefile
@@ -1,5 +1,5 @@
obj-$(CONFIG_ENIC) := enic.o
enic-y := enic_main.o vnic_cq.o vnic_intr.o vnic_wq.o \
- enic_res.o vnic_dev.o vnic_rq.o
+ enic_res.o vnic_dev.o vnic_rq.o vnic_vic.o
diff --git a/drivers/net/enic/cq_enet_desc.h b/drivers/net/enic/cq_enet_desc.h
index 03dce9ed612c..337d1943af46 100644
--- a/drivers/net/enic/cq_enet_desc.h
+++ b/drivers/net/enic/cq_enet_desc.h
@@ -101,14 +101,18 @@ static inline void cq_enet_rq_desc_dec(struct cq_enet_rq_desc *desc,
u8 *tcp_udp_csum_ok, u8 *udp, u8 *tcp, u8 *ipv4_csum_ok,
u8 *ipv6, u8 *ipv4, u8 *ipv4_fragment, u8 *fcs_ok)
{
- u16 completed_index_flags = le16_to_cpu(desc->completed_index_flags);
- u16 q_number_rss_type_flags =
- le16_to_cpu(desc->q_number_rss_type_flags);
- u16 bytes_written_flags = le16_to_cpu(desc->bytes_written_flags);
+ u16 completed_index_flags;
+ u16 q_number_rss_type_flags;
+ u16 bytes_written_flags;
cq_desc_dec((struct cq_desc *)desc, type,
color, q_number, completed_index);
+ completed_index_flags = le16_to_cpu(desc->completed_index_flags);
+ q_number_rss_type_flags =
+ le16_to_cpu(desc->q_number_rss_type_flags);
+ bytes_written_flags = le16_to_cpu(desc->bytes_written_flags);
+
*ingress_port = (completed_index_flags &
CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT) ? 1 : 0;
*fcoe = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_FCOE) ?
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index ee01f5a6d0d4..85f2a2e7030a 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -33,8 +33,8 @@
#include "vnic_rss.h"
#define DRV_NAME "enic"
-#define DRV_DESCRIPTION "Cisco 10G Ethernet Driver"
-#define DRV_VERSION "1.1.0.241a"
+#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
+#define DRV_VERSION "1.3.1.1-pp"
#define DRV_COPYRIGHT "Copyright 2008-2009 Cisco Systems, Inc"
#define PFX DRV_NAME ": "
@@ -74,6 +74,13 @@ struct enic_msix_entry {
void *devid;
};
+struct enic_port_profile {
+ u8 request;
+ char name[PORT_PROFILE_MAX];
+ u8 instance_uuid[PORT_UUID_MAX];
+ u8 host_uuid[PORT_UUID_MAX];
+};
+
/* Per-instance private data structure */
struct enic {
struct net_device *netdev;
@@ -95,6 +102,7 @@ struct enic {
u32 port_mtu;
u32 rx_coalesce_usecs;
u32 tx_coalesce_usecs;
+ struct enic_port_profile pp;
/* work queue cache line section */
____cacheline_aligned struct vnic_wq wq[ENIC_WQ_MAX];
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index cf098bb636b8..6586b5c7e4b6 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -29,6 +29,7 @@
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
+#include <linux/if_link.h>
#include <linux/ethtool.h>
#include <linux/in.h>
#include <linux/ip.h>
@@ -40,6 +41,7 @@
#include "vnic_dev.h"
#include "vnic_intr.h"
#include "vnic_stats.h"
+#include "vnic_vic.h"
#include "enic_res.h"
#include "enic.h"
@@ -49,10 +51,12 @@
#define ENIC_DESC_MAX_SPLITS (MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1)
#define PCI_DEVICE_ID_CISCO_VIC_ENET 0x0043 /* ethernet vnic */
+#define PCI_DEVICE_ID_CISCO_VIC_ENET_DYN 0x0044 /* enet dynamic vnic */
/* Supported devices */
static DEFINE_PCI_DEVICE_TABLE(enic_id_table) = {
{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) },
+ { PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_DYN) },
{ 0, } /* end of table */
};
@@ -113,6 +117,11 @@ static const struct enic_stat enic_rx_stats[] = {
static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats);
static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats);
+static int enic_is_dynamic(struct enic *enic)
+{
+ return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN;
+}
+
static int enic_get_settings(struct net_device *netdev,
struct ethtool_cmd *ecmd)
{
@@ -810,26 +819,90 @@ static void enic_reset_mcaddrs(struct enic *enic)
static int enic_set_mac_addr(struct net_device *netdev, char *addr)
{
- if (!is_valid_ether_addr(addr))
- return -EADDRNOTAVAIL;
+ struct enic *enic = netdev_priv(netdev);
+
+ if (enic_is_dynamic(enic)) {
+ if (!is_valid_ether_addr(addr) && !is_zero_ether_addr(addr))
+ return -EADDRNOTAVAIL;
+ } else {
+ if (!is_valid_ether_addr(addr))
+ return -EADDRNOTAVAIL;
+ }
memcpy(netdev->dev_addr, addr, netdev->addr_len);
return 0;
}
+static int enic_dev_add_station_addr(struct enic *enic)
+{
+ int err = 0;
+
+ if (is_valid_ether_addr(enic->netdev->dev_addr)) {
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_add_addr(enic->vdev, enic->netdev->dev_addr);
+ spin_unlock(&enic->devcmd_lock);
+ }
+
+ return err;
+}
+
+static int enic_dev_del_station_addr(struct enic *enic)
+{
+ int err = 0;
+
+ if (is_valid_ether_addr(enic->netdev->dev_addr)) {
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_del_addr(enic->vdev, enic->netdev->dev_addr);
+ spin_unlock(&enic->devcmd_lock);
+ }
+
+ return err;
+}
+
+static int enic_set_mac_address_dynamic(struct net_device *netdev, void *p)
+{
+ struct enic *enic = netdev_priv(netdev);
+ struct sockaddr *saddr = p;
+ char *addr = saddr->sa_data;
+ int err;
+
+ if (netif_running(enic->netdev)) {
+ err = enic_dev_del_station_addr(enic);
+ if (err)
+ return err;
+ }
+
+ err = enic_set_mac_addr(netdev, addr);
+ if (err)
+ return err;
+
+ if (netif_running(enic->netdev)) {
+ err = enic_dev_add_station_addr(enic);
+ if (err)
+ return err;
+ }
+
+ return err;
+}
+
+static int enic_set_mac_address(struct net_device *netdev, void *p)
+{
+ return -EOPNOTSUPP;
+}
+
/* netif_tx_lock held, BHs disabled */
static void enic_set_multicast_list(struct net_device *netdev)
{
struct enic *enic = netdev_priv(netdev);
- struct dev_mc_list *list;
+ struct netdev_hw_addr *ha;
int directed = 1;
int multicast = (netdev->flags & IFF_MULTICAST) ? 1 : 0;
int broadcast = (netdev->flags & IFF_BROADCAST) ? 1 : 0;
int promisc = (netdev->flags & IFF_PROMISC) ? 1 : 0;
unsigned int mc_count = netdev_mc_count(netdev);
int allmulti = (netdev->flags & IFF_ALLMULTI) ||
- mc_count > ENIC_MULTICAST_PERFECT_FILTERS;
+ mc_count > ENIC_MULTICAST_PERFECT_FILTERS;
unsigned int flags = netdev->flags | (allmulti ? IFF_ALLMULTI : 0);
u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
unsigned int i, j;
@@ -852,10 +925,10 @@ static void enic_set_multicast_list(struct net_device *netdev)
*/
i = 0;
- netdev_for_each_mc_addr(list, netdev) {
+ netdev_for_each_mc_addr(ha, netdev) {
if (i == mc_count)
break;
- memcpy(mc_addr[i++], list->dmi_addr, ETH_ALEN);
+ memcpy(mc_addr[i++], ha->addr, ETH_ALEN);
}
for (i = 0; i < enic->mc_count; i++) {
@@ -922,6 +995,226 @@ static void enic_tx_timeout(struct net_device *netdev)
schedule_work(&enic->reset);
}
+static int enic_vnic_dev_deinit(struct enic *enic)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_deinit(enic->vdev);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+static int enic_dev_init_prov(struct enic *enic, struct vic_provinfo *vp)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_init_prov(enic->vdev,
+ (u8 *)vp, vic_provinfo_size(vp));
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+static int enic_dev_init_done(struct enic *enic, int *done, int *error)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_init_done(enic->vdev, done, error);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+static int enic_set_port_profile(struct enic *enic, u8 request, u8 *mac,
+ char *name, u8 *instance_uuid, u8 *host_uuid)
+{
+ struct vic_provinfo *vp;
+ u8 oui[3] = VIC_PROVINFO_CISCO_OUI;
+ u8 *uuid;
+ char uuid_str[38];
+ static char *uuid_fmt = "%02X%02X%02X%02X-%02X%02X-%02X%02X-"
+ "%02X%02X-%02X%02X%02X%02X%0X%02X";
+ int err;
+
+ if (!name)
+ return -EINVAL;
+
+ if (!is_valid_ether_addr(mac))
+ return -EADDRNOTAVAIL;
+
+ vp = vic_provinfo_alloc(GFP_KERNEL, oui, VIC_PROVINFO_LINUX_TYPE);
+ if (!vp)
+ return -ENOMEM;
+
+ vic_provinfo_add_tlv(vp,
+ VIC_LINUX_PROV_TLV_PORT_PROFILE_NAME_STR,
+ strlen(name) + 1, name);
+
+ vic_provinfo_add_tlv(vp,
+ VIC_LINUX_PROV_TLV_CLIENT_MAC_ADDR,
+ ETH_ALEN, mac);
+
+ if (instance_uuid) {
+ uuid = instance_uuid;
+ sprintf(uuid_str, uuid_fmt,
+ uuid[0], uuid[1], uuid[2], uuid[3],
+ uuid[4], uuid[5], uuid[6], uuid[7],
+ uuid[8], uuid[9], uuid[10], uuid[11],
+ uuid[12], uuid[13], uuid[14], uuid[15]);
+ vic_provinfo_add_tlv(vp,
+ VIC_LINUX_PROV_TLV_CLIENT_UUID_STR,
+ sizeof(uuid_str), uuid_str);
+ }
+
+ if (host_uuid) {
+ uuid = host_uuid;
+ sprintf(uuid_str, uuid_fmt,
+ uuid[0], uuid[1], uuid[2], uuid[3],
+ uuid[4], uuid[5], uuid[6], uuid[7],
+ uuid[8], uuid[9], uuid[10], uuid[11],
+ uuid[12], uuid[13], uuid[14], uuid[15]);
+ vic_provinfo_add_tlv(vp,
+ VIC_LINUX_PROV_TLV_HOST_UUID_STR,
+ sizeof(uuid_str), uuid_str);
+ }
+
+ err = enic_vnic_dev_deinit(enic);
+ if (err)
+ goto err_out;
+
+ memset(&enic->pp, 0, sizeof(enic->pp));
+
+ err = enic_dev_init_prov(enic, vp);
+ if (err)
+ goto err_out;
+
+ enic->pp.request = request;
+ memcpy(enic->pp.name, name, PORT_PROFILE_MAX);
+ if (instance_uuid)
+ memcpy(enic->pp.instance_uuid,
+ instance_uuid, PORT_UUID_MAX);
+ if (host_uuid)
+ memcpy(enic->pp.host_uuid,
+ host_uuid, PORT_UUID_MAX);
+
+err_out:
+ vic_provinfo_free(vp);
+
+ return err;
+}
+
+static int enic_unset_port_profile(struct enic *enic)
+{
+ memset(&enic->pp, 0, sizeof(enic->pp));
+ return enic_vnic_dev_deinit(enic);
+}
+
+static int enic_set_vf_port(struct net_device *netdev, int vf,
+ struct nlattr *port[])
+{
+ struct enic *enic = netdev_priv(netdev);
+ char *name = NULL;
+ u8 *instance_uuid = NULL;
+ u8 *host_uuid = NULL;
+ u8 request = PORT_REQUEST_DISASSOCIATE;
+
+ /* don't support VFs, yet */
+ if (vf != PORT_SELF_VF)
+ return -EOPNOTSUPP;
+
+ if (port[IFLA_PORT_REQUEST])
+ request = nla_get_u8(port[IFLA_PORT_REQUEST]);
+
+ switch (request) {
+ case PORT_REQUEST_ASSOCIATE:
+
+ /* If the interface mac addr hasn't been assigned,
+ * assign a random mac addr before setting port-
+ * profile.
+ */
+
+ if (is_zero_ether_addr(netdev->dev_addr))
+ random_ether_addr(netdev->dev_addr);
+
+ if (port[IFLA_PORT_PROFILE])
+ name = nla_data(port[IFLA_PORT_PROFILE]);
+
+ if (port[IFLA_PORT_INSTANCE_UUID])
+ instance_uuid =
+ nla_data(port[IFLA_PORT_INSTANCE_UUID]);
+
+ if (port[IFLA_PORT_HOST_UUID])
+ host_uuid = nla_data(port[IFLA_PORT_HOST_UUID]);
+
+ return enic_set_port_profile(enic, request,
+ netdev->dev_addr, name,
+ instance_uuid, host_uuid);
+
+ case PORT_REQUEST_DISASSOCIATE:
+
+ return enic_unset_port_profile(enic);
+
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int enic_get_vf_port(struct net_device *netdev, int vf,
+ struct sk_buff *skb)
+{
+ struct enic *enic = netdev_priv(netdev);
+ int err, error, done;
+ u16 response = PORT_PROFILE_RESPONSE_SUCCESS;
+
+ /* don't support VFs, yet */
+ if (vf != PORT_SELF_VF)
+ return -EOPNOTSUPP;
+
+ err = enic_dev_init_done(enic, &done, &error);
+
+ if (err)
+ return err;
+
+ switch (error) {
+ case ERR_SUCCESS:
+ if (!done)
+ response = PORT_PROFILE_RESPONSE_INPROGRESS;
+ break;
+ case ERR_EINVAL:
+ response = PORT_PROFILE_RESPONSE_INVALID;
+ break;
+ case ERR_EBADSTATE:
+ response = PORT_PROFILE_RESPONSE_BADSTATE;
+ break;
+ case ERR_ENOMEM:
+ response = PORT_PROFILE_RESPONSE_INSUFFICIENT_RESOURCES;
+ break;
+ default:
+ response = PORT_PROFILE_RESPONSE_ERROR;
+ break;
+ }
+
+ NLA_PUT_U16(skb, IFLA_PORT_REQUEST, enic->pp.request);
+ NLA_PUT_U16(skb, IFLA_PORT_RESPONSE, response);
+ NLA_PUT(skb, IFLA_PORT_PROFILE, PORT_PROFILE_MAX,
+ enic->pp.name);
+ NLA_PUT(skb, IFLA_PORT_INSTANCE_UUID, PORT_UUID_MAX,
+ enic->pp.instance_uuid);
+ NLA_PUT(skb, IFLA_PORT_HOST_UUID, PORT_UUID_MAX,
+ enic->pp.host_uuid);
+
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
{
struct enic *enic = vnic_dev_priv(rq->vdev);
@@ -1440,9 +1733,7 @@ static int enic_open(struct net_device *netdev)
for (i = 0; i < enic->rq_count; i++)
vnic_rq_enable(&enic->rq[i]);
- spin_lock(&enic->devcmd_lock);
- enic_add_station_addr(enic);
- spin_unlock(&enic->devcmd_lock);
+ enic_dev_add_station_addr(enic);
enic_set_multicast_list(netdev);
netif_wake_queue(netdev);
@@ -1489,6 +1780,8 @@ static int enic_stop(struct net_device *netdev)
netif_carrier_off(netdev);
netif_tx_disable(netdev);
+ enic_dev_del_station_addr(enic);
+
for (i = 0; i < enic->wq_count; i++) {
err = vnic_wq_disable(&enic->wq[i]);
if (err)
@@ -1774,14 +2067,34 @@ static void enic_clear_intr_mode(struct enic *enic)
vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
}
+static const struct net_device_ops enic_netdev_dynamic_ops = {
+ .ndo_open = enic_open,
+ .ndo_stop = enic_stop,
+ .ndo_start_xmit = enic_hard_start_xmit,
+ .ndo_get_stats = enic_get_stats,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_multicast_list = enic_set_multicast_list,
+ .ndo_set_mac_address = enic_set_mac_address_dynamic,
+ .ndo_change_mtu = enic_change_mtu,
+ .ndo_vlan_rx_register = enic_vlan_rx_register,
+ .ndo_vlan_rx_add_vid = enic_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = enic_vlan_rx_kill_vid,
+ .ndo_tx_timeout = enic_tx_timeout,
+ .ndo_set_vf_port = enic_set_vf_port,
+ .ndo_get_vf_port = enic_get_vf_port,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = enic_poll_controller,
+#endif
+};
+
static const struct net_device_ops enic_netdev_ops = {
.ndo_open = enic_open,
.ndo_stop = enic_stop,
.ndo_start_xmit = enic_hard_start_xmit,
.ndo_get_stats = enic_get_stats,
.ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = eth_mac_addr,
.ndo_set_multicast_list = enic_set_multicast_list,
+ .ndo_set_mac_address = enic_set_mac_address,
.ndo_change_mtu = enic_change_mtu,
.ndo_vlan_rx_register = enic_vlan_rx_register,
.ndo_vlan_rx_add_vid = enic_vlan_rx_add_vid,
@@ -2010,11 +2323,13 @@ static int __devinit enic_probe(struct pci_dev *pdev,
netif_carrier_off(netdev);
- err = vnic_dev_init(enic->vdev, 0);
- if (err) {
- printk(KERN_ERR PFX
- "vNIC dev init failed, aborting.\n");
- goto err_out_dev_close;
+ if (!enic_is_dynamic(enic)) {
+ err = vnic_dev_init(enic->vdev, 0);
+ if (err) {
+ printk(KERN_ERR PFX
+ "vNIC dev init failed, aborting.\n");
+ goto err_out_dev_close;
+ }
}
err = enic_dev_init(enic);
@@ -2054,12 +2369,15 @@ static int __devinit enic_probe(struct pci_dev *pdev,
enic->tx_coalesce_usecs = enic->config.intr_timer_usec;
enic->rx_coalesce_usecs = enic->tx_coalesce_usecs;
- netdev->netdev_ops = &enic_netdev_ops;
+ if (enic_is_dynamic(enic))
+ netdev->netdev_ops = &enic_netdev_dynamic_ops;
+ else
+ netdev->netdev_ops = &enic_netdev_ops;
+
netdev->watchdog_timeo = 2 * HZ;
netdev->ethtool_ops = &enic_ethtool_ops;
- netdev->features |= NETIF_F_HW_VLAN_TX |
- NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
+ netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
if (ENIC_SETTING(enic, TXCSUM))
netdev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
if (ENIC_SETTING(enic, TSO))
diff --git a/drivers/net/enic/enic_res.c b/drivers/net/enic/enic_res.c
index 02839bf0fe8b..9b18840cba96 100644
--- a/drivers/net/enic/enic_res.c
+++ b/drivers/net/enic/enic_res.c
@@ -103,11 +103,6 @@ int enic_get_vnic_config(struct enic *enic)
return 0;
}
-void enic_add_station_addr(struct enic *enic)
-{
- vnic_dev_add_addr(enic->vdev, enic->mac_addr);
-}
-
void enic_add_multicast_addr(struct enic *enic, u8 *addr)
{
vnic_dev_add_addr(enic->vdev, addr);
diff --git a/drivers/net/enic/enic_res.h b/drivers/net/enic/enic_res.h
index abc19741ab02..494664f7fccc 100644
--- a/drivers/net/enic/enic_res.h
+++ b/drivers/net/enic/enic_res.h
@@ -131,7 +131,6 @@ static inline void enic_queue_rq_desc(struct vnic_rq *rq,
struct enic;
int enic_get_vnic_config(struct enic *);
-void enic_add_station_addr(struct enic *enic);
void enic_add_multicast_addr(struct enic *enic, u8 *addr);
void enic_del_multicast_addr(struct enic *enic, u8 *addr);
void enic_add_vlan(struct enic *enic, u16 vlanid);
diff --git a/drivers/net/enic/vnic_dev.c b/drivers/net/enic/vnic_dev.c
index cf22de71014e..2b3e16db5c82 100644
--- a/drivers/net/enic/vnic_dev.c
+++ b/drivers/net/enic/vnic_dev.c
@@ -530,7 +530,7 @@ void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
printk(KERN_ERR "Can't set packet filter\n");
}
-void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
+int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
{
u64 a0 = 0, a1 = 0;
int wait = 1000;
@@ -543,9 +543,11 @@ void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
if (err)
printk(KERN_ERR "Can't add addr [%pM], %d\n", addr, err);
+
+ return err;
}
-void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
+int vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
{
u64 a0 = 0, a1 = 0;
int wait = 1000;
@@ -558,6 +560,8 @@ void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
if (err)
printk(KERN_ERR "Can't del addr [%pM], %d\n", addr, err);
+
+ return err;
}
int vnic_dev_raise_intr(struct vnic_dev *vdev, u16 intr)
@@ -574,22 +578,18 @@ int vnic_dev_raise_intr(struct vnic_dev *vdev, u16 intr)
return err;
}
-int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
+int vnic_dev_notify_setcmd(struct vnic_dev *vdev,
+ void *notify_addr, dma_addr_t notify_pa, u16 intr)
{
u64 a0, a1;
int wait = 1000;
int r;
- if (!vdev->notify) {
- vdev->notify = pci_alloc_consistent(vdev->pdev,
- sizeof(struct vnic_devcmd_notify),
- &vdev->notify_pa);
- if (!vdev->notify)
- return -ENOMEM;
- memset(vdev->notify, 0, sizeof(struct vnic_devcmd_notify));
- }
+ memset(notify_addr, 0, sizeof(struct vnic_devcmd_notify));
+ vdev->notify = notify_addr;
+ vdev->notify_pa = notify_pa;
- a0 = vdev->notify_pa;
+ a0 = (u64)notify_pa;
a1 = ((u64)intr << 32) & 0x0000ffff00000000ULL;
a1 += sizeof(struct vnic_devcmd_notify);
@@ -598,7 +598,27 @@ int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
return r;
}
-void vnic_dev_notify_unset(struct vnic_dev *vdev)
+int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
+{
+ void *notify_addr;
+ dma_addr_t notify_pa;
+
+ if (vdev->notify || vdev->notify_pa) {
+ printk(KERN_ERR "notify block %p still allocated",
+ vdev->notify);
+ return -EINVAL;
+ }
+
+ notify_addr = pci_alloc_consistent(vdev->pdev,
+ sizeof(struct vnic_devcmd_notify),
+ &notify_pa);
+ if (!notify_addr)
+ return -ENOMEM;
+
+ return vnic_dev_notify_setcmd(vdev, notify_addr, notify_pa, intr);
+}
+
+void vnic_dev_notify_unsetcmd(struct vnic_dev *vdev)
{
u64 a0, a1;
int wait = 1000;
@@ -608,9 +628,23 @@ void vnic_dev_notify_unset(struct vnic_dev *vdev)
a1 += sizeof(struct vnic_devcmd_notify);
vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
+ vdev->notify = NULL;
+ vdev->notify_pa = 0;
vdev->notify_sz = 0;
}
+void vnic_dev_notify_unset(struct vnic_dev *vdev)
+{
+ if (vdev->notify) {
+ pci_free_consistent(vdev->pdev,
+ sizeof(struct vnic_devcmd_notify),
+ vdev->notify,
+ vdev->notify_pa);
+ }
+
+ vnic_dev_notify_unsetcmd(vdev);
+}
+
static int vnic_dev_notify_ready(struct vnic_dev *vdev)
{
u32 *words;
@@ -652,6 +686,56 @@ int vnic_dev_init(struct vnic_dev *vdev, int arg)
return r;
}
+int vnic_dev_init_done(struct vnic_dev *vdev, int *done, int *err)
+{
+ u64 a0 = 0, a1 = 0;
+ int wait = 1000;
+ int ret;
+
+ *done = 0;
+
+ ret = vnic_dev_cmd(vdev, CMD_INIT_STATUS, &a0, &a1, wait);
+ if (ret)
+ return ret;
+
+ *done = (a0 == 0);
+
+ *err = (a0 == 0) ? a1 : 0;
+
+ return 0;
+}
+
+int vnic_dev_init_prov(struct vnic_dev *vdev, u8 *buf, u32 len)
+{
+ u64 a0, a1 = len;
+ int wait = 1000;
+ u64 prov_pa;
+ void *prov_buf;
+ int ret;
+
+ prov_buf = pci_alloc_consistent(vdev->pdev, len, &prov_pa);
+ if (!prov_buf)
+ return -ENOMEM;
+
+ memcpy(prov_buf, buf, len);
+
+ a0 = prov_pa;
+
+ ret = vnic_dev_cmd(vdev, CMD_INIT_PROV_INFO, &a0, &a1, wait);
+
+ pci_free_consistent(vdev->pdev, len, prov_buf, prov_pa);
+
+ return ret;
+}
+
+int vnic_dev_deinit(struct vnic_dev *vdev)
+{
+ u64 a0 = 0, a1 = 0;
+ int wait = 1000;
+
+ return vnic_dev_cmd(vdev, CMD_DEINIT, &a0, &a1, wait);
+}
+
int vnic_dev_link_status(struct vnic_dev *vdev)
{
if (vdev->linkstatus)
diff --git a/drivers/net/enic/vnic_dev.h b/drivers/net/enic/vnic_dev.h
index fc5e3eb35a5e..caccce36957b 100644
--- a/drivers/net/enic/vnic_dev.h
+++ b/drivers/net/enic/vnic_dev.h
@@ -103,11 +103,14 @@ int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats);
int vnic_dev_hang_notify(struct vnic_dev *vdev);
void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
int broadcast, int promisc, int allmulti);
-void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr);
-void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr);
+int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr);
+int vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr);
int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr);
int vnic_dev_raise_intr(struct vnic_dev *vdev, u16 intr);
+int vnic_dev_notify_setcmd(struct vnic_dev *vdev,
+ void *notify_addr, dma_addr_t notify_pa, u16 intr);
int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr);
+void vnic_dev_notify_unsetcmd(struct vnic_dev *vdev);
void vnic_dev_notify_unset(struct vnic_dev *vdev);
int vnic_dev_link_status(struct vnic_dev *vdev);
u32 vnic_dev_port_speed(struct vnic_dev *vdev);
@@ -121,6 +124,9 @@ int vnic_dev_disable(struct vnic_dev *vdev);
int vnic_dev_open(struct vnic_dev *vdev, int arg);
int vnic_dev_open_done(struct vnic_dev *vdev, int *done);
int vnic_dev_init(struct vnic_dev *vdev, int arg);
+int vnic_dev_init_done(struct vnic_dev *vdev, int *done, int *err);
+int vnic_dev_init_prov(struct vnic_dev *vdev, u8 *buf, u32 len);
+int vnic_dev_deinit(struct vnic_dev *vdev);
int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg);
int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done);
void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
diff --git a/drivers/net/enic/vnic_rq.c b/drivers/net/enic/vnic_rq.c
index e186efaf9da1..cc580cfec41d 100644
--- a/drivers/net/enic/vnic_rq.c
+++ b/drivers/net/enic/vnic_rq.c
@@ -168,10 +168,10 @@ int vnic_rq_disable(struct vnic_rq *rq)
iowrite32(0, &rq->ctrl->enable);
/* Wait for HW to ACK disable request */
- for (wait = 0; wait < 100; wait++) {
+ for (wait = 0; wait < 1000; wait++) {
if (!(ioread32(&rq->ctrl->running)))
return 0;
- udelay(1);
+ udelay(10);
}
printk(KERN_ERR "Failed to disable RQ[%d]\n", rq->index);
diff --git a/drivers/net/enic/vnic_vic.c b/drivers/net/enic/vnic_vic.c
new file mode 100644
index 000000000000..d769772998c6
--- /dev/null
+++ b/drivers/net/enic/vnic_vic.c
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2010 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+
+#include "vnic_vic.h"
+
+struct vic_provinfo *vic_provinfo_alloc(gfp_t flags, u8 *oui, u8 type)
+{
+ struct vic_provinfo *vp = kzalloc(VIC_PROVINFO_MAX_DATA, flags);
+
+ if (!vp || !oui)
+ return NULL;
+
+ memcpy(vp->oui, oui, sizeof(vp->oui));
+ vp->type = type;
+ vp->length = htonl(sizeof(vp->num_tlvs));
+
+ return vp;
+}
+
+void vic_provinfo_free(struct vic_provinfo *vp)
+{
+ kfree(vp);
+}
+
+int vic_provinfo_add_tlv(struct vic_provinfo *vp, u16 type, u16 length,
+ void *value)
+{
+ struct vic_provinfo_tlv *tlv;
+
+ if (!vp || !value)
+ return -EINVAL;
+
+ if (ntohl(vp->length) + sizeof(*tlv) + length >
+ VIC_PROVINFO_MAX_TLV_DATA)
+ return -ENOMEM;
+
+ tlv = (struct vic_provinfo_tlv *)((u8 *)vp->tlv +
+ ntohl(vp->length) - sizeof(vp->num_tlvs));
+
+ tlv->type = htons(type);
+ tlv->length = htons(length);
+ memcpy(tlv->value, value, length);
+
+ vp->num_tlvs = htonl(ntohl(vp->num_tlvs) + 1);
+ vp->length = htonl(ntohl(vp->length) + sizeof(*tlv) + length);
+
+ return 0;
+}
+
+size_t vic_provinfo_size(struct vic_provinfo *vp)
+{
+ return vp ? ntohl(vp->length) + sizeof(*vp) - sizeof(vp->num_tlvs) : 0;
+}
diff --git a/drivers/net/enic/vnic_vic.h b/drivers/net/enic/vnic_vic.h
new file mode 100644
index 000000000000..085c2a274cb1
--- /dev/null
+++ b/drivers/net/enic/vnic_vic.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2010 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _VNIC_VIC_H_
+#define _VNIC_VIC_H_
+
+/* Note: All integer fields in NETWORK byte order */
+
+/* Note: String field lengths include null char */
+
+#define VIC_PROVINFO_CISCO_OUI { 0x00, 0x00, 0x0c }
+#define VIC_PROVINFO_LINUX_TYPE 0x2
+
+enum vic_linux_prov_tlv_type {
+ VIC_LINUX_PROV_TLV_PORT_PROFILE_NAME_STR = 0,
+ VIC_LINUX_PROV_TLV_CLIENT_MAC_ADDR = 1, /* u8[6] */
+ VIC_LINUX_PROV_TLV_CLIENT_NAME_STR = 2,
+ VIC_LINUX_PROV_TLV_HOST_UUID_STR = 8,
+ VIC_LINUX_PROV_TLV_CLIENT_UUID_STR = 9,
+};
+
+struct vic_provinfo {
+ u8 oui[3]; /* OUI of data provider */
+ u8 type; /* provider-specific type */
+ u32 length; /* length of data below */
+ u32 num_tlvs; /* number of tlvs */
+ struct vic_provinfo_tlv {
+ u16 type;
+ u16 length;
+ u8 value[0];
+ } tlv[0];
+} __attribute__ ((packed));
+
+#define VIC_PROVINFO_MAX_DATA 1385
+#define VIC_PROVINFO_MAX_TLV_DATA (VIC_PROVINFO_MAX_DATA - \
+ sizeof(struct vic_provinfo))
+
+struct vic_provinfo *vic_provinfo_alloc(gfp_t flags, u8 *oui, u8 type);
+void vic_provinfo_free(struct vic_provinfo *vp);
+int vic_provinfo_add_tlv(struct vic_provinfo *vp, u16 type, u16 length,
+ void *value);
+size_t vic_provinfo_size(struct vic_provinfo *vp);
+
+#endif /* _VNIC_VIC_H_ */
diff --git a/drivers/net/enic/vnic_wq.c b/drivers/net/enic/vnic_wq.c
index d5f984357f5c..1378afbdfe67 100644
--- a/drivers/net/enic/vnic_wq.c
+++ b/drivers/net/enic/vnic_wq.c
@@ -161,10 +161,10 @@ int vnic_wq_disable(struct vnic_wq *wq)
iowrite32(0, &wq->ctrl->enable);
/* Wait for HW to ACK disable request */
- for (wait = 0; wait < 100; wait++) {
+ for (wait = 0; wait < 1000; wait++) {
if (!(ioread32(&wq->ctrl->running)))
return 0;
- udelay(1);
+ udelay(10);
}
printk(KERN_ERR "Failed to disable WQ[%d]\n", wq->index);
diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c
index 7a567201e829..6838dfc9ef23 100644
--- a/drivers/net/epic100.c
+++ b/drivers/net/epic100.c
@@ -652,7 +652,6 @@ static void mdio_write(struct net_device *dev, int phy_id, int loc, int value)
if ((inl(ioaddr + MIICtrl) & MII_WRITEOP) == 0)
break;
}
- return;
}
@@ -840,7 +839,6 @@ static void epic_restart(struct net_device *dev)
" interrupt %4.4x.\n",
dev->name, (int)inl(ioaddr + COMMAND), (int)inl(ioaddr + GENCTL),
(int)inl(ioaddr + INTSTAT));
- return;
}
static void check_media(struct net_device *dev)
@@ -908,7 +906,7 @@ static void epic_tx_timeout(struct net_device *dev)
outl(TxQueued, dev->base_addr + COMMAND);
}
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
ep->stats.tx_errors++;
if (!ep->tx_full)
netif_wake_queue(dev);
@@ -958,7 +956,6 @@ static void epic_init_ring(struct net_device *dev)
(i+1)*sizeof(struct epic_tx_desc);
}
ep->tx_ring[i-1].next = ep->tx_ring_dma;
- return;
}
static netdev_tx_t epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -1006,7 +1003,6 @@ static netdev_tx_t epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Trigger an immediate transmit demand. */
outl(TxQueued, dev->base_addr + COMMAND);
- dev->trans_start = jiffies;
if (debug > 4)
printk(KERN_DEBUG "%s: Queued Tx packet size %d to slot %d, "
"flag %2.2x Tx status %8.8x.\n",
@@ -1399,12 +1395,12 @@ static void set_rx_mode(struct net_device *dev)
outl(0x0004, ioaddr + RxCtrl);
return;
} else { /* Never executed, for now. */
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
memset(mc_filter, 0, sizeof(mc_filter));
- netdev_for_each_mc_addr(mclist, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
unsigned int bit_nr =
- ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x3f;
+ ether_crc_le(ETH_ALEN, ha->addr) & 0x3f;
mc_filter[bit_nr >> 3] |= (1 << bit_nr);
}
}
@@ -1414,7 +1410,6 @@ static void set_rx_mode(struct net_device *dev)
outw(((u16 *)mc_filter)[i], ioaddr + MC0 + i*4);
memcpy(ep->mc_filter, mc_filter, sizeof(mc_filter));
}
- return;
}
static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
diff --git a/drivers/net/eql.c b/drivers/net/eql.c
index b34a2ddeef4c..dda2c7944da9 100644
--- a/drivers/net/eql.c
+++ b/drivers/net/eql.c
@@ -288,7 +288,7 @@ static int eql_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return eql_s_master_cfg(dev, ifr->ifr_data);
default:
return -EOPNOTSUPP;
- };
+ }
}
/* queue->lock must be held */
diff --git a/drivers/net/es3210.c b/drivers/net/es3210.c
index 5569f2ffb62c..0ba5e7b90584 100644
--- a/drivers/net/es3210.c
+++ b/drivers/net/es3210.c
@@ -319,8 +319,6 @@ static void es_reset_8390(struct net_device *dev)
ei_status.txing = 0;
outb(0x01, ioaddr + ES_RESET_PORT);
if (ei_debug > 1) printk("reset done\n");
-
- return;
}
/*
diff --git a/drivers/net/eth16i.c b/drivers/net/eth16i.c
index d4e24f08b3ba..874973f558e9 100644
--- a/drivers/net/eth16i.c
+++ b/drivers/net/eth16i.c
@@ -1027,7 +1027,7 @@ static void eth16i_timeout(struct net_device *dev)
inw(ioaddr + TX_STATUS_REG), (inb(ioaddr + TX_STATUS_REG) & TX_DONE) ?
"IRQ conflict" : "network cable problem");
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
/* Let's dump all registers */
if(eth16i_debug > 0) {
@@ -1047,7 +1047,7 @@ static void eth16i_timeout(struct net_device *dev)
}
dev->stats.tx_errors++;
eth16i_reset(dev);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
outw(ETH16I_INTR_ON, ioaddr + TX_INTR_REG);
netif_wake_queue(dev);
}
@@ -1109,7 +1109,6 @@ static netdev_tx_t eth16i_tx(struct sk_buff *skb, struct net_device *dev)
outb(TX_START | lp->tx_queue, ioaddr + TRANSMIT_START_REG);
lp->tx_queue = 0;
lp->tx_queue_len = 0;
- dev->trans_start = jiffies;
lp->tx_started = 1;
netif_wake_queue(dev);
}
diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c
index a8d92503226e..6ed2df14ec84 100644
--- a/drivers/net/ethoc.c
+++ b/drivers/net/ethoc.c
@@ -174,6 +174,7 @@ MODULE_PARM_DESC(buffer_size, "DMA buffer allocation size");
* @iobase: pointer to I/O memory region
* @membase: pointer to buffer memory region
* @dma_alloc: dma allocated buffer size
+ * @io_region_size: I/O memory region size
* @num_tx: number of send buffers
* @cur_tx: last send buffer written
* @dty_tx: last buffer actually sent
@@ -193,6 +194,7 @@ struct ethoc {
void __iomem *iobase;
void __iomem *membase;
int dma_alloc;
+ resource_size_t io_region_size;
unsigned int num_tx;
unsigned int cur_tx;
@@ -756,7 +758,7 @@ static void ethoc_set_multicast_list(struct net_device *dev)
{
struct ethoc *priv = netdev_priv(dev);
u32 mode = ethoc_read(priv, MODER);
- struct dev_mc_list *mc;
+ struct netdev_hw_addr *ha;
u32 hash[2] = { 0, 0 };
/* set loopback mode if requested */
@@ -784,8 +786,8 @@ static void ethoc_set_multicast_list(struct net_device *dev)
hash[0] = 0xffffffff;
hash[1] = 0xffffffff;
} else {
- netdev_for_each_mc_addr(mc, dev) {
- u32 crc = ether_crc(ETH_ALEN, mc->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev) {
+ u32 crc = ether_crc(ETH_ALEN, ha->addr);
int bit = (crc >> 26) & 0x3f;
hash[bit >> 5] |= 1 << (bit & 0x1f);
}
@@ -851,7 +853,6 @@ static netdev_tx_t ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev)
netif_stop_queue(dev);
}
- dev->trans_start = jiffies;
spin_unlock_irq(&priv->lock);
out:
dev_kfree_skb(skb);
@@ -944,6 +945,7 @@ static int ethoc_probe(struct platform_device *pdev)
priv = netdev_priv(netdev);
priv->netdev = netdev;
priv->dma_alloc = 0;
+ priv->io_region_size = mmio->end - mmio->start + 1;
priv->iobase = devm_ioremap_nocache(&pdev->dev, netdev->base_addr,
resource_size(mmio));
@@ -1040,7 +1042,6 @@ static int ethoc_probe(struct platform_device *pdev)
netdev->features |= 0;
/* setup NAPI */
- memset(&priv->napi, 0, sizeof(priv->napi));
netif_napi_add(netdev, &priv->napi, ethoc_poll, 64);
spin_lock_init(&priv->rx_lock);
@@ -1049,20 +1050,34 @@ static int ethoc_probe(struct platform_device *pdev)
ret = register_netdev(netdev);
if (ret < 0) {
dev_err(&netdev->dev, "failed to register interface\n");
- goto error;
+ goto error2;
}
goto out;
+error2:
+ netif_napi_del(&priv->napi);
error:
mdiobus_unregister(priv->mdio);
free_mdio:
kfree(priv->mdio->irq);
mdiobus_free(priv->mdio);
free:
- if (priv->dma_alloc)
- dma_free_coherent(NULL, priv->dma_alloc, priv->membase,
- netdev->mem_start);
+ if (priv) {
+ if (priv->dma_alloc)
+ dma_free_coherent(NULL, priv->dma_alloc, priv->membase,
+ netdev->mem_start);
+ else if (priv->membase)
+ devm_iounmap(&pdev->dev, priv->membase);
+ if (priv->iobase)
+ devm_iounmap(&pdev->dev, priv->iobase);
+ }
+ if (mem)
+ devm_release_mem_region(&pdev->dev, mem->start,
+ mem->end - mem->start + 1);
+ if (mmio)
+ devm_release_mem_region(&pdev->dev, mmio->start,
+ mmio->end - mmio->start + 1);
free_netdev(netdev);
out:
return ret;
@@ -1080,6 +1095,7 @@ static int ethoc_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
if (netdev) {
+ netif_napi_del(&priv->napi);
phy_disconnect(priv->phy);
priv->phy = NULL;
@@ -1091,6 +1107,14 @@ static int ethoc_remove(struct platform_device *pdev)
if (priv->dma_alloc)
dma_free_coherent(NULL, priv->dma_alloc, priv->membase,
netdev->mem_start);
+ else {
+ devm_iounmap(&pdev->dev, priv->membase);
+ devm_release_mem_region(&pdev->dev, netdev->mem_start,
+ netdev->mem_end - netdev->mem_start + 1);
+ }
+ devm_iounmap(&pdev->dev, priv->iobase);
+ devm_release_mem_region(&pdev->dev, netdev->base_addr,
+ priv->io_region_size);
unregister_netdev(netdev);
free_netdev(netdev);
}
diff --git a/drivers/net/ewrk3.c b/drivers/net/ewrk3.c
index 91e59f3a9d6d..380d0614a89a 100644
--- a/drivers/net/ewrk3.c
+++ b/drivers/net/ewrk3.c
@@ -757,7 +757,7 @@ static void ewrk3_timeout(struct net_device *dev)
*/
ENABLE_IRQs;
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
}
}
@@ -862,7 +862,6 @@ static netdev_tx_t ewrk3_queue_pkt(struct sk_buff *skb, struct net_device *dev)
spin_unlock_irq (&lp->hw_lock);
dev->stats.tx_bytes += skb->len;
- dev->trans_start = jiffies;
dev_kfree_skb (skb);
/* Check for free resources: stop Tx queue if there are none */
@@ -1169,7 +1168,7 @@ static void set_multicast_list(struct net_device *dev)
static void SetMulticastFilter(struct net_device *dev)
{
struct ewrk3_private *lp = netdev_priv(dev);
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
u_long iobase = dev->base_addr;
int i;
char *addrs, bit, byte;
@@ -1213,8 +1212,8 @@ static void SetMulticastFilter(struct net_device *dev)
}
/* Update table */
- netdev_for_each_mc_addr(dmi, dev) {
- addrs = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ addrs = ha->addr;
if ((*addrs & 0x01) == 1) { /* multicast address? */
crc = ether_crc_le(ETH_ALEN, addrs);
hashcode = crc & ((1 << 9) - 1); /* hashcode is 9 LSb of CRC */
@@ -1370,8 +1369,6 @@ static void __init EthwrkSignature(char *name, char *eeprom_image)
name[EWRK3_STRLEN] = '\0';
} else
name[0] = '\0';
-
- return;
}
/*
@@ -1776,8 +1773,7 @@ static int ewrk3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
break;
case EWRK3_SET_MCA: /* Set a multicast address */
if (capable(CAP_NET_ADMIN)) {
- if (ioc->len > 1024)
- {
+ if (ioc->len > HASH_TABLE_LEN) {
status = -EINVAL;
break;
}
diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c
index d11ae5197f01..15f4f8d3d46d 100644
--- a/drivers/net/fealnx.c
+++ b/drivers/net/fealnx.c
@@ -1233,7 +1233,7 @@ static void fealnx_tx_timeout(struct net_device *dev)
spin_unlock_irqrestore(&np->lock, flags);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
np->stats.tx_errors++;
netif_wake_queue(dev); /* or .._start_.. ?? */
}
@@ -1374,7 +1374,6 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
netif_stop_queue(dev);
++np->really_tx_count;
iowrite32(0, np->mem + TXPDR);
- dev->trans_start = jiffies;
spin_unlock_irqrestore(&np->lock, flags);
return NETDEV_TX_OK;
@@ -1791,12 +1790,12 @@ static void __set_rx_mode(struct net_device *dev)
memset(mc_filter, 0xff, sizeof(mc_filter));
rx_mode = CR_W_AB | CR_W_AM;
} else {
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
memset(mc_filter, 0, sizeof(mc_filter));
- netdev_for_each_mc_addr(mclist, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
unsigned int bit;
- bit = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26) ^ 0x3F;
+ bit = (ether_crc(ETH_ALEN, ha->addr) >> 26) ^ 0x3F;
mc_filter[bit >> 5] |= (1 << bit);
}
rx_mode = CR_W_AB | CR_W_AM;
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index 9b4e8f797a7a..ddf7a86cd466 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -40,6 +40,8 @@
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
+#include <linux/phy.h>
+#include <linux/fec.h>
#include <asm/cacheflush.h>
@@ -61,7 +63,6 @@
* Define the fixed address of the FEC hardware.
*/
#if defined(CONFIG_M5272)
-#define HAVE_mii_link_interrupt
static unsigned char fec_mac_default[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -86,23 +87,6 @@ static unsigned char fec_mac_default[] = {
#endif
#endif /* CONFIG_M5272 */
-/* Forward declarations of some structures to support different PHYs */
-
-typedef struct {
- uint mii_data;
- void (*funct)(uint mii_reg, struct net_device *dev);
-} phy_cmd_t;
-
-typedef struct {
- uint id;
- char *name;
-
- const phy_cmd_t *config;
- const phy_cmd_t *startup;
- const phy_cmd_t *ack_int;
- const phy_cmd_t *shutdown;
-} phy_info_t;
-
/* The number of Tx and Rx buffers. These are allocated from the page
 * pool. The code may assume these are powers of two, so it is best
* to keep them that size.
@@ -189,29 +173,22 @@ struct fec_enet_private {
uint tx_full;
/* hold while accessing the HW like ringbuffer for tx/rx but not MAC */
spinlock_t hw_lock;
- /* hold while accessing the mii_list_t() elements */
- spinlock_t mii_lock;
-
- uint phy_id;
- uint phy_id_done;
- uint phy_status;
- uint phy_speed;
- phy_info_t const *phy;
- struct work_struct phy_task;
- uint sequence_done;
- uint mii_phy_task_queued;
+ struct platform_device *pdev;
- uint phy_addr;
+ int opened;
+ /* Phylib and MDIO interface */
+ struct mii_bus *mii_bus;
+ struct phy_device *phy_dev;
+ int mii_timeout;
+ uint phy_speed;
+ phy_interface_t phy_interface;
int index;
- int opened;
int link;
- int old_link;
int full_duplex;
};
-static void fec_enet_mii(struct net_device *dev);
static irqreturn_t fec_enet_interrupt(int irq, void * dev_id);
static void fec_enet_tx(struct net_device *dev);
static void fec_enet_rx(struct net_device *dev);
@@ -219,67 +196,20 @@ static int fec_enet_close(struct net_device *dev);
static void fec_restart(struct net_device *dev, int duplex);
static void fec_stop(struct net_device *dev);
+/* FEC MII MMFR bits definition */
+#define FEC_MMFR_ST (1 << 30)
+#define FEC_MMFR_OP_READ (2 << 28)
+#define FEC_MMFR_OP_WRITE (1 << 28)
+#define FEC_MMFR_PA(v) ((v & 0x1f) << 23)
+#define FEC_MMFR_RA(v) ((v & 0x1f) << 18)
+#define FEC_MMFR_TA (2 << 16)
+#define FEC_MMFR_DATA(v) (v & 0xffff)
-/* MII processing. We keep this as simple as possible. Requests are
- * placed on the list (if there is room). When the request is finished
- * by the MII, an optional function may be called.
- */
-typedef struct mii_list {
- uint mii_regval;
- void (*mii_func)(uint val, struct net_device *dev);
- struct mii_list *mii_next;
-} mii_list_t;
-
-#define NMII 20
-static mii_list_t mii_cmds[NMII];
-static mii_list_t *mii_free;
-static mii_list_t *mii_head;
-static mii_list_t *mii_tail;
-
-static int mii_queue(struct net_device *dev, int request,
- void (*func)(uint, struct net_device *));
-
-/* Make MII read/write commands for the FEC */
-#define mk_mii_read(REG) (0x60020000 | ((REG & 0x1f) << 18))
-#define mk_mii_write(REG, VAL) (0x50020000 | ((REG & 0x1f) << 18) | \
- (VAL & 0xffff))
-#define mk_mii_end 0
+#define FEC_MII_TIMEOUT 10000
/* Transmitter timeout */
#define TX_TIMEOUT (2 * HZ)
-/* Register definitions for the PHY */
-
-#define MII_REG_CR 0 /* Control Register */
-#define MII_REG_SR 1 /* Status Register */
-#define MII_REG_PHYIR1 2 /* PHY Identification Register 1 */
-#define MII_REG_PHYIR2 3 /* PHY Identification Register 2 */
-#define MII_REG_ANAR 4 /* A-N Advertisement Register */
-#define MII_REG_ANLPAR 5 /* A-N Link Partner Ability Register */
-#define MII_REG_ANER 6 /* A-N Expansion Register */
-#define MII_REG_ANNPTR 7 /* A-N Next Page Transmit Register */
-#define MII_REG_ANLPRNPR 8 /* A-N Link Partner Received Next Page Reg. */
-
-/* values for phy_status */
-
-#define PHY_CONF_ANE 0x0001 /* 1 auto-negotiation enabled */
-#define PHY_CONF_LOOP 0x0002 /* 1 loopback mode enabled */
-#define PHY_CONF_SPMASK 0x00f0 /* mask for speed */
-#define PHY_CONF_10HDX 0x0010 /* 10 Mbit half duplex supported */
-#define PHY_CONF_10FDX 0x0020 /* 10 Mbit full duplex supported */
-#define PHY_CONF_100HDX 0x0040 /* 100 Mbit half duplex supported */
-#define PHY_CONF_100FDX 0x0080 /* 100 Mbit full duplex supported */
-
-#define PHY_STAT_LINK 0x0100 /* 1 up - 0 down */
-#define PHY_STAT_FAULT 0x0200 /* 1 remote fault */
-#define PHY_STAT_ANC 0x0400 /* 1 auto-negotiation complete */
-#define PHY_STAT_SPMASK 0xf000 /* mask for speed */
-#define PHY_STAT_10HDX 0x1000 /* 10 Mbit half duplex selected */
-#define PHY_STAT_10FDX 0x2000 /* 10 Mbit full duplex selected */
-#define PHY_STAT_100HDX 0x4000 /* 100 Mbit half duplex selected */
-#define PHY_STAT_100FDX 0x8000 /* 100 Mbit full duplex selected */
-
-
static int
fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
@@ -347,8 +277,6 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
| BD_ENET_TX_LAST | BD_ENET_TX_TC);
bdp->cbd_sc = status;
- dev->trans_start = jiffies;
-
/* Trigger transmission start */
writel(0, fep->hwp + FEC_X_DES_ACTIVE);
@@ -406,12 +334,6 @@ fec_enet_interrupt(int irq, void * dev_id)
ret = IRQ_HANDLED;
fec_enet_tx(dev);
}
-
- if (int_events & FEC_ENET_MII) {
- ret = IRQ_HANDLED;
- fec_enet_mii(dev);
- }
-
} while (int_events);
return ret;
@@ -607,827 +529,313 @@ rx_processing_done:
spin_unlock(&fep->hw_lock);
}
-/* called from interrupt context */
-static void
-fec_enet_mii(struct net_device *dev)
-{
- struct fec_enet_private *fep;
- mii_list_t *mip;
-
- fep = netdev_priv(dev);
- spin_lock(&fep->mii_lock);
-
- if ((mip = mii_head) == NULL) {
- printk("MII and no head!\n");
- goto unlock;
- }
-
- if (mip->mii_func != NULL)
- (*(mip->mii_func))(readl(fep->hwp + FEC_MII_DATA), dev);
-
- mii_head = mip->mii_next;
- mip->mii_next = mii_free;
- mii_free = mip;
-
- if ((mip = mii_head) != NULL)
- writel(mip->mii_regval, fep->hwp + FEC_MII_DATA);
-
-unlock:
- spin_unlock(&fep->mii_lock);
-}
-
-static int
-mii_queue_unlocked(struct net_device *dev, int regval,
- void (*func)(uint, struct net_device *))
+/* ------------------------------------------------------------------------- */
+#ifdef CONFIG_M5272
+static void __inline__ fec_get_mac(struct net_device *dev)
{
- struct fec_enet_private *fep;
- mii_list_t *mip;
- int retval;
-
- /* Add PHY address to register command */
- fep = netdev_priv(dev);
+ struct fec_enet_private *fep = netdev_priv(dev);
+ unsigned char *iap, tmpaddr[ETH_ALEN];
- regval |= fep->phy_addr << 23;
- retval = 0;
-
- if ((mip = mii_free) != NULL) {
- mii_free = mip->mii_next;
- mip->mii_regval = regval;
- mip->mii_func = func;
- mip->mii_next = NULL;
- if (mii_head) {
- mii_tail->mii_next = mip;
- mii_tail = mip;
- } else {
- mii_head = mii_tail = mip;
- writel(regval, fep->hwp + FEC_MII_DATA);
- }
+ if (FEC_FLASHMAC) {
+ /*
+ * Get MAC address from FLASH.
+ * If it is all 1's or 0's, use the default.
+ */
+ iap = (unsigned char *)FEC_FLASHMAC;
+ if ((iap[0] == 0) && (iap[1] == 0) && (iap[2] == 0) &&
+ (iap[3] == 0) && (iap[4] == 0) && (iap[5] == 0))
+ iap = fec_mac_default;
+ if ((iap[0] == 0xff) && (iap[1] == 0xff) && (iap[2] == 0xff) &&
+ (iap[3] == 0xff) && (iap[4] == 0xff) && (iap[5] == 0xff))
+ iap = fec_mac_default;
} else {
- retval = 1;
+ *((unsigned long *) &tmpaddr[0]) = readl(fep->hwp + FEC_ADDR_LOW);
+ *((unsigned short *) &tmpaddr[4]) = (readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
+ iap = &tmpaddr[0];
}
- return retval;
-}
-
-static int
-mii_queue(struct net_device *dev, int regval,
- void (*func)(uint, struct net_device *))
-{
- struct fec_enet_private *fep;
- unsigned long flags;
- int retval;
- fep = netdev_priv(dev);
- spin_lock_irqsave(&fep->mii_lock, flags);
- retval = mii_queue_unlocked(dev, regval, func);
- spin_unlock_irqrestore(&fep->mii_lock, flags);
- return retval;
-}
-
-static void mii_do_cmd(struct net_device *dev, const phy_cmd_t *c)
-{
- if(!c)
- return;
+ memcpy(dev->dev_addr, iap, ETH_ALEN);
- for (; c->mii_data != mk_mii_end; c++)
- mii_queue(dev, c->mii_data, c->funct);
+ /* Adjust MAC if using default MAC address */
+ if (iap == fec_mac_default)
+ dev->dev_addr[ETH_ALEN-1] = fec_mac_default[ETH_ALEN-1] + fep->index;
}
+#endif
-static void mii_parse_sr(uint mii_reg, struct net_device *dev)
-{
- struct fec_enet_private *fep = netdev_priv(dev);
- volatile uint *s = &(fep->phy_status);
- uint status;
-
- status = *s & ~(PHY_STAT_LINK | PHY_STAT_FAULT | PHY_STAT_ANC);
-
- if (mii_reg & 0x0004)
- status |= PHY_STAT_LINK;
- if (mii_reg & 0x0010)
- status |= PHY_STAT_FAULT;
- if (mii_reg & 0x0020)
- status |= PHY_STAT_ANC;
- *s = status;
-}
+/* ------------------------------------------------------------------------- */
-static void mii_parse_cr(uint mii_reg, struct net_device *dev)
+/*
+ * Phy section
+ */
+static void fec_enet_adjust_link(struct net_device *dev)
{
struct fec_enet_private *fep = netdev_priv(dev);
- volatile uint *s = &(fep->phy_status);
- uint status;
-
- status = *s & ~(PHY_CONF_ANE | PHY_CONF_LOOP);
-
- if (mii_reg & 0x1000)
- status |= PHY_CONF_ANE;
- if (mii_reg & 0x4000)
- status |= PHY_CONF_LOOP;
- *s = status;
-}
+ struct phy_device *phy_dev = fep->phy_dev;
+ unsigned long flags;
-static void mii_parse_anar(uint mii_reg, struct net_device *dev)
-{
- struct fec_enet_private *fep = netdev_priv(dev);
- volatile uint *s = &(fep->phy_status);
- uint status;
-
- status = *s & ~(PHY_CONF_SPMASK);
-
- if (mii_reg & 0x0020)
- status |= PHY_CONF_10HDX;
- if (mii_reg & 0x0040)
- status |= PHY_CONF_10FDX;
- if (mii_reg & 0x0080)
- status |= PHY_CONF_100HDX;
- if (mii_reg & 0x00100)
- status |= PHY_CONF_100FDX;
- *s = status;
-}
+ int status_change = 0;
-/* ------------------------------------------------------------------------- */
-/* The Level one LXT970 is used by many boards */
+ spin_lock_irqsave(&fep->hw_lock, flags);
-#define MII_LXT970_MIRROR 16 /* Mirror register */
-#define MII_LXT970_IER 17 /* Interrupt Enable Register */
-#define MII_LXT970_ISR 18 /* Interrupt Status Register */
-#define MII_LXT970_CONFIG 19 /* Configuration Register */
-#define MII_LXT970_CSR 20 /* Chip Status Register */
+ /* Prevent a state halted on mii error */
+ if (fep->mii_timeout && phy_dev->state == PHY_HALTED) {
+ phy_dev->state = PHY_RESUMING;
+ goto spin_unlock;
+ }
-static void mii_parse_lxt970_csr(uint mii_reg, struct net_device *dev)
-{
- struct fec_enet_private *fep = netdev_priv(dev);
- volatile uint *s = &(fep->phy_status);
- uint status;
+ /* Duplex link change */
+ if (phy_dev->link) {
+ if (fep->full_duplex != phy_dev->duplex) {
+ fec_restart(dev, phy_dev->duplex);
+ status_change = 1;
+ }
+ }
- status = *s & ~(PHY_STAT_SPMASK);
- if (mii_reg & 0x0800) {
- if (mii_reg & 0x1000)
- status |= PHY_STAT_100FDX;
- else
- status |= PHY_STAT_100HDX;
- } else {
- if (mii_reg & 0x1000)
- status |= PHY_STAT_10FDX;
+ /* Link on or off change */
+ if (phy_dev->link != fep->link) {
+ fep->link = phy_dev->link;
+ if (phy_dev->link)
+ fec_restart(dev, phy_dev->duplex);
else
- status |= PHY_STAT_10HDX;
+ fec_stop(dev);
+ status_change = 1;
}
- *s = status;
-}
-static phy_cmd_t const phy_cmd_lxt970_config[] = {
- { mk_mii_read(MII_REG_CR), mii_parse_cr },
- { mk_mii_read(MII_REG_ANAR), mii_parse_anar },
- { mk_mii_end, }
- };
-static phy_cmd_t const phy_cmd_lxt970_startup[] = { /* enable interrupts */
- { mk_mii_write(MII_LXT970_IER, 0x0002), NULL },
- { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
- { mk_mii_end, }
- };
-static phy_cmd_t const phy_cmd_lxt970_ack_int[] = {
- /* read SR and ISR to acknowledge */
- { mk_mii_read(MII_REG_SR), mii_parse_sr },
- { mk_mii_read(MII_LXT970_ISR), NULL },
-
- /* find out the current status */
- { mk_mii_read(MII_LXT970_CSR), mii_parse_lxt970_csr },
- { mk_mii_end, }
- };
-static phy_cmd_t const phy_cmd_lxt970_shutdown[] = { /* disable interrupts */
- { mk_mii_write(MII_LXT970_IER, 0x0000), NULL },
- { mk_mii_end, }
- };
-static phy_info_t const phy_info_lxt970 = {
- .id = 0x07810000,
- .name = "LXT970",
- .config = phy_cmd_lxt970_config,
- .startup = phy_cmd_lxt970_startup,
- .ack_int = phy_cmd_lxt970_ack_int,
- .shutdown = phy_cmd_lxt970_shutdown
-};
-
-/* ------------------------------------------------------------------------- */
-/* The Level one LXT971 is used on some of my custom boards */
-
-/* register definitions for the 971 */
+spin_unlock:
+ spin_unlock_irqrestore(&fep->hw_lock, flags);
-#define MII_LXT971_PCR 16 /* Port Control Register */
-#define MII_LXT971_SR2 17 /* Status Register 2 */
-#define MII_LXT971_IER 18 /* Interrupt Enable Register */
-#define MII_LXT971_ISR 19 /* Interrupt Status Register */
-#define MII_LXT971_LCR 20 /* LED Control Register */
-#define MII_LXT971_TCR 30 /* Transmit Control Register */
+ if (status_change)
+ phy_print_status(phy_dev);
+}
/*
- * I had some nice ideas of running the MDIO faster...
- * The 971 should support 8MHz and I tried it, but things acted really
- * weird, so 2.5 MHz ought to be enough for anyone...
+ * NOTE: an MII transaction takes around 25 us, so we simply poll for completion.
*/
-
-static void mii_parse_lxt971_sr2(uint mii_reg, struct net_device *dev)
+static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
- struct fec_enet_private *fep = netdev_priv(dev);
- volatile uint *s = &(fep->phy_status);
- uint status;
+ struct fec_enet_private *fep = bus->priv;
+ int timeout = FEC_MII_TIMEOUT;
- status = *s & ~(PHY_STAT_SPMASK | PHY_STAT_LINK | PHY_STAT_ANC);
+ fep->mii_timeout = 0;
- if (mii_reg & 0x0400) {
- fep->link = 1;
- status |= PHY_STAT_LINK;
- } else {
- fep->link = 0;
- }
- if (mii_reg & 0x0080)
- status |= PHY_STAT_ANC;
- if (mii_reg & 0x4000) {
- if (mii_reg & 0x0200)
- status |= PHY_STAT_100FDX;
- else
- status |= PHY_STAT_100HDX;
- } else {
- if (mii_reg & 0x0200)
- status |= PHY_STAT_10FDX;
- else
- status |= PHY_STAT_10HDX;
+ /* clear MII end of transfer bit */
+ writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT);
+
+ /* start a read op */
+ writel(FEC_MMFR_ST | FEC_MMFR_OP_READ |
+ FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
+ FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);
+
+ /* wait for end of transfer */
+ while (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_MII)) {
+ cpu_relax();
+ if (timeout-- < 0) {
+ fep->mii_timeout = 1;
+ printk(KERN_ERR "FEC: MDIO read timeout\n");
+ return -ETIMEDOUT;
+ }
}
- if (mii_reg & 0x0008)
- status |= PHY_STAT_FAULT;
- *s = status;
+ /* return value */
+ return FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
}
-static phy_cmd_t const phy_cmd_lxt971_config[] = {
- /* limit to 10MBit because my prototype board
- * doesn't work with 100. */
- { mk_mii_read(MII_REG_CR), mii_parse_cr },
- { mk_mii_read(MII_REG_ANAR), mii_parse_anar },
- { mk_mii_read(MII_LXT971_SR2), mii_parse_lxt971_sr2 },
- { mk_mii_end, }
- };
-static phy_cmd_t const phy_cmd_lxt971_startup[] = { /* enable interrupts */
- { mk_mii_write(MII_LXT971_IER, 0x00f2), NULL },
- { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
- { mk_mii_write(MII_LXT971_LCR, 0xd422), NULL }, /* LED config */
- /* Somehow does the 971 tell me that the link is down
- * the first read after power-up.
- * read here to get a valid value in ack_int */
- { mk_mii_read(MII_REG_SR), mii_parse_sr },
- { mk_mii_end, }
- };
-static phy_cmd_t const phy_cmd_lxt971_ack_int[] = {
- /* acknowledge the int before reading status ! */
- { mk_mii_read(MII_LXT971_ISR), NULL },
- /* find out the current status */
- { mk_mii_read(MII_REG_SR), mii_parse_sr },
- { mk_mii_read(MII_LXT971_SR2), mii_parse_lxt971_sr2 },
- { mk_mii_end, }
- };
-static phy_cmd_t const phy_cmd_lxt971_shutdown[] = { /* disable interrupts */
- { mk_mii_write(MII_LXT971_IER, 0x0000), NULL },
- { mk_mii_end, }
- };
-static phy_info_t const phy_info_lxt971 = {
- .id = 0x0001378e,
- .name = "LXT971",
- .config = phy_cmd_lxt971_config,
- .startup = phy_cmd_lxt971_startup,
- .ack_int = phy_cmd_lxt971_ack_int,
- .shutdown = phy_cmd_lxt971_shutdown
-};
-
-/* ------------------------------------------------------------------------- */
-/* The Quality Semiconductor QS6612 is used on the RPX CLLF */
-
-/* register definitions */
+static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
+ u16 value)
+{
+ struct fec_enet_private *fep = bus->priv;
+ int timeout = FEC_MII_TIMEOUT;
-#define MII_QS6612_MCR 17 /* Mode Control Register */
-#define MII_QS6612_FTR 27 /* Factory Test Register */
-#define MII_QS6612_MCO 28 /* Misc. Control Register */
-#define MII_QS6612_ISR 29 /* Interrupt Source Register */
-#define MII_QS6612_IMR 30 /* Interrupt Mask Register */
-#define MII_QS6612_PCR 31 /* 100BaseTx PHY Control Reg. */
+ fep->mii_timeout = 0;
-static void mii_parse_qs6612_pcr(uint mii_reg, struct net_device *dev)
-{
- struct fec_enet_private *fep = netdev_priv(dev);
- volatile uint *s = &(fep->phy_status);
- uint status;
+ /* clear MII end of transfer bit */
+ writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT);
- status = *s & ~(PHY_STAT_SPMASK);
+ /* start a write op */
+ writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE |
+ FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
+ FEC_MMFR_TA | FEC_MMFR_DATA(value),
+ fep->hwp + FEC_MII_DATA);
+
+ /* wait for end of transfer */
+ while (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_MII)) {
+ cpu_relax();
+ if (timeout-- < 0) {
+ fep->mii_timeout = 1;
+ printk(KERN_ERR "FEC: MDIO write timeout\n");
+ return -ETIMEDOUT;
+ }
+ }
- switch((mii_reg >> 2) & 7) {
- case 1: status |= PHY_STAT_10HDX; break;
- case 2: status |= PHY_STAT_100HDX; break;
- case 5: status |= PHY_STAT_10FDX; break;
- case 6: status |= PHY_STAT_100FDX; break;
+ return 0;
}
- *s = status;
+static int fec_enet_mdio_reset(struct mii_bus *bus)
+{
+ return 0;
}
-static phy_cmd_t const phy_cmd_qs6612_config[] = {
- /* The PHY powers up isolated on the RPX,
- * so send a command to allow operation.
- */
- { mk_mii_write(MII_QS6612_PCR, 0x0dc0), NULL },
-
- /* parse cr and anar to get some info */
- { mk_mii_read(MII_REG_CR), mii_parse_cr },
- { mk_mii_read(MII_REG_ANAR), mii_parse_anar },
- { mk_mii_end, }
- };
-static phy_cmd_t const phy_cmd_qs6612_startup[] = { /* enable interrupts */
- { mk_mii_write(MII_QS6612_IMR, 0x003a), NULL },
- { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
- { mk_mii_end, }
- };
-static phy_cmd_t const phy_cmd_qs6612_ack_int[] = {
- /* we need to read ISR, SR and ANER to acknowledge */
- { mk_mii_read(MII_QS6612_ISR), NULL },
- { mk_mii_read(MII_REG_SR), mii_parse_sr },
- { mk_mii_read(MII_REG_ANER), NULL },
-
- /* read pcr to get info */
- { mk_mii_read(MII_QS6612_PCR), mii_parse_qs6612_pcr },
- { mk_mii_end, }
- };
-static phy_cmd_t const phy_cmd_qs6612_shutdown[] = { /* disable interrupts */
- { mk_mii_write(MII_QS6612_IMR, 0x0000), NULL },
- { mk_mii_end, }
- };
-static phy_info_t const phy_info_qs6612 = {
- .id = 0x00181440,
- .name = "QS6612",
- .config = phy_cmd_qs6612_config,
- .startup = phy_cmd_qs6612_startup,
- .ack_int = phy_cmd_qs6612_ack_int,
- .shutdown = phy_cmd_qs6612_shutdown
-};
-
-/* ------------------------------------------------------------------------- */
-/* AMD AM79C874 phy */
-
-/* register definitions for the 874 */
-
-#define MII_AM79C874_MFR 16 /* Miscellaneous Feature Register */
-#define MII_AM79C874_ICSR 17 /* Interrupt/Status Register */
-#define MII_AM79C874_DR 18 /* Diagnostic Register */
-#define MII_AM79C874_PMLR 19 /* Power and Loopback Register */
-#define MII_AM79C874_MCR 21 /* ModeControl Register */
-#define MII_AM79C874_DC 23 /* Disconnect Counter */
-#define MII_AM79C874_REC 24 /* Recieve Error Counter */
-
-static void mii_parse_am79c874_dr(uint mii_reg, struct net_device *dev)
+static int fec_enet_mii_probe(struct net_device *dev)
{
struct fec_enet_private *fep = netdev_priv(dev);
- volatile uint *s = &(fep->phy_status);
- uint status;
-
- status = *s & ~(PHY_STAT_SPMASK | PHY_STAT_ANC);
-
- if (mii_reg & 0x0080)
- status |= PHY_STAT_ANC;
- if (mii_reg & 0x0400)
- status |= ((mii_reg & 0x0800) ? PHY_STAT_100FDX : PHY_STAT_100HDX);
- else
- status |= ((mii_reg & 0x0800) ? PHY_STAT_10FDX : PHY_STAT_10HDX);
-
- *s = status;
-}
-
-static phy_cmd_t const phy_cmd_am79c874_config[] = {
- { mk_mii_read(MII_REG_CR), mii_parse_cr },
- { mk_mii_read(MII_REG_ANAR), mii_parse_anar },
- { mk_mii_read(MII_AM79C874_DR), mii_parse_am79c874_dr },
- { mk_mii_end, }
- };
-static phy_cmd_t const phy_cmd_am79c874_startup[] = { /* enable interrupts */
- { mk_mii_write(MII_AM79C874_ICSR, 0xff00), NULL },
- { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
- { mk_mii_read(MII_REG_SR), mii_parse_sr },
- { mk_mii_end, }
- };
-static phy_cmd_t const phy_cmd_am79c874_ack_int[] = {
- /* find out the current status */
- { mk_mii_read(MII_REG_SR), mii_parse_sr },
- { mk_mii_read(MII_AM79C874_DR), mii_parse_am79c874_dr },
- /* we only need to read ISR to acknowledge */
- { mk_mii_read(MII_AM79C874_ICSR), NULL },
- { mk_mii_end, }
- };
-static phy_cmd_t const phy_cmd_am79c874_shutdown[] = { /* disable interrupts */
- { mk_mii_write(MII_AM79C874_ICSR, 0x0000), NULL },
- { mk_mii_end, }
- };
-static phy_info_t const phy_info_am79c874 = {
- .id = 0x00022561,
- .name = "AM79C874",
- .config = phy_cmd_am79c874_config,
- .startup = phy_cmd_am79c874_startup,
- .ack_int = phy_cmd_am79c874_ack_int,
- .shutdown = phy_cmd_am79c874_shutdown
-};
+ struct phy_device *phy_dev = NULL;
+ int phy_addr;
+ fep->phy_dev = NULL;
-/* ------------------------------------------------------------------------- */
-/* Kendin KS8721BL phy */
-
-/* register definitions for the 8721 */
-
-#define MII_KS8721BL_RXERCR 21
-#define MII_KS8721BL_ICSR 27
-#define MII_KS8721BL_PHYCR 31
-
-static phy_cmd_t const phy_cmd_ks8721bl_config[] = {
- { mk_mii_read(MII_REG_CR), mii_parse_cr },
- { mk_mii_read(MII_REG_ANAR), mii_parse_anar },
- { mk_mii_end, }
- };
-static phy_cmd_t const phy_cmd_ks8721bl_startup[] = { /* enable interrupts */
- { mk_mii_write(MII_KS8721BL_ICSR, 0xff00), NULL },
- { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
- { mk_mii_read(MII_REG_SR), mii_parse_sr },
- { mk_mii_end, }
- };
-static phy_cmd_t const phy_cmd_ks8721bl_ack_int[] = {
- /* find out the current status */
- { mk_mii_read(MII_REG_SR), mii_parse_sr },
- /* we only need to read ISR to acknowledge */
- { mk_mii_read(MII_KS8721BL_ICSR), NULL },
- { mk_mii_end, }
- };
-static phy_cmd_t const phy_cmd_ks8721bl_shutdown[] = { /* disable interrupts */
- { mk_mii_write(MII_KS8721BL_ICSR, 0x0000), NULL },
- { mk_mii_end, }
- };
-static phy_info_t const phy_info_ks8721bl = {
- .id = 0x00022161,
- .name = "KS8721BL",
- .config = phy_cmd_ks8721bl_config,
- .startup = phy_cmd_ks8721bl_startup,
- .ack_int = phy_cmd_ks8721bl_ack_int,
- .shutdown = phy_cmd_ks8721bl_shutdown
-};
-
-/* ------------------------------------------------------------------------- */
-/* register definitions for the DP83848 */
-
-#define MII_DP8384X_PHYSTST 16 /* PHY Status Register */
-
-static void mii_parse_dp8384x_sr2(uint mii_reg, struct net_device *dev)
-{
- struct fec_enet_private *fep = netdev_priv(dev);
- volatile uint *s = &(fep->phy_status);
-
- *s &= ~(PHY_STAT_SPMASK | PHY_STAT_LINK | PHY_STAT_ANC);
-
- /* Link up */
- if (mii_reg & 0x0001) {
- fep->link = 1;
- *s |= PHY_STAT_LINK;
- } else
- fep->link = 0;
- /* Status of link */
- if (mii_reg & 0x0010) /* Autonegotioation complete */
- *s |= PHY_STAT_ANC;
- if (mii_reg & 0x0002) { /* 10MBps? */
- if (mii_reg & 0x0004) /* Full Duplex? */
- *s |= PHY_STAT_10FDX;
- else
- *s |= PHY_STAT_10HDX;
- } else { /* 100 Mbps? */
- if (mii_reg & 0x0004) /* Full Duplex? */
- *s |= PHY_STAT_100FDX;
- else
- *s |= PHY_STAT_100HDX;
+ /* find the first phy */
+ for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
+ if (fep->mii_bus->phy_map[phy_addr]) {
+ phy_dev = fep->mii_bus->phy_map[phy_addr];
+ break;
+ }
}
- if (mii_reg & 0x0008)
- *s |= PHY_STAT_FAULT;
-}
-static phy_info_t phy_info_dp83848= {
- 0x020005c9,
- "DP83848",
-
- (const phy_cmd_t []) { /* config */
- { mk_mii_read(MII_REG_CR), mii_parse_cr },
- { mk_mii_read(MII_REG_ANAR), mii_parse_anar },
- { mk_mii_read(MII_DP8384X_PHYSTST), mii_parse_dp8384x_sr2 },
- { mk_mii_end, }
- },
- (const phy_cmd_t []) { /* startup - enable interrupts */
- { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
- { mk_mii_read(MII_REG_SR), mii_parse_sr },
- { mk_mii_end, }
- },
- (const phy_cmd_t []) { /* ack_int - never happens, no interrupt */
- { mk_mii_end, }
- },
- (const phy_cmd_t []) { /* shutdown */
- { mk_mii_end, }
- },
-};
+ if (!phy_dev) {
+ printk(KERN_ERR "%s: no PHY found\n", dev->name);
+ return -ENODEV;
+ }
-static phy_info_t phy_info_lan8700 = {
- 0x0007C0C,
- "LAN8700",
- (const phy_cmd_t []) { /* config */
- { mk_mii_read(MII_REG_CR), mii_parse_cr },
- { mk_mii_read(MII_REG_ANAR), mii_parse_anar },
- { mk_mii_end, }
- },
- (const phy_cmd_t []) { /* startup */
- { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
- { mk_mii_read(MII_REG_SR), mii_parse_sr },
- { mk_mii_end, }
- },
- (const phy_cmd_t []) { /* act_int */
- { mk_mii_end, }
- },
- (const phy_cmd_t []) { /* shutdown */
- { mk_mii_end, }
- },
-};
-/* ------------------------------------------------------------------------- */
+ /* attach the mac to the phy */
+ phy_dev = phy_connect(dev, dev_name(&phy_dev->dev),
+ &fec_enet_adjust_link, 0,
+ PHY_INTERFACE_MODE_MII);
+ if (IS_ERR(phy_dev)) {
+ printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
+ return PTR_ERR(phy_dev);
+ }
-static phy_info_t const * const phy_info[] = {
- &phy_info_lxt970,
- &phy_info_lxt971,
- &phy_info_qs6612,
- &phy_info_am79c874,
- &phy_info_ks8721bl,
- &phy_info_dp83848,
- &phy_info_lan8700,
- NULL
-};
+ /* mask with MAC supported features */
+ phy_dev->supported &= PHY_BASIC_FEATURES;
+ phy_dev->advertising = phy_dev->supported;
-/* ------------------------------------------------------------------------- */
-#ifdef HAVE_mii_link_interrupt
-static irqreturn_t
-mii_link_interrupt(int irq, void * dev_id);
+ fep->phy_dev = phy_dev;
+ fep->link = 0;
+ fep->full_duplex = 0;
-/*
- * This is specific to the MII interrupt setup of the M5272EVB.
- */
-static void __inline__ fec_request_mii_intr(struct net_device *dev)
-{
- if (request_irq(66, mii_link_interrupt, IRQF_DISABLED, "fec(MII)", dev) != 0)
- printk("FEC: Could not allocate fec(MII) IRQ(66)!\n");
-}
+ printk(KERN_INFO "%s: Freescale FEC PHY driver [%s] "
+ "(mii_bus:phy_addr=%s, irq=%d)\n", dev->name,
+ fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev),
+ fep->phy_dev->irq);
-static void __inline__ fec_disable_phy_intr(struct net_device *dev)
-{
- free_irq(66, dev);
+ return 0;
}
-#endif
-#ifdef CONFIG_M5272
-static void __inline__ fec_get_mac(struct net_device *dev)
+static int fec_enet_mii_init(struct platform_device *pdev)
{
+ struct net_device *dev = platform_get_drvdata(pdev);
struct fec_enet_private *fep = netdev_priv(dev);
- unsigned char *iap, tmpaddr[ETH_ALEN];
+ int err = -ENXIO, i;
- if (FEC_FLASHMAC) {
- /*
- * Get MAC address from FLASH.
- * If it is all 1's or 0's, use the default.
- */
- iap = (unsigned char *)FEC_FLASHMAC;
- if ((iap[0] == 0) && (iap[1] == 0) && (iap[2] == 0) &&
- (iap[3] == 0) && (iap[4] == 0) && (iap[5] == 0))
- iap = fec_mac_default;
- if ((iap[0] == 0xff) && (iap[1] == 0xff) && (iap[2] == 0xff) &&
- (iap[3] == 0xff) && (iap[4] == 0xff) && (iap[5] == 0xff))
- iap = fec_mac_default;
- } else {
- *((unsigned long *) &tmpaddr[0]) = readl(fep->hwp + FEC_ADDR_LOW);
- *((unsigned short *) &tmpaddr[4]) = (readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
- iap = &tmpaddr[0];
- }
+ fep->mii_timeout = 0;
- memcpy(dev->dev_addr, iap, ETH_ALEN);
-
- /* Adjust MAC if using default MAC address */
- if (iap == fec_mac_default)
- dev->dev_addr[ETH_ALEN-1] = fec_mac_default[ETH_ALEN-1] + fep->index;
-}
-#endif
-
-/* ------------------------------------------------------------------------- */
-
-static void mii_display_status(struct net_device *dev)
-{
- struct fec_enet_private *fep = netdev_priv(dev);
- volatile uint *s = &(fep->phy_status);
+ /*
+ * Set MII speed to 2.5 MHz (= clk_get_rate() / 2 * phy_speed)
+ */
+ fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk), 5000000) << 1;
+ writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
- if (!fep->link && !fep->old_link) {
- /* Link is still down - don't print anything */
- return;
+ fep->mii_bus = mdiobus_alloc();
+ if (fep->mii_bus == NULL) {
+ err = -ENOMEM;
+ goto err_out;
}
- printk("%s: status: ", dev->name);
-
- if (!fep->link) {
- printk("link down");
- } else {
- printk("link up");
-
- switch(*s & PHY_STAT_SPMASK) {
- case PHY_STAT_100FDX: printk(", 100MBit Full Duplex"); break;
- case PHY_STAT_100HDX: printk(", 100MBit Half Duplex"); break;
- case PHY_STAT_10FDX: printk(", 10MBit Full Duplex"); break;
- case PHY_STAT_10HDX: printk(", 10MBit Half Duplex"); break;
- default:
- printk(", Unknown speed/duplex");
- }
-
- if (*s & PHY_STAT_ANC)
- printk(", auto-negotiation complete");
+ fep->mii_bus->name = "fec_enet_mii_bus";
+ fep->mii_bus->read = fec_enet_mdio_read;
+ fep->mii_bus->write = fec_enet_mdio_write;
+ fep->mii_bus->reset = fec_enet_mdio_reset;
+ snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id);
+ fep->mii_bus->priv = fep;
+ fep->mii_bus->parent = &pdev->dev;
+
+ fep->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
+ if (!fep->mii_bus->irq) {
+ err = -ENOMEM;
+ goto err_out_free_mdiobus;
}
- if (*s & PHY_STAT_FAULT)
- printk(", remote fault");
+ for (i = 0; i < PHY_MAX_ADDR; i++)
+ fep->mii_bus->irq[i] = PHY_POLL;
- printk(".\n");
-}
-
-static void mii_display_config(struct work_struct *work)
-{
- struct fec_enet_private *fep = container_of(work, struct fec_enet_private, phy_task);
- struct net_device *dev = fep->netdev;
- uint status = fep->phy_status;
+ platform_set_drvdata(dev, fep->mii_bus);
- /*
- ** When we get here, phy_task is already removed from
- ** the workqueue. It is thus safe to allow to reuse it.
- */
- fep->mii_phy_task_queued = 0;
- printk("%s: config: auto-negotiation ", dev->name);
-
- if (status & PHY_CONF_ANE)
- printk("on");
- else
- printk("off");
+ if (mdiobus_register(fep->mii_bus))
+ goto err_out_free_mdio_irq;
- if (status & PHY_CONF_100FDX)
- printk(", 100FDX");
- if (status & PHY_CONF_100HDX)
- printk(", 100HDX");
- if (status & PHY_CONF_10FDX)
- printk(", 10FDX");
- if (status & PHY_CONF_10HDX)
- printk(", 10HDX");
- if (!(status & PHY_CONF_SPMASK))
- printk(", No speed/duplex selected?");
-
- if (status & PHY_CONF_LOOP)
- printk(", loopback enabled");
-
- printk(".\n");
+ return 0;
- fep->sequence_done = 1;
+err_out_free_mdio_irq:
+ kfree(fep->mii_bus->irq);
+err_out_free_mdiobus:
+ mdiobus_free(fep->mii_bus);
+err_out:
+ return err;
}
-static void mii_relink(struct work_struct *work)
+static void fec_enet_mii_remove(struct fec_enet_private *fep)
{
- struct fec_enet_private *fep = container_of(work, struct fec_enet_private, phy_task);
- struct net_device *dev = fep->netdev;
- int duplex;
-
- /*
- ** When we get here, phy_task is already removed from
- ** the workqueue. It is thus safe to allow to reuse it.
- */
- fep->mii_phy_task_queued = 0;
- fep->link = (fep->phy_status & PHY_STAT_LINK) ? 1 : 0;
- mii_display_status(dev);
- fep->old_link = fep->link;
-
- if (fep->link) {
- duplex = 0;
- if (fep->phy_status
- & (PHY_STAT_100FDX | PHY_STAT_10FDX))
- duplex = 1;
- fec_restart(dev, duplex);
- } else
- fec_stop(dev);
+ if (fep->phy_dev)
+ phy_disconnect(fep->phy_dev);
+ mdiobus_unregister(fep->mii_bus);
+ kfree(fep->mii_bus->irq);
+ mdiobus_free(fep->mii_bus);
}
-/* mii_queue_relink is called in interrupt context from mii_link_interrupt */
-static void mii_queue_relink(uint mii_reg, struct net_device *dev)
+static int fec_enet_get_settings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
{
struct fec_enet_private *fep = netdev_priv(dev);
+ struct phy_device *phydev = fep->phy_dev;
- /*
- * We cannot queue phy_task twice in the workqueue. It
- * would cause an endless loop in the workqueue.
- * Fortunately, if the last mii_relink entry has not yet been
- * executed now, it will do the job for the current interrupt,
- * which is just what we want.
- */
- if (fep->mii_phy_task_queued)
- return;
+ if (!phydev)
+ return -ENODEV;
- fep->mii_phy_task_queued = 1;
- INIT_WORK(&fep->phy_task, mii_relink);
- schedule_work(&fep->phy_task);
+ return phy_ethtool_gset(phydev, cmd);
}
-/* mii_queue_config is called in interrupt context from fec_enet_mii */
-static void mii_queue_config(uint mii_reg, struct net_device *dev)
+static int fec_enet_set_settings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
{
struct fec_enet_private *fep = netdev_priv(dev);
+ struct phy_device *phydev = fep->phy_dev;
- if (fep->mii_phy_task_queued)
- return;
+ if (!phydev)
+ return -ENODEV;
- fep->mii_phy_task_queued = 1;
- INIT_WORK(&fep->phy_task, mii_display_config);
- schedule_work(&fep->phy_task);
+ return phy_ethtool_sset(phydev, cmd);
}
-phy_cmd_t const phy_cmd_relink[] = {
- { mk_mii_read(MII_REG_CR), mii_queue_relink },
- { mk_mii_end, }
- };
-phy_cmd_t const phy_cmd_config[] = {
- { mk_mii_read(MII_REG_CR), mii_queue_config },
- { mk_mii_end, }
- };
-
-/* Read remainder of PHY ID. */
-static void
-mii_discover_phy3(uint mii_reg, struct net_device *dev)
+static void fec_enet_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
{
- struct fec_enet_private *fep;
- int i;
-
- fep = netdev_priv(dev);
- fep->phy_id |= (mii_reg & 0xffff);
- printk("fec: PHY @ 0x%x, ID 0x%08x", fep->phy_addr, fep->phy_id);
-
- for(i = 0; phy_info[i]; i++) {
- if(phy_info[i]->id == (fep->phy_id >> 4))
- break;
- }
-
- if (phy_info[i])
- printk(" -- %s\n", phy_info[i]->name);
- else
- printk(" -- unknown PHY!\n");
+ struct fec_enet_private *fep = netdev_priv(dev);
- fep->phy = phy_info[i];
- fep->phy_id_done = 1;
+ strcpy(info->driver, fep->pdev->dev.driver->name);
+ strcpy(info->version, "Revision: 1.0");
+ strcpy(info->bus_info, dev_name(&dev->dev));
}
-/* Scan all of the MII PHY addresses looking for someone to respond
- * with a valid ID. This usually happens quickly.
- */
-static void
-mii_discover_phy(uint mii_reg, struct net_device *dev)
-{
- struct fec_enet_private *fep;
- uint phytype;
-
- fep = netdev_priv(dev);
-
- if (fep->phy_addr < 32) {
- if ((phytype = (mii_reg & 0xffff)) != 0xffff && phytype != 0) {
-
- /* Got first part of ID, now get remainder */
- fep->phy_id = phytype << 16;
- mii_queue_unlocked(dev, mk_mii_read(MII_REG_PHYIR2),
- mii_discover_phy3);
- } else {
- fep->phy_addr++;
- mii_queue_unlocked(dev, mk_mii_read(MII_REG_PHYIR1),
- mii_discover_phy);
- }
- } else {
- printk("FEC: No PHY device found.\n");
- /* Disable external MII interface */
- writel(0, fep->hwp + FEC_MII_SPEED);
- fep->phy_speed = 0;
-#ifdef HAVE_mii_link_interrupt
- fec_disable_phy_intr(dev);
-#endif
- }
-}
+static struct ethtool_ops fec_enet_ethtool_ops = {
+ .get_settings = fec_enet_get_settings,
+ .set_settings = fec_enet_set_settings,
+ .get_drvinfo = fec_enet_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+};
-/* This interrupt occurs when the PHY detects a link change */
-#ifdef HAVE_mii_link_interrupt
-static irqreturn_t
-mii_link_interrupt(int irq, void * dev_id)
+static int fec_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
- struct net_device *dev = dev_id;
struct fec_enet_private *fep = netdev_priv(dev);
+ struct phy_device *phydev = fep->phy_dev;
- mii_do_cmd(dev, fep->phy->ack_int);
- mii_do_cmd(dev, phy_cmd_relink); /* restart and display status */
+ if (!netif_running(dev))
+ return -EINVAL;
- return IRQ_HANDLED;
+ if (!phydev)
+ return -ENODEV;
+
+ return phy_mii_ioctl(phydev, if_mii(rq), cmd);
}
-#endif
static void fec_enet_free_buffers(struct net_device *dev)
{
@@ -1509,35 +917,13 @@ fec_enet_open(struct net_device *dev)
if (ret)
return ret;
- fep->sequence_done = 0;
- fep->link = 0;
-
- fec_restart(dev, 1);
-
- if (fep->phy) {
- mii_do_cmd(dev, fep->phy->ack_int);
- mii_do_cmd(dev, fep->phy->config);
- mii_do_cmd(dev, phy_cmd_config); /* display configuration */
-
- /* Poll until the PHY tells us its configuration
- * (not link state).
- * Request is initiated by mii_do_cmd above, but answer
- * comes by interrupt.
- * This should take about 25 usec per register at 2.5 MHz,
- * and we read approximately 5 registers.
- */
- while(!fep->sequence_done)
- schedule();
-
- mii_do_cmd(dev, fep->phy->startup);
+ /* Probe for and connect to the PHY when the interface is opened */
+ ret = fec_enet_mii_probe(dev);
+ if (ret) {
+ fec_enet_free_buffers(dev);
+ return ret;
}
-
- /* Set the initial link state to true. A lot of hardware
- * based on this device does not implement a PHY interrupt,
- * so we are never notified of link change.
- */
- fep->link = 1;
-
+ phy_start(fep->phy_dev);
netif_start_queue(dev);
fep->opened = 1;
return 0;
@@ -1553,6 +939,9 @@ fec_enet_close(struct net_device *dev)
netif_stop_queue(dev);
fec_stop(dev);
+ if (fep->phy_dev)
+ phy_disconnect(fep->phy_dev);
+
fec_enet_free_buffers(dev);
return 0;
@@ -1574,7 +963,7 @@ fec_enet_close(struct net_device *dev)
static void set_multicast_list(struct net_device *dev)
{
struct fec_enet_private *fep = netdev_priv(dev);
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
unsigned int i, bit, data, crc, tmp;
unsigned char hash;
@@ -1604,16 +993,16 @@ static void set_multicast_list(struct net_device *dev)
writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
- netdev_for_each_mc_addr(dmi, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
/* Only support group multicast for now */
- if (!(dmi->dmi_addr[0] & 1))
+ if (!(ha->addr[0] & 1))
continue;
/* calculate crc32 value of mac address */
crc = 0xffffffff;
- for (i = 0; i < dmi->dmi_addrlen; i++) {
- data = dmi->dmi_addr[i];
+ for (i = 0; i < dev->addr_len; i++) {
+ data = ha->addr[i];
for (bit = 0; bit < 8; bit++, data >>= 1) {
crc = (crc >> 1) ^
(((crc ^ data) & 1) ? CRC32_POLY : 0);
@@ -1666,6 +1055,7 @@ static const struct net_device_ops fec_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_tx_timeout = fec_timeout,
.ndo_set_mac_address = fec_set_mac_address,
+ .ndo_do_ioctl = fec_enet_ioctl,
};
/*
@@ -1689,7 +1079,6 @@ static int fec_enet_init(struct net_device *dev, int index)
}
spin_lock_init(&fep->hw_lock);
- spin_lock_init(&fep->mii_lock);
fep->index = index;
fep->hwp = (void __iomem *)dev->base_addr;
@@ -1716,20 +1105,10 @@ static int fec_enet_init(struct net_device *dev, int index)
fep->rx_bd_base = cbd_base;
fep->tx_bd_base = cbd_base + RX_RING_SIZE;
-#ifdef HAVE_mii_link_interrupt
- fec_request_mii_intr(dev);
-#endif
/* The FEC Ethernet specific entries in the device structure */
dev->watchdog_timeo = TX_TIMEOUT;
dev->netdev_ops = &fec_netdev_ops;
-
- for (i=0; i<NMII-1; i++)
- mii_cmds[i].mii_next = &mii_cmds[i+1];
- mii_free = mii_cmds;
-
- /* Set MII speed to 2.5 MHz */
- fep->phy_speed = ((((clk_get_rate(fep->clk) / 2 + 4999999)
- / 2500000) / 2) & 0x3F) << 1;
+ dev->ethtool_ops = &fec_enet_ethtool_ops;
/* Initialize the receive buffer descriptors. */
bdp = fep->rx_bd_base;
@@ -1760,13 +1139,6 @@ static int fec_enet_init(struct net_device *dev, int index)
fec_restart(dev, 0);
- /* Queue up command to detect the PHY and initialize the
- * remainder of the interface.
- */
- fep->phy_id_done = 0;
- fep->phy_addr = 0;
- mii_queue(dev, mk_mii_read(MII_REG_PHYIR1), mii_discover_phy);
-
return 0;
}
@@ -1830,13 +1202,27 @@ fec_restart(struct net_device *dev, int duplex)
/* Set MII speed */
writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
+#ifdef FEC_MIIGSK_ENR
+ if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) {
+ /* disable the gasket and wait */
+ writel(0, fep->hwp + FEC_MIIGSK_ENR);
+ while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
+ udelay(1);
+
+ /* configure the gasket: RMII, 50 MHz, no loopback, no echo */
+ writel(1, fep->hwp + FEC_MIIGSK_CFGR);
+
+ /* re-enable the gasket */
+ writel(2, fep->hwp + FEC_MIIGSK_ENR);
+ }
+#endif
+
/* And last, enable the transmit and receive processing */
writel(2, fep->hwp + FEC_ECNTRL);
writel(0, fep->hwp + FEC_R_DES_ACTIVE);
/* Enable interrupts we wish to service */
- writel(FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII,
- fep->hwp + FEC_IMASK);
+ writel(FEC_ENET_TXF | FEC_ENET_RXF, fep->hwp + FEC_IMASK);
}
static void
@@ -1859,7 +1245,6 @@ fec_stop(struct net_device *dev)
/* Clear outstanding MII command interrupts. */
writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT);
- writel(FEC_ENET_MII, fep->hwp + FEC_IMASK);
writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
}
@@ -1867,6 +1252,7 @@ static int __devinit
fec_probe(struct platform_device *pdev)
{
struct fec_enet_private *fep;
+ struct fec_platform_data *pdata;
struct net_device *ndev;
int i, irq, ret = 0;
struct resource *r;
@@ -1891,6 +1277,7 @@ fec_probe(struct platform_device *pdev)
memset(fep, 0, sizeof(*fep));
ndev->base_addr = (unsigned long)ioremap(r->start, resource_size(r));
+ fep->pdev = pdev;
if (!ndev->base_addr) {
ret = -ENOMEM;
@@ -1899,6 +1286,10 @@ fec_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ndev);
+ pdata = pdev->dev.platform_data;
+ if (pdata)
+ fep->phy_interface = pdata->phy;
+
/* This device has up to three irqs on some platforms */
for (i = 0; i < 3; i++) {
irq = platform_get_irq(pdev, i);
@@ -1926,6 +1317,10 @@ fec_probe(struct platform_device *pdev)
if (ret)
goto failed_init;
+ ret = fec_enet_mii_init(pdev);
+ if (ret)
+ goto failed_mii_init;
+
ret = register_netdev(ndev);
if (ret)
goto failed_register;
@@ -1933,6 +1328,8 @@ fec_probe(struct platform_device *pdev)
return 0;
failed_register:
+ fec_enet_mii_remove(fep);
+failed_mii_init:
failed_init:
clk_disable(fep->clk);
clk_put(fep->clk);
@@ -1959,6 +1356,7 @@ fec_drv_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
fec_stop(ndev);
+ fec_enet_mii_remove(fep);
clk_disable(fep->clk);
clk_put(fep->clk);
iounmap((void __iomem *)ndev->base_addr);
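
For reference, a sketch of how the FEC_MMFR_* fields introduced above compose into a single management-frame word; the PHY address and register chosen here are illustrative only:

	/* Read register MII_BMSR (1) from the PHY at address 1. */
	u32 mmfr = FEC_MMFR_ST | FEC_MMFR_OP_READ |
		   FEC_MMFR_PA(1) | FEC_MMFR_RA(MII_BMSR) | FEC_MMFR_TA;
	/*
	 * = 0x40000000 | 0x20000000 | 0x00800000 | 0x00040000 | 0x00020000
	 * = 0x60860000; writing this to FEC_MII_DATA starts the transfer, and
	 * FEC_MMFR_DATA() extracts the 16-bit result once FEC_ENET_MII is set
	 * in FEC_IEVENT.
	 */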
diff --git a/drivers/net/fec.h b/drivers/net/fec.h
index cc47f3f057c7..2c48b25668d5 100644
--- a/drivers/net/fec.h
+++ b/drivers/net/fec.h
@@ -43,6 +43,8 @@
#define FEC_R_DES_START 0x180 /* Receive descriptor ring */
#define FEC_X_DES_START 0x184 /* Transmit descriptor ring */
#define FEC_R_BUFF_SIZE 0x188 /* Maximum receive buff size */
+#define FEC_MIIGSK_CFGR 0x300 /* MIIGSK Configuration reg */
+#define FEC_MIIGSK_ENR 0x308 /* MIIGSK Enable reg */
#else
diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c
index 4a43e56b7394..25e6cc6840b1 100644
--- a/drivers/net/fec_mpc52xx.c
+++ b/drivers/net/fec_mpc52xx.c
@@ -327,7 +327,6 @@ static int mpc52xx_fec_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
spin_lock_irqsave(&priv->lock, flags);
- dev->trans_start = jiffies;
bd = (struct bcom_fec_bd *)
bcom_prepare_next_buffer(priv->tx_dmatsk);
@@ -436,7 +435,6 @@ static irqreturn_t mpc52xx_fec_rx_interrupt(int irq, void *dev_id)
DMA_FROM_DEVICE);
length = status & BCOM_FEC_RX_BD_LEN_MASK;
skb_put(rskb, length - 4); /* length without CRC32 */
- rskb->dev = dev;
rskb->protocol = eth_type_trans(rskb, dev);
netif_rx(rskb);
@@ -576,12 +574,12 @@ static void mpc52xx_fec_set_multicast_list(struct net_device *dev)
out_be32(&fec->gaddr2, 0xffffffff);
} else {
u32 crc;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
u32 gaddr1 = 0x00000000;
u32 gaddr2 = 0x00000000;
- netdev_for_each_mc_addr(dmi, dev) {
- crc = ether_crc_le(6, dmi->dmi_addr) >> 26;
+ netdev_for_each_mc_addr(ha, dev) {
+ crc = ether_crc_le(6, ha->addr) >> 26;
if (crc >= 32)
gaddr1 |= 1 << (crc-32);
else
@@ -873,7 +871,7 @@ mpc52xx_fec_probe(struct of_device *op, const struct of_device_id *match)
priv->ndev = ndev;
/* Reserve FEC control zone */
- rv = of_address_to_resource(op->node, 0, &mem);
+ rv = of_address_to_resource(op->dev.of_node, 0, &mem);
if (rv) {
printk(KERN_ERR DRIVER_NAME ": "
"Error while parsing device node resource\n" );
@@ -921,7 +919,7 @@ mpc52xx_fec_probe(struct of_device *op, const struct of_device_id *match)
/* Get the IRQ we need one by one */
/* Control */
- ndev->irq = irq_of_parse_and_map(op->node, 0);
+ ndev->irq = irq_of_parse_and_map(op->dev.of_node, 0);
/* RX */
priv->r_irq = bcom_get_task_irq(priv->rx_dmatsk);
@@ -944,20 +942,20 @@ mpc52xx_fec_probe(struct of_device *op, const struct of_device_id *match)
/* Start with safe defaults for link connection */
priv->speed = 100;
priv->duplex = DUPLEX_HALF;
- priv->mdio_speed = ((mpc5xxx_get_bus_frequency(op->node) >> 20) / 5) << 1;
+ priv->mdio_speed = ((mpc5xxx_get_bus_frequency(op->dev.of_node) >> 20) / 5) << 1;
/* The current speed preconfigures the speed of the MII link */
- prop = of_get_property(op->node, "current-speed", &prop_size);
+ prop = of_get_property(op->dev.of_node, "current-speed", &prop_size);
if (prop && (prop_size >= sizeof(u32) * 2)) {
priv->speed = prop[0];
priv->duplex = prop[1] ? DUPLEX_FULL : DUPLEX_HALF;
}
/* If there is a phy handle, then get the PHY node */
- priv->phy_node = of_parse_phandle(op->node, "phy-handle", 0);
+ priv->phy_node = of_parse_phandle(op->dev.of_node, "phy-handle", 0);
/* the 7-wire property means don't use MII mode */
- if (of_find_property(op->node, "fsl,7-wire-mode", NULL)) {
+ if (of_find_property(op->dev.of_node, "fsl,7-wire-mode", NULL)) {
priv->seven_wire_mode = 1;
dev_info(&ndev->dev, "using 7-wire PHY mode\n");
}
@@ -1065,9 +1063,11 @@ static struct of_device_id mpc52xx_fec_match[] = {
MODULE_DEVICE_TABLE(of, mpc52xx_fec_match);
static struct of_platform_driver mpc52xx_fec_driver = {
- .owner = THIS_MODULE,
- .name = DRIVER_NAME,
- .match_table = mpc52xx_fec_match,
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = mpc52xx_fec_match,
+ },
.probe = mpc52xx_fec_probe,
.remove = mpc52xx_fec_remove,
#ifdef CONFIG_PM
diff --git a/drivers/net/fec_mpc52xx_phy.c b/drivers/net/fec_mpc52xx_phy.c
index 7658a082e390..006f64d9f96a 100644
--- a/drivers/net/fec_mpc52xx_phy.c
+++ b/drivers/net/fec_mpc52xx_phy.c
@@ -66,7 +66,7 @@ static int mpc52xx_fec_mdio_probe(struct of_device *of,
const struct of_device_id *match)
{
struct device *dev = &of->dev;
- struct device_node *np = of->node;
+ struct device_node *np = of->dev.of_node;
struct mii_bus *bus;
struct mpc52xx_fec_mdio_priv *priv;
struct resource res = {};
@@ -107,7 +107,7 @@ static int mpc52xx_fec_mdio_probe(struct of_device *of,
/* set MII speed */
out_be32(&priv->regs->mii_speed,
- ((mpc5xxx_get_bus_frequency(of->node) >> 20) / 5) << 1);
+ ((mpc5xxx_get_bus_frequency(of->dev.of_node) >> 20) / 5) << 1);
err = of_mdiobus_register(bus, np);
if (err)
@@ -159,10 +159,13 @@ static struct of_device_id mpc52xx_fec_mdio_match[] = {
MODULE_DEVICE_TABLE(of, mpc52xx_fec_mdio_match);
struct of_platform_driver mpc52xx_fec_mdio_driver = {
- .name = "mpc5200b-fec-phy",
+ .driver = {
+ .name = "mpc5200b-fec-phy",
+ .owner = THIS_MODULE,
+ .of_match_table = mpc52xx_fec_mdio_match,
+ },
.probe = mpc52xx_fec_mdio_probe,
.remove = mpc52xx_fec_mdio_remove,
- .match_table = mpc52xx_fec_mdio_match,
};
/* let fec driver call it, since this has to be registered before it */
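As in this mdio driver, every of_platform_driver touched by the patch drops its top-level name/owner/match_table fields in favour of the embedded struct device_driver. A condensed sketch of the new layout; the example_* names and compatible string are placeholders, and registration itself (of_register_platform_driver()) is unchanged:

#include <linux/module.h>
#include <linux/of_platform.h>

static int example_probe(struct of_device *ofdev,
			 const struct of_device_id *match)
{
	return 0;	/* a real probe maps registers and registers a netdev */
}

static int example_remove(struct of_device *ofdev)
{
	return 0;
}

static const struct of_device_id example_match[] = {
	{ .compatible = "example,mdio", },
	{},
};
MODULE_DEVICE_TABLE(of, example_match);

static struct of_platform_driver example_driver = {
	.driver = {
		.name		= "example-mdio",
		.owner		= THIS_MODULE,
		.of_match_table	= example_match,
	},
	.probe	= example_probe,
	.remove	= example_remove,
};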
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 5c98f7c22425..268ea4d566d7 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -1104,20 +1104,16 @@ static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
static void nv_napi_enable(struct net_device *dev)
{
-#ifdef CONFIG_FORCEDETH_NAPI
struct fe_priv *np = get_nvpriv(dev);
napi_enable(&np->napi);
-#endif
}
static void nv_napi_disable(struct net_device *dev)
{
-#ifdef CONFIG_FORCEDETH_NAPI
struct fe_priv *np = get_nvpriv(dev);
napi_disable(&np->napi);
-#endif
}
#define MII_READ (-1)
@@ -1810,7 +1806,6 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
}
/* If rx bufs are exhausted called after 50ms to attempt to refresh */
-#ifdef CONFIG_FORCEDETH_NAPI
static void nv_do_rx_refill(unsigned long data)
{
struct net_device *dev = (struct net_device *) data;
@@ -1819,41 +1814,6 @@ static void nv_do_rx_refill(unsigned long data)
/* Just reschedule NAPI rx processing */
napi_schedule(&np->napi);
}
-#else
-static void nv_do_rx_refill(unsigned long data)
-{
- struct net_device *dev = (struct net_device *) data;
- struct fe_priv *np = netdev_priv(dev);
- int retcode;
-
- if (!using_multi_irqs(dev)) {
- if (np->msi_flags & NV_MSI_X_ENABLED)
- disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
- else
- disable_irq(np->pci_dev->irq);
- } else {
- disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
- }
- if (!nv_optimized(np))
- retcode = nv_alloc_rx(dev);
- else
- retcode = nv_alloc_rx_optimized(dev);
- if (retcode) {
- spin_lock_irq(&np->lock);
- if (!np->in_shutdown)
- mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
- spin_unlock_irq(&np->lock);
- }
- if (!using_multi_irqs(dev)) {
- if (np->msi_flags & NV_MSI_X_ENABLED)
- enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
- else
- enable_irq(np->pci_dev->irq);
- } else {
- enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
- }
-}
-#endif
static void nv_init_rx(struct net_device *dev)
{
@@ -2148,7 +2108,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned int i;
u32 offset = 0;
u32 bcnt;
- u32 size = skb->len-skb->data_len;
+ u32 size = skb_headlen(skb);
u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
u32 empty_slots;
struct ring_desc* put_tx;
@@ -2254,7 +2214,6 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
dprintk("\n");
}
- dev->trans_start = jiffies;
writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
return NETDEV_TX_OK;
}
@@ -2269,7 +2228,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
unsigned int i;
u32 offset = 0;
u32 bcnt;
- u32 size = skb->len-skb->data_len;
+ u32 size = skb_headlen(skb);
u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
u32 empty_slots;
struct ring_desc_ex* put_tx;
@@ -2409,7 +2368,6 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
dprintk("\n");
}
- dev->trans_start = jiffies;
writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
return NETDEV_TX_OK;
}
@@ -2816,11 +2774,7 @@ static int nv_rx_process(struct net_device *dev, int limit)
skb->protocol = eth_type_trans(skb, dev);
dprintk(KERN_DEBUG "%s: nv_rx_process: %d bytes, proto %d accepted.\n",
dev->name, len, skb->protocol);
-#ifdef CONFIG_FORCEDETH_NAPI
- netif_receive_skb(skb);
-#else
- netif_rx(skb);
-#endif
+ napi_gro_receive(&np->napi, skb);
dev->stats.rx_packets++;
dev->stats.rx_bytes += len;
next_pkt:
@@ -2909,27 +2863,14 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
dev->name, len, skb->protocol);
if (likely(!np->vlangrp)) {
-#ifdef CONFIG_FORCEDETH_NAPI
- netif_receive_skb(skb);
-#else
- netif_rx(skb);
-#endif
+ napi_gro_receive(&np->napi, skb);
} else {
vlanflags = le32_to_cpu(np->get_rx.ex->buflow);
if (vlanflags & NV_RX3_VLAN_TAG_PRESENT) {
-#ifdef CONFIG_FORCEDETH_NAPI
- vlan_hwaccel_receive_skb(skb, np->vlangrp,
- vlanflags & NV_RX3_VLAN_TAG_MASK);
-#else
- vlan_hwaccel_rx(skb, np->vlangrp,
- vlanflags & NV_RX3_VLAN_TAG_MASK);
-#endif
+ vlan_gro_receive(&np->napi, np->vlangrp,
+ vlanflags & NV_RX3_VLAN_TAG_MASK, skb);
} else {
-#ifdef CONFIG_FORCEDETH_NAPI
- netif_receive_skb(skb);
-#else
- netif_rx(skb);
-#endif
+ napi_gro_receive(&np->napi, skb);
}
}
@@ -3104,12 +3045,14 @@ static void nv_set_multicast(struct net_device *dev)
if (dev->flags & IFF_ALLMULTI) {
alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0;
} else {
- struct dev_mc_list *walk;
+ struct netdev_hw_addr *ha;
- netdev_for_each_mc_addr(walk, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
+ unsigned char *addr = ha->addr;
u32 a, b;
- a = le32_to_cpu(*(__le32 *) walk->dmi_addr);
- b = le16_to_cpu(*(__le16 *) (&walk->dmi_addr[4]));
+
+ a = le32_to_cpu(*(__le32 *) addr);
+ b = le16_to_cpu(*(__le16 *) (&addr[4]));
alwaysOn[0] &= a;
alwaysOff[0] &= ~a;
alwaysOn[1] &= b;
@@ -3494,10 +3437,6 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
struct net_device *dev = (struct net_device *) data;
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
-#ifndef CONFIG_FORCEDETH_NAPI
- int total_work = 0;
- int loop_count = 0;
-#endif
dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name);
@@ -3514,7 +3453,6 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
nv_msi_workaround(np);
-#ifdef CONFIG_FORCEDETH_NAPI
if (napi_schedule_prep(&np->napi)) {
/*
* Disable further irq's (msix not enabled with napi)
@@ -3523,65 +3461,6 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
__napi_schedule(&np->napi);
}
-#else
- do
- {
- int work = 0;
- if ((work = nv_rx_process(dev, RX_WORK_PER_LOOP))) {
- if (unlikely(nv_alloc_rx(dev))) {
- spin_lock(&np->lock);
- if (!np->in_shutdown)
- mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
- spin_unlock(&np->lock);
- }
- }
-
- spin_lock(&np->lock);
- work += nv_tx_done(dev, TX_WORK_PER_LOOP);
- spin_unlock(&np->lock);
-
- if (!work)
- break;
-
- total_work += work;
-
- loop_count++;
- }
- while (loop_count < max_interrupt_work);
-
- if (nv_change_interrupt_mode(dev, total_work)) {
- /* setup new irq mask */
- writel(np->irqmask, base + NvRegIrqMask);
- }
-
- if (unlikely(np->events & NVREG_IRQ_LINK)) {
- spin_lock(&np->lock);
- nv_link_irq(dev);
- spin_unlock(&np->lock);
- }
- if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
- spin_lock(&np->lock);
- nv_linkchange(dev);
- spin_unlock(&np->lock);
- np->link_timeout = jiffies + LINK_TIMEOUT;
- }
- if (unlikely(np->events & NVREG_IRQ_RECOVER_ERROR)) {
- spin_lock(&np->lock);
- /* disable interrupts on the nic */
- if (!(np->msi_flags & NV_MSI_X_ENABLED))
- writel(0, base + NvRegIrqMask);
- else
- writel(np->irqmask, base + NvRegIrqMask);
- pci_push(base);
-
- if (!np->in_shutdown) {
- np->nic_poll_irq = np->irqmask;
- np->recover_error = 1;
- mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
- }
- spin_unlock(&np->lock);
- }
-#endif
dprintk(KERN_DEBUG "%s: nv_nic_irq completed\n", dev->name);
return IRQ_HANDLED;
@@ -3597,10 +3476,6 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
struct net_device *dev = (struct net_device *) data;
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
-#ifndef CONFIG_FORCEDETH_NAPI
- int total_work = 0;
- int loop_count = 0;
-#endif
dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized\n", dev->name);
@@ -3617,7 +3492,6 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
nv_msi_workaround(np);
-#ifdef CONFIG_FORCEDETH_NAPI
if (napi_schedule_prep(&np->napi)) {
/*
* Disable further irq's (msix not enabled with napi)
@@ -3625,66 +3499,6 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
writel(0, base + NvRegIrqMask);
__napi_schedule(&np->napi);
}
-#else
- do
- {
- int work = 0;
- if ((work = nv_rx_process_optimized(dev, RX_WORK_PER_LOOP))) {
- if (unlikely(nv_alloc_rx_optimized(dev))) {
- spin_lock(&np->lock);
- if (!np->in_shutdown)
- mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
- spin_unlock(&np->lock);
- }
- }
-
- spin_lock(&np->lock);
- work += nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
- spin_unlock(&np->lock);
-
- if (!work)
- break;
-
- total_work += work;
-
- loop_count++;
- }
- while (loop_count < max_interrupt_work);
-
- if (nv_change_interrupt_mode(dev, total_work)) {
- /* setup new irq mask */
- writel(np->irqmask, base + NvRegIrqMask);
- }
-
- if (unlikely(np->events & NVREG_IRQ_LINK)) {
- spin_lock(&np->lock);
- nv_link_irq(dev);
- spin_unlock(&np->lock);
- }
- if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
- spin_lock(&np->lock);
- nv_linkchange(dev);
- spin_unlock(&np->lock);
- np->link_timeout = jiffies + LINK_TIMEOUT;
- }
- if (unlikely(np->events & NVREG_IRQ_RECOVER_ERROR)) {
- spin_lock(&np->lock);
- /* disable interrupts on the nic */
- if (!(np->msi_flags & NV_MSI_X_ENABLED))
- writel(0, base + NvRegIrqMask);
- else
- writel(np->irqmask, base + NvRegIrqMask);
- pci_push(base);
-
- if (!np->in_shutdown) {
- np->nic_poll_irq = np->irqmask;
- np->recover_error = 1;
- mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
- }
- spin_unlock(&np->lock);
- }
-
-#endif
dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized completed\n", dev->name);
return IRQ_HANDLED;
@@ -3733,7 +3547,6 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data)
return IRQ_RETVAL(i);
}
-#ifdef CONFIG_FORCEDETH_NAPI
static int nv_napi_poll(struct napi_struct *napi, int budget)
{
struct fe_priv *np = container_of(napi, struct fe_priv, napi);
@@ -3741,23 +3554,27 @@ static int nv_napi_poll(struct napi_struct *napi, int budget)
u8 __iomem *base = get_hwbase(dev);
unsigned long flags;
int retcode;
- int tx_work, rx_work;
+ int rx_count, tx_work=0, rx_work=0;
- if (!nv_optimized(np)) {
- spin_lock_irqsave(&np->lock, flags);
- tx_work = nv_tx_done(dev, np->tx_ring_size);
- spin_unlock_irqrestore(&np->lock, flags);
+ do {
+ if (!nv_optimized(np)) {
+ spin_lock_irqsave(&np->lock, flags);
+ tx_work += nv_tx_done(dev, np->tx_ring_size);
+ spin_unlock_irqrestore(&np->lock, flags);
- rx_work = nv_rx_process(dev, budget);
- retcode = nv_alloc_rx(dev);
- } else {
- spin_lock_irqsave(&np->lock, flags);
- tx_work = nv_tx_done_optimized(dev, np->tx_ring_size);
- spin_unlock_irqrestore(&np->lock, flags);
+ rx_count = nv_rx_process(dev, budget - rx_work);
+ retcode = nv_alloc_rx(dev);
+ } else {
+ spin_lock_irqsave(&np->lock, flags);
+ tx_work += nv_tx_done_optimized(dev, np->tx_ring_size);
+ spin_unlock_irqrestore(&np->lock, flags);
- rx_work = nv_rx_process_optimized(dev, budget);
- retcode = nv_alloc_rx_optimized(dev);
- }
+ rx_count = nv_rx_process_optimized(dev,
+ budget - rx_work);
+ retcode = nv_alloc_rx_optimized(dev);
+ }
+ } while (retcode == 0 &&
+ rx_count > 0 && (rx_work += rx_count) < budget);
if (retcode) {
spin_lock_irqsave(&np->lock, flags);
@@ -3800,7 +3617,6 @@ static int nv_napi_poll(struct napi_struct *napi, int budget)
}
return rx_work;
}
-#endif
static irqreturn_t nv_nic_irq_rx(int foo, void *data)
{
@@ -5706,6 +5522,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
dev->features |= NETIF_F_TSO;
+ dev->features |= NETIF_F_GRO;
}
np->vlanctl_bits = 0;
@@ -5758,9 +5575,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
else
dev->netdev_ops = &nv_netdev_ops_optimized;
-#ifdef CONFIG_FORCEDETH_NAPI
netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP);
-#endif
SET_ETHTOOL_OPS(dev, &ops);
dev->watchdog_timeo = NV_WATCHDOG_TIMEO;
@@ -5863,7 +5678,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
/* msix has had reported issues when modifying irqmask
as in the case of napi, therefore, disable for now
*/
-#ifndef CONFIG_FORCEDETH_NAPI
+#if 0
np->msi_flags |= NV_MSI_X_CAPABLE;
#endif
}
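With the CONFIG_FORCEDETH_NAPI option folded away, forcedeth has a single NAPI-only receive completion path and now feeds it through GRO. A reduced sketch of that path; it reuses forcedeth's own names (struct fe_priv, np->napi, np->vlangrp, the NV_RX3_* descriptor bits), so it only makes sense inside forcedeth.c, and the function name plus the assumption that vlanflags was already read from the descriptor are added here:

static void example_rx_complete(struct fe_priv *np, struct net_device *dev,
				struct sk_buff *skb, u32 vlanflags)
{
	unsigned int len = skb->len;

	skb->protocol = eth_type_trans(skb, dev);

	if (np->vlangrp && (vlanflags & NV_RX3_VLAN_TAG_PRESENT))
		/* GRO-aware replacement for vlan_hwaccel_rx()/vlan_hwaccel_receive_skb() */
		vlan_gro_receive(&np->napi, np->vlangrp,
				 vlanflags & NV_RX3_VLAN_TAG_MASK, skb);
	else
		napi_gro_receive(&np->napi, skb);

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += len;
}

GRO only kicks in because the probe hunk also sets dev->features |= NETIF_F_GRO. The tx-side switch to skb_headlen(skb) is purely a readability change: it is defined as skb->len - skb->data_len.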
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
index 0770e2f6da6b..309a0eaddd81 100644
--- a/drivers/net/fs_enet/fs_enet-main.c
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -674,8 +674,6 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb->data, skb->len, DMA_TO_DEVICE));
CBDW_DATLEN(bdp, skb->len);
- dev->trans_start = jiffies;
-
/*
* If this was the last BD in the ring, start at the beginning again.
*/
@@ -1015,7 +1013,7 @@ static int __devinit fs_enet_probe(struct of_device *ofdev,
return -ENOMEM;
if (!IS_FEC(match)) {
- data = of_get_property(ofdev->node, "fsl,cpm-command", &len);
+ data = of_get_property(ofdev->dev.of_node, "fsl,cpm-command", &len);
if (!data || len != 4)
goto out_free_fpi;
@@ -1027,8 +1025,8 @@ static int __devinit fs_enet_probe(struct of_device *ofdev,
fpi->rx_copybreak = 240;
fpi->use_napi = 1;
fpi->napi_weight = 17;
- fpi->phy_node = of_parse_phandle(ofdev->node, "phy-handle", 0);
- if ((!fpi->phy_node) && (!of_get_property(ofdev->node, "fixed-link",
+ fpi->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0);
+ if ((!fpi->phy_node) && (!of_get_property(ofdev->dev.of_node, "fixed-link",
NULL)))
goto out_free_fpi;
@@ -1061,7 +1059,7 @@ static int __devinit fs_enet_probe(struct of_device *ofdev,
spin_lock_init(&fep->lock);
spin_lock_init(&fep->tx_lock);
- mac_addr = of_get_mac_address(ofdev->node);
+ mac_addr = of_get_mac_address(ofdev->dev.of_node);
if (mac_addr)
memcpy(ndev->dev_addr, mac_addr, 6);
@@ -1158,8 +1156,11 @@ static struct of_device_id fs_enet_match[] = {
MODULE_DEVICE_TABLE(of, fs_enet_match);
static struct of_platform_driver fs_enet_driver = {
- .name = "fs_enet",
- .match_table = fs_enet_match,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "fs_enet",
+ .of_match_table = fs_enet_match,
+ },
.probe = fs_enet_probe,
.remove = fs_enet_remove,
};
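fs_enet_probe() shows the other pattern repeated throughout the patch: the device-tree node is no longer a field of struct of_device itself but lives in the embedded struct device, so every ofdev->node becomes ofdev->dev.of_node. A minimal probe() sketch of the accessor change, using the same of_iomap()/irq_of_parse_and_map() helpers as the drivers above (the names are placeholders, and unlike a real probe it tears the mapping down again immediately):

static int __devinit example_probe(struct of_device *ofdev,
				   const struct of_device_id *match)
{
	/* was: struct device_node *np = ofdev->node; */
	struct device_node *np = ofdev->dev.of_node;
	void __iomem *regs;
	int irq;

	irq = irq_of_parse_and_map(np, 0);
	if (irq == NO_IRQ)
		return -EINVAL;

	regs = of_iomap(np, 0);
	if (!regs)
		return -ENOMEM;

	/* sketch only: a real driver keeps the mapping and the IRQ */
	iounmap(regs);
	return 0;
}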
diff --git a/drivers/net/fs_enet/mac-fcc.c b/drivers/net/fs_enet/mac-fcc.c
index 0a973e71876b..5d45084b287d 100644
--- a/drivers/net/fs_enet/mac-fcc.c
+++ b/drivers/net/fs_enet/mac-fcc.c
@@ -88,19 +88,19 @@ static int do_pd_setup(struct fs_enet_private *fep)
struct fs_platform_info *fpi = fep->fpi;
int ret = -EINVAL;
- fep->interrupt = of_irq_to_resource(ofdev->node, 0, NULL);
+ fep->interrupt = of_irq_to_resource(ofdev->dev.of_node, 0, NULL);
if (fep->interrupt == NO_IRQ)
goto out;
- fep->fcc.fccp = of_iomap(ofdev->node, 0);
+ fep->fcc.fccp = of_iomap(ofdev->dev.of_node, 0);
if (!fep->fcc.fccp)
goto out;
- fep->fcc.ep = of_iomap(ofdev->node, 1);
+ fep->fcc.ep = of_iomap(ofdev->dev.of_node, 1);
if (!fep->fcc.ep)
goto out_fccp;
- fep->fcc.fcccp = of_iomap(ofdev->node, 2);
+ fep->fcc.fcccp = of_iomap(ofdev->dev.of_node, 2);
if (!fep->fcc.fcccp)
goto out_ep;
@@ -231,12 +231,12 @@ static void set_multicast_finish(struct net_device *dev)
static void set_multicast_list(struct net_device *dev)
{
- struct dev_mc_list *pmc;
+ struct netdev_hw_addr *ha;
if ((dev->flags & IFF_PROMISC) == 0) {
set_multicast_start(dev);
- netdev_for_each_mc_addr(pmc, dev)
- set_multicast_one(dev, pmc->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev)
+ set_multicast_one(dev, ha->addr);
set_multicast_finish(dev);
} else
set_promiscuous_mode(dev);
diff --git a/drivers/net/fs_enet/mac-fec.c b/drivers/net/fs_enet/mac-fec.c
index ec81f50d5919..7ca1642276d0 100644
--- a/drivers/net/fs_enet/mac-fec.c
+++ b/drivers/net/fs_enet/mac-fec.c
@@ -98,11 +98,11 @@ static int do_pd_setup(struct fs_enet_private *fep)
{
struct of_device *ofdev = to_of_device(fep->dev);
- fep->interrupt = of_irq_to_resource(ofdev->node, 0, NULL);
+ fep->interrupt = of_irq_to_resource(ofdev->dev.of_node, 0, NULL);
if (fep->interrupt == NO_IRQ)
return -EINVAL;
- fep->fec.fecp = of_iomap(ofdev->node, 0);
+ fep->fec.fecp = of_iomap(ofdev->dev.of_node, 0);
if (!fep->fcc.fccp)
return -EINVAL;
@@ -232,12 +232,12 @@ static void set_multicast_finish(struct net_device *dev)
static void set_multicast_list(struct net_device *dev)
{
- struct dev_mc_list *pmc;
+ struct netdev_hw_addr *ha;
if ((dev->flags & IFF_PROMISC) == 0) {
set_multicast_start(dev);
- netdev_for_each_mc_addr(pmc, dev)
- set_multicast_one(dev, pmc->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev)
+ set_multicast_one(dev, ha->addr);
set_multicast_finish(dev);
} else
set_promiscuous_mode(dev);
diff --git a/drivers/net/fs_enet/mac-scc.c b/drivers/net/fs_enet/mac-scc.c
index 34d3da751eb4..a3c44544846d 100644
--- a/drivers/net/fs_enet/mac-scc.c
+++ b/drivers/net/fs_enet/mac-scc.c
@@ -98,15 +98,15 @@ static int do_pd_setup(struct fs_enet_private *fep)
{
struct of_device *ofdev = to_of_device(fep->dev);
- fep->interrupt = of_irq_to_resource(ofdev->node, 0, NULL);
+ fep->interrupt = of_irq_to_resource(ofdev->dev.of_node, 0, NULL);
if (fep->interrupt == NO_IRQ)
return -EINVAL;
- fep->scc.sccp = of_iomap(ofdev->node, 0);
+ fep->scc.sccp = of_iomap(ofdev->dev.of_node, 0);
if (!fep->scc.sccp)
return -EINVAL;
- fep->scc.ep = of_iomap(ofdev->node, 1);
+ fep->scc.ep = of_iomap(ofdev->dev.of_node, 1);
if (!fep->scc.ep) {
iounmap(fep->scc.sccp);
return -EINVAL;
@@ -223,12 +223,12 @@ static void set_multicast_finish(struct net_device *dev)
static void set_multicast_list(struct net_device *dev)
{
- struct dev_mc_list *pmc;
+ struct netdev_hw_addr *ha;
if ((dev->flags & IFF_PROMISC) == 0) {
set_multicast_start(dev);
- netdev_for_each_mc_addr(pmc, dev)
- set_multicast_one(dev, pmc->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev)
+ set_multicast_one(dev, ha->addr);
set_multicast_finish(dev);
} else
set_promiscuous_mode(dev);
diff --git a/drivers/net/fs_enet/mii-bitbang.c b/drivers/net/fs_enet/mii-bitbang.c
index 24ff9f43a62b..0f90685d3d19 100644
--- a/drivers/net/fs_enet/mii-bitbang.c
+++ b/drivers/net/fs_enet/mii-bitbang.c
@@ -224,8 +224,11 @@ static struct of_device_id fs_enet_mdio_bb_match[] = {
MODULE_DEVICE_TABLE(of, fs_enet_mdio_bb_match);
static struct of_platform_driver fs_enet_bb_mdio_driver = {
- .name = "fsl-bb-mdio",
- .match_table = fs_enet_mdio_bb_match,
+ .driver = {
+ .name = "fsl-bb-mdio",
+ .owner = THIS_MODULE,
+ .of_match_table = fs_enet_mdio_bb_match,
+ },
.probe = fs_enet_mdio_probe,
.remove = fs_enet_mdio_remove,
};
diff --git a/drivers/net/fs_enet/mii-fec.c b/drivers/net/fs_enet/mii-fec.c
index 5944b65082cb..bddffd169b93 100644
--- a/drivers/net/fs_enet/mii-fec.c
+++ b/drivers/net/fs_enet/mii-fec.c
@@ -124,7 +124,7 @@ static int __devinit fs_enet_mdio_probe(struct of_device *ofdev,
new_bus->write = &fs_enet_fec_mii_write;
new_bus->reset = &fs_enet_fec_mii_reset;
- ret = of_address_to_resource(ofdev->node, 0, &res);
+ ret = of_address_to_resource(ofdev->dev.of_node, 0, &res);
if (ret)
goto out_res;
@@ -135,7 +135,7 @@ static int __devinit fs_enet_mdio_probe(struct of_device *ofdev,
goto out_fec;
if (get_bus_freq) {
- clock = get_bus_freq(ofdev->node);
+ clock = get_bus_freq(ofdev->dev.of_node);
if (!clock) {
/* Use maximum divider if clock is unknown */
dev_warn(&ofdev->dev, "could not determine IPS clock\n");
@@ -172,7 +172,7 @@ static int __devinit fs_enet_mdio_probe(struct of_device *ofdev,
new_bus->parent = &ofdev->dev;
dev_set_drvdata(&ofdev->dev, new_bus);
- ret = of_mdiobus_register(new_bus, ofdev->node);
+ ret = of_mdiobus_register(new_bus, ofdev->dev.of_node);
if (ret)
goto out_free_irqs;
@@ -222,8 +222,11 @@ static struct of_device_id fs_enet_mdio_fec_match[] = {
MODULE_DEVICE_TABLE(of, fs_enet_mdio_fec_match);
static struct of_platform_driver fs_enet_fec_mdio_driver = {
- .name = "fsl-fec-mdio",
- .match_table = fs_enet_mdio_fec_match,
+ .driver = {
+ .name = "fsl-fec-mdio",
+ .owner = THIS_MODULE,
+ .of_match_table = fs_enet_mdio_fec_match,
+ },
.probe = fs_enet_mdio_probe,
.remove = fs_enet_mdio_remove,
};
diff --git a/drivers/net/fsl_pq_mdio.c b/drivers/net/fsl_pq_mdio.c
index 3acac5f930c8..b4c41d72c423 100644
--- a/drivers/net/fsl_pq_mdio.c
+++ b/drivers/net/fsl_pq_mdio.c
@@ -267,7 +267,7 @@ static int get_ucc_id_for_range(u64 start, u64 end, u32 *ucc_id)
static int fsl_pq_mdio_probe(struct of_device *ofdev,
const struct of_device_id *match)
{
- struct device_node *np = ofdev->node;
+ struct device_node *np = ofdev->dev.of_node;
struct device_node *tbi;
struct fsl_pq_mdio_priv *priv;
struct fsl_pq_mdio __iomem *regs = NULL;
@@ -277,15 +277,17 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev,
int tbiaddr = -1;
const u32 *addrp;
u64 addr = 0, size = 0;
- int err = 0;
+ int err;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
new_bus = mdiobus_alloc();
- if (NULL == new_bus)
+ if (!new_bus) {
+ err = -ENOMEM;
goto err_free_priv;
+ }
new_bus->name = "Freescale PowerQUICC MII Bus",
new_bus->read = &fsl_pq_mdio_read,
@@ -469,10 +471,13 @@ static struct of_device_id fsl_pq_mdio_match[] = {
MODULE_DEVICE_TABLE(of, fsl_pq_mdio_match);
static struct of_platform_driver fsl_pq_mdio_driver = {
- .name = "fsl-pq_mdio",
+ .driver = {
+ .name = "fsl-pq_mdio",
+ .owner = THIS_MODULE,
+ .of_match_table = fsl_pq_mdio_match,
+ },
.probe = fsl_pq_mdio_probe,
.remove = fsl_pq_mdio_remove,
- .match_table = fsl_pq_mdio_match,
};
int __init fsl_pq_mdio_init(void)
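Besides the node-accessor and driver-struct conversions, the fsl_pq_mdio hunk fixes a quiet bug: err was pre-initialised to 0, so a failed mdiobus_alloc() sent probe() down the error path yet still returned success. A stripped-down sketch of the corrected pattern; the priv structure and label names are placeholders:

#include <linux/slab.h>
#include <linux/phy.h>
#include <linux/of_platform.h>

struct example_priv {
	struct mii_bus *bus;
};

static int example_mdio_probe(struct of_device *ofdev,
			      const struct of_device_id *match)
{
	struct example_priv *priv;
	struct mii_bus *bus;
	int err;			/* deliberately no default value */

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	bus = mdiobus_alloc();
	if (!bus) {
		err = -ENOMEM;		/* previously left at 0 */
		goto err_free_priv;
	}
	priv->bus = bus;

	return 0;

err_free_priv:
	kfree(priv);
	return err;
}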
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 4e97ca182997..1830f3199cb5 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -82,6 +82,7 @@
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in.h>
+#include <linux/net_tstamp.h>
#include <asm/io.h>
#include <asm/irq.h>
@@ -377,6 +378,13 @@ static void gfar_init_mac(struct net_device *ndev)
rctrl |= RCTRL_PADDING(priv->padding);
}
+ /* Insert receive time stamps into padding alignment bytes */
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) {
+ rctrl &= ~RCTRL_PAL_MASK;
+ rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE | RCTRL_PADDING(8);
+ priv->padding = 8;
+ }
+
/* keep vlan related bits if it's enabled */
if (priv->vlgrp) {
rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
@@ -501,7 +509,8 @@ void unlock_tx_qs(struct gfar_private *priv)
/* Returns 1 if incoming frames use an FCB */
static inline int gfar_uses_fcb(struct gfar_private *priv)
{
- return priv->vlgrp || priv->rx_csum_enable;
+ return priv->vlgrp || priv->rx_csum_enable ||
+ (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER);
}
static void free_tx_pointers(struct gfar_private *priv)
@@ -599,7 +608,7 @@ static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev)
int err = 0, i;
struct net_device *dev = NULL;
struct gfar_private *priv = NULL;
- struct device_node *np = ofdev->node;
+ struct device_node *np = ofdev->dev.of_node;
struct device_node *child = NULL;
const u32 *stash;
const u32 *stash_len;
@@ -637,7 +646,7 @@ static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev)
return -ENOMEM;
priv = netdev_priv(dev);
- priv->node = ofdev->node;
+ priv->node = ofdev->dev.of_node;
priv->ndev = dev;
dev->num_tx_queues = num_tx_qs;
@@ -738,7 +747,8 @@ static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev)
FSL_GIANFAR_DEV_HAS_CSUM |
FSL_GIANFAR_DEV_HAS_VLAN |
FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
- FSL_GIANFAR_DEV_HAS_EXTENDED_HASH;
+ FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
+ FSL_GIANFAR_DEV_HAS_TIMER;
ctype = of_get_property(np, "phy-connection-type", NULL);
@@ -768,6 +778,48 @@ err_grp_init:
return err;
}
+static int gfar_hwtstamp_ioctl(struct net_device *netdev,
+ struct ifreq *ifr, int cmd)
+{
+ struct hwtstamp_config config;
+ struct gfar_private *priv = netdev_priv(netdev);
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ /* reserved for future extensions */
+ if (config.flags)
+ return -EINVAL;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ priv->hwts_tx_en = 0;
+ break;
+ case HWTSTAMP_TX_ON:
+ if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
+ return -ERANGE;
+ priv->hwts_tx_en = 1;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ priv->hwts_rx_en = 0;
+ break;
+ default:
+ if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
+ return -ERANGE;
+ priv->hwts_rx_en = 1;
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ }
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
/* Ioctl MII Interface */
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
@@ -776,6 +828,9 @@ static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
if (!netif_running(dev))
return -EINVAL;
+ if (cmd == SIOCSHWTSTAMP)
+ return gfar_hwtstamp_ioctl(dev, rq, cmd);
+
if (!priv->phydev)
return -ENODEV;
@@ -884,7 +939,7 @@ static int gfar_probe(struct of_device *ofdev,
priv = netdev_priv(dev);
priv->ndev = dev;
priv->ofdev = ofdev;
- priv->node = ofdev->node;
+ priv->node = ofdev->dev.of_node;
SET_NETDEV_DEV(dev, &ofdev->dev);
spin_lock_init(&priv->bflock);
@@ -978,7 +1033,8 @@ static int gfar_probe(struct of_device *ofdev,
else
priv->padding = 0;
- if (dev->features & NETIF_F_IP_CSUM)
+ if (dev->features & NETIF_F_IP_CSUM ||
+ priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
dev->hard_header_len += GMAC_FCB_LEN;
/* Program the isrg regs only if number of grps > 1 */
@@ -1288,21 +1344,9 @@ static struct dev_pm_ops gfar_pm_ops = {
#define GFAR_PM_OPS (&gfar_pm_ops)
-static int gfar_legacy_suspend(struct of_device *ofdev, pm_message_t state)
-{
- return gfar_suspend(&ofdev->dev);
-}
-
-static int gfar_legacy_resume(struct of_device *ofdev)
-{
- return gfar_resume(&ofdev->dev);
-}
-
#else
#define GFAR_PM_OPS NULL
-#define gfar_legacy_suspend NULL
-#define gfar_legacy_resume NULL
#endif
@@ -1649,6 +1693,7 @@ static void free_skb_resources(struct gfar_private *priv)
sizeof(struct rxbd8) * priv->total_rx_ring_size,
priv->tx_queue[0]->tx_bd_base,
priv->tx_queue[0]->tx_bd_dma_base);
+ skb_queue_purge(&priv->rx_recycle);
}
void gfar_start(struct net_device *dev)
@@ -1682,7 +1727,7 @@ void gfar_start(struct net_device *dev)
gfar_write(&regs->imask, IMASK_DEFAULT);
}
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
}
void gfar_configure_coalescing(struct gfar_private *priv,
@@ -1922,23 +1967,29 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct netdev_queue *txq;
struct gfar __iomem *regs = NULL;
struct txfcb *fcb = NULL;
- struct txbd8 *txbdp, *txbdp_start, *base;
+ struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
u32 lstatus;
- int i, rq = 0;
+ int i, rq = 0, do_tstamp = 0;
u32 bufaddr;
unsigned long flags;
- unsigned int nr_frags, length;
-
+ unsigned int nr_frags, nr_txbds, length;
+ union skb_shared_tx *shtx;
rq = skb->queue_mapping;
tx_queue = priv->tx_queue[rq];
txq = netdev_get_tx_queue(dev, rq);
base = tx_queue->tx_bd_base;
regs = tx_queue->grp->regs;
+ shtx = skb_tx(skb);
+
+ /* check if time stamp should be generated */
+ if (unlikely(shtx->hardware && priv->hwts_tx_en))
+ do_tstamp = 1;
/* make space for additional header when fcb is needed */
if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
- (priv->vlgrp && vlan_tx_tag_present(skb))) &&
+ (priv->vlgrp && vlan_tx_tag_present(skb)) ||
+ unlikely(do_tstamp)) &&
(skb_headroom(skb) < GMAC_FCB_LEN)) {
struct sk_buff *skb_new;
@@ -1955,8 +2006,14 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* total number of fragments in the SKB */
nr_frags = skb_shinfo(skb)->nr_frags;
+ /* calculate the required number of TxBDs for this skb */
+ if (unlikely(do_tstamp))
+ nr_txbds = nr_frags + 2;
+ else
+ nr_txbds = nr_frags + 1;
+
/* check if there is space to queue this packet */
- if ((nr_frags+1) > tx_queue->num_txbdfree) {
+ if (nr_txbds > tx_queue->num_txbdfree) {
/* no space, stop the queue */
netif_tx_stop_queue(txq);
dev->stats.tx_fifo_errors++;
@@ -1968,9 +2025,19 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
txq->tx_packets ++;
txbdp = txbdp_start = tx_queue->cur_tx;
+ lstatus = txbdp->lstatus;
+
+ /* Time stamp insertion requires one additional TxBD */
+ if (unlikely(do_tstamp))
+ txbdp_tstamp = txbdp = next_txbd(txbdp, base,
+ tx_queue->tx_ring_size);
if (nr_frags == 0) {
- lstatus = txbdp->lstatus | BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
+ if (unlikely(do_tstamp))
+ txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST |
+ TXBD_INTERRUPT);
+ else
+ lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
} else {
/* Place the fragment addresses and lengths into the TxBDs */
for (i = 0; i < nr_frags; i++) {
@@ -2016,11 +2083,32 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
gfar_tx_vlan(skb, fcb);
}
- /* setup the TxBD length and buffer pointer for the first BD */
+ /* Setup tx hardware time stamping if requested */
+ if (unlikely(do_tstamp)) {
+ shtx->in_progress = 1;
+ if (fcb == NULL)
+ fcb = gfar_add_fcb(skb);
+ fcb->ptp = 1;
+ lstatus |= BD_LFLAG(TXBD_TOE);
+ }
+
txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
skb_headlen(skb), DMA_TO_DEVICE);
- lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
+ /*
+ * If time stamping is requested one additional TxBD must be set up. The
+ * first TxBD points to the FCB and must have a data length of
+ * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
+ * the full frame length.
+ */
+ if (unlikely(do_tstamp)) {
+ txbdp_tstamp->bufPtr = txbdp_start->bufPtr + GMAC_FCB_LEN;
+ txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) |
+ (skb_headlen(skb) - GMAC_FCB_LEN);
+ lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
+ } else {
+ lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
+ }
/*
* We can work in parallel with gfar_clean_tx_ring(), except
@@ -2060,9 +2148,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
/* reduce TxBD free count */
- tx_queue->num_txbdfree -= (nr_frags + 1);
-
- dev->trans_start = jiffies;
+ tx_queue->num_txbdfree -= (nr_txbds);
/* If the next BD still needs to be cleaned up, then the bds
are full. We need to tell the kernel to stop sending us stuff. */
@@ -2088,7 +2174,6 @@ static int gfar_close(struct net_device *dev)
disable_napi(priv);
- skb_queue_purge(&priv->rx_recycle);
cancel_work_sync(&priv->reset_task);
stop_gfar(dev);
@@ -2251,16 +2336,18 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
struct net_device *dev = tx_queue->dev;
struct gfar_private *priv = netdev_priv(dev);
struct gfar_priv_rx_q *rx_queue = NULL;
- struct txbd8 *bdp;
+ struct txbd8 *bdp, *next = NULL;
struct txbd8 *lbdp = NULL;
struct txbd8 *base = tx_queue->tx_bd_base;
struct sk_buff *skb;
int skb_dirtytx;
int tx_ring_size = tx_queue->tx_ring_size;
- int frags = 0;
+ int frags = 0, nr_txbds = 0;
int i;
int howmany = 0;
u32 lstatus;
+ size_t buflen;
+ union skb_shared_tx *shtx;
rx_queue = priv->rx_queue[tx_queue->qindex];
bdp = tx_queue->dirty_tx;
@@ -2270,7 +2357,18 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
unsigned long flags;
frags = skb_shinfo(skb)->nr_frags;
- lbdp = skip_txbd(bdp, frags, base, tx_ring_size);
+
+ /*
+ * When time stamping, one additional TxBD must be freed.
+ * Also, we need to dma_unmap_single() the TxPAL.
+ */
+ shtx = skb_tx(skb);
+ if (unlikely(shtx->in_progress))
+ nr_txbds = frags + 2;
+ else
+ nr_txbds = frags + 1;
+
+ lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);
lstatus = lbdp->lstatus;
@@ -2279,10 +2377,24 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
(lstatus & BD_LENGTH_MASK))
break;
- dma_unmap_single(&priv->ofdev->dev,
- bdp->bufPtr,
- bdp->length,
- DMA_TO_DEVICE);
+ if (unlikely(shtx->in_progress)) {
+ next = next_txbd(bdp, base, tx_ring_size);
+ buflen = next->length + GMAC_FCB_LEN;
+ } else
+ buflen = bdp->length;
+
+ dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
+ buflen, DMA_TO_DEVICE);
+
+ if (unlikely(shtx->in_progress)) {
+ struct skb_shared_hwtstamps shhwtstamps;
+ u64 *ns = (u64*) (((u32)skb->data + 0x10) & ~0x7);
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+ shhwtstamps.hwtstamp = ns_to_ktime(*ns);
+ skb_tstamp_tx(skb, &shhwtstamps);
+ bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
+ bdp = next;
+ }
bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
bdp = next_txbd(bdp, base, tx_ring_size);
@@ -2314,7 +2426,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
howmany++;
spin_lock_irqsave(&tx_queue->txlock, flags);
- tx_queue->num_txbdfree += frags + 1;
+ tx_queue->num_txbdfree += nr_txbds;
spin_unlock_irqrestore(&tx_queue->txlock, flags);
}
@@ -2470,6 +2582,17 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
skb_pull(skb, amount_pull);
}
+ /* Get receive timestamp from the skb */
+ if (priv->hwts_rx_en) {
+ struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
+ u64 *ns = (u64 *) skb->data;
+ memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+ shhwtstamps->hwtstamp = ns_to_ktime(*ns);
+ }
+
+ if (priv->padding)
+ skb_pull(skb, priv->padding);
+
if (priv->rx_csum_enable)
gfar_rx_checksum(skb, fcb);
@@ -2506,8 +2629,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
bdp = rx_queue->cur_rx;
base = rx_queue->rx_bd_base;
- amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0) +
- priv->padding;
+ amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0);
while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
struct sk_buff *newskb;
@@ -2794,7 +2916,7 @@ static void adjust_link(struct net_device *dev)
* whenever dev->flags is changed */
static void gfar_set_multi(struct net_device *dev)
{
- struct dev_mc_list *mc_ptr;
+ struct netdev_hw_addr *ha;
struct gfar_private *priv = netdev_priv(dev);
struct gfar __iomem *regs = priv->gfargrp[0].regs;
u32 tempval;
@@ -2867,17 +2989,14 @@ static void gfar_set_multi(struct net_device *dev)
return;
/* Parse the list, and set the appropriate bits */
- netdev_for_each_mc_addr(mc_ptr, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
if (idx < em_num) {
- gfar_set_mac_for_addr(dev, idx,
- mc_ptr->dmi_addr);
+ gfar_set_mac_for_addr(dev, idx, ha->addr);
idx++;
} else
- gfar_set_hash_for_addr(dev, mc_ptr->dmi_addr);
+ gfar_set_hash_for_addr(dev, ha->addr);
}
}
-
- return;
}
@@ -2918,8 +3037,6 @@ static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
tempval = gfar_read(priv->hash_regs[whichreg]);
tempval |= value;
gfar_write(priv->hash_regs[whichreg], tempval);
-
- return;
}
@@ -3050,14 +3167,14 @@ MODULE_DEVICE_TABLE(of, gfar_match);
/* Structure for a device driver */
static struct of_platform_driver gfar_driver = {
- .name = "fsl-gianfar",
- .match_table = gfar_match,
-
+ .driver = {
+ .name = "fsl-gianfar",
+ .owner = THIS_MODULE,
+ .pm = GFAR_PM_OPS,
+ .of_match_table = gfar_match,
+ },
.probe = gfar_probe,
.remove = gfar_remove,
- .suspend = gfar_legacy_suspend,
- .resume = gfar_legacy_resume,
- .driver.pm = GFAR_PM_OPS,
};
static int __init gfar_init(void)
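gfar_hwtstamp_ioctl() above is reached from user space through SIOCSHWTSTAMP on an ordinary socket ioctl. A small stand-alone sketch of that caller side, assuming the gianfar interface is named eth0; the structures and constants come from linux/net_tstamp.h, the same header the driver now includes:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/net_tstamp.h>

int main(void)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;		/* stamp outgoing frames */
	cfg.rx_filter = HWTSTAMP_FILTER_ALL;	/* gianfar stamps all rx */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0) {
		perror("SIOCSHWTSTAMP");
		close(fd);
		return 1;
	}

	close(fd);
	return 0;
}

The driver answers -ERANGE when FSL_GIANFAR_DEV_HAS_TIMER is absent, and rewrites rx_filter to HWTSTAMP_FILTER_ALL before copying the config back to the caller, exactly as the hunk shows.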
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index 17d25e714236..ac4a92e08c09 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -262,6 +262,7 @@ extern const char gfar_driver_version[];
#define next_bd(bdp, base, ring_size) skip_bd(bdp, 1, base, ring_size)
+#define RCTRL_TS_ENABLE 0x01000000
#define RCTRL_PAL_MASK 0x001f0000
#define RCTRL_VLEX 0x00002000
#define RCTRL_FILREN 0x00001000
@@ -539,7 +540,7 @@ struct txbd8
struct txfcb {
u8 flags;
- u8 reserved;
+ u8 ptp; /* Flag to enable tx timestamping */
u8 l4os; /* Level 4 Header Offset */
u8 l3os; /* Level 3 Header Offset */
u16 phcs; /* Pseudo-header Checksum */
@@ -885,6 +886,7 @@ struct gfar {
#define FSL_GIANFAR_DEV_HAS_MAGIC_PACKET 0x00000100
#define FSL_GIANFAR_DEV_HAS_BD_STASHING 0x00000200
#define FSL_GIANFAR_DEV_HAS_BUF_STASHING 0x00000400
+#define FSL_GIANFAR_DEV_HAS_TIMER 0x00000800
#if (MAXGROUPS == 2)
#define DEFAULT_MAPPING 0xAA
@@ -1100,6 +1102,10 @@ struct gfar_private {
/* Network Statistics */
struct gfar_extra_stats extra_stats;
+
+ /* HW time stamping enabled flag */
+ int hwts_rx_en;
+ int hwts_tx_en;
};
extern unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
diff --git a/drivers/net/greth.c b/drivers/net/greth.c
index 3a90430de918..f37a4c143ddd 100644
--- a/drivers/net/greth.c
+++ b/drivers/net/greth.c
@@ -895,7 +895,6 @@ static int greth_rx_gbit(struct net_device *dev, int limit)
else
skb->ip_summed = CHECKSUM_NONE;
- skb->dev = dev;
skb->protocol = eth_type_trans(skb, dev);
dev->stats.rx_packets++;
netif_receive_skb(skb);
@@ -990,7 +989,7 @@ static u32 greth_hash_get_index(__u8 *addr)
static void greth_set_hash_filter(struct net_device *dev)
{
- struct dev_mc_list *curr;
+ struct netdev_hw_addr *ha;
struct greth_private *greth = netdev_priv(dev);
struct greth_regs *regs = (struct greth_regs *) greth->regs;
u32 mc_filter[2];
@@ -998,8 +997,8 @@ static void greth_set_hash_filter(struct net_device *dev)
mc_filter[0] = mc_filter[1] = 0;
- netdev_for_each_mc_addr(curr, dev) {
- bitnr = greth_hash_get_index(curr->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev) {
+ bitnr = greth_hash_get_index(ha->addr);
mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
}
@@ -1500,7 +1499,8 @@ static int __devinit greth_of_probe(struct of_device *ofdev, const struct of_dev
if (i == 6) {
const unsigned char *addr;
int len;
- addr = of_get_property(ofdev->node, "local-mac-address", &len);
+ addr = of_get_property(ofdev->dev.of_node, "local-mac-address",
+ &len);
if (addr != NULL && len == 6) {
for (i = 0; i < 6; i++)
macaddr[i] = (unsigned int) addr[i];
diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c
index 5d6f13879592..61f2b1cfcd46 100644
--- a/drivers/net/hamachi.c
+++ b/drivers/net/hamachi.c
@@ -859,7 +859,6 @@ static void mdio_write(struct net_device *dev, int phy_id, int location, int val
for (i = 10000; i >= 0; i--)
if ((readw(ioaddr + MII_Status) & 1) == 0)
break;
- return;
}
@@ -1225,8 +1224,6 @@ static void hamachi_init_ring(struct net_device *dev)
}
/* Mark the last entry of the ring */
hmp->tx_ring[TX_RING_SIZE-1].status_n_length |= cpu_to_le32(DescEndRing);
-
- return;
}
@@ -1857,12 +1854,12 @@ static void set_rx_mode(struct net_device *dev)
/* Too many to match, or accept all multicasts. */
writew(0x000B, ioaddr + AddrMode);
} else if (!netdev_mc_empty(dev)) { /* Must use the CAM filter. */
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
int i = 0;
- netdev_for_each_mc_addr(mclist, dev) {
- writel(*(u32*)(mclist->dmi_addr), ioaddr + 0x100 + i*8);
- writel(0x20000 | (*(u16*)&mclist->dmi_addr[4]),
+ netdev_for_each_mc_addr(ha, dev) {
+ writel(*(u32 *)(ha->addr), ioaddr + 0x100 + i*8);
+ writel(0x20000 | (*(u16 *)&ha->addr[4]),
ioaddr + 0x104 + i*8);
i++;
}
diff --git a/drivers/net/hamradio/baycom_ser_fdx.c b/drivers/net/hamradio/baycom_ser_fdx.c
index 0cab992b3d1a..3e25f10cabd6 100644
--- a/drivers/net/hamradio/baycom_ser_fdx.c
+++ b/drivers/net/hamradio/baycom_ser_fdx.c
@@ -429,7 +429,7 @@ static int ser12_open(struct net_device *dev)
return -EINVAL;
}
if (!request_region(dev->base_addr, SER12_EXTENT, "baycom_ser_fdx")) {
- printk(KERN_WARNING "BAYCOM_SER_FSX: I/O port 0x%04lx busy \n",
+ printk(KERN_WARNING "BAYCOM_SER_FSX: I/O port 0x%04lx busy\n",
dev->base_addr);
return -EACCES;
}
diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c
index f3a96b843911..9f64c8637208 100644
--- a/drivers/net/hamradio/scc.c
+++ b/drivers/net/hamradio/scc.c
@@ -1629,7 +1629,6 @@ static void scc_net_rx(struct scc_channel *scc, struct sk_buff *skb)
skb->protocol = ax25_type_trans(skb, scc->dev);
netif_rx(skb);
- return;
}
/* ----> transmit frame <---- */
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index 694132e04af6..4e7d1d0a2340 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -1151,8 +1151,7 @@ static int __init yam_init_driver(void)
dev = alloc_netdev(sizeof(struct yam_port), name,
yam_setup);
if (!dev) {
- printk(KERN_ERR "yam: cannot allocate net device %s\n",
- dev->name);
+ pr_err("yam: cannot allocate net device\n");
err = -ENOMEM;
goto error;
}
diff --git a/drivers/net/hp-plus.c b/drivers/net/hp-plus.c
index efdbcad63c67..82bffc3cabdf 100644
--- a/drivers/net/hp-plus.c
+++ b/drivers/net/hp-plus.c
@@ -351,7 +351,6 @@ hpp_reset_8390(struct net_device *dev)
printk("%s: hp_reset_8390() did not complete.\n", dev->name);
if (ei_debug > 1) printk("8390 reset done (%ld).", jiffies);
- return;
}
/* The programmed-I/O version of reading the 4 byte 8390 specific header.
@@ -422,7 +421,6 @@ hpp_io_block_output(struct net_device *dev, int count,
int ioaddr = dev->base_addr - NIC_OFFSET;
outw(start_page << 8, ioaddr + HPP_OUT_ADDR);
outsl(ioaddr + HP_DATAPORT, buf, (count+3)>>2);
- return;
}
static void
@@ -436,8 +434,6 @@ hpp_mem_block_output(struct net_device *dev, int count,
outw(option_reg & ~(MemDisable + BootROMEnb), ioaddr + HPP_OPTION);
memcpy_toio(ei_status.mem, buf, (count + 3) & ~3);
outw(option_reg, ioaddr + HPP_OPTION);
-
- return;
}
diff --git a/drivers/net/hp.c b/drivers/net/hp.c
index 5c4d78c1ff42..86ececd3c658 100644
--- a/drivers/net/hp.c
+++ b/drivers/net/hp.c
@@ -240,7 +240,6 @@ hp_reset_8390(struct net_device *dev)
printk("%s: hp_reset_8390() did not complete.\n", dev->name);
if (ei_debug > 1) printk("8390 reset done (%ld).", jiffies);
- return;
}
static void
@@ -360,7 +359,6 @@ hp_block_output(struct net_device *dev, int count,
dev->name, (start_page << 8) + count, addr);
}
outb_p(saved_config & (~HP_DATAON), nic_base - NIC_OFFSET + HP_CONFIGURE);
- return;
}
/* This function resets the ethercard if something screws up. */
@@ -371,7 +369,6 @@ hp_init_card(struct net_device *dev)
NS8390p_init(dev, 0);
outb_p(irqmap[irq&0x0f] | HP_RUN,
dev->base_addr - NIC_OFFSET + HP_CONFIGURE);
- return;
}
#ifdef MODULE
diff --git a/drivers/net/hp100.c b/drivers/net/hp100.c
index 4daad8cd56ea..68e5ac8832ad 100644
--- a/drivers/net/hp100.c
+++ b/drivers/net/hp100.c
@@ -1102,7 +1102,7 @@ static int hp100_open(struct net_device *dev)
return -EAGAIN;
}
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_start_queue(dev);
lp->lan_type = hp100_sense_lan(dev);
@@ -1510,7 +1510,7 @@ static netdev_tx_t hp100_start_xmit_bm(struct sk_buff *skb,
printk("hp100: %s: start_xmit_bm: No TX PDL available.\n", dev->name);
#endif
/* not waited long enough since last tx? */
- if (time_before(jiffies, dev->trans_start + HZ))
+ if (time_before(jiffies, dev_trans_start(dev) + HZ))
goto drop;
if (hp100_check_lan(dev))
@@ -1547,7 +1547,6 @@ static netdev_tx_t hp100_start_xmit_bm(struct sk_buff *skb,
}
}
- dev->trans_start = jiffies;
goto drop;
}
@@ -1585,7 +1584,6 @@ static netdev_tx_t hp100_start_xmit_bm(struct sk_buff *skb,
/* Update statistics */
lp->stats.tx_packets++;
lp->stats.tx_bytes += skb->len;
- dev->trans_start = jiffies;
return NETDEV_TX_OK;
@@ -1663,7 +1661,7 @@ static netdev_tx_t hp100_start_xmit(struct sk_buff *skb,
printk("hp100: %s: start_xmit: tx free mem = 0x%x\n", dev->name, i);
#endif
/* not waited long enough since last failed tx try? */
- if (time_before(jiffies, dev->trans_start + HZ)) {
+ if (time_before(jiffies, dev_trans_start(dev) + HZ)) {
#ifdef HP100_DEBUG
printk("hp100: %s: trans_start timing problem\n",
dev->name);
@@ -1701,7 +1699,6 @@ static netdev_tx_t hp100_start_xmit(struct sk_buff *skb,
mdelay(1);
}
}
- dev->trans_start = jiffies;
goto drop;
}
@@ -1745,7 +1742,6 @@ static netdev_tx_t hp100_start_xmit(struct sk_buff *skb,
lp->stats.tx_packets++;
lp->stats.tx_bytes += skb->len;
- dev->trans_start = jiffies;
hp100_ints_on();
spin_unlock_irqrestore(&lp->lock, flags);
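hp100 shows both halves of the trans_start cleanup found throughout the patch: xmit paths stop writing dev->trans_start (the core now stamps each tx queue itself), and the remaining readers use dev_trans_start(), which reports the most recent per-queue timestamp. The retry check the hunks above convert boils down to something like this hypothetical helper:

#include <linux/jiffies.h>
#include <linux/netdevice.h>

/* true if the device transmitted less than one second ago */
static bool example_tx_too_recent(struct net_device *dev)
{
	return time_before(jiffies, dev_trans_start(dev) + HZ);
}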
@@ -2099,15 +2095,15 @@ static void hp100_set_multicast_list(struct net_device *dev)
} else {
int i, idx;
u_char *addrs;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
memset(&lp->hash_bytes, 0x00, 8);
#ifdef HP100_DEBUG
printk("hp100: %s: computing hash filter - mc_count = %i\n",
dev->name, netdev_mc_count(dev));
#endif
- netdev_for_each_mc_addr(dmi, dev) {
- addrs = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ addrs = ha->addr;
if ((*addrs & 0x01) == 0x01) { /* multicast address? */
#ifdef HP100_DEBUG
printk("hp100: %s: multicast = %pM, ",
diff --git a/drivers/net/hydra.c b/drivers/net/hydra.c
index 24724b4ad709..07d8e5b634f3 100644
--- a/drivers/net/hydra.c
+++ b/drivers/net/hydra.c
@@ -71,6 +71,7 @@ static struct zorro_device_id hydra_zorro_tbl[] __devinitdata = {
{ ZORRO_PROD_HYDRA_SYSTEMS_AMIGANET },
{ 0 }
};
+MODULE_DEVICE_TABLE(zorro, hydra_zorro_tbl);
static struct zorro_driver hydra_driver = {
.name = "hydra",
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index dd873cc41c2b..b150c102ca5a 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -136,7 +136,8 @@ static inline void emac_report_timeout_error(struct emac_instance *dev,
EMAC_FTR_440EP_PHY_CLK_FIX))
DBG(dev, "%s" NL, error);
else if (net_ratelimit())
- printk(KERN_ERR "%s: %s\n", dev->ofdev->node->full_name, error);
+ printk(KERN_ERR "%s: %s\n", dev->ofdev->dev.of_node->full_name,
+ error);
}
/* EMAC PHY clock workaround:
@@ -389,18 +390,19 @@ static void emac_hash_mc(struct emac_instance *dev)
const int regs = EMAC_XAHT_REGS(dev);
u32 *gaht_base = emac_gaht_base(dev);
u32 gaht_temp[regs];
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
int i;
DBG(dev, "hash_mc %d" NL, netdev_mc_count(dev->ndev));
memset(gaht_temp, 0, sizeof (gaht_temp));
- netdev_for_each_mc_addr(dmi, dev->ndev) {
+ netdev_for_each_mc_addr(ha, dev->ndev) {
int slot, reg, mask;
- DBG2(dev, "mc %pM" NL, dmi->dmi_addr);
+ DBG2(dev, "mc %pM" NL, ha->addr);
- slot = EMAC_XAHT_CRC_TO_SLOT(dev, ether_crc(ETH_ALEN, dmi->dmi_addr));
+ slot = EMAC_XAHT_CRC_TO_SLOT(dev,
+ ether_crc(ETH_ALEN, ha->addr));
reg = EMAC_XAHT_SLOT_TO_REG(dev, slot);
mask = EMAC_XAHT_SLOT_TO_MASK(dev, slot);
@@ -1177,7 +1179,7 @@ static int emac_open(struct net_device *ndev)
netif_carrier_on(dev->ndev);
/* Required for Pause packet support in EMAC */
- dev_mc_add(ndev, default_mcast_addr, sizeof(default_mcast_addr), 1);
+ dev_mc_add_global(ndev, default_mcast_addr);
emac_configure(dev);
mal_poll_add(dev->mal, &dev->commac);
@@ -1700,7 +1702,6 @@ static int emac_poll_rx(void *param, int budget)
skb_put(skb, len);
push_packet:
- skb->dev = dev->ndev;
skb->protocol = eth_type_trans(skb, dev->ndev);
emac_rx_csum(dev, skb, ctrl);
@@ -2185,7 +2186,7 @@ static void emac_ethtool_get_drvinfo(struct net_device *ndev,
strcpy(info->version, DRV_VERSION);
info->fw_version[0] = '\0';
sprintf(info->bus_info, "PPC 4xx EMAC-%d %s",
- dev->cell_index, dev->ofdev->node->full_name);
+ dev->cell_index, dev->ofdev->dev.of_node->full_name);
info->regdump_len = emac_ethtool_get_regs_len(ndev);
}
@@ -2379,7 +2380,7 @@ static int __devinit emac_read_uint_prop(struct device_node *np, const char *nam
static int __devinit emac_init_phy(struct emac_instance *dev)
{
- struct device_node *np = dev->ofdev->node;
+ struct device_node *np = dev->ofdev->dev.of_node;
struct net_device *ndev = dev->ndev;
u32 phy_map, adv;
int i;
@@ -2514,7 +2515,7 @@ static int __devinit emac_init_phy(struct emac_instance *dev)
static int __devinit emac_init_config(struct emac_instance *dev)
{
- struct device_node *np = dev->ofdev->node;
+ struct device_node *np = dev->ofdev->dev.of_node;
const void *p;
unsigned int plen;
const char *pm, *phy_modes[] = {
@@ -2723,7 +2724,7 @@ static int __devinit emac_probe(struct of_device *ofdev,
{
struct net_device *ndev;
struct emac_instance *dev;
- struct device_node *np = ofdev->node;
+ struct device_node *np = ofdev->dev.of_node;
struct device_node **blist = NULL;
int err, i;
@@ -2810,7 +2811,7 @@ static int __devinit emac_probe(struct of_device *ofdev,
err = mal_register_commac(dev->mal, &dev->commac);
if (err) {
printk(KERN_ERR "%s: failed to register with mal %s!\n",
- np->full_name, dev->mal_dev->node->full_name);
+ np->full_name, dev->mal_dev->dev.of_node->full_name);
goto err_rel_deps;
}
dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
@@ -2995,9 +2996,11 @@ static struct of_device_id emac_match[] =
MODULE_DEVICE_TABLE(of, emac_match);
static struct of_platform_driver emac_driver = {
- .name = "emac",
- .match_table = emac_match,
-
+ .driver = {
+ .name = "emac",
+ .owner = THIS_MODULE,
+ .of_match_table = emac_match,
+ },
.probe = emac_probe,
.remove = emac_remove,
};
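ibm_newemac keeps subscribing to a multicast address at open() time so the MAC sees PAUSE frames, but the reworked address-list API drops the length and refcount arguments. A minimal sketch, under the assumption that the driver's default_mcast_addr holds the standard 802.3x PAUSE destination 01:80:c2:00:00:01:

#include <linux/netdevice.h>

static u8 pause_mcast_addr[] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x01 };

static int example_open(struct net_device *ndev)
{
	/*
	 * old: dev_mc_add(ndev, addr, ETH_ALEN, 1) with explicit length/refcount;
	 * new: one global entry, released again with dev_mc_del_global().
	 */
	return dev_mc_add_global(ndev, pause_mcast_addr);
}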
diff --git a/drivers/net/ibm_newemac/debug.c b/drivers/net/ibm_newemac/debug.c
index 775c850a425a..3995fafc1e08 100644
--- a/drivers/net/ibm_newemac/debug.c
+++ b/drivers/net/ibm_newemac/debug.c
@@ -33,7 +33,7 @@ static void emac_desc_dump(struct emac_instance *p)
int i;
printk("** EMAC %s TX BDs **\n"
" tx_cnt = %d tx_slot = %d ack_slot = %d\n",
- p->ofdev->node->full_name,
+ p->ofdev->dev.of_node->full_name,
p->tx_cnt, p->tx_slot, p->ack_slot);
for (i = 0; i < NUM_TX_BUFF / 2; ++i)
printk
@@ -49,7 +49,7 @@ static void emac_desc_dump(struct emac_instance *p)
printk("** EMAC %s RX BDs **\n"
" rx_slot = %d flags = 0x%lx rx_skb_size = %d rx_sync_size = %d\n"
" rx_sg_skb = 0x%p\n",
- p->ofdev->node->full_name,
+ p->ofdev->dev.of_node->full_name,
p->rx_slot, p->commac.flags, p->rx_skb_size,
p->rx_sync_size, p->rx_sg_skb);
for (i = 0; i < NUM_RX_BUFF / 2; ++i)
@@ -77,7 +77,8 @@ static void emac_mac_dump(struct emac_instance *dev)
"MR0 = 0x%08x MR1 = 0x%08x TMR0 = 0x%08x TMR1 = 0x%08x\n"
"RMR = 0x%08x ISR = 0x%08x ISER = 0x%08x\n"
"IAR = %04x%08x VTPID = 0x%04x VTCI = 0x%04x\n",
- dev->ofdev->node->full_name, in_be32(&p->mr0), in_be32(&p->mr1),
+ dev->ofdev->dev.of_node->full_name,
+ in_be32(&p->mr0), in_be32(&p->mr1),
in_be32(&p->tmr0), in_be32(&p->tmr1),
in_be32(&p->rmr), in_be32(&p->isr), in_be32(&p->iser),
in_be32(&p->iahr), in_be32(&p->ialr), in_be32(&p->vtpid),
@@ -128,7 +129,7 @@ static void emac_mal_dump(struct mal_instance *mal)
"CFG = 0x%08x ESR = 0x%08x IER = 0x%08x\n"
"TX|CASR = 0x%08x CARR = 0x%08x EOBISR = 0x%08x DEIR = 0x%08x\n"
"RX|CASR = 0x%08x CARR = 0x%08x EOBISR = 0x%08x DEIR = 0x%08x\n",
- mal->ofdev->node->full_name,
+ mal->ofdev->dev.of_node->full_name,
get_mal_dcrn(mal, MAL_CFG), get_mal_dcrn(mal, MAL_ESR),
get_mal_dcrn(mal, MAL_IER),
get_mal_dcrn(mal, MAL_TXCASR), get_mal_dcrn(mal, MAL_TXCARR),
diff --git a/drivers/net/ibm_newemac/debug.h b/drivers/net/ibm_newemac/debug.h
index b631842ec8d0..e596c77ccdf7 100644
--- a/drivers/net/ibm_newemac/debug.h
+++ b/drivers/net/ibm_newemac/debug.h
@@ -53,8 +53,8 @@ extern void emac_dbg_dump_all(void);
#endif
-#define EMAC_DBG(dev, name, fmt, arg...) \
- printk(KERN_DEBUG #name "%s: " fmt, dev->ofdev->node->full_name, ## arg)
+#define EMAC_DBG(d, name, fmt, arg...) \
+ printk(KERN_DEBUG #name "%s: " fmt, d->ofdev->dev.of_node->full_name, ## arg)
#if DBG_LEVEL > 0
# define DBG(d,f,x...) EMAC_DBG(d, emac, f, ##x)
diff --git a/drivers/net/ibm_newemac/mal.c b/drivers/net/ibm_newemac/mal.c
index 5b3d94419fe6..fcff9e0bd382 100644
--- a/drivers/net/ibm_newemac/mal.c
+++ b/drivers/net/ibm_newemac/mal.c
@@ -538,11 +538,11 @@ static int __devinit mal_probe(struct of_device *ofdev,
}
mal->index = index;
mal->ofdev = ofdev;
- mal->version = of_device_is_compatible(ofdev->node, "ibm,mcmal2") ? 2 : 1;
+ mal->version = of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal2") ? 2 : 1;
MAL_DBG(mal, "probe" NL);
- prop = of_get_property(ofdev->node, "num-tx-chans", NULL);
+ prop = of_get_property(ofdev->dev.of_node, "num-tx-chans", NULL);
if (prop == NULL) {
printk(KERN_ERR
"mal%d: can't find MAL num-tx-chans property!\n",
@@ -552,7 +552,7 @@ static int __devinit mal_probe(struct of_device *ofdev,
}
mal->num_tx_chans = prop[0];
- prop = of_get_property(ofdev->node, "num-rx-chans", NULL);
+ prop = of_get_property(ofdev->dev.of_node, "num-rx-chans", NULL);
if (prop == NULL) {
printk(KERN_ERR
"mal%d: can't find MAL num-rx-chans property!\n",
@@ -562,14 +562,14 @@ static int __devinit mal_probe(struct of_device *ofdev,
}
mal->num_rx_chans = prop[0];
- dcr_base = dcr_resource_start(ofdev->node, 0);
+ dcr_base = dcr_resource_start(ofdev->dev.of_node, 0);
if (dcr_base == 0) {
printk(KERN_ERR
"mal%d: can't find DCR resource!\n", index);
err = -ENODEV;
goto fail;
}
- mal->dcr_host = dcr_map(ofdev->node, dcr_base, 0x100);
+ mal->dcr_host = dcr_map(ofdev->dev.of_node, dcr_base, 0x100);
if (!DCR_MAP_OK(mal->dcr_host)) {
printk(KERN_ERR
"mal%d: failed to map DCRs !\n", index);
@@ -577,28 +577,28 @@ static int __devinit mal_probe(struct of_device *ofdev,
goto fail;
}
- if (of_device_is_compatible(ofdev->node, "ibm,mcmal-405ez")) {
+ if (of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal-405ez")) {
#if defined(CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT) && \
defined(CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR)
mal->features |= (MAL_FTR_CLEAR_ICINTSTAT |
MAL_FTR_COMMON_ERR_INT);
#else
printk(KERN_ERR "%s: Support for 405EZ not enabled!\n",
- ofdev->node->full_name);
+ ofdev->dev.of_node->full_name);
err = -ENODEV;
goto fail;
#endif
}
- mal->txeob_irq = irq_of_parse_and_map(ofdev->node, 0);
- mal->rxeob_irq = irq_of_parse_and_map(ofdev->node, 1);
- mal->serr_irq = irq_of_parse_and_map(ofdev->node, 2);
+ mal->txeob_irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
+ mal->rxeob_irq = irq_of_parse_and_map(ofdev->dev.of_node, 1);
+ mal->serr_irq = irq_of_parse_and_map(ofdev->dev.of_node, 2);
if (mal_has_feature(mal, MAL_FTR_COMMON_ERR_INT)) {
mal->txde_irq = mal->rxde_irq = mal->serr_irq;
} else {
- mal->txde_irq = irq_of_parse_and_map(ofdev->node, 3);
- mal->rxde_irq = irq_of_parse_and_map(ofdev->node, 4);
+ mal->txde_irq = irq_of_parse_and_map(ofdev->dev.of_node, 3);
+ mal->rxde_irq = irq_of_parse_and_map(ofdev->dev.of_node, 4);
}
if (mal->txeob_irq == NO_IRQ || mal->rxeob_irq == NO_IRQ ||
@@ -629,7 +629,7 @@ static int __devinit mal_probe(struct of_device *ofdev,
/* Current Axon is not happy with priority being non-0, it can
* deadlock, fix it up here
*/
- if (of_device_is_compatible(ofdev->node, "ibm,mcmal-axon"))
+ if (of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal-axon"))
cfg &= ~(MAL2_CFG_RPP_10 | MAL2_CFG_WPP_10);
/* Apply configuration */
@@ -701,7 +701,7 @@ static int __devinit mal_probe(struct of_device *ofdev,
printk(KERN_INFO
"MAL v%d %s, %d TX channels, %d RX channels\n",
- mal->version, ofdev->node->full_name,
+ mal->version, ofdev->dev.of_node->full_name,
mal->num_tx_chans, mal->num_rx_chans);
/* Advertise this instance to the rest of the world */
@@ -790,9 +790,11 @@ static struct of_device_id mal_platform_match[] =
};
static struct of_platform_driver mal_of_driver = {
- .name = "mcmal",
- .match_table = mal_platform_match,
-
+ .driver = {
+ .name = "mcmal",
+ .owner = THIS_MODULE,
+ .of_match_table = mal_platform_match,
+ },
.probe = mal_probe,
.remove = mal_remove,
};
diff --git a/drivers/net/ibm_newemac/rgmii.c b/drivers/net/ibm_newemac/rgmii.c
index 5b90d34c8455..108919bcdf13 100644
--- a/drivers/net/ibm_newemac/rgmii.c
+++ b/drivers/net/ibm_newemac/rgmii.c
@@ -103,7 +103,7 @@ int __devinit rgmii_attach(struct of_device *ofdev, int input, int mode)
/* Check if we need to attach to a RGMII */
if (input < 0 || !rgmii_valid_mode(mode)) {
printk(KERN_ERR "%s: unsupported settings !\n",
- ofdev->node->full_name);
+ ofdev->dev.of_node->full_name);
return -ENODEV;
}
@@ -113,7 +113,7 @@ int __devinit rgmii_attach(struct of_device *ofdev, int input, int mode)
out_be32(&p->fer, in_be32(&p->fer) | rgmii_mode_mask(mode, input));
printk(KERN_NOTICE "%s: input %d in %s mode\n",
- ofdev->node->full_name, input, rgmii_mode_name(mode));
+ ofdev->dev.of_node->full_name, input, rgmii_mode_name(mode));
++dev->users;
@@ -231,7 +231,7 @@ void *rgmii_dump_regs(struct of_device *ofdev, void *buf)
static int __devinit rgmii_probe(struct of_device *ofdev,
const struct of_device_id *match)
{
- struct device_node *np = ofdev->node;
+ struct device_node *np = ofdev->dev.of_node;
struct rgmii_instance *dev;
struct resource regs;
int rc;
@@ -264,11 +264,11 @@ static int __devinit rgmii_probe(struct of_device *ofdev,
}
/* Check for RGMII flags */
- if (of_get_property(ofdev->node, "has-mdio", NULL))
+ if (of_get_property(ofdev->dev.of_node, "has-mdio", NULL))
dev->flags |= EMAC_RGMII_FLAG_HAS_MDIO;
/* CAB lacks the right properties, fix this up */
- if (of_device_is_compatible(ofdev->node, "ibm,rgmii-axon"))
+ if (of_device_is_compatible(ofdev->dev.of_node, "ibm,rgmii-axon"))
dev->flags |= EMAC_RGMII_FLAG_HAS_MDIO;
DBG2(dev, " Boot FER = 0x%08x, SSR = 0x%08x\n",
@@ -279,7 +279,7 @@ static int __devinit rgmii_probe(struct of_device *ofdev,
printk(KERN_INFO
"RGMII %s initialized with%s MDIO support\n",
- ofdev->node->full_name,
+ ofdev->dev.of_node->full_name,
(dev->flags & EMAC_RGMII_FLAG_HAS_MDIO) ? "" : "out");
wmb();
@@ -319,9 +319,11 @@ static struct of_device_id rgmii_match[] =
};
static struct of_platform_driver rgmii_driver = {
- .name = "emac-rgmii",
- .match_table = rgmii_match,
-
+ .driver = {
+ .name = "emac-rgmii",
+ .owner = THIS_MODULE,
+ .of_match_table = rgmii_match,
+ },
.probe = rgmii_probe,
.remove = rgmii_remove,
};
diff --git a/drivers/net/ibm_newemac/tah.c b/drivers/net/ibm_newemac/tah.c
index 30173a9fb557..044637144c43 100644
--- a/drivers/net/ibm_newemac/tah.c
+++ b/drivers/net/ibm_newemac/tah.c
@@ -57,7 +57,8 @@ void tah_reset(struct of_device *ofdev)
--n;
if (unlikely(!n))
- printk(KERN_ERR "%s: reset timeout\n", ofdev->node->full_name);
+ printk(KERN_ERR "%s: reset timeout\n",
+ ofdev->dev.of_node->full_name);
/* 10KB TAH TX FIFO accommodates the max MTU of 9000 */
out_be32(&p->mr,
@@ -89,7 +90,7 @@ void *tah_dump_regs(struct of_device *ofdev, void *buf)
static int __devinit tah_probe(struct of_device *ofdev,
const struct of_device_id *match)
{
- struct device_node *np = ofdev->node;
+ struct device_node *np = ofdev->dev.of_node;
struct tah_instance *dev;
struct resource regs;
int rc;
@@ -127,7 +128,7 @@ static int __devinit tah_probe(struct of_device *ofdev,
tah_reset(ofdev);
printk(KERN_INFO
- "TAH %s initialized\n", ofdev->node->full_name);
+ "TAH %s initialized\n", ofdev->dev.of_node->full_name);
wmb();
return 0;
@@ -165,9 +166,11 @@ static struct of_device_id tah_match[] =
};
static struct of_platform_driver tah_driver = {
- .name = "emac-tah",
- .match_table = tah_match,
-
+ .driver = {
+ .name = "emac-tah",
+ .owner = THIS_MODULE,
+ .of_match_table = tah_match,
+ },
.probe = tah_probe,
.remove = tah_remove,
};
diff --git a/drivers/net/ibm_newemac/zmii.c b/drivers/net/ibm_newemac/zmii.c
index 1f038f808ab3..046dcd069c45 100644
--- a/drivers/net/ibm_newemac/zmii.c
+++ b/drivers/net/ibm_newemac/zmii.c
@@ -121,13 +121,14 @@ int __devinit zmii_attach(struct of_device *ofdev, int input, int *mode)
dev->mode = *mode;
printk(KERN_NOTICE "%s: bridge in %s mode\n",
- ofdev->node->full_name, zmii_mode_name(dev->mode));
+ ofdev->dev.of_node->full_name,
+ zmii_mode_name(dev->mode));
} else {
/* All inputs must use the same mode */
if (*mode != PHY_MODE_NA && *mode != dev->mode) {
printk(KERN_ERR
"%s: invalid mode %d specified for input %d\n",
- ofdev->node->full_name, *mode, input);
+ ofdev->dev.of_node->full_name, *mode, input);
mutex_unlock(&dev->lock);
return -EINVAL;
}
@@ -233,7 +234,7 @@ void *zmii_dump_regs(struct of_device *ofdev, void *buf)
static int __devinit zmii_probe(struct of_device *ofdev,
const struct of_device_id *match)
{
- struct device_node *np = ofdev->node;
+ struct device_node *np = ofdev->dev.of_node;
struct zmii_instance *dev;
struct resource regs;
int rc;
@@ -273,7 +274,7 @@ static int __devinit zmii_probe(struct of_device *ofdev,
out_be32(&dev->base->fer, 0);
printk(KERN_INFO
- "ZMII %s initialized\n", ofdev->node->full_name);
+ "ZMII %s initialized\n", ofdev->dev.of_node->full_name);
wmb();
dev_set_drvdata(&ofdev->dev, dev);
@@ -312,9 +313,11 @@ static struct of_device_id zmii_match[] =
};
static struct of_platform_driver zmii_driver = {
- .name = "emac-zmii",
- .match_table = zmii_match,
-
+ .driver = {
+ .name = "emac-zmii",
+ .owner = THIS_MODULE,
+ .of_match_table = zmii_match,
+ },
.probe = zmii_probe,
.remove = zmii_remove,
};
diff --git a/drivers/net/ibmlana.c b/drivers/net/ibmlana.c
index 7d6cf3340c11..294ccfb427cf 100644
--- a/drivers/net/ibmlana.c
+++ b/drivers/net/ibmlana.c
@@ -384,7 +384,7 @@ static void InitBoard(struct net_device *dev)
int camcnt;
camentry_t cams[16];
u32 cammask;
- struct dev_mc_list *mcptr;
+ struct netdev_hw_addr *ha;
u16 rcrval;
/* reset the SONIC */
@@ -419,8 +419,8 @@ static void InitBoard(struct net_device *dev)
/* start putting the multicast addresses into the CAM list. Stop if
it is full. */
- netdev_for_each_mc_addr(mcptr, dev) {
- putcam(cams, &camcnt, mcptr->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev) {
+ putcam(cams, &camcnt, ha->addr);
if (camcnt == 16)
break;
}
@@ -478,7 +478,7 @@ static void InitBoard(struct net_device *dev)
/* if there are still multicast addresses left or ALLMULTI is set, set the
multicast enable bit */
- if ((dev->flags & IFF_ALLMULTI) || (mcptr != NULL))
+ if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > camcnt)
rcrval |= RCREG_AMC;
/* promiscuous mode? */
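The ibmlana change above follows the tree-wide switch from struct dev_mc_list/dmi_addr to struct netdev_hw_addr/addr. Because the new iterator is a list_for_each_entry-style walker, it never ends up NULL after the loop, so the "ran out of CAM slots" test becomes a comparison of netdev_mc_count() against the slots actually filled. A sketch of the new-style walk, where add_filter_entry(), enable_all_multicast() and MAX_SLOTS are hypothetical placeholders:

	struct netdev_hw_addr *ha;
	int slots = 0;

	netdev_for_each_mc_addr(ha, dev) {
		add_filter_entry(ha->addr);	/* ha->addr replaces mcptr->dmi_addr */
		if (++slots == MAX_SLOTS)
			break;
	}
	/* addresses that did not fit force all-multicast reception */
	if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > slots)
		enable_all_multicast();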
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index cd508a8ee25b..7acb3edc47ef 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -45,6 +45,7 @@
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
+#include <linux/pm.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <linux/in.h>
@@ -199,7 +200,7 @@ static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
return -1;
}
- pool->skbuff = kmalloc(sizeof(void*) * pool->size, GFP_KERNEL);
+ pool->skbuff = kcalloc(pool->size, sizeof(void *), GFP_KERNEL);
if(!pool->skbuff) {
kfree(pool->dma_addr);
@@ -210,7 +211,6 @@ static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
return -1;
}
- memset(pool->skbuff, 0, sizeof(void*) * pool->size);
memset(pool->dma_addr, 0, sizeof(dma_addr_t) * pool->size);
for(i = 0; i < pool->size; ++i) {
@@ -957,7 +957,7 @@ static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
} else {
tx_packets++;
tx_bytes += skb->len;
- netdev->trans_start = jiffies;
+ netdev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
}
if (!used_bounce)
@@ -1073,7 +1073,7 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
ibmveth_error_printk("h_multicast_ctrl rc=%ld when entering promisc mode\n", lpar_rc);
}
} else {
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
/* clear the filter table & disable filtering */
lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
IbmVethMcastEnableRecv |
@@ -1084,10 +1084,10 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
ibmveth_error_printk("h_multicast_ctrl rc=%ld when attempting to clear filter table\n", lpar_rc);
}
/* add the addresses to the filter table */
- netdev_for_each_mc_addr(mclist, netdev) {
+ netdev_for_each_mc_addr(ha, netdev) {
// add the multicast address to the filter table
unsigned long mcast_addr = 0;
- memcpy(((char *)&mcast_addr)+2, mclist->dmi_addr, 6);
+ memcpy(((char *)&mcast_addr)+2, ha->addr, 6);
lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
IbmVethMcastAddFilter,
mcast_addr);
@@ -1421,7 +1421,6 @@ static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter)
if (!entry)
ibmveth_error_printk("Cannot create adapter proc entry");
}
- return;
}
static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter)
@@ -1589,6 +1588,12 @@ static struct kobj_type ktype_veth_pool = {
.default_attrs = veth_pool_attrs,
};
+static int ibmveth_resume(struct device *dev)
+{
+ struct net_device *netdev = dev_get_drvdata(dev);
+ ibmveth_interrupt(netdev->irq, netdev);
+ return 0;
+}
static struct vio_device_id ibmveth_device_table[] __devinitdata= {
{ "network", "IBM,l-lan"},
@@ -1596,6 +1601,10 @@ static struct vio_device_id ibmveth_device_table[] __devinitdata= {
};
MODULE_DEVICE_TABLE(vio, ibmveth_device_table);
+static struct dev_pm_ops ibmveth_pm_ops = {
+ .resume = ibmveth_resume
+};
+
static struct vio_driver ibmveth_driver = {
.id_table = ibmveth_device_table,
.probe = ibmveth_probe,
@@ -1604,6 +1613,7 @@ static struct vio_driver ibmveth_driver = {
.driver = {
.name = ibmveth_driver_name,
.owner = THIS_MODULE,
+ .pm = &ibmveth_pm_ops,
}
};
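The ibmveth hunks also hook a resume callback into the driver core: instead of a bus-specific resume method, a struct dev_pm_ops is attached to the embedded device_driver, and the callback recovers the adapter from the generic struct device. A minimal sketch of that pattern, assuming dev_set_drvdata() stored the net_device at probe time; foo_resume, foo_interrupt and foo_pm_ops are hypothetical names:

	static int foo_resume(struct device *dev)
	{
		struct net_device *netdev = dev_get_drvdata(dev);

		/* re-arm the device, e.g. kick the interrupt handler once */
		foo_interrupt(netdev->irq, netdev);
		return 0;
	}

	static const struct dev_pm_ops foo_pm_ops = {
		.resume = foo_resume,
	};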
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index f4081c0a2d9c..ab9f675c5b8b 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -182,7 +182,6 @@ static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev)
netif_stop_queue(dev);
}
- dev->trans_start = jiffies;
skb_queue_tail(&dp->rq, skb);
if (!dp->tasklet_pending) {
dp->tasklet_pending = 1;
diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
index 4a32bed77c71..86438b59fa21 100644
--- a/drivers/net/igb/e1000_82575.c
+++ b/drivers/net/igb/e1000_82575.c
@@ -104,6 +104,12 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
case E1000_DEV_ID_82580_COPPER_DUAL:
mac->type = e1000_82580;
break;
+ case E1000_DEV_ID_I350_COPPER:
+ case E1000_DEV_ID_I350_FIBER:
+ case E1000_DEV_ID_I350_SERDES:
+ case E1000_DEV_ID_I350_SGMII:
+ mac->type = e1000_i350;
+ break;
default:
return -E1000_ERR_MAC_INIT;
break;
@@ -153,8 +159,10 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
mac->rar_entry_count = E1000_RAR_ENTRIES_82576;
if (mac->type == e1000_82580)
mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
+ if (mac->type == e1000_i350)
+ mac->rar_entry_count = E1000_RAR_ENTRIES_I350;
/* reset */
- if (mac->type == e1000_82580)
+ if (mac->type >= e1000_82580)
mac->ops.reset_hw = igb_reset_hw_82580;
else
mac->ops.reset_hw = igb_reset_hw_82575;
@@ -225,7 +233,7 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
phy->ops.reset = igb_phy_hw_reset_sgmii_82575;
phy->ops.read_reg = igb_read_phy_reg_sgmii_82575;
phy->ops.write_reg = igb_write_phy_reg_sgmii_82575;
- } else if (hw->mac.type == e1000_82580) {
+ } else if (hw->mac.type >= e1000_82580) {
phy->ops.reset = igb_phy_hw_reset;
phy->ops.read_reg = igb_read_phy_reg_82580;
phy->ops.write_reg = igb_write_phy_reg_82580;
@@ -261,6 +269,7 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state;
break;
case I82580_I_PHY_ID:
+ case I350_I_PHY_ID:
phy->type = e1000_phy_82580;
phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_82580;
phy->ops.get_cable_length = igb_get_cable_length_82580;
@@ -1205,8 +1214,6 @@ void igb_power_down_phy_copper_82575(struct e1000_hw *hw)
/* If the management interface is not enabled, then power down */
if (!(igb_enable_mng_pass_thru(hw) || igb_check_reset_block(hw)))
igb_power_down_phy_copper(hw);
-
- return;
}
/**
@@ -1445,7 +1452,6 @@ void igb_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
**/
static s32 igb_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data)
{
- u32 mdicnfg = 0;
s32 ret_val;
@@ -1453,15 +1459,6 @@ static s32 igb_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data)
if (ret_val)
goto out;
- /*
- * We config the phy address in MDICNFG register now. Same bits
- * as before. The values in MDIC can be written but will be
- * ignored. This allows us to call the old function after
- * configuring the PHY address in the new register
- */
- mdicnfg = (hw->phy.addr << E1000_MDIC_PHY_SHIFT);
- wr32(E1000_MDICNFG, mdicnfg);
-
ret_val = igb_read_phy_reg_mdic(hw, offset, data);
hw->phy.ops.release(hw);
@@ -1480,7 +1477,6 @@ out:
**/
static s32 igb_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data)
{
- u32 mdicnfg = 0;
s32 ret_val;
@@ -1488,15 +1484,6 @@ static s32 igb_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data)
if (ret_val)
goto out;
- /*
- * We config the phy address in MDICNFG register now. Same bits
- * as before. The values in MDIC can be written but will be
- * ignored. This allows us to call the old function after
- * configuring the PHY address in the new register
- */
- mdicnfg = (hw->phy.addr << E1000_MDIC_PHY_SHIFT);
- wr32(E1000_MDICNFG, mdicnfg);
-
ret_val = igb_write_phy_reg_mdic(hw, offset, data);
hw->phy.ops.release(hw);
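Switching the reset and PHY-register tests from '== e1000_82580' to '>= e1000_82580' leans on the ordering of enum e1000_mac_type: e1000_i350 is added immediately after e1000_82580, so every MAC from the 82580 onwards picks up the 82580-style handlers. A short sketch of the dependency this creates (a restatement of the hunk above, with the assumption spelled out in a comment):

	/* relies on e1000_i350 (and any future MAC) being enumerated after
	 * e1000_82580 in enum e1000_mac_type */
	if (hw->mac.type >= e1000_82580)
		mac->ops.reset_hw = igb_reset_hw_82580;
	else
		mac->ops.reset_hw = igb_reset_hw_82575;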
diff --git a/drivers/net/igb/e1000_82575.h b/drivers/net/igb/e1000_82575.h
index fbe1c99c193c..cbd1e1259e4d 100644
--- a/drivers/net/igb/e1000_82575.h
+++ b/drivers/net/igb/e1000_82575.h
@@ -38,9 +38,10 @@ extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw);
(ID_LED_DEF1_DEF2 << 4) | \
(ID_LED_OFF1_ON2))
-#define E1000_RAR_ENTRIES_82575 16
-#define E1000_RAR_ENTRIES_82576 24
-#define E1000_RAR_ENTRIES_82580 24
+#define E1000_RAR_ENTRIES_82575 16
+#define E1000_RAR_ENTRIES_82576 24
+#define E1000_RAR_ENTRIES_82580 24
+#define E1000_RAR_ENTRIES_I350 32
#define E1000_SW_SYNCH_MB 0x00000100
#define E1000_STAT_DEV_RST_SET 0x00100000
@@ -52,6 +53,7 @@ extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw);
#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000
#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000
#define E1000_SRRCTL_DROP_EN 0x80000000
+#define E1000_SRRCTL_TIMESTAMP 0x40000000
#define E1000_MRQC_ENABLE_RSS_4Q 0x00000002
#define E1000_MRQC_ENABLE_VMDQ 0x00000003
@@ -108,6 +110,7 @@ union e1000_adv_rx_desc {
#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0
#define E1000_RXDADV_HDRBUFLEN_SHIFT 5
#define E1000_RXDADV_STAT_TS 0x10000 /* Pkt was time stamped */
+#define E1000_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */
/* Transmit Descriptor - Advanced */
union e1000_adv_tx_desc {
diff --git a/drivers/net/igb/e1000_defines.h b/drivers/net/igb/e1000_defines.h
index fe6cf1b696c7..24d9be64342f 100644
--- a/drivers/net/igb/e1000_defines.h
+++ b/drivers/net/igb/e1000_defines.h
@@ -610,11 +610,7 @@
#define IGP_LED3_MODE 0x07000000
/* PCI/PCI-X/PCI-EX Config space */
-#define PCIE_LINK_STATUS 0x12
#define PCIE_DEVICE_CONTROL2 0x28
-
-#define PCIE_LINK_WIDTH_MASK 0x3F0
-#define PCIE_LINK_WIDTH_SHIFT 4
#define PCIE_DEVICE_CONTROL2_16ms 0x0005
#define PHY_REVISION_MASK 0xFFFFFFF0
@@ -629,6 +625,7 @@
#define M88E1111_I_PHY_ID 0x01410CC0
#define IGP03E1000_E_PHY_ID 0x02A80390
#define I82580_I_PHY_ID 0x015403A0
+#define I350_I_PHY_ID 0x015403B0
#define M88_VENDOR 0x0141
/* M88E1000 Specific Registers */
diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h
index 82a533f5192a..cb8db78b1a05 100644
--- a/drivers/net/igb/e1000_hw.h
+++ b/drivers/net/igb/e1000_hw.h
@@ -31,6 +31,7 @@
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/io.h>
+#include <linux/netdevice.h>
#include "e1000_regs.h"
#include "e1000_defines.h"
@@ -53,6 +54,10 @@ struct e1000_hw;
#define E1000_DEV_ID_82580_SERDES 0x1510
#define E1000_DEV_ID_82580_SGMII 0x1511
#define E1000_DEV_ID_82580_COPPER_DUAL 0x1516
+#define E1000_DEV_ID_I350_COPPER 0x1521
+#define E1000_DEV_ID_I350_FIBER 0x1522
+#define E1000_DEV_ID_I350_SERDES 0x1523
+#define E1000_DEV_ID_I350_SGMII 0x1524
#define E1000_REVISION_2 2
#define E1000_REVISION_4 4
@@ -72,6 +77,7 @@ enum e1000_mac_type {
e1000_82575,
e1000_82576,
e1000_82580,
+ e1000_i350,
e1000_num_macs /* List is 1-based, so subtract 1 for true count. */
};
@@ -502,14 +508,11 @@ struct e1000_hw {
u8 revision_id;
};
-#ifdef DEBUG
-extern char *igb_get_hw_dev_name(struct e1000_hw *hw);
+extern struct net_device *igb_get_hw_dev(struct e1000_hw *hw);
#define hw_dbg(format, arg...) \
- printk(KERN_DEBUG "%s: " format, igb_get_hw_dev_name(hw), ##arg)
-#else
-#define hw_dbg(format, arg...)
-#endif
-#endif
+ netdev_dbg(igb_get_hw_dev(hw), format, ##arg)
+
/* These functions must be implemented by drivers */
s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
+#endif /* _E1000_HW_H_ */
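With the old DEBUG-conditional definition gone, hw_dbg() now always expands to netdev_dbg() on the net_device returned by igb_get_hw_dev(), so output is controlled by the dev_dbg()/dynamic-debug machinery instead of the driver's own #ifdef. A hedged usage sketch, assuming a struct e1000_hw *hw is in scope; the message and value are made up:

	/* routed through netdev_dbg(), prefixed with the netdev name */
	hw_dbg("NVM checksum mismatch: 0x%04x\n", checksum);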
diff --git a/drivers/net/igb/e1000_mac.c b/drivers/net/igb/e1000_mac.c
index be8d010e4021..90c5e01e9235 100644
--- a/drivers/net/igb/e1000_mac.c
+++ b/drivers/net/igb/e1000_mac.c
@@ -53,17 +53,30 @@ s32 igb_get_bus_info_pcie(struct e1000_hw *hw)
u16 pcie_link_status;
bus->type = e1000_bus_type_pci_express;
- bus->speed = e1000_bus_speed_2500;
ret_val = igb_read_pcie_cap_reg(hw,
- PCIE_LINK_STATUS,
- &pcie_link_status);
- if (ret_val)
+ PCI_EXP_LNKSTA,
+ &pcie_link_status);
+ if (ret_val) {
bus->width = e1000_bus_width_unknown;
- else
+ bus->speed = e1000_bus_speed_unknown;
+ } else {
+ switch (pcie_link_status & PCI_EXP_LNKSTA_CLS) {
+ case PCI_EXP_LNKSTA_CLS_2_5GB:
+ bus->speed = e1000_bus_speed_2500;
+ break;
+ case PCI_EXP_LNKSTA_CLS_5_0GB:
+ bus->speed = e1000_bus_speed_5000;
+ break;
+ default:
+ bus->speed = e1000_bus_speed_unknown;
+ break;
+ }
+
bus->width = (enum e1000_bus_width)((pcie_link_status &
- PCIE_LINK_WIDTH_MASK) >>
- PCIE_LINK_WIDTH_SHIFT);
+ PCI_EXP_LNKSTA_NLW) >>
+ PCI_EXP_LNKSTA_NLW_SHIFT);
+ }
reg = rd32(E1000_STATUS);
bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT;
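The bus-info rework stops assuming a 2.5 GT/s link and instead decodes the standard PCIe Link Status register with the generic PCI_EXP_LNKSTA_* definitions from <linux/pci_regs.h>. A small sketch of the same decode on a raw 16-bit value; lnksta, speed and width are hypothetical locals:

	u16 lnksta;	/* value read from the PCIe capability's Link Status */
	unsigned int speed, width;

	switch (lnksta & PCI_EXP_LNKSTA_CLS) {
	case PCI_EXP_LNKSTA_CLS_2_5GB:
		speed = 2500;
		break;
	case PCI_EXP_LNKSTA_CLS_5_0GB:
		speed = 5000;
		break;
	default:
		speed = 0;	/* unknown */
		break;
	}
	width = (lnksta & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT;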
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index 3b772b822a5d..6e63d9a7fc75 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -107,6 +107,7 @@ struct vf_data_storage {
#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
/* Supported Rx Buffer Sizes */
+#define IGB_RXBUFFER_64 64 /* Used for packet split */
#define IGB_RXBUFFER_128 128 /* Used for packet split */
#define IGB_RXBUFFER_1024 1024
#define IGB_RXBUFFER_2048 2048
@@ -140,8 +141,10 @@ struct igb_buffer {
unsigned long time_stamp;
u16 length;
u16 next_to_watch;
- u16 mapped_as_page;
+ unsigned int bytecount;
u16 gso_segs;
+ union skb_shared_tx shtx;
+ u8 mapped_as_page;
};
/* RX */
struct {
@@ -185,7 +188,7 @@ struct igb_q_vector {
struct igb_ring {
struct igb_q_vector *q_vector; /* backlink to q_vector */
struct net_device *netdev; /* back pointer to net_device */
- struct pci_dev *pdev; /* pci device for dma mapping */
+ struct device *dev; /* device pointer for dma mapping */
dma_addr_t dma; /* phys address of the ring */
void *desc; /* descriptor ring memory */
unsigned int size; /* length of desc. ring in bytes */
@@ -323,6 +326,7 @@ struct igb_adapter {
#define IGB_82576_TSYNC_SHIFT 19
#define IGB_82580_TSYNC_SHIFT 24
+#define IGB_TS_HDR_LEN 16
enum e1000_state_t {
__IGB_TESTING,
__IGB_RESETTING,
@@ -336,7 +340,6 @@ enum igb_boards {
extern char igb_driver_name[];
extern char igb_driver_version[];
-extern char *igb_get_hw_dev_name(struct e1000_hw *hw);
extern int igb_up(struct igb_adapter *);
extern void igb_down(struct igb_adapter *);
extern void igb_reinit_locked(struct igb_adapter *);
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index 743038490104..f2ebf927e4bc 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -902,6 +902,49 @@ struct igb_reg_test {
#define TABLE64_TEST_LO 5
#define TABLE64_TEST_HI 6
+/* i350 reg test */
+static struct igb_reg_test reg_test_i350[] = {
+ { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
+ { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
+ { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFF0000, 0xFFFF0000 },
+ { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+ { E1000_RDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_RDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+ /* RDH is read-only for i350, only test RDT. */
+ { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_RDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
+ { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
+ { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+ { E1000_TDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_TDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_TDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+ { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
+ { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
+ { E1000_RA, 0, 16, TABLE64_TEST_LO,
+ 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RA, 0, 16, TABLE64_TEST_HI,
+ 0xC3FFFFFF, 0xFFFFFFFF },
+ { E1000_RA2, 0, 16, TABLE64_TEST_LO,
+ 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RA2, 0, 16, TABLE64_TEST_HI,
+ 0xC3FFFFFF, 0xFFFFFFFF },
+ { E1000_MTA, 0, 128, TABLE32_TEST,
+ 0xFFFFFFFF, 0xFFFFFFFF },
+ { 0, 0, 0, 0 }
+};
+
/* 82580 reg test */
static struct igb_reg_test reg_test_82580[] = {
{ E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
@@ -1077,6 +1120,10 @@ static int igb_reg_test(struct igb_adapter *adapter, u64 *data)
u32 i, toggle;
switch (adapter->hw.mac.type) {
+ case e1000_i350:
+ test = reg_test_i350;
+ toggle = 0x7FEFF3FF;
+ break;
case e1000_82580:
test = reg_test_82580;
toggle = 0x7FEFF3FF;
@@ -1238,6 +1285,9 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
case e1000_82580:
ics_mask = 0x77DCFED5;
break;
+ case e1000_i350:
+ ics_mask = 0x77DCFED5;
+ break;
default:
ics_mask = 0x7FFFFFFF;
break;
@@ -1344,7 +1394,7 @@ static int igb_setup_desc_rings(struct igb_adapter *adapter)
/* Setup Tx descriptor ring and Tx buffers */
tx_ring->count = IGB_DEFAULT_TXD;
- tx_ring->pdev = adapter->pdev;
+ tx_ring->dev = &adapter->pdev->dev;
tx_ring->netdev = adapter->netdev;
tx_ring->reg_idx = adapter->vfs_allocated_count;
@@ -1358,7 +1408,7 @@ static int igb_setup_desc_rings(struct igb_adapter *adapter)
/* Setup Rx descriptor ring and Rx buffers */
rx_ring->count = IGB_DEFAULT_RXD;
- rx_ring->pdev = adapter->pdev;
+ rx_ring->dev = &adapter->pdev->dev;
rx_ring->netdev = adapter->netdev;
rx_ring->rx_buffer_len = IGB_RXBUFFER_2048;
rx_ring->reg_idx = adapter->vfs_allocated_count;
@@ -1554,10 +1604,10 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
buffer_info = &rx_ring->buffer_info[rx_ntc];
/* unmap rx buffer, will be remapped by alloc_rx_buffers */
- pci_unmap_single(rx_ring->pdev,
+ dma_unmap_single(rx_ring->dev,
buffer_info->dma,
rx_ring->rx_buffer_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
buffer_info->dma = 0;
/* verify contents of skb */
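Throughout the igb and igbvf hunks the deprecated pci_map_*/pci_unmap_*/pci_alloc_consistent wrappers are replaced by the generic DMA API on &pdev->dev, with PCI_DMA_{TO,FROM}DEVICE becoming DMA_{TO,FROM}_DEVICE. A minimal sketch of the streaming-mapping pattern under those assumptions, where dev, skb and len stand in for the surrounding driver context:

	dma_addr_t dma = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, dma))
		return -ENOMEM;			/* mapping failed */

	/* ... hardware owns the buffer here ... */

	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);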
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index c9baa2aa98cd..3881918f5382 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -62,6 +62,10 @@ static const struct e1000_info *igb_info_tbl[] = {
};
static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
@@ -197,6 +201,336 @@ MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
+struct igb_reg_info {
+ u32 ofs;
+ char *name;
+};
+
+static const struct igb_reg_info igb_reg_info_tbl[] = {
+
+ /* General Registers */
+ {E1000_CTRL, "CTRL"},
+ {E1000_STATUS, "STATUS"},
+ {E1000_CTRL_EXT, "CTRL_EXT"},
+
+ /* Interrupt Registers */
+ {E1000_ICR, "ICR"},
+
+ /* RX Registers */
+ {E1000_RCTL, "RCTL"},
+ {E1000_RDLEN(0), "RDLEN"},
+ {E1000_RDH(0), "RDH"},
+ {E1000_RDT(0), "RDT"},
+ {E1000_RXDCTL(0), "RXDCTL"},
+ {E1000_RDBAL(0), "RDBAL"},
+ {E1000_RDBAH(0), "RDBAH"},
+
+ /* TX Registers */
+ {E1000_TCTL, "TCTL"},
+ {E1000_TDBAL(0), "TDBAL"},
+ {E1000_TDBAH(0), "TDBAH"},
+ {E1000_TDLEN(0), "TDLEN"},
+ {E1000_TDH(0), "TDH"},
+ {E1000_TDT(0), "TDT"},
+ {E1000_TXDCTL(0), "TXDCTL"},
+ {E1000_TDFH, "TDFH"},
+ {E1000_TDFT, "TDFT"},
+ {E1000_TDFHS, "TDFHS"},
+ {E1000_TDFPC, "TDFPC"},
+
+ /* List Terminator */
+ {}
+};
+
+/*
+ * igb_regdump - register printout routine
+ */
+static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
+{
+ int n = 0;
+ char rname[16];
+ u32 regs[8];
+
+ switch (reginfo->ofs) {
+ case E1000_RDLEN(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(E1000_RDLEN(n));
+ break;
+ case E1000_RDH(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(E1000_RDH(n));
+ break;
+ case E1000_RDT(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(E1000_RDT(n));
+ break;
+ case E1000_RXDCTL(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(E1000_RXDCTL(n));
+ break;
+ case E1000_RDBAL(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(E1000_RDBAL(n));
+ break;
+ case E1000_RDBAH(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(E1000_RDBAH(n));
+ break;
+ case E1000_TDBAL(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(E1000_TDBAL(n));
+ break;
+ case E1000_TDBAH(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(E1000_TDBAH(n));
+ break;
+ case E1000_TDLEN(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(E1000_TDLEN(n));
+ break;
+ case E1000_TDH(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(E1000_TDH(n));
+ break;
+ case E1000_TDT(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(E1000_TDT(n));
+ break;
+ case E1000_TXDCTL(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(E1000_TXDCTL(n));
+ break;
+ default:
+ printk(KERN_INFO "%-15s %08x\n",
+ reginfo->name, rd32(reginfo->ofs));
+ return;
+ }
+
+ snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
+ printk(KERN_INFO "%-15s ", rname);
+ for (n = 0; n < 4; n++)
+ printk(KERN_CONT "%08x ", regs[n]);
+ printk(KERN_CONT "\n");
+}
+
+/*
+ * igb_dump - Print registers, tx-rings and rx-rings
+ */
+static void igb_dump(struct igb_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct e1000_hw *hw = &adapter->hw;
+ struct igb_reg_info *reginfo;
+ int n = 0;
+ struct igb_ring *tx_ring;
+ union e1000_adv_tx_desc *tx_desc;
+ struct my_u0 { u64 a; u64 b; } *u0;
+ struct igb_buffer *buffer_info;
+ struct igb_ring *rx_ring;
+ union e1000_adv_rx_desc *rx_desc;
+ u32 staterr;
+ int i = 0;
+
+ if (!netif_msg_hw(adapter))
+ return;
+
+ /* Print netdevice Info */
+ if (netdev) {
+ dev_info(&adapter->pdev->dev, "Net device Info\n");
+ printk(KERN_INFO "Device Name state "
+ "trans_start last_rx\n");
+ printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
+ netdev->name,
+ netdev->state,
+ netdev->trans_start,
+ netdev->last_rx);
+ }
+
+ /* Print Registers */
+ dev_info(&adapter->pdev->dev, "Register Dump\n");
+ printk(KERN_INFO " Register Name Value\n");
+ for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl;
+ reginfo->name; reginfo++) {
+ igb_regdump(hw, reginfo);
+ }
+
+ /* Print TX Ring Summary */
+ if (!netdev || !netif_running(netdev))
+ goto exit;
+
+ dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
+ printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma ]"
+ " leng ntw timestamp\n");
+ for (n = 0; n < adapter->num_tx_queues; n++) {
+ tx_ring = adapter->tx_ring[n];
+ buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
+ printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
+ n, tx_ring->next_to_use, tx_ring->next_to_clean,
+ (u64)buffer_info->dma,
+ buffer_info->length,
+ buffer_info->next_to_watch,
+ (u64)buffer_info->time_stamp);
+ }
+
+ /* Print TX Rings */
+ if (!netif_msg_tx_done(adapter))
+ goto rx_ring_summary;
+
+ dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
+
+ /* Transmit Descriptor Formats
+ *
+ * Advanced Transmit Descriptor
+ * +--------------------------------------------------------------+
+ * 0 | Buffer Address [63:0] |
+ * +--------------------------------------------------------------+
+ * 8 | PAYLEN | PORTS |CC|IDX | STA | DCMD |DTYP|MAC|RSV| DTALEN |
+ * +--------------------------------------------------------------+
+ * 63 46 45 40 39 38 36 35 32 31 24 15 0
+ */
+
+ for (n = 0; n < adapter->num_tx_queues; n++) {
+ tx_ring = adapter->tx_ring[n];
+ printk(KERN_INFO "------------------------------------\n");
+ printk(KERN_INFO "TX QUEUE INDEX = %d\n", tx_ring->queue_index);
+ printk(KERN_INFO "------------------------------------\n");
+ printk(KERN_INFO "T [desc] [address 63:0 ] "
+ "[PlPOCIStDDM Ln] [bi->dma ] "
+ "leng ntw timestamp bi->skb\n");
+
+ for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
+ tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
+ buffer_info = &tx_ring->buffer_info[i];
+ u0 = (struct my_u0 *)tx_desc;
+ printk(KERN_INFO "T [0x%03X] %016llX %016llX %016llX"
+ " %04X %3X %016llX %p", i,
+ le64_to_cpu(u0->a),
+ le64_to_cpu(u0->b),
+ (u64)buffer_info->dma,
+ buffer_info->length,
+ buffer_info->next_to_watch,
+ (u64)buffer_info->time_stamp,
+ buffer_info->skb);
+ if (i == tx_ring->next_to_use &&
+ i == tx_ring->next_to_clean)
+ printk(KERN_CONT " NTC/U\n");
+ else if (i == tx_ring->next_to_use)
+ printk(KERN_CONT " NTU\n");
+ else if (i == tx_ring->next_to_clean)
+ printk(KERN_CONT " NTC\n");
+ else
+ printk(KERN_CONT "\n");
+
+ if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
+ print_hex_dump(KERN_INFO, "",
+ DUMP_PREFIX_ADDRESS,
+ 16, 1, phys_to_virt(buffer_info->dma),
+ buffer_info->length, true);
+ }
+ }
+
+ /* Print RX Rings Summary */
+rx_ring_summary:
+ dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
+ printk(KERN_INFO "Queue [NTU] [NTC]\n");
+ for (n = 0; n < adapter->num_rx_queues; n++) {
+ rx_ring = adapter->rx_ring[n];
+ printk(KERN_INFO " %5d %5X %5X\n", n,
+ rx_ring->next_to_use, rx_ring->next_to_clean);
+ }
+
+ /* Print RX Rings */
+ if (!netif_msg_rx_status(adapter))
+ goto exit;
+
+ dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
+
+ /* Advanced Receive Descriptor (Read) Format
+ * 63 1 0
+ * +-----------------------------------------------------+
+ * 0 | Packet Buffer Address [63:1] |A0/NSE|
+ * +----------------------------------------------+------+
+ * 8 | Header Buffer Address [63:1] | DD |
+ * +-----------------------------------------------------+
+ *
+ *
+ * Advanced Receive Descriptor (Write-Back) Format
+ *
+ * 63 48 47 32 31 30 21 20 17 16 4 3 0
+ * +------------------------------------------------------+
+ * 0 | Packet IP |SPH| HDR_LEN | RSV|Packet| RSS |
+ * | Checksum Ident | | | | Type | Type |
+ * +------------------------------------------------------+
+ * 8 | VLAN Tag | Length | Extended Error | Extended Status |
+ * +------------------------------------------------------+
+ * 63 48 47 32 31 20 19 0
+ */
+
+ for (n = 0; n < adapter->num_rx_queues; n++) {
+ rx_ring = adapter->rx_ring[n];
+ printk(KERN_INFO "------------------------------------\n");
+ printk(KERN_INFO "RX QUEUE INDEX = %d\n", rx_ring->queue_index);
+ printk(KERN_INFO "------------------------------------\n");
+ printk(KERN_INFO "R [desc] [ PktBuf A0] "
+ "[ HeadBuf DD] [bi->dma ] [bi->skb] "
+ "<-- Adv Rx Read format\n");
+ printk(KERN_INFO "RWB[desc] [PcsmIpSHl PtRs] "
+ "[vl er S cks ln] ---------------- [bi->skb] "
+ "<-- Adv Rx Write-Back format\n");
+
+ for (i = 0; i < rx_ring->count; i++) {
+ buffer_info = &rx_ring->buffer_info[i];
+ rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
+ u0 = (struct my_u0 *)rx_desc;
+ staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+ if (staterr & E1000_RXD_STAT_DD) {
+ /* Descriptor Done */
+ printk(KERN_INFO "RWB[0x%03X] %016llX "
+ "%016llX ---------------- %p", i,
+ le64_to_cpu(u0->a),
+ le64_to_cpu(u0->b),
+ buffer_info->skb);
+ } else {
+ printk(KERN_INFO "R [0x%03X] %016llX "
+ "%016llX %016llX %p", i,
+ le64_to_cpu(u0->a),
+ le64_to_cpu(u0->b),
+ (u64)buffer_info->dma,
+ buffer_info->skb);
+
+ if (netif_msg_pktdata(adapter)) {
+ print_hex_dump(KERN_INFO, "",
+ DUMP_PREFIX_ADDRESS,
+ 16, 1,
+ phys_to_virt(buffer_info->dma),
+ rx_ring->rx_buffer_len, true);
+ if (rx_ring->rx_buffer_len
+ < IGB_RXBUFFER_1024)
+ print_hex_dump(KERN_INFO, "",
+ DUMP_PREFIX_ADDRESS,
+ 16, 1,
+ phys_to_virt(
+ buffer_info->page_dma +
+ buffer_info->page_offset),
+ PAGE_SIZE/2, true);
+ }
+ }
+
+ if (i == rx_ring->next_to_use)
+ printk(KERN_CONT " NTU\n");
+ else if (i == rx_ring->next_to_clean)
+ printk(KERN_CONT " NTC\n");
+ else
+ printk(KERN_CONT "\n");
+
+ }
+ }
+
+exit:
+ return;
+}
+
+
/**
* igb_read_clock - read raw cycle counter (to be used by time counter)
*/
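The igb_dump()/igb_regdump() helpers added above are silent unless the adapter's message level includes the hardware bit, so they cost nothing on a normally configured interface. A sketch of the gate, assuming the standard msg_enable field that the netif_msg_hw() test reads:

	adapter->msg_enable |= NETIF_MSG_HW;	/* e.g. via ethtool set_msglevel */
	igb_dump(adapter);			/* now prints registers and rings */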
@@ -223,41 +557,15 @@ static cycle_t igb_read_clock(const struct cyclecounter *tc)
return stamp;
}
-#ifdef DEBUG
/**
- * igb_get_hw_dev_name - return device name string
+ * igb_get_hw_dev - return device
* used by hardware layer to print debugging information
**/
-char *igb_get_hw_dev_name(struct e1000_hw *hw)
+struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
{
struct igb_adapter *adapter = hw->back;
- return adapter->netdev->name;
-}
-
-/**
- * igb_get_time_str - format current NIC and system time as string
- */
-static char *igb_get_time_str(struct igb_adapter *adapter,
- char buffer[160])
-{
- cycle_t hw = adapter->cycles.read(&adapter->cycles);
- struct timespec nic = ns_to_timespec(timecounter_read(&adapter->clock));
- struct timespec sys;
- struct timespec delta;
- getnstimeofday(&sys);
-
- delta = timespec_sub(nic, sys);
-
- sprintf(buffer,
- "HW %llu, NIC %ld.%09lus, SYS %ld.%09lus, NIC-SYS %lds + %09luns",
- hw,
- (long)nic.tv_sec, nic.tv_nsec,
- (long)sys.tv_sec, sys.tv_nsec,
- (long)delta.tv_sec, delta.tv_nsec);
-
- return buffer;
+ return adapter->netdev;
}
-#endif
/**
* igb_init_module - Driver Registration Routine
@@ -328,6 +636,7 @@ static void igb_cache_ring_register(struct igb_adapter *adapter)
}
case e1000_82575:
case e1000_82580:
+ case e1000_i350:
default:
for (; i < adapter->num_rx_queues; i++)
adapter->rx_ring[i]->reg_idx = rbase_offset + i;
@@ -371,7 +680,7 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
goto err;
ring->count = adapter->tx_ring_count;
ring->queue_index = i;
- ring->pdev = adapter->pdev;
+ ring->dev = &adapter->pdev->dev;
ring->netdev = adapter->netdev;
/* For 82575, context index must be unique per ring. */
if (adapter->hw.mac.type == e1000_82575)
@@ -385,7 +694,7 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
goto err;
ring->count = adapter->rx_ring_count;
ring->queue_index = i;
- ring->pdev = adapter->pdev;
+ ring->dev = &adapter->pdev->dev;
ring->netdev = adapter->netdev;
ring->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
ring->flags = IGB_RING_FLAG_RX_CSUM; /* enable rx checksum */
@@ -471,6 +780,7 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
q_vector->eims_value = 1 << msix_vector;
break;
case e1000_82580:
+ case e1000_i350:
/* 82580 uses the same table-based approach as 82576 but has fewer
entries; as a result we carry over for queues greater than 4. */
if (rx_queue > IGB_N0_QUEUE) {
@@ -551,6 +861,7 @@ static void igb_configure_msix(struct igb_adapter *adapter)
case e1000_82576:
case e1000_82580:
+ case e1000_i350:
/* Turn on MSI-X capability first, or our settings
* won't stick. And it will take days to debug. */
wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
@@ -743,7 +1054,6 @@ msi_only:
out:
/* Notify the stack of the (possibly) reduced Tx Queue count. */
adapter->netdev->real_num_tx_queues = adapter->num_tx_queues;
- return;
}
/**
@@ -1253,6 +1563,7 @@ void igb_reset(struct igb_adapter *adapter)
* To take effect CTRL.RST is required.
*/
switch (mac->type) {
+ case e1000_i350:
case e1000_82580:
pba = rd32(E1000_RXPBS);
pba = igb_rxpbs_adjust_82580(pba);
@@ -1416,15 +1727,15 @@ static int __devinit igb_probe(struct pci_dev *pdev,
return err;
pci_using_dac = 0;
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
if (!err) {
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
if (!err)
pci_using_dac = 1;
} else {
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev, "No usable DMA "
"configuration, aborting\n");
@@ -1656,6 +1967,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
netdev->name,
((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
+ (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" :
"unknown"),
((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
(hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
@@ -1826,6 +2138,7 @@ static void igb_init_hw_timer(struct igb_adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
switch (hw->mac.type) {
+ case e1000_i350:
case e1000_82580:
memset(&adapter->cycles, 0, sizeof(adapter->cycles));
adapter->cycles.read = igb_read_clock;
@@ -2096,7 +2409,7 @@ static int igb_close(struct net_device *netdev)
**/
int igb_setup_tx_resources(struct igb_ring *tx_ring)
{
- struct pci_dev *pdev = tx_ring->pdev;
+ struct device *dev = tx_ring->dev;
int size;
size = sizeof(struct igb_buffer) * tx_ring->count;
@@ -2109,9 +2422,10 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring)
tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
tx_ring->size = ALIGN(tx_ring->size, 4096);
- tx_ring->desc = pci_alloc_consistent(pdev,
- tx_ring->size,
- &tx_ring->dma);
+ tx_ring->desc = dma_alloc_coherent(dev,
+ tx_ring->size,
+ &tx_ring->dma,
+ GFP_KERNEL);
if (!tx_ring->desc)
goto err;
@@ -2122,7 +2436,7 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring)
err:
vfree(tx_ring->buffer_info);
- dev_err(&pdev->dev,
+ dev_err(dev,
"Unable to allocate memory for the transmit descriptor ring\n");
return -ENOMEM;
}
@@ -2246,7 +2560,7 @@ static void igb_configure_tx(struct igb_adapter *adapter)
**/
int igb_setup_rx_resources(struct igb_ring *rx_ring)
{
- struct pci_dev *pdev = rx_ring->pdev;
+ struct device *dev = rx_ring->dev;
int size, desc_len;
size = sizeof(struct igb_buffer) * rx_ring->count;
@@ -2261,8 +2575,10 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
rx_ring->size = rx_ring->count * desc_len;
rx_ring->size = ALIGN(rx_ring->size, 4096);
- rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
- &rx_ring->dma);
+ rx_ring->desc = dma_alloc_coherent(dev,
+ rx_ring->size,
+ &rx_ring->dma,
+ GFP_KERNEL);
if (!rx_ring->desc)
goto err;
@@ -2275,8 +2591,8 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
err:
vfree(rx_ring->buffer_info);
rx_ring->buffer_info = NULL;
- dev_err(&pdev->dev, "Unable to allocate memory for "
- "the receive descriptor ring\n");
+ dev_err(dev, "Unable to allocate memory for the receive descriptor"
+ " ring\n");
return -ENOMEM;
}
@@ -2339,6 +2655,7 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
if (adapter->vfs_allocated_count) {
/* 82575 and 82576 supports 2 RSS queues for VMDq */
switch (hw->mac.type) {
+ case e1000_i350:
case e1000_82580:
num_rx_queues = 1;
shift = 0;
@@ -2590,6 +2907,8 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
E1000_SRRCTL_BSIZEPKT_SHIFT;
srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
}
+ if (hw->mac.type == e1000_82580)
+ srrctl |= E1000_SRRCTL_TIMESTAMP;
/* Only set Drop Enable if we are supporting multiple queues */
if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
srrctl |= E1000_SRRCTL_DROP_EN;
@@ -2649,8 +2968,8 @@ void igb_free_tx_resources(struct igb_ring *tx_ring)
if (!tx_ring->desc)
return;
- pci_free_consistent(tx_ring->pdev, tx_ring->size,
- tx_ring->desc, tx_ring->dma);
+ dma_free_coherent(tx_ring->dev, tx_ring->size,
+ tx_ring->desc, tx_ring->dma);
tx_ring->desc = NULL;
}
@@ -2674,15 +2993,15 @@ void igb_unmap_and_free_tx_resource(struct igb_ring *tx_ring,
{
if (buffer_info->dma) {
if (buffer_info->mapped_as_page)
- pci_unmap_page(tx_ring->pdev,
+ dma_unmap_page(tx_ring->dev,
buffer_info->dma,
buffer_info->length,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
else
- pci_unmap_single(tx_ring->pdev,
+ dma_unmap_single(tx_ring->dev,
buffer_info->dma,
buffer_info->length,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
buffer_info->dma = 0;
}
if (buffer_info->skb) {
@@ -2753,8 +3072,8 @@ void igb_free_rx_resources(struct igb_ring *rx_ring)
if (!rx_ring->desc)
return;
- pci_free_consistent(rx_ring->pdev, rx_ring->size,
- rx_ring->desc, rx_ring->dma);
+ dma_free_coherent(rx_ring->dev, rx_ring->size,
+ rx_ring->desc, rx_ring->dma);
rx_ring->desc = NULL;
}
@@ -2790,10 +3109,10 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
for (i = 0; i < rx_ring->count; i++) {
buffer_info = &rx_ring->buffer_info[i];
if (buffer_info->dma) {
- pci_unmap_single(rx_ring->pdev,
+ dma_unmap_single(rx_ring->dev,
buffer_info->dma,
rx_ring->rx_buffer_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
buffer_info->dma = 0;
}
@@ -2802,10 +3121,10 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
buffer_info->skb = NULL;
}
if (buffer_info->page_dma) {
- pci_unmap_page(rx_ring->pdev,
+ dma_unmap_page(rx_ring->dev,
buffer_info->page_dma,
PAGE_SIZE / 2,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
buffer_info->page_dma = 0;
}
if (buffer_info->page) {
@@ -2876,7 +3195,7 @@ static int igb_write_mc_addr_list(struct net_device *netdev)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- struct dev_mc_list *mc_ptr;
+ struct netdev_hw_addr *ha;
u8 *mta_list;
int i;
@@ -2893,8 +3212,8 @@ static int igb_write_mc_addr_list(struct net_device *netdev)
/* The shared function expects a packed array of only addresses. */
i = 0;
- netdev_for_each_mc_addr(mc_ptr, netdev)
- memcpy(mta_list + (i++ * ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
+ netdev_for_each_mc_addr(ha, netdev)
+ memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
igb_update_mc_addr_list(hw, mta_list, i);
kfree(mta_list);
@@ -3397,8 +3716,6 @@ set_itr_now:
q_vector->itr_val = new_itr;
q_vector->set_itr = 1;
}
-
- return;
}
#define IGB_TX_FLAGS_CSUM 0x00000001
@@ -3493,7 +3810,7 @@ static inline bool igb_tx_csum_adv(struct igb_ring *tx_ring,
struct sk_buff *skb, u32 tx_flags)
{
struct e1000_adv_tx_context_desc *context_desc;
- struct pci_dev *pdev = tx_ring->pdev;
+ struct device *dev = tx_ring->dev;
struct igb_buffer *buffer_info;
u32 info = 0, tu_cmd = 0;
unsigned int i;
@@ -3544,7 +3861,7 @@ static inline bool igb_tx_csum_adv(struct igb_ring *tx_ring,
break;
default:
if (unlikely(net_ratelimit()))
- dev_warn(&pdev->dev,
+ dev_warn(dev,
"partial checksum but proto=%x!\n",
skb->protocol);
break;
@@ -3578,59 +3895,61 @@ static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
unsigned int first)
{
struct igb_buffer *buffer_info;
- struct pci_dev *pdev = tx_ring->pdev;
- unsigned int len = skb_headlen(skb);
+ struct device *dev = tx_ring->dev;
+ unsigned int hlen = skb_headlen(skb);
unsigned int count = 0, i;
unsigned int f;
+ u16 gso_segs = skb_shinfo(skb)->gso_segs ?: 1;
i = tx_ring->next_to_use;
buffer_info = &tx_ring->buffer_info[i];
- BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
- buffer_info->length = len;
+ BUG_ON(hlen >= IGB_MAX_DATA_PER_TXD);
+ buffer_info->length = hlen;
/* set time_stamp *before* dma to help avoid a possible race */
buffer_info->time_stamp = jiffies;
buffer_info->next_to_watch = i;
- buffer_info->dma = pci_map_single(pdev, skb->data, len,
- PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(pdev, buffer_info->dma))
+ buffer_info->dma = dma_map_single(dev, skb->data, hlen,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, buffer_info->dma))
goto dma_error;
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
- struct skb_frag_struct *frag;
+ struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[f];
+ unsigned int len = frag->size;
count++;
i++;
if (i == tx_ring->count)
i = 0;
- frag = &skb_shinfo(skb)->frags[f];
- len = frag->size;
-
buffer_info = &tx_ring->buffer_info[i];
BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
buffer_info->length = len;
buffer_info->time_stamp = jiffies;
buffer_info->next_to_watch = i;
buffer_info->mapped_as_page = true;
- buffer_info->dma = pci_map_page(pdev,
+ buffer_info->dma = dma_map_page(dev,
frag->page,
frag->page_offset,
len,
- PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(pdev, buffer_info->dma))
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, buffer_info->dma))
goto dma_error;
}
tx_ring->buffer_info[i].skb = skb;
- tx_ring->buffer_info[i].gso_segs = skb_shinfo(skb)->gso_segs ?: 1;
+ tx_ring->buffer_info[i].shtx = skb_shinfo(skb)->tx_flags;
+ /* multiply data chunks by size of headers */
+ tx_ring->buffer_info[i].bytecount = ((gso_segs - 1) * hlen) + skb->len;
+ tx_ring->buffer_info[i].gso_segs = gso_segs;
tx_ring->buffer_info[first].next_to_watch = i;
return ++count;
dma_error:
- dev_err(&pdev->dev, "TX DMA map failed\n");
+ dev_err(dev, "TX DMA map failed\n");
/* clear timestamp and dma mappings for failed buffer_info mapping */
buffer_info->dma = 0;
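Precomputing bytecount in the map path lets the completion path charge bytes without touching the skb again; for a TSO frame the count includes one extra copy of the headers per additional segment. A worked example with made-up numbers:

	/* hypothetical TSO skb: skb->len = 7266 (66 bytes of headers plus
	 * 7200 bytes of payload), hlen = 66, gso_segs = 5; each of the 5
	 * wire segments repeats the headers, so
	 *	bytecount = (5 - 1) * 66 + 7266 = 7530 bytes
	 */
	bytecount = ((gso_segs - 1) * hlen) + skb->len;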
@@ -3868,6 +4187,8 @@ static void igb_reset_task(struct work_struct *work)
struct igb_adapter *adapter;
adapter = container_of(work, struct igb_adapter, reset_task);
+ igb_dump(adapter);
+ netdev_err(adapter->netdev, "Reset adapter\n");
igb_reinit_locked(adapter);
}
@@ -3920,6 +4241,9 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
* i.e. RXBUFFER_2048 --> size-4096 slab
*/
+ if (adapter->hw.mac.type == e1000_82580)
+ max_frame += IGB_TS_HDR_LEN;
+
if (max_frame <= IGB_RXBUFFER_1024)
rx_buffer_len = IGB_RXBUFFER_1024;
else if (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)
@@ -3927,6 +4251,14 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
else
rx_buffer_len = IGB_RXBUFFER_128;
+ if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN + IGB_TS_HDR_LEN) ||
+ (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE + IGB_TS_HDR_LEN))
+ rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE + IGB_TS_HDR_LEN;
+
+ if ((adapter->hw.mac.type == e1000_82580) &&
+ (rx_buffer_len == IGB_RXBUFFER_128))
+ rx_buffer_len += IGB_RXBUFFER_64;
+
if (netif_running(netdev))
igb_down(adapter);
@@ -4955,22 +5287,21 @@ static void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
/**
* igb_tx_hwtstamp - utility function which checks for TX time stamp
* @q_vector: pointer to q_vector containing needed info
- * @skb: packet that was just sent
+ * @buffer: pointer to igb_buffer structure
*
* If we were asked to do hardware stamping and such a time stamp is
* available, then it must have been for this skb here because we only
* allow only one such packet into the queue.
*/
-static void igb_tx_hwtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb)
+static void igb_tx_hwtstamp(struct igb_q_vector *q_vector, struct igb_buffer *buffer_info)
{
struct igb_adapter *adapter = q_vector->adapter;
- union skb_shared_tx *shtx = skb_tx(skb);
struct e1000_hw *hw = &adapter->hw;
struct skb_shared_hwtstamps shhwtstamps;
u64 regval;
/* if skb does not support hw timestamp or TX stamp not valid exit */
- if (likely(!shtx->hardware) ||
+ if (likely(!buffer_info->shtx.hardware) ||
!(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID))
return;
@@ -4978,7 +5309,7 @@ static void igb_tx_hwtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb)
regval |= (u64)rd32(E1000_TXSTMPH) << 32;
igb_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
- skb_tstamp_tx(skb, &shhwtstamps);
+ skb_tstamp_tx(buffer_info->skb, &shhwtstamps);
}
/**
@@ -4993,7 +5324,6 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
struct net_device *netdev = tx_ring->netdev;
struct e1000_hw *hw = &adapter->hw;
struct igb_buffer *buffer_info;
- struct sk_buff *skb;
union e1000_adv_tx_desc *tx_desc, *eop_desc;
unsigned int total_bytes = 0, total_packets = 0;
unsigned int i, eop, count = 0;
@@ -5009,19 +5339,12 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
buffer_info = &tx_ring->buffer_info[i];
cleaned = (i == eop);
- skb = buffer_info->skb;
- if (skb) {
- unsigned int segs, bytecount;
+ if (buffer_info->skb) {
+ total_bytes += buffer_info->bytecount;
/* gso_segs is currently only valid for tcp */
- segs = buffer_info->gso_segs;
- /* multiply data chunks by size of headers */
- bytecount = ((segs - 1) * skb_headlen(skb)) +
- skb->len;
- total_packets += segs;
- total_bytes += bytecount;
-
- igb_tx_hwtstamp(q_vector, skb);
+ total_packets += buffer_info->gso_segs;
+ igb_tx_hwtstamp(q_vector, buffer_info);
}
igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
@@ -5061,7 +5384,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
!(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {
/* detected Tx unit hang */
- dev_err(&tx_ring->pdev->dev,
+ dev_err(tx_ring->dev,
"Detected Tx Unit Hang\n"
" Tx Queue <%d>\n"
" TDH <%x>\n"
@@ -5140,10 +5463,10 @@ static inline void igb_rx_checksum_adv(struct igb_ring *ring,
if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
skb->ip_summed = CHECKSUM_UNNECESSARY;
- dev_dbg(&ring->pdev->dev, "cksum success: bits %08X\n", status_err);
+ dev_dbg(ring->dev, "cksum success: bits %08X\n", status_err);
}
-static inline void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr,
+static void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr,
struct sk_buff *skb)
{
struct igb_adapter *adapter = q_vector->adapter;
@@ -5161,13 +5484,18 @@ static inline void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr,
* If nothing went wrong, then it should have a skb_shared_tx that we
* can turn into a skb_shared_hwtstamps.
*/
- if (likely(!(staterr & E1000_RXDADV_STAT_TS)))
- return;
- if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
- return;
+ if (staterr & E1000_RXDADV_STAT_TSIP) {
+ u32 *stamp = (u32 *)skb->data;
+ regval = le32_to_cpu(*(stamp + 2));
+ regval |= (u64)le32_to_cpu(*(stamp + 3)) << 32;
+ skb_pull(skb, IGB_TS_HDR_LEN);
+ } else {
+ if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
+ return;
- regval = rd32(E1000_RXSTMPL);
- regval |= (u64)rd32(E1000_RXSTMPH) << 32;
+ regval = rd32(E1000_RXSTMPL);
+ regval |= (u64)rd32(E1000_RXSTMPH) << 32;
+ }
igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
}
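When E1000_RXDADV_STAT_TSIP is set, the timestamp arrives in the packet buffer itself rather than in the RXSTMPL/RXSTMPH registers. The code above implies a 16-byte prefix (IGB_TS_HDR_LEN) whose third and fourth little-endian dwords carry the 64-bit SYSTIM value, which is then stripped with skb_pull() before the frame continues up the stack. A sketch of that extraction; the layout is inferred from the code, not taken from a datasheet:

	u32 *stamp = (u32 *)skb->data;
	u64 systim = le32_to_cpu(stamp[2]) |
		     ((u64)le32_to_cpu(stamp[3]) << 32);

	skb_pull(skb, IGB_TS_HDR_LEN);	/* drop the 16-byte prefix */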
@@ -5190,7 +5518,7 @@ static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
{
struct igb_ring *rx_ring = q_vector->rx_ring;
struct net_device *netdev = rx_ring->netdev;
- struct pci_dev *pdev = rx_ring->pdev;
+ struct device *dev = rx_ring->dev;
union e1000_adv_rx_desc *rx_desc , *next_rxd;
struct igb_buffer *buffer_info , *next_buffer;
struct sk_buff *skb;
@@ -5230,9 +5558,9 @@ static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
cleaned_count++;
if (buffer_info->dma) {
- pci_unmap_single(pdev, buffer_info->dma,
+ dma_unmap_single(dev, buffer_info->dma,
rx_ring->rx_buffer_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
buffer_info->dma = 0;
if (rx_ring->rx_buffer_len >= IGB_RXBUFFER_1024) {
skb_put(skb, length);
@@ -5242,11 +5570,11 @@ static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
}
if (length) {
- pci_unmap_page(pdev, buffer_info->page_dma,
- PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
+ dma_unmap_page(dev, buffer_info->page_dma,
+ PAGE_SIZE / 2, DMA_FROM_DEVICE);
buffer_info->page_dma = 0;
- skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++,
+ skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
buffer_info->page,
buffer_info->page_offset,
length);
@@ -5275,7 +5603,8 @@ send_up:
goto next_desc;
}
- igb_rx_hwtstamp(q_vector, staterr, skb);
+ if (staterr & (E1000_RXDADV_STAT_TSIP | E1000_RXDADV_STAT_TS))
+ igb_rx_hwtstamp(q_vector, staterr, skb);
total_bytes += skb->len;
total_packets++;
@@ -5350,12 +5679,12 @@ void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
buffer_info->page_offset ^= PAGE_SIZE / 2;
}
buffer_info->page_dma =
- pci_map_page(rx_ring->pdev, buffer_info->page,
+ dma_map_page(rx_ring->dev, buffer_info->page,
buffer_info->page_offset,
PAGE_SIZE / 2,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(rx_ring->pdev,
- buffer_info->page_dma)) {
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(rx_ring->dev,
+ buffer_info->page_dma)) {
buffer_info->page_dma = 0;
rx_ring->rx_stats.alloc_failed++;
goto no_buffers;
@@ -5373,12 +5702,12 @@ void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
buffer_info->skb = skb;
}
if (!buffer_info->dma) {
- buffer_info->dma = pci_map_single(rx_ring->pdev,
+ buffer_info->dma = dma_map_single(rx_ring->dev,
skb->data,
bufsz,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(rx_ring->pdev,
- buffer_info->dma)) {
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(rx_ring->dev,
+ buffer_info->dma)) {
buffer_info->dma = 0;
rx_ring->rx_stats.alloc_failed++;
goto no_buffers;
@@ -5555,6 +5884,16 @@ static int igb_hwtstamp_ioctl(struct net_device *netdev,
return 0;
}
+ /*
+ * Per-packet timestamping only works if all packets are
+ * timestamped, so enable timestamping on all packets as
+ * long as an Rx filter has been configured.
+ */
+ if ((hw->mac.type == e1000_82580) && tsync_rx_ctl) {
+ tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
+ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
+ }
+
/* enable/disable TX */
regval = rd32(E1000_TSYNCTXCTL);
regval &= ~E1000_TSYNCTXCTL_ENABLED;
@@ -6131,19 +6470,25 @@ static void igb_vmm_control(struct igb_adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
u32 reg;
- /* replication is not supported for 82575 */
- if (hw->mac.type == e1000_82575)
+ switch (hw->mac.type) {
+ case e1000_82575:
+ default:
+ /* replication is not supported for 82575 */
return;
-
- /* enable replication vlan tag stripping */
- reg = rd32(E1000_RPLOLR);
- reg |= E1000_RPLOLR_STRVLAN;
- wr32(E1000_RPLOLR, reg);
-
- /* notify HW that the MAC is adding vlan tags */
- reg = rd32(E1000_DTXCTL);
- reg |= E1000_DTXCTL_VLAN_ADDED;
- wr32(E1000_DTXCTL, reg);
+ case e1000_82576:
+ /* notify HW that the MAC is adding vlan tags */
+ reg = rd32(E1000_DTXCTL);
+ reg |= E1000_DTXCTL_VLAN_ADDED;
+ wr32(E1000_DTXCTL, reg);
+ case e1000_82580:
+ /* enable replication vlan tag stripping */
+ reg = rd32(E1000_RPLOLR);
+ reg |= E1000_RPLOLR_STRVLAN;
+ wr32(E1000_RPLOLR, reg);
+ case e1000_i350:
+ /* none of the above registers are supported by i350 */
+ break;
+ }
if (adapter->vfs_allocated_count) {
igb_vmdq_set_loopback_pf(hw, true);
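The rewritten igb_vmm_control() switch relies on deliberate fall-through so that each MAC only touches the registers it actually implements: 82576 falls through into the 82580 case, which falls through into the i350 break. Spelled out as straight-line code, the coverage the switch encodes looks like this (a sketch equivalent to the hunk above, not a replacement for it):

	/* 82575 and anything unrecognized bail out early */
	if (hw->mac.type != e1000_82576 && hw->mac.type != e1000_82580 &&
	    hw->mac.type != e1000_i350)
		return;

	if (hw->mac.type == e1000_82576) {	/* only 82576 writes DTXCTL */
		reg = rd32(E1000_DTXCTL);
		reg |= E1000_DTXCTL_VLAN_ADDED;
		wr32(E1000_DTXCTL, reg);
	}
	if (hw->mac.type == e1000_82576 || hw->mac.type == e1000_82580) {
		reg = rd32(E1000_RPLOLR);	/* i350 skips RPLOLR as well */
		reg |= E1000_RPLOLR_STRVLAN;
		wr32(E1000_RPLOLR, reg);
	}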
diff --git a/drivers/net/igbvf/ethtool.c b/drivers/net/igbvf/ethtool.c
index 8afff07ff559..103b3aa1afc2 100644
--- a/drivers/net/igbvf/ethtool.c
+++ b/drivers/net/igbvf/ethtool.c
@@ -390,8 +390,6 @@ static void igbvf_get_wol(struct net_device *netdev,
{
wol->supported = 0;
wol->wolopts = 0;
-
- return;
}
static int igbvf_set_wol(struct net_device *netdev,
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index 1b1edad1eb5e..5e2b2a8c56c6 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -48,6 +48,7 @@
#define DRV_VERSION "1.0.0-k0"
char igbvf_driver_name[] = "igbvf";
const char igbvf_driver_version[] = DRV_VERSION;
+struct pm_qos_request_list *igbvf_driver_pm_qos_req;
static const char igbvf_driver_string[] =
"Intel(R) Virtual Function Network Driver";
static const char igbvf_copyright[] = "Copyright (c) 2009 Intel Corporation.";
@@ -164,10 +165,10 @@ static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,
buffer_info->page_offset ^= PAGE_SIZE / 2;
}
buffer_info->page_dma =
- pci_map_page(pdev, buffer_info->page,
+ dma_map_page(&pdev->dev, buffer_info->page,
buffer_info->page_offset,
PAGE_SIZE / 2,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
}
if (!buffer_info->skb) {
@@ -178,9 +179,9 @@ static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,
}
buffer_info->skb = skb;
- buffer_info->dma = pci_map_single(pdev, skb->data,
+ buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
bufsz,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
}
/* Refresh the desc even if buffer_addrs didn't change because
* each write-back erases this info. */
@@ -268,28 +269,28 @@ static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter,
prefetch(skb->data - NET_IP_ALIGN);
buffer_info->skb = NULL;
if (!adapter->rx_ps_hdr_size) {
- pci_unmap_single(pdev, buffer_info->dma,
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
adapter->rx_buffer_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
buffer_info->dma = 0;
skb_put(skb, length);
goto send_up;
}
if (!skb_shinfo(skb)->nr_frags) {
- pci_unmap_single(pdev, buffer_info->dma,
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
adapter->rx_ps_hdr_size,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
skb_put(skb, hlen);
}
if (length) {
- pci_unmap_page(pdev, buffer_info->page_dma,
+ dma_unmap_page(&pdev->dev, buffer_info->page_dma,
PAGE_SIZE / 2,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
buffer_info->page_dma = 0;
- skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++,
+ skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
buffer_info->page,
buffer_info->page_offset,
length);
@@ -369,15 +370,15 @@ static void igbvf_put_txbuf(struct igbvf_adapter *adapter,
{
if (buffer_info->dma) {
if (buffer_info->mapped_as_page)
- pci_unmap_page(adapter->pdev,
+ dma_unmap_page(&adapter->pdev->dev,
buffer_info->dma,
buffer_info->length,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
else
- pci_unmap_single(adapter->pdev,
+ dma_unmap_single(&adapter->pdev->dev,
buffer_info->dma,
buffer_info->length,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
buffer_info->dma = 0;
}
if (buffer_info->skb) {
@@ -438,8 +439,8 @@ int igbvf_setup_tx_resources(struct igbvf_adapter *adapter,
tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
tx_ring->size = ALIGN(tx_ring->size, 4096);
- tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
- &tx_ring->dma);
+ tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
+ &tx_ring->dma, GFP_KERNEL);
if (!tx_ring->desc)
goto err;
@@ -480,8 +481,8 @@ int igbvf_setup_rx_resources(struct igbvf_adapter *adapter,
rx_ring->size = rx_ring->count * desc_len;
rx_ring->size = ALIGN(rx_ring->size, 4096);
- rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
- &rx_ring->dma);
+ rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
+ &rx_ring->dma, GFP_KERNEL);
if (!rx_ring->desc)
goto err;
@@ -549,7 +550,8 @@ void igbvf_free_tx_resources(struct igbvf_ring *tx_ring)
vfree(tx_ring->buffer_info);
tx_ring->buffer_info = NULL;
- pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
+ dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
+ tx_ring->dma);
tx_ring->desc = NULL;
}
@@ -574,13 +576,13 @@ static void igbvf_clean_rx_ring(struct igbvf_ring *rx_ring)
buffer_info = &rx_ring->buffer_info[i];
if (buffer_info->dma) {
if (adapter->rx_ps_hdr_size){
- pci_unmap_single(pdev, buffer_info->dma,
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
adapter->rx_ps_hdr_size,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
} else {
- pci_unmap_single(pdev, buffer_info->dma,
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
adapter->rx_buffer_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
}
buffer_info->dma = 0;
}
@@ -592,9 +594,10 @@ static void igbvf_clean_rx_ring(struct igbvf_ring *rx_ring)
if (buffer_info->page) {
if (buffer_info->page_dma)
- pci_unmap_page(pdev, buffer_info->page_dma,
+ dma_unmap_page(&pdev->dev,
+ buffer_info->page_dma,
PAGE_SIZE / 2,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
put_page(buffer_info->page);
buffer_info->page = NULL;
buffer_info->page_dma = 0;
@@ -1398,7 +1401,7 @@ static void igbvf_set_multi(struct net_device *netdev)
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- struct dev_mc_list *mc_ptr;
+ struct netdev_hw_addr *ha;
u8 *mta_list = NULL;
int i;
@@ -1413,8 +1416,8 @@ static void igbvf_set_multi(struct net_device *netdev)
/* prepare a packed array of only addresses. */
i = 0;
- netdev_for_each_mc_addr(mc_ptr, netdev)
- memcpy(mta_list + (i++ * ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
+ netdev_for_each_mc_addr(ha, netdev)
+ memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
hw->mac.ops.update_mc_addr_list(hw, mta_list, i, 0, 0);
kfree(mta_list);
@@ -2104,9 +2107,9 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
buffer_info->time_stamp = jiffies;
buffer_info->next_to_watch = i;
buffer_info->mapped_as_page = false;
- buffer_info->dma = pci_map_single(pdev, skb->data, len,
- PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(pdev, buffer_info->dma))
+ buffer_info->dma = dma_map_single(&pdev->dev, skb->data, len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma))
goto dma_error;
@@ -2127,12 +2130,12 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
buffer_info->time_stamp = jiffies;
buffer_info->next_to_watch = i;
buffer_info->mapped_as_page = true;
- buffer_info->dma = pci_map_page(pdev,
+ buffer_info->dma = dma_map_page(&pdev->dev,
frag->page,
frag->page_offset,
len,
- PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(pdev, buffer_info->dma))
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma))
goto dma_error;
}
@@ -2644,16 +2647,16 @@ static int __devinit igbvf_probe(struct pci_dev *pdev,
return err;
pci_using_dac = 0;
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
if (!err) {
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
if (!err)
pci_using_dac = 1;
} else {
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- err = pci_set_consistent_dma_mask(pdev,
- DMA_BIT_MASK(32));
+ err = dma_set_coherent_mask(&pdev->dev,
+ DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev, "No usable DMA "
"configuration, aborting\n");
@@ -2899,7 +2902,7 @@ static int __init igbvf_init_module(void)
printk(KERN_INFO "%s\n", igbvf_copyright);
ret = pci_register_driver(&igbvf_driver);
- pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, igbvf_driver_name,
+ igbvf_driver_pm_qos_req = pm_qos_add_request(PM_QOS_CPU_DMA_LATENCY,
PM_QOS_DEFAULT_VALUE);
return ret;
@@ -2915,7 +2918,8 @@ module_init(igbvf_init_module);
static void __exit igbvf_exit_module(void)
{
pci_unregister_driver(&igbvf_driver);
- pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, igbvf_driver_name);
+ pm_qos_remove_request(igbvf_driver_pm_qos_req);
+ igbvf_driver_pm_qos_req = NULL;
}
module_exit(igbvf_exit_module);
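The igbvf changes above move the driver from the legacy pci_* DMA wrappers to the generic, struct-device based DMA API, and convert the probe-time mask setup to dma_set_mask()/dma_set_coherent_mask() with the usual 64-bit-then-32-bit fallback. A minimal sketch of the mapping pattern the driver now follows; the buffer and length names are illustrative, not taken from the patch:

	dma_addr_t dma;

	dma = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, dma))
		goto dma_error;		/* driver-specific unwind path */

	/* ... descriptor points at 'dma' while the hardware owns it ... */

	dma_unmap_single(&pdev->dev, dma, len, DMA_TO_DEVICE);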
diff --git a/drivers/net/ioc3-eth.c b/drivers/net/ioc3-eth.c
index 8f6197d647c0..e3b5e9490601 100644
--- a/drivers/net/ioc3-eth.c
+++ b/drivers/net/ioc3-eth.c
@@ -1503,7 +1503,6 @@ static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev)
BARRIER();
- dev->trans_start = jiffies;
ip->tx_skbs[produce] = skb; /* Remember skb */
produce = (produce + 1) & 127;
ip->tx_pi = produce;
@@ -1665,7 +1664,7 @@ static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
static void ioc3_set_multicast_list(struct net_device *dev)
{
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
struct ioc3_private *ip = netdev_priv(dev);
struct ioc3 *ioc3 = ip->regs;
u64 ehar = 0;
@@ -1689,8 +1688,8 @@ static void ioc3_set_multicast_list(struct net_device *dev)
ip->ehar_h = 0xffffffff;
ip->ehar_l = 0xffffffff;
} else {
- netdev_for_each_mc_addr(dmi, dev) {
- char *addr = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ char *addr = ha->addr;
if (!(*addr & 1))
continue;
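This hunk, like the matching ones in igbvf, ipg and iseries_veth elsewhere in the patch, switches multicast walking from struct dev_mc_list and dmi_addr to the generic netdev_hw_addr list. The per-driver pattern is the same everywhere; a minimal sketch, with add_mc_filter() and priv as hypothetical driver-specific names:

	struct netdev_hw_addr *ha;

	netdev_for_each_mc_addr(ha, dev)
		add_mc_filter(priv, ha->addr);	/* ha->addr replaces dmi->dmi_addr */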
diff --git a/drivers/net/ipg.c b/drivers/net/ipg.c
index 639bf9fb0279..72e3d2da9e9f 100644
--- a/drivers/net/ipg.c
+++ b/drivers/net/ipg.c
@@ -570,7 +570,7 @@ static int ipg_config_autoneg(struct net_device *dev)
static void ipg_nic_set_multicast_list(struct net_device *dev)
{
void __iomem *ioaddr = ipg_ioaddr(dev);
- struct dev_mc_list *mc_list_ptr;
+ struct netdev_hw_addr *ha;
unsigned int hashindex;
u32 hashtable[2];
u8 receivemode;
@@ -609,9 +609,9 @@ static void ipg_nic_set_multicast_list(struct net_device *dev)
hashtable[1] = 0x00000000;
/* Cycle through all multicast addresses to filter. */
- netdev_for_each_mc_addr(mc_list_ptr, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
/* Calculate CRC result for each multicast address. */
- hashindex = crc32_le(0xffffffff, mc_list_ptr->dmi_addr,
+ hashindex = crc32_le(0xffffffff, ha->addr,
ETH_ALEN);
/* Use only the least significant 6 bits. */
@@ -1548,8 +1548,6 @@ static void ipg_reset_after_host_error(struct work_struct *work)
container_of(work, struct ipg_nic_private, task.work);
struct net_device *dev = sp->dev;
- IPG_DDEBUG_MSG("DMACtrl = %8.8x\n", ioread32(sp->ioaddr + IPG_DMACTRL));
-
/*
* Acknowledge HostError interrupt by resetting
* IPG DMA and HOST.
@@ -1826,9 +1824,6 @@ static int ipg_nic_stop(struct net_device *dev)
netif_stop_queue(dev);
- IPG_DDEBUG_MSG("RFDlistendCount = %i\n", sp->RFDlistendCount);
- IPG_DDEBUG_MSG("RFDListCheckedCount = %i\n", sp->rxdCheckedCount);
- IPG_DDEBUG_MSG("EmptyRFDListCount = %i\n", sp->EmptyRFDListCount);
IPG_DUMPTFDLIST(dev);
do {
diff --git a/drivers/net/ipg.h b/drivers/net/ipg.h
index dfc2541bb556..6ce027355fcf 100644
--- a/drivers/net/ipg.h
+++ b/drivers/net/ipg.h
@@ -29,7 +29,7 @@
/* GMII based PHY IDs */
#define NS 0x2000
#define MARVELL 0x0141
-#define ICPLUS_PHY 0x243
+#define ICPLUS_PHY 0x243
/* NIC Physical Layer Device MII register fields. */
#define MII_PHY_SELECTOR_IEEE8023 0x0001
@@ -96,31 +96,31 @@ enum ipg_regs {
};
/* Ethernet MIB statistic register offsets. */
-#define IPG_OCTETRCVOK 0xA8
+#define IPG_OCTETRCVOK 0xA8
#define IPG_MCSTOCTETRCVDOK 0xAC
#define IPG_BCSTOCTETRCVOK 0xB0
#define IPG_FRAMESRCVDOK 0xB4
#define IPG_MCSTFRAMESRCVDOK 0xB8
#define IPG_BCSTFRAMESRCVDOK 0xBE
#define IPG_MACCONTROLFRAMESRCVD 0xC6
-#define IPG_FRAMETOOLONGERRRORS 0xC8
-#define IPG_INRANGELENGTHERRORS 0xCA
-#define IPG_FRAMECHECKSEQERRORS 0xCC
-#define IPG_FRAMESLOSTRXERRORS 0xCE
-#define IPG_OCTETXMTOK 0xD0
+#define IPG_FRAMETOOLONGERRRORS 0xC8
+#define IPG_INRANGELENGTHERRORS 0xCA
+#define IPG_FRAMECHECKSEQERRORS 0xCC
+#define IPG_FRAMESLOSTRXERRORS 0xCE
+#define IPG_OCTETXMTOK 0xD0
#define IPG_MCSTOCTETXMTOK 0xD4
#define IPG_BCSTOCTETXMTOK 0xD8
#define IPG_FRAMESXMTDOK 0xDC
#define IPG_MCSTFRAMESXMTDOK 0xE0
-#define IPG_FRAMESWDEFERREDXMT 0xE4
+#define IPG_FRAMESWDEFERREDXMT 0xE4
#define IPG_LATECOLLISIONS 0xE8
#define IPG_MULTICOLFRAMES 0xEC
#define IPG_SINGLECOLFRAMES 0xF0
#define IPG_BCSTFRAMESXMTDOK 0xF6
-#define IPG_CARRIERSENSEERRORS 0xF8
+#define IPG_CARRIERSENSEERRORS 0xF8
#define IPG_MACCONTROLFRAMESXMTDOK 0xFA
-#define IPG_FRAMESABORTXSCOLLS 0xFC
-#define IPG_FRAMESWEXDEFERRAL 0xFE
+#define IPG_FRAMESABORTXSCOLLS 0xFC
+#define IPG_FRAMESWEXDEFERRAL 0xFE
/* RMON statistic register offsets. */
#define IPG_ETHERSTATSCOLLISIONS 0x100
@@ -134,8 +134,8 @@ enum ipg_regs {
#define IPG_ETHERSTATSPKTS1024TO1518OCTESTSTRANSMIT 0x120
#define IPG_ETHERSTATSCRCALIGNERRORS 0x124
#define IPG_ETHERSTATSUNDERSIZEPKTS 0x128
-#define IPG_ETHERSTATSFRAGMENTS 0x12C
-#define IPG_ETHERSTATSJABBERS 0x130
+#define IPG_ETHERSTATSFRAGMENTS 0x12C
+#define IPG_ETHERSTATSJABBERS 0x130
#define IPG_ETHERSTATSOCTETS 0x134
#define IPG_ETHERSTATSPKTS 0x138
#define IPG_ETHERSTATSPKTS64OCTESTS 0x13C
@@ -154,10 +154,10 @@ enum ipg_regs {
#define IPG_ETHERSTATSDROPEVENTS 0xCE
/* Serial EEPROM offsets */
-#define IPG_EEPROM_CONFIGPARAM 0x00
+#define IPG_EEPROM_CONFIGPARAM 0x00
#define IPG_EEPROM_ASICCTRL 0x01
#define IPG_EEPROM_SUBSYSTEMVENDORID 0x02
-#define IPG_EEPROM_SUBSYSTEMID 0x03
+#define IPG_EEPROM_SUBSYSTEMID 0x03
#define IPG_EEPROM_STATIONADDRESS0 0x10
#define IPG_EEPROM_STATIONADDRESS1 0x11
#define IPG_EEPROM_STATIONADDRESS2 0x12
@@ -168,16 +168,16 @@ enum ipg_regs {
/* IOBaseAddress */
#define IPG_PIB_RSVD_MASK 0xFFFFFE01
-#define IPG_PIB_IOBASEADDRESS 0xFFFFFF00
-#define IPG_PIB_IOBASEADDRIND 0x00000001
+#define IPG_PIB_IOBASEADDRESS 0xFFFFFF00
+#define IPG_PIB_IOBASEADDRIND 0x00000001
/* MemBaseAddress */
#define IPG_PMB_RSVD_MASK 0xFFFFFE07
-#define IPG_PMB_MEMBASEADDRIND 0x00000001
+#define IPG_PMB_MEMBASEADDRIND 0x00000001
#define IPG_PMB_MEMMAPTYPE 0x00000006
#define IPG_PMB_MEMMAPTYPE0 0x00000002
#define IPG_PMB_MEMMAPTYPE1 0x00000004
-#define IPG_PMB_MEMBASEADDRESS 0xFFFFFE00
+#define IPG_PMB_MEMBASEADDRESS 0xFFFFFE00
/* ConfigStatus */
#define IPG_CS_RSVD_MASK 0xFFB0
@@ -196,20 +196,20 @@ enum ipg_regs {
/* TFDList, TFC */
#define IPG_TFC_RSVD_MASK 0x0000FFFF9FFFFFFF
-#define IPG_TFC_FRAMEID 0x000000000000FFFF
+#define IPG_TFC_FRAMEID 0x000000000000FFFF
#define IPG_TFC_WORDALIGN 0x0000000000030000
#define IPG_TFC_WORDALIGNTODWORD 0x0000000000000000
-#define IPG_TFC_WORDALIGNTOWORD 0x0000000000020000
+#define IPG_TFC_WORDALIGNTOWORD 0x0000000000020000
#define IPG_TFC_WORDALIGNDISABLED 0x0000000000030000
#define IPG_TFC_TCPCHECKSUMENABLE 0x0000000000040000
#define IPG_TFC_UDPCHECKSUMENABLE 0x0000000000080000
#define IPG_TFC_IPCHECKSUMENABLE 0x0000000000100000
#define IPG_TFC_FCSAPPENDDISABLE 0x0000000000200000
#define IPG_TFC_TXINDICATE 0x0000000000400000
-#define IPG_TFC_TXDMAINDICATE 0x0000000000800000
+#define IPG_TFC_TXDMAINDICATE 0x0000000000800000
#define IPG_TFC_FRAGCOUNT 0x000000000F000000
-#define IPG_TFC_VLANTAGINSERT 0x0000000010000000
-#define IPG_TFC_TFDDONE 0x0000000080000000
+#define IPG_TFC_VLANTAGINSERT 0x0000000010000000
+#define IPG_TFC_TFDDONE 0x0000000080000000
#define IPG_TFC_VID 0x00000FFF00000000
#define IPG_TFC_CFI 0x0000100000000000
#define IPG_TFC_USERPRIORITY 0x0000E00000000000
@@ -217,35 +217,35 @@ enum ipg_regs {
/* TFDList, FragInfo */
#define IPG_TFI_RSVD_MASK 0xFFFF00FFFFFFFFFF
#define IPG_TFI_FRAGADDR 0x000000FFFFFFFFFF
-#define IPG_TFI_FRAGLEN 0xFFFF000000000000LL
+#define IPG_TFI_FRAGLEN 0xFFFF000000000000LL
/* RFD data structure masks. */
/* RFDList, RFS */
#define IPG_RFS_RSVD_MASK 0x0000FFFFFFFFFFFF
#define IPG_RFS_RXFRAMELEN 0x000000000000FFFF
-#define IPG_RFS_RXFIFOOVERRUN 0x0000000000010000
+#define IPG_RFS_RXFIFOOVERRUN 0x0000000000010000
#define IPG_RFS_RXRUNTFRAME 0x0000000000020000
#define IPG_RFS_RXALIGNMENTERROR 0x0000000000040000
#define IPG_RFS_RXFCSERROR 0x0000000000080000
#define IPG_RFS_RXOVERSIZEDFRAME 0x0000000000100000
-#define IPG_RFS_RXLENGTHERROR 0x0000000000200000
+#define IPG_RFS_RXLENGTHERROR 0x0000000000200000
#define IPG_RFS_VLANDETECTED 0x0000000000400000
#define IPG_RFS_TCPDETECTED 0x0000000000800000
#define IPG_RFS_TCPERROR 0x0000000001000000
#define IPG_RFS_UDPDETECTED 0x0000000002000000
#define IPG_RFS_UDPERROR 0x0000000004000000
#define IPG_RFS_IPDETECTED 0x0000000008000000
-#define IPG_RFS_IPERROR 0x0000000010000000
+#define IPG_RFS_IPERROR 0x0000000010000000
#define IPG_RFS_FRAMESTART 0x0000000020000000
#define IPG_RFS_FRAMEEND 0x0000000040000000
-#define IPG_RFS_RFDDONE 0x0000000080000000
+#define IPG_RFS_RFDDONE 0x0000000080000000
#define IPG_RFS_TCI 0x0000FFFF00000000
/* RFDList, FragInfo */
#define IPG_RFI_RSVD_MASK 0xFFFF00FFFFFFFFFF
#define IPG_RFI_FRAGADDR 0x000000FFFFFFFFFF
-#define IPG_RFI_FRAGLEN 0xFFFF000000000000LL
+#define IPG_RFI_FRAGLEN 0xFFFF000000000000LL
/* I/O Register masks. */
@@ -254,37 +254,37 @@ enum ipg_regs {
/* Statistics Mask */
#define IPG_SM_ALL 0x0FFFFFFF
-#define IPG_SM_OCTETRCVOK_FRAMESRCVDOK 0x00000001
-#define IPG_SM_MCSTOCTETRCVDOK_MCSTFRAMESRCVDOK 0x00000002
-#define IPG_SM_BCSTOCTETRCVDOK_BCSTFRAMESRCVDOK 0x00000004
+#define IPG_SM_OCTETRCVOK_FRAMESRCVDOK 0x00000001
+#define IPG_SM_MCSTOCTETRCVDOK_MCSTFRAMESRCVDOK 0x00000002
+#define IPG_SM_BCSTOCTETRCVDOK_BCSTFRAMESRCVDOK 0x00000004
#define IPG_SM_RXJUMBOFRAMES 0x00000008
#define IPG_SM_TCPCHECKSUMERRORS 0x00000010
-#define IPG_SM_IPCHECKSUMERRORS 0x00000020
+#define IPG_SM_IPCHECKSUMERRORS 0x00000020
#define IPG_SM_UDPCHECKSUMERRORS 0x00000040
#define IPG_SM_MACCONTROLFRAMESRCVD 0x00000080
#define IPG_SM_FRAMESTOOLONGERRORS 0x00000100
#define IPG_SM_INRANGELENGTHERRORS 0x00000200
#define IPG_SM_FRAMECHECKSEQERRORS 0x00000400
#define IPG_SM_FRAMESLOSTRXERRORS 0x00000800
-#define IPG_SM_OCTETXMTOK_FRAMESXMTOK 0x00001000
-#define IPG_SM_MCSTOCTETXMTOK_MCSTFRAMESXMTDOK 0x00002000
-#define IPG_SM_BCSTOCTETXMTOK_BCSTFRAMESXMTDOK 0x00004000
+#define IPG_SM_OCTETXMTOK_FRAMESXMTOK 0x00001000
+#define IPG_SM_MCSTOCTETXMTOK_MCSTFRAMESXMTDOK 0x00002000
+#define IPG_SM_BCSTOCTETXMTOK_BCSTFRAMESXMTDOK 0x00004000
#define IPG_SM_FRAMESWDEFERREDXMT 0x00008000
-#define IPG_SM_LATECOLLISIONS 0x00010000
-#define IPG_SM_MULTICOLFRAMES 0x00020000
-#define IPG_SM_SINGLECOLFRAMES 0x00040000
+#define IPG_SM_LATECOLLISIONS 0x00010000
+#define IPG_SM_MULTICOLFRAMES 0x00020000
+#define IPG_SM_SINGLECOLFRAMES 0x00040000
#define IPG_SM_TXJUMBOFRAMES 0x00080000
#define IPG_SM_CARRIERSENSEERRORS 0x00100000
#define IPG_SM_MACCONTROLFRAMESXMTD 0x00200000
#define IPG_SM_FRAMESABORTXSCOLLS 0x00400000
-#define IPG_SM_FRAMESWEXDEFERAL 0x00800000
+#define IPG_SM_FRAMESWEXDEFERAL 0x00800000
/* Countdown */
#define IPG_CD_RSVD_MASK 0x0700FFFF
#define IPG_CD_COUNT 0x0000FFFF
-#define IPG_CD_COUNTDOWNSPEED 0x01000000
+#define IPG_CD_COUNTDOWNSPEED 0x01000000
#define IPG_CD_COUNTDOWNMODE 0x02000000
-#define IPG_CD_COUNTINTENABLED 0x04000000
+#define IPG_CD_COUNTINTENABLED 0x04000000
/* TxDMABurstThresh */
#define IPG_TB_RSVD_MASK 0xFF
@@ -653,15 +653,28 @@ enum ipg_regs {
* Miscellaneous macros.
*/
-/* Marco for printing debug statements. */
+/* Macros for printing debug statements. */
#ifdef IPG_DEBUG
-# define IPG_DEBUG_MSG(args...)
-# define IPG_DDEBUG_MSG(args...) printk(KERN_DEBUG "IPG: " args)
+# define IPG_DEBUG_MSG(fmt, args...) \
+do { \
+ if (0) \
+ printk(KERN_DEBUG "IPG: " fmt, ##args); \
+} while (0)
+# define IPG_DDEBUG_MSG(fmt, args...) \
+ printk(KERN_DEBUG "IPG: " fmt, ##args)
# define IPG_DUMPRFDLIST(args) ipg_dump_rfdlist(args)
# define IPG_DUMPTFDLIST(args) ipg_dump_tfdlist(args)
#else
-# define IPG_DEBUG_MSG(args...)
-# define IPG_DDEBUG_MSG(args...)
+# define IPG_DEBUG_MSG(fmt, args...) \
+do { \
+ if (0) \
+ printk(KERN_DEBUG "IPG: " fmt, ##args); \
+} while (0)
+# define IPG_DDEBUG_MSG(fmt, args...) \
+do { \
+ if (0) \
+ printk(KERN_DEBUG "IPG: " fmt, ##args); \
+} while (0)
# define IPG_DUMPRFDLIST(args)
# define IPG_DUMPTFDLIST(args)
#endif
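The rewritten debug macros use the do { if (0) printk(...); } while (0) pattern: when debugging is disabled the call is removed by dead-code elimination, but the format string and arguments are still seen by the compiler, so printf-style format checking is kept and "set but not used" warnings are avoided. A standalone sketch of the same construction with hypothetical names:

#ifdef MYDRV_DEBUG
# define mydrv_dbg(fmt, args...) printk(KERN_DEBUG "mydrv: " fmt, ##args)
#else
# define mydrv_dbg(fmt, args...)				\
do {								\
	if (0)							\
		printk(KERN_DEBUG "mydrv: " fmt, ##args);	\
} while (0)
#endif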
diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig
index af10e97345ce..25bb2a015e18 100644
--- a/drivers/net/irda/Kconfig
+++ b/drivers/net/irda/Kconfig
@@ -397,5 +397,11 @@ config MCS_FIR
To compile it as a module, choose M here: the module will be called
mcs7780.
+config SH_IRDA
+ tristate "SuperH IrDA driver"
+ depends on IRDA && ARCH_SHMOBILE
+ help
+ Say Y here if your want to enable SuperH IrDA devices.
+
endmenu
diff --git a/drivers/net/irda/Makefile b/drivers/net/irda/Makefile
index e030d47e2793..dfc64537f62f 100644
--- a/drivers/net/irda/Makefile
+++ b/drivers/net/irda/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_VIA_FIR) += via-ircc.o
obj-$(CONFIG_PXA_FICP) += pxaficp_ir.o
obj-$(CONFIG_MCS_FIR) += mcs7780.o
obj-$(CONFIG_AU1000_FIR) += au1k_ir.o
+obj-$(CONFIG_SH_IRDA) += sh_irda.o
# SIR drivers
obj-$(CONFIG_IRTTY_SIR) += irtty-sir.o sir-dev.o
obj-$(CONFIG_BFIN_SIR) += bfin_sir.o
diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c
index 28992c815cba..a3cb109006a5 100644
--- a/drivers/net/irda/ali-ircc.c
+++ b/drivers/net/irda/ali-ircc.c
@@ -753,18 +753,18 @@ static irqreturn_t ali_ircc_fir_interrupt(struct ali_ircc_cb *self)
if(OldMessageCount > ((self->LineStatus+1) & 0x07))
{
self->rcvFramesOverflow = TRUE;
- IRDA_DEBUG(1, "%s(), ******* self->rcvFramesOverflow = TRUE ******** \n", __func__);
+ IRDA_DEBUG(1, "%s(), ******* self->rcvFramesOverflow = TRUE ********\n", __func__);
}
if (ali_ircc_dma_receive_complete(self))
{
- IRDA_DEBUG(1, "%s(), ******* receive complete ******** \n", __func__);
+ IRDA_DEBUG(1, "%s(), ******* receive complete ********\n", __func__);
self->ier = IER_EOM;
}
else
{
- IRDA_DEBUG(1, "%s(), ******* Not receive complete ******** \n", __func__);
+ IRDA_DEBUG(1, "%s(), ******* Not receive complete ********\n", __func__);
self->ier = IER_EOM | IER_TIMER;
}
@@ -777,7 +777,7 @@ static irqreturn_t ali_ircc_fir_interrupt(struct ali_ircc_cb *self)
if(OldMessageCount > ((self->LineStatus+1) & 0x07))
{
self->rcvFramesOverflow = TRUE;
- IRDA_DEBUG(1, "%s(), ******* self->rcvFramesOverflow = TRUE ******* \n", __func__);
+ IRDA_DEBUG(1, "%s(), ******* self->rcvFramesOverflow = TRUE *******\n", __func__);
}
/* Disable Timer */
switch_bank(iobase, BANK1);
@@ -942,7 +942,7 @@ static void ali_ircc_sir_write_wakeup(struct ali_ircc_cb *self)
// benjamin 2000/11/10 06:32PM
if (self->io.speed > 115200)
{
- IRDA_DEBUG(2, "%s(), ali_ircc_change_speed from UART_LSR_TEMT \n", __func__ );
+ IRDA_DEBUG(2, "%s(), ali_ircc_change_speed from UART_LSR_TEMT\n", __func__ );
self->ier = IER_EOM;
// SetCOMInterrupts(self, TRUE);
@@ -970,7 +970,7 @@ static void ali_ircc_change_speed(struct ali_ircc_cb *self, __u32 baud)
IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ );
- IRDA_DEBUG(2, "%s(), setting speed = %d \n", __func__ , baud);
+ IRDA_DEBUG(2, "%s(), setting speed = %d\n", __func__ , baud);
/* This function *must* be called with irq off and spin-lock.
* - Jean II */
@@ -1500,7 +1500,7 @@ static netdev_tx_t ali_ircc_fir_hard_xmit(struct sk_buff *skb,
diff = self->now.tv_usec - self->stamp.tv_usec;
/* self->stamp is set from ali_ircc_dma_receive_complete() */
- IRDA_DEBUG(1, "%s(), ******* diff = %d ******* \n", __func__ , diff);
+ IRDA_DEBUG(1, "%s(), ******* diff = %d *******\n", __func__ , diff);
if (diff < 0)
diff += 1000000;
@@ -1641,7 +1641,7 @@ static void ali_ircc_dma_xmit(struct ali_ircc_cb *self)
tmp = inb(iobase+FIR_LCR_B);
tmp &= ~0x20; // Disable SIP
outb(((unsigned char)(tmp & 0x3f) | LCR_B_TX_MODE) & ~LCR_B_BW, iobase+FIR_LCR_B);
- IRDA_DEBUG(1, "%s(), ******* Change to TX mode: FIR_LCR_B = 0x%x ******* \n", __func__ , inb(iobase+FIR_LCR_B));
+ IRDA_DEBUG(1, "%s(), *** Change to TX mode: FIR_LCR_B = 0x%x ***\n", __func__ , inb(iobase+FIR_LCR_B));
outb(0, iobase+FIR_LSR);
@@ -1768,7 +1768,7 @@ static int ali_ircc_dma_receive(struct ali_ircc_cb *self)
//switch_bank(iobase, BANK0);
tmp = inb(iobase+FIR_LCR_B);
outb((unsigned char)(tmp &0x3f) | LCR_B_RX_MODE | LCR_B_BW , iobase + FIR_LCR_B); // 2000/12/1 05:16PM
- IRDA_DEBUG(1, "%s(), *** Change To RX mode: FIR_LCR_B = 0x%x *** \n", __func__ , inb(iobase+FIR_LCR_B));
+ IRDA_DEBUG(1, "%s(), *** Change To RX mode: FIR_LCR_B = 0x%x ***\n", __func__ , inb(iobase+FIR_LCR_B));
/* Set Rx Threshold */
switch_bank(iobase, BANK1);
@@ -1840,7 +1840,7 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
/* Check for errors */
if ((status & 0xd8) || self->rcvFramesOverflow || (len==0))
{
- IRDA_DEBUG(0,"%s(), ************* RX Errors ************ \n", __func__ );
+ IRDA_DEBUG(0,"%s(), ************* RX Errors ************\n", __func__ );
/* Skip frame */
self->netdev->stats.rx_errors++;
@@ -1850,29 +1850,29 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
if (status & LSR_FIFO_UR)
{
self->netdev->stats.rx_frame_errors++;
- IRDA_DEBUG(0,"%s(), ************* FIFO Errors ************ \n", __func__ );
+ IRDA_DEBUG(0,"%s(), ************* FIFO Errors ************\n", __func__ );
}
if (status & LSR_FRAME_ERROR)
{
self->netdev->stats.rx_frame_errors++;
- IRDA_DEBUG(0,"%s(), ************* FRAME Errors ************ \n", __func__ );
+ IRDA_DEBUG(0,"%s(), ************* FRAME Errors ************\n", __func__ );
}
if (status & LSR_CRC_ERROR)
{
self->netdev->stats.rx_crc_errors++;
- IRDA_DEBUG(0,"%s(), ************* CRC Errors ************ \n", __func__ );
+ IRDA_DEBUG(0,"%s(), ************* CRC Errors ************\n", __func__ );
}
if(self->rcvFramesOverflow)
{
self->netdev->stats.rx_frame_errors++;
- IRDA_DEBUG(0,"%s(), ************* Overran DMA buffer ************ \n", __func__ );
+ IRDA_DEBUG(0,"%s(), ************* Overran DMA buffer ************\n", __func__ );
}
if(len == 0)
{
self->netdev->stats.rx_frame_errors++;
- IRDA_DEBUG(0,"%s(), ********** Receive Frame Size = 0 ********* \n", __func__ );
+ IRDA_DEBUG(0,"%s(), ********** Receive Frame Size = 0 *********\n", __func__ );
}
}
else
@@ -1884,7 +1884,7 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
val = inb(iobase+FIR_BSR);
if ((val& BSR_FIFO_NOT_EMPTY)== 0x80)
{
- IRDA_DEBUG(0, "%s(), ************* BSR_FIFO_NOT_EMPTY ************ \n", __func__ );
+ IRDA_DEBUG(0, "%s(), ************* BSR_FIFO_NOT_EMPTY ************\n", __func__ );
/* Put this entry back in fifo */
st_fifo->head--;
diff --git a/drivers/net/irda/au1k_ir.c b/drivers/net/irda/au1k_ir.c
index b5cbd39d0685..a3d696a9456a 100644
--- a/drivers/net/irda/au1k_ir.c
+++ b/drivers/net/irda/au1k_ir.c
@@ -546,7 +546,6 @@ static int au1k_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
dev_kfree_skb(skb);
aup->tx_head = (aup->tx_head + 1) & (NUM_IR_DESC - 1);
- dev->trans_start = jiffies;
return NETDEV_TX_OK;
}
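The dev->trans_start assignments deleted from the xmit paths in this patch (ioc3 above; donauboe, pxaficp_ir, sa1100_ir, sir_dev, vlsi_ir and w83977af below) are redundant: the networking core now stamps the transmit queue itself on each transmit, so only watchdog and timeout handlers still need to touch trans_start, as the smsc-ircc2 hunk later in this patch keeps doing. A minimal sketch of the pattern that remains, with a hypothetical driver name:

static void mydrv_tx_timeout(struct net_device *dev)
{
	/* ... reset the hardware ... */
	dev->trans_start = jiffies;	/* prevent an immediate repeat timeout */
	netif_wake_queue(dev);
}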
diff --git a/drivers/net/irda/bfin_sir.c b/drivers/net/irda/bfin_sir.c
index 911c082cee5a..f940dfa1f7f8 100644
--- a/drivers/net/irda/bfin_sir.c
+++ b/drivers/net/irda/bfin_sir.c
@@ -107,8 +107,12 @@ static int bfin_sir_set_speed(struct bfin_sir_port *port, int speed)
case 57600:
case 115200:
- quot = (port->clk + (8 * speed)) / (16 * speed)\
- - ANOMALY_05000230;
+ /*
+ * IRDA is not affected by anomaly 05000230, so there is no
+	 * need to tweak the divisor like the UART driver does (which
+	 * would slightly speed up the baud rate on us).
+ */
+ quot = (port->clk + (8 * speed)) / (16 * speed);
do {
udelay(utime);
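The replacement divisor above is the standard round-to-nearest form of clk / (16 * speed): adding speed * 8, half the denominator, before the integer division rounds instead of truncating. A worked example with a hypothetical 133 MHz SCLK at 9600 baud:

	/*
	 * exact:     133000000 / (16 * 9600)          = 865.9
	 * truncated: 133000000 / 153600               = 865
	 * rounded:   (133000000 + 8 * 9600) / 153600  = 866
	 */
	quot = (port->clk + (8 * speed)) / (16 * speed);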
diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c
index b7e6625ca75e..48bd5ec9f29b 100644
--- a/drivers/net/irda/donauboe.c
+++ b/drivers/net/irda/donauboe.c
@@ -1002,8 +1002,6 @@ toshoboe_hard_xmit (struct sk_buff *skb, struct net_device *dev)
toshoboe_checkstuck (self);
- dev->trans_start = jiffies;
-
/* Check if we need to change the speed */
/* But not now. Wait after transmission if mtt not required */
speed=irda_get_next_speed(skb);
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
index 2c9b3af16612..4441fa3389c2 100644
--- a/drivers/net/irda/irda-usb.c
+++ b/drivers/net/irda/irda-usb.c
@@ -839,7 +839,7 @@ static void irda_usb_receive(struct urb *urb)
/* Usually precursor to a hot-unplug on OHCI. */
default:
self->netdev->stats.rx_errors++;
- IRDA_DEBUG(0, "%s(), RX status %d, transfer_flags 0x%04X \n", __func__, urb->status, urb->transfer_flags);
+ IRDA_DEBUG(0, "%s(), RX status %d, transfer_flags 0x%04X\n", __func__, urb->status, urb->transfer_flags);
break;
}
/* If we received an error, we don't want to resubmit the
diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c
index c0e0bb9401d3..5b1036ac38d7 100644
--- a/drivers/net/irda/mcs7780.c
+++ b/drivers/net/irda/mcs7780.c
@@ -434,8 +434,6 @@ static void mcs_unwrap_mir(struct mcs_cb *mcs, __u8 *buf, int len)
mcs->netdev->stats.rx_packets++;
mcs->netdev->stats.rx_bytes += new_len;
-
- return;
}
/* Unwrap received packets at FIR speed. A 32 bit crc_ccitt checksum is
@@ -487,8 +485,6 @@ static void mcs_unwrap_fir(struct mcs_cb *mcs, __u8 *buf, int len)
mcs->netdev->stats.rx_packets++;
mcs->netdev->stats.rx_bytes += new_len;
-
- return;
}
diff --git a/drivers/net/irda/pxaficp_ir.c b/drivers/net/irda/pxaficp_ir.c
index 1a54f6bb68c5..c192c31e4c5c 100644
--- a/drivers/net/irda/pxaficp_ir.c
+++ b/drivers/net/irda/pxaficp_ir.c
@@ -556,7 +556,6 @@ static int pxa_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
}
dev_kfree_skb(skb);
- dev->trans_start = jiffies;
return NETDEV_TX_OK;
}
diff --git a/drivers/net/irda/sa1100_ir.c b/drivers/net/irda/sa1100_ir.c
index 1dcdce0631aa..da2705061a60 100644
--- a/drivers/net/irda/sa1100_ir.c
+++ b/drivers/net/irda/sa1100_ir.c
@@ -715,8 +715,6 @@ static int sa1100_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
Ser2HSCR0 = si->hscr0 | HSCR0_HSSP | HSCR0_TXE;
}
- dev->trans_start = jiffies;
-
return NETDEV_TX_OK;
}
diff --git a/drivers/net/irda/sh_irda.c b/drivers/net/irda/sh_irda.c
new file mode 100644
index 000000000000..9a828b06a57e
--- /dev/null
+++ b/drivers/net/irda/sh_irda.c
@@ -0,0 +1,865 @@
+/*
+ * SuperH IrDA Driver
+ *
+ * Copyright (C) 2010 Renesas Solutions Corp.
+ * Kuninori Morimoto <morimoto.kuninori@renesas.com>
+ *
+ * Based on sh_sir.c
+ * Copyright (C) 2009 Renesas Solutions Corp.
+ * Copyright 2006-2009 Analog Devices Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * CAUTION
+ *
+ * This driver is deliberately simple and does not yet support:
+ * - MIR/FIR support
+ * - DMA transfer support
+ * - FIFO mode support
+ */
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <net/irda/wrapper.h>
+#include <net/irda/irda_device.h>
+
+#define DRIVER_NAME "sh_irda"
+
+#if defined(CONFIG_ARCH_SH7367) || defined(CONFIG_ARCH_SH7377)
+#define __IRDARAM_LEN 0x13FF
+#else
+#define __IRDARAM_LEN 0x1039
+#endif
+
+#define IRTMR 0x1F00 /* Transfer mode */
+#define IRCFR 0x1F02 /* Configuration */
+#define IRCTR 0x1F04 /* IR control */
+#define IRTFLR 0x1F20 /* Transmit frame length */
+#define IRTCTR 0x1F22 /* Transmit control */
+#define IRRFLR 0x1F40 /* Receive frame length */
+#define IRRCTR 0x1F42 /* Receive control */
+#define SIRISR 0x1F60 /* SIR-UART mode interrupt source */
+#define SIRIMR 0x1F62 /* SIR-UART mode interrupt mask */
+#define SIRICR 0x1F64 /* SIR-UART mode interrupt clear */
+#define SIRBCR 0x1F68 /* SIR-UART mode baud rate count */
+#define MFIRISR 0x1F70 /* MIR/FIR mode interrupt source */
+#define MFIRIMR 0x1F72 /* MIR/FIR mode interrupt mask */
+#define MFIRICR 0x1F74 /* MIR/FIR mode interrupt clear */
+#define CRCCTR 0x1F80 /* CRC engine control */
+#define CRCIR 0x1F86 /* CRC engine input data */
+#define CRCCR 0x1F8A /* CRC engine calculation */
+#define CRCOR 0x1F8E /* CRC engine output data */
+#define FIFOCP 0x1FC0 /* FIFO current pointer */
+#define FIFOFP 0x1FC2 /* FIFO follow pointer */
+#define FIFORSMSK 0x1FC4 /* FIFO receive status mask */
+#define FIFORSOR 0x1FC6 /* FIFO receive status OR */
+#define FIFOSEL 0x1FC8 /* FIFO select */
+#define FIFORS 0x1FCA /* FIFO receive status */
+#define FIFORFL 0x1FCC /* FIFO receive frame length */
+#define FIFORAMCP 0x1FCE /* FIFO RAM current pointer */
+#define FIFORAMFP 0x1FD0 /* FIFO RAM follow pointer */
+#define BIFCTL 0x1FD2 /* BUS interface control */
+#define IRDARAM 0x0000 /* IrDA buffer RAM */
+#define IRDARAM_LEN __IRDARAM_LEN /* - 8/16/32 (read-only for 32) */
+
+/* IRTMR */
+#define TMD_MASK (0x3 << 14) /* Transfer Mode */
+#define TMD_SIR (0x0 << 14)
+#define TMD_MIR (0x3 << 14)
+#define TMD_FIR (0x2 << 14)
+
+#define FIFORIM (1 << 8) /* FIFO receive interrupt mask */
+#define MIM (1 << 4) /* MIR/FIR Interrupt Mask */
+#define SIM (1 << 0) /* SIR Interrupt Mask */
+#define xIM_MASK (FIFORIM | MIM | SIM)
+
+/* IRCFR */
+#define RTO_SHIFT 8 /* shift for Receive Timeout */
+#define RTO (0x3 << RTO_SHIFT)
+
+/* IRTCTR */
+#define ARMOD (1 << 15) /* Auto-Receive Mode */
+#define TE (1 << 0) /* Transmit Enable */
+
+/* IRRFLR */
+#define RFL_MASK (0x1FFF) /* mask for Receive Frame Length */
+
+/* IRRCTR */
+#define RE (1 << 0) /* Receive Enable */
+
+/*
+ * SIRISR, SIRIMR, SIRICR,
+ * MFIRISR, MFIRIMR, MFIRICR
+ */
+#define FRE (1 << 15) /* Frame Receive End */
+#define TROV (1 << 11) /* Transfer Area Overflow */
+#define xIR_9 (1 << 9)
+#define TOT xIR_9 /* for SIR Timeout */
+#define ABTD xIR_9 /* for MIR/FIR Abort Detection */
+#define xIR_8 (1 << 8)
+#define FER xIR_8 /* for SIR Framing Error */
+#define CRCER xIR_8 /* for MIR/FIR CRC error */
+#define FTE (1 << 7) /* Frame Transmit End */
+#define xIR_MASK (FRE | TROV | xIR_9 | xIR_8 | FTE)
+
+/* SIRBCR */
+#define BRC_MASK (0x3F) /* mask for Baud Rate Count */
+
+/* CRCCTR */
+#define CRC_RST (1 << 15) /* CRC Engine Reset */
+#define CRC_CT_MASK 0x0FFF /* mask for CRC Engine Input Data Count */
+
+/* CRCIR */
+#define CRC_IN_MASK 0x0FFF /* mask for CRC Engine Input Data */
+
+/************************************************************************
+
+
+ enum / structure
+
+
+************************************************************************/
+enum sh_irda_mode {
+ SH_IRDA_NONE = 0,
+ SH_IRDA_SIR,
+ SH_IRDA_MIR,
+ SH_IRDA_FIR,
+};
+
+struct sh_irda_self;
+struct sh_irda_xir_func {
+ int (*xir_fre) (struct sh_irda_self *self);
+ int (*xir_trov) (struct sh_irda_self *self);
+ int (*xir_9) (struct sh_irda_self *self);
+ int (*xir_8) (struct sh_irda_self *self);
+ int (*xir_fte) (struct sh_irda_self *self);
+};
+
+struct sh_irda_self {
+ void __iomem *membase;
+ unsigned int irq;
+ struct clk *clk;
+
+ struct net_device *ndev;
+
+ struct irlap_cb *irlap;
+ struct qos_info qos;
+
+ iobuff_t tx_buff;
+ iobuff_t rx_buff;
+
+ enum sh_irda_mode mode;
+ spinlock_t lock;
+
+ struct sh_irda_xir_func *xir_func;
+};
+
+/************************************************************************
+
+
+ common function
+
+
+************************************************************************/
+static void sh_irda_write(struct sh_irda_self *self, u32 offset, u16 data)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&self->lock, flags);
+ iowrite16(data, self->membase + offset);
+ spin_unlock_irqrestore(&self->lock, flags);
+}
+
+static u16 sh_irda_read(struct sh_irda_self *self, u32 offset)
+{
+ unsigned long flags;
+ u16 ret;
+
+ spin_lock_irqsave(&self->lock, flags);
+ ret = ioread16(self->membase + offset);
+ spin_unlock_irqrestore(&self->lock, flags);
+
+ return ret;
+}
+
+static void sh_irda_update_bits(struct sh_irda_self *self, u32 offset,
+ u16 mask, u16 data)
+{
+ unsigned long flags;
+ u16 old, new;
+
+ spin_lock_irqsave(&self->lock, flags);
+ old = ioread16(self->membase + offset);
+ new = (old & ~mask) | data;
+ if (old != new)
+		iowrite16(new, self->membase + offset);
+ spin_unlock_irqrestore(&self->lock, flags);
+}
+
+/************************************************************************
+
+
+ mode function
+
+
+************************************************************************/
+/*=====================================
+ *
+ * common
+ *
+ *=====================================*/
+static void sh_irda_rcv_ctrl(struct sh_irda_self *self, int enable)
+{
+ struct device *dev = &self->ndev->dev;
+
+ sh_irda_update_bits(self, IRRCTR, RE, enable ? RE : 0);
+ dev_dbg(dev, "recv %s\n", enable ? "enable" : "disable");
+}
+
+static int sh_irda_set_timeout(struct sh_irda_self *self, int interval)
+{
+ struct device *dev = &self->ndev->dev;
+
+ if (SH_IRDA_SIR != self->mode)
+ interval = 0;
+
+ if (interval < 0 || interval > 2) {
+ dev_err(dev, "unsupported timeout interval\n");
+ return -EINVAL;
+ }
+
+ sh_irda_update_bits(self, IRCFR, RTO, interval << RTO_SHIFT);
+ return 0;
+}
+
+static int sh_irda_set_baudrate(struct sh_irda_self *self, int baudrate)
+{
+ struct device *dev = &self->ndev->dev;
+ u16 val;
+
+ if (baudrate < 0)
+ return 0;
+
+ if (SH_IRDA_SIR != self->mode) {
+		dev_err(dev, "not in SIR mode\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Baud rate (bits/s) =
+ * (48 MHz / 26) / (baud rate counter value + 1) x 16
+ */
+ val = (48000000 / 26 / 16 / baudrate) - 1;
+ dev_dbg(dev, "baudrate = %d, val = 0x%02x\n", baudrate, val);
+
+ sh_irda_update_bits(self, SIRBCR, BRC_MASK, val);
+
+ return 0;
+}
+
+static int xir_get_rcv_length(struct sh_irda_self *self)
+{
+ return RFL_MASK & sh_irda_read(self, IRRFLR);
+}
+
+/*=====================================
+ *
+ * NONE MODE
+ *
+ *=====================================*/
+static int xir_fre(struct sh_irda_self *self)
+{
+ struct device *dev = &self->ndev->dev;
+ dev_err(dev, "none mode: frame recv\n");
+ return 0;
+}
+
+static int xir_trov(struct sh_irda_self *self)
+{
+ struct device *dev = &self->ndev->dev;
+ dev_err(dev, "none mode: buffer ram over\n");
+ return 0;
+}
+
+static int xir_9(struct sh_irda_self *self)
+{
+ struct device *dev = &self->ndev->dev;
+ dev_err(dev, "none mode: time over\n");
+ return 0;
+}
+
+static int xir_8(struct sh_irda_self *self)
+{
+ struct device *dev = &self->ndev->dev;
+ dev_err(dev, "none mode: framing error\n");
+ return 0;
+}
+
+static int xir_fte(struct sh_irda_self *self)
+{
+ struct device *dev = &self->ndev->dev;
+ dev_err(dev, "none mode: frame transmit end\n");
+ return 0;
+}
+
+static struct sh_irda_xir_func xir_func = {
+ .xir_fre = xir_fre,
+ .xir_trov = xir_trov,
+ .xir_9 = xir_9,
+ .xir_8 = xir_8,
+ .xir_fte = xir_fte,
+};
+
+/*=====================================
+ *
+ * MIR/FIR MODE
+ *
+ * MIR/FIR are not supported now
+ *=====================================*/
+static struct sh_irda_xir_func mfir_func = {
+ .xir_fre = xir_fre,
+ .xir_trov = xir_trov,
+ .xir_9 = xir_9,
+ .xir_8 = xir_8,
+ .xir_fte = xir_fte,
+};
+
+/*=====================================
+ *
+ * SIR MODE
+ *
+ *=====================================*/
+static int sir_fre(struct sh_irda_self *self)
+{
+ struct device *dev = &self->ndev->dev;
+ u16 data16;
+ u8 *data = (u8 *)&data16;
+ int len = xir_get_rcv_length(self);
+ int i, j;
+
+ if (len > IRDARAM_LEN)
+ len = IRDARAM_LEN;
+
+ dev_dbg(dev, "frame recv length = %d\n", len);
+
+ for (i = 0; i < len; i++) {
+ j = i % 2;
+ if (!j)
+ data16 = sh_irda_read(self, IRDARAM + i);
+
+ async_unwrap_char(self->ndev, &self->ndev->stats,
+ &self->rx_buff, data[j]);
+ }
+ self->ndev->last_rx = jiffies;
+
+ sh_irda_rcv_ctrl(self, 1);
+
+ return 0;
+}
+
+static int sir_trov(struct sh_irda_self *self)
+{
+ struct device *dev = &self->ndev->dev;
+
+ dev_err(dev, "buffer ram over\n");
+ sh_irda_rcv_ctrl(self, 1);
+ return 0;
+}
+
+static int sir_tot(struct sh_irda_self *self)
+{
+ struct device *dev = &self->ndev->dev;
+
+ dev_err(dev, "time over\n");
+ sh_irda_set_baudrate(self, 9600);
+ sh_irda_rcv_ctrl(self, 1);
+ return 0;
+}
+
+static int sir_fer(struct sh_irda_self *self)
+{
+ struct device *dev = &self->ndev->dev;
+
+ dev_err(dev, "framing error\n");
+ sh_irda_rcv_ctrl(self, 1);
+ return 0;
+}
+
+static int sir_fte(struct sh_irda_self *self)
+{
+ struct device *dev = &self->ndev->dev;
+
+ dev_dbg(dev, "frame transmit end\n");
+ netif_wake_queue(self->ndev);
+
+ return 0;
+}
+
+static struct sh_irda_xir_func sir_func = {
+ .xir_fre = sir_fre,
+ .xir_trov = sir_trov,
+ .xir_9 = sir_tot,
+ .xir_8 = sir_fer,
+ .xir_fte = sir_fte,
+};
+
+static void sh_irda_set_mode(struct sh_irda_self *self, enum sh_irda_mode mode)
+{
+ struct device *dev = &self->ndev->dev;
+ struct sh_irda_xir_func *func;
+ const char *name;
+ u16 data;
+
+ switch (mode) {
+ case SH_IRDA_SIR:
+ name = "SIR";
+ data = TMD_SIR;
+ func = &sir_func;
+ break;
+ case SH_IRDA_MIR:
+ name = "MIR";
+ data = TMD_MIR;
+ func = &mfir_func;
+ break;
+ case SH_IRDA_FIR:
+ name = "FIR";
+ data = TMD_FIR;
+ func = &mfir_func;
+ break;
+ default:
+ name = "NONE";
+ data = 0;
+ func = &xir_func;
+ break;
+ }
+
+ self->mode = mode;
+ self->xir_func = func;
+ sh_irda_update_bits(self, IRTMR, TMD_MASK, data);
+
+	dev_dbg(dev, "switch to %s mode\n", name);
+}
+
+/************************************************************************
+
+
+ irq function
+
+
+************************************************************************/
+static void sh_irda_set_irq_mask(struct sh_irda_self *self)
+{
+ u16 tmr_hole;
+ u16 xir_reg;
+
+ /* set all mask */
+ sh_irda_update_bits(self, IRTMR, xIM_MASK, xIM_MASK);
+ sh_irda_update_bits(self, SIRIMR, xIR_MASK, xIR_MASK);
+ sh_irda_update_bits(self, MFIRIMR, xIR_MASK, xIR_MASK);
+
+ /* clear irq */
+ sh_irda_update_bits(self, SIRICR, xIR_MASK, xIR_MASK);
+ sh_irda_update_bits(self, MFIRICR, xIR_MASK, xIR_MASK);
+
+ switch (self->mode) {
+ case SH_IRDA_SIR:
+ tmr_hole = SIM;
+ xir_reg = SIRIMR;
+ break;
+ case SH_IRDA_MIR:
+ case SH_IRDA_FIR:
+ tmr_hole = MIM;
+ xir_reg = MFIRIMR;
+ break;
+ default:
+ tmr_hole = 0;
+ xir_reg = 0;
+ break;
+ }
+
+ /* open mask */
+ if (xir_reg) {
+ sh_irda_update_bits(self, IRTMR, tmr_hole, 0);
+ sh_irda_update_bits(self, xir_reg, xIR_MASK, 0);
+ }
+}
+
+static irqreturn_t sh_irda_irq(int irq, void *dev_id)
+{
+ struct sh_irda_self *self = dev_id;
+ struct sh_irda_xir_func *func = self->xir_func;
+ u16 isr = sh_irda_read(self, SIRISR);
+
+ /* clear irq */
+ sh_irda_write(self, SIRICR, isr);
+
+ if (isr & FRE)
+ func->xir_fre(self);
+ if (isr & TROV)
+ func->xir_trov(self);
+ if (isr & xIR_9)
+ func->xir_9(self);
+ if (isr & xIR_8)
+ func->xir_8(self);
+ if (isr & FTE)
+ func->xir_fte(self);
+
+ return IRQ_HANDLED;
+}
+
+/************************************************************************
+
+
+ CRC function
+
+
+************************************************************************/
+static void sh_irda_crc_reset(struct sh_irda_self *self)
+{
+ sh_irda_write(self, CRCCTR, CRC_RST);
+}
+
+static void sh_irda_crc_add(struct sh_irda_self *self, u16 data)
+{
+ sh_irda_write(self, CRCIR, data & CRC_IN_MASK);
+}
+
+static u16 sh_irda_crc_cnt(struct sh_irda_self *self)
+{
+ return CRC_CT_MASK & sh_irda_read(self, CRCCTR);
+}
+
+static u16 sh_irda_crc_out(struct sh_irda_self *self)
+{
+ return sh_irda_read(self, CRCOR);
+}
+
+static int sh_irda_crc_init(struct sh_irda_self *self)
+{
+ struct device *dev = &self->ndev->dev;
+ int ret = -EIO;
+ u16 val;
+
+ sh_irda_crc_reset(self);
+
+ sh_irda_crc_add(self, 0xCC);
+ sh_irda_crc_add(self, 0xF5);
+ sh_irda_crc_add(self, 0xF1);
+ sh_irda_crc_add(self, 0xA7);
+
+ val = sh_irda_crc_cnt(self);
+ if (4 != val) {
+ dev_err(dev, "CRC count error %x\n", val);
+ goto crc_init_out;
+ }
+
+ val = sh_irda_crc_out(self);
+ if (0x51DF != val) {
+		dev_err(dev, "CRC result error %x\n", val);
+ goto crc_init_out;
+ }
+
+ ret = 0;
+
+crc_init_out:
+
+ sh_irda_crc_reset(self);
+ return ret;
+}
+
+/************************************************************************
+
+
+ iobuf function
+
+
+************************************************************************/
+static void sh_irda_remove_iobuf(struct sh_irda_self *self)
+{
+ kfree(self->rx_buff.head);
+
+ self->tx_buff.head = NULL;
+ self->tx_buff.data = NULL;
+ self->rx_buff.head = NULL;
+ self->rx_buff.data = NULL;
+}
+
+static int sh_irda_init_iobuf(struct sh_irda_self *self, int rxsize, int txsize)
+{
+ if (self->rx_buff.head ||
+ self->tx_buff.head) {
+		dev_err(&self->ndev->dev, "iobuf already allocated\n");
+ return -EINVAL;
+ }
+
+ /* rx_buff */
+ self->rx_buff.head = kmalloc(rxsize, GFP_KERNEL);
+ if (!self->rx_buff.head)
+ return -ENOMEM;
+
+ self->rx_buff.truesize = rxsize;
+ self->rx_buff.in_frame = FALSE;
+ self->rx_buff.state = OUTSIDE_FRAME;
+ self->rx_buff.data = self->rx_buff.head;
+
+ /* tx_buff */
+ self->tx_buff.head = self->membase + IRDARAM;
+ self->tx_buff.truesize = IRDARAM_LEN;
+
+ return 0;
+}
+
+/************************************************************************
+
+
+ net_device_ops function
+
+
+************************************************************************/
+static int sh_irda_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct sh_irda_self *self = netdev_priv(ndev);
+ struct device *dev = &self->ndev->dev;
+ int speed = irda_get_next_speed(skb);
+ int ret;
+
+ dev_dbg(dev, "hard xmit\n");
+
+ netif_stop_queue(ndev);
+ sh_irda_rcv_ctrl(self, 0);
+
+ ret = sh_irda_set_baudrate(self, speed);
+ if (ret < 0)
+ return ret;
+
+ self->tx_buff.len = 0;
+ if (skb->len) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&self->lock, flags);
+ self->tx_buff.len = async_wrap_skb(skb,
+ self->tx_buff.head,
+ self->tx_buff.truesize);
+ spin_unlock_irqrestore(&self->lock, flags);
+
+ if (self->tx_buff.len > self->tx_buff.truesize)
+ self->tx_buff.len = self->tx_buff.truesize;
+
+ sh_irda_write(self, IRTFLR, self->tx_buff.len);
+ sh_irda_write(self, IRTCTR, ARMOD | TE);
+ }
+
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+static int sh_irda_ioctl(struct net_device *ndev, struct ifreq *ifreq, int cmd)
+{
+ /*
+ * FIXME
+ *
+	 * This function is required by the IrDA framework,
+	 * but there is nothing for it to do yet.
+ */
+ return 0;
+}
+
+static struct net_device_stats *sh_irda_stats(struct net_device *ndev)
+{
+ struct sh_irda_self *self = netdev_priv(ndev);
+
+ return &self->ndev->stats;
+}
+
+static int sh_irda_open(struct net_device *ndev)
+{
+ struct sh_irda_self *self = netdev_priv(ndev);
+ int err;
+
+ clk_enable(self->clk);
+ err = sh_irda_crc_init(self);
+ if (err)
+ goto open_err;
+
+ sh_irda_set_mode(self, SH_IRDA_SIR);
+ sh_irda_set_timeout(self, 2);
+ sh_irda_set_baudrate(self, 9600);
+
+ self->irlap = irlap_open(ndev, &self->qos, DRIVER_NAME);
+ if (!self->irlap) {
+ err = -ENODEV;
+ goto open_err;
+ }
+
+ netif_start_queue(ndev);
+ sh_irda_rcv_ctrl(self, 1);
+ sh_irda_set_irq_mask(self);
+
+ dev_info(&ndev->dev, "opened\n");
+
+ return 0;
+
+open_err:
+ clk_disable(self->clk);
+
+ return err;
+}
+
+static int sh_irda_stop(struct net_device *ndev)
+{
+ struct sh_irda_self *self = netdev_priv(ndev);
+
+ /* Stop IrLAP */
+ if (self->irlap) {
+ irlap_close(self->irlap);
+ self->irlap = NULL;
+ }
+
+ netif_stop_queue(ndev);
+
+	dev_info(&ndev->dev, "stopped\n");
+
+ return 0;
+}
+
+static const struct net_device_ops sh_irda_ndo = {
+ .ndo_open = sh_irda_open,
+ .ndo_stop = sh_irda_stop,
+ .ndo_start_xmit = sh_irda_hard_xmit,
+ .ndo_do_ioctl = sh_irda_ioctl,
+ .ndo_get_stats = sh_irda_stats,
+};
+
+/************************************************************************
+
+
+ platform_driver function
+
+
+************************************************************************/
+static int __devinit sh_irda_probe(struct platform_device *pdev)
+{
+ struct net_device *ndev;
+ struct sh_irda_self *self;
+ struct resource *res;
+ char clk_name[8];
+ unsigned int irq;
+ int err = -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ irq = platform_get_irq(pdev, 0);
+ if (!res || irq < 0) {
+ dev_err(&pdev->dev, "Not enough platform resources.\n");
+ goto exit;
+ }
+
+ ndev = alloc_irdadev(sizeof(*self));
+ if (!ndev)
+ goto exit;
+
+ self = netdev_priv(ndev);
+ self->membase = ioremap_nocache(res->start, resource_size(res));
+ if (!self->membase) {
+ err = -ENXIO;
+ dev_err(&pdev->dev, "Unable to ioremap.\n");
+ goto err_mem_1;
+ }
+
+	  Say Y here if you want to enable SuperH IrDA devices.
+ if (err)
+ goto err_mem_2;
+
+ snprintf(clk_name, sizeof(clk_name), "irda%d", pdev->id);
+ self->clk = clk_get(&pdev->dev, clk_name);
+ if (IS_ERR(self->clk)) {
+ dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name);
+ goto err_mem_3;
+ }
+
+ irda_init_max_qos_capabilies(&self->qos);
+
+ ndev->netdev_ops = &sh_irda_ndo;
+ ndev->irq = irq;
+
+ self->ndev = ndev;
+ self->qos.baud_rate.bits &= IR_9600; /* FIXME */
+ self->qos.min_turn_time.bits = 1; /* 10 ms or more */
+ spin_lock_init(&self->lock);
+
+ irda_qos_bits_to_value(&self->qos);
+
+ err = register_netdev(ndev);
+ if (err)
+ goto err_mem_4;
+
+ platform_set_drvdata(pdev, ndev);
+
+ if (request_irq(irq, sh_irda_irq, IRQF_DISABLED, "sh_irda", self)) {
+ dev_warn(&pdev->dev, "Unable to attach sh_irda interrupt\n");
+ goto err_mem_4;
+ }
+
+ dev_info(&pdev->dev, "SuperH IrDA probed\n");
+
+ goto exit;
+
+err_mem_4:
+ clk_put(self->clk);
+err_mem_3:
+ sh_irda_remove_iobuf(self);
+err_mem_2:
+ iounmap(self->membase);
+err_mem_1:
+ free_netdev(ndev);
+exit:
+ return err;
+}
+
+static int __devexit sh_irda_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct sh_irda_self *self = netdev_priv(ndev);
+
+ if (!self)
+ return 0;
+
+ unregister_netdev(ndev);
+ clk_put(self->clk);
+ sh_irda_remove_iobuf(self);
+ iounmap(self->membase);
+ free_netdev(ndev);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver sh_irda_driver = {
+ .probe = sh_irda_probe,
+ .remove = __devexit_p(sh_irda_remove),
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+};
+
+static int __init sh_irda_init(void)
+{
+ return platform_driver_register(&sh_irda_driver);
+}
+
+static void __exit sh_irda_exit(void)
+{
+ platform_driver_unregister(&sh_irda_driver);
+}
+
+module_init(sh_irda_init);
+module_exit(sh_irda_exit);
+
+MODULE_AUTHOR("Kuninori Morimoto <morimoto.kuninori@renesas.com>");
+MODULE_DESCRIPTION("SuperH IrDA driver");
+MODULE_LICENSE("GPL");
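Per the comment in sh_irda_set_baudrate(), the SIR bit rate is (48 MHz / 26) / ((val + 1) * 16), so the driver solves for val with integer arithmetic. A worked example at the 9600 baud default used by sh_irda_open(); the numbers follow from the formula itself, not from separate documentation:

	/*
	 * val  = 48000000 / 26 / 16 / 9600 - 1       = 11
	 * rate = (48000000 / 26) / ((11 + 1) * 16)  ~= 9615 bit/s
	 * i.e. roughly 0.16% above the nominal 9600 baud.
	 */
	sh_irda_set_baudrate(self, 9600);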
diff --git a/drivers/net/irda/sh_sir.c b/drivers/net/irda/sh_sir.c
index 0745581c4b5e..5c5f99d50341 100644
--- a/drivers/net/irda/sh_sir.c
+++ b/drivers/net/irda/sh_sir.c
@@ -646,8 +646,10 @@ static int sh_sir_open(struct net_device *ndev)
sh_sir_set_baudrate(self, 9600);
self->irlap = irlap_open(ndev, &self->qos, DRIVER_NAME);
- if (!self->irlap)
+ if (!self->irlap) {
+ err = -ENODEV;
goto open_err;
+ }
/*
* Now enable the interrupt then start the queue
@@ -707,7 +709,6 @@ static int __devinit sh_sir_probe(struct platform_device *pdev)
struct sh_sir_self *self;
struct resource *res;
char clk_name[8];
- void __iomem *base;
unsigned int irq;
int err = -ENOMEM;
@@ -722,14 +723,14 @@ static int __devinit sh_sir_probe(struct platform_device *pdev)
if (!ndev)
goto exit;
- base = ioremap_nocache(res->start, resource_size(res));
- if (!base) {
+ self = netdev_priv(ndev);
+ self->membase = ioremap_nocache(res->start, resource_size(res));
+ if (!self->membase) {
err = -ENXIO;
dev_err(&pdev->dev, "Unable to ioremap.\n");
goto err_mem_1;
}
- self = netdev_priv(ndev);
err = sh_sir_init_iobuf(self, IRDA_SKB_MAX_MTU, IRDA_SIR_MAX_FRAME);
if (err)
goto err_mem_2;
@@ -746,7 +747,6 @@ static int __devinit sh_sir_probe(struct platform_device *pdev)
ndev->netdev_ops = &sh_sir_ndo;
ndev->irq = irq;
- self->membase = base;
self->ndev = ndev;
self->qos.baud_rate.bits &= IR_9600; /* FIXME */
self->qos.min_turn_time.bits = 1; /* 10 ms or more */
diff --git a/drivers/net/irda/sir_dev.c b/drivers/net/irda/sir_dev.c
index de91cd14016b..1b051dab7b29 100644
--- a/drivers/net/irda/sir_dev.c
+++ b/drivers/net/irda/sir_dev.c
@@ -655,7 +655,6 @@ static netdev_tx_t sirdev_hard_xmit(struct sk_buff *skb,
if (likely(actual > 0)) {
dev->tx_skb = skb;
- ndev->trans_start = jiffies;
dev->tx_buff.data += actual;
dev->tx_buff.len -= actual;
}
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
index 6af84d88cd03..d67e48418e55 100644
--- a/drivers/net/irda/smsc-ircc2.c
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -868,7 +868,7 @@ static void smsc_ircc_timeout(struct net_device *dev)
spin_lock_irqsave(&self->lock, flags);
smsc_ircc_sir_start(self);
smsc_ircc_change_speed(self, self->io.speed);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
spin_unlock_irqrestore(&self->lock, flags);
}
@@ -2822,7 +2822,6 @@ static void __init preconfigure_ali_port(struct pci_dev *dev,
tmpbyte |= mask;
pci_write_config_byte(dev, reg, tmpbyte);
IRDA_MESSAGE("Activated ALi 1533 ISA bridge port 0x%04x.\n", port);
- return;
}
static int __init preconfigure_through_ali(struct pci_dev *dev,
diff --git a/drivers/net/irda/via-ircc.h b/drivers/net/irda/via-ircc.h
index d9d1db03fa2d..5a84822b5a43 100644
--- a/drivers/net/irda/via-ircc.h
+++ b/drivers/net/irda/via-ircc.h
@@ -774,7 +774,7 @@ static void SetBaudRate(__u16 iobase, __u32 rate)
break;
default:
break;
- };
+ }
} else if (IsMIROn(iobase)) {
value = 0; // will automatically be fixed in 1.152M
} else if (IsFIROn(iobase)) {
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
index 209d4bcfaced..c3d07382b7fa 100644
--- a/drivers/net/irda/vlsi_ir.c
+++ b/drivers/net/irda/vlsi_ir.c
@@ -1037,7 +1037,6 @@ static netdev_tx_t vlsi_hard_start_xmit(struct sk_buff *skb,
wmb();
outw(0, iobase+VLSI_PIO_PROMPT);
}
- ndev->trans_start = jiffies;
if (ring_put(r) == NULL) {
netif_stop_queue(ndev);
@@ -1742,7 +1741,7 @@ static int vlsi_irda_suspend(struct pci_dev *pdev, pm_message_t state)
vlsi_irda_dev_t *idev;
if (!ndev) {
- IRDA_ERROR("%s - %s: no netdevice \n",
+ IRDA_ERROR("%s - %s: no netdevice\n",
__func__, pci_name(pdev));
return 0;
}
@@ -1781,7 +1780,7 @@ static int vlsi_irda_resume(struct pci_dev *pdev)
vlsi_irda_dev_t *idev;
if (!ndev) {
- IRDA_ERROR("%s - %s: no netdevice \n",
+ IRDA_ERROR("%s - %s: no netdevice\n",
__func__, pci_name(pdev));
return 0;
}
diff --git a/drivers/net/irda/w83977af_ir.c b/drivers/net/irda/w83977af_ir.c
index cb0cb758be64..1f9c3f08d1a3 100644
--- a/drivers/net/irda/w83977af_ir.c
+++ b/drivers/net/irda/w83977af_ir.c
@@ -515,7 +515,6 @@ static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb,
/* Check for empty frame */
if (!skb->len) {
w83977af_change_speed(self, speed);
- dev->trans_start = jiffies;
dev_kfree_skb(skb);
return NETDEV_TX_OK;
} else
@@ -549,7 +548,6 @@ static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb,
switch_bank(iobase, SET0);
outb(ICR_ETXTHI, iobase+ICR);
}
- dev->trans_start = jiffies;
dev_kfree_skb(skb);
/* Restore set register */
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
index 773c59c89691..ba1de5973fb2 100644
--- a/drivers/net/iseries_veth.c
+++ b/drivers/net/iseries_veth.c
@@ -962,15 +962,15 @@ static void veth_set_multicast_list(struct net_device *dev)
(netdev_mc_count(dev) > VETH_MAX_MCAST)) {
port->promiscuous = 1;
} else {
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
port->promiscuous = 0;
/* Update table */
port->num_mcast = 0;
- netdev_for_each_mc_addr(dmi, dev) {
- u8 *addr = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ u8 *addr = ha->addr;
u64 xaddr = 0;
if (addr[0] & 0x01) {/* multicast address? */
diff --git a/drivers/net/ixgb/ixgb.h b/drivers/net/ixgb/ixgb.h
index 92d2e71d0c8b..521c0c732998 100644
--- a/drivers/net/ixgb/ixgb.h
+++ b/drivers/net/ixgb/ixgb.h
@@ -78,9 +78,13 @@ struct ixgb_adapter;
#define PFX "ixgb: "
#ifdef _DEBUG_DRIVER_
-#define IXGB_DBG(args...) printk(KERN_DEBUG PFX args)
+#define IXGB_DBG(fmt, args...) printk(KERN_DEBUG PFX fmt, ##args)
#else
-#define IXGB_DBG(args...)
+#define IXGB_DBG(fmt, args...) \
+do { \
+ if (0) \
+ printk(KERN_DEBUG PFX fmt, ##args); \
+} while (0)
#endif
/* TX/RX descriptor defines */
diff --git a/drivers/net/ixgb/ixgb_ee.c b/drivers/net/ixgb/ixgb_ee.c
index 89ffa7264a12..813993f9c65c 100644
--- a/drivers/net/ixgb/ixgb_ee.c
+++ b/drivers/net/ixgb/ixgb_ee.c
@@ -26,6 +26,8 @@
*******************************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include "ixgb_hw.h"
#include "ixgb_ee.h"
/* Local prototypes */
@@ -56,7 +58,6 @@ ixgb_raise_clock(struct ixgb_hw *hw,
*eecd_reg = *eecd_reg | IXGB_EECD_SK;
IXGB_WRITE_REG(hw, EECD, *eecd_reg);
udelay(50);
- return;
}
/******************************************************************************
@@ -75,7 +76,6 @@ ixgb_lower_clock(struct ixgb_hw *hw,
*eecd_reg = *eecd_reg & ~IXGB_EECD_SK;
IXGB_WRITE_REG(hw, EECD, *eecd_reg);
udelay(50);
- return;
}
/******************************************************************************
@@ -125,7 +125,6 @@ ixgb_shift_out_bits(struct ixgb_hw *hw,
/* We leave the "DI" bit set to "0" when we leave this routine. */
eecd_reg &= ~IXGB_EECD_DI;
IXGB_WRITE_REG(hw, EECD, eecd_reg);
- return;
}
/******************************************************************************
@@ -190,7 +189,6 @@ ixgb_setup_eeprom(struct ixgb_hw *hw)
/* Set CS */
eecd_reg |= IXGB_EECD_CS;
IXGB_WRITE_REG(hw, EECD, eecd_reg);
- return;
}
/******************************************************************************
@@ -224,7 +222,6 @@ ixgb_standby_eeprom(struct ixgb_hw *hw)
eecd_reg &= ~IXGB_EECD_SK;
IXGB_WRITE_REG(hw, EECD, eecd_reg);
udelay(50);
- return;
}
/******************************************************************************
@@ -248,7 +245,6 @@ ixgb_clock_eeprom(struct ixgb_hw *hw)
eecd_reg &= ~IXGB_EECD_SK;
IXGB_WRITE_REG(hw, EECD, eecd_reg);
udelay(50);
- return;
}
/******************************************************************************
@@ -268,7 +264,6 @@ ixgb_cleanup_eeprom(struct ixgb_hw *hw)
IXGB_WRITE_REG(hw, EECD, eecd_reg);
ixgb_clock_eeprom(hw);
- return;
}
/******************************************************************************
@@ -357,7 +352,6 @@ ixgb_update_eeprom_checksum(struct ixgb_hw *hw)
checksum = (u16) EEPROM_SUM - checksum;
ixgb_write_eeprom(hw, EEPROM_CHECKSUM_REG, checksum);
- return;
}
/******************************************************************************
@@ -412,8 +406,6 @@ ixgb_write_eeprom(struct ixgb_hw *hw, u16 offset, u16 data)
/* clear the init_ctrl_reg_1 to signify that the cache is invalidated */
ee_map->init_ctrl_reg_1 = cpu_to_le16(EEPROM_ICW1_SIGNATURE_CLEAR);
-
- return;
}
/******************************************************************************
@@ -467,11 +459,11 @@ ixgb_get_eeprom_data(struct ixgb_hw *hw)
u16 checksum = 0;
struct ixgb_ee_map_type *ee_map;
- DEBUGFUNC("ixgb_get_eeprom_data");
+ ENTER();
ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
- DEBUGOUT("ixgb_ee: Reading eeprom data\n");
+ pr_debug("Reading eeprom data\n");
for (i = 0; i < IXGB_EEPROM_SIZE ; i++) {
u16 ee_data;
ee_data = ixgb_read_eeprom(hw, i);
@@ -480,7 +472,7 @@ ixgb_get_eeprom_data(struct ixgb_hw *hw)
}
if (checksum != (u16) EEPROM_SUM) {
- DEBUGOUT("ixgb_ee: Checksum invalid.\n");
+ pr_debug("Checksum invalid\n");
/* clear the init_ctrl_reg_1 to signify that the cache is
* invalidated */
ee_map->init_ctrl_reg_1 = cpu_to_le16(EEPROM_ICW1_SIGNATURE_CLEAR);
@@ -489,7 +481,7 @@ ixgb_get_eeprom_data(struct ixgb_hw *hw)
if ((ee_map->init_ctrl_reg_1 & cpu_to_le16(EEPROM_ICW1_SIGNATURE_MASK))
!= cpu_to_le16(EEPROM_ICW1_SIGNATURE_VALID)) {
- DEBUGOUT("ixgb_ee: Signature invalid.\n");
+ pr_debug("Signature invalid\n");
return(false);
}
@@ -555,13 +547,13 @@ ixgb_get_ee_mac_addr(struct ixgb_hw *hw,
int i;
struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
- DEBUGFUNC("ixgb_get_ee_mac_addr");
+ ENTER();
if (ixgb_check_and_get_eeprom_data(hw) == true) {
for (i = 0; i < IXGB_ETH_LENGTH_OF_ADDRESS; i++) {
mac_addr[i] = ee_map->mac_addr[i];
- DEBUGOUT2("mac(%d) = %.2X\n", i, mac_addr[i]);
}
+ pr_debug("eeprom mac address = %pM\n", mac_addr);
}
}
diff --git a/drivers/net/ixgb/ixgb_hw.c b/drivers/net/ixgb/ixgb_hw.c
index ff67a84e6802..397acabccab6 100644
--- a/drivers/net/ixgb/ixgb_hw.c
+++ b/drivers/net/ixgb/ixgb_hw.c
@@ -30,9 +30,13 @@
* Shared functions for accessing and configuring the adapter
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include "ixgb_hw.h"
#include "ixgb_ids.h"
+#include <linux/etherdevice.h>
+
/* Local function prototypes */
static u32 ixgb_hash_mc_addr(struct ixgb_hw *hw, u8 * mc_addr);
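ixgb_hw.c (and later ixgb_main.c and ixgb_param.c) gains a pr_fmt() definition ahead of the first include, so every pr_*() call in the file is automatically prefixed with the module name; that is why message strings elsewhere in the patch can drop their hand-written "ixgb:" and "ixgb_ee:" prefixes. Roughly, assuming KBUILD_MODNAME expands to "ixgb" for this module:

    #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

    /* pr_info(fmt, ...) becomes printk(KERN_INFO pr_fmt(fmt), ...), so this line ... */
    pr_info("%s - version %s\n", ixgb_driver_string, ixgb_driver_version);
    /* ... comes out as "ixgb: <driver string> - version <version>" */
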
@@ -120,13 +124,13 @@ ixgb_adapter_stop(struct ixgb_hw *hw)
u32 ctrl_reg;
u32 icr_reg;
- DEBUGFUNC("ixgb_adapter_stop");
+ ENTER();
/* If we are stopped or resetting exit gracefully and wait to be
* started again before accessing the hardware.
*/
if (hw->adapter_stopped) {
- DEBUGOUT("Exiting because the adapter is already stopped!!!\n");
+ pr_debug("Exiting because the adapter is already stopped!!!\n");
return false;
}
@@ -136,7 +140,7 @@ ixgb_adapter_stop(struct ixgb_hw *hw)
hw->adapter_stopped = true;
/* Clear interrupt mask to stop board from generating interrupts */
- DEBUGOUT("Masking off all interrupts\n");
+ pr_debug("Masking off all interrupts\n");
IXGB_WRITE_REG(hw, IMC, 0xFFFFFFFF);
/* Disable the Transmit and Receive units. Then delay to allow
@@ -152,12 +156,12 @@ ixgb_adapter_stop(struct ixgb_hw *hw)
* the current PCI configuration. The global reset bit is self-
* clearing, and should clear within a microsecond.
*/
- DEBUGOUT("Issuing a global reset to MAC\n");
+ pr_debug("Issuing a global reset to MAC\n");
ctrl_reg = ixgb_mac_reset(hw);
/* Clear interrupt mask to stop board from generating interrupts */
- DEBUGOUT("Masking off all interrupts\n");
+ pr_debug("Masking off all interrupts\n");
IXGB_WRITE_REG(hw, IMC, 0xffffffff);
/* Clear any pending interrupt events. */
@@ -183,7 +187,7 @@ ixgb_identify_xpak_vendor(struct ixgb_hw *hw)
u16 vendor_name[5];
ixgb_xpak_vendor xpak_vendor;
- DEBUGFUNC("ixgb_identify_xpak_vendor");
+ ENTER();
/* Read the first few bytes of the vendor string from the XPAK NVR
* registers. These are standard XENPAK/XPAK registers, so all XPAK
@@ -222,12 +226,12 @@ ixgb_identify_phy(struct ixgb_hw *hw)
ixgb_phy_type phy_type;
ixgb_xpak_vendor xpak_vendor;
- DEBUGFUNC("ixgb_identify_phy");
+ ENTER();
/* Infer the transceiver/phy type from the device id */
switch (hw->device_id) {
case IXGB_DEVICE_ID_82597EX:
- DEBUGOUT("Identified TXN17401 optics\n");
+ pr_debug("Identified TXN17401 optics\n");
phy_type = ixgb_phy_type_txn17401;
break;
@@ -237,30 +241,30 @@ ixgb_identify_phy(struct ixgb_hw *hw)
* type of optics. */
xpak_vendor = ixgb_identify_xpak_vendor(hw);
if (xpak_vendor == ixgb_xpak_vendor_intel) {
- DEBUGOUT("Identified TXN17201 optics\n");
+ pr_debug("Identified TXN17201 optics\n");
phy_type = ixgb_phy_type_txn17201;
} else {
- DEBUGOUT("Identified G6005 optics\n");
+ pr_debug("Identified G6005 optics\n");
phy_type = ixgb_phy_type_g6005;
}
break;
case IXGB_DEVICE_ID_82597EX_LR:
- DEBUGOUT("Identified G6104 optics\n");
+ pr_debug("Identified G6104 optics\n");
phy_type = ixgb_phy_type_g6104;
break;
case IXGB_DEVICE_ID_82597EX_CX4:
- DEBUGOUT("Identified CX4\n");
+ pr_debug("Identified CX4\n");
xpak_vendor = ixgb_identify_xpak_vendor(hw);
if (xpak_vendor == ixgb_xpak_vendor_intel) {
- DEBUGOUT("Identified TXN17201 optics\n");
+ pr_debug("Identified TXN17201 optics\n");
phy_type = ixgb_phy_type_txn17201;
} else {
- DEBUGOUT("Identified G6005 optics\n");
+ pr_debug("Identified G6005 optics\n");
phy_type = ixgb_phy_type_g6005;
}
break;
default:
- DEBUGOUT("Unknown physical layer module\n");
+ pr_debug("Unknown physical layer module\n");
phy_type = ixgb_phy_type_unknown;
break;
}
@@ -296,18 +300,18 @@ ixgb_init_hw(struct ixgb_hw *hw)
u32 ctrl_reg;
bool status;
- DEBUGFUNC("ixgb_init_hw");
+ ENTER();
/* Issue a global reset to the MAC. This will reset the chip's
* transmit, receive, DMA, and link units. It will not affect
* the current PCI configuration. The global reset bit is self-
* clearing, and should clear within a microsecond.
*/
- DEBUGOUT("Issuing a global reset to MAC\n");
+ pr_debug("Issuing a global reset to MAC\n");
ctrl_reg = ixgb_mac_reset(hw);
- DEBUGOUT("Issuing an EE reset to MAC\n");
+ pr_debug("Issuing an EE reset to MAC\n");
#ifdef HP_ZX1
/* Workaround for 82597EX reset errata */
IXGB_WRITE_REG_IO(hw, CTRL1, IXGB_CTRL1_EE_RST);
@@ -335,7 +339,7 @@ ixgb_init_hw(struct ixgb_hw *hw)
* If it is not valid, we fail hardware init.
*/
if (!mac_addr_valid(hw->curr_mac_addr)) {
- DEBUGOUT("MAC address invalid after ixgb_init_rx_addrs\n");
+ pr_debug("MAC address invalid after ixgb_init_rx_addrs\n");
return(false);
}
@@ -346,7 +350,7 @@ ixgb_init_hw(struct ixgb_hw *hw)
ixgb_get_bus_info(hw);
/* Zero out the Multicast HASH table */
- DEBUGOUT("Zeroing the MTA\n");
+ pr_debug("Zeroing the MTA\n");
for (i = 0; i < IXGB_MC_TBL_SIZE; i++)
IXGB_WRITE_REG_ARRAY(hw, MTA, i, 0);
@@ -379,7 +383,7 @@ ixgb_init_rx_addrs(struct ixgb_hw *hw)
{
u32 i;
- DEBUGFUNC("ixgb_init_rx_addrs");
+ ENTER();
/*
* If the current mac address is valid, assume it is a software override
@@ -391,35 +395,24 @@ ixgb_init_rx_addrs(struct ixgb_hw *hw)
/* Get the MAC address from the eeprom for later reference */
ixgb_get_ee_mac_addr(hw, hw->curr_mac_addr);
- DEBUGOUT3(" Keeping Permanent MAC Addr =%.2X %.2X %.2X ",
- hw->curr_mac_addr[0],
- hw->curr_mac_addr[1], hw->curr_mac_addr[2]);
- DEBUGOUT3("%.2X %.2X %.2X\n",
- hw->curr_mac_addr[3],
- hw->curr_mac_addr[4], hw->curr_mac_addr[5]);
+ pr_debug("Keeping Permanent MAC Addr = %pM\n",
+ hw->curr_mac_addr);
} else {
/* Setup the receive address. */
- DEBUGOUT("Overriding MAC Address in RAR[0]\n");
- DEBUGOUT3(" New MAC Addr =%.2X %.2X %.2X ",
- hw->curr_mac_addr[0],
- hw->curr_mac_addr[1], hw->curr_mac_addr[2]);
- DEBUGOUT3("%.2X %.2X %.2X\n",
- hw->curr_mac_addr[3],
- hw->curr_mac_addr[4], hw->curr_mac_addr[5]);
+ pr_debug("Overriding MAC Address in RAR[0]\n");
+ pr_debug("New MAC Addr = %pM\n", hw->curr_mac_addr);
ixgb_rar_set(hw, hw->curr_mac_addr, 0);
}
/* Zero out the other 15 receive addresses. */
- DEBUGOUT("Clearing RAR[1-15]\n");
+ pr_debug("Clearing RAR[1-15]\n");
for (i = 1; i < IXGB_RAR_ENTRIES; i++) {
/* Write high reg first to disable the AV bit first */
IXGB_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
IXGB_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
}
-
- return;
}
/******************************************************************************
@@ -444,65 +437,50 @@ ixgb_mc_addr_list_update(struct ixgb_hw *hw,
u32 hash_value;
u32 i;
u32 rar_used_count = 1; /* RAR[0] is used for our MAC address */
+ u8 *mca;
- DEBUGFUNC("ixgb_mc_addr_list_update");
+ ENTER();
/* Set the new number of MC addresses that we are being requested to use. */
hw->num_mc_addrs = mc_addr_count;
/* Clear RAR[1-15] */
- DEBUGOUT(" Clearing RAR[1-15]\n");
+ pr_debug("Clearing RAR[1-15]\n");
for (i = rar_used_count; i < IXGB_RAR_ENTRIES; i++) {
IXGB_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
IXGB_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
}
/* Clear the MTA */
- DEBUGOUT(" Clearing MTA\n");
+ pr_debug("Clearing MTA\n");
for (i = 0; i < IXGB_MC_TBL_SIZE; i++)
IXGB_WRITE_REG_ARRAY(hw, MTA, i, 0);
/* Add the new addresses */
+ mca = mc_addr_list;
for (i = 0; i < mc_addr_count; i++) {
- DEBUGOUT(" Adding the multicast addresses:\n");
- DEBUGOUT7(" MC Addr #%d =%.2X %.2X %.2X %.2X %.2X %.2X\n", i,
- mc_addr_list[i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad)],
- mc_addr_list[i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad) +
- 1],
- mc_addr_list[i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad) +
- 2],
- mc_addr_list[i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad) +
- 3],
- mc_addr_list[i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad) +
- 4],
- mc_addr_list[i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad) +
- 5]);
+ pr_debug("Adding the multicast addresses:\n");
+ pr_debug("MC Addr #%d = %pM\n", i, mca);
/* Place this multicast address in the RAR if there is room, *
* else put it in the MTA
*/
if (rar_used_count < IXGB_RAR_ENTRIES) {
- ixgb_rar_set(hw,
- mc_addr_list +
- (i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad)),
- rar_used_count);
- DEBUGOUT1("Added a multicast address to RAR[%d]\n", i);
+ ixgb_rar_set(hw, mca, rar_used_count);
+ pr_debug("Added a multicast address to RAR[%d]\n", i);
rar_used_count++;
} else {
- hash_value = ixgb_hash_mc_addr(hw,
- mc_addr_list +
- (i *
- (IXGB_ETH_LENGTH_OF_ADDRESS
- + pad)));
+ hash_value = ixgb_hash_mc_addr(hw, mca);
- DEBUGOUT1(" Hash value = 0x%03X\n", hash_value);
+ pr_debug("Hash value = 0x%03X\n", hash_value);
ixgb_mta_set(hw, hash_value);
}
+
+ mca += IXGB_ETH_LENGTH_OF_ADDRESS + pad;
}
- DEBUGOUT("MC Update Complete\n");
- return;
+ pr_debug("MC Update Complete\n");
}
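ixgb_mc_addr_list_update() now walks the packed multicast list with a single stride pointer instead of re-deriving the offset i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad) for every access. The loop shape, reduced to a hedged sketch (use_address() stands in for the RAR/MTA update and is not a real function):

    u8 *mca = mc_addr_list;   /* entries are 6 address bytes followed by 'pad' filler bytes */
    u32 i;

    for (i = 0; i < mc_addr_count; i++) {
            use_address(mca);                           /* ixgb_rar_set() or hash + ixgb_mta_set() */
            mca += IXGB_ETH_LENGTH_OF_ADDRESS + pad;    /* step to the next packed entry */
    }
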
/******************************************************************************
@@ -520,7 +498,7 @@ ixgb_hash_mc_addr(struct ixgb_hw *hw,
{
u32 hash_value = 0;
- DEBUGFUNC("ixgb_hash_mc_addr");
+ ENTER();
/* The portion of the address that is used for the hash table is
* determined by the mc_filter_type setting.
@@ -547,7 +525,7 @@ ixgb_hash_mc_addr(struct ixgb_hw *hw,
break;
default:
/* Invalid mc_filter_type, what should we do? */
- DEBUGOUT("MC filter type param set incorrectly\n");
+ pr_debug("MC filter type param set incorrectly\n");
ASSERT(0);
break;
}
@@ -585,8 +563,6 @@ ixgb_mta_set(struct ixgb_hw *hw,
mta_reg |= (1 << hash_bit);
IXGB_WRITE_REG_ARRAY(hw, MTA, hash_reg, mta_reg);
-
- return;
}
/******************************************************************************
@@ -603,7 +579,7 @@ ixgb_rar_set(struct ixgb_hw *hw,
{
u32 rar_low, rar_high;
- DEBUGFUNC("ixgb_rar_set");
+ ENTER();
/* HW expects these in little endian so we reverse the byte order
* from network order (big endian) to little endian
@@ -619,7 +595,6 @@ ixgb_rar_set(struct ixgb_hw *hw,
IXGB_WRITE_REG_ARRAY(hw, RA, (index << 1), rar_low);
IXGB_WRITE_REG_ARRAY(hw, RA, ((index << 1) + 1), rar_high);
- return;
}
/******************************************************************************
@@ -635,7 +610,6 @@ ixgb_write_vfta(struct ixgb_hw *hw,
u32 value)
{
IXGB_WRITE_REG_ARRAY(hw, VFTA, offset, value);
- return;
}
/******************************************************************************
@@ -650,7 +624,6 @@ ixgb_clear_vfta(struct ixgb_hw *hw)
for (offset = 0; offset < IXGB_VLAN_FILTER_TBL_SIZE; offset++)
IXGB_WRITE_REG_ARRAY(hw, VFTA, offset, 0);
- return;
}
/******************************************************************************
@@ -666,7 +639,7 @@ ixgb_setup_fc(struct ixgb_hw *hw)
u32 pap_reg = 0; /* by default, assume no pause time */
bool status = true;
- DEBUGFUNC("ixgb_setup_fc");
+ ENTER();
/* Get the current control reg 0 settings */
ctrl_reg = IXGB_READ_REG(hw, CTRL0);
@@ -710,7 +683,7 @@ ixgb_setup_fc(struct ixgb_hw *hw)
break;
default:
/* We should never get here. The value should be 0-3. */
- DEBUGOUT("Flow control param set incorrectly\n");
+ pr_debug("Flow control param set incorrectly\n");
ASSERT(0);
break;
}
@@ -940,7 +913,7 @@ ixgb_check_for_link(struct ixgb_hw *hw)
u32 status_reg;
u32 xpcss_reg;
- DEBUGFUNC("ixgb_check_for_link");
+ ENTER();
xpcss_reg = IXGB_READ_REG(hw, XPCSS);
status_reg = IXGB_READ_REG(hw, STATUS);
@@ -950,7 +923,7 @@ ixgb_check_for_link(struct ixgb_hw *hw)
hw->link_up = true;
} else if (!(xpcss_reg & IXGB_XPCSS_ALIGN_STATUS) &&
(status_reg & IXGB_STATUS_LU)) {
- DEBUGOUT("XPCSS Not Aligned while Status:LU is set.\n");
+ pr_debug("XPCSS Not Aligned while Status:LU is set\n");
hw->link_up = ixgb_link_reset(hw);
} else {
/*
@@ -981,8 +954,7 @@ bool ixgb_check_for_bad_link(struct ixgb_hw *hw)
newRFC = IXGB_READ_REG(hw, RFC);
if ((hw->lastLFC + 250 < newLFC)
|| (hw->lastRFC + 250 < newRFC)) {
- DEBUGOUT
- ("BAD LINK! too many LFC/RFC since last check\n");
+ pr_debug("BAD LINK! too many LFC/RFC since last check\n");
bad_link_returncode = true;
}
hw->lastLFC = newLFC;
@@ -1002,11 +974,11 @@ ixgb_clear_hw_cntrs(struct ixgb_hw *hw)
{
volatile u32 temp_reg;
- DEBUGFUNC("ixgb_clear_hw_cntrs");
+ ENTER();
/* if we are stopped or resetting exit gracefully */
if (hw->adapter_stopped) {
- DEBUGOUT("Exiting because the adapter is stopped!!!\n");
+ pr_debug("Exiting because the adapter is stopped!!!\n");
return;
}
@@ -1070,7 +1042,6 @@ ixgb_clear_hw_cntrs(struct ixgb_hw *hw)
temp_reg = IXGB_READ_REG(hw, XOFFRXC);
temp_reg = IXGB_READ_REG(hw, XOFFTXC);
temp_reg = IXGB_READ_REG(hw, RJC);
- return;
}
/******************************************************************************
@@ -1086,7 +1057,6 @@ ixgb_led_on(struct ixgb_hw *hw)
/* To turn on the LED, clear software-definable pin 0 (SDP0). */
ctrl0_reg &= ~IXGB_CTRL0_SDP0;
IXGB_WRITE_REG(hw, CTRL0, ctrl0_reg);
- return;
}
/******************************************************************************
@@ -1102,7 +1072,6 @@ ixgb_led_off(struct ixgb_hw *hw)
/* To turn off the LED, set software-definable pin 0 (SDP0). */
ctrl0_reg |= IXGB_CTRL0_SDP0;
IXGB_WRITE_REG(hw, CTRL0, ctrl0_reg);
- return;
}
/******************************************************************************
@@ -1142,8 +1111,6 @@ ixgb_get_bus_info(struct ixgb_hw *hw)
hw->bus.width = (status_reg & IXGB_STATUS_BUS64) ?
ixgb_bus_width_64 : ixgb_bus_width_32;
-
- return;
}
/******************************************************************************
@@ -1156,26 +1123,21 @@ static bool
mac_addr_valid(u8 *mac_addr)
{
bool is_valid = true;
- DEBUGFUNC("mac_addr_valid");
+ ENTER();
/* Make sure it is not a multicast address */
- if (IS_MULTICAST(mac_addr)) {
- DEBUGOUT("MAC address is multicast\n");
+ if (is_multicast_ether_addr(mac_addr)) {
+ pr_debug("MAC address is multicast\n");
is_valid = false;
}
/* Not a broadcast address */
- else if (IS_BROADCAST(mac_addr)) {
- DEBUGOUT("MAC address is broadcast\n");
+ else if (is_broadcast_ether_addr(mac_addr)) {
+ pr_debug("MAC address is broadcast\n");
is_valid = false;
}
/* Reject the zero address */
- else if (mac_addr[0] == 0 &&
- mac_addr[1] == 0 &&
- mac_addr[2] == 0 &&
- mac_addr[3] == 0 &&
- mac_addr[4] == 0 &&
- mac_addr[5] == 0) {
- DEBUGOUT("MAC address is all zeros\n");
+ else if (is_zero_ether_addr(mac_addr)) {
+ pr_debug("MAC address is all zeros\n");
is_valid = false;
}
return (is_valid);
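mac_addr_valid(), together with the IS_MULTICAST/IS_BROADCAST macros removed from ixgb_hw.h below, now relies on the standard <linux/etherdevice.h> helpers. As an aside, is_valid_ether_addr() already folds the multicast test (which also catches broadcast) together with the zero-address test, so the whole helper could in principle shrink to one call; a hedged sketch of that variant, which this patch does not go as far as doing:

    #include <linux/etherdevice.h>

    /* sketch only: same accept/reject decision as mac_addr_valid(),
     * minus the per-case pr_debug() output */
    static bool mac_addr_valid(u8 *mac_addr)
    {
            return is_valid_ether_addr(mac_addr);
    }
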
@@ -1235,8 +1197,6 @@ ixgb_optics_reset(struct ixgb_hw *hw)
IXGB_PHY_ADDRESS,
MDIO_MMD_PMAPMD);
}
-
- return;
}
/******************************************************************************
@@ -1297,6 +1257,4 @@ ixgb_optics_reset_bcm(struct ixgb_hw *hw)
/* SerDes needs extra delay */
msleep(IXGB_SUN_PHY_RESET_DELAY);
-
- return;
}
diff --git a/drivers/net/ixgb/ixgb_hw.h b/drivers/net/ixgb/ixgb_hw.h
index af6ca3aab5ad..873d32b89fba 100644
--- a/drivers/net/ixgb/ixgb_hw.h
+++ b/drivers/net/ixgb/ixgb_hw.h
@@ -636,18 +636,6 @@ struct ixgb_flash_buffer {
u8 filler3[0xAAAA];
};
-/*
- * This is a little-endian specific check.
- */
-#define IS_MULTICAST(Address) \
- (bool)(((u8 *)(Address))[0] & ((u8)0x01))
-
-/*
- * Check whether an address is broadcast.
- */
-#define IS_BROADCAST(Address) \
- ((((u8 *)(Address))[0] == ((u8)0xff)) && (((u8 *)(Address))[1] == ((u8)0xff)))
-
/* Flow control parameters */
struct ixgb_fc {
u32 high_water; /* Flow Control High-water */
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index c9fef65cb98b..c6b75c83100c 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -26,6 +26,8 @@
*******************************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include "ixgb.h"
char ixgb_driver_name[] = "ixgb";
@@ -146,10 +148,8 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
static int __init
ixgb_init_module(void)
{
- printk(KERN_INFO "%s - version %s\n",
- ixgb_driver_string, ixgb_driver_version);
-
- printk(KERN_INFO "%s\n", ixgb_copyright);
+ pr_info("%s - version %s\n", ixgb_driver_string, ixgb_driver_version);
+ pr_info("%s\n", ixgb_copyright);
return pci_register_driver(&ixgb_driver);
}
@@ -368,17 +368,22 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
return err;
- if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) &&
- !(err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))) {
- pci_using_dac = 1;
+ pci_using_dac = 0;
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+ if (!err) {
+ err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+ if (!err)
+ pci_using_dac = 1;
} else {
- if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) ||
- (err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))) {
- printk(KERN_ERR
- "ixgb: No usable DMA configuration, aborting\n");
- goto err_dma_mask;
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (err) {
+ err = dma_set_coherent_mask(&pdev->dev,
+ DMA_BIT_MASK(32));
+ if (err) {
+ pr_err("No usable DMA configuration, aborting\n");
+ goto err_dma_mask;
+ }
}
- pci_using_dac = 0;
}
err = pci_request_regions(pdev, ixgb_driver_name);
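The probe path switches from the pci_set_dma_mask()/pci_set_consistent_dma_mask() wrappers to dma_set_mask()/dma_set_coherent_mask() on &pdev->dev, still preferring 64-bit addressing and aborting only when no usable configuration is found. Later kernels add dma_set_mask_and_coherent(), which collapses each streaming/coherent pair into one call; a hedged sketch of that shorter form (not what this patch uses), reusing the err_dma_mask label from the function above:

    pci_using_dac = 0;
    if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
            pci_using_dac = 1;
    } else if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
            pr_err("No usable DMA configuration, aborting\n");
            goto err_dma_mask;
    }
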
@@ -674,7 +679,8 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
txdr->size = txdr->count * sizeof(struct ixgb_tx_desc);
txdr->size = ALIGN(txdr->size, 4096);
- txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
+ txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
+ GFP_KERNEL);
if (!txdr->desc) {
vfree(txdr->buffer_info);
netif_err(adapter, probe, adapter->netdev,
@@ -763,7 +769,8 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc);
rxdr->size = ALIGN(rxdr->size, 4096);
- rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
+ rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
+ GFP_KERNEL);
if (!rxdr->desc) {
vfree(rxdr->buffer_info);
@@ -884,8 +891,8 @@ ixgb_free_tx_resources(struct ixgb_adapter *adapter)
vfree(adapter->tx_ring.buffer_info);
adapter->tx_ring.buffer_info = NULL;
- pci_free_consistent(pdev, adapter->tx_ring.size,
- adapter->tx_ring.desc, adapter->tx_ring.dma);
+ dma_free_coherent(&pdev->dev, adapter->tx_ring.size,
+ adapter->tx_ring.desc, adapter->tx_ring.dma);
adapter->tx_ring.desc = NULL;
}
@@ -896,12 +903,11 @@ ixgb_unmap_and_free_tx_resource(struct ixgb_adapter *adapter,
{
if (buffer_info->dma) {
if (buffer_info->mapped_as_page)
- pci_unmap_page(adapter->pdev, buffer_info->dma,
- buffer_info->length, PCI_DMA_TODEVICE);
+ dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
+ buffer_info->length, DMA_TO_DEVICE);
else
- pci_unmap_single(adapter->pdev, buffer_info->dma,
- buffer_info->length,
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
+ buffer_info->length, DMA_TO_DEVICE);
buffer_info->dma = 0;
}
@@ -967,7 +973,8 @@ ixgb_free_rx_resources(struct ixgb_adapter *adapter)
vfree(rx_ring->buffer_info);
rx_ring->buffer_info = NULL;
- pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
+ dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
+ rx_ring->dma);
rx_ring->desc = NULL;
}
@@ -991,10 +998,10 @@ ixgb_clean_rx_ring(struct ixgb_adapter *adapter)
for (i = 0; i < rx_ring->count; i++) {
buffer_info = &rx_ring->buffer_info[i];
if (buffer_info->dma) {
- pci_unmap_single(pdev,
+ dma_unmap_single(&pdev->dev,
buffer_info->dma,
buffer_info->length,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
buffer_info->dma = 0;
buffer_info->length = 0;
}
@@ -1058,7 +1065,7 @@ ixgb_set_multi(struct net_device *netdev)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
struct ixgb_hw *hw = &adapter->hw;
- struct dev_mc_list *mc_ptr;
+ struct netdev_hw_addr *ha;
u32 rctl;
int i;
@@ -1089,9 +1096,9 @@ ixgb_set_multi(struct net_device *netdev)
IXGB_WRITE_REG(hw, RCTL, rctl);
i = 0;
- netdev_for_each_mc_addr(mc_ptr, netdev)
+ netdev_for_each_mc_addr(ha, netdev)
memcpy(&mta[i++ * IXGB_ETH_LENGTH_OF_ADDRESS],
- mc_ptr->dmi_addr, IXGB_ETH_LENGTH_OF_ADDRESS);
+ ha->addr, IXGB_ETH_LENGTH_OF_ADDRESS);
ixgb_mc_addr_list_update(hw, mta, netdev_mc_count(netdev), 0);
}
@@ -1118,15 +1125,14 @@ ixgb_watchdog(unsigned long data)
if (adapter->hw.link_up) {
if (!netif_carrier_ok(netdev)) {
- printk(KERN_INFO "ixgb: %s NIC Link is Up 10 Gbps "
- "Full Duplex, Flow Control: %s\n",
- netdev->name,
- (adapter->hw.fc.type == ixgb_fc_full) ?
- "RX/TX" :
- ((adapter->hw.fc.type == ixgb_fc_rx_pause) ?
- "RX" :
- ((adapter->hw.fc.type == ixgb_fc_tx_pause) ?
- "TX" : "None")));
+ netdev_info(netdev,
+ "NIC Link is Up 10 Gbps Full Duplex, Flow Control: %s\n",
+ (adapter->hw.fc.type == ixgb_fc_full) ?
+ "RX/TX" :
+ (adapter->hw.fc.type == ixgb_fc_rx_pause) ?
+ "RX" :
+ (adapter->hw.fc.type == ixgb_fc_tx_pause) ?
+ "TX" : "None");
adapter->link_speed = 10000;
adapter->link_duplex = FULL_DUPLEX;
netif_carrier_on(netdev);
@@ -1135,8 +1141,7 @@ ixgb_watchdog(unsigned long data)
if (netif_carrier_ok(netdev)) {
adapter->link_speed = 0;
adapter->link_duplex = 0;
- printk(KERN_INFO "ixgb: %s NIC Link is Down\n",
- netdev->name);
+ netdev_info(netdev, "NIC Link is Down\n");
netif_carrier_off(netdev);
}
}
@@ -1303,9 +1308,10 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
WARN_ON(buffer_info->dma != 0);
buffer_info->time_stamp = jiffies;
buffer_info->mapped_as_page = false;
- buffer_info->dma = pci_map_single(pdev, skb->data + offset,
- size, PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(pdev, buffer_info->dma))
+ buffer_info->dma = dma_map_single(&pdev->dev,
+ skb->data + offset,
+ size, DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma))
goto dma_error;
buffer_info->next_to_watch = 0;
@@ -1344,10 +1350,9 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
buffer_info->time_stamp = jiffies;
buffer_info->mapped_as_page = true;
buffer_info->dma =
- pci_map_page(pdev, frag->page,
- offset, size,
- PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(pdev, buffer_info->dma))
+ dma_map_page(&pdev->dev, frag->page,
+ offset, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma))
goto dma_error;
buffer_info->next_to_watch = 0;
@@ -1916,6 +1921,31 @@ ixgb_rx_checksum(struct ixgb_adapter *adapter,
}
}
+/*
+ * this should improve performance for small packets with large amounts
+ * of reassembly being done in the stack
+ */
+static void ixgb_check_copybreak(struct net_device *netdev,
+ struct ixgb_buffer *buffer_info,
+ u32 length, struct sk_buff **skb)
+{
+ struct sk_buff *new_skb;
+
+ if (length > copybreak)
+ return;
+
+ new_skb = netdev_alloc_skb_ip_align(netdev, length);
+ if (!new_skb)
+ return;
+
+ skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN,
+ (*skb)->data - NET_IP_ALIGN,
+ length + NET_IP_ALIGN);
+ /* save the skb in buffer_info as good */
+ buffer_info->skb = *skb;
+ *skb = new_skb;
+}
+
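ixgb_check_copybreak() factors the small-packet copy path out of ixgb_clean_rx_irq(): frames no longer than the driver's copybreak threshold are copied into a freshly allocated, exactly sized skb (netdev_alloc_skb_ip_align() keeps the IP header aligned), and the original full-sized receive buffer is handed back through buffer_info->skb for reuse. Note the boundary case shifts slightly: the old inline code copied only when length < copybreak, while the helper also copies at length == copybreak. The caller side, as it appears further down in this patch:

    /* in the rx clean loop, once the descriptor length is known */
    ixgb_check_copybreak(netdev, buffer_info, length, &skb);

    /* Good Receive: skb now refers to either the small copy or the original buffer */
    skb_put(skb, length);
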
/**
* ixgb_clean_rx_irq - Send received data up the network stack,
* @adapter: board private structure
@@ -1952,11 +1982,14 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
prefetch(skb->data - NET_IP_ALIGN);
- if (++i == rx_ring->count) i = 0;
+ if (++i == rx_ring->count)
+ i = 0;
next_rxd = IXGB_RX_DESC(*rx_ring, i);
prefetch(next_rxd);
- if ((j = i + 1) == rx_ring->count) j = 0;
+ j = i + 1;
+ if (j == rx_ring->count)
+ j = 0;
next2_buffer = &rx_ring->buffer_info[j];
prefetch(next2_buffer);
@@ -1965,10 +1998,10 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
cleaned = true;
cleaned_count++;
- pci_unmap_single(pdev,
+ dma_unmap_single(&pdev->dev,
buffer_info->dma,
buffer_info->length,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
buffer_info->dma = 0;
length = le16_to_cpu(rx_desc->length);
@@ -1992,25 +2025,7 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
goto rxdesc_done;
}
- /* code added for copybreak, this should improve
- * performance for small packets with large amounts
- * of reassembly being done in the stack */
- if (length < copybreak) {
- struct sk_buff *new_skb =
- netdev_alloc_skb_ip_align(netdev, length);
- if (new_skb) {
- skb_copy_to_linear_data_offset(new_skb,
- -NET_IP_ALIGN,
- (skb->data -
- NET_IP_ALIGN),
- (length +
- NET_IP_ALIGN));
- /* save the skb in buffer_info as good */
- buffer_info->skb = skb;
- skb = new_skb;
- }
- }
- /* end copybreak code */
+ ixgb_check_copybreak(netdev, buffer_info, length, &skb);
/* Good Receive */
skb_put(skb, length);
@@ -2091,10 +2106,10 @@ ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter, int cleaned_count)
buffer_info->skb = skb;
buffer_info->length = adapter->rx_buffer_len;
map_skb:
- buffer_info->dma = pci_map_single(pdev,
+ buffer_info->dma = dma_map_single(&pdev->dev,
skb->data,
adapter->rx_buffer_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
rx_desc = IXGB_RX_DESC(*rx_ring, i);
rx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
@@ -2322,7 +2337,7 @@ static void ixgb_io_resume(struct pci_dev *pdev)
if (netif_running(netdev)) {
if (ixgb_up(adapter)) {
- printk ("ixgb: can't bring device back up after reset\n");
+ pr_err("can't bring device back up after reset\n");
return;
}
}
diff --git a/drivers/net/ixgb/ixgb_osdep.h b/drivers/net/ixgb/ixgb_osdep.h
index 371a6be4d965..e361185920ef 100644
--- a/drivers/net/ixgb/ixgb_osdep.h
+++ b/drivers/net/ixgb/ixgb_osdep.h
@@ -41,20 +41,8 @@
#undef ASSERT
#define ASSERT(x) BUG_ON(!(x))
-#define MSGOUT(S, A, B) printk(KERN_DEBUG S "\n", A, B)
-
-#ifdef DBG
-#define DEBUGOUT(S) printk(KERN_DEBUG S "\n")
-#define DEBUGOUT1(S, A...) printk(KERN_DEBUG S "\n", A)
-#else
-#define DEBUGOUT(S)
-#define DEBUGOUT1(S, A...)
-#endif
-
-#define DEBUGFUNC(F) DEBUGOUT(F)
-#define DEBUGOUT2 DEBUGOUT1
-#define DEBUGOUT3 DEBUGOUT2
-#define DEBUGOUT7 DEBUGOUT3
+
+#define ENTER() pr_debug("%s\n", __func__);
#define IXGB_WRITE_REG(a, reg, value) ( \
writel((value), ((a)->hw_addr + IXGB_##reg)))
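With the DEBUGOUT family gone from ixgb_osdep.h, all driver debug output funnels through pr_debug(); ENTER() simply logs the function name on entry. These calls compile to nothing unless DEBUG is defined for the file or CONFIG_DYNAMIC_DEBUG is enabled, in which case they can be toggled at run time via the dynamic_debug control file (e.g. "module ixgb +p" written to /sys/kernel/debug/dynamic_debug/control, assuming debugfs is mounted there). The expansion, spelled out:

    /* every ENTER() in the driver now expands to: */
    pr_debug("%s\n", __func__);
    /* together with the per-file pr_fmt() this logs, e.g., "ixgb: ixgb_init_hw" */
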
diff --git a/drivers/net/ixgb/ixgb_param.c b/drivers/net/ixgb/ixgb_param.c
index af35e1ddadd6..88a08f056241 100644
--- a/drivers/net/ixgb/ixgb_param.c
+++ b/drivers/net/ixgb/ixgb_param.c
@@ -26,6 +26,8 @@
*******************************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include "ixgb.h"
/* This is the only thing that needs to be changed to adjust the
@@ -209,16 +211,16 @@ ixgb_validate_option(unsigned int *value, const struct ixgb_option *opt)
case enable_option:
switch (*value) {
case OPTION_ENABLED:
- printk(KERN_INFO "%s Enabled\n", opt->name);
+ pr_info("%s Enabled\n", opt->name);
return 0;
case OPTION_DISABLED:
- printk(KERN_INFO "%s Disabled\n", opt->name);
+ pr_info("%s Disabled\n", opt->name);
return 0;
}
break;
case range_option:
if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
- printk(KERN_INFO "%s set to %i\n", opt->name, *value);
+ pr_info("%s set to %i\n", opt->name, *value);
return 0;
}
break;
@@ -230,7 +232,7 @@ ixgb_validate_option(unsigned int *value, const struct ixgb_option *opt)
ent = &opt->arg.l.p[i];
if (*value == ent->i) {
if (ent->str[0] != '\0')
- printk(KERN_INFO "%s\n", ent->str);
+ pr_info("%s\n", ent->str);
return 0;
}
}
@@ -240,8 +242,7 @@ ixgb_validate_option(unsigned int *value, const struct ixgb_option *opt)
BUG();
}
- printk(KERN_INFO "Invalid %s specified (%i) %s\n",
- opt->name, *value, opt->err);
+ pr_info("Invalid %s specified (%i) %s\n", opt->name, *value, opt->err);
*value = opt->def;
return -1;
}
@@ -261,9 +262,8 @@ ixgb_check_options(struct ixgb_adapter *adapter)
{
int bd = adapter->bd_number;
if (bd >= IXGB_MAX_NIC) {
- printk(KERN_NOTICE
- "Warning: no configuration for board #%i\n", bd);
- printk(KERN_NOTICE "Using defaults for all values\n");
+ pr_notice("Warning: no configuration for board #%i\n", bd);
+ pr_notice("Using defaults for all values\n");
}
{ /* Transmit Descriptor Count */
@@ -363,8 +363,7 @@ ixgb_check_options(struct ixgb_adapter *adapter)
adapter->hw.fc.high_water = opt.def;
}
if (!(adapter->hw.fc.type & ixgb_fc_tx_pause) )
- printk(KERN_INFO
- "Ignoring RxFCHighThresh when no RxFC\n");
+ pr_info("Ignoring RxFCHighThresh when no RxFC\n");
}
{ /* Receive Flow Control Low Threshold */
const struct ixgb_option opt = {
@@ -383,8 +382,7 @@ ixgb_check_options(struct ixgb_adapter *adapter)
adapter->hw.fc.low_water = opt.def;
}
if (!(adapter->hw.fc.type & ixgb_fc_tx_pause) )
- printk(KERN_INFO
- "Ignoring RxFCLowThresh when no RxFC\n");
+ pr_info("Ignoring RxFCLowThresh when no RxFC\n");
}
{ /* Flow Control Pause Time Request*/
const struct ixgb_option opt = {
@@ -404,17 +402,14 @@ ixgb_check_options(struct ixgb_adapter *adapter)
adapter->hw.fc.pause_time = opt.def;
}
if (!(adapter->hw.fc.type & ixgb_fc_tx_pause) )
- printk(KERN_INFO
- "Ignoring FCReqTimeout when no RxFC\n");
+ pr_info("Ignoring FCReqTimeout when no RxFC\n");
}
/* high low and spacing check for rx flow control thresholds */
if (adapter->hw.fc.type & ixgb_fc_tx_pause) {
/* high must be greater than low */
if (adapter->hw.fc.high_water < (adapter->hw.fc.low_water + 8)) {
/* set defaults */
- printk(KERN_INFO
- "RxFCHighThresh must be >= (RxFCLowThresh + 8), "
- "Using Defaults\n");
+ pr_info("RxFCHighThresh must be >= (RxFCLowThresh + 8), Using Defaults\n");
adapter->hw.fc.high_water = DEFAULT_FCRTH;
adapter->hw.fc.low_water = DEFAULT_FCRTL;
}
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 79c35ae3718c..ffae480587ae 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -111,7 +111,10 @@ struct vf_data_storage {
u16 default_vf_vlan_id;
u16 vlans_enabled;
bool clear_to_send;
+ bool pf_set_mac;
int rar;
+ u16 pf_vlan; /* When set, guest VLAN config not allowed. */
+ u16 pf_qos;
};
/* wrapper around a pointer to a socket buffer,
@@ -357,6 +360,7 @@ struct ixgbe_adapter {
u32 flags2;
#define IXGBE_FLAG2_RSC_CAPABLE (u32)(1)
#define IXGBE_FLAG2_RSC_ENABLED (u32)(1 << 1)
+#define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE (u32)(1 << 2)
/* default to trying for four seconds */
#define IXGBE_TRY_LINK_TIMEOUT (4 * HZ)
@@ -404,6 +408,8 @@ struct ixgbe_adapter {
u16 eeprom_version;
int node;
+ struct work_struct check_overtemp_task;
+ u32 interrupt_event;
/* SR-IOV */
DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS);
diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c
index 35a06b47587b..9c02d6014cc4 100644
--- a/drivers/net/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ixgbe/ixgbe_82598.c
@@ -42,9 +42,9 @@ static s32 ixgbe_get_copper_link_capabilities_82598(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *autoneg);
static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg,
- bool autoneg_wait_to_complete);
+ ixgbe_link_speed speed,
+ bool autoneg,
+ bool autoneg_wait_to_complete);
static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
u8 *eeprom_data);
@@ -1221,7 +1221,7 @@ static struct ixgbe_mac_operations mac_ops_82598 = {
static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
.init_params = &ixgbe_init_eeprom_params_generic,
- .read = &ixgbe_read_eeprom_generic,
+ .read = &ixgbe_read_eerd_generic,
.validate_checksum = &ixgbe_validate_eeprom_checksum_generic,
.update_checksum = &ixgbe_update_eeprom_checksum_generic,
};
@@ -1236,6 +1236,7 @@ static struct ixgbe_phy_operations phy_ops_82598 = {
.setup_link = &ixgbe_setup_phy_link_generic,
.setup_link_speed = &ixgbe_setup_phy_link_speed_generic,
.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598,
+ .check_overtemp = &ixgbe_tn_check_overtemp,
};
struct ixgbe_info ixgbe_82598_info = {
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index 12fc0e7ba2ca..a4e2901f2f08 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -133,27 +133,6 @@ setup_sfp_out:
return ret_val;
}
-/**
- * ixgbe_get_pcie_msix_count_82599 - Gets MSI-X vector count
- * @hw: pointer to hardware structure
- *
- * Read PCIe configuration space, and get the MSI-X vector count from
- * the capabilities table.
- **/
-static u32 ixgbe_get_pcie_msix_count_82599(struct ixgbe_hw *hw)
-{
- struct ixgbe_adapter *adapter = hw->back;
- u16 msix_count;
- pci_read_config_word(adapter->pdev, IXGBE_PCIE_MSIX_82599_CAPS,
- &msix_count);
- msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
-
- /* MSI-X count is zero-based in HW, so increment to give proper value */
- msix_count++;
-
- return msix_count;
-}
-
static s32 ixgbe_get_invariants_82599(struct ixgbe_hw *hw)
{
struct ixgbe_mac_info *mac = &hw->mac;
@@ -165,7 +144,7 @@ static s32 ixgbe_get_invariants_82599(struct ixgbe_hw *hw)
mac->num_rar_entries = IXGBE_82599_RAR_ENTRIES;
mac->max_rx_queues = IXGBE_82599_MAX_RX_QUEUES;
mac->max_tx_queues = IXGBE_82599_MAX_TX_QUEUES;
- mac->max_msix_vectors = ixgbe_get_pcie_msix_count_82599(hw);
+ mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
return 0;
}
@@ -642,6 +621,7 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
s32 i, j;
bool link_up = false;
u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ struct ixgbe_adapter *adapter = hw->back;
hw_dbg(hw, "ixgbe_setup_mac_link_smartspeed.\n");
@@ -726,64 +706,14 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
autoneg_wait_to_complete);
out:
+ if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
+ netif_info(adapter, hw, adapter->netdev, "Smartspeed has"
+ " downgraded the link speed from the maximum"
+ " advertised\n");
return status;
}
/**
- * ixgbe_check_mac_link_82599 - Determine link and speed status
- * @hw: pointer to hardware structure
- * @speed: pointer to link speed
- * @link_up: true when link is up
- * @link_up_wait_to_complete: bool used to wait for link up or not
- *
- * Reads the links register to determine if link is up and the current speed
- **/
-static s32 ixgbe_check_mac_link_82599(struct ixgbe_hw *hw,
- ixgbe_link_speed *speed,
- bool *link_up,
- bool link_up_wait_to_complete)
-{
- u32 links_reg;
- u32 i;
-
- links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
- if (link_up_wait_to_complete) {
- for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
- if (links_reg & IXGBE_LINKS_UP) {
- *link_up = true;
- break;
- } else {
- *link_up = false;
- }
- msleep(100);
- links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
- }
- } else {
- if (links_reg & IXGBE_LINKS_UP)
- *link_up = true;
- else
- *link_up = false;
- }
-
- if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
- IXGBE_LINKS_SPEED_10G_82599)
- *speed = IXGBE_LINK_SPEED_10GB_FULL;
- else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
- IXGBE_LINKS_SPEED_1G_82599)
- *speed = IXGBE_LINK_SPEED_1GB_FULL;
- else
- *speed = IXGBE_LINK_SPEED_100_FULL;
-
- /* if link is down, zero out the current_mode */
- if (*link_up == false) {
- hw->fc.current_mode = ixgbe_fc_none;
- hw->fc.fc_was_autonegged = false;
- }
-
- return 0;
-}
-
-/**
* ixgbe_setup_mac_link_82599 - Set MAC link speed
* @hw: pointer to hardware structure
* @speed: new link speed
@@ -1045,243 +975,6 @@ reset_hw_out:
}
/**
- * ixgbe_clear_vmdq_82599 - Disassociate a VMDq pool index from a rx address
- * @hw: pointer to hardware struct
- * @rar: receive address register index to disassociate
- * @vmdq: VMDq pool index to remove from the rar
- **/
-static s32 ixgbe_clear_vmdq_82599(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
-{
- u32 mpsar_lo, mpsar_hi;
- u32 rar_entries = hw->mac.num_rar_entries;
-
- if (rar < rar_entries) {
- mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
- mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
-
- if (!mpsar_lo && !mpsar_hi)
- goto done;
-
- if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
- if (mpsar_lo) {
- IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
- mpsar_lo = 0;
- }
- if (mpsar_hi) {
- IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
- mpsar_hi = 0;
- }
- } else if (vmdq < 32) {
- mpsar_lo &= ~(1 << vmdq);
- IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
- } else {
- mpsar_hi &= ~(1 << (vmdq - 32));
- IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
- }
-
- /* was that the last pool using this rar? */
- if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
- hw->mac.ops.clear_rar(hw, rar);
- } else {
- hw_dbg(hw, "RAR index %d is out of range.\n", rar);
- }
-
-done:
- return 0;
-}
-
-/**
- * ixgbe_set_vmdq_82599 - Associate a VMDq pool index with a rx address
- * @hw: pointer to hardware struct
- * @rar: receive address register index to associate with a VMDq index
- * @vmdq: VMDq pool index
- **/
-static s32 ixgbe_set_vmdq_82599(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
-{
- u32 mpsar;
- u32 rar_entries = hw->mac.num_rar_entries;
-
- if (rar < rar_entries) {
- if (vmdq < 32) {
- mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
- mpsar |= 1 << vmdq;
- IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
- } else {
- mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
- mpsar |= 1 << (vmdq - 32);
- IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
- }
- } else {
- hw_dbg(hw, "RAR index %d is out of range.\n", rar);
- }
- return 0;
-}
-
-/**
- * ixgbe_set_vfta_82599 - Set VLAN filter table
- * @hw: pointer to hardware structure
- * @vlan: VLAN id to write to VLAN filter
- * @vind: VMDq output index that maps queue to VLAN id in VFVFB
- * @vlan_on: boolean flag to turn on/off VLAN in VFVF
- *
- * Turn on/off specified VLAN in the VLAN filter table.
- **/
-static s32 ixgbe_set_vfta_82599(struct ixgbe_hw *hw, u32 vlan, u32 vind,
- bool vlan_on)
-{
- u32 regindex;
- u32 vlvf_index;
- u32 bitindex;
- u32 bits;
- u32 first_empty_slot;
- u32 vt_ctl;
-
- if (vlan > 4095)
- return IXGBE_ERR_PARAM;
-
- /*
- * this is a 2 part operation - first the VFTA, then the
- * VLVF and VLVFB if vind is set
- */
-
- /* Part 1
- * The VFTA is a bitstring made up of 128 32-bit registers
- * that enable the particular VLAN id, much like the MTA:
- * bits[11-5]: which register
- * bits[4-0]: which bit in the register
- */
- regindex = (vlan >> 5) & 0x7F;
- bitindex = vlan & 0x1F;
- bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
- if (vlan_on)
- bits |= (1 << bitindex);
- else
- bits &= ~(1 << bitindex);
- IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);
-
-
- /* Part 2
- * If VT mode is set
- * Either vlan_on
- * make sure the vlan is in VLVF
- * set the vind bit in the matching VLVFB
- * Or !vlan_on
- * clear the pool bit and possibly the vind
- */
- vt_ctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
- if (!(vt_ctl & IXGBE_VT_CTL_VT_ENABLE))
- goto out;
-
- /* find the vlanid or the first empty slot */
- first_empty_slot = 0;
-
- for (vlvf_index = 1; vlvf_index < IXGBE_VLVF_ENTRIES; vlvf_index++) {
- bits = IXGBE_READ_REG(hw, IXGBE_VLVF(vlvf_index));
- if (!bits && !first_empty_slot)
- first_empty_slot = vlvf_index;
- else if ((bits & 0x0FFF) == vlan)
- break;
- }
-
- if (vlvf_index >= IXGBE_VLVF_ENTRIES) {
- if (first_empty_slot)
- vlvf_index = first_empty_slot;
- else {
- hw_dbg(hw, "No space in VLVF.\n");
- goto out;
- }
- }
-
- if (vlan_on) {
- /* set the pool bit */
- if (vind < 32) {
- bits = IXGBE_READ_REG(hw,
- IXGBE_VLVFB(vlvf_index * 2));
- bits |= (1 << vind);
- IXGBE_WRITE_REG(hw,
- IXGBE_VLVFB(vlvf_index * 2), bits);
- } else {
- bits = IXGBE_READ_REG(hw,
- IXGBE_VLVFB((vlvf_index * 2) + 1));
- bits |= (1 << (vind - 32));
- IXGBE_WRITE_REG(hw,
- IXGBE_VLVFB((vlvf_index * 2) + 1), bits);
- }
- } else {
- /* clear the pool bit */
- if (vind < 32) {
- bits = IXGBE_READ_REG(hw,
- IXGBE_VLVFB(vlvf_index * 2));
- bits &= ~(1 << vind);
- IXGBE_WRITE_REG(hw,
- IXGBE_VLVFB(vlvf_index * 2), bits);
- bits |= IXGBE_READ_REG(hw,
- IXGBE_VLVFB((vlvf_index * 2) + 1));
- } else {
- bits = IXGBE_READ_REG(hw,
- IXGBE_VLVFB((vlvf_index * 2) + 1));
- bits &= ~(1 << (vind - 32));
- IXGBE_WRITE_REG(hw,
- IXGBE_VLVFB((vlvf_index * 2) + 1), bits);
- bits |= IXGBE_READ_REG(hw,
- IXGBE_VLVFB(vlvf_index * 2));
- }
- }
-
- if (bits) {
- IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index),
- (IXGBE_VLVF_VIEN | vlan));
- /* if bits is non-zero then some pools/VFs are still
- * using this VLAN ID. Force the VFTA entry to on */
- bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
- bits |= (1 << bitindex);
- IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);
- }
- else
- IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
-
-out:
- return 0;
-}
-
-/**
- * ixgbe_clear_vfta_82599 - Clear VLAN filter table
- * @hw: pointer to hardware structure
- *
- * Clears the VLAN filer table, and the VMDq index associated with the filter
- **/
-static s32 ixgbe_clear_vfta_82599(struct ixgbe_hw *hw)
-{
- u32 offset;
-
- for (offset = 0; offset < hw->mac.vft_size; offset++)
- IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
-
- for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
- IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
- IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0);
- IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset * 2) + 1), 0);
- }
-
- return 0;
-}
-
-/**
- * ixgbe_init_uta_tables_82599 - Initialize the Unicast Table Array
- * @hw: pointer to hardware structure
- **/
-static s32 ixgbe_init_uta_tables_82599(struct ixgbe_hw *hw)
-{
- int i;
- hw_dbg(hw, " Clearing UTA\n");
-
- for (i = 0; i < 128; i++)
- IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
-
- return 0;
-}
-
-/**
* ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
* @hw: pointer to hardware structure
**/
@@ -1303,7 +996,7 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
}
if (i >= IXGBE_FDIRCMD_CMD_POLL) {
hw_dbg(hw ,"Flow Director previous command isn't complete, "
- "aborting table re-initialization. \n");
+ "aborting table re-initialization.\n");
return IXGBE_ERR_FDIR_REINIT_FAILED;
}
@@ -2462,10 +2155,14 @@ sfp_check:
goto out;
switch (hw->phy.type) {
- case ixgbe_phy_tw_tyco:
- case ixgbe_phy_tw_unknown:
+ case ixgbe_phy_sfp_passive_tyco:
+ case ixgbe_phy_sfp_passive_unknown:
physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
break;
+ case ixgbe_phy_sfp_ftl_active:
+ case ixgbe_phy_sfp_active_unknown:
+ physical_layer = IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA;
+ break;
case ixgbe_phy_sfp_avago:
case ixgbe_phy_sfp_ftl:
case ixgbe_phy_sfp_intel:
@@ -2545,75 +2242,6 @@ static s32 ixgbe_get_device_caps_82599(struct ixgbe_hw *hw, u16 *device_caps)
}
/**
- * ixgbe_get_san_mac_addr_offset_82599 - SAN MAC address offset for 82599
- * @hw: pointer to hardware structure
- * @san_mac_offset: SAN MAC address offset
- *
- * This function will read the EEPROM location for the SAN MAC address
- * pointer, and returns the value at that location. This is used in both
- * get and set mac_addr routines.
- **/
-static s32 ixgbe_get_san_mac_addr_offset_82599(struct ixgbe_hw *hw,
- u16 *san_mac_offset)
-{
- /*
- * First read the EEPROM pointer to see if the MAC addresses are
- * available.
- */
- hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR, san_mac_offset);
-
- return 0;
-}
-
-/**
- * ixgbe_get_san_mac_addr_82599 - SAN MAC address retrieval for 82599
- * @hw: pointer to hardware structure
- * @san_mac_addr: SAN MAC address
- *
- * Reads the SAN MAC address from the EEPROM, if it's available. This is
- * per-port, so set_lan_id() must be called before reading the addresses.
- * set_lan_id() is called by identify_sfp(), but this cannot be relied
- * upon for non-SFP connections, so we must call it here.
- **/
-static s32 ixgbe_get_san_mac_addr_82599(struct ixgbe_hw *hw, u8 *san_mac_addr)
-{
- u16 san_mac_data, san_mac_offset;
- u8 i;
-
- /*
- * First read the EEPROM pointer to see if the MAC addresses are
- * available. If they're not, no point in calling set_lan_id() here.
- */
- ixgbe_get_san_mac_addr_offset_82599(hw, &san_mac_offset);
-
- if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) {
- /*
- * No addresses available in this EEPROM. It's not an
- * error though, so just wipe the local address and return.
- */
- for (i = 0; i < 6; i++)
- san_mac_addr[i] = 0xFF;
-
- goto san_mac_addr_out;
- }
-
- /* make sure we know which port we need to program */
- hw->mac.ops.set_lan_id(hw);
- /* apply the port offset to the address offset */
- (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
- (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
- for (i = 0; i < 3; i++) {
- hw->eeprom.ops.read(hw, san_mac_offset, &san_mac_data);
- san_mac_addr[i * 2] = (u8)(san_mac_data);
- san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);
- san_mac_offset++;
- }
-
-san_mac_addr_out:
- return 0;
-}
-
-/**
* ixgbe_verify_fw_version_82599 - verify fw version for 82599
* @hw: pointer to hardware structure
*
@@ -2715,7 +2343,7 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
.get_supported_physical_layer = &ixgbe_get_supported_physical_layer_82599,
.enable_rx_dma = &ixgbe_enable_rx_dma_82599,
.get_mac_addr = &ixgbe_get_mac_addr_generic,
- .get_san_mac_addr = &ixgbe_get_san_mac_addr_82599,
+ .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic,
.get_device_caps = &ixgbe_get_device_caps_82599,
.get_wwn_prefix = &ixgbe_get_wwn_prefix_82599,
.stop_adapter = &ixgbe_stop_adapter_generic,
@@ -2724,7 +2352,7 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
.read_analog_reg8 = &ixgbe_read_analog_reg8_82599,
.write_analog_reg8 = &ixgbe_write_analog_reg8_82599,
.setup_link = &ixgbe_setup_mac_link_82599,
- .check_link = &ixgbe_check_mac_link_82599,
+ .check_link = &ixgbe_check_mac_link_generic,
.get_link_capabilities = &ixgbe_get_link_capabilities_82599,
.led_on = &ixgbe_led_on_generic,
.led_off = &ixgbe_led_off_generic,
@@ -2732,23 +2360,23 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
.blink_led_stop = &ixgbe_blink_led_stop_generic,
.set_rar = &ixgbe_set_rar_generic,
.clear_rar = &ixgbe_clear_rar_generic,
- .set_vmdq = &ixgbe_set_vmdq_82599,
- .clear_vmdq = &ixgbe_clear_vmdq_82599,
+ .set_vmdq = &ixgbe_set_vmdq_generic,
+ .clear_vmdq = &ixgbe_clear_vmdq_generic,
.init_rx_addrs = &ixgbe_init_rx_addrs_generic,
.update_uc_addr_list = &ixgbe_update_uc_addr_list_generic,
.update_mc_addr_list = &ixgbe_update_mc_addr_list_generic,
.enable_mc = &ixgbe_enable_mc_generic,
.disable_mc = &ixgbe_disable_mc_generic,
- .clear_vfta = &ixgbe_clear_vfta_82599,
- .set_vfta = &ixgbe_set_vfta_82599,
- .fc_enable = &ixgbe_fc_enable_generic,
- .init_uta_tables = &ixgbe_init_uta_tables_82599,
+ .clear_vfta = &ixgbe_clear_vfta_generic,
+ .set_vfta = &ixgbe_set_vfta_generic,
+ .fc_enable = &ixgbe_fc_enable_generic,
+ .init_uta_tables = &ixgbe_init_uta_tables_generic,
.setup_sfp = &ixgbe_setup_sfp_modules_82599,
};
static struct ixgbe_eeprom_operations eeprom_ops_82599 = {
.init_params = &ixgbe_init_eeprom_params_generic,
- .read = &ixgbe_read_eeprom_generic,
+ .read = &ixgbe_read_eerd_generic,
.write = &ixgbe_write_eeprom_generic,
.validate_checksum = &ixgbe_validate_eeprom_checksum_generic,
.update_checksum = &ixgbe_update_eeprom_checksum_generic,
@@ -2757,7 +2385,7 @@ static struct ixgbe_eeprom_operations eeprom_ops_82599 = {
static struct ixgbe_phy_operations phy_ops_82599 = {
.identify = &ixgbe_identify_phy_82599,
.identify_sfp = &ixgbe_identify_sfp_module_generic,
- .init = &ixgbe_init_phy_ops_82599,
+ .init = &ixgbe_init_phy_ops_82599,
.reset = &ixgbe_reset_phy_generic,
.read_reg = &ixgbe_read_phy_reg_generic,
.write_reg = &ixgbe_write_phy_reg_generic,
@@ -2767,6 +2395,7 @@ static struct ixgbe_phy_operations phy_ops_82599 = {
.write_i2c_byte = &ixgbe_write_i2c_byte_generic,
.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic,
.write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic,
+ .check_overtemp = &ixgbe_tn_check_overtemp,
};
struct ixgbe_info ixgbe_82599_info = {
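Taken together, the ixgbe_82599.c hunks mostly move code: the 82599-only copies of check_mac_link, set/clear_vmdq, set/clear_vfta, init_uta_tables, get_san_mac_addr and the MSI-X count read reappear in ixgbe_common.c as *_generic functions (some, like set_vfta, slightly reworked along the way), and the 82599 ops tables are simply repointed at the shared versions. The indirection pattern, as a minimal hedged sketch:

    /* one shared implementation in ixgbe_common.c ... */
    s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw);

    /* ... selected per MAC generation through the function-pointer table */
    static struct ixgbe_mac_operations mac_ops_82599 = {
            .clear_vfta = &ixgbe_clear_vfta_generic,
            .set_vfta   = &ixgbe_set_vfta_generic,
            /* remaining callbacks unchanged */
    };
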
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index eb49020903c1..1159d9138f05 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -34,7 +34,6 @@
#include "ixgbe_common.h"
#include "ixgbe_phy.h"
-static s32 ixgbe_poll_eeprom_eerd_done(struct ixgbe_hw *hw);
static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
@@ -595,14 +594,14 @@ out:
}
/**
- * ixgbe_read_eeprom_generic - Read EEPROM word using EERD
+ * ixgbe_read_eerd_generic - Read EEPROM word using EERD
* @hw: pointer to hardware structure
* @offset: offset of word in the EEPROM to read
* @data: word read from the EEPROM
*
* Reads a 16 bit word from the EEPROM using the EERD register.
**/
-s32 ixgbe_read_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
+s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
{
u32 eerd;
s32 status;
@@ -614,15 +613,15 @@ s32 ixgbe_read_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
goto out;
}
- eerd = (offset << IXGBE_EEPROM_READ_ADDR_SHIFT) +
- IXGBE_EEPROM_READ_REG_START;
+ eerd = (offset << IXGBE_EEPROM_RW_ADDR_SHIFT) +
+ IXGBE_EEPROM_RW_REG_START;
IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
- status = ixgbe_poll_eeprom_eerd_done(hw);
+ status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);
if (status == 0)
*data = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
- IXGBE_EEPROM_READ_REG_DATA);
+ IXGBE_EEPROM_RW_REG_DATA);
else
hw_dbg(hw, "Eeprom read timed out\n");
@@ -631,20 +630,26 @@ out:
}
/**
- * ixgbe_poll_eeprom_eerd_done - Poll EERD status
+ * ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status
* @hw: pointer to hardware structure
+ * @ee_reg: EEPROM flag for polling
*
- * Polls the status bit (bit 1) of the EERD to determine when the read is done.
+ * Polls the status bit (bit 1) of the EERD or EEWR to determine when the
+ * read or write is done respectively.
**/
-static s32 ixgbe_poll_eeprom_eerd_done(struct ixgbe_hw *hw)
+s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
{
u32 i;
u32 reg;
s32 status = IXGBE_ERR_EEPROM;
- for (i = 0; i < IXGBE_EERD_ATTEMPTS; i++) {
- reg = IXGBE_READ_REG(hw, IXGBE_EERD);
- if (reg & IXGBE_EEPROM_READ_REG_DONE) {
+ for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) {
+ if (ee_reg == IXGBE_NVM_POLL_READ)
+ reg = IXGBE_READ_REG(hw, IXGBE_EERD);
+ else
+ reg = IXGBE_READ_REG(hw, IXGBE_EEWR);
+
+ if (reg & IXGBE_EEPROM_RW_REG_DONE) {
status = 0;
break;
}
@@ -1392,14 +1397,17 @@ s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw,
fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
fctrl |= IXGBE_FCTRL_UPE;
IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
+ hw->addr_ctrl.uc_set_promisc = true;
}
} else {
/* only disable if set by overflow, not by user */
- if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
+ if ((old_promisc_setting && hw->addr_ctrl.uc_set_promisc) &&
+ !(hw->addr_ctrl.user_set_promisc)) {
hw_dbg(hw, " Leaving address overflow promisc mode\n");
fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
fctrl &= ~IXGBE_FCTRL_UPE;
IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
+ hw->addr_ctrl.uc_set_promisc = false;
}
}
@@ -1484,26 +1492,24 @@ static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
/**
* ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
* @hw: pointer to hardware structure
- * @mc_addr_list: the list of new multicast addresses
- * @mc_addr_count: number of addresses
- * @next: iterator function to walk the multicast address list
+ * @netdev: pointer to net device structure
*
* The given list replaces any existing list. Clears the MC addrs from receive
* address registers and the multicast table. Uses unused receive address
* registers for the first multicast addresses, and hashes the rest into the
* multicast table.
**/
-s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
- u32 mc_addr_count, ixgbe_mc_addr_itr next)
+s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
+ struct net_device *netdev)
{
+ struct netdev_hw_addr *ha;
u32 i;
- u32 vmdq;
/*
* Set the new number of MC addresses that we are being requested to
* use.
*/
- hw->addr_ctrl.num_mc_addrs = mc_addr_count;
+ hw->addr_ctrl.num_mc_addrs = netdev_mc_count(netdev);
hw->addr_ctrl.mta_in_use = 0;
/* Clear the MTA */
@@ -1512,9 +1518,9 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
/* Add the new addresses */
- for (i = 0; i < mc_addr_count; i++) {
+ netdev_for_each_mc_addr(ha, netdev) {
hw_dbg(hw, " Adding the multicast addresses:\n");
- ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
+ ixgbe_set_mta(hw, ha->addr);
}
/* Enable mta */
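ixgbe_update_mc_addr_list_generic() now receives the net_device and iterates its multicast list in place: netdev_mc_count() supplies the count and netdev_for_each_mc_addr() yields each struct netdev_hw_addr, replacing the flattened address array plus ixgbe_mc_addr_itr callback. The iteration pattern in isolation (register writes and the mta_in_use bookkeeping omitted):

    struct netdev_hw_addr *ha;

    hw->addr_ctrl.num_mc_addrs = netdev_mc_count(netdev);
    netdev_for_each_mc_addr(ha, netdev)
            ixgbe_set_mta(hw, ha->addr);   /* ha->addr holds the 6-byte MAC */
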
@@ -2254,3 +2260,490 @@ s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
return 0;
}
+
+/**
+ * ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM
+ * @hw: pointer to hardware structure
+ * @san_mac_offset: SAN MAC address offset
+ *
+ * This function will read the EEPROM location for the SAN MAC address
+ * pointer, and returns the value at that location. This is used in both
+ * get and set mac_addr routines.
+ **/
+static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
+ u16 *san_mac_offset)
+{
+ /*
+ * First read the EEPROM pointer to see if the MAC addresses are
+ * available.
+ */
+ hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR, san_mac_offset);
+
+ return 0;
+}
+
+/**
+ * ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM
+ * @hw: pointer to hardware structure
+ * @san_mac_addr: SAN MAC address
+ *
+ * Reads the SAN MAC address from the EEPROM, if it's available. This is
+ * per-port, so set_lan_id() must be called before reading the addresses.
+ * set_lan_id() is called by identify_sfp(), but this cannot be relied
+ * upon for non-SFP connections, so we must call it here.
+ **/
+s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
+{
+ u16 san_mac_data, san_mac_offset;
+ u8 i;
+
+ /*
+ * First read the EEPROM pointer to see if the MAC addresses are
+ * available. If they're not, no point in calling set_lan_id() here.
+ */
+ ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
+
+ if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) {
+ /*
+ * No addresses available in this EEPROM. It's not an
+ * error though, so just wipe the local address and return.
+ */
+ for (i = 0; i < 6; i++)
+ san_mac_addr[i] = 0xFF;
+
+ goto san_mac_addr_out;
+ }
+
+ /* make sure we know which port we need to program */
+ hw->mac.ops.set_lan_id(hw);
+ /* apply the port offset to the address offset */
+ (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
+ (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
+ for (i = 0; i < 3; i++) {
+ hw->eeprom.ops.read(hw, san_mac_offset, &san_mac_data);
+ san_mac_addr[i * 2] = (u8)(san_mac_data);
+ san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);
+ san_mac_offset++;
+ }
+
+san_mac_addr_out:
+ return 0;
+}
+
+/**
+ * ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count
+ * @hw: pointer to hardware structure
+ *
+ * Read PCIe configuration space, and get the MSI-X vector count from
+ * the capabilities table.
+ **/
+u32 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
+{
+ struct ixgbe_adapter *adapter = hw->back;
+ u16 msix_count;
+ pci_read_config_word(adapter->pdev, IXGBE_PCIE_MSIX_82599_CAPS,
+ &msix_count);
+ msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
+
+ /* MSI-X count is zero-based in HW, so increment to give proper value */
+ msix_count++;
+
+ return msix_count;
+}
+
+/**
+ * ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address
+ * @hw: pointer to hardware struct
+ * @rar: receive address register index to disassociate
+ * @vmdq: VMDq pool index to remove from the rar
+ **/
+s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+{
+ u32 mpsar_lo, mpsar_hi;
+ u32 rar_entries = hw->mac.num_rar_entries;
+
+ if (rar < rar_entries) {
+ mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
+ mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
+
+ if (!mpsar_lo && !mpsar_hi)
+ goto done;
+
+ if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
+ if (mpsar_lo) {
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
+ mpsar_lo = 0;
+ }
+ if (mpsar_hi) {
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
+ mpsar_hi = 0;
+ }
+ } else if (vmdq < 32) {
+ mpsar_lo &= ~(1 << vmdq);
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
+ } else {
+ mpsar_hi &= ~(1 << (vmdq - 32));
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
+ }
+
+ /* was that the last pool using this rar? */
+ if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
+ hw->mac.ops.clear_rar(hw, rar);
+ } else {
+ hw_dbg(hw, "RAR index %d is out of range.\n", rar);
+ }
+
+done:
+ return 0;
+}
+
+/**
+ * ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address
+ * @hw: pointer to hardware struct
+ * @rar: receive address register index to associate with a VMDq index
+ * @vmdq: VMDq pool index
+ **/
+s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+{
+ u32 mpsar;
+ u32 rar_entries = hw->mac.num_rar_entries;
+
+ if (rar < rar_entries) {
+ if (vmdq < 32) {
+ mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
+ mpsar |= 1 << vmdq;
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
+ } else {
+ mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
+ mpsar |= 1 << (vmdq - 32);
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
+ }
+ } else {
+ hw_dbg(hw, "RAR index %d is out of range.\n", rar);
+ }
+ return 0;
+}
+
+/**
+ * ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
+ * @hw: pointer to hardware structure
+ **/
+s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
+{
+ int i;
+
+
+ for (i = 0; i < 128; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
+
+ return 0;
+}
+
+/**
+ * ixgbe_find_vlvf_slot - find the vlanid or the first empty slot
+ * @hw: pointer to hardware structure
+ * @vlan: VLAN id to write to VLAN filter
+ *
+ * return the VLVF index where this VLAN id should be placed
+ *
+ **/
+s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan)
+{
+ u32 bits = 0;
+ u32 first_empty_slot = 0;
+ s32 regindex;
+
+ /* short cut the special case */
+ if (vlan == 0)
+ return 0;
+
+ /*
+ * Search for the vlan id in the VLVF entries. Save off the first empty
+ * slot found along the way
+ */
+ for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) {
+ bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
+ if (!bits && !(first_empty_slot))
+ first_empty_slot = regindex;
+ else if ((bits & 0x0FFF) == vlan)
+ break;
+ }
+
+ /*
+ * If regindex is less than IXGBE_VLVF_ENTRIES, then we found the vlan
+ * in the VLVF. Else use the first empty VLVF register for this
+ * vlan id.
+ */
+ if (regindex >= IXGBE_VLVF_ENTRIES) {
+ if (first_empty_slot)
+ regindex = first_empty_slot;
+ else {
+ hw_dbg(hw, "No space in VLVF.\n");
+ regindex = IXGBE_ERR_NO_SPACE;
+ }
+ }
+
+ return regindex;
+}
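
ixgbe_find_vlvf_slot() is a plain "find a match or remember the first hole"
scan; entry 0 is reserved for VLAN 0, so index 0 can double as "no empty slot
seen yet". A stand-alone sketch of the same pattern (the 64-entry table size
and the valid bit in the example data are assumptions for illustration):

    #include <stdio.h>

    #define NUM_ENTRIES 64                      /* assumed VLVF table size */

    static int find_slot(const unsigned int *table, unsigned int vlan)
    {
        int first_empty = 0, i;

        for (i = 1; i < NUM_ENTRIES; i++) {
            if (!table[i] && !first_empty)
                first_empty = i;                /* remember the first hole */
            else if ((table[i] & 0x0FFF) == vlan)
                return i;                       /* VLAN already present */
        }
        return first_empty ? first_empty : -1;  /* -1: table is full */
    }

    int main(void)
    {
        unsigned int table[NUM_ENTRIES] = { 0 };

        table[3] = 0x80000064;                  /* VLAN 100 already in slot 3 */
        printf("vlan 100 -> slot %d\n", find_slot(table, 100)); /* 3 */
        printf("vlan 200 -> slot %d\n", find_slot(table, 200)); /* 1 */
        return 0;
    }
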
+
+/**
+ * ixgbe_set_vfta_generic - Set VLAN filter table
+ * @hw: pointer to hardware structure
+ * @vlan: VLAN id to write to VLAN filter
+ * @vind: VMDq output index that maps queue to VLAN id in VLVFB
+ * @vlan_on: boolean flag to turn on/off VLAN in VLVF
+ *
+ * Turn on/off specified VLAN in the VLAN filter table.
+ **/
+s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+ bool vlan_on)
+{
+ s32 regindex;
+ u32 bitindex;
+ u32 vfta;
+ u32 bits;
+ u32 vt;
+ u32 targetbit;
+ bool vfta_changed = false;
+
+ if (vlan > 4095)
+ return IXGBE_ERR_PARAM;
+
+ /*
+ * this is a 2 part operation - first the VFTA, then the
+ * VLVF and VLVFB if VT Mode is set
+ * We don't write the VFTA until we know the VLVF part succeeded.
+ */
+
+ /* Part 1
+ * The VFTA is a bitstring made up of 128 32-bit registers
+ * that enable the particular VLAN id, much like the MTA:
+ * bits[11-5]: which register
+ * bits[4-0]: which bit in the register
+ */
+ regindex = (vlan >> 5) & 0x7F;
+ bitindex = vlan & 0x1F;
+ targetbit = (1 << bitindex);
+ vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
+
+ if (vlan_on) {
+ if (!(vfta & targetbit)) {
+ vfta |= targetbit;
+ vfta_changed = true;
+ }
+ } else {
+ if ((vfta & targetbit)) {
+ vfta &= ~targetbit;
+ vfta_changed = true;
+ }
+ }
+
+ /* Part 2
+ * If VT Mode is set
+ * Either vlan_on
+ * make sure the vlan is in VLVF
+ * set the vind bit in the matching VLVFB
+ * Or !vlan_on
+ * clear the pool bit and possibly the vind
+ */
+ vt = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
+ if (vt & IXGBE_VT_CTL_VT_ENABLE) {
+ s32 vlvf_index;
+
+ vlvf_index = ixgbe_find_vlvf_slot(hw, vlan);
+ if (vlvf_index < 0)
+ return vlvf_index;
+
+ if (vlan_on) {
+ /* set the pool bit */
+ if (vind < 32) {
+ bits = IXGBE_READ_REG(hw,
+ IXGBE_VLVFB(vlvf_index*2));
+ bits |= (1 << vind);
+ IXGBE_WRITE_REG(hw,
+ IXGBE_VLVFB(vlvf_index*2),
+ bits);
+ } else {
+ bits = IXGBE_READ_REG(hw,
+ IXGBE_VLVFB((vlvf_index*2)+1));
+ bits |= (1 << (vind-32));
+ IXGBE_WRITE_REG(hw,
+ IXGBE_VLVFB((vlvf_index*2)+1),
+ bits);
+ }
+ } else {
+ /* clear the pool bit */
+ if (vind < 32) {
+ bits = IXGBE_READ_REG(hw,
+ IXGBE_VLVFB(vlvf_index*2));
+ bits &= ~(1 << vind);
+ IXGBE_WRITE_REG(hw,
+ IXGBE_VLVFB(vlvf_index*2),
+ bits);
+ bits |= IXGBE_READ_REG(hw,
+ IXGBE_VLVFB((vlvf_index*2)+1));
+ } else {
+ bits = IXGBE_READ_REG(hw,
+ IXGBE_VLVFB((vlvf_index*2)+1));
+ bits &= ~(1 << (vind-32));
+ IXGBE_WRITE_REG(hw,
+ IXGBE_VLVFB((vlvf_index*2)+1),
+ bits);
+ bits |= IXGBE_READ_REG(hw,
+ IXGBE_VLVFB(vlvf_index*2));
+ }
+ }
+
+ /*
+ * If there are still bits set in the VLVFB registers
+ * for the VLAN ID indicated we need to see if the
+ * caller is requesting that we clear the VFTA entry bit.
+ * If the caller has requested that we clear the VFTA
+ * entry bit but there are still pools/VFs using this VLAN
+ * ID entry then ignore the request. We're not worried
+ * about the case where we're turning the VFTA VLAN ID
+ * entry bit on, only when requested to turn it off as
+ * there may be multiple pools and/or VFs using the
+ * VLAN ID entry. In that case we cannot clear the
+ * VFTA bit until all pools/VFs using that VLAN ID have also
+ * been cleared. This will be indicated by "bits" being
+ * zero.
+ */
+ if (bits) {
+ IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index),
+ (IXGBE_VLVF_VIEN | vlan));
+ if (!vlan_on) {
+ /* someone wants to clear the vfta entry
+ * but some pools/VFs are still using it.
+ * Ignore it. */
+ vfta_changed = false;
+ }
+ }
+ else
+ IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
+ }
+
+ if (vfta_changed)
+ IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), vfta);
+
+ return 0;
+}
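
The "Part 1" comment above describes how a 12-bit VLAN id is split into a
register index and a bit index within the 128-entry VFTA. A small sketch of
that arithmetic with an arbitrary example VLAN:

    #include <stdio.h>

    int main(void)
    {
        unsigned int vlan = 1234;                    /* arbitrary example VLAN id */
        unsigned int regindex = (vlan >> 5) & 0x7F;  /* which 32-bit register */
        unsigned int bitindex = vlan & 0x1F;         /* which bit inside it */

        printf("VLAN %u -> VFTA[%u], bit %u\n", vlan, regindex, bitindex);
        return 0;                                    /* VLAN 1234 -> VFTA[38], bit 18 */
    }
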
+
+/**
+ * ixgbe_clear_vfta_generic - Clear VLAN filter table
+ * @hw: pointer to hardware structure
+ *
+ * Clears the VLAN filer table, and the VMDq index associated with the filter
+ **/
+s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
+{
+ u32 offset;
+
+ for (offset = 0; offset < hw->mac.vft_size; offset++)
+ IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
+
+ for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
+ IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset*2), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset*2)+1), 0);
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbe_check_mac_link_generic - Determine link and speed status
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @link_up: true when link is up
+ * @link_up_wait_to_complete: bool used to wait for link up or not
+ *
+ * Reads the links register to determine if link is up and the current speed
+ **/
+s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *link_up, bool link_up_wait_to_complete)
+{
+ u32 links_reg;
+ u32 i;
+
+ links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
+ if (link_up_wait_to_complete) {
+ for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
+ if (links_reg & IXGBE_LINKS_UP) {
+ *link_up = true;
+ break;
+ } else {
+ *link_up = false;
+ }
+ msleep(100);
+ links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
+ }
+ } else {
+ if (links_reg & IXGBE_LINKS_UP)
+ *link_up = true;
+ else
+ *link_up = false;
+ }
+
+ if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
+ IXGBE_LINKS_SPEED_10G_82599)
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+ else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
+ IXGBE_LINKS_SPEED_1G_82599)
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ else
+ *speed = IXGBE_LINK_SPEED_100_FULL;
+
+ /* if link is down, zero out the current_mode */
+ if (*link_up == false) {
+ hw->fc.current_mode = ixgbe_fc_none;
+ hw->fc.fc_was_autonegged = false;
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from
+ * the EEPROM
+ * @hw: pointer to hardware structure
+ * @wwnn_prefix: the alternative WWNN prefix
+ * @wwpn_prefix: the alternative WWPN prefix
+ *
+ * This function reads the alternative SAN MAC address block in the EEPROM
+ * to check whether the alternative WWNN/WWPN prefixes are supported.
+ **/
+s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
+ u16 *wwpn_prefix)
+{
+ u16 offset, caps;
+ u16 alt_san_mac_blk_offset;
+
+ /* clear output first */
+ *wwnn_prefix = 0xFFFF;
+ *wwpn_prefix = 0xFFFF;
+
+ /* check if alternative SAN MAC is supported */
+ hw->eeprom.ops.read(hw, IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR,
+ &alt_san_mac_blk_offset);
+
+ if ((alt_san_mac_blk_offset == 0) ||
+ (alt_san_mac_blk_offset == 0xFFFF))
+ goto wwn_prefix_out;
+
+ /* check capability in alternative san mac address block */
+ offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
+ hw->eeprom.ops.read(hw, offset, &caps);
+ if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
+ goto wwn_prefix_out;
+
+ /* get the corresponding prefix for WWNN/WWPN */
+ offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
+ hw->eeprom.ops.read(hw, offset, wwnn_prefix);
+
+ offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
+ hw->eeprom.ops.read(hw, offset, wwpn_prefix);
+
+wwn_prefix_out:
+ return 0;
+}
diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h
index 13606d4809c9..3080afb12bdf 100644
--- a/drivers/net/ixgbe/ixgbe_common.h
+++ b/drivers/net/ixgbe/ixgbe_common.h
@@ -30,6 +30,7 @@
#include "ixgbe_type.h"
+u32 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw);
s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw);
s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw);
s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw);
@@ -45,20 +46,20 @@ s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw);
s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
-s32 ixgbe_read_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 *data);
+s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data);
s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
u16 *data);
s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
u16 *checksum_val);
s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw);
+s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg);
s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
u32 enable_addr);
s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw);
-s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
- u32 mc_addr_count,
- ixgbe_mc_addr_itr func);
+s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
+ struct net_device *netdev);
s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw,
struct net_device *netdev);
s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw);
@@ -71,9 +72,16 @@ s32 ixgbe_validate_mac_addr(u8 *mac_addr);
s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask);
void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask);
s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw);
-
-s32 ixgbe_read_analog_reg8_generic(struct ixgbe_hw *hw, u32 reg, u8 *val);
-s32 ixgbe_write_analog_reg8_generic(struct ixgbe_hw *hw, u32 reg, u8 val);
+s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr);
+s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
+s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
+s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw);
+s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan,
+ u32 vind, bool vlan_on);
+s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw);
+s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *link_up, bool link_up_wait_to_complete);
s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index);
diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c
index dd4883f642be..71da325dfa80 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c
@@ -488,7 +488,6 @@ static void ixgbe_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
if (adapter->temp_dcb_cfg.pfc_mode_enable !=
adapter->dcb_cfg.pfc_mode_enable)
adapter->dcb_set_bitmap |= BIT_PFC;
- return;
}
/**
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 8f461d5cee77..c50a7541ffec 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -212,8 +212,8 @@ static int ixgbe_get_settings(struct net_device *netdev,
ecmd->port = PORT_FIBRE;
break;
case ixgbe_phy_nl:
- case ixgbe_phy_tw_tyco:
- case ixgbe_phy_tw_unknown:
+ case ixgbe_phy_sfp_passive_tyco:
+ case ixgbe_phy_sfp_passive_unknown:
case ixgbe_phy_sfp_ftl:
case ixgbe_phy_sfp_avago:
case ixgbe_phy_sfp_intel:
@@ -365,7 +365,7 @@ static int ixgbe_set_pauseparam(struct net_device *netdev,
else
fc.disable_fc_autoneg = false;
- if (pause->rx_pause && pause->tx_pause)
+ if ((pause->rx_pause && pause->tx_pause) || pause->autoneg)
fc.requested_mode = ixgbe_fc_full;
else if (pause->rx_pause && !pause->tx_pause)
fc.requested_mode = ixgbe_fc_rx_pause;
@@ -1458,8 +1458,8 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
struct ixgbe_tx_buffer *buf =
&(tx_ring->tx_buffer_info[i]);
if (buf->dma)
- pci_unmap_single(pdev, buf->dma, buf->length,
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&pdev->dev, buf->dma,
+ buf->length, DMA_TO_DEVICE);
if (buf->skb)
dev_kfree_skb(buf->skb);
}
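
The hunks in this file replace the legacy PCI DMA wrappers with the generic
DMA API: pci_map_single/pci_unmap_single become dma_map_single/dma_unmap_single
on &pdev->dev, pci_alloc_consistent/pci_free_consistent become
dma_alloc_coherent/dma_free_coherent, and PCI_DMA_TODEVICE/PCI_DMA_FROMDEVICE
become DMA_TO_DEVICE/DMA_FROM_DEVICE. A minimal kernel-style sketch of the
streaming-mapping pattern, not part of the patch (the function name and the
pdev/buf/len parameters are placeholders):

    #include <linux/dma-mapping.h>
    #include <linux/pci.h>

    /* sketch only: map a driver buffer for device reads, then unmap it */
    static int example_map_tx_buffer(struct pci_dev *pdev, void *buf, size_t len)
    {
        dma_addr_t dma;

        /* old style: dma = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE); */
        dma = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
        if (dma_mapping_error(&pdev->dev, dma))
            return -ENOMEM;                     /* mapping failed, bail out */

        /* ... hand dma to the hardware, wait for it to finish ... */

        /* old style: pci_unmap_single(pdev, dma, len, PCI_DMA_TODEVICE); */
        dma_unmap_single(&pdev->dev, dma, len, DMA_TO_DEVICE);
        return 0;
    }
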
@@ -1470,22 +1470,22 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
struct ixgbe_rx_buffer *buf =
&(rx_ring->rx_buffer_info[i]);
if (buf->dma)
- pci_unmap_single(pdev, buf->dma,
+ dma_unmap_single(&pdev->dev, buf->dma,
IXGBE_RXBUFFER_2048,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
if (buf->skb)
dev_kfree_skb(buf->skb);
}
}
if (tx_ring->desc) {
- pci_free_consistent(pdev, tx_ring->size, tx_ring->desc,
- tx_ring->dma);
+ dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
+ tx_ring->dma);
tx_ring->desc = NULL;
}
if (rx_ring->desc) {
- pci_free_consistent(pdev, rx_ring->size, rx_ring->desc,
- rx_ring->dma);
+ dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
+ rx_ring->dma);
rx_ring->desc = NULL;
}
@@ -1493,8 +1493,6 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
tx_ring->tx_buffer_info = NULL;
kfree(rx_ring->rx_buffer_info);
rx_ring->rx_buffer_info = NULL;
-
- return;
}
static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
@@ -1520,8 +1518,9 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
tx_ring->size = ALIGN(tx_ring->size, 4096);
- if (!(tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
- &tx_ring->dma))) {
+ tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
+ &tx_ring->dma, GFP_KERNEL);
+ if (!(tx_ring->desc)) {
ret_val = 2;
goto err_nomem;
}
@@ -1563,8 +1562,8 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
tx_ring->tx_buffer_info[i].skb = skb;
tx_ring->tx_buffer_info[i].length = skb->len;
tx_ring->tx_buffer_info[i].dma =
- pci_map_single(pdev, skb->data, skb->len,
- PCI_DMA_TODEVICE);
+ dma_map_single(&pdev->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
desc->read.buffer_addr =
cpu_to_le64(tx_ring->tx_buffer_info[i].dma);
desc->read.cmd_type_len = cpu_to_le32(skb->len);
@@ -1593,8 +1592,9 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
rx_ring->size = ALIGN(rx_ring->size, 4096);
- if (!(rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
- &rx_ring->dma))) {
+ rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
+ &rx_ring->dma, GFP_KERNEL);
+ if (!(rx_ring->desc)) {
ret_val = 5;
goto err_nomem;
}
@@ -1661,8 +1661,8 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
skb_reserve(skb, NET_IP_ALIGN);
rx_ring->rx_buffer_info[i].skb = skb;
rx_ring->rx_buffer_info[i].dma =
- pci_map_single(pdev, skb->data, IXGBE_RXBUFFER_2048,
- PCI_DMA_FROMDEVICE);
+ dma_map_single(&pdev->dev, skb->data,
+ IXGBE_RXBUFFER_2048, DMA_FROM_DEVICE);
rx_desc->read.pkt_addr =
cpu_to_le64(rx_ring->rx_buffer_info[i].dma);
memset(skb->data, 0x00, skb->len);
@@ -1775,10 +1775,10 @@ static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
ixgbe_create_lbtest_frame(
tx_ring->tx_buffer_info[k].skb,
1024);
- pci_dma_sync_single_for_device(pdev,
+ dma_sync_single_for_device(&pdev->dev,
tx_ring->tx_buffer_info[k].dma,
tx_ring->tx_buffer_info[k].length,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
if (unlikely(++k == tx_ring->count))
k = 0;
}
@@ -1789,10 +1789,10 @@ static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
good_cnt = 0;
do {
/* receive the sent packets */
- pci_dma_sync_single_for_cpu(pdev,
+ dma_sync_single_for_cpu(&pdev->dev,
rx_ring->rx_buffer_info[l].dma,
IXGBE_RXBUFFER_2048,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
ret_val = ixgbe_check_lbtest_frame(
rx_ring->rx_buffer_info[l].skb, 1024);
if (!ret_val)
@@ -1971,8 +1971,6 @@ static void ixgbe_get_wol(struct net_device *netdev,
wol->wolopts |= WAKE_BCAST;
if (adapter->wol & IXGBE_WUFC_MAG)
wol->wolopts |= WAKE_MAGIC;
-
- return;
}
static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
@@ -2079,12 +2077,32 @@ static int ixgbe_get_coalesce(struct net_device *netdev,
return 0;
}
+/*
+ * this function must be called before setting the new value of
+ * rx_itr_setting
+ */
+static bool ixgbe_reenable_rsc(struct ixgbe_adapter *adapter,
+ struct ethtool_coalesce *ec)
+{
+ /* check the old value and enable RSC if necessary */
+ if ((adapter->rx_itr_setting == 0) &&
+ (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) {
+ adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
+ adapter->netdev->features |= NETIF_F_LRO;
+ DPRINTK(PROBE, INFO, "rx-usecs set to %d, re-enabling RSC\n",
+ ec->rx_coalesce_usecs);
+ return true;
+ }
+ return false;
+}
+
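
ixgbe_set_coalesce() below converts the ethtool rx-usecs value into an
interrupt rate with 1000000/usecs and range-checks it against the minimum and
maximum rates. A trivial sketch of that conversion (the 125 us value is just
an example):

    #include <stdio.h>

    int main(void)
    {
        unsigned int rx_usecs = 125;                     /* example ethtool rx-usecs */
        unsigned int ints_per_sec = 1000000 / rx_usecs;  /* 8000 interrupts/sec */

        printf("rx-usecs=%u -> %u interrupts/sec\n", rx_usecs, ints_per_sec);
        return 0;
    }
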
static int ixgbe_set_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_q_vector *q_vector;
int i;
+ bool need_reset = false;
/* don't accept tx specific changes if we've got mixed RxTx vectors */
if (adapter->q_vector[0]->txr_count && adapter->q_vector[0]->rxr_count
@@ -2095,11 +2113,20 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
adapter->tx_ring[0]->work_limit = ec->tx_max_coalesced_frames_irq;
if (ec->rx_coalesce_usecs > 1) {
+ u32 max_int;
+ if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
+ max_int = IXGBE_MAX_RSC_INT_RATE;
+ else
+ max_int = IXGBE_MAX_INT_RATE;
+
/* check the limits */
- if ((1000000/ec->rx_coalesce_usecs > IXGBE_MAX_INT_RATE) ||
+ if ((1000000/ec->rx_coalesce_usecs > max_int) ||
(1000000/ec->rx_coalesce_usecs < IXGBE_MIN_INT_RATE))
return -EINVAL;
+ /* check the old value and enable RSC if necessary */
+ need_reset = ixgbe_reenable_rsc(adapter, ec);
+
/* store the value in ints/second */
adapter->rx_eitr_param = 1000000/ec->rx_coalesce_usecs;
@@ -2108,6 +2135,9 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
/* clear the lower bit as its used for dynamic state */
adapter->rx_itr_setting &= ~1;
} else if (ec->rx_coalesce_usecs == 1) {
+ /* check the old value and enable RSC if necessary */
+ need_reset = ixgbe_reenable_rsc(adapter, ec);
+
/* 1 means dynamic mode */
adapter->rx_eitr_param = 20000;
adapter->rx_itr_setting = 1;
@@ -2116,14 +2146,30 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
* any other value means disable eitr, which is best
* served by setting the interrupt rate very high
*/
- if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
- adapter->rx_eitr_param = IXGBE_MAX_RSC_INT_RATE;
- else
- adapter->rx_eitr_param = IXGBE_MAX_INT_RATE;
+ adapter->rx_eitr_param = IXGBE_MAX_INT_RATE;
adapter->rx_itr_setting = 0;
+
+ /*
+ * if hardware RSC is enabled, disable it when
+ * setting low latency mode to avoid a hardware erratum;
+ * assume that when the user sets low latency mode they
+ * want it at the cost of everything else
+ */
+ if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
+ adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
+ netdev->features &= ~NETIF_F_LRO;
+ DPRINTK(PROBE, INFO,
+ "rx-usecs set to 0, disabling RSC\n");
+
+ need_reset = true;
+ }
}
if (ec->tx_coalesce_usecs > 1) {
+ /*
+ * don't have to worry about max_int as above because
+ * tx vectors don't do hardware RSC (an rx function)
+ */
/* check the limits */
if ((1000000/ec->tx_coalesce_usecs > IXGBE_MAX_INT_RATE) ||
(1000000/ec->tx_coalesce_usecs < IXGBE_MIN_INT_RATE))
@@ -2167,6 +2213,18 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
ixgbe_write_eitr(q_vector);
}
+ /*
+ * do the reset here at the end to make sure the EITR==0 case is
+ * handled correctly w.r.t. stopping tx and changing TXDCTL.WTHRESH
+ * settings; it also locks in RSC enable/disable, which requires a reset
+ */
+ if (need_reset) {
+ if (netif_running(netdev))
+ ixgbe_reinit_locked(adapter);
+ else
+ ixgbe_reset(adapter);
+ }
+
return 0;
}
@@ -2178,10 +2236,26 @@ static int ixgbe_set_flags(struct net_device *netdev, u32 data)
ethtool_op_set_flags(netdev, data);
/* if state changes we need to update adapter->flags and reset */
- if ((!!(data & ETH_FLAG_LRO)) !=
- (!!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))) {
- adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED;
- need_reset = true;
+ if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) {
+ /*
+ * cast both to bool and verify if they are set the same
+ * but only enable RSC if itr is non-zero, as
+ * itr=0 and RSC are mutually exclusive
+ */
+ if (((!!(data & ETH_FLAG_LRO)) !=
+ (!!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))) &&
+ adapter->rx_itr_setting) {
+ adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED;
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82599EB:
+ need_reset = true;
+ break;
+ default:
+ break;
+ }
+ } else if (!adapter->rx_itr_setting) {
+ netdev->features &= ~ETH_FLAG_LRO;
+ }
}
/*
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
index 6493049b663d..45182ab41d6b 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -32,6 +32,7 @@
#endif /* CONFIG_IXGBE_DCB */
#include <linux/if_ether.h>
#include <linux/gfp.h>
+#include <linux/if_vlan.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/fc/fc_fs.h>
@@ -312,10 +313,12 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
if (fcerr == IXGBE_FCERR_BADCRC)
skb->ip_summed = CHECKSUM_NONE;
- skb_reset_network_header(skb);
- skb_set_transport_header(skb, skb_network_offset(skb) +
- sizeof(struct fcoe_hdr));
- fh = (struct fc_frame_header *)skb_transport_header(skb);
+ if (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
+ fh = (struct fc_frame_header *)(skb->data +
+ sizeof(struct vlan_hdr) + sizeof(struct fcoe_hdr));
+ else
+ fh = (struct fc_frame_header *)(skb->data +
+ sizeof(struct fcoe_hdr));
fctl = ntoh24(fh->fh_f_ctl);
if (fctl & FC_FC_EX_CTX)
xid = be16_to_cpu(fh->fh_ox_id);
@@ -536,12 +539,6 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
}
IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA);
IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0);
- fcoe_i = f->mask;
- fcoe_i &= IXGBE_FCRETA_ENTRY_MASK;
- fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
- IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP),
- IXGBE_ETQS_QUEUE_EN |
- (fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));
} else {
/* Use single rx queue for FCoE */
fcoe_i = f->mask;
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 6c00ee493a3b..d571d101de08 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -108,6 +108,8 @@ static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = {
board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4),
board_82599 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM),
+ board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE),
board_82599 },
@@ -175,6 +177,345 @@ static inline void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
}
+struct ixgbe_reg_info {
+ u32 ofs;
+ char *name;
+};
+
+static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = {
+
+ /* General Registers */
+ {IXGBE_CTRL, "CTRL"},
+ {IXGBE_STATUS, "STATUS"},
+ {IXGBE_CTRL_EXT, "CTRL_EXT"},
+
+ /* Interrupt Registers */
+ {IXGBE_EICR, "EICR"},
+
+ /* RX Registers */
+ {IXGBE_SRRCTL(0), "SRRCTL"},
+ {IXGBE_DCA_RXCTRL(0), "DRXCTL"},
+ {IXGBE_RDLEN(0), "RDLEN"},
+ {IXGBE_RDH(0), "RDH"},
+ {IXGBE_RDT(0), "RDT"},
+ {IXGBE_RXDCTL(0), "RXDCTL"},
+ {IXGBE_RDBAL(0), "RDBAL"},
+ {IXGBE_RDBAH(0), "RDBAH"},
+
+ /* TX Registers */
+ {IXGBE_TDBAL(0), "TDBAL"},
+ {IXGBE_TDBAH(0), "TDBAH"},
+ {IXGBE_TDLEN(0), "TDLEN"},
+ {IXGBE_TDH(0), "TDH"},
+ {IXGBE_TDT(0), "TDT"},
+ {IXGBE_TXDCTL(0), "TXDCTL"},
+
+ /* List Terminator */
+ {}
+};
+
+
+/*
+ * ixgbe_regdump - register printout routine
+ */
+static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo)
+{
+ int i = 0, j = 0;
+ char rname[16];
+ u32 regs[64];
+
+ switch (reginfo->ofs) {
+ case IXGBE_SRRCTL(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
+ break;
+ case IXGBE_DCA_RXCTRL(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
+ break;
+ case IXGBE_RDLEN(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
+ break;
+ case IXGBE_RDH(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
+ break;
+ case IXGBE_RDT(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
+ break;
+ case IXGBE_RXDCTL(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
+ break;
+ case IXGBE_RDBAL(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
+ break;
+ case IXGBE_RDBAH(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
+ break;
+ case IXGBE_TDBAL(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
+ break;
+ case IXGBE_TDBAH(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
+ break;
+ case IXGBE_TDLEN(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
+ break;
+ case IXGBE_TDH(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
+ break;
+ case IXGBE_TDT(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
+ break;
+ case IXGBE_TXDCTL(0):
+ for (i = 0; i < 64; i++)
+ regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
+ break;
+ default:
+ printk(KERN_INFO "%-15s %08x\n", reginfo->name,
+ IXGBE_READ_REG(hw, reginfo->ofs));
+ return;
+ }
+
+ for (i = 0; i < 8; i++) {
+ snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i*8, i*8+7);
+ printk(KERN_ERR "%-15s ", rname);
+ for (j = 0; j < 8; j++)
+ printk(KERN_CONT "%08x ", regs[i*8+j]);
+ printk(KERN_CONT "\n");
+ }
+
+}
+
+/*
+ * ixgbe_dump - Print registers, tx-rings and rx-rings
+ */
+static void ixgbe_dump(struct ixgbe_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_reg_info *reginfo;
+ int n = 0;
+ struct ixgbe_ring *tx_ring;
+ struct ixgbe_tx_buffer *tx_buffer_info;
+ union ixgbe_adv_tx_desc *tx_desc;
+ struct my_u0 { u64 a; u64 b; } *u0;
+ struct ixgbe_ring *rx_ring;
+ union ixgbe_adv_rx_desc *rx_desc;
+ struct ixgbe_rx_buffer *rx_buffer_info;
+ u32 staterr;
+ int i = 0;
+
+ if (!netif_msg_hw(adapter))
+ return;
+
+ /* Print netdevice Info */
+ if (netdev) {
+ dev_info(&adapter->pdev->dev, "Net device Info\n");
+ printk(KERN_INFO "Device Name state "
+ "trans_start last_rx\n");
+ printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
+ netdev->name,
+ netdev->state,
+ netdev->trans_start,
+ netdev->last_rx);
+ }
+
+ /* Print Registers */
+ dev_info(&adapter->pdev->dev, "Register Dump\n");
+ printk(KERN_INFO " Register Name Value\n");
+ for (reginfo = (struct ixgbe_reg_info *)ixgbe_reg_info_tbl;
+ reginfo->name; reginfo++) {
+ ixgbe_regdump(hw, reginfo);
+ }
+
+ /* Print TX Ring Summary */
+ if (!netdev || !netif_running(netdev))
+ goto exit;
+
+ dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
+ printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma ] "
+ "leng ntw timestamp\n");
+ for (n = 0; n < adapter->num_tx_queues; n++) {
+ tx_ring = adapter->tx_ring[n];
+ tx_buffer_info =
+ &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
+ printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
+ n, tx_ring->next_to_use, tx_ring->next_to_clean,
+ (u64)tx_buffer_info->dma,
+ tx_buffer_info->length,
+ tx_buffer_info->next_to_watch,
+ (u64)tx_buffer_info->time_stamp);
+ }
+
+ /* Print TX Rings */
+ if (!netif_msg_tx_done(adapter))
+ goto rx_ring_summary;
+
+ dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
+
+ /* Transmit Descriptor Formats
+ *
+ * Advanced Transmit Descriptor
+ * +--------------------------------------------------------------+
+ * 0 | Buffer Address [63:0] |
+ * +--------------------------------------------------------------+
+ * 8 | PAYLEN | PORTS | IDX | STA | DCMD |DTYP | RSV | DTALEN |
+ * +--------------------------------------------------------------+
+ * 63 46 45 40 39 36 35 32 31 24 23 20 19 0
+ */
+
+ for (n = 0; n < adapter->num_tx_queues; n++) {
+ tx_ring = adapter->tx_ring[n];
+ printk(KERN_INFO "------------------------------------\n");
+ printk(KERN_INFO "TX QUEUE INDEX = %d\n", tx_ring->queue_index);
+ printk(KERN_INFO "------------------------------------\n");
+ printk(KERN_INFO "T [desc] [address 63:0 ] "
+ "[PlPOIdStDDt Ln] [bi->dma ] "
+ "leng ntw timestamp bi->skb\n");
+
+ for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
+ tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
+ tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ u0 = (struct my_u0 *)tx_desc;
+ printk(KERN_INFO "T [0x%03X] %016llX %016llX %016llX"
+ " %04X %3X %016llX %p", i,
+ le64_to_cpu(u0->a),
+ le64_to_cpu(u0->b),
+ (u64)tx_buffer_info->dma,
+ tx_buffer_info->length,
+ tx_buffer_info->next_to_watch,
+ (u64)tx_buffer_info->time_stamp,
+ tx_buffer_info->skb);
+ if (i == tx_ring->next_to_use &&
+ i == tx_ring->next_to_clean)
+ printk(KERN_CONT " NTC/U\n");
+ else if (i == tx_ring->next_to_use)
+ printk(KERN_CONT " NTU\n");
+ else if (i == tx_ring->next_to_clean)
+ printk(KERN_CONT " NTC\n");
+ else
+ printk(KERN_CONT "\n");
+
+ if (netif_msg_pktdata(adapter) &&
+ tx_buffer_info->dma != 0)
+ print_hex_dump(KERN_INFO, "",
+ DUMP_PREFIX_ADDRESS, 16, 1,
+ phys_to_virt(tx_buffer_info->dma),
+ tx_buffer_info->length, true);
+ }
+ }
+
+ /* Print RX Rings Summary */
+rx_ring_summary:
+ dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
+ printk(KERN_INFO "Queue [NTU] [NTC]\n");
+ for (n = 0; n < adapter->num_rx_queues; n++) {
+ rx_ring = adapter->rx_ring[n];
+ printk(KERN_INFO "%5d %5X %5X\n", n,
+ rx_ring->next_to_use, rx_ring->next_to_clean);
+ }
+
+ /* Print RX Rings */
+ if (!netif_msg_rx_status(adapter))
+ goto exit;
+
+ dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
+
+ /* Advanced Receive Descriptor (Read) Format
+ * 63 1 0
+ * +-----------------------------------------------------+
+ * 0 | Packet Buffer Address [63:1] |A0/NSE|
+ * +----------------------------------------------+------+
+ * 8 | Header Buffer Address [63:1] | DD |
+ * +-----------------------------------------------------+
+ *
+ *
+ * Advanced Receive Descriptor (Write-Back) Format
+ *
+ * 63 48 47 32 31 30 21 20 16 15 4 3 0
+ * +------------------------------------------------------+
+ * 0 | Packet IP |SPH| HDR_LEN | RSV|Packet| RSS |
+ * | Checksum Ident | | | | Type | Type |
+ * +------------------------------------------------------+
+ * 8 | VLAN Tag | Length | Extended Error | Extended Status |
+ * +------------------------------------------------------+
+ * 63 48 47 32 31 20 19 0
+ */
+ for (n = 0; n < adapter->num_rx_queues; n++) {
+ rx_ring = adapter->rx_ring[n];
+ printk(KERN_INFO "------------------------------------\n");
+ printk(KERN_INFO "RX QUEUE INDEX = %d\n", rx_ring->queue_index);
+ printk(KERN_INFO "------------------------------------\n");
+ printk(KERN_INFO "R [desc] [ PktBuf A0] "
+ "[ HeadBuf DD] [bi->dma ] [bi->skb] "
+ "<-- Adv Rx Read format\n");
+ printk(KERN_INFO "RWB[desc] [PcsmIpSHl PtRs] "
+ "[vl er S cks ln] ---------------- [bi->skb] "
+ "<-- Adv Rx Write-Back format\n");
+
+ for (i = 0; i < rx_ring->count; i++) {
+ rx_buffer_info = &rx_ring->rx_buffer_info[i];
+ rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
+ u0 = (struct my_u0 *)rx_desc;
+ staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+ if (staterr & IXGBE_RXD_STAT_DD) {
+ /* Descriptor Done */
+ printk(KERN_INFO "RWB[0x%03X] %016llX "
+ "%016llX ---------------- %p", i,
+ le64_to_cpu(u0->a),
+ le64_to_cpu(u0->b),
+ rx_buffer_info->skb);
+ } else {
+ printk(KERN_INFO "R [0x%03X] %016llX "
+ "%016llX %016llX %p", i,
+ le64_to_cpu(u0->a),
+ le64_to_cpu(u0->b),
+ (u64)rx_buffer_info->dma,
+ rx_buffer_info->skb);
+
+ if (netif_msg_pktdata(adapter)) {
+ print_hex_dump(KERN_INFO, "",
+ DUMP_PREFIX_ADDRESS, 16, 1,
+ phys_to_virt(rx_buffer_info->dma),
+ rx_ring->rx_buf_len, true);
+
+ if (rx_ring->rx_buf_len
+ < IXGBE_RXBUFFER_2048)
+ print_hex_dump(KERN_INFO, "",
+ DUMP_PREFIX_ADDRESS, 16, 1,
+ phys_to_virt(
+ rx_buffer_info->page_dma +
+ rx_buffer_info->page_offset
+ ),
+ PAGE_SIZE/2, true);
+ }
+ }
+
+ if (i == rx_ring->next_to_use)
+ printk(KERN_CONT " NTU\n");
+ else if (i == rx_ring->next_to_clean)
+ printk(KERN_CONT " NTC\n");
+ else
+ printk(KERN_CONT "\n");
+
+ }
+ }
+
+exit:
+ return;
+}
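
ixgbe_dump() is gated by the netif_msg_*() helpers, which each test one bit in
adapter->msg_enable, so the verbosity (registers only, ring contents, packet
hex dumps) can be selected from user space. A small sketch of that bit test;
the flag values shown match my reading of linux/netdevice.h but should be
treated as illustrative:

    #include <stdio.h>

    #define NETIF_MSG_HW      0x2000            /* assumed value, see linux/netdevice.h */
    #define NETIF_MSG_PKTDATA 0x1000            /* assumed value */

    int main(void)
    {
        unsigned int msg_enable = NETIF_MSG_HW; /* registers and ring summaries only */

        if (msg_enable & NETIF_MSG_HW)
            printf("would dump registers and rings\n");
        if (msg_enable & NETIF_MSG_PKTDATA)
            printf("would also hex-dump buffer contents\n");
        return 0;
    }
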
+
static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
{
u32 ctrl_ext;
@@ -266,15 +607,15 @@ static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
{
if (tx_buffer_info->dma) {
if (tx_buffer_info->mapped_as_page)
- pci_unmap_page(adapter->pdev,
+ dma_unmap_page(&adapter->pdev->dev,
tx_buffer_info->dma,
tx_buffer_info->length,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
else
- pci_unmap_single(adapter->pdev,
+ dma_unmap_single(&adapter->pdev->dev,
tx_buffer_info->dma,
tx_buffer_info->length,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
tx_buffer_info->dma = 0;
}
if (tx_buffer_info->skb) {
@@ -286,16 +627,16 @@ static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
}
/**
- * ixgbe_tx_is_paused - check if the tx ring is paused
+ * ixgbe_tx_xon_state - check the tx ring xon state
* @adapter: the ixgbe adapter
* @tx_ring: the corresponding tx_ring
*
* If not in DCB mode, checks TFCS.TXOFF, otherwise, find out the
* corresponding TC of this tx_ring when checking TFCS.
*
- * Returns : true if paused
+ * Returns : true if in xon state (currently not paused)
*/
-static inline bool ixgbe_tx_is_paused(struct ixgbe_adapter *adapter,
+static inline bool ixgbe_tx_xon_state(struct ixgbe_adapter *adapter,
struct ixgbe_ring *tx_ring)
{
u32 txoff = IXGBE_TFCS_TXOFF;
@@ -351,7 +692,7 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
adapter->detect_tx_hung = false;
if (tx_ring->tx_buffer_info[eop].time_stamp &&
time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) &&
- !ixgbe_tx_is_paused(adapter, tx_ring)) {
+ ixgbe_tx_xon_state(adapter, tx_ring)) {
/* detected Tx unit hang */
union ixgbe_adv_tx_desc *tx_desc;
tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
@@ -721,10 +1062,10 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
bi->page_offset ^= (PAGE_SIZE / 2);
}
- bi->page_dma = pci_map_page(pdev, bi->page,
+ bi->page_dma = dma_map_page(&pdev->dev, bi->page,
bi->page_offset,
(PAGE_SIZE / 2),
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
}
if (!bi->skb) {
@@ -743,9 +1084,9 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
- skb->data));
bi->skb = skb;
- bi->dma = pci_map_single(pdev, skb->data,
+ bi->dma = dma_map_single(&pdev->dev, skb->data,
rx_ring->rx_buf_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
}
/* Refresh the desc even if buffer_addrs didn't change because
* each write-back erases this info. */
@@ -821,6 +1162,7 @@ static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb,
struct ixgbe_rsc_cb {
dma_addr_t dma;
+ bool delay_unmap;
};
#define IXGBE_RSC_CB(skb) ((struct ixgbe_rsc_cb *)(skb)->cb)
@@ -861,9 +1203,10 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
IXGBE_RXDADV_HDRBUFLEN_SHIFT;
- if (len > IXGBE_RX_HDR_SIZE)
- len = IXGBE_RX_HDR_SIZE;
upper_len = le16_to_cpu(rx_desc->wb.upper.length);
+ if ((len > IXGBE_RX_HDR_SIZE) ||
+ (upper_len && !(hdr_info & IXGBE_RXDADV_SPH)))
+ len = IXGBE_RX_HDR_SIZE;
} else {
len = le16_to_cpu(rx_desc->wb.upper.length);
}
@@ -876,7 +1219,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
if (rx_buffer_info->dma) {
if ((adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
(!(staterr & IXGBE_RXD_STAT_EOP)) &&
- (!(skb->prev)))
+ (!(skb->prev))) {
/*
* When HWRSC is enabled, delay unmapping
* of the first packet. It carries the
@@ -884,18 +1227,21 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
* access the header after the writeback.
* Only unmap it when EOP is reached
*/
+ IXGBE_RSC_CB(skb)->delay_unmap = true;
IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma;
- else
- pci_unmap_single(pdev, rx_buffer_info->dma,
+ } else {
+ dma_unmap_single(&pdev->dev,
+ rx_buffer_info->dma,
rx_ring->rx_buf_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
+ }
rx_buffer_info->dma = 0;
skb_put(skb, len);
}
if (upper_len) {
- pci_unmap_page(pdev, rx_buffer_info->page_dma,
- PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
+ dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
+ PAGE_SIZE / 2, DMA_FROM_DEVICE);
rx_buffer_info->page_dma = 0;
skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
rx_buffer_info->page,
@@ -936,11 +1282,13 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
if (skb->prev)
skb = ixgbe_transform_rsc_queue(skb, &(rx_ring->rsc_count));
if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
- if (IXGBE_RSC_CB(skb)->dma) {
- pci_unmap_single(pdev, IXGBE_RSC_CB(skb)->dma,
+ if (IXGBE_RSC_CB(skb)->delay_unmap) {
+ dma_unmap_single(&pdev->dev,
+ IXGBE_RSC_CB(skb)->dma,
rx_ring->rx_buf_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
IXGBE_RSC_CB(skb)->dma = 0;
+ IXGBE_RSC_CB(skb)->delay_unmap = false;
}
if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)
rx_ring->rsc_count += skb_shinfo(skb)->nr_frags;
@@ -1190,6 +1538,15 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
itr_reg |= (itr_reg << 16);
} else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
/*
+ * 82599 can support a value of zero, so allow it for
+ * max interrupt rate, but there is an erratum where it
+ * cannot be zero with RSC
+ */
+ if (itr_reg == 8 &&
+ !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
+ itr_reg = 0;
+
+ /*
* set the WDIS bit to not clear the timer bits and cause an
* immediate assertion of the interrupt
*/
@@ -1261,8 +1618,48 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
ixgbe_write_eitr(q_vector);
}
+}
- return;
+/**
+ * ixgbe_check_overtemp_task - worker thread to check for an over-temperature condition
+ * @work: pointer to work_struct containing our data
+ **/
+static void ixgbe_check_overtemp_task(struct work_struct *work)
+{
+ struct ixgbe_adapter *adapter = container_of(work,
+ struct ixgbe_adapter,
+ check_overtemp_task);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 eicr = adapter->interrupt_event;
+
+ if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) {
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_82599_T3_LOM: {
+ u32 autoneg;
+ bool link_up = false;
+
+ if (hw->mac.ops.check_link)
+ hw->mac.ops.check_link(hw, &autoneg, &link_up, false);
+
+ if (((eicr & IXGBE_EICR_GPI_SDP0) && (!link_up)) ||
+ (eicr & IXGBE_EICR_LSC))
+ /* Check if this is due to overtemp */
+ if (hw->phy.ops.check_overtemp(hw) == IXGBE_ERR_OVERTEMP)
+ break;
+ }
+ return;
+ default:
+ if (!(eicr & IXGBE_EICR_GPI_SDP0))
+ return;
+ break;
+ }
+ DPRINTK(DRV, ERR, "Network adapter has been stopped because it "
+ "has overheated. Restart the computer. If the problem "
+ "persists, power off the system and replace the "
+ "adapter\n");
+ /* write to clear the interrupt */
+ IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0);
+ }
}
static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
@@ -1336,6 +1733,10 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
if (hw->mac.type == ixgbe_mac_82599EB) {
ixgbe_check_sfp_event(adapter, eicr);
+ adapter->interrupt_event = eicr;
+ if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
+ ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC)))
+ schedule_work(&adapter->check_overtemp_task);
/* Handle Flow Director Full threshold interrupt */
if (eicr & IXGBE_EICR_FLOW_DIR) {
@@ -1826,8 +2227,6 @@ static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
ixgbe_write_eitr(q_vector);
}
-
- return;
}
/**
@@ -1839,6 +2238,8 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
u32 mask;
mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
+ if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
+ mask |= IXGBE_EIMS_GPI_SDP0;
if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
mask |= IXGBE_EIMS_GPI_SDP1;
if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
@@ -1899,6 +2300,9 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
ixgbe_check_sfp_event(adapter, eicr);
ixgbe_check_fan_failure(adapter, eicr);
+ if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
+ ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC)))
+ schedule_work(&adapter->check_overtemp_task);
if (napi_schedule_prep(&(q_vector->napi))) {
adapter->tx_ring[0]->total_packets = 0;
@@ -2372,7 +2776,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift));
IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (1 << vf_shift));
IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
- ixgbe_set_vmolr(hw, adapter->num_vfs);
+ ixgbe_set_vmolr(hw, adapter->num_vfs, true);
}
/* Program MRQC for the distribution of queues */
@@ -2482,12 +2886,82 @@ static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, false);
}
+/**
+ * ixgbe_vlan_filter_disable - helper to disable hw vlan filtering
+ * @adapter: driver data
+ */
+static void ixgbe_vlan_filter_disable(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+ int i, j;
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ vlnctrl &= ~IXGBE_VLNCTRL_VFE;
+#ifdef CONFIG_IXGBE_DCB
+ if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
+ vlnctrl &= ~IXGBE_VLNCTRL_VME;
+#endif
+ vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
+ break;
+ case ixgbe_mac_82599EB:
+ vlnctrl &= ~IXGBE_VLNCTRL_VFE;
+ vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
+#ifdef CONFIG_IXGBE_DCB
+ if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
+ break;
+#endif
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ j = adapter->rx_ring[i]->reg_idx;
+ vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
+ vlnctrl &= ~IXGBE_RXDCTL_VME;
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * ixgbe_vlan_filter_enable - helper to enable hw vlan filtering
+ * @adapter: driver data
+ */
+static void ixgbe_vlan_filter_enable(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+ int i, j;
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ vlnctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE;
+ vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
+ break;
+ case ixgbe_mac_82599EB:
+ vlnctrl |= IXGBE_VLNCTRL_VFE;
+ vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ j = adapter->rx_ring[i]->reg_idx;
+ vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
+ vlnctrl |= IXGBE_RXDCTL_VME;
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
+ }
+ break;
+ default:
+ break;
+ }
+}
+
static void ixgbe_vlan_rx_register(struct net_device *netdev,
struct vlan_group *grp)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- u32 ctrl;
- int i, j;
if (!test_bit(__IXGBE_DOWN, &adapter->state))
ixgbe_irq_disable(adapter);
@@ -2498,25 +2972,7 @@ static void ixgbe_vlan_rx_register(struct net_device *netdev,
* still receive traffic from a DCB-enabled host even if we're
* not in DCB mode.
*/
- ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
-
- /* Disable CFI check */
- ctrl &= ~IXGBE_VLNCTRL_CFIEN;
-
- /* enable VLAN tag stripping */
- if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
- ctrl |= IXGBE_VLNCTRL_VME;
- } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
- for (i = 0; i < adapter->num_rx_queues; i++) {
- u32 ctrl;
- j = adapter->rx_ring[i]->reg_idx;
- ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(j));
- ctrl |= IXGBE_RXDCTL_VME;
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(j), ctrl);
- }
- }
-
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
+ ixgbe_vlan_filter_enable(adapter);
ixgbe_vlan_rx_add_vid(netdev, 0);
@@ -2538,21 +2994,6 @@ static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
}
}
-static u8 *ixgbe_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr, u32 *vmdq)
-{
- struct dev_mc_list *mc_ptr;
- u8 *addr = *mc_addr_ptr;
- *vmdq = 0;
-
- mc_ptr = container_of(addr, struct dev_mc_list, dmi_addr[0]);
- if (mc_ptr->next)
- *mc_addr_ptr = mc_ptr->next->dmi_addr;
- else
- *mc_addr_ptr = NULL;
-
- return addr;
-}
-
/**
* ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
* @netdev: network interface device structure
@@ -2566,42 +3007,36 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
- u32 fctrl, vlnctrl;
- u8 *addr_list = NULL;
- int addr_count = 0;
+ u32 fctrl;
/* Check for Promiscuous and All Multicast modes */
fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
- vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
if (netdev->flags & IFF_PROMISC) {
- hw->addr_ctrl.user_set_promisc = 1;
+ hw->addr_ctrl.user_set_promisc = true;
fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
- vlnctrl &= ~IXGBE_VLNCTRL_VFE;
+ /* don't hardware filter vlans in promisc mode */
+ ixgbe_vlan_filter_disable(adapter);
} else {
if (netdev->flags & IFF_ALLMULTI) {
fctrl |= IXGBE_FCTRL_MPE;
fctrl &= ~IXGBE_FCTRL_UPE;
- } else {
+ } else if (!hw->addr_ctrl.uc_set_promisc) {
fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
}
- vlnctrl |= IXGBE_VLNCTRL_VFE;
- hw->addr_ctrl.user_set_promisc = 0;
+ ixgbe_vlan_filter_enable(adapter);
+ hw->addr_ctrl.user_set_promisc = false;
}
IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
- IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
/* reprogram secondary unicast list */
hw->mac.ops.update_uc_addr_list(hw, netdev);
/* reprogram multicast list */
- addr_count = netdev_mc_count(netdev);
- if (addr_count)
- addr_list = netdev->mc_list->dmi_addr;
- hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count,
- ixgbe_addr_list_itr);
+ hw->mac.ops.update_mc_addr_list(hw, netdev);
+
if (adapter->num_vfs)
ixgbe_restore_vf_multicasts(adapter);
}
@@ -2661,7 +3096,7 @@ static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
- u32 txdctl, vlnctrl;
+ u32 txdctl;
int i, j;
ixgbe_dcb_check_config(&adapter->dcb_cfg);
@@ -2679,22 +3114,8 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
}
/* Enable VLAN tag insert/strip */
- vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
- if (hw->mac.type == ixgbe_mac_82598EB) {
- vlnctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE;
- vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
- IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
- } else if (hw->mac.type == ixgbe_mac_82599EB) {
- vlnctrl |= IXGBE_VLNCTRL_VFE;
- vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
- IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
- for (i = 0; i < adapter->num_rx_queues; i++) {
- j = adapter->rx_ring[i]->reg_idx;
- vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
- vlnctrl |= IXGBE_RXDCTL_VME;
- IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
- }
- }
+ ixgbe_vlan_filter_enable(adapter);
+
hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
}
@@ -2750,8 +3171,10 @@ static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
case ixgbe_phy_sfp_ftl:
case ixgbe_phy_sfp_intel:
case ixgbe_phy_sfp_unknown:
- case ixgbe_phy_tw_tyco:
- case ixgbe_phy_tw_unknown:
+ case ixgbe_phy_sfp_passive_tyco:
+ case ixgbe_phy_sfp_passive_unknown:
+ case ixgbe_phy_sfp_active_unknown:
+ case ixgbe_phy_sfp_ftl_active:
return true;
default:
return false;
@@ -2895,6 +3318,13 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
}
+ /* Enable Thermal over heat sensor interrupt */
+ if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) {
+ gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
+ gpie |= IXGBE_SDP0_GPIEN;
+ IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
+ }
+
/* Enable fan failure interrupt if media type is copper */
if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
@@ -2927,8 +3357,13 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
for (i = 0; i < adapter->num_tx_queues; i++) {
j = adapter->tx_ring[i]->reg_idx;
txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
- /* enable WTHRESH=8 descriptors, to encourage burst writeback */
- txdctl |= (8 << 16);
+ if (adapter->rx_itr_setting == 0) {
+ /* cannot set wthresh when itr==0 */
+ txdctl &= ~0x007F0000;
+ } else {
+ /* enable WTHRESH=8 descriptors, to encourage burst writeback */
+ txdctl |= (8 << 16);
+ }
IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
}
@@ -3131,9 +3566,9 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
rx_buffer_info = &rx_ring->rx_buffer_info[i];
if (rx_buffer_info->dma) {
- pci_unmap_single(pdev, rx_buffer_info->dma,
+ dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
rx_ring->rx_buf_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
rx_buffer_info->dma = 0;
}
if (rx_buffer_info->skb) {
@@ -3141,11 +3576,13 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
rx_buffer_info->skb = NULL;
do {
struct sk_buff *this = skb;
- if (IXGBE_RSC_CB(this)->dma) {
- pci_unmap_single(pdev, IXGBE_RSC_CB(this)->dma,
+ if (IXGBE_RSC_CB(this)->delay_unmap) {
+ dma_unmap_single(&pdev->dev,
+ IXGBE_RSC_CB(this)->dma,
rx_ring->rx_buf_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
IXGBE_RSC_CB(this)->dma = 0;
+ IXGBE_RSC_CB(this)->delay_unmap = false;
}
skb = skb->prev;
dev_kfree_skb(this);
@@ -3154,8 +3591,8 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
if (!rx_buffer_info->page)
continue;
if (rx_buffer_info->page_dma) {
- pci_unmap_page(pdev, rx_buffer_info->page_dma,
- PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
+ dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
+ PAGE_SIZE / 2, DMA_FROM_DEVICE);
rx_buffer_info->page_dma = 0;
}
put_page(rx_buffer_info->page);
@@ -3268,26 +3705,30 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
- netif_tx_disable(netdev);
-
IXGBE_WRITE_FLUSH(hw);
msleep(10);
netif_tx_stop_all_queues(netdev);
- ixgbe_irq_disable(adapter);
-
- ixgbe_napi_disable_all(adapter);
-
clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
del_timer_sync(&adapter->sfp_timer);
del_timer_sync(&adapter->watchdog_timer);
cancel_work_sync(&adapter->watchdog_task);
+ netif_carrier_off(netdev);
+ netif_tx_disable(netdev);
+
+ ixgbe_irq_disable(adapter);
+
+ ixgbe_napi_disable_all(adapter);
+
if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
cancel_work_sync(&adapter->fdir_reinit_task);
+ if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
+ cancel_work_sync(&adapter->check_overtemp_task);
+
/* disable transmits in the hardware now that interrupts are off */
for (i = 0; i < adapter->num_tx_queues; i++) {
j = adapter->tx_ring[i]->reg_idx;
@@ -3301,8 +3742,6 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
(IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
~IXGBE_DMATXCTL_TE));
- netif_carrier_off(netdev);
-
/* clear n-tuple filters that are cached */
ethtool_ntuple_flush(netdev);
@@ -3379,6 +3818,8 @@ static void ixgbe_reset_task(struct work_struct *work)
adapter->tx_timeout_count++;
+ ixgbe_dump(adapter);
+ netdev_err(adapter->netdev, "Reset adapter\n");
ixgbe_reinit_locked(adapter);
}
@@ -3479,12 +3920,12 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
adapter->num_tx_queues = 1;
#ifdef CONFIG_IXGBE_DCB
if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
- DPRINTK(PROBE, INFO, "FCoE enabled with DCB \n");
+ DPRINTK(PROBE, INFO, "FCoE enabled with DCB\n");
ixgbe_set_dcb_queues(adapter);
}
#endif
if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
- DPRINTK(PROBE, INFO, "FCoE enabled with RSS \n");
+ DPRINTK(PROBE, INFO, "FCoE enabled with RSS\n");
if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
ixgbe_set_fdir_queues(adapter);
@@ -4095,7 +4536,6 @@ static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
pci_disable_msi(adapter->pdev);
}
- return;
}
/**
@@ -4268,6 +4708,8 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599;
adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
+ if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
+ adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
if (dev->features & NETIF_F_NTUPLE) {
/* Flow Director perfect filter enabled */
adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
@@ -4381,8 +4823,8 @@ int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
tx_ring->size = ALIGN(tx_ring->size, 4096);
- tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
- &tx_ring->dma);
+ tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
+ &tx_ring->dma, GFP_KERNEL);
if (!tx_ring->desc)
goto err;
@@ -4452,7 +4894,8 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
rx_ring->size = ALIGN(rx_ring->size, 4096);
- rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size, &rx_ring->dma);
+ rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
+ &rx_ring->dma, GFP_KERNEL);
if (!rx_ring->desc) {
DPRINTK(PROBE, ERR,
@@ -4513,7 +4956,8 @@ void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter,
vfree(tx_ring->tx_buffer_info);
tx_ring->tx_buffer_info = NULL;
- pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
+ dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
+ tx_ring->dma);
tx_ring->desc = NULL;
}
@@ -4550,7 +4994,8 @@ void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter,
vfree(rx_ring->rx_buffer_info);
rx_ring->rx_buffer_info = NULL;
- pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
+ dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
+ rx_ring->dma);
rx_ring->desc = NULL;
}
@@ -5100,7 +5545,7 @@ static void ixgbe_fdir_reinit_task(struct work_struct *work)
&(adapter->tx_ring[i]->reinit_state));
} else {
DPRINTK(PROBE, ERR, "failed to finish FDIR re-initialization, "
- "ignored adding FDIR ATR filters \n");
+ "ignored adding FDIR ATR filters\n");
}
/* Done FDIR Re-initialization, enable transmits */
netif_tx_start_all_queues(adapter->netdev);
@@ -5420,10 +5865,10 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
tx_buffer_info->length = size;
tx_buffer_info->mapped_as_page = false;
- tx_buffer_info->dma = pci_map_single(pdev,
+ tx_buffer_info->dma = dma_map_single(&pdev->dev,
skb->data + offset,
- size, PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(pdev, tx_buffer_info->dma))
+ size, DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
goto dma_error;
tx_buffer_info->time_stamp = jiffies;
tx_buffer_info->next_to_watch = i;
@@ -5456,12 +5901,12 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
tx_buffer_info->length = size;
- tx_buffer_info->dma = pci_map_page(adapter->pdev,
+ tx_buffer_info->dma = dma_map_page(&adapter->pdev->dev,
frag->page,
offset, size,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
tx_buffer_info->mapped_as_page = true;
- if (pci_dma_mapping_error(pdev, tx_buffer_info->dma))
+ if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
goto dma_error;
tx_buffer_info->time_stamp = jiffies;
tx_buffer_info->next_to_watch = i;
@@ -5697,7 +6142,8 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
}
tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
tx_flags |= IXGBE_TX_FLAGS_VLAN;
- } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
+ } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED &&
+ skb->priority != TC_PRIO_CONTROL) {
tx_flags |= ((skb->queue_mapping & 0x7) << 13);
tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
tx_flags |= IXGBE_TX_FLAGS_VLAN;
@@ -5942,6 +6388,10 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid,
.ndo_do_ioctl = ixgbe_ioctl,
+ .ndo_set_vf_mac = ixgbe_ndo_set_vf_mac,
+ .ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan,
+ .ndo_set_vf_tx_rate = ixgbe_ndo_set_vf_bw,
+ .ndo_get_vf_config = ixgbe_ndo_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ixgbe_netpoll,
#endif
@@ -6039,13 +6489,14 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
if (err)
return err;
- if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
- !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
+ !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
pci_using_dac = 1;
} else {
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_coherent_mask(&pdev->dev,
+ DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev, "No usable DMA "
"configuration, aborting\n");
@@ -6175,7 +6626,9 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
}
/* reset_hw fills in the perm_addr as well */
+ hw->phy.reset_if_overtemp = true;
err = hw->mac.ops.reset_hw(hw);
+ hw->phy.reset_if_overtemp = false;
if (err == IXGBE_ERR_SFP_NOT_PRESENT &&
hw->mac.type == ixgbe_mac_82598EB) {
/*
@@ -6344,6 +6797,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
INIT_WORK(&adapter->fdir_reinit_task, ixgbe_fdir_reinit_task);
+ if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
+ INIT_WORK(&adapter->check_overtemp_task, ixgbe_check_overtemp_task);
#ifdef CONFIG_IXGBE_DCA
if (dca_add_requester(&pdev->dev) == 0) {
adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ixgbe/ixgbe_phy.c
index 1c1efd386956..09e1911ff510 100644
--- a/drivers/net/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ixgbe/ixgbe_phy.c
@@ -135,6 +135,11 @@ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
**/
s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
{
+ /* Don't reset PHY if it's shut down due to overtemp. */
+ if (!hw->phy.reset_if_overtemp &&
+ (IXGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw)))
+ return 0;
+
/*
* Perform soft PHY reset to the PHY_XS.
* This will cause a soft reset to the PHY
@@ -475,7 +480,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
msleep(edata);
break;
case IXGBE_DATA_NL:
- hw_dbg(hw, "DATA: \n");
+ hw_dbg(hw, "DATA:\n");
data_offset++;
hw->eeprom.ops.read(hw, data_offset++,
&phy_offset);
@@ -491,7 +496,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
break;
case IXGBE_CONTROL_NL:
data_offset++;
- hw_dbg(hw, "CONTROL: \n");
+ hw_dbg(hw, "CONTROL:\n");
if (edata == IXGBE_CONTROL_EOL_NL) {
hw_dbg(hw, "EOL\n");
end_data = true;
@@ -531,6 +536,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
u8 comp_codes_10g = 0;
u8 oui_bytes[3] = {0, 0, 0};
u8 cable_tech = 0;
+ u8 cable_spec = 0;
u16 enforce_sfp = 0;
if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber) {
@@ -580,14 +586,30 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
else
hw->phy.sfp_type = ixgbe_sfp_type_unknown;
} else if (hw->mac.type == ixgbe_mac_82599EB) {
- if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
+ if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) {
if (hw->bus.lan_id == 0)
hw->phy.sfp_type =
ixgbe_sfp_type_da_cu_core0;
else
hw->phy.sfp_type =
ixgbe_sfp_type_da_cu_core1;
- else if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
+ } else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) {
+ hw->phy.ops.read_i2c_eeprom(
+ hw, IXGBE_SFF_CABLE_SPEC_COMP,
+ &cable_spec);
+ if (cable_spec &
+ IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING) {
+ if (hw->bus.lan_id == 0)
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_da_act_lmt_core0;
+ else
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_da_act_lmt_core1;
+ } else {
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_unknown;
+ }
+ } else if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
if (hw->bus.lan_id == 0)
hw->phy.sfp_type =
ixgbe_sfp_type_srlr_core0;
@@ -637,10 +659,14 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
switch (vendor_oui) {
case IXGBE_SFF_VENDOR_OUI_TYCO:
if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
- hw->phy.type = ixgbe_phy_tw_tyco;
+ hw->phy.type =
+ ixgbe_phy_sfp_passive_tyco;
break;
case IXGBE_SFF_VENDOR_OUI_FTL:
- hw->phy.type = ixgbe_phy_sfp_ftl;
+ if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
+ hw->phy.type = ixgbe_phy_sfp_ftl_active;
+ else
+ hw->phy.type = ixgbe_phy_sfp_ftl;
break;
case IXGBE_SFF_VENDOR_OUI_AVAGO:
hw->phy.type = ixgbe_phy_sfp_avago;
@@ -650,7 +676,11 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
break;
default:
if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
- hw->phy.type = ixgbe_phy_tw_unknown;
+ hw->phy.type =
+ ixgbe_phy_sfp_passive_unknown;
+ else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
+ hw->phy.type =
+ ixgbe_phy_sfp_active_unknown;
else
hw->phy.type = ixgbe_phy_sfp_unknown;
break;
@@ -658,7 +688,8 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
}
/* All passive DA cables are supported */
- if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) {
+ if (cable_tech & (IXGBE_SFF_DA_PASSIVE_CABLE |
+ IXGBE_SFF_DA_ACTIVE_CABLE)) {
status = 0;
goto out;
}
@@ -1319,3 +1350,28 @@ s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
return status;
}
+/**
+ * ixgbe_tn_check_overtemp - Checks if an overtemp occurred.
+ * @hw: pointer to hardware structure
+ *
+ * Checks if the LASI temp alarm status was triggered due to overtemp
+ **/
+s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw)
+{
+ s32 status = 0;
+ u16 phy_data = 0;
+
+ if (hw->device_id != IXGBE_DEV_ID_82599_T3_LOM)
+ goto out;
+
+ /* Check that the LASI temp alarm status was triggered */
+ hw->phy.ops.read_reg(hw, IXGBE_TN_LASI_STATUS_REG,
+ MDIO_MMD_PMAPMD, &phy_data);
+
+ if (!(phy_data & IXGBE_TN_LASI_STATUS_TEMP_ALARM))
+ goto out;
+
+ status = IXGBE_ERR_OVERTEMP;
+out:
+ return status;
+}
diff --git a/drivers/net/ixgbe/ixgbe_phy.h b/drivers/net/ixgbe/ixgbe_phy.h
index 9cf5f3b4cc5d..ef4ba834c593 100644
--- a/drivers/net/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ixgbe/ixgbe_phy.h
@@ -40,9 +40,12 @@
#define IXGBE_SFF_1GBE_COMP_CODES 0x6
#define IXGBE_SFF_10GBE_COMP_CODES 0x3
#define IXGBE_SFF_CABLE_TECHNOLOGY 0x8
+#define IXGBE_SFF_CABLE_SPEC_COMP 0x3C
/* Bitmasks */
#define IXGBE_SFF_DA_PASSIVE_CABLE 0x4
+#define IXGBE_SFF_DA_ACTIVE_CABLE 0x8
+#define IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING 0x4
#define IXGBE_SFF_1GBASESX_CAPABLE 0x1
#define IXGBE_SFF_1GBASELX_CAPABLE 0x2
#define IXGBE_SFF_10GBASESR_CAPABLE 0x10
@@ -77,6 +80,8 @@
#define IXGBE_I2C_T_SU_STO 4
#define IXGBE_I2C_T_BUF 5
+#define IXGBE_TN_LASI_STATUS_REG 0x9005
+#define IXGBE_TN_LASI_STATUS_TEMP_ALARM 0x0008
s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw);
s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw);
@@ -103,6 +108,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
u16 *list_offset,
u16 *data_offset);
+s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw);
s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 *data);
s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
diff --git a/drivers/net/ixgbe/ixgbe_sriov.c b/drivers/net/ixgbe/ixgbe_sriov.c
index d4cd20f30199..f6cee94ec8e8 100644
--- a/drivers/net/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ixgbe/ixgbe_sriov.c
@@ -48,7 +48,11 @@ int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
int entries, u16 *hash_list, u32 vf)
{
struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
+ struct ixgbe_hw *hw = &adapter->hw;
int i;
+ u32 vector_bit;
+ u32 vector_reg;
+ u32 mta_reg;
/* only so many hash values supported */
entries = min(entries, IXGBE_MAX_VF_MC_ENTRIES);
@@ -68,8 +72,13 @@ int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
vfinfo->vf_mc_hashes[i] = hash_list[i];
}
- /* Flush and reset the mta with the new values */
- ixgbe_set_rx_mode(adapter->netdev);
+ for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) {
+ vector_reg = (vfinfo->vf_mc_hashes[i] >> 5) & 0x7F;
+ vector_bit = vfinfo->vf_mc_hashes[i] & 0x1F;
+ mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
+ mta_reg |= (1 << vector_bit);
+ IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
+ }
return 0;
}
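The loop above derives the MTA location from each stored 12-bit multicast hash: the upper seven bits select one of the 128 32-bit MTA registers and the lower five bits select the bit within that register. A standalone sketch of the arithmetic, with a hash value chosen only for illustration:

    #include <stdio.h>

    int main(void)
    {
            unsigned int hash = 0x0a2b & 0x0fff;           /* example 12-bit multicast hash */
            unsigned int vector_reg = (hash >> 5) & 0x7F;  /* MTA register index, 0..127 */
            unsigned int vector_bit = hash & 0x1F;         /* bit within that 32-bit register */
            unsigned int mta = 0;

            mta |= 1u << vector_bit;
            printf("hash 0x%03x -> MTA[%u], bit %u, value 0x%08x\n",
                   hash, vector_reg, vector_bit, mta);
            return 0;
    }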
@@ -98,38 +107,51 @@ void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf)
{
- u32 ctrl;
-
- /* Check if global VLAN already set, if not set it */
- ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
- if (!(ctrl & IXGBE_VLNCTRL_VFE)) {
- /* enable VLAN tag insert/strip */
- ctrl |= IXGBE_VLNCTRL_VFE;
- ctrl &= ~IXGBE_VLNCTRL_CFIEN;
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
- }
-
return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add);
}
-void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf)
+void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
{
u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
- vmolr |= (IXGBE_VMOLR_AUPE |
- IXGBE_VMOLR_ROMPE |
+ vmolr |= (IXGBE_VMOLR_ROMPE |
IXGBE_VMOLR_ROPE |
IXGBE_VMOLR_BAM);
+ if (aupe)
+ vmolr |= IXGBE_VMOLR_AUPE;
+ else
+ vmolr &= ~IXGBE_VMOLR_AUPE;
IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
}
+static void ixgbe_set_vmvir(struct ixgbe_adapter *adapter, u32 vid, u32 vf)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ if (vid)
+ IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf),
+ (vid | IXGBE_VMVIR_VLANA_DEFAULT));
+ else
+ IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0);
+}
+
inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
{
struct ixgbe_hw *hw = &adapter->hw;
/* reset offloads to defaults */
- ixgbe_set_vmolr(hw, vf);
-
+ if (adapter->vfinfo[vf].pf_vlan) {
+ ixgbe_set_vf_vlan(adapter, true,
+ adapter->vfinfo[vf].pf_vlan, vf);
+ ixgbe_set_vmvir(adapter,
+ (adapter->vfinfo[vf].pf_vlan |
+ (adapter->vfinfo[vf].pf_qos <<
+ VLAN_PRIO_SHIFT)), vf);
+ ixgbe_set_vmolr(hw, vf, false);
+ } else {
+ ixgbe_set_vmvir(adapter, 0, vf);
+ ixgbe_set_vmolr(hw, vf, true);
+ }
/* reset multicast table array for vf */
adapter->vfinfo[vf].num_vf_mc_hashes = 0;
@@ -263,10 +285,12 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
case IXGBE_VF_SET_MAC_ADDR:
{
u8 *new_mac = ((u8 *)(&msgbuf[1]));
- if (is_valid_ether_addr(new_mac))
+ if (is_valid_ether_addr(new_mac) &&
+ !adapter->vfinfo[vf].pf_set_mac)
ixgbe_set_vf_mac(adapter, vf, new_mac);
else
- retval = -1;
+ ixgbe_set_vf_mac(adapter,
+ vf, adapter->vfinfo[vf].vf_mac_addresses);
}
break;
case IXGBE_VF_SET_MULTICAST:
@@ -360,3 +384,76 @@ void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter)
}
}
+int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ if (!is_valid_ether_addr(mac) || (vf >= adapter->num_vfs))
+ return -EINVAL;
+ adapter->vfinfo[vf].pf_set_mac = true;
+ dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
+ dev_info(&adapter->pdev->dev, "Reload the VF driver to make this"
+ " change effective.");
+ if (test_bit(__IXGBE_DOWN, &adapter->state)) {
+ dev_warn(&adapter->pdev->dev, "The VF MAC address has been set,"
+ " but the PF device is not up.\n");
+ dev_warn(&adapter->pdev->dev, "Bring the PF device up before"
+ " attempting to use the VF device.\n");
+ }
+ return ixgbe_set_vf_mac(adapter, vf, mac);
+}
+
+int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
+{
+ int err = 0;
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+ if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7))
+ return -EINVAL;
+ if (vlan || qos) {
+ err = ixgbe_set_vf_vlan(adapter, true, vlan, vf);
+ if (err)
+ goto out;
+ ixgbe_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
+ ixgbe_set_vmolr(&adapter->hw, vf, false);
+ adapter->vfinfo[vf].pf_vlan = vlan;
+ adapter->vfinfo[vf].pf_qos = qos;
+ dev_info(&adapter->pdev->dev,
+ "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
+ if (test_bit(__IXGBE_DOWN, &adapter->state)) {
+ dev_warn(&adapter->pdev->dev,
+ "The VF VLAN has been set,"
+ " but the PF device is not up.\n");
+ dev_warn(&adapter->pdev->dev,
+ "Bring the PF device up before"
+ " attempting to use the VF device.\n");
+ }
+ } else {
+ err = ixgbe_set_vf_vlan(adapter, false,
+ adapter->vfinfo[vf].pf_vlan, vf);
+ ixgbe_set_vmvir(adapter, vlan, vf);
+ ixgbe_set_vmolr(&adapter->hw, vf, true);
+ adapter->vfinfo[vf].pf_vlan = 0;
+ adapter->vfinfo[vf].pf_qos = 0;
+ }
+out:
+ return err;
+}
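+
+/* ixgbe_ndo_set_vf_vlan() above packs the 12-bit VLAN ID and the 3-bit
+ * priority into one word before programming VMVIR; with the 802.1Q layout
+ * the priority sits at bit 13 (VLAN_PRIO_SHIFT). The following standalone
+ * sketch (illustrative values only, EX_VLAN_PRIO_SHIFT assumed equal to the
+ * kernel's VLAN_PRIO_SHIFT) shows the bounds check and the packing:
+ *
+ *     #include <stdio.h>
+ *
+ *     #define EX_VLAN_PRIO_SHIFT 13
+ *
+ *     int main(void)
+ *     {
+ *             unsigned int vlan = 100, qos = 5;  // example VF port VLAN and priority
+ *             unsigned int tag;
+ *
+ *             if (vlan > 4095 || qos > 7)        // same bounds the ndo handler enforces
+ *                     return 1;
+ *             tag = vlan | (qos << EX_VLAN_PRIO_SHIFT);
+ *             printf("vlan %u qos %u -> VMVIR tag field 0x%04x\n", vlan, qos, tag);
+ *             return 0;
+ *     }
+ */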
+
+int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
+{
+ return -EOPNOTSUPP;
+}
+
+int ixgbe_ndo_get_vf_config(struct net_device *netdev,
+ int vf, struct ifla_vf_info *ivi)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ if (vf >= adapter->num_vfs)
+ return -EINVAL;
+ ivi->vf = vf;
+ memcpy(&ivi->mac, adapter->vfinfo[vf].vf_mac_addresses, ETH_ALEN);
+ ivi->tx_rate = 0;
+ ivi->vlan = adapter->vfinfo[vf].pf_vlan;
+ ivi->qos = adapter->vfinfo[vf].pf_qos;
+ return 0;
+}
diff --git a/drivers/net/ixgbe/ixgbe_sriov.h b/drivers/net/ixgbe/ixgbe_sriov.h
index 51d1106c45a1..184730ecdfb6 100644
--- a/drivers/net/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ixgbe/ixgbe_sriov.h
@@ -32,7 +32,7 @@ int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
int entries, u16 *hash_list, u32 vf);
void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter);
int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf);
-void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf);
+void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe);
void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf);
void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf);
void ixgbe_msg_task(struct ixgbe_adapter *adapter);
@@ -42,6 +42,12 @@ int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask);
void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter);
void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter);
void ixgbe_dump_registers(struct ixgbe_adapter *adapter);
+int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int queue, u8 *mac);
+int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan,
+ u8 qos);
+int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
+int ixgbe_ndo_get_vf_config(struct net_device *netdev,
+ int vf, struct ifla_vf_info *ivi);
#endif /* _IXGBE_SRIOV_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index 534affcc38ca..2eb6e151016c 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -51,6 +51,7 @@
#define IXGBE_DEV_ID_82599_KX4 0x10F7
#define IXGBE_DEV_ID_82599_KX4_MEZZ 0x1514
#define IXGBE_DEV_ID_82599_KR 0x1517
+#define IXGBE_DEV_ID_82599_T3_LOM 0x151C
#define IXGBE_DEV_ID_82599_CX4 0x10F9
#define IXGBE_DEV_ID_82599_SFP 0x10FB
#define IXGBE_DEV_ID_82599_SFP_EM 0x1507
@@ -73,6 +74,7 @@
/* NVM Registers */
#define IXGBE_EEC 0x10010
#define IXGBE_EERD 0x10014
+#define IXGBE_EEWR 0x10018
#define IXGBE_FLA 0x1001C
#define IXGBE_EEMNGCTL 0x10110
#define IXGBE_EEMNGDATA 0x10114
@@ -219,6 +221,7 @@
#define IXGBE_MTQC 0x08120
#define IXGBE_VLVF(_i) (0x0F100 + ((_i) * 4)) /* 64 of these (0-63) */
#define IXGBE_VLVFB(_i) (0x0F200 + ((_i) * 4)) /* 128 of these (0-127) */
+#define IXGBE_VMVIR(_i) (0x08000 + ((_i) * 4)) /* 64 of these (0-63) */
#define IXGBE_VT_CTL 0x051B0
#define IXGBE_VFRE(_i) (0x051E0 + ((_i) * 4))
#define IXGBE_VFTE(_i) (0x08110 + ((_i) * 4))
@@ -698,6 +701,7 @@
#define IXGBE_MREVID 0x11064
#define IXGBE_DCA_ID 0x11070
#define IXGBE_DCA_CTRL 0x11074
+#define IXGBE_SWFW_SYNC IXGBE_GSSR
/* PCIe registers 82599-specific */
#define IXGBE_GCR_EXT 0x11050
@@ -1311,6 +1315,10 @@
#define IXGBE_VLVF_ENTRIES 64
#define IXGBE_VLVF_VLANID_MASK 0x00000FFF
+/* Per VF Port VLAN insertion rules */
+#define IXGBE_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */
+#define IXGBE_VMVIR_VLANA_NEVER 0x80000000 /* Never insert VLAN tag */
+
#define IXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.1q protocol */
/* STATUS Bit Masks */
@@ -1458,8 +1466,9 @@
#define IXGBE_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
#define IXGBE_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */
#define IXGBE_SWSM_WMNG 0x00000004 /* Wake MNG Clock */
+#define IXGBE_SWFW_REGSMP 0x80000000 /* Register Semaphore bit 31 */
-/* GSSR definitions */
+/* SW_FW_SYNC/GSSR definitions */
#define IXGBE_GSSR_EEP_SM 0x0001
#define IXGBE_GSSR_PHY0_SM 0x0002
#define IXGBE_GSSR_PHY1_SM 0x0004
@@ -1479,6 +1488,8 @@
#define IXGBE_EEC_GNT 0x00000080 /* EEPROM Access Grant */
#define IXGBE_EEC_PRES 0x00000100 /* EEPROM Present */
#define IXGBE_EEC_ARD 0x00000200 /* EEPROM Auto Read Done */
+#define IXGBE_EEC_FLUP 0x00800000 /* Flash update command */
+#define IXGBE_EEC_FLUDONE 0x04000000 /* Flash update done */
/* EEPROM Addressing bits based on type (0-small, 1-large) */
#define IXGBE_EEC_ADDR_SIZE 0x00000400
#define IXGBE_EEC_SIZE 0x00007800 /* EEPROM Size */
@@ -1534,10 +1545,12 @@
#define IXGBE_EEPROM_ERASE256_OPCODE_SPI 0xDB /* EEPROM ERASE 256B */
/* EEPROM Read Register */
-#define IXGBE_EEPROM_READ_REG_DATA 16 /* data offset in EEPROM read reg */
-#define IXGBE_EEPROM_READ_REG_DONE 2 /* Offset to READ done bit */
-#define IXGBE_EEPROM_READ_REG_START 1 /* First bit to start operation */
-#define IXGBE_EEPROM_READ_ADDR_SHIFT 2 /* Shift to the address bits */
+#define IXGBE_EEPROM_RW_REG_DATA 16 /* data offset in EEPROM read reg */
+#define IXGBE_EEPROM_RW_REG_DONE 2 /* Offset to READ done bit */
+#define IXGBE_EEPROM_RW_REG_START 1 /* First bit to start operation */
+#define IXGBE_EEPROM_RW_ADDR_SHIFT 2 /* Shift to the address bits */
+#define IXGBE_NVM_POLL_WRITE 1 /* Flag for polling for write complete */
+#define IXGBE_NVM_POLL_READ 0 /* Flag for polling for read complete */
#define IXGBE_ETH_LENGTH_OF_ADDRESS 6
@@ -1545,9 +1558,15 @@
#define IXGBE_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */
#endif
-#ifndef IXGBE_EERD_ATTEMPTS
-/* Number of 5 microseconds we wait for EERD read to complete */
-#define IXGBE_EERD_ATTEMPTS 100000
+#ifndef IXGBE_EERD_EEWR_ATTEMPTS
+/* Number of 5-microsecond intervals we wait for the EERD read and
+ * EEWR write to complete */
+#define IXGBE_EERD_EEWR_ATTEMPTS 100000
+#endif
+
+#ifndef IXGBE_FLUDONE_ATTEMPTS
+/* # attempts we wait for flash update to complete */
+#define IXGBE_FLUDONE_ATTEMPTS 20000
#endif
#define IXGBE_SAN_MAC_ADDR_PORT0_OFFSET 0x0
@@ -2090,6 +2109,7 @@ typedef u32 ixgbe_physical_layer;
#define IXGBE_PHYSICAL_LAYER_1000BASE_BX 0x0400
#define IXGBE_PHYSICAL_LAYER_10GBASE_KR 0x0800
#define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x1000
+#define IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA 0x2000
/* Software ATR hash keys */
#define IXGBE_ATR_BUCKET_HASH_KEY 0xE214AD3D
@@ -2159,10 +2179,12 @@ enum ixgbe_phy_type {
ixgbe_phy_qt,
ixgbe_phy_xaui,
ixgbe_phy_nl,
- ixgbe_phy_tw_tyco,
- ixgbe_phy_tw_unknown,
+ ixgbe_phy_sfp_passive_tyco,
+ ixgbe_phy_sfp_passive_unknown,
+ ixgbe_phy_sfp_active_unknown,
ixgbe_phy_sfp_avago,
ixgbe_phy_sfp_ftl,
+ ixgbe_phy_sfp_ftl_active,
ixgbe_phy_sfp_unknown,
ixgbe_phy_sfp_intel,
ixgbe_phy_sfp_unsupported,
@@ -2190,6 +2212,8 @@ enum ixgbe_sfp_type {
ixgbe_sfp_type_da_cu_core1 = 4,
ixgbe_sfp_type_srlr_core0 = 5,
ixgbe_sfp_type_srlr_core1 = 6,
+ ixgbe_sfp_type_da_act_lmt_core0 = 7,
+ ixgbe_sfp_type_da_act_lmt_core1 = 8,
ixgbe_sfp_type_not_present = 0xFFFE,
ixgbe_sfp_type_unknown = 0xFFFF
};
@@ -2263,6 +2287,7 @@ struct ixgbe_addr_filter_info {
u32 mc_addr_in_rar_count;
u32 mta_in_use;
u32 overflow_promisc;
+ bool uc_set_promisc;
bool user_set_promisc;
};
@@ -2419,8 +2444,7 @@ struct ixgbe_mac_operations {
s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32);
s32 (*init_rx_addrs)(struct ixgbe_hw *);
s32 (*update_uc_addr_list)(struct ixgbe_hw *, struct net_device *);
- s32 (*update_mc_addr_list)(struct ixgbe_hw *, u8 *, u32,
- ixgbe_mc_addr_itr);
+ s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *);
s32 (*enable_mc)(struct ixgbe_hw *);
s32 (*disable_mc)(struct ixgbe_hw *);
s32 (*clear_vfta)(struct ixgbe_hw *);
@@ -2447,6 +2471,7 @@ struct ixgbe_phy_operations {
s32 (*write_i2c_byte)(struct ixgbe_hw *, u8, u8, u8);
s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *);
s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
+ s32 (*check_overtemp)(struct ixgbe_hw *);
};
struct ixgbe_eeprom_info {
@@ -2471,6 +2496,7 @@ struct ixgbe_mac_info {
u32 mcft_size;
u32 vft_size;
u32 num_rar_entries;
+ u32 rar_highwater;
u32 max_tx_queues;
u32 max_rx_queues;
u32 max_msix_vectors;
@@ -2494,6 +2520,7 @@ struct ixgbe_phy_info {
enum ixgbe_smart_speed smart_speed;
bool smart_speed_active;
bool multispeed_fiber;
+ bool reset_if_overtemp;
};
#include "ixgbe_mbx.h"
@@ -2577,8 +2604,11 @@ struct ixgbe_info {
#define IXGBE_ERR_SFP_NOT_SUPPORTED -19
#define IXGBE_ERR_SFP_NOT_PRESENT -20
#define IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT -21
+#define IXGBE_ERR_NO_SAN_ADDR_PTR -22
#define IXGBE_ERR_FDIR_REINIT_FAILED -23
#define IXGBE_ERR_EEPROM_VERSION -24
+#define IXGBE_ERR_NO_SPACE -25
+#define IXGBE_ERR_OVERTEMP -26
#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF
#endif /* _IXGBE_TYPE_H_ */
diff --git a/drivers/net/ixgbevf/defines.h b/drivers/net/ixgbevf/defines.h
index c44fdb05447a..ca2c81f49a05 100644
--- a/drivers/net/ixgbevf/defines.h
+++ b/drivers/net/ixgbevf/defines.h
@@ -41,11 +41,13 @@ typedef u32 ixgbe_link_speed;
#define IXGBE_LINK_SPEED_1GB_FULL 0x0020
#define IXGBE_LINK_SPEED_10GB_FULL 0x0080
-#define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */
-#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */
-#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Enable specific Tx Queue */
-#define IXGBE_LINKS_UP 0x40000000
-#define IXGBE_LINKS_SPEED 0x20000000
+#define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */
+#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */
+#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Enable specific Tx Queue */
+#define IXGBE_LINKS_UP 0x40000000
+#define IXGBE_LINKS_SPEED_82599 0x30000000
+#define IXGBE_LINKS_SPEED_10G_82599 0x30000000
+#define IXGBE_LINKS_SPEED_1G_82599 0x20000000
/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
#define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE 8
diff --git a/drivers/net/ixgbevf/ixgbevf_main.c b/drivers/net/ixgbevf/ixgbevf_main.c
index 0cd6202dfacc..a16cff7e54a3 100644
--- a/drivers/net/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ixgbevf/ixgbevf_main.c
@@ -139,15 +139,15 @@ static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_adapter *adapter,
{
if (tx_buffer_info->dma) {
if (tx_buffer_info->mapped_as_page)
- pci_unmap_page(adapter->pdev,
+ dma_unmap_page(&adapter->pdev->dev,
tx_buffer_info->dma,
tx_buffer_info->length,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
else
- pci_unmap_single(adapter->pdev,
+ dma_unmap_single(&adapter->pdev->dev,
tx_buffer_info->dma,
tx_buffer_info->length,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
tx_buffer_info->dma = 0;
}
if (tx_buffer_info->skb) {
@@ -416,10 +416,10 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
bi->page_offset ^= (PAGE_SIZE / 2);
}
- bi->page_dma = pci_map_page(pdev, bi->page,
+ bi->page_dma = dma_map_page(&pdev->dev, bi->page,
bi->page_offset,
(PAGE_SIZE / 2),
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
}
skb = bi->skb;
@@ -442,9 +442,9 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
bi->skb = skb;
}
if (!bi->dma) {
- bi->dma = pci_map_single(pdev, skb->data,
+ bi->dma = dma_map_single(&pdev->dev, skb->data,
rx_ring->rx_buf_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
}
/* Refresh the desc even if buffer_addrs didn't change because
* each write-back erases this info. */
@@ -536,16 +536,16 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
rx_buffer_info->skb = NULL;
if (rx_buffer_info->dma) {
- pci_unmap_single(pdev, rx_buffer_info->dma,
+ dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
rx_ring->rx_buf_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
rx_buffer_info->dma = 0;
skb_put(skb, len);
}
if (upper_len) {
- pci_unmap_page(pdev, rx_buffer_info->page_dma,
- PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
+ dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
+ PAGE_SIZE / 2, DMA_FROM_DEVICE);
rx_buffer_info->page_dma = 0;
skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
rx_buffer_info->page,
@@ -604,14 +604,13 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
* packets not getting split correctly
*/
if (staterr & IXGBE_RXD_STAT_LB) {
- u32 header_fixup_len = skb->len - skb->data_len;
+ u32 header_fixup_len = skb_headlen(skb);
if (header_fixup_len < 14)
skb_push(skb, header_fixup_len);
}
skb->protocol = eth_type_trans(skb, adapter->netdev);
ixgbevf_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);
- adapter->netdev->last_rx = jiffies;
next_desc:
rx_desc->wb.upper.status_error = 0;
@@ -947,8 +946,6 @@ static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector)
itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
ixgbevf_write_eitr(adapter, v_idx, itr_reg);
}
-
- return;
}
static irqreturn_t ixgbevf_msix_mbx(int irq, void *data)
@@ -962,12 +959,28 @@ static irqreturn_t ixgbevf_msix_mbx(int irq, void *data)
eicr = IXGBE_READ_REG(hw, IXGBE_VTEICS);
IXGBE_WRITE_REG(hw, IXGBE_VTEICR, eicr);
+ if (!hw->mbx.ops.check_for_ack(hw)) {
+ /*
+ * checking for the ack clears the PFACK bit. Place
+ * it back in the v2p_mailbox cache so that anyone
+ * polling for an ack will not miss it. Also
+ * avoid the read below because the code to read
+ * the mailbox will also clear the ack bit. This was
+ * causing lost acks. Just cache the bit and exit
+ * the IRQ handler.
+ */
+ hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK;
+ goto out;
+ }
+
+ /* Not an ack interrupt, go ahead and read the message */
hw->mbx.ops.read(hw, &msg, 1);
if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG)
mod_timer(&adapter->watchdog_timer,
round_jiffies(jiffies + 1));
+out:
return IRQ_HANDLED;
}
@@ -1496,22 +1509,6 @@ static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
}
}
-static u8 *ixgbevf_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr,
- u32 *vmdq)
-{
- struct dev_mc_list *mc_ptr;
- u8 *addr = *mc_addr_ptr;
- *vmdq = 0;
-
- mc_ptr = container_of(addr, struct dev_mc_list, dmi_addr[0]);
- if (mc_ptr->next)
- *mc_addr_ptr = mc_ptr->next->dmi_addr;
- else
- *mc_addr_ptr = NULL;
-
- return addr;
-}
-
/**
* ixgbevf_set_rx_mode - Multicast set
* @netdev: network interface device structure
@@ -1524,16 +1521,10 @@ static void ixgbevf_set_rx_mode(struct net_device *netdev)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
- u8 *addr_list = NULL;
- int addr_count = 0;
/* reprogram multicast list */
- addr_count = netdev_mc_count(netdev);
- if (addr_count)
- addr_list = netdev->mc_list->dmi_addr;
if (hw->mac.ops.update_mc_addr_list)
- hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count,
- ixgbevf_addr_list_itr);
+ hw->mac.ops.update_mc_addr_list(hw, netdev);
}
static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
@@ -1744,9 +1735,9 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
rx_buffer_info = &rx_ring->rx_buffer_info[i];
if (rx_buffer_info->dma) {
- pci_unmap_single(pdev, rx_buffer_info->dma,
+ dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
rx_ring->rx_buf_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
rx_buffer_info->dma = 0;
}
if (rx_buffer_info->skb) {
@@ -1760,8 +1751,8 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
}
if (!rx_buffer_info->page)
continue;
- pci_unmap_page(pdev, rx_buffer_info->page_dma, PAGE_SIZE / 2,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
+ PAGE_SIZE / 2, DMA_FROM_DEVICE);
rx_buffer_info->page_dma = 0;
put_page(rx_buffer_info->page);
rx_buffer_info->page = NULL;
@@ -2158,8 +2149,6 @@ static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
pci_disable_msix(adapter->pdev);
kfree(adapter->msix_entries);
adapter->msix_entries = NULL;
-
- return;
}
/**
@@ -2418,9 +2407,9 @@ static void ixgbevf_watchdog_task(struct work_struct *work)
if (link_up) {
if (!netif_carrier_ok(netdev)) {
- hw_dbg(&adapter->hw, "NIC Link is Up %s, ",
- ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
- "10 Gbps\n" : "1 Gbps\n"));
+ hw_dbg(&adapter->hw, "NIC Link is Up, %u Gbps\n",
+ (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
+ 10 : 1);
netif_carrier_on(netdev);
netif_tx_wake_all_queues(netdev);
} else {
@@ -2468,7 +2457,8 @@ void ixgbevf_free_tx_resources(struct ixgbevf_adapter *adapter,
vfree(tx_ring->tx_buffer_info);
tx_ring->tx_buffer_info = NULL;
- pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
+ dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
+ tx_ring->dma);
tx_ring->desc = NULL;
}
@@ -2513,8 +2503,8 @@ int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
tx_ring->size = ALIGN(tx_ring->size, 4096);
- tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
- &tx_ring->dma);
+ tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
+ &tx_ring->dma, GFP_KERNEL);
if (!tx_ring->desc)
goto err;
@@ -2584,8 +2574,8 @@ int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
rx_ring->size = ALIGN(rx_ring->size, 4096);
- rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
- &rx_ring->dma);
+ rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
+ &rx_ring->dma, GFP_KERNEL);
if (!rx_ring->desc) {
hw_dbg(&adapter->hw,
@@ -2646,7 +2636,8 @@ void ixgbevf_free_rx_resources(struct ixgbevf_adapter *adapter,
vfree(rx_ring->rx_buffer_info);
rx_ring->rx_buffer_info = NULL;
- pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
+ dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
+ rx_ring->dma);
rx_ring->desc = NULL;
}
@@ -2958,10 +2949,10 @@ static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
tx_buffer_info->length = size;
tx_buffer_info->mapped_as_page = false;
- tx_buffer_info->dma = pci_map_single(adapter->pdev,
+ tx_buffer_info->dma = dma_map_single(&adapter->pdev->dev,
skb->data + offset,
- size, PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(pdev, tx_buffer_info->dma))
+ size, DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
goto dma_error;
tx_buffer_info->time_stamp = jiffies;
tx_buffer_info->next_to_watch = i;
@@ -2987,13 +2978,13 @@ static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
tx_buffer_info->length = size;
- tx_buffer_info->dma = pci_map_page(adapter->pdev,
+ tx_buffer_info->dma = dma_map_page(&adapter->pdev->dev,
frag->page,
offset,
size,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
tx_buffer_info->mapped_as_page = true;
- if (pci_dma_mapping_error(pdev, tx_buffer_info->dma))
+ if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
goto dma_error;
tx_buffer_info->time_stamp = jiffies;
tx_buffer_info->next_to_watch = i;
@@ -3189,8 +3180,6 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
ixgbevf_tx_map(adapter, tx_ring, skb, tx_flags, first),
skb->len, hdr_len);
- netdev->trans_start = jiffies;
-
ixgbevf_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
return NETDEV_TX_OK;
@@ -3334,14 +3323,14 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
if (err)
return err;
- if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
- !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
+ !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
pci_using_dac = 1;
} else {
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- err = pci_set_consistent_dma_mask(pdev,
- DMA_BIT_MASK(32));
+ err = dma_set_coherent_mask(&pdev->dev,
+ DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev, "No usable DMA "
"configuration, aborting\n");
@@ -3482,7 +3471,7 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
hw_dbg(hw, "MAC: %d\n", hw->mac.type);
- hw_dbg(hw, "LRO is disabled \n");
+ hw_dbg(hw, "LRO is disabled\n");
hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
cards_found++;
diff --git a/drivers/net/ixgbevf/vf.c b/drivers/net/ixgbevf/vf.c
index 4b5dec0ec140..f6f929958ba0 100644
--- a/drivers/net/ixgbevf/vf.c
+++ b/drivers/net/ixgbevf/vf.c
@@ -252,22 +252,18 @@ static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
/**
* ixgbevf_update_mc_addr_list_vf - Update Multicast addresses
* @hw: pointer to the HW structure
- * @mc_addr_list: array of multicast addresses to program
- * @mc_addr_count: number of multicast addresses to program
- * @next: caller supplied function to return next address in list
+ * @netdev: pointer to net device structure
*
* Updates the Multicast Table Array.
**/
-static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list,
- u32 mc_addr_count,
- ixgbe_mc_addr_itr next)
+static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
+ struct net_device *netdev)
{
+ struct netdev_hw_addr *ha;
struct ixgbe_mbx_info *mbx = &hw->mbx;
u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
u16 *vector_list = (u16 *)&msgbuf[1];
- u32 vector;
u32 cnt, i;
- u32 vmdq;
/* Each entry in the list uses 1 16 bit word. We have 30
* 16 bit words available in our HW msg buffer (minus 1 for the
@@ -278,13 +274,17 @@ static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list,
* addresses except in large enterprise network environments.
*/
- cnt = (mc_addr_count > 30) ? 30 : mc_addr_count;
+ cnt = netdev_mc_count(netdev);
+ if (cnt > 30)
+ cnt = 30;
msgbuf[0] = IXGBE_VF_SET_MULTICAST;
msgbuf[0] |= cnt << IXGBE_VT_MSGINFO_SHIFT;
- for (i = 0; i < cnt; i++) {
- vector = ixgbevf_mta_vector(hw, next(hw, &mc_addr_list, &vmdq));
- vector_list[i] = vector;
+ i = 0;
+ netdev_for_each_mc_addr(ha, netdev) {
+ if (i == cnt)
+ break;
+ vector_list[i++] = ixgbevf_mta_vector(hw, ha->addr);
}
mbx->ops.write_posted(hw, msgbuf, IXGBE_VFMAILBOX_SIZE);
@@ -359,7 +359,8 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
else
*link_up = false;
- if (links_reg & IXGBE_LINKS_SPEED)
+ if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
+ IXGBE_LINKS_SPEED_10G_82599)
*speed = IXGBE_LINK_SPEED_10GB_FULL;
else
*speed = IXGBE_LINK_SPEED_1GB_FULL;
diff --git a/drivers/net/ixgbevf/vf.h b/drivers/net/ixgbevf/vf.h
index 1f31b052d4b4..94b750b8874f 100644
--- a/drivers/net/ixgbevf/vf.h
+++ b/drivers/net/ixgbevf/vf.h
@@ -32,6 +32,7 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
+#include <linux/netdevice.h>
#include "defines.h"
#include "regs.h"
@@ -62,8 +63,7 @@ struct ixgbe_mac_operations {
/* RAR, Multicast, VLAN */
s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32);
s32 (*init_rx_addrs)(struct ixgbe_hw *);
- s32 (*update_mc_addr_list)(struct ixgbe_hw *, u8 *, u32,
- ixgbe_mc_addr_itr);
+ s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *);
s32 (*enable_mc)(struct ixgbe_hw *);
s32 (*disable_mc)(struct ixgbe_hw *);
s32 (*clear_vfta)(struct ixgbe_hw *);
diff --git a/drivers/net/ixp2000/ixpdev.c b/drivers/net/ixp2000/ixpdev.c
index d5932ca3e27d..78ddd8b79e7e 100644
--- a/drivers/net/ixp2000/ixpdev.c
+++ b/drivers/net/ixp2000/ixpdev.c
@@ -64,8 +64,6 @@ static int ixpdev_xmit(struct sk_buff *skb, struct net_device *dev)
ixp2000_reg_write(RING_TX_PENDING,
TX_BUF_DESC_BASE + (entry * sizeof(struct ixpdev_tx_desc)));
- dev->trans_start = jiffies;
-
local_irq_save(flags);
ip->tx_queue_entries++;
if (ip->tx_queue_entries == TX_BUF_COUNT_PER_CHAN)
diff --git a/drivers/net/jme.c b/drivers/net/jme.c
index b705ad3a53a7..99f24f5cac53 100644
--- a/drivers/net/jme.c
+++ b/drivers/net/jme.c
@@ -103,8 +103,6 @@ jme_mdio_write(struct net_device *netdev,
if (i == 0)
jeprintk(jme->pdev, "phy(%d) write timeout : %d\n", phy, reg);
-
- return;
}
static inline void
@@ -130,8 +128,6 @@ jme_reset_phy_processor(struct jme_adapter *jme)
jme_mdio_write(jme->dev,
jme->mii_if.phy_id,
MII_BMCR, val | BMCR_RESET);
-
- return;
}
static void
@@ -2010,12 +2006,12 @@ jme_set_multi(struct net_device *netdev)
} else if (netdev->flags & IFF_ALLMULTI) {
jme->reg_rxmcs |= RXMCS_ALLMULFRAME;
} else if (netdev->flags & IFF_MULTICAST) {
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
int bit_nr;
jme->reg_rxmcs |= RXMCS_MULFRAME | RXMCS_MULFILTERED;
- netdev_for_each_mc_addr(mclist, netdev) {
- bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3F;
+ netdev_for_each_mc_addr(ha, netdev) {
+ bit_nr = ether_crc(ETH_ALEN, ha->addr) & 0x3F;
mc_hash[bit_nr >> 5] |= 1 << (bit_nr & 0x1F);
}
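Several drivers touched by this patch (jme, korina, ks8851, ixgbevf) switch from walking struct dev_mc_list to iterating the netdev_hw_addr list. The common replacement pattern is sketched below; the helper name is illustrative and the CRC-based hash mirrors what jme computes, other drivers use their own hash.

    #include <linux/netdevice.h>
    #include <linux/etherdevice.h>
    #include <linux/crc32.h>

    /* Minimal sketch: build a 64-bit multicast hash filter from the MC list. */
    static void example_build_mc_hash(struct net_device *dev, u32 mc_hash[2])
    {
            struct netdev_hw_addr *ha;
            int bit_nr;

            mc_hash[0] = mc_hash[1] = 0;
            netdev_for_each_mc_addr(ha, dev) {
                    bit_nr = ether_crc(ETH_ALEN, ha->addr) & 0x3F;
                    mc_hash[bit_nr >> 5] |= 1 << (bit_nr & 0x1F);
            }
    }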
@@ -2839,7 +2835,7 @@ jme_init_one(struct pci_dev *pdev,
default:
jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_512B;
break;
- };
+ }
/*
* Must check before reset_mac_processor
diff --git a/drivers/net/korina.c b/drivers/net/korina.c
index 300c2249812d..26bf1b76b997 100644
--- a/drivers/net/korina.c
+++ b/drivers/net/korina.c
@@ -482,7 +482,7 @@ static void korina_multicast_list(struct net_device *dev)
{
struct korina_private *lp = netdev_priv(dev);
unsigned long flags;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
u32 recognise = ETH_ARC_AB; /* always accept broadcasts */
int i;
@@ -502,8 +502,8 @@ static void korina_multicast_list(struct net_device *dev)
for (i = 0; i < 4; i++)
hash_table[i] = 0;
- netdev_for_each_mc_addr(dmi, dev) {
- char *addrs = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ char *addrs = ha->addr;
if (!(*addrs & 1))
continue;
@@ -1135,7 +1135,7 @@ static int korina_probe(struct platform_device *pdev)
r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_regs");
dev->base_addr = r->start;
- lp->eth_regs = ioremap_nocache(r->start, r->end - r->start);
+ lp->eth_regs = ioremap_nocache(r->start, resource_size(r));
if (!lp->eth_regs) {
printk(KERN_ERR DRV_NAME ": cannot remap registers\n");
rc = -ENXIO;
@@ -1143,7 +1143,7 @@ static int korina_probe(struct platform_device *pdev)
}
r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_dma_rx");
- lp->rx_dma_regs = ioremap_nocache(r->start, r->end - r->start);
+ lp->rx_dma_regs = ioremap_nocache(r->start, resource_size(r));
if (!lp->rx_dma_regs) {
printk(KERN_ERR DRV_NAME ": cannot remap Rx DMA registers\n");
rc = -ENXIO;
@@ -1151,7 +1151,7 @@ static int korina_probe(struct platform_device *pdev)
}
r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_dma_tx");
- lp->tx_dma_regs = ioremap_nocache(r->start, r->end - r->start);
+ lp->tx_dma_regs = ioremap_nocache(r->start, resource_size(r));
if (!lp->tx_dma_regs) {
printk(KERN_ERR DRV_NAME ": cannot remap Tx DMA registers\n");
rc = -ENXIO;
diff --git a/drivers/net/ks8842.c b/drivers/net/ks8842.c
index 5c45cb58d023..f852ab3ae9cf 100644
--- a/drivers/net/ks8842.c
+++ b/drivers/net/ks8842.c
@@ -1,5 +1,5 @@
/*
- * ks8842_main.c timberdale KS8842 ethernet driver
+ * ks8842.c timberdale KS8842 ethernet driver
* Copyright (c) 2009 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
@@ -20,12 +20,15 @@
* The Micrel KS8842 behind the timberdale FPGA
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
+#include <linux/ks8842.h>
#define DRV_NAME "ks8842"
@@ -302,6 +305,20 @@ static void ks8842_read_mac_addr(struct ks8842_adapter *adapter, u8 *dest)
ks8842_write16(adapter, 39, mac, REG_MACAR3);
}
+static void ks8842_write_mac_addr(struct ks8842_adapter *adapter, u8 *mac)
+{
+ unsigned long flags;
+ unsigned i;
+
+ spin_lock_irqsave(&adapter->lock, flags);
+ for (i = 0; i < ETH_ALEN; i++) {
+ ks8842_write8(adapter, 2, mac[ETH_ALEN - i - 1], REG_MARL + i);
+ ks8842_write8(adapter, 39, mac[ETH_ALEN - i - 1],
+ REG_MACAR1 + i);
+ }
+ spin_unlock_irqrestore(&adapter->lock, flags);
+}
+
static inline u16 ks8842_tx_fifo_space(struct ks8842_adapter *adapter)
{
return ks8842_read16(adapter, 16, REG_TXMIR) & 0x1fff;
@@ -520,13 +537,14 @@ static int ks8842_open(struct net_device *netdev)
/* reset the HW */
ks8842_reset_hw(adapter);
+ ks8842_write_mac_addr(adapter, netdev->dev_addr);
+
ks8842_update_link_status(netdev, adapter);
err = request_irq(adapter->irq, ks8842_irq, IRQF_SHARED, DRV_NAME,
adapter);
if (err) {
- printk(KERN_ERR "Failed to request IRQ: %d: %d\n",
- adapter->irq, err);
+ pr_err("Failed to request IRQ: %d: %d\n", adapter->irq, err);
return err;
}
@@ -567,10 +585,8 @@ static netdev_tx_t ks8842_xmit_frame(struct sk_buff *skb,
static int ks8842_set_mac(struct net_device *netdev, void *p)
{
struct ks8842_adapter *adapter = netdev_priv(netdev);
- unsigned long flags;
struct sockaddr *addr = p;
char *mac = (u8 *)addr->sa_data;
- int i;
dev_dbg(&adapter->pdev->dev, "%s: entry\n", __func__);
@@ -579,13 +595,7 @@ static int ks8842_set_mac(struct net_device *netdev, void *p)
memcpy(netdev->dev_addr, mac, netdev->addr_len);
- spin_lock_irqsave(&adapter->lock, flags);
- for (i = 0; i < ETH_ALEN; i++) {
- ks8842_write8(adapter, 2, mac[ETH_ALEN - i - 1], REG_MARL + i);
- ks8842_write8(adapter, 39, mac[ETH_ALEN - i - 1],
- REG_MACAR1 + i);
- }
- spin_unlock_irqrestore(&adapter->lock, flags);
+ ks8842_write_mac_addr(adapter, mac);
return 0;
}
@@ -604,6 +614,8 @@ static void ks8842_tx_timeout(struct net_device *netdev)
ks8842_reset_hw(adapter);
+ ks8842_write_mac_addr(adapter, netdev->dev_addr);
+
ks8842_update_link_status(netdev, adapter);
}
@@ -626,7 +638,9 @@ static int __devinit ks8842_probe(struct platform_device *pdev)
struct resource *iomem;
struct net_device *netdev;
struct ks8842_adapter *adapter;
+ struct ks8842_platform_data *pdata = pdev->dev.platform_data;
u16 id;
+ unsigned i;
iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!request_mem_region(iomem->start, resource_size(iomem), DRV_NAME))
@@ -657,7 +671,25 @@ static int __devinit ks8842_probe(struct platform_device *pdev)
netdev->netdev_ops = &ks8842_netdev_ops;
netdev->ethtool_ops = &ks8842_ethtool_ops;
- ks8842_read_mac_addr(adapter, netdev->dev_addr);
+ /* Check if a mac address was given */
+ i = netdev->addr_len;
+ if (pdata) {
+ for (i = 0; i < netdev->addr_len; i++)
+ if (pdata->macaddr[i] != 0)
+ break;
+
+ if (i < netdev->addr_len)
+ /* an address was passed, use it */
+ memcpy(netdev->dev_addr, pdata->macaddr,
+ netdev->addr_len);
+ }
+
+ if (i == netdev->addr_len) {
+ ks8842_read_mac_addr(adapter, netdev->dev_addr);
+
+ if (!is_valid_ether_addr(netdev->dev_addr))
+ random_ether_addr(netdev->dev_addr);
+ }
id = ks8842_read16(adapter, 32, REG_SW_ID_AND_ENABLE);
@@ -668,8 +700,7 @@ static int __devinit ks8842_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, netdev);
- printk(KERN_INFO DRV_NAME
- " Found chip, family: 0x%x, id: 0x%x, rev: 0x%x\n",
+ pr_info("Found chip, family: 0x%x, id: 0x%x, rev: 0x%x\n",
(id >> 8) & 0xff, (id >> 4) & 0xf, (id >> 1) & 0x7);
return 0;
diff --git a/drivers/net/ks8851.c b/drivers/net/ks8851.c
index 9e9f9b349766..b4fb07a6f13f 100644
--- a/drivers/net/ks8851.c
+++ b/drivers/net/ks8851.c
@@ -9,6 +9,8 @@
* published by the Free Software Foundation.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define DEBUG
#include <linux/module.h>
@@ -76,7 +78,9 @@ union ks8851_tx_hdr {
* @msg_enable: The message flags controlling driver output (see ethtool).
* @fid: Incrementing frame id tag.
* @rc_ier: Cached copy of KS_IER.
+ * @rc_ccr: Cached copy of KS_CCR.
* @rc_rxqcr: Cached copy of KS_RXQCR.
+ * @eeprom_size: Companion EEPROM size in bytes, 0 if no EEPROM
*
* The @lock ensures that the chip is protected when certain operations are
* in progress. When the read or write packet transfer is in progress, most
@@ -107,6 +111,8 @@ struct ks8851_net {
u16 rc_ier;
u16 rc_rxqcr;
+ u16 rc_ccr;
+ u16 eeprom_size;
struct mii_if_info mii;
struct ks8851_rxctrl rxctrl;
@@ -125,11 +131,6 @@ struct ks8851_net {
static int msg_enable;
-#define ks_info(_ks, _msg...) dev_info(&(_ks)->spidev->dev, _msg)
-#define ks_warn(_ks, _msg...) dev_warn(&(_ks)->spidev->dev, _msg)
-#define ks_dbg(_ks, _msg...) dev_dbg(&(_ks)->spidev->dev, _msg)
-#define ks_err(_ks, _msg...) dev_err(&(_ks)->spidev->dev, _msg)
-
/* shift for byte-enable data */
#define BYTE_EN(_x) ((_x) << 2)
@@ -167,7 +168,7 @@ static void ks8851_wrreg16(struct ks8851_net *ks, unsigned reg, unsigned val)
ret = spi_sync(ks->spidev, msg);
if (ret < 0)
- ks_err(ks, "spi_sync() failed\n");
+ netdev_err(ks->netdev, "spi_sync() failed\n");
}
/**
@@ -197,7 +198,7 @@ static void ks8851_wrreg8(struct ks8851_net *ks, unsigned reg, unsigned val)
ret = spi_sync(ks->spidev, msg);
if (ret < 0)
- ks_err(ks, "spi_sync() failed\n");
+ netdev_err(ks->netdev, "spi_sync() failed\n");
}
/**
@@ -263,7 +264,7 @@ static void ks8851_rdreg(struct ks8851_net *ks, unsigned op,
ret = spi_sync(ks->spidev, msg);
if (ret < 0)
- ks_err(ks, "read: spi_sync() failed\n");
+ netdev_err(ks->netdev, "read: spi_sync() failed\n");
else if (ks8851_rx_1msg(ks))
memcpy(rxb, trx + 2, rxl);
else
@@ -417,8 +418,8 @@ static void ks8851_rdfifo(struct ks8851_net *ks, u8 *buff, unsigned len)
u8 txb[1];
int ret;
- if (netif_msg_rx_status(ks))
- ks_dbg(ks, "%s: %d@%p\n", __func__, len, buff);
+ netif_dbg(ks, rx_status, ks->netdev,
+ "%s: %d@%p\n", __func__, len, buff);
/* set the operation we're issuing */
txb[0] = KS_SPIOP_RXFIFO;
@@ -434,7 +435,7 @@ static void ks8851_rdfifo(struct ks8851_net *ks, u8 *buff, unsigned len)
ret = spi_sync(ks->spidev, msg);
if (ret < 0)
- ks_err(ks, "%s: spi_sync() failed\n", __func__);
+ netdev_err(ks->netdev, "%s: spi_sync() failed\n", __func__);
}
/**
@@ -446,10 +447,11 @@ static void ks8851_rdfifo(struct ks8851_net *ks, u8 *buff, unsigned len)
*/
static void ks8851_dbg_dumpkkt(struct ks8851_net *ks, u8 *rxpkt)
{
- ks_dbg(ks, "pkt %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x\n",
- rxpkt[4], rxpkt[5], rxpkt[6], rxpkt[7],
- rxpkt[8], rxpkt[9], rxpkt[10], rxpkt[11],
- rxpkt[12], rxpkt[13], rxpkt[14], rxpkt[15]);
+ netdev_dbg(ks->netdev,
+ "pkt %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x\n",
+ rxpkt[4], rxpkt[5], rxpkt[6], rxpkt[7],
+ rxpkt[8], rxpkt[9], rxpkt[10], rxpkt[11],
+ rxpkt[12], rxpkt[13], rxpkt[14], rxpkt[15]);
}
/**
@@ -471,8 +473,8 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
rxfc = ks8851_rdreg8(ks, KS_RXFC);
- if (netif_msg_rx_status(ks))
- ks_dbg(ks, "%s: %d packets\n", __func__, rxfc);
+ netif_dbg(ks, rx_status, ks->netdev,
+ "%s: %d packets\n", __func__, rxfc);
/* Currently we're issuing a read per packet, but we could possibly
* improve the code by issuing a single read, getting the receive
@@ -489,9 +491,8 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
rxstat = rxh & 0xffff;
rxlen = rxh >> 16;
- if (netif_msg_rx_status(ks))
- ks_dbg(ks, "rx: stat 0x%04x, len 0x%04x\n",
- rxstat, rxlen);
+ netif_dbg(ks, rx_status, ks->netdev,
+ "rx: stat 0x%04x, len 0x%04x\n", rxstat, rxlen);
/* the length of the packet includes the 32bit CRC */
@@ -553,9 +554,8 @@ static void ks8851_irq_work(struct work_struct *work)
status = ks8851_rdreg16(ks, KS_ISR);
- if (netif_msg_intr(ks))
- dev_dbg(&ks->spidev->dev, "%s: status 0x%04x\n",
- __func__, status);
+ netif_dbg(ks, intr, ks->netdev,
+ "%s: status 0x%04x\n", __func__, status);
if (status & IRQ_LCI) {
/* should do something about checking link status */
@@ -582,8 +582,8 @@ static void ks8851_irq_work(struct work_struct *work)
* system */
ks->tx_space = ks8851_rdreg16(ks, KS_TXMIR);
- if (netif_msg_intr(ks))
- ks_dbg(ks, "%s: txspace %d\n", __func__, ks->tx_space);
+ netif_dbg(ks, intr, ks->netdev,
+ "%s: txspace %d\n", __func__, ks->tx_space);
}
if (status & IRQ_RXI)
@@ -659,9 +659,8 @@ static void ks8851_wrpkt(struct ks8851_net *ks, struct sk_buff *txp, bool irq)
unsigned fid = 0;
int ret;
- if (netif_msg_tx_queued(ks))
- dev_dbg(&ks->spidev->dev, "%s: skb %p, %d@%p, irq %d\n",
- __func__, txp, txp->len, txp->data, irq);
+ netif_dbg(ks, tx_queued, ks->netdev, "%s: skb %p, %d@%p, irq %d\n",
+ __func__, txp, txp->len, txp->data, irq);
fid = ks->fid++;
fid &= TXFR_TXFID_MASK;
@@ -685,7 +684,7 @@ static void ks8851_wrpkt(struct ks8851_net *ks, struct sk_buff *txp, bool irq)
ret = spi_sync(ks->spidev, msg);
if (ret < 0)
- ks_err(ks, "%s: spi_sync() failed\n", __func__);
+ netdev_err(ks->netdev, "%s: spi_sync() failed\n", __func__);
}
/**
@@ -746,8 +745,7 @@ static void ks8851_set_powermode(struct ks8851_net *ks, unsigned pwrmode)
{
unsigned pmecr;
- if (netif_msg_hw(ks))
- ks_dbg(ks, "setting power mode %d\n", pwrmode);
+ netif_dbg(ks, hw, ks->netdev, "setting power mode %d\n", pwrmode);
pmecr = ks8851_rdreg16(ks, KS_PMECR);
pmecr &= ~PMECR_PM_MASK;
@@ -771,8 +769,7 @@ static int ks8851_net_open(struct net_device *dev)
* else at the moment */
mutex_lock(&ks->lock);
- if (netif_msg_ifup(ks))
- ks_dbg(ks, "opening %s\n", dev->name);
+ netif_dbg(ks, ifup, ks->netdev, "opening\n");
/* bring chip out of any power saving mode it was in */
ks8851_set_powermode(ks, PMECR_PM_NORMAL);
@@ -828,8 +825,7 @@ static int ks8851_net_open(struct net_device *dev)
netif_start_queue(ks->netdev);
- if (netif_msg_ifup(ks))
- ks_dbg(ks, "network device %s up\n", dev->name);
+ netif_dbg(ks, ifup, ks->netdev, "network device up\n");
mutex_unlock(&ks->lock);
return 0;
@@ -847,8 +843,7 @@ static int ks8851_net_stop(struct net_device *dev)
{
struct ks8851_net *ks = netdev_priv(dev);
- if (netif_msg_ifdown(ks))
- ks_info(ks, "%s: shutting down\n", dev->name);
+ netif_info(ks, ifdown, dev, "shutting down\n");
netif_stop_queue(dev);
@@ -876,8 +871,8 @@ static int ks8851_net_stop(struct net_device *dev)
while (!skb_queue_empty(&ks->txq)) {
struct sk_buff *txb = skb_dequeue(&ks->txq);
- if (netif_msg_ifdown(ks))
- ks_dbg(ks, "%s: freeing txb %p\n", __func__, txb);
+ netif_dbg(ks, ifdown, ks->netdev,
+ "%s: freeing txb %p\n", __func__, txb);
dev_kfree_skb(txb);
}
@@ -906,9 +901,8 @@ static netdev_tx_t ks8851_start_xmit(struct sk_buff *skb,
unsigned needed = calc_txlen(skb->len);
netdev_tx_t ret = NETDEV_TX_OK;
- if (netif_msg_tx_queued(ks))
- ks_dbg(ks, "%s: skb %p, %d@%p\n", __func__,
- skb, skb->len, skb->data);
+ netif_dbg(ks, tx_queued, ks->netdev,
+ "%s: skb %p, %d@%p\n", __func__, skb, skb->len, skb->data);
spin_lock(&ks->statelock);
@@ -968,13 +962,13 @@ static void ks8851_set_rx_mode(struct net_device *dev)
rxctrl.rxcr1 = (RXCR1_RXME | RXCR1_RXAE |
RXCR1_RXPAFMA | RXCR1_RXMAFMA);
} else if (dev->flags & IFF_MULTICAST && !netdev_mc_empty(dev)) {
- struct dev_mc_list *mcptr;
+ struct netdev_hw_addr *ha;
u32 crc;
/* accept some multicast */
- netdev_for_each_mc_addr(mcptr, dev) {
- crc = ether_crc(ETH_ALEN, mcptr->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev) {
+ crc = ether_crc(ETH_ALEN, ha->addr);
crc >>= (32 - 6); /* get top six bits */
rxctrl.mchash[crc >> 4] |= (1 << (crc & 0xf));
@@ -1040,6 +1034,234 @@ static const struct net_device_ops ks8851_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
};
+/* Companion eeprom access */
+
+enum { /* EEPROM programming states */
+ EEPROM_CONTROL,
+ EEPROM_ADDRESS,
+ EEPROM_DATA,
+ EEPROM_COMPLETE
+};
+
+/**
+ * ks8851_eeprom_read - read a 16-bit word from the ks8851 companion EEPROM
+ * @dev: The network device the PHY is on.
+ * @addr: EEPROM address to read
+ *
+ * eeprom_size: used to define the data coding length. Can be changed
+ * through debugfs.
+ *
+ * Programs a read on the EEPROM using ks8851 EEPROM SW access feature.
+ * Warning: The READ feature is not supported on ks8851 revision 0.
+ *
+ * Rough programming model:
+ * - on period start: set clock high and read value on bus
+ * - on period / 2: set clock low and program value on bus
+ * - start on period / 2
+ */
+unsigned int ks8851_eeprom_read(struct net_device *dev, unsigned int addr)
+{
+ struct ks8851_net *ks = netdev_priv(dev);
+ int eepcr;
+ int ctrl = EEPROM_OP_READ;
+ int state = EEPROM_CONTROL;
+ int bit_count = EEPROM_OP_LEN - 1;
+ unsigned int data = 0;
+ int dummy;
+ unsigned int addr_len;
+
+ addr_len = (ks->eeprom_size == 128) ? 6 : 8;
+
+ /* start transaction: chip select high, authorize write */
+ mutex_lock(&ks->lock);
+ eepcr = EEPCR_EESA | EEPCR_EESRWA;
+ ks8851_wrreg16(ks, KS_EEPCR, eepcr);
+ eepcr |= EEPCR_EECS;
+ ks8851_wrreg16(ks, KS_EEPCR, eepcr);
+ mutex_unlock(&ks->lock);
+
+ while (state != EEPROM_COMPLETE) {
+ /* falling clock period starts... */
+ /* set EED_IO pin for control and address */
+ eepcr &= ~EEPCR_EEDO;
+ switch (state) {
+ case EEPROM_CONTROL:
+ eepcr |= ((ctrl >> bit_count) & 1) << 2;
+ if (bit_count-- <= 0) {
+ bit_count = addr_len - 1;
+ state = EEPROM_ADDRESS;
+ }
+ break;
+ case EEPROM_ADDRESS:
+ eepcr |= ((addr >> bit_count) & 1) << 2;
+ bit_count--;
+ break;
+ case EEPROM_DATA:
+ /* Change to receive mode */
+ eepcr &= ~EEPCR_EESRWA;
+ break;
+ }
+
+ /* lower clock */
+ eepcr &= ~EEPCR_EESCK;
+
+ mutex_lock(&ks->lock);
+ ks8851_wrreg16(ks, KS_EEPCR, eepcr);
+ mutex_unlock(&ks->lock);
+
+ /* wait period / 2 */
+ udelay(EEPROM_SK_PERIOD / 2);
+
+ /* rising clock period starts... */
+
+ /* raise clock */
+ mutex_lock(&ks->lock);
+ eepcr |= EEPCR_EESCK;
+ ks8851_wrreg16(ks, KS_EEPCR, eepcr);
+ mutex_unlock(&ks->lock);
+
+ /* Manage read */
+ switch (state) {
+ case EEPROM_ADDRESS:
+ if (bit_count < 0) {
+ bit_count = EEPROM_DATA_LEN - 1;
+ state = EEPROM_DATA;
+ }
+ break;
+ case EEPROM_DATA:
+ mutex_lock(&ks->lock);
+ dummy = ks8851_rdreg16(ks, KS_EEPCR);
+ mutex_unlock(&ks->lock);
+ data |= ((dummy >> EEPCR_EESB_OFFSET) & 1) << bit_count;
+ if (bit_count-- <= 0)
+ state = EEPROM_COMPLETE;
+ break;
+ }
+
+ /* wait period / 2 */
+ udelay(EEPROM_SK_PERIOD / 2);
+ }
+
+ /* close transaction */
+ mutex_lock(&ks->lock);
+ eepcr &= ~EEPCR_EECS;
+ ks8851_wrreg16(ks, KS_EEPCR, eepcr);
+ eepcr = 0;
+ ks8851_wrreg16(ks, KS_EEPCR, eepcr);
+ mutex_unlock(&ks->lock);
+
+ return data;
+}
+
+/**
+ * ks8851_eeprom_write - write a 16-bit word to the ks8851 companion EEPROM
+ * @dev: The network device the PHY is on.
+ * @op: operand (can be WRITE, EWEN, EWDS)
+ * @addr: EEPROM address to write
+ * @data: data to write
+ *
+ * eeprom_size: used to define the data coding length. Can be changed
+ * through debugfs.
+ *
+ * Programs a write on the EEPROM using ks8851 EEPROM SW access feature.
+ *
+ * Note that a write enable is required before writing data.
+ *
+ * Rough programming model:
+ * - on period start: set clock high
+ * - on period / 2: set clock low and program value on bus
+ * - start on period / 2
+ */
+void ks8851_eeprom_write(struct net_device *dev, unsigned int op,
+ unsigned int addr, unsigned int data)
+{
+ struct ks8851_net *ks = netdev_priv(dev);
+ int eepcr;
+ int state = EEPROM_CONTROL;
+ int bit_count = EEPROM_OP_LEN - 1;
+ unsigned int addr_len;
+
+ addr_len = (ks->eeprom_size == 128) ? 6 : 8;
+
+ switch (op) {
+ case EEPROM_OP_EWEN:
+ addr = 0x30;
+ break;
+ case EEPROM_OP_EWDS:
+ addr = 0;
+ break;
+ }
+
+ /* start transaction: chip select high, authorize write */
+ mutex_lock(&ks->lock);
+ eepcr = EEPCR_EESA | EEPCR_EESRWA;
+ ks8851_wrreg16(ks, KS_EEPCR, eepcr);
+ eepcr |= EEPCR_EECS;
+ ks8851_wrreg16(ks, KS_EEPCR, eepcr);
+ mutex_unlock(&ks->lock);
+
+ while (state != EEPROM_COMPLETE) {
+ /* falling clock period starts... */
+ /* set EED_IO pin for control and address */
+ eepcr &= ~EEPCR_EEDO;
+ switch (state) {
+ case EEPROM_CONTROL:
+ eepcr |= ((op >> bit_count) & 1) << 2;
+ if (bit_count-- <= 0) {
+ bit_count = addr_len - 1;
+ state = EEPROM_ADDRESS;
+ }
+ break;
+ case EEPROM_ADDRESS:
+ eepcr |= ((addr >> bit_count) & 1) << 2;
+ if (bit_count-- <= 0) {
+ if (op == EEPROM_OP_WRITE) {
+ bit_count = EEPROM_DATA_LEN - 1;
+ state = EEPROM_DATA;
+ } else {
+ state = EEPROM_COMPLETE;
+ }
+ }
+ break;
+ case EEPROM_DATA:
+ eepcr |= ((data >> bit_count) & 1) << 2;
+ if (bit_count-- <= 0)
+ state = EEPROM_COMPLETE;
+ break;
+ }
+
+ /* lower clock */
+ eepcr &= ~EEPCR_EESCK;
+
+ mutex_lock(&ks->lock);
+ ks8851_wrreg16(ks, KS_EEPCR, eepcr);
+ mutex_unlock(&ks->lock);
+
+ /* wait period / 2 */
+ udelay(EEPROM_SK_PERIOD / 2);
+
+ /* rising clock period starts... */
+
+ /* raise clock */
+ eepcr |= EEPCR_EESCK;
+ mutex_lock(&ks->lock);
+ ks8851_wrreg16(ks, KS_EEPCR, eepcr);
+ mutex_unlock(&ks->lock);
+
+ /* wait period / 2 */
+ udelay(EEPROM_SK_PERIOD / 2);
+ }
+
+ /* close transaction */
+ mutex_lock(&ks->lock);
+ eepcr &= ~EEPCR_EECS;
+ ks8851_wrreg16(ks, KS_EEPCR, eepcr);
+ eepcr = 0;
+ ks8851_wrreg16(ks, KS_EEPCR, eepcr);
+ mutex_unlock(&ks->lock);
+
+}
+
/* ethtool support */
static void ks8851_get_drvinfo(struct net_device *dev,
@@ -1086,6 +1308,117 @@ static int ks8851_nway_reset(struct net_device *dev)
return mii_nway_restart(&ks->mii);
}
+static int ks8851_get_eeprom_len(struct net_device *dev)
+{
+ struct ks8851_net *ks = netdev_priv(dev);
+ return ks->eeprom_size;
+}
+
+static int ks8851_get_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+ struct ks8851_net *ks = netdev_priv(dev);
+ u16 *eeprom_buff;
+ int first_word;
+ int last_word;
+ int ret_val = 0;
+ u16 i;
+
+ if (eeprom->len == 0)
+ return -EINVAL;
+
+ if (eeprom->len > ks->eeprom_size)
+ return -EINVAL;
+
+ eeprom->magic = ks8851_rdreg16(ks, KS_CIDER);
+
+ first_word = eeprom->offset >> 1;
+ last_word = (eeprom->offset + eeprom->len - 1) >> 1;
+
+ eeprom_buff = kmalloc(sizeof(u16) *
+ (last_word - first_word + 1), GFP_KERNEL);
+ if (!eeprom_buff)
+ return -ENOMEM;
+
+ for (i = 0; i < last_word - first_word + 1; i++)
+ eeprom_buff[i] = ks8851_eeprom_read(dev, first_word + i);
+
+ /* Device's eeprom is little-endian, word addressable */
+ for (i = 0; i < last_word - first_word + 1; i++)
+ le16_to_cpus(&eeprom_buff[i]);
+
+ memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
+ kfree(eeprom_buff);
+
+ return ret_val;
+}
+
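As a worked example of the offset arithmetic above: a request with eeprom->offset = 3 and eeprom->len = 4 gives first_word = 1 and last_word = 3, so three 16-bit words are read into eeprom_buff; since (offset & 1) is 1, the final memcpy() starts one byte into the buffer and returns exactly EEPROM bytes 3..6.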
+static int ks8851_set_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+ struct ks8851_net *ks = netdev_priv(dev);
+ u16 *eeprom_buff;
+ void *ptr;
+ int max_len;
+ int first_word;
+ int last_word;
+ int ret_val = 0;
+ u16 i;
+
+ if (eeprom->len == 0)
+ return -EOPNOTSUPP;
+
+ if (eeprom->len > ks->eeprom_size)
+ return -EINVAL;
+
+ if (eeprom->magic != ks8851_rdreg16(ks, KS_CIDER))
+ return -EFAULT;
+
+ first_word = eeprom->offset >> 1;
+ last_word = (eeprom->offset + eeprom->len - 1) >> 1;
+ max_len = (last_word - first_word + 1) * 2;
+ eeprom_buff = kmalloc(max_len, GFP_KERNEL);
+ if (!eeprom_buff)
+ return -ENOMEM;
+
+ ptr = (void *)eeprom_buff;
+
+ if (eeprom->offset & 1) {
+ /* need read/modify/write of first changed EEPROM word */
+ /* only the second byte of the word is being modified */
+ eeprom_buff[0] = ks8851_eeprom_read(dev, first_word);
+ ptr++;
+ }
+ if ((eeprom->offset + eeprom->len) & 1)
+ /* need read/modify/write of last changed EEPROM word */
+ /* only the first byte of the word is being modified */
+ eeprom_buff[last_word - first_word] =
+ ks8851_eeprom_read(dev, last_word);
+
+
+ /* Device's eeprom is little-endian, word addressable */
+ le16_to_cpus(&eeprom_buff[0]);
+ le16_to_cpus(&eeprom_buff[last_word - first_word]);
+
+ memcpy(ptr, bytes, eeprom->len);
+
+ for (i = 0; i < last_word - first_word + 1; i++)
+ eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]);
+
+ ks8851_eeprom_write(dev, EEPROM_OP_EWEN, 0, 0);
+
+ for (i = 0; i < last_word - first_word + 1; i++) {
+ ks8851_eeprom_write(dev, EEPROM_OP_WRITE, first_word + i,
+ eeprom_buff[i]);
+ mdelay(EEPROM_WRITE_TIME);
+ }
+
+ ks8851_eeprom_write(dev, EEPROM_OP_EWDS, 0, 0);
+
+ kfree(eeprom_buff);
+ return ret_val;
+}
+
static const struct ethtool_ops ks8851_ethtool_ops = {
.get_drvinfo = ks8851_get_drvinfo,
.get_msglevel = ks8851_get_msglevel,
@@ -1094,6 +1427,9 @@ static const struct ethtool_ops ks8851_ethtool_ops = {
.set_settings = ks8851_set_settings,
.get_link = ks8851_get_link,
.nway_reset = ks8851_nway_reset,
+ .get_eeprom_len = ks8851_get_eeprom_len,
+ .get_eeprom = ks8851_get_eeprom,
+ .set_eeprom = ks8851_set_eeprom,
};
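With the three new hooks wired into ks8851_ethtool_ops, the EEPROM becomes reachable from user space through the standard ETHTOOL_GEEPROM/ETHTOOL_SEEPROM ioctls (or ethtool -e / ethtool -E). A rough user-space sketch, assuming the interface is named eth0; the driver reports the KS_CIDER chip ID as the magic value, which a later ETHTOOL_SEEPROM request must echo back:

/* Hypothetical user-space example: dump the first 16 EEPROM bytes. */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct { struct ethtool_eeprom ee; unsigned char data[16]; } req;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&req, 0, sizeof(req));
	memset(&ifr, 0, sizeof(ifr));
	req.ee.cmd = ETHTOOL_GEEPROM;
	req.ee.len = sizeof(req.data);
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&req;

	if (fd >= 0 && ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("magic %#x, byte0 %02x\n", req.ee.magic, req.data[0]);
	return 0;
}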
/* MII interface controls */
@@ -1187,17 +1523,17 @@ static int ks8851_read_selftest(struct ks8851_net *ks)
rd = ks8851_rdreg16(ks, KS_MBIR);
if ((rd & both_done) != both_done) {
- ks_warn(ks, "Memory selftest not finished\n");
+ netdev_warn(ks->netdev, "Memory selftest not finished\n");
return 0;
}
if (rd & MBIR_TXMBFA) {
- ks_err(ks, "TX memory selftest fail\n");
+ netdev_err(ks->netdev, "TX memory selftest fail\n");
ret |= 1;
}
if (rd & MBIR_RXMBFA) {
- ks_err(ks, "RX memory selftest fail\n");
+ netdev_err(ks->netdev, "RX memory selftest fail\n");
ret |= 2;
}
@@ -1279,6 +1615,14 @@ static int __devinit ks8851_probe(struct spi_device *spi)
goto err_id;
}
+ /* cache the contents of the CCR register for EEPROM, etc. */
+ ks->rc_ccr = ks8851_rdreg16(ks, KS_CCR);
+
+ if (ks->rc_ccr & CCR_EEPROM)
+ ks->eeprom_size = 128;
+ else
+ ks->eeprom_size = 0;
+
ks8851_read_selftest(ks);
ks8851_init_mac(ks);
@@ -1295,9 +1639,9 @@ static int __devinit ks8851_probe(struct spi_device *spi)
goto err_netdev;
}
- dev_info(&spi->dev, "revision %d, MAC %pM, IRQ %d\n",
- CIDER_REV_GET(ks8851_rdreg16(ks, KS_CIDER)),
- ndev->dev_addr, ndev->irq);
+ netdev_info(ndev, "revision %d, MAC %pM, IRQ %d\n",
+ CIDER_REV_GET(ks8851_rdreg16(ks, KS_CIDER)),
+ ndev->dev_addr, ndev->irq);
return 0;
@@ -1316,7 +1660,7 @@ static int __devexit ks8851_remove(struct spi_device *spi)
struct ks8851_net *priv = dev_get_drvdata(&spi->dev);
if (netif_msg_drv(priv))
- dev_info(&spi->dev, "remove");
+ dev_info(&spi->dev, "remove\n");
unregister_netdev(priv->netdev);
free_irq(spi->irq, priv);
diff --git a/drivers/net/ks8851.h b/drivers/net/ks8851.h
index f52c312cc356..537fb06e5932 100644
--- a/drivers/net/ks8851.h
+++ b/drivers/net/ks8851.h
@@ -25,12 +25,24 @@
#define OBCR_ODS_16mA (1 << 6)
#define KS_EEPCR 0x22
+#define EEPCR_EESRWA (1 << 5)
#define EEPCR_EESA (1 << 4)
-#define EEPCR_EESB (1 << 3)
+#define EEPCR_EESB_OFFSET 3
+#define EEPCR_EESB (1 << EEPCR_EESB_OFFSET)
#define EEPCR_EEDO (1 << 2)
#define EEPCR_EESCK (1 << 1)
#define EEPCR_EECS (1 << 0)
+#define EEPROM_OP_LEN 3 /* opcode length, in bits */
+#define EEPROM_OP_READ 0x06
+#define EEPROM_OP_EWEN 0x04
+#define EEPROM_OP_WRITE 0x05
+#define EEPROM_OP_EWDS 0x14
+
+#define EEPROM_DATA_LEN 16 /* 16 bits EEPROM */
+#define EEPROM_WRITE_TIME 4 /* write cycle time in ms */
+#define EEPROM_SK_PERIOD 400 /* in us */
+
#define KS_MBIR 0x24
#define MBIR_TXMBF (1 << 12)
#define MBIR_TXMBFA (1 << 11)
diff --git a/drivers/net/ks8851_mll.c b/drivers/net/ks8851_mll.c
index 6354ab3a45a6..2e2c69b24062 100644
--- a/drivers/net/ks8851_mll.c
+++ b/drivers/net/ks8851_mll.c
@@ -21,6 +21,8 @@
* KS8851 16bit MLL chip from Micrel Inc.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
@@ -361,7 +363,6 @@ static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 };
#define MAX_MCAST_LST 32
#define HW_MCAST_SIZE 8
-#define MAC_ADDR_LEN 6
/**
* union ks_tx_hdr - tx header data
@@ -449,7 +450,7 @@ struct ks_net {
u16 promiscuous;
u16 all_mcast;
u16 mcast_lst_size;
- u8 mcast_lst[MAX_MCAST_LST][MAC_ADDR_LEN];
+ u8 mcast_lst[MAX_MCAST_LST][ETH_ALEN];
u8 mcast_bits[HW_MCAST_SIZE];
u8 mac_addr[6];
u8 fid;
@@ -459,11 +460,6 @@ struct ks_net {
static int msg_enable;
-#define ks_info(_ks, _msg...) dev_info(&(_ks)->pdev->dev, _msg)
-#define ks_warn(_ks, _msg...) dev_warn(&(_ks)->pdev->dev, _msg)
-#define ks_dbg(_ks, _msg...) dev_dbg(&(_ks)->pdev->dev, _msg)
-#define ks_err(_ks, _msg...) dev_err(&(_ks)->pdev->dev, _msg)
-
#define BE3 0x8000 /* Byte Enable 3 */
#define BE2 0x4000 /* Byte Enable 2 */
#define BE1 0x2000 /* Byte Enable 1 */
@@ -625,8 +621,7 @@ static void ks_set_powermode(struct ks_net *ks, unsigned pwrmode)
{
unsigned pmecr;
- if (netif_msg_hw(ks))
- ks_dbg(ks, "setting power mode %d\n", pwrmode);
+ netif_dbg(ks, hw, ks->netdev, "setting power mode %d\n", pwrmode);
ks_rdreg16(ks, KS_GRR);
pmecr = ks_rdreg16(ks, KS_PMECR);
@@ -806,11 +801,10 @@ static void ks_rcv(struct ks_net *ks, struct net_device *netdev)
/* read data block including CRC 4 bytes */
ks_read_qmu(ks, (u16 *)skb->data, frame_hdr->len);
skb_put(skb, frame_hdr->len);
- skb->dev = netdev;
skb->protocol = eth_type_trans(skb, netdev);
netif_rx(skb);
} else {
- printk(KERN_ERR "%s: err:skb alloc\n", __func__);
+ pr_err("%s: err:skb alloc\n", __func__);
ks_wrreg16(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_RRXEF));
if (skb)
dev_kfree_skb_irq(skb);
@@ -837,9 +831,8 @@ static void ks_update_link_status(struct net_device *netdev, struct ks_net *ks)
netif_carrier_off(netdev);
link_up_status = false;
}
- if (netif_msg_link(ks))
- ks_dbg(ks, "%s: %s\n",
- __func__, link_up_status ? "UP" : "DOWN");
+ netif_dbg(ks, link, ks->netdev,
+ "%s: %s\n", __func__, link_up_status ? "UP" : "DOWN");
}
/**
@@ -909,15 +902,13 @@ static int ks_net_open(struct net_device *netdev)
* else at the moment.
*/
- if (netif_msg_ifup(ks))
- ks_dbg(ks, "%s - entry\n", __func__);
+ netif_dbg(ks, ifup, ks->netdev, "%s - entry\n", __func__);
/* reset the HW */
err = request_irq(ks->irq, ks_irq, KS_INT_FLAGS, DRV_NAME, netdev);
if (err) {
- printk(KERN_ERR "Failed to request IRQ: %d: %d\n",
- ks->irq, err);
+ pr_err("Failed to request IRQ: %d: %d\n", ks->irq, err);
return err;
}
@@ -930,8 +921,7 @@ static int ks_net_open(struct net_device *netdev)
ks_enable_qmu(ks);
netif_start_queue(ks->netdev);
- if (netif_msg_ifup(ks))
- ks_dbg(ks, "network device %s up\n", netdev->name);
+ netif_dbg(ks, ifup, ks->netdev, "network device up\n");
return 0;
}
@@ -948,8 +938,7 @@ static int ks_net_stop(struct net_device *netdev)
{
struct ks_net *ks = netdev_priv(netdev);
- if (netif_msg_ifdown(ks))
- ks_info(ks, "%s: shutting down\n", netdev->name);
+ netif_info(ks, ifdown, netdev, "shutting down\n");
netif_stop_queue(netdev);
@@ -1181,7 +1170,7 @@ static void ks_set_mcast(struct ks_net *ks, u16 mcast)
static void ks_set_rx_mode(struct net_device *netdev)
{
struct ks_net *ks = netdev_priv(netdev);
- struct dev_mc_list *ptr;
+ struct netdev_hw_addr *ha;
/* Turn on/off promiscuous mode. */
if ((netdev->flags & IFF_PROMISC) == IFF_PROMISC)
@@ -1198,13 +1187,12 @@ static void ks_set_rx_mode(struct net_device *netdev)
if (netdev_mc_count(netdev) <= MAX_MCAST_LST) {
int i = 0;
- netdev_for_each_mc_addr(ptr, netdev) {
- if (!(*ptr->dmi_addr & 1))
+ netdev_for_each_mc_addr(ha, netdev) {
+ if (!(*ha->addr & 1))
continue;
if (i >= MAX_MCAST_LST)
break;
- memcpy(ks->mcast_lst[i++], ptr->dmi_addr,
- MAC_ADDR_LEN);
+ memcpy(ks->mcast_lst[i++], ha->addr, ETH_ALEN);
}
ks->mcast_lst_size = (u8)i;
ks_set_grpaddr(ks);
@@ -1430,21 +1418,21 @@ static int ks_read_selftest(struct ks_net *ks)
rd = ks_rdreg16(ks, KS_MBIR);
if ((rd & both_done) != both_done) {
- ks_warn(ks, "Memory selftest not finished\n");
+ netdev_warn(ks->netdev, "Memory selftest not finished\n");
return 0;
}
if (rd & MBIR_TXMBFA) {
- ks_err(ks, "TX memory selftest fails\n");
+ netdev_err(ks->netdev, "TX memory selftest fails\n");
ret |= 1;
}
if (rd & MBIR_RXMBFA) {
- ks_err(ks, "RX memory selftest fails\n");
+ netdev_err(ks->netdev, "RX memory selftest fails\n");
ret |= 2;
}
- ks_info(ks, "the selftest passes\n");
+ netdev_info(ks->netdev, "the selftest passes\n");
return ret;
}
@@ -1515,7 +1503,7 @@ static int ks_hw_init(struct ks_net *ks)
ks->frame_head_info = (struct type_frame_head *) \
kmalloc(MHEADER_SIZE, GFP_KERNEL);
if (!ks->frame_head_info) {
- printk(KERN_ERR "Error: Fail to allocate frame memory\n");
+ pr_err("Error: Fail to allocate frame memory\n");
return false;
}
@@ -1581,7 +1569,7 @@ static int __devinit ks8851_probe(struct platform_device *pdev)
ks->mii.mdio_read = ks_phy_read;
ks->mii.mdio_write = ks_phy_write;
- ks_info(ks, "message enable is %d\n", msg_enable);
+ netdev_info(netdev, "message enable is %d\n", msg_enable);
/* set the default message enable */
ks->msg_enable = netif_msg_init(msg_enable, (NETIF_MSG_DRV |
NETIF_MSG_PROBE |
@@ -1590,13 +1578,13 @@ static int __devinit ks8851_probe(struct platform_device *pdev)
/* simple check for a valid chip being connected to the bus */
if ((ks_rdreg16(ks, KS_CIDER) & ~CIDER_REV_MASK) != CIDER_ID) {
- ks_err(ks, "failed to read device ID\n");
+ netdev_err(netdev, "failed to read device ID\n");
err = -ENODEV;
goto err_register;
}
if (ks_read_selftest(ks)) {
- ks_err(ks, "failed to read device ID\n");
+ netdev_err(netdev, "failed to read device ID\n");
err = -ENODEV;
goto err_register;
}
@@ -1627,9 +1615,8 @@ static int __devinit ks8851_probe(struct platform_device *pdev)
id = ks_rdreg16(ks, KS_CIDER);
- printk(KERN_INFO DRV_NAME
- " Found chip, family: 0x%x, id: 0x%x, rev: 0x%x\n",
- (id >> 8) & 0xff, (id >> 4) & 0xf, (id >> 1) & 0x7);
+ netdev_info(netdev, "Found chip, family: 0x%x, id: 0x%x, rev: 0x%x\n",
+ (id >> 8) & 0xff, (id >> 4) & 0xf, (id >> 1) & 0x7);
return 0;
err_register:
diff --git a/drivers/net/ksz884x.c b/drivers/net/ksz884x.c
index 0606a1f359fb..c80ca64277b2 100644
--- a/drivers/net/ksz884x.c
+++ b/drivers/net/ksz884x.c
@@ -14,10 +14,11 @@
* GNU General Public License for more details.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/version.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
@@ -1484,11 +1485,6 @@ struct dev_priv {
int promiscuous;
};
-#define ks_info(_ks, _msg...) dev_info(&(_ks)->pdev->dev, _msg)
-#define ks_warn(_ks, _msg...) dev_warn(&(_ks)->pdev->dev, _msg)
-#define ks_dbg(_ks, _msg...) dev_dbg(&(_ks)->pdev->dev, _msg)
-#define ks_err(_ks, _msg...) dev_err(&(_ks)->pdev->dev, _msg)
-
#define DRV_NAME "KSZ884X PCI"
#define DEVICE_NAME "KSZ884x PCI"
#define DRV_VERSION "1.0.0"
@@ -3835,7 +3831,7 @@ static void ksz_check_desc_num(struct ksz_desc_info *info)
alloc >>= 1;
}
if (alloc != 1 || shift < MIN_DESC_SHIFT) {
- printk(KERN_ALERT "Hardware descriptor numbers not right!\n");
+ pr_alert("Hardware descriptor numbers not right!\n");
while (alloc) {
shift++;
alloc >>= 1;
@@ -4546,8 +4542,7 @@ static int ksz_alloc_mem(struct dev_info *adapter)
(((sizeof(struct ksz_hw_desc) + DESC_ALIGNMENT - 1) /
DESC_ALIGNMENT) * DESC_ALIGNMENT);
if (hw->rx_desc_info.size != sizeof(struct ksz_hw_desc))
- printk(KERN_ALERT
- "Hardware descriptor size not right!\n");
+ pr_alert("Hardware descriptor size not right!\n");
ksz_check_desc_num(&hw->rx_desc_info);
ksz_check_desc_num(&hw->tx_desc_info);
@@ -4689,7 +4684,7 @@ static void send_packet(struct sk_buff *skb, struct net_device *dev)
int frag;
skb_frag_t *this_frag;
- dma_buf->len = skb->len - skb->data_len;
+ dma_buf->len = skb_headlen(skb);
dma_buf->dma = pci_map_single(
hw_priv->pdev, skb->data, dma_buf->len,
@@ -5049,8 +5044,6 @@ static inline int rx_proc(struct net_device *dev, struct ksz_hw* hw,
dma_buf->skb->data, packet_len);
} while (0);
- skb->dev = dev;
-
skb->protocol = eth_type_trans(skb, dev);
if (hw->rx_cfg & (DMA_RX_CSUM_UDP | DMA_RX_CSUM_TCP))
@@ -5061,8 +5054,6 @@ static inline int rx_proc(struct net_device *dev, struct ksz_hw* hw,
priv->stats.rx_bytes += packet_len;
/* Notify upper layer for received packet. */
- dev->last_rx = jiffies;
-
rx_status = netif_rx(skb);
return 0;
@@ -5320,10 +5311,10 @@ static irqreturn_t netdev_intr(int irq, void *dev_id)
u32 data;
hw->intr_mask &= ~KS884X_INT_TX_STOPPED;
- printk(KERN_INFO "Tx stopped\n");
+ pr_info("Tx stopped\n");
data = readl(hw->io + KS_DMA_TX_CTRL);
if (!(data & DMA_TX_ENABLE))
- printk(KERN_INFO "Tx disabled\n");
+ pr_info("Tx disabled\n");
break;
}
} while (0);
@@ -5496,6 +5487,18 @@ static int prepare_hardware(struct net_device *dev)
return 0;
}
+static void set_media_state(struct net_device *dev, int media_state)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+
+ if (media_state == priv->media_state)
+ netif_carrier_on(dev);
+ else
+ netif_carrier_off(dev);
+ netif_info(priv, link, dev, "link %s\n",
+ media_state == priv->media_state ? "on" : "off");
+}
+
/**
* netdev_open - open network device
* @dev: Network device.
@@ -5585,15 +5588,7 @@ static int netdev_open(struct net_device *dev)
priv->media_state = port->linked->state;
- if (media_connected == priv->media_state)
- netif_carrier_on(dev);
- else
- netif_carrier_off(dev);
- if (netif_msg_link(priv))
- printk(KERN_INFO "%s link %s\n", dev->name,
- (media_connected == priv->media_state ?
- "on" : "off"));
-
+ set_media_state(dev, media_connected);
netif_start_queue(dev);
return 0;
@@ -5767,7 +5762,7 @@ static void netdev_set_rx_mode(struct net_device *dev)
struct dev_priv *priv = netdev_priv(dev);
struct dev_info *hw_priv = priv->adapter;
struct ksz_hw *hw = &hw_priv->hw;
- struct dev_mc_list *mc_ptr;
+ struct netdev_hw_addr *ha;
int multicast = (dev->flags & IFF_ALLMULTI);
dev_set_promiscuous(dev, priv, hw, (dev->flags & IFF_PROMISC));
@@ -5784,7 +5779,7 @@ static void netdev_set_rx_mode(struct net_device *dev)
int i = 0;
/* List too big to support so turn on all multicast mode. */
- if (dev->mc_count > MAX_MULTICAST_LIST) {
+ if (netdev_mc_count(dev) > MAX_MULTICAST_LIST) {
if (MAX_MULTICAST_LIST != hw->multi_list_size) {
hw->multi_list_size = MAX_MULTICAST_LIST;
++hw->all_multi;
@@ -5793,13 +5788,12 @@ static void netdev_set_rx_mode(struct net_device *dev)
return;
}
- netdev_for_each_mc_addr(mc_ptr, dev) {
- if (!(*mc_ptr->dmi_addr & 1))
+ netdev_for_each_mc_addr(ha, dev) {
+ if (!(*ha->addr & 1))
continue;
if (i >= MAX_MULTICAST_LIST)
break;
- memcpy(hw->multi_list[i++], mc_ptr->dmi_addr,
- MAC_ADDR_LEN);
+ memcpy(hw->multi_list[i++], ha->addr, MAC_ADDR_LEN);
}
hw->multi_list_size = (u8) i;
hw_set_grp_addr(hw);
@@ -6683,16 +6677,8 @@ static void update_link(struct net_device *dev, struct dev_priv *priv,
{
if (priv->media_state != port->linked->state) {
priv->media_state = port->linked->state;
- if (netif_running(dev)) {
- if (media_connected == priv->media_state)
- netif_carrier_on(dev);
- else
- netif_carrier_off(dev);
- if (netif_msg_link(priv))
- printk(KERN_INFO "%s link %s\n", dev->name,
- (media_connected == priv->media_state ?
- "on" : "off"));
- }
+ if (netif_running(dev))
+ set_media_state(dev, media_connected);
}
}
@@ -6986,7 +6972,7 @@ static int __init pcidev_init(struct pci_dev *pdev,
int pi;
int port_count;
int result;
- char banner[80];
+ char banner[sizeof(version)];
struct ksz_switch *sw = NULL;
result = pci_enable_device(pdev);
@@ -7010,10 +6996,9 @@ static int __init pcidev_init(struct pci_dev *pdev,
result = -ENOMEM;
- info = kmalloc(sizeof(struct platform_info), GFP_KERNEL);
+ info = kzalloc(sizeof(struct platform_info), GFP_KERNEL);
if (!info)
goto pcidev_init_dev_err;
- memset(info, 0, sizeof(struct platform_info));
hw_priv = &info->dev_info;
hw_priv->pdev = pdev;
@@ -7027,15 +7012,15 @@ static int __init pcidev_init(struct pci_dev *pdev,
cnt = hw_init(hw);
if (!cnt) {
if (msg_enable & NETIF_MSG_PROBE)
- printk(KERN_ALERT "chip not detected\n");
+ pr_alert("chip not detected\n");
result = -ENODEV;
goto pcidev_init_alloc_err;
}
- sprintf(banner, "%s\n", version);
- banner[13] = cnt + '0';
- ks_info(hw_priv, "%s", banner);
- ks_dbg(hw_priv, "Mem = %p; IRQ = %d\n", hw->io, pdev->irq);
+ snprintf(banner, sizeof(banner), "%s", version);
+ banner[13] = cnt + '0'; /* Replace x in "Micrel KSZ884x" */
+ dev_info(&hw_priv->pdev->dev, "%s\n", banner);
+ dev_dbg(&hw_priv->pdev->dev, "Mem = %p; IRQ = %d\n", hw->io, pdev->irq);
/* Assume device is KSZ8841. */
hw->dev_count = 1;
@@ -7064,10 +7049,9 @@ static int __init pcidev_init(struct pci_dev *pdev,
mib_port_count = SWITCH_PORT_NUM;
}
hw->mib_port_cnt = TOTAL_PORT_NUM;
- hw->ksz_switch = kmalloc(sizeof(struct ksz_switch), GFP_KERNEL);
+ hw->ksz_switch = kzalloc(sizeof(struct ksz_switch), GFP_KERNEL);
if (!hw->ksz_switch)
goto pcidev_init_alloc_err;
- memset(hw->ksz_switch, 0, sizeof(struct ksz_switch));
sw = hw->ksz_switch;
}
diff --git a/drivers/net/lance.c b/drivers/net/lance.c
index 7b9447646f8a..21f8adaa87c1 100644
--- a/drivers/net/lance.c
+++ b/drivers/net/lance.c
@@ -945,7 +945,7 @@ static void lance_tx_timeout (struct net_device *dev)
#endif
lance_restart (dev, 0x0043, 1);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue (dev);
}
@@ -1011,8 +1011,6 @@ static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
outw(0x0000, ioaddr+LANCE_ADDR);
outw(0x0048, ioaddr+LANCE_DATA);
- dev->trans_start = jiffies;
-
if ((lp->cur_tx - lp->dirty_tx) >= TX_RING_SIZE)
netif_stop_queue(dev);
diff --git a/drivers/net/lib82596.c b/drivers/net/lib82596.c
index 973390b82ec2..ce5d6e909218 100644
--- a/drivers/net/lib82596.c
+++ b/drivers/net/lib82596.c
@@ -963,7 +963,7 @@ static void i596_tx_timeout (struct net_device *dev)
lp->last_restart = dev->stats.tx_packets;
}
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue (dev);
}
@@ -974,7 +974,6 @@ static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct tx_cmd *tx_cmd;
struct i596_tbd *tbd;
short length = skb->len;
- dev->trans_start = jiffies;
DEB(DEB_STARTTX, printk(KERN_DEBUG
"%s: i596_start_xmit(%x,%p) called\n",
@@ -1092,7 +1091,7 @@ static int __devinit i82596_probe(struct net_device *dev)
DMA_FREE(dev->dev.parent, sizeof(struct i596_dma),
(void *)dma, lp->dma_addr);
return i;
- };
+ }
DEB(DEB_PROBE, printk(KERN_INFO "%s: 82596 at %#3lx, %pM IRQ %d.\n",
dev->name, dev->base_addr, dev->dev_addr,
@@ -1388,7 +1387,7 @@ static void set_multicast_list(struct net_device *dev)
}
if (!netdev_mc_empty(dev)) {
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
unsigned char *cp;
struct mc_cmd *cmd;
@@ -1396,10 +1395,10 @@ static void set_multicast_list(struct net_device *dev)
cmd->cmd.command = SWAP16(CmdMulticastList);
cmd->mc_cnt = SWAP16(netdev_mc_count(dev) * 6);
cp = cmd->mc_addrs;
- netdev_for_each_mc_addr(dmi, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
if (!cnt--)
break;
- memcpy(cp, dmi->dmi_addr, 6);
+ memcpy(cp, ha->addr, 6);
if (i596_debug > 1)
DEB(DEB_MULTI,
printk(KERN_DEBUG
diff --git a/drivers/net/lib8390.c b/drivers/net/lib8390.c
index 56f66f485400..316bb70775b1 100644
--- a/drivers/net/lib8390.c
+++ b/drivers/net/lib8390.c
@@ -257,7 +257,7 @@ static void __ei_tx_timeout(struct net_device *dev)
{
unsigned long e8390_base = dev->base_addr;
struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
- int txsr, isr, tickssofar = jiffies - dev->trans_start;
+ int txsr, isr, tickssofar = jiffies - dev_trans_start(dev);
unsigned long flags;
dev->stats.tx_errors++;
@@ -386,7 +386,6 @@ static netdev_tx_t __ei_start_xmit(struct sk_buff *skb,
{
ei_local->txing = 1;
NS8390_trigger_send(dev, send_length, output_page);
- dev->trans_start = jiffies;
if (output_page == ei_local->tx_start_page)
{
ei_local->tx1 = -1;
@@ -445,14 +444,14 @@ static irqreturn_t __ei_interrupt(int irq, void *dev_id)
if (ei_local->irqlock)
{
-#if 1 /* This might just be an interrupt for a PCI device sharing this line */
- /* The "irqlock" check is only for testing. */
- printk(ei_local->irqlock
- ? "%s: Interrupted while interrupts are masked! isr=%#2x imr=%#2x.\n"
- : "%s: Reentering the interrupt handler! isr=%#2x imr=%#2x.\n",
+ /*
+ * This might just be an interrupt for a PCI device sharing
+ * this line
+ */
+ printk("%s: Interrupted while interrupts are masked!"
+ " isr=%#2x imr=%#2x.\n",
dev->name, ei_inb_p(e8390_base + EN0_ISR),
ei_inb_p(e8390_base + EN0_IMR));
-#endif
spin_unlock(&ei_local->page_lock);
return IRQ_NONE;
}
@@ -792,7 +791,6 @@ static void ei_receive(struct net_device *dev)
/* We used to also ack ENISR_OVER here, but that would sometimes mask
a real overrun, leaving the 8390 in a stopped state with rec'vr off. */
ei_outb_p(ENISR_RX+ENISR_RX_ERR, e8390_base+EN0_ISR);
- return;
}
/**
@@ -905,10 +903,10 @@ static struct net_device_stats *__ei_get_stats(struct net_device *dev)
static inline void make_mc_bits(u8 *bits, struct net_device *dev)
{
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
- netdev_for_each_mc_addr(dmi, dev) {
- u32 crc = ether_crc(ETH_ALEN, dmi->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev) {
+ u32 crc = ether_crc(ETH_ALEN, ha->addr);
/*
* The 8390 uses the 6 most significant bits of the
* CRC to index the multicast table.
diff --git a/drivers/net/ll_temac.h b/drivers/net/ll_temac.h
index 1af66a1e6911..522abe2ff25a 100644
--- a/drivers/net/ll_temac.h
+++ b/drivers/net/ll_temac.h
@@ -5,8 +5,11 @@
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/spinlock.h>
+
+#ifdef CONFIG_PPC_DCR
#include <asm/dcr.h>
#include <asm/dcr-regs.h>
+#endif
/* packet size info */
#define XTE_HDR_SIZE 14 /* size of Ethernet header */
@@ -290,11 +293,12 @@ This option defaults to enabled (set) */
#define TX_CONTROL_CALC_CSUM_MASK 1
-#define XTE_ALIGN 32
-#define BUFFER_ALIGN(adr) ((XTE_ALIGN - ((u32) adr)) % XTE_ALIGN)
-
#define MULTICAST_CAM_TABLE_NUM 4
+/* TEMAC Synthesis features */
+#define TEMAC_FEATURE_RX_CSUM (1 << 0)
+#define TEMAC_FEATURE_TX_CSUM (1 << 1)
+
/* TX/RX CURDESC_PTR points to first descriptor */
/* TX/RX TAILDESC_PTR points to last descriptor in linked list */
@@ -335,9 +339,15 @@ struct temac_local {
struct mii_bus *mii_bus; /* MII bus reference */
int mdio_irqs[PHY_MAX_ADDR]; /* IRQs table for MDIO bus */
- /* IO registers and IRQs */
+ /* IO registers, dma functions and IRQs */
void __iomem *regs;
+ void __iomem *sdma_regs;
+#ifdef CONFIG_PPC_DCR
dcr_host_t sdma_dcrs;
+#endif
+ u32 (*dma_in)(struct temac_local *, int);
+ void (*dma_out)(struct temac_local *, int, u32);
+
int tx_irq;
int rx_irq;
int emac_num;
@@ -347,6 +357,7 @@ struct temac_local {
struct mutex indirect_mutex;
u32 options; /* Current options word */
int last_link;
+ unsigned int temac_features;
/* Buffer descriptors */
struct cdmac_bd *tx_bd_v;
diff --git a/drivers/net/ll_temac_main.c b/drivers/net/ll_temac_main.c
index ba617e3cf1bb..52dcc8495647 100644
--- a/drivers/net/ll_temac_main.c
+++ b/drivers/net/ll_temac_main.c
@@ -20,9 +20,6 @@
* or rx, so this should be okay.
*
* TODO:
- * - Fix driver to work on more than just Virtex5. Right now the driver
- * assumes that the locallink DMA registers are accessed via DCR
- * instructions.
* - Factor out locallink DMA code into separate driver
* - Fix multicast assignment.
* - Fix support for hardware checksumming.
@@ -116,17 +113,86 @@ void temac_indirect_out32(struct temac_local *lp, int reg, u32 value)
temac_iow(lp, XTE_CTL0_OFFSET, CNTLREG_WRITE_ENABLE_MASK | reg);
}
+/**
+ * temac_dma_in32 - Memory mapped DMA read; this function expects a
+ * register input that is based on DCR word addresses, which are
+ * then converted to memory mapped byte addresses
+ */
static u32 temac_dma_in32(struct temac_local *lp, int reg)
{
- return dcr_read(lp->sdma_dcrs, reg);
+ return in_be32((u32 *)(lp->sdma_regs + (reg << 2)));
}
+/**
+ * temac_dma_out32 - Memory mapped DMA write; this function expects a
+ * register input that is based on DCR word addresses, which are
+ * then converted to memory mapped byte addresses
+ */
static void temac_dma_out32(struct temac_local *lp, int reg, u32 value)
{
+ out_be32((u32 *)(lp->sdma_regs + (reg << 2)), value);
+}
+
+/* DMA register access functions can be DCR based or memory mapped.
+ * The PowerPC 440 is DCR based, the PowerPC 405 and MicroBlaze are both
+ * memory mapped.
+ */
+#ifdef CONFIG_PPC_DCR
+
+/**
+ * temac_dma_dcr_in - DCR based DMA read
+ */
+static u32 temac_dma_dcr_in(struct temac_local *lp, int reg)
+{
+ return dcr_read(lp->sdma_dcrs, reg);
+}
+
+/**
+ * temac_dma_dcr_out - DCR based DMA write
+ */
+static void temac_dma_dcr_out(struct temac_local *lp, int reg, u32 value)
+{
dcr_write(lp->sdma_dcrs, reg, value);
}
/**
+ * temac_dcr_setup - If the DMA is DCR based, then setup the address and
+ * I/O functions
+ */
+static int temac_dcr_setup(struct temac_local *lp, struct of_device *op,
+ struct device_node *np)
+{
+ unsigned int dcrs;
+
+ /* setup the dcr address mapping if it's in the device tree */
+
+ dcrs = dcr_resource_start(np, 0);
+ if (dcrs != 0) {
+ lp->sdma_dcrs = dcr_map(np, dcrs, dcr_resource_len(np, 0));
+ lp->dma_in = temac_dma_dcr_in;
+ lp->dma_out = temac_dma_dcr_out;
+ dev_dbg(&op->dev, "DCR base: %x\n", dcrs);
+ return 0;
+ }
+ /* no DCR in the device tree, indicate a failure */
+ return -1;
+}
+
+#else
+
+/*
+ * temac_dcr_setup - This is a stub for when DCR is not supported,
+ * such as with MicroBlaze
+ */
+static int temac_dcr_setup(struct temac_local *lp, struct of_device *op,
+ struct device_node *np)
+{
+ return -1;
+}
+
+#endif
+
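Either way, only the two accessor pointers differ: once temac_dcr_setup() (or the memory-mapped fallback in temac_of_probe() further down) has filled in lp->dma_in/lp->dma_out, common code is bus-agnostic. A hypothetical illustration, modelled on the reset sequence this patch converts in temac_device_reset():

/* Sketch only: the same call works whether the accessors resolve to
 * dcr_read/dcr_write or in_be32/out_be32 (where the DCR word index
 * "reg" becomes byte offset "reg << 2").
 */
static void temac_dma_reset_sketch(struct temac_local *lp)
{
	int timeout = 1000;

	lp->dma_out(lp, DMA_CONTROL_REG, DMA_CONTROL_RST);
	while (lp->dma_in(lp, DMA_CONTROL_REG) & DMA_CONTROL_RST) {
		udelay(1);
		if (--timeout == 0)
			break;
	}
}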
+/**
* temac_dma_bd_init - Setup buffer descriptor rings
*/
static int temac_dma_bd_init(struct net_device *ndev)
@@ -156,14 +222,14 @@ static int temac_dma_bd_init(struct net_device *ndev)
lp->rx_bd_v[i].next = lp->rx_bd_p +
sizeof(*lp->rx_bd_v) * ((i + 1) % RX_BD_NUM);
- skb = alloc_skb(XTE_MAX_JUMBO_FRAME_SIZE
- + XTE_ALIGN, GFP_ATOMIC);
+ skb = netdev_alloc_skb_ip_align(ndev,
+ XTE_MAX_JUMBO_FRAME_SIZE);
+
if (skb == 0) {
dev_err(&ndev->dev, "alloc_skb error %d\n", i);
return -1;
}
lp->rx_skb[i] = skb;
- skb_reserve(skb, BUFFER_ALIGN(skb->data));
/* returns physical address of skb->data */
lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
skb->data,
@@ -173,23 +239,23 @@ static int temac_dma_bd_init(struct net_device *ndev)
lp->rx_bd_v[i].app0 = STS_CTRL_APP0_IRQONEND;
}
- temac_dma_out32(lp, TX_CHNL_CTRL, 0x10220400 |
+ lp->dma_out(lp, TX_CHNL_CTRL, 0x10220400 |
CHNL_CTRL_IRQ_EN |
CHNL_CTRL_IRQ_DLY_EN |
CHNL_CTRL_IRQ_COAL_EN);
/* 0x10220483 */
/* 0x00100483 */
- temac_dma_out32(lp, RX_CHNL_CTRL, 0xff010000 |
+ lp->dma_out(lp, RX_CHNL_CTRL, 0xff070000 |
CHNL_CTRL_IRQ_EN |
CHNL_CTRL_IRQ_DLY_EN |
CHNL_CTRL_IRQ_COAL_EN |
CHNL_CTRL_IRQ_IOE);
/* 0xff010283 */
- temac_dma_out32(lp, RX_CURDESC_PTR, lp->rx_bd_p);
- temac_dma_out32(lp, RX_TAILDESC_PTR,
+ lp->dma_out(lp, RX_CURDESC_PTR, lp->rx_bd_p);
+ lp->dma_out(lp, RX_TAILDESC_PTR,
lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
- temac_dma_out32(lp, TX_CURDESC_PTR, lp->tx_bd_p);
+ lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p);
return 0;
}
@@ -251,20 +317,20 @@ static void temac_set_multicast_list(struct net_device *ndev)
temac_indirect_out32(lp, XTE_AFM_OFFSET, XTE_AFM_EPPRM_MASK);
dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
} else if (!netdev_mc_empty(ndev)) {
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
i = 0;
- netdev_for_each_mc_addr(mclist, ndev) {
+ netdev_for_each_mc_addr(ha, ndev) {
if (i >= MULTICAST_CAM_TABLE_NUM)
break;
- multi_addr_msw = ((mclist->dmi_addr[3] << 24) |
- (mclist->dmi_addr[2] << 16) |
- (mclist->dmi_addr[1] << 8) |
- (mclist->dmi_addr[0]));
+ multi_addr_msw = ((ha->addr[3] << 24) |
+ (ha->addr[2] << 16) |
+ (ha->addr[1] << 8) |
+ (ha->addr[0]));
temac_indirect_out32(lp, XTE_MAW0_OFFSET,
multi_addr_msw);
- multi_addr_lsw = ((mclist->dmi_addr[5] << 8) |
- (mclist->dmi_addr[4]) | (i << 16));
+ multi_addr_lsw = ((ha->addr[5] << 8) |
+ (ha->addr[4]) | (i << 16));
temac_indirect_out32(lp, XTE_MAW1_OFFSET,
multi_addr_lsw);
i++;
@@ -427,9 +493,9 @@ static void temac_device_reset(struct net_device *ndev)
temac_indirect_out32(lp, XTE_RXC1_OFFSET, val & ~XTE_RXC1_RXEN_MASK);
/* Reset Local Link (DMA) */
- temac_dma_out32(lp, DMA_CONTROL_REG, DMA_CONTROL_RST);
+ lp->dma_out(lp, DMA_CONTROL_REG, DMA_CONTROL_RST);
timeout = 1000;
- while (temac_dma_in32(lp, DMA_CONTROL_REG) & DMA_CONTROL_RST) {
+ while (lp->dma_in(lp, DMA_CONTROL_REG) & DMA_CONTROL_RST) {
udelay(1);
if (--timeout == 0) {
dev_err(&ndev->dev,
@@ -437,7 +503,7 @@ static void temac_device_reset(struct net_device *ndev)
break;
}
}
- temac_dma_out32(lp, DMA_CONTROL_REG, DMA_TAIL_ENABLE);
+ lp->dma_out(lp, DMA_CONTROL_REG, DMA_TAIL_ENABLE);
temac_dma_bd_init(ndev);
@@ -461,7 +527,7 @@ static void temac_device_reset(struct net_device *ndev)
dev_err(&ndev->dev, "Error setting TEMAC options\n");
/* Init Driver variable */
- ndev->trans_start = 0;
+ ndev->trans_start = jiffies; /* prevent tx timeout */
}
void temac_adjust_link(struct net_device *ndev)
@@ -508,6 +574,10 @@ static void temac_start_xmit_done(struct net_device *ndev)
if (cur_p->app4)
dev_kfree_skb_irq((struct sk_buff *)cur_p->app4);
cur_p->app0 = 0;
+ cur_p->app1 = 0;
+ cur_p->app2 = 0;
+ cur_p->app3 = 0;
+ cur_p->app4 = 0;
ndev->stats.tx_packets++;
ndev->stats.tx_bytes += cur_p->len;
@@ -523,6 +593,29 @@ static void temac_start_xmit_done(struct net_device *ndev)
netif_wake_queue(ndev);
}
+static inline int temac_check_tx_bd_space(struct temac_local *lp, int num_frag)
+{
+ struct cdmac_bd *cur_p;
+ int tail;
+
+ tail = lp->tx_bd_tail;
+ cur_p = &lp->tx_bd_v[tail];
+
+ do {
+ if (cur_p->app0)
+ return NETDEV_TX_BUSY;
+
+ tail++;
+ if (tail >= TX_BD_NUM)
+ tail = 0;
+
+ cur_p = &lp->tx_bd_v[tail];
+ num_frag--;
+ } while (num_frag >= 0);
+
+ return 0;
+}
+
static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct temac_local *lp = netdev_priv(ndev);
@@ -537,7 +630,7 @@ static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
start_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
- if (cur_p->app0 & STS_CTRL_APP0_CMPLT) {
+ if (temac_check_tx_bd_space(lp, num_frag)) {
if (!netif_queue_stopped(ndev)) {
netif_stop_queue(ndev);
return NETDEV_TX_BUSY;
@@ -547,29 +640,14 @@ static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
cur_p->app0 = 0;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
- const struct iphdr *ip = ip_hdr(skb);
- int length = 0, start = 0, insert = 0;
-
- switch (ip->protocol) {
- case IPPROTO_TCP:
- start = sizeof(struct iphdr) + ETH_HLEN;
- insert = sizeof(struct iphdr) + ETH_HLEN + 16;
- length = ip->tot_len - sizeof(struct iphdr);
- break;
- case IPPROTO_UDP:
- start = sizeof(struct iphdr) + ETH_HLEN;
- insert = sizeof(struct iphdr) + ETH_HLEN + 6;
- length = ip->tot_len - sizeof(struct iphdr);
- break;
- default:
- break;
- }
- cur_p->app1 = ((start << 16) | insert);
- cur_p->app2 = csum_tcpudp_magic(ip->saddr, ip->daddr,
- length, ip->protocol, 0);
- skb->data[insert] = 0;
- skb->data[insert + 1] = 0;
+ unsigned int csum_start_off = skb_transport_offset(skb);
+ unsigned int csum_index_off = csum_start_off + skb->csum_offset;
+
+ cur_p->app0 |= 1; /* TX Checksum Enabled */
+ cur_p->app1 = (csum_start_off << 16) | csum_index_off;
+ cur_p->app2 = 0; /* initial checksum seed */
}
+
cur_p->app0 |= STS_CTRL_APP0_SOP;
cur_p->len = skb_headlen(skb);
cur_p->phys = dma_map_single(ndev->dev.parent, skb->data, skb->len,
@@ -598,7 +676,7 @@ static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
lp->tx_bd_tail = 0;
/* Kick off the transfer */
- temac_dma_out32(lp, TX_TAILDESC_PTR, tail_p); /* DMA start */
+ lp->dma_out(lp, TX_TAILDESC_PTR, tail_p); /* DMA start */
return NETDEV_TX_OK;
}
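For a plain TCP-over-IPv4 frame the generic fields above reproduce what the removed protocol switch computed by hand: skb_transport_offset() is 34 (14-byte Ethernet header plus 20-byte IP header) and skb->csum_offset is 16 (offset of the checksum field within the TCP header), so app1 carries start 34 in the upper half-word and insert position 50 in the lower one, while app2 = 0 leaves the checksum seed to the hardware.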
@@ -612,7 +690,6 @@ static void ll_temac_recv(struct net_device *ndev)
struct cdmac_bd *cur_p;
dma_addr_t tail_p;
int length;
- unsigned long skb_vaddr;
unsigned long flags;
spin_lock_irqsave(&lp->rx_lock, flags);
@@ -626,8 +703,7 @@ static void ll_temac_recv(struct net_device *ndev)
skb = lp->rx_skb[lp->rx_bd_ci];
length = cur_p->app4 & 0x3FFF;
- skb_vaddr = virt_to_bus(skb->data);
- dma_unmap_single(ndev->dev.parent, skb_vaddr, length,
+ dma_unmap_single(ndev->dev.parent, cur_p->phys, length,
DMA_FROM_DEVICE);
skb_put(skb, length);
@@ -635,21 +711,29 @@ static void ll_temac_recv(struct net_device *ndev)
skb->protocol = eth_type_trans(skb, ndev);
skb->ip_summed = CHECKSUM_NONE;
+ /* if we're doing rx csum offload, set it up */
+ if (((lp->temac_features & TEMAC_FEATURE_RX_CSUM) != 0) &&
+ (skb->protocol == __constant_htons(ETH_P_IP)) &&
+ (skb->len > 64)) {
+
+ skb->csum = cur_p->app3 & 0xFFFF;
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ }
+
netif_rx(skb);
ndev->stats.rx_packets++;
ndev->stats.rx_bytes += length;
- new_skb = alloc_skb(XTE_MAX_JUMBO_FRAME_SIZE + XTE_ALIGN,
- GFP_ATOMIC);
+ new_skb = netdev_alloc_skb_ip_align(ndev,
+ XTE_MAX_JUMBO_FRAME_SIZE);
+
if (new_skb == 0) {
dev_err(&ndev->dev, "no memory for new sk_buff\n");
spin_unlock_irqrestore(&lp->rx_lock, flags);
return;
}
- skb_reserve(new_skb, BUFFER_ALIGN(new_skb->data));
-
cur_p->app0 = STS_CTRL_APP0_IRQONEND;
cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
XTE_MAX_JUMBO_FRAME_SIZE,
@@ -664,7 +748,7 @@ static void ll_temac_recv(struct net_device *ndev)
cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
bdstat = cur_p->app0;
}
- temac_dma_out32(lp, RX_TAILDESC_PTR, tail_p);
+ lp->dma_out(lp, RX_TAILDESC_PTR, tail_p);
spin_unlock_irqrestore(&lp->rx_lock, flags);
}
@@ -675,8 +759,8 @@ static irqreturn_t ll_temac_tx_irq(int irq, void *_ndev)
struct temac_local *lp = netdev_priv(ndev);
unsigned int status;
- status = temac_dma_in32(lp, TX_IRQ_REG);
- temac_dma_out32(lp, TX_IRQ_REG, status);
+ status = lp->dma_in(lp, TX_IRQ_REG);
+ lp->dma_out(lp, TX_IRQ_REG, status);
if (status & (IRQ_COAL | IRQ_DLY))
temac_start_xmit_done(lp->ndev);
@@ -693,8 +777,8 @@ static irqreturn_t ll_temac_rx_irq(int irq, void *_ndev)
unsigned int status;
/* Read and clear the status registers */
- status = temac_dma_in32(lp, RX_IRQ_REG);
- temac_dma_out32(lp, RX_IRQ_REG, status);
+ status = lp->dma_in(lp, RX_IRQ_REG);
+ lp->dma_out(lp, RX_IRQ_REG, status);
if (status & (IRQ_COAL | IRQ_DLY))
ll_temac_recv(lp->ndev);
@@ -795,7 +879,7 @@ static ssize_t temac_show_llink_regs(struct device *dev,
int i, len = 0;
for (i = 0; i < 0x11; i++)
- len += sprintf(buf + len, "%.8x%s", temac_dma_in32(lp, i),
+ len += sprintf(buf + len, "%.8x%s", lp->dma_in(lp, i),
(i % 8) == 7 ? "\n" : " ");
len += sprintf(buf + len, "\n");
@@ -820,8 +904,8 @@ temac_of_probe(struct of_device *op, const struct of_device_id *match)
struct temac_local *lp;
struct net_device *ndev;
const void *addr;
+ __be32 *p;
int size, rc = 0;
- unsigned int dcrs;
/* Init network device structure */
ndev = alloc_etherdev(sizeof(*lp));
@@ -858,30 +942,49 @@ temac_of_probe(struct of_device *op, const struct of_device_id *match)
mutex_init(&lp->indirect_mutex);
/* map device registers */
- lp->regs = of_iomap(op->node, 0);
+ lp->regs = of_iomap(op->dev.of_node, 0);
if (!lp->regs) {
dev_err(&op->dev, "could not map temac regs.\n");
goto nodev;
}
+ /* Setup checksum offload, but default to off if not specified */
+ lp->temac_features = 0;
+ p = (__be32 *)of_get_property(op->dev.of_node, "xlnx,txcsum", NULL);
+ if (p && be32_to_cpu(*p)) {
+ lp->temac_features |= TEMAC_FEATURE_TX_CSUM;
+ /* Can checksum TCP/UDP over IPv4. */
+ ndev->features |= NETIF_F_IP_CSUM;
+ }
+ p = (__be32 *)of_get_property(op->dev.of_node, "xlnx,rxcsum", NULL);
+ if (p && be32_to_cpu(*p))
+ lp->temac_features |= TEMAC_FEATURE_RX_CSUM;
+
/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
- np = of_parse_phandle(op->node, "llink-connected", 0);
+ np = of_parse_phandle(op->dev.of_node, "llink-connected", 0);
if (!np) {
dev_err(&op->dev, "could not find DMA node\n");
goto nodev;
}
- dcrs = dcr_resource_start(np, 0);
- if (dcrs == 0) {
- dev_err(&op->dev, "could not get DMA register address\n");
- goto nodev;
+ /* Setup the DMA register accesses, could be DCR or memory mapped */
+ if (temac_dcr_setup(lp, op, np)) {
+
+ /* no DCR in the device tree, try non-DCR */
+ lp->sdma_regs = of_iomap(np, 0);
+ if (lp->sdma_regs) {
+ lp->dma_in = temac_dma_in32;
+ lp->dma_out = temac_dma_out32;
+ dev_dbg(&op->dev, "MEM base: %p\n", lp->sdma_regs);
+ } else {
+ dev_err(&op->dev, "unable to map DMA registers\n");
+ goto nodev;
+ }
}
- lp->sdma_dcrs = dcr_map(np, dcrs, dcr_resource_len(np, 0));
- dev_dbg(&op->dev, "DCR base: %x\n", dcrs);
lp->rx_irq = irq_of_parse_and_map(np, 0);
lp->tx_irq = irq_of_parse_and_map(np, 1);
- if (!lp->rx_irq || !lp->tx_irq) {
+ if ((lp->rx_irq == NO_IRQ) || (lp->tx_irq == NO_IRQ)) {
dev_err(&op->dev, "could not determine irqs\n");
rc = -ENOMEM;
goto nodev;
@@ -890,7 +993,7 @@ temac_of_probe(struct of_device *op, const struct of_device_id *match)
of_node_put(np); /* Finished with the DMA node; drop the reference */
/* Retrieve the MAC address */
- addr = of_get_property(op->node, "local-mac-address", &size);
+ addr = of_get_property(op->dev.of_node, "local-mac-address", &size);
if ((!addr) || (size != 6)) {
dev_err(&op->dev, "could not find MAC address\n");
rc = -ENODEV;
@@ -898,11 +1001,11 @@ temac_of_probe(struct of_device *op, const struct of_device_id *match)
}
temac_set_mac_address(ndev, (void *)addr);
- rc = temac_mdio_setup(lp, op->node);
+ rc = temac_mdio_setup(lp, op->dev.of_node);
if (rc)
dev_warn(&op->dev, "error registering MDIO bus\n");
- lp->phy_node = of_parse_phandle(op->node, "phy-handle", 0);
+ lp->phy_node = of_parse_phandle(op->dev.of_node, "phy-handle", 0);
if (lp->phy_node)
dev_dbg(lp->dev, "using PHY node %s (%p)\n", np->full_name, np);
@@ -955,12 +1058,12 @@ static struct of_device_id temac_of_match[] __devinitdata = {
MODULE_DEVICE_TABLE(of, temac_of_match);
static struct of_platform_driver temac_of_driver = {
- .match_table = temac_of_match,
.probe = temac_of_probe,
.remove = __devexit_p(temac_of_remove),
.driver = {
.owner = THIS_MODULE,
.name = "xilinx_temac",
+ .of_match_table = temac_of_match,
},
};
diff --git a/drivers/net/lne390.c b/drivers/net/lne390.c
index 41cbaaef0654..8a1097cf8a83 100644
--- a/drivers/net/lne390.c
+++ b/drivers/net/lne390.c
@@ -307,8 +307,6 @@ static void lne390_reset_8390(struct net_device *dev)
ei_status.txing = 0;
outb(0x01, ioaddr + LNE390_RESET_PORT);
if (ei_debug > 1) printk("reset done\n");
-
- return;
}
/*
diff --git a/drivers/net/lp486e.c b/drivers/net/lp486e.c
index 3e3cc04defd0..3df046a58b1d 100644
--- a/drivers/net/lp486e.c
+++ b/drivers/net/lp486e.c
@@ -875,8 +875,6 @@ static netdev_tx_t i596_start_xmit (struct sk_buff *skb, struct net_device *dev)
length = ETH_ZLEN;
}
- dev->trans_start = jiffies;
-
tx_cmd = kmalloc((sizeof (struct tx_cmd) + sizeof (struct i596_tbd)), GFP_ATOMIC);
if (tx_cmd == NULL) {
printk(KERN_WARNING "%s: i596_xmit Memory squeeze, dropping packet.\n", dev->name);
@@ -1256,7 +1254,7 @@ static void set_multicast_list(struct net_device *dev) {
dev->name, netdev_mc_count(dev));
if (!netdev_mc_empty(dev)) {
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
char *cp;
cmd = kmalloc(sizeof(struct i596_cmd) + 2 +
netdev_mc_count(dev) * 6, GFP_ATOMIC);
@@ -1267,8 +1265,8 @@ static void set_multicast_list(struct net_device *dev) {
cmd->command = CmdMulticastList;
*((unsigned short *) (cmd + 1)) = netdev_mc_count(dev) * 6;
cp = ((char *)(cmd + 1))+2;
- netdev_for_each_mc_addr(dmi, dev) {
- memcpy(cp, dmi->dmi_addr, 6);
+ netdev_for_each_mc_addr(ha, dev) {
+ memcpy(cp, ha->addr, 6);
cp += 6;
}
if (i596_debug & LOG_SRCDST)
diff --git a/drivers/net/mac8390.c b/drivers/net/mac8390.c
index c8e68fde0664..1136c9a22b67 100644
--- a/drivers/net/mac8390.c
+++ b/drivers/net/mac8390.c
@@ -661,7 +661,6 @@ static void mac8390_no_reset(struct net_device *dev)
ei_status.txing = 0;
if (ei_debug > 1)
pr_info("reset not supported\n");
- return;
}
static void interlan_reset(struct net_device *dev)
@@ -673,7 +672,6 @@ static void interlan_reset(struct net_device *dev)
target[0xC0000] = 0;
if (ei_debug > 1)
pr_cont("reset complete\n");
- return;
}
/* dayna_memcpy_fromio/dayna_memcpy_toio */
diff --git a/drivers/net/mac89x0.c b/drivers/net/mac89x0.c
index c0876e915eed..69fa4ef64dd2 100644
--- a/drivers/net/mac89x0.c
+++ b/drivers/net/mac89x0.c
@@ -408,7 +408,6 @@ net_send_packet(struct sk_buff *skb, struct net_device *dev)
skb->len+1);
local_irq_restore(flags);
- dev->trans_start = jiffies;
dev_kfree_skb (skb);
return NETDEV_TX_OK;
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index c8a18a6203c8..40797fbdca9f 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -666,8 +666,6 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
spin_unlock_irqrestore(&bp->lock, flags);
- dev->trans_start = jiffies;
-
return NETDEV_TX_OK;
}
@@ -793,6 +791,7 @@ static void macb_init_hw(struct macb *bp)
config = macb_readl(bp, NCFGR) & MACB_BF(CLK, -1L);
config |= MACB_BIT(PAE); /* PAuse Enable */
config |= MACB_BIT(DRFCS); /* Discard Rx FCS */
+ config |= MACB_BIT(BIG); /* Receive oversized frames */
if (bp->dev->flags & IFF_PROMISC)
config |= MACB_BIT(CAF); /* Copy All Frames */
if (!(bp->dev->flags & IFF_BROADCAST))
@@ -882,15 +881,15 @@ static int hash_get_index(__u8 *addr)
*/
static void macb_sethashtable(struct net_device *dev)
{
- struct dev_mc_list *curr;
+ struct netdev_hw_addr *ha;
unsigned long mc_filter[2];
unsigned int bitnr;
struct macb *bp = netdev_priv(dev);
mc_filter[0] = mc_filter[1] = 0;
- netdev_for_each_mc_addr(curr, dev) {
- bitnr = hash_get_index(curr->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev) {
+ bitnr = hash_get_index(ha->addr);
mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
}
diff --git a/drivers/net/mace.c b/drivers/net/mace.c
index 962c41d0c8df..b6855a6476f8 100644
--- a/drivers/net/mace.c
+++ b/drivers/net/mace.c
@@ -599,7 +599,7 @@ static void mace_set_multicast(struct net_device *dev)
mp->maccc |= PROM;
} else {
unsigned char multicast_filter[8];
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
if (dev->flags & IFF_ALLMULTI) {
for (i = 0; i < 8; i++)
@@ -607,8 +607,8 @@ static void mace_set_multicast(struct net_device *dev)
} else {
for (i = 0; i < 8; i++)
multicast_filter[i] = 0;
- netdev_for_each_mc_addr(dmi, dev) {
- crc = ether_crc_le(6, dmi->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev) {
+ crc = ether_crc_le(6, ha->addr);
i = crc >> 26; /* bit number in multicast_filter */
multicast_filter[i >> 3] |= 1 << (i & 7);
}
diff --git a/drivers/net/macmace.c b/drivers/net/macmace.c
index 52e9a51c4c4f..c685a4656878 100644
--- a/drivers/net/macmace.c
+++ b/drivers/net/macmace.c
@@ -488,7 +488,6 @@ static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
dev_kfree_skb(skb);
- dev->trans_start = jiffies;
return NETDEV_TX_OK;
}
@@ -509,7 +508,7 @@ static void mace_set_multicast(struct net_device *dev)
mb->maccc |= PROM;
} else {
unsigned char multicast_filter[8];
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
if (dev->flags & IFF_ALLMULTI) {
for (i = 0; i < 8; i++) {
@@ -518,8 +517,8 @@ static void mace_set_multicast(struct net_device *dev)
} else {
for (i = 0; i < 8; i++)
multicast_filter[i] = 0;
- netdev_for_each_mc_addr(dmi, dev) {
- crc = ether_crc_le(6, dmi->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev) {
+ crc = ether_crc_le(6, ha->addr);
/* bit number in multicast_filter */
i = crc >> 26;
multicast_filter[i >> 3] |= 1 << (i & 7);
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 40faa368b07a..87e8d4cb4057 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -145,19 +145,15 @@ static void macvlan_broadcast(struct sk_buff *skb,
}
/* called under rcu_read_lock() from netif_receive_skb */
-static struct sk_buff *macvlan_handle_frame(struct sk_buff *skb)
+static struct sk_buff *macvlan_handle_frame(struct macvlan_port *port,
+ struct sk_buff *skb)
{
const struct ethhdr *eth = eth_hdr(skb);
- const struct macvlan_port *port;
const struct macvlan_dev *vlan;
const struct macvlan_dev *src;
struct net_device *dev;
unsigned int len;
- port = rcu_dereference(skb->dev->macvlan_port);
- if (port == NULL)
- return skb;
-
if (is_multicast_ether_addr(eth->h_dest)) {
src = macvlan_hash_lookup(port, eth->h_source);
if (!src)
@@ -243,7 +239,7 @@ netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
int ret;
ret = macvlan_queue_xmit(skb, dev);
- if (likely(ret == NET_XMIT_SUCCESS)) {
+ if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
txq->tx_packets++;
txq->tx_bytes += len;
} else
@@ -282,7 +278,7 @@ static int macvlan_open(struct net_device *dev)
if (macvlan_addr_busy(vlan->port, dev->dev_addr))
goto out;
- err = dev_unicast_add(lowerdev, dev->dev_addr);
+ err = dev_uc_add(lowerdev, dev->dev_addr);
if (err < 0)
goto out;
if (dev->flags & IFF_ALLMULTI) {
@@ -294,7 +290,7 @@ static int macvlan_open(struct net_device *dev)
return 0;
del_unicast:
- dev_unicast_delete(lowerdev, dev->dev_addr);
+ dev_uc_del(lowerdev, dev->dev_addr);
out:
return err;
}
@@ -308,7 +304,7 @@ static int macvlan_stop(struct net_device *dev)
if (dev->flags & IFF_ALLMULTI)
dev_set_allmulti(lowerdev, -1);
- dev_unicast_delete(lowerdev, dev->dev_addr);
+ dev_uc_del(lowerdev, dev->dev_addr);
macvlan_hash_del(vlan);
return 0;
@@ -332,11 +328,11 @@ static int macvlan_set_mac_address(struct net_device *dev, void *p)
if (macvlan_addr_busy(vlan->port, addr->sa_data))
return -EBUSY;
- err = dev_unicast_add(lowerdev, addr->sa_data);
+ err = dev_uc_add(lowerdev, addr->sa_data);
if (err)
return err;
- dev_unicast_delete(lowerdev, dev->dev_addr);
+ dev_uc_del(lowerdev, dev->dev_addr);
macvlan_hash_change_addr(vlan, addr->sa_data);
}
@@ -638,11 +634,18 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
err = register_netdevice(dev);
if (err < 0)
- return err;
+ goto destroy_port;
list_add_tail(&vlan->list, &port->vlans);
netif_stacked_transfer_operstate(lowerdev, dev);
+
return 0;
+
+destroy_port:
+ if (list_empty(&port->vlans))
+ macvlan_port_destroy(lowerdev);
+
+ return err;
}
EXPORT_SYMBOL_GPL(macvlan_common_newlink);
@@ -748,6 +751,9 @@ static int macvlan_device_event(struct notifier_block *unused,
list_for_each_entry_safe(vlan, next, &port->vlans, list)
vlan->dev->rtnl_link_ops->dellink(vlan->dev, NULL);
break;
+ case NETDEV_PRE_TYPE_CHANGE:
+ /* Forbid underlaying device to change its type. */
+ return NOTIFY_BAD;
}
return NOTIFY_DONE;
}
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index abba3cc81f12..a8a94e2f6ddc 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -37,6 +37,8 @@
struct macvtap_queue {
struct sock sk;
struct socket sock;
+ struct socket_wq wq;
+ int vnet_hdr_sz;
struct macvlan_dev *vlan;
struct file *file;
unsigned int flags;
@@ -181,7 +183,7 @@ static int macvtap_forward(struct net_device *dev, struct sk_buff *skb)
return -ENOLINK;
skb_queue_tail(&q->sk.sk_receive_queue, skb);
- wake_up_interruptible_poll(q->sk.sk_sleep, POLLIN | POLLRDNORM | POLLRDBAND);
+ wake_up_interruptible_poll(sk_sleep(&q->sk), POLLIN | POLLRDNORM | POLLRDBAND);
return 0;
}
@@ -242,12 +244,15 @@ static struct rtnl_link_ops macvtap_link_ops __read_mostly = {
static void macvtap_sock_write_space(struct sock *sk)
{
+ wait_queue_head_t *wqueue;
+
if (!sock_writeable(sk) ||
!test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags))
return;
- if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
- wake_up_interruptible_poll(sk->sk_sleep, POLLOUT | POLLWRNORM | POLLWRBAND);
+ wqueue = sk_sleep(sk);
+ if (wqueue && waitqueue_active(wqueue))
+ wake_up_interruptible_poll(wqueue, POLLOUT | POLLWRNORM | POLLWRBAND);
}
static int macvtap_open(struct inode *inode, struct file *file)
@@ -272,7 +277,8 @@ static int macvtap_open(struct inode *inode, struct file *file)
if (!q)
goto out;
- init_waitqueue_head(&q->sock.wait);
+ q->sock.wq = &q->wq;
+ init_waitqueue_head(&q->wq.wait);
q->sock.type = SOCK_RAW;
q->sock.state = SS_CONNECTED;
q->sock.file = file;
@@ -280,6 +286,7 @@ static int macvtap_open(struct inode *inode, struct file *file)
sock_init_data(&q->sock, &q->sk);
q->sk.sk_write_space = macvtap_sock_write_space;
q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP;
+ q->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
err = macvtap_set_queue(dev, file, q);
if (err)
@@ -308,7 +315,7 @@ static unsigned int macvtap_poll(struct file *file, poll_table * wait)
goto out;
mask = 0;
- poll_wait(file, &q->sock.wait, wait);
+ poll_wait(file, &q->wq.wait, wait);
if (!skb_queue_empty(&q->sk.sk_receive_queue))
mask |= POLLIN | POLLRDNORM;
@@ -440,14 +447,14 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q,
int vnet_hdr_len = 0;
if (q->flags & IFF_VNET_HDR) {
- vnet_hdr_len = sizeof(vnet_hdr);
+ vnet_hdr_len = q->vnet_hdr_sz;
err = -EINVAL;
if ((len -= vnet_hdr_len) < 0)
goto err;
err = memcpy_fromiovecend((void *)&vnet_hdr, iv, 0,
- vnet_hdr_len);
+ sizeof(vnet_hdr));
if (err < 0)
goto err;
if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
@@ -529,7 +536,7 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
if (q->flags & IFF_VNET_HDR) {
struct virtio_net_hdr vnet_hdr;
- vnet_hdr_len = sizeof (vnet_hdr);
+ vnet_hdr_len = q->vnet_hdr_sz;
if ((len -= vnet_hdr_len) < 0)
return -EINVAL;
@@ -537,7 +544,7 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
if (ret)
return ret;
- if (memcpy_toiovecend(iv, (void *)&vnet_hdr, 0, vnet_hdr_len))
+ if (memcpy_toiovecend(iv, (void *)&vnet_hdr, 0, sizeof(vnet_hdr)))
return -EFAULT;
}
@@ -562,7 +569,7 @@ static ssize_t macvtap_do_read(struct macvtap_queue *q, struct kiocb *iocb,
struct sk_buff *skb;
ssize_t ret = 0;
- add_wait_queue(q->sk.sk_sleep, &wait);
+ add_wait_queue(sk_sleep(&q->sk), &wait);
while (len) {
current->state = TASK_INTERRUPTIBLE;
@@ -587,7 +594,7 @@ static ssize_t macvtap_do_read(struct macvtap_queue *q, struct kiocb *iocb,
}
current->state = TASK_RUNNING;
- remove_wait_queue(q->sk.sk_sleep, &wait);
+ remove_wait_queue(sk_sleep(&q->sk), &wait);
return ret;
}
@@ -622,6 +629,8 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
struct ifreq __user *ifr = argp;
unsigned int __user *up = argp;
unsigned int u;
+ int __user *sp = argp;
+ int s;
int ret;
switch (cmd) {
@@ -667,6 +676,21 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
q->sk.sk_sndbuf = u;
return 0;
+ case TUNGETVNETHDRSZ:
+ s = q->vnet_hdr_sz;
+ if (put_user(s, sp))
+ return -EFAULT;
+ return 0;
+
+ case TUNSETVNETHDRSZ:
+ if (get_user(s, sp))
+ return -EFAULT;
+ if (s < (int)sizeof(struct virtio_net_hdr))
+ return -EINVAL;
+
+ q->vnet_hdr_sz = s;
+ return 0;
+
case TUNSETOFFLOAD:
/* let the user check for future flags */
if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
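The new TUNGETVNETHDRSZ/TUNSETVNETHDRSZ cases mirror the tun driver and let user space grow the per-queue virtio header beyond the 10-byte default. A rough user-space sketch, assuming tap_fd is an already-open macvtap file descriptor:

/* Hypothetical example: switch to the 12-byte header used with
 * mergeable RX buffers. The driver rejects sizes smaller than
 * sizeof(struct virtio_net_hdr).
 */
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/if_tun.h>
#include <linux/virtio_net.h>

static int set_vnet_hdr_sz(int tap_fd)
{
	int sz = sizeof(struct virtio_net_hdr_mrg_rxbuf);

	if (ioctl(tap_fd, TUNSETVNETHDRSZ, &sz) < 0)
		return -1;
	if (ioctl(tap_fd, TUNGETVNETHDRSZ, &sz) == 0)
		printf("vnet header is now %d bytes\n", sz);
	return 0;
}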
diff --git a/drivers/net/meth.c b/drivers/net/meth.c
index 9f72cb45f4af..42e3294671d7 100644
--- a/drivers/net/meth.c
+++ b/drivers/net/meth.c
@@ -746,10 +746,8 @@ static void meth_tx_timeout(struct net_device *dev)
/* Enable interrupt */
spin_unlock_irqrestore(&priv->meth_lock, flags);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
-
- return;
}
/*
diff --git a/drivers/net/mlx4/en_ethtool.c b/drivers/net/mlx4/en_ethtool.c
index 86467b444ac6..d5afd037cd7d 100644
--- a/drivers/net/mlx4/en_ethtool.c
+++ b/drivers/net/mlx4/en_ethtool.c
@@ -140,8 +140,6 @@ static void mlx4_en_get_wol(struct net_device *netdev,
{
wol->supported = 0;
wol->wolopts = 0;
-
- return;
}
static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
index 73c3d20c6453..96180c0ec206 100644
--- a/drivers/net/mlx4/en_netdev.c
+++ b/drivers/net/mlx4/en_netdev.c
@@ -161,39 +161,29 @@ static void mlx4_en_do_set_mac(struct work_struct *work)
static void mlx4_en_clear_list(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
- struct dev_mc_list *plist = priv->mc_list;
- struct dev_mc_list *next;
- while (plist) {
- next = plist->next;
- kfree(plist);
- plist = next;
- }
- priv->mc_list = NULL;
+ kfree(priv->mc_addrs);
+ priv->mc_addrs_cnt = 0;
}
static void mlx4_en_cache_mclist(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
- struct dev_mc_list *mclist;
- struct dev_mc_list *tmp;
- struct dev_mc_list *plist = NULL;
-
- for (mclist = dev->mc_list; mclist; mclist = mclist->next) {
- tmp = kmalloc(sizeof(struct dev_mc_list), GFP_ATOMIC);
- if (!tmp) {
- en_err(priv, "failed to allocate multicast list\n");
- mlx4_en_clear_list(dev);
- return;
- }
- memcpy(tmp, mclist, sizeof(struct dev_mc_list));
- tmp->next = NULL;
- if (plist)
- plist->next = tmp;
- else
- priv->mc_list = tmp;
- plist = tmp;
+ struct netdev_hw_addr *ha;
+ char *mc_addrs;
+ int mc_addrs_cnt = netdev_mc_count(dev);
+ int i;
+
+ mc_addrs = kmalloc(mc_addrs_cnt * ETH_ALEN, GFP_ATOMIC);
+ if (!mc_addrs) {
+ en_err(priv, "failed to allocate multicast list\n");
+ return;
}
+ i = 0;
+ netdev_for_each_mc_addr(ha, dev)
+ memcpy(mc_addrs + i++ * ETH_ALEN, ha->addr, ETH_ALEN);
+ priv->mc_addrs = mc_addrs;
+ priv->mc_addrs_cnt = mc_addrs_cnt;
}
@@ -213,7 +203,6 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
mcast_task);
struct mlx4_en_dev *mdev = priv->mdev;
struct net_device *dev = priv->dev;
- struct dev_mc_list *mclist;
u64 mcast_addr = 0;
int err;
@@ -289,6 +278,8 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
if (err)
en_err(priv, "Failed disabling multicast filter\n");
} else {
+ int i;
+
err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
0, MLX4_MCAST_DISABLE);
if (err)
@@ -303,8 +294,9 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
netif_tx_lock_bh(dev);
mlx4_en_cache_mclist(dev);
netif_tx_unlock_bh(dev);
- for (mclist = priv->mc_list; mclist; mclist = mclist->next) {
- mcast_addr = mlx4_en_mac_to_u64(mclist->dmi_addr);
+ for (i = 0; i < priv->mc_addrs_cnt; i++) {
+ mcast_addr =
+ mlx4_en_mac_to_u64(priv->mc_addrs + i * ETH_ALEN);
mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
mcast_addr, 0, MLX4_MCAST_CONFIG);
}
@@ -512,7 +504,7 @@ static void mlx4_en_do_get_stats(struct work_struct *work)
err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
if (err)
- en_dbg(HW, priv, "Could not update stats \n");
+ en_dbg(HW, priv, "Could not update stats\n");
mutex_lock(&mdev->state_lock);
if (mdev->device_up) {
@@ -985,7 +977,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
priv->flags = prof->flags;
priv->tx_ring_num = prof->tx_ring_num;
priv->rx_ring_num = prof->rx_ring_num;
- priv->mc_list = NULL;
priv->mac_index = -1;
priv->msg_enable = MLX4_EN_MSG_LEVEL;
spin_lock_init(&priv->stats_lock);
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c
index 7365bf488b81..423053482ed5 100644
--- a/drivers/net/mlx4/eq.c
+++ b/drivers/net/mlx4/eq.c
@@ -239,7 +239,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at index %u\n",
eqe->type, eqe->subtype, eq->eqn, eq->cons_index);
break;
- };
+ }
++eq->cons_index;
eqes_found = 1;
diff --git a/drivers/net/mlx4/icm.c b/drivers/net/mlx4/icm.c
index 57288ca1395f..b07e4dee80aa 100644
--- a/drivers/net/mlx4/icm.c
+++ b/drivers/net/mlx4/icm.c
@@ -163,28 +163,30 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages],
cur_order, gfp_mask);
- if (!ret) {
- ++chunk->npages;
-
- if (coherent)
- ++chunk->nsg;
- else if (chunk->npages == MLX4_ICM_CHUNK_LEN) {
- chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
- chunk->npages,
- PCI_DMA_BIDIRECTIONAL);
+ if (ret) {
+ if (--cur_order < 0)
+ goto fail;
+ else
+ continue;
+ }
- if (chunk->nsg <= 0)
- goto fail;
+ ++chunk->npages;
- chunk = NULL;
- }
+ if (coherent)
+ ++chunk->nsg;
+ else if (chunk->npages == MLX4_ICM_CHUNK_LEN) {
+ chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
+ chunk->npages,
+ PCI_DMA_BIDIRECTIONAL);
- npages -= 1 << cur_order;
- } else {
- --cur_order;
- if (cur_order < 0)
+ if (chunk->nsg <= 0)
goto fail;
}
+
+ if (chunk->npages == MLX4_ICM_CHUNK_LEN)
+ chunk = NULL;
+
+ npages -= 1 << cur_order;
}
if (!coherent && chunk) {
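The icm.c rework above flips the success and failure branches so that the allocation-failure path (drop to a smaller order and retry) exits early instead of nesting the common case. Stripped of mlx4's chunk and scatterlist bookkeeping, the retry strategy looks roughly like this (demo_alloc_by_order() and the starting order are illustrative assumptions):

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>

static int demo_alloc_by_order(int npages, gfp_t gfp_mask)
{
        int cur_order = 3;      /* start optimistically with 8-page blocks */

        while (npages > 0) {
                struct page *page = alloc_pages(gfp_mask, cur_order);

                if (!page) {
                        /* memory pressure: halve the request and retry */
                        if (--cur_order < 0)
                                return -ENOMEM;
                        continue;
                }

                /* a real allocator records 'page' in its chunk list here,
                 * and would also clamp cur_order to what is still needed */
                npages -= 1 << cur_order;
        }
        return 0;
}
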
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index bc72d6e4919b..13343e884999 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -40,6 +40,7 @@
#include <linux/mutex.h>
#include <linux/radix-tree.h>
#include <linux/timer.h>
+#include <linux/semaphore.h>
#include <linux/workqueue.h>
#include <linux/mlx4/device.h>
diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
index 82c3ebc584e3..b55e46c8b682 100644
--- a/drivers/net/mlx4/mlx4_en.h
+++ b/drivers/net/mlx4/mlx4_en.h
@@ -492,7 +492,8 @@ struct mlx4_en_priv {
struct mlx4_en_perf_stats pstats;
struct mlx4_en_pkt_stats pkstats;
struct mlx4_en_port_stats port_stats;
- struct dev_mc_list *mc_list;
+ char *mc_addrs;
+ int mc_addrs_cnt;
struct mlx4_en_stat_out_mbox hw_stats;
};
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 8613a52ddf17..e345ec8cb473 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -882,7 +882,6 @@ static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
txq->tx_bytes += skb->len;
txq->tx_packets++;
- dev->trans_start = jiffies;
entries_left = txq->tx_ring_size - txq->tx_desc_count;
if (entries_left < MAX_SKB_FRAGS + 1)
@@ -1770,7 +1769,7 @@ static void mv643xx_eth_program_multicast_filter(struct net_device *dev)
struct mv643xx_eth_private *mp = netdev_priv(dev);
u32 *mc_spec;
u32 *mc_other;
- struct dev_addr_list *addr;
+ struct netdev_hw_addr *ha;
int i;
if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
@@ -1795,8 +1794,8 @@ oom:
memset(mc_spec, 0, 0x100);
memset(mc_other, 0, 0x100);
- netdev_for_each_mc_addr(addr, dev) {
- u8 *a = addr->da_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ u8 *a = ha->addr;
u32 *table;
int entry;
@@ -2609,10 +2608,9 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
goto out;
ret = -ENOMEM;
- msp = kmalloc(sizeof(*msp), GFP_KERNEL);
+ msp = kzalloc(sizeof(*msp), GFP_KERNEL);
if (msp == NULL)
goto out;
- memset(msp, 0, sizeof(*msp));
msp->base = ioremap(res->start, res->end - res->start + 1);
if (msp->base == NULL)
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index ecde0876a785..e0b47cc8a86e 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -110,15 +110,15 @@ MODULE_LICENSE("Dual BSD/GPL");
struct myri10ge_rx_buffer_state {
struct page *page;
int page_offset;
- DECLARE_PCI_UNMAP_ADDR(bus)
- DECLARE_PCI_UNMAP_LEN(len)
+ DEFINE_DMA_UNMAP_ADDR(bus);
+ DEFINE_DMA_UNMAP_LEN(len);
};
struct myri10ge_tx_buffer_state {
struct sk_buff *skb;
int last;
- DECLARE_PCI_UNMAP_ADDR(bus)
- DECLARE_PCI_UNMAP_LEN(len)
+ DEFINE_DMA_UNMAP_ADDR(bus);
+ DEFINE_DMA_UNMAP_LEN(len);
};
struct myri10ge_cmd {
@@ -1234,7 +1234,7 @@ myri10ge_alloc_rx_pages(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
rx->info[idx].page_offset = rx->page_offset;
/* note that this is the address of the start of the
* page */
- pci_unmap_addr_set(&rx->info[idx], bus, rx->bus);
+ dma_unmap_addr_set(&rx->info[idx], bus, rx->bus);
rx->shadow[idx].addr_low =
htonl(MYRI10GE_LOWPART_TO_U32(rx->bus) + rx->page_offset);
rx->shadow[idx].addr_high =
@@ -1266,7 +1266,7 @@ myri10ge_unmap_rx_page(struct pci_dev *pdev,
/* unmap the recvd page if we're the only or last user of it */
if (bytes >= MYRI10GE_ALLOC_SIZE / 2 ||
(info->page_offset + 2 * bytes) > MYRI10GE_ALLOC_SIZE) {
- pci_unmap_page(pdev, (pci_unmap_addr(info, bus)
+ pci_unmap_page(pdev, (dma_unmap_addr(info, bus)
& ~(MYRI10GE_ALLOC_SIZE - 1)),
MYRI10GE_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
}
@@ -1373,21 +1373,21 @@ myri10ge_tx_done(struct myri10ge_slice_state *ss, int mcp_index)
tx->info[idx].last = 0;
}
tx->done++;
- len = pci_unmap_len(&tx->info[idx], len);
- pci_unmap_len_set(&tx->info[idx], len, 0);
+ len = dma_unmap_len(&tx->info[idx], len);
+ dma_unmap_len_set(&tx->info[idx], len, 0);
if (skb) {
ss->stats.tx_bytes += skb->len;
ss->stats.tx_packets++;
dev_kfree_skb_irq(skb);
if (len)
pci_unmap_single(pdev,
- pci_unmap_addr(&tx->info[idx],
+ dma_unmap_addr(&tx->info[idx],
bus), len,
PCI_DMA_TODEVICE);
} else {
if (len)
pci_unmap_page(pdev,
- pci_unmap_addr(&tx->info[idx],
+ dma_unmap_addr(&tx->info[idx],
bus), len,
PCI_DMA_TODEVICE);
}
@@ -2094,20 +2094,20 @@ static void myri10ge_free_rings(struct myri10ge_slice_state *ss)
/* Mark as free */
tx->info[idx].skb = NULL;
tx->done++;
- len = pci_unmap_len(&tx->info[idx], len);
- pci_unmap_len_set(&tx->info[idx], len, 0);
+ len = dma_unmap_len(&tx->info[idx], len);
+ dma_unmap_len_set(&tx->info[idx], len, 0);
if (skb) {
ss->stats.tx_dropped++;
dev_kfree_skb_any(skb);
if (len)
pci_unmap_single(mgp->pdev,
- pci_unmap_addr(&tx->info[idx],
+ dma_unmap_addr(&tx->info[idx],
bus), len,
PCI_DMA_TODEVICE);
} else {
if (len)
pci_unmap_page(mgp->pdev,
- pci_unmap_addr(&tx->info[idx],
+ dma_unmap_addr(&tx->info[idx],
bus), len,
PCI_DMA_TODEVICE);
}
@@ -2757,12 +2757,12 @@ again:
}
/* map the skb for DMA */
- len = skb->len - skb->data_len;
+ len = skb_headlen(skb);
idx = tx->req & tx->mask;
tx->info[idx].skb = skb;
bus = pci_map_single(mgp->pdev, skb->data, len, PCI_DMA_TODEVICE);
- pci_unmap_addr_set(&tx->info[idx], bus, bus);
- pci_unmap_len_set(&tx->info[idx], len, len);
+ dma_unmap_addr_set(&tx->info[idx], bus, bus);
+ dma_unmap_len_set(&tx->info[idx], len, len);
frag_cnt = skb_shinfo(skb)->nr_frags;
frag_idx = 0;
@@ -2865,8 +2865,8 @@ again:
len = frag->size;
bus = pci_map_page(mgp->pdev, frag->page, frag->page_offset,
len, PCI_DMA_TODEVICE);
- pci_unmap_addr_set(&tx->info[idx], bus, bus);
- pci_unmap_len_set(&tx->info[idx], len, len);
+ dma_unmap_addr_set(&tx->info[idx], bus, bus);
+ dma_unmap_len_set(&tx->info[idx], len, len);
}
(req - rdma_count)->rdma_count = rdma_count;
@@ -2903,19 +2903,19 @@ abort_linearize:
idx = tx->req & tx->mask;
tx->info[idx].skb = NULL;
do {
- len = pci_unmap_len(&tx->info[idx], len);
+ len = dma_unmap_len(&tx->info[idx], len);
if (len) {
if (tx->info[idx].skb != NULL)
pci_unmap_single(mgp->pdev,
- pci_unmap_addr(&tx->info[idx],
+ dma_unmap_addr(&tx->info[idx],
bus), len,
PCI_DMA_TODEVICE);
else
pci_unmap_page(mgp->pdev,
- pci_unmap_addr(&tx->info[idx],
+ dma_unmap_addr(&tx->info[idx],
bus), len,
PCI_DMA_TODEVICE);
- pci_unmap_len_set(&tx->info[idx], len, 0);
+ dma_unmap_len_set(&tx->info[idx], len, 0);
tx->info[idx].skb = NULL;
}
idx = (idx + 1) & tx->mask;
@@ -3002,7 +3002,7 @@ static void myri10ge_set_multicast_list(struct net_device *dev)
{
struct myri10ge_priv *mgp = netdev_priv(dev);
struct myri10ge_cmd cmd;
- struct dev_mc_list *mc_list;
+ struct netdev_hw_addr *ha;
__be32 data[2] = { 0, 0 };
int err;
@@ -3039,8 +3039,8 @@ static void myri10ge_set_multicast_list(struct net_device *dev)
}
/* Walk the multicast list, and add each address */
- netdev_for_each_mc_addr(mc_list, dev) {
- memcpy(data, &mc_list->dmi_addr, 6);
+ netdev_for_each_mc_addr(ha, dev) {
+ memcpy(data, &ha->addr, 6);
cmd.data0 = ntohl(data[0]);
cmd.data1 = ntohl(data[1]);
err = myri10ge_send_cmd(mgp, MXGEFW_JOIN_MULTICAST_GROUP,
@@ -3048,7 +3048,7 @@ static void myri10ge_set_multicast_list(struct net_device *dev)
if (err != 0) {
netdev_err(dev, "Failed MXGEFW_JOIN_MULTICAST_GROUP, error status:%d %pM\n",
- err, mc_list->dmi_addr);
+ err, ha->addr);
goto abort;
}
}
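myri10ge above moves from the old DECLARE_PCI_UNMAP_ADDR/LEN helpers to the generic dma_unmap_* macros from <linux/dma-mapping.h>, which compile away entirely on architectures that keep no unmap state. A compressed sketch of the usual map/unmap pairing with these macros (demo_* names are illustrative; single buffer, no error handling):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

struct demo_tx_info {
        DEFINE_DMA_UNMAP_ADDR(bus);     /* dma_addr_t, or nothing, per arch */
        DEFINE_DMA_UNMAP_LEN(len);
};

static void demo_map(struct pci_dev *pdev, struct demo_tx_info *info,
                     void *data, unsigned int len)
{
        dma_addr_t bus = pci_map_single(pdev, data, len, PCI_DMA_TODEVICE);

        dma_unmap_addr_set(info, bus, bus);
        dma_unmap_len_set(info, len, len);
}

static void demo_unmap(struct pci_dev *pdev, struct demo_tx_info *info)
{
        if (dma_unmap_len(info, len))
                pci_unmap_single(pdev, dma_unmap_addr(info, bus),
                                 dma_unmap_len(info, len), PCI_DMA_TODEVICE);
        dma_unmap_len_set(info, len, 0);
}
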
diff --git a/drivers/net/myri_sbus.c b/drivers/net/myri_sbus.c
index b72e749afdf1..1a57c3da1f49 100644
--- a/drivers/net/myri_sbus.c
+++ b/drivers/net/myri_sbus.c
@@ -865,7 +865,7 @@ static inline void determine_reg_space_size(struct myri_eth *mp)
printk("myricom: AIEEE weird cpu version %04x assuming pre4.0\n",
mp->eeprom.cpuvers);
mp->reg_size = (3 * 128 * 1024) + 4096;
- };
+ }
}
#ifdef DEBUG_DETECT
@@ -928,7 +928,7 @@ static const struct net_device_ops myri_ops = {
static int __devinit myri_sbus_probe(struct of_device *op, const struct of_device_id *match)
{
- struct device_node *dp = op->node;
+ struct device_node *dp = op->dev.of_node;
static unsigned version_printed;
struct net_device *dev;
struct myri_eth *mp;
@@ -1161,8 +1161,11 @@ static const struct of_device_id myri_sbus_match[] = {
MODULE_DEVICE_TABLE(of, myri_sbus_match);
static struct of_platform_driver myri_sbus_driver = {
- .name = "myri",
- .match_table = myri_sbus_match,
+ .driver = {
+ .name = "myri",
+ .owner = THIS_MODULE,
+ .of_match_table = myri_sbus_match,
+ },
.probe = myri_sbus_probe,
.remove = __devexit_p(myri_sbus_remove),
};
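The myri_sbus hunk above (and the matching niu change later in this patch) moves the driver name and OF match table into the embedded struct device_driver, the generic driver-core layout. A minimal sketch of the resulting boilerplate (the demo driver name, match entry, and empty probe/remove are illustrative):

#include <linux/init.h>
#include <linux/module.h>
#include <linux/of_platform.h>

static const struct of_device_id demo_match[] = {
        { .name = "demo", },
        {},
};
MODULE_DEVICE_TABLE(of, demo_match);

static int __devinit demo_probe(struct of_device *op,
                                const struct of_device_id *match)
{
        /* the device node is now reached via op->dev.of_node */
        return 0;
}

static int __devexit demo_remove(struct of_device *op)
{
        return 0;
}

static struct of_platform_driver demo_driver = {
        .driver = {
                .name = "demo",
                .owner = THIS_MODULE,
                .of_match_table = demo_match,
        },
        .probe = demo_probe,
        .remove = __devexit_p(demo_remove),
};

Registration itself is unchanged; the driver is still handed to of_register_platform_driver().
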
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index e52038783245..2a17b503feaa 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -1905,7 +1905,7 @@ static void ns_tx_timeout(struct net_device *dev)
spin_unlock_irq(&np->lock);
enable_irq(dev->irq);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
np->stats.tx_errors++;
netif_wake_queue(dev);
}
@@ -2119,8 +2119,6 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
}
spin_unlock_irqrestore(&np->lock, flags);
- dev->trans_start = jiffies;
-
if (netif_msg_tx_queued(np)) {
printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
dev->name, np->cur_tx, entry);
@@ -2493,12 +2491,12 @@ static void __set_rx_mode(struct net_device *dev)
rx_mode = RxFilterEnable | AcceptBroadcast
| AcceptAllMulticast | AcceptMyPhys;
} else {
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
int i;
memset(mc_filter, 0, sizeof(mc_filter));
- netdev_for_each_mc_addr(mclist, dev) {
- int b = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 23) & 0x1ff;
+ netdev_for_each_mc_addr(ha, dev) {
+ int b = (ether_crc(ETH_ALEN, ha->addr) >> 23) & 0x1ff;
mc_filter[b/8] |= (1 << (b & 0x07));
}
rx_mode = RxFilterEnable | AcceptBroadcast
diff --git a/drivers/net/ne-h8300.c b/drivers/net/ne-h8300.c
index 7bd6662d5b04..e0b0ef11f110 100644
--- a/drivers/net/ne-h8300.c
+++ b/drivers/net/ne-h8300.c
@@ -608,7 +608,6 @@ retry:
outb_p(ENISR_RDC, NE_BASE + EN0_ISR); /* Ack intr. */
ei_status.dmaing &= ~0x01;
- return;
}
diff --git a/drivers/net/ne.c b/drivers/net/ne.c
index f4347f88b6f2..b8e2923a1d69 100644
--- a/drivers/net/ne.c
+++ b/drivers/net/ne.c
@@ -785,7 +785,6 @@ retry:
outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
ei_status.dmaing &= ~0x01;
- return;
}
static int __init ne_drv_probe(struct platform_device *pdev)
diff --git a/drivers/net/ne2.c b/drivers/net/ne2.c
index ff3c4c814988..70cdc6996342 100644
--- a/drivers/net/ne2.c
+++ b/drivers/net/ne2.c
@@ -730,7 +730,6 @@ retry:
outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
ei_status.dmaing &= ~0x01;
- return;
}
diff --git a/drivers/net/ne2k-pci.c b/drivers/net/ne2k-pci.c
index 85aec4f10131..3c333cb5d34e 100644
--- a/drivers/net/ne2k-pci.c
+++ b/drivers/net/ne2k-pci.c
@@ -631,7 +631,6 @@ static void ne2k_pci_block_output(struct net_device *dev, int count,
outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
ei_status.dmaing &= ~0x01;
- return;
}
static void ne2k_pci_get_drvinfo(struct net_device *dev,
diff --git a/drivers/net/ne3210.c b/drivers/net/ne3210.c
index a00bbfb9aed0..243ed2aee88e 100644
--- a/drivers/net/ne3210.c
+++ b/drivers/net/ne3210.c
@@ -255,8 +255,6 @@ static void ne3210_reset_8390(struct net_device *dev)
ei_status.txing = 0;
outb(0x01, ioaddr + NE3210_RESET_PORT);
if (ei_debug > 1) printk("reset done\n");
-
- return;
}
/*
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index a361dea35574..ca142c47b2e4 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -665,7 +665,8 @@ static int netconsole_netdev_event(struct notifier_block *this,
struct netconsole_target *nt;
struct net_device *dev = ptr;
- if (!(event == NETDEV_CHANGENAME || event == NETDEV_UNREGISTER))
+ if (!(event == NETDEV_CHANGENAME || event == NETDEV_UNREGISTER ||
+ event == NETDEV_BONDING_DESLAVE || event == NETDEV_GOING_DOWN))
goto done;
spin_lock_irqsave(&target_list_lock, flags);
@@ -677,19 +678,21 @@ static int netconsole_netdev_event(struct notifier_block *this,
strlcpy(nt->np.dev_name, dev->name, IFNAMSIZ);
break;
case NETDEV_UNREGISTER:
- if (!nt->enabled)
- break;
netpoll_cleanup(&nt->np);
+ /* Fall through */
+ case NETDEV_GOING_DOWN:
+ case NETDEV_BONDING_DESLAVE:
nt->enabled = 0;
- printk(KERN_INFO "netconsole: network logging stopped"
- ", interface %s unregistered\n",
- dev->name);
break;
}
}
netconsole_target_put(nt);
}
spin_unlock_irqrestore(&target_list_lock, flags);
+ if (event == NETDEV_UNREGISTER || event == NETDEV_BONDING_DESLAVE)
+ printk(KERN_INFO "netconsole: network logging stopped, "
+ "interface %s %s\n", dev->name,
+ event == NETDEV_UNREGISTER ? "unregistered" : "released slaves");
done:
return NOTIFY_DONE;
diff --git a/drivers/net/netx-eth.c b/drivers/net/netx-eth.c
index 64770298c4f7..2e4b42175f3f 100644
--- a/drivers/net/netx-eth.c
+++ b/drivers/net/netx-eth.c
@@ -126,7 +126,6 @@ netx_eth_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
FIFO_PTR_FRAMENO(1) |
FIFO_PTR_FRAMELEN(len));
- ndev->trans_start = jiffies;
ndev->stats.tx_packets++;
ndev->stats.tx_bytes += skb->len;
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index 0f703838e21a..ffa1b9ce1cc5 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -95,6 +95,9 @@
#define ADDR_IN_WINDOW1(off) \
((off > NETXEN_CRB_PCIX_HOST2) && (off < NETXEN_CRB_MAX)) ? 1 : 0
+#define ADDR_IN_RANGE(addr, low, high) \
+ (((addr) < (high)) && ((addr) >= (low)))
+
/*
* normalize a 64MB crb address to 32MB PCI window
* To use NETXEN_CRB_NORMALIZE, window _must_ be set to 1
@@ -420,7 +423,6 @@ struct status_desc {
} __attribute__ ((aligned(16)));
/* UNIFIED ROMIMAGE *************************/
-#define NX_UNI_FW_MIN_SIZE 0xc8000
#define NX_UNI_DIR_SECT_PRODUCT_TBL 0x0
#define NX_UNI_DIR_SECT_BOOTLD 0x6
#define NX_UNI_DIR_SECT_FW 0x7
@@ -1353,6 +1355,8 @@ int netxen_config_rss(struct netxen_adapter *adapter, int enable);
int netxen_config_ipaddr(struct netxen_adapter *adapter, u32 ip, int cmd);
int netxen_linkevent_request(struct netxen_adapter *adapter, int enable);
void netxen_advert_link_change(struct netxen_adapter *adapter, int linkup);
+void netxen_pci_camqm_read_2M(struct netxen_adapter *, u64, u64 *);
+void netxen_pci_camqm_write_2M(struct netxen_adapter *, u64, u64);
int nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu);
int netxen_nic_change_mtu(struct net_device *netdev, int new_mtu);
diff --git a/drivers/net/netxen/netxen_nic_ethtool.c b/drivers/net/netxen/netxen_nic_ethtool.c
index f8499e56cbee..20f7c58bd092 100644
--- a/drivers/net/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/netxen/netxen_nic_ethtool.c
@@ -632,6 +632,9 @@ static int netxen_nic_reg_test(struct net_device *dev)
if ((data_read & 0xffff) != adapter->pdev->vendor)
return 1;
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+ return 0;
+
data_written = (u32)0xa5a5a5a5;
NXWR32(adapter, CRB_SCRATCHPAD_TEST, data_written);
@@ -703,6 +706,11 @@ netxen_nic_get_ethtool_stats(struct net_device *dev,
}
}
+static u32 netxen_nic_get_tx_csum(struct net_device *dev)
+{
+ return dev->features & NETIF_F_IP_CSUM;
+}
+
static u32 netxen_nic_get_rx_csum(struct net_device *dev)
{
struct netxen_adapter *adapter = netdev_priv(dev);
@@ -909,6 +917,7 @@ const struct ethtool_ops netxen_nic_ethtool_ops = {
.set_ringparam = netxen_nic_set_ringparam,
.get_pauseparam = netxen_nic_get_pauseparam,
.set_pauseparam = netxen_nic_set_pauseparam,
+ .get_tx_csum = netxen_nic_get_tx_csum,
.set_tx_csum = ethtool_op_set_tx_csum,
.set_sg = ethtool_op_set_sg,
.get_tso = netxen_nic_get_tso,
diff --git a/drivers/net/netxen/netxen_nic_hdr.h b/drivers/net/netxen/netxen_nic_hdr.h
index 622e4c8be937..d8bd73d7e296 100644
--- a/drivers/net/netxen/netxen_nic_hdr.h
+++ b/drivers/net/netxen/netxen_nic_hdr.h
@@ -681,14 +681,8 @@ enum {
#define MIU_TEST_AGT_ADDR_HI (0x08)
#define MIU_TEST_AGT_WRDATA_LO (0x10)
#define MIU_TEST_AGT_WRDATA_HI (0x14)
-#define MIU_TEST_AGT_WRDATA_UPPER_LO (0x20)
-#define MIU_TEST_AGT_WRDATA_UPPER_HI (0x24)
-#define MIU_TEST_AGT_WRDATA(i) (0x10+(0x10*((i)>>1))+(4*((i)&1)))
#define MIU_TEST_AGT_RDDATA_LO (0x18)
#define MIU_TEST_AGT_RDDATA_HI (0x1c)
-#define MIU_TEST_AGT_RDDATA_UPPER_LO (0x28)
-#define MIU_TEST_AGT_RDDATA_UPPER_HI (0x2c)
-#define MIU_TEST_AGT_RDDATA(i) (0x18+(0x10*((i)>>1))+(4*((i)&1)))
#define MIU_TEST_AGT_ADDR_MASK 0xfffffff8
#define MIU_TEST_AGT_UPPER_ADDR(off) (0)
@@ -789,9 +783,7 @@ enum {
* for backward compatibility
*/
#define CRB_NIC_CAPABILITIES_HOST NETXEN_NIC_REG(0x1a8)
-#define CRB_NIC_CAPABILITIES_FW NETXEN_NIC_REG(0x1dc)
#define CRB_NIC_MSI_MODE_HOST NETXEN_NIC_REG(0x270)
-#define CRB_NIC_MSI_MODE_FW NETXEN_NIC_REG(0x274)
#define INTR_SCHEME_PERPORT 0x1
#define MSI_MODE_MULTIFUNC 0x1
diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c
index b1cf46a0c48c..5c496f8d7c49 100644
--- a/drivers/net/netxen/netxen_nic_hw.c
+++ b/drivers/net/netxen/netxen_nic_hw.c
@@ -32,7 +32,6 @@
#define MASK(n) ((1ULL<<(n))-1)
#define MN_WIN(addr) (((addr & 0x1fc0000) >> 1) | ((addr >> 25) & 0x3ff))
#define OCM_WIN(addr) (((addr & 0x1ff0000) >> 1) | ((addr >> 25) & 0x3ff))
-#define OCM_WIN_P3P(addr) (addr & 0xffc0000)
#define MS_WIN(addr) (addr & 0x0ffc0000)
#define GET_MEM_OFFS_2M(addr) (addr & MASK(18))
@@ -63,9 +62,6 @@ static inline void writeq(u64 val, void __iomem *addr)
}
#endif
-#define ADDR_IN_RANGE(addr, low, high) \
- (((addr) < (high)) && ((addr) >= (low)))
-
#define PCI_OFFSET_FIRST_RANGE(adapter, off) \
((adapter)->ahw.pci_base0 + (off))
#define PCI_OFFSET_SECOND_RANGE(adapter, off) \
@@ -538,7 +534,7 @@ netxen_nic_set_mcast_addr(struct netxen_adapter *adapter,
void netxen_p2_nic_set_multi(struct net_device *netdev)
{
struct netxen_adapter *adapter = netdev_priv(netdev);
- struct dev_mc_list *mc_ptr;
+ struct netdev_hw_addr *ha;
u8 null_addr[6];
int i;
@@ -572,8 +568,8 @@ void netxen_p2_nic_set_multi(struct net_device *netdev)
netxen_nic_enable_mcast_filter(adapter);
i = 0;
- netdev_for_each_mc_addr(mc_ptr, netdev)
- netxen_nic_set_mcast_addr(adapter, i++, mc_ptr->dmi_addr);
+ netdev_for_each_mc_addr(ha, netdev)
+ netxen_nic_set_mcast_addr(adapter, i++, ha->addr);
/* Clear out remaining addresses */
while (i < adapter->max_mc_count)
@@ -681,7 +677,7 @@ static int nx_p3_nic_add_mac(struct netxen_adapter *adapter,
void netxen_p3_nic_set_multi(struct net_device *netdev)
{
struct netxen_adapter *adapter = netdev_priv(netdev);
- struct dev_mc_list *mc_ptr;
+ struct netdev_hw_addr *ha;
u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
u32 mode = VPORT_MISS_MODE_DROP;
LIST_HEAD(del_list);
@@ -708,8 +704,8 @@ void netxen_p3_nic_set_multi(struct net_device *netdev)
}
if (!netdev_mc_empty(netdev)) {
- netdev_for_each_mc_addr(mc_ptr, netdev)
- nx_p3_nic_add_mac(adapter, mc_ptr->dmi_addr, &del_list);
+ netdev_for_each_mc_addr(ha, netdev)
+ nx_p3_nic_add_mac(adapter, ha->addr, &del_list);
}
send_fw_cmd:
@@ -1391,18 +1387,8 @@ netxen_nic_pci_set_window_2M(struct netxen_adapter *adapter,
u64 addr, u32 *start)
{
u32 window;
- struct pci_dev *pdev = adapter->pdev;
- if ((addr & 0x00ff800) == 0xff800) {
- if (printk_ratelimit())
- dev_warn(&pdev->dev, "QM access not handled\n");
- return -EIO;
- }
-
- if (NX_IS_REVISION_P3P(adapter->ahw.revision_id))
- window = OCM_WIN_P3P(addr);
- else
- window = OCM_WIN(addr);
+ window = OCM_WIN(addr);
writel(window, adapter->ahw.ocm_win_crb);
/* read back to flush */
@@ -1419,7 +1405,7 @@ netxen_nic_pci_mem_access_direct(struct netxen_adapter *adapter, u64 off,
{
void __iomem *addr, *mem_ptr = NULL;
resource_size_t mem_base;
- int ret = -EIO;
+ int ret;
u32 start;
spin_lock(&adapter->ahw.mem_lock);
@@ -1428,20 +1414,23 @@ netxen_nic_pci_mem_access_direct(struct netxen_adapter *adapter, u64 off,
if (ret != 0)
goto unlock;
- addr = pci_base_offset(adapter, start);
- if (addr)
- goto noremap;
-
- mem_base = pci_resource_start(adapter->pdev, 0) + (start & PAGE_MASK);
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+ addr = adapter->ahw.pci_base0 + start;
+ } else {
+ addr = pci_base_offset(adapter, start);
+ if (addr)
+ goto noremap;
+
+ mem_base = pci_resource_start(adapter->pdev, 0) +
+ (start & PAGE_MASK);
+ mem_ptr = ioremap(mem_base, PAGE_SIZE);
+ if (mem_ptr == NULL) {
+ ret = -EIO;
+ goto unlock;
+ }
- mem_ptr = ioremap(mem_base, PAGE_SIZE);
- if (mem_ptr == NULL) {
- ret = -EIO;
- goto unlock;
+ addr = mem_ptr + (start & (PAGE_SIZE-1));
}
-
- addr = mem_ptr + (start & (PAGE_SIZE - 1));
-
noremap:
if (op == 0) /* read */
*data = readq(addr);
@@ -1456,6 +1445,28 @@ unlock:
return ret;
}
+void
+netxen_pci_camqm_read_2M(struct netxen_adapter *adapter, u64 off, u64 *data)
+{
+ void __iomem *addr = adapter->ahw.pci_base0 +
+ NETXEN_PCI_CAMQM_2M_BASE + (off - NETXEN_PCI_CAMQM);
+
+ spin_lock(&adapter->ahw.mem_lock);
+ *data = readq(addr);
+ spin_unlock(&adapter->ahw.mem_lock);
+}
+
+void
+netxen_pci_camqm_write_2M(struct netxen_adapter *adapter, u64 off, u64 data)
+{
+ void __iomem *addr = adapter->ahw.pci_base0 +
+ NETXEN_PCI_CAMQM_2M_BASE + (off - NETXEN_PCI_CAMQM);
+
+ spin_lock(&adapter->ahw.mem_lock);
+ writeq(data, addr);
+ spin_unlock(&adapter->ahw.mem_lock);
+}
+
#define MAX_CTL_CHECK 1000
static int
@@ -1621,9 +1632,8 @@ static int
netxen_nic_pci_mem_write_2M(struct netxen_adapter *adapter,
u64 off, u64 data)
{
- int i, j, ret;
+ int j, ret;
u32 temp, off8;
- u64 stride;
void __iomem *mem_crb;
/* Only 64-bit aligned access */
@@ -1650,44 +1660,17 @@ netxen_nic_pci_mem_write_2M(struct netxen_adapter *adapter,
return -EIO;
correct:
- stride = NX_IS_REVISION_P3P(adapter->ahw.revision_id) ? 16 : 8;
-
- off8 = off & ~(stride-1);
+ off8 = off & 0xfffffff8;
spin_lock(&adapter->ahw.mem_lock);
writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO));
writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI));
- i = 0;
- if (stride == 16) {
- writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL));
- writel((TA_CTL_START | TA_CTL_ENABLE),
- (mem_crb + TEST_AGT_CTRL));
-
- for (j = 0; j < MAX_CTL_CHECK; j++) {
- temp = readl(mem_crb + TEST_AGT_CTRL);
- if ((temp & TA_CTL_BUSY) == 0)
- break;
- }
-
- if (j >= MAX_CTL_CHECK) {
- ret = -EIO;
- goto done;
- }
-
- i = (off & 0xf) ? 0 : 2;
- writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i)),
- mem_crb + MIU_TEST_AGT_WRDATA(i));
- writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i+1)),
- mem_crb + MIU_TEST_AGT_WRDATA(i+1));
- i = (off & 0xf) ? 2 : 0;
- }
-
writel(data & 0xffffffff,
- mem_crb + MIU_TEST_AGT_WRDATA(i));
+ mem_crb + MIU_TEST_AGT_WRDATA_LO);
writel((data >> 32) & 0xffffffff,
- mem_crb + MIU_TEST_AGT_WRDATA(i+1));
+ mem_crb + MIU_TEST_AGT_WRDATA_HI);
writel((TA_CTL_ENABLE | TA_CTL_WRITE), (mem_crb + TEST_AGT_CTRL));
writel((TA_CTL_START | TA_CTL_ENABLE | TA_CTL_WRITE),
@@ -1707,7 +1690,6 @@ correct:
} else
ret = 0;
-done:
spin_unlock(&adapter->ahw.mem_lock);
return ret;
@@ -1719,7 +1701,7 @@ netxen_nic_pci_mem_read_2M(struct netxen_adapter *adapter,
{
int j, ret;
u32 temp, off8;
- u64 val, stride;
+ u64 val;
void __iomem *mem_crb;
/* Only 64-bit aligned access */
@@ -1748,9 +1730,7 @@ netxen_nic_pci_mem_read_2M(struct netxen_adapter *adapter,
return -EIO;
correct:
- stride = NX_IS_REVISION_P3P(adapter->ahw.revision_id) ? 16 : 8;
-
- off8 = off & ~(stride-1);
+ off8 = off & 0xfffffff8;
spin_lock(&adapter->ahw.mem_lock);
@@ -1771,13 +1751,8 @@ correct:
"failed to read through agent\n");
ret = -EIO;
} else {
- off8 = MIU_TEST_AGT_RDDATA_LO;
- if ((stride == 16) && (off & 0xf))
- off8 = MIU_TEST_AGT_RDDATA_UPPER_LO;
-
- temp = readl(mem_crb + off8 + 4);
- val = (u64)temp << 32;
- val |= readl(mem_crb + off8);
+ val = (u64)(readl(mem_crb + MIU_TEST_AGT_RDDATA_HI)) << 32;
+ val |= readl(mem_crb + MIU_TEST_AGT_RDDATA_LO);
*data = val;
ret = 0;
}
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index 02876f59cbb2..045a7c8f5bdf 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -614,22 +614,123 @@ static struct uni_table_desc *nx_get_table_desc(const u8 *unirom, int section)
return NULL;
}
+#define QLCNIC_FILEHEADER_SIZE (14 * 4)
+
static int
-nx_set_product_offs(struct netxen_adapter *adapter)
-{
- struct uni_table_desc *ptab_descr;
+netxen_nic_validate_header(struct netxen_adapter *adapter)
+ {
const u8 *unirom = adapter->fw->data;
- uint32_t i;
+ struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0];
+ u32 fw_file_size = adapter->fw->size;
+ u32 tab_size;
__le32 entries;
+ __le32 entry_size;
+
+ if (fw_file_size < QLCNIC_FILEHEADER_SIZE)
+ return -EINVAL;
+
+ entries = cpu_to_le32(directory->num_entries);
+ entry_size = cpu_to_le32(directory->entry_size);
+ tab_size = cpu_to_le32(directory->findex) + (entries * entry_size);
+
+ if (fw_file_size < tab_size)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+netxen_nic_validate_bootld(struct netxen_adapter *adapter)
+{
+ struct uni_table_desc *tab_desc;
+ struct uni_data_desc *descr;
+ const u8 *unirom = adapter->fw->data;
+ __le32 idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] +
+ NX_UNI_BOOTLD_IDX_OFF));
+ u32 offs;
+ u32 tab_size;
+ u32 data_size;
+
+ tab_desc = nx_get_table_desc(unirom, NX_UNI_DIR_SECT_BOOTLD);
+
+ if (!tab_desc)
+ return -EINVAL;
+
+ tab_size = cpu_to_le32(tab_desc->findex) +
+ (cpu_to_le32(tab_desc->entry_size) * (idx + 1));
+
+ if (adapter->fw->size < tab_size)
+ return -EINVAL;
+
+ offs = cpu_to_le32(tab_desc->findex) +
+ (cpu_to_le32(tab_desc->entry_size) * (idx));
+ descr = (struct uni_data_desc *)&unirom[offs];
+
+ data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size);
+
+ if (adapter->fw->size < data_size)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+netxen_nic_validate_fw(struct netxen_adapter *adapter)
+{
+ struct uni_table_desc *tab_desc;
+ struct uni_data_desc *descr;
+ const u8 *unirom = adapter->fw->data;
+ __le32 idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] +
+ NX_UNI_FIRMWARE_IDX_OFF));
+ u32 offs;
+ u32 tab_size;
+ u32 data_size;
+
+ tab_desc = nx_get_table_desc(unirom, NX_UNI_DIR_SECT_FW);
+
+ if (!tab_desc)
+ return -EINVAL;
+
+ tab_size = cpu_to_le32(tab_desc->findex) +
+ (cpu_to_le32(tab_desc->entry_size) * (idx + 1));
+
+ if (adapter->fw->size < tab_size)
+ return -EINVAL;
+
+ offs = cpu_to_le32(tab_desc->findex) +
+ (cpu_to_le32(tab_desc->entry_size) * (idx));
+ descr = (struct uni_data_desc *)&unirom[offs];
+ data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size);
+
+ if (adapter->fw->size < data_size)
+ return -EINVAL;
+ return 0;
+}
+
+
+static int
+netxen_nic_validate_product_offs(struct netxen_adapter *adapter)
+{
+ struct uni_table_desc *ptab_descr;
+ const u8 *unirom = adapter->fw->data;
int mn_present = (NX_IS_REVISION_P2(adapter->ahw.revision_id)) ?
1 : netxen_p3_has_mn(adapter);
+ __le32 entries;
+ __le32 entry_size;
+ u32 tab_size;
+ u32 i;
ptab_descr = nx_get_table_desc(unirom, NX_UNI_DIR_SECT_PRODUCT_TBL);
if (ptab_descr == NULL)
- return -1;
+ return -EINVAL;
entries = cpu_to_le32(ptab_descr->num_entries);
+ entry_size = cpu_to_le32(ptab_descr->entry_size);
+ tab_size = cpu_to_le32(ptab_descr->findex) + (entries * entry_size);
+
+ if (adapter->fw->size < tab_size)
+ return -EINVAL;
nomn:
for (i = 0; i < entries; i++) {
@@ -658,9 +759,38 @@ nomn:
goto nomn;
}
- return -1;
+ return -EINVAL;
}
+static int
+netxen_nic_validate_unified_romimage(struct netxen_adapter *adapter)
+{
+ if (netxen_nic_validate_header(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "unified image: header validation failed\n");
+ return -EINVAL;
+ }
+
+ if (netxen_nic_validate_product_offs(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "unified image: product validation failed\n");
+ return -EINVAL;
+ }
+
+ if (netxen_nic_validate_bootld(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "unified image: bootld validation failed\n");
+ return -EINVAL;
+ }
+
+ if (netxen_nic_validate_fw(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "unified image: firmware validation failed\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
static struct uni_data_desc *nx_get_data_desc(struct netxen_adapter *adapter,
u32 section, u32 idx_offset)
@@ -890,6 +1020,16 @@ netxen_load_firmware(struct netxen_adapter *adapter)
flashaddr += 8;
}
+
+ size = (__force u32)nx_get_fw_size(adapter) % 8;
+ if (size) {
+ data = cpu_to_le64(ptr64[i]);
+
+ if (adapter->pci_mem_write(adapter,
+ flashaddr, data))
+ return -EIO;
+ }
+
} else {
u64 data;
u32 hi, lo;
@@ -934,27 +1074,23 @@ static int
netxen_validate_firmware(struct netxen_adapter *adapter)
{
__le32 val;
- u32 ver, min_ver, bios, min_size;
+ u32 ver, min_ver, bios;
struct pci_dev *pdev = adapter->pdev;
const struct firmware *fw = adapter->fw;
u8 fw_type = adapter->fw_type;
if (fw_type == NX_UNIFIED_ROMIMAGE) {
- if (nx_set_product_offs(adapter))
+ if (netxen_nic_validate_unified_romimage(adapter))
return -EINVAL;
-
- min_size = NX_UNI_FW_MIN_SIZE;
} else {
val = cpu_to_le32(*(u32 *)&fw->data[NX_FW_MAGIC_OFFSET]);
if ((__force u32)val != NETXEN_BDINFO_MAGIC)
return -EINVAL;
- min_size = NX_FW_MIN_SIZE;
+ if (fw->size < NX_FW_MIN_SIZE)
+ return -EINVAL;
}
- if (fw->size < min_size)
- return -EINVAL;
-
val = nx_get_fw_version(adapter);
if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
@@ -1225,10 +1361,12 @@ int netxen_init_firmware(struct netxen_adapter *adapter)
return err;
NXWR32(adapter, CRB_NIC_CAPABILITIES_HOST, INTR_SCHEME_PERPORT);
- NXWR32(adapter, CRB_NIC_MSI_MODE_HOST, MSI_MODE_MULTIFUNC);
NXWR32(adapter, CRB_MPORT_MODE, MPORT_MULTI_FUNCTION_MODE);
NXWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_ACK);
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+ NXWR32(adapter, CRB_NIC_MSI_MODE_HOST, MSI_MODE_MULTIFUNC);
+
return err;
}
@@ -1763,6 +1901,5 @@ netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter,
void netxen_nic_clear_stats(struct netxen_adapter *adapter)
{
memset(&adapter->stats, 0, sizeof(adapter->stats));
- return;
}
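The new netxen_nic_validate_*() helpers above all apply the same bounds check: compute where a table or data section ends (findex plus num_entries times entry_size, or findex plus size) and reject the unified image if that end runs past adapter->fw->size. The generic form of the check, pulled out as a standalone helper for illustration (demo_table_fits() is not part of the patch; the 64-bit sum is a defensive touch of this sketch, the patch itself sums in 32 bits):

#include <linux/errno.h>
#include <linux/types.h>

static int demo_table_fits(u32 findex, u32 num_entries, u32 entry_size,
                           u32 fw_size)
{
        /* widen before multiplying so a crafted header cannot wrap the sum */
        u64 end = (u64)findex + (u64)num_entries * entry_size;

        return end <= fw_size ? 0 : -EINVAL;
}
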
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index ce838f7c8b0f..6ce6ce1df6d2 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -782,15 +782,22 @@ netxen_check_options(struct netxen_adapter *adapter)
if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
adapter->msix_supported = !!use_msi_x;
adapter->rss_supported = !!use_msi_x;
- } else if (adapter->fw_version >= NETXEN_VERSION_CODE(3, 4, 336)) {
- switch (adapter->ahw.board_type) {
- case NETXEN_BRDTYPE_P2_SB31_10G:
- case NETXEN_BRDTYPE_P2_SB31_10G_CX4:
- adapter->msix_supported = !!use_msi_x;
- adapter->rss_supported = !!use_msi_x;
- break;
- default:
- break;
+ } else {
+ u32 flashed_ver = 0;
+ netxen_rom_fast_read(adapter,
+ NX_FW_VERSION_OFFSET, (int *)&flashed_ver);
+ flashed_ver = NETXEN_DECODE_VERSION(flashed_ver);
+
+ if (flashed_ver >= NETXEN_VERSION_CODE(3, 4, 336)) {
+ switch (adapter->ahw.board_type) {
+ case NETXEN_BRDTYPE_P2_SB31_10G:
+ case NETXEN_BRDTYPE_P2_SB31_10G_CX4:
+ adapter->msix_supported = !!use_msi_x;
+ adapter->rss_supported = !!use_msi_x;
+ break;
+ default:
+ break;
+ }
}
}
@@ -2304,6 +2311,7 @@ netxen_fwinit_work(struct work_struct *work)
}
break;
+ case NX_DEV_NEED_RESET:
case NX_DEV_INITALIZING:
if (++adapter->fw_wait_cnt < FW_POLL_THRESH) {
netxen_schedule_work(adapter,
@@ -2347,6 +2355,9 @@ netxen_detach_work(struct work_struct *work)
ref_cnt = nx_decr_dev_ref_cnt(adapter);
+ if (ref_cnt == -EIO)
+ goto err_ret;
+
delay = (ref_cnt == 0) ? 0 : (2 * FW_POLL_DELAY);
adapter->fw_wait_cnt = 0;
@@ -2526,51 +2537,81 @@ static int
netxen_sysfs_validate_crb(struct netxen_adapter *adapter,
loff_t offset, size_t size)
{
+ size_t crb_size = 4;
+
if (!(adapter->flags & NETXEN_NIC_DIAG_ENABLED))
return -EIO;
- if ((size != 4) || (offset & 0x3))
- return -EINVAL;
+ if (offset < NETXEN_PCI_CRBSPACE) {
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+ return -EINVAL;
- if (offset < NETXEN_PCI_CRBSPACE)
- return -EINVAL;
+ if (ADDR_IN_RANGE(offset, NETXEN_PCI_CAMQM,
+ NETXEN_PCI_CAMQM_2M_END))
+ crb_size = 8;
+ else
+ return -EINVAL;
+ }
+
+ if ((size != crb_size) || (offset & (crb_size-1)))
+ return -EINVAL;
return 0;
}
static ssize_t
-netxen_sysfs_read_crb(struct kobject *kobj, struct bin_attribute *attr,
+netxen_sysfs_read_crb(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
char *buf, loff_t offset, size_t size)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct netxen_adapter *adapter = dev_get_drvdata(dev);
u32 data;
+ u64 qmdata;
int ret;
ret = netxen_sysfs_validate_crb(adapter, offset, size);
if (ret != 0)
return ret;
- data = NXRD32(adapter, offset);
- memcpy(buf, &data, size);
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id) &&
+ ADDR_IN_RANGE(offset, NETXEN_PCI_CAMQM,
+ NETXEN_PCI_CAMQM_2M_END)) {
+ netxen_pci_camqm_read_2M(adapter, offset, &qmdata);
+ memcpy(buf, &qmdata, size);
+ } else {
+ data = NXRD32(adapter, offset);
+ memcpy(buf, &data, size);
+ }
+
return size;
}
static ssize_t
-netxen_sysfs_write_crb(struct kobject *kobj, struct bin_attribute *attr,
+netxen_sysfs_write_crb(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
char *buf, loff_t offset, size_t size)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct netxen_adapter *adapter = dev_get_drvdata(dev);
u32 data;
+ u64 qmdata;
int ret;
ret = netxen_sysfs_validate_crb(adapter, offset, size);
if (ret != 0)
return ret;
- memcpy(&data, buf, size);
- NXWR32(adapter, offset, data);
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id) &&
+ ADDR_IN_RANGE(offset, NETXEN_PCI_CAMQM,
+ NETXEN_PCI_CAMQM_2M_END)) {
+ memcpy(&qmdata, buf, size);
+ netxen_pci_camqm_write_2M(adapter, offset, qmdata);
+ } else {
+ memcpy(&data, buf, size);
+ NXWR32(adapter, offset, data);
+ }
+
return size;
}
@@ -2588,7 +2629,8 @@ netxen_sysfs_validate_mem(struct netxen_adapter *adapter,
}
static ssize_t
-netxen_sysfs_read_mem(struct kobject *kobj, struct bin_attribute *attr,
+netxen_sysfs_read_mem(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
char *buf, loff_t offset, size_t size)
{
struct device *dev = container_of(kobj, struct device, kobj);
@@ -2608,7 +2650,7 @@ netxen_sysfs_read_mem(struct kobject *kobj, struct bin_attribute *attr,
return size;
}
-static ssize_t netxen_sysfs_write_mem(struct kobject *kobj,
+static ssize_t netxen_sysfs_write_mem(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t offset, size_t size)
{
@@ -2742,7 +2784,6 @@ netxen_config_indev_addr(struct net_device *dev, unsigned long event)
} endfor_ifa(indev);
in_dev_put(indev);
- return;
}
static int netxen_netdev_event(struct notifier_block *this,
diff --git a/drivers/net/ni5010.c b/drivers/net/ni5010.c
index 3892330f244a..4d3f2e2b28bd 100644
--- a/drivers/net/ni5010.c
+++ b/drivers/net/ni5010.c
@@ -444,7 +444,7 @@ static void ni5010_timeout(struct net_device *dev)
/* Try to restart the adaptor. */
/* FIXME: Give it a real kick here */
chipset_init(dev, 1);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
}
@@ -460,7 +460,6 @@ static int ni5010_send_packet(struct sk_buff *skb, struct net_device *dev)
netif_stop_queue(dev);
hardware_send_packet(dev, (unsigned char *)skb->data, skb->len, length-skb->len);
- dev->trans_start = jiffies;
dev_kfree_skb (skb);
return NETDEV_TX_OK;
}
@@ -515,8 +514,6 @@ static void dump_packet(void *buf, int len)
if (i % 16 == 15) printk("\n");
}
printk("\n");
-
- return;
}
/* We have a good packet, get it out of the buffer. */
diff --git a/drivers/net/ni52.c b/drivers/net/ni52.c
index f7a8f707361e..9bddb5fa7a96 100644
--- a/drivers/net/ni52.c
+++ b/drivers/net/ni52.c
@@ -595,7 +595,7 @@ static int init586(struct net_device *dev)
struct iasetup_cmd_struct __iomem *ias_cmd;
struct tdr_cmd_struct __iomem *tdr_cmd;
struct mcsetup_cmd_struct __iomem *mc_cmd;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
int num_addrs = netdev_mc_count(dev);
ptr = p->scb + 1;
@@ -724,8 +724,8 @@ static int init586(struct net_device *dev)
writew(num_addrs * 6, &mc_cmd->mc_cnt);
i = 0;
- netdev_for_each_mc_addr(dmi, dev)
- memcpy_toio(mc_cmd->mc_list[i++], dmi->dmi_addr, 6);
+ netdev_for_each_mc_addr(ha, dev)
+ memcpy_toio(mc_cmd->mc_list[i++], ha->addr, 6);
writew(make16(mc_cmd), &p->scb->cbl_offset);
writeb(CUC_START, &p->scb->cmd_cuc);
@@ -1147,7 +1147,7 @@ static void ni52_timeout(struct net_device *dev)
writeb(CUC_START, &p->scb->cmd_cuc);
ni_attn586();
wait_for_scb_cmd(dev);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
return 0;
}
#endif
@@ -1165,7 +1165,7 @@ static void ni52_timeout(struct net_device *dev)
ni52_close(dev);
ni52_open(dev);
}
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
}
/******************************************************
@@ -1218,7 +1218,6 @@ static netdev_tx_t ni52_send_packet(struct sk_buff *skb,
writeb(CUC_START, &p->scb->cmd_cuc);
}
ni_attn586();
- dev->trans_start = jiffies;
if (!i)
dev_kfree_skb(skb);
wait_for_scb_cmd(dev);
@@ -1240,7 +1239,6 @@ static netdev_tx_t ni52_send_packet(struct sk_buff *skb,
writew(0, &p->nop_cmds[next_nop]->cmd_status);
writew(make16(p->xmit_cmds[0]), &p->nop_cmds[p->nop_point]->cmd_link);
- dev->trans_start = jiffies;
p->nop_point = next_nop;
dev_kfree_skb(skb);
# endif
@@ -1256,7 +1254,6 @@ static netdev_tx_t ni52_send_packet(struct sk_buff *skb,
writew(0, &p->nop_cmds[next_nop]->cmd_status);
writew(make16(p->xmit_cmds[p->xmit_count]),
&p->nop_cmds[p->xmit_count]->cmd_link);
- dev->trans_start = jiffies;
p->xmit_count = next_nop;
{
unsigned long flags;
diff --git a/drivers/net/ni65.c b/drivers/net/ni65.c
index 9225c76cac40..da228a0dd6cd 100644
--- a/drivers/net/ni65.c
+++ b/drivers/net/ni65.c
@@ -784,7 +784,7 @@ static void ni65_stop_start(struct net_device *dev,struct priv *p)
if(!p->lock)
if (p->tmdnum || !p->xmit_queued)
netif_wake_queue(dev);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
}
else
writedatareg(CSR0_STRT | csr0);
@@ -1150,7 +1150,7 @@ static void ni65_timeout(struct net_device *dev)
printk("%02x ",p->tmdhead[i].u.s.status);
printk("\n");
ni65_lance_reinit(dev);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
}
@@ -1213,7 +1213,6 @@ static netdev_tx_t ni65_send_packet(struct sk_buff *skb,
netif_wake_queue(dev);
p->lock = 0;
- dev->trans_start = jiffies;
spin_unlock_irqrestore(&p->ring_lock, flags);
}
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index d5cd16bfc907..63e8e3893bd6 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -36,8 +36,8 @@
#include "niu.h"
#define DRV_MODULE_NAME "niu"
-#define DRV_MODULE_VERSION "1.0"
-#define DRV_MODULE_RELDATE "Nov 14, 2008"
+#define DRV_MODULE_VERSION "1.1"
+#define DRV_MODULE_RELDATE "Apr 22, 2010"
static char version[] __devinitdata =
DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
@@ -3444,6 +3444,7 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
struct rx_ring_info *rp)
{
unsigned int index = rp->rcr_index;
+ struct rx_pkt_hdr1 *rh;
struct sk_buff *skb;
int len, num_rcr;
@@ -3477,9 +3478,6 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
if (num_rcr == 1) {
int ptype;
- off += 2;
- append_size -= 2;
-
ptype = (val >> RCR_ENTRY_PKT_TYPE_SHIFT);
if ((ptype == RCR_PKT_TYPE_TCP ||
ptype == RCR_PKT_TYPE_UDP) &&
@@ -3488,8 +3486,7 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
skb->ip_summed = CHECKSUM_NONE;
- }
- if (!(val & RCR_ENTRY_MULTI))
+ } else if (!(val & RCR_ENTRY_MULTI))
append_size = len - skb->len;
niu_rx_skb_append(skb, page, off, append_size);
@@ -3510,8 +3507,17 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
}
rp->rcr_index = index;
- skb_reserve(skb, NET_IP_ALIGN);
- __pskb_pull_tail(skb, min(len, VLAN_ETH_HLEN));
+ len += sizeof(*rh);
+ len = min_t(int, len, sizeof(*rh) + VLAN_ETH_HLEN);
+ __pskb_pull_tail(skb, len);
+
+ rh = (struct rx_pkt_hdr1 *) skb->data;
+ if (np->dev->features & NETIF_F_RXHASH)
+ skb->rxhash = ((u32)rh->hashval2_0 << 24 |
+ (u32)rh->hashval2_1 << 16 |
+ (u32)rh->hashval1_1 << 8 |
+ (u32)rh->hashval1_2 << 0);
+ skb_pull(skb, sizeof(*rh));
rp->rx_packets++;
rp->rx_bytes += skb->len;
@@ -4946,7 +4952,9 @@ static int niu_init_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
RX_DMA_CTL_STAT_RCRTO |
RX_DMA_CTL_STAT_RBR_EMPTY));
nw64(RXDMA_CFIG1(channel), rp->mbox_dma >> 32);
- nw64(RXDMA_CFIG2(channel), (rp->mbox_dma & 0x00000000ffffffc0));
+ nw64(RXDMA_CFIG2(channel),
+ ((rp->mbox_dma & RXDMA_CFIG2_MBADDR_L) |
+ RXDMA_CFIG2_FULL_HDR));
nw64(RBR_CFIG_A(channel),
((u64)rp->rbr_table_size << RBR_CFIG_A_LEN_SHIFT) |
(rp->rbr_dma & (RBR_CFIG_A_STADDR_BASE | RBR_CFIG_A_STADDR)));
@@ -6314,7 +6322,6 @@ static void niu_set_rx_mode(struct net_device *dev)
{
struct niu *np = netdev_priv(dev);
int i, alt_cnt, err;
- struct dev_addr_list *addr;
struct netdev_hw_addr *ha;
unsigned long flags;
u16 hash[16] = { 0, };
@@ -6366,8 +6373,8 @@ static void niu_set_rx_mode(struct net_device *dev)
for (i = 0; i < 16; i++)
hash[i] = 0xffff;
} else if (!netdev_mc_empty(dev)) {
- netdev_for_each_mc_addr(addr, dev) {
- u32 crc = ether_crc_le(ETH_ALEN, addr->da_addr);
+ netdev_for_each_mc_addr(ha, dev) {
+ u32 crc = ether_crc_le(ETH_ALEN, ha->addr);
crc >>= 24;
hash[crc >> 4] |= (1 << (15 - (crc & 0xf)));
@@ -7911,6 +7918,18 @@ static int niu_phys_id(struct net_device *dev, u32 data)
return 0;
}
+static int niu_set_flags(struct net_device *dev, u32 data)
+{
+ if (data & (ETH_FLAG_LRO | ETH_FLAG_NTUPLE))
+ return -EOPNOTSUPP;
+
+ if (data & ETH_FLAG_RXHASH)
+ dev->features |= NETIF_F_RXHASH;
+ else
+ dev->features &= ~NETIF_F_RXHASH;
+ return 0;
+}
+
static const struct ethtool_ops niu_ethtool_ops = {
.get_drvinfo = niu_get_drvinfo,
.get_link = ethtool_op_get_link,
@@ -7927,6 +7946,8 @@ static const struct ethtool_ops niu_ethtool_ops = {
.phys_id = niu_phys_id,
.get_rxnfc = niu_get_nfc,
.set_rxnfc = niu_set_nfc,
+ .set_flags = niu_set_flags,
+ .get_flags = ethtool_op_get_flags,
};
static int niu_ldg_assign_ldn(struct niu *np, struct niu_parent *parent,
@@ -9094,7 +9115,7 @@ static int __devinit niu_n2_irq_init(struct niu *np, u8 *ldg_num_map)
const u32 *int_prop;
int i;
- int_prop = of_get_property(op->node, "interrupts", NULL);
+ int_prop = of_get_property(op->dev.of_node, "interrupts", NULL);
if (!int_prop)
return -ENODEV;
@@ -9245,7 +9266,7 @@ static int __devinit niu_get_of_props(struct niu *np)
int prop_len;
if (np->parent->plat_type == PLAT_TYPE_NIU)
- dp = np->op->node;
+ dp = np->op->dev.of_node;
else
dp = pci_device_to_OF_node(np->pdev);
@@ -9755,6 +9776,12 @@ static void __devinit niu_device_announce(struct niu *np)
}
}
+static void __devinit niu_set_basic_features(struct net_device *dev)
+{
+ dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM |
+ NETIF_F_GRO | NETIF_F_RXHASH);
+}
+
static int __devinit niu_pci_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -9839,7 +9866,7 @@ static int __devinit niu_pci_init_one(struct pci_dev *pdev,
}
}
- dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM);
+ niu_set_basic_features(dev);
np->regs = pci_ioremap_bar(pdev, 0);
if (!np->regs) {
@@ -10056,10 +10083,10 @@ static int __devinit niu_of_probe(struct of_device *op,
niu_driver_version();
- reg = of_get_property(op->node, "reg", NULL);
+ reg = of_get_property(op->dev.of_node, "reg", NULL);
if (!reg) {
dev_err(&op->dev, "%s: No 'reg' property, aborting\n",
- op->node->full_name);
+ op->dev.of_node->full_name);
return -ENODEV;
}
@@ -10072,7 +10099,7 @@ static int __devinit niu_of_probe(struct of_device *op,
np = netdev_priv(dev);
memset(&parent_id, 0, sizeof(parent_id));
- parent_id.of = of_get_parent(op->node);
+ parent_id.of = of_get_parent(op->dev.of_node);
np->parent = niu_get_parent(np, &parent_id,
PLAT_TYPE_NIU);
@@ -10081,7 +10108,7 @@ static int __devinit niu_of_probe(struct of_device *op,
goto err_out_free_dev;
}
- dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM);
+ niu_set_basic_features(dev);
np->regs = of_ioremap(&op->resource[1], 0,
resource_size(&op->resource[1]),
@@ -10207,8 +10234,11 @@ static const struct of_device_id niu_match[] = {
MODULE_DEVICE_TABLE(of, niu_match);
static struct of_platform_driver niu_of_driver = {
- .name = "niu",
- .match_table = niu_match,
+ .driver = {
+ .name = "niu",
+ .owner = THIS_MODULE,
+ .of_match_table = niu_match,
+ },
.probe = niu_of_probe,
.remove = __devexit_p(niu_of_remove),
};
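niu above exposes receive hashing through the generic ethtool flags interface: get_flags reports the current dev->features bits and set_flags toggles NETIF_F_RXHASH while rejecting flags the hardware cannot honour. In isolation the handler pair looks like this (a sketch mirroring niu_set_flags() from the hunk above, with illustrative demo_* names):

#include <linux/ethtool.h>
#include <linux/netdevice.h>

static int demo_set_flags(struct net_device *dev, u32 data)
{
        if (data & (ETH_FLAG_LRO | ETH_FLAG_NTUPLE))
                return -EOPNOTSUPP;     /* not supported by this hardware */

        if (data & ETH_FLAG_RXHASH)
                dev->features |= NETIF_F_RXHASH;
        else
                dev->features &= ~NETIF_F_RXHASH;
        return 0;
}

static const struct ethtool_ops demo_ethtool_ops = {
        .get_flags = ethtool_op_get_flags,
        .set_flags = demo_set_flags,
};
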
diff --git a/drivers/net/niu.h b/drivers/net/niu.h
index 3bd0b5933d59..d6715465f35d 100644
--- a/drivers/net/niu.h
+++ b/drivers/net/niu.h
@@ -2706,7 +2706,7 @@ struct rx_pkt_hdr0 {
#if defined(__LITTLE_ENDIAN_BITFIELD)
u8 inputport:2,
maccheck:1,
- class:4;
+ class:5;
u8 vlan:1,
llcsnap:1,
noport:1,
@@ -2715,7 +2715,7 @@ struct rx_pkt_hdr0 {
tres:2,
tzfvld:1;
#elif defined(__BIG_ENDIAN_BITFIELD)
- u8 class:4,
+ u8 class:5,
maccheck:1,
inputport:2;
u8 tzfvld:1,
@@ -2775,6 +2775,9 @@ struct rx_pkt_hdr1 {
/* Bits 7:0 of hash value, H1. */
u8 hashval1_2;
+ u8 hwrsvd5;
+ u8 hwrsvd6;
+
u8 usrdata_0; /* Bits 39:32 of user data. */
u8 usrdata_1; /* Bits 31:24 of user data. */
u8 usrdata_2; /* Bits 23:16 of user data. */
diff --git a/drivers/net/octeon/octeon_mgmt.c b/drivers/net/octeon/octeon_mgmt.c
index 8aadc8e2ddd7..000e792d57c0 100644
--- a/drivers/net/octeon/octeon_mgmt.c
+++ b/drivers/net/octeon/octeon_mgmt.c
@@ -189,12 +189,19 @@ static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p)
mix_orcnt.u64 = cvmx_read_csr(CVMX_MIXX_ORCNT(port));
while (mix_orcnt.s.orcnt) {
+ spin_lock_irqsave(&p->tx_list.lock, flags);
+
+ mix_orcnt.u64 = cvmx_read_csr(CVMX_MIXX_ORCNT(port));
+
+ if (mix_orcnt.s.orcnt == 0) {
+ spin_unlock_irqrestore(&p->tx_list.lock, flags);
+ break;
+ }
+
dma_sync_single_for_cpu(p->dev, p->tx_ring_handle,
ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
DMA_BIDIRECTIONAL);
- spin_lock_irqsave(&p->tx_list.lock, flags);
-
re.d64 = p->tx_ring[p->tx_next_clean];
p->tx_next_clean =
(p->tx_next_clean + 1) % OCTEON_MGMT_TX_RING_SIZE;
@@ -317,7 +324,6 @@ good:
skb->protocol = eth_type_trans(skb, netdev);
netdev->stats.rx_packets++;
netdev->stats.rx_bytes += skb->len;
- netdev->last_rx = jiffies;
netif_receive_skb(skb);
rc = 0;
} else if (re.s.code == RING_ENTRY_CODE_MORE) {
@@ -374,7 +380,6 @@ done:
mix_ircnt.s.ircnt = 1;
cvmx_write_csr(CVMX_MIXX_IRCNT(port), mix_ircnt.u64);
return rc;
-
}
static int octeon_mgmt_receive_packets(struct octeon_mgmt *p, int budget)
@@ -384,7 +389,6 @@ static int octeon_mgmt_receive_packets(struct octeon_mgmt *p, int budget)
union cvmx_mixx_ircnt mix_ircnt;
int rc;
-
mix_ircnt.u64 = cvmx_read_csr(CVMX_MIXX_IRCNT(port));
while (work_done < budget && mix_ircnt.s.ircnt) {
@@ -475,13 +479,12 @@ static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
unsigned int cam_mode = 1; /* 1 - Accept on CAM match */
unsigned int multicast_mode = 1; /* 1 - Reject all multicast. */
struct octeon_mgmt_cam_state cam_state;
- struct dev_addr_list *list;
- struct list_head *pos;
+ struct netdev_hw_addr *ha;
int available_cam_entries;
memset(&cam_state, 0, sizeof(cam_state));
- if ((netdev->flags & IFF_PROMISC) || netdev->dev_addrs.count > 7) {
+ if ((netdev->flags & IFF_PROMISC) || netdev->uc.count > 7) {
cam_mode = 0;
available_cam_entries = 8;
} else {
@@ -489,13 +492,13 @@ static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
* One CAM entry for the primary address, leaves seven
* for the secondary addresses.
*/
- available_cam_entries = 7 - netdev->dev_addrs.count;
+ available_cam_entries = 7 - netdev->uc.count;
}
if (netdev->flags & IFF_MULTICAST) {
if (cam_mode == 0 || (netdev->flags & IFF_ALLMULTI) ||
netdev_mc_count(netdev) > available_cam_entries)
- multicast_mode = 2; /* 1 - Accept all multicast. */
+ multicast_mode = 2; /* 2 - Accept all multicast. */
else
multicast_mode = 0; /* 0 - Use CAM. */
}
@@ -503,19 +506,14 @@ static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
if (cam_mode == 1) {
/* Add primary address. */
octeon_mgmt_cam_state_add(&cam_state, netdev->dev_addr);
- list_for_each(pos, &netdev->dev_addrs.list) {
- struct netdev_hw_addr *hw_addr;
- hw_addr = list_entry(pos, struct netdev_hw_addr, list);
- octeon_mgmt_cam_state_add(&cam_state, hw_addr->addr);
- list = list->next;
- }
+ netdev_for_each_uc_addr(ha, netdev)
+ octeon_mgmt_cam_state_add(&cam_state, ha->addr);
}
if (multicast_mode == 0) {
- netdev_for_each_mc_addr(list, netdev)
- octeon_mgmt_cam_state_add(&cam_state, list->da_addr);
+ netdev_for_each_mc_addr(ha, netdev)
+ octeon_mgmt_cam_state_add(&cam_state, ha->addr);
}
-
spin_lock_irqsave(&p->lock, flags);
/* Disable packet I/O. */
@@ -524,7 +522,6 @@ static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
agl_gmx_prtx.s.en = 0;
cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64);
-
adr_ctl.u64 = 0;
adr_ctl.s.cam_mode = cam_mode;
adr_ctl.s.mcst = multicast_mode;
@@ -597,8 +594,7 @@ static irqreturn_t octeon_mgmt_interrupt(int cpl, void *dev_id)
mixx_isr.u64 = cvmx_read_csr(CVMX_MIXX_ISR(port));
/* Clear any pending interrupts */
- cvmx_write_csr(CVMX_MIXX_ISR(port),
- cvmx_read_csr(CVMX_MIXX_ISR(port)));
+ cvmx_write_csr(CVMX_MIXX_ISR(port), mixx_isr.u64);
cvmx_read_csr(CVMX_MIXX_ISR(port));
if (mixx_isr.s.irthresh) {
@@ -832,9 +828,9 @@ static int octeon_mgmt_open(struct net_device *netdev)
mix_irhwm.s.irhwm = 0;
cvmx_write_csr(CVMX_MIXX_IRHWM(port), mix_irhwm.u64);
- /* Interrupt when we have 5 or more packets to clean. */
+ /* Interrupt when we have 1 or more packets to clean. */
mix_orhwm.u64 = 0;
- mix_orhwm.s.orhwm = 5;
+ mix_orhwm.s.orhwm = 1;
cvmx_write_csr(CVMX_MIXX_ORHWM(port), mix_orhwm.u64);
/* Enable receive and transmit interrupts */
@@ -928,7 +924,6 @@ static int octeon_mgmt_stop(struct net_device *netdev)
octeon_mgmt_reset_hw(p);
-
free_irq(p->irq, netdev);
/* dma_unmap is a nop on Octeon, so just free everything. */
@@ -945,7 +940,6 @@ static int octeon_mgmt_stop(struct net_device *netdev)
DMA_BIDIRECTIONAL);
kfree(p->tx_ring);
-
return 0;
}
@@ -955,6 +949,7 @@ static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev)
int port = p->port;
union mgmt_port_ring_entry re;
unsigned long flags;
+ int rv = NETDEV_TX_BUSY;
re.d64 = 0;
re.s.len = skb->len;
@@ -964,15 +959,18 @@ static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev)
spin_lock_irqsave(&p->tx_list.lock, flags);
+ if (unlikely(p->tx_current_fill >= ring_max_fill(OCTEON_MGMT_TX_RING_SIZE) - 1)) {
+ spin_unlock_irqrestore(&p->tx_list.lock, flags);
+ netif_stop_queue(netdev);
+ spin_lock_irqsave(&p->tx_list.lock, flags);
+ }
+
if (unlikely(p->tx_current_fill >=
ring_max_fill(OCTEON_MGMT_TX_RING_SIZE))) {
spin_unlock_irqrestore(&p->tx_list.lock, flags);
-
dma_unmap_single(p->dev, re.s.addr, re.s.len,
DMA_TO_DEVICE);
-
- netif_stop_queue(netdev);
- return NETDEV_TX_BUSY;
+ goto out;
}
__skb_queue_tail(&p->tx_list, skb);
@@ -994,10 +992,10 @@ static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev)
/* Ring the bell. */
cvmx_write_csr(CVMX_MIXX_ORING2(port), 1);
- netdev->trans_start = jiffies;
- octeon_mgmt_clean_tx_buffers(p);
+ rv = NETDEV_TX_OK;
+out:
octeon_mgmt_update_tx_stats(netdev);
- return NETDEV_TX_OK;
+ return rv;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -1007,7 +1005,6 @@ static void octeon_mgmt_poll_controller(struct net_device *netdev)
octeon_mgmt_receive_packets(p, 16);
octeon_mgmt_update_rx_stats(netdev);
- return;
}
#endif
@@ -1107,7 +1104,6 @@ static int __init octeon_mgmt_probe(struct platform_device *pdev)
netdev->netdev_ops = &octeon_mgmt_ops;
netdev->ethtool_ops = &octeon_mgmt_ethtool_ops;
-
/* The mgmt ports get the first N MACs. */
for (i = 0; i < 6; i++)
netdev->dev_addr[i] = octeon_bootinfo->mac_addr_base[i];
diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c
index 370c147d08a3..8ab6ae0a6107 100644
--- a/drivers/net/pasemi_mac.c
+++ b/drivers/net/pasemi_mac.c
@@ -1472,8 +1472,6 @@ static void pasemi_mac_queue_csdesc(const struct sk_buff *skb,
txring->next_to_fill = fill;
write_dma_reg(PAS_DMA_TXCHAN_INCR(txring->chan.chno), 2);
-
- return;
}
static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
diff --git a/drivers/net/pci-skeleton.c b/drivers/net/pci-skeleton.c
index 36785853a149..56f3fc45dbaa 100644
--- a/drivers/net/pci-skeleton.c
+++ b/drivers/net/pci-skeleton.c
@@ -1354,7 +1354,6 @@ static int netdrv_start_xmit(struct sk_buff *skb, struct net_device *dev)
NETDRV_W32(TxStatus0 + (entry * sizeof(u32)),
tp->tx_flag | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
- dev->trans_start = jiffies;
atomic_inc(&tp->cur_tx);
if ((atomic_read(&tp->cur_tx) - atomic_read(&tp->dirty_tx)) >= NUM_TX_DESC)
netif_stop_queue(dev);
@@ -1813,12 +1812,12 @@ static void netdrv_set_rx_mode(struct net_device *dev)
rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0xffffffff;
} else {
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0;
- netdev_for_each_mc_addr(mclist, dev) {
- int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
+ netdev_for_each_mc_addr(ha, dev) {
+ int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
}
diff --git a/drivers/net/pcmcia/3c574_cs.c b/drivers/net/pcmcia/3c574_cs.c
index 757f87bb1db3..10ee106a1617 100644
--- a/drivers/net/pcmcia/3c574_cs.c
+++ b/drivers/net/pcmcia/3c574_cs.c
@@ -93,7 +93,6 @@ earlier 3Com products.
#include <pcmcia/cisreg.h>
#include <pcmcia/ciscode.h>
#include <pcmcia/ds.h>
-#include <pcmcia/mem_op.h>
#include <asm/uaccess.h>
#include <asm/io.h>
@@ -200,7 +199,6 @@ enum Window4 { /* Window 4: Xcvr/media bits. */
struct el3_private {
struct pcmcia_device *p_dev;
- dev_node_t node;
u16 advertising, partner; /* NWay media advertisement */
unsigned char phys; /* MII device address */
unsigned int autoselect:1, default_media:3; /* Read from the EEPROM/Wn3_Config. */
@@ -283,8 +281,6 @@ static int tc574_probe(struct pcmcia_device *link)
spin_lock_init(&lp->window_lock);
link->io.NumPorts1 = 32;
link->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
- link->irq.Handler = &el3_interrupt;
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
link->conf.ConfigIndex = 1;
@@ -311,8 +307,7 @@ static void tc574_detach(struct pcmcia_device *link)
dev_dbg(&link->dev, "3c574_detach()\n");
- if (link->dev_node)
- unregister_netdev(dev);
+ unregister_netdev(dev);
tc574_release(link);
@@ -353,7 +348,7 @@ static int tc574_config(struct pcmcia_device *link)
if (i != 0)
goto failed;
- ret = pcmcia_request_irq(link, &link->irq);
+ ret = pcmcia_request_irq(link, el3_interrupt);
if (ret)
goto failed;
@@ -361,7 +356,7 @@ static int tc574_config(struct pcmcia_device *link)
if (ret)
goto failed;
- dev->irq = link->irq.AssignedIRQ;
+ dev->irq = link->irq;
dev->base_addr = link->io.BasePort1;
ioaddr = dev->base_addr;
@@ -446,17 +441,13 @@ static int tc574_config(struct pcmcia_device *link)
}
}
- link->dev_node = &lp->node;
SET_NETDEV_DEV(dev, &link->dev);
if (register_netdev(dev) != 0) {
printk(KERN_NOTICE "3c574_cs: register_netdev() failed\n");
- link->dev_node = NULL;
goto failed;
}
- strcpy(lp->node.dev_name, dev->name);
-
printk(KERN_INFO "%s: %s at io %#3lx, irq %d, "
"hw_addr %pM.\n",
dev->name, cardname, dev->base_addr, dev->irq,
@@ -622,8 +613,6 @@ static void mdio_write(unsigned int ioaddr, int phy_id, int location, int value)
outw(MDIO_ENB_IN, mdio_addr);
outw(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
}
-
- return;
}
/* Reset and restore all of the 3c574 registers. */
@@ -739,7 +728,7 @@ static void el3_tx_timeout(struct net_device *dev)
printk(KERN_NOTICE "%s: Transmit timed out!\n", dev->name);
dump_status(dev);
dev->stats.tx_errors++;
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
/* Issue TX_RESET and TX_START commands. */
tc574_wait_for_completion(dev, TxReset);
outw(TxEnable, ioaddr + EL3_CMD);
@@ -790,8 +779,6 @@ static netdev_tx_t el3_start_xmit(struct sk_buff *skb,
/* ... and the packet rounded to a doubleword. */
outsl(ioaddr + TX_FIFO, skb->data, (skb->len+3)>>2);
- dev->trans_start = jiffies;
-
/* TxFree appears only in Window 1, not offset 0x1c. */
if (inw(ioaddr + TxFree) <= 1536) {
netif_stop_queue(dev);
diff --git a/drivers/net/pcmcia/3c589_cs.c b/drivers/net/pcmcia/3c589_cs.c
index 091e0b00043e..ce63c3773b4c 100644
--- a/drivers/net/pcmcia/3c589_cs.c
+++ b/drivers/net/pcmcia/3c589_cs.c
@@ -1,20 +1,20 @@
/*======================================================================
A PCMCIA ethernet driver for the 3com 3c589 card.
-
+
Copyright (C) 1999 David A. Hinds -- dahinds@users.sourceforge.net
3c589_cs.c 1.162 2001/10/13 00:08:50
The network driver code is based on Donald Becker's 3c589 code:
-
+
Written 1994 by Donald Becker.
Copyright 1993 United States Government as represented by the
Director, National Security Agency. This software may be used and
distributed according to the terms of the GNU General Public License,
incorporated herein by reference.
Donald Becker may be reached at becker@scyld.com
-
+
Updated for 2.5.x by Alan Cox <alan@lxorguk.ukuu.org.uk>
======================================================================*/
@@ -69,31 +69,54 @@
/* The top five bits written to EL3_CMD are a command, the lower
11 bits are the parameter, if applicable. */
enum c509cmd {
- TotalReset = 0<<11, SelectWindow = 1<<11, StartCoax = 2<<11,
- RxDisable = 3<<11, RxEnable = 4<<11, RxReset = 5<<11, RxDiscard = 8<<11,
- TxEnable = 9<<11, TxDisable = 10<<11, TxReset = 11<<11,
- FakeIntr = 12<<11, AckIntr = 13<<11, SetIntrEnb = 14<<11,
- SetStatusEnb = 15<<11, SetRxFilter = 16<<11, SetRxThreshold = 17<<11,
- SetTxThreshold = 18<<11, SetTxStart = 19<<11, StatsEnable = 21<<11,
- StatsDisable = 22<<11, StopCoax = 23<<11,
+ TotalReset = 0<<11,
+ SelectWindow = 1<<11,
+ StartCoax = 2<<11,
+ RxDisable = 3<<11,
+ RxEnable = 4<<11,
+ RxReset = 5<<11,
+ RxDiscard = 8<<11,
+ TxEnable = 9<<11,
+ TxDisable = 10<<11,
+ TxReset = 11<<11,
+ FakeIntr = 12<<11,
+ AckIntr = 13<<11,
+ SetIntrEnb = 14<<11,
+ SetStatusEnb = 15<<11,
+ SetRxFilter = 16<<11,
+ SetRxThreshold = 17<<11,
+ SetTxThreshold = 18<<11,
+ SetTxStart = 19<<11,
+ StatsEnable = 21<<11,
+ StatsDisable = 22<<11,
+ StopCoax = 23<<11
};
enum c509status {
- IntLatch = 0x0001, AdapterFailure = 0x0002, TxComplete = 0x0004,
- TxAvailable = 0x0008, RxComplete = 0x0010, RxEarly = 0x0020,
- IntReq = 0x0040, StatsFull = 0x0080, CmdBusy = 0x1000
+ IntLatch = 0x0001,
+ AdapterFailure = 0x0002,
+ TxComplete = 0x0004,
+ TxAvailable = 0x0008,
+ RxComplete = 0x0010,
+ RxEarly = 0x0020,
+ IntReq = 0x0040,
+ StatsFull = 0x0080,
+ CmdBusy = 0x1000
};
/* The SetRxFilter command accepts the following classes: */
enum RxFilter {
- RxStation = 1, RxMulticast = 2, RxBroadcast = 4, RxProm = 8
+ RxStation = 1,
+ RxMulticast = 2,
+ RxBroadcast = 4,
+ RxProm = 8
};
/* Register window 1 offsets, the window used in normal operation. */
#define TX_FIFO 0x00
#define RX_FIFO 0x00
-#define RX_STATUS 0x08
-#define TX_STATUS 0x0B
+#define RX_STATUS 0x08
+#define TX_STATUS 0x0B
#define TX_FREE 0x0C /* Remaining free bytes in Tx buffer. */
#define WN0_IRQ 0x08 /* Window 0: Set IRQ line in bits 12-15. */
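
The comment at the top of this hunk is the key to all of these values: an EL3 command word packs the command in the top five bits and its argument in the low eleven, which is why every enumerator above is shifted left by 11. A stand-alone sketch of that composition (plain user-space C for illustration only; the driver's EL3WINDOW() macro is assumed to expand to roughly outw(SelectWindow + win, ioaddr + EL3_CMD)):

#include <stdint.h>
#include <stdio.h>

#define SELECT_WINDOW	(1 << 11)	/* mirrors SelectWindow above */

int main(void)
{
	int window;

	/* Command in bits 15-11, argument in bits 10-0: selecting register
	 * window N is simply (SelectWindow | N) written to EL3_CMD. */
	for (window = 0; window < 8; window++)
		printf("EL3WINDOW(%d) -> outw(0x%04x, ioaddr + EL3_CMD)\n",
		       window, (unsigned)(SELECT_WINDOW | window));
	return 0;
}
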
@@ -106,13 +129,12 @@ enum RxFilter {
struct el3_private {
struct pcmcia_device *p_dev;
- dev_node_t node;
- /* For transceiver monitoring */
- struct timer_list media;
- u16 media_status;
- u16 fast_poll;
- unsigned long last_irq;
- spinlock_t lock;
+ /* For transceiver monitoring */
+ struct timer_list media;
+ u16 media_status;
+ u16 fast_poll;
+ unsigned long last_irq;
+ spinlock_t lock;
};
static const char *if_names[] = { "auto", "10baseT", "10base2", "AUI" };
@@ -164,15 +186,15 @@ static void tc589_detach(struct pcmcia_device *p_dev);
======================================================================*/
static const struct net_device_ops el3_netdev_ops = {
- .ndo_open = el3_open,
- .ndo_stop = el3_close,
+ .ndo_open = el3_open,
+ .ndo_stop = el3_close,
.ndo_start_xmit = el3_start_xmit,
- .ndo_tx_timeout = el3_tx_timeout,
+ .ndo_tx_timeout = el3_tx_timeout,
.ndo_set_config = el3_config,
.ndo_get_stats = el3_get_stats,
.ndo_set_multicast_list = set_multicast_list,
.ndo_change_mtu = eth_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
@@ -194,8 +216,7 @@ static int tc589_probe(struct pcmcia_device *link)
spin_lock_init(&lp->lock);
link->io.NumPorts1 = 16;
link->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
- link->irq.Handler = &el3_interrupt;
+
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
link->conf.ConfigIndex = 1;
@@ -223,8 +244,7 @@ static void tc589_detach(struct pcmcia_device *link)
dev_dbg(&link->dev, "3c589_detach\n");
- if (link->dev_node)
- unregister_netdev(dev);
+ unregister_netdev(dev);
tc589_release(link);
@@ -236,20 +256,19 @@ static void tc589_detach(struct pcmcia_device *link)
tc589_config() is scheduled to run after a CARD_INSERTION event
is received, to configure the PCMCIA socket, and to make the
ethernet device available to the system.
-
+
======================================================================*/
static int tc589_config(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
- struct el3_private *lp = netdev_priv(dev);
__be16 *phys_addr;
int ret, i, j, multi = 0, fifo;
unsigned int ioaddr;
char *ram_split[] = {"5:3", "3:1", "1:1", "3:5"};
u8 *buf;
size_t len;
-
+
dev_dbg(&link->dev, "3c589_config\n");
phys_addr = (__be16 *)dev->dev_addr;
@@ -271,15 +290,15 @@ static int tc589_config(struct pcmcia_device *link)
if (i != 0)
goto failed;
- ret = pcmcia_request_irq(link, &link->irq);
+ ret = pcmcia_request_irq(link, el3_interrupt);
if (ret)
goto failed;
ret = pcmcia_request_configuration(link, &link->conf);
if (ret)
goto failed;
-
- dev->irq = link->irq.AssignedIRQ;
+
+ dev->irq = link->irq;
dev->base_addr = link->io.BasePort1;
ioaddr = dev->base_addr;
EL3WINDOW(0);
@@ -312,25 +331,20 @@ static int tc589_config(struct pcmcia_device *link)
dev->if_port = if_port;
else
printk(KERN_ERR "3c589_cs: invalid if_port requested\n");
-
- link->dev_node = &lp->node;
+
SET_NETDEV_DEV(dev, &link->dev);
if (register_netdev(dev) != 0) {
printk(KERN_ERR "3c589_cs: register_netdev() failed\n");
- link->dev_node = NULL;
goto failed;
}
- strcpy(lp->node.dev_name, dev->name);
-
- printk(KERN_INFO "%s: 3Com 3c%s, io %#3lx, irq %d, "
- "hw_addr %pM\n",
- dev->name, (multi ? "562" : "589"), dev->base_addr, dev->irq,
- dev->dev_addr);
- printk(KERN_INFO " %dK FIFO split %s Rx:Tx, %s xcvr\n",
- (fifo & 7) ? 32 : 8, ram_split[(fifo >> 16) & 3],
- if_names[dev->if_port]);
+ netdev_info(dev, "3Com 3c%s, io %#3lx, irq %d, hw_addr %pM\n",
+ (multi ? "562" : "589"), dev->base_addr, dev->irq,
+ dev->dev_addr);
+ netdev_info(dev, " %dK FIFO split %s Rx:Tx, %s xcvr\n",
+ (fifo & 7) ? 32 : 8, ram_split[(fifo >> 16) & 3],
+ if_names[dev->if_port]);
return 0;
failed:
@@ -343,7 +357,7 @@ failed:
After a card is removed, tc589_release() will unregister the net
device, and release the PCMCIA configuration. If the device is
still open, this will be postponed until it is closed.
-
+
======================================================================*/
static void tc589_release(struct pcmcia_device *link)
@@ -365,7 +379,7 @@ static int tc589_resume(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
- if (link->open) {
+ if (link->open) {
tc589_reset(dev);
netif_device_attach(dev);
}
@@ -385,8 +399,7 @@ static void tc589_wait_for_completion(struct net_device *dev, int cmd)
while (--i > 0)
if (!(inw(dev->base_addr + EL3_STATUS) & 0x1000)) break;
if (i == 0)
- printk(KERN_WARNING "%s: command 0x%04x did not complete!\n",
- dev->name, cmd);
+ netdev_warn(dev, "command 0x%04x did not complete!\n", cmd);
}
/*
@@ -412,7 +425,7 @@ static void tc589_set_xcvr(struct net_device *dev, int if_port)
{
struct el3_private *lp = netdev_priv(dev);
unsigned int ioaddr = dev->base_addr;
-
+
EL3WINDOW(0);
switch (if_port) {
case 0: case 1: outw(0, ioaddr + 6); break;
@@ -435,14 +448,13 @@ static void dump_status(struct net_device *dev)
{
unsigned int ioaddr = dev->base_addr;
EL3WINDOW(1);
- printk(KERN_INFO " irq status %04x, rx status %04x, tx status "
- "%02x tx free %04x\n", inw(ioaddr+EL3_STATUS),
- inw(ioaddr+RX_STATUS), inb(ioaddr+TX_STATUS),
- inw(ioaddr+TX_FREE));
+ netdev_info(dev, " irq status %04x, rx status %04x, tx status %02x tx free %04x\n",
+ inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS),
+ inb(ioaddr+TX_STATUS), inw(ioaddr+TX_FREE));
EL3WINDOW(4);
- printk(KERN_INFO " diagnostics: fifo %04x net %04x ethernet %04x"
- " media %04x\n", inw(ioaddr+0x04), inw(ioaddr+0x06),
- inw(ioaddr+0x08), inw(ioaddr+0x0a));
+ netdev_info(dev, " diagnostics: fifo %04x net %04x ethernet %04x media %04x\n",
+ inw(ioaddr+0x04), inw(ioaddr+0x06), inw(ioaddr+0x08),
+ inw(ioaddr+0x0a));
EL3WINDOW(1);
}
@@ -451,18 +463,18 @@ static void tc589_reset(struct net_device *dev)
{
unsigned int ioaddr = dev->base_addr;
int i;
-
+
EL3WINDOW(0);
- outw(0x0001, ioaddr + 4); /* Activate board. */
+ outw(0x0001, ioaddr + 4); /* Activate board. */
outw(0x3f00, ioaddr + 8); /* Set the IRQ line. */
-
+
/* Set the station address in window 2. */
EL3WINDOW(2);
for (i = 0; i < 6; i++)
outb(dev->dev_addr[i], ioaddr + i);
tc589_set_xcvr(dev, dev->if_port);
-
+
/* Switch to the stats window, and clear all stats by reading. */
outw(StatsDisable, ioaddr + EL3_CMD);
EL3WINDOW(6);
@@ -470,7 +482,7 @@ static void tc589_reset(struct net_device *dev)
inb(ioaddr+i);
inw(ioaddr + 10);
inw(ioaddr + 12);
-
+
/* Switch to register set 1 for normal use. */
EL3WINDOW(1);
@@ -504,8 +516,7 @@ static int el3_config(struct net_device *dev, struct ifmap *map)
if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) {
if (map->port <= 3) {
dev->if_port = map->port;
- printk(KERN_INFO "%s: switched to %s port\n",
- dev->name, if_names[dev->if_port]);
+ netdev_info(dev, "switched to %s port\n", if_names[dev->if_port]);
tc589_set_xcvr(dev, dev->if_port);
} else
return -EINVAL;
@@ -517,13 +528,13 @@ static int el3_open(struct net_device *dev)
{
struct el3_private *lp = netdev_priv(dev);
struct pcmcia_device *link = lp->p_dev;
-
+
if (!pcmcia_dev_present(link))
return -ENODEV;
link->open++;
netif_start_queue(dev);
-
+
tc589_reset(dev);
init_timer(&lp->media);
lp->media.function = &media_check;
@@ -533,18 +544,18 @@ static int el3_open(struct net_device *dev)
dev_dbg(&link->dev, "%s: opened, status %4.4x.\n",
dev->name, inw(dev->base_addr + EL3_STATUS));
-
+
return 0;
}
static void el3_tx_timeout(struct net_device *dev)
{
unsigned int ioaddr = dev->base_addr;
-
- printk(KERN_WARNING "%s: Transmit timed out!\n", dev->name);
+
+ netdev_warn(dev, "Transmit timed out!\n");
dump_status(dev);
dev->stats.tx_errors++;
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
/* Issue TX_RESET and TX_START commands. */
tc589_wait_for_completion(dev, TxReset);
outw(TxEnable, ioaddr + EL3_CMD);
@@ -555,19 +566,18 @@ static void pop_tx_status(struct net_device *dev)
{
unsigned int ioaddr = dev->base_addr;
int i;
-
+
/* Clear the Tx status stack. */
for (i = 32; i > 0; i--) {
u_char tx_status = inb(ioaddr + TX_STATUS);
if (!(tx_status & 0x84)) break;
/* reset transmitter on jabber error or underrun */
if (tx_status & 0x30)
- tc589_wait_for_completion(dev, TxReset);
+ tc589_wait_for_completion(dev, TxReset);
if (tx_status & 0x38) {
- pr_debug("%s: transmit error: status 0x%02x\n",
- dev->name, tx_status);
- outw(TxEnable, ioaddr + EL3_CMD);
- dev->stats.tx_aborted_errors++;
+ netdev_dbg(dev, "transmit error: status 0x%02x\n", tx_status);
+ outw(TxEnable, ioaddr + EL3_CMD);
+ dev->stats.tx_aborted_errors++;
}
outb(0x00, ioaddr + TX_STATUS); /* Pop the status stack. */
}
@@ -580,11 +590,10 @@ static netdev_tx_t el3_start_xmit(struct sk_buff *skb,
struct el3_private *priv = netdev_priv(dev);
unsigned long flags;
- pr_debug("%s: el3_start_xmit(length = %ld) called, "
- "status %4.4x.\n", dev->name, (long)skb->len,
- inw(ioaddr + EL3_STATUS));
+ netdev_dbg(dev, "el3_start_xmit(length = %ld) called, status %4.4x.\n",
+ (long)skb->len, inw(ioaddr + EL3_STATUS));
- spin_lock_irqsave(&priv->lock, flags);
+ spin_lock_irqsave(&priv->lock, flags);
dev->stats.tx_bytes += skb->len;
@@ -594,7 +603,6 @@ static netdev_tx_t el3_start_xmit(struct sk_buff *skb,
/* ... and the packet rounded to a doubleword. */
outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
- dev->trans_start = jiffies;
if (inw(ioaddr + TX_FREE) <= 1536) {
netif_stop_queue(dev);
/* Interrupt us when the FIFO has room for max-sized packet. */
@@ -602,9 +610,9 @@ static netdev_tx_t el3_start_xmit(struct sk_buff *skb,
}
pop_tx_status(dev);
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock_irqrestore(&priv->lock, flags);
dev_kfree_skb(skb);
-
+
return NETDEV_TX_OK;
}
@@ -616,37 +624,32 @@ static irqreturn_t el3_interrupt(int irq, void *dev_id)
unsigned int ioaddr;
__u16 status;
int i = 0, handled = 1;
-
+
if (!netif_device_present(dev))
return IRQ_NONE;
ioaddr = dev->base_addr;
- pr_debug("%s: interrupt, status %4.4x.\n",
- dev->name, inw(ioaddr + EL3_STATUS));
+ netdev_dbg(dev, "interrupt, status %4.4x.\n", inw(ioaddr + EL3_STATUS));
- spin_lock(&lp->lock);
+ spin_lock(&lp->lock);
while ((status = inw(ioaddr + EL3_STATUS)) &
(IntLatch | RxComplete | StatsFull)) {
if ((status & 0xe000) != 0x2000) {
- pr_debug("%s: interrupt from dead card\n", dev->name);
- handled = 0;
- break;
+ netdev_dbg(dev, "interrupt from dead card\n");
+ handled = 0;
+ break;
}
-
if (status & RxComplete)
- el3_rx(dev);
-
+ el3_rx(dev);
if (status & TxAvailable) {
- pr_debug(" TX room bit was handled.\n");
- /* There's room in the FIFO for a full-sized packet. */
- outw(AckIntr | TxAvailable, ioaddr + EL3_CMD);
- netif_wake_queue(dev);
+ netdev_dbg(dev, " TX room bit was handled.\n");
+ /* There's room in the FIFO for a full-sized packet. */
+ outw(AckIntr | TxAvailable, ioaddr + EL3_CMD);
+ netif_wake_queue(dev);
}
-
if (status & TxComplete)
- pop_tx_status(dev);
-
+ pop_tx_status(dev);
if (status & (AdapterFailure | RxEarly | StatsFull)) {
/* Handle all uncommon interrupts. */
if (status & StatsFull) /* Empty statistics. */
@@ -660,8 +663,8 @@ static irqreturn_t el3_interrupt(int irq, void *dev_id)
EL3WINDOW(4);
fifo_diag = inw(ioaddr + 4);
EL3WINDOW(1);
- printk(KERN_WARNING "%s: adapter failure, FIFO diagnostic"
- " register %04x.\n", dev->name, fifo_diag);
+ netdev_warn(dev, "adapter failure, FIFO diagnostic register %04x.\n",
+ fifo_diag);
if (fifo_diag & 0x0400) {
/* Tx overrun */
tc589_wait_for_completion(dev, TxReset);
@@ -676,22 +679,20 @@ static irqreturn_t el3_interrupt(int irq, void *dev_id)
outw(AckIntr | AdapterFailure, ioaddr + EL3_CMD);
}
}
-
if (++i > 10) {
- printk(KERN_ERR "%s: infinite loop in interrupt, "
- "status %4.4x.\n", dev->name, status);
- /* Clear all interrupts */
- outw(AckIntr | 0xFF, ioaddr + EL3_CMD);
- break;
+ netdev_err(dev, "infinite loop in interrupt, status %4.4x.\n",
+ status);
+ /* Clear all interrupts */
+ outw(AckIntr | 0xFF, ioaddr + EL3_CMD);
+ break;
}
/* Acknowledge the IRQ. */
outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
}
-
lp->last_irq = jiffies;
- spin_unlock(&lp->lock);
- pr_debug("%s: exiting interrupt, status %4.4x.\n",
- dev->name, inw(ioaddr + EL3_STATUS));
+ spin_unlock(&lp->lock);
+ netdev_dbg(dev, "exiting interrupt, status %4.4x.\n",
+ inw(ioaddr + EL3_STATUS));
return IRQ_RETVAL(handled);
}
@@ -710,7 +711,7 @@ static void media_check(unsigned long arg)
if ((inw(ioaddr + EL3_STATUS) & IntLatch) &&
(inb(ioaddr + EL3_TIMER) == 0xff)) {
if (!lp->fast_poll)
- printk(KERN_WARNING "%s: interrupt(s) dropped!\n", dev->name);
+ netdev_warn(dev, "interrupt(s) dropped!\n");
local_irq_save(flags);
el3_interrupt(dev->irq, dev);
@@ -727,7 +728,7 @@ static void media_check(unsigned long arg)
/* lp->lock guards the EL3 window. Window should always be 1 except
when the lock is held */
- spin_lock_irqsave(&lp->lock, flags);
+ spin_lock_irqsave(&lp->lock, flags);
EL3WINDOW(4);
media = inw(ioaddr+WN4_MEDIA) & 0xc810;
@@ -747,32 +748,30 @@ static void media_check(unsigned long arg)
if (media != lp->media_status) {
if ((media & lp->media_status & 0x8000) &&
((lp->media_status ^ media) & 0x0800))
- printk(KERN_INFO "%s: %s link beat\n", dev->name,
- (lp->media_status & 0x0800 ? "lost" : "found"));
+ netdev_info(dev, "%s link beat\n",
+ (lp->media_status & 0x0800 ? "lost" : "found"));
else if ((media & lp->media_status & 0x4000) &&
((lp->media_status ^ media) & 0x0010))
- printk(KERN_INFO "%s: coax cable %s\n", dev->name,
- (lp->media_status & 0x0010 ? "ok" : "problem"));
+ netdev_info(dev, "coax cable %s\n",
+ (lp->media_status & 0x0010 ? "ok" : "problem"));
if (dev->if_port == 0) {
if (media & 0x8000) {
if (media & 0x0800)
- printk(KERN_INFO "%s: flipped to 10baseT\n",
- dev->name);
+ netdev_info(dev, "flipped to 10baseT\n");
else
- tc589_set_xcvr(dev, 2);
+ tc589_set_xcvr(dev, 2);
} else if (media & 0x4000) {
if (media & 0x0010)
tc589_set_xcvr(dev, 1);
else
- printk(KERN_INFO "%s: flipped to 10base2\n",
- dev->name);
+ netdev_info(dev, "flipped to 10base2\n");
}
}
lp->media_status = media;
}
-
+
EL3WINDOW(1);
- spin_unlock_irqrestore(&lp->lock, flags);
+ spin_unlock_irqrestore(&lp->lock, flags);
reschedule:
lp->media.expires = jiffies + HZ;
@@ -786,7 +785,7 @@ static struct net_device_stats *el3_get_stats(struct net_device *dev)
struct pcmcia_device *link = lp->p_dev;
if (pcmcia_dev_present(link)) {
- spin_lock_irqsave(&lp->lock, flags);
+ spin_lock_irqsave(&lp->lock, flags);
update_stats(dev);
spin_unlock_irqrestore(&lp->lock, flags);
}
@@ -798,21 +797,21 @@ static struct net_device_stats *el3_get_stats(struct net_device *dev)
single-threaded if the device is active. This is expected to be a rare
operation, and it's simpler for the rest of the driver to assume that
window 1 is always valid rather than use a special window-state variable.
-
+
Caller must hold the lock for this
*/
static void update_stats(struct net_device *dev)
{
unsigned int ioaddr = dev->base_addr;
- pr_debug("%s: updating the statistics.\n", dev->name);
+ netdev_dbg(dev, "updating the statistics.\n");
/* Turn off statistics updates while reading. */
outw(StatsDisable, ioaddr + EL3_CMD);
/* Switch to the stats window, and read everything. */
EL3WINDOW(6);
- dev->stats.tx_carrier_errors += inb(ioaddr + 0);
+ dev->stats.tx_carrier_errors += inb(ioaddr + 0);
dev->stats.tx_heartbeat_errors += inb(ioaddr + 1);
- /* Multiple collisions. */ inb(ioaddr + 2);
+ /* Multiple collisions. */ inb(ioaddr + 2);
dev->stats.collisions += inb(ioaddr + 3);
dev->stats.tx_window_errors += inb(ioaddr + 4);
dev->stats.rx_fifo_errors += inb(ioaddr + 5);
@@ -821,7 +820,7 @@ static void update_stats(struct net_device *dev)
/* Tx deferrals */ inb(ioaddr + 8);
/* Rx octets */ inw(ioaddr + 10);
/* Tx octets */ inw(ioaddr + 12);
-
+
/* Back to window 1, and turn statistics back on. */
EL3WINDOW(1);
outw(StatsEnable, ioaddr + EL3_CMD);
@@ -832,9 +831,9 @@ static int el3_rx(struct net_device *dev)
unsigned int ioaddr = dev->base_addr;
int worklimit = 32;
short rx_status;
-
- pr_debug("%s: in rx_packet(), status %4.4x, rx_status %4.4x.\n",
- dev->name, inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS));
+
+ netdev_dbg(dev, "in rx_packet(), status %4.4x, rx_status %4.4x.\n",
+ inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS));
while (!((rx_status = inw(ioaddr + RX_STATUS)) & 0x8000) &&
worklimit > 0) {
worklimit--;
@@ -852,11 +851,11 @@ static int el3_rx(struct net_device *dev)
} else {
short pkt_len = rx_status & 0x7ff;
struct sk_buff *skb;
-
+
skb = dev_alloc_skb(pkt_len+5);
-
- pr_debug(" Receiving packet size %d status %4.4x.\n",
- pkt_len, rx_status);
+
+ netdev_dbg(dev, " Receiving packet size %d status %4.4x.\n",
+ pkt_len, rx_status);
if (skb != NULL) {
skb_reserve(skb, 2);
insl(ioaddr+RX_FIFO, skb_put(skb, pkt_len),
@@ -866,8 +865,8 @@ static int el3_rx(struct net_device *dev)
dev->stats.rx_packets++;
dev->stats.rx_bytes += pkt_len;
} else {
- pr_debug("%s: couldn't allocate a sk_buff of"
- " size %d.\n", dev->name, pkt_len);
+ netdev_dbg(dev, "couldn't allocate a sk_buff of size %d.\n",
+ pkt_len);
dev->stats.rx_dropped++;
}
}
@@ -875,7 +874,7 @@ static int el3_rx(struct net_device *dev)
tc589_wait_for_completion(dev, RxDiscard);
}
if (worklimit == 0)
- printk(KERN_WARNING "%s: too much work in el3_rx!\n", dev->name);
+ netdev_warn(dev, "too much work in el3_rx!\n");
return 0;
}
@@ -906,17 +905,17 @@ static int el3_close(struct net_device *dev)
struct el3_private *lp = netdev_priv(dev);
struct pcmcia_device *link = lp->p_dev;
unsigned int ioaddr = dev->base_addr;
-
+
dev_dbg(&link->dev, "%s: shutting down ethercard.\n", dev->name);
if (pcmcia_dev_present(link)) {
/* Turn off statistics ASAP. We update dev->stats below. */
outw(StatsDisable, ioaddr + EL3_CMD);
-
+
/* Disable the receiver and transmitter. */
outw(RxDisable, ioaddr + EL3_CMD);
outw(TxDisable, ioaddr + EL3_CMD);
-
+
if (dev->if_port == 2)
/* Turn off thinnet power. Green! */
outw(StopCoax, ioaddr + EL3_CMD);
@@ -925,12 +924,12 @@ static int el3_close(struct net_device *dev)
EL3WINDOW(4);
outw(0, ioaddr + WN4_MEDIA);
}
-
+
/* Switching back to window 0 disables the IRQ. */
EL3WINDOW(0);
/* But we explicitly zero the IRQ line select anyway. */
outw(0x0f00, ioaddr + WN0_IRQ);
-
+
/* Check if the card still exists */
if ((inw(ioaddr+EL3_STATUS) & 0xe000) == 0x2000)
update_stats(dev);
@@ -939,7 +938,7 @@ static int el3_close(struct net_device *dev)
link->open--;
netif_stop_queue(dev);
del_timer_sync(&lp->media);
-
+
return 0;
}
@@ -961,7 +960,7 @@ static struct pcmcia_driver tc589_driver = {
},
.probe = tc589_probe,
.remove = tc589_detach,
- .id_table = tc589_ids,
+ .id_table = tc589_ids,
.suspend = tc589_suspend,
.resume = tc589_resume,
};
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c
index 9f3d593f14ed..5b3dfb4ab279 100644
--- a/drivers/net/pcmcia/axnet_cs.c
+++ b/drivers/net/pcmcia/axnet_cs.c
@@ -113,7 +113,6 @@ static irqreturn_t ax_interrupt(int irq, void *dev_id);
typedef struct axnet_dev_t {
struct pcmcia_device *p_dev;
- dev_node_t node;
caddr_t base;
struct timer_list watchdog;
int stale, fast_poll;
@@ -168,7 +167,6 @@ static int axnet_probe(struct pcmcia_device *link)
info = PRIV(dev);
info->p_dev = link;
link->priv = dev;
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
@@ -195,8 +193,7 @@ static void axnet_detach(struct pcmcia_device *link)
dev_dbg(&link->dev, "axnet_detach(0x%p)\n", link);
- if (link->dev_node)
- unregister_netdev(dev);
+ unregister_netdev(dev);
axnet_release(link);
@@ -265,12 +262,9 @@ static int try_io_port(struct pcmcia_device *link)
int j, ret;
if (link->io.NumPorts1 == 32) {
link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
- if (link->io.NumPorts2 > 0) {
- /* for master/slave multifunction cards */
+ /* for master/slave multifunction cards */
+ if (link->io.NumPorts2 > 0)
link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
- link->irq.Attributes =
- IRQ_TYPE_DYNAMIC_SHARING;
- }
} else {
/* This should be two 16-port windows */
link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
@@ -336,8 +330,7 @@ static int axnet_config(struct pcmcia_device *link)
if (ret != 0)
goto failed;
- ret = pcmcia_request_irq(link, &link->irq);
- if (ret)
+ if (!link->irq)
goto failed;
if (link->io.NumPorts2 == 8) {
@@ -349,7 +342,7 @@ static int axnet_config(struct pcmcia_device *link)
if (ret)
goto failed;
- dev->irq = link->irq.AssignedIRQ;
+ dev->irq = link->irq;
dev->base_addr = link->io.BasePort1;
if (!get_prom(link)) {
@@ -397,17 +390,13 @@ static int axnet_config(struct pcmcia_device *link)
}
info->phy_id = (i < 32) ? i : -1;
- link->dev_node = &info->node;
SET_NETDEV_DEV(dev, &link->dev);
if (register_netdev(dev) != 0) {
printk(KERN_NOTICE "axnet_cs: register_netdev() failed\n");
- link->dev_node = NULL;
goto failed;
}
- strcpy(info->node.dev_name, dev->name);
-
printk(KERN_INFO "%s: Asix AX88%d90: io %#3lx, irq %d, "
"hw_addr %pM\n",
dev->name, ((info->flags & IS_AX88790) ? 7 : 1),
@@ -1005,7 +994,7 @@ static void axnet_tx_timeout(struct net_device *dev)
{
long e8390_base = dev->base_addr;
struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
- int txsr, isr, tickssofar = jiffies - dev->trans_start;
+ int txsr, isr, tickssofar = jiffies - dev_trans_start(dev);
unsigned long flags;
dev->stats.tx_errors++;
@@ -1510,8 +1499,6 @@ static void ei_receive(struct net_device *dev)
ei_local->current_page = next_frame;
outb_p(next_frame-1, e8390_base+EN0_BOUNDARY);
}
-
- return;
}
/**
@@ -1622,11 +1609,11 @@ static struct net_device_stats *get_stats(struct net_device *dev)
static inline void make_mc_bits(u8 *bits, struct net_device *dev)
{
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
u32 crc;
- netdev_for_each_mc_addr(dmi, dev) {
- crc = ether_crc(ETH_ALEN, dmi->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev) {
+ crc = ether_crc(ETH_ALEN, ha->addr);
/*
* The 8390 uses the 6 most significant bits of the
* CRC to index the multicast table.
diff --git a/drivers/net/pcmcia/com20020_cs.c b/drivers/net/pcmcia/com20020_cs.c
index 21d9c9d815d1..5643f94541bc 100644
--- a/drivers/net/pcmcia/com20020_cs.c
+++ b/drivers/net/pcmcia/com20020_cs.c
@@ -122,7 +122,6 @@ static void com20020_detach(struct pcmcia_device *p_dev);
typedef struct com20020_dev_t {
struct net_device *dev;
- dev_node_t node;
} com20020_dev_t;
/*======================================================================
@@ -163,7 +162,6 @@ static int com20020_probe(struct pcmcia_device *p_dev)
p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
p_dev->io.NumPorts1 = 16;
p_dev->io.IOAddrLines = 16;
- p_dev->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
p_dev->conf.Attributes = CONF_ENABLE_IRQ;
p_dev->conf.IntType = INT_MEMORY_AND_IO;
@@ -196,18 +194,16 @@ static void com20020_detach(struct pcmcia_device *link)
dev_dbg(&link->dev, "com20020_detach\n");
- if (link->dev_node) {
- dev_dbg(&link->dev, "unregister...\n");
+ dev_dbg(&link->dev, "unregister...\n");
- unregister_netdev(dev);
+ unregister_netdev(dev);
- /*
- * this is necessary because we register our IRQ separately
- * from card services.
- */
- if (dev->irq)
+ /*
+ * this is necessary because we register our IRQ separately
+ * from card services.
+ */
+ if (dev->irq)
free_irq(dev->irq, dev);
- }
com20020_release(link);
@@ -275,15 +271,14 @@ static int com20020_config(struct pcmcia_device *link)
dev_dbg(&link->dev, "got ioaddr %Xh\n", ioaddr);
dev_dbg(&link->dev, "request IRQ %d\n",
- link->irq.AssignedIRQ);
- i = pcmcia_request_irq(link, &link->irq);
- if (i != 0)
+ link->irq);
+ if (!link->irq)
{
dev_dbg(&link->dev, "requestIRQ failed totally!\n");
goto failed;
}
- dev->irq = link->irq.AssignedIRQ;
+ dev->irq = link->irq;
ret = pcmcia_request_configuration(link, &link->conf);
if (ret)
@@ -299,7 +294,6 @@ static int com20020_config(struct pcmcia_device *link)
lp->card_name = "PCMCIA COM20020";
lp->card_flags = ARC_CAN_10MBIT; /* pretend all of them can 10Mbit */
- link->dev_node = &info->node;
SET_NETDEV_DEV(dev, &link->dev);
i = com20020_found(dev, 0); /* calls register_netdev */
@@ -307,12 +301,9 @@ static int com20020_config(struct pcmcia_device *link)
if (i != 0) {
dev_printk(KERN_NOTICE, &link->dev,
"com20020_cs: com20020_found() failed\n");
- link->dev_node = NULL;
goto failed;
}
- strcpy(info->node.dev_name, dev->name);
-
dev_dbg(&link->dev,KERN_INFO "%s: port %#3lx, irq %d\n",
dev->name, dev->base_addr, dev->irq);
return 0;
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c
index b9dc80b9d04a..7c27c50211a5 100644
--- a/drivers/net/pcmcia/fmvj18x_cs.c
+++ b/drivers/net/pcmcia/fmvj18x_cs.c
@@ -110,7 +110,6 @@ typedef enum { MBH10302, MBH10304, TDK, CONTEC, LA501, UNGERMANN,
*/
typedef struct local_info_t {
struct pcmcia_device *p_dev;
- dev_node_t node;
long open_time;
uint tx_started:1;
uint tx_queue;
@@ -254,10 +253,6 @@ static int fmvj18x_probe(struct pcmcia_device *link)
link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
link->io.IOAddrLines = 5;
- /* Interrupt setup */
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
- link->irq.Handler = fjn_interrupt;
-
/* General socket configuration */
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
@@ -278,8 +273,7 @@ static void fmvj18x_detach(struct pcmcia_device *link)
dev_dbg(&link->dev, "fmvj18x_detach\n");
- if (link->dev_node)
- unregister_netdev(dev);
+ unregister_netdev(dev);
fmvj18x_release(link);
@@ -425,8 +419,6 @@ static int fmvj18x_config(struct pcmcia_device *link)
}
if (link->io.NumPorts2 != 0) {
- link->irq.Attributes =
- IRQ_TYPE_DYNAMIC_SHARING;
ret = mfc_try_io_port(link);
if (ret != 0) goto failed;
} else if (cardtype == UNGERMANN) {
@@ -437,14 +429,14 @@ static int fmvj18x_config(struct pcmcia_device *link)
if (ret)
goto failed;
}
- ret = pcmcia_request_irq(link, &link->irq);
+ ret = pcmcia_request_irq(link, fjn_interrupt);
if (ret)
goto failed;
ret = pcmcia_request_configuration(link, &link->conf);
if (ret)
goto failed;
- dev->irq = link->irq.AssignedIRQ;
+ dev->irq = link->irq;
dev->base_addr = link->io.BasePort1;
if (link->io.BasePort2 != 0) {
@@ -529,17 +521,13 @@ static int fmvj18x_config(struct pcmcia_device *link)
}
lp->cardtype = cardtype;
- link->dev_node = &lp->node;
SET_NETDEV_DEV(dev, &link->dev);
if (register_netdev(dev) != 0) {
printk(KERN_NOTICE "fmvj18x_cs: register_netdev() failed\n");
- link->dev_node = NULL;
goto failed;
}
- strcpy(lp->node.dev_name, dev->name);
-
/* print current configuration */
printk(KERN_INFO "%s: %s, sram %s, port %#3lx, irq %d, "
"hw_addr %pM\n",
@@ -890,7 +878,6 @@ static netdev_tx_t fjn_start_xmit(struct sk_buff *skb,
lp->sent = lp->tx_queue ;
lp->tx_queue = 0;
lp->tx_queue_len = 0;
- dev->trans_start = jiffies;
lp->tx_started = 1;
netif_start_queue(dev);
} else {
@@ -1082,8 +1069,6 @@ static void fjn_rx(struct net_device *dev)
"%d ticks.\n", dev->name, inb(ioaddr + RX_MODE), i);
}
*/
-
- return;
} /* fjn_rx */
/*====================================================================*/
@@ -1196,11 +1181,11 @@ static void set_rx_mode(struct net_device *dev)
memset(mc_filter, 0x00, sizeof(mc_filter));
outb(1, ioaddr + RX_MODE); /* Ignore almost all multicasts. */
} else {
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
memset(mc_filter, 0, sizeof(mc_filter));
- netdev_for_each_mc_addr(mclist, dev) {
- unsigned int bit = ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 26;
+ netdev_for_each_mc_addr(ha, dev) {
+ unsigned int bit = ether_crc_le(ETH_ALEN, ha->addr) >> 26;
mc_filter[bit >> 3] |= (1 << (bit & 7));
}
outb(2, ioaddr + RX_MODE); /* Use normal mode. */
diff --git a/drivers/net/pcmcia/ibmtr_cs.c b/drivers/net/pcmcia/ibmtr_cs.c
index 37f4a6fdc3ef..67ee9851a8ed 100644
--- a/drivers/net/pcmcia/ibmtr_cs.c
+++ b/drivers/net/pcmcia/ibmtr_cs.c
@@ -104,7 +104,6 @@ static void ibmtr_detach(struct pcmcia_device *p_dev);
typedef struct ibmtr_dev_t {
struct pcmcia_device *p_dev;
struct net_device *dev;
- dev_node_t node;
window_handle_t sram_win_handle;
struct tok_info *ti;
} ibmtr_dev_t;
@@ -156,8 +155,6 @@ static int __devinit ibmtr_attach(struct pcmcia_device *link)
link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
link->io.NumPorts1 = 4;
link->io.IOAddrLines = 16;
- link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
- link->irq.Handler = ibmtr_interrupt;
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
link->conf.Present = PRESENT_OPTION;
@@ -192,8 +189,7 @@ static void ibmtr_detach(struct pcmcia_device *link)
*/
ti->sram_phys |= 1;
- if (link->dev_node)
- unregister_netdev(dev);
+ unregister_netdev(dev);
del_timer_sync(&(ti->tr_timer));
@@ -238,11 +234,11 @@ static int __devinit ibmtr_config(struct pcmcia_device *link)
}
dev->base_addr = link->io.BasePort1;
- ret = pcmcia_request_irq(link, &link->irq);
+ ret = pcmcia_request_exclusive_irq(link, ibmtr_interrupt);
if (ret)
goto failed;
- dev->irq = link->irq.AssignedIRQ;
- ti->irq = link->irq.AssignedIRQ;
+ dev->irq = link->irq;
+ ti->irq = link->irq;
ti->global_int_enable=GLOBAL_INT_ENABLE+((dev->irq==9) ? 2 : dev->irq);
/* Allocate the MMIO memory window */
@@ -291,18 +287,14 @@ static int __devinit ibmtr_config(struct pcmcia_device *link)
Adapters Technical Reference" SC30-3585 for this info. */
ibmtr_hw_setup(dev, mmiobase);
- link->dev_node = &info->node;
SET_NETDEV_DEV(dev, &link->dev);
i = ibmtr_probe_card(dev);
if (i != 0) {
printk(KERN_NOTICE "ibmtr_cs: register_netdev() failed\n");
- link->dev_node = NULL;
goto failed;
}
- strcpy(info->node.dev_name, dev->name);
-
printk(KERN_INFO
"%s: port %#3lx, irq %d, mmio %#5lx, sram %#5lx, hwaddr=%pM\n",
dev->name, dev->base_addr, dev->irq,
@@ -402,8 +394,6 @@ static void ibmtr_hw_setup(struct net_device *dev, u_int mmiobase)
/* 0x40 will release the card for use */
outb(0x40, dev->base_addr);
-
- return;
}
static struct pcmcia_device_id ibmtr_ids[] = {
diff --git a/drivers/net/pcmcia/nmclan_cs.c b/drivers/net/pcmcia/nmclan_cs.c
index c717b143f11a..9b63dec549cb 100644
--- a/drivers/net/pcmcia/nmclan_cs.c
+++ b/drivers/net/pcmcia/nmclan_cs.c
@@ -363,7 +363,6 @@ typedef struct _mace_statistics {
typedef struct _mace_private {
struct pcmcia_device *p_dev;
- dev_node_t node;
struct net_device_stats linux_stats; /* Linux statistics counters */
mace_statistics mace_stats; /* MACE chip statistics counters */
@@ -463,8 +462,6 @@ static int nmclan_probe(struct pcmcia_device *link)
link->io.NumPorts1 = 32;
link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
link->io.IOAddrLines = 5;
- link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
- link->irq.Handler = mace_interrupt;
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
link->conf.ConfigIndex = 1;
@@ -493,8 +490,7 @@ static void nmclan_detach(struct pcmcia_device *link)
dev_dbg(&link->dev, "nmclan_detach\n");
- if (link->dev_node)
- unregister_netdev(dev);
+ unregister_netdev(dev);
nmclan_release(link);
@@ -652,14 +648,14 @@ static int nmclan_config(struct pcmcia_device *link)
ret = pcmcia_request_io(link, &link->io);
if (ret)
goto failed;
- ret = pcmcia_request_irq(link, &link->irq);
+ ret = pcmcia_request_exclusive_irq(link, mace_interrupt);
if (ret)
goto failed;
ret = pcmcia_request_configuration(link, &link->conf);
if (ret)
goto failed;
- dev->irq = link->irq.AssignedIRQ;
+ dev->irq = link->irq;
dev->base_addr = link->io.BasePort1;
ioaddr = dev->base_addr;
@@ -698,18 +694,14 @@ static int nmclan_config(struct pcmcia_device *link)
else
printk(KERN_NOTICE "nmclan_cs: invalid if_port requested\n");
- link->dev_node = &lp->node;
SET_NETDEV_DEV(dev, &link->dev);
i = register_netdev(dev);
if (i != 0) {
printk(KERN_NOTICE "nmclan_cs: register_netdev() failed\n");
- link->dev_node = NULL;
goto failed;
}
- strcpy(lp->node.dev_name, dev->name);
-
printk(KERN_INFO "%s: nmclan: port %#3lx, irq %d, %s port,"
" hw_addr %pM\n",
dev->name, dev->base_addr, dev->irq, if_names[dev->if_port],
@@ -903,7 +895,7 @@ static void mace_tx_timeout(struct net_device *dev)
#else /* #if RESET_ON_TIMEOUT */
printk("NOT resetting card\n");
#endif /* #if RESET_ON_TIMEOUT */
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
}
@@ -945,8 +937,6 @@ static netdev_tx_t mace_start_xmit(struct sk_buff *skb,
outb(skb->data[skb->len-1], ioaddr + AM2150_XMT);
}
- dev->trans_start = jiffies;
-
#if MULTI_TX
if (lp->tx_free_frames > 0)
netif_start_queue(dev);
@@ -1315,8 +1305,6 @@ static void update_stats(unsigned int ioaddr, struct net_device *dev)
lp->linux_stats.tx_fifo_errors = lp->mace_stats.uflo;
lp->linux_stats.tx_heartbeat_errors = lp->mace_stats.cerr;
/* lp->linux_stats.tx_window_errors; */
-
- return;
} /* update_stats */
/* ----------------------------------------------------------------------------
@@ -1475,7 +1463,7 @@ static void set_multicast_list(struct net_device *dev)
{
mace_private *lp = netdev_priv(dev);
int adr[ETHER_ADDR_LEN] = {0}; /* Ethernet address */
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
#ifdef PCMCIA_DEBUG
{
@@ -1495,8 +1483,8 @@ static void set_multicast_list(struct net_device *dev)
if (num_addrs > 0) {
/* Calculate multicast logical address filter */
memset(lp->multicast_ladrf, 0, MACE_LADRF_LEN);
- netdev_for_each_mc_addr(dmi, dev) {
- memcpy(adr, dmi->dmi_addr, ETHER_ADDR_LEN);
+ netdev_for_each_mc_addr(ha, dev) {
+ memcpy(adr, ha->addr, ETHER_ADDR_LEN);
BuildLAF(lp->multicast_ladrf, adr);
}
}
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index 4c0368de1815..6f77a768ba88 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -208,7 +208,6 @@ static hw_info_t dl10022_info = { 0, 0, 0, 0, IS_DL10022|HAS_MII };
typedef struct pcnet_dev_t {
struct pcmcia_device *p_dev;
- dev_node_t node;
u_int flags;
void __iomem *base;
struct timer_list watchdog;
@@ -264,7 +263,6 @@ static int pcnet_probe(struct pcmcia_device *link)
info->p_dev = link;
link->priv = dev;
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
@@ -288,8 +286,7 @@ static void pcnet_detach(struct pcmcia_device *link)
dev_dbg(&link->dev, "pcnet_detach\n");
- if (link->dev_node)
- unregister_netdev(dev);
+ unregister_netdev(dev);
pcnet_release(link);
@@ -488,8 +485,6 @@ static int try_io_port(struct pcmcia_device *link)
if (link->io.NumPorts2 > 0) {
/* for master/slave multifunction cards */
link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
- link->irq.Attributes =
- IRQ_TYPE_DYNAMIC_SHARING;
}
} else {
/* This should be two 16-port windows */
@@ -559,8 +554,7 @@ static int pcnet_config(struct pcmcia_device *link)
if (ret)
goto failed;
- ret = pcmcia_request_irq(link, &link->irq);
- if (ret)
+ if (!link->irq)
goto failed;
if (link->io.NumPorts2 == 8) {
@@ -574,7 +568,7 @@ static int pcnet_config(struct pcmcia_device *link)
ret = pcmcia_request_configuration(link, &link->conf);
if (ret)
goto failed;
- dev->irq = link->irq.AssignedIRQ;
+ dev->irq = link->irq;
dev->base_addr = link->io.BasePort1;
if (info->flags & HAS_MISC_REG) {
if ((if_port == 1) || (if_port == 2))
@@ -643,17 +637,13 @@ static int pcnet_config(struct pcmcia_device *link)
if (info->flags & (IS_DL10019|IS_DL10022))
mii_phy_probe(dev);
- link->dev_node = &info->node;
SET_NETDEV_DEV(dev, &link->dev);
if (register_netdev(dev) != 0) {
printk(KERN_NOTICE "pcnet_cs: register_netdev() failed\n");
- link->dev_node = NULL;
goto failed;
}
- strcpy(info->node.dev_name, dev->name);
-
if (info->flags & (IS_DL10019|IS_DL10022)) {
u_char id = inb(dev->base_addr + 0x1a);
printk(KERN_INFO "%s: NE2000 (DL100%d rev %02x): ",
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c
index ccc553782a0d..7b6fe89f9db0 100644
--- a/drivers/net/pcmcia/smc91c92_cs.c
+++ b/drivers/net/pcmcia/smc91c92_cs.c
@@ -103,7 +103,6 @@ struct smc_private {
u_short manfid;
u_short cardid;
- dev_node_t node;
struct sk_buff *saved_skb;
int packets_waiting;
void __iomem *base;
@@ -323,14 +322,11 @@ static int smc91c92_probe(struct pcmcia_device *link)
return -ENOMEM;
smc = netdev_priv(dev);
smc->p_dev = link;
- link->priv = dev;
spin_lock_init(&smc->lock);
link->io.NumPorts1 = 16;
link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
link->io.IOAddrLines = 4;
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
- link->irq.Handler = &smc_interrupt;
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
@@ -363,8 +359,7 @@ static void smc91c92_detach(struct pcmcia_device *link)
dev_dbg(&link->dev, "smc91c92_detach\n");
- if (link->dev_node)
- unregister_netdev(dev);
+ unregister_netdev(dev);
smc91c92_release(link);
@@ -453,7 +448,6 @@ static int mhz_mfc_config(struct pcmcia_device *link)
link->conf.Attributes |= CONF_ENABLE_SPKR;
link->conf.Status = CCSR_AUDIO_ENA;
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
link->io.IOAddrLines = 16;
link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
link->io.NumPorts2 = 8;
@@ -652,7 +646,6 @@ static int osi_config(struct pcmcia_device *link)
link->conf.Attributes |= CONF_ENABLE_SPKR;
link->conf.Status = CCSR_AUDIO_ENA;
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
link->io.NumPorts1 = 64;
link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
link->io.NumPorts2 = 8;
@@ -877,7 +870,7 @@ static int smc91c92_config(struct pcmcia_device *link)
if (i)
goto config_failed;
- i = pcmcia_request_irq(link, &link->irq);
+ i = pcmcia_request_irq(link, smc_interrupt);
if (i)
goto config_failed;
i = pcmcia_request_configuration(link, &link->conf);
@@ -887,7 +880,7 @@ static int smc91c92_config(struct pcmcia_device *link)
if (smc->manfid == MANFID_MOTOROLA)
mot_config(link);
- dev->irq = link->irq.AssignedIRQ;
+ dev->irq = link->irq;
if ((if_port >= 0) && (if_port <= 2))
dev->if_port = if_port;
@@ -960,17 +953,13 @@ static int smc91c92_config(struct pcmcia_device *link)
SMC_SELECT_BANK(0);
}
- link->dev_node = &smc->node;
SET_NETDEV_DEV(dev, &link->dev);
if (register_netdev(dev) != 0) {
printk(KERN_ERR "smc91c92_cs: register_netdev() failed\n");
- link->dev_node = NULL;
goto config_undo;
}
- strcpy(smc->node.dev_name, dev->name);
-
printk(KERN_INFO "%s: smc91c%s rev %d: io %#3lx, irq %d, "
"hw_addr %pM\n",
dev->name, name, (rev & 0x0f), dev->base_addr, dev->irq,
@@ -1239,7 +1228,6 @@ static void smc_hardware_send_packet(struct net_device * dev)
dev_kfree_skb_irq(skb);
dev->trans_start = jiffies;
netif_start_queue(dev);
- return;
}
/*====================================================================*/
@@ -1254,7 +1242,7 @@ static void smc_tx_timeout(struct net_device *dev)
dev->name, inw(ioaddr)&0xff, inw(ioaddr + 2));
dev->stats.tx_errors++;
smc_reset(dev);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
smc->saved_skb = NULL;
netif_wake_queue(dev);
}
@@ -1369,7 +1357,6 @@ static void smc_tx_err(struct net_device * dev)
smc->packets_waiting--;
outw(saved_packet, ioaddr + PNR_ARR);
- return;
}
/*====================================================================*/
@@ -1589,8 +1576,6 @@ static void smc_rx(struct net_device *dev)
}
/* Let the MMU free the memory of this packet. */
outw(MC_RELEASE, ioaddr + MMU_CMD);
-
- return;
}
/*======================================================================
@@ -1621,10 +1606,10 @@ static void set_rx_mode(struct net_device *dev)
rx_cfg_setting = RxStripCRC | RxEnable | RxAllMulti;
else {
if (!netdev_mc_empty(dev)) {
- struct dev_mc_list *mc_addr;
+ struct netdev_hw_addr *ha;
- netdev_for_each_mc_addr(mc_addr, dev) {
- u_int position = ether_crc(6, mc_addr->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev) {
+ u_int position = ether_crc(6, ha->addr);
multicast_table[position >> 29] |= 1 << ((position >> 26) & 7);
}
}
@@ -1640,8 +1625,6 @@ static void set_rx_mode(struct net_device *dev)
outw(rx_cfg_setting, ioaddr + RCR);
SMC_SELECT_BANK(2);
spin_unlock_irqrestore(&smc->lock, flags);
-
- return;
}
/*======================================================================
diff --git a/drivers/net/pcmcia/xirc2ps_cs.c b/drivers/net/pcmcia/xirc2ps_cs.c
index 4d1802e457be..b6c3644888cd 100644
--- a/drivers/net/pcmcia/xirc2ps_cs.c
+++ b/drivers/net/pcmcia/xirc2ps_cs.c
@@ -297,31 +297,9 @@ static void xirc2ps_detach(struct pcmcia_device *p_dev);
static irqreturn_t xirc2ps_interrupt(int irq, void *dev_id);
-/****************
- * A linked list of "instances" of the device. Each actual
- * PCMCIA card corresponds to one device instance, and is described
- * by one struct pcmcia_device structure (defined in ds.h).
- *
- * You may not want to use a linked list for this -- for example, the
- * memory card driver uses an array of struct pcmcia_device pointers, where minor
- * device numbers are used to derive the corresponding array index.
- */
-
-/****************
- * A driver needs to provide a dev_node_t structure for each device
- * on a card. In some cases, there is only one device per card (for
- * example, ethernet cards, modems). In other cases, there may be
- * many actual or logical devices (SCSI adapters, memory cards with
- * multiple partitions). The dev_node_t structures need to be kept
- * in a linked list starting at the 'dev' field of a struct pcmcia_device
- * structure. We allocate them in the card's private data structure,
- * because they generally can't be allocated dynamically.
- */
-
typedef struct local_info_t {
struct net_device *dev;
struct pcmcia_device *p_dev;
- dev_node_t node;
int card_type;
int probe_port;
@@ -555,7 +533,6 @@ xirc2ps_probe(struct pcmcia_device *link)
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
link->conf.ConfigIndex = 1;
- link->irq.Handler = xirc2ps_interrupt;
/* Fill in card specific entries */
dev->netdev_ops = &netdev_ops;
@@ -580,8 +557,7 @@ xirc2ps_detach(struct pcmcia_device *link)
dev_dbg(&link->dev, "detach\n");
- if (link->dev_node)
- unregister_netdev(dev);
+ unregister_netdev(dev);
xirc2ps_release(link);
@@ -841,7 +817,6 @@ xirc2ps_config(struct pcmcia_device * link)
link->conf.Attributes |= CONF_ENABLE_SPKR;
link->conf.Status |= CCSR_AUDIO_ENA;
}
- link->irq.Attributes |= IRQ_TYPE_DYNAMIC_SHARING;
link->io.NumPorts2 = 8;
link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
if (local->dingo) {
@@ -866,7 +841,6 @@ xirc2ps_config(struct pcmcia_device * link)
}
printk(KNOT_XIRC "no ports available\n");
} else {
- link->irq.Attributes |= IRQ_TYPE_DYNAMIC_SHARING;
link->io.NumPorts1 = 16;
for (ioaddr = 0x300; ioaddr < 0x400; ioaddr += 0x10) {
link->io.BasePort1 = ioaddr;
@@ -885,7 +859,7 @@ xirc2ps_config(struct pcmcia_device * link)
* Now allocate an interrupt line. Note that this does not
* actually assign a handler to the interrupt.
*/
- if ((err=pcmcia_request_irq(link, &link->irq)))
+ if ((err=pcmcia_request_irq(link, xirc2ps_interrupt)))
goto config_error;
/****************
@@ -982,23 +956,19 @@ xirc2ps_config(struct pcmcia_device * link)
printk(KNOT_XIRC "invalid if_port requested\n");
/* we can now register the device with the net subsystem */
- dev->irq = link->irq.AssignedIRQ;
+ dev->irq = link->irq;
dev->base_addr = link->io.BasePort1;
if (local->dingo)
do_reset(dev, 1); /* a kludge to make the cem56 work */
- link->dev_node = &local->node;
SET_NETDEV_DEV(dev, &link->dev);
if ((err=register_netdev(dev))) {
printk(KNOT_XIRC "register_netdev() failed\n");
- link->dev_node = NULL;
goto config_error;
}
- strcpy(local->node.dev_name, dev->name);
-
/* give some infos about the hardware */
printk(KERN_INFO "%s: %s: port %#3lx, irq %d, hwaddr %pM\n",
dev->name, local->manf_str,(u_long)dev->base_addr, (int)dev->irq,
@@ -1295,7 +1265,7 @@ xirc2ps_tx_timeout_task(struct work_struct *work)
struct net_device *dev = local->dev;
/* reset the card */
do_reset(dev,1);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
}
@@ -1358,7 +1328,6 @@ do_start_xmit(struct sk_buff *skb, struct net_device *dev)
PutByte(XIRCREG_CR, TransmitPacket|EnableIntr);
dev_kfree_skb (skb);
- dev->trans_start = jiffies;
dev->stats.tx_bytes += pktlen;
netif_start_queue(dev);
return NETDEV_TX_OK;
@@ -1398,7 +1367,7 @@ static void set_addresses(struct net_device *dev)
{
unsigned int ioaddr = dev->base_addr;
local_info_t *lp = netdev_priv(dev);
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
struct set_address_info sa_info;
int i;
@@ -1413,10 +1382,10 @@ static void set_addresses(struct net_device *dev)
set_address(&sa_info, dev->dev_addr);
i = 0;
- netdev_for_each_mc_addr(dmi, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
if (i++ == 9)
break;
- set_address(&sa_info, dmi->dmi_addr);
+ set_address(&sa_info, ha->addr);
}
while (i++ < 9)
set_address(&sa_info, dev->dev_addr);
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index 084d78dd1637..c200c2821730 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -448,7 +448,7 @@ static void pcnet32_netif_stop(struct net_device *dev)
{
struct pcnet32_private *lp = netdev_priv(dev);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
napi_disable(&lp->napi);
netif_tx_disable(dev);
}
@@ -647,7 +647,6 @@ free_new_rx_ring:
(1 << size),
new_rx_ring,
new_ring_dma_addr);
- return;
}
static void pcnet32_purge_rx_ring(struct net_device *dev)
@@ -1215,7 +1214,6 @@ static void pcnet32_rx_entry(struct net_device *dev,
skb->protocol = eth_type_trans(skb, dev);
netif_receive_skb(skb);
dev->stats.rx_packets++;
- return;
}
static int pcnet32_rx(struct net_device *dev, int budget)
@@ -2398,7 +2396,7 @@ static void pcnet32_tx_timeout(struct net_device *dev)
}
pcnet32_restart(dev, CSR0_NORMAL);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
spin_unlock_irqrestore(&lp->lock, flags);
@@ -2449,8 +2447,6 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
/* Trigger an immediate send poll. */
lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
- dev->trans_start = jiffies;
-
if (lp->tx_ring[(entry + 1) & lp->tx_mod_mask].base != 0) {
lp->tx_full = 1;
netif_stop_queue(dev);
@@ -2590,7 +2586,7 @@ static void pcnet32_load_multicast(struct net_device *dev)
struct pcnet32_private *lp = netdev_priv(dev);
volatile struct pcnet32_init_block *ib = lp->init_block;
volatile __le16 *mcast_table = (__le16 *)ib->filter;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
unsigned long ioaddr = dev->base_addr;
char *addrs;
int i;
@@ -2611,8 +2607,8 @@ static void pcnet32_load_multicast(struct net_device *dev)
ib->filter[1] = 0;
/* Add addresses */
- netdev_for_each_mc_addr(dmi, dev) {
- addrs = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ addrs = ha->addr;
/* multicast address? */
if (!(*addrs & 1))
@@ -2625,7 +2621,6 @@ static void pcnet32_load_multicast(struct net_device *dev)
for (i = 0; i < 4; i++)
lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
le16_to_cpu(mcast_table[i]));
- return;
}
/*
diff --git a/drivers/net/phy/bcm63xx.c b/drivers/net/phy/bcm63xx.c
index 4fed95e8350e..c12815679837 100644
--- a/drivers/net/phy/bcm63xx.c
+++ b/drivers/net/phy/bcm63xx.c
@@ -130,3 +130,11 @@ static void __exit bcm63xx_phy_exit(void)
module_init(bcm63xx_phy_init);
module_exit(bcm63xx_phy_exit);
+
+static struct mdio_device_id bcm63xx_tbl[] = {
+ { 0x00406000, 0xfffffc00 },
+ { 0x002bdc00, 0xfffffc00 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, bcm63xx_tbl);
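
These {phy_id, phy_id_mask} pairs use the usual PHY matching rule: an entry matches when the device ID agrees with phy_id on every bit set in phy_id_mask, so a 0xfffffc00 mask covers a whole block of revision IDs. An illustrative user-space check of that arithmetic (the struct and function names below are invented for the example, not the kernel's):

#include <stdint.h>
#include <stdio.h>

struct mdio_id { uint32_t phy_id, phy_id_mask; };

static int phy_matches(uint32_t id, const struct mdio_id *e)
{
	return (id & e->phy_id_mask) == (e->phy_id & e->phy_id_mask);
}

int main(void)
{
	/* First bcm63xx entry: 0x00406000/0xfffffc00 spans the IDs
	 * 0x00406000 .. 0x004063ff (the low ten bits are don't-care). */
	struct mdio_id entry = { 0x00406000, 0xfffffc00 };

	printf("%d %d\n",
	       phy_matches(0x00406123, &entry),	/* 1: inside the block  */
	       phy_matches(0x00406400, &entry));	/* 0: outside the block */
	return 0;
}

Exporting the same table through MODULE_DEVICE_TABLE(mdio, ...) is what lets udev autoload the PHY driver when a matching device ID shows up on an MDIO bus.
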
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index f482fc4f8cf1..cecdbbd549ec 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -908,3 +908,19 @@ static void __exit broadcom_exit(void)
module_init(broadcom_init);
module_exit(broadcom_exit);
+
+static struct mdio_device_id broadcom_tbl[] = {
+ { 0x00206070, 0xfffffff0 },
+ { 0x002060e0, 0xfffffff0 },
+ { 0x002060c0, 0xfffffff0 },
+ { 0x002060b0, 0xfffffff0 },
+ { 0x0143bca0, 0xfffffff0 },
+ { 0x0143bcb0, 0xfffffff0 },
+ { PHY_ID_BCM50610, 0xfffffff0 },
+ { PHY_ID_BCM50610M, 0xfffffff0 },
+ { PHY_ID_BCM57780, 0xfffffff0 },
+ { PHY_ID_BCMAC131, 0xfffffff0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, broadcom_tbl);
diff --git a/drivers/net/phy/cicada.c b/drivers/net/phy/cicada.c
index 92282b31d94b..1a325d63756b 100644
--- a/drivers/net/phy/cicada.c
+++ b/drivers/net/phy/cicada.c
@@ -158,3 +158,11 @@ static void __exit cicada_exit(void)
module_init(cicada_init);
module_exit(cicada_exit);
+
+static struct mdio_device_id cicada_tbl[] = {
+ { 0x000fc410, 0x000ffff0 },
+ { 0x000fc440, 0x000fffc0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, cicada_tbl);
diff --git a/drivers/net/phy/davicom.c b/drivers/net/phy/davicom.c
index c722e95853ff..29c17617a2ec 100644
--- a/drivers/net/phy/davicom.c
+++ b/drivers/net/phy/davicom.c
@@ -218,3 +218,12 @@ static void __exit davicom_exit(void)
module_init(davicom_init);
module_exit(davicom_exit);
+
+static struct mdio_device_id davicom_tbl[] = {
+ { 0x0181b880, 0x0ffffff0 },
+ { 0x0181b8a0, 0x0ffffff0 },
+ { 0x00181b80, 0x0ffffff0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, davicom_tbl);
diff --git a/drivers/net/phy/et1011c.c b/drivers/net/phy/et1011c.c
index 7712ebeba9bf..13995f52d6af 100644
--- a/drivers/net/phy/et1011c.c
+++ b/drivers/net/phy/et1011c.c
@@ -110,3 +110,10 @@ static void __exit et1011c_exit(void)
module_init(et1011c_init);
module_exit(et1011c_exit);
+
+static struct mdio_device_id et1011c_tbl[] = {
+ { 0x0282f014, 0xfffffff0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, et1011c_tbl);
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
index 904208b95d4b..439adafeacb1 100644
--- a/drivers/net/phy/icplus.c
+++ b/drivers/net/phy/icplus.c
@@ -131,3 +131,10 @@ static void __exit ip175c_exit(void)
module_init(ip175c_init);
module_exit(ip175c_exit);
+
+static struct mdio_device_id icplus_tbl[] = {
+ { 0x02430d80, 0x0ffffff0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, icplus_tbl);
diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c
index 057ecaacde6b..8ee929b796d8 100644
--- a/drivers/net/phy/lxt.c
+++ b/drivers/net/phy/lxt.c
@@ -173,3 +173,11 @@ static void __exit lxt_exit(void)
module_init(lxt_init);
module_exit(lxt_exit);
+
+static struct mdio_device_id lxt_tbl[] = {
+ { 0x78100000, 0xfffffff0 },
+ { 0x001378e0, 0xfffffff0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, lxt_tbl);
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 64c7fbe0a8e7..78b74e83ce5d 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -648,3 +648,16 @@ static void __exit marvell_exit(void)
module_init(marvell_init);
module_exit(marvell_exit);
+
+static struct mdio_device_id marvell_tbl[] = {
+ { 0x01410c60, 0xfffffff0 },
+ { 0x01410c90, 0xfffffff0 },
+ { 0x01410cc0, 0xfffffff0 },
+ { 0x01410e10, 0xfffffff0 },
+ { 0x01410cb0, 0xfffffff0 },
+ { 0x01410cd0, 0xfffffff0 },
+ { 0x01410e30, 0xfffffff0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, marvell_tbl);
diff --git a/drivers/net/phy/mdio-bitbang.c b/drivers/net/phy/mdio-bitbang.c
index 19e70d7e27ab..65391891d8c4 100644
--- a/drivers/net/phy/mdio-bitbang.c
+++ b/drivers/net/phy/mdio-bitbang.c
@@ -22,8 +22,13 @@
#include <linux/types.h>
#include <linux/delay.h>
-#define MDIO_READ 1
-#define MDIO_WRITE 0
+#define MDIO_READ 2
+#define MDIO_WRITE 1
+
+#define MDIO_C45 (1<<15)
+#define MDIO_C45_ADDR (MDIO_C45 | 0)
+#define MDIO_C45_READ (MDIO_C45 | 3)
+#define MDIO_C45_WRITE (MDIO_C45 | 1)
#define MDIO_SETUP_TIME 10
#define MDIO_HOLD_TIME 10
@@ -89,7 +94,7 @@ static u16 mdiobb_get_num(struct mdiobb_ctrl *ctrl, int bits)
/* Utility to send the preamble, address, and
* register (common to read and write).
*/
-static void mdiobb_cmd(struct mdiobb_ctrl *ctrl, int read, u8 phy, u8 reg)
+static void mdiobb_cmd(struct mdiobb_ctrl *ctrl, int op, u8 phy, u8 reg)
{
const struct mdiobb_ops *ops = ctrl->ops;
int i;
@@ -108,23 +113,56 @@ static void mdiobb_cmd(struct mdiobb_ctrl *ctrl, int read, u8 phy, u8 reg)
for (i = 0; i < 32; i++)
mdiobb_send_bit(ctrl, 1);
- /* send the start bit (01) and the read opcode (10) or write (10) */
+ /* send the start bit (01) and the read opcode (10) or write (01).
+ Clause 45 operation uses 00 for the start and 11, 01 for
+ read/write */
mdiobb_send_bit(ctrl, 0);
- mdiobb_send_bit(ctrl, 1);
- mdiobb_send_bit(ctrl, read);
- mdiobb_send_bit(ctrl, !read);
+ if (op & MDIO_C45)
+ mdiobb_send_bit(ctrl, 0);
+ else
+ mdiobb_send_bit(ctrl, 1);
+ mdiobb_send_bit(ctrl, (op >> 1) & 1);
+ mdiobb_send_bit(ctrl, (op >> 0) & 1);
mdiobb_send_num(ctrl, phy, 5);
mdiobb_send_num(ctrl, reg, 5);
}
+/* In clause 45 mode all commands are prefixed by MDIO_ADDR to specify the
+ lower 16 bits of the 21 bit address. This transfer is done identically to an
+ MDIO_WRITE except for a different opcode. To enable clause 45 mode, OR
+ MII_ADDR_C45 into the register address. Theoretically clause 45 and normal
+ devices can exist on the same bus. Normal devices should ignore the MDIO_ADDR
+ phase. */
+static int mdiobb_cmd_addr(struct mdiobb_ctrl *ctrl, int phy, u32 addr)
+{
+ unsigned int dev_addr = (addr >> 16) & 0x1F;
+ unsigned int reg = addr & 0xFFFF;
+ mdiobb_cmd(ctrl, MDIO_C45_ADDR, phy, dev_addr);
+
+ /* send the turnaround (10) */
+ mdiobb_send_bit(ctrl, 1);
+ mdiobb_send_bit(ctrl, 0);
+
+ mdiobb_send_num(ctrl, reg, 16);
+
+ ctrl->ops->set_mdio_dir(ctrl, 0);
+ mdiobb_get_bit(ctrl);
+
+ return dev_addr;
+}
static int mdiobb_read(struct mii_bus *bus, int phy, int reg)
{
struct mdiobb_ctrl *ctrl = bus->priv;
int ret, i;
- mdiobb_cmd(ctrl, MDIO_READ, phy, reg);
+ if (reg & MII_ADDR_C45) {
+ reg = mdiobb_cmd_addr(ctrl, phy, reg);
+ mdiobb_cmd(ctrl, MDIO_C45_READ, phy, reg);
+ } else
+ mdiobb_cmd(ctrl, MDIO_READ, phy, reg);
+
ctrl->ops->set_mdio_dir(ctrl, 0);
/* check the turnaround bit: the PHY should be driving it to zero */
@@ -147,7 +185,11 @@ static int mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val)
{
struct mdiobb_ctrl *ctrl = bus->priv;
- mdiobb_cmd(ctrl, MDIO_WRITE, phy, reg);
+ if (reg & MII_ADDR_C45) {
+ reg = mdiobb_cmd_addr(ctrl, phy, reg);
+ mdiobb_cmd(ctrl, MDIO_C45_WRITE, phy, reg);
+ } else
+ mdiobb_cmd(ctrl, MDIO_WRITE, phy, reg);
/* send the turnaround (10) */
mdiobb_send_bit(ctrl, 1);
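
From a caller's point of view the clause 45 path above is selected entirely by the register argument: MII_ADDR_C45 is ORed into a value that carries the MMD device address in bits 20:16 and the 16-bit register number in the low bits, which is also why the mdiobus_read()/mdiobus_write() prototypes later in this series widen regnum to u32. A small sketch of composing such an address (MII_ADDR_C45 is assumed here to be the (1 << 30) flag from <linux/mdio.h>):

#include <stdint.h>
#include <stdio.h>

#define MII_ADDR_C45	(1u << 30)	/* assumed value of the flag */

/* Pack a clause 45 MMD device address (0-31) and a 16-bit register number
 * into the single regnum argument handed to the MDIO bus read/write hooks. */
static uint32_t c45_addr(unsigned int devad, unsigned int regnum)
{
	return MII_ADDR_C45 | ((devad & 0x1f) << 16) | (regnum & 0xffff);
}

int main(void)
{
	uint32_t reg = c45_addr(3, 0x0001);	/* e.g. MMD 3 (PCS), register 1 */

	printf("devad=%u reg=0x%04x %s\n",
	       (reg >> 16) & 0x1f, reg & 0xffff,
	       (reg & MII_ADDR_C45) ? "clause 45" : "clause 22");
	return 0;
}
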
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index 35897134a5dd..fc5fef2a8175 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -199,12 +199,12 @@ static int __devinit mdio_ofgpio_probe(struct of_device *ofdev,
if (!pdata)
return -ENOMEM;
- ret = of_get_gpio(ofdev->node, 0);
+ ret = of_get_gpio(ofdev->dev.of_node, 0);
if (ret < 0)
goto out_free;
pdata->mdc = ret;
- ret = of_get_gpio(ofdev->node, 1);
+ ret = of_get_gpio(ofdev->dev.of_node, 1);
if (ret < 0)
goto out_free;
pdata->mdio = ret;
@@ -213,7 +213,7 @@ static int __devinit mdio_ofgpio_probe(struct of_device *ofdev,
if (!new_bus)
goto out_free;
- ret = of_mdiobus_register(new_bus, ofdev->node);
+ ret = of_mdiobus_register(new_bus, ofdev->dev.of_node);
if (ret)
mdio_gpio_bus_deinit(&ofdev->dev);
@@ -241,8 +241,11 @@ static struct of_device_id mdio_ofgpio_match[] = {
MODULE_DEVICE_TABLE(of, mdio_ofgpio_match);
static struct of_platform_driver mdio_ofgpio_driver = {
- .name = "mdio-gpio",
- .match_table = mdio_ofgpio_match,
+ .driver = {
+ .name = "mdio-gpio",
+ .owner = THIS_MODULE,
+ .of_match_table = mdio_ofgpio_match,
+ },
.probe = mdio_ofgpio_probe,
.remove = __devexit_p(mdio_ofgpio_remove),
};
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index e17b70291bbc..6a6b8199a0d6 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -208,7 +208,7 @@ EXPORT_SYMBOL(mdiobus_scan);
* because the bus read/write functions may wait for an interrupt
* to conclude the operation.
*/
-int mdiobus_read(struct mii_bus *bus, int addr, u16 regnum)
+int mdiobus_read(struct mii_bus *bus, int addr, u32 regnum)
{
int retval;
@@ -233,7 +233,7 @@ EXPORT_SYMBOL(mdiobus_read);
* because the bus read/write functions may wait for an interrupt
* to conclude the operation.
*/
-int mdiobus_write(struct mii_bus *bus, int addr, u16 regnum, u16 val)
+int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val)
{
int err;
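
With regnum widened to u32, a caller can request a clause 45 access by OR-ing the MII_ADDR_C45 flag (introduced elsewhere in this series) into the register number together with the MMD device address in bits 16-20. A hedged sketch of such a caller; the helper name c45_regnum() is illustrative, not a kernel API:

    #include <linux/phy.h>

    /* Pack an MMD device address and a 16-bit register into the u32 regnum
     * now accepted by mdiobus_read()/mdiobus_write().
     */
    static inline u32 c45_regnum(unsigned int devad, unsigned int reg)
    {
            return MII_ADDR_C45 | ((devad & 0x1f) << 16) | (reg & 0xffff);
    }

    /* Example: read register 0 of MMD 1 (PMA/PMD) on the given PHY. */
    static int example_read_pma_ctrl(struct mii_bus *bus, int phy_addr)
    {
            return mdiobus_read(bus, phy_addr, c45_regnum(1, 0));
    }
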
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 0cd80e4d71d9..0692f750c404 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -32,6 +32,7 @@ static int kszphy_config_init(struct phy_device *phydev)
static struct phy_driver ks8001_driver = {
.phy_id = PHY_ID_KS8001,
+ .name = "Micrel KS8001",
.phy_id_mask = 0x00fffff0,
.features = PHY_BASIC_FEATURES,
.flags = PHY_POLL,
@@ -102,3 +103,12 @@ module_exit(ksphy_exit);
MODULE_DESCRIPTION("Micrel PHY driver");
MODULE_AUTHOR("David J. Choi");
MODULE_LICENSE("GPL");
+
+static struct mdio_device_id micrel_tbl[] = {
+ { PHY_ID_KSZ9021, 0x000fff10 },
+ { PHY_ID_VSC8201, 0x00fffff0 },
+ { PHY_ID_KS8001, 0x00fffff0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, micrel_tbl);
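
Every PHY driver touched by this patch gains the same addition: an mdio_device_id table of (phy_id, phy_id_mask) pairs exported through MODULE_DEVICE_TABLE(mdio, ...), which lets modpost emit module aliases so the driver can be autoloaded when a matching PHY is found. A small sketch of the masked-equality rule those pairs express (the helper is illustrative, not a kernel function):

    #include <stdbool.h>
    #include <stdint.h>

    struct mdio_device_id {
            uint32_t phy_id;
            uint32_t phy_id_mask;
    };

    /* An entry matches when the masked bits of the probed ID equal the
     * masked bits of the entry; zero bits in the mask are "don't care".
     */
    static bool mdio_id_matches(uint32_t probed_id,
                                const struct mdio_device_id *e)
    {
            return (probed_id & e->phy_id_mask) == (e->phy_id & e->phy_id_mask);
    }
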
diff --git a/drivers/net/phy/national.c b/drivers/net/phy/national.c
index 6c636eb72089..a73ba0bcc0ce 100644
--- a/drivers/net/phy/national.c
+++ b/drivers/net/phy/national.c
@@ -97,7 +97,6 @@ static void ns_giga_speed_fallback(struct phy_device *phydev, int mode)
phy_write(phydev, NS_EXP_MEM_DATA, 0x0008);
phy_write(phydev, MII_BMCR, (bmcr & ~BMCR_PDOWN));
phy_write(phydev, LED_CTRL_REG, mode);
- return;
}
static void ns_10_base_t_hdx_loopack(struct phy_device *phydev, int disable)
@@ -110,8 +109,6 @@ static void ns_10_base_t_hdx_loopack(struct phy_device *phydev, int disable)
printk(KERN_DEBUG "DP83865 PHY: 10BASE-T HDX loopback %s\n",
(ns_exp_read(phydev, 0x1c0) & 0x0001) ? "off" : "on");
-
- return;
}
static int ns_config_init(struct phy_device *phydev)
@@ -153,3 +150,10 @@ MODULE_LICENSE("GPL");
module_init(ns_init);
module_exit(ns_exit);
+
+static struct mdio_device_id ns_tbl[] = {
+ { DP83865_PHY_ID, 0xfffffff0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, ns_tbl);
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index db1794546c56..1a99bb244106 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -149,6 +149,7 @@ EXPORT_SYMBOL(phy_scan_fixups);
struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id)
{
struct phy_device *dev;
+
/* We allocate the device, and initialize the
* default values */
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
@@ -179,6 +180,17 @@ struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id)
mutex_init(&dev->lock);
INIT_DELAYED_WORK(&dev->state_queue, phy_state_machine);
+ /* Request the appropriate module unconditionally; don't
+ bother trying to do so only if it isn't already loaded,
+ because that gets complicated. A hotplug event would have
+ done an unconditional modprobe anyway.
+ We don't do normal hotplug because it won't work for MDIO
+ -- because it relies on the device staying around for long
+ enough for the driver to get loaded. With MDIO, the NIC
+ driver will get bored and give up as soon as it finds that
+ there's no driver _already_ loaded. */
+ request_module(MDIO_MODULE_PREFIX MDIO_ID_FMT, MDIO_ID_ARGS(phy_id));
+
return dev;
}
EXPORT_SYMBOL(phy_device_create);
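
The request_module() call above is the consumer side of the device tables added in this patch: it asks modprobe for a module whose alias matches the probed PHY ID. Assuming MDIO_ID_FMT/MDIO_ID_ARGS render the 32 ID bits one character per bit, MSB first (the exact format lives in the header change, which is not shown here), the requested string would look roughly like this sketch; the buffer size and helper name are illustrative only:

    #include <linux/kernel.h>

    /* Render a "mdio:" alias string for a 32-bit PHY ID (hedged sketch). */
    static void example_mdio_alias(char buf[38], u32 phy_id)
    {
            int i;
            char *p = buf;

            p += sprintf(p, "mdio:");
            for (i = 31; i >= 0; i--)
                    *p++ = (phy_id >> i) & 1 ? '1' : '0';
            *p = '\0';
    }
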
diff --git a/drivers/net/phy/qsemi.c b/drivers/net/phy/qsemi.c
index f6e190f73c32..6736b23f1b28 100644
--- a/drivers/net/phy/qsemi.c
+++ b/drivers/net/phy/qsemi.c
@@ -137,3 +137,10 @@ static void __exit qs6612_exit(void)
module_init(qs6612_init);
module_exit(qs6612_exit);
+
+static struct mdio_device_id qs6612_tbl[] = {
+ { 0x00181440, 0xfffffff0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, qs6612_tbl);
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index a052a6744a51..f567c0e1aaa1 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -78,3 +78,10 @@ static void __exit realtek_exit(void)
module_init(realtek_init);
module_exit(realtek_exit);
+
+static struct mdio_device_id realtek_tbl[] = {
+ { 0x001cc912, 0x001fffff },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, realtek_tbl);
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index ed2644a57500..78fa988256fc 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -253,3 +253,14 @@ MODULE_LICENSE("GPL");
module_init(smsc_init);
module_exit(smsc_exit);
+
+static struct mdio_device_id smsc_tbl[] = {
+ { 0x0007c0a0, 0xfffffff0 },
+ { 0x0007c0b0, 0xfffffff0 },
+ { 0x0007c0c0, 0xfffffff0 },
+ { 0x0007c0d0, 0xfffffff0 },
+ { 0x0007c0f0, 0xfffffff0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, smsc_tbl);
diff --git a/drivers/net/phy/ste10Xp.c b/drivers/net/phy/ste10Xp.c
index 6bdb0d53aaf9..72290099e5e1 100644
--- a/drivers/net/phy/ste10Xp.c
+++ b/drivers/net/phy/ste10Xp.c
@@ -132,6 +132,14 @@ static void __exit ste10Xp_exit(void)
module_init(ste10Xp_init);
module_exit(ste10Xp_exit);
+static struct mdio_device_id ste10Xp_tbl[] = {
+ { STE101P_PHY_ID, 0xfffffff0 },
+ { STE100P_PHY_ID, 0xffffffff },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, ste10Xp_tbl);
+
MODULE_DESCRIPTION("STMicroelectronics STe10Xp PHY driver");
MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index dd3b2447e85a..45cce50a2799 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -191,3 +191,11 @@ static void __exit vsc82xx_exit(void)
module_init(vsc82xx_init);
module_exit(vsc82xx_exit);
+
+static struct mdio_device_id vitesse_tbl[] = {
+ { PHY_ID_VSC8244, 0x000fffc0 },
+ { PHY_ID_VSC8221, 0x000ffff0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, vitesse_tbl);
diff --git a/drivers/net/plip.c b/drivers/net/plip.c
index 9a2103a69e17..ec0349e84a8a 100644
--- a/drivers/net/plip.c
+++ b/drivers/net/plip.c
@@ -979,7 +979,6 @@ plip_tx_packet(struct sk_buff *skb, struct net_device *dev)
printk(KERN_DEBUG "%s: send request\n", dev->name);
spin_lock_irq(&nl->lock);
- dev->trans_start = jiffies;
snd->skb = skb;
snd->length.h = skb->len;
snd->state = PLIP_PK_TRIGGER;
@@ -1192,8 +1191,6 @@ plip_wakeup(void *handle)
/* Clear the data port. */
write_data (dev, 0x00);
}
-
- return;
}
static int
@@ -1309,7 +1306,6 @@ err_parport_unregister:
parport_unregister_device(nl->pardev);
err_free_dev:
free_netdev(dev);
- return;
}
/* plip_detach() is called (by the parport code) when a port is
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 8518a2e58e53..c5f8eb102bf7 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -2174,6 +2174,24 @@ int ppp_unit_number(struct ppp_channel *chan)
}
/*
+ * Return the PPP device interface name of a channel.
+ */
+char *ppp_dev_name(struct ppp_channel *chan)
+{
+ struct channel *pch = chan->ppp;
+ char *name = NULL;
+
+ if (pch) {
+ read_lock_bh(&pch->upl);
+ if (pch->ppp && pch->ppp->dev)
+ name = pch->ppp->dev->name;
+ read_unlock_bh(&pch->upl);
+ }
+ return name;
+}
+
+
+/*
* Disconnect a channel from the generic layer.
* This must be called in process context.
*/
@@ -2901,11 +2919,12 @@ EXPORT_SYMBOL(ppp_register_channel);
EXPORT_SYMBOL(ppp_unregister_channel);
EXPORT_SYMBOL(ppp_channel_index);
EXPORT_SYMBOL(ppp_unit_number);
+EXPORT_SYMBOL(ppp_dev_name);
EXPORT_SYMBOL(ppp_input);
EXPORT_SYMBOL(ppp_input_error);
EXPORT_SYMBOL(ppp_output_wakeup);
EXPORT_SYMBOL(ppp_register_compressor);
EXPORT_SYMBOL(ppp_unregister_compressor);
MODULE_LICENSE("GPL");
-MODULE_ALIAS_CHARDEV_MAJOR(PPP_MAJOR);
-MODULE_ALIAS("/dev/ppp");
+MODULE_ALIAS_CHARDEV(PPP_MAJOR, 0);
+MODULE_ALIAS("devname:ppp");
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c
index cdd11ba100ea..805b64d1e893 100644
--- a/drivers/net/pppoe.c
+++ b/drivers/net/pppoe.c
@@ -89,7 +89,6 @@
#define PPPOE_HASH_SIZE (1 << PPPOE_HASH_BITS)
#define PPPOE_HASH_MASK (PPPOE_HASH_SIZE - 1)
-static int pppoe_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
static int pppoe_xmit(struct ppp_channel *chan, struct sk_buff *skb);
static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb);
@@ -258,7 +257,7 @@ static inline struct pppox_sock *get_item_by_addr(struct net *net,
dev = dev_get_by_name_rcu(net, sp->sa_addr.pppoe.dev);
if (dev) {
ifindex = dev->ifindex;
- pn = net_generic(net, pppoe_net_id);
+ pn = pppoe_pernet(net);
pppox_sock = get_item(pn, sp->sa_addr.pppoe.sid,
sp->sa_addr.pppoe.remote, ifindex);
}
@@ -290,12 +289,7 @@ static void pppoe_flush_dev(struct net_device *dev)
struct pppoe_net *pn;
int i;
- BUG_ON(dev == NULL);
-
pn = pppoe_pernet(dev_net(dev));
- if (!pn) /* already freed */
- return;
-
write_lock_bh(&pn->hash_lock);
for (i = 0; i < PPPOE_HASH_SIZE; i++) {
struct pppox_sock *po = pn->hash_table[i];
@@ -368,7 +362,7 @@ static int pppoe_device_event(struct notifier_block *this,
default:
break;
- };
+ }
return NOTIFY_DONE;
}
diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c
deleted file mode 100644
index 449a9825200d..000000000000
--- a/drivers/net/pppol2tp.c
+++ /dev/null
@@ -1,2680 +0,0 @@
-/*****************************************************************************
- * Linux PPP over L2TP (PPPoX/PPPoL2TP) Sockets
- *
- * PPPoX --- Generic PPP encapsulation socket family
- * PPPoL2TP --- PPP over L2TP (RFC 2661)
- *
- * Version: 1.0.0
- *
- * Authors: Martijn van Oosterhout <kleptog@svana.org>
- * James Chapman (jchapman@katalix.com)
- * Contributors:
- * Michal Ostrowski <mostrows@speakeasy.net>
- * Arnaldo Carvalho de Melo <acme@xconectiva.com.br>
- * David S. Miller (davem@redhat.com)
- *
- * License:
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- */
-
-/* This driver handles only L2TP data frames; control frames are handled by a
- * userspace application.
- *
- * To send data in an L2TP session, userspace opens a PPPoL2TP socket and
- * attaches it to a bound UDP socket with local tunnel_id / session_id and
- * peer tunnel_id / session_id set. Data can then be sent or received using
- * regular socket sendmsg() / recvmsg() calls. Kernel parameters of the socket
- * can be read or modified using ioctl() or [gs]etsockopt() calls.
- *
- * When a PPPoL2TP socket is connected with local and peer session_id values
- * zero, the socket is treated as a special tunnel management socket.
- *
- * Here's example userspace code to create a socket for sending/receiving data
- * over an L2TP session:-
- *
- * struct sockaddr_pppol2tp sax;
- * int fd;
- * int session_fd;
- *
- * fd = socket(AF_PPPOX, SOCK_DGRAM, PX_PROTO_OL2TP);
- *
- * sax.sa_family = AF_PPPOX;
- * sax.sa_protocol = PX_PROTO_OL2TP;
- * sax.pppol2tp.fd = tunnel_fd; // bound UDP socket
- * sax.pppol2tp.addr.sin_addr.s_addr = addr->sin_addr.s_addr;
- * sax.pppol2tp.addr.sin_port = addr->sin_port;
- * sax.pppol2tp.addr.sin_family = AF_INET;
- * sax.pppol2tp.s_tunnel = tunnel_id;
- * sax.pppol2tp.s_session = session_id;
- * sax.pppol2tp.d_tunnel = peer_tunnel_id;
- * sax.pppol2tp.d_session = peer_session_id;
- *
- * session_fd = connect(fd, (struct sockaddr *)&sax, sizeof(sax));
- *
- * A pppd plugin that allows PPP traffic to be carried over L2TP using
- * this driver is available from the OpenL2TP project at
- * http://openl2tp.sourceforge.net.
- */
-
-#include <linux/module.h>
-#include <linux/string.h>
-#include <linux/list.h>
-#include <asm/uaccess.h>
-
-#include <linux/kernel.h>
-#include <linux/spinlock.h>
-#include <linux/kthread.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/errno.h>
-#include <linux/jiffies.h>
-
-#include <linux/netdevice.h>
-#include <linux/net.h>
-#include <linux/inetdevice.h>
-#include <linux/skbuff.h>
-#include <linux/init.h>
-#include <linux/ip.h>
-#include <linux/udp.h>
-#include <linux/if_pppox.h>
-#include <linux/if_pppol2tp.h>
-#include <net/sock.h>
-#include <linux/ppp_channel.h>
-#include <linux/ppp_defs.h>
-#include <linux/if_ppp.h>
-#include <linux/file.h>
-#include <linux/hash.h>
-#include <linux/sort.h>
-#include <linux/proc_fs.h>
-#include <linux/nsproxy.h>
-#include <net/net_namespace.h>
-#include <net/netns/generic.h>
-#include <net/dst.h>
-#include <net/ip.h>
-#include <net/udp.h>
-#include <net/xfrm.h>
-
-#include <asm/byteorder.h>
-#include <asm/atomic.h>
-
-
-#define PPPOL2TP_DRV_VERSION "V1.0"
-
-/* L2TP header constants */
-#define L2TP_HDRFLAG_T 0x8000
-#define L2TP_HDRFLAG_L 0x4000
-#define L2TP_HDRFLAG_S 0x0800
-#define L2TP_HDRFLAG_O 0x0200
-#define L2TP_HDRFLAG_P 0x0100
-
-#define L2TP_HDR_VER_MASK 0x000F
-#define L2TP_HDR_VER 0x0002
-
-/* Space for UDP, L2TP and PPP headers */
-#define PPPOL2TP_HEADER_OVERHEAD 40
-
-/* Just some random numbers */
-#define L2TP_TUNNEL_MAGIC 0x42114DDA
-#define L2TP_SESSION_MAGIC 0x0C04EB7D
-
-#define PPPOL2TP_HASH_BITS 4
-#define PPPOL2TP_HASH_SIZE (1 << PPPOL2TP_HASH_BITS)
-
-/* Default trace flags */
-#define PPPOL2TP_DEFAULT_DEBUG_FLAGS 0
-
-#define PRINTK(_mask, _type, _lvl, _fmt, args...) \
- do { \
- if ((_mask) & (_type)) \
- printk(_lvl "PPPOL2TP: " _fmt, ##args); \
- } while(0)
-
-/* Number of bytes to build transmit L2TP headers.
- * Unfortunately the size is different depending on whether sequence numbers
- * are enabled.
- */
-#define PPPOL2TP_L2TP_HDR_SIZE_SEQ 10
-#define PPPOL2TP_L2TP_HDR_SIZE_NOSEQ 6
-
-struct pppol2tp_tunnel;
-
-/* Describes a session. It is the sk_user_data field in the PPPoL2TP
- * socket. Contains information to determine incoming packets and transmit
- * outgoing ones.
- */
-struct pppol2tp_session
-{
- int magic; /* should be
- * L2TP_SESSION_MAGIC */
- int owner; /* pid that opened the socket */
-
- struct sock *sock; /* Pointer to the session
- * PPPoX socket */
- struct sock *tunnel_sock; /* Pointer to the tunnel UDP
- * socket */
-
- struct pppol2tp_addr tunnel_addr; /* Description of tunnel */
-
- struct pppol2tp_tunnel *tunnel; /* back pointer to tunnel
- * context */
-
- char name[20]; /* "sess xxxxx/yyyyy", where
- * x=tunnel_id, y=session_id */
- int mtu;
- int mru;
- int flags; /* accessed by PPPIOCGFLAGS.
- * Unused. */
- unsigned recv_seq:1; /* expect receive packets with
- * sequence numbers? */
- unsigned send_seq:1; /* send packets with sequence
- * numbers? */
- unsigned lns_mode:1; /* behave as LNS? LAC enables
- * sequence numbers under
- * control of LNS. */
- int debug; /* bitmask of debug message
- * categories */
- int reorder_timeout; /* configured reorder timeout
- * (in jiffies) */
- u16 nr; /* session NR state (receive) */
- u16 ns; /* session NR state (send) */
- struct sk_buff_head reorder_q; /* receive reorder queue */
- struct pppol2tp_ioc_stats stats;
- struct hlist_node hlist; /* Hash list node */
-};
-
-/* The sk_user_data field of the tunnel's UDP socket. It contains info to track
- * all the associated sessions so incoming packets can be sorted out
- */
-struct pppol2tp_tunnel
-{
- int magic; /* Should be L2TP_TUNNEL_MAGIC */
- rwlock_t hlist_lock; /* protect session_hlist */
- struct hlist_head session_hlist[PPPOL2TP_HASH_SIZE];
- /* hashed list of sessions,
- * hashed by id */
- int debug; /* bitmask of debug message
- * categories */
- char name[12]; /* "tunl xxxxx" */
- struct pppol2tp_ioc_stats stats;
-
- void (*old_sk_destruct)(struct sock *);
-
- struct sock *sock; /* Parent socket */
- struct list_head list; /* Keep a list of all open
- * prepared sockets */
- struct net *pppol2tp_net; /* the net we belong to */
-
- atomic_t ref_count;
-};
-
-/* Private data stored for received packets in the skb.
- */
-struct pppol2tp_skb_cb {
- u16 ns;
- u16 nr;
- u16 has_seq;
- u16 length;
- unsigned long expires;
-};
-
-#define PPPOL2TP_SKB_CB(skb) ((struct pppol2tp_skb_cb *) &skb->cb[sizeof(struct inet_skb_parm)])
-
-static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb);
-static void pppol2tp_tunnel_free(struct pppol2tp_tunnel *tunnel);
-
-static atomic_t pppol2tp_tunnel_count;
-static atomic_t pppol2tp_session_count;
-static struct ppp_channel_ops pppol2tp_chan_ops = { pppol2tp_xmit , NULL };
-static const struct proto_ops pppol2tp_ops;
-
-/* per-net private data for this module */
-static int pppol2tp_net_id __read_mostly;
-struct pppol2tp_net {
- struct list_head pppol2tp_tunnel_list;
- rwlock_t pppol2tp_tunnel_list_lock;
-};
-
-static inline struct pppol2tp_net *pppol2tp_pernet(struct net *net)
-{
- BUG_ON(!net);
-
- return net_generic(net, pppol2tp_net_id);
-}
-
-/* Helpers to obtain tunnel/session contexts from sockets.
- */
-static inline struct pppol2tp_session *pppol2tp_sock_to_session(struct sock *sk)
-{
- struct pppol2tp_session *session;
-
- if (sk == NULL)
- return NULL;
-
- sock_hold(sk);
- session = (struct pppol2tp_session *)(sk->sk_user_data);
- if (session == NULL) {
- sock_put(sk);
- goto out;
- }
-
- BUG_ON(session->magic != L2TP_SESSION_MAGIC);
-out:
- return session;
-}
-
-static inline struct pppol2tp_tunnel *pppol2tp_sock_to_tunnel(struct sock *sk)
-{
- struct pppol2tp_tunnel *tunnel;
-
- if (sk == NULL)
- return NULL;
-
- sock_hold(sk);
- tunnel = (struct pppol2tp_tunnel *)(sk->sk_user_data);
- if (tunnel == NULL) {
- sock_put(sk);
- goto out;
- }
-
- BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);
-out:
- return tunnel;
-}
-
-/* Tunnel reference counts. Incremented per session that is added to
- * the tunnel.
- */
-static inline void pppol2tp_tunnel_inc_refcount(struct pppol2tp_tunnel *tunnel)
-{
- atomic_inc(&tunnel->ref_count);
-}
-
-static inline void pppol2tp_tunnel_dec_refcount(struct pppol2tp_tunnel *tunnel)
-{
- if (atomic_dec_and_test(&tunnel->ref_count))
- pppol2tp_tunnel_free(tunnel);
-}
-
-/* Session hash list.
- * The session_id SHOULD be random according to RFC2661, but several
- * L2TP implementations (Cisco and Microsoft) use incrementing
- * session_ids. So we do a real hash on the session_id, rather than a
- * simple bitmask.
- */
-static inline struct hlist_head *
-pppol2tp_session_id_hash(struct pppol2tp_tunnel *tunnel, u16 session_id)
-{
- unsigned long hash_val = (unsigned long) session_id;
- return &tunnel->session_hlist[hash_long(hash_val, PPPOL2TP_HASH_BITS)];
-}
-
-/* Lookup a session by id
- */
-static struct pppol2tp_session *
-pppol2tp_session_find(struct pppol2tp_tunnel *tunnel, u16 session_id)
-{
- struct hlist_head *session_list =
- pppol2tp_session_id_hash(tunnel, session_id);
- struct pppol2tp_session *session;
- struct hlist_node *walk;
-
- read_lock_bh(&tunnel->hlist_lock);
- hlist_for_each_entry(session, walk, session_list, hlist) {
- if (session->tunnel_addr.s_session == session_id) {
- read_unlock_bh(&tunnel->hlist_lock);
- return session;
- }
- }
- read_unlock_bh(&tunnel->hlist_lock);
-
- return NULL;
-}
-
-/* Lookup a tunnel by id
- */
-static struct pppol2tp_tunnel *pppol2tp_tunnel_find(struct net *net, u16 tunnel_id)
-{
- struct pppol2tp_tunnel *tunnel;
- struct pppol2tp_net *pn = pppol2tp_pernet(net);
-
- read_lock_bh(&pn->pppol2tp_tunnel_list_lock);
- list_for_each_entry(tunnel, &pn->pppol2tp_tunnel_list, list) {
- if (tunnel->stats.tunnel_id == tunnel_id) {
- read_unlock_bh(&pn->pppol2tp_tunnel_list_lock);
- return tunnel;
- }
- }
- read_unlock_bh(&pn->pppol2tp_tunnel_list_lock);
-
- return NULL;
-}
-
-/*****************************************************************************
- * Receive data handling
- *****************************************************************************/
-
-/* Queue a skb in order. We come here only if the skb has an L2TP sequence
- * number.
- */
-static void pppol2tp_recv_queue_skb(struct pppol2tp_session *session, struct sk_buff *skb)
-{
- struct sk_buff *skbp;
- struct sk_buff *tmp;
- u16 ns = PPPOL2TP_SKB_CB(skb)->ns;
-
- spin_lock_bh(&session->reorder_q.lock);
- skb_queue_walk_safe(&session->reorder_q, skbp, tmp) {
- if (PPPOL2TP_SKB_CB(skbp)->ns > ns) {
- __skb_queue_before(&session->reorder_q, skbp, skb);
- PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
- "%s: pkt %hu, inserted before %hu, reorder_q len=%d\n",
- session->name, ns, PPPOL2TP_SKB_CB(skbp)->ns,
- skb_queue_len(&session->reorder_q));
- session->stats.rx_oos_packets++;
- goto out;
- }
- }
-
- __skb_queue_tail(&session->reorder_q, skb);
-
-out:
- spin_unlock_bh(&session->reorder_q.lock);
-}
-
-/* Dequeue a single skb.
- */
-static void pppol2tp_recv_dequeue_skb(struct pppol2tp_session *session, struct sk_buff *skb)
-{
- struct pppol2tp_tunnel *tunnel = session->tunnel;
- int length = PPPOL2TP_SKB_CB(skb)->length;
- struct sock *session_sock = NULL;
-
- /* We're about to requeue the skb, so return resources
- * to its current owner (a socket receive buffer).
- */
- skb_orphan(skb);
-
- tunnel->stats.rx_packets++;
- tunnel->stats.rx_bytes += length;
- session->stats.rx_packets++;
- session->stats.rx_bytes += length;
-
- if (PPPOL2TP_SKB_CB(skb)->has_seq) {
- /* Bump our Nr */
- session->nr++;
- PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
- "%s: updated nr to %hu\n", session->name, session->nr);
- }
-
- /* If the socket is bound, send it in to PPP's input queue. Otherwise
- * queue it on the session socket.
- */
- session_sock = session->sock;
- if (session_sock->sk_state & PPPOX_BOUND) {
- struct pppox_sock *po;
- PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
- "%s: recv %d byte data frame, passing to ppp\n",
- session->name, length);
-
- /* We need to forget all info related to the L2TP packet
- * gathered in the skb as we are going to reuse the same
- * skb for the inner packet.
- * Namely we need to:
- * - reset xfrm (IPSec) information as it applies to
- * the outer L2TP packet and not to the inner one
- * - release the dst to force a route lookup on the inner
- * IP packet since skb->dst currently points to the dst
- * of the UDP tunnel
- * - reset netfilter information as it doesn't apply
- * to the inner packet either
- */
- secpath_reset(skb);
- skb_dst_drop(skb);
- nf_reset(skb);
-
- po = pppox_sk(session_sock);
- ppp_input(&po->chan, skb);
- } else {
- PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_INFO,
- "%s: socket not bound\n", session->name);
-
- /* Not bound. Nothing we can do, so discard. */
- session->stats.rx_errors++;
- kfree_skb(skb);
- }
-
- sock_put(session->sock);
-}
-
-/* Dequeue skbs from the session's reorder_q, subject to packet order.
- * Skbs that have been in the queue for too long are simply discarded.
- */
-static void pppol2tp_recv_dequeue(struct pppol2tp_session *session)
-{
- struct sk_buff *skb;
- struct sk_buff *tmp;
-
- /* If the pkt at the head of the queue has the nr that we
- * expect to send up next, dequeue it and any other
- * in-sequence packets behind it.
- */
- spin_lock_bh(&session->reorder_q.lock);
- skb_queue_walk_safe(&session->reorder_q, skb, tmp) {
- if (time_after(jiffies, PPPOL2TP_SKB_CB(skb)->expires)) {
- session->stats.rx_seq_discards++;
- session->stats.rx_errors++;
- PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
- "%s: oos pkt %hu len %d discarded (too old), "
- "waiting for %hu, reorder_q_len=%d\n",
- session->name, PPPOL2TP_SKB_CB(skb)->ns,
- PPPOL2TP_SKB_CB(skb)->length, session->nr,
- skb_queue_len(&session->reorder_q));
- __skb_unlink(skb, &session->reorder_q);
- kfree_skb(skb);
- sock_put(session->sock);
- continue;
- }
-
- if (PPPOL2TP_SKB_CB(skb)->has_seq) {
- if (PPPOL2TP_SKB_CB(skb)->ns != session->nr) {
- PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
- "%s: holding oos pkt %hu len %d, "
- "waiting for %hu, reorder_q_len=%d\n",
- session->name, PPPOL2TP_SKB_CB(skb)->ns,
- PPPOL2TP_SKB_CB(skb)->length, session->nr,
- skb_queue_len(&session->reorder_q));
- goto out;
- }
- }
- __skb_unlink(skb, &session->reorder_q);
-
- /* Process the skb. We release the queue lock while we
- * do so to let other contexts process the queue.
- */
- spin_unlock_bh(&session->reorder_q.lock);
- pppol2tp_recv_dequeue_skb(session, skb);
- spin_lock_bh(&session->reorder_q.lock);
- }
-
-out:
- spin_unlock_bh(&session->reorder_q.lock);
-}
-
-static inline int pppol2tp_verify_udp_checksum(struct sock *sk,
- struct sk_buff *skb)
-{
- struct udphdr *uh = udp_hdr(skb);
- u16 ulen = ntohs(uh->len);
- struct inet_sock *inet;
- __wsum psum;
-
- if (sk->sk_no_check || skb_csum_unnecessary(skb) || !uh->check)
- return 0;
-
- inet = inet_sk(sk);
- psum = csum_tcpudp_nofold(inet->inet_saddr, inet->inet_daddr, ulen,
- IPPROTO_UDP, 0);
-
- if ((skb->ip_summed == CHECKSUM_COMPLETE) &&
- !csum_fold(csum_add(psum, skb->csum)))
- return 0;
-
- skb->csum = psum;
-
- return __skb_checksum_complete(skb);
-}
-
-/* Internal receive frame. Do the real work of receiving an L2TP data frame
- * here. The skb is not on a list when we get here.
- * Returns 0 if the packet was a data packet and was successfully passed on.
- * Returns 1 if the packet was not a good data packet and could not be
- * forwarded. All such packets are passed up to userspace to deal with.
- */
-static int pppol2tp_recv_core(struct sock *sock, struct sk_buff *skb)
-{
- struct pppol2tp_session *session = NULL;
- struct pppol2tp_tunnel *tunnel;
- unsigned char *ptr, *optr;
- u16 hdrflags;
- u16 tunnel_id, session_id;
- int length;
- int offset;
-
- tunnel = pppol2tp_sock_to_tunnel(sock);
- if (tunnel == NULL)
- goto no_tunnel;
-
- if (tunnel->sock && pppol2tp_verify_udp_checksum(tunnel->sock, skb))
- goto discard_bad_csum;
-
- /* UDP always verifies the packet length. */
- __skb_pull(skb, sizeof(struct udphdr));
-
- /* Short packet? */
- if (!pskb_may_pull(skb, 12)) {
- PRINTK(tunnel->debug, PPPOL2TP_MSG_DATA, KERN_INFO,
- "%s: recv short packet (len=%d)\n", tunnel->name, skb->len);
- goto error;
- }
-
- /* Point to L2TP header */
- optr = ptr = skb->data;
-
- /* Get L2TP header flags */
- hdrflags = ntohs(*(__be16*)ptr);
-
- /* Trace packet contents, if enabled */
- if (tunnel->debug & PPPOL2TP_MSG_DATA) {
- length = min(16u, skb->len);
- if (!pskb_may_pull(skb, length))
- goto error;
-
- printk(KERN_DEBUG "%s: recv: ", tunnel->name);
-
- offset = 0;
- do {
- printk(" %02X", ptr[offset]);
- } while (++offset < length);
-
- printk("\n");
- }
-
- /* Get length of L2TP packet */
- length = skb->len;
-
- /* If type is control packet, it is handled by userspace. */
- if (hdrflags & L2TP_HDRFLAG_T) {
- PRINTK(tunnel->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
- "%s: recv control packet, len=%d\n", tunnel->name, length);
- goto error;
- }
-
- /* Skip flags */
- ptr += 2;
-
- /* If length is present, skip it */
- if (hdrflags & L2TP_HDRFLAG_L)
- ptr += 2;
-
- /* Extract tunnel and session ID */
- tunnel_id = ntohs(*(__be16 *) ptr);
- ptr += 2;
- session_id = ntohs(*(__be16 *) ptr);
- ptr += 2;
-
- /* Find the session context */
- session = pppol2tp_session_find(tunnel, session_id);
- if (!session) {
- /* Not found? Pass to userspace to deal with */
- PRINTK(tunnel->debug, PPPOL2TP_MSG_DATA, KERN_INFO,
- "%s: no socket found (%hu/%hu). Passing up.\n",
- tunnel->name, tunnel_id, session_id);
- goto error;
- }
- sock_hold(session->sock);
-
- /* The ref count on the socket was increased by the above call since
- * we now hold a pointer to the session. Take care to do sock_put()
- * when exiting this function from now on...
- */
-
- /* Handle the optional sequence numbers. If we are the LAC,
- * enable/disable sequence numbers under the control of the LNS. If
- * no sequence numbers present but we were expecting them, discard
- * frame.
- */
- if (hdrflags & L2TP_HDRFLAG_S) {
- u16 ns, nr;
- ns = ntohs(*(__be16 *) ptr);
- ptr += 2;
- nr = ntohs(*(__be16 *) ptr);
- ptr += 2;
-
- /* Received a packet with sequence numbers. If we're the LNS,
- * check if we sre sending sequence numbers and if not,
- * configure it so.
- */
- if ((!session->lns_mode) && (!session->send_seq)) {
- PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_INFO,
- "%s: requested to enable seq numbers by LNS\n",
- session->name);
- session->send_seq = -1;
- }
-
- /* Store L2TP info in the skb */
- PPPOL2TP_SKB_CB(skb)->ns = ns;
- PPPOL2TP_SKB_CB(skb)->nr = nr;
- PPPOL2TP_SKB_CB(skb)->has_seq = 1;
-
- PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
- "%s: recv data ns=%hu, nr=%hu, session nr=%hu\n",
- session->name, ns, nr, session->nr);
- } else {
- /* No sequence numbers.
- * If user has configured mandatory sequence numbers, discard.
- */
- if (session->recv_seq) {
- PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_WARNING,
- "%s: recv data has no seq numbers when required. "
- "Discarding\n", session->name);
- session->stats.rx_seq_discards++;
- goto discard;
- }
-
- /* If we're the LAC and we're sending sequence numbers, the
- * LNS has requested that we no longer send sequence numbers.
- * If we're the LNS and we're sending sequence numbers, the
- * LAC is broken. Discard the frame.
- */
- if ((!session->lns_mode) && (session->send_seq)) {
- PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_INFO,
- "%s: requested to disable seq numbers by LNS\n",
- session->name);
- session->send_seq = 0;
- } else if (session->send_seq) {
- PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_WARNING,
- "%s: recv data has no seq numbers when required. "
- "Discarding\n", session->name);
- session->stats.rx_seq_discards++;
- goto discard;
- }
-
- /* Store L2TP info in the skb */
- PPPOL2TP_SKB_CB(skb)->has_seq = 0;
- }
-
- /* If offset bit set, skip it. */
- if (hdrflags & L2TP_HDRFLAG_O) {
- offset = ntohs(*(__be16 *)ptr);
- ptr += 2 + offset;
- }
-
- offset = ptr - optr;
- if (!pskb_may_pull(skb, offset))
- goto discard;
-
- __skb_pull(skb, offset);
-
- /* Skip PPP header, if present. In testing, Microsoft L2TP clients
- * don't send the PPP header (PPP header compression enabled), but
- * other clients can include the header. So we cope with both cases
- * here. The PPP header is always FF03 when using L2TP.
- *
- * Note that skb->data[] isn't dereferenced from a u16 ptr here since
- * the field may be unaligned.
- */
- if (!pskb_may_pull(skb, 2))
- goto discard;
-
- if ((skb->data[0] == 0xff) && (skb->data[1] == 0x03))
- skb_pull(skb, 2);
-
- /* Prepare skb for adding to the session's reorder_q. Hold
- * packets for max reorder_timeout or 1 second if not
- * reordering.
- */
- PPPOL2TP_SKB_CB(skb)->length = length;
- PPPOL2TP_SKB_CB(skb)->expires = jiffies +
- (session->reorder_timeout ? session->reorder_timeout : HZ);
-
- /* Add packet to the session's receive queue. Reordering is done here, if
- * enabled. Saved L2TP protocol info is stored in skb->sb[].
- */
- if (PPPOL2TP_SKB_CB(skb)->has_seq) {
- if (session->reorder_timeout != 0) {
- /* Packet reordering enabled. Add skb to session's
- * reorder queue, in order of ns.
- */
- pppol2tp_recv_queue_skb(session, skb);
- } else {
- /* Packet reordering disabled. Discard out-of-sequence
- * packets
- */
- if (PPPOL2TP_SKB_CB(skb)->ns != session->nr) {
- session->stats.rx_seq_discards++;
- PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
- "%s: oos pkt %hu len %d discarded, "
- "waiting for %hu, reorder_q_len=%d\n",
- session->name, PPPOL2TP_SKB_CB(skb)->ns,
- PPPOL2TP_SKB_CB(skb)->length, session->nr,
- skb_queue_len(&session->reorder_q));
- goto discard;
- }
- skb_queue_tail(&session->reorder_q, skb);
- }
- } else {
- /* No sequence numbers. Add the skb to the tail of the
- * reorder queue. This ensures that it will be
- * delivered after all previous sequenced skbs.
- */
- skb_queue_tail(&session->reorder_q, skb);
- }
-
- /* Try to dequeue as many skbs from reorder_q as we can. */
- pppol2tp_recv_dequeue(session);
- sock_put(sock);
-
- return 0;
-
-discard:
- session->stats.rx_errors++;
- kfree_skb(skb);
- sock_put(session->sock);
- sock_put(sock);
-
- return 0;
-
-discard_bad_csum:
- LIMIT_NETDEBUG("%s: UDP: bad checksum\n", tunnel->name);
- UDP_INC_STATS_USER(&init_net, UDP_MIB_INERRORS, 0);
- tunnel->stats.rx_errors++;
- kfree_skb(skb);
- sock_put(sock);
-
- return 0;
-
-error:
- /* Put UDP header back */
- __skb_push(skb, sizeof(struct udphdr));
- sock_put(sock);
-
-no_tunnel:
- return 1;
-}
-
-/* UDP encapsulation receive handler. See net/ipv4/udp.c.
- * Return codes:
- * 0 : success.
- * <0: error
- * >0: skb should be passed up to userspace as UDP.
- */
-static int pppol2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
-{
- struct pppol2tp_tunnel *tunnel;
-
- tunnel = pppol2tp_sock_to_tunnel(sk);
- if (tunnel == NULL)
- goto pass_up;
-
- PRINTK(tunnel->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
- "%s: received %d bytes\n", tunnel->name, skb->len);
-
- if (pppol2tp_recv_core(sk, skb))
- goto pass_up_put;
-
- sock_put(sk);
- return 0;
-
-pass_up_put:
- sock_put(sk);
-pass_up:
- return 1;
-}
-
-/* Receive message. This is the recvmsg for the PPPoL2TP socket.
- */
-static int pppol2tp_recvmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t len,
- int flags)
-{
- int err;
- struct sk_buff *skb;
- struct sock *sk = sock->sk;
-
- err = -EIO;
- if (sk->sk_state & PPPOX_BOUND)
- goto end;
-
- msg->msg_namelen = 0;
-
- err = 0;
- skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
- flags & MSG_DONTWAIT, &err);
- if (!skb)
- goto end;
-
- if (len > skb->len)
- len = skb->len;
- else if (len < skb->len)
- msg->msg_flags |= MSG_TRUNC;
-
- err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, len);
- if (likely(err == 0))
- err = len;
-
- kfree_skb(skb);
-end:
- return err;
-}
-
-/************************************************************************
- * Transmit handling
- ***********************************************************************/
-
-/* Tell how big L2TP headers are for a particular session. This
- * depends on whether sequence numbers are being used.
- */
-static inline int pppol2tp_l2tp_header_len(struct pppol2tp_session *session)
-{
- if (session->send_seq)
- return PPPOL2TP_L2TP_HDR_SIZE_SEQ;
-
- return PPPOL2TP_L2TP_HDR_SIZE_NOSEQ;
-}
-
-/* Build an L2TP header for the session into the buffer provided.
- */
-static void pppol2tp_build_l2tp_header(struct pppol2tp_session *session,
- void *buf)
-{
- __be16 *bufp = buf;
- u16 flags = L2TP_HDR_VER;
-
- if (session->send_seq)
- flags |= L2TP_HDRFLAG_S;
-
- /* Setup L2TP header.
- * FIXME: Can this ever be unaligned? Is direct dereferencing of
- * 16-bit header fields safe here for all architectures?
- */
- *bufp++ = htons(flags);
- *bufp++ = htons(session->tunnel_addr.d_tunnel);
- *bufp++ = htons(session->tunnel_addr.d_session);
- if (session->send_seq) {
- *bufp++ = htons(session->ns);
- *bufp++ = 0;
- session->ns++;
- PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
- "%s: updated ns to %hu\n", session->name, session->ns);
- }
-}
-
-/* This is the sendmsg for the PPPoL2TP pppol2tp_session socket. We come here
- * when a user application does a sendmsg() on the session socket. L2TP and
- * PPP headers must be inserted into the user's data.
- */
-static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
- size_t total_len)
-{
- static const unsigned char ppph[2] = { 0xff, 0x03 };
- struct sock *sk = sock->sk;
- struct inet_sock *inet;
- __wsum csum;
- struct sk_buff *skb;
- int error;
- int hdr_len;
- struct pppol2tp_session *session;
- struct pppol2tp_tunnel *tunnel;
- struct udphdr *uh;
- unsigned int len;
- struct sock *sk_tun;
- u16 udp_len;
-
- error = -ENOTCONN;
- if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
- goto error;
-
- /* Get session and tunnel contexts */
- error = -EBADF;
- session = pppol2tp_sock_to_session(sk);
- if (session == NULL)
- goto error;
-
- sk_tun = session->tunnel_sock;
- tunnel = pppol2tp_sock_to_tunnel(sk_tun);
- if (tunnel == NULL)
- goto error_put_sess;
-
- /* What header length is configured for this session? */
- hdr_len = pppol2tp_l2tp_header_len(session);
-
- /* Allocate a socket buffer */
- error = -ENOMEM;
- skb = sock_wmalloc(sk, NET_SKB_PAD + sizeof(struct iphdr) +
- sizeof(struct udphdr) + hdr_len +
- sizeof(ppph) + total_len,
- 0, GFP_KERNEL);
- if (!skb)
- goto error_put_sess_tun;
-
- /* Reserve space for headers. */
- skb_reserve(skb, NET_SKB_PAD);
- skb_reset_network_header(skb);
- skb_reserve(skb, sizeof(struct iphdr));
- skb_reset_transport_header(skb);
-
- /* Build UDP header */
- inet = inet_sk(sk_tun);
- udp_len = hdr_len + sizeof(ppph) + total_len;
- uh = (struct udphdr *) skb->data;
- uh->source = inet->inet_sport;
- uh->dest = inet->inet_dport;
- uh->len = htons(udp_len);
- uh->check = 0;
- skb_put(skb, sizeof(struct udphdr));
-
- /* Build L2TP header */
- pppol2tp_build_l2tp_header(session, skb->data);
- skb_put(skb, hdr_len);
-
- /* Add PPP header */
- skb->data[0] = ppph[0];
- skb->data[1] = ppph[1];
- skb_put(skb, 2);
-
- /* Copy user data into skb */
- error = memcpy_fromiovec(skb->data, m->msg_iov, total_len);
- if (error < 0) {
- kfree_skb(skb);
- goto error_put_sess_tun;
- }
- skb_put(skb, total_len);
-
- /* Calculate UDP checksum if configured to do so */
- if (sk_tun->sk_no_check == UDP_CSUM_NOXMIT)
- skb->ip_summed = CHECKSUM_NONE;
- else if (!(skb_dst(skb)->dev->features & NETIF_F_V4_CSUM)) {
- skb->ip_summed = CHECKSUM_COMPLETE;
- csum = skb_checksum(skb, 0, udp_len, 0);
- uh->check = csum_tcpudp_magic(inet->inet_saddr,
- inet->inet_daddr,
- udp_len, IPPROTO_UDP, csum);
- if (uh->check == 0)
- uh->check = CSUM_MANGLED_0;
- } else {
- skb->ip_summed = CHECKSUM_PARTIAL;
- skb->csum_start = skb_transport_header(skb) - skb->head;
- skb->csum_offset = offsetof(struct udphdr, check);
- uh->check = ~csum_tcpudp_magic(inet->inet_saddr,
- inet->inet_daddr,
- udp_len, IPPROTO_UDP, 0);
- }
-
- /* Debug */
- if (session->send_seq)
- PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
- "%s: send %Zd bytes, ns=%hu\n", session->name,
- total_len, session->ns - 1);
- else
- PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
- "%s: send %Zd bytes\n", session->name, total_len);
-
- if (session->debug & PPPOL2TP_MSG_DATA) {
- int i;
- unsigned char *datap = skb->data;
-
- printk(KERN_DEBUG "%s: xmit:", session->name);
- for (i = 0; i < total_len; i++) {
- printk(" %02X", *datap++);
- if (i == 15) {
- printk(" ...");
- break;
- }
- }
- printk("\n");
- }
-
- /* Queue the packet to IP for output */
- len = skb->len;
- error = ip_queue_xmit(skb, 1);
-
- /* Update stats */
- if (error >= 0) {
- tunnel->stats.tx_packets++;
- tunnel->stats.tx_bytes += len;
- session->stats.tx_packets++;
- session->stats.tx_bytes += len;
- } else {
- tunnel->stats.tx_errors++;
- session->stats.tx_errors++;
- }
-
- return error;
-
-error_put_sess_tun:
- sock_put(session->tunnel_sock);
-error_put_sess:
- sock_put(sk);
-error:
- return error;
-}
-
-/* Automatically called when the skb is freed.
- */
-static void pppol2tp_sock_wfree(struct sk_buff *skb)
-{
- sock_put(skb->sk);
-}
-
-/* For data skbs that we transmit, we associate with the tunnel socket
- * but don't do accounting.
- */
-static inline void pppol2tp_skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
-{
- sock_hold(sk);
- skb->sk = sk;
- skb->destructor = pppol2tp_sock_wfree;
-}
-
-/* Transmit function called by generic PPP driver. Sends PPP frame
- * over PPPoL2TP socket.
- *
- * This is almost the same as pppol2tp_sendmsg(), but rather than
- * being called with a msghdr from userspace, it is called with a skb
- * from the kernel.
- *
- * The supplied skb from ppp doesn't have enough headroom for the
- * insertion of L2TP, UDP and IP headers so we need to allocate more
- * headroom in the skb. This will create a cloned skb. But we must be
- * careful in the error case because the caller will expect to free
- * the skb it supplied, not our cloned skb. So we take care to always
- * leave the original skb unfreed if we return an error.
- */
-static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
-{
- static const u8 ppph[2] = { 0xff, 0x03 };
- struct sock *sk = (struct sock *) chan->private;
- struct sock *sk_tun;
- int hdr_len;
- u16 udp_len;
- struct pppol2tp_session *session;
- struct pppol2tp_tunnel *tunnel;
- int rc;
- int headroom;
- int data_len = skb->len;
- struct inet_sock *inet;
- __wsum csum;
- struct udphdr *uh;
- unsigned int len;
- int old_headroom;
- int new_headroom;
-
- if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
- goto abort;
-
- /* Get session and tunnel contexts from the socket */
- session = pppol2tp_sock_to_session(sk);
- if (session == NULL)
- goto abort;
-
- sk_tun = session->tunnel_sock;
- if (sk_tun == NULL)
- goto abort_put_sess;
- tunnel = pppol2tp_sock_to_tunnel(sk_tun);
- if (tunnel == NULL)
- goto abort_put_sess;
-
- /* What header length is configured for this session? */
- hdr_len = pppol2tp_l2tp_header_len(session);
-
- /* Check that there's enough headroom in the skb to insert IP,
- * UDP and L2TP and PPP headers. If not enough, expand it to
- * make room. Adjust truesize.
- */
- headroom = NET_SKB_PAD + sizeof(struct iphdr) +
- sizeof(struct udphdr) + hdr_len + sizeof(ppph);
- old_headroom = skb_headroom(skb);
- if (skb_cow_head(skb, headroom))
- goto abort_put_sess_tun;
-
- new_headroom = skb_headroom(skb);
- skb_orphan(skb);
- skb->truesize += new_headroom - old_headroom;
-
- /* Setup PPP header */
- __skb_push(skb, sizeof(ppph));
- skb->data[0] = ppph[0];
- skb->data[1] = ppph[1];
-
- /* Setup L2TP header */
- pppol2tp_build_l2tp_header(session, __skb_push(skb, hdr_len));
-
- udp_len = sizeof(struct udphdr) + hdr_len + sizeof(ppph) + data_len;
-
- /* Setup UDP header */
- inet = inet_sk(sk_tun);
- __skb_push(skb, sizeof(*uh));
- skb_reset_transport_header(skb);
- uh = udp_hdr(skb);
- uh->source = inet->inet_sport;
- uh->dest = inet->inet_dport;
- uh->len = htons(udp_len);
- uh->check = 0;
-
- /* Debug */
- if (session->send_seq)
- PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
- "%s: send %d bytes, ns=%hu\n", session->name,
- data_len, session->ns - 1);
- else
- PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
- "%s: send %d bytes\n", session->name, data_len);
-
- if (session->debug & PPPOL2TP_MSG_DATA) {
- int i;
- unsigned char *datap = skb->data;
-
- printk(KERN_DEBUG "%s: xmit:", session->name);
- for (i = 0; i < data_len; i++) {
- printk(" %02X", *datap++);
- if (i == 31) {
- printk(" ...");
- break;
- }
- }
- printk("\n");
- }
-
- memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
- IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
- IPSKB_REROUTED);
- nf_reset(skb);
-
- /* Get routing info from the tunnel socket */
- skb_dst_drop(skb);
- skb_dst_set(skb, dst_clone(__sk_dst_get(sk_tun)));
- pppol2tp_skb_set_owner_w(skb, sk_tun);
-
- /* Calculate UDP checksum if configured to do so */
- if (sk_tun->sk_no_check == UDP_CSUM_NOXMIT)
- skb->ip_summed = CHECKSUM_NONE;
- else if ((skb_dst(skb) && skb_dst(skb)->dev) &&
- (!(skb_dst(skb)->dev->features & NETIF_F_V4_CSUM))) {
- skb->ip_summed = CHECKSUM_COMPLETE;
- csum = skb_checksum(skb, 0, udp_len, 0);
- uh->check = csum_tcpudp_magic(inet->inet_saddr,
- inet->inet_daddr,
- udp_len, IPPROTO_UDP, csum);
- if (uh->check == 0)
- uh->check = CSUM_MANGLED_0;
- } else {
- skb->ip_summed = CHECKSUM_PARTIAL;
- skb->csum_start = skb_transport_header(skb) - skb->head;
- skb->csum_offset = offsetof(struct udphdr, check);
- uh->check = ~csum_tcpudp_magic(inet->inet_saddr,
- inet->inet_daddr,
- udp_len, IPPROTO_UDP, 0);
- }
-
- /* Queue the packet to IP for output */
- len = skb->len;
- rc = ip_queue_xmit(skb, 1);
-
- /* Update stats */
- if (rc >= 0) {
- tunnel->stats.tx_packets++;
- tunnel->stats.tx_bytes += len;
- session->stats.tx_packets++;
- session->stats.tx_bytes += len;
- } else {
- tunnel->stats.tx_errors++;
- session->stats.tx_errors++;
- }
-
- sock_put(sk_tun);
- sock_put(sk);
- return 1;
-
-abort_put_sess_tun:
- sock_put(sk_tun);
-abort_put_sess:
- sock_put(sk);
-abort:
- /* Free the original skb */
- kfree_skb(skb);
- return 1;
-}
-
-/*****************************************************************************
- * Session (and tunnel control) socket create/destroy.
- *****************************************************************************/
-
-/* When the tunnel UDP socket is closed, all the attached sockets need to go
- * too.
- */
-static void pppol2tp_tunnel_closeall(struct pppol2tp_tunnel *tunnel)
-{
- int hash;
- struct hlist_node *walk;
- struct hlist_node *tmp;
- struct pppol2tp_session *session;
- struct sock *sk;
-
- BUG_ON(tunnel == NULL);
-
- PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: closing all sessions...\n", tunnel->name);
-
- write_lock_bh(&tunnel->hlist_lock);
- for (hash = 0; hash < PPPOL2TP_HASH_SIZE; hash++) {
-again:
- hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) {
- struct sk_buff *skb;
-
- session = hlist_entry(walk, struct pppol2tp_session, hlist);
-
- sk = session->sock;
-
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: closing session\n", session->name);
-
- hlist_del_init(&session->hlist);
-
- /* Since we should hold the sock lock while
- * doing any unbinding, we need to release the
- * lock we're holding before taking that lock.
- * Hold a reference to the sock so it doesn't
- * disappear as we're jumping between locks.
- */
- sock_hold(sk);
- write_unlock_bh(&tunnel->hlist_lock);
- lock_sock(sk);
-
- if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) {
- pppox_unbind_sock(sk);
- sk->sk_state = PPPOX_DEAD;
- sk->sk_state_change(sk);
- }
-
- /* Purge any queued data */
- skb_queue_purge(&sk->sk_receive_queue);
- skb_queue_purge(&sk->sk_write_queue);
- while ((skb = skb_dequeue(&session->reorder_q))) {
- kfree_skb(skb);
- sock_put(sk);
- }
-
- release_sock(sk);
- sock_put(sk);
-
- /* Now restart from the beginning of this hash
- * chain. We always remove a session from the
- * list so we are guaranteed to make forward
- * progress.
- */
- write_lock_bh(&tunnel->hlist_lock);
- goto again;
- }
- }
- write_unlock_bh(&tunnel->hlist_lock);
-}
-
-/* Really kill the tunnel.
- * Come here only when all sessions have been cleared from the tunnel.
- */
-static void pppol2tp_tunnel_free(struct pppol2tp_tunnel *tunnel)
-{
- struct pppol2tp_net *pn = pppol2tp_pernet(tunnel->pppol2tp_net);
-
- /* Remove from socket list */
- write_lock_bh(&pn->pppol2tp_tunnel_list_lock);
- list_del_init(&tunnel->list);
- write_unlock_bh(&pn->pppol2tp_tunnel_list_lock);
-
- atomic_dec(&pppol2tp_tunnel_count);
- kfree(tunnel);
-}
-
-/* Tunnel UDP socket destruct hook.
- * The tunnel context is deleted only when all session sockets have been
- * closed.
- */
-static void pppol2tp_tunnel_destruct(struct sock *sk)
-{
- struct pppol2tp_tunnel *tunnel;
-
- tunnel = sk->sk_user_data;
- if (tunnel == NULL)
- goto end;
-
- PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: closing...\n", tunnel->name);
-
- /* Close all sessions */
- pppol2tp_tunnel_closeall(tunnel);
-
- /* No longer an encapsulation socket. See net/ipv4/udp.c */
- (udp_sk(sk))->encap_type = 0;
- (udp_sk(sk))->encap_rcv = NULL;
-
- /* Remove hooks into tunnel socket */
- tunnel->sock = NULL;
- sk->sk_destruct = tunnel->old_sk_destruct;
- sk->sk_user_data = NULL;
-
- /* Call original (UDP) socket descructor */
- if (sk->sk_destruct != NULL)
- (*sk->sk_destruct)(sk);
-
- pppol2tp_tunnel_dec_refcount(tunnel);
-
-end:
- return;
-}
-
-/* Really kill the session socket. (Called from sock_put() if
- * refcnt == 0.)
- */
-static void pppol2tp_session_destruct(struct sock *sk)
-{
- struct pppol2tp_session *session = NULL;
-
- if (sk->sk_user_data != NULL) {
- struct pppol2tp_tunnel *tunnel;
-
- session = sk->sk_user_data;
- if (session == NULL)
- goto out;
-
- BUG_ON(session->magic != L2TP_SESSION_MAGIC);
-
- /* Don't use pppol2tp_sock_to_tunnel() here to
- * get the tunnel context because the tunnel
- * socket might have already been closed (its
- * sk->sk_user_data will be NULL) so use the
- * session's private tunnel ptr instead.
- */
- tunnel = session->tunnel;
- if (tunnel != NULL) {
- BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);
-
- /* If session_id is zero, this is a null
- * session context, which was created for a
- * socket that is being used only to manage
- * tunnels.
- */
- if (session->tunnel_addr.s_session != 0) {
- /* Delete the session socket from the
- * hash
- */
- write_lock_bh(&tunnel->hlist_lock);
- hlist_del_init(&session->hlist);
- write_unlock_bh(&tunnel->hlist_lock);
-
- atomic_dec(&pppol2tp_session_count);
- }
-
- /* This will delete the tunnel context if this
- * is the last session on the tunnel.
- */
- session->tunnel = NULL;
- session->tunnel_sock = NULL;
- pppol2tp_tunnel_dec_refcount(tunnel);
- }
- }
-
- kfree(session);
-out:
- return;
-}
-
-/* Called when the PPPoX socket (session) is closed.
- */
-static int pppol2tp_release(struct socket *sock)
-{
- struct sock *sk = sock->sk;
- struct pppol2tp_session *session;
- int error;
-
- if (!sk)
- return 0;
-
- error = -EBADF;
- lock_sock(sk);
- if (sock_flag(sk, SOCK_DEAD) != 0)
- goto error;
-
- pppox_unbind_sock(sk);
-
- /* Signal the death of the socket. */
- sk->sk_state = PPPOX_DEAD;
- sock_orphan(sk);
- sock->sk = NULL;
-
- session = pppol2tp_sock_to_session(sk);
-
- /* Purge any queued data */
- skb_queue_purge(&sk->sk_receive_queue);
- skb_queue_purge(&sk->sk_write_queue);
- if (session != NULL) {
- struct sk_buff *skb;
- while ((skb = skb_dequeue(&session->reorder_q))) {
- kfree_skb(skb);
- sock_put(sk);
- }
- sock_put(sk);
- }
-
- release_sock(sk);
-
- /* This will delete the session context via
- * pppol2tp_session_destruct() if the socket's refcnt drops to
- * zero.
- */
- sock_put(sk);
-
- return 0;
-
-error:
- release_sock(sk);
- return error;
-}
-
-/* Internal function to prepare a tunnel (UDP) socket to have PPPoX
- * sockets attached to it.
- */
-static struct sock *pppol2tp_prepare_tunnel_socket(struct net *net,
- int fd, u16 tunnel_id, int *error)
-{
- int err;
- struct socket *sock = NULL;
- struct sock *sk;
- struct pppol2tp_tunnel *tunnel;
- struct pppol2tp_net *pn;
- struct sock *ret = NULL;
-
- /* Get the tunnel UDP socket from the fd, which was opened by
- * the userspace L2TP daemon.
- */
- err = -EBADF;
- sock = sockfd_lookup(fd, &err);
- if (!sock) {
- PRINTK(-1, PPPOL2TP_MSG_CONTROL, KERN_ERR,
- "tunl %hu: sockfd_lookup(fd=%d) returned %d\n",
- tunnel_id, fd, err);
- goto err;
- }
-
- sk = sock->sk;
-
- /* Quick sanity checks */
- err = -EPROTONOSUPPORT;
- if (sk->sk_protocol != IPPROTO_UDP) {
- PRINTK(-1, PPPOL2TP_MSG_CONTROL, KERN_ERR,
- "tunl %hu: fd %d wrong protocol, got %d, expected %d\n",
- tunnel_id, fd, sk->sk_protocol, IPPROTO_UDP);
- goto err;
- }
- err = -EAFNOSUPPORT;
- if (sock->ops->family != AF_INET) {
- PRINTK(-1, PPPOL2TP_MSG_CONTROL, KERN_ERR,
- "tunl %hu: fd %d wrong family, got %d, expected %d\n",
- tunnel_id, fd, sock->ops->family, AF_INET);
- goto err;
- }
-
- err = -ENOTCONN;
-
- /* Check if this socket has already been prepped */
- tunnel = (struct pppol2tp_tunnel *)sk->sk_user_data;
- if (tunnel != NULL) {
- /* User-data field already set */
- err = -EBUSY;
- BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);
-
- /* This socket has already been prepped */
- ret = tunnel->sock;
- goto out;
- }
-
- /* This socket is available and needs prepping. Create a new tunnel
- * context and init it.
- */
- sk->sk_user_data = tunnel = kzalloc(sizeof(struct pppol2tp_tunnel), GFP_KERNEL);
- if (sk->sk_user_data == NULL) {
- err = -ENOMEM;
- goto err;
- }
-
- tunnel->magic = L2TP_TUNNEL_MAGIC;
- sprintf(&tunnel->name[0], "tunl %hu", tunnel_id);
-
- tunnel->stats.tunnel_id = tunnel_id;
- tunnel->debug = PPPOL2TP_DEFAULT_DEBUG_FLAGS;
-
- /* Hook on the tunnel socket destructor so that we can cleanup
- * if the tunnel socket goes away.
- */
- tunnel->old_sk_destruct = sk->sk_destruct;
- sk->sk_destruct = pppol2tp_tunnel_destruct;
-
- tunnel->sock = sk;
- sk->sk_allocation = GFP_ATOMIC;
-
- /* Misc init */
- rwlock_init(&tunnel->hlist_lock);
-
- /* The net we belong to */
- tunnel->pppol2tp_net = net;
- pn = pppol2tp_pernet(net);
-
- /* Add tunnel to our list */
- INIT_LIST_HEAD(&tunnel->list);
- write_lock_bh(&pn->pppol2tp_tunnel_list_lock);
- list_add(&tunnel->list, &pn->pppol2tp_tunnel_list);
- write_unlock_bh(&pn->pppol2tp_tunnel_list_lock);
- atomic_inc(&pppol2tp_tunnel_count);
-
- /* Bump the reference count. The tunnel context is deleted
- * only when this drops to zero.
- */
- pppol2tp_tunnel_inc_refcount(tunnel);
-
- /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
- (udp_sk(sk))->encap_type = UDP_ENCAP_L2TPINUDP;
- (udp_sk(sk))->encap_rcv = pppol2tp_udp_encap_recv;
-
- ret = tunnel->sock;
-
- *error = 0;
-out:
- if (sock)
- sockfd_put(sock);
-
- return ret;
-
-err:
- *error = err;
- goto out;
-}
-
-static struct proto pppol2tp_sk_proto = {
- .name = "PPPOL2TP",
- .owner = THIS_MODULE,
- .obj_size = sizeof(struct pppox_sock),
-};
-
-/* socket() handler. Initialize a new struct sock.
- */
-static int pppol2tp_create(struct net *net, struct socket *sock)
-{
- int error = -ENOMEM;
- struct sock *sk;
-
- sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppol2tp_sk_proto);
- if (!sk)
- goto out;
-
- sock_init_data(sock, sk);
-
- sock->state = SS_UNCONNECTED;
- sock->ops = &pppol2tp_ops;
-
- sk->sk_backlog_rcv = pppol2tp_recv_core;
- sk->sk_protocol = PX_PROTO_OL2TP;
- sk->sk_family = PF_PPPOX;
- sk->sk_state = PPPOX_NONE;
- sk->sk_type = SOCK_STREAM;
- sk->sk_destruct = pppol2tp_session_destruct;
-
- error = 0;
-
-out:
- return error;
-}
-
-/* connect() handler. Attach a PPPoX socket to a tunnel UDP socket
- */
-static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
- int sockaddr_len, int flags)
-{
- struct sock *sk = sock->sk;
- struct sockaddr_pppol2tp *sp = (struct sockaddr_pppol2tp *) uservaddr;
- struct pppox_sock *po = pppox_sk(sk);
- struct sock *tunnel_sock = NULL;
- struct pppol2tp_session *session = NULL;
- struct pppol2tp_tunnel *tunnel;
- struct dst_entry *dst;
- int error = 0;
-
- lock_sock(sk);
-
- error = -EINVAL;
- if (sp->sa_protocol != PX_PROTO_OL2TP)
- goto end;
-
- /* Check for already bound sockets */
- error = -EBUSY;
- if (sk->sk_state & PPPOX_CONNECTED)
- goto end;
-
- /* We don't supporting rebinding anyway */
- error = -EALREADY;
- if (sk->sk_user_data)
- goto end; /* socket is already attached */
-
- /* Don't bind if s_tunnel is 0 */
- error = -EINVAL;
- if (sp->pppol2tp.s_tunnel == 0)
- goto end;
-
- /* Special case: prepare tunnel socket if s_session and
- * d_session is 0. Otherwise look up tunnel using supplied
- * tunnel id.
- */
- if ((sp->pppol2tp.s_session == 0) && (sp->pppol2tp.d_session == 0)) {
- tunnel_sock = pppol2tp_prepare_tunnel_socket(sock_net(sk),
- sp->pppol2tp.fd,
- sp->pppol2tp.s_tunnel,
- &error);
- if (tunnel_sock == NULL)
- goto end;
-
- sock_hold(tunnel_sock);
- tunnel = tunnel_sock->sk_user_data;
- } else {
- tunnel = pppol2tp_tunnel_find(sock_net(sk), sp->pppol2tp.s_tunnel);
-
- /* Error if we can't find the tunnel */
- error = -ENOENT;
- if (tunnel == NULL)
- goto end;
-
- tunnel_sock = tunnel->sock;
- }
-
- /* Check that this session doesn't already exist */
- error = -EEXIST;
- session = pppol2tp_session_find(tunnel, sp->pppol2tp.s_session);
- if (session != NULL)
- goto end;
-
- /* Allocate and initialize a new session context. */
- session = kzalloc(sizeof(struct pppol2tp_session), GFP_KERNEL);
- if (session == NULL) {
- error = -ENOMEM;
- goto end;
- }
-
- skb_queue_head_init(&session->reorder_q);
-
- session->magic = L2TP_SESSION_MAGIC;
- session->owner = current->pid;
- session->sock = sk;
- session->tunnel = tunnel;
- session->tunnel_sock = tunnel_sock;
- session->tunnel_addr = sp->pppol2tp;
- sprintf(&session->name[0], "sess %hu/%hu",
- session->tunnel_addr.s_tunnel,
- session->tunnel_addr.s_session);
-
- session->stats.tunnel_id = session->tunnel_addr.s_tunnel;
- session->stats.session_id = session->tunnel_addr.s_session;
-
- INIT_HLIST_NODE(&session->hlist);
-
- /* Inherit debug options from tunnel */
- session->debug = tunnel->debug;
-
- /* Default MTU must allow space for UDP/L2TP/PPP
- * headers.
- */
- session->mtu = session->mru = 1500 - PPPOL2TP_HEADER_OVERHEAD;
-
- /* If PMTU discovery was enabled, use the MTU that was discovered */
- dst = sk_dst_get(sk);
- if (dst != NULL) {
- u32 pmtu = dst_mtu(__sk_dst_get(sk));
- if (pmtu != 0)
- session->mtu = session->mru = pmtu -
- PPPOL2TP_HEADER_OVERHEAD;
- dst_release(dst);
- }
-
- /* Special case: if source & dest session_id == 0x0000, this socket is
- * being created to manage the tunnel. Don't add the session to the
- * session hash list, just set up the internal context for use by
- * ioctl() and sockopt() handlers.
- */
- if ((session->tunnel_addr.s_session == 0) &&
- (session->tunnel_addr.d_session == 0)) {
- error = 0;
- sk->sk_user_data = session;
- goto out_no_ppp;
- }
-
- /* Get tunnel context from the tunnel socket */
- tunnel = pppol2tp_sock_to_tunnel(tunnel_sock);
- if (tunnel == NULL) {
- error = -EBADF;
- goto end;
- }
-
- /* Right now, because we don't have a way to push the incoming skbs
- * straight through the UDP layer, the only header we need to worry
- * about is the L2TP header. This size is different depending on
- * whether sequence numbers are enabled for the data channel.
- */
- po->chan.hdrlen = PPPOL2TP_L2TP_HDR_SIZE_NOSEQ;
-
- po->chan.private = sk;
- po->chan.ops = &pppol2tp_chan_ops;
- po->chan.mtu = session->mtu;
-
- error = ppp_register_net_channel(sock_net(sk), &po->chan);
- if (error)
- goto end_put_tun;
-
- /* This is how we get the session context from the socket. */
- sk->sk_user_data = session;
-
- /* Add session to the tunnel's hash list */
- write_lock_bh(&tunnel->hlist_lock);
- hlist_add_head(&session->hlist,
- pppol2tp_session_id_hash(tunnel,
- session->tunnel_addr.s_session));
- write_unlock_bh(&tunnel->hlist_lock);
-
- atomic_inc(&pppol2tp_session_count);
-
-out_no_ppp:
- pppol2tp_tunnel_inc_refcount(tunnel);
- sk->sk_state = PPPOX_CONNECTED;
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: created\n", session->name);
-
-end_put_tun:
- sock_put(tunnel_sock);
-end:
- release_sock(sk);
-
- if (error != 0) {
- if (session)
- PRINTK(session->debug,
- PPPOL2TP_MSG_CONTROL, KERN_WARNING,
- "%s: connect failed: %d\n",
- session->name, error);
- else
- PRINTK(-1, PPPOL2TP_MSG_CONTROL, KERN_WARNING,
- "connect failed: %d\n", error);
- }
-
- return error;
-}
-
-/* getname() support.
- */
-static int pppol2tp_getname(struct socket *sock, struct sockaddr *uaddr,
- int *usockaddr_len, int peer)
-{
- int len = sizeof(struct sockaddr_pppol2tp);
- struct sockaddr_pppol2tp sp;
- int error = 0;
- struct pppol2tp_session *session;
-
- error = -ENOTCONN;
- if (sock->sk->sk_state != PPPOX_CONNECTED)
- goto end;
-
- session = pppol2tp_sock_to_session(sock->sk);
- if (session == NULL) {
- error = -EBADF;
- goto end;
- }
-
- sp.sa_family = AF_PPPOX;
- sp.sa_protocol = PX_PROTO_OL2TP;
- memcpy(&sp.pppol2tp, &session->tunnel_addr,
- sizeof(struct pppol2tp_addr));
-
- memcpy(uaddr, &sp, len);
-
- *usockaddr_len = len;
-
- error = 0;
- sock_put(sock->sk);
-
-end:
- return error;
-}
-
-/****************************************************************************
- * ioctl() handlers.
- *
- * The PPPoX socket is created for L2TP sessions: tunnels have their own UDP
- * sockets. However, in order to control kernel tunnel features, we allow
- * userspace to create a special "tunnel" PPPoX socket which is used for
- * control only. Tunnel PPPoX sockets have session_id == 0 and simply allow
- * the user application to issue L2TP setsockopt(), getsockopt() and ioctl()
- * calls.
- ****************************************************************************/
-
-/* Session ioctl helper.
- */
-static int pppol2tp_session_ioctl(struct pppol2tp_session *session,
- unsigned int cmd, unsigned long arg)
-{
- struct ifreq ifr;
- int err = 0;
- struct sock *sk = session->sock;
- int val = (int) arg;
-
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_DEBUG,
- "%s: pppol2tp_session_ioctl(cmd=%#x, arg=%#lx)\n",
- session->name, cmd, arg);
-
- sock_hold(sk);
-
- switch (cmd) {
- case SIOCGIFMTU:
- err = -ENXIO;
- if (!(sk->sk_state & PPPOX_CONNECTED))
- break;
-
- err = -EFAULT;
- if (copy_from_user(&ifr, (void __user *) arg, sizeof(struct ifreq)))
- break;
- ifr.ifr_mtu = session->mtu;
- if (copy_to_user((void __user *) arg, &ifr, sizeof(struct ifreq)))
- break;
-
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: get mtu=%d\n", session->name, session->mtu);
- err = 0;
- break;
-
- case SIOCSIFMTU:
- err = -ENXIO;
- if (!(sk->sk_state & PPPOX_CONNECTED))
- break;
-
- err = -EFAULT;
- if (copy_from_user(&ifr, (void __user *) arg, sizeof(struct ifreq)))
- break;
-
- session->mtu = ifr.ifr_mtu;
-
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: set mtu=%d\n", session->name, session->mtu);
- err = 0;
- break;
-
- case PPPIOCGMRU:
- err = -ENXIO;
- if (!(sk->sk_state & PPPOX_CONNECTED))
- break;
-
- err = -EFAULT;
- if (put_user(session->mru, (int __user *) arg))
- break;
-
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: get mru=%d\n", session->name, session->mru);
- err = 0;
- break;
-
- case PPPIOCSMRU:
- err = -ENXIO;
- if (!(sk->sk_state & PPPOX_CONNECTED))
- break;
-
- err = -EFAULT;
- if (get_user(val,(int __user *) arg))
- break;
-
- session->mru = val;
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: set mru=%d\n", session->name, session->mru);
- err = 0;
- break;
-
- case PPPIOCGFLAGS:
- err = -EFAULT;
- if (put_user(session->flags, (int __user *) arg))
- break;
-
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: get flags=%d\n", session->name, session->flags);
- err = 0;
- break;
-
- case PPPIOCSFLAGS:
- err = -EFAULT;
- if (get_user(val, (int __user *) arg))
- break;
- session->flags = val;
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: set flags=%d\n", session->name, session->flags);
- err = 0;
- break;
-
- case PPPIOCGL2TPSTATS:
- err = -ENXIO;
- if (!(sk->sk_state & PPPOX_CONNECTED))
- break;
-
- if (copy_to_user((void __user *) arg, &session->stats,
- sizeof(session->stats)))
- break;
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: get L2TP stats\n", session->name);
- err = 0;
- break;
-
- default:
- err = -ENOSYS;
- break;
- }
-
- sock_put(sk);
-
- return err;
-}
-
-/* Tunnel ioctl helper.
- *
- * Note the special handling for PPPIOCGL2TPSTATS below. If the ioctl data
- * specifies a session_id, the session ioctl handler is called. This allows an
- * application to retrieve session stats via a tunnel socket.
- */
-static int pppol2tp_tunnel_ioctl(struct pppol2tp_tunnel *tunnel,
- unsigned int cmd, unsigned long arg)
-{
- int err = 0;
- struct sock *sk = tunnel->sock;
- struct pppol2tp_ioc_stats stats_req;
-
- PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_DEBUG,
- "%s: pppol2tp_tunnel_ioctl(cmd=%#x, arg=%#lx)\n", tunnel->name,
- cmd, arg);
-
- sock_hold(sk);
-
- switch (cmd) {
- case PPPIOCGL2TPSTATS:
- err = -ENXIO;
- if (!(sk->sk_state & PPPOX_CONNECTED))
- break;
-
- if (copy_from_user(&stats_req, (void __user *) arg,
- sizeof(stats_req))) {
- err = -EFAULT;
- break;
- }
- if (stats_req.session_id != 0) {
- /* resend to session ioctl handler */
- struct pppol2tp_session *session =
- pppol2tp_session_find(tunnel, stats_req.session_id);
- if (session != NULL)
- err = pppol2tp_session_ioctl(session, cmd, arg);
- else
- err = -EBADR;
- break;
- }
-#ifdef CONFIG_XFRM
- tunnel->stats.using_ipsec = (sk->sk_policy[0] || sk->sk_policy[1]) ? 1 : 0;
-#endif
- if (copy_to_user((void __user *) arg, &tunnel->stats,
- sizeof(tunnel->stats))) {
- err = -EFAULT;
- break;
- }
- PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: get L2TP stats\n", tunnel->name);
- err = 0;
- break;
-
- default:
- err = -ENOSYS;
- break;
- }
-
- sock_put(sk);
-
- return err;
-}
-
-/* Main ioctl() handler.
- * Dispatch to tunnel or session helpers depending on the socket.
- */
-static int pppol2tp_ioctl(struct socket *sock, unsigned int cmd,
- unsigned long arg)
-{
- struct sock *sk = sock->sk;
- struct pppol2tp_session *session;
- struct pppol2tp_tunnel *tunnel;
- int err;
-
- if (!sk)
- return 0;
-
- err = -EBADF;
- if (sock_flag(sk, SOCK_DEAD) != 0)
- goto end;
-
- err = -ENOTCONN;
- if ((sk->sk_user_data == NULL) ||
- (!(sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND))))
- goto end;
-
- /* Get session context from the socket */
- err = -EBADF;
- session = pppol2tp_sock_to_session(sk);
- if (session == NULL)
- goto end;
-
- /* Special case: if session's session_id is zero, treat ioctl as a
- * tunnel ioctl
- */
- if ((session->tunnel_addr.s_session == 0) &&
- (session->tunnel_addr.d_session == 0)) {
- err = -EBADF;
- tunnel = pppol2tp_sock_to_tunnel(session->tunnel_sock);
- if (tunnel == NULL)
- goto end_put_sess;
-
- err = pppol2tp_tunnel_ioctl(tunnel, cmd, arg);
- sock_put(session->tunnel_sock);
- goto end_put_sess;
- }
-
- err = pppol2tp_session_ioctl(session, cmd, arg);
-
-end_put_sess:
- sock_put(sk);
-end:
- return err;
-}
-
-/*****************************************************************************
- * setsockopt() / getsockopt() support.
- *
- * The PPPoX socket is created for L2TP sessions: tunnels have their own UDP
- * sockets. In order to control kernel tunnel features, we allow userspace to
- * create a special "tunnel" PPPoX socket which is used for control only.
- * Tunnel PPPoX sockets have session_id == 0 and simply allow the user
- * application to issue L2TP setsockopt(), getsockopt() and ioctl() calls.
- *****************************************************************************/
-
-/* Tunnel setsockopt() helper.
- */
-static int pppol2tp_tunnel_setsockopt(struct sock *sk,
- struct pppol2tp_tunnel *tunnel,
- int optname, int val)
-{
- int err = 0;
-
- switch (optname) {
- case PPPOL2TP_SO_DEBUG:
- tunnel->debug = val;
- PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: set debug=%x\n", tunnel->name, tunnel->debug);
- break;
-
- default:
- err = -ENOPROTOOPT;
- break;
- }
-
- return err;
-}
-
-/* Session setsockopt helper.
- */
-static int pppol2tp_session_setsockopt(struct sock *sk,
- struct pppol2tp_session *session,
- int optname, int val)
-{
- int err = 0;
-
- switch (optname) {
- case PPPOL2TP_SO_RECVSEQ:
- if ((val != 0) && (val != 1)) {
- err = -EINVAL;
- break;
- }
- session->recv_seq = val ? -1 : 0;
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: set recv_seq=%d\n", session->name,
- session->recv_seq);
- break;
-
- case PPPOL2TP_SO_SENDSEQ:
- if ((val != 0) && (val != 1)) {
- err = -EINVAL;
- break;
- }
- session->send_seq = val ? -1 : 0;
- {
- struct sock *ssk = session->sock;
- struct pppox_sock *po = pppox_sk(ssk);
- po->chan.hdrlen = val ? PPPOL2TP_L2TP_HDR_SIZE_SEQ :
- PPPOL2TP_L2TP_HDR_SIZE_NOSEQ;
- }
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: set send_seq=%d\n", session->name, session->send_seq);
- break;
-
- case PPPOL2TP_SO_LNSMODE:
- if ((val != 0) && (val != 1)) {
- err = -EINVAL;
- break;
- }
- session->lns_mode = val ? -1 : 0;
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: set lns_mode=%d\n", session->name,
- session->lns_mode);
- break;
-
- case PPPOL2TP_SO_DEBUG:
- session->debug = val;
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: set debug=%x\n", session->name, session->debug);
- break;
-
- case PPPOL2TP_SO_REORDERTO:
- session->reorder_timeout = msecs_to_jiffies(val);
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: set reorder_timeout=%d\n", session->name,
- session->reorder_timeout);
- break;
-
- default:
- err = -ENOPROTOOPT;
- break;
- }
-
- return err;
-}
-
-/* Main setsockopt() entry point.
- * Does API checks, then calls either the tunnel or session setsockopt
- * handler, according to whether the PPPoL2TP socket is for a regular
- * session or the special tunnel type.
- */
-static int pppol2tp_setsockopt(struct socket *sock, int level, int optname,
- char __user *optval, unsigned int optlen)
-{
- struct sock *sk = sock->sk;
- struct pppol2tp_session *session = sk->sk_user_data;
- struct pppol2tp_tunnel *tunnel;
- int val;
- int err;
-
- if (level != SOL_PPPOL2TP)
- return udp_prot.setsockopt(sk, level, optname, optval, optlen);
-
- if (optlen < sizeof(int))
- return -EINVAL;
-
- if (get_user(val, (int __user *)optval))
- return -EFAULT;
-
- err = -ENOTCONN;
- if (sk->sk_user_data == NULL)
- goto end;
-
- /* Get session context from the socket */
- err = -EBADF;
- session = pppol2tp_sock_to_session(sk);
- if (session == NULL)
- goto end;
-
- /* Special case: if session_id == 0x0000, treat as operation on tunnel
- */
- if ((session->tunnel_addr.s_session == 0) &&
- (session->tunnel_addr.d_session == 0)) {
- err = -EBADF;
- tunnel = pppol2tp_sock_to_tunnel(session->tunnel_sock);
- if (tunnel == NULL)
- goto end_put_sess;
-
- err = pppol2tp_tunnel_setsockopt(sk, tunnel, optname, val);
- sock_put(session->tunnel_sock);
- } else
- err = pppol2tp_session_setsockopt(sk, session, optname, val);
-
- err = 0;
-
-end_put_sess:
- sock_put(sk);
-end:
- return err;
-}
-
-/* Tunnel getsockopt helper. Called with sock locked.
- */
-static int pppol2tp_tunnel_getsockopt(struct sock *sk,
- struct pppol2tp_tunnel *tunnel,
- int optname, int *val)
-{
- int err = 0;
-
- switch (optname) {
- case PPPOL2TP_SO_DEBUG:
- *val = tunnel->debug;
- PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: get debug=%x\n", tunnel->name, tunnel->debug);
- break;
-
- default:
- err = -ENOPROTOOPT;
- break;
- }
-
- return err;
-}
-
-/* Session getsockopt helper. Called with sock locked.
- */
-static int pppol2tp_session_getsockopt(struct sock *sk,
- struct pppol2tp_session *session,
- int optname, int *val)
-{
- int err = 0;
-
- switch (optname) {
- case PPPOL2TP_SO_RECVSEQ:
- *val = session->recv_seq;
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: get recv_seq=%d\n", session->name, *val);
- break;
-
- case PPPOL2TP_SO_SENDSEQ:
- *val = session->send_seq;
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: get send_seq=%d\n", session->name, *val);
- break;
-
- case PPPOL2TP_SO_LNSMODE:
- *val = session->lns_mode;
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: get lns_mode=%d\n", session->name, *val);
- break;
-
- case PPPOL2TP_SO_DEBUG:
- *val = session->debug;
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: get debug=%d\n", session->name, *val);
- break;
-
- case PPPOL2TP_SO_REORDERTO:
- *val = (int) jiffies_to_msecs(session->reorder_timeout);
- PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
- "%s: get reorder_timeout=%d\n", session->name, *val);
- break;
-
- default:
- err = -ENOPROTOOPT;
- }
-
- return err;
-}
-
-/* Main getsockopt() entry point.
- * Does API checks, then calls either the tunnel or session getsockopt
- * handler, according to whether the PPPoX socket is for a regular session
- * or the special tunnel type.
- */
-static int pppol2tp_getsockopt(struct socket *sock, int level,
- int optname, char __user *optval, int __user *optlen)
-{
- struct sock *sk = sock->sk;
- struct pppol2tp_session *session = sk->sk_user_data;
- struct pppol2tp_tunnel *tunnel;
- int val, len;
- int err;
-
- if (level != SOL_PPPOL2TP)
- return udp_prot.getsockopt(sk, level, optname, optval, optlen);
-
- if (get_user(len, (int __user *) optlen))
- return -EFAULT;
-
- len = min_t(unsigned int, len, sizeof(int));
-
- if (len < 0)
- return -EINVAL;
-
- err = -ENOTCONN;
- if (sk->sk_user_data == NULL)
- goto end;
-
- /* Get the session context */
- err = -EBADF;
- session = pppol2tp_sock_to_session(sk);
- if (session == NULL)
- goto end;
-
- /* Special case: if session_id == 0x0000, treat as operation on tunnel */
- if ((session->tunnel_addr.s_session == 0) &&
- (session->tunnel_addr.d_session == 0)) {
- err = -EBADF;
- tunnel = pppol2tp_sock_to_tunnel(session->tunnel_sock);
- if (tunnel == NULL)
- goto end_put_sess;
-
- err = pppol2tp_tunnel_getsockopt(sk, tunnel, optname, &val);
- sock_put(session->tunnel_sock);
- } else
- err = pppol2tp_session_getsockopt(sk, session, optname, &val);
-
- err = -EFAULT;
- if (put_user(len, (int __user *) optlen))
- goto end_put_sess;
-
- if (copy_to_user((void __user *) optval, &val, len))
- goto end_put_sess;
-
- err = 0;
-
-end_put_sess:
- sock_put(sk);
-end:
- return err;
-}
-
-/*****************************************************************************
- * /proc filesystem for debug
- *****************************************************************************/
-
-#ifdef CONFIG_PROC_FS
-
-#include <linux/seq_file.h>
-
-struct pppol2tp_seq_data {
- struct seq_net_private p;
- struct pppol2tp_tunnel *tunnel; /* current tunnel */
- struct pppol2tp_session *session; /* NULL means get first session in tunnel */
-};
-
-static struct pppol2tp_session *next_session(struct pppol2tp_tunnel *tunnel, struct pppol2tp_session *curr)
-{
- struct pppol2tp_session *session = NULL;
- struct hlist_node *walk;
- int found = 0;
- int next = 0;
- int i;
-
- read_lock_bh(&tunnel->hlist_lock);
- for (i = 0; i < PPPOL2TP_HASH_SIZE; i++) {
- hlist_for_each_entry(session, walk, &tunnel->session_hlist[i], hlist) {
- if (curr == NULL) {
- found = 1;
- goto out;
- }
- if (session == curr) {
- next = 1;
- continue;
- }
- if (next) {
- found = 1;
- goto out;
- }
- }
- }
-out:
- read_unlock_bh(&tunnel->hlist_lock);
- if (!found)
- session = NULL;
-
- return session;
-}
-
-static struct pppol2tp_tunnel *next_tunnel(struct pppol2tp_net *pn,
- struct pppol2tp_tunnel *curr)
-{
- struct pppol2tp_tunnel *tunnel = NULL;
-
- read_lock_bh(&pn->pppol2tp_tunnel_list_lock);
- if (list_is_last(&curr->list, &pn->pppol2tp_tunnel_list)) {
- goto out;
- }
- tunnel = list_entry(curr->list.next, struct pppol2tp_tunnel, list);
-out:
- read_unlock_bh(&pn->pppol2tp_tunnel_list_lock);
-
- return tunnel;
-}
-
-static void *pppol2tp_seq_start(struct seq_file *m, loff_t *offs)
-{
- struct pppol2tp_seq_data *pd = SEQ_START_TOKEN;
- struct pppol2tp_net *pn;
- loff_t pos = *offs;
-
- if (!pos)
- goto out;
-
- BUG_ON(m->private == NULL);
- pd = m->private;
- pn = pppol2tp_pernet(seq_file_net(m));
-
- if (pd->tunnel == NULL) {
- if (!list_empty(&pn->pppol2tp_tunnel_list))
- pd->tunnel = list_entry(pn->pppol2tp_tunnel_list.next, struct pppol2tp_tunnel, list);
- } else {
- pd->session = next_session(pd->tunnel, pd->session);
- if (pd->session == NULL) {
- pd->tunnel = next_tunnel(pn, pd->tunnel);
- }
- }
-
- /* NULL tunnel and session indicates end of list */
- if ((pd->tunnel == NULL) && (pd->session == NULL))
- pd = NULL;
-
-out:
- return pd;
-}
-
-static void *pppol2tp_seq_next(struct seq_file *m, void *v, loff_t *pos)
-{
- (*pos)++;
- return NULL;
-}
-
-static void pppol2tp_seq_stop(struct seq_file *p, void *v)
-{
- /* nothing to do */
-}
-
-static void pppol2tp_seq_tunnel_show(struct seq_file *m, void *v)
-{
- struct pppol2tp_tunnel *tunnel = v;
-
- seq_printf(m, "\nTUNNEL '%s', %c %d\n",
- tunnel->name,
- (tunnel == tunnel->sock->sk_user_data) ? 'Y':'N',
- atomic_read(&tunnel->ref_count) - 1);
- seq_printf(m, " %08x %llu/%llu/%llu %llu/%llu/%llu\n",
- tunnel->debug,
- (unsigned long long)tunnel->stats.tx_packets,
- (unsigned long long)tunnel->stats.tx_bytes,
- (unsigned long long)tunnel->stats.tx_errors,
- (unsigned long long)tunnel->stats.rx_packets,
- (unsigned long long)tunnel->stats.rx_bytes,
- (unsigned long long)tunnel->stats.rx_errors);
-}
-
-static void pppol2tp_seq_session_show(struct seq_file *m, void *v)
-{
- struct pppol2tp_session *session = v;
-
- seq_printf(m, " SESSION '%s' %08X/%d %04X/%04X -> "
- "%04X/%04X %d %c\n",
- session->name,
- ntohl(session->tunnel_addr.addr.sin_addr.s_addr),
- ntohs(session->tunnel_addr.addr.sin_port),
- session->tunnel_addr.s_tunnel,
- session->tunnel_addr.s_session,
- session->tunnel_addr.d_tunnel,
- session->tunnel_addr.d_session,
- session->sock->sk_state,
- (session == session->sock->sk_user_data) ?
- 'Y' : 'N');
- seq_printf(m, " %d/%d/%c/%c/%s %08x %u\n",
- session->mtu, session->mru,
- session->recv_seq ? 'R' : '-',
- session->send_seq ? 'S' : '-',
- session->lns_mode ? "LNS" : "LAC",
- session->debug,
- jiffies_to_msecs(session->reorder_timeout));
- seq_printf(m, " %hu/%hu %llu/%llu/%llu %llu/%llu/%llu\n",
- session->nr, session->ns,
- (unsigned long long)session->stats.tx_packets,
- (unsigned long long)session->stats.tx_bytes,
- (unsigned long long)session->stats.tx_errors,
- (unsigned long long)session->stats.rx_packets,
- (unsigned long long)session->stats.rx_bytes,
- (unsigned long long)session->stats.rx_errors);
-}
-
-static int pppol2tp_seq_show(struct seq_file *m, void *v)
-{
- struct pppol2tp_seq_data *pd = v;
-
- /* display header on line 1 */
- if (v == SEQ_START_TOKEN) {
- seq_puts(m, "PPPoL2TP driver info, " PPPOL2TP_DRV_VERSION "\n");
- seq_puts(m, "TUNNEL name, user-data-ok session-count\n");
- seq_puts(m, " debug tx-pkts/bytes/errs rx-pkts/bytes/errs\n");
- seq_puts(m, " SESSION name, addr/port src-tid/sid "
- "dest-tid/sid state user-data-ok\n");
- seq_puts(m, " mtu/mru/rcvseq/sendseq/lns debug reorderto\n");
- seq_puts(m, " nr/ns tx-pkts/bytes/errs rx-pkts/bytes/errs\n");
- goto out;
- }
-
- /* Show the tunnel or session context.
- */
- if (pd->session == NULL)
- pppol2tp_seq_tunnel_show(m, pd->tunnel);
- else
- pppol2tp_seq_session_show(m, pd->session);
-
-out:
- return 0;
-}
-
-static const struct seq_operations pppol2tp_seq_ops = {
- .start = pppol2tp_seq_start,
- .next = pppol2tp_seq_next,
- .stop = pppol2tp_seq_stop,
- .show = pppol2tp_seq_show,
-};
-
-/* Called when our /proc file is opened. We allocate data for use when
- * iterating our tunnel / session contexts and store it in the private
- * data of the seq_file.
- */
-static int pppol2tp_proc_open(struct inode *inode, struct file *file)
-{
- return seq_open_net(inode, file, &pppol2tp_seq_ops,
- sizeof(struct pppol2tp_seq_data));
-}
-
-static const struct file_operations pppol2tp_proc_fops = {
- .owner = THIS_MODULE,
- .open = pppol2tp_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release_net,
-};
-
-#endif /* CONFIG_PROC_FS */
-
-/*****************************************************************************
- * Init and cleanup
- *****************************************************************************/
-
-static const struct proto_ops pppol2tp_ops = {
- .family = AF_PPPOX,
- .owner = THIS_MODULE,
- .release = pppol2tp_release,
- .bind = sock_no_bind,
- .connect = pppol2tp_connect,
- .socketpair = sock_no_socketpair,
- .accept = sock_no_accept,
- .getname = pppol2tp_getname,
- .poll = datagram_poll,
- .listen = sock_no_listen,
- .shutdown = sock_no_shutdown,
- .setsockopt = pppol2tp_setsockopt,
- .getsockopt = pppol2tp_getsockopt,
- .sendmsg = pppol2tp_sendmsg,
- .recvmsg = pppol2tp_recvmsg,
- .mmap = sock_no_mmap,
- .ioctl = pppox_ioctl,
-};
-
-static struct pppox_proto pppol2tp_proto = {
- .create = pppol2tp_create,
- .ioctl = pppol2tp_ioctl
-};
-
-static __net_init int pppol2tp_init_net(struct net *net)
-{
- struct pppol2tp_net *pn = pppol2tp_pernet(net);
- struct proc_dir_entry *pde;
-
- INIT_LIST_HEAD(&pn->pppol2tp_tunnel_list);
- rwlock_init(&pn->pppol2tp_tunnel_list_lock);
-
- pde = proc_net_fops_create(net, "pppol2tp", S_IRUGO, &pppol2tp_proc_fops);
-#ifdef CONFIG_PROC_FS
- if (!pde)
- return -ENOMEM;
-#endif
-
- return 0;
-}
-
-static __net_exit void pppol2tp_exit_net(struct net *net)
-{
- proc_net_remove(net, "pppol2tp");
-}
-
-static struct pernet_operations pppol2tp_net_ops = {
- .init = pppol2tp_init_net,
- .exit = pppol2tp_exit_net,
- .id = &pppol2tp_net_id,
- .size = sizeof(struct pppol2tp_net),
-};
-
-static int __init pppol2tp_init(void)
-{
- int err;
-
- err = proto_register(&pppol2tp_sk_proto, 0);
- if (err)
- goto out;
- err = register_pppox_proto(PX_PROTO_OL2TP, &pppol2tp_proto);
- if (err)
- goto out_unregister_pppol2tp_proto;
-
- err = register_pernet_device(&pppol2tp_net_ops);
- if (err)
- goto out_unregister_pppox_proto;
-
- printk(KERN_INFO "PPPoL2TP kernel driver, %s\n",
- PPPOL2TP_DRV_VERSION);
-
-out:
- return err;
-out_unregister_pppox_proto:
- unregister_pppox_proto(PX_PROTO_OL2TP);
-out_unregister_pppol2tp_proto:
- proto_unregister(&pppol2tp_sk_proto);
- goto out;
-}
-
-static void __exit pppol2tp_exit(void)
-{
- unregister_pppox_proto(PX_PROTO_OL2TP);
- unregister_pernet_device(&pppol2tp_net_ops);
- proto_unregister(&pppol2tp_sk_proto);
-}
-
-module_init(pppol2tp_init);
-module_exit(pppol2tp_exit);
-
-MODULE_AUTHOR("Martijn van Oosterhout <kleptog@svana.org>, "
- "James Chapman <jchapman@katalix.com>");
-MODULE_DESCRIPTION("PPP over L2TP over UDP");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(PPPOL2TP_DRV_VERSION);
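
The connect() handler removed above documents the driver's userspace contract: a PPPoX/OL2TP socket connect()ed with s_session == d_session == 0 becomes a tunnel-management socket carrying only ioctl()/sockopt traffic, while a non-zero session pair creates a PPP channel. A rough, hypothetical userspace sketch of the tunnel-socket case follows; struct fields are from linux/if_pppol2tp.h, udp_fd is assumed to be a UDP socket an L2TP daemon has already set up for tunnel ID 1, and the peer address field (pppol2tp.addr) is left zeroed for brevity.

/* Hypothetical sketch only: open the special "tunnel" PPPoX socket that the
 * removed pppol2tp_connect() recognised by s_session == d_session == 0.
 */
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/if_pppox.h>
#include <linux/if_pppol2tp.h>

static int open_tunnel_mgmt_socket(int udp_fd)
{
	struct sockaddr_pppol2tp sax;
	int fd = socket(AF_PPPOX, SOCK_DGRAM, PX_PROTO_OL2TP);

	if (fd < 0)
		return -1;

	memset(&sax, 0, sizeof(sax));
	sax.sa_family = AF_PPPOX;
	sax.sa_protocol = PX_PROTO_OL2TP;
	sax.pppol2tp.fd = udp_fd;	/* tunnel UDP socket */
	sax.pppol2tp.s_tunnel = 1;	/* must be non-zero */
	sax.pppol2tp.d_tunnel = 1;
	sax.pppol2tp.s_session = 0;	/* 0/0 marks a tunnel socket */
	sax.pppol2tp.d_session = 0;

	if (connect(fd, (struct sockaddr *)&sax, sizeof(sax)) < 0)
		return -1;

	return fd;	/* usable for PPPIOCGL2TPSTATS, PPPOL2TP_SO_DEBUG, ... */
}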
diff --git a/drivers/net/ps3_gelic_net.c b/drivers/net/ps3_gelic_net.c
index 5bf229bb34c2..87d6b8f36304 100644
--- a/drivers/net/ps3_gelic_net.c
+++ b/drivers/net/ps3_gelic_net.c
@@ -327,7 +327,7 @@ static int gelic_descr_prepare_rx(struct gelic_card *card,
unsigned int bufsize;
if (gelic_descr_get_status(descr) != GELIC_DESCR_DMA_NOT_IN_USE)
- dev_info(ctodev(card), "%s: ERROR status \n", __func__);
+ dev_info(ctodev(card), "%s: ERROR status\n", __func__);
/* we need to round up the buffer size to a multiple of 128 */
bufsize = ALIGN(GELIC_NET_MAX_MTU, GELIC_NET_RXBUF_ALIGN);
@@ -547,7 +547,7 @@ out:
void gelic_net_set_multi(struct net_device *netdev)
{
struct gelic_card *card = netdev_card(netdev);
- struct dev_mc_list *mc;
+ struct netdev_hw_addr *ha;
unsigned int i;
uint8_t *p;
u64 addr;
@@ -581,9 +581,9 @@ void gelic_net_set_multi(struct net_device *netdev)
}
/* set multicast addresses */
- netdev_for_each_mc_addr(mc, netdev) {
+ netdev_for_each_mc_addr(ha, netdev) {
addr = 0;
- p = mc->dmi_addr;
+ p = ha->addr;
for (i = 0; i < ETH_ALEN; i++) {
addr <<= 8;
addr |= *p++;
@@ -903,9 +903,6 @@ int gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
gelic_descr_release_tx(card, descr->next);
card->tx_chain.tail = descr->next->next;
dev_info(ctodev(card), "%s: kick failure\n", __func__);
- } else {
- /* OK, DMA started/reserved */
- netdev->trans_start = jiffies;
}
spin_unlock_irqrestore(&card->tx_lock, flags);
@@ -1435,7 +1432,7 @@ static void gelic_net_tx_timeout_task(struct work_struct *work)
container_of(work, struct gelic_card, tx_timeout_task);
struct net_device *netdev = card->netdev[GELIC_PORT_ETHERNET_0];
- dev_info(ctodev(card), "%s:Timed out. Restarting... \n", __func__);
+ dev_info(ctodev(card), "%s:Timed out. Restarting...\n", __func__);
if (!(netdev->flags & IFF_UP))
goto out;
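
The gelic_net_set_multi() hunks above follow the tree-wide move from struct dev_mc_list/dmi_addr to struct netdev_hw_addr/addr. Below is a minimal kernel-side sketch of the new walk, matching the loop in the patched function; the closing comment marks where a real driver would program the computed filter value into hardware.

/* Sketch, assuming a kernel context with <linux/netdevice.h> and
 * <linux/if_ether.h> available: iterate the multicast list with the
 * netdev_hw_addr API and fold each MAC into a u64, as the patched
 * gelic_net_set_multi() does.
 */
static void example_walk_mc_list(struct net_device *netdev)
{
	struct netdev_hw_addr *ha;
	const u8 *p;
	u64 addr;
	int i;

	netdev_for_each_mc_addr(ha, netdev) {
		addr = 0;
		p = ha->addr;		/* ETH_ALEN bytes; was mc->dmi_addr */
		for (i = 0; i < ETH_ALEN; i++) {
			addr <<= 8;
			addr |= *p++;
		}
		/* program 'addr' into the card's multicast filter here */
	}
}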
diff --git a/drivers/net/ps3_gelic_wireless.c b/drivers/net/ps3_gelic_wireless.c
index f0be507e5324..43b8d7797f0a 100644
--- a/drivers/net/ps3_gelic_wireless.c
+++ b/drivers/net/ps3_gelic_wireless.c
@@ -96,7 +96,7 @@ static inline int precise_ie(void)
* post_eurus_cmd helpers
*/
struct eurus_cmd_arg_info {
- int pre_arg; /* command requres arg1, arg2 at POST COMMAND */
+ int pre_arg; /* command requires arg1, arg2 at POST COMMAND */
int post_arg; /* command requires arg1, arg2 at GET_RESULT */
};
@@ -301,7 +301,6 @@ static void gelic_wl_get_ch_info(struct gelic_wl_info *wl)
/* 16 bits of MSB has available channels */
wl->ch_info = ch_info_raw >> 48;
}
- return;
}
/* SIOGIWRANGE */
@@ -528,7 +527,7 @@ static void gelic_wl_parse_ie(u8 *data, size_t len,
u8 item_len;
u8 item_id;
- pr_debug("%s: data=%p len=%ld \n", __func__,
+ pr_debug("%s: data=%p len=%ld\n", __func__,
data, len);
memset(ie_info, 0, sizeof(struct ie_info));
@@ -897,7 +896,7 @@ static int gelic_wl_set_auth(struct net_device *netdev,
default:
ret = -EOPNOTSUPP;
break;
- };
+ }
if (!ret)
set_bit(GELIC_WL_STAT_CONFIGURED, &wl->stat);
@@ -979,7 +978,7 @@ static int gelic_wl_set_essid(struct net_device *netdev,
pr_debug("%s: essid = '%s'\n", __func__, extra);
set_bit(GELIC_WL_STAT_ESSID_SET, &wl->stat);
} else {
- pr_debug("%s: ESSID any \n", __func__);
+ pr_debug("%s: ESSID any\n", __func__);
clear_bit(GELIC_WL_STAT_ESSID_SET, &wl->stat);
}
set_bit(GELIC_WL_STAT_CONFIGURED, &wl->stat);
@@ -987,7 +986,7 @@ static int gelic_wl_set_essid(struct net_device *netdev,
gelic_wl_try_associate(netdev); /* FIXME */
- pr_debug("%s: -> \n", __func__);
+ pr_debug("%s: ->\n", __func__);
return 0;
}
@@ -998,7 +997,7 @@ static int gelic_wl_get_essid(struct net_device *netdev,
struct gelic_wl_info *wl = port_wl(netdev_priv(netdev));
unsigned long irqflag;
- pr_debug("%s: <- \n", __func__);
+ pr_debug("%s: <-\n", __func__);
mutex_lock(&wl->assoc_stat_lock);
spin_lock_irqsave(&wl->lock, irqflag);
if (test_bit(GELIC_WL_STAT_ESSID_SET, &wl->stat) ||
@@ -1011,7 +1010,7 @@ static int gelic_wl_get_essid(struct net_device *netdev,
mutex_unlock(&wl->assoc_stat_lock);
spin_unlock_irqrestore(&wl->lock, irqflag);
- pr_debug("%s: -> len=%d \n", __func__, data->essid.length);
+ pr_debug("%s: -> len=%d\n", __func__, data->essid.length);
return 0;
}
@@ -1028,7 +1027,7 @@ static int gelic_wl_set_encode(struct net_device *netdev,
int key_index, index_specified;
int ret = 0;
- pr_debug("%s: <- \n", __func__);
+ pr_debug("%s: <-\n", __func__);
flags = enc->flags & IW_ENCODE_FLAGS;
key_index = enc->flags & IW_ENCODE_INDEX;
@@ -1087,7 +1086,7 @@ static int gelic_wl_set_encode(struct net_device *netdev,
set_bit(GELIC_WL_STAT_CONFIGURED, &wl->stat);
done:
spin_unlock_irqrestore(&wl->lock, irqflag);
- pr_debug("%s: -> \n", __func__);
+ pr_debug("%s: ->\n", __func__);
return ret;
}
@@ -1101,7 +1100,7 @@ static int gelic_wl_get_encode(struct net_device *netdev,
unsigned int key_index, index_specified;
int ret = 0;
- pr_debug("%s: <- \n", __func__);
+ pr_debug("%s: <-\n", __func__);
key_index = enc->flags & IW_ENCODE_INDEX;
pr_debug("%s: flag=%#x point=%p len=%d extra=%p\n", __func__,
enc->flags, enc->pointer, enc->length, extra);
@@ -1215,7 +1214,7 @@ static int gelic_wl_set_encodeext(struct net_device *netdev,
int key_index;
int ret = 0;
- pr_debug("%s: <- \n", __func__);
+ pr_debug("%s: <-\n", __func__);
flags = enc->flags & IW_ENCODE_FLAGS;
alg = ext->alg;
key_index = enc->flags & IW_ENCODE_INDEX;
@@ -1288,7 +1287,7 @@ static int gelic_wl_set_encodeext(struct net_device *netdev,
}
done:
spin_unlock_irqrestore(&wl->lock, irqflag);
- pr_debug("%s: -> \n", __func__);
+ pr_debug("%s: ->\n", __func__);
return ret;
}
@@ -1304,7 +1303,7 @@ static int gelic_wl_get_encodeext(struct net_device *netdev,
int ret = 0;
int max_key_len;
- pr_debug("%s: <- \n", __func__);
+ pr_debug("%s: <-\n", __func__);
max_key_len = enc->length - sizeof(struct iw_encode_ext);
if (max_key_len < 0)
@@ -1359,7 +1358,7 @@ static int gelic_wl_get_encodeext(struct net_device *netdev,
}
out:
spin_unlock_irqrestore(&wl->lock, irqflag);
- pr_debug("%s: -> \n", __func__);
+ pr_debug("%s: ->\n", __func__);
return ret;
}
/* SIOC{S,G}IWMODE */
@@ -1370,7 +1369,7 @@ static int gelic_wl_set_mode(struct net_device *netdev,
__u32 mode = data->mode;
int ret;
- pr_debug("%s: <- \n", __func__);
+ pr_debug("%s: <-\n", __func__);
if (mode == IW_MODE_INFRA)
ret = 0;
else
@@ -1384,7 +1383,7 @@ static int gelic_wl_get_mode(struct net_device *netdev,
union iwreq_data *data, char *extra)
{
__u32 *mode = &data->mode;
- pr_debug("%s: <- \n", __func__);
+ pr_debug("%s: <-\n", __func__);
*mode = IW_MODE_INFRA;
pr_debug("%s: ->\n", __func__);
return 0;
@@ -1992,7 +1991,7 @@ static int gelic_wl_associate_bss(struct gelic_wl_info *wl,
case GELIC_WL_WPA_LEVEL_WPA2:
ret = gelic_wl_do_wpa_setup(wl);
break;
- };
+ }
if (ret) {
pr_debug("%s: WEP/WPA setup failed %d\n", __func__,
@@ -2022,7 +2021,7 @@ static int gelic_wl_associate_bss(struct gelic_wl_info *wl,
if (!rc) {
 /* timed out. Maybe key or crypt mode is wrong */
- pr_info("%s: connect timeout \n", __func__);
+ pr_info("%s: connect timeout\n", __func__);
cmd = gelic_eurus_sync_cmd(wl, GELIC_EURUS_CMD_DISASSOC,
NULL, 0);
kfree(cmd);
@@ -2063,7 +2062,7 @@ static void gelic_wl_connected_event(struct gelic_wl_info *wl,
}
if (desired_event == event) {
- pr_debug("%s: completed \n", __func__);
+ pr_debug("%s: completed\n", __func__);
complete(&wl->assoc_done);
netif_carrier_on(port_to_netdev(wl_port(wl)));
} else
@@ -2280,26 +2279,25 @@ void gelic_wl_interrupt(struct net_device *netdev, u64 status)
/*
* driver helpers
*/
-#define IW_IOCTL(n) [(n) - SIOCSIWCOMMIT]
static const iw_handler gelic_wl_wext_handler[] =
{
- IW_IOCTL(SIOCGIWNAME) = gelic_wl_get_name,
- IW_IOCTL(SIOCGIWRANGE) = gelic_wl_get_range,
- IW_IOCTL(SIOCSIWSCAN) = gelic_wl_set_scan,
- IW_IOCTL(SIOCGIWSCAN) = gelic_wl_get_scan,
- IW_IOCTL(SIOCSIWAUTH) = gelic_wl_set_auth,
- IW_IOCTL(SIOCGIWAUTH) = gelic_wl_get_auth,
- IW_IOCTL(SIOCSIWESSID) = gelic_wl_set_essid,
- IW_IOCTL(SIOCGIWESSID) = gelic_wl_get_essid,
- IW_IOCTL(SIOCSIWENCODE) = gelic_wl_set_encode,
- IW_IOCTL(SIOCGIWENCODE) = gelic_wl_get_encode,
- IW_IOCTL(SIOCSIWAP) = gelic_wl_set_ap,
- IW_IOCTL(SIOCGIWAP) = gelic_wl_get_ap,
- IW_IOCTL(SIOCSIWENCODEEXT) = gelic_wl_set_encodeext,
- IW_IOCTL(SIOCGIWENCODEEXT) = gelic_wl_get_encodeext,
- IW_IOCTL(SIOCSIWMODE) = gelic_wl_set_mode,
- IW_IOCTL(SIOCGIWMODE) = gelic_wl_get_mode,
- IW_IOCTL(SIOCGIWNICKN) = gelic_wl_get_nick,
+ IW_HANDLER(SIOCGIWNAME, gelic_wl_get_name),
+ IW_HANDLER(SIOCGIWRANGE, gelic_wl_get_range),
+ IW_HANDLER(SIOCSIWSCAN, gelic_wl_set_scan),
+ IW_HANDLER(SIOCGIWSCAN, gelic_wl_get_scan),
+ IW_HANDLER(SIOCSIWAUTH, gelic_wl_set_auth),
+ IW_HANDLER(SIOCGIWAUTH, gelic_wl_get_auth),
+ IW_HANDLER(SIOCSIWESSID, gelic_wl_set_essid),
+ IW_HANDLER(SIOCGIWESSID, gelic_wl_get_essid),
+ IW_HANDLER(SIOCSIWENCODE, gelic_wl_set_encode),
+ IW_HANDLER(SIOCGIWENCODE, gelic_wl_get_encode),
+ IW_HANDLER(SIOCSIWAP, gelic_wl_set_ap),
+ IW_HANDLER(SIOCGIWAP, gelic_wl_get_ap),
+ IW_HANDLER(SIOCSIWENCODEEXT, gelic_wl_set_encodeext),
+ IW_HANDLER(SIOCGIWENCODEEXT, gelic_wl_get_encodeext),
+ IW_HANDLER(SIOCSIWMODE, gelic_wl_set_mode),
+ IW_HANDLER(SIOCGIWMODE, gelic_wl_get_mode),
+ IW_HANDLER(SIOCGIWNICKN, gelic_wl_get_nick),
};
static const struct iw_handler_def gelic_wl_wext_handler_def = {
@@ -2318,7 +2316,7 @@ static struct net_device * __devinit gelic_wl_alloc(struct gelic_card *card)
pr_debug("%s:start\n", __func__);
netdev = alloc_etherdev(sizeof(struct gelic_port) +
sizeof(struct gelic_wl_info));
- pr_debug("%s: netdev =%p card=%p \np", __func__, netdev, card);
+ pr_debug("%s: netdev =%p card=%p\n", __func__, netdev, card);
if (!netdev)
return NULL;
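
The wext table rewrite above drops the driver-local IW_IOCTL() indexing macro in favour of the shared IW_HANDLER() helper from include/net/iw_handler.h, which expands to roughly a designated initializer indexed by (cmd - SIOCIWFIRST), so the resulting array layout is unchanged. A small sketch with a hypothetical handler of the standard iw_handler signature (gelic's real gelic_wl_get_name() plays the same role):

#include <linux/netdevice.h>
#include <net/iw_handler.h>

/* Hypothetical handler for illustration only. */
static int example_get_name(struct net_device *dev,
			    struct iw_request_info *info,
			    union iwreq_data *wrqu, char *extra)
{
	strcpy(wrqu->name, "IEEE 802.11bg");
	return 0;
}

static const iw_handler example_wext_handlers[] = {
	IW_HANDLER(SIOCGIWNAME, example_get_name),
};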
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index 4ef0afbcbe1b..54ebb65ada18 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -222,7 +222,6 @@ static void ql_write_common_reg_l(struct ql3_adapter *qdev,
writel(value, reg);
readl(reg);
spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
- return;
}
static void ql_write_common_reg(struct ql3_adapter *qdev,
@@ -230,7 +229,6 @@ static void ql_write_common_reg(struct ql3_adapter *qdev,
{
writel(value, reg);
readl(reg);
- return;
}
static void ql_write_nvram_reg(struct ql3_adapter *qdev,
@@ -239,7 +237,6 @@ static void ql_write_nvram_reg(struct ql3_adapter *qdev,
writel(value, reg);
readl(reg);
udelay(1);
- return;
}
static void ql_write_page0_reg(struct ql3_adapter *qdev,
@@ -249,7 +246,6 @@ static void ql_write_page0_reg(struct ql3_adapter *qdev,
ql_set_register_page(qdev,0);
writel(value, reg);
readl(reg);
- return;
}
/*
@@ -262,7 +258,6 @@ static void ql_write_page1_reg(struct ql3_adapter *qdev,
ql_set_register_page(qdev,1);
writel(value, reg);
readl(reg);
- return;
}
/*
@@ -275,7 +270,6 @@ static void ql_write_page2_reg(struct ql3_adapter *qdev,
ql_set_register_page(qdev,2);
writel(value, reg);
readl(reg);
- return;
}
static void ql_disable_interrupts(struct ql3_adapter *qdev)
@@ -343,8 +337,8 @@ static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
cpu_to_le32(LS_64BITS(map));
lrg_buf_cb->buf_phy_addr_high =
cpu_to_le32(MS_64BITS(map));
- pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
- pci_unmap_len_set(lrg_buf_cb, maplen,
+ dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
+ dma_unmap_len_set(lrg_buf_cb, maplen,
qdev->lrg_buffer_len -
QL_HEADER_SPACE);
}
@@ -1924,8 +1918,8 @@ static int ql_populate_free_queue(struct ql3_adapter *qdev)
cpu_to_le32(LS_64BITS(map));
lrg_buf_cb->buf_phy_addr_high =
cpu_to_le32(MS_64BITS(map));
- pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
- pci_unmap_len_set(lrg_buf_cb, maplen,
+ dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
+ dma_unmap_len_set(lrg_buf_cb, maplen,
qdev->lrg_buffer_len -
QL_HEADER_SPACE);
--qdev->lrg_buf_skb_check;
@@ -2041,16 +2035,16 @@ static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
}
pci_unmap_single(qdev->pdev,
- pci_unmap_addr(&tx_cb->map[0], mapaddr),
- pci_unmap_len(&tx_cb->map[0], maplen),
+ dma_unmap_addr(&tx_cb->map[0], mapaddr),
+ dma_unmap_len(&tx_cb->map[0], maplen),
PCI_DMA_TODEVICE);
tx_cb->seg_count--;
if (tx_cb->seg_count) {
for (i = 1; i < tx_cb->seg_count; i++) {
pci_unmap_page(qdev->pdev,
- pci_unmap_addr(&tx_cb->map[i],
+ dma_unmap_addr(&tx_cb->map[i],
mapaddr),
- pci_unmap_len(&tx_cb->map[i], maplen),
+ dma_unmap_len(&tx_cb->map[i], maplen),
PCI_DMA_TODEVICE);
}
}
@@ -2119,8 +2113,8 @@ static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
skb_put(skb, length);
pci_unmap_single(qdev->pdev,
- pci_unmap_addr(lrg_buf_cb2, mapaddr),
- pci_unmap_len(lrg_buf_cb2, maplen),
+ dma_unmap_addr(lrg_buf_cb2, mapaddr),
+ dma_unmap_len(lrg_buf_cb2, maplen),
PCI_DMA_FROMDEVICE);
prefetch(skb->data);
skb->ip_summed = CHECKSUM_NONE;
@@ -2165,8 +2159,8 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
skb_put(skb2, length); /* Just the second buffer length here. */
pci_unmap_single(qdev->pdev,
- pci_unmap_addr(lrg_buf_cb2, mapaddr),
- pci_unmap_len(lrg_buf_cb2, maplen),
+ dma_unmap_addr(lrg_buf_cb2, mapaddr),
+ dma_unmap_len(lrg_buf_cb2, maplen),
PCI_DMA_FROMDEVICE);
prefetch(skb2->data);
@@ -2258,7 +2252,7 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev,
"%x.\n",
ndev->name, net_rsp->opcode);
printk(KERN_ERR PFX
- "0x%08lx 0x%08lx 0x%08lx 0x%08lx \n",
+ "0x%08lx 0x%08lx 0x%08lx 0x%08lx\n",
(unsigned long int)tmp[0],
(unsigned long int)tmp[1],
(unsigned long int)tmp[2],
@@ -2454,8 +2448,8 @@ static int ql_send_map(struct ql3_adapter *qdev,
oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
oal_entry->len = cpu_to_le32(len);
- pci_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
- pci_unmap_len_set(&tx_cb->map[seg], maplen, len);
+ dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
+ dma_unmap_len_set(&tx_cb->map[seg], maplen, len);
seg++;
if (seg_cnt == 1) {
@@ -2488,9 +2482,9 @@ static int ql_send_map(struct ql3_adapter *qdev,
oal_entry->len =
cpu_to_le32(sizeof(struct oal) |
OAL_CONT_ENTRY);
- pci_unmap_addr_set(&tx_cb->map[seg], mapaddr,
+ dma_unmap_addr_set(&tx_cb->map[seg], mapaddr,
map);
- pci_unmap_len_set(&tx_cb->map[seg], maplen,
+ dma_unmap_len_set(&tx_cb->map[seg], maplen,
sizeof(struct oal));
oal_entry = (struct oal_entry *)oal;
oal++;
@@ -2512,8 +2506,8 @@ static int ql_send_map(struct ql3_adapter *qdev,
oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
oal_entry->len = cpu_to_le32(frag->size);
- pci_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
- pci_unmap_len_set(&tx_cb->map[seg], maplen,
+ dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
+ dma_unmap_len_set(&tx_cb->map[seg], maplen,
frag->size);
}
/* Terminate the last segment. */
@@ -2539,22 +2533,22 @@ map_error:
(seg == 12 && seg_cnt > 13) || /* but necessary. */
(seg == 17 && seg_cnt > 18)) {
pci_unmap_single(qdev->pdev,
- pci_unmap_addr(&tx_cb->map[seg], mapaddr),
- pci_unmap_len(&tx_cb->map[seg], maplen),
+ dma_unmap_addr(&tx_cb->map[seg], mapaddr),
+ dma_unmap_len(&tx_cb->map[seg], maplen),
PCI_DMA_TODEVICE);
oal++;
seg++;
}
pci_unmap_page(qdev->pdev,
- pci_unmap_addr(&tx_cb->map[seg], mapaddr),
- pci_unmap_len(&tx_cb->map[seg], maplen),
+ dma_unmap_addr(&tx_cb->map[seg], mapaddr),
+ dma_unmap_len(&tx_cb->map[seg], maplen),
PCI_DMA_TODEVICE);
}
pci_unmap_single(qdev->pdev,
- pci_unmap_addr(&tx_cb->map[0], mapaddr),
- pci_unmap_addr(&tx_cb->map[0], maplen),
+ dma_unmap_addr(&tx_cb->map[0], mapaddr),
+ dma_unmap_addr(&tx_cb->map[0], maplen),
PCI_DMA_TODEVICE);
return NETDEV_TX_BUSY;
@@ -2841,8 +2835,8 @@ static void ql_free_large_buffers(struct ql3_adapter *qdev)
if (lrg_buf_cb->skb) {
dev_kfree_skb(lrg_buf_cb->skb);
pci_unmap_single(qdev->pdev,
- pci_unmap_addr(lrg_buf_cb, mapaddr),
- pci_unmap_len(lrg_buf_cb, maplen),
+ dma_unmap_addr(lrg_buf_cb, mapaddr),
+ dma_unmap_len(lrg_buf_cb, maplen),
PCI_DMA_FROMDEVICE);
memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
} else {
@@ -2912,8 +2906,8 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
return -ENOMEM;
}
- pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
- pci_unmap_len_set(lrg_buf_cb, maplen,
+ dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
+ dma_unmap_len_set(lrg_buf_cb, maplen,
qdev->lrg_buffer_len -
QL_HEADER_SPACE);
lrg_buf_cb->buf_phy_addr_low =
@@ -3793,13 +3787,13 @@ static void ql_reset_work(struct work_struct *work)
"%s: Freeing lost SKB.\n",
qdev->ndev->name);
pci_unmap_single(qdev->pdev,
- pci_unmap_addr(&tx_cb->map[0], mapaddr),
- pci_unmap_len(&tx_cb->map[0], maplen),
+ dma_unmap_addr(&tx_cb->map[0], mapaddr),
+ dma_unmap_len(&tx_cb->map[0], maplen),
PCI_DMA_TODEVICE);
for(j=1;j<tx_cb->seg_count;j++) {
pci_unmap_page(qdev->pdev,
- pci_unmap_addr(&tx_cb->map[j],mapaddr),
- pci_unmap_len(&tx_cb->map[j],maplen),
+ dma_unmap_addr(&tx_cb->map[j],mapaddr),
+ dma_unmap_len(&tx_cb->map[j],maplen),
PCI_DMA_TODEVICE);
}
dev_kfree_skb(tx_cb->skb);
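
The qla3xxx hunks above are a mechanical rename from the pci_unmap_* helpers to the generic dma_unmap_* ones (and from DECLARE_PCI_UNMAP_* to DEFINE_DMA_UNMAP_* in the header that follows); behaviour is unchanged, and on configurations that do not need unmap state the macros still compile away to nothing. A condensed sketch of the save/restore pattern, with struct example_buf standing in for the driver's ql_rcv_buf_cb and map_list structures:

#include <linux/pci.h>
#include <linux/dma-mapping.h>

struct example_buf {
	DEFINE_DMA_UNMAP_ADDR(mapaddr);
	DEFINE_DMA_UNMAP_LEN(maplen);
};

static void example_map(struct pci_dev *pdev, struct example_buf *b,
			void *data, size_t len)
{
	dma_addr_t map = pci_map_single(pdev, data, len, PCI_DMA_FROMDEVICE);

	dma_unmap_addr_set(b, mapaddr, map);	/* was pci_unmap_addr_set() */
	dma_unmap_len_set(b, maplen, len);	/* was pci_unmap_len_set() */
}

static void example_unmap(struct pci_dev *pdev, struct example_buf *b)
{
	pci_unmap_single(pdev, dma_unmap_addr(b, mapaddr),
			 dma_unmap_len(b, maplen), PCI_DMA_FROMDEVICE);
}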
diff --git a/drivers/net/qla3xxx.h b/drivers/net/qla3xxx.h
index 7113e71b15a1..3362a661248c 100644
--- a/drivers/net/qla3xxx.h
+++ b/drivers/net/qla3xxx.h
@@ -998,8 +998,8 @@ enum link_state_t {
struct ql_rcv_buf_cb {
struct ql_rcv_buf_cb *next;
struct sk_buff *skb;
- DECLARE_PCI_UNMAP_ADDR(mapaddr);
- DECLARE_PCI_UNMAP_LEN(maplen);
+ DEFINE_DMA_UNMAP_ADDR(mapaddr);
+ DEFINE_DMA_UNMAP_LEN(maplen);
__le32 buf_phy_addr_low;
__le32 buf_phy_addr_high;
int index;
@@ -1029,8 +1029,8 @@ struct oal {
};
struct map_list {
- DECLARE_PCI_UNMAP_ADDR(mapaddr);
- DECLARE_PCI_UNMAP_LEN(maplen);
+ DEFINE_DMA_UNMAP_ADDR(mapaddr);
+ DEFINE_DMA_UNMAP_LEN(maplen);
};
struct ql_tx_buf_cb {
diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h
index 0da94b208db1..896d40df9a13 100644
--- a/drivers/net/qlcnic/qlcnic.h
+++ b/drivers/net/qlcnic/qlcnic.h
@@ -51,8 +51,9 @@
#define _QLCNIC_LINUX_MAJOR 5
#define _QLCNIC_LINUX_MINOR 0
-#define _QLCNIC_LINUX_SUBVERSION 0
-#define QLCNIC_LINUX_VERSIONID "5.0.0"
+#define _QLCNIC_LINUX_SUBVERSION 2
+#define QLCNIC_LINUX_VERSIONID "5.0.2"
+#define QLCNIC_DRV_IDC_VER 0x01
#define QLCNIC_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c))
#define _major(v) (((v) >> 24) & 0xff)
@@ -98,8 +99,6 @@
#define QLCNIC_CT_DEFAULT_RX_BUF_LEN 2048
#define QLCNIC_LRO_BUFFER_EXTRA 2048
-#define QLCNIC_RX_LRO_BUFFER_LENGTH (8060)
-
/* Opcodes to be used with the commands */
#define TX_ETHER_PKT 0x01
#define TX_TCP_PKT 0x02
@@ -133,7 +132,6 @@
#define RCV_RING_NORMAL 0
#define RCV_RING_JUMBO 1
-#define RCV_RING_LRO 2
#define MIN_CMD_DESCRIPTORS 64
#define MIN_RCV_DESCRIPTORS 64
@@ -144,7 +142,6 @@
#define MAX_RCV_DESCRIPTORS_10G 8192
#define MAX_JUMBO_RCV_DESCRIPTORS_1G 512
#define MAX_JUMBO_RCV_DESCRIPTORS_10G 1024
-#define MAX_LRO_RCV_DESCRIPTORS 8
#define DEFAULT_RCV_DESCRIPTORS_1G 2048
#define DEFAULT_RCV_DESCRIPTORS_10G 4096
@@ -152,8 +149,6 @@
#define get_next_index(index, length) \
(((index) + 1) & ((length) - 1))
-#define MPORT_MULTI_FUNCTION_MODE 0x2222
-
/*
* Following data structures describe the descriptors that will be used.
 * Added fields of tcpHdrSize and ipHdrSize; the driver needs to do it only when
@@ -399,13 +394,9 @@ struct qlcnic_hardware_context {
unsigned long pci_len0;
- u32 ocm_win;
- u32 crb_win;
-
rwlock_t crb_lock;
struct mutex mem_lock;
- u8 cut_through;
u8 revision_id;
u8 pci_func;
u8 linkup;
@@ -428,6 +419,10 @@ struct qlcnic_adapter_stats {
u64 xmit_on;
u64 xmit_off;
u64 skb_alloc_failure;
+ u64 null_skb;
+ u64 null_rxbuf;
+ u64 rx_dma_map_error;
+ u64 tx_dma_map_error;
};
/*
@@ -916,14 +911,12 @@ struct qlcnic_adapter {
u16 num_txd;
u16 num_rxd;
u16 num_jumbo_rxd;
- u16 num_lro_rxd;
u8 max_rds_rings;
u8 max_sds_rings;
u8 driver_mismatch;
u8 msix_supported;
u8 rx_csum;
- u8 pci_using_dac;
u8 portnum;
u8 physical_port;
@@ -958,11 +951,15 @@ struct qlcnic_adapter {
u8 dev_state;
u8 diag_test;
u8 diag_cnt;
+ u8 reset_ack_timeo;
+ u8 dev_init_timeo;
u8 rsrd1;
- u16 rsrd2;
+ u16 msg_enable;
u8 mac_addr[ETH_ALEN];
+ u64 dev_rst_time;
+
struct qlcnic_adapter_stats stats;
struct qlcnic_recv_context recv_ctx;
@@ -994,6 +991,11 @@ u32 qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off);
int qlcnic_hw_write_wx_2M(struct qlcnic_adapter *, ulong off, u32 data);
int qlcnic_pci_mem_write_2M(struct qlcnic_adapter *, u64 off, u64 data);
int qlcnic_pci_mem_read_2M(struct qlcnic_adapter *, u64 off, u64 *data);
+void qlcnic_pci_camqm_read_2M(struct qlcnic_adapter *, u64, u64 *);
+void qlcnic_pci_camqm_write_2M(struct qlcnic_adapter *, u64, u64);
+
+#define ADDR_IN_RANGE(addr, low, high) \
+ (((addr) < (high)) && ((addr) >= (low)))
#define QLCRD32(adapter, off) \
(qlcnic_hw_read_wx_2M(adapter, off))
@@ -1035,6 +1037,7 @@ int qlcnic_need_fw_reset(struct qlcnic_adapter *adapter);
void qlcnic_request_firmware(struct qlcnic_adapter *adapter);
void qlcnic_release_firmware(struct qlcnic_adapter *adapter);
int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter);
+int qlcnic_setup_idc_param(struct qlcnic_adapter *adapter);
int qlcnic_rom_fast_read(struct qlcnic_adapter *adapter, int addr, int *valp);
int qlcnic_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr,
@@ -1128,4 +1131,11 @@ static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring)
extern const struct ethtool_ops qlcnic_ethtool_ops;
+#define QLCDB(adapter, lvl, _fmt, _args...) do { \
+ if (NETIF_MSG_##lvl & adapter->msg_enable) \
+ printk(KERN_INFO "%s: %s: " _fmt, \
+ dev_name(&adapter->pdev->dev), \
+ __func__, ##_args); \
+ } while (0)
+
#endif /* __QLCNIC_H_ */
diff --git a/drivers/net/qlcnic/qlcnic_ctx.c b/drivers/net/qlcnic/qlcnic_ctx.c
index 0a6a39914aec..c2c1f5cc16c6 100644
--- a/drivers/net/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/qlcnic/qlcnic_ctx.c
@@ -421,7 +421,8 @@ int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
if (addr == NULL) {
dev_err(&pdev->dev, "failed to allocate tx desc ring\n");
- return -ENOMEM;
+ err = -ENOMEM;
+ goto err_out_free;
}
tx_ring->desc_head = (struct cmd_desc_type0 *)addr;
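
The one-line fix above converts an early return on tx-ring allocation failure into a jump to the function's common err_out_free label, so rx-ring memory allocated earlier in qlcnic_alloc_hw_resources() is released instead of leaked. A generic sketch of that unwind pattern, not the driver's actual allocation code:

#include <linux/slab.h>

static int example_alloc_two(void **a, void **b)
{
	int err;

	*a = kzalloc(1024, GFP_KERNEL);
	if (!*a)
		return -ENOMEM;		/* nothing to undo yet */

	*b = kzalloc(1024, GFP_KERNEL);
	if (!*b) {
		err = -ENOMEM;
		goto err_out_free;	/* undo the first allocation */
	}
	return 0;

err_out_free:
	kfree(*a);
	*a = NULL;
	return err;
}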
diff --git a/drivers/net/qlcnic/qlcnic_ethtool.c b/drivers/net/qlcnic/qlcnic_ethtool.c
index f83e15fe3e1b..3bd514ec7e8f 100644
--- a/drivers/net/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/qlcnic/qlcnic_ethtool.c
@@ -69,6 +69,14 @@ static const struct qlcnic_stats qlcnic_gstrings_stats[] = {
QLC_SIZEOF(stats.xmit_off), QLC_OFF(stats.xmit_off)},
{"skb_alloc_failure", QLC_SIZEOF(stats.skb_alloc_failure),
QLC_OFF(stats.skb_alloc_failure)},
+ {"null skb",
+ QLC_SIZEOF(stats.null_skb), QLC_OFF(stats.null_skb)},
+ {"null rxbuf",
+ QLC_SIZEOF(stats.null_rxbuf), QLC_OFF(stats.null_rxbuf)},
+ {"rx dma map error", QLC_SIZEOF(stats.rx_dma_map_error),
+ QLC_OFF(stats.rx_dma_map_error)},
+ {"tx dma map error", QLC_SIZEOF(stats.tx_dma_map_error),
+ QLC_OFF(stats.tx_dma_map_error)},
};
@@ -404,7 +412,6 @@ qlcnic_get_ringparam(struct net_device *dev,
ring->rx_pending = adapter->num_rxd;
ring->rx_jumbo_pending = adapter->num_jumbo_rxd;
- ring->rx_jumbo_pending += adapter->num_lro_rxd;
ring->tx_pending = adapter->num_txd;
if (adapter->ahw.port_type == QLCNIC_GBE) {
@@ -598,19 +605,12 @@ qlcnic_set_pauseparam(struct net_device *netdev,
static int qlcnic_reg_test(struct net_device *dev)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
- u32 data_read, data_written;
+ u32 data_read;
data_read = QLCRD32(adapter, QLCNIC_PCIX_PH_REG(0));
if ((data_read & 0xffff) != adapter->pdev->vendor)
return 1;
- data_written = (u32)0xa5a5a5a5;
-
- QLCWR32(adapter, CRB_SCRATCHPAD_TEST, data_written);
- data_read = QLCRD32(adapter, CRB_SCRATCHPAD_TEST);
- if (data_written != data_read)
- return 1;
-
return 0;
}
@@ -998,6 +998,20 @@ static int qlcnic_set_flags(struct net_device *netdev, u32 data)
return 0;
}
+static u32 qlcnic_get_msglevel(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+ return adapter->msg_enable;
+}
+
+static void qlcnic_set_msglevel(struct net_device *netdev, u32 msglvl)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+ adapter->msg_enable = msglvl;
+}
+
const struct ethtool_ops qlcnic_ethtool_ops = {
.get_settings = qlcnic_get_settings,
.set_settings = qlcnic_set_settings,
@@ -1029,4 +1043,6 @@ const struct ethtool_ops qlcnic_ethtool_ops = {
.get_flags = ethtool_op_get_flags,
.set_flags = qlcnic_set_flags,
.phys_id = qlcnic_blink_led,
+ .set_msglevel = qlcnic_set_msglevel,
+ .get_msglevel = qlcnic_get_msglevel,
};
diff --git a/drivers/net/qlcnic/qlcnic_hdr.h b/drivers/net/qlcnic/qlcnic_hdr.h
index 0469f84360a4..ad9d167723c4 100644
--- a/drivers/net/qlcnic/qlcnic_hdr.h
+++ b/drivers/net/qlcnic/qlcnic_hdr.h
@@ -435,9 +435,10 @@ enum {
#define QLCNIC_PCI_MS_2M (0x80000)
#define QLCNIC_PCI_OCM0_2M (0x000c0000UL)
#define QLCNIC_PCI_CRBSPACE (0x06000000UL)
+#define QLCNIC_PCI_CAMQM (0x04800000UL)
+#define QLCNIC_PCI_CAMQM_END (0x04800800UL)
#define QLCNIC_PCI_2MB_SIZE (0x00200000UL)
#define QLCNIC_PCI_CAMQM_2M_BASE (0x000ff800UL)
-#define QLCNIC_PCI_CAMQM_2M_END (0x04800800UL)
#define QLCNIC_CRB_CAM QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_CAM)
@@ -448,7 +449,7 @@ enum {
#define QLCNIC_ADDR_OCM1 (0x0000000200400000ULL)
#define QLCNIC_ADDR_OCM1_MAX (0x00000002004fffffULL)
#define QLCNIC_ADDR_QDR_NET (0x0000000300000000ULL)
-#define QLCNIC_ADDR_QDR_NET_MAX_P3 (0x0000000303ffffffULL)
+#define QLCNIC_ADDR_QDR_NET_MAX (0x0000000307ffffffULL)
/*
* Register offsets for MN
@@ -562,39 +563,16 @@ enum {
#define CRB_PF_LINK_SPEED_1 (QLCNIC_REG(0xe8))
#define CRB_PF_LINK_SPEED_2 (QLCNIC_REG(0xec))
-#define CRB_MPORT_MODE (QLCNIC_REG(0xc4))
-#define CRB_DMA_SHIFT (QLCNIC_REG(0xcc))
-
#define CRB_TEMP_STATE (QLCNIC_REG(0x1b4))
#define CRB_V2P_0 (QLCNIC_REG(0x290))
#define CRB_V2P(port) (CRB_V2P_0+((port)*4))
#define CRB_DRIVER_VERSION (QLCNIC_REG(0x2a0))
-#define CRB_SW_INT_MASK_0 (QLCNIC_REG(0x1d8))
-#define CRB_SW_INT_MASK_1 (QLCNIC_REG(0x1e0))
-#define CRB_SW_INT_MASK_2 (QLCNIC_REG(0x1e4))
-#define CRB_SW_INT_MASK_3 (QLCNIC_REG(0x1e8))
-
#define CRB_FW_CAPABILITIES_1 (QLCNIC_CAM_RAM(0x128))
#define CRB_MAC_BLOCK_START (QLCNIC_CAM_RAM(0x1c0))
/*
- * capabilities register, can be used to selectively enable/disable features
- * for backward compatibility
- */
-#define CRB_NIC_CAPABILITIES_HOST QLCNIC_REG(0x1a8)
-#define CRB_NIC_CAPABILITIES_FW QLCNIC_REG(0x1dc)
-#define CRB_NIC_MSI_MODE_HOST QLCNIC_REG(0x270)
-#define CRB_NIC_MSI_MODE_FW QLCNIC_REG(0x274)
-
-#define INTR_SCHEME_PERPORT 0x1
-#define MSI_MODE_MULTIFUNC 0x1
-
-/* used for ethtool tests */
-#define CRB_SCRATCHPAD_TEST QLCNIC_REG(0x280)
-
-/*
* CrbPortPhanCntrHi/Lo is used to pass the address of HostPhantomIndex address
* which can be read by the Phantom host to get producer/consumer indexes from
* Phantom/Casper. If it is not HOST_SHARED_MEMORY, then the following
@@ -693,15 +671,24 @@ enum {
#define QLCNIC_CRB_DRV_STATE (QLCNIC_CAM_RAM(0x144))
#define QLCNIC_CRB_DRV_SCRATCH (QLCNIC_CAM_RAM(0x148))
#define QLCNIC_CRB_DEV_PARTITION_INFO (QLCNIC_CAM_RAM(0x14c))
-#define QLCNIC_CRB_DRV_IDC_VER (QLCNIC_CAM_RAM(0x14c))
-
- /* Device State */
-#define QLCNIC_DEV_COLD 1
-#define QLCNIC_DEV_INITALIZING 2
-#define QLCNIC_DEV_READY 3
-#define QLCNIC_DEV_NEED_RESET 4
-#define QLCNIC_DEV_NEED_QUISCENT 5
-#define QLCNIC_DEV_FAILED 6
+#define QLCNIC_CRB_DRV_IDC_VER (QLCNIC_CAM_RAM(0x174))
+#define QLCNIC_ROM_DEV_INIT_TIMEOUT (0x3e885c)
+#define QLCNIC_ROM_DRV_RESET_TIMEOUT (0x3e8860)
+
+/* Device State */
+#define QLCNIC_DEV_COLD 0x1
+#define QLCNIC_DEV_INITIALIZING 0x2
+#define QLCNIC_DEV_READY 0x3
+#define QLCNIC_DEV_NEED_RESET 0x4
+#define QLCNIC_DEV_NEED_QUISCENT 0x5
+#define QLCNIC_DEV_FAILED 0x6
+#define QLCNIC_DEV_QUISCENT 0x7
+
+#define QLC_DEV_SET_REF_CNT(VAL, FN) ((VAL) |= (1 << (FN * 4)))
+#define QLC_DEV_CLR_REF_CNT(VAL, FN) ((VAL) &= ~(1 << (FN * 4)))
+#define QLC_DEV_SET_RST_RDY(VAL, FN) ((VAL) |= (1 << (FN * 4)))
+#define QLC_DEV_SET_QSCNT_RDY(VAL, FN) ((VAL) |= (2 << (FN * 4)))
+#define QLC_DEV_CLR_RST_QSCNT(VAL, FN) ((VAL) &= ~(3 << (FN * 4)))
#define QLCNIC_RCODE_DRIVER_INFO 0x20000000
#define QLCNIC_RCODE_DRIVER_CAN_RELOAD 0x40000000
@@ -709,9 +696,8 @@ enum {
#define QLCNIC_FWERROR_PEGNUM(code) ((code) & 0xff)
#define QLCNIC_FWERROR_CODE(code) ((code >> 8) & 0xfffff)
-#define FW_POLL_DELAY (2 * HZ)
-#define FW_FAIL_THRESH 3
-#define FW_POLL_THRESH 10
+#define FW_POLL_DELAY (1 * HZ)
+#define FW_FAIL_THRESH 2
#define ISR_MSI_INT_TRIGGER(FUNC) (QLCNIC_PCIX_PS_REG(PCIX_MSI_F(FUNC)))
#define ISR_LEGACY_INT_TRIGGERED(VAL) (((VAL) & 0x300) == 0x200)
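
The QLC_DEV_* helpers introduced above pack per-PCI-function state into a single 32-bit CRB word, four bits per function (presence in one register, reset/quiescent acknowledgements in another). A minimal sketch of how one function might acknowledge a reset in such a word; the real driver reads and writes the word with QLCRD32()/QLCWR32() on registers like QLCNIC_CRB_DRV_STATE:

static u32 example_ack_reset(u32 drv_state, int pci_func)
{
	/* set this function's "reset ready" bit: 1 << (pci_func * 4) */
	QLC_DEV_SET_RST_RDY(drv_state, pci_func);
	return drv_state;
}

static u32 example_clear_ack(u32 drv_state, int pci_func)
{
	/* clear both reset-ready and quiescent-ready bits for this function */
	QLC_DEV_CLR_RST_QSCNT(drv_state, pci_func);
	return drv_state;
}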
diff --git a/drivers/net/qlcnic/qlcnic_hw.c b/drivers/net/qlcnic/qlcnic_hw.c
index e73ba455aa20..0c2e1f08f459 100644
--- a/drivers/net/qlcnic/qlcnic_hw.c
+++ b/drivers/net/qlcnic/qlcnic_hw.c
@@ -54,21 +54,6 @@ static inline void writeq(u64 val, void __iomem *addr)
}
#endif
-#define ADDR_IN_RANGE(addr, low, high) \
- (((addr) < (high)) && ((addr) >= (low)))
-
-#define PCI_OFFSET_FIRST_RANGE(adapter, off) \
- ((adapter)->ahw.pci_base0 + (off))
-
-static void __iomem *pci_base_offset(struct qlcnic_adapter *adapter,
- unsigned long off)
-{
- if (ADDR_IN_RANGE(off, FIRST_PAGE_GROUP_START, FIRST_PAGE_GROUP_END))
- return PCI_OFFSET_FIRST_RANGE(adapter, off);
-
- return NULL;
-}
-
static const struct crb_128M_2M_block_map
crb_128M_2M_map[64] __cacheline_aligned_in_smp = {
{{{0, 0, 0, 0} } }, /* 0: PCI */
@@ -310,8 +295,12 @@ qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg)
done = QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_LOCK(sem)));
if (done == 1)
break;
- if (++timeout >= QLCNIC_PCIE_SEM_TIMEOUT)
+ if (++timeout >= QLCNIC_PCIE_SEM_TIMEOUT) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to acquire sem=%d lock;reg_id=%d\n",
+ sem, id_reg);
return -EIO;
+ }
msleep(1);
}
@@ -427,7 +416,7 @@ static int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, u8 *addr)
void qlcnic_set_multi(struct net_device *netdev)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
- struct dev_mc_list *mc_ptr;
+ struct netdev_hw_addr *ha;
u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
u32 mode = VPORT_MISS_MODE_DROP;
@@ -449,8 +438,8 @@ void qlcnic_set_multi(struct net_device *netdev)
}
if (!netdev_mc_empty(netdev)) {
- netdev_for_each_mc_addr(mc_ptr, netdev) {
- qlcnic_nic_add_mac(adapter, mc_ptr->dmi_addr);
+ netdev_for_each_mc_addr(ha, netdev) {
+ qlcnic_nic_add_mac(adapter, ha->addr);
}
}
@@ -787,9 +776,6 @@ qlcnic_pci_set_crbwindow_2M(struct qlcnic_adapter *adapter, ulong off)
window = CRB_HI(off);
- if (adapter->ahw.crb_win == window)
- return;
-
writel(window, addr);
if (readl(addr) != window) {
if (printk_ratelimit())
@@ -797,7 +783,6 @@ qlcnic_pci_set_crbwindow_2M(struct qlcnic_adapter *adapter, ulong off)
"failed to set CRB window to %d off 0x%lx\n",
window, off);
}
- adapter->ahw.crb_win = window;
}
int
@@ -878,13 +863,6 @@ qlcnic_pci_set_window_2M(struct qlcnic_adapter *adapter,
u64 addr, u32 *start)
{
u32 window;
- struct pci_dev *pdev = adapter->pdev;
-
- if ((addr & 0x00ff800) == 0xff800) {
- if (printk_ratelimit())
- dev_warn(&pdev->dev, "QM access not handled\n");
- return -EIO;
- }
window = OCM_WIN_P3P(addr);
@@ -892,7 +870,6 @@ qlcnic_pci_set_window_2M(struct qlcnic_adapter *adapter,
/* read back to flush */
readl(adapter->ahw.ocm_win_crb);
- adapter->ahw.ocm_win = window;
*start = QLCNIC_PCI_OCM0_2M + GET_MEM_OFFS_2M(addr);
return 0;
}
@@ -901,8 +878,7 @@ static int
qlcnic_pci_mem_access_direct(struct qlcnic_adapter *adapter, u64 off,
u64 *data, int op)
{
- void __iomem *addr, *mem_ptr = NULL;
- resource_size_t mem_base;
+ void __iomem *addr;
int ret;
u32 start;
@@ -912,21 +888,8 @@ qlcnic_pci_mem_access_direct(struct qlcnic_adapter *adapter, u64 off,
if (ret != 0)
goto unlock;
- addr = pci_base_offset(adapter, start);
- if (addr)
- goto noremap;
-
- mem_base = pci_resource_start(adapter->pdev, 0) + (start & PAGE_MASK);
-
- mem_ptr = ioremap(mem_base, PAGE_SIZE);
- if (mem_ptr == NULL) {
- ret = -EIO;
- goto unlock;
- }
+ addr = adapter->ahw.pci_base0 + start;
- addr = mem_ptr + (start & (PAGE_SIZE - 1));
-
-noremap:
if (op == 0) /* read */
*data = readq(addr);
else /* write */
@@ -935,11 +898,31 @@ noremap:
unlock:
mutex_unlock(&adapter->ahw.mem_lock);
- if (mem_ptr)
- iounmap(mem_ptr);
return ret;
}
+void
+qlcnic_pci_camqm_read_2M(struct qlcnic_adapter *adapter, u64 off, u64 *data)
+{
+ void __iomem *addr = adapter->ahw.pci_base0 +
+ QLCNIC_PCI_CAMQM_2M_BASE + (off - QLCNIC_PCI_CAMQM);
+
+ mutex_lock(&adapter->ahw.mem_lock);
+ *data = readq(addr);
+ mutex_unlock(&adapter->ahw.mem_lock);
+}
+
+void
+qlcnic_pci_camqm_write_2M(struct qlcnic_adapter *adapter, u64 off, u64 data)
+{
+ void __iomem *addr = adapter->ahw.pci_base0 +
+ QLCNIC_PCI_CAMQM_2M_BASE + (off - QLCNIC_PCI_CAMQM);
+
+ mutex_lock(&adapter->ahw.mem_lock);
+ writeq(data, addr);
+ mutex_unlock(&adapter->ahw.mem_lock);
+}
+
#define MAX_CTL_CHECK 1000
int
@@ -948,7 +931,6 @@ qlcnic_pci_mem_write_2M(struct qlcnic_adapter *adapter,
{
int i, j, ret;
u32 temp, off8;
- u64 stride;
void __iomem *mem_crb;
/* Only 64-bit aligned access */
@@ -957,7 +939,7 @@ qlcnic_pci_mem_write_2M(struct qlcnic_adapter *adapter,
/* P3 onward, test agent base for MIU and SIU is same */
if (ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET,
- QLCNIC_ADDR_QDR_NET_MAX_P3)) {
+ QLCNIC_ADDR_QDR_NET_MAX)) {
mem_crb = qlcnic_get_ioaddr(adapter,
QLCNIC_CRB_QDR_NET+MIU_TEST_AGT_BASE);
goto correct;
@@ -975,9 +957,7 @@ qlcnic_pci_mem_write_2M(struct qlcnic_adapter *adapter,
return -EIO;
correct:
- stride = QLCNIC_IS_REVISION_P3P(adapter->ahw.revision_id) ? 16 : 8;
-
- off8 = off & ~(stride-1);
+ off8 = off & ~0xf;
mutex_lock(&adapter->ahw.mem_lock);
@@ -985,30 +965,28 @@ correct:
writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI));
i = 0;
- if (stride == 16) {
- writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL));
- writel((TA_CTL_START | TA_CTL_ENABLE),
- (mem_crb + TEST_AGT_CTRL));
-
- for (j = 0; j < MAX_CTL_CHECK; j++) {
- temp = readl(mem_crb + TEST_AGT_CTRL);
- if ((temp & TA_CTL_BUSY) == 0)
- break;
- }
+ writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL));
+ writel((TA_CTL_START | TA_CTL_ENABLE),
+ (mem_crb + TEST_AGT_CTRL));
- if (j >= MAX_CTL_CHECK) {
- ret = -EIO;
- goto done;
- }
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ temp = readl(mem_crb + TEST_AGT_CTRL);
+ if ((temp & TA_CTL_BUSY) == 0)
+ break;
+ }
- i = (off & 0xf) ? 0 : 2;
- writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i)),
- mem_crb + MIU_TEST_AGT_WRDATA(i));
- writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i+1)),
- mem_crb + MIU_TEST_AGT_WRDATA(i+1));
- i = (off & 0xf) ? 2 : 0;
+ if (j >= MAX_CTL_CHECK) {
+ ret = -EIO;
+ goto done;
}
+ i = (off & 0xf) ? 0 : 2;
+ writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i)),
+ mem_crb + MIU_TEST_AGT_WRDATA(i));
+ writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i+1)),
+ mem_crb + MIU_TEST_AGT_WRDATA(i+1));
+ i = (off & 0xf) ? 2 : 0;
+
writel(data & 0xffffffff,
mem_crb + MIU_TEST_AGT_WRDATA(i));
writel((data >> 32) & 0xffffffff,
@@ -1044,7 +1022,7 @@ qlcnic_pci_mem_read_2M(struct qlcnic_adapter *adapter,
{
int j, ret;
u32 temp, off8;
- u64 val, stride;
+ u64 val;
void __iomem *mem_crb;
/* Only 64-bit aligned access */
@@ -1053,7 +1031,7 @@ qlcnic_pci_mem_read_2M(struct qlcnic_adapter *adapter,
/* P3 onward, test agent base for MIU and SIU is same */
if (ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET,
- QLCNIC_ADDR_QDR_NET_MAX_P3)) {
+ QLCNIC_ADDR_QDR_NET_MAX)) {
mem_crb = qlcnic_get_ioaddr(adapter,
QLCNIC_CRB_QDR_NET+MIU_TEST_AGT_BASE);
goto correct;
@@ -1073,9 +1051,7 @@ qlcnic_pci_mem_read_2M(struct qlcnic_adapter *adapter,
return -EIO;
correct:
- stride = QLCNIC_IS_REVISION_P3P(adapter->ahw.revision_id) ? 16 : 8;
-
- off8 = off & ~(stride-1);
+ off8 = off & ~0xf;
mutex_lock(&adapter->ahw.mem_lock);
@@ -1097,7 +1073,7 @@ correct:
ret = -EIO;
} else {
off8 = MIU_TEST_AGT_RDDATA_LO;
- if ((stride == 16) && (off & 0xf))
+ if (off & 0xf)
off8 = MIU_TEST_AGT_RDDATA_UPPER_LO;
temp = readl(mem_crb + off8 + 4);
diff --git a/drivers/net/qlcnic/qlcnic_init.c b/drivers/net/qlcnic/qlcnic_init.c
index 9d2c124048fa..71a4e664ad76 100644
--- a/drivers/net/qlcnic/qlcnic_init.c
+++ b/drivers/net/qlcnic/qlcnic_init.c
@@ -210,7 +210,7 @@ int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
cmd_buf_arr = vmalloc(TX_BUFF_RINGSIZE(tx_ring));
if (cmd_buf_arr == NULL) {
dev_err(&netdev->dev, "failed to allocate cmd buffer ring\n");
- return -ENOMEM;
+ goto err_out;
}
memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring));
tx_ring->cmd_buf_arr = cmd_buf_arr;
@@ -221,7 +221,7 @@ int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
rds_ring = kzalloc(size, GFP_KERNEL);
if (rds_ring == NULL) {
dev_err(&netdev->dev, "failed to allocate rds ring struct\n");
- return -ENOMEM;
+ goto err_out;
}
recv_ctx->rds_rings = rds_ring;
@@ -230,17 +230,8 @@ int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
switch (ring) {
case RCV_RING_NORMAL:
rds_ring->num_desc = adapter->num_rxd;
- if (adapter->ahw.cut_through) {
- rds_ring->dma_size =
- QLCNIC_CT_DEFAULT_RX_BUF_LEN;
- rds_ring->skb_size =
- QLCNIC_CT_DEFAULT_RX_BUF_LEN;
- } else {
- rds_ring->dma_size =
- QLCNIC_P3_RX_BUF_MAX_LEN;
- rds_ring->skb_size =
- rds_ring->dma_size + NET_IP_ALIGN;
- }
+ rds_ring->dma_size = QLCNIC_P3_RX_BUF_MAX_LEN;
+ rds_ring->skb_size = rds_ring->dma_size + NET_IP_ALIGN;
break;
case RCV_RING_JUMBO:
@@ -254,13 +245,6 @@ int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
rds_ring->skb_size =
rds_ring->dma_size + NET_IP_ALIGN;
break;
-
- case RCV_RING_LRO:
- rds_ring->num_desc = adapter->num_lro_rxd;
- rds_ring->dma_size = QLCNIC_RX_LRO_BUFFER_LENGTH;
- rds_ring->skb_size = rds_ring->dma_size + NET_IP_ALIGN;
- break;
-
}
rds_ring->rx_buf_arr = (struct qlcnic_rx_buffer *)
vmalloc(RCV_BUFF_RINGSIZE(rds_ring));
@@ -530,6 +514,36 @@ int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter)
return 0;
}
+int
+qlcnic_setup_idc_param(struct qlcnic_adapter *adapter)
+{
+ int timeo;
+ u32 val;
+
+ val = QLCRD32(adapter, QLCNIC_CRB_DEV_PARTITION_INFO);
+ val = (val >> (adapter->portnum * 4)) & 0xf;
+
+ if ((val & 0x3) != 1) {
+ dev_err(&adapter->pdev->dev, "Not an Ethernet NIC func=%u\n",
+ val);
+ return -EIO;
+ }
+
+ adapter->physical_port = (val >> 2);
+
+ if (qlcnic_rom_fast_read(adapter, QLCNIC_ROM_DEV_INIT_TIMEOUT, &timeo))
+ timeo = 30;
+
+ adapter->dev_init_timeo = timeo;
+
+ if (qlcnic_rom_fast_read(adapter, QLCNIC_ROM_DRV_RESET_TIMEOUT, &timeo))
+ timeo = 10;
+
+ adapter->reset_ack_timeo = timeo;
+
+ return 0;
+}
+
static int
qlcnic_has_mn(struct qlcnic_adapter *adapter)
{
@@ -540,12 +554,10 @@ qlcnic_has_mn(struct qlcnic_adapter *adapter)
QLCNIC_FW_VERSION_OFFSET, (int *)&flashed_ver);
flashed_ver = QLCNIC_DECODE_VERSION(flashed_ver);
- if (flashed_ver >= QLCNIC_VERSION_CODE(4, 0, 220)) {
+ capability = QLCRD32(adapter, QLCNIC_PEG_TUNE_CAPABILITY);
+ if (capability & QLCNIC_PEG_TUNE_MN_PRESENT)
+ return 1;
- capability = QLCRD32(adapter, QLCNIC_PEG_TUNE_CAPABILITY);
- if (capability & QLCNIC_PEG_TUNE_MN_PRESENT)
- return 1;
- }
return 0;
}
@@ -612,7 +624,7 @@ qlcnic_validate_bootld(struct qlcnic_adapter *adapter)
return -EINVAL;
tab_size = cpu_to_le32(tab_desc->findex) +
- (cpu_to_le32(tab_desc->entry_size * (idx + 1)));
+ (cpu_to_le32(tab_desc->entry_size) * (idx + 1));
if (adapter->fw->size < tab_size)
return -EINVAL;
@@ -621,7 +633,7 @@ qlcnic_validate_bootld(struct qlcnic_adapter *adapter)
(cpu_to_le32(tab_desc->entry_size) * (idx));
descr = (struct uni_data_desc *)&unirom[offs];
- data_size = descr->findex + cpu_to_le32(descr->size);
+ data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size);
if (adapter->fw->size < data_size)
return -EINVAL;
@@ -647,7 +659,7 @@ qlcnic_validate_fw(struct qlcnic_adapter *adapter)
return -EINVAL;
tab_size = cpu_to_le32(tab_desc->findex) +
- (cpu_to_le32(tab_desc->entry_size * (idx + 1)));
+ (cpu_to_le32(tab_desc->entry_size) * (idx + 1));
if (adapter->fw->size < tab_size)
return -EINVAL;
@@ -655,7 +667,7 @@ qlcnic_validate_fw(struct qlcnic_adapter *adapter)
offs = cpu_to_le32(tab_desc->findex) +
(cpu_to_le32(tab_desc->entry_size) * (idx));
descr = (struct uni_data_desc *)&unirom[offs];
- data_size = descr->findex + cpu_to_le32(descr->size);
+ data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size);
if (adapter->fw->size < data_size)
return -EINVAL;
@@ -950,6 +962,16 @@ qlcnic_load_firmware(struct qlcnic_adapter *adapter)
flashaddr += 8;
}
+
+ size = (__force u32)qlcnic_get_fw_size(adapter) % 8;
+ if (size) {
+ data = cpu_to_le64(ptr64[i]);
+
+ if (qlcnic_pci_mem_write_2M(adapter,
+ flashaddr, data))
+ return -EIO;
+ }
+
} else {
u64 data;
u32 hi, lo;
@@ -1162,9 +1184,6 @@ int qlcnic_init_firmware(struct qlcnic_adapter *adapter)
if (err)
return err;
- QLCWR32(adapter, CRB_NIC_CAPABILITIES_HOST, INTR_SCHEME_PERPORT);
- QLCWR32(adapter, CRB_NIC_MSI_MODE_HOST, MSI_MODE_MULTIFUNC);
- QLCWR32(adapter, CRB_MPORT_MODE, MPORT_MULTI_FUNCTION_MODE);
QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_ACK);
return err;
@@ -1254,13 +1273,13 @@ qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter,
skb = buffer->skb;
- if (!adapter->ahw.cut_through)
- skb_reserve(skb, 2);
+ skb_reserve(skb, 2);
dma = pci_map_single(pdev, skb->data,
rds_ring->dma_size, PCI_DMA_FROMDEVICE);
if (pci_dma_mapping_error(pdev, dma)) {
+ adapter->stats.rx_dma_map_error++;
dev_kfree_skb_any(skb);
buffer->skb = NULL;
return -ENOMEM;
@@ -1285,8 +1304,10 @@ static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
PCI_DMA_FROMDEVICE);
skb = buffer->skb;
- if (!skb)
+ if (!skb) {
+ adapter->stats.null_skb++;
goto no_skb;
+ }
if (likely(adapter->rx_csum && cksum == STATUS_CKSUM_OK)) {
adapter->stats.csummed++;
@@ -1476,6 +1497,8 @@ qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max)
if (rxbuf)
list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
+ else
+ adapter->stats.null_rxbuf++;
skip:
for (; desc_cnt > 0; desc_cnt--) {
@@ -1523,9 +1546,10 @@ qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter, u32 ringid,
int producer, count = 0;
struct list_head *head;
+ spin_lock(&rds_ring->lock);
+
producer = rds_ring->producer;
- spin_lock(&rds_ring->lock);
head = &rds_ring->free_list;
while (!list_empty(head)) {
@@ -1547,13 +1571,13 @@ qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter, u32 ringid,
producer = get_next_index(producer, rds_ring->num_desc);
}
- spin_unlock(&rds_ring->lock);
if (count) {
rds_ring->producer = producer;
writel((producer-1) & (rds_ring->num_desc-1),
rds_ring->crb_rcv_producer);
}
+ spin_unlock(&rds_ring->lock);
}
static void
@@ -1565,10 +1589,11 @@ qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
int producer, count = 0;
struct list_head *head;
- producer = rds_ring->producer;
if (!spin_trylock(&rds_ring->lock))
return;
+ producer = rds_ring->producer;
+
head = &rds_ring->free_list;
while (!list_empty(head)) {
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
index 234dab1f9982..23ea9caa5261 100644
--- a/drivers/net/qlcnic/qlcnic_main.c
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -61,6 +61,10 @@ static int auto_fw_reset = AUTO_FW_RESET_ENABLED;
module_param(auto_fw_reset, int, 0644);
MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");
+static int load_fw_file;
+module_param(load_fw_file, int, 0644);
+MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file)");
+
static int __devinit qlcnic_probe(struct pci_dev *pdev,
const struct pci_device_id *ent);
static void __devexit qlcnic_remove(struct pci_dev *pdev);
@@ -84,6 +88,7 @@ static void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter);
static void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter);
static void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter);
+static void qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding);
static void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter);
static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter);
@@ -208,6 +213,9 @@ qlcnic_napi_enable(struct qlcnic_adapter *adapter)
struct qlcnic_host_sds_ring *sds_ring;
struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ return;
+
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
napi_enable(&sds_ring->napi);
@@ -222,6 +230,9 @@ qlcnic_napi_disable(struct qlcnic_adapter *adapter)
struct qlcnic_host_sds_ring *sds_ring;
struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ return;
+
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
qlcnic_disable_int(sds_ring);
@@ -233,67 +244,6 @@ qlcnic_napi_disable(struct qlcnic_adapter *adapter)
static void qlcnic_clear_stats(struct qlcnic_adapter *adapter)
{
memset(&adapter->stats, 0, sizeof(adapter->stats));
- return;
-}
-
-static int qlcnic_set_dma_mask(struct qlcnic_adapter *adapter)
-{
- struct pci_dev *pdev = adapter->pdev;
- u64 mask, cmask;
-
- adapter->pci_using_dac = 0;
-
- mask = DMA_BIT_MASK(39);
- cmask = mask;
-
- if (pci_set_dma_mask(pdev, mask) == 0 &&
- pci_set_consistent_dma_mask(pdev, cmask) == 0) {
- adapter->pci_using_dac = 1;
- return 0;
- }
-
- return -EIO;
-}
-
-/* Update addressable range if firmware supports it */
-static int
-qlcnic_update_dma_mask(struct qlcnic_adapter *adapter)
-{
- int change, shift, err;
- u64 mask, old_mask, old_cmask;
- struct pci_dev *pdev = adapter->pdev;
-
- change = 0;
-
- shift = QLCRD32(adapter, CRB_DMA_SHIFT);
- if (shift > 32)
- return 0;
-
- if (shift > 9)
- change = 1;
-
- if (change) {
- old_mask = pdev->dma_mask;
- old_cmask = pdev->dev.coherent_dma_mask;
-
- mask = DMA_BIT_MASK(32+shift);
-
- err = pci_set_dma_mask(pdev, mask);
- if (err)
- goto err_out;
-
- err = pci_set_consistent_dma_mask(pdev, mask);
- if (err)
- goto err_out;
- dev_info(&pdev->dev, "using %d-bit dma mask\n", 32+shift);
- }
-
- return 0;
-
-err_out:
- pci_set_dma_mask(pdev, old_mask);
- pci_set_consistent_dma_mask(pdev, old_cmask);
- return err;
}
static void qlcnic_set_port_mode(struct qlcnic_adapter *adapter)
@@ -512,13 +462,6 @@ qlcnic_setup_pci_map(struct qlcnic_adapter *adapter)
struct pci_dev *pdev = adapter->pdev;
int pci_func = adapter->ahw.pci_func;
- /*
- * Set the CRB window to invalid. If any register in window 0 is
- * accessed it should set the window to 0 and then reset it to 1.
- */
- adapter->ahw.crb_win = -1;
- adapter->ahw.ocm_win = -1;
-
/* remap phys address */
mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
mem_len = pci_resource_len(pdev, 0);
@@ -556,7 +499,9 @@ static void get_brd_name(struct qlcnic_adapter *adapter, char *name)
qlcnic_boards[i].device == pdev->device &&
qlcnic_boards[i].sub_vendor == pdev->subsystem_vendor &&
qlcnic_boards[i].sub_device == pdev->subsystem_device) {
- strcpy(name, qlcnic_boards[i].short_name);
+ sprintf(name, "%pM: %s",
+ adapter->mac_addr,
+ qlcnic_boards[i].short_name);
found = 1;
break;
}
@@ -605,22 +550,10 @@ qlcnic_check_options(struct qlcnic_adapter *adapter)
brd_name, adapter->ahw.revision_id);
}
- if (adapter->fw_version < QLCNIC_VERSION_CODE(3, 4, 216)) {
- adapter->driver_mismatch = 1;
- dev_warn(&pdev->dev, "firmware version %d.%d.%d unsupported\n",
- fw_major, fw_minor, fw_build);
- return;
- }
-
- i = QLCRD32(adapter, QLCNIC_SRE_MISC);
- adapter->ahw.cut_through = (i & 0x8000) ? 1 : 0;
-
- dev_info(&pdev->dev, "firmware v%d.%d.%d [%s]\n",
- fw_major, fw_minor, fw_build,
- adapter->ahw.cut_through ? "cut-through" : "legacy");
+ dev_info(&pdev->dev, "firmware v%d.%d.%d\n",
+ fw_major, fw_minor, fw_build);
- if (adapter->fw_version >= QLCNIC_VERSION_CODE(4, 0, 222))
- adapter->capabilities = QLCRD32(adapter, CRB_FW_CAPABILITIES_1);
+ adapter->capabilities = QLCRD32(adapter, CRB_FW_CAPABILITIES_1);
adapter->flags &= ~QLCNIC_LRO_ENABLED;
@@ -637,7 +570,6 @@ qlcnic_check_options(struct qlcnic_adapter *adapter)
adapter->num_txd = MAX_CMD_DESCRIPTORS;
- adapter->num_lro_rxd = 0;
adapter->max_rds_rings = 2;
}
@@ -646,11 +578,10 @@ qlcnic_start_firmware(struct qlcnic_adapter *adapter)
{
int val, err, first_boot;
- err = qlcnic_set_dma_mask(adapter);
- if (err)
+ err = qlcnic_can_start_firmware(adapter);
+ if (err < 0)
return err;
-
- if (!qlcnic_can_start_firmware(adapter))
+ else if (!err)
goto wait_init;
first_boot = QLCRD32(adapter, QLCNIC_CAM_RAM(0x1fc));
@@ -658,7 +589,10 @@ qlcnic_start_firmware(struct qlcnic_adapter *adapter)
/* This is the first boot after power up */
QLCWR32(adapter, QLCNIC_CAM_RAM(0x1fc), QLCNIC_BDINFO_MAGIC);
- qlcnic_request_firmware(adapter);
+ if (load_fw_file)
+ qlcnic_request_firmware(adapter);
+ else
+ adapter->fw_type = QLCNIC_FLASH_ROMIMAGE;
err = qlcnic_need_fw_reset(adapter);
if (err < 0)
@@ -672,7 +606,6 @@ qlcnic_start_firmware(struct qlcnic_adapter *adapter)
msleep(1);
}
- QLCWR32(adapter, CRB_DMA_SHIFT, 0x55555555);
QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS1, 0);
QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS2, 0);
@@ -696,16 +629,18 @@ wait_init:
goto err_out;
QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY);
-
- qlcnic_update_dma_mask(adapter);
+ qlcnic_idc_debug_info(adapter, 1);
qlcnic_check_options(adapter);
adapter->need_fw_reset = 0;
- /* fall through and release firmware */
+ qlcnic_release_firmware(adapter);
+ return 0;
err_out:
+ QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
+ dev_err(&adapter->pdev->dev, "Device state set to failed\n");
qlcnic_release_firmware(adapter);
return err;
}
@@ -937,6 +872,7 @@ void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
struct qlcnic_host_sds_ring *sds_ring;
int ring;
+ clear_bit(__QLCNIC_DEV_UP, &adapter->state);
if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
sds_ring = &adapter->recv_ctx.sds_rings[ring];
@@ -950,11 +886,11 @@ void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
adapter->max_sds_rings = max_sds_rings;
if (qlcnic_attach(adapter))
- return;
+ goto out;
if (netif_running(netdev))
__qlcnic_up(adapter, netdev);
-
+out:
netif_device_attach(netdev);
}
@@ -976,8 +912,10 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
adapter->diag_test = test;
ret = qlcnic_attach(adapter);
- if (ret)
+ if (ret) {
+ netif_device_attach(netdev);
return ret;
+ }
if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
@@ -985,6 +923,7 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
qlcnic_enable_int(sds_ring);
}
}
+ set_bit(__QLCNIC_DEV_UP, &adapter->state);
return 0;
}
@@ -1010,23 +949,19 @@ qlcnic_reset_context(struct qlcnic_adapter *adapter)
if (netif_running(netdev)) {
err = qlcnic_attach(adapter);
if (!err)
- err = __qlcnic_up(adapter, netdev);
-
- if (err)
- goto done;
+ __qlcnic_up(adapter, netdev);
}
netif_device_attach(netdev);
}
-done:
clear_bit(__QLCNIC_RESETTING, &adapter->state);
return err;
}
static int
qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
- struct net_device *netdev)
+ struct net_device *netdev, u8 pci_using_dac)
{
int err;
struct pci_dev *pdev = adapter->pdev;
@@ -1049,7 +984,7 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
netdev->features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
netdev->vlan_features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
- if (adapter->pci_using_dac) {
+ if (pci_using_dac) {
netdev->features |= NETIF_F_HIGHDMA;
netdev->vlan_features |= NETIF_F_HIGHDMA;
}
@@ -1079,6 +1014,22 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
return 0;
}
+static int qlcnic_set_dma_mask(struct pci_dev *pdev, u8 *pci_using_dac)
+{
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
+ !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
+ *pci_using_dac = 1;
+ else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) &&
+ !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
+ *pci_using_dac = 0;
+ else {
+ dev_err(&pdev->dev, "Unable to set DMA mask, aborting\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
static int __devinit
qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
@@ -1087,6 +1038,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
int err;
int pci_func_id = PCI_FUNC(pdev->devfn);
uint8_t revision_id;
+ uint8_t pci_using_dac;
err = pci_enable_device(pdev);
if (err)
@@ -1097,6 +1049,10 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_disable_pdev;
}
+ err = qlcnic_set_dma_mask(pdev, &pci_using_dac);
+ if (err)
+ goto err_out_disable_pdev;
+
err = pci_request_regions(pdev, qlcnic_driver_name);
if (err)
goto err_out_disable_pdev;
@@ -1115,6 +1071,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter = netdev_priv(netdev);
adapter->netdev = netdev;
adapter->pdev = pdev;
+ adapter->dev_rst_time = jiffies;
adapter->ahw.pci_func = pci_func_id;
revision_id = pdev->revision;
@@ -1139,21 +1096,23 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_iounmap;
}
+ if (qlcnic_read_mac_addr(adapter))
+ dev_warn(&pdev->dev, "failed to read mac addr\n");
+
+ if (qlcnic_setup_idc_param(adapter))
+ goto err_out_iounmap;
err = qlcnic_start_firmware(adapter);
- if (err)
+ if (err) {
+ dev_err(&pdev->dev, "Loading fw failed.Please Reboot\n");
goto err_out_decr_ref;
-
- /*
- * See if the firmware gave us a virtual-physical port mapping.
- */
- adapter->physical_port = adapter->portnum;
+ }
qlcnic_clear_stats(adapter);
qlcnic_setup_intr(adapter);
- err = qlcnic_setup_netdev(adapter, netdev);
+ err = qlcnic_setup_netdev(adapter, netdev, pci_using_dac);
if (err)
goto err_out_disable_msi;
@@ -1304,9 +1263,6 @@ qlcnic_resume(struct pci_dev *pdev)
pci_set_master(pdev);
pci_restore_state(pdev);
- adapter->ahw.crb_win = -1;
- adapter->ahw.ocm_win = -1;
-
err = qlcnic_start_firmware(adapter);
if (err) {
dev_err(&pdev->dev, "failed to start firmware\n");
@@ -1334,6 +1290,7 @@ err_out_detach:
qlcnic_detach(adapter);
err_out:
qlcnic_clr_all_drv_state(adapter);
+ netif_device_attach(netdev);
return err;
}
#endif
@@ -1570,6 +1527,11 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
int frag_count, no_of_desc;
u32 num_txd = tx_ring->num_desc;
+ if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
+ netif_stop_queue(netdev);
+ return NETDEV_TX_BUSY;
+ }
+
frag_count = skb_shinfo(skb)->nr_frags + 1;
/* 4 fragments per cmd des */
@@ -1586,8 +1548,10 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
pdev = adapter->pdev;
- if (qlcnic_map_tx_skb(pdev, skb, pbuf))
+ if (qlcnic_map_tx_skb(pdev, skb, pbuf)) {
+ adapter->stats.tx_dma_map_error++;
goto drop_packet;
+ }
pbuf->skb = skb;
pbuf->frag_count = frag_count;
@@ -1739,6 +1703,7 @@ static void qlcnic_tx_timeout_task(struct work_struct *work)
request_reset:
adapter->need_fw_reset = 1;
clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ QLCDB(adapter, DRV, "Resetting adapter\n");
}
static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
@@ -1750,7 +1715,7 @@ static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
stats->tx_packets = adapter->stats.xmitfinished;
- stats->rx_bytes = adapter->stats.rxbytes;
+ stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes;
stats->tx_bytes = adapter->stats.txbytes;
stats->rx_dropped = adapter->stats.rxdropped;
stats->tx_dropped = adapter->stats.txdropped;
@@ -1944,7 +1909,20 @@ static void qlcnic_poll_controller(struct net_device *netdev)
#endif
static void
-qlcnic_set_drv_state(struct qlcnic_adapter *adapter, int state)
+qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding)
+{
+ u32 val;
+
+ val = adapter->portnum & 0xf;
+ val |= encoding << 7;
+ val |= (jiffies - adapter->dev_rst_time) << 8;
+
+ QLCWR32(adapter, QLCNIC_CRB_DRV_SCRATCH, val);
+ adapter->dev_rst_time = jiffies;
+}
+
+static int
+qlcnic_set_drv_state(struct qlcnic_adapter *adapter, u8 state)
{
u32 val;
@@ -1952,18 +1930,20 @@ qlcnic_set_drv_state(struct qlcnic_adapter *adapter, int state)
state != QLCNIC_DEV_NEED_QUISCENT);
if (qlcnic_api_lock(adapter))
- return ;
+ return -EIO;
val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
if (state == QLCNIC_DEV_NEED_RESET)
- val |= ((u32)0x1 << (adapter->portnum * 4));
+ QLC_DEV_SET_RST_RDY(val, adapter->portnum);
else if (state == QLCNIC_DEV_NEED_QUISCENT)
- val |= ((u32)0x1 << ((adapter->portnum * 4) + 1));
+ QLC_DEV_SET_QSCNT_RDY(val, adapter->portnum);
QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
qlcnic_api_unlock(adapter);
+
+ return 0;
}
static int
@@ -1975,7 +1955,7 @@ qlcnic_clr_drv_state(struct qlcnic_adapter *adapter)
return -EBUSY;
val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
- val &= ~((u32)0x3 << (adapter->portnum * 4));
+ QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
qlcnic_api_unlock(adapter);
@@ -1992,14 +1972,14 @@ qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter)
goto err;
val = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
- val &= ~((u32)0x1 << (adapter->portnum * 4));
+ QLC_DEV_CLR_REF_CNT(val, adapter->portnum);
QLCWR32(adapter, QLCNIC_CRB_DEV_REF_COUNT, val);
if (!(val & 0x11111111))
QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_COLD);
val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
- val &= ~((u32)0x3 << (adapter->portnum * 4));
+ QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
qlcnic_api_unlock(adapter);
@@ -2009,6 +1989,7 @@ err:
clear_bit(__QLCNIC_RESETTING, &adapter->state);
}
+/* Grab api lock, before checking state */
static int
qlcnic_check_drv_state(struct qlcnic_adapter *adapter)
{
@@ -2024,73 +2005,103 @@ qlcnic_check_drv_state(struct qlcnic_adapter *adapter)
return 1;
}
+static int qlcnic_check_idc_ver(struct qlcnic_adapter *adapter)
+{
+ u32 val = QLCRD32(adapter, QLCNIC_CRB_DRV_IDC_VER);
+
+ if (val != QLCNIC_DRV_IDC_VER) {
+ dev_warn(&adapter->pdev->dev, "IDC Version mismatch, driver's"
+ " idc ver = %x; reqd = %x\n", QLCNIC_DRV_IDC_VER, val);
+ }
+
+ return 0;
+}
+
static int
qlcnic_can_start_firmware(struct qlcnic_adapter *adapter)
{
u32 val, prev_state;
- int cnt = 0;
- int portnum = adapter->portnum;
+ u8 dev_init_timeo = adapter->dev_init_timeo;
+ u8 portnum = adapter->portnum;
+ u8 ret;
+
+ if (test_and_clear_bit(__QLCNIC_START_FW, &adapter->state))
+ return 1;
if (qlcnic_api_lock(adapter))
return -1;
val = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
- if (!(val & ((int)0x1 << (portnum * 4)))) {
- val |= ((u32)0x1 << (portnum * 4));
+ if (!(val & (1 << (portnum * 4)))) {
+ QLC_DEV_SET_REF_CNT(val, portnum);
QLCWR32(adapter, QLCNIC_CRB_DEV_REF_COUNT, val);
- } else if (test_and_clear_bit(__QLCNIC_START_FW, &adapter->state)) {
- goto start_fw;
}
prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+ QLCDB(adapter, HW, "Device state = %u\n", prev_state);
switch (prev_state) {
case QLCNIC_DEV_COLD:
-start_fw:
- QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITALIZING);
+ QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
+ QLCWR32(adapter, QLCNIC_CRB_DRV_IDC_VER, QLCNIC_DRV_IDC_VER);
+ qlcnic_idc_debug_info(adapter, 0);
qlcnic_api_unlock(adapter);
return 1;
case QLCNIC_DEV_READY:
+ ret = qlcnic_check_idc_ver(adapter);
qlcnic_api_unlock(adapter);
- return 0;
+ return ret;
case QLCNIC_DEV_NEED_RESET:
val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
- val |= ((u32)0x1 << (portnum * 4));
+ QLC_DEV_SET_RST_RDY(val, portnum);
QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
break;
case QLCNIC_DEV_NEED_QUISCENT:
val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
- val |= ((u32)0x1 << ((portnum * 4) + 1));
+ QLC_DEV_SET_QSCNT_RDY(val, portnum);
QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
break;
case QLCNIC_DEV_FAILED:
+ dev_err(&adapter->pdev->dev, "Device in failed state.\n");
qlcnic_api_unlock(adapter);
return -1;
+
+ case QLCNIC_DEV_INITIALIZING:
+ case QLCNIC_DEV_QUISCENT:
+ break;
}
qlcnic_api_unlock(adapter);
- msleep(1000);
- while ((QLCRD32(adapter, QLCNIC_CRB_DEV_STATE) != QLCNIC_DEV_READY) &&
- ++cnt < 20)
+
+ do {
msleep(1000);
+ prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+
+ if (prev_state == QLCNIC_DEV_QUISCENT)
+ continue;
+ } while ((prev_state != QLCNIC_DEV_READY) && --dev_init_timeo);
- if (cnt >= 20)
+ if (!dev_init_timeo) {
+ dev_err(&adapter->pdev->dev,
+ "Waiting for device to initialize timeout\n");
return -1;
+ }
if (qlcnic_api_lock(adapter))
return -1;
val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
- val &= ~((u32)0x3 << (portnum * 4));
+ QLC_DEV_CLR_RST_QSCNT(val, portnum);
QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+ ret = qlcnic_check_idc_ver(adapter);
qlcnic_api_unlock(adapter);
- return 0;
+ return ret;
}
static void
@@ -2098,44 +2109,84 @@ qlcnic_fwinit_work(struct work_struct *work)
{
struct qlcnic_adapter *adapter = container_of(work,
struct qlcnic_adapter, fw_work.work);
- int dev_state;
+ u32 dev_state = 0xf;
- if (++adapter->fw_wait_cnt > FW_POLL_THRESH)
+ if (qlcnic_api_lock(adapter))
goto err_ret;
- if (test_bit(__QLCNIC_START_FW, &adapter->state)) {
+ dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+ if (dev_state == QLCNIC_DEV_QUISCENT) {
+ qlcnic_api_unlock(adapter);
+ qlcnic_schedule_work(adapter, qlcnic_fwinit_work,
+ FW_POLL_DELAY * 2);
+ return;
+ }
+
+ if (adapter->fw_wait_cnt++ > adapter->reset_ack_timeo) {
+ dev_err(&adapter->pdev->dev, "Reset:Failed to get ack %d sec\n",
+ adapter->reset_ack_timeo);
+ goto skip_ack_check;
+ }
+
+ if (!qlcnic_check_drv_state(adapter)) {
+skip_ack_check:
+ dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
- if (qlcnic_check_drv_state(adapter)) {
- qlcnic_schedule_work(adapter,
- qlcnic_fwinit_work, FW_POLL_DELAY);
+ if (dev_state == QLCNIC_DEV_NEED_QUISCENT) {
+ QLCWR32(adapter, QLCNIC_CRB_DEV_STATE,
+ QLCNIC_DEV_QUISCENT);
+ qlcnic_schedule_work(adapter, qlcnic_fwinit_work,
+ FW_POLL_DELAY * 2);
+ QLCDB(adapter, DRV, "Quiscing the driver\n");
+ qlcnic_idc_debug_info(adapter, 0);
+
+ qlcnic_api_unlock(adapter);
return;
}
+ if (dev_state == QLCNIC_DEV_NEED_RESET) {
+ QLCWR32(adapter, QLCNIC_CRB_DEV_STATE,
+ QLCNIC_DEV_INITIALIZING);
+ set_bit(__QLCNIC_START_FW, &adapter->state);
+ QLCDB(adapter, DRV, "Restarting fw\n");
+ qlcnic_idc_debug_info(adapter, 0);
+ }
+
+ qlcnic_api_unlock(adapter);
+
if (!qlcnic_start_firmware(adapter)) {
qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
return;
}
-
goto err_ret;
}
+ qlcnic_api_unlock(adapter);
+
dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+ QLCDB(adapter, HW, "Func waiting: Device state=%u\n", dev_state);
+
switch (dev_state) {
- case QLCNIC_DEV_READY:
- if (!qlcnic_start_firmware(adapter)) {
- qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
- return;
- }
+ case QLCNIC_DEV_QUISCENT:
+ case QLCNIC_DEV_NEED_QUISCENT:
+ case QLCNIC_DEV_NEED_RESET:
+ qlcnic_schedule_work(adapter,
+ qlcnic_fwinit_work, FW_POLL_DELAY);
+ return;
case QLCNIC_DEV_FAILED:
break;
default:
- qlcnic_schedule_work(adapter,
- qlcnic_fwinit_work, 2 * FW_POLL_DELAY);
- return;
+ if (!qlcnic_start_firmware(adapter)) {
+ qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
+ return;
+ }
}
err_ret:
+ dev_err(&adapter->pdev->dev, "Fwinit work failed state=%u "
+ "fw_wait_cnt=%u\n", dev_state, adapter->fw_wait_cnt);
+ netif_device_attach(adapter->netdev);
qlcnic_clr_all_drv_state(adapter);
}
@@ -2163,7 +2214,8 @@ qlcnic_detach_work(struct work_struct *work)
if (adapter->temp == QLCNIC_TEMP_PANIC)
goto err_ret;
- qlcnic_set_drv_state(adapter, adapter->dev_state);
+ if (qlcnic_set_drv_state(adapter, adapter->dev_state))
+ goto err_ret;
adapter->fw_wait_cnt = 0;
@@ -2172,10 +2224,14 @@ qlcnic_detach_work(struct work_struct *work)
return;
err_ret:
+ dev_err(&adapter->pdev->dev, "detach failed; status=%d temp=%d\n",
+ status, adapter->temp);
+ netif_device_attach(netdev);
qlcnic_clr_all_drv_state(adapter);
}
+/*Transit to RESET state from READY state only */
static void
qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
{
@@ -2186,9 +2242,10 @@ qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
- if (state != QLCNIC_DEV_INITALIZING && state != QLCNIC_DEV_NEED_RESET) {
+ if (state == QLCNIC_DEV_READY) {
QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_RESET);
- set_bit(__QLCNIC_START_FW, &adapter->state);
+ QLCDB(adapter, DRV, "NEED_RESET state set\n");
+ qlcnic_idc_debug_info(adapter, 0);
}
qlcnic_api_unlock(adapter);
@@ -2233,9 +2290,8 @@ qlcnic_attach_work(struct work_struct *work)
qlcnic_config_indev_addr(netdev, NETDEV_UP);
}
- netif_device_attach(netdev);
-
done:
+ netif_device_attach(netdev);
adapter->fw_fail_cnt = 0;
clear_bit(__QLCNIC_RESETTING, &adapter->state);
@@ -2253,10 +2309,8 @@ qlcnic_check_health(struct qlcnic_adapter *adapter)
if (qlcnic_check_temp(adapter))
goto detach;
- if (adapter->need_fw_reset) {
+ if (adapter->need_fw_reset)
qlcnic_dev_request_reset(adapter);
- goto detach;
- }
state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
if (state == QLCNIC_DEV_NEED_RESET || state == QLCNIC_DEV_NEED_QUISCENT)
@@ -2285,8 +2339,11 @@ detach:
QLCNIC_DEV_NEED_RESET;
if ((auto_fw_reset == AUTO_FW_RESET_ENABLED) &&
- !test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ !test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) {
+
qlcnic_schedule_work(adapter, qlcnic_detach_work, 0);
+ QLCDB(adapter, DRV, "fw recovery scheduled.\n");
+ }
return 1;
}
@@ -2387,51 +2444,72 @@ static int
qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter,
loff_t offset, size_t size)
{
+ size_t crb_size = 4;
+
if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
return -EIO;
- if ((size != 4) || (offset & 0x3))
- return -EINVAL;
+ if (offset < QLCNIC_PCI_CRBSPACE) {
+ if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM,
+ QLCNIC_PCI_CAMQM_END))
+ crb_size = 8;
+ else
+ return -EINVAL;
+ }
- if (offset < QLCNIC_PCI_CRBSPACE)
- return -EINVAL;
+ if ((size != crb_size) || (offset & (crb_size-1)))
+ return -EINVAL;
return 0;
}
static ssize_t
-qlcnic_sysfs_read_crb(struct kobject *kobj, struct bin_attribute *attr,
+qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
char *buf, loff_t offset, size_t size)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
u32 data;
+ u64 qmdata;
int ret;
ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
if (ret != 0)
return ret;
- data = QLCRD32(adapter, offset);
- memcpy(buf, &data, size);
+ if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
+ qlcnic_pci_camqm_read_2M(adapter, offset, &qmdata);
+ memcpy(buf, &qmdata, size);
+ } else {
+ data = QLCRD32(adapter, offset);
+ memcpy(buf, &data, size);
+ }
return size;
}
static ssize_t
-qlcnic_sysfs_write_crb(struct kobject *kobj, struct bin_attribute *attr,
+qlcnic_sysfs_write_crb(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
char *buf, loff_t offset, size_t size)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
u32 data;
+ u64 qmdata;
int ret;
ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
if (ret != 0)
return ret;
- memcpy(&data, buf, size);
- QLCWR32(adapter, offset, data);
+ if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
+ memcpy(&qmdata, buf, size);
+ qlcnic_pci_camqm_write_2M(adapter, offset, qmdata);
+ } else {
+ memcpy(&data, buf, size);
+ QLCWR32(adapter, offset, data);
+ }
return size;
}
@@ -2449,7 +2527,8 @@ qlcnic_sysfs_validate_mem(struct qlcnic_adapter *adapter,
}
static ssize_t
-qlcnic_sysfs_read_mem(struct kobject *kobj, struct bin_attribute *attr,
+qlcnic_sysfs_read_mem(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
char *buf, loff_t offset, size_t size)
{
struct device *dev = container_of(kobj, struct device, kobj);
@@ -2470,7 +2549,8 @@ qlcnic_sysfs_read_mem(struct kobject *kobj, struct bin_attribute *attr,
}
static ssize_t
-qlcnic_sysfs_write_mem(struct kobject *kobj, struct bin_attribute *attr,
+qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
char *buf, loff_t offset, size_t size)
{
struct device *dev = container_of(kobj, struct device, kobj);
@@ -2553,24 +2633,12 @@ qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
#define is_qlcnic_netdev(dev) (dev->netdev_ops == &qlcnic_netdev_ops)
-static int
-qlcnic_destip_supported(struct qlcnic_adapter *adapter)
-{
- if (adapter->ahw.cut_through)
- return 0;
-
- return 1;
-}
-
static void
qlcnic_config_indev_addr(struct net_device *dev, unsigned long event)
{
struct in_device *indev;
struct qlcnic_adapter *adapter = netdev_priv(dev);
- if (!qlcnic_destip_supported(adapter))
- return;
-
indev = in_dev_get(dev);
if (!indev)
return;
@@ -2591,7 +2659,6 @@ qlcnic_config_indev_addr(struct net_device *dev, unsigned long event)
} endfor_ifa(indev);
in_dev_put(indev);
- return;
}
static int qlcnic_netdev_event(struct notifier_block *this,
@@ -2650,7 +2717,7 @@ recheck:
adapter = netdev_priv(dev);
- if (!adapter || !qlcnic_destip_supported(adapter))
+ if (!adapter)
goto done;
if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index 8b742b639ceb..20624ba44a37 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -1344,8 +1344,8 @@ struct oal {
};
struct map_list {
- DECLARE_PCI_UNMAP_ADDR(mapaddr);
- DECLARE_PCI_UNMAP_LEN(maplen);
+ DEFINE_DMA_UNMAP_ADDR(mapaddr);
+ DEFINE_DMA_UNMAP_LEN(maplen);
};
struct tx_ring_desc {
@@ -1373,8 +1373,8 @@ struct bq_desc {
} p;
__le64 *addr;
u32 index;
- DECLARE_PCI_UNMAP_ADDR(mapaddr);
- DECLARE_PCI_UNMAP_LEN(maplen);
+ DEFINE_DMA_UNMAP_ADDR(mapaddr);
+ DEFINE_DMA_UNMAP_LEN(maplen);
};
#define QL_TXQ_IDX(qdev, skb) (smp_processor_id()%(qdev->tx_ring_count))
diff --git a/drivers/net/qlge/qlge_dbg.c b/drivers/net/qlge/qlge_dbg.c
index 362664628937..68a1c9b91e74 100644
--- a/drivers/net/qlge/qlge_dbg.c
+++ b/drivers/net/qlge/qlge_dbg.c
@@ -1340,7 +1340,7 @@ void ql_mpi_core_to_log(struct work_struct *work)
for (i = 0; i < count; i += 8) {
printk(KERN_ERR "%.08x: %.08x %.08x %.08x %.08x %.08x "
- "%.08x %.08x %.08x \n", i,
+ "%.08x %.08x %.08x\n", i,
tmp[i + 0],
tmp[i + 1],
tmp[i + 2],
@@ -2058,7 +2058,7 @@ void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb)
ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_LSO ? "LSO" : "",
ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_UC ? "UC" : "",
ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_TC ? "TC" : "");
- printk(KERN_ERR PFX "flags3 = %s %s %s \n",
+ printk(KERN_ERR PFX "flags3 = %s %s %s\n",
ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_IC ? "IC" : "",
ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_DFP ? "DFP" : "",
ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_V ? "V" : "");
diff --git a/drivers/net/qlge/qlge_ethtool.c b/drivers/net/qlge/qlge_ethtool.c
index 7e09ff4a5755..4892d64f4e05 100644
--- a/drivers/net/qlge/qlge_ethtool.c
+++ b/drivers/net/qlge/qlge_ethtool.c
@@ -181,8 +181,6 @@ quit:
spin_unlock(&qdev->stats_lock);
QL_DUMP_STAT(qdev);
-
- return;
}
static char ql_stats_str_arr[][ETH_GSTRING_LEN] = {
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index fd34f266c0a8..fa4b24c49f42 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -1057,7 +1057,7 @@ static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
pci_dma_sync_single_for_cpu(qdev->pdev,
- pci_unmap_addr(lbq_desc, mapaddr),
+ dma_unmap_addr(lbq_desc, mapaddr),
rx_ring->lbq_buf_size,
PCI_DMA_FROMDEVICE);
@@ -1170,8 +1170,8 @@ static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
map = lbq_desc->p.pg_chunk.map +
lbq_desc->p.pg_chunk.offset;
- pci_unmap_addr_set(lbq_desc, mapaddr, map);
- pci_unmap_len_set(lbq_desc, maplen,
+ dma_unmap_addr_set(lbq_desc, mapaddr, map);
+ dma_unmap_len_set(lbq_desc, maplen,
rx_ring->lbq_buf_size);
*lbq_desc->addr = cpu_to_le64(map);
@@ -1241,8 +1241,8 @@ static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
sbq_desc->p.skb = NULL;
return;
}
- pci_unmap_addr_set(sbq_desc, mapaddr, map);
- pci_unmap_len_set(sbq_desc, maplen,
+ dma_unmap_addr_set(sbq_desc, mapaddr, map);
+ dma_unmap_len_set(sbq_desc, maplen,
rx_ring->sbq_buf_size);
*sbq_desc->addr = cpu_to_le64(map);
}
@@ -1298,18 +1298,18 @@ static void ql_unmap_send(struct ql_adapter *qdev,
"unmapping OAL area.\n");
}
pci_unmap_single(qdev->pdev,
- pci_unmap_addr(&tx_ring_desc->map[i],
+ dma_unmap_addr(&tx_ring_desc->map[i],
mapaddr),
- pci_unmap_len(&tx_ring_desc->map[i],
+ dma_unmap_len(&tx_ring_desc->map[i],
maplen),
PCI_DMA_TODEVICE);
} else {
netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
"unmapping frag %d.\n", i);
pci_unmap_page(qdev->pdev,
- pci_unmap_addr(&tx_ring_desc->map[i],
+ dma_unmap_addr(&tx_ring_desc->map[i],
mapaddr),
- pci_unmap_len(&tx_ring_desc->map[i],
+ dma_unmap_len(&tx_ring_desc->map[i],
maplen), PCI_DMA_TODEVICE);
}
}
@@ -1348,8 +1348,8 @@ static int ql_map_send(struct ql_adapter *qdev,
tbd->len = cpu_to_le32(len);
tbd->addr = cpu_to_le64(map);
- pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
- pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
+ dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
+ dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
map_idx++;
/*
@@ -1402,9 +1402,9 @@ static int ql_map_send(struct ql_adapter *qdev,
tbd->len =
cpu_to_le32((sizeof(struct tx_buf_desc) *
(frag_cnt - frag_idx)) | TX_DESC_C);
- pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
+ dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
map);
- pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
+ dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
sizeof(struct oal));
tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
map_idx++;
@@ -1425,8 +1425,8 @@ static int ql_map_send(struct ql_adapter *qdev,
tbd->addr = cpu_to_le64(map);
tbd->len = cpu_to_le32(frag->size);
- pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
- pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
+ dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
+ dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
frag->size);
}
@@ -1742,8 +1742,8 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
*/
sbq_desc = ql_get_curr_sbuf(rx_ring);
pci_unmap_single(qdev->pdev,
- pci_unmap_addr(sbq_desc, mapaddr),
- pci_unmap_len(sbq_desc, maplen),
+ dma_unmap_addr(sbq_desc, mapaddr),
+ dma_unmap_len(sbq_desc, maplen),
PCI_DMA_FROMDEVICE);
skb = sbq_desc->p.skb;
ql_realign_skb(skb, hdr_len);
@@ -1774,18 +1774,18 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
*/
sbq_desc = ql_get_curr_sbuf(rx_ring);
pci_dma_sync_single_for_cpu(qdev->pdev,
- pci_unmap_addr
+ dma_unmap_addr
(sbq_desc, mapaddr),
- pci_unmap_len
+ dma_unmap_len
(sbq_desc, maplen),
PCI_DMA_FROMDEVICE);
memcpy(skb_put(skb, length),
sbq_desc->p.skb->data, length);
pci_dma_sync_single_for_device(qdev->pdev,
- pci_unmap_addr
+ dma_unmap_addr
(sbq_desc,
mapaddr),
- pci_unmap_len
+ dma_unmap_len
(sbq_desc,
maplen),
PCI_DMA_FROMDEVICE);
@@ -1798,9 +1798,9 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
ql_realign_skb(skb, length);
skb_put(skb, length);
pci_unmap_single(qdev->pdev,
- pci_unmap_addr(sbq_desc,
+ dma_unmap_addr(sbq_desc,
mapaddr),
- pci_unmap_len(sbq_desc,
+ dma_unmap_len(sbq_desc,
maplen),
PCI_DMA_FROMDEVICE);
sbq_desc->p.skb = NULL;
@@ -1839,9 +1839,9 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
return NULL;
}
pci_unmap_page(qdev->pdev,
- pci_unmap_addr(lbq_desc,
+ dma_unmap_addr(lbq_desc,
mapaddr),
- pci_unmap_len(lbq_desc, maplen),
+ dma_unmap_len(lbq_desc, maplen),
PCI_DMA_FROMDEVICE);
skb_reserve(skb, NET_IP_ALIGN);
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
@@ -1874,8 +1874,8 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
int size, i = 0;
sbq_desc = ql_get_curr_sbuf(rx_ring);
pci_unmap_single(qdev->pdev,
- pci_unmap_addr(sbq_desc, mapaddr),
- pci_unmap_len(sbq_desc, maplen),
+ dma_unmap_addr(sbq_desc, mapaddr),
+ dma_unmap_len(sbq_desc, maplen),
PCI_DMA_FROMDEVICE);
if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
/*
@@ -2737,8 +2737,8 @@ static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring
}
if (sbq_desc->p.skb) {
pci_unmap_single(qdev->pdev,
- pci_unmap_addr(sbq_desc, mapaddr),
- pci_unmap_len(sbq_desc, maplen),
+ dma_unmap_addr(sbq_desc, mapaddr),
+ dma_unmap_len(sbq_desc, maplen),
PCI_DMA_FROMDEVICE);
dev_kfree_skb(sbq_desc->p.skb);
sbq_desc->p.skb = NULL;
@@ -4207,7 +4207,7 @@ static struct net_device_stats *qlge_get_stats(struct net_device
static void qlge_set_multicast_list(struct net_device *ndev)
{
struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
- struct dev_mc_list *mc_ptr;
+ struct netdev_hw_addr *ha;
int i, status;
status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
@@ -4271,8 +4271,8 @@ static void qlge_set_multicast_list(struct net_device *ndev)
if (status)
goto exit;
i = 0;
- netdev_for_each_mc_addr(mc_ptr, ndev) {
- if (ql_set_mac_addr_reg(qdev, (u8 *) mc_ptr->dmi_addr,
+ netdev_for_each_mc_addr(ha, ndev) {
+ if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
MAC_ADDR_TYPE_MULTI_MAC, i)) {
netif_err(qdev, hw, qdev->ndev,
"Failed to loadmulticast address.\n");
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c
index 0298d8c1dcb6..9a251acf5ab8 100644
--- a/drivers/net/r6040.c
+++ b/drivers/net/r6040.c
@@ -330,7 +330,7 @@ static int r6040_alloc_rxbufs(struct net_device *dev)
do {
skb = netdev_alloc_skb(dev, MAX_BUF_SIZE);
if (!skb) {
- printk(KERN_ERR DRV_NAME "%s: failed to alloc skb for rx\n", dev->name);
+ netdev_err(dev, "failed to alloc skb for rx\n");
rc = -ENOMEM;
goto err_exit;
}
@@ -400,9 +400,6 @@ static void r6040_init_mac_regs(struct net_device *dev)
* we may got called by r6040_tx_timeout which has left
* some unsent tx buffers */
iowrite16(0x01, ioaddr + MTPR);
-
- /* Check media */
- mii_check_media(&lp->mii_if, 1, 1);
}
static void r6040_tx_timeout(struct net_device *dev)
@@ -410,9 +407,9 @@ static void r6040_tx_timeout(struct net_device *dev)
struct r6040_private *priv = netdev_priv(dev);
void __iomem *ioaddr = priv->base;
- printk(KERN_WARNING "%s: transmit timed out, int enable %4.4x "
+ netdev_warn(dev, "transmit timed out, int enable %4.4x "
"status %4.4x, PHY status %4.4x\n",
- dev->name, ioread16(ioaddr + MIER),
+ ioread16(ioaddr + MIER),
ioread16(ioaddr + MISR),
r6040_mdio_read(dev, priv->mii_if.phy_id, MII_BMSR));
@@ -530,8 +527,6 @@ static int r6040_phy_mode_chk(struct net_device *dev)
phy_dat = 0x0000;
}
- mii_check_media(&lp->mii_if, 0, 1);
-
return phy_dat;
};
@@ -813,6 +808,9 @@ static void r6040_timer(unsigned long data)
/* Timer active again */
mod_timer(&lp->timer, round_jiffies(jiffies + HZ));
+
+ /* Check media */
+ mii_check_media(&lp->mii_if, 1, 1);
}
/* Read/set MAC address routines */
@@ -897,7 +895,7 @@ static netdev_tx_t r6040_start_xmit(struct sk_buff *skb,
if (!lp->tx_free_desc) {
spin_unlock_irqrestore(&lp->lock, flags);
netif_stop_queue(dev);
- printk(KERN_ERR DRV_NAME ": no tx descriptor\n");
+ netdev_err(dev, ": no tx descriptor\n");
return NETDEV_TX_BUSY;
}
@@ -924,7 +922,6 @@ static netdev_tx_t r6040_start_xmit(struct sk_buff *skb,
if (!lp->tx_free_desc)
netif_stop_queue(dev);
- dev->trans_start = jiffies;
spin_unlock_irqrestore(&lp->lock, flags);
return NETDEV_TX_OK;
@@ -937,7 +934,7 @@ static void r6040_multicast_list(struct net_device *dev)
u16 *adrp;
u16 reg;
unsigned long flags;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
int i;
/* MAC Address */
@@ -972,8 +969,8 @@ static void r6040_multicast_list(struct net_device *dev)
for (i = 0; i < 4; i++)
hash_table[i] = 0;
- netdev_for_each_mc_addr(dmi, dev) {
- char *addrs = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ char *addrs = ha->addr;
if (!(*addrs & 1))
continue;
@@ -990,9 +987,9 @@ static void r6040_multicast_list(struct net_device *dev)
}
/* Multicast Address 1~4 case */
i = 0;
- netdev_for_each_mc_addr(dmi, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
if (i < MCAST_MAX) {
- adrp = (u16 *) dmi->dmi_addr;
+ adrp = (u16 *) ha->addr;
iowrite16(adrp[0], ioaddr + MID_1L + 8 * i);
iowrite16(adrp[1], ioaddr + MID_1M + 8 * i);
iowrite16(adrp[2], ioaddr + MID_1H + 8 * i);
@@ -1090,20 +1087,20 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
/* this should always be supported */
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
- printk(KERN_ERR DRV_NAME ": 32-bit PCI DMA addresses"
+ dev_err(&pdev->dev, "32-bit PCI DMA addresses"
"not supported by the card\n");
goto err_out;
}
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
- printk(KERN_ERR DRV_NAME ": 32-bit PCI DMA addresses"
+ dev_err(&pdev->dev, "32-bit PCI DMA addresses"
"not supported by the card\n");
goto err_out;
}
/* IO Size check */
if (pci_resource_len(pdev, bar) < io_size) {
- printk(KERN_ERR DRV_NAME ": Insufficient PCI resources, aborting\n");
+ dev_err(&pdev->dev, "Insufficient PCI resources, aborting\n");
err = -EIO;
goto err_out;
}
@@ -1112,7 +1109,7 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
dev = alloc_etherdev(sizeof(struct r6040_private));
if (!dev) {
- printk(KERN_ERR DRV_NAME ": Failed to allocate etherdev\n");
+ dev_err(&pdev->dev, "Failed to allocate etherdev\n");
err = -ENOMEM;
goto err_out;
}
@@ -1122,14 +1119,13 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
err = pci_request_regions(pdev, DRV_NAME);
if (err) {
- printk(KERN_ERR DRV_NAME ": Failed to request PCI regions\n");
+ dev_err(&pdev->dev, "Failed to request PCI regions\n");
goto err_out_free_dev;
}
ioaddr = pci_iomap(pdev, bar, io_size);
if (!ioaddr) {
- printk(KERN_ERR DRV_NAME ": ioremap failed for device %s\n",
- pci_name(pdev));
+ dev_err(&pdev->dev, "ioremap failed for device\n");
err = -EIO;
goto err_out_free_res;
}
@@ -1156,7 +1152,7 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
/* Some bootloader/BIOSes do not initialize
* MAC address, warn about that */
if (!(adrp[0] || adrp[1] || adrp[2])) {
- printk(KERN_WARNING DRV_NAME ": MAC address not initialized, generating random\n");
+ netdev_warn(dev, "MAC address not initialized, generating random\n");
random_ether_addr(dev->dev_addr);
}
@@ -1184,7 +1180,7 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
/* Check the vendor ID on the PHY, if 0xffff assume none attached */
if (r6040_phy_read(ioaddr, lp->phy_addr, 2) == 0xffff) {
- printk(KERN_ERR DRV_NAME ": Failed to detect an attached PHY\n");
+ dev_err(&pdev->dev, "Failed to detect an attached PHY\n");
err = -ENODEV;
goto err_out_unmap;
}
@@ -1192,7 +1188,7 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
/* Register net device. After this dev->name assign */
err = register_netdev(dev);
if (err) {
- printk(KERN_ERR DRV_NAME ": Failed to register net device\n");
+ dev_err(&pdev->dev, "Failed to register net device\n");
goto err_out_unmap;
}
return 0;
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index dd8106ff35aa..217e709bda3e 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -23,6 +23,7 @@
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
+#include <linux/pm_runtime.h>
#include <asm/system.h>
#include <asm/io.h>
@@ -509,6 +510,7 @@ struct rtl8169_private {
struct mii_if_info mii;
struct rtl8169_counters counters;
+ u32 saved_wolopts;
};
MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
@@ -748,53 +750,61 @@ static void rtl8169_check_link_status(struct net_device *dev,
spin_lock_irqsave(&tp->lock, flags);
if (tp->link_ok(ioaddr)) {
+ /* This is to cancel a scheduled suspend if there's one. */
+ pm_request_resume(&tp->pci_dev->dev);
netif_carrier_on(dev);
netif_info(tp, ifup, dev, "link up\n");
} else {
netif_carrier_off(dev);
netif_info(tp, ifdown, dev, "link down\n");
+ pm_schedule_suspend(&tp->pci_dev->dev, 100);
}
spin_unlock_irqrestore(&tp->lock, flags);
}
-static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
+
+static u32 __rtl8169_get_wol(struct rtl8169_private *tp)
{
- struct rtl8169_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->mmio_addr;
u8 options;
-
- wol->wolopts = 0;
-
-#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
- wol->supported = WAKE_ANY;
-
- spin_lock_irq(&tp->lock);
+ u32 wolopts = 0;
options = RTL_R8(Config1);
if (!(options & PMEnable))
- goto out_unlock;
+ return 0;
options = RTL_R8(Config3);
if (options & LinkUp)
- wol->wolopts |= WAKE_PHY;
+ wolopts |= WAKE_PHY;
if (options & MagicPacket)
- wol->wolopts |= WAKE_MAGIC;
+ wolopts |= WAKE_MAGIC;
options = RTL_R8(Config5);
if (options & UWF)
- wol->wolopts |= WAKE_UCAST;
+ wolopts |= WAKE_UCAST;
if (options & BWF)
- wol->wolopts |= WAKE_BCAST;
+ wolopts |= WAKE_BCAST;
if (options & MWF)
- wol->wolopts |= WAKE_MCAST;
+ wolopts |= WAKE_MCAST;
-out_unlock:
- spin_unlock_irq(&tp->lock);
+ return wolopts;
}
-static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct rtl8169_private *tp = netdev_priv(dev);
+
+ spin_lock_irq(&tp->lock);
+
+ wol->supported = WAKE_ANY;
+ wol->wolopts = __rtl8169_get_wol(tp);
+
+ spin_unlock_irq(&tp->lock);
+}
+
+static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
+{
void __iomem *ioaddr = tp->mmio_addr;
unsigned int i;
static const struct {
@@ -811,23 +821,29 @@ static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{ WAKE_ANY, Config5, LanWake }
};
- spin_lock_irq(&tp->lock);
-
RTL_W8(Cfg9346, Cfg9346_Unlock);
for (i = 0; i < ARRAY_SIZE(cfg); i++) {
u8 options = RTL_R8(cfg[i].reg) & ~cfg[i].mask;
- if (wol->wolopts & cfg[i].opt)
+ if (wolopts & cfg[i].opt)
options |= cfg[i].mask;
RTL_W8(cfg[i].reg, options);
}
RTL_W8(Cfg9346, Cfg9346_Lock);
+}
+
+static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ spin_lock_irq(&tp->lock);
if (wol->wolopts)
tp->features |= RTL_FEATURE_WOL;
else
tp->features &= ~RTL_FEATURE_WOL;
+ __rtl8169_set_wol(tp, wol->wolopts);
device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts);
spin_unlock_irq(&tp->lock);
@@ -3192,6 +3208,12 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL);
+ if (pci_dev_run_wake(pdev)) {
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ }
+ pm_runtime_idle(&pdev->dev);
+
out:
return rc;
@@ -3213,10 +3235,18 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev)
struct net_device *dev = pci_get_drvdata(pdev);
struct rtl8169_private *tp = netdev_priv(dev);
+ pm_runtime_get_sync(&pdev->dev);
+
flush_scheduled_work();
unregister_netdev(dev);
+ if (pci_dev_run_wake(pdev)) {
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ }
+ pm_runtime_put_noidle(&pdev->dev);
+
/* restore original MAC address */
rtl_rar_set(tp, dev->perm_addr);
@@ -3243,6 +3273,7 @@ static int rtl8169_open(struct net_device *dev)
struct pci_dev *pdev = tp->pci_dev;
int retval = -ENOMEM;
+ pm_runtime_get_sync(&pdev->dev);
/*
* Note that we use a magic value here, it's weird, I know
@@ -3263,7 +3294,7 @@ static int rtl8169_open(struct net_device *dev)
tp->TxDescArray = pci_alloc_consistent(pdev, R8169_TX_RING_BYTES,
&tp->TxPhyAddr);
if (!tp->TxDescArray)
- goto out;
+ goto err_pm_runtime_put;
tp->RxDescArray = pci_alloc_consistent(pdev, R8169_RX_RING_BYTES,
&tp->RxPhyAddr);
@@ -3290,6 +3321,9 @@ static int rtl8169_open(struct net_device *dev)
rtl8169_request_timer(dev);
+ tp->saved_wolopts = 0;
+ pm_runtime_put_noidle(&pdev->dev);
+
rtl8169_check_link_status(dev, tp, tp->mmio_addr);
out:
return retval;
@@ -3299,9 +3333,13 @@ err_release_ring_2:
err_free_rx_1:
pci_free_consistent(pdev, R8169_RX_RING_BYTES, tp->RxDescArray,
tp->RxPhyAddr);
+ tp->RxDescArray = NULL;
err_free_tx_0:
pci_free_consistent(pdev, R8169_TX_RING_BYTES, tp->TxDescArray,
tp->TxPhyAddr);
+ tp->TxDescArray = NULL;
+err_pm_runtime_put:
+ pm_runtime_put_noidle(&pdev->dev);
goto out;
}
@@ -4720,6 +4758,8 @@ static int rtl8169_close(struct net_device *dev)
struct rtl8169_private *tp = netdev_priv(dev);
struct pci_dev *pdev = tp->pci_dev;
+ pm_runtime_get_sync(&pdev->dev);
+
/* update counters before going down */
rtl8169_update_counters(dev);
@@ -4734,6 +4774,8 @@ static int rtl8169_close(struct net_device *dev)
tp->TxDescArray = NULL;
tp->RxDescArray = NULL;
+ pm_runtime_put_sync(&pdev->dev);
+
return 0;
}
@@ -4759,12 +4801,12 @@ static void rtl_set_rx_mode(struct net_device *dev)
rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0xffffffff;
} else {
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
rx_mode = AcceptBroadcast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0;
- netdev_for_each_mc_addr(mclist, dev) {
- int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
+ netdev_for_each_mc_addr(ha, dev) {
+ int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
rx_mode |= AcceptMulticast;
}
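[Editorial aside, not part of the patch] The dev_mc_list to netdev_hw_addr conversions in this series keep the existing CRC-based multicast hash untouched; a minimal sketch of that scheme, assuming <linux/crc32.h>/<linux/etherdevice.h> and an invented helper name:

static void mc_hash_set(u32 mc_filter[2], const u8 *addr)
{
	/* the top 6 bits of the Ethernet CRC select one of 64 filter bits */
	int bit_nr = ether_crc(ETH_ALEN, addr) >> 26;

	/* bit_nr >> 5 picks the 32-bit word, bit_nr & 31 the bit inside it */
	mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
}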
@@ -4832,21 +4874,74 @@ static int rtl8169_suspend(struct device *device)
return 0;
}
+static void __rtl8169_resume(struct net_device *dev)
+{
+ netif_device_attach(dev);
+ rtl8169_schedule_work(dev, rtl8169_reset_task);
+}
+
static int rtl8169_resume(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
struct net_device *dev = pci_get_drvdata(pdev);
- if (!netif_running(dev))
- goto out;
+ if (netif_running(dev))
+ __rtl8169_resume(dev);
- netif_device_attach(dev);
+ return 0;
+}
+
+static int rtl8169_runtime_suspend(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ if (!tp->TxDescArray)
+ return 0;
+
+ spin_lock_irq(&tp->lock);
+ tp->saved_wolopts = __rtl8169_get_wol(tp);
+ __rtl8169_set_wol(tp, WAKE_ANY);
+ spin_unlock_irq(&tp->lock);
+
+ rtl8169_net_suspend(dev);
+
+ return 0;
+}
+
+static int rtl8169_runtime_resume(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ if (!tp->TxDescArray)
+ return 0;
+
+ spin_lock_irq(&tp->lock);
+ __rtl8169_set_wol(tp, tp->saved_wolopts);
+ tp->saved_wolopts = 0;
+ spin_unlock_irq(&tp->lock);
+
+ __rtl8169_resume(dev);
- rtl8169_schedule_work(dev, rtl8169_reset_task);
-out:
return 0;
}
+static int rtl8169_runtime_idle(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ if (!tp->TxDescArray)
+ return 0;
+
+ rtl8169_check_link_status(dev, tp, tp->mmio_addr);
+ return -EBUSY;
+}
+
static const struct dev_pm_ops rtl8169_pm_ops = {
.suspend = rtl8169_suspend,
.resume = rtl8169_resume,
@@ -4854,6 +4949,9 @@ static const struct dev_pm_ops rtl8169_pm_ops = {
.thaw = rtl8169_resume,
.poweroff = rtl8169_suspend,
.restore = rtl8169_resume,
+ .runtime_suspend = rtl8169_runtime_suspend,
+ .runtime_resume = rtl8169_runtime_resume,
+ .runtime_idle = rtl8169_runtime_idle,
};
#define RTL8169_PM_OPS (&rtl8169_pm_ops)
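[Editorial aside, not part of the patch] The r8169 hunks above follow the usual PCI runtime-PM shape: enable runtime PM at probe time only when the device can generate wake events, bracket every path that touches the hardware with pm_runtime_get_sync()/pm_runtime_put_*(), and export the runtime callbacks through dev_pm_ops. A condensed sketch under those assumptions, with invented foo_ names:

#include <linux/pci.h>
#include <linux/pm_runtime.h>

static int foo_runtime_suspend(struct device *d)
{
	/* arm wake-up (e.g. force WoL on, as rtl8169_runtime_suspend does),
	 * then quiesce the NIC */
	return 0;
}

static int foo_runtime_resume(struct device *d)
{
	/* restore the user's WoL settings and restart the NIC */
	return 0;
}

static int foo_runtime_idle(struct device *d)
{
	/* returning -EBUSY vetoes the automatic suspend, e.g. while the
	 * link state still needs polling */
	return -EBUSY;
}

static const struct dev_pm_ops foo_pm_ops = {
	.runtime_suspend = foo_runtime_suspend,
	.runtime_resume  = foo_runtime_resume,
	.runtime_idle    = foo_runtime_idle,
};

/* At probe time, runtime PM is enabled only when the device can wake us:
 *	if (pci_dev_run_wake(pdev)) {
 *		pm_runtime_set_active(&pdev->dev);
 *		pm_runtime_enable(&pdev->dev);
 *	}
 *	pm_runtime_idle(&pdev->dev);
 */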
diff --git a/drivers/net/rrunner.c b/drivers/net/rrunner.c
index f2e335f0d1b7..e26e107f93e0 100644
--- a/drivers/net/rrunner.c
+++ b/drivers/net/rrunner.c
@@ -1467,7 +1467,6 @@ static netdev_tx_t rr_start_xmit(struct sk_buff *skb,
spin_unlock_irqrestore(&rrpriv->lock, flags);
- dev->trans_start = jiffies;
return NETDEV_TX_OK;
}
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 92ae8d3de39b..668327ccd8d0 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -2400,7 +2400,7 @@ static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data,
return NULL;
}
pci_unmap_single(nic->pdev, (dma_addr_t)txds->Buffer_Pointer,
- skb->len - skb->data_len, PCI_DMA_TODEVICE);
+ skb_headlen(skb), PCI_DMA_TODEVICE);
frg_cnt = skb_shinfo(skb)->nr_frags;
if (frg_cnt) {
txds++;
@@ -2943,7 +2943,6 @@ static void s2io_netpoll(struct net_device *dev)
}
}
enable_irq(dev->irq);
- return;
}
#endif
@@ -4202,7 +4201,7 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
}
- frg_len = skb->len - skb->data_len;
+ frg_len = skb_headlen(skb);
if (offload_type == SKB_GSO_UDP) {
int ufo_size;
@@ -4756,7 +4755,6 @@ reset:
s2io_stop_all_tx_queue(sp);
schedule_work(&sp->rst_timer_task);
sw_stat->soft_reset_cnt++;
- return;
}
/**
@@ -4965,7 +4963,7 @@ static struct net_device_stats *s2io_get_stats(struct net_device *dev)
static void s2io_set_multicast(struct net_device *dev)
{
int i, j, prev_cnt;
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
struct s2io_nic *sp = netdev_priv(dev);
struct XENA_dev_config __iomem *bar0 = sp->bar0;
u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
@@ -5094,12 +5092,12 @@ static void s2io_set_multicast(struct net_device *dev)
/* Create the new Rx filter list and update the same in H/W. */
i = 0;
- netdev_for_each_mc_addr(mclist, dev) {
- memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
+ netdev_for_each_mc_addr(ha, dev) {
+ memcpy(sp->usr_addrs[i].addr, ha->addr,
ETH_ALEN);
mac_addr = 0;
for (j = 0; j < ETH_ALEN; j++) {
- mac_addr |= mclist->dmi_addr[j];
+ mac_addr |= ha->addr[j];
mac_addr <<= 8;
}
mac_addr >>= 8;
@@ -8645,7 +8643,6 @@ static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
first->truesize += skb->truesize;
lro->last_frag = skb;
swstats->clubbed_frms_cnt++;
- return;
}
/**
diff --git a/drivers/net/s6gmac.c b/drivers/net/s6gmac.c
index 45f26344b368..a7ff8ea342b4 100644
--- a/drivers/net/s6gmac.c
+++ b/drivers/net/s6gmac.c
@@ -396,7 +396,6 @@ static void s6gmac_rx_interrupt(struct net_device *dev)
} else {
skb_put(skb, (pfx >> S6_GMAC_BURST_POSTRD_LEN)
& S6_GMAC_BURST_POSTRD_LEN_MASK);
- skb->dev = dev;
skb->protocol = eth_type_trans(skb, dev);
skb->ip_summed = CHECKSUM_UNNECESSARY;
netif_rx(skb);
@@ -853,8 +852,8 @@ static int s6gmac_tx(struct sk_buff *skb, struct net_device *dev)
{
struct s6gmac *pd = netdev_priv(dev);
unsigned long flags;
+
spin_lock_irqsave(&pd->lock, flags);
- dev->trans_start = jiffies;
writel(skb->len << S6_GMAC_BURST_PREWR_LEN |
0 << S6_GMAC_BURST_PREWR_CFE |
1 << S6_GMAC_BURST_PREWR_PPE |
diff --git a/drivers/net/sb1000.c b/drivers/net/sb1000.c
index abc8eefdd4b6..a9ae505e1baf 100644
--- a/drivers/net/sb1000.c
+++ b/drivers/net/sb1000.c
@@ -426,7 +426,6 @@ sb1000_send_command(const int ioaddr[], const char* name,
if (sb1000_debug > 3)
printk(KERN_DEBUG "%s: sb1000_send_command out: %02x%02x%02x%02x"
"%02x%02x\n", name, out[0], out[1], out[2], out[3], out[4], out[5]);
- return;
}
/* Card Read Status (to be used during frame rx) */
@@ -438,7 +437,6 @@ sb1000_read_status(const int ioaddr[], unsigned char in[])
in[3] = inb(ioaddr[0] + 3);
in[4] = inb(ioaddr[0] + 4);
in[0] = inb(ioaddr[0] + 5);
- return;
}
/* Issue Read Command (to be used during frame rx) */
@@ -450,7 +448,6 @@ sb1000_issue_read_command(const int ioaddr[], const char* name)
sb1000_wait_for_ready_clear(ioaddr, name);
outb(0xa0, ioaddr[0] + 6);
sb1000_send_command(ioaddr, name, Command0);
- return;
}
@@ -733,7 +730,6 @@ sb1000_print_status_buffer(const char* name, unsigned char st[],
printk("\n");
}
}
- return;
}
/*
@@ -926,7 +922,6 @@ sb1000_error_dpc(struct net_device *dev)
sb1000_read_status(ioaddr, st);
if (st[1] & 0x10)
lp->rx_error_dpc_count = ErrorDpcCounterInitialize;
- return;
}
diff --git a/drivers/net/sb1250-mac.c b/drivers/net/sb1250-mac.c
index 04efc0c1bda9..1f3acc3a5dfd 100644
--- a/drivers/net/sb1250-mac.c
+++ b/drivers/net/sb1250-mac.c
@@ -48,23 +48,6 @@
#include <asm/io.h>
#include <asm/processor.h> /* Processor type for cache alignment. */
-/* This is only here until the firmware is ready. In that case,
- the firmware leaves the ethernet address in the register for us. */
-#ifdef CONFIG_SIBYTE_STANDALONE
-#define SBMAC_ETH0_HWADDR "40:00:00:00:01:00"
-#define SBMAC_ETH1_HWADDR "40:00:00:00:01:01"
-#define SBMAC_ETH2_HWADDR "40:00:00:00:01:02"
-#define SBMAC_ETH3_HWADDR "40:00:00:00:01:03"
-#endif
-
-
-/* These identify the driver base version and may not be removed. */
-#if 0
-static char version1[] __initdata =
-"sb1250-mac.c:1.00 1/11/2001 Written by Mitch Lichtenberg\n";
-#endif
-
-
/* Operational parameters that usually are not changed. */
#define CONFIG_SBMAC_COALESCE
@@ -349,7 +332,6 @@ static int sbmac_mii_write(struct mii_bus *bus, int phyaddr, int regidx,
********************************************************************* */
static char sbmac_string[] = "sb1250-mac";
-static char sbmac_pretty[] = "SB1250 MAC";
static char sbmac_mdio_string[] = "sb1250-mac-mdio";
@@ -2086,8 +2068,6 @@ static int sbmac_start_tx(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_BUSY;
}
- dev->trans_start = jiffies;
-
spin_unlock_irqrestore(&sc->sbm_lock, flags);
return NETDEV_TX_OK;
@@ -2112,7 +2092,7 @@ static void sbmac_setmulti(struct sbmac_softc *sc)
uint64_t reg;
void __iomem *port;
int idx;
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
struct net_device *dev = sc->sbm_dev;
/*
@@ -2161,10 +2141,10 @@ static void sbmac_setmulti(struct sbmac_softc *sc)
* XXX if the table overflows */
idx = 1; /* skip station address */
- netdev_for_each_mc_addr(mclist, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
if (idx == MAC_ADDR_COUNT)
break;
- reg = sbmac_addr2reg(mclist->dmi_addr);
+ reg = sbmac_addr2reg(ha->addr);
port = sc->sbm_base + R_MAC_ADDR_BASE+(idx * sizeof(uint64_t));
__raw_writeq(reg, port);
idx++;
@@ -2182,85 +2162,6 @@ static void sbmac_setmulti(struct sbmac_softc *sc)
}
}
-#if defined(SBMAC_ETH0_HWADDR) || defined(SBMAC_ETH1_HWADDR) || defined(SBMAC_ETH2_HWADDR) || defined(SBMAC_ETH3_HWADDR)
-/**********************************************************************
- * SBMAC_PARSE_XDIGIT(str)
- *
- * Parse a hex digit, returning its value
- *
- * Input parameters:
- * str - character
- *
- * Return value:
- * hex value, or -1 if invalid
- ********************************************************************* */
-
-static int sbmac_parse_xdigit(char str)
-{
- int digit;
-
- if ((str >= '0') && (str <= '9'))
- digit = str - '0';
- else if ((str >= 'a') && (str <= 'f'))
- digit = str - 'a' + 10;
- else if ((str >= 'A') && (str <= 'F'))
- digit = str - 'A' + 10;
- else
- return -1;
-
- return digit;
-}
-
-/**********************************************************************
- * SBMAC_PARSE_HWADDR(str,hwaddr)
- *
- * Convert a string in the form xx:xx:xx:xx:xx:xx into a 6-byte
- * Ethernet address.
- *
- * Input parameters:
- * str - string
- * hwaddr - pointer to hardware address
- *
- * Return value:
- * 0 if ok, else -1
- ********************************************************************* */
-
-static int sbmac_parse_hwaddr(char *str, unsigned char *hwaddr)
-{
- int digit1,digit2;
- int idx = 6;
-
- while (*str && (idx > 0)) {
- digit1 = sbmac_parse_xdigit(*str);
- if (digit1 < 0)
- return -1;
- str++;
- if (!*str)
- return -1;
-
- if ((*str == ':') || (*str == '-')) {
- digit2 = digit1;
- digit1 = 0;
- }
- else {
- digit2 = sbmac_parse_xdigit(*str);
- if (digit2 < 0)
- return -1;
- str++;
- }
-
- *hwaddr++ = (digit1 << 4) | digit2;
- idx--;
-
- if (*str == '-')
- str++;
- if (*str == ':')
- str++;
- }
- return 0;
-}
-#endif
-
static int sb1250_change_mtu(struct net_device *_dev, int new_mtu)
{
if (new_mtu > ENET_PACKET_SIZE)
@@ -2585,7 +2486,7 @@ static void sbmac_tx_timeout (struct net_device *dev)
spin_lock_irqsave(&sc->sbm_lock, flags);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
dev->stats.tx_errors++;
spin_unlock_irqrestore(&sc->sbm_lock, flags);
@@ -2662,7 +2563,6 @@ static int sbmac_close(struct net_device *dev)
static int sbmac_poll(struct napi_struct *napi, int budget)
{
struct sbmac_softc *sc = container_of(napi, struct sbmac_softc, napi);
- struct net_device *dev = sc->sbm_dev;
int work_done;
work_done = sbdma_rx_process(sc, &(sc->sbm_rxdma), budget, 1);
@@ -2766,162 +2666,6 @@ static int __exit sbmac_remove(struct platform_device *pldev)
return 0;
}
-
-static struct platform_device **sbmac_pldev;
-static int sbmac_max_units;
-
-#if defined(SBMAC_ETH0_HWADDR) || defined(SBMAC_ETH1_HWADDR) || defined(SBMAC_ETH2_HWADDR) || defined(SBMAC_ETH3_HWADDR)
-static void __init sbmac_setup_hwaddr(int idx, char *addr)
-{
- void __iomem *sbm_base;
- unsigned long start, end;
- uint8_t eaddr[6];
- uint64_t val;
-
- if (idx >= sbmac_max_units)
- return;
-
- start = A_MAC_CHANNEL_BASE(idx);
- end = A_MAC_CHANNEL_BASE(idx + 1) - 1;
-
- sbm_base = ioremap_nocache(start, end - start + 1);
- if (!sbm_base) {
- printk(KERN_ERR "%s: unable to map device registers\n",
- sbmac_string);
- return;
- }
-
- sbmac_parse_hwaddr(addr, eaddr);
- val = sbmac_addr2reg(eaddr);
- __raw_writeq(val, sbm_base + R_MAC_ETHERNET_ADDR);
- val = __raw_readq(sbm_base + R_MAC_ETHERNET_ADDR);
-
- iounmap(sbm_base);
-}
-#endif
-
-static int __init sbmac_platform_probe_one(int idx)
-{
- struct platform_device *pldev;
- struct {
- struct resource r;
- char name[strlen(sbmac_pretty) + 4];
- } *res;
- int err;
-
- res = kzalloc(sizeof(*res), GFP_KERNEL);
- if (!res) {
- printk(KERN_ERR "%s.%d: unable to allocate memory\n",
- sbmac_string, idx);
- err = -ENOMEM;
- goto out_err;
- }
-
- /*
- * This is the base address of the MAC.
- */
- snprintf(res->name, sizeof(res->name), "%s %d", sbmac_pretty, idx);
- res->r.name = res->name;
- res->r.flags = IORESOURCE_MEM;
- res->r.start = A_MAC_CHANNEL_BASE(idx);
- res->r.end = A_MAC_CHANNEL_BASE(idx + 1) - 1;
-
- pldev = platform_device_register_simple(sbmac_string, idx, &res->r, 1);
- if (IS_ERR(pldev)) {
- printk(KERN_ERR "%s.%d: unable to register platform device\n",
- sbmac_string, idx);
- err = PTR_ERR(pldev);
- goto out_kfree;
- }
-
- if (!pldev->dev.driver) {
- err = 0; /* No hardware at this address. */
- goto out_unregister;
- }
-
- sbmac_pldev[idx] = pldev;
- return 0;
-
-out_unregister:
- platform_device_unregister(pldev);
-
-out_kfree:
- kfree(res);
-
-out_err:
- return err;
-}
-
-static void __init sbmac_platform_probe(void)
-{
- int i;
-
- /* Set the number of available units based on the SOC type. */
- switch (soc_type) {
- case K_SYS_SOC_TYPE_BCM1250:
- case K_SYS_SOC_TYPE_BCM1250_ALT:
- sbmac_max_units = 3;
- break;
- case K_SYS_SOC_TYPE_BCM1120:
- case K_SYS_SOC_TYPE_BCM1125:
- case K_SYS_SOC_TYPE_BCM1125H:
- case K_SYS_SOC_TYPE_BCM1250_ALT2: /* Hybrid */
- sbmac_max_units = 2;
- break;
- case K_SYS_SOC_TYPE_BCM1x55:
- case K_SYS_SOC_TYPE_BCM1x80:
- sbmac_max_units = 4;
- break;
- default:
- return; /* none */
- }
-
- /*
- * For bringup when not using the firmware, we can pre-fill
- * the MAC addresses using the environment variables
- * specified in this file (or maybe from the config file?)
- */
-#ifdef SBMAC_ETH0_HWADDR
- sbmac_setup_hwaddr(0, SBMAC_ETH0_HWADDR);
-#endif
-#ifdef SBMAC_ETH1_HWADDR
- sbmac_setup_hwaddr(1, SBMAC_ETH1_HWADDR);
-#endif
-#ifdef SBMAC_ETH2_HWADDR
- sbmac_setup_hwaddr(2, SBMAC_ETH2_HWADDR);
-#endif
-#ifdef SBMAC_ETH3_HWADDR
- sbmac_setup_hwaddr(3, SBMAC_ETH3_HWADDR);
-#endif
-
- sbmac_pldev = kcalloc(sbmac_max_units, sizeof(*sbmac_pldev),
- GFP_KERNEL);
- if (!sbmac_pldev) {
- printk(KERN_ERR "%s: unable to allocate memory\n",
- sbmac_string);
- return;
- }
-
- /*
- * Walk through the Ethernet controllers and find
- * those who have their MAC addresses set.
- */
- for (i = 0; i < sbmac_max_units; i++)
- if (sbmac_platform_probe_one(i))
- break;
-}
-
-
-static void __exit sbmac_platform_cleanup(void)
-{
- int i;
-
- for (i = 0; i < sbmac_max_units; i++)
- platform_device_unregister(sbmac_pldev[i]);
- kfree(sbmac_pldev);
-}
-
-
static struct platform_driver sbmac_driver = {
.probe = sbmac_probe,
.remove = __exit_p(sbmac_remove),
@@ -2932,20 +2676,11 @@ static struct platform_driver sbmac_driver = {
static int __init sbmac_init_module(void)
{
- int err;
-
- err = platform_driver_register(&sbmac_driver);
- if (err)
- return err;
-
- sbmac_platform_probe();
-
- return err;
+ return platform_driver_register(&sbmac_driver);
}
static void __exit sbmac_cleanup_module(void)
{
- sbmac_platform_cleanup();
platform_driver_unregister(&sbmac_driver);
}
diff --git a/drivers/net/sc92031.c b/drivers/net/sc92031.c
index d87c4787fffa..8c4067af32b0 100644
--- a/drivers/net/sc92031.c
+++ b/drivers/net/sc92031.c
@@ -433,13 +433,13 @@ static void _sc92031_set_mar(struct net_device *dev)
(dev->flags & IFF_ALLMULTI))
mar0 = mar1 = 0xffffffff;
else if (dev->flags & IFF_MULTICAST) {
- struct dev_mc_list *mc_list;
+ struct netdev_hw_addr *ha;
- netdev_for_each_mc_addr(mc_list, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
u32 crc;
unsigned bit = 0;
- crc = ~ether_crc(ETH_ALEN, mc_list->dmi_addr);
+ crc = ~ether_crc(ETH_ALEN, ha->addr);
crc >>= 24;
if (crc & 0x01) bit |= 0x02;
@@ -987,8 +987,6 @@ static netdev_tx_t sc92031_start_xmit(struct sk_buff *skb,
iowrite32(tx_status, port_base + TxStatus0 + entry * 4);
mmiowb();
- dev->trans_start = jiffies;
-
if (priv->tx_head - priv->tx_tail >= NUM_TX_DESC)
netif_stop_queue(dev);
diff --git a/drivers/net/seeq8005.c b/drivers/net/seeq8005.c
index 374832cca11f..d2fce98f557f 100644
--- a/drivers/net/seeq8005.c
+++ b/drivers/net/seeq8005.c
@@ -390,7 +390,7 @@ static void seeq8005_timeout(struct net_device *dev)
tx_done(dev) ? "IRQ conflict" : "network cable problem");
/* Try to restart the adaptor. */
seeq8005_init(dev, 1);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
}
@@ -411,7 +411,6 @@ static netdev_tx_t seeq8005_send_packet(struct sk_buff *skb,
netif_stop_queue(dev);
hardware_send_packet(dev, buf, length);
- dev->trans_start = jiffies;
dev->stats.tx_bytes += length;
dev_kfree_skb (skb);
/* You might need to clean up and record Tx statistics here. */
@@ -579,7 +578,6 @@ static void seeq8005_rx(struct net_device *dev)
/* If any worth-while packets have been received, netif_rx()
has done a mark_bh(NET_BH) for us and will work on them
when we get to the bottom-half routine. */
- return;
}
/* The inverse routine to net_open(). */
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 649a264d6a81..156460527231 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -225,17 +225,17 @@ static void efx_fini_channels(struct efx_nic *efx);
* never be concurrently called more than once on the same channel,
* though different channels may be being processed concurrently.
*/
-static int efx_process_channel(struct efx_channel *channel, int rx_quota)
+static int efx_process_channel(struct efx_channel *channel, int budget)
{
struct efx_nic *efx = channel->efx;
- int rx_packets;
+ int spent;
if (unlikely(efx->reset_pending != RESET_TYPE_NONE ||
!channel->enabled))
return 0;
- rx_packets = efx_nic_process_eventq(channel, rx_quota);
- if (rx_packets == 0)
+ spent = efx_nic_process_eventq(channel, budget);
+ if (spent == 0)
return 0;
/* Deliver last RX packet. */
@@ -249,7 +249,7 @@ static int efx_process_channel(struct efx_channel *channel, int rx_quota)
efx_fast_push_rx_descriptors(&efx->rx_queue[channel->channel]);
- return rx_packets;
+ return spent;
}
/* Mark channel as finished processing
@@ -278,17 +278,17 @@ static int efx_poll(struct napi_struct *napi, int budget)
{
struct efx_channel *channel =
container_of(napi, struct efx_channel, napi_str);
- int rx_packets;
+ int spent;
EFX_TRACE(channel->efx, "channel %d NAPI poll executing on CPU %d\n",
channel->channel, raw_smp_processor_id());
- rx_packets = efx_process_channel(channel, budget);
+ spent = efx_process_channel(channel, budget);
- if (rx_packets < budget) {
+ if (spent < budget) {
struct efx_nic *efx = channel->efx;
- if (channel->used_flags & EFX_USED_BY_RX &&
+ if (channel->channel < efx->n_rx_channels &&
efx->irq_rx_adaptive &&
unlikely(++channel->irq_count == 1000)) {
if (unlikely(channel->irq_mod_score <
@@ -318,7 +318,7 @@ static int efx_poll(struct napi_struct *napi, int budget)
efx_channel_processed(channel);
}
- return rx_packets;
+ return spent;
}
/* Process the eventq of the specified channel immediately on this CPU
@@ -333,7 +333,6 @@ void efx_process_channel_now(struct efx_channel *channel)
{
struct efx_nic *efx = channel->efx;
- BUG_ON(!channel->used_flags);
BUG_ON(!channel->enabled);
/* Disable interrupts and wait for ISRs to complete */
@@ -446,12 +445,12 @@ static void efx_set_channel_names(struct efx_nic *efx)
efx_for_each_channel(channel, efx) {
number = channel->channel;
- if (efx->n_channels > efx->n_rx_queues) {
- if (channel->channel < efx->n_rx_queues) {
+ if (efx->n_channels > efx->n_rx_channels) {
+ if (channel->channel < efx->n_rx_channels) {
type = "-rx";
} else {
type = "-tx";
- number -= efx->n_rx_queues;
+ number -= efx->n_rx_channels;
}
}
snprintf(channel->name, sizeof(channel->name),
@@ -585,8 +584,6 @@ static void efx_remove_channel(struct efx_channel *channel)
efx_for_each_channel_tx_queue(tx_queue, channel)
efx_remove_tx_queue(tx_queue);
efx_remove_eventq(channel);
-
- channel->used_flags = 0;
}
void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay)
@@ -956,10 +953,9 @@ static void efx_fini_io(struct efx_nic *efx)
pci_disable_device(efx->pci_dev);
}
-/* Get number of RX queues wanted. Return number of online CPU
- * packages in the expectation that an IRQ balancer will spread
- * interrupts across them. */
-static int efx_wanted_rx_queues(void)
+/* Get number of channels wanted. Each channel will have its own IRQ,
+ * 1 RX queue and/or 2 TX queues. */
+static int efx_wanted_channels(void)
{
cpumask_var_t core_mask;
int count;
@@ -995,34 +991,39 @@ static void efx_probe_interrupts(struct efx_nic *efx)
if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
struct msix_entry xentries[EFX_MAX_CHANNELS];
- int wanted_ints;
- int rx_queues;
+ int n_channels;
- /* We want one RX queue and interrupt per CPU package
- * (or as specified by the rss_cpus module parameter).
- * We will need one channel per interrupt.
- */
- rx_queues = rss_cpus ? rss_cpus : efx_wanted_rx_queues();
- wanted_ints = rx_queues + (separate_tx_channels ? 1 : 0);
- wanted_ints = min(wanted_ints, max_channels);
+ n_channels = efx_wanted_channels();
+ if (separate_tx_channels)
+ n_channels *= 2;
+ n_channels = min(n_channels, max_channels);
- for (i = 0; i < wanted_ints; i++)
+ for (i = 0; i < n_channels; i++)
xentries[i].entry = i;
- rc = pci_enable_msix(efx->pci_dev, xentries, wanted_ints);
+ rc = pci_enable_msix(efx->pci_dev, xentries, n_channels);
if (rc > 0) {
EFX_ERR(efx, "WARNING: Insufficient MSI-X vectors"
- " available (%d < %d).\n", rc, wanted_ints);
+ " available (%d < %d).\n", rc, n_channels);
EFX_ERR(efx, "WARNING: Performance may be reduced.\n");
- EFX_BUG_ON_PARANOID(rc >= wanted_ints);
- wanted_ints = rc;
+ EFX_BUG_ON_PARANOID(rc >= n_channels);
+ n_channels = rc;
rc = pci_enable_msix(efx->pci_dev, xentries,
- wanted_ints);
+ n_channels);
}
if (rc == 0) {
- efx->n_rx_queues = min(rx_queues, wanted_ints);
- efx->n_channels = wanted_ints;
- for (i = 0; i < wanted_ints; i++)
+ efx->n_channels = n_channels;
+ if (separate_tx_channels) {
+ efx->n_tx_channels =
+ max(efx->n_channels / 2, 1U);
+ efx->n_rx_channels =
+ max(efx->n_channels -
+ efx->n_tx_channels, 1U);
+ } else {
+ efx->n_tx_channels = efx->n_channels;
+ efx->n_rx_channels = efx->n_channels;
+ }
+ for (i = 0; i < n_channels; i++)
efx->channel[i].irq = xentries[i].vector;
} else {
/* Fall back to single channel MSI */
@@ -1033,8 +1034,9 @@ static void efx_probe_interrupts(struct efx_nic *efx)
/* Try single interrupt MSI */
if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
- efx->n_rx_queues = 1;
efx->n_channels = 1;
+ efx->n_rx_channels = 1;
+ efx->n_tx_channels = 1;
rc = pci_enable_msi(efx->pci_dev);
if (rc == 0) {
efx->channel[0].irq = efx->pci_dev->irq;
@@ -1046,8 +1048,9 @@ static void efx_probe_interrupts(struct efx_nic *efx)
/* Assume legacy interrupts */
if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
- efx->n_rx_queues = 1;
efx->n_channels = 1 + (separate_tx_channels ? 1 : 0);
+ efx->n_rx_channels = 1;
+ efx->n_tx_channels = 1;
efx->legacy_irq = efx->pci_dev->irq;
}
}
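[Editorial aside, not part of the patch] The MSI-X branch above divides the granted vectors between RX and TX channels when separate_tx_channels is set; a standalone sketch of just that split, with an invented helper name:

#include <linux/kernel.h>	/* max() */
#include <linux/types.h>

static void split_channels(unsigned int n_channels, bool separate_tx,
			   unsigned int *n_rx, unsigned int *n_tx)
{
	if (separate_tx) {
		/* roughly half the channels for TX, the rest for RX,
		 * never letting either side drop to zero */
		*n_tx = max(n_channels / 2, 1U);
		*n_rx = max(n_channels - *n_tx, 1U);
	} else {
		/* every channel carries both RX and TX work */
		*n_rx = n_channels;
		*n_tx = n_channels;
	}
}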
@@ -1068,21 +1071,24 @@ static void efx_remove_interrupts(struct efx_nic *efx)
static void efx_set_channels(struct efx_nic *efx)
{
+ struct efx_channel *channel;
struct efx_tx_queue *tx_queue;
struct efx_rx_queue *rx_queue;
+ unsigned tx_channel_offset =
+ separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;
- efx_for_each_tx_queue(tx_queue, efx) {
- if (separate_tx_channels)
- tx_queue->channel = &efx->channel[efx->n_channels-1];
- else
- tx_queue->channel = &efx->channel[0];
- tx_queue->channel->used_flags |= EFX_USED_BY_TX;
+ efx_for_each_channel(channel, efx) {
+ if (channel->channel - tx_channel_offset < efx->n_tx_channels) {
+ channel->tx_queue = &efx->tx_queue[
+ (channel->channel - tx_channel_offset) *
+ EFX_TXQ_TYPES];
+ efx_for_each_channel_tx_queue(tx_queue, channel)
+ tx_queue->channel = channel;
+ }
}
- efx_for_each_rx_queue(rx_queue, efx) {
+ efx_for_each_rx_queue(rx_queue, efx)
rx_queue->channel = &efx->channel[rx_queue->queue];
- rx_queue->channel->used_flags |= EFX_USED_BY_RX;
- }
}
static int efx_probe_nic(struct efx_nic *efx)
@@ -1096,11 +1102,12 @@ static int efx_probe_nic(struct efx_nic *efx)
if (rc)
return rc;
- /* Determine the number of channels and RX queues by trying to hook
+ /* Determine the number of channels and queues by trying to hook
* in MSI-X interrupts. */
efx_probe_interrupts(efx);
efx_set_channels(efx);
+ efx->net_dev->real_num_tx_queues = efx->n_tx_channels;
/* Initialise the interrupt moderation settings */
efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true);
@@ -1187,11 +1194,12 @@ static void efx_start_all(struct efx_nic *efx)
/* Mark the port as enabled so port reconfigurations can start, then
* restart the transmit interface early so the watchdog timer stops */
efx_start_port(efx);
- if (efx_dev_registered(efx))
- efx_wake_queue(efx);
- efx_for_each_channel(channel, efx)
+ efx_for_each_channel(channel, efx) {
+ if (efx_dev_registered(efx))
+ efx_wake_queue(channel);
efx_start_channel(channel);
+ }
efx_nic_enable_interrupts(efx);
@@ -1282,7 +1290,9 @@ static void efx_stop_all(struct efx_nic *efx)
/* Stop the kernel transmit interface late, so the watchdog
* timer isn't ticking over the flush */
if (efx_dev_registered(efx)) {
- efx_stop_queue(efx);
+ struct efx_channel *channel;
+ efx_for_each_channel(channel, efx)
+ efx_stop_queue(channel);
netif_tx_lock_bh(efx->net_dev);
netif_tx_unlock_bh(efx->net_dev);
}
@@ -1537,9 +1547,8 @@ static void efx_watchdog(struct net_device *net_dev)
{
struct efx_nic *efx = netdev_priv(net_dev);
- EFX_ERR(efx, "TX stuck with stop_count=%d port_enabled=%d:"
- " resetting channels\n",
- atomic_read(&efx->netif_stop_count), efx->port_enabled);
+ EFX_ERR(efx, "TX stuck with port_enabled=%d: resetting channels\n",
+ efx->port_enabled);
efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
}
@@ -1603,7 +1612,7 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data)
static void efx_set_multicast_list(struct net_device *net_dev)
{
struct efx_nic *efx = netdev_priv(net_dev);
- struct dev_mc_list *mc_list;
+ struct netdev_hw_addr *ha;
union efx_multicast_hash *mc_hash = &efx->multicast_hash;
u32 crc;
int bit;
@@ -1615,8 +1624,8 @@ static void efx_set_multicast_list(struct net_device *net_dev)
memset(mc_hash, 0xff, sizeof(*mc_hash));
} else {
memset(mc_hash, 0x00, sizeof(*mc_hash));
- netdev_for_each_mc_addr(mc_list, net_dev) {
- crc = ether_crc_le(ETH_ALEN, mc_list->dmi_addr);
+ netdev_for_each_mc_addr(ha, net_dev) {
+ crc = ether_crc_le(ETH_ALEN, ha->addr);
bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
set_bit_le(bit, mc_hash->byte);
}
@@ -2014,22 +2023,22 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
efx->net_dev = net_dev;
efx->rx_checksum_enabled = true;
- spin_lock_init(&efx->netif_stop_lock);
spin_lock_init(&efx->stats_lock);
mutex_init(&efx->mac_lock);
efx->mac_op = type->default_mac_ops;
efx->phy_op = &efx_dummy_phy_operations;
efx->mdio.dev = net_dev;
INIT_WORK(&efx->mac_work, efx_mac_work);
- atomic_set(&efx->netif_stop_count, 1);
for (i = 0; i < EFX_MAX_CHANNELS; i++) {
channel = &efx->channel[i];
channel->efx = efx;
channel->channel = i;
channel->work_pending = false;
+ spin_lock_init(&channel->tx_stop_lock);
+ atomic_set(&channel->tx_stop_count, 1);
}
- for (i = 0; i < EFX_TX_QUEUE_COUNT; i++) {
+ for (i = 0; i < EFX_MAX_TX_QUEUES; i++) {
tx_queue = &efx->tx_queue[i];
tx_queue->efx = efx;
tx_queue->queue = i;
@@ -2201,7 +2210,7 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
int i, rc;
/* Allocate and initialise a struct net_device and struct efx_nic */
- net_dev = alloc_etherdev(sizeof(*efx));
+ net_dev = alloc_etherdev_mq(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES);
if (!net_dev)
return -ENOMEM;
net_dev->features |= (type->offload_features | NETIF_F_SG |
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h
index 7eff0a615cb3..ffd708c5304a 100644
--- a/drivers/net/sfc/efx.h
+++ b/drivers/net/sfc/efx.h
@@ -35,8 +35,8 @@ efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev);
extern netdev_tx_t
efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
-extern void efx_stop_queue(struct efx_nic *efx);
-extern void efx_wake_queue(struct efx_nic *efx);
+extern void efx_stop_queue(struct efx_channel *channel);
+extern void efx_wake_queue(struct efx_channel *channel);
#define EFX_TXQ_SIZE 1024
#define EFX_TXQ_MASK (EFX_TXQ_SIZE - 1)
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index d9f9c02a928e..22026bfbc4c1 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -304,7 +304,7 @@ static int efx_fill_loopback_test(struct efx_nic *efx,
{
struct efx_tx_queue *tx_queue;
- efx_for_each_tx_queue(tx_queue, efx) {
+ efx_for_each_channel_tx_queue(tx_queue, &efx->channel[0]) {
efx_fill_test(test_index++, strings, data,
&lb_tests->tx_sent[tx_queue->queue],
EFX_TX_QUEUE_NAME(tx_queue),
@@ -647,7 +647,7 @@ static int efx_ethtool_get_coalesce(struct net_device *net_dev,
efx_for_each_tx_queue(tx_queue, efx) {
channel = tx_queue->channel;
if (channel->irq_moderation < coalesce->tx_coalesce_usecs_irq) {
- if (channel->used_flags != EFX_USED_BY_RX_TX)
+ if (channel->channel < efx->n_rx_channels)
coalesce->tx_coalesce_usecs_irq =
channel->irq_moderation;
else
@@ -690,7 +690,7 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev,
/* If the channel is shared only allow RX parameters to be set */
efx_for_each_tx_queue(tx_queue, efx) {
- if ((tx_queue->channel->used_flags == EFX_USED_BY_RX_TX) &&
+ if ((tx_queue->channel->channel < efx->n_rx_channels) &&
tx_usecs) {
EFX_ERR(efx, "Channel is shared. "
"Only RX coalescing may be set\n");
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index 08278e7302b3..655b697b45b2 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -175,16 +175,19 @@ irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
- /* Check to see if we have a serious error condition */
- syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
- if (unlikely(syserr))
- return efx_nic_fatal_interrupt(efx);
-
/* Determine interrupting queues, clear interrupt status
* register and acknowledge the device interrupt.
*/
BUILD_BUG_ON(FSF_AZ_NET_IVEC_INT_Q_WIDTH > EFX_MAX_CHANNELS);
queues = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_INT_Q);
+
+ /* Check to see if we have a serious error condition */
+ if (queues & (1U << efx->fatal_irq_level)) {
+ syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
+ if (unlikely(syserr))
+ return efx_nic_fatal_interrupt(efx);
+ }
+
EFX_ZERO_OWORD(*int_ker);
wmb(); /* Ensure the vector is cleared before interrupt ack */
falcon_irq_ack_a1(efx);
@@ -504,6 +507,9 @@ static void falcon_reset_macs(struct efx_nic *efx)
/* Ensure the correct MAC is selected before statistics
* are re-enabled by the caller */
efx_writeo(efx, &mac_ctrl, FR_AB_MAC_CTRL);
+
+ /* This can run even when the GMAC is selected */
+ falcon_setup_xaui(efx);
}
void falcon_drain_tx_fifo(struct efx_nic *efx)
diff --git a/drivers/net/sfc/falcon_xmac.c b/drivers/net/sfc/falcon_xmac.c
index 8ccab2c67a20..c84a2ce2ccbb 100644
--- a/drivers/net/sfc/falcon_xmac.c
+++ b/drivers/net/sfc/falcon_xmac.c
@@ -26,7 +26,7 @@
*************************************************************************/
/* Configure the XAUI driver that is an output from Falcon */
-static void falcon_setup_xaui(struct efx_nic *efx)
+void falcon_setup_xaui(struct efx_nic *efx)
{
efx_oword_t sdctl, txdrv;
@@ -85,14 +85,14 @@ int falcon_reset_xaui(struct efx_nic *efx)
return -ETIMEDOUT;
}
-static void falcon_mask_status_intr(struct efx_nic *efx, bool enable)
+static void falcon_ack_status_intr(struct efx_nic *efx)
{
efx_oword_t reg;
if ((efx_nic_rev(efx) != EFX_REV_FALCON_B0) || LOOPBACK_INTERNAL(efx))
return;
- /* We expect xgmii faults if the wireside link is up */
+ /* We expect xgmii faults if the wireside link is down */
if (!EFX_WORKAROUND_5147(efx) || !efx->link_state.up)
return;
@@ -101,14 +101,7 @@ static void falcon_mask_status_intr(struct efx_nic *efx, bool enable)
if (efx->xmac_poll_required)
return;
- /* Flush the ISR */
- if (enable)
- efx_reado(efx, &reg, FR_AB_XM_MGT_INT_MSK);
-
- EFX_POPULATE_OWORD_2(reg,
- FRF_AB_XM_MSK_RMTFLT, !enable,
- FRF_AB_XM_MSK_LCLFLT, !enable);
- efx_writeo(efx, &reg, FR_AB_XM_MGT_INT_MASK);
+ efx_reado(efx, &reg, FR_AB_XM_MGT_INT_MSK);
}
static bool falcon_xgxs_link_ok(struct efx_nic *efx)
@@ -283,15 +276,13 @@ static bool falcon_xmac_check_fault(struct efx_nic *efx)
static int falcon_reconfigure_xmac(struct efx_nic *efx)
{
- falcon_mask_status_intr(efx, false);
-
falcon_reconfigure_xgxs_core(efx);
falcon_reconfigure_xmac_core(efx);
falcon_reconfigure_mac_wrapper(efx);
efx->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 5);
- falcon_mask_status_intr(efx, true);
+ falcon_ack_status_intr(efx);
return 0;
}
@@ -362,9 +353,8 @@ void falcon_poll_xmac(struct efx_nic *efx)
!efx->xmac_poll_required)
return;
- falcon_mask_status_intr(efx, false);
efx->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 1);
- falcon_mask_status_intr(efx, true);
+ falcon_ack_status_intr(efx);
}
struct efx_mac_operations falcon_xmac_operations = {
diff --git a/drivers/net/sfc/mcdi.c b/drivers/net/sfc/mcdi.c
index c48669c77414..93cc3c1b9450 100644
--- a/drivers/net/sfc/mcdi.c
+++ b/drivers/net/sfc/mcdi.c
@@ -613,7 +613,7 @@ int efx_mcdi_fwver(struct efx_nic *efx, u64 *version, u32 *build)
}
if (outlength < MC_CMD_GET_VERSION_V1_OUT_LEN) {
- rc = -EMSGSIZE;
+ rc = -EIO;
goto fail;
}
@@ -647,8 +647,10 @@ int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
outbuf, sizeof(outbuf), &outlen);
if (rc)
goto fail;
- if (outlen < MC_CMD_DRV_ATTACH_OUT_LEN)
+ if (outlen < MC_CMD_DRV_ATTACH_OUT_LEN) {
+ rc = -EIO;
goto fail;
+ }
if (was_attached != NULL)
*was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE);
@@ -676,7 +678,7 @@ int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
goto fail;
if (outlen < MC_CMD_GET_BOARD_CFG_OUT_LEN) {
- rc = -EMSGSIZE;
+ rc = -EIO;
goto fail;
}
@@ -738,8 +740,10 @@ int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out)
outbuf, sizeof(outbuf), &outlen);
if (rc)
goto fail;
- if (outlen < MC_CMD_NVRAM_TYPES_OUT_LEN)
+ if (outlen < MC_CMD_NVRAM_TYPES_OUT_LEN) {
+ rc = -EIO;
goto fail;
+ }
*nvram_types_out = MCDI_DWORD(outbuf, NVRAM_TYPES_OUT_TYPES);
return 0;
@@ -765,8 +769,10 @@ int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
outbuf, sizeof(outbuf), &outlen);
if (rc)
goto fail;
- if (outlen < MC_CMD_NVRAM_INFO_OUT_LEN)
+ if (outlen < MC_CMD_NVRAM_INFO_OUT_LEN) {
+ rc = -EIO;
goto fail;
+ }
*size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE);
*erase_size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_ERASESIZE);
@@ -926,20 +932,26 @@ int efx_mcdi_nvram_test_all(struct efx_nic *efx)
rc = efx_mcdi_nvram_types(efx, &nvram_types);
if (rc)
- return rc;
+ goto fail1;
type = 0;
while (nvram_types != 0) {
if (nvram_types & 1) {
rc = efx_mcdi_nvram_test(efx, type);
if (rc)
- return rc;
+ goto fail2;
}
type++;
nvram_types >>= 1;
}
return 0;
+
+fail2:
+ EFX_ERR(efx, "%s: failed type=%u\n", __func__, type);
+fail1:
+ EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
}
static int efx_mcdi_read_assertion(struct efx_nic *efx)
@@ -968,7 +980,7 @@ static int efx_mcdi_read_assertion(struct efx_nic *efx)
if (rc)
return rc;
if (outlen < MC_CMD_GET_ASSERTS_OUT_LEN)
- return -EINVAL;
+ return -EIO;
/* Print out any recorded assertion state */
flags = MCDI_DWORD(outbuf, GET_ASSERTS_OUT_GLOBAL_FLAGS);
@@ -1086,7 +1098,7 @@ int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type,
goto fail;
if (outlen < MC_CMD_WOL_FILTER_SET_OUT_LEN) {
- rc = -EMSGSIZE;
+ rc = -EIO;
goto fail;
}
@@ -1121,7 +1133,7 @@ int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out)
goto fail;
if (outlen < MC_CMD_WOL_FILTER_GET_OUT_LEN) {
- rc = -EMSGSIZE;
+ rc = -EIO;
goto fail;
}
diff --git a/drivers/net/sfc/mcdi_mac.c b/drivers/net/sfc/mcdi_mac.c
index 06d24a1e412a..39182631ac92 100644
--- a/drivers/net/sfc/mcdi_mac.c
+++ b/drivers/net/sfc/mcdi_mac.c
@@ -80,7 +80,7 @@ int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
u8 inbuf[MC_CMD_MAC_STATS_IN_LEN];
int rc;
efx_dword_t *cmd_ptr;
- int period = 1000;
+ int period = enable ? 1000 : 0;
u32 addr_hi;
u32 addr_lo;
@@ -92,21 +92,14 @@ int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_ADDR_LO, addr_lo);
MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_ADDR_HI, addr_hi);
cmd_ptr = (efx_dword_t *)MCDI_PTR(inbuf, MAC_STATS_IN_CMD);
- if (enable)
- EFX_POPULATE_DWORD_6(*cmd_ptr,
- MC_CMD_MAC_STATS_CMD_DMA, 1,
- MC_CMD_MAC_STATS_CMD_CLEAR, clear,
- MC_CMD_MAC_STATS_CMD_PERIODIC_CHANGE, 1,
- MC_CMD_MAC_STATS_CMD_PERIODIC_ENABLE, 1,
- MC_CMD_MAC_STATS_CMD_PERIODIC_CLEAR, 0,
- MC_CMD_MAC_STATS_CMD_PERIOD_MS, period);
- else
- EFX_POPULATE_DWORD_5(*cmd_ptr,
- MC_CMD_MAC_STATS_CMD_DMA, 0,
- MC_CMD_MAC_STATS_CMD_CLEAR, clear,
- MC_CMD_MAC_STATS_CMD_PERIODIC_CHANGE, 1,
- MC_CMD_MAC_STATS_CMD_PERIODIC_ENABLE, 0,
- MC_CMD_MAC_STATS_CMD_PERIODIC_CLEAR, 0);
+ EFX_POPULATE_DWORD_7(*cmd_ptr,
+ MC_CMD_MAC_STATS_CMD_DMA, !!enable,
+ MC_CMD_MAC_STATS_CMD_CLEAR, clear,
+ MC_CMD_MAC_STATS_CMD_PERIODIC_CHANGE, 1,
+ MC_CMD_MAC_STATS_CMD_PERIODIC_ENABLE, !!enable,
+ MC_CMD_MAC_STATS_CMD_PERIODIC_CLEAR, 0,
+ MC_CMD_MAC_STATS_CMD_PERIODIC_NOEVENT, 1,
+ MC_CMD_MAC_STATS_CMD_PERIOD_MS, period);
MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);
rc = efx_mcdi_rpc(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
diff --git a/drivers/net/sfc/mcdi_pcol.h b/drivers/net/sfc/mcdi_pcol.h
index bd59302695b3..90359e644006 100644
--- a/drivers/net/sfc/mcdi_pcol.h
+++ b/drivers/net/sfc/mcdi_pcol.h
@@ -863,7 +863,7 @@
* bist output. The driver should only consume the BIST output
* after validating OUTLEN and PHY_CFG.PHY_TYPE.
*
- * If a driver can't succesfully parse the BIST output, it should
+ * If a driver can't successfully parse the BIST output, it should
* still respect the pass/Fail in OUT.RESULT
*
* Locks required: PHY_LOCK if doing a PHY BIST
@@ -872,7 +872,7 @@
#define MC_CMD_POLL_BIST 0x26
#define MC_CMD_POLL_BIST_IN_LEN 0
#define MC_CMD_POLL_BIST_OUT_LEN UNKNOWN
-#define MC_CMD_POLL_BIST_OUT_SFT9001_LEN 40
+#define MC_CMD_POLL_BIST_OUT_SFT9001_LEN 36
#define MC_CMD_POLL_BIST_OUT_MRSFP_LEN 8
#define MC_CMD_POLL_BIST_OUT_RESULT_OFST 0
#define MC_CMD_POLL_BIST_RUNNING 1
@@ -882,15 +882,14 @@
/* Generic: */
#define MC_CMD_POLL_BIST_OUT_PRIVATE_OFST 4
/* SFT9001-specific: */
-/* (offset 4 unused?) */
-#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A_OFST 8
-#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_OFST 12
-#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_OFST 16
-#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_OFST 20
-#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_OFST 24
-#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_OFST 28
-#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_OFST 32
-#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_OFST 36
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A_OFST 4
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_OFST 8
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_OFST 12
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_OFST 16
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_OFST 20
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_OFST 24
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_OFST 28
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_OFST 32
#define MC_CMD_POLL_BIST_SFT9001_PAIR_OK 1
#define MC_CMD_POLL_BIST_SFT9001_PAIR_OPEN 2
#define MC_CMD_POLL_BIST_SFT9001_INTRA_PAIR_SHORT 3
@@ -1054,9 +1053,13 @@
/* MC_CMD_PHY_STATS:
* Get generic PHY statistics
*
- * This call returns the statistics for a generic PHY, by direct DMA
- * into host memory, in a sparse array (indexed by the enumerate).
- * Each value is represented by a 32bit number.
+ * This call returns the statistics for a generic PHY in a sparse
+ * array (indexed by the enumerate). Each value is represented by
+ * a 32bit number.
+ *
+ * If the DMA_ADDR is 0, then no DMA is performed, and the statistics
+ * may be read directly out of shared memory. If DMA_ADDR != 0, then
+ * the statistics are DMAed to that (page-aligned) location.
*
* Locks required: None
* Returns: 0, ETIME
@@ -1066,7 +1069,8 @@
#define MC_CMD_PHY_STATS_IN_LEN 8
#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LO_OFST 0
#define MC_CMD_PHY_STATS_IN_DMA_ADDR_HI_OFST 4
-#define MC_CMD_PHY_STATS_OUT_LEN 0
+#define MC_CMD_PHY_STATS_OUT_DMA_LEN 0
+#define MC_CMD_PHY_STATS_OUT_NO_DMA_LEN (MC_CMD_PHY_NSTATS * 4)
/* Unified MAC statistics enumeration */
#define MC_CMD_MAC_GENERATION_START 0
@@ -1158,11 +1162,13 @@
#define MC_CMD_MAC_STATS_CMD_CLEAR_WIDTH 1
#define MC_CMD_MAC_STATS_CMD_PERIODIC_CHANGE_LBN 2
#define MC_CMD_MAC_STATS_CMD_PERIODIC_CHANGE_WIDTH 1
-/* Fields only relevent when PERIODIC_CHANGE is set */
+/* Remaining PERIOD* fields only relevant when PERIODIC_CHANGE is set */
#define MC_CMD_MAC_STATS_CMD_PERIODIC_ENABLE_LBN 3
#define MC_CMD_MAC_STATS_CMD_PERIODIC_ENABLE_WIDTH 1
#define MC_CMD_MAC_STATS_CMD_PERIODIC_CLEAR_LBN 4
#define MC_CMD_MAC_STATS_CMD_PERIODIC_CLEAR_WIDTH 1
+#define MC_CMD_MAC_STATS_CMD_PERIODIC_NOEVENT_LBN 5
+#define MC_CMD_MAC_STATS_CMD_PERIODIC_NOEVENT_WIDTH 1
#define MC_CMD_MAC_STATS_CMD_PERIOD_MS_LBN 16
#define MC_CMD_MAC_STATS_CMD_PERIOD_MS_WIDTH 16
#define MC_CMD_MAC_STATS_IN_DMA_LEN_OFST 12
@@ -1729,6 +1735,39 @@
#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_OFST 4 /* output bits */
#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OFST 8 /* dirs: 0=out, 1=in */
+/* MC_CMD_TEST_HACK: (debug (unsurprisingly))
+ * Change bits of network port state for test purposes in ways that would never be
+ * useful in normal operation and so need a special command to change. */
+#define MC_CMD_TEST_HACK 0x2f
+#define MC_CMD_TEST_HACK_IN_LEN 8
+#define MC_CMD_TEST_HACK_IN_TXPAD_OFST 0
+#define MC_CMD_TEST_HACK_IN_TXPAD_AUTO 0 /* Let the MC manage things */
+#define MC_CMD_TEST_HACK_IN_TXPAD_ON 1 /* Force on */
+#define MC_CMD_TEST_HACK_IN_TXPAD_OFF 2 /* Force off */
+#define MC_CMD_TEST_HACK_IN_IPG_OFST 4 /* Takes a value in bits */
+#define MC_CMD_TEST_HACK_IN_IPG_AUTO 0 /* The MC picks the value */
+#define MC_CMD_TEST_HACK_OUT_LEN 0
+
+/* MC_CMD_SENSOR_SET_LIMS: (debug) (mostly) adjust the sensor limits. This
+ * is a warranty-voiding operation.
+ *
+ * IN: sensor identifier (one of the enumeration starting with MC_CMD_SENSOR_CONTROLLER_TEMP),
+ * followed by 4 32-bit values: min(warning), max(warning), min(fatal), max(fatal). Which
+ * of these limits are meaningful, and how they are interpreted, is sensor-specific.
+ *
+ * OUT: nothing
+ *
+ * Returns: ENOENT if the sensor specified does not exist, EINVAL if the limits are
+ * out of range.
+ */
+#define MC_CMD_SENSOR_SET_LIMS 0x4e
+#define MC_CMD_SENSOR_SET_LIMS_IN_LEN 20
+#define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_OFST 0
+#define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_OFST 4
+#define MC_CMD_SENSOR_SET_LIMS_IN_HI0_OFST 8
+#define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_OFST 12
+#define MC_CMD_SENSOR_SET_LIMS_IN_HI1_OFST 16
+
/* Do NOT add new commands beyond 0x4f as part of 3.0 : 0x50 - 0x7f will be
* used for post-3.0 extensions. If you run out of space, look for gaps or
* commands that are unused in the existing range. */
diff --git a/drivers/net/sfc/mcdi_phy.c b/drivers/net/sfc/mcdi_phy.c
index 2f2354696663..6032c0e1f1f8 100644
--- a/drivers/net/sfc/mcdi_phy.c
+++ b/drivers/net/sfc/mcdi_phy.c
@@ -17,6 +17,8 @@
#include "mcdi.h"
#include "mcdi_pcol.h"
#include "mdio_10g.h"
+#include "nic.h"
+#include "selftest.h"
struct efx_mcdi_phy_cfg {
u32 flags;
@@ -48,7 +50,7 @@ efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_cfg *cfg)
goto fail;
if (outlen < MC_CMD_GET_PHY_CFG_OUT_LEN) {
- rc = -EMSGSIZE;
+ rc = -EIO;
goto fail;
}
@@ -111,7 +113,7 @@ static int efx_mcdi_loopback_modes(struct efx_nic *efx, u64 *loopback_modes)
goto fail;
if (outlen < MC_CMD_GET_LOOPBACK_MODES_OUT_LEN) {
- rc = -EMSGSIZE;
+ rc = -EIO;
goto fail;
}
@@ -587,13 +589,153 @@ static int efx_mcdi_phy_test_alive(struct efx_nic *efx)
return rc;
if (outlen < MC_CMD_GET_PHY_STATE_OUT_LEN)
- return -EMSGSIZE;
+ return -EIO;
if (MCDI_DWORD(outbuf, GET_PHY_STATE_STATE) != MC_CMD_PHY_STATE_OK)
return -EINVAL;
return 0;
}
+static const char *const mcdi_sft9001_cable_diag_names[] = {
+ "cable.pairA.length",
+ "cable.pairB.length",
+ "cable.pairC.length",
+ "cable.pairD.length",
+ "cable.pairA.status",
+ "cable.pairB.status",
+ "cable.pairC.status",
+ "cable.pairD.status",
+};
+
+static int efx_mcdi_bist(struct efx_nic *efx, unsigned int bist_mode,
+ int *results)
+{
+ unsigned int retry, i, count = 0;
+ size_t outlen;
+ u32 status;
+ u8 *buf, *ptr;
+ int rc;
+
+ buf = kzalloc(0x100, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ BUILD_BUG_ON(MC_CMD_START_BIST_OUT_LEN != 0);
+ MCDI_SET_DWORD(buf, START_BIST_IN_TYPE, bist_mode);
+ rc = efx_mcdi_rpc(efx, MC_CMD_START_BIST, buf, MC_CMD_START_BIST_IN_LEN,
+ NULL, 0, NULL);
+ if (rc)
+ goto out;
+
+ /* Wait up to 10s for BIST to finish */
+ for (retry = 0; retry < 100; ++retry) {
+ BUILD_BUG_ON(MC_CMD_POLL_BIST_IN_LEN != 0);
+ rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0,
+ buf, 0x100, &outlen);
+ if (rc)
+ goto out;
+
+ status = MCDI_DWORD(buf, POLL_BIST_OUT_RESULT);
+ if (status != MC_CMD_POLL_BIST_RUNNING)
+ goto finished;
+
+ msleep(100);
+ }
+
+ rc = -ETIMEDOUT;
+ goto out;
+
+finished:
+ results[count++] = (status == MC_CMD_POLL_BIST_PASSED) ? 1 : -1;
+
+ /* SFT9001 specific cable diagnostics output */
+ if (efx->phy_type == PHY_TYPE_SFT9001B &&
+ (bist_mode == MC_CMD_PHY_BIST_CABLE_SHORT ||
+ bist_mode == MC_CMD_PHY_BIST_CABLE_LONG)) {
+ ptr = MCDI_PTR(buf, POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A);
+ if (status == MC_CMD_POLL_BIST_PASSED &&
+ outlen >= MC_CMD_POLL_BIST_OUT_SFT9001_LEN) {
+ for (i = 0; i < 8; i++) {
+ results[count + i] =
+ EFX_DWORD_FIELD(((efx_dword_t *)ptr)[i],
+ EFX_DWORD_0);
+ }
+ }
+ count += 8;
+ }
+ rc = count;
+
+out:
+ kfree(buf);
+
+ return rc;
+}
+
+static int efx_mcdi_phy_run_tests(struct efx_nic *efx, int *results,
+ unsigned flags)
+{
+ struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data;
+ u32 mode;
+ int rc;
+
+ if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_BIST_LBN)) {
+ rc = efx_mcdi_bist(efx, MC_CMD_PHY_BIST, results);
+ if (rc < 0)
+ return rc;
+
+ results += rc;
+ }
+
+ /* If both the LONG and SHORT cable tests are supported, run the LONG
+ * test only for an offline (ETH_TEST_FL_OFFLINE) request and the SHORT
+ * test otherwise; if only one is supported, run that one. */
+ mode = 0;
+ if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_BIST_CABLE_SHORT_LBN)) {
+ if ((flags & ETH_TEST_FL_OFFLINE) &&
+ (phy_cfg->flags &
+ (1 << MC_CMD_GET_PHY_CFG_BIST_CABLE_LONG_LBN)))
+ mode = MC_CMD_PHY_BIST_CABLE_LONG;
+ else
+ mode = MC_CMD_PHY_BIST_CABLE_SHORT;
+ } else if (phy_cfg->flags &
+ (1 << MC_CMD_GET_PHY_CFG_BIST_CABLE_LONG_LBN))
+ mode = MC_CMD_PHY_BIST_CABLE_LONG;
+
+ if (mode != 0) {
+ rc = efx_mcdi_bist(efx, mode, results);
+ if (rc < 0)
+ return rc;
+ results += rc;
+ }
+
+ return 0;
+}
+
+const char *efx_mcdi_phy_test_name(struct efx_nic *efx, unsigned int index)
+{
+ struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data;
+
+ if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_BIST_LBN)) {
+ if (index == 0)
+ return "bist";
+ --index;
+ }
+
+ if (phy_cfg->flags & ((1 << MC_CMD_GET_PHY_CFG_BIST_CABLE_SHORT_LBN) |
+ (1 << MC_CMD_GET_PHY_CFG_BIST_CABLE_LONG_LBN))) {
+ if (index == 0)
+ return "cable";
+ --index;
+
+ if (efx->phy_type == PHY_TYPE_SFT9001B) {
+ if (index < ARRAY_SIZE(mcdi_sft9001_cable_diag_names))
+ return mcdi_sft9001_cable_diag_names[index];
+ index -= ARRAY_SIZE(mcdi_sft9001_cable_diag_names);
+ }
+ }
+
+ return NULL;
+}
+
struct efx_phy_operations efx_mcdi_phy_ops = {
.probe = efx_mcdi_phy_probe,
.init = efx_port_dummy_op_int,
@@ -604,6 +746,6 @@ struct efx_phy_operations efx_mcdi_phy_ops = {
.get_settings = efx_mcdi_phy_get_settings,
.set_settings = efx_mcdi_phy_set_settings,
.test_alive = efx_mcdi_phy_test_alive,
- .run_tests = NULL,
- .test_name = NULL,
+ .run_tests = efx_mcdi_phy_run_tests,
+ .test_name = efx_mcdi_phy_test_name,
};
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index cb018e272097..2e6fd89f2a72 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -85,9 +85,13 @@ do {if (net_ratelimit()) EFX_LOG(efx, fmt, ##args); } while (0)
#define EFX_MAX_CHANNELS 32
#define EFX_MAX_RX_QUEUES EFX_MAX_CHANNELS
-#define EFX_TX_QUEUE_OFFLOAD_CSUM 0
-#define EFX_TX_QUEUE_NO_CSUM 1
-#define EFX_TX_QUEUE_COUNT 2
+/* Checksum generation is a per-queue option in hardware, so each
+ * queue visible to the networking core is backed by two hardware TX
+ * queues. */
+#define EFX_MAX_CORE_TX_QUEUES EFX_MAX_CHANNELS
+#define EFX_TXQ_TYPE_OFFLOAD 1
+#define EFX_TXQ_TYPES 2
+#define EFX_MAX_TX_QUEUES (EFX_TXQ_TYPES * EFX_MAX_CORE_TX_QUEUES)
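[Editorial aside, not part of the patch] With EFX_TXQ_TYPES hardware queues behind each core queue, the hardware queue index is effectively derived from the TX channel number and the checksum type, which is how efx_set_channels() and efx_nic_init_tx() use the macros above; a small sketch, helper name invented:

static inline unsigned int hw_txq_index(unsigned int tx_channel,
					bool csum_offload)
{
	/* consecutive EFX_TXQ_TYPES slots per channel; the low bit says
	 * whether the hardware generates checksums for this queue */
	return tx_channel * EFX_TXQ_TYPES +
	       (csum_offload ? EFX_TXQ_TYPE_OFFLOAD : 0);
}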
/**
* struct efx_special_buffer - An Efx special buffer
@@ -187,7 +191,7 @@ struct efx_tx_buffer {
struct efx_tx_queue {
/* Members which don't change on the fast path */
struct efx_nic *efx ____cacheline_aligned_in_smp;
- int queue;
+ unsigned queue;
struct efx_channel *channel;
struct efx_nic *nic;
struct efx_tx_buffer *buffer;
@@ -306,11 +310,6 @@ struct efx_buffer {
};
-/* Flags for channel->used_flags */
-#define EFX_USED_BY_RX 1
-#define EFX_USED_BY_TX 2
-#define EFX_USED_BY_RX_TX (EFX_USED_BY_RX | EFX_USED_BY_TX)
-
enum efx_rx_alloc_method {
RX_ALLOC_METHOD_AUTO = 0,
RX_ALLOC_METHOD_SKB = 1,
@@ -327,7 +326,6 @@ enum efx_rx_alloc_method {
* @efx: Associated Efx NIC
* @channel: Channel instance number
* @name: Name for channel and IRQ
- * @used_flags: Channel is used by net driver
* @enabled: Channel enabled indicator
* @irq: IRQ number (MSI and MSI-X only)
* @irq_moderation: IRQ moderation value (in hardware ticks)
@@ -352,12 +350,14 @@ enum efx_rx_alloc_method {
* @n_rx_frm_trunc: Count of RX_FRM_TRUNC errors
* @n_rx_overlength: Count of RX_OVERLENGTH errors
* @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun
+ * @tx_queue: Pointer to first TX queue, or %NULL if not used for TX
+ * @tx_stop_count: Core TX queue stop count
+ * @tx_stop_lock: Core TX queue stop lock
*/
struct efx_channel {
struct efx_nic *efx;
int channel;
char name[IFNAMSIZ + 6];
- int used_flags;
bool enabled;
int irq;
unsigned int irq_moderation;
@@ -389,6 +389,9 @@ struct efx_channel {
struct efx_rx_buffer *rx_pkt;
bool rx_pkt_csummed;
+ struct efx_tx_queue *tx_queue;
+ atomic_t tx_stop_count;
+ spinlock_t tx_stop_lock;
};
enum efx_led_mode {
@@ -661,8 +664,9 @@ union efx_multicast_hash {
* @rx_queue: RX DMA queues
* @channel: Channels
* @next_buffer_table: First available buffer table id
- * @n_rx_queues: Number of RX queues
* @n_channels: Number of channels in use
+ * @n_rx_channels: Number of channels used for RX (= number of RX queues)
+ * @n_tx_channels: Number of channels used for TX
* @rx_buffer_len: RX buffer length
* @rx_buffer_order: Order (log2) of number of pages for each RX buffer
* @int_error_count: Number of internal errors seen recently
@@ -672,6 +676,8 @@ union efx_multicast_hash {
* This register is written with the SMP processor ID whenever an
* interrupt is handled. It is used by efx_nic_test_interrupt()
* to verify that an interrupt has occurred.
+ * @irq_zero_count: Number of legacy IRQs seen with queue flags == 0
+ * @fatal_irq_level: IRQ level (bit number) used for serious errors
* @spi_flash: SPI flash device
* This field will be %NULL if no flash device is present (or for Siena).
* @spi_eeprom: SPI EEPROM device
@@ -691,8 +697,6 @@ union efx_multicast_hash {
* @port_initialized: Port initialized?
* @net_dev: Operating system network device. Consider holding the rtnl lock
* @rx_checksum_enabled: RX checksumming enabled
- * @netif_stop_count: Port stop count
- * @netif_stop_lock: Port stop lock
* @mac_stats: MAC statistics. These include all statistics the MACs
* can provide. Generic code converts these into a standard
* &struct net_device_stats.
@@ -740,13 +744,14 @@ struct efx_nic {
enum nic_state state;
enum reset_type reset_pending;
- struct efx_tx_queue tx_queue[EFX_TX_QUEUE_COUNT];
+ struct efx_tx_queue tx_queue[EFX_MAX_TX_QUEUES];
struct efx_rx_queue rx_queue[EFX_MAX_RX_QUEUES];
struct efx_channel channel[EFX_MAX_CHANNELS];
unsigned next_buffer_table;
- int n_rx_queues;
- int n_channels;
+ unsigned n_channels;
+ unsigned n_rx_channels;
+ unsigned n_tx_channels;
unsigned int rx_buffer_len;
unsigned int rx_buffer_order;
@@ -755,7 +760,8 @@ struct efx_nic {
struct efx_buffer irq_status;
volatile signed int last_irq_cpu;
- unsigned long irq_zero_count;
+ unsigned irq_zero_count;
+ unsigned fatal_irq_level;
struct efx_spi_device *spi_flash;
struct efx_spi_device *spi_eeprom;
@@ -777,9 +783,6 @@ struct efx_nic {
struct net_device *net_dev;
bool rx_checksum_enabled;
- atomic_t netif_stop_count;
- spinlock_t netif_stop_lock;
-
struct efx_mac_stats mac_stats;
struct efx_buffer stats_buffer;
spinlock_t stats_lock;
@@ -924,40 +927,35 @@ struct efx_nic_type {
/* Iterate over all used channels */
#define efx_for_each_channel(_channel, _efx) \
- for (_channel = &_efx->channel[0]; \
- _channel < &_efx->channel[EFX_MAX_CHANNELS]; \
- _channel++) \
- if (!_channel->used_flags) \
- continue; \
- else
+ for (_channel = &((_efx)->channel[0]); \
+ _channel < &((_efx)->channel[(efx)->n_channels]); \
+ _channel++)
/* Iterate over all used TX queues */
#define efx_for_each_tx_queue(_tx_queue, _efx) \
- for (_tx_queue = &_efx->tx_queue[0]; \
- _tx_queue < &_efx->tx_queue[EFX_TX_QUEUE_COUNT]; \
+ for (_tx_queue = &((_efx)->tx_queue[0]); \
+ _tx_queue < &((_efx)->tx_queue[EFX_TXQ_TYPES * \
+ (_efx)->n_tx_channels]); \
_tx_queue++)
/* Iterate over all TX queues belonging to a channel */
#define efx_for_each_channel_tx_queue(_tx_queue, _channel) \
- for (_tx_queue = &_channel->efx->tx_queue[0]; \
- _tx_queue < &_channel->efx->tx_queue[EFX_TX_QUEUE_COUNT]; \
- _tx_queue++) \
- if (_tx_queue->channel != _channel) \
- continue; \
- else
+ for (_tx_queue = (_channel)->tx_queue; \
+ _tx_queue && _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES; \
+ _tx_queue++)
/* Iterate over all used RX queues */
#define efx_for_each_rx_queue(_rx_queue, _efx) \
- for (_rx_queue = &_efx->rx_queue[0]; \
- _rx_queue < &_efx->rx_queue[_efx->n_rx_queues]; \
+ for (_rx_queue = &((_efx)->rx_queue[0]); \
+ _rx_queue < &((_efx)->rx_queue[(_efx)->n_rx_channels]); \
_rx_queue++)
/* Iterate over all RX queues belonging to a channel */
#define efx_for_each_channel_rx_queue(_rx_queue, _channel) \
- for (_rx_queue = &_channel->efx->rx_queue[_channel->channel]; \
+ for (_rx_queue = &((_channel)->efx->rx_queue[(_channel)->channel]); \
_rx_queue; \
_rx_queue = NULL) \
- if (_rx_queue->channel != _channel) \
+ if (_rx_queue->channel != (_channel)) \
continue; \
else
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
index b06f8e348307..5d3aaec58556 100644
--- a/drivers/net/sfc/nic.c
+++ b/drivers/net/sfc/nic.c
@@ -418,7 +418,7 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
FRF_BZ_TX_NON_IP_DROP_DIS, 1);
if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
- int csum = tx_queue->queue == EFX_TX_QUEUE_OFFLOAD_CSUM;
+ int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_TCP_CHKSM_DIS,
!csum);
@@ -431,10 +431,10 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
efx_oword_t reg;
/* Only 128 bits in this register */
- BUILD_BUG_ON(EFX_TX_QUEUE_COUNT >= 128);
+ BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);
efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
- if (tx_queue->queue == EFX_TX_QUEUE_OFFLOAD_CSUM)
+ if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
clear_bit_le(tx_queue->queue, (void *)&reg);
else
set_bit_le(tx_queue->queue, (void *)&reg);
@@ -654,22 +654,23 @@ void efx_generate_event(struct efx_channel *channel, efx_qword_t *event)
* The NIC batches TX completion events; the message we receive is of
* the form "complete all TX events up to this index".
*/
-static void
+static int
efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
{
unsigned int tx_ev_desc_ptr;
unsigned int tx_ev_q_label;
struct efx_tx_queue *tx_queue;
struct efx_nic *efx = channel->efx;
+ int tx_packets = 0;
if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
/* Transmit completion */
tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
tx_queue = &efx->tx_queue[tx_ev_q_label];
- channel->irq_mod_score +=
- (tx_ev_desc_ptr - tx_queue->read_count) &
- EFX_TXQ_MASK;
+ tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
+ EFX_TXQ_MASK);
+ channel->irq_mod_score += tx_packets;
efx_xmit_done(tx_queue, tx_ev_desc_ptr);
} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
/* Rewrite the FIFO write pointer */
@@ -689,6 +690,8 @@ efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
EFX_QWORD_FMT"\n", channel->channel,
EFX_QWORD_VAL(*event));
}
+
+ return tx_packets;
}
/* Detect errors included in the rx_evt_pkt_ok bit. */
@@ -947,16 +950,17 @@ efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
}
}
-int efx_nic_process_eventq(struct efx_channel *channel, int rx_quota)
+int efx_nic_process_eventq(struct efx_channel *channel, int budget)
{
unsigned int read_ptr;
efx_qword_t event, *p_event;
int ev_code;
- int rx_packets = 0;
+ int tx_packets = 0;
+ int spent = 0;
read_ptr = channel->eventq_read_ptr;
- do {
+ for (;;) {
p_event = efx_event(channel, read_ptr);
event = *p_event;
@@ -970,15 +974,23 @@ int efx_nic_process_eventq(struct efx_channel *channel, int rx_quota)
/* Clear this event by marking it all ones */
EFX_SET_QWORD(*p_event);
+ /* Increment read pointer */
+ read_ptr = (read_ptr + 1) & EFX_EVQ_MASK;
+
ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);
switch (ev_code) {
case FSE_AZ_EV_CODE_RX_EV:
efx_handle_rx_event(channel, &event);
- ++rx_packets;
+ if (++spent == budget)
+ goto out;
break;
case FSE_AZ_EV_CODE_TX_EV:
- efx_handle_tx_event(channel, &event);
+ tx_packets += efx_handle_tx_event(channel, &event);
+ if (tx_packets >= EFX_TXQ_SIZE) {
+ spent = budget;
+ goto out;
+ }
break;
case FSE_AZ_EV_CODE_DRV_GEN_EV:
channel->eventq_magic = EFX_QWORD_FIELD(
@@ -1001,14 +1013,11 @@ int efx_nic_process_eventq(struct efx_channel *channel, int rx_quota)
" (data " EFX_QWORD_FMT ")\n", channel->channel,
ev_code, EFX_QWORD_VAL(event));
}
+ }
- /* Increment read pointer */
- read_ptr = (read_ptr + 1) & EFX_EVQ_MASK;
-
- } while (rx_packets < rx_quota);
-
+out:
channel->eventq_read_ptr = read_ptr;
- return rx_packets;
+ return spent;
}
@@ -1123,7 +1132,7 @@ static void efx_poll_flush_events(struct efx_nic *efx)
ev_sub_code == FSE_AZ_TX_DESCQ_FLS_DONE_EV) {
ev_queue = EFX_QWORD_FIELD(*event,
FSF_AZ_DRIVER_EV_SUBDATA);
- if (ev_queue < EFX_TX_QUEUE_COUNT) {
+ if (ev_queue < EFX_TXQ_TYPES * efx->n_tx_channels) {
tx_queue = efx->tx_queue + ev_queue;
tx_queue->flushed = FLUSH_DONE;
}
@@ -1133,7 +1142,7 @@ static void efx_poll_flush_events(struct efx_nic *efx)
*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
ev_failed = EFX_QWORD_FIELD(
*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
- if (ev_queue < efx->n_rx_queues) {
+ if (ev_queue < efx->n_rx_channels) {
rx_queue = efx->rx_queue + ev_queue;
rx_queue->flushed =
ev_failed ? FLUSH_FAILED : FLUSH_DONE;
@@ -1229,15 +1238,9 @@ static inline void efx_nic_interrupts(struct efx_nic *efx,
bool enabled, bool force)
{
efx_oword_t int_en_reg_ker;
- unsigned int level = 0;
-
- if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
- /* Set the level always even if we're generating a test
- * interrupt, because our legacy interrupt handler is safe */
- level = 0x1f;
EFX_POPULATE_OWORD_3(int_en_reg_ker,
- FRF_AZ_KER_INT_LEVE_SEL, level,
+ FRF_AZ_KER_INT_LEVE_SEL, efx->fatal_irq_level,
FRF_AZ_KER_INT_KER, force,
FRF_AZ_DRV_INT_EN_KER, enabled);
efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
@@ -1291,11 +1294,10 @@ irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx)
EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
EFX_OWORD_VAL(fatal_intr),
error ? "disabling bus mastering" : "no recognised error");
- if (error == 0)
- goto out;
/* If this is a memory parity error dump which blocks are offending */
- mem_perr = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER);
+ mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
+ EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER));
if (mem_perr) {
efx_oword_t reg;
efx_reado(efx, &reg, FR_AZ_MEM_STAT);
@@ -1324,7 +1326,7 @@ irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx)
"NIC will be disabled\n");
efx_schedule_reset(efx, RESET_TYPE_DISABLE);
}
-out:
+
return IRQ_HANDLED;
}
@@ -1346,9 +1348,11 @@ static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
queues = EFX_EXTRACT_DWORD(reg, 0, 31);
/* Check to see if we have a serious error condition */
- syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
- if (unlikely(syserr))
- return efx_nic_fatal_interrupt(efx);
+ if (queues & (1U << efx->fatal_irq_level)) {
+ syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
+ if (unlikely(syserr))
+ return efx_nic_fatal_interrupt(efx);
+ }
if (queues != 0) {
if (EFX_WORKAROUND_15783(efx))
@@ -1362,33 +1366,28 @@ static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
}
result = IRQ_HANDLED;
- } else if (EFX_WORKAROUND_15783(efx) &&
- efx->irq_zero_count++ == 0) {
+ } else if (EFX_WORKAROUND_15783(efx)) {
efx_qword_t *event;
- /* Ensure we rearm all event queues */
+ /* We can't return IRQ_HANDLED more than once on seeing ISR=0
+ * because this might be a shared interrupt. */
+ if (efx->irq_zero_count++ == 0)
+ result = IRQ_HANDLED;
+
+ /* Ensure we schedule or rearm all event queues */
efx_for_each_channel(channel, efx) {
event = efx_event(channel, channel->eventq_read_ptr);
if (efx_event_present(event))
efx_schedule_channel(channel);
+ else
+ efx_nic_eventq_read_ack(channel);
}
-
- result = IRQ_HANDLED;
}
if (result == IRQ_HANDLED) {
efx->last_irq_cpu = raw_smp_processor_id();
EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
- } else if (EFX_WORKAROUND_15783(efx)) {
- /* We can't return IRQ_HANDLED more than once on seeing ISR0=0
- * because this might be a shared interrupt, but we do need to
- * check the channel every time and preemptively rearm it if
- * it's idle. */
- efx_for_each_channel(channel, efx) {
- if (!channel->work_pending)
- efx_nic_eventq_read_ack(channel);
- }
}
return result;
@@ -1413,9 +1412,11 @@ static irqreturn_t efx_msi_interrupt(int irq, void *dev_id)
irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
/* Check to see if we have a serious error condition */
- syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
- if (unlikely(syserr))
- return efx_nic_fatal_interrupt(efx);
+ if (channel->channel == efx->fatal_irq_level) {
+ syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
+ if (unlikely(syserr))
+ return efx_nic_fatal_interrupt(efx);
+ }
/* Schedule processing of the channel */
efx_schedule_channel(channel);
@@ -1440,7 +1441,7 @@ static void efx_setup_rss_indir_table(struct efx_nic *efx)
offset < FR_BZ_RX_INDIRECTION_TBL + 0x800;
offset += 0x10) {
EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
- i % efx->n_rx_queues);
+ i % efx->n_rx_channels);
efx_writed(efx, &dword, offset);
i++;
}
@@ -1553,6 +1554,13 @@ void efx_nic_init_common(struct efx_nic *efx)
FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER);
+ if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
+ /* Use an interrupt level unused by event queues */
+ efx->fatal_irq_level = 0x1f;
+ else
+ /* Use a valid MSI-X vector */
+ efx->fatal_irq_level = 0;
+
/* Enable all the genuinely fatal interrupts. (They are still
* masked by the overall interrupt mask, controlled by
* falcon_interrupts()).
@@ -1563,6 +1571,8 @@ void efx_nic_init_common(struct efx_nic *efx)
FRF_AZ_ILL_ADR_INT_KER_EN, 1,
FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
+ if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
+ EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1);
EFX_INVERT_OWORD(temp);
efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);
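With this hunk efx_nic_process_eventq follows the usual NAPI contract: it takes a budget, charges RX events against it, and returns the amount spent, bailing out early if a burst of TX completions reaches a full queue's worth of work. A poll handler built on it would look roughly like the sketch below; the driver's real efx_poll lives in efx.c and also handles interrupt moderation, so this is illustrative only and the field names are assumed:

	/* Illustrative sketch, not the driver's efx_poll */
	static int efx_poll_sketch(struct napi_struct *napi, int budget)
	{
		struct efx_channel *channel =
			container_of(napi, struct efx_channel, napi_str);
		int spent = efx_nic_process_eventq(channel, budget);

		if (spent < budget) {
			/* Event queue drained: complete NAPI and re-arm the IRQ */
			napi_complete(napi);
			efx_nic_eventq_read_ack(channel);
		}

		return spent;
	}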
diff --git a/drivers/net/sfc/nic.h b/drivers/net/sfc/nic.h
index 3166bafdfbef..bbc2c0c2f843 100644
--- a/drivers/net/sfc/nic.h
+++ b/drivers/net/sfc/nic.h
@@ -135,12 +135,14 @@ static inline struct falcon_board *falcon_board(struct efx_nic *efx)
* @fw_build: Firmware build number
* @mcdi: Management-Controller-to-Driver Interface
* @wol_filter_id: Wake-on-LAN packet filter id
+ * @ipv6_rss_key: Toeplitz hash key for IPv6 RSS
*/
struct siena_nic_data {
u64 fw_version;
u32 fw_build;
struct efx_mcdi_iface mcdi;
int wol_filter_id;
+ u8 ipv6_rss_key[40];
};
extern void siena_print_fwver(struct efx_nic *efx, char *buf, size_t len);
@@ -203,6 +205,7 @@ extern void falcon_irq_ack_a1(struct efx_nic *efx);
extern int efx_nic_flush_queues(struct efx_nic *efx);
extern void falcon_start_nic_stats(struct efx_nic *efx);
extern void falcon_stop_nic_stats(struct efx_nic *efx);
+extern void falcon_setup_xaui(struct efx_nic *efx);
extern int falcon_reset_xaui(struct efx_nic *efx);
extern void efx_nic_init_common(struct efx_nic *efx);
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
index 0106b1d9aae2..371e86cc090f 100644
--- a/drivers/net/sfc/selftest.c
+++ b/drivers/net/sfc/selftest.c
@@ -616,10 +616,10 @@ static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests,
goto out;
}
- /* Test every TX queue */
- efx_for_each_tx_queue(tx_queue, efx) {
- state->offload_csum = (tx_queue->queue ==
- EFX_TX_QUEUE_OFFLOAD_CSUM);
+ /* Test both types of TX queue */
+ efx_for_each_channel_tx_queue(tx_queue, &efx->channel[0]) {
+ state->offload_csum = (tx_queue->queue &
+ EFX_TXQ_TYPE_OFFLOAD);
rc = efx_test_loopback(tx_queue,
&tests->loopback[mode]);
if (rc)
diff --git a/drivers/net/sfc/selftest.h b/drivers/net/sfc/selftest.h
index 643bef72b99d..aed495a4dad7 100644
--- a/drivers/net/sfc/selftest.h
+++ b/drivers/net/sfc/selftest.h
@@ -18,8 +18,8 @@
*/
struct efx_loopback_self_tests {
- int tx_sent[EFX_TX_QUEUE_COUNT];
- int tx_done[EFX_TX_QUEUE_COUNT];
+ int tx_sent[EFX_TXQ_TYPES];
+ int tx_done[EFX_TXQ_TYPES];
int rx_good;
int rx_bad;
};
diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c
index e0c46f59d1f8..727b4228e081 100644
--- a/drivers/net/sfc/siena.c
+++ b/drivers/net/sfc/siena.c
@@ -13,6 +13,7 @@
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/random.h>
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
@@ -274,6 +275,9 @@ static int siena_probe_nic(struct efx_nic *efx)
goto fail5;
}
+ get_random_bytes(&nic_data->ipv6_rss_key,
+ sizeof(nic_data->ipv6_rss_key));
+
return 0;
fail5:
@@ -293,6 +297,7 @@ fail1:
*/
static int siena_init_nic(struct efx_nic *efx)
{
+ struct siena_nic_data *nic_data = efx->nic_data;
efx_oword_t temp;
int rc;
@@ -319,6 +324,20 @@ static int siena_init_nic(struct efx_nic *efx)
EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_INGR_EN, 1);
efx_writeo(efx, &temp, FR_AZ_RX_CFG);
+ /* Enable IPv6 RSS */
+ BUILD_BUG_ON(sizeof(nic_data->ipv6_rss_key) !=
+ 2 * sizeof(temp) + FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8 ||
+ FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN != 0);
+ memcpy(&temp, nic_data->ipv6_rss_key, sizeof(temp));
+ efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG1);
+ memcpy(&temp, nic_data->ipv6_rss_key + sizeof(temp), sizeof(temp));
+ efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG2);
+ EFX_POPULATE_OWORD_2(temp, FRF_CZ_RX_RSS_IPV6_THASH_ENABLE, 1,
+ FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE, 1);
+ memcpy(&temp, nic_data->ipv6_rss_key + 2 * sizeof(temp),
+ FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8);
+ efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3);
+
if (efx_nic_rx_xoff_thresh >= 0 || efx_nic_rx_xon_thresh >= 0)
/* No MCDI operation has been defined to set thresholds */
EFX_ERR(efx, "ignoring RX flow control thresholds\n");
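The 40-byte IPv6 RSS key is spread over three registers, and the BUILD_BUG_ON above pins the layout: two full octwords plus the remaining high bits in REG3 next to the two hash-enable flags. Working the arithmetic out, assuming efx_oword_t (temp) is 16 bytes:

	/* Layout implied by the writes above (sketch, not driver code):
	 *   key bytes  0..15 -> FR_CZ_RX_RSS_IPV6_REG1
	 *   key bytes 16..31 -> FR_CZ_RX_RSS_IPV6_REG2
	 *   key bytes 32..39 -> low FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8 bytes
	 *                       of FR_CZ_RX_RSS_IPV6_REG3, alongside the
	 *                       THASH/IP_THASH enable bits
	 * so 2 * 16 + FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8 must equal 40,
	 * which is exactly the condition the BUILD_BUG_ON enforces.
	 */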
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index be0e110a1f73..6bb12a87ef2d 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -30,32 +30,46 @@
*/
#define EFX_TXQ_THRESHOLD (EFX_TXQ_MASK / 2u)
-/* We want to be able to nest calls to netif_stop_queue(), since each
- * channel can have an individual stop on the queue.
- */
-void efx_stop_queue(struct efx_nic *efx)
+/* We need to be able to nest calls to netif_tx_stop_queue(), partly
+ * because of the 2 hardware queues associated with each core queue,
+ * but also so that we can inhibit TX for reasons other than a full
+ * hardware queue. */
+void efx_stop_queue(struct efx_channel *channel)
{
- spin_lock_bh(&efx->netif_stop_lock);
+ struct efx_nic *efx = channel->efx;
+
+ if (!channel->tx_queue)
+ return;
+
+ spin_lock_bh(&channel->tx_stop_lock);
EFX_TRACE(efx, "stop TX queue\n");
- atomic_inc(&efx->netif_stop_count);
- netif_stop_queue(efx->net_dev);
+ atomic_inc(&channel->tx_stop_count);
+ netif_tx_stop_queue(
+ netdev_get_tx_queue(
+ efx->net_dev,
+ channel->tx_queue->queue / EFX_TXQ_TYPES));
- spin_unlock_bh(&efx->netif_stop_lock);
+ spin_unlock_bh(&channel->tx_stop_lock);
}
-/* Wake netif's TX queue
- * We want to be able to nest calls to netif_stop_queue(), since each
- * channel can have an individual stop on the queue.
- */
-void efx_wake_queue(struct efx_nic *efx)
+/* Decrement core TX queue stop count and wake it if the count is 0 */
+void efx_wake_queue(struct efx_channel *channel)
{
+ struct efx_nic *efx = channel->efx;
+
+ if (!channel->tx_queue)
+ return;
+
local_bh_disable();
- if (atomic_dec_and_lock(&efx->netif_stop_count,
- &efx->netif_stop_lock)) {
+ if (atomic_dec_and_lock(&channel->tx_stop_count,
+ &channel->tx_stop_lock)) {
EFX_TRACE(efx, "waking TX queue\n");
- netif_wake_queue(efx->net_dev);
- spin_unlock(&efx->netif_stop_lock);
+ netif_tx_wake_queue(
+ netdev_get_tx_queue(
+ efx->net_dev,
+ channel->tx_queue->queue / EFX_TXQ_TYPES));
+ spin_unlock(&channel->tx_stop_lock);
}
local_bh_enable();
}
@@ -298,7 +312,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
rc = NETDEV_TX_BUSY;
if (tx_queue->stopped == 1)
- efx_stop_queue(efx);
+ efx_stop_queue(tx_queue->channel);
unwind:
/* Work backwards until we hit the original insert pointer value */
@@ -374,10 +388,9 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
if (unlikely(efx->port_inhibited))
return NETDEV_TX_BUSY;
+ tx_queue = &efx->tx_queue[EFX_TXQ_TYPES * skb_get_queue_mapping(skb)];
if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
- tx_queue = &efx->tx_queue[EFX_TX_QUEUE_OFFLOAD_CSUM];
- else
- tx_queue = &efx->tx_queue[EFX_TX_QUEUE_NO_CSUM];
+ tx_queue += EFX_TXQ_TYPE_OFFLOAD;
return efx_enqueue_skb(tx_queue, skb);
}
@@ -405,7 +418,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
netif_tx_lock(efx->net_dev);
if (tx_queue->stopped) {
tx_queue->stopped = 0;
- efx_wake_queue(efx);
+ efx_wake_queue(tx_queue->channel);
}
netif_tx_unlock(efx->net_dev);
}
@@ -488,7 +501,7 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
/* Release queue's stop on port, if any */
if (tx_queue->stopped) {
tx_queue->stopped = 0;
- efx_wake_queue(tx_queue->efx);
+ efx_wake_queue(tx_queue->channel);
}
}
@@ -1120,7 +1133,7 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
/* Stop the queue if it wasn't stopped before. */
if (tx_queue->stopped == 1)
- efx_stop_queue(efx);
+ efx_stop_queue(tx_queue->channel);
unwind:
/* Free the DMA mapping we were in the process of writing out */
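With this change each core (netdev) TX queue fronts EFX_TXQ_TYPES hardware queues, so picking a hardware queue in efx_hard_start_xmit reduces to a multiply plus a flag-controlled offset. A stand-alone restatement of that arithmetic, with the constant values assumed (EFX_TXQ_TYPES == 2, EFX_TXQ_TYPE_OFFLOAD == 1, as the bit-test on tx_queue->queue elsewhere in the patch suggests):

	/* Sketch of the queue-selection arithmetic; constant values assumed */
	#define EFX_TXQ_TYPES		2
	#define EFX_TXQ_TYPE_OFFLOAD	1

	static unsigned int efx_pick_hw_queue(unsigned int core_queue,
					      bool csum_offload)
	{
		unsigned int hw_queue = core_queue * EFX_TXQ_TYPES;

		if (csum_offload)
			hw_queue += EFX_TXQ_TYPE_OFFLOAD; /* odd queues offload checksums */

		return hw_queue;
	}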
diff --git a/drivers/net/sfc/workarounds.h b/drivers/net/sfc/workarounds.h
index acd9c734e483..518f7fc91473 100644
--- a/drivers/net/sfc/workarounds.h
+++ b/drivers/net/sfc/workarounds.h
@@ -37,7 +37,7 @@
/* Truncated IPv4 packets can confuse the TX packet parser */
#define EFX_WORKAROUND_15592 EFX_WORKAROUND_FALCON_AB
/* Legacy ISR read can return zero once */
-#define EFX_WORKAROUND_15783 EFX_WORKAROUND_SIENA
+#define EFX_WORKAROUND_15783 EFX_WORKAROUND_ALWAYS
/* Legacy interrupt storm when interrupt fifo fills */
#define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA
diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c
index c8fc896fc460..cc4bd8c65f8b 100644
--- a/drivers/net/sgiseeq.c
+++ b/drivers/net/sgiseeq.c
@@ -574,7 +574,7 @@ static inline int sgiseeq_reset(struct net_device *dev)
if (err)
return err;
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
return 0;
@@ -638,8 +638,6 @@ static int sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (!(hregs->tx_ctrl & HPC3_ETXCTRL_ACTIVE))
kick_tx(dev, sp, hregs);
- dev->trans_start = jiffies;
-
if (!TX_BUFFS_AVAIL(sp))
netif_stop_queue(dev);
spin_unlock_irqrestore(&sp->tx_lock, flags);
@@ -652,7 +650,7 @@ static void timeout(struct net_device *dev)
printk(KERN_NOTICE "%s: transmit timed out, resetting\n", dev->name);
sgiseeq_reset(dev);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
}
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index 6242b85d5d15..501a55ffce57 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -1148,8 +1148,6 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
if (!(ctrl_inl(ndev->base_addr + EDTRR) & EDTRR_TRNS))
ctrl_outl(EDTRR_TRNS, ndev->base_addr + EDTRR);
- ndev->trans_start = jiffies;
-
return NETDEV_TX_OK;
}
@@ -1296,6 +1294,9 @@ static int sh_mdio_release(struct net_device *ndev)
/* remove mdio bus info from net_device */
dev_set_drvdata(&ndev->dev, NULL);
+ /* free interrupts memory */
+ kfree(bus->irq);
+
/* free bitbang info */
free_mdio_bitbang(bus);
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
index b30ce752bbf3..a5d6a6bd0c1a 100644
--- a/drivers/net/sis190.c
+++ b/drivers/net/sis190.c
@@ -849,13 +849,13 @@ static void sis190_set_rx_mode(struct net_device *dev)
rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0xffffffff;
} else {
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
rx_mode = AcceptBroadcast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0;
- netdev_for_each_mc_addr(mclist, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
int bit_nr =
- ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f;
+ ether_crc(ETH_ALEN, ha->addr) & 0x3f;
mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
rx_mode |= AcceptMulticast;
}
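This is the first of several identical conversions in this series: struct dev_mc_list and its dmi_addr field give way to struct netdev_hw_addr and ha->addr, while netdev_for_each_mc_addr keeps the same loop shape. A minimal, self-contained sketch of the new pattern, mirroring the 64-bit CRC hash filter in the sis190 hunk above:

	#include <linux/crc32.h>
	#include <linux/etherdevice.h>
	#include <linux/netdevice.h>

	/* Sketch only: build a 64-bit multicast hash filter the new way */
	static void build_mc_hash_filter(struct net_device *dev, u32 mc_filter[2])
	{
		struct netdev_hw_addr *ha;

		mc_filter[0] = mc_filter[1] = 0;
		netdev_for_each_mc_addr(ha, dev) {
			int bit_nr = ether_crc(ETH_ALEN, ha->addr) & 0x3f;

			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
		}
	}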
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index cc0c731c4f09..bbbded76ff14 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -858,7 +858,6 @@ static void mdio_reset(long mdio_addr)
outl(MDDIR | MDIO | MDC, mdio_addr);
mdio_delay();
}
- return;
}
/**
@@ -953,8 +952,6 @@ static void mdio_write(struct net_device *net_dev, int phy_id, int location,
mdio_delay();
}
outl(0x00, mdio_addr);
-
- return;
}
@@ -1264,7 +1261,6 @@ static void sis630_set_eq(struct net_device *net_dev, u8 revision)
mdio_write(net_dev, sis_priv->cur_phy, MII_RESV,
(reg14h | 0x2000) & 0xBFFF);
}
- return;
}
/**
@@ -1499,7 +1495,7 @@ static void sis900_read_mode(struct net_device *net_dev, int *speed, int *duplex
}
if(netif_msg_link(sis_priv))
- printk(KERN_INFO "%s: Media Link On %s %s-duplex \n",
+ printk(KERN_INFO "%s: Media Link On %s %s-duplex\n",
net_dev->name,
*speed == HW_SPEED_100_MBPS ?
"100mbps" : "10mbps",
@@ -1523,7 +1519,7 @@ static void sis900_tx_timeout(struct net_device *net_dev)
int i;
if(netif_msg_tx_err(sis_priv))
- printk(KERN_INFO "%s: Transmit timeout, status %8.8x %8.8x \n",
+ printk(KERN_INFO "%s: Transmit timeout, status %8.8x %8.8x\n",
net_dev->name, inl(ioaddr + cr), inl(ioaddr + isr));
/* Disable interrupts by clearing the interrupt mask. */
@@ -1553,14 +1549,13 @@ static void sis900_tx_timeout(struct net_device *net_dev)
spin_unlock_irqrestore(&sis_priv->lock, flags);
- net_dev->trans_start = jiffies;
+ net_dev->trans_start = jiffies; /* prevent tx timeout */
/* load Transmit Descriptor Register */
outl(sis_priv->tx_ring_dma, ioaddr + txdp);
/* Enable all known interrupts by setting the interrupt mask. */
outl((RxSOVR|RxORN|RxERR|RxOK|TxURN|TxERR|TxIDLE), ioaddr + imr);
- return;
}
/**
@@ -1623,8 +1618,6 @@ sis900_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
spin_unlock_irqrestore(&sis_priv->lock, flags);
- net_dev->trans_start = jiffies;
-
if (netif_msg_tx_queued(sis_priv))
printk(KERN_DEBUG "%s: Queued Tx packet at %p size %d "
"to slot %d.\n",
@@ -2298,12 +2291,14 @@ static void set_rx_mode(struct net_device *net_dev)
/* Accept Broadcast packet, destination address matchs our
* MAC address, use Receive Filter to reject unwanted MCAST
* packets */
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
rx_mode = RFAAB;
- netdev_for_each_mc_addr(mclist, net_dev) {
- unsigned int bit_nr =
- sis900_mcast_bitnr(mclist->dmi_addr, sis_priv->chipset_rev);
+ netdev_for_each_mc_addr(ha, net_dev) {
+ unsigned int bit_nr;
+
+ bit_nr = sis900_mcast_bitnr(ha->addr,
+ sis_priv->chipset_rev);
mc_filter[bit_nr >> 4] |= (1 << (bit_nr & 0xf));
}
}
@@ -2330,8 +2325,6 @@ static void set_rx_mode(struct net_device *net_dev)
/* restore cr */
outl(cr_saved, ioaddr + cr);
}
-
- return;
}
/**
diff --git a/drivers/net/skfp/fplustm.c b/drivers/net/skfp/fplustm.c
index 6028bbb3b28a..9d8d1ac48176 100644
--- a/drivers/net/skfp/fplustm.c
+++ b/drivers/net/skfp/fplustm.c
@@ -1352,7 +1352,7 @@ void rtm_set_timer(struct s_smc *smc)
/*
* MIB timer and hardware timer have the same resolution of 80nS
*/
- DB_RMT("RMT: setting new fddiPATHT_Rmode, t = %d ns \n",
+ DB_RMT("RMT: setting new fddiPATHT_Rmode, t = %d ns\n",
(int) smc->mib.a[PATH0].fddiPATHT_Rmode,0) ;
outpd(ADDR(B2_RTM_INI),smc->mib.a[PATH0].fddiPATHT_Rmode) ;
}
diff --git a/drivers/net/skfp/pcmplc.c b/drivers/net/skfp/pcmplc.c
index e6b33ee05ede..ba45bc794d77 100644
--- a/drivers/net/skfp/pcmplc.c
+++ b/drivers/net/skfp/pcmplc.c
@@ -1277,7 +1277,7 @@ static void pc_rcode_actions(struct s_smc *smc, int bit, struct s_phy *phy)
mib = phy->mib ;
- DB_PCMN(1,"SIG rec %x %x: \n", bit,phy->r_val[bit] ) ;
+ DB_PCMN(1,"SIG rec %x %x:\n", bit,phy->r_val[bit] ) ;
bit++ ;
switch(bit) {
@@ -1580,7 +1580,7 @@ static void pc_tcode_actions(struct s_smc *smc, const int bit, struct s_phy *phy
mib->fddiPORTMacIndicated.T_val = phy->t_val[9] ;
break ;
}
- DB_PCMN(1,"SIG snd %x %x: \n", bit,phy->t_val[bit] ) ;
+ DB_PCMN(1,"SIG snd %x %x:\n", bit,phy->t_val[bit] ) ;
}
/*
diff --git a/drivers/net/skfp/skfddi.c b/drivers/net/skfp/skfddi.c
index d9016b75abc2..31b2dabf094c 100644
--- a/drivers/net/skfp/skfddi.c
+++ b/drivers/net/skfp/skfddi.c
@@ -844,7 +844,6 @@ static void skfp_ctl_set_multicast_list(struct net_device *dev)
spin_lock_irqsave(&bp->DriverLock, Flags);
skfp_ctl_set_multicast_list_wo_lock(dev);
spin_unlock_irqrestore(&bp->DriverLock, Flags);
- return;
} // skfp_ctl_set_multicast_list
@@ -852,7 +851,7 @@ static void skfp_ctl_set_multicast_list(struct net_device *dev)
static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev)
{
struct s_smc *smc = netdev_priv(dev);
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
/* Enable promiscuous mode, if necessary */
if (dev->flags & IFF_PROMISC) {
@@ -876,13 +875,13 @@ static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev)
/* use exact filtering */
// point to first multicast addr
- netdev_for_each_mc_addr(dmi, dev) {
- mac_add_multicast(smc,
- (struct fddi_addr *)dmi->dmi_addr,
- 1);
+ netdev_for_each_mc_addr(ha, dev) {
+ mac_add_multicast(smc,
+ (struct fddi_addr *)ha->addr,
+ 1);
pr_debug(KERN_INFO "ENABLE MC ADDRESS: %pMF\n",
- dmi->dmi_addr);
+ ha->addr);
}
} else { // more MC addresses than HW supports
@@ -898,7 +897,6 @@ static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev)
/* Update adapter filters */
mac_update_multicast(smc);
}
- return;
} // skfp_ctl_set_multicast_list_wo_lock
@@ -1076,7 +1074,6 @@ static netdev_tx_t skfp_send_pkt(struct sk_buff *skb,
if (bp->QueueSkb == 0) {
netif_stop_queue(dev);
}
- dev->trans_start = jiffies;
return NETDEV_TX_OK;
} // skfp_send_pkt
diff --git a/drivers/net/skfp/smt.c b/drivers/net/skfp/smt.c
index 83d16fecfac4..6f35bb77595f 100644
--- a/drivers/net/skfp/smt.c
+++ b/drivers/net/skfp/smt.c
@@ -574,7 +574,7 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs)
if (smt_check_para(smc,sm,plist_nif)) {
DB_SMT("SMT: NIF with para problem, ignoring\n",0,0) ;
break ;
- } ;
+ }
switch (sm->smt_type) {
case SMT_ANNOUNCE :
case SMT_REQUEST :
diff --git a/drivers/net/skfp/srf.c b/drivers/net/skfp/srf.c
index 6caf713b744c..40882b3faba6 100644
--- a/drivers/net/skfp/srf.c
+++ b/drivers/net/skfp/srf.c
@@ -414,7 +414,7 @@ static void smt_send_srf(struct s_smc *smc)
smt->smt_len = SMT_MAX_INFO_LEN - pcon.pc_len ;
mb->sm_len = smt->smt_len + sizeof(struct smt_header) ;
- DB_SMT("SRF: sending SRF at %x, len %d \n",smt,mb->sm_len) ;
+ DB_SMT("SRF: sending SRF at %x, len %d\n",smt,mb->sm_len) ;
DB_SMT("SRF: state SR%d Threshold %d\n",
smc->srf.sr_state,smc->srf.SRThreshold/TICKS_PER_SECOND) ;
#ifdef DEBUG
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 50eb70609f20..40e5c46e7571 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -984,8 +984,8 @@ static void skge_rx_setup(struct skge_port *skge, struct skge_element *e,
wmb();
rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize;
- pci_unmap_addr_set(e, mapaddr, map);
- pci_unmap_len_set(e, maplen, bufsize);
+ dma_unmap_addr_set(e, mapaddr, map);
+ dma_unmap_len_set(e, maplen, bufsize);
}
/* Resume receiving using existing skb,
@@ -1018,8 +1018,8 @@ static void skge_rx_clean(struct skge_port *skge)
rd->control = 0;
if (e->skb) {
pci_unmap_single(hw->pdev,
- pci_unmap_addr(e, mapaddr),
- pci_unmap_len(e, maplen),
+ dma_unmap_addr(e, mapaddr),
+ dma_unmap_len(e, maplen),
PCI_DMA_FROMDEVICE);
dev_kfree_skb(e->skb);
e->skb = NULL;
@@ -2756,8 +2756,8 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
e->skb = skb;
len = skb_headlen(skb);
map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE);
- pci_unmap_addr_set(e, mapaddr, map);
- pci_unmap_len_set(e, maplen, len);
+ dma_unmap_addr_set(e, mapaddr, map);
+ dma_unmap_len_set(e, maplen, len);
td->dma_lo = map;
td->dma_hi = map >> 32;
@@ -2799,8 +2799,8 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
tf->dma_lo = map;
tf->dma_hi = (u64) map >> 32;
- pci_unmap_addr_set(e, mapaddr, map);
- pci_unmap_len_set(e, maplen, frag->size);
+ dma_unmap_addr_set(e, mapaddr, map);
+ dma_unmap_len_set(e, maplen, frag->size);
tf->control = BMU_OWN | BMU_SW | control | frag->size;
}
@@ -2837,12 +2837,12 @@ static void skge_tx_free(struct skge_port *skge, struct skge_element *e,
/* skb header vs. fragment */
if (control & BMU_STF)
- pci_unmap_single(pdev, pci_unmap_addr(e, mapaddr),
- pci_unmap_len(e, maplen),
+ pci_unmap_single(pdev, dma_unmap_addr(e, mapaddr),
+ dma_unmap_len(e, maplen),
PCI_DMA_TODEVICE);
else
- pci_unmap_page(pdev, pci_unmap_addr(e, mapaddr),
- pci_unmap_len(e, maplen),
+ pci_unmap_page(pdev, dma_unmap_addr(e, mapaddr),
+ dma_unmap_len(e, maplen),
PCI_DMA_TODEVICE);
if (control & BMU_EOF) {
@@ -2918,7 +2918,7 @@ static void genesis_set_multicast(struct net_device *dev)
struct skge_port *skge = netdev_priv(dev);
struct skge_hw *hw = skge->hw;
int port = skge->port;
- struct dev_mc_list *list;
+ struct netdev_hw_addr *ha;
u32 mode;
u8 filter[8];
@@ -2938,8 +2938,8 @@ static void genesis_set_multicast(struct net_device *dev)
skge->flow_status == FLOW_STAT_SYMMETRIC)
genesis_add_filter(filter, pause_mc_addr);
- netdev_for_each_mc_addr(list, dev)
- genesis_add_filter(filter, list->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev)
+ genesis_add_filter(filter, ha->addr);
}
xm_write32(hw, port, XM_MODE, mode);
@@ -2957,7 +2957,7 @@ static void yukon_set_multicast(struct net_device *dev)
struct skge_port *skge = netdev_priv(dev);
struct skge_hw *hw = skge->hw;
int port = skge->port;
- struct dev_mc_list *list;
+ struct netdev_hw_addr *ha;
int rx_pause = (skge->flow_status == FLOW_STAT_REM_SEND ||
skge->flow_status == FLOW_STAT_SYMMETRIC);
u16 reg;
@@ -2980,8 +2980,8 @@ static void yukon_set_multicast(struct net_device *dev)
if (rx_pause)
yukon_add_filter(filter, pause_mc_addr);
- netdev_for_each_mc_addr(list, dev)
- yukon_add_filter(filter, list->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev)
+ yukon_add_filter(filter, ha->addr);
}
@@ -3060,11 +3060,11 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
goto resubmit;
pci_dma_sync_single_for_cpu(skge->hw->pdev,
- pci_unmap_addr(e, mapaddr),
+ dma_unmap_addr(e, mapaddr),
len, PCI_DMA_FROMDEVICE);
skb_copy_from_linear_data(e->skb, skb->data, len);
pci_dma_sync_single_for_device(skge->hw->pdev,
- pci_unmap_addr(e, mapaddr),
+ dma_unmap_addr(e, mapaddr),
len, PCI_DMA_FROMDEVICE);
skge_rx_reuse(e, skge->rx_buf_size);
} else {
@@ -3075,8 +3075,8 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
goto resubmit;
pci_unmap_single(skge->hw->pdev,
- pci_unmap_addr(e, mapaddr),
- pci_unmap_len(e, maplen),
+ dma_unmap_addr(e, mapaddr),
+ dma_unmap_len(e, maplen),
PCI_DMA_FROMDEVICE);
skb = e->skb;
prefetch(skb->data);
@@ -3667,7 +3667,7 @@ static int skge_debug_show(struct seq_file *seq, void *v)
t->csum_offs, t->csum_write, t->csum_start);
}
- seq_printf(seq, "\nRx Ring: \n");
+ seq_printf(seq, "\nRx Ring:\n");
for (e = skge->rx_ring.to_clean; ; e = e->next) {
const struct skge_rx_desc *r = e->desc;
diff --git a/drivers/net/skge.h b/drivers/net/skge.h
index 831de1b6e96e..507addcaffa3 100644
--- a/drivers/net/skge.h
+++ b/drivers/net/skge.h
@@ -2393,8 +2393,8 @@ struct skge_element {
struct skge_element *next;
void *desc;
struct sk_buff *skb;
- DECLARE_PCI_UNMAP_ADDR(mapaddr);
- DECLARE_PCI_UNMAP_LEN(maplen);
+ DEFINE_DMA_UNMAP_ADDR(mapaddr);
+ DEFINE_DMA_UNMAP_LEN(maplen);
};
struct skge_ring {
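skge here, and sky2 below, migrate from the PCI-specific DECLARE_PCI_UNMAP_ADDR/LEN bookkeeping macros to the generic DMA API equivalents; the declarations and accessors swap one for one. A minimal sketch of the new spelling (struct and field names hypothetical):

	#include <linux/dma-mapping.h>

	struct ring_slot {
		DEFINE_DMA_UNMAP_ADDR(mapaddr);	/* was DECLARE_PCI_UNMAP_ADDR(mapaddr) */
		DEFINE_DMA_UNMAP_LEN(maplen);	/* was DECLARE_PCI_UNMAP_LEN(maplen) */
	};

	static void ring_slot_save_mapping(struct ring_slot *slot,
					   dma_addr_t addr, unsigned int len)
	{
		dma_unmap_addr_set(slot, mapaddr, addr);	/* was pci_unmap_addr_set */
		dma_unmap_len_set(slot, maplen, len);		/* was pci_unmap_len_set */
	}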
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 088c797eb73b..2111c7bbf578 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -53,7 +53,7 @@
#include "sky2.h"
#define DRV_NAME "sky2"
-#define DRV_VERSION "1.27"
+#define DRV_VERSION "1.28"
/*
* The Yukon II chipset takes 64 bit command blocks (called list elements)
@@ -70,18 +70,15 @@
VLAN:GSO + CKSUM + Data + skb_frags * DMA */
#define MAX_SKB_TX_LE (2 + (sizeof(dma_addr_t)/sizeof(u32))*(MAX_SKB_FRAGS+1))
#define TX_MIN_PENDING (MAX_SKB_TX_LE+1)
-#define TX_MAX_PENDING 4096
+#define TX_MAX_PENDING 1024
#define TX_DEF_PENDING 127
-#define STATUS_RING_SIZE 2048 /* 2 ports * (TX + 2*RX) */
-#define STATUS_LE_BYTES (STATUS_RING_SIZE*sizeof(struct sky2_status_le))
#define TX_WATCHDOG (5 * HZ)
#define NAPI_WEIGHT 64
#define PHY_RETRIES 1000
#define SKY2_EEPROM_MAGIC 0x9955aabb
-
#define RING_NEXT(x,s) (((x)+1) & ((s)-1))
static const u32 default_msg =
@@ -227,7 +224,7 @@ static void sky2_power_on(struct sky2_hw *hw)
/* disable Core Clock Division, */
sky2_write32(hw, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);
- if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
+ if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > CHIP_REV_YU_XL_A1)
/* enable bits are inverted */
sky2_write8(hw, B2_Y2_CLK_GATE,
Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
@@ -269,7 +266,7 @@ static void sky2_power_on(struct sky2_hw *hw)
static void sky2_power_aux(struct sky2_hw *hw)
{
- if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
+ if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > CHIP_REV_YU_XL_A1)
sky2_write8(hw, B2_Y2_CLK_GATE, 0);
else
/* enable bits are inverted */
@@ -652,7 +649,7 @@ static void sky2_phy_power_up(struct sky2_hw *hw, unsigned port)
reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
reg1 &= ~phy_power[port];
- if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
+ if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > CHIP_REV_YU_XL_A1)
reg1 |= coma_mode[port];
sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
@@ -824,7 +821,9 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR);
- if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0 && port == 1) {
+ if (hw->chip_id == CHIP_ID_YUKON_XL &&
+ hw->chip_rev == CHIP_REV_YU_XL_A0 &&
+ port == 1) {
/* WA DEV_472 -- looks like crossed wires on port 2 */
/* clear GMAC 1 Control reset */
sky2_write8(hw, SK_REG(0, GMAC_CTRL), GMC_RST_CLR);
@@ -878,6 +877,10 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
if (hw->dev[port]->mtu > ETH_DATA_LEN)
reg |= GM_SMOD_JUMBO_ENA;
+ if (hw->chip_id == CHIP_ID_YUKON_EC_U &&
+ hw->chip_rev == CHIP_REV_YU_EC_U_B1)
+ reg |= GM_NEW_FLOW_CTRL;
+
gma_write16(hw, port, GM_SERIAL_MODE, reg);
/* virtual address for data */
@@ -1126,7 +1129,7 @@ static int sky2_rx_map_skb(struct pci_dev *pdev, struct rx_ring_info *re,
if (pci_dma_mapping_error(pdev, re->data_addr))
goto mapping_error;
- pci_unmap_len_set(re, data_size, size);
+ dma_unmap_len_set(re, data_size, size);
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -1148,7 +1151,7 @@ map_page_error:
PCI_DMA_FROMDEVICE);
}
- pci_unmap_single(pdev, re->data_addr, pci_unmap_len(re, data_size),
+ pci_unmap_single(pdev, re->data_addr, dma_unmap_len(re, data_size),
PCI_DMA_FROMDEVICE);
mapping_error:
@@ -1163,7 +1166,7 @@ static void sky2_rx_unmap_skb(struct pci_dev *pdev, struct rx_ring_info *re)
struct sk_buff *skb = re->skb;
int i;
- pci_unmap_single(pdev, re->data_addr, pci_unmap_len(re, data_size),
+ pci_unmap_single(pdev, re->data_addr, dma_unmap_len(re, data_size),
PCI_DMA_FROMDEVICE);
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
@@ -1190,6 +1193,39 @@ static void rx_set_checksum(struct sky2_port *sky2)
? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
}
+/* Enable/disable receive hash calculation (RSS) */
+static void rx_set_rss(struct net_device *dev)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+ struct sky2_hw *hw = sky2->hw;
+ int i, nkeys = 4;
+
+ /* Supports IPv6 and other modes */
+ if (hw->flags & SKY2_HW_NEW_LE) {
+ nkeys = 10;
+ sky2_write32(hw, SK_REG(sky2->port, RSS_CFG), HASH_ALL);
+ }
+
+ /* Program RSS initial values */
+ if (dev->features & NETIF_F_RXHASH) {
+ u32 key[nkeys];
+
+ get_random_bytes(key, nkeys * sizeof(u32));
+ for (i = 0; i < nkeys; i++)
+ sky2_write32(hw, SK_REG(sky2->port, RSS_KEY + i * 4),
+ key[i]);
+
+ /* Need to turn on (undocumented) flag to make hashing work */
+ sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T),
+ RX_STFW_ENA);
+
+ sky2_write32(hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
+ BMU_ENA_RX_RSS_HASH);
+ } else
+ sky2_write32(hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
+ BMU_DIS_RX_RSS_HASH);
+}
+
/*
* The RX Stop command will not work for Yukon-2 if the BMU does not
* reach the end of packet and since we can't make sure that we have
@@ -1414,8 +1450,7 @@ static void sky2_rx_start(struct sky2_port *sky2)
/* These chips have no ram buffer?
* MAC Rx RAM Read is controlled by hardware */
if (hw->chip_id == CHIP_ID_YUKON_EC_U &&
- (hw->chip_rev == CHIP_REV_YU_EC_U_A1 ||
- hw->chip_rev == CHIP_REV_YU_EC_U_B0))
+ hw->chip_rev > CHIP_REV_YU_EC_U_A0)
sky2_write32(hw, Q_ADDR(rxq, Q_TEST), F_M_RX_RAM_DIS);
sky2_prefetch_init(hw, rxq, sky2->rx_le_map, RX_LE_SIZE - 1);
@@ -1423,6 +1458,9 @@ static void sky2_rx_start(struct sky2_port *sky2)
if (!(hw->flags & SKY2_HW_NEW_LE))
rx_set_checksum(sky2);
+ if (!(hw->flags & SKY2_HW_RSS_BROKEN))
+ rx_set_rss(sky2->netdev);
+
/* submit Rx ring */
for (i = 0; i < sky2->rx_pending; i++) {
re = sky2->rx_ring + i;
@@ -1657,12 +1695,12 @@ static unsigned tx_le_req(const struct sk_buff *skb)
static void sky2_tx_unmap(struct pci_dev *pdev, struct tx_ring_info *re)
{
if (re->flags & TX_MAP_SINGLE)
- pci_unmap_single(pdev, pci_unmap_addr(re, mapaddr),
- pci_unmap_len(re, maplen),
+ pci_unmap_single(pdev, dma_unmap_addr(re, mapaddr),
+ dma_unmap_len(re, maplen),
PCI_DMA_TODEVICE);
else if (re->flags & TX_MAP_PAGE)
- pci_unmap_page(pdev, pci_unmap_addr(re, mapaddr),
- pci_unmap_len(re, maplen),
+ pci_unmap_page(pdev, dma_unmap_addr(re, mapaddr),
+ dma_unmap_len(re, maplen),
PCI_DMA_TODEVICE);
re->flags = 0;
}
@@ -1773,8 +1811,8 @@ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb,
re = sky2->tx_ring + slot;
re->flags = TX_MAP_SINGLE;
- pci_unmap_addr_set(re, mapaddr, mapping);
- pci_unmap_len_set(re, maplen, len);
+ dma_unmap_addr_set(re, mapaddr, mapping);
+ dma_unmap_len_set(re, maplen, len);
le = get_tx_le(sky2, &slot);
le->addr = cpu_to_le32(lower_32_bits(mapping));
@@ -1802,8 +1840,8 @@ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb,
re = sky2->tx_ring + slot;
re->flags = TX_MAP_PAGE;
- pci_unmap_addr_set(re, mapaddr, mapping);
- pci_unmap_len_set(re, maplen, frag->size);
+ dma_unmap_addr_set(re, mapaddr, mapping);
+ dma_unmap_len_set(re, maplen, frag->size);
le = get_tx_le(sky2, &slot);
le->addr = cpu_to_le32(lower_32_bits(mapping));
@@ -2142,7 +2180,8 @@ static void sky2_phy_intr(struct sky2_hw *hw, unsigned port)
istatus, phystat);
if (istatus & PHY_M_IS_AN_COMPL) {
- if (sky2_autoneg_done(sky2, phystat) == 0)
+ if (sky2_autoneg_done(sky2, phystat) == 0 &&
+ !netif_carrier_ok(dev))
sky2_link_up(sky2);
goto out;
}
@@ -2236,8 +2275,8 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
sky2_write32(hw, B0_IMSK, 0);
dev->trans_start = jiffies; /* prevent tx timeout */
- netif_stop_queue(dev);
napi_disable(&hw->napi);
+ netif_tx_disable(dev);
synchronize_irq(hw->pdev->irq);
@@ -2531,6 +2570,14 @@ static void sky2_rx_checksum(struct sky2_port *sky2, u32 status)
}
}
+static void sky2_rx_hash(struct sky2_port *sky2, u32 status)
+{
+ struct sk_buff *skb;
+
+ skb = sky2->rx_ring[sky2->rx_next].skb;
+ skb->rxhash = le32_to_cpu(status);
+}
+
/* Process status response ring */
static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
{
@@ -2552,7 +2599,7 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
if (!(opcode & HW_OWNER))
break;
- hw->st_idx = RING_NEXT(hw->st_idx, STATUS_RING_SIZE);
+ hw->st_idx = RING_NEXT(hw->st_idx, hw->st_size);
port = le->css & CSS_LINK_BIT;
dev = hw->dev[port];
@@ -2603,6 +2650,10 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
sky2_rx_checksum(sky2, status);
break;
+ case OP_RSS_HASH:
+ sky2_rx_hash(sky2, status);
+ break;
+
case OP_TXINDEXLE:
/* TX index reports status for both ports */
sky2_tx_done(hw->dev[0], status & 0xfff);
@@ -2957,6 +3008,8 @@ static int __devinit sky2_init(struct sky2_hw *hw)
switch(hw->chip_id) {
case CHIP_ID_YUKON_XL:
hw->flags = SKY2_HW_GIGABIT | SKY2_HW_NEWER_PHY;
+ if (hw->chip_rev < CHIP_REV_YU_XL_A2)
+ hw->flags |= SKY2_HW_RSS_BROKEN;
break;
case CHIP_ID_YUKON_EC_U:
@@ -2982,10 +3035,11 @@ static int __devinit sky2_init(struct sky2_hw *hw)
dev_err(&hw->pdev->dev, "unsupported revision Yukon-EC rev A1\n");
return -EOPNOTSUPP;
}
- hw->flags = SKY2_HW_GIGABIT;
+ hw->flags = SKY2_HW_GIGABIT | SKY2_HW_RSS_BROKEN;
break;
case CHIP_ID_YUKON_FE:
+ hw->flags = SKY2_HW_RSS_BROKEN;
break;
case CHIP_ID_YUKON_FE_P:
@@ -3192,7 +3246,7 @@ static void sky2_reset(struct sky2_hw *hw)
for (i = 0; i < hw->ports; i++)
sky2_gmac_reset(hw, i);
- memset(hw->st_le, 0, STATUS_LE_BYTES);
+ memset(hw->st_le, 0, hw->st_size * sizeof(struct sky2_status_le));
hw->st_idx = 0;
sky2_write32(hw, STAT_CTRL, SC_STAT_RST_SET);
@@ -3202,7 +3256,7 @@ static void sky2_reset(struct sky2_hw *hw)
sky2_write32(hw, STAT_LIST_ADDR_HI, (u64) hw->st_dma >> 32);
/* Set the list last index */
- sky2_write16(hw, STAT_LAST_IDX, STATUS_RING_SIZE - 1);
+ sky2_write16(hw, STAT_LAST_IDX, hw->st_size - 1);
sky2_write16(hw, STAT_TX_IDX_TH, 10);
sky2_write8(hw, STAT_FIFO_WM, 16);
@@ -3258,18 +3312,14 @@ static int sky2_reattach(struct net_device *dev)
return err;
}
-static void sky2_restart(struct work_struct *work)
+static void sky2_all_down(struct sky2_hw *hw)
{
- struct sky2_hw *hw = container_of(work, struct sky2_hw, restart_work);
- u32 imask;
int i;
- rtnl_lock();
-
- napi_disable(&hw->napi);
- synchronize_irq(hw->pdev->irq);
- imask = sky2_read32(hw, B0_IMSK);
+ sky2_read32(hw, B0_IMSK);
sky2_write32(hw, B0_IMSK, 0);
+ synchronize_irq(hw->pdev->irq);
+ napi_disable(&hw->napi);
for (i = 0; i < hw->ports; i++) {
struct net_device *dev = hw->dev[i];
@@ -3282,8 +3332,12 @@ static void sky2_restart(struct work_struct *work)
netif_tx_disable(dev);
sky2_hw_down(sky2);
}
+}
- sky2_reset(hw);
+static void sky2_all_up(struct sky2_hw *hw)
+{
+ u32 imask = Y2_IS_BASE;
+ int i;
for (i = 0; i < hw->ports; i++) {
struct net_device *dev = hw->dev[i];
@@ -3293,6 +3347,8 @@ static void sky2_restart(struct work_struct *work)
continue;
sky2_hw_up(sky2);
+ sky2_set_multicast(dev);
+ imask |= portirq_msk[i];
netif_wake_queue(dev);
}
@@ -3301,6 +3357,17 @@ static void sky2_restart(struct work_struct *work)
sky2_read32(hw, B0_Y2_SP_LISR);
napi_enable(&hw->napi);
+}
+
+static void sky2_restart(struct work_struct *work)
+{
+ struct sky2_hw *hw = container_of(work, struct sky2_hw, restart_work);
+
+ rtnl_lock();
+
+ sky2_all_down(hw);
+ sky2_reset(hw);
+ sky2_all_up(hw);
rtnl_unlock();
}
@@ -3622,7 +3689,7 @@ static void sky2_set_multicast(struct net_device *dev)
struct sky2_port *sky2 = netdev_priv(dev);
struct sky2_hw *hw = sky2->hw;
unsigned port = sky2->port;
- struct dev_mc_list *list;
+ struct netdev_hw_addr *ha;
u16 reg;
u8 filter[8];
int rx_pause;
@@ -3646,8 +3713,8 @@ static void sky2_set_multicast(struct net_device *dev)
if (rx_pause)
sky2_add_filter(filter, pause_mc_addr);
- netdev_for_each_mc_addr(list, dev)
- sky2_add_filter(filter, list->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev)
+ sky2_add_filter(filter, ha->addr);
}
gma_write16(hw, port, GM_MC_ADDR_H1,
@@ -4109,6 +4176,25 @@ static int sky2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom
return sky2_vpd_write(sky2->hw, cap, data, eeprom->offset, eeprom->len);
}
+static int sky2_set_flags(struct net_device *dev, u32 data)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+
+ if (data & ~ETH_FLAG_RXHASH)
+ return -EOPNOTSUPP;
+
+ if (data & ETH_FLAG_RXHASH) {
+ if (sky2->hw->flags & SKY2_HW_RSS_BROKEN)
+ return -EINVAL;
+
+ dev->features |= NETIF_F_RXHASH;
+ } else
+ dev->features &= ~NETIF_F_RXHASH;
+
+ rx_set_rss(dev);
+
+ return 0;
+}
static const struct ethtool_ops sky2_ethtool_ops = {
.get_settings = sky2_get_settings,
@@ -4140,6 +4226,7 @@ static const struct ethtool_ops sky2_ethtool_ops = {
.phys_id = sky2_phys_id,
.get_sset_count = sky2_get_sset_count,
.get_ethtool_stats = sky2_get_ethtool_stats,
+ .set_flags = sky2_set_flags,
};
#ifdef CONFIG_SKY2_DEBUG
@@ -4250,12 +4337,13 @@ static int sky2_debug_show(struct seq_file *seq, void *v)
napi_disable(&hw->napi);
last = sky2_read16(hw, STAT_PUT_IDX);
+ seq_printf(seq, "Status ring %u\n", hw->st_size);
if (hw->st_idx == last)
seq_puts(seq, "Status ring (empty)\n");
else {
seq_puts(seq, "Status ring\n");
- for (idx = hw->st_idx; idx != last && idx < STATUS_RING_SIZE;
- idx = RING_NEXT(idx, STATUS_RING_SIZE)) {
+ for (idx = hw->st_idx; idx != last && idx < hw->st_size;
+ idx = RING_NEXT(idx, hw->st_size)) {
const struct sky2_status_le *le = hw->st_le + idx;
seq_printf(seq, "[%d] %#x %d %#x\n",
idx, le->opcode, le->length, le->status);
@@ -4492,6 +4580,10 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
if (highmem)
dev->features |= NETIF_F_HIGHDMA;
+ /* Enable receive hashing unless hardware is known broken */
+ if (!(hw->flags & SKY2_HW_RSS_BROKEN))
+ dev->features |= NETIF_F_RXHASH;
+
#ifdef SKY2_VLAN_TAG_USED
/* The workaround for FE+ status conflicts with VLAN tag detection. */
if (!(sky2->hw->chip_id == CHIP_ID_YUKON_FE_P &&
@@ -4683,15 +4775,17 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
goto err_out_free_hw;
}
- /* ring for status responses */
- hw->st_le = pci_alloc_consistent(pdev, STATUS_LE_BYTES, &hw->st_dma);
- if (!hw->st_le)
- goto err_out_iounmap;
-
err = sky2_init(hw);
if (err)
goto err_out_iounmap;
+ /* ring for status responses */
+ hw->st_size = hw->ports * roundup_pow_of_two(3*RX_MAX_PENDING + TX_MAX_PENDING);
+ hw->st_le = pci_alloc_consistent(pdev, hw->st_size * sizeof(struct sky2_status_le),
+ &hw->st_dma);
+ if (!hw->st_le)
+ goto err_out_reset;
+
dev_info(&pdev->dev, "Yukon-2 %s chip revision %d\n",
sky2_name(hw->chip_id, buf1, sizeof(buf1)), hw->chip_rev);
@@ -4765,8 +4859,10 @@ err_out_unregister:
err_out_free_netdev:
free_netdev(dev);
err_out_free_pci:
+ pci_free_consistent(pdev, hw->st_size * sizeof(struct sky2_status_le),
+ hw->st_le, hw->st_dma);
+err_out_reset:
sky2_write8(hw, B0_CTST, CS_RST_SET);
- pci_free_consistent(pdev, STATUS_LE_BYTES, hw->st_le, hw->st_dma);
err_out_iounmap:
iounmap(hw->regs);
err_out_free_hw:
@@ -4804,7 +4900,8 @@ static void __devexit sky2_remove(struct pci_dev *pdev)
free_irq(pdev->irq, hw);
if (hw->flags & SKY2_HW_USE_MSI)
pci_disable_msi(pdev);
- pci_free_consistent(pdev, STATUS_LE_BYTES, hw->st_le, hw->st_dma);
+ pci_free_consistent(pdev, hw->st_size * sizeof(struct sky2_status_le),
+ hw->st_le, hw->st_dma);
pci_release_regions(pdev);
pci_disable_device(pdev);
@@ -4829,12 +4926,12 @@ static int sky2_suspend(struct pci_dev *pdev, pm_message_t state)
cancel_work_sync(&hw->restart_work);
rtnl_lock();
+
+ sky2_all_down(hw);
for (i = 0; i < hw->ports; i++) {
struct net_device *dev = hw->dev[i];
struct sky2_port *sky2 = netdev_priv(dev);
- sky2_detach(dev);
-
if (sky2->wol)
sky2_wol_init(sky2);
@@ -4843,8 +4940,6 @@ static int sky2_suspend(struct pci_dev *pdev, pm_message_t state)
device_set_wakeup_enable(&pdev->dev, wol != 0);
- sky2_write32(hw, B0_IMSK, 0);
- napi_disable(&hw->napi);
sky2_power_aux(hw);
rtnl_unlock();
@@ -4859,12 +4954,11 @@ static int sky2_suspend(struct pci_dev *pdev, pm_message_t state)
static int sky2_resume(struct pci_dev *pdev)
{
struct sky2_hw *hw = pci_get_drvdata(pdev);
- int i, err;
+ int err;
if (!hw)
return 0;
- rtnl_lock();
err = pci_set_power_state(pdev, PCI_D0);
if (err)
goto out;
@@ -4882,20 +4976,13 @@ static int sky2_resume(struct pci_dev *pdev)
goto out;
}
+ rtnl_lock();
sky2_reset(hw);
- sky2_write32(hw, B0_IMSK, Y2_IS_BASE);
- napi_enable(&hw->napi);
-
- for (i = 0; i < hw->ports; i++) {
- err = sky2_reattach(hw->dev[i]);
- if (err)
- goto out;
- }
+ sky2_all_up(hw);
rtnl_unlock();
return 0;
out:
- rtnl_unlock();
dev_err(&pdev->dev, "resume failed (%d)\n", err);
pci_disable_device(pdev);
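The new sky2_set_flags hook is reached from userspace through the ETHTOOL_SFLAGS ioctl: when ETH_FLAG_RXHASH is requested and the chip is not marked SKY2_HW_RSS_BROKEN, the driver sets NETIF_F_RXHASH and reprograms the hash key via rx_set_rss. A hypothetical userspace sketch of that call path; a real tool would read ETHTOOL_GFLAGS first and toggle only the rxhash bit, and errors surface as errno rather than the driver's return code:

	#include <linux/ethtool.h>
	#include <linux/sockios.h>
	#include <net/if.h>
	#include <string.h>
	#include <sys/ioctl.h>

	/* Hypothetical sketch: request receive hashing on an interface */
	static int request_rxhash(int sock, const char *ifname, unsigned int flags)
	{
		struct ethtool_value ev = { .cmd = ETHTOOL_SFLAGS,
					    .data = flags | ETH_FLAG_RXHASH };
		struct ifreq ifr;

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
		ifr.ifr_data = (void *)&ev;

		return ioctl(sock, SIOCETHTOOL, &ifr);
	}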
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index a5e182dd9819..084eff21b67a 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -548,6 +548,14 @@ enum {
CHIP_ID_YUKON_UL_2 = 0xba, /* YUKON-2 Ultra 2 */
CHIP_ID_YUKON_OPT = 0xbc, /* YUKON-2 Optima */
};
+
+enum yukon_xl_rev {
+ CHIP_REV_YU_XL_A0 = 0,
+ CHIP_REV_YU_XL_A1 = 1,
+ CHIP_REV_YU_XL_A2 = 2,
+ CHIP_REV_YU_XL_A3 = 3,
+};
+
enum yukon_ec_rev {
CHIP_REV_YU_EC_A1 = 0, /* Chip Rev. for Yukon-EC A1/A0 */
CHIP_REV_YU_EC_A2 = 1, /* Chip Rev. for Yukon-EC A2 */
@@ -557,6 +565,7 @@ enum yukon_ec_u_rev {
CHIP_REV_YU_EC_U_A0 = 1,
CHIP_REV_YU_EC_U_A1 = 2,
CHIP_REV_YU_EC_U_B0 = 3,
+ CHIP_REV_YU_EC_U_B1 = 5,
};
enum yukon_fe_rev {
CHIP_REV_YU_FE_A1 = 1,
@@ -685,8 +694,21 @@ enum {
TXA_CTRL = 0x0210,/* 8 bit Tx Arbiter Control Register */
TXA_TEST = 0x0211,/* 8 bit Tx Arbiter Test Register */
TXA_STAT = 0x0212,/* 8 bit Tx Arbiter Status Register */
+
+ RSS_KEY = 0x0220, /* RSS Key setup */
+ RSS_CFG = 0x0248, /* RSS Configuration */
};
+enum {
+ HASH_TCP_IPV6_EX_CTRL = 1<<5,
+ HASH_IPV6_EX_CTRL = 1<<4,
+ HASH_TCP_IPV6_CTRL = 1<<3,
+ HASH_IPV6_CTRL = 1<<2,
+ HASH_TCP_IPV4_CTRL = 1<<1,
+ HASH_IPV4_CTRL = 1<<0,
+
+ HASH_ALL = 0x3f,
+};
enum {
B6_EXT_REG = 0x0300,/* External registers (GENESIS only) */
@@ -1775,10 +1797,13 @@ enum {
/* GM_SERIAL_MODE 16 bit r/w Serial Mode Register */
enum {
GM_SMOD_DATABL_MSK = 0x1f<<11, /* Bit 15..11: Data Blinder (r/o) */
- GM_SMOD_LIMIT_4 = 1<<10, /* Bit 10: 4 consecutive Tx trials */
- GM_SMOD_VLAN_ENA = 1<<9, /* Bit 9: Enable VLAN (Max. Frame Len) */
- GM_SMOD_JUMBO_ENA = 1<<8, /* Bit 8: Enable Jumbo (Max. Frame Len) */
- GM_SMOD_IPG_MSK = 0x1f /* Bit 4..0: Inter-Packet Gap (IPG) */
+ GM_SMOD_LIMIT_4 = 1<<10, /* 4 consecutive Tx trials */
+ GM_SMOD_VLAN_ENA = 1<<9, /* Enable VLAN (Max. Frame Len) */
+ GM_SMOD_JUMBO_ENA = 1<<8, /* Enable Jumbo (Max. Frame Len) */
+
+ GM_NEW_FLOW_CTRL = 1<<6, /* Enable New Flow-Control */
+
+ GM_SMOD_IPG_MSK = 0x1f /* Bit 4..0: Inter-Packet Gap (IPG) */
};
#define DATA_BLIND_VAL(x) (((x)<<11) & GM_SMOD_DATABL_MSK)
@@ -2157,14 +2182,14 @@ struct tx_ring_info {
unsigned long flags;
#define TX_MAP_SINGLE 0x0001
#define TX_MAP_PAGE 0x0002
- DECLARE_PCI_UNMAP_ADDR(mapaddr);
- DECLARE_PCI_UNMAP_LEN(maplen);
+ DEFINE_DMA_UNMAP_ADDR(mapaddr);
+ DEFINE_DMA_UNMAP_LEN(maplen);
};
struct rx_ring_info {
struct sk_buff *skb;
dma_addr_t data_addr;
- DECLARE_PCI_UNMAP_LEN(data_size);
+ DEFINE_DMA_UNMAP_LEN(data_size);
dma_addr_t frag_addr[ETH_JUMBO_MTU >> PAGE_SHIFT];
};
@@ -2249,6 +2274,7 @@ struct sky2_hw {
#define SKY2_HW_NEW_LE 0x00000020 /* new LSOv2 format */
#define SKY2_HW_AUTO_TX_SUM 0x00000040 /* new IP decode for Tx */
#define SKY2_HW_ADV_POWER_CTL 0x00000080 /* additional PHY power regs */
+#define SKY2_HW_RSS_BROKEN 0x00000100
u8 chip_id;
u8 chip_rev;
@@ -2256,6 +2282,7 @@ struct sky2_hw {
u8 ports;
struct sky2_status_le *st_le;
+ u32 st_size;
u32 st_idx;
dma_addr_t st_dma;
diff --git a/drivers/net/slhc.c b/drivers/net/slhc.c
index 140d63f3cafa..ac279fad9d45 100644
--- a/drivers/net/slhc.c
+++ b/drivers/net/slhc.c
@@ -731,7 +731,6 @@ void
slhc_free(struct slcompress *comp)
{
printk(KERN_DEBUG "Called IP function on non IP-system: slhc_free");
- return;
}
struct slcompress *
slhc_init(int rslots, int tslots)
diff --git a/drivers/net/slip.c b/drivers/net/slip.c
index 89696156c059..fa434fb8fb7c 100644
--- a/drivers/net/slip.c
+++ b/drivers/net/slip.c
@@ -458,7 +458,7 @@ static void sl_tx_timeout(struct net_device *dev)
* 14 Oct 1994 Dmitry Gorodchanin.
*/
#ifdef SL_CHECK_TRANSMIT
- if (time_before(jiffies, dev->trans_start + 20 * HZ)) {
+ if (time_before(jiffies, dev_trans_start(dev) + 20 * HZ)) {
/* 20 sec timeout not reached */
goto out;
}
@@ -1269,7 +1269,7 @@ static int sl_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
case SIOCGLEASE:
*p = sl->leased;
- };
+ }
spin_unlock_bh(&sl->lock);
return 0;
}
diff --git a/drivers/net/smc-mca.c b/drivers/net/smc-mca.c
index a93f122e9a96..d07c39cb4daf 100644
--- a/drivers/net/smc-mca.c
+++ b/drivers/net/smc-mca.c
@@ -460,7 +460,6 @@ static void ultramca_reset_8390(struct net_device *dev)
if (ei_debug > 1)
printk("reset done\n");
- return;
}
/* Grab the 8390 specific header. Similar to the block_input routine, but
diff --git a/drivers/net/smc-ultra.c b/drivers/net/smc-ultra.c
index 0291ea098a06..d2dd8e6113ab 100644
--- a/drivers/net/smc-ultra.c
+++ b/drivers/net/smc-ultra.c
@@ -421,7 +421,6 @@ ultra_reset_8390(struct net_device *dev)
outb(0x01, cmd_port + 6); /* Enable interrupts and memory. */
if (ei_debug > 1) printk("reset done\n");
- return;
}
/* Grab the 8390 specific header. Similar to the block_input routine, but
diff --git a/drivers/net/smc-ultra32.c b/drivers/net/smc-ultra32.c
index 7a554adc70fb..e459c3b2510a 100644
--- a/drivers/net/smc-ultra32.c
+++ b/drivers/net/smc-ultra32.c
@@ -352,7 +352,6 @@ static void ultra32_reset_8390(struct net_device *dev)
outb(0x84, ioaddr + 5); /* Enable MEM16 & Disable Bus Master. */
outb(0x01, ioaddr + 6); /* Enable Interrupts. */
if (ei_debug > 1) printk("reset done\n");
- return;
}
/* Grab the 8390 specific header. Similar to the block_input routine, but
diff --git a/drivers/net/smc911x.c b/drivers/net/smc911x.c
index 635820d42b19..66831f378396 100644
--- a/drivers/net/smc911x.c
+++ b/drivers/net/smc911x.c
@@ -382,7 +382,7 @@ static inline void smc911x_rcv(struct net_device *dev)
DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, "%s: --> %s\n",
dev->name, __func__);
status = SMC_GET_RX_STS_FIFO(lp);
- DBG(SMC_DEBUG_RX, "%s: Rx pkt len %d status 0x%08x \n",
+ DBG(SMC_DEBUG_RX, "%s: Rx pkt len %d status 0x%08x\n",
dev->name, (status & 0x3fff0000) >> 16, status & 0xc000ffff);
pkt_len = (status & RX_STS_PKT_LEN_) >> 16;
if (status & RX_STS_ES_) {
@@ -1135,7 +1135,7 @@ static irqreturn_t smc911x_interrupt(int irq, void *dev_id)
}
#else
if (status & INT_STS_TSFL_) {
- DBG(SMC_DEBUG_TX, "%s: TX status FIFO limit (%d) irq \n", dev->name, );
+ DBG(SMC_DEBUG_TX, "%s: TX status FIFO limit (%d) irq\n", dev->name, );
smc911x_tx(dev);
SMC_ACK_INT(lp, INT_STS_TSFL_);
}
@@ -1274,7 +1274,7 @@ static void smc911x_timeout(struct net_device *dev)
status = SMC_GET_INT(lp);
mask = SMC_GET_INT_EN(lp);
spin_unlock_irqrestore(&lp->lock, flags);
- DBG(SMC_DEBUG_MISC, "%s: INT 0x%02x MASK 0x%02x \n",
+ DBG(SMC_DEBUG_MISC, "%s: INT 0x%02x MASK 0x%02x\n",
dev->name, status, mask);
/* Dump the current TX FIFO contents and restart */
@@ -1289,7 +1289,7 @@ static void smc911x_timeout(struct net_device *dev)
schedule_work(&lp->phy_configure);
/* We can accept TX packets again */
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
}
@@ -1340,7 +1340,7 @@ static void smc911x_set_multicast_list(struct net_device *dev)
* within that register.
*/
else if (!netdev_mc_empty(dev)) {
- struct dev_mc_list *cur_addr;
+ struct netdev_hw_addr *ha;
/* Set the Hash perfec mode */
mcr |= MAC_CR_HPFILT_;
@@ -1348,19 +1348,16 @@ static void smc911x_set_multicast_list(struct net_device *dev)
/* start with a table of all zeros: reject all */
memset(multicast_table, 0, sizeof(multicast_table));
- netdev_for_each_mc_addr(cur_addr, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
u32 position;
- /* do we have a pointer here? */
- if (!cur_addr)
- break;
/* make sure this is a multicast address -
shouldn't this be a given if we have it here ? */
- if (!(*cur_addr->dmi_addr & 1))
- continue;
+ if (!(*ha->addr & 1))
+ continue;
/* upper 6 bits are used as hash index */
- position = ether_crc(ETH_ALEN, cur_addr->dmi_addr)>>26;
+ position = ether_crc(ETH_ALEN, ha->addr)>>26;
multicast_table[position>>5] |= 1 << (position&0x1f);
}
diff --git a/drivers/net/smc9194.c b/drivers/net/smc9194.c
index 3f2f7843aa4e..7486d0908064 100644
--- a/drivers/net/smc9194.c
+++ b/drivers/net/smc9194.c
@@ -416,7 +416,7 @@ static void smc_shutdown( int ioaddr )
/*
- . Function: smc_setmulticast( int ioaddr, int count, dev_mc_list * adds )
+ . Function: smc_setmulticast( int ioaddr, struct net_device *dev )
. Purpose:
. This sets the internal hardware table to filter out unwanted multicast
. packets before they take up memory.
@@ -437,26 +437,23 @@ static void smc_setmulticast(int ioaddr, struct net_device *dev)
{
int i;
unsigned char multicast_table[ 8 ];
- struct dev_mc_list *cur_addr;
+ struct netdev_hw_addr *ha;
/* table for flipping the order of 3 bits */
unsigned char invert3[] = { 0, 4, 2, 6, 1, 5, 3, 7 };
/* start with a table of all zeros: reject all */
memset( multicast_table, 0, sizeof( multicast_table ) );
- netdev_for_each_mc_addr(cur_addr, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
int position;
- /* do we have a pointer here? */
- if ( !cur_addr )
- break;
/* make sure this is a multicast address - shouldn't this
be a given if we have it here ? */
- if ( !( *cur_addr->dmi_addr & 1 ) )
+ if (!(*ha->addr & 1))
continue;
/* only use the low order bits */
- position = ether_crc_le(6, cur_addr->dmi_addr) & 0x3f;
+ position = ether_crc_le(6, ha->addr) & 0x3f;
/* do some messy swapping to put the bit in the right spot */
multicast_table[invert3[position&7]] |=
@@ -528,7 +525,7 @@ static netdev_tx_t smc_wait_to_send_packet(struct sk_buff *skb,
numPages = ((length & 0xfffe) + 6) / 256;
if (numPages > 7 ) {
- printk(CARDNAME": Far too big packet error. \n");
+ printk(CARDNAME": Far too big packet error.\n");
/* freeing the packet is a good thing here... but should
. any packets of this size get down here? */
dev_kfree_skb (skb);
@@ -570,9 +567,9 @@ static netdev_tx_t smc_wait_to_send_packet(struct sk_buff *skb,
if ( !time_out ) {
/* oh well, wait until the chip finds memory later */
SMC_ENABLE_INT( IM_ALLOC_INT );
- PRINTK2((CARDNAME": memory allocation deferred. \n"));
+ PRINTK2((CARDNAME": memory allocation deferred.\n"));
/* it's deferred, but I'll handle it later */
- return NETDEV_TX_OK;
+ return NETDEV_TX_OK;
}
/* or YES! I can send the packet now.. */
smc_hardware_send_packet(dev);
@@ -610,7 +607,7 @@ static void smc_hardware_send_packet( struct net_device * dev )
ioaddr = dev->base_addr;
if ( !skb ) {
- PRINTK((CARDNAME": In XMIT with no packet to send \n"));
+ PRINTK((CARDNAME": In XMIT with no packet to send\n"));
return;
}
length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
@@ -620,7 +617,7 @@ static void smc_hardware_send_packet( struct net_device * dev )
packet_no = inb( ioaddr + PNR_ARR + 1 );
if ( packet_no & 0x80 ) {
/* or isn't there? BAD CHIP! */
- printk(KERN_DEBUG CARDNAME": Memory allocation failed. \n");
+ printk(KERN_DEBUG CARDNAME": Memory allocation failed.\n");
dev_kfree_skb_any(skb);
lp->saved_skb = NULL;
netif_wake_queue(dev);
@@ -685,7 +682,7 @@ static void smc_hardware_send_packet( struct net_device * dev )
/* and let the chipset deal with it */
outw( MC_ENQUEUE , ioaddr + MMU_CMD );
- PRINTK2((CARDNAME": Sent packet of length %d \n",length));
+ PRINTK2((CARDNAME": Sent packet of length %d\n", length));
lp->saved_skb = NULL;
dev_kfree_skb_any (skb);
@@ -694,8 +691,6 @@ static void smc_hardware_send_packet( struct net_device * dev )
/* we can send another packet */
netif_wake_queue(dev);
-
- return;
}
/*-------------------------------------------------------------------------
@@ -937,7 +932,7 @@ static int __init smc_probe(struct net_device *dev, int ioaddr)
if ( !chip_ids[ ( revision_register >> 4 ) & 0xF ] ) {
/* I don't recognize this chip, so... */
printk(CARDNAME ": IO %x: Unrecognized revision register:"
- " %x, Contact author. \n", ioaddr, revision_register );
+ " %x, Contact author.\n", ioaddr, revision_register);
retval = -ENODEV;
goto err_out;
@@ -1045,9 +1040,6 @@ static int __init smc_probe(struct net_device *dev, int ioaddr)
*/
printk("ADDR: %pM\n", dev->dev_addr);
- /* set the private data to zero by default */
- memset(netdev_priv(dev), 0, sizeof(struct smc_local));
-
/* Grab the IRQ */
retval = request_irq(dev->irq, smc_interrupt, 0, DRV_NAME, dev);
if (retval) {
@@ -1074,7 +1066,7 @@ static void print_packet( byte * buf, int length )
int remainder;
int lines;
- printk("Packet of length %d \n", length );
+ printk("Packet of length %d\n", length);
lines = length / 16;
remainder = length % 16;
@@ -1170,7 +1162,7 @@ static void smc_timeout(struct net_device *dev)
/* "kick" the adaptor */
smc_reset( dev->base_addr );
smc_enable( dev->base_addr );
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
/* clear anything saved */
((struct smc_local *)netdev_priv(dev))->saved_skb = NULL;
netif_wake_queue(dev);
@@ -1201,7 +1193,7 @@ static void smc_rcv(struct net_device *dev)
if ( packet_number & FP_RXEMPTY ) {
/* we got called , but nothing was on the FIFO */
- PRINTK((CARDNAME ": WARNING: smc_rcv with nothing on FIFO. \n"));
+ PRINTK((CARDNAME ": WARNING: smc_rcv with nothing on FIFO.\n"));
/* don't need to restore anything */
return;
}
@@ -1257,14 +1249,14 @@ static void smc_rcv(struct net_device *dev)
to send the DWORDs or the bytes first, or some
mixture. A mixture might improve already slow PIO
performance */
- PRINTK3((" Reading %d dwords (and %d bytes) \n",
+ PRINTK3((" Reading %d dwords (and %d bytes)\n",
packet_length >> 2, packet_length & 3 ));
insl(ioaddr + DATA_1 , data, packet_length >> 2 );
/* read the left over bytes */
insb( ioaddr + DATA_1, data + (packet_length & 0xFFFFFC),
packet_length & 0x3 );
#else
- PRINTK3((" Reading %d words and %d byte(s) \n",
+ PRINTK3((" Reading %d words and %d byte(s)\n",
(packet_length >> 1 ), packet_length & 1 ));
insw(ioaddr + DATA_1 , data, packet_length >> 1);
if ( packet_length & 1 ) {
@@ -1333,7 +1325,7 @@ static void smc_tx( struct net_device * dev )
outw( PTR_AUTOINC | PTR_READ, ioaddr + POINTER );
tx_status = inw( ioaddr + DATA_1 );
- PRINTK3((CARDNAME": TX DONE STATUS: %4x \n", tx_status ));
+ PRINTK3((CARDNAME": TX DONE STATUS: %4x\n", tx_status));
dev->stats.tx_errors++;
if ( tx_status & TS_LOSTCAR ) dev->stats.tx_carrier_errors++;
@@ -1347,7 +1339,7 @@ static void smc_tx( struct net_device * dev )
#endif
if ( tx_status & TS_SUCCESS ) {
- printk(CARDNAME": Successful packet caused interrupt \n");
+ printk(CARDNAME": Successful packet caused interrupt\n");
}
/* re-enable transmit */
SMC_SELECT_BANK( 0 );
@@ -1361,7 +1353,6 @@ static void smc_tx( struct net_device * dev )
lp->packets_waiting--;
outb( saved_packet, ioaddr + PNR_ARR );
- return;
}
/*--------------------------------------------------------------------
@@ -1393,7 +1384,7 @@ static irqreturn_t smc_interrupt(int irq, void * dev_id)
int handled = 0;
- PRINTK3((CARDNAME": SMC interrupt started \n"));
+ PRINTK3((CARDNAME": SMC interrupt started\n"));
saved_bank = inw( ioaddr + BANK_SELECT );
@@ -1408,7 +1399,7 @@ static irqreturn_t smc_interrupt(int irq, void * dev_id)
/* set a timeout value, so I don't stay here forever */
timeout = 4;
- PRINTK2((KERN_WARNING CARDNAME ": MASK IS %x \n", mask ));
+ PRINTK2((KERN_WARNING CARDNAME ": MASK IS %x\n", mask));
do {
/* read the status flag, and mask it */
status = inb( ioaddr + INTERRUPT ) & mask;
@@ -1418,7 +1409,7 @@ static irqreturn_t smc_interrupt(int irq, void * dev_id)
handled = 1;
PRINTK3((KERN_WARNING CARDNAME
- ": Handling interrupt status %x \n", status ));
+ ": Handling interrupt status %x\n", status));
if (status & IM_RCV_INT) {
/* Got a packet(s). */
@@ -1452,7 +1443,7 @@ static irqreturn_t smc_interrupt(int irq, void * dev_id)
} else if (status & IM_ALLOC_INT ) {
PRINTK2((KERN_DEBUG CARDNAME
- ": Allocation interrupt \n"));
+ ": Allocation interrupt\n"));
/* clear this interrupt so it doesn't happen again */
mask &= ~IM_ALLOC_INT;
@@ -1470,9 +1461,9 @@ static irqreturn_t smc_interrupt(int irq, void * dev_id)
dev->stats.rx_fifo_errors++;
outb( IM_RX_OVRN_INT, ioaddr + INTERRUPT );
} else if (status & IM_EPH_INT ) {
- PRINTK((CARDNAME ": UNSUPPORTED: EPH INTERRUPT \n"));
+ PRINTK((CARDNAME ": UNSUPPORTED: EPH INTERRUPT\n"));
} else if (status & IM_ERCV_INT ) {
- PRINTK((CARDNAME ": UNSUPPORTED: ERCV INTERRUPT \n"));
+ PRINTK((CARDNAME ": UNSUPPORTED: ERCV INTERRUPT\n"));
outb( IM_ERCV_INT, ioaddr + INTERRUPT );
}
} while ( timeout -- );
@@ -1482,7 +1473,7 @@ static irqreturn_t smc_interrupt(int irq, void * dev_id)
SMC_SELECT_BANK( 2 );
outb( mask, ioaddr + INT_MASK );
- PRINTK3(( KERN_WARNING CARDNAME ": MASK is now %x \n", mask ));
+ PRINTK3((KERN_WARNING CARDNAME ": MASK is now %x\n", mask));
outw( saved_pointer, ioaddr + POINTER );
SMC_SELECT_BANK( saved_bank );
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index 860339d51d58..10cf0cbc2185 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -1285,7 +1285,7 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id)
smc_phy_interrupt(dev);
} else if (status & IM_ERCV_INT) {
SMC_ACK_INT(lp, IM_ERCV_INT);
- PRINTK("%s: UNSUPPORTED: ERCV INTERRUPT \n", dev->name);
+ PRINTK("%s: UNSUPPORTED: ERCV INTERRUPT\n", dev->name);
}
} while (--timeout);
@@ -1360,7 +1360,7 @@ static void smc_timeout(struct net_device *dev)
schedule_work(&lp->phy_configure);
/* We can accept TX packets again */
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
}
@@ -1412,7 +1412,7 @@ static void smc_set_multicast_list(struct net_device *dev)
* within that register.
*/
else if (!netdev_mc_empty(dev)) {
- struct dev_mc_list *cur_addr;
+ struct netdev_hw_addr *ha;
/* table for flipping the order of 3 bits */
static const unsigned char invert3[] = {0, 4, 2, 6, 1, 5, 3, 7};
@@ -1420,16 +1420,16 @@ static void smc_set_multicast_list(struct net_device *dev)
/* start with a table of all zeros: reject all */
memset(multicast_table, 0, sizeof(multicast_table));
- netdev_for_each_mc_addr(cur_addr, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
int position;
/* make sure this is a multicast address -
shouldn't this be a given if we have it here ? */
- if (!(*cur_addr->dmi_addr & 1))
+ if (!(*ha->addr & 1))
continue;
/* only use the low order bits */
- position = crc32_le(~0, cur_addr->dmi_addr, 6) & 0x3f;
+ position = crc32_le(~0, ha->addr, 6) & 0x3f;
/* do some messy swapping to put the bit in the right spot */
multicast_table[invert3[position&7]] |=
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
index cbf520d38eac..cc559741b0fa 100644
--- a/drivers/net/smsc911x.c
+++ b/drivers/net/smsc911x.c
@@ -736,7 +736,7 @@ static void smsc911x_phy_adjust_link(struct net_device *dev)
SMSC_TRACE(HW, "configuring for carrier OK");
if ((pdata->gpio_orig_setting & GPIO_CFG_LED1_EN_) &&
(!pdata->using_extphy)) {
- /* Restore orginal GPIO configuration */
+ /* Restore original GPIO configuration */
pdata->gpio_setting = pdata->gpio_orig_setting;
smsc911x_reg_write(pdata, GPIO_CFG,
pdata->gpio_setting);
@@ -750,7 +750,7 @@ static void smsc911x_phy_adjust_link(struct net_device *dev)
if ((pdata->gpio_setting & GPIO_CFG_LED1_EN_) &&
(!pdata->using_extphy)) {
/* Force 10/100 LED off, after saving
- * orginal GPIO configuration */
+ * original GPIO configuration */
pdata->gpio_orig_setting = pdata->gpio_setting;
pdata->gpio_setting &= ~GPIO_CFG_LED1_EN_;
@@ -1335,7 +1335,6 @@ static int smsc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
smsc911x_tx_writefifo(pdata, (unsigned int *)bufp, wrsz);
freespace -= (skb->len + 32);
dev_kfree_skb(skb);
- dev->trans_start = jiffies;
if (unlikely(smsc911x_tx_get_txstatcount(pdata) >= 30))
smsc911x_tx_update_txcounters(dev);
@@ -1382,13 +1381,13 @@ static void smsc911x_set_multicast_list(struct net_device *dev)
/* Enabling specific multicast addresses */
unsigned int hash_high = 0;
unsigned int hash_low = 0;
- struct dev_mc_list *mc_list;
+ struct netdev_hw_addr *ha;
pdata->set_bits_mask = MAC_CR_HPFILT_;
pdata->clear_bits_mask = (MAC_CR_PRMS_ | MAC_CR_MCPAS_);
- netdev_for_each_mc_addr(mc_list, dev) {
- unsigned int bitnum = smsc911x_hash(mc_list->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev) {
+ unsigned int bitnum = smsc911x_hash(ha->addr);
unsigned int mask = 0x01 << (bitnum & 0x1F);
if (bitnum & 0x20)
diff --git a/drivers/net/smsc9420.c b/drivers/net/smsc9420.c
index aafaebf45748..6cdee6a15f9f 100644
--- a/drivers/net/smsc9420.c
+++ b/drivers/net/smsc9420.c
@@ -1034,8 +1034,6 @@ static netdev_tx_t smsc9420_hard_start_xmit(struct sk_buff *skb,
smsc9420_reg_write(pd, TX_POLL_DEMAND, 1);
smsc9420_pci_flush_write(pd);
- dev->trans_start = jiffies;
-
return NETDEV_TX_OK;
}
@@ -1064,12 +1062,12 @@ static void smsc9420_set_multicast_list(struct net_device *dev)
mac_cr |= MAC_CR_MCPAS_;
mac_cr &= (~MAC_CR_HPFILT_);
} else if (!netdev_mc_empty(dev)) {
- struct dev_mc_list *mc_list;
+ struct netdev_hw_addr *ha;
u32 hash_lo = 0, hash_hi = 0;
smsc_dbg(HW, "Multicast filter enabled");
- netdev_for_each_mc_addr(mc_list, dev) {
- u32 bit_num = smsc9420_hash(mc_list->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev) {
+ u32 bit_num = smsc9420_hash(ha->addr);
u32 mask = 1 << (bit_num & 0x1F);
if (bit_num & 0x20)
diff --git a/drivers/net/sonic.c b/drivers/net/sonic.c
index 287c251075e5..26e25d7f5829 100644
--- a/drivers/net/sonic.c
+++ b/drivers/net/sonic.c
@@ -174,7 +174,7 @@ static void sonic_tx_timeout(struct net_device *dev)
/* Try to restart the adaptor. */
sonic_init(dev);
lp->stats.tx_errors++;
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
}
@@ -263,8 +263,6 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);
- dev->trans_start = jiffies;
-
return NETDEV_TX_OK;
}
@@ -531,7 +529,7 @@ static void sonic_multicast_list(struct net_device *dev)
{
struct sonic_local *lp = netdev_priv(dev);
unsigned int rcr;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
unsigned char *addr;
int i;
@@ -550,8 +548,8 @@ static void sonic_multicast_list(struct net_device *dev)
netdev_mc_count(dev));
sonic_set_cam_enable(dev, 1); /* always enable our own address */
i = 1;
- netdev_for_each_mc_addr(dmi, dev) {
- addr = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ addr = ha->addr;
sonic_cda_put(dev, i, SONIC_CD_CAP0, addr[1] << 8 | addr[0]);
sonic_cda_put(dev, i, SONIC_CD_CAP1, addr[3] << 8 | addr[2]);
sonic_cda_put(dev, i, SONIC_CD_CAP2, addr[5] << 8 | addr[4]);
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index dd3cb0f2d21f..1636a34d95dd 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -625,7 +625,7 @@ spider_net_get_multicast_hash(struct net_device *netdev, __u8 *addr)
static void
spider_net_set_multi(struct net_device *netdev)
{
- struct dev_mc_list *mc;
+ struct netdev_hw_addr *ha;
u8 hash;
int i;
u32 reg;
@@ -646,8 +646,8 @@ spider_net_set_multi(struct net_device *netdev)
hash = spider_net_get_multicast_hash(netdev, netdev->broadcast); */
set_bit(0xfd, bitmask);
- netdev_for_each_mc_addr(mc, netdev) {
- hash = spider_net_get_multicast_hash(netdev, mc->dmi_addr);
+ netdev_for_each_mc_addr(ha, netdev) {
+ hash = spider_net_get_multicast_hash(netdev, ha->addr);
set_bit(hash, bitmask);
}
@@ -2095,8 +2095,6 @@ static void spider_net_link_phy(unsigned long data)
card->netdev->name, phy->speed,
phy->duplex == 1 ? "Full" : "Half",
phy->autoneg == 1 ? "" : "no ");
-
- return;
}
/**
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
index 6dfa69899019..74b7ae76906e 100644
--- a/drivers/net/starfire.c
+++ b/drivers/net/starfire.c
@@ -1173,7 +1173,7 @@ static void tx_timeout(struct net_device *dev)
/* Trigger an immediate transmit demand. */
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
np->stats.tx_errors++;
netif_wake_queue(dev);
}
@@ -1221,8 +1221,6 @@ static void init_ring(struct net_device *dev)
for (i = 0; i < TX_RING_SIZE; i++)
memset(&np->tx_info[i], 0, sizeof(np->tx_info[i]));
-
- return;
}
@@ -1312,8 +1310,6 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
if ((np->cur_tx - np->dirty_tx) + 4 > TX_RING_SIZE)
netif_stop_queue(dev);
- dev->trans_start = jiffies;
-
return NETDEV_TX_OK;
}
@@ -1766,7 +1762,7 @@ static void set_rx_mode(struct net_device *dev)
struct netdev_private *np = netdev_priv(dev);
void __iomem *ioaddr = np->base;
u32 rx_mode = MinVLANPrio;
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
int i;
#ifdef VLAN_SUPPORT
@@ -1804,8 +1800,8 @@ static void set_rx_mode(struct net_device *dev)
/* Use the 16 element perfect filter, skip first two entries. */
void __iomem *filter_addr = ioaddr + PerfFilterTable + 2 * 16;
__be16 *eaddrs;
- netdev_for_each_mc_addr(mclist, dev) {
- eaddrs = (__be16 *)mclist->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ eaddrs = (__be16 *) ha->addr;
writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 4;
writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4;
writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 8;
@@ -1825,10 +1821,10 @@ static void set_rx_mode(struct net_device *dev)
__le16 mc_filter[32] __attribute__ ((aligned(sizeof(long)))); /* Multicast hash filter */
memset(mc_filter, 0, sizeof(mc_filter));
- netdev_for_each_mc_addr(mclist, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
/* The chip uses the upper 9 CRC bits
as index into the hash table */
- int bit_nr = ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 23;
+ int bit_nr = ether_crc_le(ETH_ALEN, ha->addr) >> 23;
__le32 *fptr = (__le32 *) &mc_filter[(bit_nr >> 4) & ~1];
*fptr |= cpu_to_le32(1 << (bit_nr & 31));
diff --git a/drivers/net/stmmac/Makefile b/drivers/net/stmmac/Makefile
index c776af15fe1a..9691733ddb8e 100644
--- a/drivers/net/stmmac/Makefile
+++ b/drivers/net/stmmac/Makefile
@@ -2,4 +2,4 @@ obj-$(CONFIG_STMMAC_ETH) += stmmac.o
stmmac-$(CONFIG_STMMAC_TIMER) += stmmac_timer.o
stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o \
dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \
- dwmac100.o $(stmmac-y)
+ dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o $(stmmac-y)
diff --git a/drivers/net/stmmac/common.h b/drivers/net/stmmac/common.h
index 2a58172e986a..144f76fd3e39 100644
--- a/drivers/net/stmmac/common.h
+++ b/drivers/net/stmmac/common.h
@@ -22,8 +22,26 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
-#include "descs.h"
#include <linux/netdevice.h>
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#define STMMAC_VLAN_TAG_USED
+#include <linux/if_vlan.h>
+#endif
+
+#include "descs.h"
+
+#undef CHIP_DEBUG_PRINT
+/* Turn-on extra printk debug for MAC core, dma and descriptors */
+/* #define CHIP_DEBUG_PRINT */
+
+#ifdef CHIP_DEBUG_PRINT
+#define CHIP_DBG(fmt, args...) printk(fmt, ## args)
+#else
+#define CHIP_DBG(fmt, args...) do { } while (0)
+#endif
+
+#undef FRAME_FILTER_DEBUG
+/* #define FRAME_FILTER_DEBUG */
struct stmmac_extra_stats {
/* Transmit errors */
@@ -231,3 +249,4 @@ extern void stmmac_set_mac_addr(unsigned long ioaddr, u8 addr[6],
unsigned int high, unsigned int low);
extern void stmmac_get_mac_addr(unsigned long ioaddr, unsigned char *addr,
unsigned int high, unsigned int low);
+extern void dwmac_dma_flush_tx_fifo(unsigned long ioaddr);
diff --git a/drivers/net/stmmac/dwmac100.c b/drivers/net/stmmac/dwmac100.c
deleted file mode 100644
index 4cacca614fc1..000000000000
--- a/drivers/net/stmmac/dwmac100.c
+++ /dev/null
@@ -1,538 +0,0 @@
-/*******************************************************************************
- This is the driver for the MAC 10/100 on-chip Ethernet controller
- currently tested on all the ST boards based on STb7109 and stx7200 SoCs.
-
- DWC Ether MAC 10/100 Universal version 4.0 has been used for developing
- this code.
-
- Copyright (C) 2007-2009 STMicroelectronics Ltd
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
-*******************************************************************************/
-
-#include <linux/crc32.h>
-#include <linux/mii.h>
-#include <linux/phy.h>
-#include <linux/slab.h>
-
-#include "common.h"
-#include "dwmac100.h"
-#include "dwmac_dma.h"
-
-#undef DWMAC100_DEBUG
-/*#define DWMAC100_DEBUG*/
-#ifdef DWMAC100_DEBUG
-#define DBG(fmt, args...) printk(fmt, ## args)
-#else
-#define DBG(fmt, args...) do { } while (0)
-#endif
-
-static void dwmac100_core_init(unsigned long ioaddr)
-{
- u32 value = readl(ioaddr + MAC_CONTROL);
-
- writel((value | MAC_CORE_INIT), ioaddr + MAC_CONTROL);
-
-#ifdef STMMAC_VLAN_TAG_USED
- writel(ETH_P_8021Q, ioaddr + MAC_VLAN1);
-#endif
- return;
-}
-
-static void dwmac100_dump_mac_regs(unsigned long ioaddr)
-{
- pr_info("\t----------------------------------------------\n"
- "\t DWMAC 100 CSR (base addr = 0x%8x)\n"
- "\t----------------------------------------------\n",
- (unsigned int)ioaddr);
- pr_info("\tcontrol reg (offset 0x%x): 0x%08x\n", MAC_CONTROL,
- readl(ioaddr + MAC_CONTROL));
- pr_info("\taddr HI (offset 0x%x): 0x%08x\n ", MAC_ADDR_HIGH,
- readl(ioaddr + MAC_ADDR_HIGH));
- pr_info("\taddr LO (offset 0x%x): 0x%08x\n", MAC_ADDR_LOW,
- readl(ioaddr + MAC_ADDR_LOW));
- pr_info("\tmulticast hash HI (offset 0x%x): 0x%08x\n",
- MAC_HASH_HIGH, readl(ioaddr + MAC_HASH_HIGH));
- pr_info("\tmulticast hash LO (offset 0x%x): 0x%08x\n",
- MAC_HASH_LOW, readl(ioaddr + MAC_HASH_LOW));
- pr_info("\tflow control (offset 0x%x): 0x%08x\n",
- MAC_FLOW_CTRL, readl(ioaddr + MAC_FLOW_CTRL));
- pr_info("\tVLAN1 tag (offset 0x%x): 0x%08x\n", MAC_VLAN1,
- readl(ioaddr + MAC_VLAN1));
- pr_info("\tVLAN2 tag (offset 0x%x): 0x%08x\n", MAC_VLAN2,
- readl(ioaddr + MAC_VLAN2));
- pr_info("\n\tMAC management counter registers\n");
- pr_info("\t MMC crtl (offset 0x%x): 0x%08x\n",
- MMC_CONTROL, readl(ioaddr + MMC_CONTROL));
- pr_info("\t MMC High Interrupt (offset 0x%x): 0x%08x\n",
- MMC_HIGH_INTR, readl(ioaddr + MMC_HIGH_INTR));
- pr_info("\t MMC Low Interrupt (offset 0x%x): 0x%08x\n",
- MMC_LOW_INTR, readl(ioaddr + MMC_LOW_INTR));
- pr_info("\t MMC High Interrupt Mask (offset 0x%x): 0x%08x\n",
- MMC_HIGH_INTR_MASK, readl(ioaddr + MMC_HIGH_INTR_MASK));
- pr_info("\t MMC Low Interrupt Mask (offset 0x%x): 0x%08x\n",
- MMC_LOW_INTR_MASK, readl(ioaddr + MMC_LOW_INTR_MASK));
- return;
-}
-
-static int dwmac100_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx,
- u32 dma_rx)
-{
- u32 value = readl(ioaddr + DMA_BUS_MODE);
- /* DMA SW reset */
- value |= DMA_BUS_MODE_SFT_RESET;
- writel(value, ioaddr + DMA_BUS_MODE);
- do {} while ((readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET));
-
- /* Enable Application Access by writing to DMA CSR0 */
- writel(DMA_BUS_MODE_DEFAULT | (pbl << DMA_BUS_MODE_PBL_SHIFT),
- ioaddr + DMA_BUS_MODE);
-
- /* Mask interrupts by writing to CSR7 */
- writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
-
- /* The base address of the RX/TX descriptor lists must be written into
- * DMA CSR3 and CSR4, respectively. */
- writel(dma_tx, ioaddr + DMA_TX_BASE_ADDR);
- writel(dma_rx, ioaddr + DMA_RCV_BASE_ADDR);
-
- return 0;
-}
-
-/* Store and Forward capability is not used at all..
- * The transmit threshold can be programmed by
- * setting the TTC bits in the DMA control register.*/
-static void dwmac100_dma_operation_mode(unsigned long ioaddr, int txmode,
- int rxmode)
-{
- u32 csr6 = readl(ioaddr + DMA_CONTROL);
-
- if (txmode <= 32)
- csr6 |= DMA_CONTROL_TTC_32;
- else if (txmode <= 64)
- csr6 |= DMA_CONTROL_TTC_64;
- else
- csr6 |= DMA_CONTROL_TTC_128;
-
- writel(csr6, ioaddr + DMA_CONTROL);
-
- return;
-}
-
-static void dwmac100_dump_dma_regs(unsigned long ioaddr)
-{
- int i;
-
- DBG(KERN_DEBUG "DWMAC 100 DMA CSR \n");
- for (i = 0; i < 9; i++)
- pr_debug("\t CSR%d (offset 0x%x): 0x%08x\n", i,
- (DMA_BUS_MODE + i * 4),
- readl(ioaddr + DMA_BUS_MODE + i * 4));
- DBG(KERN_DEBUG "\t CSR20 (offset 0x%x): 0x%08x\n",
- DMA_CUR_TX_BUF_ADDR, readl(ioaddr + DMA_CUR_TX_BUF_ADDR));
- DBG(KERN_DEBUG "\t CSR21 (offset 0x%x): 0x%08x\n",
- DMA_CUR_RX_BUF_ADDR, readl(ioaddr + DMA_CUR_RX_BUF_ADDR));
- return;
-}
-
-/* DMA controller has two counters to track the number of
- * the receive missed frames. */
-static void dwmac100_dma_diagnostic_fr(void *data,
- struct stmmac_extra_stats *x,
- unsigned long ioaddr)
-{
- struct net_device_stats *stats = (struct net_device_stats *)data;
- u32 csr8 = readl(ioaddr + DMA_MISSED_FRAME_CTR);
-
- if (unlikely(csr8)) {
- if (csr8 & DMA_MISSED_FRAME_OVE) {
- stats->rx_over_errors += 0x800;
- x->rx_overflow_cntr += 0x800;
- } else {
- unsigned int ove_cntr;
- ove_cntr = ((csr8 & DMA_MISSED_FRAME_OVE_CNTR) >> 17);
- stats->rx_over_errors += ove_cntr;
- x->rx_overflow_cntr += ove_cntr;
- }
-
- if (csr8 & DMA_MISSED_FRAME_OVE_M) {
- stats->rx_missed_errors += 0xffff;
- x->rx_missed_cntr += 0xffff;
- } else {
- unsigned int miss_f = (csr8 & DMA_MISSED_FRAME_M_CNTR);
- stats->rx_missed_errors += miss_f;
- x->rx_missed_cntr += miss_f;
- }
- }
- return;
-}
-
-static int dwmac100_get_tx_frame_status(void *data,
- struct stmmac_extra_stats *x,
- struct dma_desc *p, unsigned long ioaddr)
-{
- int ret = 0;
- struct net_device_stats *stats = (struct net_device_stats *)data;
-
- if (unlikely(p->des01.tx.error_summary)) {
- if (unlikely(p->des01.tx.underflow_error)) {
- x->tx_underflow++;
- stats->tx_fifo_errors++;
- }
- if (unlikely(p->des01.tx.no_carrier)) {
- x->tx_carrier++;
- stats->tx_carrier_errors++;
- }
- if (unlikely(p->des01.tx.loss_carrier)) {
- x->tx_losscarrier++;
- stats->tx_carrier_errors++;
- }
- if (unlikely((p->des01.tx.excessive_deferral) ||
- (p->des01.tx.excessive_collisions) ||
- (p->des01.tx.late_collision)))
- stats->collisions += p->des01.tx.collision_count;
- ret = -1;
- }
- if (unlikely(p->des01.tx.heartbeat_fail)) {
- x->tx_heartbeat++;
- stats->tx_heartbeat_errors++;
- ret = -1;
- }
- if (unlikely(p->des01.tx.deferred))
- x->tx_deferred++;
-
- return ret;
-}
-
-static int dwmac100_get_tx_len(struct dma_desc *p)
-{
- return p->des01.tx.buffer1_size;
-}
-
-/* This function verifies if each incoming frame has some errors
- * and, if required, updates the multicast statistics.
- * In case of success, it returns csum_none becasue the device
- * is not able to compute the csum in HW. */
-static int dwmac100_get_rx_frame_status(void *data,
- struct stmmac_extra_stats *x,
- struct dma_desc *p)
-{
- int ret = csum_none;
- struct net_device_stats *stats = (struct net_device_stats *)data;
-
- if (unlikely(p->des01.rx.last_descriptor == 0)) {
- pr_warning("dwmac100 Error: Oversized Ethernet "
- "frame spanned multiple buffers\n");
- stats->rx_length_errors++;
- return discard_frame;
- }
-
- if (unlikely(p->des01.rx.error_summary)) {
- if (unlikely(p->des01.rx.descriptor_error))
- x->rx_desc++;
- if (unlikely(p->des01.rx.partial_frame_error))
- x->rx_partial++;
- if (unlikely(p->des01.rx.run_frame))
- x->rx_runt++;
- if (unlikely(p->des01.rx.frame_too_long))
- x->rx_toolong++;
- if (unlikely(p->des01.rx.collision)) {
- x->rx_collision++;
- stats->collisions++;
- }
- if (unlikely(p->des01.rx.crc_error)) {
- x->rx_crc++;
- stats->rx_crc_errors++;
- }
- ret = discard_frame;
- }
- if (unlikely(p->des01.rx.dribbling))
- ret = discard_frame;
-
- if (unlikely(p->des01.rx.length_error)) {
- x->rx_length++;
- ret = discard_frame;
- }
- if (unlikely(p->des01.rx.mii_error)) {
- x->rx_mii++;
- ret = discard_frame;
- }
- if (p->des01.rx.multicast_frame) {
- x->rx_multicast++;
- stats->multicast++;
- }
- return ret;
-}
-
-static void dwmac100_irq_status(unsigned long ioaddr)
-{
- return;
-}
-
-static void dwmac100_set_umac_addr(unsigned long ioaddr, unsigned char *addr,
- unsigned int reg_n)
-{
- stmmac_set_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW);
-}
-
-static void dwmac100_get_umac_addr(unsigned long ioaddr, unsigned char *addr,
- unsigned int reg_n)
-{
- stmmac_get_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW);
-}
-
-static void dwmac100_set_filter(struct net_device *dev)
-{
- unsigned long ioaddr = dev->base_addr;
- u32 value = readl(ioaddr + MAC_CONTROL);
-
- if (dev->flags & IFF_PROMISC) {
- value |= MAC_CONTROL_PR;
- value &= ~(MAC_CONTROL_PM | MAC_CONTROL_IF | MAC_CONTROL_HO |
- MAC_CONTROL_HP);
- } else if ((netdev_mc_count(dev) > HASH_TABLE_SIZE)
- || (dev->flags & IFF_ALLMULTI)) {
- value |= MAC_CONTROL_PM;
- value &= ~(MAC_CONTROL_PR | MAC_CONTROL_IF | MAC_CONTROL_HO);
- writel(0xffffffff, ioaddr + MAC_HASH_HIGH);
- writel(0xffffffff, ioaddr + MAC_HASH_LOW);
- } else if (netdev_mc_empty(dev)) { /* no multicast */
- value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR | MAC_CONTROL_IF |
- MAC_CONTROL_HO | MAC_CONTROL_HP);
- } else {
- u32 mc_filter[2];
- struct dev_mc_list *mclist;
-
- /* Perfect filter mode for physical address and Hash
- filter for multicast */
- value |= MAC_CONTROL_HP;
- value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR |
- MAC_CONTROL_IF | MAC_CONTROL_HO);
-
- memset(mc_filter, 0, sizeof(mc_filter));
- netdev_for_each_mc_addr(mclist, dev) {
- /* The upper 6 bits of the calculated CRC are used to
- * index the contens of the hash table */
- int bit_nr =
- ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
- /* The most significant bit determines the register to
- * use (H/L) while the other 5 bits determine the bit
- * within the register. */
- mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
- }
- writel(mc_filter[0], ioaddr + MAC_HASH_LOW);
- writel(mc_filter[1], ioaddr + MAC_HASH_HIGH);
- }
-
- writel(value, ioaddr + MAC_CONTROL);
-
- DBG(KERN_INFO "%s: CTRL reg: 0x%08x Hash regs: "
- "HI 0x%08x, LO 0x%08x\n",
- __func__, readl(ioaddr + MAC_CONTROL),
- readl(ioaddr + MAC_HASH_HIGH), readl(ioaddr + MAC_HASH_LOW));
- return;
-}
-
-static void dwmac100_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
- unsigned int fc, unsigned int pause_time)
-{
- unsigned int flow = MAC_FLOW_CTRL_ENABLE;
-
- if (duplex)
- flow |= (pause_time << MAC_FLOW_CTRL_PT_SHIFT);
- writel(flow, ioaddr + MAC_FLOW_CTRL);
-
- return;
-}
-
-/* No PMT module supported for this Ethernet Controller.
- * Tested on ST platforms only.
- */
-static void dwmac100_pmt(unsigned long ioaddr, unsigned long mode)
-{
- return;
-}
-
-static void dwmac100_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
- int disable_rx_ic)
-{
- int i;
- for (i = 0; i < ring_size; i++) {
- p->des01.rx.own = 1;
- p->des01.rx.buffer1_size = BUF_SIZE_2KiB - 1;
- if (i == ring_size - 1)
- p->des01.rx.end_ring = 1;
- if (disable_rx_ic)
- p->des01.rx.disable_ic = 1;
- p++;
- }
- return;
-}
-
-static void dwmac100_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
-{
- int i;
- for (i = 0; i < ring_size; i++) {
- p->des01.tx.own = 0;
- if (i == ring_size - 1)
- p->des01.tx.end_ring = 1;
- p++;
- }
- return;
-}
-
-static int dwmac100_get_tx_owner(struct dma_desc *p)
-{
- return p->des01.tx.own;
-}
-
-static int dwmac100_get_rx_owner(struct dma_desc *p)
-{
- return p->des01.rx.own;
-}
-
-static void dwmac100_set_tx_owner(struct dma_desc *p)
-{
- p->des01.tx.own = 1;
-}
-
-static void dwmac100_set_rx_owner(struct dma_desc *p)
-{
- p->des01.rx.own = 1;
-}
-
-static int dwmac100_get_tx_ls(struct dma_desc *p)
-{
- return p->des01.tx.last_segment;
-}
-
-static void dwmac100_release_tx_desc(struct dma_desc *p)
-{
- int ter = p->des01.tx.end_ring;
-
- /* clean field used within the xmit */
- p->des01.tx.first_segment = 0;
- p->des01.tx.last_segment = 0;
- p->des01.tx.buffer1_size = 0;
-
- /* clean status reported */
- p->des01.tx.error_summary = 0;
- p->des01.tx.underflow_error = 0;
- p->des01.tx.no_carrier = 0;
- p->des01.tx.loss_carrier = 0;
- p->des01.tx.excessive_deferral = 0;
- p->des01.tx.excessive_collisions = 0;
- p->des01.tx.late_collision = 0;
- p->des01.tx.heartbeat_fail = 0;
- p->des01.tx.deferred = 0;
-
- /* set termination field */
- p->des01.tx.end_ring = ter;
-
- return;
-}
-
-static void dwmac100_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
- int csum_flag)
-{
- p->des01.tx.first_segment = is_fs;
- p->des01.tx.buffer1_size = len;
-}
-
-static void dwmac100_clear_tx_ic(struct dma_desc *p)
-{
- p->des01.tx.interrupt = 0;
-}
-
-static void dwmac100_close_tx_desc(struct dma_desc *p)
-{
- p->des01.tx.last_segment = 1;
- p->des01.tx.interrupt = 1;
-}
-
-static int dwmac100_get_rx_frame_len(struct dma_desc *p)
-{
- return p->des01.rx.frame_length;
-}
-
-struct stmmac_ops dwmac100_ops = {
- .core_init = dwmac100_core_init,
- .dump_regs = dwmac100_dump_mac_regs,
- .host_irq_status = dwmac100_irq_status,
- .set_filter = dwmac100_set_filter,
- .flow_ctrl = dwmac100_flow_ctrl,
- .pmt = dwmac100_pmt,
- .set_umac_addr = dwmac100_set_umac_addr,
- .get_umac_addr = dwmac100_get_umac_addr,
-};
-
-struct stmmac_dma_ops dwmac100_dma_ops = {
- .init = dwmac100_dma_init,
- .dump_regs = dwmac100_dump_dma_regs,
- .dma_mode = dwmac100_dma_operation_mode,
- .dma_diagnostic_fr = dwmac100_dma_diagnostic_fr,
- .enable_dma_transmission = dwmac_enable_dma_transmission,
- .enable_dma_irq = dwmac_enable_dma_irq,
- .disable_dma_irq = dwmac_disable_dma_irq,
- .start_tx = dwmac_dma_start_tx,
- .stop_tx = dwmac_dma_stop_tx,
- .start_rx = dwmac_dma_start_rx,
- .stop_rx = dwmac_dma_stop_rx,
- .dma_interrupt = dwmac_dma_interrupt,
-};
-
-struct stmmac_desc_ops dwmac100_desc_ops = {
- .tx_status = dwmac100_get_tx_frame_status,
- .rx_status = dwmac100_get_rx_frame_status,
- .get_tx_len = dwmac100_get_tx_len,
- .init_rx_desc = dwmac100_init_rx_desc,
- .init_tx_desc = dwmac100_init_tx_desc,
- .get_tx_owner = dwmac100_get_tx_owner,
- .get_rx_owner = dwmac100_get_rx_owner,
- .release_tx_desc = dwmac100_release_tx_desc,
- .prepare_tx_desc = dwmac100_prepare_tx_desc,
- .clear_tx_ic = dwmac100_clear_tx_ic,
- .close_tx_desc = dwmac100_close_tx_desc,
- .get_tx_ls = dwmac100_get_tx_ls,
- .set_tx_owner = dwmac100_set_tx_owner,
- .set_rx_owner = dwmac100_set_rx_owner,
- .get_rx_frame_len = dwmac100_get_rx_frame_len,
-};
-
-struct mac_device_info *dwmac100_setup(unsigned long ioaddr)
-{
- struct mac_device_info *mac;
-
- mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
-
- pr_info("\tDWMAC100\n");
-
- mac->mac = &dwmac100_ops;
- mac->desc = &dwmac100_desc_ops;
- mac->dma = &dwmac100_dma_ops;
-
- mac->pmt = PMT_NOT_SUPPORTED;
- mac->link.port = MAC_CONTROL_PS;
- mac->link.duplex = MAC_CONTROL_F;
- mac->link.speed = 0;
- mac->mii.addr = MAC_MII_ADDR;
- mac->mii.data = MAC_MII_DATA;
-
- return mac;
-}
diff --git a/drivers/net/stmmac/dwmac100.h b/drivers/net/stmmac/dwmac100.h
index 0f8f110d004a..97956cbf1cb4 100644
--- a/drivers/net/stmmac/dwmac100.h
+++ b/drivers/net/stmmac/dwmac100.h
@@ -22,6 +22,9 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
+#include <linux/phy.h>
+#include "common.h"
+
/*----------------------------------------------------------------------------
* MAC BLOCK defines
*---------------------------------------------------------------------------*/
@@ -114,3 +117,5 @@ enum ttc_control {
#define DMA_MISSED_FRAME_OVE_CNTR 0x0ffe0000 /* Overflow Frame Counter */
#define DMA_MISSED_FRAME_OVE_M 0x00010000 /* Missed Frame Overflow */
 #define DMA_MISSED_FRAME_M_CNTR 0x0000ffff /* Missed Frame Counter */
+
+extern struct stmmac_dma_ops dwmac100_dma_ops;
diff --git a/drivers/net/stmmac/dwmac1000.h b/drivers/net/stmmac/dwmac1000.h
index 62dca0e384e7..d8d0f3553770 100644
--- a/drivers/net/stmmac/dwmac1000.h
+++ b/drivers/net/stmmac/dwmac1000.h
@@ -172,7 +172,6 @@ enum rfd {
deac_full_minus_4 = 0x00401800,
};
#define DMA_CONTROL_TSF 0x00200000 /* Transmit Store and Forward */
-#define DMA_CONTROL_FTF 0x00100000 /* Flush transmit FIFO */
enum ttc_control {
DMA_CONTROL_TTC_64 = 0x00000000,
@@ -206,15 +205,4 @@ enum rtc_control {
#define GMAC_MMC_TX_INTR 0x108
#define GMAC_MMC_RX_CSUM_OFFLOAD 0x208
-#undef DWMAC1000_DEBUG
-/* #define DWMAC1000__DEBUG */
-#undef FRAME_FILTER_DEBUG
-/* #define FRAME_FILTER_DEBUG */
-#ifdef DWMAC1000__DEBUG
-#define DBG(fmt, args...) printk(fmt, ## args)
-#else
-#define DBG(fmt, args...) do { } while (0)
-#endif
-
extern struct stmmac_dma_ops dwmac1000_dma_ops;
-extern struct stmmac_desc_ops dwmac1000_desc_ops;
diff --git a/drivers/net/stmmac/dwmac1000_core.c b/drivers/net/stmmac/dwmac1000_core.c
index 5bd95ebfe498..917b4e16923b 100644
--- a/drivers/net/stmmac/dwmac1000_core.c
+++ b/drivers/net/stmmac/dwmac1000_core.c
@@ -48,7 +48,6 @@ static void dwmac1000_core_init(unsigned long ioaddr)
/* Tag detection without filtering */
writel(0x0, ioaddr + GMAC_VLAN_TAG);
#endif
- return;
}
static void dwmac1000_dump_regs(unsigned long ioaddr)
@@ -61,7 +60,6 @@ static void dwmac1000_dump_regs(unsigned long ioaddr)
pr_info("\tReg No. %d (offset 0x%x): 0x%08x\n", i,
offset, readl(ioaddr + offset));
}
- return;
}
static void dwmac1000_set_umac_addr(unsigned long ioaddr, unsigned char *addr,
@@ -83,8 +81,8 @@ static void dwmac1000_set_filter(struct net_device *dev)
unsigned long ioaddr = dev->base_addr;
unsigned int value = 0;
- DBG(KERN_INFO "%s: # mcasts %d, # unicast %d\n",
- __func__, netdev_mc_count(dev), netdev_uc_count(dev));
+ CHIP_DBG(KERN_INFO "%s: # mcasts %d, # unicast %d\n",
+ __func__, netdev_mc_count(dev), netdev_uc_count(dev));
if (dev->flags & IFF_PROMISC)
value = GMAC_FRAME_FILTER_PR;
@@ -95,17 +93,17 @@ static void dwmac1000_set_filter(struct net_device *dev)
writel(0xffffffff, ioaddr + GMAC_HASH_LOW);
} else if (!netdev_mc_empty(dev)) {
u32 mc_filter[2];
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
/* Hash filter for multicast */
value = GMAC_FRAME_FILTER_HMC;
memset(mc_filter, 0, sizeof(mc_filter));
- netdev_for_each_mc_addr(mclist, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
/* The upper 6 bits of the calculated CRC are used to
 index the contents of the hash table */
int bit_nr =
- bitrev32(~crc32_le(~0, mclist->dmi_addr, 6)) >> 26;
+ bitrev32(~crc32_le(~0, ha->addr, 6)) >> 26;
/* The most significant bit determines the register to
* use (H/L) while the other 5 bits determine the bit
* within the register. */
@@ -136,11 +134,9 @@ static void dwmac1000_set_filter(struct net_device *dev)
#endif
writel(value, ioaddr + GMAC_FRAME_FILTER);
- DBG(KERN_INFO "\tFrame Filter reg: 0x%08x\n\tHash regs: "
+ CHIP_DBG(KERN_INFO "\tFrame Filter reg: 0x%08x\n\tHash regs: "
"HI 0x%08x, LO 0x%08x\n", readl(ioaddr + GMAC_FRAME_FILTER),
readl(ioaddr + GMAC_HASH_HIGH), readl(ioaddr + GMAC_HASH_LOW));
-
- return;
}
static void dwmac1000_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
@@ -148,23 +144,22 @@ static void dwmac1000_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
{
unsigned int flow = 0;
- DBG(KERN_DEBUG "GMAC Flow-Control:\n");
+ CHIP_DBG(KERN_DEBUG "GMAC Flow-Control:\n");
if (fc & FLOW_RX) {
- DBG(KERN_DEBUG "\tReceive Flow-Control ON\n");
+ CHIP_DBG(KERN_DEBUG "\tReceive Flow-Control ON\n");
flow |= GMAC_FLOW_CTRL_RFE;
}
if (fc & FLOW_TX) {
- DBG(KERN_DEBUG "\tTransmit Flow-Control ON\n");
+ CHIP_DBG(KERN_DEBUG "\tTransmit Flow-Control ON\n");
flow |= GMAC_FLOW_CTRL_TFE;
}
if (duplex) {
- DBG(KERN_DEBUG "\tduplex mode: pause time: %d\n", pause_time);
+ CHIP_DBG(KERN_DEBUG "\tduplex mode: PAUSE %d\n", pause_time);
flow |= (pause_time << GMAC_FLOW_CTRL_PT_SHIFT);
}
writel(flow, ioaddr + GMAC_FLOW_CTRL);
- return;
}
static void dwmac1000_pmt(unsigned long ioaddr, unsigned long mode)
@@ -172,15 +167,14 @@ static void dwmac1000_pmt(unsigned long ioaddr, unsigned long mode)
unsigned int pmt = 0;
if (mode == WAKE_MAGIC) {
- DBG(KERN_DEBUG "GMAC: WOL Magic frame\n");
+ CHIP_DBG(KERN_DEBUG "GMAC: WOL Magic frame\n");
pmt |= power_down | magic_pkt_en;
} else if (mode == WAKE_UCAST) {
- DBG(KERN_DEBUG "GMAC: WOL on global unicast\n");
+ CHIP_DBG(KERN_DEBUG "GMAC: WOL on global unicast\n");
pmt |= global_unicast;
}
writel(pmt, ioaddr + GMAC_PMT);
- return;
}
@@ -190,22 +184,20 @@ static void dwmac1000_irq_status(unsigned long ioaddr)
/* Not used events (e.g. MMC interrupts) are not handled. */
if ((intr_status & mmc_tx_irq))
- DBG(KERN_DEBUG "GMAC: MMC tx interrupt: 0x%08x\n",
+ CHIP_DBG(KERN_DEBUG "GMAC: MMC tx interrupt: 0x%08x\n",
readl(ioaddr + GMAC_MMC_TX_INTR));
if (unlikely(intr_status & mmc_rx_irq))
- DBG(KERN_DEBUG "GMAC: MMC rx interrupt: 0x%08x\n",
+ CHIP_DBG(KERN_DEBUG "GMAC: MMC rx interrupt: 0x%08x\n",
readl(ioaddr + GMAC_MMC_RX_INTR));
if (unlikely(intr_status & mmc_rx_csum_offload_irq))
- DBG(KERN_DEBUG "GMAC: MMC rx csum offload: 0x%08x\n",
+ CHIP_DBG(KERN_DEBUG "GMAC: MMC rx csum offload: 0x%08x\n",
readl(ioaddr + GMAC_MMC_RX_CSUM_OFFLOAD));
if (unlikely(intr_status & pmt_irq)) {
- DBG(KERN_DEBUG "GMAC: received Magic frame\n");
+ CHIP_DBG(KERN_DEBUG "GMAC: received Magic frame\n");
/* clear the PMT bits 5 and 6 by reading the PMT
* status register. */
readl(ioaddr + GMAC_PMT);
}
-
- return;
}
struct stmmac_ops dwmac1000_ops = {
@@ -230,7 +222,6 @@ struct mac_device_info *dwmac1000_setup(unsigned long ioaddr)
mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
mac->mac = &dwmac1000_ops;
- mac->desc = &dwmac1000_desc_ops;
mac->dma = &dwmac1000_dma_ops;
mac->pmt = PMT_SUPPORTED;
diff --git a/drivers/net/stmmac/dwmac1000_dma.c b/drivers/net/stmmac/dwmac1000_dma.c
index 39d436a2da68..415805057cb0 100644
--- a/drivers/net/stmmac/dwmac1000_dma.c
+++ b/drivers/net/stmmac/dwmac1000_dma.c
@@ -3,7 +3,7 @@
DWC Ether MAC 10/100/1000 Universal version 3.41a has been used for
developing this code.
- This contains the functions to handle the dma and descriptors.
+ This contains the functions to handle the dma.
Copyright (C) 2007-2009 STMicroelectronics Ltd
@@ -58,29 +58,20 @@ static int dwmac1000_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx,
return 0;
}
-/* Transmit FIFO flush operation */
-static void dwmac1000_flush_tx_fifo(unsigned long ioaddr)
-{
- u32 csr6 = readl(ioaddr + DMA_CONTROL);
- writel((csr6 | DMA_CONTROL_FTF), ioaddr + DMA_CONTROL);
-
- do {} while ((readl(ioaddr + DMA_CONTROL) & DMA_CONTROL_FTF));
-}
-
static void dwmac1000_dma_operation_mode(unsigned long ioaddr, int txmode,
int rxmode)
{
u32 csr6 = readl(ioaddr + DMA_CONTROL);
if (txmode == SF_DMA_MODE) {
- DBG(KERN_DEBUG "GMAC: enabling TX store and forward mode\n");
+ CHIP_DBG(KERN_DEBUG "GMAC: enable TX store and forward mode\n");
/* Transmit COE type 2 cannot be done in cut-through mode. */
csr6 |= DMA_CONTROL_TSF;
/* Operating on second frame increase the performance
* especially when transmit store-and-forward is used.*/
csr6 |= DMA_CONTROL_OSF;
} else {
- DBG(KERN_DEBUG "GMAC: disabling TX store and forward mode"
+ CHIP_DBG(KERN_DEBUG "GMAC: disabling TX store and forward mode"
" (threshold = %d)\n", txmode);
csr6 &= ~DMA_CONTROL_TSF;
csr6 &= DMA_CONTROL_TC_TX_MASK;
@@ -98,10 +89,10 @@ static void dwmac1000_dma_operation_mode(unsigned long ioaddr, int txmode,
}
if (rxmode == SF_DMA_MODE) {
- DBG(KERN_DEBUG "GMAC: enabling RX store and forward mode\n");
+ CHIP_DBG(KERN_DEBUG "GMAC: enable RX store and forward mode\n");
csr6 |= DMA_CONTROL_RSF;
} else {
- DBG(KERN_DEBUG "GMAC: disabling RX store and forward mode"
+ CHIP_DBG(KERN_DEBUG "GMAC: disabling RX store and forward mode"
" (threshold = %d)\n", rxmode);
csr6 &= ~DMA_CONTROL_RSF;
csr6 &= DMA_CONTROL_TC_RX_MASK;
@@ -116,7 +107,6 @@ static void dwmac1000_dma_operation_mode(unsigned long ioaddr, int txmode,
}
writel(csr6, ioaddr + DMA_CONTROL);
- return;
}
/* Not yet implemented --- no RMON module */
@@ -138,306 +128,6 @@ static void dwmac1000_dump_dma_regs(unsigned long ioaddr)
readl(ioaddr + DMA_BUS_MODE + offset));
}
}
- return;
-}
-
-static int dwmac1000_get_tx_frame_status(void *data,
- struct stmmac_extra_stats *x,
- struct dma_desc *p, unsigned long ioaddr)
-{
- int ret = 0;
- struct net_device_stats *stats = (struct net_device_stats *)data;
-
- if (unlikely(p->des01.etx.error_summary)) {
- DBG(KERN_ERR "GMAC TX error... 0x%08x\n", p->des01.etx);
- if (unlikely(p->des01.etx.jabber_timeout)) {
- DBG(KERN_ERR "\tjabber_timeout error\n");
- x->tx_jabber++;
- }
-
- if (unlikely(p->des01.etx.frame_flushed)) {
- DBG(KERN_ERR "\tframe_flushed error\n");
- x->tx_frame_flushed++;
- dwmac1000_flush_tx_fifo(ioaddr);
- }
-
- if (unlikely(p->des01.etx.loss_carrier)) {
- DBG(KERN_ERR "\tloss_carrier error\n");
- x->tx_losscarrier++;
- stats->tx_carrier_errors++;
- }
- if (unlikely(p->des01.etx.no_carrier)) {
- DBG(KERN_ERR "\tno_carrier error\n");
- x->tx_carrier++;
- stats->tx_carrier_errors++;
- }
- if (unlikely(p->des01.etx.late_collision)) {
- DBG(KERN_ERR "\tlate_collision error\n");
- stats->collisions += p->des01.etx.collision_count;
- }
- if (unlikely(p->des01.etx.excessive_collisions)) {
- DBG(KERN_ERR "\texcessive_collisions\n");
- stats->collisions += p->des01.etx.collision_count;
- }
- if (unlikely(p->des01.etx.excessive_deferral)) {
- DBG(KERN_INFO "\texcessive tx_deferral\n");
- x->tx_deferred++;
- }
-
- if (unlikely(p->des01.etx.underflow_error)) {
- DBG(KERN_ERR "\tunderflow error\n");
- dwmac1000_flush_tx_fifo(ioaddr);
- x->tx_underflow++;
- }
-
- if (unlikely(p->des01.etx.ip_header_error)) {
- DBG(KERN_ERR "\tTX IP header csum error\n");
- x->tx_ip_header_error++;
- }
-
- if (unlikely(p->des01.etx.payload_error)) {
- DBG(KERN_ERR "\tAddr/Payload csum error\n");
- x->tx_payload_error++;
- dwmac1000_flush_tx_fifo(ioaddr);
- }
-
- ret = -1;
- }
-
- if (unlikely(p->des01.etx.deferred)) {
- DBG(KERN_INFO "GMAC TX status: tx deferred\n");
- x->tx_deferred++;
- }
-#ifdef STMMAC_VLAN_TAG_USED
- if (p->des01.etx.vlan_frame) {
- DBG(KERN_INFO "GMAC TX status: VLAN frame\n");
- x->tx_vlan++;
- }
-#endif
-
- return ret;
-}
-
-static int dwmac1000_get_tx_len(struct dma_desc *p)
-{
- return p->des01.etx.buffer1_size;
-}
-
-static int dwmac1000_coe_rdes0(int ipc_err, int type, int payload_err)
-{
- int ret = good_frame;
- u32 status = (type << 2 | ipc_err << 1 | payload_err) & 0x7;
-
- /* bits 5 7 0 | Frame status
- * ----------------------------------------------------------
- * 0 0 0 | IEEE 802.3 Type frame (length < 1536 octects)
- * 1 0 0 | IPv4/6 No CSUM errorS.
- * 1 0 1 | IPv4/6 CSUM PAYLOAD error
- * 1 1 0 | IPv4/6 CSUM IP HR error
- * 1 1 1 | IPv4/6 IP PAYLOAD AND HEADER errorS
- * 0 0 1 | IPv4/6 unsupported IP PAYLOAD
- * 0 1 1 | COE bypassed.. no IPv4/6 frame
- * 0 1 0 | Reserved.
- */
- if (status == 0x0) {
- DBG(KERN_INFO "RX Des0 status: IEEE 802.3 Type frame.\n");
- ret = good_frame;
- } else if (status == 0x4) {
- DBG(KERN_INFO "RX Des0 status: IPv4/6 No CSUM errorS.\n");
- ret = good_frame;
- } else if (status == 0x5) {
- DBG(KERN_ERR "RX Des0 status: IPv4/6 Payload Error.\n");
- ret = csum_none;
- } else if (status == 0x6) {
- DBG(KERN_ERR "RX Des0 status: IPv4/6 Header Error.\n");
- ret = csum_none;
- } else if (status == 0x7) {
- DBG(KERN_ERR
- "RX Des0 status: IPv4/6 Header and Payload Error.\n");
- ret = csum_none;
- } else if (status == 0x1) {
- DBG(KERN_ERR
- "RX Des0 status: IPv4/6 unsupported IP PAYLOAD.\n");
- ret = discard_frame;
- } else if (status == 0x3) {
- DBG(KERN_ERR "RX Des0 status: No IPv4, IPv6 frame.\n");
- ret = discard_frame;
- }
- return ret;
-}
-
-static int dwmac1000_get_rx_frame_status(void *data,
- struct stmmac_extra_stats *x, struct dma_desc *p)
-{
- int ret = good_frame;
- struct net_device_stats *stats = (struct net_device_stats *)data;
-
- if (unlikely(p->des01.erx.error_summary)) {
- DBG(KERN_ERR "GMAC RX Error Summary... 0x%08x\n", p->des01.erx);
- if (unlikely(p->des01.erx.descriptor_error)) {
- DBG(KERN_ERR "\tdescriptor error\n");
- x->rx_desc++;
- stats->rx_length_errors++;
- }
- if (unlikely(p->des01.erx.overflow_error)) {
- DBG(KERN_ERR "\toverflow error\n");
- x->rx_gmac_overflow++;
- }
-
- if (unlikely(p->des01.erx.ipc_csum_error))
- DBG(KERN_ERR "\tIPC Csum Error/Giant frame\n");
-
- if (unlikely(p->des01.erx.late_collision)) {
- DBG(KERN_ERR "\tlate_collision error\n");
- stats->collisions++;
- stats->collisions++;
- }
- if (unlikely(p->des01.erx.receive_watchdog)) {
- DBG(KERN_ERR "\treceive_watchdog error\n");
- x->rx_watchdog++;
- }
- if (unlikely(p->des01.erx.error_gmii)) {
- DBG(KERN_ERR "\tReceive Error\n");
- x->rx_mii++;
- }
- if (unlikely(p->des01.erx.crc_error)) {
- DBG(KERN_ERR "\tCRC error\n");
- x->rx_crc++;
- stats->rx_crc_errors++;
- }
- ret = discard_frame;
- }
-
- /* After a payload csum error, the ES bit is set.
- * It doesn't match with the information reported into the databook.
- * At any rate, we need to understand if the CSUM hw computation is ok
- * and report this info to the upper layers. */
- ret = dwmac1000_coe_rdes0(p->des01.erx.ipc_csum_error,
- p->des01.erx.frame_type, p->des01.erx.payload_csum_error);
-
- if (unlikely(p->des01.erx.dribbling)) {
- DBG(KERN_ERR "GMAC RX: dribbling error\n");
- ret = discard_frame;
- }
- if (unlikely(p->des01.erx.sa_filter_fail)) {
- DBG(KERN_ERR "GMAC RX : Source Address filter fail\n");
- x->sa_rx_filter_fail++;
- ret = discard_frame;
- }
- if (unlikely(p->des01.erx.da_filter_fail)) {
- DBG(KERN_ERR "GMAC RX : Destination Address filter fail\n");
- x->da_rx_filter_fail++;
- ret = discard_frame;
- }
- if (unlikely(p->des01.erx.length_error)) {
- DBG(KERN_ERR "GMAC RX: length_error error\n");
- x->rx_length++;
- ret = discard_frame;
- }
-#ifdef STMMAC_VLAN_TAG_USED
- if (p->des01.erx.vlan_tag) {
- DBG(KERN_INFO "GMAC RX: VLAN frame tagged\n");
- x->rx_vlan++;
- }
-#endif
- return ret;
-}
-
-static void dwmac1000_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
- int disable_rx_ic)
-{
- int i;
- for (i = 0; i < ring_size; i++) {
- p->des01.erx.own = 1;
- p->des01.erx.buffer1_size = BUF_SIZE_8KiB - 1;
- /* To support jumbo frames */
- p->des01.erx.buffer2_size = BUF_SIZE_8KiB - 1;
- if (i == ring_size - 1)
- p->des01.erx.end_ring = 1;
- if (disable_rx_ic)
- p->des01.erx.disable_ic = 1;
- p++;
- }
- return;
-}
-
-static void dwmac1000_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
-{
- int i;
-
- for (i = 0; i < ring_size; i++) {
- p->des01.etx.own = 0;
- if (i == ring_size - 1)
- p->des01.etx.end_ring = 1;
- p++;
- }
-
- return;
-}
-
-static int dwmac1000_get_tx_owner(struct dma_desc *p)
-{
- return p->des01.etx.own;
-}
-
-static int dwmac1000_get_rx_owner(struct dma_desc *p)
-{
- return p->des01.erx.own;
-}
-
-static void dwmac1000_set_tx_owner(struct dma_desc *p)
-{
- p->des01.etx.own = 1;
-}
-
-static void dwmac1000_set_rx_owner(struct dma_desc *p)
-{
- p->des01.erx.own = 1;
-}
-
-static int dwmac1000_get_tx_ls(struct dma_desc *p)
-{
- return p->des01.etx.last_segment;
-}
-
-static void dwmac1000_release_tx_desc(struct dma_desc *p)
-{
- int ter = p->des01.etx.end_ring;
-
- memset(p, 0, sizeof(struct dma_desc));
- p->des01.etx.end_ring = ter;
-
- return;
-}
-
-static void dwmac1000_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
- int csum_flag)
-{
- p->des01.etx.first_segment = is_fs;
- if (unlikely(len > BUF_SIZE_4KiB)) {
- p->des01.etx.buffer1_size = BUF_SIZE_4KiB;
- p->des01.etx.buffer2_size = len - BUF_SIZE_4KiB;
- } else {
- p->des01.etx.buffer1_size = len;
- }
- if (likely(csum_flag))
- p->des01.etx.checksum_insertion = cic_full;
-}
-
-static void dwmac1000_clear_tx_ic(struct dma_desc *p)
-{
- p->des01.etx.interrupt = 0;
-}
-
-static void dwmac1000_close_tx_desc(struct dma_desc *p)
-{
- p->des01.etx.last_segment = 1;
- p->des01.etx.interrupt = 1;
-}
-
-static int dwmac1000_get_rx_frame_len(struct dma_desc *p)
-{
- return p->des01.erx.frame_length;
}
struct stmmac_dma_ops dwmac1000_dma_ops = {
@@ -454,21 +144,3 @@ struct stmmac_dma_ops dwmac1000_dma_ops = {
.stop_rx = dwmac_dma_stop_rx,
.dma_interrupt = dwmac_dma_interrupt,
};
-
-struct stmmac_desc_ops dwmac1000_desc_ops = {
- .tx_status = dwmac1000_get_tx_frame_status,
- .rx_status = dwmac1000_get_rx_frame_status,
- .get_tx_len = dwmac1000_get_tx_len,
- .init_rx_desc = dwmac1000_init_rx_desc,
- .init_tx_desc = dwmac1000_init_tx_desc,
- .get_tx_owner = dwmac1000_get_tx_owner,
- .get_rx_owner = dwmac1000_get_rx_owner,
- .release_tx_desc = dwmac1000_release_tx_desc,
- .prepare_tx_desc = dwmac1000_prepare_tx_desc,
- .clear_tx_ic = dwmac1000_clear_tx_ic,
- .close_tx_desc = dwmac1000_close_tx_desc,
- .get_tx_ls = dwmac1000_get_tx_ls,
- .set_tx_owner = dwmac1000_set_tx_owner,
- .set_rx_owner = dwmac1000_set_rx_owner,
- .get_rx_frame_len = dwmac1000_get_rx_frame_len,
-};
diff --git a/drivers/net/stmmac/dwmac100_core.c b/drivers/net/stmmac/dwmac100_core.c
new file mode 100644
index 000000000000..6f270a0e151a
--- /dev/null
+++ b/drivers/net/stmmac/dwmac100_core.c
@@ -0,0 +1,196 @@
+/*******************************************************************************
+ This is the driver for the MAC 10/100 on-chip Ethernet controller
+ currently tested on all the ST boards based on STb7109 and stx7200 SoCs.
+
+ DWC Ether MAC 10/100 Universal version 4.0 has been used for developing
+ this code.
+
+ This only implements the mac core functions for this chip.
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/crc32.h>
+#include "dwmac100.h"
+
+static void dwmac100_core_init(unsigned long ioaddr)
+{
+ u32 value = readl(ioaddr + MAC_CONTROL);
+
+ writel((value | MAC_CORE_INIT), ioaddr + MAC_CONTROL);
+
+#ifdef STMMAC_VLAN_TAG_USED
+ writel(ETH_P_8021Q, ioaddr + MAC_VLAN1);
+#endif
+}
+
+static void dwmac100_dump_mac_regs(unsigned long ioaddr)
+{
+ pr_info("\t----------------------------------------------\n"
+ "\t DWMAC 100 CSR (base addr = 0x%8x)\n"
+ "\t----------------------------------------------\n",
+ (unsigned int)ioaddr);
+ pr_info("\tcontrol reg (offset 0x%x): 0x%08x\n", MAC_CONTROL,
+ readl(ioaddr + MAC_CONTROL));
+ pr_info("\taddr HI (offset 0x%x): 0x%08x\n ", MAC_ADDR_HIGH,
+ readl(ioaddr + MAC_ADDR_HIGH));
+ pr_info("\taddr LO (offset 0x%x): 0x%08x\n", MAC_ADDR_LOW,
+ readl(ioaddr + MAC_ADDR_LOW));
+ pr_info("\tmulticast hash HI (offset 0x%x): 0x%08x\n",
+ MAC_HASH_HIGH, readl(ioaddr + MAC_HASH_HIGH));
+ pr_info("\tmulticast hash LO (offset 0x%x): 0x%08x\n",
+ MAC_HASH_LOW, readl(ioaddr + MAC_HASH_LOW));
+ pr_info("\tflow control (offset 0x%x): 0x%08x\n",
+ MAC_FLOW_CTRL, readl(ioaddr + MAC_FLOW_CTRL));
+ pr_info("\tVLAN1 tag (offset 0x%x): 0x%08x\n", MAC_VLAN1,
+ readl(ioaddr + MAC_VLAN1));
+ pr_info("\tVLAN2 tag (offset 0x%x): 0x%08x\n", MAC_VLAN2,
+ readl(ioaddr + MAC_VLAN2));
+ pr_info("\n\tMAC management counter registers\n");
+	pr_info("\t MMC ctrl (offset 0x%x): 0x%08x\n",
+ MMC_CONTROL, readl(ioaddr + MMC_CONTROL));
+ pr_info("\t MMC High Interrupt (offset 0x%x): 0x%08x\n",
+ MMC_HIGH_INTR, readl(ioaddr + MMC_HIGH_INTR));
+ pr_info("\t MMC Low Interrupt (offset 0x%x): 0x%08x\n",
+ MMC_LOW_INTR, readl(ioaddr + MMC_LOW_INTR));
+ pr_info("\t MMC High Interrupt Mask (offset 0x%x): 0x%08x\n",
+ MMC_HIGH_INTR_MASK, readl(ioaddr + MMC_HIGH_INTR_MASK));
+ pr_info("\t MMC Low Interrupt Mask (offset 0x%x): 0x%08x\n",
+ MMC_LOW_INTR_MASK, readl(ioaddr + MMC_LOW_INTR_MASK));
+}
+
+static void dwmac100_irq_status(unsigned long ioaddr)
+{
+ return;
+}
+
+static void dwmac100_set_umac_addr(unsigned long ioaddr, unsigned char *addr,
+ unsigned int reg_n)
+{
+ stmmac_set_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW);
+}
+
+static void dwmac100_get_umac_addr(unsigned long ioaddr, unsigned char *addr,
+ unsigned int reg_n)
+{
+ stmmac_get_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW);
+}
+
+static void dwmac100_set_filter(struct net_device *dev)
+{
+ unsigned long ioaddr = dev->base_addr;
+ u32 value = readl(ioaddr + MAC_CONTROL);
+
+ if (dev->flags & IFF_PROMISC) {
+ value |= MAC_CONTROL_PR;
+ value &= ~(MAC_CONTROL_PM | MAC_CONTROL_IF | MAC_CONTROL_HO |
+ MAC_CONTROL_HP);
+ } else if ((netdev_mc_count(dev) > HASH_TABLE_SIZE)
+ || (dev->flags & IFF_ALLMULTI)) {
+ value |= MAC_CONTROL_PM;
+ value &= ~(MAC_CONTROL_PR | MAC_CONTROL_IF | MAC_CONTROL_HO);
+ writel(0xffffffff, ioaddr + MAC_HASH_HIGH);
+ writel(0xffffffff, ioaddr + MAC_HASH_LOW);
+ } else if (netdev_mc_empty(dev)) { /* no multicast */
+ value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR | MAC_CONTROL_IF |
+ MAC_CONTROL_HO | MAC_CONTROL_HP);
+ } else {
+ u32 mc_filter[2];
+ struct netdev_hw_addr *ha;
+
+ /* Perfect filter mode for physical address and Hash
+ filter for multicast */
+ value |= MAC_CONTROL_HP;
+ value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR |
+ MAC_CONTROL_IF | MAC_CONTROL_HO);
+
+ memset(mc_filter, 0, sizeof(mc_filter));
+ netdev_for_each_mc_addr(ha, dev) {
+ /* The upper 6 bits of the calculated CRC are used to
+			 * index the contents of the hash table */
+ int bit_nr =
+ ether_crc(ETH_ALEN, ha->addr) >> 26;
+ /* The most significant bit determines the register to
+ * use (H/L) while the other 5 bits determine the bit
+ * within the register. */
+ mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
+ }
+ writel(mc_filter[0], ioaddr + MAC_HASH_LOW);
+ writel(mc_filter[1], ioaddr + MAC_HASH_HIGH);
+ }
+
+ writel(value, ioaddr + MAC_CONTROL);
+
+ CHIP_DBG(KERN_INFO "%s: CTRL reg: 0x%08x Hash regs: "
+ "HI 0x%08x, LO 0x%08x\n",
+ __func__, readl(ioaddr + MAC_CONTROL),
+ readl(ioaddr + MAC_HASH_HIGH), readl(ioaddr + MAC_HASH_LOW));
+}
+
+static void dwmac100_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
+ unsigned int fc, unsigned int pause_time)
+{
+ unsigned int flow = MAC_FLOW_CTRL_ENABLE;
+
+ if (duplex)
+ flow |= (pause_time << MAC_FLOW_CTRL_PT_SHIFT);
+ writel(flow, ioaddr + MAC_FLOW_CTRL);
+}
+
+/* No PMT module supported for this Ethernet Controller.
+ * Tested on ST platforms only.
+ */
+static void dwmac100_pmt(unsigned long ioaddr, unsigned long mode)
+{
+ return;
+}
+
+struct stmmac_ops dwmac100_ops = {
+ .core_init = dwmac100_core_init,
+ .dump_regs = dwmac100_dump_mac_regs,
+ .host_irq_status = dwmac100_irq_status,
+ .set_filter = dwmac100_set_filter,
+ .flow_ctrl = dwmac100_flow_ctrl,
+ .pmt = dwmac100_pmt,
+ .set_umac_addr = dwmac100_set_umac_addr,
+ .get_umac_addr = dwmac100_get_umac_addr,
+};
+
+struct mac_device_info *dwmac100_setup(unsigned long ioaddr)
+{
+ struct mac_device_info *mac;
+
+ mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
+
+ pr_info("\tDWMAC100\n");
+
+ mac->mac = &dwmac100_ops;
+ mac->dma = &dwmac100_dma_ops;
+
+ mac->pmt = PMT_NOT_SUPPORTED;
+ mac->link.port = MAC_CONTROL_PS;
+ mac->link.duplex = MAC_CONTROL_F;
+ mac->link.speed = 0;
+ mac->mii.addr = MAC_MII_ADDR;
+ mac->mii.data = MAC_MII_DATA;
+
+ return mac;
+}
diff --git a/drivers/net/stmmac/dwmac100_dma.c b/drivers/net/stmmac/dwmac100_dma.c
new file mode 100644
index 000000000000..2fece7b72727
--- /dev/null
+++ b/drivers/net/stmmac/dwmac100_dma.c
@@ -0,0 +1,134 @@
+/*******************************************************************************
+ This is the driver for the MAC 10/100 on-chip Ethernet controller
+ currently tested on all the ST boards based on STb7109 and stx7200 SoCs.
+
+ DWC Ether MAC 10/100 Universal version 4.0 has been used for developing
+ this code.
+
+ This contains the functions to handle the DMA.
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include "dwmac100.h"
+#include "dwmac_dma.h"
+
+static int dwmac100_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx,
+ u32 dma_rx)
+{
+ u32 value = readl(ioaddr + DMA_BUS_MODE);
+ /* DMA SW reset */
+ value |= DMA_BUS_MODE_SFT_RESET;
+ writel(value, ioaddr + DMA_BUS_MODE);
+ do {} while ((readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET));
+
+ /* Enable Application Access by writing to DMA CSR0 */
+ writel(DMA_BUS_MODE_DEFAULT | (pbl << DMA_BUS_MODE_PBL_SHIFT),
+ ioaddr + DMA_BUS_MODE);
+
+ /* Mask interrupts by writing to CSR7 */
+ writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
+
+ /* The base address of the RX/TX descriptor lists must be written into
+ * DMA CSR3 and CSR4, respectively. */
+ writel(dma_tx, ioaddr + DMA_TX_BASE_ADDR);
+ writel(dma_rx, ioaddr + DMA_RCV_BASE_ADDR);
+
+ return 0;
+}
+
+/* The Store-and-Forward capability is not used at all.
+ * The transmit threshold can be programmed by
+ * setting the TTC bits in the DMA control register. */
+static void dwmac100_dma_operation_mode(unsigned long ioaddr, int txmode,
+ int rxmode)
+{
+ u32 csr6 = readl(ioaddr + DMA_CONTROL);
+
+ if (txmode <= 32)
+ csr6 |= DMA_CONTROL_TTC_32;
+ else if (txmode <= 64)
+ csr6 |= DMA_CONTROL_TTC_64;
+ else
+ csr6 |= DMA_CONTROL_TTC_128;
+
+ writel(csr6, ioaddr + DMA_CONTROL);
+}
+
+static void dwmac100_dump_dma_regs(unsigned long ioaddr)
+{
+ int i;
+
+ CHIP_DBG(KERN_DEBUG "DWMAC 100 DMA CSR\n");
+ for (i = 0; i < 9; i++)
+ pr_debug("\t CSR%d (offset 0x%x): 0x%08x\n", i,
+ (DMA_BUS_MODE + i * 4),
+ readl(ioaddr + DMA_BUS_MODE + i * 4));
+ CHIP_DBG(KERN_DEBUG "\t CSR20 (offset 0x%x): 0x%08x\n",
+ DMA_CUR_TX_BUF_ADDR, readl(ioaddr + DMA_CUR_TX_BUF_ADDR));
+ CHIP_DBG(KERN_DEBUG "\t CSR21 (offset 0x%x): 0x%08x\n",
+ DMA_CUR_RX_BUF_ADDR, readl(ioaddr + DMA_CUR_RX_BUF_ADDR));
+}
+
+/* The DMA controller has two counters that track the number of
+ * missed receive frames. */
+static void dwmac100_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x,
+ unsigned long ioaddr)
+{
+ struct net_device_stats *stats = (struct net_device_stats *)data;
+ u32 csr8 = readl(ioaddr + DMA_MISSED_FRAME_CTR);
+
+ if (unlikely(csr8)) {
+ if (csr8 & DMA_MISSED_FRAME_OVE) {
+ stats->rx_over_errors += 0x800;
+ x->rx_overflow_cntr += 0x800;
+ } else {
+ unsigned int ove_cntr;
+ ove_cntr = ((csr8 & DMA_MISSED_FRAME_OVE_CNTR) >> 17);
+ stats->rx_over_errors += ove_cntr;
+ x->rx_overflow_cntr += ove_cntr;
+ }
+
+ if (csr8 & DMA_MISSED_FRAME_OVE_M) {
+ stats->rx_missed_errors += 0xffff;
+ x->rx_missed_cntr += 0xffff;
+ } else {
+ unsigned int miss_f = (csr8 & DMA_MISSED_FRAME_M_CNTR);
+ stats->rx_missed_errors += miss_f;
+ x->rx_missed_cntr += miss_f;
+ }
+ }
+}
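+
+/* Reading of CSR8 as used above (inferred from the code, not the databook):
+ * the low 16 bits hold the missed-frame count and the field starting at
+ * bit 17 holds the overflow count; when the matching overflow flag is set
+ * the counter has wrapped, so its maximum (0xffff or 0x800) is accumulated
+ * instead of the counter value itself. */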
+
+struct stmmac_dma_ops dwmac100_dma_ops = {
+ .init = dwmac100_dma_init,
+ .dump_regs = dwmac100_dump_dma_regs,
+ .dma_mode = dwmac100_dma_operation_mode,
+ .dma_diagnostic_fr = dwmac100_dma_diagnostic_fr,
+ .enable_dma_transmission = dwmac_enable_dma_transmission,
+ .enable_dma_irq = dwmac_enable_dma_irq,
+ .disable_dma_irq = dwmac_disable_dma_irq,
+ .start_tx = dwmac_dma_start_tx,
+ .stop_tx = dwmac_dma_stop_tx,
+ .start_rx = dwmac_dma_start_rx,
+ .stop_rx = dwmac_dma_stop_rx,
+ .dma_interrupt = dwmac_dma_interrupt,
+};
diff --git a/drivers/net/stmmac/dwmac_dma.h b/drivers/net/stmmac/dwmac_dma.h
index de848d9f6060..7b815a1b7b8c 100644
--- a/drivers/net/stmmac/dwmac_dma.h
+++ b/drivers/net/stmmac/dwmac_dma.h
@@ -95,6 +95,7 @@
#define DMA_STATUS_TU 0x00000004 /* Transmit Buffer Unavailable */
#define DMA_STATUS_TPS 0x00000002 /* Transmit Process Stopped */
#define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */
+#define DMA_CONTROL_FTF 0x00100000 /* Flush transmit FIFO */
extern void dwmac_enable_dma_transmission(unsigned long ioaddr);
extern void dwmac_enable_dma_irq(unsigned long ioaddr);
diff --git a/drivers/net/stmmac/dwmac_lib.c b/drivers/net/stmmac/dwmac_lib.c
index d4adb1eaa447..a85415216ef4 100644
--- a/drivers/net/stmmac/dwmac_lib.c
+++ b/drivers/net/stmmac/dwmac_lib.c
@@ -52,7 +52,6 @@ void dwmac_dma_start_tx(unsigned long ioaddr)
u32 value = readl(ioaddr + DMA_CONTROL);
value |= DMA_CONTROL_ST;
writel(value, ioaddr + DMA_CONTROL);
- return;
}
void dwmac_dma_stop_tx(unsigned long ioaddr)
@@ -60,7 +59,6 @@ void dwmac_dma_stop_tx(unsigned long ioaddr)
u32 value = readl(ioaddr + DMA_CONTROL);
value &= ~DMA_CONTROL_ST;
writel(value, ioaddr + DMA_CONTROL);
- return;
}
void dwmac_dma_start_rx(unsigned long ioaddr)
@@ -68,8 +66,6 @@ void dwmac_dma_start_rx(unsigned long ioaddr)
u32 value = readl(ioaddr + DMA_CONTROL);
value |= DMA_CONTROL_SR;
writel(value, ioaddr + DMA_CONTROL);
-
- return;
}
void dwmac_dma_stop_rx(unsigned long ioaddr)
@@ -77,8 +73,6 @@ void dwmac_dma_stop_rx(unsigned long ioaddr)
u32 value = readl(ioaddr + DMA_CONTROL);
value &= ~DMA_CONTROL_SR;
writel(value, ioaddr + DMA_CONTROL);
-
- return;
}
#ifdef DWMAC_DMA_DEBUG
@@ -111,7 +105,6 @@ static void show_tx_process_state(unsigned int status)
default:
break;
}
- return;
}
static void show_rx_process_state(unsigned int status)
@@ -149,7 +142,6 @@ static void show_rx_process_state(unsigned int status)
default:
break;
}
- return;
}
#endif
@@ -227,6 +219,13 @@ int dwmac_dma_interrupt(unsigned long ioaddr,
return ret;
}
+void dwmac_dma_flush_tx_fifo(unsigned long ioaddr)
+{
+ u32 csr6 = readl(ioaddr + DMA_CONTROL);
+ writel((csr6 | DMA_CONTROL_FTF), ioaddr + DMA_CONTROL);
+
+ do {} while ((readl(ioaddr + DMA_CONTROL) & DMA_CONTROL_FTF));
+}
void stmmac_set_mac_addr(unsigned long ioaddr, u8 addr[6],
unsigned int high, unsigned int low)
@@ -237,8 +236,6 @@ void stmmac_set_mac_addr(unsigned long ioaddr, u8 addr[6],
writel(data, ioaddr + high);
data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
writel(data, ioaddr + low);
-
- return;
}
void stmmac_get_mac_addr(unsigned long ioaddr, unsigned char *addr,
@@ -257,7 +254,5 @@ void stmmac_get_mac_addr(unsigned long ioaddr, unsigned char *addr,
addr[3] = (lo_addr >> 24) & 0xff;
addr[4] = hi_addr & 0xff;
addr[5] = (hi_addr >> 8) & 0xff;
-
- return;
}
diff --git a/drivers/net/stmmac/enh_desc.c b/drivers/net/stmmac/enh_desc.c
new file mode 100644
index 000000000000..3c18ebece043
--- /dev/null
+++ b/drivers/net/stmmac/enh_desc.c
@@ -0,0 +1,337 @@
+/*******************************************************************************
+ This contains the functions to handle the enhanced descriptors.
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include "common.h"
+
+static int enh_desc_get_tx_status(void *data, struct stmmac_extra_stats *x,
+ struct dma_desc *p, unsigned long ioaddr)
+{
+ int ret = 0;
+ struct net_device_stats *stats = (struct net_device_stats *)data;
+
+ if (unlikely(p->des01.etx.error_summary)) {
+ CHIP_DBG(KERN_ERR "GMAC TX error... 0x%08x\n", p->des01.etx);
+ if (unlikely(p->des01.etx.jabber_timeout)) {
+ CHIP_DBG(KERN_ERR "\tjabber_timeout error\n");
+ x->tx_jabber++;
+ }
+
+ if (unlikely(p->des01.etx.frame_flushed)) {
+ CHIP_DBG(KERN_ERR "\tframe_flushed error\n");
+ x->tx_frame_flushed++;
+ dwmac_dma_flush_tx_fifo(ioaddr);
+ }
+
+ if (unlikely(p->des01.etx.loss_carrier)) {
+ CHIP_DBG(KERN_ERR "\tloss_carrier error\n");
+ x->tx_losscarrier++;
+ stats->tx_carrier_errors++;
+ }
+ if (unlikely(p->des01.etx.no_carrier)) {
+ CHIP_DBG(KERN_ERR "\tno_carrier error\n");
+ x->tx_carrier++;
+ stats->tx_carrier_errors++;
+ }
+ if (unlikely(p->des01.etx.late_collision)) {
+ CHIP_DBG(KERN_ERR "\tlate_collision error\n");
+ stats->collisions += p->des01.etx.collision_count;
+ }
+ if (unlikely(p->des01.etx.excessive_collisions)) {
+ CHIP_DBG(KERN_ERR "\texcessive_collisions\n");
+ stats->collisions += p->des01.etx.collision_count;
+ }
+ if (unlikely(p->des01.etx.excessive_deferral)) {
+ CHIP_DBG(KERN_INFO "\texcessive tx_deferral\n");
+ x->tx_deferred++;
+ }
+
+ if (unlikely(p->des01.etx.underflow_error)) {
+ CHIP_DBG(KERN_ERR "\tunderflow error\n");
+ dwmac_dma_flush_tx_fifo(ioaddr);
+ x->tx_underflow++;
+ }
+
+ if (unlikely(p->des01.etx.ip_header_error)) {
+ CHIP_DBG(KERN_ERR "\tTX IP header csum error\n");
+ x->tx_ip_header_error++;
+ }
+
+ if (unlikely(p->des01.etx.payload_error)) {
+ CHIP_DBG(KERN_ERR "\tAddr/Payload csum error\n");
+ x->tx_payload_error++;
+ dwmac_dma_flush_tx_fifo(ioaddr);
+ }
+
+ ret = -1;
+ }
+
+ if (unlikely(p->des01.etx.deferred)) {
+ CHIP_DBG(KERN_INFO "GMAC TX status: tx deferred\n");
+ x->tx_deferred++;
+ }
+#ifdef STMMAC_VLAN_TAG_USED
+ if (p->des01.etx.vlan_frame) {
+ CHIP_DBG(KERN_INFO "GMAC TX status: VLAN frame\n");
+ x->tx_vlan++;
+ }
+#endif
+
+ return ret;
+}
+
+static int enh_desc_get_tx_len(struct dma_desc *p)
+{
+ return p->des01.etx.buffer1_size;
+}
+
+static int enh_desc_coe_rdes0(int ipc_err, int type, int payload_err)
+{
+ int ret = good_frame;
+ u32 status = (type << 2 | ipc_err << 1 | payload_err) & 0x7;
+
+	/* bits 5 7 0 | Frame status
+	 * ----------------------------------------------------------
+	 *      0 0 0 | IEEE 802.3 Type frame (length < 1536 octets)
+	 *      1 0 0 | IPv4/6 No CSUM errors.
+	 *      1 0 1 | IPv4/6 CSUM PAYLOAD error
+	 *      1 1 0 | IPv4/6 CSUM IP HEADER error
+	 *      1 1 1 | IPv4/6 IP PAYLOAD AND HEADER errors
+	 *      0 0 1 | IPv4/6 unsupported IP PAYLOAD
+	 *      0 1 1 | COE bypassed, no IPv4/6 frame
+	 *      0 1 0 | Reserved.
+	 */
+ if (status == 0x0) {
+ CHIP_DBG(KERN_INFO "RX Des0 status: IEEE 802.3 Type frame.\n");
+ ret = good_frame;
+ } else if (status == 0x4) {
+		CHIP_DBG(KERN_INFO "RX Des0 status: IPv4/6 No CSUM errors.\n");
+ ret = good_frame;
+ } else if (status == 0x5) {
+ CHIP_DBG(KERN_ERR "RX Des0 status: IPv4/6 Payload Error.\n");
+ ret = csum_none;
+ } else if (status == 0x6) {
+ CHIP_DBG(KERN_ERR "RX Des0 status: IPv4/6 Header Error.\n");
+ ret = csum_none;
+ } else if (status == 0x7) {
+ CHIP_DBG(KERN_ERR
+ "RX Des0 status: IPv4/6 Header and Payload Error.\n");
+ ret = csum_none;
+ } else if (status == 0x1) {
+ CHIP_DBG(KERN_ERR
+ "RX Des0 status: IPv4/6 unsupported IP PAYLOAD.\n");
+ ret = discard_frame;
+ } else if (status == 0x3) {
+ CHIP_DBG(KERN_ERR "RX Des0 status: No IPv4, IPv6 frame.\n");
+ ret = discard_frame;
+ }
+ return ret;
+}
+
+static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
+ struct dma_desc *p)
+{
+ int ret = good_frame;
+ struct net_device_stats *stats = (struct net_device_stats *)data;
+
+ if (unlikely(p->des01.erx.error_summary)) {
+ CHIP_DBG(KERN_ERR "GMAC RX Error Summary 0x%08x\n",
+ p->des01.erx);
+ if (unlikely(p->des01.erx.descriptor_error)) {
+ CHIP_DBG(KERN_ERR "\tdescriptor error\n");
+ x->rx_desc++;
+ stats->rx_length_errors++;
+ }
+ if (unlikely(p->des01.erx.overflow_error)) {
+ CHIP_DBG(KERN_ERR "\toverflow error\n");
+ x->rx_gmac_overflow++;
+ }
+
+ if (unlikely(p->des01.erx.ipc_csum_error))
+ CHIP_DBG(KERN_ERR "\tIPC Csum Error/Giant frame\n");
+
+ if (unlikely(p->des01.erx.late_collision)) {
+ CHIP_DBG(KERN_ERR "\tlate_collision error\n");
+			stats->collisions++;
+ }
+ if (unlikely(p->des01.erx.receive_watchdog)) {
+ CHIP_DBG(KERN_ERR "\treceive_watchdog error\n");
+ x->rx_watchdog++;
+ }
+ if (unlikely(p->des01.erx.error_gmii)) {
+ CHIP_DBG(KERN_ERR "\tReceive Error\n");
+ x->rx_mii++;
+ }
+ if (unlikely(p->des01.erx.crc_error)) {
+ CHIP_DBG(KERN_ERR "\tCRC error\n");
+ x->rx_crc++;
+ stats->rx_crc_errors++;
+ }
+ ret = discard_frame;
+ }
+
+	/* After a payload csum error, the ES bit is set.
+	 * This does not match the information reported in the databook.
+	 * In any case, we need to check whether the HW checksum computation
+	 * succeeded and report that to the upper layers. */
+ ret = enh_desc_coe_rdes0(p->des01.erx.ipc_csum_error,
+ p->des01.erx.frame_type, p->des01.erx.payload_csum_error);
+
+ if (unlikely(p->des01.erx.dribbling)) {
+ CHIP_DBG(KERN_ERR "GMAC RX: dribbling error\n");
+ ret = discard_frame;
+ }
+ if (unlikely(p->des01.erx.sa_filter_fail)) {
+ CHIP_DBG(KERN_ERR "GMAC RX : Source Address filter fail\n");
+ x->sa_rx_filter_fail++;
+ ret = discard_frame;
+ }
+ if (unlikely(p->des01.erx.da_filter_fail)) {
+ CHIP_DBG(KERN_ERR "GMAC RX : Dest Address filter fail\n");
+ x->da_rx_filter_fail++;
+ ret = discard_frame;
+ }
+ if (unlikely(p->des01.erx.length_error)) {
+ CHIP_DBG(KERN_ERR "GMAC RX: length_error error\n");
+ x->rx_length++;
+ ret = discard_frame;
+ }
+#ifdef STMMAC_VLAN_TAG_USED
+ if (p->des01.erx.vlan_tag) {
+ CHIP_DBG(KERN_INFO "GMAC RX: VLAN frame tagged\n");
+ x->rx_vlan++;
+ }
+#endif
+ return ret;
+}
+
+static void enh_desc_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
+ int disable_rx_ic)
+{
+ int i;
+ for (i = 0; i < ring_size; i++) {
+ p->des01.erx.own = 1;
+ p->des01.erx.buffer1_size = BUF_SIZE_8KiB - 1;
+ /* To support jumbo frames */
+ p->des01.erx.buffer2_size = BUF_SIZE_8KiB - 1;
+ if (i == ring_size - 1)
+ p->des01.erx.end_ring = 1;
+ if (disable_rx_ic)
+ p->des01.erx.disable_ic = 1;
+ p++;
+ }
+}
+
+static void enh_desc_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
+{
+ int i;
+
+ for (i = 0; i < ring_size; i++) {
+ p->des01.etx.own = 0;
+ if (i == ring_size - 1)
+ p->des01.etx.end_ring = 1;
+ p++;
+ }
+}
+
+static int enh_desc_get_tx_owner(struct dma_desc *p)
+{
+ return p->des01.etx.own;
+}
+
+static int enh_desc_get_rx_owner(struct dma_desc *p)
+{
+ return p->des01.erx.own;
+}
+
+static void enh_desc_set_tx_owner(struct dma_desc *p)
+{
+ p->des01.etx.own = 1;
+}
+
+static void enh_desc_set_rx_owner(struct dma_desc *p)
+{
+ p->des01.erx.own = 1;
+}
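+
+/* The own bit implements the usual descriptor handshake with the DMA engine:
+ * the driver sets it to hand a descriptor to the hardware, the hardware
+ * clears it when it is done, and a zero return from get_tx_owner() or
+ * get_rx_owner() tells the driver the entry can be reclaimed. */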
+
+static int enh_desc_get_tx_ls(struct dma_desc *p)
+{
+ return p->des01.etx.last_segment;
+}
+
+static void enh_desc_release_tx_desc(struct dma_desc *p)
+{
+ int ter = p->des01.etx.end_ring;
+
+ memset(p, 0, sizeof(struct dma_desc));
+ p->des01.etx.end_ring = ter;
+}
+
+static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
+ int csum_flag)
+{
+ p->des01.etx.first_segment = is_fs;
+ if (unlikely(len > BUF_SIZE_4KiB)) {
+ p->des01.etx.buffer1_size = BUF_SIZE_4KiB;
+ p->des01.etx.buffer2_size = len - BUF_SIZE_4KiB;
+ } else {
+ p->des01.etx.buffer1_size = len;
+ }
+ if (likely(csum_flag))
+ p->des01.etx.checksum_insertion = cic_full;
+}
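+
+/* Example of the buffer split above (assuming BUF_SIZE_4KiB is 4096):
+ * a 6000 byte payload is programmed as buffer1_size = 4096 and
+ * buffer2_size = 1904, so a single enhanced descriptor can describe a
+ * frame larger than one buffer. */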
+
+static void enh_desc_clear_tx_ic(struct dma_desc *p)
+{
+ p->des01.etx.interrupt = 0;
+}
+
+static void enh_desc_close_tx_desc(struct dma_desc *p)
+{
+ p->des01.etx.last_segment = 1;
+ p->des01.etx.interrupt = 1;
+}
+
+static int enh_desc_get_rx_frame_len(struct dma_desc *p)
+{
+ return p->des01.erx.frame_length;
+}
+
+struct stmmac_desc_ops enh_desc_ops = {
+ .tx_status = enh_desc_get_tx_status,
+ .rx_status = enh_desc_get_rx_status,
+ .get_tx_len = enh_desc_get_tx_len,
+ .init_rx_desc = enh_desc_init_rx_desc,
+ .init_tx_desc = enh_desc_init_tx_desc,
+ .get_tx_owner = enh_desc_get_tx_owner,
+ .get_rx_owner = enh_desc_get_rx_owner,
+ .release_tx_desc = enh_desc_release_tx_desc,
+ .prepare_tx_desc = enh_desc_prepare_tx_desc,
+ .clear_tx_ic = enh_desc_clear_tx_ic,
+ .close_tx_desc = enh_desc_close_tx_desc,
+ .get_tx_ls = enh_desc_get_tx_ls,
+ .set_tx_owner = enh_desc_set_tx_owner,
+ .set_rx_owner = enh_desc_set_rx_owner,
+ .get_rx_frame_len = enh_desc_get_rx_frame_len,
+};
diff --git a/drivers/net/stmmac/norm_desc.c b/drivers/net/stmmac/norm_desc.c
new file mode 100644
index 000000000000..31ad53643792
--- /dev/null
+++ b/drivers/net/stmmac/norm_desc.c
@@ -0,0 +1,236 @@
+/*******************************************************************************
+ This contains the functions to handle the normal descriptors.
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include "common.h"
+
+static int ndesc_get_tx_status(void *data, struct stmmac_extra_stats *x,
+ struct dma_desc *p, unsigned long ioaddr)
+{
+ int ret = 0;
+ struct net_device_stats *stats = (struct net_device_stats *)data;
+
+ if (unlikely(p->des01.tx.error_summary)) {
+ if (unlikely(p->des01.tx.underflow_error)) {
+ x->tx_underflow++;
+ stats->tx_fifo_errors++;
+ }
+ if (unlikely(p->des01.tx.no_carrier)) {
+ x->tx_carrier++;
+ stats->tx_carrier_errors++;
+ }
+ if (unlikely(p->des01.tx.loss_carrier)) {
+ x->tx_losscarrier++;
+ stats->tx_carrier_errors++;
+ }
+ if (unlikely((p->des01.tx.excessive_deferral) ||
+ (p->des01.tx.excessive_collisions) ||
+ (p->des01.tx.late_collision)))
+ stats->collisions += p->des01.tx.collision_count;
+ ret = -1;
+ }
+ if (unlikely(p->des01.tx.heartbeat_fail)) {
+ x->tx_heartbeat++;
+ stats->tx_heartbeat_errors++;
+ ret = -1;
+ }
+ if (unlikely(p->des01.tx.deferred))
+ x->tx_deferred++;
+
+ return ret;
+}
+
+static int ndesc_get_tx_len(struct dma_desc *p)
+{
+ return p->des01.tx.buffer1_size;
+}
+
+/* This function verifies if each incoming frame has some errors
+ * and, if required, updates the multicast statistics.
+ * In case of success, it returns csum_none because the device
+ * is not able to compute the csum in HW. */
+static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
+ struct dma_desc *p)
+{
+ int ret = csum_none;
+ struct net_device_stats *stats = (struct net_device_stats *)data;
+
+ if (unlikely(p->des01.rx.last_descriptor == 0)) {
+ pr_warning("ndesc Error: Oversized Ethernet "
+ "frame spanned multiple buffers\n");
+ stats->rx_length_errors++;
+ return discard_frame;
+ }
+
+ if (unlikely(p->des01.rx.error_summary)) {
+ if (unlikely(p->des01.rx.descriptor_error))
+ x->rx_desc++;
+ if (unlikely(p->des01.rx.partial_frame_error))
+ x->rx_partial++;
+ if (unlikely(p->des01.rx.run_frame))
+ x->rx_runt++;
+ if (unlikely(p->des01.rx.frame_too_long))
+ x->rx_toolong++;
+ if (unlikely(p->des01.rx.collision)) {
+ x->rx_collision++;
+ stats->collisions++;
+ }
+ if (unlikely(p->des01.rx.crc_error)) {
+ x->rx_crc++;
+ stats->rx_crc_errors++;
+ }
+ ret = discard_frame;
+ }
+ if (unlikely(p->des01.rx.dribbling))
+ ret = discard_frame;
+
+ if (unlikely(p->des01.rx.length_error)) {
+ x->rx_length++;
+ ret = discard_frame;
+ }
+ if (unlikely(p->des01.rx.mii_error)) {
+ x->rx_mii++;
+ ret = discard_frame;
+ }
+ if (p->des01.rx.multicast_frame) {
+ x->rx_multicast++;
+ stats->multicast++;
+ }
+ return ret;
+}
+
+static void ndesc_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
+ int disable_rx_ic)
+{
+ int i;
+ for (i = 0; i < ring_size; i++) {
+ p->des01.rx.own = 1;
+ p->des01.rx.buffer1_size = BUF_SIZE_2KiB - 1;
+ if (i == ring_size - 1)
+ p->des01.rx.end_ring = 1;
+ if (disable_rx_ic)
+ p->des01.rx.disable_ic = 1;
+ p++;
+ }
+}
+
+static void ndesc_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
+{
+ int i;
+ for (i = 0; i < ring_size; i++) {
+ p->des01.tx.own = 0;
+ if (i == ring_size - 1)
+ p->des01.tx.end_ring = 1;
+ p++;
+ }
+}
+
+static int ndesc_get_tx_owner(struct dma_desc *p)
+{
+ return p->des01.tx.own;
+}
+
+static int ndesc_get_rx_owner(struct dma_desc *p)
+{
+ return p->des01.rx.own;
+}
+
+static void ndesc_set_tx_owner(struct dma_desc *p)
+{
+ p->des01.tx.own = 1;
+}
+
+static void ndesc_set_rx_owner(struct dma_desc *p)
+{
+ p->des01.rx.own = 1;
+}
+
+static int ndesc_get_tx_ls(struct dma_desc *p)
+{
+ return p->des01.tx.last_segment;
+}
+
+static void ndesc_release_tx_desc(struct dma_desc *p)
+{
+ int ter = p->des01.tx.end_ring;
+
+ /* clean field used within the xmit */
+ p->des01.tx.first_segment = 0;
+ p->des01.tx.last_segment = 0;
+ p->des01.tx.buffer1_size = 0;
+
+ /* clean status reported */
+ p->des01.tx.error_summary = 0;
+ p->des01.tx.underflow_error = 0;
+ p->des01.tx.no_carrier = 0;
+ p->des01.tx.loss_carrier = 0;
+ p->des01.tx.excessive_deferral = 0;
+ p->des01.tx.excessive_collisions = 0;
+ p->des01.tx.late_collision = 0;
+ p->des01.tx.heartbeat_fail = 0;
+ p->des01.tx.deferred = 0;
+
+ /* set termination field */
+ p->des01.tx.end_ring = ter;
+}
+
+static void ndesc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
+ int csum_flag)
+{
+ p->des01.tx.first_segment = is_fs;
+ p->des01.tx.buffer1_size = len;
+}
+
+static void ndesc_clear_tx_ic(struct dma_desc *p)
+{
+ p->des01.tx.interrupt = 0;
+}
+
+static void ndesc_close_tx_desc(struct dma_desc *p)
+{
+ p->des01.tx.last_segment = 1;
+ p->des01.tx.interrupt = 1;
+}
+
+static int ndesc_get_rx_frame_len(struct dma_desc *p)
+{
+ return p->des01.rx.frame_length;
+}
+
+struct stmmac_desc_ops ndesc_ops = {
+ .tx_status = ndesc_get_tx_status,
+ .rx_status = ndesc_get_rx_status,
+ .get_tx_len = ndesc_get_tx_len,
+ .init_rx_desc = ndesc_init_rx_desc,
+ .init_tx_desc = ndesc_init_tx_desc,
+ .get_tx_owner = ndesc_get_tx_owner,
+ .get_rx_owner = ndesc_get_rx_owner,
+ .release_tx_desc = ndesc_release_tx_desc,
+ .prepare_tx_desc = ndesc_prepare_tx_desc,
+ .clear_tx_ic = ndesc_clear_tx_ic,
+ .close_tx_desc = ndesc_close_tx_desc,
+ .get_tx_ls = ndesc_get_tx_ls,
+ .set_tx_owner = ndesc_set_tx_owner,
+ .set_rx_owner = ndesc_set_rx_owner,
+ .get_rx_frame_len = ndesc_get_rx_frame_len,
+};
diff --git a/drivers/net/stmmac/stmmac.h b/drivers/net/stmmac/stmmac.h
index ba35e6943cf4..ebebc644b1b8 100644
--- a/drivers/net/stmmac/stmmac.h
+++ b/drivers/net/stmmac/stmmac.h
@@ -20,14 +20,9 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
-#define DRV_MODULE_VERSION "Jan_2010"
+#define DRV_MODULE_VERSION "Apr_2010"
#include <linux/stmmac.h>
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
-#define STMMAC_VLAN_TAG_USED
-#include <linux/if_vlan.h>
-#endif
-
#include "common.h"
#ifdef CONFIG_STMMAC_TIMER
#include "stmmac_timer.h"
@@ -93,6 +88,7 @@ struct stmmac_priv {
#ifdef STMMAC_VLAN_TAG_USED
struct vlan_group *vlgrp;
#endif
+ int enh_desc;
};
#ifdef CONFIG_STM_DRIVERS
@@ -120,3 +116,5 @@ static inline int stmmac_claim_resource(struct platform_device *pdev)
extern int stmmac_mdio_unregister(struct net_device *ndev);
extern int stmmac_mdio_register(struct net_device *ndev);
extern void stmmac_set_ethtool_ops(struct net_device *netdev);
+extern struct stmmac_desc_ops enh_desc_ops;
+extern struct stmmac_desc_ops ndesc_ops;
diff --git a/drivers/net/stmmac/stmmac_ethtool.c b/drivers/net/stmmac/stmmac_ethtool.c
index c021eaa3ca69..f080509923f0 100644
--- a/drivers/net/stmmac/stmmac_ethtool.c
+++ b/drivers/net/stmmac/stmmac_ethtool.c
@@ -102,7 +102,6 @@ void stmmac_ethtool_getdrvinfo(struct net_device *dev,
strcpy(info->version, DRV_MODULE_VERSION);
info->fw_version[0] = '\0';
info->n_stats = STMMAC_STATS_LEN;
- return;
}
int stmmac_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
@@ -194,8 +193,6 @@ void stmmac_ethtool_gregs(struct net_device *dev,
reg_space[i + 55] =
readl(dev->base_addr + (DMA_BUS_MODE + (i * 4)));
}
-
- return;
}
int stmmac_ethtool_set_tx_csum(struct net_device *netdev, u32 data)
@@ -233,7 +230,6 @@ stmmac_get_pauseparam(struct net_device *netdev,
pause->tx_pause = 1;
spin_unlock(&priv->lock);
- return;
}
static int
@@ -292,8 +288,6 @@ static void stmmac_get_ethtool_stats(struct net_device *dev,
data[i] = (stmmac_gstrings_stats[i].sizeof_stat ==
sizeof(u64)) ? (*(u64 *)p) : (*(u32 *)p);
}
-
- return;
}
static int stmmac_get_sset_count(struct net_device *netdev, int sset)
@@ -323,7 +317,6 @@ static void stmmac_get_strings(struct net_device *dev, u32 stringset, u8 *data)
WARN_ON(1);
break;
}
- return;
}
/* Currently only support WOL through Magic packet. */
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
index 4111a85ec80e..a31d580f306d 100644
--- a/drivers/net/stmmac/stmmac_main.c
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -169,8 +169,6 @@ static void stmmac_verify_args(void)
flow_ctrl = FLOW_OFF;
if (unlikely((pause < 0) || (pause > 0xffff)))
pause = PAUSE_TIME;
-
- return;
}
#if defined(STMMAC_XMIT_DEBUG) || defined(STMMAC_RX_DEBUG)
@@ -184,7 +182,6 @@ static void print_pkt(unsigned char *buf, int len)
pr_info(" %02x", buf[j]);
}
pr_info("\n");
- return;
}
#endif
@@ -514,7 +511,6 @@ static void init_dma_desc_rings(struct net_device *dev)
pr_info("TX descriptor ring:\n");
display_ring(priv->dma_tx, txsize);
}
- return;
}
static void dma_free_rx_skbufs(struct stmmac_priv *priv)
@@ -529,7 +525,6 @@ static void dma_free_rx_skbufs(struct stmmac_priv *priv)
}
priv->rx_skbuff[i] = NULL;
}
- return;
}
static void dma_free_tx_skbufs(struct stmmac_priv *priv)
@@ -547,7 +542,6 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv)
priv->tx_skbuff[i] = NULL;
}
}
- return;
}
static void free_dma_desc_resources(struct stmmac_priv *priv)
@@ -567,8 +561,6 @@ static void free_dma_desc_resources(struct stmmac_priv *priv)
kfree(priv->rx_skbuff_dma);
kfree(priv->rx_skbuff);
kfree(priv->tx_skbuff);
-
- return;
}
/**
@@ -598,8 +590,6 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
}
}
tx_coe = priv->tx_coe;
-
- return;
}
/**
@@ -675,7 +665,6 @@ static void stmmac_tx(struct stmmac_priv *priv)
}
netif_tx_unlock(priv->dev);
}
- return;
}
static inline void stmmac_enable_irq(struct stmmac_priv *priv)
@@ -731,8 +720,6 @@ void stmmac_schedule(struct net_device *dev)
priv->xstats.sched_timer_n++;
_stmmac_schedule(priv);
-
- return;
}
static void stmmac_no_timer_started(unsigned int x)
@@ -763,8 +750,6 @@ static void stmmac_tx_err(struct stmmac_priv *priv)
priv->dev->stats.tx_errors++;
netif_wake_queue(priv->dev);
-
- return;
}
@@ -788,8 +773,6 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv)
stmmac_tx_err(priv);
} else if (unlikely(status == tx_hard_error))
stmmac_tx_err(priv);
-
- return;
}
/**
@@ -837,7 +820,7 @@ static int stmmac_open(struct net_device *dev)
#ifdef CONFIG_STMMAC_TIMER
priv->tm = kzalloc(sizeof(struct stmmac_timer *), GFP_KERNEL);
if (unlikely(priv->tm == NULL)) {
- pr_err("%s: ERROR: timer memory alloc failed \n", __func__);
+ pr_err("%s: ERROR: timer memory alloc failed\n", __func__);
return -ENOMEM;
}
priv->tm->freq = tmrate;
@@ -1197,7 +1180,6 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
}
priv->hw->desc->set_rx_owner(p + entry);
}
- return;
}
static int stmmac_rx(struct stmmac_priv *priv, int limit)
@@ -1280,7 +1262,6 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
priv->dev->stats.rx_packets++;
priv->dev->stats.rx_bytes += frame_len;
- priv->dev->last_rx = jiffies;
}
entry = next_entry;
p = p_next; /* use prefetched values */
@@ -1332,7 +1313,6 @@ static void stmmac_tx_timeout(struct net_device *dev)
/* Clear Tx resources and restart transmitting again */
stmmac_tx_err(priv);
- return;
}
/* Configuration changes (passed on by ifconfig) */
@@ -1374,7 +1354,6 @@ static void stmmac_multicast_list(struct net_device *dev)
spin_lock(&priv->lock);
priv->hw->mac->set_filter(dev);
spin_unlock(&priv->lock);
- return;
}
/**
@@ -1490,8 +1469,6 @@ static void stmmac_vlan_rx_register(struct net_device *dev,
spin_lock(&priv->lock);
priv->vlgrp = grp;
spin_unlock(&priv->lock);
-
- return;
}
#endif
@@ -1587,6 +1564,12 @@ static int stmmac_mac_device_setup(struct net_device *dev)
else
device = dwmac100_setup(ioaddr);
if (!device)
	return -ENOMEM;
+
+	if (priv->enh_desc) {
+		device->desc = &enh_desc_ops;
+		pr_info("\tEnhanced descriptor structure\n");
+	} else
+		device->desc = &ndesc_ops;
@@ -1727,6 +1710,7 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
priv->bus_id = plat_dat->bus_id;
priv->pbl = plat_dat->pbl; /* TLI */
priv->is_gmac = plat_dat->has_gmac; /* GMAC is on board */
+ priv->enh_desc = plat_dat->enh_desc;
platform_set_drvdata(pdev, ndev);
diff --git a/drivers/net/stmmac/stmmac_timer.c b/drivers/net/stmmac/stmmac_timer.c
index 679f61ffb1f8..2a0e1abde7e7 100644
--- a/drivers/net/stmmac/stmmac_timer.c
+++ b/drivers/net/stmmac/stmmac_timer.c
@@ -31,8 +31,6 @@ static void stmmac_timer_handler(void *data)
struct net_device *dev = (struct net_device *)data;
stmmac_schedule(dev);
-
- return;
}
#define STMMAC_TIMER_MSG(timer, freq) \
@@ -47,13 +45,11 @@ static void stmmac_rtc_start(unsigned int new_freq)
{
rtc_irq_set_freq(stmmac_rtc, &stmmac_task, new_freq);
rtc_irq_set_state(stmmac_rtc, &stmmac_task, 1);
- return;
}
static void stmmac_rtc_stop(void)
{
rtc_irq_set_state(stmmac_rtc, &stmmac_task, 0);
- return;
}
int stmmac_open_ext_timer(struct net_device *dev, struct stmmac_timer *tm)
@@ -102,13 +98,11 @@ static void stmmac_tmu_start(unsigned int new_freq)
{
clk_set_rate(timer_clock, new_freq);
clk_enable(timer_clock);
- return;
}
static void stmmac_tmu_stop(void)
{
clk_disable(timer_clock);
- return;
}
int stmmac_open_ext_timer(struct net_device *dev, struct stmmac_timer *tm)
diff --git a/drivers/net/stnic.c b/drivers/net/stnic.c
index 87a6b8eabc67..d85f0a84bc7b 100644
--- a/drivers/net/stnic.c
+++ b/drivers/net/stnic.c
@@ -280,7 +280,6 @@ stnic_init (struct net_device *dev)
{
stnic_reset (dev);
NS8390_init (dev, 0);
- return;
}
static void __exit stnic_cleanup(void)
diff --git a/drivers/net/sun3_82586.c b/drivers/net/sun3_82586.c
index 8b28c89a9a77..151312342243 100644
--- a/drivers/net/sun3_82586.c
+++ b/drivers/net/sun3_82586.c
@@ -412,7 +412,7 @@ static int init586(struct net_device *dev)
volatile struct iasetup_cmd_struct *ias_cmd;
volatile struct tdr_cmd_struct *tdr_cmd;
volatile struct mcsetup_cmd_struct *mc_cmd;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
int num_addrs=netdev_mc_count(dev);
ptr = (void *) ((char *)p->scb + sizeof(struct scb_struct));
@@ -536,9 +536,9 @@ static int init586(struct net_device *dev)
mc_cmd->mc_cnt = swab16(num_addrs * 6);
i = 0;
- netdev_for_each_mc_addr(dmi, dev)
+ netdev_for_each_mc_addr(ha, dev)
memcpy((char *) mc_cmd->mc_list[i++],
- dmi->dmi_addr, ETH_ALEN);
+ ha->addr, ETH_ALEN);
p->scb->cbl_offset = make16(mc_cmd);
p->scb->cmd_cuc = CUC_START;
@@ -985,7 +985,7 @@ static void sun3_82586_timeout(struct net_device *dev)
p->scb->cmd_cuc = CUC_START;
sun3_attn586();
WAIT_4_SCB_CMD();
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
return 0;
}
#endif
@@ -998,7 +998,7 @@ static void sun3_82586_timeout(struct net_device *dev)
sun3_82586_close(dev);
sun3_82586_open(dev);
}
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
}
/******************************************************
@@ -1062,7 +1062,6 @@ static int sun3_82586_send_packet(struct sk_buff *skb, struct net_device *dev)
}
sun3_attn586();
- dev->trans_start = jiffies;
if(!i)
dev_kfree_skb(skb);
WAIT_4_SCB_CMD();
@@ -1082,7 +1081,6 @@ static int sun3_82586_send_packet(struct sk_buff *skb, struct net_device *dev)
p->xmit_cmds[0]->cmd_status = p->nop_cmds[next_nop]->cmd_status = 0;
p->nop_cmds[p->nop_point]->cmd_link = make16((p->xmit_cmds[0]));
- dev->trans_start = jiffies;
p->nop_point = next_nop;
dev_kfree_skb(skb);
# endif
@@ -1097,7 +1095,6 @@ static int sun3_82586_send_packet(struct sk_buff *skb, struct net_device *dev)
p->nop_cmds[next_nop]->cmd_status = 0;
p->nop_cmds[p->xmit_count]->cmd_link = make16((p->xmit_cmds[p->xmit_count]));
- dev->trans_start = jiffies;
p->xmit_count = next_nop;
{
diff --git a/drivers/net/sun3lance.c b/drivers/net/sun3lance.c
index 1694ca5bfb41..358c22f9acbe 100644
--- a/drivers/net/sun3lance.c
+++ b/drivers/net/sun3lance.c
@@ -523,8 +523,8 @@ static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev )
/* Transmitter timeout, serious problems. */
if (netif_queue_stopped(dev)) {
- int tickssofar = jiffies - dev->trans_start;
- if (tickssofar < 20)
+ int tickssofar = jiffies - dev_trans_start(dev);
+ if (tickssofar < HZ/5)
return NETDEV_TX_BUSY;
DPRINTK( 1, ( "%s: transmit timed out, status %04x, resetting.\n",
@@ -559,7 +559,6 @@ static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev )
REGA( CSR0 ) = CSR0_INEA | CSR0_INIT | CSR0_STRT;
netif_start_queue(dev);
- dev->trans_start = jiffies;
return NETDEV_TX_OK;
}
@@ -637,8 +636,7 @@ static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev )
AREG = CSR0;
DPRINTK( 2, ( "%s: lance_start_xmit() exiting, csr0 %4.4x.\n",
dev->name, DREG ));
- dev->trans_start = jiffies;
- dev_kfree_skb( skb );
+ dev_kfree_skb(skb);
lp->lock = 0;
if ((MEM->tx_head[(entry+1) & TX_RING_MOD_MASK].flag & TMD1_OWN) ==
diff --git a/drivers/net/sunbmac.c b/drivers/net/sunbmac.c
index ed7865a0b5b2..367e96f317d4 100644
--- a/drivers/net/sunbmac.c
+++ b/drivers/net/sunbmac.c
@@ -362,7 +362,7 @@ static void bigmac_tcvr_write(struct bigmac *bp, void __iomem *tregs,
default:
printk(KERN_ERR "bigmac_tcvr_read: Whoops, no known transceiver type.\n");
return;
- };
+ }
idle_transceiver(tregs);
write_tcvr_bit(bp, tregs, 0);
@@ -401,7 +401,7 @@ static unsigned short bigmac_tcvr_read(struct bigmac *bp,
default:
printk(KERN_ERR "bigmac_tcvr_read: Whoops, no known transceiver type.\n");
return 0xffff;
- };
+ }
idle_transceiver(tregs);
write_tcvr_bit(bp, tregs, 0);
@@ -982,8 +982,6 @@ static int bigmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
sbus_writel(CREG_CTRL_TWAKEUP, bp->creg + CREG_CTRL);
- dev->trans_start = jiffies;
-
return NETDEV_TX_OK;
}
@@ -999,7 +997,7 @@ static void bigmac_set_multicast(struct net_device *dev)
{
struct bigmac *bp = netdev_priv(dev);
void __iomem *bregs = bp->bregs;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
char *addrs;
int i;
u32 tmp, crc;
@@ -1028,8 +1026,8 @@ static void bigmac_set_multicast(struct net_device *dev)
for (i = 0; i < 4; i++)
hash_table[i] = 0;
- netdev_for_each_mc_addr(dmi, dev) {
- addrs = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ addrs = ha->addr;
if (!(*addrs & 1))
continue;
@@ -1133,8 +1131,8 @@ static int __devinit bigmac_ether_init(struct of_device *op,
goto fail_and_cleanup;
/* Get supported SBUS burst sizes. */
- bsizes = of_getintprop_default(qec_op->node, "burst-sizes", 0xff);
- bsizes_more = of_getintprop_default(qec_op->node, "burst-sizes", 0xff);
+ bsizes = of_getintprop_default(qec_op->dev.of_node, "burst-sizes", 0xff);
+ bsizes_more = of_getintprop_default(qec_op->dev.of_node, "burst-sizes", 0xff);
bsizes &= 0xff;
if (bsizes_more != 0xff)
@@ -1186,7 +1184,7 @@ static int __devinit bigmac_ether_init(struct of_device *op,
}
/* Get the board revision of this BigMAC. */
- bp->board_rev = of_getintprop_default(bp->bigmac_op->node,
+ bp->board_rev = of_getintprop_default(bp->bigmac_op->dev.of_node,
"board-version", 1);
/* Init auto-negotiation timer state. */
@@ -1292,8 +1290,11 @@ static const struct of_device_id bigmac_sbus_match[] = {
MODULE_DEVICE_TABLE(of, bigmac_sbus_match);
static struct of_platform_driver bigmac_sbus_driver = {
- .name = "sunbmac",
- .match_table = bigmac_sbus_match,
+ .driver = {
+ .name = "sunbmac",
+ .owner = THIS_MODULE,
+ .of_match_table = bigmac_sbus_match,
+ },
.probe = bigmac_sbus_probe,
.remove = __devexit_p(bigmac_sbus_remove),
};
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
index 8249a394a4e1..2678588ea4b2 100644
--- a/drivers/net/sundance.c
+++ b/drivers/net/sundance.c
@@ -788,7 +788,6 @@ static void mdio_write(struct net_device *dev, int phy_id, int location, int val
iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
mdio_delay();
}
- return;
}
static int mdio_wait_link(struct net_device *dev, int wait)
@@ -972,7 +971,7 @@ static void tx_timeout(struct net_device *dev)
dev->if_port = 0;
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
dev->stats.tx_errors++;
if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
netif_wake_queue(dev);
@@ -1022,7 +1021,6 @@ static void init_ring(struct net_device *dev)
np->tx_skbuff[i] = NULL;
np->tx_ring[i].status = 0;
}
- return;
}
static void tx_poll (unsigned long data)
@@ -1049,7 +1047,6 @@ static void tx_poll (unsigned long data)
if (ioread32 (np->base + TxListPtr) == 0)
iowrite32 (np->tx_ring_dma + head * sizeof(struct netdev_desc),
np->base + TxListPtr);
- return;
}
static netdev_tx_t
@@ -1084,7 +1081,6 @@ start_tx (struct sk_buff *skb, struct net_device *dev)
} else {
netif_stop_queue (dev);
}
- dev->trans_start = jiffies;
if (netif_msg_tx_queued(np)) {
printk (KERN_DEBUG
"%s: Transmit frame #%d queued in slot %d.\n",
@@ -1379,7 +1375,6 @@ not_done:
if (np->budget <= 0)
np->budget = RX_BUDGET;
tasklet_schedule(&np->rx_tasklet);
- return;
}
static void refill_rx (struct net_device *dev)
@@ -1410,7 +1405,6 @@ static void refill_rx (struct net_device *dev)
np->rx_ring[entry].status = 0;
cnt++;
}
- return;
}
static void netdev_error(struct net_device *dev, int intr_status)
{
@@ -1522,13 +1516,13 @@ static void set_rx_mode(struct net_device *dev)
memset(mc_filter, 0xff, sizeof(mc_filter));
rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
} else if (!netdev_mc_empty(dev)) {
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
int bit;
int index;
int crc;
memset (mc_filter, 0, sizeof (mc_filter));
- netdev_for_each_mc_addr(mclist, dev) {
- crc = ether_crc_le (ETH_ALEN, mclist->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev) {
+ crc = ether_crc_le(ETH_ALEN, ha->addr);
for (index=0, bit=0; bit < 6; bit++, crc <<= 1)
if (crc & 0x80000000) index |= 1 << bit;
mc_filter[index/16] |= (1 << (index % 16));
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index e6880f1c4e8c..434f9d735333 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -1136,7 +1136,7 @@ static netdev_tx_t gem_start_xmit(struct sk_buff *skb,
writel(gp->tx_new, gp->regs + TXDMA_KICK);
spin_unlock_irqrestore(&gp->tx_lock, flags);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
return NETDEV_TX_OK;
}
@@ -1846,12 +1846,12 @@ static u32 gem_setup_multicast(struct gem *gp)
} else {
u16 hash_table[16];
u32 crc;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
int i;
memset(hash_table, 0, sizeof(hash_table));
- netdev_for_each_mc_addr(dmi, gp->dev) {
- char *addrs = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, gp->dev) {
+ char *addrs = ha->addr;
if (!(*addrs & 1))
continue;
@@ -2923,7 +2923,6 @@ static void get_gem_mac_nonobp(struct pci_dev *pdev, unsigned char *dev_addr)
dev_addr[1] = 0x00;
dev_addr[2] = 0x20;
get_random_bytes(dev_addr + 3, 3);
- return;
}
#endif /* not Sparc and not PPC */
diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c
index b17dbb11bd67..3d9650b8d38f 100644
--- a/drivers/net/sunhme.c
+++ b/drivers/net/sunhme.c
@@ -855,7 +855,7 @@ static void happy_meal_timer(unsigned long data)
hp->timer_ticks = 0;
hp->timer_state = asleep; /* foo on you */
break;
- };
+ }
if (restart_timer) {
hp->happy_timer.expires = jiffies + ((12 * HZ)/10); /* 1.2 sec. */
@@ -1488,7 +1488,7 @@ static int happy_meal_init(struct happy_meal *hp)
HMD(("external, disable MII, "));
hme_write32(hp, bregs + BMAC_XIFCFG, BIGMAC_XCFG_MIIDISAB);
break;
- };
+ }
if (happy_meal_tcvr_reset(hp, tregs))
return -EAGAIN;
@@ -1523,13 +1523,13 @@ static int happy_meal_init(struct happy_meal *hp)
hme_write32(hp, bregs + BMAC_HTABLE3, 0xffff);
} else if ((hp->dev->flags & IFF_PROMISC) == 0) {
u16 hash_table[4];
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
char *addrs;
u32 crc;
memset(hash_table, 0, sizeof(hash_table));
- netdev_for_each_mc_addr(dmi, hp->dev) {
- addrs = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, hp->dev) {
+ addrs = ha->addr;
if (!(*addrs & 1))
continue;
@@ -1734,7 +1734,7 @@ static void happy_meal_set_initial_advertisement(struct happy_meal *hp)
case external:
hme_write32(hp, bregs + BMAC_XIFCFG, BIGMAC_XCFG_MIIDISAB);
break;
- };
+ }
if (happy_meal_tcvr_reset(hp, tregs))
return;
@@ -2341,8 +2341,6 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
spin_unlock_irq(&hp->happy_lock);
- dev->trans_start = jiffies;
-
tx_add_log(hp, TXLOG_ACTION_TXMIT, 0);
return NETDEV_TX_OK;
}
@@ -2362,7 +2360,7 @@ static void happy_meal_set_multicast(struct net_device *dev)
{
struct happy_meal *hp = netdev_priv(dev);
void __iomem *bregs = hp->bigmacregs;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
char *addrs;
u32 crc;
@@ -2380,8 +2378,8 @@ static void happy_meal_set_multicast(struct net_device *dev)
u16 hash_table[4];
memset(hash_table, 0, sizeof(hash_table));
- netdev_for_each_mc_addr(dmi, dev) {
- addrs = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ addrs = ha->addr;
if (!(*addrs & 1))
continue;
@@ -2483,7 +2481,7 @@ static void hme_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
else {
const struct linux_prom_registers *regs;
struct of_device *op = hp->happy_dev;
- regs = of_get_property(op->node, "regs", NULL);
+ regs = of_get_property(op->dev.of_node, "regs", NULL);
if (regs)
sprintf(info->bus_info, "SBUS:%d",
regs->which_io);
@@ -2643,14 +2641,14 @@ static const struct net_device_ops hme_netdev_ops = {
#ifdef CONFIG_SBUS
static int __devinit happy_meal_sbus_probe_one(struct of_device *op, int is_qfe)
{
- struct device_node *dp = op->node, *sbus_dp;
+ struct device_node *dp = op->dev.of_node, *sbus_dp;
struct quattro *qp = NULL;
struct happy_meal *hp;
struct net_device *dev;
int i, qfe_slot = -1;
int err = -ENODEV;
- sbus_dp = to_of_device(op->dev.parent)->node;
+ sbus_dp = to_of_device(op->dev.parent)->dev.of_node;
/* We can match PCI devices too, do not accept those here. */
if (strcmp(sbus_dp->name, "sbus"))
@@ -2945,7 +2943,6 @@ static void get_hme_mac_nonsparc(struct pci_dev *pdev, unsigned char *dev_addr)
dev_addr[1] = 0x00;
dev_addr[2] = 0x20;
get_random_bytes(&dev_addr[3], 3);
- return;
}
#endif /* !(CONFIG_SPARC) */
@@ -3004,7 +3001,6 @@ static int __devinit happy_meal_pci_probe(struct pci_dev *pdev,
dev->base_addr = (long) pdev;
hp = netdev_priv(dev);
- memset(hp, 0, sizeof(*hp));
hp->happy_dev = pdev;
hp->dma_dev = &pdev->dev;
@@ -3241,7 +3237,7 @@ static void happy_meal_pci_exit(void)
#ifdef CONFIG_SBUS
static int __devinit hme_sbus_probe(struct of_device *op, const struct of_device_id *match)
{
- struct device_node *dp = op->node;
+ struct device_node *dp = op->dev.of_node;
const char *model = of_get_property(dp, "model", NULL);
int is_qfe = (match->data != NULL);
@@ -3295,8 +3291,11 @@ static const struct of_device_id hme_sbus_match[] = {
MODULE_DEVICE_TABLE(of, hme_sbus_match);
static struct of_platform_driver hme_sbus_driver = {
- .name = "hme",
- .match_table = hme_sbus_match,
+ .driver = {
+ .name = "hme",
+ .owner = THIS_MODULE,
+ .of_match_table = hme_sbus_match,
+ },
.probe = hme_sbus_probe,
.remove = __devexit_p(hme_sbus_remove),
};
diff --git a/drivers/net/sunlance.c b/drivers/net/sunlance.c
index 0c21653ff9f9..7d9c33dd9d1a 100644
--- a/drivers/net/sunlance.c
+++ b/drivers/net/sunlance.c
@@ -1003,7 +1003,7 @@ static int lance_reset(struct net_device *dev)
}
lp->init_ring(dev);
load_csrs(lp);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
status = init_restart_lance(lp);
return status;
}
@@ -1054,7 +1054,7 @@ static void lance_piocopy_from_skb(void __iomem *dest, unsigned char *src, int l
}
src = (char *) p16;
break;
- };
+ }
if (len >= 2) {
u16 val = src[0] << 8 | src[1];
sbus_writew(val, piobuf);
@@ -1160,7 +1160,6 @@ static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
spin_unlock_irq(&lp->lock);
- dev->trans_start = jiffies;
dev_kfree_skb(skb);
return NETDEV_TX_OK;
@@ -1170,7 +1169,7 @@ static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
static void lance_load_multicast(struct net_device *dev)
{
struct lance_private *lp = netdev_priv(dev);
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
char *addrs;
u32 crc;
u32 val;
@@ -1195,8 +1194,8 @@ static void lance_load_multicast(struct net_device *dev)
return;
/* Add addresses */
- netdev_for_each_mc_addr(dmi, dev) {
- addrs = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ addrs = ha->addr;
/* multicast address? */
if (!(*addrs & 1))
@@ -1324,7 +1323,7 @@ static int __devinit sparc_lance_probe_one(struct of_device *op,
struct of_device *ledma,
struct of_device *lebuffer)
{
- struct device_node *dp = op->node;
+ struct device_node *dp = op->dev.of_node;
static unsigned version_printed;
struct lance_private *lp;
struct net_device *dev;
@@ -1411,7 +1410,7 @@ static int __devinit sparc_lance_probe_one(struct of_device *op,
lp->burst_sizes = 0;
if (lp->ledma) {
- struct device_node *ledma_dp = ledma->node;
+ struct device_node *ledma_dp = ledma->dev.of_node;
struct device_node *sbus_dp;
unsigned int sbmask;
const char *prop;
@@ -1507,7 +1506,7 @@ fail:
static int __devinit sunlance_sbus_probe(struct of_device *op, const struct of_device_id *match)
{
struct of_device *parent = to_of_device(op->dev.parent);
- struct device_node *parent_dp = parent->node;
+ struct device_node *parent_dp = parent->dev.of_node;
int err;
if (!strcmp(parent_dp->name, "ledma")) {
@@ -1546,8 +1545,11 @@ static const struct of_device_id sunlance_sbus_match[] = {
MODULE_DEVICE_TABLE(of, sunlance_sbus_match);
static struct of_platform_driver sunlance_sbus_driver = {
- .name = "sunlance",
- .match_table = sunlance_sbus_match,
+ .driver = {
+ .name = "sunlance",
+ .owner = THIS_MODULE,
+ .of_match_table = sunlance_sbus_match,
+ },
.probe = sunlance_sbus_probe,
.remove = __devexit_p(sunlance_sbus_remove),
};
diff --git a/drivers/net/sunqe.c b/drivers/net/sunqe.c
index be637dce944c..72b579c8d812 100644
--- a/drivers/net/sunqe.c
+++ b/drivers/net/sunqe.c
@@ -602,7 +602,6 @@ static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev)
qep->tx_new = NEXT_TX(entry);
/* Get it going. */
- dev->trans_start = jiffies;
sbus_writel(CREG_CTRL_TWAKEUP, qep->qcregs + CREG_CTRL);
dev->stats.tx_packets++;
@@ -627,7 +626,7 @@ static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev)
static void qe_set_multicast(struct net_device *dev)
{
struct sunqe *qep = netdev_priv(dev);
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
u8 new_mconfig = qep->mconfig;
char *addrs;
int i;
@@ -651,8 +650,8 @@ static void qe_set_multicast(struct net_device *dev)
u8 *hbytes = (unsigned char *) &hash_table[0];
memset(hash_table, 0, sizeof(hash_table));
- netdev_for_each_mc_addr(dmi, dev) {
- addrs = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ addrs = ha->addr;
if (!(*addrs & 1))
continue;
@@ -696,7 +695,7 @@ static void qe_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
strcpy(info->version, "3.0");
op = qep->op;
- regs = of_get_property(op->node, "reg", NULL);
+ regs = of_get_property(op->dev.of_node, "reg", NULL);
if (regs)
sprintf(info->bus_info, "SBUS:%d", regs->which_io);
@@ -800,7 +799,7 @@ static struct sunqec * __devinit get_qec(struct of_device *child)
if (qec_global_reset(qecp->gregs))
goto fail;
- qecp->qec_bursts = qec_get_burst(op->node);
+ qecp->qec_bursts = qec_get_burst(op->dev.of_node);
qec_init_once(qecp, op);
@@ -858,7 +857,7 @@ static int __devinit qec_ether_init(struct of_device *op)
res = -ENODEV;
- i = of_getintprop_default(op->node, "channel#", -1);
+ i = of_getintprop_default(op->dev.of_node, "channel#", -1);
if (i == -1)
goto fail;
qe->channel = i;
@@ -978,8 +977,11 @@ static const struct of_device_id qec_sbus_match[] = {
MODULE_DEVICE_TABLE(of, qec_sbus_match);
static struct of_platform_driver qec_sbus_driver = {
- .name = "qec",
- .match_table = qec_sbus_match,
+ .driver = {
+ .name = "qec",
+ .owner = THIS_MODULE,
+ .of_match_table = qec_sbus_match,
+ },
.probe = qec_sbus_probe,
.remove = __devexit_p(qec_sbus_remove),
};
diff --git a/drivers/net/sunvnet.c b/drivers/net/sunvnet.c
index 6b1b7cea7f6b..d281a7b34701 100644
--- a/drivers/net/sunvnet.c
+++ b/drivers/net/sunvnet.c
@@ -717,7 +717,6 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
dev_kfree_skb(skb);
- dev->trans_start = jiffies;
return NETDEV_TX_OK;
out_dropped_unlock:
@@ -763,12 +762,12 @@ static struct vnet_mcast_entry *__vnet_mc_find(struct vnet *vp, u8 *addr)
static void __update_mc_list(struct vnet *vp, struct net_device *dev)
{
- struct dev_addr_list *p;
+ struct netdev_hw_addr *ha;
- netdev_for_each_mc_addr(p, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
struct vnet_mcast_entry *m;
- m = __vnet_mc_find(vp, p->dmi_addr);
+ m = __vnet_mc_find(vp, ha->addr);
if (m) {
m->hit = 1;
continue;
@@ -778,7 +777,7 @@ static void __update_mc_list(struct vnet *vp, struct net_device *dev)
m = kzalloc(sizeof(*m), GFP_ATOMIC);
if (!m)
continue;
- memcpy(m->addr, p->dmi_addr, ETH_ALEN);
+ memcpy(m->addr, ha->addr, ETH_ALEN);
m->hit = 1;
m->next = vp->mcast_list;
diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c
index 49bd84c0d583..be08b75dbc15 100644
--- a/drivers/net/tc35815.c
+++ b/drivers/net/tc35815.c
@@ -1357,8 +1357,6 @@ static int tc35815_send_packet(struct sk_buff *skb, struct net_device *dev)
}
lp->tfd_start = (lp->tfd_start + 1) % TX_FD_NUM;
- dev->trans_start = jiffies;
-
/* If we just used up the very last entry in the
* TX ring on this device, tell the queueing
* layer to send no more.
@@ -1954,16 +1952,16 @@ tc35815_set_multicast_list(struct net_device *dev)
/* Disable promiscuous mode, use normal mode. */
tc_writel(CAM_CompEn | CAM_BroadAcc | CAM_GroupAcc, &tr->CAM_Ctl);
} else if (!netdev_mc_empty(dev)) {
- struct dev_mc_list *cur_addr;
+ struct netdev_hw_addr *ha;
int i;
int ena_bits = CAM_Ena_Bit(CAM_ENTRY_SOURCE);
tc_writel(0, &tr->CAM_Ctl);
/* Walk the address list, and load the filter */
i = 0;
- netdev_for_each_mc_addr(cur_addr, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
/* entry 0,1 is reserved. */
- tc35815_set_cam_entry(dev, i + 2, cur_addr->dmi_addr);
+ tc35815_set_cam_entry(dev, i + 2, ha->addr);
ena_bits |= CAM_Ena_Bit(i + 2);
i++;
}
diff --git a/drivers/net/tehuti.c b/drivers/net/tehuti.c
index f5493092521a..20ab16192325 100644
--- a/drivers/net/tehuti.c
+++ b/drivers/net/tehuti.c
@@ -808,7 +808,7 @@ static void bdx_setmulti(struct net_device *ndev)
WRITE_REG(priv, regRX_MCST_HASH0 + i * 4, ~0);
} else if (!netdev_mc_empty(ndev)) {
u8 hash;
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
u32 reg, val;
/* set IMF to deny all multicast frames */
@@ -825,10 +825,10 @@ static void bdx_setmulti(struct net_device *ndev)
* into RX_MAC_MCST regs. we skip this phase now and accept ALL
* multicast frames throu IMF */
/* accept the rest of addresses throu IMF */
- netdev_for_each_mc_addr(mclist, ndev) {
+ netdev_for_each_mc_addr(ha, ndev) {
hash = 0;
for (i = 0; i < ETH_ALEN; i++)
- hash ^= mclist->dmi_addr[i];
+ hash ^= ha->addr[i];
reg = regRX_MCST_HASH0 + ((hash >> 5) << 2);
val = READ_REG(priv, reg);
val |= (1 << (hash % 32));
@@ -1303,7 +1303,6 @@ static int bdx_rx_receive(struct bdx_priv *priv, struct rxd_fifo *f, int budget)
priv->net_stats.rx_bytes += len;
skb_put(skb, len);
- skb->dev = priv->ndev;
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb->protocol = eth_type_trans(skb, priv->ndev);
@@ -1509,7 +1508,7 @@ bdx_tx_map_skb(struct bdx_priv *priv, struct sk_buff *skb,
int nr_frags = skb_shinfo(skb)->nr_frags;
int i;
- db->wptr->len = skb->len - skb->data_len;
+ db->wptr->len = skb_headlen(skb);
db->wptr->addr.dma = pci_map_single(priv->pdev, skb->data,
db->wptr->len, PCI_DMA_TODEVICE);
pbl->len = CPU_CHIP_SWAP32(db->wptr->len);
@@ -2034,7 +2033,6 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/************** priv ****************/
priv = nic->priv[port] = netdev_priv(ndev);
- memset(priv, 0, sizeof(struct bdx_priv));
priv->pBdxRegs = nic->regs + port * 0x8000;
priv->port = port;
priv->pdev = pdev;
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index ecc41cffb470..573054ae7b58 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -67,8 +67,8 @@
#include "tg3.h"
#define DRV_MODULE_NAME "tg3"
-#define DRV_MODULE_VERSION "3.108"
-#define DRV_MODULE_RELDATE "February 17, 2010"
+#define DRV_MODULE_VERSION "3.110"
+#define DRV_MODULE_RELDATE "April 9, 2010"
#define TG3_DEF_MAC_MODE 0
#define TG3_DEF_RX_MODE 0
@@ -101,7 +101,7 @@
#define TG3_DEF_RX_RING_PENDING 200
#define TG3_RX_JUMBO_RING_SIZE 256
#define TG3_DEF_RX_JUMBO_RING_PENDING 100
-#define TG3_RSS_INDIR_TBL_SIZE 128
+#define TG3_RSS_INDIR_TBL_SIZE 128
/* Do not place this n-ring entries value into the tp struct itself,
* we really want to expose these constants to GCC so that modulo et
@@ -126,6 +126,9 @@
TG3_TX_RING_SIZE)
#define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
+#define TG3_RX_DMA_ALIGN 16
+#define TG3_RX_HEADROOM ALIGN(VLAN_HLEN, TG3_RX_DMA_ALIGN)
+
#define TG3_DMA_BYTE_ENAB 64
#define TG3_RX_STD_DMA_SZ 1536
@@ -142,6 +145,26 @@
#define TG3_RX_JMB_BUFF_RING_SIZE \
(sizeof(struct ring_info) * TG3_RX_JUMBO_RING_SIZE)
+#define TG3_RSS_MIN_NUM_MSIX_VECS 2
+
+/* Due to a hardware bug, the 5701 can only DMA to memory addresses
+ * that are at least dword aligned when used in PCIX mode. The driver
+ * works around this bug by double copying the packet. This workaround
+ * is built into the normal double copy length check for efficiency.
+ *
+ * However, the double copy is only necessary on those architectures
+ * where unaligned memory accesses are inefficient. For those architectures
+ * where unaligned memory accesses incur little penalty, we can reintegrate
+ * the 5701 in the normal rx path. Doing so saves a device structure
+ * dereference by hardcoding the double copy threshold in place.
+ */
+#define TG3_RX_COPY_THRESHOLD 256
+#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+ #define TG3_RX_COPY_THRESH(tp) TG3_RX_COPY_THRESHOLD
+#else
+ #define TG3_RX_COPY_THRESH(tp) ((tp)->rx_copy_thresh)
+#endif
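+
+/* Illustrative reading of the macro above: when NET_IP_ALIGN is 0 or the
+ * architecture declares CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, the
+ * threshold collapses to the compile-time constant 256; otherwise it keeps
+ * dereferencing tp->rx_copy_thresh so the 5701 PCIX workaround can still
+ * widen the copy path at runtime. */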
+
/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
@@ -152,6 +175,8 @@
#define TG3_NUM_TEST 6
+#define TG3_FW_UPDATE_TIMEOUT_SEC 5
+
#define FIRMWARE_TG3 "tigon/tg3.bin"
#define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
@@ -167,8 +192,6 @@ MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
-#define TG3_RSS_MIN_NUM_MSIX_VECS 2
-
static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
@@ -360,7 +383,7 @@ static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
static u32 tg3_read32(struct tg3 *tp, u32 off)
{
- return (readl(tp->regs + off));
+ return readl(tp->regs + off);
}
static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
@@ -370,7 +393,7 @@ static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
- return (readl(tp->aperegs + off));
+ return readl(tp->aperegs + off);
}
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
@@ -488,7 +511,7 @@ static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
- return (readl(tp->regs + off + GRCMBOX_BASE));
+ return readl(tp->regs + off + GRCMBOX_BASE);
}
static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
@@ -496,16 +519,16 @@ static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
writel(val, tp->regs + off + GRCMBOX_BASE);
}
-#define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
+#define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
-#define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
-#define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
-#define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
+#define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
+#define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
+#define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
-#define tw32(reg,val) tp->write32(tp, reg, val)
-#define tw32_f(reg,val) _tw32_flush(tp,(reg),(val), 0)
-#define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
-#define tr32(reg) tp->read32(tp, reg)
+#define tw32(reg, val) tp->write32(tp, reg, val)
+#define tw32_f(reg, val) _tw32_flush(tp, (reg), (val), 0)
+#define tw32_wait_f(reg, val, us) _tw32_flush(tp, (reg), (val), (us))
+#define tr32(reg) tp->read32(tp, reg)
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
@@ -579,11 +602,11 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum)
return 0;
switch (locknum) {
- case TG3_APE_LOCK_GRC:
- case TG3_APE_LOCK_MEM:
- break;
- default:
- return -EINVAL;
+ case TG3_APE_LOCK_GRC:
+ case TG3_APE_LOCK_MEM:
+ break;
+ default:
+ return -EINVAL;
}
off = 4 * locknum;
@@ -617,11 +640,11 @@ static void tg3_ape_unlock(struct tg3 *tp, int locknum)
return;
switch (locknum) {
- case TG3_APE_LOCK_GRC:
- case TG3_APE_LOCK_MEM:
- break;
- default:
- return;
+ case TG3_APE_LOCK_GRC:
+ case TG3_APE_LOCK_MEM:
+ break;
+ default:
+ return;
}
off = 4 * locknum;
@@ -651,6 +674,7 @@ static void tg3_enable_ints(struct tg3 *tp)
tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
for (i = 0; i < tp->irq_cnt; i++) {
struct tg3_napi *tnapi = &tp->napi[i];
+
tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
@@ -1098,7 +1122,7 @@ static int tg3_mdio_init(struct tg3 *tp)
i = mdiobus_register(tp->mdio_bus);
if (i) {
- netdev_warn(tp->dev, "mdiobus_reg failed (0x%x)\n", i);
+ dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
mdiobus_free(tp->mdio_bus);
return i;
}
@@ -1106,7 +1130,7 @@ static int tg3_mdio_init(struct tg3 *tp)
phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
if (!phydev || !phydev->drv) {
- netdev_warn(tp->dev, "No PHY devices\n");
+ dev_warn(&tp->pdev->dev, "No PHY devices\n");
mdiobus_unregister(tp->mdio_bus);
mdiobus_free(tp->mdio_bus);
return -ENODEV;
@@ -1437,7 +1461,7 @@ static void tg3_adjust_link(struct net_device *dev)
phydev->speed != tp->link_config.active_speed ||
phydev->duplex != tp->link_config.active_duplex ||
oldflowctrl != tp->link_config.active_flowctrl)
- linkmesg = 1;
+ linkmesg = 1;
tp->link_config.active_speed = phydev->speed;
tp->link_config.active_duplex = phydev->duplex;
@@ -1464,7 +1488,7 @@ static int tg3_phy_init(struct tg3 *tp)
phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
phydev->dev_flags, phydev->interface);
if (IS_ERR(phydev)) {
- netdev_err(tp->dev, "Could not attach to PHY\n");
+ dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
return PTR_ERR(phydev);
}
@@ -1855,8 +1879,7 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
/* Set Extended packet length bit for jumbo frames */
tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
- }
- else {
+ } else {
tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
}
@@ -1974,8 +1997,7 @@ out:
tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
- }
- else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
+ } else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
@@ -2007,8 +2029,8 @@ out:
u32 phy_reg;
if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
- tg3_writephy(tp, MII_TG3_EXT_CTRL,
- phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
+ tg3_writephy(tp, MII_TG3_EXT_CTRL,
+ phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
}
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
@@ -3425,7 +3447,7 @@ static int tg3_fiber_aneg_smachine(struct tg3 *tp,
ap->rxconfig = rx_cfg_reg;
ret = ANEG_OK;
- switch(ap->state) {
+ switch (ap->state) {
case ANEG_STATE_UNKNOWN:
if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
ap->state = ANEG_STATE_AN_ENABLE;
@@ -3463,11 +3485,10 @@ static int tg3_fiber_aneg_smachine(struct tg3 *tp,
/* fallthru */
case ANEG_STATE_RESTART:
delta = ap->cur_time - ap->link_time;
- if (delta > ANEG_STATE_SETTLE_TIME) {
+ if (delta > ANEG_STATE_SETTLE_TIME)
ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
- } else {
+ else
ret = ANEG_TIMER_ENAB;
- }
break;
case ANEG_STATE_DISABLE_LINK_OK:
@@ -3491,9 +3512,8 @@ static int tg3_fiber_aneg_smachine(struct tg3 *tp,
break;
case ANEG_STATE_ABILITY_DETECT:
- if (ap->ability_match != 0 && ap->rxconfig != 0) {
+ if (ap->ability_match != 0 && ap->rxconfig != 0)
ap->state = ANEG_STATE_ACK_DETECT_INIT;
- }
break;
case ANEG_STATE_ACK_DETECT_INIT:
@@ -4171,9 +4191,9 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
current_duplex = DUPLEX_FULL;
else
current_duplex = DUPLEX_HALF;
- }
- else
+ } else {
current_link_up = 0;
+ }
}
}
@@ -4211,6 +4231,7 @@ static void tg3_serdes_parallel_detect(struct tg3 *tp)
tp->serdes_counter--;
return;
}
+
if (!netif_carrier_ok(tp->dev) &&
(tp->link_config.autoneg == AUTONEG_ENABLE)) {
u32 bmcr;
@@ -4240,10 +4261,9 @@ static void tg3_serdes_parallel_detect(struct tg3 *tp)
tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
}
}
- }
- else if (netif_carrier_ok(tp->dev) &&
- (tp->link_config.autoneg == AUTONEG_ENABLE) &&
- (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
+ } else if (netif_carrier_ok(tp->dev) &&
+ (tp->link_config.autoneg == AUTONEG_ENABLE) &&
+ (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
u32 phy2;
/* Select expansion interrupt status register */
@@ -4266,13 +4286,12 @@ static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
int err;
- if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
+ if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
err = tg3_setup_fiber_phy(tp, force_reset);
- } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
+ else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
err = tg3_setup_fiber_mii_phy(tp, force_reset);
- } else {
+ else
err = tg3_setup_copper_phy(tp, force_reset);
- }
if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
u32 val, scale;
@@ -4335,8 +4354,11 @@ static void tg3_tx_recover(struct tg3 *tp)
BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
tp->write32_tx_mbox == tg3_write_indirect_mbox);
- netdev_warn(tp->dev, "The system may be re-ordering memory-mapped I/O cycles to the network device, attempting to recover\n"
- "Please report the problem to the driver maintainer and include system chipset information.\n");
+ netdev_warn(tp->dev,
+ "The system may be re-ordering memory-mapped I/O "
+ "cycles to the network device, attempting to recover. "
+ "Please report the problem to the driver maintainer "
+ "and include system chipset information.\n");
spin_lock(&tp->lock);
tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
@@ -4378,7 +4400,7 @@ static void tg3_tx(struct tg3_napi *tnapi)
}
pci_unmap_single(tp->pdev,
- pci_unmap_addr(ri, mapping),
+ dma_unmap_addr(ri, mapping),
skb_headlen(skb),
PCI_DMA_TODEVICE);
@@ -4392,7 +4414,7 @@ static void tg3_tx(struct tg3_napi *tnapi)
tx_bug = 1;
pci_unmap_page(tp->pdev,
- pci_unmap_addr(ri, mapping),
+ dma_unmap_addr(ri, mapping),
skb_shinfo(skb)->frags[i].size,
PCI_DMA_TODEVICE);
sw_idx = NEXT_TX(sw_idx);
@@ -4430,7 +4452,7 @@ static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
if (!ri->skb)
return;
- pci_unmap_single(tp->pdev, pci_unmap_addr(ri, mapping),
+ pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
map_sz, PCI_DMA_FROMDEVICE);
dev_kfree_skb_any(ri->skb);
ri->skb = NULL;
@@ -4496,7 +4518,7 @@ static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
}
map->skb = skb;
- pci_unmap_addr_set(map, mapping, mapping);
+ dma_unmap_addr_set(map, mapping, mapping);
desc->addr_hi = ((u64)mapping >> 32);
desc->addr_lo = ((u64)mapping & 0xffffffff);
@@ -4516,8 +4538,8 @@ static void tg3_recycle_rx(struct tg3_napi *tnapi,
struct tg3 *tp = tnapi->tp;
struct tg3_rx_buffer_desc *src_desc, *dest_desc;
struct ring_info *src_map, *dest_map;
- int dest_idx;
struct tg3_rx_prodring_set *spr = &tp->prodring[0];
+ int dest_idx;
switch (opaque_key) {
case RXD_OPAQUE_RING_STD:
@@ -4541,8 +4563,8 @@ static void tg3_recycle_rx(struct tg3_napi *tnapi,
}
dest_map->skb = src_map->skb;
- pci_unmap_addr_set(dest_map, mapping,
- pci_unmap_addr(src_map, mapping));
+ dma_unmap_addr_set(dest_map, mapping,
+ dma_unmap_addr(src_map, mapping));
dest_desc->addr_hi = src_desc->addr_hi;
dest_desc->addr_lo = src_desc->addr_lo;
@@ -4605,18 +4627,20 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
struct sk_buff *skb;
dma_addr_t dma_addr;
u32 opaque_key, desc_idx, *post_ptr;
+ bool hw_vlan __maybe_unused = false;
+ u16 vtag __maybe_unused = 0;
desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
if (opaque_key == RXD_OPAQUE_RING_STD) {
ri = &tp->prodring[0].rx_std_buffers[desc_idx];
- dma_addr = pci_unmap_addr(ri, mapping);
+ dma_addr = dma_unmap_addr(ri, mapping);
skb = ri->skb;
post_ptr = &std_prod_idx;
rx_std_posted++;
} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
ri = &tp->prodring[0].rx_jmb_buffers[desc_idx];
- dma_addr = pci_unmap_addr(ri, mapping);
+ dma_addr = dma_unmap_addr(ri, mapping);
skb = ri->skb;
post_ptr = &jmb_prod_idx;
} else
@@ -4638,12 +4662,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
ETH_FCS_LEN;
- if (len > RX_COPY_THRESHOLD &&
- tp->rx_offset == NET_IP_ALIGN) {
- /* rx_offset will likely not equal NET_IP_ALIGN
- * if this is a 5701 card running in PCI-X mode
- * [see tg3_get_invariants()]
- */
+ if (len > TG3_RX_COPY_THRESH(tp)) {
int skb_size;
skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
@@ -4668,12 +4687,12 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
tg3_recycle_rx(tnapi, tpr, opaque_key,
desc_idx, *post_ptr);
- copy_skb = netdev_alloc_skb(tp->dev,
- len + TG3_RAW_IP_ALIGN);
+ copy_skb = netdev_alloc_skb(tp->dev, len + VLAN_HLEN +
+ TG3_RAW_IP_ALIGN);
if (copy_skb == NULL)
goto drop_it_no_recycle;
- skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
+ skb_reserve(copy_skb, TG3_RAW_IP_ALIGN + VLAN_HLEN);
skb_put(copy_skb, len);
pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
skb_copy_from_linear_data(skb, copy_skb->data, len);
@@ -4699,12 +4718,29 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
goto next_pkt;
}
+ if (desc->type_flags & RXD_FLAG_VLAN &&
+ !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG)) {
+ vtag = desc->err_vlan & RXD_VLAN_MASK;
#if TG3_VLAN_TAG_USED
- if (tp->vlgrp != NULL &&
- desc->type_flags & RXD_FLAG_VLAN) {
- vlan_gro_receive(&tnapi->napi, tp->vlgrp,
- desc->err_vlan & RXD_VLAN_MASK, skb);
- } else
+ if (tp->vlgrp)
+ hw_vlan = true;
+ else
+#endif
+ {
+ struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
+ __skb_push(skb, VLAN_HLEN);
+
+ memmove(ve, skb->data + VLAN_HLEN,
+ ETH_ALEN * 2);
+ ve->h_vlan_proto = htons(ETH_P_8021Q);
+ ve->h_vlan_TCI = htons(vtag);
+ }
+ }
+
+#if TG3_VLAN_TAG_USED
+ if (hw_vlan)
+ vlan_gro_receive(&tnapi->napi, tp->vlgrp, vtag, skb);
+ else
#endif
napi_gro_receive(&tnapi->napi, skb);
@@ -4978,7 +5014,7 @@ static int tg3_poll_msix(struct napi_struct *napi, int budget)
if (unlikely(work_done >= budget))
break;
- /* tp->last_tag is used in tg3_restart_ints() below
+ /* tp->last_tag is used in tg3_int_reenable() below
* to tell the hw how much work has been processed,
* so we must read it before checking for more work.
*/
@@ -4987,8 +5023,8 @@ static int tg3_poll_msix(struct napi_struct *napi, int budget)
rmb();
/* check for RX/TX work to do */
- if (sblk->idx[0].tx_consumer == tnapi->tx_cons &&
- *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr) {
+ if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
+ *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
napi_complete(napi);
/* Reenable interrupts. */
tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
@@ -5260,7 +5296,8 @@ static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
err = tg3_init_hw(tp, reset_phy);
if (err) {
- netdev_err(tp->dev, "Failed to re-initialize device, aborting\n");
+ netdev_err(tp->dev,
+ "Failed to re-initialize device, aborting\n");
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
tg3_full_unlock(tp);
del_timer_sync(&tp->timer);
@@ -5437,12 +5474,12 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
len = skb_shinfo(skb)->frags[i-1].size;
pci_unmap_single(tp->pdev,
- pci_unmap_addr(&tnapi->tx_buffers[entry],
+ dma_unmap_addr(&tnapi->tx_buffers[entry],
mapping),
len, PCI_DMA_TODEVICE);
if (i == 0) {
tnapi->tx_buffers[entry].skb = new_skb;
- pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
+ dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
new_addr);
} else {
tnapi->tx_buffers[entry].skb = NULL;
@@ -5492,7 +5529,6 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
struct netdev_queue *txq;
unsigned int i, last;
-
txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
tnapi = &tp->napi[skb_get_queue_mapping(skb)];
if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
@@ -5508,7 +5544,8 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
netif_tx_stop_queue(txq);
/* This is a hard error, log it. */
- netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
+ netdev_err(dev,
+ "BUG! Tx Ring full when queue awake!\n");
}
return NETDEV_TX_BUSY;
}
@@ -5552,9 +5589,10 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
tcp_hdr(skb)->check = 0;
- }
- else if (skb->ip_summed == CHECKSUM_PARTIAL)
+ } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
base_flags |= TXD_FLAG_TCPUDP_CSUM;
+ }
+
#if TG3_VLAN_TAG_USED
if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
base_flags |= (TXD_FLAG_VLAN |
@@ -5571,7 +5609,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
}
tnapi->tx_buffers[entry].skb = skb;
- pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
+ dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) &&
!mss && skb->len > ETH_DATA_LEN)
@@ -5597,7 +5635,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
goto dma_error;
tnapi->tx_buffers[entry].skb = NULL;
- pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
+ dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
mapping);
tg3_set_txd(tnapi, entry, mapping, len,
@@ -5627,7 +5665,7 @@ dma_error:
entry = tnapi->tx_prod;
tnapi->tx_buffers[entry].skb = NULL;
pci_unmap_single(tp->pdev,
- pci_unmap_addr(&tnapi->tx_buffers[entry], mapping),
+ dma_unmap_addr(&tnapi->tx_buffers[entry], mapping),
skb_headlen(skb),
PCI_DMA_TODEVICE);
for (i = 0; i <= last; i++) {
@@ -5635,7 +5673,7 @@ dma_error:
entry = NEXT_TX(entry);
pci_unmap_page(tp->pdev,
- pci_unmap_addr(&tnapi->tx_buffers[entry],
+ dma_unmap_addr(&tnapi->tx_buffers[entry],
mapping),
frag->size, PCI_DMA_TODEVICE);
}
@@ -5695,7 +5733,6 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
struct netdev_queue *txq;
unsigned int i, last;
-
txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
tnapi = &tp->napi[skb_get_queue_mapping(skb)];
if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
@@ -5711,7 +5748,8 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
netif_tx_stop_queue(txq);
/* This is a hard error, log it. */
- netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
+ netdev_err(dev,
+ "BUG! Tx Ring full when queue awake!\n");
}
return NETDEV_TX_BUSY;
}
@@ -5737,7 +5775,7 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
hdr_len = ip_tcp_len + tcp_opt_len;
if (unlikely((ETH_HLEN + hdr_len) > 80) &&
(tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
- return (tg3_tso_bug(tp, skb));
+ return tg3_tso_bug(tp, skb);
base_flags |= (TXD_FLAG_CPU_PRE_DMA |
TXD_FLAG_CPU_POST_DMA);
@@ -5797,7 +5835,7 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
}
tnapi->tx_buffers[entry].skb = skb;
- pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
+ dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
would_hit_hwbug = 0;
@@ -5833,7 +5871,7 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
len, PCI_DMA_TODEVICE);
tnapi->tx_buffers[entry].skb = NULL;
- pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
+ dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
mapping);
if (pci_dma_mapping_error(tp->pdev, mapping))
goto dma_error;
@@ -5898,7 +5936,7 @@ dma_error:
entry = tnapi->tx_prod;
tnapi->tx_buffers[entry].skb = NULL;
pci_unmap_single(tp->pdev,
- pci_unmap_addr(&tnapi->tx_buffers[entry], mapping),
+ dma_unmap_addr(&tnapi->tx_buffers[entry], mapping),
skb_headlen(skb),
PCI_DMA_TODEVICE);
for (i = 0; i <= last; i++) {
@@ -5906,7 +5944,7 @@ dma_error:
entry = NEXT_TX(entry);
pci_unmap_page(tp->pdev,
- pci_unmap_addr(&tnapi->tx_buffers[entry],
+ dma_unmap_addr(&tnapi->tx_buffers[entry],
mapping),
frag->size, PCI_DMA_TODEVICE);
}
@@ -5924,9 +5962,9 @@ static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
ethtool_op_set_tso(dev, 0);
- }
- else
+ } else {
tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
+ }
} else {
if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
@@ -6007,7 +6045,7 @@ static void tg3_rx_prodring_free(struct tg3 *tp,
}
}
-/* Initialize tx/rx rings for packet processing.
+/* Initialize rx rings for packet processing.
*
* The chip has been shut down and the driver detached from
* the networking, so no interrupts or new tx packets will
@@ -6058,8 +6096,10 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
/* Now allocate fresh SKBs for each rx ring. */
for (i = 0; i < tp->rx_pending; i++) {
if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
- netdev_warn(tp->dev, "Using a smaller RX standard ring, only %d out of %d buffers were allocated successfully\n",
- i, tp->rx_pending);
+ netdev_warn(tp->dev,
+ "Using a smaller RX standard ring. Only "
+ "%d out of %d buffers were allocated "
+ "successfully\n", i, tp->rx_pending);
if (i == 0)
goto initfail;
tp->rx_pending = i;
@@ -6088,8 +6128,10 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
for (i = 0; i < tp->rx_jumbo_pending; i++) {
if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
- netdev_warn(tp->dev, "Using a smaller RX jumbo ring, only %d out of %d buffers were allocated successfully\n",
- i, tp->rx_jumbo_pending);
+ netdev_warn(tp->dev,
+ "Using a smaller RX jumbo ring. Only %d "
+ "out of %d buffers were allocated "
+ "successfully\n", i, tp->rx_jumbo_pending);
if (i == 0)
goto initfail;
tp->rx_jumbo_pending = i;
@@ -6187,7 +6229,7 @@ static void tg3_free_rings(struct tg3 *tp)
}
pci_unmap_single(tp->pdev,
- pci_unmap_addr(txp, mapping),
+ dma_unmap_addr(txp, mapping),
skb_headlen(skb),
PCI_DMA_TODEVICE);
txp->skb = NULL;
@@ -6197,7 +6239,7 @@ static void tg3_free_rings(struct tg3 *tp)
for (k = 0; k < skb_shinfo(skb)->nr_frags; k++) {
txp = &tnapi->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
pci_unmap_page(tp->pdev,
- pci_unmap_addr(txp, mapping),
+ dma_unmap_addr(txp, mapping),
skb_shinfo(skb)->frags[k].size,
PCI_DMA_TODEVICE);
i++;
@@ -6433,8 +6475,9 @@ static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int
}
if (i == MAX_WAIT_CNT && !silent) {
- pr_err("tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
- ofs, enable_bit);
+ dev_err(&tp->pdev->dev,
+ "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
+ ofs, enable_bit);
return -ENODEV;
}
@@ -6480,8 +6523,9 @@ static int tg3_abort_hw(struct tg3 *tp, int silent)
break;
}
if (i >= MAX_WAIT_CNT) {
- netdev_err(tp->dev, "%s timed out, TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
- __func__, tr32(MAC_TX_MODE));
+ dev_err(&tp->pdev->dev,
+ "%s timed out, TX_MODE_ENABLE will not clear "
+ "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
err |= -ENODEV;
}
@@ -6551,35 +6595,35 @@ static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
return;
switch (kind) {
- case RESET_KIND_INIT:
- tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
- APE_HOST_SEG_SIG_MAGIC);
- tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
- APE_HOST_SEG_LEN_MAGIC);
- apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
- tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
- tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
- APE_HOST_DRIVER_ID_MAGIC);
- tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
- APE_HOST_BEHAV_NO_PHYLOCK);
-
- event = APE_EVENT_STATUS_STATE_START;
- break;
- case RESET_KIND_SHUTDOWN:
- /* With the interface we are currently using,
- * APE does not track driver state. Wiping
- * out the HOST SEGMENT SIGNATURE forces
- * the APE to assume OS absent status.
- */
- tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
+ case RESET_KIND_INIT:
+ tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
+ APE_HOST_SEG_SIG_MAGIC);
+ tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
+ APE_HOST_SEG_LEN_MAGIC);
+ apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
+ tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
+ tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
+ APE_HOST_DRIVER_ID_MAGIC);
+ tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
+ APE_HOST_BEHAV_NO_PHYLOCK);
+
+ event = APE_EVENT_STATUS_STATE_START;
+ break;
+ case RESET_KIND_SHUTDOWN:
+ /* With the interface we are currently using,
+ * APE does not track driver state. Wiping
+ * out the HOST SEGMENT SIGNATURE forces
+ * the APE to assume OS absent status.
+ */
+ tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
- event = APE_EVENT_STATUS_STATE_UNLOAD;
- break;
- case RESET_KIND_SUSPEND:
- event = APE_EVENT_STATUS_STATE_SUSPEND;
- break;
- default:
- return;
+ event = APE_EVENT_STATUS_STATE_UNLOAD;
+ break;
+ case RESET_KIND_SUSPEND:
+ event = APE_EVENT_STATUS_STATE_SUSPEND;
+ break;
+ default:
+ return;
}
event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
@@ -7156,7 +7200,8 @@ static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_b
if (cpu_base == TX_CPU_BASE &&
(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
- netdev_err(tp->dev, "%s: Trying to load TX cpu firmware which is 5705\n",
+ netdev_err(tp->dev,
+ "%s: Trying to load TX cpu firmware which is 5705\n",
__func__);
return -EINVAL;
}
@@ -7236,7 +7281,8 @@ static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
udelay(1000);
}
if (i >= 5) {
- netdev_err(tp->dev, "tg3_load_firmware fails to set RX CPU PC, is %08x should be %08x\n",
+ netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
+ "should be %08x\n", __func__,
tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
return -ENODEV;
}
@@ -7300,7 +7346,8 @@ static int tg3_load_tso_firmware(struct tg3 *tp)
udelay(1000);
}
if (i >= 5) {
- netdev_err(tp->dev, "%s fails to set CPU PC, is %08x should be %08x\n",
+ netdev_err(tp->dev,
+ "%s fails to set CPU PC, is %08x should be %08x\n",
__func__, tr32(cpu_base + CPU_PC), info.fw_base);
return -ENODEV;
}
@@ -7568,9 +7615,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
- if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
+ if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)
tg3_abort_hw(tp, 1);
- }
if (reset_phy)
tg3_phy_reset(tp);
@@ -7631,6 +7677,25 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32(GRC_MODE, grc_mode);
}
+ if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
+ u32 grc_mode = tr32(GRC_MODE);
+
+ /* Access the lower 1K of PL PCIE block registers. */
+ val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
+ tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
+
+ val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5);
+ tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
+ val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
+
+ tw32(GRC_MODE, grc_mode);
+
+ val = tr32(TG3_CPMU_LSPD_10MB_CLK);
+ val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
+ val |= CPMU_LSPD_10MB_MACCLK_6_25;
+ tw32(TG3_CPMU_LSPD_10MB_CLK, val);
+ }
+
/* This works around an issue with Athlon chipsets on
* B3 tigon3 silicon. This bit has no effect on any
* other revision. But do not set this on PCI Express
@@ -7679,6 +7744,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
val = tr32(TG3PCI_DMA_RW_CTRL) &
~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
+ if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
+ val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
@@ -7723,8 +7790,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
- }
- else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
+ } else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
int fw_len;
fw_len = tp->fw_len;
@@ -7839,9 +7905,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
val = (RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT) |
- (RX_STD_MAX_SIZE << 2);
+ (TG3_RX_STD_DMA_SZ << 2);
else
- val = RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT;
+ val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
} else
val = RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT;
@@ -8476,8 +8542,8 @@ static void tg3_timer(unsigned long __opaque)
tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
FWCMD_NICDRV_ALIVE3);
tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
- /* 5 seconds timeout */
- tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
+ tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
+ TG3_FW_UPDATE_TIMEOUT_SEC);
tg3_generate_fw_event(tp);
}
@@ -8625,8 +8691,9 @@ static int tg3_test_msi(struct tg3 *tp)
return err;
/* MSI test failed, go back to INTx mode */
- netdev_warn(tp->dev, "No interrupt was generated using MSI, switching to INTx mode\n"
- "Please report this failure to the PCI maintainer and include system chipset information\n");
+ netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
+ "to INTx mode. Please report this failure to the PCI "
+ "maintainer and include system chipset information\n");
free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
@@ -8739,7 +8806,8 @@ static void tg3_ints_init(struct tg3 *tp)
/* All MSI supporting chips should support tagged
* status. Assert that this is the case.
*/
- netdev_warn(tp->dev, "MSI without TAGGED? Not using MSI\n");
+ netdev_warn(tp->dev,
+ "MSI without TAGGED_STATUS? Not using MSI\n");
goto defcfg;
}
@@ -8914,236 +8982,6 @@ err_out1:
return err;
}
-#if 0
-/*static*/ void tg3_dump_state(struct tg3 *tp)
-{
- u32 val32, val32_2, val32_3, val32_4, val32_5;
- u16 val16;
- int i;
- struct tg3_hw_status *sblk = tp->napi[0]->hw_status;
-
- pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
- pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
- printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
- val16, val32);
-
- /* MAC block */
- printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
- tr32(MAC_MODE), tr32(MAC_STATUS));
- printk(" MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
- tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
- printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
- tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
- printk(" MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
- tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
-
- /* Send data initiator control block */
- printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
- tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
- printk(" SNDDATAI_STATSCTRL[%08x]\n",
- tr32(SNDDATAI_STATSCTRL));
-
- /* Send data completion control block */
- printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
-
- /* Send BD ring selector block */
- printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
- tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
-
- /* Send BD initiator control block */
- printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
- tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
-
- /* Send BD completion control block */
- printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
-
- /* Receive list placement control block */
- printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
- tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
- printk(" RCVLPC_STATSCTRL[%08x]\n",
- tr32(RCVLPC_STATSCTRL));
-
- /* Receive data and receive BD initiator control block */
- printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
- tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
-
- /* Receive data completion control block */
- printk("DEBUG: RCVDCC_MODE[%08x]\n",
- tr32(RCVDCC_MODE));
-
- /* Receive BD initiator control block */
- printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
- tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
-
- /* Receive BD completion control block */
- printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
- tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
-
- /* Receive list selector control block */
- printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
- tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
-
- /* Mbuf cluster free block */
- printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
- tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
-
- /* Host coalescing control block */
- printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
- tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
- printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
- tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
- tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
- printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
- tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
- tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
- printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
- tr32(HOSTCC_STATS_BLK_NIC_ADDR));
- printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
- tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
-
- /* Memory arbiter control block */
- printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
- tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
-
- /* Buffer manager control block */
- printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
- tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
- printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
- tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
- printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
- "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
- tr32(BUFMGR_DMA_DESC_POOL_ADDR),
- tr32(BUFMGR_DMA_DESC_POOL_SIZE));
-
- /* Read DMA control block */
- printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
- tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
-
- /* Write DMA control block */
- printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
- tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
-
- /* DMA completion block */
- printk("DEBUG: DMAC_MODE[%08x]\n",
- tr32(DMAC_MODE));
-
- /* GRC block */
- printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
- tr32(GRC_MODE), tr32(GRC_MISC_CFG));
- printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
- tr32(GRC_LOCAL_CTRL));
-
- /* TG3_BDINFOs */
- printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
- tr32(RCVDBDI_JUMBO_BD + 0x0),
- tr32(RCVDBDI_JUMBO_BD + 0x4),
- tr32(RCVDBDI_JUMBO_BD + 0x8),
- tr32(RCVDBDI_JUMBO_BD + 0xc));
- printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
- tr32(RCVDBDI_STD_BD + 0x0),
- tr32(RCVDBDI_STD_BD + 0x4),
- tr32(RCVDBDI_STD_BD + 0x8),
- tr32(RCVDBDI_STD_BD + 0xc));
- printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
- tr32(RCVDBDI_MINI_BD + 0x0),
- tr32(RCVDBDI_MINI_BD + 0x4),
- tr32(RCVDBDI_MINI_BD + 0x8),
- tr32(RCVDBDI_MINI_BD + 0xc));
-
- tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
- tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
- tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
- tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
- printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
- val32, val32_2, val32_3, val32_4);
-
- tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
- tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
- tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
- tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
- printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
- val32, val32_2, val32_3, val32_4);
-
- tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
- tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
- tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
- tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
- tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
- printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
- val32, val32_2, val32_3, val32_4, val32_5);
-
- /* SW status block */
- printk(KERN_DEBUG
- "Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
- sblk->status,
- sblk->status_tag,
- sblk->rx_jumbo_consumer,
- sblk->rx_consumer,
- sblk->rx_mini_consumer,
- sblk->idx[0].rx_producer,
- sblk->idx[0].tx_consumer);
-
- /* SW statistics block */
- printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
- ((u32 *)tp->hw_stats)[0],
- ((u32 *)tp->hw_stats)[1],
- ((u32 *)tp->hw_stats)[2],
- ((u32 *)tp->hw_stats)[3]);
-
- /* Mailboxes */
- printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
- tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
- tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
- tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
- tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
-
- /* NIC side send descriptors. */
- for (i = 0; i < 6; i++) {
- unsigned long txd;
-
- txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
- + (i * sizeof(struct tg3_tx_buffer_desc));
- printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
- i,
- readl(txd + 0x0), readl(txd + 0x4),
- readl(txd + 0x8), readl(txd + 0xc));
- }
-
- /* NIC side RX descriptors. */
- for (i = 0; i < 6; i++) {
- unsigned long rxd;
-
- rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
- + (i * sizeof(struct tg3_rx_buffer_desc));
- printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
- i,
- readl(rxd + 0x0), readl(rxd + 0x4),
- readl(rxd + 0x8), readl(rxd + 0xc));
- rxd += (4 * sizeof(u32));
- printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
- i,
- readl(rxd + 0x0), readl(rxd + 0x4),
- readl(rxd + 0x8), readl(rxd + 0xc));
- }
-
- for (i = 0; i < 6; i++) {
- unsigned long rxd;
-
- rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
- + (i * sizeof(struct tg3_rx_buffer_desc));
- printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
- i,
- readl(rxd + 0x0), readl(rxd + 0x4),
- readl(rxd + 0x8), readl(rxd + 0xc));
- rxd += (4 * sizeof(u32));
- printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
- i,
- readl(rxd + 0x0), readl(rxd + 0x4),
- readl(rxd + 0x8), readl(rxd + 0xc));
- }
-}
-#endif
-
static struct net_device_stats *tg3_get_stats(struct net_device *);
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
@@ -9162,9 +9000,6 @@ static int tg3_close(struct net_device *dev)
tg3_phy_stop(tp);
tg3_full_lock(tp, 1);
-#if 0
- tg3_dump_state(tp);
-#endif
tg3_disable_ints(tp);
@@ -9406,9 +9241,8 @@ static inline u32 calc_crc(unsigned char *buf, int len)
reg >>= 1;
- if (tmp) {
+ if (tmp)
reg ^= 0xedb88320;
- }
}
}
@@ -9452,20 +9286,20 @@ static void __tg3_set_rx_mode(struct net_device *dev)
rx_mode |= RX_MODE_PROMISC;
} else if (dev->flags & IFF_ALLMULTI) {
/* Accept all multicast. */
- tg3_set_multi (tp, 1);
+ tg3_set_multi(tp, 1);
} else if (netdev_mc_empty(dev)) {
/* Reject all multicast. */
- tg3_set_multi (tp, 0);
+ tg3_set_multi(tp, 0);
} else {
/* Accept one or more multicast(s). */
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
u32 mc_filter[4] = { 0, };
u32 regidx;
u32 bit;
u32 crc;
- netdev_for_each_mc_addr(mclist, dev) {
- crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
+ netdev_for_each_mc_addr(ha, dev) {
+ crc = calc_crc(ha->addr, ETH_ALEN);
bit = ~crc & 0x7f;
regidx = (bit & 0x60) >> 5;
bit &= 0x1f;
@@ -9618,7 +9452,7 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
memcpy(data, ((char*)&val) + b_offset, b_count);
len -= b_count;
offset += b_count;
- eeprom->len += b_count;
+ eeprom->len += b_count;
}
/* read bytes up to the last 4 byte boundary */
@@ -10166,8 +10000,8 @@ static int tg3_set_rx_csum(struct net_device *dev, u32 data)
if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
if (data != 0)
return -EINVAL;
- return 0;
- }
+ return 0;
+ }
spin_lock_bh(&tp->lock);
if (data)
@@ -10186,8 +10020,8 @@ static int tg3_set_tx_csum(struct net_device *dev, u32 data)
if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
if (data != 0)
return -EINVAL;
- return 0;
- }
+ return 0;
+ }
if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
ethtool_op_set_tx_ipv6_csum(dev, data);
@@ -10197,7 +10031,7 @@ static int tg3_set_tx_csum(struct net_device *dev, u32 data)
return 0;
}
-static int tg3_get_sset_count (struct net_device *dev, int sset)
+static int tg3_get_sset_count(struct net_device *dev, int sset)
{
switch (sset) {
case ETH_SS_TEST:
@@ -10209,7 +10043,7 @@ static int tg3_get_sset_count (struct net_device *dev, int sset)
}
}
-static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
+static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
switch (stringset) {
case ETH_SS_STATS:
@@ -10256,7 +10090,7 @@ static int tg3_phys_id(struct net_device *dev, u32 data)
return 0;
}
-static void tg3_get_ethtool_stats (struct net_device *dev,
+static void tg3_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *estats, u64 *tmp_stats)
{
struct tg3 *tp = netdev_priv(dev);
@@ -10362,8 +10196,7 @@ static int tg3_test_nvram(struct tg3 *tp)
for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
parity[k++] = buf8[i] & msk;
i++;
- }
- else if (i == 16) {
+ } else if (i == 16) {
int l;
u8 msk;
@@ -10461,7 +10294,7 @@ static int tg3_test_registers(struct tg3 *tp)
{ MAC_ADDR_0_HIGH, 0x0000,
0x00000000, 0x0000ffff },
{ MAC_ADDR_0_LOW, 0x0000,
- 0x00000000, 0xffffffff },
+ 0x00000000, 0xffffffff },
{ MAC_RX_MTU_SIZE, 0x0000,
0x00000000, 0x0000ffff },
{ MAC_TX_MODE, 0x0000,
@@ -10649,7 +10482,8 @@ static int tg3_test_registers(struct tg3 *tp)
out:
if (netif_msg_hw(tp))
- pr_err("Register test failed at offset %x\n", offset);
+ netdev_err(tp->dev,
+ "Register test failed at offset %x\n", offset);
tw32(offset, save_val);
return -EIO;
}
@@ -10825,9 +10659,9 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
MII_TG3_EXT_CTRL_LNK3_LED_MODE);
}
tw32(MAC_MODE, mac_mode);
- }
- else
+ } else {
return -EINVAL;
+ }
err = -EIO;
@@ -10909,7 +10743,7 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
rx_skb = tpr->rx_std_buffers[desc_idx].skb;
- map = pci_unmap_addr(&tpr->rx_std_buffers[desc_idx], mapping);
+ map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx], mapping);
pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
for (i = 14; i < tx_len; i++) {
@@ -11083,7 +10917,7 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return phy_mii_ioctl(phydev, data, cmd);
}
- switch(cmd) {
+ switch (cmd) {
case SIOCGMIIPHY:
data->phy_id = tp->phy_addr;
@@ -11776,7 +11610,8 @@ static void __devinit tg3_nvram_init(struct tg3 *tp)
tp->tg3_flags |= TG3_FLAG_NVRAM;
if (tg3_nvram_lock(tp)) {
- netdev_warn(tp->dev, "Cannot get nvram lock, %s failed\n",
+ netdev_warn(tp->dev,
+ "Cannot get nvram lock, %s failed\n",
__func__);
return;
}
@@ -11895,7 +11730,7 @@ static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
if (ret)
break;
- page_off = offset & pagemask;
+ page_off = offset & pagemask;
size = pagesize;
if (len < size)
size = len;
@@ -11923,7 +11758,7 @@ static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
- if (tg3_nvram_exec_cmd(tp, nvram_cmd))
+ if (tg3_nvram_exec_cmd(tp, nvram_cmd))
break;
/* Issue another write enable to start the write. */
@@ -11977,7 +11812,7 @@ static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
memcpy(&data, buf + i, 4);
tw32(NVRAM_WRDATA, be32_to_cpu(data));
- page_off = offset % tp->nvram_pagesize;
+ page_off = offset % tp->nvram_pagesize;
phy_addr = tg3_nvram_phys_addr(tp, offset);
@@ -11985,7 +11820,7 @@ static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
- if ((page_off == 0) || (i == 0))
+ if (page_off == 0 || i == 0)
nvram_cmd |= NVRAM_CMD_FIRST;
if (page_off == (tp->nvram_pagesize - 4))
nvram_cmd |= NVRAM_CMD_LAST;
@@ -12028,8 +11863,7 @@ static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
- }
- else {
+ } else {
u32 grc_mode;
ret = tg3_nvram_lock(tp);
@@ -12049,8 +11883,7 @@ static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
ret = tg3_nvram_write_block_buffered(tp, offset, len,
buf);
- }
- else {
+ } else {
ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
buf);
}
@@ -12545,11 +12378,11 @@ skip_phy_reset:
return err;
}
-static void __devinit tg3_read_partno(struct tg3 *tp)
+static void __devinit tg3_read_vpd(struct tg3 *tp)
{
- unsigned char vpd_data[TG3_NVM_VPD_LEN]; /* in little-endian format */
+ u8 vpd_data[TG3_NVM_VPD_LEN];
unsigned int block_end, rosize, len;
- int i = 0;
+ int j, i = 0;
u32 magic;
if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
@@ -12598,6 +12431,32 @@ static void __devinit tg3_read_partno(struct tg3 *tp)
if (block_end > TG3_NVM_VPD_LEN)
goto out_not_found;
+ j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
+ PCI_VPD_RO_KEYWORD_MFR_ID);
+ if (j > 0) {
+ len = pci_vpd_info_field_size(&vpd_data[j]);
+
+ j += PCI_VPD_INFO_FLD_HDR_SIZE;
+ if (j + len > block_end || len != 4 ||
+ memcmp(&vpd_data[j], "1028", 4))
+ goto partno;
+
+ j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
+ PCI_VPD_RO_KEYWORD_VENDOR0);
+ if (j < 0)
+ goto partno;
+
+ len = pci_vpd_info_field_size(&vpd_data[j]);
+
+ j += PCI_VPD_INFO_FLD_HDR_SIZE;
+ if (j + len > block_end)
+ goto partno;
+
+ memcpy(tp->fw_ver, &vpd_data[j], len);
+ strncat(tp->fw_ver, " bc ", TG3_NVM_VPD_LEN - len - 1);
+ }
+
+partno:
i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
PCI_VPD_RO_KEYWORD_PARTNO);
if (i < 0)
@@ -12667,7 +12526,7 @@ static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
static void __devinit tg3_read_bc_ver(struct tg3 *tp)
{
u32 val, offset, start, ver_offset;
- int i;
+ int i, dst_off;
bool newver = false;
if (tg3_nvram_read(tp, 0xc, &offset) ||
@@ -12687,8 +12546,11 @@ static void __devinit tg3_read_bc_ver(struct tg3 *tp)
newver = true;
}
+ dst_off = strlen(tp->fw_ver);
+
if (newver) {
- if (tg3_nvram_read(tp, offset + 8, &ver_offset))
+ if (TG3_VER_SIZE - dst_off < 16 ||
+ tg3_nvram_read(tp, offset + 8, &ver_offset))
return;
offset = offset + ver_offset - start;
@@ -12697,7 +12559,7 @@ static void __devinit tg3_read_bc_ver(struct tg3 *tp)
if (tg3_nvram_read_be32(tp, offset + i, &v))
return;
- memcpy(tp->fw_ver + i, &v, sizeof(v));
+ memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
}
} else {
u32 major, minor;
@@ -12708,7 +12570,8 @@ static void __devinit tg3_read_bc_ver(struct tg3 *tp)
major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
TG3_NVM_BCVER_MAJSFT;
minor = ver_offset & TG3_NVM_BCVER_MINMSK;
- snprintf(&tp->fw_ver[0], 32, "v%d.%02d", major, minor);
+ snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
+ "v%d.%02d", major, minor);
}
}
@@ -12732,9 +12595,7 @@ static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
u32 offset, major, minor, build;
- tp->fw_ver[0] = 's';
- tp->fw_ver[1] = 'b';
- tp->fw_ver[2] = '\0';
+ strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
return;
@@ -12771,11 +12632,14 @@ static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
if (minor > 99 || build > 26)
return;
- snprintf(&tp->fw_ver[2], 30, " v%d.%02d", major, minor);
+ offset = strlen(tp->fw_ver);
+ snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
+ " v%d.%02d", major, minor);
if (build > 0) {
- tp->fw_ver[8] = 'a' + build - 1;
- tp->fw_ver[9] = '\0';
+ offset = strlen(tp->fw_ver);
+ if (offset < TG3_VER_SIZE - 1)
+ tp->fw_ver[offset] = 'a' + build - 1;
}
}
@@ -12862,12 +12726,13 @@ static void __devinit tg3_read_dash_ver(struct tg3 *tp)
static void __devinit tg3_read_fw_ver(struct tg3 *tp)
{
u32 val;
+ bool vpd_vers = false;
- if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) {
- tp->fw_ver[0] = 's';
- tp->fw_ver[1] = 'b';
- tp->fw_ver[2] = '\0';
+ if (tp->fw_ver[0] != 0)
+ vpd_vers = true;
+ if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) {
+ strcat(tp->fw_ver, "sb");
return;
}
@@ -12884,11 +12749,12 @@ static void __devinit tg3_read_fw_ver(struct tg3 *tp)
return;
if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
- (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
- return;
+ (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) || vpd_vers)
+ goto done;
tg3_read_mgmtfw_ver(tp);
+done:
tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}
@@ -12898,9 +12764,9 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
{
static struct pci_device_id write_reorder_chipsets[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD,
- PCI_DEVICE_ID_AMD_FE_GATE_700C) },
+ PCI_DEVICE_ID_AMD_FE_GATE_700C) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD,
- PCI_DEVICE_ID_AMD_8131_BRIDGE) },
+ PCI_DEVICE_ID_AMD_8131_BRIDGE) },
{ PCI_DEVICE(PCI_VENDOR_ID_VIA,
PCI_DEVICE_ID_VIA_8385_0) },
{ },
@@ -13066,8 +12932,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
- }
- else {
+ } else {
struct pci_dev *bridge = NULL;
do {
@@ -13129,6 +12994,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
tp->dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
tp->dev->features |= NETIF_F_IPV6_CSUM;
+ tp->dev->features |= NETIF_F_GRO;
}
/* Determine TSO capabilities */
@@ -13189,8 +13055,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
tp->tg3_flags3 |= TG3_FLG3_USE_JUMBO_BDFLAG;
if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
- (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
- (tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG))
+ (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
+ (tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG))
tp->tg3_flags |= TG3_FLAG_JUMBO_CAPABLE;
pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
@@ -13224,7 +13090,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
if (!tp->pcix_cap) {
- pr_err("Cannot find PCI-X capability, aborting\n");
+ dev_err(&tp->pdev->dev,
+ "Cannot find PCI-X capability, aborting\n");
return -EIO;
}
@@ -13421,7 +13288,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
/* Force the chip into D0. */
err = tg3_set_power_state(tp, PCI_D0);
if (err) {
- pr_err("(%s) transition to D0 failed\n", pci_name(tp->pdev));
+ dev_err(&tp->pdev->dev, "Transition to D0 failed\n");
return err;
}
@@ -13595,13 +13462,12 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
err = tg3_phy_probe(tp);
if (err) {
- pr_err("(%s) phy probe failed, err %d\n",
- pci_name(tp->pdev), err);
+ dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
/* ... but do not return immediately ... */
tg3_mdio_fini(tp);
}
- tg3_read_partno(tp);
+ tg3_read_vpd(tp);
tg3_read_fw_ver(tp);
if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
@@ -13639,10 +13505,15 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
else
tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
- tp->rx_offset = NET_IP_ALIGN;
+ tp->rx_offset = NET_IP_ALIGN + TG3_RX_HEADROOM;
+ tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
- (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
- tp->rx_offset = 0;
+ (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
+ tp->rx_offset -= NET_IP_ALIGN;
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ tp->rx_copy_thresh = ~(u16)0;
+#endif
+ }
tp->rx_std_max_post = TG3_RX_RING_SIZE;
@@ -13965,11 +13836,10 @@ static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dm
}
pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
- if (to_device) {
+ if (to_device)
tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
- } else {
+ else
tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
- }
ret = -ENODEV;
for (i = 0; i < 40; i++) {
@@ -14105,8 +13975,9 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
/* Send the buffer to the chip. */
ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
if (ret) {
- pr_err("tg3_test_dma() Write the buffer failed %d\n",
- ret);
+ dev_err(&tp->pdev->dev,
+ "%s: Buffer write failed. err = %d\n",
+ __func__, ret);
break;
}
@@ -14116,8 +13987,9 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
u32 val;
tg3_read_mem(tp, 0x2100 + (i*4), &val);
if (le32_to_cpu(val) != p[i]) {
- pr_err(" tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n",
- val, i);
+ dev_err(&tp->pdev->dev,
+ "%s: Buffer corrupted on device! "
+ "(%d != %d)\n", __func__, val, i);
/* ret = -ENODEV here? */
}
p[i] = 0;
@@ -14126,9 +13998,8 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
/* Now read it back. */
ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
if (ret) {
- pr_err("tg3_test_dma() Read the buffer failed %d\n",
- ret);
-
+ dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
+ "err = %d\n", __func__, ret);
break;
}
@@ -14144,8 +14015,9 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
break;
} else {
- pr_err("tg3_test_dma() buffer corrupted on read back! (%d != %d)\n",
- p[i], i);
+ dev_err(&tp->pdev->dev,
+ "%s: Buffer corrupted on read back! "
+ "(%d != %d)\n", __func__, p[i], i);
ret = -ENODEV;
goto out;
}
@@ -14172,10 +14044,10 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
if (pci_dev_present(dma_wait_state_chipsets)) {
tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
- }
- else
+ } else {
/* Safe to use the calculated DMA boundary. */
tp->dma_rwctrl = saved_dma_rwctrl;
+ }
tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
}
@@ -14437,13 +14309,13 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
err = pci_enable_device(pdev);
if (err) {
- pr_err("Cannot enable PCI device, aborting\n");
+ dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
return err;
}
err = pci_request_regions(pdev, DRV_MODULE_NAME);
if (err) {
- pr_err("Cannot obtain PCI resources, aborting\n");
+ dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
goto err_out_disable_pdev;
}
@@ -14452,14 +14324,15 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
/* Find power-management capability. */
pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
if (pm_cap == 0) {
- pr_err("Cannot find PowerManagement capability, aborting\n");
+ dev_err(&pdev->dev,
+ "Cannot find Power Management capability, aborting\n");
err = -EIO;
goto err_out_free_res;
}
dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
if (!dev) {
- pr_err("Etherdev alloc failed, aborting\n");
+ dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
err = -ENOMEM;
goto err_out_free_res;
}
@@ -14509,7 +14382,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
tp->regs = pci_ioremap_bar(pdev, BAR_0);
if (!tp->regs) {
- netdev_err(dev, "Cannot map device registers, aborting\n");
+ dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
err = -ENOMEM;
goto err_out_free_dev;
}
@@ -14525,7 +14398,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
err = tg3_get_invariants(tp);
if (err) {
- netdev_err(dev, "Problem fetching invariants of chip, aborting\n");
+ dev_err(&pdev->dev,
+ "Problem fetching invariants of chip, aborting\n");
goto err_out_iounmap;
}
@@ -14560,7 +14434,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
err = pci_set_consistent_dma_mask(pdev,
persist_dma_mask);
if (err < 0) {
- netdev_err(dev, "Unable to obtain 64 bit DMA for consistent allocations\n");
+ dev_err(&pdev->dev, "Unable to obtain 64 bit "
+ "DMA for consistent allocations\n");
goto err_out_iounmap;
}
}
@@ -14568,7 +14443,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
if (err || dma_mask == DMA_BIT_MASK(32)) {
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
- netdev_err(dev, "No usable DMA configuration, aborting\n");
+ dev_err(&pdev->dev,
+ "No usable DMA configuration, aborting\n");
goto err_out_iounmap;
}
}
@@ -14617,14 +14493,16 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
err = tg3_get_device_address(tp);
if (err) {
- netdev_err(dev, "Could not obtain valid ethernet address, aborting\n");
+ dev_err(&pdev->dev,
+ "Could not obtain valid ethernet address, aborting\n");
goto err_out_iounmap;
}
if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
if (!tp->aperegs) {
- netdev_err(dev, "Cannot map APE registers, aborting\n");
+ dev_err(&pdev->dev,
+ "Cannot map APE registers, aborting\n");
err = -ENOMEM;
goto err_out_iounmap;
}
@@ -14648,7 +14526,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
err = tg3_test_dma(tp);
if (err) {
- netdev_err(dev, "DMA engine test failed, aborting\n");
+ dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
goto err_out_apeunmap;
}
@@ -14709,7 +14587,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
err = register_netdev(dev);
if (err) {
- netdev_err(dev, "Cannot register net device, aborting\n");
+ dev_err(&pdev->dev, "Cannot register net device, aborting\n");
goto err_out_apeunmap;
}
@@ -14722,11 +14600,12 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
struct phy_device *phydev;
phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
- netdev_info(dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
+ netdev_info(dev,
+ "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
phydev->drv->name, dev_name(&phydev->dev));
} else
- netdev_info(dev, "attached PHY is %s (%s Ethernet) (WireSpeed[%d])\n",
- tg3_phy_string(tp),
+ netdev_info(dev, "attached PHY is %s (%s Ethernet) "
+ "(WireSpeed[%d])\n", tg3_phy_string(tp),
((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
"10/100/1000Base-T")),
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 574a1cc4d353..ce9c4918c318 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -23,11 +23,8 @@
#define TG3_BDINFO_NIC_ADDR 0xcUL /* 32-bit */
#define TG3_BDINFO_SIZE 0x10UL
-#define RX_COPY_THRESHOLD 256
-
#define TG3_RX_INTERNAL_RING_SZ_5906 32
-#define RX_STD_MAX_SIZE 1536
#define RX_STD_MAX_SIZE_5705 512
#define RX_JUMBO_MAX_SIZE 0xdeadbeef /* XXX */
@@ -183,6 +180,7 @@
#define METAL_REV_B2 0x02
#define TG3PCI_DMA_RW_CTRL 0x0000006c
#define DMA_RWCTRL_DIS_CACHE_ALIGNMENT 0x00000001
+#define DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK 0x00000380
#define DMA_RWCTRL_READ_BNDRY_MASK 0x00000700
#define DMA_RWCTRL_READ_BNDRY_DISAB 0x00000000
#define DMA_RWCTRL_READ_BNDRY_16 0x00000100
@@ -252,7 +250,7 @@
/* 0x94 --> 0x98 unused */
#define TG3PCI_STD_RING_PROD_IDX 0x00000098 /* 64-bit */
#define TG3PCI_RCV_RET_RING_CON_IDX 0x000000a0 /* 64-bit */
-/* 0xa0 --> 0xb8 unused */
+/* 0xa8 --> 0xb8 unused */
#define TG3PCI_DUAL_MAC_CTRL 0x000000b8
#define DUAL_MAC_CTRL_CH_MASK 0x00000003
#define DUAL_MAC_CTRL_ID 0x00000004
@@ -1854,6 +1852,8 @@
#define TG3_PCIE_TLDLPL_PORT 0x00007c00
#define TG3_PCIE_PL_LO_PHYCTL1 0x00000004
#define TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN 0x00001000
+#define TG3_PCIE_PL_LO_PHYCTL5 0x00000014
+#define TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ 0x80000000
/* OTP bit definitions */
#define TG3_OTP_AGCTGT_MASK 0x000000e0
@@ -2082,7 +2082,7 @@
#define MII_TG3_DSP_AADJ1CH0 0x001f
#define MII_TG3_DSP_AADJ1CH3 0x601f
#define MII_TG3_DSP_AADJ1CH3_ADCCKADJ 0x0002
-#define MII_TG3_DSP_EXP8 0x0708
+#define MII_TG3_DSP_EXP8 0x0f08
#define MII_TG3_DSP_EXP8_REJ2MHz 0x0001
#define MII_TG3_DSP_EXP8_AEDW 0x0200
#define MII_TG3_DSP_EXP75 0x0f75
@@ -2512,7 +2512,7 @@ struct tg3_hw_stats {
*/
struct ring_info {
struct sk_buff *skb;
- DECLARE_PCI_UNMAP_ADDR(mapping)
+ DEFINE_DMA_UNMAP_ADDR(mapping);
};
struct tg3_config_info {
@@ -2561,7 +2561,7 @@ struct tg3_bufmgr_config {
struct tg3_ethtool_stats {
/* Statistics maintained by Receive MAC. */
- u64 rx_octets;
+ u64 rx_octets;
u64 rx_fragments;
u64 rx_ucast_packets;
u64 rx_mcast_packets;
@@ -2751,9 +2751,11 @@ struct tg3 {
struct tg3_napi napi[TG3_IRQ_MAX_VECS];
void (*write32_rx_mbox) (struct tg3 *, u32,
u32);
+ u32 rx_copy_thresh;
u32 rx_pending;
u32 rx_jumbo_pending;
u32 rx_std_max_post;
+ u32 rx_offset;
u32 rx_pkt_map_sz;
#if TG3_VLAN_TAG_USED
struct vlan_group *vlgrp;
@@ -2773,7 +2775,6 @@ struct tg3 {
unsigned long last_event_jiffies;
};
- u32 rx_offset;
u32 tg3_flags;
#define TG3_FLAG_TAGGED_STATUS 0x00000001
#define TG3_FLAG_TXD_MBOX_HWBUG 0x00000002
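
In the tg3.h hunk above, struct ring_info moves from the PCI-specific DECLARE_PCI_UNMAP_ADDR() to the bus-agnostic DEFINE_DMA_UNMAP_ADDR(). The field only takes up space on configurations that need to remember the DMA address for a later unmap, and it is read and written through the matching dma_unmap_addr()/dma_unmap_addr_set() helpers. A minimal sketch of how such a field is typically used (the struct and function names are illustrative; the macros and helpers are the generic DMA API):

    #include <linux/dma-mapping.h>

    struct example_ring_info {
            struct sk_buff *skb;
            DEFINE_DMA_UNMAP_ADDR(mapping);         /* dma_addr_t, possibly empty */
    };

    static int example_map(struct device *d, struct example_ring_info *ri,
                           struct sk_buff *skb)
    {
            dma_addr_t a = dma_map_single(d, skb->data, skb_headlen(skb),
                                          DMA_TO_DEVICE);

            if (dma_mapping_error(d, a))
                    return -ENOMEM;
            dma_unmap_addr_set(ri, mapping, a);     /* stash for the unmap below */
            ri->skb = skb;
            return 0;
    }

    static void example_unmap(struct device *d, struct example_ring_info *ri)
    {
            dma_unmap_single(d, dma_unmap_addr(ri, mapping),
                             skb_headlen(ri->skb), DMA_TO_DEVICE);
    }
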
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c
index 390540c101c7..ccee3eddc5f4 100644
--- a/drivers/net/tlan.c
+++ b/drivers/net/tlan.c
@@ -1034,7 +1034,7 @@ static void TLan_tx_timeout(struct net_device *dev)
TLan_ResetLists( dev );
TLan_ReadAndClearStats( dev, TLAN_IGNORE );
TLan_ResetAdapter( dev );
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue( dev );
}
@@ -1147,7 +1147,6 @@ static netdev_tx_t TLan_StartTx( struct sk_buff *skb, struct net_device *dev )
CIRC_INC( priv->txTail, TLAN_NUM_TX_LISTS );
- dev->trans_start = jiffies;
return NETDEV_TX_OK;
} /* TLan_StartTx */
@@ -1314,7 +1313,7 @@ static struct net_device_stats *TLan_GetStats( struct net_device *dev )
static void TLan_SetMulticastList( struct net_device *dev )
{
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
u32 hash1 = 0;
u32 hash2 = 0;
int i;
@@ -1336,12 +1335,12 @@ static void TLan_SetMulticastList( struct net_device *dev )
TLan_DioWrite32( dev->base_addr, TLAN_HASH_2, 0xFFFFFFFF );
} else {
i = 0;
- netdev_for_each_mc_addr(dmi, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
if ( i < 3 ) {
TLan_SetMac( dev, i + 1,
- (char *) &dmi->dmi_addr );
+ (char *) &ha->addr);
} else {
- offset = TLan_HashFunc( (u8 *) &dmi->dmi_addr );
+ offset = TLan_HashFunc((u8 *)&ha->addr);
if ( offset < 32 )
hash1 |= ( 1 << offset );
else
@@ -2464,7 +2463,7 @@ static void TLan_PhyPrint( struct net_device *dev )
printk( "TLAN: Device %s, Unmanaged PHY.\n", dev->name );
} else if ( phy <= TLAN_PHY_MAX_ADDR ) {
printk( "TLAN: Device %s, PHY 0x%02x.\n", dev->name, phy );
- printk( "TLAN: Off. +0 +1 +2 +3 \n" );
+ printk( "TLAN: Off. +0 +1 +2 +3\n" );
for ( i = 0; i < 0x20; i+= 4 ) {
printk( "TLAN: 0x%02x", i );
TLan_MiiReadReg( dev, phy, i, &data0 );
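
The TLan_SetMulticastList() hunk above is one instance of a conversion repeated throughout the rest of this patch: the old struct dev_mc_list walker with its dmi_addr field gives way to struct netdev_hw_addr and its addr[] field, still iterated with netdev_for_each_mc_addr(). A minimal sketch of the new-style loop (example_set_multicast and example_hash are hypothetical; the list API is real):

    static void example_set_multicast(struct net_device *dev)
    {
            struct netdev_hw_addr *ha;
            u32 hash[2] = { 0, 0 };

            netdev_for_each_mc_addr(ha, dev) {
                    /* ha->addr holds the six-byte multicast MAC address */
                    unsigned int bit = example_hash(ha->addr) & 0x3f;

                    hash[bit / 32] |= 1u << (bit % 32);
            }
            /* hash[0]/hash[1] would now be written to the filter registers */
    }
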
diff --git a/drivers/net/tokenring/3c359.c b/drivers/net/tokenring/3c359.c
index 7d7f3eef1ab3..10800f16a231 100644
--- a/drivers/net/tokenring/3c359.c
+++ b/drivers/net/tokenring/3c359.c
@@ -77,7 +77,7 @@ static char version[] __devinitdata =
#define FW_NAME "3com/3C359.bin"
MODULE_AUTHOR("Mike Phillips <mikep@linuxtr.net>") ;
-MODULE_DESCRIPTION("3Com 3C359 Velocity XL Token Ring Adapter Driver \n") ;
+MODULE_DESCRIPTION("3Com 3C359 Velocity XL Token Ring Adapter Driver\n") ;
MODULE_FIRMWARE(FW_NAME);
/* Module parameters */
@@ -163,19 +163,19 @@ static void print_tx_state(struct net_device *dev)
u8 __iomem *xl_mmio = xl_priv->xl_mmio ;
int i ;
- printk("tx_ring_head: %d, tx_ring_tail: %d, free_ent: %d \n",xl_priv->tx_ring_head,
+ printk("tx_ring_head: %d, tx_ring_tail: %d, free_ent: %d\n",xl_priv->tx_ring_head,
xl_priv->tx_ring_tail, xl_priv->free_ring_entries) ;
- printk("Ring , Address , FSH , DnNextPtr, Buffer, Buffer_Len \n");
+ printk("Ring , Address , FSH , DnNextPtr, Buffer, Buffer_Len\n");
for (i = 0; i < 16; i++) {
txd = &(xl_priv->xl_tx_ring[i]) ;
- printk("%d, %08lx, %08x, %08x, %08x, %08x \n", i, virt_to_bus(txd),
+ printk("%d, %08lx, %08x, %08x, %08x, %08x\n", i, virt_to_bus(txd),
txd->framestartheader, txd->dnnextptr, txd->buffer, txd->buffer_length ) ;
}
- printk("DNLISTPTR = %04x \n", readl(xl_mmio + MMIO_DNLISTPTR) );
+ printk("DNLISTPTR = %04x\n", readl(xl_mmio + MMIO_DNLISTPTR) );
- printk("DmaCtl = %04x \n", readl(xl_mmio + MMIO_DMA_CTRL) );
- printk("Queue status = %0x \n",netif_running(dev) ) ;
+ printk("DmaCtl = %04x\n", readl(xl_mmio + MMIO_DMA_CTRL) );
+ printk("Queue status = %0x\n",netif_running(dev) ) ;
}
static void print_rx_state(struct net_device *dev)
@@ -186,19 +186,19 @@ static void print_rx_state(struct net_device *dev)
u8 __iomem *xl_mmio = xl_priv->xl_mmio ;
int i ;
- printk("rx_ring_tail: %d \n", xl_priv->rx_ring_tail) ;
- printk("Ring , Address , FrameState , UPNextPtr, FragAddr, Frag_Len \n");
+ printk("rx_ring_tail: %d\n", xl_priv->rx_ring_tail);
+ printk("Ring , Address , FrameState , UPNextPtr, FragAddr, Frag_Len\n");
for (i = 0; i < 16; i++) {
/* rxd = (struct xl_rx_desc *)xl_priv->rx_ring_dma_addr + (i * sizeof(struct xl_rx_desc)) ; */
rxd = &(xl_priv->xl_rx_ring[i]) ;
- printk("%d, %08lx, %08x, %08x, %08x, %08x \n", i, virt_to_bus(rxd),
+ printk("%d, %08lx, %08x, %08x, %08x, %08x\n", i, virt_to_bus(rxd),
rxd->framestatus, rxd->upnextptr, rxd->upfragaddr, rxd->upfraglen ) ;
}
- printk("UPLISTPTR = %04x \n", readl(xl_mmio + MMIO_UPLISTPTR) );
+ printk("UPLISTPTR = %04x\n", readl(xl_mmio + MMIO_UPLISTPTR));
- printk("DmaCtl = %04x \n", readl(xl_mmio + MMIO_DMA_CTRL) );
- printk("Queue status = %0x \n",netif_running(dev) ) ;
+ printk("DmaCtl = %04x\n", readl(xl_mmio + MMIO_DMA_CTRL));
+ printk("Queue status = %0x\n",netif_running(dev));
}
#endif
@@ -391,7 +391,7 @@ static int __devinit xl_init(struct net_device *dev)
struct xl_private *xl_priv = netdev_priv(dev);
int err;
- printk(KERN_INFO "%s \n", version);
+ printk(KERN_INFO "%s\n", version);
printk(KERN_INFO "%s: I/O at %hx, MMIO at %p, using irq %d\n",
xl_priv->xl_card_name, (unsigned int)dev->base_addr ,xl_priv->xl_mmio, dev->irq);
@@ -463,7 +463,7 @@ static int xl_hw_reset(struct net_device *dev)
writel( (IO_WORD_READ | PMBAR),xl_mmio + MMIO_MAC_ACCESS_CMD);
#if XL_DEBUG
- printk(KERN_INFO "Read from PMBAR = %04x \n", readw(xl_mmio + MMIO_MACDATA)) ;
+ printk(KERN_INFO "Read from PMBAR = %04x\n", readw(xl_mmio + MMIO_MACDATA));
#endif
if ( readw( (xl_mmio + MMIO_MACDATA)) & PMB_CPHOLD ) {
@@ -591,9 +591,9 @@ static int xl_hw_reset(struct net_device *dev)
#if XL_DEBUG
writel(IO_WORD_READ | SWITCHSETTINGS, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
if ( readw(xl_mmio + MMIO_MACDATA) & 2) {
- printk(KERN_INFO "Default ring speed 4 mbps \n") ;
+ printk(KERN_INFO "Default ring speed 4 mbps\n");
} else {
- printk(KERN_INFO "Default ring speed 16 mbps \n") ;
+ printk(KERN_INFO "Default ring speed 16 mbps\n");
}
printk(KERN_INFO "%s: xl_priv->srb = %04x\n",xl_priv->xl_card_name, xl_priv->srb);
#endif
@@ -651,7 +651,7 @@ static int xl_open(struct net_device *dev)
if (open_err != 0) { /* Something went wrong with the open command */
if (open_err & 0x07) { /* Wrong speed, retry at different speed */
- printk(KERN_WARNING "%s: Open Error, retrying at different ringspeed \n", dev->name) ;
+ printk(KERN_WARNING "%s: Open Error, retrying at different ringspeed\n", dev->name);
switchsettings = switchsettings ^ 2 ;
xl_ee_write(dev,0x08,switchsettings) ;
xl_hw_reset(dev) ;
@@ -703,7 +703,7 @@ static int xl_open(struct net_device *dev)
}
if (i==0) {
- printk(KERN_WARNING "%s: Not enough memory to allocate rx buffers. Adapter disabled \n",dev->name) ;
+ printk(KERN_WARNING "%s: Not enough memory to allocate rx buffers. Adapter disabled\n",dev->name);
free_irq(dev->irq,dev) ;
kfree(xl_priv->xl_tx_ring);
kfree(xl_priv->xl_rx_ring);
@@ -853,7 +853,7 @@ static int xl_open_hw(struct net_device *dev)
writel( (MEM_WORD_READ | 0xD0000 | xl_priv->srb) + 12, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
xl_priv->arb = swab16(readw(xl_mmio + MMIO_MACDATA)) ;
- printk(", ARB: %04x \n",xl_priv->arb ) ;
+ printk(", ARB: %04x\n",xl_priv->arb );
writel( (MEM_WORD_READ | 0xD0000 | xl_priv->srb) + 14, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
vsoff = swab16(readw(xl_mmio + MMIO_MACDATA)) ;
@@ -867,7 +867,7 @@ static int xl_open_hw(struct net_device *dev)
ver_str[i] = readb(xl_mmio + MMIO_MACDATA) ;
}
ver_str[i] = '\0' ;
- printk(KERN_INFO "%s: Microcode version String: %s \n",dev->name,ver_str);
+ printk(KERN_INFO "%s: Microcode version String: %s\n",dev->name,ver_str);
}
/*
@@ -991,7 +991,7 @@ static void xl_rx(struct net_device *dev)
skb = dev_alloc_skb(xl_priv->pkt_buf_sz) ;
if (skb==NULL) { /* Still need to fix the rx ring */
- printk(KERN_WARNING "%s: dev_alloc_skb failed in rx, single buffer \n",dev->name) ;
+ printk(KERN_WARNING "%s: dev_alloc_skb failed in rx, single buffer\n",dev->name);
adv_rx_ring(dev) ;
dev->stats.rx_dropped++ ;
writel(ACK_INTERRUPT | UPCOMPACK | LATCH_ACK , xl_mmio + MMIO_COMMAND) ;
@@ -1092,7 +1092,7 @@ static irqreturn_t xl_interrupt(int irq, void *dev_id)
*/
if (intstatus == 0x0001) {
writel(ACK_INTERRUPT | LATCH_ACK, xl_mmio + MMIO_COMMAND) ;
- printk(KERN_INFO "%s: 00001 int received \n",dev->name) ;
+ printk(KERN_INFO "%s: 00001 int received\n",dev->name);
} else {
if (intstatus & (HOSTERRINT | SRBRINT | ARBCINT | UPCOMPINT | DNCOMPINT | HARDERRINT | (1<<8) | TXUNDERRUN | ASBFINT)) {
@@ -1103,9 +1103,9 @@ static irqreturn_t xl_interrupt(int irq, void *dev_id)
*/
if (intstatus & HOSTERRINT) {
- printk(KERN_WARNING "%s: Host Error, performing global reset, intstatus = %04x \n",dev->name,intstatus) ;
+ printk(KERN_WARNING "%s: Host Error, performing global reset, intstatus = %04x\n",dev->name,intstatus);
writew( GLOBAL_RESET, xl_mmio + MMIO_COMMAND ) ;
- printk(KERN_WARNING "%s: Resetting hardware: \n", dev->name);
+ printk(KERN_WARNING "%s: Resetting hardware:\n", dev->name);
netif_stop_queue(dev) ;
xl_freemem(dev) ;
free_irq(dev->irq,dev);
@@ -1128,7 +1128,7 @@ static irqreturn_t xl_interrupt(int irq, void *dev_id)
Must put a timeout check here ! */
/* Empty Loop */
}
- printk(KERN_WARNING "%s: TX Underrun received \n",dev->name) ;
+ printk(KERN_WARNING "%s: TX Underrun received\n",dev->name);
writel(ACK_INTERRUPT | LATCH_ACK, xl_mmio + MMIO_COMMAND) ;
} /* TxUnderRun */
@@ -1157,13 +1157,13 @@ static irqreturn_t xl_interrupt(int irq, void *dev_id)
macstatus = readw(xl_mmio + MMIO_MACDATA) ;
printk(KERN_WARNING "%s: MacStatusError, details: ", dev->name);
if (macstatus & (1<<14))
- printk(KERN_WARNING "tchk error: Unrecoverable error \n") ;
+ printk(KERN_WARNING "tchk error: Unrecoverable error\n");
if (macstatus & (1<<3))
- printk(KERN_WARNING "eint error: Internal watchdog timer expired \n") ;
+ printk(KERN_WARNING "eint error: Internal watchdog timer expired\n");
if (macstatus & (1<<2))
- printk(KERN_WARNING "aint error: Host tried to perform invalid operation \n") ;
+ printk(KERN_WARNING "aint error: Host tried to perform invalid operation\n");
printk(KERN_WARNING "Instatus = %02x, macstatus = %02x\n",intstatus,macstatus) ;
- printk(KERN_WARNING "%s: Resetting hardware: \n", dev->name);
+ printk(KERN_WARNING "%s: Resetting hardware:\n", dev->name);
netif_stop_queue(dev) ;
xl_freemem(dev) ;
free_irq(dev->irq,dev);
@@ -1175,7 +1175,7 @@ static irqreturn_t xl_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
} else {
- printk(KERN_WARNING "%s: Received Unknown interrupt : %04x \n", dev->name, intstatus) ;
+ printk(KERN_WARNING "%s: Received Unknown interrupt : %04x\n", dev->name, intstatus);
writel(ACK_INTERRUPT | LATCH_ACK, xl_mmio + MMIO_COMMAND) ;
}
}
@@ -1350,11 +1350,11 @@ static int xl_close(struct net_device *dev)
writel(MEM_BYTE_READ | 0xd0000 | xl_priv->srb, xl_mmio + MMIO_MAC_ACCESS_CMD);
if (readb(xl_mmio + MMIO_MACDATA) != CLOSE_NIC) {
- printk(KERN_INFO "%s: CLOSE_NIC did not get a CLOSE_NIC response \n",dev->name) ;
+ printk(KERN_INFO "%s: CLOSE_NIC did not get a CLOSE_NIC response\n",dev->name);
} else {
writel((MEM_BYTE_READ | 0xd0000 | xl_priv->srb) +2, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
if (readb(xl_mmio + MMIO_MACDATA)==0) {
- printk(KERN_INFO "%s: Adapter has been closed \n",dev->name) ;
+ printk(KERN_INFO "%s: Adapter has been closed\n",dev->name);
writew(ACK_INTERRUPT | SRBRACK | LATCH_ACK, xl_mmio + MMIO_COMMAND) ;
xl_freemem(dev) ;
@@ -1391,7 +1391,7 @@ static int xl_close(struct net_device *dev)
static void xl_set_rx_mode(struct net_device *dev)
{
struct xl_private *xl_priv = netdev_priv(dev);
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
unsigned char dev_mc_address[4] ;
u16 options ;
@@ -1408,11 +1408,11 @@ static void xl_set_rx_mode(struct net_device *dev)
dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ;
- netdev_for_each_mc_addr(dmi, dev) {
- dev_mc_address[0] |= dmi->dmi_addr[2] ;
- dev_mc_address[1] |= dmi->dmi_addr[3] ;
- dev_mc_address[2] |= dmi->dmi_addr[4] ;
- dev_mc_address[3] |= dmi->dmi_addr[5] ;
+ netdev_for_each_mc_addr(ha, dev) {
+ dev_mc_address[0] |= ha->addr[2];
+ dev_mc_address[1] |= ha->addr[3];
+ dev_mc_address[2] |= ha->addr[4];
+ dev_mc_address[3] |= ha->addr[5];
}
if (memcmp(xl_priv->xl_functional_addr,dev_mc_address,4) != 0) { /* Options have changed, run the command */
@@ -1447,11 +1447,11 @@ static void xl_srb_bh(struct net_device *dev)
printk(KERN_INFO "%s: Command: %d - Invalid Command code\n",dev->name,srb_cmd) ;
break ;
case 4:
- printk(KERN_INFO "%s: Command: %d - Adapter is closed, must be open for this command \n",dev->name,srb_cmd) ;
+ printk(KERN_INFO "%s: Command: %d - Adapter is closed, must be open for this command\n",dev->name,srb_cmd);
break ;
case 6:
- printk(KERN_INFO "%s: Command: %d - Options Invalid for command \n",dev->name,srb_cmd) ;
+ printk(KERN_INFO "%s: Command: %d - Options Invalid for command\n",dev->name,srb_cmd);
break ;
case 0: /* Successful command execution */
@@ -1472,11 +1472,11 @@ static void xl_srb_bh(struct net_device *dev)
break ;
case SET_FUNC_ADDRESS:
if(xl_priv->xl_message_level)
- printk(KERN_INFO "%s: Functional Address Set \n",dev->name) ;
+ printk(KERN_INFO "%s: Functional Address Set\n",dev->name);
break ;
case CLOSE_NIC:
if(xl_priv->xl_message_level)
- printk(KERN_INFO "%s: Received CLOSE_NIC interrupt in interrupt handler \n",dev->name) ;
+ printk(KERN_INFO "%s: Received CLOSE_NIC interrupt in interrupt handler\n",dev->name);
break ;
case SET_MULTICAST_MODE:
if(xl_priv->xl_message_level)
@@ -1485,9 +1485,9 @@ static void xl_srb_bh(struct net_device *dev)
case SET_RECEIVE_MODE:
if(xl_priv->xl_message_level) {
if (xl_priv->xl_copy_all_options == 0x0004)
- printk(KERN_INFO "%s: Entering promiscuous mode \n", dev->name) ;
+ printk(KERN_INFO "%s: Entering promiscuous mode\n", dev->name);
else
- printk(KERN_INFO "%s: Entering normal receive mode \n",dev->name) ;
+ printk(KERN_INFO "%s: Entering normal receive mode\n",dev->name);
}
break ;
@@ -1557,20 +1557,20 @@ static void xl_arb_cmd(struct net_device *dev)
xl_freemem(dev) ;
free_irq(dev->irq,dev);
- printk(KERN_WARNING "%s: Adapter has been closed \n", dev->name) ;
+ printk(KERN_WARNING "%s: Adapter has been closed\n", dev->name);
} /* If serious error */
if (xl_priv->xl_message_level) {
if (lan_status_diff & LSC_SIG_LOSS)
- printk(KERN_WARNING "%s: No receive signal detected \n", dev->name) ;
+ printk(KERN_WARNING "%s: No receive signal detected\n", dev->name);
if (lan_status_diff & LSC_HARD_ERR)
- printk(KERN_INFO "%s: Beaconing \n",dev->name);
+ printk(KERN_INFO "%s: Beaconing\n",dev->name);
if (lan_status_diff & LSC_SOFT_ERR)
- printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame \n",dev->name);
+ printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n",dev->name);
if (lan_status_diff & LSC_TRAN_BCN)
printk(KERN_INFO "%s: We are tranmitting the beacon, aaah\n",dev->name);
if (lan_status_diff & LSC_SS)
- printk(KERN_INFO "%s: Single Station on the ring \n", dev->name);
+ printk(KERN_INFO "%s: Single Station on the ring\n", dev->name);
if (lan_status_diff & LSC_RING_REC)
printk(KERN_INFO "%s: Ring recovery ongoing\n",dev->name);
if (lan_status_diff & LSC_FDX_MODE)
@@ -1579,7 +1579,7 @@ static void xl_arb_cmd(struct net_device *dev)
if (lan_status_diff & LSC_CO) {
if (xl_priv->xl_message_level)
- printk(KERN_INFO "%s: Counter Overflow \n", dev->name);
+ printk(KERN_INFO "%s: Counter Overflow\n", dev->name);
/* Issue READ.LOG command */
xl_srb_cmd(dev, READ_LOG) ;
}
@@ -1595,7 +1595,7 @@ static void xl_arb_cmd(struct net_device *dev)
} /* Lan.change.status */
else if ( arb_cmd == RECEIVE_DATA) { /* Received.Data */
#if XL_DEBUG
- printk(KERN_INFO "Received.Data \n") ;
+ printk(KERN_INFO "Received.Data\n");
#endif
writel( ((MEM_WORD_READ | 0xD0000 | xl_priv->arb) + 6), xl_mmio + MMIO_MAC_ACCESS_CMD) ;
xl_priv->mac_buffer = swab16(readw(xl_mmio + MMIO_MACDATA)) ;
@@ -1630,7 +1630,7 @@ static void xl_arb_cmd(struct net_device *dev)
xl_asb_cmd(dev) ;
} else {
- printk(KERN_WARNING "%s: Received unknown arb (xl_priv) command: %02x \n",dev->name,arb_cmd) ;
+ printk(KERN_WARNING "%s: Received unknown arb (xl_priv) command: %02x\n",dev->name,arb_cmd);
}
/* Acknowledge the arb interrupt */
@@ -1687,13 +1687,13 @@ static void xl_asb_bh(struct net_device *dev)
ret_code = readb(xl_mmio + MMIO_MACDATA) ;
switch (ret_code) {
case 0x01:
- printk(KERN_INFO "%s: ASB Command, unrecognized command code \n",dev->name) ;
+ printk(KERN_INFO "%s: ASB Command, unrecognized command code\n",dev->name);
break ;
case 0x26:
- printk(KERN_INFO "%s: ASB Command, unexpected receive buffer \n", dev->name) ;
+ printk(KERN_INFO "%s: ASB Command, unexpected receive buffer\n", dev->name);
break ;
case 0x40:
- printk(KERN_INFO "%s: ASB Command, Invalid Station ID \n", dev->name) ;
+ printk(KERN_INFO "%s: ASB Command, Invalid Station ID\n", dev->name);
break ;
}
xl_priv->asb_queued = 0 ;
diff --git a/drivers/net/tokenring/ibmtr.c b/drivers/net/tokenring/ibmtr.c
index 1a0967246e2f..91e6c78271a3 100644
--- a/drivers/net/tokenring/ibmtr.c
+++ b/drivers/net/tokenring/ibmtr.c
@@ -986,7 +986,7 @@ static void open_sap(unsigned char type, struct net_device *dev)
static void tok_set_multicast_list(struct net_device *dev)
{
struct tok_info *ti = netdev_priv(dev);
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
unsigned char address[4];
int i;
@@ -995,11 +995,11 @@ static void tok_set_multicast_list(struct net_device *dev)
/*BMS ifconfig tr down or hot unplug a PCMCIA card ??hownowbrowncow*/
if (/*BMSHELPdev->start == 0 ||*/ ti->open_status != OPEN) return;
address[0] = address[1] = address[2] = address[3] = 0;
- netdev_for_each_mc_addr(mclist, dev) {
- address[0] |= mclist->dmi_addr[2];
- address[1] |= mclist->dmi_addr[3];
- address[2] |= mclist->dmi_addr[4];
- address[3] |= mclist->dmi_addr[5];
+ netdev_for_each_mc_addr(ha, dev) {
+ address[0] |= ha->addr[2];
+ address[1] |= ha->addr[3];
+ address[2] |= ha->addr[4];
+ address[3] |= ha->addr[5];
}
SET_PAGE(ti->srb_page);
for (i = 0; i < sizeof(struct srb_set_funct_addr); i++)
@@ -1041,7 +1041,6 @@ static netdev_tx_t tok_send_packet(struct sk_buff *skb,
writew(ti->exsap_station_id, ti->srb + STATION_ID_OFST);
writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
spin_unlock_irqrestore(&(ti->lock), flags);
- dev->trans_start = jiffies;
return NETDEV_TX_OK;
}
diff --git a/drivers/net/tokenring/lanstreamer.c b/drivers/net/tokenring/lanstreamer.c
index 7a5fbf5a9d71..5bd140704533 100644
--- a/drivers/net/tokenring/lanstreamer.c
+++ b/drivers/net/tokenring/lanstreamer.c
@@ -358,7 +358,7 @@ static int __devinit streamer_init_one(struct pci_dev *pdev,
pcr |= PCI_COMMAND_SERR;
pci_write_config_word (pdev, PCI_COMMAND, pcr);
- printk("%s \n", version);
+ printk("%s\n", version);
printk("%s: %s. I/O at %hx, MMIO at %p, using irq %d\n",dev->name,
streamer_priv->streamer_card_name,
(unsigned int) dev->base_addr,
@@ -651,7 +651,7 @@ static int streamer_open(struct net_device *dev)
#if STREAMER_DEBUG
writew(readw(streamer_mmio + LAPWWO),
streamer_mmio + LAPA);
- printk("srb open request: \n");
+ printk("srb open request:\n");
for (i = 0; i < 16; i++) {
printk("%x:", ntohs(readw(streamer_mmio + LAPDINC)));
}
@@ -701,7 +701,7 @@ static int streamer_open(struct net_device *dev)
if (srb_word != 0) {
if (srb_word == 0x07) {
if (!streamer_priv->streamer_ring_speed && open_finished) { /* Autosense , first time around */
- printk(KERN_WARNING "%s: Retrying at different ring speed \n",
+ printk(KERN_WARNING "%s: Retrying at different ring speed\n",
dev->name);
open_finished = 0;
} else {
@@ -717,7 +717,7 @@ static int streamer_open(struct net_device *dev)
((error_code & 0x0f) == 0x0d))
{
printk(KERN_WARNING "%s: Tried to autosense ring speed with no monitors present\n", dev->name);
- printk(KERN_WARNING "%s: Please try again with a specified ring speed \n", dev->name);
+ printk(KERN_WARNING "%s: Please try again with a specified ring speed\n", dev->name);
free_irq(dev->irq, dev);
return -EIO;
}
@@ -923,7 +923,7 @@ static void streamer_rx(struct net_device *dev)
if (rx_desc->status & 0x7E830000) { /* errors */
if (streamer_priv->streamer_message_level) {
- printk(KERN_WARNING "%s: Rx Error %x \n",
+ printk(KERN_WARNING "%s: Rx Error %x\n",
dev->name, rx_desc->status);
}
} else { /* received without errors */
@@ -936,7 +936,7 @@ static void streamer_rx(struct net_device *dev)
if (skb == NULL)
{
- printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers. \n", dev->name);
+ printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers.\n", dev->name);
dev->stats.rx_dropped++;
} else { /* we allocated an skb OK */
if (buffer_cnt == 1) {
@@ -1267,7 +1267,7 @@ static void streamer_set_rx_mode(struct net_device *dev)
netdev_priv(dev);
__u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio;
__u8 options = 0;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
unsigned char dev_mc_address[5];
writel(streamer_priv->srb, streamer_mmio + LAPA);
@@ -1303,11 +1303,11 @@ static void streamer_set_rx_mode(struct net_device *dev)
writel(streamer_priv->srb,streamer_mmio+LAPA);
dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ;
- netdev_for_each_mc_addr(dmi, dev) {
- dev_mc_address[0] |= dmi->dmi_addr[2] ;
- dev_mc_address[1] |= dmi->dmi_addr[3] ;
- dev_mc_address[2] |= dmi->dmi_addr[4] ;
- dev_mc_address[3] |= dmi->dmi_addr[5] ;
+ netdev_for_each_mc_addr(ha, dev) {
+ dev_mc_address[0] |= ha->addr[2];
+ dev_mc_address[1] |= ha->addr[3];
+ dev_mc_address[2] |= ha->addr[4];
+ dev_mc_address[3] |= ha->addr[5];
}
writew(htons(SRB_SET_FUNC_ADDRESS << 8),streamer_mmio+LAPDINC);
@@ -1364,7 +1364,7 @@ static void streamer_srb_bh(struct net_device *dev)
case 0x00:
break;
case 0x01:
- printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name);
+ printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name);
break;
case 0x04:
printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name);
@@ -1392,13 +1392,13 @@ static void streamer_srb_bh(struct net_device *dev)
case 0x00:
break;
case 0x01:
- printk(KERN_WARNING "%s: Unrecognized srb command \n", dev->name);
+ printk(KERN_WARNING "%s: Unrecognized srb command\n", dev->name);
break;
case 0x04:
printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name);
break;
case 0x39: /* Must deal with this if individual multicast addresses used */
- printk(KERN_INFO "%s: Group address not found \n", dev->name);
+ printk(KERN_INFO "%s: Group address not found\n", dev->name);
break;
default:
break;
@@ -1414,10 +1414,10 @@ static void streamer_srb_bh(struct net_device *dev)
switch (srb_word) {
case 0x00:
if (streamer_priv->streamer_message_level)
- printk(KERN_INFO "%s: Functional Address Mask Set \n", dev->name);
+ printk(KERN_INFO "%s: Functional Address Mask Set\n", dev->name);
break;
case 0x01:
- printk(KERN_WARNING "%s: Unrecognized srb command \n", dev->name);
+ printk(KERN_WARNING "%s: Unrecognized srb command\n", dev->name);
break;
case 0x04:
printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name);
@@ -1448,7 +1448,7 @@ static void streamer_srb_bh(struct net_device *dev)
}
break;
case 0x01:
- printk(KERN_WARNING "%s: Unrecognized srb command \n", dev->name);
+ printk(KERN_WARNING "%s: Unrecognized srb command\n", dev->name);
break;
case 0x04:
printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name);
@@ -1467,7 +1467,7 @@ static void streamer_srb_bh(struct net_device *dev)
printk(KERN_INFO "%s: Read Source Routing Counters issued\n", dev->name);
break;
case 0x01:
- printk(KERN_WARNING "%s: Unrecognized srb command \n", dev->name);
+ printk(KERN_WARNING "%s: Unrecognized srb command\n", dev->name);
break;
case 0x04:
printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name);
@@ -1556,7 +1556,7 @@ static void streamer_arb_cmd(struct net_device *dev)
(streamer_mmio + LAPDINC)));
}
- printk("next %04x, fs %02x, len %04x \n", next,
+ printk("next %04x, fs %02x, len %04x\n", next,
status, len);
}
#endif
@@ -1593,7 +1593,7 @@ static void streamer_arb_cmd(struct net_device *dev)
mac_frame->protocol = tr_type_trans(mac_frame, dev);
#if STREAMER_NETWORK_MONITOR
- printk(KERN_WARNING "%s: Received MAC Frame, details: \n",
+ printk(KERN_WARNING "%s: Received MAC Frame, details:\n",
dev->name);
mac_hdr = tr_hdr(mac_frame);
printk(KERN_WARNING
@@ -1669,15 +1669,15 @@ drop_frame:
/* If serious error */
if (streamer_priv->streamer_message_level) {
if (lan_status_diff & LSC_SIG_LOSS)
- printk(KERN_WARNING "%s: No receive signal detected \n", dev->name);
+ printk(KERN_WARNING "%s: No receive signal detected\n", dev->name);
if (lan_status_diff & LSC_HARD_ERR)
- printk(KERN_INFO "%s: Beaconing \n", dev->name);
+ printk(KERN_INFO "%s: Beaconing\n", dev->name);
if (lan_status_diff & LSC_SOFT_ERR)
- printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame \n", dev->name);
+ printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n", dev->name);
if (lan_status_diff & LSC_TRAN_BCN)
printk(KERN_INFO "%s: We are tranmitting the beacon, aaah\n", dev->name);
if (lan_status_diff & LSC_SS)
- printk(KERN_INFO "%s: Single Station on the ring \n", dev->name);
+ printk(KERN_INFO "%s: Single Station on the ring\n", dev->name);
if (lan_status_diff & LSC_RING_REC)
printk(KERN_INFO "%s: Ring recovery ongoing\n", dev->name);
if (lan_status_diff & LSC_FDX_MODE)
@@ -1686,7 +1686,7 @@ drop_frame:
if (lan_status_diff & LSC_CO) {
if (streamer_priv->streamer_message_level)
- printk(KERN_INFO "%s: Counter Overflow \n", dev->name);
+ printk(KERN_INFO "%s: Counter Overflow\n", dev->name);
/* Issue READ.LOG command */
@@ -1716,7 +1716,7 @@ drop_frame:
streamer_priv->streamer_lan_status = lan_status;
} /* Lan.change.status */
else
- printk(KERN_WARNING "%s: Unknown arb command \n", dev->name);
+ printk(KERN_WARNING "%s: Unknown arb command\n", dev->name);
}
static void streamer_asb_bh(struct net_device *dev)
@@ -1747,10 +1747,10 @@ static void streamer_asb_bh(struct net_device *dev)
rc=ntohs(readw(streamer_mmio+LAPD)) >> 8;
switch (rc) {
case 0x01:
- printk(KERN_WARNING "%s: Unrecognized command code \n", dev->name);
+ printk(KERN_WARNING "%s: Unrecognized command code\n", dev->name);
break;
case 0x26:
- printk(KERN_WARNING "%s: Unrecognized buffer address \n", dev->name);
+ printk(KERN_WARNING "%s: Unrecognized buffer address\n", dev->name);
break;
case 0xFF:
/* Valid response, everything should be ok again */
diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
index 53f631ebb162..785ad1a2157b 100644
--- a/drivers/net/tokenring/madgemc.c
+++ b/drivers/net/tokenring/madgemc.c
@@ -109,7 +109,6 @@ static void madgemc_sifwriteb(struct net_device *dev, unsigned short val, unsign
SIFWRITEB(val, reg);
madgemc_setregpage(dev, 0);
}
- return;
}
/*
@@ -140,7 +139,6 @@ static void madgemc_sifwritew(struct net_device *dev, unsigned short val, unsign
SIFWRITEW(val, reg);
madgemc_setregpage(dev, 0);
}
- return;
}
static struct net_device_ops madgemc_netdev_ops __read_mostly;
@@ -505,8 +503,6 @@ static void madgemc_setregpage(struct net_device *dev, int page)
dev->base_addr + MC_CONTROL_REG1);
}
reg1 = inb(dev->base_addr + MC_CONTROL_REG1);
-
- return;
}
/*
@@ -527,8 +523,6 @@ static void madgemc_setsifsel(struct net_device *dev, int val)
dev->base_addr + MC_CONTROL_REG0);
}
reg0 = inb(dev->base_addr + MC_CONTROL_REG0);
-
- return;
}
/*
@@ -550,8 +544,6 @@ static void madgemc_setint(struct net_device *dev, int val)
outb(reg1 | MC_CONTROL_REG1_SINTEN,
dev->base_addr + MC_CONTROL_REG1);
}
-
- return;
}
/*
@@ -594,8 +586,6 @@ static void madgemc_chipset_close(struct net_device *dev)
madgemc_setint(dev, 0);
/* unmap SIF registers */
madgemc_setsifsel(dev, 0);
-
- return;
}
/*
@@ -656,8 +646,6 @@ static void madgemc_read_rom(struct net_device *dev, struct card_info *card)
/* Restore original register values */
outb(reg0, ioaddr + MC_CONTROL_REG0);
outb(reg1, ioaddr + MC_CONTROL_REG1);
-
- return;
}
static int madgemc_open(struct net_device *dev)
diff --git a/drivers/net/tokenring/olympic.c b/drivers/net/tokenring/olympic.c
index 3a25e0434ae2..3d2fbe60b46e 100644
--- a/drivers/net/tokenring/olympic.c
+++ b/drivers/net/tokenring/olympic.c
@@ -302,7 +302,7 @@ static int olympic_init(struct net_device *dev)
olympic_priv=netdev_priv(dev);
olympic_mmio=olympic_priv->olympic_mmio;
- printk("%s \n", version);
+ printk("%s\n", version);
printk("%s. I/O at %hx, MMIO at %p, LAP at %p, using irq %d\n", olympic_priv->olympic_card_name, (unsigned int) dev->base_addr,olympic_priv->olympic_mmio, olympic_priv->olympic_lap, dev->irq);
writel(readl(olympic_mmio+BCTL) | BCTL_SOFTRESET,olympic_mmio+BCTL);
@@ -468,7 +468,7 @@ static int olympic_open(struct net_device *dev)
#if OLYMPIC_DEBUG
printk("LAPWWO: %x, LAPA: %x\n",readw(olympic_mmio+LAPWWO), readl(olympic_mmio+LAPA));
printk("SISR Mask = %04x\n", readl(olympic_mmio+SISR_MASK));
- printk("Before the open command \n");
+ printk("Before the open command\n");
#endif
do {
memset_io(init_srb,0,SRB_COMMAND_SIZE);
@@ -520,7 +520,7 @@ static int olympic_open(struct net_device *dev)
break;
}
if (time_after(jiffies, t + 10*HZ)) {
- printk(KERN_WARNING "%s: SRB timed out. \n",dev->name) ;
+ printk(KERN_WARNING "%s: SRB timed out.\n",dev->name);
olympic_priv->srb_queued=0;
break ;
}
@@ -549,7 +549,7 @@ static int olympic_open(struct net_device *dev)
break;
case 0x07:
if (!olympic_priv->olympic_ring_speed && open_finished) { /* Autosense , first time around */
- printk(KERN_WARNING "%s: Retrying at different ring speed \n", dev->name);
+ printk(KERN_WARNING "%s: Retrying at different ring speed\n", dev->name);
open_finished = 0 ;
continue;
}
@@ -558,7 +558,7 @@ static int olympic_open(struct net_device *dev)
if (!olympic_priv->olympic_ring_speed && ((err & 0x0f) == 0x0d)) {
printk(KERN_WARNING "%s: Tried to autosense ring speed with no monitors present\n",dev->name);
- printk(KERN_WARNING "%s: Please try again with a specified ring speed \n",dev->name);
+ printk(KERN_WARNING "%s: Please try again with a specified ring speed\n",dev->name);
} else {
printk(KERN_WARNING "%s: %s - %s\n", dev->name,
open_maj_error[(err & 0xf0) >> 4],
@@ -759,7 +759,7 @@ static void olympic_rx(struct net_device *dev)
olympic_priv->rx_status_last_received++ ;
olympic_priv->rx_status_last_received &= (OLYMPIC_RX_RING_SIZE -1);
#if OLYMPIC_DEBUG
- printk("rx status: %x rx len: %x \n", le32_to_cpu(rx_status->status_buffercnt), le32_to_cpu(rx_status->fragmentcnt_framelen));
+ printk("rx status: %x rx len: %x\n", le32_to_cpu(rx_status->status_buffercnt), le32_to_cpu(rx_status->fragmentcnt_framelen));
#endif
length = le32_to_cpu(rx_status->fragmentcnt_framelen) & 0xffff;
buffer_cnt = le32_to_cpu(rx_status->status_buffercnt) & 0xffff;
@@ -774,15 +774,15 @@ static void olympic_rx(struct net_device *dev)
if (l_status_buffercnt & 0x3B000000) {
if (olympic_priv->olympic_message_level) {
if (l_status_buffercnt & (1<<29)) /* Rx Frame Truncated */
- printk(KERN_WARNING "%s: Rx Frame Truncated \n",dev->name);
+ printk(KERN_WARNING "%s: Rx Frame Truncated\n",dev->name);
if (l_status_buffercnt & (1<<28)) /*Rx receive overrun */
- printk(KERN_WARNING "%s: Rx Frame Receive overrun \n",dev->name);
+ printk(KERN_WARNING "%s: Rx Frame Receive overrun\n",dev->name);
if (l_status_buffercnt & (1<<27)) /* No receive buffers */
- printk(KERN_WARNING "%s: No receive buffers \n",dev->name);
+ printk(KERN_WARNING "%s: No receive buffers\n",dev->name);
if (l_status_buffercnt & (1<<25)) /* Receive frame error detect */
- printk(KERN_WARNING "%s: Receive frame error detect \n",dev->name);
+ printk(KERN_WARNING "%s: Receive frame error detect\n",dev->name);
if (l_status_buffercnt & (1<<24)) /* Received Error Detect */
- printk(KERN_WARNING "%s: Received Error Detect \n",dev->name);
+ printk(KERN_WARNING "%s: Received Error Detect\n",dev->name);
}
olympic_priv->rx_ring_last_received += i ;
olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1) ;
@@ -796,7 +796,7 @@ static void olympic_rx(struct net_device *dev)
}
if (skb == NULL) {
- printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers. \n",dev->name) ;
+ printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers.\n",dev->name) ;
dev->stats.rx_dropped++;
/* Update counters even though we don't transfer the frame */
olympic_priv->rx_ring_last_received += i ;
@@ -1101,7 +1101,7 @@ static int olympic_close(struct net_device *dev)
}
if (t == 0) {
- printk(KERN_WARNING "%s: SRB timed out. May not be fatal. \n",dev->name) ;
+ printk(KERN_WARNING "%s: SRB timed out. May not be fatal.\n",dev->name);
}
olympic_priv->srb_queued=0;
}
@@ -1139,7 +1139,7 @@ static void olympic_set_rx_mode(struct net_device *dev)
u8 __iomem *olympic_mmio = olympic_priv->olympic_mmio ;
u8 options = 0;
u8 __iomem *srb;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
unsigned char dev_mc_address[4] ;
writel(olympic_priv->srb,olympic_mmio+LAPA);
@@ -1177,11 +1177,11 @@ static void olympic_set_rx_mode(struct net_device *dev)
dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ;
- netdev_for_each_mc_addr(dmi, dev) {
- dev_mc_address[0] |= dmi->dmi_addr[2] ;
- dev_mc_address[1] |= dmi->dmi_addr[3] ;
- dev_mc_address[2] |= dmi->dmi_addr[4] ;
- dev_mc_address[3] |= dmi->dmi_addr[5] ;
+ netdev_for_each_mc_addr(ha, dev) {
+ dev_mc_address[0] |= ha->addr[2];
+ dev_mc_address[1] |= ha->addr[3];
+ dev_mc_address[2] |= ha->addr[4];
+ dev_mc_address[3] |= ha->addr[5];
}
writeb(SRB_SET_FUNC_ADDRESS,srb+0);
@@ -1239,7 +1239,7 @@ static void olympic_srb_bh(struct net_device *dev)
case 0x00:
break ;
case 0x01:
- printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ;
+ printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name);
break ;
case 0x04:
printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name);
@@ -1266,13 +1266,13 @@ static void olympic_srb_bh(struct net_device *dev)
case 0x00:
break ;
case 0x01:
- printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ;
+ printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name);
break ;
case 0x04:
printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
break ;
case 0x39: /* Must deal with this if individual multicast addresses used */
- printk(KERN_INFO "%s: Group address not found \n",dev->name);
+ printk(KERN_INFO "%s: Group address not found\n",dev->name);
break ;
default:
break ;
@@ -1287,10 +1287,10 @@ static void olympic_srb_bh(struct net_device *dev)
switch (readb(srb+2)) {
case 0x00:
if (olympic_priv->olympic_message_level)
- printk(KERN_INFO "%s: Functional Address Mask Set \n",dev->name) ;
+ printk(KERN_INFO "%s: Functional Address Mask Set\n",dev->name);
break ;
case 0x01:
- printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ;
+ printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name);
break ;
case 0x04:
printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
@@ -1310,7 +1310,7 @@ static void olympic_srb_bh(struct net_device *dev)
printk(KERN_INFO "%s: Read Log issued\n",dev->name) ;
break ;
case 0x01:
- printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ;
+ printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name);
break ;
case 0x04:
printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
@@ -1328,7 +1328,7 @@ static void olympic_srb_bh(struct net_device *dev)
printk(KERN_INFO "%s: Read Source Routing Counters issued\n",dev->name) ;
break ;
case 0x01:
- printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ;
+ printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name);
break ;
case 0x04:
printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
@@ -1404,7 +1404,7 @@ static void olympic_arb_cmd(struct net_device *dev)
printk("Loc %d = %02x\n",i,readb(frame_data + i));
}
- printk("next %04x, fs %02x, len %04x \n",readw(buf_ptr+offsetof(struct mac_receive_buffer,next)), readb(buf_ptr+offsetof(struct mac_receive_buffer,frame_status)), readw(buf_ptr+offsetof(struct mac_receive_buffer,buffer_length)));
+ printk("next %04x, fs %02x, len %04x\n",readw(buf_ptr+offsetof(struct mac_receive_buffer,next)), readb(buf_ptr+offsetof(struct mac_receive_buffer,frame_status)), readw(buf_ptr+offsetof(struct mac_receive_buffer,buffer_length)));
}
#endif
mac_frame = dev_alloc_skb(frame_len) ;
@@ -1426,7 +1426,7 @@ static void olympic_arb_cmd(struct net_device *dev)
if (olympic_priv->olympic_network_monitor) {
struct trh_hdr *mac_hdr;
- printk(KERN_WARNING "%s: Received MAC Frame, details: \n",dev->name);
+ printk(KERN_WARNING "%s: Received MAC Frame, details:\n",dev->name);
mac_hdr = tr_hdr(mac_frame);
printk(KERN_WARNING "%s: MAC Frame Dest. Addr: %pM\n",
dev->name, mac_hdr->daddr);
@@ -1489,20 +1489,20 @@ drop_frame:
writel(readl(olympic_mmio+BCTL)&~(3<<13),olympic_mmio+BCTL);
netif_stop_queue(dev);
olympic_priv->srb = readw(olympic_priv->olympic_lap + LAPWWO) ;
- printk(KERN_WARNING "%s: Adapter has been closed \n", dev->name) ;
+ printk(KERN_WARNING "%s: Adapter has been closed\n", dev->name);
} /* If serious error */
if (olympic_priv->olympic_message_level) {
if (lan_status_diff & LSC_SIG_LOSS)
- printk(KERN_WARNING "%s: No receive signal detected \n", dev->name) ;
+ printk(KERN_WARNING "%s: No receive signal detected\n", dev->name);
if (lan_status_diff & LSC_HARD_ERR)
- printk(KERN_INFO "%s: Beaconing \n",dev->name);
+ printk(KERN_INFO "%s: Beaconing\n",dev->name);
if (lan_status_diff & LSC_SOFT_ERR)
- printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame \n",dev->name);
+ printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n",dev->name);
if (lan_status_diff & LSC_TRAN_BCN)
printk(KERN_INFO "%s: We are tranmitting the beacon, aaah\n",dev->name);
if (lan_status_diff & LSC_SS)
- printk(KERN_INFO "%s: Single Station on the ring \n", dev->name);
+ printk(KERN_INFO "%s: Single Station on the ring\n", dev->name);
if (lan_status_diff & LSC_RING_REC)
printk(KERN_INFO "%s: Ring recovery ongoing\n",dev->name);
if (lan_status_diff & LSC_FDX_MODE)
@@ -1512,7 +1512,7 @@ drop_frame:
if (lan_status_diff & LSC_CO) {
if (olympic_priv->olympic_message_level)
- printk(KERN_INFO "%s: Counter Overflow \n", dev->name);
+ printk(KERN_INFO "%s: Counter Overflow\n", dev->name);
/* Issue READ.LOG command */
@@ -1551,7 +1551,7 @@ drop_frame:
} /* Lan.change.status */
else
- printk(KERN_WARNING "%s: Unknown arb command \n", dev->name);
+ printk(KERN_WARNING "%s: Unknown arb command\n", dev->name);
}
static void olympic_asb_bh(struct net_device *dev)
@@ -1578,10 +1578,10 @@ static void olympic_asb_bh(struct net_device *dev)
if (olympic_priv->asb_queued == 2) {
switch (readb(asb_block+2)) {
case 0x01:
- printk(KERN_WARNING "%s: Unrecognized command code \n", dev->name);
+ printk(KERN_WARNING "%s: Unrecognized command code\n", dev->name);
break ;
case 0x26:
- printk(KERN_WARNING "%s: Unrecognized buffer address \n", dev->name);
+ printk(KERN_WARNING "%s: Unrecognized buffer address\n", dev->name);
break ;
case 0xFF:
/* Valid response, everything should be ok again */
diff --git a/drivers/net/tokenring/smctr.c b/drivers/net/tokenring/smctr.c
index e40560137c46..0929fff5982c 100644
--- a/drivers/net/tokenring/smctr.c
+++ b/drivers/net/tokenring/smctr.c
@@ -4562,7 +4562,7 @@ static void smctr_timeout(struct net_device *dev)
* fake transmission time and go on trying. Our own timeout
* routine is in sktr_timer_chk()
*/
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
}
@@ -5147,8 +5147,6 @@ static void smctr_set_multicast_list(struct net_device *dev)
{
if(smctr_debug > 10)
printk(KERN_DEBUG "%s: smctr_set_multicast_list\n", dev->name);
-
- return;
}
static int smctr_set_page(struct net_device *dev, __u8 *buf)
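
Two related cleanups recur in the hunks above and in the drivers that follow: the per-packet dev->trans_start = jiffies store is dropped from the transmit routines, since the core now refreshes the per-queue transmit timestamp on its own, while the same store in the ->tx_timeout handlers is kept and annotated "prevent tx timeout", because refreshing the timestamp there keeps the watchdog from firing again while the adapter is still being reset. A minimal sketch of the surviving pattern (example_tx_timeout and example_reset_adapter are hypothetical):

    static void example_tx_timeout(struct net_device *dev)
    {
            example_reset_adapter(dev);
            dev->trans_start = jiffies;     /* prevent tx timeout */
            netif_wake_queue(dev);
    }
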
diff --git a/drivers/net/tokenring/tms380tr.c b/drivers/net/tokenring/tms380tr.c
index 8b508c922410..435ef7d5470f 100644
--- a/drivers/net/tokenring/tms380tr.c
+++ b/drivers/net/tokenring/tms380tr.c
@@ -325,8 +325,6 @@ static void tms380tr_timer_end_wait(unsigned long data)
tp->Sleeping = 0;
wake_up_interruptible(&tp->wait_for_tok_int);
}
-
- return;
}
/*
@@ -460,8 +458,6 @@ static void tms380tr_init_net_local(struct net_device *dev)
tp->RplHead = &tp->Rpl[0];
tp->RplTail = &tp->Rpl[RPL_NUM-1];
tp->RplTail->Status = (RX_START_FRAME | RX_END_FRAME | RX_FRAME_IRQ);
-
- return;
}
/*
@@ -481,8 +477,6 @@ static void tms380tr_init_ipb(struct net_local *tp)
tp->ipb.DMA_Abort_Thrhld = DMA_RETRIES;
tp->ipb.SCB_Addr = 0;
tp->ipb.SSB_Addr = 0;
-
- return;
}
/*
@@ -527,8 +521,6 @@ static void tms380tr_init_opb(struct net_device *dev)
tp->ocpl.ProdIDAddr[0] = LOWORD(Addr);
tp->ocpl.ProdIDAddr[1] = HIWORD(Addr);
-
- return;
}
/*
@@ -543,8 +535,6 @@ static void tms380tr_open_adapter(struct net_device *dev)
tp->OpenCommandIssued = 1;
tms380tr_exec_cmd(dev, OC_OPEN);
-
- return;
}
/*
@@ -554,8 +544,6 @@ static void tms380tr_open_adapter(struct net_device *dev)
static void tms380tr_disable_interrupts(struct net_device *dev)
{
SIFWRITEB(0, SIFACL);
-
- return;
}
/*
@@ -565,8 +553,6 @@ static void tms380tr_disable_interrupts(struct net_device *dev)
static void tms380tr_enable_interrupts(struct net_device *dev)
{
SIFWRITEB(ACL_SINTEN, SIFACL);
-
- return;
}
/*
@@ -578,8 +564,6 @@ static void tms380tr_exec_cmd(struct net_device *dev, unsigned short Command)
tp->CMDqueue |= Command;
tms380tr_chk_outstanding_cmds(dev);
-
- return;
}
static void tms380tr_timeout(struct net_device *dev)
@@ -592,7 +576,7 @@ static void tms380tr_timeout(struct net_device *dev)
* fake transmission time and go on trying. Our own timeout
* routine is in tms380tr_timer_chk()
*/
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
}
@@ -712,8 +696,6 @@ static void tms380tr_chk_src_addr(unsigned char *frame, unsigned char *hw_addr)
SRBit = frame[8] & 0x80;
memcpy(&frame[8], hw_addr, 6);
frame[8] |= SRBit;
-
- return;
}
/*
@@ -743,8 +725,6 @@ static void tms380tr_timer_chk(unsigned long data)
return;
tp->ReOpenInProgress = 1;
tms380tr_open_adapter(dev);
-
- return;
}
/*
@@ -863,8 +843,6 @@ static void tms380tr_reset_interrupt(struct net_device *dev)
* and clear STS_SYSTEM_IRQ bit: enable adapter for further interrupts.
*/
tms380tr_exec_sifcmd(dev, CMD_SSB_CLEAR | CMD_CLEAR_SYSTEM_IRQ);
-
- return;
}
/*
@@ -1119,8 +1097,6 @@ static void tms380tr_cmd_status_irq(struct net_device *dev)
tp->MacStat.frequency_errors += tp->errorlogtable.Frequency_Error;
tp->MacStat.internal_errors += tp->errorlogtable.Internal_Error;
}
-
- return;
}
/*
@@ -1211,17 +1187,17 @@ static void tms380tr_set_multicast_list(struct net_device *dev)
}
else
{
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
- netdev_for_each_mc_addr(mclist, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
((char *)(&tp->ocpl.FunctAddr))[0] |=
- mclist->dmi_addr[2];
+ ha->addr[2];
((char *)(&tp->ocpl.FunctAddr))[1] |=
- mclist->dmi_addr[3];
+ ha->addr[3];
((char *)(&tp->ocpl.FunctAddr))[2] |=
- mclist->dmi_addr[4];
+ ha->addr[4];
((char *)(&tp->ocpl.FunctAddr))[3] |=
- mclist->dmi_addr[5];
+ ha->addr[5];
}
}
tms380tr_exec_cmd(dev, OC_SET_FUNCT_ADDR);
@@ -1229,7 +1205,6 @@ static void tms380tr_set_multicast_list(struct net_device *dev)
tp->ocpl.OPENOptions = OpenOptions;
tms380tr_exec_cmd(dev, OC_MODIFY_OPEN_PARMS);
- return;
}
/*
@@ -1247,7 +1222,6 @@ void tms380tr_wait(unsigned long time)
#else
udelay(time);
#endif
- return;
}
/*
@@ -1266,8 +1240,6 @@ static void tms380tr_exec_sifcmd(struct net_device *dev, unsigned int WriteValue
SifStsValue = SIFREADW(SIFSTS);
} while((SifStsValue & CMD_INTERRUPT_ADAPTER) && loop_counter--);
SIFWRITEW(cmd, SIFCMD);
-
- return;
}
/*
@@ -1390,7 +1362,7 @@ static int tms380tr_bringup_diags(struct net_device *dev)
Status &= STS_MASK;
if(tms380tr_debug > 3)
- printk(KERN_DEBUG " %04X \n", Status);
+ printk(KERN_DEBUG " %04X\n", Status);
/* BUD successfully completed */
if(Status == STS_INITIALIZE)
return (1);
@@ -1700,8 +1672,6 @@ static void tms380tr_chk_outstanding_cmds(struct net_device *dev)
/* Execute SCB and generate IRQ when done. */
tms380tr_exec_sifcmd(dev, CMD_EXECUTE | CMD_SCB_REQUEST);
-
- return;
}
/*
@@ -1774,8 +1744,6 @@ static void tms380tr_ring_status_irq(struct net_device *dev)
tp->AdapterOpenFlag = 0;
tms380tr_open_adapter(dev);
}
-
- return;
}
/*
@@ -1846,7 +1814,7 @@ static void tms380tr_chk_irq(struct net_device *dev)
break;
case DMA_WRITE_ABORT:
- printk(KERN_INFO "%s: DMA write operation aborted: \n",
+ printk(KERN_INFO "%s: DMA write operation aborted:\n",
dev->name);
switch (AdapterCheckBlock[1])
{
@@ -1932,8 +1900,6 @@ static void tms380tr_chk_irq(struct net_device *dev)
/* Restart of firmware successful */
tp->AdapterOpenFlag = 1;
}
-
- return;
}
/*
@@ -1988,8 +1954,6 @@ static void tms380tr_read_ram(struct net_device *dev, unsigned char *Data,
/* Restore original values */
SIFWRITEW(old_sifadx, SIFADX);
SIFWRITEW(old_sifadr, SIFADR);
-
- return;
}
/*
@@ -2021,8 +1985,6 @@ static void tms380tr_cancel_tx_queue(struct net_local* tp)
dma_unmap_single(tp->pdev, tpl->DMABuff, tpl->Skb->len, DMA_TO_DEVICE);
dev_kfree_skb_any(tpl->Skb);
}
-
- return;
}
/*
@@ -2094,7 +2056,6 @@ static void tms380tr_tx_status_irq(struct net_device *dev)
if(!tp->TplFree->NextTPLPtr->BusyFlag)
netif_wake_queue(dev);
- return;
}
/*
@@ -2255,8 +2216,6 @@ static void tms380tr_rcv_status_irq(struct net_device *dev)
/* Inform adapter about RPL valid. */
tms380tr_exec_sifcmd(dev, CMD_RX_VALID);
}
-
- return;
}
/*
@@ -2269,8 +2228,6 @@ static void tms380tr_rcv_status_irq(struct net_device *dev)
static void tms380tr_write_rpl_status(RPL *rpl, unsigned int Status)
{
rpl->Status = Status;
-
- return;
}
/*
@@ -2287,8 +2244,6 @@ static void tms380tr_update_rcv_stats(struct net_local *tp, unsigned char DataPt
/* Test functional bit */
if(DataPtr[2] & GROUP_BIT)
tp->MacStat.multicast++;
-
- return;
}
static int tms380tr_set_mac_address(struct net_device *dev, void *addr)
@@ -2318,8 +2273,6 @@ static void tms380tr_dump(unsigned char *Data, int length)
Data[j+0],Data[j+1],Data[j+2],Data[j+3],
Data[j+4],Data[j+5],Data[j+6],Data[j+7]);
}
-
- return;
}
#endif
diff --git a/drivers/net/tsi108_eth.c b/drivers/net/tsi108_eth.c
index 5b1fbb3c3b51..a03730bd1da5 100644
--- a/drivers/net/tsi108_eth.c
+++ b/drivers/net/tsi108_eth.c
@@ -263,7 +263,7 @@ static inline void tsi108_write_tbi(struct tsi108_prv_data *data,
return;
udelay(10);
}
- printk(KERN_ERR "%s function time out \n", __func__);
+ printk(KERN_ERR "%s function time out\n", __func__);
}
static int mii_speed(struct mii_if_info *mii)
@@ -704,8 +704,8 @@ static int tsi108_send_packet(struct sk_buff * skb, struct net_device *dev)
if (i == 0) {
data->txring[tx].buf0 = dma_map_single(NULL, skb->data,
- skb->len - skb->data_len, DMA_TO_DEVICE);
- data->txring[tx].len = skb->len - skb->data_len;
+ skb_headlen(skb), DMA_TO_DEVICE);
+ data->txring[tx].len = skb_headlen(skb);
misc |= TSI108_TX_SOF;
} else {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
@@ -1056,7 +1056,7 @@ static void tsi108_stop_ethernet(struct net_device *dev)
return;
udelay(10);
}
- printk(KERN_ERR "%s function time out \n", __func__);
+ printk(KERN_ERR "%s function time out\n", __func__);
}
static void tsi108_reset_ether(struct tsi108_prv_data * data)
@@ -1186,15 +1186,15 @@ static void tsi108_set_rx_mode(struct net_device *dev)
if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev)) {
int i;
- struct dev_mc_list *mc;
+ struct netdev_hw_addr *ha;
rxcfg |= TSI108_EC_RXCFG_MFE | TSI108_EC_RXCFG_MC_HASH;
memset(data->mc_hash, 0, sizeof(data->mc_hash));
- netdev_for_each_mc_addr(mc, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
u32 hash, crc;
- crc = ether_crc(6, mc->dmi_addr);
+ crc = ether_crc(6, ha->addr);
hash = crc >> 23;
__set_bit(hash, &data->mc_hash[0]);
}
@@ -1233,7 +1233,7 @@ static void tsi108_init_phy(struct net_device *dev)
udelay(10);
}
if (i == 0)
- printk(KERN_ERR "%s function time out \n", __func__);
+ printk(KERN_ERR "%s function time out\n", __func__);
if (data->phy_type == TSI108_PHY_BCM54XX) {
tsi108_write_mii(data, 0x09, 0x0300);
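
In the tsi108_send_packet() hunk above, the open-coded skb->len - skb->data_len becomes skb_headlen(skb). The helper returns the length of the skb's linear area and is defined as exactly that subtraction, so the change is a readability cleanup rather than a behavioural one. A minimal sketch of the map-the-linear-head step it appears in (example_map_head is hypothetical):

    static int example_map_head(struct device *d, struct sk_buff *skb,
                                dma_addr_t *addr)
    {
            /* linear (non-paged) part only; paged fragments are mapped separately */
            *addr = dma_map_single(d, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
            return dma_mapping_error(d, *addr) ? -ENOMEM : 0;
    }
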
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index 19cafc2b418d..c0e70006374e 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -654,7 +654,6 @@ static netdev_tx_t de_start_xmit (struct sk_buff *skb,
/* Trigger an immediate transmit demand. */
dw32(TxPoll, NormalTxPoll);
- dev->trans_start = jiffies;
return NETDEV_TX_OK;
}
@@ -671,15 +670,15 @@ static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
{
struct de_private *de = netdev_priv(dev);
u16 hash_table[32];
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
int i;
u16 *eaddrs;
memset(hash_table, 0, sizeof(hash_table));
set_bit_le(255, hash_table); /* Broadcast entry */
/* This should work on big-endian machines as well. */
- netdev_for_each_mc_addr(mclist, dev) {
- int index = ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x1ff;
+ netdev_for_each_mc_addr(ha, dev) {
+ int index = ether_crc_le(ETH_ALEN, ha->addr) & 0x1ff;
set_bit_le(index, hash_table);
}
@@ -700,13 +699,13 @@ static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
{
struct de_private *de = netdev_priv(dev);
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
u16 *eaddrs;
/* We have <= 14 addresses so we can use the wonderful
16 address perfect filtering of the Tulip. */
- netdev_for_each_mc_addr(mclist, dev) {
- eaddrs = (u16 *)mclist->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ eaddrs = (u16 *) ha->addr;
*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
index 09b57193a16a..75a64c88cf7a 100644
--- a/drivers/net/tulip/de4x5.c
+++ b/drivers/net/tulip/de4x5.c
@@ -1337,7 +1337,7 @@ de4x5_open(struct net_device *dev)
}
lp->interrupt = UNMASK_INTERRUPTS;
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
START_DE4X5;
@@ -1507,7 +1507,6 @@ de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev)
outl(POLL_DEMAND, DE4X5_TPD);/* Start the TX */
lp->tx_new = (++lp->tx_new) % lp->txRingSize;
- dev->trans_start = jiffies;
if (TX_BUFFS_AVAIL) {
netif_start_queue(dev); /* Another pkt may be queued */
@@ -1884,8 +1883,6 @@ de4x5_local_stats(struct net_device *dev, char *buf, int pkt_len)
if (lp->pktStats.bins[0] == 0) { /* Reset counters */
memset((char *)&lp->pktStats, 0, sizeof(lp->pktStats));
}
-
- return;
}
/*
@@ -1937,7 +1934,7 @@ set_multicast_list(struct net_device *dev)
lp->tx_new = (++lp->tx_new) % lp->txRingSize;
outl(POLL_DEMAND, DE4X5_TPD); /* Start the TX */
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
}
}
}
@@ -1951,7 +1948,7 @@ static void
SetMulticastFilter(struct net_device *dev)
{
struct de4x5_private *lp = netdev_priv(dev);
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
u_long iobase = dev->base_addr;
int i, bit, byte;
u16 hashcode;
@@ -1966,8 +1963,8 @@ SetMulticastFilter(struct net_device *dev)
if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 14)) {
omr |= OMR_PM; /* Pass all multicasts */
} else if (lp->setup_f == HASH_PERF) { /* Hash Filtering */
- netdev_for_each_mc_addr(dmi, dev) {
- addrs = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ addrs = ha->addr;
if ((*addrs & 0x01) == 1) { /* multicast address? */
crc = ether_crc_le(ETH_ALEN, addrs);
hashcode = crc & HASH_BITS; /* hashcode is 9 LSb of CRC */
@@ -1983,8 +1980,8 @@ SetMulticastFilter(struct net_device *dev)
}
}
} else { /* Perfect filtering */
- netdev_for_each_mc_addr(dmi, dev) {
- addrs = dmi->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ addrs = ha->addr;
for (i=0; i<ETH_ALEN; i++) {
*(pa + (i&1)) = *addrs++;
if (i & 0x01) pa += 4;
@@ -1992,8 +1989,6 @@ SetMulticastFilter(struct net_device *dev)
}
}
outl(omr, DE4X5_OMR);
-
- return;
}
#ifdef CONFIG_EISA
@@ -2188,8 +2183,6 @@ srom_search(struct net_device *dev, struct pci_dev *pdev)
return;
}
}
-
- return;
}
/*
@@ -3292,8 +3285,6 @@ de4x5_init_connection(struct net_device *dev)
outl(POLL_DEMAND, DE4X5_TPD);
netif_wake_queue(dev);
-
- return;
}
/*
@@ -3665,8 +3656,6 @@ de4x5_free_rx_buffs(struct net_device *dev)
lp->rx_ring[i].status = 0;
lp->rx_skb[i] = (struct sk_buff *)1; /* Dummy entry */
}
-
- return;
}
static void
@@ -3709,8 +3698,6 @@ de4x5_save_skbs(struct net_device *dev)
lp->cache.save_cnt++;
START_DE4X5;
}
-
- return;
}
static void
@@ -3742,8 +3729,6 @@ de4x5_rst_desc_ring(struct net_device *dev)
lp->cache.save_cnt--;
START_DE4X5;
}
-
- return;
}
static void
@@ -3772,8 +3757,6 @@ de4x5_cache_state(struct net_device *dev, int flag)
}
break;
}
-
- return;
}
static void
@@ -3846,8 +3829,6 @@ de4x5_setup_intr(struct net_device *dev)
outl(sts, DE4X5_STS);
ENABLE_IRQs;
}
-
- return;
}
/*
@@ -3880,8 +3861,6 @@ reset_init_sia(struct net_device *dev, s32 csr13, s32 csr14, s32 csr15)
outl(csr13, DE4X5_SICR);
mdelay(10);
-
- return;
}
/*
@@ -3902,8 +3881,6 @@ create_packet(struct net_device *dev, char *frame, int len)
*buf++ = 0; /* Packet length (2 bytes) */
*buf++ = 1;
-
- return;
}
/*
@@ -4007,8 +3984,6 @@ DevicePresent(struct net_device *dev, u_long aprom_addr)
}
de4x5_dbg_srom((struct de4x5_srom *)&lp->srom);
}
-
- return;
}
/*
@@ -4046,8 +4021,6 @@ enet_addr_rst(u_long aprom_addr)
}
}
}
-
- return;
}
/*
@@ -4187,8 +4160,6 @@ srom_repair(struct net_device *dev, int card)
lp->useSROM = true;
break;
}
-
- return;
}
/*
@@ -4262,8 +4233,6 @@ srom_latch(u_int command, u_long addr)
sendto_srom(command, addr);
sendto_srom(command | DT_CLK, addr);
sendto_srom(command, addr);
-
- return;
}
static void
@@ -4272,8 +4241,6 @@ srom_command(u_int command, u_long addr)
srom_latch(command, addr);
srom_latch(command, addr);
srom_latch((command & 0x0000ff00) | DT_CS, addr);
-
- return;
}
static void
@@ -4288,8 +4255,6 @@ srom_address(u_int command, u_long addr, u_char offset)
udelay(1);
i = (getfrom_srom(addr) >> 3) & 0x01;
-
- return;
}
static short
@@ -4323,8 +4288,6 @@ srom_busy(u_int command, u_long addr)
}
sendto_srom(command & 0x0000ff00, addr);
-
- return;
}
*/
@@ -4333,8 +4296,6 @@ sendto_srom(u_int command, u_long addr)
{
outl(command, addr);
udelay(1);
-
- return;
}
static int
@@ -4433,8 +4394,6 @@ srom_init(struct net_device *dev)
p += ((*p & BLOCK_LEN) + 1);
}
}
-
- return;
}
/*
@@ -4463,8 +4422,6 @@ srom_exec(struct net_device *dev, u_char *p)
outl(lp->cache.csr14, DE4X5_STRR);
outl(lp->cache.csr13, DE4X5_SICR);
}
-
- return;
}
/*
@@ -4889,8 +4846,6 @@ mii_wr(int data, u_char phyreg, u_char phyaddr, u_long ioaddr)
mii_ta(MII_STWR, ioaddr); /* Turn around time - 2 MDC */
data = mii_swap(data, 16); /* Swap data bit ordering */
mii_wdata(data, 16, ioaddr); /* Write data */
-
- return;
}
static int
@@ -4916,8 +4871,6 @@ mii_wdata(int data, int len, u_long ioaddr)
sendto_mii(MII_MWR | MII_WR, data, ioaddr);
data >>= 1;
}
-
- return;
}
static void
@@ -4930,8 +4883,6 @@ mii_address(u_char addr, u_long ioaddr)
sendto_mii(MII_MWR | MII_WR, addr, ioaddr);
addr >>= 1;
}
-
- return;
}
static void
@@ -4943,8 +4894,6 @@ mii_ta(u_long rw, u_long ioaddr)
} else {
getfrom_mii(MII_MRD | MII_RD, ioaddr); /* Tri-state MDIO */
}
-
- return;
}
static int
@@ -4971,8 +4920,6 @@ sendto_mii(u32 command, int data, u_long ioaddr)
udelay(1);
outl(command | MII_MDC | j, ioaddr);
udelay(1);
-
- return;
}
static int
@@ -5077,7 +5024,7 @@ mii_get_phy(struct net_device *dev)
lp->phy[k].spd.value = GENERIC_VALUE; /* TX & T4, H/F Duplex */
lp->mii_cnt++;
lp->active++;
- printk("%s: Using generic MII device control. If the board doesn't operate, \nplease mail the following dump to the author:\n", dev->name);
+ printk("%s: Using generic MII device control. If the board doesn't operate,\nplease mail the following dump to the author:\n", dev->name);
j = de4x5_debug;
de4x5_debug |= DEBUG_MII;
de4x5_dbg_mii(dev, k);
@@ -5186,8 +5133,6 @@ gep_wr(s32 data, struct net_device *dev)
} else if ((lp->chipset & ~0x00ff) == DC2114x) {
outl((data<<16) | lp->cache.csr15, DE4X5_SIGR);
}
-
- return;
}
static int
@@ -5247,8 +5192,6 @@ yawn(struct net_device *dev, int state)
break;
}
}
-
- return;
}
static void
@@ -5290,8 +5233,6 @@ de4x5_parse_params(struct net_device *dev)
}
*q = t;
}
-
- return;
}
static void
@@ -5337,12 +5278,10 @@ de4x5_dbg_open(struct net_device *dev)
}
}
printk("...0x%8.8x\n", le32_to_cpu(lp->tx_ring[i].buf));
- printk("Ring size: \nRX: %d\nTX: %d\n",
+ printk("Ring size:\nRX: %d\nTX: %d\n",
(short)lp->rxRingSize,
(short)lp->txRingSize);
}
-
- return;
}
static void
@@ -5369,8 +5308,6 @@ de4x5_dbg_mii(struct net_device *dev, int k)
printk("MII 20: %x\n",mii_rd(0x14,lp->phy[k].addr,DE4X5_MII));
}
}
-
- return;
}
static void
@@ -5395,8 +5332,6 @@ de4x5_dbg_media(struct net_device *dev)
}
lp->c_media = lp->media;
}
-
- return;
}
static void
@@ -5417,8 +5352,6 @@ de4x5_dbg_srom(struct de4x5_srom *p)
printk("%3d %04x\n", i<<1, (u_short)*((u_short *)p+i));
}
}
-
- return;
}
static void
@@ -5440,8 +5373,6 @@ de4x5_dbg_rx(struct sk_buff *skb, int len)
printk("\n");
}
}
-
- return;
}
/*
diff --git a/drivers/net/tulip/dmfe.c b/drivers/net/tulip/dmfe.c
index 9568156dea98..29e6c63d39fd 100644
--- a/drivers/net/tulip/dmfe.c
+++ b/drivers/net/tulip/dmfe.c
@@ -1118,7 +1118,6 @@ static void dmfe_ethtool_get_wol(struct net_device *dev,
wolinfo->supported = WAKE_PHY | WAKE_MAGIC;
wolinfo->wolopts = db->wol_mode;
- return;
}
@@ -1180,11 +1179,11 @@ static void dmfe_timer(unsigned long data)
/* TX polling kick monitor */
if ( db->tx_packet_cnt &&
- time_after(jiffies, dev->trans_start + DMFE_TX_KICK) ) {
+ time_after(jiffies, dev_trans_start(dev) + DMFE_TX_KICK) ) {
outl(0x1, dev->base_addr + DCR1); /* Tx polling again */
/* TX Timeout */
- if ( time_after(jiffies, dev->trans_start + DMFE_TX_TIMEOUT) ) {
+ if (time_after(jiffies, dev_trans_start(dev) + DMFE_TX_TIMEOUT) ) {
db->reset_TXtimeout++;
db->wait_reset = 1;
dev_warn(&dev->dev, "Tx timeout - resetting\n");
@@ -1453,7 +1452,7 @@ static void update_cr6(u32 cr6_data, unsigned long ioaddr)
static void dm9132_id_table(struct DEVICE *dev)
{
- struct dev_mc_list *mcptr;
+ struct netdev_hw_addr *ha;
u16 * addrptr;
unsigned long ioaddr = dev->base_addr+0xc0; /* ID Table */
u32 hash_val;
@@ -1477,8 +1476,8 @@ static void dm9132_id_table(struct DEVICE *dev)
hash_table[3] = 0x8000;
/* the multicast address in Hash Table : 64 bits */
- netdev_for_each_mc_addr(mcptr, dev) {
- hash_val = cal_CRC((char *) mcptr->dmi_addr, 6, 0) & 0x3f;
+ netdev_for_each_mc_addr(ha, dev) {
+ hash_val = cal_CRC((char *) ha->addr, 6, 0) & 0x3f;
hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
}
@@ -1496,7 +1495,7 @@ static void dm9132_id_table(struct DEVICE *dev)
static void send_filter_frame(struct DEVICE *dev)
{
struct dmfe_board_info *db = netdev_priv(dev);
- struct dev_mc_list *mcptr;
+ struct netdev_hw_addr *ha;
struct tx_desc *txptr;
u16 * addrptr;
u32 * suptr;
@@ -1519,8 +1518,8 @@ static void send_filter_frame(struct DEVICE *dev)
*suptr++ = 0xffff;
/* fit the multicast address */
- netdev_for_each_mc_addr(mcptr, dev) {
- addrptr = (u16 *) mcptr->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ addrptr = (u16 *) ha->addr;
*suptr++ = addrptr[0];
*suptr++ = addrptr[1];
*suptr++ = addrptr[2];
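The dmfe hunks above show a conversion that recurs throughout the driver updates in this diff: multicast walkers stop iterating struct dev_mc_list (reading mcptr->dmi_addr) and iterate struct netdev_hw_addr instead (reading ha->addr); the netdev_for_each_mc_addr() macro itself is unchanged. A minimal sketch of the resulting loop shape, assuming a generic driver with a two-word hash filter like the ones above (the function name is illustrative and not taken from any of these drivers):

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/string.h>

/* Illustrative only: mirrors the dev_mc_list -> netdev_hw_addr conversion above. */
static void example_build_mc_hash(struct net_device *dev, u32 mc_filter[2])
{
	struct netdev_hw_addr *ha;	/* was: struct dev_mc_list *mclist */

	memset(mc_filter, 0, 2 * sizeof(u32));
	netdev_for_each_mc_addr(ha, dev) {
		/* ha->addr replaces the old mclist->dmi_addr */
		int bit = ether_crc(ETH_ALEN, ha->addr) >> 26;

		mc_filter[bit >> 5] |= 1 << (bit & 31);
	}
}
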
diff --git a/drivers/net/tulip/media.c b/drivers/net/tulip/media.c
index 68b170ae4d15..a0c770ee4b64 100644
--- a/drivers/net/tulip/media.c
+++ b/drivers/net/tulip/media.c
@@ -396,8 +396,6 @@ void tulip_select_media(struct net_device *dev, int startup)
tp->csr6 = new_csr6 | (tp->csr6 & 0xfdff) | (tp->full_duplex ? 0x0200 : 0);
mdelay(1);
-
- return;
}
/*
diff --git a/drivers/net/tulip/pnic.c b/drivers/net/tulip/pnic.c
index 966efa1a27d7..a63e64b6863d 100644
--- a/drivers/net/tulip/pnic.c
+++ b/drivers/net/tulip/pnic.c
@@ -67,7 +67,7 @@ void pnic_lnk_change(struct net_device *dev, int csr5)
*/
if (tulip_media_cap[dev->if_port] & MediaIsMII)
return;
- if (! tp->nwayset || time_after(jiffies, dev->trans_start + 1*HZ)) {
+ if (! tp->nwayset || time_after(jiffies, dev_trans_start(dev) + 1*HZ)) {
tp->csr6 = 0x00420000 | (tp->csr6 & 0x0000fdff);
iowrite32(tp->csr6, ioaddr + CSR6);
iowrite32(0x30, ioaddr + CSR12);
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index 3810db9dc2de..254643ed945e 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -605,7 +605,7 @@ static void tulip_tx_timeout(struct net_device *dev)
out_unlock:
spin_unlock_irqrestore (&tp->lock, flags);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue (dev);
}
@@ -707,8 +707,6 @@ tulip_start_xmit(struct sk_buff *skb, struct net_device *dev)
spin_unlock_irqrestore(&tp->lock, flags);
- dev->trans_start = jiffies;
-
return NETDEV_TX_OK;
}
@@ -991,15 +989,15 @@ static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
{
struct tulip_private *tp = netdev_priv(dev);
u16 hash_table[32];
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
int i;
u16 *eaddrs;
memset(hash_table, 0, sizeof(hash_table));
set_bit_le(255, hash_table); /* Broadcast entry */
/* This should work on big-endian machines as well. */
- netdev_for_each_mc_addr(mclist, dev) {
- int index = ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x1ff;
+ netdev_for_each_mc_addr(ha, dev) {
+ int index = ether_crc_le(ETH_ALEN, ha->addr) & 0x1ff;
set_bit_le(index, hash_table);
}
@@ -1019,13 +1017,13 @@ static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
{
struct tulip_private *tp = netdev_priv(dev);
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
u16 *eaddrs;
/* We have <= 14 addresses so we can use the wonderful
16 address perfect filtering of the Tulip. */
- netdev_for_each_mc_addr(mclist, dev) {
- eaddrs = (u16 *)mclist->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ eaddrs = (u16 *) ha->addr;
*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
@@ -1062,7 +1060,7 @@ static void set_rx_mode(struct net_device *dev)
} else if (tp->flags & MC_HASH_ONLY) {
/* Some work-alikes have only a 64-entry hash filter table. */
/* Should verify correctness on big-endian/__powerpc__ */
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
if (netdev_mc_count(dev) > 64) {
/* Arbitrary non-effective limit. */
tp->csr6 |= AcceptAllMulticast;
@@ -1070,18 +1068,21 @@ static void set_rx_mode(struct net_device *dev)
} else {
u32 mc_filter[2] = {0, 0}; /* Multicast hash filter */
int filterbit;
- netdev_for_each_mc_addr(mclist, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
if (tp->flags & COMET_MAC_ADDR)
- filterbit = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
+ filterbit = ether_crc_le(ETH_ALEN,
+ ha->addr);
else
- filterbit = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
+ filterbit = ether_crc(ETH_ALEN,
+ ha->addr) >> 26;
filterbit &= 0x3f;
mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
if (tulip_debug > 2)
dev_info(&dev->dev,
"Added filter for %pM %08x bit %d\n",
- mclist->dmi_addr,
- ether_crc(ETH_ALEN, mclist->dmi_addr), filterbit);
+ ha->addr,
+ ether_crc(ETH_ALEN, ha->addr),
+ filterbit);
}
if (mc_filter[0] == tp->mc_filter[0] &&
mc_filter[1] == tp->mc_filter[1])
diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/tulip/uli526x.c
index a589dd34891e..96de5829b940 100644
--- a/drivers/net/tulip/uli526x.c
+++ b/drivers/net/tulip/uli526x.c
@@ -1040,11 +1040,11 @@ static void uli526x_timer(unsigned long data)
/* TX polling kick monitor */
if ( db->tx_packet_cnt &&
- time_after(jiffies, dev->trans_start + ULI526X_TX_KICK) ) {
+ time_after(jiffies, dev_trans_start(dev) + ULI526X_TX_KICK) ) {
outl(0x1, dev->base_addr + DCR1); // Tx polling again
// TX Timeout
- if ( time_after(jiffies, dev->trans_start + ULI526X_TX_TIMEOUT) ) {
+ if ( time_after(jiffies, dev_trans_start(dev) + ULI526X_TX_TIMEOUT) ) {
db->reset_TXtimeout++;
db->wait_reset = 1;
printk( "%s: Tx timeout - resetting\n",
@@ -1393,7 +1393,7 @@ static void update_cr6(u32 cr6_data, unsigned long ioaddr)
static void send_filter_frame(struct net_device *dev, int mc_cnt)
{
struct uli526x_board_info *db = netdev_priv(dev);
- struct dev_mc_list *mcptr;
+ struct netdev_hw_addr *ha;
struct tx_desc *txptr;
u16 * addrptr;
u32 * suptr;
@@ -1416,8 +1416,8 @@ static void send_filter_frame(struct net_device *dev, int mc_cnt)
*suptr++ = 0xffff << FLT_SHIFT;
/* fit the multicast address */
- netdev_for_each_mc_addr(mcptr, dev) {
- addrptr = (u16 *) mcptr->dmi_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ addrptr = (u16 *) ha->addr;
*suptr++ = addrptr[0] << FLT_SHIFT;
*suptr++ = addrptr[1] << FLT_SHIFT;
*suptr++ = addrptr[2] << FLT_SHIFT;
diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
index 98dbf6cc1d68..608b279b921b 100644
--- a/drivers/net/tulip/winbond-840.c
+++ b/drivers/net/tulip/winbond-840.c
@@ -626,7 +626,6 @@ static void mdio_write(struct net_device *dev, int phy_id, int location, int val
iowrite32(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
mdio_delay(mdio_addr);
}
- return;
}
@@ -969,9 +968,8 @@ static void tx_timeout(struct net_device *dev)
enable_irq(dev->irq);
netif_wake_queue(dev);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
np->stats.tx_errors++;
- return;
}
/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
@@ -1055,8 +1053,6 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
}
spin_unlock_irq(&np->lock);
- dev->trans_start = jiffies;
-
if (debug > 4) {
printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d\n",
dev->name, np->cur_tx, entry);
@@ -1366,13 +1362,15 @@ static u32 __set_rx_mode(struct net_device *dev)
memset(mc_filter, 0xff, sizeof(mc_filter));
rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys;
} else {
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
memset(mc_filter, 0, sizeof(mc_filter));
- netdev_for_each_mc_addr(mclist, dev) {
- int filterbit = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26) ^ 0x3F;
- filterbit &= 0x3f;
- mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
+ netdev_for_each_mc_addr(ha, dev) {
+ int filbit;
+
+ filbit = (ether_crc(ETH_ALEN, ha->addr) >> 26) ^ 0x3F;
+ filbit &= 0x3f;
+ mc_filter[filbit >> 5] |= 1 << (filbit & 31);
}
rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys;
}
diff --git a/drivers/net/tulip/xircom_cb.c b/drivers/net/tulip/xircom_cb.c
index acfeeb980562..a439e93be22d 100644
--- a/drivers/net/tulip/xircom_cb.c
+++ b/drivers/net/tulip/xircom_cb.c
@@ -350,9 +350,9 @@ static irqreturn_t xircom_interrupt(int irq, void *dev_instance)
#ifdef DEBUG
print_binary(status);
- printk("tx status 0x%08x 0x%08x \n",
+ printk("tx status 0x%08x 0x%08x\n",
card->tx_buffer[0], card->tx_buffer[4]);
- printk("rx status 0x%08x 0x%08x \n",
+ printk("rx status 0x%08x 0x%08x\n",
card->rx_buffer[0], card->rx_buffer[4]);
#endif
/* Handle shared irq and hotplug */
@@ -462,7 +462,7 @@ static int xircom_open(struct net_device *dev)
struct xircom_private *xp = netdev_priv(dev);
int retval;
enter("xircom_open");
- pr_info("xircom cardbus adaptor found, registering as %s, using irq %i \n",
+ pr_info("xircom cardbus adaptor found, registering as %s, using irq %i\n",
dev->name, dev->irq);
retval = request_irq(dev->irq, xircom_interrupt, IRQF_SHARED, dev->name, dev);
if (retval) {
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 43265207d463..6ad6fe706312 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -109,6 +109,9 @@ struct tun_struct {
struct tap_filter txflt;
struct socket socket;
+ struct socket_wq wq;
+
+ int vnet_hdr_sz;
#ifdef TUN_DEBUG
int debug;
@@ -323,7 +326,7 @@ static void tun_net_uninit(struct net_device *dev)
/* Inform the methods they need to stop using the dev.
*/
if (tfile) {
- wake_up_all(&tun->socket.wait);
+ wake_up_all(&tun->wq.wait);
if (atomic_dec_and_test(&tfile->count))
__tun_detach(tun);
}
@@ -393,12 +396,11 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
/* Enqueue packet */
skb_queue_tail(&tun->socket.sk->sk_receive_queue, skb);
- dev->trans_start = jiffies;
/* Notify and wake up reader process */
if (tun->flags & TUN_FASYNC)
kill_fasync(&tun->fasync, SIGIO, POLL_IN);
- wake_up_interruptible_poll(&tun->socket.wait, POLLIN |
+ wake_up_interruptible_poll(&tun->wq.wait, POLLIN |
POLLRDNORM | POLLRDBAND);
return NETDEV_TX_OK;
@@ -415,7 +417,6 @@ static void tun_net_mclist(struct net_device *dev)
* _rx_ path and has nothing to do with the _tx_ path.
* In rx path we always accept everything userspace gives us.
*/
- return;
}
#define MIN_MTU 68
@@ -498,7 +499,7 @@ static unsigned int tun_chr_poll(struct file *file, poll_table * wait)
DBG(KERN_INFO "%s: tun_chr_poll\n", tun->dev->name);
- poll_wait(file, &tun->socket.wait, wait);
+ poll_wait(file, &tun->wq.wait, wait);
if (!skb_queue_empty(&sk->sk_receive_queue))
mask |= POLLIN | POLLRDNORM;
@@ -525,6 +526,8 @@ static inline struct sk_buff *tun_alloc_skb(struct tun_struct *tun,
struct sk_buff *skb;
int err;
+ sock_update_classid(sk);
+
/* Under a page? Don't bother with paged skb. */
if (prepad + len < PAGE_SIZE || !linear)
linear = len;
@@ -563,7 +566,7 @@ static __inline__ ssize_t tun_get_user(struct tun_struct *tun,
}
if (tun->flags & TUN_VNET_HDR) {
- if ((len -= sizeof(gso)) > count)
+ if ((len -= tun->vnet_hdr_sz) > count)
return -EINVAL;
if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso)))
@@ -575,7 +578,7 @@ static __inline__ ssize_t tun_get_user(struct tun_struct *tun,
if (gso.hdr_len > len)
return -EINVAL;
- offset += sizeof(gso);
+ offset += tun->vnet_hdr_sz;
}
if ((tun->flags & TUN_TYPE_MASK) == TUN_TAP_DEV) {
@@ -718,7 +721,7 @@ static __inline__ ssize_t tun_put_user(struct tun_struct *tun,
if (tun->flags & TUN_VNET_HDR) {
struct virtio_net_hdr gso = { 0 }; /* no info leak */
- if ((len -= sizeof(gso)) < 0)
+ if ((len -= tun->vnet_hdr_sz) < 0)
return -EINVAL;
if (skb_is_gso(skb)) {
@@ -749,7 +752,7 @@ static __inline__ ssize_t tun_put_user(struct tun_struct *tun,
if (unlikely(memcpy_toiovecend(iv, (void *)&gso, total,
sizeof(gso))))
return -EFAULT;
- total += sizeof(gso);
+ total += tun->vnet_hdr_sz;
}
len = min_t(int, skb->len, len);
@@ -773,7 +776,7 @@ static ssize_t tun_do_read(struct tun_struct *tun,
DBG(KERN_INFO "%s: tun_chr_read\n", tun->dev->name);
- add_wait_queue(&tun->socket.wait, &wait);
+ add_wait_queue(&tun->wq.wait, &wait);
while (len) {
current->state = TASK_INTERRUPTIBLE;
@@ -804,7 +807,7 @@ static ssize_t tun_do_read(struct tun_struct *tun,
}
current->state = TASK_RUNNING;
- remove_wait_queue(&tun->socket.wait, &wait);
+ remove_wait_queue(&tun->wq.wait, &wait);
return ret;
}
@@ -861,6 +864,7 @@ static struct rtnl_link_ops tun_link_ops __read_mostly = {
static void tun_sock_write_space(struct sock *sk)
{
struct tun_struct *tun;
+ wait_queue_head_t *wqueue;
if (!sock_writeable(sk))
return;
@@ -868,8 +872,9 @@ static void tun_sock_write_space(struct sock *sk)
if (!test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags))
return;
- if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
- wake_up_interruptible_sync_poll(sk->sk_sleep, POLLOUT |
+ wqueue = sk_sleep(sk);
+ if (wqueue && waitqueue_active(wqueue))
+ wake_up_interruptible_sync_poll(wqueue, POLLOUT |
POLLWRNORM | POLLWRBAND);
tun = tun_sk(sk)->tun;
@@ -1033,13 +1038,15 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
tun->dev = dev;
tun->flags = flags;
tun->txflt.count = 0;
+ tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
err = -ENOMEM;
sk = sk_alloc(net, AF_UNSPEC, GFP_KERNEL, &tun_proto);
if (!sk)
goto err_free_dev;
- init_waitqueue_head(&tun->socket.wait);
+ tun->socket.wq = &tun->wq;
+ init_waitqueue_head(&tun->wq.wait);
tun->socket.ops = &tun_socket_ops;
sock_init_data(&tun->socket, sk);
sk->sk_write_space = tun_sock_write_space;
@@ -1174,6 +1181,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
struct sock_fprog fprog;
struct ifreq ifr;
int sndbuf;
+ int vnet_hdr_sz;
int ret;
if (cmd == TUNSETIFF || _IOC_TYPE(cmd) == 0x89)
@@ -1319,6 +1327,25 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
tun->socket.sk->sk_sndbuf = sndbuf;
break;
+ case TUNGETVNETHDRSZ:
+ vnet_hdr_sz = tun->vnet_hdr_sz;
+ if (copy_to_user(argp, &vnet_hdr_sz, sizeof(vnet_hdr_sz)))
+ ret = -EFAULT;
+ break;
+
+ case TUNSETVNETHDRSZ:
+ if (copy_from_user(&vnet_hdr_sz, argp, sizeof(vnet_hdr_sz))) {
+ ret = -EFAULT;
+ break;
+ }
+ if (vnet_hdr_sz < (int)sizeof(struct virtio_net_hdr)) {
+ ret = -EINVAL;
+ break;
+ }
+
+ tun->vnet_hdr_sz = vnet_hdr_sz;
+ break;
+
case TUNATTACHFILTER:
/* Can be set only for TAPs */
ret = -EINVAL;
@@ -1342,7 +1369,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
default:
ret = -EINVAL;
break;
- };
+ }
unlock:
rtnl_unlock();
@@ -1624,3 +1651,4 @@ MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR(DRV_COPYRIGHT);
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(TUN_MINOR);
+MODULE_ALIAS("devname:net/tun");
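The tun changes above make the virtio-net header size a per-device property: it defaults to sizeof(struct virtio_net_hdr), can be read or changed with the new TUNGETVNETHDRSZ/TUNSETVNETHDRSZ ioctls, and tun_get_user()/tun_put_user() now consume and produce vnet_hdr_sz bytes of header instead of a fixed sizeof(gso). A minimal userspace sketch of opting into a larger header; the helper name, device name and the choice of the mergeable-rx-buffer header are assumptions for illustration, not part of the patch:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/if.h>
#include <linux/if_tun.h>
#include <linux/virtio_net.h>

/* Illustrative only: grow the vnet header on a freshly attached TAP device. */
static int open_tap_with_mrg_hdr(const char *name)
{
	struct ifreq ifr;
	int hdr_sz = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	int fd = open("/dev/net/tun", O_RDWR);

	if (fd < 0)
		return -1;

	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_VNET_HDR;
	strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);

	if (ioctl(fd, TUNSETIFF, &ifr) < 0 ||
	    ioctl(fd, TUNSETVNETHDRSZ, &hdr_sz) < 0) {
		close(fd);
		return -1;
	}
	return fd;	/* every read()/write() now starts with hdr_sz header bytes */
}
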
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index 98d818daa77e..22bde49262c0 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -881,8 +881,6 @@ typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
wmb();
iowrite32(txRing->lastWrite, tp->tx_ioaddr + txRing->writeRegister);
- dev->trans_start = jiffies;
-
/* If we don't have room to put the worst case packet on the
* queue, then we must stop the queue. We need 2 extra
* descriptors -- one to prevent ring wrap, and one for the
@@ -920,11 +918,11 @@ typhoon_set_rx_mode(struct net_device *dev)
/* Too many to match, or accept all multicasts. */
filter |= TYPHOON_RX_FILTER_ALL_MCAST;
} else if (!netdev_mc_empty(dev)) {
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
memset(mc_filter, 0, sizeof(mc_filter));
- netdev_for_each_mc_addr(mclist, dev) {
- int bit = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f;
+ netdev_for_each_mc_addr(ha, dev) {
+ int bit = ether_crc(ETH_ALEN, ha->addr) & 0x3f;
mc_filter[bit >> 5] |= 1 << (bit & 0x1f);
}
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index 1b0aef37e495..4a34833b85dd 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -1999,7 +1999,7 @@ static void ucc_geth_memclean(struct ucc_geth_private *ugeth)
static void ucc_geth_set_multi(struct net_device *dev)
{
struct ucc_geth_private *ugeth;
- struct dev_mc_list *dmi;
+ struct netdev_hw_addr *ha;
struct ucc_fast __iomem *uf_regs;
struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
@@ -2028,16 +2028,16 @@ static void ucc_geth_set_multi(struct net_device *dev)
out_be32(&p_82xx_addr_filt->gaddr_h, 0x0);
out_be32(&p_82xx_addr_filt->gaddr_l, 0x0);
- netdev_for_each_mc_addr(dmi, dev) {
+ netdev_for_each_mc_addr(ha, dev) {
/* Only support group multicast for now.
*/
- if (!(dmi->dmi_addr[0] & 1))
+ if (!(ha->addr[0] & 1))
continue;
/* Ask CPM to run CRC and set bit in
* filter mask.
*/
- hw_add_addr_in_hash(ugeth, dmi->dmi_addr);
+ hw_add_addr_in_hash(ugeth, ha->addr);
}
}
}
@@ -3148,8 +3148,6 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* set bd status and length */
out_be32((u32 __iomem *)bd, bd_status);
- dev->trans_start = jiffies;
-
/* Move to next BD in the ring */
if (!(bd_status & T_W))
bd += sizeof(struct qe_bd);
@@ -3721,7 +3719,7 @@ static const struct net_device_ops ucc_geth_netdev_ops = {
static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *match)
{
struct device *device = &ofdev->dev;
- struct device_node *np = ofdev->node;
+ struct device_node *np = ofdev->dev.of_node;
struct net_device *dev = NULL;
struct ucc_geth_private *ugeth = NULL;
struct ucc_geth_info *ug_info;
@@ -3883,7 +3881,7 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma
}
if (netif_msg_probe(&debug))
- printk(KERN_INFO "ucc_geth: UCC%1d at 0x%8x (irq = %d) \n",
+ printk(KERN_INFO "ucc_geth: UCC%1d at 0x%8x (irq = %d)\n",
ug_info->uf_info.ucc_num + 1, ug_info->uf_info.regs,
ug_info->uf_info.irq);
@@ -3965,8 +3963,11 @@ static struct of_device_id ucc_geth_match[] = {
MODULE_DEVICE_TABLE(of, ucc_geth_match);
static struct of_platform_driver ucc_geth_driver = {
- .name = DRV_NAME,
- .match_table = ucc_geth_match,
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = ucc_geth_match,
+ },
.probe = ucc_geth_probe,
.remove = ucc_geth_remove,
.suspend = ucc_geth_suspend,
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
index 35f56fc82803..1f802e90474c 100644
--- a/drivers/net/usb/asix.c
+++ b/drivers/net/usb/asix.c
@@ -224,10 +224,9 @@ static int asix_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
cmd, value, index, size);
if (data) {
- buf = kmalloc(size, GFP_KERNEL);
+ buf = kmemdup(data, size, GFP_KERNEL);
if (!buf)
goto out;
- memcpy(buf, data, size);
}
err = usb_control_msg(
@@ -322,8 +321,29 @@ static int asix_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
/* get the packet length */
size = (u16) (header & 0x0000ffff);
- if ((skb->len) - ((size + 1) & 0xfffe) == 0)
+ if ((skb->len) - ((size + 1) & 0xfffe) == 0) {
+ u8 alignment = (unsigned long)skb->data & 0x3;
+ if (alignment != 0x2) {
+ /*
+ * not 16bit aligned so use the room provided by
+ * the 32 bit header to align the data
+ *
+ * note we want 16bit alignment as MAC header is
+				 * 14 bytes, thus the ip header will be aligned on
+				 * a 32bit boundary, so accessing ip header elements
+				 * through a cast to struct iphdr won't cause
+				 * unaligned accesses.
+ */
+ u8 realignment = (alignment + 2) & 0x3;
+ memmove(skb->data - realignment,
+ skb->data,
+ size);
+ skb->data -= realignment;
+ skb_set_tail_pointer(skb, size);
+ }
return 2;
+ }
+
if (size > ETH_FRAME_LEN) {
netdev_err(dev->net, "asix_rx_fixup() Bad RX Length %d\n",
size);
@@ -331,7 +351,18 @@ static int asix_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
}
ax_skb = skb_clone(skb, GFP_ATOMIC);
if (ax_skb) {
+ u8 alignment = (unsigned long)packet & 0x3;
ax_skb->len = size;
+
+ if (alignment != 0x2) {
+ /*
+				 * not 16bit aligned so use the room provided by
+ * the 32 bit header to align the data
+ */
+ u8 realignment = (alignment + 2) & 0x3;
+ memmove(packet - realignment, packet, size);
+ packet -= realignment;
+ }
ax_skb->data = packet;
skb_set_tail_pointer(ax_skb, size);
usbnet_skb_return(dev, ax_skb);
@@ -558,16 +589,14 @@ static void asix_set_multicast(struct net_device *net)
* for our 8 byte filter buffer
* to avoid allocating memory that
* is tricky to free later */
- struct dev_mc_list *mc_list;
+ struct netdev_hw_addr *ha;
u32 crc_bits;
memset(data->multi_filter, 0, AX_MCAST_FILTER_SIZE);
/* Build the multicast hash filter. */
- netdev_for_each_mc_addr(mc_list, net) {
- crc_bits =
- ether_crc(ETH_ALEN,
- mc_list->dmi_addr) >> 26;
+ netdev_for_each_mc_addr(ha, net) {
+ crc_bits = ether_crc(ETH_ALEN, ha->addr) >> 26;
data->multi_filter[crc_bits >> 3] |=
1 << (crc_bits & 7);
}
@@ -794,16 +823,14 @@ static void ax88172_set_multicast(struct net_device *net)
* for our 8 byte filter buffer
* to avoid allocating memory that
* is tricky to free later */
- struct dev_mc_list *mc_list;
+ struct netdev_hw_addr *ha;
u32 crc_bits;
memset(data->multi_filter, 0, AX_MCAST_FILTER_SIZE);
/* Build the multicast hash filter. */
- netdev_for_each_mc_addr(mc_list, net) {
- crc_bits =
- ether_crc(ETH_ALEN,
- mc_list->dmi_addr) >> 26;
+ netdev_for_each_mc_addr(ha, net) {
+ crc_bits = ether_crc(ETH_ALEN, ha->addr) >> 26;
data->multi_filter[crc_bits >> 3] |=
1 << (crc_bits & 7);
}
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index 602e123b2741..97687d335903 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -629,7 +629,7 @@ static void catc_multicast(unsigned char *addr, u8 *multicast)
static void catc_set_multicast_list(struct net_device *netdev)
{
struct catc *catc = netdev_priv(netdev);
- struct dev_mc_list *mc;
+ struct netdev_hw_addr *ha;
u8 broadcast[6];
u8 rx = RxEnable | RxPolarity | RxMultiCast;
@@ -647,8 +647,8 @@ static void catc_set_multicast_list(struct net_device *netdev)
if (netdev->flags & IFF_ALLMULTI) {
memset(catc->multicast, 0xff, 64);
} else {
- netdev_for_each_mc_addr(mc, netdev) {
- u32 crc = ether_crc_le(6, mc->dmi_addr);
+ netdev_for_each_mc_addr(ha, netdev) {
+ u32 crc = ether_crc_le(6, ha->addr);
if (!catc->is_f5u011) {
catc->multicast[(crc >> 3) & 0x3f] |= 1 << (crc & 7);
} else {
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 3547cf13d219..b3fe0de40469 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -64,6 +64,11 @@ static int is_wireless_rndis(struct usb_interface_descriptor *desc)
#endif
+static const u8 mbm_guid[16] = {
+ 0xa3, 0x17, 0xa8, 0x8b, 0x04, 0x5e, 0x4f, 0x01,
+ 0xa6, 0x07, 0xc0, 0xff, 0xcb, 0x7e, 0x39, 0x2a,
+};
+
/*
* probes control interface, claims data interface, collects the bulk
* endpoints, activates data interface (if needed), maybe sets MTU.
@@ -79,6 +84,8 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
int status;
int rndis;
struct usb_driver *driver = driver_of(intf);
+ struct usb_cdc_mdlm_desc *desc = NULL;
+ struct usb_cdc_mdlm_detail_desc *detail = NULL;
if (sizeof dev->data < sizeof *info)
return -EDOM;
@@ -229,6 +236,34 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
* side link address we were given.
*/
break;
+ case USB_CDC_MDLM_TYPE:
+ if (desc) {
+ dev_dbg(&intf->dev, "extra MDLM descriptor\n");
+ goto bad_desc;
+ }
+
+ desc = (void *)buf;
+
+ if (desc->bLength != sizeof(*desc))
+ goto bad_desc;
+
+ if (memcmp(&desc->bGUID, mbm_guid, 16))
+ goto bad_desc;
+ break;
+ case USB_CDC_MDLM_DETAIL_TYPE:
+ if (detail) {
+ dev_dbg(&intf->dev, "extra MDLM detail descriptor\n");
+ goto bad_desc;
+ }
+
+ detail = (void *)buf;
+
+ if (detail->bGuidDescriptorType == 0) {
+ if (detail->bLength < (sizeof(*detail) + 1))
+ goto bad_desc;
+ } else
+ goto bad_desc;
+ break;
}
next_desc:
len -= buf [0]; /* bLength */
@@ -543,80 +578,10 @@ static const struct usb_device_id products [] = {
USB_CDC_PROTO_NONE),
.driver_info = (unsigned long) &cdc_info,
}, {
- /* Ericsson F3507g */
- USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1900, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long) &mbm_info,
-}, {
- /* Ericsson F3507g ver. 2 */
- USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1902, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long) &mbm_info,
-}, {
- /* Ericsson F3607gw */
- USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1904, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long) &mbm_info,
-}, {
- /* Ericsson F3607gw ver 2 */
- USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1905, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long) &mbm_info,
-}, {
- /* Ericsson F3607gw ver 3 */
- USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1906, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long) &mbm_info,
-}, {
- /* Ericsson F3307 */
- USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x190a, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long) &mbm_info,
-}, {
- /* Ericsson F3307 ver 2 */
- USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1909, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long) &mbm_info,
-}, {
- /* Ericsson C3607w */
- USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1049, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long) &mbm_info,
-}, {
- /* Ericsson C3607w ver 2 */
- USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x190b, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long) &mbm_info,
-}, {
- /* Toshiba F3507g */
- USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x130b, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long) &mbm_info,
-}, {
- /* Toshiba F3607gw */
- USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x130c, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long) &mbm_info,
-}, {
- /* Toshiba F3607gw ver 2 */
- USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x1311, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long) &mbm_info,
-}, {
- /* Dell F3507g */
- USB_DEVICE_AND_INTERFACE_INFO(0x413c, 0x8147, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long) &mbm_info,
-}, {
- /* Dell F3607gw */
- USB_DEVICE_AND_INTERFACE_INFO(0x413c, 0x8183, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long) &mbm_info,
-}, {
- /* Dell F3607gw ver 2 */
- USB_DEVICE_AND_INTERFACE_INFO(0x413c, 0x8184, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long) &mbm_info,
+ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MDLM,
+ USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&mbm_info,
+
},
{ }, // END
};
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 5dfed9297b22..02b622e3b9fb 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -93,10 +93,9 @@ static int dm_write(struct usbnet *dev, u8 reg, u16 length, void *data)
netdev_dbg(dev->net, "dm_write() reg=0x%02x, length=%d\n", reg, length);
if (data) {
- buf = kmalloc(length, GFP_KERNEL);
+ buf = kmemdup(data, length, GFP_KERNEL);
if (!buf)
goto out;
- memcpy(buf, data, length);
}
err = usb_control_msg(dev->udev,
@@ -387,10 +386,10 @@ static void dm9601_set_multicast(struct net_device *net)
netdev_mc_count(net) > DM_MAX_MCAST) {
rx_ctl |= 0x04;
} else if (!netdev_mc_empty(net)) {
- struct dev_mc_list *mc_list;
+ struct netdev_hw_addr *ha;
- netdev_for_each_mc_addr(mc_list, net) {
- u32 crc = ether_crc(ETH_ALEN, mc_list->dmi_addr) >> 26;
+ netdev_for_each_mc_addr(ha, net) {
+ u32 crc = ether_crc(ETH_ALEN, ha->addr) >> 26;
hashes[crc >> 3] |= 1 << (crc & 0x7);
}
}
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index be0cc99e881a..0a3c41faea9c 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -475,6 +475,9 @@ static const struct usb_device_id hso_ids[] = {
{USB_DEVICE(0x0af0, 0x8302)},
{USB_DEVICE(0x0af0, 0x8304)},
{USB_DEVICE(0x0af0, 0x8400)},
+ {USB_DEVICE(0x0af0, 0x8600)},
+ {USB_DEVICE(0x0af0, 0x8800)},
+ {USB_DEVICE(0x0af0, 0x8900)},
{USB_DEVICE(0x0af0, 0xd035)},
{USB_DEVICE(0x0af0, 0xd055)},
{USB_DEVICE(0x0af0, 0xd155)},
@@ -834,8 +837,6 @@ static netdev_tx_t hso_net_start_xmit(struct sk_buff *skb,
} else {
net->stats.tx_packets++;
net->stats.tx_bytes += skb->len;
- /* And tell the kernel when the last transmit started. */
- net->trans_start = jiffies;
}
dev_kfree_skb(skb);
/* we're done */
@@ -1474,7 +1475,6 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
spin_unlock_irqrestore(&serial->serial_lock, flags);
/* done */
- return;
}
/* how many characters in the buffer */
@@ -1994,7 +1994,6 @@ static void hso_std_serial_write_bulk_callback(struct urb *urb)
hso_kick_transmit(serial);
D1(" ");
- return;
}
/* called for writing diag or CS serial port */
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index 418825d26f90..197c352c47fb 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -128,17 +128,13 @@ static int ipheth_alloc_urbs(struct ipheth_device *iphone)
if (rx_urb == NULL)
goto free_tx_urb;
- tx_buf = usb_buffer_alloc(iphone->udev,
- IPHETH_BUF_SIZE,
- GFP_KERNEL,
- &tx_urb->transfer_dma);
+ tx_buf = usb_alloc_coherent(iphone->udev, IPHETH_BUF_SIZE,
+ GFP_KERNEL, &tx_urb->transfer_dma);
if (tx_buf == NULL)
goto free_rx_urb;
- rx_buf = usb_buffer_alloc(iphone->udev,
- IPHETH_BUF_SIZE,
- GFP_KERNEL,
- &rx_urb->transfer_dma);
+ rx_buf = usb_alloc_coherent(iphone->udev, IPHETH_BUF_SIZE,
+ GFP_KERNEL, &rx_urb->transfer_dma);
if (rx_buf == NULL)
goto free_tx_buf;
@@ -150,8 +146,8 @@ static int ipheth_alloc_urbs(struct ipheth_device *iphone)
return 0;
free_tx_buf:
- usb_buffer_free(iphone->udev, IPHETH_BUF_SIZE, tx_buf,
- tx_urb->transfer_dma);
+ usb_free_coherent(iphone->udev, IPHETH_BUF_SIZE, tx_buf,
+ tx_urb->transfer_dma);
free_rx_urb:
usb_free_urb(rx_urb);
free_tx_urb:
@@ -162,10 +158,10 @@ error_nomem:
static void ipheth_free_urbs(struct ipheth_device *iphone)
{
- usb_buffer_free(iphone->udev, IPHETH_BUF_SIZE, iphone->rx_buf,
- iphone->rx_urb->transfer_dma);
- usb_buffer_free(iphone->udev, IPHETH_BUF_SIZE, iphone->tx_buf,
- iphone->tx_urb->transfer_dma);
+ usb_free_coherent(iphone->udev, IPHETH_BUF_SIZE, iphone->rx_buf,
+ iphone->rx_urb->transfer_dma);
+ usb_free_coherent(iphone->udev, IPHETH_BUF_SIZE, iphone->tx_buf,
+ iphone->tx_urb->transfer_dma);
usb_free_urb(iphone->rx_urb);
usb_free_urb(iphone->tx_urb);
}
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index c4c334d9770f..d6078b8c4273 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -856,7 +856,6 @@ skip:
{
kaweth->stats.tx_packets++;
kaweth->stats.tx_bytes += skb->len;
- net->trans_start = jiffies;
}
spin_unlock_irq(&kaweth->device_lock);
@@ -1156,13 +1155,13 @@ err_fw:
if (!kaweth->irq_urb)
goto err_tx_and_rx;
- kaweth->intbuffer = usb_buffer_alloc( kaweth->dev,
+ kaweth->intbuffer = usb_alloc_coherent( kaweth->dev,
INTBUFFERSIZE,
GFP_KERNEL,
&kaweth->intbufferhandle);
if (!kaweth->intbuffer)
goto err_tx_and_rx_and_irq;
- kaweth->rx_buf = usb_buffer_alloc( kaweth->dev,
+ kaweth->rx_buf = usb_alloc_coherent( kaweth->dev,
KAWETH_BUF_SIZE,
GFP_KERNEL,
&kaweth->rxbufferhandle);
@@ -1203,9 +1202,9 @@ err_fw:
err_intfdata:
usb_set_intfdata(intf, NULL);
- usb_buffer_free(kaweth->dev, KAWETH_BUF_SIZE, (void *)kaweth->rx_buf, kaweth->rxbufferhandle);
+ usb_free_coherent(kaweth->dev, KAWETH_BUF_SIZE, (void *)kaweth->rx_buf, kaweth->rxbufferhandle);
err_all_but_rxbuf:
- usb_buffer_free(kaweth->dev, INTBUFFERSIZE, (void *)kaweth->intbuffer, kaweth->intbufferhandle);
+ usb_free_coherent(kaweth->dev, INTBUFFERSIZE, (void *)kaweth->intbuffer, kaweth->intbufferhandle);
err_tx_and_rx_and_irq:
usb_free_urb(kaweth->irq_urb);
err_tx_and_rx:
@@ -1242,8 +1241,8 @@ static void kaweth_disconnect(struct usb_interface *intf)
usb_free_urb(kaweth->tx_urb);
usb_free_urb(kaweth->irq_urb);
- usb_buffer_free(kaweth->dev, KAWETH_BUF_SIZE, (void *)kaweth->rx_buf, kaweth->rxbufferhandle);
- usb_buffer_free(kaweth->dev, INTBUFFERSIZE, (void *)kaweth->intbuffer, kaweth->intbufferhandle);
+ usb_free_coherent(kaweth->dev, KAWETH_BUF_SIZE, (void *)kaweth->rx_buf, kaweth->rxbufferhandle);
+ usb_free_coherent(kaweth->dev, INTBUFFERSIZE, (void *)kaweth->intbuffer, kaweth->intbufferhandle);
free_netdev(netdev);
}
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
index 9f24e3f871e1..a6281e3987b5 100644
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@ -142,12 +142,10 @@ static int mcs7830_set_reg(struct usbnet *dev, u16 index, u16 size, const void *
int ret;
void *buffer;
- buffer = kmalloc(size, GFP_NOIO);
+ buffer = kmemdup(data, size, GFP_NOIO);
if (buffer == NULL)
return -ENOMEM;
- memcpy(buffer, data, size);
-
ret = usb_control_msg(xdev, usb_sndctrlpipe(xdev, 0), MCS7830_WR_BREQ,
MCS7830_WR_BMREQ, 0x0000, index, buffer,
size, MCS7830_CTRL_TIMEOUT);
@@ -453,12 +451,12 @@ static void mcs7830_data_set_multicast(struct net_device *net)
* for our 8 byte filter buffer
* to avoid allocating memory that
* is tricky to free later */
- struct dev_mc_list *mc_list;
+ struct netdev_hw_addr *ha;
u32 crc_bits;
/* Build the multicast hash filter. */
- netdev_for_each_mc_addr(mc_list, net) {
- crc_bits = ether_crc(ETH_ALEN, mc_list->dmi_addr) >> 26;
+ netdev_for_each_mc_addr(ha, net) {
+ crc_bits = ether_crc(ETH_ALEN, ha->addr) >> 26;
data->multi_filter[crc_bits >> 3] |= 1 << (crc_bits & 7);
}
}
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 41838773b568..974d17f0263e 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -203,13 +203,12 @@ static int set_registers(pegasus_t * pegasus, __u16 indx, __u16 size,
char *buffer;
DECLARE_WAITQUEUE(wait, current);
- buffer = kmalloc(size, GFP_KERNEL);
+ buffer = kmemdup(data, size, GFP_KERNEL);
if (!buffer) {
netif_warn(pegasus, drv, pegasus->net,
"out of memory in %s\n", __func__);
return -ENOMEM;
}
- memcpy(buffer, data, size);
add_wait_queue(&pegasus->ctrl_wait, &wait);
set_current_state(TASK_UNINTERRUPTIBLE);
@@ -255,13 +254,12 @@ static int set_register(pegasus_t * pegasus, __u16 indx, __u8 data)
char *tmp;
DECLARE_WAITQUEUE(wait, current);
- tmp = kmalloc(1, GFP_KERNEL);
+ tmp = kmemdup(&data, 1, GFP_KERNEL);
if (!tmp) {
netif_warn(pegasus, drv, pegasus->net,
"out of memory in %s\n", __func__);
return -ENOMEM;
}
- memcpy(tmp, &data, 1);
add_wait_queue(&pegasus->ctrl_wait, &wait);
set_current_state(TASK_UNINTERRUPTIBLE);
while (pegasus->flags & ETH_REGS_CHANGED)
@@ -808,7 +806,7 @@ static void write_bulk_callback(struct urb *urb)
break;
}
- net->trans_start = jiffies;
+ net->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(net);
}
@@ -909,7 +907,6 @@ static netdev_tx_t pegasus_start_xmit(struct sk_buff *skb,
} else {
pegasus->stats.tx_packets++;
pegasus->stats.tx_bytes += skb->len;
- net->trans_start = jiffies;
}
dev_kfree_skb(skb);
diff --git a/drivers/net/usb/pegasus.h b/drivers/net/usb/pegasus.h
index b90d8766ab74..29f5211e645b 100644
--- a/drivers/net/usb/pegasus.h
+++ b/drivers/net/usb/pegasus.h
@@ -256,7 +256,7 @@ PEGASUS_DEV( "IO DATA USB ET/TX", VENDOR_IODATA, 0x0904,
DEFAULT_GPIO_RESET )
PEGASUS_DEV( "IO DATA USB ET/TX-S", VENDOR_IODATA, 0x0913,
DEFAULT_GPIO_RESET | PEGASUS_II )
-PEGASUS_DEV( "IO DATA USB ETX-US2", VENDOR_IODATA, 0x092a,
+PEGASUS_DEV( "IO DATA USB ETX-US2", VENDOR_IODATA, 0x093a,
DEFAULT_GPIO_RESET | PEGASUS_II )
PEGASUS_DEV( "Kingston KNU101TX Ethernet", VENDOR_KINGSTON, 0x000a,
DEFAULT_GPIO_RESET)
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index dd8a4adf48ca..28d3ee175e7b 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -104,8 +104,10 @@ static void rndis_msg_indicate(struct usbnet *dev, struct rndis_indicate *msg,
int rndis_command(struct usbnet *dev, struct rndis_msg_hdr *buf, int buflen)
{
struct cdc_state *info = (void *) &dev->data;
+ struct usb_cdc_notification notification;
int master_ifnum;
int retval;
+ int partial;
unsigned count;
__le32 rsp;
u32 xid = 0, msg_len, request_id;
@@ -133,13 +135,17 @@ int rndis_command(struct usbnet *dev, struct rndis_msg_hdr *buf, int buflen)
if (unlikely(retval < 0 || xid == 0))
return retval;
- // FIXME Seems like some devices discard responses when
- // we time out and cancel our "get response" requests...
- // so, this is fragile. Probably need to poll for status.
+ /* Some devices don't respond on the control channel until
+ * polled on the status channel, so do that first. */
+ retval = usb_interrupt_msg(
+ dev->udev,
+ usb_rcvintpipe(dev->udev, dev->status->desc.bEndpointAddress),
+ &notification, sizeof(notification), &partial,
+ RNDIS_CONTROL_TIMEOUT_MS);
+ if (unlikely(retval < 0))
+ return retval;
- /* ignore status endpoint, just poll the control channel;
- * the request probably completed immediately
- */
+ /* Poll the control channel; the request probably completed immediately */
rsp = buf->msg_type | RNDIS_MSG_COMPLETION;
for (count = 0; count < 10; count++) {
memset(buf, 0, CONTROL_BUFFER_SIZE);
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 35b98b1b79e4..753ee6eb7edd 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -445,14 +445,14 @@ static void smsc75xx_set_multicast(struct net_device *netdev)
netif_dbg(dev, drv, dev->net, "receive all multicast enabled");
pdata->rfe_ctl |= RFE_CTL_AM | RFE_CTL_DPF;
} else if (!netdev_mc_empty(dev->net)) {
- struct dev_mc_list *mc_list;
+ struct netdev_hw_addr *ha;
netif_dbg(dev, drv, dev->net, "receive multicast hash filter");
pdata->rfe_ctl |= RFE_CTL_MHF | RFE_CTL_DPF;
- netdev_for_each_mc_addr(mc_list, netdev) {
- u32 bitnum = smsc75xx_hash(mc_list->dmi_addr);
+ netdev_for_each_mc_addr(ha, netdev) {
+ u32 bitnum = smsc75xx_hash(ha->addr);
pdata->multicast_hash_table[bitnum / 32] |=
(1 << (bitnum % 32));
}
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 3135af63d378..12a3c88c5282 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -385,13 +385,13 @@ static void smsc95xx_set_multicast(struct net_device *netdev)
pdata->mac_cr |= MAC_CR_MCPAS_;
pdata->mac_cr &= ~(MAC_CR_PRMS_ | MAC_CR_HPFILT_);
} else if (!netdev_mc_empty(dev->net)) {
- struct dev_mc_list *mc_list;
+ struct netdev_hw_addr *ha;
pdata->mac_cr |= MAC_CR_HPFILT_;
pdata->mac_cr &= ~(MAC_CR_PRMS_ | MAC_CR_MCPAS_);
- netdev_for_each_mc_addr(mc_list, netdev) {
- u32 bitnum = smsc95xx_hash(mc_list->dmi_addr);
+ netdev_for_each_mc_addr(ha, netdev) {
+ u32 bitnum = smsc95xx_hash(ha->addr);
u32 mask = 0x01 << (bitnum & 0x1F);
if (bitnum & 0x20)
hash_hi |= mask;
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 7177abc78dc6..a95c73de5824 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1069,12 +1069,15 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
* NOTE: strictly conforming cdc-ether devices should expect
* the ZLP here, but ignore the one-byte packet.
*/
- if (!(info->flags & FLAG_SEND_ZLP) && (length % dev->maxpacket) == 0) {
- urb->transfer_buffer_length++;
- if (skb_tailroom(skb)) {
- skb->data[skb->len] = 0;
- __skb_put(skb, 1);
- }
+ if (length % dev->maxpacket == 0) {
+ if (!(info->flags & FLAG_SEND_ZLP)) {
+ urb->transfer_buffer_length++;
+ if (skb_tailroom(skb)) {
+ skb->data[skb->len] = 0;
+ __skb_put(skb, 1);
+ }
+ } else
+ urb->transfer_flags |= URB_ZERO_PACKET;
}
spin_lock_irqsave(&dev->txq.lock, flags);
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index f9f0730b53d5..5ec542dd5b50 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -187,7 +187,6 @@ tx_drop:
return NETDEV_TX_OK;
rx_drop:
- kfree_skb(skb);
rcv_stats->rx_dropped++;
return NETDEV_TX_OK;
}
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index 388751aa66e0..4930f9dbc493 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -1209,7 +1209,7 @@ static void rhine_reset_task(struct work_struct *work)
spin_unlock_bh(&rp->lock);
enable_irq(rp->pdev->irq);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
dev->stats.tx_errors++;
netif_wake_queue(dev);
}
@@ -1294,8 +1294,6 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN)
netif_stop_queue(dev);
- dev->trans_start = jiffies;
-
spin_unlock_irqrestore(&rp->lock, flags);
if (debug > 4) {
@@ -1703,11 +1701,11 @@ static void rhine_set_rx_mode(struct net_device *dev)
iowrite32(0xffffffff, ioaddr + MulticastFilter1);
rx_mode = 0x0C;
} else {
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
memset(mc_filter, 0, sizeof(mc_filter));
- netdev_for_each_mc_addr(mclist, dev) {
- int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
+ netdev_for_each_mc_addr(ha, dev) {
+ int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
}
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index bc278d4ee89d..42dffd3e5795 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -719,30 +719,30 @@ static u32 mii_check_media_mode(struct mac_regs __iomem *regs)
u32 status = 0;
u16 ANAR;
- if (!MII_REG_BITS_IS_ON(BMSR_LNK, MII_REG_BMSR, regs))
+ if (!MII_REG_BITS_IS_ON(BMSR_LSTATUS, MII_BMSR, regs))
status |= VELOCITY_LINK_FAIL;
- if (MII_REG_BITS_IS_ON(G1000CR_1000FD, MII_REG_G1000CR, regs))
+ if (MII_REG_BITS_IS_ON(ADVERTISE_1000FULL, MII_CTRL1000, regs))
status |= VELOCITY_SPEED_1000 | VELOCITY_DUPLEX_FULL;
- else if (MII_REG_BITS_IS_ON(G1000CR_1000, MII_REG_G1000CR, regs))
+ else if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF, MII_CTRL1000, regs))
status |= (VELOCITY_SPEED_1000);
else {
- velocity_mii_read(regs, MII_REG_ANAR, &ANAR);
- if (ANAR & ANAR_TXFD)
+ velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
+ if (ANAR & ADVERTISE_100FULL)
status |= (VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL);
- else if (ANAR & ANAR_TX)
+ else if (ANAR & ADVERTISE_100HALF)
status |= VELOCITY_SPEED_100;
- else if (ANAR & ANAR_10FD)
+ else if (ANAR & ADVERTISE_10FULL)
status |= (VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL);
else
status |= (VELOCITY_SPEED_10);
}
- if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, regs)) {
- velocity_mii_read(regs, MII_REG_ANAR, &ANAR);
- if ((ANAR & (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10))
- == (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10)) {
- if (MII_REG_BITS_IS_ON(G1000CR_1000 | G1000CR_1000FD, MII_REG_G1000CR, regs))
+ if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, regs)) {
+ velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
+ if ((ANAR & (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF))
+ == (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF)) {
+ if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF | ADVERTISE_1000FULL, MII_CTRL1000, regs))
status |= VELOCITY_AUTONEG_ENABLE;
}
}
@@ -801,23 +801,23 @@ static void set_mii_flow_control(struct velocity_info *vptr)
/*Enable or Disable PAUSE in ANAR */
switch (vptr->options.flow_cntl) {
case FLOW_CNTL_TX:
- MII_REG_BITS_OFF(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
- MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
+ MII_REG_BITS_OFF(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
+ MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
break;
case FLOW_CNTL_RX:
- MII_REG_BITS_ON(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
- MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
+ MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
+ MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
break;
case FLOW_CNTL_TX_RX:
- MII_REG_BITS_ON(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
- MII_REG_BITS_OFF(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
+ MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
+ MII_REG_BITS_OFF(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
break;
case FLOW_CNTL_DISABLE:
- MII_REG_BITS_OFF(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
- MII_REG_BITS_OFF(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
+ MII_REG_BITS_OFF(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
+ MII_REG_BITS_OFF(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
break;
default:
break;
@@ -832,10 +832,10 @@ static void set_mii_flow_control(struct velocity_info *vptr)
*/
static void mii_set_auto_on(struct velocity_info *vptr)
{
- if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs))
- MII_REG_BITS_ON(BMCR_REAUTO, MII_REG_BMCR, vptr->mac_regs);
+ if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs))
+ MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs);
else
- MII_REG_BITS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs);
+ MII_REG_BITS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs);
}
static u32 check_connection_type(struct mac_regs __iomem *regs)
@@ -860,11 +860,11 @@ static u32 check_connection_type(struct mac_regs __iomem *regs)
else
status |= VELOCITY_SPEED_100;
- if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, regs)) {
- velocity_mii_read(regs, MII_REG_ANAR, &ANAR);
- if ((ANAR & (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10))
- == (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10)) {
- if (MII_REG_BITS_IS_ON(G1000CR_1000 | G1000CR_1000FD, MII_REG_G1000CR, regs))
+ if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, regs)) {
+ velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
+ if ((ANAR & (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF))
+ == (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF)) {
+ if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF | ADVERTISE_1000FULL, MII_CTRL1000, regs))
status |= VELOCITY_AUTONEG_ENABLE;
}
}
@@ -905,7 +905,7 @@ static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status)
*/
if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
- MII_REG_BITS_ON(AUXCR_MDPPS, MII_REG_AUXCR, vptr->mac_regs);
+ MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs);
/*
* If connection type is AUTO
@@ -915,9 +915,9 @@ static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status)
/* clear force MAC mode bit */
BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, &regs->CHIPGCR);
/* set duplex mode of MAC according to duplex mode of MII */
- MII_REG_BITS_ON(ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10, MII_REG_ANAR, vptr->mac_regs);
- MII_REG_BITS_ON(G1000CR_1000FD | G1000CR_1000, MII_REG_G1000CR, vptr->mac_regs);
- MII_REG_BITS_ON(BMCR_SPEED1G, MII_REG_BMCR, vptr->mac_regs);
+ MII_REG_BITS_ON(ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF, MII_ADVERTISE, vptr->mac_regs);
+ MII_REG_BITS_ON(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs);
+ MII_REG_BITS_ON(BMCR_SPEED1000, MII_BMCR, vptr->mac_regs);
/* enable AUTO-NEGO mode */
mii_set_auto_on(vptr);
@@ -952,31 +952,31 @@ static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status)
BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR);
}
- MII_REG_BITS_OFF(G1000CR_1000FD | G1000CR_1000, MII_REG_G1000CR, vptr->mac_regs);
+ MII_REG_BITS_OFF(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs);
if (!(mii_status & VELOCITY_DUPLEX_FULL) && (mii_status & VELOCITY_SPEED_10))
BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG);
else
BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG);
- /* MII_REG_BITS_OFF(BMCR_SPEED1G, MII_REG_BMCR, vptr->mac_regs); */
- velocity_mii_read(vptr->mac_regs, MII_REG_ANAR, &ANAR);
- ANAR &= (~(ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10));
+ /* MII_REG_BITS_OFF(BMCR_SPEED1000, MII_BMCR, vptr->mac_regs); */
+ velocity_mii_read(vptr->mac_regs, MII_ADVERTISE, &ANAR);
+ ANAR &= (~(ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF));
if (mii_status & VELOCITY_SPEED_100) {
if (mii_status & VELOCITY_DUPLEX_FULL)
- ANAR |= ANAR_TXFD;
+ ANAR |= ADVERTISE_100FULL;
else
- ANAR |= ANAR_TX;
+ ANAR |= ADVERTISE_100HALF;
} else {
if (mii_status & VELOCITY_DUPLEX_FULL)
- ANAR |= ANAR_10FD;
+ ANAR |= ADVERTISE_10FULL;
else
- ANAR |= ANAR_10;
+ ANAR |= ADVERTISE_10HALF;
}
- velocity_mii_write(vptr->mac_regs, MII_REG_ANAR, ANAR);
+ velocity_mii_write(vptr->mac_regs, MII_ADVERTISE, ANAR);
/* enable AUTO-NEGO mode */
mii_set_auto_on(vptr);
- /* MII_REG_BITS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs); */
+ /* MII_REG_BITS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs); */
}
/* vptr->mii_status=mii_check_media_mode(vptr->mac_regs); */
/* vptr->mii_status=check_connection_type(vptr->mac_regs); */
@@ -1126,7 +1126,7 @@ static void velocity_set_multi(struct net_device *dev)
struct mac_regs __iomem *regs = vptr->mac_regs;
u8 rx_mode;
int i;
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
writel(0xffffffff, &regs->MARCAM[0]);
@@ -1142,8 +1142,8 @@ static void velocity_set_multi(struct net_device *dev)
mac_get_cam_mask(regs, vptr->mCAMmask);
i = 0;
- netdev_for_each_mc_addr(mclist, dev) {
- mac_set_cam(regs, i + offset, mclist->dmi_addr);
+ netdev_for_each_mc_addr(ha, dev) {
+ mac_set_cam(regs, i + offset, ha->addr);
vptr->mCAMmask[(offset + i) / 8] |= 1 << ((offset + i) & 7);
i++;
}
@@ -1178,36 +1178,36 @@ static void mii_init(struct velocity_info *vptr, u32 mii_status)
/*
* Reset to hardware default
*/
- MII_REG_BITS_OFF((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR, vptr->mac_regs);
+ MII_REG_BITS_OFF((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
/*
* Turn on ECHODIS bit in NWay-forced full mode and turn it
* off it in NWay-forced half mode for NWay-forced v.s.
* legacy-forced issue.
*/
if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
- MII_REG_BITS_ON(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs);
+ MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
else
- MII_REG_BITS_OFF(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs);
+ MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
/*
* Turn on Link/Activity LED enable bit for CIS8201
*/
- MII_REG_BITS_ON(PLED_LALBE, MII_REG_PLED, vptr->mac_regs);
+ MII_REG_BITS_ON(PLED_LALBE, MII_TPISTATUS, vptr->mac_regs);
break;
case PHYID_VT3216_32BIT:
case PHYID_VT3216_64BIT:
/*
* Reset to hardware default
*/
- MII_REG_BITS_ON((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR, vptr->mac_regs);
+ MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
/*
* Turn on ECHODIS bit in NWay-forced full mode and turn it
* off it in NWay-forced half mode for NWay-forced v.s.
* legacy-forced issue
*/
if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
- MII_REG_BITS_ON(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs);
+ MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
else
- MII_REG_BITS_OFF(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs);
+ MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
break;
case PHYID_MARVELL_1000:
@@ -1219,15 +1219,15 @@ static void mii_init(struct velocity_info *vptr, u32 mii_status)
/*
* Reset to hardware default
*/
- MII_REG_BITS_ON((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR, vptr->mac_regs);
+ MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
break;
default:
;
}
- velocity_mii_read(vptr->mac_regs, MII_REG_BMCR, &BMCR);
- if (BMCR & BMCR_ISO) {
- BMCR &= ~BMCR_ISO;
- velocity_mii_write(vptr->mac_regs, MII_REG_BMCR, BMCR);
+ velocity_mii_read(vptr->mac_regs, MII_BMCR, &BMCR);
+ if (BMCR & BMCR_ISOLATE) {
+ BMCR &= ~BMCR_ISOLATE;
+ velocity_mii_write(vptr->mac_regs, MII_BMCR, BMCR);
}
}
@@ -2606,7 +2606,6 @@ static netdev_tx_t velocity_xmit(struct sk_buff *skb,
td_ptr->td_buf[0].size |= TD_QUEUE;
mac_tx_queue_wake(vptr->mac_regs, qnum);
- dev->trans_start = jiffies;
spin_unlock_irqrestore(&vptr->lock, flags);
out:
return NETDEV_TX_OK;
@@ -2953,13 +2952,13 @@ static int velocity_set_wol(struct velocity_info *vptr)
if (vptr->mii_status & VELOCITY_AUTONEG_ENABLE) {
if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
- MII_REG_BITS_ON(AUXCR_MDPPS, MII_REG_AUXCR, vptr->mac_regs);
+ MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs);
- MII_REG_BITS_OFF(G1000CR_1000FD | G1000CR_1000, MII_REG_G1000CR, vptr->mac_regs);
+ MII_REG_BITS_OFF(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs);
}
if (vptr->mii_status & VELOCITY_SPEED_1000)
- MII_REG_BITS_ON(BMCR_REAUTO, MII_REG_BMCR, vptr->mac_regs);
+ MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs);
BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);
diff --git a/drivers/net/via-velocity.h b/drivers/net/via-velocity.h
index ef4a0f64ba16..c38191179fae 100644
--- a/drivers/net/via-velocity.h
+++ b/drivers/net/via-velocity.h
@@ -1240,86 +1240,16 @@ struct velocity_context {
u32 pattern[8];
};
-
-/*
- * MII registers.
- */
-
-
/*
* Registers in the MII (offset unit is WORD)
*/
-#define MII_REG_BMCR 0x00 // physical address
-#define MII_REG_BMSR 0x01 //
-#define MII_REG_PHYID1 0x02 // OUI
-#define MII_REG_PHYID2 0x03 // OUI + Module ID + REV ID
-#define MII_REG_ANAR 0x04 //
-#define MII_REG_ANLPAR 0x05 //
-#define MII_REG_G1000CR 0x09 //
-#define MII_REG_G1000SR 0x0A //
-#define MII_REG_MODCFG 0x10 //
-#define MII_REG_TCSR 0x16 //
-#define MII_REG_PLED 0x1B //
-// NS, MYSON only
-#define MII_REG_PCR 0x17 //
-// ESI only
-#define MII_REG_PCSR 0x17 //
-#define MII_REG_AUXCR 0x1C //
-
// Marvell 88E1000/88E1000S
#define MII_REG_PSCR 0x10 // PHY specific control register
//
-// Bits in the BMCR register
-//
-#define BMCR_RESET 0x8000 //
-#define BMCR_LBK 0x4000 //
-#define BMCR_SPEED100 0x2000 //
-#define BMCR_AUTO 0x1000 //
-#define BMCR_PD 0x0800 //
-#define BMCR_ISO 0x0400 //
-#define BMCR_REAUTO 0x0200 //
-#define BMCR_FDX 0x0100 //
-#define BMCR_SPEED1G 0x0040 //
-//
-// Bits in the BMSR register
-//
-#define BMSR_AUTOCM 0x0020 //
-#define BMSR_LNK 0x0004 //
-
-//
-// Bits in the ANAR register
-//
-#define ANAR_ASMDIR 0x0800 // Asymmetric PAUSE support
-#define ANAR_PAUSE 0x0400 // Symmetric PAUSE Support
-#define ANAR_T4 0x0200 //
-#define ANAR_TXFD 0x0100 //
-#define ANAR_TX 0x0080 //
-#define ANAR_10FD 0x0040 //
-#define ANAR_10 0x0020 //
-//
-// Bits in the ANLPAR register
-//
-#define ANLPAR_ASMDIR 0x0800 // Asymmetric PAUSE support
-#define ANLPAR_PAUSE 0x0400 // Symmetric PAUSE Support
-#define ANLPAR_T4 0x0200 //
-#define ANLPAR_TXFD 0x0100 //
-#define ANLPAR_TX 0x0080 //
-#define ANLPAR_10FD 0x0040 //
-#define ANLPAR_10 0x0020 //
-
-//
-// Bits in the G1000CR register
-//
-#define G1000CR_1000FD 0x0200 // PHY is 1000-T Full-duplex capable
-#define G1000CR_1000 0x0100 // PHY is 1000-T Half-duplex capable
-
-//
-// Bits in the G1000SR register
+// Bits in the Silicon revision register
//
-#define G1000SR_1000FD 0x0800 // LP PHY is 1000-T Full-duplex capable
-#define G1000SR_1000 0x0400 // LP PHY is 1000-T Half-duplex capable
#define TCSR_ECHODIS 0x2000 //
#define AUXCR_MDPPS 0x0004 //
@@ -1338,7 +1268,6 @@ struct velocity_context {
#define PHYID_REV_ID_MASK 0x0000000FUL
-#define PHYID_GET_PHY_REV_ID(i) ((i) & PHYID_REV_ID_MASK)
#define PHYID_GET_PHY_ID(i) ((i) & ~PHYID_REV_ID_MASK)
#define MII_REG_BITS_ON(x,i,p) do {\
@@ -1362,8 +1291,8 @@ struct velocity_context {
#define MII_GET_PHY_ID(p) ({\
u32 id;\
- velocity_mii_read((p),MII_REG_PHYID2,(u16 *) &id);\
- velocity_mii_read((p),MII_REG_PHYID1,((u16 *) &id)+1);\
+ velocity_mii_read((p),MII_PHYSID2,(u16 *) &id);\
+ velocity_mii_read((p),MII_PHYSID1,((u16 *) &id)+1);\
(id);})
/*
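Note: the via-velocity hunks above drop the driver's private MII definitions in favour of the generic ones from <linux/mii.h>: MII_REG_BMCR/BMCR_ISO/BMCR_REAUTO become MII_BMCR/BMCR_ISOLATE/BMCR_ANRESTART, MII_REG_ANAR with ANAR_ASMDIR|ANAR_PAUSE becomes MII_ADVERTISE with ADVERTISE_PAUSE_ASYM|ADVERTISE_PAUSE_CAP, MII_REG_G1000CR becomes MII_CTRL1000 with ADVERTISE_1000FULL/ADVERTISE_1000HALF, and MII_REG_PHYID1/2 become MII_PHYSID1/2. The vendor-specific TCSR and AUXCR offsets are reused under the generic names at the same addresses, MII_SREVISION (0x16) and MII_NCONFIG (0x1c). A minimal sketch of the resulting BMCR handling, assuming <linux/mii.h> and the driver's existing velocity_mii_read()/velocity_mii_write() helpers (the wrapper name is hypothetical and the autoneg restart is folded in purely for illustration):

    static void velocity_phy_unisolate(struct velocity_info *vptr)
    {
            u16 bmcr;

            velocity_mii_read(vptr->mac_regs, MII_BMCR, &bmcr);
            if (bmcr & BMCR_ISOLATE) {
                    /* take the PHY out of isolation and restart autoneg */
                    bmcr &= ~BMCR_ISOLATE;
                    bmcr |= BMCR_ANRESTART;
                    velocity_mii_write(vptr->mac_regs, MII_BMCR, bmcr);
            }
    }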
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index b0577dd1a42d..78eb3190b9b1 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -40,8 +40,7 @@ module_param(gso, bool, 0444);
#define VIRTNET_SEND_COMMAND_SG_MAX 2
-struct virtnet_info
-{
+struct virtnet_info {
struct virtio_device *vdev;
struct virtqueue *rvq, *svq, *cvq;
struct net_device *dev;
@@ -62,6 +61,10 @@ struct virtnet_info
/* Chain pages by the private ptr. */
struct page *pages;
+
+ /* fragments + linear part + virtio header */
+ struct scatterlist rx_sg[MAX_SKB_FRAGS + 2];
+ struct scatterlist tx_sg[MAX_SKB_FRAGS + 2];
};
struct skb_vnet_hdr {
@@ -119,7 +122,7 @@ static void skb_xmit_done(struct virtqueue *svq)
struct virtnet_info *vi = svq->vdev->priv;
/* Suppress further interrupts. */
- svq->vq_ops->disable_cb(svq);
+ virtqueue_disable_cb(svq);
/* We were probably waiting for more output buffers. */
netif_wake_queue(vi->dev);
@@ -207,7 +210,7 @@ static int receive_mergeable(struct virtnet_info *vi, struct sk_buff *skb)
return -EINVAL;
}
- page = vi->rvq->vq_ops->get_buf(vi->rvq, &len);
+ page = virtqueue_get_buf(vi->rvq, &len);
if (!page) {
pr_debug("%s: rx error: %d buffers missing\n",
skb->dev->name, hdr->mhdr.num_buffers);
@@ -324,10 +327,8 @@ static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp)
{
struct sk_buff *skb;
struct skb_vnet_hdr *hdr;
- struct scatterlist sg[2];
int err;
- sg_init_table(sg, 2);
skb = netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN);
if (unlikely(!skb))
return -ENOMEM;
@@ -335,11 +336,11 @@ static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp)
skb_put(skb, MAX_PACKET_LEN);
hdr = skb_vnet_hdr(skb);
- sg_set_buf(sg, &hdr->hdr, sizeof hdr->hdr);
+ sg_set_buf(vi->rx_sg, &hdr->hdr, sizeof hdr->hdr);
- skb_to_sgvec(skb, sg + 1, 0, skb->len);
+ skb_to_sgvec(skb, vi->rx_sg + 1, 0, skb->len);
- err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, 2, skb);
+ err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, 2, skb);
if (err < 0)
dev_kfree_skb(skb);
@@ -348,13 +349,11 @@ static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp)
static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp)
{
- struct scatterlist sg[MAX_SKB_FRAGS + 2];
struct page *first, *list = NULL;
char *p;
int i, err, offset;
- sg_init_table(sg, MAX_SKB_FRAGS + 2);
- /* page in sg[MAX_SKB_FRAGS + 1] is list tail */
+ /* page in vi->rx_sg[MAX_SKB_FRAGS + 1] is list tail */
for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
first = get_a_page(vi, gfp);
if (!first) {
@@ -362,7 +361,7 @@ static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp)
give_pages(vi, list);
return -ENOMEM;
}
- sg_set_buf(&sg[i], page_address(first), PAGE_SIZE);
+ sg_set_buf(&vi->rx_sg[i], page_address(first), PAGE_SIZE);
/* chain new page in list head to match sg */
first->private = (unsigned long)list;
@@ -376,17 +375,17 @@ static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp)
}
p = page_address(first);
- /* sg[0], sg[1] share the same page */
- /* a separated sg[0] for virtio_net_hdr only during to QEMU bug*/
- sg_set_buf(&sg[0], p, sizeof(struct virtio_net_hdr));
+ /* vi->rx_sg[0], vi->rx_sg[1] share the same page */
+ /* a separated vi->rx_sg[0] for virtio_net_hdr only due to QEMU bug */
+ sg_set_buf(&vi->rx_sg[0], p, sizeof(struct virtio_net_hdr));
- /* sg[1] for data packet, from offset */
+ /* vi->rx_sg[1] for data packet, from offset */
offset = sizeof(struct padded_vnet_hdr);
- sg_set_buf(&sg[1], p + offset, PAGE_SIZE - offset);
+ sg_set_buf(&vi->rx_sg[1], p + offset, PAGE_SIZE - offset);
/* chain first in list head */
first->private = (unsigned long)list;
- err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, MAX_SKB_FRAGS + 2,
+ err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, MAX_SKB_FRAGS + 2,
first);
if (err < 0)
give_pages(vi, first);
@@ -397,16 +396,15 @@ static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp)
static int add_recvbuf_mergeable(struct virtnet_info *vi, gfp_t gfp)
{
struct page *page;
- struct scatterlist sg;
int err;
page = get_a_page(vi, gfp);
if (!page)
return -ENOMEM;
- sg_init_one(&sg, page_address(page), PAGE_SIZE);
+ sg_init_one(vi->rx_sg, page_address(page), PAGE_SIZE);
- err = vi->rvq->vq_ops->add_buf(vi->rvq, &sg, 0, 1, page);
+ err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, 1, page);
if (err < 0)
give_pages(vi, page);
@@ -435,7 +433,7 @@ static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
} while (err > 0);
if (unlikely(vi->num > vi->max))
vi->max = vi->num;
- vi->rvq->vq_ops->kick(vi->rvq);
+ virtqueue_kick(vi->rvq);
return !oom;
}
@@ -444,7 +442,7 @@ static void skb_recv_done(struct virtqueue *rvq)
struct virtnet_info *vi = rvq->vdev->priv;
/* Schedule NAPI, Suppress further interrupts if successful. */
if (napi_schedule_prep(&vi->napi)) {
- rvq->vq_ops->disable_cb(rvq);
+ virtqueue_disable_cb(rvq);
__napi_schedule(&vi->napi);
}
}
@@ -473,7 +471,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
again:
while (received < budget &&
- (buf = vi->rvq->vq_ops->get_buf(vi->rvq, &len)) != NULL) {
+ (buf = virtqueue_get_buf(vi->rvq, &len)) != NULL) {
receive_buf(vi->dev, buf, len);
--vi->num;
received++;
@@ -487,9 +485,9 @@ again:
/* Out of packets? */
if (received < budget) {
napi_complete(napi);
- if (unlikely(!vi->rvq->vq_ops->enable_cb(vi->rvq)) &&
+ if (unlikely(!virtqueue_enable_cb(vi->rvq)) &&
napi_schedule_prep(napi)) {
- vi->rvq->vq_ops->disable_cb(vi->rvq);
+ virtqueue_disable_cb(vi->rvq);
__napi_schedule(napi);
goto again;
}
@@ -503,7 +501,7 @@ static unsigned int free_old_xmit_skbs(struct virtnet_info *vi)
struct sk_buff *skb;
unsigned int len, tot_sgs = 0;
- while ((skb = vi->svq->vq_ops->get_buf(vi->svq, &len)) != NULL) {
+ while ((skb = virtqueue_get_buf(vi->svq, &len)) != NULL) {
pr_debug("Sent skb %p\n", skb);
vi->dev->stats.tx_bytes += skb->len;
vi->dev->stats.tx_packets++;
@@ -515,12 +513,9 @@ static unsigned int free_old_xmit_skbs(struct virtnet_info *vi)
static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
{
- struct scatterlist sg[2+MAX_SKB_FRAGS];
struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
- sg_init_table(sg, 2+MAX_SKB_FRAGS);
-
pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);
if (skb->ip_summed == CHECKSUM_PARTIAL) {
@@ -554,12 +549,13 @@ static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
/* Encode metadata header at front. */
if (vi->mergeable_rx_bufs)
- sg_set_buf(sg, &hdr->mhdr, sizeof hdr->mhdr);
+ sg_set_buf(vi->tx_sg, &hdr->mhdr, sizeof hdr->mhdr);
else
- sg_set_buf(sg, &hdr->hdr, sizeof hdr->hdr);
+ sg_set_buf(vi->tx_sg, &hdr->hdr, sizeof hdr->hdr);
- hdr->num_sg = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
- return vi->svq->vq_ops->add_buf(vi->svq, sg, hdr->num_sg, 0, skb);
+ hdr->num_sg = skb_to_sgvec(skb, vi->tx_sg + 1, 0, skb->len) + 1;
+ return virtqueue_add_buf(vi->svq, vi->tx_sg, hdr->num_sg,
+ 0, skb);
}
static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -578,14 +574,14 @@ again:
if (unlikely(capacity < 0)) {
netif_stop_queue(dev);
dev_warn(&dev->dev, "Unexpected full queue\n");
- if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
- vi->svq->vq_ops->disable_cb(vi->svq);
+ if (unlikely(!virtqueue_enable_cb(vi->svq))) {
+ virtqueue_disable_cb(vi->svq);
netif_start_queue(dev);
goto again;
}
return NETDEV_TX_BUSY;
}
- vi->svq->vq_ops->kick(vi->svq);
+ virtqueue_kick(vi->svq);
/* Don't wait up for transmitted skbs to be freed. */
skb_orphan(skb);
@@ -595,12 +591,12 @@ again:
* before it gets out of hand. Naturally, this wastes entries. */
if (capacity < 2+MAX_SKB_FRAGS) {
netif_stop_queue(dev);
- if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
+ if (unlikely(!virtqueue_enable_cb(vi->svq))) {
/* More just got used, free them then recheck. */
capacity += free_old_xmit_skbs(vi);
if (capacity >= 2+MAX_SKB_FRAGS) {
netif_start_queue(dev);
- vi->svq->vq_ops->disable_cb(vi->svq);
+ virtqueue_disable_cb(vi->svq);
}
}
}
@@ -645,7 +641,7 @@ static int virtnet_open(struct net_device *dev)
* now. virtnet_poll wants re-enable the queue, so we disable here.
* We synchronize against interrupts via NAPI_STATE_SCHED */
if (napi_schedule_prep(&vi->napi)) {
- vi->rvq->vq_ops->disable_cb(vi->rvq);
+ virtqueue_disable_cb(vi->rvq);
__napi_schedule(&vi->napi);
}
return 0;
@@ -682,15 +678,15 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
sg_set_buf(&sg[i + 1], sg_virt(s), s->length);
sg_set_buf(&sg[out + in - 1], &status, sizeof(status));
- BUG_ON(vi->cvq->vq_ops->add_buf(vi->cvq, sg, out, in, vi) < 0);
+ BUG_ON(virtqueue_add_buf(vi->cvq, sg, out, in, vi) < 0);
- vi->cvq->vq_ops->kick(vi->cvq);
+ virtqueue_kick(vi->cvq);
/*
* Spin for a response, the kick causes an ioport write, trapping
* into the hypervisor, so the request should be handled immediately.
*/
- while (!vi->cvq->vq_ops->get_buf(vi->cvq, &tmp))
+ while (!virtqueue_get_buf(vi->cvq, &tmp))
cpu_relax();
return status == VIRTIO_NET_OK;
@@ -722,7 +718,6 @@ static void virtnet_set_rx_mode(struct net_device *dev)
struct scatterlist sg[2];
u8 promisc, allmulti;
struct virtio_net_ctrl_mac *mac_data;
- struct dev_addr_list *addr;
struct netdev_hw_addr *ha;
int uc_count;
int mc_count;
@@ -779,8 +774,8 @@ static void virtnet_set_rx_mode(struct net_device *dev)
mac_data->entries = mc_count;
i = 0;
- netdev_for_each_mc_addr(addr, dev)
- memcpy(&mac_data->macs[i++][0], addr->da_addr, ETH_ALEN);
+ netdev_for_each_mc_addr(ha, dev)
+ memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
sg_set_buf(&sg[1], mac_data,
sizeof(mac_data->entries) + (mc_count * ETH_ALEN));
@@ -942,6 +937,8 @@ static int virtnet_probe(struct virtio_device *vdev)
vdev->priv = vi;
vi->pages = NULL;
INIT_DELAYED_WORK(&vi->refill, refill_work);
+ sg_init_table(vi->rx_sg, ARRAY_SIZE(vi->rx_sg));
+ sg_init_table(vi->tx_sg, ARRAY_SIZE(vi->tx_sg));
/* If we can receive ANY GSO packets, we must allocate large ones. */
if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
@@ -1006,13 +1003,13 @@ static void free_unused_bufs(struct virtnet_info *vi)
{
void *buf;
while (1) {
- buf = vi->svq->vq_ops->detach_unused_buf(vi->svq);
+ buf = virtqueue_detach_unused_buf(vi->svq);
if (!buf)
break;
dev_kfree_skb(buf);
}
while (1) {
- buf = vi->rvq->vq_ops->detach_unused_buf(vi->rvq);
+ buf = virtqueue_detach_unused_buf(vi->rvq);
if (!buf)
break;
if (vi->mergeable_rx_bufs || vi->big_packets)
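Note: two independent conversions run through the virtio_net hunks above. The per-call scatterlist arrays move into struct virtnet_info as rx_sg/tx_sg (MAX_SKB_FRAGS + 2 entries each, initialised once with sg_init_table() in virtnet_probe()), and every vi->...->vq_ops->op() indirection becomes a direct virtqueue_op() call. A minimal sketch of posting one receive page with the new API, assuming the fields shown above and <linux/virtio.h>/<linux/scatterlist.h> (the driver itself kicks once per fill batch rather than per buffer):

    /* one "in" buffer (device writes into it), no "out" buffers */
    sg_init_one(vi->rx_sg, page_address(page), PAGE_SIZE);
    err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, 1, page);
    if (err < 0)
            give_pages(vi, page);
    else
            virtqueue_kick(vi->rvq);        /* notify the host side */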
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index cff3485d9673..989b742551ac 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -992,7 +992,6 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_TXPROD,
tq->tx_ring.next2fill);
}
- netdev->trans_start = jiffies;
return NETDEV_TX_OK;
@@ -1174,7 +1173,6 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
netif_receive_skb(skb);
}
- adapter->netdev->last_rx = jiffies;
ctx->skb = NULL;
}
@@ -1371,13 +1369,12 @@ vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size +
rq->rx_ring[1].size);
- bi = kmalloc(sz, GFP_KERNEL);
+ bi = kzalloc(sz, GFP_KERNEL);
if (!bi) {
printk(KERN_ERR "%s: failed to allocate rx bufinfo\n",
adapter->netdev->name);
goto err;
}
- memset(bi, 0, sz);
rq->buf_info[0] = bi;
rq->buf_info[1] = bi + rq->rx_ring[0].size;
@@ -1675,11 +1672,11 @@ vmxnet3_copy_mc(struct net_device *netdev)
/* We may be called with BH disabled */
buf = kmalloc(sz, GFP_ATOMIC);
if (buf) {
- struct dev_mc_list *mc;
+ struct netdev_hw_addr *ha;
int i = 0;
- netdev_for_each_mc_addr(mc, netdev)
- memcpy(buf + i++ * ETH_ALEN, mc->dmi_addr,
+ netdev_for_each_mc_addr(ha, netdev)
+ memcpy(buf + i++ * ETH_ALEN, ha->addr,
ETH_ALEN);
}
}
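Note: the vmxnet3 hunks make two routine cleanups: kmalloc() followed by memset(0) collapses into kzalloc(), and the multicast walk switches from the removed struct dev_mc_list to struct netdev_hw_addr. A short sketch, assuming <linux/slab.h> and <linux/netdevice.h>:

    struct netdev_hw_addr *ha;
    int i = 0;

    /* zeroed allocation in one call (was kmalloc() + memset()) */
    bi = kzalloc(sz, GFP_KERNEL);
    if (!bi)
            goto err;

    /* multicast addresses now come from the unified address list */
    netdev_for_each_mc_addr(ha, netdev)
            memcpy(buf + i++ * ETH_ALEN, ha->addr, ETH_ALEN);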
diff --git a/drivers/net/vxge/vxge-config.c b/drivers/net/vxge/vxge-config.c
index a21a25d218b6..297f0d202073 100644
--- a/drivers/net/vxge/vxge-config.c
+++ b/drivers/net/vxge/vxge-config.c
@@ -183,8 +183,6 @@ __vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
pci_write_config_word(hldev->pdev, PCI_COMMAND, cmd);
pci_save_state(hldev->pdev);
-
- return;
}
/*
@@ -342,8 +340,6 @@ void __vxge_hw_device_id_get(struct __vxge_hw_device *hldev)
hldev->minor_revision =
(u8)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MINOR_REVISION(val64);
-
- return;
}
/*
@@ -357,8 +353,10 @@ __vxge_hw_device_access_rights_get(u32 host_type, u32 func_id)
switch (host_type) {
case VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION:
- access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
- VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
+ if (func_id == 0) {
+ access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
+ VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
+ }
break;
case VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION:
access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
@@ -426,8 +424,6 @@ void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev)
hldev->first_vp_id = i;
break;
}
-
- return;
}
/*
@@ -633,8 +629,10 @@ vxge_hw_device_initialize(
__vxge_hw_device_pci_e_init(hldev);
status = __vxge_hw_device_reg_addr_get(hldev);
- if (status != VXGE_HW_OK)
+ if (status != VXGE_HW_OK) {
+ vfree(hldev);
goto exit;
+ }
__vxge_hw_device_id_get(hldev);
__vxge_hw_device_host_info_get(hldev);
@@ -1213,19 +1211,16 @@ __vxge_hw_ring_mempool_item_alloc(struct vxge_hw_mempool *mempoolh,
/* link this RxD block with previous one */
__vxge_hw_ring_rxdblock_link(mempoolh, ring, index - 1, index);
}
-
- return;
}
/*
- * __vxge_hw_ring_initial_replenish - Initial replenish of RxDs
+ * __vxge_hw_ring_replenish - Initial replenish of RxDs
* This function replenishes the RxDs from reserve array to work array
*/
enum vxge_hw_status
-vxge_hw_ring_replenish(struct __vxge_hw_ring *ring, u16 min_flag)
+vxge_hw_ring_replenish(struct __vxge_hw_ring *ring)
{
void *rxd;
- int i = 0;
struct __vxge_hw_channel *channel;
enum vxge_hw_status status = VXGE_HW_OK;
@@ -1246,11 +1241,6 @@ vxge_hw_ring_replenish(struct __vxge_hw_ring *ring, u16 min_flag)
}
vxge_hw_ring_rxd_post(ring, rxd);
- if (min_flag) {
- i++;
- if (i == VXGE_HW_RING_MIN_BUFF_ALLOCATION)
- break;
- }
}
status = VXGE_HW_OK;
exit:
@@ -1355,7 +1345,7 @@ __vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp,
* Currently we don't have a case when the 1) is done without the 2).
*/
if (ring->rxd_init) {
- status = vxge_hw_ring_replenish(ring, 1);
+ status = vxge_hw_ring_replenish(ring);
if (status != VXGE_HW_OK) {
__vxge_hw_ring_delete(vp);
goto exit;
@@ -1417,7 +1407,7 @@ enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring)
goto exit;
if (ring->rxd_init) {
- status = vxge_hw_ring_replenish(ring, 1);
+ status = vxge_hw_ring_replenish(ring);
if (status != VXGE_HW_OK)
goto exit;
}
@@ -2320,8 +2310,6 @@ __vxge_hw_fifo_mempool_item_alloc(
txdl_priv->first_txdp = txdp;
txdl_priv->next_txdl_priv = NULL;
txdl_priv->alloc_frags = 0;
-
- return;
}
/*
@@ -2578,7 +2566,6 @@ __vxge_hw_read_rts_ds(struct vxge_hw_vpath_reg __iomem *vpath_reg,
writeq(dta_struct_sel, &vpath_reg->rts_access_steer_data0);
writeq(0, &vpath_reg->rts_access_steer_data1);
wmb();
- return;
}
@@ -3486,7 +3473,6 @@ __vxge_hw_vpath_prc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
val64 &= ~VXGE_HW_PRC_CFG4_RTH_DISABLE;
writeq(val64, &vp_reg->prc_cfg4);
- return;
}
/*
@@ -3905,7 +3891,6 @@ vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id)
&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
}
}
- return;
}
/*
* __vxge_hw_vpath_initialize
@@ -5039,8 +5024,6 @@ __vxge_hw_blockpool_free(struct __vxge_hw_device *devh,
if (status == VXGE_HW_OK)
__vxge_hw_blockpool_blocks_remove(blockpool);
}
-
- return;
}
/*
@@ -5096,6 +5079,4 @@ __vxge_hw_blockpool_block_free(struct __vxge_hw_device *devh,
}
__vxge_hw_blockpool_blocks_remove(blockpool);
-
- return;
}
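Note: besides removing redundant trailing return statements from void functions, the vxge-config.c hunks fix two real problems: vxge_hw_device_initialize() now frees the just-allocated device structure when register-address setup fails, and MRPCIM/SRPCIM access rights in the NO_MR_NO_SR_NORMAL_FUNCTION case are granted only to function 0. The ring replenish path also loses its min_flag argument, so vxge_hw_ring_replenish() always refills the whole work array. The leak fix follows the usual error-path pattern:

    status = __vxge_hw_device_reg_addr_get(hldev);
    if (status != VXGE_HW_OK) {
            vfree(hldev);   /* allocated earlier in this function; leaked before */
            goto exit;
    }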
diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h
index 13f5416307f8..4ae2625d4d8f 100644
--- a/drivers/net/vxge/vxge-config.h
+++ b/drivers/net/vxge/vxge-config.h
@@ -765,10 +765,18 @@ struct vxge_hw_device_hw_info {
#define VXGE_HW_SR_VH_VIRTUAL_FUNCTION 6
#define VXGE_HW_VH_NORMAL_FUNCTION 7
u64 function_mode;
-#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION 0
-#define VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION 1
+#define VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION 0
+#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION 1
#define VXGE_HW_FUNCTION_MODE_SRIOV 2
#define VXGE_HW_FUNCTION_MODE_MRIOV 3
+#define VXGE_HW_FUNCTION_MODE_MRIOV_8 4
+#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_17 5
+#define VXGE_HW_FUNCTION_MODE_SRIOV_8 6
+#define VXGE_HW_FUNCTION_MODE_SRIOV_4 7
+#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_2 8
+#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_4 9
+#define VXGE_HW_FUNCTION_MODE_MRIOV_4 10
+
u32 func_id;
u64 vpath_mask;
struct vxge_hw_device_version fw_version;
@@ -1915,20 +1923,32 @@ static inline void *vxge_os_dma_malloc(struct pci_dev *pdev,
gfp_t flags;
void *vaddr;
unsigned long misaligned = 0;
+ int realloc_flag = 0;
*p_dma_acch = *p_dmah = NULL;
if (in_interrupt())
flags = GFP_ATOMIC | GFP_DMA;
else
flags = GFP_KERNEL | GFP_DMA;
-
- size += VXGE_CACHE_LINE_SIZE;
-
+realloc:
vaddr = kmalloc((size), flags);
if (vaddr == NULL)
return vaddr;
- misaligned = (unsigned long)VXGE_ALIGN(*((u64 *)&vaddr),
+ misaligned = (unsigned long)VXGE_ALIGN((unsigned long)vaddr,
VXGE_CACHE_LINE_SIZE);
+ if (realloc_flag)
+ goto out;
+
+ if (misaligned) {
+ /* misaligned, free current one and try allocating
+ * size + VXGE_CACHE_LINE_SIZE memory
+ */
+ kfree((void *) vaddr);
+ size += VXGE_CACHE_LINE_SIZE;
+ realloc_flag = 1;
+ goto realloc;
+ }
+out:
*(unsigned long *)p_dma_acch = misaligned;
vaddr = (void *)((u8 *)vaddr + misaligned);
return vaddr;
@@ -2254,4 +2274,6 @@ enum vxge_hw_status vxge_hw_vpath_rts_rth_set(
struct vxge_hw_rth_hash_types *hash_type,
u16 bucket_size);
+enum vxge_hw_status
+__vxge_hw_device_is_privilaged(u32 host_type, u32 func_id);
#endif
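Note: two things change in vxge-config.h. The function-mode encoding is corrected (0 is single-function, 1 is multi-function) and extended with the SRIOV/MRIOV variants consumed by vxge_get_num_vfs() later in the series, and vxge_os_dma_malloc() stops unconditionally over-allocating by VXGE_CACHE_LINE_SIZE. It also no longer computes the misalignment from *((u64 *)&vaddr), which reinterprets the pointer variable itself as a u64, but from the pointer value. A sketch of the new strategy, assuming VXGE_ALIGN(addr, size) returns the padding needed to reach the next size-aligned boundary (as the surrounding code implies):

    vaddr = kmalloc(size, flags);
    if (!vaddr)
            return NULL;
    misaligned = VXGE_ALIGN((unsigned long)vaddr, VXGE_CACHE_LINE_SIZE);
    if (misaligned) {
            /* only now pay for the extra cache line and retry once */
            kfree(vaddr);
            vaddr = kmalloc(size + VXGE_CACHE_LINE_SIZE, flags);
            if (!vaddr)
                    return NULL;
            misaligned = VXGE_ALIGN((unsigned long)vaddr, VXGE_CACHE_LINE_SIZE);
    }
    *(unsigned long *)p_dma_acch = misaligned;  /* remembered for the free path */
    return (void *)((u8 *)vaddr + misaligned);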
diff --git a/drivers/net/vxge/vxge-ethtool.c b/drivers/net/vxge/vxge-ethtool.c
index aaf374cfd322..cadef8549c06 100644
--- a/drivers/net/vxge/vxge-ethtool.c
+++ b/drivers/net/vxge/vxge-ethtool.c
@@ -109,7 +109,7 @@ static void vxge_ethtool_gregs(struct net_device *dev,
int index, offset;
enum vxge_hw_status status;
u64 reg;
- u8 *reg_space = (u8 *) space;
+ u64 *reg_space = (u64 *) space;
struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
struct __vxge_hw_device *hldev = (struct __vxge_hw_device *)
pci_get_drvdata(vdev->pdev);
@@ -129,8 +129,7 @@ static void vxge_ethtool_gregs(struct net_device *dev,
__func__, __LINE__);
return;
}
-
- memcpy((reg_space + offset), &reg, 8);
+ *reg_space++ = reg;
}
}
}
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index ba6d0da78c30..b504bd561362 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -445,7 +445,7 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
ring->ndev->name, __func__, __LINE__);
ring->pkts_processed = 0;
- vxge_hw_ring_replenish(ringh, 0);
+ vxge_hw_ring_replenish(ringh);
do {
prefetch((char *)dtr + L1_CACHE_BYTES);
@@ -1118,7 +1118,7 @@ vxge_tx_term(void *dtrh, enum vxge_hw_txdl_state state, void *userdata)
*/
static void vxge_set_multicast(struct net_device *dev)
{
- struct dev_mc_list *mclist;
+ struct netdev_hw_addr *ha;
struct vxgedev *vdev;
int i, mcast_cnt = 0;
struct __vxge_hw_device *hldev;
@@ -1218,8 +1218,8 @@ static void vxge_set_multicast(struct net_device *dev)
}
/* Add new ones */
- netdev_for_each_mc_addr(mclist, dev) {
- memcpy(mac_info.macaddr, mclist->dmi_addr, ETH_ALEN);
+ netdev_for_each_mc_addr(ha, dev) {
+ memcpy(mac_info.macaddr, ha->addr, ETH_ALEN);
for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath;
vpath_idx++) {
mac_info.vpath_no = vpath_idx;
@@ -1364,28 +1364,26 @@ static int vxge_set_mac_addr(struct net_device *dev, void *p)
void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
{
struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
- int msix_id, alarm_msix_id;
- int tim_msix_id[4] = {[0 ...3] = 0};
+ int msix_id = 0;
+ int tim_msix_id[4] = {0, 1, 0, 0};
+ int alarm_msix_id = VXGE_ALARM_MSIX_ID;
vxge_hw_vpath_intr_enable(vpath->handle);
if (vdev->config.intr_type == INTA)
vxge_hw_vpath_inta_unmask_tx_rx(vpath->handle);
else {
- msix_id = vp_id * VXGE_HW_VPATH_MSIX_ACTIVE;
- alarm_msix_id =
- VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2;
-
- tim_msix_id[0] = msix_id;
- tim_msix_id[1] = msix_id + 1;
vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id,
alarm_msix_id);
+ msix_id = vpath->device_id * VXGE_HW_VPATH_MSIX_ACTIVE;
vxge_hw_vpath_msix_unmask(vpath->handle, msix_id);
vxge_hw_vpath_msix_unmask(vpath->handle, msix_id + 1);
/* enable the alarm vector */
- vxge_hw_vpath_msix_unmask(vpath->handle, alarm_msix_id);
+ msix_id = (vpath->handle->vpath->hldev->first_vp_id *
+ VXGE_HW_VPATH_MSIX_ACTIVE) + alarm_msix_id;
+ vxge_hw_vpath_msix_unmask(vpath->handle, msix_id);
}
}
@@ -1406,12 +1404,13 @@ void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
if (vdev->config.intr_type == INTA)
vxge_hw_vpath_inta_mask_tx_rx(vpath->handle);
else {
- msix_id = vp_id * VXGE_HW_VPATH_MSIX_ACTIVE;
+ msix_id = vpath->device_id * VXGE_HW_VPATH_MSIX_ACTIVE;
vxge_hw_vpath_msix_mask(vpath->handle, msix_id);
vxge_hw_vpath_msix_mask(vpath->handle, msix_id + 1);
/* disable the alarm vector */
- msix_id = VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2;
+ msix_id = (vpath->handle->vpath->hldev->first_vp_id *
+ VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
vxge_hw_vpath_msix_mask(vpath->handle, msix_id);
}
}
@@ -1765,7 +1764,6 @@ static void vxge_netpoll(struct net_device *dev)
vxge_debug_entryexit(VXGE_TRACE,
"%s:%d Exiting...", __func__, __LINE__);
- return;
}
#endif
@@ -2224,19 +2222,18 @@ vxge_alarm_msix_handle(int irq, void *dev_id)
enum vxge_hw_status status;
struct vxge_vpath *vpath = (struct vxge_vpath *)dev_id;
struct vxgedev *vdev = vpath->vdev;
- int alarm_msix_id =
- VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2;
+ int msix_id = (vpath->handle->vpath->vp_id *
+ VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
for (i = 0; i < vdev->no_of_vpath; i++) {
- vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle,
- alarm_msix_id);
+ vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle, msix_id);
status = vxge_hw_vpath_alarm_process(vdev->vpaths[i].handle,
vdev->exec_mode);
if (status == VXGE_HW_OK) {
vxge_hw_vpath_msix_unmask(vdev->vpaths[i].handle,
- alarm_msix_id);
+ msix_id);
continue;
}
vxge_debug_intr(VXGE_ERR,
@@ -2249,18 +2246,17 @@ vxge_alarm_msix_handle(int irq, void *dev_id)
static int vxge_alloc_msix(struct vxgedev *vdev)
{
int j, i, ret = 0;
- int intr_cnt = 0;
- int alarm_msix_id = 0, msix_intr_vect = 0;
+ int msix_intr_vect = 0, temp;
vdev->intr_cnt = 0;
+start:
/* Tx/Rx MSIX Vectors count */
vdev->intr_cnt = vdev->no_of_vpath * 2;
/* Alarm MSIX Vectors count */
vdev->intr_cnt++;
- intr_cnt = (vdev->max_vpath_supported * 2) + 1;
- vdev->entries = kzalloc(intr_cnt * sizeof(struct msix_entry),
+ vdev->entries = kzalloc(vdev->intr_cnt * sizeof(struct msix_entry),
GFP_KERNEL);
if (!vdev->entries) {
vxge_debug_init(VXGE_ERR,
@@ -2269,8 +2265,9 @@ static int vxge_alloc_msix(struct vxgedev *vdev)
return -ENOMEM;
}
- vdev->vxge_entries = kzalloc(intr_cnt * sizeof(struct vxge_msix_entry),
- GFP_KERNEL);
+ vdev->vxge_entries =
+ kzalloc(vdev->intr_cnt * sizeof(struct vxge_msix_entry),
+ GFP_KERNEL);
if (!vdev->vxge_entries) {
vxge_debug_init(VXGE_ERR, "%s: memory allocation failed",
VXGE_DRIVER_NAME);
@@ -2278,9 +2275,7 @@ static int vxge_alloc_msix(struct vxgedev *vdev)
return -ENOMEM;
}
- /* Last vector in the list is used for alarm */
- alarm_msix_id = VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2;
- for (i = 0, j = 0; i < vdev->max_vpath_supported; i++) {
+ for (i = 0, j = 0; i < vdev->no_of_vpath; i++) {
msix_intr_vect = i * VXGE_HW_VPATH_MSIX_ACTIVE;
@@ -2298,47 +2293,31 @@ static int vxge_alloc_msix(struct vxgedev *vdev)
}
/* Initialize the alarm vector */
- vdev->entries[j].entry = alarm_msix_id;
- vdev->vxge_entries[j].entry = alarm_msix_id;
+ vdev->entries[j].entry = VXGE_ALARM_MSIX_ID;
+ vdev->vxge_entries[j].entry = VXGE_ALARM_MSIX_ID;
vdev->vxge_entries[j].in_use = 0;
- ret = pci_enable_msix(vdev->pdev, vdev->entries, intr_cnt);
- /* if driver request exceeeds available irq's, request with a small
- * number.
- */
- if (ret > 0) {
- vxge_debug_init(VXGE_ERR,
- "%s: MSI-X enable failed for %d vectors, available: %d",
- VXGE_DRIVER_NAME, intr_cnt, ret);
- vdev->max_vpath_supported = vdev->no_of_vpath;
- intr_cnt = (vdev->max_vpath_supported * 2) + 1;
-
- /* Reset the alarm vector setting */
- vdev->entries[j].entry = 0;
- vdev->vxge_entries[j].entry = 0;
-
- /* Initialize the alarm vector with new setting */
- vdev->entries[intr_cnt - 1].entry = alarm_msix_id;
- vdev->vxge_entries[intr_cnt - 1].entry = alarm_msix_id;
- vdev->vxge_entries[intr_cnt - 1].in_use = 0;
-
- ret = pci_enable_msix(vdev->pdev, vdev->entries, intr_cnt);
- if (!ret)
- vxge_debug_init(VXGE_ERR,
- "%s: MSI-X enabled for %d vectors",
- VXGE_DRIVER_NAME, intr_cnt);
- }
+ ret = pci_enable_msix(vdev->pdev, vdev->entries, vdev->intr_cnt);
- if (ret) {
+ if (ret > 0) {
vxge_debug_init(VXGE_ERR,
"%s: MSI-X enable failed for %d vectors, ret: %d",
- VXGE_DRIVER_NAME, intr_cnt, ret);
+ VXGE_DRIVER_NAME, vdev->intr_cnt, ret);
kfree(vdev->entries);
kfree(vdev->vxge_entries);
vdev->entries = NULL;
vdev->vxge_entries = NULL;
+
+ if ((max_config_vpath != VXGE_USE_DEFAULT) || (ret < 3))
+ return -ENODEV;
+ /* Try with less no of vector by reducing no of vpaths count */
+ temp = (ret - 1)/2;
+ vxge_close_vpaths(vdev, temp);
+ vdev->no_of_vpath = temp;
+ goto start;
+ } else if (ret < 0)
return -ENODEV;
- }
+
return 0;
}
@@ -2346,43 +2325,26 @@ static int vxge_enable_msix(struct vxgedev *vdev)
{
int i, ret = 0;
- enum vxge_hw_status status;
/* 0 - Tx, 1 - Rx */
- int tim_msix_id[4];
- int alarm_msix_id = 0, msix_intr_vect = 0;
+ int tim_msix_id[4] = {0, 1, 0, 0};
+
vdev->intr_cnt = 0;
/* allocate msix vectors */
ret = vxge_alloc_msix(vdev);
if (!ret) {
- /* Last vector in the list is used for alarm */
- alarm_msix_id =
- VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2;
for (i = 0; i < vdev->no_of_vpath; i++) {
/* If fifo or ring are not enabled
the MSIX vector for that should be set to 0
Hence initializeing this array to all 0s.
*/
- memset(tim_msix_id, 0, sizeof(tim_msix_id));
- msix_intr_vect = i * VXGE_HW_VPATH_MSIX_ACTIVE;
- tim_msix_id[0] = msix_intr_vect;
-
- tim_msix_id[1] = msix_intr_vect + 1;
- vdev->vpaths[i].ring.rx_vector_no = tim_msix_id[1];
+ vdev->vpaths[i].ring.rx_vector_no =
+ (vdev->vpaths[i].device_id *
+ VXGE_HW_VPATH_MSIX_ACTIVE) + 1;
- status = vxge_hw_vpath_msix_set(
- vdev->vpaths[i].handle,
- tim_msix_id, alarm_msix_id);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "vxge_hw_vpath_msix_set "
- "failed with status : %x", status);
- kfree(vdev->entries);
- kfree(vdev->vxge_entries);
- pci_disable_msix(vdev->pdev);
- return -ENODEV;
- }
+ vxge_hw_vpath_msix_set(vdev->vpaths[i].handle,
+ tim_msix_id, VXGE_ALARM_MSIX_ID);
}
}
@@ -2393,7 +2355,7 @@ static void vxge_rem_msix_isr(struct vxgedev *vdev)
{
int intr_cnt;
- for (intr_cnt = 0; intr_cnt < (vdev->max_vpath_supported * 2 + 1);
+ for (intr_cnt = 0; intr_cnt < (vdev->no_of_vpath * 2 + 1);
intr_cnt++) {
if (vdev->vxge_entries[intr_cnt].in_use) {
synchronize_irq(vdev->entries[intr_cnt].vector);
@@ -2458,9 +2420,10 @@ static int vxge_add_isr(struct vxgedev *vdev)
switch (msix_idx) {
case 0:
snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
- "%s:vxge fn: %d vpath: %d Tx MSI-X: %d",
- vdev->ndev->name, pci_fun, vp_idx,
- vdev->entries[intr_cnt].entry);
+ "%s:vxge:MSI-X %d - Tx - fn:%d vpath:%d",
+ vdev->ndev->name,
+ vdev->entries[intr_cnt].entry,
+ pci_fun, vp_idx);
ret = request_irq(
vdev->entries[intr_cnt].vector,
vxge_tx_msix_handle, 0,
@@ -2472,9 +2435,10 @@ static int vxge_add_isr(struct vxgedev *vdev)
break;
case 1:
snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
- "%s:vxge fn: %d vpath: %d Rx MSI-X: %d",
- vdev->ndev->name, pci_fun, vp_idx,
- vdev->entries[intr_cnt].entry);
+ "%s:vxge:MSI-X %d - Rx - fn:%d vpath:%d",
+ vdev->ndev->name,
+ vdev->entries[intr_cnt].entry,
+ pci_fun, vp_idx);
ret = request_irq(
vdev->entries[intr_cnt].vector,
vxge_rx_msix_napi_handle,
@@ -2502,9 +2466,11 @@ static int vxge_add_isr(struct vxgedev *vdev)
if (irq_req) {
/* We requested for this msix interrupt */
vdev->vxge_entries[intr_cnt].in_use = 1;
+ msix_idx += vdev->vpaths[vp_idx].device_id *
+ VXGE_HW_VPATH_MSIX_ACTIVE;
vxge_hw_vpath_msix_unmask(
vdev->vpaths[vp_idx].handle,
- intr_idx);
+ msix_idx);
intr_cnt++;
}
@@ -2514,16 +2480,17 @@ static int vxge_add_isr(struct vxgedev *vdev)
vp_idx++;
}
- intr_cnt = vdev->max_vpath_supported * 2;
+ intr_cnt = vdev->no_of_vpath * 2;
snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
- "%s:vxge Alarm fn: %d MSI-X: %d",
- vdev->ndev->name, pci_fun,
- vdev->entries[intr_cnt].entry);
+ "%s:vxge:MSI-X %d - Alarm - fn:%d",
+ vdev->ndev->name,
+ vdev->entries[intr_cnt].entry,
+ pci_fun);
/* For Alarm interrupts */
ret = request_irq(vdev->entries[intr_cnt].vector,
vxge_alarm_msix_handle, 0,
vdev->desc[intr_cnt],
- &vdev->vpaths[vp_idx]);
+ &vdev->vpaths[0]);
if (ret) {
vxge_debug_init(VXGE_ERR,
"%s: MSIX - %d Registration failed",
@@ -2536,16 +2503,19 @@ static int vxge_add_isr(struct vxgedev *vdev)
goto INTA_MODE;
}
+ msix_idx = (vdev->vpaths[0].handle->vpath->vp_id *
+ VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
vxge_hw_vpath_msix_unmask(vdev->vpaths[vp_idx].handle,
- intr_idx - 2);
+ msix_idx);
vdev->vxge_entries[intr_cnt].in_use = 1;
- vdev->vxge_entries[intr_cnt].arg = &vdev->vpaths[vp_idx];
+ vdev->vxge_entries[intr_cnt].arg = &vdev->vpaths[0];
}
INTA_MODE:
#endif
- snprintf(vdev->desc[0], VXGE_INTR_STRLEN, "%s:vxge", vdev->ndev->name);
if (vdev->config.intr_type == INTA) {
+ snprintf(vdev->desc[0], VXGE_INTR_STRLEN,
+ "%s:vxge:INTA", vdev->ndev->name);
vxge_hw_device_set_intr_type(vdev->devh,
VXGE_HW_INTR_MODE_IRQLINE);
vxge_hw_vpath_tti_ci_set(vdev->devh,
@@ -2844,7 +2814,6 @@ static void vxge_napi_del_all(struct vxgedev *vdev)
for (i = 0; i < vdev->no_of_vpath; i++)
netif_napi_del(&vdev->vpaths[i].ring.napi);
}
- return;
}
int do_vxge_close(struct net_device *dev, int do_io)
@@ -3529,8 +3498,6 @@ static void verify_bandwidth(void)
for (i = 1; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
bw_percentage[i] = bw_percentage[0];
}
-
- return;
}
/*
@@ -3995,6 +3962,36 @@ static void vxge_io_resume(struct pci_dev *pdev)
netif_device_attach(netdev);
}
+static inline u32 vxge_get_num_vfs(u64 function_mode)
+{
+ u32 num_functions = 0;
+
+ switch (function_mode) {
+ case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION:
+ case VXGE_HW_FUNCTION_MODE_SRIOV_8:
+ num_functions = 8;
+ break;
+ case VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION:
+ num_functions = 1;
+ break;
+ case VXGE_HW_FUNCTION_MODE_SRIOV:
+ case VXGE_HW_FUNCTION_MODE_MRIOV:
+ case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_17:
+ num_functions = 17;
+ break;
+ case VXGE_HW_FUNCTION_MODE_SRIOV_4:
+ num_functions = 4;
+ break;
+ case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_2:
+ num_functions = 2;
+ break;
+ case VXGE_HW_FUNCTION_MODE_MRIOV_8:
+ num_functions = 8; /* TODO */
+ break;
+ }
+ return num_functions;
+}
+
/**
* vxge_probe
* @pdev : structure containing the PCI related information of the device.
@@ -4022,14 +4019,19 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
u8 *macaddr;
struct vxge_mac_addrs *entry;
static int bus = -1, device = -1;
+ u32 host_type;
u8 new_device = 0;
+ enum vxge_hw_status is_privileged;
+ u32 function_mode;
+ u32 num_vfs = 0;
vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
attr.pdev = pdev;
- if (bus != pdev->bus->number)
- new_device = 1;
- if (device != PCI_SLOT(pdev->devfn))
+ /* In SRIOV-17 mode, functions of the same adapter
+ * can be deployed on different buses */
+ if ((!pdev->is_virtfn) && ((bus != pdev->bus->number) ||
+ (device != PCI_SLOT(pdev->devfn))))
new_device = 1;
bus = pdev->bus->number;
@@ -4046,9 +4048,11 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
driver_config->total_dev_cnt);
driver_config->config_dev_cnt = 0;
driver_config->total_dev_cnt = 0;
- driver_config->g_no_cpus = 0;
}
-
+ /* Now making the CPU based no of vpath calculation
+ * applicable for individual functions as well.
+ */
+ driver_config->g_no_cpus = 0;
driver_config->vpath_per_dev = max_config_vpath;
driver_config->total_dev_cnt++;
@@ -4161,6 +4165,11 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
"%s:%d Vpath mask = %llx", __func__, __LINE__,
(unsigned long long)vpath_mask);
+ function_mode = ll_config.device_hw_info.function_mode;
+ host_type = ll_config.device_hw_info.host_type;
+ is_privileged = __vxge_hw_device_is_privilaged(host_type,
+ ll_config.device_hw_info.func_id);
+
/* Check how many vpaths are available */
for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
if (!((vpath_mask) & vxge_mBIT(i)))
@@ -4168,14 +4177,18 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
max_vpath_supported++;
}
+ if (new_device)
+ num_vfs = vxge_get_num_vfs(function_mode) - 1;
+
/* Enable SRIOV mode, if firmware has SRIOV support and if it is a PF */
- if ((VXGE_HW_FUNCTION_MODE_SRIOV ==
- ll_config.device_hw_info.function_mode) &&
- (max_config_dev > 1) && (pdev->is_physfn)) {
- ret = pci_enable_sriov(pdev, max_config_dev - 1);
- if (ret)
- vxge_debug_ll_config(VXGE_ERR,
- "Failed to enable SRIOV: %d \n", ret);
+ if (is_sriov(function_mode) && (max_config_dev > 1) &&
+ (ll_config.intr_type != INTA) &&
+ (is_privileged == VXGE_HW_OK)) {
+ ret = pci_enable_sriov(pdev, ((max_config_dev - 1) < num_vfs)
+ ? (max_config_dev - 1) : num_vfs);
+ if (ret)
+ vxge_debug_ll_config(VXGE_ERR,
+ "Failed in enabling SRIOV mode: %d\n", ret);
}
/*
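Note: the vxge-main.c interrupt rework gives every virtual path a fixed block of VXGE_HW_VPATH_MSIX_ACTIVE (4) vectors indexed by its device_id, with vector 0 for Tx, 1 for Rx and VXGE_ALARM_MSIX_ID (2) reserved for the alarm, instead of deriving the alarm vector from the number of configured vpaths. Allocation now requests only no_of_vpath * 2 + 1 entries and, when pci_enable_msix() reports that fewer vectors are available, shrinks the vpath count and tries again rather than failing. A simplified sketch of that retry loop (the wrapper name is hypothetical; the real code also reallocates the entry tables on each pass and honours max_config_vpath):

    static int vxge_try_enable_msix(struct vxgedev *vdev)
    {
            int nvec, ret;

    retry:
            nvec = vdev->no_of_vpath * 2 + 1;       /* Tx+Rx per vpath, plus alarm */
            ret = pci_enable_msix(vdev->pdev, vdev->entries, nvec);
            if (ret > 0) {
                    /* only 'ret' vectors available: shrink and retry */
                    if (ret < 3)
                            return -ENODEV;         /* cannot fit one vpath + alarm */
                    vdev->no_of_vpath = (ret - 1) / 2;
                    goto retry;
            }
            return ret < 0 ? -ENODEV : 0;
    }

The SR-IOV probe change follows the same theme: VFs are enabled only on a privileged function, only for SR-IOV capable function modes (see is_sriov() in vxge-main.h below), and capped at vxge_get_num_vfs(function_mode) - 1.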
diff --git a/drivers/net/vxge/vxge-main.h b/drivers/net/vxge/vxge-main.h
index 7c83ba4be9d7..60276b20fa5e 100644
--- a/drivers/net/vxge/vxge-main.h
+++ b/drivers/net/vxge/vxge-main.h
@@ -31,6 +31,7 @@
#define PCI_DEVICE_ID_TITAN_UNI 0x5833
#define VXGE_USE_DEFAULT 0xffffffff
#define VXGE_HW_VPATH_MSIX_ACTIVE 4
+#define VXGE_ALARM_MSIX_ID 2
#define VXGE_HW_RXSYNC_FREQ_CNT 4
#define VXGE_LL_WATCH_DOG_TIMEOUT (15 * HZ)
#define VXGE_LL_RX_COPY_THRESHOLD 256
@@ -89,6 +90,11 @@
#define VXGE_LL_MAX_FRAME_SIZE(dev) ((dev)->mtu + VXGE_HW_MAC_HEADER_MAX_SIZE)
+#define is_sriov(function_mode) \
+ ((function_mode == VXGE_HW_FUNCTION_MODE_SRIOV) || \
+ (function_mode == VXGE_HW_FUNCTION_MODE_SRIOV_8) || \
+ (function_mode == VXGE_HW_FUNCTION_MODE_SRIOV_4))
+
enum vxge_reset_event {
/* reset events */
VXGE_LL_VPATH_RESET = 0,
diff --git a/drivers/net/vxge/vxge-traffic.c b/drivers/net/vxge/vxge-traffic.c
index 2c012f4ce465..6cc1dd79b40b 100644
--- a/drivers/net/vxge/vxge-traffic.c
+++ b/drivers/net/vxge/vxge-traffic.c
@@ -231,11 +231,8 @@ void vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channel, int msix_id)
{
__vxge_hw_pio_mem_write32_upper(
- (u32)vxge_bVALn(vxge_mBIT(channel->first_vp_id+(msix_id/4)),
- 0, 32),
+ (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
&channel->common_reg->set_msix_mask_vect[msix_id%4]);
-
- return;
}
/**
@@ -252,11 +249,8 @@ vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channel, int msix_id)
{
__vxge_hw_pio_mem_write32_upper(
- (u32)vxge_bVALn(vxge_mBIT(channel->first_vp_id+(msix_id/4)),
- 0, 32),
+ (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
&channel->common_reg->clear_msix_mask_vect[msix_id%4]);
-
- return;
}
/**
@@ -331,8 +325,6 @@ void vxge_hw_device_intr_enable(struct __vxge_hw_device *hldev)
val64 = readq(&hldev->common_reg->titan_general_int_status);
vxge_hw_device_unmask_all(hldev);
-
- return;
}
/**
@@ -364,8 +356,6 @@ void vxge_hw_device_intr_disable(struct __vxge_hw_device *hldev)
vxge_hw_vpath_intr_disable(
VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
}
-
- return;
}
/**
@@ -385,8 +375,6 @@ void vxge_hw_device_mask_all(struct __vxge_hw_device *hldev)
__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
&hldev->common_reg->titan_mask_all_int);
-
- return;
}
/**
@@ -406,8 +394,6 @@ void vxge_hw_device_unmask_all(struct __vxge_hw_device *hldev)
__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
&hldev->common_reg->titan_mask_all_int);
-
- return;
}
/**
@@ -649,8 +635,6 @@ void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX]),
&hldev->common_reg->tim_int_status1);
}
-
- return;
}
/*
@@ -878,7 +862,7 @@ void vxge_hw_ring_rxd_post_post(struct __vxge_hw_ring *ring, void *rxdh)
channel = &ring->channel;
- rxdp->control_0 |= VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
+ rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
if (ring->stats->common_stats.usage_cnt > 0)
ring->stats->common_stats.usage_cnt--;
@@ -902,7 +886,7 @@ void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring, void *rxdh)
channel = &ring->channel;
wmb();
- rxdp->control_0 |= VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
+ rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
vxge_hw_channel_dtr_post(channel, rxdh);
@@ -966,6 +950,7 @@ enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
struct __vxge_hw_channel *channel;
struct vxge_hw_ring_rxd_1 *rxdp;
enum vxge_hw_status status = VXGE_HW_OK;
+ u64 control_0, own;
channel = &ring->channel;
@@ -977,8 +962,12 @@ enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
goto exit;
}
+ control_0 = rxdp->control_0;
+ own = control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
+ *t_code = (u8)VXGE_HW_RING_RXD_T_CODE_GET(control_0);
+
/* check whether it is not the end */
- if (!(rxdp->control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER)) {
+ if (!own || ((*t_code == VXGE_HW_RING_T_CODE_FRM_DROP) && own)) {
vxge_assert(((struct vxge_hw_ring_rxd_1 *)rxdp)->host_control !=
0);
@@ -986,8 +975,6 @@ enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
++ring->cmpl_cnt;
vxge_hw_channel_dtr_complete(channel);
- *t_code = (u8)VXGE_HW_RING_RXD_T_CODE_GET(rxdp->control_0);
-
vxge_assert(*t_code != VXGE_HW_RING_RXD_T_CODE_UNUSED);
ring->stats->common_stats.usage_cnt++;
@@ -1035,12 +1022,13 @@ enum vxge_hw_status vxge_hw_ring_handle_tcode(
* such as unknown UPV6 header), Drop it !!!
*/
- if (t_code == 0 || t_code == 5) {
+ if (t_code == VXGE_HW_RING_T_CODE_OK ||
+ t_code == VXGE_HW_RING_T_CODE_L3_PKT_ERR) {
status = VXGE_HW_OK;
goto exit;
}
- if (t_code > 0xF) {
+ if (t_code > VXGE_HW_RING_T_CODE_MULTI_ERR) {
status = VXGE_HW_ERR_INVALID_TCODE;
goto exit;
}
@@ -2216,29 +2204,24 @@ exit:
* This API will associate a given MSIX vector numbers with the four TIM
* interrupts and alarm interrupt.
*/
-enum vxge_hw_status
+void
vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
int alarm_msix_id)
{
u64 val64;
struct __vxge_hw_virtualpath *vpath = vp->vpath;
struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
- u32 first_vp_id = vpath->hldev->first_vp_id;
+ u32 vp_id = vp->vpath->vp_id;
val64 = VXGE_HW_INTERRUPT_CFG0_GROUP0_MSIX_FOR_TXTI(
- (first_vp_id * 4) + tim_msix_id[0]) |
+ (vp_id * 4) + tim_msix_id[0]) |
VXGE_HW_INTERRUPT_CFG0_GROUP1_MSIX_FOR_TXTI(
- (first_vp_id * 4) + tim_msix_id[1]) |
- VXGE_HW_INTERRUPT_CFG0_GROUP2_MSIX_FOR_TXTI(
- (first_vp_id * 4) + tim_msix_id[2]);
-
- val64 |= VXGE_HW_INTERRUPT_CFG0_GROUP3_MSIX_FOR_TXTI(
- (first_vp_id * 4) + tim_msix_id[3]);
+ (vp_id * 4) + tim_msix_id[1]);
writeq(val64, &vp_reg->interrupt_cfg0);
writeq(VXGE_HW_INTERRUPT_CFG2_ALARM_MAP_TO_MSG(
- (first_vp_id * 4) + alarm_msix_id),
+ (vpath->hldev->first_vp_id * 4) + alarm_msix_id),
&vp_reg->interrupt_cfg2);
if (vpath->hldev->config.intr_mode ==
@@ -2258,8 +2241,6 @@ vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
VXGE_HW_ONE_SHOT_VECT3_EN_ONE_SHOT_VECT3_EN,
0, 32), &vp_reg->one_shot_vect3_en);
}
-
- return VXGE_HW_OK;
}
/**
@@ -2279,11 +2260,8 @@ vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
{
struct __vxge_hw_device *hldev = vp->vpath->hldev;
__vxge_hw_pio_mem_write32_upper(
- (u32) vxge_bVALn(vxge_mBIT(hldev->first_vp_id +
- (msix_id / 4)), 0, 32),
+ (u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
&hldev->common_reg->set_msix_mask_vect[msix_id % 4]);
-
- return;
}
/**
@@ -2305,19 +2283,15 @@ vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
if (hldev->config.intr_mode ==
VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
__vxge_hw_pio_mem_write32_upper(
- (u32)vxge_bVALn(vxge_mBIT(hldev->first_vp_id +
- (msix_id/4)), 0, 32),
+ (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
&hldev->common_reg->
clr_msix_one_shot_vec[msix_id%4]);
} else {
__vxge_hw_pio_mem_write32_upper(
- (u32)vxge_bVALn(vxge_mBIT(hldev->first_vp_id +
- (msix_id/4)), 0, 32),
+ (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
&hldev->common_reg->
clear_msix_mask_vect[msix_id%4]);
}
-
- return;
}
/**
@@ -2337,11 +2311,8 @@ vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vp, int msix_id)
{
struct __vxge_hw_device *hldev = vp->vpath->hldev;
__vxge_hw_pio_mem_write32_upper(
- (u32)vxge_bVALn(vxge_mBIT(hldev->first_vp_id +
- (msix_id/4)), 0, 32),
+ (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
&hldev->common_reg->clear_msix_mask_vect[msix_id%4]);
-
- return;
}
/**
@@ -2358,8 +2329,6 @@ vxge_hw_vpath_msix_mask_all(struct __vxge_hw_vpath_handle *vp)
__vxge_hw_pio_mem_write32_upper(
(u32)vxge_bVALn(vxge_mBIT(vp->vpath->vp_id), 0, 32),
&vp->vpath->hldev->common_reg->set_msix_mask_all_vect);
-
- return;
}
/**
@@ -2398,8 +2367,6 @@ void vxge_hw_vpath_inta_mask_tx_rx(struct __vxge_hw_vpath_handle *vp)
tim_int_mask1[VXGE_HW_VPATH_INTR_RX] | val64),
&hldev->common_reg->tim_int_mask1);
}
-
- return;
}
/**
@@ -2436,8 +2403,6 @@ void vxge_hw_vpath_inta_unmask_tx_rx(struct __vxge_hw_vpath_handle *vp)
tim_int_mask1[VXGE_HW_VPATH_INTR_RX])) & val64,
&hldev->common_reg->tim_int_mask1);
}
-
- return;
}
/**
diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h
index 861c853e3e84..c252f3d3f650 100644
--- a/drivers/net/vxge/vxge-traffic.h
+++ b/drivers/net/vxge/vxge-traffic.h
@@ -1866,6 +1866,51 @@ struct vxge_hw_ring_rxd_info {
u32 rth_hash_type;
u32 rth_value;
};
+/**
+ * enum vxge_hw_ring_tcode - Transfer codes returned by adapter
+ * @VXGE_HW_RING_T_CODE_OK: Transfer ok.
+ * @VXGE_HW_RING_T_CODE_L3_CKSUM_MISMATCH: Layer 3 checksum presentation
+ * configuration mismatch.
+ * @VXGE_HW_RING_T_CODE_L4_CKSUM_MISMATCH: Layer 4 checksum presentation
+ * configuration mismatch.
+ * @VXGE_HW_RING_T_CODE_L3_L4_CKSUM_MISMATCH: Layer 3 and Layer 4 checksum
+ * presentation configuration mismatch.
+ * @VXGE_HW_RING_T_CODE_L3_PKT_ERR: Layer 3 error unparseable packet,
+ * such as unknown IPv6 header.
+ * @VXGE_HW_RING_T_CODE_L2_FRM_ERR: Layer 2 error frame integrity
+ * error, such as FCS or ECC).
+ * @VXGE_HW_RING_T_CODE_BUF_SIZE_ERR: Buffer size error the RxD buffer(
+ * s) were not appropriately sized and data loss occurred.
+ * @VXGE_HW_RING_T_CODE_INT_ECC_ERR: Internal ECC error RxD corrupted.
+ * @VXGE_HW_RING_T_CODE_BENIGN_OVFLOW: Benign overflow the contents of
+ * Segment1 exceeded the capacity of Buffer1 and the remainder
+ * was placed in Buffer2. Segment2 now starts in Buffer3.
+ * No data loss or errors occurred.
+ * @VXGE_HW_RING_T_CODE_ZERO_LEN_BUFF: Buffer size 0 one of the RxDs
+ * assigned buffers has a size of 0 bytes.
+ * @VXGE_HW_RING_T_CODE_FRM_DROP: Frame dropped either due to
+ * VPath Reset or because of a VPIN mismatch.
+ * @VXGE_HW_RING_T_CODE_UNUSED: Unused
+ * @VXGE_HW_RING_T_CODE_MULTI_ERR: Multiple errors more than one
+ * transfer code condition occurred.
+ *
+ * Transfer codes returned by adapter.
+ */
+enum vxge_hw_ring_tcode {
+ VXGE_HW_RING_T_CODE_OK = 0x0,
+ VXGE_HW_RING_T_CODE_L3_CKSUM_MISMATCH = 0x1,
+ VXGE_HW_RING_T_CODE_L4_CKSUM_MISMATCH = 0x2,
+ VXGE_HW_RING_T_CODE_L3_L4_CKSUM_MISMATCH = 0x3,
+ VXGE_HW_RING_T_CODE_L3_PKT_ERR = 0x5,
+ VXGE_HW_RING_T_CODE_L2_FRM_ERR = 0x6,
+ VXGE_HW_RING_T_CODE_BUF_SIZE_ERR = 0x7,
+ VXGE_HW_RING_T_CODE_INT_ECC_ERR = 0x8,
+ VXGE_HW_RING_T_CODE_BENIGN_OVFLOW = 0x9,
+ VXGE_HW_RING_T_CODE_ZERO_LEN_BUFF = 0xA,
+ VXGE_HW_RING_T_CODE_FRM_DROP = 0xC,
+ VXGE_HW_RING_T_CODE_UNUSED = 0xE,
+ VXGE_HW_RING_T_CODE_MULTI_ERR = 0xF
+};
/**
* enum enum vxge_hw_ring_hash_type - RTH hash types
@@ -1910,7 +1955,7 @@ vxge_hw_ring_rxd_post_post(
void *rxdh);
enum vxge_hw_status
-vxge_hw_ring_replenish(struct __vxge_hw_ring *ring_handle, u16 min_flag);
+vxge_hw_ring_replenish(struct __vxge_hw_ring *ring_handle);
void
vxge_hw_ring_rxd_post_post_wmb(
@@ -2042,7 +2087,6 @@ void vxge_hw_fifo_txdl_free(
#define VXGE_HW_RING_NEXT_BLOCK_POINTER_OFFSET (VXGE_HW_BLOCK_SIZE-8)
#define VXGE_HW_RING_MEMBLOCK_IDX_OFFSET (VXGE_HW_BLOCK_SIZE-16)
-#define VXGE_HW_RING_MIN_BUFF_ALLOCATION 64
/*
* struct __vxge_hw_ring_rxd_priv - Receive descriptor HW-private data.
@@ -2332,7 +2376,7 @@ enum vxge_hw_status vxge_hw_vpath_alarm_process(
struct __vxge_hw_vpath_handle *vpath_handle,
u32 skip_alarms);
-enum vxge_hw_status
+void
vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vpath_handle,
int *tim_msix_id, int alarm_msix_id);
diff --git a/drivers/net/vxge/vxge-version.h b/drivers/net/vxge/vxge-version.h
index 77c2a754b7b8..5da7ab1fd307 100644
--- a/drivers/net/vxge/vxge-version.h
+++ b/drivers/net/vxge/vxge-version.h
@@ -17,7 +17,7 @@
#define VXGE_VERSION_MAJOR "2"
#define VXGE_VERSION_MINOR "0"
-#define VXGE_VERSION_FIX "6"
-#define VXGE_VERSION_BUILD "18937"
+#define VXGE_VERSION_FIX "8"
+#define VXGE_VERSION_BUILD "20182"
#define VXGE_VERSION_FOR "k"
#endif
diff --git a/drivers/net/wan/cycx_x25.c b/drivers/net/wan/cycx_x25.c
index cd8cb95c5bd7..cf9e15fd8d91 100644
--- a/drivers/net/wan/cycx_x25.c
+++ b/drivers/net/wan/cycx_x25.c
@@ -634,11 +634,12 @@ static netdev_tx_t cycx_netdevice_hard_start_xmit(struct sk_buff *skb,
}
} else { /* chan->protocol == ETH_P_X25 */
switch (skb->data[0]) {
- case 0: break;
- case 1: /* Connect request */
+ case X25_IFACE_DATA:
+ break;
+ case X25_IFACE_CONNECT:
cycx_x25_chan_connect(dev);
goto free_packet;
- case 2: /* Disconnect request */
+ case X25_IFACE_DISCONNECT:
cycx_x25_chan_disconnect(dev);
goto free_packet;
default:
@@ -1406,7 +1407,8 @@ static void cycx_x25_set_chan_state(struct net_device *dev, u8 state)
reset_timer(dev);
if (chan->protocol == ETH_P_X25)
- cycx_x25_chan_send_event(dev, 1);
+ cycx_x25_chan_send_event(dev,
+ X25_IFACE_CONNECT);
break;
case WAN_CONNECTING:
@@ -1424,7 +1426,8 @@ static void cycx_x25_set_chan_state(struct net_device *dev, u8 state)
}
if (chan->protocol == ETH_P_X25)
- cycx_x25_chan_send_event(dev, 2);
+ cycx_x25_chan_send_event(dev,
+ X25_IFACE_DISCONNECT);
netif_wake_queue(dev);
break;
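Note: the cycx_x25 hunks above (and the hdlc_x25, lapbether and x25_asy hunks further down) replace the bare 0x00/0x01/0x02 command bytes that the X.25 layer prepends to frames with the named X25_IFACE_DATA, X25_IFACE_CONNECT and X25_IFACE_DISCONNECT constants; the on-wire values are unchanged. The dispatch pattern these drivers share, with hypothetical helper names standing in for the driver-specific calls:

    switch (skb->data[0]) {
    case X25_IFACE_DATA:            /* was the literal 0x00 */
            break;                  /* payload frame, transmit it */
    case X25_IFACE_CONNECT:         /* was 0x01 */
            x25_handle_connect(dev);        /* hypothetical helper */
            break;                  /* real drivers free the skb here */
    case X25_IFACE_DISCONNECT:      /* was 0x02 */
            x25_handle_disconnect(dev);     /* hypothetical helper */
            break;
    default:
            break;
    }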
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index a4859f7a7cc0..d45b08d1dbc9 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -1175,8 +1175,6 @@ static netdev_tx_t dscc4_start_xmit(struct sk_buff *skb,
spin_unlock(&dpriv->lock);
#endif
- dev->trans_start = jiffies;
-
if (debug > 2)
dscc4_tx_print(dev, dpriv, "Xmit");
/* To be cleaned(unsigned int)/optimized. Later, ok ? */
diff --git a/drivers/net/wan/hd64570.c b/drivers/net/wan/hd64570.c
index 4dde2ea4a189..a3ea27ce04f2 100644
--- a/drivers/net/wan/hd64570.c
+++ b/drivers/net/wan/hd64570.c
@@ -658,7 +658,6 @@ static netdev_tx_t sca_xmit(struct sk_buff *skb, struct net_device *dev)
#endif
writew(len, &desc->len);
writeb(ST_TX_EOM, &desc->stat);
- dev->trans_start = jiffies;
port->txin = next_desc(port, port->txin, 1);
sca_outw(desc_offset(port, port->txin, 1),
diff --git a/drivers/net/wan/hd64572.c b/drivers/net/wan/hd64572.c
index aad9ed45c254..ea476cbd38b5 100644
--- a/drivers/net/wan/hd64572.c
+++ b/drivers/net/wan/hd64572.c
@@ -585,7 +585,6 @@ static netdev_tx_t sca_xmit(struct sk_buff *skb, struct net_device *dev)
writew(len, &desc->len);
writeb(ST_TX_EOM, &desc->stat);
- dev->trans_start = jiffies;
port->txin = (port->txin + 1) % card->tx_ring_buffers;
sca_outl(desc_offset(port, port->txin, 1),
diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
index c7adbb79f7cc..70527e5a54a2 100644
--- a/drivers/net/wan/hdlc_x25.c
+++ b/drivers/net/wan/hdlc_x25.c
@@ -49,14 +49,14 @@ static void x25_connect_disconnect(struct net_device *dev, int reason, int code)
static void x25_connected(struct net_device *dev, int reason)
{
- x25_connect_disconnect(dev, reason, 1);
+ x25_connect_disconnect(dev, reason, X25_IFACE_CONNECT);
}
static void x25_disconnected(struct net_device *dev, int reason)
{
- x25_connect_disconnect(dev, reason, 2);
+ x25_connect_disconnect(dev, reason, X25_IFACE_DISCONNECT);
}
@@ -71,7 +71,7 @@ static int x25_data_indication(struct net_device *dev, struct sk_buff *skb)
return NET_RX_DROP;
ptr = skb->data;
- *ptr = 0;
+ *ptr = X25_IFACE_DATA;
skb->protocol = x25_type_trans(skb, dev);
return netif_rx(skb);
@@ -94,13 +94,13 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
/* X.25 to LAPB */
switch (skb->data[0]) {
- case 0: /* Data to be transmitted */
+ case X25_IFACE_DATA: /* Data to be transmitted */
skb_pull(skb, 1);
if ((result = lapb_data_request(dev, skb)) != LAPB_OK)
dev_kfree_skb(skb);
return NETDEV_TX_OK;
- case 1:
+ case X25_IFACE_CONNECT:
if ((result = lapb_connect_request(dev))!= LAPB_OK) {
if (result == LAPB_CONNECTED)
/* Send connect confirm. msg to level 3 */
@@ -112,7 +112,7 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
}
break;
- case 2:
+ case X25_IFACE_DISCONNECT:
if ((result = lapb_disconnect_request(dev)) != LAPB_OK) {
if (result == LAPB_NOTCONNECTED)
/* Send disconnect confirm. msg to level 3 */
diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c
index 0c2cdde686a0..88e363033e23 100644
--- a/drivers/net/wan/ixp4xx_hss.c
+++ b/drivers/net/wan/ixp4xx_hss.c
@@ -891,7 +891,6 @@ static int hss_hdlc_xmit(struct sk_buff *skb, struct net_device *dev)
wmb();
queue_put_desc(queue_ids[port->id].tx, tx_desc_phys(port, n), desc);
- dev->trans_start = jiffies;
if (qmgr_stat_below_low_watermark(txreadyq)) { /* empty */
#if DEBUG_TX
diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
index 98e2f99903d7..4d4dc38c7290 100644
--- a/drivers/net/wan/lapbether.c
+++ b/drivers/net/wan/lapbether.c
@@ -139,7 +139,7 @@ static int lapbeth_data_indication(struct net_device *dev, struct sk_buff *skb)
return NET_RX_DROP;
ptr = skb->data;
- *ptr = 0x00;
+ *ptr = X25_IFACE_DATA;
skb->protocol = x25_type_trans(skb, dev);
return netif_rx(skb);
@@ -161,14 +161,14 @@ static netdev_tx_t lapbeth_xmit(struct sk_buff *skb,
goto drop;
switch (skb->data[0]) {
- case 0x00:
+ case X25_IFACE_DATA:
break;
- case 0x01:
+ case X25_IFACE_CONNECT:
if ((err = lapb_connect_request(dev)) != LAPB_OK)
printk(KERN_ERR "lapbeth: lapb_connect_request "
"error: %d\n", err);
goto drop;
- case 0x02:
+ case X25_IFACE_DISCONNECT:
if ((err = lapb_disconnect_request(dev)) != LAPB_OK)
printk(KERN_ERR "lapbeth: lapb_disconnect_request "
"err: %d\n", err);
@@ -225,7 +225,7 @@ static void lapbeth_connected(struct net_device *dev, int reason)
}
ptr = skb_put(skb, 1);
- *ptr = 0x01;
+ *ptr = X25_IFACE_CONNECT;
skb->protocol = x25_type_trans(skb, dev);
netif_rx(skb);
@@ -242,7 +242,7 @@ static void lapbeth_disconnected(struct net_device *dev, int reason)
}
ptr = skb_put(skb, 1);
- *ptr = 0x02;
+ *ptr = X25_IFACE_DISCONNECT;
skb->protocol = x25_type_trans(skb, dev);
netif_rx(skb);
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index b27850377121..e2c6f7f4f51c 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -1506,8 +1506,6 @@ static netdev_tx_t lmc_start_xmit(struct sk_buff *skb,
/* send now! */
LMC_CSR_WRITE (sc, csr_txpoll, 0);
- dev->trans_start = jiffies;
-
spin_unlock_irqrestore(&sc->lmc_lock, flags);
lmc_trace(dev, "lmc_start_xmit_out");
@@ -2103,7 +2101,7 @@ static void lmc_driver_timeout(struct net_device *dev)
printk("%s: Xmitter busy|\n", dev->name);
sc->extra_stats.tx_tbusy_calls++;
- if (jiffies - dev->trans_start < TX_TIMEOUT)
+ if (jiffies - dev_trans_start(dev) < TX_TIMEOUT)
goto bug_out;
/*
@@ -2135,7 +2133,7 @@ static void lmc_driver_timeout(struct net_device *dev)
sc->lmc_device->stats.tx_errors++;
sc->extra_stats.tx_ProcTimeout++; /* -baz */
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
bug_out:
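Note: a recurring change in these WAN drivers (dscc4, hd64570, hd64572, ixp4xx_hss, lmc, pc300, wanxl) is dropping the dev->trans_start = jiffies update from the transmit path: the networking core already timestamps the transmit queue when a packet is handed to the driver, so the per-driver write is redundant. Where the value is still consulted, as in lmc_driver_timeout() above, it is read through dev_trans_start(); the writes that remain only push the watchdog forward and are annotated "prevent tx timeout". A sketch of the timeout-side check, assuming <linux/netdevice.h>:

    /* only treat the queue as stuck if nothing was handed to the
     * hardware for at least TX_TIMEOUT jiffies */
    if (jiffies - dev_trans_start(dev) < TX_TIMEOUT)
            goto bug_out;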
diff --git a/drivers/net/wan/pc300_drv.c b/drivers/net/wan/pc300_drv.c
index 3f744c643094..c6aa66e5b52f 100644
--- a/drivers/net/wan/pc300_drv.c
+++ b/drivers/net/wan/pc300_drv.c
@@ -396,7 +396,7 @@ static void tx1_dma_buf_check(pc300_t * card, int ch)
u16 next_bd = card->chan[ch].tx_next_bd;
u32 scabase = card->hw.scabase;
- printk ("\nnfree_tx_bd = %d \n", card->chan[ch].nfree_tx_bd);
+ printk ("\nnfree_tx_bd = %d\n", card->chan[ch].nfree_tx_bd);
printk("#CH%d: f_bd = %d(0x%08x), n_bd = %d(0x%08x)\n", ch,
first_bd, TX_BD_ADDR(ch, first_bd),
next_bd, TX_BD_ADDR(ch, next_bd));
@@ -1790,7 +1790,7 @@ static void cpc_tx_timeout(struct net_device *dev)
cpc_readb(card->hw.falcbase + card->hw.cpld_reg2) &
~(CPLD_REG2_FALC_LED1 << (2 * ch)));
}
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies; /* prevent tx timeout */
CPC_UNLOCK(card, flags);
netif_wake_queue(dev);
}
@@ -1849,7 +1849,6 @@ static int cpc_queue_xmit(struct sk_buff *skb, struct net_device *dev)
if (d->trace_on) {
cpc_trace(dev, skb, 'T');
}
- dev->trans_start = jiffies;
/* Start transmission */
CPC_LOCK(card, flags);
diff --git a/drivers/net/wan/pc300_tty.c b/drivers/net/wan/pc300_tty.c
index 4917a94943bd..4293889e287e 100644
--- a/drivers/net/wan/pc300_tty.c
+++ b/drivers/net/wan/pc300_tty.c
@@ -366,7 +366,7 @@ static void cpc_tty_close(struct tty_struct *tty, struct file *flip)
int res;
if (!tty || !tty->driver_data ) {
- CPC_TTY_DBG("hdlx-tty: no TTY in close \n");
+ CPC_TTY_DBG("hdlx-tty: no TTY in close\n");
return;
}
diff --git a/drivers/net/wan/sdla.c b/drivers/net/wan/sdla.c
index 31c41af2246d..43ae6f440bfb 100644
--- a/drivers/net/wan/sdla.c
+++ b/drivers/net/wan/sdla.c
@@ -1352,7 +1352,7 @@ static int sdla_set_config(struct net_device *dev, struct ifmap *map)
return(-EINVAL);
if (!request_region(map->base_addr, SDLA_IO_EXTENTS, dev->name)){
- printk(KERN_WARNING "SDLA: io-port 0x%04lx in use \n", dev->base_addr);
+ printk(KERN_WARNING "SDLA: io-port 0x%04lx in use\n", dev->base_addr);
return(-EINVAL);
}
base = map->base_addr;
diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c
index 541c700dceef..db73a7be199f 100644
--- a/drivers/net/wan/wanxl.c
+++ b/drivers/net/wan/wanxl.c
@@ -298,7 +298,6 @@ static netdev_tx_t wanxl_xmit(struct sk_buff *skb, struct net_device *dev)
desc->stat = PACKET_FULL;
writel(1 << (DOORBELL_TO_CARD_TX_0 + port->node),
port->card->plx + PLX_DOORBELL_TO_CARD);
- dev->trans_start = jiffies;
port->tx_out = (port->tx_out + 1) % TX_BUFFERS;
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index 80d5c5834a0b..166e77dfffda 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -29,12 +29,12 @@
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
-#include <linux/x25.h>
#include <linux/lapb.h>
#include <linux/init.h>
#include <linux/rtnetlink.h>
#include <linux/compat.h>
#include <linux/slab.h>
+#include <net/x25device.h>
#include "x25_asy.h"
#include <net/x25device.h>
@@ -315,15 +315,15 @@ static netdev_tx_t x25_asy_xmit(struct sk_buff *skb,
}
switch (skb->data[0]) {
- case 0x00:
+ case X25_IFACE_DATA:
break;
- case 0x01: /* Connection request .. do nothing */
+ case X25_IFACE_CONNECT: /* Connection request .. do nothing */
err = lapb_connect_request(dev);
if (err != LAPB_OK)
printk(KERN_ERR "x25_asy: lapb_connect_request error - %d\n", err);
kfree_skb(skb);
return NETDEV_TX_OK;
- case 0x02: /* Disconnect request .. do nothing - hang up ?? */
+ case X25_IFACE_DISCONNECT: /* do nothing - hang up ?? */
err = lapb_disconnect_request(dev);
if (err != LAPB_OK)
printk(KERN_ERR "x25_asy: lapb_disconnect_request error - %d\n", err);
@@ -411,7 +411,7 @@ static void x25_asy_connected(struct net_device *dev, int reason)
}
ptr = skb_put(skb, 1);
- *ptr = 0x01;
+ *ptr = X25_IFACE_CONNECT;
skb->protocol = x25_type_trans(skb, sl->dev);
netif_rx(skb);
@@ -430,7 +430,7 @@ static void x25_asy_disconnected(struct net_device *dev, int reason)
}
ptr = skb_put(skb, 1);
- *ptr = 0x02;
+ *ptr = X25_IFACE_DISCONNECT;
skb->protocol = x25_type_trans(skb, sl->dev);
netif_rx(skb);
diff --git a/drivers/net/wd.c b/drivers/net/wd.c
index d8322d2d1e29..746a5ee32f33 100644
--- a/drivers/net/wd.c
+++ b/drivers/net/wd.c
@@ -395,7 +395,6 @@ wd_reset_8390(struct net_device *dev)
outb(NIC16 | ((dev->mem_start>>19) & 0x1f), wd_cmd_port+WD_CMDREG5);
if (ei_debug > 1) printk("reset done\n");
- return;
}
/* Grab the 8390 specific header. Similar to the block_input routine, but
diff --git a/drivers/net/wimax/i2400m/control.c b/drivers/net/wimax/i2400m/control.c
index 6180772dcc09..d86e8f31e7fc 100644
--- a/drivers/net/wimax/i2400m/control.c
+++ b/drivers/net/wimax/i2400m/control.c
@@ -83,6 +83,21 @@
#define D_SUBMODULE control
#include "debug-levels.h"
+static int i2400m_idle_mode_disabled; /* 0 (idle mode enabled) by default */
+module_param_named(idle_mode_disabled, i2400m_idle_mode_disabled, int, 0644);
+MODULE_PARM_DESC(idle_mode_disabled,
+ "If true, the device will not enable idle mode negotiation "
+ "with the base station (when connected) to save power.");
+
+/* 0 (power saving enabled) by default */
+static int i2400m_power_save_disabled;
+module_param_named(power_save_disabled, i2400m_power_save_disabled, int, 0644);
+MODULE_PARM_DESC(power_save_disabled,
+ "If true, the driver will not tell the device to enter "
+ "power saving mode when it reports it is ready for it. "
+ "False by default (so the device is told to do power "
+ "saving).");
+
int i2400m_passive_mode; /* 0 (passive mode disabled) by default */
module_param_named(passive_mode, i2400m_passive_mode, int, 0644);
MODULE_PARM_DESC(passive_mode,
@@ -346,7 +361,7 @@ void i2400m_report_tlv_system_state(struct i2400m *i2400m,
i2400m_state);
i2400m_reset(i2400m, I2400M_RT_WARM);
break;
- };
+ }
d_fnend(3, dev, "(i2400m %p ss %p [%u]) = void\n",
i2400m, ss, i2400m_state);
}
@@ -395,7 +410,7 @@ void i2400m_report_tlv_media_status(struct i2400m *i2400m,
default:
dev_err(dev, "HW BUG? unknown media status %u\n",
status);
- };
+ }
d_fnend(3, dev, "(i2400m %p ms %p [%u]) = void\n",
i2400m, ms, status);
}
@@ -524,7 +539,7 @@ void i2400m_report_hook(struct i2400m *i2400m,
}
}
break;
- };
+ }
d_fnend(3, dev, "(i2400m %p l3l4_hdr %p size %zu) = void\n",
i2400m, l3l4_hdr, size);
}
@@ -567,8 +582,7 @@ void i2400m_msg_ack_hook(struct i2400m *i2400m,
size);
}
break;
- };
- return;
+ }
}
@@ -740,7 +754,7 @@ struct sk_buff *i2400m_msg_to_dev(struct i2400m *i2400m,
break;
default:
ack_timeout = HZ;
- };
+ }
if (unlikely(i2400m->trace_msg_from_user))
wimax_msg(&i2400m->wimax_dev, "echo", buf, buf_len, GFP_KERNEL);
@@ -1419,5 +1433,4 @@ void i2400m_dev_shutdown(struct i2400m *i2400m)
d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
- return;
}
diff --git a/drivers/net/wimax/i2400m/driver.c b/drivers/net/wimax/i2400m/driver.c
index 94dc83c3969d..9c8b78d4abd2 100644
--- a/drivers/net/wimax/i2400m/driver.c
+++ b/drivers/net/wimax/i2400m/driver.c
@@ -75,25 +75,6 @@
#include "debug-levels.h"
-int i2400m_idle_mode_disabled; /* 0 (idle mode enabled) by default */
-module_param_named(idle_mode_disabled, i2400m_idle_mode_disabled, int, 0644);
-MODULE_PARM_DESC(idle_mode_disabled,
- "If true, the device will not enable idle mode negotiation "
- "with the base station (when connected) to save power.");
-
-int i2400m_rx_reorder_disabled; /* 0 (rx reorder enabled) by default */
-module_param_named(rx_reorder_disabled, i2400m_rx_reorder_disabled, int, 0644);
-MODULE_PARM_DESC(rx_reorder_disabled,
- "If true, RX reordering will be disabled.");
-
-int i2400m_power_save_disabled; /* 0 (power saving enabled) by default */
-module_param_named(power_save_disabled, i2400m_power_save_disabled, int, 0644);
-MODULE_PARM_DESC(power_save_disabled,
- "If true, the driver will not tell the device to enter "
- "power saving mode when it reports it is ready for it. "
- "False by default (so the device is told to do power "
- "saving).");
-
static char i2400m_debug_params[128];
module_param_string(debug, i2400m_debug_params, sizeof(i2400m_debug_params),
0644);
@@ -395,6 +376,16 @@ retry:
result = i2400m_dev_initialize(i2400m);
if (result < 0)
goto error_dev_initialize;
+
+ /* We don't want any additional unwanted error recovery triggered
+ * from any other context so if anything went wrong before we come
+ * here, let's keep i2400m->error_recovery untouched and leave it to
+ * dev_reset_handle(). See dev_reset_handle(). */
+
+ atomic_dec(&i2400m->error_recovery);
+ /* Everything works so far, ok, now we are ready to
+ * take error recovery if it's required. */
+
/* At this point, reports will come for the device and set it
* to the right state if it is different than UNINITIALIZED */
d_fnend(3, dev, "(net_dev %p [i2400m %p]) = %d\n",
@@ -403,10 +394,10 @@ retry:
error_dev_initialize:
error_check_mac_addr:
+error_fw_check:
i2400m->ready = 0;
wmb(); /* see i2400m->ready's documentation */
flush_workqueue(i2400m->work_queue);
-error_fw_check:
if (i2400m->bus_dev_stop)
i2400m->bus_dev_stop(i2400m);
error_bus_dev_start:
@@ -436,7 +427,8 @@ int i2400m_dev_start(struct i2400m *i2400m, enum i2400m_bri bm_flags)
result = __i2400m_dev_start(i2400m, bm_flags);
if (result >= 0) {
i2400m->updown = 1;
- wmb(); /* see i2400m->updown's documentation */
+ i2400m->alive = 1;
+ wmb(); /* see i2400m->updown and i2400m->alive's doc */
}
}
mutex_unlock(&i2400m->init_mutex);
@@ -497,7 +489,8 @@ void i2400m_dev_stop(struct i2400m *i2400m)
if (i2400m->updown) {
__i2400m_dev_stop(i2400m);
i2400m->updown = 0;
- wmb(); /* see i2400m->updown's documentation */
+ i2400m->alive = 0;
+ wmb(); /* see i2400m->updown and i2400m->alive's doc */
}
mutex_unlock(&i2400m->init_mutex);
}
@@ -617,12 +610,12 @@ int i2400m_post_reset(struct i2400m *i2400m)
error_dev_start:
if (i2400m->bus_release)
i2400m->bus_release(i2400m);
-error_bus_setup:
/* even if the device was up, it could not be recovered, so we
* mark it as down. */
i2400m->updown = 0;
wmb(); /* see i2400m->updown's documentation */
mutex_unlock(&i2400m->init_mutex);
+error_bus_setup:
d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, result);
return result;
}
@@ -669,6 +662,9 @@ void __i2400m_dev_reset_handle(struct work_struct *ws)
d_fnstart(3, dev, "(ws %p i2400m %p reason %s)\n", ws, i2400m, reason);
+ i2400m->boot_mode = 1;
+ wmb(); /* Make sure i2400m_msg_to_dev() sees boot_mode */
+
result = 0;
if (mutex_trylock(&i2400m->init_mutex) == 0) {
/* We are still in i2400m_dev_start() [let it fail] or
@@ -679,39 +675,68 @@ void __i2400m_dev_reset_handle(struct work_struct *ws)
complete(&i2400m->msg_completion);
goto out;
}
- if (i2400m->updown == 0) {
- dev_info(dev, "%s: device is down, doing nothing\n", reason);
- goto out_unlock;
- }
+
dev_err(dev, "%s: reinitializing driver\n", reason);
- __i2400m_dev_stop(i2400m);
- result = __i2400m_dev_start(i2400m,
- I2400M_BRI_SOFT | I2400M_BRI_MAC_REINIT);
- if (result < 0) {
+ rmb();
+ if (i2400m->updown) {
+ __i2400m_dev_stop(i2400m);
i2400m->updown = 0;
wmb(); /* see i2400m->updown's documentation */
- dev_err(dev, "%s: cannot start the device: %d\n",
- reason, result);
- result = -EUCLEAN;
}
-out_unlock:
+
+ if (i2400m->alive) {
+ result = __i2400m_dev_start(i2400m,
+ I2400M_BRI_SOFT | I2400M_BRI_MAC_REINIT);
+ if (result < 0) {
+ dev_err(dev, "%s: cannot start the device: %d\n",
+ reason, result);
+ result = -EUCLEAN;
+ if (atomic_read(&i2400m->bus_reset_retries)
+ >= I2400M_BUS_RESET_RETRIES) {
+ result = -ENODEV;
+ dev_err(dev, "tried too many times to "
+ "reset the device, giving up\n");
+ }
+ }
+ }
+
if (i2400m->reset_ctx) {
ctx->result = result;
complete(&ctx->completion);
}
mutex_unlock(&i2400m->init_mutex);
if (result == -EUCLEAN) {
+ /*
+ * We come here because the reset during operational mode
+ * wasn't successfully done and we need to proceed to a bus
+ * reset. For dev_reset_handle() to be able to handle
+ * the reset event later properly, we restore boot_mode back
+ * to the state before the previous reset, ie: just like we are
+ * issuing the bus reset for the first time.
+ */
+ i2400m->boot_mode = 0;
+ wmb();
+
+ atomic_inc(&i2400m->bus_reset_retries);
/* ops, need to clean up [w/ init_mutex not held] */
result = i2400m_reset(i2400m, I2400M_RT_BUS);
if (result >= 0)
result = -ENODEV;
+ } else {
+ rmb();
+ if (i2400m->alive) {
+ /* great, we expect the device state up and
+ * dev_start() actually brings the device state up */
+ i2400m->updown = 1;
+ wmb();
+ atomic_set(&i2400m->bus_reset_retries, 0);
+ }
}
out:
i2400m_put(i2400m);
kfree(iw);
d_fnend(3, dev, "(ws %p i2400m %p reason %s) = void\n",
ws, i2400m, reason);
- return;
}
@@ -729,14 +754,72 @@ out:
*/
int i2400m_dev_reset_handle(struct i2400m *i2400m, const char *reason)
{
- i2400m->boot_mode = 1;
- wmb(); /* Make sure i2400m_msg_to_dev() sees boot_mode */
return i2400m_schedule_work(i2400m, __i2400m_dev_reset_handle,
GFP_ATOMIC, &reason, sizeof(reason));
}
EXPORT_SYMBOL_GPL(i2400m_dev_reset_handle);
+/*
+ * The actual work of error recovery.
+ *
+ * The current implementation of error recovery is to trigger a bus reset.
+ */
+static
+void __i2400m_error_recovery(struct work_struct *ws)
+{
+ struct i2400m_work *iw = container_of(ws, struct i2400m_work, ws);
+ struct i2400m *i2400m = iw->i2400m;
+
+ i2400m_reset(i2400m, I2400M_RT_BUS);
+
+ i2400m_put(i2400m);
+ kfree(iw);
+ return;
+}
+
+/*
+ * Schedule a work struct for error recovery.
+ *
+ * The intention of error recovery is to bring back the device to some
+ * known state whenever TX sees -110 (-ETIMEOUT) on copying the data to
+ * the device. The TX failure could mean a device bus stuck, so the current
+ * error recovery implementation is to trigger a bus reset to the device
+ * and hopefully it can bring back the device.
+ *
+ * The actual work of error recovery has to be in a thread context because
+ * it is kicked off in the TX thread (i2400ms->tx_workqueue) which is to be
+ * destroyed by the error recovery mechanism (currently a bus reset).
+ *
+ * Also, there may be already a queue of TX works that all hit
+ * the -ETIMEOUT error condition because the device is stuck already.
+ * Since bus reset is used as the error recovery mechanism and we don't
+ * want consecutive bus resets simply because the multiple TX works
+ * in the queue all hit the same device erratum, the flag "error_recovery"
+ * is introduced for preventing unwanted consecutive bus resets.
+ *
+ * Error recovery shall only be invoked again if previous one was completed.
+ * The flag error_recovery is set when error recovery mechanism is scheduled,
+ * and is checked when we need to schedule another error recovery. If it is
+ * in place already, then we shouldn't schedule another one.
+ */
+void i2400m_error_recovery(struct i2400m *i2400m)
+{
+ struct device *dev = i2400m_dev(i2400m);
+
+ if (atomic_add_return(1, &i2400m->error_recovery) == 1) {
+ if (i2400m_schedule_work(i2400m, __i2400m_error_recovery,
+ GFP_ATOMIC, NULL, 0) < 0) {
+ dev_err(dev, "run out of memory for "
+ "scheduling an error recovery ?\n");
+ atomic_dec(&i2400m->error_recovery);
+ }
+ } else
+ atomic_dec(&i2400m->error_recovery);
+ return;
+}
+EXPORT_SYMBOL_GPL(i2400m_error_recovery);
+
/*
* Alloc the command and ack buffers for boot mode
*
@@ -803,6 +886,13 @@ void i2400m_init(struct i2400m *i2400m)
mutex_init(&i2400m->init_mutex);
/* wake_tx_ws is initialized in i2400m_tx_setup() */
+ atomic_set(&i2400m->bus_reset_retries, 0);
+
+ i2400m->alive = 0;
+
+ /* initialize error_recovery to 1 for denoting we
+ * are not yet ready to take any error recovery */
+ atomic_set(&i2400m->error_recovery, 1);
}
EXPORT_SYMBOL_GPL(i2400m_init);
@@ -996,7 +1086,6 @@ void __exit i2400m_driver_exit(void)
/* for scheds i2400m_dev_reset_handle() */
flush_scheduled_work();
i2400m_barker_db_exit();
- return;
}
module_exit(i2400m_driver_exit);
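The i2400m_error_recovery() addition above gates bus resets with an atomic counter: the flag starts at 1 ("not ready") in i2400m_init(), dev_start() decrements it once the device is up, and only the caller whose increment brings it back to exactly 1 gets to schedule the recovery work; every other caller backs off. Below is a minimal userspace sketch of that gating pattern, using C11 atomics as a stand-in for the kernel's atomic_t; the names are illustrative, not the driver's API.

#include <stdatomic.h>
#include <stdio.h>

/* Illustrative stand-in for i2400m->error_recovery: starts at 1
 * ("not ready"), dropped to 0 once the device is up. */
static atomic_int error_recovery = 1;

/* Stand-in for scheduling the bus-reset work; always "succeeds" here. */
static int schedule_recovery_work(void)
{
	printf("recovery work scheduled\n");
	return 0;
}

/* Same gating idea as i2400m_error_recovery(): only the caller whose
 * increment raises the counter to exactly 1 schedules the work; every
 * other caller (or a failed schedule) decrements it again. */
static void error_recovery_try(void)
{
	if (atomic_fetch_add(&error_recovery, 1) + 1 == 1) {
		if (schedule_recovery_work() < 0)
			atomic_fetch_sub(&error_recovery, 1);
	} else {
		atomic_fetch_sub(&error_recovery, 1);
	}
}

int main(void)
{
	atomic_fetch_sub(&error_recovery, 1);	/* device started: ready */
	error_recovery_try();			/* schedules the recovery work */
	error_recovery_try();			/* recovery pending: backs off */
	return 0;
}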
diff --git a/drivers/net/wimax/i2400m/i2400m-sdio.h b/drivers/net/wimax/i2400m/i2400m-sdio.h
index b9c4bed3b457..360d4fb195f4 100644
--- a/drivers/net/wimax/i2400m/i2400m-sdio.h
+++ b/drivers/net/wimax/i2400m/i2400m-sdio.h
@@ -99,7 +99,10 @@ enum {
*
* @tx_workqueue: workqeueue used for data TX; we don't use the
* system's workqueue as that might cause deadlocks with code in
- * the bus-generic driver.
+ * the bus-generic driver. The read/write operation to the queue
+ * is protected with spinlock (tx_lock in struct i2400m) to avoid
+ * the queue being destroyed in the middle of a queue read/write
+ * operation.
*
* @debugfs_dentry: dentry for the SDIO specific debugfs files
*
diff --git a/drivers/net/wimax/i2400m/i2400m.h b/drivers/net/wimax/i2400m/i2400m.h
index 820b128705ec..fa74777fd65f 100644
--- a/drivers/net/wimax/i2400m/i2400m.h
+++ b/drivers/net/wimax/i2400m/i2400m.h
@@ -160,6 +160,16 @@
#include <linux/wimax/i2400m.h>
#include <asm/byteorder.h>
+enum {
+/* netdev interface */
+ /*
+ * Out of NWG spec (R1_v1.2.2), 3.3.3 ASN Bearer Plane MTU Size
+ *
+ * The MTU is 1400 or less
+ */
+ I2400M_MAX_MTU = 1400,
+};
+
/* Misc constants */
enum {
/* Size of the Boot Mode Command buffer */
@@ -167,6 +177,11 @@ enum {
I2400M_BM_ACK_BUF_SIZE = 256,
};
+enum {
+ /* Maximum number of bus reset can be retried */
+ I2400M_BUS_RESET_RETRIES = 3,
+};
+
/**
* struct i2400m_poke_table - Hardware poke table for the Intel 2400m
*
@@ -227,6 +242,11 @@ struct i2400m_barker_db;
* so we have a tx_blk_size variable that the bus layer sets to
* tell the engine how much of that we need.
*
+ * @bus_tx_room_min: [fill] Minimum room required while allocating
+ * TX queue's buffer space for message header. SDIO requires
+ * 224 bytes and USB 16 bytes. Refer to the bus-specific driver code
+ * for details.
+ *
* @bus_pl_size_max: [fill] Maximum payload size.
*
* @bus_setup: [optional fill] Function called by the bus-generic code
@@ -397,7 +417,7 @@ struct i2400m_barker_db;
*
* @tx_size_max: biggest TX message sent.
*
- * @rx_lock: spinlock to protect RX members
+ * @rx_lock: spinlock to protect RX members and rx_roq_refcount.
*
* @rx_pl_num: total number of payloads received
*
@@ -421,6 +441,10 @@ struct i2400m_barker_db;
* delivered. Then the driver can release them to the host. See
* drivers/net/i2400m/rx.c for details.
*
+ * @rx_roq_refcount: refcount rx_roq. This refcounts any access to
+ * rx_roq, thus preventing rx_roq from being destroyed while rx_roq
+ * is being accessed. rx_roq_refcount is protected by rx_lock.
+ *
* @rx_reports: reports received from the device that couldn't be
* processed because the driver wasn't still ready; when ready,
* they are pulled from here and chewed.
@@ -507,6 +531,38 @@ struct i2400m_barker_db;
* same.
*
* @pm_notifier: used to register for PM events
+ *
+ * @bus_reset_retries: counter for the number of bus resets attempted for
+ * this boot. It's not for tracking the number of bus resets during
+ * the whole driver life cycle (from insmod to rmmod) but for the
+ * number of dev_start() executed until dev_start() returns a success
+ * (ie: a good boot means a dev_stop() followed by a successful
+ * dev_start()). dev_reset_handler() increments this counter whenever
+ * it triggers a bus reset. It checks this counter to decide if a
+ * subsequent bus reset should be retried. dev_reset_handler() retries
+ * the bus reset until dev_start() succeeds or the counter reaches
+ * I2400M_BUS_RESET_RETRIES. The counter is cleared to 0 in
+ * dev_reset_handle() when dev_start() returns a success,
+ * ie: a successful boot is completed.
+ *
+ * @alive: flag to denote if the device *should* be alive. This flag
+ * behaves just like @updown (see doc for @updown) except it reflects
+ * the device state *we expect* rather than the actual state as denoted
+ * by @updown. It is set 1 whenever @updown is set 1 in dev_start().
+ * Then the device is expected to be alive all the time
+ * (i2400m->alive remains 1) until the driver is removed. Therefore
+ * all the device reboot events detected can be still handled properly
+ * by either dev_reset_handle() or .pre_reset/.post_reset as long as
+ * the driver is present. It is set 0 along with @updown in dev_stop().
+ *
+ * @error_recovery: flag to denote if we are ready to take an error recovery.
+ * 0 for ready to take an error recovery; 1 for not ready. It is
+ * initialized to 1 in probe() since we don't want to take any error
+ * recovery during probe(). It is decremented by 1 whenever dev_start()
+ * succeeds to indicate we are ready to take error recovery from now on.
+ * It is checked every time we want to schedule an error recovery. If an
+ * error recovery is already in place (error_recovery was set 1), we
+ * should not schedule another one until the last one is done.
*/
struct i2400m {
struct wimax_dev wimax_dev; /* FIRST! See doc */
@@ -522,6 +578,7 @@ struct i2400m {
wait_queue_head_t state_wq; /* Woken up when on state updates */
size_t bus_tx_block_size;
+ size_t bus_tx_room_min;
size_t bus_pl_size_max;
unsigned bus_bm_retries;
@@ -550,10 +607,12 @@ struct i2400m {
tx_num, tx_size_acc, tx_size_min, tx_size_max;
/* RX stuff */
- spinlock_t rx_lock; /* protect RX state */
+ /* protect RX state and rx_roq_refcount */
+ spinlock_t rx_lock;
unsigned rx_pl_num, rx_pl_max, rx_pl_min,
rx_num, rx_size_acc, rx_size_min, rx_size_max;
- struct i2400m_roq *rx_roq; /* not under rx_lock! */
+ struct i2400m_roq *rx_roq; /* access is refcounted */
+ struct kref rx_roq_refcount; /* refcount access to rx_roq */
u8 src_mac_addr[ETH_HLEN];
struct list_head rx_reports; /* under rx_lock! */
struct work_struct rx_report_ws;
@@ -581,6 +640,16 @@ struct i2400m {
struct i2400m_barker_db *barker;
struct notifier_block pm_notifier;
+
+ /* counting bus reset retries in this boot */
+ atomic_t bus_reset_retries;
+
+ /* if the device is expected to be alive */
+ unsigned alive;
+
+ /* 0 if we are ready for error recovery; 1 if not ready */
+ atomic_t error_recovery;
+
};
@@ -803,6 +872,7 @@ void i2400m_put(struct i2400m *i2400m)
extern int i2400m_dev_reset_handle(struct i2400m *, const char *);
extern int i2400m_pre_reset(struct i2400m *);
extern int i2400m_post_reset(struct i2400m *);
+extern void i2400m_error_recovery(struct i2400m *);
/*
* _setup()/_release() are called by the probe/disconnect functions of
@@ -815,7 +885,6 @@ extern int i2400m_rx(struct i2400m *, struct sk_buff *);
extern struct i2400m_msg_hdr *i2400m_tx_msg_get(struct i2400m *, size_t *);
extern void i2400m_tx_msg_sent(struct i2400m *);
-extern int i2400m_power_save_disabled;
/*
* Utility functions
@@ -922,10 +991,5 @@ extern int i2400m_barker_db_init(const char *);
extern void i2400m_barker_db_exit(void);
-/* Module parameters */
-
-extern int i2400m_idle_mode_disabled;
-extern int i2400m_rx_reorder_disabled;
-
#endif /* #ifndef __I2400M_H__ */
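The @bus_reset_retries documentation above describes a bounded retry: every bus reset triggered from the reset handler bumps a counter, the handler gives up with -ENODEV once the counter reaches I2400M_BUS_RESET_RETRIES, and a successful restart clears it. A small standalone sketch of that bookkeeping, assuming C11 atomics in place of the kernel's atomic_t and an illustrative retry limit of 3:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define BUS_RESET_RETRIES 3	/* illustrative, mirrors I2400M_BUS_RESET_RETRIES */

static atomic_int bus_reset_retries;

/* Called when a restart attempt fails; true means "give up" instead of
 * scheduling yet another bus reset. */
static bool restart_failed_should_give_up(void)
{
	return atomic_fetch_add(&bus_reset_retries, 1) + 1 >= BUS_RESET_RETRIES;
}

/* Called when the device finally restarts: a good boot clears the count. */
static void restart_succeeded(void)
{
	atomic_store(&bus_reset_retries, 0);
}

int main(void)
{
	for (int attempt = 1; attempt <= 4; attempt++) {
		if (restart_failed_should_give_up()) {
			printf("attempt %d: giving up (-ENODEV)\n", attempt);
			break;
		}
		printf("attempt %d: retrying bus reset\n", attempt);
	}
	restart_succeeded();
	return 0;
}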
diff --git a/drivers/net/wimax/i2400m/netdev.c b/drivers/net/wimax/i2400m/netdev.c
index b811c2f1f5e9..94742e1eafe0 100644
--- a/drivers/net/wimax/i2400m/netdev.c
+++ b/drivers/net/wimax/i2400m/netdev.c
@@ -84,17 +84,15 @@
enum {
/* netdev interface */
- /*
- * Out of NWG spec (R1_v1.2.2), 3.3.3 ASN Bearer Plane MTU Size
- *
- * The MTU is 1400 or less
- */
- I2400M_MAX_MTU = 1400,
/* 20 secs? yep, this is the maximum timeout that the device
* might take to get out of IDLE / negotiate it with the base
* station. We add 1sec for good measure. */
I2400M_TX_TIMEOUT = 21 * HZ,
- I2400M_TX_QLEN = 5,
+ /*
+ * Experimentation has determined that 20 is a good value
+ * for minimizing the jitter in the throughput.
+ */
+ I2400M_TX_QLEN = 20,
};
@@ -255,7 +253,6 @@ void i2400m_net_wake_stop(struct i2400m *i2400m)
kfree_skb(wake_tx_skb);
}
d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
- return;
}
@@ -434,7 +431,6 @@ void i2400m_tx_timeout(struct net_device *net_dev)
* this, there might be data pending to be sent or not...
*/
net_dev->stats.tx_errors++;
- return;
}
diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c
index fa2e11e5b4b9..8cc9e319f435 100644
--- a/drivers/net/wimax/i2400m/rx.c
+++ b/drivers/net/wimax/i2400m/rx.c
@@ -155,6 +155,11 @@
#define D_SUBMODULE rx
#include "debug-levels.h"
+static int i2400m_rx_reorder_disabled; /* 0 (rx reorder enabled) by default */
+module_param_named(rx_reorder_disabled, i2400m_rx_reorder_disabled, int, 0644);
+MODULE_PARM_DESC(rx_reorder_disabled,
+ "If true, RX reordering will be disabled.");
+
struct i2400m_report_hook_args {
struct sk_buff *skb_rx;
const struct i2400m_l3l4_hdr *l3l4_hdr;
@@ -300,20 +305,18 @@ void i2400m_rx_ctl_ack(struct i2400m *i2400m,
d_printf(1, dev, "Huh? waiter for command reply cancelled\n");
goto error_waiter_cancelled;
}
- if (ack_skb == NULL) {
+ if (IS_ERR(ack_skb))
dev_err(dev, "CMD/GET/SET ack: cannot allocate SKB\n");
- i2400m->ack_skb = ERR_PTR(-ENOMEM);
- } else
- i2400m->ack_skb = ack_skb;
+ i2400m->ack_skb = ack_skb;
spin_unlock_irqrestore(&i2400m->rx_lock, flags);
complete(&i2400m->msg_completion);
return;
error_waiter_cancelled:
- kfree_skb(ack_skb);
+ if (!IS_ERR(ack_skb))
+ kfree_skb(ack_skb);
error_no_waiter:
spin_unlock_irqrestore(&i2400m->rx_lock, flags);
- return;
}
@@ -718,7 +721,6 @@ void __i2400m_roq_queue(struct i2400m *i2400m, struct i2400m_roq *roq,
out:
d_fnend(4, dev, "(i2400m %p roq %p skb %p sn %u nsn %d) = void\n",
i2400m, roq, skb, sn, nsn);
- return;
}
@@ -743,12 +745,12 @@ unsigned __i2400m_roq_update_ws(struct i2400m *i2400m, struct i2400m_roq *roq,
unsigned new_nws, nsn_itr;
new_nws = __i2400m_roq_nsn(roq, sn);
- if (unlikely(new_nws >= 1024) && d_test(1)) {
- dev_err(dev, "SW BUG? __update_ws new_nws %u (sn %u ws %u)\n",
- new_nws, sn, roq->ws);
- WARN_ON(1);
- i2400m_roq_log_dump(i2400m, roq);
- }
+ /*
+ * For type 2 (update_window_start) rx messages, there is no
+ * need to check if the normalized sequence number is greater than 1023.
+ * Simply insert and deliver all packets to the host up to the
+ * window start.
+ */
skb_queue_walk_safe(&roq->queue, skb_itr, tmp_itr) {
roq_data_itr = (struct i2400m_roq_data *) &skb_itr->cb;
nsn_itr = __i2400m_roq_nsn(roq, roq_data_itr->sn);
@@ -798,7 +800,6 @@ void i2400m_roq_reset(struct i2400m *i2400m, struct i2400m_roq *roq)
}
roq->ws = 0;
d_fnend(2, dev, "(i2400m %p roq %p) = void\n", i2400m, roq);
- return;
}
@@ -837,7 +838,6 @@ void i2400m_roq_queue(struct i2400m *i2400m, struct i2400m_roq *roq,
}
d_fnend(2, dev, "(i2400m %p roq %p skb %p lbn %u) = void\n",
i2400m, roq, skb, lbn);
- return;
}
@@ -863,7 +863,6 @@ void i2400m_roq_update_ws(struct i2400m *i2400m, struct i2400m_roq *roq,
i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_WS,
old_ws, len, sn, nsn, roq->ws);
d_fnstart(2, dev, "(i2400m %p roq %p sn %u) = void\n", i2400m, roq, sn);
- return;
}
@@ -890,33 +889,52 @@ void i2400m_roq_queue_update_ws(struct i2400m *i2400m, struct i2400m_roq *roq,
i2400m, roq, skb, sn);
len = skb_queue_len(&roq->queue);
nsn = __i2400m_roq_nsn(roq, sn);
+ /*
+ * For type 3 (queue_update_window_start) rx messages, there is no
+ * need to check if the normalized sequence number is greater than 1023.
+ * Simply insert and deliver all packets to the host up to the
+ * window start.
+ */
old_ws = roq->ws;
- if (unlikely(nsn >= 1024)) {
- dev_err(dev, "SW BUG? queue_update_ws nsn %u (sn %u ws %u)\n",
- nsn, sn, roq->ws);
- i2400m_roq_log_dump(i2400m, roq);
- i2400m_reset(i2400m, I2400M_RT_WARM);
- } else {
- /* if the queue is empty, don't bother as we'd queue
- * it and inmediately unqueue it -- just deliver it */
- if (len == 0) {
- struct i2400m_roq_data *roq_data;
- roq_data = (struct i2400m_roq_data *) &skb->cb;
- i2400m_net_erx(i2400m, skb, roq_data->cs);
- }
- else
- __i2400m_roq_queue(i2400m, roq, skb, sn, nsn);
- __i2400m_roq_update_ws(i2400m, roq, sn + 1);
- i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_PACKET_WS,
- old_ws, len, sn, nsn, roq->ws);
- }
+ /* If the queue is empty, don't bother as we'd queue
+ * it and immediately unqueue it -- just deliver it.
+ */
+ if (len == 0) {
+ struct i2400m_roq_data *roq_data;
+ roq_data = (struct i2400m_roq_data *) &skb->cb;
+ i2400m_net_erx(i2400m, skb, roq_data->cs);
+ } else
+ __i2400m_roq_queue(i2400m, roq, skb, sn, nsn);
+
+ __i2400m_roq_update_ws(i2400m, roq, sn + 1);
+ i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_PACKET_WS,
+ old_ws, len, sn, nsn, roq->ws);
+
d_fnend(2, dev, "(i2400m %p roq %p skb %p sn %u) = void\n",
i2400m, roq, skb, sn);
- return;
}
/*
+ * This routine destroys the memory allocated for rx_roq when no
+ * other thread is accessing it. Access to rx_roq is refcounted by
+ * rx_roq_refcount, hence the memory allocated must be destroyed when
+ * rx_roq_refcount becomes zero. This routine gets executed when
+ * rx_roq_refcount becomes zero.
+ */
+void i2400m_rx_roq_destroy(struct kref *ref)
+{
+ unsigned itr;
+ struct i2400m *i2400m
+ = container_of(ref, struct i2400m, rx_roq_refcount);
+ for (itr = 0; itr < I2400M_RO_CIN + 1; itr++)
+ __skb_queue_purge(&i2400m->rx_roq[itr].queue);
+ kfree(i2400m->rx_roq[0].log);
+ kfree(i2400m->rx_roq);
+ i2400m->rx_roq = NULL;
+}
+
+/*
* Receive and send up an extended data packet
*
* @i2400m: device descriptor
@@ -969,6 +987,7 @@ void i2400m_rx_edata(struct i2400m *i2400m, struct sk_buff *skb_rx,
unsigned ro_needed, ro_type, ro_cin, ro_sn;
struct i2400m_roq *roq;
struct i2400m_roq_data *roq_data;
+ unsigned long flags;
BUILD_BUG_ON(ETH_HLEN > sizeof(*hdr));
@@ -1007,7 +1026,16 @@ void i2400m_rx_edata(struct i2400m *i2400m, struct sk_buff *skb_rx,
ro_cin = (reorder >> I2400M_RO_CIN_SHIFT) & I2400M_RO_CIN;
ro_sn = (reorder >> I2400M_RO_SN_SHIFT) & I2400M_RO_SN;
+ spin_lock_irqsave(&i2400m->rx_lock, flags);
+ if (i2400m->rx_roq == NULL) {
+ kfree_skb(skb); /* rx_roq is already destroyed */
+ spin_unlock_irqrestore(&i2400m->rx_lock, flags);
+ goto error;
+ }
roq = &i2400m->rx_roq[ro_cin];
+ kref_get(&i2400m->rx_roq_refcount);
+ spin_unlock_irqrestore(&i2400m->rx_lock, flags);
+
roq_data = (struct i2400m_roq_data *) &skb->cb;
roq_data->sn = ro_sn;
roq_data->cs = cs;
@@ -1034,6 +1062,10 @@ void i2400m_rx_edata(struct i2400m *i2400m, struct sk_buff *skb_rx,
default:
dev_err(dev, "HW BUG? unknown reorder type %u\n", ro_type);
}
+
+ spin_lock_irqsave(&i2400m->rx_lock, flags);
+ kref_put(&i2400m->rx_roq_refcount, i2400m_rx_roq_destroy);
+ spin_unlock_irqrestore(&i2400m->rx_lock, flags);
}
else
i2400m_net_erx(i2400m, skb, cs);
@@ -1041,7 +1073,6 @@ error_skb_clone:
error:
d_fnend(2, dev, "(i2400m %p skb_rx %p single %u payload %p "
"size %zu) = void\n", i2400m, skb_rx, single_last, payload, size);
- return;
}
@@ -1344,6 +1375,7 @@ int i2400m_rx_setup(struct i2400m *i2400m)
__i2400m_roq_init(&i2400m->rx_roq[itr]);
i2400m->rx_roq[itr].log = &rd[itr];
}
+ kref_init(&i2400m->rx_roq_refcount);
}
return 0;
@@ -1357,12 +1389,12 @@ error_roq_alloc:
/* Tear down the RX queue and infrastructure */
void i2400m_rx_release(struct i2400m *i2400m)
{
+ unsigned long flags;
+
if (i2400m->rx_reorder) {
- unsigned itr;
- for(itr = 0; itr < I2400M_RO_CIN + 1; itr++)
- __skb_queue_purge(&i2400m->rx_roq[itr].queue);
- kfree(i2400m->rx_roq[0].log);
- kfree(i2400m->rx_roq);
+ spin_lock_irqsave(&i2400m->rx_lock, flags);
+ kref_put(&i2400m->rx_roq_refcount, i2400m_rx_roq_destroy);
+ spin_unlock_irqrestore(&i2400m->rx_lock, flags);
}
/* at this point, nothing can be received... */
i2400m_report_hook_flush(i2400m);
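The rx_roq changes above follow a get/put discipline: the RX path takes rx_lock, checks that rx_roq still exists, grabs a reference before dropping the lock, and releases it afterwards, while the teardown path simply drops its own reference and lets the destructor run whenever the count reaches zero. The following userspace sketch shows the same pattern with a pthread mutex and a plain integer refcount standing in for rx_lock and the kref; names and structure are illustrative only, not the driver's code.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for the refcounted reorder-queue array. */
struct roq {
	int refcount;		/* plays the role of rx_roq_refcount */
	int nqueues;
};

static pthread_mutex_t rx_lock = PTHREAD_MUTEX_INITIALIZER;
static struct roq *rx_roq;	/* NULL once torn down */

static void roq_destroy(struct roq *r)
{
	printf("last reference dropped, freeing reorder queues\n");
	free(r);
	rx_roq = NULL;
}

/* kref_get()-like: caller holds rx_lock and has checked rx_roq != NULL. */
static void roq_get(struct roq *r) { r->refcount++; }

/* kref_put()-like: destroy when the count reaches zero; called under rx_lock. */
static void roq_put(struct roq *r)
{
	if (--r->refcount == 0)
		roq_destroy(r);
}

static void rx_path_use_roq(void)
{
	struct roq *r;

	pthread_mutex_lock(&rx_lock);
	r = rx_roq;
	if (!r) {			/* already torn down: drop the packet */
		pthread_mutex_unlock(&rx_lock);
		return;
	}
	roq_get(r);
	pthread_mutex_unlock(&rx_lock);

	/* ... reorder and deliver packets without holding the lock ... */

	pthread_mutex_lock(&rx_lock);
	roq_put(r);
	pthread_mutex_unlock(&rx_lock);
}

int main(void)
{
	rx_roq = calloc(1, sizeof(*rx_roq));
	rx_roq->refcount = 1;		/* like kref_init() in i2400m_rx_setup() */
	rx_path_use_roq();
	pthread_mutex_lock(&rx_lock);
	roq_put(rx_roq);		/* release path, as in i2400m_rx_release() */
	pthread_mutex_unlock(&rx_lock);
	return 0;
}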
diff --git a/drivers/net/wimax/i2400m/sdio-rx.c b/drivers/net/wimax/i2400m/sdio-rx.c
index d619da33f20b..8b809c2ead6c 100644
--- a/drivers/net/wimax/i2400m/sdio-rx.c
+++ b/drivers/net/wimax/i2400m/sdio-rx.c
@@ -197,7 +197,6 @@ error_alloc_skb:
error_get_size:
error_bad_size:
d_fnend(7, dev, "(i2400ms %p) = %d\n", i2400ms, ret);
- return;
}
@@ -229,7 +228,6 @@ void i2400ms_irq(struct sdio_func *func)
i2400ms_rx(i2400ms);
error_no_irq:
d_fnend(6, dev, "(i2400ms %p) = void\n", i2400ms);
- return;
}
diff --git a/drivers/net/wimax/i2400m/sdio-tx.c b/drivers/net/wimax/i2400m/sdio-tx.c
index de66d068c9cb..b53cd1c80e3e 100644
--- a/drivers/net/wimax/i2400m/sdio-tx.c
+++ b/drivers/net/wimax/i2400m/sdio-tx.c
@@ -98,6 +98,10 @@ void i2400ms_tx_submit(struct work_struct *ws)
tx_msg_size, result);
}
+ if (result == -ETIMEDOUT) {
+ i2400m_error_recovery(i2400m);
+ break;
+ }
d_printf(2, dev, "TX: %zub submitted\n", tx_msg_size);
}
@@ -114,13 +118,17 @@ void i2400ms_bus_tx_kick(struct i2400m *i2400m)
{
struct i2400ms *i2400ms = container_of(i2400m, struct i2400ms, i2400m);
struct device *dev = &i2400ms->func->dev;
+ unsigned long flags;
d_fnstart(3, dev, "(i2400m %p) = void\n", i2400m);
/* schedule tx work, this is because tx may block, therefore
* it has to run in a thread context.
*/
- queue_work(i2400ms->tx_workqueue, &i2400ms->tx_worker);
+ spin_lock_irqsave(&i2400m->tx_lock, flags);
+ if (i2400ms->tx_workqueue != NULL)
+ queue_work(i2400ms->tx_workqueue, &i2400ms->tx_worker);
+ spin_unlock_irqrestore(&i2400m->tx_lock, flags);
d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
}
@@ -130,27 +138,40 @@ int i2400ms_tx_setup(struct i2400ms *i2400ms)
int result;
struct device *dev = &i2400ms->func->dev;
struct i2400m *i2400m = &i2400ms->i2400m;
+ struct workqueue_struct *tx_workqueue;
+ unsigned long flags;
d_fnstart(5, dev, "(i2400ms %p)\n", i2400ms);
INIT_WORK(&i2400ms->tx_worker, i2400ms_tx_submit);
snprintf(i2400ms->tx_wq_name, sizeof(i2400ms->tx_wq_name),
"%s-tx", i2400m->wimax_dev.name);
- i2400ms->tx_workqueue =
+ tx_workqueue =
create_singlethread_workqueue(i2400ms->tx_wq_name);
- if (NULL == i2400ms->tx_workqueue) {
+ if (tx_workqueue == NULL) {
dev_err(dev, "TX: failed to create workqueue\n");
result = -ENOMEM;
} else
result = 0;
+ spin_lock_irqsave(&i2400m->tx_lock, flags);
+ i2400ms->tx_workqueue = tx_workqueue;
+ spin_unlock_irqrestore(&i2400m->tx_lock, flags);
d_fnend(5, dev, "(i2400ms %p) = %d\n", i2400ms, result);
return result;
}
void i2400ms_tx_release(struct i2400ms *i2400ms)
{
- if (i2400ms->tx_workqueue) {
- destroy_workqueue(i2400ms->tx_workqueue);
- i2400ms->tx_workqueue = NULL;
- }
+ struct i2400m *i2400m = &i2400ms->i2400m;
+ struct workqueue_struct *tx_workqueue;
+ unsigned long flags;
+
+ tx_workqueue = i2400ms->tx_workqueue;
+
+ spin_lock_irqsave(&i2400m->tx_lock, flags);
+ i2400ms->tx_workqueue = NULL;
+ spin_unlock_irqrestore(&i2400m->tx_lock, flags);
+
+ if (tx_workqueue)
+ destroy_workqueue(tx_workqueue);
}
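The tx_workqueue handling above closes a shutdown race: the kick path only queues work while holding tx_lock and seeing a non-NULL workqueue pointer, and the release path clears the pointer under the same lock before destroying the queue outside it. A compact sketch of that hand-off, with a pthread mutex playing the role of the spinlock and a toy structure standing in for the workqueue (illustrative names, not the kernel API):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct workqueue { const char *name; };			/* toy stand-in */

static pthread_mutex_t tx_lock = PTHREAD_MUTEX_INITIALIZER;
static struct workqueue *tx_workqueue;			/* protected by tx_lock */

static void queue_work(struct workqueue *wq) { printf("queued on %s\n", wq->name); }

/* Kick path: only queue if the workqueue still exists. */
static void tx_kick(void)
{
	pthread_mutex_lock(&tx_lock);
	if (tx_workqueue != NULL)
		queue_work(tx_workqueue);
	pthread_mutex_unlock(&tx_lock);
}

/* Release path: detach the pointer under the lock, destroy outside it,
 * so tx_kick() can never see a half-destroyed queue. */
static void tx_release(void)
{
	struct workqueue *wq;

	pthread_mutex_lock(&tx_lock);
	wq = tx_workqueue;
	tx_workqueue = NULL;
	pthread_mutex_unlock(&tx_lock);

	if (wq) {
		printf("destroying %s\n", wq->name);
		free(wq);
	}
}

int main(void)
{
	tx_workqueue = malloc(sizeof(*tx_workqueue));
	tx_workqueue->name = "i2400m-tx (example)";
	tx_kick();		/* queues: workqueue still present */
	tx_release();
	tx_kick();		/* silently skipped: pointer already NULL */
	return 0;
}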
diff --git a/drivers/net/wimax/i2400m/sdio.c b/drivers/net/wimax/i2400m/sdio.c
index 7632f80954e3..9bfc26e1bc6b 100644
--- a/drivers/net/wimax/i2400m/sdio.c
+++ b/drivers/net/wimax/i2400m/sdio.c
@@ -483,6 +483,13 @@ int i2400ms_probe(struct sdio_func *func,
sdio_set_drvdata(func, i2400ms);
i2400m->bus_tx_block_size = I2400MS_BLK_SIZE;
+ /*
+ * Room required in the TX queue for an SDIO message to accommodate
+ * the smallest payload while allocating header space is 224 bytes,
+ * which is the smallest message size (the block size, 256 bytes)
+ * minus the smallest message header size (32 bytes).
+ */
+ i2400m->bus_tx_room_min = I2400MS_BLK_SIZE - I2400M_PL_ALIGN * 2;
i2400m->bus_pl_size_max = I2400MS_PL_SIZE_MAX;
i2400m->bus_setup = i2400ms_bus_setup;
i2400m->bus_dev_start = i2400ms_bus_dev_start;
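As a quick check of the arithmetic in the comment above (assuming, as the comment implies, I2400MS_BLK_SIZE = 256 and I2400M_PL_ALIGN = 16): 256 - 2 * 16 = 224, i.e. bus_tx_room_min ends up as the 256-byte block minus the 32-byte minimum message header, matching the 224 bytes quoted for SDIO.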
diff --git a/drivers/net/wimax/i2400m/tx.c b/drivers/net/wimax/i2400m/tx.c
index b0cb90624cf6..3f819efc06b5 100644
--- a/drivers/net/wimax/i2400m/tx.c
+++ b/drivers/net/wimax/i2400m/tx.c
@@ -258,8 +258,10 @@ enum {
* Doc says maximum transaction is 16KiB. If we had 16KiB en
* route and 16KiB being queued, it boils down to needing
* 32KiB.
+ * 32KiB is insufficient for a 1400-byte MTU, hence the
+ * TX buffer size is increased to 64KiB.
*/
- I2400M_TX_BUF_SIZE = 32768,
+ I2400M_TX_BUF_SIZE = 65536,
/**
* Message header and payload descriptors have to be 16
* aligned (16 + 4 * N = 16 * M). If we take that average sent
@@ -270,10 +272,21 @@ enum {
* at the end there are less, we pad up to the nearest
* multiple of 16.
*/
- I2400M_TX_PLD_MAX = 12,
+ /*
+ * According to Intel Wimax i3200, i5x50 and i6x50 specification
+ * documents, the maximum number of payloads per message can be
+ * up to 60. Increasing the number of payloads to 60 per message
+ * helps to accommodate smaller payloads in a single transaction.
+ */
+ I2400M_TX_PLD_MAX = 60,
I2400M_TX_PLD_SIZE = sizeof(struct i2400m_msg_hdr)
+ I2400M_TX_PLD_MAX * sizeof(struct i2400m_pld),
I2400M_TX_SKIP = 0x80000000,
+ /*
+ * According to Intel Wimax i3200, i5x50 and i6x50 specification
+ * documents, the maximum size of each message can be up to 16KiB.
+ */
+ I2400M_TX_MSG_SIZE = 16384,
};
#define TAIL_FULL ((void *)~(unsigned long)NULL)
@@ -328,6 +341,14 @@ size_t __i2400m_tx_tail_room(struct i2400m *i2400m)
* @padding: ensure that there is at least this many bytes of free
* contiguous space in the fifo. This is needed because later on
* we might need to add padding.
+ * @try_head: specify whether to allocate head room or tail room space
+ * in the TX FIFO. This boolean is required to avoid a system hang
+ * due to an infinite loop caused by i2400m_tx_fifo_push().
+ * The caller must always try to allocate tail room space first by
+ * calling this routine with try_head = 0. If there is not enough
+ * tail room space but there is enough head room space
+ * (i2400m_tx_fifo_push() returns TAIL_FULL), try to allocate head
+ * room space by calling this routine again with try_head = 1.
*
* Returns:
*
@@ -359,6 +380,48 @@ size_t __i2400m_tx_tail_room(struct i2400m *i2400m)
* fail and return TAIL_FULL and let the caller figure out if we wants to
* skip the tail room and try to allocate from the head.
*
+ * There is a corner case, wherein i2400m_tx_new() can get into
+ * an infinite loop calling i2400m_tx_fifo_push().
+ * In certain situations, tx_in would have reached the top of the TX FIFO
+ * and i2400m_tx_tail_room() returns 0, as described below:
+ *
+ * N ___________ tail room is zero
+ * |<- IN ->|
+ * | |
+ * | |
+ * | |
+ * | data |
+ * |<- OUT ->|
+ * | |
+ * | |
+ * | head room |
+ * 0 -----------
+ * At such a time, when the tail room is zero in the TX FIFO, a request
+ * to add a payload to the TX FIFO goes through the following calls:
+ * i2400m_tx()
+ * ->calls i2400m_tx_close()
+ * ->calls i2400m_tx_skip_tail()
+ * goto try_new;
+ * ->calls i2400m_tx_new()
+ * |----> [try_head:]
+ * infinite loop | ->calls i2400m_tx_fifo_push()
+ * | if (tail_room < needed)
+ * | if (head_room => needed)
+ * | return TAIL_FULL;
+ * |<---- goto try_head;
+ *
+ * i2400m_tx() calls i2400m_tx_close() to close the message, since there
+ * is no tail room to accommodate the payload and calls
+ * i2400m_tx_skip_tail() to skip the tail space. Now i2400m_tx() calls
+ * i2400m_tx_new() to allocate space for a new message header, calling
+ * i2400m_tx_fifo_push() that returns TAIL_FULL, since there is no tail space
+ * to accommodate the message header, but there is enough head space.
+ * i2400m_tx_new() then keeps retrying by calling i2400m_tx_fifo_push(),
+ * ending up in a loop that causes a system freeze.
+ *
+ * This corner case is avoided by passing a try_head boolean
+ * as an argument to i2400m_tx_fifo_push().
+ *
* Note:
*
* Assumes i2400m->tx_lock is taken, and we use that as a barrier
@@ -367,7 +430,8 @@ size_t __i2400m_tx_tail_room(struct i2400m *i2400m)
* pop data off the queue
*/
static
-void *i2400m_tx_fifo_push(struct i2400m *i2400m, size_t size, size_t padding)
+void *i2400m_tx_fifo_push(struct i2400m *i2400m, size_t size,
+ size_t padding, bool try_head)
{
struct device *dev = i2400m_dev(i2400m);
size_t room, tail_room, needed_size;
@@ -382,9 +446,21 @@ void *i2400m_tx_fifo_push(struct i2400m *i2400m, size_t size, size_t padding)
}
/* Is there space at the tail? */
tail_room = __i2400m_tx_tail_room(i2400m);
- if (tail_room < needed_size) {
- if (i2400m->tx_out % I2400M_TX_BUF_SIZE
- < i2400m->tx_in % I2400M_TX_BUF_SIZE) {
+ if (!try_head && tail_room < needed_size) {
+ /*
+ * If the tail room space is not enough to push the message
+ * in the TX FIFO, then there are two possibilities:
+ * 1. There is enough head room space to accommodate
+ * this message in the TX FIFO.
+ * 2. There is not enough space in the head room and
+ * in tail room of the TX FIFO to accommodate the message.
+ * In case (1), return TAIL_FULL so that the caller
+ * can decide whether to push the message
+ * into the head room space.
+ * In case (2), return NULL, indicating that the TX FIFO
+ * cannot accommodate the message.
+ */
+ if (room - tail_room >= needed_size) {
d_printf(2, dev, "fifo push %zu/%zu: tail full\n",
size, padding);
return TAIL_FULL; /* There might be head space */
@@ -485,14 +561,25 @@ void i2400m_tx_new(struct i2400m *i2400m)
{
struct device *dev = i2400m_dev(i2400m);
struct i2400m_msg_hdr *tx_msg;
+ bool try_head = 0;
BUG_ON(i2400m->tx_msg != NULL);
+ /*
+ * In certain situations, the TX queue might have enough space to
+ * accommodate the new message header I2400M_TX_PLD_SIZE, but
+ * might not have enough space to accommodate the payloads.
+ * Adding bus_tx_room_min padding while allocating a new TX message
+ * increases the likelihood of including at least one payload of
+ * size <= bus_tx_room_min.
+ */
try_head:
- tx_msg = i2400m_tx_fifo_push(i2400m, I2400M_TX_PLD_SIZE, 0);
+ tx_msg = i2400m_tx_fifo_push(i2400m, I2400M_TX_PLD_SIZE,
+ i2400m->bus_tx_room_min, try_head);
if (tx_msg == NULL)
goto out;
else if (tx_msg == TAIL_FULL) {
i2400m_tx_skip_tail(i2400m);
d_printf(2, dev, "new TX message: tail full, trying head\n");
+ try_head = 1;
goto try_head;
}
memset(tx_msg, 0, I2400M_TX_PLD_SIZE);
@@ -566,7 +653,7 @@ void i2400m_tx_close(struct i2400m *i2400m)
aligned_size = ALIGN(tx_msg_moved->size, i2400m->bus_tx_block_size);
padding = aligned_size - tx_msg_moved->size;
if (padding > 0) {
- pad_buf = i2400m_tx_fifo_push(i2400m, padding, 0);
+ pad_buf = i2400m_tx_fifo_push(i2400m, padding, 0, 0);
if (unlikely(WARN_ON(pad_buf == NULL
|| pad_buf == TAIL_FULL))) {
/* This should not happen -- append should verify
@@ -632,6 +719,7 @@ int i2400m_tx(struct i2400m *i2400m, const void *buf, size_t buf_len,
unsigned long flags;
size_t padded_len;
void *ptr;
+ bool try_head = 0;
unsigned is_singleton = pl_type == I2400M_PT_RESET_WARM
|| pl_type == I2400M_PT_RESET_COLD;
@@ -643,9 +731,11 @@ int i2400m_tx(struct i2400m *i2400m, const void *buf, size_t buf_len,
* current one is out of payload slots or we have a singleton,
* close it and start a new one */
spin_lock_irqsave(&i2400m->tx_lock, flags);
- result = -ESHUTDOWN;
- if (i2400m->tx_buf == NULL)
+ /* If tx_buf is NULL, device is shutdown */
+ if (i2400m->tx_buf == NULL) {
+ result = -ESHUTDOWN;
goto error_tx_new;
+ }
try_new:
if (unlikely(i2400m->tx_msg == NULL))
i2400m_tx_new(i2400m);
@@ -659,7 +749,13 @@ try_new:
}
if (i2400m->tx_msg == NULL)
goto error_tx_new;
- if (i2400m->tx_msg->size + padded_len > I2400M_TX_BUF_SIZE / 2) {
+ /*
+ * Check if this skb will fit in the TX queue's current active
+ * TX message. The total message size must not exceed the maximum
+ * size of each message I2400M_TX_MSG_SIZE. If it exceeds,
+ * close the current message and push this skb into the new message.
+ */
+ if (i2400m->tx_msg->size + padded_len > I2400M_TX_MSG_SIZE) {
d_printf(2, dev, "TX: message too big, going new\n");
i2400m_tx_close(i2400m);
i2400m_tx_new(i2400m);
@@ -669,11 +765,12 @@ try_new:
/* So we have a current message header; now append space for
* the message -- if there is not enough, try the head */
ptr = i2400m_tx_fifo_push(i2400m, padded_len,
- i2400m->bus_tx_block_size);
+ i2400m->bus_tx_block_size, try_head);
if (ptr == TAIL_FULL) { /* Tail is full, try head */
d_printf(2, dev, "pl append: tail full\n");
i2400m_tx_close(i2400m);
i2400m_tx_skip_tail(i2400m);
+ try_head = 1;
goto try_new;
} else if (ptr == NULL) { /* All full */
result = -ENOSPC;
@@ -689,7 +786,7 @@ try_new:
pl_type, buf_len);
tx_msg->num_pls = le16_to_cpu(num_pls+1);
tx_msg->size += padded_len;
- d_printf(2, dev, "TX: appended %zu b (up to %u b) pl #%u \n",
+ d_printf(2, dev, "TX: appended %zu b (up to %u b) pl #%u\n",
padded_len, tx_msg->size, num_pls+1);
d_printf(2, dev,
"TX: appended hdr @%zu %zu b pl #%u @%zu %zu/%zu b\n",
@@ -860,25 +957,43 @@ EXPORT_SYMBOL_GPL(i2400m_tx_msg_sent);
* i2400m_tx_setup - Initialize the TX queue and infrastructure
*
* Make sure we reset the TX sequence to zero, as when this function
- * is called, the firmware has been just restarted.
+ * is called, the firmware has just been restarted. The same rationale
+ * applies to tx_in, tx_out, tx_msg_size and tx_msg. We reset them since
+ * the memory for the TX queue is reallocated.
*/
int i2400m_tx_setup(struct i2400m *i2400m)
{
- int result;
+ int result = 0;
+ void *tx_buf;
+ unsigned long flags;
/* Do this here only once -- can't do on
* i2400m_hard_start_xmit() as we'll cause race conditions if
* the WS was scheduled on another CPU */
INIT_WORK(&i2400m->wake_tx_ws, i2400m_wake_tx_work);
- i2400m->tx_sequence = 0;
- i2400m->tx_buf = kmalloc(I2400M_TX_BUF_SIZE, GFP_KERNEL);
- if (i2400m->tx_buf == NULL)
+ tx_buf = kmalloc(I2400M_TX_BUF_SIZE, GFP_ATOMIC);
+ if (tx_buf == NULL) {
result = -ENOMEM;
- else
- result = 0;
+ goto error_kmalloc;
+ }
+
+ /*
+ * Fail the build if we can't fit at least two maximum size messages
+ * on the TX FIFO [one being delivered while one is constructed].
+ */
+ BUILD_BUG_ON(2 * I2400M_TX_MSG_SIZE > I2400M_TX_BUF_SIZE);
+ spin_lock_irqsave(&i2400m->tx_lock, flags);
+ i2400m->tx_sequence = 0;
+ i2400m->tx_in = 0;
+ i2400m->tx_out = 0;
+ i2400m->tx_msg_size = 0;
+ i2400m->tx_msg = NULL;
+ i2400m->tx_buf = tx_buf;
+ spin_unlock_irqrestore(&i2400m->tx_lock, flags);
/* Huh? the bus layer has to define this... */
BUG_ON(i2400m->bus_tx_block_size == 0);
+error_kmalloc:
return result;
}
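The try_head flag documented in the tx.c changes above exists purely to break the retry loop: the first allocation attempt may only use the contiguous tail, and when that returns TAIL_FULL the caller skips the tail and retries exactly once against the head. The following is a minimal circular-buffer sketch of that allocation policy; the buffer size, offsets and names are made up for illustration and this is not the driver's FIFO code.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define BUF_SIZE   64
#define TAIL_FULL  ((void *)~(size_t)0)		/* sentinel, as in tx.c */

static char buf[BUF_SIZE];
static size_t in, out;				/* monotonically growing offsets */

/* Simplified fifo_push: with try_head == false only the contiguous tail
 * may be used; TAIL_FULL tells the caller to retry once with
 * try_head == true, which wraps to the head instead of looping forever. */
static void *fifo_push(size_t size, bool try_head)
{
	size_t used = in - out;
	size_t room = BUF_SIZE - used;		/* total free bytes */
	size_t in_off = in % BUF_SIZE;
	size_t out_off = out % BUF_SIZE;
	/* contiguous free bytes from the write offset up to the end of the
	 * buffer; if the free region does not reach the end, it is all
	 * contiguous anyway */
	size_t tail_room = (used < BUF_SIZE && in_off >= out_off)
			   ? BUF_SIZE - in_off : room;
	void *p;

	if (room < size)
		return NULL;			/* nothing fits anywhere */
	if (tail_room < size) {
		if (room - tail_room < size)
			return NULL;		/* head cannot take it either */
		if (!try_head)
			return TAIL_FULL;	/* caller may retry at the head */
		in += tail_room;		/* skip the dead tail and wrap */
	}
	p = buf + (in % BUF_SIZE);
	in += size;
	return p;
}

int main(void)
{
	in = 60;
	out = 20;				/* tail has 4 free bytes, head has 20 */
	void *p = fifo_push(8, false);
	if (p == TAIL_FULL)
		p = fifo_push(8, true);		/* single retry, no infinite loop */
	if (p)
		printf("allocated at offset %td\n", (char *)p - buf);
	return 0;
}

In this example the 4 free bytes at the tail cannot hold the 8-byte request, so the first call returns TAIL_FULL and the single retry with try_head set wraps to the head instead of spinning.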
diff --git a/drivers/net/wimax/i2400m/usb-notif.c b/drivers/net/wimax/i2400m/usb-notif.c
index 7b6a1d98bd74..d44b545f4082 100644
--- a/drivers/net/wimax/i2400m/usb-notif.c
+++ b/drivers/net/wimax/i2400m/usb-notif.c
@@ -178,7 +178,6 @@ error_submit:
out:
d_fnend(4, dev, "(urb %p status %d actual_length %d) = void\n",
urb, urb->status, urb->actual_length);
- return;
}
diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c
index d8c4d6497fdf..0d5081d77dc0 100644
--- a/drivers/net/wimax/i2400m/usb.c
+++ b/drivers/net/wimax/i2400m/usb.c
@@ -82,6 +82,8 @@ MODULE_PARM_DESC(debug,
/* Our firmware file name */
static const char *i2400mu_bus_fw_names_5x50[] = {
+#define I2400MU_FW_FILE_NAME_v1_5 "i2400m-fw-usb-1.5.sbcf"
+ I2400MU_FW_FILE_NAME_v1_5,
#define I2400MU_FW_FILE_NAME_v1_4 "i2400m-fw-usb-1.4.sbcf"
I2400MU_FW_FILE_NAME_v1_4,
NULL,
@@ -467,6 +469,13 @@ int i2400mu_probe(struct usb_interface *iface,
usb_set_intfdata(iface, i2400mu);
i2400m->bus_tx_block_size = I2400MU_BLK_SIZE;
+ /*
+ * Room required in the TX queue for a USB message to accommodate
+ * the smallest payload while allocating header space is 16 bytes.
+ * Adding this room for the new TX message increases the
+ * likelihood of including any payload with size <= 16 bytes.
+ */
+ i2400m->bus_tx_room_min = I2400MU_BLK_SIZE;
i2400m->bus_pl_size_max = I2400MU_PL_SIZE_MAX;
i2400m->bus_setup = NULL;
i2400m->bus_dev_start = i2400mu_bus_dev_start;
@@ -505,7 +514,7 @@ int i2400mu_probe(struct usb_interface *iface,
iface->needs_remote_wakeup = 1; /* autosuspend (15s delay) */
device_init_wakeup(dev, 1);
usb_dev->autosuspend_delay = 15 * HZ;
- usb_dev->autosuspend_disabled = 0;
+ usb_enable_autosuspend(usb_dev);
#endif
result = i2400m_setup(i2400m, I2400M_BRI_MAC_REINIT);
@@ -778,4 +787,5 @@ MODULE_AUTHOR("Intel Corporation <linux-wimax@intel.com>");
MODULE_DESCRIPTION("Driver for USB based Intel Wireless WiMAX Connection 2400M "
"(5x50 & 6050)");
MODULE_LICENSE("GPL");
-MODULE_FIRMWARE(I2400MU_FW_FILE_NAME_v1_4);
+MODULE_FIRMWARE(I2400MU_FW_FILE_NAME_v1_5);
+MODULE_FIRMWARE(I6050U_FW_FILE_NAME_v1_5);
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 588943660755..174e3442d519 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -5,6 +5,7 @@
menuconfig WLAN
bool "Wireless LAN"
depends on !S390
+ depends on NET
select WIRELESS
default y
---help---
@@ -38,6 +39,12 @@ config LIBERTAS_THINFIRM
---help---
A library for Marvell Libertas 8xxx devices using thinfirm.
+config LIBERTAS_THINFIRM_DEBUG
+ bool "Enable full debugging output in the Libertas thin firmware module."
+ depends on LIBERTAS_THINFIRM
+ ---help---
+ Debugging support.
+
config LIBERTAS_THINFIRM_USB
tristate "Marvell Libertas 8388 USB 802.11b/g cards with thin firmware"
depends on LIBERTAS_THINFIRM && USB
@@ -210,90 +217,7 @@ config USB_NET_RNDIS_WLAN
If you choose to build a module, it'll be called rndis_wlan.
-config RTL8180
- tristate "Realtek 8180/8185 PCI support"
- depends on MAC80211 && PCI && EXPERIMENTAL
- select EEPROM_93CX6
- ---help---
- This is a driver for RTL8180 and RTL8185 based cards.
- These are PCI based chips found in cards such as:
-
- (RTL8185 802.11g)
- A-Link WL54PC
-
- (RTL8180 802.11b)
- Belkin F5D6020 v3
- Belkin F5D6020 v3
- Dlink DWL-610
- Dlink DWL-510
- Netgear MA521
- Level-One WPC-0101
- Acer Aspire 1357 LMi
- VCTnet PC-11B1
- Ovislink AirLive WL-1120PCM
- Mentor WL-PCI
- Linksys WPC11 v4
- TrendNET TEW-288PI
- D-Link DWL-520 Rev D
- Repotec RP-WP7126
- TP-Link TL-WN250/251
- Zonet ZEW1000
- Longshine LCS-8031-R
- HomeLine HLW-PCC200
- GigaFast WF721-AEX
- Planet WL-3553
- Encore ENLWI-PCI1-NT
- TrendNET TEW-266PC
- Gigabyte GN-WLMR101
- Siemens-fujitsu Amilo D1840W
- Edimax EW-7126
- PheeNet WL-11PCIR
- Tonze PC-2100T
- Planet WL-8303
- Dlink DWL-650 v M1
- Edimax EW-7106
- Q-Tec 770WC
- Topcom Skyr@cer 4011b
- Roper FreeLan 802.11b (edition 2004)
- Wistron Neweb Corp CB-200B
- Pentagram HorNET
- QTec 775WC
- TwinMOS Booming B Series
- Micronet SP906BB
- Sweex LC700010
- Surecom EP-9428
- Safecom SWLCR-1100
-
- Thanks to Realtek for their support!
-
-config RTL8187
- tristate "Realtek 8187 and 8187B USB support"
- depends on MAC80211 && USB
- select EEPROM_93CX6
- ---help---
- This is a driver for RTL8187 and RTL8187B based cards.
- These are USB based chips found in devices such as:
-
- Netgear WG111v2
- Level 1 WNC-0301USB
- Micronet SP907GK V5
- Encore ENUWI-G2
- Trendnet TEW-424UB
- ASUS P5B Deluxe/P5K Premium motherboards
- Toshiba Satellite Pro series of laptops
- Asus Wireless Link
- Linksys WUSB54GC-EU v2
- (v1 = rt73usb; v3 is rt2070-based,
- use staging/rt3070 or try rt2800usb)
-
- Thanks to Realtek for their support!
-
-# If possible, automatically enable LEDs for RTL8187.
-
-config RTL8187_LEDS
- bool
- depends on RTL8187 && MAC80211_LEDS && (LEDS_CLASS = y || LEDS_CLASS = RTL8187)
- default y
+source "drivers/net/wireless/rtl818x/Kconfig"
config ADM8211
tristate "ADMtek ADM8211 support"
diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c
index ab61d2b558d6..880ad9d170c2 100644
--- a/drivers/net/wireless/adm8211.c
+++ b/drivers/net/wireless/adm8211.c
@@ -1318,21 +1318,19 @@ static void adm8211_bss_info_changed(struct ieee80211_hw *dev,
}
static u64 adm8211_prepare_multicast(struct ieee80211_hw *hw,
- int mc_count, struct dev_addr_list *mclist)
+ struct netdev_hw_addr_list *mc_list)
{
- unsigned int bit_nr, i;
+ unsigned int bit_nr;
u32 mc_filter[2];
+ struct netdev_hw_addr *ha;
mc_filter[1] = mc_filter[0] = 0;
- for (i = 0; i < mc_count; i++) {
- if (!mclist)
- break;
- bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
+ netdev_hw_addr_list_for_each(ha, mc_list) {
+ bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
bit_nr &= 0x3F;
mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
- mclist = mclist->next;
}
return mc_filter[0] | ((u64)(mc_filter[1]) << 32);
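The adm8211 conversion above only changes how the multicast list is walked (netdev_hw_addr_list_for_each() instead of the old dev_addr_list chain); the filter math is unchanged: the top 6 bits of the address CRC select one of 64 filter bits, kept in two 32-bit words and returned as a single u64. A small sketch of that bucketing, with a placeholder hash standing in for ether_crc() (illustrative only, not the driver's code):

#include <stdint.h>
#include <stdio.h>

/* Placeholder for ether_crc(ETH_ALEN, addr); any 32-bit hash works for
 * showing the bucketing, the real driver uses the Ethernet CRC. */
static uint32_t toy_hash(const uint8_t *addr)
{
	uint32_t h = 0;
	for (int i = 0; i < 6; i++)
		h = h * 31 + addr[i];
	return h;
}

/* Same bit bucketing as adm8211_prepare_multicast(): 6 hash bits pick
 * one of 64 filter bits, kept in two 32-bit words. */
static uint64_t build_mc_filter(const uint8_t addrs[][6], int n)
{
	uint32_t mc_filter[2] = { 0, 0 };

	for (int i = 0; i < n; i++) {
		unsigned int bit_nr = (toy_hash(addrs[i]) >> 26) & 0x3F;
		mc_filter[bit_nr >> 5] |= 1u << (bit_nr & 31);
	}
	return mc_filter[0] | ((uint64_t)mc_filter[1] << 32);
}

int main(void)
{
	const uint8_t addrs[][6] = {
		{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 },
		{ 0x01, 0x00, 0x5e, 0x7f, 0xff, 0xfa },
	};
	printf("filter = 0x%016llx\n",
	       (unsigned long long)build_mc_filter(addrs, 2));
	return 0;
}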
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index dc5018a6d9ed..3b7ab20a5c54 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -2876,7 +2876,7 @@ static struct net_device *_init_airo_card( unsigned short irq, int port,
ai->wep_capable = (cap_rid.softCap & cpu_to_le16(0x02)) ? 1 : 0;
ai->max_wep_idx = (cap_rid.softCap & cpu_to_le16(0x80)) ? 3 : 0;
- airo_print_info(dev->name, "Firmware version %x.%x.%02x",
+ airo_print_info(dev->name, "Firmware version %x.%x.%02d",
((le16_to_cpu(cap_rid.softVer) >> 8) & 0xF),
(le16_to_cpu(cap_rid.softVer) & 0xFF),
le16_to_cpu(cap_rid.softSubVer));
@@ -3193,19 +3193,26 @@ static void airo_print_status(const char *devname, u16 status)
{
u8 reason = status & 0xFF;
- switch (status) {
+ switch (status & 0xFF00) {
case STAT_NOBEACON:
- airo_print_dbg(devname, "link lost (missed beacons)");
- break;
- case STAT_MAXRETRIES:
- case STAT_MAXARL:
- airo_print_dbg(devname, "link lost (max retries)");
- break;
- case STAT_FORCELOSS:
- airo_print_dbg(devname, "link lost (local choice)");
- break;
- case STAT_TSFSYNC:
- airo_print_dbg(devname, "link lost (TSF sync lost)");
+ switch (status) {
+ case STAT_NOBEACON:
+ airo_print_dbg(devname, "link lost (missed beacons)");
+ break;
+ case STAT_MAXRETRIES:
+ case STAT_MAXARL:
+ airo_print_dbg(devname, "link lost (max retries)");
+ break;
+ case STAT_FORCELOSS:
+ airo_print_dbg(devname, "link lost (local choice)");
+ break;
+ case STAT_TSFSYNC:
+ airo_print_dbg(devname, "link lost (TSF sync lost)");
+ break;
+ default:
+ airo_print_dbg(devname, "unknow status %x\n", status);
+ break;
+ }
break;
case STAT_DEAUTH:
airo_print_dbg(devname, "deauthenticated (reason: %d)", reason);
@@ -3221,7 +3228,11 @@ static void airo_print_status(const char *devname, u16 status)
airo_print_dbg(devname, "authentication failed (reason: %d)",
reason);
break;
+ case STAT_ASSOC:
+ case STAT_REASSOC:
+ break;
default:
+ airo_print_dbg(devname, "unknow status %x\n", status);
break;
}
}
@@ -5151,13 +5162,6 @@ static void proc_SSID_on_close(struct inode *inode, struct file *file)
enable_MAC(ai, 1);
}
-static inline u8 hexVal(char c) {
- if (c>='0' && c<='9') return c -= '0';
- if (c>='a' && c<='f') return c -= 'a'-10;
- if (c>='A' && c<='F') return c -= 'A'-10;
- return 0;
-}
-
static void proc_APList_on_close( struct inode *inode, struct file *file ) {
struct proc_data *data = (struct proc_data *)file->private_data;
struct proc_dir_entry *dp = PDE(inode);
@@ -5177,11 +5181,11 @@ static void proc_APList_on_close( struct inode *inode, struct file *file ) {
switch(j%3) {
case 0:
APList_rid.ap[i][j/3]=
- hexVal(data->wbuffer[j+i*6*3])<<4;
+ hex_to_bin(data->wbuffer[j+i*6*3])<<4;
break;
case 1:
APList_rid.ap[i][j/3]|=
- hexVal(data->wbuffer[j+i*6*3]);
+ hex_to_bin(data->wbuffer[j+i*6*3]);
break;
}
}
@@ -5329,10 +5333,10 @@ static void proc_wepkey_on_close( struct inode *inode, struct file *file ) {
for( i = 0; i < 16*3 && data->wbuffer[i+j]; i++ ) {
switch(i%3) {
case 0:
- key[i/3] = hexVal(data->wbuffer[i+j])<<4;
+ key[i/3] = hex_to_bin(data->wbuffer[i+j])<<4;
break;
case 1:
- key[i/3] |= hexVal(data->wbuffer[i+j]);
+ key[i/3] |= hex_to_bin(data->wbuffer[i+j]);
break;
}
}
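The airo changes above drop the driver-local hexVal() in favour of the kernel's hex_to_bin(); the parsing itself is untouched: every group of three characters in the proc buffer holds two hex nibbles (high nibble shifted left by 4, then OR of the low nibble) followed by a separator. A short userspace sketch of the same parsing, with hex_to_bin() re-implemented locally since the kernel helper is not available here:

#include <ctype.h>
#include <stdio.h>

/* Local stand-in for the kernel's hex_to_bin(): returns 0-15, or -1. */
static int hex_to_bin(char c)
{
	if (c >= '0' && c <= '9')
		return c - '0';
	c = (char)tolower((unsigned char)c);
	if (c >= 'a' && c <= 'f')
		return c - 'a' + 10;
	return -1;
}

int main(void)
{
	const char *wbuffer = "00:14:6c:7e:40:80";	/* "xx:xx:..." layout */
	unsigned char ap[6] = { 0 };

	/* Same indexing trick as the proc_APList parser: characters at
	 * positions j % 3 == 0 and 1 are nibbles, position 2 is a separator. */
	for (int j = 0; wbuffer[j] && j < 6 * 3; j++) {
		switch (j % 3) {
		case 0:
			ap[j / 3] = hex_to_bin(wbuffer[j]) << 4;
			break;
		case 1:
			ap[j / 3] |= hex_to_bin(wbuffer[j]);
			break;
		}
	}
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       ap[0], ap[1], ap[2], ap[3], ap[4], ap[5]);
	return 0;
}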
diff --git a/drivers/net/wireless/airo_cs.c b/drivers/net/wireless/airo_cs.c
index f6036fb42319..33bdc6a84e81 100644
--- a/drivers/net/wireless/airo_cs.c
+++ b/drivers/net/wireless/airo_cs.c
@@ -75,42 +75,7 @@ static void airo_release(struct pcmcia_device *link);
static void airo_detach(struct pcmcia_device *p_dev);
-/*
- You'll also need to prototype all the functions that will actually
- be used to talk to your device. See 'pcmem_cs' for a good example
- of a fully self-sufficient driver; the other drivers rely more or
- less on other parts of the kernel.
-*/
-
-/*
- A linked list of "instances" of the aironet device. Each actual
- PCMCIA card corresponds to one device instance, and is described
- by one struct pcmcia_device structure (defined in ds.h).
-
- You may not want to use a linked list for this -- for example, the
- memory card driver uses an array of struct pcmcia_device pointers,
- where minor device numbers are used to derive the corresponding
- array index.
-*/
-
-/*
- A driver needs to provide a dev_node_t structure for each device
- on a card. In some cases, there is only one device per card (for
- example, ethernet cards, modems). In other cases, there may be
- many actual or logical devices (SCSI adapters, memory cards with
- multiple partitions). The dev_node_t structures need to be kept
- in a linked list starting at the 'dev' field of a struct pcmcia_device
- structure. We allocate them in the card's private data structure,
- because they generally shouldn't be allocated dynamically.
-
- In this case, we also provide a flag to indicate if a device is
- "stopped" due to a power management event, or card ejection. The
- device IO routines can use a flag like this to throttle IO to a
- card that is not ready to accept it.
-*/
-
typedef struct local_info_t {
- dev_node_t node;
struct net_device *eth_dev;
} local_info_t;
@@ -132,10 +97,6 @@ static int airo_probe(struct pcmcia_device *p_dev)
dev_dbg(&p_dev->dev, "airo_attach()\n");
- /* Interrupt setup */
- p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
- p_dev->irq.Handler = NULL;
-
/*
General socket configuration defaults can go here. In this
client, we assume very little, and rely on the CIS for almost
@@ -212,9 +173,7 @@ static int airo_cs_config_check(struct pcmcia_device *p_dev,
else if (dflt->vpp1.present & (1<<CISTPL_POWER_VNOM))
p_dev->conf.Vpp = dflt->vpp1.param[CISTPL_POWER_VNOM]/10000;
- /* Do we need to allocate an interrupt? */
- if (cfg->irq.IRQInfo1 || dflt->irq.IRQInfo1)
- p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
+ p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
/* IO window settings */
p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0;
@@ -300,16 +259,8 @@ static int airo_config(struct pcmcia_device *link)
if (ret)
goto failed;
- /*
- Allocate an interrupt line. Note that this does not assign a
- handler to the interrupt, unless the 'Handler' member of the
- irq structure is initialized.
- */
- if (link->conf.Attributes & CONF_ENABLE_IRQ) {
- ret = pcmcia_request_irq(link, &link->irq);
- if (ret)
- goto failed;
- }
+ if (!link->irq)
+ goto failed;
/*
This actually configures the PCMCIA socket -- setting up
@@ -320,26 +271,17 @@ static int airo_config(struct pcmcia_device *link)
if (ret)
goto failed;
((local_info_t *)link->priv)->eth_dev =
- init_airo_card(link->irq.AssignedIRQ,
+ init_airo_card(link->irq,
link->io.BasePort1, 1, &link->dev);
if (!((local_info_t *)link->priv)->eth_dev)
goto failed;
- /*
- At this point, the dev_node_t structure(s) need to be
- initialized and arranged in a linked list at link->dev_node.
- */
- strcpy(dev->node.dev_name, ((local_info_t *)link->priv)->eth_dev->name);
- dev->node.major = dev->node.minor = 0;
- link->dev_node = &dev->node;
-
/* Finally, report what we've done */
- printk(KERN_INFO "%s: index 0x%02x: ",
- dev->node.dev_name, link->conf.ConfigIndex);
+ dev_info(&link->dev, "index 0x%02x: ",
+ link->conf.ConfigIndex);
if (link->conf.Vpp)
printk(", Vpp %d.%d", link->conf.Vpp/10, link->conf.Vpp%10);
- if (link->conf.Attributes & CONF_ENABLE_IRQ)
- printk(", irq %d", link->irq.AssignedIRQ);
+ printk(", irq %d", link->irq);
if (link->io.NumPorts1)
printk(", io 0x%04x-0x%04x", link->io.BasePort1,
link->io.BasePort1+link->io.NumPorts1-1);
diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
index 0fb419936dff..8a2d4afc74f8 100644
--- a/drivers/net/wireless/at76c50x-usb.c
+++ b/drivers/net/wireless/at76c50x-usb.c
@@ -1223,7 +1223,6 @@ static void at76_rx_callback(struct urb *urb)
priv->rx_tasklet.data = (unsigned long)urb;
tasklet_schedule(&priv->rx_tasklet);
- return;
}
static int at76_submit_rx_urb(struct at76_priv *priv)
@@ -1889,6 +1888,7 @@ static void at76_dwork_hw_scan(struct work_struct *work)
}
static int at76_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
struct cfg80211_scan_request *req)
{
struct at76_priv *priv = hw->priv;
diff --git a/drivers/net/wireless/ath/Kconfig b/drivers/net/wireless/ath/Kconfig
index 4e7a7fd695c8..0a75be027afa 100644
--- a/drivers/net/wireless/ath/Kconfig
+++ b/drivers/net/wireless/ath/Kconfig
@@ -3,7 +3,7 @@ menuconfig ATH_COMMON
depends on CFG80211
---help---
This will enable the support for the Atheros wireless drivers.
- ath5k, ath9k and ar9170 drivers share some common code, this option
+ ath5k, ath9k, ath9k_htc and ar9170 drivers share some common code; this option
enables the common ath.ko module which shares common helpers.
For more information and documentation on this module you can visit:
diff --git a/drivers/net/wireless/ath/ar9170/ar9170.h b/drivers/net/wireless/ath/ar9170/ar9170.h
index dc662b76a1c8..4f845f80c098 100644
--- a/drivers/net/wireless/ath/ar9170/ar9170.h
+++ b/drivers/net/wireless/ath/ar9170/ar9170.h
@@ -109,41 +109,6 @@ struct ar9170_rxstream_mpdu_merge {
bool has_plcp;
};
-#define AR9170_NUM_TID 16
-#define WME_BA_BMP_SIZE 64
-#define AR9170_NUM_MAX_AGG_LEN (2 * WME_BA_BMP_SIZE)
-
-#define WME_AC_BE 2
-#define WME_AC_BK 3
-#define WME_AC_VI 1
-#define WME_AC_VO 0
-
-#define TID_TO_WME_AC(_tid) \
- ((((_tid) == 0) || ((_tid) == 3)) ? WME_AC_BE : \
- (((_tid) == 1) || ((_tid) == 2)) ? WME_AC_BK : \
- (((_tid) == 4) || ((_tid) == 5)) ? WME_AC_VI : \
- WME_AC_VO)
-
-#define BAW_WITHIN(_start, _bawsz, _seqno) \
- ((((_seqno) - (_start)) & 0xfff) < (_bawsz))
-
-enum ar9170_tid_state {
- AR9170_TID_STATE_INVALID,
- AR9170_TID_STATE_SHUTDOWN,
- AR9170_TID_STATE_PROGRESS,
- AR9170_TID_STATE_COMPLETE,
-};
-
-struct ar9170_sta_tid {
- struct list_head list;
- struct sk_buff_head queue;
- u8 addr[ETH_ALEN];
- u16 ssn;
- u16 tid;
- enum ar9170_tid_state state;
- bool active;
-};
-
struct ar9170_tx_queue_stats {
unsigned int len;
unsigned int limit;
@@ -152,14 +117,11 @@ struct ar9170_tx_queue_stats {
#define AR9170_QUEUE_TIMEOUT 64
#define AR9170_TX_TIMEOUT 8
-#define AR9170_BA_TIMEOUT 4
#define AR9170_JANITOR_DELAY 128
#define AR9170_TX_INVALID_RATE 0xffffffff
-#define AR9170_NUM_TX_STATUS 128
-#define AR9170_NUM_TX_AGG_MAX 30
-#define AR9170_NUM_TX_LIMIT_HARD AR9170_TXQ_DEPTH
-#define AR9170_NUM_TX_LIMIT_SOFT (AR9170_TXQ_DEPTH - 10)
+#define AR9170_NUM_TX_LIMIT_HARD AR9170_TXQ_DEPTH
+#define AR9170_NUM_TX_LIMIT_SOFT (AR9170_TXQ_DEPTH - 10)
struct ar9170 {
struct ieee80211_hw *hw;
@@ -234,11 +196,6 @@ struct ar9170 {
struct sk_buff_head tx_pending[__AR9170_NUM_TXQ];
struct sk_buff_head tx_status[__AR9170_NUM_TXQ];
struct delayed_work tx_janitor;
- /* tx ampdu */
- struct sk_buff_head tx_status_ampdu;
- spinlock_t tx_ampdu_list_lock;
- struct list_head tx_ampdu_list;
- atomic_t tx_ampdu_pending;
/* rxstream mpdu merge */
struct ar9170_rxstream_mpdu_merge rx_mpdu;
@@ -250,11 +207,6 @@ struct ar9170 {
u8 global_ampdu_factor;
};
-struct ar9170_sta_info {
- struct ar9170_sta_tid agg[AR9170_NUM_TID];
- unsigned int ampdu_max_len;
-};
-
struct ar9170_tx_info {
unsigned long timeout;
};
diff --git a/drivers/net/wireless/ath/ar9170/cmd.h b/drivers/net/wireless/ath/ar9170/cmd.h
index 826c45e6b274..ec8134b4b949 100644
--- a/drivers/net/wireless/ath/ar9170/cmd.h
+++ b/drivers/net/wireless/ath/ar9170/cmd.h
@@ -79,7 +79,7 @@ __regwrite_out : \
if (__nreg) { \
if (IS_ACCEPTING_CMD(__ar)) \
__err = ar->exec_cmd(__ar, AR9170_CMD_WREG, \
- 8 * __nreg, \
+ 8 * __nreg, \
(u8 *) &__ar->cmdbuf[1], \
0, NULL); \
__nreg = 0; \
diff --git a/drivers/net/wireless/ath/ar9170/eeprom.h b/drivers/net/wireless/ath/ar9170/eeprom.h
index d2c8cc83f1dd..6c4663883423 100644
--- a/drivers/net/wireless/ath/ar9170/eeprom.h
+++ b/drivers/net/wireless/ath/ar9170/eeprom.h
@@ -127,8 +127,8 @@ struct ar9170_eeprom {
__le16 checksum;
__le16 version;
u8 operating_flags;
-#define AR9170_OPFLAG_5GHZ 1
-#define AR9170_OPFLAG_2GHZ 2
+#define AR9170_OPFLAG_5GHZ 1
+#define AR9170_OPFLAG_2GHZ 2
u8 misc;
__le16 reg_domain[2];
u8 mac_address[6];
diff --git a/drivers/net/wireless/ath/ar9170/hw.h b/drivers/net/wireless/ath/ar9170/hw.h
index 0a1d4c28e68a..06f1f3c951a4 100644
--- a/drivers/net/wireless/ath/ar9170/hw.h
+++ b/drivers/net/wireless/ath/ar9170/hw.h
@@ -425,5 +425,6 @@ enum ar9170_txq {
#define AR9170_TXQ_DEPTH 32
#define AR9170_TX_MAX_PENDING 128
+#define AR9170_RX_STREAM_MAX_SIZE 65535
#endif /* __AR9170_HW_H */
diff --git a/drivers/net/wireless/ath/ar9170/main.c b/drivers/net/wireless/ath/ar9170/main.c
index c53692980990..2abc87578994 100644
--- a/drivers/net/wireless/ath/ar9170/main.c
+++ b/drivers/net/wireless/ath/ar9170/main.c
@@ -50,10 +50,6 @@ static int modparam_nohwcrypt;
module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
-static int modparam_ht;
-module_param_named(ht, modparam_ht, bool, S_IRUGO);
-MODULE_PARM_DESC(ht, "enable MPDU aggregation.");
-
#define RATE(_bitrate, _hw_rate, _txpidx, _flags) { \
.bitrate = (_bitrate), \
.flags = (_flags), \
@@ -182,7 +178,6 @@ static struct ieee80211_supported_band ar9170_band_5GHz = {
};
static void ar9170_tx(struct ar9170 *ar);
-static bool ar9170_tx_ampdu(struct ar9170 *ar);
static inline u16 ar9170_get_seq_h(struct ieee80211_hdr *hdr)
{
@@ -195,21 +190,7 @@ static inline u16 ar9170_get_seq(struct sk_buff *skb)
return ar9170_get_seq_h((void *) txc->frame_data);
}
-static inline u16 ar9170_get_tid_h(struct ieee80211_hdr *hdr)
-{
- return (ieee80211_get_qos_ctl(hdr))[0] & IEEE80211_QOS_CTL_TID_MASK;
-}
-
-static inline u16 ar9170_get_tid(struct sk_buff *skb)
-{
- struct ar9170_tx_control *txc = (void *) skb->data;
- return ar9170_get_tid_h((struct ieee80211_hdr *) txc->frame_data);
-}
-
-#define GET_NEXT_SEQ(seq) ((seq + 1) & 0x0fff)
-#define GET_NEXT_SEQ_FROM_SKB(skb) (GET_NEXT_SEQ(ar9170_get_seq(skb)))
-
-#if (defined AR9170_QUEUE_DEBUG) || (defined AR9170_TXAGG_DEBUG)
+#ifdef AR9170_QUEUE_DEBUG
static void ar9170_print_txheader(struct ar9170 *ar, struct sk_buff *skb)
{
struct ar9170_tx_control *txc = (void *) skb->data;
@@ -236,7 +217,7 @@ static void __ar9170_dump_txqueue(struct ar9170 *ar,
wiphy_name(ar->hw->wiphy), skb_queue_len(queue));
skb_queue_walk(queue, skb) {
- printk(KERN_DEBUG "index:%d => \n", i++);
+ printk(KERN_DEBUG "index:%d =>\n", i++);
ar9170_print_txheader(ar, skb);
}
if (i != skb_queue_len(queue))
@@ -244,7 +225,7 @@ static void __ar9170_dump_txqueue(struct ar9170 *ar,
"mismatch %d != %d\n", skb_queue_len(queue), i);
printk(KERN_DEBUG "---[ end ]---\n");
}
-#endif /* AR9170_QUEUE_DEBUG || AR9170_TXAGG_DEBUG */
+#endif /* AR9170_QUEUE_DEBUG */
#ifdef AR9170_QUEUE_DEBUG
static void ar9170_dump_txqueue(struct ar9170 *ar,
@@ -275,20 +256,6 @@ static void __ar9170_dump_txstats(struct ar9170 *ar)
}
#endif /* AR9170_QUEUE_STOP_DEBUG */
-#ifdef AR9170_TXAGG_DEBUG
-static void ar9170_dump_tx_status_ampdu(struct ar9170 *ar)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&ar->tx_status_ampdu.lock, flags);
- printk(KERN_DEBUG "%s: A-MPDU tx_status queue => \n",
- wiphy_name(ar->hw->wiphy));
- __ar9170_dump_txqueue(ar, &ar->tx_status_ampdu);
- spin_unlock_irqrestore(&ar->tx_status_ampdu.lock, flags);
-}
-
-#endif /* AR9170_TXAGG_DEBUG */
-
/* caller must guarantee exclusive access for _bin_ queue. */
static void ar9170_recycle_expired(struct ar9170 *ar,
struct sk_buff_head *queue,
@@ -308,7 +275,7 @@ static void ar9170_recycle_expired(struct ar9170 *ar,
if (time_is_before_jiffies(arinfo->timeout)) {
#ifdef AR9170_QUEUE_DEBUG
printk(KERN_DEBUG "%s: [%ld > %ld] frame expired => "
- "recycle \n", wiphy_name(ar->hw->wiphy),
+ "recycle\n", wiphy_name(ar->hw->wiphy),
jiffies, arinfo->timeout);
ar9170_print_txheader(ar, skb);
#endif /* AR9170_QUEUE_DEBUG */
@@ -360,70 +327,6 @@ static void ar9170_tx_status(struct ar9170 *ar, struct sk_buff *skb,
ieee80211_tx_status_irqsafe(ar->hw, skb);
}
-static void ar9170_tx_fake_ampdu_status(struct ar9170 *ar)
-{
- struct sk_buff_head success;
- struct sk_buff *skb;
- unsigned int i;
- unsigned long queue_bitmap = 0;
-
- skb_queue_head_init(&success);
-
- while (skb_queue_len(&ar->tx_status_ampdu) > AR9170_NUM_TX_STATUS)
- __skb_queue_tail(&success, skb_dequeue(&ar->tx_status_ampdu));
-
- ar9170_recycle_expired(ar, &ar->tx_status_ampdu, &success);
-
-#ifdef AR9170_TXAGG_DEBUG
- printk(KERN_DEBUG "%s: collected %d A-MPDU frames.\n",
- wiphy_name(ar->hw->wiphy), skb_queue_len(&success));
- __ar9170_dump_txqueue(ar, &success);
-#endif /* AR9170_TXAGG_DEBUG */
-
- while ((skb = __skb_dequeue(&success))) {
- struct ieee80211_tx_info *txinfo;
-
- queue_bitmap |= BIT(skb_get_queue_mapping(skb));
-
- txinfo = IEEE80211_SKB_CB(skb);
- ieee80211_tx_info_clear_status(txinfo);
-
- txinfo->flags |= IEEE80211_TX_STAT_ACK;
- txinfo->status.rates[0].count = 1;
-
- skb_pull(skb, sizeof(struct ar9170_tx_control));
- ieee80211_tx_status_irqsafe(ar->hw, skb);
- }
-
- for_each_set_bit(i, &queue_bitmap, BITS_PER_BYTE) {
-#ifdef AR9170_QUEUE_STOP_DEBUG
- printk(KERN_DEBUG "%s: wake queue %d\n",
- wiphy_name(ar->hw->wiphy), i);
- __ar9170_dump_txstats(ar);
-#endif /* AR9170_QUEUE_STOP_DEBUG */
- ieee80211_wake_queue(ar->hw, i);
- }
-
- if (queue_bitmap)
- ar9170_tx(ar);
-}
-
-static void ar9170_tx_ampdu_callback(struct ar9170 *ar, struct sk_buff *skb)
-{
- struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);
- struct ar9170_tx_info *arinfo = (void *) txinfo->rate_driver_data;
-
- arinfo->timeout = jiffies +
- msecs_to_jiffies(AR9170_BA_TIMEOUT);
-
- skb_queue_tail(&ar->tx_status_ampdu, skb);
- ar9170_tx_fake_ampdu_status(ar);
-
- if (atomic_dec_and_test(&ar->tx_ampdu_pending) &&
- !list_empty(&ar->tx_ampdu_list))
- ar9170_tx_ampdu(ar);
-}
-
void ar9170_tx_callback(struct ar9170 *ar, struct sk_buff *skb)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -447,14 +350,10 @@ void ar9170_tx_callback(struct ar9170 *ar, struct sk_buff *skb)
if (info->flags & IEEE80211_TX_CTL_NO_ACK) {
ar9170_tx_status(ar, skb, AR9170_TX_STATUS_FAILED);
} else {
- if (info->flags & IEEE80211_TX_CTL_AMPDU) {
- ar9170_tx_ampdu_callback(ar, skb);
- } else {
- arinfo->timeout = jiffies +
- msecs_to_jiffies(AR9170_TX_TIMEOUT);
+ arinfo->timeout = jiffies +
+ msecs_to_jiffies(AR9170_TX_TIMEOUT);
- skb_queue_tail(&ar->tx_status[queue], skb);
- }
+ skb_queue_tail(&ar->tx_status[queue], skb);
}
if (!ar->tx_stats[queue].len &&
@@ -524,38 +423,6 @@ static struct sk_buff *ar9170_get_queued_skb(struct ar9170 *ar,
return NULL;
}
-static void ar9170_handle_block_ack(struct ar9170 *ar, u16 count, u16 r)
-{
- struct sk_buff *skb;
- struct ieee80211_tx_info *txinfo;
-
- while (count) {
- skb = ar9170_get_queued_skb(ar, NULL, &ar->tx_status_ampdu, r);
- if (!skb)
- break;
-
- txinfo = IEEE80211_SKB_CB(skb);
- ieee80211_tx_info_clear_status(txinfo);
-
- /* FIXME: maybe more ? */
- txinfo->status.rates[0].count = 1;
-
- skb_pull(skb, sizeof(struct ar9170_tx_control));
- ieee80211_tx_status_irqsafe(ar->hw, skb);
- count--;
- }
-
-#ifdef AR9170_TXAGG_DEBUG
- if (count) {
- printk(KERN_DEBUG "%s: got %d more failed mpdus, but no more "
- "suitable frames left in tx_status queue.\n",
- wiphy_name(ar->hw->wiphy), count);
-
- ar9170_dump_tx_status_ampdu(ar);
- }
-#endif /* AR9170_TXAGG_DEBUG */
-}
-
/*
* This worker tries to keep and maintain the tx_status queues.
* So we can guarantee that incoming tx_status reports are
@@ -592,8 +459,6 @@ static void ar9170_tx_janitor(struct work_struct *work)
resched = true;
}
- ar9170_tx_fake_ampdu_status(ar);
-
if (!resched)
return;
@@ -673,10 +538,6 @@ void ar9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len)
case 0xc5:
/* BlockACK events */
- ar9170_handle_block_ack(ar,
- le16_to_cpu(cmd->ba_fail_cnt.failed),
- le16_to_cpu(cmd->ba_fail_cnt.rate));
- ar9170_tx_fake_ampdu_status(ar);
break;
case 0xc6:
@@ -689,7 +550,8 @@ void ar9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len)
/* firmware debug */
case 0xca:
- printk(KERN_DEBUG "ar9170 FW: %.*s\n", len - 4, (char *)buf + 4);
+ printk(KERN_DEBUG "ar9170 FW: %.*s\n", len - 4,
+ (char *)buf + 4);
break;
case 0xcb:
len -= 4;
@@ -926,7 +788,6 @@ static void ar9170_rx_phy_status(struct ar9170 *ar,
/* TODO: we could do something with phy_errors */
status->signal = ar->noise[0] + phy->rssi_combined;
- status->noise = ar->noise[0];
}
static struct sk_buff *ar9170_rx_copy_data(u8 *buf, int len)
@@ -1247,7 +1108,6 @@ static int ar9170_op_start(struct ieee80211_hw *hw)
ar->global_ampdu_density = 6;
ar->global_ampdu_factor = 3;
- atomic_set(&ar->tx_ampdu_pending, 0);
ar->bad_hw_nagger = jiffies;
err = ar->open(ar);
@@ -1310,40 +1170,10 @@ static void ar9170_op_stop(struct ieee80211_hw *hw)
skb_queue_purge(&ar->tx_pending[i]);
skb_queue_purge(&ar->tx_status[i]);
}
- skb_queue_purge(&ar->tx_status_ampdu);
mutex_unlock(&ar->mutex);
}
-static void ar9170_tx_indicate_immba(struct ar9170 *ar, struct sk_buff *skb)
-{
- struct ar9170_tx_control *txc = (void *) skb->data;
-
- txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_IMM_AMPDU);
-}
-
-static void ar9170_tx_copy_phy(struct ar9170 *ar, struct sk_buff *dst,
- struct sk_buff *src)
-{
- struct ar9170_tx_control *dst_txc, *src_txc;
- struct ieee80211_tx_info *dst_info, *src_info;
- struct ar9170_tx_info *dst_arinfo, *src_arinfo;
-
- src_txc = (void *) src->data;
- src_info = IEEE80211_SKB_CB(src);
- src_arinfo = (void *) src_info->rate_driver_data;
-
- dst_txc = (void *) dst->data;
- dst_info = IEEE80211_SKB_CB(dst);
- dst_arinfo = (void *) dst_info->rate_driver_data;
-
- dst_txc->phy_control = src_txc->phy_control;
-
- /* same MCS for the whole aggregate */
- memcpy(dst_info->driver_rates, src_info->driver_rates,
- sizeof(dst_info->driver_rates));
-}
-
static int ar9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb)
{
struct ieee80211_hdr *hdr;
@@ -1420,14 +1250,7 @@ static int ar9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb)
txc->phy_control |=
cpu_to_le32(queue << AR9170_TX_PHY_QOS_SHIFT);
- if (info->flags & IEEE80211_TX_CTL_AMPDU) {
- if (unlikely(!info->control.sta))
- goto err_out;
-
- txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_AGGR);
- } else {
- txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_RATE_PROBE);
- }
+ txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_RATE_PROBE);
}
return 0;
@@ -1537,158 +1360,6 @@ static void ar9170_tx_prepare_phy(struct ar9170 *ar, struct sk_buff *skb)
txc->phy_control |= cpu_to_le32(chains << AR9170_TX_PHY_TXCHAIN_SHIFT);
}
-static bool ar9170_tx_ampdu(struct ar9170 *ar)
-{
- struct sk_buff_head agg;
- struct ar9170_sta_tid *tid_info = NULL, *tmp;
- struct sk_buff *skb, *first = NULL;
- unsigned long flags, f2;
- unsigned int i = 0;
- u16 seq, queue, tmpssn;
- bool run = false;
-
- skb_queue_head_init(&agg);
-
- spin_lock_irqsave(&ar->tx_ampdu_list_lock, flags);
- if (list_empty(&ar->tx_ampdu_list)) {
-#ifdef AR9170_TXAGG_DEBUG
- printk(KERN_DEBUG "%s: aggregation list is empty.\n",
- wiphy_name(ar->hw->wiphy));
-#endif /* AR9170_TXAGG_DEBUG */
- goto out_unlock;
- }
-
- list_for_each_entry_safe(tid_info, tmp, &ar->tx_ampdu_list, list) {
- if (tid_info->state != AR9170_TID_STATE_COMPLETE) {
-#ifdef AR9170_TXAGG_DEBUG
- printk(KERN_DEBUG "%s: dangling aggregation entry!\n",
- wiphy_name(ar->hw->wiphy));
-#endif /* AR9170_TXAGG_DEBUG */
- continue;
- }
-
- if (++i > 64) {
-#ifdef AR9170_TXAGG_DEBUG
- printk(KERN_DEBUG "%s: enough frames aggregated.\n",
- wiphy_name(ar->hw->wiphy));
-#endif /* AR9170_TXAGG_DEBUG */
- break;
- }
-
- queue = TID_TO_WME_AC(tid_info->tid);
-
- if (skb_queue_len(&ar->tx_pending[queue]) >=
- AR9170_NUM_TX_AGG_MAX) {
-#ifdef AR9170_TXAGG_DEBUG
- printk(KERN_DEBUG "%s: queue %d full.\n",
- wiphy_name(ar->hw->wiphy), queue);
-#endif /* AR9170_TXAGG_DEBUG */
- continue;
- }
-
- list_del_init(&tid_info->list);
-
- spin_lock_irqsave(&tid_info->queue.lock, f2);
- tmpssn = seq = tid_info->ssn;
- first = skb_peek(&tid_info->queue);
-
- if (likely(first))
- tmpssn = ar9170_get_seq(first);
-
- if (unlikely(tmpssn != seq)) {
-#ifdef AR9170_TXAGG_DEBUG
- printk(KERN_DEBUG "%s: ssn mismatch [%d != %d]\n.",
- wiphy_name(ar->hw->wiphy), seq, tmpssn);
-#endif /* AR9170_TXAGG_DEBUG */
- tid_info->ssn = tmpssn;
- }
-
-#ifdef AR9170_TXAGG_DEBUG
- printk(KERN_DEBUG "%s: generate A-MPDU for tid:%d ssn:%d with "
- "%d queued frames.\n", wiphy_name(ar->hw->wiphy),
- tid_info->tid, tid_info->ssn,
- skb_queue_len(&tid_info->queue));
- __ar9170_dump_txqueue(ar, &tid_info->queue);
-#endif /* AR9170_TXAGG_DEBUG */
-
- while ((skb = skb_peek(&tid_info->queue))) {
- if (unlikely(ar9170_get_seq(skb) != seq))
- break;
-
- __skb_unlink(skb, &tid_info->queue);
- tid_info->ssn = seq = GET_NEXT_SEQ(seq);
-
- if (unlikely(skb_get_queue_mapping(skb) != queue)) {
-#ifdef AR9170_TXAGG_DEBUG
- printk(KERN_DEBUG "%s: tid:%d(q:%d) queue:%d "
- "!match.\n", wiphy_name(ar->hw->wiphy),
- tid_info->tid,
- TID_TO_WME_AC(tid_info->tid),
- skb_get_queue_mapping(skb));
-#endif /* AR9170_TXAGG_DEBUG */
- dev_kfree_skb_any(skb);
- continue;
- }
-
- if (unlikely(first == skb)) {
- ar9170_tx_prepare_phy(ar, skb);
- __skb_queue_tail(&agg, skb);
- first = skb;
- } else {
- ar9170_tx_copy_phy(ar, skb, first);
- __skb_queue_tail(&agg, skb);
- }
-
- if (unlikely(skb_queue_len(&agg) ==
- AR9170_NUM_TX_AGG_MAX))
- break;
- }
-
- if (skb_queue_empty(&tid_info->queue))
- tid_info->active = false;
- else
- list_add_tail(&tid_info->list,
- &ar->tx_ampdu_list);
-
- spin_unlock_irqrestore(&tid_info->queue.lock, f2);
-
- if (unlikely(skb_queue_empty(&agg))) {
-#ifdef AR9170_TXAGG_DEBUG
- printk(KERN_DEBUG "%s: queued empty list!\n",
- wiphy_name(ar->hw->wiphy));
-#endif /* AR9170_TXAGG_DEBUG */
- continue;
- }
-
- /*
- * tell the FW/HW that this is the last frame,
- * that way it will wait for the immediate block ack.
- */
- ar9170_tx_indicate_immba(ar, skb_peek_tail(&agg));
-
-#ifdef AR9170_TXAGG_DEBUG
- printk(KERN_DEBUG "%s: generated A-MPDU looks like this:\n",
- wiphy_name(ar->hw->wiphy));
- __ar9170_dump_txqueue(ar, &agg);
-#endif /* AR9170_TXAGG_DEBUG */
-
- spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags);
-
- spin_lock_irqsave(&ar->tx_pending[queue].lock, flags);
- skb_queue_splice_tail_init(&agg, &ar->tx_pending[queue]);
- spin_unlock_irqrestore(&ar->tx_pending[queue].lock, flags);
- run = true;
-
- spin_lock_irqsave(&ar->tx_ampdu_list_lock, flags);
- }
-
-out_unlock:
- spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags);
- __skb_queue_purge(&agg);
-
- return run;
-}
-
static void ar9170_tx(struct ar9170 *ar)
{
struct sk_buff *skb;
@@ -1728,7 +1399,7 @@ static void ar9170_tx(struct ar9170 *ar)
printk(KERN_DEBUG "%s: queue %d full\n",
wiphy_name(ar->hw->wiphy), i);
- printk(KERN_DEBUG "%s: stuck frames: ===> \n",
+ printk(KERN_DEBUG "%s: stuck frames: ===>\n",
wiphy_name(ar->hw->wiphy));
ar9170_dump_txqueue(ar, &ar->tx_pending[i]);
ar9170_dump_txqueue(ar, &ar->tx_status[i]);
@@ -1763,9 +1434,6 @@ static void ar9170_tx(struct ar9170 *ar)
arinfo->timeout = jiffies +
msecs_to_jiffies(AR9170_TX_TIMEOUT);
- if (info->flags & IEEE80211_TX_CTL_AMPDU)
- atomic_inc(&ar->tx_ampdu_pending);
-
#ifdef AR9170_QUEUE_DEBUG
printk(KERN_DEBUG "%s: send frame q:%d =>\n",
wiphy_name(ar->hw->wiphy), i);
@@ -1774,9 +1442,6 @@ static void ar9170_tx(struct ar9170 *ar)
err = ar->tx(ar, skb);
if (unlikely(err)) {
- if (info->flags & IEEE80211_TX_CTL_AMPDU)
- atomic_dec(&ar->tx_ampdu_pending);
-
frames_failed++;
dev_kfree_skb_any(skb);
} else {
@@ -1823,94 +1488,11 @@ static void ar9170_tx(struct ar9170 *ar)
msecs_to_jiffies(AR9170_JANITOR_DELAY));
}
-static bool ar9170_tx_ampdu_queue(struct ar9170 *ar, struct sk_buff *skb)
-{
- struct ieee80211_tx_info *txinfo;
- struct ar9170_sta_info *sta_info;
- struct ar9170_sta_tid *agg;
- struct sk_buff *iter;
- unsigned long flags, f2;
- unsigned int max;
- u16 tid, seq, qseq;
- bool run = false, queue = false;
-
- tid = ar9170_get_tid(skb);
- seq = ar9170_get_seq(skb);
- txinfo = IEEE80211_SKB_CB(skb);
- sta_info = (void *) txinfo->control.sta->drv_priv;
- agg = &sta_info->agg[tid];
- max = sta_info->ampdu_max_len;
-
- spin_lock_irqsave(&ar->tx_ampdu_list_lock, flags);
-
- if (unlikely(agg->state != AR9170_TID_STATE_COMPLETE)) {
-#ifdef AR9170_TXAGG_DEBUG
- printk(KERN_DEBUG "%s: BlockACK session not fully initialized "
- "for ESS:%pM tid:%d state:%d.\n",
- wiphy_name(ar->hw->wiphy), agg->addr, agg->tid,
- agg->state);
-#endif /* AR9170_TXAGG_DEBUG */
- goto err_unlock;
- }
-
- if (!agg->active) {
- agg->active = true;
- agg->ssn = seq;
- queue = true;
- }
-
- /* check if seq is within the BA window */
- if (unlikely(!BAW_WITHIN(agg->ssn, max, seq))) {
-#ifdef AR9170_TXAGG_DEBUG
- printk(KERN_DEBUG "%s: frame with tid:%d seq:%d does not "
- "fit into BA window (%d - %d)\n",
- wiphy_name(ar->hw->wiphy), tid, seq, agg->ssn,
- (agg->ssn + max) & 0xfff);
-#endif /* AR9170_TXAGG_DEBUG */
- goto err_unlock;
- }
-
- spin_lock_irqsave(&agg->queue.lock, f2);
-
- skb_queue_reverse_walk(&agg->queue, iter) {
- qseq = ar9170_get_seq(iter);
-
- if (GET_NEXT_SEQ(qseq) == seq) {
- __skb_queue_after(&agg->queue, iter, skb);
- goto queued;
- }
- }
-
- __skb_queue_head(&agg->queue, skb);
-
-queued:
- spin_unlock_irqrestore(&agg->queue.lock, f2);
-
-#ifdef AR9170_TXAGG_DEBUG
- printk(KERN_DEBUG "%s: new aggregate %p queued.\n",
- wiphy_name(ar->hw->wiphy), skb);
- __ar9170_dump_txqueue(ar, &agg->queue);
-#endif /* AR9170_TXAGG_DEBUG */
-
- if (skb_queue_len(&agg->queue) >= AR9170_NUM_TX_AGG_MAX)
- run = true;
-
- if (queue)
- list_add_tail(&agg->list, &ar->tx_ampdu_list);
-
- spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags);
- return run;
-
-err_unlock:
- spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags);
- dev_kfree_skb_irq(skb);
- return false;
-}
-
int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
struct ar9170 *ar = hw->priv;
struct ieee80211_tx_info *info;
+ unsigned int queue;
if (unlikely(!IS_STARTED(ar)))
goto err_free;
@@ -1918,18 +1500,10 @@ int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
if (unlikely(ar9170_tx_prepare(ar, skb)))
goto err_free;
+ queue = skb_get_queue_mapping(skb);
info = IEEE80211_SKB_CB(skb);
- if (info->flags & IEEE80211_TX_CTL_AMPDU) {
- bool run = ar9170_tx_ampdu_queue(ar, skb);
-
- if (run || !atomic_read(&ar->tx_ampdu_pending))
- ar9170_tx_ampdu(ar);
- } else {
- unsigned int queue = skb_get_queue_mapping(skb);
-
- ar9170_tx_prepare_phy(ar, skb);
- skb_queue_tail(&ar->tx_pending[queue], skb);
- }
+ ar9170_tx_prepare_phy(ar, skb);
+ skb_queue_tail(&ar->tx_pending[queue], skb);
ar9170_tx(ar);
return NETDEV_TX_OK;
@@ -2046,21 +1620,17 @@ out:
return err;
}
-static u64 ar9170_op_prepare_multicast(struct ieee80211_hw *hw, int mc_count,
- struct dev_addr_list *mclist)
+static u64 ar9170_op_prepare_multicast(struct ieee80211_hw *hw,
+ struct netdev_hw_addr_list *mc_list)
{
u64 mchash;
- int i;
+ struct netdev_hw_addr *ha;
/* always get broadcast frames */
mchash = 1ULL << (0xff >> 2);
- for (i = 0; i < mc_count; i++) {
- if (WARN_ON(!mclist))
- break;
- mchash |= 1ULL << (mclist->dmi_addr[5] >> 2);
- mclist = mclist->next;
- }
+ netdev_hw_addr_list_for_each(ha, mc_list)
+ mchash |= 1ULL << (ha->addr[5] >> 2);
return mchash;
}
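
The rewritten prepare_multicast hook above derives a 64-bit filter hash from the top six bits of each address's last octet; broadcast (0xff) always maps to bit 63. A stand-alone sketch of the same arithmetic, using made-up example octets rather than anything taken from the driver:

#include <stdio.h>
#include <stdint.h>

/* Sketch of the ar9170 multicast hash above: bit index = addr[5] >> 2.
 * The example last octets below are invented for illustration. */
int main(void)
{
	const uint8_t last_octet[] = { 0xff, 0x01, 0xfb };
	uint64_t mchash = 1ULL << (0xff >> 2);	/* always accept broadcast: bit 63 */
	size_t i;

	for (i = 0; i < sizeof(last_octet); i++)
		mchash |= 1ULL << (last_octet[i] >> 2);

	printf("mchash = 0x%016llx\n", (unsigned long long)mchash);
	return 0;
}
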
@@ -2330,57 +1900,6 @@ out:
return err;
}
-static int ar9170_sta_add(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct ar9170 *ar = hw->priv;
- struct ar9170_sta_info *sta_info = (void *) sta->drv_priv;
- unsigned int i;
-
- memset(sta_info, 0, sizeof(*sta_info));
-
- if (!sta->ht_cap.ht_supported)
- return 0;
-
- if (sta->ht_cap.ampdu_density > ar->global_ampdu_density)
- ar->global_ampdu_density = sta->ht_cap.ampdu_density;
-
- if (sta->ht_cap.ampdu_factor < ar->global_ampdu_factor)
- ar->global_ampdu_factor = sta->ht_cap.ampdu_factor;
-
- for (i = 0; i < AR9170_NUM_TID; i++) {
- sta_info->agg[i].state = AR9170_TID_STATE_SHUTDOWN;
- sta_info->agg[i].active = false;
- sta_info->agg[i].ssn = 0;
- sta_info->agg[i].tid = i;
- INIT_LIST_HEAD(&sta_info->agg[i].list);
- skb_queue_head_init(&sta_info->agg[i].queue);
- }
-
- sta_info->ampdu_max_len = 1 << (3 + sta->ht_cap.ampdu_factor);
-
- return 0;
-}
-
-static int ar9170_sta_remove(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct ar9170_sta_info *sta_info = (void *) sta->drv_priv;
- unsigned int i;
-
- if (!sta->ht_cap.ht_supported)
- return 0;
-
- for (i = 0; i < AR9170_NUM_TID; i++) {
- sta_info->agg[i].state = AR9170_TID_STATE_INVALID;
- skb_queue_purge(&sta_info->agg[i].queue);
- }
-
- return 0;
-}
-
static int ar9170_get_stats(struct ieee80211_hw *hw,
struct ieee80211_low_level_stats *stats)
{
@@ -2423,55 +1942,7 @@ static int ar9170_ampdu_action(struct ieee80211_hw *hw,
enum ieee80211_ampdu_mlme_action action,
struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
- struct ar9170 *ar = hw->priv;
- struct ar9170_sta_info *sta_info = (void *) sta->drv_priv;
- struct ar9170_sta_tid *tid_info = &sta_info->agg[tid];
- unsigned long flags;
-
- if (!modparam_ht)
- return -EOPNOTSUPP;
-
switch (action) {
- case IEEE80211_AMPDU_TX_START:
- spin_lock_irqsave(&ar->tx_ampdu_list_lock, flags);
- if (tid_info->state != AR9170_TID_STATE_SHUTDOWN ||
- !list_empty(&tid_info->list)) {
- spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags);
-#ifdef AR9170_TXAGG_DEBUG
- printk(KERN_INFO "%s: A-MPDU [ESS:[%pM] tid:[%d]] "
- "is in a very bad state!\n",
- wiphy_name(hw->wiphy), sta->addr, tid);
-#endif /* AR9170_TXAGG_DEBUG */
- return -EBUSY;
- }
-
- *ssn = tid_info->ssn;
- tid_info->state = AR9170_TID_STATE_PROGRESS;
- tid_info->active = false;
- spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags);
- ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
- break;
-
- case IEEE80211_AMPDU_TX_STOP:
- spin_lock_irqsave(&ar->tx_ampdu_list_lock, flags);
- tid_info->state = AR9170_TID_STATE_SHUTDOWN;
- list_del_init(&tid_info->list);
- tid_info->active = false;
- skb_queue_purge(&tid_info->queue);
- spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags);
- ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
- break;
-
- case IEEE80211_AMPDU_TX_OPERATIONAL:
-#ifdef AR9170_TXAGG_DEBUG
- printk(KERN_INFO "%s: A-MPDU for %pM [tid:%d] Operational.\n",
- wiphy_name(hw->wiphy), sta->addr, tid);
-#endif /* AR9170_TXAGG_DEBUG */
- spin_lock_irqsave(&ar->tx_ampdu_list_lock, flags);
- sta_info->agg[tid].state = AR9170_TID_STATE_COMPLETE;
- spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags);
- break;
-
case IEEE80211_AMPDU_RX_START:
case IEEE80211_AMPDU_RX_STOP:
/* Handled by firmware */
@@ -2497,8 +1968,6 @@ static const struct ieee80211_ops ar9170_ops = {
.bss_info_changed = ar9170_op_bss_info_changed,
.get_tsf = ar9170_op_get_tsf,
.set_key = ar9170_set_key,
- .sta_add = ar9170_sta_add,
- .sta_remove = ar9170_sta_remove,
.get_stats = ar9170_get_stats,
.ampdu_action = ar9170_ampdu_action,
};
@@ -2516,7 +1985,7 @@ void *ar9170_alloc(size_t priv_size)
* tends to split the streams into separate rx descriptors.
*/
- skb = __dev_alloc_skb(AR9170_MAX_RX_BUFFER_SIZE, GFP_KERNEL);
+ skb = __dev_alloc_skb(AR9170_RX_STREAM_MAX_SIZE, GFP_KERNEL);
if (!skb)
goto err_nomem;
@@ -2531,8 +2000,6 @@ void *ar9170_alloc(size_t priv_size)
mutex_init(&ar->mutex);
spin_lock_init(&ar->cmdlock);
spin_lock_init(&ar->tx_stats_lock);
- spin_lock_init(&ar->tx_ampdu_list_lock);
- skb_queue_head_init(&ar->tx_status_ampdu);
for (i = 0; i < __AR9170_NUM_TXQ; i++) {
skb_queue_head_init(&ar->tx_status[i]);
skb_queue_head_init(&ar->tx_pending[i]);
@@ -2540,7 +2007,6 @@ void *ar9170_alloc(size_t priv_size)
ar9170_rx_reset_rx_mpdu(ar);
INIT_WORK(&ar->beacon_work, ar9170_new_beacon);
INIT_DELAYED_WORK(&ar->tx_janitor, ar9170_tx_janitor);
- INIT_LIST_HEAD(&ar->tx_ampdu_list);
/* all hw supports 2.4 GHz, so set channel to 1 by default */
ar->channel = &ar9170_2ghz_chantable[0];
@@ -2551,19 +2017,10 @@ void *ar9170_alloc(size_t priv_size)
BIT(NL80211_IFTYPE_ADHOC);
ar->hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS |
IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
- IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_NOISE_DBM;
-
- if (modparam_ht) {
- ar->hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
- } else {
- ar9170_band_2GHz.ht_cap.ht_supported = false;
- ar9170_band_5GHz.ht_cap.ht_supported = false;
- }
+ IEEE80211_HW_SIGNAL_DBM;
ar->hw->queues = __AR9170_NUM_TXQ;
ar->hw->extra_tx_headroom = 8;
- ar->hw->sta_data_size = sizeof(struct ar9170_sta_info);
ar->hw->max_rates = 1;
ar->hw->max_rate_tries = 3;
diff --git a/drivers/net/wireless/ath/ar9170/usb.c b/drivers/net/wireless/ath/ar9170/usb.c
index 99a6da464bd3..82ab532a4923 100644
--- a/drivers/net/wireless/ath/ar9170/usb.c
+++ b/drivers/net/wireless/ath/ar9170/usb.c
@@ -42,6 +42,7 @@
#include <linux/usb.h>
#include <linux/firmware.h>
#include <linux/etherdevice.h>
+#include <linux/device.h>
#include <net/mac80211.h>
#include "ar9170.h"
#include "cmd.h"
@@ -67,18 +68,28 @@ static struct usb_device_id ar9170_usb_ids[] = {
{ USB_DEVICE(0x0cf3, 0x1001) },
/* TP-Link TL-WN821N v2 */
{ USB_DEVICE(0x0cf3, 0x1002) },
+ /* 3Com Dual Band 802.11n USB Adapter */
+ { USB_DEVICE(0x0cf3, 0x1010) },
+ /* H3C Dual Band 802.11n USB Adapter */
+ { USB_DEVICE(0x0cf3, 0x1011) },
/* Cace Airpcap NX */
{ USB_DEVICE(0xcace, 0x0300) },
/* D-Link DWA 160 A1 */
{ USB_DEVICE(0x07d1, 0x3c10) },
/* D-Link DWA 160 A2 */
{ USB_DEVICE(0x07d1, 0x3a09) },
+ /* Netgear WNA1000 */
+ { USB_DEVICE(0x0846, 0x9040) },
/* Netgear WNDA3100 */
{ USB_DEVICE(0x0846, 0x9010) },
/* Netgear WN111 v2 */
{ USB_DEVICE(0x0846, 0x9001) },
/* Zydas ZD1221 */
{ USB_DEVICE(0x0ace, 0x1221) },
+ /* Proxim ORiNOCO 802.11n USB */
+ { USB_DEVICE(0x1435, 0x0804) },
+ /* WNC Generic 11n USB Dongle */
+ { USB_DEVICE(0x1435, 0x0326) },
/* ZyXEL NWD271N */
{ USB_DEVICE(0x0586, 0x3417) },
/* Z-Com UB81 BG */
@@ -99,6 +110,8 @@ static struct usb_device_id ar9170_usb_ids[] = {
{ USB_DEVICE(0x0409, 0x0249) },
/* AVM FRITZ!WLAN USB Stick N 2.4 */
{ USB_DEVICE(0x057C, 0x8402), .driver_info = AR9170_REQ_FW1_ONLY },
+ /* Qwest/Actiontec 802AIN Wireless N USB Network Adapter */
+ { USB_DEVICE(0x1668, 0x1200) },
/* terminate */
{}
@@ -202,7 +215,7 @@ resubmit:
return;
free:
- usb_buffer_free(aru->udev, 64, urb->transfer_buffer, urb->transfer_dma);
+ usb_free_coherent(aru->udev, 64, urb->transfer_buffer, urb->transfer_dma);
}
static void ar9170_usb_rx_completed(struct urb *urb)
@@ -283,7 +296,7 @@ static int ar9170_usb_alloc_rx_irq_urb(struct ar9170_usb *aru)
if (!urb)
goto out;
- ibuf = usb_buffer_alloc(aru->udev, 64, GFP_KERNEL, &urb->transfer_dma);
+ ibuf = usb_alloc_coherent(aru->udev, 64, GFP_KERNEL, &urb->transfer_dma);
if (!ibuf)
goto out;
@@ -296,8 +309,8 @@ static int ar9170_usb_alloc_rx_irq_urb(struct ar9170_usb *aru)
err = usb_submit_urb(urb, GFP_KERNEL);
if (err) {
usb_unanchor_urb(urb);
- usb_buffer_free(aru->udev, 64, urb->transfer_buffer,
- urb->transfer_dma);
+ usb_free_coherent(aru->udev, 64, urb->transfer_buffer,
+ urb->transfer_dma);
}
out:
@@ -727,12 +740,16 @@ static void ar9170_usb_firmware_failed(struct ar9170_usb *aru)
{
struct device *parent = aru->udev->dev.parent;
+ complete(&aru->firmware_loading_complete);
+
/* unbind anything failed */
if (parent)
- down(&parent->sem);
+ device_lock(parent);
device_release_driver(&aru->udev->dev);
if (parent)
- up(&parent->sem);
+ device_unlock(parent);
+
+ usb_put_dev(aru->udev);
}
static void ar9170_usb_firmware_finish(const struct firmware *fw, void *context)
@@ -761,6 +778,8 @@ static void ar9170_usb_firmware_finish(const struct firmware *fw, void *context)
if (err)
goto err_unrx;
+ complete(&aru->firmware_loading_complete);
+ usb_put_dev(aru->udev);
return;
err_unrx:
@@ -858,6 +877,7 @@ static int ar9170_usb_probe(struct usb_interface *intf,
init_usb_anchor(&aru->tx_pending);
init_usb_anchor(&aru->tx_submitted);
init_completion(&aru->cmd_wait);
+ init_completion(&aru->firmware_loading_complete);
spin_lock_init(&aru->tx_urb_lock);
aru->tx_pending_urbs = 0;
@@ -877,6 +897,7 @@ static int ar9170_usb_probe(struct usb_interface *intf,
if (err)
goto err_freehw;
+ usb_get_dev(aru->udev);
return request_firmware_nowait(THIS_MODULE, 1, "ar9170.fw",
&aru->udev->dev, GFP_KERNEL, aru,
ar9170_usb_firmware_step2);
@@ -896,6 +917,9 @@ static void ar9170_usb_disconnect(struct usb_interface *intf)
return;
aru->common.state = AR9170_IDLE;
+
+ wait_for_completion(&aru->firmware_loading_complete);
+
ar9170_unregister(&aru->common);
ar9170_usb_cancel_urbs(aru);
diff --git a/drivers/net/wireless/ath/ar9170/usb.h b/drivers/net/wireless/ath/ar9170/usb.h
index a2ce3b169ceb..919b06046eb3 100644
--- a/drivers/net/wireless/ath/ar9170/usb.h
+++ b/drivers/net/wireless/ath/ar9170/usb.h
@@ -71,6 +71,7 @@ struct ar9170_usb {
unsigned int tx_pending_urbs;
struct completion cmd_wait;
+ struct completion firmware_loading_complete;
int readlen;
u8 *readbuf;
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index 71fc960814f0..d32f2828b098 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -48,6 +48,12 @@ enum ath_device_state {
ATH_HW_INITIALIZED,
};
+enum ath_bus_type {
+ ATH_PCI,
+ ATH_AHB,
+ ATH_USB,
+};
+
struct reg_dmn_pair_mapping {
u16 regDmnEnum;
u16 reg_5ghz_ctl;
@@ -65,17 +71,30 @@ struct ath_regulatory {
struct reg_dmn_pair_mapping *regpair;
};
+/**
+ * struct ath_ops - Register read/write operations
+ *
+ * @read: Register read
+ * @write: Register write
+ * @enable_write_buffer: Enable multiple register writes
+ * @disable_write_buffer: Disable multiple register writes
+ * @write_flush: Flush buffered register writes
+ */
struct ath_ops {
unsigned int (*read)(void *, u32 reg_offset);
- void (*write)(void *, u32 val, u32 reg_offset);
+ void (*write)(void *, u32 val, u32 reg_offset);
+ void (*enable_write_buffer)(void *);
+ void (*disable_write_buffer)(void *);
+ void (*write_flush) (void *);
};
struct ath_common;
struct ath_bus_ops {
- void (*read_cachesize)(struct ath_common *common, int *csz);
- bool (*eeprom_read)(struct ath_common *common, u32 off, u16 *data);
- void (*bt_coex_prep)(struct ath_common *common);
+ enum ath_bus_type ath_bus_type;
+ void (*read_cachesize)(struct ath_common *common, int *csz);
+ bool (*eeprom_read)(struct ath_common *common, u32 off, u16 *data);
+ void (*bt_coex_prep)(struct ath_common *common);
};
struct ath_common {
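
The three write-buffer hooks added to struct ath_ops are optional; a bus glue that can batch register writes (USB, for instance) fills them in, others leave them NULL. A hedged sketch of how a caller could use them, with a hypothetical helper name and an opaque priv cookie that are not part of this patch:

/* Illustration only: group several register writes into one buffered
 * burst when the bus supports it, falling back to plain writes. */
static void example_reg_write_burst(const struct ath_ops *ops, void *priv,
				    const u32 *regs, const u32 *vals, int n)
{
	int i;

	if (ops->enable_write_buffer)
		ops->enable_write_buffer(priv);

	for (i = 0; i < n; i++)
		ops->write(priv, vals[i], regs[i]);

	if (ops->write_flush)
		ops->write_flush(priv);	/* push out anything still buffered */

	if (ops->disable_write_buffer)
		ops->disable_write_buffer(priv);
}
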
diff --git a/drivers/net/wireless/ath/ath5k/Makefile b/drivers/net/wireless/ath/ath5k/Makefile
index 090dc6d268a3..cc09595b781a 100644
--- a/drivers/net/wireless/ath/ath5k/Makefile
+++ b/drivers/net/wireless/ath/ath5k/Makefile
@@ -12,5 +12,6 @@ ath5k-y += attach.o
ath5k-y += base.o
ath5k-y += led.o
ath5k-y += rfkill.o
+ath5k-y += ani.o
ath5k-$(CONFIG_ATH5K_DEBUG) += debug.o
obj-$(CONFIG_ATH5K) += ath5k.o
diff --git a/drivers/net/wireless/ath/ath5k/ani.c b/drivers/net/wireless/ath/ath5k/ani.c
new file mode 100644
index 000000000000..f2311ab35504
--- /dev/null
+++ b/drivers/net/wireless/ath/ath5k/ani.c
@@ -0,0 +1,744 @@
+/*
+ * Copyright (C) 2010 Bruno Randolf <br1@einfach.org>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "ath5k.h"
+#include "base.h"
+#include "reg.h"
+#include "debug.h"
+#include "ani.h"
+
+/**
+ * DOC: Basic ANI Operation
+ *
+ * Adaptive Noise Immunity (ANI) controls five noise immunity parameters
+ * depending on the amount of interference in the environment, increasing
+ * or reducing sensitivity as necessary.
+ *
+ * The parameters are:
+ * - "noise immunity"
+ * - "spur immunity"
+ * - "firstep level"
+ * - "OFDM weak signal detection"
+ * - "CCK weak signal detection"
+ *
+ * Basically we look at the amount of OFDM and CCK timing errors we get and then
+ * raise or lower immunity accordingly by setting one or more of these
+ * parameters.
+ * Newer chipsets have PHY error counters in hardware which will generate a MIB
+ * interrupt when they overflow. Older hardware has to enable PHY error frames
+ * by setting an RX flag and then count every single PHY error. When a specified
+ * threshold of errors has been reached we will raise immunity.
+ * Also we regularly check the amount of errors and lower or raise immunity as
+ * necessary.
+ */
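
As a rough, stand-alone illustration of the loop described in the comment above (the real logic lives in ath5k_ani_calibration() further down in this file), one ANI period reduces to comparing the per-period error counts against the trigger values from ani.h, scaled by the time spent listening:

/* Sketch only, not driver code: returns +1 to raise immunity, -1 to
 * lower it, 0 to leave it alone.  The numeric triggers mirror the
 * ATH5K_ANI_*_TRIG_* constants defined in ani.h. */
static int ani_period_decision(unsigned int listen_ms,
			       unsigned int ofdm_errors,
			       unsigned int cck_errors)
{
	unsigned int ofdm_high = listen_ms * 500 / 1000;	/* OFDM_TRIG_HIGH */
	unsigned int cck_high  = listen_ms * 200 / 1000;	/* CCK_TRIG_HIGH  */
	unsigned int ofdm_low  = listen_ms * 200 / 1000;	/* OFDM_TRIG_LOW  */
	unsigned int cck_low   = listen_ms * 100 / 1000;	/* CCK_TRIG_LOW   */

	if (ofdm_errors > ofdm_high || cck_errors > cck_high)
		return 1;	/* too many PHY errors: become less sensitive */

	if (listen_ms > 5 * 100 &&	/* 5 * ATH5K_ANI_LISTEN_PERIOD */
	    ofdm_errors <= ofdm_low && cck_errors <= cck_low)
		return -1;	/* quiet for a while: become more sensitive */

	return 0;
}
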
+
+
+/*** ANI parameter control ***/
+
+/**
+ * ath5k_ani_set_noise_immunity_level() - Set noise immunity level
+ *
+ * @level: level between 0 and @ATH5K_ANI_MAX_NOISE_IMM_LVL
+ */
+void
+ath5k_ani_set_noise_immunity_level(struct ath5k_hw *ah, int level)
+{
+ /* TODO:
+ * ANI documents suggest the following five levels to use, but the HAL
+ * and ath9k only use the last two levels, making this
+ * essentially an on/off option. There *may* be a reason for this (???),
+ * so I stick with the HAL version for now...
+ */
+#if 0
+ const s8 hi[] = { -18, -18, -16, -14, -12 };
+ const s8 lo[] = { -52, -56, -60, -64, -70 };
+ const s8 sz[] = { -34, -41, -48, -55, -62 };
+ const s8 fr[] = { -70, -72, -75, -78, -80 };
+#else
+ const s8 sz[] = { -55, -62 };
+ const s8 lo[] = { -64, -70 };
+ const s8 hi[] = { -14, -12 };
+ const s8 fr[] = { -78, -80 };
+#endif
+ if (level < 0 || level >= ARRAY_SIZE(sz)) {
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
+ "level out of range %d", level);
+ return;
+ }
+
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_DESIRED_SIZE,
+ AR5K_PHY_DESIRED_SIZE_TOT, sz[level]);
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_AGCCOARSE,
+ AR5K_PHY_AGCCOARSE_LO, lo[level]);
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_AGCCOARSE,
+ AR5K_PHY_AGCCOARSE_HI, hi[level]);
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_SIG,
+ AR5K_PHY_SIG_FIRPWR, fr[level]);
+
+ ah->ah_sc->ani_state.noise_imm_level = level;
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "new level %d", level);
+}
+
+
+/**
+ * ath5k_ani_set_spur_immunity_level() - Set spur immunity level
+ *
+ * @level: level between 0 and @max_spur_level (the maximum level is dependent
+ * on the chip revision).
+ */
+void
+ath5k_ani_set_spur_immunity_level(struct ath5k_hw *ah, int level)
+{
+ const int val[] = { 2, 4, 6, 8, 10, 12, 14, 16 };
+
+ if (level < 0 || level >= ARRAY_SIZE(val) ||
+ level > ah->ah_sc->ani_state.max_spur_level) {
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
+ "level out of range %d", level);
+ return;
+ }
+
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_OFDM_SELFCORR,
+ AR5K_PHY_OFDM_SELFCORR_CYPWR_THR1, val[level]);
+
+ ah->ah_sc->ani_state.spur_level = level;
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "new level %d", level);
+}
+
+
+/**
+ * ath5k_ani_set_firstep_level() - Set "firstep" level
+ *
+ * @level: level between 0 and @ATH5K_ANI_MAX_FIRSTEP_LVL
+ */
+void
+ath5k_ani_set_firstep_level(struct ath5k_hw *ah, int level)
+{
+ const int val[] = { 0, 4, 8 };
+
+ if (level < 0 || level >= ARRAY_SIZE(val)) {
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
+ "level out of range %d", level);
+ return;
+ }
+
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_SIG,
+ AR5K_PHY_SIG_FIRSTEP, val[level]);
+
+ ah->ah_sc->ani_state.firstep_level = level;
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "new level %d", level);
+}
+
+
+/**
+ * ath5k_ani_set_ofdm_weak_signal_detection() - Control OFDM weak signal
+ * detection
+ *
+ * @on: turn on or off
+ */
+void
+ath5k_ani_set_ofdm_weak_signal_detection(struct ath5k_hw *ah, bool on)
+{
+ const int m1l[] = { 127, 50 };
+ const int m2l[] = { 127, 40 };
+ const int m1[] = { 127, 0x4d };
+ const int m2[] = { 127, 0x40 };
+ const int m2cnt[] = { 31, 16 };
+ const int m2lcnt[] = { 63, 48 };
+
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_WEAK_OFDM_LOW_THR,
+ AR5K_PHY_WEAK_OFDM_LOW_THR_M1, m1l[on]);
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_WEAK_OFDM_LOW_THR,
+ AR5K_PHY_WEAK_OFDM_LOW_THR_M2, m2l[on]);
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_WEAK_OFDM_HIGH_THR,
+ AR5K_PHY_WEAK_OFDM_HIGH_THR_M1, m1[on]);
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_WEAK_OFDM_HIGH_THR,
+ AR5K_PHY_WEAK_OFDM_HIGH_THR_M2, m2[on]);
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_WEAK_OFDM_HIGH_THR,
+ AR5K_PHY_WEAK_OFDM_HIGH_THR_M2_COUNT, m2cnt[on]);
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_WEAK_OFDM_LOW_THR,
+ AR5K_PHY_WEAK_OFDM_LOW_THR_M2_COUNT, m2lcnt[on]);
+
+ if (on)
+ AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_WEAK_OFDM_LOW_THR,
+ AR5K_PHY_WEAK_OFDM_LOW_THR_SELFCOR_EN);
+ else
+ AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_WEAK_OFDM_LOW_THR,
+ AR5K_PHY_WEAK_OFDM_LOW_THR_SELFCOR_EN);
+
+ ah->ah_sc->ani_state.ofdm_weak_sig = on;
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "turned %s",
+ on ? "on" : "off");
+}
+
+
+/**
+ * ath5k_ani_set_cck_weak_signal_detection() - control CCK weak signal detection
+ *
+ * @on: turn on or off
+ */
+void
+ath5k_ani_set_cck_weak_signal_detection(struct ath5k_hw *ah, bool on)
+{
+ const int val[] = { 8, 6 };
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_CCK_CROSSCORR,
+ AR5K_PHY_CCK_CROSSCORR_WEAK_SIG_THR, val[on]);
+ ah->ah_sc->ani_state.cck_weak_sig = on;
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "turned %s",
+ on ? "on" : "off");
+}
+
+
+/*** ANI algorithm ***/
+
+/**
+ * ath5k_ani_raise_immunity() - Increase noise immunity
+ *
+ * @ofdm_trigger: If this is true we are called because of too many OFDM errors;
+ * the algorithm will then tune more parameters.
+ *
+ * Try to raise noise immunity (=decrease sensitivity) in several steps
+ * depending on the average RSSI of the beacons we received.
+ */
+static void
+ath5k_ani_raise_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as,
+ bool ofdm_trigger)
+{
+ int rssi = ah->ah_beacon_rssi_avg.avg;
+
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "raise immunity (%s)",
+ ofdm_trigger ? "OFDM" : "CCK");
+
+ /* first: raise noise immunity */
+ if (as->noise_imm_level < ATH5K_ANI_MAX_NOISE_IMM_LVL) {
+ ath5k_ani_set_noise_immunity_level(ah, as->noise_imm_level + 1);
+ return;
+ }
+
+ /* only OFDM: raise spur immunity level */
+ if (ofdm_trigger &&
+ as->spur_level < ah->ah_sc->ani_state.max_spur_level) {
+ ath5k_ani_set_spur_immunity_level(ah, as->spur_level + 1);
+ return;
+ }
+
+ /* AP mode */
+ if (ah->ah_sc->opmode == NL80211_IFTYPE_AP) {
+ if (as->firstep_level < ATH5K_ANI_MAX_FIRSTEP_LVL)
+ ath5k_ani_set_firstep_level(ah, as->firstep_level + 1);
+ return;
+ }
+
+ /* STA and IBSS mode */
+
+ /* TODO: for IBSS mode it would be better to keep a beacon RSSI average
+ * for each neighbour node and use the minimum of these, to make sure we
+ * don't shut out a remote node by raising immunity too high. */
+
+ if (rssi > ATH5K_ANI_RSSI_THR_HIGH) {
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
+ "beacon RSSI high");
+ /* only OFDM: beacon RSSI is high, we can disable OFDM weak
+ * signal detection */
+ if (ofdm_trigger && as->ofdm_weak_sig == true) {
+ ath5k_ani_set_ofdm_weak_signal_detection(ah, false);
+ ath5k_ani_set_spur_immunity_level(ah, 0);
+ return;
+ }
+ /* as a last resort or CCK: raise firstep level */
+ if (as->firstep_level < ATH5K_ANI_MAX_FIRSTEP_LVL) {
+ ath5k_ani_set_firstep_level(ah, as->firstep_level + 1);
+ return;
+ }
+ } else if (rssi > ATH5K_ANI_RSSI_THR_LOW) {
+ /* beacon RSSI in mid range, we need OFDM weak signal detect,
+ * but can raise firstep level */
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
+ "beacon RSSI mid");
+ if (ofdm_trigger && as->ofdm_weak_sig == false)
+ ath5k_ani_set_ofdm_weak_signal_detection(ah, true);
+ if (as->firstep_level < ATH5K_ANI_MAX_FIRSTEP_LVL)
+ ath5k_ani_set_firstep_level(ah, as->firstep_level + 1);
+ return;
+ } else if (ah->ah_current_channel->band == IEEE80211_BAND_2GHZ) {
+ /* beacon RSSI is low. In B/G mode turn off OFDM weak signal
+ * detect and zero firstep level to maximize CCK sensitivity */
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
+ "beacon RSSI low, 2GHz");
+ if (ofdm_trigger && as->ofdm_weak_sig == true)
+ ath5k_ani_set_ofdm_weak_signal_detection(ah, false);
+ if (as->firstep_level > 0)
+ ath5k_ani_set_firstep_level(ah, 0);
+ return;
+ }
+
+ /* TODO: why not?:
+ if (as->cck_weak_sig == true) {
+ ath5k_ani_set_cck_weak_signal_detection(ah, false);
+ }
+ */
+}
+
+
+/**
+ * ath5k_ani_lower_immunity() - Decrease noise immunity
+ *
+ * Try to lower noise immunity (=increase sensitivity) in several steps
+ * depending on the average RSSI of the beacons we received.
+ */
+static void
+ath5k_ani_lower_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as)
+{
+ int rssi = ah->ah_beacon_rssi_avg.avg;
+
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "lower immunity");
+
+ if (ah->ah_sc->opmode == NL80211_IFTYPE_AP) {
+ /* AP mode */
+ if (as->firstep_level > 0) {
+ ath5k_ani_set_firstep_level(ah, as->firstep_level - 1);
+ return;
+ }
+ } else {
+ /* STA and IBSS mode (see TODO above) */
+ if (rssi > ATH5K_ANI_RSSI_THR_HIGH) {
+ /* beacon signal is high, leave OFDM weak signal
+ * detection off or it may oscillate
+ * TODO: who said it's off??? */
+ } else if (rssi > ATH5K_ANI_RSSI_THR_LOW) {
+ /* beacon RSSI is mid-range: turn on OFDM weak signal
+ * detection and next, lower firstep level */
+ if (as->ofdm_weak_sig == false) {
+ ath5k_ani_set_ofdm_weak_signal_detection(ah,
+ true);
+ return;
+ }
+ if (as->firstep_level > 0) {
+ ath5k_ani_set_firstep_level(ah,
+ as->firstep_level - 1);
+ return;
+ }
+ } else {
+ /* beacon signal is low: only reduce firstep level */
+ if (as->firstep_level > 0) {
+ ath5k_ani_set_firstep_level(ah,
+ as->firstep_level - 1);
+ return;
+ }
+ }
+ }
+
+ /* all modes */
+ if (as->spur_level > 0) {
+ ath5k_ani_set_spur_immunity_level(ah, as->spur_level - 1);
+ return;
+ }
+
+ /* finally, reduce noise immunity */
+ if (as->noise_imm_level > 0) {
+ ath5k_ani_set_noise_immunity_level(ah, as->noise_imm_level - 1);
+ return;
+ }
+}
+
+
+/**
+ * ath5k_hw_ani_get_listen_time() - Calculate time spent listening
+ *
+ * Return an approximation of the time spent "listening" in milliseconds (ms)
+ * since the last call of this function by deducting the cycles spent
+ * transmitting and receiving from the total cycle count.
+ * Save profile count values for debugging/statistics and because we might want
+ * to use them later.
+ *
+ * We assume no one else clears these registers!
+ */
+static int
+ath5k_hw_ani_get_listen_time(struct ath5k_hw *ah, struct ath5k_ani_state *as)
+{
+ int listen;
+
+ /* freeze */
+ ath5k_hw_reg_write(ah, AR5K_MIBC_FMC, AR5K_MIBC);
+ /* read */
+ as->pfc_cycles = ath5k_hw_reg_read(ah, AR5K_PROFCNT_CYCLE);
+ as->pfc_busy = ath5k_hw_reg_read(ah, AR5K_PROFCNT_RXCLR);
+ as->pfc_tx = ath5k_hw_reg_read(ah, AR5K_PROFCNT_TX);
+ as->pfc_rx = ath5k_hw_reg_read(ah, AR5K_PROFCNT_RX);
+ /* clear */
+ ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_TX);
+ ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_RX);
+ ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_RXCLR);
+ ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_CYCLE);
+ /* un-freeze */
+ ath5k_hw_reg_write(ah, 0, AR5K_MIBC);
+
+ /* TODO: where does 44000 come from? (11g clock rate?) */
+ listen = (as->pfc_cycles - as->pfc_rx - as->pfc_tx) / 44000;
+
+ if (as->pfc_cycles == 0 || listen < 0)
+ return 0;
+ return listen;
+}
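
A quick worked example of the formula above, assuming (as the TODO hints) that the profile counters tick at roughly 44 MHz so that 44,000 ticks correspond to one millisecond; all counter values below are made up:

#include <stdio.h>

int main(void)
{
	unsigned int cycles = 4400000;	/* total ticks since the last read */
	unsigned int rx     = 440000;	/* ticks spent receiving           */
	unsigned int tx     = 220000;	/* ticks spent transmitting        */
	int listen = (cycles - rx - tx) / 44000;

	printf("listened for about %d ms\n", listen);	/* prints 85 */
	return 0;
}
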
+
+
+/**
+ * ath5k_ani_save_and_clear_phy_errors() - Clear and save PHY error counters
+ *
+ * Clear the PHY error counters as soon as possible, since this might be called
+ * from a MIB interrupt and we want to make sure we don't get interrupted again.
+ * Add the count of CCK and OFDM errors to our internal state, so it can be used
+ * by the algorithm later.
+ *
+ * Will be called from interrupt and tasklet context.
+ * Returns 0 if both counters are zero.
+ */
+static int
+ath5k_ani_save_and_clear_phy_errors(struct ath5k_hw *ah,
+ struct ath5k_ani_state *as)
+{
+ unsigned int ofdm_err, cck_err;
+
+ if (!ah->ah_capabilities.cap_has_phyerr_counters)
+ return 0;
+
+ ofdm_err = ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT1);
+ cck_err = ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT2);
+
+ /* reset counters first, we might be in a hurry (interrupt) */
+ ath5k_hw_reg_write(ah, ATH5K_PHYERR_CNT_MAX - ATH5K_ANI_OFDM_TRIG_HIGH,
+ AR5K_PHYERR_CNT1);
+ ath5k_hw_reg_write(ah, ATH5K_PHYERR_CNT_MAX - ATH5K_ANI_CCK_TRIG_HIGH,
+ AR5K_PHYERR_CNT2);
+
+ ofdm_err = ATH5K_ANI_OFDM_TRIG_HIGH - (ATH5K_PHYERR_CNT_MAX - ofdm_err);
+ cck_err = ATH5K_ANI_CCK_TRIG_HIGH - (ATH5K_PHYERR_CNT_MAX - cck_err);
+
+ /* sometimes both can be zero, especially when there is a superfluous
+ * second interrupt. Detect that here and return early. */
+ if (ofdm_err <= 0 && cck_err <= 0)
+ return 0;
+
+ /* avoid negative values should one of the registers overflow */
+ if (ofdm_err > 0) {
+ as->ofdm_errors += ofdm_err;
+ as->sum_ofdm_errors += ofdm_err;
+ }
+ if (cck_err > 0) {
+ as->cck_errors += cck_err;
+ as->sum_cck_errors += cck_err;
+ }
+ return 1;
+}
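
The reconstruction above only works because ath5k_enable_phy_err_counters() (further down) pre-loads each counter with (ATH5K_PHYERR_CNT_MAX - trigger), so the hardware overflows and raises a MIB interrupt after exactly `trigger` errors. A small stand-alone check of that arithmetic, with stand-in constants and an invented counter reading:

#include <stdio.h>

#define CNT_MAX   0x0fffffffU	/* stand-in for ATH5K_PHYERR_CNT_MAX    */
#define TRIG_HIGH 500U		/* stand-in for ATH5K_ANI_OFDM_TRIG_HIGH */

int main(void)
{
	unsigned int preload = CNT_MAX - TRIG_HIGH;	/* written at enable time   */
	unsigned int v = preload + 123;			/* counter after 123 errors */
	unsigned int errors = TRIG_HIGH - (CNT_MAX - v);/* driver's reconstruction  */

	printf("errors since reset: %u\n", errors);	/* prints 123 */
	return 0;
}
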
+
+
+/**
+ * ath5k_ani_period_restart() - Restart ANI period
+ *
+ * Just reset counters, so they are clear for the next "ani period".
+ */
+static void
+ath5k_ani_period_restart(struct ath5k_hw *ah, struct ath5k_ani_state *as)
+{
+ /* keep last values for debugging */
+ as->last_ofdm_errors = as->ofdm_errors;
+ as->last_cck_errors = as->cck_errors;
+ as->last_listen = as->listen_time;
+
+ as->ofdm_errors = 0;
+ as->cck_errors = 0;
+ as->listen_time = 0;
+}
+
+
+/**
+ * ath5k_ani_calibration() - The main ANI calibration function
+ *
+ * We count OFDM and CCK errors relative to the time where we did not send or
+ * receive ("listen" time) and raise or lower immunity accordingly.
+ * This is called regularly (every second) from the calibration timer, but also
+ * when an error threshold has been reached.
+ *
+ * In order to synchronize access from different contexts, this should be
+ * called only indirectly by scheduling the ANI tasklet!
+ */
+void
+ath5k_ani_calibration(struct ath5k_hw *ah)
+{
+ struct ath5k_ani_state *as = &ah->ah_sc->ani_state;
+ int listen, ofdm_high, ofdm_low, cck_high, cck_low;
+
+ if (as->ani_mode != ATH5K_ANI_MODE_AUTO)
+ return;
+
+ /* get listen time since last call and add it to the counter because we
+ * might not have restarted the "ani period" last time */
+ listen = ath5k_hw_ani_get_listen_time(ah, as);
+ as->listen_time += listen;
+
+ ath5k_ani_save_and_clear_phy_errors(ah, as);
+
+ ofdm_high = as->listen_time * ATH5K_ANI_OFDM_TRIG_HIGH / 1000;
+ cck_high = as->listen_time * ATH5K_ANI_CCK_TRIG_HIGH / 1000;
+ ofdm_low = as->listen_time * ATH5K_ANI_OFDM_TRIG_LOW / 1000;
+ cck_low = as->listen_time * ATH5K_ANI_CCK_TRIG_LOW / 1000;
+
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
+ "listen %d (now %d)", as->listen_time, listen);
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
+ "check high ofdm %d/%d cck %d/%d",
+ as->ofdm_errors, ofdm_high, as->cck_errors, cck_high);
+
+ if (as->ofdm_errors > ofdm_high || as->cck_errors > cck_high) {
+ /* too many PHY errors - we have to raise immunity */
+ bool ofdm_flag = as->ofdm_errors > ofdm_high ? true : false;
+ ath5k_ani_raise_immunity(ah, as, ofdm_flag);
+ ath5k_ani_period_restart(ah, as);
+
+ } else if (as->listen_time > 5 * ATH5K_ANI_LISTEN_PERIOD) {
+ /* If more than 5 (TODO: why 5?) periods have passed and we got
+ * relatively few errors we can try to lower immunity */
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
+ "check low ofdm %d/%d cck %d/%d",
+ as->ofdm_errors, ofdm_low, as->cck_errors, cck_low);
+
+ if (as->ofdm_errors <= ofdm_low && as->cck_errors <= cck_low)
+ ath5k_ani_lower_immunity(ah, as);
+
+ ath5k_ani_period_restart(ah, as);
+ }
+}
+
+
+/*** INTERRUPT HANDLER ***/
+
+/**
+ * ath5k_ani_mib_intr() - Interrupt handler for ANI MIB counters
+ *
+ * Just read & reset the registers quickly, so they don't generate more
+ * interrupts, save the counters and schedule the tasklet to decide whether
+ * to raise immunity or not.
+ *
+ * We just need to handle PHY error counters, ath5k_hw_update_mib_counters()
+ * should take care of all "normal" MIB interrupts.
+ */
+void
+ath5k_ani_mib_intr(struct ath5k_hw *ah)
+{
+ struct ath5k_ani_state *as = &ah->ah_sc->ani_state;
+
+ /* nothing to do here if HW does not have PHY error counters - they
+ * can't be the reason for the MIB interrupt then */
+ if (!ah->ah_capabilities.cap_has_phyerr_counters)
+ return;
+
+ /* not in use but clear anyway */
+ ath5k_hw_reg_write(ah, 0, AR5K_OFDM_FIL_CNT);
+ ath5k_hw_reg_write(ah, 0, AR5K_CCK_FIL_CNT);
+
+ if (ah->ah_sc->ani_state.ani_mode != ATH5K_ANI_MODE_AUTO)
+ return;
+
+ /* if one of the errors triggered, we can get a superfluous second
+ * interrupt, even though we have already reset the register. the
+ * function detects that so we can return early */
+ if (ath5k_ani_save_and_clear_phy_errors(ah, as) == 0)
+ return;
+
+ if (as->ofdm_errors > ATH5K_ANI_OFDM_TRIG_HIGH ||
+ as->cck_errors > ATH5K_ANI_CCK_TRIG_HIGH)
+ tasklet_schedule(&ah->ah_sc->ani_tasklet);
+}
+
+
+/**
+ * ath5k_ani_phy_error_report() - Used by older HW to report PHY errors
+ *
+ * This is used by hardware without PHY error counters to report PHY errors
+ * on a frame-by-frame basis, instead of the interrupt.
+ */
+void
+ath5k_ani_phy_error_report(struct ath5k_hw *ah,
+ enum ath5k_phy_error_code phyerr)
+{
+ struct ath5k_ani_state *as = &ah->ah_sc->ani_state;
+
+ if (phyerr == AR5K_RX_PHY_ERROR_OFDM_TIMING) {
+ as->ofdm_errors++;
+ if (as->ofdm_errors > ATH5K_ANI_OFDM_TRIG_HIGH)
+ tasklet_schedule(&ah->ah_sc->ani_tasklet);
+ } else if (phyerr == AR5K_RX_PHY_ERROR_CCK_TIMING) {
+ as->cck_errors++;
+ if (as->cck_errors > ATH5K_ANI_CCK_TRIG_HIGH)
+ tasklet_schedule(&ah->ah_sc->ani_tasklet);
+ }
+}
+
+
+/*** INIT ***/
+
+/**
+ * ath5k_enable_phy_err_counters() - Enable PHY error counters
+ *
+ * Enable PHY error counters for OFDM and CCK timing errors.
+ */
+static void
+ath5k_enable_phy_err_counters(struct ath5k_hw *ah)
+{
+ ath5k_hw_reg_write(ah, ATH5K_PHYERR_CNT_MAX - ATH5K_ANI_OFDM_TRIG_HIGH,
+ AR5K_PHYERR_CNT1);
+ ath5k_hw_reg_write(ah, ATH5K_PHYERR_CNT_MAX - ATH5K_ANI_CCK_TRIG_HIGH,
+ AR5K_PHYERR_CNT2);
+ ath5k_hw_reg_write(ah, AR5K_PHY_ERR_FIL_OFDM, AR5K_PHYERR_CNT1_MASK);
+ ath5k_hw_reg_write(ah, AR5K_PHY_ERR_FIL_CCK, AR5K_PHYERR_CNT2_MASK);
+
+ /* not in use */
+ ath5k_hw_reg_write(ah, 0, AR5K_OFDM_FIL_CNT);
+ ath5k_hw_reg_write(ah, 0, AR5K_CCK_FIL_CNT);
+}
+
+
+/**
+ * ath5k_disable_phy_err_counters() - Disable PHY error counters
+ *
+ * Disable PHY error counters for OFDM and CCK timing errors.
+ */
+static void
+ath5k_disable_phy_err_counters(struct ath5k_hw *ah)
+{
+ ath5k_hw_reg_write(ah, 0, AR5K_PHYERR_CNT1);
+ ath5k_hw_reg_write(ah, 0, AR5K_PHYERR_CNT2);
+ ath5k_hw_reg_write(ah, 0, AR5K_PHYERR_CNT1_MASK);
+ ath5k_hw_reg_write(ah, 0, AR5K_PHYERR_CNT2_MASK);
+
+ /* not in use */
+ ath5k_hw_reg_write(ah, 0, AR5K_OFDM_FIL_CNT);
+ ath5k_hw_reg_write(ah, 0, AR5K_CCK_FIL_CNT);
+}
+
+
+/**
+ * ath5k_ani_init() - Initialize ANI
+ * @mode: Which mode to use (auto, manual high, manual low, off)
+ *
+ * Initialize ANI according to mode.
+ */
+void
+ath5k_ani_init(struct ath5k_hw *ah, enum ath5k_ani_mode mode)
+{
+ /* ANI is only possible on 5212 and newer */
+ if (ah->ah_version < AR5K_AR5212)
+ return;
+
+ /* clear old state information */
+ memset(&ah->ah_sc->ani_state, 0, sizeof(ah->ah_sc->ani_state));
+
+ /* older hardware has more spur levels than newer */
+ if (ah->ah_mac_srev < AR5K_SREV_AR2414)
+ ah->ah_sc->ani_state.max_spur_level = 7;
+ else
+ ah->ah_sc->ani_state.max_spur_level = 2;
+
+ /* initial values for our ani parameters */
+ if (mode == ATH5K_ANI_MODE_OFF) {
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "ANI off\n");
+ } else if (mode == ATH5K_ANI_MODE_MANUAL_LOW) {
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
+ "ANI manual low -> high sensitivity\n");
+ ath5k_ani_set_noise_immunity_level(ah, 0);
+ ath5k_ani_set_spur_immunity_level(ah, 0);
+ ath5k_ani_set_firstep_level(ah, 0);
+ ath5k_ani_set_ofdm_weak_signal_detection(ah, true);
+ ath5k_ani_set_cck_weak_signal_detection(ah, true);
+ } else if (mode == ATH5K_ANI_MODE_MANUAL_HIGH) {
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
+ "ANI manual high -> low sensitivity\n");
+ ath5k_ani_set_noise_immunity_level(ah,
+ ATH5K_ANI_MAX_NOISE_IMM_LVL);
+ ath5k_ani_set_spur_immunity_level(ah,
+ ah->ah_sc->ani_state.max_spur_level);
+ ath5k_ani_set_firstep_level(ah, ATH5K_ANI_MAX_FIRSTEP_LVL);
+ ath5k_ani_set_ofdm_weak_signal_detection(ah, false);
+ ath5k_ani_set_cck_weak_signal_detection(ah, false);
+ } else if (mode == ATH5K_ANI_MODE_AUTO) {
+ ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "ANI auto\n");
+ ath5k_ani_set_noise_immunity_level(ah, 0);
+ ath5k_ani_set_spur_immunity_level(ah, 0);
+ ath5k_ani_set_firstep_level(ah, 0);
+ ath5k_ani_set_ofdm_weak_signal_detection(ah, true);
+ ath5k_ani_set_cck_weak_signal_detection(ah, false);
+ }
+
+ /* newer hardware has PHY error counter registers which we can use to
+ * get OFDM and CCK error counts. older hardware has to set rxfilter and
+ * report every single PHY error by calling ath5k_ani_phy_error_report()
+ */
+ if (mode == ATH5K_ANI_MODE_AUTO) {
+ if (ah->ah_capabilities.cap_has_phyerr_counters)
+ ath5k_enable_phy_err_counters(ah);
+ else
+ ath5k_hw_set_rx_filter(ah, ath5k_hw_get_rx_filter(ah) |
+ AR5K_RX_FILTER_PHYERR);
+ } else {
+ if (ah->ah_capabilities.cap_has_phyerr_counters)
+ ath5k_disable_phy_err_counters(ah);
+ else
+ ath5k_hw_set_rx_filter(ah, ath5k_hw_get_rx_filter(ah) &
+ ~AR5K_RX_FILTER_PHYERR);
+ }
+
+ ah->ah_sc->ani_state.ani_mode = mode;
+}
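
How this file gets hooked into the rest of the driver is not part of this excerpt; as a hedged sketch, the entry points defined here would typically be driven from three places (reset/channel change, the periodic calibration path, and the MIB interrupt):

/* Sketch only: plausible call sites for the ANI entry points above.
 * The real wiring into base.c is not shown in this diff, and calling
 * all three from one function is purely illustrative. */
static void example_ani_call_sites(struct ath5k_hw *ah)
{
	/* after a hardware reset or channel change */
	ath5k_ani_init(ah, ATH5K_ANI_MODE_AUTO);

	/* roughly once per second from the calibration path; the real
	 * driver goes through the ANI tasklet so that timer and
	 * interrupt contexts do not race */
	ath5k_ani_calibration(ah);

	/* from the interrupt handler when a MIB interrupt is pending */
	ath5k_ani_mib_intr(ah);
}
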
+
+
+/*** DEBUG ***/
+
+#ifdef CONFIG_ATH5K_DEBUG
+
+void
+ath5k_ani_print_counters(struct ath5k_hw *ah)
+{
+ /* clears too */
+ printk(KERN_NOTICE "ACK fail\t%d\n",
+ ath5k_hw_reg_read(ah, AR5K_ACK_FAIL));
+ printk(KERN_NOTICE "RTS fail\t%d\n",
+ ath5k_hw_reg_read(ah, AR5K_RTS_FAIL));
+ printk(KERN_NOTICE "RTS success\t%d\n",
+ ath5k_hw_reg_read(ah, AR5K_RTS_OK));
+ printk(KERN_NOTICE "FCS error\t%d\n",
+ ath5k_hw_reg_read(ah, AR5K_FCS_FAIL));
+
+ /* no clear */
+ printk(KERN_NOTICE "tx\t%d\n",
+ ath5k_hw_reg_read(ah, AR5K_PROFCNT_TX));
+ printk(KERN_NOTICE "rx\t%d\n",
+ ath5k_hw_reg_read(ah, AR5K_PROFCNT_RX));
+ printk(KERN_NOTICE "busy\t%d\n",
+ ath5k_hw_reg_read(ah, AR5K_PROFCNT_RXCLR));
+ printk(KERN_NOTICE "cycles\t%d\n",
+ ath5k_hw_reg_read(ah, AR5K_PROFCNT_CYCLE));
+
+ printk(KERN_NOTICE "AR5K_PHYERR_CNT1\t%d\n",
+ ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT1));
+ printk(KERN_NOTICE "AR5K_PHYERR_CNT2\t%d\n",
+ ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT2));
+ printk(KERN_NOTICE "AR5K_OFDM_FIL_CNT\t%d\n",
+ ath5k_hw_reg_read(ah, AR5K_OFDM_FIL_CNT));
+ printk(KERN_NOTICE "AR5K_CCK_FIL_CNT\t%d\n",
+ ath5k_hw_reg_read(ah, AR5K_CCK_FIL_CNT));
+}
+
+#endif
diff --git a/drivers/net/wireless/ath/ath5k/ani.h b/drivers/net/wireless/ath/ath5k/ani.h
new file mode 100644
index 000000000000..55cf26d8522c
--- /dev/null
+++ b/drivers/net/wireless/ath/ath5k/ani.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2010 Bruno Randolf <br1@einfach.org>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef ANI_H
+#define ANI_H
+
+/* these thresholds are relative to the ATH5K_ANI_LISTEN_PERIOD */
+#define ATH5K_ANI_LISTEN_PERIOD 100
+#define ATH5K_ANI_OFDM_TRIG_HIGH 500
+#define ATH5K_ANI_OFDM_TRIG_LOW 200
+#define ATH5K_ANI_CCK_TRIG_HIGH 200
+#define ATH5K_ANI_CCK_TRIG_LOW 100
+
+/* average beacon RSSI thresholds */
+#define ATH5K_ANI_RSSI_THR_HIGH 40
+#define ATH5K_ANI_RSSI_THR_LOW 7
+
+/* maximum available levels */
+#define ATH5K_ANI_MAX_FIRSTEP_LVL 2
+#define ATH5K_ANI_MAX_NOISE_IMM_LVL 1
+
+
+/**
+ * enum ath5k_ani_mode - mode for ANI / noise sensitivity
+ *
+ * @ATH5K_ANI_MODE_OFF: Turn ANI off. This can be useful to just stop the ANI
+ * algorithm after it has been running in auto mode.
+ * @ATH5K_ANI_MODE_MANUAL_LOW: Manually set all immunity parameters to low,
+ * maximizing sensitivity. ANI will not run.
+ * @ATH5K_ANI_MODE_MANUAL_HIGH: Manually set all immunity parameters to high,
+ * minimizing sensitivity. ANI will not run.
+ * @ATH5K_ANI_MODE_AUTO: Automatically control immunity parameters based on the
+ * number of OFDM and CCK frame errors (default).
+ */
+enum ath5k_ani_mode {
+ ATH5K_ANI_MODE_OFF = 0,
+ ATH5K_ANI_MODE_MANUAL_LOW = 1,
+ ATH5K_ANI_MODE_MANUAL_HIGH = 2,
+ ATH5K_ANI_MODE_AUTO = 3
+};
+
+
+/**
+ * struct ath5k_ani_state - ANI state and associated counters
+ *
+ * @max_spur_level: the maximum spur level is chip dependent
+ */
+struct ath5k_ani_state {
+ enum ath5k_ani_mode ani_mode;
+
+ /* state */
+ int noise_imm_level;
+ int spur_level;
+ int firstep_level;
+ bool ofdm_weak_sig;
+ bool cck_weak_sig;
+
+ int max_spur_level;
+
+ /* used by the algorithm */
+ unsigned int listen_time;
+ unsigned int ofdm_errors;
+ unsigned int cck_errors;
+
+ /* debug/statistics only: numbers from last ANI calibration */
+ unsigned int pfc_tx;
+ unsigned int pfc_rx;
+ unsigned int pfc_busy;
+ unsigned int pfc_cycles;
+ unsigned int last_listen;
+ unsigned int last_ofdm_errors;
+ unsigned int last_cck_errors;
+ unsigned int sum_ofdm_errors;
+ unsigned int sum_cck_errors;
+};
+
+void ath5k_ani_init(struct ath5k_hw *ah, enum ath5k_ani_mode mode);
+void ath5k_ani_mib_intr(struct ath5k_hw *ah);
+void ath5k_ani_calibration(struct ath5k_hw *ah);
+void ath5k_ani_phy_error_report(struct ath5k_hw *ah,
+ enum ath5k_phy_error_code phyerr);
+
+/* for manual control */
+void ath5k_ani_set_noise_immunity_level(struct ath5k_hw *ah, int level);
+void ath5k_ani_set_spur_immunity_level(struct ath5k_hw *ah, int level);
+void ath5k_ani_set_firstep_level(struct ath5k_hw *ah, int level);
+void ath5k_ani_set_ofdm_weak_signal_detection(struct ath5k_hw *ah, bool on);
+void ath5k_ani_set_cck_weak_signal_detection(struct ath5k_hw *ah, bool on);
+
+void ath5k_ani_print_counters(struct ath5k_hw *ah);
+
+#endif /* ANI_H */
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index ac67f02e26d8..2785946f659a 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -202,7 +202,8 @@
#define AR5K_TUNE_MAX_TXPOWER 63
#define AR5K_TUNE_DEFAULT_TXPOWER 25
#define AR5K_TUNE_TPC_TXPOWER false
-#define AR5K_TUNE_HWTXTRIES 4
+#define ATH5K_TUNE_CALIBRATION_INTERVAL_FULL 10000 /* 10 sec */
+#define ATH5K_TUNE_CALIBRATION_INTERVAL_ANI 1000 /* 1 sec */
#define AR5K_INIT_CARR_SENSE_EN 1
@@ -614,28 +615,6 @@ struct ath5k_rx_status {
#define AR5K_BEACON_ENA 0x00800000 /*enable beacon xmit*/
#define AR5K_BEACON_RESET_TSF 0x01000000 /*force a TSF reset*/
-#if 0
-/**
- * struct ath5k_beacon_state - Per-station beacon timer state.
- * @bs_interval: in TU's, can also include the above flags
- * @bs_cfp_max_duration: if non-zero hw is setup to coexist with a
- * Point Coordination Function capable AP
- */
-struct ath5k_beacon_state {
- u32 bs_next_beacon;
- u32 bs_next_dtim;
- u32 bs_interval;
- u8 bs_dtim_period;
- u8 bs_cfp_period;
- u16 bs_cfp_max_duration;
- u16 bs_cfp_du_remain;
- u16 bs_tim_offset;
- u16 bs_sleep_duration;
- u16 bs_bmiss_threshold;
- u32 bs_cfp_next;
-};
-#endif
-
/*
* TSF to TU conversion:
@@ -822,9 +801,9 @@ struct ath5k_athchan_2ghz {
* @AR5K_INT_TXURN: received when we should increase the TX trigger threshold
* We currently do increments on interrupt by
* (AR5K_TUNE_MAX_TX_FIFO_THRES - current_trigger_level) / 2
- * @AR5K_INT_MIB: Indicates the Management Information Base counters should be
- * checked. We should do this with ath5k_hw_update_mib_counters() but
- * it seems we should also then do some noise immunity work.
+ * @AR5K_INT_MIB: Indicates that either the Management Information Base
+ * counters or one of the PHY error counters reached the maximum value and
+ * should be read and cleared.
* @AR5K_INT_RXPHY: RX PHY Error
* @AR5K_INT_RXKCM: RX Key cache miss
* @AR5K_INT_SWBA: SoftWare Beacon Alert - indicates its time to send a
@@ -912,10 +891,11 @@ enum ath5k_int {
AR5K_INT_NOCARD = 0xffffffff
};
-/* Software interrupts used for calibration */
-enum ath5k_software_interrupt {
- AR5K_SWI_FULL_CALIBRATION = 0x01,
- AR5K_SWI_SHORT_CALIBRATION = 0x02,
+/* mask which calibration is active at the moment */
+enum ath5k_calibration_mask {
+ AR5K_CALIBRATION_FULL = 0x01,
+ AR5K_CALIBRATION_SHORT = 0x02,
+ AR5K_CALIBRATION_ANI = 0x04,
};
/*
@@ -1004,6 +984,8 @@ struct ath5k_capabilities {
struct {
u8 q_tx_num;
} cap_queues;
+
+ bool cap_has_phyerr_counters;
};
/* size of noise floor history (keep it a power of two) */
@@ -1014,6 +996,15 @@ struct ath5k_nfcal_hist
s16 nfval[ATH5K_NF_CAL_HIST_MAX]; /* last few noise floors */
};
+/**
+ * struct ath5k_avg_val - Helper structure for average calculation
+ * @avg: contains the actual average value
+ * @avg_weight: used internally during calculation to avoid rounding errors
+ */
+struct ath5k_avg_val {
+ int avg;
+ int avg_weight;
+};
/***************************************\
HARDWARE ABSTRACTION LAYER STRUCTURE
@@ -1028,7 +1019,6 @@ struct ath5k_nfcal_hist
/* TODO: Clean up and merge with ath5k_softc */
struct ath5k_hw {
- u32 ah_magic;
struct ath_common common;
struct ath5k_softc *ah_sc;
@@ -1036,7 +1026,6 @@ struct ath5k_hw {
enum ath5k_int ah_imr;
- enum nl80211_iftype ah_op_mode;
struct ieee80211_channel *ah_current_channel;
bool ah_turbo;
bool ah_calibration;
@@ -1049,7 +1038,6 @@ struct ath5k_hw {
u32 ah_phy;
u32 ah_mac_srev;
u16 ah_mac_version;
- u16 ah_mac_revision;
u16 ah_phy_revision;
u16 ah_radio_5ghz_revision;
u16 ah_radio_2ghz_revision;
@@ -1071,8 +1059,6 @@ struct ath5k_hw {
u8 ah_def_ant;
bool ah_software_retry;
- int ah_gpio_npins;
-
struct ath5k_capabilities ah_capabilities;
struct ath5k_txq_info ah_txq[AR5K_NUM_TX_QUEUES];
@@ -1123,17 +1109,18 @@ struct ath5k_hw {
struct ath5k_nfcal_hist ah_nfcal_hist;
+ /* average beacon RSSI in our BSS (used by ANI) */
+ struct ath5k_avg_val ah_beacon_rssi_avg;
+
/* noise floor from last periodic calibration */
s32 ah_noise_floor;
/* Calibration timestamp */
- unsigned long ah_cal_tstamp;
-
- /* Calibration interval (secs) */
- u8 ah_cal_intval;
+ unsigned long ah_cal_next_full;
+ unsigned long ah_cal_next_ani;
- /* Software interrupt mask */
- u8 ah_swi_mask;
+ /* Calibration mask */
+ u8 ah_cal_mask;
/*
* Function pointers
@@ -1141,9 +1128,9 @@ struct ath5k_hw {
int (*ah_setup_rx_desc)(struct ath5k_hw *ah, struct ath5k_desc *desc,
u32 size, unsigned int flags);
int (*ah_setup_tx_desc)(struct ath5k_hw *, struct ath5k_desc *,
- unsigned int, unsigned int, enum ath5k_pkt_type, unsigned int,
+ unsigned int, unsigned int, int, enum ath5k_pkt_type,
unsigned int, unsigned int, unsigned int, unsigned int,
- unsigned int, unsigned int, unsigned int);
+ unsigned int, unsigned int, unsigned int, unsigned int);
int (*ah_setup_mrr_tx_desc)(struct ath5k_hw *, struct ath5k_desc *,
unsigned int, unsigned int, unsigned int, unsigned int,
unsigned int, unsigned int);
@@ -1158,158 +1145,145 @@ struct ath5k_hw {
*/
/* Attach/Detach Functions */
-extern int ath5k_hw_attach(struct ath5k_softc *sc);
-extern void ath5k_hw_detach(struct ath5k_hw *ah);
+int ath5k_hw_attach(struct ath5k_softc *sc);
+void ath5k_hw_detach(struct ath5k_hw *ah);
/* LED functions */
-extern int ath5k_init_leds(struct ath5k_softc *sc);
-extern void ath5k_led_enable(struct ath5k_softc *sc);
-extern void ath5k_led_off(struct ath5k_softc *sc);
-extern void ath5k_unregister_leds(struct ath5k_softc *sc);
+int ath5k_init_leds(struct ath5k_softc *sc);
+void ath5k_led_enable(struct ath5k_softc *sc);
+void ath5k_led_off(struct ath5k_softc *sc);
+void ath5k_unregister_leds(struct ath5k_softc *sc);
/* Reset Functions */
-extern int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial);
-extern int ath5k_hw_on_hold(struct ath5k_hw *ah);
-extern int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode, struct ieee80211_channel *channel, bool change_channel);
+int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial);
+int ath5k_hw_on_hold(struct ath5k_hw *ah);
+int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
+ struct ieee80211_channel *channel, bool change_channel);
+int ath5k_hw_register_timeout(struct ath5k_hw *ah, u32 reg, u32 flag, u32 val,
+ bool is_set);
/* Power management functions */
-extern int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode, bool set_chip, u16 sleep_duration);
/* DMA Related Functions */
-extern void ath5k_hw_start_rx_dma(struct ath5k_hw *ah);
-extern int ath5k_hw_stop_rx_dma(struct ath5k_hw *ah);
-extern u32 ath5k_hw_get_rxdp(struct ath5k_hw *ah);
-extern void ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr);
-extern int ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue);
-extern int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue);
-extern u32 ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue);
-extern int ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue,
+void ath5k_hw_start_rx_dma(struct ath5k_hw *ah);
+int ath5k_hw_stop_rx_dma(struct ath5k_hw *ah);
+u32 ath5k_hw_get_rxdp(struct ath5k_hw *ah);
+void ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr);
+int ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue);
+int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue);
+u32 ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue);
+int ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue,
u32 phys_addr);
-extern int ath5k_hw_update_tx_triglevel(struct ath5k_hw *ah, bool increase);
+int ath5k_hw_update_tx_triglevel(struct ath5k_hw *ah, bool increase);
/* Interrupt handling */
-extern bool ath5k_hw_is_intr_pending(struct ath5k_hw *ah);
-extern int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask);
-extern enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum
-ath5k_int new_mask);
-extern void ath5k_hw_update_mib_counters(struct ath5k_hw *ah, struct ieee80211_low_level_stats *stats);
+bool ath5k_hw_is_intr_pending(struct ath5k_hw *ah);
+int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask);
+enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask);
+void ath5k_hw_update_mib_counters(struct ath5k_hw *ah);
/* EEPROM access functions */
-extern int ath5k_eeprom_init(struct ath5k_hw *ah);
-extern void ath5k_eeprom_detach(struct ath5k_hw *ah);
-extern int ath5k_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac);
-extern bool ath5k_eeprom_is_hb63(struct ath5k_hw *ah);
+int ath5k_eeprom_init(struct ath5k_hw *ah);
+void ath5k_eeprom_detach(struct ath5k_hw *ah);
+int ath5k_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac);
/* Protocol Control Unit Functions */
-extern int ath5k_hw_set_opmode(struct ath5k_hw *ah);
-extern void ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class);
+extern int ath5k_hw_set_opmode(struct ath5k_hw *ah, enum nl80211_iftype opmode);
+void ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class);
/* BSSID Functions */
-extern int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac);
-extern void ath5k_hw_set_associd(struct ath5k_hw *ah);
-extern void ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask);
+int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac);
+void ath5k_hw_set_associd(struct ath5k_hw *ah);
+void ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask);
/* Receive start/stop functions */
-extern void ath5k_hw_start_rx_pcu(struct ath5k_hw *ah);
-extern void ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah);
+void ath5k_hw_start_rx_pcu(struct ath5k_hw *ah);
+void ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah);
/* RX Filter functions */
-extern void ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1);
-extern int ath5k_hw_set_mcast_filter_idx(struct ath5k_hw *ah, u32 index);
-extern int ath5k_hw_clear_mcast_filter_idx(struct ath5k_hw *ah, u32 index);
-extern u32 ath5k_hw_get_rx_filter(struct ath5k_hw *ah);
-extern void ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter);
+void ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1);
+u32 ath5k_hw_get_rx_filter(struct ath5k_hw *ah);
+void ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter);
/* Beacon control functions */
-extern u32 ath5k_hw_get_tsf32(struct ath5k_hw *ah);
-extern u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah);
-extern void ath5k_hw_set_tsf64(struct ath5k_hw *ah, u64 tsf64);
-extern void ath5k_hw_reset_tsf(struct ath5k_hw *ah);
-extern void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval);
-#if 0
-extern int ath5k_hw_set_beacon_timers(struct ath5k_hw *ah, const struct ath5k_beacon_state *state);
-extern void ath5k_hw_reset_beacon(struct ath5k_hw *ah);
-extern int ath5k_hw_beaconq_finish(struct ath5k_hw *ah, unsigned long phys_addr);
-#endif
+u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah);
+void ath5k_hw_set_tsf64(struct ath5k_hw *ah, u64 tsf64);
+void ath5k_hw_reset_tsf(struct ath5k_hw *ah);
+void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval);
/* ACK bit rate */
void ath5k_hw_set_ack_bitrate_high(struct ath5k_hw *ah, bool high);
-/* ACK/CTS Timeouts */
-extern int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout);
-extern unsigned int ath5k_hw_get_ack_timeout(struct ath5k_hw *ah);
-extern int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout);
-extern unsigned int ath5k_hw_get_cts_timeout(struct ath5k_hw *ah);
/* Clock rate related functions */
unsigned int ath5k_hw_htoclock(struct ath5k_hw *ah, unsigned int usec);
unsigned int ath5k_hw_clocktoh(struct ath5k_hw *ah, unsigned int clock);
unsigned int ath5k_hw_get_clockrate(struct ath5k_hw *ah);
/* Key table (WEP) functions */
-extern int ath5k_hw_reset_key(struct ath5k_hw *ah, u16 entry);
-extern int ath5k_hw_is_key_valid(struct ath5k_hw *ah, u16 entry);
-extern int ath5k_hw_set_key(struct ath5k_hw *ah, u16 entry, const struct ieee80211_key_conf *key, const u8 *mac);
-extern int ath5k_hw_set_key_lladdr(struct ath5k_hw *ah, u16 entry, const u8 *mac);
+int ath5k_hw_reset_key(struct ath5k_hw *ah, u16 entry);
+int ath5k_hw_set_key(struct ath5k_hw *ah, u16 entry,
+ const struct ieee80211_key_conf *key, const u8 *mac);
+int ath5k_hw_set_key_lladdr(struct ath5k_hw *ah, u16 entry, const u8 *mac);
/* Queue Control Unit, DFS Control Unit Functions */
-extern int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue, struct ath5k_txq_info *queue_info);
-extern int ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,
- const struct ath5k_txq_info *queue_info);
-extern int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah,
- enum ath5k_tx_queue queue_type,
- struct ath5k_txq_info *queue_info);
-extern u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue);
-extern void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue);
-extern int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue);
-extern unsigned int ath5k_hw_get_slot_time(struct ath5k_hw *ah);
-extern int ath5k_hw_set_slot_time(struct ath5k_hw *ah, unsigned int slot_time);
+int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,
+ struct ath5k_txq_info *queue_info);
+int ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,
+ const struct ath5k_txq_info *queue_info);
+int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah,
+ enum ath5k_tx_queue queue_type,
+ struct ath5k_txq_info *queue_info);
+u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue);
+void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue);
+int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue);
+int ath5k_hw_set_slot_time(struct ath5k_hw *ah, unsigned int slot_time);
/* Hardware Descriptor Functions */
-extern int ath5k_hw_init_desc_functions(struct ath5k_hw *ah);
+int ath5k_hw_init_desc_functions(struct ath5k_hw *ah);
/* GPIO Functions */
-extern void ath5k_hw_set_ledstate(struct ath5k_hw *ah, unsigned int state);
-extern int ath5k_hw_set_gpio_input(struct ath5k_hw *ah, u32 gpio);
-extern int ath5k_hw_set_gpio_output(struct ath5k_hw *ah, u32 gpio);
-extern u32 ath5k_hw_get_gpio(struct ath5k_hw *ah, u32 gpio);
-extern int ath5k_hw_set_gpio(struct ath5k_hw *ah, u32 gpio, u32 val);
-extern void ath5k_hw_set_gpio_intr(struct ath5k_hw *ah, unsigned int gpio, u32 interrupt_level);
+void ath5k_hw_set_ledstate(struct ath5k_hw *ah, unsigned int state);
+int ath5k_hw_set_gpio_input(struct ath5k_hw *ah, u32 gpio);
+int ath5k_hw_set_gpio_output(struct ath5k_hw *ah, u32 gpio);
+u32 ath5k_hw_get_gpio(struct ath5k_hw *ah, u32 gpio);
+int ath5k_hw_set_gpio(struct ath5k_hw *ah, u32 gpio, u32 val);
+void ath5k_hw_set_gpio_intr(struct ath5k_hw *ah, unsigned int gpio,
+ u32 interrupt_level);
/* rfkill Functions */
-extern void ath5k_rfkill_hw_start(struct ath5k_hw *ah);
-extern void ath5k_rfkill_hw_stop(struct ath5k_hw *ah);
+void ath5k_rfkill_hw_start(struct ath5k_hw *ah);
+void ath5k_rfkill_hw_stop(struct ath5k_hw *ah);
/* Misc functions */
int ath5k_hw_set_capabilities(struct ath5k_hw *ah);
-extern int ath5k_hw_get_capability(struct ath5k_hw *ah, enum ath5k_capability_type cap_type, u32 capability, u32 *result);
-extern int ath5k_hw_enable_pspoll(struct ath5k_hw *ah, u8 *bssid, u16 assoc_id);
-extern int ath5k_hw_disable_pspoll(struct ath5k_hw *ah);
+int ath5k_hw_get_capability(struct ath5k_hw *ah,
+ enum ath5k_capability_type cap_type, u32 capability,
+ u32 *result);
+int ath5k_hw_enable_pspoll(struct ath5k_hw *ah, u8 *bssid, u16 assoc_id);
+int ath5k_hw_disable_pspoll(struct ath5k_hw *ah);
/* Initial register settings functions */
-extern int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool change_channel);
+int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool change_channel);
/* Initialize RF */
-extern int ath5k_hw_rfregs_init(struct ath5k_hw *ah,
- struct ieee80211_channel *channel,
- unsigned int mode);
-extern int ath5k_hw_rfgain_init(struct ath5k_hw *ah, unsigned int freq);
-extern enum ath5k_rfgain ath5k_hw_gainf_calibrate(struct ath5k_hw *ah);
-extern int ath5k_hw_rfgain_opt_init(struct ath5k_hw *ah);
+int ath5k_hw_rfregs_init(struct ath5k_hw *ah,
+ struct ieee80211_channel *channel,
+ unsigned int mode);
+int ath5k_hw_rfgain_init(struct ath5k_hw *ah, unsigned int freq);
+enum ath5k_rfgain ath5k_hw_gainf_calibrate(struct ath5k_hw *ah);
+int ath5k_hw_rfgain_opt_init(struct ath5k_hw *ah);
/* PHY/RF channel functions */
-extern bool ath5k_channel_ok(struct ath5k_hw *ah, u16 freq, unsigned int flags);
-extern int ath5k_hw_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel);
+bool ath5k_channel_ok(struct ath5k_hw *ah, u16 freq, unsigned int flags);
+int ath5k_hw_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel);
/* PHY calibration */
void ath5k_hw_init_nfcal_hist(struct ath5k_hw *ah);
-extern int ath5k_hw_phy_calibrate(struct ath5k_hw *ah, struct ieee80211_channel *channel);
-extern int ath5k_hw_noise_floor_calibration(struct ath5k_hw *ah, short freq);
-extern s16 ath5k_hw_get_noise_floor(struct ath5k_hw *ah);
-extern void ath5k_hw_calibration_poll(struct ath5k_hw *ah);
+int ath5k_hw_phy_calibrate(struct ath5k_hw *ah,
+ struct ieee80211_channel *channel);
/* Spur mitigation */
bool ath5k_hw_chan_has_spur_noise(struct ath5k_hw *ah,
- struct ieee80211_channel *channel);
+ struct ieee80211_channel *channel);
void ath5k_hw_set_spur_mitigation_filter(struct ath5k_hw *ah,
- struct ieee80211_channel *channel);
+ struct ieee80211_channel *channel);
/* Misc PHY functions */
-extern u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, unsigned int chan);
-extern int ath5k_hw_phy_disable(struct ath5k_hw *ah);
+u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, unsigned int chan);
+int ath5k_hw_phy_disable(struct ath5k_hw *ah);
/* Antenna control */
-extern void ath5k_hw_set_antenna_mode(struct ath5k_hw *ah, u8 ant_mode);
-extern void ath5k_hw_set_def_antenna(struct ath5k_hw *ah, u8 ant);
-extern unsigned int ath5k_hw_get_def_antenna(struct ath5k_hw *ah);
+void ath5k_hw_set_antenna_mode(struct ath5k_hw *ah, u8 ant_mode);
/* TX power setup */
-extern int ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel, u8 ee_mode, u8 txpower);
-extern int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 txpower);
+int ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
+ u8 ee_mode, u8 txpower);
+int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 txpower);
/*
* Functions used internaly
@@ -1335,29 +1309,6 @@ static inline void ath5k_hw_reg_write(struct ath5k_hw *ah, u32 val, u16 reg)
iowrite32(val, ah->ah_iobase + reg);
}
-#if defined(_ATH5K_RESET) || defined(_ATH5K_PHY)
-/*
- * Check if a register write has been completed
- */
-static int ath5k_hw_register_timeout(struct ath5k_hw *ah, u32 reg, u32 flag,
- u32 val, bool is_set)
-{
- int i;
- u32 data;
-
- for (i = AR5K_TUNE_REGISTER_TIMEOUT; i > 0; i--) {
- data = ath5k_hw_reg_read(ah, reg);
- if (is_set && (data & flag))
- break;
- else if ((data & flag) == val)
- break;
- udelay(15);
- }
-
- return (i <= 0) ? -EAGAIN : 0;
-}
-#endif
-
static inline u32 ath5k_hw_bitswap(u32 val, unsigned int bits)
{
u32 retval = 0, bit, i;
@@ -1370,9 +1321,27 @@ static inline u32 ath5k_hw_bitswap(u32 val, unsigned int bits)
return retval;
}
-static inline int ath5k_pad_size(int hdrlen)
+#define AVG_SAMPLES 8
+#define AVG_FACTOR 1000
+
+/**
+ * ath5k_moving_average - Exponentially weighted moving average
+ * @avg: average structure
+ * @val: current value
+ *
+ * This implementation makes use of a struct ath5k_avg_val to prevent rounding
+ * errors.
+ */
+static inline struct ath5k_avg_val
+ath5k_moving_average(const struct ath5k_avg_val avg, const int val)
{
- return (hdrlen < 24) ? 0 : hdrlen & 3;
+ struct ath5k_avg_val new;
+ new.avg_weight = avg.avg_weight ?
+ (((avg.avg_weight * ((AVG_SAMPLES) - 1)) +
+ (val * (AVG_FACTOR))) / (AVG_SAMPLES)) :
+ (val * (AVG_FACTOR));
+ new.avg = new.avg_weight / (AVG_FACTOR);
+ return new;
}
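To see how the AVG_FACTOR scaling keeps precision despite integer division, here is a small self-contained userspace sketch (hypothetical RSSI samples, local struct avg_val standing in for ath5k_avg_val) that mirrors the same arithmetic:

	#include <stdio.h>

	#define AVG_SAMPLES	8
	#define AVG_FACTOR	1000

	struct avg_val { int avg; int avg_weight; };

	/* Same arithmetic as ath5k_moving_average(): keep the running value
	 * scaled by AVG_FACTOR so integer division does not lose precision. */
	static struct avg_val moving_average(struct avg_val avg, int val)
	{
		struct avg_val new;

		new.avg_weight = avg.avg_weight ?
			(((avg.avg_weight * (AVG_SAMPLES - 1)) +
			  (val * AVG_FACTOR)) / AVG_SAMPLES) :
			(val * AVG_FACTOR);
		new.avg = new.avg_weight / AVG_FACTOR;
		return new;
	}

	int main(void)
	{
		/* hypothetical per-beacon RSSI samples */
		int rssi[] = { 35, 35, 40, 38, 20, 22, 36, 37 };
		struct avg_val avg = { 0, 0 };
		unsigned int i;

		for (i = 0; i < sizeof(rssi) / sizeof(rssi[0]); i++) {
			avg = moving_average(avg, rssi[i]);
			printf("sample %2d -> avg %d\n", rssi[i], avg.avg);
		}
		return 0;
	}

The first sample seeds the weight directly; afterwards each new value contributes 1/8 of the running average, so short RSSI dips only move the average slowly.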
#endif
diff --git a/drivers/net/wireless/ath/ath5k/attach.c b/drivers/net/wireless/ath/ath5k/attach.c
index dc0786cc2639..e0c244b02f05 100644
--- a/drivers/net/wireless/ath/ath5k/attach.c
+++ b/drivers/net/wireless/ath/ath5k/attach.c
@@ -114,7 +114,6 @@ int ath5k_hw_attach(struct ath5k_softc *sc)
/*
* HW information
*/
- ah->ah_op_mode = NL80211_IFTYPE_STATION;
ah->ah_radar.r_enabled = AR5K_TUNE_RADAR_ALERT;
ah->ah_turbo = false;
ah->ah_txpower.txp_tpc = AR5K_TUNE_TPC_TXPOWER;
@@ -124,6 +123,9 @@ int ath5k_hw_attach(struct ath5k_softc *sc)
ah->ah_cw_min = AR5K_TUNE_CWMIN;
ah->ah_limit_tx_retries = AR5K_INIT_TX_RETRY;
ah->ah_software_retry = false;
+ ah->ah_ant_mode = AR5K_ANTMODE_DEFAULT;
+ ah->ah_noise_floor = -95; /* until first NF calibration is run */
+ sc->ani_state.ani_mode = ATH5K_ANI_MODE_AUTO;
/*
* Find the mac version
@@ -149,7 +151,6 @@ int ath5k_hw_attach(struct ath5k_softc *sc)
/* Get MAC, PHY and RADIO revisions */
ah->ah_mac_srev = srev;
ah->ah_mac_version = AR5K_REG_MS(srev, AR5K_SREV_VER);
- ah->ah_mac_revision = AR5K_REG_MS(srev, AR5K_SREV_REV);
ah->ah_phy_revision = ath5k_hw_reg_read(ah, AR5K_PHY_CHIP_ID) &
0xffffffff;
ah->ah_radio_5ghz_revision = ath5k_hw_radio_revision(ah,
@@ -328,7 +329,7 @@ int ath5k_hw_attach(struct ath5k_softc *sc)
/* Set BSSID to bcast address: ff:ff:ff:ff:ff:ff for now */
memcpy(common->curbssid, ath_bcast_mac, ETH_ALEN);
ath5k_hw_set_associd(ah);
- ath5k_hw_set_opmode(ah);
+ ath5k_hw_set_opmode(ah, sc->opmode);
ath5k_hw_rfgain_opt_init(ah);
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 3abbe7513ab5..cc6d41dec332 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -59,8 +59,8 @@
#include "base.h"
#include "reg.h"
#include "debug.h"
+#include "ani.h"
-static u8 ath5k_calinterval = 10; /* Calibrate PHY every 10 secs (TODO: Fixme) */
static int modparam_nohwcrypt;
module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
@@ -199,7 +199,7 @@ static void __devexit ath5k_pci_remove(struct pci_dev *pdev);
static int ath5k_pci_suspend(struct device *dev);
static int ath5k_pci_resume(struct device *dev);
-SIMPLE_DEV_PM_OPS(ath5k_pm_ops, ath5k_pci_suspend, ath5k_pci_resume);
+static SIMPLE_DEV_PM_OPS(ath5k_pm_ops, ath5k_pci_suspend, ath5k_pci_resume);
#define ATH5K_PM_OPS (&ath5k_pm_ops)
#else
#define ATH5K_PM_OPS NULL
@@ -231,7 +231,7 @@ static void ath5k_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
static int ath5k_config(struct ieee80211_hw *hw, u32 changed);
static u64 ath5k_prepare_multicast(struct ieee80211_hw *hw,
- int mc_count, struct dev_addr_list *mc_list);
+ struct netdev_hw_addr_list *mc_list);
static void ath5k_configure_filter(struct ieee80211_hw *hw,
unsigned int changed_flags,
unsigned int *new_flags,
@@ -242,6 +242,8 @@ static int ath5k_set_key(struct ieee80211_hw *hw,
struct ieee80211_key_conf *key);
static int ath5k_get_stats(struct ieee80211_hw *hw,
struct ieee80211_low_level_stats *stats);
+static int ath5k_get_survey(struct ieee80211_hw *hw,
+ int idx, struct survey_info *survey);
static u64 ath5k_get_tsf(struct ieee80211_hw *hw);
static void ath5k_set_tsf(struct ieee80211_hw *hw, u64 tsf);
static void ath5k_reset_tsf(struct ieee80211_hw *hw);
@@ -267,6 +269,7 @@ static const struct ieee80211_ops ath5k_hw_ops = {
.configure_filter = ath5k_configure_filter,
.set_key = ath5k_set_key,
.get_stats = ath5k_get_stats,
+ .get_survey = ath5k_get_survey,
.conf_tx = NULL,
.get_tsf = ath5k_get_tsf,
.set_tsf = ath5k_set_tsf,
@@ -308,7 +311,7 @@ static int ath5k_rxbuf_setup(struct ath5k_softc *sc,
struct ath5k_buf *bf);
static int ath5k_txbuf_setup(struct ath5k_softc *sc,
struct ath5k_buf *bf,
- struct ath5k_txq *txq);
+ struct ath5k_txq *txq, int padsize);
static inline void ath5k_txbuf_free(struct ath5k_softc *sc,
struct ath5k_buf *bf)
{
@@ -365,6 +368,7 @@ static void ath5k_beacon_send(struct ath5k_softc *sc);
static void ath5k_beacon_config(struct ath5k_softc *sc);
static void ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf);
static void ath5k_tasklet_beacon(unsigned long data);
+static void ath5k_tasklet_ani(unsigned long data);
static inline u64 ath5k_extend_tsf(struct ath5k_hw *ah, u32 rstamp)
{
@@ -544,8 +548,7 @@ ath5k_pci_probe(struct pci_dev *pdev,
SET_IEEE80211_DEV(hw, &pdev->dev);
hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
- IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_NOISE_DBM;
+ IEEE80211_HW_SIGNAL_DBM;
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_AP) |
@@ -830,6 +833,7 @@ ath5k_attach(struct pci_dev *pdev, struct ieee80211_hw *hw)
tasklet_init(&sc->restq, ath5k_tasklet_reset, (unsigned long)sc);
tasklet_init(&sc->calib, ath5k_tasklet_calibrate, (unsigned long)sc);
tasklet_init(&sc->beacontq, ath5k_tasklet_beacon, (unsigned long)sc);
+ tasklet_init(&sc->ani_tasklet, ath5k_tasklet_ani, (unsigned long)sc);
ret = ath5k_eeprom_read_mac(ah, mac);
if (ret) {
@@ -1138,8 +1142,6 @@ ath5k_mode_setup(struct ath5k_softc *sc)
struct ath5k_hw *ah = sc->ah;
u32 rfilt;
- ah->ah_op_mode = sc->opmode;
-
/* configure rx filter */
rfilt = sc->filter_flags;
ath5k_hw_set_rx_filter(ah, rfilt);
@@ -1148,8 +1150,9 @@ ath5k_mode_setup(struct ath5k_softc *sc)
ath5k_hw_set_bssid_mask(ah, sc->bssidmask);
/* configure operational mode */
- ath5k_hw_set_opmode(ah);
+ ath5k_hw_set_opmode(ah, sc->opmode);
+ ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "mode setup opmode %d\n", sc->opmode);
ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "RX filter 0x%x\n", rfilt);
}
@@ -1211,6 +1214,7 @@ ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
struct ath5k_hw *ah = sc->ah;
struct sk_buff *skb = bf->skb;
struct ath5k_desc *ds;
+ int ret;
if (!skb) {
skb = ath5k_rx_skb_alloc(sc, &bf->skbaddr);
@@ -1237,9 +1241,9 @@ ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
ds = bf->desc;
ds->ds_link = bf->daddr; /* link to self */
ds->ds_data = bf->skbaddr;
- ah->ah_setup_rx_desc(ah, ds,
- skb_tailroom(skb), /* buffer size */
- 0);
+ ret = ah->ah_setup_rx_desc(ah, ds, ah->common.rx_bufsize, 0);
+ if (ret)
+ return ret;
if (sc->rxlink != NULL)
*sc->rxlink = bf->daddr;
@@ -1272,7 +1276,7 @@ static enum ath5k_pkt_type get_hw_packet_type(struct sk_buff *skb)
static int
ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf,
- struct ath5k_txq *txq)
+ struct ath5k_txq *txq, int padsize)
{
struct ath5k_hw *ah = sc->ah;
struct ath5k_desc *ds = bf->desc;
@@ -1324,7 +1328,7 @@ ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf,
sc->vif, pktlen, info));
}
ret = ah->ah_setup_tx_desc(ah, ds, pktlen,
- ieee80211_get_hdrlen_from_skb(skb),
+ ieee80211_get_hdrlen_from_skb(skb), padsize,
get_hw_packet_type(skb),
(sc->power_level * 2),
hw_rate,
@@ -1636,7 +1640,6 @@ ath5k_txq_cleanup(struct ath5k_softc *sc)
sc->txqs[i].link);
}
}
- ieee80211_wake_queues(sc->hw); /* XXX move to callers */
for (i = 0; i < ARRAY_SIZE(sc->txqs); i++)
if (sc->txqs[i].setup)
@@ -1807,6 +1810,86 @@ ath5k_check_ibss_tsf(struct ath5k_softc *sc, struct sk_buff *skb,
}
static void
+ath5k_update_beacon_rssi(struct ath5k_softc *sc, struct sk_buff *skb, int rssi)
+{
+ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
+ struct ath5k_hw *ah = sc->ah;
+ struct ath_common *common = ath5k_hw_common(ah);
+
+ /* only beacons from our BSSID */
+ if (!ieee80211_is_beacon(mgmt->frame_control) ||
+ memcmp(mgmt->bssid, common->curbssid, ETH_ALEN) != 0)
+ return;
+
+ ah->ah_beacon_rssi_avg = ath5k_moving_average(ah->ah_beacon_rssi_avg,
+ rssi);
+
+ /* in IBSS mode we should keep RSSI statistics per neighbour */
+ /* le16_to_cpu(mgmt->u.beacon.capab_info) & WLAN_CAPABILITY_IBSS */
+}
+
+/*
+ * Compute padding position. skb must contain an IEEE 802.11 frame
+ */
+static int ath5k_common_padpos(struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ __le16 frame_control = hdr->frame_control;
+ int padpos = 24;
+
+ if (ieee80211_has_a4(frame_control))
+ padpos += ETH_ALEN;
+ if (ieee80211_is_data_qos(frame_control))
+ padpos += IEEE80211_QOS_CTL_LEN;
+
+ return padpos;
+}
+
+/*
+ * This function expects an 802.11 frame and returns the number of
+ * bytes added, or -1 if we don't have enough headroom.
+ */
+
+static int ath5k_add_padding(struct sk_buff *skb)
+{
+ int padpos = ath5k_common_padpos(skb);
+ int padsize = padpos & 3;
+
+ if (padsize && skb->len > padpos) {
+ if (skb_headroom(skb) < padsize)
+ return -1;
+
+ skb_push(skb, padsize);
+ memmove(skb->data, skb->data + padsize, padpos);
+ return padsize;
+ }
+
+ return 0;
+}
+
+/*
+ * This function expects an 802.11 frame and returns the number of
+ * bytes removed.
+ */
+
+static int ath5k_remove_padding(struct sk_buff *skb)
+{
+ int padpos = ath5k_common_padpos(skb);
+ int padsize = padpos & 3;
+
+ if (padsize && skb->len >= padpos + padsize) {
+ memmove(skb->data + padsize, skb->data, padpos);
+ skb_pull(skb, padsize);
+ return padsize;
+ }
+
+ return 0;
+}
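The padding rule above can be checked in isolation: the payload must start on a 4-byte boundary, so only headers whose length is not a multiple of four need padding. A self-contained userspace sketch (hypothetical frame types) mirroring the padpos/padsize arithmetic:

	#include <stdio.h>
	#include <stdbool.h>

	#define ETH_ALEN		6
	#define IEEE80211_QOS_CTL_LEN	2

	/* Mirror of ath5k_common_padpos()/ath5k_add_padding() arithmetic */
	static int padpos(bool has_a4, bool is_qos)
	{
		int pos = 24;			/* basic 3-address header */

		if (has_a4)
			pos += ETH_ALEN;	/* fourth address */
		if (is_qos)
			pos += IEEE80211_QOS_CTL_LEN;
		return pos;
	}

	int main(void)
	{
		struct { const char *name; bool a4, qos; } frames[] = {
			{ "data",        false, false },
			{ "QoS data",    false, true  },
			{ "4-addr data", true,  false },
			{ "4-addr QoS",  true,  true  },
		};
		unsigned int i;

		for (i = 0; i < sizeof(frames) / sizeof(frames[0]); i++) {
			int pos = padpos(frames[i].a4, frames[i].qos);
			printf("%-12s padpos %2d padsize %d\n",
			       frames[i].name, pos, pos & 3);
		}
		return 0;
	}

A plain data frame (24-byte header) needs no padding, while a QoS data frame (26 bytes) gets 2 bytes inserted between header and payload.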
+
+static void
ath5k_tasklet_rx(unsigned long data)
{
struct ieee80211_rx_status *rxs;
@@ -1819,8 +1902,6 @@ ath5k_tasklet_rx(unsigned long data)
struct ath5k_buf *bf;
struct ath5k_desc *ds;
int ret;
- int hdrlen;
- int padsize;
int rx_flag;
spin_lock(&sc->rxbuflock);
@@ -1845,18 +1926,24 @@ ath5k_tasklet_rx(unsigned long data)
break;
else if (unlikely(ret)) {
ATH5K_ERR(sc, "error in processing rx descriptor\n");
+ sc->stats.rxerr_proc++;
spin_unlock(&sc->rxbuflock);
return;
}
- if (unlikely(rs.rs_more)) {
- ATH5K_WARN(sc, "unsupported jumbo\n");
- goto next;
- }
+ sc->stats.rx_all_count++;
if (unlikely(rs.rs_status)) {
- if (rs.rs_status & AR5K_RXERR_PHY)
+ if (rs.rs_status & AR5K_RXERR_CRC)
+ sc->stats.rxerr_crc++;
+ if (rs.rs_status & AR5K_RXERR_FIFO)
+ sc->stats.rxerr_fifo++;
+ if (rs.rs_status & AR5K_RXERR_PHY) {
+ sc->stats.rxerr_phy++;
+ if (rs.rs_phyerr > 0 && rs.rs_phyerr < 32)
+ sc->stats.rxerr_phy_code[rs.rs_phyerr]++;
goto next;
+ }
if (rs.rs_status & AR5K_RXERR_DECRYPT) {
/*
* Decrypt error. If the error occurred
@@ -1868,12 +1955,14 @@ ath5k_tasklet_rx(unsigned long data)
*
* XXX do key cache faulting
*/
+ sc->stats.rxerr_decrypt++;
if (rs.rs_keyix == AR5K_RXKEYIX_INVALID &&
!(rs.rs_status & AR5K_RXERR_CRC))
goto accept;
}
if (rs.rs_status & AR5K_RXERR_MIC) {
rx_flag |= RX_FLAG_MMIC_ERROR;
+ sc->stats.rxerr_mic++;
goto accept;
}
@@ -1883,6 +1972,12 @@ ath5k_tasklet_rx(unsigned long data)
sc->opmode != NL80211_IFTYPE_MONITOR)
goto next;
}
+
+ if (unlikely(rs.rs_more)) {
+ sc->stats.rxerr_jumbo++;
+ goto next;
+ }
accept:
next_skb = ath5k_rx_skb_alloc(sc, &next_skb_addr);
@@ -1905,12 +2000,8 @@ accept:
* bytes and we can optimize this a bit. In addition, we must
* not try to remove padding from short control frames that do
* not have payload. */
- hdrlen = ieee80211_get_hdrlen_from_skb(skb);
- padsize = ath5k_pad_size(hdrlen);
- if (padsize) {
- memmove(skb->data + padsize, skb->data, hdrlen);
- skb_pull(skb, padsize);
- }
+ ath5k_remove_padding(skb);
+
rxs = IEEE80211_SKB_RXCB(skb);
/*
@@ -1939,10 +2030,15 @@ accept:
rxs->freq = sc->curchan->center_freq;
rxs->band = sc->curband->band;
- rxs->noise = sc->ah->ah_noise_floor;
- rxs->signal = rxs->noise + rs.rs_rssi;
+ rxs->signal = sc->ah->ah_noise_floor + rs.rs_rssi;
rxs->antenna = rs.rs_antenna;
+
+ if (rs.rs_antenna > 0 && rs.rs_antenna < 5)
+ sc->stats.antenna_rx[rs.rs_antenna]++;
+ else
+ sc->stats.antenna_rx[0]++; /* invalid */
+
rxs->rate_idx = ath5k_hw_to_driver_rix(sc, rs.rs_rate);
rxs->flag |= ath5k_rx_decrypted(sc, ds, skb, &rs);
@@ -1952,6 +2048,8 @@ accept:
ath5k_debug_dump_skb(sc, skb, "RX ", 0);
+ ath5k_update_beacon_rssi(sc, skb, rs.rs_rssi);
+
/* check beacons in IBSS mode */
if (sc->opmode == NL80211_IFTYPE_ADHOC)
ath5k_check_ibss_tsf(sc, skb, rxs);
@@ -1988,6 +2086,17 @@ ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq)
list_for_each_entry_safe(bf, bf0, &txq->q, list) {
ds = bf->desc;
+ /*
+ * It's possible that the hardware can say the buffer is
+ * completed when it hasn't yet loaded the ds_link from
+ * host memory and moved on. If there are more TX
+ * descriptors in the queue, wait for TXDP to change
+ * before processing this one.
+ */
+ if (ath5k_hw_get_txdp(sc->ah, txq->qnum) == bf->daddr &&
+ !list_is_last(&bf->list, &txq->q))
+ break;
+
ret = sc->ah->ah_proc_tx_desc(sc->ah, ds, &ts);
if (unlikely(ret == -EINPROGRESS))
break;
@@ -1997,6 +2106,7 @@ ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq)
break;
}
+ sc->stats.tx_all_count++;
skb = bf->skb;
info = IEEE80211_SKB_CB(skb);
bf->skb = NULL;
@@ -2022,14 +2132,31 @@ ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq)
info->status.rates[ts.ts_final_idx].count++;
if (unlikely(ts.ts_status)) {
- sc->ll_stats.dot11ACKFailureCount++;
- if (ts.ts_status & AR5K_TXERR_FILT)
+ sc->stats.ack_fail++;
+ if (ts.ts_status & AR5K_TXERR_FILT) {
info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
+ sc->stats.txerr_filt++;
+ }
+ if (ts.ts_status & AR5K_TXERR_XRETRY)
+ sc->stats.txerr_retry++;
+ if (ts.ts_status & AR5K_TXERR_FIFO)
+ sc->stats.txerr_fifo++;
} else {
info->flags |= IEEE80211_TX_STAT_ACK;
info->status.ack_signal = ts.ts_rssi;
}
+ /*
+ * Remove MAC header padding before giving the frame
+ * back to mac80211.
+ */
+ ath5k_remove_padding(skb);
+
+ if (ts.ts_antenna > 0 && ts.ts_antenna < 5)
+ sc->stats.antenna_tx[ts.ts_antenna]++;
+ else
+ sc->stats.antenna_tx[0]++; /* invalid */
+
ieee80211_tx_status(sc->hw, skb);
spin_lock(&sc->txbuflock);
@@ -2073,6 +2200,7 @@ ath5k_beacon_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
int ret = 0;
u8 antenna;
u32 flags;
+ const int padsize = 0;
bf->skbaddr = pci_map_single(sc->pdev, skb->data, skb->len,
PCI_DMA_TODEVICE);
@@ -2120,7 +2248,7 @@ ath5k_beacon_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
* from tx power (value is in dB units already) */
ds->ds_data = bf->skbaddr;
ret = ah->ah_setup_tx_desc(ah, ds, skb->len,
- ieee80211_get_hdrlen_from_skb(skb),
+ ieee80211_get_hdrlen_from_skb(skb), padsize,
AR5K_PKT_TYPE_BEACON, (sc->power_level * 2),
ieee80211_get_tx_rate(sc->hw, info)->hw_value,
1, AR5K_TXKEYIX_INVALID,
@@ -2407,9 +2535,6 @@ ath5k_init(struct ath5k_softc *sc)
*/
ath5k_stop_locked(sc);
- /* Set PHY calibration interval */
- ah->ah_cal_intval = ath5k_calinterval;
-
/*
* The basic interface to setting the hardware in a good
* state is ``reset''. On return the hardware is known to
@@ -2421,7 +2546,8 @@ ath5k_init(struct ath5k_softc *sc)
sc->curband = &sc->sbands[sc->curchan->band];
sc->imask = AR5K_INT_RXOK | AR5K_INT_RXERR | AR5K_INT_RXEOL |
AR5K_INT_RXORN | AR5K_INT_TXDESC | AR5K_INT_TXEOL |
- AR5K_INT_FATAL | AR5K_INT_GLOBAL | AR5K_INT_SWI;
+ AR5K_INT_FATAL | AR5K_INT_GLOBAL | AR5K_INT_MIB;
+
ret = ath5k_reset(sc, NULL);
if (ret)
goto done;
@@ -2435,8 +2561,7 @@ ath5k_init(struct ath5k_softc *sc)
for (i = 0; i < AR5K_KEYTABLE_SIZE; i++)
ath5k_hw_reset_key(ah, i);
- /* Set ack to be sent at low bit-rates */
- ath5k_hw_set_ack_bitrate_high(ah, false);
+ ath5k_hw_set_ack_bitrate_high(ah, true);
ret = 0;
done:
mmiowb();
@@ -2533,12 +2658,33 @@ ath5k_stop_hw(struct ath5k_softc *sc)
tasklet_kill(&sc->restq);
tasklet_kill(&sc->calib);
tasklet_kill(&sc->beacontq);
+ tasklet_kill(&sc->ani_tasklet);
ath5k_rfkill_hw_stop(sc->ah);
return ret;
}
+static void
+ath5k_intr_calibration_poll(struct ath5k_hw *ah)
+{
+ if (time_is_before_eq_jiffies(ah->ah_cal_next_ani) &&
+ !(ah->ah_cal_mask & AR5K_CALIBRATION_FULL)) {
+ /* run ANI only when full calibration is not active */
+ ah->ah_cal_next_ani = jiffies +
+ msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_ANI);
+ tasklet_schedule(&ah->ah_sc->ani_tasklet);
+
+ } else if (time_is_before_eq_jiffies(ah->ah_cal_next_full)) {
+ ah->ah_cal_next_full = jiffies +
+ msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_FULL);
+ tasklet_schedule(&ah->ah_sc->calib);
+ }
+ /* we could use SWI to generate enough interrupts to meet our
+ * calibration interval requirements, if necessary:
+ * AR5K_REG_ENABLE_BITS(ah, AR5K_CR, AR5K_CR_SWI); */
+}
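A rough picture of the resulting cadence, assuming the interrupt handler calls the poll function often enough. This is a userspace toy mirroring only the decision order, not driver code; the real function additionally skips the ANI branch while a full calibration is marked active in ah_cal_mask:

	#include <stdio.h>

	#define INTERVAL_ANI	1000	/* ms, ATH5K_TUNE_CALIBRATION_INTERVAL_ANI */
	#define INTERVAL_FULL	10000	/* ms, ATH5K_TUNE_CALIBRATION_INTERVAL_FULL */

	int main(void)
	{
		unsigned long now, next_ani = 0, next_full = 0;

		/* pretend the interrupt handler polls every 500 ms */
		for (now = 0; now <= 12000; now += 500) {
			if (now >= next_ani) {
				next_ani = now + INTERVAL_ANI;
				printf("%6lu ms: schedule ANI tasklet\n", now);
			} else if (now >= next_full) {
				next_full = now + INTERVAL_FULL;
				printf("%6lu ms: schedule full calibration\n", now);
			}
		}
		return 0;
	}

ANI runs roughly once per second and the heavier full calibration roughly every ten seconds, both driven from the regular interrupt path instead of the old software-interrupt timer.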
+
static irqreturn_t
ath5k_intr(int irq, void *dev_id)
{
@@ -2562,7 +2708,20 @@ ath5k_intr(int irq, void *dev_id)
*/
tasklet_schedule(&sc->restq);
} else if (unlikely(status & AR5K_INT_RXORN)) {
- tasklet_schedule(&sc->restq);
+ /*
+ * Receive buffers are full. Either the bus is busy or
+ * the CPU is not fast enough to process all received
+ * frames.
+ * Older chipsets need a reset to come out of this
+ * condition, but we treat it as RX for newer chips.
+ * We don't know exactly which versions need a reset -
+ * this guess is copied from the HAL.
+ */
+ sc->stats.rxorn_intr++;
+ if (ah->ah_mac_srev < AR5K_SREV_AR5212)
+ tasklet_schedule(&sc->restq);
+ else
+ tasklet_schedule(&sc->rxtq);
} else {
if (status & AR5K_INT_SWBA) {
tasklet_hi_schedule(&sc->beacontq);
@@ -2587,15 +2746,10 @@ ath5k_intr(int irq, void *dev_id)
if (status & AR5K_INT_BMISS) {
/* TODO */
}
- if (status & AR5K_INT_SWI) {
- tasklet_schedule(&sc->calib);
- }
if (status & AR5K_INT_MIB) {
- /*
- * These stats are also used for ANI i think
- * so how about updating them more often ?
- */
- ath5k_hw_update_mib_counters(ah, &sc->ll_stats);
+ sc->stats.mib_intr++;
+ ath5k_hw_update_mib_counters(ah);
+ ath5k_ani_mib_intr(ah);
}
if (status & AR5K_INT_GPIO)
tasklet_schedule(&sc->rf_kill.toggleq);
@@ -2606,7 +2760,7 @@ ath5k_intr(int irq, void *dev_id)
if (unlikely(!counter))
ATH5K_WARN(sc, "too many interrupts, giving up for now\n");
- ath5k_hw_calibration_poll(ah);
+ ath5k_intr_calibration_poll(ah);
return IRQ_HANDLED;
}
@@ -2630,8 +2784,7 @@ ath5k_tasklet_calibrate(unsigned long data)
struct ath5k_hw *ah = sc->ah;
/* Only full calibration for now */
- if (ah->ah_swi_mask != AR5K_SWI_FULL_CALIBRATION)
- return;
+ ah->ah_cal_mask |= AR5K_CALIBRATION_FULL;
/* Stop queues so that calibration
* doesn't interfere with tx */
@@ -2647,18 +2800,29 @@ ath5k_tasklet_calibrate(unsigned long data)
* to load new gain values.
*/
ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "calibration, resetting\n");
- ath5k_reset_wake(sc);
+ ath5k_reset(sc, sc->curchan);
}
if (ath5k_hw_phy_calibrate(ah, sc->curchan))
ATH5K_ERR(sc, "calibration of channel %u failed\n",
ieee80211_frequency_to_channel(
sc->curchan->center_freq));
- ah->ah_swi_mask = 0;
-
/* Wake queues */
ieee80211_wake_queues(sc->hw);
+ ah->ah_cal_mask &= ~AR5K_CALIBRATION_FULL;
+}
+
+
+static void
+ath5k_tasklet_ani(unsigned long data)
+{
+ struct ath5k_softc *sc = (void *)data;
+ struct ath5k_hw *ah = sc->ah;
+
+ ah->ah_cal_mask |= AR5K_CALIBRATION_ANI;
+ ath5k_ani_calibration(ah);
+ ah->ah_cal_mask &= ~AR5K_CALIBRATION_ANI;
}
@@ -2680,7 +2844,6 @@ static int ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
struct ath5k_softc *sc = hw->priv;
struct ath5k_buf *bf;
unsigned long flags;
- int hdrlen;
int padsize;
ath5k_debug_dump_skb(sc, skb, "TX ", 1);
@@ -2692,17 +2855,11 @@ static int ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
* the hardware expects the header padded to 4 byte boundaries
* if this is not the case we add the padding after the header
*/
- hdrlen = ieee80211_get_hdrlen_from_skb(skb);
- padsize = ath5k_pad_size(hdrlen);
- if (padsize) {
-
- if (skb_headroom(skb) < padsize) {
- ATH5K_ERR(sc, "tx hdrlen not %%4: %d not enough"
- " headroom to pad %d\n", hdrlen, padsize);
- goto drop_packet;
- }
- skb_push(skb, padsize);
- memmove(skb->data, skb->data+padsize, hdrlen);
+ padsize = ath5k_add_padding(skb);
+ if (padsize < 0) {
+ ATH5K_ERR(sc, "tx hdrlen not %%4: not enough"
+ " headroom to pad\n");
+ goto drop_packet;
}
spin_lock_irqsave(&sc->txbuflock, flags);
@@ -2721,7 +2878,7 @@ static int ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
bf->skb = skb;
- if (ath5k_txbuf_setup(sc, bf, txq)) {
+ if (ath5k_txbuf_setup(sc, bf, txq, padsize)) {
bf->skb = NULL;
spin_lock_irqsave(&sc->txbuflock, flags);
list_add_tail(&bf->list, &sc->txbuf);
@@ -2768,6 +2925,8 @@ ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan)
goto err;
}
+ ath5k_ani_init(ah, ah->ah_sc->ani_state.ani_mode);
+
/*
* Change channels and update the h/w rate map if we're switching;
* e.g. 11a to 11b/g.
@@ -2836,6 +2995,8 @@ static int ath5k_add_interface(struct ieee80211_hw *hw,
goto end;
}
+ ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "add interface mode %d\n", sc->opmode);
+
ath5k_hw_set_lladdr(sc->ah, vif->addr);
ath5k_mode_setup(sc);
@@ -2906,7 +3067,7 @@ ath5k_config(struct ieee80211_hw *hw, u32 changed)
* then we must allow the user to set how many tx antennas we
* have available
*/
- ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_DEFAULT);
+ ath5k_hw_set_antenna_mode(ah, ah->ah_ant_mode);
unlock:
mutex_unlock(&sc->lock);
@@ -2914,22 +3075,20 @@ unlock:
}
static u64 ath5k_prepare_multicast(struct ieee80211_hw *hw,
- int mc_count, struct dev_addr_list *mclist)
+ struct netdev_hw_addr_list *mc_list)
{
u32 mfilt[2], val;
- int i;
u8 pos;
+ struct netdev_hw_addr *ha;
mfilt[0] = 0;
mfilt[1] = 1;
- for (i = 0; i < mc_count; i++) {
- if (!mclist)
- break;
+ netdev_hw_addr_list_for_each(ha, mc_list) {
/* calculate XOR of eight 6-bit values */
- val = get_unaligned_le32(mclist->dmi_addr + 0);
+ val = get_unaligned_le32(ha->addr + 0);
pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
- val = get_unaligned_le32(mclist->dmi_addr + 3);
+ val = get_unaligned_le32(ha->addr + 3);
pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
pos &= 0x3f;
mfilt[pos / 32] |= (1 << (pos % 32));
@@ -2937,8 +3096,7 @@ static u64 ath5k_prepare_multicast(struct ieee80211_hw *hw,
* but not sure, needs testing, if we do use this we'd
* need to inform below to not reset the mcast */
/* ath5k_hw_set_mcast_filterindex(ah,
- * mclist->dmi_addr[5]); */
- mclist = mclist->next;
+ * ha->addr[5]); */
}
return ((u64)(mfilt[1]) << 32) | mfilt[0];
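The hash used here can be reproduced in isolation. A self-contained sketch (hypothetical multicast MAC addresses, a local stand-in for get_unaligned_le32()) shows which of the 64 filter bits a given group address selects; only the low 24 bits of each read matter once the result is masked to 6 bits, so the byte read just past the 6-byte address does not affect the outcome:

	#include <stdio.h>
	#include <stdint.h>

	/* read 4 bytes little-endian, like get_unaligned_le32() */
	static uint32_t le32(const uint8_t *p)
	{
		return p[0] | (p[1] << 8) | (p[2] << 16) | ((uint32_t)p[3] << 24);
	}

	/* Mirror of the hash in ath5k_prepare_multicast(): XOR of eight
	 * 6-bit slices of the 48-bit address selects one of 64 filter bits. */
	static unsigned int mcast_hash(const uint8_t *addr)
	{
		uint32_t val;
		unsigned int pos;

		val = le32(addr + 0);
		pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
		val = le32(addr + 3);
		pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
		return pos & 0x3f;
	}

	int main(void)
	{
		/* hypothetical multicast addresses, padded so the extra
		 * byte read at offset 3 stays inside the array */
		uint8_t ipv4_all[8] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
		uint8_t ipv6_all[8] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x01 };
		uint32_t mfilt[2] = { 0, 0 };
		unsigned int pos;

		pos = mcast_hash(ipv4_all);
		mfilt[pos / 32] |= 1u << (pos % 32);
		printf("224.0.0.1 -> bit %u\n", pos);

		pos = mcast_hash(ipv6_all);
		mfilt[pos / 32] |= 1u << (pos % 32);
		printf("ff02::1   -> bit %u\n", pos);

		printf("mfilt = %08x %08x\n", mfilt[1], mfilt[0]);
		return 0;
	}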
@@ -3124,12 +3282,30 @@ ath5k_get_stats(struct ieee80211_hw *hw,
struct ieee80211_low_level_stats *stats)
{
struct ath5k_softc *sc = hw->priv;
- struct ath5k_hw *ah = sc->ah;
/* Force update */
- ath5k_hw_update_mib_counters(ah, &sc->ll_stats);
+ ath5k_hw_update_mib_counters(sc->ah);
+
+ stats->dot11ACKFailureCount = sc->stats.ack_fail;
+ stats->dot11RTSFailureCount = sc->stats.rts_fail;
+ stats->dot11RTSSuccessCount = sc->stats.rts_ok;
+ stats->dot11FCSErrorCount = sc->stats.fcs_error;
+
+ return 0;
+}
+
+static int ath5k_get_survey(struct ieee80211_hw *hw, int idx,
+ struct survey_info *survey)
+{
+ struct ath5k_softc *sc = hw->priv;
+ struct ieee80211_conf *conf = &hw->conf;
+
+ if (idx != 0)
+ return -ENOENT;
- memcpy(stats, &sc->ll_stats, sizeof(sc->ll_stats));
+ survey->channel = conf->channel;
+ survey->filled = SURVEY_INFO_NOISE_DBM;
+ survey->noise = sc->ah->ah_noise_floor;
return 0;
}
diff --git a/drivers/net/wireless/ath/ath5k/base.h b/drivers/net/wireless/ath/ath5k/base.h
index 7e1a88a5abdb..56221bc7c8cd 100644
--- a/drivers/net/wireless/ath/ath5k/base.h
+++ b/drivers/net/wireless/ath/ath5k/base.h
@@ -50,6 +50,7 @@
#include "ath5k.h"
#include "debug.h"
+#include "ani.h"
#include "../regd.h"
#include "../ath.h"
@@ -105,6 +106,38 @@ struct ath5k_rfkill {
struct tasklet_struct toggleq;
};
+/* statistics */
+struct ath5k_statistics {
+ /* antenna use */
+ unsigned int antenna_rx[5]; /* frames count per antenna RX */
+ unsigned int antenna_tx[5]; /* frames count per antenna TX */
+
+ /* frame errors */
+ unsigned int rx_all_count; /* all RX frames, including errors */
+ unsigned int tx_all_count; /* all TX frames, including errors */
+ unsigned int rxerr_crc;
+ unsigned int rxerr_phy;
+ unsigned int rxerr_phy_code[32];
+ unsigned int rxerr_fifo;
+ unsigned int rxerr_decrypt;
+ unsigned int rxerr_mic;
+ unsigned int rxerr_proc;
+ unsigned int rxerr_jumbo;
+ unsigned int txerr_retry;
+ unsigned int txerr_fifo;
+ unsigned int txerr_filt;
+
+ /* MIB counters */
+ unsigned int ack_fail;
+ unsigned int rts_fail;
+ unsigned int rts_ok;
+ unsigned int fcs_error;
+ unsigned int beacons;
+
+ unsigned int mib_intr;
+ unsigned int rxorn_intr;
+};
+
#if CHAN_DEBUG
#define ATH_CHAN_MAX (26+26+26+200+200)
#else
@@ -117,7 +150,6 @@ struct ath5k_softc {
struct pci_dev *pdev; /* for dma mapping */
void __iomem *iobase; /* address of the device */
struct mutex lock; /* dev-level lock */
- struct ieee80211_low_level_stats ll_stats;
struct ieee80211_hw *hw; /* IEEE 802.11 common */
struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
struct ieee80211_channel channels[ATH_CHAN_MAX];
@@ -191,6 +223,11 @@ struct ath5k_softc {
int power_level; /* Requested tx power in dbm */
bool assoc; /* associate state */
bool enable_beacon; /* true if beacons are on */
+
+ struct ath5k_statistics stats;
+
+ struct ath5k_ani_state ani_state;
+ struct tasklet_struct ani_tasklet; /* ANI calibration */
};
#define ath5k_hw_hasbssidmask(_ah) \
diff --git a/drivers/net/wireless/ath/ath5k/caps.c b/drivers/net/wireless/ath/ath5k/caps.c
index 367a6c7d3cc7..74f007126f41 100644
--- a/drivers/net/wireless/ath/ath5k/caps.c
+++ b/drivers/net/wireless/ath/ath5k/caps.c
@@ -102,9 +102,6 @@ int ath5k_hw_set_capabilities(struct ath5k_hw *ah)
}
}
- /* GPIO */
- ah->ah_gpio_npins = AR5K_NUM_GPIO;
-
/* Set number of supported TX queues */
if (ah->ah_version == AR5K_AR5210)
ah->ah_capabilities.cap_queues.q_tx_num =
@@ -112,6 +109,12 @@ int ath5k_hw_set_capabilities(struct ath5k_hw *ah)
else
ah->ah_capabilities.cap_queues.q_tx_num = AR5K_NUM_TX_QUEUES;
+ /* newer hardware has PHY error counters */
+ if (ah->ah_mac_srev >= AR5K_SREV_AR5213A)
+ ah->ah_capabilities.cap_has_phyerr_counters = true;
+ else
+ ah->ah_capabilities.cap_has_phyerr_counters = false;
+
return 0;
}
diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
index 747508c15d34..6fb5c5ffa5b1 100644
--- a/drivers/net/wireless/ath/ath5k/debug.c
+++ b/drivers/net/wireless/ath/ath5k/debug.c
@@ -69,6 +69,7 @@ module_param_named(debug, ath5k_debug, uint, 0);
#include <linux/seq_file.h>
#include "reg.h"
+#include "ani.h"
static struct dentry *ath5k_global_debugfs;
@@ -307,6 +308,7 @@ static const struct {
{ ATH5K_DEBUG_DUMP_TX, "dumptx", "print transmit skb content" },
{ ATH5K_DEBUG_DUMPBANDS, "dumpbands", "dump bands" },
{ ATH5K_DEBUG_TRACE, "trace", "trace function calls" },
+ { ATH5K_DEBUG_ANI, "ani", "adaptive noise immunity" },
{ ATH5K_DEBUG_ANY, "all", "show all debug levels" },
};
@@ -364,6 +366,369 @@ static const struct file_operations fops_debug = {
};
+/* debugfs: antenna */
+
+static ssize_t read_file_antenna(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath5k_softc *sc = file->private_data;
+ char buf[700];
+ unsigned int len = 0;
+ unsigned int i;
+ unsigned int v;
+
+ len += snprintf(buf+len, sizeof(buf)-len, "antenna mode\t%d\n",
+ sc->ah->ah_ant_mode);
+ len += snprintf(buf+len, sizeof(buf)-len, "default antenna\t%d\n",
+ sc->ah->ah_def_ant);
+ len += snprintf(buf+len, sizeof(buf)-len, "tx antenna\t%d\n",
+ sc->ah->ah_tx_ant);
+
+ len += snprintf(buf+len, sizeof(buf)-len, "\nANTENNA\t\tRX\tTX\n");
+ for (i = 1; i < ARRAY_SIZE(sc->stats.antenna_rx); i++) {
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "[antenna %d]\t%d\t%d\n",
+ i, sc->stats.antenna_rx[i], sc->stats.antenna_tx[i]);
+ }
+ len += snprintf(buf+len, sizeof(buf)-len, "[invalid]\t%d\t%d\n",
+ sc->stats.antenna_rx[0], sc->stats.antenna_tx[0]);
+
+ v = ath5k_hw_reg_read(sc->ah, AR5K_DEFAULT_ANTENNA);
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "\nAR5K_DEFAULT_ANTENNA\t0x%08x\n", v);
+
+ v = ath5k_hw_reg_read(sc->ah, AR5K_STA_ID1);
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "AR5K_STA_ID1_DEFAULT_ANTENNA\t%d\n",
+ (v & AR5K_STA_ID1_DEFAULT_ANTENNA) != 0);
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "AR5K_STA_ID1_DESC_ANTENNA\t%d\n",
+ (v & AR5K_STA_ID1_DESC_ANTENNA) != 0);
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "AR5K_STA_ID1_RTS_DEF_ANTENNA\t%d\n",
+ (v & AR5K_STA_ID1_RTS_DEF_ANTENNA) != 0);
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "AR5K_STA_ID1_SELFGEN_DEF_ANT\t%d\n",
+ (v & AR5K_STA_ID1_SELFGEN_DEF_ANT) != 0);
+
+ v = ath5k_hw_reg_read(sc->ah, AR5K_PHY_AGCCTL);
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "\nAR5K_PHY_AGCCTL_OFDM_DIV_DIS\t%d\n",
+ (v & AR5K_PHY_AGCCTL_OFDM_DIV_DIS) != 0);
+
+ v = ath5k_hw_reg_read(sc->ah, AR5K_PHY_RESTART);
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "AR5K_PHY_RESTART_DIV_GC\t\t%x\n",
+ (v & AR5K_PHY_RESTART_DIV_GC) >> AR5K_PHY_RESTART_DIV_GC_S);
+
+ v = ath5k_hw_reg_read(sc->ah, AR5K_PHY_FAST_ANT_DIV);
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "AR5K_PHY_FAST_ANT_DIV_EN\t%d\n",
+ (v & AR5K_PHY_FAST_ANT_DIV_EN) != 0);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_antenna(struct file *file,
+ const char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath5k_softc *sc = file->private_data;
+ unsigned int i;
+ char buf[20];
+
+ if (copy_from_user(buf, userbuf, min(count, sizeof(buf))))
+ return -EFAULT;
+
+ if (strncmp(buf, "diversity", 9) == 0) {
+ ath5k_hw_set_antenna_mode(sc->ah, AR5K_ANTMODE_DEFAULT);
+ printk(KERN_INFO "ath5k debug: enable diversity\n");
+ } else if (strncmp(buf, "fixed-a", 7) == 0) {
+ ath5k_hw_set_antenna_mode(sc->ah, AR5K_ANTMODE_FIXED_A);
+ printk(KERN_INFO "ath5k debugfs: fixed antenna A\n");
+ } else if (strncmp(buf, "fixed-b", 7) == 0) {
+ ath5k_hw_set_antenna_mode(sc->ah, AR5K_ANTMODE_FIXED_B);
+ printk(KERN_INFO "ath5k debug: fixed antenna B\n");
+ } else if (strncmp(buf, "clear", 5) == 0) {
+ for (i = 0; i < ARRAY_SIZE(sc->stats.antenna_rx); i++) {
+ sc->stats.antenna_rx[i] = 0;
+ sc->stats.antenna_tx[i] = 0;
+ }
+ printk(KERN_INFO "ath5k debug: cleared antenna stats\n");
+ }
+ return count;
+}
+
+static const struct file_operations fops_antenna = {
+ .read = read_file_antenna,
+ .write = write_file_antenna,
+ .open = ath5k_debugfs_open,
+ .owner = THIS_MODULE,
+};
+
+
+/* debugfs: frameerrors */
+
+static ssize_t read_file_frameerrors(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath5k_softc *sc = file->private_data;
+ struct ath5k_statistics *st = &sc->stats;
+ char buf[700];
+ unsigned int len = 0;
+ int i;
+
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "RX\n---------------------\n");
+ len += snprintf(buf+len, sizeof(buf)-len, "CRC\t%d\t(%d%%)\n",
+ st->rxerr_crc,
+ st->rx_all_count > 0 ?
+ st->rxerr_crc*100/st->rx_all_count : 0);
+ len += snprintf(buf+len, sizeof(buf)-len, "PHY\t%d\t(%d%%)\n",
+ st->rxerr_phy,
+ st->rx_all_count > 0 ?
+ st->rxerr_phy*100/st->rx_all_count : 0);
+ for (i = 0; i < 32; i++) {
+ if (st->rxerr_phy_code[i])
+ len += snprintf(buf+len, sizeof(buf)-len,
+ " phy_err[%d]\t%d\n",
+ i, st->rxerr_phy_code[i]);
+ }
+
+ len += snprintf(buf+len, sizeof(buf)-len, "FIFO\t%d\t(%d%%)\n",
+ st->rxerr_fifo,
+ st->rx_all_count > 0 ?
+ st->rxerr_fifo*100/st->rx_all_count : 0);
+ len += snprintf(buf+len, sizeof(buf)-len, "decrypt\t%d\t(%d%%)\n",
+ st->rxerr_decrypt,
+ st->rx_all_count > 0 ?
+ st->rxerr_decrypt*100/st->rx_all_count : 0);
+ len += snprintf(buf+len, sizeof(buf)-len, "MIC\t%d\t(%d%%)\n",
+ st->rxerr_mic,
+ st->rx_all_count > 0 ?
+ st->rxerr_mic*100/st->rx_all_count : 0);
+ len += snprintf(buf+len, sizeof(buf)-len, "process\t%d\t(%d%%)\n",
+ st->rxerr_proc,
+ st->rx_all_count > 0 ?
+ st->rxerr_proc*100/st->rx_all_count : 0);
+ len += snprintf(buf+len, sizeof(buf)-len, "jumbo\t%d\t(%d%%)\n",
+ st->rxerr_jumbo,
+ st->rx_all_count > 0 ?
+ st->rxerr_jumbo*100/st->rx_all_count : 0);
+ len += snprintf(buf+len, sizeof(buf)-len, "[RX all\t%d]\n",
+ st->rx_all_count);
+
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "\nTX\n---------------------\n");
+ len += snprintf(buf+len, sizeof(buf)-len, "retry\t%d\t(%d%%)\n",
+ st->txerr_retry,
+ st->tx_all_count > 0 ?
+ st->txerr_retry*100/st->tx_all_count : 0);
+ len += snprintf(buf+len, sizeof(buf)-len, "FIFO\t%d\t(%d%%)\n",
+ st->txerr_fifo,
+ st->tx_all_count > 0 ?
+ st->txerr_fifo*100/st->tx_all_count : 0);
+ len += snprintf(buf+len, sizeof(buf)-len, "filter\t%d\t(%d%%)\n",
+ st->txerr_filt,
+ st->tx_all_count > 0 ?
+ st->txerr_filt*100/st->tx_all_count : 0);
+ len += snprintf(buf+len, sizeof(buf)-len, "[TX all\t%d]\n",
+ st->tx_all_count);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_frameerrors(struct file *file,
+ const char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath5k_softc *sc = file->private_data;
+ struct ath5k_statistics *st = &sc->stats;
+ char buf[20];
+
+ if (copy_from_user(buf, userbuf, min(count, sizeof(buf))))
+ return -EFAULT;
+
+ if (strncmp(buf, "clear", 5) == 0) {
+ st->rxerr_crc = 0;
+ st->rxerr_phy = 0;
+ st->rxerr_fifo = 0;
+ st->rxerr_decrypt = 0;
+ st->rxerr_mic = 0;
+ st->rxerr_proc = 0;
+ st->rxerr_jumbo = 0;
+ st->rx_all_count = 0;
+ st->txerr_retry = 0;
+ st->txerr_fifo = 0;
+ st->txerr_filt = 0;
+ st->tx_all_count = 0;
+ printk(KERN_INFO "ath5k debug: cleared frameerrors stats\n");
+ }
+ return count;
+}
+
+static const struct file_operations fops_frameerrors = {
+ .read = read_file_frameerrors,
+ .write = write_file_frameerrors,
+ .open = ath5k_debugfs_open,
+ .owner = THIS_MODULE,
+};
+
+
+/* debugfs: ani */
+
+static ssize_t read_file_ani(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath5k_softc *sc = file->private_data;
+ struct ath5k_statistics *st = &sc->stats;
+ struct ath5k_ani_state *as = &sc->ani_state;
+
+ char buf[700];
+ unsigned int len = 0;
+
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "HW has PHY error counters:\t%s\n",
+ sc->ah->ah_capabilities.cap_has_phyerr_counters ?
+ "yes" : "no");
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "HW max spur immunity level:\t%d\n",
+ as->max_spur_level);
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "\nANI state\n--------------------------------------------\n");
+ len += snprintf(buf+len, sizeof(buf)-len, "operating mode:\t\t\t");
+ switch (as->ani_mode) {
+ case ATH5K_ANI_MODE_OFF:
+ len += snprintf(buf+len, sizeof(buf)-len, "OFF\n");
+ break;
+ case ATH5K_ANI_MODE_MANUAL_LOW:
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "MANUAL LOW\n");
+ break;
+ case ATH5K_ANI_MODE_MANUAL_HIGH:
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "MANUAL HIGH\n");
+ break;
+ case ATH5K_ANI_MODE_AUTO:
+ len += snprintf(buf+len, sizeof(buf)-len, "AUTO\n");
+ break;
+ default:
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "??? (not good)\n");
+ break;
+ }
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "noise immunity level:\t\t%d\n",
+ as->noise_imm_level);
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "spur immunity level:\t\t%d\n",
+ as->spur_level);
+ len += snprintf(buf+len, sizeof(buf)-len, "firstep level:\t\t\t%d\n",
+ as->firstep_level);
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "OFDM weak signal detection:\t%s\n",
+ as->ofdm_weak_sig ? "on" : "off");
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "CCK weak signal detection:\t%s\n",
+ as->cck_weak_sig ? "on" : "off");
+
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "\nMIB INTERRUPTS:\t\t%u\n",
+ st->mib_intr);
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "beacon RSSI average:\t%d\n",
+ sc->ah->ah_beacon_rssi_avg.avg);
+ len += snprintf(buf+len, sizeof(buf)-len, "profcnt tx\t\t%u\t(%d%%)\n",
+ as->pfc_tx,
+ as->pfc_cycles > 0 ?
+ as->pfc_tx*100/as->pfc_cycles : 0);
+ len += snprintf(buf+len, sizeof(buf)-len, "profcnt rx\t\t%u\t(%d%%)\n",
+ as->pfc_rx,
+ as->pfc_cycles > 0 ?
+ as->pfc_rx*100/as->pfc_cycles : 0);
+ len += snprintf(buf+len, sizeof(buf)-len, "profcnt busy\t\t%u\t(%d%%)\n",
+ as->pfc_busy,
+ as->pfc_cycles > 0 ?
+ as->pfc_busy*100/as->pfc_cycles : 0);
+ len += snprintf(buf+len, sizeof(buf)-len, "profcnt cycles\t\t%u\n",
+ as->pfc_cycles);
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "listen time\t\t%d\tlast: %d\n",
+ as->listen_time, as->last_listen);
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "OFDM errors\t\t%u\tlast: %u\tsum: %u\n",
+ as->ofdm_errors, as->last_ofdm_errors,
+ as->sum_ofdm_errors);
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "CCK errors\t\t%u\tlast: %u\tsum: %u\n",
+ as->cck_errors, as->last_cck_errors,
+ as->sum_cck_errors);
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "AR5K_PHYERR_CNT1\t%x\t(=%d)\n",
+ ath5k_hw_reg_read(sc->ah, AR5K_PHYERR_CNT1),
+ ATH5K_ANI_OFDM_TRIG_HIGH - (ATH5K_PHYERR_CNT_MAX -
+ ath5k_hw_reg_read(sc->ah, AR5K_PHYERR_CNT1)));
+ len += snprintf(buf+len, sizeof(buf)-len,
+ "AR5K_PHYERR_CNT2\t%x\t(=%d)\n",
+ ath5k_hw_reg_read(sc->ah, AR5K_PHYERR_CNT2),
+ ATH5K_ANI_CCK_TRIG_HIGH - (ATH5K_PHYERR_CNT_MAX -
+ ath5k_hw_reg_read(sc->ah, AR5K_PHYERR_CNT2)));
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_ani(struct file *file,
+ const char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath5k_softc *sc = file->private_data;
+ char buf[20];
+
+ if (copy_from_user(buf, userbuf, min(count, sizeof(buf))))
+ return -EFAULT;
+
+ if (strncmp(buf, "sens-low", 8) == 0) {
+ ath5k_ani_init(sc->ah, ATH5K_ANI_MODE_MANUAL_HIGH);
+ } else if (strncmp(buf, "sens-high", 9) == 0) {
+ ath5k_ani_init(sc->ah, ATH5K_ANI_MODE_MANUAL_LOW);
+ } else if (strncmp(buf, "ani-off", 7) == 0) {
+ ath5k_ani_init(sc->ah, ATH5K_ANI_MODE_OFF);
+ } else if (strncmp(buf, "ani-on", 6) == 0) {
+ ath5k_ani_init(sc->ah, ATH5K_ANI_MODE_AUTO);
+ } else if (strncmp(buf, "noise-low", 9) == 0) {
+ ath5k_ani_set_noise_immunity_level(sc->ah, 0);
+ } else if (strncmp(buf, "noise-high", 10) == 0) {
+ ath5k_ani_set_noise_immunity_level(sc->ah,
+ ATH5K_ANI_MAX_NOISE_IMM_LVL);
+ } else if (strncmp(buf, "spur-low", 8) == 0) {
+ ath5k_ani_set_spur_immunity_level(sc->ah, 0);
+ } else if (strncmp(buf, "spur-high", 9) == 0) {
+ ath5k_ani_set_spur_immunity_level(sc->ah,
+ sc->ani_state.max_spur_level);
+ } else if (strncmp(buf, "fir-low", 7) == 0) {
+ ath5k_ani_set_firstep_level(sc->ah, 0);
+ } else if (strncmp(buf, "fir-high", 8) == 0) {
+ ath5k_ani_set_firstep_level(sc->ah, ATH5K_ANI_MAX_FIRSTEP_LVL);
+ } else if (strncmp(buf, "ofdm-off", 8) == 0) {
+ ath5k_ani_set_ofdm_weak_signal_detection(sc->ah, false);
+ } else if (strncmp(buf, "ofdm-on", 7) == 0) {
+ ath5k_ani_set_ofdm_weak_signal_detection(sc->ah, true);
+ } else if (strncmp(buf, "cck-off", 7) == 0) {
+ ath5k_ani_set_cck_weak_signal_detection(sc->ah, false);
+ } else if (strncmp(buf, "cck-on", 6) == 0) {
+ ath5k_ani_set_cck_weak_signal_detection(sc->ah, true);
+ }
+ return count;
+}
+
+static const struct file_operations fops_ani = {
+ .read = read_file_ani,
+ .write = write_file_ani,
+ .open = ath5k_debugfs_open,
+ .owner = THIS_MODULE,
+};
+
+
/* init */
void
@@ -393,6 +758,20 @@ ath5k_debug_init_device(struct ath5k_softc *sc)
sc->debug.debugfs_reset = debugfs_create_file("reset", S_IWUSR,
sc->debug.debugfs_phydir, sc, &fops_reset);
+
+ sc->debug.debugfs_antenna = debugfs_create_file("antenna",
+ S_IWUSR | S_IRUSR,
+ sc->debug.debugfs_phydir, sc, &fops_antenna);
+
+ sc->debug.debugfs_frameerrors = debugfs_create_file("frameerrors",
+ S_IWUSR | S_IRUSR,
+ sc->debug.debugfs_phydir, sc,
+ &fops_frameerrors);
+
+ sc->debug.debugfs_ani = debugfs_create_file("ani",
+ S_IWUSR | S_IRUSR,
+ sc->debug.debugfs_phydir, sc,
+ &fops_ani);
}
void
@@ -408,6 +787,9 @@ ath5k_debug_finish_device(struct ath5k_softc *sc)
debugfs_remove(sc->debug.debugfs_registers);
debugfs_remove(sc->debug.debugfs_beacon);
debugfs_remove(sc->debug.debugfs_reset);
+ debugfs_remove(sc->debug.debugfs_antenna);
+ debugfs_remove(sc->debug.debugfs_frameerrors);
+ debugfs_remove(sc->debug.debugfs_ani);
debugfs_remove(sc->debug.debugfs_phydir);
}
diff --git a/drivers/net/wireless/ath/ath5k/debug.h b/drivers/net/wireless/ath/ath5k/debug.h
index 66f69f04e55e..ddd5b3a99e8d 100644
--- a/drivers/net/wireless/ath/ath5k/debug.h
+++ b/drivers/net/wireless/ath/ath5k/debug.h
@@ -74,6 +74,9 @@ struct ath5k_dbg_info {
struct dentry *debugfs_registers;
struct dentry *debugfs_beacon;
struct dentry *debugfs_reset;
+ struct dentry *debugfs_antenna;
+ struct dentry *debugfs_frameerrors;
+ struct dentry *debugfs_ani;
};
/**
@@ -113,6 +116,7 @@ enum ath5k_debug_level {
ATH5K_DEBUG_DUMP_TX = 0x00000200,
ATH5K_DEBUG_DUMPBANDS = 0x00000400,
ATH5K_DEBUG_TRACE = 0x00001000,
+ ATH5K_DEBUG_ANI = 0x00002000,
ATH5K_DEBUG_ANY = 0xffffffff
};
diff --git a/drivers/net/wireless/ath/ath5k/desc.c b/drivers/net/wireless/ath/ath5k/desc.c
index dc30a2b70a6b..7d7b646ab65a 100644
--- a/drivers/net/wireless/ath/ath5k/desc.c
+++ b/drivers/net/wireless/ath/ath5k/desc.c
@@ -35,7 +35,8 @@
*/
static int
ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
- unsigned int pkt_len, unsigned int hdr_len, enum ath5k_pkt_type type,
+ unsigned int pkt_len, unsigned int hdr_len, int padsize,
+ enum ath5k_pkt_type type,
unsigned int tx_power, unsigned int tx_rate0, unsigned int tx_tries0,
unsigned int key_index, unsigned int antenna_mode, unsigned int flags,
unsigned int rtscts_rate, unsigned int rtscts_duration)
@@ -71,7 +72,7 @@ ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
/* Verify and set frame length */
/* remove padding we might have added before */
- frame_len = pkt_len - ath5k_pad_size(hdr_len) + FCS_LEN;
+ frame_len = pkt_len - padsize + FCS_LEN;
if (frame_len & ~AR5K_2W_TX_DESC_CTL0_FRAME_LEN)
return -EINVAL;
@@ -100,7 +101,7 @@ ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
AR5K_REG_SM(hdr_len, AR5K_2W_TX_DESC_CTL0_HEADER_LEN);
}
- /*Diferences between 5210-5211*/
+ /*Differences between 5210-5211*/
if (ah->ah_version == AR5K_AR5210) {
switch (type) {
case AR5K_PKT_TYPE_BEACON:
@@ -165,6 +166,7 @@ ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
*/
static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
struct ath5k_desc *desc, unsigned int pkt_len, unsigned int hdr_len,
+ int padsize,
enum ath5k_pkt_type type, unsigned int tx_power, unsigned int tx_rate0,
unsigned int tx_tries0, unsigned int key_index,
unsigned int antenna_mode, unsigned int flags,
@@ -206,7 +208,7 @@ static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
/* Verify and set frame length */
/* remove padding we might have added before */
- frame_len = pkt_len - ath5k_pad_size(hdr_len) + FCS_LEN;
+ frame_len = pkt_len - padsize + FCS_LEN;
if (frame_len & ~AR5K_4W_TX_DESC_CTL0_FRAME_LEN)
return -EINVAL;
@@ -229,7 +231,7 @@ static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
AR5K_REG_SM(antenna_mode, AR5K_4W_TX_DESC_CTL0_ANT_MODE_XMIT);
tx_ctl->tx_control_1 |= AR5K_REG_SM(type,
AR5K_4W_TX_DESC_CTL1_FRAME_TYPE);
- tx_ctl->tx_control_2 = AR5K_REG_SM(tx_tries0 + AR5K_TUNE_HWTXTRIES,
+ tx_ctl->tx_control_2 = AR5K_REG_SM(tx_tries0,
AR5K_4W_TX_DESC_CTL2_XMIT_TRIES0);
tx_ctl->tx_control_3 = tx_rate0 & AR5K_4W_TX_DESC_CTL3_XMIT_RATE0;
@@ -643,6 +645,7 @@ static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
rs->rs_status |= AR5K_RXERR_PHY;
rs->rs_phyerr |= AR5K_REG_MS(rx_err->rx_error_1,
AR5K_RX_DESC_ERROR1_PHY_ERROR_CODE);
+ ath5k_ani_phy_error_report(ah, rs->rs_phyerr);
}
if (rx_status->rx_status_1 &
@@ -668,12 +671,6 @@ int ath5k_hw_init_desc_functions(struct ath5k_hw *ah)
ah->ah_version != AR5K_AR5212)
return -ENOTSUPP;
- /* XXX: What is this magic value and where is it used ? */
- if (ah->ah_version == AR5K_AR5212)
- ah->ah_magic = AR5K_EEPROM_MAGIC_5212;
- else if (ah->ah_version == AR5K_AR5211)
- ah->ah_magic = AR5K_EEPROM_MAGIC_5211;
-
if (ah->ah_version == AR5K_AR5212) {
ah->ah_setup_rx_desc = ath5k_hw_setup_rx_desc;
ah->ah_setup_tx_desc = ath5k_hw_setup_4word_tx_desc;
diff --git a/drivers/net/wireless/ath/ath5k/desc.h b/drivers/net/wireless/ath/ath5k/desc.h
index 56158c804e3e..64538fbe4167 100644
--- a/drivers/net/wireless/ath/ath5k/desc.h
+++ b/drivers/net/wireless/ath/ath5k/desc.h
@@ -112,15 +112,32 @@ struct ath5k_hw_rx_error {
#define AR5K_RX_DESC_ERROR1_PHY_ERROR_CODE 0x0000ff00
#define AR5K_RX_DESC_ERROR1_PHY_ERROR_CODE_S 8
-/* PHY Error codes */
-#define AR5K_DESC_RX_PHY_ERROR_NONE 0x00
-#define AR5K_DESC_RX_PHY_ERROR_TIMING 0x20
-#define AR5K_DESC_RX_PHY_ERROR_PARITY 0x40
-#define AR5K_DESC_RX_PHY_ERROR_RATE 0x60
-#define AR5K_DESC_RX_PHY_ERROR_LENGTH 0x80
-#define AR5K_DESC_RX_PHY_ERROR_64QAM 0xa0
-#define AR5K_DESC_RX_PHY_ERROR_SERVICE 0xc0
-#define AR5K_DESC_RX_PHY_ERROR_TRANSMITOVR 0xe0
+/**
+ * enum ath5k_phy_error_code - PHY Error codes
+ */
+enum ath5k_phy_error_code {
+ AR5K_RX_PHY_ERROR_UNDERRUN = 0, /* Transmit underrun */
+ AR5K_RX_PHY_ERROR_TIMING = 1, /* Timing error */
+ AR5K_RX_PHY_ERROR_PARITY = 2, /* Illegal parity */
+ AR5K_RX_PHY_ERROR_RATE = 3, /* Illegal rate */
+ AR5K_RX_PHY_ERROR_LENGTH = 4, /* Illegal length */
+ AR5K_RX_PHY_ERROR_RADAR = 5, /* Radar detect */
+ AR5K_RX_PHY_ERROR_SERVICE = 6, /* Illegal service */
+ AR5K_RX_PHY_ERROR_TOR = 7, /* Transmit override receive */
+ /* these are specific to the 5212 */
+ AR5K_RX_PHY_ERROR_OFDM_TIMING = 17,
+ AR5K_RX_PHY_ERROR_OFDM_SIGNAL_PARITY = 18,
+ AR5K_RX_PHY_ERROR_OFDM_RATE_ILLEGAL = 19,
+ AR5K_RX_PHY_ERROR_OFDM_LENGTH_ILLEGAL = 20,
+ AR5K_RX_PHY_ERROR_OFDM_POWER_DROP = 21,
+ AR5K_RX_PHY_ERROR_OFDM_SERVICE = 22,
+ AR5K_RX_PHY_ERROR_OFDM_RESTART = 23,
+ AR5K_RX_PHY_ERROR_CCK_TIMING = 25,
+ AR5K_RX_PHY_ERROR_CCK_HEADER_CRC = 26,
+ AR5K_RX_PHY_ERROR_CCK_RATE_ILLEGAL = 27,
+ AR5K_RX_PHY_ERROR_CCK_SERVICE = 30,
+ AR5K_RX_PHY_ERROR_CCK_RESTART = 31,
+};
/*
* 5210/5211 hardware 2-word TX control descriptor
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.c b/drivers/net/wireless/ath/ath5k/eeprom.c
index 67665cdc7afe..ed0263672d6d 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.c
+++ b/drivers/net/wireless/ath/ath5k/eeprom.c
@@ -331,7 +331,8 @@ static int ath5k_eeprom_read_modes(struct ath5k_hw *ah, u32 *offset,
ee->ee_x_gain[mode] = (val >> 1) & 0xf;
ee->ee_xpd[mode] = val & 0x1;
- if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_0)
+ if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_0 &&
+ mode != AR5K_EEPROM_MODE_11B)
ee->ee_fixed_bias[mode] = (val >> 13) & 0x1;
if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_3_3) {
@@ -341,6 +342,7 @@ static int ath5k_eeprom_read_modes(struct ath5k_hw *ah, u32 *offset,
if (mode == AR5K_EEPROM_MODE_11A)
ee->ee_xr_power[mode] = val & 0x3f;
else {
+ /* b_DB_11[bg] and b_OB_11[bg] */
ee->ee_ob[mode][0] = val & 0x7;
ee->ee_db[mode][0] = (val >> 3) & 0x7;
}
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.h b/drivers/net/wireless/ath/ath5k/eeprom.h
index 473a483bb9c3..c4a6d5f26af4 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.h
+++ b/drivers/net/wireless/ath/ath5k/eeprom.h
@@ -24,9 +24,6 @@
* SERDES infos are present */
#define AR5K_EEPROM_MAGIC 0x003d /* EEPROM Magic number */
#define AR5K_EEPROM_MAGIC_VALUE 0x5aa5 /* Default - found on EEPROM */
-#define AR5K_EEPROM_MAGIC_5212 0x0000145c /* 5212 */
-#define AR5K_EEPROM_MAGIC_5211 0x0000145b /* 5211 */
-#define AR5K_EEPROM_MAGIC_5210 0x0000145a /* 5210 */
#define AR5K_EEPROM_IS_HB63 0x000b /* Talon detect */
@@ -78,9 +75,9 @@
#define AR5K_EEPROM_HDR_11A(_v) (((_v) >> AR5K_EEPROM_MODE_11A) & 0x1)
#define AR5K_EEPROM_HDR_11B(_v) (((_v) >> AR5K_EEPROM_MODE_11B) & 0x1)
#define AR5K_EEPROM_HDR_11G(_v) (((_v) >> AR5K_EEPROM_MODE_11G) & 0x1)
-#define AR5K_EEPROM_HDR_T_2GHZ_DIS(_v) (((_v) >> 3) & 0x1) /* Disable turbo for 2Ghz (?) */
-#define AR5K_EEPROM_HDR_T_5GHZ_DBM(_v) (((_v) >> 4) & 0x7f) /* Max turbo power for a/XR mode (eeprom_init) */
-#define AR5K_EEPROM_HDR_DEVICE(_v) (((_v) >> 11) & 0x7)
+#define AR5K_EEPROM_HDR_T_2GHZ_DIS(_v) (((_v) >> 3) & 0x1) /* Disable turbo for 2Ghz */
+#define AR5K_EEPROM_HDR_T_5GHZ_DBM(_v) (((_v) >> 4) & 0x7f) /* Max turbo power for < 2W power consumption */
+#define AR5K_EEPROM_HDR_DEVICE(_v) (((_v) >> 11) & 0x7) /* Device type (1 Cardbus, 2 PCI, 3 MiniPCI, 4 AP) */
#define AR5K_EEPROM_HDR_RFKILL(_v) (((_v) >> 14) & 0x1) /* Device has RFKill support */
#define AR5K_EEPROM_HDR_T_5GHZ_DIS(_v) (((_v) >> 15) & 0x1) /* Disable turbo for 5Ghz */
@@ -101,7 +98,7 @@
#define AR5K_EEPROM_MISC1 AR5K_EEPROM_INFO(5)
#define AR5K_EEPROM_TARGET_PWRSTART(_v) ((_v) & 0xfff)
-#define AR5K_EEPROM_HAS32KHZCRYSTAL(_v) (((_v) >> 14) & 0x1)
+#define AR5K_EEPROM_HAS32KHZCRYSTAL(_v) (((_v) >> 14) & 0x1) /* has 32KHz crystal for sleep mode */
#define AR5K_EEPROM_HAS32KHZCRYSTAL_OLD(_v) (((_v) >> 15) & 0x1)
#define AR5K_EEPROM_MISC2 AR5K_EEPROM_INFO(6)
@@ -114,26 +111,27 @@
#define AR5K_EEPROM_MISC4 AR5K_EEPROM_INFO(8)
#define AR5K_EEPROM_CAL_DATA_START(_v) (((_v) >> 4) & 0xfff)
-#define AR5K_EEPROM_MASK_R0(_v) (((_v) >> 2) & 0x3)
-#define AR5K_EEPROM_MASK_R1(_v) ((_v) & 0x3)
+#define AR5K_EEPROM_MASK_R0(_v) (((_v) >> 2) & 0x3) /* modes supported by radio 0 (bit 1: G, bit 2: A) */
+#define AR5K_EEPROM_MASK_R1(_v) ((_v) & 0x3) /* modes supported by radio 1 (bit 1: G, bit 2: A) */
#define AR5K_EEPROM_MISC5 AR5K_EEPROM_INFO(9)
-#define AR5K_EEPROM_COMP_DIS(_v) ((_v) & 0x1)
-#define AR5K_EEPROM_AES_DIS(_v) (((_v) >> 1) & 0x1)
-#define AR5K_EEPROM_FF_DIS(_v) (((_v) >> 2) & 0x1)
-#define AR5K_EEPROM_BURST_DIS(_v) (((_v) >> 3) & 0x1)
-#define AR5K_EEPROM_MAX_QCU(_v) (((_v) >> 4) & 0xf)
-#define AR5K_EEPROM_HEAVY_CLIP_EN(_v) (((_v) >> 8) & 0x1)
-#define AR5K_EEPROM_KEY_CACHE_SIZE(_v) (((_v) >> 12) & 0xf)
+#define AR5K_EEPROM_COMP_DIS(_v) ((_v) & 0x1) /* disable compression */
+#define AR5K_EEPROM_AES_DIS(_v) (((_v) >> 1) & 0x1) /* disable AES */
+#define AR5K_EEPROM_FF_DIS(_v) (((_v) >> 2) & 0x1) /* disable fast frames */
+#define AR5K_EEPROM_BURST_DIS(_v) (((_v) >> 3) & 0x1) /* disable bursting */
+#define AR5K_EEPROM_MAX_QCU(_v) (((_v) >> 4) & 0xf) /* max number of QCUs. defaults to 10 */
+#define AR5K_EEPROM_HEAVY_CLIP_EN(_v) (((_v) >> 8) & 0x1) /* enable heavy clipping */
+#define AR5K_EEPROM_KEY_CACHE_SIZE(_v) (((_v) >> 12) & 0xf) /* key cache size. defaults to 128 */
#define AR5K_EEPROM_MISC6 AR5K_EEPROM_INFO(10)
-#define AR5K_EEPROM_TX_CHAIN_DIS ((_v) & 0x8)
-#define AR5K_EEPROM_RX_CHAIN_DIS (((_v) >> 3) & 0x8)
-#define AR5K_EEPROM_FCC_MID_EN (((_v) >> 6) & 0x1)
-#define AR5K_EEPROM_JAP_U1EVEN_EN (((_v) >> 7) & 0x1)
-#define AR5K_EEPROM_JAP_U2_EN (((_v) >> 8) & 0x1)
-#define AR5K_EEPROM_JAP_U1ODD_EN (((_v) >> 9) & 0x1)
-#define AR5K_EEPROM_JAP_11A_NEW_EN (((_v) >> 10) & 0x1)
+#define AR5K_EEPROM_TX_CHAIN_DIS ((_v) & 0x7) /* MIMO chains disabled for TX bitmask */
+#define AR5K_EEPROM_RX_CHAIN_DIS (((_v) >> 3) & 0x7) /* MIMO chains disabled for RX bitmask */
+#define AR5K_EEPROM_FCC_MID_EN (((_v) >> 6) & 0x1) /* 5.47-5.7GHz supported */
+#define AR5K_EEPROM_JAP_U1EVEN_EN (((_v) >> 7) & 0x1) /* Japan UNII1 band (5.15-5.25GHz) on even channels (5180, 5200, 5220, 5240) supported */
+#define AR5K_EEPROM_JAP_U2_EN (((_v) >> 8) & 0x1) /* Japan UNII2 band (5.25-5.35GHz) supported */
+#define AR5K_EEPROM_JAP_MID_EN (((_v) >> 9) & 0x1) /* Japan band from 5.47-5.7GHz supported */
+#define AR5K_EEPROM_JAP_U1ODD_EN (((_v) >> 10) & 0x1) /* Japan UNII1 band (5.15-5.25GHz) on odd channels (5170, 5190, 5210, 5230) supported */
+#define AR5K_EEPROM_JAP_11A_NEW_EN (((_v) >> 11) & 0x1) /* Japan A mode enabled (using even channels) */
/* calibration settings */
#define AR5K_EEPROM_MODES_11A(_v) AR5K_EEPROM_OFF(_v, 0x00c5, 0x00d4)
@@ -389,7 +387,49 @@ struct ath5k_edge_power {
bool flag;
};
-/* EEPROM calibration data */
+/**
+ * struct ath5k_eeprom_info - EEPROM calibration data
+ *
+ * @ee_regdomain: ath/regd.c takes care of COUNTRY_ERD and WORLDWIDE_ROAMING
+ * flags
+ * @ee_ant_gain: Antenna gain in 0.5dB steps signed [5211 only?]
+ * @ee_cck_ofdm_gain_delta: difference in gainF to output the same power for
+ * OFDM and CCK packets
+ * @ee_cck_ofdm_power_delta: power difference between OFDM (6Mbps) and CCK
+ * (11Mbps) rate in G mode. 0.1dB steps
+ * @ee_scaled_cck_delta: for Japan Channel 14: 0.1dB resolution
+ *
+ * @ee_i_cal: Initial I coefficient to correct I/Q mismatch in the receive path
+ * @ee_q_cal: Initial Q coefficient to correct I/Q mismatch in the receive path
+ * @ee_fixed_bias: use ee_ob and ee_db settings or use automatic control
+ * @ee_switch_settling: RX/TX Switch settling time
+ * @ee_atn_tx_rx: Difference in attenuation between TX and RX in 1dB steps
+ * @ee_ant_control: Antenna Control Settings
+ * @ee_ob: Bias current for Output stage of PA
+ * B/G mode: Index [0] is used for AR2112/5112, otherwise [1]
+ * A mode: [0] 5.15-5.25 [1] 5.25-5.50 [2] 5.50-5.70 [3] 5.70-5.85 GHz
+ * @ee_db: Bias current for Output stage of PA. see @ee_ob
+ * @ee_tx_end2xlna_enable: Time difference from when BB finishes sending a frame
+ * to when the external LNA is activated
+ * @ee_tx_end2xpa_disable: Time difference from when BB finishes sending a frame
+ * to when the external PA switch is deactivated
+ * @ee_tx_frm2xpa_enable: Time difference from when MAC sends frame to when
+ * external PA switch is activated
+ * @ee_thr_62: Clear Channel Assessment (CCA) sensitivity
+ * (IEEE 802.11a section 17.3.10.5)
+ * @ee_xlna_gain: Total gain of the LNA (information only)
+ * @ee_xpd: Use external (1) or internal power detector
+ * @ee_x_gain: Gain for external power detector output (differences in EEMAP
+ * versions!)
+ * @ee_i_gain: Initial gain value after reset
+ * @ee_margin_tx_rx: Margin in dB when final attenuation stage should be used
+ *
+ * @ee_false_detect: Backoff in Sensitivity (dB) on channels with spur signals
+ * @ee_noise_floor_thr: Noise floor threshold in 1dB steps
+ * @ee_adc_desired_size: Desired amplitude for ADC, used by AGC; in 0.5 dB steps
+ * @ee_pga_desired_size: Desired output of PGA (for BB gain) in 0.5 dB steps
+ * @ee_pd_gain_overlap: PD ADC curves need to overlap in 0.5dB steps (ee_map>=2)
+ */
struct ath5k_eeprom_info {
/* Header information */
diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c
index aefe84f9c04b..5212e275f1c7 100644
--- a/drivers/net/wireless/ath/ath5k/pcu.c
+++ b/drivers/net/wireless/ath/ath5k/pcu.c
@@ -39,16 +39,16 @@
* ath5k_hw_set_opmode - Set PCU operating mode
*
* @ah: The &struct ath5k_hw
+ * @op_mode: &enum nl80211_iftype operating mode
*
* Initialize PCU for the various operating modes (AP/STA etc)
- *
- * NOTE: ah->ah_op_mode must be set before calling this.
*/
-int ath5k_hw_set_opmode(struct ath5k_hw *ah)
+int ath5k_hw_set_opmode(struct ath5k_hw *ah, enum nl80211_iftype op_mode)
{
struct ath_common *common = ath5k_hw_common(ah);
u32 pcu_reg, beacon_reg, low_id, high_id;
+ ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_MODE, "mode %d\n", op_mode);
/* Preserve rest settings */
pcu_reg = ath5k_hw_reg_read(ah, AR5K_STA_ID1) & 0xffff0000;
@@ -61,7 +61,7 @@ int ath5k_hw_set_opmode(struct ath5k_hw *ah)
ATH5K_TRACE(ah->ah_sc);
- switch (ah->ah_op_mode) {
+ switch (op_mode) {
case NL80211_IFTYPE_ADHOC:
pcu_reg |= AR5K_STA_ID1_ADHOC | AR5K_STA_ID1_KEYSRCH_MODE;
beacon_reg |= AR5K_BCR_ADHOC;
@@ -113,39 +113,26 @@ int ath5k_hw_set_opmode(struct ath5k_hw *ah)
}
/**
- * ath5k_hw_update - Update mib counters (mac layer statistics)
+ * ath5k_hw_update - Update MIB counters (mac layer statistics)
*
* @ah: The &struct ath5k_hw
- * @stats: The &struct ieee80211_low_level_stats we use to track
- * statistics on the driver
*
- * Reads MIB counters from PCU and updates sw statistics. Must be
- * called after a MIB interrupt.
+ * Reads MIB counters from PCU and updates sw statistics. Called after a
+ * MIB interrupt, because one of these counters might have reached its
+ * maximum and triggered the MIB interrupt, so that we can read and clear it.
+ *
+ * Called in interrupt context!
*/
-void ath5k_hw_update_mib_counters(struct ath5k_hw *ah,
- struct ieee80211_low_level_stats *stats)
+void ath5k_hw_update_mib_counters(struct ath5k_hw *ah)
{
- ATH5K_TRACE(ah->ah_sc);
+ struct ath5k_statistics *stats = &ah->ah_sc->stats;
/* Read-And-Clear */
- stats->dot11ACKFailureCount += ath5k_hw_reg_read(ah, AR5K_ACK_FAIL);
- stats->dot11RTSFailureCount += ath5k_hw_reg_read(ah, AR5K_RTS_FAIL);
- stats->dot11RTSSuccessCount += ath5k_hw_reg_read(ah, AR5K_RTS_OK);
- stats->dot11FCSErrorCount += ath5k_hw_reg_read(ah, AR5K_FCS_FAIL);
-
- /* XXX: Should we use this to track beacon count ?
- * -we read it anyway to clear the register */
- ath5k_hw_reg_read(ah, AR5K_BEACON_CNT);
-
- /* Reset profile count registers on 5212*/
- if (ah->ah_version == AR5K_AR5212) {
- ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_TX);
- ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_RX);
- ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_RXCLR);
- ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_CYCLE);
- }
-
- /* TODO: Handle ANI stats */
+ stats->ack_fail += ath5k_hw_reg_read(ah, AR5K_ACK_FAIL);
+ stats->rts_fail += ath5k_hw_reg_read(ah, AR5K_RTS_FAIL);
+ stats->rts_ok += ath5k_hw_reg_read(ah, AR5K_RTS_OK);
+ stats->fcs_error += ath5k_hw_reg_read(ah, AR5K_FCS_FAIL);
+ stats->beacons += ath5k_hw_reg_read(ah, AR5K_BEACON_CNT);
}
/**
@@ -167,9 +154,9 @@ void ath5k_hw_set_ack_bitrate_high(struct ath5k_hw *ah, bool high)
else {
u32 val = AR5K_STA_ID1_BASE_RATE_11B | AR5K_STA_ID1_ACKCTS_6MB;
if (high)
- AR5K_REG_ENABLE_BITS(ah, AR5K_STA_ID1, val);
- else
AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1, val);
+ else
+ AR5K_REG_ENABLE_BITS(ah, AR5K_STA_ID1, val);
}
}
@@ -179,25 +166,12 @@ void ath5k_hw_set_ack_bitrate_high(struct ath5k_hw *ah, bool high)
\******************/
/**
- * ath5k_hw_het_ack_timeout - Get ACK timeout from PCU in usec
- *
- * @ah: The &struct ath5k_hw
- */
-unsigned int ath5k_hw_get_ack_timeout(struct ath5k_hw *ah)
-{
- ATH5K_TRACE(ah->ah_sc);
-
- return ath5k_hw_clocktoh(ah, AR5K_REG_MS(ath5k_hw_reg_read(ah,
- AR5K_TIME_OUT), AR5K_TIME_OUT_ACK));
-}
-
-/**
* ath5k_hw_set_ack_timeout - Set ACK timeout on PCU
*
* @ah: The &struct ath5k_hw
* @timeout: Timeout in usec
*/
-int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout)
+static int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout)
{
ATH5K_TRACE(ah->ah_sc);
if (ath5k_hw_clocktoh(ah, AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_ACK))
@@ -211,24 +185,12 @@ int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout)
}
/**
- * ath5k_hw_get_cts_timeout - Get CTS timeout from PCU in usec
- *
- * @ah: The &struct ath5k_hw
- */
-unsigned int ath5k_hw_get_cts_timeout(struct ath5k_hw *ah)
-{
- ATH5K_TRACE(ah->ah_sc);
- return ath5k_hw_clocktoh(ah, AR5K_REG_MS(ath5k_hw_reg_read(ah,
- AR5K_TIME_OUT), AR5K_TIME_OUT_CTS));
-}
-
-/**
* ath5k_hw_set_cts_timeout - Set CTS timeout on PCU
*
* @ah: The &struct ath5k_hw
* @timeout: Timeout in usec
*/
-int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout)
+static int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout)
{
ATH5K_TRACE(ah->ah_sc);
if (ath5k_hw_clocktoh(ah, AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_CTS))
@@ -290,7 +252,7 @@ unsigned int ath5k_hw_get_clockrate(struct ath5k_hw *ah)
*
* @ah: The &struct ath5k_hw
*/
-unsigned int ath5k_hw_get_default_slottime(struct ath5k_hw *ah)
+static unsigned int ath5k_hw_get_default_slottime(struct ath5k_hw *ah)
{
struct ieee80211_channel *channel = ah->ah_current_channel;
@@ -308,7 +270,7 @@ unsigned int ath5k_hw_get_default_slottime(struct ath5k_hw *ah)
*
* @ah: The &struct ath5k_hw
*/
-unsigned int ath5k_hw_get_default_sifs(struct ath5k_hw *ah)
+static unsigned int ath5k_hw_get_default_sifs(struct ath5k_hw *ah)
{
struct ieee80211_channel *channel = ah->ah_current_channel;
@@ -417,7 +379,6 @@ void ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask)
* (ACK etc).
*
* NOTE: RX DMA should be already enabled using ath5k_hw_start_rx_dma
- * TODO: Init ANI here
*/
void ath5k_hw_start_rx_pcu(struct ath5k_hw *ah)
{
@@ -451,42 +412,6 @@ void ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1)
ath5k_hw_reg_write(ah, filter1, AR5K_MCAST_FILTER1);
}
-/*
- * Set multicast filter by index
- */
-int ath5k_hw_set_mcast_filter_idx(struct ath5k_hw *ah, u32 index)
-{
-
- ATH5K_TRACE(ah->ah_sc);
- if (index >= 64)
- return -EINVAL;
- else if (index >= 32)
- AR5K_REG_ENABLE_BITS(ah, AR5K_MCAST_FILTER1,
- (1 << (index - 32)));
- else
- AR5K_REG_ENABLE_BITS(ah, AR5K_MCAST_FILTER0, (1 << index));
-
- return 0;
-}
-
-/*
- * Clear Multicast filter by index
- */
-int ath5k_hw_clear_mcast_filter_idx(struct ath5k_hw *ah, u32 index)
-{
-
- ATH5K_TRACE(ah->ah_sc);
- if (index >= 64)
- return -EINVAL;
- else if (index >= 32)
- AR5K_REG_DISABLE_BITS(ah, AR5K_MCAST_FILTER1,
- (1 << (index - 32)));
- else
- AR5K_REG_DISABLE_BITS(ah, AR5K_MCAST_FILTER0, (1 << index));
-
- return 0;
-}
-
/**
* ath5k_hw_get_rx_filter - Get current rx filter
*
@@ -571,18 +496,7 @@ void ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter)
* Beacon control *
\****************/
-/**
- * ath5k_hw_get_tsf32 - Get a 32bit TSF
- *
- * @ah: The &struct ath5k_hw
- *
- * Returns lower 32 bits of current TSF
- */
-u32 ath5k_hw_get_tsf32(struct ath5k_hw *ah)
-{
- ATH5K_TRACE(ah->ah_sc);
- return ath5k_hw_reg_read(ah, AR5K_TSF_L32);
-}
+#define ATH5K_MAX_TSF_READ 10
/**
* ath5k_hw_get_tsf64 - Get the full 64bit TSF
@@ -593,10 +507,35 @@ u32 ath5k_hw_get_tsf32(struct ath5k_hw *ah)
*/
u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah)
{
- u64 tsf = ath5k_hw_reg_read(ah, AR5K_TSF_U32);
+ u32 tsf_lower, tsf_upper1, tsf_upper2;
+ int i;
+
+ /*
+ * While reading TSF upper and then lower part, the clock is still
+ * counting (or jumping in case of IBSS merge) so we might get
+ * inconsistent values. To avoid this, we read the upper part again
+ * and check it has not been changed. We make the hypothesis that a
+ * maximum of 3 changes can happens in a row (we use 10 as a safe
+ * value).
+ *
+ * Impact on performance is pretty small, since in most cases, only
+ * 3 register reads are needed.
+ */
+
+ tsf_upper1 = ath5k_hw_reg_read(ah, AR5K_TSF_U32);
+ for (i = 0; i < ATH5K_MAX_TSF_READ; i++) {
+ tsf_lower = ath5k_hw_reg_read(ah, AR5K_TSF_L32);
+ tsf_upper2 = ath5k_hw_reg_read(ah, AR5K_TSF_U32);
+ if (tsf_upper2 == tsf_upper1)
+ break;
+ tsf_upper1 = tsf_upper2;
+ }
+
+ WARN_ON(i == ATH5K_MAX_TSF_READ);
+
ATH5K_TRACE(ah->ah_sc);
- return ath5k_hw_reg_read(ah, AR5K_TSF_L32) | (tsf << 32);
+ return (((u64)tsf_upper1 << 32) | tsf_lower);
}
/**
@@ -651,7 +590,7 @@ void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval)
/*
* Set the additional timers by mode
*/
- switch (ah->ah_op_mode) {
+ switch (ah->ah_sc->opmode) {
case NL80211_IFTYPE_MONITOR:
case NL80211_IFTYPE_STATION:
/* In STA mode timer1 is used as next wakeup
@@ -688,8 +627,8 @@ void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval)
* Set the beacon register and enable all timers.
*/
/* When in AP or Mesh Point mode zero timer0 to start TSF */
- if (ah->ah_op_mode == NL80211_IFTYPE_AP ||
- ah->ah_op_mode == NL80211_IFTYPE_MESH_POINT)
+ if (ah->ah_sc->opmode == NL80211_IFTYPE_AP ||
+ ah->ah_sc->opmode == NL80211_IFTYPE_MESH_POINT)
ath5k_hw_reg_write(ah, 0, AR5K_TIMER0);
ath5k_hw_reg_write(ah, next_beacon, AR5K_TIMER0);
@@ -722,203 +661,6 @@ void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval)
}
-#if 0
-/*
- * Set beacon timers
- */
-int ath5k_hw_set_beacon_timers(struct ath5k_hw *ah,
- const struct ath5k_beacon_state *state)
-{
- u32 cfp_period, next_cfp, dtim, interval, next_beacon;
-
- /*
- * TODO: should be changed through *state
- * review struct ath5k_beacon_state struct
- *
- * XXX: These are used for cfp period bellow, are they
- * ok ? Is it O.K. for tsf here to be 0 or should we use
- * get_tsf ?
- */
- u32 dtim_count = 0; /* XXX */
- u32 cfp_count = 0; /* XXX */
- u32 tsf = 0; /* XXX */
-
- ATH5K_TRACE(ah->ah_sc);
- /* Return on an invalid beacon state */
- if (state->bs_interval < 1)
- return -EINVAL;
-
- interval = state->bs_interval;
- dtim = state->bs_dtim_period;
-
- /*
- * PCF support?
- */
- if (state->bs_cfp_period > 0) {
- /*
- * Enable PCF mode and set the CFP
- * (Contention Free Period) and timer registers
- */
- cfp_period = state->bs_cfp_period * state->bs_dtim_period *
- state->bs_interval;
- next_cfp = (cfp_count * state->bs_dtim_period + dtim_count) *
- state->bs_interval;
-
- AR5K_REG_ENABLE_BITS(ah, AR5K_STA_ID1,
- AR5K_STA_ID1_DEFAULT_ANTENNA |
- AR5K_STA_ID1_PCF);
- ath5k_hw_reg_write(ah, cfp_period, AR5K_CFP_PERIOD);
- ath5k_hw_reg_write(ah, state->bs_cfp_max_duration,
- AR5K_CFP_DUR);
- ath5k_hw_reg_write(ah, (tsf + (next_cfp == 0 ? cfp_period :
- next_cfp)) << 3, AR5K_TIMER2);
- } else {
- /* Disable PCF mode */
- AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1,
- AR5K_STA_ID1_DEFAULT_ANTENNA |
- AR5K_STA_ID1_PCF);
- }
-
- /*
- * Enable the beacon timer register
- */
- ath5k_hw_reg_write(ah, state->bs_next_beacon, AR5K_TIMER0);
-
- /*
- * Start the beacon timers
- */
- ath5k_hw_reg_write(ah, (ath5k_hw_reg_read(ah, AR5K_BEACON) &
- ~(AR5K_BEACON_PERIOD | AR5K_BEACON_TIM)) |
- AR5K_REG_SM(state->bs_tim_offset ? state->bs_tim_offset + 4 : 0,
- AR5K_BEACON_TIM) | AR5K_REG_SM(state->bs_interval,
- AR5K_BEACON_PERIOD), AR5K_BEACON);
-
- /*
- * Write new beacon miss threshold, if it appears to be valid
- * XXX: Figure out right values for min <= bs_bmiss_threshold <= max
- * and return if its not in range. We can test this by reading value and
- * setting value to a largest value and seeing which values register.
- */
-
- AR5K_REG_WRITE_BITS(ah, AR5K_RSSI_THR, AR5K_RSSI_THR_BMISS,
- state->bs_bmiss_threshold);
-
- /*
- * Set sleep control register
- * XXX: Didn't find this in 5210 code but since this register
- * exists also in ar5k's 5210 headers i leave it as common code.
- */
- AR5K_REG_WRITE_BITS(ah, AR5K_SLEEP_CTL, AR5K_SLEEP_CTL_SLDUR,
- (state->bs_sleep_duration - 3) << 3);
-
- /*
- * Set enhanced sleep registers on 5212
- */
- if (ah->ah_version == AR5K_AR5212) {
- if (state->bs_sleep_duration > state->bs_interval &&
- roundup(state->bs_sleep_duration, interval) ==
- state->bs_sleep_duration)
- interval = state->bs_sleep_duration;
-
- if (state->bs_sleep_duration > dtim && (dtim == 0 ||
- roundup(state->bs_sleep_duration, dtim) ==
- state->bs_sleep_duration))
- dtim = state->bs_sleep_duration;
-
- if (interval > dtim)
- return -EINVAL;
-
- next_beacon = interval == dtim ? state->bs_next_dtim :
- state->bs_next_beacon;
-
- ath5k_hw_reg_write(ah,
- AR5K_REG_SM((state->bs_next_dtim - 3) << 3,
- AR5K_SLEEP0_NEXT_DTIM) |
- AR5K_REG_SM(10, AR5K_SLEEP0_CABTO) |
- AR5K_SLEEP0_ENH_SLEEP_EN |
- AR5K_SLEEP0_ASSUME_DTIM, AR5K_SLEEP0);
-
- ath5k_hw_reg_write(ah, AR5K_REG_SM((next_beacon - 3) << 3,
- AR5K_SLEEP1_NEXT_TIM) |
- AR5K_REG_SM(10, AR5K_SLEEP1_BEACON_TO), AR5K_SLEEP1);
-
- ath5k_hw_reg_write(ah,
- AR5K_REG_SM(interval, AR5K_SLEEP2_TIM_PER) |
- AR5K_REG_SM(dtim, AR5K_SLEEP2_DTIM_PER), AR5K_SLEEP2);
- }
-
- return 0;
-}
-
-/*
- * Reset beacon timers
- */
-void ath5k_hw_reset_beacon(struct ath5k_hw *ah)
-{
- ATH5K_TRACE(ah->ah_sc);
- /*
- * Disable beacon timer
- */
- ath5k_hw_reg_write(ah, 0, AR5K_TIMER0);
-
- /*
- * Disable some beacon register values
- */
- AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1,
- AR5K_STA_ID1_DEFAULT_ANTENNA | AR5K_STA_ID1_PCF);
- ath5k_hw_reg_write(ah, AR5K_BEACON_PERIOD, AR5K_BEACON);
-}
-
-/*
- * Wait for beacon queue to finish
- */
-int ath5k_hw_beaconq_finish(struct ath5k_hw *ah, unsigned long phys_addr)
-{
- unsigned int i;
- int ret;
-
- ATH5K_TRACE(ah->ah_sc);
-
- /* 5210 doesn't have QCU*/
- if (ah->ah_version == AR5K_AR5210) {
- /*
- * Wait for beaconn queue to finish by checking
- * Control Register and Beacon Status Register.
- */
- for (i = AR5K_TUNE_BEACON_INTERVAL / 2; i > 0; i--) {
- if (!(ath5k_hw_reg_read(ah, AR5K_BSR) & AR5K_BSR_TXQ1F)
- ||
- !(ath5k_hw_reg_read(ah, AR5K_CR) & AR5K_BSR_TXQ1F))
- break;
- udelay(10);
- }
-
- /* Timeout... */
- if (i <= 0) {
- /*
- * Re-schedule the beacon queue
- */
- ath5k_hw_reg_write(ah, phys_addr, AR5K_NOQCU_TXDP1);
- ath5k_hw_reg_write(ah, AR5K_BCR_TQ1V | AR5K_BCR_BDMAE,
- AR5K_BCR);
-
- return -EIO;
- }
- ret = 0;
- } else {
- /*5211/5212*/
- ret = ath5k_hw_register_timeout(ah,
- AR5K_QUEUE_STATUS(AR5K_TX_QUEUE_ID_BEACON),
- AR5K_QCU_STS_FRMPENDCNT, 0, false);
-
- if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, AR5K_TX_QUEUE_ID_BEACON))
- return -EIO;
- }
-
- return ret;
-}
-#endif
-
/*********************\
* Key table functions *
@@ -971,19 +713,6 @@ int ath5k_hw_reset_key(struct ath5k_hw *ah, u16 entry)
return 0;
}
-/*
- * Check if a table entry is valid
- */
-int ath5k_hw_is_key_valid(struct ath5k_hw *ah, u16 entry)
-{
- ATH5K_TRACE(ah->ah_sc);
- AR5K_ASSERT_ENTRY(entry, AR5K_KEYTABLE_SIZE);
-
- /* Check the validation flag at the end of the entry */
- return ath5k_hw_reg_read(ah, AR5K_KEYTABLE_MAC1(entry)) &
- AR5K_KEYTABLE_VALID;
-}
-
static
int ath5k_keycache_type(const struct ieee80211_key_conf *key)
{
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index 68e2bccd90d3..1b81c4778800 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -20,8 +20,6 @@
*
*/
-#define _ATH5K_PHY
-
#include <linux/delay.h>
#include <linux/slab.h>
@@ -982,7 +980,7 @@ static int ath5k_hw_rf5112_channel(struct ath5k_hw *ah,
return -EINVAL;
data0 = ath5k_hw_bitswap((data0 << 2) & 0xff, 8);
- } else if ((c - (c % 5)) != 2 || c > 5435) {
+ } else if ((c % 5) != 2 || c > 5435) {
if (!(c % 20) && c >= 5120) {
data0 = ath5k_hw_bitswap(((c - 4800) / 20 << 2), 8);
data2 = ath5k_hw_bitswap(3, 2);
@@ -995,7 +993,7 @@ static int ath5k_hw_rf5112_channel(struct ath5k_hw *ah,
} else
return -EINVAL;
} else {
- data0 = ath5k_hw_bitswap((10 * (c - 2) - 4800) / 25 + 1, 8);
+ data0 = ath5k_hw_bitswap((10 * (c - 2 - 4800)) / 25 + 1, 8);
data2 = ath5k_hw_bitswap(0, 2);
}
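For example (illustrative arithmetic, not part of the patch): for c = 5002 MHz, which takes this branch (c % 5 == 2 and c <= 5435), the corrected grouping gives 10 * (5002 - 2 - 4800) / 25 + 1 = 81, which fits the 8-bit value passed to ath5k_hw_bitswap(); the old grouping evaluated to (10 * (5002 - 2) - 4800) / 25 + 1 = 1809 and overflowed that field.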
@@ -1023,7 +1021,7 @@ static int ath5k_hw_rf2425_channel(struct ath5k_hw *ah,
data0 = ath5k_hw_bitswap((c - 2272), 8);
data2 = 0;
/* ? 5GHz ? */
- } else if ((c - (c % 5)) != 2 || c > 5435) {
+ } else if ((c % 5) != 2 || c > 5435) {
if (!(c % 20) && c < 5120)
data0 = ath5k_hw_bitswap(((c - 4800) / 20 << 2), 8);
else if (!(c % 10))
@@ -1034,7 +1032,7 @@ static int ath5k_hw_rf2425_channel(struct ath5k_hw *ah,
return -EINVAL;
data2 = ath5k_hw_bitswap(1, 2);
} else {
- data0 = ath5k_hw_bitswap((10 * (c - 2) - 4800) / 25 + 1, 8);
+ data0 = ath5k_hw_bitswap((10 * (c - 2 - 4800)) / 25 + 1, 8);
data2 = ath5k_hw_bitswap(0, 2);
}
@@ -1105,28 +1103,6 @@ int ath5k_hw_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel)
PHY calibration
\*****************/
-void
-ath5k_hw_calibration_poll(struct ath5k_hw *ah)
-{
- /* Calibration interval in jiffies */
- unsigned long cal_intval;
-
- cal_intval = msecs_to_jiffies(ah->ah_cal_intval * 1000);
-
- /* Initialize timestamp if needed */
- if (!ah->ah_cal_tstamp)
- ah->ah_cal_tstamp = jiffies;
-
- /* For now we always do full calibration
- * Mark software interrupt mask and fire software
- * interrupt (bit gets auto-cleared) */
- if (time_is_before_eq_jiffies(ah->ah_cal_tstamp + cal_intval)) {
- ah->ah_cal_tstamp = jiffies;
- ah->ah_swi_mask = AR5K_SWI_FULL_CALIBRATION;
- AR5K_REG_ENABLE_BITS(ah, AR5K_CR, AR5K_CR_SWI);
- }
-}
-
static int sign_extend(int val, const int nbits)
{
int order = BIT(nbits-1);
@@ -1191,7 +1167,7 @@ static s16 ath5k_hw_get_median_noise_floor(struct ath5k_hw *ah)
* The median of the values in the history is then loaded into the
* hardware for its own use for RSSI and CCA measurements.
*/
-void ath5k_hw_update_noise_floor(struct ath5k_hw *ah)
+static void ath5k_hw_update_noise_floor(struct ath5k_hw *ah)
{
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
u32 val;
@@ -1400,7 +1376,11 @@ static int ath5k_hw_rf511x_calibrate(struct ath5k_hw *ah,
}
i_coffd = ((i_pwr >> 1) + (q_pwr >> 1)) >> 7;
- q_coffd = q_pwr >> 7;
+
+ if (ah->ah_version == AR5K_AR5211)
+ q_coffd = q_pwr >> 6;
+ else
+ q_coffd = q_pwr >> 7;
/* protect against divide by 0 and loss of sign bits */
if (i_coffd == 0 || q_coffd < 2)
@@ -1409,7 +1389,10 @@ static int ath5k_hw_rf511x_calibrate(struct ath5k_hw *ah,
i_coff = (-iq_corr) / i_coffd;
i_coff = clamp(i_coff, -32, 31); /* signed 6 bit */
- q_coff = (i_pwr / q_coffd) - 128;
+ if (ah->ah_version == AR5K_AR5211)
+ q_coff = (i_pwr / q_coffd) - 64;
+ else
+ q_coff = (i_pwr / q_coffd) - 128;
q_coff = clamp(q_coff, -16, 15); /* signed 5 bit */
ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_CALIBRATE,
@@ -1769,7 +1752,7 @@ u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, unsigned int chan)
* Antenna control *
\*****************/
-void /*TODO:Boundary check*/
+static void /*TODO:Boundary check*/
ath5k_hw_set_def_antenna(struct ath5k_hw *ah, u8 ant)
{
ATH5K_TRACE(ah->ah_sc);
@@ -1778,16 +1761,6 @@ ath5k_hw_set_def_antenna(struct ath5k_hw *ah, u8 ant)
ath5k_hw_reg_write(ah, ant & 0x7, AR5K_DEFAULT_ANTENNA);
}
-unsigned int ath5k_hw_get_def_antenna(struct ath5k_hw *ah)
-{
- ATH5K_TRACE(ah->ah_sc);
-
- if (ah->ah_version != AR5K_AR5210)
- return ath5k_hw_reg_read(ah, AR5K_DEFAULT_ANTENNA) & 0x7;
-
- return false; /*XXX: What do we return for 5210 ?*/
-}
-
/*
* Enable/disable fast rx antenna diversity
*/
@@ -1931,6 +1904,7 @@ ath5k_hw_set_antenna_mode(struct ath5k_hw *ah, u8 ant_mode)
ah->ah_tx_ant = tx_ant;
ah->ah_ant_mode = ant_mode;
+ ah->ah_def_ant = def_ant;
sta_id1 |= use_def_for_tx ? AR5K_STA_ID1_DEFAULT_ANTENNA : 0;
sta_id1 |= update_def_on_tx ? AR5K_STA_ID1_DESC_ANTENNA : 0;
@@ -2171,8 +2145,6 @@ ath5k_get_chan_pcal_surrounding_piers(struct ath5k_hw *ah,
done:
*pcinfo_l = &pcinfo[idx_l];
*pcinfo_r = &pcinfo[idx_r];
-
- return;
}
/*
@@ -2441,19 +2413,6 @@ ath5k_combine_linear_pcdac_curves(struct ath5k_hw *ah, s16* table_min,
pcdac_tmp = pcdac_high_pwr;
edge_flag = 0x40;
-#if 0
- /* If both min and max power limits are in lower
- * power curve's range, only use the low power curve.
- * TODO: min/max levels are related to target
- * power values requested from driver/user
- * XXX: Is this really needed ? */
- if (min_pwr < table_max[1] &&
- max_pwr < table_max[1]) {
- edge_flag = 0;
- pcdac_tmp = pcdac_low_pwr;
- max_pwr_idx = (table_max[1] - table_min[1])/2;
- }
-#endif
} else {
pcdac_low_pwr = ah->ah_txpower.tmpL[1]; /* Zeroed */
pcdac_high_pwr = ah->ah_txpower.tmpL[0];
@@ -2600,7 +2559,7 @@ ath5k_combine_pwr_to_pdadc_curves(struct ath5k_hw *ah,
max_idx = (pdadc_n < table_size) ? pdadc_n : table_size;
/* Fill pdadc_out table */
- while (pdadc_0 < max_idx)
+ while (pdadc_0 < max_idx && pdadc_i < 128)
pdadc_out[pdadc_i++] = pdadc_tmp[pdadc_0++];
/* Need to extrapolate above this pdgain? */
@@ -3144,5 +3103,3 @@ int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 txpower)
return ath5k_hw_txpower(ah, channel, ee_mode, txpower);
}
-
-#undef _ATH5K_PHY
diff --git a/drivers/net/wireless/ath/ath5k/qcu.c b/drivers/net/wireless/ath/ath5k/qcu.c
index 9122a8556f45..f5831da33f7b 100644
--- a/drivers/net/wireless/ath/ath5k/qcu.c
+++ b/drivers/net/wireless/ath/ath5k/qcu.c
@@ -517,23 +517,6 @@ int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
}
/*
- * Get slot time from DCU
- */
-unsigned int ath5k_hw_get_slot_time(struct ath5k_hw *ah)
-{
- unsigned int slot_time_clock;
-
- ATH5K_TRACE(ah->ah_sc);
-
- if (ah->ah_version == AR5K_AR5210)
- slot_time_clock = ath5k_hw_reg_read(ah, AR5K_SLOT_TIME);
- else
- slot_time_clock = ath5k_hw_reg_read(ah, AR5K_DCU_GBL_IFS_SLOT);
-
- return ath5k_hw_clocktoh(ah, slot_time_clock & 0xffff);
-}
-
-/*
* Set slot time on DCU
*/
int ath5k_hw_set_slot_time(struct ath5k_hw *ah, unsigned int slot_time)
diff --git a/drivers/net/wireless/ath/ath5k/reg.h b/drivers/net/wireless/ath/ath5k/reg.h
index 1464f89b249c..55b4ac6d236f 100644
--- a/drivers/net/wireless/ath/ath5k/reg.h
+++ b/drivers/net/wireless/ath/ath5k/reg.h
@@ -212,10 +212,10 @@
* MIB control register
*/
#define AR5K_MIBC 0x0040 /* Register Address */
-#define AR5K_MIBC_COW 0x00000001 /* Warn test indicator */
+#define AR5K_MIBC_COW 0x00000001 /* Counter Overflow Warning */
#define AR5K_MIBC_FMC 0x00000002 /* Freeze MIB Counters */
-#define AR5K_MIBC_CMC 0x00000004 /* Clean MIB Counters */
-#define AR5K_MIBC_MCS 0x00000008 /* MIB counter strobe */
+#define AR5K_MIBC_CMC 0x00000004 /* Clear MIB Counters */
+#define AR5K_MIBC_MCS 0x00000008 /* MIB counter strobe, increment all */
/*
* Timeout prescale register
@@ -1139,8 +1139,8 @@
#define AR5K_STA_ID1_DEFAULT_ANTENNA 0x00200000 /* Use default antenna */
#define AR5K_STA_ID1_DESC_ANTENNA 0x00400000 /* Update antenna from descriptor */
#define AR5K_STA_ID1_RTS_DEF_ANTENNA 0x00800000 /* Use default antenna for RTS */
-#define AR5K_STA_ID1_ACKCTS_6MB 0x01000000 /* Use 6Mbit/s for ACK/CTS */
-#define AR5K_STA_ID1_BASE_RATE_11B 0x02000000 /* Use 11b base rate for ACK/CTS [5211+] */
+#define AR5K_STA_ID1_ACKCTS_6MB 0x01000000 /* Rate to use for ACK/CTS. 0: highest mandatory rate <= RX rate; 1: 1Mbps in B mode */
+#define AR5K_STA_ID1_BASE_RATE_11B 0x02000000 /* 802.11b base rate. 0: 1, 2, 5.5 and 11Mbps; 1: 1 and 2Mbps. [5211+] */
#define AR5K_STA_ID1_SELFGEN_DEF_ANT 0x04000000 /* Use def. antenna for self generated frames */
#define AR5K_STA_ID1_CRYPT_MIC_EN 0x08000000 /* Enable MIC */
#define AR5K_STA_ID1_KEYSRCH_MODE 0x10000000 /* Look up key when key id != 0 */
@@ -1516,7 +1516,14 @@
AR5K_NAV_5210 : AR5K_NAV_5211)
/*
- * RTS success register
+ * MIB counters:
+ *
+ * The maximum value is 0xc000; if it is reached, we get a MIB interrupt.
+ * They can be controlled via AR5K_MIBC and are cleared on read.
+ */
+
+/*
+ * RTS success (MIB counter)
*/
#define AR5K_RTS_OK_5210 0x8090
#define AR5K_RTS_OK_5211 0x8088
@@ -1524,7 +1531,7 @@
AR5K_RTS_OK_5210 : AR5K_RTS_OK_5211)
/*
- * RTS failure register
+ * RTS failure (MIB counter)
*/
#define AR5K_RTS_FAIL_5210 0x8094
#define AR5K_RTS_FAIL_5211 0x808c
@@ -1532,7 +1539,7 @@
AR5K_RTS_FAIL_5210 : AR5K_RTS_FAIL_5211)
/*
- * ACK failure register
+ * ACK failure (MIB counter)
*/
#define AR5K_ACK_FAIL_5210 0x8098
#define AR5K_ACK_FAIL_5211 0x8090
@@ -1540,7 +1547,7 @@
AR5K_ACK_FAIL_5210 : AR5K_ACK_FAIL_5211)
/*
- * FCS failure register
+ * FCS failure (MIB counter)
*/
#define AR5K_FCS_FAIL_5210 0x809c
#define AR5K_FCS_FAIL_5211 0x8094
@@ -1667,11 +1674,17 @@
/*
* Profile count registers
+ *
+ * These registers can be cleared and frozen with AR5K_MIBC, but they do not
+ * generate a MIB interrupt.
+ * Instead of overflowing, they shift by one bit to the right. All registers
+ * shift together, i.e. when one reaches the max, all shift at the same time by
+ * one bit to the right. This way we should always get consistent values.
*/
#define AR5K_PROFCNT_TX 0x80ec /* Tx count */
#define AR5K_PROFCNT_RX 0x80f0 /* Rx count */
-#define AR5K_PROFCNT_RXCLR 0x80f4 /* Clear Rx count */
-#define AR5K_PROFCNT_CYCLE 0x80f8 /* Cycle count (?) */
+#define AR5K_PROFCNT_RXCLR 0x80f4 /* Busy count */
+#define AR5K_PROFCNT_CYCLE 0x80f8 /* Cycle counter */
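Since all four profile counters shift together, ratios between them stay meaningful even after a shift. A minimal sketch of turning them into a busy percentage, mirroring the pfc_* computation in the ani debugfs file earlier in this patch (the function name is illustrative, not part of the patch):

/* Illustrative only: sample the profile counters and compute what
 * percentage of elapsed cycles the medium was busy.
 * Needs <linux/math64.h> for div_u64() on 32-bit builds. */
static unsigned int ath5k_profcnt_busy_percent_sketch(struct ath5k_hw *ah)
{
	u64 busy = ath5k_hw_reg_read(ah, AR5K_PROFCNT_RXCLR);
	u32 cycles = ath5k_hw_reg_read(ah, AR5K_PROFCNT_CYCLE);

	/* same busy * 100 / cycles form as the ani debugfs output */
	return cycles ? (unsigned int)div_u64(busy * 100, cycles) : 0;
}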
/*
* Quiet period control registers
@@ -1758,7 +1771,7 @@
#define AR5K_CCK_FIL_CNT 0x8128
/*
- * PHY Error Counters (?)
+ * PHY Error Counters (same masks as AR5K_PHY_ERR_FIL)
*/
#define AR5K_PHYERR_CNT1 0x812c
#define AR5K_PHYERR_CNT1_MASK 0x8130
@@ -1766,6 +1779,9 @@
#define AR5K_PHYERR_CNT2 0x8134
#define AR5K_PHYERR_CNT2_MASK 0x8138
+/* if the PHY Error Counters reach this maximum, we get MIB interrupts */
+#define ATH5K_PHYERR_CNT_MAX 0x00c00000
+
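The ani debugfs readout earlier in this patch reverses this preload arithmetic (ATH5K_ANI_OFDM_TRIG_HIGH - (ATH5K_PHYERR_CNT_MAX - counter)). A minimal sketch of the arming side, assuming the driver preloads the counter so the MIB interrupt fires after a chosen number of further PHY errors (the function name is illustrative, not taken from this patch):

/* Illustrative only: arm the OFDM PHY error counter so that the hardware
 * raises a MIB interrupt after 'trigger' additional PHY errors. */
static void ath5k_phyerr_cnt_arm_sketch(struct ath5k_hw *ah, u32 trigger)
{
	/* the counter counts up and interrupts at ATH5K_PHYERR_CNT_MAX */
	ath5k_hw_reg_write(ah, ATH5K_PHYERR_CNT_MAX - trigger, AR5K_PHYERR_CNT1);
}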
/*
* TSF Threshold register (?)
*/
@@ -1974,7 +1990,7 @@
#define AR5K_PHY_SETTLING 0x9844 /* Register Address */
#define AR5K_PHY_SETTLING_AGC 0x0000007f /* AGC settling time */
#define AR5K_PHY_SETTLING_AGC_S 0
-#define AR5K_PHY_SETTLING_SWITCH 0x00003f80 /* Switch settlig time */
+#define AR5K_PHY_SETTLING_SWITCH 0x00003f80 /* Switch settling time */
#define AR5K_PHY_SETTLING_SWITCH_S 7
/*
diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c
index cbf28e379843..307f80e83f94 100644
--- a/drivers/net/wireless/ath/ath5k/reset.c
+++ b/drivers/net/wireless/ath/ath5k/reset.c
@@ -19,8 +19,6 @@
*
*/
-#define _ATH5K_RESET
-
/*****************************\
Reset functions and helpers
\*****************************/
@@ -34,6 +32,27 @@
#include "base.h"
#include "debug.h"
+/*
+ * Check if a register write has been completed
+ */
+int ath5k_hw_register_timeout(struct ath5k_hw *ah, u32 reg, u32 flag, u32 val,
+ bool is_set)
+{
+ int i;
+ u32 data;
+
+ for (i = AR5K_TUNE_REGISTER_TIMEOUT; i > 0; i--) {
+ data = ath5k_hw_reg_read(ah, reg);
+ if (is_set && (data & flag))
+ break;
+ else if ((data & flag) == val)
+ break;
+ udelay(15);
+ }
+
+ return (i <= 0) ? -EAGAIN : 0;
+}
+
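A brief usage sketch for the helper above, based on the polling pattern used by the (removed) beacon-queue wait code; the wrapper name is illustrative, not part of this patch:

/* Illustrative only: wait until a queue's pending-frame counter field
 * reads back as zero, returning -EIO on timeout. */
static int ath5k_wait_queue_idle_sketch(struct ath5k_hw *ah, unsigned int queue)
{
	if (ath5k_hw_register_timeout(ah, AR5K_QUEUE_STATUS(queue),
				      AR5K_QCU_STS_FRMPENDCNT, 0, false))
		return -EIO;
	return 0;
}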
/**
* ath5k_hw_write_ofdm_timings - set OFDM timings on AR5212
*
@@ -221,8 +240,8 @@ static int ath5k_hw_nic_reset(struct ath5k_hw *ah, u32 val)
/*
* Sleep control
*/
-int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode,
- bool set_chip, u16 sleep_duration)
+static int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode,
+ bool set_chip, u16 sleep_duration)
{
unsigned int i;
u32 staid, data;
@@ -608,7 +627,6 @@ static void ath5k_hw_set_sleep_clock(struct ath5k_hw *ah, bool enable)
AR5K_REG_WRITE_BITS(ah, AR5K_TSF_PARM, AR5K_TSF_PARM_INC, 1);
}
- return;
}
/* TODO: Half/Quarter rate */
@@ -864,8 +882,6 @@ static void ath5k_hw_commit_eeprom_settings(struct ath5k_hw *ah,
/* Heavy clipping -disable for now */
if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_5_1)
ath5k_hw_reg_write(ah, 0, AR5K_PHY_HEAVY_CLIP_ENABLE);
-
- return;
}
/*
@@ -1017,11 +1033,6 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
if (ret)
return ret;
- /*
- * Initialize operating mode
- */
- ah->ah_op_mode = op_mode;
-
/* PHY access enable */
if (ah->ah_mac_srev >= AR5K_SREV_AR5211)
ath5k_hw_reg_write(ah, AR5K_PHY_SHIFT_5GHZ, AR5K_PHY(0));
@@ -1192,7 +1203,7 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
ath5k_hw_set_associd(ah);
/* Set PCU config */
- ath5k_hw_set_opmode(ah);
+ ath5k_hw_set_opmode(ah, op_mode);
/* Clear any pending interrupts
* PISR/SISR Not available on 5210 */
@@ -1378,7 +1389,7 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
* external 32KHz crystal when sleeping if one
* exists */
if (ah->ah_version == AR5K_AR5212 &&
- ah->ah_op_mode != NL80211_IFTYPE_AP)
+ op_mode != NL80211_IFTYPE_AP)
ath5k_hw_set_sleep_clock(ah, true);
/*
@@ -1388,5 +1399,3 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
ath5k_hw_reset_tsf(ah);
return 0;
}
-
-#undef _ATH5K_RESET
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index 5774cea23a3b..35f23bdc442f 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -32,3 +32,24 @@ config ATH9K_DEBUGFS
Also required for changing debug message flags at run time.
+config ATH9K_HTC
+ tristate "Atheros HTC based wireless cards support"
+ depends on USB && MAC80211
+ select ATH9K_HW
+ select MAC80211_LEDS
+ select LEDS_CLASS
+ select NEW_LEDS
+ select ATH9K_COMMON
+ ---help---
+ Support for Atheros HTC based cards.
+ Chipsets supported: AR9271
+
+ For more information: http://wireless.kernel.org/en/users/Drivers/ath9k_htc
+
+ The built module will be ath9k_htc.
+
+config ATH9K_HTC_DEBUGFS
+ bool "Atheros ath9k_htc debugging"
+ depends on ATH9K_HTC && DEBUG_FS
+ ---help---
+ Say Y if you need access to ath9k_htc's statistics.
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index 6b50d5eb9ec3..dd112be218ab 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -13,18 +13,38 @@ ath9k-$(CONFIG_ATH9K_DEBUGFS) += debug.o
obj-$(CONFIG_ATH9K) += ath9k.o
-ath9k_hw-y:= hw.o \
+ath9k_hw-y:= \
+ ar9002_hw.o \
+ ar9003_hw.o \
+ hw.o \
+ ar9003_phy.o \
+ ar9002_phy.o \
+ ar5008_phy.o \
+ ar9002_calib.o \
+ ar9003_calib.o \
+ calib.o \
eeprom.o \
eeprom_def.o \
eeprom_4k.o \
eeprom_9287.o \
- calib.o \
ani.o \
- phy.o \
btcoex.o \
mac.o \
+ ar9002_mac.o \
+ ar9003_mac.o \
+ ar9003_eeprom.o
obj-$(CONFIG_ATH9K_HW) += ath9k_hw.o
obj-$(CONFIG_ATH9K_COMMON) += ath9k_common.o
ath9k_common-y:= common.o
+
+ath9k_htc-y += htc_hst.o \
+ hif_usb.o \
+ wmi.o \
+ htc_drv_txrx.o \
+ htc_drv_main.o \
+ htc_drv_beacon.o \
+ htc_drv_init.o
+
+obj-$(CONFIG_ATH9K_HTC) += ath9k_htc.o
diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c
index ca4994f13151..85fdd26039c8 100644
--- a/drivers/net/wireless/ath/ath9k/ahb.c
+++ b/drivers/net/wireless/ath/ath9k/ahb.c
@@ -47,6 +47,7 @@ static bool ath_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
}
static struct ath_bus_ops ath_ahb_bus_ops = {
+ .ath_bus_type = ATH_AHB,
.read_cachesize = ath_ahb_read_cachesize,
.eeprom_read = ath_ahb_eeprom_read,
};
diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c
index 2a0cd64c2bfb..ba8b20f01594 100644
--- a/drivers/net/wireless/ath/ath9k/ani.c
+++ b/drivers/net/wireless/ath/ath9k/ani.c
@@ -15,6 +15,7 @@
*/
#include "hw.h"
+#include "hw-ops.h"
static int ath9k_hw_get_ani_channel_idx(struct ath_hw *ah,
struct ath9k_channel *chan)
@@ -37,190 +38,6 @@ static int ath9k_hw_get_ani_channel_idx(struct ath_hw *ah,
return 0;
}
-static bool ath9k_hw_ani_control(struct ath_hw *ah,
- enum ath9k_ani_cmd cmd, int param)
-{
- struct ar5416AniState *aniState = ah->curani;
- struct ath_common *common = ath9k_hw_common(ah);
-
- switch (cmd & ah->ani_function) {
- case ATH9K_ANI_NOISE_IMMUNITY_LEVEL:{
- u32 level = param;
-
- if (level >= ARRAY_SIZE(ah->totalSizeDesired)) {
- ath_print(common, ATH_DBG_ANI,
- "level out of range (%u > %u)\n",
- level,
- (unsigned)ARRAY_SIZE(ah->totalSizeDesired));
- return false;
- }
-
- REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ,
- AR_PHY_DESIRED_SZ_TOT_DES,
- ah->totalSizeDesired[level]);
- REG_RMW_FIELD(ah, AR_PHY_AGC_CTL1,
- AR_PHY_AGC_CTL1_COARSE_LOW,
- ah->coarse_low[level]);
- REG_RMW_FIELD(ah, AR_PHY_AGC_CTL1,
- AR_PHY_AGC_CTL1_COARSE_HIGH,
- ah->coarse_high[level]);
- REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
- AR_PHY_FIND_SIG_FIRPWR,
- ah->firpwr[level]);
-
- if (level > aniState->noiseImmunityLevel)
- ah->stats.ast_ani_niup++;
- else if (level < aniState->noiseImmunityLevel)
- ah->stats.ast_ani_nidown++;
- aniState->noiseImmunityLevel = level;
- break;
- }
- case ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION:{
- const int m1ThreshLow[] = { 127, 50 };
- const int m2ThreshLow[] = { 127, 40 };
- const int m1Thresh[] = { 127, 0x4d };
- const int m2Thresh[] = { 127, 0x40 };
- const int m2CountThr[] = { 31, 16 };
- const int m2CountThrLow[] = { 63, 48 };
- u32 on = param ? 1 : 0;
-
- REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
- AR_PHY_SFCORR_LOW_M1_THRESH_LOW,
- m1ThreshLow[on]);
- REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
- AR_PHY_SFCORR_LOW_M2_THRESH_LOW,
- m2ThreshLow[on]);
- REG_RMW_FIELD(ah, AR_PHY_SFCORR,
- AR_PHY_SFCORR_M1_THRESH,
- m1Thresh[on]);
- REG_RMW_FIELD(ah, AR_PHY_SFCORR,
- AR_PHY_SFCORR_M2_THRESH,
- m2Thresh[on]);
- REG_RMW_FIELD(ah, AR_PHY_SFCORR,
- AR_PHY_SFCORR_M2COUNT_THR,
- m2CountThr[on]);
- REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
- AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW,
- m2CountThrLow[on]);
-
- REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
- AR_PHY_SFCORR_EXT_M1_THRESH_LOW,
- m1ThreshLow[on]);
- REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
- AR_PHY_SFCORR_EXT_M2_THRESH_LOW,
- m2ThreshLow[on]);
- REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
- AR_PHY_SFCORR_EXT_M1_THRESH,
- m1Thresh[on]);
- REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
- AR_PHY_SFCORR_EXT_M2_THRESH,
- m2Thresh[on]);
-
- if (on)
- REG_SET_BIT(ah, AR_PHY_SFCORR_LOW,
- AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
- else
- REG_CLR_BIT(ah, AR_PHY_SFCORR_LOW,
- AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
-
- if (!on != aniState->ofdmWeakSigDetectOff) {
- if (on)
- ah->stats.ast_ani_ofdmon++;
- else
- ah->stats.ast_ani_ofdmoff++;
- aniState->ofdmWeakSigDetectOff = !on;
- }
- break;
- }
- case ATH9K_ANI_CCK_WEAK_SIGNAL_THR:{
- const int weakSigThrCck[] = { 8, 6 };
- u32 high = param ? 1 : 0;
-
- REG_RMW_FIELD(ah, AR_PHY_CCK_DETECT,
- AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK,
- weakSigThrCck[high]);
- if (high != aniState->cckWeakSigThreshold) {
- if (high)
- ah->stats.ast_ani_cckhigh++;
- else
- ah->stats.ast_ani_ccklow++;
- aniState->cckWeakSigThreshold = high;
- }
- break;
- }
- case ATH9K_ANI_FIRSTEP_LEVEL:{
- const int firstep[] = { 0, 4, 8 };
- u32 level = param;
-
- if (level >= ARRAY_SIZE(firstep)) {
- ath_print(common, ATH_DBG_ANI,
- "level out of range (%u > %u)\n",
- level,
- (unsigned) ARRAY_SIZE(firstep));
- return false;
- }
- REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
- AR_PHY_FIND_SIG_FIRSTEP,
- firstep[level]);
- if (level > aniState->firstepLevel)
- ah->stats.ast_ani_stepup++;
- else if (level < aniState->firstepLevel)
- ah->stats.ast_ani_stepdown++;
- aniState->firstepLevel = level;
- break;
- }
- case ATH9K_ANI_SPUR_IMMUNITY_LEVEL:{
- const int cycpwrThr1[] =
- { 2, 4, 6, 8, 10, 12, 14, 16 };
- u32 level = param;
-
- if (level >= ARRAY_SIZE(cycpwrThr1)) {
- ath_print(common, ATH_DBG_ANI,
- "level out of range (%u > %u)\n",
- level,
- (unsigned) ARRAY_SIZE(cycpwrThr1));
- return false;
- }
- REG_RMW_FIELD(ah, AR_PHY_TIMING5,
- AR_PHY_TIMING5_CYCPWR_THR1,
- cycpwrThr1[level]);
- if (level > aniState->spurImmunityLevel)
- ah->stats.ast_ani_spurup++;
- else if (level < aniState->spurImmunityLevel)
- ah->stats.ast_ani_spurdown++;
- aniState->spurImmunityLevel = level;
- break;
- }
- case ATH9K_ANI_PRESENT:
- break;
- default:
- ath_print(common, ATH_DBG_ANI,
- "invalid cmd %u\n", cmd);
- return false;
- }
-
- ath_print(common, ATH_DBG_ANI, "ANI parameters:\n");
- ath_print(common, ATH_DBG_ANI,
- "noiseImmunityLevel=%d, spurImmunityLevel=%d, "
- "ofdmWeakSigDetectOff=%d\n",
- aniState->noiseImmunityLevel,
- aniState->spurImmunityLevel,
- !aniState->ofdmWeakSigDetectOff);
- ath_print(common, ATH_DBG_ANI,
- "cckWeakSigThreshold=%d, "
- "firstepLevel=%d, listenTime=%d\n",
- aniState->cckWeakSigThreshold,
- aniState->firstepLevel,
- aniState->listenTime);
- ath_print(common, ATH_DBG_ANI,
- "cycleCount=%d, ofdmPhyErrCount=%d, cckPhyErrCount=%d\n\n",
- aniState->cycleCount,
- aniState->ofdmPhyErrCount,
- aniState->cckPhyErrCount);
-
- return true;
-}
-
static void ath9k_hw_update_mibstats(struct ath_hw *ah,
struct ath9k_mib_stats *stats)
{
@@ -262,11 +79,17 @@ static void ath9k_ani_restart(struct ath_hw *ah)
"Writing ofdmbase=%u cckbase=%u\n",
aniState->ofdmPhyErrBase,
aniState->cckPhyErrBase);
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
REG_WRITE(ah, AR_PHY_ERR_1, aniState->ofdmPhyErrBase);
REG_WRITE(ah, AR_PHY_ERR_2, aniState->cckPhyErrBase);
REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING);
REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING);
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
ath9k_hw_update_mibstats(ah, &ah->ah_mibStats);
aniState->ofdmPhyErrCount = 0;
@@ -540,8 +363,14 @@ void ath9k_ani_reset(struct ath_hw *ah)
ath9k_hw_setrxfilter(ah, ath9k_hw_getrxfilter(ah) &
~ATH9K_RX_FILTER_PHYERR);
ath9k_ani_restart(ah);
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING);
REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
}
void ath9k_hw_ani_monitor(struct ath_hw *ah,
@@ -639,6 +468,8 @@ void ath9k_enable_mib_counters(struct ath_hw *ah)
ath9k_hw_update_mibstats(ah, &ah->ah_mibStats);
+ ENABLE_REGWRITE_BUFFER(ah);
+
REG_WRITE(ah, AR_FILT_OFDM, 0);
REG_WRITE(ah, AR_FILT_CCK, 0);
REG_WRITE(ah, AR_MIBC,
@@ -646,6 +477,9 @@ void ath9k_enable_mib_counters(struct ath_hw *ah)
& 0x0f);
REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING);
REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
}
/* Freeze the MIB counters, get the stats and then clear them */
@@ -809,20 +643,17 @@ void ath9k_hw_ani_init(struct ath_hw *ah)
ath_print(common, ATH_DBG_ANI, "Setting cckErrBase = 0x%08x\n",
ah->ani[0].cckPhyErrBase);
+ ENABLE_REGWRITE_BUFFER(ah);
+
REG_WRITE(ah, AR_PHY_ERR_1, ah->ani[0].ofdmPhyErrBase);
REG_WRITE(ah, AR_PHY_ERR_2, ah->ani[0].cckPhyErrBase);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
ath9k_enable_mib_counters(ah);
ah->aniperiod = ATH9K_ANI_PERIOD;
if (ah->config.enable_ani)
ah->proc_phyerr |= HAL_PROCESS_ANI;
}
-
-void ath9k_hw_ani_disable(struct ath_hw *ah)
-{
- ath_print(ath9k_hw_common(ah), ATH_DBG_ANI, "Disabling ANI\n");
-
- ath9k_hw_disable_mib_counters(ah);
- REG_WRITE(ah, AR_PHY_ERR_1, 0);
- REG_WRITE(ah, AR_PHY_ERR_2, 0);
-}
diff --git a/drivers/net/wireless/ath/ath9k/ani.h b/drivers/net/wireless/ath/ath9k/ani.h
index 4e1ab94a5153..3356762ea384 100644
--- a/drivers/net/wireless/ath/ath9k/ani.h
+++ b/drivers/net/wireless/ath/ath9k/ani.h
@@ -118,6 +118,5 @@ u32 ath9k_hw_GetMibCycleCountsPct(struct ath_hw *ah, u32 *rxc_pcnt,
void ath9k_hw_procmibevent(struct ath_hw *ah);
void ath9k_hw_ani_setup(struct ath_hw *ah);
void ath9k_hw_ani_init(struct ath_hw *ah);
-void ath9k_hw_ani_disable(struct ath_hw *ah);
#endif /* ANI_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_initvals.h b/drivers/net/wireless/ath/ath9k/ar5008_initvals.h
new file mode 100644
index 000000000000..025c31ac6146
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar5008_initvals.h
@@ -0,0 +1,742 @@
+/*
+ * Copyright (c) 2008-2009 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef INITVALS_AR5008_H
+#define INITVALS_AR5008_H
+
+static const u32 ar5416Modes[][6] = {
+ { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
+ { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
+ { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 },
+ { 0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008 },
+ { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 },
+ { 0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab, 0x098813cf },
+ { 0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810, 0x08f04810 },
+ { 0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a, 0x0000320a },
+ { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 },
+ { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 },
+ { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
+ { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 },
+ { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
+ { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 },
+ { 0x00009844, 0x1372161e, 0x1372161e, 0x137216a0, 0x137216a0, 0x137216a0 },
+ { 0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
+ { 0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
+ { 0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
+ { 0x00009850, 0x6c48b4e0, 0x6d48b4e0, 0x6d48b0de, 0x6c48b0de, 0x6c48b0de },
+ { 0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e },
+ { 0x0000985c, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e, 0x31395d5e },
+ { 0x00009860, 0x00049d18, 0x00049d18, 0x00049d18, 0x00049d18, 0x00049d18 },
+ { 0x00009864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 },
+ { 0x00009868, 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190 },
+ { 0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 },
+ { 0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0 },
+ { 0x00009918, 0x000001b8, 0x00000370, 0x00000268, 0x00000134, 0x00000134 },
+ { 0x00009924, 0xd0058a0b, 0xd0058a0b, 0xd0058a0b, 0xd0058a0b, 0xd0058a0b },
+ { 0x00009944, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020 },
+ { 0x00009960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80 },
+ { 0x0000a960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80 },
+ { 0x0000b960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80 },
+ { 0x00009964, 0x00000000, 0x00000000, 0x00001120, 0x00001120, 0x00001120 },
+ { 0x000099bc, 0x001a0a00, 0x001a0a00, 0x001a0a00, 0x001a0a00, 0x001a0a00 },
+ { 0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, 0x038919be },
+ { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 },
+ { 0x000099c8, 0x6af6532c, 0x6af6532c, 0x6af6532c, 0x6af6532c, 0x6af6532c },
+ { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 },
+ { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 },
+ { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x0000a204, 0x00000880, 0x00000880, 0x00000880, 0x00000880, 0x00000880 },
+ { 0x0000a208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, 0xd03e4788 },
+ { 0x0000a20c, 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120, 0x002ac120 },
+ { 0x0000b20c, 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120, 0x002ac120 },
+ { 0x0000c20c, 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120, 0x002ac120 },
+ { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a },
+ { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 },
+ { 0x0000a274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa, 0x0a1a7caa },
+ { 0x0000a300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, 0x18010000 },
+ { 0x0000a304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, 0x2e032402 },
+ { 0x0000a308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06, 0x4a0a3c06 },
+ { 0x0000a30c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, 0x621a540b },
+ { 0x0000a310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, 0x764f6c1b },
+ { 0x0000a314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, 0x845b7a5a },
+ { 0x0000a318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, 0x950f8ccf },
+ { 0x0000a31c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, 0xa5cf9b4f },
+ { 0x0000a320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, 0xbddfaf1f },
+ { 0x0000a324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, 0xd1ffc93f },
+ { 0x0000a328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x0000a32c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x0000a330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x0000a334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+};
+
+static const u32 ar5416Common[][2] = {
+ { 0x0000000c, 0x00000000 },
+ { 0x00000030, 0x00020015 },
+ { 0x00000034, 0x00000005 },
+ { 0x00000040, 0x00000000 },
+ { 0x00000044, 0x00000008 },
+ { 0x00000048, 0x00000008 },
+ { 0x0000004c, 0x00000010 },
+ { 0x00000050, 0x00000000 },
+ { 0x00000054, 0x0000001f },
+ { 0x00000800, 0x00000000 },
+ { 0x00000804, 0x00000000 },
+ { 0x00000808, 0x00000000 },
+ { 0x0000080c, 0x00000000 },
+ { 0x00000810, 0x00000000 },
+ { 0x00000814, 0x00000000 },
+ { 0x00000818, 0x00000000 },
+ { 0x0000081c, 0x00000000 },
+ { 0x00000820, 0x00000000 },
+ { 0x00000824, 0x00000000 },
+ { 0x00001040, 0x002ffc0f },
+ { 0x00001044, 0x002ffc0f },
+ { 0x00001048, 0x002ffc0f },
+ { 0x0000104c, 0x002ffc0f },
+ { 0x00001050, 0x002ffc0f },
+ { 0x00001054, 0x002ffc0f },
+ { 0x00001058, 0x002ffc0f },
+ { 0x0000105c, 0x002ffc0f },
+ { 0x00001060, 0x002ffc0f },
+ { 0x00001064, 0x002ffc0f },
+ { 0x00001230, 0x00000000 },
+ { 0x00001270, 0x00000000 },
+ { 0x00001038, 0x00000000 },
+ { 0x00001078, 0x00000000 },
+ { 0x000010b8, 0x00000000 },
+ { 0x000010f8, 0x00000000 },
+ { 0x00001138, 0x00000000 },
+ { 0x00001178, 0x00000000 },
+ { 0x000011b8, 0x00000000 },
+ { 0x000011f8, 0x00000000 },
+ { 0x00001238, 0x00000000 },
+ { 0x00001278, 0x00000000 },
+ { 0x000012b8, 0x00000000 },
+ { 0x000012f8, 0x00000000 },
+ { 0x00001338, 0x00000000 },
+ { 0x00001378, 0x00000000 },
+ { 0x000013b8, 0x00000000 },
+ { 0x000013f8, 0x00000000 },
+ { 0x00001438, 0x00000000 },
+ { 0x00001478, 0x00000000 },
+ { 0x000014b8, 0x00000000 },
+ { 0x000014f8, 0x00000000 },
+ { 0x00001538, 0x00000000 },
+ { 0x00001578, 0x00000000 },
+ { 0x000015b8, 0x00000000 },
+ { 0x000015f8, 0x00000000 },
+ { 0x00001638, 0x00000000 },
+ { 0x00001678, 0x00000000 },
+ { 0x000016b8, 0x00000000 },
+ { 0x000016f8, 0x00000000 },
+ { 0x00001738, 0x00000000 },
+ { 0x00001778, 0x00000000 },
+ { 0x000017b8, 0x00000000 },
+ { 0x000017f8, 0x00000000 },
+ { 0x0000103c, 0x00000000 },
+ { 0x0000107c, 0x00000000 },
+ { 0x000010bc, 0x00000000 },
+ { 0x000010fc, 0x00000000 },
+ { 0x0000113c, 0x00000000 },
+ { 0x0000117c, 0x00000000 },
+ { 0x000011bc, 0x00000000 },
+ { 0x000011fc, 0x00000000 },
+ { 0x0000123c, 0x00000000 },
+ { 0x0000127c, 0x00000000 },
+ { 0x000012bc, 0x00000000 },
+ { 0x000012fc, 0x00000000 },
+ { 0x0000133c, 0x00000000 },
+ { 0x0000137c, 0x00000000 },
+ { 0x000013bc, 0x00000000 },
+ { 0x000013fc, 0x00000000 },
+ { 0x0000143c, 0x00000000 },
+ { 0x0000147c, 0x00000000 },
+ { 0x00004030, 0x00000002 },
+ { 0x0000403c, 0x00000002 },
+ { 0x00007010, 0x00000000 },
+ { 0x00007038, 0x000004c2 },
+ { 0x00008004, 0x00000000 },
+ { 0x00008008, 0x00000000 },
+ { 0x0000800c, 0x00000000 },
+ { 0x00008018, 0x00000700 },
+ { 0x00008020, 0x00000000 },
+ { 0x00008038, 0x00000000 },
+ { 0x0000803c, 0x00000000 },
+ { 0x00008048, 0x40000000 },
+ { 0x00008054, 0x00000000 },
+ { 0x00008058, 0x00000000 },
+ { 0x0000805c, 0x000fc78f },
+ { 0x00008060, 0x0000000f },
+ { 0x00008064, 0x00000000 },
+ { 0x000080c0, 0x2a82301a },
+ { 0x000080c4, 0x05dc01e0 },
+ { 0x000080c8, 0x1f402710 },
+ { 0x000080cc, 0x01f40000 },
+ { 0x000080d0, 0x00001e00 },
+ { 0x000080d4, 0x00000000 },
+ { 0x000080d8, 0x00400000 },
+ { 0x000080e0, 0xffffffff },
+ { 0x000080e4, 0x0000ffff },
+ { 0x000080e8, 0x003f3f3f },
+ { 0x000080ec, 0x00000000 },
+ { 0x000080f0, 0x00000000 },
+ { 0x000080f4, 0x00000000 },
+ { 0x000080f8, 0x00000000 },
+ { 0x000080fc, 0x00020000 },
+ { 0x00008100, 0x00020000 },
+ { 0x00008104, 0x00000001 },
+ { 0x00008108, 0x00000052 },
+ { 0x0000810c, 0x00000000 },
+ { 0x00008110, 0x00000168 },
+ { 0x00008118, 0x000100aa },
+ { 0x0000811c, 0x00003210 },
+ { 0x00008124, 0x00000000 },
+ { 0x00008128, 0x00000000 },
+ { 0x0000812c, 0x00000000 },
+ { 0x00008130, 0x00000000 },
+ { 0x00008134, 0x00000000 },
+ { 0x00008138, 0x00000000 },
+ { 0x0000813c, 0x00000000 },
+ { 0x00008144, 0xffffffff },
+ { 0x00008168, 0x00000000 },
+ { 0x0000816c, 0x00000000 },
+ { 0x00008170, 0x32143320 },
+ { 0x00008174, 0xfaa4fa50 },
+ { 0x00008178, 0x00000100 },
+ { 0x0000817c, 0x00000000 },
+ { 0x000081c4, 0x00000000 },
+ { 0x000081ec, 0x00000000 },
+ { 0x000081f0, 0x00000000 },
+ { 0x000081f4, 0x00000000 },
+ { 0x000081f8, 0x00000000 },
+ { 0x000081fc, 0x00000000 },
+ { 0x00008200, 0x00000000 },
+ { 0x00008204, 0x00000000 },
+ { 0x00008208, 0x00000000 },
+ { 0x0000820c, 0x00000000 },
+ { 0x00008210, 0x00000000 },
+ { 0x00008214, 0x00000000 },
+ { 0x00008218, 0x00000000 },
+ { 0x0000821c, 0x00000000 },
+ { 0x00008220, 0x00000000 },
+ { 0x00008224, 0x00000000 },
+ { 0x00008228, 0x00000000 },
+ { 0x0000822c, 0x00000000 },
+ { 0x00008230, 0x00000000 },
+ { 0x00008234, 0x00000000 },
+ { 0x00008238, 0x00000000 },
+ { 0x0000823c, 0x00000000 },
+ { 0x00008240, 0x00100000 },
+ { 0x00008244, 0x0010f400 },
+ { 0x00008248, 0x00000100 },
+ { 0x0000824c, 0x0001e800 },
+ { 0x00008250, 0x00000000 },
+ { 0x00008254, 0x00000000 },
+ { 0x00008258, 0x00000000 },
+ { 0x0000825c, 0x400000ff },
+ { 0x00008260, 0x00080922 },
+ { 0x00008264, 0x88000010 },
+ { 0x00008270, 0x00000000 },
+ { 0x00008274, 0x40000000 },
+ { 0x00008278, 0x003e4180 },
+ { 0x0000827c, 0x00000000 },
+ { 0x00008284, 0x0000002c },
+ { 0x00008288, 0x0000002c },
+ { 0x0000828c, 0x00000000 },
+ { 0x00008294, 0x00000000 },
+ { 0x00008298, 0x00000000 },
+ { 0x00008300, 0x00000000 },
+ { 0x00008304, 0x00000000 },
+ { 0x00008308, 0x00000000 },
+ { 0x0000830c, 0x00000000 },
+ { 0x00008310, 0x00000000 },
+ { 0x00008314, 0x00000000 },
+ { 0x00008318, 0x00000000 },
+ { 0x00008328, 0x00000000 },
+ { 0x0000832c, 0x00000007 },
+ { 0x00008330, 0x00000302 },
+ { 0x00008334, 0x00000e00 },
+ { 0x00008338, 0x00070000 },
+ { 0x0000833c, 0x00000000 },
+ { 0x00008340, 0x000107ff },
+ { 0x00009808, 0x00000000 },
+ { 0x0000980c, 0xad848e19 },
+ { 0x00009810, 0x7d14e000 },
+ { 0x00009814, 0x9c0a9f6b },
+ { 0x0000981c, 0x00000000 },
+ { 0x0000982c, 0x0000a000 },
+ { 0x00009830, 0x00000000 },
+ { 0x0000983c, 0x00200400 },
+ { 0x00009840, 0x206a002e },
+ { 0x0000984c, 0x1284233c },
+ { 0x00009854, 0x00000859 },
+ { 0x00009900, 0x00000000 },
+ { 0x00009904, 0x00000000 },
+ { 0x00009908, 0x00000000 },
+ { 0x0000990c, 0x00000000 },
+ { 0x0000991c, 0x10000fff },
+ { 0x00009920, 0x05100000 },
+ { 0x0000a920, 0x05100000 },
+ { 0x0000b920, 0x05100000 },
+ { 0x00009928, 0x00000001 },
+ { 0x0000992c, 0x00000004 },
+ { 0x00009934, 0x1e1f2022 },
+ { 0x00009938, 0x0a0b0c0d },
+ { 0x0000993c, 0x00000000 },
+ { 0x00009948, 0x9280b212 },
+ { 0x0000994c, 0x00020028 },
+ { 0x00009954, 0x5d50e188 },
+ { 0x00009958, 0x00081fff },
+ { 0x0000c95c, 0x004b6a8e },
+ { 0x0000c968, 0x000003ce },
+ { 0x00009970, 0x190fb515 },
+ { 0x00009974, 0x00000000 },
+ { 0x00009978, 0x00000001 },
+ { 0x0000997c, 0x00000000 },
+ { 0x00009980, 0x00000000 },
+ { 0x00009984, 0x00000000 },
+ { 0x00009988, 0x00000000 },
+ { 0x0000998c, 0x00000000 },
+ { 0x00009990, 0x00000000 },
+ { 0x00009994, 0x00000000 },
+ { 0x00009998, 0x00000000 },
+ { 0x0000999c, 0x00000000 },
+ { 0x000099a0, 0x00000000 },
+ { 0x000099a4, 0x00000001 },
+ { 0x000099a8, 0x001fff00 },
+ { 0x000099ac, 0x00000000 },
+ { 0x000099b0, 0x03051000 },
+ { 0x000099dc, 0x00000000 },
+ { 0x000099e0, 0x00000200 },
+ { 0x000099e4, 0xaaaaaaaa },
+ { 0x000099e8, 0x3c466478 },
+ { 0x000099ec, 0x000000aa },
+ { 0x000099fc, 0x00001042 },
+ { 0x00009b00, 0x00000000 },
+ { 0x00009b04, 0x00000001 },
+ { 0x00009b08, 0x00000002 },
+ { 0x00009b0c, 0x00000003 },
+ { 0x00009b10, 0x00000004 },
+ { 0x00009b14, 0x00000005 },
+ { 0x00009b18, 0x00000008 },
+ { 0x00009b1c, 0x00000009 },
+ { 0x00009b20, 0x0000000a },
+ { 0x00009b24, 0x0000000b },
+ { 0x00009b28, 0x0000000c },
+ { 0x00009b2c, 0x0000000d },
+ { 0x00009b30, 0x00000010 },
+ { 0x00009b34, 0x00000011 },
+ { 0x00009b38, 0x00000012 },
+ { 0x00009b3c, 0x00000013 },
+ { 0x00009b40, 0x00000014 },
+ { 0x00009b44, 0x00000015 },
+ { 0x00009b48, 0x00000018 },
+ { 0x00009b4c, 0x00000019 },
+ { 0x00009b50, 0x0000001a },
+ { 0x00009b54, 0x0000001b },
+ { 0x00009b58, 0x0000001c },
+ { 0x00009b5c, 0x0000001d },
+ { 0x00009b60, 0x00000020 },
+ { 0x00009b64, 0x00000021 },
+ { 0x00009b68, 0x00000022 },
+ { 0x00009b6c, 0x00000023 },
+ { 0x00009b70, 0x00000024 },
+ { 0x00009b74, 0x00000025 },
+ { 0x00009b78, 0x00000028 },
+ { 0x00009b7c, 0x00000029 },
+ { 0x00009b80, 0x0000002a },
+ { 0x00009b84, 0x0000002b },
+ { 0x00009b88, 0x0000002c },
+ { 0x00009b8c, 0x0000002d },
+ { 0x00009b90, 0x00000030 },
+ { 0x00009b94, 0x00000031 },
+ { 0x00009b98, 0x00000032 },
+ { 0x00009b9c, 0x00000033 },
+ { 0x00009ba0, 0x00000034 },
+ { 0x00009ba4, 0x00000035 },
+ { 0x00009ba8, 0x00000035 },
+ { 0x00009bac, 0x00000035 },
+ { 0x00009bb0, 0x00000035 },
+ { 0x00009bb4, 0x00000035 },
+ { 0x00009bb8, 0x00000035 },
+ { 0x00009bbc, 0x00000035 },
+ { 0x00009bc0, 0x00000035 },
+ { 0x00009bc4, 0x00000035 },
+ { 0x00009bc8, 0x00000035 },
+ { 0x00009bcc, 0x00000035 },
+ { 0x00009bd0, 0x00000035 },
+ { 0x00009bd4, 0x00000035 },
+ { 0x00009bd8, 0x00000035 },
+ { 0x00009bdc, 0x00000035 },
+ { 0x00009be0, 0x00000035 },
+ { 0x00009be4, 0x00000035 },
+ { 0x00009be8, 0x00000035 },
+ { 0x00009bec, 0x00000035 },
+ { 0x00009bf0, 0x00000035 },
+ { 0x00009bf4, 0x00000035 },
+ { 0x00009bf8, 0x00000010 },
+ { 0x00009bfc, 0x0000001a },
+ { 0x0000a210, 0x40806333 },
+ { 0x0000a214, 0x00106c10 },
+ { 0x0000a218, 0x009c4060 },
+ { 0x0000a220, 0x018830c6 },
+ { 0x0000a224, 0x00000400 },
+ { 0x0000a228, 0x00000bb5 },
+ { 0x0000a22c, 0x00000011 },
+ { 0x0000a234, 0x20202020 },
+ { 0x0000a238, 0x20202020 },
+ { 0x0000a23c, 0x13c889af },
+ { 0x0000a240, 0x38490a20 },
+ { 0x0000a244, 0x00007bb6 },
+ { 0x0000a248, 0x0fff3ffc },
+ { 0x0000a24c, 0x00000001 },
+ { 0x0000a250, 0x0000a000 },
+ { 0x0000a254, 0x00000000 },
+ { 0x0000a258, 0x0cc75380 },
+ { 0x0000a25c, 0x0f0f0f01 },
+ { 0x0000a260, 0xdfa91f01 },
+ { 0x0000a268, 0x00000000 },
+ { 0x0000a26c, 0x0e79e5c6 },
+ { 0x0000b26c, 0x0e79e5c6 },
+ { 0x0000c26c, 0x0e79e5c6 },
+ { 0x0000d270, 0x00820820 },
+ { 0x0000a278, 0x1ce739ce },
+ { 0x0000a27c, 0x051701ce },
+ { 0x0000a338, 0x00000000 },
+ { 0x0000a33c, 0x00000000 },
+ { 0x0000a340, 0x00000000 },
+ { 0x0000a344, 0x00000000 },
+ { 0x0000a348, 0x3fffffff },
+ { 0x0000a34c, 0x3fffffff },
+ { 0x0000a350, 0x3fffffff },
+ { 0x0000a354, 0x0003ffff },
+ { 0x0000a358, 0x79a8aa1f },
+ { 0x0000d35c, 0x07ffffef },
+ { 0x0000d360, 0x0fffffe7 },
+ { 0x0000d364, 0x17ffffe5 },
+ { 0x0000d368, 0x1fffffe4 },
+ { 0x0000d36c, 0x37ffffe3 },
+ { 0x0000d370, 0x3fffffe3 },
+ { 0x0000d374, 0x57ffffe3 },
+ { 0x0000d378, 0x5fffffe2 },
+ { 0x0000d37c, 0x7fffffe2 },
+ { 0x0000d380, 0x7f3c7bba },
+ { 0x0000d384, 0xf3307ff0 },
+ { 0x0000a388, 0x08000000 },
+ { 0x0000a38c, 0x20202020 },
+ { 0x0000a390, 0x20202020 },
+ { 0x0000a394, 0x1ce739ce },
+ { 0x0000a398, 0x000001ce },
+ { 0x0000a39c, 0x00000001 },
+ { 0x0000a3a0, 0x00000000 },
+ { 0x0000a3a4, 0x00000000 },
+ { 0x0000a3a8, 0x00000000 },
+ { 0x0000a3ac, 0x00000000 },
+ { 0x0000a3b0, 0x00000000 },
+ { 0x0000a3b4, 0x00000000 },
+ { 0x0000a3b8, 0x00000000 },
+ { 0x0000a3bc, 0x00000000 },
+ { 0x0000a3c0, 0x00000000 },
+ { 0x0000a3c4, 0x00000000 },
+ { 0x0000a3c8, 0x00000246 },
+ { 0x0000a3cc, 0x20202020 },
+ { 0x0000a3d0, 0x20202020 },
+ { 0x0000a3d4, 0x20202020 },
+ { 0x0000a3dc, 0x1ce739ce },
+ { 0x0000a3e0, 0x000001ce },
+};
+
+static const u32 ar5416Bank0[][2] = {
+ { 0x000098b0, 0x1e5795e5 },
+ { 0x000098e0, 0x02008020 },
+};
+
+static const u32 ar5416BB_RfGain[][3] = {
+ { 0x00009a00, 0x00000000, 0x00000000 },
+ { 0x00009a04, 0x00000040, 0x00000040 },
+ { 0x00009a08, 0x00000080, 0x00000080 },
+ { 0x00009a0c, 0x000001a1, 0x00000141 },
+ { 0x00009a10, 0x000001e1, 0x00000181 },
+ { 0x00009a14, 0x00000021, 0x000001c1 },
+ { 0x00009a18, 0x00000061, 0x00000001 },
+ { 0x00009a1c, 0x00000168, 0x00000041 },
+ { 0x00009a20, 0x000001a8, 0x000001a8 },
+ { 0x00009a24, 0x000001e8, 0x000001e8 },
+ { 0x00009a28, 0x00000028, 0x00000028 },
+ { 0x00009a2c, 0x00000068, 0x00000068 },
+ { 0x00009a30, 0x00000189, 0x000000a8 },
+ { 0x00009a34, 0x000001c9, 0x00000169 },
+ { 0x00009a38, 0x00000009, 0x000001a9 },
+ { 0x00009a3c, 0x00000049, 0x000001e9 },
+ { 0x00009a40, 0x00000089, 0x00000029 },
+ { 0x00009a44, 0x00000170, 0x00000069 },
+ { 0x00009a48, 0x000001b0, 0x00000190 },
+ { 0x00009a4c, 0x000001f0, 0x000001d0 },
+ { 0x00009a50, 0x00000030, 0x00000010 },
+ { 0x00009a54, 0x00000070, 0x00000050 },
+ { 0x00009a58, 0x00000191, 0x00000090 },
+ { 0x00009a5c, 0x000001d1, 0x00000151 },
+ { 0x00009a60, 0x00000011, 0x00000191 },
+ { 0x00009a64, 0x00000051, 0x000001d1 },
+ { 0x00009a68, 0x00000091, 0x00000011 },
+ { 0x00009a6c, 0x000001b8, 0x00000051 },
+ { 0x00009a70, 0x000001f8, 0x00000198 },
+ { 0x00009a74, 0x00000038, 0x000001d8 },
+ { 0x00009a78, 0x00000078, 0x00000018 },
+ { 0x00009a7c, 0x00000199, 0x00000058 },
+ { 0x00009a80, 0x000001d9, 0x00000098 },
+ { 0x00009a84, 0x00000019, 0x00000159 },
+ { 0x00009a88, 0x00000059, 0x00000199 },
+ { 0x00009a8c, 0x00000099, 0x000001d9 },
+ { 0x00009a90, 0x000000d9, 0x00000019 },
+ { 0x00009a94, 0x000000f9, 0x00000059 },
+ { 0x00009a98, 0x000000f9, 0x00000099 },
+ { 0x00009a9c, 0x000000f9, 0x000000d9 },
+ { 0x00009aa0, 0x000000f9, 0x000000f9 },
+ { 0x00009aa4, 0x000000f9, 0x000000f9 },
+ { 0x00009aa8, 0x000000f9, 0x000000f9 },
+ { 0x00009aac, 0x000000f9, 0x000000f9 },
+ { 0x00009ab0, 0x000000f9, 0x000000f9 },
+ { 0x00009ab4, 0x000000f9, 0x000000f9 },
+ { 0x00009ab8, 0x000000f9, 0x000000f9 },
+ { 0x00009abc, 0x000000f9, 0x000000f9 },
+ { 0x00009ac0, 0x000000f9, 0x000000f9 },
+ { 0x00009ac4, 0x000000f9, 0x000000f9 },
+ { 0x00009ac8, 0x000000f9, 0x000000f9 },
+ { 0x00009acc, 0x000000f9, 0x000000f9 },
+ { 0x00009ad0, 0x000000f9, 0x000000f9 },
+ { 0x00009ad4, 0x000000f9, 0x000000f9 },
+ { 0x00009ad8, 0x000000f9, 0x000000f9 },
+ { 0x00009adc, 0x000000f9, 0x000000f9 },
+ { 0x00009ae0, 0x000000f9, 0x000000f9 },
+ { 0x00009ae4, 0x000000f9, 0x000000f9 },
+ { 0x00009ae8, 0x000000f9, 0x000000f9 },
+ { 0x00009aec, 0x000000f9, 0x000000f9 },
+ { 0x00009af0, 0x000000f9, 0x000000f9 },
+ { 0x00009af4, 0x000000f9, 0x000000f9 },
+ { 0x00009af8, 0x000000f9, 0x000000f9 },
+ { 0x00009afc, 0x000000f9, 0x000000f9 },
+};
+
+static const u32 ar5416Bank1[][2] = {
+ { 0x000098b0, 0x02108421 },
+ { 0x000098ec, 0x00000008 },
+};
+
+static const u32 ar5416Bank2[][2] = {
+ { 0x000098b0, 0x0e73ff17 },
+ { 0x000098e0, 0x00000420 },
+};
+
+static const u32 ar5416Bank3[][3] = {
+ { 0x000098f0, 0x01400018, 0x01c00018 },
+};
+
+static const u32 ar5416Bank6[][3] = {
+
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00e00000, 0x00e00000 },
+ { 0x0000989c, 0x005e0000, 0x005e0000 },
+ { 0x0000989c, 0x00120000, 0x00120000 },
+ { 0x0000989c, 0x00620000, 0x00620000 },
+ { 0x0000989c, 0x00020000, 0x00020000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x40ff0000, 0x40ff0000 },
+ { 0x0000989c, 0x005f0000, 0x005f0000 },
+ { 0x0000989c, 0x00870000, 0x00870000 },
+ { 0x0000989c, 0x00f90000, 0x00f90000 },
+ { 0x0000989c, 0x007b0000, 0x007b0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00f50000, 0x00f50000 },
+ { 0x0000989c, 0x00dc0000, 0x00dc0000 },
+ { 0x0000989c, 0x00110000, 0x00110000 },
+ { 0x0000989c, 0x006100a8, 0x006100a8 },
+ { 0x0000989c, 0x004210a2, 0x004210a2 },
+ { 0x0000989c, 0x0014008f, 0x0014008f },
+ { 0x0000989c, 0x00c40003, 0x00c40003 },
+ { 0x0000989c, 0x003000f2, 0x003000f2 },
+ { 0x0000989c, 0x00440016, 0x00440016 },
+ { 0x0000989c, 0x00410040, 0x00410040 },
+ { 0x0000989c, 0x0001805e, 0x0001805e },
+ { 0x0000989c, 0x0000c0ab, 0x0000c0ab },
+ { 0x0000989c, 0x000000f1, 0x000000f1 },
+ { 0x0000989c, 0x00002081, 0x00002081 },
+ { 0x0000989c, 0x000000d4, 0x000000d4 },
+ { 0x000098d0, 0x0000000f, 0x0010000f },
+};
+
+static const u32 ar5416Bank6TPC[][3] = {
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00e00000, 0x00e00000 },
+ { 0x0000989c, 0x005e0000, 0x005e0000 },
+ { 0x0000989c, 0x00120000, 0x00120000 },
+ { 0x0000989c, 0x00620000, 0x00620000 },
+ { 0x0000989c, 0x00020000, 0x00020000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x40ff0000, 0x40ff0000 },
+ { 0x0000989c, 0x005f0000, 0x005f0000 },
+ { 0x0000989c, 0x00870000, 0x00870000 },
+ { 0x0000989c, 0x00f90000, 0x00f90000 },
+ { 0x0000989c, 0x007b0000, 0x007b0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00f50000, 0x00f50000 },
+ { 0x0000989c, 0x00dc0000, 0x00dc0000 },
+ { 0x0000989c, 0x00110000, 0x00110000 },
+ { 0x0000989c, 0x006100a8, 0x006100a8 },
+ { 0x0000989c, 0x00423022, 0x00423022 },
+ { 0x0000989c, 0x201400df, 0x201400df },
+ { 0x0000989c, 0x00c40002, 0x00c40002 },
+ { 0x0000989c, 0x003000f2, 0x003000f2 },
+ { 0x0000989c, 0x00440016, 0x00440016 },
+ { 0x0000989c, 0x00410040, 0x00410040 },
+ { 0x0000989c, 0x0001805e, 0x0001805e },
+ { 0x0000989c, 0x0000c0ab, 0x0000c0ab },
+ { 0x0000989c, 0x000000e1, 0x000000e1 },
+ { 0x0000989c, 0x00007081, 0x00007081 },
+ { 0x0000989c, 0x000000d4, 0x000000d4 },
+ { 0x000098d0, 0x0000000f, 0x0010000f },
+};
+
+static const u32 ar5416Bank7[][2] = {
+ { 0x0000989c, 0x00000500 },
+ { 0x0000989c, 0x00000800 },
+ { 0x000098cc, 0x0000000e },
+};
+
+static const u32 ar5416Addac[][2] = {
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000003 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x0000000c },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000030 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000060 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000058 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x000098cc, 0x00000000 },
+};
+
+static const u32 ar5416Modes_9100[][6] = {
+ { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
+ { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
+ { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 },
+ { 0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008 },
+ { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 },
+ { 0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab, 0x098813cf },
+ { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 },
+ { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 },
+ { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
+ { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 },
+ { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
+ { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 },
+ { 0x00009844, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0, 0x037216a0 },
+ { 0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
+ { 0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
+ { 0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
+ { 0x00009850, 0x6d48b4e2, 0x6d48b4e2, 0x6d48b0e2, 0x6d48b0e2, 0x6d48b0e2 },
+ { 0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec86d2e, 0x7ec84d2e, 0x7ec82d2e },
+ { 0x0000985c, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e },
+ { 0x00009860, 0x00048d18, 0x00048d18, 0x00048d20, 0x00048d20, 0x00048d18 },
+ { 0x0000c864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 },
+ { 0x00009868, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0 },
+ { 0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 },
+ { 0x00009914, 0x000007d0, 0x000007d0, 0x00000898, 0x00000898, 0x000007d0 },
+ { 0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016 },
+ { 0x00009924, 0xd00a8a07, 0xd00a8a07, 0xd00a8a11, 0xd00a8a0d, 0xd00a8a0d },
+ { 0x00009940, 0x00754604, 0x00754604, 0xfff81204, 0xfff81204, 0xfff81204 },
+ { 0x00009944, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020 },
+ { 0x00009954, 0x5f3ca3de, 0x5f3ca3de, 0xe250a51e, 0xe250a51e, 0xe250a51e },
+ { 0x00009958, 0x2108ecff, 0x2108ecff, 0x3388ffff, 0x3388ffff, 0x3388ffff },
+#ifdef TB243
+ { 0x00009960, 0x00000900, 0x00000900, 0x00009b40, 0x00009b40, 0x00012d80 },
+ { 0x0000a960, 0x00000900, 0x00000900, 0x00009b40, 0x00009b40, 0x00012d80 },
+ { 0x0000b960, 0x00000900, 0x00000900, 0x00009b40, 0x00009b40, 0x00012d80 },
+ { 0x00009964, 0x00000000, 0x00000000, 0x00002210, 0x00002210, 0x00001120 },
+#else
+ { 0x00009960, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0 },
+ { 0x0000a960, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0 },
+ { 0x0000b960, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0 },
+ { 0x00009964, 0x00001120, 0x00001120, 0x00001120, 0x00001120, 0x00001120 },
+#endif
+ { 0x0000c9bc, 0x001a0600, 0x001a0600, 0x001a1000, 0x001a0c00, 0x001a0c00 },
+ { 0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, 0x038919be },
+ { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 },
+ { 0x000099c8, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329 },
+ { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 },
+ { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 },
+ { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x0000a204, 0x00000880, 0x00000880, 0x00000880, 0x00000880, 0x00000880 },
+ { 0x0000a208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, 0xd03e4788 },
+ { 0x0000a20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
+ { 0x0000b20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
+ { 0x0000c20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
+ { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a },
+ { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 },
+ { 0x0000a274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa, 0x0a1a7caa },
+ { 0x0000a300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, 0x18010000 },
+ { 0x0000a304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, 0x2e032402 },
+ { 0x0000a308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06, 0x4a0a3c06 },
+ { 0x0000a30c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, 0x621a540b },
+ { 0x0000a310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, 0x764f6c1b },
+ { 0x0000a314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, 0x845b7a5a },
+ { 0x0000a318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, 0x950f8ccf },
+ { 0x0000a31c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, 0xa5cf9b4f },
+ { 0x0000a320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, 0xbddfaf1f },
+ { 0x0000a324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, 0xd1ffc93f },
+ { 0x0000a328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x0000a32c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x0000a330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x0000a334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+};
+
+#endif /* INITVALS_AR5008_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
new file mode 100644
index 000000000000..b2c17c98bb38
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -0,0 +1,1374 @@
+/*
+ * Copyright (c) 2008-2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "hw.h"
+#include "hw-ops.h"
+#include "../regd.h"
+#include "ar9002_phy.h"
+
+/* All code below is for non single-chip solutions */
+
+/**
+ * ar5008_hw_phy_modify_rx_buffer() - perform analog swizzling of parameters
+ * @rfBuf: RF bank scratch buffer to modify
+ * @reg32: value to write (its low @numBits bits, bit-reversed)
+ * @numBits: number of bits of @reg32 to write
+ * @firstBit: first bit position (1-based) within the bank
+ * @column: byte lane of each 32-bit buffer entry to modify
+ *
+ * Performs analog "swizzling" of parameters into their location.
+ * Used on external AR2133/AR5133 radios.
+ */
+static void ar5008_hw_phy_modify_rx_buffer(u32 *rfBuf, u32 reg32,
+ u32 numBits, u32 firstBit,
+ u32 column)
+{
+ u32 tmp32, mask, arrayEntry, lastBit;
+ int32_t bitPosition, bitsLeft;
+
+ tmp32 = ath9k_hw_reverse_bits(reg32, numBits);
+ arrayEntry = (firstBit - 1) / 8;
+ bitPosition = (firstBit - 1) % 8;
+ bitsLeft = numBits;
+ while (bitsLeft > 0) {
+ lastBit = (bitPosition + bitsLeft > 8) ?
+ 8 : bitPosition + bitsLeft;
+ mask = (((1 << lastBit) - 1) ^ ((1 << bitPosition) - 1)) <<
+ (column * 8);
+ rfBuf[arrayEntry] &= ~mask;
+ rfBuf[arrayEntry] |= ((tmp32 << bitPosition) <<
+ (column * 8)) & mask;
+ bitsLeft -= 8 - bitPosition;
+ tmp32 = tmp32 >> (8 - bitPosition);
+ bitPosition = 0;
+ arrayEntry++;
+ }
+}
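+
+/*
+ * A worked example of the swizzling above: the rf_pwd_icsyndiv update
+ * in ar5008_hw_force_bias() below uses numBits = 3, firstBit = 181 and
+ * column = 3, giving arrayEntry = 22 and bitPosition = 4, so the three
+ * bit-reversed bits land in bits 28..30 of rfBuf[22] (bit offset 4 of
+ * byte lane 3).
+ */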
+
+/*
+ * Fix for the orientation sensitivity issue on the 2.4 GHz band by
+ * increasing rf_pwd_icsyndiv.
+ *
+ * Theoretical Rules:
+ * if 2 GHz band
+ * if forceBiasAuto
+ * if synth_freq < 2412
+ * bias = 0
+ * else if 2412 <= synth_freq <= 2422
+ * bias = 1
+ * else // synth_freq > 2422
+ * bias = 2
+ * else if forceBias > 0
+ * bias = forceBias & 7
+ * else
+ * no change, use value from ini file
+ * else
+ * no change, invalid band
+ *
+ * 1st Mod:
+ * 2422 also uses value of 2
+ * <approved>
+ *
+ * 2nd Mod:
+ * Less than 2412 uses value of 0, 2412 and above uses value of 2
+ */
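+/*
+ * As implemented below: synth_freq < 2412 selects bias 0, 2412..2421
+ * selects bias 1 and 2422 or above selects bias 2; the chosen value is
+ * bit-reversed and swizzled into rf_pwd_icsyndiv via the Bank 6 buffer.
+ */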
+static void ar5008_hw_force_bias(struct ath_hw *ah, u16 synth_freq)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ u32 tmp_reg;
+ int reg_writes = 0;
+ u32 new_bias = 0;
+
+ if (!AR_SREV_5416(ah) || synth_freq >= 3000)
+ return;
+
+ BUG_ON(AR_SREV_9280_10_OR_LATER(ah));
+
+ if (synth_freq < 2412)
+ new_bias = 0;
+ else if (synth_freq < 2422)
+ new_bias = 1;
+ else
+ new_bias = 2;
+
+ /* pre-reverse this field */
+ tmp_reg = ath9k_hw_reverse_bits(new_bias, 3);
+
+ ath_print(common, ATH_DBG_CONFIG,
+ "Force rf_pwd_icsyndiv to %1d on %4d\n",
+ new_bias, synth_freq);
+
+ /* swizzle rf_pwd_icsyndiv */
+ ar5008_hw_phy_modify_rx_buffer(ah->analogBank6Data, tmp_reg, 3, 181, 3);
+
+ /* write Bank 6 with new params */
+ REG_WRITE_RF_ARRAY(&ah->iniBank6, ah->analogBank6Data, reg_writes);
+}
+
+/**
+ * ar5008_hw_set_channel - tune to a channel on the external AR2133/AR5133 radios
+ * @ah: atheros hardware structure
+ * @chan: channel to tune to
+ *
+ * For the external AR2133/AR5133 radios, takes the MHz channel value and sets
+ * the channel on the radio. Assumes writes to the analog bus are enabled and
+ * that the Bank 6 register cache lives in ah->analogBank6Data.
+ */
+static int ar5008_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ u32 channelSel = 0;
+ u32 bModeSynth = 0;
+ u32 aModeRefSel = 0;
+ u32 reg32 = 0;
+ u16 freq;
+ struct chan_centers centers;
+
+ ath9k_hw_get_channel_centers(ah, chan, &centers);
+ freq = centers.synth_center;
+
+ if (freq < 4800) {
+ u32 txctl;
+
+ if (((freq - 2192) % 5) == 0) {
+ channelSel = ((freq - 672) * 2 - 3040) / 10;
+ bModeSynth = 0;
+ } else if (((freq - 2224) % 5) == 0) {
+ channelSel = ((freq - 704) * 2 - 3040) / 10;
+ bModeSynth = 1;
+ } else {
+ ath_print(common, ATH_DBG_FATAL,
+ "Invalid channel %u MHz\n", freq);
+ return -EINVAL;
+ }
+
+ channelSel = (channelSel << 2) & 0xff;
+ channelSel = ath9k_hw_reverse_bits(channelSel, 8);
+
+ txctl = REG_READ(ah, AR_PHY_CCK_TX_CTRL);
+ if (freq == 2484) {
+
+ REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
+ txctl | AR_PHY_CCK_TX_CTRL_JAPAN);
+ } else {
+ REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
+ txctl & ~AR_PHY_CCK_TX_CTRL_JAPAN);
+ }
+
+ } else if ((freq % 20) == 0 && freq >= 5120) {
+ channelSel =
+ ath9k_hw_reverse_bits(((freq - 4800) / 20 << 2), 8);
+ aModeRefSel = ath9k_hw_reverse_bits(1, 2);
+ } else if ((freq % 10) == 0) {
+ channelSel =
+ ath9k_hw_reverse_bits(((freq - 4800) / 10 << 1), 8);
+ if (AR_SREV_9100(ah) || AR_SREV_9160_10_OR_LATER(ah))
+ aModeRefSel = ath9k_hw_reverse_bits(2, 2);
+ else
+ aModeRefSel = ath9k_hw_reverse_bits(1, 2);
+ } else if ((freq % 5) == 0) {
+ channelSel = ath9k_hw_reverse_bits((freq - 4800) / 5, 8);
+ aModeRefSel = ath9k_hw_reverse_bits(1, 2);
+ } else {
+ ath_print(common, ATH_DBG_FATAL,
+ "Invalid channel %u MHz\n", freq);
+ return -EINVAL;
+ }
+
+ ar5008_hw_force_bias(ah, freq);
+
+ reg32 =
+ (channelSel << 8) | (aModeRefSel << 2) | (bModeSynth << 1) |
+ (1 << 5) | 0x1;
+
+ REG_WRITE(ah, AR_PHY(0x37), reg32);
+
+ ah->curchan = chan;
+ ah->curchan_rad_index = -1;
+
+ return 0;
+}
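+
+/*
+ * Worked example for the 2 GHz path above: at 2412 MHz,
+ * (2412 - 2192) % 5 == 0, so channelSel = ((2412 - 672) * 2 - 3040) / 10
+ * = 44; the shift, mask and 8-bit reversal turn this into 0x0d, and
+ * reg32 = (0x0d << 8) | (1 << 5) | 0x1 = 0x00000d21 is written to
+ * AR_PHY(0x37).
+ */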
+
+/**
+ * ar5008_hw_spur_mitigate - convert baseband spur frequency for external radios
+ * @ah: atheros hardware structure
+ * @chan: channel being configured
+ *
+ * For non single-chip solutions. Converts the input channel frequency to a
+ * baseband spur frequency and computes the register settings below.
+ */
+static void ar5008_hw_spur_mitigate(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ int bb_spur = AR_NO_SPUR;
+ int bin, cur_bin;
+ int spur_freq_sd;
+ int spur_delta_phase;
+ int denominator;
+ int upper, lower, cur_vit_mask;
+ int tmp, new;
+ int i;
+ int pilot_mask_reg[4] = { AR_PHY_TIMING7, AR_PHY_TIMING8,
+ AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60
+ };
+ int chan_mask_reg[4] = { AR_PHY_TIMING9, AR_PHY_TIMING10,
+ AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60
+ };
+ int inc[4] = { 0, 100, 0, 0 };
+
+ int8_t mask_m[123];
+ int8_t mask_p[123];
+ int8_t mask_amt;
+ int tmp_mask;
+ int cur_bb_spur;
+ bool is2GHz = IS_CHAN_2GHZ(chan);
+
+ memset(&mask_m, 0, sizeof(int8_t) * 123);
+ memset(&mask_p, 0, sizeof(int8_t) * 123);
+
+ for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
+ cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz);
+ if (AR_NO_SPUR == cur_bb_spur)
+ break;
+ cur_bb_spur = cur_bb_spur - (chan->channel * 10);
+ if ((cur_bb_spur > -95) && (cur_bb_spur < 95)) {
+ bb_spur = cur_bb_spur;
+ break;
+ }
+ }
+
+ if (AR_NO_SPUR == bb_spur)
+ return;
+
+ bin = bb_spur * 32;
+
+ tmp = REG_READ(ah, AR_PHY_TIMING_CTRL4(0));
+ new = tmp | (AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI |
+ AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER |
+ AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK |
+ AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK);
+
+ REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0), new);
+
+ new = (AR_PHY_SPUR_REG_MASK_RATE_CNTL |
+ AR_PHY_SPUR_REG_ENABLE_MASK_PPM |
+ AR_PHY_SPUR_REG_MASK_RATE_SELECT |
+ AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI |
+ SM(SPUR_RSSI_THRESH, AR_PHY_SPUR_REG_SPUR_RSSI_THRESH));
+ REG_WRITE(ah, AR_PHY_SPUR_REG, new);
+
+ spur_delta_phase = ((bb_spur * 524288) / 100) &
+ AR_PHY_TIMING11_SPUR_DELTA_PHASE;
+
+ denominator = IS_CHAN_2GHZ(chan) ? 440 : 400;
+ spur_freq_sd = ((bb_spur * 2048) / denominator) & 0x3ff;
+
+ new = (AR_PHY_TIMING11_USE_SPUR_IN_AGC |
+ SM(spur_freq_sd, AR_PHY_TIMING11_SPUR_FREQ_SD) |
+ SM(spur_delta_phase, AR_PHY_TIMING11_SPUR_DELTA_PHASE));
+ REG_WRITE(ah, AR_PHY_TIMING11, new);
+
+ cur_bin = -6000;
+ upper = bin + 100;
+ lower = bin - 100;
+
+ for (i = 0; i < 4; i++) {
+ int pilot_mask = 0;
+ int chan_mask = 0;
+ int bp = 0;
+ for (bp = 0; bp < 30; bp++) {
+ if ((cur_bin > lower) && (cur_bin < upper)) {
+ pilot_mask = pilot_mask | 0x1 << bp;
+ chan_mask = chan_mask | 0x1 << bp;
+ }
+ cur_bin += 100;
+ }
+ cur_bin += inc[i];
+ REG_WRITE(ah, pilot_mask_reg[i], pilot_mask);
+ REG_WRITE(ah, chan_mask_reg[i], chan_mask);
+ }
+
+ cur_vit_mask = 6100;
+ upper = bin + 120;
+ lower = bin - 120;
+
+ for (i = 0; i < 123; i++) {
+ if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) {
+
+ /* workaround for gcc bug #37014 */
+ volatile int tmp_v = abs(cur_vit_mask - bin);
+
+ if (tmp_v < 75)
+ mask_amt = 1;
+ else
+ mask_amt = 0;
+ if (cur_vit_mask < 0)
+ mask_m[abs(cur_vit_mask / 100)] = mask_amt;
+ else
+ mask_p[cur_vit_mask / 100] = mask_amt;
+ }
+ cur_vit_mask -= 100;
+ }
+
+ tmp_mask = (mask_m[46] << 30) | (mask_m[47] << 28)
+ | (mask_m[48] << 26) | (mask_m[49] << 24)
+ | (mask_m[50] << 22) | (mask_m[51] << 20)
+ | (mask_m[52] << 18) | (mask_m[53] << 16)
+ | (mask_m[54] << 14) | (mask_m[55] << 12)
+ | (mask_m[56] << 10) | (mask_m[57] << 8)
+ | (mask_m[58] << 6) | (mask_m[59] << 4)
+ | (mask_m[60] << 2) | (mask_m[61] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK_1, tmp_mask);
+ REG_WRITE(ah, AR_PHY_VIT_MASK2_M_46_61, tmp_mask);
+
+ tmp_mask = (mask_m[31] << 28)
+ | (mask_m[32] << 26) | (mask_m[33] << 24)
+ | (mask_m[34] << 22) | (mask_m[35] << 20)
+ | (mask_m[36] << 18) | (mask_m[37] << 16)
+ | (mask_m[48] << 14) | (mask_m[39] << 12)
+ | (mask_m[40] << 10) | (mask_m[41] << 8)
+ | (mask_m[42] << 6) | (mask_m[43] << 4)
+ | (mask_m[44] << 2) | (mask_m[45] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK_2, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_M_31_45, tmp_mask);
+
+ tmp_mask = (mask_m[16] << 30) | (mask_m[16] << 28)
+ | (mask_m[18] << 26) | (mask_m[18] << 24)
+ | (mask_m[20] << 22) | (mask_m[20] << 20)
+ | (mask_m[22] << 18) | (mask_m[22] << 16)
+ | (mask_m[24] << 14) | (mask_m[24] << 12)
+ | (mask_m[25] << 10) | (mask_m[26] << 8)
+ | (mask_m[27] << 6) | (mask_m[28] << 4)
+ | (mask_m[29] << 2) | (mask_m[30] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK_3, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_M_16_30, tmp_mask);
+
+ tmp_mask = (mask_m[0] << 30) | (mask_m[1] << 28)
+ | (mask_m[2] << 26) | (mask_m[3] << 24)
+ | (mask_m[4] << 22) | (mask_m[5] << 20)
+ | (mask_m[6] << 18) | (mask_m[7] << 16)
+ | (mask_m[8] << 14) | (mask_m[9] << 12)
+ | (mask_m[10] << 10) | (mask_m[11] << 8)
+ | (mask_m[12] << 6) | (mask_m[13] << 4)
+ | (mask_m[14] << 2) | (mask_m[15] << 0);
+ REG_WRITE(ah, AR_PHY_MASK_CTL, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_M_00_15, tmp_mask);
+
+ tmp_mask = (mask_p[15] << 28)
+ | (mask_p[14] << 26) | (mask_p[13] << 24)
+ | (mask_p[12] << 22) | (mask_p[11] << 20)
+ | (mask_p[10] << 18) | (mask_p[9] << 16)
+ | (mask_p[8] << 14) | (mask_p[7] << 12)
+ | (mask_p[6] << 10) | (mask_p[5] << 8)
+ | (mask_p[4] << 6) | (mask_p[3] << 4)
+ | (mask_p[2] << 2) | (mask_p[1] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK2_1, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_P_15_01, tmp_mask);
+
+ tmp_mask = (mask_p[30] << 28)
+ | (mask_p[29] << 26) | (mask_p[28] << 24)
+ | (mask_p[27] << 22) | (mask_p[26] << 20)
+ | (mask_p[25] << 18) | (mask_p[24] << 16)
+ | (mask_p[23] << 14) | (mask_p[22] << 12)
+ | (mask_p[21] << 10) | (mask_p[20] << 8)
+ | (mask_p[19] << 6) | (mask_p[18] << 4)
+ | (mask_p[17] << 2) | (mask_p[16] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK2_2, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_P_30_16, tmp_mask);
+
+ tmp_mask = (mask_p[45] << 28)
+ | (mask_p[44] << 26) | (mask_p[43] << 24)
+ | (mask_p[42] << 22) | (mask_p[41] << 20)
+ | (mask_p[40] << 18) | (mask_p[39] << 16)
+ | (mask_p[38] << 14) | (mask_p[37] << 12)
+ | (mask_p[36] << 10) | (mask_p[35] << 8)
+ | (mask_p[34] << 6) | (mask_p[33] << 4)
+ | (mask_p[32] << 2) | (mask_p[31] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK2_3, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_P_45_31, tmp_mask);
+
+ tmp_mask = (mask_p[61] << 30) | (mask_p[60] << 28)
+ | (mask_p[59] << 26) | (mask_p[58] << 24)
+ | (mask_p[57] << 22) | (mask_p[56] << 20)
+ | (mask_p[55] << 18) | (mask_p[54] << 16)
+ | (mask_p[53] << 14) | (mask_p[52] << 12)
+ | (mask_p[51] << 10) | (mask_p[50] << 8)
+ | (mask_p[49] << 6) | (mask_p[48] << 4)
+ | (mask_p[47] << 2) | (mask_p[46] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK2_4, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask);
+}
+
+/**
+ * ar5008_hw_rf_alloc_ext_banks - allocates banks for external radio programming
+ * @ah: atheros hardware structure
+ *
+ * Only required for older devices with external AR2133/AR5133 radios.
+ */
+static int ar5008_hw_rf_alloc_ext_banks(struct ath_hw *ah)
+{
+#define ATH_ALLOC_BANK(bank, size) do { \
+ bank = kzalloc((sizeof(u32) * size), GFP_KERNEL); \
+ if (!bank) { \
+ ath_print(common, ATH_DBG_FATAL, \
+ "Cannot allocate RF banks\n"); \
+ return -ENOMEM; \
+ } \
+ } while (0);
+
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ BUG_ON(AR_SREV_9280_10_OR_LATER(ah));
+
+ ATH_ALLOC_BANK(ah->analogBank0Data, ah->iniBank0.ia_rows);
+ ATH_ALLOC_BANK(ah->analogBank1Data, ah->iniBank1.ia_rows);
+ ATH_ALLOC_BANK(ah->analogBank2Data, ah->iniBank2.ia_rows);
+ ATH_ALLOC_BANK(ah->analogBank3Data, ah->iniBank3.ia_rows);
+ ATH_ALLOC_BANK(ah->analogBank6Data, ah->iniBank6.ia_rows);
+ ATH_ALLOC_BANK(ah->analogBank6TPCData, ah->iniBank6TPC.ia_rows);
+ ATH_ALLOC_BANK(ah->analogBank7Data, ah->iniBank7.ia_rows);
+ ATH_ALLOC_BANK(ah->addac5416_21,
+ ah->iniAddac.ia_rows * ah->iniAddac.ia_columns);
+ ATH_ALLOC_BANK(ah->bank6Temp, ah->iniBank6.ia_rows);
+
+ return 0;
+#undef ATH_ALLOC_BANK
+}
+
+
+/**
+ * ar5008_hw_rf_free_ext_banks - Free memory for analog bank scratch buffers
+ * @ah: atheros hardware structure
+ *
+ * For the banks of the external AR2133/AR5133 radios.
+ */
+static void ar5008_hw_rf_free_ext_banks(struct ath_hw *ah)
+{
+#define ATH_FREE_BANK(bank) do { \
+ kfree(bank); \
+ bank = NULL; \
+ } while (0);
+
+ BUG_ON(AR_SREV_9280_10_OR_LATER(ah));
+
+ ATH_FREE_BANK(ah->analogBank0Data);
+ ATH_FREE_BANK(ah->analogBank1Data);
+ ATH_FREE_BANK(ah->analogBank2Data);
+ ATH_FREE_BANK(ah->analogBank3Data);
+ ATH_FREE_BANK(ah->analogBank6Data);
+ ATH_FREE_BANK(ah->analogBank6TPCData);
+ ATH_FREE_BANK(ah->analogBank7Data);
+ ATH_FREE_BANK(ah->addac5416_21);
+ ATH_FREE_BANK(ah->bank6Temp);
+
+#undef ATH_FREE_BANK
+}
+
+/**
+ * ar5008_hw_set_rf_regs - programs rf registers based on EEPROM
+ * @ah: atheros hardware structure
+ * @chan: channel to program for
+ * @modesIndex: mode index (column) into the mode-dependent initval tables
+ *
+ * Used for the external AR2133/AR5133 radios.
+ *
+ * Reads the EEPROM header info from the device structure and programs
+ * all rf registers. This routine requires access to the analog
+ * rf device. This is not required for single-chip devices.
+ */
+static bool ar5008_hw_set_rf_regs(struct ath_hw *ah,
+ struct ath9k_channel *chan,
+ u16 modesIndex)
+{
+ u32 eepMinorRev;
+ u32 ob5GHz = 0, db5GHz = 0;
+ u32 ob2GHz = 0, db2GHz = 0;
+ int regWrites = 0;
+
+ /*
+ * Software does not need to program bank data
+	 * for single chip devices, that is, AR9280 or anything
+ * after that.
+ */
+ if (AR_SREV_9280_10_OR_LATER(ah))
+ return true;
+
+ /* Setup rf parameters */
+ eepMinorRev = ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV);
+
+ /* Setup Bank 0 Write */
+ RF_BANK_SETUP(ah->analogBank0Data, &ah->iniBank0, 1);
+
+ /* Setup Bank 1 Write */
+ RF_BANK_SETUP(ah->analogBank1Data, &ah->iniBank1, 1);
+
+ /* Setup Bank 2 Write */
+ RF_BANK_SETUP(ah->analogBank2Data, &ah->iniBank2, 1);
+
+	/* Setup Bank 3 Write */
+ RF_BANK_SETUP(ah->analogBank3Data, &ah->iniBank3,
+ modesIndex);
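+	/* Setup Bank 6 Write (data taken from the Bank 6 TPC table) */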
+ {
+ int i;
+ for (i = 0; i < ah->iniBank6TPC.ia_rows; i++) {
+ ah->analogBank6Data[i] =
+ INI_RA(&ah->iniBank6TPC, i, modesIndex);
+ }
+ }
+
+ /* Only the 5 or 2 GHz OB/DB need to be set for a mode */
+ if (eepMinorRev >= 2) {
+ if (IS_CHAN_2GHZ(chan)) {
+ ob2GHz = ah->eep_ops->get_eeprom(ah, EEP_OB_2);
+ db2GHz = ah->eep_ops->get_eeprom(ah, EEP_DB_2);
+ ar5008_hw_phy_modify_rx_buffer(ah->analogBank6Data,
+ ob2GHz, 3, 197, 0);
+ ar5008_hw_phy_modify_rx_buffer(ah->analogBank6Data,
+ db2GHz, 3, 194, 0);
+ } else {
+ ob5GHz = ah->eep_ops->get_eeprom(ah, EEP_OB_5);
+ db5GHz = ah->eep_ops->get_eeprom(ah, EEP_DB_5);
+ ar5008_hw_phy_modify_rx_buffer(ah->analogBank6Data,
+ ob5GHz, 3, 203, 0);
+ ar5008_hw_phy_modify_rx_buffer(ah->analogBank6Data,
+ db5GHz, 3, 200, 0);
+ }
+ }
+
+	/* Setup Bank 7 Write */
+ RF_BANK_SETUP(ah->analogBank7Data, &ah->iniBank7, 1);
+
+ /* Write Analog registers */
+ REG_WRITE_RF_ARRAY(&ah->iniBank0, ah->analogBank0Data,
+ regWrites);
+ REG_WRITE_RF_ARRAY(&ah->iniBank1, ah->analogBank1Data,
+ regWrites);
+ REG_WRITE_RF_ARRAY(&ah->iniBank2, ah->analogBank2Data,
+ regWrites);
+ REG_WRITE_RF_ARRAY(&ah->iniBank3, ah->analogBank3Data,
+ regWrites);
+ REG_WRITE_RF_ARRAY(&ah->iniBank6TPC, ah->analogBank6Data,
+ regWrites);
+ REG_WRITE_RF_ARRAY(&ah->iniBank7, ah->analogBank7Data,
+ regWrites);
+
+ return true;
+}
+
+static void ar5008_hw_init_bb(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ u32 synthDelay;
+
+ synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY;
+ if (IS_CHAN_B(chan))
+ synthDelay = (4 * synthDelay) / 22;
+ else
+ synthDelay /= 10;
+
+ REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
+
+ udelay(synthDelay + BASE_ACTIVATE_DELAY);
+}
+
+static void ar5008_hw_init_chain_masks(struct ath_hw *ah)
+{
+ int rx_chainmask, tx_chainmask;
+
+ rx_chainmask = ah->rxchainmask;
+ tx_chainmask = ah->txchainmask;
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
+ switch (rx_chainmask) {
+ case 0x5:
+ DISABLE_REGWRITE_BUFFER(ah);
+ REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
+ AR_PHY_SWAP_ALT_CHAIN);
+ ENABLE_REGWRITE_BUFFER(ah);
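+		/* fall through */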
+ case 0x3:
+ if (ah->hw_version.macVersion == AR_SREV_REVISION_5416_10) {
+ REG_WRITE(ah, AR_PHY_RX_CHAINMASK, 0x7);
+ REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, 0x7);
+ break;
+ }
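+		/* fall through */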
+ case 0x1:
+ case 0x2:
+ case 0x7:
+ REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx_chainmask);
+ REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx_chainmask);
+ break;
+ default:
+ break;
+ }
+
+ REG_WRITE(ah, AR_SELFGEN_MASK, tx_chainmask);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
+ if (tx_chainmask == 0x5) {
+ REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
+ AR_PHY_SWAP_ALT_CHAIN);
+ }
+ if (AR_SREV_9100(ah))
+ REG_WRITE(ah, AR_PHY_ANALOG_SWAP,
+ REG_READ(ah, AR_PHY_ANALOG_SWAP) | 0x00000001);
+}
+
+static void ar5008_hw_override_ini(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ u32 val;
+
+ /*
+	 * Set the RX_ABORT and RX_DIS bits; clear them only after
+	 * RXE is set for the MAC. This prevents frames with corrupted
+ * descriptor status.
+ */
+ REG_SET_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
+
+ if (AR_SREV_9280_10_OR_LATER(ah)) {
+ val = REG_READ(ah, AR_PCU_MISC_MODE2);
+
+ if (!AR_SREV_9271(ah))
+ val &= ~AR_PCU_MISC_MODE2_HWWAR1;
+
+ if (AR_SREV_9287_10_OR_LATER(ah))
+ val = val & (~AR_PCU_MISC_MODE2_HWWAR2);
+
+ REG_WRITE(ah, AR_PCU_MISC_MODE2, val);
+ }
+
+ if (!AR_SREV_5416_20_OR_LATER(ah) ||
+ AR_SREV_9280_10_OR_LATER(ah))
+ return;
+ /*
+ * Disable BB clock gating
+ * Necessary to avoid issues on AR5416 2.0
+ */
+ REG_WRITE(ah, 0x9800 + (651 << 2), 0x11);
+
+ /*
+ * Disable RIFS search on some chips to avoid baseband
+ * hang issues.
+ */
+ if (AR_SREV_9100(ah) || AR_SREV_9160(ah)) {
+ val = REG_READ(ah, AR_PHY_HEAVY_CLIP_FACTOR_RIFS);
+ val &= ~AR_PHY_RIFS_INIT_DELAY;
+ REG_WRITE(ah, AR_PHY_HEAVY_CLIP_FACTOR_RIFS, val);
+ }
+}
+
+static void ar5008_hw_set_channel_regs(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ u32 phymode;
+ u32 enableDacFifo = 0;
+
+ if (AR_SREV_9285_10_OR_LATER(ah))
+ enableDacFifo = (REG_READ(ah, AR_PHY_TURBO) &
+ AR_PHY_FC_ENABLE_DAC_FIFO);
+
+ phymode = AR_PHY_FC_HT_EN | AR_PHY_FC_SHORT_GI_40
+ | AR_PHY_FC_SINGLE_HT_LTF1 | AR_PHY_FC_WALSH | enableDacFifo;
+
+ if (IS_CHAN_HT40(chan)) {
+ phymode |= AR_PHY_FC_DYN2040_EN;
+
+ if ((chan->chanmode == CHANNEL_A_HT40PLUS) ||
+ (chan->chanmode == CHANNEL_G_HT40PLUS))
+ phymode |= AR_PHY_FC_DYN2040_PRI_CH;
+
+ }
+ REG_WRITE(ah, AR_PHY_TURBO, phymode);
+
+ ath9k_hw_set11nmac2040(ah);
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
+ REG_WRITE(ah, AR_GTXTO, 25 << AR_GTXTO_TIMEOUT_LIMIT_S);
+ REG_WRITE(ah, AR_CST, 0xF << AR_CST_TIMEOUT_LIMIT_S);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+}
+
+
+static int ar5008_hw_process_ini(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
+ int i, regWrites = 0;
+ struct ieee80211_channel *channel = chan->chan;
+ u32 modesIndex, freqIndex;
+
+ switch (chan->chanmode) {
+ case CHANNEL_A:
+ case CHANNEL_A_HT20:
+ modesIndex = 1;
+ freqIndex = 1;
+ break;
+ case CHANNEL_A_HT40PLUS:
+ case CHANNEL_A_HT40MINUS:
+ modesIndex = 2;
+ freqIndex = 1;
+ break;
+ case CHANNEL_G:
+ case CHANNEL_G_HT20:
+ case CHANNEL_B:
+ modesIndex = 4;
+ freqIndex = 2;
+ break;
+ case CHANNEL_G_HT40PLUS:
+ case CHANNEL_G_HT40MINUS:
+ modesIndex = 3;
+ freqIndex = 2;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (AR_SREV_9287_12_OR_LATER(ah)) {
+ /* Enable ASYNC FIFO */
+ REG_SET_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3,
+ AR_MAC_PCU_ASYNC_FIFO_REG3_DATAPATH_SEL);
+ REG_SET_BIT(ah, AR_PHY_MODE, AR_PHY_MODE_ASYNCFIFO);
+ REG_CLR_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3,
+ AR_MAC_PCU_ASYNC_FIFO_REG3_SOFT_RESET);
+ REG_SET_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3,
+ AR_MAC_PCU_ASYNC_FIFO_REG3_SOFT_RESET);
+ }
+
+ /*
+ * Set correct baseband to analog shift setting to
+ * access analog chips.
+ */
+ REG_WRITE(ah, AR_PHY(0), 0x00000007);
+
+ /* Write ADDAC shifts */
+ REG_WRITE(ah, AR_PHY_ADC_SERIAL_CTL, AR_PHY_SEL_EXTERNAL_RADIO);
+ ah->eep_ops->set_addac(ah, chan);
+
+ if (AR_SREV_5416_22_OR_LATER(ah)) {
+ REG_WRITE_ARRAY(&ah->iniAddac, 1, regWrites);
+ } else {
+ struct ar5416IniArray temp;
+ u32 addacSize =
+ sizeof(u32) * ah->iniAddac.ia_rows *
+ ah->iniAddac.ia_columns;
+
+ /* For AR5416 2.0/2.1 */
+ memcpy(ah->addac5416_21,
+ ah->iniAddac.ia_array, addacSize);
+
+ /* override CLKDRV value at [row, column] = [31, 1] */
+ (ah->addac5416_21)[31 * ah->iniAddac.ia_columns + 1] = 0;
+
+ temp.ia_array = ah->addac5416_21;
+ temp.ia_columns = ah->iniAddac.ia_columns;
+ temp.ia_rows = ah->iniAddac.ia_rows;
+ REG_WRITE_ARRAY(&temp, 1, regWrites);
+ }
+
+ REG_WRITE(ah, AR_PHY_ADC_SERIAL_CTL, AR_PHY_SEL_INTERNAL_ADDAC);
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
+ for (i = 0; i < ah->iniModes.ia_rows; i++) {
+ u32 reg = INI_RA(&ah->iniModes, i, 0);
+ u32 val = INI_RA(&ah->iniModes, i, modesIndex);
+
+ if (reg == AR_AN_TOP2 && ah->need_an_top2_fixup)
+ val &= ~AR_AN_TOP2_PWDCLKIND;
+
+ REG_WRITE(ah, reg, val);
+
+ if (reg >= 0x7800 && reg < 0x78a0
+ && ah->config.analog_shiftreg) {
+ udelay(100);
+ }
+
+ DO_DELAY(regWrites);
+ }
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
+ if (AR_SREV_9280(ah) || AR_SREV_9287_10_OR_LATER(ah))
+ REG_WRITE_ARRAY(&ah->iniModesRxGain, modesIndex, regWrites);
+
+ if (AR_SREV_9280(ah) || AR_SREV_9285_12_OR_LATER(ah) ||
+ AR_SREV_9287_10_OR_LATER(ah))
+ REG_WRITE_ARRAY(&ah->iniModesTxGain, modesIndex, regWrites);
+
+ if (AR_SREV_9271_10(ah))
+ REG_WRITE_ARRAY(&ah->iniModes_9271_1_0_only,
+ modesIndex, regWrites);
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
+ /* Write common array parameters */
+ for (i = 0; i < ah->iniCommon.ia_rows; i++) {
+ u32 reg = INI_RA(&ah->iniCommon, i, 0);
+ u32 val = INI_RA(&ah->iniCommon, i, 1);
+
+ REG_WRITE(ah, reg, val);
+
+ if (reg >= 0x7800 && reg < 0x78a0
+ && ah->config.analog_shiftreg) {
+ udelay(100);
+ }
+
+ DO_DELAY(regWrites);
+ }
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
+ if (AR_SREV_9271(ah)) {
+ if (ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE) == 1)
+ REG_WRITE_ARRAY(&ah->iniModes_high_power_tx_gain_9271,
+ modesIndex, regWrites);
+ else
+ REG_WRITE_ARRAY(&ah->iniModes_normal_power_tx_gain_9271,
+ modesIndex, regWrites);
+ }
+
+ REG_WRITE_ARRAY(&ah->iniBB_RfGain, freqIndex, regWrites);
+
+ if (IS_CHAN_A_FAST_CLOCK(ah, chan)) {
+ REG_WRITE_ARRAY(&ah->iniModesAdditional, modesIndex,
+ regWrites);
+ }
+
+ ar5008_hw_override_ini(ah, chan);
+ ar5008_hw_set_channel_regs(ah, chan);
+ ar5008_hw_init_chain_masks(ah);
+ ath9k_olc_init(ah);
+
+ /* Set TX power */
+ ah->eep_ops->set_txpower(ah, chan,
+ ath9k_regd_get_ctl(regulatory, chan),
+ channel->max_antenna_gain * 2,
+ channel->max_power * 2,
+ min((u32) MAX_RATE_POWER,
+ (u32) regulatory->power_limit));
+
+ /* Write analog registers */
+ if (!ath9k_hw_set_rf_regs(ah, chan, freqIndex)) {
+ ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
+			  "ath9k_hw_set_rf_regs failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void ar5008_hw_set_rfmode(struct ath_hw *ah, struct ath9k_channel *chan)
+{
+ u32 rfMode = 0;
+
+ if (chan == NULL)
+ return;
+
+ rfMode |= (IS_CHAN_B(chan) || IS_CHAN_G(chan))
+ ? AR_PHY_MODE_DYNAMIC : AR_PHY_MODE_OFDM;
+
+ if (!AR_SREV_9280_10_OR_LATER(ah))
+ rfMode |= (IS_CHAN_5GHZ(chan)) ?
+ AR_PHY_MODE_RF5GHZ : AR_PHY_MODE_RF2GHZ;
+
+ if (IS_CHAN_A_FAST_CLOCK(ah, chan))
+ rfMode |= (AR_PHY_MODE_DYNAMIC | AR_PHY_MODE_DYN_CCK_DISABLE);
+
+ REG_WRITE(ah, AR_PHY_MODE, rfMode);
+}
+
+static void ar5008_hw_mark_phy_inactive(struct ath_hw *ah)
+{
+ REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS);
+}
+
+static void ar5008_hw_set_delta_slope(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ u32 coef_scaled, ds_coef_exp, ds_coef_man;
+ u32 clockMhzScaled = 0x64000000;
+ struct chan_centers centers;
+
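+	/*
+	 * 0x64000000 is 100 MHz scaled by 2^24; half- and quarter-rate
+	 * channels use a correspondingly reduced clock.
+	 */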
+ if (IS_CHAN_HALF_RATE(chan))
+ clockMhzScaled = clockMhzScaled >> 1;
+ else if (IS_CHAN_QUARTER_RATE(chan))
+ clockMhzScaled = clockMhzScaled >> 2;
+
+ ath9k_hw_get_channel_centers(ah, chan, &centers);
+ coef_scaled = clockMhzScaled / centers.synth_center;
+
+ ath9k_hw_get_delta_slope_vals(ah, coef_scaled, &ds_coef_man,
+ &ds_coef_exp);
+
+ REG_RMW_FIELD(ah, AR_PHY_TIMING3,
+ AR_PHY_TIMING3_DSC_MAN, ds_coef_man);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING3,
+ AR_PHY_TIMING3_DSC_EXP, ds_coef_exp);
+
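+	/*
+	 * For the short (half) guard interval the scaled coefficient is
+	 * 9/10 of the normal value; program the HALFGI variant with it.
+	 */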
+ coef_scaled = (9 * coef_scaled) / 10;
+
+ ath9k_hw_get_delta_slope_vals(ah, coef_scaled, &ds_coef_man,
+ &ds_coef_exp);
+
+ REG_RMW_FIELD(ah, AR_PHY_HALFGI,
+ AR_PHY_HALFGI_DSC_MAN, ds_coef_man);
+ REG_RMW_FIELD(ah, AR_PHY_HALFGI,
+ AR_PHY_HALFGI_DSC_EXP, ds_coef_exp);
+}
+
+static bool ar5008_hw_rfbus_req(struct ath_hw *ah)
+{
+ REG_WRITE(ah, AR_PHY_RFBUS_REQ, AR_PHY_RFBUS_REQ_EN);
+ return ath9k_hw_wait(ah, AR_PHY_RFBUS_GRANT, AR_PHY_RFBUS_GRANT_EN,
+ AR_PHY_RFBUS_GRANT_EN, AH_WAIT_TIMEOUT);
+}
+
+static void ar5008_hw_rfbus_done(struct ath_hw *ah)
+{
+ u32 synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY;
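+
+	/*
+	 * AR_PHY_RX_DELAY holds the synth settling time in 100ns increments;
+	 * CCK-only (11b) channels need a longer scaled wait than OFDM.
+	 */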
+ if (IS_CHAN_B(ah->curchan))
+ synthDelay = (4 * synthDelay) / 22;
+ else
+ synthDelay /= 10;
+
+ udelay(synthDelay + BASE_ACTIVATE_DELAY);
+
+ REG_WRITE(ah, AR_PHY_RFBUS_REQ, 0);
+}
+
+static void ar5008_hw_enable_rfkill(struct ath_hw *ah)
+{
+ REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL,
+ AR_GPIO_INPUT_EN_VAL_RFSILENT_BB);
+
+ REG_CLR_BIT(ah, AR_GPIO_INPUT_MUX2,
+ AR_GPIO_INPUT_MUX2_RFSILENT);
+
+ ath9k_hw_cfg_gpio_input(ah, ah->rfkill_gpio);
+ REG_SET_BIT(ah, AR_PHY_TEST, RFSILENT_BB);
+}
+
+static void ar5008_restore_chainmask(struct ath_hw *ah)
+{
+ int rx_chainmask = ah->rxchainmask;
+
+ if ((rx_chainmask == 0x5) || (rx_chainmask == 0x3)) {
+ REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx_chainmask);
+ REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx_chainmask);
+ }
+}
+
+static void ar5008_set_diversity(struct ath_hw *ah, bool value)
+{
+ u32 v = REG_READ(ah, AR_PHY_CCK_DETECT);
+ if (value)
+ v |= AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV;
+ else
+ v &= ~AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV;
+ REG_WRITE(ah, AR_PHY_CCK_DETECT, v);
+}
+
+static u32 ar9100_hw_compute_pll_control(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ if (chan && IS_CHAN_5GHZ(chan))
+ return 0x1450;
+ return 0x1458;
+}
+
+static u32 ar9160_hw_compute_pll_control(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ u32 pll;
+
+ pll = SM(0x5, AR_RTC_9160_PLL_REFDIV);
+
+ if (chan && IS_CHAN_HALF_RATE(chan))
+ pll |= SM(0x1, AR_RTC_9160_PLL_CLKSEL);
+ else if (chan && IS_CHAN_QUARTER_RATE(chan))
+ pll |= SM(0x2, AR_RTC_9160_PLL_CLKSEL);
+
+ if (chan && IS_CHAN_5GHZ(chan))
+ pll |= SM(0x50, AR_RTC_9160_PLL_DIV);
+ else
+ pll |= SM(0x58, AR_RTC_9160_PLL_DIV);
+
+ return pll;
+}
+
+static u32 ar5008_hw_compute_pll_control(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ u32 pll;
+
+ pll = AR_RTC_PLL_REFDIV_5 | AR_RTC_PLL_DIV2;
+
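+	/*
+	 * Half/quarter-rate channels run from a divided clock; the final
+	 * PLL divider differs between the 5 GHz and 2.4 GHz bands.
+	 */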
+ if (chan && IS_CHAN_HALF_RATE(chan))
+ pll |= SM(0x1, AR_RTC_PLL_CLKSEL);
+ else if (chan && IS_CHAN_QUARTER_RATE(chan))
+ pll |= SM(0x2, AR_RTC_PLL_CLKSEL);
+
+ if (chan && IS_CHAN_5GHZ(chan))
+ pll |= SM(0xa, AR_RTC_PLL_DIV);
+ else
+ pll |= SM(0xb, AR_RTC_PLL_DIV);
+
+ return pll;
+}
+
+static bool ar5008_hw_ani_control(struct ath_hw *ah,
+ enum ath9k_ani_cmd cmd, int param)
+{
+ struct ar5416AniState *aniState = ah->curani;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ switch (cmd & ah->ani_function) {
+ case ATH9K_ANI_NOISE_IMMUNITY_LEVEL:{
+ u32 level = param;
+
+ if (level >= ARRAY_SIZE(ah->totalSizeDesired)) {
+ ath_print(common, ATH_DBG_ANI,
+				  "level out of range (%u >= %u)\n",
+ level,
+ (unsigned)ARRAY_SIZE(ah->totalSizeDesired));
+ return false;
+ }
+
+ REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ,
+ AR_PHY_DESIRED_SZ_TOT_DES,
+ ah->totalSizeDesired[level]);
+ REG_RMW_FIELD(ah, AR_PHY_AGC_CTL1,
+ AR_PHY_AGC_CTL1_COARSE_LOW,
+ ah->coarse_low[level]);
+ REG_RMW_FIELD(ah, AR_PHY_AGC_CTL1,
+ AR_PHY_AGC_CTL1_COARSE_HIGH,
+ ah->coarse_high[level]);
+ REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
+ AR_PHY_FIND_SIG_FIRPWR,
+ ah->firpwr[level]);
+
+ if (level > aniState->noiseImmunityLevel)
+ ah->stats.ast_ani_niup++;
+ else if (level < aniState->noiseImmunityLevel)
+ ah->stats.ast_ani_nidown++;
+ aniState->noiseImmunityLevel = level;
+ break;
+ }
+ case ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION:{
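+		/*
+		 * Index 0 of each table is used when weak signal detection
+		 * is disabled, index 1 when it is enabled.
+		 */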
+ const int m1ThreshLow[] = { 127, 50 };
+ const int m2ThreshLow[] = { 127, 40 };
+ const int m1Thresh[] = { 127, 0x4d };
+ const int m2Thresh[] = { 127, 0x40 };
+ const int m2CountThr[] = { 31, 16 };
+ const int m2CountThrLow[] = { 63, 48 };
+ u32 on = param ? 1 : 0;
+
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
+ AR_PHY_SFCORR_LOW_M1_THRESH_LOW,
+ m1ThreshLow[on]);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
+ AR_PHY_SFCORR_LOW_M2_THRESH_LOW,
+ m2ThreshLow[on]);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR,
+ AR_PHY_SFCORR_M1_THRESH,
+ m1Thresh[on]);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR,
+ AR_PHY_SFCORR_M2_THRESH,
+ m2Thresh[on]);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR,
+ AR_PHY_SFCORR_M2COUNT_THR,
+ m2CountThr[on]);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
+ AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW,
+ m2CountThrLow[on]);
+
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
+ AR_PHY_SFCORR_EXT_M1_THRESH_LOW,
+ m1ThreshLow[on]);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
+ AR_PHY_SFCORR_EXT_M2_THRESH_LOW,
+ m2ThreshLow[on]);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
+ AR_PHY_SFCORR_EXT_M1_THRESH,
+ m1Thresh[on]);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
+ AR_PHY_SFCORR_EXT_M2_THRESH,
+ m2Thresh[on]);
+
+ if (on)
+ REG_SET_BIT(ah, AR_PHY_SFCORR_LOW,
+ AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
+ else
+ REG_CLR_BIT(ah, AR_PHY_SFCORR_LOW,
+ AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
+
+ if (!on != aniState->ofdmWeakSigDetectOff) {
+ if (on)
+ ah->stats.ast_ani_ofdmon++;
+ else
+ ah->stats.ast_ani_ofdmoff++;
+ aniState->ofdmWeakSigDetectOff = !on;
+ }
+ break;
+ }
+ case ATH9K_ANI_CCK_WEAK_SIGNAL_THR:{
+ const int weakSigThrCck[] = { 8, 6 };
+ u32 high = param ? 1 : 0;
+
+ REG_RMW_FIELD(ah, AR_PHY_CCK_DETECT,
+ AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK,
+ weakSigThrCck[high]);
+ if (high != aniState->cckWeakSigThreshold) {
+ if (high)
+ ah->stats.ast_ani_cckhigh++;
+ else
+ ah->stats.ast_ani_ccklow++;
+ aniState->cckWeakSigThreshold = high;
+ }
+ break;
+ }
+ case ATH9K_ANI_FIRSTEP_LEVEL:{
+ const int firstep[] = { 0, 4, 8 };
+ u32 level = param;
+
+ if (level >= ARRAY_SIZE(firstep)) {
+ ath_print(common, ATH_DBG_ANI,
+				  "level out of range (%u >= %u)\n",
+ level,
+ (unsigned) ARRAY_SIZE(firstep));
+ return false;
+ }
+ REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
+ AR_PHY_FIND_SIG_FIRSTEP,
+ firstep[level]);
+ if (level > aniState->firstepLevel)
+ ah->stats.ast_ani_stepup++;
+ else if (level < aniState->firstepLevel)
+ ah->stats.ast_ani_stepdown++;
+ aniState->firstepLevel = level;
+ break;
+ }
+ case ATH9K_ANI_SPUR_IMMUNITY_LEVEL:{
+ const int cycpwrThr1[] = { 2, 4, 6, 8, 10, 12, 14, 16 };
+ u32 level = param;
+
+ if (level >= ARRAY_SIZE(cycpwrThr1)) {
+ ath_print(common, ATH_DBG_ANI,
+				  "level out of range (%u >= %u)\n",
+ level,
+ (unsigned) ARRAY_SIZE(cycpwrThr1));
+ return false;
+ }
+ REG_RMW_FIELD(ah, AR_PHY_TIMING5,
+ AR_PHY_TIMING5_CYCPWR_THR1,
+ cycpwrThr1[level]);
+ if (level > aniState->spurImmunityLevel)
+ ah->stats.ast_ani_spurup++;
+ else if (level < aniState->spurImmunityLevel)
+ ah->stats.ast_ani_spurdown++;
+ aniState->spurImmunityLevel = level;
+ break;
+ }
+ case ATH9K_ANI_PRESENT:
+ break;
+ default:
+ ath_print(common, ATH_DBG_ANI,
+ "invalid cmd %u\n", cmd);
+ return false;
+ }
+
+ ath_print(common, ATH_DBG_ANI, "ANI parameters:\n");
+ ath_print(common, ATH_DBG_ANI,
+ "noiseImmunityLevel=%d, spurImmunityLevel=%d, "
+ "ofdmWeakSigDetectOff=%d\n",
+ aniState->noiseImmunityLevel,
+ aniState->spurImmunityLevel,
+ !aniState->ofdmWeakSigDetectOff);
+ ath_print(common, ATH_DBG_ANI,
+ "cckWeakSigThreshold=%d, "
+ "firstepLevel=%d, listenTime=%d\n",
+ aniState->cckWeakSigThreshold,
+ aniState->firstepLevel,
+ aniState->listenTime);
+ ath_print(common, ATH_DBG_ANI,
+ "cycleCount=%d, ofdmPhyErrCount=%d, cckPhyErrCount=%d\n\n",
+ aniState->cycleCount,
+ aniState->ofdmPhyErrCount,
+ aniState->cckPhyErrCount);
+
+ return true;
+}
+
+static void ar5008_hw_do_getnf(struct ath_hw *ah,
+ int16_t nfarray[NUM_NF_READINGS])
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ int16_t nf;
+
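+	/*
+	 * The minCCApwr fields are 9-bit two's complement values;
+	 * sign-extend each reading before reporting it.
+	 */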
+ nf = MS(REG_READ(ah, AR_PHY_CCA), AR_PHY_MINCCA_PWR);
+ if (nf & 0x100)
+ nf = 0 - ((nf ^ 0x1ff) + 1);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "NF calibrated [ctl] [chain 0] is %d\n", nf);
+ nfarray[0] = nf;
+
+ nf = MS(REG_READ(ah, AR_PHY_CH1_CCA), AR_PHY_CH1_MINCCA_PWR);
+ if (nf & 0x100)
+ nf = 0 - ((nf ^ 0x1ff) + 1);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "NF calibrated [ctl] [chain 1] is %d\n", nf);
+ nfarray[1] = nf;
+
+ nf = MS(REG_READ(ah, AR_PHY_CH2_CCA), AR_PHY_CH2_MINCCA_PWR);
+ if (nf & 0x100)
+ nf = 0 - ((nf ^ 0x1ff) + 1);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "NF calibrated [ctl] [chain 2] is %d\n", nf);
+ nfarray[2] = nf;
+
+ nf = MS(REG_READ(ah, AR_PHY_EXT_CCA), AR_PHY_EXT_MINCCA_PWR);
+ if (nf & 0x100)
+ nf = 0 - ((nf ^ 0x1ff) + 1);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "NF calibrated [ext] [chain 0] is %d\n", nf);
+ nfarray[3] = nf;
+
+ nf = MS(REG_READ(ah, AR_PHY_CH1_EXT_CCA), AR_PHY_CH1_EXT_MINCCA_PWR);
+ if (nf & 0x100)
+ nf = 0 - ((nf ^ 0x1ff) + 1);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "NF calibrated [ext] [chain 1] is %d\n", nf);
+ nfarray[4] = nf;
+
+ nf = MS(REG_READ(ah, AR_PHY_CH2_EXT_CCA), AR_PHY_CH2_EXT_MINCCA_PWR);
+ if (nf & 0x100)
+ nf = 0 - ((nf ^ 0x1ff) + 1);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "NF calibrated [ext] [chain 2] is %d\n", nf);
+ nfarray[5] = nf;
+}
+
+static void ar5008_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
+{
+ struct ath9k_nfcal_hist *h;
+ int i, j;
+ int32_t val;
+ const u32 ar5416_cca_regs[6] = {
+ AR_PHY_CCA,
+ AR_PHY_CH1_CCA,
+ AR_PHY_CH2_CCA,
+ AR_PHY_EXT_CCA,
+ AR_PHY_CH1_EXT_CCA,
+ AR_PHY_CH2_EXT_CCA
+ };
+ u8 chainmask, rx_chain_status;
+
+ rx_chain_status = REG_READ(ah, AR_PHY_RX_CHAINMASK);
+ if (AR_SREV_9285(ah) || AR_SREV_9271(ah))
+ chainmask = 0x9;
+ else if (AR_SREV_9280(ah) || AR_SREV_9287(ah)) {
+ if ((rx_chain_status & 0x2) || (rx_chain_status & 0x4))
+ chainmask = 0x1B;
+ else
+ chainmask = 0x09;
+ } else {
+ if (rx_chain_status & 0x4)
+ chainmask = 0x3F;
+ else if (rx_chain_status & 0x2)
+ chainmask = 0x1B;
+ else
+ chainmask = 0x09;
+ }
+
+ h = ah->nfCalHist;
+
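+	/*
+	 * Load the software-filtered NF history into the baseband's
+	 * internal minCCApwr variables before forcing an NF calibration.
+	 */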
+ for (i = 0; i < NUM_NF_READINGS; i++) {
+ if (chainmask & (1 << i)) {
+ val = REG_READ(ah, ar5416_cca_regs[i]);
+ val &= 0xFFFFFE00;
+ val |= (((u32) (h[i].privNF) << 1) & 0x1ff);
+ REG_WRITE(ah, ar5416_cca_regs[i], val);
+ }
+ }
+
+ REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_ENABLE_NF);
+ REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_NO_UPDATE_NF);
+ REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF);
+
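+	/* Wait (up to ~250us) for the NF load to complete. */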
+ for (j = 0; j < 5; j++) {
+ if ((REG_READ(ah, AR_PHY_AGC_CONTROL) &
+ AR_PHY_AGC_CONTROL_NF) == 0)
+ break;
+ udelay(50);
+ }
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
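+	/*
+	 * Write a fixed -50 back into the minCCApwr fields so the next
+	 * hardware NF calibration is not capped by the values just loaded.
+	 */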
+ for (i = 0; i < NUM_NF_READINGS; i++) {
+ if (chainmask & (1 << i)) {
+ val = REG_READ(ah, ar5416_cca_regs[i]);
+ val &= 0xFFFFFE00;
+ val |= (((u32) (-50) << 1) & 0x1ff);
+ REG_WRITE(ah, ar5416_cca_regs[i], val);
+ }
+ }
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+}
+
+void ar5008_hw_attach_phy_ops(struct ath_hw *ah)
+{
+ struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
+
+ priv_ops->rf_set_freq = ar5008_hw_set_channel;
+ priv_ops->spur_mitigate_freq = ar5008_hw_spur_mitigate;
+
+ priv_ops->rf_alloc_ext_banks = ar5008_hw_rf_alloc_ext_banks;
+ priv_ops->rf_free_ext_banks = ar5008_hw_rf_free_ext_banks;
+ priv_ops->set_rf_regs = ar5008_hw_set_rf_regs;
+ priv_ops->set_channel_regs = ar5008_hw_set_channel_regs;
+ priv_ops->init_bb = ar5008_hw_init_bb;
+ priv_ops->process_ini = ar5008_hw_process_ini;
+ priv_ops->set_rfmode = ar5008_hw_set_rfmode;
+ priv_ops->mark_phy_inactive = ar5008_hw_mark_phy_inactive;
+ priv_ops->set_delta_slope = ar5008_hw_set_delta_slope;
+ priv_ops->rfbus_req = ar5008_hw_rfbus_req;
+ priv_ops->rfbus_done = ar5008_hw_rfbus_done;
+ priv_ops->enable_rfkill = ar5008_hw_enable_rfkill;
+ priv_ops->restore_chainmask = ar5008_restore_chainmask;
+ priv_ops->set_diversity = ar5008_set_diversity;
+ priv_ops->ani_control = ar5008_hw_ani_control;
+ priv_ops->do_getnf = ar5008_hw_do_getnf;
+ priv_ops->loadnf = ar5008_hw_loadnf;
+
+ if (AR_SREV_9100(ah))
+ priv_ops->compute_pll_control = ar9100_hw_compute_pll_control;
+ else if (AR_SREV_9160_10_OR_LATER(ah))
+ priv_ops->compute_pll_control = ar9160_hw_compute_pll_control;
+ else
+ priv_ops->compute_pll_control = ar5008_hw_compute_pll_control;
+}
diff --git a/drivers/net/wireless/ath/ath9k/ar9001_initvals.h b/drivers/net/wireless/ath/ath9k/ar9001_initvals.h
new file mode 100644
index 000000000000..0b94bd385b0a
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9001_initvals.h
@@ -0,0 +1,1254 @@
+
+static const u32 ar5416Common_9100[][2] = {
+ { 0x0000000c, 0x00000000 },
+ { 0x00000030, 0x00020015 },
+ { 0x00000034, 0x00000005 },
+ { 0x00000040, 0x00000000 },
+ { 0x00000044, 0x00000008 },
+ { 0x00000048, 0x00000008 },
+ { 0x0000004c, 0x00000010 },
+ { 0x00000050, 0x00000000 },
+ { 0x00000054, 0x0000001f },
+ { 0x00000800, 0x00000000 },
+ { 0x00000804, 0x00000000 },
+ { 0x00000808, 0x00000000 },
+ { 0x0000080c, 0x00000000 },
+ { 0x00000810, 0x00000000 },
+ { 0x00000814, 0x00000000 },
+ { 0x00000818, 0x00000000 },
+ { 0x0000081c, 0x00000000 },
+ { 0x00000820, 0x00000000 },
+ { 0x00000824, 0x00000000 },
+ { 0x00001040, 0x002ffc0f },
+ { 0x00001044, 0x002ffc0f },
+ { 0x00001048, 0x002ffc0f },
+ { 0x0000104c, 0x002ffc0f },
+ { 0x00001050, 0x002ffc0f },
+ { 0x00001054, 0x002ffc0f },
+ { 0x00001058, 0x002ffc0f },
+ { 0x0000105c, 0x002ffc0f },
+ { 0x00001060, 0x002ffc0f },
+ { 0x00001064, 0x002ffc0f },
+ { 0x00001230, 0x00000000 },
+ { 0x00001270, 0x00000000 },
+ { 0x00001038, 0x00000000 },
+ { 0x00001078, 0x00000000 },
+ { 0x000010b8, 0x00000000 },
+ { 0x000010f8, 0x00000000 },
+ { 0x00001138, 0x00000000 },
+ { 0x00001178, 0x00000000 },
+ { 0x000011b8, 0x00000000 },
+ { 0x000011f8, 0x00000000 },
+ { 0x00001238, 0x00000000 },
+ { 0x00001278, 0x00000000 },
+ { 0x000012b8, 0x00000000 },
+ { 0x000012f8, 0x00000000 },
+ { 0x00001338, 0x00000000 },
+ { 0x00001378, 0x00000000 },
+ { 0x000013b8, 0x00000000 },
+ { 0x000013f8, 0x00000000 },
+ { 0x00001438, 0x00000000 },
+ { 0x00001478, 0x00000000 },
+ { 0x000014b8, 0x00000000 },
+ { 0x000014f8, 0x00000000 },
+ { 0x00001538, 0x00000000 },
+ { 0x00001578, 0x00000000 },
+ { 0x000015b8, 0x00000000 },
+ { 0x000015f8, 0x00000000 },
+ { 0x00001638, 0x00000000 },
+ { 0x00001678, 0x00000000 },
+ { 0x000016b8, 0x00000000 },
+ { 0x000016f8, 0x00000000 },
+ { 0x00001738, 0x00000000 },
+ { 0x00001778, 0x00000000 },
+ { 0x000017b8, 0x00000000 },
+ { 0x000017f8, 0x00000000 },
+ { 0x0000103c, 0x00000000 },
+ { 0x0000107c, 0x00000000 },
+ { 0x000010bc, 0x00000000 },
+ { 0x000010fc, 0x00000000 },
+ { 0x0000113c, 0x00000000 },
+ { 0x0000117c, 0x00000000 },
+ { 0x000011bc, 0x00000000 },
+ { 0x000011fc, 0x00000000 },
+ { 0x0000123c, 0x00000000 },
+ { 0x0000127c, 0x00000000 },
+ { 0x000012bc, 0x00000000 },
+ { 0x000012fc, 0x00000000 },
+ { 0x0000133c, 0x00000000 },
+ { 0x0000137c, 0x00000000 },
+ { 0x000013bc, 0x00000000 },
+ { 0x000013fc, 0x00000000 },
+ { 0x0000143c, 0x00000000 },
+ { 0x0000147c, 0x00000000 },
+ { 0x00020010, 0x00000003 },
+ { 0x00020038, 0x000004c2 },
+ { 0x00008004, 0x00000000 },
+ { 0x00008008, 0x00000000 },
+ { 0x0000800c, 0x00000000 },
+ { 0x00008018, 0x00000700 },
+ { 0x00008020, 0x00000000 },
+ { 0x00008038, 0x00000000 },
+ { 0x0000803c, 0x00000000 },
+ { 0x00008048, 0x40000000 },
+ { 0x00008054, 0x00004000 },
+ { 0x00008058, 0x00000000 },
+ { 0x0000805c, 0x000fc78f },
+ { 0x00008060, 0x0000000f },
+ { 0x00008064, 0x00000000 },
+ { 0x000080c0, 0x2a82301a },
+ { 0x000080c4, 0x05dc01e0 },
+ { 0x000080c8, 0x1f402710 },
+ { 0x000080cc, 0x01f40000 },
+ { 0x000080d0, 0x00001e00 },
+ { 0x000080d4, 0x00000000 },
+ { 0x000080d8, 0x00400000 },
+ { 0x000080e0, 0xffffffff },
+ { 0x000080e4, 0x0000ffff },
+ { 0x000080e8, 0x003f3f3f },
+ { 0x000080ec, 0x00000000 },
+ { 0x000080f0, 0x00000000 },
+ { 0x000080f4, 0x00000000 },
+ { 0x000080f8, 0x00000000 },
+ { 0x000080fc, 0x00020000 },
+ { 0x00008100, 0x00020000 },
+ { 0x00008104, 0x00000001 },
+ { 0x00008108, 0x00000052 },
+ { 0x0000810c, 0x00000000 },
+ { 0x00008110, 0x00000168 },
+ { 0x00008118, 0x000100aa },
+ { 0x0000811c, 0x00003210 },
+ { 0x00008120, 0x08f04800 },
+ { 0x00008124, 0x00000000 },
+ { 0x00008128, 0x00000000 },
+ { 0x0000812c, 0x00000000 },
+ { 0x00008130, 0x00000000 },
+ { 0x00008134, 0x00000000 },
+ { 0x00008138, 0x00000000 },
+ { 0x0000813c, 0x00000000 },
+ { 0x00008144, 0x00000000 },
+ { 0x00008168, 0x00000000 },
+ { 0x0000816c, 0x00000000 },
+ { 0x00008170, 0x32143320 },
+ { 0x00008174, 0xfaa4fa50 },
+ { 0x00008178, 0x00000100 },
+ { 0x0000817c, 0x00000000 },
+ { 0x000081c4, 0x00000000 },
+ { 0x000081d0, 0x00003210 },
+ { 0x000081ec, 0x00000000 },
+ { 0x000081f0, 0x00000000 },
+ { 0x000081f4, 0x00000000 },
+ { 0x000081f8, 0x00000000 },
+ { 0x000081fc, 0x00000000 },
+ { 0x00008200, 0x00000000 },
+ { 0x00008204, 0x00000000 },
+ { 0x00008208, 0x00000000 },
+ { 0x0000820c, 0x00000000 },
+ { 0x00008210, 0x00000000 },
+ { 0x00008214, 0x00000000 },
+ { 0x00008218, 0x00000000 },
+ { 0x0000821c, 0x00000000 },
+ { 0x00008220, 0x00000000 },
+ { 0x00008224, 0x00000000 },
+ { 0x00008228, 0x00000000 },
+ { 0x0000822c, 0x00000000 },
+ { 0x00008230, 0x00000000 },
+ { 0x00008234, 0x00000000 },
+ { 0x00008238, 0x00000000 },
+ { 0x0000823c, 0x00000000 },
+ { 0x00008240, 0x00100000 },
+ { 0x00008244, 0x0010f400 },
+ { 0x00008248, 0x00000100 },
+ { 0x0000824c, 0x0001e800 },
+ { 0x00008250, 0x00000000 },
+ { 0x00008254, 0x00000000 },
+ { 0x00008258, 0x00000000 },
+ { 0x0000825c, 0x400000ff },
+ { 0x00008260, 0x00080922 },
+ { 0x00008270, 0x00000000 },
+ { 0x00008274, 0x40000000 },
+ { 0x00008278, 0x003e4180 },
+ { 0x0000827c, 0x00000000 },
+ { 0x00008284, 0x0000002c },
+ { 0x00008288, 0x0000002c },
+ { 0x0000828c, 0x00000000 },
+ { 0x00008294, 0x00000000 },
+ { 0x00008298, 0x00000000 },
+ { 0x00008300, 0x00000000 },
+ { 0x00008304, 0x00000000 },
+ { 0x00008308, 0x00000000 },
+ { 0x0000830c, 0x00000000 },
+ { 0x00008310, 0x00000000 },
+ { 0x00008314, 0x00000000 },
+ { 0x00008318, 0x00000000 },
+ { 0x00008328, 0x00000000 },
+ { 0x0000832c, 0x00000007 },
+ { 0x00008330, 0x00000302 },
+ { 0x00008334, 0x00000e00 },
+ { 0x00008338, 0x00000000 },
+ { 0x0000833c, 0x00000000 },
+ { 0x00008340, 0x000107ff },
+ { 0x00009808, 0x00000000 },
+ { 0x0000980c, 0xad848e19 },
+ { 0x00009810, 0x7d14e000 },
+ { 0x00009814, 0x9c0a9f6b },
+ { 0x0000981c, 0x00000000 },
+ { 0x0000982c, 0x0000a000 },
+ { 0x00009830, 0x00000000 },
+ { 0x0000983c, 0x00200400 },
+ { 0x00009840, 0x206a01ae },
+ { 0x0000984c, 0x1284233c },
+ { 0x00009854, 0x00000859 },
+ { 0x00009900, 0x00000000 },
+ { 0x00009904, 0x00000000 },
+ { 0x00009908, 0x00000000 },
+ { 0x0000990c, 0x00000000 },
+ { 0x0000991c, 0x10000fff },
+ { 0x00009920, 0x05100000 },
+ { 0x0000a920, 0x05100000 },
+ { 0x0000b920, 0x05100000 },
+ { 0x00009928, 0x00000001 },
+ { 0x0000992c, 0x00000004 },
+ { 0x00009934, 0x1e1f2022 },
+ { 0x00009938, 0x0a0b0c0d },
+ { 0x0000993c, 0x00000000 },
+ { 0x00009948, 0x9280b212 },
+ { 0x0000994c, 0x00020028 },
+ { 0x0000c95c, 0x004b6a8e },
+ { 0x0000c968, 0x000003ce },
+ { 0x00009970, 0x190fb515 },
+ { 0x00009974, 0x00000000 },
+ { 0x00009978, 0x00000001 },
+ { 0x0000997c, 0x00000000 },
+ { 0x00009980, 0x00000000 },
+ { 0x00009984, 0x00000000 },
+ { 0x00009988, 0x00000000 },
+ { 0x0000998c, 0x00000000 },
+ { 0x00009990, 0x00000000 },
+ { 0x00009994, 0x00000000 },
+ { 0x00009998, 0x00000000 },
+ { 0x0000999c, 0x00000000 },
+ { 0x000099a0, 0x00000000 },
+ { 0x000099a4, 0x00000001 },
+ { 0x000099a8, 0x201fff00 },
+ { 0x000099ac, 0x006f0000 },
+ { 0x000099b0, 0x03051000 },
+ { 0x000099dc, 0x00000000 },
+ { 0x000099e0, 0x00000200 },
+ { 0x000099e4, 0xaaaaaaaa },
+ { 0x000099e8, 0x3c466478 },
+ { 0x000099ec, 0x0cc80caa },
+ { 0x000099fc, 0x00001042 },
+ { 0x00009b00, 0x00000000 },
+ { 0x00009b04, 0x00000001 },
+ { 0x00009b08, 0x00000002 },
+ { 0x00009b0c, 0x00000003 },
+ { 0x00009b10, 0x00000004 },
+ { 0x00009b14, 0x00000005 },
+ { 0x00009b18, 0x00000008 },
+ { 0x00009b1c, 0x00000009 },
+ { 0x00009b20, 0x0000000a },
+ { 0x00009b24, 0x0000000b },
+ { 0x00009b28, 0x0000000c },
+ { 0x00009b2c, 0x0000000d },
+ { 0x00009b30, 0x00000010 },
+ { 0x00009b34, 0x00000011 },
+ { 0x00009b38, 0x00000012 },
+ { 0x00009b3c, 0x00000013 },
+ { 0x00009b40, 0x00000014 },
+ { 0x00009b44, 0x00000015 },
+ { 0x00009b48, 0x00000018 },
+ { 0x00009b4c, 0x00000019 },
+ { 0x00009b50, 0x0000001a },
+ { 0x00009b54, 0x0000001b },
+ { 0x00009b58, 0x0000001c },
+ { 0x00009b5c, 0x0000001d },
+ { 0x00009b60, 0x00000020 },
+ { 0x00009b64, 0x00000021 },
+ { 0x00009b68, 0x00000022 },
+ { 0x00009b6c, 0x00000023 },
+ { 0x00009b70, 0x00000024 },
+ { 0x00009b74, 0x00000025 },
+ { 0x00009b78, 0x00000028 },
+ { 0x00009b7c, 0x00000029 },
+ { 0x00009b80, 0x0000002a },
+ { 0x00009b84, 0x0000002b },
+ { 0x00009b88, 0x0000002c },
+ { 0x00009b8c, 0x0000002d },
+ { 0x00009b90, 0x00000030 },
+ { 0x00009b94, 0x00000031 },
+ { 0x00009b98, 0x00000032 },
+ { 0x00009b9c, 0x00000033 },
+ { 0x00009ba0, 0x00000034 },
+ { 0x00009ba4, 0x00000035 },
+ { 0x00009ba8, 0x00000035 },
+ { 0x00009bac, 0x00000035 },
+ { 0x00009bb0, 0x00000035 },
+ { 0x00009bb4, 0x00000035 },
+ { 0x00009bb8, 0x00000035 },
+ { 0x00009bbc, 0x00000035 },
+ { 0x00009bc0, 0x00000035 },
+ { 0x00009bc4, 0x00000035 },
+ { 0x00009bc8, 0x00000035 },
+ { 0x00009bcc, 0x00000035 },
+ { 0x00009bd0, 0x00000035 },
+ { 0x00009bd4, 0x00000035 },
+ { 0x00009bd8, 0x00000035 },
+ { 0x00009bdc, 0x00000035 },
+ { 0x00009be0, 0x00000035 },
+ { 0x00009be4, 0x00000035 },
+ { 0x00009be8, 0x00000035 },
+ { 0x00009bec, 0x00000035 },
+ { 0x00009bf0, 0x00000035 },
+ { 0x00009bf4, 0x00000035 },
+ { 0x00009bf8, 0x00000010 },
+ { 0x00009bfc, 0x0000001a },
+ { 0x0000a210, 0x40806333 },
+ { 0x0000a214, 0x00106c10 },
+ { 0x0000a218, 0x009c4060 },
+ { 0x0000a220, 0x018830c6 },
+ { 0x0000a224, 0x00000400 },
+ { 0x0000a228, 0x001a0bb5 },
+ { 0x0000a22c, 0x00000000 },
+ { 0x0000a234, 0x20202020 },
+ { 0x0000a238, 0x20202020 },
+ { 0x0000a23c, 0x13c889ae },
+ { 0x0000a240, 0x38490a20 },
+ { 0x0000a244, 0x00007bb6 },
+ { 0x0000a248, 0x0fff3ffc },
+ { 0x0000a24c, 0x00000001 },
+ { 0x0000a250, 0x0000a000 },
+ { 0x0000a254, 0x00000000 },
+ { 0x0000a258, 0x0cc75380 },
+ { 0x0000a25c, 0x0f0f0f01 },
+ { 0x0000a260, 0xdfa91f01 },
+ { 0x0000a268, 0x00000001 },
+ { 0x0000a26c, 0x0ebae9c6 },
+ { 0x0000b26c, 0x0ebae9c6 },
+ { 0x0000c26c, 0x0ebae9c6 },
+ { 0x0000d270, 0x00820820 },
+ { 0x0000a278, 0x1ce739ce },
+ { 0x0000a27c, 0x050701ce },
+ { 0x0000a338, 0x00000000 },
+ { 0x0000a33c, 0x00000000 },
+ { 0x0000a340, 0x00000000 },
+ { 0x0000a344, 0x00000000 },
+ { 0x0000a348, 0x3fffffff },
+ { 0x0000a34c, 0x3fffffff },
+ { 0x0000a350, 0x3fffffff },
+ { 0x0000a354, 0x0003ffff },
+ { 0x0000a358, 0x79a8aa33 },
+ { 0x0000d35c, 0x07ffffef },
+ { 0x0000d360, 0x0fffffe7 },
+ { 0x0000d364, 0x17ffffe5 },
+ { 0x0000d368, 0x1fffffe4 },
+ { 0x0000d36c, 0x37ffffe3 },
+ { 0x0000d370, 0x3fffffe3 },
+ { 0x0000d374, 0x57ffffe3 },
+ { 0x0000d378, 0x5fffffe2 },
+ { 0x0000d37c, 0x7fffffe2 },
+ { 0x0000d380, 0x7f3c7bba },
+ { 0x0000d384, 0xf3307ff0 },
+ { 0x0000a388, 0x0c000000 },
+ { 0x0000a38c, 0x20202020 },
+ { 0x0000a390, 0x20202020 },
+ { 0x0000a394, 0x1ce739ce },
+ { 0x0000a398, 0x000001ce },
+ { 0x0000a39c, 0x00000001 },
+ { 0x0000a3a0, 0x00000000 },
+ { 0x0000a3a4, 0x00000000 },
+ { 0x0000a3a8, 0x00000000 },
+ { 0x0000a3ac, 0x00000000 },
+ { 0x0000a3b0, 0x00000000 },
+ { 0x0000a3b4, 0x00000000 },
+ { 0x0000a3b8, 0x00000000 },
+ { 0x0000a3bc, 0x00000000 },
+ { 0x0000a3c0, 0x00000000 },
+ { 0x0000a3c4, 0x00000000 },
+ { 0x0000a3c8, 0x00000246 },
+ { 0x0000a3cc, 0x20202020 },
+ { 0x0000a3d0, 0x20202020 },
+ { 0x0000a3d4, 0x20202020 },
+ { 0x0000a3dc, 0x1ce739ce },
+ { 0x0000a3e0, 0x000001ce },
+};
+
+static const u32 ar5416Bank0_9100[][2] = {
+ { 0x000098b0, 0x1e5795e5 },
+ { 0x000098e0, 0x02008020 },
+};
+
+static const u32 ar5416BB_RfGain_9100[][3] = {
+ { 0x00009a00, 0x00000000, 0x00000000 },
+ { 0x00009a04, 0x00000040, 0x00000040 },
+ { 0x00009a08, 0x00000080, 0x00000080 },
+ { 0x00009a0c, 0x000001a1, 0x00000141 },
+ { 0x00009a10, 0x000001e1, 0x00000181 },
+ { 0x00009a14, 0x00000021, 0x000001c1 },
+ { 0x00009a18, 0x00000061, 0x00000001 },
+ { 0x00009a1c, 0x00000168, 0x00000041 },
+ { 0x00009a20, 0x000001a8, 0x000001a8 },
+ { 0x00009a24, 0x000001e8, 0x000001e8 },
+ { 0x00009a28, 0x00000028, 0x00000028 },
+ { 0x00009a2c, 0x00000068, 0x00000068 },
+ { 0x00009a30, 0x00000189, 0x000000a8 },
+ { 0x00009a34, 0x000001c9, 0x00000169 },
+ { 0x00009a38, 0x00000009, 0x000001a9 },
+ { 0x00009a3c, 0x00000049, 0x000001e9 },
+ { 0x00009a40, 0x00000089, 0x00000029 },
+ { 0x00009a44, 0x00000170, 0x00000069 },
+ { 0x00009a48, 0x000001b0, 0x00000190 },
+ { 0x00009a4c, 0x000001f0, 0x000001d0 },
+ { 0x00009a50, 0x00000030, 0x00000010 },
+ { 0x00009a54, 0x00000070, 0x00000050 },
+ { 0x00009a58, 0x00000191, 0x00000090 },
+ { 0x00009a5c, 0x000001d1, 0x00000151 },
+ { 0x00009a60, 0x00000011, 0x00000191 },
+ { 0x00009a64, 0x00000051, 0x000001d1 },
+ { 0x00009a68, 0x00000091, 0x00000011 },
+ { 0x00009a6c, 0x000001b8, 0x00000051 },
+ { 0x00009a70, 0x000001f8, 0x00000198 },
+ { 0x00009a74, 0x00000038, 0x000001d8 },
+ { 0x00009a78, 0x00000078, 0x00000018 },
+ { 0x00009a7c, 0x00000199, 0x00000058 },
+ { 0x00009a80, 0x000001d9, 0x00000098 },
+ { 0x00009a84, 0x00000019, 0x00000159 },
+ { 0x00009a88, 0x00000059, 0x00000199 },
+ { 0x00009a8c, 0x00000099, 0x000001d9 },
+ { 0x00009a90, 0x000000d9, 0x00000019 },
+ { 0x00009a94, 0x000000f9, 0x00000059 },
+ { 0x00009a98, 0x000000f9, 0x00000099 },
+ { 0x00009a9c, 0x000000f9, 0x000000d9 },
+ { 0x00009aa0, 0x000000f9, 0x000000f9 },
+ { 0x00009aa4, 0x000000f9, 0x000000f9 },
+ { 0x00009aa8, 0x000000f9, 0x000000f9 },
+ { 0x00009aac, 0x000000f9, 0x000000f9 },
+ { 0x00009ab0, 0x000000f9, 0x000000f9 },
+ { 0x00009ab4, 0x000000f9, 0x000000f9 },
+ { 0x00009ab8, 0x000000f9, 0x000000f9 },
+ { 0x00009abc, 0x000000f9, 0x000000f9 },
+ { 0x00009ac0, 0x000000f9, 0x000000f9 },
+ { 0x00009ac4, 0x000000f9, 0x000000f9 },
+ { 0x00009ac8, 0x000000f9, 0x000000f9 },
+ { 0x00009acc, 0x000000f9, 0x000000f9 },
+ { 0x00009ad0, 0x000000f9, 0x000000f9 },
+ { 0x00009ad4, 0x000000f9, 0x000000f9 },
+ { 0x00009ad8, 0x000000f9, 0x000000f9 },
+ { 0x00009adc, 0x000000f9, 0x000000f9 },
+ { 0x00009ae0, 0x000000f9, 0x000000f9 },
+ { 0x00009ae4, 0x000000f9, 0x000000f9 },
+ { 0x00009ae8, 0x000000f9, 0x000000f9 },
+ { 0x00009aec, 0x000000f9, 0x000000f9 },
+ { 0x00009af0, 0x000000f9, 0x000000f9 },
+ { 0x00009af4, 0x000000f9, 0x000000f9 },
+ { 0x00009af8, 0x000000f9, 0x000000f9 },
+ { 0x00009afc, 0x000000f9, 0x000000f9 },
+};
+
+static const u32 ar5416Bank1_9100[][2] = {
+ { 0x000098b0, 0x02108421},
+ { 0x000098ec, 0x00000008},
+};
+
+static const u32 ar5416Bank2_9100[][2] = {
+ { 0x000098b0, 0x0e73ff17},
+ { 0x000098e0, 0x00000420},
+};
+
+static const u32 ar5416Bank3_9100[][3] = {
+ { 0x000098f0, 0x01400018, 0x01c00018 },
+};
+
+static const u32 ar5416Bank6_9100[][3] = {
+
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00e00000, 0x00e00000 },
+ { 0x0000989c, 0x005e0000, 0x005e0000 },
+ { 0x0000989c, 0x00120000, 0x00120000 },
+ { 0x0000989c, 0x00620000, 0x00620000 },
+ { 0x0000989c, 0x00020000, 0x00020000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x005f0000, 0x005f0000 },
+ { 0x0000989c, 0x00870000, 0x00870000 },
+ { 0x0000989c, 0x00f90000, 0x00f90000 },
+ { 0x0000989c, 0x007b0000, 0x007b0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00f50000, 0x00f50000 },
+ { 0x0000989c, 0x00dc0000, 0x00dc0000 },
+ { 0x0000989c, 0x00110000, 0x00110000 },
+ { 0x0000989c, 0x006100a8, 0x006100a8 },
+ { 0x0000989c, 0x004210a2, 0x004210a2 },
+ { 0x0000989c, 0x0014000f, 0x0014000f },
+ { 0x0000989c, 0x00c40002, 0x00c40002 },
+ { 0x0000989c, 0x003000f2, 0x003000f2 },
+ { 0x0000989c, 0x00440016, 0x00440016 },
+ { 0x0000989c, 0x00410040, 0x00410040 },
+ { 0x0000989c, 0x000180d6, 0x000180d6 },
+ { 0x0000989c, 0x0000c0aa, 0x0000c0aa },
+ { 0x0000989c, 0x000000b1, 0x000000b1 },
+ { 0x0000989c, 0x00002000, 0x00002000 },
+ { 0x0000989c, 0x000000d4, 0x000000d4 },
+ { 0x000098d0, 0x0000000f, 0x0010000f },
+};
+
+
+static const u32 ar5416Bank6TPC_9100[][3] = {
+
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00e00000, 0x00e00000 },
+ { 0x0000989c, 0x005e0000, 0x005e0000 },
+ { 0x0000989c, 0x00120000, 0x00120000 },
+ { 0x0000989c, 0x00620000, 0x00620000 },
+ { 0x0000989c, 0x00020000, 0x00020000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x40ff0000, 0x40ff0000 },
+ { 0x0000989c, 0x005f0000, 0x005f0000 },
+ { 0x0000989c, 0x00870000, 0x00870000 },
+ { 0x0000989c, 0x00f90000, 0x00f90000 },
+ { 0x0000989c, 0x007b0000, 0x007b0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00f50000, 0x00f50000 },
+ { 0x0000989c, 0x00dc0000, 0x00dc0000 },
+ { 0x0000989c, 0x00110000, 0x00110000 },
+ { 0x0000989c, 0x006100a8, 0x006100a8 },
+ { 0x0000989c, 0x00423022, 0x00423022 },
+ { 0x0000989c, 0x2014008f, 0x2014008f },
+ { 0x0000989c, 0x00c40002, 0x00c40002 },
+ { 0x0000989c, 0x003000f2, 0x003000f2 },
+ { 0x0000989c, 0x00440016, 0x00440016 },
+ { 0x0000989c, 0x00410040, 0x00410040 },
+ { 0x0000989c, 0x0001805e, 0x0001805e },
+ { 0x0000989c, 0x0000c0ab, 0x0000c0ab },
+ { 0x0000989c, 0x000000e1, 0x000000e1 },
+ { 0x0000989c, 0x00007080, 0x00007080 },
+ { 0x0000989c, 0x000000d4, 0x000000d4 },
+ { 0x000098d0, 0x0000000f, 0x0010000f },
+};
+
+static const u32 ar5416Bank7_9100[][2] = {
+ { 0x0000989c, 0x00000500 },
+ { 0x0000989c, 0x00000800 },
+ { 0x000098cc, 0x0000000e },
+};
+
+static const u32 ar5416Addac_9100[][2] = {
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000010 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x000000c0 },
+ {0x0000989c, 0x00000015 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x000098cc, 0x00000000 },
+};
+
+static const u32 ar5416Modes_9160[][6] = {
+ { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
+ { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
+ { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 },
+ { 0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008 },
+ { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 },
+ { 0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab, 0x098813cf },
+ { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 },
+ { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 },
+ { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
+ { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 },
+ { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
+ { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 },
+ { 0x00009844, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0, 0x037216a0 },
+ { 0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
+ { 0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
+ { 0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
+ { 0x00009850, 0x6c48b4e2, 0x6c48b4e2, 0x6c48b0e2, 0x6c48b0e2, 0x6c48b0e2 },
+ { 0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e },
+ { 0x0000985c, 0x31395d5e, 0x31395d5e, 0x31395d5e, 0x31395d5e, 0x31395d5e },
+ { 0x00009860, 0x00048d18, 0x00048d18, 0x00048d20, 0x00048d20, 0x00048d18 },
+ { 0x0000c864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 },
+ { 0x00009868, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0 },
+ { 0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 },
+ { 0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0 },
+ { 0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016 },
+ { 0x00009924, 0xd00a8a07, 0xd00a8a07, 0xd00a8a0d, 0xd00a8a0d, 0xd00a8a0d },
+ { 0x00009944, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020 },
+ { 0x00009960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40 },
+ { 0x0000a960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40 },
+ { 0x0000b960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40 },
+ { 0x00009964, 0x00001120, 0x00001120, 0x00001120, 0x00001120, 0x00001120 },
+ { 0x0000c968, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce, 0x000003ce },
+ { 0x0000c9bc, 0x001a0600, 0x001a0600, 0x001a0c00, 0x001a0c00, 0x001a0c00 },
+ { 0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, 0x038919be },
+ { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 },
+ { 0x000099c8, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329 },
+ { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 },
+ { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 },
+ { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x0000a204, 0x00000880, 0x00000880, 0x00000880, 0x00000880, 0x00000880 },
+ { 0x0000a208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, 0xd03e4788 },
+ { 0x0000a20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
+ { 0x0000b20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
+ { 0x0000c20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
+ { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a },
+ { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 },
+ { 0x0000a274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa, 0x0a1a7caa },
+ { 0x0000a300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, 0x18010000 },
+ { 0x0000a304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, 0x2e032402 },
+ { 0x0000a308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06, 0x4a0a3c06 },
+ { 0x0000a30c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, 0x621a540b },
+ { 0x0000a310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, 0x764f6c1b },
+ { 0x0000a314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, 0x845b7a5a },
+ { 0x0000a318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, 0x950f8ccf },
+ { 0x0000a31c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, 0xa5cf9b4f },
+ { 0x0000a320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, 0xbddfaf1f },
+ { 0x0000a324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, 0xd1ffc93f },
+ { 0x0000a328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x0000a32c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x0000a330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x0000a334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+};
+
+static const u32 ar5416Common_9160[][2] = {
+ { 0x0000000c, 0x00000000 },
+ { 0x00000030, 0x00020015 },
+ { 0x00000034, 0x00000005 },
+ { 0x00000040, 0x00000000 },
+ { 0x00000044, 0x00000008 },
+ { 0x00000048, 0x00000008 },
+ { 0x0000004c, 0x00000010 },
+ { 0x00000050, 0x00000000 },
+ { 0x00000054, 0x0000001f },
+ { 0x00000800, 0x00000000 },
+ { 0x00000804, 0x00000000 },
+ { 0x00000808, 0x00000000 },
+ { 0x0000080c, 0x00000000 },
+ { 0x00000810, 0x00000000 },
+ { 0x00000814, 0x00000000 },
+ { 0x00000818, 0x00000000 },
+ { 0x0000081c, 0x00000000 },
+ { 0x00000820, 0x00000000 },
+ { 0x00000824, 0x00000000 },
+ { 0x00001040, 0x002ffc0f },
+ { 0x00001044, 0x002ffc0f },
+ { 0x00001048, 0x002ffc0f },
+ { 0x0000104c, 0x002ffc0f },
+ { 0x00001050, 0x002ffc0f },
+ { 0x00001054, 0x002ffc0f },
+ { 0x00001058, 0x002ffc0f },
+ { 0x0000105c, 0x002ffc0f },
+ { 0x00001060, 0x002ffc0f },
+ { 0x00001064, 0x002ffc0f },
+ { 0x00001230, 0x00000000 },
+ { 0x00001270, 0x00000000 },
+ { 0x00001038, 0x00000000 },
+ { 0x00001078, 0x00000000 },
+ { 0x000010b8, 0x00000000 },
+ { 0x000010f8, 0x00000000 },
+ { 0x00001138, 0x00000000 },
+ { 0x00001178, 0x00000000 },
+ { 0x000011b8, 0x00000000 },
+ { 0x000011f8, 0x00000000 },
+ { 0x00001238, 0x00000000 },
+ { 0x00001278, 0x00000000 },
+ { 0x000012b8, 0x00000000 },
+ { 0x000012f8, 0x00000000 },
+ { 0x00001338, 0x00000000 },
+ { 0x00001378, 0x00000000 },
+ { 0x000013b8, 0x00000000 },
+ { 0x000013f8, 0x00000000 },
+ { 0x00001438, 0x00000000 },
+ { 0x00001478, 0x00000000 },
+ { 0x000014b8, 0x00000000 },
+ { 0x000014f8, 0x00000000 },
+ { 0x00001538, 0x00000000 },
+ { 0x00001578, 0x00000000 },
+ { 0x000015b8, 0x00000000 },
+ { 0x000015f8, 0x00000000 },
+ { 0x00001638, 0x00000000 },
+ { 0x00001678, 0x00000000 },
+ { 0x000016b8, 0x00000000 },
+ { 0x000016f8, 0x00000000 },
+ { 0x00001738, 0x00000000 },
+ { 0x00001778, 0x00000000 },
+ { 0x000017b8, 0x00000000 },
+ { 0x000017f8, 0x00000000 },
+ { 0x0000103c, 0x00000000 },
+ { 0x0000107c, 0x00000000 },
+ { 0x000010bc, 0x00000000 },
+ { 0x000010fc, 0x00000000 },
+ { 0x0000113c, 0x00000000 },
+ { 0x0000117c, 0x00000000 },
+ { 0x000011bc, 0x00000000 },
+ { 0x000011fc, 0x00000000 },
+ { 0x0000123c, 0x00000000 },
+ { 0x0000127c, 0x00000000 },
+ { 0x000012bc, 0x00000000 },
+ { 0x000012fc, 0x00000000 },
+ { 0x0000133c, 0x00000000 },
+ { 0x0000137c, 0x00000000 },
+ { 0x000013bc, 0x00000000 },
+ { 0x000013fc, 0x00000000 },
+ { 0x0000143c, 0x00000000 },
+ { 0x0000147c, 0x00000000 },
+ { 0x00004030, 0x00000002 },
+ { 0x0000403c, 0x00000002 },
+ { 0x00007010, 0x00000020 },
+ { 0x00007038, 0x000004c2 },
+ { 0x00008004, 0x00000000 },
+ { 0x00008008, 0x00000000 },
+ { 0x0000800c, 0x00000000 },
+ { 0x00008018, 0x00000700 },
+ { 0x00008020, 0x00000000 },
+ { 0x00008038, 0x00000000 },
+ { 0x0000803c, 0x00000000 },
+ { 0x00008048, 0x40000000 },
+ { 0x00008054, 0x00000000 },
+ { 0x00008058, 0x00000000 },
+ { 0x0000805c, 0x000fc78f },
+ { 0x00008060, 0x0000000f },
+ { 0x00008064, 0x00000000 },
+ { 0x000080c0, 0x2a82301a },
+ { 0x000080c4, 0x05dc01e0 },
+ { 0x000080c8, 0x1f402710 },
+ { 0x000080cc, 0x01f40000 },
+ { 0x000080d0, 0x00001e00 },
+ { 0x000080d4, 0x00000000 },
+ { 0x000080d8, 0x00400000 },
+ { 0x000080e0, 0xffffffff },
+ { 0x000080e4, 0x0000ffff },
+ { 0x000080e8, 0x003f3f3f },
+ { 0x000080ec, 0x00000000 },
+ { 0x000080f0, 0x00000000 },
+ { 0x000080f4, 0x00000000 },
+ { 0x000080f8, 0x00000000 },
+ { 0x000080fc, 0x00020000 },
+ { 0x00008100, 0x00020000 },
+ { 0x00008104, 0x00000001 },
+ { 0x00008108, 0x00000052 },
+ { 0x0000810c, 0x00000000 },
+ { 0x00008110, 0x00000168 },
+ { 0x00008118, 0x000100aa },
+ { 0x0000811c, 0x00003210 },
+ { 0x00008120, 0x08f04800 },
+ { 0x00008124, 0x00000000 },
+ { 0x00008128, 0x00000000 },
+ { 0x0000812c, 0x00000000 },
+ { 0x00008130, 0x00000000 },
+ { 0x00008134, 0x00000000 },
+ { 0x00008138, 0x00000000 },
+ { 0x0000813c, 0x00000000 },
+ { 0x00008144, 0xffffffff },
+ { 0x00008168, 0x00000000 },
+ { 0x0000816c, 0x00000000 },
+ { 0x00008170, 0x32143320 },
+ { 0x00008174, 0xfaa4fa50 },
+ { 0x00008178, 0x00000100 },
+ { 0x0000817c, 0x00000000 },
+ { 0x000081c4, 0x00000000 },
+ { 0x000081d0, 0x00003210 },
+ { 0x000081ec, 0x00000000 },
+ { 0x000081f0, 0x00000000 },
+ { 0x000081f4, 0x00000000 },
+ { 0x000081f8, 0x00000000 },
+ { 0x000081fc, 0x00000000 },
+ { 0x00008200, 0x00000000 },
+ { 0x00008204, 0x00000000 },
+ { 0x00008208, 0x00000000 },
+ { 0x0000820c, 0x00000000 },
+ { 0x00008210, 0x00000000 },
+ { 0x00008214, 0x00000000 },
+ { 0x00008218, 0x00000000 },
+ { 0x0000821c, 0x00000000 },
+ { 0x00008220, 0x00000000 },
+ { 0x00008224, 0x00000000 },
+ { 0x00008228, 0x00000000 },
+ { 0x0000822c, 0x00000000 },
+ { 0x00008230, 0x00000000 },
+ { 0x00008234, 0x00000000 },
+ { 0x00008238, 0x00000000 },
+ { 0x0000823c, 0x00000000 },
+ { 0x00008240, 0x00100000 },
+ { 0x00008244, 0x0010f400 },
+ { 0x00008248, 0x00000100 },
+ { 0x0000824c, 0x0001e800 },
+ { 0x00008250, 0x00000000 },
+ { 0x00008254, 0x00000000 },
+ { 0x00008258, 0x00000000 },
+ { 0x0000825c, 0x400000ff },
+ { 0x00008260, 0x00080922 },
+ { 0x00008270, 0x00000000 },
+ { 0x00008274, 0x40000000 },
+ { 0x00008278, 0x003e4180 },
+ { 0x0000827c, 0x00000000 },
+ { 0x00008284, 0x0000002c },
+ { 0x00008288, 0x0000002c },
+ { 0x0000828c, 0x00000000 },
+ { 0x00008294, 0x00000000 },
+ { 0x00008298, 0x00000000 },
+ { 0x00008300, 0x00000000 },
+ { 0x00008304, 0x00000000 },
+ { 0x00008308, 0x00000000 },
+ { 0x0000830c, 0x00000000 },
+ { 0x00008310, 0x00000000 },
+ { 0x00008314, 0x00000000 },
+ { 0x00008318, 0x00000000 },
+ { 0x00008328, 0x00000000 },
+ { 0x0000832c, 0x00000007 },
+ { 0x00008330, 0x00000302 },
+ { 0x00008334, 0x00000e00 },
+ { 0x00008338, 0x00ff0000 },
+ { 0x0000833c, 0x00000000 },
+ { 0x00008340, 0x000107ff },
+ { 0x00009808, 0x00000000 },
+ { 0x0000980c, 0xad848e19 },
+ { 0x00009810, 0x7d14e000 },
+ { 0x00009814, 0x9c0a9f6b },
+ { 0x0000981c, 0x00000000 },
+ { 0x0000982c, 0x0000a000 },
+ { 0x00009830, 0x00000000 },
+ { 0x0000983c, 0x00200400 },
+ { 0x00009840, 0x206a01ae },
+ { 0x0000984c, 0x1284233c },
+ { 0x00009854, 0x00000859 },
+ { 0x00009900, 0x00000000 },
+ { 0x00009904, 0x00000000 },
+ { 0x00009908, 0x00000000 },
+ { 0x0000990c, 0x00000000 },
+ { 0x0000991c, 0x10000fff },
+ { 0x00009920, 0x05100000 },
+ { 0x0000a920, 0x05100000 },
+ { 0x0000b920, 0x05100000 },
+ { 0x00009928, 0x00000001 },
+ { 0x0000992c, 0x00000004 },
+ { 0x00009934, 0x1e1f2022 },
+ { 0x00009938, 0x0a0b0c0d },
+ { 0x0000993c, 0x00000000 },
+ { 0x00009948, 0x9280b212 },
+ { 0x0000994c, 0x00020028 },
+ { 0x00009954, 0x5f3ca3de },
+ { 0x00009958, 0x2108ecff },
+ { 0x00009940, 0x00750604 },
+ { 0x0000c95c, 0x004b6a8e },
+ { 0x00009970, 0x190fb515 },
+ { 0x00009974, 0x00000000 },
+ { 0x00009978, 0x00000001 },
+ { 0x0000997c, 0x00000000 },
+ { 0x00009980, 0x00000000 },
+ { 0x00009984, 0x00000000 },
+ { 0x00009988, 0x00000000 },
+ { 0x0000998c, 0x00000000 },
+ { 0x00009990, 0x00000000 },
+ { 0x00009994, 0x00000000 },
+ { 0x00009998, 0x00000000 },
+ { 0x0000999c, 0x00000000 },
+ { 0x000099a0, 0x00000000 },
+ { 0x000099a4, 0x00000001 },
+ { 0x000099a8, 0x201fff00 },
+ { 0x000099ac, 0x006f0000 },
+ { 0x000099b0, 0x03051000 },
+ { 0x000099dc, 0x00000000 },
+ { 0x000099e0, 0x00000200 },
+ { 0x000099e4, 0xaaaaaaaa },
+ { 0x000099e8, 0x3c466478 },
+ { 0x000099ec, 0x0cc80caa },
+ { 0x000099fc, 0x00001042 },
+ { 0x00009b00, 0x00000000 },
+ { 0x00009b04, 0x00000001 },
+ { 0x00009b08, 0x00000002 },
+ { 0x00009b0c, 0x00000003 },
+ { 0x00009b10, 0x00000004 },
+ { 0x00009b14, 0x00000005 },
+ { 0x00009b18, 0x00000008 },
+ { 0x00009b1c, 0x00000009 },
+ { 0x00009b20, 0x0000000a },
+ { 0x00009b24, 0x0000000b },
+ { 0x00009b28, 0x0000000c },
+ { 0x00009b2c, 0x0000000d },
+ { 0x00009b30, 0x00000010 },
+ { 0x00009b34, 0x00000011 },
+ { 0x00009b38, 0x00000012 },
+ { 0x00009b3c, 0x00000013 },
+ { 0x00009b40, 0x00000014 },
+ { 0x00009b44, 0x00000015 },
+ { 0x00009b48, 0x00000018 },
+ { 0x00009b4c, 0x00000019 },
+ { 0x00009b50, 0x0000001a },
+ { 0x00009b54, 0x0000001b },
+ { 0x00009b58, 0x0000001c },
+ { 0x00009b5c, 0x0000001d },
+ { 0x00009b60, 0x00000020 },
+ { 0x00009b64, 0x00000021 },
+ { 0x00009b68, 0x00000022 },
+ { 0x00009b6c, 0x00000023 },
+ { 0x00009b70, 0x00000024 },
+ { 0x00009b74, 0x00000025 },
+ { 0x00009b78, 0x00000028 },
+ { 0x00009b7c, 0x00000029 },
+ { 0x00009b80, 0x0000002a },
+ { 0x00009b84, 0x0000002b },
+ { 0x00009b88, 0x0000002c },
+ { 0x00009b8c, 0x0000002d },
+ { 0x00009b90, 0x00000030 },
+ { 0x00009b94, 0x00000031 },
+ { 0x00009b98, 0x00000032 },
+ { 0x00009b9c, 0x00000033 },
+ { 0x00009ba0, 0x00000034 },
+ { 0x00009ba4, 0x00000035 },
+ { 0x00009ba8, 0x00000035 },
+ { 0x00009bac, 0x00000035 },
+ { 0x00009bb0, 0x00000035 },
+ { 0x00009bb4, 0x00000035 },
+ { 0x00009bb8, 0x00000035 },
+ { 0x00009bbc, 0x00000035 },
+ { 0x00009bc0, 0x00000035 },
+ { 0x00009bc4, 0x00000035 },
+ { 0x00009bc8, 0x00000035 },
+ { 0x00009bcc, 0x00000035 },
+ { 0x00009bd0, 0x00000035 },
+ { 0x00009bd4, 0x00000035 },
+ { 0x00009bd8, 0x00000035 },
+ { 0x00009bdc, 0x00000035 },
+ { 0x00009be0, 0x00000035 },
+ { 0x00009be4, 0x00000035 },
+ { 0x00009be8, 0x00000035 },
+ { 0x00009bec, 0x00000035 },
+ { 0x00009bf0, 0x00000035 },
+ { 0x00009bf4, 0x00000035 },
+ { 0x00009bf8, 0x00000010 },
+ { 0x00009bfc, 0x0000001a },
+ { 0x0000a210, 0x40806333 },
+ { 0x0000a214, 0x00106c10 },
+ { 0x0000a218, 0x009c4060 },
+ { 0x0000a220, 0x018830c6 },
+ { 0x0000a224, 0x00000400 },
+ { 0x0000a228, 0x001a0bb5 },
+ { 0x0000a22c, 0x00000000 },
+ { 0x0000a234, 0x20202020 },
+ { 0x0000a238, 0x20202020 },
+ { 0x0000a23c, 0x13c889af },
+ { 0x0000a240, 0x38490a20 },
+ { 0x0000a244, 0x00007bb6 },
+ { 0x0000a248, 0x0fff3ffc },
+ { 0x0000a24c, 0x00000001 },
+ { 0x0000a250, 0x0000e000 },
+ { 0x0000a254, 0x00000000 },
+ { 0x0000a258, 0x0cc75380 },
+ { 0x0000a25c, 0x0f0f0f01 },
+ { 0x0000a260, 0xdfa91f01 },
+ { 0x0000a268, 0x00000001 },
+ { 0x0000a26c, 0x0ebae9c6 },
+ { 0x0000b26c, 0x0ebae9c6 },
+ { 0x0000c26c, 0x0ebae9c6 },
+ { 0x0000d270, 0x00820820 },
+ { 0x0000a278, 0x1ce739ce },
+ { 0x0000a27c, 0x050701ce },
+ { 0x0000a338, 0x00000000 },
+ { 0x0000a33c, 0x00000000 },
+ { 0x0000a340, 0x00000000 },
+ { 0x0000a344, 0x00000000 },
+ { 0x0000a348, 0x3fffffff },
+ { 0x0000a34c, 0x3fffffff },
+ { 0x0000a350, 0x3fffffff },
+ { 0x0000a354, 0x0003ffff },
+ { 0x0000a358, 0x79bfaa03 },
+ { 0x0000d35c, 0x07ffffef },
+ { 0x0000d360, 0x0fffffe7 },
+ { 0x0000d364, 0x17ffffe5 },
+ { 0x0000d368, 0x1fffffe4 },
+ { 0x0000d36c, 0x37ffffe3 },
+ { 0x0000d370, 0x3fffffe3 },
+ { 0x0000d374, 0x57ffffe3 },
+ { 0x0000d378, 0x5fffffe2 },
+ { 0x0000d37c, 0x7fffffe2 },
+ { 0x0000d380, 0x7f3c7bba },
+ { 0x0000d384, 0xf3307ff0 },
+ { 0x0000a388, 0x0c000000 },
+ { 0x0000a38c, 0x20202020 },
+ { 0x0000a390, 0x20202020 },
+ { 0x0000a394, 0x1ce739ce },
+ { 0x0000a398, 0x000001ce },
+ { 0x0000a39c, 0x00000001 },
+ { 0x0000a3a0, 0x00000000 },
+ { 0x0000a3a4, 0x00000000 },
+ { 0x0000a3a8, 0x00000000 },
+ { 0x0000a3ac, 0x00000000 },
+ { 0x0000a3b0, 0x00000000 },
+ { 0x0000a3b4, 0x00000000 },
+ { 0x0000a3b8, 0x00000000 },
+ { 0x0000a3bc, 0x00000000 },
+ { 0x0000a3c0, 0x00000000 },
+ { 0x0000a3c4, 0x00000000 },
+ { 0x0000a3c8, 0x00000246 },
+ { 0x0000a3cc, 0x20202020 },
+ { 0x0000a3d0, 0x20202020 },
+ { 0x0000a3d4, 0x20202020 },
+ { 0x0000a3dc, 0x1ce739ce },
+ { 0x0000a3e0, 0x000001ce },
+};
+
+static const u32 ar5416Bank0_9160[][2] = {
+ { 0x000098b0, 0x1e5795e5 },
+ { 0x000098e0, 0x02008020 },
+};
+
+static const u32 ar5416BB_RfGain_9160[][3] = {
+ { 0x00009a00, 0x00000000, 0x00000000 },
+ { 0x00009a04, 0x00000040, 0x00000040 },
+ { 0x00009a08, 0x00000080, 0x00000080 },
+ { 0x00009a0c, 0x000001a1, 0x00000141 },
+ { 0x00009a10, 0x000001e1, 0x00000181 },
+ { 0x00009a14, 0x00000021, 0x000001c1 },
+ { 0x00009a18, 0x00000061, 0x00000001 },
+ { 0x00009a1c, 0x00000168, 0x00000041 },
+ { 0x00009a20, 0x000001a8, 0x000001a8 },
+ { 0x00009a24, 0x000001e8, 0x000001e8 },
+ { 0x00009a28, 0x00000028, 0x00000028 },
+ { 0x00009a2c, 0x00000068, 0x00000068 },
+ { 0x00009a30, 0x00000189, 0x000000a8 },
+ { 0x00009a34, 0x000001c9, 0x00000169 },
+ { 0x00009a38, 0x00000009, 0x000001a9 },
+ { 0x00009a3c, 0x00000049, 0x000001e9 },
+ { 0x00009a40, 0x00000089, 0x00000029 },
+ { 0x00009a44, 0x00000170, 0x00000069 },
+ { 0x00009a48, 0x000001b0, 0x00000190 },
+ { 0x00009a4c, 0x000001f0, 0x000001d0 },
+ { 0x00009a50, 0x00000030, 0x00000010 },
+ { 0x00009a54, 0x00000070, 0x00000050 },
+ { 0x00009a58, 0x00000191, 0x00000090 },
+ { 0x00009a5c, 0x000001d1, 0x00000151 },
+ { 0x00009a60, 0x00000011, 0x00000191 },
+ { 0x00009a64, 0x00000051, 0x000001d1 },
+ { 0x00009a68, 0x00000091, 0x00000011 },
+ { 0x00009a6c, 0x000001b8, 0x00000051 },
+ { 0x00009a70, 0x000001f8, 0x00000198 },
+ { 0x00009a74, 0x00000038, 0x000001d8 },
+ { 0x00009a78, 0x00000078, 0x00000018 },
+ { 0x00009a7c, 0x00000199, 0x00000058 },
+ { 0x00009a80, 0x000001d9, 0x00000098 },
+ { 0x00009a84, 0x00000019, 0x00000159 },
+ { 0x00009a88, 0x00000059, 0x00000199 },
+ { 0x00009a8c, 0x00000099, 0x000001d9 },
+ { 0x00009a90, 0x000000d9, 0x00000019 },
+ { 0x00009a94, 0x000000f9, 0x00000059 },
+ { 0x00009a98, 0x000000f9, 0x00000099 },
+ { 0x00009a9c, 0x000000f9, 0x000000d9 },
+ { 0x00009aa0, 0x000000f9, 0x000000f9 },
+ { 0x00009aa4, 0x000000f9, 0x000000f9 },
+ { 0x00009aa8, 0x000000f9, 0x000000f9 },
+ { 0x00009aac, 0x000000f9, 0x000000f9 },
+ { 0x00009ab0, 0x000000f9, 0x000000f9 },
+ { 0x00009ab4, 0x000000f9, 0x000000f9 },
+ { 0x00009ab8, 0x000000f9, 0x000000f9 },
+ { 0x00009abc, 0x000000f9, 0x000000f9 },
+ { 0x00009ac0, 0x000000f9, 0x000000f9 },
+ { 0x00009ac4, 0x000000f9, 0x000000f9 },
+ { 0x00009ac8, 0x000000f9, 0x000000f9 },
+ { 0x00009acc, 0x000000f9, 0x000000f9 },
+ { 0x00009ad0, 0x000000f9, 0x000000f9 },
+ { 0x00009ad4, 0x000000f9, 0x000000f9 },
+ { 0x00009ad8, 0x000000f9, 0x000000f9 },
+ { 0x00009adc, 0x000000f9, 0x000000f9 },
+ { 0x00009ae0, 0x000000f9, 0x000000f9 },
+ { 0x00009ae4, 0x000000f9, 0x000000f9 },
+ { 0x00009ae8, 0x000000f9, 0x000000f9 },
+ { 0x00009aec, 0x000000f9, 0x000000f9 },
+ { 0x00009af0, 0x000000f9, 0x000000f9 },
+ { 0x00009af4, 0x000000f9, 0x000000f9 },
+ { 0x00009af8, 0x000000f9, 0x000000f9 },
+ { 0x00009afc, 0x000000f9, 0x000000f9 },
+};
+
+static const u32 ar5416Bank1_9160[][2] = {
+ { 0x000098b0, 0x02108421 },
+ { 0x000098ec, 0x00000008 },
+};
+
+static const u32 ar5416Bank2_9160[][2] = {
+ { 0x000098b0, 0x0e73ff17 },
+ { 0x000098e0, 0x00000420 },
+};
+
+static const u32 ar5416Bank3_9160[][3] = {
+ { 0x000098f0, 0x01400018, 0x01c00018 },
+};
+
+static const u32 ar5416Bank6_9160[][3] = {
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00e00000, 0x00e00000 },
+ { 0x0000989c, 0x005e0000, 0x005e0000 },
+ { 0x0000989c, 0x00120000, 0x00120000 },
+ { 0x0000989c, 0x00620000, 0x00620000 },
+ { 0x0000989c, 0x00020000, 0x00020000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x40ff0000, 0x40ff0000 },
+ { 0x0000989c, 0x005f0000, 0x005f0000 },
+ { 0x0000989c, 0x00870000, 0x00870000 },
+ { 0x0000989c, 0x00f90000, 0x00f90000 },
+ { 0x0000989c, 0x007b0000, 0x007b0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00f50000, 0x00f50000 },
+ { 0x0000989c, 0x00dc0000, 0x00dc0000 },
+ { 0x0000989c, 0x00110000, 0x00110000 },
+ { 0x0000989c, 0x006100a8, 0x006100a8 },
+ { 0x0000989c, 0x004210a2, 0x004210a2 },
+ { 0x0000989c, 0x0014008f, 0x0014008f },
+ { 0x0000989c, 0x00c40003, 0x00c40003 },
+ { 0x0000989c, 0x003000f2, 0x003000f2 },
+ { 0x0000989c, 0x00440016, 0x00440016 },
+ { 0x0000989c, 0x00410040, 0x00410040 },
+ { 0x0000989c, 0x0001805e, 0x0001805e },
+ { 0x0000989c, 0x0000c0ab, 0x0000c0ab },
+ { 0x0000989c, 0x000000f1, 0x000000f1 },
+ { 0x0000989c, 0x00002081, 0x00002081 },
+ { 0x0000989c, 0x000000d4, 0x000000d4 },
+ { 0x000098d0, 0x0000000f, 0x0010000f },
+};
+
+static const u32 ar5416Bank6TPC_9160[][3] = {
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00000000, 0x00000000 },
+ { 0x0000989c, 0x00e00000, 0x00e00000 },
+ { 0x0000989c, 0x005e0000, 0x005e0000 },
+ { 0x0000989c, 0x00120000, 0x00120000 },
+ { 0x0000989c, 0x00620000, 0x00620000 },
+ { 0x0000989c, 0x00020000, 0x00020000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x40ff0000, 0x40ff0000 },
+ { 0x0000989c, 0x005f0000, 0x005f0000 },
+ { 0x0000989c, 0x00870000, 0x00870000 },
+ { 0x0000989c, 0x00f90000, 0x00f90000 },
+ { 0x0000989c, 0x007b0000, 0x007b0000 },
+ { 0x0000989c, 0x00ff0000, 0x00ff0000 },
+ { 0x0000989c, 0x00f50000, 0x00f50000 },
+ { 0x0000989c, 0x00dc0000, 0x00dc0000 },
+ { 0x0000989c, 0x00110000, 0x00110000 },
+ { 0x0000989c, 0x006100a8, 0x006100a8 },
+ { 0x0000989c, 0x00423022, 0x00423022 },
+ { 0x0000989c, 0x2014008f, 0x2014008f },
+ { 0x0000989c, 0x00c40002, 0x00c40002 },
+ { 0x0000989c, 0x003000f2, 0x003000f2 },
+ { 0x0000989c, 0x00440016, 0x00440016 },
+ { 0x0000989c, 0x00410040, 0x00410040 },
+ { 0x0000989c, 0x0001805e, 0x0001805e },
+ { 0x0000989c, 0x0000c0ab, 0x0000c0ab },
+ { 0x0000989c, 0x000000e1, 0x000000e1 },
+ { 0x0000989c, 0x00007080, 0x00007080 },
+ { 0x0000989c, 0x000000d4, 0x000000d4 },
+ { 0x000098d0, 0x0000000f, 0x0010000f },
+};
+
+static const u32 ar5416Bank7_9160[][2] = {
+ { 0x0000989c, 0x00000500 },
+ { 0x0000989c, 0x00000800 },
+ { 0x000098cc, 0x0000000e },
+};
+
+static const u32 ar5416Addac_9160[][2] = {
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x000000c0 },
+ {0x0000989c, 0x00000018 },
+ {0x0000989c, 0x00000004 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x000000c0 },
+ {0x0000989c, 0x00000019 },
+ {0x0000989c, 0x00000004 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000004 },
+ {0x0000989c, 0x00000003 },
+ {0x0000989c, 0x00000008 },
+ {0x0000989c, 0x00000000 },
+ {0x000098cc, 0x00000000 },
+};
+
+static const u32 ar5416Addac_91601_1[][2] = {
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x000000c0 },
+ {0x0000989c, 0x00000018 },
+ {0x0000989c, 0x00000004 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x000000c0 },
+ {0x0000989c, 0x00000019 },
+ {0x0000989c, 0x00000004 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x0000989c, 0x00000000 },
+ {0x000098cc, 0x00000000 },
+};
+
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
new file mode 100644
index 000000000000..5fdbb53b47e0
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
@@ -0,0 +1,1000 @@
+/*
+ * Copyright (c) 2008-2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "hw.h"
+#include "hw-ops.h"
+#include "ar9002_phy.h"
+
+#define AR9285_CLCAL_REDO_THRESH 1
+
+static void ar9002_hw_setup_calibration(struct ath_hw *ah,
+ struct ath9k_cal_list *currCal)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(0),
+ AR_PHY_TIMING_CTRL4_IQCAL_LOG_COUNT_MAX,
+ currCal->calData->calCountMax);
+
+ switch (currCal->calData->calType) {
+ case IQ_MISMATCH_CAL:
+ REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_IQ);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "starting IQ Mismatch Calibration\n");
+ break;
+ case ADC_GAIN_CAL:
+ REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_GAIN);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "starting ADC Gain Calibration\n");
+ break;
+ case ADC_DC_CAL:
+ REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_DC_PER);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "starting ADC DC Calibration\n");
+ break;
+ case ADC_DC_INIT_CAL:
+ REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_DC_INIT);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "starting Init ADC DC Calibration\n");
+ break;
+ case TEMP_COMP_CAL:
+ break; /* Not supported */
+ }
+
+ REG_SET_BIT(ah, AR_PHY_TIMING_CTRL4(0),
+ AR_PHY_TIMING_CTRL4_DO_CAL);
+}
+
+static bool ar9002_hw_per_calibration(struct ath_hw *ah,
+ struct ath9k_channel *ichan,
+ u8 rxchainmask,
+ struct ath9k_cal_list *currCal)
+{
+ bool iscaldone = false;
+
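+	/*
+	 * Per-calibration state machine: while a calibration is running,
+	 * wait for the hardware to clear the DO_CAL bit, accumulate the
+	 * measurement, and either post-process once enough samples have
+	 * been collected or kick off another measurement pass.
+	 */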
+ if (currCal->calState == CAL_RUNNING) {
+ if (!(REG_READ(ah, AR_PHY_TIMING_CTRL4(0)) &
+ AR_PHY_TIMING_CTRL4_DO_CAL)) {
+
+ currCal->calData->calCollect(ah);
+ ah->cal_samples++;
+
+ if (ah->cal_samples >=
+ currCal->calData->calNumSamples) {
+ int i, numChains = 0;
+ for (i = 0; i < AR5416_MAX_CHAINS; i++) {
+ if (rxchainmask & (1 << i))
+ numChains++;
+ }
+
+ currCal->calData->calPostProc(ah, numChains);
+ ichan->CalValid |= currCal->calData->calType;
+ currCal->calState = CAL_DONE;
+ iscaldone = true;
+ } else {
+ ar9002_hw_setup_calibration(ah, currCal);
+ }
+ }
+ } else if (!(ichan->CalValid & currCal->calData->calType)) {
+ ath9k_hw_reset_calibration(ah, currCal);
+ }
+
+ return iscaldone;
+}
+
+/* Assumes you are talking about the currently configured channel */
+static bool ar9002_hw_iscal_supported(struct ath_hw *ah,
+ enum ath9k_cal_types calType)
+{
+ struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
+
+ switch (calType & ah->supp_cals) {
+ case IQ_MISMATCH_CAL: /* Both 2 GHz and 5 GHz support OFDM */
+ return true;
+ case ADC_GAIN_CAL:
+ case ADC_DC_CAL:
+ if (!(conf->channel->band == IEEE80211_BAND_2GHZ &&
+ conf_is_ht20(conf)))
+ return true;
+ break;
+ }
+ return false;
+}
+
+static void ar9002_hw_iqcal_collect(struct ath_hw *ah)
+{
+ int i;
+
+ for (i = 0; i < AR5416_MAX_CHAINS; i++) {
+ ah->totalPowerMeasI[i] +=
+ REG_READ(ah, AR_PHY_CAL_MEAS_0(i));
+ ah->totalPowerMeasQ[i] +=
+ REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
+ ah->totalIqCorrMeas[i] +=
+ (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
+ ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
+ "%d: Chn %d pmi=0x%08x;pmq=0x%08x;iqcm=0x%08x;\n",
+ ah->cal_samples, i, ah->totalPowerMeasI[i],
+ ah->totalPowerMeasQ[i],
+ ah->totalIqCorrMeas[i]);
+ }
+}
+
+static void ar9002_hw_adc_gaincal_collect(struct ath_hw *ah)
+{
+ int i;
+
+ for (i = 0; i < AR5416_MAX_CHAINS; i++) {
+ ah->totalAdcIOddPhase[i] +=
+ REG_READ(ah, AR_PHY_CAL_MEAS_0(i));
+ ah->totalAdcIEvenPhase[i] +=
+ REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
+ ah->totalAdcQOddPhase[i] +=
+ REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
+ ah->totalAdcQEvenPhase[i] +=
+ REG_READ(ah, AR_PHY_CAL_MEAS_3(i));
+
+ ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
+ "%d: Chn %d oddi=0x%08x; eveni=0x%08x; "
+ "oddq=0x%08x; evenq=0x%08x;\n",
+ ah->cal_samples, i,
+ ah->totalAdcIOddPhase[i],
+ ah->totalAdcIEvenPhase[i],
+ ah->totalAdcQOddPhase[i],
+ ah->totalAdcQEvenPhase[i]);
+ }
+}
+
+static void ar9002_hw_adc_dccal_collect(struct ath_hw *ah)
+{
+ int i;
+
+ for (i = 0; i < AR5416_MAX_CHAINS; i++) {
+ ah->totalAdcDcOffsetIOddPhase[i] +=
+ (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_0(i));
+ ah->totalAdcDcOffsetIEvenPhase[i] +=
+ (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
+ ah->totalAdcDcOffsetQOddPhase[i] +=
+ (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
+ ah->totalAdcDcOffsetQEvenPhase[i] +=
+ (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_3(i));
+
+ ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
+ "%d: Chn %d oddi=0x%08x; eveni=0x%08x; "
+ "oddq=0x%08x; evenq=0x%08x;\n",
+ ah->cal_samples, i,
+ ah->totalAdcDcOffsetIOddPhase[i],
+ ah->totalAdcDcOffsetIEvenPhase[i],
+ ah->totalAdcDcOffsetQOddPhase[i],
+ ah->totalAdcDcOffsetQEvenPhase[i]);
+ }
+}
+
+static void ar9002_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ u32 powerMeasQ, powerMeasI, iqCorrMeas;
+ u32 qCoffDenom, iCoffDenom;
+ int32_t qCoff, iCoff;
+ int iqCorrNeg, i;
+
+ for (i = 0; i < numChains; i++) {
+ powerMeasI = ah->totalPowerMeasI[i];
+ powerMeasQ = ah->totalPowerMeasQ[i];
+ iqCorrMeas = ah->totalIqCorrMeas[i];
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Starting IQ Cal and Correction for Chain %d\n",
+ i);
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+			  "Original: Chn %d iq_corr_meas = 0x%08x\n",
+ i, ah->totalIqCorrMeas[i]);
+
+ iqCorrNeg = 0;
+
+ if (iqCorrMeas > 0x80000000) {
+ iqCorrMeas = (0xffffffff - iqCorrMeas) + 1;
+ iqCorrNeg = 1;
+ }
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d pwr_meas_i = 0x%08x\n", i, powerMeasI);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d pwr_meas_q = 0x%08x\n", i, powerMeasQ);
+ ath_print(common, ATH_DBG_CALIBRATE, "iqCorrNeg is 0x%08x\n",
+ iqCorrNeg);
+
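+		/*
+		 * Derive fixed-point correction coefficients from the
+		 * accumulated power measurements; the /128 and /64 scale
+		 * factors keep the results within the range of the
+		 * hardware's I/Q correction fields programmed below.
+		 */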
+ iCoffDenom = (powerMeasI / 2 + powerMeasQ / 2) / 128;
+ qCoffDenom = powerMeasQ / 64;
+
+ if ((powerMeasQ != 0) && (iCoffDenom != 0) &&
+ (qCoffDenom != 0)) {
+ iCoff = iqCorrMeas / iCoffDenom;
+ qCoff = powerMeasI / qCoffDenom - 64;
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d iCoff = 0x%08x\n", i, iCoff);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d qCoff = 0x%08x\n", i, qCoff);
+
+ iCoff = iCoff & 0x3f;
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "New: Chn %d iCoff = 0x%08x\n", i, iCoff);
+ if (iqCorrNeg == 0x0)
+ iCoff = 0x40 - iCoff;
+
+ if (qCoff > 15)
+ qCoff = 15;
+			else if (qCoff <= -16)
+				qCoff = -16;
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d : iCoff = 0x%x qCoff = 0x%x\n",
+ i, iCoff, qCoff);
+
+ REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(i),
+ AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF,
+ iCoff);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(i),
+ AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF,
+ qCoff);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "IQ Cal and Correction done for Chain %d\n",
+ i);
+ }
+ }
+
+ REG_SET_BIT(ah, AR_PHY_TIMING_CTRL4(0),
+ AR_PHY_TIMING_CTRL4_IQCORR_ENABLE);
+}
+
+static void ar9002_hw_adc_gaincal_calibrate(struct ath_hw *ah, u8 numChains)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ u32 iOddMeasOffset, iEvenMeasOffset, qOddMeasOffset, qEvenMeasOffset;
+ u32 qGainMismatch, iGainMismatch, val, i;
+
+ for (i = 0; i < numChains; i++) {
+ iOddMeasOffset = ah->totalAdcIOddPhase[i];
+ iEvenMeasOffset = ah->totalAdcIEvenPhase[i];
+ qOddMeasOffset = ah->totalAdcQOddPhase[i];
+ qEvenMeasOffset = ah->totalAdcQEvenPhase[i];
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Starting ADC Gain Cal for Chain %d\n", i);
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d pwr_meas_odd_i = 0x%08x\n", i,
+ iOddMeasOffset);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d pwr_meas_even_i = 0x%08x\n", i,
+ iEvenMeasOffset);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d pwr_meas_odd_q = 0x%08x\n", i,
+ qOddMeasOffset);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d pwr_meas_even_q = 0x%08x\n", i,
+ qEvenMeasOffset);
+
+ if (iOddMeasOffset != 0 && qEvenMeasOffset != 0) {
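+			/*
+			 * Gain mismatch is the even/odd (I) and odd/even (Q)
+			 * phase power ratio in units of 1/32, truncated to
+			 * the 6-bit correction field.
+			 */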
+ iGainMismatch =
+ ((iEvenMeasOffset * 32) /
+ iOddMeasOffset) & 0x3f;
+ qGainMismatch =
+ ((qOddMeasOffset * 32) /
+ qEvenMeasOffset) & 0x3f;
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d gain_mismatch_i = 0x%08x\n", i,
+ iGainMismatch);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d gain_mismatch_q = 0x%08x\n", i,
+ qGainMismatch);
+
+ val = REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i));
+ val &= 0xfffff000;
+ val |= (qGainMismatch) | (iGainMismatch << 6);
+ REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i), val);
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "ADC Gain Cal done for Chain %d\n", i);
+ }
+ }
+
+ REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0),
+ REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0)) |
+ AR_PHY_NEW_ADC_GAIN_CORR_ENABLE);
+}
+
+static void ar9002_hw_adc_dccal_calibrate(struct ath_hw *ah, u8 numChains)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ u32 iOddMeasOffset, iEvenMeasOffset, val, i;
+ int32_t qOddMeasOffset, qEvenMeasOffset, qDcMismatch, iDcMismatch;
+ const struct ath9k_percal_data *calData =
+ ah->cal_list_curr->calData;
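+	/*
+	 * Number of hardware samples behind the accumulated measurements:
+	 * 2^(calCountMax + 5) per measurement pass, times the number of
+	 * passes; used below to average the DC offsets.
+	 */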
+ u32 numSamples =
+ (1 << (calData->calCountMax + 5)) * calData->calNumSamples;
+
+ for (i = 0; i < numChains; i++) {
+ iOddMeasOffset = ah->totalAdcDcOffsetIOddPhase[i];
+ iEvenMeasOffset = ah->totalAdcDcOffsetIEvenPhase[i];
+ qOddMeasOffset = ah->totalAdcDcOffsetQOddPhase[i];
+ qEvenMeasOffset = ah->totalAdcDcOffsetQEvenPhase[i];
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Starting ADC DC Offset Cal for Chain %d\n", i);
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d pwr_meas_odd_i = %d\n", i,
+ iOddMeasOffset);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d pwr_meas_even_i = %d\n", i,
+ iEvenMeasOffset);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d pwr_meas_odd_q = %d\n", i,
+ qOddMeasOffset);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d pwr_meas_even_q = %d\n", i,
+ qEvenMeasOffset);
+
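+		/*
+		 * DC mismatch is the averaged per-sample difference between
+		 * even and odd phase measurements, scaled by 2 and kept in
+		 * the 9-bit correction field.
+		 */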
+ iDcMismatch = (((iEvenMeasOffset - iOddMeasOffset) * 2) /
+ numSamples) & 0x1ff;
+ qDcMismatch = (((qOddMeasOffset - qEvenMeasOffset) * 2) /
+ numSamples) & 0x1ff;
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d dc_offset_mismatch_i = 0x%08x\n", i,
+ iDcMismatch);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d dc_offset_mismatch_q = 0x%08x\n", i,
+ qDcMismatch);
+
+ val = REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i));
+ val &= 0xc0000fff;
+ val |= (qDcMismatch << 12) | (iDcMismatch << 21);
+ REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i), val);
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "ADC DC Offset Cal done for Chain %d\n", i);
+ }
+
+ REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0),
+ REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0)) |
+ AR_PHY_NEW_ADC_DC_OFFSET_CORR_ENABLE);
+}
+
+static void ar9287_hw_olc_temp_compensation(struct ath_hw *ah)
+{
+ u32 rddata;
+ int32_t delta, currPDADC, slope;
+
+ rddata = REG_READ(ah, AR_PHY_TX_PWRCTRL4);
+ currPDADC = MS(rddata, AR_PHY_TX_PWRCTRL_PD_AVG_OUT);
+
+ if (ah->initPDADC == 0 || currPDADC == 0) {
+ /*
+		 * A zero value indicates that no frames have been
+		 * transmitted yet; temperature compensation cannot be done
+		 * until frames have been transmitted.
+ */
+ return;
+ } else {
+ slope = ah->eep_ops->get_eeprom(ah, EEP_TEMPSENSE_SLOPE);
+
+ if (slope == 0) { /* to avoid divide by zero case */
+ delta = 0;
+ } else {
+ delta = ((currPDADC - ah->initPDADC)*4) / slope;
+ }
+ REG_RMW_FIELD(ah, AR_PHY_CH0_TX_PWRCTRL11,
+ AR_PHY_TX_PWRCTRL_OLPC_TEMP_COMP, delta);
+ REG_RMW_FIELD(ah, AR_PHY_CH1_TX_PWRCTRL11,
+ AR_PHY_TX_PWRCTRL_OLPC_TEMP_COMP, delta);
+ }
+}
+
+static void ar9280_hw_olc_temp_compensation(struct ath_hw *ah)
+{
+ u32 rddata, i;
+ int delta, currPDADC, regval;
+
+ rddata = REG_READ(ah, AR_PHY_TX_PWRCTRL4);
+ currPDADC = MS(rddata, AR_PHY_TX_PWRCTRL_PD_AVG_OUT);
+
+ if (ah->initPDADC == 0 || currPDADC == 0)
+ return;
+
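+	/*
+	 * Convert the PDADC drift since the initial measurement into a
+	 * tx gain table adjustment (rounded); the divisor depends on the
+	 * EEPROM's 5 GHz DAC high-power setting.
+	 */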
+ if (ah->eep_ops->get_eeprom(ah, EEP_DAC_HPWR_5G))
+ delta = (currPDADC - ah->initPDADC + 4) / 8;
+ else
+ delta = (currPDADC - ah->initPDADC + 5) / 10;
+
+ if (delta != ah->PDADCdelta) {
+ ah->PDADCdelta = delta;
+ for (i = 1; i < AR9280_TX_GAIN_TABLE_SIZE; i++) {
+ regval = ah->originalGain[i] - delta;
+ if (regval < 0)
+ regval = 0;
+
+ REG_RMW_FIELD(ah,
+ AR_PHY_TX_GAIN_TBL1 + i * 4,
+ AR_PHY_TX_GAIN, regval);
+ }
+ }
+}
+
+static void ar9271_hw_pa_cal(struct ath_hw *ah, bool is_reset)
+{
+ u32 regVal;
+ unsigned int i;
+ u32 regList[][2] = {
+ { 0x786c, 0 },
+ { 0x7854, 0 },
+ { 0x7820, 0 },
+ { 0x7824, 0 },
+ { 0x7868, 0 },
+ { 0x783c, 0 },
+		{ 0x7838, 0 },
+		{ 0x7828, 0 },
+ };
+
+ for (i = 0; i < ARRAY_SIZE(regList); i++)
+ regList[i][1] = REG_READ(ah, regList[i][0]);
+
+ regVal = REG_READ(ah, 0x7834);
+ regVal &= (~(0x1));
+ REG_WRITE(ah, 0x7834, regVal);
+ regVal = REG_READ(ah, 0x9808);
+ regVal |= (0x1 << 27);
+ REG_WRITE(ah, 0x9808, regVal);
+
+ /* 786c,b23,1, pwddac=1 */
+ REG_RMW_FIELD(ah, AR9285_AN_TOP3, AR9285_AN_TOP3_PWDDAC, 1);
+ /* 7854, b5,1, pdrxtxbb=1 */
+ REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDRXTXBB1, 1);
+ /* 7854, b7,1, pdv2i=1 */
+ REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDV2I, 1);
+ /* 7854, b8,1, pddacinterface=1 */
+ REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDDACIF, 1);
+ /* 7824,b12,0, offcal=0 */
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G2, AR9285_AN_RF2G2_OFFCAL, 0);
+ /* 7838, b1,0, pwddb=0 */
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G7, AR9285_AN_RF2G7_PWDDB, 0);
+ /* 7820,b11,0, enpacal=0 */
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_ENPACAL, 0);
+ /* 7820,b25,1, pdpadrv1=0 */
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV1, 0);
+ /* 7820,b24,0, pdpadrv2=0 */
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV2, 0);
+ /* 7820,b23,0, pdpaout=0 */
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPAOUT, 0);
+ /* 783c,b14-16,7, padrvgn2tab_0=7 */
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G8, AR9285_AN_RF2G8_PADRVGN2TAB0, 7);
+ /*
+ * 7838,b29-31,0, padrvgn1tab_0=0
+ * does not matter since we turn it off
+ */
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G7, AR9285_AN_RF2G7_PADRVGN2TAB0, 0);
+
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9271_AN_RF2G3_CCOMP, 0xfff);
+
+ /* Set:
+ * localmode=1,bmode=1,bmoderxtx=1,synthon=1,
+ * txon=1,paon=1,oscon=1,synthon_force=1
+ */
+ REG_WRITE(ah, AR9285_AN_TOP2, 0xca0358a0);
+ udelay(30);
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9271_AN_RF2G6_OFFS, 0);
+
+ /* find off_6_1; */
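+	/*
+	 * Successive-approximation search: set each offset bit from MSB to
+	 * LSB, let it settle, then keep or clear the bit based on the value
+	 * read back through the SPARE9 field.
+	 */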
+ for (i = 6; i > 0; i--) {
+ regVal = REG_READ(ah, 0x7834);
+ regVal |= (1 << (20 + i));
+ REG_WRITE(ah, 0x7834, regVal);
+ udelay(1);
+ /* regVal = REG_READ(ah, 0x7834); */
+ regVal &= (~(0x1 << (20 + i)));
+ regVal |= (MS(REG_READ(ah, 0x7840), AR9285_AN_RXTXBB1_SPARE9)
+ << (20 + i));
+ REG_WRITE(ah, 0x7834, regVal);
+ }
+
+ regVal = (regVal >> 20) & 0x7f;
+
+ /* Update PA cal info */
+ if ((!is_reset) && (ah->pacal_info.prev_offset == regVal)) {
+ if (ah->pacal_info.max_skipcount < MAX_PACAL_SKIPCOUNT)
+ ah->pacal_info.max_skipcount =
+ 2 * ah->pacal_info.max_skipcount;
+ ah->pacal_info.skipcount = ah->pacal_info.max_skipcount;
+ } else {
+ ah->pacal_info.max_skipcount = 1;
+ ah->pacal_info.skipcount = 0;
+ ah->pacal_info.prev_offset = regVal;
+ }
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
+ regVal = REG_READ(ah, 0x7834);
+ regVal |= 0x1;
+ REG_WRITE(ah, 0x7834, regVal);
+ regVal = REG_READ(ah, 0x9808);
+ regVal &= (~(0x1 << 27));
+ REG_WRITE(ah, 0x9808, regVal);
+
+ for (i = 0; i < ARRAY_SIZE(regList); i++)
+ REG_WRITE(ah, regList[i][0], regList[i][1]);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+}
+
+static inline void ar9285_hw_pa_cal(struct ath_hw *ah, bool is_reset)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ u32 regVal;
+ int i, offset, offs_6_1, offs_0;
+ u32 ccomp_org, reg_field;
+ u32 regList[][2] = {
+ { 0x786c, 0 },
+ { 0x7854, 0 },
+ { 0x7820, 0 },
+ { 0x7824, 0 },
+ { 0x7868, 0 },
+ { 0x783c, 0 },
+ { 0x7838, 0 },
+ };
+
+ ath_print(common, ATH_DBG_CALIBRATE, "Running PA Calibration\n");
+
+ /* PA CAL is not needed for high power solution */
+ if (ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE) ==
+ AR5416_EEP_TXGAIN_HIGH_POWER)
+ return;
+
+ if (AR_SREV_9285_11(ah)) {
+ REG_WRITE(ah, AR9285_AN_TOP4, (AR9285_AN_TOP4_DEFAULT | 0x14));
+ udelay(10);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(regList); i++)
+ regList[i][1] = REG_READ(ah, regList[i][0]);
+
+ regVal = REG_READ(ah, 0x7834);
+ regVal &= (~(0x1));
+ REG_WRITE(ah, 0x7834, regVal);
+ regVal = REG_READ(ah, 0x9808);
+ regVal |= (0x1 << 27);
+ REG_WRITE(ah, 0x9808, regVal);
+
+ REG_RMW_FIELD(ah, AR9285_AN_TOP3, AR9285_AN_TOP3_PWDDAC, 1);
+ REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDRXTXBB1, 1);
+ REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDV2I, 1);
+ REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDDACIF, 1);
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G2, AR9285_AN_RF2G2_OFFCAL, 0);
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G7, AR9285_AN_RF2G7_PWDDB, 0);
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_ENPACAL, 0);
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV1, 0);
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV2, 0);
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPAOUT, 0);
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G8, AR9285_AN_RF2G8_PADRVGN2TAB0, 7);
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G7, AR9285_AN_RF2G7_PADRVGN2TAB0, 0);
+ ccomp_org = MS(REG_READ(ah, AR9285_AN_RF2G6), AR9285_AN_RF2G6_CCOMP);
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9285_AN_RF2G6_CCOMP, 0xf);
+
+ REG_WRITE(ah, AR9285_AN_TOP2, 0xca0358a0);
+ udelay(30);
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9285_AN_RF2G6_OFFS, 0);
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_PDVCCOMP, 0);
+
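+	/*
+	 * Successive-approximation search for the PA offset, as in the
+	 * AR9271 version above: set each bit, let it settle, then replace
+	 * it with the comparator readback from SPARE9.
+	 */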
+ for (i = 6; i > 0; i--) {
+ regVal = REG_READ(ah, 0x7834);
+ regVal |= (1 << (19 + i));
+ REG_WRITE(ah, 0x7834, regVal);
+ udelay(1);
+ regVal = REG_READ(ah, 0x7834);
+ regVal &= (~(0x1 << (19 + i)));
+ reg_field = MS(REG_READ(ah, 0x7840), AR9285_AN_RXTXBB1_SPARE9);
+ regVal |= (reg_field << (19 + i));
+ REG_WRITE(ah, 0x7834, regVal);
+ }
+
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_PDVCCOMP, 1);
+ udelay(1);
+ reg_field = MS(REG_READ(ah, AR9285_AN_RF2G9), AR9285_AN_RXTXBB1_SPARE9);
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_PDVCCOMP, reg_field);
+ offs_6_1 = MS(REG_READ(ah, AR9285_AN_RF2G6), AR9285_AN_RF2G6_OFFS);
+ offs_0 = MS(REG_READ(ah, AR9285_AN_RF2G3), AR9285_AN_RF2G3_PDVCCOMP);
+
+ offset = (offs_6_1<<1) | offs_0;
+ offset = offset - 0;
+ offs_6_1 = offset>>1;
+ offs_0 = offset & 1;
+
+ if ((!is_reset) && (ah->pacal_info.prev_offset == offset)) {
+ if (ah->pacal_info.max_skipcount < MAX_PACAL_SKIPCOUNT)
+ ah->pacal_info.max_skipcount =
+ 2 * ah->pacal_info.max_skipcount;
+ ah->pacal_info.skipcount = ah->pacal_info.max_skipcount;
+ } else {
+ ah->pacal_info.max_skipcount = 1;
+ ah->pacal_info.skipcount = 0;
+ ah->pacal_info.prev_offset = offset;
+ }
+
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9285_AN_RF2G6_OFFS, offs_6_1);
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_PDVCCOMP, offs_0);
+
+ regVal = REG_READ(ah, 0x7834);
+ regVal |= 0x1;
+ REG_WRITE(ah, 0x7834, regVal);
+ regVal = REG_READ(ah, 0x9808);
+ regVal &= (~(0x1 << 27));
+ REG_WRITE(ah, 0x9808, regVal);
+
+ for (i = 0; i < ARRAY_SIZE(regList); i++)
+ REG_WRITE(ah, regList[i][0], regList[i][1]);
+
+ REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9285_AN_RF2G6_CCOMP, ccomp_org);
+
+ if (AR_SREV_9285_11(ah))
+ REG_WRITE(ah, AR9285_AN_TOP4, AR9285_AN_TOP4_DEFAULT);
+}
+
+static void ar9002_hw_pa_cal(struct ath_hw *ah, bool is_reset)
+{
+ if (AR_SREV_9271(ah)) {
+ if (is_reset || !ah->pacal_info.skipcount)
+ ar9271_hw_pa_cal(ah, is_reset);
+ else
+ ah->pacal_info.skipcount--;
+ } else if (AR_SREV_9285_11_OR_LATER(ah)) {
+ if (is_reset || !ah->pacal_info.skipcount)
+ ar9285_hw_pa_cal(ah, is_reset);
+ else
+ ah->pacal_info.skipcount--;
+ }
+}
+
+static void ar9002_hw_olc_temp_compensation(struct ath_hw *ah)
+{
+ if (OLC_FOR_AR9287_10_LATER)
+ ar9287_hw_olc_temp_compensation(ah);
+ else if (OLC_FOR_AR9280_20_LATER)
+ ar9280_hw_olc_temp_compensation(ah);
+}
+
+static bool ar9002_hw_calibrate(struct ath_hw *ah,
+ struct ath9k_channel *chan,
+ u8 rxchainmask,
+ bool longcal)
+{
+ bool iscaldone = true;
+ struct ath9k_cal_list *currCal = ah->cal_list_curr;
+
+ if (currCal &&
+ (currCal->calState == CAL_RUNNING ||
+ currCal->calState == CAL_WAITING)) {
+ iscaldone = ar9002_hw_per_calibration(ah, chan,
+ rxchainmask, currCal);
+ if (iscaldone) {
+ ah->cal_list_curr = currCal = currCal->calNext;
+
+ if (currCal->calState == CAL_WAITING) {
+ iscaldone = false;
+ ath9k_hw_reset_calibration(ah, currCal);
+ }
+ }
+ }
+
+ /* Do NF cal only at longer intervals */
+ if (longcal) {
+ /* Do periodic PAOffset Cal */
+ ar9002_hw_pa_cal(ah, false);
+ ar9002_hw_olc_temp_compensation(ah);
+
+ /*
+ * Get the value from the previous NF cal and update
+ * history buffer.
+ */
+ ath9k_hw_getnf(ah, chan);
+
+ /*
+ * Load the NF from history buffer of the current channel.
+ * NF is slow time-variant, so it is OK to use a historical
+ * value.
+ */
+ ath9k_hw_loadnf(ah, ah->curchan);
+
+ ath9k_hw_start_nfcal(ah);
+ }
+
+ return iscaldone;
+}
+
+/* Carrier leakage Calibration fix */
+static bool ar9285_hw_cl_cal(struct ath_hw *ah, struct ath9k_channel *chan)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ REG_SET_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_CL_CAL_ENABLE);
+ if (IS_CHAN_HT20(chan)) {
+ REG_SET_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_PARALLEL_CAL_ENABLE);
+ REG_SET_BIT(ah, AR_PHY_TURBO, AR_PHY_FC_DYN2040_EN);
+ REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_FLTR_CAL);
+ REG_CLR_BIT(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_CAL_ENABLE);
+ REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL);
+ if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_CAL, 0, AH_WAIT_TIMEOUT)) {
+ ath_print(common, ATH_DBG_CALIBRATE, "offset "
+ "calibration failed to complete in "
+				  "1ms; noisy environment?\n");
+ return false;
+ }
+ REG_CLR_BIT(ah, AR_PHY_TURBO, AR_PHY_FC_DYN2040_EN);
+ REG_CLR_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_PARALLEL_CAL_ENABLE);
+ REG_CLR_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_CL_CAL_ENABLE);
+ }
+ REG_CLR_BIT(ah, AR_PHY_ADC_CTL, AR_PHY_ADC_CTL_OFF_PWDADC);
+ REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_FLTR_CAL);
+ REG_SET_BIT(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_CAL_ENABLE);
+ REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL);
+ if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL,
+ 0, AH_WAIT_TIMEOUT)) {
+ ath_print(common, ATH_DBG_CALIBRATE, "offset calibration "
+			  "failed to complete in 1ms; noisy environment?\n");
+ return false;
+ }
+
+ REG_SET_BIT(ah, AR_PHY_ADC_CTL, AR_PHY_ADC_CTL_OFF_PWDADC);
+ REG_CLR_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_CL_CAL_ENABLE);
+ REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_FLTR_CAL);
+
+ return true;
+}
+
+static bool ar9285_hw_clc(struct ath_hw *ah, struct ath9k_channel *chan)
+{
+ int i;
+ u_int32_t txgain_max;
+ u_int32_t clc_gain, gain_mask = 0, clc_num = 0;
+ u_int32_t reg_clc_I0, reg_clc_Q0;
+ u_int32_t i0_num = 0;
+ u_int32_t q0_num = 0;
+ u_int32_t total_num = 0;
+ u_int32_t reg_rf2g5_org;
+ bool retv = true;
+
+ if (!(ar9285_hw_cl_cal(ah, chan)))
+ return false;
+
+ txgain_max = MS(REG_READ(ah, AR_PHY_TX_PWRCTRL7),
+ AR_PHY_TX_PWRCTRL_TX_GAIN_TAB_MAX);
+
+ for (i = 0; i < (txgain_max+1); i++) {
+ clc_gain = (REG_READ(ah, (AR_PHY_TX_GAIN_TBL1+(i<<2))) &
+ AR_PHY_TX_GAIN_CLC) >> AR_PHY_TX_GAIN_CLC_S;
+ if (!(gain_mask & (1 << clc_gain))) {
+ gain_mask |= (1 << clc_gain);
+ clc_num++;
+ }
+ }
+
+ for (i = 0; i < clc_num; i++) {
+ reg_clc_I0 = (REG_READ(ah, (AR_PHY_CLC_TBL1 + (i << 2)))
+ & AR_PHY_CLC_I0) >> AR_PHY_CLC_I0_S;
+ reg_clc_Q0 = (REG_READ(ah, (AR_PHY_CLC_TBL1 + (i << 2)))
+ & AR_PHY_CLC_Q0) >> AR_PHY_CLC_Q0_S;
+ if (reg_clc_I0 == 0)
+ i0_num++;
+
+ if (reg_clc_Q0 == 0)
+ q0_num++;
+ }
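+	/*
+	 * A zero I0/Q0 correction result suggests the carrier leak
+	 * calibration did not produce a usable value for that gain entry;
+	 * if too many entries look bad, redo the calibration with a tweaked
+	 * IC50TX bias and then restore the original RF2G5 value.
+	 */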
+ total_num = i0_num + q0_num;
+ if (total_num > AR9285_CLCAL_REDO_THRESH) {
+ reg_rf2g5_org = REG_READ(ah, AR9285_RF2G5);
+ if (AR_SREV_9285E_20(ah)) {
+ REG_WRITE(ah, AR9285_RF2G5,
+ (reg_rf2g5_org & AR9285_RF2G5_IC50TX) |
+ AR9285_RF2G5_IC50TX_XE_SET);
+ } else {
+ REG_WRITE(ah, AR9285_RF2G5,
+ (reg_rf2g5_org & AR9285_RF2G5_IC50TX) |
+ AR9285_RF2G5_IC50TX_SET);
+ }
+ retv = ar9285_hw_cl_cal(ah, chan);
+ REG_WRITE(ah, AR9285_RF2G5, reg_rf2g5_org);
+ }
+ return retv;
+}
+
+static bool ar9002_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (AR_SREV_9271(ah) || AR_SREV_9285_12_OR_LATER(ah)) {
+ if (!ar9285_hw_clc(ah, chan))
+ return false;
+ } else {
+ if (AR_SREV_9280_10_OR_LATER(ah)) {
+ if (!AR_SREV_9287_10_OR_LATER(ah))
+ REG_CLR_BIT(ah, AR_PHY_ADC_CTL,
+ AR_PHY_ADC_CTL_OFF_PWDADC);
+ REG_SET_BIT(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_FLTR_CAL);
+ }
+
+ /* Calibrate the AGC */
+ REG_WRITE(ah, AR_PHY_AGC_CONTROL,
+ REG_READ(ah, AR_PHY_AGC_CONTROL) |
+ AR_PHY_AGC_CONTROL_CAL);
+
+ /* Poll for offset calibration complete */
+ if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_CAL,
+ 0, AH_WAIT_TIMEOUT)) {
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "offset calibration failed to "
+ "complete in 1ms; noisy environment?\n");
+ return false;
+ }
+
+ if (AR_SREV_9280_10_OR_LATER(ah)) {
+ if (!AR_SREV_9287_10_OR_LATER(ah))
+ REG_SET_BIT(ah, AR_PHY_ADC_CTL,
+ AR_PHY_ADC_CTL_OFF_PWDADC);
+ REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_FLTR_CAL);
+ }
+ }
+
+ /* Do PA Calibration */
+ ar9002_hw_pa_cal(ah, true);
+
+ /* Do NF Calibration after DC offset and other calibrations */
+ REG_WRITE(ah, AR_PHY_AGC_CONTROL,
+ REG_READ(ah, AR_PHY_AGC_CONTROL) | AR_PHY_AGC_CONTROL_NF);
+
+ ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL;
+
+ /* Enable IQ, ADC Gain and ADC DC offset CALs */
+ if (AR_SREV_9100(ah) || AR_SREV_9160_10_OR_LATER(ah)) {
+ if (ar9002_hw_iscal_supported(ah, ADC_GAIN_CAL)) {
+ INIT_CAL(&ah->adcgain_caldata);
+ INSERT_CAL(ah, &ah->adcgain_caldata);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "enabling ADC Gain Calibration.\n");
+ }
+ if (ar9002_hw_iscal_supported(ah, ADC_DC_CAL)) {
+ INIT_CAL(&ah->adcdc_caldata);
+ INSERT_CAL(ah, &ah->adcdc_caldata);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "enabling ADC DC Calibration.\n");
+ }
+ if (ar9002_hw_iscal_supported(ah, IQ_MISMATCH_CAL)) {
+ INIT_CAL(&ah->iq_caldata);
+ INSERT_CAL(ah, &ah->iq_caldata);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "enabling IQ Calibration.\n");
+ }
+
+ ah->cal_list_curr = ah->cal_list;
+
+ if (ah->cal_list_curr)
+ ath9k_hw_reset_calibration(ah, ah->cal_list_curr);
+ }
+
+ chan->CalValid = 0;
+
+ return true;
+}
+
+static const struct ath9k_percal_data iq_cal_multi_sample = {
+ IQ_MISMATCH_CAL,
+ MAX_CAL_SAMPLES,
+ PER_MIN_LOG_COUNT,
+ ar9002_hw_iqcal_collect,
+ ar9002_hw_iqcalibrate
+};
+static const struct ath9k_percal_data iq_cal_single_sample = {
+ IQ_MISMATCH_CAL,
+ MIN_CAL_SAMPLES,
+ PER_MAX_LOG_COUNT,
+ ar9002_hw_iqcal_collect,
+ ar9002_hw_iqcalibrate
+};
+static const struct ath9k_percal_data adc_gain_cal_multi_sample = {
+ ADC_GAIN_CAL,
+ MAX_CAL_SAMPLES,
+ PER_MIN_LOG_COUNT,
+ ar9002_hw_adc_gaincal_collect,
+ ar9002_hw_adc_gaincal_calibrate
+};
+static const struct ath9k_percal_data adc_gain_cal_single_sample = {
+ ADC_GAIN_CAL,
+ MIN_CAL_SAMPLES,
+ PER_MAX_LOG_COUNT,
+ ar9002_hw_adc_gaincal_collect,
+ ar9002_hw_adc_gaincal_calibrate
+};
+static const struct ath9k_percal_data adc_dc_cal_multi_sample = {
+ ADC_DC_CAL,
+ MAX_CAL_SAMPLES,
+ PER_MIN_LOG_COUNT,
+ ar9002_hw_adc_dccal_collect,
+ ar9002_hw_adc_dccal_calibrate
+};
+static const struct ath9k_percal_data adc_dc_cal_single_sample = {
+ ADC_DC_CAL,
+ MIN_CAL_SAMPLES,
+ PER_MAX_LOG_COUNT,
+ ar9002_hw_adc_dccal_collect,
+ ar9002_hw_adc_dccal_calibrate
+};
+static const struct ath9k_percal_data adc_init_dc_cal = {
+ ADC_DC_INIT_CAL,
+ MIN_CAL_SAMPLES,
+ INIT_LOG_COUNT,
+ ar9002_hw_adc_dccal_collect,
+ ar9002_hw_adc_dccal_calibrate
+};
+
+static void ar9002_hw_init_cal_settings(struct ath_hw *ah)
+{
+ if (AR_SREV_9100(ah)) {
+ ah->iq_caldata.calData = &iq_cal_multi_sample;
+ ah->supp_cals = IQ_MISMATCH_CAL;
+ return;
+ }
+
+ if (AR_SREV_9160_10_OR_LATER(ah)) {
+ if (AR_SREV_9280_10_OR_LATER(ah)) {
+ ah->iq_caldata.calData = &iq_cal_single_sample;
+ ah->adcgain_caldata.calData =
+ &adc_gain_cal_single_sample;
+ ah->adcdc_caldata.calData =
+ &adc_dc_cal_single_sample;
+ ah->adcdc_calinitdata.calData =
+ &adc_init_dc_cal;
+ } else {
+ ah->iq_caldata.calData = &iq_cal_multi_sample;
+ ah->adcgain_caldata.calData =
+ &adc_gain_cal_multi_sample;
+ ah->adcdc_caldata.calData =
+ &adc_dc_cal_multi_sample;
+ ah->adcdc_calinitdata.calData =
+ &adc_init_dc_cal;
+ }
+ ah->supp_cals = ADC_GAIN_CAL | ADC_DC_CAL | IQ_MISMATCH_CAL;
+ }
+}
+
+void ar9002_hw_attach_calib_ops(struct ath_hw *ah)
+{
+ struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
+ struct ath_hw_ops *ops = ath9k_hw_ops(ah);
+
+ priv_ops->init_cal_settings = ar9002_hw_init_cal_settings;
+ priv_ops->init_cal = ar9002_hw_init_cal;
+ priv_ops->setup_calibration = ar9002_hw_setup_calibration;
+ priv_ops->iscal_supported = ar9002_hw_iscal_supported;
+
+ ops->calibrate = ar9002_hw_calibrate;
+}
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
new file mode 100644
index 000000000000..a8a8cdc04afa
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
@@ -0,0 +1,598 @@
+/*
+ * Copyright (c) 2008-2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "hw.h"
+#include "ar5008_initvals.h"
+#include "ar9001_initvals.h"
+#include "ar9002_initvals.h"
+
+/* General hardware code for the AR5008/AR9001/AR9002 hardware families */
+
+static bool ar9002_hw_macversion_supported(u32 macversion)
+{
+ switch (macversion) {
+ case AR_SREV_VERSION_5416_PCI:
+ case AR_SREV_VERSION_5416_PCIE:
+ case AR_SREV_VERSION_9160:
+ case AR_SREV_VERSION_9100:
+ case AR_SREV_VERSION_9280:
+ case AR_SREV_VERSION_9285:
+ case AR_SREV_VERSION_9287:
+ case AR_SREV_VERSION_9271:
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+
+static void ar9002_hw_init_mode_regs(struct ath_hw *ah)
+{
+ if (AR_SREV_9271(ah)) {
+ INIT_INI_ARRAY(&ah->iniModes, ar9271Modes_9271,
+ ARRAY_SIZE(ar9271Modes_9271), 6);
+ INIT_INI_ARRAY(&ah->iniCommon, ar9271Common_9271,
+ ARRAY_SIZE(ar9271Common_9271), 2);
+ INIT_INI_ARRAY(&ah->iniCommon_normal_cck_fir_coeff_9271,
+ ar9271Common_normal_cck_fir_coeff_9271,
+ ARRAY_SIZE(ar9271Common_normal_cck_fir_coeff_9271), 2);
+ INIT_INI_ARRAY(&ah->iniCommon_japan_2484_cck_fir_coeff_9271,
+ ar9271Common_japan_2484_cck_fir_coeff_9271,
+ ARRAY_SIZE(ar9271Common_japan_2484_cck_fir_coeff_9271), 2);
+ INIT_INI_ARRAY(&ah->iniModes_9271_1_0_only,
+ ar9271Modes_9271_1_0_only,
+ ARRAY_SIZE(ar9271Modes_9271_1_0_only), 6);
+ INIT_INI_ARRAY(&ah->iniModes_9271_ANI_reg, ar9271Modes_9271_ANI_reg,
+ ARRAY_SIZE(ar9271Modes_9271_ANI_reg), 6);
+ INIT_INI_ARRAY(&ah->iniModes_high_power_tx_gain_9271,
+ ar9271Modes_high_power_tx_gain_9271,
+ ARRAY_SIZE(ar9271Modes_high_power_tx_gain_9271), 6);
+ INIT_INI_ARRAY(&ah->iniModes_normal_power_tx_gain_9271,
+ ar9271Modes_normal_power_tx_gain_9271,
+ ARRAY_SIZE(ar9271Modes_normal_power_tx_gain_9271), 6);
+ return;
+ }
+
+ if (AR_SREV_9287_11_OR_LATER(ah)) {
+ INIT_INI_ARRAY(&ah->iniModes, ar9287Modes_9287_1_1,
+ ARRAY_SIZE(ar9287Modes_9287_1_1), 6);
+ INIT_INI_ARRAY(&ah->iniCommon, ar9287Common_9287_1_1,
+ ARRAY_SIZE(ar9287Common_9287_1_1), 2);
+ if (ah->config.pcie_clock_req)
+ INIT_INI_ARRAY(&ah->iniPcieSerdes,
+ ar9287PciePhy_clkreq_off_L1_9287_1_1,
+ ARRAY_SIZE(ar9287PciePhy_clkreq_off_L1_9287_1_1), 2);
+ else
+ INIT_INI_ARRAY(&ah->iniPcieSerdes,
+ ar9287PciePhy_clkreq_always_on_L1_9287_1_1,
+ ARRAY_SIZE(ar9287PciePhy_clkreq_always_on_L1_9287_1_1),
+ 2);
+ } else if (AR_SREV_9287_10_OR_LATER(ah)) {
+ INIT_INI_ARRAY(&ah->iniModes, ar9287Modes_9287_1_0,
+ ARRAY_SIZE(ar9287Modes_9287_1_0), 6);
+ INIT_INI_ARRAY(&ah->iniCommon, ar9287Common_9287_1_0,
+ ARRAY_SIZE(ar9287Common_9287_1_0), 2);
+
+ if (ah->config.pcie_clock_req)
+ INIT_INI_ARRAY(&ah->iniPcieSerdes,
+ ar9287PciePhy_clkreq_off_L1_9287_1_0,
+ ARRAY_SIZE(ar9287PciePhy_clkreq_off_L1_9287_1_0), 2);
+ else
+ INIT_INI_ARRAY(&ah->iniPcieSerdes,
+ ar9287PciePhy_clkreq_always_on_L1_9287_1_0,
+ ARRAY_SIZE(ar9287PciePhy_clkreq_always_on_L1_9287_1_0),
+ 2);
+ } else if (AR_SREV_9285_12_OR_LATER(ah)) {
+ INIT_INI_ARRAY(&ah->iniModes, ar9285Modes_9285_1_2,
+ ARRAY_SIZE(ar9285Modes_9285_1_2), 6);
+ INIT_INI_ARRAY(&ah->iniCommon, ar9285Common_9285_1_2,
+ ARRAY_SIZE(ar9285Common_9285_1_2), 2);
+
+ if (ah->config.pcie_clock_req) {
+ INIT_INI_ARRAY(&ah->iniPcieSerdes,
+ ar9285PciePhy_clkreq_off_L1_9285_1_2,
+ ARRAY_SIZE(ar9285PciePhy_clkreq_off_L1_9285_1_2), 2);
+ } else {
+ INIT_INI_ARRAY(&ah->iniPcieSerdes,
+ ar9285PciePhy_clkreq_always_on_L1_9285_1_2,
+ ARRAY_SIZE(ar9285PciePhy_clkreq_always_on_L1_9285_1_2),
+ 2);
+ }
+ } else if (AR_SREV_9285_10_OR_LATER(ah)) {
+ INIT_INI_ARRAY(&ah->iniModes, ar9285Modes_9285,
+ ARRAY_SIZE(ar9285Modes_9285), 6);
+ INIT_INI_ARRAY(&ah->iniCommon, ar9285Common_9285,
+ ARRAY_SIZE(ar9285Common_9285), 2);
+
+ if (ah->config.pcie_clock_req) {
+ INIT_INI_ARRAY(&ah->iniPcieSerdes,
+ ar9285PciePhy_clkreq_off_L1_9285,
+ ARRAY_SIZE(ar9285PciePhy_clkreq_off_L1_9285), 2);
+ } else {
+ INIT_INI_ARRAY(&ah->iniPcieSerdes,
+ ar9285PciePhy_clkreq_always_on_L1_9285,
+ ARRAY_SIZE(ar9285PciePhy_clkreq_always_on_L1_9285), 2);
+ }
+ } else if (AR_SREV_9280_20_OR_LATER(ah)) {
+ INIT_INI_ARRAY(&ah->iniModes, ar9280Modes_9280_2,
+ ARRAY_SIZE(ar9280Modes_9280_2), 6);
+ INIT_INI_ARRAY(&ah->iniCommon, ar9280Common_9280_2,
+ ARRAY_SIZE(ar9280Common_9280_2), 2);
+
+ if (ah->config.pcie_clock_req) {
+ INIT_INI_ARRAY(&ah->iniPcieSerdes,
+ ar9280PciePhy_clkreq_off_L1_9280,
+ ARRAY_SIZE(ar9280PciePhy_clkreq_off_L1_9280), 2);
+ } else {
+ INIT_INI_ARRAY(&ah->iniPcieSerdes,
+ ar9280PciePhy_clkreq_always_on_L1_9280,
+ ARRAY_SIZE(ar9280PciePhy_clkreq_always_on_L1_9280), 2);
+ }
+ INIT_INI_ARRAY(&ah->iniModesAdditional,
+ ar9280Modes_fast_clock_9280_2,
+ ARRAY_SIZE(ar9280Modes_fast_clock_9280_2), 3);
+ } else if (AR_SREV_9280_10_OR_LATER(ah)) {
+ INIT_INI_ARRAY(&ah->iniModes, ar9280Modes_9280,
+ ARRAY_SIZE(ar9280Modes_9280), 6);
+ INIT_INI_ARRAY(&ah->iniCommon, ar9280Common_9280,
+ ARRAY_SIZE(ar9280Common_9280), 2);
+ } else if (AR_SREV_9160_10_OR_LATER(ah)) {
+ INIT_INI_ARRAY(&ah->iniModes, ar5416Modes_9160,
+ ARRAY_SIZE(ar5416Modes_9160), 6);
+ INIT_INI_ARRAY(&ah->iniCommon, ar5416Common_9160,
+ ARRAY_SIZE(ar5416Common_9160), 2);
+ INIT_INI_ARRAY(&ah->iniBank0, ar5416Bank0_9160,
+ ARRAY_SIZE(ar5416Bank0_9160), 2);
+ INIT_INI_ARRAY(&ah->iniBB_RfGain, ar5416BB_RfGain_9160,
+ ARRAY_SIZE(ar5416BB_RfGain_9160), 3);
+ INIT_INI_ARRAY(&ah->iniBank1, ar5416Bank1_9160,
+ ARRAY_SIZE(ar5416Bank1_9160), 2);
+ INIT_INI_ARRAY(&ah->iniBank2, ar5416Bank2_9160,
+ ARRAY_SIZE(ar5416Bank2_9160), 2);
+ INIT_INI_ARRAY(&ah->iniBank3, ar5416Bank3_9160,
+ ARRAY_SIZE(ar5416Bank3_9160), 3);
+ INIT_INI_ARRAY(&ah->iniBank6, ar5416Bank6_9160,
+ ARRAY_SIZE(ar5416Bank6_9160), 3);
+ INIT_INI_ARRAY(&ah->iniBank6TPC, ar5416Bank6TPC_9160,
+ ARRAY_SIZE(ar5416Bank6TPC_9160), 3);
+ INIT_INI_ARRAY(&ah->iniBank7, ar5416Bank7_9160,
+ ARRAY_SIZE(ar5416Bank7_9160), 2);
+ if (AR_SREV_9160_11(ah)) {
+ INIT_INI_ARRAY(&ah->iniAddac,
+ ar5416Addac_91601_1,
+ ARRAY_SIZE(ar5416Addac_91601_1), 2);
+ } else {
+ INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac_9160,
+ ARRAY_SIZE(ar5416Addac_9160), 2);
+ }
+ } else if (AR_SREV_9100_OR_LATER(ah)) {
+ INIT_INI_ARRAY(&ah->iniModes, ar5416Modes_9100,
+ ARRAY_SIZE(ar5416Modes_9100), 6);
+ INIT_INI_ARRAY(&ah->iniCommon, ar5416Common_9100,
+ ARRAY_SIZE(ar5416Common_9100), 2);
+ INIT_INI_ARRAY(&ah->iniBank0, ar5416Bank0_9100,
+ ARRAY_SIZE(ar5416Bank0_9100), 2);
+ INIT_INI_ARRAY(&ah->iniBB_RfGain, ar5416BB_RfGain_9100,
+ ARRAY_SIZE(ar5416BB_RfGain_9100), 3);
+ INIT_INI_ARRAY(&ah->iniBank1, ar5416Bank1_9100,
+ ARRAY_SIZE(ar5416Bank1_9100), 2);
+ INIT_INI_ARRAY(&ah->iniBank2, ar5416Bank2_9100,
+ ARRAY_SIZE(ar5416Bank2_9100), 2);
+ INIT_INI_ARRAY(&ah->iniBank3, ar5416Bank3_9100,
+ ARRAY_SIZE(ar5416Bank3_9100), 3);
+ INIT_INI_ARRAY(&ah->iniBank6, ar5416Bank6_9100,
+ ARRAY_SIZE(ar5416Bank6_9100), 3);
+ INIT_INI_ARRAY(&ah->iniBank6TPC, ar5416Bank6TPC_9100,
+ ARRAY_SIZE(ar5416Bank6TPC_9100), 3);
+ INIT_INI_ARRAY(&ah->iniBank7, ar5416Bank7_9100,
+ ARRAY_SIZE(ar5416Bank7_9100), 2);
+ INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac_9100,
+ ARRAY_SIZE(ar5416Addac_9100), 2);
+ } else {
+ INIT_INI_ARRAY(&ah->iniModes, ar5416Modes,
+ ARRAY_SIZE(ar5416Modes), 6);
+ INIT_INI_ARRAY(&ah->iniCommon, ar5416Common,
+ ARRAY_SIZE(ar5416Common), 2);
+ INIT_INI_ARRAY(&ah->iniBank0, ar5416Bank0,
+ ARRAY_SIZE(ar5416Bank0), 2);
+ INIT_INI_ARRAY(&ah->iniBB_RfGain, ar5416BB_RfGain,
+ ARRAY_SIZE(ar5416BB_RfGain), 3);
+ INIT_INI_ARRAY(&ah->iniBank1, ar5416Bank1,
+ ARRAY_SIZE(ar5416Bank1), 2);
+ INIT_INI_ARRAY(&ah->iniBank2, ar5416Bank2,
+ ARRAY_SIZE(ar5416Bank2), 2);
+ INIT_INI_ARRAY(&ah->iniBank3, ar5416Bank3,
+ ARRAY_SIZE(ar5416Bank3), 3);
+ INIT_INI_ARRAY(&ah->iniBank6, ar5416Bank6,
+ ARRAY_SIZE(ar5416Bank6), 3);
+ INIT_INI_ARRAY(&ah->iniBank6TPC, ar5416Bank6TPC,
+ ARRAY_SIZE(ar5416Bank6TPC), 3);
+ INIT_INI_ARRAY(&ah->iniBank7, ar5416Bank7,
+ ARRAY_SIZE(ar5416Bank7), 2);
+ INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac,
+ ARRAY_SIZE(ar5416Addac), 2);
+ }
+}
+
+/* Support for Japan ch.14 (2484 MHz) spread */
+void ar9002_hw_cck_chan14_spread(struct ath_hw *ah)
+{
+ if (AR_SREV_9287_11_OR_LATER(ah)) {
+ INIT_INI_ARRAY(&ah->iniCckfirNormal,
+ ar9287Common_normal_cck_fir_coeff_92871_1,
+ ARRAY_SIZE(ar9287Common_normal_cck_fir_coeff_92871_1),
+ 2);
+ INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
+ ar9287Common_japan_2484_cck_fir_coeff_92871_1,
+ ARRAY_SIZE(ar9287Common_japan_2484_cck_fir_coeff_92871_1),
+ 2);
+ }
+}
+
+static void ar9280_20_hw_init_rxgain_ini(struct ath_hw *ah)
+{
+ u32 rxgain_type;
+
+ if (ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV) >=
+ AR5416_EEP_MINOR_VER_17) {
+ rxgain_type = ah->eep_ops->get_eeprom(ah, EEP_RXGAIN_TYPE);
+
+ if (rxgain_type == AR5416_EEP_RXGAIN_13DB_BACKOFF)
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9280Modes_backoff_13db_rxgain_9280_2,
+ ARRAY_SIZE(ar9280Modes_backoff_13db_rxgain_9280_2), 6);
+ else if (rxgain_type == AR5416_EEP_RXGAIN_23DB_BACKOFF)
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9280Modes_backoff_23db_rxgain_9280_2,
+ ARRAY_SIZE(ar9280Modes_backoff_23db_rxgain_9280_2), 6);
+ else
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9280Modes_original_rxgain_9280_2,
+ ARRAY_SIZE(ar9280Modes_original_rxgain_9280_2), 6);
+ } else {
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9280Modes_original_rxgain_9280_2,
+ ARRAY_SIZE(ar9280Modes_original_rxgain_9280_2), 6);
+ }
+}
+
+static void ar9280_20_hw_init_txgain_ini(struct ath_hw *ah)
+{
+ u32 txgain_type;
+
+ if (ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV) >=
+ AR5416_EEP_MINOR_VER_19) {
+ txgain_type = ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE);
+
+ if (txgain_type == AR5416_EEP_TXGAIN_HIGH_POWER)
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9280Modes_high_power_tx_gain_9280_2,
+ ARRAY_SIZE(ar9280Modes_high_power_tx_gain_9280_2), 6);
+ else
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9280Modes_original_tx_gain_9280_2,
+ ARRAY_SIZE(ar9280Modes_original_tx_gain_9280_2), 6);
+ } else {
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9280Modes_original_tx_gain_9280_2,
+ ARRAY_SIZE(ar9280Modes_original_tx_gain_9280_2), 6);
+ }
+}
+
+static void ar9002_hw_init_mode_gain_regs(struct ath_hw *ah)
+{
+ if (AR_SREV_9287_11_OR_LATER(ah))
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9287Modes_rx_gain_9287_1_1,
+ ARRAY_SIZE(ar9287Modes_rx_gain_9287_1_1), 6);
+ else if (AR_SREV_9287_10(ah))
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9287Modes_rx_gain_9287_1_0,
+ ARRAY_SIZE(ar9287Modes_rx_gain_9287_1_0), 6);
+ else if (AR_SREV_9280_20(ah))
+ ar9280_20_hw_init_rxgain_ini(ah);
+
+ if (AR_SREV_9287_11_OR_LATER(ah)) {
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9287Modes_tx_gain_9287_1_1,
+ ARRAY_SIZE(ar9287Modes_tx_gain_9287_1_1), 6);
+ } else if (AR_SREV_9287_10(ah)) {
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9287Modes_tx_gain_9287_1_0,
+ ARRAY_SIZE(ar9287Modes_tx_gain_9287_1_0), 6);
+ } else if (AR_SREV_9280_20(ah)) {
+ ar9280_20_hw_init_txgain_ini(ah);
+ } else if (AR_SREV_9285_12_OR_LATER(ah)) {
+ u32 txgain_type = ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE);
+
+ /* txgain table */
+ if (txgain_type == AR5416_EEP_TXGAIN_HIGH_POWER) {
+ if (AR_SREV_9285E_20(ah)) {
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9285Modes_XE2_0_high_power,
+ ARRAY_SIZE(
+ ar9285Modes_XE2_0_high_power), 6);
+ } else {
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9285Modes_high_power_tx_gain_9285_1_2,
+ ARRAY_SIZE(
+ ar9285Modes_high_power_tx_gain_9285_1_2), 6);
+ }
+ } else {
+ if (AR_SREV_9285E_20(ah)) {
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9285Modes_XE2_0_normal_power,
+ ARRAY_SIZE(
+ ar9285Modes_XE2_0_normal_power), 6);
+ } else {
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9285Modes_original_tx_gain_9285_1_2,
+ ARRAY_SIZE(
+ ar9285Modes_original_tx_gain_9285_1_2), 6);
+ }
+ }
+ }
+}
+
+/*
+ * Helper for ASPM support.
+ *
+ * Disable PLL when in L0s as well as receiver clock when in L1.
+ * This power saving option must be enabled through the SerDes.
+ *
+ * Programming the SerDes must go through the same 288-bit serial shift
+ * register as the other analog registers.  Hence the 9 writes.
+ */
+static void ar9002_hw_configpcipowersave(struct ath_hw *ah,
+ int restore,
+ int power_off)
+{
+ u8 i;
+ u32 val;
+
+	if (!ah->is_pciexpress)
+ return;
+
+ /* Do not touch SerDes registers */
+ if (ah->config.pcie_powersave_enable == 2)
+ return;
+
+ /* Nothing to do on restore for 11N */
+ if (!restore) {
+ if (AR_SREV_9280_20_OR_LATER(ah)) {
+ /*
+			 * AR9280 2.0 or later chips use the SerDes values
+			 * from initvals.h, selected per chipset during
+			 * __ath9k_hw_init().
+ */
+ for (i = 0; i < ah->iniPcieSerdes.ia_rows; i++) {
+ REG_WRITE(ah, INI_RA(&ah->iniPcieSerdes, i, 0),
+ INI_RA(&ah->iniPcieSerdes, i, 1));
+ }
+ } else if (AR_SREV_9280(ah) &&
+ (ah->hw_version.macRev == AR_SREV_REVISION_9280_10)) {
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fd00);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924);
+
+ /* RX shut off when elecidle is asserted */
+ REG_WRITE(ah, AR_PCIE_SERDES, 0xa8000019);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x13160820);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0xe5980560);
+
+ /* Shut off CLKREQ active in L1 */
+ if (ah->config.pcie_clock_req)
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x401deffc);
+ else
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x401deffd);
+
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x00043007);
+
+ /* Load the new settings */
+ REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);
+
+ } else {
+ ENABLE_REGWRITE_BUFFER(ah);
+
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fc00);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924);
+
+ /* RX shut off when elecidle is asserted */
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x28000039);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x53160824);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0xe5980579);
+
+ /*
+				 * Ignore the ah->config.pcie_clock_req setting
+				 * for pre-AR9280 11n chips.
+ */
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x001defff);
+
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x000e3007);
+
+ /* Load the new settings */
+ REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+ }
+
+ udelay(1000);
+
+ /* set bit 19 to allow forcing of pcie core into L1 state */
+ REG_SET_BIT(ah, AR_PCIE_PM_CTRL, AR_PCIE_PM_CTRL_ENA);
+
+		/* Several PCIe register tweaks to ensure proper behaviour */
+ if (ah->config.pcie_waen) {
+ val = ah->config.pcie_waen;
+ if (!power_off)
+ val &= (~AR_WA_D3_L1_DISABLE);
+ } else {
+ if (AR_SREV_9285(ah) || AR_SREV_9271(ah) ||
+ AR_SREV_9287(ah)) {
+ val = AR9285_WA_DEFAULT;
+ if (!power_off)
+ val &= (~AR_WA_D3_L1_DISABLE);
+ } else if (AR_SREV_9280(ah)) {
+ /*
+				 * On AR9280 chips, bit 22 of 0x4004 needs to
+				 * be set, otherwise the card may disappear.
+ */
+ val = AR9280_WA_DEFAULT;
+ if (!power_off)
+ val &= (~AR_WA_D3_L1_DISABLE);
+ } else
+ val = AR_WA_DEFAULT;
+ }
+
+ REG_WRITE(ah, AR_WA, val);
+ }
+
+ if (power_off) {
+ /*
+		 * Set PCIe workaround bits: bit 14 in the WA register
+		 * (disable L1) should only be set when the device enters
+		 * D3 and be cleared when it comes back to D0.
+ */
+ if (ah->config.pcie_waen) {
+ if (ah->config.pcie_waen & AR_WA_D3_L1_DISABLE)
+ REG_SET_BIT(ah, AR_WA, AR_WA_D3_L1_DISABLE);
+ } else {
+ if (((AR_SREV_9285(ah) || AR_SREV_9271(ah) ||
+ AR_SREV_9287(ah)) &&
+ (AR9285_WA_DEFAULT & AR_WA_D3_L1_DISABLE)) ||
+ (AR_SREV_9280(ah) &&
+ (AR9280_WA_DEFAULT & AR_WA_D3_L1_DISABLE))) {
+ REG_SET_BIT(ah, AR_WA, AR_WA_D3_L1_DISABLE);
+ }
+ }
+ }
+}
+
+static int ar9002_hw_get_radiorev(struct ath_hw *ah)
+{
+ u32 val;
+ int i;
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
+ REG_WRITE(ah, AR_PHY(0x36), 0x00007058);
+ for (i = 0; i < 8; i++)
+ REG_WRITE(ah, AR_PHY(0x20), 0x00010000);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
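+	/*
+	 * The revision comes back with its nibbles swapped and bits
+	 * reversed relative to the usual layout, so undo both here.
+	 */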
+ val = (REG_READ(ah, AR_PHY(256)) >> 24) & 0xff;
+ val = ((val & 0xf0) >> 4) | ((val & 0x0f) << 4);
+
+ return ath9k_hw_reverse_bits(val, 8);
+}
+
+int ar9002_hw_rf_claim(struct ath_hw *ah)
+{
+ u32 val;
+
+ REG_WRITE(ah, AR_PHY(0), 0x00000007);
+
+ val = ar9002_hw_get_radiorev(ah);
+ switch (val & AR_RADIO_SREV_MAJOR) {
+ case 0:
+ val = AR_RAD5133_SREV_MAJOR;
+ break;
+ case AR_RAD5133_SREV_MAJOR:
+ case AR_RAD5122_SREV_MAJOR:
+ case AR_RAD2133_SREV_MAJOR:
+ case AR_RAD2122_SREV_MAJOR:
+ break;
+ default:
+ ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
+ "Radio Chip Rev 0x%02X not supported\n",
+ val & AR_RADIO_SREV_MAJOR);
+ return -EOPNOTSUPP;
+ }
+
+ ah->hw_version.analog5GhzRev = val;
+
+ return 0;
+}
+
+/*
+ * Enable ASYNC FIFO
+ *
+ * If the Async FIFO is enabled, the following counters change because the
+ * MAC now runs at 117 MHz instead of the 88/44MHz used when it is disabled.
+ *
+ * The values below were tested for HT40 with two chains.
+ * They overwrite the delays/timeouts initialized during INI processing.
+ */
+void ar9002_hw_enable_async_fifo(struct ath_hw *ah)
+{
+ if (AR_SREV_9287_12_OR_LATER(ah)) {
+ REG_WRITE(ah, AR_D_GBL_IFS_SIFS,
+ AR_D_GBL_IFS_SIFS_ASYNC_FIFO_DUR);
+ REG_WRITE(ah, AR_D_GBL_IFS_SLOT,
+ AR_D_GBL_IFS_SLOT_ASYNC_FIFO_DUR);
+ REG_WRITE(ah, AR_D_GBL_IFS_EIFS,
+ AR_D_GBL_IFS_EIFS_ASYNC_FIFO_DUR);
+
+ REG_WRITE(ah, AR_TIME_OUT, AR_TIME_OUT_ACK_CTS_ASYNC_FIFO_DUR);
+ REG_WRITE(ah, AR_USEC, AR_USEC_ASYNC_FIFO_DUR);
+
+ REG_SET_BIT(ah, AR_MAC_PCU_LOGIC_ANALYZER,
+ AR_MAC_PCU_LOGIC_ANALYZER_DISBUG20768);
+ REG_RMW_FIELD(ah, AR_AHB_MODE, AR_AHB_CUSTOM_BURST_EN,
+ AR_AHB_CUSTOM_BURST_ASYNC_FIFO_VAL);
+ }
+}
+
+/*
+ * We don't enable WEP aggregation on mac80211 but we keep this
+ * around for HAL unification purposes.
+ */
+void ar9002_hw_enable_wep_aggregation(struct ath_hw *ah)
+{
+ if (AR_SREV_9287_12_OR_LATER(ah)) {
+ REG_SET_BIT(ah, AR_PCU_MISC_MODE2,
+ AR_PCU_MISC_MODE2_ENABLE_AGGWEP);
+ }
+}
+
+/* Sets up the AR5008/AR9001/AR9002 hardware family callbacks */
+void ar9002_hw_attach_ops(struct ath_hw *ah)
+{
+ struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
+ struct ath_hw_ops *ops = ath9k_hw_ops(ah);
+
+ priv_ops->init_mode_regs = ar9002_hw_init_mode_regs;
+ priv_ops->init_mode_gain_regs = ar9002_hw_init_mode_gain_regs;
+ priv_ops->macversion_supported = ar9002_hw_macversion_supported;
+
+ ops->config_pci_powersave = ar9002_hw_configpcipowersave;
+
+ ar5008_hw_attach_phy_ops(ah);
+ if (AR_SREV_9280_10_OR_LATER(ah))
+ ar9002_hw_attach_phy_ops(ah);
+
+ ar9002_hw_attach_calib_ops(ah);
+ ar9002_hw_attach_mac_ops(ah);
+}
diff --git a/drivers/net/wireless/ath/ath9k/initvals.h b/drivers/net/wireless/ath/ath9k/ar9002_initvals.h
index 8a3bf3ab998d..dae7f3304eb8 100644
--- a/drivers/net/wireless/ath/ath9k/initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9002_initvals.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008-2009 Atheros Communications Inc.
+ * Copyright (c) 2010 Atheros Communications Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -14,1982 +14,9 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-static const u32 ar5416Modes[][6] = {
- { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
- { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
- { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 },
- { 0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008 },
- { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 },
- { 0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab, 0x098813cf },
- { 0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810, 0x08f04810 },
- { 0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a, 0x0000320a },
- { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 },
- { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 },
- { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
- { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 },
- { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
- { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 },
- { 0x00009844, 0x1372161e, 0x1372161e, 0x137216a0, 0x137216a0, 0x137216a0 },
- { 0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
- { 0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
- { 0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
- { 0x00009850, 0x6c48b4e0, 0x6d48b4e0, 0x6d48b0de, 0x6c48b0de, 0x6c48b0de },
- { 0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e },
- { 0x0000985c, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e, 0x31395d5e },
- { 0x00009860, 0x00049d18, 0x00049d18, 0x00049d18, 0x00049d18, 0x00049d18 },
- { 0x00009864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 },
- { 0x00009868, 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190 },
- { 0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 },
- { 0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0 },
- { 0x00009918, 0x000001b8, 0x00000370, 0x00000268, 0x00000134, 0x00000134 },
- { 0x00009924, 0xd0058a0b, 0xd0058a0b, 0xd0058a0b, 0xd0058a0b, 0xd0058a0b },
- { 0x00009944, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020 },
- { 0x00009960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80 },
- { 0x0000a960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80 },
- { 0x0000b960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80 },
- { 0x00009964, 0x00000000, 0x00000000, 0x00001120, 0x00001120, 0x00001120 },
- { 0x000099bc, 0x001a0a00, 0x001a0a00, 0x001a0a00, 0x001a0a00, 0x001a0a00 },
- { 0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, 0x038919be },
- { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 },
- { 0x000099c8, 0x6af6532c, 0x6af6532c, 0x6af6532c, 0x6af6532c, 0x6af6532c },
- { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 },
- { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 },
- { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
- { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
- { 0x0000a204, 0x00000880, 0x00000880, 0x00000880, 0x00000880, 0x00000880 },
- { 0x0000a208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, 0xd03e4788 },
- { 0x0000a20c, 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120, 0x002ac120 },
- { 0x0000b20c, 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120, 0x002ac120 },
- { 0x0000c20c, 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120, 0x002ac120 },
- { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a },
- { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 },
- { 0x0000a274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa, 0x0a1a7caa },
- { 0x0000a300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, 0x18010000 },
- { 0x0000a304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, 0x2e032402 },
- { 0x0000a308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06, 0x4a0a3c06 },
- { 0x0000a30c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, 0x621a540b },
- { 0x0000a310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, 0x764f6c1b },
- { 0x0000a314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, 0x845b7a5a },
- { 0x0000a318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, 0x950f8ccf },
- { 0x0000a31c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, 0xa5cf9b4f },
- { 0x0000a320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, 0xbddfaf1f },
- { 0x0000a324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, 0xd1ffc93f },
- { 0x0000a328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, 0x00000000 },
- { 0x0000a32c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
- { 0x0000a330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
- { 0x0000a334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
-};
-
-static const u32 ar5416Common[][2] = {
- { 0x0000000c, 0x00000000 },
- { 0x00000030, 0x00020015 },
- { 0x00000034, 0x00000005 },
- { 0x00000040, 0x00000000 },
- { 0x00000044, 0x00000008 },
- { 0x00000048, 0x00000008 },
- { 0x0000004c, 0x00000010 },
- { 0x00000050, 0x00000000 },
- { 0x00000054, 0x0000001f },
- { 0x00000800, 0x00000000 },
- { 0x00000804, 0x00000000 },
- { 0x00000808, 0x00000000 },
- { 0x0000080c, 0x00000000 },
- { 0x00000810, 0x00000000 },
- { 0x00000814, 0x00000000 },
- { 0x00000818, 0x00000000 },
- { 0x0000081c, 0x00000000 },
- { 0x00000820, 0x00000000 },
- { 0x00000824, 0x00000000 },
- { 0x00001040, 0x002ffc0f },
- { 0x00001044, 0x002ffc0f },
- { 0x00001048, 0x002ffc0f },
- { 0x0000104c, 0x002ffc0f },
- { 0x00001050, 0x002ffc0f },
- { 0x00001054, 0x002ffc0f },
- { 0x00001058, 0x002ffc0f },
- { 0x0000105c, 0x002ffc0f },
- { 0x00001060, 0x002ffc0f },
- { 0x00001064, 0x002ffc0f },
- { 0x00001230, 0x00000000 },
- { 0x00001270, 0x00000000 },
- { 0x00001038, 0x00000000 },
- { 0x00001078, 0x00000000 },
- { 0x000010b8, 0x00000000 },
- { 0x000010f8, 0x00000000 },
- { 0x00001138, 0x00000000 },
- { 0x00001178, 0x00000000 },
- { 0x000011b8, 0x00000000 },
- { 0x000011f8, 0x00000000 },
- { 0x00001238, 0x00000000 },
- { 0x00001278, 0x00000000 },
- { 0x000012b8, 0x00000000 },
- { 0x000012f8, 0x00000000 },
- { 0x00001338, 0x00000000 },
- { 0x00001378, 0x00000000 },
- { 0x000013b8, 0x00000000 },
- { 0x000013f8, 0x00000000 },
- { 0x00001438, 0x00000000 },
- { 0x00001478, 0x00000000 },
- { 0x000014b8, 0x00000000 },
- { 0x000014f8, 0x00000000 },
- { 0x00001538, 0x00000000 },
- { 0x00001578, 0x00000000 },
- { 0x000015b8, 0x00000000 },
- { 0x000015f8, 0x00000000 },
- { 0x00001638, 0x00000000 },
- { 0x00001678, 0x00000000 },
- { 0x000016b8, 0x00000000 },
- { 0x000016f8, 0x00000000 },
- { 0x00001738, 0x00000000 },
- { 0x00001778, 0x00000000 },
- { 0x000017b8, 0x00000000 },
- { 0x000017f8, 0x00000000 },
- { 0x0000103c, 0x00000000 },
- { 0x0000107c, 0x00000000 },
- { 0x000010bc, 0x00000000 },
- { 0x000010fc, 0x00000000 },
- { 0x0000113c, 0x00000000 },
- { 0x0000117c, 0x00000000 },
- { 0x000011bc, 0x00000000 },
- { 0x000011fc, 0x00000000 },
- { 0x0000123c, 0x00000000 },
- { 0x0000127c, 0x00000000 },
- { 0x000012bc, 0x00000000 },
- { 0x000012fc, 0x00000000 },
- { 0x0000133c, 0x00000000 },
- { 0x0000137c, 0x00000000 },
- { 0x000013bc, 0x00000000 },
- { 0x000013fc, 0x00000000 },
- { 0x0000143c, 0x00000000 },
- { 0x0000147c, 0x00000000 },
- { 0x00004030, 0x00000002 },
- { 0x0000403c, 0x00000002 },
- { 0x00007010, 0x00000000 },
- { 0x00007038, 0x000004c2 },
- { 0x00008004, 0x00000000 },
- { 0x00008008, 0x00000000 },
- { 0x0000800c, 0x00000000 },
- { 0x00008018, 0x00000700 },
- { 0x00008020, 0x00000000 },
- { 0x00008038, 0x00000000 },
- { 0x0000803c, 0x00000000 },
- { 0x00008048, 0x40000000 },
- { 0x00008054, 0x00000000 },
- { 0x00008058, 0x00000000 },
- { 0x0000805c, 0x000fc78f },
- { 0x00008060, 0x0000000f },
- { 0x00008064, 0x00000000 },
- { 0x000080c0, 0x2a82301a },
- { 0x000080c4, 0x05dc01e0 },
- { 0x000080c8, 0x1f402710 },
- { 0x000080cc, 0x01f40000 },
- { 0x000080d0, 0x00001e00 },
- { 0x000080d4, 0x00000000 },
- { 0x000080d8, 0x00400000 },
- { 0x000080e0, 0xffffffff },
- { 0x000080e4, 0x0000ffff },
- { 0x000080e8, 0x003f3f3f },
- { 0x000080ec, 0x00000000 },
- { 0x000080f0, 0x00000000 },
- { 0x000080f4, 0x00000000 },
- { 0x000080f8, 0x00000000 },
- { 0x000080fc, 0x00020000 },
- { 0x00008100, 0x00020000 },
- { 0x00008104, 0x00000001 },
- { 0x00008108, 0x00000052 },
- { 0x0000810c, 0x00000000 },
- { 0x00008110, 0x00000168 },
- { 0x00008118, 0x000100aa },
- { 0x0000811c, 0x00003210 },
- { 0x00008124, 0x00000000 },
- { 0x00008128, 0x00000000 },
- { 0x0000812c, 0x00000000 },
- { 0x00008130, 0x00000000 },
- { 0x00008134, 0x00000000 },
- { 0x00008138, 0x00000000 },
- { 0x0000813c, 0x00000000 },
- { 0x00008144, 0xffffffff },
- { 0x00008168, 0x00000000 },
- { 0x0000816c, 0x00000000 },
- { 0x00008170, 0x32143320 },
- { 0x00008174, 0xfaa4fa50 },
- { 0x00008178, 0x00000100 },
- { 0x0000817c, 0x00000000 },
- { 0x000081c4, 0x00000000 },
- { 0x000081ec, 0x00000000 },
- { 0x000081f0, 0x00000000 },
- { 0x000081f4, 0x00000000 },
- { 0x000081f8, 0x00000000 },
- { 0x000081fc, 0x00000000 },
- { 0x00008200, 0x00000000 },
- { 0x00008204, 0x00000000 },
- { 0x00008208, 0x00000000 },
- { 0x0000820c, 0x00000000 },
- { 0x00008210, 0x00000000 },
- { 0x00008214, 0x00000000 },
- { 0x00008218, 0x00000000 },
- { 0x0000821c, 0x00000000 },
- { 0x00008220, 0x00000000 },
- { 0x00008224, 0x00000000 },
- { 0x00008228, 0x00000000 },
- { 0x0000822c, 0x00000000 },
- { 0x00008230, 0x00000000 },
- { 0x00008234, 0x00000000 },
- { 0x00008238, 0x00000000 },
- { 0x0000823c, 0x00000000 },
- { 0x00008240, 0x00100000 },
- { 0x00008244, 0x0010f400 },
- { 0x00008248, 0x00000100 },
- { 0x0000824c, 0x0001e800 },
- { 0x00008250, 0x00000000 },
- { 0x00008254, 0x00000000 },
- { 0x00008258, 0x00000000 },
- { 0x0000825c, 0x400000ff },
- { 0x00008260, 0x00080922 },
- { 0x00008264, 0xa8000010 },
- { 0x00008270, 0x00000000 },
- { 0x00008274, 0x40000000 },
- { 0x00008278, 0x003e4180 },
- { 0x0000827c, 0x00000000 },
- { 0x00008284, 0x0000002c },
- { 0x00008288, 0x0000002c },
- { 0x0000828c, 0x00000000 },
- { 0x00008294, 0x00000000 },
- { 0x00008298, 0x00000000 },
- { 0x00008300, 0x00000000 },
- { 0x00008304, 0x00000000 },
- { 0x00008308, 0x00000000 },
- { 0x0000830c, 0x00000000 },
- { 0x00008310, 0x00000000 },
- { 0x00008314, 0x00000000 },
- { 0x00008318, 0x00000000 },
- { 0x00008328, 0x00000000 },
- { 0x0000832c, 0x00000007 },
- { 0x00008330, 0x00000302 },
- { 0x00008334, 0x00000e00 },
- { 0x00008338, 0x00070000 },
- { 0x0000833c, 0x00000000 },
- { 0x00008340, 0x000107ff },
- { 0x00009808, 0x00000000 },
- { 0x0000980c, 0xad848e19 },
- { 0x00009810, 0x7d14e000 },
- { 0x00009814, 0x9c0a9f6b },
- { 0x0000981c, 0x00000000 },
- { 0x0000982c, 0x0000a000 },
- { 0x00009830, 0x00000000 },
- { 0x0000983c, 0x00200400 },
- { 0x00009840, 0x206a002e },
- { 0x0000984c, 0x1284233c },
- { 0x00009854, 0x00000859 },
- { 0x00009900, 0x00000000 },
- { 0x00009904, 0x00000000 },
- { 0x00009908, 0x00000000 },
- { 0x0000990c, 0x00000000 },
- { 0x0000991c, 0x10000fff },
- { 0x00009920, 0x05100000 },
- { 0x0000a920, 0x05100000 },
- { 0x0000b920, 0x05100000 },
- { 0x00009928, 0x00000001 },
- { 0x0000992c, 0x00000004 },
- { 0x00009934, 0x1e1f2022 },
- { 0x00009938, 0x0a0b0c0d },
- { 0x0000993c, 0x00000000 },
- { 0x00009948, 0x9280b212 },
- { 0x0000994c, 0x00020028 },
- { 0x00009954, 0x5d50e188 },
- { 0x00009958, 0x00081fff },
- { 0x0000c95c, 0x004b6a8e },
- { 0x0000c968, 0x000003ce },
- { 0x00009970, 0x190fb515 },
- { 0x00009974, 0x00000000 },
- { 0x00009978, 0x00000001 },
- { 0x0000997c, 0x00000000 },
- { 0x00009980, 0x00000000 },
- { 0x00009984, 0x00000000 },
- { 0x00009988, 0x00000000 },
- { 0x0000998c, 0x00000000 },
- { 0x00009990, 0x00000000 },
- { 0x00009994, 0x00000000 },
- { 0x00009998, 0x00000000 },
- { 0x0000999c, 0x00000000 },
- { 0x000099a0, 0x00000000 },
- { 0x000099a4, 0x00000001 },
- { 0x000099a8, 0x001fff00 },
- { 0x000099ac, 0x00000000 },
- { 0x000099b0, 0x03051000 },
- { 0x000099dc, 0x00000000 },
- { 0x000099e0, 0x00000200 },
- { 0x000099e4, 0xaaaaaaaa },
- { 0x000099e8, 0x3c466478 },
- { 0x000099ec, 0x000000aa },
- { 0x000099fc, 0x00001042 },
- { 0x00009b00, 0x00000000 },
- { 0x00009b04, 0x00000001 },
- { 0x00009b08, 0x00000002 },
- { 0x00009b0c, 0x00000003 },
- { 0x00009b10, 0x00000004 },
- { 0x00009b14, 0x00000005 },
- { 0x00009b18, 0x00000008 },
- { 0x00009b1c, 0x00000009 },
- { 0x00009b20, 0x0000000a },
- { 0x00009b24, 0x0000000b },
- { 0x00009b28, 0x0000000c },
- { 0x00009b2c, 0x0000000d },
- { 0x00009b30, 0x00000010 },
- { 0x00009b34, 0x00000011 },
- { 0x00009b38, 0x00000012 },
- { 0x00009b3c, 0x00000013 },
- { 0x00009b40, 0x00000014 },
- { 0x00009b44, 0x00000015 },
- { 0x00009b48, 0x00000018 },
- { 0x00009b4c, 0x00000019 },
- { 0x00009b50, 0x0000001a },
- { 0x00009b54, 0x0000001b },
- { 0x00009b58, 0x0000001c },
- { 0x00009b5c, 0x0000001d },
- { 0x00009b60, 0x00000020 },
- { 0x00009b64, 0x00000021 },
- { 0x00009b68, 0x00000022 },
- { 0x00009b6c, 0x00000023 },
- { 0x00009b70, 0x00000024 },
- { 0x00009b74, 0x00000025 },
- { 0x00009b78, 0x00000028 },
- { 0x00009b7c, 0x00000029 },
- { 0x00009b80, 0x0000002a },
- { 0x00009b84, 0x0000002b },
- { 0x00009b88, 0x0000002c },
- { 0x00009b8c, 0x0000002d },
- { 0x00009b90, 0x00000030 },
- { 0x00009b94, 0x00000031 },
- { 0x00009b98, 0x00000032 },
- { 0x00009b9c, 0x00000033 },
- { 0x00009ba0, 0x00000034 },
- { 0x00009ba4, 0x00000035 },
- { 0x00009ba8, 0x00000035 },
- { 0x00009bac, 0x00000035 },
- { 0x00009bb0, 0x00000035 },
- { 0x00009bb4, 0x00000035 },
- { 0x00009bb8, 0x00000035 },
- { 0x00009bbc, 0x00000035 },
- { 0x00009bc0, 0x00000035 },
- { 0x00009bc4, 0x00000035 },
- { 0x00009bc8, 0x00000035 },
- { 0x00009bcc, 0x00000035 },
- { 0x00009bd0, 0x00000035 },
- { 0x00009bd4, 0x00000035 },
- { 0x00009bd8, 0x00000035 },
- { 0x00009bdc, 0x00000035 },
- { 0x00009be0, 0x00000035 },
- { 0x00009be4, 0x00000035 },
- { 0x00009be8, 0x00000035 },
- { 0x00009bec, 0x00000035 },
- { 0x00009bf0, 0x00000035 },
- { 0x00009bf4, 0x00000035 },
- { 0x00009bf8, 0x00000010 },
- { 0x00009bfc, 0x0000001a },
- { 0x0000a210, 0x40806333 },
- { 0x0000a214, 0x00106c10 },
- { 0x0000a218, 0x009c4060 },
- { 0x0000a220, 0x018830c6 },
- { 0x0000a224, 0x00000400 },
- { 0x0000a228, 0x00000bb5 },
- { 0x0000a22c, 0x00000011 },
- { 0x0000a234, 0x20202020 },
- { 0x0000a238, 0x20202020 },
- { 0x0000a23c, 0x13c889af },
- { 0x0000a240, 0x38490a20 },
- { 0x0000a244, 0x00007bb6 },
- { 0x0000a248, 0x0fff3ffc },
- { 0x0000a24c, 0x00000001 },
- { 0x0000a250, 0x0000a000 },
- { 0x0000a254, 0x00000000 },
- { 0x0000a258, 0x0cc75380 },
- { 0x0000a25c, 0x0f0f0f01 },
- { 0x0000a260, 0xdfa91f01 },
- { 0x0000a268, 0x00000000 },
- { 0x0000a26c, 0x0e79e5c6 },
- { 0x0000b26c, 0x0e79e5c6 },
- { 0x0000c26c, 0x0e79e5c6 },
- { 0x0000d270, 0x00820820 },
- { 0x0000a278, 0x1ce739ce },
- { 0x0000a27c, 0x051701ce },
- { 0x0000a338, 0x00000000 },
- { 0x0000a33c, 0x00000000 },
- { 0x0000a340, 0x00000000 },
- { 0x0000a344, 0x00000000 },
- { 0x0000a348, 0x3fffffff },
- { 0x0000a34c, 0x3fffffff },
- { 0x0000a350, 0x3fffffff },
- { 0x0000a354, 0x0003ffff },
- { 0x0000a358, 0x79a8aa1f },
- { 0x0000d35c, 0x07ffffef },
- { 0x0000d360, 0x0fffffe7 },
- { 0x0000d364, 0x17ffffe5 },
- { 0x0000d368, 0x1fffffe4 },
- { 0x0000d36c, 0x37ffffe3 },
- { 0x0000d370, 0x3fffffe3 },
- { 0x0000d374, 0x57ffffe3 },
- { 0x0000d378, 0x5fffffe2 },
- { 0x0000d37c, 0x7fffffe2 },
- { 0x0000d380, 0x7f3c7bba },
- { 0x0000d384, 0xf3307ff0 },
- { 0x0000a388, 0x08000000 },
- { 0x0000a38c, 0x20202020 },
- { 0x0000a390, 0x20202020 },
- { 0x0000a394, 0x1ce739ce },
- { 0x0000a398, 0x000001ce },
- { 0x0000a39c, 0x00000001 },
- { 0x0000a3a0, 0x00000000 },
- { 0x0000a3a4, 0x00000000 },
- { 0x0000a3a8, 0x00000000 },
- { 0x0000a3ac, 0x00000000 },
- { 0x0000a3b0, 0x00000000 },
- { 0x0000a3b4, 0x00000000 },
- { 0x0000a3b8, 0x00000000 },
- { 0x0000a3bc, 0x00000000 },
- { 0x0000a3c0, 0x00000000 },
- { 0x0000a3c4, 0x00000000 },
- { 0x0000a3c8, 0x00000246 },
- { 0x0000a3cc, 0x20202020 },
- { 0x0000a3d0, 0x20202020 },
- { 0x0000a3d4, 0x20202020 },
- { 0x0000a3dc, 0x1ce739ce },
- { 0x0000a3e0, 0x000001ce },
-};
-
-static const u32 ar5416Bank0[][2] = {
- { 0x000098b0, 0x1e5795e5 },
- { 0x000098e0, 0x02008020 },
-};
-
-static const u32 ar5416BB_RfGain[][3] = {
- { 0x00009a00, 0x00000000, 0x00000000 },
- { 0x00009a04, 0x00000040, 0x00000040 },
- { 0x00009a08, 0x00000080, 0x00000080 },
- { 0x00009a0c, 0x000001a1, 0x00000141 },
- { 0x00009a10, 0x000001e1, 0x00000181 },
- { 0x00009a14, 0x00000021, 0x000001c1 },
- { 0x00009a18, 0x00000061, 0x00000001 },
- { 0x00009a1c, 0x00000168, 0x00000041 },
- { 0x00009a20, 0x000001a8, 0x000001a8 },
- { 0x00009a24, 0x000001e8, 0x000001e8 },
- { 0x00009a28, 0x00000028, 0x00000028 },
- { 0x00009a2c, 0x00000068, 0x00000068 },
- { 0x00009a30, 0x00000189, 0x000000a8 },
- { 0x00009a34, 0x000001c9, 0x00000169 },
- { 0x00009a38, 0x00000009, 0x000001a9 },
- { 0x00009a3c, 0x00000049, 0x000001e9 },
- { 0x00009a40, 0x00000089, 0x00000029 },
- { 0x00009a44, 0x00000170, 0x00000069 },
- { 0x00009a48, 0x000001b0, 0x00000190 },
- { 0x00009a4c, 0x000001f0, 0x000001d0 },
- { 0x00009a50, 0x00000030, 0x00000010 },
- { 0x00009a54, 0x00000070, 0x00000050 },
- { 0x00009a58, 0x00000191, 0x00000090 },
- { 0x00009a5c, 0x000001d1, 0x00000151 },
- { 0x00009a60, 0x00000011, 0x00000191 },
- { 0x00009a64, 0x00000051, 0x000001d1 },
- { 0x00009a68, 0x00000091, 0x00000011 },
- { 0x00009a6c, 0x000001b8, 0x00000051 },
- { 0x00009a70, 0x000001f8, 0x00000198 },
- { 0x00009a74, 0x00000038, 0x000001d8 },
- { 0x00009a78, 0x00000078, 0x00000018 },
- { 0x00009a7c, 0x00000199, 0x00000058 },
- { 0x00009a80, 0x000001d9, 0x00000098 },
- { 0x00009a84, 0x00000019, 0x00000159 },
- { 0x00009a88, 0x00000059, 0x00000199 },
- { 0x00009a8c, 0x00000099, 0x000001d9 },
- { 0x00009a90, 0x000000d9, 0x00000019 },
- { 0x00009a94, 0x000000f9, 0x00000059 },
- { 0x00009a98, 0x000000f9, 0x00000099 },
- { 0x00009a9c, 0x000000f9, 0x000000d9 },
- { 0x00009aa0, 0x000000f9, 0x000000f9 },
- { 0x00009aa4, 0x000000f9, 0x000000f9 },
- { 0x00009aa8, 0x000000f9, 0x000000f9 },
- { 0x00009aac, 0x000000f9, 0x000000f9 },
- { 0x00009ab0, 0x000000f9, 0x000000f9 },
- { 0x00009ab4, 0x000000f9, 0x000000f9 },
- { 0x00009ab8, 0x000000f9, 0x000000f9 },
- { 0x00009abc, 0x000000f9, 0x000000f9 },
- { 0x00009ac0, 0x000000f9, 0x000000f9 },
- { 0x00009ac4, 0x000000f9, 0x000000f9 },
- { 0x00009ac8, 0x000000f9, 0x000000f9 },
- { 0x00009acc, 0x000000f9, 0x000000f9 },
- { 0x00009ad0, 0x000000f9, 0x000000f9 },
- { 0x00009ad4, 0x000000f9, 0x000000f9 },
- { 0x00009ad8, 0x000000f9, 0x000000f9 },
- { 0x00009adc, 0x000000f9, 0x000000f9 },
- { 0x00009ae0, 0x000000f9, 0x000000f9 },
- { 0x00009ae4, 0x000000f9, 0x000000f9 },
- { 0x00009ae8, 0x000000f9, 0x000000f9 },
- { 0x00009aec, 0x000000f9, 0x000000f9 },
- { 0x00009af0, 0x000000f9, 0x000000f9 },
- { 0x00009af4, 0x000000f9, 0x000000f9 },
- { 0x00009af8, 0x000000f9, 0x000000f9 },
- { 0x00009afc, 0x000000f9, 0x000000f9 },
-};
-
-static const u32 ar5416Bank1[][2] = {
- { 0x000098b0, 0x02108421 },
- { 0x000098ec, 0x00000008 },
-};
-
-static const u32 ar5416Bank2[][2] = {
- { 0x000098b0, 0x0e73ff17 },
- { 0x000098e0, 0x00000420 },
-};
-
-static const u32 ar5416Bank3[][3] = {
- { 0x000098f0, 0x01400018, 0x01c00018 },
-};
-
-static const u32 ar5416Bank6[][3] = {
-
- { 0x0000989c, 0x00000000, 0x00000000 },
- { 0x0000989c, 0x00000000, 0x00000000 },
- { 0x0000989c, 0x00000000, 0x00000000 },
- { 0x0000989c, 0x00e00000, 0x00e00000 },
- { 0x0000989c, 0x005e0000, 0x005e0000 },
- { 0x0000989c, 0x00120000, 0x00120000 },
- { 0x0000989c, 0x00620000, 0x00620000 },
- { 0x0000989c, 0x00020000, 0x00020000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x40ff0000, 0x40ff0000 },
- { 0x0000989c, 0x005f0000, 0x005f0000 },
- { 0x0000989c, 0x00870000, 0x00870000 },
- { 0x0000989c, 0x00f90000, 0x00f90000 },
- { 0x0000989c, 0x007b0000, 0x007b0000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x00f50000, 0x00f50000 },
- { 0x0000989c, 0x00dc0000, 0x00dc0000 },
- { 0x0000989c, 0x00110000, 0x00110000 },
- { 0x0000989c, 0x006100a8, 0x006100a8 },
- { 0x0000989c, 0x004210a2, 0x004210a2 },
- { 0x0000989c, 0x0014008f, 0x0014008f },
- { 0x0000989c, 0x00c40003, 0x00c40003 },
- { 0x0000989c, 0x003000f2, 0x003000f2 },
- { 0x0000989c, 0x00440016, 0x00440016 },
- { 0x0000989c, 0x00410040, 0x00410040 },
- { 0x0000989c, 0x0001805e, 0x0001805e },
- { 0x0000989c, 0x0000c0ab, 0x0000c0ab },
- { 0x0000989c, 0x000000f1, 0x000000f1 },
- { 0x0000989c, 0x00002081, 0x00002081 },
- { 0x0000989c, 0x000000d4, 0x000000d4 },
- { 0x000098d0, 0x0000000f, 0x0010000f },
-};
-
-static const u32 ar5416Bank6TPC[][3] = {
- { 0x0000989c, 0x00000000, 0x00000000 },
- { 0x0000989c, 0x00000000, 0x00000000 },
- { 0x0000989c, 0x00000000, 0x00000000 },
- { 0x0000989c, 0x00e00000, 0x00e00000 },
- { 0x0000989c, 0x005e0000, 0x005e0000 },
- { 0x0000989c, 0x00120000, 0x00120000 },
- { 0x0000989c, 0x00620000, 0x00620000 },
- { 0x0000989c, 0x00020000, 0x00020000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x40ff0000, 0x40ff0000 },
- { 0x0000989c, 0x005f0000, 0x005f0000 },
- { 0x0000989c, 0x00870000, 0x00870000 },
- { 0x0000989c, 0x00f90000, 0x00f90000 },
- { 0x0000989c, 0x007b0000, 0x007b0000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x00f50000, 0x00f50000 },
- { 0x0000989c, 0x00dc0000, 0x00dc0000 },
- { 0x0000989c, 0x00110000, 0x00110000 },
- { 0x0000989c, 0x006100a8, 0x006100a8 },
- { 0x0000989c, 0x00423022, 0x00423022 },
- { 0x0000989c, 0x201400df, 0x201400df },
- { 0x0000989c, 0x00c40002, 0x00c40002 },
- { 0x0000989c, 0x003000f2, 0x003000f2 },
- { 0x0000989c, 0x00440016, 0x00440016 },
- { 0x0000989c, 0x00410040, 0x00410040 },
- { 0x0000989c, 0x0001805e, 0x0001805e },
- { 0x0000989c, 0x0000c0ab, 0x0000c0ab },
- { 0x0000989c, 0x000000e1, 0x000000e1 },
- { 0x0000989c, 0x00007081, 0x00007081 },
- { 0x0000989c, 0x000000d4, 0x000000d4 },
- { 0x000098d0, 0x0000000f, 0x0010000f },
-};
-
-static const u32 ar5416Bank7[][2] = {
- { 0x0000989c, 0x00000500 },
- { 0x0000989c, 0x00000800 },
- { 0x000098cc, 0x0000000e },
-};
-
-static const u32 ar5416Addac[][2] = {
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000003 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x0000000c },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000030 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000060 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000058 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x000098cc, 0x00000000 },
-};
-
-static const u32 ar5416Modes_9100[][6] = {
- { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
- { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
- { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 },
- { 0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008 },
- { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 },
- { 0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab, 0x098813cf },
- { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 },
- { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 },
- { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
- { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 },
- { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
- { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 },
- { 0x00009844, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0, 0x037216a0 },
- { 0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
- { 0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
- { 0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
- { 0x00009850, 0x6d48b4e2, 0x6d48b4e2, 0x6d48b0e2, 0x6d48b0e2, 0x6d48b0e2 },
- { 0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec86d2e, 0x7ec84d2e, 0x7ec82d2e },
- { 0x0000985c, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e },
- { 0x00009860, 0x00048d18, 0x00048d18, 0x00048d20, 0x00048d20, 0x00048d18 },
- { 0x0000c864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 },
- { 0x00009868, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0 },
- { 0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 },
- { 0x00009914, 0x000007d0, 0x000007d0, 0x00000898, 0x00000898, 0x000007d0 },
- { 0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016 },
- { 0x00009924, 0xd00a8a07, 0xd00a8a07, 0xd00a8a11, 0xd00a8a0d, 0xd00a8a0d },
- { 0x00009940, 0x00754604, 0x00754604, 0xfff81204, 0xfff81204, 0xfff81204 },
- { 0x00009944, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020 },
- { 0x00009954, 0x5f3ca3de, 0x5f3ca3de, 0xe250a51e, 0xe250a51e, 0xe250a51e },
- { 0x00009958, 0x2108ecff, 0x2108ecff, 0x3388ffff, 0x3388ffff, 0x3388ffff },
-#ifdef TB243
- { 0x00009960, 0x00000900, 0x00000900, 0x00009b40, 0x00009b40, 0x00012d80 },
- { 0x0000a960, 0x00000900, 0x00000900, 0x00009b40, 0x00009b40, 0x00012d80 },
- { 0x0000b960, 0x00000900, 0x00000900, 0x00009b40, 0x00009b40, 0x00012d80 },
- { 0x00009964, 0x00000000, 0x00000000, 0x00002210, 0x00002210, 0x00001120 },
-#else
- { 0x00009960, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0 },
- { 0x0000a960, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0 },
- { 0x0000b960, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0 },
- { 0x00009964, 0x00001120, 0x00001120, 0x00001120, 0x00001120, 0x00001120 },
-#endif
- { 0x0000c9bc, 0x001a0600, 0x001a0600, 0x001a1000, 0x001a0c00, 0x001a0c00 },
- { 0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, 0x038919be },
- { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 },
- { 0x000099c8, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329 },
- { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 },
- { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 },
- { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
- { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
- { 0x0000a204, 0x00000880, 0x00000880, 0x00000880, 0x00000880, 0x00000880 },
- { 0x0000a208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, 0xd03e4788 },
- { 0x0000a20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
- { 0x0000b20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
- { 0x0000c20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
- { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a },
- { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 },
- { 0x0000a274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa, 0x0a1a7caa },
- { 0x0000a300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, 0x18010000 },
- { 0x0000a304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, 0x2e032402 },
- { 0x0000a308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06, 0x4a0a3c06 },
- { 0x0000a30c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, 0x621a540b },
- { 0x0000a310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, 0x764f6c1b },
- { 0x0000a314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, 0x845b7a5a },
- { 0x0000a318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, 0x950f8ccf },
- { 0x0000a31c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, 0xa5cf9b4f },
- { 0x0000a320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, 0xbddfaf1f },
- { 0x0000a324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, 0xd1ffc93f },
- { 0x0000a328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, 0x00000000 },
- { 0x0000a32c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
- { 0x0000a330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
- { 0x0000a334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
-};
-
-static const u32 ar5416Common_9100[][2] = {
- { 0x0000000c, 0x00000000 },
- { 0x00000030, 0x00020015 },
- { 0x00000034, 0x00000005 },
- { 0x00000040, 0x00000000 },
- { 0x00000044, 0x00000008 },
- { 0x00000048, 0x00000008 },
- { 0x0000004c, 0x00000010 },
- { 0x00000050, 0x00000000 },
- { 0x00000054, 0x0000001f },
- { 0x00000800, 0x00000000 },
- { 0x00000804, 0x00000000 },
- { 0x00000808, 0x00000000 },
- { 0x0000080c, 0x00000000 },
- { 0x00000810, 0x00000000 },
- { 0x00000814, 0x00000000 },
- { 0x00000818, 0x00000000 },
- { 0x0000081c, 0x00000000 },
- { 0x00000820, 0x00000000 },
- { 0x00000824, 0x00000000 },
- { 0x00001040, 0x002ffc0f },
- { 0x00001044, 0x002ffc0f },
- { 0x00001048, 0x002ffc0f },
- { 0x0000104c, 0x002ffc0f },
- { 0x00001050, 0x002ffc0f },
- { 0x00001054, 0x002ffc0f },
- { 0x00001058, 0x002ffc0f },
- { 0x0000105c, 0x002ffc0f },
- { 0x00001060, 0x002ffc0f },
- { 0x00001064, 0x002ffc0f },
- { 0x00001230, 0x00000000 },
- { 0x00001270, 0x00000000 },
- { 0x00001038, 0x00000000 },
- { 0x00001078, 0x00000000 },
- { 0x000010b8, 0x00000000 },
- { 0x000010f8, 0x00000000 },
- { 0x00001138, 0x00000000 },
- { 0x00001178, 0x00000000 },
- { 0x000011b8, 0x00000000 },
- { 0x000011f8, 0x00000000 },
- { 0x00001238, 0x00000000 },
- { 0x00001278, 0x00000000 },
- { 0x000012b8, 0x00000000 },
- { 0x000012f8, 0x00000000 },
- { 0x00001338, 0x00000000 },
- { 0x00001378, 0x00000000 },
- { 0x000013b8, 0x00000000 },
- { 0x000013f8, 0x00000000 },
- { 0x00001438, 0x00000000 },
- { 0x00001478, 0x00000000 },
- { 0x000014b8, 0x00000000 },
- { 0x000014f8, 0x00000000 },
- { 0x00001538, 0x00000000 },
- { 0x00001578, 0x00000000 },
- { 0x000015b8, 0x00000000 },
- { 0x000015f8, 0x00000000 },
- { 0x00001638, 0x00000000 },
- { 0x00001678, 0x00000000 },
- { 0x000016b8, 0x00000000 },
- { 0x000016f8, 0x00000000 },
- { 0x00001738, 0x00000000 },
- { 0x00001778, 0x00000000 },
- { 0x000017b8, 0x00000000 },
- { 0x000017f8, 0x00000000 },
- { 0x0000103c, 0x00000000 },
- { 0x0000107c, 0x00000000 },
- { 0x000010bc, 0x00000000 },
- { 0x000010fc, 0x00000000 },
- { 0x0000113c, 0x00000000 },
- { 0x0000117c, 0x00000000 },
- { 0x000011bc, 0x00000000 },
- { 0x000011fc, 0x00000000 },
- { 0x0000123c, 0x00000000 },
- { 0x0000127c, 0x00000000 },
- { 0x000012bc, 0x00000000 },
- { 0x000012fc, 0x00000000 },
- { 0x0000133c, 0x00000000 },
- { 0x0000137c, 0x00000000 },
- { 0x000013bc, 0x00000000 },
- { 0x000013fc, 0x00000000 },
- { 0x0000143c, 0x00000000 },
- { 0x0000147c, 0x00000000 },
- { 0x00020010, 0x00000003 },
- { 0x00020038, 0x000004c2 },
- { 0x00008004, 0x00000000 },
- { 0x00008008, 0x00000000 },
- { 0x0000800c, 0x00000000 },
- { 0x00008018, 0x00000700 },
- { 0x00008020, 0x00000000 },
- { 0x00008038, 0x00000000 },
- { 0x0000803c, 0x00000000 },
- { 0x00008048, 0x40000000 },
- { 0x00008054, 0x00004000 },
- { 0x00008058, 0x00000000 },
- { 0x0000805c, 0x000fc78f },
- { 0x00008060, 0x0000000f },
- { 0x00008064, 0x00000000 },
- { 0x000080c0, 0x2a82301a },
- { 0x000080c4, 0x05dc01e0 },
- { 0x000080c8, 0x1f402710 },
- { 0x000080cc, 0x01f40000 },
- { 0x000080d0, 0x00001e00 },
- { 0x000080d4, 0x00000000 },
- { 0x000080d8, 0x00400000 },
- { 0x000080e0, 0xffffffff },
- { 0x000080e4, 0x0000ffff },
- { 0x000080e8, 0x003f3f3f },
- { 0x000080ec, 0x00000000 },
- { 0x000080f0, 0x00000000 },
- { 0x000080f4, 0x00000000 },
- { 0x000080f8, 0x00000000 },
- { 0x000080fc, 0x00020000 },
- { 0x00008100, 0x00020000 },
- { 0x00008104, 0x00000001 },
- { 0x00008108, 0x00000052 },
- { 0x0000810c, 0x00000000 },
- { 0x00008110, 0x00000168 },
- { 0x00008118, 0x000100aa },
- { 0x0000811c, 0x00003210 },
- { 0x00008120, 0x08f04800 },
- { 0x00008124, 0x00000000 },
- { 0x00008128, 0x00000000 },
- { 0x0000812c, 0x00000000 },
- { 0x00008130, 0x00000000 },
- { 0x00008134, 0x00000000 },
- { 0x00008138, 0x00000000 },
- { 0x0000813c, 0x00000000 },
- { 0x00008144, 0x00000000 },
- { 0x00008168, 0x00000000 },
- { 0x0000816c, 0x00000000 },
- { 0x00008170, 0x32143320 },
- { 0x00008174, 0xfaa4fa50 },
- { 0x00008178, 0x00000100 },
- { 0x0000817c, 0x00000000 },
- { 0x000081c4, 0x00000000 },
- { 0x000081d0, 0x00003210 },
- { 0x000081ec, 0x00000000 },
- { 0x000081f0, 0x00000000 },
- { 0x000081f4, 0x00000000 },
- { 0x000081f8, 0x00000000 },
- { 0x000081fc, 0x00000000 },
- { 0x00008200, 0x00000000 },
- { 0x00008204, 0x00000000 },
- { 0x00008208, 0x00000000 },
- { 0x0000820c, 0x00000000 },
- { 0x00008210, 0x00000000 },
- { 0x00008214, 0x00000000 },
- { 0x00008218, 0x00000000 },
- { 0x0000821c, 0x00000000 },
- { 0x00008220, 0x00000000 },
- { 0x00008224, 0x00000000 },
- { 0x00008228, 0x00000000 },
- { 0x0000822c, 0x00000000 },
- { 0x00008230, 0x00000000 },
- { 0x00008234, 0x00000000 },
- { 0x00008238, 0x00000000 },
- { 0x0000823c, 0x00000000 },
- { 0x00008240, 0x00100000 },
- { 0x00008244, 0x0010f400 },
- { 0x00008248, 0x00000100 },
- { 0x0000824c, 0x0001e800 },
- { 0x00008250, 0x00000000 },
- { 0x00008254, 0x00000000 },
- { 0x00008258, 0x00000000 },
- { 0x0000825c, 0x400000ff },
- { 0x00008260, 0x00080922 },
- { 0x00008270, 0x00000000 },
- { 0x00008274, 0x40000000 },
- { 0x00008278, 0x003e4180 },
- { 0x0000827c, 0x00000000 },
- { 0x00008284, 0x0000002c },
- { 0x00008288, 0x0000002c },
- { 0x0000828c, 0x00000000 },
- { 0x00008294, 0x00000000 },
- { 0x00008298, 0x00000000 },
- { 0x00008300, 0x00000000 },
- { 0x00008304, 0x00000000 },
- { 0x00008308, 0x00000000 },
- { 0x0000830c, 0x00000000 },
- { 0x00008310, 0x00000000 },
- { 0x00008314, 0x00000000 },
- { 0x00008318, 0x00000000 },
- { 0x00008328, 0x00000000 },
- { 0x0000832c, 0x00000007 },
- { 0x00008330, 0x00000302 },
- { 0x00008334, 0x00000e00 },
- { 0x00008338, 0x00000000 },
- { 0x0000833c, 0x00000000 },
- { 0x00008340, 0x000107ff },
- { 0x00009808, 0x00000000 },
- { 0x0000980c, 0xad848e19 },
- { 0x00009810, 0x7d14e000 },
- { 0x00009814, 0x9c0a9f6b },
- { 0x0000981c, 0x00000000 },
- { 0x0000982c, 0x0000a000 },
- { 0x00009830, 0x00000000 },
- { 0x0000983c, 0x00200400 },
- { 0x00009840, 0x206a01ae },
- { 0x0000984c, 0x1284233c },
- { 0x00009854, 0x00000859 },
- { 0x00009900, 0x00000000 },
- { 0x00009904, 0x00000000 },
- { 0x00009908, 0x00000000 },
- { 0x0000990c, 0x00000000 },
- { 0x0000991c, 0x10000fff },
- { 0x00009920, 0x05100000 },
- { 0x0000a920, 0x05100000 },
- { 0x0000b920, 0x05100000 },
- { 0x00009928, 0x00000001 },
- { 0x0000992c, 0x00000004 },
- { 0x00009934, 0x1e1f2022 },
- { 0x00009938, 0x0a0b0c0d },
- { 0x0000993c, 0x00000000 },
- { 0x00009948, 0x9280b212 },
- { 0x0000994c, 0x00020028 },
- { 0x0000c95c, 0x004b6a8e },
- { 0x0000c968, 0x000003ce },
- { 0x00009970, 0x190fb515 },
- { 0x00009974, 0x00000000 },
- { 0x00009978, 0x00000001 },
- { 0x0000997c, 0x00000000 },
- { 0x00009980, 0x00000000 },
- { 0x00009984, 0x00000000 },
- { 0x00009988, 0x00000000 },
- { 0x0000998c, 0x00000000 },
- { 0x00009990, 0x00000000 },
- { 0x00009994, 0x00000000 },
- { 0x00009998, 0x00000000 },
- { 0x0000999c, 0x00000000 },
- { 0x000099a0, 0x00000000 },
- { 0x000099a4, 0x00000001 },
- { 0x000099a8, 0x201fff00 },
- { 0x000099ac, 0x006f0000 },
- { 0x000099b0, 0x03051000 },
- { 0x000099dc, 0x00000000 },
- { 0x000099e0, 0x00000200 },
- { 0x000099e4, 0xaaaaaaaa },
- { 0x000099e8, 0x3c466478 },
- { 0x000099ec, 0x0cc80caa },
- { 0x000099fc, 0x00001042 },
- { 0x00009b00, 0x00000000 },
- { 0x00009b04, 0x00000001 },
- { 0x00009b08, 0x00000002 },
- { 0x00009b0c, 0x00000003 },
- { 0x00009b10, 0x00000004 },
- { 0x00009b14, 0x00000005 },
- { 0x00009b18, 0x00000008 },
- { 0x00009b1c, 0x00000009 },
- { 0x00009b20, 0x0000000a },
- { 0x00009b24, 0x0000000b },
- { 0x00009b28, 0x0000000c },
- { 0x00009b2c, 0x0000000d },
- { 0x00009b30, 0x00000010 },
- { 0x00009b34, 0x00000011 },
- { 0x00009b38, 0x00000012 },
- { 0x00009b3c, 0x00000013 },
- { 0x00009b40, 0x00000014 },
- { 0x00009b44, 0x00000015 },
- { 0x00009b48, 0x00000018 },
- { 0x00009b4c, 0x00000019 },
- { 0x00009b50, 0x0000001a },
- { 0x00009b54, 0x0000001b },
- { 0x00009b58, 0x0000001c },
- { 0x00009b5c, 0x0000001d },
- { 0x00009b60, 0x00000020 },
- { 0x00009b64, 0x00000021 },
- { 0x00009b68, 0x00000022 },
- { 0x00009b6c, 0x00000023 },
- { 0x00009b70, 0x00000024 },
- { 0x00009b74, 0x00000025 },
- { 0x00009b78, 0x00000028 },
- { 0x00009b7c, 0x00000029 },
- { 0x00009b80, 0x0000002a },
- { 0x00009b84, 0x0000002b },
- { 0x00009b88, 0x0000002c },
- { 0x00009b8c, 0x0000002d },
- { 0x00009b90, 0x00000030 },
- { 0x00009b94, 0x00000031 },
- { 0x00009b98, 0x00000032 },
- { 0x00009b9c, 0x00000033 },
- { 0x00009ba0, 0x00000034 },
- { 0x00009ba4, 0x00000035 },
- { 0x00009ba8, 0x00000035 },
- { 0x00009bac, 0x00000035 },
- { 0x00009bb0, 0x00000035 },
- { 0x00009bb4, 0x00000035 },
- { 0x00009bb8, 0x00000035 },
- { 0x00009bbc, 0x00000035 },
- { 0x00009bc0, 0x00000035 },
- { 0x00009bc4, 0x00000035 },
- { 0x00009bc8, 0x00000035 },
- { 0x00009bcc, 0x00000035 },
- { 0x00009bd0, 0x00000035 },
- { 0x00009bd4, 0x00000035 },
- { 0x00009bd8, 0x00000035 },
- { 0x00009bdc, 0x00000035 },
- { 0x00009be0, 0x00000035 },
- { 0x00009be4, 0x00000035 },
- { 0x00009be8, 0x00000035 },
- { 0x00009bec, 0x00000035 },
- { 0x00009bf0, 0x00000035 },
- { 0x00009bf4, 0x00000035 },
- { 0x00009bf8, 0x00000010 },
- { 0x00009bfc, 0x0000001a },
- { 0x0000a210, 0x40806333 },
- { 0x0000a214, 0x00106c10 },
- { 0x0000a218, 0x009c4060 },
- { 0x0000a220, 0x018830c6 },
- { 0x0000a224, 0x00000400 },
- { 0x0000a228, 0x001a0bb5 },
- { 0x0000a22c, 0x00000000 },
- { 0x0000a234, 0x20202020 },
- { 0x0000a238, 0x20202020 },
- { 0x0000a23c, 0x13c889ae },
- { 0x0000a240, 0x38490a20 },
- { 0x0000a244, 0x00007bb6 },
- { 0x0000a248, 0x0fff3ffc },
- { 0x0000a24c, 0x00000001 },
- { 0x0000a250, 0x0000a000 },
- { 0x0000a254, 0x00000000 },
- { 0x0000a258, 0x0cc75380 },
- { 0x0000a25c, 0x0f0f0f01 },
- { 0x0000a260, 0xdfa91f01 },
- { 0x0000a268, 0x00000001 },
- { 0x0000a26c, 0x0ebae9c6 },
- { 0x0000b26c, 0x0ebae9c6 },
- { 0x0000c26c, 0x0ebae9c6 },
- { 0x0000d270, 0x00820820 },
- { 0x0000a278, 0x1ce739ce },
- { 0x0000a27c, 0x050701ce },
- { 0x0000a338, 0x00000000 },
- { 0x0000a33c, 0x00000000 },
- { 0x0000a340, 0x00000000 },
- { 0x0000a344, 0x00000000 },
- { 0x0000a348, 0x3fffffff },
- { 0x0000a34c, 0x3fffffff },
- { 0x0000a350, 0x3fffffff },
- { 0x0000a354, 0x0003ffff },
- { 0x0000a358, 0x79a8aa33 },
- { 0x0000d35c, 0x07ffffef },
- { 0x0000d360, 0x0fffffe7 },
- { 0x0000d364, 0x17ffffe5 },
- { 0x0000d368, 0x1fffffe4 },
- { 0x0000d36c, 0x37ffffe3 },
- { 0x0000d370, 0x3fffffe3 },
- { 0x0000d374, 0x57ffffe3 },
- { 0x0000d378, 0x5fffffe2 },
- { 0x0000d37c, 0x7fffffe2 },
- { 0x0000d380, 0x7f3c7bba },
- { 0x0000d384, 0xf3307ff0 },
- { 0x0000a388, 0x0c000000 },
- { 0x0000a38c, 0x20202020 },
- { 0x0000a390, 0x20202020 },
- { 0x0000a394, 0x1ce739ce },
- { 0x0000a398, 0x000001ce },
- { 0x0000a39c, 0x00000001 },
- { 0x0000a3a0, 0x00000000 },
- { 0x0000a3a4, 0x00000000 },
- { 0x0000a3a8, 0x00000000 },
- { 0x0000a3ac, 0x00000000 },
- { 0x0000a3b0, 0x00000000 },
- { 0x0000a3b4, 0x00000000 },
- { 0x0000a3b8, 0x00000000 },
- { 0x0000a3bc, 0x00000000 },
- { 0x0000a3c0, 0x00000000 },
- { 0x0000a3c4, 0x00000000 },
- { 0x0000a3c8, 0x00000246 },
- { 0x0000a3cc, 0x20202020 },
- { 0x0000a3d0, 0x20202020 },
- { 0x0000a3d4, 0x20202020 },
- { 0x0000a3dc, 0x1ce739ce },
- { 0x0000a3e0, 0x000001ce },
-};
-
-static const u32 ar5416Bank0_9100[][2] = {
- { 0x000098b0, 0x1e5795e5 },
- { 0x000098e0, 0x02008020 },
-};
-
-static const u32 ar5416BB_RfGain_9100[][3] = {
- { 0x00009a00, 0x00000000, 0x00000000 },
- { 0x00009a04, 0x00000040, 0x00000040 },
- { 0x00009a08, 0x00000080, 0x00000080 },
- { 0x00009a0c, 0x000001a1, 0x00000141 },
- { 0x00009a10, 0x000001e1, 0x00000181 },
- { 0x00009a14, 0x00000021, 0x000001c1 },
- { 0x00009a18, 0x00000061, 0x00000001 },
- { 0x00009a1c, 0x00000168, 0x00000041 },
- { 0x00009a20, 0x000001a8, 0x000001a8 },
- { 0x00009a24, 0x000001e8, 0x000001e8 },
- { 0x00009a28, 0x00000028, 0x00000028 },
- { 0x00009a2c, 0x00000068, 0x00000068 },
- { 0x00009a30, 0x00000189, 0x000000a8 },
- { 0x00009a34, 0x000001c9, 0x00000169 },
- { 0x00009a38, 0x00000009, 0x000001a9 },
- { 0x00009a3c, 0x00000049, 0x000001e9 },
- { 0x00009a40, 0x00000089, 0x00000029 },
- { 0x00009a44, 0x00000170, 0x00000069 },
- { 0x00009a48, 0x000001b0, 0x00000190 },
- { 0x00009a4c, 0x000001f0, 0x000001d0 },
- { 0x00009a50, 0x00000030, 0x00000010 },
- { 0x00009a54, 0x00000070, 0x00000050 },
- { 0x00009a58, 0x00000191, 0x00000090 },
- { 0x00009a5c, 0x000001d1, 0x00000151 },
- { 0x00009a60, 0x00000011, 0x00000191 },
- { 0x00009a64, 0x00000051, 0x000001d1 },
- { 0x00009a68, 0x00000091, 0x00000011 },
- { 0x00009a6c, 0x000001b8, 0x00000051 },
- { 0x00009a70, 0x000001f8, 0x00000198 },
- { 0x00009a74, 0x00000038, 0x000001d8 },
- { 0x00009a78, 0x00000078, 0x00000018 },
- { 0x00009a7c, 0x00000199, 0x00000058 },
- { 0x00009a80, 0x000001d9, 0x00000098 },
- { 0x00009a84, 0x00000019, 0x00000159 },
- { 0x00009a88, 0x00000059, 0x00000199 },
- { 0x00009a8c, 0x00000099, 0x000001d9 },
- { 0x00009a90, 0x000000d9, 0x00000019 },
- { 0x00009a94, 0x000000f9, 0x00000059 },
- { 0x00009a98, 0x000000f9, 0x00000099 },
- { 0x00009a9c, 0x000000f9, 0x000000d9 },
- { 0x00009aa0, 0x000000f9, 0x000000f9 },
- { 0x00009aa4, 0x000000f9, 0x000000f9 },
- { 0x00009aa8, 0x000000f9, 0x000000f9 },
- { 0x00009aac, 0x000000f9, 0x000000f9 },
- { 0x00009ab0, 0x000000f9, 0x000000f9 },
- { 0x00009ab4, 0x000000f9, 0x000000f9 },
- { 0x00009ab8, 0x000000f9, 0x000000f9 },
- { 0x00009abc, 0x000000f9, 0x000000f9 },
- { 0x00009ac0, 0x000000f9, 0x000000f9 },
- { 0x00009ac4, 0x000000f9, 0x000000f9 },
- { 0x00009ac8, 0x000000f9, 0x000000f9 },
- { 0x00009acc, 0x000000f9, 0x000000f9 },
- { 0x00009ad0, 0x000000f9, 0x000000f9 },
- { 0x00009ad4, 0x000000f9, 0x000000f9 },
- { 0x00009ad8, 0x000000f9, 0x000000f9 },
- { 0x00009adc, 0x000000f9, 0x000000f9 },
- { 0x00009ae0, 0x000000f9, 0x000000f9 },
- { 0x00009ae4, 0x000000f9, 0x000000f9 },
- { 0x00009ae8, 0x000000f9, 0x000000f9 },
- { 0x00009aec, 0x000000f9, 0x000000f9 },
- { 0x00009af0, 0x000000f9, 0x000000f9 },
- { 0x00009af4, 0x000000f9, 0x000000f9 },
- { 0x00009af8, 0x000000f9, 0x000000f9 },
- { 0x00009afc, 0x000000f9, 0x000000f9 },
-};
-
-static const u32 ar5416Bank1_9100[][2] = {
- { 0x000098b0, 0x02108421},
- { 0x000098ec, 0x00000008},
-};
-
-static const u32 ar5416Bank2_9100[][2] = {
- { 0x000098b0, 0x0e73ff17},
- { 0x000098e0, 0x00000420},
-};
-
-static const u32 ar5416Bank3_9100[][3] = {
- { 0x000098f0, 0x01400018, 0x01c00018 },
-};
-
-static const u32 ar5416Bank6_9100[][3] = {
-
- { 0x0000989c, 0x00000000, 0x00000000 },
- { 0x0000989c, 0x00000000, 0x00000000 },
- { 0x0000989c, 0x00000000, 0x00000000 },
- { 0x0000989c, 0x00e00000, 0x00e00000 },
- { 0x0000989c, 0x005e0000, 0x005e0000 },
- { 0x0000989c, 0x00120000, 0x00120000 },
- { 0x0000989c, 0x00620000, 0x00620000 },
- { 0x0000989c, 0x00020000, 0x00020000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x005f0000, 0x005f0000 },
- { 0x0000989c, 0x00870000, 0x00870000 },
- { 0x0000989c, 0x00f90000, 0x00f90000 },
- { 0x0000989c, 0x007b0000, 0x007b0000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x00f50000, 0x00f50000 },
- { 0x0000989c, 0x00dc0000, 0x00dc0000 },
- { 0x0000989c, 0x00110000, 0x00110000 },
- { 0x0000989c, 0x006100a8, 0x006100a8 },
- { 0x0000989c, 0x004210a2, 0x004210a2 },
- { 0x0000989c, 0x0014000f, 0x0014000f },
- { 0x0000989c, 0x00c40002, 0x00c40002 },
- { 0x0000989c, 0x003000f2, 0x003000f2 },
- { 0x0000989c, 0x00440016, 0x00440016 },
- { 0x0000989c, 0x00410040, 0x00410040 },
- { 0x0000989c, 0x000180d6, 0x000180d6 },
- { 0x0000989c, 0x0000c0aa, 0x0000c0aa },
- { 0x0000989c, 0x000000b1, 0x000000b1 },
- { 0x0000989c, 0x00002000, 0x00002000 },
- { 0x0000989c, 0x000000d4, 0x000000d4 },
- { 0x000098d0, 0x0000000f, 0x0010000f },
-};
-
-
-static const u32 ar5416Bank6TPC_9100[][3] = {
-
- { 0x0000989c, 0x00000000, 0x00000000 },
- { 0x0000989c, 0x00000000, 0x00000000 },
- { 0x0000989c, 0x00000000, 0x00000000 },
- { 0x0000989c, 0x00e00000, 0x00e00000 },
- { 0x0000989c, 0x005e0000, 0x005e0000 },
- { 0x0000989c, 0x00120000, 0x00120000 },
- { 0x0000989c, 0x00620000, 0x00620000 },
- { 0x0000989c, 0x00020000, 0x00020000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x40ff0000, 0x40ff0000 },
- { 0x0000989c, 0x005f0000, 0x005f0000 },
- { 0x0000989c, 0x00870000, 0x00870000 },
- { 0x0000989c, 0x00f90000, 0x00f90000 },
- { 0x0000989c, 0x007b0000, 0x007b0000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x00f50000, 0x00f50000 },
- { 0x0000989c, 0x00dc0000, 0x00dc0000 },
- { 0x0000989c, 0x00110000, 0x00110000 },
- { 0x0000989c, 0x006100a8, 0x006100a8 },
- { 0x0000989c, 0x00423022, 0x00423022 },
- { 0x0000989c, 0x2014008f, 0x2014008f },
- { 0x0000989c, 0x00c40002, 0x00c40002 },
- { 0x0000989c, 0x003000f2, 0x003000f2 },
- { 0x0000989c, 0x00440016, 0x00440016 },
- { 0x0000989c, 0x00410040, 0x00410040 },
- { 0x0000989c, 0x0001805e, 0x0001805e },
- { 0x0000989c, 0x0000c0ab, 0x0000c0ab },
- { 0x0000989c, 0x000000e1, 0x000000e1 },
- { 0x0000989c, 0x00007080, 0x00007080 },
- { 0x0000989c, 0x000000d4, 0x000000d4 },
- { 0x000098d0, 0x0000000f, 0x0010000f },
-};
-
-static const u32 ar5416Bank7_9100[][2] = {
- { 0x0000989c, 0x00000500 },
- { 0x0000989c, 0x00000800 },
- { 0x000098cc, 0x0000000e },
-};
-
-static const u32 ar5416Addac_9100[][2] = {
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000010 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x000000c0 },
- {0x0000989c, 0x00000015 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x000098cc, 0x00000000 },
-};
-
-static const u32 ar5416Modes_9160[][6] = {
- { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
- { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
- { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 },
- { 0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008 },
- { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 },
- { 0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab, 0x098813cf },
- { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 },
- { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 },
- { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
- { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 },
- { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
- { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 },
- { 0x00009844, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0, 0x037216a0 },
- { 0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
- { 0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
- { 0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
- { 0x00009850, 0x6c48b4e2, 0x6c48b4e2, 0x6c48b0e2, 0x6c48b0e2, 0x6c48b0e2 },
- { 0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e },
- { 0x0000985c, 0x31395d5e, 0x31395d5e, 0x31395d5e, 0x31395d5e, 0x31395d5e },
- { 0x00009860, 0x00048d18, 0x00048d18, 0x00048d20, 0x00048d20, 0x00048d18 },
- { 0x0000c864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 },
- { 0x00009868, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0 },
- { 0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 },
- { 0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0 },
- { 0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016 },
- { 0x00009924, 0xd00a8a07, 0xd00a8a07, 0xd00a8a0d, 0xd00a8a0d, 0xd00a8a0d },
- { 0x00009944, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020 },
- { 0x00009960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40 },
- { 0x0000a960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40 },
- { 0x0000b960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40 },
- { 0x00009964, 0x00001120, 0x00001120, 0x00001120, 0x00001120, 0x00001120 },
- { 0x0000c968, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce, 0x000003ce },
- { 0x0000c9bc, 0x001a0600, 0x001a0600, 0x001a0c00, 0x001a0c00, 0x001a0c00 },
- { 0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, 0x038919be },
- { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 },
- { 0x000099c8, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329 },
- { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 },
- { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 },
- { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
- { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
- { 0x0000a204, 0x00000880, 0x00000880, 0x00000880, 0x00000880, 0x00000880 },
- { 0x0000a208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, 0xd03e4788 },
- { 0x0000a20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
- { 0x0000b20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
- { 0x0000c20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
- { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a },
- { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 },
- { 0x0000a274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa, 0x0a1a7caa },
- { 0x0000a300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, 0x18010000 },
- { 0x0000a304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, 0x2e032402 },
- { 0x0000a308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06, 0x4a0a3c06 },
- { 0x0000a30c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, 0x621a540b },
- { 0x0000a310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, 0x764f6c1b },
- { 0x0000a314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, 0x845b7a5a },
- { 0x0000a318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, 0x950f8ccf },
- { 0x0000a31c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, 0xa5cf9b4f },
- { 0x0000a320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, 0xbddfaf1f },
- { 0x0000a324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, 0xd1ffc93f },
- { 0x0000a328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, 0x00000000 },
- { 0x0000a32c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
- { 0x0000a330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
- { 0x0000a334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
-};
-
-static const u32 ar5416Common_9160[][2] = {
- { 0x0000000c, 0x00000000 },
- { 0x00000030, 0x00020015 },
- { 0x00000034, 0x00000005 },
- { 0x00000040, 0x00000000 },
- { 0x00000044, 0x00000008 },
- { 0x00000048, 0x00000008 },
- { 0x0000004c, 0x00000010 },
- { 0x00000050, 0x00000000 },
- { 0x00000054, 0x0000001f },
- { 0x00000800, 0x00000000 },
- { 0x00000804, 0x00000000 },
- { 0x00000808, 0x00000000 },
- { 0x0000080c, 0x00000000 },
- { 0x00000810, 0x00000000 },
- { 0x00000814, 0x00000000 },
- { 0x00000818, 0x00000000 },
- { 0x0000081c, 0x00000000 },
- { 0x00000820, 0x00000000 },
- { 0x00000824, 0x00000000 },
- { 0x00001040, 0x002ffc0f },
- { 0x00001044, 0x002ffc0f },
- { 0x00001048, 0x002ffc0f },
- { 0x0000104c, 0x002ffc0f },
- { 0x00001050, 0x002ffc0f },
- { 0x00001054, 0x002ffc0f },
- { 0x00001058, 0x002ffc0f },
- { 0x0000105c, 0x002ffc0f },
- { 0x00001060, 0x002ffc0f },
- { 0x00001064, 0x002ffc0f },
- { 0x00001230, 0x00000000 },
- { 0x00001270, 0x00000000 },
- { 0x00001038, 0x00000000 },
- { 0x00001078, 0x00000000 },
- { 0x000010b8, 0x00000000 },
- { 0x000010f8, 0x00000000 },
- { 0x00001138, 0x00000000 },
- { 0x00001178, 0x00000000 },
- { 0x000011b8, 0x00000000 },
- { 0x000011f8, 0x00000000 },
- { 0x00001238, 0x00000000 },
- { 0x00001278, 0x00000000 },
- { 0x000012b8, 0x00000000 },
- { 0x000012f8, 0x00000000 },
- { 0x00001338, 0x00000000 },
- { 0x00001378, 0x00000000 },
- { 0x000013b8, 0x00000000 },
- { 0x000013f8, 0x00000000 },
- { 0x00001438, 0x00000000 },
- { 0x00001478, 0x00000000 },
- { 0x000014b8, 0x00000000 },
- { 0x000014f8, 0x00000000 },
- { 0x00001538, 0x00000000 },
- { 0x00001578, 0x00000000 },
- { 0x000015b8, 0x00000000 },
- { 0x000015f8, 0x00000000 },
- { 0x00001638, 0x00000000 },
- { 0x00001678, 0x00000000 },
- { 0x000016b8, 0x00000000 },
- { 0x000016f8, 0x00000000 },
- { 0x00001738, 0x00000000 },
- { 0x00001778, 0x00000000 },
- { 0x000017b8, 0x00000000 },
- { 0x000017f8, 0x00000000 },
- { 0x0000103c, 0x00000000 },
- { 0x0000107c, 0x00000000 },
- { 0x000010bc, 0x00000000 },
- { 0x000010fc, 0x00000000 },
- { 0x0000113c, 0x00000000 },
- { 0x0000117c, 0x00000000 },
- { 0x000011bc, 0x00000000 },
- { 0x000011fc, 0x00000000 },
- { 0x0000123c, 0x00000000 },
- { 0x0000127c, 0x00000000 },
- { 0x000012bc, 0x00000000 },
- { 0x000012fc, 0x00000000 },
- { 0x0000133c, 0x00000000 },
- { 0x0000137c, 0x00000000 },
- { 0x000013bc, 0x00000000 },
- { 0x000013fc, 0x00000000 },
- { 0x0000143c, 0x00000000 },
- { 0x0000147c, 0x00000000 },
- { 0x00004030, 0x00000002 },
- { 0x0000403c, 0x00000002 },
- { 0x00007010, 0x00000020 },
- { 0x00007038, 0x000004c2 },
- { 0x00008004, 0x00000000 },
- { 0x00008008, 0x00000000 },
- { 0x0000800c, 0x00000000 },
- { 0x00008018, 0x00000700 },
- { 0x00008020, 0x00000000 },
- { 0x00008038, 0x00000000 },
- { 0x0000803c, 0x00000000 },
- { 0x00008048, 0x40000000 },
- { 0x00008054, 0x00000000 },
- { 0x00008058, 0x00000000 },
- { 0x0000805c, 0x000fc78f },
- { 0x00008060, 0x0000000f },
- { 0x00008064, 0x00000000 },
- { 0x000080c0, 0x2a82301a },
- { 0x000080c4, 0x05dc01e0 },
- { 0x000080c8, 0x1f402710 },
- { 0x000080cc, 0x01f40000 },
- { 0x000080d0, 0x00001e00 },
- { 0x000080d4, 0x00000000 },
- { 0x000080d8, 0x00400000 },
- { 0x000080e0, 0xffffffff },
- { 0x000080e4, 0x0000ffff },
- { 0x000080e8, 0x003f3f3f },
- { 0x000080ec, 0x00000000 },
- { 0x000080f0, 0x00000000 },
- { 0x000080f4, 0x00000000 },
- { 0x000080f8, 0x00000000 },
- { 0x000080fc, 0x00020000 },
- { 0x00008100, 0x00020000 },
- { 0x00008104, 0x00000001 },
- { 0x00008108, 0x00000052 },
- { 0x0000810c, 0x00000000 },
- { 0x00008110, 0x00000168 },
- { 0x00008118, 0x000100aa },
- { 0x0000811c, 0x00003210 },
- { 0x00008120, 0x08f04800 },
- { 0x00008124, 0x00000000 },
- { 0x00008128, 0x00000000 },
- { 0x0000812c, 0x00000000 },
- { 0x00008130, 0x00000000 },
- { 0x00008134, 0x00000000 },
- { 0x00008138, 0x00000000 },
- { 0x0000813c, 0x00000000 },
- { 0x00008144, 0xffffffff },
- { 0x00008168, 0x00000000 },
- { 0x0000816c, 0x00000000 },
- { 0x00008170, 0x32143320 },
- { 0x00008174, 0xfaa4fa50 },
- { 0x00008178, 0x00000100 },
- { 0x0000817c, 0x00000000 },
- { 0x000081c4, 0x00000000 },
- { 0x000081d0, 0x00003210 },
- { 0x000081ec, 0x00000000 },
- { 0x000081f0, 0x00000000 },
- { 0x000081f4, 0x00000000 },
- { 0x000081f8, 0x00000000 },
- { 0x000081fc, 0x00000000 },
- { 0x00008200, 0x00000000 },
- { 0x00008204, 0x00000000 },
- { 0x00008208, 0x00000000 },
- { 0x0000820c, 0x00000000 },
- { 0x00008210, 0x00000000 },
- { 0x00008214, 0x00000000 },
- { 0x00008218, 0x00000000 },
- { 0x0000821c, 0x00000000 },
- { 0x00008220, 0x00000000 },
- { 0x00008224, 0x00000000 },
- { 0x00008228, 0x00000000 },
- { 0x0000822c, 0x00000000 },
- { 0x00008230, 0x00000000 },
- { 0x00008234, 0x00000000 },
- { 0x00008238, 0x00000000 },
- { 0x0000823c, 0x00000000 },
- { 0x00008240, 0x00100000 },
- { 0x00008244, 0x0010f400 },
- { 0x00008248, 0x00000100 },
- { 0x0000824c, 0x0001e800 },
- { 0x00008250, 0x00000000 },
- { 0x00008254, 0x00000000 },
- { 0x00008258, 0x00000000 },
- { 0x0000825c, 0x400000ff },
- { 0x00008260, 0x00080922 },
- { 0x00008270, 0x00000000 },
- { 0x00008274, 0x40000000 },
- { 0x00008278, 0x003e4180 },
- { 0x0000827c, 0x00000000 },
- { 0x00008284, 0x0000002c },
- { 0x00008288, 0x0000002c },
- { 0x0000828c, 0x00000000 },
- { 0x00008294, 0x00000000 },
- { 0x00008298, 0x00000000 },
- { 0x00008300, 0x00000000 },
- { 0x00008304, 0x00000000 },
- { 0x00008308, 0x00000000 },
- { 0x0000830c, 0x00000000 },
- { 0x00008310, 0x00000000 },
- { 0x00008314, 0x00000000 },
- { 0x00008318, 0x00000000 },
- { 0x00008328, 0x00000000 },
- { 0x0000832c, 0x00000007 },
- { 0x00008330, 0x00000302 },
- { 0x00008334, 0x00000e00 },
- { 0x00008338, 0x00ff0000 },
- { 0x0000833c, 0x00000000 },
- { 0x00008340, 0x000107ff },
- { 0x00009808, 0x00000000 },
- { 0x0000980c, 0xad848e19 },
- { 0x00009810, 0x7d14e000 },
- { 0x00009814, 0x9c0a9f6b },
- { 0x0000981c, 0x00000000 },
- { 0x0000982c, 0x0000a000 },
- { 0x00009830, 0x00000000 },
- { 0x0000983c, 0x00200400 },
- { 0x00009840, 0x206a01ae },
- { 0x0000984c, 0x1284233c },
- { 0x00009854, 0x00000859 },
- { 0x00009900, 0x00000000 },
- { 0x00009904, 0x00000000 },
- { 0x00009908, 0x00000000 },
- { 0x0000990c, 0x00000000 },
- { 0x0000991c, 0x10000fff },
- { 0x00009920, 0x05100000 },
- { 0x0000a920, 0x05100000 },
- { 0x0000b920, 0x05100000 },
- { 0x00009928, 0x00000001 },
- { 0x0000992c, 0x00000004 },
- { 0x00009934, 0x1e1f2022 },
- { 0x00009938, 0x0a0b0c0d },
- { 0x0000993c, 0x00000000 },
- { 0x00009948, 0x9280b212 },
- { 0x0000994c, 0x00020028 },
- { 0x00009954, 0x5f3ca3de },
- { 0x00009958, 0x2108ecff },
- { 0x00009940, 0x00750604 },
- { 0x0000c95c, 0x004b6a8e },
- { 0x00009970, 0x190fb515 },
- { 0x00009974, 0x00000000 },
- { 0x00009978, 0x00000001 },
- { 0x0000997c, 0x00000000 },
- { 0x00009980, 0x00000000 },
- { 0x00009984, 0x00000000 },
- { 0x00009988, 0x00000000 },
- { 0x0000998c, 0x00000000 },
- { 0x00009990, 0x00000000 },
- { 0x00009994, 0x00000000 },
- { 0x00009998, 0x00000000 },
- { 0x0000999c, 0x00000000 },
- { 0x000099a0, 0x00000000 },
- { 0x000099a4, 0x00000001 },
- { 0x000099a8, 0x201fff00 },
- { 0x000099ac, 0x006f0000 },
- { 0x000099b0, 0x03051000 },
- { 0x000099dc, 0x00000000 },
- { 0x000099e0, 0x00000200 },
- { 0x000099e4, 0xaaaaaaaa },
- { 0x000099e8, 0x3c466478 },
- { 0x000099ec, 0x0cc80caa },
- { 0x000099fc, 0x00001042 },
- { 0x00009b00, 0x00000000 },
- { 0x00009b04, 0x00000001 },
- { 0x00009b08, 0x00000002 },
- { 0x00009b0c, 0x00000003 },
- { 0x00009b10, 0x00000004 },
- { 0x00009b14, 0x00000005 },
- { 0x00009b18, 0x00000008 },
- { 0x00009b1c, 0x00000009 },
- { 0x00009b20, 0x0000000a },
- { 0x00009b24, 0x0000000b },
- { 0x00009b28, 0x0000000c },
- { 0x00009b2c, 0x0000000d },
- { 0x00009b30, 0x00000010 },
- { 0x00009b34, 0x00000011 },
- { 0x00009b38, 0x00000012 },
- { 0x00009b3c, 0x00000013 },
- { 0x00009b40, 0x00000014 },
- { 0x00009b44, 0x00000015 },
- { 0x00009b48, 0x00000018 },
- { 0x00009b4c, 0x00000019 },
- { 0x00009b50, 0x0000001a },
- { 0x00009b54, 0x0000001b },
- { 0x00009b58, 0x0000001c },
- { 0x00009b5c, 0x0000001d },
- { 0x00009b60, 0x00000020 },
- { 0x00009b64, 0x00000021 },
- { 0x00009b68, 0x00000022 },
- { 0x00009b6c, 0x00000023 },
- { 0x00009b70, 0x00000024 },
- { 0x00009b74, 0x00000025 },
- { 0x00009b78, 0x00000028 },
- { 0x00009b7c, 0x00000029 },
- { 0x00009b80, 0x0000002a },
- { 0x00009b84, 0x0000002b },
- { 0x00009b88, 0x0000002c },
- { 0x00009b8c, 0x0000002d },
- { 0x00009b90, 0x00000030 },
- { 0x00009b94, 0x00000031 },
- { 0x00009b98, 0x00000032 },
- { 0x00009b9c, 0x00000033 },
- { 0x00009ba0, 0x00000034 },
- { 0x00009ba4, 0x00000035 },
- { 0x00009ba8, 0x00000035 },
- { 0x00009bac, 0x00000035 },
- { 0x00009bb0, 0x00000035 },
- { 0x00009bb4, 0x00000035 },
- { 0x00009bb8, 0x00000035 },
- { 0x00009bbc, 0x00000035 },
- { 0x00009bc0, 0x00000035 },
- { 0x00009bc4, 0x00000035 },
- { 0x00009bc8, 0x00000035 },
- { 0x00009bcc, 0x00000035 },
- { 0x00009bd0, 0x00000035 },
- { 0x00009bd4, 0x00000035 },
- { 0x00009bd8, 0x00000035 },
- { 0x00009bdc, 0x00000035 },
- { 0x00009be0, 0x00000035 },
- { 0x00009be4, 0x00000035 },
- { 0x00009be8, 0x00000035 },
- { 0x00009bec, 0x00000035 },
- { 0x00009bf0, 0x00000035 },
- { 0x00009bf4, 0x00000035 },
- { 0x00009bf8, 0x00000010 },
- { 0x00009bfc, 0x0000001a },
- { 0x0000a210, 0x40806333 },
- { 0x0000a214, 0x00106c10 },
- { 0x0000a218, 0x009c4060 },
- { 0x0000a220, 0x018830c6 },
- { 0x0000a224, 0x00000400 },
- { 0x0000a228, 0x001a0bb5 },
- { 0x0000a22c, 0x00000000 },
- { 0x0000a234, 0x20202020 },
- { 0x0000a238, 0x20202020 },
- { 0x0000a23c, 0x13c889af },
- { 0x0000a240, 0x38490a20 },
- { 0x0000a244, 0x00007bb6 },
- { 0x0000a248, 0x0fff3ffc },
- { 0x0000a24c, 0x00000001 },
- { 0x0000a250, 0x0000e000 },
- { 0x0000a254, 0x00000000 },
- { 0x0000a258, 0x0cc75380 },
- { 0x0000a25c, 0x0f0f0f01 },
- { 0x0000a260, 0xdfa91f01 },
- { 0x0000a268, 0x00000001 },
- { 0x0000a26c, 0x0ebae9c6 },
- { 0x0000b26c, 0x0ebae9c6 },
- { 0x0000c26c, 0x0ebae9c6 },
- { 0x0000d270, 0x00820820 },
- { 0x0000a278, 0x1ce739ce },
- { 0x0000a27c, 0x050701ce },
- { 0x0000a338, 0x00000000 },
- { 0x0000a33c, 0x00000000 },
- { 0x0000a340, 0x00000000 },
- { 0x0000a344, 0x00000000 },
- { 0x0000a348, 0x3fffffff },
- { 0x0000a34c, 0x3fffffff },
- { 0x0000a350, 0x3fffffff },
- { 0x0000a354, 0x0003ffff },
- { 0x0000a358, 0x79bfaa03 },
- { 0x0000d35c, 0x07ffffef },
- { 0x0000d360, 0x0fffffe7 },
- { 0x0000d364, 0x17ffffe5 },
- { 0x0000d368, 0x1fffffe4 },
- { 0x0000d36c, 0x37ffffe3 },
- { 0x0000d370, 0x3fffffe3 },
- { 0x0000d374, 0x57ffffe3 },
- { 0x0000d378, 0x5fffffe2 },
- { 0x0000d37c, 0x7fffffe2 },
- { 0x0000d380, 0x7f3c7bba },
- { 0x0000d384, 0xf3307ff0 },
- { 0x0000a388, 0x0c000000 },
- { 0x0000a38c, 0x20202020 },
- { 0x0000a390, 0x20202020 },
- { 0x0000a394, 0x1ce739ce },
- { 0x0000a398, 0x000001ce },
- { 0x0000a39c, 0x00000001 },
- { 0x0000a3a0, 0x00000000 },
- { 0x0000a3a4, 0x00000000 },
- { 0x0000a3a8, 0x00000000 },
- { 0x0000a3ac, 0x00000000 },
- { 0x0000a3b0, 0x00000000 },
- { 0x0000a3b4, 0x00000000 },
- { 0x0000a3b8, 0x00000000 },
- { 0x0000a3bc, 0x00000000 },
- { 0x0000a3c0, 0x00000000 },
- { 0x0000a3c4, 0x00000000 },
- { 0x0000a3c8, 0x00000246 },
- { 0x0000a3cc, 0x20202020 },
- { 0x0000a3d0, 0x20202020 },
- { 0x0000a3d4, 0x20202020 },
- { 0x0000a3dc, 0x1ce739ce },
- { 0x0000a3e0, 0x000001ce },
-};
-
-static const u32 ar5416Bank0_9160[][2] = {
- { 0x000098b0, 0x1e5795e5 },
- { 0x000098e0, 0x02008020 },
-};
-
-static const u32 ar5416BB_RfGain_9160[][3] = {
- { 0x00009a00, 0x00000000, 0x00000000 },
- { 0x00009a04, 0x00000040, 0x00000040 },
- { 0x00009a08, 0x00000080, 0x00000080 },
- { 0x00009a0c, 0x000001a1, 0x00000141 },
- { 0x00009a10, 0x000001e1, 0x00000181 },
- { 0x00009a14, 0x00000021, 0x000001c1 },
- { 0x00009a18, 0x00000061, 0x00000001 },
- { 0x00009a1c, 0x00000168, 0x00000041 },
- { 0x00009a20, 0x000001a8, 0x000001a8 },
- { 0x00009a24, 0x000001e8, 0x000001e8 },
- { 0x00009a28, 0x00000028, 0x00000028 },
- { 0x00009a2c, 0x00000068, 0x00000068 },
- { 0x00009a30, 0x00000189, 0x000000a8 },
- { 0x00009a34, 0x000001c9, 0x00000169 },
- { 0x00009a38, 0x00000009, 0x000001a9 },
- { 0x00009a3c, 0x00000049, 0x000001e9 },
- { 0x00009a40, 0x00000089, 0x00000029 },
- { 0x00009a44, 0x00000170, 0x00000069 },
- { 0x00009a48, 0x000001b0, 0x00000190 },
- { 0x00009a4c, 0x000001f0, 0x000001d0 },
- { 0x00009a50, 0x00000030, 0x00000010 },
- { 0x00009a54, 0x00000070, 0x00000050 },
- { 0x00009a58, 0x00000191, 0x00000090 },
- { 0x00009a5c, 0x000001d1, 0x00000151 },
- { 0x00009a60, 0x00000011, 0x00000191 },
- { 0x00009a64, 0x00000051, 0x000001d1 },
- { 0x00009a68, 0x00000091, 0x00000011 },
- { 0x00009a6c, 0x000001b8, 0x00000051 },
- { 0x00009a70, 0x000001f8, 0x00000198 },
- { 0x00009a74, 0x00000038, 0x000001d8 },
- { 0x00009a78, 0x00000078, 0x00000018 },
- { 0x00009a7c, 0x00000199, 0x00000058 },
- { 0x00009a80, 0x000001d9, 0x00000098 },
- { 0x00009a84, 0x00000019, 0x00000159 },
- { 0x00009a88, 0x00000059, 0x00000199 },
- { 0x00009a8c, 0x00000099, 0x000001d9 },
- { 0x00009a90, 0x000000d9, 0x00000019 },
- { 0x00009a94, 0x000000f9, 0x00000059 },
- { 0x00009a98, 0x000000f9, 0x00000099 },
- { 0x00009a9c, 0x000000f9, 0x000000d9 },
- { 0x00009aa0, 0x000000f9, 0x000000f9 },
- { 0x00009aa4, 0x000000f9, 0x000000f9 },
- { 0x00009aa8, 0x000000f9, 0x000000f9 },
- { 0x00009aac, 0x000000f9, 0x000000f9 },
- { 0x00009ab0, 0x000000f9, 0x000000f9 },
- { 0x00009ab4, 0x000000f9, 0x000000f9 },
- { 0x00009ab8, 0x000000f9, 0x000000f9 },
- { 0x00009abc, 0x000000f9, 0x000000f9 },
- { 0x00009ac0, 0x000000f9, 0x000000f9 },
- { 0x00009ac4, 0x000000f9, 0x000000f9 },
- { 0x00009ac8, 0x000000f9, 0x000000f9 },
- { 0x00009acc, 0x000000f9, 0x000000f9 },
- { 0x00009ad0, 0x000000f9, 0x000000f9 },
- { 0x00009ad4, 0x000000f9, 0x000000f9 },
- { 0x00009ad8, 0x000000f9, 0x000000f9 },
- { 0x00009adc, 0x000000f9, 0x000000f9 },
- { 0x00009ae0, 0x000000f9, 0x000000f9 },
- { 0x00009ae4, 0x000000f9, 0x000000f9 },
- { 0x00009ae8, 0x000000f9, 0x000000f9 },
- { 0x00009aec, 0x000000f9, 0x000000f9 },
- { 0x00009af0, 0x000000f9, 0x000000f9 },
- { 0x00009af4, 0x000000f9, 0x000000f9 },
- { 0x00009af8, 0x000000f9, 0x000000f9 },
- { 0x00009afc, 0x000000f9, 0x000000f9 },
-};
-
-static const u32 ar5416Bank1_9160[][2] = {
- { 0x000098b0, 0x02108421 },
- { 0x000098ec, 0x00000008 },
-};
-
-static const u32 ar5416Bank2_9160[][2] = {
- { 0x000098b0, 0x0e73ff17 },
- { 0x000098e0, 0x00000420 },
-};
-
-static const u32 ar5416Bank3_9160[][3] = {
- { 0x000098f0, 0x01400018, 0x01c00018 },
-};
-
-static const u32 ar5416Bank6_9160[][3] = {
- { 0x0000989c, 0x00000000, 0x00000000 },
- { 0x0000989c, 0x00000000, 0x00000000 },
- { 0x0000989c, 0x00000000, 0x00000000 },
- { 0x0000989c, 0x00e00000, 0x00e00000 },
- { 0x0000989c, 0x005e0000, 0x005e0000 },
- { 0x0000989c, 0x00120000, 0x00120000 },
- { 0x0000989c, 0x00620000, 0x00620000 },
- { 0x0000989c, 0x00020000, 0x00020000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x40ff0000, 0x40ff0000 },
- { 0x0000989c, 0x005f0000, 0x005f0000 },
- { 0x0000989c, 0x00870000, 0x00870000 },
- { 0x0000989c, 0x00f90000, 0x00f90000 },
- { 0x0000989c, 0x007b0000, 0x007b0000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x00f50000, 0x00f50000 },
- { 0x0000989c, 0x00dc0000, 0x00dc0000 },
- { 0x0000989c, 0x00110000, 0x00110000 },
- { 0x0000989c, 0x006100a8, 0x006100a8 },
- { 0x0000989c, 0x004210a2, 0x004210a2 },
- { 0x0000989c, 0x0014008f, 0x0014008f },
- { 0x0000989c, 0x00c40003, 0x00c40003 },
- { 0x0000989c, 0x003000f2, 0x003000f2 },
- { 0x0000989c, 0x00440016, 0x00440016 },
- { 0x0000989c, 0x00410040, 0x00410040 },
- { 0x0000989c, 0x0001805e, 0x0001805e },
- { 0x0000989c, 0x0000c0ab, 0x0000c0ab },
- { 0x0000989c, 0x000000f1, 0x000000f1 },
- { 0x0000989c, 0x00002081, 0x00002081 },
- { 0x0000989c, 0x000000d4, 0x000000d4 },
- { 0x000098d0, 0x0000000f, 0x0010000f },
-};
-
-static const u32 ar5416Bank6TPC_9160[][3] = {
- { 0x0000989c, 0x00000000, 0x00000000 },
- { 0x0000989c, 0x00000000, 0x00000000 },
- { 0x0000989c, 0x00000000, 0x00000000 },
- { 0x0000989c, 0x00e00000, 0x00e00000 },
- { 0x0000989c, 0x005e0000, 0x005e0000 },
- { 0x0000989c, 0x00120000, 0x00120000 },
- { 0x0000989c, 0x00620000, 0x00620000 },
- { 0x0000989c, 0x00020000, 0x00020000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x40ff0000, 0x40ff0000 },
- { 0x0000989c, 0x005f0000, 0x005f0000 },
- { 0x0000989c, 0x00870000, 0x00870000 },
- { 0x0000989c, 0x00f90000, 0x00f90000 },
- { 0x0000989c, 0x007b0000, 0x007b0000 },
- { 0x0000989c, 0x00ff0000, 0x00ff0000 },
- { 0x0000989c, 0x00f50000, 0x00f50000 },
- { 0x0000989c, 0x00dc0000, 0x00dc0000 },
- { 0x0000989c, 0x00110000, 0x00110000 },
- { 0x0000989c, 0x006100a8, 0x006100a8 },
- { 0x0000989c, 0x00423022, 0x00423022 },
- { 0x0000989c, 0x2014008f, 0x2014008f },
- { 0x0000989c, 0x00c40002, 0x00c40002 },
- { 0x0000989c, 0x003000f2, 0x003000f2 },
- { 0x0000989c, 0x00440016, 0x00440016 },
- { 0x0000989c, 0x00410040, 0x00410040 },
- { 0x0000989c, 0x0001805e, 0x0001805e },
- { 0x0000989c, 0x0000c0ab, 0x0000c0ab },
- { 0x0000989c, 0x000000e1, 0x000000e1 },
- { 0x0000989c, 0x00007080, 0x00007080 },
- { 0x0000989c, 0x000000d4, 0x000000d4 },
- { 0x000098d0, 0x0000000f, 0x0010000f },
-};
-
-static const u32 ar5416Bank7_9160[][2] = {
- { 0x0000989c, 0x00000500 },
- { 0x0000989c, 0x00000800 },
- { 0x000098cc, 0x0000000e },
-};
+#ifndef INITVALS_9002_10_H
+#define INITVALS_9002_10_H
-static u32 ar5416Addac_9160[][2] = {
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x000000c0 },
- {0x0000989c, 0x00000018 },
- {0x0000989c, 0x00000004 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x000000c0 },
- {0x0000989c, 0x00000019 },
- {0x0000989c, 0x00000004 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000004 },
- {0x0000989c, 0x00000003 },
- {0x0000989c, 0x00000008 },
- {0x0000989c, 0x00000000 },
- {0x000098cc, 0x00000000 },
-};
-
-static u32 ar5416Addac_91601_1[][2] = {
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x000000c0 },
- {0x0000989c, 0x00000018 },
- {0x0000989c, 0x00000004 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x000000c0 },
- {0x0000989c, 0x00000019 },
- {0x0000989c, 0x00000004 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x0000989c, 0x00000000 },
- {0x000098cc, 0x00000000 },
-};
-
-/* XXX 9280 1 */
static const u32 ar9280Modes_9280[][6] = {
{ 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
{ 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
@@ -2766,7 +793,7 @@ static const u32 ar9280Common_9280_2[][2] = {
{ 0x00008258, 0x00000000 },
{ 0x0000825c, 0x400000ff },
{ 0x00008260, 0x00080922 },
- { 0x00008264, 0xa8a00010 },
+ { 0x00008264, 0x88a00010 },
{ 0x00008270, 0x00000000 },
{ 0x00008274, 0x40000000 },
{ 0x00008278, 0x003e4180 },
@@ -3441,7 +1468,7 @@ static const u32 ar9280PciePhy_clkreq_always_on_L1_9280[][2] = {
};
/* AR9285 Revision 10 */
-static const u_int32_t ar9285Modes_9285[][6] = {
+static const u32 ar9285Modes_9285[][6] = {
{ 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
{ 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
{ 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 },
@@ -3763,7 +1790,7 @@ static const u_int32_t ar9285Modes_9285[][6] = {
{ 0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e, 0x7999aa0e },
};
-static const u_int32_t ar9285Common_9285[][2] = {
+static const u32 ar9285Common_9285[][2] = {
{ 0x0000000c, 0x00000000 },
{ 0x00000030, 0x00020045 },
{ 0x00000034, 0x00000005 },
@@ -3936,7 +1963,7 @@ static const u_int32_t ar9285Common_9285[][2] = {
{ 0x00008258, 0x00000000 },
{ 0x0000825c, 0x400000ff },
{ 0x00008260, 0x00080922 },
- { 0x00008264, 0xa8a00010 },
+ { 0x00008264, 0x88a00010 },
{ 0x00008270, 0x00000000 },
{ 0x00008274, 0x40000000 },
{ 0x00008278, 0x003e4180 },
@@ -4096,7 +2123,7 @@ static const u_int32_t ar9285Common_9285[][2] = {
{ 0x00007870, 0x10142c00 },
};
-static const u_int32_t ar9285PciePhy_clkreq_always_on_L1_9285[][2] = {
+static const u32 ar9285PciePhy_clkreq_always_on_L1_9285[][2] = {
{0x00004040, 0x9248fd00 },
{0x00004040, 0x24924924 },
{0x00004040, 0xa8000019 },
@@ -4109,7 +2136,7 @@ static const u_int32_t ar9285PciePhy_clkreq_always_on_L1_9285[][2] = {
{0x00004044, 0x00000000 },
};
-static const u_int32_t ar9285PciePhy_clkreq_off_L1_9285[][2] = {
+static const u32 ar9285PciePhy_clkreq_off_L1_9285[][2] = {
{0x00004040, 0x9248fd00 },
{0x00004040, 0x24924924 },
{0x00004040, 0xa8000019 },
@@ -4123,7 +2150,7 @@ static const u_int32_t ar9285PciePhy_clkreq_off_L1_9285[][2] = {
};
/* AR9285 v1_2 PCI Register Writes. Created: 04/13/09 */
-static const u_int32_t ar9285Modes_9285_1_2[][6] = {
+static const u32 ar9285Modes_9285_1_2[][6] = {
/* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */
{ 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
{ 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
@@ -4184,7 +2211,7 @@ static const u_int32_t ar9285Modes_9285_1_2[][6] = {
{ 0x00009a44, 0x00000000, 0x00000000, 0x000581a8, 0x000581a8, 0x00000000 },
{ 0x00009a48, 0x00000000, 0x00000000, 0x00058284, 0x00058284, 0x00000000 },
{ 0x00009a4c, 0x00000000, 0x00000000, 0x00058288, 0x00058288, 0x00000000 },
- { 0x00009a50, 0x00000000, 0x00000000, 0x00058220, 0x00058220, 0x00000000 },
+ { 0x00009a50, 0x00000000, 0x00000000, 0x00058224, 0x00058224, 0x00000000 },
{ 0x00009a54, 0x00000000, 0x00000000, 0x00058290, 0x00058290, 0x00000000 },
{ 0x00009a58, 0x00000000, 0x00000000, 0x00058300, 0x00058300, 0x00000000 },
{ 0x00009a5c, 0x00000000, 0x00000000, 0x00058304, 0x00058304, 0x00000000 },
@@ -4198,8 +2225,8 @@ static const u_int32_t ar9285Modes_9285_1_2[][6] = {
{ 0x00009a7c, 0x00000000, 0x00000000, 0x0006870c, 0x0006870c, 0x00000000 },
{ 0x00009a80, 0x00000000, 0x00000000, 0x00068780, 0x00068780, 0x00000000 },
{ 0x00009a84, 0x00000000, 0x00000000, 0x00068784, 0x00068784, 0x00000000 },
- { 0x00009a88, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04, 0x00000000 },
- { 0x00009a8c, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 },
+ { 0x00009a88, 0x00000000, 0x00000000, 0x00078b00, 0x00078b00, 0x00000000 },
+ { 0x00009a8c, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04, 0x00000000 },
{ 0x00009a90, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 },
{ 0x00009a94, 0x00000000, 0x00000000, 0x00078b0c, 0x00078b0c, 0x00000000 },
{ 0x00009a98, 0x00000000, 0x00000000, 0x00078b80, 0x00078b80, 0x00000000 },
@@ -4312,7 +2339,7 @@ static const u_int32_t ar9285Modes_9285_1_2[][6] = {
{ 0x0000aa44, 0x00000000, 0x00000000, 0x000581a8, 0x000581a8, 0x00000000 },
{ 0x0000aa48, 0x00000000, 0x00000000, 0x00058284, 0x00058284, 0x00000000 },
{ 0x0000aa4c, 0x00000000, 0x00000000, 0x00058288, 0x00058288, 0x00000000 },
- { 0x0000aa50, 0x00000000, 0x00000000, 0x00058220, 0x00058220, 0x00000000 },
+ { 0x0000aa50, 0x00000000, 0x00000000, 0x00058224, 0x00058224, 0x00000000 },
{ 0x0000aa54, 0x00000000, 0x00000000, 0x00058290, 0x00058290, 0x00000000 },
{ 0x0000aa58, 0x00000000, 0x00000000, 0x00058300, 0x00058300, 0x00000000 },
{ 0x0000aa5c, 0x00000000, 0x00000000, 0x00058304, 0x00058304, 0x00000000 },
@@ -4326,8 +2353,8 @@ static const u_int32_t ar9285Modes_9285_1_2[][6] = {
{ 0x0000aa7c, 0x00000000, 0x00000000, 0x0006870c, 0x0006870c, 0x00000000 },
{ 0x0000aa80, 0x00000000, 0x00000000, 0x00068780, 0x00068780, 0x00000000 },
{ 0x0000aa84, 0x00000000, 0x00000000, 0x00068784, 0x00068784, 0x00000000 },
- { 0x0000aa88, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04, 0x00000000 },
- { 0x0000aa8c, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 },
+ { 0x0000aa88, 0x00000000, 0x00000000, 0x00078b00, 0x00078b00, 0x00000000 },
+ { 0x0000aa8c, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04, 0x00000000 },
{ 0x0000aa90, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 },
{ 0x0000aa94, 0x00000000, 0x00000000, 0x00078b0c, 0x00078b0c, 0x00000000 },
{ 0x0000aa98, 0x00000000, 0x00000000, 0x00078b80, 0x00078b80, 0x00000000 },
@@ -4429,7 +2456,7 @@ static const u_int32_t ar9285Modes_9285_1_2[][6] = {
{ 0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e, 0x7999aa0e },
};
-static const u_int32_t ar9285Common_9285_1_2[][2] = {
+static const u32 ar9285Common_9285_1_2[][2] = {
{ 0x0000000c, 0x00000000 },
{ 0x00000030, 0x00020045 },
{ 0x00000034, 0x00000005 },
@@ -4731,17 +2758,12 @@ static const u_int32_t ar9285Common_9285_1_2[][2] = {
{ 0x00007808, 0x54214514 },
{ 0x0000780c, 0x02025830 },
{ 0x00007810, 0x71c0d388 },
- { 0x00007814, 0x924934a8 },
{ 0x0000781c, 0x00000000 },
{ 0x00007824, 0x00d86fff },
- { 0x00007828, 0x26d2491b },
{ 0x0000782c, 0x6e36d97b },
- { 0x00007830, 0xedb6d96e },
{ 0x00007834, 0x71400087 },
- { 0x0000783c, 0x0001fffe },
- { 0x00007840, 0xffeb1a20 },
{ 0x00007844, 0x000c0db6 },
- { 0x00007848, 0x6db61b6f },
+ { 0x00007848, 0x6db6246f },
{ 0x0000784c, 0x6d9b66db },
{ 0x00007850, 0x6d8c6dba },
{ 0x00007854, 0x00040000 },
@@ -4753,7 +2775,7 @@ static const u_int32_t ar9285Common_9285_1_2[][2] = {
{ 0x00007870, 0x10142c00 },
};
-static const u_int32_t ar9285Modes_high_power_tx_gain_9285_1_2[][6] = {
+static const u32 ar9285Modes_high_power_tx_gain_9285_1_2[][6] = {
/* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */
{ 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
{ 0x0000a304, 0x00000000, 0x00000000, 0x00006200, 0x00006200, 0x00000000 },
@@ -4777,7 +2799,12 @@ static const u_int32_t ar9285Modes_high_power_tx_gain_9285_1_2[][6] = {
{ 0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
{ 0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
{ 0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x00007814, 0x924934a8, 0x924934a8, 0x924934a8, 0x924934a8, 0x924934a8 },
+ { 0x00007828, 0x26d2491b, 0x26d2491b, 0x26d2491b, 0x26d2491b, 0x26d2491b },
+ { 0x00007830, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e },
{ 0x00007838, 0xfac68803, 0xfac68803, 0xfac68803, 0xfac68803, 0xfac68803 },
+ { 0x0000783c, 0x0001fffe, 0x0001fffe, 0x0001fffe, 0x0001fffe, 0x0001fffe },
+ { 0x00007840, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20 },
{ 0x0000786c, 0x08609ebe, 0x08609ebe, 0x08609ebe, 0x08609ebe, 0x08609ebe },
{ 0x00007820, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00 },
{ 0x0000a274, 0x0a22a652, 0x0a22a652, 0x0a216652, 0x0a216652, 0x0a22a652 },
@@ -4789,7 +2816,7 @@ static const u_int32_t ar9285Modes_high_power_tx_gain_9285_1_2[][6] = {
{ 0x0000a3e0, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7 },
};
-static const u_int32_t ar9285Modes_original_tx_gain_9285_1_2[][6] = {
+static const u32 ar9285Modes_original_tx_gain_9285_1_2[][6] = {
/* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */
{ 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
{ 0x0000a304, 0x00000000, 0x00000000, 0x00009200, 0x00009200, 0x00000000 },
@@ -4813,7 +2840,52 @@ static const u_int32_t ar9285Modes_original_tx_gain_9285_1_2[][6] = {
{ 0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
{ 0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
{ 0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x00007814, 0x924934a8, 0x924934a8, 0x924934a8, 0x924934a8, 0x924934a8 },
+ { 0x00007828, 0x26d2491b, 0x26d2491b, 0x26d2491b, 0x26d2491b, 0x26d2491b },
+ { 0x00007830, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e },
{ 0x00007838, 0xfac68801, 0xfac68801, 0xfac68801, 0xfac68801, 0xfac68801 },
+ { 0x0000783c, 0x0001fffe, 0x0001fffe, 0x0001fffe, 0x0001fffe, 0x0001fffe },
+ { 0x00007840, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20 },
+ { 0x0000786c, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4 },
+ { 0x00007820, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04 },
+ { 0x0000a274, 0x0a21c652, 0x0a21c652, 0x0a21a652, 0x0a21a652, 0x0a22a652 },
+ { 0x0000a278, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c },
+ { 0x0000a27c, 0x050e039c, 0x050e039c, 0x050e039c, 0x050e039c, 0x050e039c },
+ { 0x0000a394, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c },
+ { 0x0000a398, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c },
+ { 0x0000a3dc, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c },
+ { 0x0000a3e0, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c },
+};
+
+static const u32 ar9285Modes_XE2_0_normal_power[][6] = {
+ { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x0000a304, 0x00000000, 0x00000000, 0x00009200, 0x00009200, 0x00000000 },
+ { 0x0000a308, 0x00000000, 0x00000000, 0x00010208, 0x00010208, 0x00000000 },
+ { 0x0000a30c, 0x00000000, 0x00000000, 0x00019608, 0x00019608, 0x00000000 },
+ { 0x0000a310, 0x00000000, 0x00000000, 0x00022618, 0x00022618, 0x00000000 },
+ { 0x0000a314, 0x00000000, 0x00000000, 0x0002a6c9, 0x0002a6c9, 0x00000000 },
+ { 0x0000a318, 0x00000000, 0x00000000, 0x00031710, 0x00031710, 0x00000000 },
+ { 0x0000a31c, 0x00000000, 0x00000000, 0x00035718, 0x00035718, 0x00000000 },
+ { 0x0000a320, 0x00000000, 0x00000000, 0x00038758, 0x00038758, 0x00000000 },
+ { 0x0000a324, 0x00000000, 0x00000000, 0x0003c75a, 0x0003c75a, 0x00000000 },
+ { 0x0000a328, 0x00000000, 0x00000000, 0x0004075c, 0x0004075c, 0x00000000 },
+ { 0x0000a32c, 0x00000000, 0x00000000, 0x0004475e, 0x0004475e, 0x00000000 },
+ { 0x0000a330, 0x00000000, 0x00000000, 0x0004679f, 0x0004679f, 0x00000000 },
+ { 0x0000a334, 0x00000000, 0x00000000, 0x000487df, 0x000487df, 0x00000000 },
+ { 0x0000a338, 0x0003891e, 0x0003891e, 0x0003891e, 0x0003891e, 0x00000000 },
+ { 0x0000a33c, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x00000000 },
+ { 0x0000a340, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a348, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x00007814, 0x92497ca8, 0x92497ca8, 0x92497ca8, 0x92497ca8, 0x92497ca8 },
+ { 0x00007828, 0x4ad2491b, 0x4ad2491b, 0x2ad2491b, 0x4ad2491b, 0x4ad2491b },
+ { 0x00007830, 0xedb6da6e, 0xedb6da6e, 0xedb6da6e, 0xedb6da6e, 0xedb6dbae },
+ { 0x00007838, 0xdac71441, 0xdac71441, 0xdac71441, 0xdac71441, 0xdac71441 },
+ { 0x0000783c, 0x2481f6fe, 0x2481f6fe, 0x2481f6fe, 0x2481f6fe, 0x2481f6fe },
+ { 0x00007840, 0xba5f638c, 0xba5f638c, 0xba5f638c, 0xba5f638c, 0xba5f638c },
{ 0x0000786c, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4 },
{ 0x00007820, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04 },
{ 0x0000a274, 0x0a21c652, 0x0a21c652, 0x0a21a652, 0x0a21a652, 0x0a22a652 },
@@ -4825,7 +2897,47 @@ static const u_int32_t ar9285Modes_original_tx_gain_9285_1_2[][6] = {
{ 0x0000a3e0, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c },
};
-static const u_int32_t ar9285PciePhy_clkreq_always_on_L1_9285_1_2[][2] = {
+static const u32 ar9285Modes_XE2_0_high_power[][6] = {
+ { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x0000a304, 0x00000000, 0x00000000, 0x00006200, 0x00006200, 0x00000000 },
+ { 0x0000a308, 0x00000000, 0x00000000, 0x00008201, 0x00008201, 0x00000000 },
+ { 0x0000a30c, 0x00000000, 0x00000000, 0x0000b240, 0x0000b240, 0x00000000 },
+ { 0x0000a310, 0x00000000, 0x00000000, 0x0000d241, 0x0000d241, 0x00000000 },
+ { 0x0000a314, 0x00000000, 0x00000000, 0x0000f600, 0x0000f600, 0x00000000 },
+ { 0x0000a318, 0x00000000, 0x00000000, 0x00012800, 0x00012800, 0x00000000 },
+ { 0x0000a31c, 0x00000000, 0x00000000, 0x00016802, 0x00016802, 0x00000000 },
+ { 0x0000a320, 0x00000000, 0x00000000, 0x0001b805, 0x0001b805, 0x00000000 },
+ { 0x0000a324, 0x00000000, 0x00000000, 0x00021a80, 0x00021a80, 0x00000000 },
+ { 0x0000a328, 0x00000000, 0x00000000, 0x00028b00, 0x00028b00, 0x00000000 },
+ { 0x0000a32c, 0x00000000, 0x00000000, 0x0002ab40, 0x0002ab40, 0x00000000 },
+ { 0x0000a330, 0x00000000, 0x00000000, 0x0002cd80, 0x0002cd80, 0x00000000 },
+ { 0x0000a334, 0x00000000, 0x00000000, 0x00033d82, 0x00033d82, 0x00000000 },
+ { 0x0000a338, 0x0003891e, 0x0003891e, 0x0003891e, 0x0003891e, 0x00000000 },
+ { 0x0000a33c, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x00000000 },
+ { 0x0000a340, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a348, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x00007814, 0x92497ca8, 0x92497ca8, 0x92497ca8, 0x92497ca8, 0x92497ca8 },
+ { 0x00007828, 0x4ad2491b, 0x4ad2491b, 0x2ad2491b, 0x4ad2491b, 0x4ad2491b },
+ { 0x00007830, 0xedb6da6e, 0xedb6da6e, 0xedb6da6e, 0xedb6da6e, 0xedb6da6e },
+ { 0x00007838, 0xdac71443, 0xdac71443, 0xdac71443, 0xdac71443, 0xdac71443 },
+ { 0x0000783c, 0x2481f6fe, 0x2481f6fe, 0x2481f6fe, 0x2481f6fe, 0x2481f6fe },
+ { 0x00007840, 0xba5f638c, 0xba5f638c, 0xba5f638c, 0xba5f638c, 0xba5f638c },
+ { 0x0000786c, 0x08609ebe, 0x08609ebe, 0x08609ebe, 0x08609ebe, 0x08609ebe },
+ { 0x00007820, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00 },
+ { 0x0000a274, 0x0a22a652, 0x0a22a652, 0x0a216652, 0x0a216652, 0x0a22a652 },
+ { 0x0000a278, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7 },
+ { 0x0000a27c, 0x050380e7, 0x050380e7, 0x050380e7, 0x050380e7, 0x050380e7 },
+ { 0x0000a394, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7 },
+ { 0x0000a398, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7 },
+ { 0x0000a3dc, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7 },
+ { 0x0000a3e0, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7 },
+};
+
+static const u32 ar9285PciePhy_clkreq_always_on_L1_9285_1_2[][2] = {
{0x00004040, 0x9248fd00 },
{0x00004040, 0x24924924 },
{0x00004040, 0xa8000019 },
@@ -4838,7 +2950,7 @@ static const u_int32_t ar9285PciePhy_clkreq_always_on_L1_9285_1_2[][2] = {
{0x00004044, 0x00000000 },
};
-static const u_int32_t ar9285PciePhy_clkreq_off_L1_9285_1_2[][2] = {
+static const u32 ar9285PciePhy_clkreq_off_L1_9285_1_2[][2] = {
{0x00004040, 0x9248fd00 },
{0x00004040, 0x24924924 },
{0x00004040, 0xa8000019 },
@@ -4852,7 +2964,7 @@ static const u_int32_t ar9285PciePhy_clkreq_off_L1_9285_1_2[][2] = {
};
/* AR9287 Revision 10 */
-static const u_int32_t ar9287Modes_9287_1_0[][6] = {
+static const u32 ar9287Modes_9287_1_0[][6] = {
/* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */
{ 0x00001030, 0x00000000, 0x00000000, 0x000002c0, 0x00000160, 0x000001e0 },
{ 0x00001070, 0x00000000, 0x00000000, 0x00000318, 0x0000018c, 0x000001e0 },
@@ -4899,7 +3011,7 @@ static const u_int32_t ar9287Modes_9287_1_0[][6] = {
{ 0x0000a3d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
};
-static const u_int32_t ar9287Common_9287_1_0[][2] = {
+static const u32 ar9287Common_9287_1_0[][2] = {
{ 0x0000000c, 0x00000000 },
{ 0x00000030, 0x00020015 },
{ 0x00000034, 0x00000005 },
@@ -5073,7 +3185,7 @@ static const u_int32_t ar9287Common_9287_1_0[][2] = {
{ 0x00008258, 0x00000000 },
{ 0x0000825c, 0x400000ff },
{ 0x00008260, 0x00080922 },
- { 0x00008264, 0xa8a00010 },
+ { 0x00008264, 0x88a00010 },
{ 0x00008270, 0x00000000 },
{ 0x00008274, 0x40000000 },
{ 0x00008278, 0x003e4180 },
@@ -5270,7 +3382,7 @@ static const u_int32_t ar9287Common_9287_1_0[][2] = {
{ 0x000078b8, 0x2a850160 },
};
-static const u_int32_t ar9287Modes_tx_gain_9287_1_0[][6] = {
+static const u32 ar9287Modes_tx_gain_9287_1_0[][6] = {
/* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */
{ 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
{ 0x0000a304, 0x00000000, 0x00000000, 0x00004002, 0x00004002, 0x00004002 },
@@ -5320,7 +3432,7 @@ static const u_int32_t ar9287Modes_tx_gain_9287_1_0[][6] = {
};
-static const u_int32_t ar9287Modes_rx_gain_9287_1_0[][6] = {
+static const u32 ar9287Modes_rx_gain_9287_1_0[][6] = {
/* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */
{ 0x00009a00, 0x00000000, 0x00000000, 0x0000a120, 0x0000a120, 0x0000a120 },
{ 0x00009a04, 0x00000000, 0x00000000, 0x0000a124, 0x0000a124, 0x0000a124 },
@@ -5582,7 +3694,7 @@ static const u_int32_t ar9287Modes_rx_gain_9287_1_0[][6] = {
{ 0x0000a848, 0x00000000, 0x00000000, 0x00001067, 0x00001067, 0x00001067 },
};
-static const u_int32_t ar9287PciePhy_clkreq_always_on_L1_9287_1_0[][2] = {
+static const u32 ar9287PciePhy_clkreq_always_on_L1_9287_1_0[][2] = {
{0x00004040, 0x9248fd00 },
{0x00004040, 0x24924924 },
{0x00004040, 0xa8000019 },
@@ -5595,7 +3707,7 @@ static const u_int32_t ar9287PciePhy_clkreq_always_on_L1_9287_1_0[][2] = {
{0x00004044, 0x00000000 },
};
-static const u_int32_t ar9287PciePhy_clkreq_off_L1_9287_1_0[][2] = {
+static const u32 ar9287PciePhy_clkreq_off_L1_9287_1_0[][2] = {
{0x00004040, 0x9248fd00 },
{0x00004040, 0x24924924 },
{0x00004040, 0xa8000019 },
@@ -5610,7 +3722,7 @@ static const u_int32_t ar9287PciePhy_clkreq_off_L1_9287_1_0[][2] = {
/* AR9287 Revision 11 */
-static const u_int32_t ar9287Modes_9287_1_1[][6] = {
+static const u32 ar9287Modes_9287_1_1[][6] = {
/* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */
{ 0x00001030, 0x00000000, 0x00000000, 0x000002c0, 0x00000160, 0x000001e0 },
{ 0x00001070, 0x00000000, 0x00000000, 0x00000318, 0x0000018c, 0x000001e0 },
@@ -5657,7 +3769,7 @@ static const u_int32_t ar9287Modes_9287_1_1[][6] = {
{ 0x0000a3d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
};
-static const u_int32_t ar9287Common_9287_1_1[][2] = {
+static const u32 ar9287Common_9287_1_1[][2] = {
{ 0x0000000c, 0x00000000 },
{ 0x00000030, 0x00020015 },
{ 0x00000034, 0x00000005 },
@@ -6027,21 +4139,22 @@ static const u_int32_t ar9287Common_9287_1_1[][2] = {
/*
* For Japanese regulatory requirements, 2484 MHz requires the following three
- * registers be programmed differently from the channel between 2412 and 2472 MHz.
+ * registers be programmed differently from the channel between 2412 and
+ * 2472 MHz.
*/
-static const u_int32_t ar9287Common_normal_cck_fir_coeff_92871_1[][2] = {
+static const u32 ar9287Common_normal_cck_fir_coeff_92871_1[][2] = {
{ 0x0000a1f4, 0x00fffeff },
{ 0x0000a1f8, 0x00f5f9ff },
{ 0x0000a1fc, 0xb79f6427 },
};
-static const u_int32_t ar9287Common_japan_2484_cck_fir_coeff_92871_1[][2] = {
+static const u32 ar9287Common_japan_2484_cck_fir_coeff_92871_1[][2] = {
{ 0x0000a1f4, 0x00000000 },
{ 0x0000a1f8, 0xefff0301 },
{ 0x0000a1fc, 0xca9228ee },
};
-static const u_int32_t ar9287Modes_tx_gain_9287_1_1[][6] = {
+static const u32 ar9287Modes_tx_gain_9287_1_1[][6] = {
/* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */
{ 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
{ 0x0000a304, 0x00000000, 0x00000000, 0x00004002, 0x00004002, 0x00004002 },
@@ -6090,7 +4203,7 @@ static const u_int32_t ar9287Modes_tx_gain_9287_1_1[][6] = {
{ 0x0000a274, 0x0a180000, 0x0a180000, 0x0a1aa000, 0x0a1aa000, 0x0a1aa000 },
};
-static const u_int32_t ar9287Modes_rx_gain_9287_1_1[][6] = {
+static const u32 ar9287Modes_rx_gain_9287_1_1[][6] = {
/* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */
{ 0x00009a00, 0x00000000, 0x00000000, 0x0000a120, 0x0000a120, 0x0000a120 },
{ 0x00009a04, 0x00000000, 0x00000000, 0x0000a124, 0x0000a124, 0x0000a124 },
@@ -6352,7 +4465,7 @@ static const u_int32_t ar9287Modes_rx_gain_9287_1_1[][6] = {
{ 0x0000a848, 0x00000000, 0x00000000, 0x00001067, 0x00001067, 0x00001067 },
};
-static const u_int32_t ar9287PciePhy_clkreq_always_on_L1_9287_1_1[][2] = {
+static const u32 ar9287PciePhy_clkreq_always_on_L1_9287_1_1[][2] = {
{0x00004040, 0x9248fd00 },
{0x00004040, 0x24924924 },
{0x00004040, 0xa8000019 },
@@ -6365,7 +4478,7 @@ static const u_int32_t ar9287PciePhy_clkreq_always_on_L1_9287_1_1[][2] = {
{0x00004044, 0x00000000 },
};
-static const u_int32_t ar9287PciePhy_clkreq_off_L1_9287_1_1[][2] = {
+static const u32 ar9287PciePhy_clkreq_off_L1_9287_1_1[][2] = {
{0x00004040, 0x9248fd00 },
{0x00004040, 0x24924924 },
{0x00004040, 0xa8000019 },
@@ -6380,7 +4493,7 @@ static const u_int32_t ar9287PciePhy_clkreq_off_L1_9287_1_1[][2] = {
/* AR9271 initialization values automatically created: 06/04/09 */
-static const u_int32_t ar9271Modes_9271[][6] = {
+static const u32 ar9271Modes_9271[][6] = {
{ 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
{ 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
{ 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 },
@@ -6441,7 +4554,7 @@ static const u_int32_t ar9271Modes_9271[][6] = {
{ 0x00009a44, 0x00000000, 0x00000000, 0x000581a8, 0x000581a8, 0x00000000 },
{ 0x00009a48, 0x00000000, 0x00000000, 0x00058284, 0x00058284, 0x00000000 },
{ 0x00009a4c, 0x00000000, 0x00000000, 0x00058288, 0x00058288, 0x00000000 },
- { 0x00009a50, 0x00000000, 0x00000000, 0x00058220, 0x00058220, 0x00000000 },
+ { 0x00009a50, 0x00000000, 0x00000000, 0x00058224, 0x00058224, 0x00000000 },
{ 0x00009a54, 0x00000000, 0x00000000, 0x00058290, 0x00058290, 0x00000000 },
{ 0x00009a58, 0x00000000, 0x00000000, 0x00058300, 0x00058300, 0x00000000 },
{ 0x00009a5c, 0x00000000, 0x00000000, 0x00058304, 0x00058304, 0x00000000 },
@@ -6455,8 +4568,8 @@ static const u_int32_t ar9271Modes_9271[][6] = {
{ 0x00009a7c, 0x00000000, 0x00000000, 0x0006870c, 0x0006870c, 0x00000000 },
{ 0x00009a80, 0x00000000, 0x00000000, 0x00068780, 0x00068780, 0x00000000 },
{ 0x00009a84, 0x00000000, 0x00000000, 0x00068784, 0x00068784, 0x00000000 },
- { 0x00009a88, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04, 0x00000000 },
- { 0x00009a8c, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 },
+ { 0x00009a88, 0x00000000, 0x00000000, 0x00078b00, 0x00078b00, 0x00000000 },
+ { 0x00009a8c, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04, 0x00000000 },
{ 0x00009a90, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 },
{ 0x00009a94, 0x00000000, 0x00000000, 0x00078b0c, 0x00078b0c, 0x00000000 },
{ 0x00009a98, 0x00000000, 0x00000000, 0x00078b80, 0x00078b80, 0x00000000 },
@@ -6569,7 +4682,7 @@ static const u_int32_t ar9271Modes_9271[][6] = {
{ 0x0000aa44, 0x00000000, 0x00000000, 0x000581a8, 0x000581a8, 0x00000000 },
{ 0x0000aa48, 0x00000000, 0x00000000, 0x00058284, 0x00058284, 0x00000000 },
{ 0x0000aa4c, 0x00000000, 0x00000000, 0x00058288, 0x00058288, 0x00000000 },
- { 0x0000aa50, 0x00000000, 0x00000000, 0x00058220, 0x00058220, 0x00000000 },
+ { 0x0000aa50, 0x00000000, 0x00000000, 0x00058224, 0x00058224, 0x00000000 },
{ 0x0000aa54, 0x00000000, 0x00000000, 0x00058290, 0x00058290, 0x00000000 },
{ 0x0000aa58, 0x00000000, 0x00000000, 0x00058300, 0x00058300, 0x00000000 },
{ 0x0000aa5c, 0x00000000, 0x00000000, 0x00058304, 0x00058304, 0x00000000 },
@@ -6583,8 +4696,8 @@ static const u_int32_t ar9271Modes_9271[][6] = {
{ 0x0000aa7c, 0x00000000, 0x00000000, 0x0006870c, 0x0006870c, 0x00000000 },
{ 0x0000aa80, 0x00000000, 0x00000000, 0x00068780, 0x00068780, 0x00000000 },
{ 0x0000aa84, 0x00000000, 0x00000000, 0x00068784, 0x00068784, 0x00000000 },
- { 0x0000aa88, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04, 0x00000000 },
- { 0x0000aa8c, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 },
+ { 0x0000aa88, 0x00000000, 0x00000000, 0x00078b00, 0x00078b00, 0x00000000 },
+ { 0x0000aa8c, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04, 0x00000000 },
{ 0x0000aa90, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 },
{ 0x0000aa94, 0x00000000, 0x00000000, 0x00078b0c, 0x00078b0c, 0x00000000 },
{ 0x0000aa98, 0x00000000, 0x00000000, 0x00078b80, 0x00078b80, 0x00000000 },
@@ -6683,29 +4796,10 @@ static const u_int32_t ar9271Modes_9271[][6] = {
{ 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a },
{ 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 },
{ 0x0000a250, 0x0004f000, 0x0004f000, 0x0004a000, 0x0004a000, 0x0004a000 },
- { 0x0000a274, 0x0a21c652, 0x0a21c652, 0x0a218652, 0x0a218652, 0x0a22a652 },
- { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
- { 0x0000a304, 0x00000000, 0x00000000, 0x00009200, 0x00009200, 0x00000000 },
- { 0x0000a308, 0x00000000, 0x00000000, 0x00010208, 0x00010208, 0x00000000 },
- { 0x0000a30c, 0x00000000, 0x00000000, 0x00019608, 0x00019608, 0x00000000 },
- { 0x0000a310, 0x00000000, 0x00000000, 0x0001e610, 0x0001e610, 0x00000000 },
- { 0x0000a314, 0x00000000, 0x00000000, 0x0002d6d0, 0x0002d6d0, 0x00000000 },
- { 0x0000a318, 0x00000000, 0x00000000, 0x00039758, 0x00039758, 0x00000000 },
- { 0x0000a31c, 0x00000000, 0x00000000, 0x0003b759, 0x0003b759, 0x00000000 },
- { 0x0000a320, 0x00000000, 0x00000000, 0x0003d75a, 0x0003d75a, 0x00000000 },
- { 0x0000a324, 0x00000000, 0x00000000, 0x0004175c, 0x0004175c, 0x00000000 },
- { 0x0000a328, 0x00000000, 0x00000000, 0x0004575e, 0x0004575e, 0x00000000 },
- { 0x0000a32c, 0x00000000, 0x00000000, 0x0004979f, 0x0004979f, 0x00000000 },
- { 0x0000a330, 0x00000000, 0x00000000, 0x0004d7df, 0x0004d7df, 0x00000000 },
- { 0x0000a334, 0x000368de, 0x000368de, 0x000368de, 0x000368de, 0x00000000 },
- { 0x0000a338, 0x0003891e, 0x0003891e, 0x0003891e, 0x0003891e, 0x00000000 },
- { 0x0000a33c, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x00000000 },
- { 0x0000a340, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
- { 0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
{ 0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e, 0x7999aa0e },
};
-static const u_int32_t ar9271Common_9271[][2] = {
+static const u32 ar9271Common_9271[][2] = {
{ 0x0000000c, 0x00000000 },
{ 0x00000030, 0x00020045 },
{ 0x00000034, 0x00000005 },
@@ -6910,13 +5004,10 @@ static const u_int32_t ar9271Common_9271[][2] = {
{ 0x00007810, 0x71c0d388 },
{ 0x00007814, 0x924934a8 },
{ 0x0000781c, 0x00000000 },
- { 0x00007820, 0x00000c04 },
- { 0x00007824, 0x00d8abff },
{ 0x00007828, 0x66964300 },
{ 0x0000782c, 0x8db6d961 },
{ 0x00007830, 0x8db6d96c },
{ 0x00007834, 0x6140008b },
- { 0x00007838, 0x00000029 },
{ 0x0000783c, 0x72ee0a72 },
{ 0x00007840, 0xbbfffffc },
{ 0x00007844, 0x000c0db6 },
@@ -6929,7 +5020,6 @@ static const u_int32_t ar9271Common_9271[][2] = {
{ 0x00007860, 0x21084210 },
{ 0x00007864, 0xf7d7ffde },
{ 0x00007868, 0xc2034080 },
- { 0x0000786c, 0x48609eb4 },
{ 0x00007870, 0x10142c00 },
{ 0x00009808, 0x00000000 },
{ 0x0000980c, 0xafe68e30 },
@@ -6982,9 +5072,6 @@ static const u_int32_t ar9271Common_9271[][2] = {
{ 0x000099e8, 0x3c466478 },
{ 0x000099ec, 0x0cc80caa },
{ 0x000099f0, 0x00000000 },
- { 0x0000a1f4, 0x00000000 },
- { 0x0000a1f8, 0x71733d01 },
- { 0x0000a1fc, 0xd0ad5c12 },
{ 0x0000a208, 0x803e68c8 },
{ 0x0000a210, 0x4080a333 },
{ 0x0000a214, 0x00206c10 },
@@ -7004,13 +5091,9 @@ static const u_int32_t ar9271Common_9271[][2] = {
{ 0x0000a260, 0xdfa90f01 },
{ 0x0000a268, 0x00000000 },
{ 0x0000a26c, 0x0ebae9e6 },
- { 0x0000a278, 0x3bdef7bd },
- { 0x0000a27c, 0x050e83bd },
{ 0x0000a388, 0x0c000000 },
{ 0x0000a38c, 0x20202020 },
{ 0x0000a390, 0x20202020 },
- { 0x0000a394, 0x3bdef7bd },
- { 0x0000a398, 0x000003bd },
{ 0x0000a39c, 0x00000001 },
{ 0x0000a3a0, 0x00000000 },
{ 0x0000a3a4, 0x00000000 },
@@ -7025,8 +5108,6 @@ static const u_int32_t ar9271Common_9271[][2] = {
{ 0x0000a3cc, 0x20202020 },
{ 0x0000a3d0, 0x20202020 },
{ 0x0000a3d4, 0x20202020 },
- { 0x0000a3dc, 0x3bdef7bd },
- { 0x0000a3e0, 0x000003bd },
{ 0x0000a3e4, 0x00000000 },
{ 0x0000a3e8, 0x18c43433 },
{ 0x0000a3ec, 0x00f70081 },
@@ -7046,7 +5127,104 @@ static const u_int32_t ar9271Common_9271[][2] = {
{ 0x0000d384, 0xf3307ff0 },
};
-static const u_int32_t ar9271Modes_9271_1_0_only[][6] = {
+static const u32 ar9271Common_normal_cck_fir_coeff_9271[][2] = {
+ { 0x0000a1f4, 0x00fffeff },
+ { 0x0000a1f8, 0x00f5f9ff },
+ { 0x0000a1fc, 0xb79f6427 },
+};
+
+static const u32 ar9271Common_japan_2484_cck_fir_coeff_9271[][2] = {
+ { 0x0000a1f4, 0x00000000 },
+ { 0x0000a1f8, 0xefff0301 },
+ { 0x0000a1fc, 0xca9228ee },
+};
+
+static const u32 ar9271Modes_9271_1_0_only[][6] = {
{ 0x00009910, 0x30002311, 0x30002311, 0x30002311, 0x30002311, 0x30002311 },
{ 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 },
};
+
+static const u32 ar9271Modes_9271_ANI_reg[][6] = {
+ { 0x00009850, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2 },
+ { 0x0000985c, 0x3139605e, 0x3139605e, 0x3137605e, 0x3137605e, 0x3139605e },
+ { 0x00009858, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e },
+ { 0x0000986c, 0x06903881, 0x06903881, 0x06903881, 0x06903881, 0x06903881 },
+ { 0x00009868, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0 },
+ { 0x0000a208, 0x803e68c8, 0x803e68c8, 0x803e68c8, 0x803e68c8, 0x803e68c8 },
+ { 0x00009924, 0xd00a8007, 0xd00a8007, 0xd00a800d, 0xd00a800d, 0xd00a800d },
+ { 0x000099c0, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4 },
+};
+
+static const u32 ar9271Modes_normal_power_tx_gain_9271[][6] = {
+ { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
+ { 0x0000a304, 0x00000000, 0x00000000, 0x00009200, 0x00009200, 0x00000000 },
+ { 0x0000a308, 0x00000000, 0x00000000, 0x00010208, 0x00010208, 0x00000000 },
+ { 0x0000a30c, 0x00000000, 0x00000000, 0x00019608, 0x00019608, 0x00000000 },
+ { 0x0000a310, 0x00000000, 0x00000000, 0x0001e610, 0x0001e610, 0x00000000 },
+ { 0x0000a314, 0x00000000, 0x00000000, 0x0002d6d0, 0x0002d6d0, 0x00000000 },
+ { 0x0000a318, 0x00000000, 0x00000000, 0x00039758, 0x00039758, 0x00000000 },
+ { 0x0000a31c, 0x00000000, 0x00000000, 0x0003b759, 0x0003b759, 0x00000000 },
+ { 0x0000a320, 0x00000000, 0x00000000, 0x0003d75a, 0x0003d75a, 0x00000000 },
+ { 0x0000a324, 0x00000000, 0x00000000, 0x0004175c, 0x0004175c, 0x00000000 },
+ { 0x0000a328, 0x00000000, 0x00000000, 0x0004575e, 0x0004575e, 0x00000000 },
+ { 0x0000a32c, 0x00000000, 0x00000000, 0x0004979f, 0x0004979f, 0x00000000 },
+ { 0x0000a330, 0x00000000, 0x00000000, 0x0004d7df, 0x0004d7df, 0x00000000 },
+ { 0x0000a334, 0x000368de, 0x000368de, 0x000368de, 0x000368de, 0x00000000 },
+ { 0x0000a338, 0x0003891e, 0x0003891e, 0x0003891e, 0x0003891e, 0x00000000 },
+ { 0x0000a33c, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x00000000 },
+ { 0x0000a340, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a348, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x00007838, 0x00000029, 0x00000029, 0x00000029, 0x00000029, 0x00000029 },
+ { 0x00007824, 0x00d8abff, 0x00d8abff, 0x00d8abff, 0x00d8abff, 0x00d8abff },
+ { 0x0000786c, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4 },
+ { 0x00007820, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04 },
+ { 0x0000a274, 0x0a21c652, 0x0a21c652, 0x0a218652, 0x0a218652, 0x0a22a652 },
+ { 0x0000a278, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd },
+ { 0x0000a27c, 0x050e83bd, 0x050e83bd, 0x050e83bd, 0x050e83bd, 0x050e83bd },
+ { 0x0000a394, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd },
+ { 0x0000a398, 0x000003bd, 0x000003bd, 0x000003bd, 0x000003bd, 0x000003bd },
+ { 0x0000a3dc, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd },
+ { 0x0000a3e0, 0x000003bd, 0x000003bd, 0x000003bd, 0x000003bd, 0x000003bd },
+};
+
+static const u32 ar9271Modes_high_power_tx_gain_9271[][6] = {
+ { 0x0000a300, 0x00000000, 0x00000000, 0x00010000, 0x00010000, 0x00000000 },
+ { 0x0000a304, 0x00000000, 0x00000000, 0x00016200, 0x00016200, 0x00000000 },
+ { 0x0000a308, 0x00000000, 0x00000000, 0x00018201, 0x00018201, 0x00000000 },
+ { 0x0000a30c, 0x00000000, 0x00000000, 0x0001b240, 0x0001b240, 0x00000000 },
+ { 0x0000a310, 0x00000000, 0x00000000, 0x0001d241, 0x0001d241, 0x00000000 },
+ { 0x0000a314, 0x00000000, 0x00000000, 0x0001f600, 0x0001f600, 0x00000000 },
+ { 0x0000a318, 0x00000000, 0x00000000, 0x00022800, 0x00022800, 0x00000000 },
+ { 0x0000a31c, 0x00000000, 0x00000000, 0x00026802, 0x00026802, 0x00000000 },
+ { 0x0000a320, 0x00000000, 0x00000000, 0x0002b805, 0x0002b805, 0x00000000 },
+ { 0x0000a324, 0x00000000, 0x00000000, 0x0002ea41, 0x0002ea41, 0x00000000 },
+ { 0x0000a328, 0x00000000, 0x00000000, 0x00038b00, 0x00038b00, 0x00000000 },
+ { 0x0000a32c, 0x00000000, 0x00000000, 0x0003ab40, 0x0003ab40, 0x00000000 },
+ { 0x0000a330, 0x00000000, 0x00000000, 0x0003cd80, 0x0003cd80, 0x00000000 },
+ { 0x0000a334, 0x000368de, 0x000368de, 0x000368de, 0x000368de, 0x00000000 },
+ { 0x0000a338, 0x0003891e, 0x0003891e, 0x0003891e, 0x0003891e, 0x00000000 },
+ { 0x0000a33c, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x00000000 },
+ { 0x0000a340, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a348, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
+ { 0x00007838, 0x0000002b, 0x0000002b, 0x0000002b, 0x0000002b, 0x0000002b },
+ { 0x00007824, 0x00d8a7ff, 0x00d8a7ff, 0x00d8a7ff, 0x00d8a7ff, 0x00d8a7ff },
+ { 0x0000786c, 0x08609eb6, 0x08609eb6, 0x08609eba, 0x08609eba, 0x08609eb6 },
+ { 0x00007820, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00 },
+ { 0x0000a274, 0x0a22a652, 0x0a22a652, 0x0a212652, 0x0a212652, 0x0a22a652 },
+ { 0x0000a278, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7 },
+ { 0x0000a27c, 0x05018063, 0x05038063, 0x05018063, 0x05018063, 0x05018063 },
+ { 0x0000a394, 0x06318c63, 0x06318c63, 0x06318c63, 0x06318c63, 0x06318c63 },
+ { 0x0000a398, 0x00000063, 0x00000063, 0x00000063, 0x00000063, 0x00000063 },
+ { 0x0000a3dc, 0x06318c63, 0x06318c63, 0x06318c63, 0x06318c63, 0x06318c63 },
+ { 0x0000a3e0, 0x00000063, 0x00000063, 0x00000063, 0x00000063, 0x00000063 },
+};
+
+#endif /* INITVALS_9002_10_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
new file mode 100644
index 000000000000..2be20d2070c4
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
@@ -0,0 +1,480 @@
+/*
+ * Copyright (c) 2008-2009 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "hw.h"
+
+#define AR_BufLen 0x00000fff
+
+static void ar9002_hw_rx_enable(struct ath_hw *ah)
+{
+ REG_WRITE(ah, AR_CR, AR_CR_RXE);
+}
+
+static void ar9002_hw_set_desc_link(void *ds, u32 ds_link)
+{
+ ((struct ath_desc*) ds)->ds_link = ds_link;
+}
+
+static void ar9002_hw_get_desc_link(void *ds, u32 **ds_link)
+{
+ *ds_link = &((struct ath_desc *)ds)->ds_link;
+}
+
+static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
+{
+ u32 isr = 0;
+ u32 mask2 = 0;
+ struct ath9k_hw_capabilities *pCap = &ah->caps;
+ u32 sync_cause = 0;
+ bool fatal_int = false;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (!AR_SREV_9100(ah)) {
+ if (REG_READ(ah, AR_INTR_ASYNC_CAUSE) & AR_INTR_MAC_IRQ) {
+ if ((REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M)
+ == AR_RTC_STATUS_ON) {
+ isr = REG_READ(ah, AR_ISR);
+ }
+ }
+
+ sync_cause = REG_READ(ah, AR_INTR_SYNC_CAUSE) &
+ AR_INTR_SYNC_DEFAULT;
+
+ *masked = 0;
+
+ if (!isr && !sync_cause)
+ return false;
+ } else {
+ *masked = 0;
+ isr = REG_READ(ah, AR_ISR);
+ }
+
+ if (isr) {
+ if (isr & AR_ISR_BCNMISC) {
+ u32 isr2;
+ isr2 = REG_READ(ah, AR_ISR_S2);
+ if (isr2 & AR_ISR_S2_TIM)
+ mask2 |= ATH9K_INT_TIM;
+ if (isr2 & AR_ISR_S2_DTIM)
+ mask2 |= ATH9K_INT_DTIM;
+ if (isr2 & AR_ISR_S2_DTIMSYNC)
+ mask2 |= ATH9K_INT_DTIMSYNC;
+ if (isr2 & (AR_ISR_S2_CABEND))
+ mask2 |= ATH9K_INT_CABEND;
+ if (isr2 & AR_ISR_S2_GTT)
+ mask2 |= ATH9K_INT_GTT;
+ if (isr2 & AR_ISR_S2_CST)
+ mask2 |= ATH9K_INT_CST;
+ if (isr2 & AR_ISR_S2_TSFOOR)
+ mask2 |= ATH9K_INT_TSFOOR;
+ }
+
+ isr = REG_READ(ah, AR_ISR_RAC);
+ if (isr == 0xffffffff) {
+ *masked = 0;
+ return false;
+ }
+
+ *masked = isr & ATH9K_INT_COMMON;
+
+ if (ah->config.rx_intr_mitigation) {
+ if (isr & (AR_ISR_RXMINTR | AR_ISR_RXINTM))
+ *masked |= ATH9K_INT_RX;
+ }
+
+ if (isr & (AR_ISR_RXOK | AR_ISR_RXERR))
+ *masked |= ATH9K_INT_RX;
+ if (isr &
+ (AR_ISR_TXOK | AR_ISR_TXDESC | AR_ISR_TXERR |
+ AR_ISR_TXEOL)) {
+ u32 s0_s, s1_s;
+
+ *masked |= ATH9K_INT_TX;
+
+ s0_s = REG_READ(ah, AR_ISR_S0_S);
+ ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXOK);
+ ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXDESC);
+
+ s1_s = REG_READ(ah, AR_ISR_S1_S);
+ ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXERR);
+ ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXEOL);
+ }
+
+ if (isr & AR_ISR_RXORN) {
+ ath_print(common, ATH_DBG_INTERRUPT,
+ "receive FIFO overrun interrupt\n");
+ }
+
+ if (!AR_SREV_9100(ah)) {
+ if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
+ u32 isr5 = REG_READ(ah, AR_ISR_S5_S);
+ if (isr5 & AR_ISR_S5_TIM_TIMER)
+ *masked |= ATH9K_INT_TIM_TIMER;
+ }
+ }
+
+ *masked |= mask2;
+ }
+
+ if (AR_SREV_9100(ah))
+ return true;
+
+ if (isr & AR_ISR_GENTMR) {
+ u32 s5_s;
+
+ s5_s = REG_READ(ah, AR_ISR_S5_S);
+ if (isr & AR_ISR_GENTMR) {
+ ah->intr_gen_timer_trigger =
+ MS(s5_s, AR_ISR_S5_GENTIMER_TRIG);
+
+ ah->intr_gen_timer_thresh =
+ MS(s5_s, AR_ISR_S5_GENTIMER_THRESH);
+
+ if (ah->intr_gen_timer_trigger)
+ *masked |= ATH9K_INT_GENTIMER;
+
+ }
+ }
+
+ if (sync_cause) {
+ fatal_int =
+ (sync_cause &
+ (AR_INTR_SYNC_HOST1_FATAL | AR_INTR_SYNC_HOST1_PERR))
+ ? true : false;
+
+ if (fatal_int) {
+ if (sync_cause & AR_INTR_SYNC_HOST1_FATAL) {
+ ath_print(common, ATH_DBG_ANY,
+ "received PCI FATAL interrupt\n");
+ }
+ if (sync_cause & AR_INTR_SYNC_HOST1_PERR) {
+ ath_print(common, ATH_DBG_ANY,
+ "received PCI PERR interrupt\n");
+ }
+ *masked |= ATH9K_INT_FATAL;
+ }
+ if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT) {
+ ath_print(common, ATH_DBG_INTERRUPT,
+ "AR_INTR_SYNC_RADM_CPL_TIMEOUT\n");
+ REG_WRITE(ah, AR_RC, AR_RC_HOSTIF);
+ REG_WRITE(ah, AR_RC, 0);
+ *masked |= ATH9K_INT_FATAL;
+ }
+ if (sync_cause & AR_INTR_SYNC_LOCAL_TIMEOUT) {
+ ath_print(common, ATH_DBG_INTERRUPT,
+ "AR_INTR_SYNC_LOCAL_TIMEOUT\n");
+ }
+
+ REG_WRITE(ah, AR_INTR_SYNC_CAUSE_CLR, sync_cause);
+ (void) REG_READ(ah, AR_INTR_SYNC_CAUSE_CLR);
+ }
+
+ return true;
+}
+
+static void ar9002_hw_fill_txdesc(struct ath_hw *ah, void *ds, u32 seglen,
+ bool is_firstseg, bool is_lastseg,
+ const void *ds0, dma_addr_t buf_addr,
+ unsigned int qcu)
+{
+ struct ar5416_desc *ads = AR5416DESC(ds);
+
+ ads->ds_data = buf_addr;
+
+ if (is_firstseg) {
+ ads->ds_ctl1 |= seglen | (is_lastseg ? 0 : AR_TxMore);
+ } else if (is_lastseg) {
+ ads->ds_ctl0 = 0;
+ ads->ds_ctl1 = seglen;
+ ads->ds_ctl2 = AR5416DESC_CONST(ds0)->ds_ctl2;
+ ads->ds_ctl3 = AR5416DESC_CONST(ds0)->ds_ctl3;
+ } else {
+ ads->ds_ctl0 = 0;
+ ads->ds_ctl1 = seglen | AR_TxMore;
+ ads->ds_ctl2 = 0;
+ ads->ds_ctl3 = 0;
+ }
+ ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
+ ads->ds_txstatus2 = ads->ds_txstatus3 = 0;
+ ads->ds_txstatus4 = ads->ds_txstatus5 = 0;
+ ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
+ ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
+}
+
+static int ar9002_hw_proc_txdesc(struct ath_hw *ah, void *ds,
+ struct ath_tx_status *ts)
+{
+ struct ar5416_desc *ads = AR5416DESC(ds);
+
+ if ((ads->ds_txstatus9 & AR_TxDone) == 0)
+ return -EINPROGRESS;
+
+ ts->ts_seqnum = MS(ads->ds_txstatus9, AR_SeqNum);
+ ts->ts_tstamp = ads->AR_SendTimestamp;
+ ts->ts_status = 0;
+ ts->ts_flags = 0;
+
+ if (ads->ds_txstatus1 & AR_FrmXmitOK)
+ ts->ts_status |= ATH9K_TX_ACKED;
+ if (ads->ds_txstatus1 & AR_ExcessiveRetries)
+ ts->ts_status |= ATH9K_TXERR_XRETRY;
+ if (ads->ds_txstatus1 & AR_Filtered)
+ ts->ts_status |= ATH9K_TXERR_FILT;
+ if (ads->ds_txstatus1 & AR_FIFOUnderrun) {
+ ts->ts_status |= ATH9K_TXERR_FIFO;
+ ath9k_hw_updatetxtriglevel(ah, true);
+ }
+ if (ads->ds_txstatus9 & AR_TxOpExceeded)
+ ts->ts_status |= ATH9K_TXERR_XTXOP;
+ if (ads->ds_txstatus1 & AR_TxTimerExpired)
+ ts->ts_status |= ATH9K_TXERR_TIMER_EXPIRED;
+
+ if (ads->ds_txstatus1 & AR_DescCfgErr)
+ ts->ts_flags |= ATH9K_TX_DESC_CFG_ERR;
+ if (ads->ds_txstatus1 & AR_TxDataUnderrun) {
+ ts->ts_flags |= ATH9K_TX_DATA_UNDERRUN;
+ ath9k_hw_updatetxtriglevel(ah, true);
+ }
+ if (ads->ds_txstatus1 & AR_TxDelimUnderrun) {
+ ts->ts_flags |= ATH9K_TX_DELIM_UNDERRUN;
+ ath9k_hw_updatetxtriglevel(ah, true);
+ }
+ if (ads->ds_txstatus0 & AR_TxBaStatus) {
+ ts->ts_flags |= ATH9K_TX_BA;
+ ts->ba_low = ads->AR_BaBitmapLow;
+ ts->ba_high = ads->AR_BaBitmapHigh;
+ }
+
+ ts->ts_rateindex = MS(ads->ds_txstatus9, AR_FinalTxIdx);
+ switch (ts->ts_rateindex) {
+ case 0:
+ ts->ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate0);
+ break;
+ case 1:
+ ts->ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate1);
+ break;
+ case 2:
+ ts->ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate2);
+ break;
+ case 3:
+ ts->ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate3);
+ break;
+ }
+
+ ts->ts_rssi = MS(ads->ds_txstatus5, AR_TxRSSICombined);
+ ts->ts_rssi_ctl0 = MS(ads->ds_txstatus0, AR_TxRSSIAnt00);
+ ts->ts_rssi_ctl1 = MS(ads->ds_txstatus0, AR_TxRSSIAnt01);
+ ts->ts_rssi_ctl2 = MS(ads->ds_txstatus0, AR_TxRSSIAnt02);
+ ts->ts_rssi_ext0 = MS(ads->ds_txstatus5, AR_TxRSSIAnt10);
+ ts->ts_rssi_ext1 = MS(ads->ds_txstatus5, AR_TxRSSIAnt11);
+ ts->ts_rssi_ext2 = MS(ads->ds_txstatus5, AR_TxRSSIAnt12);
+ ts->evm0 = ads->AR_TxEVM0;
+ ts->evm1 = ads->AR_TxEVM1;
+ ts->evm2 = ads->AR_TxEVM2;
+ ts->ts_shortretry = MS(ads->ds_txstatus1, AR_RTSFailCnt);
+ ts->ts_longretry = MS(ads->ds_txstatus1, AR_DataFailCnt);
+ ts->ts_virtcol = MS(ads->ds_txstatus1, AR_VirtRetryCnt);
+ ts->ts_antenna = 0;
+
+ return 0;
+}
+
+static void ar9002_hw_set11n_txdesc(struct ath_hw *ah, void *ds,
+ u32 pktLen, enum ath9k_pkt_type type,
+ u32 txPower, u32 keyIx,
+ enum ath9k_key_type keyType, u32 flags)
+{
+ struct ar5416_desc *ads = AR5416DESC(ds);
+
+ txPower += ah->txpower_indexoffset;
+ if (txPower > 63)
+ txPower = 63;
+
+ ads->ds_ctl0 = (pktLen & AR_FrameLen)
+ | (flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
+ | SM(txPower, AR_XmitPower)
+ | (flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
+ | (flags & ATH9K_TXDESC_CLRDMASK ? AR_ClrDestMask : 0)
+ | (flags & ATH9K_TXDESC_INTREQ ? AR_TxIntrReq : 0)
+ | (keyIx != ATH9K_TXKEYIX_INVALID ? AR_DestIdxValid : 0);
+
+ ads->ds_ctl1 =
+ (keyIx != ATH9K_TXKEYIX_INVALID ? SM(keyIx, AR_DestIdx) : 0)
+ | SM(type, AR_FrameType)
+ | (flags & ATH9K_TXDESC_NOACK ? AR_NoAck : 0)
+ | (flags & ATH9K_TXDESC_EXT_ONLY ? AR_ExtOnly : 0)
+ | (flags & ATH9K_TXDESC_EXT_AND_CTL ? AR_ExtAndCtl : 0);
+
+ ads->ds_ctl6 = SM(keyType, AR_EncrType);
+
+ if (AR_SREV_9285(ah) || AR_SREV_9271(ah)) {
+ ads->ds_ctl8 = 0;
+ ads->ds_ctl9 = 0;
+ ads->ds_ctl10 = 0;
+ ads->ds_ctl11 = 0;
+ }
+}
+
+static void ar9002_hw_set11n_ratescenario(struct ath_hw *ah, void *ds,
+ void *lastds,
+ u32 durUpdateEn, u32 rtsctsRate,
+ u32 rtsctsDuration,
+ struct ath9k_11n_rate_series series[],
+ u32 nseries, u32 flags)
+{
+ struct ar5416_desc *ads = AR5416DESC(ds);
+ struct ar5416_desc *last_ads = AR5416DESC(lastds);
+ u32 ds_ctl0;
+
+ if (flags & (ATH9K_TXDESC_RTSENA | ATH9K_TXDESC_CTSENA)) {
+ ds_ctl0 = ads->ds_ctl0;
+
+ if (flags & ATH9K_TXDESC_RTSENA) {
+ ds_ctl0 &= ~AR_CTSEnable;
+ ds_ctl0 |= AR_RTSEnable;
+ } else {
+ ds_ctl0 &= ~AR_RTSEnable;
+ ds_ctl0 |= AR_CTSEnable;
+ }
+
+ ads->ds_ctl0 = ds_ctl0;
+ } else {
+ ads->ds_ctl0 =
+ (ads->ds_ctl0 & ~(AR_RTSEnable | AR_CTSEnable));
+ }
+
+ ads->ds_ctl2 = set11nTries(series, 0)
+ | set11nTries(series, 1)
+ | set11nTries(series, 2)
+ | set11nTries(series, 3)
+ | (durUpdateEn ? AR_DurUpdateEna : 0)
+ | SM(0, AR_BurstDur);
+
+ ads->ds_ctl3 = set11nRate(series, 0)
+ | set11nRate(series, 1)
+ | set11nRate(series, 2)
+ | set11nRate(series, 3);
+
+ ads->ds_ctl4 = set11nPktDurRTSCTS(series, 0)
+ | set11nPktDurRTSCTS(series, 1);
+
+ ads->ds_ctl5 = set11nPktDurRTSCTS(series, 2)
+ | set11nPktDurRTSCTS(series, 3);
+
+ ads->ds_ctl7 = set11nRateFlags(series, 0)
+ | set11nRateFlags(series, 1)
+ | set11nRateFlags(series, 2)
+ | set11nRateFlags(series, 3)
+ | SM(rtsctsRate, AR_RTSCTSRate);
+ last_ads->ds_ctl2 = ads->ds_ctl2;
+ last_ads->ds_ctl3 = ads->ds_ctl3;
+}
+
+static void ar9002_hw_set11n_aggr_first(struct ath_hw *ah, void *ds,
+ u32 aggrLen)
+{
+ struct ar5416_desc *ads = AR5416DESC(ds);
+
+ ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr);
+ ads->ds_ctl6 &= ~AR_AggrLen;
+ ads->ds_ctl6 |= SM(aggrLen, AR_AggrLen);
+}
+
+static void ar9002_hw_set11n_aggr_middle(struct ath_hw *ah, void *ds,
+ u32 numDelims)
+{
+ struct ar5416_desc *ads = AR5416DESC(ds);
+ unsigned int ctl6;
+
+ ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr);
+
+ ctl6 = ads->ds_ctl6;
+ ctl6 &= ~AR_PadDelim;
+ ctl6 |= SM(numDelims, AR_PadDelim);
+ ads->ds_ctl6 = ctl6;
+}
+
+static void ar9002_hw_set11n_aggr_last(struct ath_hw *ah, void *ds)
+{
+ struct ar5416_desc *ads = AR5416DESC(ds);
+
+ ads->ds_ctl1 |= AR_IsAggr;
+ ads->ds_ctl1 &= ~AR_MoreAggr;
+ ads->ds_ctl6 &= ~AR_PadDelim;
+}
+
+static void ar9002_hw_clr11n_aggr(struct ath_hw *ah, void *ds)
+{
+ struct ar5416_desc *ads = AR5416DESC(ds);
+
+ ads->ds_ctl1 &= (~AR_IsAggr & ~AR_MoreAggr);
+}
+
+static void ar9002_hw_set11n_burstduration(struct ath_hw *ah, void *ds,
+ u32 burstDuration)
+{
+ struct ar5416_desc *ads = AR5416DESC(ds);
+
+ ads->ds_ctl2 &= ~AR_BurstDur;
+ ads->ds_ctl2 |= SM(burstDuration, AR_BurstDur);
+}
+
+static void ar9002_hw_set11n_virtualmorefrag(struct ath_hw *ah, void *ds,
+ u32 vmf)
+{
+ struct ar5416_desc *ads = AR5416DESC(ds);
+
+ if (vmf)
+ ads->ds_ctl0 |= AR_VirtMoreFrag;
+ else
+ ads->ds_ctl0 &= ~AR_VirtMoreFrag;
+}
+
+void ath9k_hw_setuprxdesc(struct ath_hw *ah, struct ath_desc *ds,
+ u32 size, u32 flags)
+{
+ struct ar5416_desc *ads = AR5416DESC(ds);
+ struct ath9k_hw_capabilities *pCap = &ah->caps;
+
+ ads->ds_ctl1 = size & AR_BufLen;
+ if (flags & ATH9K_RXDESC_INTREQ)
+ ads->ds_ctl1 |= AR_RxIntrReq;
+
+ ads->ds_rxstatus8 &= ~AR_RxDone;
+ if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
+ memset(&(ads->u), 0, sizeof(ads->u));
+}
+EXPORT_SYMBOL(ath9k_hw_setuprxdesc);
+
+void ar9002_hw_attach_mac_ops(struct ath_hw *ah)
+{
+ struct ath_hw_ops *ops = ath9k_hw_ops(ah);
+
+ ops->rx_enable = ar9002_hw_rx_enable;
+ ops->set_desc_link = ar9002_hw_set_desc_link;
+ ops->get_desc_link = ar9002_hw_get_desc_link;
+ ops->get_isr = ar9002_hw_get_isr;
+ ops->fill_txdesc = ar9002_hw_fill_txdesc;
+ ops->proc_txdesc = ar9002_hw_proc_txdesc;
+ ops->set11n_txdesc = ar9002_hw_set11n_txdesc;
+ ops->set11n_ratescenario = ar9002_hw_set11n_ratescenario;
+ ops->set11n_aggr_first = ar9002_hw_set11n_aggr_first;
+ ops->set11n_aggr_middle = ar9002_hw_set11n_aggr_middle;
+ ops->set11n_aggr_last = ar9002_hw_set11n_aggr_last;
+ ops->clr11n_aggr = ar9002_hw_clr11n_aggr;
+ ops->set11n_burstduration = ar9002_hw_set11n_burstduration;
+ ops->set11n_virtualmorefrag = ar9002_hw_set11n_virtualmorefrag;
+}
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.c b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
new file mode 100644
index 000000000000..ed314e89bfe1
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
@@ -0,0 +1,535 @@
+/*
+ * Copyright (c) 2008-2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/**
+ * DOC: Programming Atheros 802.11n analog front end radios
+ *
+ * AR5416 MAC based PCI devices and AR5418 MAC based PCI-Express
+ * devices have either an external AR2133 analog front end radio for single
+ * band 2.4 GHz communication or an AR5133 analog front end radio for dual
+ * band 2.4 GHz / 5 GHz communication.
+ *
+ * All devices after the AR5416 and AR5418 family, starting with the AR9280,
+ * have their analog front end radio, MAC/BB and host PCIe/USB interface embedded
+ * into a single chip and require less programming.
+ *
+ * The following single-chips exist with a respective embedded radio:
+ *
+ * AR9280 - 11n dual-band 2x2 MIMO for PCIe
+ * AR9281 - 11n single-band 1x2 MIMO for PCIe
+ * AR9285 - 11n single-band 1x1 for PCIe
+ * AR9287 - 11n single-band 2x2 MIMO for PCIe
+ *
+ * AR9220 - 11n dual-band 2x2 MIMO for PCI
+ * AR9223 - 11n single-band 2x2 MIMO for PCI
+ *
+ * AR9271 - 11n single-band 1x1 MIMO for USB
+ */
+
+#include "hw.h"
+#include "ar9002_phy.h"
+
+/**
+ * ar9002_hw_set_channel - set channel on single-chip device
+ * @ah: atheros hardware structure
+ * @chan: channel to change to
+ *
+ * This is the function to change channel on single-chip devices, that is,
+ * all devices after the AR9280.
+ *
+ * This function takes the channel value in MHz and sets the
+ * hardware channel value. Assumes writes have been enabled to the analog bus.
+ *
+ * Actual Expression,
+ *
+ * For 2GHz channel,
+ * Channel Frequency = (3/4) * freq_ref * (chansel[8:0] + chanfrac[16:0]/2^17)
+ * (freq_ref = 40MHz)
+ *
+ * For 5GHz channel,
+ * Channel Frequency = (3/2) * freq_ref * (chansel[8:0] + chanfrac[16:0]/2^10)
+ * (freq_ref = 40MHz/(24>>amodeRefSel))
+ */
+static int ar9002_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
+{
+ u16 bMode, fracMode, aModeRefSel = 0;
+ u32 freq, ndiv, channelSel = 0, channelFrac = 0, reg32 = 0;
+ struct chan_centers centers;
+ u32 refDivA = 24;
+
+ ath9k_hw_get_channel_centers(ah, chan, &centers);
+ freq = centers.synth_center;
+
+ reg32 = REG_READ(ah, AR_PHY_SYNTH_CONTROL);
+ reg32 &= 0xc0000000;
+
+ if (freq < 4800) { /* 2 GHz, fractional mode */
+ u32 txctl;
+ int regWrites = 0;
+
+ bMode = 1;
+ fracMode = 1;
+ aModeRefSel = 0;
+ channelSel = CHANSEL_2G(freq);
+
+ if (AR_SREV_9287_11_OR_LATER(ah)) {
+ if (freq == 2484) {
+ /* Enable channel spreading for channel 14 */
+ REG_WRITE_ARRAY(&ah->iniCckfirJapan2484,
+ 1, regWrites);
+ } else {
+ REG_WRITE_ARRAY(&ah->iniCckfirNormal,
+ 1, regWrites);
+ }
+ } else {
+ txctl = REG_READ(ah, AR_PHY_CCK_TX_CTRL);
+ if (freq == 2484) {
+ /* Enable channel spreading for channel 14 */
+ REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
+ txctl | AR_PHY_CCK_TX_CTRL_JAPAN);
+ } else {
+ REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
+ txctl & ~AR_PHY_CCK_TX_CTRL_JAPAN);
+ }
+ }
+ } else {
+ bMode = 0;
+ fracMode = 0;
+
+ switch (ah->eep_ops->get_eeprom(ah, EEP_FRAC_N_5G)) {
+ case 0:
+ if ((freq % 20) == 0)
+ aModeRefSel = 3;
+ else if ((freq % 10) == 0)
+ aModeRefSel = 2;
+ if (aModeRefSel)
+ break;
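+ /* fall through to fractional mode when aModeRefSel stays 0 */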
+ case 1:
+ default:
+ aModeRefSel = 0;
+ /*
+ * Enable 2G (fractional) mode for channels
+ * which are 5MHz spaced.
+ */
+ fracMode = 1;
+ refDivA = 1;
+ channelSel = CHANSEL_5G(freq);
+
+ /* RefDivA setting */
+ REG_RMW_FIELD(ah, AR_AN_SYNTH9,
+ AR_AN_SYNTH9_REFDIVA, refDivA);
+
+ }
+
+ if (!fracMode) {
+ ndiv = (freq * (refDivA >> aModeRefSel)) / 60;
+ channelSel = ndiv & 0x1ff;
+ channelFrac = (ndiv & 0xfffffe00) * 2;
+ channelSel = (channelSel << 17) | channelFrac;
+ }
+ }
+
+ reg32 = reg32 |
+ (bMode << 29) |
+ (fracMode << 28) | (aModeRefSel << 26) | (channelSel);
+
+ REG_WRITE(ah, AR_PHY_SYNTH_CONTROL, reg32);
+
+ ah->curchan = chan;
+ ah->curchan_rad_index = -1;
+
+ return 0;
+}
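For reference, the integer-mode 5 GHz path above reduces to a few lines of arithmetic. The sketch below is a standalone illustration, not driver code; the function name is made up, and it assumes refDivA = 24 and aModeRefSel = 3 (the "freq % 20 == 0" case), while ignoring the two preserved top bits that the real code reads back from AR_PHY_SYNTH_CONTROL.

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: mirrors the 5 GHz integer-mode synthesizer math above. */
static uint32_t synth_word_5g_integer(uint32_t freq_mhz)
{
	uint32_t refDivA = 24, aModeRefSel = 3;	/* freq % 20 == 0 case */
	uint32_t bMode = 0, fracMode = 0;	/* 5 GHz, non-fractional */
	uint32_t ndiv = (freq_mhz * (refDivA >> aModeRefSel)) / 60;
	uint32_t channelSel = ndiv & 0x1ff;
	uint32_t channelFrac = (ndiv & 0xfffffe00) * 2;

	channelSel = (channelSel << 17) | channelFrac;
	return (bMode << 29) | (fracMode << 28) |
	       (aModeRefSel << 26) | channelSel;
}

int main(void)
{
	/* 5180 MHz (channel 36): ndiv = 5180 * 3 / 60 = 259, giving 0x0e060000 */
	printf("0x%08x\n", synth_word_5g_integer(5180));
	return 0;
}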
+
+/**
+ * ar9002_hw_spur_mitigate - convert baseband spur frequency
+ * @ah: atheros hardware structure
+ * @chan: channel to mitigate spurs for
+ *
+ * For single-chip solutions. Converts to baseband spur frequency given the
+ * input channel frequency and computes the register settings below.
+ */
+static void ar9002_hw_spur_mitigate(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ int bb_spur = AR_NO_SPUR;
+ int freq;
+ int bin, cur_bin;
+ int bb_spur_off, spur_subchannel_sd;
+ int spur_freq_sd;
+ int spur_delta_phase;
+ int denominator;
+ int upper, lower, cur_vit_mask;
+ int tmp, newVal;
+ int i;
+ int pilot_mask_reg[4] = { AR_PHY_TIMING7, AR_PHY_TIMING8,
+ AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60
+ };
+ int chan_mask_reg[4] = { AR_PHY_TIMING9, AR_PHY_TIMING10,
+ AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60
+ };
+ int inc[4] = { 0, 100, 0, 0 };
+ struct chan_centers centers;
+
+ int8_t mask_m[123];
+ int8_t mask_p[123];
+ int8_t mask_amt;
+ int tmp_mask;
+ int cur_bb_spur;
+ bool is2GHz = IS_CHAN_2GHZ(chan);
+
+ memset(&mask_m, 0, sizeof(int8_t) * 123);
+ memset(&mask_p, 0, sizeof(int8_t) * 123);
+
+ ath9k_hw_get_channel_centers(ah, chan, &centers);
+ freq = centers.synth_center;
+
+ ah->config.spurmode = SPUR_ENABLE_EEPROM;
+ for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
+ cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz);
+
+ if (is2GHz)
+ cur_bb_spur = (cur_bb_spur / 10) + AR_BASE_FREQ_2GHZ;
+ else
+ cur_bb_spur = (cur_bb_spur / 10) + AR_BASE_FREQ_5GHZ;
+
+ if (AR_NO_SPUR == cur_bb_spur)
+ break;
+ cur_bb_spur = cur_bb_spur - freq;
+
+ if (IS_CHAN_HT40(chan)) {
+ if ((cur_bb_spur > -AR_SPUR_FEEQ_BOUND_HT40) &&
+ (cur_bb_spur < AR_SPUR_FEEQ_BOUND_HT40)) {
+ bb_spur = cur_bb_spur;
+ break;
+ }
+ } else if ((cur_bb_spur > -AR_SPUR_FEEQ_BOUND_HT20) &&
+ (cur_bb_spur < AR_SPUR_FEEQ_BOUND_HT20)) {
+ bb_spur = cur_bb_spur;
+ break;
+ }
+ }
+
+ if (AR_NO_SPUR == bb_spur) {
+ REG_CLR_BIT(ah, AR_PHY_FORCE_CLKEN_CCK,
+ AR_PHY_FORCE_CLKEN_CCK_MRC_MUX);
+ return;
+ } else {
+ REG_CLR_BIT(ah, AR_PHY_FORCE_CLKEN_CCK,
+ AR_PHY_FORCE_CLKEN_CCK_MRC_MUX);
+ }
+
+ bin = bb_spur * 320;
+
+ tmp = REG_READ(ah, AR_PHY_TIMING_CTRL4(0));
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
+ newVal = tmp | (AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI |
+ AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER |
+ AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK |
+ AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK);
+ REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0), newVal);
+
+ newVal = (AR_PHY_SPUR_REG_MASK_RATE_CNTL |
+ AR_PHY_SPUR_REG_ENABLE_MASK_PPM |
+ AR_PHY_SPUR_REG_MASK_RATE_SELECT |
+ AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI |
+ SM(SPUR_RSSI_THRESH, AR_PHY_SPUR_REG_SPUR_RSSI_THRESH));
+ REG_WRITE(ah, AR_PHY_SPUR_REG, newVal);
+
+ if (IS_CHAN_HT40(chan)) {
+ if (bb_spur < 0) {
+ spur_subchannel_sd = 1;
+ bb_spur_off = bb_spur + 10;
+ } else {
+ spur_subchannel_sd = 0;
+ bb_spur_off = bb_spur - 10;
+ }
+ } else {
+ spur_subchannel_sd = 0;
+ bb_spur_off = bb_spur;
+ }
+
+ if (IS_CHAN_HT40(chan))
+ spur_delta_phase =
+ ((bb_spur * 262144) /
+ 10) & AR_PHY_TIMING11_SPUR_DELTA_PHASE;
+ else
+ spur_delta_phase =
+ ((bb_spur * 524288) /
+ 10) & AR_PHY_TIMING11_SPUR_DELTA_PHASE;
+
+ denominator = IS_CHAN_2GHZ(chan) ? 44 : 40;
+ spur_freq_sd = ((bb_spur_off * 2048) / denominator) & 0x3ff;
+
+ newVal = (AR_PHY_TIMING11_USE_SPUR_IN_AGC |
+ SM(spur_freq_sd, AR_PHY_TIMING11_SPUR_FREQ_SD) |
+ SM(spur_delta_phase, AR_PHY_TIMING11_SPUR_DELTA_PHASE));
+ REG_WRITE(ah, AR_PHY_TIMING11, newVal);
+
+ newVal = spur_subchannel_sd << AR_PHY_SFCORR_SPUR_SUBCHNL_SD_S;
+ REG_WRITE(ah, AR_PHY_SFCORR_EXT, newVal);
+
+ cur_bin = -6000;
+ upper = bin + 100;
+ lower = bin - 100;
+
+ for (i = 0; i < 4; i++) {
+ int pilot_mask = 0;
+ int chan_mask = 0;
+ int bp = 0;
+ for (bp = 0; bp < 30; bp++) {
+ if ((cur_bin > lower) && (cur_bin < upper)) {
+ pilot_mask = pilot_mask | 0x1 << bp;
+ chan_mask = chan_mask | 0x1 << bp;
+ }
+ cur_bin += 100;
+ }
+ cur_bin += inc[i];
+ REG_WRITE(ah, pilot_mask_reg[i], pilot_mask);
+ REG_WRITE(ah, chan_mask_reg[i], chan_mask);
+ }
+
+ cur_vit_mask = 6100;
+ upper = bin + 120;
+ lower = bin - 120;
+
+ for (i = 0; i < 123; i++) {
+ if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) {
+
+ /* workaround for gcc bug #37014 */
+ volatile int tmp_v = abs(cur_vit_mask - bin);
+
+ if (tmp_v < 75)
+ mask_amt = 1;
+ else
+ mask_amt = 0;
+ if (cur_vit_mask < 0)
+ mask_m[abs(cur_vit_mask / 100)] = mask_amt;
+ else
+ mask_p[cur_vit_mask / 100] = mask_amt;
+ }
+ cur_vit_mask -= 100;
+ }
+
+ tmp_mask = (mask_m[46] << 30) | (mask_m[47] << 28)
+ | (mask_m[48] << 26) | (mask_m[49] << 24)
+ | (mask_m[50] << 22) | (mask_m[51] << 20)
+ | (mask_m[52] << 18) | (mask_m[53] << 16)
+ | (mask_m[54] << 14) | (mask_m[55] << 12)
+ | (mask_m[56] << 10) | (mask_m[57] << 8)
+ | (mask_m[58] << 6) | (mask_m[59] << 4)
+ | (mask_m[60] << 2) | (mask_m[61] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK_1, tmp_mask);
+ REG_WRITE(ah, AR_PHY_VIT_MASK2_M_46_61, tmp_mask);
+
+ tmp_mask = (mask_m[31] << 28)
+ | (mask_m[32] << 26) | (mask_m[33] << 24)
+ | (mask_m[34] << 22) | (mask_m[35] << 20)
+ | (mask_m[36] << 18) | (mask_m[37] << 16)
+ | (mask_m[38] << 14) | (mask_m[39] << 12)
+ | (mask_m[40] << 10) | (mask_m[41] << 8)
+ | (mask_m[42] << 6) | (mask_m[43] << 4)
+ | (mask_m[44] << 2) | (mask_m[45] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK_2, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_M_31_45, tmp_mask);
+
+ tmp_mask = (mask_m[16] << 30) | (mask_m[16] << 28)
+ | (mask_m[18] << 26) | (mask_m[18] << 24)
+ | (mask_m[20] << 22) | (mask_m[20] << 20)
+ | (mask_m[22] << 18) | (mask_m[22] << 16)
+ | (mask_m[24] << 14) | (mask_m[24] << 12)
+ | (mask_m[25] << 10) | (mask_m[26] << 8)
+ | (mask_m[27] << 6) | (mask_m[28] << 4)
+ | (mask_m[29] << 2) | (mask_m[30] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK_3, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_M_16_30, tmp_mask);
+
+ tmp_mask = (mask_m[0] << 30) | (mask_m[1] << 28)
+ | (mask_m[2] << 26) | (mask_m[3] << 24)
+ | (mask_m[4] << 22) | (mask_m[5] << 20)
+ | (mask_m[6] << 18) | (mask_m[7] << 16)
+ | (mask_m[8] << 14) | (mask_m[9] << 12)
+ | (mask_m[10] << 10) | (mask_m[11] << 8)
+ | (mask_m[12] << 6) | (mask_m[13] << 4)
+ | (mask_m[14] << 2) | (mask_m[15] << 0);
+ REG_WRITE(ah, AR_PHY_MASK_CTL, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_M_00_15, tmp_mask);
+
+ tmp_mask = (mask_p[15] << 28)
+ | (mask_p[14] << 26) | (mask_p[13] << 24)
+ | (mask_p[12] << 22) | (mask_p[11] << 20)
+ | (mask_p[10] << 18) | (mask_p[9] << 16)
+ | (mask_p[8] << 14) | (mask_p[7] << 12)
+ | (mask_p[6] << 10) | (mask_p[5] << 8)
+ | (mask_p[4] << 6) | (mask_p[3] << 4)
+ | (mask_p[2] << 2) | (mask_p[1] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK2_1, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_P_15_01, tmp_mask);
+
+ tmp_mask = (mask_p[30] << 28)
+ | (mask_p[29] << 26) | (mask_p[28] << 24)
+ | (mask_p[27] << 22) | (mask_p[26] << 20)
+ | (mask_p[25] << 18) | (mask_p[24] << 16)
+ | (mask_p[23] << 14) | (mask_p[22] << 12)
+ | (mask_p[21] << 10) | (mask_p[20] << 8)
+ | (mask_p[19] << 6) | (mask_p[18] << 4)
+ | (mask_p[17] << 2) | (mask_p[16] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK2_2, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_P_30_16, tmp_mask);
+
+ tmp_mask = (mask_p[45] << 28)
+ | (mask_p[44] << 26) | (mask_p[43] << 24)
+ | (mask_p[42] << 22) | (mask_p[41] << 20)
+ | (mask_p[40] << 18) | (mask_p[39] << 16)
+ | (mask_p[38] << 14) | (mask_p[37] << 12)
+ | (mask_p[36] << 10) | (mask_p[35] << 8)
+ | (mask_p[34] << 6) | (mask_p[33] << 4)
+ | (mask_p[32] << 2) | (mask_p[31] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK2_3, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_P_45_31, tmp_mask);
+
+ tmp_mask = (mask_p[61] << 30) | (mask_p[60] << 28)
+ | (mask_p[59] << 26) | (mask_p[58] << 24)
+ | (mask_p[57] << 22) | (mask_p[56] << 20)
+ | (mask_p[55] << 18) | (mask_p[54] << 16)
+ | (mask_p[53] << 14) | (mask_p[52] << 12)
+ | (mask_p[51] << 10) | (mask_p[50] << 8)
+ | (mask_p[49] << 6) | (mask_p[48] << 4)
+ | (mask_p[47] << 2) | (mask_p[46] << 0);
+ REG_WRITE(ah, AR_PHY_BIN_MASK2_4, tmp_mask);
+ REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+}
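The pilot/channel mask construction above walks 4 x 30 bins spaced 100 apart, starting at -6000, and sets a bit wherever a bin falls within +/-100 of the spur bin. A self-contained sketch of just that windowing (illustrative only, not driver code):

#include <stdio.h>

/* Illustrative only: reproduces the mask-windowing loop above. */
static void show_spur_mask_bits(int bin)
{
	static const int inc[4] = { 0, 100, 0, 0 };
	int upper = bin + 100, lower = bin - 100;
	int cur_bin = -6000;
	int i, bp;

	for (i = 0; i < 4; i++) {
		unsigned int mask = 0;

		for (bp = 0; bp < 30; bp++) {
			if ((cur_bin > lower) && (cur_bin < upper))
				mask |= 1u << bp;
			cur_bin += 100;
		}
		cur_bin += inc[i];
		printf("pilot/chan mask %d = 0x%08x\n", i, mask);
	}
}

int main(void)
{
	/* e.g. bb_spur = -4 gives bin = -4 * 320 = -1280 */
	show_spur_mask_bits(-1280);
	return 0;
}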
+
+static void ar9002_olc_init(struct ath_hw *ah)
+{
+ u32 i;
+
+ if (!OLC_FOR_AR9280_20_LATER)
+ return;
+
+ if (OLC_FOR_AR9287_10_LATER) {
+ REG_SET_BIT(ah, AR_PHY_TX_PWRCTRL9,
+ AR_PHY_TX_PWRCTRL9_RES_DC_REMOVAL);
+ ath9k_hw_analog_shift_rmw(ah, AR9287_AN_TXPC0,
+ AR9287_AN_TXPC0_TXPCMODE,
+ AR9287_AN_TXPC0_TXPCMODE_S,
+ AR9287_AN_TXPC0_TXPCMODE_TEMPSENSE);
+ udelay(100);
+ } else {
+ for (i = 0; i < AR9280_TX_GAIN_TABLE_SIZE; i++)
+ ah->originalGain[i] =
+ MS(REG_READ(ah, AR_PHY_TX_GAIN_TBL1 + i * 4),
+ AR_PHY_TX_GAIN);
+ ah->PDADCdelta = 0;
+ }
+}
+
+static u32 ar9002_hw_compute_pll_control(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ u32 pll;
+
+ pll = SM(0x5, AR_RTC_9160_PLL_REFDIV);
+
+ if (chan && IS_CHAN_HALF_RATE(chan))
+ pll |= SM(0x1, AR_RTC_9160_PLL_CLKSEL);
+ else if (chan && IS_CHAN_QUARTER_RATE(chan))
+ pll |= SM(0x2, AR_RTC_9160_PLL_CLKSEL);
+
+ if (chan && IS_CHAN_5GHZ(chan)) {
+ if (IS_CHAN_A_FAST_CLOCK(ah, chan))
+ pll = 0x142c;
+ else if (AR_SREV_9280_20(ah))
+ pll = 0x2850;
+ else
+ pll |= SM(0x28, AR_RTC_9160_PLL_DIV);
+ } else {
+ pll |= SM(0x2c, AR_RTC_9160_PLL_DIV);
+ }
+
+ return pll;
+}
+
+static void ar9002_hw_do_getnf(struct ath_hw *ah,
+ int16_t nfarray[NUM_NF_READINGS])
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ int16_t nf;
+
+ nf = MS(REG_READ(ah, AR_PHY_CCA), AR9280_PHY_MINCCA_PWR);
+
+ if (nf & 0x100)
+ nf = 0 - ((nf ^ 0x1ff) + 1);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "NF calibrated [ctl] [chain 0] is %d\n", nf);
+
+ if (AR_SREV_9271(ah) && (nf >= -114))
+ nf = -116;
+
+ nfarray[0] = nf;
+
+ if (!AR_SREV_9285(ah) && !AR_SREV_9271(ah)) {
+ nf = MS(REG_READ(ah, AR_PHY_CH1_CCA),
+ AR9280_PHY_CH1_MINCCA_PWR);
+
+ if (nf & 0x100)
+ nf = 0 - ((nf ^ 0x1ff) + 1);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "NF calibrated [ctl] [chain 1] is %d\n", nf);
+ nfarray[1] = nf;
+ }
+
+ nf = MS(REG_READ(ah, AR_PHY_EXT_CCA), AR9280_PHY_EXT_MINCCA_PWR);
+ if (nf & 0x100)
+ nf = 0 - ((nf ^ 0x1ff) + 1);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "NF calibrated [ext] [chain 0] is %d\n", nf);
+
+ if (AR_SREV_9271(ah) && (nf >= -114))
+ nf = -116;
+
+ nfarray[3] = nf;
+
+ if (!AR_SREV_9285(ah) && !AR_SREV_9271(ah)) {
+ nf = MS(REG_READ(ah, AR_PHY_CH1_EXT_CCA),
+ AR9280_PHY_CH1_EXT_MINCCA_PWR);
+
+ if (nf & 0x100)
+ nf = 0 - ((nf ^ 0x1ff) + 1);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "NF calibrated [ext] [chain 1] is %d\n", nf);
+ nfarray[4] = nf;
+ }
+}
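The repeated "if (nf & 0x100) nf = 0 - ((nf ^ 0x1ff) + 1);" blocks above sign-extend 9-bit two's-complement CCA power readings. A minimal standalone equivalent, for reference only (the helper name is not part of the driver):

#include <assert.h>
#include <stdint.h>

/* Illustrative only: 9-bit two's-complement sign extension, as done above. */
static int16_t sign_extend_9bit(uint16_t raw)
{
	int16_t nf = raw & 0x1ff;

	if (nf & 0x100)
		nf = 0 - ((nf ^ 0x1ff) + 1);
	return nf;
}

int main(void)
{
	assert(sign_extend_9bit(0x1ff) == -1);
	assert(sign_extend_9bit(0x180) == -128);
	assert(sign_extend_9bit(0x0ff) == 255);
	return 0;
}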
+
+void ar9002_hw_attach_phy_ops(struct ath_hw *ah)
+{
+ struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
+
+ priv_ops->set_rf_regs = NULL;
+ priv_ops->rf_alloc_ext_banks = NULL;
+ priv_ops->rf_free_ext_banks = NULL;
+ priv_ops->rf_set_freq = ar9002_hw_set_channel;
+ priv_ops->spur_mitigate_freq = ar9002_hw_spur_mitigate;
+ priv_ops->olc_init = ar9002_olc_init;
+ priv_ops->compute_pll_control = ar9002_hw_compute_pll_control;
+ priv_ops->do_getnf = ar9002_hw_do_getnf;
+}
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.h b/drivers/net/wireless/ath/ath9k/ar9002_phy.h
new file mode 100644
index 000000000000..81bf6e5840e1
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.h
@@ -0,0 +1,572 @@
+/*
+ * Copyright (c) 2008-2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef AR9002_PHY_H
+#define AR9002_PHY_H
+
+#define AR_PHY_TEST 0x9800
+#define PHY_AGC_CLR 0x10000000
+#define RFSILENT_BB 0x00002000
+
+#define AR_PHY_TURBO 0x9804
+#define AR_PHY_FC_TURBO_MODE 0x00000001
+#define AR_PHY_FC_TURBO_SHORT 0x00000002
+#define AR_PHY_FC_DYN2040_EN 0x00000004
+#define AR_PHY_FC_DYN2040_PRI_ONLY 0x00000008
+#define AR_PHY_FC_DYN2040_PRI_CH 0x00000010
+/* For 25 MHz channel spacing -- not used but supported by hw */
+#define AR_PHY_FC_DYN2040_EXT_CH 0x00000020
+#define AR_PHY_FC_HT_EN 0x00000040
+#define AR_PHY_FC_SHORT_GI_40 0x00000080
+#define AR_PHY_FC_WALSH 0x00000100
+#define AR_PHY_FC_SINGLE_HT_LTF1 0x00000200
+#define AR_PHY_FC_ENABLE_DAC_FIFO 0x00000800
+
+#define AR_PHY_TEST2 0x9808
+
+#define AR_PHY_TIMING2 0x9810
+#define AR_PHY_TIMING3 0x9814
+#define AR_PHY_TIMING3_DSC_MAN 0xFFFE0000
+#define AR_PHY_TIMING3_DSC_MAN_S 17
+#define AR_PHY_TIMING3_DSC_EXP 0x0001E000
+#define AR_PHY_TIMING3_DSC_EXP_S 13
+
+#define AR_PHY_CHIP_ID_REV_0 0x80
+#define AR_PHY_CHIP_ID_REV_1 0x81
+#define AR_PHY_CHIP_ID_9160_REV_0 0xb0
+
+#define AR_PHY_ACTIVE 0x981C
+#define AR_PHY_ACTIVE_EN 0x00000001
+#define AR_PHY_ACTIVE_DIS 0x00000000
+
+#define AR_PHY_RF_CTL2 0x9824
+#define AR_PHY_TX_END_DATA_START 0x000000FF
+#define AR_PHY_TX_END_DATA_START_S 0
+#define AR_PHY_TX_END_PA_ON 0x0000FF00
+#define AR_PHY_TX_END_PA_ON_S 8
+
+#define AR_PHY_RF_CTL3 0x9828
+#define AR_PHY_TX_END_TO_A2_RX_ON 0x00FF0000
+#define AR_PHY_TX_END_TO_A2_RX_ON_S 16
+
+#define AR_PHY_ADC_CTL 0x982C
+#define AR_PHY_ADC_CTL_OFF_INBUFGAIN 0x00000003
+#define AR_PHY_ADC_CTL_OFF_INBUFGAIN_S 0
+#define AR_PHY_ADC_CTL_OFF_PWDDAC 0x00002000
+#define AR_PHY_ADC_CTL_OFF_PWDBANDGAP 0x00004000
+#define AR_PHY_ADC_CTL_OFF_PWDADC 0x00008000
+#define AR_PHY_ADC_CTL_ON_INBUFGAIN 0x00030000
+#define AR_PHY_ADC_CTL_ON_INBUFGAIN_S 16
+
+#define AR_PHY_ADC_SERIAL_CTL 0x9830
+#define AR_PHY_SEL_INTERNAL_ADDAC 0x00000000
+#define AR_PHY_SEL_EXTERNAL_RADIO 0x00000001
+
+#define AR_PHY_RF_CTL4 0x9834
+#define AR_PHY_RF_CTL4_TX_END_XPAB_OFF 0xFF000000
+#define AR_PHY_RF_CTL4_TX_END_XPAB_OFF_S 24
+#define AR_PHY_RF_CTL4_TX_END_XPAA_OFF 0x00FF0000
+#define AR_PHY_RF_CTL4_TX_END_XPAA_OFF_S 16
+#define AR_PHY_RF_CTL4_FRAME_XPAB_ON 0x0000FF00
+#define AR_PHY_RF_CTL4_FRAME_XPAB_ON_S 8
+#define AR_PHY_RF_CTL4_FRAME_XPAA_ON 0x000000FF
+#define AR_PHY_RF_CTL4_FRAME_XPAA_ON_S 0
+
+#define AR_PHY_TSTDAC_CONST 0x983c
+
+#define AR_PHY_SETTLING 0x9844
+#define AR_PHY_SETTLING_SWITCH 0x00003F80
+#define AR_PHY_SETTLING_SWITCH_S 7
+
+#define AR_PHY_RXGAIN 0x9848
+#define AR_PHY_RXGAIN_TXRX_ATTEN 0x0003F000
+#define AR_PHY_RXGAIN_TXRX_ATTEN_S 12
+#define AR_PHY_RXGAIN_TXRX_RF_MAX 0x007C0000
+#define AR_PHY_RXGAIN_TXRX_RF_MAX_S 18
+#define AR9280_PHY_RXGAIN_TXRX_ATTEN 0x00003F80
+#define AR9280_PHY_RXGAIN_TXRX_ATTEN_S 7
+#define AR9280_PHY_RXGAIN_TXRX_MARGIN 0x001FC000
+#define AR9280_PHY_RXGAIN_TXRX_MARGIN_S 14
+
+#define AR_PHY_DESIRED_SZ 0x9850
+#define AR_PHY_DESIRED_SZ_ADC 0x000000FF
+#define AR_PHY_DESIRED_SZ_ADC_S 0
+#define AR_PHY_DESIRED_SZ_PGA 0x0000FF00
+#define AR_PHY_DESIRED_SZ_PGA_S 8
+#define AR_PHY_DESIRED_SZ_TOT_DES 0x0FF00000
+#define AR_PHY_DESIRED_SZ_TOT_DES_S 20
+
+#define AR_PHY_FIND_SIG 0x9858
+#define AR_PHY_FIND_SIG_FIRSTEP 0x0003F000
+#define AR_PHY_FIND_SIG_FIRSTEP_S 12
+#define AR_PHY_FIND_SIG_FIRPWR 0x03FC0000
+#define AR_PHY_FIND_SIG_FIRPWR_S 18
+
+#define AR_PHY_AGC_CTL1 0x985C
+#define AR_PHY_AGC_CTL1_COARSE_LOW 0x00007F80
+#define AR_PHY_AGC_CTL1_COARSE_LOW_S 7
+#define AR_PHY_AGC_CTL1_COARSE_HIGH 0x003F8000
+#define AR_PHY_AGC_CTL1_COARSE_HIGH_S 15
+
+#define AR_PHY_CCA 0x9864
+#define AR_PHY_MINCCA_PWR 0x0FF80000
+#define AR_PHY_MINCCA_PWR_S 19
+#define AR_PHY_CCA_THRESH62 0x0007F000
+#define AR_PHY_CCA_THRESH62_S 12
+#define AR9280_PHY_MINCCA_PWR 0x1FF00000
+#define AR9280_PHY_MINCCA_PWR_S 20
+#define AR9280_PHY_CCA_THRESH62 0x000FF000
+#define AR9280_PHY_CCA_THRESH62_S 12
+
+#define AR_PHY_SFCORR_LOW 0x986C
+#define AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW 0x00000001
+#define AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW 0x00003F00
+#define AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW_S 8
+#define AR_PHY_SFCORR_LOW_M1_THRESH_LOW 0x001FC000
+#define AR_PHY_SFCORR_LOW_M1_THRESH_LOW_S 14
+#define AR_PHY_SFCORR_LOW_M2_THRESH_LOW 0x0FE00000
+#define AR_PHY_SFCORR_LOW_M2_THRESH_LOW_S 21
+
+#define AR_PHY_SFCORR 0x9868
+#define AR_PHY_SFCORR_M2COUNT_THR 0x0000001F
+#define AR_PHY_SFCORR_M2COUNT_THR_S 0
+#define AR_PHY_SFCORR_M1_THRESH 0x00FE0000
+#define AR_PHY_SFCORR_M1_THRESH_S 17
+#define AR_PHY_SFCORR_M2_THRESH 0x7F000000
+#define AR_PHY_SFCORR_M2_THRESH_S 24
+
+#define AR_PHY_SLEEP_CTR_CONTROL 0x9870
+#define AR_PHY_SLEEP_CTR_LIMIT 0x9874
+#define AR_PHY_SYNTH_CONTROL 0x9874
+#define AR_PHY_SLEEP_SCAL 0x9878
+
+#define AR_PHY_PLL_CTL 0x987c
+#define AR_PHY_PLL_CTL_40 0xaa
+#define AR_PHY_PLL_CTL_40_5413 0x04
+#define AR_PHY_PLL_CTL_44 0xab
+#define AR_PHY_PLL_CTL_44_2133 0xeb
+#define AR_PHY_PLL_CTL_40_2133 0xea
+
+#define AR_PHY_SPECTRAL_SCAN 0x9910 /* AR9280 spectral scan configuration register */
+#define AR_PHY_SPECTRAL_SCAN_ENABLE 0x1
+#define AR_PHY_SPECTRAL_SCAN_ENA 0x00000001 /* Enable spectral scan, reg 68, bit 0 */
+#define AR_PHY_SPECTRAL_SCAN_ENA_S 0 /* Enable spectral scan, reg 68, bit 0 */
+#define AR_PHY_SPECTRAL_SCAN_ACTIVE 0x00000002 /* Activate spectral scan reg 68, bit 1*/
+#define AR_PHY_SPECTRAL_SCAN_ACTIVE_S 1 /* Activate spectral scan reg 68, bit 1*/
+#define AR_PHY_SPECTRAL_SCAN_FFT_PERIOD 0x000000F0 /* Interval for FFT reports, reg 68, bits 4-7*/
+#define AR_PHY_SPECTRAL_SCAN_FFT_PERIOD_S 4
+#define AR_PHY_SPECTRAL_SCAN_PERIOD 0x0000FF00 /* Interval for FFT reports, reg 68, bits 8-15*/
+#define AR_PHY_SPECTRAL_SCAN_PERIOD_S 8
+#define AR_PHY_SPECTRAL_SCAN_COUNT 0x00FF0000 /* Number of reports, reg 68, bits 16-23*/
+#define AR_PHY_SPECTRAL_SCAN_COUNT_S 16
+#define AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT 0x01000000 /* Short repeat, reg 68, bit 24*/
+#define AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT_S 24 /* Short repeat, reg 68, bit 24*/
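Each field above, like the rest of this header, comes as a mask plus a matching _S shift; the driver packs and extracts such fields with its SM()/MS() macros. The generic helpers below only illustrate that pattern under that assumption and are not the driver's definitions:

#include <stdint.h>

/* Illustrative only: generic mask/_S field helpers (the driver uses SM/MS). */
static inline uint32_t field_set(uint32_t val, uint32_t mask, unsigned int shift)
{
	return (val << shift) & mask;
}

static inline uint32_t field_get(uint32_t reg, uint32_t mask, unsigned int shift)
{
	return (reg & mask) >> shift;
}

/* e.g. an FFT period of 4: field_set(4, 0x000000F0, 4) == 0x40 */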
+
+#define AR_PHY_RX_DELAY 0x9914
+#define AR_PHY_SEARCH_START_DELAY 0x9918
+#define AR_PHY_RX_DELAY_DELAY 0x00003FFF
+
+#define AR_PHY_TIMING_CTRL4(_i) (0x9920 + ((_i) << 12))
+#define AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF 0x01F
+#define AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF_S 0
+#define AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF 0x7E0
+#define AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF_S 5
+#define AR_PHY_TIMING_CTRL4_IQCORR_ENABLE 0x800
+#define AR_PHY_TIMING_CTRL4_IQCAL_LOG_COUNT_MAX 0xF000
+#define AR_PHY_TIMING_CTRL4_IQCAL_LOG_COUNT_MAX_S 12
+#define AR_PHY_TIMING_CTRL4_DO_CAL 0x10000
+
+#define AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI 0x80000000
+#define AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER 0x40000000
+#define AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK 0x20000000
+#define AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK 0x10000000
+
+#define AR_PHY_TIMING5 0x9924
+#define AR_PHY_TIMING5_CYCPWR_THR1 0x000000FE
+#define AR_PHY_TIMING5_CYCPWR_THR1_S 1
+
+#define AR_PHY_POWER_TX_RATE1 0x9934
+#define AR_PHY_POWER_TX_RATE2 0x9938
+#define AR_PHY_POWER_TX_RATE_MAX 0x993c
+#define AR_PHY_POWER_TX_RATE_MAX_TPC_ENABLE 0x00000040
+
+#define AR_PHY_FRAME_CTL 0x9944
+#define AR_PHY_FRAME_CTL_TX_CLIP 0x00000038
+#define AR_PHY_FRAME_CTL_TX_CLIP_S 3
+
+#define AR_PHY_TXPWRADJ 0x994C
+#define AR_PHY_TXPWRADJ_CCK_GAIN_DELTA 0x00000FC0
+#define AR_PHY_TXPWRADJ_CCK_GAIN_DELTA_S 6
+#define AR_PHY_TXPWRADJ_CCK_PCDAC_INDEX 0x00FC0000
+#define AR_PHY_TXPWRADJ_CCK_PCDAC_INDEX_S 18
+
+#define AR_PHY_RADAR_EXT 0x9940
+#define AR_PHY_RADAR_EXT_ENA 0x00004000
+
+#define AR_PHY_RADAR_0 0x9954
+#define AR_PHY_RADAR_0_ENA 0x00000001
+#define AR_PHY_RADAR_0_FFT_ENA 0x80000000
+#define AR_PHY_RADAR_0_INBAND 0x0000003e
+#define AR_PHY_RADAR_0_INBAND_S 1
+#define AR_PHY_RADAR_0_PRSSI 0x00000FC0
+#define AR_PHY_RADAR_0_PRSSI_S 6
+#define AR_PHY_RADAR_0_HEIGHT 0x0003F000
+#define AR_PHY_RADAR_0_HEIGHT_S 12
+#define AR_PHY_RADAR_0_RRSSI 0x00FC0000
+#define AR_PHY_RADAR_0_RRSSI_S 18
+#define AR_PHY_RADAR_0_FIRPWR 0x7F000000
+#define AR_PHY_RADAR_0_FIRPWR_S 24
+
+#define AR_PHY_RADAR_1 0x9958
+#define AR_PHY_RADAR_1_RELPWR_ENA 0x00800000
+#define AR_PHY_RADAR_1_USE_FIR128 0x00400000
+#define AR_PHY_RADAR_1_RELPWR_THRESH 0x003F0000
+#define AR_PHY_RADAR_1_RELPWR_THRESH_S 16
+#define AR_PHY_RADAR_1_BLOCK_CHECK 0x00008000
+#define AR_PHY_RADAR_1_MAX_RRSSI 0x00004000
+#define AR_PHY_RADAR_1_RELSTEP_CHECK 0x00002000
+#define AR_PHY_RADAR_1_RELSTEP_THRESH 0x00001F00
+#define AR_PHY_RADAR_1_RELSTEP_THRESH_S 8
+#define AR_PHY_RADAR_1_MAXLEN 0x000000FF
+#define AR_PHY_RADAR_1_MAXLEN_S 0
+
+#define AR_PHY_SWITCH_CHAIN_0 0x9960
+#define AR_PHY_SWITCH_COM 0x9964
+
+#define AR_PHY_SIGMA_DELTA 0x996C
+#define AR_PHY_SIGMA_DELTA_ADC_SEL 0x00000003
+#define AR_PHY_SIGMA_DELTA_ADC_SEL_S 0
+#define AR_PHY_SIGMA_DELTA_FILT2 0x000000F8
+#define AR_PHY_SIGMA_DELTA_FILT2_S 3
+#define AR_PHY_SIGMA_DELTA_FILT1 0x00001F00
+#define AR_PHY_SIGMA_DELTA_FILT1_S 8
+#define AR_PHY_SIGMA_DELTA_ADC_CLIP 0x01FFE000
+#define AR_PHY_SIGMA_DELTA_ADC_CLIP_S 13
+
+#define AR_PHY_RESTART 0x9970
+#define AR_PHY_RESTART_DIV_GC 0x001C0000
+#define AR_PHY_RESTART_DIV_GC_S 18
+
+#define AR_PHY_RFBUS_REQ 0x997C
+#define AR_PHY_RFBUS_REQ_EN 0x00000001
+
+#define AR_PHY_TIMING7 0x9980
+#define AR_PHY_TIMING8 0x9984
+#define AR_PHY_TIMING8_PILOT_MASK_2 0x000FFFFF
+#define AR_PHY_TIMING8_PILOT_MASK_2_S 0
+
+#define AR_PHY_BIN_MASK2_1 0x9988
+#define AR_PHY_BIN_MASK2_2 0x998c
+#define AR_PHY_BIN_MASK2_3 0x9990
+#define AR_PHY_BIN_MASK2_4 0x9994
+
+#define AR_PHY_BIN_MASK_1 0x9900
+#define AR_PHY_BIN_MASK_2 0x9904
+#define AR_PHY_BIN_MASK_3 0x9908
+
+#define AR_PHY_MASK_CTL 0x990c
+
+#define AR_PHY_BIN_MASK2_4_MASK_4 0x00003FFF
+#define AR_PHY_BIN_MASK2_4_MASK_4_S 0
+
+#define AR_PHY_TIMING9 0x9998
+#define AR_PHY_TIMING10 0x999c
+#define AR_PHY_TIMING10_PILOT_MASK_2 0x000FFFFF
+#define AR_PHY_TIMING10_PILOT_MASK_2_S 0
+
+#define AR_PHY_TIMING11 0x99a0
+#define AR_PHY_TIMING11_SPUR_DELTA_PHASE 0x000FFFFF
+#define AR_PHY_TIMING11_SPUR_DELTA_PHASE_S 0
+#define AR_PHY_TIMING11_USE_SPUR_IN_AGC 0x40000000
+#define AR_PHY_TIMING11_USE_SPUR_IN_SELFCOR 0x80000000
+
+#define AR_PHY_RX_CHAINMASK 0x99a4
+#define AR_PHY_NEW_ADC_DC_GAIN_CORR(_i) (0x99b4 + ((_i) << 12))
+#define AR_PHY_NEW_ADC_GAIN_CORR_ENABLE 0x40000000
+#define AR_PHY_NEW_ADC_DC_OFFSET_CORR_ENABLE 0x80000000
+
+#define AR_PHY_MULTICHAIN_GAIN_CTL 0x99ac
+#define AR_PHY_9285_ANT_DIV_CTL_ALL 0x7f000000
+#define AR_PHY_9285_ANT_DIV_CTL 0x01000000
+#define AR_PHY_9285_ANT_DIV_CTL_S 24
+#define AR_PHY_9285_ANT_DIV_ALT_LNACONF 0x06000000
+#define AR_PHY_9285_ANT_DIV_ALT_LNACONF_S 25
+#define AR_PHY_9285_ANT_DIV_MAIN_LNACONF 0x18000000
+#define AR_PHY_9285_ANT_DIV_MAIN_LNACONF_S 27
+#define AR_PHY_9285_ANT_DIV_ALT_GAINTB 0x20000000
+#define AR_PHY_9285_ANT_DIV_ALT_GAINTB_S 29
+#define AR_PHY_9285_ANT_DIV_MAIN_GAINTB 0x40000000
+#define AR_PHY_9285_ANT_DIV_MAIN_GAINTB_S 30
+#define AR_PHY_9285_ANT_DIV_LNA1 2
+#define AR_PHY_9285_ANT_DIV_LNA2 1
+#define AR_PHY_9285_ANT_DIV_LNA1_PLUS_LNA2 3
+#define AR_PHY_9285_ANT_DIV_LNA1_MINUS_LNA2 0
+#define AR_PHY_9285_ANT_DIV_GAINTB_0 0
+#define AR_PHY_9285_ANT_DIV_GAINTB_1 1
+
+#define AR_PHY_EXT_CCA0 0x99b8
+#define AR_PHY_EXT_CCA0_THRESH62 0x000000FF
+#define AR_PHY_EXT_CCA0_THRESH62_S 0
+
+#define AR_PHY_EXT_CCA 0x99bc
+#define AR_PHY_EXT_CCA_CYCPWR_THR1 0x0000FE00
+#define AR_PHY_EXT_CCA_CYCPWR_THR1_S 9
+#define AR_PHY_EXT_CCA_THRESH62 0x007F0000
+#define AR_PHY_EXT_CCA_THRESH62_S 16
+#define AR_PHY_EXT_MINCCA_PWR 0xFF800000
+#define AR_PHY_EXT_MINCCA_PWR_S 23
+#define AR9280_PHY_EXT_MINCCA_PWR 0x01FF0000
+#define AR9280_PHY_EXT_MINCCA_PWR_S 16
+
+#define AR_PHY_SFCORR_EXT 0x99c0
+#define AR_PHY_SFCORR_EXT_M1_THRESH 0x0000007F
+#define AR_PHY_SFCORR_EXT_M1_THRESH_S 0
+#define AR_PHY_SFCORR_EXT_M2_THRESH 0x00003F80
+#define AR_PHY_SFCORR_EXT_M2_THRESH_S 7
+#define AR_PHY_SFCORR_EXT_M1_THRESH_LOW 0x001FC000
+#define AR_PHY_SFCORR_EXT_M1_THRESH_LOW_S 14
+#define AR_PHY_SFCORR_EXT_M2_THRESH_LOW 0x0FE00000
+#define AR_PHY_SFCORR_EXT_M2_THRESH_LOW_S 21
+#define AR_PHY_SFCORR_SPUR_SUBCHNL_SD_S 28
+
+#define AR_PHY_HALFGI 0x99D0
+#define AR_PHY_HALFGI_DSC_MAN 0x0007FFF0
+#define AR_PHY_HALFGI_DSC_MAN_S 4
+#define AR_PHY_HALFGI_DSC_EXP 0x0000000F
+#define AR_PHY_HALFGI_DSC_EXP_S 0
+
+#define AR_PHY_CHAN_INFO_MEMORY 0x99DC
+#define AR_PHY_CHAN_INFO_MEMORY_CAPTURE_MASK 0x0001
+
+#define AR_PHY_HEAVY_CLIP_ENABLE 0x99E0
+
+#define AR_PHY_HEAVY_CLIP_FACTOR_RIFS 0x99EC
+#define AR_PHY_RIFS_INIT_DELAY 0x03ff0000
+
+#define AR_PHY_M_SLEEP 0x99f0
+#define AR_PHY_REFCLKDLY 0x99f4
+#define AR_PHY_REFCLKPD 0x99f8
+
+#define AR_PHY_CALMODE 0x99f0
+
+#define AR_PHY_CALMODE_IQ 0x00000000
+#define AR_PHY_CALMODE_ADC_GAIN 0x00000001
+#define AR_PHY_CALMODE_ADC_DC_PER 0x00000002
+#define AR_PHY_CALMODE_ADC_DC_INIT 0x00000003
+
+#define AR_PHY_CAL_MEAS_0(_i) (0x9c10 + ((_i) << 12))
+#define AR_PHY_CAL_MEAS_1(_i) (0x9c14 + ((_i) << 12))
+#define AR_PHY_CAL_MEAS_2(_i) (0x9c18 + ((_i) << 12))
+#define AR_PHY_CAL_MEAS_3(_i) (0x9c1c + ((_i) << 12))
+
+#define AR_PHY_CURRENT_RSSI 0x9c1c
+#define AR9280_PHY_CURRENT_RSSI 0x9c3c
+
+#define AR_PHY_RFBUS_GRANT 0x9C20
+#define AR_PHY_RFBUS_GRANT_EN 0x00000001
+
+#define AR_PHY_CHAN_INFO_GAIN_DIFF 0x9CF4
+#define AR_PHY_CHAN_INFO_GAIN_DIFF_UPPER_LIMIT 320
+
+#define AR_PHY_CHAN_INFO_GAIN 0x9CFC
+
+#define AR_PHY_MODE 0xA200
+#define AR_PHY_MODE_ASYNCFIFO 0x80
+#define AR_PHY_MODE_AR2133 0x08
+#define AR_PHY_MODE_AR5111 0x00
+#define AR_PHY_MODE_AR5112 0x08
+#define AR_PHY_MODE_DYNAMIC 0x04
+#define AR_PHY_MODE_RF2GHZ 0x02
+#define AR_PHY_MODE_RF5GHZ 0x00
+#define AR_PHY_MODE_CCK 0x01
+#define AR_PHY_MODE_OFDM 0x00
+#define AR_PHY_MODE_DYN_CCK_DISABLE 0x100
+
+#define AR_PHY_CCK_TX_CTRL 0xA204
+#define AR_PHY_CCK_TX_CTRL_JAPAN 0x00000010
+#define AR_PHY_CCK_TX_CTRL_TX_DAC_SCALE_CCK 0x0000000C
+#define AR_PHY_CCK_TX_CTRL_TX_DAC_SCALE_CCK_S 2
+
+#define AR_PHY_CCK_DETECT 0xA208
+#define AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK 0x0000003F
+#define AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK_S 0
+/* [12:6] settling time for antenna switch */
+#define AR_PHY_CCK_DETECT_ANT_SWITCH_TIME 0x00001FC0
+#define AR_PHY_CCK_DETECT_ANT_SWITCH_TIME_S 6
+#define AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV 0x2000
+#define AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV_S 13
+
+#define AR_PHY_GAIN_2GHZ 0xA20C
+#define AR_PHY_GAIN_2GHZ_RXTX_MARGIN 0x00FC0000
+#define AR_PHY_GAIN_2GHZ_RXTX_MARGIN_S 18
+#define AR_PHY_GAIN_2GHZ_BSW_MARGIN 0x00003C00
+#define AR_PHY_GAIN_2GHZ_BSW_MARGIN_S 10
+#define AR_PHY_GAIN_2GHZ_BSW_ATTEN 0x0000001F
+#define AR_PHY_GAIN_2GHZ_BSW_ATTEN_S 0
+
+#define AR_PHY_GAIN_2GHZ_XATTEN2_MARGIN 0x003E0000
+#define AR_PHY_GAIN_2GHZ_XATTEN2_MARGIN_S 17
+#define AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN 0x0001F000
+#define AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN_S 12
+#define AR_PHY_GAIN_2GHZ_XATTEN2_DB 0x00000FC0
+#define AR_PHY_GAIN_2GHZ_XATTEN2_DB_S 6
+#define AR_PHY_GAIN_2GHZ_XATTEN1_DB 0x0000003F
+#define AR_PHY_GAIN_2GHZ_XATTEN1_DB_S 0
+
+#define AR_PHY_CCK_RXCTRL4 0xA21C
+#define AR_PHY_CCK_RXCTRL4_FREQ_EST_SHORT 0x01F80000
+#define AR_PHY_CCK_RXCTRL4_FREQ_EST_SHORT_S 19
+
+#define AR_PHY_DAG_CTRLCCK 0xA228
+#define AR_PHY_DAG_CTRLCCK_EN_RSSI_THR 0x00000200
+#define AR_PHY_DAG_CTRLCCK_RSSI_THR 0x0001FC00
+#define AR_PHY_DAG_CTRLCCK_RSSI_THR_S 10
+
+#define AR_PHY_FORCE_CLKEN_CCK 0xA22C
+#define AR_PHY_FORCE_CLKEN_CCK_MRC_MUX 0x00000040
+
+#define AR_PHY_POWER_TX_RATE3 0xA234
+#define AR_PHY_POWER_TX_RATE4 0xA238
+
+#define AR_PHY_SCRM_SEQ_XR 0xA23C
+#define AR_PHY_HEADER_DETECT_XR 0xA240
+#define AR_PHY_CHIRP_DETECTED_XR 0xA244
+#define AR_PHY_BLUETOOTH 0xA254
+
+#define AR_PHY_TPCRG1 0xA258
+#define AR_PHY_TPCRG1_NUM_PD_GAIN 0x0000c000
+#define AR_PHY_TPCRG1_NUM_PD_GAIN_S 14
+
+#define AR_PHY_TPCRG1_PD_GAIN_1 0x00030000
+#define AR_PHY_TPCRG1_PD_GAIN_1_S 16
+#define AR_PHY_TPCRG1_PD_GAIN_2 0x000C0000
+#define AR_PHY_TPCRG1_PD_GAIN_2_S 18
+#define AR_PHY_TPCRG1_PD_GAIN_3 0x00300000
+#define AR_PHY_TPCRG1_PD_GAIN_3_S 20
+
+#define AR_PHY_TPCRG1_PD_CAL_ENABLE 0x00400000
+#define AR_PHY_TPCRG1_PD_CAL_ENABLE_S 22
+
+#define AR_PHY_TX_PWRCTRL4 0xa264
+#define AR_PHY_TX_PWRCTRL_PD_AVG_VALID 0x00000001
+#define AR_PHY_TX_PWRCTRL_PD_AVG_VALID_S 0
+#define AR_PHY_TX_PWRCTRL_PD_AVG_OUT 0x000001FE
+#define AR_PHY_TX_PWRCTRL_PD_AVG_OUT_S 1
+
+#define AR_PHY_TX_PWRCTRL6_0 0xa270
+#define AR_PHY_TX_PWRCTRL6_1 0xb270
+#define AR_PHY_TX_PWRCTRL_ERR_EST_MODE 0x03000000
+#define AR_PHY_TX_PWRCTRL_ERR_EST_MODE_S 24
+
+#define AR_PHY_TX_PWRCTRL7 0xa274
+#define AR_PHY_TX_PWRCTRL_INIT_TX_GAIN 0x01F80000
+#define AR_PHY_TX_PWRCTRL_INIT_TX_GAIN_S 19
+
+#define AR_PHY_TX_PWRCTRL9 0xa27C
+#define AR_PHY_TX_DESIRED_SCALE_CCK 0x00007C00
+#define AR_PHY_TX_DESIRED_SCALE_CCK_S 10
+#define AR_PHY_TX_PWRCTRL9_RES_DC_REMOVAL 0x80000000
+#define AR_PHY_TX_PWRCTRL9_RES_DC_REMOVAL_S 31
+
+#define AR_PHY_TX_GAIN_TBL1 0xa300
+#define AR_PHY_TX_GAIN 0x0007F000
+#define AR_PHY_TX_GAIN_S 12
+
+#define AR_PHY_CH0_TX_PWRCTRL11 0xa398
+#define AR_PHY_CH1_TX_PWRCTRL11 0xb398
+#define AR_PHY_TX_PWRCTRL_OLPC_TEMP_COMP 0x0000FC00
+#define AR_PHY_TX_PWRCTRL_OLPC_TEMP_COMP_S 10
+
+#define AR_PHY_VIT_MASK2_M_46_61 0xa3a0
+#define AR_PHY_MASK2_M_31_45 0xa3a4
+#define AR_PHY_MASK2_M_16_30 0xa3a8
+#define AR_PHY_MASK2_M_00_15 0xa3ac
+#define AR_PHY_MASK2_P_15_01 0xa3b8
+#define AR_PHY_MASK2_P_30_16 0xa3bc
+#define AR_PHY_MASK2_P_45_31 0xa3c0
+#define AR_PHY_MASK2_P_61_45 0xa3c4
+#define AR_PHY_SPUR_REG 0x994c
+
+#define AR_PHY_SPUR_REG_MASK_RATE_CNTL (0xFF << 18)
+#define AR_PHY_SPUR_REG_MASK_RATE_CNTL_S 18
+
+#define AR_PHY_SPUR_REG_ENABLE_MASK_PPM 0x20000
+#define AR_PHY_SPUR_REG_MASK_RATE_SELECT (0xFF << 9)
+#define AR_PHY_SPUR_REG_MASK_RATE_SELECT_S 9
+#define AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI 0x100
+#define AR_PHY_SPUR_REG_SPUR_RSSI_THRESH 0x7F
+#define AR_PHY_SPUR_REG_SPUR_RSSI_THRESH_S 0
+
+#define AR_PHY_PILOT_MASK_01_30 0xa3b0
+#define AR_PHY_PILOT_MASK_31_60 0xa3b4
+
+#define AR_PHY_CHANNEL_MASK_01_30 0x99d4
+#define AR_PHY_CHANNEL_MASK_31_60 0x99d8
+
+#define AR_PHY_ANALOG_SWAP 0xa268
+#define AR_PHY_SWAP_ALT_CHAIN 0x00000040
+
+#define AR_PHY_TPCRG5 0xA26C
+#define AR_PHY_TPCRG5_PD_GAIN_OVERLAP 0x0000000F
+#define AR_PHY_TPCRG5_PD_GAIN_OVERLAP_S 0
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1 0x000003F0
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1_S 4
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2 0x0000FC00
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2_S 10
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3 0x003F0000
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3_S 16
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4 0x0FC00000
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4_S 22
+
+/* Carrier leak calibration control, do it after AGC calibration */
+#define AR_PHY_CL_CAL_CTL 0xA358
+#define AR_PHY_CL_CAL_ENABLE 0x00000002
+#define AR_PHY_PARALLEL_CAL_ENABLE 0x00000001
+
+#define AR_PHY_POWER_TX_RATE5 0xA38C
+#define AR_PHY_POWER_TX_RATE6 0xA390
+
+#define AR_PHY_CAL_CHAINMASK 0xA39C
+
+#define AR_PHY_POWER_TX_SUB 0xA3C8
+#define AR_PHY_POWER_TX_RATE7 0xA3CC
+#define AR_PHY_POWER_TX_RATE8 0xA3D0
+#define AR_PHY_POWER_TX_RATE9 0xA3D4
+
+#define AR_PHY_XPA_CFG 0xA3D8
+#define AR_PHY_FORCE_XPA_CFG 0x00000001
+#define AR_PHY_FORCE_XPA_CFG_S 0
+
+#define AR_PHY_CH1_CCA 0xa864
+#define AR_PHY_CH1_MINCCA_PWR 0x0FF80000
+#define AR_PHY_CH1_MINCCA_PWR_S 19
+#define AR9280_PHY_CH1_MINCCA_PWR 0x1FF00000
+#define AR9280_PHY_CH1_MINCCA_PWR_S 20
+
+#define AR_PHY_CH2_CCA 0xb864
+#define AR_PHY_CH2_MINCCA_PWR 0x0FF80000
+#define AR_PHY_CH2_MINCCA_PWR_S 19
+
+#define AR_PHY_CH1_EXT_CCA 0xa9bc
+#define AR_PHY_CH1_EXT_MINCCA_PWR 0xFF800000
+#define AR_PHY_CH1_EXT_MINCCA_PWR_S 23
+#define AR9280_PHY_CH1_EXT_MINCCA_PWR 0x01FF0000
+#define AR9280_PHY_CH1_EXT_MINCCA_PWR_S 16
+
+#define AR_PHY_CH2_EXT_CCA 0xb9bc
+#define AR_PHY_CH2_EXT_MINCCA_PWR 0xFF800000
+#define AR_PHY_CH2_EXT_MINCCA_PWR_S 23
+
+#endif
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
new file mode 100644
index 000000000000..56a9e5fa6d66
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -0,0 +1,802 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "hw.h"
+#include "hw-ops.h"
+#include "ar9003_phy.h"
+
+static void ar9003_hw_setup_calibration(struct ath_hw *ah,
+ struct ath9k_cal_list *currCal)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ /* Select calibration to run */
+ switch (currCal->calData->calType) {
+ case IQ_MISMATCH_CAL:
+ /*
+ * Start calibration with
+ * 2^(INIT_IQCAL_LOG_COUNT_MAX+1) samples
+ */
+ REG_RMW_FIELD(ah, AR_PHY_TIMING4,
+ AR_PHY_TIMING4_IQCAL_LOG_COUNT_MAX,
+ currCal->calData->calCountMax);
+ REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_IQ);
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "starting IQ Mismatch Calibration\n");
+
+ /* Kick-off cal */
+ REG_SET_BIT(ah, AR_PHY_TIMING4, AR_PHY_TIMING4_DO_CAL);
+ break;
+ case TEMP_COMP_CAL:
+ REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_THERM,
+ AR_PHY_65NM_CH0_THERM_LOCAL, 1);
+ REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_THERM,
+ AR_PHY_65NM_CH0_THERM_START, 1);
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "starting Temperature Compensation Calibration\n");
+ break;
+ case ADC_DC_INIT_CAL:
+ case ADC_GAIN_CAL:
+ case ADC_DC_CAL:
+ /* Not yet */
+ break;
+ }
+}
+
+/*
+ * Generic calibration routine.
+ * Recalibrate the lower PHY chips to account for temperature/environment
+ * changes.
+ */
+static bool ar9003_hw_per_calibration(struct ath_hw *ah,
+ struct ath9k_channel *ichan,
+ u8 rxchainmask,
+ struct ath9k_cal_list *currCal)
+{
+ /* Cal is assumed not done until explicitly set below */
+ bool iscaldone = false;
+
+ /* Calibration in progress. */
+ if (currCal->calState == CAL_RUNNING) {
+ /* Check to see if it has finished. */
+ if (!(REG_READ(ah, AR_PHY_TIMING4) & AR_PHY_TIMING4_DO_CAL)) {
+ /*
+ * Accumulate cal measures for active chains
+ */
+ currCal->calData->calCollect(ah);
+ ah->cal_samples++;
+
+ if (ah->cal_samples >=
+ currCal->calData->calNumSamples) {
+ unsigned int i, numChains = 0;
+ for (i = 0; i < AR9300_MAX_CHAINS; i++) {
+ if (rxchainmask & (1 << i))
+ numChains++;
+ }
+
+ /*
+ * Process accumulated data
+ */
+ currCal->calData->calPostProc(ah, numChains);
+
+ /* Calibration has finished. */
+ ichan->CalValid |= currCal->calData->calType;
+ currCal->calState = CAL_DONE;
+ iscaldone = true;
+ } else {
+ /*
+ * Set-up collection of another sub-sample until we
+ * get desired number
+ */
+ ar9003_hw_setup_calibration(ah, currCal);
+ }
+ }
+ } else if (!(ichan->CalValid & currCal->calData->calType)) {
+ /* If current cal is marked invalid in channel, kick it off */
+ ath9k_hw_reset_calibration(ah, currCal);
+ }
+
+ return iscaldone;
+}
+
+static bool ar9003_hw_calibrate(struct ath_hw *ah,
+ struct ath9k_channel *chan,
+ u8 rxchainmask,
+ bool longcal)
+{
+ bool iscaldone = true;
+ struct ath9k_cal_list *currCal = ah->cal_list_curr;
+
+ /*
+ * For a given calibration:
+ * 1. Call the generic cal routine.
+ * 2. When this cal is done (isCalDone) and we have more cals waiting
+ * (e.g. after reset), mask this from the upper layers by not
+ * propagating isCalDone if it is set to TRUE.
+ * Instead, change isCalDone to FALSE and set up the waiting cal(s)
+ * to be run.
+ */
+ if (currCal &&
+ (currCal->calState == CAL_RUNNING ||
+ currCal->calState == CAL_WAITING)) {
+ iscaldone = ar9003_hw_per_calibration(ah, chan,
+ rxchainmask, currCal);
+ if (iscaldone) {
+ ah->cal_list_curr = currCal = currCal->calNext;
+
+ if (currCal->calState == CAL_WAITING) {
+ iscaldone = false;
+ ath9k_hw_reset_calibration(ah, currCal);
+ }
+ }
+ }
+
+ /* Do NF cal only at longer intervals */
+ if (longcal) {
+ /*
+ * Load the NF from history buffer of the current channel.
+ * NF is slow time-variant, so it is OK to use a historical
+ * value.
+ */
+ ath9k_hw_loadnf(ah, ah->curchan);
+
+ /* start NF calibration, without updating BB NF register */
+ ath9k_hw_start_nfcal(ah);
+ }
+
+ return iscaldone;
+}
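A compact sketch of the hand-off rule described in the comment above: when the running calibration finishes and the next list entry is still waiting, completion is masked so the caller keeps polling until every queued calibration has run. The types and names below are illustrative only, not the driver's structures:

#include <stdbool.h>
#include <stddef.h>

enum sketch_cal_state { SKETCH_WAITING, SKETCH_RUNNING, SKETCH_DONE };

struct sketch_cal {
	enum sketch_cal_state state;
	struct sketch_cal *next;
};

/* Returns what the caller should see as "calibration done". */
static bool sketch_advance(struct sketch_cal **curr, bool this_cal_done)
{
	if (!this_cal_done || !*curr)
		return this_cal_done;

	*curr = (*curr)->next;			/* move to the next queued cal */
	if (*curr && (*curr)->state == SKETCH_WAITING) {
		(*curr)->state = SKETCH_RUNNING;	/* kick it off */
		return false;			/* mask completion upward */
	}
	return true;
}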
+
+static void ar9003_hw_iqcal_collect(struct ath_hw *ah)
+{
+ int i;
+
+ /* Accumulate IQ cal measures for active chains */
+ for (i = 0; i < AR5416_MAX_CHAINS; i++) {
+ ah->totalPowerMeasI[i] +=
+ REG_READ(ah, AR_PHY_CAL_MEAS_0(i));
+ ah->totalPowerMeasQ[i] +=
+ REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
+ ah->totalIqCorrMeas[i] +=
+ (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
+ ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
+ "%d: Chn %d pmi=0x%08x;pmq=0x%08x;iqcm=0x%08x;\n",
+ ah->cal_samples, i, ah->totalPowerMeasI[i],
+ ah->totalPowerMeasQ[i],
+ ah->totalIqCorrMeas[i]);
+ }
+}
+
+static void ar9003_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ u32 powerMeasQ, powerMeasI, iqCorrMeas;
+ u32 qCoffDenom, iCoffDenom;
+ int32_t qCoff, iCoff;
+ int iqCorrNeg, i;
+ const u_int32_t offset_array[3] = {
+ AR_PHY_RX_IQCAL_CORR_B0,
+ AR_PHY_RX_IQCAL_CORR_B1,
+ AR_PHY_RX_IQCAL_CORR_B2,
+ };
+
+ for (i = 0; i < numChains; i++) {
+ powerMeasI = ah->totalPowerMeasI[i];
+ powerMeasQ = ah->totalPowerMeasQ[i];
+ iqCorrMeas = ah->totalIqCorrMeas[i];
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Starting IQ Cal and Correction for Chain %d\n",
+ i);
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Original: Chn %d iq_corr_meas = 0x%08x\n",
+ i, ah->totalIqCorrMeas[i]);
+
+ iqCorrNeg = 0;
+
+ if (iqCorrMeas > 0x80000000) {
+ iqCorrMeas = (0xffffffff - iqCorrMeas) + 1;
+ iqCorrNeg = 1;
+ }
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d pwr_meas_i = 0x%08x\n", i, powerMeasI);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d pwr_meas_q = 0x%08x\n", i, powerMeasQ);
+ ath_print(common, ATH_DBG_CALIBRATE, "iqCorrNeg is 0x%08x\n",
+ iqCorrNeg);
+
+ iCoffDenom = (powerMeasI / 2 + powerMeasQ / 2) / 256;
+ qCoffDenom = powerMeasQ / 64;
+
+ if ((iCoffDenom != 0) && (qCoffDenom != 0)) {
+ iCoff = iqCorrMeas / iCoffDenom;
+ qCoff = powerMeasI / qCoffDenom - 64;
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d iCoff = 0x%08x\n", i, iCoff);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d qCoff = 0x%08x\n", i, qCoff);
+
+ /* Force bounds on iCoff */
+ if (iCoff >= 63)
+ iCoff = 63;
+ else if (iCoff <= -63)
+ iCoff = -63;
+
+ /* Negate iCoff if iqCorrNeg == 0 */
+ if (iqCorrNeg == 0x0)
+ iCoff = -iCoff;
+
+ /* Force bounds on qCoff */
+ if (qCoff >= 63)
+ qCoff = 63;
+ else if (qCoff <= -63)
+ qCoff = -63;
+
+ iCoff = iCoff & 0x7f;
+ qCoff = qCoff & 0x7f;
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Chn %d : iCoff = 0x%x qCoff = 0x%x\n",
+ i, iCoff, qCoff);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Register offset (0x%04x) "
+ "before update = 0x%x\n",
+ offset_array[i],
+ REG_READ(ah, offset_array[i]));
+
+ REG_RMW_FIELD(ah, offset_array[i],
+ AR_PHY_RX_IQCAL_CORR_IQCORR_Q_I_COFF,
+ iCoff);
+ REG_RMW_FIELD(ah, offset_array[i],
+ AR_PHY_RX_IQCAL_CORR_IQCORR_Q_Q_COFF,
+ qCoff);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Register offset (0x%04x) QI COFF "
+ "(bitfields 0x%08x) after update = 0x%x\n",
+ offset_array[i],
+ AR_PHY_RX_IQCAL_CORR_IQCORR_Q_I_COFF,
+ REG_READ(ah, offset_array[i]));
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Register offset (0x%04x) QQ COFF "
+ "(bitfields 0x%08x) after update = 0x%x\n",
+ offset_array[i],
+ AR_PHY_RX_IQCAL_CORR_IQCORR_Q_Q_COFF,
+ REG_READ(ah, offset_array[i]));
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "IQ Cal and Correction done for Chain %d\n",
+ i);
+ }
+ }
+
+ REG_SET_BIT(ah, AR_PHY_RX_IQCAL_CORR_B0,
+ AR_PHY_RX_IQCAL_CORR_IQCORR_ENABLE);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "IQ Cal and Correction (offset 0x%04x) enabled "
+ "(bit position 0x%08x). New Value 0x%08x\n",
+ (unsigned) (AR_PHY_RX_IQCAL_CORR_B0),
+ AR_PHY_RX_IQCAL_CORR_IQCORR_ENABLE,
+ REG_READ(ah, AR_PHY_RX_IQCAL_CORR_B0));
+}
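To make the coefficient arithmetic above concrete, here is the same quantization applied to one made-up set of accumulated measurements; the numbers are chosen only for illustration and this is not driver code:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Made-up accumulated measurements for one chain. */
	uint32_t powerMeasI = 65536, powerMeasQ = 65536, iqCorrMeas = 2048;
	int iqCorrNeg = 0;

	uint32_t iCoffDenom = (powerMeasI / 2 + powerMeasQ / 2) / 256;	/* 256 */
	uint32_t qCoffDenom = powerMeasQ / 64;				/* 1024 */

	int32_t iCoff = iqCorrMeas / iCoffDenom;	/* 8 */
	int32_t qCoff = powerMeasI / qCoffDenom - 64;	/* 0 */

	if (iCoff >= 63)
		iCoff = 63;
	else if (iCoff <= -63)
		iCoff = -63;
	if (iqCorrNeg == 0x0)
		iCoff = -iCoff;				/* -8 */
	if (qCoff >= 63)
		qCoff = 63;
	else if (qCoff <= -63)
		qCoff = -63;

	/* 7-bit values as they would land in the Q_I/Q_Q correction fields */
	printf("iCoff = 0x%02x, qCoff = 0x%02x\n",
	       (unsigned int)(iCoff & 0x7f), (unsigned int)(qCoff & 0x7f));
	return 0;
}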
+
+static const struct ath9k_percal_data iq_cal_single_sample = {
+ IQ_MISMATCH_CAL,
+ MIN_CAL_SAMPLES,
+ PER_MAX_LOG_COUNT,
+ ar9003_hw_iqcal_collect,
+ ar9003_hw_iqcalibrate
+};
+
+static void ar9003_hw_init_cal_settings(struct ath_hw *ah)
+{
+ ah->iq_caldata.calData = &iq_cal_single_sample;
+ ah->supp_cals = IQ_MISMATCH_CAL;
+}
+
+static bool ar9003_hw_iscal_supported(struct ath_hw *ah,
+ enum ath9k_cal_types calType)
+{
+ switch (calType & ah->supp_cals) {
+ case IQ_MISMATCH_CAL:
+ /*
+ * XXX: Run IQ Mismatch for non-CCK only
+ * Note that CHANNEL_B is never set though.
+ */
+ return true;
+ case ADC_GAIN_CAL:
+ case ADC_DC_CAL:
+ return false;
+ case TEMP_COMP_CAL:
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * solve 4x4 linear equation used in loopback iq cal.
+ */
+static bool ar9003_hw_solve_iq_cal(struct ath_hw *ah,
+ s32 sin_2phi_1,
+ s32 cos_2phi_1,
+ s32 sin_2phi_2,
+ s32 cos_2phi_2,
+ s32 mag_a0_d0,
+ s32 phs_a0_d0,
+ s32 mag_a1_d0,
+ s32 phs_a1_d0,
+ s32 solved_eq[])
+{
+ s32 f1 = cos_2phi_1 - cos_2phi_2,
+ f3 = sin_2phi_1 - sin_2phi_2,
+ f2;
+ s32 mag_tx, phs_tx, mag_rx, phs_rx;
+ const s32 result_shift = 1 << 15;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ f2 = (f1 * f1 + f3 * f3) / result_shift;
+
+ if (!f2) {
+ ath_print(common, ATH_DBG_CALIBRATE, "Divide by 0\n");
+ return false;
+ }
+
+ /* mag mismatch, tx */
+ mag_tx = f1 * (mag_a0_d0 - mag_a1_d0) + f3 * (phs_a0_d0 - phs_a1_d0);
+ /* phs mismatch, tx */
+ phs_tx = f3 * (-mag_a0_d0 + mag_a1_d0) + f1 * (phs_a0_d0 - phs_a1_d0);
+
+ mag_tx = (mag_tx / f2);
+ phs_tx = (phs_tx / f2);
+
+ /* mag mismatch, rx */
+ mag_rx = mag_a0_d0 - (cos_2phi_1 * mag_tx + sin_2phi_1 * phs_tx) /
+ result_shift;
+ /* phs mismatch, rx */
+ phs_rx = phs_a0_d0 + (sin_2phi_1 * mag_tx - cos_2phi_1 * phs_tx) /
+ result_shift;
+
+ solved_eq[0] = mag_tx;
+ solved_eq[1] = phs_tx;
+ solved_eq[2] = mag_rx;
+ solved_eq[3] = phs_rx;
+
+ return true;
+}
+
+static s32 ar9003_hw_find_mag_approx(struct ath_hw *ah, s32 in_re, s32 in_im)
+{
+ s32 abs_i = abs(in_re),
+ abs_q = abs(in_im),
+ max_abs, min_abs;
+
+ if (abs_i > abs_q) {
+ max_abs = abs_i;
+ min_abs = abs_q;
+ } else {
+ max_abs = abs_q;
+ min_abs = abs_i;
+ }
+
+ return max_abs - (max_abs / 32) + (min_abs / 8) + (min_abs / 4);
+}
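The helper above is the usual "alpha max plus beta min" magnitude estimate, |z| ~ (31/32)*max(|re|,|im|) + (3/8)*min(|re|,|im|), which avoids a square root. A standalone comparison against hypot(), for illustration only:

#include <math.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative only: same estimate as ar9003_hw_find_mag_approx() above. */
static int mag_approx(int re, int im)
{
	int abs_i = abs(re), abs_q = abs(im);
	int max_abs = abs_i > abs_q ? abs_i : abs_q;
	int min_abs = abs_i > abs_q ? abs_q : abs_i;

	return max_abs - (max_abs / 32) + (min_abs / 8) + (min_abs / 4);
}

int main(void)
{
	printf("approx %d vs exact %.1f\n", mag_approx(300, 400), hypot(300, 400));
	printf("approx %d vs exact %.1f\n", mag_approx(100, 100), hypot(100, 100));
	return 0;
}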
+
+#define DELPT 32
+
+static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
+ s32 chain_idx,
+ const s32 iq_res[],
+ s32 iqc_coeff[])
+{
+ s32 i2_m_q2_a0_d0, i2_p_q2_a0_d0, iq_corr_a0_d0,
+ i2_m_q2_a0_d1, i2_p_q2_a0_d1, iq_corr_a0_d1,
+ i2_m_q2_a1_d0, i2_p_q2_a1_d0, iq_corr_a1_d0,
+ i2_m_q2_a1_d1, i2_p_q2_a1_d1, iq_corr_a1_d1;
+ s32 mag_a0_d0, mag_a1_d0, mag_a0_d1, mag_a1_d1,
+ phs_a0_d0, phs_a1_d0, phs_a0_d1, phs_a1_d1,
+ sin_2phi_1, cos_2phi_1,
+ sin_2phi_2, cos_2phi_2;
+ s32 mag_tx, phs_tx, mag_rx, phs_rx;
+ s32 solved_eq[4], mag_corr_tx, phs_corr_tx, mag_corr_rx, phs_corr_rx,
+ q_q_coff, q_i_coff;
+ const s32 res_scale = 1 << 15;
+ const s32 delpt_shift = 1 << 8;
+ s32 mag1, mag2;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ i2_m_q2_a0_d0 = iq_res[0] & 0xfff;
+ i2_p_q2_a0_d0 = (iq_res[0] >> 12) & 0xfff;
+ iq_corr_a0_d0 = ((iq_res[0] >> 24) & 0xff) + ((iq_res[1] & 0xf) << 8);
+
+ if (i2_m_q2_a0_d0 > 0x800)
+ i2_m_q2_a0_d0 = -((0xfff - i2_m_q2_a0_d0) + 1);
+
+ if (i2_p_q2_a0_d0 > 0x800)
+ i2_p_q2_a0_d0 = -((0xfff - i2_p_q2_a0_d0) + 1);
+
+ if (iq_corr_a0_d0 > 0x800)
+ iq_corr_a0_d0 = -((0xfff - iq_corr_a0_d0) + 1);
+
+ i2_m_q2_a0_d1 = (iq_res[1] >> 4) & 0xfff;
+ i2_p_q2_a0_d1 = (iq_res[2] & 0xfff);
+ iq_corr_a0_d1 = (iq_res[2] >> 12) & 0xfff;
+
+ if (i2_m_q2_a0_d1 > 0x800)
+ i2_m_q2_a0_d1 = -((0xfff - i2_m_q2_a0_d1) + 1);
+
+ if (i2_p_q2_a0_d1 > 0x800)
+ i2_p_q2_a0_d1 = -((0xfff - i2_p_q2_a0_d1) + 1);
+
+ if (iq_corr_a0_d1 > 0x800)
+ iq_corr_a0_d1 = -((0xfff - iq_corr_a0_d1) + 1);
+
+ i2_m_q2_a1_d0 = ((iq_res[2] >> 24) & 0xff) + ((iq_res[3] & 0xf) << 8);
+ i2_p_q2_a1_d0 = (iq_res[3] >> 4) & 0xfff;
+ iq_corr_a1_d0 = iq_res[4] & 0xfff;
+
+ if (i2_m_q2_a1_d0 > 0x800)
+ i2_m_q2_a1_d0 = -((0xfff - i2_m_q2_a1_d0) + 1);
+
+ if (i2_p_q2_a1_d0 > 0x800)
+ i2_p_q2_a1_d0 = -((0xfff - i2_p_q2_a1_d0) + 1);
+
+ if (iq_corr_a1_d0 > 0x800)
+ iq_corr_a1_d0 = -((0xfff - iq_corr_a1_d0) + 1);
+
+ i2_m_q2_a1_d1 = (iq_res[4] >> 12) & 0xfff;
+ i2_p_q2_a1_d1 = ((iq_res[4] >> 24) & 0xff) + ((iq_res[5] & 0xf) << 8);
+ iq_corr_a1_d1 = (iq_res[5] >> 4) & 0xfff;
+
+ if (i2_m_q2_a1_d1 > 0x800)
+ i2_m_q2_a1_d1 = -((0xfff - i2_m_q2_a1_d1) + 1);
+
+ if (i2_p_q2_a1_d1 > 0x800)
+ i2_p_q2_a1_d1 = -((0xfff - i2_p_q2_a1_d1) + 1);
+
+ if (iq_corr_a1_d1 > 0x800)
+ iq_corr_a1_d1 = -((0xfff - iq_corr_a1_d1) + 1);
+
+ if ((i2_p_q2_a0_d0 == 0) || (i2_p_q2_a0_d1 == 0) ||
+ (i2_p_q2_a1_d0 == 0) || (i2_p_q2_a1_d1 == 0)) {
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Divide by 0:\na0_d0=%d\n"
+ "a0_d1=%d\na1_d0=%d\na1_d1=%d\n",
+ i2_p_q2_a0_d0, i2_p_q2_a0_d1,
+ i2_p_q2_a1_d0, i2_p_q2_a1_d1);
+ return false;
+ }
+
+ mag_a0_d0 = (i2_m_q2_a0_d0 * res_scale) / i2_p_q2_a0_d0;
+ phs_a0_d0 = (iq_corr_a0_d0 * res_scale) / i2_p_q2_a0_d0;
+
+ mag_a0_d1 = (i2_m_q2_a0_d1 * res_scale) / i2_p_q2_a0_d1;
+ phs_a0_d1 = (iq_corr_a0_d1 * res_scale) / i2_p_q2_a0_d1;
+
+ mag_a1_d0 = (i2_m_q2_a1_d0 * res_scale) / i2_p_q2_a1_d0;
+ phs_a1_d0 = (iq_corr_a1_d0 * res_scale) / i2_p_q2_a1_d0;
+
+ mag_a1_d1 = (i2_m_q2_a1_d1 * res_scale) / i2_p_q2_a1_d1;
+ phs_a1_d1 = (iq_corr_a1_d1 * res_scale) / i2_p_q2_a1_d1;
+
+ /* w/o analog phase shift */
+ sin_2phi_1 = (((mag_a0_d0 - mag_a0_d1) * delpt_shift) / DELPT);
+ /* w/o analog phase shift */
+ cos_2phi_1 = (((phs_a0_d1 - phs_a0_d0) * delpt_shift) / DELPT);
+ /* w/ analog phase shift */
+ sin_2phi_2 = (((mag_a1_d0 - mag_a1_d1) * delpt_shift) / DELPT);
+ /* w/ analog phase shift */
+ cos_2phi_2 = (((phs_a1_d1 - phs_a1_d0) * delpt_shift) / DELPT);
+
+ /*
+ * force sin^2 + cos^2 = 1;
+ * find magnitude by approximation
+ */
+ mag1 = ar9003_hw_find_mag_approx(ah, cos_2phi_1, sin_2phi_1);
+ mag2 = ar9003_hw_find_mag_approx(ah, cos_2phi_2, sin_2phi_2);
+
+ if ((mag1 == 0) || (mag2 == 0)) {
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Divide by 0: mag1=%d, mag2=%d\n",
+ mag1, mag2);
+ return false;
+ }
+
+ /* normalize sin and cos by mag */
+ sin_2phi_1 = (sin_2phi_1 * res_scale / mag1);
+ cos_2phi_1 = (cos_2phi_1 * res_scale / mag1);
+ sin_2phi_2 = (sin_2phi_2 * res_scale / mag2);
+ cos_2phi_2 = (cos_2phi_2 * res_scale / mag2);
+
+ /* calculate IQ mismatch */
+ if (!ar9003_hw_solve_iq_cal(ah,
+ sin_2phi_1, cos_2phi_1,
+ sin_2phi_2, cos_2phi_2,
+ mag_a0_d0, phs_a0_d0,
+ mag_a1_d0,
+ phs_a1_d0, solved_eq)) {
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Call to ar9003_hw_solve_iq_cal() failed.\n");
+ return false;
+ }
+
+ mag_tx = solved_eq[0];
+ phs_tx = solved_eq[1];
+ mag_rx = solved_eq[2];
+ phs_rx = solved_eq[3];
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "chain %d: mag mismatch=%d phase mismatch=%d\n",
+ chain_idx, mag_tx/res_scale, phs_tx/res_scale);
+
+ if (res_scale == mag_tx) {
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Divide by 0: mag_tx=%d, res_scale=%d\n",
+ mag_tx, res_scale);
+ return false;
+ }
+
+ /* calculate and quantize Tx IQ correction factor */
+ mag_corr_tx = (mag_tx * res_scale) / (res_scale - mag_tx);
+ phs_corr_tx = -phs_tx;
+
+ q_q_coff = (mag_corr_tx * 128 / res_scale);
+ q_i_coff = (phs_corr_tx * 256 / res_scale);
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "tx chain %d: mag corr=%d phase corr=%d\n",
+ chain_idx, q_q_coff, q_i_coff);
+
+ if (q_i_coff < -63)
+ q_i_coff = -63;
+ if (q_i_coff > 63)
+ q_i_coff = 63;
+ if (q_q_coff < -63)
+ q_q_coff = -63;
+ if (q_q_coff > 63)
+ q_q_coff = 63;
+
+ iqc_coeff[0] = (q_q_coff * 128) + q_i_coff;
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "tx chain %d: iq corr coeff=%x\n",
+ chain_idx, iqc_coeff[0]);
+
+ if (-mag_rx == res_scale) {
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Divide by 0: mag_rx=%d, res_scale=%d\n",
+ mag_rx, res_scale);
+ return false;
+ }
+
+ /* calculate and quantize Rx IQ correction factors */
+ mag_corr_rx = (-mag_rx * res_scale) / (res_scale + mag_rx);
+ phs_corr_rx = -phs_rx;
+
+ q_q_coff = (mag_corr_rx * 128 / res_scale);
+ q_i_coff = (phs_corr_rx * 256 / res_scale);
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "rx chain %d: mag corr=%d phase corr=%d\n",
+ chain_idx, q_q_coff, q_i_coff);
+
+ if (q_i_coff < -63)
+ q_i_coff = -63;
+ if (q_i_coff > 63)
+ q_i_coff = 63;
+ if (q_q_coff < -63)
+ q_q_coff = -63;
+ if (q_q_coff > 63)
+ q_q_coff = 63;
+
+ iqc_coeff[1] = (q_q_coff * 128) + q_i_coff;
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "rx chain %d: iq corr coeff=%x\n",
+ chain_idx, iqc_coeff[1]);
+
+ return true;
+}
+
+static void ar9003_hw_tx_iq_cal(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ const u32 txiqcal_status[AR9300_MAX_CHAINS] = {
+ AR_PHY_TX_IQCAL_STATUS_B0,
+ AR_PHY_TX_IQCAL_STATUS_B1,
+ AR_PHY_TX_IQCAL_STATUS_B2,
+ };
+ const u32 tx_corr_coeff[AR9300_MAX_CHAINS] = {
+ AR_PHY_TX_IQCAL_CORR_COEFF_01_B0,
+ AR_PHY_TX_IQCAL_CORR_COEFF_01_B1,
+ AR_PHY_TX_IQCAL_CORR_COEFF_01_B2,
+ };
+ const u32 rx_corr[AR9300_MAX_CHAINS] = {
+ AR_PHY_RX_IQCAL_CORR_B0,
+ AR_PHY_RX_IQCAL_CORR_B1,
+ AR_PHY_RX_IQCAL_CORR_B2,
+ };
+ const u_int32_t chan_info_tab[] = {
+ AR_PHY_CHAN_INFO_TAB_0,
+ AR_PHY_CHAN_INFO_TAB_1,
+ AR_PHY_CHAN_INFO_TAB_2,
+ };
+ s32 iq_res[6];
+ s32 iqc_coeff[2];
+ s32 i, j;
+ u32 num_chains = 0;
+
+ for (i = 0; i < AR9300_MAX_CHAINS; i++) {
+ if (ah->txchainmask & (1 << i))
+ num_chains++;
+ }
+
+ REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_1,
+ AR_PHY_TX_IQCAQL_CONTROL_1_IQCORR_I_Q_COFF_DELPT,
+ DELPT);
+ REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_START,
+ AR_PHY_TX_IQCAL_START_DO_CAL,
+ AR_PHY_TX_IQCAL_START_DO_CAL);
+
+ if (!ath9k_hw_wait(ah, AR_PHY_TX_IQCAL_START,
+ AR_PHY_TX_IQCAL_START_DO_CAL,
+ 0, AH_WAIT_TIMEOUT)) {
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Tx IQ Cal not complete.\n");
+ goto TX_IQ_CAL_FAILED;
+ }
+
+ for (i = 0; i < num_chains; i++) {
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Doing Tx IQ Cal for chain %d.\n", i);
+
+ if (REG_READ(ah, txiqcal_status[i]) &
+ AR_PHY_TX_IQCAL_STATUS_FAILED) {
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Tx IQ Cal failed for chain %d.\n", i);
+ goto TX_IQ_CAL_FAILED;
+ }
+
+ for (j = 0; j < 3; j++) {
+ u_int8_t idx = 2 * j,
+ offset = 4 * j;
+
+ REG_RMW_FIELD(ah, AR_PHY_CHAN_INFO_MEMORY,
+ AR_PHY_CHAN_INFO_TAB_S2_READ, 0);
+
+ /* 32 bits */
+ iq_res[idx] = REG_READ(ah, chan_info_tab[i] + offset);
+
+ REG_RMW_FIELD(ah, AR_PHY_CHAN_INFO_MEMORY,
+ AR_PHY_CHAN_INFO_TAB_S2_READ, 1);
+
+ /* 16 bits */
+ iq_res[idx+1] = 0xffff & REG_READ(ah,
+ chan_info_tab[i] +
+ offset);
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "IQ RES[%d]=0x%x IQ_RES[%d]=0x%x\n",
+ idx, iq_res[idx], idx+1, iq_res[idx+1]);
+ }
+
+ if (!ar9003_hw_calc_iq_corr(ah, i, iq_res, iqc_coeff)) {
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "Failed in calculation of IQ correction.\n");
+ goto TX_IQ_CAL_FAILED;
+ }
+
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "IQ_COEFF[0] = 0x%x IQ_COEFF[1] = 0x%x\n",
+ iqc_coeff[0], iqc_coeff[1]);
+
+ REG_RMW_FIELD(ah, tx_corr_coeff[i],
+ AR_PHY_TX_IQCAL_CORR_COEFF_01_COEFF_TABLE,
+ iqc_coeff[0]);
+ REG_RMW_FIELD(ah, rx_corr[i],
+ AR_PHY_RX_IQCAL_CORR_LOOPBACK_IQCORR_Q_Q_COFF,
+ iqc_coeff[1] >> 7);
+ REG_RMW_FIELD(ah, rx_corr[i],
+ AR_PHY_RX_IQCAL_CORR_LOOPBACK_IQCORR_Q_I_COFF,
+ iqc_coeff[1]);
+ }
+
+ REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_3,
+ AR_PHY_TX_IQCAL_CONTROL_3_IQCORR_EN, 0x1);
+ REG_RMW_FIELD(ah, AR_PHY_RX_IQCAL_CORR_B0,
+ AR_PHY_RX_IQCAL_CORR_B0_LOOPBACK_IQCORR_EN, 0x1);
+
+ return;
+
+TX_IQ_CAL_FAILED:
+ ath_print(common, ATH_DBG_CALIBRATE, "Tx IQ Cal failed\n");
+}
+
+static bool ar9003_hw_init_cal(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ /*
+ * 0x7 = 0b111; AR9003 needs to be configured for 3-chain mode before
+ * running AGC/TxIQ cals
+ */
+ ar9003_hw_set_chain_masks(ah, 0x7, 0x7);
+
+ /* Calibrate the AGC */
+ REG_WRITE(ah, AR_PHY_AGC_CONTROL,
+ REG_READ(ah, AR_PHY_AGC_CONTROL) |
+ AR_PHY_AGC_CONTROL_CAL);
+
+ /* Poll for offset calibration complete */
+ if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL,
+ 0, AH_WAIT_TIMEOUT)) {
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "offset calibration failed to "
+ "complete in 1ms; noisy environment?\n");
+ return false;
+ }
+
+ /* Do Tx IQ Calibration */
+ if (ah->config.tx_iq_calibration)
+ ar9003_hw_tx_iq_cal(ah);
+
+ /* Revert chainmasks to their original values before NF cal */
+ ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask);
+
+ /* Initialize list pointers */
+ ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL;
+
+ if (ar9003_hw_iscal_supported(ah, IQ_MISMATCH_CAL)) {
+ INIT_CAL(&ah->iq_caldata);
+ INSERT_CAL(ah, &ah->iq_caldata);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "enabling IQ Calibration.\n");
+ }
+
+ if (ar9003_hw_iscal_supported(ah, TEMP_COMP_CAL)) {
+ INIT_CAL(&ah->tempCompCalData);
+ INSERT_CAL(ah, &ah->tempCompCalData);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "enabling Temperature Compensation Calibration.\n");
+ }
+
+ /* Initialize current pointer to first element in list */
+ ah->cal_list_curr = ah->cal_list;
+
+ if (ah->cal_list_curr)
+ ath9k_hw_reset_calibration(ah, ah->cal_list_curr);
+
+ chan->CalValid = 0;
+
+ return true;
+}
+
+void ar9003_hw_attach_calib_ops(struct ath_hw *ah)
+{
+ struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
+ struct ath_hw_ops *ops = ath9k_hw_ops(ah);
+
+ priv_ops->init_cal_settings = ar9003_hw_init_cal_settings;
+ priv_ops->init_cal = ar9003_hw_init_cal;
+ priv_ops->setup_calibration = ar9003_hw_setup_calibration;
+ priv_ops->iscal_supported = ar9003_hw_iscal_supported;
+
+ ops->calibrate = ar9003_hw_calibrate;
+}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
new file mode 100644
index 000000000000..23eb60ea5455
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -0,0 +1,1838 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "hw.h"
+#include "ar9003_phy.h"
+#include "ar9003_eeprom.h"
+
+#define COMP_HDR_LEN 4
+#define COMP_CKSUM_LEN 2
+
+#define AR_CH0_TOP (0x00016288)
+#define AR_CH0_TOP_XPABIASLVL (0x3)
+#define AR_CH0_TOP_XPABIASLVL_S (8)
+
+#define AR_CH0_THERM (0x00016290)
+#define AR_CH0_THERM_SPARE (0x3f)
+#define AR_CH0_THERM_SPARE_S (0)
+
+#define AR_SWITCH_TABLE_COM_ALL (0xffff)
+#define AR_SWITCH_TABLE_COM_ALL_S (0)
+
+#define AR_SWITCH_TABLE_COM2_ALL (0xffffff)
+#define AR_SWITCH_TABLE_COM2_ALL_S (0)
+
+#define AR_SWITCH_TABLE_ALL (0xfff)
+#define AR_SWITCH_TABLE_ALL_S (0)
+
+#define LE16(x) __constant_cpu_to_le16(x)
+#define LE32(x) __constant_cpu_to_le32(x)
+
+static const struct ar9300_eeprom ar9300_default = {
+ .eepromVersion = 2,
+ .templateVersion = 2,
+ .macAddr = {1, 2, 3, 4, 5, 6},
+ .custData = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ .baseEepHeader = {
+ .regDmn = { LE16(0), LE16(0x1f) },
+ .txrxMask = 0x77, /* 4 bits tx and 4 bits rx */
+ .opCapFlags = {
+ .opFlags = AR9300_OPFLAGS_11G | AR9300_OPFLAGS_11A,
+ .eepMisc = 0,
+ },
+ .rfSilent = 0,
+ .blueToothOptions = 0,
+ .deviceCap = 0,
+ .deviceType = 5, /* takes lower byte in eeprom location */
+ .pwrTableOffset = AR9300_PWR_TABLE_OFFSET,
+ .params_for_tuning_caps = {0, 0},
+ .featureEnable = 0x0c,
+ /*
+ * bit0 - enable tx temp comp - disabled
+ * bit1 - enable tx volt comp - disabled
+ * bit2 - enable fastClock - enabled
+ * bit3 - enable doubling - enabled
+ * bit4 - enable internal regulator - disabled
+ */
+ .miscConfiguration = 0, /* bit0 - turn down drivestrength */
+ .eepromWriteEnableGpio = 3,
+ .wlanDisableGpio = 0,
+ .wlanLedGpio = 8,
+ .rxBandSelectGpio = 0xff,
+ .txrxgain = 0,
+ .swreg = 0,
+ },
+ .modalHeader2G = {
+ /* ar9300_modal_eep_header 2g */
+ /* 4 idle,t1,t2,b(4 bits per setting) */
+ .antCtrlCommon = LE32(0x110),
+ /* 4 ra1l1, ra2l1, ra1l2, ra2l2, ra12 */
+ .antCtrlCommon2 = LE32(0x22222),
+
+ /*
+ * antCtrlChain[AR9300_MAX_CHAINS]; 6 idle, t, r,
+ * rx1, rx12, b (2 bits each)
+ */
+ .antCtrlChain = { LE16(0x150), LE16(0x150), LE16(0x150) },
+
+ /*
+ * xatten1DB[AR9300_MAX_CHAINS]; 3 xatten1_db
+ * for ar9280 (0xa20c/b20c 5:0)
+ */
+ .xatten1DB = {0, 0, 0},
+
+ /*
+ * xatten1Margin[AR9300_MAX_CHAINS]; 3 xatten1_margin
+ * for ar9280 (0xa20c/b20c 16:12)
+ */
+ .xatten1Margin = {0, 0, 0},
+ .tempSlope = 36,
+ .voltSlope = 0,
+
+ /*
+ * spurChans[OSPREY_EEPROM_MODAL_SPURS]; spur
+ * channels in usual fbin coding format
+ */
+ .spurChans = {0, 0, 0, 0, 0},
+
+ /*
+ * noiseFloorThreshCh[AR9300_MAX_CHAINS]; 3 Check
+ * if the register is per chain
+ */
+ .noiseFloorThreshCh = {-1, 0, 0},
+ .ob = {1, 1, 1},/* 3 chain */
+ .db_stage2 = {1, 1, 1}, /* 3 chain */
+ .db_stage3 = {0, 0, 0},
+ .db_stage4 = {0, 0, 0},
+ .xpaBiasLvl = 0,
+ .txFrameToDataStart = 0x0e,
+ .txFrameToPaOn = 0x0e,
+ .txClip = 3, /* 4 bits tx_clip, 4 bits dac_scale_cck */
+ .antennaGain = 0,
+ .switchSettling = 0x2c,
+ .adcDesiredSize = -30,
+ .txEndToXpaOff = 0,
+ .txEndToRxOn = 0x2,
+ .txFrameToXpaOn = 0xe,
+ .thresh62 = 28,
+ .futureModal = { /* [32] */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ },
+ },
+ .calFreqPier2G = {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2437, 1),
+ FREQ2FBIN(2472, 1),
+ },
+ /* ar9300_cal_data_per_freq_op_loop 2g */
+ .calPierData2G = {
+ { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
+ { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
+ { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
+ },
+ .calTarget_freqbin_Cck = {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2484, 1),
+ },
+ .calTarget_freqbin_2G = {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2437, 1),
+ FREQ2FBIN(2472, 1)
+ },
+ .calTarget_freqbin_2GHT20 = {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2437, 1),
+ FREQ2FBIN(2472, 1)
+ },
+ .calTarget_freqbin_2GHT40 = {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2437, 1),
+ FREQ2FBIN(2472, 1)
+ },
+ .calTargetPowerCck = {
+ /* 1L-5L,5S,11L,11S */
+ { {36, 36, 36, 36} },
+ { {36, 36, 36, 36} },
+ },
+ .calTargetPower2G = {
+ /* 6-24,36,48,54 */
+ { {32, 32, 28, 24} },
+ { {32, 32, 28, 24} },
+ { {32, 32, 28, 24} },
+ },
+ .calTargetPower2GHT20 = {
+ { {32, 32, 32, 32, 28, 20, 32, 32, 28, 20, 32, 32, 28, 20} },
+ { {32, 32, 32, 32, 28, 20, 32, 32, 28, 20, 32, 32, 28, 20} },
+ { {32, 32, 32, 32, 28, 20, 32, 32, 28, 20, 32, 32, 28, 20} },
+ },
+ .calTargetPower2GHT40 = {
+ { {32, 32, 32, 32, 28, 20, 32, 32, 28, 20, 32, 32, 28, 20} },
+ { {32, 32, 32, 32, 28, 20, 32, 32, 28, 20, 32, 32, 28, 20} },
+ { {32, 32, 32, 32, 28, 20, 32, 32, 28, 20, 32, 32, 28, 20} },
+ },
+ .ctlIndex_2G = {
+ 0x11, 0x12, 0x15, 0x17, 0x41, 0x42,
+ 0x45, 0x47, 0x31, 0x32, 0x35, 0x37,
+ },
+ .ctl_freqbin_2G = {
+ {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2417, 1),
+ FREQ2FBIN(2457, 1),
+ FREQ2FBIN(2462, 1)
+ },
+ {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2417, 1),
+ FREQ2FBIN(2462, 1),
+ 0xFF,
+ },
+
+ {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2417, 1),
+ FREQ2FBIN(2462, 1),
+ 0xFF,
+ },
+ {
+ FREQ2FBIN(2422, 1),
+ FREQ2FBIN(2427, 1),
+ FREQ2FBIN(2447, 1),
+ FREQ2FBIN(2452, 1)
+ },
+
+ {
+ /* Data[4].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
+ /* Data[4].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
+ /* Data[4].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
+ /* Data[4].ctlEdges[3].bChannel */ FREQ2FBIN(2484, 1),
+ },
+
+ {
+ /* Data[5].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
+ /* Data[5].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
+ /* Data[5].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
+ 0,
+ },
+
+ {
+ /* Data[6].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
+ /* Data[6].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
+ FREQ2FBIN(2472, 1),
+ 0,
+ },
+
+ {
+ /* Data[7].ctlEdges[0].bChannel */ FREQ2FBIN(2422, 1),
+ /* Data[7].ctlEdges[1].bChannel */ FREQ2FBIN(2427, 1),
+ /* Data[7].ctlEdges[2].bChannel */ FREQ2FBIN(2447, 1),
+ /* Data[7].ctlEdges[3].bChannel */ FREQ2FBIN(2462, 1),
+ },
+
+ {
+ /* Data[8].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
+ /* Data[8].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
+ /* Data[8].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
+ },
+
+ {
+ /* Data[9].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
+ /* Data[9].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
+ /* Data[9].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
+ 0
+ },
+
+ {
+ /* Data[10].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
+ /* Data[10].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
+ /* Data[10].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
+ 0
+ },
+
+ {
+ /* Data[11].ctlEdges[0].bChannel */ FREQ2FBIN(2422, 1),
+ /* Data[11].ctlEdges[1].bChannel */ FREQ2FBIN(2427, 1),
+ /* Data[11].ctlEdges[2].bChannel */ FREQ2FBIN(2447, 1),
+ /* Data[11].ctlEdges[3].bChannel */
+ FREQ2FBIN(2462, 1),
+ }
+ },
+ .ctlPowerData_2G = {
+ { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
+ { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
+ { { {60, 1}, {60, 0}, {60, 0}, {60, 1} } },
+
+ { { {60, 1}, {60, 0}, {0, 0}, {0, 0} } },
+ { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
+ { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
+
+ { { {60, 0}, {60, 1}, {60, 1}, {60, 0} } },
+ { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
+ { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
+
+ { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
+ { { {60, 0}, {60, 1}, {60, 1}, {60, 1} } },
+ },
+ .modalHeader5G = {
+ /* 4 idle,t1,t2,b (4 bits per setting) */
+ .antCtrlCommon = LE32(0x110),
+ /* 4 ra1l1, ra2l1, ra1l2,ra2l2,ra12 */
+ .antCtrlCommon2 = LE32(0x22222),
+ /* antCtrlChain 6 idle, t,r,rx1,rx12,b (2 bits each) */
+ .antCtrlChain = {
+ LE16(0x000), LE16(0x000), LE16(0x000),
+ },
+ /* xatten1DB 3 xatten1_db for AR9280 (0xa20c/b20c 5:0) */
+ .xatten1DB = {0, 0, 0},
+
+ /*
+ * xatten1Margin[AR9300_MAX_CHAINS]; 3 xatten1_margin
+ * for merlin (0xa20c/b20c 16:12)
+ */
+ .xatten1Margin = {0, 0, 0},
+ .tempSlope = 68,
+ .voltSlope = 0,
+ /* spurChans spur channels in usual fbin coding format */
+ .spurChans = {0, 0, 0, 0, 0},
+ /* noiseFloorThreshCh Check if the register is per chain */
+ .noiseFloorThreshCh = {-1, 0, 0},
+ .ob = {3, 3, 3}, /* 3 chain */
+ .db_stage2 = {3, 3, 3}, /* 3 chain */
+ .db_stage3 = {3, 3, 3}, /* doesn't exist for 2G */
+ .db_stage4 = {3, 3, 3}, /* doesn't exist for 2G */
+ .xpaBiasLvl = 0,
+ .txFrameToDataStart = 0x0e,
+ .txFrameToPaOn = 0x0e,
+ .txClip = 3, /* 4 bits tx_clip, 4 bits dac_scale_cck */
+ .antennaGain = 0,
+ .switchSettling = 0x2d,
+ .adcDesiredSize = -30,
+ .txEndToXpaOff = 0,
+ .txEndToRxOn = 0x2,
+ .txFrameToXpaOn = 0xe,
+ .thresh62 = 28,
+ .futureModal = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ },
+ },
+ .calFreqPier5G = {
+ FREQ2FBIN(5180, 0),
+ FREQ2FBIN(5220, 0),
+ FREQ2FBIN(5320, 0),
+ FREQ2FBIN(5400, 0),
+ FREQ2FBIN(5500, 0),
+ FREQ2FBIN(5600, 0),
+ FREQ2FBIN(5725, 0),
+ FREQ2FBIN(5825, 0)
+ },
+ .calPierData5G = {
+ {
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ },
+ {
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ },
+ {
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ },
+
+ },
+ .calTarget_freqbin_5G = {
+ FREQ2FBIN(5180, 0),
+ FREQ2FBIN(5220, 0),
+ FREQ2FBIN(5320, 0),
+ FREQ2FBIN(5400, 0),
+ FREQ2FBIN(5500, 0),
+ FREQ2FBIN(5600, 0),
+ FREQ2FBIN(5725, 0),
+ FREQ2FBIN(5825, 0)
+ },
+ .calTarget_freqbin_5GHT20 = {
+ FREQ2FBIN(5180, 0),
+ FREQ2FBIN(5240, 0),
+ FREQ2FBIN(5320, 0),
+ FREQ2FBIN(5500, 0),
+ FREQ2FBIN(5700, 0),
+ FREQ2FBIN(5745, 0),
+ FREQ2FBIN(5725, 0),
+ FREQ2FBIN(5825, 0)
+ },
+ .calTarget_freqbin_5GHT40 = {
+ FREQ2FBIN(5180, 0),
+ FREQ2FBIN(5240, 0),
+ FREQ2FBIN(5320, 0),
+ FREQ2FBIN(5500, 0),
+ FREQ2FBIN(5700, 0),
+ FREQ2FBIN(5745, 0),
+ FREQ2FBIN(5725, 0),
+ FREQ2FBIN(5825, 0)
+ },
+ .calTargetPower5G = {
+ /* 6-24,36,48,54 */
+ { {20, 20, 20, 10} },
+ { {20, 20, 20, 10} },
+ { {20, 20, 20, 10} },
+ { {20, 20, 20, 10} },
+ { {20, 20, 20, 10} },
+ { {20, 20, 20, 10} },
+ { {20, 20, 20, 10} },
+ { {20, 20, 20, 10} },
+ },
+ .calTargetPower5GHT20 = {
+ /*
+ * 0_8_16,1-3_9-11_17-19,
+ * 4,5,6,7,12,13,14,15,20,21,22,23
+ */
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ },
+ .calTargetPower5GHT40 = {
+ /*
+ * 0_8_16,1-3_9-11_17-19,
+ * 4,5,6,7,12,13,14,15,20,21,22,23
+ */
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
+ },
+ .ctlIndex_5G = {
+ 0x10, 0x16, 0x18, 0x40, 0x46,
+ 0x48, 0x30, 0x36, 0x38
+ },
+ .ctl_freqbin_5G = {
+ {
+ /* Data[0].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
+ /* Data[0].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
+ /* Data[0].ctlEdges[2].bChannel */ FREQ2FBIN(5280, 0),
+ /* Data[0].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0),
+ /* Data[0].ctlEdges[4].bChannel */ FREQ2FBIN(5600, 0),
+ /* Data[0].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
+ /* Data[0].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0),
+ /* Data[0].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0)
+ },
+ {
+ /* Data[1].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
+ /* Data[1].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
+ /* Data[1].ctlEdges[2].bChannel */ FREQ2FBIN(5280, 0),
+ /* Data[1].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0),
+ /* Data[1].ctlEdges[4].bChannel */ FREQ2FBIN(5520, 0),
+ /* Data[1].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
+ /* Data[1].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0),
+ /* Data[1].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0)
+ },
+
+ {
+ /* Data[2].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0),
+ /* Data[2].ctlEdges[1].bChannel */ FREQ2FBIN(5230, 0),
+ /* Data[2].ctlEdges[2].bChannel */ FREQ2FBIN(5270, 0),
+ /* Data[2].ctlEdges[3].bChannel */ FREQ2FBIN(5310, 0),
+ /* Data[2].ctlEdges[4].bChannel */ FREQ2FBIN(5510, 0),
+ /* Data[2].ctlEdges[5].bChannel */ FREQ2FBIN(5550, 0),
+ /* Data[2].ctlEdges[6].bChannel */ FREQ2FBIN(5670, 0),
+ /* Data[2].ctlEdges[7].bChannel */ FREQ2FBIN(5755, 0)
+ },
+
+ {
+ /* Data[3].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
+ /* Data[3].ctlEdges[1].bChannel */ FREQ2FBIN(5200, 0),
+ /* Data[3].ctlEdges[2].bChannel */ FREQ2FBIN(5260, 0),
+ /* Data[3].ctlEdges[3].bChannel */ FREQ2FBIN(5320, 0),
+ /* Data[3].ctlEdges[4].bChannel */ FREQ2FBIN(5500, 0),
+ /* Data[3].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
+ /* Data[3].ctlEdges[6].bChannel */ 0xFF,
+ /* Data[3].ctlEdges[7].bChannel */ 0xFF,
+ },
+
+ {
+ /* Data[4].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
+ /* Data[4].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
+ /* Data[4].ctlEdges[2].bChannel */ FREQ2FBIN(5500, 0),
+ /* Data[4].ctlEdges[3].bChannel */ FREQ2FBIN(5700, 0),
+ /* Data[4].ctlEdges[4].bChannel */ 0xFF,
+ /* Data[4].ctlEdges[5].bChannel */ 0xFF,
+ /* Data[4].ctlEdges[6].bChannel */ 0xFF,
+ /* Data[4].ctlEdges[7].bChannel */ 0xFF,
+ },
+
+ {
+ /* Data[5].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0),
+ /* Data[5].ctlEdges[1].bChannel */ FREQ2FBIN(5270, 0),
+ /* Data[5].ctlEdges[2].bChannel */ FREQ2FBIN(5310, 0),
+ /* Data[5].ctlEdges[3].bChannel */ FREQ2FBIN(5510, 0),
+ /* Data[5].ctlEdges[4].bChannel */ FREQ2FBIN(5590, 0),
+ /* Data[5].ctlEdges[5].bChannel */ FREQ2FBIN(5670, 0),
+ /* Data[5].ctlEdges[6].bChannel */ 0xFF,
+ /* Data[5].ctlEdges[7].bChannel */ 0xFF
+ },
+
+ {
+ /* Data[6].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
+ /* Data[6].ctlEdges[1].bChannel */ FREQ2FBIN(5200, 0),
+ /* Data[6].ctlEdges[2].bChannel */ FREQ2FBIN(5220, 0),
+ /* Data[6].ctlEdges[3].bChannel */ FREQ2FBIN(5260, 0),
+ /* Data[6].ctlEdges[4].bChannel */ FREQ2FBIN(5500, 0),
+ /* Data[6].ctlEdges[5].bChannel */ FREQ2FBIN(5600, 0),
+ /* Data[6].ctlEdges[6].bChannel */ FREQ2FBIN(5700, 0),
+ /* Data[6].ctlEdges[7].bChannel */ FREQ2FBIN(5745, 0)
+ },
+
+ {
+ /* Data[7].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
+ /* Data[7].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
+ /* Data[7].ctlEdges[2].bChannel */ FREQ2FBIN(5320, 0),
+ /* Data[7].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0),
+ /* Data[7].ctlEdges[4].bChannel */ FREQ2FBIN(5560, 0),
+ /* Data[7].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
+ /* Data[7].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0),
+ /* Data[7].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0)
+ },
+
+ {
+ /* Data[8].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0),
+ /* Data[8].ctlEdges[1].bChannel */ FREQ2FBIN(5230, 0),
+ /* Data[8].ctlEdges[2].bChannel */ FREQ2FBIN(5270, 0),
+ /* Data[8].ctlEdges[3].bChannel */ FREQ2FBIN(5510, 0),
+ /* Data[8].ctlEdges[4].bChannel */ FREQ2FBIN(5550, 0),
+ /* Data[8].ctlEdges[5].bChannel */ FREQ2FBIN(5670, 0),
+ /* Data[8].ctlEdges[6].bChannel */ FREQ2FBIN(5755, 0),
+ /* Data[8].ctlEdges[7].bChannel */ FREQ2FBIN(5795, 0)
+ }
+ },
+ .ctlPowerData_5G = {
+ {
+ {
+ {60, 1}, {60, 1}, {60, 1}, {60, 1},
+ {60, 1}, {60, 1}, {60, 1}, {60, 0},
+ }
+ },
+ {
+ {
+ {60, 1}, {60, 1}, {60, 1}, {60, 1},
+ {60, 1}, {60, 1}, {60, 1}, {60, 0},
+ }
+ },
+ {
+ {
+ {60, 0}, {60, 1}, {60, 0}, {60, 1},
+ {60, 1}, {60, 1}, {60, 1}, {60, 1},
+ }
+ },
+ {
+ {
+ {60, 0}, {60, 1}, {60, 1}, {60, 0},
+ {60, 1}, {60, 0}, {60, 0}, {60, 0},
+ }
+ },
+ {
+ {
+ {60, 1}, {60, 1}, {60, 1}, {60, 0},
+ {60, 0}, {60, 0}, {60, 0}, {60, 0},
+ }
+ },
+ {
+ {
+ {60, 1}, {60, 1}, {60, 1}, {60, 1},
+ {60, 1}, {60, 0}, {60, 0}, {60, 0},
+ }
+ },
+ {
+ {
+ {60, 1}, {60, 1}, {60, 1}, {60, 1},
+ {60, 1}, {60, 1}, {60, 1}, {60, 1},
+ }
+ },
+ {
+ {
+ {60, 1}, {60, 1}, {60, 0}, {60, 1},
+ {60, 1}, {60, 1}, {60, 1}, {60, 0},
+ }
+ },
+ {
+ {
+ {60, 1}, {60, 0}, {60, 1}, {60, 1},
+ {60, 1}, {60, 1}, {60, 0}, {60, 1},
+ }
+ },
+ }
+};
+
+static int ath9k_hw_ar9300_check_eeprom(struct ath_hw *ah)
+{
+ return 0;
+}
+
+static u32 ath9k_hw_ar9300_get_eeprom(struct ath_hw *ah,
+ enum eeprom_param param)
+{
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ struct ar9300_base_eep_hdr *pBase = &eep->baseEepHeader;
+
+ switch (param) {
+ case EEP_MAC_LSW:
+ return eep->macAddr[0] << 8 | eep->macAddr[1];
+ case EEP_MAC_MID:
+ return eep->macAddr[2] << 8 | eep->macAddr[3];
+ case EEP_MAC_MSW:
+ return eep->macAddr[4] << 8 | eep->macAddr[5];
+ case EEP_REG_0:
+ return le16_to_cpu(pBase->regDmn[0]);
+ case EEP_REG_1:
+ return le16_to_cpu(pBase->regDmn[1]);
+ case EEP_OP_CAP:
+ return pBase->deviceCap;
+ case EEP_OP_MODE:
+ return pBase->opCapFlags.opFlags;
+ case EEP_RF_SILENT:
+ return pBase->rfSilent;
+ case EEP_TX_MASK:
+ return (pBase->txrxMask >> 4) & 0xf;
+ case EEP_RX_MASK:
+ return pBase->txrxMask & 0xf;
+ case EEP_DRIVE_STRENGTH:
+#define AR9300_EEP_BASE_DRIV_STRENGTH 0x1
+ return pBase->miscConfiguration & AR9300_EEP_BASE_DRIV_STRENGTH;
+ case EEP_INTERNAL_REGULATOR:
+ /* Bit 4 is internal regulator flag */
+ return (pBase->featureEnable & 0x10) >> 4;
+ case EEP_SWREG:
+ return le32_to_cpu(pBase->swreg);
+ default:
+ return 0;
+ }
+}
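Most of the cases above are plain bit-field extractions from the base header. A quick standalone check of the txrxMask and featureEnable decoding, using the values from the ar9300_default template (illustrative only, not driver code):

#include <assert.h>

int main(void)
{
	unsigned char txrxMask = 0x77;      /* 4 bits tx and 4 bits rx */
	unsigned char featureEnable = 0x0c; /* bit2 fastClock, bit3 doubling */

	assert(((txrxMask >> 4) & 0xf) == 0x7);     /* EEP_TX_MASK */
	assert((txrxMask & 0xf) == 0x7);            /* EEP_RX_MASK */
	assert(((featureEnable & 0x10) >> 4) == 0); /* EEP_INTERNAL_REGULATOR */
	return 0;
}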
+
+static bool ar9300_eeprom_read_byte(struct ath_common *common, int address,
+ u8 *buffer)
+{
+ u16 val;
+
+ if (unlikely(!ath9k_hw_nvram_read(common, address / 2, &val)))
+ return false;
+
+ *buffer = (val >> (8 * (address % 2))) & 0xff;
+ return true;
+}
+
+static bool ar9300_eeprom_read_word(struct ath_common *common, int address,
+ u8 *buffer)
+{
+ u16 val;
+
+ if (unlikely(!ath9k_hw_nvram_read(common, address / 2, &val)))
+ return false;
+
+ buffer[0] = val >> 8;
+ buffer[1] = val & 0xff;
+
+ return true;
+}
+
+static bool ar9300_read_eeprom(struct ath_hw *ah, int address, u8 *buffer,
+ int count)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ int i;
+
+ if ((address < 0) || ((address + count) / 2 > AR9300_EEPROM_SIZE - 1)) {
+ ath_print(common, ATH_DBG_EEPROM,
+ "eeprom address not in range\n");
+ return false;
+ }
+
+ /*
+ * Since we're reading the bytes in reverse order from a little-endian
+ * word stream, an even address means we only use the lower half of
+ * the 16-bit word at that address
+ */
+ if (address % 2 == 0) {
+ if (!ar9300_eeprom_read_byte(common, address--, buffer++))
+ goto error;
+
+ count--;
+ }
+
+ for (i = 0; i < count / 2; i++) {
+ if (!ar9300_eeprom_read_word(common, address, buffer))
+ goto error;
+
+ address -= 2;
+ buffer += 2;
+ }
+
+ if (count % 2)
+ if (!ar9300_eeprom_read_byte(common, address, buffer))
+ goto error;
+
+ return true;
+
+error:
+ ath_print(common, ATH_DBG_EEPROM,
+ "unable to read eeprom region at offset %d\n", address);
+ return false;
+}
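The helpers above treat the EEPROM as a little-endian 16-bit word stream that is consumed byte-wise from high addresses downwards: the word index is address / 2 and address % 2 selects the byte inside that word. A minimal sketch of the byte selection against an in-memory image (illustrative only; the nvram array and read_byte helper stand in for ath9k_hw_nvram_read and ar9300_eeprom_read_byte):

#include <assert.h>
#include <stdint.h>

static const uint16_t nvram[4] = { 0x1100, 0x3322, 0x5544, 0x7766 };

/* same byte-within-word selection rule as ar9300_eeprom_read_byte() */
static uint8_t read_byte(int address)
{
	uint16_t val = nvram[address / 2];

	return (val >> (8 * (address % 2))) & 0xff;
}

int main(void)
{
	assert(read_byte(0) == 0x00);
	assert(read_byte(1) == 0x11);
	assert(read_byte(6) == 0x66);
	assert(read_byte(7) == 0x77);
	return 0;
}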
+
+static void ar9300_comp_hdr_unpack(u8 *best, int *code, int *reference,
+ int *length, int *major, int *minor)
+{
+ unsigned long value[4];
+
+ value[0] = best[0];
+ value[1] = best[1];
+ value[2] = best[2];
+ value[3] = best[3];
+ *code = ((value[0] >> 5) & 0x0007);
+ *reference = (value[0] & 0x001f) | ((value[1] >> 2) & 0x0020);
+ *length = ((value[1] << 4) & 0x07f0) | ((value[2] >> 4) & 0x000f);
+ *major = (value[2] & 0x000f);
+ *minor = (value[3] & 0x00ff);
+}
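The four header bytes unpacked above carry a 3-bit compression code, a 6-bit reference template id, an 11-bit payload length and a 4-bit/8-bit major/minor version. A round-trip sketch of the same layout, packing the fields and then recovering them with the shifts used above (comp_hdr_pack is a made-up helper, not part of the patch):

#include <assert.h>

static void comp_hdr_pack(unsigned char *hdr, int code, int reference,
			  int length, int major, int minor)
{
	hdr[0] = ((code & 0x7) << 5) | (reference & 0x1f);
	hdr[1] = ((reference & 0x20) << 2) | ((length >> 4) & 0x7f);
	hdr[2] = ((length & 0xf) << 4) | (major & 0xf);
	hdr[3] = minor & 0xff;
}

int main(void)
{
	unsigned char hdr[4];

	comp_hdr_pack(hdr, 2 /* _CompressBlock */, 2, 300, 1, 0);

	assert(((hdr[0] >> 5) & 0x7) == 2);                              /* code */
	assert(((hdr[0] & 0x1f) | ((hdr[1] >> 2) & 0x20)) == 2);         /* ref */
	assert((((hdr[1] << 4) & 0x7f0) | ((hdr[2] >> 4) & 0xf)) == 300); /* len */
	assert((hdr[2] & 0xf) == 1 && hdr[3] == 0);                      /* ver */
	return 0;
}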
+
+static u16 ar9300_comp_cksum(u8 *data, int dsize)
+{
+ int it, checksum = 0;
+
+ for (it = 0; it < dsize; it++) {
+ checksum += data[it];
+ checksum &= 0xffff;
+ }
+
+ return checksum;
+}
+
+static bool ar9300_uncompress_block(struct ath_hw *ah,
+ u8 *mptr,
+ int mdataSize,
+ u8 *block,
+ int size)
+{
+ int it;
+ int spot;
+ int offset;
+ int length;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ spot = 0;
+
+ for (it = 0; it < size; it += (length+2)) {
+ offset = block[it];
+ offset &= 0xff;
+ spot += offset;
+ length = block[it+1];
+ length &= 0xff;
+
+ if (length > 0 && spot >= 0 && spot+length < mdataSize) {
+ ath_print(common, ATH_DBG_EEPROM,
+ "Restore at %d: spot=%d "
+ "offset=%d length=%d\n",
+ it, spot, offset, length);
+ memcpy(&mptr[spot], &block[it+2], length);
+ spot += length;
+ } else if (length > 0) {
+ ath_print(common, ATH_DBG_EEPROM,
+ "Bad restore at %d: spot=%d "
+ "offset=%d length=%d\n",
+ it, spot, offset, length);
+ return false;
+ }
+ }
+ return true;
+}
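Each record in the compressed block is a (skip, length) byte pair followed by length payload bytes, with spot acting as the running write cursor in the destination structure. A minimal sketch of that record walk outside the driver (illustrative only; error paths omitted and the restore helper is a made-up name):

#include <assert.h>
#include <string.h>

static void restore(unsigned char *mptr, const unsigned char *block, int size)
{
	int spot = 0, it;

	for (it = 0; it < size; it += block[it + 1] + 2) {
		spot += block[it];                       /* skip forward */
		memcpy(&mptr[spot], &block[it + 2], block[it + 1]);
		spot += block[it + 1];
	}
}

int main(void)
{
	unsigned char out[16] = { 0 };
	/* skip 2, copy 3 bytes; then skip 1 more, copy 2 bytes */
	const unsigned char block[] = { 2, 3, 0xaa, 0xbb, 0xcc,
					1, 2, 0xdd, 0xee };

	restore(out, block, sizeof(block));
	assert(out[2] == 0xaa && out[4] == 0xcc);
	assert(out[6] == 0xdd && out[7] == 0xee);
	return 0;
}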
+
+static int ar9300_compress_decision(struct ath_hw *ah,
+ int it,
+ int code,
+ int reference,
+ u8 *mptr,
+ u8 *word, int length, int mdata_size)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ u8 *dptr;
+
+ switch (code) {
+ case _CompressNone:
+ if (length != mdata_size) {
+ ath_print(common, ATH_DBG_EEPROM,
+ "EEPROM structure size mismatch"
+ "memory=%d eeprom=%d\n", mdata_size, length);
+ return -1;
+ }
+ memcpy(mptr, (u8 *) (word + COMP_HDR_LEN), length);
+ ath_print(common, ATH_DBG_EEPROM, "restored eeprom %d:"
+ " uncompressed, length %d\n", it, length);
+ break;
+ case _CompressBlock:
+ if (reference == 0) {
+ dptr = mptr;
+ } else {
+ if (reference != 2) {
+ ath_print(common, ATH_DBG_EEPROM,
+ "cant find reference eeprom"
+ "struct %d\n", reference);
+ return -1;
+ }
+ memcpy(mptr, &ar9300_default, mdata_size);
+ }
+ ath_print(common, ATH_DBG_EEPROM,
+ "restore eeprom %d: block, reference %d,"
+ " length %d\n", it, reference, length);
+ ar9300_uncompress_block(ah, mptr, mdata_size,
+ (u8 *) (word + COMP_HDR_LEN), length);
+ break;
+ default:
+ ath_print(common, ATH_DBG_EEPROM, "unknown compression"
+ " code %d\n", code);
+ return -1;
+ }
+ return 0;
+}
+
+/*
+ * Read the configuration data from the eeprom.
+ * The data can be put in any specified memory buffer.
+ *
+ * Returns -1 on error.
+ * Returns address of next memory location on success.
+ */
+static int ar9300_eeprom_restore_internal(struct ath_hw *ah,
+ u8 *mptr, int mdata_size)
+{
+#define MDEFAULT 15
+#define MSTATE 100
+ int cptr;
+ u8 *word;
+ int code;
+ int reference, length, major, minor;
+ int osize;
+ int it;
+ u16 checksum, mchecksum;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ word = kzalloc(2048, GFP_KERNEL);
+ if (!word)
+ return -1;
+
+ memcpy(mptr, &ar9300_default, mdata_size);
+
+ cptr = AR9300_BASE_ADDR;
+ for (it = 0; it < MSTATE; it++) {
+ if (!ar9300_read_eeprom(ah, cptr, word, COMP_HDR_LEN))
+ goto fail;
+
+ if ((word[0] == 0 && word[1] == 0 && word[2] == 0 &&
+ word[3] == 0) || (word[0] == 0xff && word[1] == 0xff
+ && word[2] == 0xff && word[3] == 0xff))
+ break;
+
+ ar9300_comp_hdr_unpack(word, &code, &reference,
+ &length, &major, &minor);
+ ath_print(common, ATH_DBG_EEPROM,
+ "Found block at %x: code=%d ref=%d"
+ "length=%d major=%d minor=%d\n", cptr, code,
+ reference, length, major, minor);
+ if (length >= 1024) {
+ ath_print(common, ATH_DBG_EEPROM,
+ "Skipping bad header\n");
+ cptr -= COMP_HDR_LEN;
+ continue;
+ }
+
+ osize = length;
+ ar9300_read_eeprom(ah, cptr, word,
+ COMP_HDR_LEN + osize + COMP_CKSUM_LEN);
+ checksum = ar9300_comp_cksum(&word[COMP_HDR_LEN], length);
+ mchecksum = word[COMP_HDR_LEN + osize] |
+ (word[COMP_HDR_LEN + osize + 1] << 8);
+ ath_print(common, ATH_DBG_EEPROM,
+ "checksum %x %x\n", checksum, mchecksum);
+ if (checksum == mchecksum) {
+ ar9300_compress_decision(ah, it, code, reference, mptr,
+ word, length, mdata_size);
+ } else {
+ ath_print(common, ATH_DBG_EEPROM,
+ "skipping block with bad checksum\n");
+ }
+ cptr -= (COMP_HDR_LEN + osize + COMP_CKSUM_LEN);
+ }
+
+ kfree(word);
+ return cptr;
+
+fail:
+ kfree(word);
+ return -1;
+}
+
+/*
+ * Restore the configuration structure by reading the eeprom.
+ * This function destroys any existing in-memory structure
+ * content.
+ */
+static bool ath9k_hw_ar9300_fill_eeprom(struct ath_hw *ah)
+{
+ u8 *mptr = (u8 *) &ah->eeprom.ar9300_eep;
+
+ if (ar9300_eeprom_restore_internal(ah, mptr,
+ sizeof(struct ar9300_eeprom)) < 0)
+ return false;
+
+ return true;
+}
+
+/* XXX: review hardware docs */
+static int ath9k_hw_ar9300_get_eeprom_ver(struct ath_hw *ah)
+{
+ return ah->eeprom.ar9300_eep.eepromVersion;
+}
+
+/* XXX: could be read from the eepromVersion, not sure yet */
+static int ath9k_hw_ar9300_get_eeprom_rev(struct ath_hw *ah)
+{
+ return 0;
+}
+
+static u8 ath9k_hw_ar9300_get_num_ant_config(struct ath_hw *ah,
+ enum ieee80211_band freq_band)
+{
+ return 1;
+}
+
+static u16 ath9k_hw_ar9300_get_eeprom_antenna_cfg(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ return -EINVAL;
+}
+
+static s32 ar9003_hw_xpa_bias_level_get(struct ath_hw *ah, bool is2ghz)
+{
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+
+ if (is2ghz)
+ return eep->modalHeader2G.xpaBiasLvl;
+ else
+ return eep->modalHeader5G.xpaBiasLvl;
+}
+
+static void ar9003_hw_xpa_bias_level_apply(struct ath_hw *ah, bool is2ghz)
+{
+ int bias = ar9003_hw_xpa_bias_level_get(ah, is2ghz);
+ REG_RMW_FIELD(ah, AR_CH0_TOP, AR_CH0_TOP_XPABIASLVL, (bias & 0x3));
+ REG_RMW_FIELD(ah, AR_CH0_THERM, AR_CH0_THERM_SPARE,
+ ((bias >> 2) & 0x3));
+}
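The bias level is split across two 2-bit register fields: the low two bits land in the CH0_TOP XPABIASLVL field and the next two bits are parked in the CH0_THERM spare field. A tiny self-check of that split (illustrative only; the example value is made up):

#include <assert.h>

int main(void)
{
	int bias = 0x6; /* example xpaBiasLvl */

	assert((bias & 0x3) == 2);        /* -> AR_CH0_TOP_XPABIASLVL */
	assert(((bias >> 2) & 0x3) == 1); /* -> AR_CH0_THERM_SPARE */
	return 0;
}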
+
+static u32 ar9003_hw_ant_ctrl_common_get(struct ath_hw *ah, bool is2ghz)
+{
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ __le32 val;
+
+ if (is2ghz)
+ val = eep->modalHeader2G.antCtrlCommon;
+ else
+ val = eep->modalHeader5G.antCtrlCommon;
+ return le32_to_cpu(val);
+}
+
+static u32 ar9003_hw_ant_ctrl_common_2_get(struct ath_hw *ah, bool is2ghz)
+{
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ __le32 val;
+
+ if (is2ghz)
+ val = eep->modalHeader2G.antCtrlCommon2;
+ else
+ val = eep->modalHeader5G.antCtrlCommon2;
+ return le32_to_cpu(val);
+}
+
+static u16 ar9003_hw_ant_ctrl_chain_get(struct ath_hw *ah,
+ int chain,
+ bool is2ghz)
+{
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ __le16 val = 0;
+
+ if (chain >= 0 && chain < AR9300_MAX_CHAINS) {
+ if (is2ghz)
+ val = eep->modalHeader2G.antCtrlChain[chain];
+ else
+ val = eep->modalHeader5G.antCtrlChain[chain];
+ }
+
+ return le16_to_cpu(val);
+}
+
+static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
+{
+ u32 value = ar9003_hw_ant_ctrl_common_get(ah, is2ghz);
+ REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM, AR_SWITCH_TABLE_COM_ALL, value);
+
+ value = ar9003_hw_ant_ctrl_common_2_get(ah, is2ghz);
+ REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM_2, AR_SWITCH_TABLE_COM2_ALL, value);
+
+ value = ar9003_hw_ant_ctrl_chain_get(ah, 0, is2ghz);
+ REG_RMW_FIELD(ah, AR_PHY_SWITCH_CHAIN_0, AR_SWITCH_TABLE_ALL, value);
+
+ value = ar9003_hw_ant_ctrl_chain_get(ah, 1, is2ghz);
+ REG_RMW_FIELD(ah, AR_PHY_SWITCH_CHAIN_1, AR_SWITCH_TABLE_ALL, value);
+
+ value = ar9003_hw_ant_ctrl_chain_get(ah, 2, is2ghz);
+ REG_RMW_FIELD(ah, AR_PHY_SWITCH_CHAIN_2, AR_SWITCH_TABLE_ALL, value);
+}
+
+static void ar9003_hw_drive_strength_apply(struct ath_hw *ah)
+{
+ int drive_strength;
+ unsigned long reg;
+
+ drive_strength = ath9k_hw_ar9300_get_eeprom(ah, EEP_DRIVE_STRENGTH);
+
+ if (!drive_strength)
+ return;
+
+ reg = REG_READ(ah, AR_PHY_65NM_CH0_BIAS1);
+ reg &= ~0x00ffffc0;
+ reg |= 0x5 << 21;
+ reg |= 0x5 << 18;
+ reg |= 0x5 << 15;
+ reg |= 0x5 << 12;
+ reg |= 0x5 << 9;
+ reg |= 0x5 << 6;
+ REG_WRITE(ah, AR_PHY_65NM_CH0_BIAS1, reg);
+
+ reg = REG_READ(ah, AR_PHY_65NM_CH0_BIAS2);
+ reg &= ~0xffffffe0;
+ reg |= 0x5 << 29;
+ reg |= 0x5 << 26;
+ reg |= 0x5 << 23;
+ reg |= 0x5 << 20;
+ reg |= 0x5 << 17;
+ reg |= 0x5 << 14;
+ reg |= 0x5 << 11;
+ reg |= 0x5 << 8;
+ reg |= 0x5 << 5;
+ REG_WRITE(ah, AR_PHY_65NM_CH0_BIAS2, reg);
+
+ reg = REG_READ(ah, AR_PHY_65NM_CH0_BIAS4);
+ reg &= ~0xff800000;
+ reg |= 0x5 << 29;
+ reg |= 0x5 << 26;
+ reg |= 0x5 << 23;
+ REG_WRITE(ah, AR_PHY_65NM_CH0_BIAS4, reg);
+}
+
+static void ar9003_hw_internal_regulator_apply(struct ath_hw *ah)
+{
+ int internal_regulator =
+ ath9k_hw_ar9300_get_eeprom(ah, EEP_INTERNAL_REGULATOR);
+
+ if (internal_regulator) {
+ /* Internal regulator is ON. Write swreg register. */
+ int swreg = ath9k_hw_ar9300_get_eeprom(ah, EEP_SWREG);
+ REG_WRITE(ah, AR_RTC_REG_CONTROL1,
+ REG_READ(ah, AR_RTC_REG_CONTROL1) &
+ (~AR_RTC_REG_CONTROL1_SWREG_PROGRAM));
+ REG_WRITE(ah, AR_RTC_REG_CONTROL0, swreg);
+ /* Set REG_CONTROL1.SWREG_PROGRAM */
+ REG_WRITE(ah, AR_RTC_REG_CONTROL1,
+ REG_READ(ah,
+ AR_RTC_REG_CONTROL1) |
+ AR_RTC_REG_CONTROL1_SWREG_PROGRAM);
+ } else {
+ REG_WRITE(ah, AR_RTC_SLEEP_CLK,
+ (REG_READ(ah,
+ AR_RTC_SLEEP_CLK) |
+ AR_RTC_FORCE_SWREG_PRD));
+ }
+}
+
+static void ath9k_hw_ar9300_set_board_values(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ ar9003_hw_xpa_bias_level_apply(ah, IS_CHAN_2GHZ(chan));
+ ar9003_hw_ant_ctrl_apply(ah, IS_CHAN_2GHZ(chan));
+ ar9003_hw_drive_strength_apply(ah);
+ ar9003_hw_internal_regulator_apply(ah);
+}
+
+static void ath9k_hw_ar9300_set_addac(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+}
+
+/*
+ * Returns the interpolated y value corresponding to the specified x value
+ * from the np ordered pairs of data (px,py).
+ * The pairs do not have to be in any order.
+ * If the specified x value is less than any of the px,
+ * the returned y value is equal to the py for the lowest px.
+ * If the specified x value is greater than any of the px,
+ * the returned y value is equal to the py for the highest px.
+ */
+static int ar9003_hw_power_interpolate(int32_t x,
+ int32_t *px, int32_t *py, u_int16_t np)
+{
+ int ip = 0;
+ int lx = 0, ly = 0, lhave = 0;
+ int hx = 0, hy = 0, hhave = 0;
+ int dx = 0;
+ int y = 0;
+
+ lhave = 0;
+ hhave = 0;
+
+ /* identify best lower and higher x calibration measurement */
+ for (ip = 0; ip < np; ip++) {
+ dx = x - px[ip];
+
+ /* this measurement is higher than our desired x */
+ if (dx <= 0) {
+ if (!hhave || dx > (x - hx)) {
+ /* new best higher x measurement */
+ hx = px[ip];
+ hy = py[ip];
+ hhave = 1;
+ }
+ }
+ /* this measurement is lower than our desired x */
+ if (dx >= 0) {
+ if (!lhave || dx < (x - lx)) {
+ /* new best lower x measurement */
+ lx = px[ip];
+ ly = py[ip];
+ lhave = 1;
+ }
+ }
+ }
+
+ /* the low x is good */
+ if (lhave) {
+ /* so is the high x */
+ if (hhave) {
+ /* they're the same, so just pick one */
+ if (hx == lx)
+ y = ly;
+ else /* interpolate */
+ y = ly + (((x - lx) * (hy - ly)) / (hx - lx));
+ } else /* only low is good, use it */
+ y = ly;
+ } else if (hhave) /* only high is good, use it */
+ y = hy;
+ else /* nothing is good; this should never happen unless np = 0 */
+ y = -(1 << 30);
+ return y;
+}
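A worked example of the interpolation above, reduced to the single lower/higher pier pair it converges on: with made-up target powers of 32 (half-dB steps) at 2412 MHz and 28 at 2472 MHz, the value at 2437 MHz comes out as 31. Standalone sketch (interp is an illustrative helper, not driver code):

#include <stdio.h>

static int interp(int x, int lx, int ly, int hx, int hy)
{
	if (hx == lx)
		return ly;
	return ly + ((x - lx) * (hy - ly)) / (hx - lx);
}

int main(void)
{
	/* 32 + (25 * -4) / 60 = 32 - 1 = 31 with integer division */
	printf("%d\n", interp(2437, 2412, 32, 2472, 28));
	return 0;
}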
+
+static u8 ar9003_hw_eeprom_get_tgt_pwr(struct ath_hw *ah,
+ u16 rateIndex, u16 freq, bool is2GHz)
+{
+ u16 numPiers, i;
+ s32 targetPowerArray[AR9300_NUM_5G_20_TARGET_POWERS];
+ s32 freqArray[AR9300_NUM_5G_20_TARGET_POWERS];
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ struct cal_tgt_pow_legacy *pEepromTargetPwr;
+ u8 *pFreqBin;
+
+ if (is2GHz) {
+ numPiers = AR9300_NUM_2G_20_TARGET_POWERS;
+ pEepromTargetPwr = eep->calTargetPower2G;
+ pFreqBin = eep->calTarget_freqbin_2G;
+ } else {
+ numPiers = AR9300_NUM_5G_20_TARGET_POWERS;
+ pEepromTargetPwr = eep->calTargetPower5G;
+ pFreqBin = eep->calTarget_freqbin_5G;
+ }
+
+ /*
+ * create array of channels and targetpower from
+ * targetpower piers stored on eeprom
+ */
+ for (i = 0; i < numPiers; i++) {
+ freqArray[i] = FBIN2FREQ(pFreqBin[i], is2GHz);
+ targetPowerArray[i] = pEepromTargetPwr[i].tPow2x[rateIndex];
+ }
+
+ /* interpolate to get target power for given frequency */
+ return (u8) ar9003_hw_power_interpolate((s32) freq,
+ freqArray,
+ targetPowerArray, numPiers);
+}
+
+static u8 ar9003_hw_eeprom_get_ht20_tgt_pwr(struct ath_hw *ah,
+ u16 rateIndex,
+ u16 freq, bool is2GHz)
+{
+ u16 numPiers, i;
+ s32 targetPowerArray[AR9300_NUM_5G_20_TARGET_POWERS];
+ s32 freqArray[AR9300_NUM_5G_20_TARGET_POWERS];
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ struct cal_tgt_pow_ht *pEepromTargetPwr;
+ u8 *pFreqBin;
+
+ if (is2GHz) {
+ numPiers = AR9300_NUM_2G_20_TARGET_POWERS;
+ pEepromTargetPwr = eep->calTargetPower2GHT20;
+ pFreqBin = eep->calTarget_freqbin_2GHT20;
+ } else {
+ numPiers = AR9300_NUM_5G_20_TARGET_POWERS;
+ pEepromTargetPwr = eep->calTargetPower5GHT20;
+ pFreqBin = eep->calTarget_freqbin_5GHT20;
+ }
+
+ /*
+ * create array of channels and targetpower
+ * from targetpower piers stored on eeprom
+ */
+ for (i = 0; i < numPiers; i++) {
+ freqArray[i] = FBIN2FREQ(pFreqBin[i], is2GHz);
+ targetPowerArray[i] = pEepromTargetPwr[i].tPow2x[rateIndex];
+ }
+
+ /* interpolate to get target power for given frequency */
+ return (u8) ar9003_hw_power_interpolate((s32) freq,
+ freqArray,
+ targetPowerArray, numPiers);
+}
+
+static u8 ar9003_hw_eeprom_get_ht40_tgt_pwr(struct ath_hw *ah,
+ u16 rateIndex,
+ u16 freq, bool is2GHz)
+{
+ u16 numPiers, i;
+ s32 targetPowerArray[AR9300_NUM_5G_40_TARGET_POWERS];
+ s32 freqArray[AR9300_NUM_5G_40_TARGET_POWERS];
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ struct cal_tgt_pow_ht *pEepromTargetPwr;
+ u8 *pFreqBin;
+
+ if (is2GHz) {
+ numPiers = AR9300_NUM_2G_40_TARGET_POWERS;
+ pEepromTargetPwr = eep->calTargetPower2GHT40;
+ pFreqBin = eep->calTarget_freqbin_2GHT40;
+ } else {
+ numPiers = AR9300_NUM_5G_40_TARGET_POWERS;
+ pEepromTargetPwr = eep->calTargetPower5GHT40;
+ pFreqBin = eep->calTarget_freqbin_5GHT40;
+ }
+
+ /*
+ * create array of channels and targetpower from
+ * targetpower piers stored on eeprom
+ */
+ for (i = 0; i < numPiers; i++) {
+ freqArray[i] = FBIN2FREQ(pFreqBin[i], is2GHz);
+ targetPowerArray[i] = pEepromTargetPwr[i].tPow2x[rateIndex];
+ }
+
+ /* interpolate to get target power for given frequency */
+ return (u8) ar9003_hw_power_interpolate((s32) freq,
+ freqArray,
+ targetPowerArray, numPiers);
+}
+
+static u8 ar9003_hw_eeprom_get_cck_tgt_pwr(struct ath_hw *ah,
+ u16 rateIndex, u16 freq)
+{
+ u16 numPiers = AR9300_NUM_2G_CCK_TARGET_POWERS, i;
+ s32 targetPowerArray[AR9300_NUM_2G_CCK_TARGET_POWERS];
+ s32 freqArray[AR9300_NUM_2G_CCK_TARGET_POWERS];
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ struct cal_tgt_pow_legacy *pEepromTargetPwr = eep->calTargetPowerCck;
+ u8 *pFreqBin = eep->calTarget_freqbin_Cck;
+
+ /*
+ * create array of channels and targetpower from
+ * targetpower piers stored on eeprom
+ */
+ for (i = 0; i < numPiers; i++) {
+ freqArray[i] = FBIN2FREQ(pFreqBin[i], 1);
+ targetPowerArray[i] = pEepromTargetPwr[i].tPow2x[rateIndex];
+ }
+
+ /* interpolate to get target power for given frequency */
+ return (u8) ar9003_hw_power_interpolate((s32) freq,
+ freqArray,
+ targetPowerArray, numPiers);
+}
+
+/* Set tx power registers to array of values passed in */
+static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
+{
+#define POW_SM(_r, _s) (((_r) & 0x3f) << (_s))
+ /* make sure forced gain is not set */
+ REG_WRITE(ah, 0xa458, 0);
+
+ /* Write the OFDM power per rate set */
+
+ /* 6 (LSB), 9, 12, 18 (MSB) */
+ REG_WRITE(ah, 0xa3c0,
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 24) |
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 16) |
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 8) |
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 0));
+
+ /* 24 (LSB), 36, 48, 54 (MSB) */
+ REG_WRITE(ah, 0xa3c4,
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_54], 24) |
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_48], 16) |
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_36], 8) |
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 0));
+
+ /* Write the CCK power per rate set */
+
+ /* 1L (LSB), reserved, 2L, 2S (MSB) */
+ REG_WRITE(ah, 0xa3c8,
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 24) |
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 16) |
+ /* POW_SM(txPowerTimes2, 8) | this is reserved for AR9003 */
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 0));
+
+ /* 5.5L (LSB), 5.5S, 11L, 11S (MSB) */
+ REG_WRITE(ah, 0xa3cc,
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_11S], 24) |
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_11L], 16) |
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_5S], 8) |
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 0)
+ );
+
+ /* Write the HT20 power per rate set */
+
+ /* 0/8/16 (LSB), 1-3/9-11/17-19, 4, 5 (MSB) */
+ REG_WRITE(ah, 0xa3d0,
+ POW_SM(pPwrArray[ALL_TARGET_HT20_5], 24) |
+ POW_SM(pPwrArray[ALL_TARGET_HT20_4], 16) |
+ POW_SM(pPwrArray[ALL_TARGET_HT20_1_3_9_11_17_19], 8) |
+ POW_SM(pPwrArray[ALL_TARGET_HT20_0_8_16], 0)
+ );
+
+ /* 6 (LSB), 7, 12, 13 (MSB) */
+ REG_WRITE(ah, 0xa3d4,
+ POW_SM(pPwrArray[ALL_TARGET_HT20_13], 24) |
+ POW_SM(pPwrArray[ALL_TARGET_HT20_12], 16) |
+ POW_SM(pPwrArray[ALL_TARGET_HT20_7], 8) |
+ POW_SM(pPwrArray[ALL_TARGET_HT20_6], 0)
+ );
+
+ /* 14 (LSB), 15, 20, 21 */
+ REG_WRITE(ah, 0xa3e4,
+ POW_SM(pPwrArray[ALL_TARGET_HT20_21], 24) |
+ POW_SM(pPwrArray[ALL_TARGET_HT20_20], 16) |
+ POW_SM(pPwrArray[ALL_TARGET_HT20_15], 8) |
+ POW_SM(pPwrArray[ALL_TARGET_HT20_14], 0)
+ );
+
+ /* Mixed HT20 and HT40 rates */
+
+ /* HT20 22 (LSB), HT20 23, HT40 22, HT40 23 (MSB) */
+ REG_WRITE(ah, 0xa3e8,
+ POW_SM(pPwrArray[ALL_TARGET_HT40_23], 24) |
+ POW_SM(pPwrArray[ALL_TARGET_HT40_22], 16) |
+ POW_SM(pPwrArray[ALL_TARGET_HT20_23], 8) |
+ POW_SM(pPwrArray[ALL_TARGET_HT20_22], 0)
+ );
+
+ /*
+ * Write the HT40 power per rate set
+ * correct PAR difference between HT40 and HT20/LEGACY
+ * 0/8/16 (LSB), 1-3/9-11/17-19, 4, 5 (MSB)
+ */
+ REG_WRITE(ah, 0xa3d8,
+ POW_SM(pPwrArray[ALL_TARGET_HT40_5], 24) |
+ POW_SM(pPwrArray[ALL_TARGET_HT40_4], 16) |
+ POW_SM(pPwrArray[ALL_TARGET_HT40_1_3_9_11_17_19], 8) |
+ POW_SM(pPwrArray[ALL_TARGET_HT40_0_8_16], 0)
+ );
+
+ /* 6 (LSB), 7, 12, 13 (MSB) */
+ REG_WRITE(ah, 0xa3dc,
+ POW_SM(pPwrArray[ALL_TARGET_HT40_13], 24) |
+ POW_SM(pPwrArray[ALL_TARGET_HT40_12], 16) |
+ POW_SM(pPwrArray[ALL_TARGET_HT40_7], 8) |
+ POW_SM(pPwrArray[ALL_TARGET_HT40_6], 0)
+ );
+
+ /* 14 (LSB), 15, 20, 21 */
+ REG_WRITE(ah, 0xa3ec,
+ POW_SM(pPwrArray[ALL_TARGET_HT40_21], 24) |
+ POW_SM(pPwrArray[ALL_TARGET_HT40_20], 16) |
+ POW_SM(pPwrArray[ALL_TARGET_HT40_15], 8) |
+ POW_SM(pPwrArray[ALL_TARGET_HT40_14], 0)
+ );
+
+ return 0;
+#undef POW_SM
+}
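Every register written above packs four per-rate powers, one per byte, each masked to six bits by POW_SM. A small check of the 24/36/48/54 Mbps word (register 0xa3c4), using the 2 GHz legacy targets from the ar9300_default template, 32/32/28/24 half-dB (illustrative only):

#include <assert.h>
#include <stdint.h>

#define POW_SM(_r, _s) (((_r) & 0x3f) << (_s))

int main(void)
{
	/* 54 Mbps in the top byte down to 6-24 Mbps in the bottom byte */
	uint32_t word = POW_SM(24, 24) | POW_SM(28, 16) |
			POW_SM(32, 8) | POW_SM(32, 0);

	assert(word == 0x181c2020);
	return 0;
}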
+
+static void ar9003_hw_set_target_power_eeprom(struct ath_hw *ah, u16 freq)
+{
+ u8 targetPowerValT2[ar9300RateSize];
+ /* XXX: hard code for now, need to get from eeprom struct */
+ u8 ht40PowerIncForPdadc = 0;
+ bool is2GHz = false;
+ unsigned int i = 0;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (freq < 4000)
+ is2GHz = true;
+
+ targetPowerValT2[ALL_TARGET_LEGACY_6_24] =
+ ar9003_hw_eeprom_get_tgt_pwr(ah, LEGACY_TARGET_RATE_6_24, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_LEGACY_36] =
+ ar9003_hw_eeprom_get_tgt_pwr(ah, LEGACY_TARGET_RATE_36, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_LEGACY_48] =
+ ar9003_hw_eeprom_get_tgt_pwr(ah, LEGACY_TARGET_RATE_48, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_LEGACY_54] =
+ ar9003_hw_eeprom_get_tgt_pwr(ah, LEGACY_TARGET_RATE_54, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_LEGACY_1L_5L] =
+ ar9003_hw_eeprom_get_cck_tgt_pwr(ah, LEGACY_TARGET_RATE_1L_5L,
+ freq);
+ targetPowerValT2[ALL_TARGET_LEGACY_5S] =
+ ar9003_hw_eeprom_get_cck_tgt_pwr(ah, LEGACY_TARGET_RATE_5S, freq);
+ targetPowerValT2[ALL_TARGET_LEGACY_11L] =
+ ar9003_hw_eeprom_get_cck_tgt_pwr(ah, LEGACY_TARGET_RATE_11L, freq);
+ targetPowerValT2[ALL_TARGET_LEGACY_11S] =
+ ar9003_hw_eeprom_get_cck_tgt_pwr(ah, LEGACY_TARGET_RATE_11S, freq);
+ targetPowerValT2[ALL_TARGET_HT20_0_8_16] =
+ ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_0_8_16, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_HT20_1_3_9_11_17_19] =
+ ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_1_3_9_11_17_19,
+ freq, is2GHz);
+ targetPowerValT2[ALL_TARGET_HT20_4] =
+ ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_4, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_HT20_5] =
+ ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_5, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_HT20_6] =
+ ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_6, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_HT20_7] =
+ ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_7, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_HT20_12] =
+ ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_12, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_HT20_13] =
+ ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_13, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_HT20_14] =
+ ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_14, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_HT20_15] =
+ ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_15, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_HT20_20] =
+ ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_20, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_HT20_21] =
+ ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_21, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_HT20_22] =
+ ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_22, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_HT20_23] =
+ ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_23, freq,
+ is2GHz);
+ targetPowerValT2[ALL_TARGET_HT40_0_8_16] =
+ ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_0_8_16, freq,
+ is2GHz) + ht40PowerIncForPdadc;
+ targetPowerValT2[ALL_TARGET_HT40_1_3_9_11_17_19] =
+ ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_1_3_9_11_17_19,
+ freq,
+ is2GHz) + ht40PowerIncForPdadc;
+ targetPowerValT2[ALL_TARGET_HT40_4] =
+ ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_4, freq,
+ is2GHz) + ht40PowerIncForPdadc;
+ targetPowerValT2[ALL_TARGET_HT40_5] =
+ ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_5, freq,
+ is2GHz) + ht40PowerIncForPdadc;
+ targetPowerValT2[ALL_TARGET_HT40_6] =
+ ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_6, freq,
+ is2GHz) + ht40PowerIncForPdadc;
+ targetPowerValT2[ALL_TARGET_HT40_7] =
+ ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_7, freq,
+ is2GHz) + ht40PowerIncForPdadc;
+ targetPowerValT2[ALL_TARGET_HT40_12] =
+ ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_12, freq,
+ is2GHz) + ht40PowerIncForPdadc;
+ targetPowerValT2[ALL_TARGET_HT40_13] =
+ ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_13, freq,
+ is2GHz) + ht40PowerIncForPdadc;
+ targetPowerValT2[ALL_TARGET_HT40_14] =
+ ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_14, freq,
+ is2GHz) + ht40PowerIncForPdadc;
+ targetPowerValT2[ALL_TARGET_HT40_15] =
+ ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_15, freq,
+ is2GHz) + ht40PowerIncForPdadc;
+ targetPowerValT2[ALL_TARGET_HT40_20] =
+ ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_20, freq,
+ is2GHz) + ht40PowerIncForPdadc;
+ targetPowerValT2[ALL_TARGET_HT40_21] =
+ ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_21, freq,
+ is2GHz) + ht40PowerIncForPdadc;
+ targetPowerValT2[ALL_TARGET_HT40_22] =
+ ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_22, freq,
+ is2GHz) + ht40PowerIncForPdadc;
+ targetPowerValT2[ALL_TARGET_HT40_23] =
+ ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_23, freq,
+ is2GHz) + ht40PowerIncForPdadc;
+
+ while (i < ar9300RateSize) {
+ ath_print(common, ATH_DBG_EEPROM,
+ "TPC[%02d] 0x%08x ", i, targetPowerValT2[i]);
+ i++;
+
+ ath_print(common, ATH_DBG_EEPROM,
+ "TPC[%02d] 0x%08x ", i, targetPowerValT2[i]);
+ i++;
+
+ ath_print(common, ATH_DBG_EEPROM,
+ "TPC[%02d] 0x%08x ", i, targetPowerValT2[i]);
+ i++;
+
+ ath_print(common, ATH_DBG_EEPROM,
+ "TPC[%02d] 0x%08x\n", i, targetPowerValT2[i]);
+ i++;
+ }
+
+ /* Write target power array to registers */
+ ar9003_hw_tx_power_regwrite(ah, targetPowerValT2);
+}
+
+static int ar9003_hw_cal_pier_get(struct ath_hw *ah,
+ int mode,
+ int ipier,
+ int ichain,
+ int *pfrequency,
+ int *pcorrection,
+ int *ptemperature, int *pvoltage)
+{
+ u8 *pCalPier;
+ struct ar9300_cal_data_per_freq_op_loop *pCalPierStruct;
+ int is2GHz;
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (ichain >= AR9300_MAX_CHAINS) {
+ ath_print(common, ATH_DBG_EEPROM,
+ "Invalid chain index, must be less than %d\n",
+ AR9300_MAX_CHAINS);
+ return -1;
+ }
+
+ if (mode) { /* 5GHz */
+ if (ipier >= AR9300_NUM_5G_CAL_PIERS) {
+ ath_print(common, ATH_DBG_EEPROM,
+ "Invalid 5GHz cal pier index, must "
+ "be less than %d\n",
+ AR9300_NUM_5G_CAL_PIERS);
+ return -1;
+ }
+ pCalPier = &(eep->calFreqPier5G[ipier]);
+ pCalPierStruct = &(eep->calPierData5G[ichain][ipier]);
+ is2GHz = 0;
+ } else {
+ if (ipier >= AR9300_NUM_2G_CAL_PIERS) {
+ ath_print(common, ATH_DBG_EEPROM,
+ "Invalid 2GHz cal pier index, must "
+ "be less than %d\n", AR9300_NUM_2G_CAL_PIERS);
+ return -1;
+ }
+
+ pCalPier = &(eep->calFreqPier2G[ipier]);
+ pCalPierStruct = &(eep->calPierData2G[ichain][ipier]);
+ is2GHz = 1;
+ }
+
+ *pfrequency = FBIN2FREQ(*pCalPier, is2GHz);
+ *pcorrection = pCalPierStruct->refPower;
+ *ptemperature = pCalPierStruct->tempMeas;
+ *pvoltage = pCalPierStruct->voltMeas;
+
+ return 0;
+}
+
+static int ar9003_hw_power_control_override(struct ath_hw *ah,
+ int frequency,
+ int *correction,
+ int *voltage, int *temperature)
+{
+ int tempSlope = 0;
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+
+ REG_RMW(ah, AR_PHY_TPC_11_B0,
+ (correction[0] << AR_PHY_TPC_OLPC_GAIN_DELTA_S),
+ AR_PHY_TPC_OLPC_GAIN_DELTA);
+ REG_RMW(ah, AR_PHY_TPC_11_B1,
+ (correction[1] << AR_PHY_TPC_OLPC_GAIN_DELTA_S),
+ AR_PHY_TPC_OLPC_GAIN_DELTA);
+ REG_RMW(ah, AR_PHY_TPC_11_B2,
+ (correction[2] << AR_PHY_TPC_OLPC_GAIN_DELTA_S),
+ AR_PHY_TPC_OLPC_GAIN_DELTA);
+
+ /* enable open loop power control on chip */
+ REG_RMW(ah, AR_PHY_TPC_6_B0,
+ (3 << AR_PHY_TPC_6_ERROR_EST_MODE_S),
+ AR_PHY_TPC_6_ERROR_EST_MODE);
+ REG_RMW(ah, AR_PHY_TPC_6_B1,
+ (3 << AR_PHY_TPC_6_ERROR_EST_MODE_S),
+ AR_PHY_TPC_6_ERROR_EST_MODE);
+ REG_RMW(ah, AR_PHY_TPC_6_B2,
+ (3 << AR_PHY_TPC_6_ERROR_EST_MODE_S),
+ AR_PHY_TPC_6_ERROR_EST_MODE);
+
+ /*
+ * enable temperature compensation
+ * Need to use register names
+ */
+ if (frequency < 4000)
+ tempSlope = eep->modalHeader2G.tempSlope;
+ else
+ tempSlope = eep->modalHeader5G.tempSlope;
+
+ REG_RMW_FIELD(ah, AR_PHY_TPC_19, AR_PHY_TPC_19_ALPHA_THERM, tempSlope);
+ REG_RMW_FIELD(ah, AR_PHY_TPC_18, AR_PHY_TPC_18_THERM_CAL_VALUE,
+ temperature[0]);
+
+ return 0;
+}
+
+/* Apply the recorded correction values. */
+static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency)
+{
+ int ichain, ipier, npier;
+ int mode;
+ int lfrequency[AR9300_MAX_CHAINS],
+ lcorrection[AR9300_MAX_CHAINS],
+ ltemperature[AR9300_MAX_CHAINS], lvoltage[AR9300_MAX_CHAINS];
+ int hfrequency[AR9300_MAX_CHAINS],
+ hcorrection[AR9300_MAX_CHAINS],
+ htemperature[AR9300_MAX_CHAINS], hvoltage[AR9300_MAX_CHAINS];
+ int fdiff;
+ int correction[AR9300_MAX_CHAINS],
+ voltage[AR9300_MAX_CHAINS], temperature[AR9300_MAX_CHAINS];
+ int pfrequency, pcorrection, ptemperature, pvoltage;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ mode = (frequency >= 4000);
+ if (mode)
+ npier = AR9300_NUM_5G_CAL_PIERS;
+ else
+ npier = AR9300_NUM_2G_CAL_PIERS;
+
+ for (ichain = 0; ichain < AR9300_MAX_CHAINS; ichain++) {
+ lfrequency[ichain] = 0;
+ hfrequency[ichain] = 100000;
+ }
+ /* identify best lower and higher frequency calibration measurement */
+ for (ichain = 0; ichain < AR9300_MAX_CHAINS; ichain++) {
+ for (ipier = 0; ipier < npier; ipier++) {
+ if (!ar9003_hw_cal_pier_get(ah, mode, ipier, ichain,
+ &pfrequency, &pcorrection,
+ &ptemperature, &pvoltage)) {
+ fdiff = frequency - pfrequency;
+
+ /*
+ * this measurement is higher than
+ * our desired frequency
+ */
+ if (fdiff <= 0) {
+ if (hfrequency[ichain] <= 0 ||
+ hfrequency[ichain] >= 100000 ||
+ fdiff >
+ (frequency - hfrequency[ichain])) {
+ /*
+ * new best higher
+ * frequency measurement
+ */
+ hfrequency[ichain] = pfrequency;
+ hcorrection[ichain] =
+ pcorrection;
+ htemperature[ichain] =
+ ptemperature;
+ hvoltage[ichain] = pvoltage;
+ }
+ }
+ if (fdiff >= 0) {
+ if (lfrequency[ichain] <= 0
+ || fdiff <
+ (frequency - lfrequency[ichain])) {
+ /*
+ * new best lower
+ * frequency measurement
+ */
+ lfrequency[ichain] = pfrequency;
+ lcorrection[ichain] =
+ pcorrection;
+ ltemperature[ichain] =
+ ptemperature;
+ lvoltage[ichain] = pvoltage;
+ }
+ }
+ }
+ }
+ }
+
+ /* interpolate */
+ for (ichain = 0; ichain < AR9300_MAX_CHAINS; ichain++) {
+ ath_print(common, ATH_DBG_EEPROM,
+ "ch=%d f=%d low=%d %d h=%d %d\n",
+ ichain, frequency, lfrequency[ichain],
+ lcorrection[ichain], hfrequency[ichain],
+ hcorrection[ichain]);
+ /* they're the same, so just pick one */
+ if (hfrequency[ichain] == lfrequency[ichain]) {
+ correction[ichain] = lcorrection[ichain];
+ voltage[ichain] = lvoltage[ichain];
+ temperature[ichain] = ltemperature[ichain];
+ }
+ /* the low frequency is good */
+ else if (frequency - lfrequency[ichain] < 1000) {
+ /* so is the high frequency, interpolate */
+ if (hfrequency[ichain] - frequency < 1000) {
+
+ correction[ichain] = lcorrection[ichain] +
+ (((frequency - lfrequency[ichain]) *
+ (hcorrection[ichain] -
+ lcorrection[ichain])) /
+ (hfrequency[ichain] - lfrequency[ichain]));
+
+ temperature[ichain] = ltemperature[ichain] +
+ (((frequency - lfrequency[ichain]) *
+ (htemperature[ichain] -
+ ltemperature[ichain])) /
+ (hfrequency[ichain] - lfrequency[ichain]));
+
+ voltage[ichain] =
+ lvoltage[ichain] +
+ (((frequency -
+ lfrequency[ichain]) * (hvoltage[ichain] -
+ lvoltage[ichain]))
+ / (hfrequency[ichain] -
+ lfrequency[ichain]));
+ }
+ /* only low is good, use it */
+ else {
+ correction[ichain] = lcorrection[ichain];
+ temperature[ichain] = ltemperature[ichain];
+ voltage[ichain] = lvoltage[ichain];
+ }
+ }
+ /* only high is good, use it */
+ else if (hfrequency[ichain] - frequency < 1000) {
+ correction[ichain] = hcorrection[ichain];
+ temperature[ichain] = htemperature[ichain];
+ voltage[ichain] = hvoltage[ichain];
+ } else { /* nothing is good, presume 0???? */
+ correction[ichain] = 0;
+ temperature[ichain] = 0;
+ voltage[ichain] = 0;
+ }
+ }
+
+ ar9003_hw_power_control_override(ah, frequency, correction, voltage,
+ temperature);
+
+ ath_print(common, ATH_DBG_EEPROM,
+ "for frequency=%d, calibration correction = %d %d %d\n",
+ frequency, correction[0], correction[1], correction[2]);
+
+ return 0;
+}
+
+static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah,
+ struct ath9k_channel *chan, u16 cfgCtl,
+ u8 twiceAntennaReduction,
+ u8 twiceMaxRegulatoryPower,
+ u8 powerLimit)
+{
+ ah->txpower_limit = powerLimit;
+ ar9003_hw_set_target_power_eeprom(ah, chan->channel);
+ ar9003_hw_calibration_apply(ah, chan->channel);
+}
+
+static u16 ath9k_hw_ar9300_get_spur_channel(struct ath_hw *ah,
+ u16 i, bool is2GHz)
+{
+ return AR_NO_SPUR;
+}
+
+s32 ar9003_hw_get_tx_gain_idx(struct ath_hw *ah)
+{
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+
+ return (eep->baseEepHeader.txrxgain >> 4) & 0xf; /* bits 7:4 */
+}
+
+s32 ar9003_hw_get_rx_gain_idx(struct ath_hw *ah)
+{
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+
+ return (eep->baseEepHeader.txrxgain) & 0xf; /* bits 3:0 */
+}
+
+const struct eeprom_ops eep_ar9300_ops = {
+ .check_eeprom = ath9k_hw_ar9300_check_eeprom,
+ .get_eeprom = ath9k_hw_ar9300_get_eeprom,
+ .fill_eeprom = ath9k_hw_ar9300_fill_eeprom,
+ .get_eeprom_ver = ath9k_hw_ar9300_get_eeprom_ver,
+ .get_eeprom_rev = ath9k_hw_ar9300_get_eeprom_rev,
+ .get_num_ant_config = ath9k_hw_ar9300_get_num_ant_config,
+ .get_eeprom_antenna_cfg = ath9k_hw_ar9300_get_eeprom_antenna_cfg,
+ .set_board_values = ath9k_hw_ar9300_set_board_values,
+ .set_addac = ath9k_hw_ar9300_set_addac,
+ .set_txpower = ath9k_hw_ar9300_set_txpower,
+ .get_spur_channel = ath9k_hw_ar9300_get_spur_channel
+};
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
new file mode 100644
index 000000000000..23fb353c3bba
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
@@ -0,0 +1,323 @@
+#ifndef AR9003_EEPROM_H
+#define AR9003_EEPROM_H
+
+#include <linux/types.h>
+
+#define AR9300_EEP_VER 0xD000
+#define AR9300_EEP_VER_MINOR_MASK 0xFFF
+#define AR9300_EEP_MINOR_VER_1 0x1
+#define AR9300_EEP_MINOR_VER AR9300_EEP_MINOR_VER_1
+
+/* 16-bit offset location start of calibration struct */
+#define AR9300_EEP_START_LOC 256
+#define AR9300_NUM_5G_CAL_PIERS 8
+#define AR9300_NUM_2G_CAL_PIERS 3
+#define AR9300_NUM_5G_20_TARGET_POWERS 8
+#define AR9300_NUM_5G_40_TARGET_POWERS 8
+#define AR9300_NUM_2G_CCK_TARGET_POWERS 2
+#define AR9300_NUM_2G_20_TARGET_POWERS 3
+#define AR9300_NUM_2G_40_TARGET_POWERS 3
+/* #define AR9300_NUM_CTLS 21 */
+#define AR9300_NUM_CTLS_5G 9
+#define AR9300_NUM_CTLS_2G 12
+#define AR9300_CTL_MODE_M 0xF
+#define AR9300_NUM_BAND_EDGES_5G 8
+#define AR9300_NUM_BAND_EDGES_2G 4
+#define AR9300_NUM_PD_GAINS 4
+#define AR9300_PD_GAINS_IN_MASK 4
+#define AR9300_PD_GAIN_ICEPTS 5
+#define AR9300_EEPROM_MODAL_SPURS 5
+#define AR9300_MAX_RATE_POWER 63
+#define AR9300_NUM_PDADC_VALUES 128
+#define AR9300_NUM_RATES 16
+#define AR9300_BCHAN_UNUSED 0xFF
+#define AR9300_MAX_PWR_RANGE_IN_HALF_DB 64
+#define AR9300_OPFLAGS_11A 0x01
+#define AR9300_OPFLAGS_11G 0x02
+#define AR9300_OPFLAGS_5G_HT40 0x04
+#define AR9300_OPFLAGS_2G_HT40 0x08
+#define AR9300_OPFLAGS_5G_HT20 0x10
+#define AR9300_OPFLAGS_2G_HT20 0x20
+#define AR9300_EEPMISC_BIG_ENDIAN 0x01
+#define AR9300_EEPMISC_WOW 0x02
+#define AR9300_CUSTOMER_DATA_SIZE 20
+
+#define FREQ2FBIN(x, y) ((y) ? ((x) - 2300) : (((x) - 4800) / 5))
+#define FBIN2FREQ(x, y) ((y) ? (2300 + x) : (4800 + 5 * x))
+#define AR9300_MAX_CHAINS 3
+#define AR9300_ANT_16S 25
+#define AR9300_FUTURE_MODAL_SZ 6
+
+#define AR9300_NUM_ANT_CHAIN_FIELDS 7
+#define AR9300_NUM_ANT_COMMON_FIELDS 4
+#define AR9300_SIZE_ANT_CHAIN_FIELD 3
+#define AR9300_SIZE_ANT_COMMON_FIELD 4
+#define AR9300_ANT_CHAIN_MASK 0x7
+#define AR9300_ANT_COMMON_MASK 0xf
+#define AR9300_CHAIN_0_IDX 0
+#define AR9300_CHAIN_1_IDX 1
+#define AR9300_CHAIN_2_IDX 2
+
+#define AR928X_NUM_ANT_CHAIN_FIELDS 6
+#define AR928X_SIZE_ANT_CHAIN_FIELD 2
+#define AR928X_ANT_CHAIN_MASK 0x3
+
+/* Delta from which to start power to pdadc table */
+/* This offset is used in both open loop and closed loop power control
+ * schemes. In open loop power control, it is not really needed, but for
+ * the "sake of consistency" it was kept. For certain AP designs, this
+ * value is overwritten by the value in the flag "pwrTableOffset" just
+ * before writing the pdadc vs pwr into the chip registers.
+ */
+#define AR9300_PWR_TABLE_OFFSET 0
+
+/* enable flags for voltage and temp compensation */
+#define ENABLE_TEMP_COMPENSATION 0x01
+#define ENABLE_VOLT_COMPENSATION 0x02
+/* byte addressable */
+#define AR9300_EEPROM_SIZE (16*1024)
+#define FIXED_CCA_THRESHOLD 15
+
+#define AR9300_BASE_ADDR 0x3ff
+
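+/*
+ * Clarifying note (added here, not in the original header): each enum value
+ * below names an HT MCS index or group of indices that shares a single
+ * target-power entry; e.g. HT_TARGET_RATE_0_8_16 covers MCS 0, 8 and 16,
+ * which are the same BPSK rate-1/2 modulation on 1, 2 and 3 spatial streams
+ * respectively.
+ */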
+enum targetPowerHTRates {
+ HT_TARGET_RATE_0_8_16,
+ HT_TARGET_RATE_1_3_9_11_17_19,
+ HT_TARGET_RATE_4,
+ HT_TARGET_RATE_5,
+ HT_TARGET_RATE_6,
+ HT_TARGET_RATE_7,
+ HT_TARGET_RATE_12,
+ HT_TARGET_RATE_13,
+ HT_TARGET_RATE_14,
+ HT_TARGET_RATE_15,
+ HT_TARGET_RATE_20,
+ HT_TARGET_RATE_21,
+ HT_TARGET_RATE_22,
+ HT_TARGET_RATE_23
+};
+
+enum targetPowerLegacyRates {
+ LEGACY_TARGET_RATE_6_24,
+ LEGACY_TARGET_RATE_36,
+ LEGACY_TARGET_RATE_48,
+ LEGACY_TARGET_RATE_54
+};
+
+enum targetPowerCckRates {
+ LEGACY_TARGET_RATE_1L_5L,
+ LEGACY_TARGET_RATE_5S,
+ LEGACY_TARGET_RATE_11L,
+ LEGACY_TARGET_RATE_11S
+};
+
+enum ar9300_Rates {
+ ALL_TARGET_LEGACY_6_24,
+ ALL_TARGET_LEGACY_36,
+ ALL_TARGET_LEGACY_48,
+ ALL_TARGET_LEGACY_54,
+ ALL_TARGET_LEGACY_1L_5L,
+ ALL_TARGET_LEGACY_5S,
+ ALL_TARGET_LEGACY_11L,
+ ALL_TARGET_LEGACY_11S,
+ ALL_TARGET_HT20_0_8_16,
+ ALL_TARGET_HT20_1_3_9_11_17_19,
+ ALL_TARGET_HT20_4,
+ ALL_TARGET_HT20_5,
+ ALL_TARGET_HT20_6,
+ ALL_TARGET_HT20_7,
+ ALL_TARGET_HT20_12,
+ ALL_TARGET_HT20_13,
+ ALL_TARGET_HT20_14,
+ ALL_TARGET_HT20_15,
+ ALL_TARGET_HT20_20,
+ ALL_TARGET_HT20_21,
+ ALL_TARGET_HT20_22,
+ ALL_TARGET_HT20_23,
+ ALL_TARGET_HT40_0_8_16,
+ ALL_TARGET_HT40_1_3_9_11_17_19,
+ ALL_TARGET_HT40_4,
+ ALL_TARGET_HT40_5,
+ ALL_TARGET_HT40_6,
+ ALL_TARGET_HT40_7,
+ ALL_TARGET_HT40_12,
+ ALL_TARGET_HT40_13,
+ ALL_TARGET_HT40_14,
+ ALL_TARGET_HT40_15,
+ ALL_TARGET_HT40_20,
+ ALL_TARGET_HT40_21,
+ ALL_TARGET_HT40_22,
+ ALL_TARGET_HT40_23,
+ ar9300RateSize,
+};
+
+struct eepFlags {
+ u8 opFlags;
+ u8 eepMisc;
+} __packed;
+
+enum CompressAlgorithm {
+ _CompressNone = 0,
+ _CompressLzma,
+ _CompressPairs,
+ _CompressBlock,
+ _Compress4,
+ _Compress5,
+ _Compress6,
+ _Compress7,
+};
+
+struct ar9300_base_eep_hdr {
+ __le16 regDmn[2];
+ /* 4 bits tx and 4 bits rx */
+ u8 txrxMask;
+ struct eepFlags opCapFlags;
+ u8 rfSilent;
+ u8 blueToothOptions;
+ u8 deviceCap;
+ /* takes lower byte in eeprom location */
+ u8 deviceType;
+ /* offset in dB to be added to beginning
+ * of pdadc table in calibration
+ */
+ int8_t pwrTableOffset;
+ u8 params_for_tuning_caps[2];
+ /*
+ * bit0 - enable tx temp comp
+ * bit1 - enable tx volt comp
+ * bit2 - enable fastClock - default to 1
+ * bit3 - enable doubling - default to 1
+ * bit4 - enable internal regulator - default to 1
+ */
+ u8 featureEnable;
+ /* misc flags: bit0 - turn down drivestrength */
+ u8 miscConfiguration;
+ u8 eepromWriteEnableGpio;
+ u8 wlanDisableGpio;
+ u8 wlanLedGpio;
+ u8 rxBandSelectGpio;
+ u8 txrxgain;
+ /* SW controlled internal regulator fields */
+ __le32 swreg;
+} __packed;
+
+struct ar9300_modal_eep_header {
+ /* 4 idle, t1, t2, b (4 bits per setting) */
+ __le32 antCtrlCommon;
+ /* 4 ra1l1, ra2l1, ra1l2, ra2l2, ra12 */
+ __le32 antCtrlCommon2;
+ /* 6 idle, t, r, rx1, rx12, b (2 bits each) */
+ __le16 antCtrlChain[AR9300_MAX_CHAINS];
+ /* 3 xatten1_db for AR9280 (0xa20c/b20c 5:0) */
+ u8 xatten1DB[AR9300_MAX_CHAINS];
+ /* 3 xatten1_margin for merlin (0xa20c/b20c 16:12) */
+ u8 xatten1Margin[AR9300_MAX_CHAINS];
+ int8_t tempSlope;
+ int8_t voltSlope;
+ /* spur channels in usual fbin coding format */
+ u8 spurChans[AR9300_EEPROM_MODAL_SPURS];
+ /* 3 Check if the register is per chain */
+ int8_t noiseFloorThreshCh[AR9300_MAX_CHAINS];
+ u8 ob[AR9300_MAX_CHAINS];
+ u8 db_stage2[AR9300_MAX_CHAINS];
+ u8 db_stage3[AR9300_MAX_CHAINS];
+ u8 db_stage4[AR9300_MAX_CHAINS];
+ u8 xpaBiasLvl;
+ u8 txFrameToDataStart;
+ u8 txFrameToPaOn;
+ u8 txClip;
+ int8_t antennaGain;
+ u8 switchSettling;
+ int8_t adcDesiredSize;
+ u8 txEndToXpaOff;
+ u8 txEndToRxOn;
+ u8 txFrameToXpaOn;
+ u8 thresh62;
+ u8 futureModal[32];
+} __packed;
+
+struct ar9300_cal_data_per_freq_op_loop {
+ int8_t refPower;
+ /* pdadc voltage at power measurement */
+ u8 voltMeas;
+ /* pcdac used for power measurement */
+ u8 tempMeas;
+ /* range is -60 to -127; create a mapping equation with 1 dB resolution */
+ int8_t rxNoisefloorCal;
+ /* range is the same as noisefloor */
+ int8_t rxNoisefloorPower;
+ /* temp measured when noisefloor cal was performed */
+ u8 rxTempMeas;
+} __packed;
+
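+/*
+ * Clarifying note (not in the original source): the "2x" in tPow2x suggests
+ * target powers stored in half-dB steps (twice the dB value), following the
+ * usual ath9k convention; the 14 entries of cal_tgt_pow_ht line up with the
+ * 14 HT rate groups of enum targetPowerHTRates above.
+ */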
+struct cal_tgt_pow_legacy {
+ u8 tPow2x[4];
+} __packed;
+
+struct cal_tgt_pow_ht {
+ u8 tPow2x[14];
+} __packed;
+
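+/*
+ * Clarifying note (added for readability): the 6-bit tPower field spans
+ * 0..63, matching AR9300_MAX_RATE_POWER above, while the 2-bit flag appears
+ * to carry per-edge control flags used by conformance-test-limit (CTL)
+ * processing.
+ */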
+struct cal_ctl_edge_pwr {
+ u8 tPower:6,
+ flag:2;
+} __packed;
+
+struct cal_ctl_data_2g {
+ struct cal_ctl_edge_pwr ctlEdges[AR9300_NUM_BAND_EDGES_2G];
+} __packed;
+
+struct cal_ctl_data_5g {
+ struct cal_ctl_edge_pwr ctlEdges[AR9300_NUM_BAND_EDGES_5G];
+} __packed;
+
+struct ar9300_eeprom {
+ u8 eepromVersion;
+ u8 templateVersion;
+ u8 macAddr[6];
+ u8 custData[AR9300_CUSTOMER_DATA_SIZE];
+
+ struct ar9300_base_eep_hdr baseEepHeader;
+
+ struct ar9300_modal_eep_header modalHeader2G;
+ u8 calFreqPier2G[AR9300_NUM_2G_CAL_PIERS];
+ struct ar9300_cal_data_per_freq_op_loop
+ calPierData2G[AR9300_MAX_CHAINS][AR9300_NUM_2G_CAL_PIERS];
+ u8 calTarget_freqbin_Cck[AR9300_NUM_2G_CCK_TARGET_POWERS];
+ u8 calTarget_freqbin_2G[AR9300_NUM_2G_20_TARGET_POWERS];
+ u8 calTarget_freqbin_2GHT20[AR9300_NUM_2G_20_TARGET_POWERS];
+ u8 calTarget_freqbin_2GHT40[AR9300_NUM_2G_40_TARGET_POWERS];
+ struct cal_tgt_pow_legacy
+ calTargetPowerCck[AR9300_NUM_2G_CCK_TARGET_POWERS];
+ struct cal_tgt_pow_legacy
+ calTargetPower2G[AR9300_NUM_2G_20_TARGET_POWERS];
+ struct cal_tgt_pow_ht
+ calTargetPower2GHT20[AR9300_NUM_2G_20_TARGET_POWERS];
+ struct cal_tgt_pow_ht
+ calTargetPower2GHT40[AR9300_NUM_2G_40_TARGET_POWERS];
+ u8 ctlIndex_2G[AR9300_NUM_CTLS_2G];
+ u8 ctl_freqbin_2G[AR9300_NUM_CTLS_2G][AR9300_NUM_BAND_EDGES_2G];
+ struct cal_ctl_data_2g ctlPowerData_2G[AR9300_NUM_CTLS_2G];
+ struct ar9300_modal_eep_header modalHeader5G;
+ u8 calFreqPier5G[AR9300_NUM_5G_CAL_PIERS];
+ struct ar9300_cal_data_per_freq_op_loop
+ calPierData5G[AR9300_MAX_CHAINS][AR9300_NUM_5G_CAL_PIERS];
+ u8 calTarget_freqbin_5G[AR9300_NUM_5G_20_TARGET_POWERS];
+ u8 calTarget_freqbin_5GHT20[AR9300_NUM_5G_20_TARGET_POWERS];
+ u8 calTarget_freqbin_5GHT40[AR9300_NUM_5G_40_TARGET_POWERS];
+ struct cal_tgt_pow_legacy
+ calTargetPower5G[AR9300_NUM_5G_20_TARGET_POWERS];
+ struct cal_tgt_pow_ht
+ calTargetPower5GHT20[AR9300_NUM_5G_20_TARGET_POWERS];
+ struct cal_tgt_pow_ht
+ calTargetPower5GHT40[AR9300_NUM_5G_40_TARGET_POWERS];
+ u8 ctlIndex_5G[AR9300_NUM_CTLS_5G];
+ u8 ctl_freqbin_5G[AR9300_NUM_CTLS_5G][AR9300_NUM_BAND_EDGES_5G];
+ struct cal_ctl_data_5g ctlPowerData_5G[AR9300_NUM_CTLS_5G];
+} __packed;
+
+s32 ar9003_hw_get_tx_gain_idx(struct ath_hw *ah);
+s32 ar9003_hw_get_rx_gain_idx(struct ath_hw *ah);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
new file mode 100644
index 000000000000..b15309caf1da
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -0,0 +1,205 @@
+/*
+ * Copyright (c) 2008-2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "hw.h"
+#include "ar9003_mac.h"
+#include "ar9003_initvals.h"
+
+/* General hardware code for the AR9003 hardware family */
+
+static bool ar9003_hw_macversion_supported(u32 macversion)
+{
+ switch (macversion) {
+ case AR_SREV_VERSION_9300:
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+
+/* AR9003 2.0 - new INI format (pre, core, post arrays per subsystem) */
+/*
+ * XXX: move TX/RX gain INI to its own init_mode_gain_regs after
+ * ensuring it does not affect hardware bring up
+ */
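+/*
+ * Clarifying note (added here, not in the original source): the trailing
+ * argument of each INIT_INI_ARRAY() call below is the number of columns in
+ * the initvals table being registered: 2 for the single-value "allmodes"
+ * tables, 3 for the fast-clock table, and 5 for the per-mode tables
+ * (Addr plus four channel-mode columns); compare the [][2], [][3] and [][5]
+ * table definitions in ar9003_initvals.h.
+ */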
+static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
+{
+ /* mac */
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_PRE], NULL, 0, 0);
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
+ ar9300_2p0_mac_core,
+ ARRAY_SIZE(ar9300_2p0_mac_core), 2);
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
+ ar9300_2p0_mac_postamble,
+ ARRAY_SIZE(ar9300_2p0_mac_postamble), 5);
+
+ /* bb */
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_PRE], NULL, 0, 0);
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
+ ar9300_2p0_baseband_core,
+ ARRAY_SIZE(ar9300_2p0_baseband_core), 2);
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
+ ar9300_2p0_baseband_postamble,
+ ARRAY_SIZE(ar9300_2p0_baseband_postamble), 5);
+
+ /* radio */
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_PRE], NULL, 0, 0);
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
+ ar9300_2p0_radio_core,
+ ARRAY_SIZE(ar9300_2p0_radio_core), 2);
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST],
+ ar9300_2p0_radio_postamble,
+ ARRAY_SIZE(ar9300_2p0_radio_postamble), 5);
+
+ /* soc */
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
+ ar9300_2p0_soc_preamble,
+ ARRAY_SIZE(ar9300_2p0_soc_preamble), 2);
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_CORE], NULL, 0, 0);
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST],
+ ar9300_2p0_soc_postamble,
+ ARRAY_SIZE(ar9300_2p0_soc_postamble), 5);
+
+ /* rx/tx gain */
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9300Common_rx_gain_table_2p0,
+ ARRAY_SIZE(ar9300Common_rx_gain_table_2p0), 2);
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9300Modes_lowest_ob_db_tx_gain_table_2p0,
+ ARRAY_SIZE(ar9300Modes_lowest_ob_db_tx_gain_table_2p0),
+ 5);
+
+ /* Load PCIE SERDES settings from INI */
+
+ /* Awake Setting */
+
+ INIT_INI_ARRAY(&ah->iniPcieSerdes,
+ ar9300PciePhy_pll_on_clkreq_disable_L1_2p0,
+ ARRAY_SIZE(ar9300PciePhy_pll_on_clkreq_disable_L1_2p0),
+ 2);
+
+ /* Sleep Setting */
+
+ INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
+ ar9300PciePhy_clkreq_enable_L1_2p0,
+ ARRAY_SIZE(ar9300PciePhy_clkreq_enable_L1_2p0),
+ 2);
+
+ /* Fast clock modal settings */
+ INIT_INI_ARRAY(&ah->iniModesAdditional,
+ ar9300Modes_fast_clock_2p0,
+ ARRAY_SIZE(ar9300Modes_fast_clock_2p0),
+ 3);
+}
+
+static void ar9003_tx_gain_table_apply(struct ath_hw *ah)
+{
+ switch (ar9003_hw_get_tx_gain_idx(ah)) {
+ case 0:
+ default:
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9300Modes_lowest_ob_db_tx_gain_table_2p0,
+ ARRAY_SIZE(ar9300Modes_lowest_ob_db_tx_gain_table_2p0),
+ 5);
+ break;
+ case 1:
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9300Modes_high_ob_db_tx_gain_table_2p0,
+ ARRAY_SIZE(ar9300Modes_high_ob_db_tx_gain_table_2p0),
+ 5);
+ break;
+ case 2:
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9300Modes_low_ob_db_tx_gain_table_2p0,
+ ARRAY_SIZE(ar9300Modes_low_ob_db_tx_gain_table_2p0),
+ 5);
+ break;
+ }
+}
+
+static void ar9003_rx_gain_table_apply(struct ath_hw *ah)
+{
+ switch (ar9003_hw_get_rx_gain_idx(ah)) {
+ case 0:
+ default:
+ INIT_INI_ARRAY(&ah->iniModesRxGain, ar9300Common_rx_gain_table_2p0,
+ ARRAY_SIZE(ar9300Common_rx_gain_table_2p0),
+ 2);
+ break;
+ case 1:
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9300Common_wo_xlna_rx_gain_table_2p0,
+ ARRAY_SIZE(ar9300Common_wo_xlna_rx_gain_table_2p0),
+ 2);
+ break;
+ }
+}
+
+/* set gain table pointers according to values read from the eeprom */
+static void ar9003_hw_init_mode_gain_regs(struct ath_hw *ah)
+{
+ ar9003_tx_gain_table_apply(ah);
+ ar9003_rx_gain_table_apply(ah);
+}
+
+/*
+ * Helper for ASPM support.
+ *
+ * Disable PLL when in L0s as well as receiver clock when in L1.
+ * This power saving option must be enabled through the SerDes.
+ *
+ * Programming the SerDes must go through the same 288-bit serial shift
+ * register as the other analog registers, hence the nine 32-bit writes
+ * (288 / 32 = 9).
+ */
+static void ar9003_hw_configpcipowersave(struct ath_hw *ah,
+ int restore,
+ int power_off)
+{
+ if (!ah->is_pciexpress)
+ return;
+
+ /* Do not touch SerDes registers */
+ if (ah->config.pcie_powersave_enable == 2)
+ return;
+
+ /* Nothing to do on restore for 11N */
+ if (!restore) {
+ /* set bit 19 to allow forcing of pcie core into L1 state */
+ REG_SET_BIT(ah, AR_PCIE_PM_CTRL, AR_PCIE_PM_CTRL_ENA);
+
+ /* Several PCIe messages to ensure proper behaviour */
+ if (ah->config.pcie_waen)
+ REG_WRITE(ah, AR_WA, ah->config.pcie_waen);
+ }
+}
+
+/* Sets up the AR9003 hardware family callbacks */
+void ar9003_hw_attach_ops(struct ath_hw *ah)
+{
+ struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
+ struct ath_hw_ops *ops = ath9k_hw_ops(ah);
+
+ priv_ops->init_mode_regs = ar9003_hw_init_mode_regs;
+ priv_ops->init_mode_gain_regs = ar9003_hw_init_mode_gain_regs;
+ priv_ops->macversion_supported = ar9003_hw_macversion_supported;
+
+ ops->config_pci_powersave = ar9003_hw_configpcipowersave;
+
+ ar9003_hw_attach_phy_ops(ah);
+ ar9003_hw_attach_calib_ops(ah);
+ ar9003_hw_attach_mac_ops(ah);
+}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_initvals.h
new file mode 100644
index 000000000000..db019dd220b7
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_initvals.h
@@ -0,0 +1,1784 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef INITVALS_9003_H
+#define INITVALS_9003_H
+
+/* AR9003 2.0 */
+
+static const u32 ar9300_2p0_radio_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0001609c, 0x0dd08f29, 0x0dd08f29, 0x0b283f31, 0x0b283f31},
+ {0x000160ac, 0xa4653c00, 0xa4653c00, 0x24652800, 0x24652800},
+ {0x000160b0, 0x03284f3e, 0x03284f3e, 0x05d08f20, 0x05d08f20},
+ {0x0001610c, 0x08000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00016140, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
+ {0x0001650c, 0x08000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00016540, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
+ {0x0001690c, 0x08000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00016940, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
+};
+
+static const u32 ar9300Modes_lowest_ob_db_tx_gain_table_2p0[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+ {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400},
+ {0x0000a518, 0x21020220, 0x21020220, 0x16000402, 0x16000402},
+ {0x0000a51c, 0x27020223, 0x27020223, 0x19000404, 0x19000404},
+ {0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603},
+ {0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02},
+ {0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04},
+ {0x0000a52c, 0x3a02222a, 0x3a02222a, 0x28000a20, 0x28000a20},
+ {0x0000a530, 0x3e02222c, 0x3e02222c, 0x2c000e20, 0x2c000e20},
+ {0x0000a534, 0x4202242a, 0x4202242a, 0x30000e22, 0x30000e22},
+ {0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24},
+ {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640},
+ {0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660},
+ {0x0000a544, 0x5302266c, 0x5302266c, 0x3f001861, 0x3f001861},
+ {0x0000a548, 0x5702286c, 0x5702286c, 0x43001a81, 0x43001a81},
+ {0x0000a54c, 0x5c04286b, 0x5c04286b, 0x47001a83, 0x47001a83},
+ {0x0000a550, 0x61042a6c, 0x61042a6c, 0x4a001c84, 0x4a001c84},
+ {0x0000a554, 0x66062a6c, 0x66062a6c, 0x4e001ce3, 0x4e001ce3},
+ {0x0000a558, 0x6b062e6c, 0x6b062e6c, 0x52001ce5, 0x52001ce5},
+ {0x0000a55c, 0x7006308c, 0x7006308c, 0x56001ce9, 0x56001ce9},
+ {0x0000a560, 0x730a308a, 0x730a308a, 0x5a001ceb, 0x5a001ceb},
+ {0x0000a564, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a568, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a56c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a570, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a574, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a578, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a57c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
+ {0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002},
+ {0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004},
+ {0x0000a58c, 0x10800023, 0x10800023, 0x0b800200, 0x0b800200},
+ {0x0000a590, 0x16800220, 0x16800220, 0x0f800202, 0x0f800202},
+ {0x0000a594, 0x1c800223, 0x1c800223, 0x12800400, 0x12800400},
+ {0x0000a598, 0x21820220, 0x21820220, 0x16800402, 0x16800402},
+ {0x0000a59c, 0x27820223, 0x27820223, 0x19800404, 0x19800404},
+ {0x0000a5a0, 0x2b822220, 0x2b822220, 0x1c800603, 0x1c800603},
+ {0x0000a5a4, 0x2f822222, 0x2f822222, 0x21800a02, 0x21800a02},
+ {0x0000a5a8, 0x34822225, 0x34822225, 0x25800a04, 0x25800a04},
+ {0x0000a5ac, 0x3a82222a, 0x3a82222a, 0x28800a20, 0x28800a20},
+ {0x0000a5b0, 0x3e82222c, 0x3e82222c, 0x2c800e20, 0x2c800e20},
+ {0x0000a5b4, 0x4282242a, 0x4282242a, 0x30800e22, 0x30800e22},
+ {0x0000a5b8, 0x4782244a, 0x4782244a, 0x34800e24, 0x34800e24},
+ {0x0000a5bc, 0x4b82244c, 0x4b82244c, 0x38801640, 0x38801640},
+ {0x0000a5c0, 0x4e82246c, 0x4e82246c, 0x3c801660, 0x3c801660},
+ {0x0000a5c4, 0x5382266c, 0x5382266c, 0x3f801861, 0x3f801861},
+ {0x0000a5c8, 0x5782286c, 0x5782286c, 0x43801a81, 0x43801a81},
+ {0x0000a5cc, 0x5c84286b, 0x5c84286b, 0x47801a83, 0x47801a83},
+ {0x0000a5d0, 0x61842a6c, 0x61842a6c, 0x4a801c84, 0x4a801c84},
+ {0x0000a5d4, 0x66862a6c, 0x66862a6c, 0x4e801ce3, 0x4e801ce3},
+ {0x0000a5d8, 0x6b862e6c, 0x6b862e6c, 0x52801ce5, 0x52801ce5},
+ {0x0000a5dc, 0x7086308c, 0x7086308c, 0x56801ce9, 0x56801ce9},
+ {0x0000a5e0, 0x738a308a, 0x738a308a, 0x5a801ceb, 0x5a801ceb},
+ {0x0000a5e4, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5e8, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5ec, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f0, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f4, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f8, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5fc, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
+ {0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016048, 0x62480001, 0x62480001, 0x62480001, 0x62480001},
+ {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016444, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016448, 0x62480001, 0x62480001, 0x62480001, 0x62480001},
+ {0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016844, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016848, 0x62480001, 0x62480001, 0x62480001, 0x62480001},
+ {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+};
+
+static const u32 ar9300Modes_fast_clock_2p0[][3] = {
+ /* Addr 5G_HT20 5G_HT40 */
+ {0x00001030, 0x00000268, 0x000004d0},
+ {0x00001070, 0x0000018c, 0x00000318},
+ {0x000010b0, 0x00000fd0, 0x00001fa0},
+ {0x00008014, 0x044c044c, 0x08980898},
+ {0x0000801c, 0x148ec02b, 0x148ec057},
+ {0x00008318, 0x000044c0, 0x00008980},
+ {0x00009e00, 0x03721821, 0x03721821},
+ {0x0000a230, 0x0000000b, 0x00000016},
+ {0x0000a254, 0x00000898, 0x00001130},
+};
+
+static const u32 ar9300_2p0_radio_core[][2] = {
+ /* Addr allmodes */
+ {0x00016000, 0x36db6db6},
+ {0x00016004, 0x6db6db40},
+ {0x00016008, 0x73f00000},
+ {0x0001600c, 0x00000000},
+ {0x00016040, 0x7f80fff8},
+ {0x0001604c, 0x76d005b5},
+ {0x00016050, 0x556cf031},
+ {0x00016054, 0x13449440},
+ {0x00016058, 0x0c51c92c},
+ {0x0001605c, 0x3db7fffc},
+ {0x00016060, 0xfffffffc},
+ {0x00016064, 0x000f0278},
+ {0x0001606c, 0x6db60000},
+ {0x00016080, 0x00000000},
+ {0x00016084, 0x0e48048c},
+ {0x00016088, 0x54214514},
+ {0x0001608c, 0x119f481e},
+ {0x00016090, 0x24926490},
+ {0x00016098, 0xd2888888},
+ {0x000160a0, 0x0a108ffe},
+ {0x000160a4, 0x812fc370},
+ {0x000160a8, 0x423c8000},
+ {0x000160b4, 0x92480080},
+ {0x000160c0, 0x00adb6d0},
+ {0x000160c4, 0x6db6db60},
+ {0x000160c8, 0x6db6db6c},
+ {0x000160cc, 0x01e6c000},
+ {0x00016100, 0x3fffbe01},
+ {0x00016104, 0xfff80000},
+ {0x00016108, 0x00080010},
+ {0x00016144, 0x02084080},
+ {0x00016148, 0x00000000},
+ {0x00016280, 0x058a0001},
+ {0x00016284, 0x3d840208},
+ {0x00016288, 0x05a20408},
+ {0x0001628c, 0x00038c07},
+ {0x00016290, 0x40000004},
+ {0x00016294, 0x458aa14f},
+ {0x00016380, 0x00000000},
+ {0x00016384, 0x00000000},
+ {0x00016388, 0x00800700},
+ {0x0001638c, 0x00800700},
+ {0x00016390, 0x00800700},
+ {0x00016394, 0x00000000},
+ {0x00016398, 0x00000000},
+ {0x0001639c, 0x00000000},
+ {0x000163a0, 0x00000001},
+ {0x000163a4, 0x00000001},
+ {0x000163a8, 0x00000000},
+ {0x000163ac, 0x00000000},
+ {0x000163b0, 0x00000000},
+ {0x000163b4, 0x00000000},
+ {0x000163b8, 0x00000000},
+ {0x000163bc, 0x00000000},
+ {0x000163c0, 0x000000a0},
+ {0x000163c4, 0x000c0000},
+ {0x000163c8, 0x14021402},
+ {0x000163cc, 0x00001402},
+ {0x000163d0, 0x00000000},
+ {0x000163d4, 0x00000000},
+ {0x00016400, 0x36db6db6},
+ {0x00016404, 0x6db6db40},
+ {0x00016408, 0x73f00000},
+ {0x0001640c, 0x00000000},
+ {0x00016440, 0x7f80fff8},
+ {0x0001644c, 0x76d005b5},
+ {0x00016450, 0x556cf031},
+ {0x00016454, 0x13449440},
+ {0x00016458, 0x0c51c92c},
+ {0x0001645c, 0x3db7fffc},
+ {0x00016460, 0xfffffffc},
+ {0x00016464, 0x000f0278},
+ {0x0001646c, 0x6db60000},
+ {0x00016500, 0x3fffbe01},
+ {0x00016504, 0xfff80000},
+ {0x00016508, 0x00080010},
+ {0x00016544, 0x02084080},
+ {0x00016548, 0x00000000},
+ {0x00016780, 0x00000000},
+ {0x00016784, 0x00000000},
+ {0x00016788, 0x00800700},
+ {0x0001678c, 0x00800700},
+ {0x00016790, 0x00800700},
+ {0x00016794, 0x00000000},
+ {0x00016798, 0x00000000},
+ {0x0001679c, 0x00000000},
+ {0x000167a0, 0x00000001},
+ {0x000167a4, 0x00000001},
+ {0x000167a8, 0x00000000},
+ {0x000167ac, 0x00000000},
+ {0x000167b0, 0x00000000},
+ {0x000167b4, 0x00000000},
+ {0x000167b8, 0x00000000},
+ {0x000167bc, 0x00000000},
+ {0x000167c0, 0x000000a0},
+ {0x000167c4, 0x000c0000},
+ {0x000167c8, 0x14021402},
+ {0x000167cc, 0x00001402},
+ {0x000167d0, 0x00000000},
+ {0x000167d4, 0x00000000},
+ {0x00016800, 0x36db6db6},
+ {0x00016804, 0x6db6db40},
+ {0x00016808, 0x73f00000},
+ {0x0001680c, 0x00000000},
+ {0x00016840, 0x7f80fff8},
+ {0x0001684c, 0x76d005b5},
+ {0x00016850, 0x556cf031},
+ {0x00016854, 0x13449440},
+ {0x00016858, 0x0c51c92c},
+ {0x0001685c, 0x3db7fffc},
+ {0x00016860, 0xfffffffc},
+ {0x00016864, 0x000f0278},
+ {0x0001686c, 0x6db60000},
+ {0x00016900, 0x3fffbe01},
+ {0x00016904, 0xfff80000},
+ {0x00016908, 0x00080010},
+ {0x00016944, 0x02084080},
+ {0x00016948, 0x00000000},
+ {0x00016b80, 0x00000000},
+ {0x00016b84, 0x00000000},
+ {0x00016b88, 0x00800700},
+ {0x00016b8c, 0x00800700},
+ {0x00016b90, 0x00800700},
+ {0x00016b94, 0x00000000},
+ {0x00016b98, 0x00000000},
+ {0x00016b9c, 0x00000000},
+ {0x00016ba0, 0x00000001},
+ {0x00016ba4, 0x00000001},
+ {0x00016ba8, 0x00000000},
+ {0x00016bac, 0x00000000},
+ {0x00016bb0, 0x00000000},
+ {0x00016bb4, 0x00000000},
+ {0x00016bb8, 0x00000000},
+ {0x00016bbc, 0x00000000},
+ {0x00016bc0, 0x000000a0},
+ {0x00016bc4, 0x000c0000},
+ {0x00016bc8, 0x14021402},
+ {0x00016bcc, 0x00001402},
+ {0x00016bd0, 0x00000000},
+ {0x00016bd4, 0x00000000},
+};
+
+static const u32 ar9300Common_rx_gain_table_merlin_2p0[][2] = {
+ /* Addr allmodes */
+ {0x0000a000, 0x02000101},
+ {0x0000a004, 0x02000102},
+ {0x0000a008, 0x02000103},
+ {0x0000a00c, 0x02000104},
+ {0x0000a010, 0x02000200},
+ {0x0000a014, 0x02000201},
+ {0x0000a018, 0x02000202},
+ {0x0000a01c, 0x02000203},
+ {0x0000a020, 0x02000204},
+ {0x0000a024, 0x02000205},
+ {0x0000a028, 0x02000208},
+ {0x0000a02c, 0x02000302},
+ {0x0000a030, 0x02000303},
+ {0x0000a034, 0x02000304},
+ {0x0000a038, 0x02000400},
+ {0x0000a03c, 0x02010300},
+ {0x0000a040, 0x02010301},
+ {0x0000a044, 0x02010302},
+ {0x0000a048, 0x02000500},
+ {0x0000a04c, 0x02010400},
+ {0x0000a050, 0x02020300},
+ {0x0000a054, 0x02020301},
+ {0x0000a058, 0x02020302},
+ {0x0000a05c, 0x02020303},
+ {0x0000a060, 0x02020400},
+ {0x0000a064, 0x02030300},
+ {0x0000a068, 0x02030301},
+ {0x0000a06c, 0x02030302},
+ {0x0000a070, 0x02030303},
+ {0x0000a074, 0x02030400},
+ {0x0000a078, 0x02040300},
+ {0x0000a07c, 0x02040301},
+ {0x0000a080, 0x02040302},
+ {0x0000a084, 0x02040303},
+ {0x0000a088, 0x02030500},
+ {0x0000a08c, 0x02040400},
+ {0x0000a090, 0x02050203},
+ {0x0000a094, 0x02050204},
+ {0x0000a098, 0x02050205},
+ {0x0000a09c, 0x02040500},
+ {0x0000a0a0, 0x02050301},
+ {0x0000a0a4, 0x02050302},
+ {0x0000a0a8, 0x02050303},
+ {0x0000a0ac, 0x02050400},
+ {0x0000a0b0, 0x02050401},
+ {0x0000a0b4, 0x02050402},
+ {0x0000a0b8, 0x02050403},
+ {0x0000a0bc, 0x02050500},
+ {0x0000a0c0, 0x02050501},
+ {0x0000a0c4, 0x02050502},
+ {0x0000a0c8, 0x02050503},
+ {0x0000a0cc, 0x02050504},
+ {0x0000a0d0, 0x02050600},
+ {0x0000a0d4, 0x02050601},
+ {0x0000a0d8, 0x02050602},
+ {0x0000a0dc, 0x02050603},
+ {0x0000a0e0, 0x02050604},
+ {0x0000a0e4, 0x02050700},
+ {0x0000a0e8, 0x02050701},
+ {0x0000a0ec, 0x02050702},
+ {0x0000a0f0, 0x02050703},
+ {0x0000a0f4, 0x02050704},
+ {0x0000a0f8, 0x02050705},
+ {0x0000a0fc, 0x02050708},
+ {0x0000a100, 0x02050709},
+ {0x0000a104, 0x0205070a},
+ {0x0000a108, 0x0205070b},
+ {0x0000a10c, 0x0205070c},
+ {0x0000a110, 0x0205070d},
+ {0x0000a114, 0x02050710},
+ {0x0000a118, 0x02050711},
+ {0x0000a11c, 0x02050712},
+ {0x0000a120, 0x02050713},
+ {0x0000a124, 0x02050714},
+ {0x0000a128, 0x02050715},
+ {0x0000a12c, 0x02050730},
+ {0x0000a130, 0x02050731},
+ {0x0000a134, 0x02050732},
+ {0x0000a138, 0x02050733},
+ {0x0000a13c, 0x02050734},
+ {0x0000a140, 0x02050735},
+ {0x0000a144, 0x02050750},
+ {0x0000a148, 0x02050751},
+ {0x0000a14c, 0x02050752},
+ {0x0000a150, 0x02050753},
+ {0x0000a154, 0x02050754},
+ {0x0000a158, 0x02050755},
+ {0x0000a15c, 0x02050770},
+ {0x0000a160, 0x02050771},
+ {0x0000a164, 0x02050772},
+ {0x0000a168, 0x02050773},
+ {0x0000a16c, 0x02050774},
+ {0x0000a170, 0x02050775},
+ {0x0000a174, 0x00000776},
+ {0x0000a178, 0x00000776},
+ {0x0000a17c, 0x00000776},
+ {0x0000a180, 0x00000776},
+ {0x0000a184, 0x00000776},
+ {0x0000a188, 0x00000776},
+ {0x0000a18c, 0x00000776},
+ {0x0000a190, 0x00000776},
+ {0x0000a194, 0x00000776},
+ {0x0000a198, 0x00000776},
+ {0x0000a19c, 0x00000776},
+ {0x0000a1a0, 0x00000776},
+ {0x0000a1a4, 0x00000776},
+ {0x0000a1a8, 0x00000776},
+ {0x0000a1ac, 0x00000776},
+ {0x0000a1b0, 0x00000776},
+ {0x0000a1b4, 0x00000776},
+ {0x0000a1b8, 0x00000776},
+ {0x0000a1bc, 0x00000776},
+ {0x0000a1c0, 0x00000776},
+ {0x0000a1c4, 0x00000776},
+ {0x0000a1c8, 0x00000776},
+ {0x0000a1cc, 0x00000776},
+ {0x0000a1d0, 0x00000776},
+ {0x0000a1d4, 0x00000776},
+ {0x0000a1d8, 0x00000776},
+ {0x0000a1dc, 0x00000776},
+ {0x0000a1e0, 0x00000776},
+ {0x0000a1e4, 0x00000776},
+ {0x0000a1e8, 0x00000776},
+ {0x0000a1ec, 0x00000776},
+ {0x0000a1f0, 0x00000776},
+ {0x0000a1f4, 0x00000776},
+ {0x0000a1f8, 0x00000776},
+ {0x0000a1fc, 0x00000776},
+ {0x0000b000, 0x02000101},
+ {0x0000b004, 0x02000102},
+ {0x0000b008, 0x02000103},
+ {0x0000b00c, 0x02000104},
+ {0x0000b010, 0x02000200},
+ {0x0000b014, 0x02000201},
+ {0x0000b018, 0x02000202},
+ {0x0000b01c, 0x02000203},
+ {0x0000b020, 0x02000204},
+ {0x0000b024, 0x02000205},
+ {0x0000b028, 0x02000208},
+ {0x0000b02c, 0x02000302},
+ {0x0000b030, 0x02000303},
+ {0x0000b034, 0x02000304},
+ {0x0000b038, 0x02000400},
+ {0x0000b03c, 0x02010300},
+ {0x0000b040, 0x02010301},
+ {0x0000b044, 0x02010302},
+ {0x0000b048, 0x02000500},
+ {0x0000b04c, 0x02010400},
+ {0x0000b050, 0x02020300},
+ {0x0000b054, 0x02020301},
+ {0x0000b058, 0x02020302},
+ {0x0000b05c, 0x02020303},
+ {0x0000b060, 0x02020400},
+ {0x0000b064, 0x02030300},
+ {0x0000b068, 0x02030301},
+ {0x0000b06c, 0x02030302},
+ {0x0000b070, 0x02030303},
+ {0x0000b074, 0x02030400},
+ {0x0000b078, 0x02040300},
+ {0x0000b07c, 0x02040301},
+ {0x0000b080, 0x02040302},
+ {0x0000b084, 0x02040303},
+ {0x0000b088, 0x02030500},
+ {0x0000b08c, 0x02040400},
+ {0x0000b090, 0x02050203},
+ {0x0000b094, 0x02050204},
+ {0x0000b098, 0x02050205},
+ {0x0000b09c, 0x02040500},
+ {0x0000b0a0, 0x02050301},
+ {0x0000b0a4, 0x02050302},
+ {0x0000b0a8, 0x02050303},
+ {0x0000b0ac, 0x02050400},
+ {0x0000b0b0, 0x02050401},
+ {0x0000b0b4, 0x02050402},
+ {0x0000b0b8, 0x02050403},
+ {0x0000b0bc, 0x02050500},
+ {0x0000b0c0, 0x02050501},
+ {0x0000b0c4, 0x02050502},
+ {0x0000b0c8, 0x02050503},
+ {0x0000b0cc, 0x02050504},
+ {0x0000b0d0, 0x02050600},
+ {0x0000b0d4, 0x02050601},
+ {0x0000b0d8, 0x02050602},
+ {0x0000b0dc, 0x02050603},
+ {0x0000b0e0, 0x02050604},
+ {0x0000b0e4, 0x02050700},
+ {0x0000b0e8, 0x02050701},
+ {0x0000b0ec, 0x02050702},
+ {0x0000b0f0, 0x02050703},
+ {0x0000b0f4, 0x02050704},
+ {0x0000b0f8, 0x02050705},
+ {0x0000b0fc, 0x02050708},
+ {0x0000b100, 0x02050709},
+ {0x0000b104, 0x0205070a},
+ {0x0000b108, 0x0205070b},
+ {0x0000b10c, 0x0205070c},
+ {0x0000b110, 0x0205070d},
+ {0x0000b114, 0x02050710},
+ {0x0000b118, 0x02050711},
+ {0x0000b11c, 0x02050712},
+ {0x0000b120, 0x02050713},
+ {0x0000b124, 0x02050714},
+ {0x0000b128, 0x02050715},
+ {0x0000b12c, 0x02050730},
+ {0x0000b130, 0x02050731},
+ {0x0000b134, 0x02050732},
+ {0x0000b138, 0x02050733},
+ {0x0000b13c, 0x02050734},
+ {0x0000b140, 0x02050735},
+ {0x0000b144, 0x02050750},
+ {0x0000b148, 0x02050751},
+ {0x0000b14c, 0x02050752},
+ {0x0000b150, 0x02050753},
+ {0x0000b154, 0x02050754},
+ {0x0000b158, 0x02050755},
+ {0x0000b15c, 0x02050770},
+ {0x0000b160, 0x02050771},
+ {0x0000b164, 0x02050772},
+ {0x0000b168, 0x02050773},
+ {0x0000b16c, 0x02050774},
+ {0x0000b170, 0x02050775},
+ {0x0000b174, 0x00000776},
+ {0x0000b178, 0x00000776},
+ {0x0000b17c, 0x00000776},
+ {0x0000b180, 0x00000776},
+ {0x0000b184, 0x00000776},
+ {0x0000b188, 0x00000776},
+ {0x0000b18c, 0x00000776},
+ {0x0000b190, 0x00000776},
+ {0x0000b194, 0x00000776},
+ {0x0000b198, 0x00000776},
+ {0x0000b19c, 0x00000776},
+ {0x0000b1a0, 0x00000776},
+ {0x0000b1a4, 0x00000776},
+ {0x0000b1a8, 0x00000776},
+ {0x0000b1ac, 0x00000776},
+ {0x0000b1b0, 0x00000776},
+ {0x0000b1b4, 0x00000776},
+ {0x0000b1b8, 0x00000776},
+ {0x0000b1bc, 0x00000776},
+ {0x0000b1c0, 0x00000776},
+ {0x0000b1c4, 0x00000776},
+ {0x0000b1c8, 0x00000776},
+ {0x0000b1cc, 0x00000776},
+ {0x0000b1d0, 0x00000776},
+ {0x0000b1d4, 0x00000776},
+ {0x0000b1d8, 0x00000776},
+ {0x0000b1dc, 0x00000776},
+ {0x0000b1e0, 0x00000776},
+ {0x0000b1e4, 0x00000776},
+ {0x0000b1e8, 0x00000776},
+ {0x0000b1ec, 0x00000776},
+ {0x0000b1f0, 0x00000776},
+ {0x0000b1f4, 0x00000776},
+ {0x0000b1f8, 0x00000776},
+ {0x0000b1fc, 0x00000776},
+};
+
+static const u32 ar9300_2p0_mac_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
+ {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
+ {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
+ {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
+ {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
+ {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
+ {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
+ {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
+};
+
+static const u32 ar9300_2p0_soc_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00007010, 0x00000023, 0x00000023, 0x00000023, 0x00000023},
+};
+
+static const u32 ar9200_merlin_2p0_radio_core[][2] = {
+ /* Addr allmodes */
+ {0x00007800, 0x00040000},
+ {0x00007804, 0xdb005012},
+ {0x00007808, 0x04924914},
+ {0x0000780c, 0x21084210},
+ {0x00007810, 0x6d801300},
+ {0x00007814, 0x0019beff},
+ {0x00007818, 0x07e41000},
+ {0x0000781c, 0x00392000},
+ {0x00007820, 0x92592480},
+ {0x00007824, 0x00040000},
+ {0x00007828, 0xdb005012},
+ {0x0000782c, 0x04924914},
+ {0x00007830, 0x21084210},
+ {0x00007834, 0x6d801300},
+ {0x00007838, 0x0019beff},
+ {0x0000783c, 0x07e40000},
+ {0x00007840, 0x00392000},
+ {0x00007844, 0x92592480},
+ {0x00007848, 0x00100000},
+ {0x0000784c, 0x773f0567},
+ {0x00007850, 0x54214514},
+ {0x00007854, 0x12035828},
+ {0x00007858, 0x92592692},
+ {0x0000785c, 0x00000000},
+ {0x00007860, 0x56400000},
+ {0x00007864, 0x0a8e370e},
+ {0x00007868, 0xc0102850},
+ {0x0000786c, 0x812d4000},
+ {0x00007870, 0x807ec400},
+ {0x00007874, 0x001b6db0},
+ {0x00007878, 0x00376b63},
+ {0x0000787c, 0x06db6db6},
+ {0x00007880, 0x006d8000},
+ {0x00007884, 0xffeffffe},
+ {0x00007888, 0xffeffffe},
+ {0x0000788c, 0x00010000},
+ {0x00007890, 0x02060aeb},
+ {0x00007894, 0x5a108000},
+};
+
+static const u32 ar9300_2p0_baseband_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a8011},
+ {0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a012e},
+ {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
+ {0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x06903881},
+ {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
+ {0x00009830, 0x0000059c, 0x0000059c, 0x0000119c, 0x0000119c},
+ {0x00009c00, 0x00000044, 0x000000c4, 0x000000c4, 0x00000044},
+ {0x00009e00, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0},
+ {0x00009e04, 0x00802020, 0x00802020, 0x00802020, 0x00802020},
+ {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
+ {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec84d2e},
+ {0x00009e14, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e},
+ {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
+ {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
+ {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
+ {0x00009e44, 0x02321e27, 0x02321e27, 0x02291e27, 0x02291e27},
+ {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
+ {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
+ {0x0000a204, 0x000037c0, 0x000037c4, 0x000037c4, 0x000037c0},
+ {0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
+ {0x0000a230, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b},
+ {0x0000a238, 0xffb81018, 0xffb81018, 0xffb81018, 0xffb81018},
+ {0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
+ {0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
+ {0x0000a258, 0x02020002, 0x02020002, 0x02020002, 0x02020002},
+ {0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e},
+ {0x0000a260, 0x0a021501, 0x0a021501, 0x3a021501, 0x3a021501},
+ {0x0000a264, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
+ {0x0000a280, 0x00000007, 0x00000007, 0x0000000b, 0x0000000b},
+ {0x0000a284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
+ {0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
+ {0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
+ {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
+ {0x0000a2d0, 0x00071981, 0x00071981, 0x00071981, 0x00071982},
+ {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
+ {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+ {0x0000ae04, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
+ {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000ae1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+ {0x0000ae20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
+ {0x0000b284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
+ {0x0000b830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+ {0x0000be04, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
+ {0x0000be18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000be1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+ {0x0000be20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
+ {0x0000c284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
+};
+
+static const u32 ar9300_2p0_baseband_core[][2] = {
+ /* Addr allmodes */
+ {0x00009800, 0xafe68e30},
+ {0x00009804, 0xfd14e000},
+ {0x00009808, 0x9c0a9f6b},
+ {0x0000980c, 0x04900000},
+ {0x00009814, 0x9280c00a},
+ {0x00009818, 0x00000000},
+ {0x0000981c, 0x00020028},
+ {0x00009834, 0x5f3ca3de},
+ {0x00009838, 0x0108ecff},
+ {0x0000983c, 0x14750600},
+ {0x00009880, 0x201fff00},
+ {0x00009884, 0x00001042},
+ {0x000098a4, 0x00200400},
+ {0x000098b0, 0x52440bbe},
+ {0x000098d0, 0x004b6a8e},
+ {0x000098d4, 0x00000820},
+ {0x000098dc, 0x00000000},
+ {0x000098f0, 0x00000000},
+ {0x000098f4, 0x00000000},
+ {0x00009c04, 0xff55ff55},
+ {0x00009c08, 0x0320ff55},
+ {0x00009c0c, 0x00000000},
+ {0x00009c10, 0x00000000},
+ {0x00009c14, 0x00046384},
+ {0x00009c18, 0x05b6b440},
+ {0x00009c1c, 0x00b6b440},
+ {0x00009d00, 0xc080a333},
+ {0x00009d04, 0x40206c10},
+ {0x00009d08, 0x009c4060},
+ {0x00009d0c, 0x9883800a},
+ {0x00009d10, 0x01834061},
+ {0x00009d14, 0x00c0040b},
+ {0x00009d18, 0x00000000},
+ {0x00009e08, 0x0038230c},
+ {0x00009e24, 0x990bb515},
+ {0x00009e28, 0x0c6f0000},
+ {0x00009e30, 0x06336f77},
+ {0x00009e34, 0x6af6532f},
+ {0x00009e38, 0x0cc80c00},
+ {0x00009e3c, 0xcf946222},
+ {0x00009e40, 0x0d261820},
+ {0x00009e4c, 0x00001004},
+ {0x00009e50, 0x00ff03f1},
+ {0x00009e54, 0x00000000},
+ {0x00009fc0, 0x803e4788},
+ {0x00009fc4, 0x0001efb5},
+ {0x00009fcc, 0x40000014},
+ {0x00009fd0, 0x01193b93},
+ {0x0000a20c, 0x00000000},
+ {0x0000a220, 0x00000000},
+ {0x0000a224, 0x00000000},
+ {0x0000a228, 0x10002310},
+ {0x0000a22c, 0x01036a1e},
+ {0x0000a234, 0x10000fff},
+ {0x0000a23c, 0x00000000},
+ {0x0000a244, 0x0c000000},
+ {0x0000a2a0, 0x00000001},
+ {0x0000a2c0, 0x00000001},
+ {0x0000a2c8, 0x00000000},
+ {0x0000a2cc, 0x18c43433},
+ {0x0000a2d4, 0x00000000},
+ {0x0000a2dc, 0x00000000},
+ {0x0000a2e0, 0x00000000},
+ {0x0000a2e4, 0x00000000},
+ {0x0000a2e8, 0x00000000},
+ {0x0000a2ec, 0x00000000},
+ {0x0000a2f0, 0x00000000},
+ {0x0000a2f4, 0x00000000},
+ {0x0000a2f8, 0x00000000},
+ {0x0000a344, 0x00000000},
+ {0x0000a34c, 0x00000000},
+ {0x0000a350, 0x0000a000},
+ {0x0000a364, 0x00000000},
+ {0x0000a370, 0x00000000},
+ {0x0000a390, 0x00000001},
+ {0x0000a394, 0x00000444},
+ {0x0000a398, 0x001f0e0f},
+ {0x0000a39c, 0x0075393f},
+ {0x0000a3a0, 0xb79f6427},
+ {0x0000a3a4, 0x00000000},
+ {0x0000a3a8, 0xaaaaaaaa},
+ {0x0000a3ac, 0x3c466478},
+ {0x0000a3c0, 0x20202020},
+ {0x0000a3c4, 0x22222220},
+ {0x0000a3c8, 0x20200020},
+ {0x0000a3cc, 0x20202020},
+ {0x0000a3d0, 0x20202020},
+ {0x0000a3d4, 0x20202020},
+ {0x0000a3d8, 0x20202020},
+ {0x0000a3dc, 0x20202020},
+ {0x0000a3e0, 0x20202020},
+ {0x0000a3e4, 0x20202020},
+ {0x0000a3e8, 0x20202020},
+ {0x0000a3ec, 0x20202020},
+ {0x0000a3f0, 0x00000000},
+ {0x0000a3f4, 0x00000246},
+ {0x0000a3f8, 0x0cdbd380},
+ {0x0000a3fc, 0x000f0f01},
+ {0x0000a400, 0x8fa91f01},
+ {0x0000a404, 0x00000000},
+ {0x0000a408, 0x0e79e5c6},
+ {0x0000a40c, 0x00820820},
+ {0x0000a414, 0x1ce739ce},
+ {0x0000a418, 0x2d001dce},
+ {0x0000a41c, 0x1ce739ce},
+ {0x0000a420, 0x000001ce},
+ {0x0000a424, 0x1ce739ce},
+ {0x0000a428, 0x000001ce},
+ {0x0000a42c, 0x1ce739ce},
+ {0x0000a430, 0x1ce739ce},
+ {0x0000a434, 0x00000000},
+ {0x0000a438, 0x00001801},
+ {0x0000a43c, 0x00000000},
+ {0x0000a440, 0x00000000},
+ {0x0000a444, 0x00000000},
+ {0x0000a448, 0x04000080},
+ {0x0000a44c, 0x00000001},
+ {0x0000a450, 0x00010000},
+ {0x0000a458, 0x00000000},
+ {0x0000a600, 0x00000000},
+ {0x0000a604, 0x00000000},
+ {0x0000a608, 0x00000000},
+ {0x0000a60c, 0x00000000},
+ {0x0000a610, 0x00000000},
+ {0x0000a614, 0x00000000},
+ {0x0000a618, 0x00000000},
+ {0x0000a61c, 0x00000000},
+ {0x0000a620, 0x00000000},
+ {0x0000a624, 0x00000000},
+ {0x0000a628, 0x00000000},
+ {0x0000a62c, 0x00000000},
+ {0x0000a630, 0x00000000},
+ {0x0000a634, 0x00000000},
+ {0x0000a638, 0x00000000},
+ {0x0000a63c, 0x00000000},
+ {0x0000a640, 0x00000000},
+ {0x0000a644, 0x3fad9d74},
+ {0x0000a648, 0x0048060a},
+ {0x0000a64c, 0x00000637},
+ {0x0000a670, 0x03020100},
+ {0x0000a674, 0x09080504},
+ {0x0000a678, 0x0d0c0b0a},
+ {0x0000a67c, 0x13121110},
+ {0x0000a680, 0x31301514},
+ {0x0000a684, 0x35343332},
+ {0x0000a688, 0x00000036},
+ {0x0000a690, 0x00000838},
+ {0x0000a7c0, 0x00000000},
+ {0x0000a7c4, 0xfffffffc},
+ {0x0000a7c8, 0x00000000},
+ {0x0000a7cc, 0x00000000},
+ {0x0000a7d0, 0x00000000},
+ {0x0000a7d4, 0x00000004},
+ {0x0000a7dc, 0x00000001},
+ {0x0000a8d0, 0x004b6a8e},
+ {0x0000a8d4, 0x00000820},
+ {0x0000a8dc, 0x00000000},
+ {0x0000a8f0, 0x00000000},
+ {0x0000a8f4, 0x00000000},
+ {0x0000b2d0, 0x00000080},
+ {0x0000b2d4, 0x00000000},
+ {0x0000b2dc, 0x00000000},
+ {0x0000b2e0, 0x00000000},
+ {0x0000b2e4, 0x00000000},
+ {0x0000b2e8, 0x00000000},
+ {0x0000b2ec, 0x00000000},
+ {0x0000b2f0, 0x00000000},
+ {0x0000b2f4, 0x00000000},
+ {0x0000b2f8, 0x00000000},
+ {0x0000b408, 0x0e79e5c0},
+ {0x0000b40c, 0x00820820},
+ {0x0000b420, 0x00000000},
+ {0x0000b8d0, 0x004b6a8e},
+ {0x0000b8d4, 0x00000820},
+ {0x0000b8dc, 0x00000000},
+ {0x0000b8f0, 0x00000000},
+ {0x0000b8f4, 0x00000000},
+ {0x0000c2d0, 0x00000080},
+ {0x0000c2d4, 0x00000000},
+ {0x0000c2dc, 0x00000000},
+ {0x0000c2e0, 0x00000000},
+ {0x0000c2e4, 0x00000000},
+ {0x0000c2e8, 0x00000000},
+ {0x0000c2ec, 0x00000000},
+ {0x0000c2f0, 0x00000000},
+ {0x0000c2f4, 0x00000000},
+ {0x0000c2f8, 0x00000000},
+ {0x0000c408, 0x0e79e5c0},
+ {0x0000c40c, 0x00820820},
+ {0x0000c420, 0x00000000},
+};
+
+static const u32 ar9300Modes_high_power_tx_gain_table_2p0[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+ {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
+ {0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0a022220, 0x0a022220, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x0f022223, 0x0f022223, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x14022620, 0x14022620, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x18022622, 0x18022622, 0x11000400, 0x11000400},
+ {0x0000a518, 0x1b022822, 0x1b022822, 0x15000402, 0x15000402},
+ {0x0000a51c, 0x20022842, 0x20022842, 0x19000404, 0x19000404},
+ {0x0000a520, 0x22022c41, 0x22022c41, 0x1b000603, 0x1b000603},
+ {0x0000a524, 0x28023042, 0x28023042, 0x1f000a02, 0x1f000a02},
+ {0x0000a528, 0x2c023044, 0x2c023044, 0x23000a04, 0x23000a04},
+ {0x0000a52c, 0x2f023644, 0x2f023644, 0x26000a20, 0x26000a20},
+ {0x0000a530, 0x34025643, 0x34025643, 0x2a000e20, 0x2a000e20},
+ {0x0000a534, 0x38025a44, 0x38025a44, 0x2e000e22, 0x2e000e22},
+ {0x0000a538, 0x3b025e45, 0x3b025e45, 0x31000e24, 0x31000e24},
+ {0x0000a53c, 0x41025e4a, 0x41025e4a, 0x34001640, 0x34001640},
+ {0x0000a540, 0x48025e6c, 0x48025e6c, 0x38001660, 0x38001660},
+ {0x0000a544, 0x4e025e8e, 0x4e025e8e, 0x3b001861, 0x3b001861},
+ {0x0000a548, 0x53025eb2, 0x53025eb2, 0x3e001a81, 0x3e001a81},
+ {0x0000a54c, 0x59025eb5, 0x59025eb5, 0x42001a83, 0x42001a83},
+ {0x0000a550, 0x5f025ef6, 0x5f025ef6, 0x44001c84, 0x44001c84},
+ {0x0000a554, 0x62025f56, 0x62025f56, 0x48001ce3, 0x48001ce3},
+ {0x0000a558, 0x66027f56, 0x66027f56, 0x4c001ce5, 0x4c001ce5},
+ {0x0000a55c, 0x6a029f56, 0x6a029f56, 0x50001ce9, 0x50001ce9},
+ {0x0000a560, 0x70049f56, 0x70049f56, 0x54001ceb, 0x54001ceb},
+ {0x0000a564, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a568, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a56c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a570, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a574, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a578, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a57c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a580, 0x00802220, 0x00802220, 0x00800000, 0x00800000},
+ {0x0000a584, 0x06802223, 0x06802223, 0x04800002, 0x04800002},
+ {0x0000a588, 0x0a822220, 0x0a822220, 0x08800004, 0x08800004},
+ {0x0000a58c, 0x0f822223, 0x0f822223, 0x0b800200, 0x0b800200},
+ {0x0000a590, 0x14822620, 0x14822620, 0x0f800202, 0x0f800202},
+ {0x0000a594, 0x18822622, 0x18822622, 0x11800400, 0x11800400},
+ {0x0000a598, 0x1b822822, 0x1b822822, 0x15800402, 0x15800402},
+ {0x0000a59c, 0x20822842, 0x20822842, 0x19800404, 0x19800404},
+ {0x0000a5a0, 0x22822c41, 0x22822c41, 0x1b800603, 0x1b800603},
+ {0x0000a5a4, 0x28823042, 0x28823042, 0x1f800a02, 0x1f800a02},
+ {0x0000a5a8, 0x2c823044, 0x2c823044, 0x23800a04, 0x23800a04},
+ {0x0000a5ac, 0x2f823644, 0x2f823644, 0x26800a20, 0x26800a20},
+ {0x0000a5b0, 0x34825643, 0x34825643, 0x2a800e20, 0x2a800e20},
+ {0x0000a5b4, 0x38825a44, 0x38825a44, 0x2e800e22, 0x2e800e22},
+ {0x0000a5b8, 0x3b825e45, 0x3b825e45, 0x31800e24, 0x31800e24},
+ {0x0000a5bc, 0x41825e4a, 0x41825e4a, 0x34801640, 0x34801640},
+ {0x0000a5c0, 0x48825e6c, 0x48825e6c, 0x38801660, 0x38801660},
+ {0x0000a5c4, 0x4e825e8e, 0x4e825e8e, 0x3b801861, 0x3b801861},
+ {0x0000a5c8, 0x53825eb2, 0x53825eb2, 0x3e801a81, 0x3e801a81},
+ {0x0000a5cc, 0x59825eb5, 0x59825eb5, 0x42801a83, 0x42801a83},
+ {0x0000a5d0, 0x5f825ef6, 0x5f825ef6, 0x44801c84, 0x44801c84},
+ {0x0000a5d4, 0x62825f56, 0x62825f56, 0x48801ce3, 0x48801ce3},
+ {0x0000a5d8, 0x66827f56, 0x66827f56, 0x4c801ce5, 0x4c801ce5},
+ {0x0000a5dc, 0x6a829f56, 0x6a829f56, 0x50801ce9, 0x50801ce9},
+ {0x0000a5e0, 0x70849f56, 0x70849f56, 0x54801ceb, 0x54801ceb},
+ {0x0000a5e4, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
+ {0x0000a5e8, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
+ {0x0000a5ec, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
+ {0x0000a5f0, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
+ {0x0000a5f4, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
+ {0x0000a5f8, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
+ {0x0000a5fc, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
+ {0x00016044, 0x056db2e6, 0x056db2e6, 0x056db2e6, 0x056db2e6},
+ {0x00016048, 0xae480001, 0xae480001, 0xae480001, 0xae480001},
+ {0x00016068, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c},
+ {0x00016444, 0x056db2e6, 0x056db2e6, 0x056db2e6, 0x056db2e6},
+ {0x00016448, 0xae480001, 0xae480001, 0xae480001, 0xae480001},
+ {0x00016468, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c},
+ {0x00016844, 0x056db2e6, 0x056db2e6, 0x056db2e6, 0x056db2e6},
+ {0x00016848, 0xae480001, 0xae480001, 0xae480001, 0xae480001},
+ {0x00016868, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c},
+};
+
+static const u32 ar9300Modes_high_ob_db_tx_gain_table_2p0[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+ {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
+ {0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0a022220, 0x0a022220, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x0f022223, 0x0f022223, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x14022620, 0x14022620, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x18022622, 0x18022622, 0x11000400, 0x11000400},
+ {0x0000a518, 0x1b022822, 0x1b022822, 0x15000402, 0x15000402},
+ {0x0000a51c, 0x20022842, 0x20022842, 0x19000404, 0x19000404},
+ {0x0000a520, 0x22022c41, 0x22022c41, 0x1b000603, 0x1b000603},
+ {0x0000a524, 0x28023042, 0x28023042, 0x1f000a02, 0x1f000a02},
+ {0x0000a528, 0x2c023044, 0x2c023044, 0x23000a04, 0x23000a04},
+ {0x0000a52c, 0x2f023644, 0x2f023644, 0x26000a20, 0x26000a20},
+ {0x0000a530, 0x34025643, 0x34025643, 0x2a000e20, 0x2a000e20},
+ {0x0000a534, 0x38025a44, 0x38025a44, 0x2e000e22, 0x2e000e22},
+ {0x0000a538, 0x3b025e45, 0x3b025e45, 0x31000e24, 0x31000e24},
+ {0x0000a53c, 0x41025e4a, 0x41025e4a, 0x34001640, 0x34001640},
+ {0x0000a540, 0x48025e6c, 0x48025e6c, 0x38001660, 0x38001660},
+ {0x0000a544, 0x4e025e8e, 0x4e025e8e, 0x3b001861, 0x3b001861},
+ {0x0000a548, 0x53025eb2, 0x53025eb2, 0x3e001a81, 0x3e001a81},
+ {0x0000a54c, 0x59025eb5, 0x59025eb5, 0x42001a83, 0x42001a83},
+ {0x0000a550, 0x5f025ef6, 0x5f025ef6, 0x44001c84, 0x44001c84},
+ {0x0000a554, 0x62025f56, 0x62025f56, 0x48001ce3, 0x48001ce3},
+ {0x0000a558, 0x66027f56, 0x66027f56, 0x4c001ce5, 0x4c001ce5},
+ {0x0000a55c, 0x6a029f56, 0x6a029f56, 0x50001ce9, 0x50001ce9},
+ {0x0000a560, 0x70049f56, 0x70049f56, 0x54001ceb, 0x54001ceb},
+ {0x0000a564, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a568, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a56c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a570, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a574, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a578, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a57c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a580, 0x00802220, 0x00802220, 0x00800000, 0x00800000},
+ {0x0000a584, 0x06802223, 0x06802223, 0x04800002, 0x04800002},
+ {0x0000a588, 0x0a822220, 0x0a822220, 0x08800004, 0x08800004},
+ {0x0000a58c, 0x0f822223, 0x0f822223, 0x0b800200, 0x0b800200},
+ {0x0000a590, 0x14822620, 0x14822620, 0x0f800202, 0x0f800202},
+ {0x0000a594, 0x18822622, 0x18822622, 0x11800400, 0x11800400},
+ {0x0000a598, 0x1b822822, 0x1b822822, 0x15800402, 0x15800402},
+ {0x0000a59c, 0x20822842, 0x20822842, 0x19800404, 0x19800404},
+ {0x0000a5a0, 0x22822c41, 0x22822c41, 0x1b800603, 0x1b800603},
+ {0x0000a5a4, 0x28823042, 0x28823042, 0x1f800a02, 0x1f800a02},
+ {0x0000a5a8, 0x2c823044, 0x2c823044, 0x23800a04, 0x23800a04},
+ {0x0000a5ac, 0x2f823644, 0x2f823644, 0x26800a20, 0x26800a20},
+ {0x0000a5b0, 0x34825643, 0x34825643, 0x2a800e20, 0x2a800e20},
+ {0x0000a5b4, 0x38825a44, 0x38825a44, 0x2e800e22, 0x2e800e22},
+ {0x0000a5b8, 0x3b825e45, 0x3b825e45, 0x31800e24, 0x31800e24},
+ {0x0000a5bc, 0x41825e4a, 0x41825e4a, 0x34801640, 0x34801640},
+ {0x0000a5c0, 0x48825e6c, 0x48825e6c, 0x38801660, 0x38801660},
+ {0x0000a5c4, 0x4e825e8e, 0x4e825e8e, 0x3b801861, 0x3b801861},
+ {0x0000a5c8, 0x53825eb2, 0x53825eb2, 0x3e801a81, 0x3e801a81},
+ {0x0000a5cc, 0x59825eb5, 0x59825eb5, 0x42801a83, 0x42801a83},
+ {0x0000a5d0, 0x5f825ef6, 0x5f825ef6, 0x44801c84, 0x44801c84},
+ {0x0000a5d4, 0x62825f56, 0x62825f56, 0x48801ce3, 0x48801ce3},
+ {0x0000a5d8, 0x66827f56, 0x66827f56, 0x4c801ce5, 0x4c801ce5},
+ {0x0000a5dc, 0x6a829f56, 0x6a829f56, 0x50801ce9, 0x50801ce9},
+ {0x0000a5e0, 0x70849f56, 0x70849f56, 0x54801ceb, 0x54801ceb},
+ {0x0000a5e4, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
+ {0x0000a5e8, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
+ {0x0000a5ec, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
+ {0x0000a5f0, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
+ {0x0000a5f4, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
+ {0x0000a5f8, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
+ {0x0000a5fc, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
+ {0x00016044, 0x056db2e4, 0x056db2e4, 0x056db2e4, 0x056db2e4},
+ {0x00016048, 0x8e480001, 0x8e480001, 0x8e480001, 0x8e480001},
+ {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016444, 0x056db2e4, 0x056db2e4, 0x056db2e4, 0x056db2e4},
+ {0x00016448, 0x8e480001, 0x8e480001, 0x8e480001, 0x8e480001},
+ {0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016844, 0x056db2e4, 0x056db2e4, 0x056db2e4, 0x056db2e4},
+ {0x00016848, 0x8e480001, 0x8e480001, 0x8e480001, 0x8e480001},
+ {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+};
+
+static const u32 ar9300Common_rx_gain_table_2p0[][2] = {
+ /* Addr allmodes */
+ {0x0000a000, 0x00010000},
+ {0x0000a004, 0x00030002},
+ {0x0000a008, 0x00050004},
+ {0x0000a00c, 0x00810080},
+ {0x0000a010, 0x00830082},
+ {0x0000a014, 0x01810180},
+ {0x0000a018, 0x01830182},
+ {0x0000a01c, 0x01850184},
+ {0x0000a020, 0x01890188},
+ {0x0000a024, 0x018b018a},
+ {0x0000a028, 0x018d018c},
+ {0x0000a02c, 0x01910190},
+ {0x0000a030, 0x01930192},
+ {0x0000a034, 0x01950194},
+ {0x0000a038, 0x038a0196},
+ {0x0000a03c, 0x038c038b},
+ {0x0000a040, 0x0390038d},
+ {0x0000a044, 0x03920391},
+ {0x0000a048, 0x03940393},
+ {0x0000a04c, 0x03960395},
+ {0x0000a050, 0x00000000},
+ {0x0000a054, 0x00000000},
+ {0x0000a058, 0x00000000},
+ {0x0000a05c, 0x00000000},
+ {0x0000a060, 0x00000000},
+ {0x0000a064, 0x00000000},
+ {0x0000a068, 0x00000000},
+ {0x0000a06c, 0x00000000},
+ {0x0000a070, 0x00000000},
+ {0x0000a074, 0x00000000},
+ {0x0000a078, 0x00000000},
+ {0x0000a07c, 0x00000000},
+ {0x0000a080, 0x22222229},
+ {0x0000a084, 0x1d1d1d1d},
+ {0x0000a088, 0x1d1d1d1d},
+ {0x0000a08c, 0x1d1d1d1d},
+ {0x0000a090, 0x171d1d1d},
+ {0x0000a094, 0x11111717},
+ {0x0000a098, 0x00030311},
+ {0x0000a09c, 0x00000000},
+ {0x0000a0a0, 0x00000000},
+ {0x0000a0a4, 0x00000000},
+ {0x0000a0a8, 0x00000000},
+ {0x0000a0ac, 0x00000000},
+ {0x0000a0b0, 0x00000000},
+ {0x0000a0b4, 0x00000000},
+ {0x0000a0b8, 0x00000000},
+ {0x0000a0bc, 0x00000000},
+ {0x0000a0c0, 0x001f0000},
+ {0x0000a0c4, 0x01000101},
+ {0x0000a0c8, 0x011e011f},
+ {0x0000a0cc, 0x011c011d},
+ {0x0000a0d0, 0x02030204},
+ {0x0000a0d4, 0x02010202},
+ {0x0000a0d8, 0x021f0200},
+ {0x0000a0dc, 0x0302021e},
+ {0x0000a0e0, 0x03000301},
+ {0x0000a0e4, 0x031e031f},
+ {0x0000a0e8, 0x0402031d},
+ {0x0000a0ec, 0x04000401},
+ {0x0000a0f0, 0x041e041f},
+ {0x0000a0f4, 0x0502041d},
+ {0x0000a0f8, 0x05000501},
+ {0x0000a0fc, 0x051e051f},
+ {0x0000a100, 0x06010602},
+ {0x0000a104, 0x061f0600},
+ {0x0000a108, 0x061d061e},
+ {0x0000a10c, 0x07020703},
+ {0x0000a110, 0x07000701},
+ {0x0000a114, 0x00000000},
+ {0x0000a118, 0x00000000},
+ {0x0000a11c, 0x00000000},
+ {0x0000a120, 0x00000000},
+ {0x0000a124, 0x00000000},
+ {0x0000a128, 0x00000000},
+ {0x0000a12c, 0x00000000},
+ {0x0000a130, 0x00000000},
+ {0x0000a134, 0x00000000},
+ {0x0000a138, 0x00000000},
+ {0x0000a13c, 0x00000000},
+ {0x0000a140, 0x001f0000},
+ {0x0000a144, 0x01000101},
+ {0x0000a148, 0x011e011f},
+ {0x0000a14c, 0x011c011d},
+ {0x0000a150, 0x02030204},
+ {0x0000a154, 0x02010202},
+ {0x0000a158, 0x021f0200},
+ {0x0000a15c, 0x0302021e},
+ {0x0000a160, 0x03000301},
+ {0x0000a164, 0x031e031f},
+ {0x0000a168, 0x0402031d},
+ {0x0000a16c, 0x04000401},
+ {0x0000a170, 0x041e041f},
+ {0x0000a174, 0x0502041d},
+ {0x0000a178, 0x05000501},
+ {0x0000a17c, 0x051e051f},
+ {0x0000a180, 0x06010602},
+ {0x0000a184, 0x061f0600},
+ {0x0000a188, 0x061d061e},
+ {0x0000a18c, 0x07020703},
+ {0x0000a190, 0x07000701},
+ {0x0000a194, 0x00000000},
+ {0x0000a198, 0x00000000},
+ {0x0000a19c, 0x00000000},
+ {0x0000a1a0, 0x00000000},
+ {0x0000a1a4, 0x00000000},
+ {0x0000a1a8, 0x00000000},
+ {0x0000a1ac, 0x00000000},
+ {0x0000a1b0, 0x00000000},
+ {0x0000a1b4, 0x00000000},
+ {0x0000a1b8, 0x00000000},
+ {0x0000a1bc, 0x00000000},
+ {0x0000a1c0, 0x00000000},
+ {0x0000a1c4, 0x00000000},
+ {0x0000a1c8, 0x00000000},
+ {0x0000a1cc, 0x00000000},
+ {0x0000a1d0, 0x00000000},
+ {0x0000a1d4, 0x00000000},
+ {0x0000a1d8, 0x00000000},
+ {0x0000a1dc, 0x00000000},
+ {0x0000a1e0, 0x00000000},
+ {0x0000a1e4, 0x00000000},
+ {0x0000a1e8, 0x00000000},
+ {0x0000a1ec, 0x00000000},
+ {0x0000a1f0, 0x00000396},
+ {0x0000a1f4, 0x00000396},
+ {0x0000a1f8, 0x00000396},
+ {0x0000a1fc, 0x00000196},
+ {0x0000b000, 0x00010000},
+ {0x0000b004, 0x00030002},
+ {0x0000b008, 0x00050004},
+ {0x0000b00c, 0x00810080},
+ {0x0000b010, 0x00830082},
+ {0x0000b014, 0x01810180},
+ {0x0000b018, 0x01830182},
+ {0x0000b01c, 0x01850184},
+ {0x0000b020, 0x02810280},
+ {0x0000b024, 0x02830282},
+ {0x0000b028, 0x02850284},
+ {0x0000b02c, 0x02890288},
+ {0x0000b030, 0x028b028a},
+ {0x0000b034, 0x0388028c},
+ {0x0000b038, 0x038a0389},
+ {0x0000b03c, 0x038c038b},
+ {0x0000b040, 0x0390038d},
+ {0x0000b044, 0x03920391},
+ {0x0000b048, 0x03940393},
+ {0x0000b04c, 0x03960395},
+ {0x0000b050, 0x00000000},
+ {0x0000b054, 0x00000000},
+ {0x0000b058, 0x00000000},
+ {0x0000b05c, 0x00000000},
+ {0x0000b060, 0x00000000},
+ {0x0000b064, 0x00000000},
+ {0x0000b068, 0x00000000},
+ {0x0000b06c, 0x00000000},
+ {0x0000b070, 0x00000000},
+ {0x0000b074, 0x00000000},
+ {0x0000b078, 0x00000000},
+ {0x0000b07c, 0x00000000},
+ {0x0000b080, 0x32323232},
+ {0x0000b084, 0x2f2f3232},
+ {0x0000b088, 0x23282a2d},
+ {0x0000b08c, 0x1c1e2123},
+ {0x0000b090, 0x14171919},
+ {0x0000b094, 0x0e0e1214},
+ {0x0000b098, 0x03050707},
+ {0x0000b09c, 0x00030303},
+ {0x0000b0a0, 0x00000000},
+ {0x0000b0a4, 0x00000000},
+ {0x0000b0a8, 0x00000000},
+ {0x0000b0ac, 0x00000000},
+ {0x0000b0b0, 0x00000000},
+ {0x0000b0b4, 0x00000000},
+ {0x0000b0b8, 0x00000000},
+ {0x0000b0bc, 0x00000000},
+ {0x0000b0c0, 0x003f0020},
+ {0x0000b0c4, 0x00400041},
+ {0x0000b0c8, 0x0140005f},
+ {0x0000b0cc, 0x0160015f},
+ {0x0000b0d0, 0x017e017f},
+ {0x0000b0d4, 0x02410242},
+ {0x0000b0d8, 0x025f0240},
+ {0x0000b0dc, 0x027f0260},
+ {0x0000b0e0, 0x0341027e},
+ {0x0000b0e4, 0x035f0340},
+ {0x0000b0e8, 0x037f0360},
+ {0x0000b0ec, 0x04400441},
+ {0x0000b0f0, 0x0460045f},
+ {0x0000b0f4, 0x0541047f},
+ {0x0000b0f8, 0x055f0540},
+ {0x0000b0fc, 0x057f0560},
+ {0x0000b100, 0x06400641},
+ {0x0000b104, 0x0660065f},
+ {0x0000b108, 0x067e067f},
+ {0x0000b10c, 0x07410742},
+ {0x0000b110, 0x075f0740},
+ {0x0000b114, 0x077f0760},
+ {0x0000b118, 0x07800781},
+ {0x0000b11c, 0x07a0079f},
+ {0x0000b120, 0x07c107bf},
+ {0x0000b124, 0x000007c0},
+ {0x0000b128, 0x00000000},
+ {0x0000b12c, 0x00000000},
+ {0x0000b130, 0x00000000},
+ {0x0000b134, 0x00000000},
+ {0x0000b138, 0x00000000},
+ {0x0000b13c, 0x00000000},
+ {0x0000b140, 0x003f0020},
+ {0x0000b144, 0x00400041},
+ {0x0000b148, 0x0140005f},
+ {0x0000b14c, 0x0160015f},
+ {0x0000b150, 0x017e017f},
+ {0x0000b154, 0x02410242},
+ {0x0000b158, 0x025f0240},
+ {0x0000b15c, 0x027f0260},
+ {0x0000b160, 0x0341027e},
+ {0x0000b164, 0x035f0340},
+ {0x0000b168, 0x037f0360},
+ {0x0000b16c, 0x04400441},
+ {0x0000b170, 0x0460045f},
+ {0x0000b174, 0x0541047f},
+ {0x0000b178, 0x055f0540},
+ {0x0000b17c, 0x057f0560},
+ {0x0000b180, 0x06400641},
+ {0x0000b184, 0x0660065f},
+ {0x0000b188, 0x067e067f},
+ {0x0000b18c, 0x07410742},
+ {0x0000b190, 0x075f0740},
+ {0x0000b194, 0x077f0760},
+ {0x0000b198, 0x07800781},
+ {0x0000b19c, 0x07a0079f},
+ {0x0000b1a0, 0x07c107bf},
+ {0x0000b1a4, 0x000007c0},
+ {0x0000b1a8, 0x00000000},
+ {0x0000b1ac, 0x00000000},
+ {0x0000b1b0, 0x00000000},
+ {0x0000b1b4, 0x00000000},
+ {0x0000b1b8, 0x00000000},
+ {0x0000b1bc, 0x00000000},
+ {0x0000b1c0, 0x00000000},
+ {0x0000b1c4, 0x00000000},
+ {0x0000b1c8, 0x00000000},
+ {0x0000b1cc, 0x00000000},
+ {0x0000b1d0, 0x00000000},
+ {0x0000b1d4, 0x00000000},
+ {0x0000b1d8, 0x00000000},
+ {0x0000b1dc, 0x00000000},
+ {0x0000b1e0, 0x00000000},
+ {0x0000b1e4, 0x00000000},
+ {0x0000b1e8, 0x00000000},
+ {0x0000b1ec, 0x00000000},
+ {0x0000b1f0, 0x00000396},
+ {0x0000b1f4, 0x00000396},
+ {0x0000b1f8, 0x00000396},
+ {0x0000b1fc, 0x00000196},
+};
+
+static const u32 ar9300Modes_low_ob_db_tx_gain_table_2p0[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+ {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400},
+ {0x0000a518, 0x21020220, 0x21020220, 0x16000402, 0x16000402},
+ {0x0000a51c, 0x27020223, 0x27020223, 0x19000404, 0x19000404},
+ {0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603},
+ {0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02},
+ {0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04},
+ {0x0000a52c, 0x3a02222a, 0x3a02222a, 0x28000a20, 0x28000a20},
+ {0x0000a530, 0x3e02222c, 0x3e02222c, 0x2c000e20, 0x2c000e20},
+ {0x0000a534, 0x4202242a, 0x4202242a, 0x30000e22, 0x30000e22},
+ {0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24},
+ {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640},
+ {0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660},
+ {0x0000a544, 0x5302266c, 0x5302266c, 0x3f001861, 0x3f001861},
+ {0x0000a548, 0x5702286c, 0x5702286c, 0x43001a81, 0x43001a81},
+ {0x0000a54c, 0x5c04286b, 0x5c04286b, 0x47001a83, 0x47001a83},
+ {0x0000a550, 0x61042a6c, 0x61042a6c, 0x4a001c84, 0x4a001c84},
+ {0x0000a554, 0x66062a6c, 0x66062a6c, 0x4e001ce3, 0x4e001ce3},
+ {0x0000a558, 0x6b062e6c, 0x6b062e6c, 0x52001ce5, 0x52001ce5},
+ {0x0000a55c, 0x7006308c, 0x7006308c, 0x56001ce9, 0x56001ce9},
+ {0x0000a560, 0x730a308a, 0x730a308a, 0x5a001ceb, 0x5a001ceb},
+ {0x0000a564, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a568, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a56c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a570, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a574, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a578, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a57c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
+ {0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002},
+ {0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004},
+ {0x0000a58c, 0x10800023, 0x10800023, 0x0b800200, 0x0b800200},
+ {0x0000a590, 0x16800220, 0x16800220, 0x0f800202, 0x0f800202},
+ {0x0000a594, 0x1c800223, 0x1c800223, 0x12800400, 0x12800400},
+ {0x0000a598, 0x21820220, 0x21820220, 0x16800402, 0x16800402},
+ {0x0000a59c, 0x27820223, 0x27820223, 0x19800404, 0x19800404},
+ {0x0000a5a0, 0x2b822220, 0x2b822220, 0x1c800603, 0x1c800603},
+ {0x0000a5a4, 0x2f822222, 0x2f822222, 0x21800a02, 0x21800a02},
+ {0x0000a5a8, 0x34822225, 0x34822225, 0x25800a04, 0x25800a04},
+ {0x0000a5ac, 0x3a82222a, 0x3a82222a, 0x28800a20, 0x28800a20},
+ {0x0000a5b0, 0x3e82222c, 0x3e82222c, 0x2c800e20, 0x2c800e20},
+ {0x0000a5b4, 0x4282242a, 0x4282242a, 0x30800e22, 0x30800e22},
+ {0x0000a5b8, 0x4782244a, 0x4782244a, 0x34800e24, 0x34800e24},
+ {0x0000a5bc, 0x4b82244c, 0x4b82244c, 0x38801640, 0x38801640},
+ {0x0000a5c0, 0x4e82246c, 0x4e82246c, 0x3c801660, 0x3c801660},
+ {0x0000a5c4, 0x5382266c, 0x5382266c, 0x3f801861, 0x3f801861},
+ {0x0000a5c8, 0x5782286c, 0x5782286c, 0x43801a81, 0x43801a81},
+ {0x0000a5cc, 0x5c84286b, 0x5c84286b, 0x47801a83, 0x47801a83},
+ {0x0000a5d0, 0x61842a6c, 0x61842a6c, 0x4a801c84, 0x4a801c84},
+ {0x0000a5d4, 0x66862a6c, 0x66862a6c, 0x4e801ce3, 0x4e801ce3},
+ {0x0000a5d8, 0x6b862e6c, 0x6b862e6c, 0x52801ce5, 0x52801ce5},
+ {0x0000a5dc, 0x7086308c, 0x7086308c, 0x56801ce9, 0x56801ce9},
+ {0x0000a5e0, 0x738a308a, 0x738a308a, 0x5a801ceb, 0x5a801ceb},
+ {0x0000a5e4, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5e8, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5ec, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f0, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f4, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f8, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5fc, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
+ {0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016048, 0x64000001, 0x64000001, 0x64000001, 0x64000001},
+ {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016444, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016448, 0x64000001, 0x64000001, 0x64000001, 0x64000001},
+ {0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016844, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016848, 0x64000001, 0x64000001, 0x64000001, 0x64000001},
+ {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+};
+
+static const u32 ar9300_2p0_mac_core[][2] = {
+ /* Addr allmodes */
+ {0x00000008, 0x00000000},
+ {0x00000030, 0x00020085},
+ {0x00000034, 0x00000005},
+ {0x00000040, 0x00000000},
+ {0x00000044, 0x00000000},
+ {0x00000048, 0x00000008},
+ {0x0000004c, 0x00000010},
+ {0x00000050, 0x00000000},
+ {0x00001040, 0x002ffc0f},
+ {0x00001044, 0x002ffc0f},
+ {0x00001048, 0x002ffc0f},
+ {0x0000104c, 0x002ffc0f},
+ {0x00001050, 0x002ffc0f},
+ {0x00001054, 0x002ffc0f},
+ {0x00001058, 0x002ffc0f},
+ {0x0000105c, 0x002ffc0f},
+ {0x00001060, 0x002ffc0f},
+ {0x00001064, 0x002ffc0f},
+ {0x000010f0, 0x00000100},
+ {0x00001270, 0x00000000},
+ {0x000012b0, 0x00000000},
+ {0x000012f0, 0x00000000},
+ {0x0000143c, 0x00000000},
+ {0x0000147c, 0x00000000},
+ {0x00008000, 0x00000000},
+ {0x00008004, 0x00000000},
+ {0x00008008, 0x00000000},
+ {0x0000800c, 0x00000000},
+ {0x00008018, 0x00000000},
+ {0x00008020, 0x00000000},
+ {0x00008038, 0x00000000},
+ {0x0000803c, 0x00000000},
+ {0x00008040, 0x00000000},
+ {0x00008044, 0x00000000},
+ {0x00008048, 0x00000000},
+ {0x0000804c, 0xffffffff},
+ {0x00008054, 0x00000000},
+ {0x00008058, 0x00000000},
+ {0x0000805c, 0x000fc78f},
+ {0x00008060, 0x0000000f},
+ {0x00008064, 0x00000000},
+ {0x00008070, 0x00000310},
+ {0x00008074, 0x00000020},
+ {0x00008078, 0x00000000},
+ {0x0000809c, 0x0000000f},
+ {0x000080a0, 0x00000000},
+ {0x000080a4, 0x02ff0000},
+ {0x000080a8, 0x0e070605},
+ {0x000080ac, 0x0000000d},
+ {0x000080b0, 0x00000000},
+ {0x000080b4, 0x00000000},
+ {0x000080b8, 0x00000000},
+ {0x000080bc, 0x00000000},
+ {0x000080c0, 0x2a800000},
+ {0x000080c4, 0x06900168},
+ {0x000080c8, 0x13881c20},
+ {0x000080cc, 0x01f40000},
+ {0x000080d0, 0x00252500},
+ {0x000080d4, 0x00a00000},
+ {0x000080d8, 0x00400000},
+ {0x000080dc, 0x00000000},
+ {0x000080e0, 0xffffffff},
+ {0x000080e4, 0x0000ffff},
+ {0x000080e8, 0x3f3f3f3f},
+ {0x000080ec, 0x00000000},
+ {0x000080f0, 0x00000000},
+ {0x000080f4, 0x00000000},
+ {0x000080fc, 0x00020000},
+ {0x00008100, 0x00000000},
+ {0x00008108, 0x00000052},
+ {0x0000810c, 0x00000000},
+ {0x00008110, 0x00000000},
+ {0x00008114, 0x000007ff},
+ {0x00008118, 0x000000aa},
+ {0x0000811c, 0x00003210},
+ {0x00008124, 0x00000000},
+ {0x00008128, 0x00000000},
+ {0x0000812c, 0x00000000},
+ {0x00008130, 0x00000000},
+ {0x00008134, 0x00000000},
+ {0x00008138, 0x00000000},
+ {0x0000813c, 0x0000ffff},
+ {0x00008144, 0xffffffff},
+ {0x00008168, 0x00000000},
+ {0x0000816c, 0x00000000},
+ {0x00008170, 0x18486200},
+ {0x00008174, 0x33332210},
+ {0x00008178, 0x00000000},
+ {0x0000817c, 0x00020000},
+ {0x000081c0, 0x00000000},
+ {0x000081c4, 0x33332210},
+ {0x000081c8, 0x00000000},
+ {0x000081cc, 0x00000000},
+ {0x000081d4, 0x00000000},
+ {0x000081ec, 0x00000000},
+ {0x000081f0, 0x00000000},
+ {0x000081f4, 0x00000000},
+ {0x000081f8, 0x00000000},
+ {0x000081fc, 0x00000000},
+ {0x00008240, 0x00100000},
+ {0x00008244, 0x0010f424},
+ {0x00008248, 0x00000800},
+ {0x0000824c, 0x0001e848},
+ {0x00008250, 0x00000000},
+ {0x00008254, 0x00000000},
+ {0x00008258, 0x00000000},
+ {0x0000825c, 0x40000000},
+ {0x00008260, 0x00080922},
+ {0x00008264, 0x98a00010},
+ {0x00008268, 0xffffffff},
+ {0x0000826c, 0x0000ffff},
+ {0x00008270, 0x00000000},
+ {0x00008274, 0x40000000},
+ {0x00008278, 0x003e4180},
+ {0x0000827c, 0x00000004},
+ {0x00008284, 0x0000002c},
+ {0x00008288, 0x0000002c},
+ {0x0000828c, 0x000000ff},
+ {0x00008294, 0x00000000},
+ {0x00008298, 0x00000000},
+ {0x0000829c, 0x00000000},
+ {0x00008300, 0x00000140},
+ {0x00008314, 0x00000000},
+ {0x0000831c, 0x0000010d},
+ {0x00008328, 0x00000000},
+ {0x0000832c, 0x00000007},
+ {0x00008330, 0x00000302},
+ {0x00008334, 0x00000700},
+ {0x00008338, 0x00ff0000},
+ {0x0000833c, 0x02400000},
+ {0x00008340, 0x000107ff},
+ {0x00008344, 0xaa48105b},
+ {0x00008348, 0x008f0000},
+ {0x0000835c, 0x00000000},
+ {0x00008360, 0xffffffff},
+ {0x00008364, 0xffffffff},
+ {0x00008368, 0x00000000},
+ {0x00008370, 0x00000000},
+ {0x00008374, 0x000000ff},
+ {0x00008378, 0x00000000},
+ {0x0000837c, 0x00000000},
+ {0x00008380, 0xffffffff},
+ {0x00008384, 0xffffffff},
+ {0x00008390, 0xffffffff},
+ {0x00008394, 0xffffffff},
+ {0x00008398, 0x00000000},
+ {0x0000839c, 0x00000000},
+ {0x000083a0, 0x00000000},
+ {0x000083a4, 0x0000fa14},
+ {0x000083a8, 0x000f0c00},
+ {0x000083ac, 0x33332210},
+ {0x000083b0, 0x33332210},
+ {0x000083b4, 0x33332210},
+ {0x000083b8, 0x33332210},
+ {0x000083bc, 0x00000000},
+ {0x000083c0, 0x00000000},
+ {0x000083c4, 0x00000000},
+ {0x000083c8, 0x00000000},
+ {0x000083cc, 0x00000200},
+ {0x000083d0, 0x000301ff},
+};
+
+static const u32 ar9300Common_wo_xlna_rx_gain_table_2p0[][2] = {
+ /* Addr allmodes */
+ {0x0000a000, 0x00010000},
+ {0x0000a004, 0x00030002},
+ {0x0000a008, 0x00050004},
+ {0x0000a00c, 0x00810080},
+ {0x0000a010, 0x00830082},
+ {0x0000a014, 0x01810180},
+ {0x0000a018, 0x01830182},
+ {0x0000a01c, 0x01850184},
+ {0x0000a020, 0x01890188},
+ {0x0000a024, 0x018b018a},
+ {0x0000a028, 0x018d018c},
+ {0x0000a02c, 0x03820190},
+ {0x0000a030, 0x03840383},
+ {0x0000a034, 0x03880385},
+ {0x0000a038, 0x038a0389},
+ {0x0000a03c, 0x038c038b},
+ {0x0000a040, 0x0390038d},
+ {0x0000a044, 0x03920391},
+ {0x0000a048, 0x03940393},
+ {0x0000a04c, 0x03960395},
+ {0x0000a050, 0x00000000},
+ {0x0000a054, 0x00000000},
+ {0x0000a058, 0x00000000},
+ {0x0000a05c, 0x00000000},
+ {0x0000a060, 0x00000000},
+ {0x0000a064, 0x00000000},
+ {0x0000a068, 0x00000000},
+ {0x0000a06c, 0x00000000},
+ {0x0000a070, 0x00000000},
+ {0x0000a074, 0x00000000},
+ {0x0000a078, 0x00000000},
+ {0x0000a07c, 0x00000000},
+ {0x0000a080, 0x29292929},
+ {0x0000a084, 0x29292929},
+ {0x0000a088, 0x29292929},
+ {0x0000a08c, 0x29292929},
+ {0x0000a090, 0x22292929},
+ {0x0000a094, 0x1d1d2222},
+ {0x0000a098, 0x0c111117},
+ {0x0000a09c, 0x00030303},
+ {0x0000a0a0, 0x00000000},
+ {0x0000a0a4, 0x00000000},
+ {0x0000a0a8, 0x00000000},
+ {0x0000a0ac, 0x00000000},
+ {0x0000a0b0, 0x00000000},
+ {0x0000a0b4, 0x00000000},
+ {0x0000a0b8, 0x00000000},
+ {0x0000a0bc, 0x00000000},
+ {0x0000a0c0, 0x001f0000},
+ {0x0000a0c4, 0x01000101},
+ {0x0000a0c8, 0x011e011f},
+ {0x0000a0cc, 0x011c011d},
+ {0x0000a0d0, 0x02030204},
+ {0x0000a0d4, 0x02010202},
+ {0x0000a0d8, 0x021f0200},
+ {0x0000a0dc, 0x0302021e},
+ {0x0000a0e0, 0x03000301},
+ {0x0000a0e4, 0x031e031f},
+ {0x0000a0e8, 0x0402031d},
+ {0x0000a0ec, 0x04000401},
+ {0x0000a0f0, 0x041e041f},
+ {0x0000a0f4, 0x0502041d},
+ {0x0000a0f8, 0x05000501},
+ {0x0000a0fc, 0x051e051f},
+ {0x0000a100, 0x06010602},
+ {0x0000a104, 0x061f0600},
+ {0x0000a108, 0x061d061e},
+ {0x0000a10c, 0x07020703},
+ {0x0000a110, 0x07000701},
+ {0x0000a114, 0x00000000},
+ {0x0000a118, 0x00000000},
+ {0x0000a11c, 0x00000000},
+ {0x0000a120, 0x00000000},
+ {0x0000a124, 0x00000000},
+ {0x0000a128, 0x00000000},
+ {0x0000a12c, 0x00000000},
+ {0x0000a130, 0x00000000},
+ {0x0000a134, 0x00000000},
+ {0x0000a138, 0x00000000},
+ {0x0000a13c, 0x00000000},
+ {0x0000a140, 0x001f0000},
+ {0x0000a144, 0x01000101},
+ {0x0000a148, 0x011e011f},
+ {0x0000a14c, 0x011c011d},
+ {0x0000a150, 0x02030204},
+ {0x0000a154, 0x02010202},
+ {0x0000a158, 0x021f0200},
+ {0x0000a15c, 0x0302021e},
+ {0x0000a160, 0x03000301},
+ {0x0000a164, 0x031e031f},
+ {0x0000a168, 0x0402031d},
+ {0x0000a16c, 0x04000401},
+ {0x0000a170, 0x041e041f},
+ {0x0000a174, 0x0502041d},
+ {0x0000a178, 0x05000501},
+ {0x0000a17c, 0x051e051f},
+ {0x0000a180, 0x06010602},
+ {0x0000a184, 0x061f0600},
+ {0x0000a188, 0x061d061e},
+ {0x0000a18c, 0x07020703},
+ {0x0000a190, 0x07000701},
+ {0x0000a194, 0x00000000},
+ {0x0000a198, 0x00000000},
+ {0x0000a19c, 0x00000000},
+ {0x0000a1a0, 0x00000000},
+ {0x0000a1a4, 0x00000000},
+ {0x0000a1a8, 0x00000000},
+ {0x0000a1ac, 0x00000000},
+ {0x0000a1b0, 0x00000000},
+ {0x0000a1b4, 0x00000000},
+ {0x0000a1b8, 0x00000000},
+ {0x0000a1bc, 0x00000000},
+ {0x0000a1c0, 0x00000000},
+ {0x0000a1c4, 0x00000000},
+ {0x0000a1c8, 0x00000000},
+ {0x0000a1cc, 0x00000000},
+ {0x0000a1d0, 0x00000000},
+ {0x0000a1d4, 0x00000000},
+ {0x0000a1d8, 0x00000000},
+ {0x0000a1dc, 0x00000000},
+ {0x0000a1e0, 0x00000000},
+ {0x0000a1e4, 0x00000000},
+ {0x0000a1e8, 0x00000000},
+ {0x0000a1ec, 0x00000000},
+ {0x0000a1f0, 0x00000396},
+ {0x0000a1f4, 0x00000396},
+ {0x0000a1f8, 0x00000396},
+ {0x0000a1fc, 0x00000196},
+ {0x0000b000, 0x00010000},
+ {0x0000b004, 0x00030002},
+ {0x0000b008, 0x00050004},
+ {0x0000b00c, 0x00810080},
+ {0x0000b010, 0x00830082},
+ {0x0000b014, 0x01810180},
+ {0x0000b018, 0x01830182},
+ {0x0000b01c, 0x01850184},
+ {0x0000b020, 0x02810280},
+ {0x0000b024, 0x02830282},
+ {0x0000b028, 0x02850284},
+ {0x0000b02c, 0x02890288},
+ {0x0000b030, 0x028b028a},
+ {0x0000b034, 0x0388028c},
+ {0x0000b038, 0x038a0389},
+ {0x0000b03c, 0x038c038b},
+ {0x0000b040, 0x0390038d},
+ {0x0000b044, 0x03920391},
+ {0x0000b048, 0x03940393},
+ {0x0000b04c, 0x03960395},
+ {0x0000b050, 0x00000000},
+ {0x0000b054, 0x00000000},
+ {0x0000b058, 0x00000000},
+ {0x0000b05c, 0x00000000},
+ {0x0000b060, 0x00000000},
+ {0x0000b064, 0x00000000},
+ {0x0000b068, 0x00000000},
+ {0x0000b06c, 0x00000000},
+ {0x0000b070, 0x00000000},
+ {0x0000b074, 0x00000000},
+ {0x0000b078, 0x00000000},
+ {0x0000b07c, 0x00000000},
+ {0x0000b080, 0x32323232},
+ {0x0000b084, 0x2f2f3232},
+ {0x0000b088, 0x23282a2d},
+ {0x0000b08c, 0x1c1e2123},
+ {0x0000b090, 0x14171919},
+ {0x0000b094, 0x0e0e1214},
+ {0x0000b098, 0x03050707},
+ {0x0000b09c, 0x00030303},
+ {0x0000b0a0, 0x00000000},
+ {0x0000b0a4, 0x00000000},
+ {0x0000b0a8, 0x00000000},
+ {0x0000b0ac, 0x00000000},
+ {0x0000b0b0, 0x00000000},
+ {0x0000b0b4, 0x00000000},
+ {0x0000b0b8, 0x00000000},
+ {0x0000b0bc, 0x00000000},
+ {0x0000b0c0, 0x003f0020},
+ {0x0000b0c4, 0x00400041},
+ {0x0000b0c8, 0x0140005f},
+ {0x0000b0cc, 0x0160015f},
+ {0x0000b0d0, 0x017e017f},
+ {0x0000b0d4, 0x02410242},
+ {0x0000b0d8, 0x025f0240},
+ {0x0000b0dc, 0x027f0260},
+ {0x0000b0e0, 0x0341027e},
+ {0x0000b0e4, 0x035f0340},
+ {0x0000b0e8, 0x037f0360},
+ {0x0000b0ec, 0x04400441},
+ {0x0000b0f0, 0x0460045f},
+ {0x0000b0f4, 0x0541047f},
+ {0x0000b0f8, 0x055f0540},
+ {0x0000b0fc, 0x057f0560},
+ {0x0000b100, 0x06400641},
+ {0x0000b104, 0x0660065f},
+ {0x0000b108, 0x067e067f},
+ {0x0000b10c, 0x07410742},
+ {0x0000b110, 0x075f0740},
+ {0x0000b114, 0x077f0760},
+ {0x0000b118, 0x07800781},
+ {0x0000b11c, 0x07a0079f},
+ {0x0000b120, 0x07c107bf},
+ {0x0000b124, 0x000007c0},
+ {0x0000b128, 0x00000000},
+ {0x0000b12c, 0x00000000},
+ {0x0000b130, 0x00000000},
+ {0x0000b134, 0x00000000},
+ {0x0000b138, 0x00000000},
+ {0x0000b13c, 0x00000000},
+ {0x0000b140, 0x003f0020},
+ {0x0000b144, 0x00400041},
+ {0x0000b148, 0x0140005f},
+ {0x0000b14c, 0x0160015f},
+ {0x0000b150, 0x017e017f},
+ {0x0000b154, 0x02410242},
+ {0x0000b158, 0x025f0240},
+ {0x0000b15c, 0x027f0260},
+ {0x0000b160, 0x0341027e},
+ {0x0000b164, 0x035f0340},
+ {0x0000b168, 0x037f0360},
+ {0x0000b16c, 0x04400441},
+ {0x0000b170, 0x0460045f},
+ {0x0000b174, 0x0541047f},
+ {0x0000b178, 0x055f0540},
+ {0x0000b17c, 0x057f0560},
+ {0x0000b180, 0x06400641},
+ {0x0000b184, 0x0660065f},
+ {0x0000b188, 0x067e067f},
+ {0x0000b18c, 0x07410742},
+ {0x0000b190, 0x075f0740},
+ {0x0000b194, 0x077f0760},
+ {0x0000b198, 0x07800781},
+ {0x0000b19c, 0x07a0079f},
+ {0x0000b1a0, 0x07c107bf},
+ {0x0000b1a4, 0x000007c0},
+ {0x0000b1a8, 0x00000000},
+ {0x0000b1ac, 0x00000000},
+ {0x0000b1b0, 0x00000000},
+ {0x0000b1b4, 0x00000000},
+ {0x0000b1b8, 0x00000000},
+ {0x0000b1bc, 0x00000000},
+ {0x0000b1c0, 0x00000000},
+ {0x0000b1c4, 0x00000000},
+ {0x0000b1c8, 0x00000000},
+ {0x0000b1cc, 0x00000000},
+ {0x0000b1d0, 0x00000000},
+ {0x0000b1d4, 0x00000000},
+ {0x0000b1d8, 0x00000000},
+ {0x0000b1dc, 0x00000000},
+ {0x0000b1e0, 0x00000000},
+ {0x0000b1e4, 0x00000000},
+ {0x0000b1e8, 0x00000000},
+ {0x0000b1ec, 0x00000000},
+ {0x0000b1f0, 0x00000396},
+ {0x0000b1f4, 0x00000396},
+ {0x0000b1f8, 0x00000396},
+ {0x0000b1fc, 0x00000196},
+};
+
+static const u32 ar9300_2p0_soc_preamble[][2] = {
+ /* Addr allmodes */
+ {0x000040a4, 0x00a0c1c9},
+ {0x00007008, 0x00000000},
+ {0x00007020, 0x00000000},
+ {0x00007034, 0x00000002},
+ {0x00007038, 0x000004c2},
+};
+
+static const u32 ar9300PciePhy_pll_on_clkreq_disable_L1_2p0[][2] = {
+ /* Addr allmodes */
+ {0x00004040, 0x08212e5e},
+ {0x00004040, 0x0008003b},
+ {0x00004044, 0x00000000},
+};
+
+static const u32 ar9300PciePhy_clkreq_enable_L1_2p0[][2] = {
+ /* Addr allmodes */
+ {0x00004040, 0x08253e5e},
+ {0x00004040, 0x0008003b},
+ {0x00004044, 0x00000000},
+};
+
+static const u32 ar9300PciePhy_clkreq_disable_L1_2p0[][2] = {
+ /* Addr allmodes */
+ {0x00004040, 0x08213e5e},
+ {0x00004040, 0x0008003b},
+ {0x00004044, 0x00000000},
+};
+
+#endif /* INITVALS_9003_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
new file mode 100644
index 000000000000..37ba37481a47
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
@@ -0,0 +1,614 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include "hw.h"
+#include "ar9003_mac.h"
+
+static void ar9003_hw_rx_enable(struct ath_hw *hw)
+{
+ REG_WRITE(hw, AR_CR, 0);
+}
+
+static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
+{
+ int checksum;
+
+ checksum = ads->info + ads->link
+ + ads->data0 + ads->ctl3
+ + ads->data1 + ads->ctl5
+ + ads->data2 + ads->ctl7
+ + ads->data3 + ads->ctl9;
+
+ return ((checksum & 0xffff) + (checksum >> 16)) & AR_TxPtrChkSum;
+}
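
The pointer checksum above folds the 32-bit sum of the descriptor words into 16 bits (AR_TxPtrChkSum is the low 16-bit mask). The following standalone sketch is editorial illustration rather than part of the patch; the descriptor words are made up purely for demonstration.

#include <stdint.h>
#include <stdio.h>

/* Fold a 32-bit sum into 16 bits the same way ar9003_calc_ptr_chksum() does. */
uint16_t fold_to_16(uint32_t sum)
{
	return (uint16_t)(((sum & 0xffff) + (sum >> 16)) & 0xffff);
}

int main(void)
{
	/* hypothetical descriptor words, for demonstration only */
	uint32_t words[] = { 0x168c0017, 0x1c000000, 0x05a00000 };
	uint32_t sum = words[0] + words[1] + words[2];

	printf("ptr checksum = 0x%04x\n", fold_to_16(sum));	/* 0x3843 */
	return 0;
}
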
+
+static void ar9003_hw_set_desc_link(void *ds, u32 ds_link)
+{
+ struct ar9003_txc *ads = ds;
+
+ ads->link = ds_link;
+ ads->ctl10 &= ~AR_TxPtrChkSum;
+ ads->ctl10 |= ar9003_calc_ptr_chksum(ads);
+}
+
+static void ar9003_hw_get_desc_link(void *ds, u32 **ds_link)
+{
+ struct ar9003_txc *ads = ds;
+
+ *ds_link = &ads->link;
+}
+
+static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
+{
+ u32 isr = 0;
+ u32 mask2 = 0;
+ struct ath9k_hw_capabilities *pCap = &ah->caps;
+ u32 sync_cause = 0;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (REG_READ(ah, AR_INTR_ASYNC_CAUSE) & AR_INTR_MAC_IRQ) {
+ if ((REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M)
+ == AR_RTC_STATUS_ON)
+ isr = REG_READ(ah, AR_ISR);
+ }
+
+ sync_cause = REG_READ(ah, AR_INTR_SYNC_CAUSE) & AR_INTR_SYNC_DEFAULT;
+
+ *masked = 0;
+
+ if (!isr && !sync_cause)
+ return false;
+
+ if (isr) {
+ if (isr & AR_ISR_BCNMISC) {
+ u32 isr2;
+ isr2 = REG_READ(ah, AR_ISR_S2);
+
+ mask2 |= ((isr2 & AR_ISR_S2_TIM) >>
+ MAP_ISR_S2_TIM);
+ mask2 |= ((isr2 & AR_ISR_S2_DTIM) >>
+ MAP_ISR_S2_DTIM);
+ mask2 |= ((isr2 & AR_ISR_S2_DTIMSYNC) >>
+ MAP_ISR_S2_DTIMSYNC);
+ mask2 |= ((isr2 & AR_ISR_S2_CABEND) >>
+ MAP_ISR_S2_CABEND);
+ mask2 |= ((isr2 & AR_ISR_S2_GTT) <<
+ MAP_ISR_S2_GTT);
+ mask2 |= ((isr2 & AR_ISR_S2_CST) <<
+ MAP_ISR_S2_CST);
+ mask2 |= ((isr2 & AR_ISR_S2_TSFOOR) >>
+ MAP_ISR_S2_TSFOOR);
+
+ if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
+ REG_WRITE(ah, AR_ISR_S2, isr2);
+ isr &= ~AR_ISR_BCNMISC;
+ }
+ }
+
+ if ((pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED))
+ isr = REG_READ(ah, AR_ISR_RAC);
+
+ if (isr == 0xffffffff) {
+ *masked = 0;
+ return false;
+ }
+
+ *masked = isr & ATH9K_INT_COMMON;
+
+ if (ah->config.rx_intr_mitigation)
+ if (isr & (AR_ISR_RXMINTR | AR_ISR_RXINTM))
+ *masked |= ATH9K_INT_RXLP;
+
+ if (ah->config.tx_intr_mitigation)
+ if (isr & (AR_ISR_TXMINTR | AR_ISR_TXINTM))
+ *masked |= ATH9K_INT_TX;
+
+ if (isr & (AR_ISR_LP_RXOK | AR_ISR_RXERR))
+ *masked |= ATH9K_INT_RXLP;
+
+ if (isr & AR_ISR_HP_RXOK)
+ *masked |= ATH9K_INT_RXHP;
+
+ if (isr & (AR_ISR_TXOK | AR_ISR_TXERR | AR_ISR_TXEOL)) {
+ *masked |= ATH9K_INT_TX;
+
+ if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
+ u32 s0, s1;
+ s0 = REG_READ(ah, AR_ISR_S0);
+ REG_WRITE(ah, AR_ISR_S0, s0);
+ s1 = REG_READ(ah, AR_ISR_S1);
+ REG_WRITE(ah, AR_ISR_S1, s1);
+
+ isr &= ~(AR_ISR_TXOK | AR_ISR_TXERR |
+ AR_ISR_TXEOL);
+ }
+ }
+
+ if (isr & AR_ISR_GENTMR) {
+ u32 s5;
+
+ if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)
+ s5 = REG_READ(ah, AR_ISR_S5_S);
+ else
+ s5 = REG_READ(ah, AR_ISR_S5);
+
+ ah->intr_gen_timer_trigger =
+ MS(s5, AR_ISR_S5_GENTIMER_TRIG);
+
+ ah->intr_gen_timer_thresh =
+ MS(s5, AR_ISR_S5_GENTIMER_THRESH);
+
+ if (ah->intr_gen_timer_trigger)
+ *masked |= ATH9K_INT_GENTIMER;
+
+ if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
+ REG_WRITE(ah, AR_ISR_S5, s5);
+ isr &= ~AR_ISR_GENTMR;
+ }
+
+ }
+
+ *masked |= mask2;
+
+ if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
+ REG_WRITE(ah, AR_ISR, isr);
+
+ (void) REG_READ(ah, AR_ISR);
+ }
+ }
+
+ if (sync_cause) {
+ if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT) {
+ REG_WRITE(ah, AR_RC, AR_RC_HOSTIF);
+ REG_WRITE(ah, AR_RC, 0);
+ *masked |= ATH9K_INT_FATAL;
+ }
+
+ if (sync_cause & AR_INTR_SYNC_LOCAL_TIMEOUT)
+ ath_print(common, ATH_DBG_INTERRUPT,
+ "AR_INTR_SYNC_LOCAL_TIMEOUT\n");
+
+ REG_WRITE(ah, AR_INTR_SYNC_CAUSE_CLR, sync_cause);
+ (void) REG_READ(ah, AR_INTR_SYNC_CAUSE_CLR);
+
+ }
+ return true;
+}
+
+static void ar9003_hw_fill_txdesc(struct ath_hw *ah, void *ds, u32 seglen,
+ bool is_firstseg, bool is_lastseg,
+ const void *ds0, dma_addr_t buf_addr,
+ unsigned int qcu)
+{
+ struct ar9003_txc *ads = (struct ar9003_txc *) ds;
+ unsigned int descid = 0;
+
+ ads->info = (ATHEROS_VENDOR_ID << AR_DescId_S) |
+ (1 << AR_TxRxDesc_S) |
+ (1 << AR_CtrlStat_S) |
+ (qcu << AR_TxQcuNum_S) | 0x17;
+
+ ads->data0 = buf_addr;
+ ads->data1 = 0;
+ ads->data2 = 0;
+ ads->data3 = 0;
+
+ ads->ctl3 = (seglen << AR_BufLen_S);
+ ads->ctl3 &= AR_BufLen;
+
+ /* Fill in pointer checksum and descriptor id */
+ ads->ctl10 = ar9003_calc_ptr_chksum(ads);
+ ads->ctl10 |= (descid << AR_TxDescId_S);
+
+ if (is_firstseg) {
+ ads->ctl12 |= (is_lastseg ? 0 : AR_TxMore);
+ } else if (is_lastseg) {
+ ads->ctl11 = 0;
+ ads->ctl12 = 0;
+ ads->ctl13 = AR9003TXC_CONST(ds0)->ctl13;
+ ads->ctl14 = AR9003TXC_CONST(ds0)->ctl14;
+ } else {
+ /* XXX Intermediate descriptor in a multi-descriptor frame.*/
+ ads->ctl11 = 0;
+ ads->ctl12 = AR_TxMore;
+ ads->ctl13 = 0;
+ ads->ctl14 = 0;
+ }
+}
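
For a multi-buffer frame, every non-final segment sets AR_TxMore and the last segment copies the rate/try words (ctl13/ctl14) from the first descriptor. In the driver this routine is reached through the ath_hw_ops fill_txdesc hook; the direct caller below is an editorial illustration only, and the segment lengths and buffers are hypothetical.

/* Illustration only: walk a three-segment frame through the fill routine. */
static void fill_three_segments(struct ath_hw *ah, void *desc[3],
				dma_addr_t buf[3], unsigned int qcu)
{
	u32 len[3] = { 1500, 1500, 200 };	/* hypothetical segment lengths */
	int i;

	for (i = 0; i < 3; i++)
		ar9003_hw_fill_txdesc(ah, desc[i], len[i],
				      i == 0,	/* is_firstseg */
				      i == 2,	/* is_lastseg */
				      desc[0], buf[i], qcu);
}
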
+
+static int ar9003_hw_proc_txdesc(struct ath_hw *ah, void *ds,
+ struct ath_tx_status *ts)
+{
+ struct ar9003_txs *ads;
+
+ ads = &ah->ts_ring[ah->ts_tail];
+
+ if ((ads->status8 & AR_TxDone) == 0)
+ return -EINPROGRESS;
+
+ ah->ts_tail = (ah->ts_tail + 1) % ah->ts_size;
+
+ if ((MS(ads->ds_info, AR_DescId) != ATHEROS_VENDOR_ID) ||
+ (MS(ads->ds_info, AR_TxRxDesc) != 1)) {
+ ath_print(ath9k_hw_common(ah), ATH_DBG_XMIT,
+ "Tx Descriptor error %x\n", ads->ds_info);
+ memset(ads, 0, sizeof(*ads));
+ return -EIO;
+ }
+
+ ts->qid = MS(ads->ds_info, AR_TxQcuNum);
+ ts->desc_id = MS(ads->status1, AR_TxDescId);
+ ts->ts_seqnum = MS(ads->status8, AR_SeqNum);
+ ts->ts_tstamp = ads->status4;
+ ts->ts_status = 0;
+ ts->ts_flags = 0;
+
+ if (ads->status3 & AR_ExcessiveRetries)
+ ts->ts_status |= ATH9K_TXERR_XRETRY;
+ if (ads->status3 & AR_Filtered)
+ ts->ts_status |= ATH9K_TXERR_FILT;
+ if (ads->status3 & AR_FIFOUnderrun) {
+ ts->ts_status |= ATH9K_TXERR_FIFO;
+ ath9k_hw_updatetxtriglevel(ah, true);
+ }
+ if (ads->status8 & AR_TxOpExceeded)
+ ts->ts_status |= ATH9K_TXERR_XTXOP;
+ if (ads->status3 & AR_TxTimerExpired)
+ ts->ts_status |= ATH9K_TXERR_TIMER_EXPIRED;
+
+ if (ads->status3 & AR_DescCfgErr)
+ ts->ts_flags |= ATH9K_TX_DESC_CFG_ERR;
+ if (ads->status3 & AR_TxDataUnderrun) {
+ ts->ts_flags |= ATH9K_TX_DATA_UNDERRUN;
+ ath9k_hw_updatetxtriglevel(ah, true);
+ }
+ if (ads->status3 & AR_TxDelimUnderrun) {
+ ts->ts_flags |= ATH9K_TX_DELIM_UNDERRUN;
+ ath9k_hw_updatetxtriglevel(ah, true);
+ }
+ if (ads->status2 & AR_TxBaStatus) {
+ ts->ts_flags |= ATH9K_TX_BA;
+ ts->ba_low = ads->status5;
+ ts->ba_high = ads->status6;
+ }
+
+ ts->ts_rateindex = MS(ads->status8, AR_FinalTxIdx);
+
+ ts->ts_rssi = MS(ads->status7, AR_TxRSSICombined);
+ ts->ts_rssi_ctl0 = MS(ads->status2, AR_TxRSSIAnt00);
+ ts->ts_rssi_ctl1 = MS(ads->status2, AR_TxRSSIAnt01);
+ ts->ts_rssi_ctl2 = MS(ads->status2, AR_TxRSSIAnt02);
+ ts->ts_rssi_ext0 = MS(ads->status7, AR_TxRSSIAnt10);
+ ts->ts_rssi_ext1 = MS(ads->status7, AR_TxRSSIAnt11);
+ ts->ts_rssi_ext2 = MS(ads->status7, AR_TxRSSIAnt12);
+ ts->ts_shortretry = MS(ads->status3, AR_RTSFailCnt);
+ ts->ts_longretry = MS(ads->status3, AR_DataFailCnt);
+ ts->ts_virtcol = MS(ads->status3, AR_VirtRetryCnt);
+ ts->ts_antenna = 0;
+
+ ts->tid = MS(ads->status8, AR_TxTid);
+
+ memset(ads, 0, sizeof(*ads));
+
+ return 0;
+}
+
+static void ar9003_hw_set11n_txdesc(struct ath_hw *ah, void *ds,
+ u32 pktlen, enum ath9k_pkt_type type, u32 txpower,
+ u32 keyIx, enum ath9k_key_type keyType, u32 flags)
+{
+ struct ar9003_txc *ads = (struct ar9003_txc *) ds;
+
+ if (txpower > ah->txpower_limit)
+ txpower = ah->txpower_limit;
+
+ txpower += ah->txpower_indexoffset;
+ if (txpower > 63)
+ txpower = 63;
+
+ ads->ctl11 = (pktlen & AR_FrameLen)
+ | (flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
+ | SM(txpower, AR_XmitPower)
+ | (flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
+ | (flags & ATH9K_TXDESC_CLRDMASK ? AR_ClrDestMask : 0)
+ | (keyIx != ATH9K_TXKEYIX_INVALID ? AR_DestIdxValid : 0)
+ | (flags & ATH9K_TXDESC_LOWRXCHAIN ? AR_LowRxChain : 0);
+
+ ads->ctl12 =
+ (keyIx != ATH9K_TXKEYIX_INVALID ? SM(keyIx, AR_DestIdx) : 0)
+ | SM(type, AR_FrameType)
+ | (flags & ATH9K_TXDESC_NOACK ? AR_NoAck : 0)
+ | (flags & ATH9K_TXDESC_EXT_ONLY ? AR_ExtOnly : 0)
+ | (flags & ATH9K_TXDESC_EXT_AND_CTL ? AR_ExtAndCtl : 0);
+
+ ads->ctl17 = SM(keyType, AR_EncrType) |
+ (flags & ATH9K_TXDESC_LDPC ? AR_LDPC : 0);
+ ads->ctl18 = 0;
+ ads->ctl19 = AR_Not_Sounding;
+
+ ads->ctl20 = 0;
+ ads->ctl21 = 0;
+ ads->ctl22 = 0;
+}
+
+static void ar9003_hw_set11n_ratescenario(struct ath_hw *ah, void *ds,
+ void *lastds,
+ u32 durUpdateEn, u32 rtsctsRate,
+ u32 rtsctsDuration,
+ struct ath9k_11n_rate_series series[],
+ u32 nseries, u32 flags)
+{
+ struct ar9003_txc *ads = (struct ar9003_txc *) ds;
+ struct ar9003_txc *last_ads = (struct ar9003_txc *) lastds;
+ u_int32_t ctl11;
+
+ if (flags & (ATH9K_TXDESC_RTSENA | ATH9K_TXDESC_CTSENA)) {
+ ctl11 = ads->ctl11;
+
+ if (flags & ATH9K_TXDESC_RTSENA) {
+ ctl11 &= ~AR_CTSEnable;
+ ctl11 |= AR_RTSEnable;
+ } else {
+ ctl11 &= ~AR_RTSEnable;
+ ctl11 |= AR_CTSEnable;
+ }
+
+ ads->ctl11 = ctl11;
+ } else {
+ ads->ctl11 = (ads->ctl11 & ~(AR_RTSEnable | AR_CTSEnable));
+ }
+
+ ads->ctl13 = set11nTries(series, 0)
+ | set11nTries(series, 1)
+ | set11nTries(series, 2)
+ | set11nTries(series, 3)
+ | (durUpdateEn ? AR_DurUpdateEna : 0)
+ | SM(0, AR_BurstDur);
+
+ ads->ctl14 = set11nRate(series, 0)
+ | set11nRate(series, 1)
+ | set11nRate(series, 2)
+ | set11nRate(series, 3);
+
+ ads->ctl15 = set11nPktDurRTSCTS(series, 0)
+ | set11nPktDurRTSCTS(series, 1);
+
+ ads->ctl16 = set11nPktDurRTSCTS(series, 2)
+ | set11nPktDurRTSCTS(series, 3);
+
+ ads->ctl18 = set11nRateFlags(series, 0)
+ | set11nRateFlags(series, 1)
+ | set11nRateFlags(series, 2)
+ | set11nRateFlags(series, 3)
+ | SM(rtsctsRate, AR_RTSCTSRate);
+ ads->ctl19 = AR_Not_Sounding;
+
+ last_ads->ctl13 = ads->ctl13;
+ last_ads->ctl14 = ads->ctl14;
+}
+
+static void ar9003_hw_set11n_aggr_first(struct ath_hw *ah, void *ds,
+ u32 aggrLen)
+{
+ struct ar9003_txc *ads = (struct ar9003_txc *) ds;
+
+ ads->ctl12 |= (AR_IsAggr | AR_MoreAggr);
+
+ ads->ctl17 &= ~AR_AggrLen;
+ ads->ctl17 |= SM(aggrLen, AR_AggrLen);
+}
+
+static void ar9003_hw_set11n_aggr_middle(struct ath_hw *ah, void *ds,
+ u32 numDelims)
+{
+ struct ar9003_txc *ads = (struct ar9003_txc *) ds;
+ unsigned int ctl17;
+
+ ads->ctl12 |= (AR_IsAggr | AR_MoreAggr);
+
+ /*
+ * We use a stack variable to manipulate ctl17 to reduce uncached
+ * read, modify, write cycles.
+ */
+ ctl17 = ads->ctl17;
+ ctl17 &= ~AR_PadDelim;
+ ctl17 |= SM(numDelims, AR_PadDelim);
+ ads->ctl17 = ctl17;
+}
+
+static void ar9003_hw_set11n_aggr_last(struct ath_hw *ah, void *ds)
+{
+ struct ar9003_txc *ads = (struct ar9003_txc *) ds;
+
+ ads->ctl12 |= AR_IsAggr;
+ ads->ctl12 &= ~AR_MoreAggr;
+ ads->ctl17 &= ~AR_PadDelim;
+}
+
+static void ar9003_hw_clr11n_aggr(struct ath_hw *ah, void *ds)
+{
+ struct ar9003_txc *ads = (struct ar9003_txc *) ds;
+
+ ads->ctl12 &= (~AR_IsAggr & ~AR_MoreAggr);
+}
+
+static void ar9003_hw_set11n_burstduration(struct ath_hw *ah, void *ds,
+ u32 burstDuration)
+{
+ struct ar9003_txc *ads = (struct ar9003_txc *) ds;
+
+ ads->ctl13 &= ~AR_BurstDur;
+ ads->ctl13 |= SM(burstDuration, AR_BurstDur);
+
+}
+
+static void ar9003_hw_set11n_virtualmorefrag(struct ath_hw *ah, void *ds,
+ u32 vmf)
+{
+ struct ar9003_txc *ads = (struct ar9003_txc *) ds;
+
+ if (vmf)
+ ads->ctl11 |= AR_VirtMoreFrag;
+ else
+ ads->ctl11 &= ~AR_VirtMoreFrag;
+}
+
+void ar9003_hw_attach_mac_ops(struct ath_hw *hw)
+{
+ struct ath_hw_ops *ops = ath9k_hw_ops(hw);
+
+ ops->rx_enable = ar9003_hw_rx_enable;
+ ops->set_desc_link = ar9003_hw_set_desc_link;
+ ops->get_desc_link = ar9003_hw_get_desc_link;
+ ops->get_isr = ar9003_hw_get_isr;
+ ops->fill_txdesc = ar9003_hw_fill_txdesc;
+ ops->proc_txdesc = ar9003_hw_proc_txdesc;
+ ops->set11n_txdesc = ar9003_hw_set11n_txdesc;
+ ops->set11n_ratescenario = ar9003_hw_set11n_ratescenario;
+ ops->set11n_aggr_first = ar9003_hw_set11n_aggr_first;
+ ops->set11n_aggr_middle = ar9003_hw_set11n_aggr_middle;
+ ops->set11n_aggr_last = ar9003_hw_set11n_aggr_last;
+ ops->clr11n_aggr = ar9003_hw_clr11n_aggr;
+ ops->set11n_burstduration = ar9003_hw_set11n_burstduration;
+ ops->set11n_virtualmorefrag = ar9003_hw_set11n_virtualmorefrag;
+}
+
+void ath9k_hw_set_rx_bufsize(struct ath_hw *ah, u16 buf_size)
+{
+ REG_WRITE(ah, AR_DATABUF_SIZE, buf_size & AR_DATABUF_SIZE_MASK);
+}
+EXPORT_SYMBOL(ath9k_hw_set_rx_bufsize);
+
+void ath9k_hw_addrxbuf_edma(struct ath_hw *ah, u32 rxdp,
+ enum ath9k_rx_qtype qtype)
+{
+ if (qtype == ATH9K_RX_QUEUE_HP)
+ REG_WRITE(ah, AR_HP_RXDP, rxdp);
+ else
+ REG_WRITE(ah, AR_LP_RXDP, rxdp);
+}
+EXPORT_SYMBOL(ath9k_hw_addrxbuf_edma);
+
+int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs,
+ void *buf_addr)
+{
+ struct ar9003_rxs *rxsp = (struct ar9003_rxs *) buf_addr;
+ unsigned int phyerr;
+
+ /* TODO: byte swap on big endian for ar9300_10 */
+
+ if ((rxsp->status11 & AR_RxDone) == 0)
+ return -EINPROGRESS;
+
+ if (MS(rxsp->ds_info, AR_DescId) != ATHEROS_VENDOR_ID)
+ return -EINVAL;
+
+ if ((rxsp->ds_info & (AR_TxRxDesc | AR_CtrlStat)) != 0)
+ return -EINPROGRESS;
+
+ if (!rxs)
+ return 0;
+
+ rxs->rs_status = 0;
+ rxs->rs_flags = 0;
+
+ rxs->rs_datalen = rxsp->status2 & AR_DataLen;
+ rxs->rs_tstamp = rxsp->status3;
+
+ /* XXX: Keycache */
+ rxs->rs_rssi = MS(rxsp->status5, AR_RxRSSICombined);
+ rxs->rs_rssi_ctl0 = MS(rxsp->status1, AR_RxRSSIAnt00);
+ rxs->rs_rssi_ctl1 = MS(rxsp->status1, AR_RxRSSIAnt01);
+ rxs->rs_rssi_ctl2 = MS(rxsp->status1, AR_RxRSSIAnt02);
+ rxs->rs_rssi_ext0 = MS(rxsp->status5, AR_RxRSSIAnt10);
+ rxs->rs_rssi_ext1 = MS(rxsp->status5, AR_RxRSSIAnt11);
+ rxs->rs_rssi_ext2 = MS(rxsp->status5, AR_RxRSSIAnt12);
+
+ if (rxsp->status11 & AR_RxKeyIdxValid)
+ rxs->rs_keyix = MS(rxsp->status11, AR_KeyIdx);
+ else
+ rxs->rs_keyix = ATH9K_RXKEYIX_INVALID;
+
+ rxs->rs_rate = MS(rxsp->status1, AR_RxRate);
+ rxs->rs_more = (rxsp->status2 & AR_RxMore) ? 1 : 0;
+
+ rxs->rs_isaggr = (rxsp->status11 & AR_RxAggr) ? 1 : 0;
+ rxs->rs_moreaggr = (rxsp->status11 & AR_RxMoreAggr) ? 1 : 0;
+ rxs->rs_antenna = (MS(rxsp->status4, AR_RxAntenna) & 0x7);
+ rxs->rs_flags = (rxsp->status4 & AR_GI) ? ATH9K_RX_GI : 0;
+ rxs->rs_flags |= (rxsp->status4 & AR_2040) ? ATH9K_RX_2040 : 0;
+
+ rxs->evm0 = rxsp->status6;
+ rxs->evm1 = rxsp->status7;
+ rxs->evm2 = rxsp->status8;
+ rxs->evm3 = rxsp->status9;
+ rxs->evm4 = (rxsp->status10 & 0xffff);
+
+ if (rxsp->status11 & AR_PreDelimCRCErr)
+ rxs->rs_flags |= ATH9K_RX_DELIM_CRC_PRE;
+
+ if (rxsp->status11 & AR_PostDelimCRCErr)
+ rxs->rs_flags |= ATH9K_RX_DELIM_CRC_POST;
+
+ if (rxsp->status11 & AR_DecryptBusyErr)
+ rxs->rs_flags |= ATH9K_RX_DECRYPT_BUSY;
+
+ if ((rxsp->status11 & AR_RxFrameOK) == 0) {
+ if (rxsp->status11 & AR_CRCErr) {
+ rxs->rs_status |= ATH9K_RXERR_CRC;
+ } else if (rxsp->status11 & AR_PHYErr) {
+ rxs->rs_status |= ATH9K_RXERR_PHY;
+ phyerr = MS(rxsp->status11, AR_PHYErrCode);
+ rxs->rs_phyerr = phyerr;
+ } else if (rxsp->status11 & AR_DecryptCRCErr) {
+ rxs->rs_status |= ATH9K_RXERR_DECRYPT;
+ } else if (rxsp->status11 & AR_MichaelErr) {
+ rxs->rs_status |= ATH9K_RXERR_MIC;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(ath9k_hw_process_rxdesc_edma);
+
+void ath9k_hw_reset_txstatus_ring(struct ath_hw *ah)
+{
+ ah->ts_tail = 0;
+
+ memset((void *) ah->ts_ring, 0,
+ ah->ts_size * sizeof(struct ar9003_txs));
+
+ ath_print(ath9k_hw_common(ah), ATH_DBG_XMIT,
+ "TS Start 0x%x End 0x%x Virt %p, Size %d\n",
+ ah->ts_paddr_start, ah->ts_paddr_end,
+ ah->ts_ring, ah->ts_size);
+
+ REG_WRITE(ah, AR_Q_STATUS_RING_START, ah->ts_paddr_start);
+ REG_WRITE(ah, AR_Q_STATUS_RING_END, ah->ts_paddr_end);
+}
+
+void ath9k_hw_setup_statusring(struct ath_hw *ah, void *ts_start,
+ u32 ts_paddr_start,
+ u8 size)
+{
+
+ ah->ts_paddr_start = ts_paddr_start;
+ ah->ts_paddr_end = ts_paddr_start + (size * sizeof(struct ar9003_txs));
+ ah->ts_size = size;
+ ah->ts_ring = (struct ar9003_txs *) ts_start;
+
+ ath9k_hw_reset_txstatus_ring(ah);
+}
+EXPORT_SYMBOL(ath9k_hw_setup_statusring);
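
The TX status ring is bounded by ts_paddr_start/ts_paddr_end, and its size in bytes is simply the entry count times sizeof(struct ar9003_txs), i.e. nine 32-bit words per entry as defined in ar9003_mac.h below. A small editorial sketch with a hypothetical 128-entry ring:

#include <stdint.h>
#include <stdio.h>

/* stand-in mirroring struct ar9003_txs: nine 32-bit status words */
struct txs_example { uint32_t w[9]; };

int main(void)
{
	unsigned int entries = 128;	/* hypothetical ring size (fits in the u8 parameter) */
	unsigned long bytes = entries * sizeof(struct txs_example);

	printf("tx status ring: %u entries, %lu bytes\n", entries, bytes);
	return 0;
}
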
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.h b/drivers/net/wireless/ath/ath9k/ar9003_mac.h
new file mode 100644
index 000000000000..f17558b14539
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.h
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef AR9003_MAC_H
+#define AR9003_MAC_H
+
+#define AR_DescId 0xffff0000
+#define AR_DescId_S 16
+#define AR_CtrlStat 0x00004000
+#define AR_CtrlStat_S 14
+#define AR_TxRxDesc 0x00008000
+#define AR_TxRxDesc_S 15
+#define AR_TxQcuNum 0x00000f00
+#define AR_TxQcuNum_S 8
+
+#define AR_BufLen 0x0fff0000
+#define AR_BufLen_S 16
+
+#define AR_TxDescId 0xffff0000
+#define AR_TxDescId_S 16
+#define AR_TxPtrChkSum 0x0000ffff
+
+#define AR_TxTid 0xf0000000
+#define AR_TxTid_S 28
+
+#define AR_LowRxChain 0x00004000
+
+#define AR_Not_Sounding 0x20000000
+
+#define MAP_ISR_S2_CST 6
+#define MAP_ISR_S2_GTT 6
+#define MAP_ISR_S2_TIM 3
+#define MAP_ISR_S2_CABEND 0
+#define MAP_ISR_S2_DTIMSYNC 7
+#define MAP_ISR_S2_DTIM 7
+#define MAP_ISR_S2_TSFOOR 4
+
+#define AR9003TXC_CONST(_ds) ((const struct ar9003_txc *) _ds)
+
+struct ar9003_rxs {
+ u32 ds_info;
+ u32 status1;
+ u32 status2;
+ u32 status3;
+ u32 status4;
+ u32 status5;
+ u32 status6;
+ u32 status7;
+ u32 status8;
+ u32 status9;
+ u32 status10;
+ u32 status11;
+} __packed;
+
+/* Transmit Control Descriptor */
+struct ar9003_txc {
+ u32 info; /* descriptor information */
+ u32 link; /* link pointer */
+ u32 data0; /* data pointer to 1st buffer */
+ u32 ctl3; /* DMA control 3 */
+ u32 data1; /* data pointer to 2nd buffer */
+ u32 ctl5; /* DMA control 5 */
+ u32 data2; /* data pointer to 3rd buffer */
+ u32 ctl7; /* DMA control 7 */
+ u32 data3; /* data pointer to 4th buffer */
+ u32 ctl9; /* DMA control 9 */
+ u32 ctl10; /* DMA control 10 */
+ u32 ctl11; /* DMA control 11 */
+ u32 ctl12; /* DMA control 12 */
+ u32 ctl13; /* DMA control 13 */
+ u32 ctl14; /* DMA control 14 */
+ u32 ctl15; /* DMA control 15 */
+ u32 ctl16; /* DMA control 16 */
+ u32 ctl17; /* DMA control 17 */
+ u32 ctl18; /* DMA control 18 */
+ u32 ctl19; /* DMA control 19 */
+ u32 ctl20; /* DMA control 20 */
+ u32 ctl21; /* DMA control 21 */
+ u32 ctl22; /* DMA control 22 */
+ u32 pad[9]; /* pad to cache line (128 bytes/32 dwords) */
+} __packed;
+
+struct ar9003_txs {
+ u32 ds_info;
+ u32 status1;
+ u32 status2;
+ u32 status3;
+ u32 status4;
+ u32 status5;
+ u32 status6;
+ u32 status7;
+ u32 status8;
+} __packed;
+
+void ar9003_hw_attach_mac_ops(struct ath_hw *hw);
+void ath9k_hw_set_rx_bufsize(struct ath_hw *ah, u16 buf_size);
+void ath9k_hw_addrxbuf_edma(struct ath_hw *ah, u32 rxdp,
+ enum ath9k_rx_qtype qtype);
+
+int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah,
+ struct ath_rx_status *rxs,
+ void *buf_addr);
+void ath9k_hw_reset_txstatus_ring(struct ath_hw *ah);
+void ath9k_hw_setup_statusring(struct ath_hw *ah, void *ts_start,
+ u32 ts_paddr_start,
+ u8 size);
+#endif
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
new file mode 100644
index 000000000000..80431a2f6dc1
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -0,0 +1,1134 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "hw.h"
+#include "ar9003_phy.h"
+
+/**
+ * ar9003_hw_set_channel - set channel on single-chip device
+ * @ah: atheros hardware structure
+ * @chan: channel to tune to
+ *
+ * This is the function to change the channel on single-chip devices, that
+ * is, all devices after ar9280.
+ *
+ * This function takes the channel value in MHz and sets the
+ * hardware channel value. Assumes writes have been enabled to the analog bus.
+ *
+ * Actual Expression,
+ *
+ * For 2GHz channel,
+ * Channel Frequency = (3/4) * freq_ref * (chansel[8:0] + chanfrac[16:0]/2^17)
+ * (freq_ref = 40MHz)
+ *
+ * For 5GHz channel,
+ * Channel Frequency = (3/2) * freq_ref * (chansel[8:0] + chanfrac[16:0]/2^10)
+ * (freq_ref = 40MHz/(24>>amodeRefSel))
+ *
+ * For 5GHz channels which are 5MHz spaced,
+ * Channel Frequency = (3/2) * freq_ref * (chansel[8:0] + chanfrac[16:0]/2^17)
+ * (freq_ref = 40MHz)
+ */
+static int ar9003_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
+{
+ u16 bMode, fracMode = 0, aModeRefSel = 0;
+ u32 freq, channelSel = 0, reg32 = 0;
+ struct chan_centers centers;
+ int loadSynthChannel;
+
+ ath9k_hw_get_channel_centers(ah, chan, &centers);
+ freq = centers.synth_center;
+
+ if (freq < 4800) { /* 2 GHz, fractional mode */
+ channelSel = CHANSEL_2G(freq);
+ /* Set to 2G mode */
+ bMode = 1;
+ } else {
+ channelSel = CHANSEL_5G(freq);
+ /* Doubler is ON, so, divide channelSel by 2. */
+ channelSel >>= 1;
+ /* Set to 5G mode */
+ bMode = 0;
+ }
+
+ /* Enable fractional mode for all channels */
+ fracMode = 1;
+ aModeRefSel = 0;
+ loadSynthChannel = 0;
+
+ reg32 = (bMode << 29);
+ REG_WRITE(ah, AR_PHY_SYNTH_CONTROL, reg32);
+
+ /* Enable Long shift Select for Synthesizer */
+ REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_SYNTH4,
+ AR_PHY_SYNTH4_LONG_SHIFT_SELECT, 1);
+
+ /* Program Synth. setting */
+ reg32 = (channelSel << 2) | (fracMode << 30) |
+ (aModeRefSel << 28) | (loadSynthChannel << 31);
+ REG_WRITE(ah, AR_PHY_65NM_CH0_SYNTH7, reg32);
+
+ /* Toggle Load Synth channel bit */
+ loadSynthChannel = 1;
+ reg32 = (channelSel << 2) | (fracMode << 30) |
+ (aModeRefSel << 28) | (loadSynthChannel << 31);
+ REG_WRITE(ah, AR_PHY_65NM_CH0_SYNTH7, reg32);
+
+ ah->curchan = chan;
+ ah->curchan_rad_index = -1;
+
+ return 0;
+}
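
ar9003_hw_set_channel() assembles the synthesizer programming word from the channel select value (computed by the CHANSEL_2G/CHANSEL_5G macros elsewhere in the driver), the fractional-mode and reference-select bits, and the load bit, then writes it twice so the load bit toggles and latches the new channel. The helper below is an editorial sketch of that word layout only, mirroring the shifts in the code above; it is not taken from hardware documentation.

#include <stdint.h>

/* Sketch of the AR_PHY_65NM_CH0_SYNTH7 word assembled above. */
uint32_t synth7_word(uint32_t channel_sel, uint32_t frac_mode,
		     uint32_t amode_refsel, uint32_t load_synth_channel)
{
	return (channel_sel << 2) |
	       (frac_mode << 30) |
	       (amode_refsel << 28) |
	       (load_synth_channel << 31);
}

/*
 * Usage mirrors the function above: write once with load_synth_channel = 0,
 * then again with it set to 1 to latch the new channel into the synthesizer.
 */
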
+
+/**
+ * ar9003_hw_spur_mitigate_mrc_cck - convert baseband spur frequency
+ * @ah: atheros hardware structure
+ * @chan: channel to apply spur mitigation on
+ *
+ * For single-chip solutions. Converts to the baseband spur frequency given
+ * the input channel frequency and computes the register settings below.
+ *
+ * Spur mitigation for MRC CCK
+ */
+static void ar9003_hw_spur_mitigate_mrc_cck(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ u32 spur_freq[4] = { 2420, 2440, 2464, 2480 };
+ int cur_bb_spur, negative = 0, cck_spur_freq;
+ int i;
+
+ /*
+ * Need to verify range +/- 10 MHz in control channel, otherwise spur
+ * is out-of-band and can be ignored.
+ */
+
+ for (i = 0; i < 4; i++) {
+ negative = 0;
+ cur_bb_spur = spur_freq[i] - chan->channel;
+
+ if (cur_bb_spur < 0) {
+ negative = 1;
+ cur_bb_spur = -cur_bb_spur;
+ }
+ if (cur_bb_spur < 10) {
+ cck_spur_freq = (int)((cur_bb_spur << 19) / 11);
+
+ if (negative == 1)
+ cck_spur_freq = -cck_spur_freq;
+
+ cck_spur_freq = cck_spur_freq & 0xfffff;
+
+ REG_RMW_FIELD(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_YCOK_MAX, 0x7);
+ REG_RMW_FIELD(ah, AR_PHY_CCK_SPUR_MIT,
+ AR_PHY_CCK_SPUR_MIT_SPUR_RSSI_THR, 0x7f);
+ REG_RMW_FIELD(ah, AR_PHY_CCK_SPUR_MIT,
+ AR_PHY_CCK_SPUR_MIT_SPUR_FILTER_TYPE,
+ 0x2);
+ REG_RMW_FIELD(ah, AR_PHY_CCK_SPUR_MIT,
+ AR_PHY_CCK_SPUR_MIT_USE_CCK_SPUR_MIT,
+ 0x1);
+ REG_RMW_FIELD(ah, AR_PHY_CCK_SPUR_MIT,
+ AR_PHY_CCK_SPUR_MIT_CCK_SPUR_FREQ,
+ cck_spur_freq);
+
+ return;
+ }
+ }
+
+ REG_RMW_FIELD(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_YCOK_MAX, 0x5);
+ REG_RMW_FIELD(ah, AR_PHY_CCK_SPUR_MIT,
+ AR_PHY_CCK_SPUR_MIT_USE_CCK_SPUR_MIT, 0x0);
+ REG_RMW_FIELD(ah, AR_PHY_CCK_SPUR_MIT,
+ AR_PHY_CCK_SPUR_MIT_CCK_SPUR_FREQ, 0x0);
+}
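
As a worked example of the scaling above (editorial, with a hypothetical channel): on channel 6 (2437 MHz) the fixed 2440 MHz spur falls 3 MHz inside the band, so the programmed value is (3 << 19) / 11.

#include <stdio.h>

int main(void)
{
	int chan_mhz = 2437;	/* hypothetical: 2.4 GHz channel 6 */
	int spur_mhz = 2440;	/* one of the fixed spur frequencies */
	int bb_spur = spur_mhz - chan_mhz;		/* +3 MHz, within range */
	int cck_spur_freq = (bb_spur << 19) / 11;	/* same scaling as above */

	/* prints 142987 (0x22e8b), which fits in the 20-bit field */
	printf("cck_spur_freq = %d (0x%05x)\n",
	       cck_spur_freq, cck_spur_freq & 0xfffff);
	return 0;
}
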
+
+/* Clean all spur register fields */
+static void ar9003_hw_spur_ofdm_clear(struct ath_hw *ah)
+{
+ REG_RMW_FIELD(ah, AR_PHY_TIMING4,
+ AR_PHY_TIMING4_ENABLE_SPUR_FILTER, 0);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING11,
+ AR_PHY_TIMING11_SPUR_FREQ_SD, 0);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING11,
+ AR_PHY_TIMING11_SPUR_DELTA_PHASE, 0);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
+ AR_PHY_SFCORR_EXT_SPUR_SUBCHANNEL_SD, 0);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING11,
+ AR_PHY_TIMING11_USE_SPUR_FILTER_IN_AGC, 0);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING11,
+ AR_PHY_TIMING11_USE_SPUR_FILTER_IN_SELFCOR, 0);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING4,
+ AR_PHY_TIMING4_ENABLE_SPUR_RSSI, 0);
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
+ AR_PHY_SPUR_REG_EN_VIT_SPUR_RSSI, 0);
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
+ AR_PHY_SPUR_REG_ENABLE_NF_RSSI_SPUR_MIT, 0);
+
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
+ AR_PHY_SPUR_REG_ENABLE_MASK_PPM, 0);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING4,
+ AR_PHY_TIMING4_ENABLE_PILOT_MASK, 0);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING4,
+ AR_PHY_TIMING4_ENABLE_CHAN_MASK, 0);
+ REG_RMW_FIELD(ah, AR_PHY_PILOT_SPUR_MASK,
+ AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_A, 0);
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_A,
+ AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_IDX_A, 0);
+ REG_RMW_FIELD(ah, AR_PHY_CHAN_SPUR_MASK,
+ AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_A, 0);
+ REG_RMW_FIELD(ah, AR_PHY_PILOT_SPUR_MASK,
+ AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_A, 0);
+ REG_RMW_FIELD(ah, AR_PHY_CHAN_SPUR_MASK,
+ AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_A, 0);
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_A,
+ AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_A, 0);
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
+ AR_PHY_SPUR_REG_MASK_RATE_CNTL, 0);
+}
+
+static void ar9003_hw_spur_ofdm(struct ath_hw *ah,
+ int freq_offset,
+ int spur_freq_sd,
+ int spur_delta_phase,
+ int spur_subchannel_sd)
+{
+ int mask_index = 0;
+
+ /* OFDM Spur mitigation */
+ REG_RMW_FIELD(ah, AR_PHY_TIMING4,
+ AR_PHY_TIMING4_ENABLE_SPUR_FILTER, 0x1);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING11,
+ AR_PHY_TIMING11_SPUR_FREQ_SD, spur_freq_sd);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING11,
+ AR_PHY_TIMING11_SPUR_DELTA_PHASE, spur_delta_phase);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
+ AR_PHY_SFCORR_EXT_SPUR_SUBCHANNEL_SD, spur_subchannel_sd);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING11,
+ AR_PHY_TIMING11_USE_SPUR_FILTER_IN_AGC, 0x1);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING11,
+ AR_PHY_TIMING11_USE_SPUR_FILTER_IN_SELFCOR, 0x1);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING4,
+ AR_PHY_TIMING4_ENABLE_SPUR_RSSI, 0x1);
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
+ AR_PHY_SPUR_REG_SPUR_RSSI_THRESH, 34);
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
+ AR_PHY_SPUR_REG_EN_VIT_SPUR_RSSI, 1);
+
+ if (REG_READ_FIELD(ah, AR_PHY_MODE,
+ AR_PHY_MODE_DYNAMIC) == 0x1)
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
+ AR_PHY_SPUR_REG_ENABLE_NF_RSSI_SPUR_MIT, 1);
+
+ mask_index = (freq_offset << 4) / 5;
+ if (mask_index < 0)
+ mask_index = mask_index - 1;
+
+ mask_index = mask_index & 0x7f;
+
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
+ AR_PHY_SPUR_REG_ENABLE_MASK_PPM, 0x1);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING4,
+ AR_PHY_TIMING4_ENABLE_PILOT_MASK, 0x1);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING4,
+ AR_PHY_TIMING4_ENABLE_CHAN_MASK, 0x1);
+ REG_RMW_FIELD(ah, AR_PHY_PILOT_SPUR_MASK,
+ AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_A, mask_index);
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_A,
+ AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_IDX_A, mask_index);
+ REG_RMW_FIELD(ah, AR_PHY_CHAN_SPUR_MASK,
+ AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_A, mask_index);
+ REG_RMW_FIELD(ah, AR_PHY_PILOT_SPUR_MASK,
+ AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_A, 0xc);
+ REG_RMW_FIELD(ah, AR_PHY_CHAN_SPUR_MASK,
+ AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_A, 0xc);
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_A,
+ AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_A, 0xa0);
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
+ AR_PHY_SPUR_REG_MASK_RATE_CNTL, 0xff);
+}
+
+static void ar9003_hw_spur_ofdm_work(struct ath_hw *ah,
+ struct ath9k_channel *chan,
+ int freq_offset)
+{
+ int spur_freq_sd = 0;
+ int spur_subchannel_sd = 0;
+ int spur_delta_phase = 0;
+
+ if (IS_CHAN_HT40(chan)) {
+ if (freq_offset < 0) {
+ if (REG_READ_FIELD(ah, AR_PHY_GEN_CTRL,
+ AR_PHY_GC_DYN2040_PRI_CH) == 0x0)
+ spur_subchannel_sd = 1;
+ else
+ spur_subchannel_sd = 0;
+
+ spur_freq_sd = ((freq_offset + 10) << 9) / 11;
+
+ } else {
+ if (REG_READ_FIELD(ah, AR_PHY_GEN_CTRL,
+ AR_PHY_GC_DYN2040_PRI_CH) == 0x0)
+ spur_subchannel_sd = 0;
+ else
+ spur_subchannel_sd = 1;
+
+ spur_freq_sd = ((freq_offset - 10) << 9) / 11;
+
+ }
+
+ spur_delta_phase = (freq_offset << 17) / 5;
+
+ } else {
+ spur_subchannel_sd = 0;
+ spur_freq_sd = (freq_offset << 9) / 11;
+ spur_delta_phase = (freq_offset << 18) / 5;
+ }
+
+ spur_freq_sd = spur_freq_sd & 0x3ff;
+ spur_delta_phase = spur_delta_phase & 0xfffff;
+
+ ar9003_hw_spur_ofdm(ah,
+ freq_offset,
+ spur_freq_sd,
+ spur_delta_phase,
+ spur_subchannel_sd);
+}
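
For the HT20 branch above, the same hypothetical +3 MHz spur offset gives spur_freq_sd = (3 << 9) / 11 and spur_delta_phase = (3 << 18) / 5; the short editorial sketch below simply evaluates those two expressions.

#include <stdio.h>

int main(void)
{
	int freq_offset = 3;	/* hypothetical spur offset in MHz (HT20 case) */
	int spur_freq_sd = ((freq_offset << 9) / 11) & 0x3ff;
	int spur_delta_phase = ((freq_offset << 18) / 5) & 0xfffff;

	/* prints 139 and 157286 (0x26666) */
	printf("spur_freq_sd=%d spur_delta_phase=%d\n",
	       spur_freq_sd, spur_delta_phase);
	return 0;
}
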
+
+/* Spur mitigation for OFDM */
+static void ar9003_hw_spur_mitigate_ofdm(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ int synth_freq;
+ int range = 10;
+ int freq_offset = 0;
+ int mode;
+ u8 *spurChansPtr;
+ unsigned int i;
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+
+ if (IS_CHAN_5GHZ(chan)) {
+ spurChansPtr = &(eep->modalHeader5G.spurChans[0]);
+ mode = 0;
+ }
+ else {
+ spurChansPtr = &(eep->modalHeader2G.spurChans[0]);
+ mode = 1;
+ }
+
+ if (spurChansPtr[0] == 0)
+ return; /* No spur in the mode */
+
+ if (IS_CHAN_HT40(chan)) {
+ range = 19;
+ if (REG_READ_FIELD(ah, AR_PHY_GEN_CTRL,
+ AR_PHY_GC_DYN2040_PRI_CH) == 0x0)
+ synth_freq = chan->channel - 10;
+ else
+ synth_freq = chan->channel + 10;
+ } else {
+ range = 10;
+ synth_freq = chan->channel;
+ }
+
+ ar9003_hw_spur_ofdm_clear(ah);
+
+ for (i = 0; i < 5 && spurChansPtr[i]; i++) {
+ freq_offset = FBIN2FREQ(spurChansPtr[i], mode) - synth_freq;
+ if (abs(freq_offset) < range) {
+ ar9003_hw_spur_ofdm_work(ah, chan, freq_offset);
+ break;
+ }
+ }
+}
+
+static void ar9003_hw_spur_mitigate(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ ar9003_hw_spur_mitigate_mrc_cck(ah, chan);
+ ar9003_hw_spur_mitigate_ofdm(ah, chan);
+}
+
+static u32 ar9003_hw_compute_pll_control(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ u32 pll;
+
+ pll = SM(0x5, AR_RTC_9300_PLL_REFDIV);
+
+ if (chan && IS_CHAN_HALF_RATE(chan))
+ pll |= SM(0x1, AR_RTC_9300_PLL_CLKSEL);
+ else if (chan && IS_CHAN_QUARTER_RATE(chan))
+ pll |= SM(0x2, AR_RTC_9300_PLL_CLKSEL);
+
+ pll |= SM(0x2c, AR_RTC_9300_PLL_DIV);
+
+ return pll;
+}
+
+static void ar9003_hw_set_channel_regs(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ u32 phymode;
+ u32 enableDacFifo = 0;
+
+ enableDacFifo =
+ (REG_READ(ah, AR_PHY_GEN_CTRL) & AR_PHY_GC_ENABLE_DAC_FIFO);
+
+ /* Enable 11n HT, 20 MHz */
+ phymode = AR_PHY_GC_HT_EN | AR_PHY_GC_SINGLE_HT_LTF1 | AR_PHY_GC_WALSH |
+ AR_PHY_GC_SHORT_GI_40 | enableDacFifo;
+
+ /* Configure baseband for dynamic 20/40 operation */
+ if (IS_CHAN_HT40(chan)) {
+ phymode |= AR_PHY_GC_DYN2040_EN;
+ /* Configure control (primary) channel at +-10MHz */
+ if ((chan->chanmode == CHANNEL_A_HT40PLUS) ||
+ (chan->chanmode == CHANNEL_G_HT40PLUS))
+ phymode |= AR_PHY_GC_DYN2040_PRI_CH;
+
+ }
+
+ /* make sure we preserve INI settings */
+ phymode |= REG_READ(ah, AR_PHY_GEN_CTRL);
+ /* turn off Green Field detection for STA for now */
+ phymode &= ~AR_PHY_GC_GF_DETECT_EN;
+
+ REG_WRITE(ah, AR_PHY_GEN_CTRL, phymode);
+
+ /* Configure MAC for 20/40 operation */
+ ath9k_hw_set11nmac2040(ah);
+
+ /* global transmit timeout (25 TUs default)*/
+ REG_WRITE(ah, AR_GTXTO, 25 << AR_GTXTO_TIMEOUT_LIMIT_S);
+ /* carrier sense timeout */
+ REG_WRITE(ah, AR_CST, 0xF << AR_CST_TIMEOUT_LIMIT_S);
+}
+
+static void ar9003_hw_init_bb(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ u32 synthDelay;
+
+ /*
+ * Wait for the frequency synth to settle (synth goes on
+ * via AR_PHY_ACTIVE_EN). Read the phy active delay register.
+ * Value is in 100ns increments.
+ */
+ synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY;
+ if (IS_CHAN_B(chan))
+ synthDelay = (4 * synthDelay) / 22;
+ else
+ synthDelay /= 10;
+
+ /* Activate the PHY (includes baseband activate + synthesizer on) */
+ REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
+
+ /*
+ * There is an issue if the AP starts the calibration before
+ * the base band timeout completes. This could result in the
+ * rx_clear false triggering. As a workaround we add an extra
+ * BASE_ACTIVATE_DELAY usecs of delay to ensure this condition
+ * does not happen.
+ */
+ udelay(synthDelay + BASE_ACTIVATE_DELAY);
+}
+
+void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx)
+{
+ switch (rx) {
+ case 0x5:
+ REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
+ AR_PHY_SWAP_ALT_CHAIN);
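+ /* fall through */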
+ case 0x3:
+ case 0x1:
+ case 0x2:
+ case 0x7:
+ REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx);
+ REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx);
+ break;
+ default:
+ break;
+ }
+
+ REG_WRITE(ah, AR_SELFGEN_MASK, tx);
+ if (tx == 0x5) {
+ REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
+ AR_PHY_SWAP_ALT_CHAIN);
+ }
+}
+
+/*
+ * Override INI values with chip specific configuration.
+ */
+static void ar9003_hw_override_ini(struct ath_hw *ah)
+{
+ u32 val;
+
+ /*
+ * Set the RX_ABORT and RX_DIS bits and clear them only after
+ * RXE is set for the MAC. This prevents frames with
+ * corrupted descriptor status.
+ */
+ REG_SET_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
+
+ /*
+ * For AR9280 and above, there is a new feature that allows
+ * Multicast search based on both MAC Address and Key ID. By default,
+ * this feature is enabled. But since the driver is not using this
+ * feature, we switch it off; otherwise multicast search based on
+ * MAC addr only will fail.
+ */
+ val = REG_READ(ah, AR_PCU_MISC_MODE2) & (~AR_ADHOC_MCAST_KEYID_ENABLE);
+ REG_WRITE(ah, AR_PCU_MISC_MODE2,
+ val | AR_AGG_WEP_ENABLE_FIX | AR_AGG_WEP_ENABLE);
+}
+
+static void ar9003_hw_prog_ini(struct ath_hw *ah,
+ struct ar5416IniArray *iniArr,
+ int column)
+{
+ unsigned int i, regWrites = 0;
+
+ /* New INI format: Array may be undefined (pre, core, post arrays) */
+ if (!iniArr->ia_array)
+ return;
+
+ /*
+ * New INI format: Pre, core, and post arrays for a given subsystem
+ * may be modal (> 2 columns) or non-modal (2 columns). Determine if
+ * the array is non-modal and force the column to 1.
+ */
+ if (column >= iniArr->ia_columns)
+ column = 1;
+
+ for (i = 0; i < iniArr->ia_rows; i++) {
+ u32 reg = INI_RA(iniArr, i, 0);
+ u32 val = INI_RA(iniArr, i, column);
+
+ REG_WRITE(ah, reg, val);
+
+ /*
+ * Determine if this is a shift register value, and insert the
+ * configured delay if so.
+ */
+ if (reg >= 0x16000 && reg < 0x17000
+ && ah->config.analog_shiftreg)
+ udelay(100);
+
+ DO_DELAY(regWrites);
+ }
+}
+
+static int ar9003_hw_process_ini(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
+ unsigned int regWrites = 0, i;
+ struct ieee80211_channel *channel = chan->chan;
+ u32 modesIndex, freqIndex;
+
+ switch (chan->chanmode) {
+ case CHANNEL_A:
+ case CHANNEL_A_HT20:
+ modesIndex = 1;
+ freqIndex = 1;
+ break;
+ case CHANNEL_A_HT40PLUS:
+ case CHANNEL_A_HT40MINUS:
+ modesIndex = 2;
+ freqIndex = 1;
+ break;
+ case CHANNEL_G:
+ case CHANNEL_G_HT20:
+ case CHANNEL_B:
+ modesIndex = 4;
+ freqIndex = 2;
+ break;
+ case CHANNEL_G_HT40PLUS:
+ case CHANNEL_G_HT40MINUS:
+ modesIndex = 3;
+ freqIndex = 2;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ATH_INI_NUM_SPLIT; i++) {
+ ar9003_hw_prog_ini(ah, &ah->iniSOC[i], modesIndex);
+ ar9003_hw_prog_ini(ah, &ah->iniMac[i], modesIndex);
+ ar9003_hw_prog_ini(ah, &ah->iniBB[i], modesIndex);
+ ar9003_hw_prog_ini(ah, &ah->iniRadio[i], modesIndex);
+ }
+
+ REG_WRITE_ARRAY(&ah->iniModesRxGain, 1, regWrites);
+ REG_WRITE_ARRAY(&ah->iniModesTxGain, modesIndex, regWrites);
+
+ /*
+ * For 5GHz channels requiring Fast Clock, apply
+ * different modal values.
+ */
+ if (IS_CHAN_A_FAST_CLOCK(ah, chan))
+ REG_WRITE_ARRAY(&ah->iniModesAdditional,
+ modesIndex, regWrites);
+
+ ar9003_hw_override_ini(ah);
+ ar9003_hw_set_channel_regs(ah, chan);
+ ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask);
+
+ /* Set TX power */
+ ah->eep_ops->set_txpower(ah, chan,
+ ath9k_regd_get_ctl(regulatory, chan),
+ channel->max_antenna_gain * 2,
+ channel->max_power * 2,
+ min((u32) MAX_RATE_POWER,
+ (u32) regulatory->power_limit));
+
+ return 0;
+}
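+
+/*
+ * In the switch above modesIndex selects the per-mode column of the
+ * INI tables (1: 5 GHz legacy/HT20, 2: 5 GHz HT40, 3: 2.4 GHz HT40,
+ * 4: 2.4 GHz legacy/HT20/CCK), while freqIndex simply encodes the band
+ * (1 for 5 GHz, 2 for 2.4 GHz).
+ */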
+
+static void ar9003_hw_set_rfmode(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ u32 rfMode = 0;
+
+ if (chan == NULL)
+ return;
+
+ rfMode |= (IS_CHAN_B(chan) || IS_CHAN_G(chan))
+ ? AR_PHY_MODE_DYNAMIC : AR_PHY_MODE_OFDM;
+
+ if (IS_CHAN_A_FAST_CLOCK(ah, chan))
+ rfMode |= (AR_PHY_MODE_DYNAMIC | AR_PHY_MODE_DYN_CCK_DISABLE);
+
+ REG_WRITE(ah, AR_PHY_MODE, rfMode);
+}
+
+static void ar9003_hw_mark_phy_inactive(struct ath_hw *ah)
+{
+ REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS);
+}
+
+static void ar9003_hw_set_delta_slope(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ u32 coef_scaled, ds_coef_exp, ds_coef_man;
+ u32 clockMhzScaled = 0x64000000;
+ struct chan_centers centers;
+
+ /*
+ * Half and quarter rate channels divide the scaled clock by 2
+ * or 4 respectively, to scale for the selected channel bandwidth.
+ */
+ if (IS_CHAN_HALF_RATE(chan))
+ clockMhzScaled = clockMhzScaled >> 1;
+ else if (IS_CHAN_QUARTER_RATE(chan))
+ clockMhzScaled = clockMhzScaled >> 2;
+
+ /*
+ * ALGO -> coef = 1e8/fcarrier*fclock/40;
+ * the coefficient is scaled to provide precision for this floating
+ * point calculation
+ */
+ ath9k_hw_get_channel_centers(ah, chan, &centers);
+ coef_scaled = clockMhzScaled / centers.synth_center;
+
+ ath9k_hw_get_delta_slope_vals(ah, coef_scaled, &ds_coef_man,
+ &ds_coef_exp);
+
+ REG_RMW_FIELD(ah, AR_PHY_TIMING3,
+ AR_PHY_TIMING3_DSC_MAN, ds_coef_man);
+ REG_RMW_FIELD(ah, AR_PHY_TIMING3,
+ AR_PHY_TIMING3_DSC_EXP, ds_coef_exp);
+
+ /*
+ * For Short GI,
+ * scaled coeff is 9/10 that of normal coeff
+ */
+ coef_scaled = (9 * coef_scaled) / 10;
+
+ ath9k_hw_get_delta_slope_vals(ah, coef_scaled, &ds_coef_man,
+ &ds_coef_exp);
+
+ /* for short gi */
+ REG_RMW_FIELD(ah, AR_PHY_SGI_DELTA,
+ AR_PHY_SGI_DSC_MAN, ds_coef_man);
+ REG_RMW_FIELD(ah, AR_PHY_SGI_DELTA,
+ AR_PHY_SGI_DSC_EXP, ds_coef_exp);
+}
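+
+/*
+ * Worked example for the delta slope scaling above: on a 2412 MHz
+ * channel at full rate, coef_scaled = 0x64000000 / 2412 ~= 695572,
+ * which ath9k_hw_get_delta_slope_vals() splits into the mantissa and
+ * exponent programmed into AR_PHY_TIMING3; the short GI values reuse
+ * 9/10 of the same coefficient.
+ */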
+
+static bool ar9003_hw_rfbus_req(struct ath_hw *ah)
+{
+ REG_WRITE(ah, AR_PHY_RFBUS_REQ, AR_PHY_RFBUS_REQ_EN);
+ return ath9k_hw_wait(ah, AR_PHY_RFBUS_GRANT, AR_PHY_RFBUS_GRANT_EN,
+ AR_PHY_RFBUS_GRANT_EN, AH_WAIT_TIMEOUT);
+}
+
+/*
+ * Wait for the frequency synth to settle (synth goes on via PHY_ACTIVE_EN).
+ * Read the phy active delay register. Value is in 100ns increments.
+ */
+static void ar9003_hw_rfbus_done(struct ath_hw *ah)
+{
+ u32 synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY;
+ if (IS_CHAN_B(ah->curchan))
+ synthDelay = (4 * synthDelay) / 22;
+ else
+ synthDelay /= 10;
+
+ udelay(synthDelay + BASE_ACTIVATE_DELAY);
+
+ REG_WRITE(ah, AR_PHY_RFBUS_REQ, 0);
+}
+
+/*
+ * Set the interrupt and GPIO values so the ISR can disable RF
+ * on a switch signal. Assumes GPIO port and interrupt polarity
+ * are set prior to call.
+ */
+static void ar9003_hw_enable_rfkill(struct ath_hw *ah)
+{
+ /* Connect rfsilent_bb_l to baseband */
+ REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL,
+ AR_GPIO_INPUT_EN_VAL_RFSILENT_BB);
+ /* Set input mux for rfsilent_bb_l to GPIO #0 */
+ REG_CLR_BIT(ah, AR_GPIO_INPUT_MUX2,
+ AR_GPIO_INPUT_MUX2_RFSILENT);
+
+ /*
+ * Configure the desired GPIO port for input and
+ * enable baseband rf silence.
+ */
+ ath9k_hw_cfg_gpio_input(ah, ah->rfkill_gpio);
+ REG_SET_BIT(ah, AR_PHY_TEST, RFSILENT_BB);
+}
+
+static void ar9003_hw_set_diversity(struct ath_hw *ah, bool value)
+{
+ u32 v = REG_READ(ah, AR_PHY_CCK_DETECT);
+ if (value)
+ v |= AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV;
+ else
+ v &= ~AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV;
+ REG_WRITE(ah, AR_PHY_CCK_DETECT, v);
+}
+
+static bool ar9003_hw_ani_control(struct ath_hw *ah,
+ enum ath9k_ani_cmd cmd, int param)
+{
+ struct ar5416AniState *aniState = ah->curani;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ switch (cmd & ah->ani_function) {
+ case ATH9K_ANI_NOISE_IMMUNITY_LEVEL:{
+ u32 level = param;
+
+ if (level >= ARRAY_SIZE(ah->totalSizeDesired)) {
+ ath_print(common, ATH_DBG_ANI,
+ "level out of range (%u > %u)\n",
+ level,
+ (unsigned)ARRAY_SIZE(ah->totalSizeDesired));
+ return false;
+ }
+
+ REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ,
+ AR_PHY_DESIRED_SZ_TOT_DES,
+ ah->totalSizeDesired[level]);
+ REG_RMW_FIELD(ah, AR_PHY_AGC,
+ AR_PHY_AGC_COARSE_LOW,
+ ah->coarse_low[level]);
+ REG_RMW_FIELD(ah, AR_PHY_AGC,
+ AR_PHY_AGC_COARSE_HIGH,
+ ah->coarse_high[level]);
+ REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
+ AR_PHY_FIND_SIG_FIRPWR, ah->firpwr[level]);
+
+ if (level > aniState->noiseImmunityLevel)
+ ah->stats.ast_ani_niup++;
+ else if (level < aniState->noiseImmunityLevel)
+ ah->stats.ast_ani_nidown++;
+ aniState->noiseImmunityLevel = level;
+ break;
+ }
+ case ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION:{
+ const int m1ThreshLow[] = { 127, 50 };
+ const int m2ThreshLow[] = { 127, 40 };
+ const int m1Thresh[] = { 127, 0x4d };
+ const int m2Thresh[] = { 127, 0x40 };
+ const int m2CountThr[] = { 31, 16 };
+ const int m2CountThrLow[] = { 63, 48 };
+ u32 on = param ? 1 : 0;
+
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
+ AR_PHY_SFCORR_LOW_M1_THRESH_LOW,
+ m1ThreshLow[on]);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
+ AR_PHY_SFCORR_LOW_M2_THRESH_LOW,
+ m2ThreshLow[on]);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR,
+ AR_PHY_SFCORR_M1_THRESH, m1Thresh[on]);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR,
+ AR_PHY_SFCORR_M2_THRESH, m2Thresh[on]);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR,
+ AR_PHY_SFCORR_M2COUNT_THR, m2CountThr[on]);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
+ AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW,
+ m2CountThrLow[on]);
+
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
+ AR_PHY_SFCORR_EXT_M1_THRESH_LOW, m1ThreshLow[on]);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
+ AR_PHY_SFCORR_EXT_M2_THRESH_LOW, m2ThreshLow[on]);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
+ AR_PHY_SFCORR_EXT_M1_THRESH, m1Thresh[on]);
+ REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
+ AR_PHY_SFCORR_EXT_M2_THRESH, m2Thresh[on]);
+
+ if (on)
+ REG_SET_BIT(ah, AR_PHY_SFCORR_LOW,
+ AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
+ else
+ REG_CLR_BIT(ah, AR_PHY_SFCORR_LOW,
+ AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
+
+ if (!on != aniState->ofdmWeakSigDetectOff) {
+ if (on)
+ ah->stats.ast_ani_ofdmon++;
+ else
+ ah->stats.ast_ani_ofdmoff++;
+ aniState->ofdmWeakSigDetectOff = !on;
+ }
+ break;
+ }
+ case ATH9K_ANI_CCK_WEAK_SIGNAL_THR:{
+ const int weakSigThrCck[] = { 8, 6 };
+ u32 high = param ? 1 : 0;
+
+ REG_RMW_FIELD(ah, AR_PHY_CCK_DETECT,
+ AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK,
+ weakSigThrCck[high]);
+ if (high != aniState->cckWeakSigThreshold) {
+ if (high)
+ ah->stats.ast_ani_cckhigh++;
+ else
+ ah->stats.ast_ani_ccklow++;
+ aniState->cckWeakSigThreshold = high;
+ }
+ break;
+ }
+ case ATH9K_ANI_FIRSTEP_LEVEL:{
+ const int firstep[] = { 0, 4, 8 };
+ u32 level = param;
+
+ if (level >= ARRAY_SIZE(firstep)) {
+ ath_print(common, ATH_DBG_ANI,
+ "level out of range (%u > %u)\n",
+ level,
+ (unsigned) ARRAY_SIZE(firstep));
+ return false;
+ }
+ REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
+ AR_PHY_FIND_SIG_FIRSTEP,
+ firstep[level]);
+ if (level > aniState->firstepLevel)
+ ah->stats.ast_ani_stepup++;
+ else if (level < aniState->firstepLevel)
+ ah->stats.ast_ani_stepdown++;
+ aniState->firstepLevel = level;
+ break;
+ }
+ case ATH9K_ANI_SPUR_IMMUNITY_LEVEL:{
+ const int cycpwrThr1[] = { 2, 4, 6, 8, 10, 12, 14, 16 };
+ u32 level = param;
+
+ if (level >= ARRAY_SIZE(cycpwrThr1)) {
+ ath_print(common, ATH_DBG_ANI,
+ "level out of range (%u > %u)\n",
+ level,
+ (unsigned) ARRAY_SIZE(cycpwrThr1));
+ return false;
+ }
+ REG_RMW_FIELD(ah, AR_PHY_TIMING5,
+ AR_PHY_TIMING5_CYCPWR_THR1,
+ cycpwrThr1[level]);
+ if (level > aniState->spurImmunityLevel)
+ ah->stats.ast_ani_spurup++;
+ else if (level < aniState->spurImmunityLevel)
+ ah->stats.ast_ani_spurdown++;
+ aniState->spurImmunityLevel = level;
+ break;
+ }
+ case ATH9K_ANI_PRESENT:
+ break;
+ default:
+ ath_print(common, ATH_DBG_ANI,
+ "invalid cmd %u\n", cmd);
+ return false;
+ }
+
+ ath_print(common, ATH_DBG_ANI, "ANI parameters:\n");
+ ath_print(common, ATH_DBG_ANI,
+ "noiseImmunityLevel=%d, spurImmunityLevel=%d, "
+ "ofdmWeakSigDetectOff=%d\n",
+ aniState->noiseImmunityLevel,
+ aniState->spurImmunityLevel,
+ !aniState->ofdmWeakSigDetectOff);
+ ath_print(common, ATH_DBG_ANI,
+ "cckWeakSigThreshold=%d, "
+ "firstepLevel=%d, listenTime=%d\n",
+ aniState->cckWeakSigThreshold,
+ aniState->firstepLevel,
+ aniState->listenTime);
+ ath_print(common, ATH_DBG_ANI,
+ "cycleCount=%d, ofdmPhyErrCount=%d, cckPhyErrCount=%d\n\n",
+ aniState->cycleCount,
+ aniState->ofdmPhyErrCount,
+ aniState->cckPhyErrCount);
+
+ return true;
+}
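+
+/*
+ * In the ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION case above, each table is
+ * indexed by "on": enabling detection programs the tighter second entry
+ * of each pair (e.g. M1_THRESH_LOW = 50, M2_THRESH_LOW = 40), while
+ * disabling it restores the permissive 127/31/63 defaults.
+ */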
+
+static void ar9003_hw_nf_sanitize_2g(struct ath_hw *ah, s16 *nf)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (*nf > ah->nf_2g_max) {
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "2 GHz NF (%d) > MAX (%d), "
+ "correcting to MAX\n",
+ *nf, ah->nf_2g_max);
+ *nf = ah->nf_2g_max;
+ } else if (*nf < ah->nf_2g_min) {
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "2 GHz NF (%d) < MIN (%d), "
+ "correcting to MIN\n",
+ *nf, ah->nf_2g_min);
+ *nf = ah->nf_2g_min;
+ }
+}
+
+static void ar9003_hw_nf_sanitize_5g(struct ath_hw *ah, s16 *nf)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (*nf > ah->nf_5g_max) {
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "5 GHz NF (%d) > MAX (%d), "
+ "correcting to MAX\n",
+ *nf, ah->nf_5g_max);
+ *nf = ah->nf_5g_max;
+ } else if (*nf < ah->nf_5g_min) {
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "5 GHz NF (%d) < MIN (%d), "
+ "correcting to MIN\n",
+ *nf, ah->nf_5g_min);
+ *nf = ah->nf_5g_min;
+ }
+}
+
+static void ar9003_hw_nf_sanitize(struct ath_hw *ah, s16 *nf)
+{
+ if (IS_CHAN_2GHZ(ah->curchan))
+ ar9003_hw_nf_sanitize_2g(ah, nf);
+ else
+ ar9003_hw_nf_sanitize_5g(ah, nf);
+}
+
+static void ar9003_hw_do_getnf(struct ath_hw *ah,
+ int16_t nfarray[NUM_NF_READINGS])
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ int16_t nf;
+
+ nf = MS(REG_READ(ah, AR_PHY_CCA_0), AR_PHY_MINCCA_PWR);
+ if (nf & 0x100)
+ nf = 0 - ((nf ^ 0x1ff) + 1);
+ ar9003_hw_nf_sanitize(ah, &nf);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "NF calibrated [ctl] [chain 0] is %d\n", nf);
+ nfarray[0] = nf;
+
+ nf = MS(REG_READ(ah, AR_PHY_CCA_1), AR_PHY_CH1_MINCCA_PWR);
+ if (nf & 0x100)
+ nf = 0 - ((nf ^ 0x1ff) + 1);
+ ar9003_hw_nf_sanitize(ah, &nf);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "NF calibrated [ctl] [chain 1] is %d\n", nf);
+ nfarray[1] = nf;
+
+ nf = MS(REG_READ(ah, AR_PHY_CCA_2), AR_PHY_CH2_MINCCA_PWR);
+ if (nf & 0x100)
+ nf = 0 - ((nf ^ 0x1ff) + 1);
+ ar9003_hw_nf_sanitize(ah, &nf);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "NF calibrated [ctl] [chain 2] is %d\n", nf);
+ nfarray[2] = nf;
+
+ nf = MS(REG_READ(ah, AR_PHY_EXT_CCA), AR_PHY_EXT_MINCCA_PWR);
+ if (nf & 0x100)
+ nf = 0 - ((nf ^ 0x1ff) + 1);
+ ar9003_hw_nf_sanitize(ah, &nf);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "NF calibrated [ext] [chain 0] is %d\n", nf);
+ nfarray[3] = nf;
+
+ nf = MS(REG_READ(ah, AR_PHY_EXT_CCA_1), AR_PHY_CH1_EXT_MINCCA_PWR);
+ if (nf & 0x100)
+ nf = 0 - ((nf ^ 0x1ff) + 1);
+ ar9003_hw_nf_sanitize(ah, &nf);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "NF calibrated [ext] [chain 1] is %d\n", nf);
+ nfarray[4] = nf;
+
+ nf = MS(REG_READ(ah, AR_PHY_EXT_CCA_2), AR_PHY_CH2_EXT_MINCCA_PWR);
+ if (nf & 0x100)
+ nf = 0 - ((nf ^ 0x1ff) + 1);
+ ar9003_hw_nf_sanitize(ah, &nf);
+ ath_print(common, ATH_DBG_CALIBRATE,
+ "NF calibrated [ext] [chain 2] is %d\n", nf);
+ nfarray[5] = nf;
+}
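+
+/*
+ * The minCCApwr fields read above are 9-bit two's complement values,
+ * hence the sign extension: a raw reading of 0x1a5 has bit 8 set and
+ * becomes -((0x1a5 ^ 0x1ff) + 1) = -91 dBm before sanitizing.
+ */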
+
+void ar9003_hw_set_nf_limits(struct ath_hw *ah)
+{
+ ah->nf_2g_max = AR_PHY_CCA_MAX_GOOD_VAL_9300_2GHZ;
+ ah->nf_2g_min = AR_PHY_CCA_MIN_GOOD_VAL_9300_2GHZ;
+ ah->nf_5g_max = AR_PHY_CCA_MAX_GOOD_VAL_9300_5GHZ;
+ ah->nf_5g_min = AR_PHY_CCA_MIN_GOOD_VAL_9300_5GHZ;
+}
+
+/*
+ * Find out which of the RX chains are enabled
+ */
+static u32 ar9003_hw_get_rx_chainmask(struct ath_hw *ah)
+{
+ u32 chain = REG_READ(ah, AR_PHY_RX_CHAINMASK);
+ /*
+ * The bits [2:0] indicate the rx chain mask and are to be
+ * interpreted as follows:
+ * 00x => Only chain 0 is enabled
+ * 01x => Chains 1 and 0 enabled
+ * 1xx => Chains 2, 1 and 0 enabled
+ */
+ return chain & 0x7;
+}
+
+static void ar9003_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
+{
+ struct ath9k_nfcal_hist *h;
+ unsigned i, j;
+ int32_t val;
+ const u32 ar9300_cca_regs[6] = {
+ AR_PHY_CCA_0,
+ AR_PHY_CCA_1,
+ AR_PHY_CCA_2,
+ AR_PHY_EXT_CCA,
+ AR_PHY_EXT_CCA_1,
+ AR_PHY_EXT_CCA_2,
+ };
+ u8 chainmask, rx_chain_status;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ rx_chain_status = ar9003_hw_get_rx_chainmask(ah);
+
+ chainmask = 0x3F;
+ h = ah->nfCalHist;
+
+ for (i = 0; i < NUM_NF_READINGS; i++) {
+ if (chainmask & (1 << i)) {
+ val = REG_READ(ah, ar9300_cca_regs[i]);
+ val &= 0xFFFFFE00;
+ val |= (((u32) (h[i].privNF) << 1) & 0x1ff);
+ REG_WRITE(ah, ar9300_cca_regs[i], val);
+ }
+ }
+
+ /*
+ * Load software filtered NF value into baseband internal minCCApwr
+ * variable.
+ */
+ REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_ENABLE_NF);
+ REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_NO_UPDATE_NF);
+ REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF);
+
+ /*
+ * Wait for the load to complete; it should be fast, a few tens of
+ * microseconds. The max delay was raised from the original 250us to
+ * 10000us since 250us often resulted in an NF load timeout and caused
+ * a deaf condition during stress testing (12/12/2009).
+ */
+ for (j = 0; j < 1000; j++) {
+ if ((REG_READ(ah, AR_PHY_AGC_CONTROL) &
+ AR_PHY_AGC_CONTROL_NF) == 0)
+ break;
+ udelay(10);
+ }
+
+ /*
+ * We timed out waiting for the noisefloor to load, probably due to an
+ * in-progress rx. Simply return here and allow the load plenty of time
+ * to complete before the next calibration interval. We need to avoid
+ * trying to load -50 (which happens below) while the previous load is
+ * still in progress as this can cause rx deafness. Instead, by returning
+ * here, the baseband nf cal will just be capped by our present
+ * noisefloor until the next calibration timer.
+ */
+ if (j == 1000) {
+ ath_print(common, ATH_DBG_ANY, "Timeout while waiting for nf "
+ "to load: AR_PHY_AGC_CONTROL=0x%x\n",
+ REG_READ(ah, AR_PHY_AGC_CONTROL));
+ return;
+ }
+
+ /*
+ * Restore maxCCAPower register parameter again so that we're not capped
+ * by the median we just loaded. This will be the initial (and max)
+ * value of the next noise floor calibration the baseband does.
+ */
+ for (i = 0; i < NUM_NF_READINGS; i++) {
+ if (chainmask & (1 << i)) {
+ val = REG_READ(ah, ar9300_cca_regs[i]);
+ val &= 0xFFFFFE00;
+ val |= (((u32) (-50) << 1) & 0x1ff);
+ REG_WRITE(ah, ar9300_cca_regs[i], val);
+ }
+ }
+}
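+
+/*
+ * Example of the CCA register packing used in ar9003_hw_loadnf(): the
+ * low 9 bits of each CCA register are replaced by the filtered NF
+ * shifted left by one, so privNF = -110 is written as
+ * ((-110) << 1) & 0x1ff = 0x124, and the -50 cap restored at the end
+ * becomes 0x19c.
+ */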
+
+void ar9003_hw_attach_phy_ops(struct ath_hw *ah)
+{
+ struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
+
+ priv_ops->rf_set_freq = ar9003_hw_set_channel;
+ priv_ops->spur_mitigate_freq = ar9003_hw_spur_mitigate;
+ priv_ops->compute_pll_control = ar9003_hw_compute_pll_control;
+ priv_ops->set_channel_regs = ar9003_hw_set_channel_regs;
+ priv_ops->init_bb = ar9003_hw_init_bb;
+ priv_ops->process_ini = ar9003_hw_process_ini;
+ priv_ops->set_rfmode = ar9003_hw_set_rfmode;
+ priv_ops->mark_phy_inactive = ar9003_hw_mark_phy_inactive;
+ priv_ops->set_delta_slope = ar9003_hw_set_delta_slope;
+ priv_ops->rfbus_req = ar9003_hw_rfbus_req;
+ priv_ops->rfbus_done = ar9003_hw_rfbus_done;
+ priv_ops->enable_rfkill = ar9003_hw_enable_rfkill;
+ priv_ops->set_diversity = ar9003_hw_set_diversity;
+ priv_ops->ani_control = ar9003_hw_ani_control;
+ priv_ops->do_getnf = ar9003_hw_do_getnf;
+ priv_ops->loadnf = ar9003_hw_loadnf;
+}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
new file mode 100644
index 000000000000..f08cc8bda005
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -0,0 +1,847 @@
+/*
+ * Copyright (c) 2002-2010 Atheros Communications, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef AR9003_PHY_H
+#define AR9003_PHY_H
+
+/*
+ * Channel Register Map
+ */
+#define AR_CHAN_BASE 0x9800
+
+#define AR_PHY_TIMING1 (AR_CHAN_BASE + 0x0)
+#define AR_PHY_TIMING2 (AR_CHAN_BASE + 0x4)
+#define AR_PHY_TIMING3 (AR_CHAN_BASE + 0x8)
+#define AR_PHY_TIMING4 (AR_CHAN_BASE + 0xc)
+#define AR_PHY_TIMING5 (AR_CHAN_BASE + 0x10)
+#define AR_PHY_TIMING6 (AR_CHAN_BASE + 0x14)
+#define AR_PHY_TIMING11 (AR_CHAN_BASE + 0x18)
+#define AR_PHY_SPUR_REG (AR_CHAN_BASE + 0x1c)
+#define AR_PHY_RX_IQCAL_CORR_B0 (AR_CHAN_BASE + 0xdc)
+#define AR_PHY_TX_IQCAL_CONTROL_3 (AR_CHAN_BASE + 0xb0)
+
+#define AR_PHY_TIMING11_SPUR_FREQ_SD 0x3FF00000
+#define AR_PHY_TIMING11_SPUR_FREQ_SD_S 20
+
+#define AR_PHY_TIMING11_SPUR_DELTA_PHASE 0x000FFFFF
+#define AR_PHY_TIMING11_SPUR_DELTA_PHASE_S 0
+
+#define AR_PHY_TIMING11_USE_SPUR_FILTER_IN_AGC 0x40000000
+#define AR_PHY_TIMING11_USE_SPUR_FILTER_IN_AGC_S 30
+
+#define AR_PHY_TIMING11_USE_SPUR_FILTER_IN_SELFCOR 0x80000000
+#define AR_PHY_TIMING11_USE_SPUR_FILTER_IN_SELFCOR_S 31
+
+#define AR_PHY_SPUR_REG_ENABLE_NF_RSSI_SPUR_MIT 0x4000000
+#define AR_PHY_SPUR_REG_ENABLE_NF_RSSI_SPUR_MIT_S 26
+
+#define AR_PHY_SPUR_REG_ENABLE_MASK_PPM 0x20000 /* bins move with freq offset */
+#define AR_PHY_SPUR_REG_ENABLE_MASK_PPM_S 17
+#define AR_PHY_SPUR_REG_SPUR_RSSI_THRESH 0x000000FF
+#define AR_PHY_SPUR_REG_SPUR_RSSI_THRESH_S 0
+#define AR_PHY_SPUR_REG_EN_VIT_SPUR_RSSI 0x00000100
+#define AR_PHY_SPUR_REG_EN_VIT_SPUR_RSSI_S 8
+#define AR_PHY_SPUR_REG_MASK_RATE_CNTL 0x03FC0000
+#define AR_PHY_SPUR_REG_MASK_RATE_CNTL_S 18
+
+#define AR_PHY_RX_IQCAL_CORR_B0_LOOPBACK_IQCORR_EN 0x20000000
+#define AR_PHY_RX_IQCAL_CORR_B0_LOOPBACK_IQCORR_EN_S 29
+
+#define AR_PHY_TX_IQCAL_CONTROL_3_IQCORR_EN 0x80000000
+#define AR_PHY_TX_IQCAL_CONTROL_3_IQCORR_EN_S 31
+
+#define AR_PHY_FIND_SIG_LOW (AR_CHAN_BASE + 0x20)
+
+#define AR_PHY_SFCORR (AR_CHAN_BASE + 0x24)
+#define AR_PHY_SFCORR_LOW (AR_CHAN_BASE + 0x28)
+#define AR_PHY_SFCORR_EXT (AR_CHAN_BASE + 0x2c)
+
+#define AR_PHY_EXT_CCA (AR_CHAN_BASE + 0x30)
+#define AR_PHY_RADAR_0 (AR_CHAN_BASE + 0x34)
+#define AR_PHY_RADAR_1 (AR_CHAN_BASE + 0x38)
+#define AR_PHY_RADAR_EXT (AR_CHAN_BASE + 0x3c)
+#define AR_PHY_MULTICHAIN_CTRL (AR_CHAN_BASE + 0x80)
+#define AR_PHY_PERCHAIN_CSD (AR_CHAN_BASE + 0x84)
+
+#define AR_PHY_TX_PHASE_RAMP_0 (AR_CHAN_BASE + 0xd0)
+#define AR_PHY_ADC_GAIN_DC_CORR_0 (AR_CHAN_BASE + 0xd4)
+#define AR_PHY_IQ_ADC_MEAS_0_B0 (AR_CHAN_BASE + 0xc0)
+#define AR_PHY_IQ_ADC_MEAS_1_B0 (AR_CHAN_BASE + 0xc4)
+#define AR_PHY_IQ_ADC_MEAS_2_B0 (AR_CHAN_BASE + 0xc8)
+#define AR_PHY_IQ_ADC_MEAS_3_B0 (AR_CHAN_BASE + 0xcc)
+
+/* The following registers changed position from AR9300 1.0 to AR9300 2.0 */
+#define AR_PHY_TX_PHASE_RAMP_0_9300_10 (AR_CHAN_BASE + 0xd0 - 0x10)
+#define AR_PHY_ADC_GAIN_DC_CORR_0_9300_10 (AR_CHAN_BASE + 0xd4 - 0x10)
+#define AR_PHY_IQ_ADC_MEAS_0_B0_9300_10 (AR_CHAN_BASE + 0xc0 + 0x8)
+#define AR_PHY_IQ_ADC_MEAS_1_B0_9300_10 (AR_CHAN_BASE + 0xc4 + 0x8)
+#define AR_PHY_IQ_ADC_MEAS_2_B0_9300_10 (AR_CHAN_BASE + 0xc8 + 0x8)
+#define AR_PHY_IQ_ADC_MEAS_3_B0_9300_10 (AR_CHAN_BASE + 0xcc + 0x8)
+
+#define AR_PHY_TX_CRC (AR_CHAN_BASE + 0xa0)
+#define AR_PHY_TST_DAC_CONST (AR_CHAN_BASE + 0xa4)
+#define AR_PHY_SPUR_REPORT_0 (AR_CHAN_BASE + 0xa8)
+#define AR_PHY_CHAN_INFO_TAB_0 (AR_CHAN_BASE + 0x300)
+
+/*
+ * Channel Field Definitions
+ */
+#define AR_PHY_TIMING2_USE_FORCE_PPM 0x00001000
+#define AR_PHY_TIMING2_FORCE_PPM_VAL 0x00000fff
+#define AR_PHY_TIMING3_DSC_MAN 0xFFFE0000
+#define AR_PHY_TIMING3_DSC_MAN_S 17
+#define AR_PHY_TIMING3_DSC_EXP 0x0001E000
+#define AR_PHY_TIMING3_DSC_EXP_S 13
+#define AR_PHY_TIMING4_IQCAL_LOG_COUNT_MAX 0xF000
+#define AR_PHY_TIMING4_IQCAL_LOG_COUNT_MAX_S 12
+#define AR_PHY_TIMING4_DO_CAL 0x10000
+
+#define AR_PHY_TIMING4_ENABLE_PILOT_MASK 0x10000000
+#define AR_PHY_TIMING4_ENABLE_PILOT_MASK_S 28
+#define AR_PHY_TIMING4_ENABLE_CHAN_MASK 0x20000000
+#define AR_PHY_TIMING4_ENABLE_CHAN_MASK_S 29
+
+#define AR_PHY_TIMING4_ENABLE_SPUR_FILTER 0x40000000
+#define AR_PHY_TIMING4_ENABLE_SPUR_FILTER_S 30
+#define AR_PHY_TIMING4_ENABLE_SPUR_RSSI 0x80000000
+#define AR_PHY_TIMING4_ENABLE_SPUR_RSSI_S 31
+
+#define AR_PHY_NEW_ADC_GAIN_CORR_ENABLE 0x40000000
+#define AR_PHY_NEW_ADC_DC_OFFSET_CORR_ENABLE 0x80000000
+#define AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW 0x00000001
+#define AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW 0x00003F00
+#define AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW_S 8
+#define AR_PHY_SFCORR_LOW_M1_THRESH_LOW 0x001FC000
+#define AR_PHY_SFCORR_LOW_M1_THRESH_LOW_S 14
+#define AR_PHY_SFCORR_LOW_M2_THRESH_LOW 0x0FE00000
+#define AR_PHY_SFCORR_LOW_M2_THRESH_LOW_S 21
+#define AR_PHY_SFCORR_M2COUNT_THR 0x0000001F
+#define AR_PHY_SFCORR_M2COUNT_THR_S 0
+#define AR_PHY_SFCORR_M1_THRESH 0x00FE0000
+#define AR_PHY_SFCORR_M1_THRESH_S 17
+#define AR_PHY_SFCORR_M2_THRESH 0x7F000000
+#define AR_PHY_SFCORR_M2_THRESH_S 24
+#define AR_PHY_SFCORR_EXT_M1_THRESH 0x0000007F
+#define AR_PHY_SFCORR_EXT_M1_THRESH_S 0
+#define AR_PHY_SFCORR_EXT_M2_THRESH 0x00003F80
+#define AR_PHY_SFCORR_EXT_M2_THRESH_S 7
+#define AR_PHY_SFCORR_EXT_M1_THRESH_LOW 0x001FC000
+#define AR_PHY_SFCORR_EXT_M1_THRESH_LOW_S 14
+#define AR_PHY_SFCORR_EXT_M2_THRESH_LOW 0x0FE00000
+#define AR_PHY_SFCORR_EXT_M2_THRESH_LOW_S 21
+#define AR_PHY_SFCORR_EXT_SPUR_SUBCHANNEL_SD 0x10000000
+#define AR_PHY_SFCORR_EXT_SPUR_SUBCHANNEL_SD_S 28
+#define AR_PHY_SFCORR_SPUR_SUBCHNL_SD_S 28
+#define AR_PHY_EXT_CCA_THRESH62 0x007F0000
+#define AR_PHY_EXT_CCA_THRESH62_S 16
+#define AR_PHY_EXT_MINCCA_PWR 0x01FF0000
+#define AR_PHY_EXT_MINCCA_PWR_S 16
+#define AR_PHY_TIMING5_CYCPWR_THR1 0x000000FE
+#define AR_PHY_TIMING5_CYCPWR_THR1_S 1
+#define AR_PHY_TIMING5_CYCPWR_THR1_ENABLE 0x00000001
+#define AR_PHY_TIMING5_CYCPWR_THR1_ENABLE_S 0
+#define AR_PHY_TIMING5_CYCPWR_THR1A 0x007F0000
+#define AR_PHY_TIMING5_CYCPWR_THR1A_S 16
+#define AR_PHY_TIMING5_RSSI_THR1A (0x7F << 16)
+#define AR_PHY_TIMING5_RSSI_THR1A_S 16
+#define AR_PHY_TIMING5_RSSI_THR1A_ENA (0x1 << 15)
+#define AR_PHY_RADAR_0_ENA 0x00000001
+#define AR_PHY_RADAR_0_FFT_ENA 0x80000000
+#define AR_PHY_RADAR_0_INBAND 0x0000003e
+#define AR_PHY_RADAR_0_INBAND_S 1
+#define AR_PHY_RADAR_0_PRSSI 0x00000FC0
+#define AR_PHY_RADAR_0_PRSSI_S 6
+#define AR_PHY_RADAR_0_HEIGHT 0x0003F000
+#define AR_PHY_RADAR_0_HEIGHT_S 12
+#define AR_PHY_RADAR_0_RRSSI 0x00FC0000
+#define AR_PHY_RADAR_0_RRSSI_S 18
+#define AR_PHY_RADAR_0_FIRPWR 0x7F000000
+#define AR_PHY_RADAR_0_FIRPWR_S 24
+#define AR_PHY_RADAR_1_RELPWR_ENA 0x00800000
+#define AR_PHY_RADAR_1_USE_FIR128 0x00400000
+#define AR_PHY_RADAR_1_RELPWR_THRESH 0x003F0000
+#define AR_PHY_RADAR_1_RELPWR_THRESH_S 16
+#define AR_PHY_RADAR_1_BLOCK_CHECK 0x00008000
+#define AR_PHY_RADAR_1_MAX_RRSSI 0x00004000
+#define AR_PHY_RADAR_1_RELSTEP_CHECK 0x00002000
+#define AR_PHY_RADAR_1_RELSTEP_THRESH 0x00001F00
+#define AR_PHY_RADAR_1_RELSTEP_THRESH_S 8
+#define AR_PHY_RADAR_1_MAXLEN 0x000000FF
+#define AR_PHY_RADAR_1_MAXLEN_S 0
+#define AR_PHY_RADAR_EXT_ENA 0x00004000
+#define AR_PHY_RADAR_DC_PWR_THRESH 0x007f8000
+#define AR_PHY_RADAR_DC_PWR_THRESH_S 15
+#define AR_PHY_RADAR_LB_DC_CAP 0x7f800000
+#define AR_PHY_RADAR_LB_DC_CAP_S 23
+#define AR_PHY_FIND_SIG_LOW_FIRSTEP_LOW (0x3f << 6)
+#define AR_PHY_FIND_SIG_LOW_FIRSTEP_LOW_S 6
+#define AR_PHY_FIND_SIG_LOW_FIRPWR (0x7f << 12)
+#define AR_PHY_FIND_SIG_LOW_FIRPWR_S 12
+#define AR_PHY_FIND_SIG_LOW_FIRPWR_SIGN_BIT 19
+#define AR_PHY_FIND_SIG_LOW_RELSTEP 0x1f
+#define AR_PHY_FIND_SIG_LOW_RELSTEP_S 0
+#define AR_PHY_FIND_SIG_LOW_RELSTEP_SIGN_BIT 5
+#define AR_PHY_CHAN_INFO_TAB_S2_READ 0x00000008
+#define AR_PHY_CHAN_INFO_TAB_S2_READ_S 3
+#define AR_PHY_RX_IQCAL_CORR_IQCORR_Q_Q_COFF 0x0000007F
+#define AR_PHY_RX_IQCAL_CORR_IQCORR_Q_Q_COFF_S 0
+#define AR_PHY_RX_IQCAL_CORR_IQCORR_Q_I_COFF 0x00003F80
+#define AR_PHY_RX_IQCAL_CORR_IQCORR_Q_I_COFF_S 7
+#define AR_PHY_RX_IQCAL_CORR_IQCORR_ENABLE 0x00004000
+#define AR_PHY_RX_IQCAL_CORR_LOOPBACK_IQCORR_Q_Q_COFF 0x003f8000
+#define AR_PHY_RX_IQCAL_CORR_LOOPBACK_IQCORR_Q_Q_COFF_S 15
+#define AR_PHY_RX_IQCAL_CORR_LOOPBACK_IQCORR_Q_I_COFF 0x1fc00000
+#define AR_PHY_RX_IQCAL_CORR_LOOPBACK_IQCORR_Q_I_COFF_S 22
+
+/*
+ * MRC Register Map
+ */
+#define AR_MRC_BASE 0x9c00
+
+#define AR_PHY_TIMING_3A (AR_MRC_BASE + 0x0)
+#define AR_PHY_LDPC_CNTL1 (AR_MRC_BASE + 0x4)
+#define AR_PHY_LDPC_CNTL2 (AR_MRC_BASE + 0x8)
+#define AR_PHY_PILOT_SPUR_MASK (AR_MRC_BASE + 0xc)
+#define AR_PHY_CHAN_SPUR_MASK (AR_MRC_BASE + 0x10)
+#define AR_PHY_SGI_DELTA (AR_MRC_BASE + 0x14)
+#define AR_PHY_ML_CNTL_1 (AR_MRC_BASE + 0x18)
+#define AR_PHY_ML_CNTL_2 (AR_MRC_BASE + 0x1c)
+#define AR_PHY_TST_ADC (AR_MRC_BASE + 0x20)
+
+#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_A 0x00000FE0
+#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_A_S 5
+#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_A 0x1F
+#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_A_S 0
+
+#define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_A 0x00000FE0
+#define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_A_S 5
+#define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_A 0x1F
+#define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_A_S 0
+
+/*
+ * MRC Field Definitions
+ */
+#define AR_PHY_SGI_DSC_MAN 0x0007FFF0
+#define AR_PHY_SGI_DSC_MAN_S 4
+#define AR_PHY_SGI_DSC_EXP 0x0000000F
+#define AR_PHY_SGI_DSC_EXP_S 0
+/*
+ * BBB Register Map
+ */
+#define AR_BBB_BASE 0x9d00
+
+/*
+ * AGC Register Map
+ */
+#define AR_AGC_BASE 0x9e00
+
+#define AR_PHY_SETTLING (AR_AGC_BASE + 0x0)
+#define AR_PHY_FORCEMAX_GAINS_0 (AR_AGC_BASE + 0x4)
+#define AR_PHY_GAINS_MINOFF0 (AR_AGC_BASE + 0x8)
+#define AR_PHY_DESIRED_SZ (AR_AGC_BASE + 0xc)
+#define AR_PHY_FIND_SIG (AR_AGC_BASE + 0x10)
+#define AR_PHY_AGC (AR_AGC_BASE + 0x14)
+#define AR_PHY_EXT_ATTEN_CTL_0 (AR_AGC_BASE + 0x18)
+#define AR_PHY_CCA_0 (AR_AGC_BASE + 0x1c)
+#define AR_PHY_EXT_CCA0 (AR_AGC_BASE + 0x20)
+#define AR_PHY_RESTART (AR_AGC_BASE + 0x24)
+#define AR_PHY_MC_GAIN_CTRL (AR_AGC_BASE + 0x28)
+#define AR_PHY_EXTCHN_PWRTHR1 (AR_AGC_BASE + 0x2c)
+#define AR_PHY_EXT_CHN_WIN (AR_AGC_BASE + 0x30)
+#define AR_PHY_20_40_DET_THR (AR_AGC_BASE + 0x34)
+#define AR_PHY_RIFS_SRCH (AR_AGC_BASE + 0x38)
+#define AR_PHY_PEAK_DET_CTRL_1 (AR_AGC_BASE + 0x3c)
+#define AR_PHY_PEAK_DET_CTRL_2 (AR_AGC_BASE + 0x40)
+#define AR_PHY_RX_GAIN_BOUNDS_1 (AR_AGC_BASE + 0x44)
+#define AR_PHY_RX_GAIN_BOUNDS_2 (AR_AGC_BASE + 0x48)
+#define AR_PHY_RSSI_0 (AR_AGC_BASE + 0x180)
+#define AR_PHY_SPUR_CCK_REP0 (AR_AGC_BASE + 0x184)
+#define AR_PHY_CCK_DETECT (AR_AGC_BASE + 0x1c0)
+#define AR_PHY_DAG_CTRLCCK (AR_AGC_BASE + 0x1c4)
+#define AR_PHY_IQCORR_CTRL_CCK (AR_AGC_BASE + 0x1c8)
+
+#define AR_PHY_CCK_SPUR_MIT (AR_AGC_BASE + 0x1cc)
+#define AR_PHY_CCK_SPUR_MIT_SPUR_RSSI_THR 0x000001fe
+#define AR_PHY_CCK_SPUR_MIT_SPUR_RSSI_THR_S 1
+#define AR_PHY_CCK_SPUR_MIT_SPUR_FILTER_TYPE 0x60000000
+#define AR_PHY_CCK_SPUR_MIT_SPUR_FILTER_TYPE_S 29
+#define AR_PHY_CCK_SPUR_MIT_USE_CCK_SPUR_MIT 0x00000001
+#define AR_PHY_CCK_SPUR_MIT_USE_CCK_SPUR_MIT_S 0
+#define AR_PHY_CCK_SPUR_MIT_CCK_SPUR_FREQ 0x1ffffe00
+#define AR_PHY_CCK_SPUR_MIT_CCK_SPUR_FREQ_S 9
+
+#define AR_PHY_RX_OCGAIN (AR_AGC_BASE + 0x200)
+
+#define AR_PHY_CCA_NOM_VAL_9300_2GHZ -110
+#define AR_PHY_CCA_NOM_VAL_9300_5GHZ -115
+#define AR_PHY_CCA_MIN_GOOD_VAL_9300_2GHZ -125
+#define AR_PHY_CCA_MIN_GOOD_VAL_9300_5GHZ -125
+#define AR_PHY_CCA_MAX_GOOD_VAL_9300_2GHZ -95
+#define AR_PHY_CCA_MAX_GOOD_VAL_9300_5GHZ -100
+
+/*
+ * AGC Field Definitions
+ */
+#define AR_PHY_EXT_ATTEN_CTL_RXTX_MARGIN 0x00FC0000
+#define AR_PHY_EXT_ATTEN_CTL_RXTX_MARGIN_S 18
+#define AR_PHY_EXT_ATTEN_CTL_BSW_MARGIN 0x00003C00
+#define AR_PHY_EXT_ATTEN_CTL_BSW_MARGIN_S 10
+#define AR_PHY_EXT_ATTEN_CTL_BSW_ATTEN 0x0000001F
+#define AR_PHY_EXT_ATTEN_CTL_BSW_ATTEN_S 0
+#define AR_PHY_EXT_ATTEN_CTL_XATTEN2_MARGIN 0x003E0000
+#define AR_PHY_EXT_ATTEN_CTL_XATTEN2_MARGIN_S 17
+#define AR_PHY_EXT_ATTEN_CTL_XATTEN1_MARGIN 0x0001F000
+#define AR_PHY_EXT_ATTEN_CTL_XATTEN1_MARGIN_S 12
+#define AR_PHY_EXT_ATTEN_CTL_XATTEN2_DB 0x00000FC0
+#define AR_PHY_EXT_ATTEN_CTL_XATTEN2_DB_S 6
+#define AR_PHY_EXT_ATTEN_CTL_XATTEN1_DB 0x0000003F
+#define AR_PHY_EXT_ATTEN_CTL_XATTEN1_DB_S 0
+#define AR_PHY_RXGAIN_TXRX_ATTEN 0x0003F000
+#define AR_PHY_RXGAIN_TXRX_ATTEN_S 12
+#define AR_PHY_RXGAIN_TXRX_RF_MAX 0x007C0000
+#define AR_PHY_RXGAIN_TXRX_RF_MAX_S 18
+#define AR9280_PHY_RXGAIN_TXRX_ATTEN 0x00003F80
+#define AR9280_PHY_RXGAIN_TXRX_ATTEN_S 7
+#define AR9280_PHY_RXGAIN_TXRX_MARGIN 0x001FC000
+#define AR9280_PHY_RXGAIN_TXRX_MARGIN_S 14
+#define AR_PHY_SETTLING_SWITCH 0x00003F80
+#define AR_PHY_SETTLING_SWITCH_S 7
+#define AR_PHY_DESIRED_SZ_ADC 0x000000FF
+#define AR_PHY_DESIRED_SZ_ADC_S 0
+#define AR_PHY_DESIRED_SZ_PGA 0x0000FF00
+#define AR_PHY_DESIRED_SZ_PGA_S 8
+#define AR_PHY_DESIRED_SZ_TOT_DES 0x0FF00000
+#define AR_PHY_DESIRED_SZ_TOT_DES_S 20
+#define AR_PHY_MINCCA_PWR 0x1FF00000
+#define AR_PHY_MINCCA_PWR_S 20
+#define AR_PHY_CCA_THRESH62 0x0007F000
+#define AR_PHY_CCA_THRESH62_S 12
+#define AR9280_PHY_MINCCA_PWR 0x1FF00000
+#define AR9280_PHY_MINCCA_PWR_S 20
+#define AR9280_PHY_CCA_THRESH62 0x000FF000
+#define AR9280_PHY_CCA_THRESH62_S 12
+#define AR_PHY_EXT_CCA0_THRESH62 0x000000FF
+#define AR_PHY_EXT_CCA0_THRESH62_S 0
+#define AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK 0x0000003F
+#define AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK_S 0
+#define AR_PHY_CCK_DETECT_ANT_SWITCH_TIME 0x00001FC0
+#define AR_PHY_CCK_DETECT_ANT_SWITCH_TIME_S 6
+#define AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV 0x2000
+
+#define AR_PHY_DAG_CTRLCCK_EN_RSSI_THR 0x00000200
+#define AR_PHY_DAG_CTRLCCK_EN_RSSI_THR_S 9
+#define AR_PHY_DAG_CTRLCCK_RSSI_THR 0x0001FC00
+#define AR_PHY_DAG_CTRLCCK_RSSI_THR_S 10
+
+#define AR_PHY_RIFS_INIT_DELAY 0x3ff0000
+#define AR_PHY_AGC_COARSE_LOW 0x00007F80
+#define AR_PHY_AGC_COARSE_LOW_S 7
+#define AR_PHY_AGC_COARSE_HIGH 0x003F8000
+#define AR_PHY_AGC_COARSE_HIGH_S 15
+#define AR_PHY_AGC_COARSE_PWR_CONST 0x0000007F
+#define AR_PHY_AGC_COARSE_PWR_CONST_S 0
+#define AR_PHY_FIND_SIG_FIRSTEP 0x0003F000
+#define AR_PHY_FIND_SIG_FIRSTEP_S 12
+#define AR_PHY_FIND_SIG_FIRPWR 0x03FC0000
+#define AR_PHY_FIND_SIG_FIRPWR_S 18
+#define AR_PHY_FIND_SIG_FIRPWR_SIGN_BIT 25
+#define AR_PHY_FIND_SIG_RELPWR (0x1f << 6)
+#define AR_PHY_FIND_SIG_RELPWR_S 6
+#define AR_PHY_FIND_SIG_RELPWR_SIGN_BIT 11
+#define AR_PHY_FIND_SIG_RELSTEP 0x1f
+#define AR_PHY_FIND_SIG_RELSTEP_S 0
+#define AR_PHY_FIND_SIG_RELSTEP_SIGN_BIT 5
+#define AR_PHY_RESTART_DIV_GC 0x001C0000
+#define AR_PHY_RESTART_DIV_GC_S 18
+#define AR_PHY_RESTART_ENA 0x01
+#define AR_PHY_DC_RESTART_DIS 0x40000000
+
+#define AR_PHY_TPC_OLPC_GAIN_DELTA_PAL_ON 0xFF000000
+#define AR_PHY_TPC_OLPC_GAIN_DELTA_PAL_ON_S 24
+#define AR_PHY_TPC_OLPC_GAIN_DELTA 0x00FF0000
+#define AR_PHY_TPC_OLPC_GAIN_DELTA_S 16
+
+#define AR_PHY_TPC_6_ERROR_EST_MODE 0x03000000
+#define AR_PHY_TPC_6_ERROR_EST_MODE_S 24
+
+/*
+ * SM Register Map
+ */
+#define AR_SM_BASE 0xa200
+
+#define AR_PHY_D2_CHIP_ID (AR_SM_BASE + 0x0)
+#define AR_PHY_GEN_CTRL (AR_SM_BASE + 0x4)
+#define AR_PHY_MODE (AR_SM_BASE + 0x8)
+#define AR_PHY_ACTIVE (AR_SM_BASE + 0xc)
+#define AR_PHY_SPUR_MASK_A (AR_SM_BASE + 0x20)
+#define AR_PHY_SPUR_MASK_B (AR_SM_BASE + 0x24)
+#define AR_PHY_SPECTRAL_SCAN (AR_SM_BASE + 0x28)
+#define AR_PHY_RADAR_BW_FILTER (AR_SM_BASE + 0x2c)
+#define AR_PHY_SEARCH_START_DELAY (AR_SM_BASE + 0x30)
+#define AR_PHY_MAX_RX_LEN (AR_SM_BASE + 0x34)
+#define AR_PHY_FRAME_CTL (AR_SM_BASE + 0x38)
+#define AR_PHY_RFBUS_REQ (AR_SM_BASE + 0x3c)
+#define AR_PHY_RFBUS_GRANT (AR_SM_BASE + 0x40)
+#define AR_PHY_RIFS (AR_SM_BASE + 0x44)
+#define AR_PHY_RX_CLR_DELAY (AR_SM_BASE + 0x50)
+#define AR_PHY_RX_DELAY (AR_SM_BASE + 0x54)
+
+#define AR_PHY_XPA_TIMING_CTL (AR_SM_BASE + 0x64)
+#define AR_PHY_MISC_PA_CTL (AR_SM_BASE + 0x80)
+#define AR_PHY_SWITCH_CHAIN_0 (AR_SM_BASE + 0x84)
+#define AR_PHY_SWITCH_COM (AR_SM_BASE + 0x88)
+#define AR_PHY_SWITCH_COM_2 (AR_SM_BASE + 0x8c)
+#define AR_PHY_RX_CHAINMASK (AR_SM_BASE + 0xa0)
+#define AR_PHY_CAL_CHAINMASK (AR_SM_BASE + 0xc0)
+#define AR_PHY_CALMODE (AR_SM_BASE + 0xc8)
+#define AR_PHY_FCAL_1 (AR_SM_BASE + 0xcc)
+#define AR_PHY_FCAL_2_0 (AR_SM_BASE + 0xd0)
+#define AR_PHY_DFT_TONE_CTL_0 (AR_SM_BASE + 0xd4)
+#define AR_PHY_CL_CAL_CTL (AR_SM_BASE + 0xd8)
+#define AR_PHY_CL_TAB_0 (AR_SM_BASE + 0x100)
+#define AR_PHY_SYNTH_CONTROL (AR_SM_BASE + 0x140)
+#define AR_PHY_ADDAC_CLK_SEL (AR_SM_BASE + 0x144)
+#define AR_PHY_PLL_CTL (AR_SM_BASE + 0x148)
+#define AR_PHY_ANALOG_SWAP (AR_SM_BASE + 0x14c)
+#define AR_PHY_ADDAC_PARA_CTL (AR_SM_BASE + 0x150)
+#define AR_PHY_XPA_CFG (AR_SM_BASE + 0x158)
+
+#define AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_IDX_A 0x0001FC00
+#define AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_IDX_A_S 10
+#define AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_A 0x3FF
+#define AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_A_S 0
+
+#define AR_PHY_TEST (AR_SM_BASE + 0x160)
+
+#define AR_PHY_TEST_BBB_OBS_SEL 0x780000
+#define AR_PHY_TEST_BBB_OBS_SEL_S 19
+
+#define AR_PHY_TEST_RX_OBS_SEL_BIT5_S 23
+#define AR_PHY_TEST_RX_OBS_SEL_BIT5 (1 << AR_PHY_TEST_RX_OBS_SEL_BIT5_S)
+
+#define AR_PHY_TEST_CHAIN_SEL 0xC0000000
+#define AR_PHY_TEST_CHAIN_SEL_S 30
+
+#define AR_PHY_TEST_CTL_STATUS (AR_SM_BASE + 0x164)
+#define AR_PHY_TEST_CTL_TSTDAC_EN 0x1
+#define AR_PHY_TEST_CTL_TSTDAC_EN_S 0
+#define AR_PHY_TEST_CTL_TX_OBS_SEL 0x1C
+#define AR_PHY_TEST_CTL_TX_OBS_SEL_S 2
+#define AR_PHY_TEST_CTL_TX_OBS_MUX_SEL 0x60
+#define AR_PHY_TEST_CTL_TX_OBS_MUX_SEL_S 5
+#define AR_PHY_TEST_CTL_TSTADC_EN 0x100
+#define AR_PHY_TEST_CTL_TSTADC_EN_S 8
+#define AR_PHY_TEST_CTL_RX_OBS_SEL 0x3C00
+#define AR_PHY_TEST_CTL_RX_OBS_SEL_S 10
+
+#define AR_PHY_TSTDAC (AR_SM_BASE + 0x168)
+
+#define AR_PHY_CHAN_STATUS (AR_SM_BASE + 0x16c)
+#define AR_PHY_CHAN_INFO_MEMORY (AR_SM_BASE + 0x170)
+#define AR_PHY_CHNINFO_NOISEPWR (AR_SM_BASE + 0x174)
+#define AR_PHY_CHNINFO_GAINDIFF (AR_SM_BASE + 0x178)
+#define AR_PHY_CHNINFO_FINETIM (AR_SM_BASE + 0x17c)
+#define AR_PHY_CHAN_INFO_GAIN_0 (AR_SM_BASE + 0x180)
+#define AR_PHY_SCRAMBLER_SEED (AR_SM_BASE + 0x190)
+#define AR_PHY_CCK_TX_CTRL (AR_SM_BASE + 0x194)
+
+#define AR_PHY_HEAVYCLIP_CTL (AR_SM_BASE + 0x1a4)
+#define AR_PHY_HEAVYCLIP_20 (AR_SM_BASE + 0x1a8)
+#define AR_PHY_HEAVYCLIP_40 (AR_SM_BASE + 0x1ac)
+#define AR_PHY_ILLEGAL_TXRATE (AR_SM_BASE + 0x1b0)
+
+#define AR_PHY_PWRTX_MAX (AR_SM_BASE + 0x1f0)
+#define AR_PHY_POWER_TX_SUB (AR_SM_BASE + 0x1f4)
+
+#define AR_PHY_TPC_4_B0 (AR_SM_BASE + 0x204)
+#define AR_PHY_TPC_5_B0 (AR_SM_BASE + 0x208)
+#define AR_PHY_TPC_6_B0 (AR_SM_BASE + 0x20c)
+#define AR_PHY_TPC_11_B0 (AR_SM_BASE + 0x220)
+#define AR_PHY_TPC_18 (AR_SM_BASE + 0x23c)
+#define AR_PHY_TPC_19 (AR_SM_BASE + 0x240)
+
+#define AR_PHY_TX_FORCED_GAIN (AR_SM_BASE + 0x258)
+
+#define AR_PHY_PDADC_TAB_0 (AR_SM_BASE + 0x280)
+
+#define AR_PHY_TX_IQCAL_CONTROL_1 (AR_SM_BASE + 0x448)
+#define AR_PHY_TX_IQCAL_START (AR_SM_BASE + 0x440)
+#define AR_PHY_TX_IQCAL_STATUS_B0 (AR_SM_BASE + 0x48c)
+#define AR_PHY_TX_IQCAL_CORR_COEFF_01_B0 (AR_SM_BASE + 0x450)
+
+#define AR_PHY_PANIC_WD_STATUS (AR_SM_BASE + 0x5c0)
+#define AR_PHY_PANIC_WD_CTL_1 (AR_SM_BASE + 0x5c4)
+#define AR_PHY_PANIC_WD_CTL_2 (AR_SM_BASE + 0x5c8)
+#define AR_PHY_BT_CTL (AR_SM_BASE + 0x5cc)
+#define AR_PHY_ONLY_WARMRESET (AR_SM_BASE + 0x5d0)
+#define AR_PHY_ONLY_CTL (AR_SM_BASE + 0x5d4)
+#define AR_PHY_ECO_CTRL (AR_SM_BASE + 0x5dc)
+#define AR_PHY_BB_THERM_ADC_1 (AR_SM_BASE + 0x248)
+
+#define AR_PHY_65NM_CH0_SYNTH4 0x1608c
+#define AR_PHY_SYNTH4_LONG_SHIFT_SELECT 0x00000002
+#define AR_PHY_SYNTH4_LONG_SHIFT_SELECT_S 1
+#define AR_PHY_65NM_CH0_SYNTH7 0x16098
+#define AR_PHY_65NM_CH0_BIAS1 0x160c0
+#define AR_PHY_65NM_CH0_BIAS2 0x160c4
+#define AR_PHY_65NM_CH0_BIAS4 0x160cc
+#define AR_PHY_65NM_CH0_RXTX4 0x1610c
+#define AR_PHY_65NM_CH0_THERM 0x16290
+
+#define AR_PHY_65NM_CH0_THERM_LOCAL 0x80000000
+#define AR_PHY_65NM_CH0_THERM_LOCAL_S 31
+#define AR_PHY_65NM_CH0_THERM_START 0x20000000
+#define AR_PHY_65NM_CH0_THERM_START_S 29
+#define AR_PHY_65NM_CH0_THERM_SAR_ADC_OUT 0x0000ff00
+#define AR_PHY_65NM_CH0_THERM_SAR_ADC_OUT_S 8
+
+#define AR_PHY_65NM_CH0_RXTX1 0x16100
+#define AR_PHY_65NM_CH0_RXTX2 0x16104
+#define AR_PHY_65NM_CH1_RXTX1 0x16500
+#define AR_PHY_65NM_CH1_RXTX2 0x16504
+#define AR_PHY_65NM_CH2_RXTX1 0x16900
+#define AR_PHY_65NM_CH2_RXTX2 0x16904
+
+#define AR_PHY_RX1DB_BIQUAD_LONG_SHIFT 0x00380000
+#define AR_PHY_RX1DB_BIQUAD_LONG_SHIFT_S 19
+#define AR_PHY_RX6DB_BIQUAD_LONG_SHIFT 0x00c00000
+#define AR_PHY_RX6DB_BIQUAD_LONG_SHIFT_S 22
+#define AR_PHY_LNAGAIN_LONG_SHIFT 0xe0000000
+#define AR_PHY_LNAGAIN_LONG_SHIFT_S 29
+#define AR_PHY_MXRGAIN_LONG_SHIFT 0x03000000
+#define AR_PHY_MXRGAIN_LONG_SHIFT_S 24
+#define AR_PHY_VGAGAIN_LONG_SHIFT 0x1c000000
+#define AR_PHY_VGAGAIN_LONG_SHIFT_S 26
+#define AR_PHY_SCFIR_GAIN_LONG_SHIFT 0x00000001
+#define AR_PHY_SCFIR_GAIN_LONG_SHIFT_S 0
+#define AR_PHY_MANRXGAIN_LONG_SHIFT 0x00000002
+#define AR_PHY_MANRXGAIN_LONG_SHIFT_S 1
+
+/*
+ * SM Field Definitions
+ */
+#define AR_PHY_CL_CAL_ENABLE 0x00000002
+#define AR_PHY_PARALLEL_CAL_ENABLE 0x00000001
+#define AR_PHY_TPCRG1_PD_CAL_ENABLE 0x00400000
+#define AR_PHY_TPCRG1_PD_CAL_ENABLE_S 22
+
+#define AR_PHY_ADDAC_PARACTL_OFF_PWDADC 0x00008000
+
+#define AR_PHY_FCAL20_CAP_STATUS_0 0x01f00000
+#define AR_PHY_FCAL20_CAP_STATUS_0_S 20
+
+#define AR_PHY_RFBUS_REQ_EN 0x00000001 /* request for RF bus */
+#define AR_PHY_RFBUS_GRANT_EN 0x00000001 /* RF bus granted */
+#define AR_PHY_GC_TURBO_MODE 0x00000001 /* set turbo mode bits */
+#define AR_PHY_GC_TURBO_SHORT 0x00000002 /* set short symbols to turbo mode setting */
+#define AR_PHY_GC_DYN2040_EN 0x00000004 /* enable dyn 20/40 mode */
+#define AR_PHY_GC_DYN2040_PRI_ONLY 0x00000008 /* dyn 20/40 - primary only */
+#define AR_PHY_GC_DYN2040_PRI_CH 0x00000010 /* dyn 20/40 - primary ch offset (0=+10MHz, 1=-10MHz) */
+#define AR_PHY_GC_DYN2040_PRI_CH_S 4
+#define AR_PHY_GC_DYN2040_EXT_CH 0x00000020 /* dyn 20/40 - ext ch spacing (0=20MHz, 1=25MHz) */
+#define AR_PHY_GC_HT_EN 0x00000040 /* ht enable */
+#define AR_PHY_GC_SHORT_GI_40 0x00000080 /* allow short GI for HT 40 */
+#define AR_PHY_GC_WALSH 0x00000100 /* walsh spatial spreading for 2 chains,2 streams TX */
+#define AR_PHY_GC_SINGLE_HT_LTF1 0x00000200 /* single length (4us) 1st HT long training symbol */
+#define AR_PHY_GC_GF_DETECT_EN 0x00000400 /* enable Green Field detection. Only affects rx, not tx */
+#define AR_PHY_GC_ENABLE_DAC_FIFO 0x00000800 /* fifo between bb and dac */
+#define AR_PHY_RX_DELAY_DELAY 0x00003FFF /* delay from wakeup to rx ena */
+
+#define AR_PHY_CALMODE_IQ 0x00000000
+#define AR_PHY_CALMODE_ADC_GAIN 0x00000001
+#define AR_PHY_CALMODE_ADC_DC_PER 0x00000002
+#define AR_PHY_CALMODE_ADC_DC_INIT 0x00000003
+#define AR_PHY_SWAP_ALT_CHAIN 0x00000040
+#define AR_PHY_MODE_OFDM 0x00000000
+#define AR_PHY_MODE_CCK 0x00000001
+#define AR_PHY_MODE_DYNAMIC 0x00000004
+#define AR_PHY_MODE_DYNAMIC_S 2
+#define AR_PHY_MODE_HALF 0x00000020
+#define AR_PHY_MODE_QUARTER 0x00000040
+#define AR_PHY_MAC_CLK_MODE 0x00000080
+#define AR_PHY_MODE_DYN_CCK_DISABLE 0x00000100
+#define AR_PHY_MODE_SVD_HALF 0x00000200
+#define AR_PHY_ACTIVE_EN 0x00000001
+#define AR_PHY_ACTIVE_DIS 0x00000000
+#define AR_PHY_FORCE_XPA_CFG 0x000000001
+#define AR_PHY_FORCE_XPA_CFG_S 0
+#define AR_PHY_XPA_TIMING_CTL_TX_END_XPAB_OFF 0xFF000000
+#define AR_PHY_XPA_TIMING_CTL_TX_END_XPAB_OFF_S 24
+#define AR_PHY_XPA_TIMING_CTL_TX_END_XPAA_OFF 0x00FF0000
+#define AR_PHY_XPA_TIMING_CTL_TX_END_XPAA_OFF_S 16
+#define AR_PHY_XPA_TIMING_CTL_FRAME_XPAB_ON 0x0000FF00
+#define AR_PHY_XPA_TIMING_CTL_FRAME_XPAB_ON_S 8
+#define AR_PHY_XPA_TIMING_CTL_FRAME_XPAA_ON 0x000000FF
+#define AR_PHY_XPA_TIMING_CTL_FRAME_XPAA_ON_S 0
+#define AR_PHY_TX_END_TO_A2_RX_ON 0x00FF0000
+#define AR_PHY_TX_END_TO_A2_RX_ON_S 16
+#define AR_PHY_TX_END_DATA_START 0x000000FF
+#define AR_PHY_TX_END_DATA_START_S 0
+#define AR_PHY_TX_END_PA_ON 0x0000FF00
+#define AR_PHY_TX_END_PA_ON_S 8
+#define AR_PHY_TPCRG5_PD_GAIN_OVERLAP 0x0000000F
+#define AR_PHY_TPCRG5_PD_GAIN_OVERLAP_S 0
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1 0x000003F0
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1_S 4
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2 0x0000FC00
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2_S 10
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3 0x003F0000
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3_S 16
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4 0x0FC00000
+#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4_S 22
+#define AR_PHY_TPCRG1_NUM_PD_GAIN 0x0000c000
+#define AR_PHY_TPCRG1_NUM_PD_GAIN_S 14
+#define AR_PHY_TPCRG1_PD_GAIN_1 0x00030000
+#define AR_PHY_TPCRG1_PD_GAIN_1_S 16
+#define AR_PHY_TPCRG1_PD_GAIN_2 0x000C0000
+#define AR_PHY_TPCRG1_PD_GAIN_2_S 18
+#define AR_PHY_TPCRG1_PD_GAIN_3 0x00300000
+#define AR_PHY_TPCRG1_PD_GAIN_3_S 20
+#define AR_PHY_TPCGR1_FORCED_DAC_GAIN 0x0000003e
+#define AR_PHY_TPCGR1_FORCED_DAC_GAIN_S 1
+#define AR_PHY_TPCGR1_FORCE_DAC_GAIN 0x00000001
+#define AR_PHY_TXGAIN_FORCE 0x00000001
+#define AR_PHY_TXGAIN_FORCED_PADVGNRA 0x00003c00
+#define AR_PHY_TXGAIN_FORCED_PADVGNRA_S 10
+#define AR_PHY_TXGAIN_FORCED_PADVGNRB 0x0003c000
+#define AR_PHY_TXGAIN_FORCED_PADVGNRB_S 14
+#define AR_PHY_TXGAIN_FORCED_PADVGNRD 0x00c00000
+#define AR_PHY_TXGAIN_FORCED_PADVGNRD_S 22
+#define AR_PHY_TXGAIN_FORCED_TXMXRGAIN 0x000003c0
+#define AR_PHY_TXGAIN_FORCED_TXMXRGAIN_S 6
+#define AR_PHY_TXGAIN_FORCED_TXBB1DBGAIN 0x0000000e
+#define AR_PHY_TXGAIN_FORCED_TXBB1DBGAIN_S 1
+
+#define AR_PHY_POWER_TX_RATE1 0x9934
+#define AR_PHY_POWER_TX_RATE2 0x9938
+#define AR_PHY_POWER_TX_RATE_MAX 0x993c
+#define AR_PHY_POWER_TX_RATE_MAX_TPC_ENABLE 0x00000040
+#define PHY_AGC_CLR 0x10000000
+#define RFSILENT_BB 0x00002000
+#define AR_PHY_CHAN_INFO_GAIN_DIFF_PPM_MASK 0xFFF
+#define AR_PHY_CHAN_INFO_GAIN_DIFF_PPM_SIGNED_BIT 0x800
+#define AR_PHY_CHAN_INFO_GAIN_DIFF_UPPER_LIMIT 320
+#define AR_PHY_CHAN_INFO_MEMORY_CAPTURE_MASK 0x0001
+#define AR_PHY_RX_DELAY_DELAY 0x00003FFF
+#define AR_PHY_CCK_TX_CTRL_JAPAN 0x00000010
+#define AR_PHY_SPECTRAL_SCAN_ENABLE 0x00000001
+#define AR_PHY_SPECTRAL_SCAN_ENABLE_S 0
+#define AR_PHY_SPECTRAL_SCAN_ACTIVE 0x00000002
+#define AR_PHY_SPECTRAL_SCAN_ACTIVE_S 1
+#define AR_PHY_SPECTRAL_SCAN_FFT_PERIOD 0x000000F0
+#define AR_PHY_SPECTRAL_SCAN_FFT_PERIOD_S 4
+#define AR_PHY_SPECTRAL_SCAN_PERIOD 0x0000FF00
+#define AR_PHY_SPECTRAL_SCAN_PERIOD_S 8
+#define AR_PHY_SPECTRAL_SCAN_COUNT 0x00FF0000
+#define AR_PHY_SPECTRAL_SCAN_COUNT_S 16
+#define AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT 0x01000000
+#define AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT_S 24
+#define AR_PHY_CHANNEL_STATUS_RX_CLEAR 0x00000004
+#define AR_PHY_TX_IQCAQL_CONTROL_1_IQCORR_I_Q_COFF_DELPT 0x01fc0000
+#define AR_PHY_TX_IQCAQL_CONTROL_1_IQCORR_I_Q_COFF_DELPT_S 18
+#define AR_PHY_TX_IQCAL_START_DO_CAL 0x00000001
+#define AR_PHY_TX_IQCAL_START_DO_CAL_S 0
+
+#define AR_PHY_TX_IQCAL_STATUS_FAILED 0x00000001
+#define AR_PHY_TX_IQCAL_CORR_COEFF_01_COEFF_TABLE 0x00003fff
+#define AR_PHY_TX_IQCAL_CORR_COEFF_01_COEFF_TABLE_S 0
+
+#define AR_PHY_TPC_18_THERM_CAL_VALUE 0xff
+#define AR_PHY_TPC_18_THERM_CAL_VALUE_S 0
+#define AR_PHY_TPC_19_ALPHA_THERM 0xff
+#define AR_PHY_TPC_19_ALPHA_THERM_S 0
+
+#define AR_PHY_65NM_CH0_RXTX4_THERM_ON 0x10000000
+#define AR_PHY_65NM_CH0_RXTX4_THERM_ON_S 28
+
+#define AR_PHY_BB_THERM_ADC_1_INIT_THERM 0x000000ff
+#define AR_PHY_BB_THERM_ADC_1_INIT_THERM_S 0
+
+/*
+ * Channel 1 Register Map
+ */
+#define AR_CHAN1_BASE 0xa800
+
+#define AR_PHY_EXT_CCA_1 (AR_CHAN1_BASE + 0x30)
+#define AR_PHY_TX_PHASE_RAMP_1 (AR_CHAN1_BASE + 0xd0)
+#define AR_PHY_ADC_GAIN_DC_CORR_1 (AR_CHAN1_BASE + 0xd4)
+
+#define AR_PHY_SPUR_REPORT_1 (AR_CHAN1_BASE + 0xa8)
+#define AR_PHY_CHAN_INFO_TAB_1 (AR_CHAN1_BASE + 0x300)
+#define AR_PHY_RX_IQCAL_CORR_B1 (AR_CHAN1_BASE + 0xdc)
+
+/*
+ * Channel 1 Field Definitions
+ */
+#define AR_PHY_CH1_EXT_MINCCA_PWR 0x01FF0000
+#define AR_PHY_CH1_EXT_MINCCA_PWR_S 16
+
+/*
+ * AGC 1 Register Map
+ */
+#define AR_AGC1_BASE 0xae00
+
+#define AR_PHY_FORCEMAX_GAINS_1 (AR_AGC1_BASE + 0x4)
+#define AR_PHY_EXT_ATTEN_CTL_1 (AR_AGC1_BASE + 0x18)
+#define AR_PHY_CCA_1 (AR_AGC1_BASE + 0x1c)
+#define AR_PHY_CCA_CTRL_1 (AR_AGC1_BASE + 0x20)
+#define AR_PHY_RSSI_1 (AR_AGC1_BASE + 0x180)
+#define AR_PHY_SPUR_CCK_REP_1 (AR_AGC1_BASE + 0x184)
+#define AR_PHY_RX_OCGAIN_2 (AR_AGC1_BASE + 0x200)
+
+/*
+ * AGC 1 Field Definitions
+ */
+#define AR_PHY_CH1_MINCCA_PWR 0x1FF00000
+#define AR_PHY_CH1_MINCCA_PWR_S 20
+
+/*
+ * SM 1 Register Map
+ */
+#define AR_SM1_BASE 0xb200
+
+#define AR_PHY_SWITCH_CHAIN_1 (AR_SM1_BASE + 0x84)
+#define AR_PHY_FCAL_2_1 (AR_SM1_BASE + 0xd0)
+#define AR_PHY_DFT_TONE_CTL_1 (AR_SM1_BASE + 0xd4)
+#define AR_PHY_CL_TAB_1 (AR_SM1_BASE + 0x100)
+#define AR_PHY_CHAN_INFO_GAIN_1 (AR_SM1_BASE + 0x180)
+#define AR_PHY_TPC_4_B1 (AR_SM1_BASE + 0x204)
+#define AR_PHY_TPC_5_B1 (AR_SM1_BASE + 0x208)
+#define AR_PHY_TPC_6_B1 (AR_SM1_BASE + 0x20c)
+#define AR_PHY_TPC_11_B1 (AR_SM1_BASE + 0x220)
+#define AR_PHY_PDADC_TAB_1 (AR_SM1_BASE + 0x240)
+#define AR_PHY_TX_IQCAL_STATUS_B1 (AR_SM1_BASE + 0x48c)
+#define AR_PHY_TX_IQCAL_CORR_COEFF_01_B1 (AR_SM1_BASE + 0x450)
+
+/*
+ * Channel 2 Register Map
+ */
+#define AR_CHAN2_BASE 0xb800
+
+#define AR_PHY_EXT_CCA_2 (AR_CHAN2_BASE + 0x30)
+#define AR_PHY_TX_PHASE_RAMP_2 (AR_CHAN2_BASE + 0xd0)
+#define AR_PHY_ADC_GAIN_DC_CORR_2 (AR_CHAN2_BASE + 0xd4)
+
+#define AR_PHY_SPUR_REPORT_2 (AR_CHAN2_BASE + 0xa8)
+#define AR_PHY_CHAN_INFO_TAB_2 (AR_CHAN2_BASE + 0x300)
+#define AR_PHY_RX_IQCAL_CORR_B2 (AR_CHAN2_BASE + 0xdc)
+
+/*
+ * Channel 2 Field Definitions
+ */
+#define AR_PHY_CH2_EXT_MINCCA_PWR 0x01FF0000
+#define AR_PHY_CH2_EXT_MINCCA_PWR_S 16
+/*
+ * AGC 2 Register Map
+ */
+#define AR_AGC2_BASE 0xbe00
+
+#define AR_PHY_FORCEMAX_GAINS_2 (AR_AGC2_BASE + 0x4)
+#define AR_PHY_EXT_ATTEN_CTL_2 (AR_AGC2_BASE + 0x18)
+#define AR_PHY_CCA_2 (AR_AGC2_BASE + 0x1c)
+#define AR_PHY_CCA_CTRL_2 (AR_AGC2_BASE + 0x20)
+#define AR_PHY_RSSI_2 (AR_AGC2_BASE + 0x180)
+
+/*
+ * AGC 2 Field Definitions
+ */
+#define AR_PHY_CH2_MINCCA_PWR 0x1FF00000
+#define AR_PHY_CH2_MINCCA_PWR_S 20
+
+/*
+ * SM 2 Register Map
+ */
+#define AR_SM2_BASE 0xc200
+
+#define AR_PHY_SWITCH_CHAIN_2 (AR_SM2_BASE + 0x84)
+#define AR_PHY_FCAL_2_2 (AR_SM2_BASE + 0xd0)
+#define AR_PHY_DFT_TONE_CTL_2 (AR_SM2_BASE + 0xd4)
+#define AR_PHY_CL_TAB_2 (AR_SM2_BASE + 0x100)
+#define AR_PHY_CHAN_INFO_GAIN_2 (AR_SM2_BASE + 0x180)
+#define AR_PHY_TPC_4_B2 (AR_SM2_BASE + 0x204)
+#define AR_PHY_TPC_5_B2 (AR_SM2_BASE + 0x208)
+#define AR_PHY_TPC_6_B2 (AR_SM2_BASE + 0x20c)
+#define AR_PHY_TPC_11_B2 (AR_SM2_BASE + 0x220)
+#define AR_PHY_PDADC_TAB_2 (AR_SM2_BASE + 0x240)
+#define AR_PHY_TX_IQCAL_STATUS_B2 (AR_SM2_BASE + 0x48c)
+#define AR_PHY_TX_IQCAL_CORR_COEFF_01_B2 (AR_SM2_BASE + 0x450)
+
+#define AR_PHY_TX_IQCAL_STATUS_B2_FAILED 0x00000001
+
+/*
+ * AGC 3 Register Map
+ */
+#define AR_AGC3_BASE 0xce00
+
+#define AR_PHY_RSSI_3 (AR_AGC3_BASE + 0x180)
+
+/*
+ * Misc helper defines
+ */
+#define AR_PHY_CHAIN_OFFSET (AR_CHAN1_BASE - AR_CHAN_BASE)
+
+#define AR_PHY_NEW_ADC_DC_GAIN_CORR(_i) (AR_PHY_ADC_GAIN_DC_CORR_0 + (AR_PHY_CHAIN_OFFSET * (_i)))
+#define AR_PHY_NEW_ADC_DC_GAIN_CORR_9300_10(_i) (AR_PHY_ADC_GAIN_DC_CORR_0_9300_10 + (AR_PHY_CHAIN_OFFSET * (_i)))
+#define AR_PHY_SWITCH_CHAIN(_i) (AR_PHY_SWITCH_CHAIN_0 + (AR_PHY_CHAIN_OFFSET * (_i)))
+#define AR_PHY_EXT_ATTEN_CTL(_i) (AR_PHY_EXT_ATTEN_CTL_0 + (AR_PHY_CHAIN_OFFSET * (_i)))
+
+#define AR_PHY_RXGAIN(_i) (AR_PHY_FORCEMAX_GAINS_0 + (AR_PHY_CHAIN_OFFSET * (_i)))
+#define AR_PHY_TPCRG5(_i) (AR_PHY_TPC_5_B0 + (AR_PHY_CHAIN_OFFSET * (_i)))
+#define AR_PHY_PDADC_TAB(_i) (AR_PHY_PDADC_TAB_0 + (AR_PHY_CHAIN_OFFSET * (_i)))
+
+#define AR_PHY_CAL_MEAS_0(_i) (AR_PHY_IQ_ADC_MEAS_0_B0 + (AR_PHY_CHAIN_OFFSET * (_i)))
+#define AR_PHY_CAL_MEAS_1(_i) (AR_PHY_IQ_ADC_MEAS_1_B0 + (AR_PHY_CHAIN_OFFSET * (_i)))
+#define AR_PHY_CAL_MEAS_2(_i) (AR_PHY_IQ_ADC_MEAS_2_B0 + (AR_PHY_CHAIN_OFFSET * (_i)))
+#define AR_PHY_CAL_MEAS_3(_i) (AR_PHY_IQ_ADC_MEAS_3_B0 + (AR_PHY_CHAIN_OFFSET * (_i)))
+#define AR_PHY_CAL_MEAS_0_9300_10(_i) (AR_PHY_IQ_ADC_MEAS_0_B0_9300_10 + (AR_PHY_CHAIN_OFFSET * (_i)))
+#define AR_PHY_CAL_MEAS_1_9300_10(_i) (AR_PHY_IQ_ADC_MEAS_1_B0_9300_10 + (AR_PHY_CHAIN_OFFSET * (_i)))
+#define AR_PHY_CAL_MEAS_2_9300_10(_i) (AR_PHY_IQ_ADC_MEAS_2_B0_9300_10 + (AR_PHY_CHAIN_OFFSET * (_i)))
+#define AR_PHY_CAL_MEAS_3_9300_10(_i) (AR_PHY_IQ_ADC_MEAS_3_B0_9300_10 + (AR_PHY_CHAIN_OFFSET * (_i)))
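+
+/*
+ * AR_PHY_CHAIN_OFFSET is 0xa800 - 0x9800 = 0x1000, so the per-chain
+ * helpers above step by 0x1000 per chain index; for example
+ * AR_PHY_SWITCH_CHAIN(2) = 0xa284 + 2 * 0x1000 = 0xc284, i.e. the same
+ * address as AR_PHY_SWITCH_CHAIN_2.
+ */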
+
+#define AR_PHY_BB_PANIC_NON_IDLE_ENABLE 0x00000001
+#define AR_PHY_BB_PANIC_IDLE_ENABLE 0x00000002
+#define AR_PHY_BB_PANIC_IDLE_MASK 0xFFFF0000
+#define AR_PHY_BB_PANIC_NON_IDLE_MASK 0x0000FFFC
+
+#define AR_PHY_BB_PANIC_RST_ENABLE 0x00000002
+#define AR_PHY_BB_PANIC_IRQ_ENABLE 0x00000004
+#define AR_PHY_BB_PANIC_CNTL2_MASK 0xFFFFFFF9
+
+#define AR_PHY_BB_WD_STATUS 0x00000007
+#define AR_PHY_BB_WD_STATUS_S 0
+#define AR_PHY_BB_WD_DET_HANG 0x00000008
+#define AR_PHY_BB_WD_DET_HANG_S 3
+#define AR_PHY_BB_WD_RADAR_SM 0x000000F0
+#define AR_PHY_BB_WD_RADAR_SM_S 4
+#define AR_PHY_BB_WD_RX_OFDM_SM 0x00000F00
+#define AR_PHY_BB_WD_RX_OFDM_SM_S 8
+#define AR_PHY_BB_WD_RX_CCK_SM 0x0000F000
+#define AR_PHY_BB_WD_RX_CCK_SM_S 12
+#define AR_PHY_BB_WD_TX_OFDM_SM 0x000F0000
+#define AR_PHY_BB_WD_TX_OFDM_SM_S 16
+#define AR_PHY_BB_WD_TX_CCK_SM 0x00F00000
+#define AR_PHY_BB_WD_TX_CCK_SM_S 20
+#define AR_PHY_BB_WD_AGC_SM 0x0F000000
+#define AR_PHY_BB_WD_AGC_SM_S 24
+#define AR_PHY_BB_WD_SRCH_SM 0xF0000000
+#define AR_PHY_BB_WD_SRCH_SM_S 28
+
+#define AR_PHY_BB_WD_STATUS_CLR 0x00000008
+
+void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx);
+
+#endif /* AR9003_PHY_H */
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 83c7ea4c007f..fbb7dec6ddeb 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -114,8 +114,10 @@ enum buffer_type {
#define bf_isretried(bf) (bf->bf_state.bf_type & BUF_RETRY)
#define bf_isxretried(bf) (bf->bf_state.bf_type & BUF_XRETRY)
+#define ATH_TXSTATUS_RING_SIZE 64
+
struct ath_descdma {
- struct ath_desc *dd_desc;
+ void *dd_desc;
dma_addr_t dd_desc_paddr;
u32 dd_desc_len;
struct ath_buf *dd_bufptr;
@@ -123,7 +125,7 @@ struct ath_descdma {
int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
struct list_head *head, const char *name,
- int nbuf, int ndesc);
+ int nbuf, int ndesc, bool is_tx);
void ath_descdma_cleanup(struct ath_softc *sc, struct ath_descdma *dd,
struct list_head *head);
@@ -178,9 +180,6 @@ void ath_descdma_cleanup(struct ath_softc *sc, struct ath_descdma *dd,
#define BAW_WITHIN(_start, _bawsz, _seqno) \
((((_seqno) - (_start)) & 4095) < (_bawsz))
-#define ATH_DS_BA_SEQ(_ds) ((_ds)->ds_us.tx.ts_seqnum)
-#define ATH_DS_BA_BITMAP(_ds) (&(_ds)->ds_us.tx.ba_low)
-#define ATH_DS_TX_BA(_ds) ((_ds)->ds_us.tx.ts_flags & ATH9K_TX_BA)
#define ATH_AN_2_TID(_an, _tidno) (&(_an)->tid[(_tidno)])
#define ATH_TX_COMPLETE_POLL_INT 1000
@@ -191,6 +190,7 @@ enum ATH_AGGR_STATUS {
ATH_AGGR_LIMITED,
};
+#define ATH_TXFIFO_DEPTH 8
struct ath_txq {
u32 axq_qnum;
u32 *axq_link;
@@ -200,6 +200,10 @@ struct ath_txq {
bool stopped;
bool axq_tx_inprogress;
struct list_head axq_acq;
+ struct list_head txq_fifo[ATH_TXFIFO_DEPTH];
+ struct list_head txq_fifo_pending;
+ u8 txq_headidx;
+ u8 txq_tailidx;
};
#define AGGR_CLEANUP BIT(1)
@@ -226,6 +230,12 @@ struct ath_tx {
struct ath_descdma txdma;
};
+struct ath_rx_edma {
+ struct sk_buff_head rx_fifo;
+ struct sk_buff_head rx_buffers;
+ u32 rx_fifo_hwsize;
+};
+
struct ath_rx {
u8 defant;
u8 rxotherant;
@@ -235,6 +245,8 @@ struct ath_rx {
spinlock_t rxbuflock;
struct list_head rxbuf;
struct ath_descdma rxdma;
+ struct ath_buf *rx_bufptr;
+ struct ath_rx_edma rx_edma[ATH9K_RX_QUEUE_MAX];
};
int ath_startrecv(struct ath_softc *sc);
@@ -243,7 +255,7 @@ void ath_flushrecv(struct ath_softc *sc);
u32 ath_calcrxfilter(struct ath_softc *sc);
int ath_rx_init(struct ath_softc *sc, int nbufs);
void ath_rx_cleanup(struct ath_softc *sc);
-int ath_rx_tasklet(struct ath_softc *sc, int flush);
+int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp);
struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype);
void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq);
int ath_tx_setup(struct ath_softc *sc, int haltype);
@@ -261,6 +273,7 @@ int ath_txq_update(struct ath_softc *sc, int qnum,
int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
struct ath_tx_control *txctl);
void ath_tx_tasklet(struct ath_softc *sc);
+void ath_tx_edma_tasklet(struct ath_softc *sc);
void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb);
bool ath_tx_aggr_check(struct ath_softc *sc, struct ath_node *an, u8 tidno);
void ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
@@ -483,7 +496,6 @@ struct ath_softc {
bool ps_enabled;
bool ps_idle;
unsigned long ps_usecount;
- enum ath9k_int imask;
struct ath_config config;
struct ath_rx rx;
@@ -511,6 +523,8 @@ struct ath_softc {
struct ath_beacon_config cur_beacon_conf;
struct delayed_work tx_complete_work;
struct ath_btcoex btcoex;
+
+ struct ath_descdma txsdma;
};
struct ath_wiphy {
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index b4a31a43a62c..f43d85a302c4 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -76,24 +76,13 @@ static void ath_beacon_setup(struct ath_softc *sc, struct ath_vif *avp,
ds = bf->bf_desc;
flags = ATH9K_TXDESC_NOACK;
- if (((sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC) ||
- (sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT)) &&
- (ah->caps.hw_caps & ATH9K_HW_CAP_VEOL)) {
- ds->ds_link = bf->bf_daddr; /* self-linked */
- flags |= ATH9K_TXDESC_VEOL;
- /* Let hardware handle antenna switching. */
- antenna = 0;
- } else {
- ds->ds_link = 0;
- /*
- * Switch antenna every beacon.
- * Should only switch every beacon period, not for every SWBA
- * XXX assumes two antennae
- */
- antenna = ((sc->beacon.ast_be_xmit / sc->nbcnvifs) & 1 ? 2 : 1);
- }
-
- ds->ds_data = bf->bf_buf_addr;
+ ds->ds_link = 0;
+ /*
+ * Switch antenna every beacon.
+ * Should only switch every beacon period, not for every SWBA
+ * XXX assumes two antennae
+ */
+ antenna = ((sc->beacon.ast_be_xmit / sc->nbcnvifs) & 1 ? 2 : 1);
sband = &sc->sbands[common->hw->conf.channel->band];
rate = sband->bitrates[rateidx].hw_value;
@@ -109,7 +98,8 @@ static void ath_beacon_setup(struct ath_softc *sc, struct ath_vif *avp,
/* NB: beacon's BufLen must be a multiple of 4 bytes */
ath9k_hw_filltxdesc(ah, ds, roundup(skb->len, 4),
- true, true, ds);
+ true, true, ds, bf->bf_buf_addr,
+ sc->beacon.beaconq);
memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);
series[0].Tries = 1;
@@ -216,36 +206,6 @@ static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw,
return bf;
}
-/*
- * Startup beacon transmission for adhoc mode when they are sent entirely
- * by the hardware using the self-linked descriptor + veol trick.
-*/
-static void ath_beacon_start_adhoc(struct ath_softc *sc,
- struct ieee80211_vif *vif)
-{
- struct ath_hw *ah = sc->sc_ah;
- struct ath_common *common = ath9k_hw_common(ah);
- struct ath_buf *bf;
- struct ath_vif *avp;
- struct sk_buff *skb;
-
- avp = (void *)vif->drv_priv;
-
- if (avp->av_bcbuf == NULL)
- return;
-
- bf = avp->av_bcbuf;
- skb = bf->bf_mpdu;
-
- ath_beacon_setup(sc, avp, bf, 0);
-
- /* NB: caller is known to have already stopped tx dma */
- ath9k_hw_puttxbuf(ah, sc->beacon.beaconq, bf->bf_daddr);
- ath9k_hw_txstart(ah, sc->beacon.beaconq);
- ath_print(common, ATH_DBG_BEACON, "TXDP%u = %llx (%p)\n",
- sc->beacon.beaconq, ito64(bf->bf_daddr), bf->bf_desc);
-}
-
int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
{
struct ath_softc *sc = aphy->sc;
@@ -266,7 +226,8 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
list_del(&avp->av_bcbuf->list);
if (sc->sc_ah->opmode == NL80211_IFTYPE_AP ||
- !(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_VEOL)) {
+ sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC ||
+ sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT) {
int slot;
/*
* Assign the vif to a beacon xmit slot. As
@@ -275,17 +236,11 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
avp->av_bslot = 0;
for (slot = 0; slot < ATH_BCBUF; slot++)
if (sc->beacon.bslot[slot] == NULL) {
- /*
- * XXX hack, space out slots to better
- * deal with misses
- */
- if (slot+1 < ATH_BCBUF &&
- sc->beacon.bslot[slot+1] == NULL) {
- avp->av_bslot = slot+1;
- break;
- }
avp->av_bslot = slot;
+
/* NB: keep looking for a double slot */
+ if (slot == 0 || !sc->beacon.bslot[slot-1])
+ break;
}
BUG_ON(sc->beacon.bslot[avp->av_bslot] != NULL);
sc->beacon.bslot[avp->av_bslot] = vif;
@@ -524,6 +479,7 @@ static void ath9k_beacon_init(struct ath_softc *sc,
static void ath_beacon_config_ap(struct ath_softc *sc,
struct ath_beacon_config *conf)
{
+ struct ath_hw *ah = sc->sc_ah;
u32 nexttbtt, intval;
/* NB: the beacon interval is kept internally in TU's */
@@ -539,15 +495,15 @@ static void ath_beacon_config_ap(struct ath_softc *sc,
* prepare beacon frames.
*/
intval |= ATH9K_BEACON_ENA;
- sc->imask |= ATH9K_INT_SWBA;
+ ah->imask |= ATH9K_INT_SWBA;
ath_beaconq_config(sc);
/* Set the computed AP beacon timers */
- ath9k_hw_set_interrupts(sc->sc_ah, 0);
+ ath9k_hw_set_interrupts(ah, 0);
ath9k_beacon_init(sc, nexttbtt, intval);
sc->beacon.bmisscnt = 0;
- ath9k_hw_set_interrupts(sc->sc_ah, sc->imask);
+ ath9k_hw_set_interrupts(ah, ah->imask);
/* Clear the reset TSF flag, so that subsequent beacon updation
will not reset the HW TSF. */
@@ -566,7 +522,8 @@ static void ath_beacon_config_ap(struct ath_softc *sc,
static void ath_beacon_config_sta(struct ath_softc *sc,
struct ath_beacon_config *conf)
{
- struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_beacon_state bs;
int dtimperiod, dtimcount, sleepduration;
int cfpperiod, cfpcount;
@@ -605,7 +562,7 @@ static void ath_beacon_config_sta(struct ath_softc *sc,
* Pull nexttbtt forward to reflect the current
* TSF and calculate dtim+cfp state for the result.
*/
- tsf = ath9k_hw_gettsf64(sc->sc_ah);
+ tsf = ath9k_hw_gettsf64(ah);
tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE;
num_beacons = tsftu / intval + 1;
@@ -678,17 +635,18 @@ static void ath_beacon_config_sta(struct ath_softc *sc,
/* Set the computed STA beacon timers */
- ath9k_hw_set_interrupts(sc->sc_ah, 0);
- ath9k_hw_set_sta_beacon_timers(sc->sc_ah, &bs);
- sc->imask |= ATH9K_INT_BMISS;
- ath9k_hw_set_interrupts(sc->sc_ah, sc->imask);
+ ath9k_hw_set_interrupts(ah, 0);
+ ath9k_hw_set_sta_beacon_timers(ah, &bs);
+ ah->imask |= ATH9K_INT_BMISS;
+ ath9k_hw_set_interrupts(ah, ah->imask);
}
static void ath_beacon_config_adhoc(struct ath_softc *sc,
struct ath_beacon_config *conf,
struct ieee80211_vif *vif)
{
- struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
u64 tsf;
u32 tsftu, intval, nexttbtt;
@@ -703,7 +661,7 @@ static void ath_beacon_config_adhoc(struct ath_softc *sc,
else if (intval)
nexttbtt = roundup(nexttbtt, intval);
- tsf = ath9k_hw_gettsf64(sc->sc_ah);
+ tsf = ath9k_hw_gettsf64(ah);
tsftu = TSF_TO_TU((u32)(tsf>>32), (u32)tsf) + FUDGE;
do {
nexttbtt += intval;
@@ -719,21 +677,16 @@ static void ath_beacon_config_adhoc(struct ath_softc *sc,
* self-linked tx descriptor and let the hardware deal with things.
*/
intval |= ATH9K_BEACON_ENA;
- if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_VEOL))
- sc->imask |= ATH9K_INT_SWBA;
+ ah->imask |= ATH9K_INT_SWBA;
ath_beaconq_config(sc);
/* Set the computed ADHOC beacon timers */
- ath9k_hw_set_interrupts(sc->sc_ah, 0);
+ ath9k_hw_set_interrupts(ah, 0);
ath9k_beacon_init(sc, nexttbtt, intval);
sc->beacon.bmisscnt = 0;
- ath9k_hw_set_interrupts(sc->sc_ah, sc->imask);
-
- /* FIXME: Handle properly when vif is NULL */
- if (vif && sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_VEOL)
- ath_beacon_start_adhoc(sc, vif);
+ ath9k_hw_set_interrupts(ah, ah->imask);
}
void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif)
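
The slot-assignment loop rewritten in ath_beacon_alloc() above records the first free beacon slot but keeps walking while the slot below it is still occupied, so beacons end up spaced apart when possible (the "double slot" note). A standalone model of that selection, with hypothetical names, shown only to make the loop's intent explicit:

/* Prefer a free slot whose lower neighbour is also free; otherwise
 * fall back to the first free slot found. */
static int example_pick_bslot(struct ieee80211_vif *bslot[], int nslots)
{
	int slot, chosen = 0;

	for (slot = 0; slot < nslots; slot++) {
		if (bslot[slot] != NULL)
			continue;
		chosen = slot;
		if (slot == 0 || bslot[slot - 1] == NULL)
			break;	/* found a "double slot" */
	}
	return chosen;
}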
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
index 238a5744d8e9..07b8fa6fb62f 100644
--- a/drivers/net/wireless/ath/ath9k/calib.c
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -15,6 +15,9 @@
*/
#include "hw.h"
+#include "hw-ops.h"
+
+/* Common calibration code */
/* We can tune this as we go by monitoring really low values */
#define ATH9K_NF_TOO_LOW -60
@@ -83,93 +86,11 @@ static void ath9k_hw_update_nfcal_hist_buffer(struct ath9k_nfcal_hist *h,
ath9k_hw_get_nf_hist_mid(h[i].nfCalBuffer);
}
}
- return;
}
-static void ath9k_hw_do_getnf(struct ath_hw *ah,
- int16_t nfarray[NUM_NF_READINGS])
-{
- struct ath_common *common = ath9k_hw_common(ah);
- int16_t nf;
-
- if (AR_SREV_9280_10_OR_LATER(ah))
- nf = MS(REG_READ(ah, AR_PHY_CCA), AR9280_PHY_MINCCA_PWR);
- else
- nf = MS(REG_READ(ah, AR_PHY_CCA), AR_PHY_MINCCA_PWR);
-
- if (nf & 0x100)
- nf = 0 - ((nf ^ 0x1ff) + 1);
- ath_print(common, ATH_DBG_CALIBRATE,
- "NF calibrated [ctl] [chain 0] is %d\n", nf);
- nfarray[0] = nf;
-
- if (!AR_SREV_9285(ah)) {
- if (AR_SREV_9280_10_OR_LATER(ah))
- nf = MS(REG_READ(ah, AR_PHY_CH1_CCA),
- AR9280_PHY_CH1_MINCCA_PWR);
- else
- nf = MS(REG_READ(ah, AR_PHY_CH1_CCA),
- AR_PHY_CH1_MINCCA_PWR);
-
- if (nf & 0x100)
- nf = 0 - ((nf ^ 0x1ff) + 1);
- ath_print(common, ATH_DBG_CALIBRATE,
- "NF calibrated [ctl] [chain 1] is %d\n", nf);
- nfarray[1] = nf;
-
- if (!AR_SREV_9280(ah) && !AR_SREV_9287(ah)) {
- nf = MS(REG_READ(ah, AR_PHY_CH2_CCA),
- AR_PHY_CH2_MINCCA_PWR);
- if (nf & 0x100)
- nf = 0 - ((nf ^ 0x1ff) + 1);
- ath_print(common, ATH_DBG_CALIBRATE,
- "NF calibrated [ctl] [chain 2] is %d\n", nf);
- nfarray[2] = nf;
- }
- }
-
- if (AR_SREV_9280_10_OR_LATER(ah))
- nf = MS(REG_READ(ah, AR_PHY_EXT_CCA),
- AR9280_PHY_EXT_MINCCA_PWR);
- else
- nf = MS(REG_READ(ah, AR_PHY_EXT_CCA),
- AR_PHY_EXT_MINCCA_PWR);
-
- if (nf & 0x100)
- nf = 0 - ((nf ^ 0x1ff) + 1);
- ath_print(common, ATH_DBG_CALIBRATE,
- "NF calibrated [ext] [chain 0] is %d\n", nf);
- nfarray[3] = nf;
-
- if (!AR_SREV_9285(ah)) {
- if (AR_SREV_9280_10_OR_LATER(ah))
- nf = MS(REG_READ(ah, AR_PHY_CH1_EXT_CCA),
- AR9280_PHY_CH1_EXT_MINCCA_PWR);
- else
- nf = MS(REG_READ(ah, AR_PHY_CH1_EXT_CCA),
- AR_PHY_CH1_EXT_MINCCA_PWR);
-
- if (nf & 0x100)
- nf = 0 - ((nf ^ 0x1ff) + 1);
- ath_print(common, ATH_DBG_CALIBRATE,
- "NF calibrated [ext] [chain 1] is %d\n", nf);
- nfarray[4] = nf;
-
- if (!AR_SREV_9280(ah) && !AR_SREV_9287(ah)) {
- nf = MS(REG_READ(ah, AR_PHY_CH2_EXT_CCA),
- AR_PHY_CH2_EXT_MINCCA_PWR);
- if (nf & 0x100)
- nf = 0 - ((nf ^ 0x1ff) + 1);
- ath_print(common, ATH_DBG_CALIBRATE,
- "NF calibrated [ext] [chain 2] is %d\n", nf);
- nfarray[5] = nf;
- }
- }
-}
-
-static bool getNoiseFloorThresh(struct ath_hw *ah,
- enum ieee80211_band band,
- int16_t *nft)
+static bool ath9k_hw_get_nf_thresh(struct ath_hw *ah,
+ enum ieee80211_band band,
+ int16_t *nft)
{
switch (band) {
case IEEE80211_BAND_5GHZ:
@@ -186,44 +107,8 @@ static bool getNoiseFloorThresh(struct ath_hw *ah,
return true;
}
-static void ath9k_hw_setup_calibration(struct ath_hw *ah,
- struct ath9k_cal_list *currCal)
-{
- struct ath_common *common = ath9k_hw_common(ah);
-
- REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(0),
- AR_PHY_TIMING_CTRL4_IQCAL_LOG_COUNT_MAX,
- currCal->calData->calCountMax);
-
- switch (currCal->calData->calType) {
- case IQ_MISMATCH_CAL:
- REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_IQ);
- ath_print(common, ATH_DBG_CALIBRATE,
- "starting IQ Mismatch Calibration\n");
- break;
- case ADC_GAIN_CAL:
- REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_GAIN);
- ath_print(common, ATH_DBG_CALIBRATE,
- "starting ADC Gain Calibration\n");
- break;
- case ADC_DC_CAL:
- REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_DC_PER);
- ath_print(common, ATH_DBG_CALIBRATE,
- "starting ADC DC Calibration\n");
- break;
- case ADC_DC_INIT_CAL:
- REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_DC_INIT);
- ath_print(common, ATH_DBG_CALIBRATE,
- "starting Init ADC DC Calibration\n");
- break;
- }
-
- REG_SET_BIT(ah, AR_PHY_TIMING_CTRL4(0),
- AR_PHY_TIMING_CTRL4_DO_CAL);
-}
-
-static void ath9k_hw_reset_calibration(struct ath_hw *ah,
- struct ath9k_cal_list *currCal)
+void ath9k_hw_reset_calibration(struct ath_hw *ah,
+ struct ath9k_cal_list *currCal)
{
int i;
@@ -241,324 +126,6 @@ static void ath9k_hw_reset_calibration(struct ath_hw *ah,
ah->cal_samples = 0;
}
-static bool ath9k_hw_per_calibration(struct ath_hw *ah,
- struct ath9k_channel *ichan,
- u8 rxchainmask,
- struct ath9k_cal_list *currCal)
-{
- bool iscaldone = false;
-
- if (currCal->calState == CAL_RUNNING) {
- if (!(REG_READ(ah, AR_PHY_TIMING_CTRL4(0)) &
- AR_PHY_TIMING_CTRL4_DO_CAL)) {
-
- currCal->calData->calCollect(ah);
- ah->cal_samples++;
-
- if (ah->cal_samples >= currCal->calData->calNumSamples) {
- int i, numChains = 0;
- for (i = 0; i < AR5416_MAX_CHAINS; i++) {
- if (rxchainmask & (1 << i))
- numChains++;
- }
-
- currCal->calData->calPostProc(ah, numChains);
- ichan->CalValid |= currCal->calData->calType;
- currCal->calState = CAL_DONE;
- iscaldone = true;
- } else {
- ath9k_hw_setup_calibration(ah, currCal);
- }
- }
- } else if (!(ichan->CalValid & currCal->calData->calType)) {
- ath9k_hw_reset_calibration(ah, currCal);
- }
-
- return iscaldone;
-}
-
-/* Assumes you are talking about the currently configured channel */
-static bool ath9k_hw_iscal_supported(struct ath_hw *ah,
- enum ath9k_cal_types calType)
-{
- struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
-
- switch (calType & ah->supp_cals) {
- case IQ_MISMATCH_CAL: /* Both 2 GHz and 5 GHz support OFDM */
- return true;
- case ADC_GAIN_CAL:
- case ADC_DC_CAL:
- if (!(conf->channel->band == IEEE80211_BAND_2GHZ &&
- conf_is_ht20(conf)))
- return true;
- break;
- }
- return false;
-}
-
-static void ath9k_hw_iqcal_collect(struct ath_hw *ah)
-{
- int i;
-
- for (i = 0; i < AR5416_MAX_CHAINS; i++) {
- ah->totalPowerMeasI[i] +=
- REG_READ(ah, AR_PHY_CAL_MEAS_0(i));
- ah->totalPowerMeasQ[i] +=
- REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
- ah->totalIqCorrMeas[i] +=
- (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
- ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
- "%d: Chn %d pmi=0x%08x;pmq=0x%08x;iqcm=0x%08x;\n",
- ah->cal_samples, i, ah->totalPowerMeasI[i],
- ah->totalPowerMeasQ[i],
- ah->totalIqCorrMeas[i]);
- }
-}
-
-static void ath9k_hw_adc_gaincal_collect(struct ath_hw *ah)
-{
- int i;
-
- for (i = 0; i < AR5416_MAX_CHAINS; i++) {
- ah->totalAdcIOddPhase[i] +=
- REG_READ(ah, AR_PHY_CAL_MEAS_0(i));
- ah->totalAdcIEvenPhase[i] +=
- REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
- ah->totalAdcQOddPhase[i] +=
- REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
- ah->totalAdcQEvenPhase[i] +=
- REG_READ(ah, AR_PHY_CAL_MEAS_3(i));
-
- ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
- "%d: Chn %d oddi=0x%08x; eveni=0x%08x; "
- "oddq=0x%08x; evenq=0x%08x;\n",
- ah->cal_samples, i,
- ah->totalAdcIOddPhase[i],
- ah->totalAdcIEvenPhase[i],
- ah->totalAdcQOddPhase[i],
- ah->totalAdcQEvenPhase[i]);
- }
-}
-
-static void ath9k_hw_adc_dccal_collect(struct ath_hw *ah)
-{
- int i;
-
- for (i = 0; i < AR5416_MAX_CHAINS; i++) {
- ah->totalAdcDcOffsetIOddPhase[i] +=
- (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_0(i));
- ah->totalAdcDcOffsetIEvenPhase[i] +=
- (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
- ah->totalAdcDcOffsetQOddPhase[i] +=
- (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
- ah->totalAdcDcOffsetQEvenPhase[i] +=
- (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_3(i));
-
- ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
- "%d: Chn %d oddi=0x%08x; eveni=0x%08x; "
- "oddq=0x%08x; evenq=0x%08x;\n",
- ah->cal_samples, i,
- ah->totalAdcDcOffsetIOddPhase[i],
- ah->totalAdcDcOffsetIEvenPhase[i],
- ah->totalAdcDcOffsetQOddPhase[i],
- ah->totalAdcDcOffsetQEvenPhase[i]);
- }
-}
-
-static void ath9k_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
-{
- struct ath_common *common = ath9k_hw_common(ah);
- u32 powerMeasQ, powerMeasI, iqCorrMeas;
- u32 qCoffDenom, iCoffDenom;
- int32_t qCoff, iCoff;
- int iqCorrNeg, i;
-
- for (i = 0; i < numChains; i++) {
- powerMeasI = ah->totalPowerMeasI[i];
- powerMeasQ = ah->totalPowerMeasQ[i];
- iqCorrMeas = ah->totalIqCorrMeas[i];
-
- ath_print(common, ATH_DBG_CALIBRATE,
- "Starting IQ Cal and Correction for Chain %d\n",
- i);
-
- ath_print(common, ATH_DBG_CALIBRATE,
- "Orignal: Chn %diq_corr_meas = 0x%08x\n",
- i, ah->totalIqCorrMeas[i]);
-
- iqCorrNeg = 0;
-
- if (iqCorrMeas > 0x80000000) {
- iqCorrMeas = (0xffffffff - iqCorrMeas) + 1;
- iqCorrNeg = 1;
- }
-
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_i = 0x%08x\n", i, powerMeasI);
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_q = 0x%08x\n", i, powerMeasQ);
- ath_print(common, ATH_DBG_CALIBRATE, "iqCorrNeg is 0x%08x\n",
- iqCorrNeg);
-
- iCoffDenom = (powerMeasI / 2 + powerMeasQ / 2) / 128;
- qCoffDenom = powerMeasQ / 64;
-
- if ((powerMeasQ != 0) && (iCoffDenom != 0) &&
- (qCoffDenom != 0)) {
- iCoff = iqCorrMeas / iCoffDenom;
- qCoff = powerMeasI / qCoffDenom - 64;
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d iCoff = 0x%08x\n", i, iCoff);
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d qCoff = 0x%08x\n", i, qCoff);
-
- iCoff = iCoff & 0x3f;
- ath_print(common, ATH_DBG_CALIBRATE,
- "New: Chn %d iCoff = 0x%08x\n", i, iCoff);
- if (iqCorrNeg == 0x0)
- iCoff = 0x40 - iCoff;
-
- if (qCoff > 15)
- qCoff = 15;
- else if (qCoff <= -16)
- qCoff = 16;
-
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d : iCoff = 0x%x qCoff = 0x%x\n",
- i, iCoff, qCoff);
-
- REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(i),
- AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF,
- iCoff);
- REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(i),
- AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF,
- qCoff);
- ath_print(common, ATH_DBG_CALIBRATE,
- "IQ Cal and Correction done for Chain %d\n",
- i);
- }
- }
-
- REG_SET_BIT(ah, AR_PHY_TIMING_CTRL4(0),
- AR_PHY_TIMING_CTRL4_IQCORR_ENABLE);
-}
-
-static void ath9k_hw_adc_gaincal_calibrate(struct ath_hw *ah, u8 numChains)
-{
- struct ath_common *common = ath9k_hw_common(ah);
- u32 iOddMeasOffset, iEvenMeasOffset, qOddMeasOffset, qEvenMeasOffset;
- u32 qGainMismatch, iGainMismatch, val, i;
-
- for (i = 0; i < numChains; i++) {
- iOddMeasOffset = ah->totalAdcIOddPhase[i];
- iEvenMeasOffset = ah->totalAdcIEvenPhase[i];
- qOddMeasOffset = ah->totalAdcQOddPhase[i];
- qEvenMeasOffset = ah->totalAdcQEvenPhase[i];
-
- ath_print(common, ATH_DBG_CALIBRATE,
- "Starting ADC Gain Cal for Chain %d\n", i);
-
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_odd_i = 0x%08x\n", i,
- iOddMeasOffset);
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_even_i = 0x%08x\n", i,
- iEvenMeasOffset);
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_odd_q = 0x%08x\n", i,
- qOddMeasOffset);
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_even_q = 0x%08x\n", i,
- qEvenMeasOffset);
-
- if (iOddMeasOffset != 0 && qEvenMeasOffset != 0) {
- iGainMismatch =
- ((iEvenMeasOffset * 32) /
- iOddMeasOffset) & 0x3f;
- qGainMismatch =
- ((qOddMeasOffset * 32) /
- qEvenMeasOffset) & 0x3f;
-
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d gain_mismatch_i = 0x%08x\n", i,
- iGainMismatch);
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d gain_mismatch_q = 0x%08x\n", i,
- qGainMismatch);
-
- val = REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i));
- val &= 0xfffff000;
- val |= (qGainMismatch) | (iGainMismatch << 6);
- REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i), val);
-
- ath_print(common, ATH_DBG_CALIBRATE,
- "ADC Gain Cal done for Chain %d\n", i);
- }
- }
-
- REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0),
- REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0)) |
- AR_PHY_NEW_ADC_GAIN_CORR_ENABLE);
-}
-
-static void ath9k_hw_adc_dccal_calibrate(struct ath_hw *ah, u8 numChains)
-{
- struct ath_common *common = ath9k_hw_common(ah);
- u32 iOddMeasOffset, iEvenMeasOffset, val, i;
- int32_t qOddMeasOffset, qEvenMeasOffset, qDcMismatch, iDcMismatch;
- const struct ath9k_percal_data *calData =
- ah->cal_list_curr->calData;
- u32 numSamples =
- (1 << (calData->calCountMax + 5)) * calData->calNumSamples;
-
- for (i = 0; i < numChains; i++) {
- iOddMeasOffset = ah->totalAdcDcOffsetIOddPhase[i];
- iEvenMeasOffset = ah->totalAdcDcOffsetIEvenPhase[i];
- qOddMeasOffset = ah->totalAdcDcOffsetQOddPhase[i];
- qEvenMeasOffset = ah->totalAdcDcOffsetQEvenPhase[i];
-
- ath_print(common, ATH_DBG_CALIBRATE,
- "Starting ADC DC Offset Cal for Chain %d\n", i);
-
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_odd_i = %d\n", i,
- iOddMeasOffset);
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_even_i = %d\n", i,
- iEvenMeasOffset);
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_odd_q = %d\n", i,
- qOddMeasOffset);
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_even_q = %d\n", i,
- qEvenMeasOffset);
-
- iDcMismatch = (((iEvenMeasOffset - iOddMeasOffset) * 2) /
- numSamples) & 0x1ff;
- qDcMismatch = (((qOddMeasOffset - qEvenMeasOffset) * 2) /
- numSamples) & 0x1ff;
-
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d dc_offset_mismatch_i = 0x%08x\n", i,
- iDcMismatch);
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d dc_offset_mismatch_q = 0x%08x\n", i,
- qDcMismatch);
-
- val = REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i));
- val &= 0xc0000fff;
- val |= (qDcMismatch << 12) | (iDcMismatch << 21);
- REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i), val);
-
- ath_print(common, ATH_DBG_CALIBRATE,
- "ADC DC Offset Cal done for Chain %d\n", i);
- }
-
- REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0),
- REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0)) |
- AR_PHY_NEW_ADC_DC_OFFSET_CORR_ENABLE);
-}
-
/* This is done for the currently configured channel */
bool ath9k_hw_reset_calvalid(struct ath_hw *ah)
{
@@ -605,72 +172,6 @@ void ath9k_hw_start_nfcal(struct ath_hw *ah)
REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF);
}
-void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
-{
- struct ath9k_nfcal_hist *h;
- int i, j;
- int32_t val;
- const u32 ar5416_cca_regs[6] = {
- AR_PHY_CCA,
- AR_PHY_CH1_CCA,
- AR_PHY_CH2_CCA,
- AR_PHY_EXT_CCA,
- AR_PHY_CH1_EXT_CCA,
- AR_PHY_CH2_EXT_CCA
- };
- u8 chainmask, rx_chain_status;
-
- rx_chain_status = REG_READ(ah, AR_PHY_RX_CHAINMASK);
- if (AR_SREV_9285(ah))
- chainmask = 0x9;
- else if (AR_SREV_9280(ah) || AR_SREV_9287(ah)) {
- if ((rx_chain_status & 0x2) || (rx_chain_status & 0x4))
- chainmask = 0x1B;
- else
- chainmask = 0x09;
- } else {
- if (rx_chain_status & 0x4)
- chainmask = 0x3F;
- else if (rx_chain_status & 0x2)
- chainmask = 0x1B;
- else
- chainmask = 0x09;
- }
-
- h = ah->nfCalHist;
-
- for (i = 0; i < NUM_NF_READINGS; i++) {
- if (chainmask & (1 << i)) {
- val = REG_READ(ah, ar5416_cca_regs[i]);
- val &= 0xFFFFFE00;
- val |= (((u32) (h[i].privNF) << 1) & 0x1ff);
- REG_WRITE(ah, ar5416_cca_regs[i], val);
- }
- }
-
- REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
- AR_PHY_AGC_CONTROL_ENABLE_NF);
- REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
- AR_PHY_AGC_CONTROL_NO_UPDATE_NF);
- REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF);
-
- for (j = 0; j < 5; j++) {
- if ((REG_READ(ah, AR_PHY_AGC_CONTROL) &
- AR_PHY_AGC_CONTROL_NF) == 0)
- break;
- udelay(50);
- }
-
- for (i = 0; i < NUM_NF_READINGS; i++) {
- if (chainmask & (1 << i)) {
- val = REG_READ(ah, ar5416_cca_regs[i]);
- val &= 0xFFFFFE00;
- val |= (((u32) (-50) << 1) & 0x1ff);
- REG_WRITE(ah, ar5416_cca_regs[i], val);
- }
- }
-}
-
int16_t ath9k_hw_getnf(struct ath_hw *ah,
struct ath9k_channel *chan)
{
@@ -690,7 +191,7 @@ int16_t ath9k_hw_getnf(struct ath_hw *ah,
} else {
ath9k_hw_do_getnf(ah, nfarray);
nf = nfarray[0];
- if (getNoiseFloorThresh(ah, c->band, &nfThresh)
+ if (ath9k_hw_get_nf_thresh(ah, c->band, &nfThresh)
&& nf > nfThresh) {
ath_print(common, ATH_DBG_CALIBRATE,
"noise floor failed detected; "
@@ -715,7 +216,7 @@ void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah)
if (AR_SREV_9280(ah))
noise_floor = AR_PHY_CCA_MAX_AR9280_GOOD_VALUE;
- else if (AR_SREV_9285(ah))
+ else if (AR_SREV_9285(ah) || AR_SREV_9271(ah))
noise_floor = AR_PHY_CCA_MAX_AR9285_GOOD_VALUE;
else if (AR_SREV_9287(ah))
noise_floor = AR_PHY_CCA_MAX_AR9287_GOOD_VALUE;
@@ -748,508 +249,3 @@ s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan)
return nf;
}
EXPORT_SYMBOL(ath9k_hw_getchan_noise);
-
-static void ath9k_olc_temp_compensation_9287(struct ath_hw *ah)
-{
- u32 rddata;
- int32_t delta, currPDADC, slope;
-
- rddata = REG_READ(ah, AR_PHY_TX_PWRCTRL4);
- currPDADC = MS(rddata, AR_PHY_TX_PWRCTRL_PD_AVG_OUT);
-
- if (ah->initPDADC == 0 || currPDADC == 0) {
- /*
- * Zero value indicates that no frames have been transmitted yet,
- * can't do temperature compensation until frames are transmitted.
- */
- return;
- } else {
- slope = ah->eep_ops->get_eeprom(ah, EEP_TEMPSENSE_SLOPE);
-
- if (slope == 0) { /* to avoid divide by zero case */
- delta = 0;
- } else {
- delta = ((currPDADC - ah->initPDADC)*4) / slope;
- }
- REG_RMW_FIELD(ah, AR_PHY_CH0_TX_PWRCTRL11,
- AR_PHY_TX_PWRCTRL_OLPC_TEMP_COMP, delta);
- REG_RMW_FIELD(ah, AR_PHY_CH1_TX_PWRCTRL11,
- AR_PHY_TX_PWRCTRL_OLPC_TEMP_COMP, delta);
- }
-}
-
-static void ath9k_olc_temp_compensation(struct ath_hw *ah)
-{
- u32 rddata, i;
- int delta, currPDADC, regval;
-
- if (OLC_FOR_AR9287_10_LATER) {
- ath9k_olc_temp_compensation_9287(ah);
- } else {
- rddata = REG_READ(ah, AR_PHY_TX_PWRCTRL4);
- currPDADC = MS(rddata, AR_PHY_TX_PWRCTRL_PD_AVG_OUT);
-
- if (ah->initPDADC == 0 || currPDADC == 0) {
- return;
- } else {
- if (ah->eep_ops->get_eeprom(ah, EEP_DAC_HPWR_5G))
- delta = (currPDADC - ah->initPDADC + 4) / 8;
- else
- delta = (currPDADC - ah->initPDADC + 5) / 10;
-
- if (delta != ah->PDADCdelta) {
- ah->PDADCdelta = delta;
- for (i = 1; i < AR9280_TX_GAIN_TABLE_SIZE; i++) {
- regval = ah->originalGain[i] - delta;
- if (regval < 0)
- regval = 0;
-
- REG_RMW_FIELD(ah,
- AR_PHY_TX_GAIN_TBL1 + i * 4,
- AR_PHY_TX_GAIN, regval);
- }
- }
- }
- }
-}
-
-static void ath9k_hw_9271_pa_cal(struct ath_hw *ah, bool is_reset)
-{
- u32 regVal;
- unsigned int i;
- u32 regList [][2] = {
- { 0x786c, 0 },
- { 0x7854, 0 },
- { 0x7820, 0 },
- { 0x7824, 0 },
- { 0x7868, 0 },
- { 0x783c, 0 },
- { 0x7838, 0 } ,
- { 0x7828, 0 } ,
- };
-
- for (i = 0; i < ARRAY_SIZE(regList); i++)
- regList[i][1] = REG_READ(ah, regList[i][0]);
-
- regVal = REG_READ(ah, 0x7834);
- regVal &= (~(0x1));
- REG_WRITE(ah, 0x7834, regVal);
- regVal = REG_READ(ah, 0x9808);
- regVal |= (0x1 << 27);
- REG_WRITE(ah, 0x9808, regVal);
-
- /* 786c,b23,1, pwddac=1 */
- REG_RMW_FIELD(ah, AR9285_AN_TOP3, AR9285_AN_TOP3_PWDDAC, 1);
- /* 7854, b5,1, pdrxtxbb=1 */
- REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDRXTXBB1, 1);
- /* 7854, b7,1, pdv2i=1 */
- REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDV2I, 1);
- /* 7854, b8,1, pddacinterface=1 */
- REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDDACIF, 1);
- /* 7824,b12,0, offcal=0 */
- REG_RMW_FIELD(ah, AR9285_AN_RF2G2, AR9285_AN_RF2G2_OFFCAL, 0);
- /* 7838, b1,0, pwddb=0 */
- REG_RMW_FIELD(ah, AR9285_AN_RF2G7, AR9285_AN_RF2G7_PWDDB, 0);
- /* 7820,b11,0, enpacal=0 */
- REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_ENPACAL, 0);
- /* 7820,b25,1, pdpadrv1=0 */
- REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV1, 0);
- /* 7820,b24,0, pdpadrv2=0 */
- REG_RMW_FIELD(ah, AR9285_AN_RF2G1,AR9285_AN_RF2G1_PDPADRV2,0);
- /* 7820,b23,0, pdpaout=0 */
- REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPAOUT, 0);
- /* 783c,b14-16,7, padrvgn2tab_0=7 */
- REG_RMW_FIELD(ah, AR9285_AN_RF2G8,AR9285_AN_RF2G8_PADRVGN2TAB0, 7);
- /*
- * 7838,b29-31,0, padrvgn1tab_0=0
- * does not matter since we turn it off
- */
- REG_RMW_FIELD(ah, AR9285_AN_RF2G7,AR9285_AN_RF2G7_PADRVGN2TAB0, 0);
-
- REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9271_AN_RF2G3_CCOMP, 0xfff);
-
- /* Set:
- * localmode=1,bmode=1,bmoderxtx=1,synthon=1,
- * txon=1,paon=1,oscon=1,synthon_force=1
- */
- REG_WRITE(ah, AR9285_AN_TOP2, 0xca0358a0);
- udelay(30);
- REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9271_AN_RF2G6_OFFS, 0);
-
- /* find off_6_1; */
- for (i = 6; i > 0; i--) {
- regVal = REG_READ(ah, 0x7834);
- regVal |= (1 << (20 + i));
- REG_WRITE(ah, 0x7834, regVal);
- udelay(1);
- //regVal = REG_READ(ah, 0x7834);
- regVal &= (~(0x1 << (20 + i)));
- regVal |= (MS(REG_READ(ah, 0x7840), AR9285_AN_RXTXBB1_SPARE9)
- << (20 + i));
- REG_WRITE(ah, 0x7834, regVal);
- }
-
- regVal = (regVal >>20) & 0x7f;
-
- /* Update PA cal info */
- if ((!is_reset) && (ah->pacal_info.prev_offset == regVal)) {
- if (ah->pacal_info.max_skipcount < MAX_PACAL_SKIPCOUNT)
- ah->pacal_info.max_skipcount =
- 2 * ah->pacal_info.max_skipcount;
- ah->pacal_info.skipcount = ah->pacal_info.max_skipcount;
- } else {
- ah->pacal_info.max_skipcount = 1;
- ah->pacal_info.skipcount = 0;
- ah->pacal_info.prev_offset = regVal;
- }
-
- regVal = REG_READ(ah, 0x7834);
- regVal |= 0x1;
- REG_WRITE(ah, 0x7834, regVal);
- regVal = REG_READ(ah, 0x9808);
- regVal &= (~(0x1 << 27));
- REG_WRITE(ah, 0x9808, regVal);
-
- for (i = 0; i < ARRAY_SIZE(regList); i++)
- REG_WRITE(ah, regList[i][0], regList[i][1]);
-}
-
-static inline void ath9k_hw_9285_pa_cal(struct ath_hw *ah, bool is_reset)
-{
- struct ath_common *common = ath9k_hw_common(ah);
- u32 regVal;
- int i, offset, offs_6_1, offs_0;
- u32 ccomp_org, reg_field;
- u32 regList[][2] = {
- { 0x786c, 0 },
- { 0x7854, 0 },
- { 0x7820, 0 },
- { 0x7824, 0 },
- { 0x7868, 0 },
- { 0x783c, 0 },
- { 0x7838, 0 },
- };
-
- ath_print(common, ATH_DBG_CALIBRATE, "Running PA Calibration\n");
-
- /* PA CAL is not needed for high power solution */
- if (ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE) ==
- AR5416_EEP_TXGAIN_HIGH_POWER)
- return;
-
- if (AR_SREV_9285_11(ah)) {
- REG_WRITE(ah, AR9285_AN_TOP4, (AR9285_AN_TOP4_DEFAULT | 0x14));
- udelay(10);
- }
-
- for (i = 0; i < ARRAY_SIZE(regList); i++)
- regList[i][1] = REG_READ(ah, regList[i][0]);
-
- regVal = REG_READ(ah, 0x7834);
- regVal &= (~(0x1));
- REG_WRITE(ah, 0x7834, regVal);
- regVal = REG_READ(ah, 0x9808);
- regVal |= (0x1 << 27);
- REG_WRITE(ah, 0x9808, regVal);
-
- REG_RMW_FIELD(ah, AR9285_AN_TOP3, AR9285_AN_TOP3_PWDDAC, 1);
- REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDRXTXBB1, 1);
- REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDV2I, 1);
- REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDDACIF, 1);
- REG_RMW_FIELD(ah, AR9285_AN_RF2G2, AR9285_AN_RF2G2_OFFCAL, 0);
- REG_RMW_FIELD(ah, AR9285_AN_RF2G7, AR9285_AN_RF2G7_PWDDB, 0);
- REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_ENPACAL, 0);
- REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV1, 0);
- REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV2, 0);
- REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPAOUT, 0);
- REG_RMW_FIELD(ah, AR9285_AN_RF2G8, AR9285_AN_RF2G8_PADRVGN2TAB0, 7);
- REG_RMW_FIELD(ah, AR9285_AN_RF2G7, AR9285_AN_RF2G7_PADRVGN2TAB0, 0);
- ccomp_org = MS(REG_READ(ah, AR9285_AN_RF2G6), AR9285_AN_RF2G6_CCOMP);
- REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9285_AN_RF2G6_CCOMP, 0xf);
-
- REG_WRITE(ah, AR9285_AN_TOP2, 0xca0358a0);
- udelay(30);
- REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9285_AN_RF2G6_OFFS, 0);
- REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_PDVCCOMP, 0);
-
- for (i = 6; i > 0; i--) {
- regVal = REG_READ(ah, 0x7834);
- regVal |= (1 << (19 + i));
- REG_WRITE(ah, 0x7834, regVal);
- udelay(1);
- regVal = REG_READ(ah, 0x7834);
- regVal &= (~(0x1 << (19 + i)));
- reg_field = MS(REG_READ(ah, 0x7840), AR9285_AN_RXTXBB1_SPARE9);
- regVal |= (reg_field << (19 + i));
- REG_WRITE(ah, 0x7834, regVal);
- }
-
- REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_PDVCCOMP, 1);
- udelay(1);
- reg_field = MS(REG_READ(ah, AR9285_AN_RF2G9), AR9285_AN_RXTXBB1_SPARE9);
- REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_PDVCCOMP, reg_field);
- offs_6_1 = MS(REG_READ(ah, AR9285_AN_RF2G6), AR9285_AN_RF2G6_OFFS);
- offs_0 = MS(REG_READ(ah, AR9285_AN_RF2G3), AR9285_AN_RF2G3_PDVCCOMP);
-
- offset = (offs_6_1<<1) | offs_0;
- offset = offset - 0;
- offs_6_1 = offset>>1;
- offs_0 = offset & 1;
-
- if ((!is_reset) && (ah->pacal_info.prev_offset == offset)) {
- if (ah->pacal_info.max_skipcount < MAX_PACAL_SKIPCOUNT)
- ah->pacal_info.max_skipcount =
- 2 * ah->pacal_info.max_skipcount;
- ah->pacal_info.skipcount = ah->pacal_info.max_skipcount;
- } else {
- ah->pacal_info.max_skipcount = 1;
- ah->pacal_info.skipcount = 0;
- ah->pacal_info.prev_offset = offset;
- }
-
- REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9285_AN_RF2G6_OFFS, offs_6_1);
- REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_PDVCCOMP, offs_0);
-
- regVal = REG_READ(ah, 0x7834);
- regVal |= 0x1;
- REG_WRITE(ah, 0x7834, regVal);
- regVal = REG_READ(ah, 0x9808);
- regVal &= (~(0x1 << 27));
- REG_WRITE(ah, 0x9808, regVal);
-
- for (i = 0; i < ARRAY_SIZE(regList); i++)
- REG_WRITE(ah, regList[i][0], regList[i][1]);
-
- REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9285_AN_RF2G6_CCOMP, ccomp_org);
-
- if (AR_SREV_9285_11(ah))
- REG_WRITE(ah, AR9285_AN_TOP4, AR9285_AN_TOP4_DEFAULT);
-
-}
-
-bool ath9k_hw_calibrate(struct ath_hw *ah, struct ath9k_channel *chan,
- u8 rxchainmask, bool longcal)
-{
- bool iscaldone = true;
- struct ath9k_cal_list *currCal = ah->cal_list_curr;
-
- if (currCal &&
- (currCal->calState == CAL_RUNNING ||
- currCal->calState == CAL_WAITING)) {
- iscaldone = ath9k_hw_per_calibration(ah, chan,
- rxchainmask, currCal);
- if (iscaldone) {
- ah->cal_list_curr = currCal = currCal->calNext;
-
- if (currCal->calState == CAL_WAITING) {
- iscaldone = false;
- ath9k_hw_reset_calibration(ah, currCal);
- }
- }
- }
-
- /* Do NF cal only at longer intervals */
- if (longcal) {
- /* Do periodic PAOffset Cal */
- if (AR_SREV_9271(ah))
- ath9k_hw_9271_pa_cal(ah, false);
- else if (AR_SREV_9285_11_OR_LATER(ah)) {
- if (!ah->pacal_info.skipcount)
- ath9k_hw_9285_pa_cal(ah, false);
- else
- ah->pacal_info.skipcount--;
- }
-
- if (OLC_FOR_AR9280_20_LATER || OLC_FOR_AR9287_10_LATER)
- ath9k_olc_temp_compensation(ah);
-
- /* Get the value from the previous NF cal and update history buffer */
- ath9k_hw_getnf(ah, chan);
-
- /*
- * Load the NF from history buffer of the current channel.
- * NF is slow time-variant, so it is OK to use a historical value.
- */
- ath9k_hw_loadnf(ah, ah->curchan);
-
- ath9k_hw_start_nfcal(ah);
- }
-
- return iscaldone;
-}
-EXPORT_SYMBOL(ath9k_hw_calibrate);
-
-/* Carrier leakage Calibration fix */
-static bool ar9285_clc(struct ath_hw *ah, struct ath9k_channel *chan)
-{
- struct ath_common *common = ath9k_hw_common(ah);
-
- REG_SET_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_CL_CAL_ENABLE);
- if (IS_CHAN_HT20(chan)) {
- REG_SET_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_PARALLEL_CAL_ENABLE);
- REG_SET_BIT(ah, AR_PHY_TURBO, AR_PHY_FC_DYN2040_EN);
- REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
- AR_PHY_AGC_CONTROL_FLTR_CAL);
- REG_CLR_BIT(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_CAL_ENABLE);
- REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL);
- if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
- AR_PHY_AGC_CONTROL_CAL, 0, AH_WAIT_TIMEOUT)) {
- ath_print(common, ATH_DBG_CALIBRATE, "offset "
- "calibration failed to complete in "
- "1ms; noisy ??\n");
- return false;
- }
- REG_CLR_BIT(ah, AR_PHY_TURBO, AR_PHY_FC_DYN2040_EN);
- REG_CLR_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_PARALLEL_CAL_ENABLE);
- REG_CLR_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_CL_CAL_ENABLE);
- }
- REG_CLR_BIT(ah, AR_PHY_ADC_CTL, AR_PHY_ADC_CTL_OFF_PWDADC);
- REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_FLTR_CAL);
- REG_SET_BIT(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_CAL_ENABLE);
- REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL);
- if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL,
- 0, AH_WAIT_TIMEOUT)) {
- ath_print(common, ATH_DBG_CALIBRATE, "offset calibration "
- "failed to complete in 1ms; noisy ??\n");
- return false;
- }
-
- REG_SET_BIT(ah, AR_PHY_ADC_CTL, AR_PHY_ADC_CTL_OFF_PWDADC);
- REG_CLR_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_CL_CAL_ENABLE);
- REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_FLTR_CAL);
-
- return true;
-}
-
-bool ath9k_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
-{
- struct ath_common *common = ath9k_hw_common(ah);
-
- if (AR_SREV_9271(ah) || AR_SREV_9285_12_OR_LATER(ah)) {
- if (!ar9285_clc(ah, chan))
- return false;
- } else {
- if (AR_SREV_9280_10_OR_LATER(ah)) {
- if (!AR_SREV_9287_10_OR_LATER(ah))
- REG_CLR_BIT(ah, AR_PHY_ADC_CTL,
- AR_PHY_ADC_CTL_OFF_PWDADC);
- REG_SET_BIT(ah, AR_PHY_AGC_CONTROL,
- AR_PHY_AGC_CONTROL_FLTR_CAL);
- }
-
- /* Calibrate the AGC */
- REG_WRITE(ah, AR_PHY_AGC_CONTROL,
- REG_READ(ah, AR_PHY_AGC_CONTROL) |
- AR_PHY_AGC_CONTROL_CAL);
-
- /* Poll for offset calibration complete */
- if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL,
- 0, AH_WAIT_TIMEOUT)) {
- ath_print(common, ATH_DBG_CALIBRATE,
- "offset calibration failed to "
- "complete in 1ms; noisy environment?\n");
- return false;
- }
-
- if (AR_SREV_9280_10_OR_LATER(ah)) {
- if (!AR_SREV_9287_10_OR_LATER(ah))
- REG_SET_BIT(ah, AR_PHY_ADC_CTL,
- AR_PHY_ADC_CTL_OFF_PWDADC);
- REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
- AR_PHY_AGC_CONTROL_FLTR_CAL);
- }
- }
-
- /* Do PA Calibration */
- if (AR_SREV_9271(ah))
- ath9k_hw_9271_pa_cal(ah, true);
- else if (AR_SREV_9285_11_OR_LATER(ah))
- ath9k_hw_9285_pa_cal(ah, true);
-
- /* Do NF Calibration after DC offset and other calibrations */
- REG_WRITE(ah, AR_PHY_AGC_CONTROL,
- REG_READ(ah, AR_PHY_AGC_CONTROL) | AR_PHY_AGC_CONTROL_NF);
-
- ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL;
-
- /* Enable IQ, ADC Gain and ADC DC offset CALs */
- if (AR_SREV_9100(ah) || AR_SREV_9160_10_OR_LATER(ah)) {
- if (ath9k_hw_iscal_supported(ah, ADC_GAIN_CAL)) {
- INIT_CAL(&ah->adcgain_caldata);
- INSERT_CAL(ah, &ah->adcgain_caldata);
- ath_print(common, ATH_DBG_CALIBRATE,
- "enabling ADC Gain Calibration.\n");
- }
- if (ath9k_hw_iscal_supported(ah, ADC_DC_CAL)) {
- INIT_CAL(&ah->adcdc_caldata);
- INSERT_CAL(ah, &ah->adcdc_caldata);
- ath_print(common, ATH_DBG_CALIBRATE,
- "enabling ADC DC Calibration.\n");
- }
- if (ath9k_hw_iscal_supported(ah, IQ_MISMATCH_CAL)) {
- INIT_CAL(&ah->iq_caldata);
- INSERT_CAL(ah, &ah->iq_caldata);
- ath_print(common, ATH_DBG_CALIBRATE,
- "enabling IQ Calibration.\n");
- }
-
- ah->cal_list_curr = ah->cal_list;
-
- if (ah->cal_list_curr)
- ath9k_hw_reset_calibration(ah, ah->cal_list_curr);
- }
-
- chan->CalValid = 0;
-
- return true;
-}
-
-const struct ath9k_percal_data iq_cal_multi_sample = {
- IQ_MISMATCH_CAL,
- MAX_CAL_SAMPLES,
- PER_MIN_LOG_COUNT,
- ath9k_hw_iqcal_collect,
- ath9k_hw_iqcalibrate
-};
-const struct ath9k_percal_data iq_cal_single_sample = {
- IQ_MISMATCH_CAL,
- MIN_CAL_SAMPLES,
- PER_MAX_LOG_COUNT,
- ath9k_hw_iqcal_collect,
- ath9k_hw_iqcalibrate
-};
-const struct ath9k_percal_data adc_gain_cal_multi_sample = {
- ADC_GAIN_CAL,
- MAX_CAL_SAMPLES,
- PER_MIN_LOG_COUNT,
- ath9k_hw_adc_gaincal_collect,
- ath9k_hw_adc_gaincal_calibrate
-};
-const struct ath9k_percal_data adc_gain_cal_single_sample = {
- ADC_GAIN_CAL,
- MIN_CAL_SAMPLES,
- PER_MAX_LOG_COUNT,
- ath9k_hw_adc_gaincal_collect,
- ath9k_hw_adc_gaincal_calibrate
-};
-const struct ath9k_percal_data adc_dc_cal_multi_sample = {
- ADC_DC_CAL,
- MAX_CAL_SAMPLES,
- PER_MIN_LOG_COUNT,
- ath9k_hw_adc_dccal_collect,
- ath9k_hw_adc_dccal_calibrate
-};
-const struct ath9k_percal_data adc_dc_cal_single_sample = {
- ADC_DC_CAL,
- MIN_CAL_SAMPLES,
- PER_MAX_LOG_COUNT,
- ath9k_hw_adc_dccal_collect,
- ath9k_hw_adc_dccal_calibrate
-};
-const struct ath9k_percal_data adc_init_dc_cal = {
- ADC_DC_INIT_CAL,
- MIN_CAL_SAMPLES,
- INIT_LOG_COUNT,
- ath9k_hw_adc_dccal_collect,
- ath9k_hw_adc_dccal_calibrate
-};
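
With the chip-specific routines removed above (ath9k_hw_do_getnf(), ath9k_hw_setup_calibration(), the PA/carrier-leak code and the per-cal data tables), calib.c keeps only the common path and reaches the family-specific code through hw-ops.h. A rough sketch of that indirection pattern; the struct and field names below are assumptions for illustration and are not taken from hw-ops.h:

struct ath9k_cal_ops_sketch {
	void (*setup_calibration)(struct ath_hw *ah,
				  struct ath9k_cal_list *currCal);
	void (*do_getnf)(struct ath_hw *ah,
			 int16_t nfarray[NUM_NF_READINGS]);
	bool (*calibrate)(struct ath_hw *ah, struct ath9k_channel *chan,
			  u8 rxchainmask, bool longcal);
};

/* Common code calls through the table instead of open-coding
 * AR_SREV_*() checks for every chip family. */
static void example_getnf(struct ath_hw *ah,
			  const struct ath9k_cal_ops_sketch *ops,
			  int16_t nfarray[NUM_NF_READINGS])
{
	ops->do_getnf(ah, nfarray);
}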
diff --git a/drivers/net/wireless/ath/ath9k/calib.h b/drivers/net/wireless/ath/ath9k/calib.h
index b2c873e97485..24538bdb9126 100644
--- a/drivers/net/wireless/ath/ath9k/calib.h
+++ b/drivers/net/wireless/ath/ath9k/calib.h
@@ -19,14 +19,6 @@
#include "hw.h"
-extern const struct ath9k_percal_data iq_cal_multi_sample;
-extern const struct ath9k_percal_data iq_cal_single_sample;
-extern const struct ath9k_percal_data adc_gain_cal_multi_sample;
-extern const struct ath9k_percal_data adc_gain_cal_single_sample;
-extern const struct ath9k_percal_data adc_dc_cal_multi_sample;
-extern const struct ath9k_percal_data adc_dc_cal_single_sample;
-extern const struct ath9k_percal_data adc_init_dc_cal;
-
#define AR_PHY_CCA_MAX_AR5416_GOOD_VALUE -85
#define AR_PHY_CCA_MAX_AR9280_GOOD_VALUE -112
#define AR_PHY_CCA_MAX_AR9285_GOOD_VALUE -118
@@ -76,7 +68,8 @@ enum ath9k_cal_types {
ADC_DC_INIT_CAL = 0x1,
ADC_GAIN_CAL = 0x2,
ADC_DC_CAL = 0x4,
- IQ_MISMATCH_CAL = 0x8
+ IQ_MISMATCH_CAL = 0x8,
+ TEMP_COMP_CAL = 0x10,
};
enum ath9k_cal_state {
@@ -122,14 +115,12 @@ struct ath9k_pacal_info{
bool ath9k_hw_reset_calvalid(struct ath_hw *ah);
void ath9k_hw_start_nfcal(struct ath_hw *ah);
-void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan);
int16_t ath9k_hw_getnf(struct ath_hw *ah,
struct ath9k_channel *chan);
void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah);
s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan);
-bool ath9k_hw_calibrate(struct ath_hw *ah, struct ath9k_channel *chan,
- u8 rxchainmask, bool longcal);
-bool ath9k_hw_init_cal(struct ath_hw *ah,
- struct ath9k_channel *chan);
+void ath9k_hw_reset_calibration(struct ath_hw *ah,
+ struct ath9k_cal_list *currCal);
+
#endif /* CALIB_H */
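
The calibration types above are power-of-two flags so a chip's supported set fits in a single mask; the removed ath9k_hw_iscal_supported() switched on calType & ah->supp_cals. A one-line illustration of that test (helper name hypothetical):

static bool example_cal_supported(struct ath_hw *ah,
				  enum ath9k_cal_types type)
{
	/* supp_cals is a bitmask built from the enum values above */
	return (ah->supp_cals & type) != 0;
}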
diff --git a/drivers/net/wireless/ath/ath9k/common.c b/drivers/net/wireless/ath/ath9k/common.c
index 4d775ae141db..7707341cd0d3 100644
--- a/drivers/net/wireless/ath/ath9k/common.c
+++ b/drivers/net/wireless/ath/ath9k/common.c
@@ -57,13 +57,19 @@ static bool ath9k_rx_accept(struct ath_common *common,
* rs_more indicates chained descriptors which can be used
* to link buffers together for a sort of scatter-gather
* operation.
- *
+ * Reject the frame; we don't support scatter-gather yet, and
+ * the frame is probably corrupt anyway.
+ */
+ if (rx_stats->rs_more)
+ return false;
+
+ /*
* The rx_stats->rs_status will not be set until the end of the
* chained descriptors so it can be ignored if rs_more is set. The
* rs_more will be false at the last element of the chained
* descriptors.
*/
- if (!rx_stats->rs_more && rx_stats->rs_status != 0) {
+ if (rx_stats->rs_status != 0) {
if (rx_stats->rs_status & ATH9K_RXERR_CRC)
rxs->flag |= RX_FLAG_FAILED_FCS_CRC;
if (rx_stats->rs_status & ATH9K_RXERR_PHY)
@@ -102,11 +108,11 @@ static bool ath9k_rx_accept(struct ath_common *common,
return true;
}
-static u8 ath9k_process_rate(struct ath_common *common,
- struct ieee80211_hw *hw,
- struct ath_rx_status *rx_stats,
- struct ieee80211_rx_status *rxs,
- struct sk_buff *skb)
+static int ath9k_process_rate(struct ath_common *common,
+ struct ieee80211_hw *hw,
+ struct ath_rx_status *rx_stats,
+ struct ieee80211_rx_status *rxs,
+ struct sk_buff *skb)
{
struct ieee80211_supported_band *sband;
enum ieee80211_band band;
@@ -122,25 +128,32 @@ static u8 ath9k_process_rate(struct ath_common *common,
rxs->flag |= RX_FLAG_40MHZ;
if (rx_stats->rs_flags & ATH9K_RX_GI)
rxs->flag |= RX_FLAG_SHORT_GI;
- return rx_stats->rs_rate & 0x7f;
+ rxs->rate_idx = rx_stats->rs_rate & 0x7f;
+ return 0;
}
for (i = 0; i < sband->n_bitrates; i++) {
- if (sband->bitrates[i].hw_value == rx_stats->rs_rate)
- return i;
+ if (sband->bitrates[i].hw_value == rx_stats->rs_rate) {
+ rxs->rate_idx = i;
+ return 0;
+ }
if (sband->bitrates[i].hw_value_short == rx_stats->rs_rate) {
rxs->flag |= RX_FLAG_SHORTPRE;
- return i;
+ rxs->rate_idx = i;
+ return 0;
}
}
- /* No valid hardware bitrate found -- we should not get here */
+ /*
+ * No valid hardware bitrate found -- we should not get here
+ * because hardware has already validated this frame as OK.
+ */
ath_print(common, ATH_DBG_XMIT, "unsupported hw bitrate detected "
"0x%02x using 1 Mbit\n", rx_stats->rs_rate);
if ((common->debug_mask & ATH_DBG_XMIT))
print_hex_dump_bytes("", DUMP_PREFIX_NONE, skb->data, skb->len);
- return 0;
+ return -EINVAL;
}
static void ath9k_process_rssi(struct ath_common *common,
@@ -202,17 +215,22 @@ int ath9k_cmn_rx_skb_preprocess(struct ath_common *common,
struct ath_hw *ah = common->ah;
memset(rx_status, 0, sizeof(struct ieee80211_rx_status));
+
+ /*
+ * Everything but the rate is checked here; the rate check is done
+ * separately to avoid doing two rate lookups per frame.
+ */
if (!ath9k_rx_accept(common, skb, rx_status, rx_stats, decrypt_error))
return -EINVAL;
ath9k_process_rssi(common, hw, skb, rx_stats);
- rx_status->rate_idx = ath9k_process_rate(common, hw,
- rx_stats, rx_status, skb);
+ if (ath9k_process_rate(common, hw, rx_stats, rx_status, skb))
+ return -EINVAL;
+
rx_status->mactime = ath9k_hw_extend_tsf(ah, rx_stats->rs_tstamp);
rx_status->band = hw->conf.channel->band;
rx_status->freq = hw->conf.channel->center_freq;
- rx_status->noise = common->ani.noise_floor;
rx_status->signal = ATH_DEFAULT_NOISE_FLOOR + rx_stats->rs_rssi;
rx_status->antenna = rx_stats->rs_antenna;
rx_status->flag |= RX_FLAG_TSFT;
@@ -255,7 +273,8 @@ void ath9k_cmn_rx_skb_postprocess(struct ath_common *common,
keyix = rx_stats->rs_keyix;
- if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error) {
+ if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error &&
+ ieee80211_has_protected(fc)) {
rxs->flag |= RX_FLAG_DECRYPTED;
} else if (ieee80211_has_protected(fc)
&& !decrypt_error && skb->len >= hdrlen + 4) {
@@ -286,6 +305,345 @@ int ath9k_cmn_padpos(__le16 frame_control)
}
EXPORT_SYMBOL(ath9k_cmn_padpos);
+int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+
+ if (tx_info->control.hw_key) {
+ if (tx_info->control.hw_key->alg == ALG_WEP)
+ return ATH9K_KEY_TYPE_WEP;
+ else if (tx_info->control.hw_key->alg == ALG_TKIP)
+ return ATH9K_KEY_TYPE_TKIP;
+ else if (tx_info->control.hw_key->alg == ALG_CCMP)
+ return ATH9K_KEY_TYPE_AES;
+ }
+
+ return ATH9K_KEY_TYPE_CLEAR;
+}
+EXPORT_SYMBOL(ath9k_cmn_get_hw_crypto_keytype);
+
+static u32 ath9k_get_extchanmode(struct ieee80211_channel *chan,
+ enum nl80211_channel_type channel_type)
+{
+ u32 chanmode = 0;
+
+ switch (chan->band) {
+ case IEEE80211_BAND_2GHZ:
+ switch (channel_type) {
+ case NL80211_CHAN_NO_HT:
+ case NL80211_CHAN_HT20:
+ chanmode = CHANNEL_G_HT20;
+ break;
+ case NL80211_CHAN_HT40PLUS:
+ chanmode = CHANNEL_G_HT40PLUS;
+ break;
+ case NL80211_CHAN_HT40MINUS:
+ chanmode = CHANNEL_G_HT40MINUS;
+ break;
+ }
+ break;
+ case IEEE80211_BAND_5GHZ:
+ switch (channel_type) {
+ case NL80211_CHAN_NO_HT:
+ case NL80211_CHAN_HT20:
+ chanmode = CHANNEL_A_HT20;
+ break;
+ case NL80211_CHAN_HT40PLUS:
+ chanmode = CHANNEL_A_HT40PLUS;
+ break;
+ case NL80211_CHAN_HT40MINUS:
+ chanmode = CHANNEL_A_HT40MINUS;
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return chanmode;
+}
+
+/*
+ * Update internal channel flags.
+ */
+void ath9k_cmn_update_ichannel(struct ieee80211_hw *hw,
+ struct ath9k_channel *ichan)
+{
+ struct ieee80211_channel *chan = hw->conf.channel;
+ struct ieee80211_conf *conf = &hw->conf;
+
+ ichan->channel = chan->center_freq;
+ ichan->chan = chan;
+
+ if (chan->band == IEEE80211_BAND_2GHZ) {
+ ichan->chanmode = CHANNEL_G;
+ ichan->channelFlags = CHANNEL_2GHZ | CHANNEL_OFDM | CHANNEL_G;
+ } else {
+ ichan->chanmode = CHANNEL_A;
+ ichan->channelFlags = CHANNEL_5GHZ | CHANNEL_OFDM;
+ }
+
+ if (conf_is_ht(conf))
+ ichan->chanmode = ath9k_get_extchanmode(chan,
+ conf->channel_type);
+}
+EXPORT_SYMBOL(ath9k_cmn_update_ichannel);
+
+/*
+ * Get the internal channel reference.
+ */
+struct ath9k_channel *ath9k_cmn_get_curchannel(struct ieee80211_hw *hw,
+ struct ath_hw *ah)
+{
+ struct ieee80211_channel *curchan = hw->conf.channel;
+ struct ath9k_channel *channel;
+ u8 chan_idx;
+
+ chan_idx = curchan->hw_value;
+ channel = &ah->channels[chan_idx];
+ ath9k_cmn_update_ichannel(hw, channel);
+
+ return channel;
+}
+EXPORT_SYMBOL(ath9k_cmn_get_curchannel);
+
+static int ath_setkey_tkip(struct ath_common *common, u16 keyix, const u8 *key,
+ struct ath9k_keyval *hk, const u8 *addr,
+ bool authenticator)
+{
+ struct ath_hw *ah = common->ah;
+ const u8 *key_rxmic;
+ const u8 *key_txmic;
+
+ key_txmic = key + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY;
+ key_rxmic = key + NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY;
+
+ if (addr == NULL) {
+ /*
+ * Group key installation - only two key cache entries are used
+ * regardless of splitmic capability since group key is only
+ * used either for TX or RX.
+ */
+ if (authenticator) {
+ memcpy(hk->kv_mic, key_txmic, sizeof(hk->kv_mic));
+ memcpy(hk->kv_txmic, key_txmic, sizeof(hk->kv_mic));
+ } else {
+ memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
+ memcpy(hk->kv_txmic, key_rxmic, sizeof(hk->kv_mic));
+ }
+ return ath9k_hw_set_keycache_entry(ah, keyix, hk, addr);
+ }
+ if (!common->splitmic) {
+ /* TX and RX keys share the same key cache entry. */
+ memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
+ memcpy(hk->kv_txmic, key_txmic, sizeof(hk->kv_txmic));
+ return ath9k_hw_set_keycache_entry(ah, keyix, hk, addr);
+ }
+
+ /* Separate key cache entries for TX and RX */
+
+ /* TX key goes at first index, RX key at +32. */
+ memcpy(hk->kv_mic, key_txmic, sizeof(hk->kv_mic));
+ if (!ath9k_hw_set_keycache_entry(ah, keyix, hk, NULL)) {
+ /* TX MIC entry failed. No need to proceed further */
+ ath_print(common, ATH_DBG_FATAL,
+ "Setting TX MIC Key Failed\n");
+ return 0;
+ }
+
+ memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
+ /* XXX delete tx key on failure? */
+ return ath9k_hw_set_keycache_entry(ah, keyix + 32, hk, addr);
+}
+
+static int ath_reserve_key_cache_slot_tkip(struct ath_common *common)
+{
+ int i;
+
+ for (i = IEEE80211_WEP_NKID; i < common->keymax / 2; i++) {
+ if (test_bit(i, common->keymap) ||
+ test_bit(i + 64, common->keymap))
+ continue; /* At least one part of TKIP key allocated */
+ if (common->splitmic &&
+ (test_bit(i + 32, common->keymap) ||
+ test_bit(i + 64 + 32, common->keymap)))
+ continue; /* At least one part of TKIP key allocated */
+
+ /* Found a free slot for a TKIP key */
+ return i;
+ }
+ return -1;
+}
+
+static int ath_reserve_key_cache_slot(struct ath_common *common)
+{
+ int i;
+
+ /* First, try to find slots that would not be available for TKIP. */
+ if (common->splitmic) {
+ for (i = IEEE80211_WEP_NKID; i < common->keymax / 4; i++) {
+ if (!test_bit(i, common->keymap) &&
+ (test_bit(i + 32, common->keymap) ||
+ test_bit(i + 64, common->keymap) ||
+ test_bit(i + 64 + 32, common->keymap)))
+ return i;
+ if (!test_bit(i + 32, common->keymap) &&
+ (test_bit(i, common->keymap) ||
+ test_bit(i + 64, common->keymap) ||
+ test_bit(i + 64 + 32, common->keymap)))
+ return i + 32;
+ if (!test_bit(i + 64, common->keymap) &&
+ (test_bit(i , common->keymap) ||
+ test_bit(i + 32, common->keymap) ||
+ test_bit(i + 64 + 32, common->keymap)))
+ return i + 64;
+ if (!test_bit(i + 64 + 32, common->keymap) &&
+ (test_bit(i, common->keymap) ||
+ test_bit(i + 32, common->keymap) ||
+ test_bit(i + 64, common->keymap)))
+ return i + 64 + 32;
+ }
+ } else {
+ for (i = IEEE80211_WEP_NKID; i < common->keymax / 2; i++) {
+ if (!test_bit(i, common->keymap) &&
+ test_bit(i + 64, common->keymap))
+ return i;
+ if (test_bit(i, common->keymap) &&
+ !test_bit(i + 64, common->keymap))
+ return i + 64;
+ }
+ }
+
+ /* No partially used TKIP slots, pick any available slot */
+ for (i = IEEE80211_WEP_NKID; i < common->keymax; i++) {
+ /* Do not allow slots that could be needed for TKIP group keys
+ * to be used. This limitation could be removed if we know that
+ * TKIP will not be used. */
+ if (i >= 64 && i < 64 + IEEE80211_WEP_NKID)
+ continue;
+ if (common->splitmic) {
+ if (i >= 32 && i < 32 + IEEE80211_WEP_NKID)
+ continue;
+ if (i >= 64 + 32 && i < 64 + 32 + IEEE80211_WEP_NKID)
+ continue;
+ }
+
+ if (!test_bit(i, common->keymap))
+ return i; /* Found a free slot for a key */
+ }
+
+ /* No free slot found */
+ return -1;
+}
+
+/*
+ * Configure encryption in the HW.
+ */
+int ath9k_cmn_key_config(struct ath_common *common,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct ath_hw *ah = common->ah;
+ struct ath9k_keyval hk;
+ const u8 *mac = NULL;
+ int ret = 0;
+ int idx;
+
+ memset(&hk, 0, sizeof(hk));
+
+ switch (key->alg) {
+ case ALG_WEP:
+ hk.kv_type = ATH9K_CIPHER_WEP;
+ break;
+ case ALG_TKIP:
+ hk.kv_type = ATH9K_CIPHER_TKIP;
+ break;
+ case ALG_CCMP:
+ hk.kv_type = ATH9K_CIPHER_AES_CCM;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ hk.kv_len = key->keylen;
+ memcpy(hk.kv_val, key->key, key->keylen);
+
+ if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
+ /* For now, use the default keys for broadcast keys. This may
+ * need to change with virtual interfaces. */
+ idx = key->keyidx;
+ } else if (key->keyidx) {
+ if (WARN_ON(!sta))
+ return -EOPNOTSUPP;
+ mac = sta->addr;
+
+ if (vif->type != NL80211_IFTYPE_AP) {
+ /* Only keyidx 0 should be used with unicast key, but
+ * allow this for client mode for now. */
+ idx = key->keyidx;
+ } else
+ return -EIO;
+ } else {
+ if (WARN_ON(!sta))
+ return -EOPNOTSUPP;
+ mac = sta->addr;
+
+ if (key->alg == ALG_TKIP)
+ idx = ath_reserve_key_cache_slot_tkip(common);
+ else
+ idx = ath_reserve_key_cache_slot(common);
+ if (idx < 0)
+ return -ENOSPC; /* no free key cache entries */
+ }
+
+ if (key->alg == ALG_TKIP)
+ ret = ath_setkey_tkip(common, idx, key->key, &hk, mac,
+ vif->type == NL80211_IFTYPE_AP);
+ else
+ ret = ath9k_hw_set_keycache_entry(ah, idx, &hk, mac);
+
+ if (!ret)
+ return -EIO;
+
+ set_bit(idx, common->keymap);
+ if (key->alg == ALG_TKIP) {
+ set_bit(idx + 64, common->keymap);
+ if (common->splitmic) {
+ set_bit(idx + 32, common->keymap);
+ set_bit(idx + 64 + 32, common->keymap);
+ }
+ }
+
+ return idx;
+}
+EXPORT_SYMBOL(ath9k_cmn_key_config);
+
+/*
+ * Delete Key.
+ */
+void ath9k_cmn_key_delete(struct ath_common *common,
+ struct ieee80211_key_conf *key)
+{
+ struct ath_hw *ah = common->ah;
+
+ ath9k_hw_keyreset(ah, key->hw_key_idx);
+ if (key->hw_key_idx < IEEE80211_WEP_NKID)
+ return;
+
+ clear_bit(key->hw_key_idx, common->keymap);
+ if (key->alg != ALG_TKIP)
+ return;
+
+ clear_bit(key->hw_key_idx + 64, common->keymap);
+ if (common->splitmic) {
+ ath9k_hw_keyreset(ah, key->hw_key_idx + 32);
+ clear_bit(key->hw_key_idx + 32, common->keymap);
+ clear_bit(key->hw_key_idx + 64 + 32, common->keymap);
+ }
+}
+EXPORT_SYMBOL(ath9k_cmn_key_delete);
+
static int __init ath9k_cmn_init(void)
{
return 0;
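
ath9k_cmn_key_config() returns the allocated key cache index (or a negative errno) and ath9k_cmn_key_delete() releases it again. A sketch of how a driver's mac80211 set_key path might use the pair; the callback body is illustrative and not taken from this patch:

static int example_set_key(struct ath_common *common, enum set_key_cmd cmd,
			   struct ieee80211_vif *vif, struct ieee80211_sta *sta,
			   struct ieee80211_key_conf *key)
{
	int ret;

	switch (cmd) {
	case SET_KEY:
		ret = ath9k_cmn_key_config(common, vif, sta, key);
		if (ret < 0)
			return ret;
		/* report the hardware key cache slot back to mac80211 */
		key->hw_key_idx = ret;
		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
		return 0;
	case DISABLE_KEY:
		ath9k_cmn_key_delete(common, key);
		return 0;
	default:
		return -EINVAL;
	}
}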
diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h
index 042999c2fe9c..e08f7e5a26e0 100644
--- a/drivers/net/wireless/ath/ath9k/common.h
+++ b/drivers/net/wireless/ath/ath9k/common.h
@@ -20,9 +20,12 @@
#include "../debug.h"
#include "hw.h"
+#include "hw-ops.h"
/* Common header for Atheros 802.11n base driver cores */
+#define IEEE80211_WEP_NKID 4
+
#define WME_NUM_TID 16
#define WME_BA_BMP_SIZE 64
#define WME_MAX_BA WME_BA_BMP_SIZE
@@ -74,11 +77,12 @@ struct ath_buf {
an aggregate) */
struct ath_buf *bf_next; /* next subframe in the aggregate */
struct sk_buff *bf_mpdu; /* enclosing frame structure */
- struct ath_desc *bf_desc; /* virtual addr of desc */
+ void *bf_desc; /* virtual addr of desc */
dma_addr_t bf_daddr; /* physical addr of desc */
dma_addr_t bf_buf_addr; /* physical addr of data buffer */
bool bf_stale;
bool bf_isnullfunc;
+ bool bf_tx_aborted;
u16 bf_flags;
struct ath_buf_state bf_state;
dma_addr_t bf_dmacontext;
@@ -125,3 +129,14 @@ void ath9k_cmn_rx_skb_postprocess(struct ath_common *common,
bool decrypt_error);
int ath9k_cmn_padpos(__le16 frame_control);
+int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb);
+void ath9k_cmn_update_ichannel(struct ieee80211_hw *hw,
+ struct ath9k_channel *ichan);
+struct ath9k_channel *ath9k_cmn_get_curchannel(struct ieee80211_hw *hw,
+ struct ath_hw *ah);
+int ath9k_cmn_key_config(struct ath_common *common,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key);
+void ath9k_cmn_key_delete(struct ath_common *common,
+ struct ieee80211_key_conf *key);
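
bf_desc is widened from struct ath_desc * to void * above because the EDMA-capable hardware handled by this series uses descriptor layouts that differ from the legacy ath_desc, so common code keeps an opaque pointer and each backend casts it to its own type. Minimal illustration (helper name assumed):

/* Legacy (pre-EDMA) backends still view the buffer through ath_desc. */
static inline struct ath_desc *example_bf_to_desc(struct ath_buf *bf)
{
	return (struct ath_desc *)bf->bf_desc;
}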
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 081e0085ed4c..29898f8d1893 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -78,6 +78,90 @@ static const struct file_operations fops_debug = {
#define DMA_BUF_LEN 1024
+static ssize_t read_file_tx_chainmask(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ char buf[32];
+ unsigned int len;
+
+ len = snprintf(buf, sizeof(buf), "0x%08x\n", common->tx_chainmask);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_tx_chainmask(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ unsigned long mask;
+ char buf[32];
+ ssize_t len;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EINVAL;
+
+ buf[len] = '\0';
+ if (strict_strtoul(buf, 0, &mask))
+ return -EINVAL;
+
+ common->tx_chainmask = mask;
+ sc->sc_ah->caps.tx_chainmask = mask;
+ return count;
+}
+
+static const struct file_operations fops_tx_chainmask = {
+ .read = read_file_tx_chainmask,
+ .write = write_file_tx_chainmask,
+ .open = ath9k_debugfs_open,
+ .owner = THIS_MODULE
+};
+
+
+static ssize_t read_file_rx_chainmask(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ char buf[32];
+ unsigned int len;
+
+ len = snprintf(buf, sizeof(buf), "0x%08x\n", common->rx_chainmask);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_rx_chainmask(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ unsigned long mask;
+ char buf[32];
+ ssize_t len;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EINVAL;
+
+ buf[len] = '\0';
+ if (strict_strtoul(buf, 0, &mask))
+ return -EINVAL;
+
+ common->rx_chainmask = mask;
+ sc->sc_ah->caps.rx_chainmask = mask;
+ return count;
+}
+
+static const struct file_operations fops_rx_chainmask = {
+ .read = read_file_rx_chainmask,
+ .write = write_file_rx_chainmask,
+ .open = ath9k_debugfs_open,
+ .owner = THIS_MODULE
+};
+
+
static ssize_t read_file_dma(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
@@ -157,10 +241,10 @@ static ssize_t read_file_dma(struct file *file, char __user *user_buf,
"txfifo_dcu_num_0: %2d txfifo_dcu_num_1: %2d\n",
(val[6] & 0x0001e000) >> 13, (val[6] & 0x001e0000) >> 17);
- len += snprintf(buf + len, DMA_BUF_LEN - len, "pcu observe: 0x%x \n",
+ len += snprintf(buf + len, DMA_BUF_LEN - len, "pcu observe: 0x%x\n",
REG_READ_D(ah, AR_OBS_BUS_1));
len += snprintf(buf + len, DMA_BUF_LEN - len,
- "AR_CR: 0x%x \n", REG_READ_D(ah, AR_CR));
+ "AR_CR: 0x%x\n", REG_READ_D(ah, AR_CR));
ath9k_ps_restore(sc);
@@ -180,8 +264,15 @@ void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status)
{
if (status)
sc->debug.stats.istats.total++;
- if (status & ATH9K_INT_RX)
- sc->debug.stats.istats.rxok++;
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+ if (status & ATH9K_INT_RXLP)
+ sc->debug.stats.istats.rxlp++;
+ if (status & ATH9K_INT_RXHP)
+ sc->debug.stats.istats.rxhp++;
+ } else {
+ if (status & ATH9K_INT_RX)
+ sc->debug.stats.istats.rxok++;
+ }
if (status & ATH9K_INT_RXEOL)
sc->debug.stats.istats.rxeol++;
if (status & ATH9K_INT_RXORN)
@@ -223,8 +314,15 @@ static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
char buf[512];
unsigned int len = 0;
- len += snprintf(buf + len, sizeof(buf) - len,
- "%8s: %10u\n", "RX", sc->debug.stats.istats.rxok);
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%8s: %10u\n", "RXLP", sc->debug.stats.istats.rxlp);
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%8s: %10u\n", "RXHP", sc->debug.stats.istats.rxhp);
+ } else {
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%8s: %10u\n", "RX", sc->debug.stats.istats.rxok);
+ }
len += snprintf(buf + len, sizeof(buf) - len,
"%8s: %10u\n", "RXEOL", sc->debug.stats.istats.rxeol);
len += snprintf(buf + len, sizeof(buf) - len,
@@ -557,10 +655,8 @@ static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
}
void ath_debug_stat_tx(struct ath_softc *sc, struct ath_txq *txq,
- struct ath_buf *bf)
+ struct ath_buf *bf, struct ath_tx_status *ts)
{
- struct ath_desc *ds = bf->bf_desc;
-
if (bf_isampdu(bf)) {
if (bf_isxretried(bf))
TX_STAT_INC(txq->axq_qnum, a_xretries);
@@ -570,17 +666,17 @@ void ath_debug_stat_tx(struct ath_softc *sc, struct ath_txq *txq,
TX_STAT_INC(txq->axq_qnum, completed);
}
- if (ds->ds_txstat.ts_status & ATH9K_TXERR_FIFO)
+ if (ts->ts_status & ATH9K_TXERR_FIFO)
TX_STAT_INC(txq->axq_qnum, fifo_underrun);
- if (ds->ds_txstat.ts_status & ATH9K_TXERR_XTXOP)
+ if (ts->ts_status & ATH9K_TXERR_XTXOP)
TX_STAT_INC(txq->axq_qnum, xtxop);
- if (ds->ds_txstat.ts_status & ATH9K_TXERR_TIMER_EXPIRED)
+ if (ts->ts_status & ATH9K_TXERR_TIMER_EXPIRED)
TX_STAT_INC(txq->axq_qnum, timer_exp);
- if (ds->ds_txstat.ts_flags & ATH9K_TX_DESC_CFG_ERR)
+ if (ts->ts_flags & ATH9K_TX_DESC_CFG_ERR)
TX_STAT_INC(txq->axq_qnum, desc_cfg_err);
- if (ds->ds_txstat.ts_flags & ATH9K_TX_DATA_UNDERRUN)
+ if (ts->ts_flags & ATH9K_TX_DATA_UNDERRUN)
TX_STAT_INC(txq->axq_qnum, data_underrun);
- if (ds->ds_txstat.ts_flags & ATH9K_TX_DELIM_UNDERRUN)
+ if (ts->ts_flags & ATH9K_TX_DELIM_UNDERRUN)
TX_STAT_INC(txq->axq_qnum, delim_underrun);
}
@@ -663,30 +759,29 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
#undef PHY_ERR
}
-void ath_debug_stat_rx(struct ath_softc *sc, struct ath_buf *bf)
+void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs)
{
#define RX_STAT_INC(c) sc->debug.stats.rxstats.c++
#define RX_PHY_ERR_INC(c) sc->debug.stats.rxstats.phy_err_stats[c]++
- struct ath_desc *ds = bf->bf_desc;
u32 phyerr;
- if (ds->ds_rxstat.rs_status & ATH9K_RXERR_CRC)
+ if (rs->rs_status & ATH9K_RXERR_CRC)
RX_STAT_INC(crc_err);
- if (ds->ds_rxstat.rs_status & ATH9K_RXERR_DECRYPT)
+ if (rs->rs_status & ATH9K_RXERR_DECRYPT)
RX_STAT_INC(decrypt_crc_err);
- if (ds->ds_rxstat.rs_status & ATH9K_RXERR_MIC)
+ if (rs->rs_status & ATH9K_RXERR_MIC)
RX_STAT_INC(mic_err);
- if (ds->ds_rxstat.rs_status & ATH9K_RX_DELIM_CRC_PRE)
+ if (rs->rs_status & ATH9K_RX_DELIM_CRC_PRE)
RX_STAT_INC(pre_delim_crc_err);
- if (ds->ds_rxstat.rs_status & ATH9K_RX_DELIM_CRC_POST)
+ if (rs->rs_status & ATH9K_RX_DELIM_CRC_POST)
RX_STAT_INC(post_delim_crc_err);
- if (ds->ds_rxstat.rs_status & ATH9K_RX_DECRYPT_BUSY)
+ if (rs->rs_status & ATH9K_RX_DECRYPT_BUSY)
RX_STAT_INC(decrypt_busy_err);
- if (ds->ds_rxstat.rs_status & ATH9K_RXERR_PHY) {
+ if (rs->rs_status & ATH9K_RXERR_PHY) {
RX_STAT_INC(phy_err);
- phyerr = ds->ds_rxstat.rs_phyerr & 0x24;
+ phyerr = rs->rs_phyerr & 0x24;
RX_PHY_ERR_INC(phyerr);
}
@@ -700,6 +795,86 @@ static const struct file_operations fops_recv = {
.owner = THIS_MODULE
};
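+/*
+ * Raw register access: "regidx" selects a register offset and
+ * "regval" reads or writes that register via REG_READ_D/REG_WRITE_D.
+ * Usage sketch (debugfs path assumed):
+ *   echo 0x<offset> > /sys/kernel/debug/ath9k/<wiphy>/regidx
+ *   cat /sys/kernel/debug/ath9k/<wiphy>/regval
+ *   echo 0x<value> > /sys/kernel/debug/ath9k/<wiphy>/regval
+ */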
+static ssize_t read_file_regidx(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ char buf[32];
+ unsigned int len;
+
+ len = snprintf(buf, sizeof(buf), "0x%08x\n", sc->debug.regidx);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_regidx(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ unsigned long regidx;
+ char buf[32];
+ ssize_t len;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ if (strict_strtoul(buf, 0, &regidx))
+ return -EINVAL;
+
+ sc->debug.regidx = regidx;
+ return count;
+}
+
+static const struct file_operations fops_regidx = {
+ .read = read_file_regidx,
+ .write = write_file_regidx,
+ .open = ath9k_debugfs_open,
+ .owner = THIS_MODULE
+};
+
+static ssize_t read_file_regval(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ struct ath_hw *ah = sc->sc_ah;
+ char buf[32];
+ unsigned int len;
+ u32 regval;
+
+ regval = REG_READ_D(ah, sc->debug.regidx);
+ len = snprintf(buf, sizeof(buf), "0x%08x\n", regval);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_regval(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ struct ath_hw *ah = sc->sc_ah;
+ unsigned long regval;
+ char buf[32];
+ ssize_t len;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ if (strict_strtoul(buf, 0, &regval))
+ return -EINVAL;
+
+ REG_WRITE_D(ah, sc->debug.regidx, regval);
+ return count;
+}
+
+static const struct file_operations fops_regval = {
+ .read = read_file_regval,
+ .write = write_file_regval,
+ .open = ath9k_debugfs_open,
+ .owner = THIS_MODULE
+};
+
int ath9k_init_debug(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
@@ -711,54 +886,55 @@ int ath9k_init_debug(struct ath_hw *ah)
sc->debug.debugfs_phy = debugfs_create_dir(wiphy_name(sc->hw->wiphy),
ath9k_debugfs_root);
if (!sc->debug.debugfs_phy)
- goto err;
+ return -ENOMEM;
#ifdef CONFIG_ATH_DEBUG
- sc->debug.debugfs_debug = debugfs_create_file("debug",
- S_IRUSR | S_IWUSR, sc->debug.debugfs_phy, sc, &fops_debug);
- if (!sc->debug.debugfs_debug)
+ if (!debugfs_create_file("debug", S_IRUSR | S_IWUSR,
+ sc->debug.debugfs_phy, sc, &fops_debug))
goto err;
#endif
- sc->debug.debugfs_dma = debugfs_create_file("dma", S_IRUSR,
- sc->debug.debugfs_phy, sc, &fops_dma);
- if (!sc->debug.debugfs_dma)
+ if (!debugfs_create_file("dma", S_IRUSR, sc->debug.debugfs_phy,
+ sc, &fops_dma))
+ goto err;
+
+ if (!debugfs_create_file("interrupt", S_IRUSR, sc->debug.debugfs_phy,
+ sc, &fops_interrupt))
+ goto err;
+
+ if (!debugfs_create_file("rcstat", S_IRUSR, sc->debug.debugfs_phy,
+ sc, &fops_rcstat))
+ goto err;
+
+ if (!debugfs_create_file("wiphy", S_IRUSR | S_IWUSR,
+ sc->debug.debugfs_phy, sc, &fops_wiphy))
+ goto err;
+
+ if (!debugfs_create_file("xmit", S_IRUSR, sc->debug.debugfs_phy,
+ sc, &fops_xmit))
goto err;
- sc->debug.debugfs_interrupt = debugfs_create_file("interrupt",
- S_IRUSR,
- sc->debug.debugfs_phy,
- sc, &fops_interrupt);
- if (!sc->debug.debugfs_interrupt)
+ if (!debugfs_create_file("recv", S_IRUSR, sc->debug.debugfs_phy,
+ sc, &fops_recv))
goto err;
- sc->debug.debugfs_rcstat = debugfs_create_file("rcstat",
- S_IRUSR,
- sc->debug.debugfs_phy,
- sc, &fops_rcstat);
- if (!sc->debug.debugfs_rcstat)
+ if (!debugfs_create_file("rx_chainmask", S_IRUSR | S_IWUSR,
+ sc->debug.debugfs_phy, sc, &fops_rx_chainmask))
goto err;
- sc->debug.debugfs_wiphy = debugfs_create_file(
- "wiphy", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy, sc,
- &fops_wiphy);
- if (!sc->debug.debugfs_wiphy)
+ if (!debugfs_create_file("tx_chainmask", S_IRUSR | S_IWUSR,
+ sc->debug.debugfs_phy, sc, &fops_tx_chainmask))
goto err;
- sc->debug.debugfs_xmit = debugfs_create_file("xmit",
- S_IRUSR,
- sc->debug.debugfs_phy,
- sc, &fops_xmit);
- if (!sc->debug.debugfs_xmit)
+ if (!debugfs_create_file("regidx", S_IRUSR | S_IWUSR,
+ sc->debug.debugfs_phy, sc, &fops_regidx))
goto err;
- sc->debug.debugfs_recv = debugfs_create_file("recv",
- S_IRUSR,
- sc->debug.debugfs_phy,
- sc, &fops_recv);
- if (!sc->debug.debugfs_recv)
+ if (!debugfs_create_file("regval", S_IRUSR | S_IWUSR,
+ sc->debug.debugfs_phy, sc, &fops_regval))
goto err;
+ sc->debug.regidx = 0;
return 0;
err:
ath9k_exit_debug(ah);
@@ -770,14 +946,7 @@ void ath9k_exit_debug(struct ath_hw *ah)
struct ath_common *common = ath9k_hw_common(ah);
struct ath_softc *sc = (struct ath_softc *) common->priv;
- debugfs_remove(sc->debug.debugfs_recv);
- debugfs_remove(sc->debug.debugfs_xmit);
- debugfs_remove(sc->debug.debugfs_wiphy);
- debugfs_remove(sc->debug.debugfs_rcstat);
- debugfs_remove(sc->debug.debugfs_interrupt);
- debugfs_remove(sc->debug.debugfs_dma);
- debugfs_remove(sc->debug.debugfs_debug);
- debugfs_remove(sc->debug.debugfs_phy);
+ debugfs_remove_recursive(sc->debug.debugfs_phy);
}
int ath9k_debug_create_root(void)
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 86780e68b31e..5147b8709e10 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -35,6 +35,8 @@ struct ath_buf;
* struct ath_interrupt_stats - Contains statistics about interrupts
* @total: Total no. of interrupts generated so far
* @rxok: RX with no errors
+ * @rxlp: RX interrupts on the low priority queue
+ * @rxhp: RX interrupts on the high priority queue (uapsd only)
* @rxeol: RX with no more RXDESC available
* @rxorn: RX FIFO overrun
* @txok: TX completed at the requested rate
@@ -55,6 +57,8 @@ struct ath_buf;
struct ath_interrupt_stats {
u32 total;
u32 rxok;
+ u32 rxlp;
+ u32 rxhp;
u32 rxeol;
u32 rxorn;
u32 txok;
@@ -149,13 +153,7 @@ struct ath_stats {
struct ath9k_debug {
struct dentry *debugfs_phy;
- struct dentry *debugfs_debug;
- struct dentry *debugfs_dma;
- struct dentry *debugfs_interrupt;
- struct dentry *debugfs_rcstat;
- struct dentry *debugfs_wiphy;
- struct dentry *debugfs_xmit;
- struct dentry *debugfs_recv;
+ u32 regidx;
struct ath_stats stats;
};
@@ -167,8 +165,8 @@ void ath9k_debug_remove_root(void);
void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status);
void ath_debug_stat_rc(struct ath_softc *sc, int final_rate);
void ath_debug_stat_tx(struct ath_softc *sc, struct ath_txq *txq,
- struct ath_buf *bf);
-void ath_debug_stat_rx(struct ath_softc *sc, struct ath_buf *bf);
+ struct ath_buf *bf, struct ath_tx_status *ts);
+void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs);
void ath_debug_stat_retries(struct ath_softc *sc, int rix,
int xretries, int retries, u8 per);
@@ -204,12 +202,13 @@ static inline void ath_debug_stat_rc(struct ath_softc *sc,
static inline void ath_debug_stat_tx(struct ath_softc *sc,
struct ath_txq *txq,
- struct ath_buf *bf)
+ struct ath_buf *bf,
+ struct ath_tx_status *ts)
{
}
static inline void ath_debug_stat_rx(struct ath_softc *sc,
- struct ath_buf *bf)
+ struct ath_rx_status *rs)
{
}
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c
index dacaae934148..ca8704a9d7ac 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom.c
@@ -36,8 +36,6 @@ void ath9k_hw_analog_shift_rmw(struct ath_hw *ah, u32 reg, u32 mask,
if (ah->config.analog_shiftreg)
udelay(100);
-
- return;
}
int16_t ath9k_hw_interpolate(u16 target, u16 srcLeft, u16 srcRight,
@@ -256,14 +254,13 @@ int ath9k_hw_eeprom_init(struct ath_hw *ah)
{
int status;
- if (AR_SREV_9287(ah)) {
- ah->eep_map = EEP_MAP_AR9287;
- ah->eep_ops = &eep_AR9287_ops;
+ if (AR_SREV_9300_20_OR_LATER(ah))
+ ah->eep_ops = &eep_ar9300_ops;
+ else if (AR_SREV_9287(ah)) {
+ ah->eep_ops = &eep_ar9287_ops;
} else if (AR_SREV_9285(ah) || AR_SREV_9271(ah)) {
- ah->eep_map = EEP_MAP_4KBITS;
ah->eep_ops = &eep_4k_ops;
} else {
- ah->eep_map = EEP_MAP_DEFAULT;
ah->eep_ops = &eep_def_ops;
}
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h
index 2f2993b50e2f..21354c15a9a9 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/eeprom.h
@@ -19,6 +19,7 @@
#include "../ath.h"
#include <net/cfg80211.h>
+#include "ar9003_eeprom.h"
#define AH_USE_EEPROM 0x1
@@ -93,7 +94,6 @@
*/
#define AR9285_RDEXT_DEFAULT 0x1F
-#define AR_EEPROM_MAC(i) (0x1d+(i))
#define ATH9K_POW_SM(_r, _s) (((_r) & 0x3f) << (_s))
#define FREQ2FBIN(x, y) ((y) ? ((x) - 2300) : (((x) - 4800) / 5))
#define ath9k_hw_use_flash(_ah) (!(_ah->ah_flags & AH_USE_EEPROM))
@@ -155,6 +155,7 @@
#define AR5416_BCHAN_UNUSED 0xFF
#define AR5416_MAX_PWR_RANGE_IN_HALF_DB 64
#define AR5416_MAX_CHAINS 3
+#define AR9300_MAX_CHAINS 3
#define AR5416_PWR_TABLE_OFFSET_DB -5
/* Rx gain type values */
@@ -249,16 +250,20 @@ enum eeprom_param {
EEP_MINOR_REV,
EEP_TX_MASK,
EEP_RX_MASK,
+ EEP_FSTCLK_5G,
EEP_RXGAIN_TYPE,
- EEP_TXGAIN_TYPE,
EEP_OL_PWRCTRL,
+ EEP_TXGAIN_TYPE,
EEP_RC_CHAIN_MASK,
EEP_DAC_HPWR_5G,
EEP_FRAC_N_5G,
EEP_DEV_TYPE,
EEP_TEMPSENSE_SLOPE,
EEP_TEMPSENSE_SLOPE_PAL_ON,
- EEP_PWR_TABLE_OFFSET
+ EEP_PWR_TABLE_OFFSET,
+ EEP_DRIVE_STRENGTH,
+ EEP_INTERNAL_REGULATOR,
+ EEP_SWREG
};
enum ar5416_rates {
@@ -295,7 +300,8 @@ struct base_eep_header {
u32 binBuildNumber;
u8 deviceType;
u8 pwdclkind;
- u8 futureBase_1[2];
+ u8 fastClk5g;
+ u8 divChain;
u8 rxGainType;
u8 dacHiPwrMode_5G;
u8 openLoopPwrCntl;
@@ -656,13 +662,6 @@ struct ath9k_country_entry {
u8 iso[3];
};
-enum ath9k_eep_map {
- EEP_MAP_DEFAULT = 0x0,
- EEP_MAP_4KBITS,
- EEP_MAP_AR9287,
- EEP_MAP_MAX
-};
-
struct eeprom_ops {
int (*check_eeprom)(struct ath_hw *hw);
u32 (*get_eeprom)(struct ath_hw *hw, enum eeprom_param param);
@@ -713,6 +712,8 @@ int ath9k_hw_eeprom_init(struct ath_hw *ah);
extern const struct eeprom_ops eep_def_ops;
extern const struct eeprom_ops eep_4k_ops;
-extern const struct eeprom_ops eep_AR9287_ops;
+extern const struct eeprom_ops eep_ar9287_ops;
+extern const struct eeprom_ops eep_ar9300_ops;
#endif /* EEPROM_H */
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
index 68db16690abf..41a77d1bd439 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_4k.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
@@ -15,6 +15,7 @@
*/
#include "hw.h"
+#include "ar9002_phy.h"
static int ath9k_hw_4k_get_eeprom_ver(struct ath_hw *ah)
{
@@ -43,7 +44,7 @@ static bool ath9k_hw_4k_fill_eeprom(struct ath_hw *ah)
for (addr = 0; addr < SIZE_EEPROM_4K; addr++) {
if (!ath9k_hw_nvram_read(common, addr + eep_start_loc, eep_data)) {
ath_print(common, ATH_DBG_EEPROM,
- "Unable to read eeprom region \n");
+ "Unable to read eeprom region\n");
return false;
}
eep_data++;
@@ -182,11 +183,11 @@ static u32 ath9k_hw_4k_get_eeprom(struct ath_hw *ah,
switch (param) {
case EEP_NFTHRESH_2:
return pModal->noiseFloorThreshCh[0];
- case AR_EEPROM_MAC(0):
+ case EEP_MAC_LSW:
return pBase->macAddr[0] << 8 | pBase->macAddr[1];
- case AR_EEPROM_MAC(1):
+ case EEP_MAC_MID:
return pBase->macAddr[2] << 8 | pBase->macAddr[3];
- case AR_EEPROM_MAC(2):
+ case EEP_MAC_MSW:
return pBase->macAddr[4] << 8 | pBase->macAddr[5];
case EEP_REG_0:
return pBase->regDmn[0];
@@ -453,6 +454,8 @@ static void ath9k_hw_set_4k_power_cal_table(struct ath_hw *ah,
&tMinCalPower, gainBoundaries,
pdadcValues, numXpdGain);
+ ENABLE_REGWRITE_BUFFER(ah);
+
if ((i == 0) || AR_SREV_5416_20_OR_LATER(ah)) {
REG_WRITE(ah, AR_PHY_TPCRG5 + regChainOffset,
SM(pdGainOverlap_t2,
@@ -493,6 +496,9 @@ static void ath9k_hw_set_4k_power_cal_table(struct ath_hw *ah,
regOffset += 4;
}
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
}
}
@@ -758,6 +764,8 @@ static void ath9k_hw_4k_set_txpower(struct ath_hw *ah,
ratesArray[i] -= AR5416_PWR_TABLE_OFFSET_DB * 2;
}
+ ENABLE_REGWRITE_BUFFER(ah);
+
/* OFDM power per rate */
REG_WRITE(ah, AR_PHY_POWER_TX_RATE1,
ATH9K_POW_SM(ratesArray[rate18mb], 24)
@@ -820,6 +828,9 @@ static void ath9k_hw_4k_set_txpower(struct ath_hw *ah,
| ATH9K_POW_SM(ratesArray[rateDupOfdm], 8)
| ATH9K_POW_SM(ratesArray[rateDupCck], 0));
}
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
}
static void ath9k_hw_4k_set_addac(struct ath_hw *ah,
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
index 839d05a1df29..b471db5fb82d 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
@@ -15,6 +15,7 @@
*/
#include "hw.h"
+#include "ar9002_phy.h"
static int ath9k_hw_AR9287_get_eeprom_ver(struct ath_hw *ah)
{
@@ -44,7 +45,7 @@ static bool ath9k_hw_AR9287_fill_eeprom(struct ath_hw *ah)
if (!ath9k_hw_nvram_read(common,
addr + eep_start_loc, eep_data)) {
ath_print(common, ATH_DBG_EEPROM,
- "Unable to read eeprom region \n");
+ "Unable to read eeprom region\n");
return false;
}
eep_data++;
@@ -172,11 +173,11 @@ static u32 ath9k_hw_AR9287_get_eeprom(struct ath_hw *ah,
switch (param) {
case EEP_NFTHRESH_2:
return pModal->noiseFloorThreshCh[0];
- case AR_EEPROM_MAC(0):
+ case EEP_MAC_LSW:
return pBase->macAddr[0] << 8 | pBase->macAddr[1];
- case AR_EEPROM_MAC(1):
+ case EEP_MAC_MID:
return pBase->macAddr[2] << 8 | pBase->macAddr[3];
- case AR_EEPROM_MAC(2):
+ case EEP_MAC_MSW:
return pBase->macAddr[4] << 8 | pBase->macAddr[5];
case EEP_REG_0:
return pBase->regDmn[0];
@@ -1169,7 +1170,7 @@ static u16 ath9k_hw_AR9287_get_spur_channel(struct ath_hw *ah,
#undef EEP_MAP9287_SPURCHAN
}
-const struct eeprom_ops eep_AR9287_ops = {
+const struct eeprom_ops eep_ar9287_ops = {
.check_eeprom = ath9k_hw_AR9287_check_eeprom,
.get_eeprom = ath9k_hw_AR9287_get_eeprom,
.fill_eeprom = ath9k_hw_AR9287_fill_eeprom,
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
index 404a0341242c..7e1ed78d0e64 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -15,6 +15,7 @@
*/
#include "hw.h"
+#include "ar9002_phy.h"
static void ath9k_get_txgain_index(struct ath_hw *ah,
struct ath9k_channel *chan,
@@ -49,7 +50,6 @@ static void ath9k_get_txgain_index(struct ath_hw *ah,
i++;
*pcdacIdx = i;
- return;
}
static void ath9k_olc_get_pdadcs(struct ath_hw *ah,
@@ -222,6 +222,12 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
return -EINVAL;
}
+ /* Enable fixup for AR_AN_TOP2 if necessary */
+ if (AR_SREV_9280_10_OR_LATER(ah) &&
+ (eep->baseEepHeader.version & 0xff) > 0x0a &&
+ eep->baseEepHeader.pwdclkind == 0)
+ ah->need_an_top2_fixup = 1;
+
return 0;
}
@@ -237,11 +243,11 @@ static u32 ath9k_hw_def_get_eeprom(struct ath_hw *ah,
return pModal[0].noiseFloorThreshCh[0];
case EEP_NFTHRESH_2:
return pModal[1].noiseFloorThreshCh[0];
- case AR_EEPROM_MAC(0):
+ case EEP_MAC_LSW:
return pBase->macAddr[0] << 8 | pBase->macAddr[1];
- case AR_EEPROM_MAC(1):
+ case EEP_MAC_MID:
return pBase->macAddr[2] << 8 | pBase->macAddr[3];
- case AR_EEPROM_MAC(2):
+ case EEP_MAC_MSW:
return pBase->macAddr[4] << 8 | pBase->macAddr[5];
case EEP_REG_0:
return pBase->regDmn[0];
@@ -267,6 +273,8 @@ static u32 ath9k_hw_def_get_eeprom(struct ath_hw *ah,
return pBase->txMask;
case EEP_RX_MASK:
return pBase->rxMask;
+ case EEP_FSTCLK_5G:
+ return pBase->fastClk5g;
case EEP_RXGAIN_TYPE:
return pBase->rxGainType;
case EEP_TXGAIN_TYPE:
@@ -742,8 +750,6 @@ static void ath9k_hw_get_def_gain_boundaries_pdadcs(struct ath_hw *ah,
pPDADCValues[k] = pPDADCValues[k - 1];
k++;
}
-
- return;
}
static int16_t ath9k_change_gain_boundary_setting(struct ath_hw *ah,
diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c
index deab8beb0680..0ee75e79fe35 100644
--- a/drivers/net/wireless/ath/ath9k/gpio.c
+++ b/drivers/net/wireless/ath/ath9k/gpio.c
@@ -283,22 +283,17 @@ static void ath9k_gen_timer_start(struct ath_hw *ah,
u32 timer_next,
u32 timer_period)
{
- struct ath_common *common = ath9k_hw_common(ah);
- struct ath_softc *sc = (struct ath_softc *) common->priv;
-
ath9k_hw_gen_timer_start(ah, timer, timer_next, timer_period);
- if ((sc->imask & ATH9K_INT_GENTIMER) == 0) {
+ if ((ah->imask & ATH9K_INT_GENTIMER) == 0) {
ath9k_hw_set_interrupts(ah, 0);
- sc->imask |= ATH9K_INT_GENTIMER;
- ath9k_hw_set_interrupts(ah, sc->imask);
+ ah->imask |= ATH9K_INT_GENTIMER;
+ ath9k_hw_set_interrupts(ah, ah->imask);
}
}
static void ath9k_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer)
{
- struct ath_common *common = ath9k_hw_common(ah);
- struct ath_softc *sc = (struct ath_softc *) common->priv;
struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers;
ath9k_hw_gen_timer_stop(ah, timer);
@@ -306,8 +301,8 @@ static void ath9k_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer)
/* if no timer is enabled, turn off interrupt mask */
if (timer_table->timer_mask.val == 0) {
ath9k_hw_set_interrupts(ah, 0);
- sc->imask &= ~ATH9K_INT_GENTIMER;
- ath9k_hw_set_interrupts(ah, sc->imask);
+ ah->imask &= ~ATH9K_INT_GENTIMER;
+ ath9k_hw_set_interrupts(ah, ah->imask);
}
}
@@ -364,7 +359,7 @@ static void ath_btcoex_no_stomp_timer(void *arg)
bool is_btscan = sc->sc_flags & SC_OP_BT_SCAN;
ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
- "no stomp timer running \n");
+ "no stomp timer running\n");
spin_lock_bh(&btcoex->btcoex_lock);
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
new file mode 100644
index 000000000000..77b359162d6c
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -0,0 +1,1014 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "htc.h"
+
+#define ATH9K_FW_USB_DEV(devid, fw) \
+ { USB_DEVICE(0x0cf3, devid), .driver_info = (unsigned long) fw }
+
+static struct usb_device_id ath9k_hif_usb_ids[] = {
+ ATH9K_FW_USB_DEV(0x9271, "ar9271.fw"),
+ ATH9K_FW_USB_DEV(0x1006, "ar9271.fw"),
+ { },
+};
+
+MODULE_DEVICE_TABLE(usb, ath9k_hif_usb_ids);
+
+static int __hif_usb_tx(struct hif_device_usb *hif_dev);
+
+static void hif_usb_regout_cb(struct urb *urb)
+{
+ struct cmd_buf *cmd = (struct cmd_buf *)urb->context;
+
+ switch (urb->status) {
+ case 0:
+ break;
+ case -ENOENT:
+ case -ECONNRESET:
+ case -ENODEV:
+ case -ESHUTDOWN:
+ goto free;
+ default:
+ break;
+ }
+
+ if (cmd) {
+ ath9k_htc_txcompletion_cb(cmd->hif_dev->htc_handle,
+ cmd->skb, 1);
+ kfree(cmd);
+ }
+
+ return;
+free:
+ kfree_skb(cmd->skb);
+ kfree(cmd);
+}
+
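+/*
+ * Register writes go out as single interrupt transfers on the
+ * REG_OUT pipe; the URB callback reports completion to HTC via
+ * ath9k_htc_txcompletion_cb().
+ */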
+static int hif_usb_send_regout(struct hif_device_usb *hif_dev,
+ struct sk_buff *skb)
+{
+ struct urb *urb;
+ struct cmd_buf *cmd;
+ int ret = 0;
+
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (urb == NULL)
+ return -ENOMEM;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (cmd == NULL) {
+ usb_free_urb(urb);
+ return -ENOMEM;
+ }
+
+ cmd->skb = skb;
+ cmd->hif_dev = hif_dev;
+
+ usb_fill_int_urb(urb, hif_dev->udev,
+ usb_sndintpipe(hif_dev->udev, USB_REG_OUT_PIPE),
+ skb->data, skb->len,
+ hif_usb_regout_cb, cmd, 1);
+
+ usb_anchor_urb(urb, &hif_dev->regout_submitted);
+ ret = usb_submit_urb(urb, GFP_KERNEL);
+ if (ret) {
+ usb_unanchor_urb(urb);
+ kfree(cmd);
+ }
+ usb_free_urb(urb);
+
+ return ret;
+}
+
+static inline void ath9k_skb_queue_purge(struct hif_device_usb *hif_dev,
+ struct sk_buff_head *list)
+{
+ struct sk_buff *skb;
+
+ while ((skb = __skb_dequeue(list)) != NULL) {
+ dev_kfree_skb_any(skb);
+ TX_STAT_INC(skb_dropped);
+ }
+}
+
+static void hif_usb_tx_cb(struct urb *urb)
+{
+ struct tx_buf *tx_buf = (struct tx_buf *) urb->context;
+ struct hif_device_usb *hif_dev;
+ struct sk_buff *skb;
+
+ if (!tx_buf || !tx_buf->hif_dev)
+ return;
+
+ hif_dev = tx_buf->hif_dev;
+
+ switch (urb->status) {
+ case 0:
+ break;
+ case -ENOENT:
+ case -ECONNRESET:
+ case -ENODEV:
+ case -ESHUTDOWN:
+ /*
+ * The URB has been killed, free the SKBs
+ * and return.
+ */
+ ath9k_skb_queue_purge(hif_dev, &tx_buf->skb_queue);
+ return;
+ default:
+ break;
+ }
+
+ /* Check if TX has been stopped */
+ spin_lock(&hif_dev->tx.tx_lock);
+ if (hif_dev->tx.flags & HIF_USB_TX_STOP) {
+ spin_unlock(&hif_dev->tx.tx_lock);
+ ath9k_skb_queue_purge(hif_dev, &tx_buf->skb_queue);
+ goto add_free;
+ }
+ spin_unlock(&hif_dev->tx.tx_lock);
+
+ /* Complete the queued SKBs. */
+ while ((skb = __skb_dequeue(&tx_buf->skb_queue)) != NULL) {
+ ath9k_htc_txcompletion_cb(hif_dev->htc_handle,
+ skb, 1);
+ TX_STAT_INC(skb_completed);
+ }
+
+add_free:
+ /* Re-initialize the SKB queue */
+ tx_buf->len = tx_buf->offset = 0;
+ __skb_queue_head_init(&tx_buf->skb_queue);
+
+ /* Add this TX buffer to the free list */
+ spin_lock(&hif_dev->tx.tx_lock);
+ list_move_tail(&tx_buf->list, &hif_dev->tx.tx_buf);
+ hif_dev->tx.tx_buf_cnt++;
+ if (!(hif_dev->tx.flags & HIF_USB_TX_STOP))
+ __hif_usb_tx(hif_dev); /* Check for pending SKBs */
+ TX_STAT_INC(buf_completed);
+ spin_unlock(&hif_dev->tx.tx_lock);
+}
+
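+/*
+ * Aggregate queued SKBs into one bulk transfer. Each frame is
+ * prefixed with a 4-byte stream header (u16 length, u16
+ * ATH_USB_TX_STREAM_MODE_TAG) and frames are 4-byte aligned within
+ * the buffer; up to MAX_TX_AGGR_NUM frames are packed per URB.
+ */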
+/* TX lock has to be taken */
+static int __hif_usb_tx(struct hif_device_usb *hif_dev)
+{
+ struct tx_buf *tx_buf = NULL;
+ struct sk_buff *nskb = NULL;
+ int ret = 0, i;
+ u16 *hdr, tx_skb_cnt = 0;
+ u8 *buf;
+
+ if (hif_dev->tx.tx_skb_cnt == 0)
+ return 0;
+
+ /* Check if a free TX buffer is available */
+ if (list_empty(&hif_dev->tx.tx_buf))
+ return 0;
+
+ tx_buf = list_first_entry(&hif_dev->tx.tx_buf, struct tx_buf, list);
+ list_move_tail(&tx_buf->list, &hif_dev->tx.tx_pending);
+ hif_dev->tx.tx_buf_cnt--;
+
+ tx_skb_cnt = min_t(u16, hif_dev->tx.tx_skb_cnt, MAX_TX_AGGR_NUM);
+
+ for (i = 0; i < tx_skb_cnt; i++) {
+ nskb = __skb_dequeue(&hif_dev->tx.tx_skb_queue);
+
+ /* Should never be NULL */
+ BUG_ON(!nskb);
+
+ hif_dev->tx.tx_skb_cnt--;
+
+ buf = tx_buf->buf;
+ buf += tx_buf->offset;
+ hdr = (u16 *)buf;
+ *hdr++ = nskb->len;
+ *hdr++ = ATH_USB_TX_STREAM_MODE_TAG;
+ buf += 4;
+ memcpy(buf, nskb->data, nskb->len);
+ tx_buf->len = nskb->len + 4;
+
+ if (i < (tx_skb_cnt - 1))
+ tx_buf->offset += (((tx_buf->len - 1) / 4) + 1) * 4;
+
+ if (i == (tx_skb_cnt - 1))
+ tx_buf->len += tx_buf->offset;
+
+ __skb_queue_tail(&tx_buf->skb_queue, nskb);
+ TX_STAT_INC(skb_queued);
+ }
+
+ usb_fill_bulk_urb(tx_buf->urb, hif_dev->udev,
+ usb_sndbulkpipe(hif_dev->udev, USB_WLAN_TX_PIPE),
+ tx_buf->buf, tx_buf->len,
+ hif_usb_tx_cb, tx_buf);
+
+ ret = usb_submit_urb(tx_buf->urb, GFP_ATOMIC);
+ if (ret) {
+ tx_buf->len = tx_buf->offset = 0;
+ ath9k_skb_queue_purge(hif_dev, &tx_buf->skb_queue);
+ __skb_queue_head_init(&tx_buf->skb_queue);
+ list_move_tail(&tx_buf->list, &hif_dev->tx.tx_buf);
+ hif_dev->tx.tx_buf_cnt++;
+ }
+
+ if (!ret)
+ TX_STAT_INC(buf_queued);
+
+ return ret;
+}
+
+static int hif_usb_send_tx(struct hif_device_usb *hif_dev, struct sk_buff *skb,
+ struct ath9k_htc_tx_ctl *tx_ctl)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
+
+ if (hif_dev->tx.flags & HIF_USB_TX_STOP) {
+ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
+ return -ENODEV;
+ }
+
+ /* Check if the max queue count has been reached */
+ if (hif_dev->tx.tx_skb_cnt > MAX_TX_BUF_NUM) {
+ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
+ return -ENOMEM;
+ }
+
+ __skb_queue_tail(&hif_dev->tx.tx_skb_queue, skb);
+ hif_dev->tx.tx_skb_cnt++;
+
+ /* Send normal frames immediately */
+ if (!tx_ctl || (tx_ctl->type == ATH9K_HTC_NORMAL))
+ __hif_usb_tx(hif_dev);
+
+ /* Check if AMPDUs have to be sent immediately */
+ if (tx_ctl && (tx_ctl->type == ATH9K_HTC_AMPDU) &&
+ (hif_dev->tx.tx_buf_cnt == MAX_TX_URB_NUM) &&
+ (hif_dev->tx.tx_skb_cnt < 2)) {
+ __hif_usb_tx(hif_dev);
+ }
+
+ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
+
+ return 0;
+}
+
+static void hif_usb_start(void *hif_handle, u8 pipe_id)
+{
+ struct hif_device_usb *hif_dev = (struct hif_device_usb *)hif_handle;
+ unsigned long flags;
+
+ hif_dev->flags |= HIF_USB_START;
+
+ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
+ hif_dev->tx.flags &= ~HIF_USB_TX_STOP;
+ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
+}
+
+static void hif_usb_stop(void *hif_handle, u8 pipe_id)
+{
+ struct hif_device_usb *hif_dev = (struct hif_device_usb *)hif_handle;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
+ ath9k_skb_queue_purge(hif_dev, &hif_dev->tx.tx_skb_queue);
+ hif_dev->tx.tx_skb_cnt = 0;
+ hif_dev->tx.flags |= HIF_USB_TX_STOP;
+ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
+}
+
+static int hif_usb_send(void *hif_handle, u8 pipe_id, struct sk_buff *skb,
+ struct ath9k_htc_tx_ctl *tx_ctl)
+{
+ struct hif_device_usb *hif_dev = (struct hif_device_usb *)hif_handle;
+ int ret = 0;
+
+ switch (pipe_id) {
+ case USB_WLAN_TX_PIPE:
+ ret = hif_usb_send_tx(hif_dev, skb, tx_ctl);
+ break;
+ case USB_REG_OUT_PIPE:
+ ret = hif_usb_send_regout(hif_dev, skb);
+ break;
+ default:
+ dev_err(&hif_dev->udev->dev,
+ "ath9k_htc: Invalid TX pipe: %d\n", pipe_id);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
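+/*
+ * HIF operations registered with the HTC layer: start/stop gate the
+ * TX path, and send() routes frames to either the bulk WLAN TX pipe
+ * or the interrupt REG_OUT pipe based on the pipe id.
+ */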
+static struct ath9k_htc_hif hif_usb = {
+ .transport = ATH9K_HIF_USB,
+ .name = "ath9k_hif_usb",
+
+ .control_ul_pipe = USB_REG_OUT_PIPE,
+ .control_dl_pipe = USB_REG_IN_PIPE,
+
+ .start = hif_usb_start,
+ .stop = hif_usb_stop,
+ .send = hif_usb_send,
+};
+
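+/*
+ * De-aggregate a stream-mode RX transfer. Each packet starts with a
+ * 4-byte header (u16 length, u16 ATH_USB_RX_STREAM_MODE_TAG) and is
+ * 4-byte aligned; a packet split across two transfers is parked in
+ * remain_skb and completed when the remainder arrives. Complete
+ * packets are handed to ath9k_htc_rx_msg().
+ */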
+static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev,
+ struct sk_buff *skb)
+{
+ struct sk_buff *nskb, *skb_pool[MAX_PKT_NUM_IN_TRANSFER];
+ int index = 0, i = 0, chk_idx, len = skb->len;
+ int rx_remain_len = 0, rx_pkt_len = 0;
+ u16 pkt_len, pkt_tag, pool_index = 0;
+ u8 *ptr;
+
+ spin_lock(&hif_dev->rx_lock);
+
+ rx_remain_len = hif_dev->rx_remain_len;
+ rx_pkt_len = hif_dev->rx_transfer_len;
+
+ if (rx_remain_len != 0) {
+ struct sk_buff *remain_skb = hif_dev->remain_skb;
+
+ if (remain_skb) {
+ ptr = (u8 *) remain_skb->data;
+
+ index = rx_remain_len;
+ rx_remain_len -= hif_dev->rx_pad_len;
+ ptr += rx_pkt_len;
+
+ memcpy(ptr, skb->data, rx_remain_len);
+
+ rx_pkt_len += rx_remain_len;
+ hif_dev->rx_remain_len = 0;
+ skb_put(remain_skb, rx_pkt_len);
+
+ skb_pool[pool_index++] = remain_skb;
+
+ } else {
+ index = rx_remain_len;
+ }
+ }
+
+ spin_unlock(&hif_dev->rx_lock);
+
+ while (index < len) {
+ ptr = (u8 *) skb->data;
+
+ pkt_len = ptr[index] + (ptr[index+1] << 8);
+ pkt_tag = ptr[index+2] + (ptr[index+3] << 8);
+
+ if (pkt_tag == ATH_USB_RX_STREAM_MODE_TAG) {
+ u16 pad_len;
+
+ pad_len = 4 - (pkt_len & 0x3);
+ if (pad_len == 4)
+ pad_len = 0;
+
+ chk_idx = index;
+ index = index + 4 + pkt_len + pad_len;
+
+ if (index > MAX_RX_BUF_SIZE) {
+ spin_lock(&hif_dev->rx_lock);
+ hif_dev->rx_remain_len = index - MAX_RX_BUF_SIZE;
+ hif_dev->rx_transfer_len =
+ MAX_RX_BUF_SIZE - chk_idx - 4;
+ hif_dev->rx_pad_len = pad_len;
+
+ nskb = __dev_alloc_skb(pkt_len + 32,
+ GFP_ATOMIC);
+ if (!nskb) {
+ dev_err(&hif_dev->udev->dev,
+ "ath9k_htc: RX memory allocation"
+ " error\n");
+ spin_unlock(&hif_dev->rx_lock);
+ goto err;
+ }
+ skb_reserve(nskb, 32);
+ RX_STAT_INC(skb_allocated);
+
+ memcpy(nskb->data, &(skb->data[chk_idx+4]),
+ hif_dev->rx_transfer_len);
+
+ /* Record the buffer pointer */
+ hif_dev->remain_skb = nskb;
+ spin_unlock(&hif_dev->rx_lock);
+ } else {
+ nskb = __dev_alloc_skb(pkt_len + 32, GFP_ATOMIC);
+ if (!nskb) {
+ dev_err(&hif_dev->udev->dev,
+ "ath9k_htc: RX memory allocation"
+ " error\n");
+ goto err;
+ }
+ skb_reserve(nskb, 32);
+ RX_STAT_INC(skb_allocated);
+
+ memcpy(nskb->data, &(skb->data[chk_idx+4]), pkt_len);
+ skb_put(nskb, pkt_len);
+ skb_pool[pool_index++] = nskb;
+ }
+ } else {
+ RX_STAT_INC(skb_dropped);
+ return;
+ }
+ }
+
+err:
+ for (i = 0; i < pool_index; i++) {
+ ath9k_htc_rx_msg(hif_dev->htc_handle, skb_pool[i],
+ skb_pool[i]->len, USB_WLAN_RX_PIPE);
+ RX_STAT_INC(skb_completed);
+ }
+}
+
+static void ath9k_hif_usb_rx_cb(struct urb *urb)
+{
+ struct sk_buff *skb = (struct sk_buff *) urb->context;
+ struct hif_device_usb *hif_dev = (struct hif_device_usb *)
+ usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
+ int ret;
+
+ if (!skb)
+ return;
+
+ if (!hif_dev)
+ goto free;
+
+ switch (urb->status) {
+ case 0:
+ break;
+ case -ENOENT:
+ case -ECONNRESET:
+ case -ENODEV:
+ case -ESHUTDOWN:
+ goto free;
+ default:
+ goto resubmit;
+ }
+
+ if (likely(urb->actual_length != 0)) {
+ skb_put(skb, urb->actual_length);
+ ath9k_hif_usb_rx_stream(hif_dev, skb);
+ }
+
+resubmit:
+ skb_reset_tail_pointer(skb);
+ skb_trim(skb, 0);
+
+ usb_anchor_urb(urb, &hif_dev->rx_submitted);
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+ if (ret) {
+ usb_unanchor_urb(urb);
+ goto free;
+ }
+
+ return;
+free:
+ kfree_skb(skb);
+}
+
+static void ath9k_hif_usb_reg_in_cb(struct urb *urb)
+{
+ struct sk_buff *skb = (struct sk_buff *) urb->context;
+ struct sk_buff *nskb;
+ struct hif_device_usb *hif_dev = (struct hif_device_usb *)
+ usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
+ int ret;
+
+ if (!skb)
+ return;
+
+ if (!hif_dev)
+ goto free;
+
+ switch (urb->status) {
+ case 0:
+ break;
+ case -ENOENT:
+ case -ECONNRESET:
+ case -ENODEV:
+ case -ESHUTDOWN:
+ goto free;
+ default:
+ goto resubmit;
+ }
+
+ if (likely(urb->actual_length != 0)) {
+ skb_put(skb, urb->actual_length);
+
+ /* Process the command first */
+ ath9k_htc_rx_msg(hif_dev->htc_handle, skb,
+ skb->len, USB_REG_IN_PIPE);
+
+ nskb = alloc_skb(MAX_REG_IN_BUF_SIZE, GFP_ATOMIC);
+ if (!nskb) {
+ dev_err(&hif_dev->udev->dev,
+ "ath9k_htc: REG_IN memory allocation failure\n");
+ urb->context = NULL;
+ return;
+ }
+
+ usb_fill_int_urb(urb, hif_dev->udev,
+ usb_rcvintpipe(hif_dev->udev, USB_REG_IN_PIPE),
+ nskb->data, MAX_REG_IN_BUF_SIZE,
+ ath9k_hif_usb_reg_in_cb, nskb, 1);
+
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+ if (ret) {
+ kfree_skb(nskb);
+ urb->context = NULL;
+ }
+
+ return;
+ }
+
+resubmit:
+ skb_reset_tail_pointer(skb);
+ skb_trim(skb, 0);
+
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+ if (ret)
+ goto free;
+
+ return;
+free:
+ kfree_skb(skb);
+ urb->context = NULL;
+}
+
+static void ath9k_hif_usb_dealloc_tx_urbs(struct hif_device_usb *hif_dev)
+{
+ struct tx_buf *tx_buf = NULL, *tx_buf_tmp = NULL;
+
+ list_for_each_entry_safe(tx_buf, tx_buf_tmp,
+ &hif_dev->tx.tx_buf, list) {
+ usb_kill_urb(tx_buf->urb);
+ list_del(&tx_buf->list);
+ usb_free_urb(tx_buf->urb);
+ kfree(tx_buf->buf);
+ kfree(tx_buf);
+ }
+
+ list_for_each_entry_safe(tx_buf, tx_buf_tmp,
+ &hif_dev->tx.tx_pending, list) {
+ usb_kill_urb(tx_buf->urb);
+ list_del(&tx_buf->list);
+ usb_free_urb(tx_buf->urb);
+ kfree(tx_buf->buf);
+ kfree(tx_buf);
+ }
+}
+
+static int ath9k_hif_usb_alloc_tx_urbs(struct hif_device_usb *hif_dev)
+{
+ struct tx_buf *tx_buf;
+ int i;
+
+ INIT_LIST_HEAD(&hif_dev->tx.tx_buf);
+ INIT_LIST_HEAD(&hif_dev->tx.tx_pending);
+ spin_lock_init(&hif_dev->tx.tx_lock);
+ __skb_queue_head_init(&hif_dev->tx.tx_skb_queue);
+
+ for (i = 0; i < MAX_TX_URB_NUM; i++) {
+ tx_buf = kzalloc(sizeof(struct tx_buf), GFP_KERNEL);
+ if (!tx_buf)
+ goto err;
+
+ tx_buf->buf = kzalloc(MAX_TX_BUF_SIZE, GFP_KERNEL);
+ if (!tx_buf->buf)
+ goto err;
+
+ tx_buf->urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!tx_buf->urb)
+ goto err;
+
+ tx_buf->hif_dev = hif_dev;
+ __skb_queue_head_init(&tx_buf->skb_queue);
+
+ list_add_tail(&tx_buf->list, &hif_dev->tx.tx_buf);
+ }
+
+ hif_dev->tx.tx_buf_cnt = MAX_TX_URB_NUM;
+
+ return 0;
+err:
+ if (tx_buf) {
+ kfree(tx_buf->buf);
+ kfree(tx_buf);
+ }
+ ath9k_hif_usb_dealloc_tx_urbs(hif_dev);
+ return -ENOMEM;
+}
+
+static void ath9k_hif_usb_dealloc_rx_urbs(struct hif_device_usb *hif_dev)
+{
+ usb_kill_anchored_urbs(&hif_dev->rx_submitted);
+}
+
+static int ath9k_hif_usb_alloc_rx_urbs(struct hif_device_usb *hif_dev)
+{
+ struct urb *urb = NULL;
+ struct sk_buff *skb = NULL;
+ int i, ret;
+
+ init_usb_anchor(&hif_dev->rx_submitted);
+ spin_lock_init(&hif_dev->rx_lock);
+
+ for (i = 0; i < MAX_RX_URB_NUM; i++) {
+
+ /* Allocate URB */
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (urb == NULL) {
+ ret = -ENOMEM;
+ goto err_urb;
+ }
+
+ /* Allocate buffer */
+ skb = alloc_skb(MAX_RX_BUF_SIZE, GFP_KERNEL);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto err_skb;
+ }
+
+ usb_fill_bulk_urb(urb, hif_dev->udev,
+ usb_rcvbulkpipe(hif_dev->udev,
+ USB_WLAN_RX_PIPE),
+ skb->data, MAX_RX_BUF_SIZE,
+ ath9k_hif_usb_rx_cb, skb);
+
+ /* Anchor URB */
+ usb_anchor_urb(urb, &hif_dev->rx_submitted);
+
+ /* Submit URB */
+ ret = usb_submit_urb(urb, GFP_KERNEL);
+ if (ret) {
+ usb_unanchor_urb(urb);
+ goto err_submit;
+ }
+
+ /*
+ * Drop the reference count.
+ * This ensures the URB is freed when the anchored
+ * URBs are killed.
+ */
+ usb_free_urb(urb);
+ }
+
+ return 0;
+
+err_submit:
+ kfree_skb(skb);
+err_skb:
+ usb_free_urb(urb);
+err_urb:
+ ath9k_hif_usb_dealloc_rx_urbs(hif_dev);
+ return ret;
+}
+
+static void ath9k_hif_usb_dealloc_reg_in_urb(struct hif_device_usb *hif_dev)
+{
+ if (hif_dev->reg_in_urb) {
+ usb_kill_urb(hif_dev->reg_in_urb);
+ if (hif_dev->reg_in_urb->context)
+ kfree_skb((void *)hif_dev->reg_in_urb->context);
+ usb_free_urb(hif_dev->reg_in_urb);
+ hif_dev->reg_in_urb = NULL;
+ }
+}
+
+static int ath9k_hif_usb_alloc_reg_in_urb(struct hif_device_usb *hif_dev)
+{
+ struct sk_buff *skb;
+
+ hif_dev->reg_in_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (hif_dev->reg_in_urb == NULL)
+ return -ENOMEM;
+
+ skb = alloc_skb(MAX_REG_IN_BUF_SIZE, GFP_KERNEL);
+ if (!skb)
+ goto err;
+
+ usb_fill_int_urb(hif_dev->reg_in_urb, hif_dev->udev,
+ usb_rcvintpipe(hif_dev->udev, USB_REG_IN_PIPE),
+ skb->data, MAX_REG_IN_BUF_SIZE,
+ ath9k_hif_usb_reg_in_cb, skb, 1);
+
+ if (usb_submit_urb(hif_dev->reg_in_urb, GFP_KERNEL) != 0)
+ goto err;
+
+ return 0;
+
+err:
+ ath9k_hif_usb_dealloc_reg_in_urb(hif_dev);
+ return -ENOMEM;
+}
+
+static int ath9k_hif_usb_alloc_urbs(struct hif_device_usb *hif_dev)
+{
+ /* Register Write */
+ init_usb_anchor(&hif_dev->regout_submitted);
+
+ /* TX */
+ if (ath9k_hif_usb_alloc_tx_urbs(hif_dev) < 0)
+ goto err;
+
+ /* RX */
+ if (ath9k_hif_usb_alloc_rx_urbs(hif_dev) < 0)
+ goto err;
+
+ /* Register Read */
+ if (ath9k_hif_usb_alloc_reg_in_urb(hif_dev) < 0)
+ goto err;
+
+ return 0;
+err:
+ return -ENOMEM;
+}
+
+static void ath9k_hif_usb_dealloc_urbs(struct hif_device_usb *hif_dev)
+{
+ usb_kill_anchored_urbs(&hif_dev->regout_submitted);
+ ath9k_hif_usb_dealloc_reg_in_urb(hif_dev);
+ ath9k_hif_usb_dealloc_tx_urbs(hif_dev);
+ ath9k_hif_usb_dealloc_rx_urbs(hif_dev);
+}
+
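+/*
+ * Push the firmware image to the target in 4 KB chunks using vendor
+ * control transfers (FIRMWARE_DOWNLOAD, wValue = addr >> 8) starting
+ * at AR9271_FIRMWARE, then send FIRMWARE_DOWNLOAD_COMP with the text
+ * address to signal completion.
+ */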
+static int ath9k_hif_usb_download_fw(struct hif_device_usb *hif_dev)
+{
+ int transfer, err;
+ const void *data = hif_dev->firmware->data;
+ size_t len = hif_dev->firmware->size;
+ u32 addr = AR9271_FIRMWARE;
+ u8 *buf = kzalloc(4096, GFP_KERNEL);
+
+ if (!buf)
+ return -ENOMEM;
+
+ while (len) {
+ transfer = min_t(int, len, 4096);
+ memcpy(buf, data, transfer);
+
+ err = usb_control_msg(hif_dev->udev,
+ usb_sndctrlpipe(hif_dev->udev, 0),
+ FIRMWARE_DOWNLOAD, 0x40 | USB_DIR_OUT,
+ addr >> 8, 0, buf, transfer, HZ);
+ if (err < 0) {
+ kfree(buf);
+ return err;
+ }
+
+ len -= transfer;
+ data += transfer;
+ addr += transfer;
+ }
+ kfree(buf);
+
+ /*
+ * Issue FW download complete command to firmware.
+ */
+ err = usb_control_msg(hif_dev->udev, usb_sndctrlpipe(hif_dev->udev, 0),
+ FIRMWARE_DOWNLOAD_COMP,
+ 0x40 | USB_DIR_OUT,
+ AR9271_FIRMWARE_TEXT >> 8, 0, NULL, 0, HZ);
+ if (err)
+ return -EIO;
+
+ dev_info(&hif_dev->udev->dev, "ath9k_htc: Transferred FW: %s, size: %lu\n",
+ "ar9271.fw", (unsigned long) hif_dev->firmware->size);
+
+ return 0;
+}
+
+static int ath9k_hif_usb_dev_init(struct hif_device_usb *hif_dev,
+ const char *fw_name)
+{
+ int ret;
+
+ /* Request firmware */
+ ret = request_firmware(&hif_dev->firmware, fw_name, &hif_dev->udev->dev);
+ if (ret) {
+ dev_err(&hif_dev->udev->dev,
+ "ath9k_htc: Firmware - %s not found\n", fw_name);
+ goto err_fw_req;
+ }
+
+ /* Alloc URBs */
+ ret = ath9k_hif_usb_alloc_urbs(hif_dev);
+ if (ret) {
+ dev_err(&hif_dev->udev->dev,
+ "ath9k_htc: Unable to allocate URBs\n");
+ goto err_urb;
+ }
+
+ /* Download firmware */
+ ret = ath9k_hif_usb_download_fw(hif_dev);
+ if (ret) {
+ dev_err(&hif_dev->udev->dev,
+ "ath9k_htc: Firmware - %s download failed\n", fw_name);
+ goto err_fw_download;
+ }
+
+ return 0;
+
+err_fw_download:
+ ath9k_hif_usb_dealloc_urbs(hif_dev);
+err_urb:
+ release_firmware(hif_dev->firmware);
+err_fw_req:
+ hif_dev->firmware = NULL;
+ return ret;
+}
+
+static void ath9k_hif_usb_dev_deinit(struct hif_device_usb *hif_dev)
+{
+ ath9k_hif_usb_dealloc_urbs(hif_dev);
+ if (hif_dev->firmware)
+ release_firmware(hif_dev->firmware);
+}
+
+static int ath9k_hif_usb_probe(struct usb_interface *interface,
+ const struct usb_device_id *id)
+{
+ struct usb_device *udev = interface_to_usbdev(interface);
+ struct hif_device_usb *hif_dev;
+ const char *fw_name = (const char *) id->driver_info;
+ int ret = 0;
+
+ hif_dev = kzalloc(sizeof(struct hif_device_usb), GFP_KERNEL);
+ if (!hif_dev) {
+ ret = -ENOMEM;
+ goto err_alloc;
+ }
+
+ usb_get_dev(udev);
+ hif_dev->udev = udev;
+ hif_dev->interface = interface;
+ hif_dev->device_id = id->idProduct;
+#ifdef CONFIG_PM
+ udev->reset_resume = 1;
+#endif
+ usb_set_intfdata(interface, hif_dev);
+
+ hif_dev->htc_handle = ath9k_htc_hw_alloc(hif_dev, &hif_usb,
+ &hif_dev->udev->dev);
+ if (hif_dev->htc_handle == NULL) {
+ ret = -ENOMEM;
+ goto err_htc_hw_alloc;
+ }
+
+ ret = ath9k_hif_usb_dev_init(hif_dev, fw_name);
+ if (ret) {
+ ret = -EINVAL;
+ goto err_hif_init_usb;
+ }
+
+ ret = ath9k_htc_hw_init(hif_dev->htc_handle,
+ &hif_dev->udev->dev, hif_dev->device_id);
+ if (ret) {
+ ret = -EINVAL;
+ goto err_htc_hw_init;
+ }
+
+ dev_info(&hif_dev->udev->dev, "ath9k_htc: USB layer initialized\n");
+
+ return 0;
+
+err_htc_hw_init:
+ ath9k_hif_usb_dev_deinit(hif_dev);
+err_hif_init_usb:
+ ath9k_htc_hw_free(hif_dev->htc_handle);
+err_htc_hw_alloc:
+ usb_set_intfdata(interface, NULL);
+ kfree(hif_dev);
+ usb_put_dev(udev);
+err_alloc:
+ return ret;
+}
+
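+/* Ask the target to reboot by writing a 4-byte magic value
+ * (0xffffffff) to the REG_OUT pipe. */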
+static void ath9k_hif_usb_reboot(struct usb_device *udev)
+{
+ u32 reboot_cmd = 0xffffffff;
+ void *buf;
+ int ret;
+
+ buf = kmalloc(4, GFP_KERNEL);
+ if (!buf)
+ return;
+
+ memcpy(buf, &reboot_cmd, 4);
+
+ ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, USB_REG_OUT_PIPE),
+ buf, 4, NULL, HZ);
+ if (ret)
+ dev_err(&udev->dev, "ath9k_htc: USB reboot failed\n");
+
+ kfree(buf);
+}
+
+static void ath9k_hif_usb_disconnect(struct usb_interface *interface)
+{
+ struct usb_device *udev = interface_to_usbdev(interface);
+ struct hif_device_usb *hif_dev =
+ (struct hif_device_usb *) usb_get_intfdata(interface);
+
+ if (hif_dev) {
+ ath9k_htc_hw_deinit(hif_dev->htc_handle,
+ (udev->state == USB_STATE_NOTATTACHED) ? true : false);
+ ath9k_htc_hw_free(hif_dev->htc_handle);
+ ath9k_hif_usb_dev_deinit(hif_dev);
+ usb_set_intfdata(interface, NULL);
+ }
+
+ if (hif_dev->flags & HIF_USB_START)
+ ath9k_hif_usb_reboot(udev);
+
+ kfree(hif_dev);
+ dev_info(&udev->dev, "ath9k_htc: USB layer deinitialized\n");
+ usb_put_dev(udev);
+}
+
+#ifdef CONFIG_PM
+static int ath9k_hif_usb_suspend(struct usb_interface *interface,
+ pm_message_t message)
+{
+ struct hif_device_usb *hif_dev =
+ (struct hif_device_usb *) usb_get_intfdata(interface);
+
+ ath9k_hif_usb_dealloc_urbs(hif_dev);
+
+ return 0;
+}
+
+static int ath9k_hif_usb_resume(struct usb_interface *interface)
+{
+ struct hif_device_usb *hif_dev =
+ (struct hif_device_usb *) usb_get_intfdata(interface);
+ int ret;
+
+ ret = ath9k_hif_usb_alloc_urbs(hif_dev);
+ if (ret)
+ return ret;
+
+ if (hif_dev->firmware) {
+ ret = ath9k_hif_usb_download_fw(hif_dev);
+ if (ret)
+ goto fail_resume;
+ } else {
+ ath9k_hif_usb_dealloc_urbs(hif_dev);
+ return -EIO;
+ }
+
+ mdelay(100);
+
+ ret = ath9k_htc_resume(hif_dev->htc_handle);
+
+ if (ret)
+ goto fail_resume;
+
+ return 0;
+
+fail_resume:
+ ath9k_hif_usb_dealloc_urbs(hif_dev);
+
+ return ret;
+}
+#endif
+
+static struct usb_driver ath9k_hif_usb_driver = {
+ .name = "ath9k_hif_usb",
+ .probe = ath9k_hif_usb_probe,
+ .disconnect = ath9k_hif_usb_disconnect,
+#ifdef CONFIG_PM
+ .suspend = ath9k_hif_usb_suspend,
+ .resume = ath9k_hif_usb_resume,
+ .reset_resume = ath9k_hif_usb_resume,
+#endif
+ .id_table = ath9k_hif_usb_ids,
+ .soft_unbind = 1,
+};
+
+int ath9k_hif_usb_init(void)
+{
+ return usb_register(&ath9k_hif_usb_driver);
+}
+
+void ath9k_hif_usb_exit(void)
+{
+ usb_deregister(&ath9k_hif_usb_driver);
+}
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.h b/drivers/net/wireless/ath/ath9k/hif_usb.h
new file mode 100644
index 000000000000..0aca49b6fcb6
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef HTC_USB_H
+#define HTC_USB_H
+
+#define AR9271_FIRMWARE 0x501000
+#define AR9271_FIRMWARE_TEXT 0x903000
+
+#define FIRMWARE_DOWNLOAD 0x30
+#define FIRMWARE_DOWNLOAD_COMP 0x31
+
+#define ATH_USB_RX_STREAM_MODE_TAG 0x4e00
+#define ATH_USB_TX_STREAM_MODE_TAG 0x697e
+
+/* FIXME: Verify these numbers (with Windows) */
+#define MAX_TX_URB_NUM 8
+#define MAX_TX_BUF_NUM 1024
+#define MAX_TX_BUF_SIZE 32768
+#define MAX_TX_AGGR_NUM 20
+
+#define MAX_RX_URB_NUM 8
+#define MAX_RX_BUF_SIZE 16384
+#define MAX_PKT_NUM_IN_TRANSFER 10
+
+#define MAX_REG_OUT_URB_NUM 1
+#define MAX_REG_OUT_BUF_NUM 8
+
+#define MAX_REG_IN_BUF_SIZE 64
+
+/* USB Endpoint definition */
+#define USB_WLAN_TX_PIPE 1
+#define USB_WLAN_RX_PIPE 2
+#define USB_REG_IN_PIPE 3
+#define USB_REG_OUT_PIPE 4
+
+#define HIF_USB_MAX_RXPIPES 2
+#define HIF_USB_MAX_TXPIPES 4
+
+struct tx_buf {
+ u8 *buf;
+ u16 len;
+ u16 offset;
+ struct urb *urb;
+ struct sk_buff_head skb_queue;
+ struct hif_device_usb *hif_dev;
+ struct list_head list;
+};
+
+#define HIF_USB_TX_STOP BIT(0)
+
+struct hif_usb_tx {
+ u8 flags;
+ u8 tx_buf_cnt;
+ u16 tx_skb_cnt;
+ struct sk_buff_head tx_skb_queue;
+ struct list_head tx_buf;
+ struct list_head tx_pending;
+ spinlock_t tx_lock;
+};
+
+struct cmd_buf {
+ struct sk_buff *skb;
+ struct hif_device_usb *hif_dev;
+};
+
+#define HIF_USB_START BIT(0)
+
+struct hif_device_usb {
+ u16 device_id;
+ struct usb_device *udev;
+ struct usb_interface *interface;
+ const struct firmware *firmware;
+ struct htc_target *htc_handle;
+ struct hif_usb_tx tx;
+ struct urb *reg_in_urb;
+ struct usb_anchor regout_submitted;
+ struct usb_anchor rx_submitted;
+ struct sk_buff *remain_skb;
+ int rx_remain_len;
+ int rx_pkt_len;
+ int rx_transfer_len;
+ int rx_pad_len;
+ spinlock_t rx_lock;
+ u8 flags; /* HIF_USB_* */
+};
+
+int ath9k_hif_usb_init(void);
+void ath9k_hif_usb_exit(void);
+
+#endif /* HTC_USB_H */
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
new file mode 100644
index 000000000000..c251603ab032
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -0,0 +1,465 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef HTC_H
+#define HTC_H
+
+#include <linux/module.h>
+#include <linux/usb.h>
+#include <linux/firmware.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/leds.h>
+#include <linux/slab.h>
+#include <net/mac80211.h>
+
+#include "common.h"
+#include "htc_hst.h"
+#include "hif_usb.h"
+#include "wmi.h"
+
+#define ATH_STA_SHORT_CALINTERVAL 1000 /* 1 second */
+#define ATH_ANI_POLLINTERVAL 100 /* 100 ms */
+#define ATH_LONG_CALINTERVAL 30000 /* 30 seconds */
+#define ATH_RESTART_CALINTERVAL 1200000 /* 20 minutes */
+
+#define ATH_DEFAULT_BMISS_LIMIT 10
+#define IEEE80211_MS_TO_TU(x) (((x) * 1000) / 1024)
+#define TSF_TO_TU(_h, _l) \
+ ((((u32)(_h)) << 22) | (((u32)(_l)) >> 10))
+
+extern struct ieee80211_ops ath9k_htc_ops;
+extern int htc_modparam_nohwcrypt;
+
+enum htc_phymode {
+ HTC_MODE_AUTO = 0,
+ HTC_MODE_11A = 1,
+ HTC_MODE_11B = 2,
+ HTC_MODE_11G = 3,
+ HTC_MODE_FH = 4,
+ HTC_MODE_TURBO_A = 5,
+ HTC_MODE_TURBO_G = 6,
+ HTC_MODE_11NA = 7,
+ HTC_MODE_11NG = 8
+};
+
+enum htc_opmode {
+ HTC_M_STA = 1,
+ HTC_M_IBSS = 0,
+ HTC_M_AHDEMO = 3,
+ HTC_M_HOSTAP = 6,
+ HTC_M_MONITOR = 8,
+ HTC_M_WDS = 2
+};
+
+#define ATH9K_HTC_HDRSPACE sizeof(struct htc_frame_hdr)
+#define ATH9K_HTC_AMPDU 1
+#define ATH9K_HTC_NORMAL 2
+
+#define ATH9K_HTC_TX_CTSONLY 0x1
+#define ATH9K_HTC_TX_RTSCTS 0x2
+#define ATH9K_HTC_TX_USE_MIN_RATE 0x100
+
+struct tx_frame_hdr {
+ u8 data_type;
+ u8 node_idx;
+ u8 vif_idx;
+ u8 tidno;
+ u32 flags; /* ATH9K_HTC_TX_* */
+ u8 key_type;
+ u8 keyix;
+ u8 reserved[26];
+} __packed;
+
+struct tx_mgmt_hdr {
+ u8 node_idx;
+ u8 vif_idx;
+ u8 tidno;
+ u8 flags;
+ u8 key_type;
+ u8 keyix;
+ u16 reserved;
+} __packed;
+
+struct tx_beacon_header {
+ u8 len_changed;
+ u8 vif_index;
+ u16 rev;
+} __packed;
+
+struct ath9k_htc_target_hw {
+ u32 flags;
+ u32 flags_ext;
+ u32 ampdu_limit;
+ u8 ampdu_subframes;
+ u8 tx_chainmask;
+ u8 tx_chainmask_legacy;
+ u8 rtscts_ratecode;
+ u8 protmode;
+} __packed;
+
+struct ath9k_htc_cap_target {
+ u32 flags;
+ u32 flags_ext;
+ u32 ampdu_limit;
+ u8 ampdu_subframes;
+ u8 tx_chainmask;
+ u8 tx_chainmask_legacy;
+ u8 rtscts_ratecode;
+ u8 protmode;
+} __packed;
+
+struct ath9k_htc_target_vif {
+ u8 index;
+ u8 des_bssid[ETH_ALEN];
+ __be32 opmode;
+ u8 myaddr[ETH_ALEN];
+ u8 bssid[ETH_ALEN];
+ u32 flags;
+ u32 flags_ext;
+ u16 ps_sta;
+ __be16 rtsthreshold;
+ u8 ath_cap;
+ u8 node;
+ s8 mcast_rate;
+} __packed;
+
+#define ATH_HTC_STA_AUTH 0x0001
+#define ATH_HTC_STA_QOS 0x0002
+#define ATH_HTC_STA_ERP 0x0004
+#define ATH_HTC_STA_HT 0x0008
+
+/* FIXME: UAPSD variables */
+struct ath9k_htc_target_sta {
+ u16 associd;
+ u16 txpower;
+ u32 ucastkey;
+ u8 macaddr[ETH_ALEN];
+ u8 bssid[ETH_ALEN];
+ u8 sta_index;
+ u8 vif_index;
+ u8 vif_sta;
+ __be16 flags; /* ATH_HTC_STA_* */
+ u16 htcap;
+ u8 valid;
+ u16 capinfo;
+ struct ath9k_htc_target_hw *hw;
+ struct ath9k_htc_target_vif *vif;
+ u16 txseqmgmt;
+ u8 is_vif_sta;
+ u16 maxampdu;
+ u16 iv16;
+ u32 iv32;
+} __packed;
+
+struct ath9k_htc_target_aggr {
+ u8 sta_index;
+ u8 tidno;
+ u8 aggr_enable;
+ u8 padding;
+} __packed;
+
+#define ATH_HTC_RATE_MAX 30
+
+#define WLAN_RC_DS_FLAG 0x01
+#define WLAN_RC_40_FLAG 0x02
+#define WLAN_RC_SGI_FLAG 0x04
+#define WLAN_RC_HT_FLAG 0x08
+
+struct ath9k_htc_rateset {
+ u8 rs_nrates;
+ u8 rs_rates[ATH_HTC_RATE_MAX];
+};
+
+struct ath9k_htc_rate {
+ struct ath9k_htc_rateset legacy_rates;
+ struct ath9k_htc_rateset ht_rates;
+} __packed;
+
+struct ath9k_htc_target_rate {
+ u8 sta_index;
+ u8 isnew;
+ __be32 capflags;
+ struct ath9k_htc_rate rates;
+};
+
+struct ath9k_htc_target_stats {
+ __be32 tx_shortretry;
+ __be32 tx_longretry;
+ __be32 tx_xretries;
+ __be32 ht_txunaggr_xretry;
+ __be32 ht_tx_xretries;
+} __packed;
+
+struct ath9k_htc_vif {
+ u8 index;
+};
+
+#define ATH9K_HTC_MAX_STA 8
+#define ATH9K_HTC_MAX_TID 8
+
+enum tid_aggr_state {
+ AGGR_STOP = 0,
+ AGGR_PROGRESS,
+ AGGR_START,
+ AGGR_OPERATIONAL
+};
+
+struct ath9k_htc_sta {
+ u8 index;
+ enum tid_aggr_state tid_state[ATH9K_HTC_MAX_TID];
+};
+
+struct ath9k_htc_aggr_work {
+ u16 tid;
+ u8 sta_addr[ETH_ALEN];
+ struct ieee80211_hw *hw;
+ struct ieee80211_vif *vif;
+ enum ieee80211_ampdu_mlme_action action;
+ struct mutex mutex;
+};
+
+#define ATH9K_HTC_RXBUF 256
+#define HTC_RX_FRAME_HEADER_SIZE 40
+
+struct ath9k_htc_rxbuf {
+ bool in_process;
+ struct sk_buff *skb;
+ struct ath_htc_rx_status rxstatus;
+ struct list_head list;
+};
+
+struct ath9k_htc_rx {
+ int last_rssi; /* FIXME: per-STA */
+ struct list_head rxbuf;
+ spinlock_t rxbuflock;
+};
+
+struct ath9k_htc_tx_ctl {
+ u8 type; /* ATH9K_HTC_* */
+};
+
+#ifdef CONFIG_ATH9K_HTC_DEBUGFS
+
+#define TX_STAT_INC(c) (hif_dev->htc_handle->drv_priv->debug.tx_stats.c++)
+#define RX_STAT_INC(c) (hif_dev->htc_handle->drv_priv->debug.rx_stats.c++)
+
+struct ath_tx_stats {
+ u32 buf_queued;
+ u32 buf_completed;
+ u32 skb_queued;
+ u32 skb_completed;
+ u32 skb_dropped;
+};
+
+struct ath_rx_stats {
+ u32 skb_allocated;
+ u32 skb_completed;
+ u32 skb_dropped;
+};
+
+struct ath9k_debug {
+ struct dentry *debugfs_phy;
+ struct dentry *debugfs_tgt_stats;
+ struct dentry *debugfs_xmit;
+ struct dentry *debugfs_recv;
+ struct ath_tx_stats tx_stats;
+ struct ath_rx_stats rx_stats;
+ u32 txrate;
+};
+
+#else
+
+#define TX_STAT_INC(c) do { } while (0)
+#define RX_STAT_INC(c) do { } while (0)
+
+#endif /* CONFIG_ATH9K_HTC_DEBUGFS */
+
+#define ATH_LED_PIN_DEF 1
+#define ATH_LED_PIN_9287 8
+#define ATH_LED_PIN_9271 15
+#define ATH_LED_ON_DURATION_IDLE 350 /* in msecs */
+#define ATH_LED_OFF_DURATION_IDLE 250 /* in msecs */
+
+enum ath_led_type {
+ ATH_LED_RADIO,
+ ATH_LED_ASSOC,
+ ATH_LED_TX,
+ ATH_LED_RX
+};
+
+struct ath_led {
+ struct ath9k_htc_priv *priv;
+ struct led_classdev led_cdev;
+ enum ath_led_type led_type;
+ struct delayed_work brightness_work;
+ char name[32];
+ bool registered;
+ int brightness;
+};
+
+struct htc_beacon_config {
+ u16 beacon_interval;
+ u16 listen_interval;
+ u16 dtim_period;
+ u16 bmiss_timeout;
+ u8 dtim_count;
+};
+
+#define OP_INVALID BIT(0)
+#define OP_SCANNING BIT(1)
+#define OP_FULL_RESET BIT(2)
+#define OP_LED_ASSOCIATED BIT(3)
+#define OP_LED_ON BIT(4)
+#define OP_PREAMBLE_SHORT BIT(5)
+#define OP_PROTECT_ENABLE BIT(6)
+#define OP_TXAGGR BIT(7)
+#define OP_ASSOCIATED BIT(8)
+#define OP_ENABLE_BEACON BIT(9)
+#define OP_LED_DEINIT BIT(10)
+#define OP_UNPLUGGED BIT(11)
+
+struct ath9k_htc_priv {
+ struct device *dev;
+ struct ieee80211_hw *hw;
+ struct ath_hw *ah;
+ struct htc_target *htc;
+ struct wmi *wmi;
+
+ enum htc_endpoint_id wmi_cmd_ep;
+ enum htc_endpoint_id beacon_ep;
+ enum htc_endpoint_id cab_ep;
+ enum htc_endpoint_id uapsd_ep;
+ enum htc_endpoint_id mgmt_ep;
+ enum htc_endpoint_id data_be_ep;
+ enum htc_endpoint_id data_bk_ep;
+ enum htc_endpoint_id data_vi_ep;
+ enum htc_endpoint_id data_vo_ep;
+
+ u16 op_flags;
+ u16 curtxpow;
+ u16 txpowlimit;
+ u16 nvifs;
+ u16 nstations;
+ u16 seq_no;
+ u32 bmiss_cnt;
+
+ spinlock_t beacon_lock;
+
+ bool tx_queues_stop;
+ spinlock_t tx_lock;
+
+ struct ieee80211_vif *vif;
+ struct htc_beacon_config cur_beacon_conf;
+ unsigned int rxfilter;
+ struct tasklet_struct wmi_tasklet;
+ struct tasklet_struct rx_tasklet;
+ struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
+ struct ath9k_htc_rx rx;
+ struct tasklet_struct tx_tasklet;
+ struct sk_buff_head tx_queue;
+ struct ath9k_htc_aggr_work aggr_work;
+ struct delayed_work ath9k_aggr_work;
+ struct delayed_work ath9k_ani_work;
+ struct work_struct ps_work;
+
+ struct mutex htc_pm_lock;
+ unsigned long ps_usecount;
+ bool ps_enabled;
+ bool ps_idle;
+
+ struct ath_led radio_led;
+ struct ath_led assoc_led;
+ struct ath_led tx_led;
+ struct ath_led rx_led;
+ struct delayed_work ath9k_led_blink_work;
+ int led_on_duration;
+ int led_off_duration;
+ int led_on_cnt;
+ int led_off_cnt;
+ int hwq_map[ATH9K_WME_AC_VO+1];
+
+#ifdef CONFIG_ATH9K_HTC_DEBUGFS
+ struct ath9k_debug debug;
+#endif
+ struct ath9k_htc_target_rate tgt_rate;
+
+ struct mutex mutex;
+};
+
+static inline void ath_read_cachesize(struct ath_common *common, int *csz)
+{
+ common->bus_ops->read_cachesize(common, csz);
+}
+
+void ath9k_htc_beacon_config(struct ath9k_htc_priv *priv,
+ struct ieee80211_vif *vif);
+void ath9k_htc_swba(struct ath9k_htc_priv *priv, u8 beacon_pending);
+
+void ath9k_htc_rxep(void *priv, struct sk_buff *skb,
+ enum htc_endpoint_id ep_id);
+void ath9k_htc_txep(void *priv, struct sk_buff *skb, enum htc_endpoint_id ep_id,
+ bool txok);
+void ath9k_htc_beaconep(void *drv_priv, struct sk_buff *skb,
+ enum htc_endpoint_id ep_id, bool txok);
+
+void ath9k_htc_station_work(struct work_struct *work);
+void ath9k_htc_aggr_work(struct work_struct *work);
+void ath9k_ani_work(struct work_struct *work);
+
+int ath9k_tx_init(struct ath9k_htc_priv *priv);
+void ath9k_tx_tasklet(unsigned long data);
+int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb);
+void ath9k_tx_cleanup(struct ath9k_htc_priv *priv);
+bool ath9k_htc_txq_setup(struct ath9k_htc_priv *priv,
+ enum ath9k_tx_queue_subtype qtype);
+int get_hw_qnum(u16 queue, int *hwq_map);
+int ath_htc_txq_update(struct ath9k_htc_priv *priv, int qnum,
+ struct ath9k_tx_queue_info *qinfo);
+
+int ath9k_rx_init(struct ath9k_htc_priv *priv);
+void ath9k_rx_cleanup(struct ath9k_htc_priv *priv);
+void ath9k_host_rx_init(struct ath9k_htc_priv *priv);
+void ath9k_rx_tasklet(unsigned long data);
+u32 ath9k_htc_calcrxfilter(struct ath9k_htc_priv *priv);
+
+void ath9k_htc_ps_wakeup(struct ath9k_htc_priv *priv);
+void ath9k_htc_ps_restore(struct ath9k_htc_priv *priv);
+void ath9k_ps_work(struct work_struct *work);
+
+void ath9k_start_rfkill_poll(struct ath9k_htc_priv *priv);
+void ath9k_init_leds(struct ath9k_htc_priv *priv);
+void ath9k_deinit_leds(struct ath9k_htc_priv *priv);
+
+int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev,
+ u16 devid);
+void ath9k_htc_disconnect_device(struct htc_target *htc_handle, bool hotunplug);
+#ifdef CONFIG_PM
+int ath9k_htc_resume(struct htc_target *htc_handle);
+#endif
+#ifdef CONFIG_ATH9K_HTC_DEBUGFS
+int ath9k_htc_debug_create_root(void);
+void ath9k_htc_debug_remove_root(void);
+int ath9k_htc_init_debug(struct ath_hw *ah);
+void ath9k_htc_exit_debug(struct ath_hw *ah);
+#else
+static inline int ath9k_htc_debug_create_root(void) { return 0; };
+static inline void ath9k_htc_debug_remove_root(void) {};
+static inline int ath9k_htc_init_debug(struct ath_hw *ah) { return 0; };
+static inline void ath9k_htc_exit_debug(struct ath_hw *ah) {};
+#endif /* CONFIG_ATH9K_HTC_DEBUGFS */
+
+#endif /* HTC_H */
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
new file mode 100644
index 000000000000..c10c7d002eb7
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
@@ -0,0 +1,255 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "htc.h"
+
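+/*
+ * Extra margin (in TU) added to the current TSF when pulling nexttbtt
+ * forward, so the programmed beacon timers land safely in the future.
+ */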
+#define FUDGE 2
+
+static void ath9k_htc_beacon_config_sta(struct ath9k_htc_priv *priv,
+ struct htc_beacon_config *bss_conf)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath9k_beacon_state bs;
+ enum ath9k_int imask = 0;
+ int dtimperiod, dtimcount, sleepduration;
+ int cfpperiod, cfpcount, bmiss_timeout;
+ u32 nexttbtt = 0, intval, tsftu;
+ __be32 htc_imask = 0;
+ u64 tsf;
+ int num_beacons, offset, dtim_dec_count, cfp_dec_count;
+ int ret;
+ u8 cmd_rsp;
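+ /* ret and cmd_rsp are assigned/consumed by the WMI_CMD* macros below */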
+
+ memset(&bs, 0, sizeof(bs));
+
+ intval = bss_conf->beacon_interval & ATH9K_BEACON_PERIOD;
+ bmiss_timeout = (ATH_DEFAULT_BMISS_LIMIT * bss_conf->beacon_interval);
+
+ /*
+ * Setup dtim and cfp parameters according to
+ * last beacon we received (which may be none).
+ */
+ dtimperiod = bss_conf->dtim_period;
+ if (dtimperiod <= 0) /* NB: 0 if not known */
+ dtimperiod = 1;
+ dtimcount = 1;
+ if (dtimcount >= dtimperiod) /* NB: sanity check */
+ dtimcount = 0;
+ cfpperiod = 1; /* NB: no PCF support yet */
+ cfpcount = 0;
+
+ sleepduration = intval;
+ if (sleepduration <= 0)
+ sleepduration = intval;
+
+ /*
+ * Pull nexttbtt forward to reflect the current
+ * TSF and calculate dtim+cfp state for the result.
+ */
+ tsf = ath9k_hw_gettsf64(priv->ah);
+ tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE;
+
+ num_beacons = tsftu / intval + 1;
+ offset = tsftu % intval;
+ nexttbtt = tsftu - offset;
+ if (offset)
+ nexttbtt += intval;
+
+ /* DTIM beacon every dtimperiod beacons */
+ dtim_dec_count = num_beacons % dtimperiod;
+ /* CFP every cfpperiod DTIM beacons */
+ cfp_dec_count = (num_beacons / dtimperiod) % cfpperiod;
+ if (dtim_dec_count)
+ cfp_dec_count++;
+
+ dtimcount -= dtim_dec_count;
+ if (dtimcount < 0)
+ dtimcount += dtimperiod;
+
+ cfpcount -= cfp_dec_count;
+ if (cfpcount < 0)
+ cfpcount += cfpperiod;
+
+ bs.bs_intval = intval;
+ bs.bs_nexttbtt = nexttbtt;
+ bs.bs_dtimperiod = dtimperiod*intval;
+ bs.bs_nextdtim = bs.bs_nexttbtt + dtimcount*intval;
+ bs.bs_cfpperiod = cfpperiod*bs.bs_dtimperiod;
+ bs.bs_cfpnext = bs.bs_nextdtim + cfpcount*bs.bs_dtimperiod;
+ bs.bs_cfpmaxduration = 0;
+
+ /*
+ * Calculate the number of consecutive beacons to miss before taking
+ * a BMISS interrupt. The configuration is specified in TU, so we only
+ * need to calculate based on the beacon interval. Note that we clamp the
+ * result to at most 15 beacons.
+ */
+ if (sleepduration > intval) {
+ bs.bs_bmissthreshold = ATH_DEFAULT_BMISS_LIMIT / 2;
+ } else {
+ bs.bs_bmissthreshold = DIV_ROUND_UP(bmiss_timeout, intval);
+ if (bs.bs_bmissthreshold > 15)
+ bs.bs_bmissthreshold = 15;
+ else if (bs.bs_bmissthreshold <= 0)
+ bs.bs_bmissthreshold = 1;
+ }
+
+ /*
+ * Calculate sleep duration. The configuration is given in ms.
+ * We ensure a multiple of the beacon period is used. Also, if the sleep
+ * duration is greater than the DTIM period, then it makes sense
+ * to make it a multiple of that.
+ *
+ * XXX fixed at 100ms
+ */
+
+ bs.bs_sleepduration = roundup(IEEE80211_MS_TO_TU(100), sleepduration);
+ if (bs.bs_sleepduration > bs.bs_dtimperiod)
+ bs.bs_sleepduration = bs.bs_dtimperiod;
+
+ /* TSF out of range threshold fixed at 1 second */
+ bs.bs_tsfoor_threshold = ATH9K_TSFOOR_THRESHOLD;
+
+ ath_print(common, ATH_DBG_BEACON, "tsf: %llu tsftu: %u\n", tsf, tsftu);
+ ath_print(common, ATH_DBG_BEACON,
+ "bmiss: %u sleep: %u cfp-period: %u maxdur: %u next: %u\n",
+ bs.bs_bmissthreshold, bs.bs_sleepduration,
+ bs.bs_cfpperiod, bs.bs_cfpmaxduration, bs.bs_cfpnext);
+
+ /* Set the computed STA beacon timers */
+
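+ /*
+ * Target interrupts are disabled while the timers are programmed and
+ * re-enabled afterwards with the BMISS interrupt added to the mask.
+ */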
+ WMI_CMD(WMI_DISABLE_INTR_CMDID);
+ ath9k_hw_set_sta_beacon_timers(priv->ah, &bs);
+ imask |= ATH9K_INT_BMISS;
+ htc_imask = cpu_to_be32(imask);
+ WMI_CMD_BUF(WMI_ENABLE_INTR_CMDID, &htc_imask);
+}
+
+static void ath9k_htc_beacon_config_adhoc(struct ath9k_htc_priv *priv,
+ struct htc_beacon_config *bss_conf)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ enum ath9k_int imask = 0;
+ u32 nexttbtt, intval;
+ __be32 htc_imask = 0;
+ int ret;
+ u8 cmd_rsp;
+
+ intval = bss_conf->beacon_interval & ATH9K_BEACON_PERIOD;
+ nexttbtt = intval;
+ intval |= ATH9K_BEACON_ENA;
+ if (priv->op_flags & OP_ENABLE_BEACON)
+ imask |= ATH9K_INT_SWBA;
+
+ ath_print(common, ATH_DBG_BEACON,
+ "IBSS Beacon config, intval: %d, imask: 0x%x\n",
+ bss_conf->beacon_interval, imask);
+
+ WMI_CMD(WMI_DISABLE_INTR_CMDID);
+ ath9k_hw_beaconinit(priv->ah, nexttbtt, intval);
+ priv->bmiss_cnt = 0;
+ htc_imask = cpu_to_be32(imask);
+ WMI_CMD_BUF(WMI_ENABLE_INTR_CMDID, &htc_imask);
+}
+
+void ath9k_htc_beaconep(void *drv_priv, struct sk_buff *skb,
+ enum htc_endpoint_id ep_id, bool txok)
+{
+ dev_kfree_skb_any(skb);
+}
+
+void ath9k_htc_swba(struct ath9k_htc_priv *priv, u8 beacon_pending)
+{
+ struct ath9k_htc_vif *avp = (void *)priv->vif->drv_priv;
+ struct tx_beacon_header beacon_hdr;
+ struct ath9k_htc_tx_ctl tx_ctl;
+ struct ieee80211_tx_info *info;
+ struct sk_buff *beacon;
+ u8 *tx_fhdr;
+
+ memset(&beacon_hdr, 0, sizeof(struct tx_beacon_header));
+ memset(&tx_ctl, 0, sizeof(struct ath9k_htc_tx_ctl));
+
+ /* FIXME: Handle BMISS */
+ if (beacon_pending != 0) {
+ priv->bmiss_cnt++;
+ return;
+ }
+
+ spin_lock_bh(&priv->beacon_lock);
+
+ if (unlikely(priv->op_flags & OP_SCANNING)) {
+ spin_unlock_bh(&priv->beacon_lock);
+ return;
+ }
+
+ /* Get a new beacon */
+ beacon = ieee80211_beacon_get(priv->hw, priv->vif);
+ if (!beacon) {
+ spin_unlock_bh(&priv->beacon_lock);
+ return;
+ }
+
+ info = IEEE80211_SKB_CB(beacon);
+ if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
+ struct ieee80211_hdr *hdr =
+ (struct ieee80211_hdr *) beacon->data;
+ priv->seq_no += 0x10;
+ hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
+ hdr->seq_ctrl |= cpu_to_le16(priv->seq_no);
+ }
+
+ tx_ctl.type = ATH9K_HTC_NORMAL;
+ beacon_hdr.vif_index = avp->index;
+ tx_fhdr = skb_push(beacon, sizeof(beacon_hdr));
+ memcpy(tx_fhdr, (u8 *) &beacon_hdr, sizeof(beacon_hdr));
+
+ htc_send(priv->htc, beacon, priv->beacon_ep, &tx_ctl);
+
+ spin_unlock_bh(&priv->beacon_lock);
+}
+
+
+void ath9k_htc_beacon_config(struct ath9k_htc_priv *priv,
+ struct ieee80211_vif *vif)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct htc_beacon_config *cur_conf = &priv->cur_beacon_conf;
+ struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+
+ cur_conf->beacon_interval = bss_conf->beacon_int;
+ if (cur_conf->beacon_interval == 0)
+ cur_conf->beacon_interval = 100;
+
+ cur_conf->dtim_period = bss_conf->dtim_period;
+ cur_conf->listen_interval = 1;
+ cur_conf->dtim_count = 1;
+ cur_conf->bmiss_timeout =
+ ATH_DEFAULT_BMISS_LIMIT * cur_conf->beacon_interval;
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ ath9k_htc_beacon_config_sta(priv, cur_conf);
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ ath9k_htc_beacon_config_adhoc(priv, cur_conf);
+ break;
+ default:
+ ath_print(common, ATH_DBG_CONFIG,
+ "Unsupported beaconing mode\n");
+ return;
+ }
+}
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
new file mode 100644
index 000000000000..dc015077a8d9
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -0,0 +1,834 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "htc.h"
+
+MODULE_AUTHOR("Atheros Communications");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("Atheros driver for 802.11n HTC based wireless devices");
+
+static unsigned int ath9k_debug = ATH_DBG_DEFAULT;
+module_param_named(debug, ath9k_debug, uint, 0);
+MODULE_PARM_DESC(debug, "Debugging mask");
+
+int htc_modparam_nohwcrypt;
+module_param_named(nohwcrypt, htc_modparam_nohwcrypt, int, 0444);
+MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");
+
+#define CHAN2G(_freq, _idx) { \
+ .center_freq = (_freq), \
+ .hw_value = (_idx), \
+ .max_power = 20, \
+}
+
+static struct ieee80211_channel ath9k_2ghz_channels[] = {
+ CHAN2G(2412, 0), /* Channel 1 */
+ CHAN2G(2417, 1), /* Channel 2 */
+ CHAN2G(2422, 2), /* Channel 3 */
+ CHAN2G(2427, 3), /* Channel 4 */
+ CHAN2G(2432, 4), /* Channel 5 */
+ CHAN2G(2437, 5), /* Channel 6 */
+ CHAN2G(2442, 6), /* Channel 7 */
+ CHAN2G(2447, 7), /* Channel 8 */
+ CHAN2G(2452, 8), /* Channel 9 */
+ CHAN2G(2457, 9), /* Channel 10 */
+ CHAN2G(2462, 10), /* Channel 11 */
+ CHAN2G(2467, 11), /* Channel 12 */
+ CHAN2G(2472, 12), /* Channel 13 */
+ CHAN2G(2484, 13), /* Channel 14 */
+};
+
+/* Atheros hardware rate code addition for short preamble */
+#define SHPCHECK(__hw_rate, __flags) \
+ ((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04) : 0)
+
+#define RATE(_bitrate, _hw_rate, _flags) { \
+ .bitrate = (_bitrate), \
+ .flags = (_flags), \
+ .hw_value = (_hw_rate), \
+ .hw_value_short = (SHPCHECK(_hw_rate, _flags)) \
+}
+
+static struct ieee80211_rate ath9k_legacy_rates[] = {
+ RATE(10, 0x1b, 0),
+ RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE), /* short preamble: 0x1e */
+ RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE), /* short preamble: 0x1d */
+ RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE), /* short preamble: 0x1c */
+ RATE(60, 0x0b, 0),
+ RATE(90, 0x0f, 0),
+ RATE(120, 0x0a, 0),
+ RATE(180, 0x0e, 0),
+ RATE(240, 0x09, 0),
+ RATE(360, 0x0d, 0),
+ RATE(480, 0x08, 0),
+ RATE(540, 0x0c, 0),
+};
+
+static int ath9k_htc_wait_for_target(struct ath9k_htc_priv *priv)
+{
+ int time_left;
+
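+ /* The target may already have signalled readiness; consume the token. */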
+ if (atomic_read(&priv->htc->tgt_ready) > 0) {
+ atomic_dec(&priv->htc->tgt_ready);
+ return 0;
+ }
+
+ /* Firmware can take up to 50ms to get ready, to be safe use 1 second */
+ time_left = wait_for_completion_timeout(&priv->htc->target_wait, HZ);
+ if (!time_left) {
+ dev_err(priv->dev, "ath9k_htc: Target is unresponsive\n");
+ return -ETIMEDOUT;
+ }
+
+ atomic_dec(&priv->htc->tgt_ready);
+
+ return 0;
+}
+
+static void ath9k_deinit_priv(struct ath9k_htc_priv *priv)
+{
+ ath9k_htc_exit_debug(priv->ah);
+ ath9k_hw_deinit(priv->ah);
+ tasklet_kill(&priv->wmi_tasklet);
+ tasklet_kill(&priv->rx_tasklet);
+ tasklet_kill(&priv->tx_tasklet);
+ kfree(priv->ah);
+ priv->ah = NULL;
+}
+
+static void ath9k_deinit_device(struct ath9k_htc_priv *priv)
+{
+ struct ieee80211_hw *hw = priv->hw;
+
+ wiphy_rfkill_stop_polling(hw->wiphy);
+ ath9k_deinit_leds(priv);
+ ieee80211_unregister_hw(hw);
+ ath9k_rx_cleanup(priv);
+ ath9k_tx_cleanup(priv);
+ ath9k_deinit_priv(priv);
+}
+
+static inline int ath9k_htc_connect_svc(struct ath9k_htc_priv *priv,
+ u16 service_id,
+ void (*tx) (void *,
+ struct sk_buff *,
+ enum htc_endpoint_id,
+ bool txok),
+ enum htc_endpoint_id *ep_id)
+{
+ struct htc_service_connreq req;
+
+ memset(&req, 0, sizeof(struct htc_service_connreq));
+
+ req.service_id = service_id;
+ req.ep_callbacks.priv = priv;
+ req.ep_callbacks.rx = ath9k_htc_rxep;
+ req.ep_callbacks.tx = tx;
+
+ return htc_connect_service(priv->htc, &req, ep_id);
+}
+
+static int ath9k_init_htc_services(struct ath9k_htc_priv *priv)
+{
+ int ret;
+
+ /* WMI CMD*/
+ ret = ath9k_wmi_connect(priv->htc, priv->wmi, &priv->wmi_cmd_ep);
+ if (ret)
+ goto err;
+
+ /* Beacon */
+ ret = ath9k_htc_connect_svc(priv, WMI_BEACON_SVC, ath9k_htc_beaconep,
+ &priv->beacon_ep);
+ if (ret)
+ goto err;
+
+ /* CAB */
+ ret = ath9k_htc_connect_svc(priv, WMI_CAB_SVC, ath9k_htc_txep,
+ &priv->cab_ep);
+ if (ret)
+ goto err;
+
+ /* UAPSD */
+ ret = ath9k_htc_connect_svc(priv, WMI_UAPSD_SVC, ath9k_htc_txep,
+ &priv->uapsd_ep);
+ if (ret)
+ goto err;
+
+ /* MGMT */
+ ret = ath9k_htc_connect_svc(priv, WMI_MGMT_SVC, ath9k_htc_txep,
+ &priv->mgmt_ep);
+ if (ret)
+ goto err;
+
+ /* DATA BE */
+ ret = ath9k_htc_connect_svc(priv, WMI_DATA_BE_SVC, ath9k_htc_txep,
+ &priv->data_be_ep);
+ if (ret)
+ goto err;
+
+ /* DATA BK */
+ ret = ath9k_htc_connect_svc(priv, WMI_DATA_BK_SVC, ath9k_htc_txep,
+ &priv->data_bk_ep);
+ if (ret)
+ goto err;
+
+ /* DATA VI */
+ ret = ath9k_htc_connect_svc(priv, WMI_DATA_VI_SVC, ath9k_htc_txep,
+ &priv->data_vi_ep);
+ if (ret)
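+ /*
+ * mac80211 bitrates are in units of 100 kbps; the target expects them
+ * in units of 500 kbps, hence the (bitrate * 2) / 10 conversion below.
+ */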
+ goto err;
+
+ /* DATA VO */
+ ret = ath9k_htc_connect_svc(priv, WMI_DATA_VO_SVC, ath9k_htc_txep,
+ &priv->data_vo_ep);
+ if (ret)
+ goto err;
+
+ ret = htc_init(priv->htc);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ dev_err(priv->dev, "ath9k_htc: Unable to initialize HTC services\n");
+ return ret;
+}
+
+static int ath9k_reg_notifier(struct wiphy *wiphy,
+ struct regulatory_request *request)
+{
+ struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+ struct ath9k_htc_priv *priv = hw->priv;
+
+ return ath_reg_notifier_apply(wiphy, request,
+ ath9k_hw_regulatory(priv->ah));
+}
+
+static unsigned int ath9k_regread(void *hw_priv, u32 reg_offset)
+{
+ struct ath_hw *ah = (struct ath_hw *) hw_priv;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+ __be32 val, reg = cpu_to_be32(reg_offset);
+ int r;
+
+ r = ath9k_wmi_cmd(priv->wmi, WMI_REG_READ_CMDID,
+ (u8 *) &reg, sizeof(reg),
+ (u8 *) &val, sizeof(val),
+ 100);
+ if (unlikely(r)) {
+ ath_print(common, ATH_DBG_WMI,
+ "REGISTER READ FAILED: (0x%04x, %d)\n",
+ reg_offset, r);
+ return -EIO;
+ }
+
+ return be32_to_cpu(val);
+}
+
+static void ath9k_regwrite_single(void *hw_priv, u32 val, u32 reg_offset)
+{
+ struct ath_hw *ah = (struct ath_hw *) hw_priv;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+ __be32 buf[2] = {
+ cpu_to_be32(reg_offset),
+ cpu_to_be32(val),
+ };
+ int r;
+
+ r = ath9k_wmi_cmd(priv->wmi, WMI_REG_WRITE_CMDID,
+ (u8 *) &buf, sizeof(buf),
+ (u8 *) &val, sizeof(val),
+ 100);
+ if (unlikely(r)) {
+ ath_print(common, ATH_DBG_WMI,
+ "REGISTER WRITE FAILED:(0x%04x, %d)\n",
+ reg_offset, r);
+ }
+}
+
+static void ath9k_regwrite_buffer(void *hw_priv, u32 val, u32 reg_offset)
+{
+ struct ath_hw *ah = (struct ath_hw *) hw_priv;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+ u32 rsp_status;
+ int r;
+
+ mutex_lock(&priv->wmi->multi_write_mutex);
+
+ /* Store the register/value */
+ priv->wmi->multi_write[priv->wmi->multi_write_idx].reg =
+ cpu_to_be32(reg_offset);
+ priv->wmi->multi_write[priv->wmi->multi_write_idx].val =
+ cpu_to_be32(val);
+
+ priv->wmi->multi_write_idx++;
+
+ /* If the buffer is full, send it out. */
+ if (priv->wmi->multi_write_idx == MAX_CMD_NUMBER) {
+ r = ath9k_wmi_cmd(priv->wmi, WMI_REG_WRITE_CMDID,
+ (u8 *) &priv->wmi->multi_write,
+ sizeof(struct register_write) * priv->wmi->multi_write_idx,
+ (u8 *) &rsp_status, sizeof(rsp_status),
+ 100);
+ if (unlikely(r)) {
+ ath_print(common, ATH_DBG_WMI,
+ "REGISTER WRITE FAILED, multi len: %d\n",
+ priv->wmi->multi_write_idx);
+ }
+ priv->wmi->multi_write_idx = 0;
+ }
+
+ mutex_unlock(&priv->wmi->multi_write_mutex);
+}
+
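+/*
+ * Register writes go to the target over WMI. When a write buffer has
+ * been enabled (mwrite_cnt > 0), writes are batched in multi_write[]
+ * and flushed once the buffer fills up or ath9k_regwrite_flush() runs.
+ */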
+static void ath9k_regwrite(void *hw_priv, u32 val, u32 reg_offset)
+{
+ struct ath_hw *ah = (struct ath_hw *) hw_priv;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+
+ if (atomic_read(&priv->wmi->mwrite_cnt))
+ ath9k_regwrite_buffer(hw_priv, val, reg_offset);
+ else
+ ath9k_regwrite_single(hw_priv, val, reg_offset);
+}
+
+static void ath9k_enable_regwrite_buffer(void *hw_priv)
+{
+ struct ath_hw *ah = (struct ath_hw *) hw_priv;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+
+ atomic_inc(&priv->wmi->mwrite_cnt);
+}
+
+static void ath9k_disable_regwrite_buffer(void *hw_priv)
+{
+ struct ath_hw *ah = (struct ath_hw *) hw_priv;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+
+ atomic_dec(&priv->wmi->mwrite_cnt);
+}
+
+static void ath9k_regwrite_flush(void *hw_priv)
+{
+ struct ath_hw *ah = (struct ath_hw *) hw_priv;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+ u32 rsp_status;
+ int r;
+
+ mutex_lock(&priv->wmi->multi_write_mutex);
+
+ if (priv->wmi->multi_write_idx) {
+ r = ath9k_wmi_cmd(priv->wmi, WMI_REG_WRITE_CMDID,
+ (u8 *) &priv->wmi->multi_write,
+ sizeof(struct register_write) * priv->wmi->multi_write_idx,
+ (u8 *) &rsp_status, sizeof(rsp_status),
+ 100);
+ if (unlikely(r)) {
+ ath_print(common, ATH_DBG_WMI,
+ "REGISTER WRITE FAILED, multi len: %d\n",
+ priv->wmi->multi_write_idx);
+ }
+ priv->wmi->multi_write_idx = 0;
+ }
+
+ mutex_unlock(&priv->wmi->multi_write_mutex);
+}
+
+static const struct ath_ops ath9k_common_ops = {
+ .read = ath9k_regread,
+ .write = ath9k_regwrite,
+ .enable_write_buffer = ath9k_enable_regwrite_buffer,
+ .disable_write_buffer = ath9k_disable_regwrite_buffer,
+ .write_flush = ath9k_regwrite_flush,
+};
+
+static void ath_usb_read_cachesize(struct ath_common *common, int *csz)
+{
+ *csz = L1_CACHE_BYTES >> 2;
+}
+
+static bool ath_usb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
+{
+ struct ath_hw *ah = (struct ath_hw *) common->ah;
+
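+ /*
+ * The dummy read kicks off the EEPROM fetch; once the busy and
+ * protection bits clear, the data word is read back from the status
+ * register.
+ */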
+ (void)REG_READ(ah, AR5416_EEPROM_OFFSET + (off << AR5416_EEPROM_S));
+
+ if (!ath9k_hw_wait(ah,
+ AR_EEPROM_STATUS_DATA,
+ AR_EEPROM_STATUS_DATA_BUSY |
+ AR_EEPROM_STATUS_DATA_PROT_ACCESS, 0,
+ AH_WAIT_TIMEOUT))
+ return false;
+
+ *data = MS(REG_READ(ah, AR_EEPROM_STATUS_DATA),
+ AR_EEPROM_STATUS_DATA_VAL);
+
+ return true;
+}
+
+static const struct ath_bus_ops ath9k_usb_bus_ops = {
+ .ath_bus_type = ATH_USB,
+ .read_cachesize = ath_usb_read_cachesize,
+ .eeprom_read = ath_usb_eeprom_read,
+};
+
+static void setup_ht_cap(struct ath9k_htc_priv *priv,
+ struct ieee80211_sta_ht_cap *ht_info)
+{
+ ht_info->ht_supported = true;
+ ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
+ IEEE80211_HT_CAP_SM_PS |
+ IEEE80211_HT_CAP_SGI_40 |
+ IEEE80211_HT_CAP_DSSSCCK40;
+
+ ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+ ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
+
+ memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
+ ht_info->mcs.rx_mask[0] = 0xff;
+ ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
+}
+
+static int ath9k_init_queues(struct ath9k_htc_priv *priv)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(priv->hwq_map); i++)
+ priv->hwq_map[i] = -1;
+
+ if (!ath9k_htc_txq_setup(priv, ATH9K_WME_AC_BE)) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to setup xmit queue for BE traffic\n");
+ goto err;
+ }
+
+ if (!ath9k_htc_txq_setup(priv, ATH9K_WME_AC_BK)) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to setup xmit queue for BK traffic\n");
+ goto err;
+ }
+ if (!ath9k_htc_txq_setup(priv, ATH9K_WME_AC_VI)) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to setup xmit queue for VI traffic\n");
+ goto err;
+ }
+ if (!ath9k_htc_txq_setup(priv, ATH9K_WME_AC_VO)) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to setup xmit queue for VO traffic\n");
+ goto err;
+ }
+
+ return 0;
+
+err:
+ return -EINVAL;
+}
+
+static void ath9k_init_crypto(struct ath9k_htc_priv *priv)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ int i = 0;
+
+ /* Get the hardware key cache size. */
+ common->keymax = priv->ah->caps.keycache_size;
+ if (common->keymax > ATH_KEYMAX) {
+ ath_print(common, ATH_DBG_ANY,
+ "Warning, using only %u entries in %u key cache\n",
+ ATH_KEYMAX, common->keymax);
+ common->keymax = ATH_KEYMAX;
+ }
+
+ /*
+ * Reset the key cache since some parts do not
+ * reset the contents on initial power up.
+ */
+ for (i = 0; i < common->keymax; i++)
+ ath9k_hw_keyreset(priv->ah, (u16) i);
+
+ if (ath9k_hw_getcapability(priv->ah, ATH9K_CAP_CIPHER,
+ ATH9K_CIPHER_TKIP, NULL)) {
+ /*
+ * Whether we should enable h/w TKIP MIC.
+ * XXX: if we don't support WME TKIP MIC, then we wouldn't
+ * report WMM capable, so it's always safe to turn on
+ * TKIP MIC in this case.
+ */
+ ath9k_hw_setcapability(priv->ah, ATH9K_CAP_TKIP_MIC, 0, 1, NULL);
+ }
+
+ /*
+ * Check whether the separate key cache entries
+ * are required to handle both tx+rx MIC keys.
+ * With split mic keys the number of stations is limited
+ * to 27 otherwise 59.
+ */
+ if (ath9k_hw_getcapability(priv->ah, ATH9K_CAP_CIPHER,
+ ATH9K_CIPHER_TKIP, NULL)
+ && ath9k_hw_getcapability(priv->ah, ATH9K_CAP_CIPHER,
+ ATH9K_CIPHER_MIC, NULL)
+ && ath9k_hw_getcapability(priv->ah, ATH9K_CAP_TKIP_SPLIT,
+ 0, NULL))
+ common->splitmic = 1;
+
+ /* turn on mcast key search if possible */
+ if (!ath9k_hw_getcapability(priv->ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL))
+ (void)ath9k_hw_setcapability(priv->ah, ATH9K_CAP_MCAST_KEYSRCH,
+ 1, 1, NULL);
+}
+
+static void ath9k_init_channels_rates(struct ath9k_htc_priv *priv)
+{
+ if (test_bit(ATH9K_MODE_11G, priv->ah->caps.wireless_modes)) {
+ priv->sbands[IEEE80211_BAND_2GHZ].channels =
+ ath9k_2ghz_channels;
+ priv->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
+ priv->sbands[IEEE80211_BAND_2GHZ].n_channels =
+ ARRAY_SIZE(ath9k_2ghz_channels);
+ priv->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
+ priv->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
+ ARRAY_SIZE(ath9k_legacy_rates);
+ }
+}
+
+static void ath9k_init_misc(struct ath9k_htc_priv *priv)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+
+ common->tx_chainmask = priv->ah->caps.tx_chainmask;
+ common->rx_chainmask = priv->ah->caps.rx_chainmask;
+
+ if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
+ memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
+
+ priv->op_flags |= OP_TXAGGR;
+ priv->ah->opmode = NL80211_IFTYPE_STATION;
+}
+
+static int ath9k_init_priv(struct ath9k_htc_priv *priv, u16 devid)
+{
+ struct ath_hw *ah = NULL;
+ struct ath_common *common;
+ int ret = 0, csz = 0;
+
+ priv->op_flags |= OP_INVALID;
+
+ ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL);
+ if (!ah)
+ return -ENOMEM;
+
+ ah->hw_version.devid = devid;
+ ah->hw_version.subsysid = 0; /* FIXME */
+ priv->ah = ah;
+
+ common = ath9k_hw_common(ah);
+ common->ops = &ath9k_common_ops;
+ common->bus_ops = &ath9k_usb_bus_ops;
+ common->ah = ah;
+ common->hw = priv->hw;
+ common->priv = priv;
+ common->debug_mask = ath9k_debug;
+
+ spin_lock_init(&priv->wmi->wmi_lock);
+ spin_lock_init(&priv->beacon_lock);
+ spin_lock_init(&priv->tx_lock);
+ mutex_init(&priv->mutex);
+ mutex_init(&priv->aggr_work.mutex);
+ mutex_init(&priv->htc_pm_lock);
+ tasklet_init(&priv->wmi_tasklet, ath9k_wmi_tasklet,
+ (unsigned long)priv);
+ tasklet_init(&priv->rx_tasklet, ath9k_rx_tasklet,
+ (unsigned long)priv);
+ tasklet_init(&priv->tx_tasklet, ath9k_tx_tasklet, (unsigned long)priv);
+ INIT_DELAYED_WORK(&priv->ath9k_aggr_work, ath9k_htc_aggr_work);
+ INIT_DELAYED_WORK(&priv->ath9k_ani_work, ath9k_ani_work);
+ INIT_WORK(&priv->ps_work, ath9k_ps_work);
+
+ /*
+ * Cache line size is used to size and align various
+ * structures used to communicate with the hardware.
+ */
+ ath_read_cachesize(common, &csz);
+ common->cachelsz = csz << 2; /* convert to bytes */
+
+ ret = ath9k_hw_init(ah);
+ if (ret) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to initialize hardware; "
+ "initialization status: %d\n", ret);
+ goto err_hw;
+ }
+
+ ret = ath9k_htc_init_debug(ah);
+ if (ret) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to create debugfs files\n");
+ goto err_debug;
+ }
+
+ ret = ath9k_init_queues(priv);
+ if (ret)
+ goto err_queues;
+
+ ath9k_init_crypto(priv);
+ ath9k_init_channels_rates(priv);
+ ath9k_init_misc(priv);
+
+ return 0;
+
+err_queues:
+ ath9k_htc_exit_debug(ah);
+err_debug:
+ ath9k_hw_deinit(ah);
+err_hw:
+
+ kfree(ah);
+ priv->ah = NULL;
+
+ return ret;
+}
+
+static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
+ struct ieee80211_hw *hw)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+
+ hw->flags = IEEE80211_HW_SIGNAL_DBM |
+ IEEE80211_HW_AMPDU_AGGREGATION |
+ IEEE80211_HW_SPECTRUM_MGMT |
+ IEEE80211_HW_HAS_RATE_CONTROL |
+ IEEE80211_HW_RX_INCLUDES_FCS |
+ IEEE80211_HW_SUPPORTS_PS |
+ IEEE80211_HW_PS_NULLFUNC_STACK;
+
+ hw->wiphy->interface_modes =
+ BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_ADHOC);
+
+ hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+
+ hw->queues = 4;
+ hw->channel_change_time = 5000;
+ hw->max_listen_interval = 10;
+ hw->vif_data_size = sizeof(struct ath9k_htc_vif);
+ hw->sta_data_size = sizeof(struct ath9k_htc_sta);
+
+ /* tx_frame_hdr is larger than tx_mgmt_hdr anyway */
+ hw->extra_tx_headroom = sizeof(struct tx_frame_hdr) +
+ sizeof(struct htc_frame_hdr) + 4;
+
+ if (test_bit(ATH9K_MODE_11G, priv->ah->caps.wireless_modes))
+ hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
+ &priv->sbands[IEEE80211_BAND_2GHZ];
+
+ if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
+ if (test_bit(ATH9K_MODE_11G, priv->ah->caps.wireless_modes))
+ setup_ht_cap(priv,
+ &priv->sbands[IEEE80211_BAND_2GHZ].ht_cap);
+ }
+
+ SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
+}
+
+static int ath9k_init_device(struct ath9k_htc_priv *priv, u16 devid)
+{
+ struct ieee80211_hw *hw = priv->hw;
+ struct ath_common *common;
+ struct ath_hw *ah;
+ int error = 0;
+ struct ath_regulatory *reg;
+
+ /* Bring up device */
+ error = ath9k_init_priv(priv, devid);
+ if (error != 0)
+ goto err_init;
+
+ ah = priv->ah;
+ common = ath9k_hw_common(ah);
+ ath9k_set_hw_capab(priv, hw);
+
+ /* Initialize regulatory */
+ error = ath_regd_init(&common->regulatory, priv->hw->wiphy,
+ ath9k_reg_notifier);
+ if (error)
+ goto err_regd;
+
+ reg = &common->regulatory;
+
+ /* Setup TX */
+ error = ath9k_tx_init(priv);
+ if (error != 0)
+ goto err_tx;
+
+ /* Setup RX */
+ error = ath9k_rx_init(priv);
+ if (error != 0)
+ goto err_rx;
+
+ /* Register with mac80211 */
+ error = ieee80211_register_hw(hw);
+ if (error)
+ goto err_register;
+
+ /* Handle world regulatory */
+ if (!ath_is_world_regd(reg)) {
+ error = regulatory_hint(hw->wiphy, reg->alpha2);
+ if (error)
+ goto err_world;
+ }
+
+ ath9k_init_leds(priv);
+ ath9k_start_rfkill_poll(priv);
+
+ return 0;
+
+err_world:
+ ieee80211_unregister_hw(hw);
+err_register:
+ ath9k_rx_cleanup(priv);
+err_rx:
+ ath9k_tx_cleanup(priv);
+err_tx:
+ /* Nothing */
+err_regd:
+ ath9k_deinit_priv(priv);
+err_init:
+ return error;
+}
+
+int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev,
+ u16 devid)
+{
+ struct ieee80211_hw *hw;
+ struct ath9k_htc_priv *priv;
+ int ret;
+
+ hw = ieee80211_alloc_hw(sizeof(struct ath9k_htc_priv), &ath9k_htc_ops);
+ if (!hw)
+ return -ENOMEM;
+
+ priv = hw->priv;
+ priv->hw = hw;
+ priv->htc = htc_handle;
+ priv->dev = dev;
+ htc_handle->drv_priv = priv;
+ SET_IEEE80211_DEV(hw, priv->dev);
+
+ ret = ath9k_htc_wait_for_target(priv);
+ if (ret)
+ goto err_free;
+
+ priv->wmi = ath9k_init_wmi(priv);
+ if (!priv->wmi) {
+ ret = -EINVAL;
+ goto err_free;
+ }
+
+ ret = ath9k_init_htc_services(priv);
+ if (ret)
+ goto err_init;
+
+ /* The device may have been unplugged earlier. */
+ priv->op_flags &= ~OP_UNPLUGGED;
+
+ ret = ath9k_init_device(priv, devid);
+ if (ret)
+ goto err_init;
+
+ return 0;
+
+err_init:
+ ath9k_deinit_wmi(priv);
+err_free:
+ ieee80211_free_hw(hw);
+ return ret;
+}
+
+void ath9k_htc_disconnect_device(struct htc_target *htc_handle, bool hotunplug)
+{
+ if (htc_handle->drv_priv) {
+
+ /* Check if the device has been yanked out. */
+ if (hotunplug)
+ htc_handle->drv_priv->op_flags |= OP_UNPLUGGED;
+
+ ath9k_deinit_device(htc_handle->drv_priv);
+ ath9k_deinit_wmi(htc_handle->drv_priv);
+ ieee80211_free_hw(htc_handle->drv_priv->hw);
+ }
+}
+
+#ifdef CONFIG_PM
+int ath9k_htc_resume(struct htc_target *htc_handle)
+{
+ int ret;
+
+ ret = ath9k_htc_wait_for_target(htc_handle->drv_priv);
+ if (ret)
+ return ret;
+
+ ret = ath9k_init_htc_services(htc_handle->drv_priv);
+ return ret;
+}
+#endif
+
+static int __init ath9k_htc_init(void)
+{
+ int error;
+
+ error = ath9k_htc_debug_create_root();
+ if (error < 0) {
+ printk(KERN_ERR
+ "ath9k_htc: Unable to create debugfs root: %d\n",
+ error);
+ goto err_dbg;
+ }
+
+ error = ath9k_hif_usb_init();
+ if (error < 0) {
+ printk(KERN_ERR
+ "ath9k_htc: No USB devices found,"
+ " driver not installed.\n");
+ error = -ENODEV;
+ goto err_usb;
+ }
+
+ return 0;
+
+err_usb:
+ ath9k_htc_debug_remove_root();
+err_dbg:
+ return error;
+}
+module_init(ath9k_htc_init);
+
+static void __exit ath9k_htc_exit(void)
+{
+ ath9k_hif_usb_exit();
+ ath9k_htc_debug_remove_root();
+ printk(KERN_INFO "ath9k_htc: Driver unloaded\n");
+}
+module_exit(ath9k_htc_exit);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
new file mode 100644
index 000000000000..9d371c18eb41
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -0,0 +1,1775 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "htc.h"
+
+#ifdef CONFIG_ATH9K_HTC_DEBUGFS
+static struct dentry *ath9k_debugfs_root;
+#endif
+
+/*************/
+/* Utilities */
+/*************/
+
+static void ath_update_txpow(struct ath9k_htc_priv *priv)
+{
+ struct ath_hw *ah = priv->ah;
+ u32 txpow;
+
+ if (priv->curtxpow != priv->txpowlimit) {
+ ath9k_hw_set_txpowerlimit(ah, priv->txpowlimit);
+ /* read back in case value is clamped */
+ ath9k_hw_getcapability(ah, ATH9K_CAP_TXPOW, 1, &txpow);
+ priv->curtxpow = txpow;
+ }
+}
+
+/* HACK Alert: Use 11NG for 2.4, use 11NA for 5 */
+static enum htc_phymode ath9k_htc_get_curmode(struct ath9k_htc_priv *priv,
+ struct ath9k_channel *ichan)
+{
+ enum htc_phymode mode;
+
+ mode = HTC_MODE_AUTO;
+
+ switch (ichan->chanmode) {
+ case CHANNEL_G:
+ case CHANNEL_G_HT20:
+ case CHANNEL_G_HT40PLUS:
+ case CHANNEL_G_HT40MINUS:
+ mode = HTC_MODE_11NG;
+ break;
+ case CHANNEL_A:
+ case CHANNEL_A_HT20:
+ case CHANNEL_A_HT40PLUS:
+ case CHANNEL_A_HT40MINUS:
+ mode = HTC_MODE_11NA;
+ break;
+ default:
+ break;
+ }
+
+ return mode;
+}
+
+static bool ath9k_htc_setpower(struct ath9k_htc_priv *priv,
+ enum ath9k_power_mode mode)
+{
+ bool ret;
+
+ mutex_lock(&priv->htc_pm_lock);
+ ret = ath9k_hw_setpower(priv->ah, mode);
+ mutex_unlock(&priv->htc_pm_lock);
+
+ return ret;
+}
+
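+/*
+ * Power-save reference counting: the first wakeup call forces the chip
+ * awake, and the matching restore call puts it back into full/network
+ * sleep only once no other users hold a reference.
+ */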
+void ath9k_htc_ps_wakeup(struct ath9k_htc_priv *priv)
+{
+ mutex_lock(&priv->htc_pm_lock);
+ if (++priv->ps_usecount != 1)
+ goto unlock;
+ ath9k_hw_setpower(priv->ah, ATH9K_PM_AWAKE);
+
+unlock:
+ mutex_unlock(&priv->htc_pm_lock);
+}
+
+void ath9k_htc_ps_restore(struct ath9k_htc_priv *priv)
+{
+ mutex_lock(&priv->htc_pm_lock);
+ if (--priv->ps_usecount != 0)
+ goto unlock;
+
+ if (priv->ps_idle)
+ ath9k_hw_setpower(priv->ah, ATH9K_PM_FULL_SLEEP);
+ else if (priv->ps_enabled)
+ ath9k_hw_setpower(priv->ah, ATH9K_PM_NETWORK_SLEEP);
+
+unlock:
+ mutex_unlock(&priv->htc_pm_lock);
+}
+
+void ath9k_ps_work(struct work_struct *work)
+{
+ struct ath9k_htc_priv *priv =
+ container_of(work, struct ath9k_htc_priv,
+ ps_work);
+ ath9k_htc_setpower(priv, ATH9K_PM_AWAKE);
+
+ /*
+ * The chip wakes up after receiving the first beacon while network
+ * sleep is enabled. For the driver to be in sync with the hw, set the
+ * chip to awake and only then set it to sleep.
+ */
+ ath9k_htc_setpower(priv, ATH9K_PM_NETWORK_SLEEP);
+}
+
+static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
+ struct ieee80211_hw *hw,
+ struct ath9k_channel *hchan)
+{
+ struct ath_hw *ah = priv->ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ieee80211_conf *conf = &common->hw->conf;
+ bool fastcc = true;
+ struct ieee80211_channel *channel = hw->conf.channel;
+ enum htc_phymode mode;
+ __be16 htc_mode;
+ u8 cmd_rsp;
+ int ret;
+
+ if (priv->op_flags & OP_INVALID)
+ return -EIO;
+
+ if (priv->op_flags & OP_FULL_RESET)
+ fastcc = false;
+
+ /* Fiddle around with fastcc later on, for now just use full reset */
+ fastcc = false;
+ ath9k_htc_ps_wakeup(priv);
+ htc_stop(priv->htc);
+ WMI_CMD(WMI_DISABLE_INTR_CMDID);
+ WMI_CMD(WMI_DRAIN_TXQ_ALL_CMDID);
+ WMI_CMD(WMI_STOP_RECV_CMDID);
+
+ ath_print(common, ATH_DBG_CONFIG,
+ "(%u MHz) -> (%u MHz), HT: %d, HT40: %d\n",
+ priv->ah->curchan->channel,
+ channel->center_freq, conf_is_ht(conf), conf_is_ht40(conf));
+
+ ret = ath9k_hw_reset(ah, hchan, fastcc);
+ if (ret) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to reset channel (%u Mhz) "
+ "reset status %d\n", channel->center_freq, ret);
+ goto err;
+ }
+
+ ath_update_txpow(priv);
+
+ WMI_CMD(WMI_START_RECV_CMDID);
+ if (ret)
+ goto err;
+
+ ath9k_host_rx_init(priv);
+
+ mode = ath9k_htc_get_curmode(priv, hchan);
+ htc_mode = cpu_to_be16(mode);
+ WMI_CMD_BUF(WMI_SET_MODE_CMDID, &htc_mode);
+ if (ret)
+ goto err;
+
+ WMI_CMD(WMI_ENABLE_INTR_CMDID);
+ if (ret)
+ goto err;
+
+ htc_start(priv->htc);
+
+ priv->op_flags &= ~OP_FULL_RESET;
+err:
+ ath9k_htc_ps_restore(priv);
+ return ret;
+}
+
+static int ath9k_htc_add_monitor_interface(struct ath9k_htc_priv *priv)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath9k_htc_target_vif hvif;
+ int ret = 0;
+ u8 cmd_rsp;
+
+ if (priv->nvifs > 0)
+ return -ENOBUFS;
+
+ memset(&hvif, 0, sizeof(struct ath9k_htc_target_vif));
+ memcpy(&hvif.myaddr, common->macaddr, ETH_ALEN);
+
+ hvif.opmode = cpu_to_be32(HTC_M_MONITOR);
+ priv->ah->opmode = NL80211_IFTYPE_MONITOR;
+ hvif.index = priv->nvifs;
+
+ WMI_CMD_BUF(WMI_VAP_CREATE_CMDID, &hvif);
+ if (ret)
+ return ret;
+
+ priv->nvifs++;
+ return 0;
+}
+
+static int ath9k_htc_remove_monitor_interface(struct ath9k_htc_priv *priv)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath9k_htc_target_vif hvif;
+ int ret = 0;
+ u8 cmd_rsp;
+
+ memset(&hvif, 0, sizeof(struct ath9k_htc_target_vif));
+ memcpy(&hvif.myaddr, common->macaddr, ETH_ALEN);
+ hvif.index = 0; /* Should do for now */
+ WMI_CMD_BUF(WMI_VAP_REMOVE_CMDID, &hvif);
+ priv->nvifs--;
+
+ return ret;
+}
+
+static int ath9k_htc_add_station(struct ath9k_htc_priv *priv,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath9k_htc_target_sta tsta;
+ struct ath9k_htc_vif *avp = (struct ath9k_htc_vif *) vif->drv_priv;
+ struct ath9k_htc_sta *ista;
+ int ret;
+ u8 cmd_rsp;
+
+ if (priv->nstations >= ATH9K_HTC_MAX_STA)
+ return -ENOBUFS;
+
+ memset(&tsta, 0, sizeof(struct ath9k_htc_target_sta));
+
+ if (sta) {
+ ista = (struct ath9k_htc_sta *) sta->drv_priv;
+ memcpy(&tsta.macaddr, sta->addr, ETH_ALEN);
+ memcpy(&tsta.bssid, common->curbssid, ETH_ALEN);
+ tsta.associd = common->curaid;
+ tsta.is_vif_sta = 0;
+ tsta.valid = true;
+ ista->index = priv->nstations;
+ } else {
+ memcpy(&tsta.macaddr, vif->addr, ETH_ALEN);
+ tsta.is_vif_sta = 1;
+ }
+
+ tsta.sta_index = priv->nstations;
+ tsta.vif_index = avp->index;
+ tsta.maxampdu = 0xffff;
+ if (sta && sta->ht_cap.ht_supported)
+ tsta.flags = cpu_to_be16(ATH_HTC_STA_HT);
+
+ WMI_CMD_BUF(WMI_NODE_CREATE_CMDID, &tsta);
+ if (ret) {
+ if (sta)
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to add station entry for: %pM\n", sta->addr);
+ return ret;
+ }
+
+ if (sta)
+ ath_print(common, ATH_DBG_CONFIG,
+ "Added a station entry for: %pM (idx: %d)\n",
+ sta->addr, tsta.sta_index);
+
+ priv->nstations++;
+ return 0;
+}
+
+static int ath9k_htc_remove_station(struct ath9k_htc_priv *priv,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath9k_htc_sta *ista;
+ int ret;
+ u8 cmd_rsp, sta_idx;
+
+ if (sta) {
+ ista = (struct ath9k_htc_sta *) sta->drv_priv;
+ sta_idx = ista->index;
+ } else {
+ sta_idx = 0;
+ }
+
+ WMI_CMD_BUF(WMI_NODE_REMOVE_CMDID, &sta_idx);
+ if (ret) {
+ if (sta)
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to remove station entry for: %pM\n",
+ sta->addr);
+ return ret;
+ }
+
+ if (sta)
+ ath_print(common, ATH_DBG_CONFIG,
+ "Removed a station entry for: %pM (idx: %d)\n",
+ sta->addr, sta_idx);
+
+ priv->nstations--;
+ return 0;
+}
+
+static int ath9k_htc_update_cap_target(struct ath9k_htc_priv *priv)
+{
+ struct ath9k_htc_cap_target tcap;
+ int ret;
+ u8 cmd_rsp;
+
+ memset(&tcap, 0, sizeof(struct ath9k_htc_cap_target));
+
+ /* FIXME: Values are hardcoded */
+ tcap.flags = 0x240c40;
+ tcap.flags_ext = 0x80601000;
+ tcap.ampdu_limit = 0xffff0000;
+ tcap.ampdu_subframes = 20;
+ tcap.tx_chainmask_legacy = 1;
+ tcap.protmode = 1;
+ tcap.tx_chainmask = 1;
+
+ WMI_CMD_BUF(WMI_TARGET_IC_UPDATE_CMDID, &tcap);
+
+ return ret;
+}
+
+static int ath9k_htc_init_rate(struct ath9k_htc_priv *priv,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath9k_htc_sta *ista = (struct ath9k_htc_sta *) sta->drv_priv;
+ struct ieee80211_supported_band *sband;
+ struct ath9k_htc_target_rate trate;
+ u32 caps = 0;
+ u8 cmd_rsp;
+ int i, j, ret;
+
+ memset(&trate, 0, sizeof(trate));
+
+ /* Only 2GHz is supported */
+ sband = priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ];
+
+ for (i = 0, j = 0; i < sband->n_bitrates; i++) {
+ if (sta->supp_rates[sband->band] & BIT(i)) {
+ priv->tgt_rate.rates.legacy_rates.rs_rates[j]
+ = (sband->bitrates[i].bitrate * 2) / 10;
+ j++;
+ }
+ }
+ priv->tgt_rate.rates.legacy_rates.rs_nrates = j;
+
+ if (sta->ht_cap.ht_supported) {
+ for (i = 0, j = 0; i < 77; i++) {
+ if (sta->ht_cap.mcs.rx_mask[i/8] & (1<<(i%8)))
+ priv->tgt_rate.rates.ht_rates.rs_rates[j++] = i;
+ if (j == ATH_HTC_RATE_MAX)
+ break;
+ }
+ priv->tgt_rate.rates.ht_rates.rs_nrates = j;
+
+ caps = WLAN_RC_HT_FLAG;
+ if (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)
+ caps |= WLAN_RC_40_FLAG;
+ if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
+ caps |= WLAN_RC_SGI_FLAG;
+
+ }
+
+ priv->tgt_rate.sta_index = ista->index;
+ priv->tgt_rate.isnew = 1;
+ trate = priv->tgt_rate;
+ priv->tgt_rate.capflags = cpu_to_be32(caps);
+ trate.capflags = cpu_to_be32(caps);
+
+ WMI_CMD_BUF(WMI_RC_RATE_UPDATE_CMDID, &trate);
+ if (ret) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to initialize Rate information on target\n");
+ return ret;
+ }
+
+ ath_print(common, ATH_DBG_CONFIG,
+ "Updated target STA: %pM (caps: 0x%x)\n", sta->addr, caps);
+ return 0;
+}
+
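+/*
+ * Returns true when the operating channel's HT mode no longer matches
+ * hw->conf (e.g. HT40 requested while on an HT20 channel), i.e. the
+ * target's rate set needs to be refreshed.
+ */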
+static bool check_rc_update(struct ieee80211_hw *hw, bool *cw40)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ struct ieee80211_conf *conf = &hw->conf;
+
+ if (!conf_is_ht(conf))
+ return false;
+
+ if (!(priv->op_flags & OP_ASSOCIATED) ||
+ (priv->op_flags & OP_SCANNING))
+ return false;
+
+ if (conf_is_ht40(conf)) {
+ if (priv->ah->curchan->chanmode &
+ (CHANNEL_HT40PLUS | CHANNEL_HT40MINUS)) {
+ return false;
+ } else {
+ *cw40 = true;
+ return true;
+ }
+ } else { /* ht20 */
+ if (priv->ah->curchan->chanmode & CHANNEL_HT20)
+ return false;
+ else
+ return true;
+ }
+}
+
+static void ath9k_htc_rc_update(struct ath9k_htc_priv *priv, bool is_cw40)
+{
+ struct ath9k_htc_target_rate trate;
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ int ret;
+ u32 caps = be32_to_cpu(priv->tgt_rate.capflags);
+ u8 cmd_rsp;
+
+ memset(&trate, 0, sizeof(trate));
+
+ trate = priv->tgt_rate;
+
+ if (is_cw40)
+ caps |= WLAN_RC_40_FLAG;
+ else
+ caps &= ~WLAN_RC_40_FLAG;
+
+ priv->tgt_rate.capflags = cpu_to_be32(caps);
+ trate.capflags = cpu_to_be32(caps);
+
+ WMI_CMD_BUF(WMI_RC_RATE_UPDATE_CMDID, &trate);
+ if (ret) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to update Rate information on target\n");
+ return;
+ }
+
+ ath_print(common, ATH_DBG_CONFIG, "Rate control updated with "
+ "caps:0x%x on target\n", priv->tgt_rate.capflags);
+}
+
+static int ath9k_htc_aggr_oper(struct ath9k_htc_priv *priv,
+ struct ieee80211_vif *vif,
+ u8 *sta_addr, u8 tid, bool oper)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath9k_htc_target_aggr aggr;
+ struct ieee80211_sta *sta = NULL;
+ struct ath9k_htc_sta *ista;
+ int ret = 0;
+ u8 cmd_rsp;
+
+ if (tid >= ATH9K_HTC_MAX_TID)
+ return -EINVAL;
+
+ memset(&aggr, 0, sizeof(struct ath9k_htc_target_aggr));
+
+ rcu_read_lock();
+
+ /* Check if we are able to retrieve the station */
+ sta = ieee80211_find_sta(vif, sta_addr);
+ if (!sta) {
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+
+ ista = (struct ath9k_htc_sta *) sta->drv_priv;
+
+ if (oper)
+ ista->tid_state[tid] = AGGR_START;
+ else
+ ista->tid_state[tid] = AGGR_STOP;
+
+ aggr.sta_index = ista->index;
+
+ rcu_read_unlock();
+
+ aggr.tidno = tid;
+ aggr.aggr_enable = oper;
+
+ WMI_CMD_BUF(WMI_TX_AGGR_ENABLE_CMDID, &aggr);
+ if (ret)
+ ath_print(common, ATH_DBG_CONFIG,
+ "Unable to %s TX aggregation for (%pM, %d)\n",
+ (oper) ? "start" : "stop", sta->addr, tid);
+ else
+ ath_print(common, ATH_DBG_CONFIG,
+ "%s aggregation for (%pM, %d)\n",
+ (oper) ? "Starting" : "Stopping", sta->addr, tid);
+
+ return ret;
+}
+
+void ath9k_htc_aggr_work(struct work_struct *work)
+{
+ int ret = 0;
+ struct ath9k_htc_priv *priv =
+ container_of(work, struct ath9k_htc_priv,
+ ath9k_aggr_work.work);
+ struct ath9k_htc_aggr_work *wk = &priv->aggr_work;
+
+ mutex_lock(&wk->mutex);
+
+ switch (wk->action) {
+ case IEEE80211_AMPDU_TX_START:
+ ret = ath9k_htc_aggr_oper(priv, wk->vif, wk->sta_addr,
+ wk->tid, true);
+ if (!ret)
+ ieee80211_start_tx_ba_cb(wk->vif, wk->sta_addr,
+ wk->tid);
+ break;
+ case IEEE80211_AMPDU_TX_STOP:
+ ath9k_htc_aggr_oper(priv, wk->vif, wk->sta_addr,
+ wk->tid, false);
+ ieee80211_stop_tx_ba_cb(wk->vif, wk->sta_addr, wk->tid);
+ break;
+ default:
+ ath_print(ath9k_hw_common(priv->ah), ATH_DBG_FATAL,
+ "Unknown AMPDU action\n");
+ }
+
+ mutex_unlock(&wk->mutex);
+}
+
+/*********/
+/* DEBUG */
+/*********/
+
+#ifdef CONFIG_ATH9K_HTC_DEBUGFS
+
+static int ath9k_debugfs_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t read_file_tgt_stats(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath9k_htc_priv *priv =
+ (struct ath9k_htc_priv *) file->private_data;
+ struct ath9k_htc_target_stats cmd_rsp;
+ char buf[512];
+ unsigned int len = 0;
+ int ret = 0;
+
+ memset(&cmd_rsp, 0, sizeof(cmd_rsp));
+
+ WMI_CMD(WMI_TGT_STATS_CMDID);
+ if (ret)
+ return -EINVAL;
+
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%19s : %10u\n", "TX Short Retries",
+ be32_to_cpu(cmd_rsp.tx_shortretry));
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%19s : %10u\n", "TX Long Retries",
+ be32_to_cpu(cmd_rsp.tx_longretry));
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%19s : %10u\n", "TX Xretries",
+ be32_to_cpu(cmd_rsp.tx_xretries));
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%19s : %10u\n", "TX Unaggr. Xretries",
+ be32_to_cpu(cmd_rsp.ht_txunaggr_xretry));
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%19s : %10u\n", "TX Xretries (HT)",
+ be32_to_cpu(cmd_rsp.ht_tx_xretries));
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%19s : %10u\n", "TX Rate", priv->debug.txrate);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_tgt_stats = {
+ .read = read_file_tgt_stats,
+ .open = ath9k_debugfs_open,
+ .owner = THIS_MODULE
+};
+
+static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath9k_htc_priv *priv =
+ (struct ath9k_htc_priv *) file->private_data;
+ char buf[512];
+ unsigned int len = 0;
+
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "Buffers queued",
+ priv->debug.tx_stats.buf_queued);
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "Buffers completed",
+ priv->debug.tx_stats.buf_completed);
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "SKBs queued",
+ priv->debug.tx_stats.skb_queued);
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "SKBs completed",
+ priv->debug.tx_stats.skb_completed);
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "SKBs dropped",
+ priv->debug.tx_stats.skb_dropped);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_xmit = {
+ .read = read_file_xmit,
+ .open = ath9k_debugfs_open,
+ .owner = THIS_MODULE
+};
+
+static ssize_t read_file_recv(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath9k_htc_priv *priv =
+ (struct ath9k_htc_priv *) file->private_data;
+ char buf[512];
+ unsigned int len = 0;
+
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "SKBs allocated",
+ priv->debug.rx_stats.skb_allocated);
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "SKBs completed",
+ priv->debug.rx_stats.skb_completed);
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%20s : %10u\n", "SKBs Dropped",
+ priv->debug.rx_stats.skb_dropped);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_recv = {
+ .read = read_file_recv,
+ .open = ath9k_debugfs_open,
+ .owner = THIS_MODULE
+};
+
+int ath9k_htc_init_debug(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+
+ if (!ath9k_debugfs_root)
+ return -ENOENT;
+
+ priv->debug.debugfs_phy = debugfs_create_dir(wiphy_name(priv->hw->wiphy),
+ ath9k_debugfs_root);
+ if (!priv->debug.debugfs_phy)
+ goto err;
+
+ priv->debug.debugfs_tgt_stats = debugfs_create_file("tgt_stats", S_IRUSR,
+ priv->debug.debugfs_phy,
+ priv, &fops_tgt_stats);
+ if (!priv->debug.debugfs_tgt_stats)
+ goto err;
+
+ priv->debug.debugfs_xmit = debugfs_create_file("xmit", S_IRUSR,
+ priv->debug.debugfs_phy,
+ priv, &fops_xmit);
+ if (!priv->debug.debugfs_xmit)
+ goto err;
+
+ priv->debug.debugfs_recv = debugfs_create_file("recv", S_IRUSR,
+ priv->debug.debugfs_phy,
+ priv, &fops_recv);
+ if (!priv->debug.debugfs_recv)
+ goto err;
+
+ return 0;
+
+err:
+ ath9k_htc_exit_debug(ah);
+ return -ENOMEM;
+}
+
+void ath9k_htc_exit_debug(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+
+ debugfs_remove(priv->debug.debugfs_recv);
+ debugfs_remove(priv->debug.debugfs_xmit);
+ debugfs_remove(priv->debug.debugfs_tgt_stats);
+ debugfs_remove(priv->debug.debugfs_phy);
+}
+
+int ath9k_htc_debug_create_root(void)
+{
+ ath9k_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
+ if (!ath9k_debugfs_root)
+ return -ENOENT;
+
+ return 0;
+}
+
+void ath9k_htc_debug_remove_root(void)
+{
+ debugfs_remove(ath9k_debugfs_root);
+ ath9k_debugfs_root = NULL;
+}
+
+#endif /* CONFIG_ATH9K_HTC_DEBUGFS */
+
+/*******/
+/* ANI */
+/*******/
+
+static void ath_start_ani(struct ath9k_htc_priv *priv)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ unsigned long timestamp = jiffies_to_msecs(jiffies);
+
+ common->ani.longcal_timer = timestamp;
+ common->ani.shortcal_timer = timestamp;
+ common->ani.checkani_timer = timestamp;
+
+ ieee80211_queue_delayed_work(common->hw, &priv->ath9k_ani_work,
+ msecs_to_jiffies(ATH_ANI_POLLINTERVAL));
+}
+
+void ath9k_ani_work(struct work_struct *work)
+{
+ struct ath9k_htc_priv *priv =
+ container_of(work, struct ath9k_htc_priv,
+ ath9k_ani_work.work);
+ struct ath_hw *ah = priv->ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ bool longcal = false;
+ bool shortcal = false;
+ bool aniflag = false;
+ unsigned int timestamp = jiffies_to_msecs(jiffies);
+ u32 cal_interval, short_cal_interval;
+
+ short_cal_interval = ATH_STA_SHORT_CALINTERVAL;
+
+ /* Only calibrate if awake */
+ if (ah->power_mode != ATH9K_PM_AWAKE)
+ goto set_timer;
+
+ /* Long calibration runs independently of short calibration. */
+ if ((timestamp - common->ani.longcal_timer) >= ATH_LONG_CALINTERVAL) {
+ longcal = true;
+ ath_print(common, ATH_DBG_ANI, "longcal @%lu\n", jiffies);
+ common->ani.longcal_timer = timestamp;
+ }
+
+ /* Short calibration applies only while caldone is false */
+ if (!common->ani.caldone) {
+ if ((timestamp - common->ani.shortcal_timer) >=
+ short_cal_interval) {
+ shortcal = true;
+ ath_print(common, ATH_DBG_ANI,
+ "shortcal @%lu\n", jiffies);
+ common->ani.shortcal_timer = timestamp;
+ common->ani.resetcal_timer = timestamp;
+ }
+ } else {
+ if ((timestamp - common->ani.resetcal_timer) >=
+ ATH_RESTART_CALINTERVAL) {
+ common->ani.caldone = ath9k_hw_reset_calvalid(ah);
+ if (common->ani.caldone)
+ common->ani.resetcal_timer = timestamp;
+ }
+ }
+
+ /* Verify whether we must check ANI */
+ if ((timestamp - common->ani.checkani_timer) >= ATH_ANI_POLLINTERVAL) {
+ aniflag = true;
+ common->ani.checkani_timer = timestamp;
+ }
+
+ /* Skip all processing if there's nothing to do. */
+ if (longcal || shortcal || aniflag) {
+
+ ath9k_htc_ps_wakeup(priv);
+
+ /* Call ANI routine if necessary */
+ if (aniflag)
+ ath9k_hw_ani_monitor(ah, ah->curchan);
+
+ /* Perform calibration if necessary */
+ if (longcal || shortcal) {
+ common->ani.caldone =
+ ath9k_hw_calibrate(ah, ah->curchan,
+ common->rx_chainmask,
+ longcal);
+
+ if (longcal)
+ common->ani.noise_floor =
+ ath9k_hw_getchan_noise(ah, ah->curchan);
+
+ ath_print(common, ATH_DBG_ANI,
+ " calibrate chan %u/%x nf: %d\n",
+ ah->curchan->channel,
+ ah->curchan->channelFlags,
+ common->ani.noise_floor);
+ }
+
+ ath9k_htc_ps_restore(priv);
+ }
+
+set_timer:
+ /*
+ * Set timer interval based on previous results.
+ * The interval must be the shortest necessary to satisfy ANI,
+ * short calibration and long calibration.
+ */
+ cal_interval = ATH_LONG_CALINTERVAL;
+ if (priv->ah->config.enable_ani)
+ cal_interval = min(cal_interval, (u32)ATH_ANI_POLLINTERVAL);
+ if (!common->ani.caldone)
+ cal_interval = min(cal_interval, (u32)short_cal_interval);
+
+ ieee80211_queue_delayed_work(common->hw, &priv->ath9k_ani_work,
+ msecs_to_jiffies(cal_interval));
+}
+
+/*******/
+/* LED */
+/*******/
+
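+/*
+ * Adaptive blinking: the on/off durations shrink with TX/RX activity
+ * (led_on_cnt/led_off_cnt) and fall back to the idle durations when
+ * there is no traffic.
+ */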
+static void ath9k_led_blink_work(struct work_struct *work)
+{
+ struct ath9k_htc_priv *priv = container_of(work, struct ath9k_htc_priv,
+ ath9k_led_blink_work.work);
+
+ if (!(priv->op_flags & OP_LED_ASSOCIATED))
+ return;
+
+ if ((priv->led_on_duration == ATH_LED_ON_DURATION_IDLE) ||
+ (priv->led_off_duration == ATH_LED_OFF_DURATION_IDLE))
+ ath9k_hw_set_gpio(priv->ah, priv->ah->led_pin, 0);
+ else
+ ath9k_hw_set_gpio(priv->ah, priv->ah->led_pin,
+ (priv->op_flags & OP_LED_ON) ? 1 : 0);
+
+ ieee80211_queue_delayed_work(priv->hw,
+ &priv->ath9k_led_blink_work,
+ (priv->op_flags & OP_LED_ON) ?
+ msecs_to_jiffies(priv->led_off_duration) :
+ msecs_to_jiffies(priv->led_on_duration));
+
+ priv->led_on_duration = priv->led_on_cnt ?
+ max((ATH_LED_ON_DURATION_IDLE - priv->led_on_cnt), 25) :
+ ATH_LED_ON_DURATION_IDLE;
+ priv->led_off_duration = priv->led_off_cnt ?
+ max((ATH_LED_OFF_DURATION_IDLE - priv->led_off_cnt), 10) :
+ ATH_LED_OFF_DURATION_IDLE;
+ priv->led_on_cnt = priv->led_off_cnt = 0;
+
+ if (priv->op_flags & OP_LED_ON)
+ priv->op_flags &= ~OP_LED_ON;
+ else
+ priv->op_flags |= OP_LED_ON;
+}
+
+static void ath9k_led_brightness_work(struct work_struct *work)
+{
+ struct ath_led *led = container_of(work, struct ath_led,
+ brightness_work.work);
+ struct ath9k_htc_priv *priv = led->priv;
+
+ switch (led->brightness) {
+ case LED_OFF:
+ if (led->led_type == ATH_LED_ASSOC ||
+ led->led_type == ATH_LED_RADIO) {
+ ath9k_hw_set_gpio(priv->ah, priv->ah->led_pin,
+ (led->led_type == ATH_LED_RADIO));
+ priv->op_flags &= ~OP_LED_ASSOCIATED;
+ if (led->led_type == ATH_LED_RADIO)
+ priv->op_flags &= ~OP_LED_ON;
+ } else {
+ priv->led_off_cnt++;
+ }
+ break;
+ case LED_FULL:
+ if (led->led_type == ATH_LED_ASSOC) {
+ priv->op_flags |= OP_LED_ASSOCIATED;
+ ieee80211_queue_delayed_work(priv->hw,
+ &priv->ath9k_led_blink_work, 0);
+ } else if (led->led_type == ATH_LED_RADIO) {
+ ath9k_hw_set_gpio(priv->ah, priv->ah->led_pin, 0);
+ priv->op_flags |= OP_LED_ON;
+ } else {
+ priv->led_on_cnt++;
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static void ath9k_led_brightness(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct ath_led *led = container_of(led_cdev, struct ath_led, led_cdev);
+ struct ath9k_htc_priv *priv = led->priv;
+
+ led->brightness = brightness;
+ if (!(priv->op_flags & OP_LED_DEINIT))
+ ieee80211_queue_delayed_work(priv->hw,
+ &led->brightness_work, 0);
+}
+
+static void ath9k_led_stop_brightness(struct ath9k_htc_priv *priv)
+{
+ cancel_delayed_work_sync(&priv->radio_led.brightness_work);
+ cancel_delayed_work_sync(&priv->assoc_led.brightness_work);
+ cancel_delayed_work_sync(&priv->tx_led.brightness_work);
+ cancel_delayed_work_sync(&priv->rx_led.brightness_work);
+}
+
+static int ath9k_register_led(struct ath9k_htc_priv *priv, struct ath_led *led,
+ char *trigger)
+{
+ int ret;
+
+ led->priv = priv;
+ led->led_cdev.name = led->name;
+ led->led_cdev.default_trigger = trigger;
+ led->led_cdev.brightness_set = ath9k_led_brightness;
+
+ ret = led_classdev_register(wiphy_dev(priv->hw->wiphy), &led->led_cdev);
+ if (ret)
+ ath_print(ath9k_hw_common(priv->ah), ATH_DBG_FATAL,
+ "Failed to register led:%s", led->name);
+ else
+ led->registered = 1;
+
+ INIT_DELAYED_WORK(&led->brightness_work, ath9k_led_brightness_work);
+
+ return ret;
+}
+
+static void ath9k_unregister_led(struct ath_led *led)
+{
+ if (led->registered) {
+ led_classdev_unregister(&led->led_cdev);
+ led->registered = 0;
+ }
+}
+
+void ath9k_deinit_leds(struct ath9k_htc_priv *priv)
+{
+ priv->op_flags |= OP_LED_DEINIT;
+ ath9k_unregister_led(&priv->assoc_led);
+ priv->op_flags &= ~OP_LED_ASSOCIATED;
+ ath9k_unregister_led(&priv->tx_led);
+ ath9k_unregister_led(&priv->rx_led);
+ ath9k_unregister_led(&priv->radio_led);
+}
+
+void ath9k_init_leds(struct ath9k_htc_priv *priv)
+{
+ char *trigger;
+ int ret;
+
+ if (AR_SREV_9287(priv->ah))
+ priv->ah->led_pin = ATH_LED_PIN_9287;
+ else if (AR_SREV_9271(priv->ah))
+ priv->ah->led_pin = ATH_LED_PIN_9271;
+ else
+ priv->ah->led_pin = ATH_LED_PIN_DEF;
+
+ /* Configure the LED pin for output */
+ ath9k_hw_cfg_output(priv->ah, priv->ah->led_pin,
+ AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
+ /* LED off, active low */
+ ath9k_hw_set_gpio(priv->ah, priv->ah->led_pin, 1);
+
+ INIT_DELAYED_WORK(&priv->ath9k_led_blink_work, ath9k_led_blink_work);
+
+ trigger = ieee80211_get_radio_led_name(priv->hw);
+ snprintf(priv->radio_led.name, sizeof(priv->radio_led.name),
+ "ath9k-%s::radio", wiphy_name(priv->hw->wiphy));
+ ret = ath9k_register_led(priv, &priv->radio_led, trigger);
+ priv->radio_led.led_type = ATH_LED_RADIO;
+ if (ret)
+ goto fail;
+
+ trigger = ieee80211_get_assoc_led_name(priv->hw);
+ snprintf(priv->assoc_led.name, sizeof(priv->assoc_led.name),
+ "ath9k-%s::assoc", wiphy_name(priv->hw->wiphy));
+ ret = ath9k_register_led(priv, &priv->assoc_led, trigger);
+ priv->assoc_led.led_type = ATH_LED_ASSOC;
+ if (ret)
+ goto fail;
+
+ trigger = ieee80211_get_tx_led_name(priv->hw);
+ snprintf(priv->tx_led.name, sizeof(priv->tx_led.name),
+ "ath9k-%s::tx", wiphy_name(priv->hw->wiphy));
+ ret = ath9k_register_led(priv, &priv->tx_led, trigger);
+ priv->tx_led.led_type = ATH_LED_TX;
+ if (ret)
+ goto fail;
+
+ trigger = ieee80211_get_rx_led_name(priv->hw);
+ snprintf(priv->rx_led.name, sizeof(priv->rx_led.name),
+ "ath9k-%s::rx", wiphy_name(priv->hw->wiphy));
+ ret = ath9k_register_led(priv, &priv->rx_led, trigger);
+ priv->rx_led.led_type = ATH_LED_RX;
+ if (ret)
+ goto fail;
+
+ priv->op_flags &= ~OP_LED_DEINIT;
+
+ return;
+
+fail:
+ cancel_delayed_work_sync(&priv->ath9k_led_blink_work);
+ ath9k_deinit_leds(priv);
+}
+
+/*******************/
+/* Rfkill */
+/*******************/
+
+static bool ath_is_rfkill_set(struct ath9k_htc_priv *priv)
+{
+ return ath9k_hw_gpio_get(priv->ah, priv->ah->rfkill_gpio) ==
+ priv->ah->rfkill_polarity;
+}
+
+static void ath9k_htc_rfkill_poll_state(struct ieee80211_hw *hw)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ bool blocked = !!ath_is_rfkill_set(priv);
+
+ wiphy_rfkill_set_hw_state(hw->wiphy, blocked);
+}
+
+void ath9k_start_rfkill_poll(struct ath9k_htc_priv *priv)
+{
+ if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
+ wiphy_rfkill_start_polling(priv->hw->wiphy);
+}
+
+/**********************/
+/* mac80211 Callbacks */
+/**********************/
+
+static int ath9k_htc_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr;
+ struct ath9k_htc_priv *priv = hw->priv;
+ int padpos, padsize, ret;
+
+ hdr = (struct ieee80211_hdr *) skb->data;
+
+ /* Add the padding after the header if this is not already done */
+ padpos = ath9k_cmn_padpos(hdr->frame_control);
+ padsize = padpos & 3;
+ if (padsize && skb->len > padpos) {
+ if (skb_headroom(skb) < padsize)
+ return -1;
+ skb_push(skb, padsize);
+ memmove(skb->data, skb->data + padsize, padpos);
+ }
+
+ ret = ath9k_htc_tx_start(priv, skb);
+ if (ret != 0) {
+ if (ret == -ENOMEM) {
+ ath_print(ath9k_hw_common(priv->ah), ATH_DBG_XMIT,
+ "Stopping TX queues\n");
+ ieee80211_stop_queues(hw);
+ spin_lock_bh(&priv->tx_lock);
+ priv->tx_queues_stop = true;
+ spin_unlock_bh(&priv->tx_lock);
+ } else {
+ ath_print(ath9k_hw_common(priv->ah), ATH_DBG_XMIT,
+ "Tx failed\n");
+ }
+ goto fail_tx;
+ }
+
+ return 0;
+
+fail_tx:
+ dev_kfree_skb_any(skb);
+ return 0;
+}
+
+static int ath9k_htc_radio_enable(struct ieee80211_hw *hw, bool led)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ struct ath_hw *ah = priv->ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ieee80211_channel *curchan = hw->conf.channel;
+ struct ath9k_channel *init_channel;
+ int ret = 0;
+ enum htc_phymode mode;
+ __be16 htc_mode;
+ u8 cmd_rsp;
+
+ ath_print(common, ATH_DBG_CONFIG,
+ "Starting driver with initial channel: %d MHz\n",
+ curchan->center_freq);
+
+ /* setup initial channel */
+ init_channel = ath9k_cmn_get_curchannel(hw, ah);
+
+ /* Reset SERDES registers */
+ ath9k_hw_configpcipowersave(ah, 0, 0);
+
+ ath9k_hw_htc_resetinit(ah);
+ ret = ath9k_hw_reset(ah, init_channel, false);
+ if (ret) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to reset hardware; reset status %d "
+ "(freq %u MHz)\n", ret, curchan->center_freq);
+ return ret;
+ }
+
+ ath_update_txpow(priv);
+
+ mode = ath9k_htc_get_curmode(priv, init_channel);
+ htc_mode = cpu_to_be16(mode);
+ WMI_CMD_BUF(WMI_SET_MODE_CMDID, &htc_mode);
+ WMI_CMD(WMI_ATH_INIT_CMDID);
+ WMI_CMD(WMI_START_RECV_CMDID);
+
+ ath9k_host_rx_init(priv);
+
+ priv->op_flags &= ~OP_INVALID;
+ htc_start(priv->htc);
+
+ spin_lock_bh(&priv->tx_lock);
+ priv->tx_queues_stop = false;
+ spin_unlock_bh(&priv->tx_lock);
+
+ if (led) {
+ /* Enable LED */
+ ath9k_hw_cfg_output(ah, ah->led_pin,
+ AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
+ ath9k_hw_set_gpio(ah, ah->led_pin, 0);
+ }
+
+ ieee80211_wake_queues(hw);
+
+ return ret;
+}
+
+static int ath9k_htc_start(struct ieee80211_hw *hw)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ int ret = 0;
+
+ mutex_lock(&priv->mutex);
+ ret = ath9k_htc_radio_enable(hw, false);
+ mutex_unlock(&priv->mutex);
+
+ return ret;
+}
+
+static void ath9k_htc_radio_disable(struct ieee80211_hw *hw, bool led)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ struct ath_hw *ah = priv->ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ int ret = 0;
+ u8 cmd_rsp;
+
+ if (priv->op_flags & OP_INVALID) {
+ ath_print(common, ATH_DBG_ANY, "Device not present\n");
+ return;
+ }
+
+ if (led) {
+ /* Disable LED */
+ ath9k_hw_set_gpio(ah, ah->led_pin, 1);
+ ath9k_hw_cfg_gpio_input(ah, ah->led_pin);
+ }
+
+ /* Cancel all the running timers/work .. */
+ cancel_work_sync(&priv->ps_work);
+ cancel_delayed_work_sync(&priv->ath9k_ani_work);
+ cancel_delayed_work_sync(&priv->ath9k_aggr_work);
+ cancel_delayed_work_sync(&priv->ath9k_led_blink_work);
+ ath9k_led_stop_brightness(priv);
+
+ ath9k_htc_ps_wakeup(priv);
+ htc_stop(priv->htc);
+ WMI_CMD(WMI_DISABLE_INTR_CMDID);
+ WMI_CMD(WMI_DRAIN_TXQ_ALL_CMDID);
+ WMI_CMD(WMI_STOP_RECV_CMDID);
+ ath9k_hw_phy_disable(ah);
+ ath9k_hw_disable(ah);
+ ath9k_hw_configpcipowersave(ah, 1, 1);
+ ath9k_htc_ps_restore(priv);
+ ath9k_htc_setpower(priv, ATH9K_PM_FULL_SLEEP);
+
+ skb_queue_purge(&priv->tx_queue);
+
+ /* Remove monitor interface here */
+ if (ah->opmode == NL80211_IFTYPE_MONITOR) {
+ if (ath9k_htc_remove_monitor_interface(priv))
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to remove monitor interface\n");
+ else
+ ath_print(common, ATH_DBG_CONFIG,
+ "Monitor interface removed\n");
+ }
+
+ priv->op_flags |= OP_INVALID;
+
+ ath_print(common, ATH_DBG_CONFIG, "Driver halt\n");
+}
+
+static void ath9k_htc_stop(struct ieee80211_hw *hw)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+
+ mutex_lock(&priv->mutex);
+ ath9k_htc_radio_disable(hw, false);
+ mutex_unlock(&priv->mutex);
+}
+
+static int ath9k_htc_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ struct ath9k_htc_vif *avp = (void *)vif->drv_priv;
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath9k_htc_target_vif hvif;
+ int ret = 0;
+ u8 cmd_rsp;
+
+ mutex_lock(&priv->mutex);
+
+ /* Only one interface for now */
+ if (priv->nvifs > 0) {
+ ret = -ENOBUFS;
+ goto out;
+ }
+
+ ath9k_htc_ps_wakeup(priv);
+ memset(&hvif, 0, sizeof(struct ath9k_htc_target_vif));
+ memcpy(&hvif.myaddr, vif->addr, ETH_ALEN);
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ hvif.opmode = cpu_to_be32(HTC_M_STA);
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ hvif.opmode = cpu_to_be32(HTC_M_IBSS);
+ break;
+ default:
+ ath_print(common, ATH_DBG_FATAL,
+ "Interface type %d not yet supported\n", vif->type);
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ ath_print(common, ATH_DBG_CONFIG,
+ "Attach a VIF of type: %d\n", vif->type);
+
+ priv->ah->opmode = vif->type;
+
+ /* Index starts from zero on the target */
+ avp->index = hvif.index = priv->nvifs;
+ hvif.rtsthreshold = cpu_to_be16(2304);
+ WMI_CMD_BUF(WMI_VAP_CREATE_CMDID, &hvif);
+ if (ret)
+ goto out;
+
+ priv->nvifs++;
+
+ /*
+ * We need a node in target to tx mgmt frames
+ * before association.
+ */
+ ret = ath9k_htc_add_station(priv, vif, NULL);
+ if (ret)
+ goto out;
+
+ ret = ath9k_htc_update_cap_target(priv);
+ if (ret)
+ ath_print(common, ATH_DBG_CONFIG,
+ "Failed to update capability in target\n");
+
+ priv->vif = vif;
+out:
+ ath9k_htc_ps_restore(priv);
+ mutex_unlock(&priv->mutex);
+ return ret;
+}
+
+static void ath9k_htc_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath9k_htc_vif *avp = (void *)vif->drv_priv;
+ struct ath9k_htc_target_vif hvif;
+ int ret = 0;
+ u8 cmd_rsp;
+
+ ath_print(common, ATH_DBG_CONFIG, "Detach Interface\n");
+
+ mutex_lock(&priv->mutex);
+
+ memset(&hvif, 0, sizeof(struct ath9k_htc_target_vif));
+ memcpy(&hvif.myaddr, vif->addr, ETH_ALEN);
+ hvif.index = avp->index;
+ WMI_CMD_BUF(WMI_VAP_REMOVE_CMDID, &hvif);
+ priv->nvifs--;
+
+ ath9k_htc_remove_station(priv, vif, NULL);
+ priv->vif = NULL;
+
+ mutex_unlock(&priv->mutex);
+}
+
+static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ieee80211_conf *conf = &hw->conf;
+
+ mutex_lock(&priv->mutex);
+
+ if (changed & IEEE80211_CONF_CHANGE_IDLE) {
+ bool enable_radio = false;
+ bool idle = !!(conf->flags & IEEE80211_CONF_IDLE);
+
+ if (!idle && priv->ps_idle)
+ enable_radio = true;
+
+ priv->ps_idle = idle;
+
+ if (enable_radio) {
+ ath9k_htc_setpower(priv, ATH9K_PM_AWAKE);
+ ath9k_htc_radio_enable(hw, true);
+ ath_print(common, ATH_DBG_CONFIG,
+ "not-idle: enabling radio\n");
+ }
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+ struct ieee80211_channel *curchan = hw->conf.channel;
+ int pos = curchan->hw_value;
+ bool is_cw40 = false;
+
+ ath_print(common, ATH_DBG_CONFIG, "Set channel: %d MHz\n",
+ curchan->center_freq);
+
+ if (check_rc_update(hw, &is_cw40))
+ ath9k_htc_rc_update(priv, is_cw40);
+
+ ath9k_cmn_update_ichannel(hw, &priv->ah->channels[pos]);
+
+ if (ath9k_htc_set_channel(priv, hw, &priv->ah->channels[pos]) < 0) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to set channel\n");
+ mutex_unlock(&priv->mutex);
+ return -EINVAL;
+ }
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_PS) {
+ if (conf->flags & IEEE80211_CONF_PS) {
+ ath9k_htc_setpower(priv, ATH9K_PM_NETWORK_SLEEP);
+ priv->ps_enabled = true;
+ } else {
+ priv->ps_enabled = false;
+ cancel_work_sync(&priv->ps_work);
+ ath9k_htc_setpower(priv, ATH9K_PM_AWAKE);
+ }
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
+ if (conf->flags & IEEE80211_CONF_MONITOR) {
+ if (ath9k_htc_add_monitor_interface(priv))
+ ath_print(common, ATH_DBG_FATAL,
+ "Failed to set monitor mode\n");
+ else
+ ath_print(common, ATH_DBG_CONFIG,
+ "HW opmode set to Monitor mode\n");
+ }
+ }
+
+ if (priv->ps_idle) {
+ ath_print(common, ATH_DBG_CONFIG,
+ "idle: disabling radio\n");
+ ath9k_htc_radio_disable(hw, true);
+ }
+
+ mutex_unlock(&priv->mutex);
+
+ return 0;
+}
+
+#define SUPPORTED_FILTERS \
+ (FIF_PROMISC_IN_BSS | \
+ FIF_ALLMULTI | \
+ FIF_CONTROL | \
+ FIF_PSPOLL | \
+ FIF_OTHER_BSS | \
+ FIF_BCN_PRBRESP_PROMISC | \
+ FIF_FCSFAIL)
+
+static void ath9k_htc_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags,
+ u64 multicast)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ u32 rfilt;
+
+ mutex_lock(&priv->mutex);
+
+ ath9k_htc_ps_wakeup(priv);
+ changed_flags &= SUPPORTED_FILTERS;
+ *total_flags &= SUPPORTED_FILTERS;
+
+ priv->rxfilter = *total_flags;
+ rfilt = ath9k_htc_calcrxfilter(priv);
+ ath9k_hw_setrxfilter(priv->ah, rfilt);
+
+ ath_print(ath9k_hw_common(priv->ah), ATH_DBG_CONFIG,
+ "Set HW RX filter: 0x%x\n", rfilt);
+
+ ath9k_htc_ps_restore(priv);
+ mutex_unlock(&priv->mutex);
+}
+
+static void ath9k_htc_sta_notify(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum sta_notify_cmd cmd,
+ struct ieee80211_sta *sta)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ int ret;
+
+ mutex_lock(&priv->mutex);
+
+ switch (cmd) {
+ case STA_NOTIFY_ADD:
+ ret = ath9k_htc_add_station(priv, vif, sta);
+ if (!ret)
+ ath9k_htc_init_rate(priv, vif, sta);
+ break;
+ case STA_NOTIFY_REMOVE:
+ ath9k_htc_remove_station(priv, vif, sta);
+ break;
+ default:
+ break;
+ }
+
+ mutex_unlock(&priv->mutex);
+}
+
+static int ath9k_htc_conf_tx(struct ieee80211_hw *hw, u16 queue,
+ const struct ieee80211_tx_queue_params *params)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath9k_tx_queue_info qi;
+ int ret = 0, qnum;
+
+ if (queue >= WME_NUM_AC)
+ return 0;
+
+ mutex_lock(&priv->mutex);
+
+ memset(&qi, 0, sizeof(struct ath9k_tx_queue_info));
+
+ qi.tqi_aifs = params->aifs;
+ qi.tqi_cwmin = params->cw_min;
+ qi.tqi_cwmax = params->cw_max;
+ qi.tqi_burstTime = params->txop;
+
+ qnum = get_hw_qnum(queue, priv->hwq_map);
+
+ ath_print(common, ATH_DBG_CONFIG,
+ "Configure tx [queue/hwq] [%d/%d], "
+ "aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n",
+ queue, qnum, params->aifs, params->cw_min,
+ params->cw_max, params->txop);
+
+ ret = ath_htc_txq_update(priv, qnum, &qi);
+ if (ret)
+ ath_print(common, ATH_DBG_FATAL, "TXQ Update failed\n");
+
+ mutex_unlock(&priv->mutex);
+
+ return ret;
+}
+
+static int ath9k_htc_set_key(struct ieee80211_hw *hw,
+ enum set_key_cmd cmd,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ int ret = 0;
+
+ if (htc_modparam_nohwcrypt)
+ return -ENOSPC;
+
+ mutex_lock(&priv->mutex);
+ ath_print(common, ATH_DBG_CONFIG, "Set HW Key\n");
+ ath9k_htc_ps_wakeup(priv);
+
+ switch (cmd) {
+ case SET_KEY:
+ ret = ath9k_cmn_key_config(common, vif, sta, key);
+ if (ret >= 0) {
+ key->hw_key_idx = ret;
+ /* push IV and Michael MIC generation to stack */
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+ if (key->alg == ALG_TKIP)
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
+ if (priv->ah->sw_mgmt_crypto && key->alg == ALG_CCMP)
+ key->flags |= IEEE80211_KEY_FLAG_SW_MGMT;
+ ret = 0;
+ }
+ break;
+ case DISABLE_KEY:
+ ath9k_cmn_key_delete(common, key);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ ath9k_htc_ps_restore(priv);
+ mutex_unlock(&priv->mutex);
+
+ return ret;
+}
+
+static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf,
+ u32 changed)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ struct ath_hw *ah = priv->ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ mutex_lock(&priv->mutex);
+ ath9k_htc_ps_wakeup(priv);
+
+ if (changed & BSS_CHANGED_ASSOC) {
+ common->curaid = bss_conf->assoc ?
+ bss_conf->aid : 0;
+ ath_print(common, ATH_DBG_CONFIG, "BSS Changed ASSOC %d\n",
+ bss_conf->assoc);
+
+ if (bss_conf->assoc) {
+ priv->op_flags |= OP_ASSOCIATED;
+ ath_start_ani(priv);
+ } else {
+ priv->op_flags &= ~OP_ASSOCIATED;
+ cancel_work_sync(&priv->ps_work);
+ cancel_delayed_work_sync(&priv->ath9k_ani_work);
+ }
+ }
+
+ if (changed & BSS_CHANGED_BSSID) {
+ /* Set BSSID */
+ memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
+ ath9k_hw_write_associd(ah);
+
+ ath_print(common, ATH_DBG_CONFIG,
+ "BSSID: %pM aid: 0x%x\n",
+ common->curbssid, common->curaid);
+ }
+
+ if ((changed & BSS_CHANGED_BEACON_INT) ||
+ (changed & BSS_CHANGED_BEACON) ||
+ ((changed & BSS_CHANGED_BEACON_ENABLED) &&
+ bss_conf->enable_beacon)) {
+ priv->op_flags |= OP_ENABLE_BEACON;
+ ath9k_htc_beacon_config(priv, vif);
+ }
+
+ if ((changed & BSS_CHANGED_BEACON_ENABLED) &&
+ !bss_conf->enable_beacon) {
+ priv->op_flags &= ~OP_ENABLE_BEACON;
+ ath9k_htc_beacon_config(priv, vif);
+ }
+
+ if (changed & BSS_CHANGED_ERP_PREAMBLE) {
+ ath_print(common, ATH_DBG_CONFIG, "BSS Changed PREAMBLE %d\n",
+ bss_conf->use_short_preamble);
+ if (bss_conf->use_short_preamble)
+ priv->op_flags |= OP_PREAMBLE_SHORT;
+ else
+ priv->op_flags &= ~OP_PREAMBLE_SHORT;
+ }
+
+ if (changed & BSS_CHANGED_ERP_CTS_PROT) {
+ ath_print(common, ATH_DBG_CONFIG, "BSS Changed CTS PROT %d\n",
+ bss_conf->use_cts_prot);
+ if (bss_conf->use_cts_prot &&
+ hw->conf.channel->band != IEEE80211_BAND_5GHZ)
+ priv->op_flags |= OP_PROTECT_ENABLE;
+ else
+ priv->op_flags &= ~OP_PROTECT_ENABLE;
+ }
+
+ if (changed & BSS_CHANGED_ERP_SLOT) {
+ if (bss_conf->use_short_slot)
+ ah->slottime = 9;
+ else
+ ah->slottime = 20;
+
+ ath9k_hw_init_global_settings(ah);
+ }
+
+ ath9k_htc_ps_restore(priv);
+ mutex_unlock(&priv->mutex);
+}
+
+static u64 ath9k_htc_get_tsf(struct ieee80211_hw *hw)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ u64 tsf;
+
+ mutex_lock(&priv->mutex);
+ tsf = ath9k_hw_gettsf64(priv->ah);
+ mutex_unlock(&priv->mutex);
+
+ return tsf;
+}
+
+static void ath9k_htc_set_tsf(struct ieee80211_hw *hw, u64 tsf)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+
+ mutex_lock(&priv->mutex);
+ ath9k_hw_settsf64(priv->ah, tsf);
+ mutex_unlock(&priv->mutex);
+}
+
+static void ath9k_htc_reset_tsf(struct ieee80211_hw *hw)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+
+ ath9k_htc_ps_wakeup(priv);
+ mutex_lock(&priv->mutex);
+ ath9k_hw_reset_tsf(priv->ah);
+ mutex_unlock(&priv->mutex);
+ ath9k_htc_ps_restore(priv);
+}
+
+static int ath9k_htc_ampdu_action(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum ieee80211_ampdu_mlme_action action,
+ struct ieee80211_sta *sta,
+ u16 tid, u16 *ssn)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ struct ath9k_htc_aggr_work *work = &priv->aggr_work;
+ struct ath9k_htc_sta *ista;
+
+ switch (action) {
+ case IEEE80211_AMPDU_RX_START:
+ break;
+ case IEEE80211_AMPDU_RX_STOP:
+ break;
+ case IEEE80211_AMPDU_TX_START:
+ case IEEE80211_AMPDU_TX_STOP:
+ if (!(priv->op_flags & OP_TXAGGR))
+ return -ENOTSUPP;
+ memcpy(work->sta_addr, sta->addr, ETH_ALEN);
+ work->hw = hw;
+ work->vif = vif;
+ work->action = action;
+ work->tid = tid;
+ ieee80211_queue_delayed_work(hw, &priv->ath9k_aggr_work, 0);
+ break;
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ ista = (struct ath9k_htc_sta *) sta->drv_priv;
+ ista->tid_state[tid] = AGGR_OPERATIONAL;
+ break;
+ default:
+ ath_print(ath9k_hw_common(priv->ah), ATH_DBG_FATAL,
+ "Unknown AMPDU action\n");
+ }
+
+ return 0;
+}
+
+static void ath9k_htc_sw_scan_start(struct ieee80211_hw *hw)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+
+ mutex_lock(&priv->mutex);
+ spin_lock_bh(&priv->beacon_lock);
+ priv->op_flags |= OP_SCANNING;
+ spin_unlock_bh(&priv->beacon_lock);
+ cancel_work_sync(&priv->ps_work);
+ cancel_delayed_work_sync(&priv->ath9k_ani_work);
+ mutex_unlock(&priv->mutex);
+}
+
+static void ath9k_htc_sw_scan_complete(struct ieee80211_hw *hw)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+
+ ath9k_htc_ps_wakeup(priv);
+ mutex_lock(&priv->mutex);
+ spin_lock_bh(&priv->beacon_lock);
+ priv->op_flags &= ~OP_SCANNING;
+ spin_unlock_bh(&priv->beacon_lock);
+ priv->op_flags |= OP_FULL_RESET;
+ if (priv->op_flags & OP_ASSOCIATED)
+ ath9k_htc_beacon_config(priv, priv->vif);
+ ath_start_ani(priv);
+ mutex_unlock(&priv->mutex);
+ ath9k_htc_ps_restore(priv);
+}
+
+static int ath9k_htc_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+{
+ return 0;
+}
+
+static void ath9k_htc_set_coverage_class(struct ieee80211_hw *hw,
+ u8 coverage_class)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+
+ mutex_lock(&priv->mutex);
+ priv->ah->coverage_class = coverage_class;
+ ath9k_hw_init_global_settings(priv->ah);
+ mutex_unlock(&priv->mutex);
+}
+
+struct ieee80211_ops ath9k_htc_ops = {
+ .tx = ath9k_htc_tx,
+ .start = ath9k_htc_start,
+ .stop = ath9k_htc_stop,
+ .add_interface = ath9k_htc_add_interface,
+ .remove_interface = ath9k_htc_remove_interface,
+ .config = ath9k_htc_config,
+ .configure_filter = ath9k_htc_configure_filter,
+ .sta_notify = ath9k_htc_sta_notify,
+ .conf_tx = ath9k_htc_conf_tx,
+ .bss_info_changed = ath9k_htc_bss_info_changed,
+ .set_key = ath9k_htc_set_key,
+ .get_tsf = ath9k_htc_get_tsf,
+ .set_tsf = ath9k_htc_set_tsf,
+ .reset_tsf = ath9k_htc_reset_tsf,
+ .ampdu_action = ath9k_htc_ampdu_action,
+ .sw_scan_start = ath9k_htc_sw_scan_start,
+ .sw_scan_complete = ath9k_htc_sw_scan_complete,
+ .set_rts_threshold = ath9k_htc_set_rts_threshold,
+ .rfkill_poll = ath9k_htc_rfkill_poll_state,
+ .set_coverage_class = ath9k_htc_set_coverage_class,
+};
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
new file mode 100644
index 000000000000..2571b443ac82
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -0,0 +1,707 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "htc.h"
+
+/******/
+/* TX */
+/******/
+
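+/*
+ * Map a mac80211 queue index (0: VO, 1: VI, 2: BE, 3: BK) to the
+ * hardware queue number stored in hwq_map at TX queue setup time;
+ * unknown queues fall back to the BE queue.
+ */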
+int get_hw_qnum(u16 queue, int *hwq_map)
+{
+ switch (queue) {
+ case 0:
+ return hwq_map[ATH9K_WME_AC_VO];
+ case 1:
+ return hwq_map[ATH9K_WME_AC_VI];
+ case 2:
+ return hwq_map[ATH9K_WME_AC_BE];
+ case 3:
+ return hwq_map[ATH9K_WME_AC_BK];
+ default:
+ return hwq_map[ATH9K_WME_AC_BE];
+ }
+}
+
+int ath_htc_txq_update(struct ath9k_htc_priv *priv, int qnum,
+ struct ath9k_tx_queue_info *qinfo)
+{
+ struct ath_hw *ah = priv->ah;
+ int error = 0;
+ struct ath9k_tx_queue_info qi;
+
+ ath9k_hw_get_txq_props(ah, qnum, &qi);
+
+ qi.tqi_aifs = qinfo->tqi_aifs;
+ qi.tqi_cwmin = qinfo->tqi_cwmin / 2; /* XXX */
+ qi.tqi_cwmax = qinfo->tqi_cwmax;
+ qi.tqi_burstTime = qinfo->tqi_burstTime;
+ qi.tqi_readyTime = qinfo->tqi_readyTime;
+
+ if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
+ ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
+ "Unable to update hardware queue %u!\n", qnum);
+ error = -EIO;
+ } else {
+ ath9k_hw_resettxqueue(ah, qnum);
+ }
+
+ return error;
+}
+
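+/*
+ * Prepend the driver's TX header (frame or management variant), pick
+ * the endpoint matching the hardware queue and hand the frame to HTC.
+ */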
+int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr;
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_sta *sta = tx_info->control.sta;
+ struct ath9k_htc_sta *ista;
+ struct ath9k_htc_vif *avp;
+ struct ath9k_htc_tx_ctl tx_ctl;
+ enum htc_endpoint_id epid;
+ u16 qnum, hw_qnum;
+ __le16 fc;
+ u8 *tx_fhdr;
+ u8 sta_idx;
+
+ hdr = (struct ieee80211_hdr *) skb->data;
+ fc = hdr->frame_control;
+
+ avp = (struct ath9k_htc_vif *) tx_info->control.vif->drv_priv;
+ if (sta) {
+ ista = (struct ath9k_htc_sta *) sta->drv_priv;
+ sta_idx = ista->index;
+ } else {
+ sta_idx = 0;
+ }
+
+ memset(&tx_ctl, 0, sizeof(struct ath9k_htc_tx_ctl));
+
+ if (ieee80211_is_data(fc)) {
+ struct tx_frame_hdr tx_hdr;
+ u8 *qc;
+
+ memset(&tx_hdr, 0, sizeof(struct tx_frame_hdr));
+
+ tx_hdr.node_idx = sta_idx;
+ tx_hdr.vif_idx = avp->index;
+
+ if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
+ tx_ctl.type = ATH9K_HTC_AMPDU;
+ tx_hdr.data_type = ATH9K_HTC_AMPDU;
+ } else {
+ tx_ctl.type = ATH9K_HTC_NORMAL;
+ tx_hdr.data_type = ATH9K_HTC_NORMAL;
+ }
+
+ /* Only QoS data frames carry a QoS control field */
+ if (ieee80211_is_data_qos(fc)) {
+ qc = ieee80211_get_qos_ctl(hdr);
+ tx_hdr.tidno = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
+ }
+
+ /* Check for RTS protection */
+ if (priv->hw->wiphy->rts_threshold != (u32) -1)
+ if (skb->len > priv->hw->wiphy->rts_threshold)
+ tx_hdr.flags |= ATH9K_HTC_TX_RTSCTS;
+
+ /* CTS-to-self */
+ if (!(tx_hdr.flags & ATH9K_HTC_TX_RTSCTS) &&
+ (priv->op_flags & OP_PROTECT_ENABLE))
+ tx_hdr.flags |= ATH9K_HTC_TX_CTSONLY;
+
+ tx_hdr.key_type = ath9k_cmn_get_hw_crypto_keytype(skb);
+ if (tx_hdr.key_type == ATH9K_KEY_TYPE_CLEAR)
+ tx_hdr.keyix = (u8) ATH9K_TXKEYIX_INVALID;
+ else
+ tx_hdr.keyix = tx_info->control.hw_key->hw_key_idx;
+
+ tx_fhdr = skb_push(skb, sizeof(tx_hdr));
+ memcpy(tx_fhdr, (u8 *) &tx_hdr, sizeof(tx_hdr));
+
+ qnum = skb_get_queue_mapping(skb);
+ hw_qnum = get_hw_qnum(qnum, priv->hwq_map);
+
+ switch (hw_qnum) {
+ case 0:
+ epid = priv->data_be_ep;
+ break;
+ case 2:
+ epid = priv->data_vi_ep;
+ break;
+ case 3:
+ epid = priv->data_vo_ep;
+ break;
+ case 1:
+ default:
+ epid = priv->data_bk_ep;
+ break;
+ }
+ } else {
+ struct tx_mgmt_hdr mgmt_hdr;
+
+ memset(&mgmt_hdr, 0, sizeof(struct tx_mgmt_hdr));
+
+ tx_ctl.type = ATH9K_HTC_NORMAL;
+
+ mgmt_hdr.node_idx = sta_idx;
+ mgmt_hdr.vif_idx = avp->index;
+ mgmt_hdr.tidno = 0;
+ mgmt_hdr.flags = 0;
+
+ mgmt_hdr.key_type = ath9k_cmn_get_hw_crypto_keytype(skb);
+ if (mgmt_hdr.key_type == ATH9K_KEY_TYPE_CLEAR)
+ mgmt_hdr.keyix = (u8) ATH9K_TXKEYIX_INVALID;
+ else
+ mgmt_hdr.keyix = tx_info->control.hw_key->hw_key_idx;
+
+ tx_fhdr = skb_push(skb, sizeof(mgmt_hdr));
+ memcpy(tx_fhdr, (u8 *) &mgmt_hdr, sizeof(mgmt_hdr));
+ epid = priv->mgmt_ep;
+ }
+
+ return htc_send(priv->htc, skb, epid, &tx_ctl);
+}
+
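+/*
+ * TX completion tasklet: report the status of completed frames to
+ * mac80211, start BA sessions where needed and wake stopped queues.
+ */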
+void ath9k_tx_tasklet(unsigned long data)
+{
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)data;
+ struct ieee80211_sta *sta;
+ struct ieee80211_hdr *hdr;
+ struct ieee80211_tx_info *tx_info;
+ struct sk_buff *skb = NULL;
+ __le16 fc;
+
+ while ((skb = skb_dequeue(&priv->tx_queue)) != NULL) {
+
+ hdr = (struct ieee80211_hdr *) skb->data;
+ fc = hdr->frame_control;
+ tx_info = IEEE80211_SKB_CB(skb);
+
+ memset(&tx_info->status, 0, sizeof(tx_info->status));
+
+ rcu_read_lock();
+
+ sta = ieee80211_find_sta(priv->vif, hdr->addr1);
+ if (!sta) {
+ rcu_read_unlock();
+ ieee80211_tx_status(priv->hw, skb);
+ continue;
+ }
+
+ /* Check if we need to start aggregation */
+
+ if (sta && conf_is_ht(&priv->hw->conf) &&
+ (priv->op_flags & OP_TXAGGR)
+ && !(skb->protocol == cpu_to_be16(ETH_P_PAE))) {
+ if (ieee80211_is_data_qos(fc)) {
+ u8 *qc, tid;
+ struct ath9k_htc_sta *ista;
+
+ qc = ieee80211_get_qos_ctl(hdr);
+ tid = qc[0] & 0xf;
+ ista = (struct ath9k_htc_sta *)sta->drv_priv;
+
+ if ((tid < ATH9K_HTC_MAX_TID) &&
+ ista->tid_state[tid] == AGGR_STOP) {
+ ieee80211_start_tx_ba_session(sta, tid);
+ ista->tid_state[tid] = AGGR_PROGRESS;
+ }
+ }
+ }
+
+ rcu_read_unlock();
+
+ /* Send status to mac80211 */
+ ieee80211_tx_status(priv->hw, skb);
+ }
+
+ /* Wake TX queues if needed */
+ spin_lock_bh(&priv->tx_lock);
+ if (priv->tx_queues_stop) {
+ priv->tx_queues_stop = false;
+ spin_unlock_bh(&priv->tx_lock);
+ ath_print(ath9k_hw_common(priv->ah), ATH_DBG_XMIT,
+ "Waking up TX queues\n");
+ ieee80211_wake_queues(priv->hw);
+ return;
+ }
+ spin_unlock_bh(&priv->tx_lock);
+}
+
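+/*
+ * TX endpoint callback: strip the driver's TX header, note whether the
+ * frame was ACKed and queue it for the TX completion tasklet.
+ */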
+void ath9k_htc_txep(void *drv_priv, struct sk_buff *skb,
+ enum htc_endpoint_id ep_id, bool txok)
+{
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) drv_priv;
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ieee80211_tx_info *tx_info;
+
+ if (!skb)
+ return;
+
+ if (ep_id == priv->mgmt_ep) {
+ skb_pull(skb, sizeof(struct tx_mgmt_hdr));
+ } else if ((ep_id == priv->data_bk_ep) ||
+ (ep_id == priv->data_be_ep) ||
+ (ep_id == priv->data_vi_ep) ||
+ (ep_id == priv->data_vo_ep)) {
+ skb_pull(skb, sizeof(struct tx_frame_hdr));
+ } else {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unsupported TX EPID: %d\n", ep_id);
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ tx_info = IEEE80211_SKB_CB(skb);
+
+ if (txok)
+ tx_info->flags |= IEEE80211_TX_STAT_ACK;
+
+ skb_queue_tail(&priv->tx_queue, skb);
+ tasklet_schedule(&priv->tx_tasklet);
+}
+
+int ath9k_tx_init(struct ath9k_htc_priv *priv)
+{
+ skb_queue_head_init(&priv->tx_queue);
+ return 0;
+}
+
+void ath9k_tx_cleanup(struct ath9k_htc_priv *priv)
+{
+}
+
+bool ath9k_htc_txq_setup(struct ath9k_htc_priv *priv,
+ enum ath9k_tx_queue_subtype subtype)
+{
+ struct ath_hw *ah = priv->ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_tx_queue_info qi;
+ int qnum;
+
+ memset(&qi, 0, sizeof(qi));
+
+ qi.tqi_subtype = subtype;
+ qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
+ qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
+ qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
+ qi.tqi_physCompBuf = 0;
+ qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE | TXQ_FLAG_TXDESCINT_ENABLE;
+
+ qnum = ath9k_hw_setuptxqueue(priv->ah, ATH9K_TX_QUEUE_DATA, &qi);
+ if (qnum == -1)
+ return false;
+
+ if (qnum >= ARRAY_SIZE(priv->hwq_map)) {
+ ath_print(common, ATH_DBG_FATAL,
+ "qnum %u out of range, max %u!\n",
+ qnum, (unsigned int)ARRAY_SIZE(priv->hwq_map));
+ ath9k_hw_releasetxqueue(ah, qnum);
+ return false;
+ }
+
+ priv->hwq_map[subtype] = qnum;
+ return true;
+}
+
+/******/
+/* RX */
+/******/
+
+/*
+ * Calculate the RX filter to be set in the HW.
+ */
+u32 ath9k_htc_calcrxfilter(struct ath9k_htc_priv *priv)
+{
+#define RX_FILTER_PRESERVE (ATH9K_RX_FILTER_PHYERR | ATH9K_RX_FILTER_PHYRADAR)
+
+ struct ath_hw *ah = priv->ah;
+ u32 rfilt;
+
+ rfilt = (ath9k_hw_getrxfilter(ah) & RX_FILTER_PRESERVE)
+ | ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
+ | ATH9K_RX_FILTER_MCAST;
+
+ /* If not a STA, enable processing of Probe Requests */
+ if (ah->opmode != NL80211_IFTYPE_STATION)
+ rfilt |= ATH9K_RX_FILTER_PROBEREQ;
+
+ /*
+ * Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for station
+ * mode interface or when in monitor mode. AP mode does not need this
+ * since it receives all in-BSS frames anyway.
+ */
+ if (((ah->opmode != NL80211_IFTYPE_AP) &&
+ (priv->rxfilter & FIF_PROMISC_IN_BSS)) ||
+ (ah->opmode == NL80211_IFTYPE_MONITOR))
+ rfilt |= ATH9K_RX_FILTER_PROM;
+
+ if (priv->rxfilter & FIF_CONTROL)
+ rfilt |= ATH9K_RX_FILTER_CONTROL;
+
+ if ((ah->opmode == NL80211_IFTYPE_STATION) &&
+ !(priv->rxfilter & FIF_BCN_PRBRESP_PROMISC))
+ rfilt |= ATH9K_RX_FILTER_MYBEACON;
+ else
+ rfilt |= ATH9K_RX_FILTER_BEACON;
+
+ if (conf_is_ht(&priv->hw->conf))
+ rfilt |= ATH9K_RX_FILTER_COMP_BAR;
+
+ return rfilt;
+
+#undef RX_FILTER_PRESERVE
+}
+
+/*
+ * Recv initialization for opmode change.
+ */
+static void ath9k_htc_opmode_init(struct ath9k_htc_priv *priv)
+{
+ struct ath_hw *ah = priv->ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ u32 rfilt, mfilt[2];
+
+ /* configure rx filter */
+ rfilt = ath9k_htc_calcrxfilter(priv);
+ ath9k_hw_setrxfilter(ah, rfilt);
+
+ /* configure bssid mask */
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
+ ath_hw_setbssidmask(common);
+
+ /* configure operational mode */
+ ath9k_hw_setopmode(ah);
+
+ /* Handle any link-level address change. */
+ ath9k_hw_setmac(ah, common->macaddr);
+
+ /* calculate and install multicast filter */
+ mfilt[0] = mfilt[1] = ~0;
+ ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
+}
+
+void ath9k_host_rx_init(struct ath9k_htc_priv *priv)
+{
+ ath9k_hw_rxena(priv->ah);
+ ath9k_htc_opmode_init(priv);
+ ath9k_hw_startpcureceive(priv->ah);
+ priv->rx.last_rssi = ATH_RSSI_DUMMY_MARKER;
+}
+
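+/*
+ * Translate the hardware rate code into a mac80211 rate index.
+ * Bit 7 of the rate code marks an HT (MCS) rate.
+ */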
+static void ath9k_process_rate(struct ieee80211_hw *hw,
+ struct ieee80211_rx_status *rxs,
+ u8 rx_rate, u8 rs_flags)
+{
+ struct ieee80211_supported_band *sband;
+ enum ieee80211_band band;
+ unsigned int i = 0;
+
+ if (rx_rate & 0x80) {
+ /* HT rate */
+ rxs->flag |= RX_FLAG_HT;
+ if (rs_flags & ATH9K_RX_2040)
+ rxs->flag |= RX_FLAG_40MHZ;
+ if (rs_flags & ATH9K_RX_GI)
+ rxs->flag |= RX_FLAG_SHORT_GI;
+ rxs->rate_idx = rx_rate & 0x7f;
+ return;
+ }
+
+ band = hw->conf.channel->band;
+ sband = hw->wiphy->bands[band];
+
+ for (i = 0; i < sband->n_bitrates; i++) {
+ if (sband->bitrates[i].hw_value == rx_rate) {
+ rxs->rate_idx = i;
+ return;
+ }
+ if (sband->bitrates[i].hw_value_short == rx_rate) {
+ rxs->rate_idx = i;
+ rxs->flag |= RX_FLAG_SHORTPRE;
+ return;
+ }
+ }
+}
+
+static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
+ struct ath9k_htc_rxbuf *rxbuf,
+ struct ieee80211_rx_status *rx_status)
+{
+ struct ieee80211_hdr *hdr;
+ struct ieee80211_hw *hw = priv->hw;
+ struct sk_buff *skb = rxbuf->skb;
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath_htc_rx_status *rxstatus;
+ int hdrlen, padpos, padsize;
+ int last_rssi = ATH_RSSI_DUMMY_MARKER;
+ __le16 fc;
+
+ if (skb->len <= HTC_RX_FRAME_HEADER_SIZE) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Corrupted RX frame, dropping\n");
+ goto rx_next;
+ }
+
+ rxstatus = (struct ath_htc_rx_status *)skb->data;
+
+ if (be16_to_cpu(rxstatus->rs_datalen) -
+ (skb->len - HTC_RX_FRAME_HEADER_SIZE) != 0) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Corrupted RX data len, dropping "
+ "(dlen: %d, skblen: %d)\n",
+ be16_to_cpu(rxstatus->rs_datalen), skb->len);
+ goto rx_next;
+ }
+
+ /* Get the RX status information */
+ memcpy(&rxbuf->rxstatus, rxstatus, HTC_RX_FRAME_HEADER_SIZE);
+ skb_pull(skb, HTC_RX_FRAME_HEADER_SIZE);
+
+ hdr = (struct ieee80211_hdr *)skb->data;
+ fc = hdr->frame_control;
+ hdrlen = ieee80211_get_hdrlen_from_skb(skb);
+
+ padpos = ath9k_cmn_padpos(fc);
+
+ padsize = padpos & 3;
+ if (padsize && skb->len >= padpos+padsize+FCS_LEN) {
+ memmove(skb->data + padsize, skb->data, padpos);
+ skb_pull(skb, padsize);
+ }
+
+ memset(rx_status, 0, sizeof(struct ieee80211_rx_status));
+
+ if (rxbuf->rxstatus.rs_status != 0) {
+ if (rxbuf->rxstatus.rs_status & ATH9K_RXERR_CRC)
+ rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+ if (rxbuf->rxstatus.rs_status & ATH9K_RXERR_PHY)
+ goto rx_next;
+
+ if (rxbuf->rxstatus.rs_status & ATH9K_RXERR_DECRYPT) {
+ /* FIXME */
+ } else if (rxbuf->rxstatus.rs_status & ATH9K_RXERR_MIC) {
+ if (ieee80211_is_ctl(fc))
+ /*
+ * Sometimes, we get invalid
+ * MIC failures on valid control frames.
+ * Remove these mic errors.
+ */
+ rxbuf->rxstatus.rs_status &= ~ATH9K_RXERR_MIC;
+ else
+ rx_status->flag |= RX_FLAG_MMIC_ERROR;
+ }
+
+ /*
+ * Reject error frames with the exception of
+ * decryption and MIC failures. For monitor mode,
+ * we also ignore the CRC error.
+ */
+ if (priv->ah->opmode == NL80211_IFTYPE_MONITOR) {
+ if (rxbuf->rxstatus.rs_status &
+ ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
+ ATH9K_RXERR_CRC))
+ goto rx_next;
+ } else {
+ if (rxbuf->rxstatus.rs_status &
+ ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) {
+ goto rx_next;
+ }
+ }
+ }
+
+ if (!(rxbuf->rxstatus.rs_status & ATH9K_RXERR_DECRYPT)) {
+ u8 keyix;
+ keyix = rxbuf->rxstatus.rs_keyix;
+ if (keyix != ATH9K_RXKEYIX_INVALID) {
+ rx_status->flag |= RX_FLAG_DECRYPTED;
+ } else if (ieee80211_has_protected(fc) &&
+ skb->len >= hdrlen + 4) {
+ keyix = skb->data[hdrlen + 3] >> 6;
+ if (test_bit(keyix, common->keymap))
+ rx_status->flag |= RX_FLAG_DECRYPTED;
+ }
+ }
+
+ ath9k_process_rate(hw, rx_status, rxbuf->rxstatus.rs_rate,
+ rxbuf->rxstatus.rs_flags);
+
+ if (priv->op_flags & OP_ASSOCIATED) {
+ if (rxbuf->rxstatus.rs_rssi != ATH9K_RSSI_BAD &&
+ !rxbuf->rxstatus.rs_moreaggr)
+ ATH_RSSI_LPF(priv->rx.last_rssi,
+ rxbuf->rxstatus.rs_rssi);
+
+ last_rssi = priv->rx.last_rssi;
+
+ if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
+ rxbuf->rxstatus.rs_rssi = ATH_EP_RND(last_rssi,
+ ATH_RSSI_EP_MULTIPLIER);
+
+ if (rxbuf->rxstatus.rs_rssi < 0)
+ rxbuf->rxstatus.rs_rssi = 0;
+
+ if (ieee80211_is_beacon(fc))
+ priv->ah->stats.avgbrssi = rxbuf->rxstatus.rs_rssi;
+ }
+
+ rx_status->mactime = be64_to_cpu(rxbuf->rxstatus.rs_tstamp);
+ rx_status->band = hw->conf.channel->band;
+ rx_status->freq = hw->conf.channel->center_freq;
+ rx_status->signal = rxbuf->rxstatus.rs_rssi + ATH_DEFAULT_NOISE_FLOOR;
+ rx_status->antenna = rxbuf->rxstatus.rs_antenna;
+ rx_status->flag |= RX_FLAG_TSFT;
+
+ return true;
+
+rx_next:
+ return false;
+}
+
+/*
+ * FIXME: Handle FLUSH later on.
+ */
+void ath9k_rx_tasklet(unsigned long data)
+{
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)data;
+ struct ath9k_htc_rxbuf *rxbuf = NULL, *tmp_buf = NULL;
+ struct ieee80211_rx_status rx_status;
+ struct sk_buff *skb;
+ unsigned long flags;
+ struct ieee80211_hdr *hdr;
+
+ do {
+ spin_lock_irqsave(&priv->rx.rxbuflock, flags);
+ list_for_each_entry(tmp_buf, &priv->rx.rxbuf, list) {
+ if (tmp_buf->in_process) {
+ rxbuf = tmp_buf;
+ break;
+ }
+ }
+
+ if (rxbuf == NULL) {
+ spin_unlock_irqrestore(&priv->rx.rxbuflock, flags);
+ break;
+ }
+
+ if (!rxbuf->skb)
+ goto requeue;
+
+ if (!ath9k_rx_prepare(priv, rxbuf, &rx_status)) {
+ dev_kfree_skb_any(rxbuf->skb);
+ goto requeue;
+ }
+
+ memcpy(IEEE80211_SKB_RXCB(rxbuf->skb), &rx_status,
+ sizeof(struct ieee80211_rx_status));
+ skb = rxbuf->skb;
+ hdr = (struct ieee80211_hdr *) skb->data;
+
+ if (ieee80211_is_beacon(hdr->frame_control) && priv->ps_enabled)
+ ieee80211_queue_work(priv->hw, &priv->ps_work);
+
+ spin_unlock_irqrestore(&priv->rx.rxbuflock, flags);
+
+ ieee80211_rx(priv->hw, skb);
+
+ spin_lock_irqsave(&priv->rx.rxbuflock, flags);
+requeue:
+ rxbuf->in_process = false;
+ rxbuf->skb = NULL;
+ list_move_tail(&rxbuf->list, &priv->rx.rxbuf);
+ rxbuf = NULL;
+ spin_unlock_irqrestore(&priv->rx.rxbuflock, flags);
+ } while (1);
+}
+
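+/*
+ * RX endpoint callback: attach the incoming skb to a free RX buffer and
+ * schedule the RX tasklet, or drop the frame if no buffer is available.
+ */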
+void ath9k_htc_rxep(void *drv_priv, struct sk_buff *skb,
+ enum htc_endpoint_id ep_id)
+{
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)drv_priv;
+ struct ath_hw *ah = priv->ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_htc_rxbuf *rxbuf = NULL, *tmp_buf = NULL;
+
+ spin_lock(&priv->rx.rxbuflock);
+ list_for_each_entry(tmp_buf, &priv->rx.rxbuf, list) {
+ if (!tmp_buf->in_process) {
+ rxbuf = tmp_buf;
+ break;
+ }
+ }
+ spin_unlock(&priv->rx.rxbuflock);
+
+ if (rxbuf == NULL) {
+ ath_print(common, ATH_DBG_ANY,
+ "No free RX buffer\n");
+ goto err;
+ }
+
+ spin_lock(&priv->rx.rxbuflock);
+ rxbuf->skb = skb;
+ rxbuf->in_process = true;
+ spin_unlock(&priv->rx.rxbuflock);
+
+ tasklet_schedule(&priv->rx_tasklet);
+ return;
+err:
+ dev_kfree_skb_any(skb);
+}
+
+/* FIXME: Locking for cleanup/init */
+
+void ath9k_rx_cleanup(struct ath9k_htc_priv *priv)
+{
+ struct ath9k_htc_rxbuf *rxbuf, *tbuf;
+
+ list_for_each_entry_safe(rxbuf, tbuf, &priv->rx.rxbuf, list) {
+ list_del(&rxbuf->list);
+ if (rxbuf->skb)
+ dev_kfree_skb_any(rxbuf->skb);
+ kfree(rxbuf);
+ }
+}
+
+int ath9k_rx_init(struct ath9k_htc_priv *priv)
+{
+ struct ath_hw *ah = priv->ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_htc_rxbuf *rxbuf;
+ int i = 0;
+
+ INIT_LIST_HEAD(&priv->rx.rxbuf);
+ spin_lock_init(&priv->rx.rxbuflock);
+
+ for (i = 0; i < ATH9K_HTC_RXBUF; i++) {
+ rxbuf = kzalloc(sizeof(struct ath9k_htc_rxbuf), GFP_KERNEL);
+ if (rxbuf == NULL) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to allocate RX buffers\n");
+ goto err;
+ }
+ list_add_tail(&rxbuf->list, &priv->rx.rxbuf);
+ }
+
+ return 0;
+
+err:
+ ath9k_rx_cleanup(priv);
+ return -ENOMEM;
+}
diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c
new file mode 100644
index 000000000000..064397fd738e
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/htc_hst.c
@@ -0,0 +1,480 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "htc.h"
+
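+/*
+ * Prepend an HTC frame header and hand the skb to the HIF layer
+ * on the endpoint's upload pipe.
+ */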
+static int htc_issue_send(struct htc_target *target, struct sk_buff* skb,
+ u16 len, u8 flags, u8 epid,
+ struct ath9k_htc_tx_ctl *tx_ctl)
+{
+ struct htc_frame_hdr *hdr;
+ struct htc_endpoint *endpoint = &target->endpoint[epid];
+ int status;
+
+ hdr = (struct htc_frame_hdr *)
+ skb_push(skb, sizeof(struct htc_frame_hdr));
+ hdr->endpoint_id = epid;
+ hdr->flags = flags;
+ hdr->payload_len = cpu_to_be16(len);
+
+ status = target->hif->send(target->hif_dev, endpoint->ul_pipeid, skb,
+ tx_ctl);
+ return status;
+}
+
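+/*
+ * Return the first endpoint with no service bound to it, scanning
+ * downwards from the highest endpoint ID.
+ */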
+static struct htc_endpoint *get_next_avail_ep(struct htc_endpoint *endpoint)
+{
+ enum htc_endpoint_id avail_epid;
+
+ for (avail_epid = (ENDPOINT_MAX - 1); avail_epid > ENDPOINT0; avail_epid--)
+ if (endpoint[avail_epid].service_id == 0)
+ return &endpoint[avail_epid];
+ return NULL;
+}
+
+static u8 service_to_ulpipe(u16 service_id)
+{
+ switch (service_id) {
+ case WMI_CONTROL_SVC:
+ return 4;
+ case WMI_BEACON_SVC:
+ case WMI_CAB_SVC:
+ case WMI_UAPSD_SVC:
+ case WMI_MGMT_SVC:
+ case WMI_DATA_VO_SVC:
+ case WMI_DATA_VI_SVC:
+ case WMI_DATA_BE_SVC:
+ case WMI_DATA_BK_SVC:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static u8 service_to_dlpipe(u16 service_id)
+{
+ switch (service_id) {
+ case WMI_CONTROL_SVC:
+ return 3;
+ case WMI_BEACON_SVC:
+ case WMI_CAB_SVC:
+ case WMI_UAPSD_SVC:
+ case WMI_MGMT_SVC:
+ case WMI_DATA_VO_SVC:
+ case WMI_DATA_VI_SVC:
+ case WMI_DATA_BE_SVC:
+ case WMI_DATA_BK_SVC:
+ return 2;
+ default:
+ return 0;
+ }
+}
+
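+/*
+ * Handle the target-ready message: record the credit information,
+ * set up the control endpoint and wake up anyone waiting on the target.
+ */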
+static void htc_process_target_rdy(struct htc_target *target,
+ void *buf)
+{
+ struct htc_endpoint *endpoint;
+ struct htc_ready_msg *htc_ready_msg = (struct htc_ready_msg *) buf;
+
+ target->credits = be16_to_cpu(htc_ready_msg->credits);
+ target->credit_size = be16_to_cpu(htc_ready_msg->credit_size);
+
+ endpoint = &target->endpoint[ENDPOINT0];
+ endpoint->service_id = HTC_CTRL_RSVD_SVC;
+ endpoint->max_msglen = HTC_MAX_CONTROL_MESSAGE_LENGTH;
+ atomic_inc(&target->tgt_ready);
+ complete(&target->target_wait);
+}
+
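+/*
+ * Handle a service connect response: match it against the pending
+ * request by service ID, copy the request's settings over to the
+ * endpoint assigned by the target and complete the waiting command.
+ */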
+static void htc_process_conn_rsp(struct htc_target *target,
+ struct htc_frame_hdr *htc_hdr)
+{
+ struct htc_conn_svc_rspmsg *svc_rspmsg;
+ struct htc_endpoint *endpoint, *tmp_endpoint = NULL;
+ u16 service_id;
+ u16 max_msglen;
+ enum htc_endpoint_id epid, tepid;
+
+ svc_rspmsg = (struct htc_conn_svc_rspmsg *)
+ ((void *) htc_hdr + sizeof(struct htc_frame_hdr));
+
+ if (svc_rspmsg->status == HTC_SERVICE_SUCCESS) {
+ epid = svc_rspmsg->endpoint_id;
+ service_id = be16_to_cpu(svc_rspmsg->service_id);
+ max_msglen = be16_to_cpu(svc_rspmsg->max_msg_len);
+ endpoint = &target->endpoint[epid];
+
+ for (tepid = (ENDPOINT_MAX - 1); tepid > ENDPOINT0; tepid--) {
+ tmp_endpoint = &target->endpoint[tepid];
+ if (tmp_endpoint->service_id == service_id) {
+ tmp_endpoint->service_id = 0;
+ break;
+ }
+ }
+
+ if (tepid == ENDPOINT0)
+ return;
+
+ endpoint->service_id = service_id;
+ endpoint->max_txqdepth = tmp_endpoint->max_txqdepth;
+ endpoint->ep_callbacks = tmp_endpoint->ep_callbacks;
+ endpoint->ul_pipeid = tmp_endpoint->ul_pipeid;
+ endpoint->dl_pipeid = tmp_endpoint->dl_pipeid;
+ endpoint->max_msglen = max_msglen;
+ target->conn_rsp_epid = epid;
+ complete(&target->cmd_wait);
+ } else {
+ target->conn_rsp_epid = ENDPOINT_UNUSED;
+ }
+}
+
+static int htc_config_pipe_credits(struct htc_target *target)
+{
+ struct sk_buff *skb;
+ struct htc_config_pipe_msg *cp_msg;
+ int ret, time_left;
+
+ skb = alloc_skb(50 + sizeof(struct htc_frame_hdr), GFP_ATOMIC);
+ if (!skb) {
+ dev_err(target->dev, "failed to allocate send buffer\n");
+ return -ENOMEM;
+ }
+ skb_reserve(skb, sizeof(struct htc_frame_hdr));
+
+ cp_msg = (struct htc_config_pipe_msg *)
+ skb_put(skb, sizeof(struct htc_config_pipe_msg));
+
+ cp_msg->message_id = cpu_to_be16(HTC_MSG_CONFIG_PIPE_ID);
+ cp_msg->pipe_id = USB_WLAN_TX_PIPE;
+ cp_msg->credits = 28;
+
+ target->htc_flags |= HTC_OP_CONFIG_PIPE_CREDITS;
+
+ ret = htc_issue_send(target, skb, skb->len, 0, ENDPOINT0, NULL);
+ if (ret)
+ goto err;
+
+ time_left = wait_for_completion_timeout(&target->cmd_wait, HZ);
+ if (!time_left) {
+ dev_err(target->dev, "HTC credit config timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+err:
+ kfree_skb(skb);
+ return -EINVAL;
+}
+
+static int htc_setup_complete(struct htc_target *target)
+{
+ struct sk_buff *skb;
+ struct htc_comp_msg *comp_msg;
+ int ret = 0, time_left;
+
+ skb = alloc_skb(50 + sizeof(struct htc_frame_hdr), GFP_ATOMIC);
+ if (!skb) {
+ dev_err(target->dev, "failed to allocate send buffer\n");
+ return -ENOMEM;
+ }
+ skb_reserve(skb, sizeof(struct htc_frame_hdr));
+
+ comp_msg = (struct htc_comp_msg *)
+ skb_put(skb, sizeof(struct htc_comp_msg));
+ comp_msg->msg_id = cpu_to_be16(HTC_MSG_SETUP_COMPLETE_ID);
+
+ target->htc_flags |= HTC_OP_START_WAIT;
+
+ ret = htc_issue_send(target, skb, skb->len, 0, ENDPOINT0, NULL);
+ if (ret)
+ goto err;
+
+ time_left = wait_for_completion_timeout(&target->cmd_wait, HZ);
+ if (!time_left) {
+ dev_err(target->dev, "HTC start timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+
+err:
+ kfree_skb(skb);
+ return -EINVAL;
+}
+
+/* HTC APIs */
+
+int htc_init(struct htc_target *target)
+{
+ int ret;
+
+ ret = htc_config_pipe_credits(target);
+ if (ret)
+ return ret;
+
+ return htc_setup_complete(target);
+}
+
+int htc_connect_service(struct htc_target *target,
+ struct htc_service_connreq *service_connreq,
+ enum htc_endpoint_id *conn_rsp_epid)
+{
+ struct sk_buff *skb;
+ struct htc_endpoint *endpoint;
+ struct htc_conn_svc_msg *conn_msg;
+ int ret, time_left;
+
+ /* Find an available endpoint */
+ endpoint = get_next_avail_ep(target->endpoint);
+ if (!endpoint) {
+ dev_err(target->dev, "Endpoint is not available for "
+ "service %d\n", service_connreq->service_id);
+ return -EINVAL;
+ }
+
+ endpoint->service_id = service_connreq->service_id;
+ endpoint->max_txqdepth = service_connreq->max_send_qdepth;
+ endpoint->ul_pipeid = service_to_ulpipe(service_connreq->service_id);
+ endpoint->dl_pipeid = service_to_dlpipe(service_connreq->service_id);
+ endpoint->ep_callbacks = service_connreq->ep_callbacks;
+
+ skb = alloc_skb(sizeof(struct htc_conn_svc_msg) +
+ sizeof(struct htc_frame_hdr), GFP_ATOMIC);
+ if (!skb) {
+ dev_err(target->dev, "Failed to allocate buf to send "
+ "service connect req\n");
+ return -ENOMEM;
+ }
+
+ skb_reserve(skb, sizeof(struct htc_frame_hdr));
+
+ conn_msg = (struct htc_conn_svc_msg *)
+ skb_put(skb, sizeof(struct htc_conn_svc_msg));
+ conn_msg->service_id = cpu_to_be16(service_connreq->service_id);
+ conn_msg->msg_id = cpu_to_be16(HTC_MSG_CONNECT_SERVICE_ID);
+ conn_msg->con_flags = cpu_to_be16(service_connreq->con_flags);
+ conn_msg->dl_pipeid = endpoint->dl_pipeid;
+ conn_msg->ul_pipeid = endpoint->ul_pipeid;
+
+ ret = htc_issue_send(target, skb, skb->len, 0, ENDPOINT0, NULL);
+ if (ret)
+ goto err;
+
+ time_left = wait_for_completion_timeout(&target->cmd_wait, HZ);
+ if (!time_left) {
+ dev_err(target->dev, "Service connection timeout for: %d\n",
+ service_connreq->service_id);
+ return -ETIMEDOUT;
+ }
+
+ *conn_rsp_epid = target->conn_rsp_epid;
+ return 0;
+err:
+ kfree_skb(skb);
+ return ret;
+}
+
+int htc_send(struct htc_target *target, struct sk_buff *skb,
+ enum htc_endpoint_id epid, struct ath9k_htc_tx_ctl *tx_ctl)
+{
+ return htc_issue_send(target, skb, skb->len, 0, epid, tx_ctl);
+}
+
+void htc_stop(struct htc_target *target)
+{
+ enum htc_endpoint_id epid;
+ struct htc_endpoint *endpoint;
+
+ for (epid = ENDPOINT0; epid < ENDPOINT_MAX; epid++) {
+ endpoint = &target->endpoint[epid];
+ if (endpoint->service_id != 0)
+ target->hif->stop(target->hif_dev, endpoint->ul_pipeid);
+ }
+}
+
+void htc_start(struct htc_target *target)
+{
+ enum htc_endpoint_id epid;
+ struct htc_endpoint *endpoint;
+
+ for (epid = ENDPOINT0; epid < ENDPOINT_MAX; epid++) {
+ endpoint = &target->endpoint[epid];
+ if (endpoint->service_id != 0)
+ target->hif->start(target->hif_dev,
+ endpoint->ul_pipeid);
+ }
+}
+
+void ath9k_htc_txcompletion_cb(struct htc_target *htc_handle,
+ struct sk_buff *skb, bool txok)
+{
+ struct htc_endpoint *endpoint;
+ struct htc_frame_hdr *htc_hdr = NULL;
+
+ if (htc_handle->htc_flags & HTC_OP_CONFIG_PIPE_CREDITS) {
+ complete(&htc_handle->cmd_wait);
+ htc_handle->htc_flags &= ~HTC_OP_CONFIG_PIPE_CREDITS;
+ goto ret;
+ }
+
+ if (htc_handle->htc_flags & HTC_OP_START_WAIT) {
+ complete(&htc_handle->cmd_wait);
+ htc_handle->htc_flags &= ~HTC_OP_START_WAIT;
+ goto ret;
+ }
+
+ if (skb) {
+ htc_hdr = (struct htc_frame_hdr *) skb->data;
+ endpoint = &htc_handle->endpoint[htc_hdr->endpoint_id];
+ skb_pull(skb, sizeof(struct htc_frame_hdr));
+
+ if (endpoint->ep_callbacks.tx) {
+ endpoint->ep_callbacks.tx(endpoint->ep_callbacks.priv,
+ skb, htc_hdr->endpoint_id,
+ txok);
+ }
+ }
+
+ return;
+ret:
+ /* HTC-generated packets are freed here. */
+ if (htc_hdr && htc_hdr->endpoint_id != ENDPOINT0)
+ dev_kfree_skb_any(skb);
+ else
+ kfree_skb(skb);
+}
+
+/*
+ * HTC Messages are handled directly here and the obtained SKB
+ * is freed.
+ *
+ * Service messages (Data, WMI) are passed to the corresponding
+ * endpoint RX handlers, which have to free the SKB.
+ */
+void ath9k_htc_rx_msg(struct htc_target *htc_handle,
+ struct sk_buff *skb, u32 len, u8 pipe_id)
+{
+ struct htc_frame_hdr *htc_hdr;
+ enum htc_endpoint_id epid;
+ struct htc_endpoint *endpoint;
+ __be16 *msg_id;
+
+ if (!htc_handle || !skb)
+ return;
+
+ htc_hdr = (struct htc_frame_hdr *) skb->data;
+ epid = htc_hdr->endpoint_id;
+
+ if (epid >= ENDPOINT_MAX) {
+ if (pipe_id != USB_REG_IN_PIPE)
+ dev_kfree_skb_any(skb);
+ else
+ kfree_skb(skb);
+ return;
+ }
+
+ if (epid == ENDPOINT0) {
+
+ /* Handle trailer */
+ if (htc_hdr->flags & HTC_FLAGS_RECV_TRAILER) {
+ if (be32_to_cpu(*(__be32 *) skb->data) == 0x00C60000)
+ /* Move past the Watchdog pattern */
+ htc_hdr = (struct htc_frame_hdr *)(skb->data + 4);
+ }
+
+ /* Get the message ID */
+ msg_id = (__be16 *) ((void *) htc_hdr +
+ sizeof(struct htc_frame_hdr));
+
+ /* Now process HTC messages */
+ switch (be16_to_cpu(*msg_id)) {
+ case HTC_MSG_READY_ID:
+ htc_process_target_rdy(htc_handle, htc_hdr);
+ break;
+ case HTC_MSG_CONNECT_SERVICE_RESPONSE_ID:
+ htc_process_conn_rsp(htc_handle, htc_hdr);
+ break;
+ default:
+ break;
+ }
+
+ kfree_skb(skb);
+
+ } else {
+ if (htc_hdr->flags & HTC_FLAGS_RECV_TRAILER)
+ skb_trim(skb, len - htc_hdr->control[0]);
+
+ skb_pull(skb, sizeof(struct htc_frame_hdr));
+
+ endpoint = &htc_handle->endpoint[epid];
+ if (endpoint->ep_callbacks.rx)
+ endpoint->ep_callbacks.rx(endpoint->ep_callbacks.priv,
+ skb, epid);
+ }
+}
+
+struct htc_target *ath9k_htc_hw_alloc(void *hif_handle,
+ struct ath9k_htc_hif *hif,
+ struct device *dev)
+{
+ struct htc_endpoint *endpoint;
+ struct htc_target *target;
+
+ target = kzalloc(sizeof(struct htc_target), GFP_KERNEL);
+ if (!target) {
+ printk(KERN_ERR "Unable to allocate memory for "
+ "target device\n");
+ return NULL;
+ }
+
+ init_completion(&target->target_wait);
+ init_completion(&target->cmd_wait);
+
+ target->hif = hif;
+ target->hif_dev = hif_handle;
+ target->dev = dev;
+
+ /* Assign control endpoint pipe IDs */
+ endpoint = &target->endpoint[ENDPOINT0];
+ endpoint->ul_pipeid = hif->control_ul_pipe;
+ endpoint->dl_pipeid = hif->control_dl_pipe;
+
+ atomic_set(&target->tgt_ready, 0);
+
+ return target;
+}
+
+void ath9k_htc_hw_free(struct htc_target *htc)
+{
+ kfree(htc);
+}
+
+int ath9k_htc_hw_init(struct htc_target *target,
+ struct device *dev, u16 devid)
+{
+ if (ath9k_htc_probe_device(target, dev, devid)) {
+ printk(KERN_ERR "Failed to initialize the device\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+void ath9k_htc_hw_deinit(struct htc_target *target, bool hot_unplug)
+{
+ if (target)
+ ath9k_htc_disconnect_device(target, hot_unplug);
+}
diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.h b/drivers/net/wireless/ath/ath9k/htc_hst.h
new file mode 100644
index 000000000000..faba6790328b
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/htc_hst.h
@@ -0,0 +1,245 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef HTC_HST_H
+#define HTC_HST_H
+
+struct ath9k_htc_priv;
+struct htc_target;
+struct ath9k_htc_tx_ctl;
+
+enum ath9k_hif_transports {
+ ATH9K_HIF_USB,
+};
+
+struct ath9k_htc_hif {
+ struct list_head list;
+ const enum ath9k_hif_transports transport;
+ const char *name;
+
+ u8 control_dl_pipe;
+ u8 control_ul_pipe;
+
+ void (*start) (void *hif_handle, u8 pipe);
+ void (*stop) (void *hif_handle, u8 pipe);
+ int (*send) (void *hif_handle, u8 pipe, struct sk_buff *buf,
+ struct ath9k_htc_tx_ctl *tx_ctl);
+};
+
+enum htc_endpoint_id {
+ ENDPOINT_UNUSED = -1,
+ ENDPOINT0 = 0,
+ ENDPOINT1 = 1,
+ ENDPOINT2 = 2,
+ ENDPOINT3 = 3,
+ ENDPOINT4 = 4,
+ ENDPOINT5 = 5,
+ ENDPOINT6 = 6,
+ ENDPOINT7 = 7,
+ ENDPOINT8 = 8,
+ ENDPOINT_MAX = 22
+};
+
+/* Htc frame hdr flags */
+#define HTC_FLAGS_RECV_TRAILER (1 << 1)
+
+struct htc_frame_hdr {
+ u8 endpoint_id;
+ u8 flags;
+ __be16 payload_len;
+ u8 control[4];
+} __packed;
+
+struct htc_ready_msg {
+ __be16 message_id;
+ __be16 credits;
+ __be16 credit_size;
+ u8 max_endpoints;
+ u8 pad;
+} __packed;
+
+struct htc_config_pipe_msg {
+ __be16 message_id;
+ u8 pipe_id;
+ u8 credits;
+} __packed;
+
+struct htc_packet {
+ void *pktcontext;
+ u8 *buf;
+ u8 *buf_payload;
+ u32 buflen;
+ u32 payload_len;
+
+ int endpoint;
+ int status;
+
+ void *context;
+ u32 reserved;
+};
+
+struct htc_ep_callbacks {
+ void *priv;
+ void (*tx) (void *, struct sk_buff *, enum htc_endpoint_id, bool txok);
+ void (*rx) (void *, struct sk_buff *, enum htc_endpoint_id);
+};
+
+#define HTC_TX_QUEUE_SIZE 256
+
+struct htc_txq {
+ struct sk_buff *buf[HTC_TX_QUEUE_SIZE];
+ u32 txqdepth;
+ u16 txbuf_cnt;
+ u16 txq_head;
+ u16 txq_tail;
+};
+
+struct htc_endpoint {
+ u16 service_id;
+
+ struct htc_ep_callbacks ep_callbacks;
+ struct htc_txq htc_txq;
+ u32 max_txqdepth;
+ int max_msglen;
+
+ u8 ul_pipeid;
+ u8 dl_pipeid;
+};
+
+#define HTC_MAX_CONTROL_MESSAGE_LENGTH 255
+#define HTC_CONTROL_BUFFER_SIZE \
+ (HTC_MAX_CONTROL_MESSAGE_LENGTH + sizeof(struct htc_frame_hdr))
+
+struct htc_control_buf {
+ struct htc_packet htc_pkt;
+ u8 buf[HTC_CONTROL_BUFFER_SIZE];
+};
+
+#define HTC_OP_START_WAIT BIT(0)
+#define HTC_OP_CONFIG_PIPE_CREDITS BIT(1)
+
+struct htc_target {
+ void *hif_dev;
+ struct ath9k_htc_priv *drv_priv;
+ struct device *dev;
+ struct ath9k_htc_hif *hif;
+ struct htc_endpoint endpoint[ENDPOINT_MAX];
+ struct completion target_wait;
+ struct completion cmd_wait;
+ struct list_head list;
+ enum htc_endpoint_id conn_rsp_epid;
+ u16 credits;
+ u16 credit_size;
+ u8 htc_flags;
+ atomic_t tgt_ready;
+};
+
+enum htc_msg_id {
+ HTC_MSG_READY_ID = 1,
+ HTC_MSG_CONNECT_SERVICE_ID,
+ HTC_MSG_CONNECT_SERVICE_RESPONSE_ID,
+ HTC_MSG_SETUP_COMPLETE_ID,
+ HTC_MSG_CONFIG_PIPE_ID,
+ HTC_MSG_CONFIG_PIPE_RESPONSE_ID,
+};
+
+struct htc_service_connreq {
+ u16 service_id;
+ u16 con_flags;
+ u32 max_send_qdepth;
+ struct htc_ep_callbacks ep_callbacks;
+};
+
+/* Current service IDs */
+
+enum htc_service_group_ids {
+ RSVD_SERVICE_GROUP = 0,
+ WMI_SERVICE_GROUP = 1,
+
+ HTC_SERVICE_GROUP_LAST = 255
+};
+
+#define MAKE_SERVICE_ID(group, index) \
+ (int)(((int)group << 8) | (int)(index))
+
+/* NOTE: service ID of 0x0000 is reserved and should never be used */
+#define HTC_CTRL_RSVD_SVC MAKE_SERVICE_ID(RSVD_SERVICE_GROUP, 1)
+#define HTC_LOOPBACK_RSVD_SVC MAKE_SERVICE_ID(RSVD_SERVICE_GROUP, 2)
+
+#define WMI_CONTROL_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 0)
+#define WMI_BEACON_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 1)
+#define WMI_CAB_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 2)
+#define WMI_UAPSD_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 3)
+#define WMI_MGMT_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 4)
+#define WMI_DATA_VO_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 5)
+#define WMI_DATA_VI_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 6)
+#define WMI_DATA_BE_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 7)
+#define WMI_DATA_BK_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 8)
+
+struct htc_conn_svc_msg {
+ __be16 msg_id;
+ __be16 service_id;
+ __be16 con_flags;
+ u8 dl_pipeid;
+ u8 ul_pipeid;
+ u8 svc_meta_len;
+ u8 pad;
+} __packed;
+
+/* connect response status codes */
+#define HTC_SERVICE_SUCCESS 0
+#define HTC_SERVICE_NOT_FOUND 1
+#define HTC_SERVICE_FAILED 2
+#define HTC_SERVICE_NO_RESOURCES 3
+#define HTC_SERVICE_NO_MORE_EP 4
+
+struct htc_conn_svc_rspmsg {
+ __be16 msg_id;
+ __be16 service_id;
+ u8 status;
+ u8 endpoint_id;
+ __be16 max_msg_len;
+ u8 svc_meta_len;
+ u8 pad;
+} __packed;
+
+struct htc_comp_msg {
+ __be16 msg_id;
+} __packed;
+
+int htc_init(struct htc_target *target);
+int htc_connect_service(struct htc_target *target,
+ struct htc_service_connreq *service_connreq,
+ enum htc_endpoint_id *conn_rsp_eid);
+int htc_send(struct htc_target *target, struct sk_buff *skb,
+ enum htc_endpoint_id eid, struct ath9k_htc_tx_ctl *tx_ctl);
+void htc_stop(struct htc_target *target);
+void htc_start(struct htc_target *target);
+
+void ath9k_htc_rx_msg(struct htc_target *htc_handle,
+ struct sk_buff *skb, u32 len, u8 pipe_id);
+void ath9k_htc_txcompletion_cb(struct htc_target *htc_handle,
+ struct sk_buff *skb, bool txok);
+
+struct htc_target *ath9k_htc_hw_alloc(void *hif_handle,
+ struct ath9k_htc_hif *hif,
+ struct device *dev);
+void ath9k_htc_hw_free(struct htc_target *htc);
+int ath9k_htc_hw_init(struct htc_target *target,
+ struct device *dev, u16 devid);
+void ath9k_htc_hw_deinit(struct htc_target *target, bool hot_unplug);
+
+#endif /* HTC_HST_H */
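/*
 * A minimal usage sketch for the service-connect path declared above; the
 * example_* names and the queue depth are placeholders, not part of this
 * header. Note that MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 0) expands to
 * ((1 << 8) | 0) == 0x0100, so WMI_CONTROL_SVC is service ID 0x0100.
 */
static void example_tx_complete(void *priv, struct sk_buff *skb,
				enum htc_endpoint_id epid, bool txok)
{
	/* free or requeue the skb here */
}

static void example_rx(void *priv, struct sk_buff *skb,
		       enum htc_endpoint_id epid)
{
	/* hand the skb to the protocol layer here */
}

static int example_connect_wmi_control(struct htc_target *target, void *priv,
				       enum htc_endpoint_id *epid)
{
	struct htc_service_connreq req;

	memset(&req, 0, sizeof(req));
	req.service_id = WMI_CONTROL_SVC;
	req.max_send_qdepth = 32;		/* placeholder depth */
	req.ep_callbacks.priv = priv;
	req.ep_callbacks.tx = example_tx_complete;
	req.ep_callbacks.rx = example_rx;

	/* On success, *epid holds the endpoint assigned by the target. */
	return htc_connect_service(target, &req, epid);
}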
diff --git a/drivers/net/wireless/ath/ath9k/hw-ops.h b/drivers/net/wireless/ath/ath9k/hw-ops.h
new file mode 100644
index 000000000000..624422a8169e
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/hw-ops.h
@@ -0,0 +1,280 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef ATH9K_HW_OPS_H
+#define ATH9K_HW_OPS_H
+
+#include "hw.h"
+
+/* Hardware core and driver accessible callbacks */
+
+static inline void ath9k_hw_configpcipowersave(struct ath_hw *ah,
+ int restore,
+ int power_off)
+{
+ ath9k_hw_ops(ah)->config_pci_powersave(ah, restore, power_off);
+}
+
+static inline void ath9k_hw_rxena(struct ath_hw *ah)
+{
+ ath9k_hw_ops(ah)->rx_enable(ah);
+}
+
+static inline void ath9k_hw_set_desc_link(struct ath_hw *ah, void *ds,
+ u32 link)
+{
+ ath9k_hw_ops(ah)->set_desc_link(ds, link);
+}
+
+static inline void ath9k_hw_get_desc_link(struct ath_hw *ah, void *ds,
+ u32 **link)
+{
+ ath9k_hw_ops(ah)->get_desc_link(ds, link);
+}
+static inline bool ath9k_hw_calibrate(struct ath_hw *ah,
+ struct ath9k_channel *chan,
+ u8 rxchainmask,
+ bool longcal)
+{
+ return ath9k_hw_ops(ah)->calibrate(ah, chan, rxchainmask, longcal);
+}
+
+static inline bool ath9k_hw_getisr(struct ath_hw *ah, enum ath9k_int *masked)
+{
+ return ath9k_hw_ops(ah)->get_isr(ah, masked);
+}
+
+static inline void ath9k_hw_filltxdesc(struct ath_hw *ah, void *ds, u32 seglen,
+ bool is_firstseg, bool is_lastseg,
+ const void *ds0, dma_addr_t buf_addr,
+ unsigned int qcu)
+{
+ ath9k_hw_ops(ah)->fill_txdesc(ah, ds, seglen, is_firstseg, is_lastseg,
+ ds0, buf_addr, qcu);
+}
+
+static inline int ath9k_hw_txprocdesc(struct ath_hw *ah, void *ds,
+ struct ath_tx_status *ts)
+{
+ return ath9k_hw_ops(ah)->proc_txdesc(ah, ds, ts);
+}
+
+static inline void ath9k_hw_set11n_txdesc(struct ath_hw *ah, void *ds,
+ u32 pktLen, enum ath9k_pkt_type type,
+ u32 txPower, u32 keyIx,
+ enum ath9k_key_type keyType,
+ u32 flags)
+{
+ ath9k_hw_ops(ah)->set11n_txdesc(ah, ds, pktLen, type, txPower, keyIx,
+ keyType, flags);
+}
+
+static inline void ath9k_hw_set11n_ratescenario(struct ath_hw *ah, void *ds,
+ void *lastds,
+ u32 durUpdateEn, u32 rtsctsRate,
+ u32 rtsctsDuration,
+ struct ath9k_11n_rate_series series[],
+ u32 nseries, u32 flags)
+{
+ ath9k_hw_ops(ah)->set11n_ratescenario(ah, ds, lastds, durUpdateEn,
+ rtsctsRate, rtsctsDuration, series,
+ nseries, flags);
+}
+
+static inline void ath9k_hw_set11n_aggr_first(struct ath_hw *ah, void *ds,
+ u32 aggrLen)
+{
+ ath9k_hw_ops(ah)->set11n_aggr_first(ah, ds, aggrLen);
+}
+
+static inline void ath9k_hw_set11n_aggr_middle(struct ath_hw *ah, void *ds,
+ u32 numDelims)
+{
+ ath9k_hw_ops(ah)->set11n_aggr_middle(ah, ds, numDelims);
+}
+
+static inline void ath9k_hw_set11n_aggr_last(struct ath_hw *ah, void *ds)
+{
+ ath9k_hw_ops(ah)->set11n_aggr_last(ah, ds);
+}
+
+static inline void ath9k_hw_clr11n_aggr(struct ath_hw *ah, void *ds)
+{
+ ath9k_hw_ops(ah)->clr11n_aggr(ah, ds);
+}
+
+static inline void ath9k_hw_set11n_burstduration(struct ath_hw *ah, void *ds,
+ u32 burstDuration)
+{
+ ath9k_hw_ops(ah)->set11n_burstduration(ah, ds, burstDuration);
+}
+
+static inline void ath9k_hw_set11n_virtualmorefrag(struct ath_hw *ah, void *ds,
+ u32 vmf)
+{
+ ath9k_hw_ops(ah)->set11n_virtualmorefrag(ah, ds, vmf);
+}
+
+/* Private hardware call ops */
+
+/* PHY ops */
+
+static inline int ath9k_hw_rf_set_freq(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ return ath9k_hw_private_ops(ah)->rf_set_freq(ah, chan);
+}
+
+static inline void ath9k_hw_spur_mitigate_freq(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ ath9k_hw_private_ops(ah)->spur_mitigate_freq(ah, chan);
+}
+
+static inline int ath9k_hw_rf_alloc_ext_banks(struct ath_hw *ah)
+{
+ if (!ath9k_hw_private_ops(ah)->rf_alloc_ext_banks)
+ return 0;
+
+ return ath9k_hw_private_ops(ah)->rf_alloc_ext_banks(ah);
+}
+
+static inline void ath9k_hw_rf_free_ext_banks(struct ath_hw *ah)
+{
+ if (!ath9k_hw_private_ops(ah)->rf_free_ext_banks)
+ return;
+
+ ath9k_hw_private_ops(ah)->rf_free_ext_banks(ah);
+}
+
+static inline bool ath9k_hw_set_rf_regs(struct ath_hw *ah,
+ struct ath9k_channel *chan,
+ u16 modesIndex)
+{
+ if (!ath9k_hw_private_ops(ah)->set_rf_regs)
+ return true;
+
+ return ath9k_hw_private_ops(ah)->set_rf_regs(ah, chan, modesIndex);
+}
+
+static inline void ath9k_hw_init_bb(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ return ath9k_hw_private_ops(ah)->init_bb(ah, chan);
+}
+
+static inline void ath9k_hw_set_channel_regs(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ return ath9k_hw_private_ops(ah)->set_channel_regs(ah, chan);
+}
+
+static inline int ath9k_hw_process_ini(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ return ath9k_hw_private_ops(ah)->process_ini(ah, chan);
+}
+
+static inline void ath9k_olc_init(struct ath_hw *ah)
+{
+ if (!ath9k_hw_private_ops(ah)->olc_init)
+ return;
+
+ return ath9k_hw_private_ops(ah)->olc_init(ah);
+}
+
+static inline void ath9k_hw_set_rfmode(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ return ath9k_hw_private_ops(ah)->set_rfmode(ah, chan);
+}
+
+static inline void ath9k_hw_mark_phy_inactive(struct ath_hw *ah)
+{
+ return ath9k_hw_private_ops(ah)->mark_phy_inactive(ah);
+}
+
+static inline void ath9k_hw_set_delta_slope(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ return ath9k_hw_private_ops(ah)->set_delta_slope(ah, chan);
+}
+
+static inline bool ath9k_hw_rfbus_req(struct ath_hw *ah)
+{
+ return ath9k_hw_private_ops(ah)->rfbus_req(ah);
+}
+
+static inline void ath9k_hw_rfbus_done(struct ath_hw *ah)
+{
+ return ath9k_hw_private_ops(ah)->rfbus_done(ah);
+}
+
+static inline void ath9k_enable_rfkill(struct ath_hw *ah)
+{
+ return ath9k_hw_private_ops(ah)->enable_rfkill(ah);
+}
+
+static inline void ath9k_hw_restore_chainmask(struct ath_hw *ah)
+{
+ if (!ath9k_hw_private_ops(ah)->restore_chainmask)
+ return;
+
+ return ath9k_hw_private_ops(ah)->restore_chainmask(ah);
+}
+
+static inline void ath9k_hw_set_diversity(struct ath_hw *ah, bool value)
+{
+ return ath9k_hw_private_ops(ah)->set_diversity(ah, value);
+}
+
+static inline bool ath9k_hw_ani_control(struct ath_hw *ah,
+ enum ath9k_ani_cmd cmd, int param)
+{
+ return ath9k_hw_private_ops(ah)->ani_control(ah, cmd, param);
+}
+
+static inline void ath9k_hw_do_getnf(struct ath_hw *ah,
+ int16_t nfarray[NUM_NF_READINGS])
+{
+ ath9k_hw_private_ops(ah)->do_getnf(ah, nfarray);
+}
+
+static inline void ath9k_hw_loadnf(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ ath9k_hw_private_ops(ah)->loadnf(ah, chan);
+}
+
+static inline bool ath9k_hw_init_cal(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ return ath9k_hw_private_ops(ah)->init_cal(ah, chan);
+}
+
+static inline void ath9k_hw_setup_calibration(struct ath_hw *ah,
+ struct ath9k_cal_list *currCal)
+{
+ ath9k_hw_private_ops(ah)->setup_calibration(ah, currCal);
+}
+
+static inline bool ath9k_hw_iscal_supported(struct ath_hw *ah,
+ enum ath9k_cal_types calType)
+{
+ return ath9k_hw_private_ops(ah)->iscal_supported(ah, calType);
+}
+
+#endif /* ATH9K_HW_OPS_H */
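/*
 * Sketch of how a hardware-family attach routine is expected to fill the
 * private ops consumed by the wrappers above; the my_family_* names are
 * hypothetical and only illustrate the indirection, assuming
 * ath9k_hw_private_ops(ah) returns that family's struct ath_hw_private_ops.
 */
static int my_family_rf_set_freq(struct ath_hw *ah, struct ath9k_channel *chan)
{
	/* program the synthesizer for chan here */
	return 0;
}

static void my_family_attach_phy_ops(struct ath_hw *ah)
{
	struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);

	priv_ops->rf_set_freq = my_family_rf_set_freq;

	/*
	 * Optional hooks may be left NULL: ath9k_hw_rf_alloc_ext_banks(),
	 * ath9k_olc_init() and ath9k_hw_restore_chainmask() above check the
	 * pointer before calling through it.
	 */
}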
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 78b571129c92..c33f17dbe6f1 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008-2009 Atheros Communications Inc.
+ * Copyright (c) 2008-2010 Atheros Communications Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -19,18 +19,16 @@
#include <asm/unaligned.h>
#include "hw.h"
+#include "hw-ops.h"
#include "rc.h"
-#include "initvals.h"
+#include "ar9003_mac.h"
#define ATH9K_CLOCK_RATE_CCK 22
#define ATH9K_CLOCK_RATE_5GHZ_OFDM 40
#define ATH9K_CLOCK_RATE_2GHZ_OFDM 44
+#define ATH9K_CLOCK_FAST_RATE_5GHZ_OFDM 44
static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type);
-static void ath9k_hw_set_regs(struct ath_hw *ah, struct ath9k_channel *chan);
-static u32 ath9k_hw_ini_fixup(struct ath_hw *ah,
- struct ar5416_eeprom_def *pEepData,
- u32 reg, u32 value);
MODULE_AUTHOR("Atheros Communications");
MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
@@ -49,6 +47,39 @@ static void __exit ath9k_exit(void)
}
module_exit(ath9k_exit);
+/* Private hardware callbacks */
+
+static void ath9k_hw_init_cal_settings(struct ath_hw *ah)
+{
+ ath9k_hw_private_ops(ah)->init_cal_settings(ah);
+}
+
+static void ath9k_hw_init_mode_regs(struct ath_hw *ah)
+{
+ ath9k_hw_private_ops(ah)->init_mode_regs(ah);
+}
+
+static bool ath9k_hw_macversion_supported(struct ath_hw *ah)
+{
+ struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
+
+ return priv_ops->macversion_supported(ah->hw_version.macVersion);
+}
+
+static u32 ath9k_hw_compute_pll_control(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ return ath9k_hw_private_ops(ah)->compute_pll_control(ah, chan);
+}
+
+static void ath9k_hw_init_mode_gain_regs(struct ath_hw *ah)
+{
+ if (!ath9k_hw_private_ops(ah)->init_mode_gain_regs)
+ return;
+
+ ath9k_hw_private_ops(ah)->init_mode_gain_regs(ah);
+}
+
/********************/
/* Helper Functions */
/********************/
@@ -61,7 +92,11 @@ static u32 ath9k_hw_mac_clks(struct ath_hw *ah, u32 usecs)
return usecs *ATH9K_CLOCK_RATE_CCK;
if (conf->channel->band == IEEE80211_BAND_2GHZ)
return usecs *ATH9K_CLOCK_RATE_2GHZ_OFDM;
- return usecs *ATH9K_CLOCK_RATE_5GHZ_OFDM;
+
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_FASTCLOCK)
+ return usecs * ATH9K_CLOCK_FAST_RATE_5GHZ_OFDM;
+ else
+ return usecs * ATH9K_CLOCK_RATE_5GHZ_OFDM;
}
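/*
 * Worked example for the fast-clock branch added above: with
 * ATH9K_CLOCK_FAST_RATE_5GHZ_OFDM == 44, a 10 usec interval converts to
 * 10 * 44 == 440 MAC clocks, against 10 * 40 == 400 clocks at the normal
 * 5 GHz OFDM rate.
 */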
static u32 ath9k_hw_mac_to_clks(struct ath_hw *ah, u32 usecs)
@@ -236,21 +271,6 @@ static void ath9k_hw_read_revisions(struct ath_hw *ah)
}
}
-static int ath9k_hw_get_radiorev(struct ath_hw *ah)
-{
- u32 val;
- int i;
-
- REG_WRITE(ah, AR_PHY(0x36), 0x00007058);
-
- for (i = 0; i < 8; i++)
- REG_WRITE(ah, AR_PHY(0x20), 0x00010000);
- val = (REG_READ(ah, AR_PHY(256)) >> 24) & 0xff;
- val = ((val & 0xf0) >> 4) | ((val & 0x0f) << 4);
-
- return ath9k_hw_reverse_bits(val, 8);
-}
-
/************************************/
/* HW Attach, Detach, Init Routines */
/************************************/
@@ -260,6 +280,8 @@ static void ath9k_hw_disablepcie(struct ath_hw *ah)
if (AR_SREV_9100(ah))
return;
+ ENABLE_REGWRITE_BUFFER(ah);
+
REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fc00);
REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924);
REG_WRITE(ah, AR_PCIE_SERDES, 0x28000029);
@@ -271,20 +293,30 @@ static void ath9k_hw_disablepcie(struct ath_hw *ah)
REG_WRITE(ah, AR_PCIE_SERDES, 0x000e1007);
REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
}
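/*
 * Minimal sketch of the ENABLE_REGWRITE_BUFFER()/REGWRITE_BUFFER_FLUSH()/
 * DISABLE_REGWRITE_BUFFER() bracket used above and throughout this patch;
 * it assumes the macros batch the intervening REG_WRITE() calls into one
 * transfer. The register/value pairs are copied from the function above and
 * are only illustrative here.
 */
static void example_buffered_writes(struct ath_hw *ah)
{
	ENABLE_REGWRITE_BUFFER(ah);

	REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fc00);
	REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);

	REGWRITE_BUFFER_FLUSH(ah);
	DISABLE_REGWRITE_BUFFER(ah);
}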
+/* This should work for all families including legacy */
static bool ath9k_hw_chip_test(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
- u32 regAddr[2] = { AR_STA_ID0, AR_PHY_BASE + (8 << 2) };
+ u32 regAddr[2] = { AR_STA_ID0 };
u32 regHold[2];
u32 patternData[4] = { 0x55555555,
0xaaaaaaaa,
0x66666666,
0x99999999 };
- int i, j;
+ int i, j, loop_max;
+
+ if (!AR_SREV_9300_20_OR_LATER(ah)) {
+ loop_max = 2;
+ regAddr[1] = AR_PHY_BASE + (8 << 2);
+ } else
+ loop_max = 1;
- for (i = 0; i < 2; i++) {
+ for (i = 0; i < loop_max; i++) {
u32 addr = regAddr[i];
u32 wrData, rdData;
@@ -339,7 +371,13 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
ah->config.ofdm_trig_high = 500;
ah->config.cck_trig_high = 200;
ah->config.cck_trig_low = 100;
- ah->config.enable_ani = 1;
+
+ /*
+ * For now, ANI is disabled for AR9003; it is still
+ * being tested.
+ */
+ if (!AR_SREV_9300_20_OR_LATER(ah))
+ ah->config.enable_ani = 1;
for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
ah->config.spurchans[i][0] = AR_NO_SPUR;
@@ -354,6 +392,12 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
ah->config.rx_intr_mitigation = true;
/*
+ * Tx IQ Calibration (ah->config.tx_iq_calibration) is only
+ * used by AR9003, but it is showing reliability issues.
+ * It will take a while to fix, so it is currently disabled.
+ */
+
+ /*
* We need this for PCI devices only (Cardbus, PCI, miniPCI)
* _and_ if on non-uniprocessor systems (Multiprocessor/HT).
* This means we use it for all AR5416 devices, and the few
@@ -372,7 +416,6 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
if (num_possible_cpus() > 1)
ah->config.serialize_regmode = SER_REG_MODE_AUTO;
}
-EXPORT_SYMBOL(ath9k_hw_init);
static void ath9k_hw_init_defaults(struct ath_hw *ah)
{
@@ -386,8 +429,6 @@ static void ath9k_hw_init_defaults(struct ath_hw *ah)
ah->hw_version.subvendorid = 0;
ah->ah_flags = 0;
- if (ah->hw_version.devid == AR5416_AR9100_DEVID)
- ah->hw_version.macVersion = AR_SREV_VERSION_9100;
if (!AR_SREV_9100(ah))
ah->ah_flags = AH_USE_EEPROM;
@@ -400,44 +441,17 @@ static void ath9k_hw_init_defaults(struct ath_hw *ah)
ah->power_mode = ATH9K_PM_UNDEFINED;
}
-static int ath9k_hw_rf_claim(struct ath_hw *ah)
-{
- u32 val;
-
- REG_WRITE(ah, AR_PHY(0), 0x00000007);
-
- val = ath9k_hw_get_radiorev(ah);
- switch (val & AR_RADIO_SREV_MAJOR) {
- case 0:
- val = AR_RAD5133_SREV_MAJOR;
- break;
- case AR_RAD5133_SREV_MAJOR:
- case AR_RAD5122_SREV_MAJOR:
- case AR_RAD2133_SREV_MAJOR:
- case AR_RAD2122_SREV_MAJOR:
- break;
- default:
- ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
- "Radio Chip Rev 0x%02X not supported\n",
- val & AR_RADIO_SREV_MAJOR);
- return -EOPNOTSUPP;
- }
-
- ah->hw_version.analog5GhzRev = val;
-
- return 0;
-}
-
static int ath9k_hw_init_macaddr(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
u32 sum;
int i;
u16 eeval;
+ u32 EEP_MAC[] = { EEP_MAC_LSW, EEP_MAC_MID, EEP_MAC_MSW };
sum = 0;
for (i = 0; i < 3; i++) {
- eeval = ah->eep_ops->get_eeprom(ah, AR_EEPROM_MAC(i));
+ eeval = ah->eep_ops->get_eeprom(ah, EEP_MAC[i]);
sum += eeval;
common->macaddr[2 * i] = eeval >> 8;
common->macaddr[2 * i + 1] = eeval & 0xff;
@@ -448,64 +462,20 @@ static int ath9k_hw_init_macaddr(struct ath_hw *ah)
return 0;
}
-static void ath9k_hw_init_rxgain_ini(struct ath_hw *ah)
-{
- u32 rxgain_type;
-
- if (ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV) >= AR5416_EEP_MINOR_VER_17) {
- rxgain_type = ah->eep_ops->get_eeprom(ah, EEP_RXGAIN_TYPE);
-
- if (rxgain_type == AR5416_EEP_RXGAIN_13DB_BACKOFF)
- INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9280Modes_backoff_13db_rxgain_9280_2,
- ARRAY_SIZE(ar9280Modes_backoff_13db_rxgain_9280_2), 6);
- else if (rxgain_type == AR5416_EEP_RXGAIN_23DB_BACKOFF)
- INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9280Modes_backoff_23db_rxgain_9280_2,
- ARRAY_SIZE(ar9280Modes_backoff_23db_rxgain_9280_2), 6);
- else
- INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9280Modes_original_rxgain_9280_2,
- ARRAY_SIZE(ar9280Modes_original_rxgain_9280_2), 6);
- } else {
- INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9280Modes_original_rxgain_9280_2,
- ARRAY_SIZE(ar9280Modes_original_rxgain_9280_2), 6);
- }
-}
-
-static void ath9k_hw_init_txgain_ini(struct ath_hw *ah)
-{
- u32 txgain_type;
-
- if (ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV) >= AR5416_EEP_MINOR_VER_19) {
- txgain_type = ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE);
-
- if (txgain_type == AR5416_EEP_TXGAIN_HIGH_POWER)
- INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9280Modes_high_power_tx_gain_9280_2,
- ARRAY_SIZE(ar9280Modes_high_power_tx_gain_9280_2), 6);
- else
- INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9280Modes_original_tx_gain_9280_2,
- ARRAY_SIZE(ar9280Modes_original_tx_gain_9280_2), 6);
- } else {
- INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9280Modes_original_tx_gain_9280_2,
- ARRAY_SIZE(ar9280Modes_original_tx_gain_9280_2), 6);
- }
-}
-
static int ath9k_hw_post_init(struct ath_hw *ah)
{
int ecode;
- if (!ath9k_hw_chip_test(ah))
- return -ENODEV;
+ if (!AR_SREV_9271(ah)) {
+ if (!ath9k_hw_chip_test(ah))
+ return -ENODEV;
+ }
- ecode = ath9k_hw_rf_claim(ah);
- if (ecode != 0)
- return ecode;
+ if (!AR_SREV_9300_20_OR_LATER(ah)) {
+ ecode = ar9002_hw_rf_claim(ah);
+ if (ecode != 0)
+ return ecode;
+ }
ecode = ath9k_hw_eeprom_init(ah);
if (ecode != 0)
@@ -516,14 +486,12 @@ static int ath9k_hw_post_init(struct ath_hw *ah)
ah->eep_ops->get_eeprom_ver(ah),
ah->eep_ops->get_eeprom_rev(ah));
- if (!AR_SREV_9280_10_OR_LATER(ah)) {
- ecode = ath9k_hw_rf_alloc_ext_banks(ah);
- if (ecode) {
- ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
- "Failed allocating banks for "
- "external radio\n");
- return ecode;
- }
+ ecode = ath9k_hw_rf_alloc_ext_banks(ah);
+ if (ecode) {
+ ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
+ "Failed allocating banks for "
+ "external radio\n");
+ return ecode;
}
if (!AR_SREV_9100(ah)) {
@@ -534,321 +502,22 @@ static int ath9k_hw_post_init(struct ath_hw *ah)
return 0;
}
-static bool ath9k_hw_devid_supported(u16 devid)
+static void ath9k_hw_attach_ops(struct ath_hw *ah)
{
- switch (devid) {
- case AR5416_DEVID_PCI:
- case AR5416_DEVID_PCIE:
- case AR5416_AR9100_DEVID:
- case AR9160_DEVID_PCI:
- case AR9280_DEVID_PCI:
- case AR9280_DEVID_PCIE:
- case AR9285_DEVID_PCIE:
- case AR5416_DEVID_AR9287_PCI:
- case AR5416_DEVID_AR9287_PCIE:
- case AR9271_USB:
- case AR2427_DEVID_PCIE:
- return true;
- default:
- break;
- }
- return false;
-}
-
-static bool ath9k_hw_macversion_supported(u32 macversion)
-{
- switch (macversion) {
- case AR_SREV_VERSION_5416_PCI:
- case AR_SREV_VERSION_5416_PCIE:
- case AR_SREV_VERSION_9160:
- case AR_SREV_VERSION_9100:
- case AR_SREV_VERSION_9280:
- case AR_SREV_VERSION_9285:
- case AR_SREV_VERSION_9287:
- case AR_SREV_VERSION_9271:
- return true;
- default:
- break;
- }
- return false;
-}
-
-static void ath9k_hw_init_cal_settings(struct ath_hw *ah)
-{
- if (AR_SREV_9160_10_OR_LATER(ah)) {
- if (AR_SREV_9280_10_OR_LATER(ah)) {
- ah->iq_caldata.calData = &iq_cal_single_sample;
- ah->adcgain_caldata.calData =
- &adc_gain_cal_single_sample;
- ah->adcdc_caldata.calData =
- &adc_dc_cal_single_sample;
- ah->adcdc_calinitdata.calData =
- &adc_init_dc_cal;
- } else {
- ah->iq_caldata.calData = &iq_cal_multi_sample;
- ah->adcgain_caldata.calData =
- &adc_gain_cal_multi_sample;
- ah->adcdc_caldata.calData =
- &adc_dc_cal_multi_sample;
- ah->adcdc_calinitdata.calData =
- &adc_init_dc_cal;
- }
- ah->supp_cals = ADC_GAIN_CAL | ADC_DC_CAL | IQ_MISMATCH_CAL;
- }
-}
-
-static void ath9k_hw_init_mode_regs(struct ath_hw *ah)
-{
- if (AR_SREV_9271(ah)) {
- INIT_INI_ARRAY(&ah->iniModes, ar9271Modes_9271,
- ARRAY_SIZE(ar9271Modes_9271), 6);
- INIT_INI_ARRAY(&ah->iniCommon, ar9271Common_9271,
- ARRAY_SIZE(ar9271Common_9271), 2);
- INIT_INI_ARRAY(&ah->iniModes_9271_1_0_only,
- ar9271Modes_9271_1_0_only,
- ARRAY_SIZE(ar9271Modes_9271_1_0_only), 6);
- return;
- }
-
- if (AR_SREV_9287_11_OR_LATER(ah)) {
- INIT_INI_ARRAY(&ah->iniModes, ar9287Modes_9287_1_1,
- ARRAY_SIZE(ar9287Modes_9287_1_1), 6);
- INIT_INI_ARRAY(&ah->iniCommon, ar9287Common_9287_1_1,
- ARRAY_SIZE(ar9287Common_9287_1_1), 2);
- if (ah->config.pcie_clock_req)
- INIT_INI_ARRAY(&ah->iniPcieSerdes,
- ar9287PciePhy_clkreq_off_L1_9287_1_1,
- ARRAY_SIZE(ar9287PciePhy_clkreq_off_L1_9287_1_1), 2);
- else
- INIT_INI_ARRAY(&ah->iniPcieSerdes,
- ar9287PciePhy_clkreq_always_on_L1_9287_1_1,
- ARRAY_SIZE(ar9287PciePhy_clkreq_always_on_L1_9287_1_1),
- 2);
- } else if (AR_SREV_9287_10_OR_LATER(ah)) {
- INIT_INI_ARRAY(&ah->iniModes, ar9287Modes_9287_1_0,
- ARRAY_SIZE(ar9287Modes_9287_1_0), 6);
- INIT_INI_ARRAY(&ah->iniCommon, ar9287Common_9287_1_0,
- ARRAY_SIZE(ar9287Common_9287_1_0), 2);
-
- if (ah->config.pcie_clock_req)
- INIT_INI_ARRAY(&ah->iniPcieSerdes,
- ar9287PciePhy_clkreq_off_L1_9287_1_0,
- ARRAY_SIZE(ar9287PciePhy_clkreq_off_L1_9287_1_0), 2);
- else
- INIT_INI_ARRAY(&ah->iniPcieSerdes,
- ar9287PciePhy_clkreq_always_on_L1_9287_1_0,
- ARRAY_SIZE(ar9287PciePhy_clkreq_always_on_L1_9287_1_0),
- 2);
- } else if (AR_SREV_9285_12_OR_LATER(ah)) {
-
-
- INIT_INI_ARRAY(&ah->iniModes, ar9285Modes_9285_1_2,
- ARRAY_SIZE(ar9285Modes_9285_1_2), 6);
- INIT_INI_ARRAY(&ah->iniCommon, ar9285Common_9285_1_2,
- ARRAY_SIZE(ar9285Common_9285_1_2), 2);
-
- if (ah->config.pcie_clock_req) {
- INIT_INI_ARRAY(&ah->iniPcieSerdes,
- ar9285PciePhy_clkreq_off_L1_9285_1_2,
- ARRAY_SIZE(ar9285PciePhy_clkreq_off_L1_9285_1_2), 2);
- } else {
- INIT_INI_ARRAY(&ah->iniPcieSerdes,
- ar9285PciePhy_clkreq_always_on_L1_9285_1_2,
- ARRAY_SIZE(ar9285PciePhy_clkreq_always_on_L1_9285_1_2),
- 2);
- }
- } else if (AR_SREV_9285_10_OR_LATER(ah)) {
- INIT_INI_ARRAY(&ah->iniModes, ar9285Modes_9285,
- ARRAY_SIZE(ar9285Modes_9285), 6);
- INIT_INI_ARRAY(&ah->iniCommon, ar9285Common_9285,
- ARRAY_SIZE(ar9285Common_9285), 2);
-
- if (ah->config.pcie_clock_req) {
- INIT_INI_ARRAY(&ah->iniPcieSerdes,
- ar9285PciePhy_clkreq_off_L1_9285,
- ARRAY_SIZE(ar9285PciePhy_clkreq_off_L1_9285), 2);
- } else {
- INIT_INI_ARRAY(&ah->iniPcieSerdes,
- ar9285PciePhy_clkreq_always_on_L1_9285,
- ARRAY_SIZE(ar9285PciePhy_clkreq_always_on_L1_9285), 2);
- }
- } else if (AR_SREV_9280_20_OR_LATER(ah)) {
- INIT_INI_ARRAY(&ah->iniModes, ar9280Modes_9280_2,
- ARRAY_SIZE(ar9280Modes_9280_2), 6);
- INIT_INI_ARRAY(&ah->iniCommon, ar9280Common_9280_2,
- ARRAY_SIZE(ar9280Common_9280_2), 2);
-
- if (ah->config.pcie_clock_req) {
- INIT_INI_ARRAY(&ah->iniPcieSerdes,
- ar9280PciePhy_clkreq_off_L1_9280,
- ARRAY_SIZE(ar9280PciePhy_clkreq_off_L1_9280),2);
- } else {
- INIT_INI_ARRAY(&ah->iniPcieSerdes,
- ar9280PciePhy_clkreq_always_on_L1_9280,
- ARRAY_SIZE(ar9280PciePhy_clkreq_always_on_L1_9280), 2);
- }
- INIT_INI_ARRAY(&ah->iniModesAdditional,
- ar9280Modes_fast_clock_9280_2,
- ARRAY_SIZE(ar9280Modes_fast_clock_9280_2), 3);
- } else if (AR_SREV_9280_10_OR_LATER(ah)) {
- INIT_INI_ARRAY(&ah->iniModes, ar9280Modes_9280,
- ARRAY_SIZE(ar9280Modes_9280), 6);
- INIT_INI_ARRAY(&ah->iniCommon, ar9280Common_9280,
- ARRAY_SIZE(ar9280Common_9280), 2);
- } else if (AR_SREV_9160_10_OR_LATER(ah)) {
- INIT_INI_ARRAY(&ah->iniModes, ar5416Modes_9160,
- ARRAY_SIZE(ar5416Modes_9160), 6);
- INIT_INI_ARRAY(&ah->iniCommon, ar5416Common_9160,
- ARRAY_SIZE(ar5416Common_9160), 2);
- INIT_INI_ARRAY(&ah->iniBank0, ar5416Bank0_9160,
- ARRAY_SIZE(ar5416Bank0_9160), 2);
- INIT_INI_ARRAY(&ah->iniBB_RfGain, ar5416BB_RfGain_9160,
- ARRAY_SIZE(ar5416BB_RfGain_9160), 3);
- INIT_INI_ARRAY(&ah->iniBank1, ar5416Bank1_9160,
- ARRAY_SIZE(ar5416Bank1_9160), 2);
- INIT_INI_ARRAY(&ah->iniBank2, ar5416Bank2_9160,
- ARRAY_SIZE(ar5416Bank2_9160), 2);
- INIT_INI_ARRAY(&ah->iniBank3, ar5416Bank3_9160,
- ARRAY_SIZE(ar5416Bank3_9160), 3);
- INIT_INI_ARRAY(&ah->iniBank6, ar5416Bank6_9160,
- ARRAY_SIZE(ar5416Bank6_9160), 3);
- INIT_INI_ARRAY(&ah->iniBank6TPC, ar5416Bank6TPC_9160,
- ARRAY_SIZE(ar5416Bank6TPC_9160), 3);
- INIT_INI_ARRAY(&ah->iniBank7, ar5416Bank7_9160,
- ARRAY_SIZE(ar5416Bank7_9160), 2);
- if (AR_SREV_9160_11(ah)) {
- INIT_INI_ARRAY(&ah->iniAddac,
- ar5416Addac_91601_1,
- ARRAY_SIZE(ar5416Addac_91601_1), 2);
- } else {
- INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac_9160,
- ARRAY_SIZE(ar5416Addac_9160), 2);
- }
- } else if (AR_SREV_9100_OR_LATER(ah)) {
- INIT_INI_ARRAY(&ah->iniModes, ar5416Modes_9100,
- ARRAY_SIZE(ar5416Modes_9100), 6);
- INIT_INI_ARRAY(&ah->iniCommon, ar5416Common_9100,
- ARRAY_SIZE(ar5416Common_9100), 2);
- INIT_INI_ARRAY(&ah->iniBank0, ar5416Bank0_9100,
- ARRAY_SIZE(ar5416Bank0_9100), 2);
- INIT_INI_ARRAY(&ah->iniBB_RfGain, ar5416BB_RfGain_9100,
- ARRAY_SIZE(ar5416BB_RfGain_9100), 3);
- INIT_INI_ARRAY(&ah->iniBank1, ar5416Bank1_9100,
- ARRAY_SIZE(ar5416Bank1_9100), 2);
- INIT_INI_ARRAY(&ah->iniBank2, ar5416Bank2_9100,
- ARRAY_SIZE(ar5416Bank2_9100), 2);
- INIT_INI_ARRAY(&ah->iniBank3, ar5416Bank3_9100,
- ARRAY_SIZE(ar5416Bank3_9100), 3);
- INIT_INI_ARRAY(&ah->iniBank6, ar5416Bank6_9100,
- ARRAY_SIZE(ar5416Bank6_9100), 3);
- INIT_INI_ARRAY(&ah->iniBank6TPC, ar5416Bank6TPC_9100,
- ARRAY_SIZE(ar5416Bank6TPC_9100), 3);
- INIT_INI_ARRAY(&ah->iniBank7, ar5416Bank7_9100,
- ARRAY_SIZE(ar5416Bank7_9100), 2);
- INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac_9100,
- ARRAY_SIZE(ar5416Addac_9100), 2);
- } else {
- INIT_INI_ARRAY(&ah->iniModes, ar5416Modes,
- ARRAY_SIZE(ar5416Modes), 6);
- INIT_INI_ARRAY(&ah->iniCommon, ar5416Common,
- ARRAY_SIZE(ar5416Common), 2);
- INIT_INI_ARRAY(&ah->iniBank0, ar5416Bank0,
- ARRAY_SIZE(ar5416Bank0), 2);
- INIT_INI_ARRAY(&ah->iniBB_RfGain, ar5416BB_RfGain,
- ARRAY_SIZE(ar5416BB_RfGain), 3);
- INIT_INI_ARRAY(&ah->iniBank1, ar5416Bank1,
- ARRAY_SIZE(ar5416Bank1), 2);
- INIT_INI_ARRAY(&ah->iniBank2, ar5416Bank2,
- ARRAY_SIZE(ar5416Bank2), 2);
- INIT_INI_ARRAY(&ah->iniBank3, ar5416Bank3,
- ARRAY_SIZE(ar5416Bank3), 3);
- INIT_INI_ARRAY(&ah->iniBank6, ar5416Bank6,
- ARRAY_SIZE(ar5416Bank6), 3);
- INIT_INI_ARRAY(&ah->iniBank6TPC, ar5416Bank6TPC,
- ARRAY_SIZE(ar5416Bank6TPC), 3);
- INIT_INI_ARRAY(&ah->iniBank7, ar5416Bank7,
- ARRAY_SIZE(ar5416Bank7), 2);
- INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac,
- ARRAY_SIZE(ar5416Addac), 2);
- }
-}
-
-static void ath9k_hw_init_mode_gain_regs(struct ath_hw *ah)
-{
- if (AR_SREV_9287_11_OR_LATER(ah))
- INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9287Modes_rx_gain_9287_1_1,
- ARRAY_SIZE(ar9287Modes_rx_gain_9287_1_1), 6);
- else if (AR_SREV_9287_10(ah))
- INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9287Modes_rx_gain_9287_1_0,
- ARRAY_SIZE(ar9287Modes_rx_gain_9287_1_0), 6);
- else if (AR_SREV_9280_20(ah))
- ath9k_hw_init_rxgain_ini(ah);
-
- if (AR_SREV_9287_11_OR_LATER(ah)) {
- INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9287Modes_tx_gain_9287_1_1,
- ARRAY_SIZE(ar9287Modes_tx_gain_9287_1_1), 6);
- } else if (AR_SREV_9287_10(ah)) {
- INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9287Modes_tx_gain_9287_1_0,
- ARRAY_SIZE(ar9287Modes_tx_gain_9287_1_0), 6);
- } else if (AR_SREV_9280_20(ah)) {
- ath9k_hw_init_txgain_ini(ah);
- } else if (AR_SREV_9285_12_OR_LATER(ah)) {
- u32 txgain_type = ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE);
-
- /* txgain table */
- if (txgain_type == AR5416_EEP_TXGAIN_HIGH_POWER) {
- INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9285Modes_high_power_tx_gain_9285_1_2,
- ARRAY_SIZE(ar9285Modes_high_power_tx_gain_9285_1_2), 6);
- } else {
- INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9285Modes_original_tx_gain_9285_1_2,
- ARRAY_SIZE(ar9285Modes_original_tx_gain_9285_1_2), 6);
- }
-
- }
-}
-
-static void ath9k_hw_init_eeprom_fix(struct ath_hw *ah)
-{
- u32 i, j;
-
- if (ah->hw_version.devid == AR9280_DEVID_PCI) {
-
- /* EEPROM Fixup */
- for (i = 0; i < ah->iniModes.ia_rows; i++) {
- u32 reg = INI_RA(&ah->iniModes, i, 0);
-
- for (j = 1; j < ah->iniModes.ia_columns; j++) {
- u32 val = INI_RA(&ah->iniModes, i, j);
-
- INI_RA(&ah->iniModes, i, j) =
- ath9k_hw_ini_fixup(ah,
- &ah->eeprom.def,
- reg, val);
- }
- }
- }
+ if (AR_SREV_9300_20_OR_LATER(ah))
+ ar9003_hw_attach_ops(ah);
+ else
+ ar9002_hw_attach_ops(ah);
}
-int ath9k_hw_init(struct ath_hw *ah)
+/* Called for all hardware families */
+static int __ath9k_hw_init(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
int r = 0;
- if (!ath9k_hw_devid_supported(ah->hw_version.devid)) {
- ath_print(common, ATH_DBG_FATAL,
- "Unsupported device ID: 0x%0x\n",
- ah->hw_version.devid);
- return -EOPNOTSUPP;
- }
-
- ath9k_hw_init_defaults(ah);
- ath9k_hw_init_config(ah);
+ if (ah->hw_version.devid == AR5416_AR9100_DEVID)
+ ah->hw_version.macVersion = AR_SREV_VERSION_9100;
if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) {
ath_print(common, ATH_DBG_FATAL,
@@ -856,6 +525,11 @@ int ath9k_hw_init(struct ath_hw *ah)
return -EIO;
}
+ ath9k_hw_init_defaults(ah);
+ ath9k_hw_init_config(ah);
+
+ ath9k_hw_attach_ops(ah);
+
if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) {
ath_print(common, ATH_DBG_FATAL, "Couldn't wakeup chip\n");
return -EIO;
@@ -880,7 +554,7 @@ int ath9k_hw_init(struct ath_hw *ah)
else
ah->config.max_txtrig_level = MAX_TX_FIFO_THRESHOLD;
- if (!ath9k_hw_macversion_supported(ah->hw_version.macVersion)) {
+ if (!ath9k_hw_macversion_supported(ah)) {
ath_print(common, ATH_DBG_FATAL,
"Mac Chip Rev 0x%02x.%x is not supported by "
"this driver\n", ah->hw_version.macVersion,
@@ -888,45 +562,45 @@ int ath9k_hw_init(struct ath_hw *ah)
return -EOPNOTSUPP;
}
- if (AR_SREV_9100(ah)) {
- ah->iq_caldata.calData = &iq_cal_multi_sample;
- ah->supp_cals = IQ_MISMATCH_CAL;
- ah->is_pciexpress = false;
- }
-
- if (AR_SREV_9271(ah))
+ if (AR_SREV_9271(ah) || AR_SREV_9100(ah))
ah->is_pciexpress = false;
ah->hw_version.phyRev = REG_READ(ah, AR_PHY_CHIP_ID);
-
ath9k_hw_init_cal_settings(ah);
ah->ani_function = ATH9K_ANI_ALL;
- if (AR_SREV_9280_10_OR_LATER(ah)) {
+ if (AR_SREV_9280_10_OR_LATER(ah) && !AR_SREV_9300_20_OR_LATER(ah))
ah->ani_function &= ~ATH9K_ANI_NOISE_IMMUNITY_LEVEL;
- ah->ath9k_hw_rf_set_freq = &ath9k_hw_ar9280_set_channel;
- ah->ath9k_hw_spur_mitigate_freq = &ath9k_hw_9280_spur_mitigate;
- } else {
- ah->ath9k_hw_rf_set_freq = &ath9k_hw_set_channel;
- ah->ath9k_hw_spur_mitigate_freq = &ath9k_hw_spur_mitigate;
- }
ath9k_hw_init_mode_regs(ah);
+ /*
+ * Configure PCIe after INI init. SERDES values now come from the INI
+ * file. This enables PCIe low power mode.
+ */
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
+ u32 regval;
+ unsigned int i;
+
+ /* Set bits 16 and 17 in the AR_WA register. */
+ regval = REG_READ(ah, AR_WA);
+ regval |= 0x00030000;
+ REG_WRITE(ah, AR_WA, regval);
+
+ for (i = 0; i < ah->iniPcieSerdesLowPower.ia_rows; i++) {
+ REG_WRITE(ah,
+ INI_RA(&ah->iniPcieSerdesLowPower, i, 0),
+ INI_RA(&ah->iniPcieSerdesLowPower, i, 1));
+ }
+ }
+
if (ah->is_pciexpress)
ath9k_hw_configpcipowersave(ah, 0, 0);
else
ath9k_hw_disablepcie(ah);
- /* Support for Japan ch.14 (2484) spread */
- if (AR_SREV_9287_11_OR_LATER(ah)) {
- INIT_INI_ARRAY(&ah->iniCckfirNormal,
- ar9287Common_normal_cck_fir_coeff_92871_1,
- ARRAY_SIZE(ar9287Common_normal_cck_fir_coeff_92871_1), 2);
- INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
- ar9287Common_japan_2484_cck_fir_coeff_92871_1,
- ARRAY_SIZE(ar9287Common_japan_2484_cck_fir_coeff_92871_1), 2);
- }
+ if (!AR_SREV_9300_20_OR_LATER(ah))
+ ar9002_hw_cck_chan14_spread(ah);
r = ath9k_hw_post_init(ah);
if (r)
@@ -937,8 +611,6 @@ int ath9k_hw_init(struct ath_hw *ah)
if (r)
return r;
- ath9k_hw_init_eeprom_fix(ah);
-
r = ath9k_hw_init_macaddr(ah);
if (r) {
ath_print(common, ATH_DBG_FATAL,
@@ -951,6 +623,9 @@ int ath9k_hw_init(struct ath_hw *ah)
else
ah->tx_trig_level = (AR_FTRIG_512B >> AR_FTRIG_S);
+ if (AR_SREV_9300_20_OR_LATER(ah))
+ ar9003_hw_set_nf_limits(ah);
+
ath9k_init_nfcal_hist_buffer(ah);
common->state = ATH_HW_INITIALIZED;
@@ -958,24 +633,50 @@ int ath9k_hw_init(struct ath_hw *ah)
return 0;
}
-static void ath9k_hw_init_bb(struct ath_hw *ah,
- struct ath9k_channel *chan)
+int ath9k_hw_init(struct ath_hw *ah)
{
- u32 synthDelay;
+ int ret;
+ struct ath_common *common = ath9k_hw_common(ah);
- synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY;
- if (IS_CHAN_B(chan))
- synthDelay = (4 * synthDelay) / 22;
- else
- synthDelay /= 10;
+ /* These are all the AR5008/AR9001/AR9002 hardware family of chipsets */
+ switch (ah->hw_version.devid) {
+ case AR5416_DEVID_PCI:
+ case AR5416_DEVID_PCIE:
+ case AR5416_AR9100_DEVID:
+ case AR9160_DEVID_PCI:
+ case AR9280_DEVID_PCI:
+ case AR9280_DEVID_PCIE:
+ case AR9285_DEVID_PCIE:
+ case AR9287_DEVID_PCI:
+ case AR9287_DEVID_PCIE:
+ case AR2427_DEVID_PCIE:
+ case AR9300_DEVID_PCIE:
+ break;
+ default:
+ if (common->bus_ops->ath_bus_type == ATH_USB)
+ break;
+ ath_print(common, ATH_DBG_FATAL,
+ "Hardware device ID 0x%04x not supported\n",
+ ah->hw_version.devid);
+ return -EOPNOTSUPP;
+ }
- REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
+ ret = __ath9k_hw_init(ah);
+ if (ret) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to initialize hardware; "
+ "initialization status: %d\n", ret);
+ return ret;
+ }
- udelay(synthDelay + BASE_ACTIVATE_DELAY);
+ return 0;
}
+EXPORT_SYMBOL(ath9k_hw_init);
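/*
 * Sketch of the intended calling convention after the split above: a bus
 * probe path calls the public ath9k_hw_init(), which screens PCI/PCIe
 * device IDs (USB devices pass through the default case) and then runs the
 * family-independent __ath9k_hw_init(). The probe context here is
 * hypothetical.
 */
static int example_probe_hw(struct ath_hw *ah)
{
	int ret;

	ret = ath9k_hw_init(ah);
	if (ret)
		return ret;	/* ath9k_hw_init() already logged the reason */

	return 0;
}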
static void ath9k_hw_init_qos(struct ath_hw *ah)
{
+ ENABLE_REGWRITE_BUFFER(ah);
+
REG_WRITE(ah, AR_MIC_QOS_CONTROL, 0x100aa);
REG_WRITE(ah, AR_MIC_QOS_SELECT, 0x3210);
@@ -989,105 +690,22 @@ static void ath9k_hw_init_qos(struct ath_hw *ah)
REG_WRITE(ah, AR_TXOP_4_7, 0xFFFFFFFF);
REG_WRITE(ah, AR_TXOP_8_11, 0xFFFFFFFF);
REG_WRITE(ah, AR_TXOP_12_15, 0xFFFFFFFF);
-}
-
-static void ath9k_hw_change_target_baud(struct ath_hw *ah, u32 freq, u32 baud)
-{
- u32 lcr;
- u32 baud_divider = freq * 1000 * 1000 / 16 / baud;
-
- lcr = REG_READ(ah , 0x5100c);
- lcr |= 0x80;
- REG_WRITE(ah, 0x5100c, lcr);
- REG_WRITE(ah, 0x51004, (baud_divider >> 8));
- REG_WRITE(ah, 0x51000, (baud_divider & 0xff));
-
- lcr &= ~0x80;
- REG_WRITE(ah, 0x5100c, lcr);
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
}
static void ath9k_hw_init_pll(struct ath_hw *ah,
struct ath9k_channel *chan)
{
- u32 pll;
-
- if (AR_SREV_9100(ah)) {
- if (chan && IS_CHAN_5GHZ(chan))
- pll = 0x1450;
- else
- pll = 0x1458;
- } else {
- if (AR_SREV_9280_10_OR_LATER(ah)) {
- pll = SM(0x5, AR_RTC_9160_PLL_REFDIV);
-
- if (chan && IS_CHAN_HALF_RATE(chan))
- pll |= SM(0x1, AR_RTC_9160_PLL_CLKSEL);
- else if (chan && IS_CHAN_QUARTER_RATE(chan))
- pll |= SM(0x2, AR_RTC_9160_PLL_CLKSEL);
-
- if (chan && IS_CHAN_5GHZ(chan)) {
- pll |= SM(0x28, AR_RTC_9160_PLL_DIV);
-
-
- if (AR_SREV_9280_20(ah)) {
- if (((chan->channel % 20) == 0)
- || ((chan->channel % 10) == 0))
- pll = 0x2850;
- else
- pll = 0x142c;
- }
- } else {
- pll |= SM(0x2c, AR_RTC_9160_PLL_DIV);
- }
-
- } else if (AR_SREV_9160_10_OR_LATER(ah)) {
-
- pll = SM(0x5, AR_RTC_9160_PLL_REFDIV);
-
- if (chan && IS_CHAN_HALF_RATE(chan))
- pll |= SM(0x1, AR_RTC_9160_PLL_CLKSEL);
- else if (chan && IS_CHAN_QUARTER_RATE(chan))
- pll |= SM(0x2, AR_RTC_9160_PLL_CLKSEL);
-
- if (chan && IS_CHAN_5GHZ(chan))
- pll |= SM(0x50, AR_RTC_9160_PLL_DIV);
- else
- pll |= SM(0x58, AR_RTC_9160_PLL_DIV);
- } else {
- pll = AR_RTC_PLL_REFDIV_5 | AR_RTC_PLL_DIV2;
-
- if (chan && IS_CHAN_HALF_RATE(chan))
- pll |= SM(0x1, AR_RTC_PLL_CLKSEL);
- else if (chan && IS_CHAN_QUARTER_RATE(chan))
- pll |= SM(0x2, AR_RTC_PLL_CLKSEL);
+ u32 pll = ath9k_hw_compute_pll_control(ah, chan);
- if (chan && IS_CHAN_5GHZ(chan))
- pll |= SM(0xa, AR_RTC_PLL_DIV);
- else
- pll |= SM(0xb, AR_RTC_PLL_DIV);
- }
- }
REG_WRITE(ah, AR_RTC_PLL_CONTROL, pll);
/* Switch the core clock for ar9271 to 117Mhz */
if (AR_SREV_9271(ah)) {
- if ((pll == 0x142c) || (pll == 0x2850) ) {
- udelay(500);
- /* set CLKOBS to output AHB clock */
- REG_WRITE(ah, 0x7020, 0xe);
- /*
- * 0x304: 117Mhz, ahb_ratio: 1x1
- * 0x306: 40Mhz, ahb_ratio: 1x1
- */
- REG_WRITE(ah, 0x50040, 0x304);
- /*
- * makes adjustments for the baud dividor to keep the
- * targetted baud rate based on the used core clock.
- */
- ath9k_hw_change_target_baud(ah, AR9271_CORE_CLOCK,
- AR9271_TARGET_BAUD_RATE);
- }
+ udelay(500);
+ REG_WRITE(ah, 0x50040, 0x304);
}
udelay(RTC_PLL_SETTLE_DELAY);
@@ -1095,70 +713,58 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
REG_WRITE(ah, AR_RTC_SLEEP_CLK, AR_RTC_FORCE_DERIVED_CLK);
}
-static void ath9k_hw_init_chain_masks(struct ath_hw *ah)
-{
- int rx_chainmask, tx_chainmask;
-
- rx_chainmask = ah->rxchainmask;
- tx_chainmask = ah->txchainmask;
-
- switch (rx_chainmask) {
- case 0x5:
- REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
- AR_PHY_SWAP_ALT_CHAIN);
- case 0x3:
- if (ah->hw_version.macVersion == AR_SREV_REVISION_5416_10) {
- REG_WRITE(ah, AR_PHY_RX_CHAINMASK, 0x7);
- REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, 0x7);
- break;
- }
- case 0x1:
- case 0x2:
- case 0x7:
- REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx_chainmask);
- REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx_chainmask);
- break;
- default:
- break;
- }
-
- REG_WRITE(ah, AR_SELFGEN_MASK, tx_chainmask);
- if (tx_chainmask == 0x5) {
- REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
- AR_PHY_SWAP_ALT_CHAIN);
- }
- if (AR_SREV_9100(ah))
- REG_WRITE(ah, AR_PHY_ANALOG_SWAP,
- REG_READ(ah, AR_PHY_ANALOG_SWAP) | 0x00000001);
-}
-
static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah,
enum nl80211_iftype opmode)
{
- ah->mask_reg = AR_IMR_TXERR |
+ u32 imr_reg = AR_IMR_TXERR |
AR_IMR_TXURN |
AR_IMR_RXERR |
AR_IMR_RXORN |
AR_IMR_BCNMISC;
- if (ah->config.rx_intr_mitigation)
- ah->mask_reg |= AR_IMR_RXINTM | AR_IMR_RXMINTR;
- else
- ah->mask_reg |= AR_IMR_RXOK;
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
+ imr_reg |= AR_IMR_RXOK_HP;
+ if (ah->config.rx_intr_mitigation)
+ imr_reg |= AR_IMR_RXINTM | AR_IMR_RXMINTR;
+ else
+ imr_reg |= AR_IMR_RXOK_LP;
- ah->mask_reg |= AR_IMR_TXOK;
+ } else {
+ if (ah->config.rx_intr_mitigation)
+ imr_reg |= AR_IMR_RXINTM | AR_IMR_RXMINTR;
+ else
+ imr_reg |= AR_IMR_RXOK;
+ }
+
+ if (ah->config.tx_intr_mitigation)
+ imr_reg |= AR_IMR_TXINTM | AR_IMR_TXMINTR;
+ else
+ imr_reg |= AR_IMR_TXOK;
if (opmode == NL80211_IFTYPE_AP)
- ah->mask_reg |= AR_IMR_MIB;
+ imr_reg |= AR_IMR_MIB;
+
+ ENABLE_REGWRITE_BUFFER(ah);
- REG_WRITE(ah, AR_IMR, ah->mask_reg);
- REG_WRITE(ah, AR_IMR_S2, REG_READ(ah, AR_IMR_S2) | AR_IMR_S2_GTT);
+ REG_WRITE(ah, AR_IMR, imr_reg);
+ ah->imrs2_reg |= AR_IMR_S2_GTT;
+ REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);
if (!AR_SREV_9100(ah)) {
REG_WRITE(ah, AR_INTR_SYNC_CAUSE, 0xFFFFFFFF);
REG_WRITE(ah, AR_INTR_SYNC_ENABLE, AR_INTR_SYNC_DEFAULT);
REG_WRITE(ah, AR_INTR_SYNC_MASK, 0);
}
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
+ REG_WRITE(ah, AR_INTR_PRIO_ASYNC_ENABLE, 0);
+ REG_WRITE(ah, AR_INTR_PRIO_ASYNC_MASK, 0);
+ REG_WRITE(ah, AR_INTR_PRIO_SYNC_ENABLE, 0);
+ REG_WRITE(ah, AR_INTR_PRIO_SYNC_MASK, 0);
+ }
}
static void ath9k_hw_setslottime(struct ath_hw *ah, u32 us)
@@ -1241,19 +847,13 @@ void ath9k_hw_deinit(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
- if (common->state <= ATH_HW_INITIALIZED)
+ if (common->state < ATH_HW_INITIALIZED)
goto free_hw;
- if (!AR_SREV_9100(ah))
- ath9k_hw_ani_disable(ah);
-
ath9k_hw_setpower(ah, ATH9K_PM_FULL_SLEEP);
free_hw:
- if (!AR_SREV_9280_10_OR_LATER(ah))
- ath9k_hw_rf_free_ext_banks(ah);
- kfree(ah);
- ah = NULL;
+ ath9k_hw_rf_free_ext_banks(ah);
}
EXPORT_SYMBOL(ath9k_hw_deinit);
@@ -1261,136 +861,7 @@ EXPORT_SYMBOL(ath9k_hw_deinit);
/* INI */
/*******/
-static void ath9k_hw_override_ini(struct ath_hw *ah,
- struct ath9k_channel *chan)
-{
- u32 val;
-
- if (AR_SREV_9271(ah)) {
- /*
- * Enable spectral scan to solution for issues with stuck
- * beacons on AR9271 1.0. The beacon stuck issue is not seeon on
- * AR9271 1.1
- */
- if (AR_SREV_9271_10(ah)) {
- val = REG_READ(ah, AR_PHY_SPECTRAL_SCAN) |
- AR_PHY_SPECTRAL_SCAN_ENABLE;
- REG_WRITE(ah, AR_PHY_SPECTRAL_SCAN, val);
- }
- else if (AR_SREV_9271_11(ah))
- /*
- * change AR_PHY_RF_CTL3 setting to fix MAC issue
- * present on AR9271 1.1
- */
- REG_WRITE(ah, AR_PHY_RF_CTL3, 0x3a020001);
- return;
- }
-
- /*
- * Set the RX_ABORT and RX_DIS and clear if off only after
- * RXE is set for MAC. This prevents frames with corrupted
- * descriptor status.
- */
- REG_SET_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
-
- if (AR_SREV_9280_10_OR_LATER(ah)) {
- val = REG_READ(ah, AR_PCU_MISC_MODE2) &
- (~AR_PCU_MISC_MODE2_HWWAR1);
-
- if (AR_SREV_9287_10_OR_LATER(ah))
- val = val & (~AR_PCU_MISC_MODE2_HWWAR2);
-
- REG_WRITE(ah, AR_PCU_MISC_MODE2, val);
- }
-
- if (!AR_SREV_5416_20_OR_LATER(ah) ||
- AR_SREV_9280_10_OR_LATER(ah))
- return;
- /*
- * Disable BB clock gating
- * Necessary to avoid issues on AR5416 2.0
- */
- REG_WRITE(ah, 0x9800 + (651 << 2), 0x11);
-
- /*
- * Disable RIFS search on some chips to avoid baseband
- * hang issues.
- */
- if (AR_SREV_9100(ah) || AR_SREV_9160(ah)) {
- val = REG_READ(ah, AR_PHY_HEAVY_CLIP_FACTOR_RIFS);
- val &= ~AR_PHY_RIFS_INIT_DELAY;
- REG_WRITE(ah, AR_PHY_HEAVY_CLIP_FACTOR_RIFS, val);
- }
-}
-
-static u32 ath9k_hw_def_ini_fixup(struct ath_hw *ah,
- struct ar5416_eeprom_def *pEepData,
- u32 reg, u32 value)
-{
- struct base_eep_header *pBase = &(pEepData->baseEepHeader);
- struct ath_common *common = ath9k_hw_common(ah);
-
- switch (ah->hw_version.devid) {
- case AR9280_DEVID_PCI:
- if (reg == 0x7894) {
- ath_print(common, ATH_DBG_EEPROM,
- "ini VAL: %x EEPROM: %x\n", value,
- (pBase->version & 0xff));
-
- if ((pBase->version & 0xff) > 0x0a) {
- ath_print(common, ATH_DBG_EEPROM,
- "PWDCLKIND: %d\n",
- pBase->pwdclkind);
- value &= ~AR_AN_TOP2_PWDCLKIND;
- value |= AR_AN_TOP2_PWDCLKIND &
- (pBase->pwdclkind << AR_AN_TOP2_PWDCLKIND_S);
- } else {
- ath_print(common, ATH_DBG_EEPROM,
- "PWDCLKIND Earlier Rev\n");
- }
-
- ath_print(common, ATH_DBG_EEPROM,
- "final ini VAL: %x\n", value);
- }
- break;
- }
-
- return value;
-}
-
-static u32 ath9k_hw_ini_fixup(struct ath_hw *ah,
- struct ar5416_eeprom_def *pEepData,
- u32 reg, u32 value)
-{
- if (ah->eep_map == EEP_MAP_4KBITS)
- return value;
- else
- return ath9k_hw_def_ini_fixup(ah, pEepData, reg, value);
-}
-
-static void ath9k_olc_init(struct ath_hw *ah)
-{
- u32 i;
-
- if (OLC_FOR_AR9287_10_LATER) {
- REG_SET_BIT(ah, AR_PHY_TX_PWRCTRL9,
- AR_PHY_TX_PWRCTRL9_RES_DC_REMOVAL);
- ath9k_hw_analog_shift_rmw(ah, AR9287_AN_TXPC0,
- AR9287_AN_TXPC0_TXPCMODE,
- AR9287_AN_TXPC0_TXPCMODE_S,
- AR9287_AN_TXPC0_TXPCMODE_TEMPSENSE);
- udelay(100);
- } else {
- for (i = 0; i < AR9280_TX_GAIN_TABLE_SIZE; i++)
- ah->originalGain[i] =
- MS(REG_READ(ah, AR_PHY_TX_GAIN_TBL1 + i * 4),
- AR_PHY_TX_GAIN);
- ah->PDADCdelta = 0;
- }
-}
-
-static u32 ath9k_regd_get_ctl(struct ath_regulatory *reg,
- struct ath9k_channel *chan)
+u32 ath9k_regd_get_ctl(struct ath_regulatory *reg, struct ath9k_channel *chan)
{
u32 ctl = ath_regd_get_band_ctl(reg, chan->chan->band);
@@ -1404,173 +875,24 @@ static u32 ath9k_regd_get_ctl(struct ath_regulatory *reg,
return ctl;
}
-static int ath9k_hw_process_ini(struct ath_hw *ah,
- struct ath9k_channel *chan)
-{
- struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
- int i, regWrites = 0;
- struct ieee80211_channel *channel = chan->chan;
- u32 modesIndex, freqIndex;
-
- switch (chan->chanmode) {
- case CHANNEL_A:
- case CHANNEL_A_HT20:
- modesIndex = 1;
- freqIndex = 1;
- break;
- case CHANNEL_A_HT40PLUS:
- case CHANNEL_A_HT40MINUS:
- modesIndex = 2;
- freqIndex = 1;
- break;
- case CHANNEL_G:
- case CHANNEL_G_HT20:
- case CHANNEL_B:
- modesIndex = 4;
- freqIndex = 2;
- break;
- case CHANNEL_G_HT40PLUS:
- case CHANNEL_G_HT40MINUS:
- modesIndex = 3;
- freqIndex = 2;
- break;
-
- default:
- return -EINVAL;
- }
-
- REG_WRITE(ah, AR_PHY(0), 0x00000007);
- REG_WRITE(ah, AR_PHY_ADC_SERIAL_CTL, AR_PHY_SEL_EXTERNAL_RADIO);
- ah->eep_ops->set_addac(ah, chan);
-
- if (AR_SREV_5416_22_OR_LATER(ah)) {
- REG_WRITE_ARRAY(&ah->iniAddac, 1, regWrites);
- } else {
- struct ar5416IniArray temp;
- u32 addacSize =
- sizeof(u32) * ah->iniAddac.ia_rows *
- ah->iniAddac.ia_columns;
-
- memcpy(ah->addac5416_21,
- ah->iniAddac.ia_array, addacSize);
-
- (ah->addac5416_21)[31 * ah->iniAddac.ia_columns + 1] = 0;
-
- temp.ia_array = ah->addac5416_21;
- temp.ia_columns = ah->iniAddac.ia_columns;
- temp.ia_rows = ah->iniAddac.ia_rows;
- REG_WRITE_ARRAY(&temp, 1, regWrites);
- }
-
- REG_WRITE(ah, AR_PHY_ADC_SERIAL_CTL, AR_PHY_SEL_INTERNAL_ADDAC);
-
- for (i = 0; i < ah->iniModes.ia_rows; i++) {
- u32 reg = INI_RA(&ah->iniModes, i, 0);
- u32 val = INI_RA(&ah->iniModes, i, modesIndex);
-
- REG_WRITE(ah, reg, val);
-
- if (reg >= 0x7800 && reg < 0x78a0
- && ah->config.analog_shiftreg) {
- udelay(100);
- }
-
- DO_DELAY(regWrites);
- }
-
- if (AR_SREV_9280(ah) || AR_SREV_9287_10_OR_LATER(ah))
- REG_WRITE_ARRAY(&ah->iniModesRxGain, modesIndex, regWrites);
-
- if (AR_SREV_9280(ah) || AR_SREV_9285_12_OR_LATER(ah) ||
- AR_SREV_9287_10_OR_LATER(ah))
- REG_WRITE_ARRAY(&ah->iniModesTxGain, modesIndex, regWrites);
-
- for (i = 0; i < ah->iniCommon.ia_rows; i++) {
- u32 reg = INI_RA(&ah->iniCommon, i, 0);
- u32 val = INI_RA(&ah->iniCommon, i, 1);
-
- REG_WRITE(ah, reg, val);
-
- if (reg >= 0x7800 && reg < 0x78a0
- && ah->config.analog_shiftreg) {
- udelay(100);
- }
-
- DO_DELAY(regWrites);
- }
-
- ath9k_hw_write_regs(ah, freqIndex, regWrites);
-
- if (AR_SREV_9271_10(ah))
- REG_WRITE_ARRAY(&ah->iniModes_9271_1_0_only,
- modesIndex, regWrites);
-
- if (AR_SREV_9280_20(ah) && IS_CHAN_A_5MHZ_SPACED(chan)) {
- REG_WRITE_ARRAY(&ah->iniModesAdditional, modesIndex,
- regWrites);
- }
-
- ath9k_hw_override_ini(ah, chan);
- ath9k_hw_set_regs(ah, chan);
- ath9k_hw_init_chain_masks(ah);
-
- if (OLC_FOR_AR9280_20_LATER)
- ath9k_olc_init(ah);
-
- ah->eep_ops->set_txpower(ah, chan,
- ath9k_regd_get_ctl(regulatory, chan),
- channel->max_antenna_gain * 2,
- channel->max_power * 2,
- min((u32) MAX_RATE_POWER,
- (u32) regulatory->power_limit));
-
- if (!ath9k_hw_set_rf_regs(ah, chan, freqIndex)) {
- ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
- "ar5416SetRfRegs failed\n");
- return -EIO;
- }
-
- return 0;
-}
-
/****************************************/
/* Reset and Channel Switching Routines */
/****************************************/
-static void ath9k_hw_set_rfmode(struct ath_hw *ah, struct ath9k_channel *chan)
-{
- u32 rfMode = 0;
-
- if (chan == NULL)
- return;
-
- rfMode |= (IS_CHAN_B(chan) || IS_CHAN_G(chan))
- ? AR_PHY_MODE_DYNAMIC : AR_PHY_MODE_OFDM;
-
- if (!AR_SREV_9280_10_OR_LATER(ah))
- rfMode |= (IS_CHAN_5GHZ(chan)) ?
- AR_PHY_MODE_RF5GHZ : AR_PHY_MODE_RF2GHZ;
-
- if (AR_SREV_9280_20(ah) && IS_CHAN_A_5MHZ_SPACED(chan))
- rfMode |= (AR_PHY_MODE_DYNAMIC | AR_PHY_MODE_DYN_CCK_DISABLE);
-
- REG_WRITE(ah, AR_PHY_MODE, rfMode);
-}
-
-static void ath9k_hw_mark_phy_inactive(struct ath_hw *ah)
-{
- REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS);
-}
-
static inline void ath9k_hw_set_dma(struct ath_hw *ah)
{
+ struct ath_common *common = ath9k_hw_common(ah);
u32 regval;
+ ENABLE_REGWRITE_BUFFER(ah);
+
/*
* set AHB_MODE not to do cacheline prefetches
*/
- regval = REG_READ(ah, AR_AHB_MODE);
- REG_WRITE(ah, AR_AHB_MODE, regval | AR_AHB_PREFETCH_RD_EN);
+ if (!AR_SREV_9300_20_OR_LATER(ah)) {
+ regval = REG_READ(ah, AR_AHB_MODE);
+ REG_WRITE(ah, AR_AHB_MODE, regval | AR_AHB_PREFETCH_RD_EN);
+ }
/*
* let mac dma reads be in 128 byte chunks
@@ -1578,12 +900,18 @@ static inline void ath9k_hw_set_dma(struct ath_hw *ah)
regval = REG_READ(ah, AR_TXCFG) & ~AR_TXCFG_DMASZ_MASK;
REG_WRITE(ah, AR_TXCFG, regval | AR_TXCFG_DMASZ_128B);
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
/*
* Restore TX Trigger Level to its pre-reset value.
* The initial value depends on whether aggregation is enabled, and is
* adjusted whenever underruns are detected.
*/
- REG_RMW_FIELD(ah, AR_TXCFG, AR_FTRIG, ah->tx_trig_level);
+ if (!AR_SREV_9300_20_OR_LATER(ah))
+ REG_RMW_FIELD(ah, AR_TXCFG, AR_FTRIG, ah->tx_trig_level);
+
+ ENABLE_REGWRITE_BUFFER(ah);
/*
* let mac dma writes be in 128 byte chunks
@@ -1596,6 +924,14 @@ static inline void ath9k_hw_set_dma(struct ath_hw *ah)
*/
REG_WRITE(ah, AR_RXFIFO_CFG, 0x200);
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
+ REG_RMW_FIELD(ah, AR_RXBP_THRESH, AR_RXBP_THRESH_HP, 0x1);
+ REG_RMW_FIELD(ah, AR_RXBP_THRESH, AR_RXBP_THRESH_LP, 0x1);
+
+ ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize -
+ ah->caps.rx_status_len);
+ }
+
/*
* reduce the number of usable entries in PCU TXBUF to avoid
* wrap around issues.
@@ -1611,6 +947,12 @@ static inline void ath9k_hw_set_dma(struct ath_hw *ah)
REG_WRITE(ah, AR_PCU_TXBUF_CTRL,
AR_PCU_TXBUF_CTRL_USABLE_SIZE);
}
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
+ if (AR_SREV_9300_20_OR_LATER(ah))
+ ath9k_hw_reset_txstatus_ring(ah);
}
static void ath9k_hw_set_operating_mode(struct ath_hw *ah, int opmode)
@@ -1638,10 +980,8 @@ static void ath9k_hw_set_operating_mode(struct ath_hw *ah, int opmode)
}
}
-static inline void ath9k_hw_get_delta_slope_vals(struct ath_hw *ah,
- u32 coef_scaled,
- u32 *coef_mantissa,
- u32 *coef_exponent)
+void ath9k_hw_get_delta_slope_vals(struct ath_hw *ah, u32 coef_scaled,
+ u32 *coef_mantissa, u32 *coef_exponent)
{
u32 coef_exp, coef_man;
@@ -1657,40 +997,6 @@ static inline void ath9k_hw_get_delta_slope_vals(struct ath_hw *ah,
*coef_exponent = coef_exp - 16;
}
-static void ath9k_hw_set_delta_slope(struct ath_hw *ah,
- struct ath9k_channel *chan)
-{
- u32 coef_scaled, ds_coef_exp, ds_coef_man;
- u32 clockMhzScaled = 0x64000000;
- struct chan_centers centers;
-
- if (IS_CHAN_HALF_RATE(chan))
- clockMhzScaled = clockMhzScaled >> 1;
- else if (IS_CHAN_QUARTER_RATE(chan))
- clockMhzScaled = clockMhzScaled >> 2;
-
- ath9k_hw_get_channel_centers(ah, chan, &centers);
- coef_scaled = clockMhzScaled / centers.synth_center;
-
- ath9k_hw_get_delta_slope_vals(ah, coef_scaled, &ds_coef_man,
- &ds_coef_exp);
-
- REG_RMW_FIELD(ah, AR_PHY_TIMING3,
- AR_PHY_TIMING3_DSC_MAN, ds_coef_man);
- REG_RMW_FIELD(ah, AR_PHY_TIMING3,
- AR_PHY_TIMING3_DSC_EXP, ds_coef_exp);
-
- coef_scaled = (9 * coef_scaled) / 10;
-
- ath9k_hw_get_delta_slope_vals(ah, coef_scaled, &ds_coef_man,
- &ds_coef_exp);
-
- REG_RMW_FIELD(ah, AR_PHY_HALFGI,
- AR_PHY_HALFGI_DSC_MAN, ds_coef_man);
- REG_RMW_FIELD(ah, AR_PHY_HALFGI,
- AR_PHY_HALFGI_DSC_EXP, ds_coef_exp);
-}
-
static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
{
u32 rst_flags;
@@ -1704,6 +1010,8 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
(void)REG_READ(ah, AR_RTC_DERIVED_CLK);
}
+ ENABLE_REGWRITE_BUFFER(ah);
+
REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN |
AR_RTC_FORCE_WAKE_ON_INT);
@@ -1715,11 +1023,16 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
if (tmpReg &
(AR_INTR_SYNC_LOCAL_TIMEOUT |
AR_INTR_SYNC_RADM_CPL_TIMEOUT)) {
+ u32 val;
REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0);
- REG_WRITE(ah, AR_RC, AR_RC_AHB | AR_RC_HOSTIF);
- } else {
+
+ val = AR_RC_HOSTIF;
+ if (!AR_SREV_9300_20_OR_LATER(ah))
+ val |= AR_RC_AHB;
+ REG_WRITE(ah, AR_RC, val);
+
+ } else if (!AR_SREV_9300_20_OR_LATER(ah))
REG_WRITE(ah, AR_RC, AR_RC_AHB);
- }
rst_flags = AR_RTC_RC_MAC_WARM;
if (type == ATH9K_RESET_COLD)
@@ -1727,6 +1040,10 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
}
REG_WRITE(ah, AR_RTC_RC, rst_flags);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
udelay(50);
REG_WRITE(ah, AR_RTC_RC, 0);
@@ -1747,16 +1064,23 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah)
{
+ ENABLE_REGWRITE_BUFFER(ah);
+
REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN |
AR_RTC_FORCE_WAKE_ON_INT);
- if (!AR_SREV_9100(ah))
+ if (!AR_SREV_9100(ah) && !AR_SREV_9300_20_OR_LATER(ah))
REG_WRITE(ah, AR_RC, AR_RC_AHB);
REG_WRITE(ah, AR_RTC_RESET, 0);
- udelay(2);
- if (!AR_SREV_9100(ah))
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
+ if (!AR_SREV_9300_20_OR_LATER(ah))
+ udelay(2);
+
+ if (!AR_SREV_9100(ah) && !AR_SREV_9300_20_OR_LATER(ah))
REG_WRITE(ah, AR_RC, 0);
REG_WRITE(ah, AR_RTC_RESET, 1);
@@ -1792,34 +1116,6 @@ static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type)
}
}
-static void ath9k_hw_set_regs(struct ath_hw *ah, struct ath9k_channel *chan)
-{
- u32 phymode;
- u32 enableDacFifo = 0;
-
- if (AR_SREV_9285_10_OR_LATER(ah))
- enableDacFifo = (REG_READ(ah, AR_PHY_TURBO) &
- AR_PHY_FC_ENABLE_DAC_FIFO);
-
- phymode = AR_PHY_FC_HT_EN | AR_PHY_FC_SHORT_GI_40
- | AR_PHY_FC_SINGLE_HT_LTF1 | AR_PHY_FC_WALSH | enableDacFifo;
-
- if (IS_CHAN_HT40(chan)) {
- phymode |= AR_PHY_FC_DYN2040_EN;
-
- if ((chan->chanmode == CHANNEL_A_HT40PLUS) ||
- (chan->chanmode == CHANNEL_G_HT40PLUS))
- phymode |= AR_PHY_FC_DYN2040_PRI_CH;
-
- }
- REG_WRITE(ah, AR_PHY_TURBO, phymode);
-
- ath9k_hw_set11nmac2040(ah);
-
- REG_WRITE(ah, AR_GTXTO, 25 << AR_GTXTO_TIMEOUT_LIMIT_S);
- REG_WRITE(ah, AR_CST, 0xF << AR_CST_TIMEOUT_LIMIT_S);
-}
-
static bool ath9k_hw_chip_reset(struct ath_hw *ah,
struct ath9k_channel *chan)
{
@@ -1845,7 +1141,7 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
struct ath_common *common = ath9k_hw_common(ah);
struct ieee80211_channel *channel = chan->chan;
- u32 synthDelay, qnum;
+ u32 qnum;
int r;
for (qnum = 0; qnum < AR_NUM_QCU; qnum++) {
@@ -1857,17 +1153,15 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
}
}
- REG_WRITE(ah, AR_PHY_RFBUS_REQ, AR_PHY_RFBUS_REQ_EN);
- if (!ath9k_hw_wait(ah, AR_PHY_RFBUS_GRANT, AR_PHY_RFBUS_GRANT_EN,
- AR_PHY_RFBUS_GRANT_EN, AH_WAIT_TIMEOUT)) {
+ if (!ath9k_hw_rfbus_req(ah)) {
ath_print(common, ATH_DBG_FATAL,
"Could not kill baseband RX\n");
return false;
}
- ath9k_hw_set_regs(ah, chan);
+ ath9k_hw_set_channel_regs(ah, chan);
- r = ah->ath9k_hw_rf_set_freq(ah, chan);
+ r = ath9k_hw_rf_set_freq(ah, chan);
if (r) {
ath_print(common, ATH_DBG_FATAL,
"Failed to set channel\n");
@@ -1881,20 +1175,12 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
min((u32) MAX_RATE_POWER,
(u32) regulatory->power_limit));
- synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY;
- if (IS_CHAN_B(chan))
- synthDelay = (4 * synthDelay) / 22;
- else
- synthDelay /= 10;
-
- udelay(synthDelay + BASE_ACTIVATE_DELAY);
-
- REG_WRITE(ah, AR_PHY_RFBUS_REQ, 0);
+ ath9k_hw_rfbus_done(ah);
if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan))
ath9k_hw_set_delta_slope(ah, chan);
- ah->ath9k_hw_spur_mitigate_freq(ah, chan);
+ ath9k_hw_spur_mitigate_freq(ah, chan);
if (!chan->oneTimeCalsDone)
chan->oneTimeCalsDone = true;
@@ -1902,17 +1188,33 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
return true;
}
-static void ath9k_enable_rfkill(struct ath_hw *ah)
+bool ath9k_hw_check_alive(struct ath_hw *ah)
{
- REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL,
- AR_GPIO_INPUT_EN_VAL_RFSILENT_BB);
+ int count = 50;
+ u32 reg;
+
+ if (AR_SREV_9285_10_OR_LATER(ah))
+ return true;
+
+ do {
+ reg = REG_READ(ah, AR_OBS_BUS_1);
- REG_CLR_BIT(ah, AR_GPIO_INPUT_MUX2,
- AR_GPIO_INPUT_MUX2_RFSILENT);
+ if ((reg & 0x7E7FFFEF) == 0x00702400)
+ continue;
- ath9k_hw_cfg_gpio_input(ah, ah->rfkill_gpio);
- REG_SET_BIT(ah, AR_PHY_TEST, RFSILENT_BB);
+ switch (reg & 0x7E000B00) {
+ case 0x1E000000:
+ case 0x52000B00:
+ case 0x18000B00:
+ continue;
+ default:
+ return true;
+ }
+ } while (count-- > 0);
+
+ return false;
}
+EXPORT_SYMBOL(ath9k_hw_check_alive);
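/*
 * Sketch of the intended use of ath9k_hw_check_alive() exported above; the
 * watchdog context is hypothetical. A false return means the MAC observation
 * bus stayed in one of the stuck states polled for above, so the caller is
 * expected to schedule a full chip reset.
 */
static void example_watchdog_poll(struct ath_hw *ah)
{
	if (!ath9k_hw_check_alive(ah)) {
		/* queue a reset from the caller's own work context */
	}
}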
int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
bool bChannelChange)
@@ -1923,11 +1225,18 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
u32 saveDefAntenna;
u32 macStaId1;
u64 tsf = 0;
- int i, rx_chainmask, r;
+ int i, r;
ah->txchainmask = common->tx_chainmask;
ah->rxchainmask = common->rx_chainmask;
+ if (!ah->chip_fullsleep) {
+ ath9k_hw_abortpcurecv(ah);
+ if (!ath9k_hw_stopdmarecv(ah))
+ ath_print(common, ATH_DBG_XMIT,
+ "Failed to stop receive dma\n");
+ }
+
if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
return -EIO;
@@ -1940,8 +1249,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
(chan->channel != ah->curchan->channel) &&
((chan->channelFlags & CHANNEL_ALL) ==
(ah->curchan->channelFlags & CHANNEL_ALL)) &&
- !(AR_SREV_9280(ah) || IS_CHAN_A_5MHZ_SPACED(chan) ||
- IS_CHAN_A_5MHZ_SPACED(ah->curchan))) {
+ !AR_SREV_9280(ah)) {
if (ath9k_hw_channel_change(ah, chan)) {
ath9k_hw_loadnf(ah, ah->curchan);
@@ -1966,6 +1274,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
ath9k_hw_mark_phy_inactive(ah);
+ /* Only required on the first reset */
if (AR_SREV_9271(ah) && ah->htc_reset_init) {
REG_WRITE(ah,
AR9271_RESET_POWER_DOWN_CONTROL,
@@ -1978,6 +1287,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
return -EINVAL;
}
+ /* Only required on the first reset */
if (AR_SREV_9271(ah) && ah->htc_reset_init) {
ah->htc_reset_init = false;
REG_WRITE(ah,
@@ -1993,16 +1303,6 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
if (AR_SREV_9280_10_OR_LATER(ah))
REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL, AR_GPIO_JTAG_DISABLE);
- if (AR_SREV_9287_12_OR_LATER(ah)) {
- /* Enable ASYNC FIFO */
- REG_SET_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3,
- AR_MAC_PCU_ASYNC_FIFO_REG3_DATAPATH_SEL);
- REG_SET_BIT(ah, AR_PHY_MODE, AR_PHY_MODE_ASYNCFIFO);
- REG_CLR_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3,
- AR_MAC_PCU_ASYNC_FIFO_REG3_SOFT_RESET);
- REG_SET_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3,
- AR_MAC_PCU_ASYNC_FIFO_REG3_SOFT_RESET);
- }
r = ath9k_hw_process_ini(ah, chan);
if (r)
return r;
@@ -2027,9 +1327,13 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan))
ath9k_hw_set_delta_slope(ah, chan);
- ah->ath9k_hw_spur_mitigate_freq(ah, chan);
+ ath9k_hw_spur_mitigate_freq(ah, chan);
ah->eep_ops->set_board_values(ah, chan);
+ ath9k_hw_set_operating_mode(ah, ah->opmode);
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
REG_WRITE(ah, AR_STA_ID0, get_unaligned_le32(common->macaddr));
REG_WRITE(ah, AR_STA_ID1, get_unaligned_le16(common->macaddr + 4)
| macStaId1
@@ -2037,25 +1341,27 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
| (ah->config.
ack_6mb ? AR_STA_ID1_ACKCTS_6MB : 0)
| ah->sta_id1_defaults);
- ath9k_hw_set_operating_mode(ah, ah->opmode);
-
ath_hw_setbssidmask(common);
-
REG_WRITE(ah, AR_DEF_ANTENNA, saveDefAntenna);
-
ath9k_hw_write_associd(ah);
-
REG_WRITE(ah, AR_ISR, ~0);
-
REG_WRITE(ah, AR_RSSI_THR, INIT_RSSI_THR);
- r = ah->ath9k_hw_rf_set_freq(ah, chan);
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
+ r = ath9k_hw_rf_set_freq(ah, chan);
if (r)
return r;
+ ENABLE_REGWRITE_BUFFER(ah);
+
for (i = 0; i < AR_NUM_DCU; i++)
REG_WRITE(ah, AR_DQCUMASK(i), 1 << i);
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
ah->intr_txqs = 0;
for (i = 0; i < ah->caps.total_queues; i++)
ath9k_hw_resettxqueue(ah, i);
@@ -2068,25 +1374,9 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
ath9k_hw_init_global_settings(ah);
- if (AR_SREV_9287_12_OR_LATER(ah)) {
- REG_WRITE(ah, AR_D_GBL_IFS_SIFS,
- AR_D_GBL_IFS_SIFS_ASYNC_FIFO_DUR);
- REG_WRITE(ah, AR_D_GBL_IFS_SLOT,
- AR_D_GBL_IFS_SLOT_ASYNC_FIFO_DUR);
- REG_WRITE(ah, AR_D_GBL_IFS_EIFS,
- AR_D_GBL_IFS_EIFS_ASYNC_FIFO_DUR);
-
- REG_WRITE(ah, AR_TIME_OUT, AR_TIME_OUT_ACK_CTS_ASYNC_FIFO_DUR);
- REG_WRITE(ah, AR_USEC, AR_USEC_ASYNC_FIFO_DUR);
-
- REG_SET_BIT(ah, AR_MAC_PCU_LOGIC_ANALYZER,
- AR_MAC_PCU_LOGIC_ANALYZER_DISBUG20768);
- REG_RMW_FIELD(ah, AR_AHB_MODE, AR_AHB_CUSTOM_BURST_EN,
- AR_AHB_CUSTOM_BURST_ASYNC_FIFO_VAL);
- }
- if (AR_SREV_9287_12_OR_LATER(ah)) {
- REG_SET_BIT(ah, AR_PCU_MISC_MODE2,
- AR_PCU_MISC_MODE2_ENABLE_AGGWEP);
+ if (!AR_SREV_9300_20_OR_LATER(ah)) {
+ ar9002_hw_enable_async_fifo(ah);
+ ar9002_hw_enable_wep_aggregation(ah);
}
REG_WRITE(ah, AR_STA_ID1,
@@ -2101,19 +1391,24 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_FIRST, 2000);
}
+ if (ah->config.tx_intr_mitigation) {
+ REG_RMW_FIELD(ah, AR_TIMT, AR_TIMT_LAST, 300);
+ REG_RMW_FIELD(ah, AR_TIMT, AR_TIMT_FIRST, 750);
+ }
+
ath9k_hw_init_bb(ah, chan);
if (!ath9k_hw_init_cal(ah, chan))
return -EIO;
- rx_chainmask = ah->rxchainmask;
- if ((rx_chainmask == 0x5) || (rx_chainmask == 0x3)) {
- REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx_chainmask);
- REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx_chainmask);
- }
+ ENABLE_REGWRITE_BUFFER(ah);
+ ath9k_hw_restore_chainmask(ah);
REG_WRITE(ah, AR_CFG_LED, saveLedState | AR_CFG_SCLK_32KHZ);
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
/*
* For big endian systems turn on swapping for descriptors
*/
@@ -2143,6 +1438,11 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
if (ah->btcoex_hw.enabled)
ath9k_hw_btcoex_enable(ah);
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
+ ath9k_hw_loadnf(ah, curchan);
+ ath9k_hw_start_nfcal(ah);
+ }
+
return 0;
}
EXPORT_SYMBOL(ath9k_hw_reset);
@@ -2429,21 +1729,35 @@ EXPORT_SYMBOL(ath9k_hw_keyisvalid);
/* Power Management (Chipset) */
/******************************/
+/*
+ * Notify Power Mgt is disabled in self-generated frames.
+ * If requested, force chip to sleep.
+ */
static void ath9k_set_power_sleep(struct ath_hw *ah, int setChip)
{
REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
if (setChip) {
+ /*
+ * Clear the RTC force wake bit to allow the
+ * mac to go to sleep.
+ */
REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE,
AR_RTC_FORCE_WAKE_EN);
- if (!AR_SREV_9100(ah))
+ if (!AR_SREV_9100(ah) && !AR_SREV_9300_20_OR_LATER(ah))
REG_WRITE(ah, AR_RC, AR_RC_AHB | AR_RC_HOSTIF);
- if(!AR_SREV_5416(ah))
+ /* Shutdown chip. Active low */
+ if (!AR_SREV_5416(ah) && !AR_SREV_9271(ah))
REG_CLR_BIT(ah, (AR_RTC_RESET),
AR_RTC_RESET_EN);
}
}
+/*
+ * Notify that Power Management is enabled in self-generated
+ * frames. If requested, set power mode of chip to
+ * auto/normal. Duration in units of 128us (1/8 TU).
+ */
static void ath9k_set_power_network_sleep(struct ath_hw *ah, int setChip)
{
REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
@@ -2451,9 +1765,14 @@ static void ath9k_set_power_network_sleep(struct ath_hw *ah, int setChip)
struct ath9k_hw_capabilities *pCap = &ah->caps;
if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
+ /* Set WakeOnInterrupt bit; clear ForceWake bit */
REG_WRITE(ah, AR_RTC_FORCE_WAKE,
AR_RTC_FORCE_WAKE_ON_INT);
} else {
+ /*
+ * Clear the RTC force wake bit to allow the
+ * mac to go to sleep.
+ */
REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE,
AR_RTC_FORCE_WAKE_EN);
}
@@ -2472,7 +1791,8 @@ static bool ath9k_hw_set_power_awake(struct ath_hw *ah, int setChip)
ATH9K_RESET_POWER_ON) != true) {
return false;
}
- ath9k_hw_init_pll(ah, NULL);
+ if (!AR_SREV_9300_20_OR_LATER(ah))
+ ath9k_hw_init_pll(ah, NULL);
}
if (AR_SREV_9100(ah))
REG_SET_BIT(ah, AR_RTC_RESET,
@@ -2542,424 +1862,6 @@ bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode)
}
EXPORT_SYMBOL(ath9k_hw_setpower);
-/*
- * Helper for ASPM support.
- *
- * Disable PLL when in L0s as well as receiver clock when in L1.
- * This power saving option must be enabled through the SerDes.
- *
- * Programming the SerDes must go through the same 288 bit serial shift
- * register as the other analog registers. Hence the 9 writes.
- */
-void ath9k_hw_configpcipowersave(struct ath_hw *ah, int restore, int power_off)
-{
- u8 i;
- u32 val;
-
- if (ah->is_pciexpress != true)
- return;
-
- /* Do not touch SerDes registers */
- if (ah->config.pcie_powersave_enable == 2)
- return;
-
- /* Nothing to do on restore for 11N */
- if (!restore) {
- if (AR_SREV_9280_20_OR_LATER(ah)) {
- /*
- * AR9280 2.0 or later chips use SerDes values from the
- * initvals.h initialized depending on chipset during
- * ath9k_hw_init()
- */
- for (i = 0; i < ah->iniPcieSerdes.ia_rows; i++) {
- REG_WRITE(ah, INI_RA(&ah->iniPcieSerdes, i, 0),
- INI_RA(&ah->iniPcieSerdes, i, 1));
- }
- } else if (AR_SREV_9280(ah) &&
- (ah->hw_version.macRev == AR_SREV_REVISION_9280_10)) {
- REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fd00);
- REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924);
-
- /* RX shut off when elecidle is asserted */
- REG_WRITE(ah, AR_PCIE_SERDES, 0xa8000019);
- REG_WRITE(ah, AR_PCIE_SERDES, 0x13160820);
- REG_WRITE(ah, AR_PCIE_SERDES, 0xe5980560);
-
- /* Shut off CLKREQ active in L1 */
- if (ah->config.pcie_clock_req)
- REG_WRITE(ah, AR_PCIE_SERDES, 0x401deffc);
- else
- REG_WRITE(ah, AR_PCIE_SERDES, 0x401deffd);
-
- REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40);
- REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554);
- REG_WRITE(ah, AR_PCIE_SERDES, 0x00043007);
-
- /* Load the new settings */
- REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);
-
- } else {
- REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fc00);
- REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924);
-
- /* RX shut off when elecidle is asserted */
- REG_WRITE(ah, AR_PCIE_SERDES, 0x28000039);
- REG_WRITE(ah, AR_PCIE_SERDES, 0x53160824);
- REG_WRITE(ah, AR_PCIE_SERDES, 0xe5980579);
-
- /*
- * Ignore ah->ah_config.pcie_clock_req setting for
- * pre-AR9280 11n
- */
- REG_WRITE(ah, AR_PCIE_SERDES, 0x001defff);
-
- REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40);
- REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554);
- REG_WRITE(ah, AR_PCIE_SERDES, 0x000e3007);
-
- /* Load the new settings */
- REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);
- }
-
- udelay(1000);
-
- /* set bit 19 to allow forcing of pcie core into L1 state */
- REG_SET_BIT(ah, AR_PCIE_PM_CTRL, AR_PCIE_PM_CTRL_ENA);
-
- /* Several PCIe massages to ensure proper behaviour */
- if (ah->config.pcie_waen) {
- val = ah->config.pcie_waen;
- if (!power_off)
- val &= (~AR_WA_D3_L1_DISABLE);
- } else {
- if (AR_SREV_9285(ah) || AR_SREV_9271(ah) ||
- AR_SREV_9287(ah)) {
- val = AR9285_WA_DEFAULT;
- if (!power_off)
- val &= (~AR_WA_D3_L1_DISABLE);
- } else if (AR_SREV_9280(ah)) {
- /*
- * On AR9280 chips bit 22 of 0x4004 needs to be
- * set otherwise card may disappear.
- */
- val = AR9280_WA_DEFAULT;
- if (!power_off)
- val &= (~AR_WA_D3_L1_DISABLE);
- } else
- val = AR_WA_DEFAULT;
- }
-
- REG_WRITE(ah, AR_WA, val);
- }
-
- if (power_off) {
- /*
- * Set PCIe workaround bits
- * bit 14 in WA register (disable L1) should only
- * be set when device enters D3 and be cleared
- * when device comes back to D0.
- */
- if (ah->config.pcie_waen) {
- if (ah->config.pcie_waen & AR_WA_D3_L1_DISABLE)
- REG_SET_BIT(ah, AR_WA, AR_WA_D3_L1_DISABLE);
- } else {
- if (((AR_SREV_9285(ah) || AR_SREV_9271(ah) ||
- AR_SREV_9287(ah)) &&
- (AR9285_WA_DEFAULT & AR_WA_D3_L1_DISABLE)) ||
- (AR_SREV_9280(ah) &&
- (AR9280_WA_DEFAULT & AR_WA_D3_L1_DISABLE))) {
- REG_SET_BIT(ah, AR_WA, AR_WA_D3_L1_DISABLE);
- }
- }
- }
-}
-EXPORT_SYMBOL(ath9k_hw_configpcipowersave);
-
-/**********************/
-/* Interrupt Handling */
-/**********************/
-
-bool ath9k_hw_intrpend(struct ath_hw *ah)
-{
- u32 host_isr;
-
- if (AR_SREV_9100(ah))
- return true;
-
- host_isr = REG_READ(ah, AR_INTR_ASYNC_CAUSE);
- if ((host_isr & AR_INTR_MAC_IRQ) && (host_isr != AR_INTR_SPURIOUS))
- return true;
-
- host_isr = REG_READ(ah, AR_INTR_SYNC_CAUSE);
- if ((host_isr & AR_INTR_SYNC_DEFAULT)
- && (host_isr != AR_INTR_SPURIOUS))
- return true;
-
- return false;
-}
-EXPORT_SYMBOL(ath9k_hw_intrpend);
-
-bool ath9k_hw_getisr(struct ath_hw *ah, enum ath9k_int *masked)
-{
- u32 isr = 0;
- u32 mask2 = 0;
- struct ath9k_hw_capabilities *pCap = &ah->caps;
- u32 sync_cause = 0;
- bool fatal_int = false;
- struct ath_common *common = ath9k_hw_common(ah);
-
- if (!AR_SREV_9100(ah)) {
- if (REG_READ(ah, AR_INTR_ASYNC_CAUSE) & AR_INTR_MAC_IRQ) {
- if ((REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M)
- == AR_RTC_STATUS_ON) {
- isr = REG_READ(ah, AR_ISR);
- }
- }
-
- sync_cause = REG_READ(ah, AR_INTR_SYNC_CAUSE) &
- AR_INTR_SYNC_DEFAULT;
-
- *masked = 0;
-
- if (!isr && !sync_cause)
- return false;
- } else {
- *masked = 0;
- isr = REG_READ(ah, AR_ISR);
- }
-
- if (isr) {
- if (isr & AR_ISR_BCNMISC) {
- u32 isr2;
- isr2 = REG_READ(ah, AR_ISR_S2);
- if (isr2 & AR_ISR_S2_TIM)
- mask2 |= ATH9K_INT_TIM;
- if (isr2 & AR_ISR_S2_DTIM)
- mask2 |= ATH9K_INT_DTIM;
- if (isr2 & AR_ISR_S2_DTIMSYNC)
- mask2 |= ATH9K_INT_DTIMSYNC;
- if (isr2 & (AR_ISR_S2_CABEND))
- mask2 |= ATH9K_INT_CABEND;
- if (isr2 & AR_ISR_S2_GTT)
- mask2 |= ATH9K_INT_GTT;
- if (isr2 & AR_ISR_S2_CST)
- mask2 |= ATH9K_INT_CST;
- if (isr2 & AR_ISR_S2_TSFOOR)
- mask2 |= ATH9K_INT_TSFOOR;
- }
-
- isr = REG_READ(ah, AR_ISR_RAC);
- if (isr == 0xffffffff) {
- *masked = 0;
- return false;
- }
-
- *masked = isr & ATH9K_INT_COMMON;
-
- if (ah->config.rx_intr_mitigation) {
- if (isr & (AR_ISR_RXMINTR | AR_ISR_RXINTM))
- *masked |= ATH9K_INT_RX;
- }
-
- if (isr & (AR_ISR_RXOK | AR_ISR_RXERR))
- *masked |= ATH9K_INT_RX;
- if (isr &
- (AR_ISR_TXOK | AR_ISR_TXDESC | AR_ISR_TXERR |
- AR_ISR_TXEOL)) {
- u32 s0_s, s1_s;
-
- *masked |= ATH9K_INT_TX;
-
- s0_s = REG_READ(ah, AR_ISR_S0_S);
- ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXOK);
- ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXDESC);
-
- s1_s = REG_READ(ah, AR_ISR_S1_S);
- ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXERR);
- ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXEOL);
- }
-
- if (isr & AR_ISR_RXORN) {
- ath_print(common, ATH_DBG_INTERRUPT,
- "receive FIFO overrun interrupt\n");
- }
-
- if (!AR_SREV_9100(ah)) {
- if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
- u32 isr5 = REG_READ(ah, AR_ISR_S5_S);
- if (isr5 & AR_ISR_S5_TIM_TIMER)
- *masked |= ATH9K_INT_TIM_TIMER;
- }
- }
-
- *masked |= mask2;
- }
-
- if (AR_SREV_9100(ah))
- return true;
-
- if (isr & AR_ISR_GENTMR) {
- u32 s5_s;
-
- s5_s = REG_READ(ah, AR_ISR_S5_S);
- if (isr & AR_ISR_GENTMR) {
- ah->intr_gen_timer_trigger =
- MS(s5_s, AR_ISR_S5_GENTIMER_TRIG);
-
- ah->intr_gen_timer_thresh =
- MS(s5_s, AR_ISR_S5_GENTIMER_THRESH);
-
- if (ah->intr_gen_timer_trigger)
- *masked |= ATH9K_INT_GENTIMER;
-
- }
- }
-
- if (sync_cause) {
- fatal_int =
- (sync_cause &
- (AR_INTR_SYNC_HOST1_FATAL | AR_INTR_SYNC_HOST1_PERR))
- ? true : false;
-
- if (fatal_int) {
- if (sync_cause & AR_INTR_SYNC_HOST1_FATAL) {
- ath_print(common, ATH_DBG_ANY,
- "received PCI FATAL interrupt\n");
- }
- if (sync_cause & AR_INTR_SYNC_HOST1_PERR) {
- ath_print(common, ATH_DBG_ANY,
- "received PCI PERR interrupt\n");
- }
- *masked |= ATH9K_INT_FATAL;
- }
- if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT) {
- ath_print(common, ATH_DBG_INTERRUPT,
- "AR_INTR_SYNC_RADM_CPL_TIMEOUT\n");
- REG_WRITE(ah, AR_RC, AR_RC_HOSTIF);
- REG_WRITE(ah, AR_RC, 0);
- *masked |= ATH9K_INT_FATAL;
- }
- if (sync_cause & AR_INTR_SYNC_LOCAL_TIMEOUT) {
- ath_print(common, ATH_DBG_INTERRUPT,
- "AR_INTR_SYNC_LOCAL_TIMEOUT\n");
- }
-
- REG_WRITE(ah, AR_INTR_SYNC_CAUSE_CLR, sync_cause);
- (void) REG_READ(ah, AR_INTR_SYNC_CAUSE_CLR);
- }
-
- return true;
-}
-EXPORT_SYMBOL(ath9k_hw_getisr);
-
-enum ath9k_int ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints)
-{
- u32 omask = ah->mask_reg;
- u32 mask, mask2;
- struct ath9k_hw_capabilities *pCap = &ah->caps;
- struct ath_common *common = ath9k_hw_common(ah);
-
- ath_print(common, ATH_DBG_INTERRUPT, "0x%x => 0x%x\n", omask, ints);
-
- if (omask & ATH9K_INT_GLOBAL) {
- ath_print(common, ATH_DBG_INTERRUPT, "disable IER\n");
- REG_WRITE(ah, AR_IER, AR_IER_DISABLE);
- (void) REG_READ(ah, AR_IER);
- if (!AR_SREV_9100(ah)) {
- REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, 0);
- (void) REG_READ(ah, AR_INTR_ASYNC_ENABLE);
-
- REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0);
- (void) REG_READ(ah, AR_INTR_SYNC_ENABLE);
- }
- }
-
- mask = ints & ATH9K_INT_COMMON;
- mask2 = 0;
-
- if (ints & ATH9K_INT_TX) {
- if (ah->txok_interrupt_mask)
- mask |= AR_IMR_TXOK;
- if (ah->txdesc_interrupt_mask)
- mask |= AR_IMR_TXDESC;
- if (ah->txerr_interrupt_mask)
- mask |= AR_IMR_TXERR;
- if (ah->txeol_interrupt_mask)
- mask |= AR_IMR_TXEOL;
- }
- if (ints & ATH9K_INT_RX) {
- mask |= AR_IMR_RXERR;
- if (ah->config.rx_intr_mitigation)
- mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM;
- else
- mask |= AR_IMR_RXOK | AR_IMR_RXDESC;
- if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
- mask |= AR_IMR_GENTMR;
- }
-
- if (ints & (ATH9K_INT_BMISC)) {
- mask |= AR_IMR_BCNMISC;
- if (ints & ATH9K_INT_TIM)
- mask2 |= AR_IMR_S2_TIM;
- if (ints & ATH9K_INT_DTIM)
- mask2 |= AR_IMR_S2_DTIM;
- if (ints & ATH9K_INT_DTIMSYNC)
- mask2 |= AR_IMR_S2_DTIMSYNC;
- if (ints & ATH9K_INT_CABEND)
- mask2 |= AR_IMR_S2_CABEND;
- if (ints & ATH9K_INT_TSFOOR)
- mask2 |= AR_IMR_S2_TSFOOR;
- }
-
- if (ints & (ATH9K_INT_GTT | ATH9K_INT_CST)) {
- mask |= AR_IMR_BCNMISC;
- if (ints & ATH9K_INT_GTT)
- mask2 |= AR_IMR_S2_GTT;
- if (ints & ATH9K_INT_CST)
- mask2 |= AR_IMR_S2_CST;
- }
-
- ath_print(common, ATH_DBG_INTERRUPT, "new IMR 0x%x\n", mask);
- REG_WRITE(ah, AR_IMR, mask);
- mask = REG_READ(ah, AR_IMR_S2) & ~(AR_IMR_S2_TIM |
- AR_IMR_S2_DTIM |
- AR_IMR_S2_DTIMSYNC |
- AR_IMR_S2_CABEND |
- AR_IMR_S2_CABTO |
- AR_IMR_S2_TSFOOR |
- AR_IMR_S2_GTT | AR_IMR_S2_CST);
- REG_WRITE(ah, AR_IMR_S2, mask | mask2);
- ah->mask_reg = ints;
-
- if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
- if (ints & ATH9K_INT_TIM_TIMER)
- REG_SET_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
- else
- REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
- }
-
- if (ints & ATH9K_INT_GLOBAL) {
- ath_print(common, ATH_DBG_INTERRUPT, "enable IER\n");
- REG_WRITE(ah, AR_IER, AR_IER_ENABLE);
- if (!AR_SREV_9100(ah)) {
- REG_WRITE(ah, AR_INTR_ASYNC_ENABLE,
- AR_INTR_MAC_IRQ);
- REG_WRITE(ah, AR_INTR_ASYNC_MASK, AR_INTR_MAC_IRQ);
-
-
- REG_WRITE(ah, AR_INTR_SYNC_ENABLE,
- AR_INTR_SYNC_DEFAULT);
- REG_WRITE(ah, AR_INTR_SYNC_MASK,
- AR_INTR_SYNC_DEFAULT);
- }
- ath_print(common, ATH_DBG_INTERRUPT, "AR_IMR 0x%x IER 0x%x\n",
- REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER));
- }
-
- return omask;
-}
-EXPORT_SYMBOL(ath9k_hw_set_interrupts);
-
/*******************/
/* Beacon Handling */
/*******************/
@@ -2970,6 +1872,8 @@ void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period)
ah->beacon_interval = beacon_period;
+ ENABLE_REGWRITE_BUFFER(ah);
+
switch (ah->opmode) {
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_MONITOR:
@@ -3013,6 +1917,9 @@ void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period)
REG_WRITE(ah, AR_SWBA_PERIOD, TU_TO_USEC(beacon_period));
REG_WRITE(ah, AR_NDP_PERIOD, TU_TO_USEC(beacon_period));
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
beacon_period &= ~ATH9K_BEACON_ENA;
if (beacon_period & ATH9K_BEACON_RESET_TSF) {
ath9k_hw_reset_tsf(ah);
@@ -3029,6 +1936,8 @@ void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
struct ath9k_hw_capabilities *pCap = &ah->caps;
struct ath_common *common = ath9k_hw_common(ah);
+ ENABLE_REGWRITE_BUFFER(ah);
+
REG_WRITE(ah, AR_NEXT_TBTT_TIMER, TU_TO_USEC(bs->bs_nexttbtt));
REG_WRITE(ah, AR_BEACON_PERIOD,
@@ -3036,6 +1945,9 @@ void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
REG_WRITE(ah, AR_DMA_BEACON_PERIOD,
TU_TO_USEC(bs->bs_intval & ATH9K_BEACON_PERIOD));
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
REG_RMW_FIELD(ah, AR_RSSI_THR,
AR_RSSI_THR_BM_THR, bs->bs_bmissthreshold);
@@ -3058,6 +1970,8 @@ void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
ath_print(common, ATH_DBG_BEACON, "beacon period %d\n", beaconintval);
ath_print(common, ATH_DBG_BEACON, "DTIM period %d\n", dtimperiod);
+ ENABLE_REGWRITE_BUFFER(ah);
+
REG_WRITE(ah, AR_NEXT_DTIM,
TU_TO_USEC(bs->bs_nextdtim - SLEEP_SLOP));
REG_WRITE(ah, AR_NEXT_TIM, TU_TO_USEC(nextTbtt - SLEEP_SLOP));
@@ -3077,6 +1991,9 @@ void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
REG_WRITE(ah, AR_TIM_PERIOD, TU_TO_USEC(beaconintval));
REG_WRITE(ah, AR_DTIM_PERIOD, TU_TO_USEC(dtimperiod));
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
REG_SET_BIT(ah, AR_TIMER_MODE,
AR_TBTT_TIMER_EN | AR_TIM_TIMER_EN |
AR_DTIM_TIMER_EN);
@@ -3219,7 +2136,9 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
else
pCap->tx_triglevel_max = MAX_TX_FIFO_THRESHOLD;
- if (AR_SREV_9285_10_OR_LATER(ah))
+ if (AR_SREV_9271(ah))
+ pCap->num_gpio_pins = AR9271_NUM_GPIO;
+ else if (AR_SREV_9285_10_OR_LATER(ah))
pCap->num_gpio_pins = AR9285_NUM_GPIO;
else if (AR_SREV_9280_10_OR_LATER(ah))
pCap->num_gpio_pins = AR928X_NUM_GPIO;
@@ -3246,8 +2165,10 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
pCap->hw_caps |= ATH9K_HW_CAP_RFSILENT;
}
#endif
-
- pCap->hw_caps &= ~ATH9K_HW_CAP_AUTOSLEEP;
+ if (AR_SREV_9271(ah))
+ pCap->hw_caps |= ATH9K_HW_CAP_AUTOSLEEP;
+ else
+ pCap->hw_caps &= ~ATH9K_HW_CAP_AUTOSLEEP;
if (AR_SREV_9280(ah) || AR_SREV_9285(ah))
pCap->hw_caps &= ~ATH9K_HW_CAP_4KB_SPLITTRANS;
@@ -3291,6 +2212,26 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
btcoex_hw->scheme = ATH_BTCOEX_CFG_NONE;
}
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
+ pCap->hw_caps |= ATH9K_HW_CAP_EDMA | ATH9K_HW_CAP_LDPC |
+ ATH9K_HW_CAP_FASTCLOCK;
+ pCap->rx_hp_qdepth = ATH9K_HW_RX_HP_QDEPTH;
+ pCap->rx_lp_qdepth = ATH9K_HW_RX_LP_QDEPTH;
+ pCap->rx_status_len = sizeof(struct ar9003_rxs);
+ pCap->tx_desc_len = sizeof(struct ar9003_txc);
+ pCap->txs_len = sizeof(struct ar9003_txs);
+ } else {
+ pCap->tx_desc_len = sizeof(struct ath_desc);
+ if (AR_SREV_9280_20(ah) &&
+ ((ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV) <=
+ AR5416_EEP_MINOR_VER_16) ||
+ ah->eep_ops->get_eeprom(ah, EEP_FSTCLK_5G)))
+ pCap->hw_caps |= ATH9K_HW_CAP_FASTCLOCK;
+ }
+
+ if (AR_SREV_9300_20_OR_LATER(ah))
+ pCap->hw_caps |= ATH9K_HW_CAP_RAC_SUPPORTED;
+
return 0;
}
@@ -3323,10 +2264,6 @@ bool ath9k_hw_getcapability(struct ath_hw *ah, enum ath9k_capability_type type,
case ATH9K_CAP_TKIP_SPLIT:
return (ah->misc_mode & AR_PCU_MIC_NEW_LOC_ENA) ?
false : true;
- case ATH9K_CAP_DIVERSITY:
- return (REG_READ(ah, AR_PHY_CCK_DETECT) &
- AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV) ?
- true : false;
case ATH9K_CAP_MCAST_KEYSRCH:
switch (capability) {
case 0:
@@ -3369,8 +2306,6 @@ EXPORT_SYMBOL(ath9k_hw_getcapability);
bool ath9k_hw_setcapability(struct ath_hw *ah, enum ath9k_capability_type type,
u32 capability, u32 setting, int *status)
{
- u32 v;
-
switch (type) {
case ATH9K_CAP_TKIP_MIC:
if (setting)
@@ -3380,14 +2315,6 @@ bool ath9k_hw_setcapability(struct ath_hw *ah, enum ath9k_capability_type type,
ah->sta_id1_defaults &=
~AR_STA_ID1_CRPT_MIC_ENABLE;
return true;
- case ATH9K_CAP_DIVERSITY:
- v = REG_READ(ah, AR_PHY_CCK_DETECT);
- if (setting)
- v |= AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV;
- else
- v &= ~AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV;
- REG_WRITE(ah, AR_PHY_CCK_DETECT, v);
- return true;
case ATH9K_CAP_MCAST_KEYSRCH:
if (setting)
ah->sta_id1_defaults |= AR_STA_ID1_MCAST_KSRCH;
@@ -3455,7 +2382,11 @@ u32 ath9k_hw_gpio_get(struct ath_hw *ah, u32 gpio)
if (gpio >= ah->caps.num_gpio_pins)
return 0xffffffff;
- if (AR_SREV_9287_10_OR_LATER(ah))
+ if (AR_SREV_9300_20_OR_LATER(ah))
+ return MS_REG_READ(AR9300, gpio) != 0;
+ else if (AR_SREV_9271(ah))
+ return MS_REG_READ(AR9271, gpio) != 0;
+ else if (AR_SREV_9287_10_OR_LATER(ah))
return MS_REG_READ(AR9287, gpio) != 0;
else if (AR_SREV_9285_10_OR_LATER(ah))
return MS_REG_READ(AR9285, gpio) != 0;
@@ -3484,6 +2415,9 @@ EXPORT_SYMBOL(ath9k_hw_cfg_output);
void ath9k_hw_set_gpio(struct ath_hw *ah, u32 gpio, u32 val)
{
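+	/* AR9271 GPIO output lines use inverted logic, hence the flip below */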
+ if (AR_SREV_9271(ah))
+ val = ~val;
+
REG_RMW(ah, AR_GPIO_IN_OUT, ((val & 1) << gpio),
AR_GPIO_BIT(gpio));
}
@@ -3523,6 +2457,8 @@ void ath9k_hw_setrxfilter(struct ath_hw *ah, u32 bits)
{
u32 phybits;
+ ENABLE_REGWRITE_BUFFER(ah);
+
REG_WRITE(ah, AR_RX_FILTER, bits);
phybits = 0;
@@ -3538,6 +2474,9 @@ void ath9k_hw_setrxfilter(struct ath_hw *ah, u32 bits)
else
REG_WRITE(ah, AR_RXCFG,
REG_READ(ah, AR_RXCFG) & ~AR_RXCFG_ZLFDMA);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
}
EXPORT_SYMBOL(ath9k_hw_setrxfilter);
@@ -3610,14 +2549,25 @@ void ath9k_hw_write_associd(struct ath_hw *ah)
}
EXPORT_SYMBOL(ath9k_hw_write_associd);
+#define ATH9K_MAX_TSF_READ 10
+
u64 ath9k_hw_gettsf64(struct ath_hw *ah)
{
- u64 tsf;
+ u32 tsf_lower, tsf_upper1, tsf_upper2;
+ int i;
+
+ tsf_upper1 = REG_READ(ah, AR_TSF_U32);
+ for (i = 0; i < ATH9K_MAX_TSF_READ; i++) {
+ tsf_lower = REG_READ(ah, AR_TSF_L32);
+ tsf_upper2 = REG_READ(ah, AR_TSF_U32);
+ if (tsf_upper2 == tsf_upper1)
+ break;
+ tsf_upper1 = tsf_upper2;
+ }
- tsf = REG_READ(ah, AR_TSF_U32);
- tsf = (tsf << 32) | REG_READ(ah, AR_TSF_L32);
+ WARN_ON(i == ATH9K_MAX_TSF_READ);
- return tsf;
+ return (((u64)tsf_upper1 << 32) | tsf_lower);
}
EXPORT_SYMBOL(ath9k_hw_gettsf64);
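The TSF lives in two 32-bit registers, so a single high-then-low read can be torn if AR_TSF_L32 wraps between the two accesses; re-reading AR_TSF_U32 until two consecutive reads agree closes that window, and the patch additionally caps the retries at ATH9K_MAX_TSF_READ and warns if the cap is hit. The same high/low/high pattern works for any split 64-bit counter; a minimal generic sketch with invented helper names:

	/* Sketch only: read_hi()/read_lo() stand in for the two 32-bit registers. */
	static u64 read_counter64(void)
	{
		u32 hi, lo;

		do {
			hi = read_hi();
			lo = read_lo();
		} while (hi != read_hi());	/* low word wrapped, retry */

		return ((u64)hi << 32) | lo;
	}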
@@ -3868,6 +2818,16 @@ void ath_gen_timer_isr(struct ath_hw *ah)
}
EXPORT_SYMBOL(ath_gen_timer_isr);
+/********/
+/* HTC */
+/********/
+
+void ath9k_hw_htc_resetinit(struct ath_hw *ah)
+{
+ ah->htc_reset_init = true;
+}
+EXPORT_SYMBOL(ath9k_hw_htc_resetinit);
+
static struct {
u32 version;
const char * name;
@@ -3882,6 +2842,7 @@ static struct {
{ AR_SREV_VERSION_9285, "9285" },
{ AR_SREV_VERSION_9287, "9287" },
{ AR_SREV_VERSION_9271, "9271" },
+ { AR_SREV_VERSION_9300, "9300" },
};
/* For devices with external radios */
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index dbbf7ca5f97d..77245dff5993 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008-2009 Atheros Communications Inc.
+ * Copyright (c) 2008-2010 Atheros Communications Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -41,18 +41,16 @@
#define AR9280_DEVID_PCIE 0x002a
#define AR9285_DEVID_PCIE 0x002b
#define AR2427_DEVID_PCIE 0x002c
+#define AR9287_DEVID_PCI 0x002d
+#define AR9287_DEVID_PCIE 0x002e
+#define AR9300_DEVID_PCIE 0x0030
#define AR5416_AR9100_DEVID 0x000b
-#define AR9271_USB 0x9271
-
#define AR_SUBVENDOR_ID_NOG 0x0e11
#define AR_SUBVENDOR_ID_NEW_A 0x7065
#define AR5416_MAGIC 0x19641014
-#define AR5416_DEVID_AR9287_PCI 0x002D
-#define AR5416_DEVID_AR9287_PCIE 0x002E
-
#define AR9280_COEX2WIRE_SUBSYSID 0x309b
#define AT9285_COEX3WIRE_SA_SUBSYSID 0x30aa
#define AT9285_COEX3WIRE_DA_SUBSYSID 0x30ab
@@ -70,6 +68,24 @@
#define REG_READ(_ah, _reg) \
ath9k_hw_common(_ah)->ops->read((_ah), (_reg))
+#define ENABLE_REGWRITE_BUFFER(_ah) \
+ do { \
+ if (AR_SREV_9271(_ah)) \
+ ath9k_hw_common(_ah)->ops->enable_write_buffer((_ah)); \
+ } while (0)
+
+#define DISABLE_REGWRITE_BUFFER(_ah) \
+ do { \
+ if (AR_SREV_9271(_ah)) \
+ ath9k_hw_common(_ah)->ops->disable_write_buffer((_ah)); \
+ } while (0)
+
+#define REGWRITE_BUFFER_FLUSH(_ah) \
+ do { \
+ if (AR_SREV_9271(_ah)) \
+ ath9k_hw_common(_ah)->ops->write_flush((_ah)); \
+ } while (0)
+
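These wrappers batch register writes only when the underlying bus provides a write buffer; in this patch that is gated on AR_SREV_9271, the USB-attached part, where each individual REG_WRITE() is a comparatively expensive transfer. On every other chip they expand to nothing and writes go straight through. The usage pattern, mirroring the hw.c hunks above:

	ENABLE_REGWRITE_BUFFER(ah);

	REG_WRITE(ah, AR_DEF_ANTENNA, saveDefAntenna);	/* queued locally */
	REG_WRITE(ah, AR_RSSI_THR, INIT_RSSI_THR);	/* queued locally */

	REGWRITE_BUFFER_FLUSH(ah);	/* push the queued writes in one burst */
	DISABLE_REGWRITE_BUFFER(ah);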
#define SM(_v, _f) (((_v) << _f##_S) & _f)
#define MS(_v, _f) (((_v) & _f) >> _f##_S)
#define REG_RMW(_a, _r, _set, _clr) \
@@ -77,6 +93,8 @@
#define REG_RMW_FIELD(_a, _r, _f, _v) \
REG_WRITE(_a, _r, \
(REG_READ(_a, _r) & ~_f) | (((_v) << _f##_S) & _f))
+#define REG_READ_FIELD(_a, _r, _f) \
+ (((REG_READ(_a, _r) & _f) >> _f##_S))
#define REG_SET_BIT(_a, _r, _f) \
REG_WRITE(_a, _r, REG_READ(_a, _r) | _f)
#define REG_CLR_BIT(_a, _r, _f) \
@@ -137,6 +155,16 @@
#define TU_TO_USEC(_tu) ((_tu) << 10)
+#define ATH9K_HW_RX_HP_QDEPTH 16
+#define ATH9K_HW_RX_LP_QDEPTH 128
+
+enum ath_ini_subsys {
+ ATH_INI_PRE = 0,
+ ATH_INI_CORE,
+ ATH_INI_POST,
+ ATH_INI_NUM_SPLIT,
+};
+
enum wireless_mode {
ATH9K_MODE_11A = 0,
ATH9K_MODE_11G,
@@ -167,13 +195,16 @@ enum ath9k_hw_caps {
ATH9K_HW_CAP_ENHANCEDPM = BIT(14),
ATH9K_HW_CAP_AUTOSLEEP = BIT(15),
ATH9K_HW_CAP_4KB_SPLITTRANS = BIT(16),
+ ATH9K_HW_CAP_EDMA = BIT(17),
+ ATH9K_HW_CAP_RAC_SUPPORTED = BIT(18),
+ ATH9K_HW_CAP_LDPC = BIT(19),
+ ATH9K_HW_CAP_FASTCLOCK = BIT(20),
};
enum ath9k_capability_type {
ATH9K_CAP_CIPHER = 0,
ATH9K_CAP_TKIP_MIC,
ATH9K_CAP_TKIP_SPLIT,
- ATH9K_CAP_DIVERSITY,
ATH9K_CAP_TXPOW,
ATH9K_CAP_MCAST_KEYSRCH,
ATH9K_CAP_DS
@@ -194,6 +225,11 @@ struct ath9k_hw_capabilities {
u8 num_gpio_pins;
u8 num_antcfg_2ghz;
u8 num_antcfg_5ghz;
+ u8 rx_hp_qdepth;
+ u8 rx_lp_qdepth;
+ u8 rx_status_len;
+ u8 tx_desc_len;
+ u8 txs_len;
};
struct ath9k_ops_config {
@@ -214,6 +250,7 @@ struct ath9k_ops_config {
u32 enable_ani;
int serialize_regmode;
bool rx_intr_mitigation;
+ bool tx_intr_mitigation;
#define SPUR_DISABLE 0
#define SPUR_ENABLE_IOCTL 1
#define SPUR_ENABLE_EEPROM 2
@@ -225,6 +262,7 @@ struct ath9k_ops_config {
#define AR_BASE_FREQ_5GHZ 4900
#define AR_SPUR_FEEQ_BOUND_HT40 19
#define AR_SPUR_FEEQ_BOUND_HT20 10
+ bool tx_iq_calibration; /* Only available for >= AR9003 */
int spurmode;
u16 spurchans[AR_EEPROM_MODAL_SPURS][2];
u8 max_txtrig_level;
@@ -233,6 +271,8 @@ struct ath9k_ops_config {
enum ath9k_int {
ATH9K_INT_RX = 0x00000001,
ATH9K_INT_RXDESC = 0x00000002,
+ ATH9K_INT_RXHP = 0x00000001,
+ ATH9K_INT_RXLP = 0x00000002,
ATH9K_INT_RXNOFRM = 0x00000008,
ATH9K_INT_RXEOL = 0x00000010,
ATH9K_INT_RXORN = 0x00000020,
@@ -329,10 +369,9 @@ struct ath9k_channel {
#define IS_CHAN_2GHZ(_c) (((_c)->channelFlags & CHANNEL_2GHZ) != 0)
#define IS_CHAN_HALF_RATE(_c) (((_c)->channelFlags & CHANNEL_HALF) != 0)
#define IS_CHAN_QUARTER_RATE(_c) (((_c)->channelFlags & CHANNEL_QUARTER) != 0)
-#define IS_CHAN_A_5MHZ_SPACED(_c) \
+#define IS_CHAN_A_FAST_CLOCK(_ah, _c) \
((((_c)->channelFlags & CHANNEL_5GHZ) != 0) && \
- (((_c)->channel % 20) != 0) && \
- (((_c)->channel % 10) != 0))
+ ((_ah)->caps.hw_caps & ATH9K_HW_CAP_FASTCLOCK))
/* These macros check chanmode and not channelFlags */
#define IS_CHAN_B(_c) ((_c)->chanmode == CHANNEL_B)
@@ -365,6 +404,12 @@ enum ser_reg_mode {
SER_REG_MODE_AUTO = 2,
};
+enum ath9k_rx_qtype {
+ ATH9K_RX_QUEUE_HP,
+ ATH9K_RX_QUEUE_LP,
+ ATH9K_RX_QUEUE_MAX,
+};
+
struct ath9k_beacon_state {
u32 bs_nexttbtt;
u32 bs_nextdtim;
@@ -442,6 +487,124 @@ struct ath_gen_timer_table {
} timer_mask;
};
+/**
+ * struct ath_hw_private_ops - callbacks used internally by hardware code
+ *
+ * This structure contains private callbacks designed to only be used internally
+ * by the hardware core.
+ *
+ * @init_cal_settings: setup types of calibrations supported
+ * @init_cal: starts actual calibration
+ *
+ * @init_mode_regs: Initializes mode registers
+ * @init_mode_gain_regs: Initialize TX/RX gain registers
+ * @macversion_supported: If this specific mac revision is supported
+ *
+ * @rf_set_freq: change frequency
+ * @spur_mitigate_freq: spur mitigation
+ * @rf_alloc_ext_banks: allocate analog banks for external (non single-chip) radios
+ * @rf_free_ext_banks: free the banks allocated by @rf_alloc_ext_banks
+ * @set_rf_regs: program the RF registers for the given channel and mode
+ * @compute_pll_control: compute the PLL control value to use for
+ * AR_RTC_PLL_CONTROL for a given channel
+ * @setup_calibration: set up calibration
+ * @iscal_supported: used to query if a type of calibration is supported
+ * @loadnf: load noise floor read from each chain on the CCA registers
+ */
+struct ath_hw_private_ops {
+ /* Calibration ops */
+ void (*init_cal_settings)(struct ath_hw *ah);
+ bool (*init_cal)(struct ath_hw *ah, struct ath9k_channel *chan);
+
+ void (*init_mode_regs)(struct ath_hw *ah);
+ void (*init_mode_gain_regs)(struct ath_hw *ah);
+ bool (*macversion_supported)(u32 macversion);
+ void (*setup_calibration)(struct ath_hw *ah,
+ struct ath9k_cal_list *currCal);
+ bool (*iscal_supported)(struct ath_hw *ah,
+ enum ath9k_cal_types calType);
+
+ /* PHY ops */
+ int (*rf_set_freq)(struct ath_hw *ah,
+ struct ath9k_channel *chan);
+ void (*spur_mitigate_freq)(struct ath_hw *ah,
+ struct ath9k_channel *chan);
+ int (*rf_alloc_ext_banks)(struct ath_hw *ah);
+ void (*rf_free_ext_banks)(struct ath_hw *ah);
+ bool (*set_rf_regs)(struct ath_hw *ah,
+ struct ath9k_channel *chan,
+ u16 modesIndex);
+ void (*set_channel_regs)(struct ath_hw *ah, struct ath9k_channel *chan);
+ void (*init_bb)(struct ath_hw *ah,
+ struct ath9k_channel *chan);
+ int (*process_ini)(struct ath_hw *ah, struct ath9k_channel *chan);
+ void (*olc_init)(struct ath_hw *ah);
+ void (*set_rfmode)(struct ath_hw *ah, struct ath9k_channel *chan);
+ void (*mark_phy_inactive)(struct ath_hw *ah);
+ void (*set_delta_slope)(struct ath_hw *ah, struct ath9k_channel *chan);
+ bool (*rfbus_req)(struct ath_hw *ah);
+ void (*rfbus_done)(struct ath_hw *ah);
+ void (*enable_rfkill)(struct ath_hw *ah);
+ void (*restore_chainmask)(struct ath_hw *ah);
+ void (*set_diversity)(struct ath_hw *ah, bool value);
+ u32 (*compute_pll_control)(struct ath_hw *ah,
+ struct ath9k_channel *chan);
+ bool (*ani_control)(struct ath_hw *ah, enum ath9k_ani_cmd cmd,
+ int param);
+ void (*do_getnf)(struct ath_hw *ah, int16_t nfarray[NUM_NF_READINGS]);
+ void (*loadnf)(struct ath_hw *ah, struct ath9k_channel *chan);
+};
+
+/**
+ * struct ath_hw_ops - callbacks used by hardware code and driver code
+ *
+ * This structure contains callbacks designed to be used internally by
+ * hardware code and also by the lower level driver.
+ *
+ * @config_pci_powersave: program the PCI-E SerDes/ASPM power-save settings
+ * @calibrate: periodic calibration for NF, ANI, IQ, ADC gain, ADC-DC
+ */
+struct ath_hw_ops {
+ void (*config_pci_powersave)(struct ath_hw *ah,
+ int restore,
+ int power_off);
+ void (*rx_enable)(struct ath_hw *ah);
+ void (*set_desc_link)(void *ds, u32 link);
+ void (*get_desc_link)(void *ds, u32 **link);
+ bool (*calibrate)(struct ath_hw *ah,
+ struct ath9k_channel *chan,
+ u8 rxchainmask,
+ bool longcal);
+ bool (*get_isr)(struct ath_hw *ah, enum ath9k_int *masked);
+ void (*fill_txdesc)(struct ath_hw *ah, void *ds, u32 seglen,
+ bool is_firstseg, bool is_lastseg,
+ const void *ds0, dma_addr_t buf_addr,
+ unsigned int qcu);
+ int (*proc_txdesc)(struct ath_hw *ah, void *ds,
+ struct ath_tx_status *ts);
+ void (*set11n_txdesc)(struct ath_hw *ah, void *ds,
+ u32 pktLen, enum ath9k_pkt_type type,
+ u32 txPower, u32 keyIx,
+ enum ath9k_key_type keyType,
+ u32 flags);
+ void (*set11n_ratescenario)(struct ath_hw *ah, void *ds,
+ void *lastds,
+ u32 durUpdateEn, u32 rtsctsRate,
+ u32 rtsctsDuration,
+ struct ath9k_11n_rate_series series[],
+ u32 nseries, u32 flags);
+ void (*set11n_aggr_first)(struct ath_hw *ah, void *ds,
+ u32 aggrLen);
+ void (*set11n_aggr_middle)(struct ath_hw *ah, void *ds,
+ u32 numDelims);
+ void (*set11n_aggr_last)(struct ath_hw *ah, void *ds);
+ void (*clr11n_aggr)(struct ath_hw *ah, void *ds);
+ void (*set11n_burstduration)(struct ath_hw *ah, void *ds,
+ u32 burstDuration);
+ void (*set11n_virtualmorefrag)(struct ath_hw *ah, void *ds,
+ u32 vmf);
+};
+
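These two structures replace the direct per-chip function pointers that used to live in struct ath_hw (ath9k_hw_rf_set_freq, ath9k_hw_spur_mitigate_freq): private_ops carries hooks only the hardware code touches, ops carries what the driver proper may call. A rough sketch of what a family attach helper is expected to do; the callback implementations named below are placeholders, not functions defined by this patch:

	void ar9002_hw_attach_phy_ops(struct ath_hw *ah)
	{
		struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);

		/* Placeholder names: wire up the AR9002-family PHY routines. */
		priv_ops->rf_set_freq = ar9002_hw_set_channel;
		priv_ops->spur_mitigate_freq = ar9002_hw_spur_mitigate;
		priv_ops->rfbus_req = ar9002_hw_rfbus_req;
		priv_ops->rfbus_done = ar9002_hw_rfbus_done;
	}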
struct ath_hw {
struct ieee80211_hw *hw;
struct ath_common common;
@@ -455,13 +618,18 @@ struct ath_hw {
struct ar5416_eeprom_def def;
struct ar5416_eeprom_4k map4k;
struct ar9287_eeprom map9287;
+ struct ar9300_eeprom ar9300_eep;
} eeprom;
const struct eeprom_ops *eep_ops;
- enum ath9k_eep_map eep_map;
bool sw_mgmt_crypto;
bool is_pciexpress;
+ bool need_an_top2_fixup;
u16 tx_trig_level;
+ s16 nf_2g_max;
+ s16 nf_2g_min;
+ s16 nf_5g_max;
+ s16 nf_5g_min;
u16 rfsilent;
u32 rfkill_gpio;
u32 rfkill_polarity;
@@ -478,7 +646,8 @@ struct ath_hw {
struct ath9k_tx_queue_info txq[ATH9K_NUM_TX_QUEUES];
int16_t curchan_rad_index;
- u32 mask_reg;
+ enum ath9k_int imask;
+ u32 imrs2_reg;
u32 txok_interrupt_mask;
u32 txerr_interrupt_mask;
u32 txdesc_interrupt_mask;
@@ -493,6 +662,7 @@ struct ath_hw {
struct ath9k_cal_list adcgain_caldata;
struct ath9k_cal_list adcdc_calinitdata;
struct ath9k_cal_list adcdc_caldata;
+ struct ath9k_cal_list tempCompCalData;
struct ath9k_cal_list *cal_list;
struct ath9k_cal_list *cal_list_last;
struct ath9k_cal_list *cal_list_curr;
@@ -533,12 +703,10 @@ struct ath_hw {
DONT_USE_32KHZ,
} enable_32kHz_clock;
- /* Callback for radio frequency change */
- int (*ath9k_hw_rf_set_freq)(struct ath_hw *ah, struct ath9k_channel *chan);
-
- /* Callback for baseband spur frequency */
- void (*ath9k_hw_spur_mitigate_freq)(struct ath_hw *ah,
- struct ath9k_channel *chan);
+ /* Private to hardware code */
+ struct ath_hw_private_ops private_ops;
+ /* Accessed by the lower level driver */
+ struct ath_hw_ops ops;
/* Used to program the radio on non single-chip devices */
u32 *analogBank0Data;
@@ -551,6 +719,7 @@ struct ath_hw {
u32 *addac5416_21;
u32 *bank6Temp;
+ u8 txpower_limit;
int16_t txpower_indexoffset;
int coverage_class;
u32 beacon_interval;
@@ -592,16 +761,34 @@ struct ath_hw {
struct ar5416IniArray iniBank7;
struct ar5416IniArray iniAddac;
struct ar5416IniArray iniPcieSerdes;
+ struct ar5416IniArray iniPcieSerdesLowPower;
struct ar5416IniArray iniModesAdditional;
struct ar5416IniArray iniModesRxGain;
struct ar5416IniArray iniModesTxGain;
struct ar5416IniArray iniModes_9271_1_0_only;
struct ar5416IniArray iniCckfirNormal;
struct ar5416IniArray iniCckfirJapan2484;
+ struct ar5416IniArray iniCommon_normal_cck_fir_coeff_9271;
+ struct ar5416IniArray iniCommon_japan_2484_cck_fir_coeff_9271;
+ struct ar5416IniArray iniModes_9271_ANI_reg;
+ struct ar5416IniArray iniModes_high_power_tx_gain_9271;
+ struct ar5416IniArray iniModes_normal_power_tx_gain_9271;
+
+ struct ar5416IniArray iniMac[ATH_INI_NUM_SPLIT];
+ struct ar5416IniArray iniBB[ATH_INI_NUM_SPLIT];
+ struct ar5416IniArray iniRadio[ATH_INI_NUM_SPLIT];
+ struct ar5416IniArray iniSOC[ATH_INI_NUM_SPLIT];
u32 intr_gen_timer_trigger;
u32 intr_gen_timer_thresh;
struct ath_gen_timer_table hw_gen_timers;
+
+ struct ar9003_txs *ts_ring;
+ void *ts_start;
+ u32 ts_paddr_start;
+ u32 ts_paddr_end;
+ u16 ts_tail;
+ u8 ts_size;
};
static inline struct ath_common *ath9k_hw_common(struct ath_hw *ah)
@@ -614,6 +801,16 @@ static inline struct ath_regulatory *ath9k_hw_regulatory(struct ath_hw *ah)
return &(ath9k_hw_common(ah)->regulatory);
}
+static inline struct ath_hw_private_ops *ath9k_hw_private_ops(struct ath_hw *ah)
+{
+ return &ah->private_ops;
+}
+
+static inline struct ath_hw_ops *ath9k_hw_ops(struct ath_hw *ah)
+{
+ return &ah->ops;
+}
+
/* Initialization, Detach, Reset */
const char *ath9k_hw_probe(u16 vendorid, u16 devid);
void ath9k_hw_deinit(struct ath_hw *ah);
@@ -625,6 +822,7 @@ bool ath9k_hw_getcapability(struct ath_hw *ah, enum ath9k_capability_type type,
u32 capability, u32 *result);
bool ath9k_hw_setcapability(struct ath_hw *ah, enum ath9k_capability_type type,
u32 capability, u32 setting, int *status);
+u32 ath9k_regd_get_ctl(struct ath_regulatory *reg, struct ath9k_channel *chan);
/* Key Cache Management */
bool ath9k_hw_keyreset(struct ath_hw *ah, u16 entry);
@@ -673,16 +871,10 @@ void ath9k_hw_set11nmac2040(struct ath_hw *ah);
void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period);
void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
const struct ath9k_beacon_state *bs);
+bool ath9k_hw_check_alive(struct ath_hw *ah);
bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode);
-void ath9k_hw_configpcipowersave(struct ath_hw *ah, int restore, int power_off);
-
-/* Interrupt Handling */
-bool ath9k_hw_intrpend(struct ath_hw *ah);
-bool ath9k_hw_getisr(struct ath_hw *ah, enum ath9k_int *masked);
-enum ath9k_int ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints);
-
/* Generic hw timer primitives */
struct ath_gen_timer *ath_gen_timer_alloc(struct ath_hw *ah,
void (*trigger)(void *),
@@ -701,6 +893,39 @@ u32 ath9k_hw_gettsf32(struct ath_hw *ah);
void ath9k_hw_name(struct ath_hw *ah, char *hw_name, size_t len);
+/* HTC */
+void ath9k_hw_htc_resetinit(struct ath_hw *ah);
+
+/* PHY */
+void ath9k_hw_get_delta_slope_vals(struct ath_hw *ah, u32 coef_scaled,
+ u32 *coef_mantissa, u32 *coef_exponent);
+
+/*
+ * Code Specific to AR5008, AR9001 or AR9002,
+ * we stuff these here to avoid callbacks for AR9003.
+ */
+void ar9002_hw_cck_chan14_spread(struct ath_hw *ah);
+int ar9002_hw_rf_claim(struct ath_hw *ah);
+void ar9002_hw_enable_async_fifo(struct ath_hw *ah);
+void ar9002_hw_enable_wep_aggregation(struct ath_hw *ah);
+
+/*
+ * Code specific to AR9003, we stuff these here to avoid callbacks
+ * for older families
+ */
+void ar9003_hw_set_nf_limits(struct ath_hw *ah);
+
+/* Hardware family op attach helpers */
+void ar5008_hw_attach_phy_ops(struct ath_hw *ah);
+void ar9002_hw_attach_phy_ops(struct ath_hw *ah);
+void ar9003_hw_attach_phy_ops(struct ath_hw *ah);
+
+void ar9002_hw_attach_calib_ops(struct ath_hw *ah);
+void ar9003_hw_attach_calib_ops(struct ath_hw *ah);
+
+void ar9002_hw_attach_ops(struct ath_hw *ah);
+void ar9003_hw_attach_ops(struct ath_hw *ah);
+
#define ATH_PCIE_CAP_LINK_CTRL 0x70
#define ATH_PCIE_CAP_LINK_L0S 1
#define ATH_PCIE_CAP_LINK_L1 2
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 3d4d897add6d..d457cb3bd772 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -175,6 +175,18 @@ static const struct ath_ops ath9k_common_ops = {
.write = ath9k_iowrite32,
};
+static int count_streams(unsigned int chainmask, int max)
+{
+ int streams = 0;
+
+ do {
+ if (++streams == max)
+ break;
+ } while ((chainmask = chainmask & (chainmask - 1)));
+
+ return streams;
+}
+
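count_streams() counts the set bits in a chainmask, capped at max: chainmask & (chainmask - 1) clears the lowest set bit on every pass, so the loop body runs once per active chain. A few worked values (illustrative only):

	count_streams(0x1, 3);	/* chain 0 only          -> 1 stream  */
	count_streams(0x5, 3);	/* chains 0 and 2        -> 2 streams */
	count_streams(0x7, 2);	/* three chains, capped  -> 2 streams */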
/**************************/
/* Initialization */
/**************************/
@@ -182,8 +194,10 @@ static const struct ath_ops ath9k_common_ops = {
static void setup_ht_cap(struct ath_softc *sc,
struct ieee80211_sta_ht_cap *ht_info)
{
- struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
u8 tx_streams, rx_streams;
+ int i, max_streams;
ht_info->ht_supported = true;
ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
@@ -191,28 +205,40 @@ static void setup_ht_cap(struct ath_softc *sc,
IEEE80211_HT_CAP_SGI_40 |
IEEE80211_HT_CAP_DSSSCCK40;
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_LDPC)
+ ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING;
+
ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
+ if (AR_SREV_9300_20_OR_LATER(ah))
+ max_streams = 3;
+ else
+ max_streams = 2;
+
+ if (AR_SREV_9280_10_OR_LATER(ah)) {
+ if (max_streams >= 2)
+ ht_info->cap |= IEEE80211_HT_CAP_TX_STBC;
+ ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
+ }
+
/* set up supported mcs set */
memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
- tx_streams = !(common->tx_chainmask & (common->tx_chainmask - 1)) ?
- 1 : 2;
- rx_streams = !(common->rx_chainmask & (common->rx_chainmask - 1)) ?
- 1 : 2;
+ tx_streams = count_streams(common->tx_chainmask, max_streams);
+ rx_streams = count_streams(common->rx_chainmask, max_streams);
+
+ ath_print(common, ATH_DBG_CONFIG,
+ "TX streams %d, RX streams: %d\n",
+ tx_streams, rx_streams);
if (tx_streams != rx_streams) {
- ath_print(common, ATH_DBG_CONFIG,
- "TX streams %d, RX streams: %d\n",
- tx_streams, rx_streams);
ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
ht_info->mcs.tx_params |= ((tx_streams - 1) <<
IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
}
- ht_info->mcs.rx_mask[0] = 0xff;
- if (rx_streams >= 2)
- ht_info->mcs.rx_mask[1] = 0xff;
+ for (i = 0; i < rx_streams; i++)
+ ht_info->mcs.rx_mask[i] = 0xff;
ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
}
@@ -235,31 +261,37 @@ static int ath9k_reg_notifier(struct wiphy *wiphy,
*/
int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
struct list_head *head, const char *name,
- int nbuf, int ndesc)
+ int nbuf, int ndesc, bool is_tx)
{
#define DS2PHYS(_dd, _ds) \
((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
#define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
#define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- struct ath_desc *ds;
+ u8 *ds;
struct ath_buf *bf;
- int i, bsize, error;
+ int i, bsize, error, desc_len;
ath_print(common, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n",
name, nbuf, ndesc);
INIT_LIST_HEAD(head);
+
+ if (is_tx)
+ desc_len = sc->sc_ah->caps.tx_desc_len;
+ else
+ desc_len = sizeof(struct ath_desc);
+
/* ath_desc must be a multiple of DWORDs */
- if ((sizeof(struct ath_desc) % 4) != 0) {
+ if ((desc_len % 4) != 0) {
ath_print(common, ATH_DBG_FATAL,
"ath_desc not DWORD aligned\n");
- BUG_ON((sizeof(struct ath_desc) % 4) != 0);
+ BUG_ON((desc_len % 4) != 0);
error = -ENOMEM;
goto fail;
}
- dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc;
+ dd->dd_desc_len = desc_len * nbuf * ndesc;
/*
* Need additional DMA memory because we can't use
@@ -272,11 +304,11 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
u32 dma_len;
while (ndesc_skipped) {
- dma_len = ndesc_skipped * sizeof(struct ath_desc);
+ dma_len = ndesc_skipped * desc_len;
dd->dd_desc_len += dma_len;
ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
- };
+ }
}
/* allocate descriptors */
@@ -286,7 +318,7 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
error = -ENOMEM;
goto fail;
}
- ds = dd->dd_desc;
+ ds = (u8 *) dd->dd_desc;
ath_print(common, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
name, ds, (u32) dd->dd_desc_len,
ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);
@@ -300,7 +332,7 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
}
dd->dd_bufptr = bf;
- for (i = 0; i < nbuf; i++, bf++, ds += ndesc) {
+ for (i = 0; i < nbuf; i++, bf++, ds += (desc_len * ndesc)) {
bf->bf_desc = ds;
bf->bf_daddr = DS2PHYS(dd, ds);
@@ -316,7 +348,7 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
((caddr_t) dd->dd_desc +
dd->dd_desc_len));
- ds += ndesc;
+ ds += (desc_len * ndesc);
bf->bf_desc = ds;
bf->bf_daddr = DS2PHYS(dd, ds);
}
@@ -514,7 +546,7 @@ static void ath9k_init_misc(struct ath_softc *sc)
common->tx_chainmask = sc->sc_ah->caps.tx_chainmask;
common->rx_chainmask = sc->sc_ah->caps.rx_chainmask;
- ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_DIVERSITY, 1, true, NULL);
+ ath9k_hw_set_diversity(sc->sc_ah, true);
sc->rx.defant = ath9k_hw_getdefantenna(sc->sc_ah);
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
@@ -568,13 +600,10 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
ath_read_cachesize(common, &csz);
common->cachelsz = csz << 2; /* convert to bytes */
+ /* Initializes the hardware for all supported chipsets */
ret = ath9k_hw_init(ah);
- if (ret) {
- ath_print(common, ATH_DBG_FATAL,
- "Unable to initialize hardware; "
- "initialization status: %d\n", ret);
+ if (ret)
goto err_hw;
- }
ret = ath9k_init_debug(ah);
if (ret) {
@@ -760,6 +789,9 @@ static void ath9k_deinit_softc(struct ath_softc *sc)
tasklet_kill(&sc->intr_tq);
tasklet_kill(&sc->bcon_tasklet);
+
+ kfree(sc->sc_ah);
+ sc->sc_ah = NULL;
}
void ath9k_deinit_device(struct ath_softc *sc)
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index efc420cd42bf..0e425cb4bbb1 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -25,14 +25,21 @@ static void ath9k_hw_set_txq_interrupts(struct ath_hw *ah,
ah->txdesc_interrupt_mask, ah->txeol_interrupt_mask,
ah->txurn_interrupt_mask);
+ ENABLE_REGWRITE_BUFFER(ah);
+
REG_WRITE(ah, AR_IMR_S0,
SM(ah->txok_interrupt_mask, AR_IMR_S0_QCU_TXOK)
| SM(ah->txdesc_interrupt_mask, AR_IMR_S0_QCU_TXDESC));
REG_WRITE(ah, AR_IMR_S1,
SM(ah->txerr_interrupt_mask, AR_IMR_S1_QCU_TXERR)
| SM(ah->txeol_interrupt_mask, AR_IMR_S1_QCU_TXEOL));
- REG_RMW_FIELD(ah, AR_IMR_S2,
- AR_IMR_S2_QCU_TXURN, ah->txurn_interrupt_mask);
+
+ ah->imrs2_reg &= ~AR_IMR_S2_QCU_TXURN;
+ ah->imrs2_reg |= (ah->txurn_interrupt_mask & AR_IMR_S2_QCU_TXURN);
+ REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
}
u32 ath9k_hw_gettxbuf(struct ath_hw *ah, u32 q)
@@ -55,6 +62,18 @@ void ath9k_hw_txstart(struct ath_hw *ah, u32 q)
}
EXPORT_SYMBOL(ath9k_hw_txstart);
+void ath9k_hw_cleartxdesc(struct ath_hw *ah, void *ds)
+{
+ struct ar5416_desc *ads = AR5416DESC(ds);
+
+ ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
+ ads->ds_txstatus2 = ads->ds_txstatus3 = 0;
+ ads->ds_txstatus4 = ads->ds_txstatus5 = 0;
+ ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
+ ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
+}
+EXPORT_SYMBOL(ath9k_hw_cleartxdesc);
+
u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q)
{
u32 npend;
@@ -103,7 +122,7 @@ bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel)
if (ah->tx_trig_level >= ah->config.max_txtrig_level)
return false;
- omask = ath9k_hw_set_interrupts(ah, ah->mask_reg & ~ATH9K_INT_GLOBAL);
+ omask = ath9k_hw_set_interrupts(ah, ah->imask & ~ATH9K_INT_GLOBAL);
txcfg = REG_READ(ah, AR_TXCFG);
curLevel = MS(txcfg, AR_FTRIG);
@@ -205,280 +224,6 @@ bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q)
}
EXPORT_SYMBOL(ath9k_hw_stoptxdma);
-void ath9k_hw_filltxdesc(struct ath_hw *ah, struct ath_desc *ds,
- u32 segLen, bool firstSeg,
- bool lastSeg, const struct ath_desc *ds0)
-{
- struct ar5416_desc *ads = AR5416DESC(ds);
-
- if (firstSeg) {
- ads->ds_ctl1 |= segLen | (lastSeg ? 0 : AR_TxMore);
- } else if (lastSeg) {
- ads->ds_ctl0 = 0;
- ads->ds_ctl1 = segLen;
- ads->ds_ctl2 = AR5416DESC_CONST(ds0)->ds_ctl2;
- ads->ds_ctl3 = AR5416DESC_CONST(ds0)->ds_ctl3;
- } else {
- ads->ds_ctl0 = 0;
- ads->ds_ctl1 = segLen | AR_TxMore;
- ads->ds_ctl2 = 0;
- ads->ds_ctl3 = 0;
- }
- ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
- ads->ds_txstatus2 = ads->ds_txstatus3 = 0;
- ads->ds_txstatus4 = ads->ds_txstatus5 = 0;
- ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
- ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
-}
-EXPORT_SYMBOL(ath9k_hw_filltxdesc);
-
-void ath9k_hw_cleartxdesc(struct ath_hw *ah, struct ath_desc *ds)
-{
- struct ar5416_desc *ads = AR5416DESC(ds);
-
- ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
- ads->ds_txstatus2 = ads->ds_txstatus3 = 0;
- ads->ds_txstatus4 = ads->ds_txstatus5 = 0;
- ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
- ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
-}
-EXPORT_SYMBOL(ath9k_hw_cleartxdesc);
-
-int ath9k_hw_txprocdesc(struct ath_hw *ah, struct ath_desc *ds)
-{
- struct ar5416_desc *ads = AR5416DESC(ds);
-
- if ((ads->ds_txstatus9 & AR_TxDone) == 0)
- return -EINPROGRESS;
-
- ds->ds_txstat.ts_seqnum = MS(ads->ds_txstatus9, AR_SeqNum);
- ds->ds_txstat.ts_tstamp = ads->AR_SendTimestamp;
- ds->ds_txstat.ts_status = 0;
- ds->ds_txstat.ts_flags = 0;
-
- if (ads->ds_txstatus1 & AR_FrmXmitOK)
- ds->ds_txstat.ts_status |= ATH9K_TX_ACKED;
- if (ads->ds_txstatus1 & AR_ExcessiveRetries)
- ds->ds_txstat.ts_status |= ATH9K_TXERR_XRETRY;
- if (ads->ds_txstatus1 & AR_Filtered)
- ds->ds_txstat.ts_status |= ATH9K_TXERR_FILT;
- if (ads->ds_txstatus1 & AR_FIFOUnderrun) {
- ds->ds_txstat.ts_status |= ATH9K_TXERR_FIFO;
- ath9k_hw_updatetxtriglevel(ah, true);
- }
- if (ads->ds_txstatus9 & AR_TxOpExceeded)
- ds->ds_txstat.ts_status |= ATH9K_TXERR_XTXOP;
- if (ads->ds_txstatus1 & AR_TxTimerExpired)
- ds->ds_txstat.ts_status |= ATH9K_TXERR_TIMER_EXPIRED;
-
- if (ads->ds_txstatus1 & AR_DescCfgErr)
- ds->ds_txstat.ts_flags |= ATH9K_TX_DESC_CFG_ERR;
- if (ads->ds_txstatus1 & AR_TxDataUnderrun) {
- ds->ds_txstat.ts_flags |= ATH9K_TX_DATA_UNDERRUN;
- ath9k_hw_updatetxtriglevel(ah, true);
- }
- if (ads->ds_txstatus1 & AR_TxDelimUnderrun) {
- ds->ds_txstat.ts_flags |= ATH9K_TX_DELIM_UNDERRUN;
- ath9k_hw_updatetxtriglevel(ah, true);
- }
- if (ads->ds_txstatus0 & AR_TxBaStatus) {
- ds->ds_txstat.ts_flags |= ATH9K_TX_BA;
- ds->ds_txstat.ba_low = ads->AR_BaBitmapLow;
- ds->ds_txstat.ba_high = ads->AR_BaBitmapHigh;
- }
-
- ds->ds_txstat.ts_rateindex = MS(ads->ds_txstatus9, AR_FinalTxIdx);
- switch (ds->ds_txstat.ts_rateindex) {
- case 0:
- ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate0);
- break;
- case 1:
- ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate1);
- break;
- case 2:
- ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate2);
- break;
- case 3:
- ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate3);
- break;
- }
-
- ds->ds_txstat.ts_rssi = MS(ads->ds_txstatus5, AR_TxRSSICombined);
- ds->ds_txstat.ts_rssi_ctl0 = MS(ads->ds_txstatus0, AR_TxRSSIAnt00);
- ds->ds_txstat.ts_rssi_ctl1 = MS(ads->ds_txstatus0, AR_TxRSSIAnt01);
- ds->ds_txstat.ts_rssi_ctl2 = MS(ads->ds_txstatus0, AR_TxRSSIAnt02);
- ds->ds_txstat.ts_rssi_ext0 = MS(ads->ds_txstatus5, AR_TxRSSIAnt10);
- ds->ds_txstat.ts_rssi_ext1 = MS(ads->ds_txstatus5, AR_TxRSSIAnt11);
- ds->ds_txstat.ts_rssi_ext2 = MS(ads->ds_txstatus5, AR_TxRSSIAnt12);
- ds->ds_txstat.evm0 = ads->AR_TxEVM0;
- ds->ds_txstat.evm1 = ads->AR_TxEVM1;
- ds->ds_txstat.evm2 = ads->AR_TxEVM2;
- ds->ds_txstat.ts_shortretry = MS(ads->ds_txstatus1, AR_RTSFailCnt);
- ds->ds_txstat.ts_longretry = MS(ads->ds_txstatus1, AR_DataFailCnt);
- ds->ds_txstat.ts_virtcol = MS(ads->ds_txstatus1, AR_VirtRetryCnt);
- ds->ds_txstat.ts_antenna = 0;
-
- return 0;
-}
-EXPORT_SYMBOL(ath9k_hw_txprocdesc);
-
-void ath9k_hw_set11n_txdesc(struct ath_hw *ah, struct ath_desc *ds,
- u32 pktLen, enum ath9k_pkt_type type, u32 txPower,
- u32 keyIx, enum ath9k_key_type keyType, u32 flags)
-{
- struct ar5416_desc *ads = AR5416DESC(ds);
-
- txPower += ah->txpower_indexoffset;
- if (txPower > 63)
- txPower = 63;
-
- ads->ds_ctl0 = (pktLen & AR_FrameLen)
- | (flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
- | SM(txPower, AR_XmitPower)
- | (flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
- | (flags & ATH9K_TXDESC_CLRDMASK ? AR_ClrDestMask : 0)
- | (flags & ATH9K_TXDESC_INTREQ ? AR_TxIntrReq : 0)
- | (keyIx != ATH9K_TXKEYIX_INVALID ? AR_DestIdxValid : 0);
-
- ads->ds_ctl1 =
- (keyIx != ATH9K_TXKEYIX_INVALID ? SM(keyIx, AR_DestIdx) : 0)
- | SM(type, AR_FrameType)
- | (flags & ATH9K_TXDESC_NOACK ? AR_NoAck : 0)
- | (flags & ATH9K_TXDESC_EXT_ONLY ? AR_ExtOnly : 0)
- | (flags & ATH9K_TXDESC_EXT_AND_CTL ? AR_ExtAndCtl : 0);
-
- ads->ds_ctl6 = SM(keyType, AR_EncrType);
-
- if (AR_SREV_9285(ah)) {
- ads->ds_ctl8 = 0;
- ads->ds_ctl9 = 0;
- ads->ds_ctl10 = 0;
- ads->ds_ctl11 = 0;
- }
-}
-EXPORT_SYMBOL(ath9k_hw_set11n_txdesc);
-
-void ath9k_hw_set11n_ratescenario(struct ath_hw *ah, struct ath_desc *ds,
- struct ath_desc *lastds,
- u32 durUpdateEn, u32 rtsctsRate,
- u32 rtsctsDuration,
- struct ath9k_11n_rate_series series[],
- u32 nseries, u32 flags)
-{
- struct ar5416_desc *ads = AR5416DESC(ds);
- struct ar5416_desc *last_ads = AR5416DESC(lastds);
- u32 ds_ctl0;
-
- if (flags & (ATH9K_TXDESC_RTSENA | ATH9K_TXDESC_CTSENA)) {
- ds_ctl0 = ads->ds_ctl0;
-
- if (flags & ATH9K_TXDESC_RTSENA) {
- ds_ctl0 &= ~AR_CTSEnable;
- ds_ctl0 |= AR_RTSEnable;
- } else {
- ds_ctl0 &= ~AR_RTSEnable;
- ds_ctl0 |= AR_CTSEnable;
- }
-
- ads->ds_ctl0 = ds_ctl0;
- } else {
- ads->ds_ctl0 =
- (ads->ds_ctl0 & ~(AR_RTSEnable | AR_CTSEnable));
- }
-
- ads->ds_ctl2 = set11nTries(series, 0)
- | set11nTries(series, 1)
- | set11nTries(series, 2)
- | set11nTries(series, 3)
- | (durUpdateEn ? AR_DurUpdateEna : 0)
- | SM(0, AR_BurstDur);
-
- ads->ds_ctl3 = set11nRate(series, 0)
- | set11nRate(series, 1)
- | set11nRate(series, 2)
- | set11nRate(series, 3);
-
- ads->ds_ctl4 = set11nPktDurRTSCTS(series, 0)
- | set11nPktDurRTSCTS(series, 1);
-
- ads->ds_ctl5 = set11nPktDurRTSCTS(series, 2)
- | set11nPktDurRTSCTS(series, 3);
-
- ads->ds_ctl7 = set11nRateFlags(series, 0)
- | set11nRateFlags(series, 1)
- | set11nRateFlags(series, 2)
- | set11nRateFlags(series, 3)
- | SM(rtsctsRate, AR_RTSCTSRate);
- last_ads->ds_ctl2 = ads->ds_ctl2;
- last_ads->ds_ctl3 = ads->ds_ctl3;
-}
-EXPORT_SYMBOL(ath9k_hw_set11n_ratescenario);
-
-void ath9k_hw_set11n_aggr_first(struct ath_hw *ah, struct ath_desc *ds,
- u32 aggrLen)
-{
- struct ar5416_desc *ads = AR5416DESC(ds);
-
- ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr);
- ads->ds_ctl6 &= ~AR_AggrLen;
- ads->ds_ctl6 |= SM(aggrLen, AR_AggrLen);
-}
-EXPORT_SYMBOL(ath9k_hw_set11n_aggr_first);
-
-void ath9k_hw_set11n_aggr_middle(struct ath_hw *ah, struct ath_desc *ds,
- u32 numDelims)
-{
- struct ar5416_desc *ads = AR5416DESC(ds);
- unsigned int ctl6;
-
- ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr);
-
- ctl6 = ads->ds_ctl6;
- ctl6 &= ~AR_PadDelim;
- ctl6 |= SM(numDelims, AR_PadDelim);
- ads->ds_ctl6 = ctl6;
-}
-EXPORT_SYMBOL(ath9k_hw_set11n_aggr_middle);
-
-void ath9k_hw_set11n_aggr_last(struct ath_hw *ah, struct ath_desc *ds)
-{
- struct ar5416_desc *ads = AR5416DESC(ds);
-
- ads->ds_ctl1 |= AR_IsAggr;
- ads->ds_ctl1 &= ~AR_MoreAggr;
- ads->ds_ctl6 &= ~AR_PadDelim;
-}
-EXPORT_SYMBOL(ath9k_hw_set11n_aggr_last);
-
-void ath9k_hw_clr11n_aggr(struct ath_hw *ah, struct ath_desc *ds)
-{
- struct ar5416_desc *ads = AR5416DESC(ds);
-
- ads->ds_ctl1 &= (~AR_IsAggr & ~AR_MoreAggr);
-}
-EXPORT_SYMBOL(ath9k_hw_clr11n_aggr);
-
-void ath9k_hw_set11n_burstduration(struct ath_hw *ah, struct ath_desc *ds,
- u32 burstDuration)
-{
- struct ar5416_desc *ads = AR5416DESC(ds);
-
- ads->ds_ctl2 &= ~AR_BurstDur;
- ads->ds_ctl2 |= SM(burstDuration, AR_BurstDur);
-}
-EXPORT_SYMBOL(ath9k_hw_set11n_burstduration);
-
-void ath9k_hw_set11n_virtualmorefrag(struct ath_hw *ah, struct ath_desc *ds,
- u32 vmf)
-{
- struct ar5416_desc *ads = AR5416DESC(ds);
-
- if (vmf)
- ads->ds_ctl0 |= AR_VirtMoreFrag;
- else
- ads->ds_ctl0 &= ~AR_VirtMoreFrag;
-}
-
void ath9k_hw_gettxintrtxqs(struct ath_hw *ah, u32 *txqs)
{
*txqs &= ah->intr_txqs;
@@ -730,6 +475,8 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
} else
cwMin = qi->tqi_cwmin;
+ ENABLE_REGWRITE_BUFFER(ah);
+
REG_WRITE(ah, AR_DLCL_IFS(q),
SM(cwMin, AR_D_LCL_IFS_CWMIN) |
SM(qi->tqi_cwmax, AR_D_LCL_IFS_CWMAX) |
@@ -744,6 +491,8 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
REG_WRITE(ah, AR_DMISC(q),
AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x2);
+ REGWRITE_BUFFER_FLUSH(ah);
+
if (qi->tqi_cbrPeriod) {
REG_WRITE(ah, AR_QCBRCFG(q),
SM(qi->tqi_cbrPeriod, AR_Q_CBRCFG_INTERVAL) |
@@ -759,6 +508,8 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
AR_Q_RDYTIMECFG_EN);
}
+ REGWRITE_BUFFER_FLUSH(ah);
+
REG_WRITE(ah, AR_DCHNTIME(q),
SM(qi->tqi_burstTime, AR_D_CHNTIME_DUR) |
(qi->tqi_burstTime ? AR_D_CHNTIME_EN : 0));
@@ -776,6 +527,10 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
REG_READ(ah, AR_DMISC(q)) |
AR_D_MISC_POST_FR_BKOFF_DIS);
}
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
if (qi->tqi_qflags & TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE) {
REG_WRITE(ah, AR_DMISC(q),
REG_READ(ah, AR_DMISC(q)) |
@@ -783,6 +538,8 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
}
switch (qi->tqi_type) {
case ATH9K_TX_QUEUE_BEACON:
+ ENABLE_REGWRITE_BUFFER(ah);
+
REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q))
| AR_Q_MISC_FSP_DBA_GATED
| AR_Q_MISC_BEACON_USE
@@ -793,8 +550,20 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
AR_D_MISC_ARB_LOCKOUT_CNTRL_S)
| AR_D_MISC_BEACON_USE
| AR_D_MISC_POST_FR_BKOFF_DIS);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
+ /* cwmin and cwmax should be 0 for beacon queue */
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
+ REG_WRITE(ah, AR_DLCL_IFS(q), SM(0, AR_D_LCL_IFS_CWMIN)
+ | SM(0, AR_D_LCL_IFS_CWMAX)
+ | SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));
+ }
break;
case ATH9K_TX_QUEUE_CAB:
+ ENABLE_REGWRITE_BUFFER(ah);
+
REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q))
| AR_Q_MISC_FSP_DBA_GATED
| AR_Q_MISC_CBR_INCR_DIS1
@@ -808,6 +577,10 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q))
| (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
AR_D_MISC_ARB_LOCKOUT_CNTRL_S));
+
+ REGWRITE_BUFFER_FLUSH(ah);
+ DISABLE_REGWRITE_BUFFER(ah);
+
break;
case ATH9K_TX_QUEUE_PSPOLL:
REG_WRITE(ah, AR_QMISC(q),
@@ -829,6 +602,9 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
AR_D_MISC_POST_FR_BKOFF_DIS);
}
+ if (AR_SREV_9300_20_OR_LATER(ah))
+ REG_WRITE(ah, AR_Q_DESC_CRCCHK, AR_Q_DESC_CRCCHK_EN);
+
if (qi->tqi_qflags & TXQ_FLAG_TXOKINT_ENABLE)
ah->txok_interrupt_mask |= 1 << q;
else
@@ -856,7 +632,7 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
EXPORT_SYMBOL(ath9k_hw_resettxqueue);
int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
- u32 pa, struct ath_desc *nds, u64 tsf)
+ struct ath_rx_status *rs, u64 tsf)
{
struct ar5416_desc ads;
struct ar5416_desc *adsp = AR5416DESC(ds);
@@ -867,92 +643,76 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
ads.u.rx = adsp->u.rx;
- ds->ds_rxstat.rs_status = 0;
- ds->ds_rxstat.rs_flags = 0;
+ rs->rs_status = 0;
+ rs->rs_flags = 0;
- ds->ds_rxstat.rs_datalen = ads.ds_rxstatus1 & AR_DataLen;
- ds->ds_rxstat.rs_tstamp = ads.AR_RcvTimestamp;
+ rs->rs_datalen = ads.ds_rxstatus1 & AR_DataLen;
+ rs->rs_tstamp = ads.AR_RcvTimestamp;
if (ads.ds_rxstatus8 & AR_PostDelimCRCErr) {
- ds->ds_rxstat.rs_rssi = ATH9K_RSSI_BAD;
- ds->ds_rxstat.rs_rssi_ctl0 = ATH9K_RSSI_BAD;
- ds->ds_rxstat.rs_rssi_ctl1 = ATH9K_RSSI_BAD;
- ds->ds_rxstat.rs_rssi_ctl2 = ATH9K_RSSI_BAD;
- ds->ds_rxstat.rs_rssi_ext0 = ATH9K_RSSI_BAD;
- ds->ds_rxstat.rs_rssi_ext1 = ATH9K_RSSI_BAD;
- ds->ds_rxstat.rs_rssi_ext2 = ATH9K_RSSI_BAD;
+ rs->rs_rssi = ATH9K_RSSI_BAD;
+ rs->rs_rssi_ctl0 = ATH9K_RSSI_BAD;
+ rs->rs_rssi_ctl1 = ATH9K_RSSI_BAD;
+ rs->rs_rssi_ctl2 = ATH9K_RSSI_BAD;
+ rs->rs_rssi_ext0 = ATH9K_RSSI_BAD;
+ rs->rs_rssi_ext1 = ATH9K_RSSI_BAD;
+ rs->rs_rssi_ext2 = ATH9K_RSSI_BAD;
} else {
- ds->ds_rxstat.rs_rssi = MS(ads.ds_rxstatus4, AR_RxRSSICombined);
- ds->ds_rxstat.rs_rssi_ctl0 = MS(ads.ds_rxstatus0,
+ rs->rs_rssi = MS(ads.ds_rxstatus4, AR_RxRSSICombined);
+ rs->rs_rssi_ctl0 = MS(ads.ds_rxstatus0,
AR_RxRSSIAnt00);
- ds->ds_rxstat.rs_rssi_ctl1 = MS(ads.ds_rxstatus0,
+ rs->rs_rssi_ctl1 = MS(ads.ds_rxstatus0,
AR_RxRSSIAnt01);
- ds->ds_rxstat.rs_rssi_ctl2 = MS(ads.ds_rxstatus0,
+ rs->rs_rssi_ctl2 = MS(ads.ds_rxstatus0,
AR_RxRSSIAnt02);
- ds->ds_rxstat.rs_rssi_ext0 = MS(ads.ds_rxstatus4,
+ rs->rs_rssi_ext0 = MS(ads.ds_rxstatus4,
AR_RxRSSIAnt10);
- ds->ds_rxstat.rs_rssi_ext1 = MS(ads.ds_rxstatus4,
+ rs->rs_rssi_ext1 = MS(ads.ds_rxstatus4,
AR_RxRSSIAnt11);
- ds->ds_rxstat.rs_rssi_ext2 = MS(ads.ds_rxstatus4,
+ rs->rs_rssi_ext2 = MS(ads.ds_rxstatus4,
AR_RxRSSIAnt12);
}
if (ads.ds_rxstatus8 & AR_RxKeyIdxValid)
- ds->ds_rxstat.rs_keyix = MS(ads.ds_rxstatus8, AR_KeyIdx);
+ rs->rs_keyix = MS(ads.ds_rxstatus8, AR_KeyIdx);
else
- ds->ds_rxstat.rs_keyix = ATH9K_RXKEYIX_INVALID;
+ rs->rs_keyix = ATH9K_RXKEYIX_INVALID;
- ds->ds_rxstat.rs_rate = RXSTATUS_RATE(ah, (&ads));
- ds->ds_rxstat.rs_more = (ads.ds_rxstatus1 & AR_RxMore) ? 1 : 0;
+ rs->rs_rate = RXSTATUS_RATE(ah, (&ads));
+ rs->rs_more = (ads.ds_rxstatus1 & AR_RxMore) ? 1 : 0;
- ds->ds_rxstat.rs_isaggr = (ads.ds_rxstatus8 & AR_RxAggr) ? 1 : 0;
- ds->ds_rxstat.rs_moreaggr =
+ rs->rs_isaggr = (ads.ds_rxstatus8 & AR_RxAggr) ? 1 : 0;
+ rs->rs_moreaggr =
(ads.ds_rxstatus8 & AR_RxMoreAggr) ? 1 : 0;
- ds->ds_rxstat.rs_antenna = MS(ads.ds_rxstatus3, AR_RxAntenna);
- ds->ds_rxstat.rs_flags =
+ rs->rs_antenna = MS(ads.ds_rxstatus3, AR_RxAntenna);
+ rs->rs_flags =
(ads.ds_rxstatus3 & AR_GI) ? ATH9K_RX_GI : 0;
- ds->ds_rxstat.rs_flags |=
+ rs->rs_flags |=
(ads.ds_rxstatus3 & AR_2040) ? ATH9K_RX_2040 : 0;
if (ads.ds_rxstatus8 & AR_PreDelimCRCErr)
- ds->ds_rxstat.rs_flags |= ATH9K_RX_DELIM_CRC_PRE;
+ rs->rs_flags |= ATH9K_RX_DELIM_CRC_PRE;
if (ads.ds_rxstatus8 & AR_PostDelimCRCErr)
- ds->ds_rxstat.rs_flags |= ATH9K_RX_DELIM_CRC_POST;
+ rs->rs_flags |= ATH9K_RX_DELIM_CRC_POST;
if (ads.ds_rxstatus8 & AR_DecryptBusyErr)
- ds->ds_rxstat.rs_flags |= ATH9K_RX_DECRYPT_BUSY;
+ rs->rs_flags |= ATH9K_RX_DECRYPT_BUSY;
if ((ads.ds_rxstatus8 & AR_RxFrameOK) == 0) {
if (ads.ds_rxstatus8 & AR_CRCErr)
- ds->ds_rxstat.rs_status |= ATH9K_RXERR_CRC;
+ rs->rs_status |= ATH9K_RXERR_CRC;
else if (ads.ds_rxstatus8 & AR_PHYErr) {
- ds->ds_rxstat.rs_status |= ATH9K_RXERR_PHY;
+ rs->rs_status |= ATH9K_RXERR_PHY;
phyerr = MS(ads.ds_rxstatus8, AR_PHYErrCode);
- ds->ds_rxstat.rs_phyerr = phyerr;
+ rs->rs_phyerr = phyerr;
} else if (ads.ds_rxstatus8 & AR_DecryptCRCErr)
- ds->ds_rxstat.rs_status |= ATH9K_RXERR_DECRYPT;
+ rs->rs_status |= ATH9K_RXERR_DECRYPT;
else if (ads.ds_rxstatus8 & AR_MichaelErr)
- ds->ds_rxstat.rs_status |= ATH9K_RXERR_MIC;
+ rs->rs_status |= ATH9K_RXERR_MIC;
}
return 0;
}
EXPORT_SYMBOL(ath9k_hw_rxprocdesc);
-void ath9k_hw_setuprxdesc(struct ath_hw *ah, struct ath_desc *ds,
- u32 size, u32 flags)
-{
- struct ar5416_desc *ads = AR5416DESC(ds);
- struct ath9k_hw_capabilities *pCap = &ah->caps;
-
- ads->ds_ctl1 = size & AR_BufLen;
- if (flags & ATH9K_RXDESC_INTREQ)
- ads->ds_ctl1 |= AR_RxIntrReq;
-
- ads->ds_rxstatus8 &= ~AR_RxDone;
- if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
- memset(&(ads->u), 0, sizeof(ads->u));
-}
-EXPORT_SYMBOL(ath9k_hw_setuprxdesc);
-
/*
 * This can stop or re-enable RX.
*
@@ -996,12 +756,6 @@ void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp)
}
EXPORT_SYMBOL(ath9k_hw_putrxbuf);
-void ath9k_hw_rxena(struct ath_hw *ah)
-{
- REG_WRITE(ah, AR_CR, AR_CR_RXE);
-}
-EXPORT_SYMBOL(ath9k_hw_rxena);
-
void ath9k_hw_startpcureceive(struct ath_hw *ah)
{
ath9k_enable_mib_counters(ah);
@@ -1020,6 +774,14 @@ void ath9k_hw_stoppcurecv(struct ath_hw *ah)
}
EXPORT_SYMBOL(ath9k_hw_stoppcurecv);
+void ath9k_hw_abortpcurecv(struct ath_hw *ah)
+{
+ REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_ABORT | AR_DIAG_RX_DIS);
+
+ ath9k_hw_disable_mib_counters(ah);
+}
+EXPORT_SYMBOL(ath9k_hw_abortpcurecv);
+
bool ath9k_hw_stopdmarecv(struct ath_hw *ah)
{
#define AH_RX_STOP_DMA_TIMEOUT 10000 /* usec */
@@ -1065,3 +827,142 @@ int ath9k_hw_beaconq_setup(struct ath_hw *ah)
return ath9k_hw_setuptxqueue(ah, ATH9K_TX_QUEUE_BEACON, &qi);
}
EXPORT_SYMBOL(ath9k_hw_beaconq_setup);
+
+bool ath9k_hw_intrpend(struct ath_hw *ah)
+{
+ u32 host_isr;
+
+ if (AR_SREV_9100(ah))
+ return true;
+
+ host_isr = REG_READ(ah, AR_INTR_ASYNC_CAUSE);
+ if ((host_isr & AR_INTR_MAC_IRQ) && (host_isr != AR_INTR_SPURIOUS))
+ return true;
+
+ host_isr = REG_READ(ah, AR_INTR_SYNC_CAUSE);
+ if ((host_isr & AR_INTR_SYNC_DEFAULT)
+ && (host_isr != AR_INTR_SPURIOUS))
+ return true;
+
+ return false;
+}
+EXPORT_SYMBOL(ath9k_hw_intrpend);
+
+enum ath9k_int ath9k_hw_set_interrupts(struct ath_hw *ah,
+ enum ath9k_int ints)
+{
+ enum ath9k_int omask = ah->imask;
+ u32 mask, mask2;
+ struct ath9k_hw_capabilities *pCap = &ah->caps;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ ath_print(common, ATH_DBG_INTERRUPT, "0x%x => 0x%x\n", omask, ints);
+
+ if (omask & ATH9K_INT_GLOBAL) {
+ ath_print(common, ATH_DBG_INTERRUPT, "disable IER\n");
+ REG_WRITE(ah, AR_IER, AR_IER_DISABLE);
+ (void) REG_READ(ah, AR_IER);
+ if (!AR_SREV_9100(ah)) {
+ REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, 0);
+ (void) REG_READ(ah, AR_INTR_ASYNC_ENABLE);
+
+ REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0);
+ (void) REG_READ(ah, AR_INTR_SYNC_ENABLE);
+ }
+ }
+
+ /* TODO: global int Ref count */
+ mask = ints & ATH9K_INT_COMMON;
+ mask2 = 0;
+
+ if (ints & ATH9K_INT_TX) {
+ if (ah->config.tx_intr_mitigation)
+ mask |= AR_IMR_TXMINTR | AR_IMR_TXINTM;
+ else {
+ if (ah->txok_interrupt_mask)
+ mask |= AR_IMR_TXOK;
+ if (ah->txdesc_interrupt_mask)
+ mask |= AR_IMR_TXDESC;
+ }
+ if (ah->txerr_interrupt_mask)
+ mask |= AR_IMR_TXERR;
+ if (ah->txeol_interrupt_mask)
+ mask |= AR_IMR_TXEOL;
+ }
+ if (ints & ATH9K_INT_RX) {
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
+ mask |= AR_IMR_RXERR | AR_IMR_RXOK_HP;
+ if (ah->config.rx_intr_mitigation) {
+ mask &= ~AR_IMR_RXOK_LP;
+ mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM;
+ } else {
+ mask |= AR_IMR_RXOK_LP;
+ }
+ } else {
+ if (ah->config.rx_intr_mitigation)
+ mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM;
+ else
+ mask |= AR_IMR_RXOK | AR_IMR_RXDESC;
+ }
+ if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
+ mask |= AR_IMR_GENTMR;
+ }
+
+ if (ints & (ATH9K_INT_BMISC)) {
+ mask |= AR_IMR_BCNMISC;
+ if (ints & ATH9K_INT_TIM)
+ mask2 |= AR_IMR_S2_TIM;
+ if (ints & ATH9K_INT_DTIM)
+ mask2 |= AR_IMR_S2_DTIM;
+ if (ints & ATH9K_INT_DTIMSYNC)
+ mask2 |= AR_IMR_S2_DTIMSYNC;
+ if (ints & ATH9K_INT_CABEND)
+ mask2 |= AR_IMR_S2_CABEND;
+ if (ints & ATH9K_INT_TSFOOR)
+ mask2 |= AR_IMR_S2_TSFOOR;
+ }
+
+ if (ints & (ATH9K_INT_GTT | ATH9K_INT_CST)) {
+ mask |= AR_IMR_BCNMISC;
+ if (ints & ATH9K_INT_GTT)
+ mask2 |= AR_IMR_S2_GTT;
+ if (ints & ATH9K_INT_CST)
+ mask2 |= AR_IMR_S2_CST;
+ }
+
+ ath_print(common, ATH_DBG_INTERRUPT, "new IMR 0x%x\n", mask);
+ REG_WRITE(ah, AR_IMR, mask);
+ ah->imrs2_reg &= ~(AR_IMR_S2_TIM | AR_IMR_S2_DTIM | AR_IMR_S2_DTIMSYNC |
+ AR_IMR_S2_CABEND | AR_IMR_S2_CABTO |
+ AR_IMR_S2_TSFOOR | AR_IMR_S2_GTT | AR_IMR_S2_CST);
+ ah->imrs2_reg |= mask2;
+ REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);
+
+ if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
+ if (ints & ATH9K_INT_TIM_TIMER)
+ REG_SET_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
+ else
+ REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
+ }
+
+ if (ints & ATH9K_INT_GLOBAL) {
+ ath_print(common, ATH_DBG_INTERRUPT, "enable IER\n");
+ REG_WRITE(ah, AR_IER, AR_IER_ENABLE);
+ if (!AR_SREV_9100(ah)) {
+ REG_WRITE(ah, AR_INTR_ASYNC_ENABLE,
+ AR_INTR_MAC_IRQ);
+ REG_WRITE(ah, AR_INTR_ASYNC_MASK, AR_INTR_MAC_IRQ);
+
+
+ REG_WRITE(ah, AR_INTR_SYNC_ENABLE,
+ AR_INTR_SYNC_DEFAULT);
+ REG_WRITE(ah, AR_INTR_SYNC_MASK,
+ AR_INTR_SYNC_DEFAULT);
+ }
+ ath_print(common, ATH_DBG_INTERRUPT, "AR_IMR 0x%x IER 0x%x\n",
+ REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER));
+ }
+
+ return omask;
+}
+EXPORT_SYMBOL(ath9k_hw_set_interrupts);
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index 29851e6376a9..00f3e0c7528a 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -37,6 +37,8 @@
AR_2040_##_index : 0) \
|((_series)[_index].RateFlags & ATH9K_RATESERIES_HALFGI ? \
AR_GI##_index : 0) \
+ |((_series)[_index].RateFlags & ATH9K_RATESERIES_STBC ? \
+ AR_STBC##_index : 0) \
|SM((_series)[_index].ChSel, AR_ChainSel##_index))
#define CCK_SIFS_TIME 10
@@ -86,7 +88,6 @@
#define ATH9K_TX_DESC_CFG_ERR 0x04
#define ATH9K_TX_DATA_UNDERRUN 0x08
#define ATH9K_TX_DELIM_UNDERRUN 0x10
-#define ATH9K_TX_SW_ABORTED 0x40
#define ATH9K_TX_SW_FILTERED 0x80
/* 64 bytes */
@@ -117,7 +118,10 @@ struct ath_tx_status {
int8_t ts_rssi_ext0;
int8_t ts_rssi_ext1;
int8_t ts_rssi_ext2;
- u8 pad[3];
+ u8 qid;
+ u16 desc_id;
+ u8 tid;
+ u8 pad[2];
u32 ba_low;
u32 ba_high;
u32 evm0;
@@ -148,6 +152,34 @@ struct ath_rx_status {
u32 evm0;
u32 evm1;
u32 evm2;
+ u32 evm3;
+ u32 evm4;
+};
+
+struct ath_htc_rx_status {
+ __be64 rs_tstamp;
+ __be16 rs_datalen;
+ u8 rs_status;
+ u8 rs_phyerr;
+ int8_t rs_rssi;
+ int8_t rs_rssi_ctl0;
+ int8_t rs_rssi_ctl1;
+ int8_t rs_rssi_ctl2;
+ int8_t rs_rssi_ext0;
+ int8_t rs_rssi_ext1;
+ int8_t rs_rssi_ext2;
+ u8 rs_keyix;
+ u8 rs_rate;
+ u8 rs_antenna;
+ u8 rs_more;
+ u8 rs_isaggr;
+ u8 rs_moreaggr;
+ u8 rs_num_delims;
+ u8 rs_flags;
+ u8 rs_dummy;
+ __be32 evm0;
+ __be32 evm1;
+ __be32 evm2;
};
#define ATH9K_RXERR_CRC 0x01
@@ -207,18 +239,9 @@ struct ath_desc {
u32 ds_ctl0;
u32 ds_ctl1;
u32 ds_hw[20];
- union {
- struct ath_tx_status tx;
- struct ath_rx_status rx;
- void *stats;
- } ds_us;
void *ds_vdata;
} __packed;
-#define ds_txstat ds_us.tx
-#define ds_rxstat ds_us.rx
-#define ds_stat ds_us.stats
-
#define ATH9K_TXDESC_CLRDMASK 0x0001
#define ATH9K_TXDESC_NOACK 0x0002
#define ATH9K_TXDESC_RTSENA 0x0004
@@ -242,7 +265,8 @@ struct ath_desc {
#define ATH9K_TXDESC_EXT_AND_CTL 0x0080
#define ATH9K_TXDESC_VMF 0x0100
#define ATH9K_TXDESC_FRAG_IS_ON 0x0200
-#define ATH9K_TXDESC_CAB 0x0400
+#define ATH9K_TXDESC_LOWRXCHAIN 0x0400
+#define ATH9K_TXDESC_LDPC 0x00010000
#define ATH9K_RXDESC_INTREQ 0x0020
@@ -336,7 +360,6 @@ struct ar5416_desc {
#define AR_DestIdxValid 0x40000000
#define AR_CTSEnable 0x80000000
-#define AR_BufLen 0x00000fff
#define AR_TxMore 0x00001000
#define AR_DestIdx 0x000fe000
#define AR_DestIdx_S 13
@@ -393,6 +416,7 @@ struct ar5416_desc {
#define AR_EncrType 0x0c000000
#define AR_EncrType_S 26
#define AR_TxCtlRsvd61 0xf0000000
+#define AR_LDPC 0x80000000
#define AR_2040_0 0x00000001
#define AR_GI0 0x00000002
@@ -412,7 +436,10 @@ struct ar5416_desc {
#define AR_ChainSel3_S 17
#define AR_RTSCTSRate 0x0ff00000
#define AR_RTSCTSRate_S 20
-#define AR_TxCtlRsvd70 0xf0000000
+#define AR_STBC0 0x10000000
+#define AR_STBC1 0x20000000
+#define AR_STBC2 0x40000000
+#define AR_STBC3 0x80000000
#define AR_TxRSSIAnt00 0x000000ff
#define AR_TxRSSIAnt00_S 0
@@ -476,7 +503,6 @@ struct ar5416_desc {
#define AR_RxCTLRsvd00 0xffffffff
-#define AR_BufLen 0x00000fff
#define AR_RxCtlRsvd00 0x00001000
#define AR_RxIntrReq 0x00002000